==== pax_global_header: comment=b89c2a99daf39f6edcc56dffd22b7b4d22eef76b ====

==== aws-crt-python-0.20.4+dfsg/.builder/actions/aws_crt_python.py ====

import Builder
import argparse
import os
import sys

# Fall back on using the "{python}" builder variable
PYTHON_DEFAULT = '{python}'


class AWSCrtPython(Builder.Action):
    python = PYTHON_DEFAULT

    # Some CI containers have pip installed via "rpm" or non-Python methods, and this causes issues when
    # we try to update pip via "python -m pip install --upgrade" because there are no RECORD files present.
    # Therefore, we have to seek alternative ways with a last resort of installing with "--ignore-installed"
    # if nothing else works AND the builder is running in GitHub actions.
    # As of writing, this is primarily an issue with the AL2-x64 image.
    def try_to_upgrade_pip(self, env):
        did_upgrade = False

        if self.python == '{python}':
            self.python = env.config["variables"]["python"]

        pip_result = env.shell.exec(self.python, '-m', 'pip', 'install', '--upgrade', 'pip', check=False)
        if pip_result.returncode == 0:
            did_upgrade = True
        else:
            print("Could not update pip via normal pip upgrade. Next trying via package manager...")

        if not did_upgrade:
            try:
                Builder.InstallPackages(['pip']).run(env)
                did_upgrade = True
            except Exception:
                print("Could not update pip via package manager. Next resorting to forcing an ignore install...")

        if not did_upgrade:
            # Only run in GitHub actions by checking for specific environment variable
            # Source: https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables
            if os.getenv("GITHUB_ACTIONS") is not None:
                pip_result = env.shell.exec(
                    self.python, '-m', 'pip', 'install', '--upgrade', '--ignore-installed', 'pip', check=False)
                if pip_result.returncode == 0:
                    did_upgrade = True
                else:
                    print("Could not update pip via ignore install! Something is terribly wrong!")
                    sys.exit(12)
            else:
                print("Not on GitHub actions - skipping reinstalling Pip. Update/Install pip manually and rerun the builder")

    def run(self, env):
        # allow custom python to be used
        parser = argparse.ArgumentParser()
        parser.add_argument('--python')
        args = parser.parse_known_args(env.args.args)[0]
        self.python = args.python if args.python else PYTHON_DEFAULT

        # Enable S3 tests
        env.shell.setenv('AWS_TEST_S3', '1')

        actions = [
            # Upgrade Pip via a number of different methods
            self.try_to_upgrade_pip,
            [self.python, '-m', 'pip', 'install', '--upgrade', '--requirement', 'requirements-dev.txt'],
            Builder.SetupCrossCICrtEnvironment(),
            [self.python, '-m', 'pip', 'install', '--verbose', '.'],
            # "--failfast" because, given how our leak-detection in tests currently works,
            # once one test fails all the rest usually fail too.
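            # (Illustrative note, not part of the original action: as the
            # "tests-ok-without-env-vars" CI job later in this archive shows, the
            # equivalent local invocation is `python3 -m pip install .` followed by
            # `python3 -m unittest discover --failfast --verbose` from the repo root.)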
[self.python, '-m', 'unittest', 'discover', '--verbose', '--failfast'], # http_client_test.py launches external processes using the extra args [self.python, 'crt/aws-c-http/integration-testing/http_client_test.py', self.python, 'elasticurl.py'], ] return Builder.Script(actions, name='aws-crt-python') aws-crt-python-0.20.4+dfsg/.clang-format000066400000000000000000000031611456575232400200040ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/.clang-tidy000066400000000000000000000012131456575232400174610ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*' WarningsAsErrors: '*' HeaderFilterRegex: '\./*' FormatStyle: 'file' CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSufix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h' ... aws-crt-python-0.20.4+dfsg/.github/000077500000000000000000000000001456575232400167705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400211535ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045211456575232400237660ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? A clear and concise description of the bug. validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? 
validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. validations: required: false - type: input id: aws-crt-python-version attributes: label: aws-crt-python version used validations: required: true - type: input id: compiler-version attributes: label: Python version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003331456575232400231420ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-crt-python/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400245440ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). validations: required: true aws-crt-python-0.20.4+dfsg/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400250220ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. 
validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400225670ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. aws-crt-python-0.20.4+dfsg/.github/workflows/000077500000000000000000000000001456575232400210255ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/.github/workflows/ci.yml000066400000000000000000000241211456575232400221430ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' - 'docs' env: BUILDER_VERSION: v0.9.56 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-crt-python LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_REGION: us-east-1 jobs: manylinux1: runs-on: ubuntu-latest strategy: fail-fast: false matrix: image: - x64 - x86 python: - cp37-cp37m - cp38-cp38 - cp39-cp39 steps: - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-manylinux1-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} --python /opt/python/${{ matrix.python }}/bin/python manylinux2014: runs-on: ubuntu-20.04 # latest strategy: fail-fast: false matrix: image: - x64 - x86 - aarch64 python: - cp37-cp37m - cp38-cp38 - cp39-cp39 - cp310-cp310 - cp311-cp311 - cp312-cp312 steps: # Only aarch64 needs this, but it doesn't hurt anything - name: Install qemu/docker run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-manylinux2014-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} --python /opt/python/${{ matrix.python }}/bin/python musllinux-1-1: runs-on: ubuntu-22.04 # latest strategy: fail-fast: false matrix: image: - x64 - aarch64 python: - cp37-cp37m - cp38-cp38 - cp39-cp39 - cp310-cp310 - cp311-cp311 - cp312-cp312 steps: # Only aarch64 needs this, but it doesn't hurt anything - name: Install qemu/docker run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-musllinux-1-1-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} --python /opt/python/${{ matrix.python }}/bin/python raspberry: runs-on: ubuntu-20.04 # latest strategy: fail-fast: 
false matrix: image: - raspbian-bullseye steps: # set arm arch - name: Install qemu/docker run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compat: runs-on: ubuntu-22.04 # latest strategy: matrix: image: - al2-x64 - fedora-34-x64 - opensuse-leap - rhel8-x64 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-22.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} use-system-libcrypto: runs-on: ubuntu-20.04 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} env: AWS_CRT_BUILD_USE_SYSTEM_LIBCRYPTO: '1' run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} - name: Assert libcrypto.so used run: | # assert it's linked against the system's libcrypto.so AWSCRT_PATH=`python3 -c "import _awscrt; print(_awscrt.__file__)"` printf "AWSCRT_PATH: $AWSCRT_PATH\n" LINKED_AGAINST=`ldd $AWSCRT_PATH` printf "LINKED AGAINST:\n$LINKED_AGAINST\n" USES_LIBCRYPTO_SO=`echo "$LINKED_AGAINST" | grep 'libcrypto*.so' | head -1` test -n "$USES_LIBCRYPTO_SO" windows: runs-on: windows-2022 # latest strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --python "C:\\hostedtoolcache\\windows\\Python\\3.7.9\\${{ matrix.arch }}\\python.exe" osx: runs-on: macos-13 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} openbsd: runs-on: ubuntu-22.04 # latest steps: # Cannot use builder to checkout as OpenBSD doesn't ship git in the base install - uses: actions/checkout@v3 with: submodules: true - name: Build ${{ env.PACKAGE_NAME }} + consumers uses: cross-platform-actions/action@v0.23.0 with: operating_system: openbsd version: 
'7.4' cpu_count: 4 shell: bash environment_variables: AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_DEFAULT_REGION AWS_REGION run: | sudo pkg_add awscli py3-pip py3-urllib3 python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} freebsd: runs-on: ubuntu-22.04 # latest steps: - uses: actions/checkout@v3 with: submodules: true - name: Build ${{ env.PACKAGE_NAME }} + consumers uses: cross-platform-actions/action@v0.23.0 with: operating_system: freebsd version: '14.0' cpu_count: 4 shell: bash environment_variables: AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_DEFAULT_REGION AWS_REGION run: | sudo pkg install -y python3 py39-urllib3 py39-pip py39-awscli cmake python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} # check that tests requiring custom env-vars or AWS credentials are simply skipped tests-ok-without-env-vars: runs-on: ubuntu-22.04 # latest steps: - uses: actions/checkout@v3 with: submodules: true - name: Run tests without env-vars or AWS creds env: # unset env-vars that provide AWS credentials AWS_ACCESS_KEY_ID: AWS_SECRET_ACCESS_KEY: AWS_DEFAULT_REGION: run: | python3 -m pip install --upgrade --requirement requirements-dev.txt python3 -m pip install . --verbose python3 -m unittest discover --failfast --verbose package-source: runs-on: ubuntu-22.04 # latest steps: - uses: actions/checkout@v3 with: submodules: true - name: Package source + install run: | python3 setup.py sdist cd dist python3 -m pip install -v awscrt-1.0.0.dev0.tar.gz python3 -c "import awscrt.io" # check that docs can still build check-docs: runs-on: ubuntu-22.04 # latest steps: - uses: actions/checkout@v3 with: submodules: true - name: Check docs run: | python3 -m pip install sphinx python3 -m pip install --verbose . ./scripts/make-docs.py check-submodules: runs-on: ubuntu-22.04 # latest steps: - name: Checkout Source uses: actions/checkout@v3 with: submodules: true fetch-depth: 0 - name: Check Submodules # note: using "@main" because "@${{env.BUILDER_VERSION}}" doesn't work # https://github.com/actions/runner/issues/480 uses: awslabs/aws-crt-builder/.github/actions/check-submodules@main aws-crt-python-0.20.4+dfsg/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400255740ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. aws-crt-python-0.20.4+dfsg/.github/workflows/docs.yml000066400000000000000000000022651456575232400225050ustar00rootroot00000000000000# Update the API documentation whenever the `main` branch changes. # This documentation lives in its own `docs` branch. 
name: docs on: push: branches: - 'main' jobs: update-docs-branch: runs-on: ubuntu-20.04 # latest permissions: contents: write # allow push steps: - name: Checkout uses: actions/checkout@v3 with: submodules: true - name: Update docs branch run: | python3 -m pip install sphinx python3 -m pip install --verbose . ./scripts/make-docs.py - name: Commit run: | git config --local user.email "action@github.com" git config --local user.name "GitHub Action" git add --force docs git commit --message="update docs" - name: Push to docs branch uses: ad-m/github-push-action@v0.6.0 with: github_token: ${{ github.token }} branch: docs # Force push so that `docs` branch always looks like `main`, # but with 1 additional "update docs" commit. # This seems simpler than trying to cleanly merge `main` into # `docs` each time. force: true aws-crt-python-0.20.4+dfsg/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006511456575232400264570ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} aws-crt-python-0.20.4+dfsg/.github/workflows/lint.yml000066400000000000000000000013721456575232400225210ustar00rootroot00000000000000name: Lint on: push: branches-ignore: - 'main' - 'docs' jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v2 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h autopep8: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Source uses: actions/checkout@v2 - name: Build and Test run: | python3 -m pip install --upgrade setuptools python3 -m pip install --upgrade wheel python3 -m pip install --upgrade autopep8 python3 -m autopep8 --exit-code --diff --recursive awscrt test .builder setup.py aws-crt-python-0.20.4+dfsg/.github/workflows/stale_issue.yml000066400000000000000000000046321456575232400240750ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job permissions: issues: write pull-requests: write steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deservers. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! 
It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/.gitignore000066400000000000000000000276171456575232400174350ustar00rootroot00000000000000# Created by https://www.toptal.com/developers/gitignore/api/git,c++,cmake,python,visualstudio,visualstudiocode,macos # Edit at https://www.toptal.com/developers/gitignore?templates=git,c++,cmake,python,visualstudio,visualstudiocode,macos ### C++ ### # Prerequisites *.d # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod *.smod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app ### CMake ### CMakeLists.txt.user CMakeCache.txt CMakeFiles CMakeScripts Testing Makefile cmake_install.cmake install_manifest.txt compile_commands.json CTestTestfile.cmake _deps ### CMake Patch ### # External projects *-prefix/ ### Git ### # Created by git for backups. To disable backups in Git: # $ git config --global mergetool.keepBackup false *.orig # Created by git when using merge tools for conflicts *.BACKUP.* *.BASE.* *.LOCAL.* *.REMOTE.* *_BACKUP_*.txt *_BASE_*.txt *_LOCAL_*.txt *_REMOTE_*.txt ### macOS ### # General .DS_Store .AppleDouble .LSOverride # Icon must end with two \r Icon # Thumbnails ._* # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd .Spotlight-V100 .TemporaryItems .Trashes .VolumeIcon.icns .com.apple.timemachine.donotpresent # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk ### macOS Patch ### # iCloud generated files *.icloud ### Python ### # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder .pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. # https://pdm.fming.dev/#use-with-ide .pdm.toml # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Cython debug symbols cython_debug/ # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ ### Python Patch ### # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration poetry.toml # ruff .ruff_cache/ # LSP config files pyrightconfig.json ### VisualStudioCode ### .vscode/* !.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json !.vscode/*.code-snippets # Local History for Visual Studio Code .history/ # Built Visual Studio Code Extensions *.vsix ### VisualStudioCode Patch ### # Ignore all local history of files .history .ionide ### VisualStudio ### ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. 
## ## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore # User-specific files *.rsuser *.suo *.user *.userosscache *.sln.docstates # User-specific files (MonoDevelop/Xamarin Studio) *.userprefs # Mono auto generated files mono_crash.* # Build results [Dd]ebug/ [Dd]ebugPublic/ [Rr]elease/ [Rr]eleases/ x64/ x86/ [Ww][Ii][Nn]32/ [Aa][Rr][Mm]/ [Aa][Rr][Mm]64/ bld/ [Bb]in/ [Oo]bj/ [Ll]og/ [Ll]ogs/ # Visual Studio 2015/2017 cache/options directory .vs/ # Uncomment if you have tasks that create the project's static files in wwwroot #wwwroot/ # Visual Studio 2017 auto generated files Generated\ Files/ # MSTest test Results [Tt]est[Rr]esult*/ [Bb]uild[Ll]og.* # NUnit *.VisualState.xml TestResult.xml nunit-*.xml # Build Results of an ATL Project [Dd]ebugPS/ [Rr]eleasePS/ dlldata.c # Benchmark Results BenchmarkDotNet.Artifacts/ # .NET Core project.lock.json project.fragment.lock.json artifacts/ # ASP.NET Scaffolding ScaffoldingReadMe.txt # StyleCop StyleCopReport.xml # Files built by Visual Studio *_i.c *_p.c *_h.h *.ilk *.meta *.iobj *.pdb *.ipdb *.pgc *.pgd *.rsp *.sbr *.tlb *.tli *.tlh *.tmp *.tmp_proj *_wpftmp.csproj *.tlog *.vspscc *.vssscc .builds *.pidb *.svclog *.scc # Chutzpah Test files _Chutzpah* # Visual C++ cache files ipch/ *.aps *.ncb *.opendb *.opensdf *.sdf *.cachefile *.VC.db *.VC.VC.opendb # Visual Studio profiler *.psess *.vsp *.vspx *.sap # Visual Studio Trace Files *.e2e # TFS 2012 Local Workspace $tf/ # Guidance Automation Toolkit *.gpState # ReSharper is a .NET coding add-in _ReSharper*/ *.[Rr]e[Ss]harper *.DotSettings.user # TeamCity is a build add-in _TeamCity* # DotCover is a Code Coverage Tool *.dotCover # AxoCover is a Code Coverage Tool .axoCover/* !.axoCover/settings.json # Coverlet is a free, cross platform Code Coverage Tool coverage*.json coverage*.xml coverage*.info # Visual Studio code coverage results *.coverage *.coveragexml # NCrunch _NCrunch_* .*crunch*.local.xml nCrunchTemp_* # MightyMoose *.mm.* AutoTest.Net/ # Web workbench (sass) .sass-cache/ # Installshield output folder [Ee]xpress/ # DocProject is a documentation generator add-in DocProject/buildhelp/ DocProject/Help/*.HxT DocProject/Help/*.HxC DocProject/Help/*.hhc DocProject/Help/*.hhk DocProject/Help/*.hhp DocProject/Help/Html2 DocProject/Help/html # Click-Once directory publish/ # Publish Web Output *.[Pp]ublish.xml *.azurePubxml # Note: Comment the next line if you want to checkin your web deploy settings, # but database connection strings (with potential passwords) will be unencrypted *.pubxml *.publishproj # Microsoft Azure Web App publish settings. Comment the next line if you want to # checkin your Azure Web App publish settings, but sensitive information contained # in these scripts will be unencrypted PublishScripts/ # NuGet Packages *.nupkg # NuGet Symbol Packages *.snupkg # The packages folder can be ignored because of Package Restore **/[Pp]ackages/* # except build/, which is used as an MSBuild target. 
!**/[Pp]ackages/build/ # Uncomment if necessary however generally it will be regenerated when needed #!**/[Pp]ackages/repositories.config # NuGet v3's project.json files produces more ignorable files *.nuget.props *.nuget.targets # Microsoft Azure Build Output csx/ *.build.csdef # Microsoft Azure Emulator ecf/ rcf/ # Windows Store app package directories and files AppPackages/ BundleArtifacts/ Package.StoreAssociation.xml _pkginfo.txt *.appx *.appxbundle *.appxupload # Visual Studio cache files # files ending in .cache can be ignored *.[Cc]ache # but keep track of directories ending in .cache !?*.[Cc]ache/ # Others ClientBin/ ~$* *~ *.dbmdl *.dbproj.schemaview *.jfm *.pfx *.publishsettings orleans.codegen.cs # Including strong name files can present a security risk # (https://github.com/github/gitignore/pull/2483#issue-259490424) #*.snk # Since there are multiple workflows, uncomment next line to ignore bower_components # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) #bower_components/ # RIA/Silverlight projects Generated_Code/ # Backup & report files from converting an old project file # to a newer Visual Studio version. Backup files are not needed, # because we have git ;-) _UpgradeReport_Files/ Backup*/ UpgradeLog*.XML UpgradeLog*.htm ServiceFabricBackup/ *.rptproj.bak # SQL Server files *.mdf *.ldf *.ndf # Business Intelligence projects *.rdl.data *.bim.layout *.bim_*.settings *.rptproj.rsuser *- [Bb]ackup.rdl *- [Bb]ackup ([0-9]).rdl *- [Bb]ackup ([0-9][0-9]).rdl # Microsoft Fakes FakesAssemblies/ # GhostDoc plugin setting file *.GhostDoc.xml # Node.js Tools for Visual Studio .ntvs_analysis.dat node_modules/ # Visual Studio 6 build log *.plg # Visual Studio 6 workspace options file *.opt # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) *.vbw # Visual Studio 6 auto-generated project file (contains which files were open etc.) 
*.vbp # Visual Studio 6 workspace and project file (working project files containing files to include in project) *.dsw *.dsp # Visual Studio 6 technical files # Visual Studio LightSwitch build output **/*.HTMLClient/GeneratedArtifacts **/*.DesktopClient/GeneratedArtifacts **/*.DesktopClient/ModelManifest.xml **/*.Server/GeneratedArtifacts **/*.Server/ModelManifest.xml _Pvt_Extensions # Paket dependency manager .paket/paket.exe paket-files/ # FAKE - F# Make .fake/ # CodeRush personal settings .cr/personal # Python Tools for Visual Studio (PTVS) *.pyc # Cake - Uncomment if you are using it # tools/** # !tools/packages.config # Tabs Studio *.tss # Telerik's JustMock configuration file *.jmconfig # BizTalk build output *.btp.cs *.btm.cs *.odx.cs *.xsd.cs # OpenCover UI analysis results OpenCover/ # Azure Stream Analytics local run output ASALocalRun/ # MSBuild Binary and Structured Log *.binlog # NVidia Nsight GPU debugger configuration file *.nvuser # MFractors (Xamarin productivity tool) working folder .mfractor/ # Local History for Visual Studio .localhistory/ # Visual Studio History (VSHistory) files .vshistory/ # BeatPulse healthcheck temp database healthchecksdb # Backup folder for Package Reference Convert tool in Visual Studio 2017 MigrationBackup/ # Ionide (cross platform F# VS Code tools) working folder .ionide/ # Fody - auto-generated XML schema FodyWeavers.xsd # VS Code files for those working on multiple tools *.code-workspace # Local History for Visual Studio Code # Windows Installer files from build outputs *.cab *.msi *.msix *.msm *.msp # JetBrains Rider *.sln.iml ### VisualStudio Patch ### # Additional files built by Visual Studio # End of # https://www.toptal.com/developers/gitignore/api/git,c++,cmake,python,visualstudio,visualstudiocode,macos # credentials .key *.pem .crt # deps from build-deps.sh deps/ # API docs are updated automatically by .github/workflows/docs.yml docs/aws-crt-python-0.20.4+dfsg/.gitmodules000066400000000000000000000027361456575232400176150ustar00rootroot00000000000000[submodule "aws-common-runtime/aws-c-common"] path = crt/aws-c-common url = https://github.com/awslabs/aws-c-common.git [submodule "aws-common-runtime/aws-c-io"] path = crt/aws-c-io url = https://github.com/awslabs/aws-c-io.git [submodule "aws-common-runtime/aws-c-mqtt"] path = crt/aws-c-mqtt url = https://github.com/awslabs/aws-c-mqtt.git [submodule "aws-common-runtime/s2n"] path = crt/s2n url = https://github.com/awslabs/s2n.git [submodule "aws-common-runtime/aws-c-cal"] path = crt/aws-c-cal url = https://github.com/awslabs/aws-c-cal.git [submodule "aws-common-runtime/aws-c-http"] path = crt/aws-c-http url = https://github.com/awslabs/aws-c-http.git [submodule "aws-common-runtime/aws-c-compression"] path = crt/aws-c-compression url = https://github.com/awslabs/aws-c-compression.git [submodule "aws-common-runtime/aws-c-auth"] path = crt/aws-c-auth url = https://github.com/awslabs/aws-c-auth.git [submodule "aws-common-runtime/aws-c-s3"] path = crt/aws-c-s3 url = https://github.com/awslabs/aws-c-s3.git [submodule "aws-common-runtime/aws-c-event-stream"] path = crt/aws-c-event-stream url = https://github.com/awslabs/aws-c-event-stream.git [submodule "aws-common-runtime/aws-checksums"] path = crt/aws-checksums url = https://github.com/awslabs/aws-checksums.git [submodule "crt/aws-lc"] path = crt/aws-lc url = https://github.com/awslabs/aws-lc.git [submodule "crt/aws-c-sdkutils"] path = crt/aws-c-sdkutils url = https://github.com/awslabs/aws-c-sdkutils.git 
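The .gitmodules file below pins every native CRT dependency as a git submodule under crt/, and the README later in this archive notes that `git submodule update --init` must be run before building from source. As a hedged illustration (this helper script and its names are hypothetical, not part of the repository), a small Python sketch can check that every submodule listed in .gitmodules has actually been initialized:

```python
# check_submodules_init.py - hypothetical helper, not part of aws-crt-python.
# Parses .gitmodules and reports any submodule paths that have not been
# initialized yet (i.e. `git submodule update --init` still needs to be run).
import configparser
import os


def uninitialized_submodules(gitmodules_path=".gitmodules"):
    config = configparser.ConfigParser()
    config.read(gitmodules_path)
    missing = []
    for section in config.sections():
        path = config.get(section, "path", fallback=None)
        # An initialized submodule checkout contains a .git file or directory.
        if path and not os.path.exists(os.path.join(path, ".git")):
            missing.append(path)
    return missing


if __name__ == "__main__":
    for path in uninitialized_submodules():
        print("submodule not initialized:", path)
```

Running it from the repository root before `pip install .` gives a quick sanity check that the crt/ sources are all present.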
aws-crt-python-0.20.4+dfsg/.lgtm.yml000066400000000000000000000010661456575232400171770ustar00rootroot00000000000000extraction: cpp: index: # not sure why cpp builds are using python 2, but this should stop it build_command: "python3 setup.py build" # add tags for folders and files that we don't want alerts about # LGTM already has defaults tagging folders like "test/", so we're just adding non-obvious things here path_classifiers: library: # ignore alerts in libraries that the Common Runtime team doesn't own - crt/s2n - crt/aws-lc test: - codebuild - continuous-delivery - elasticurl.py - mqtt_test.py - s3_benchmark.py aws-crt-python-0.20.4+dfsg/CODE_OF_CONDUCT.md000066400000000000000000000004671456575232400202360ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/CONTRIBUTING.md000066400000000000000000000075031456575232400176660ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-crt-python/issues), or [recently closed](https://github.com/awslabs/aws-crt-python/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Wait for a repository collaborator to look at your pull request, run the automated tests, and review. If additional changes or discussion is needed, a collaborator will get back to you, so please stay involved in the conversation. * Note: pull requests from forks will not run the automated tests without collaborator involvement for security reasons. 
If you make a pull request and see that the tests are pending, this is normal and expected. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-crt-python/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-crt-python/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/LICENSE000066400000000000000000000261361456575232400164450ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
aws-crt-python-0.20.4+dfsg/MANIFEST.in000066400000000000000000000022631456575232400171710ustar00rootroot00000000000000graft source graft crt global-exclude .git* global-exclude .git*/** global-exclude .travis* global-exclude .travis/** global-exclude codebuild/** global-exclude crt/*/verification/** global-exclude crt/*/docs/** global-exclude docker-images/** prune crt/**/AWSCRTAndroidTestRunner prune crt/aws-c-auth/tests/aws-sig-v4-test-suite prune crt/aws-c-auth/tests/fuzz/corpus prune crt/aws-c-cal/ecdsa-fuzz-corpus prune crt/aws-c-s3/benchmarks prune crt/s2n/tests # s2n's cmake relies on a some files under test/ for compile time feature tests graft crt/s2n/tests/features exclude crt/aws-lc/**/*test*.go exclude crt/aws-lc/**/*test*.json exclude crt/aws-lc/**/*test*.py exclude crt/aws-lc/**/*test*.txt prune crt/aws-lc/crypto/cipher_extra/test prune crt/aws-lc/fuzz prune crt/aws-lc/ssl prune crt/aws-lc/tests graft crt/aws-lc/tests/compiler_features_tests prune crt/aws-lc/third_party graft crt/aws-lc/third_party/fiat graft crt/aws-lc/third_party/s2n-bignum prune crt/aws-lc/tool prune crt/aws-lc/util include crt/aws-lc/util/fipstools/CMakeLists.txt include crt/aws-lc/util/fipstools/acvp/modulewrapper/CMakeLists.txt # by default only test/test*.py are included, include the entire test suite graft test aws-crt-python-0.20.4+dfsg/NOTICE000066400000000000000000000001751456575232400163370ustar00rootroot00000000000000AWS Crt Python Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. aws-crt-python-0.20.4+dfsg/README.md000066400000000000000000000052531456575232400167140ustar00rootroot00000000000000## AWS CRT Python [![Version](https://img.shields.io/pypi/v/awscrt.svg?style=flat)](https://pypi.org/project/awscrt/) Python 3 bindings for the AWS Common Runtime. * [API documentation](https://awslabs.github.io/aws-crt-python) * [Development guide](guides/dev/README.md) for contributors to aws-crt-python's source code. ## License This library is licensed under the Apache 2.0 License. ## Minimum Requirements: * Python 3.7+ ## Installation To install from pip: ```bash python3 -m pip install awscrt ``` To install from Github: ```bash git clone https://github.com/awslabs/aws-crt-python.git cd aws-crt-python git submodule update --init python3 -m pip install . ``` To use from your Python application, declare `awscrt` as a dependency in your `setup.py` file. ### OpenSSL and LibCrypto (Unix only) aws-crt-python does not use OpenSSL for TLS. On Apple and Windows devices, the OS's default TLS library is used. On Unix devices, [s2n-tls](https://github.com/aws/s2n-tls) is used. But s2n-tls uses libcrypto, the cryptography math library bundled with OpenSSL. To simplify installation, aws-crt-python has its own copy of libcrypto. This lets you install a wheel from PyPI without having OpenSSL installed. Unix wheels on PyPI come with libcrypto statically compiled in. Code to build libcrypto comes from [AWS-LC](https://github.com/aws/aws-lc). AWS-LC's code is included in the PyPI source package, and the git repository includes it as a submodule. 
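For a quick way to see which libcrypto an installed wheel is actually using on Linux, the "use-system-libcrypto" CI job inspects the native extension with `ldd`. A rough Python equivalent (a sketch, assuming a Unix system with `ldd` on the PATH and awscrt already installed) looks like this:

```python
# Print the shared libraries the awscrt native extension links against, so you can
# tell whether libcrypto is statically compiled in or comes from the system.
import subprocess
import _awscrt  # the compiled extension module that ships with awscrt

print("native extension:", _awscrt.__file__)
result = subprocess.run(["ldd", _awscrt.__file__], capture_output=True, text=True)
print(result.stdout)
```

With the default PyPI wheels (libcrypto statically compiled in), no `libcrypto*.so` entry should appear in the output; with a build made under `AWS_CRT_BUILD_USE_SYSTEM_LIBCRYPTO=1`, one does.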
If you need aws-crt-python to use the libcrypto included on your system, set environment variable `AWS_CRT_BUILD_USE_SYSTEM_LIBCRYPTO=1` while building from source: ```sh AWS_CRT_BUILD_USE_SYSTEM_LIBCRYPTO=1 python3 -m pip install --no-binary :all: --verbose awscrt ``` ( `--no-binary :all:` ensures you do not use the precompiled wheel from PyPI) You can ignore all this on Windows and Apple platforms, where aws-crt-python uses the OS's default libraries for TLS and cryptography math. ## Mac-Only TLS Behavior Please note that on Mac, once a private key is used with a certificate, that certificate-key pair is imported into the Mac Keychain. All subsequent uses of that certificate will use the stored private key and ignore anything passed in programmatically. Beginning in v0.6.2, when a stored private key from the Keychain is used, the following will be logged at the "info" log level: ``` static: certificate has an existing certificate-key pair that was previously imported into the Keychain. Using key from Keychain instead of the one provided. ``` ## Crash Handler You can enable the crash handler by setting the environment variable `AWS_CRT_CRASH_HANDLER=1`. This will print the callstack to `stderr` in the event of a fatal error. aws-crt-python-0.20.4+dfsg/awscrt/000077500000000000000000000000001456575232400167335ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/awscrt/__init__.py000066400000000000000000000021271456575232400210460ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. from weakref import WeakSet __all__ = [ 'auth', 'crypto', 'http', 'io', 'mqtt', 's3', 'websocket', ] __version__ = '1.0.0.dev0' class NativeResource: """ Base for classes that bind to a native type. _binding is a python capsule referencing the native object. Note to developers: If NativeResource B depends on the existence of NativeResource A, have B's native code Py_INCREF/DECREF A's python class. This ensures that A will not be destroyed before B. If we simply had python class B referencing A, and the GC decided to clean up both, it might destroy A before B. """ # For tracking live NativeResources in tests/debug. # Note that WeakSet can accurately report if 0 objects exist, but iteration isn't 100% thread-safe. _track_lifetime = False _living = WeakSet() __slots__ = ('_binding', '__weakref__') def __init__(self): if NativeResource._track_lifetime: NativeResource._living.add(self) aws-crt-python-0.20.4+dfsg/awscrt/_test.py000066400000000000000000000137401456575232400204300ustar00rootroot00000000000000""" Private utilities for testing """ import _awscrt from awscrt import NativeResource import gc import inspect import os import sys import time import types from awscrt.io import ClientBootstrap, DefaultHostResolver, EventLoopGroup def native_memory_usage() -> int: """ Returns number of bytes currently allocated by awscrt's native code. `AWS_CRT_MEMORY_TRACING `environment variable must be set before module is loaded, or 0 will always be returned. Legal values are: * `AWS_CRT_MEMORY_TRACING=0`: No tracing * `AWS_CRT_MEMORY_TRACING=1`: Only track allocation sizes and total allocated * `AWS_CRT_MEMORY_TRACING=2`: Capture callstacks for each allocation """ return _awscrt.native_memory_usage() def dump_native_memory(): """ If there are outstanding allocations from awscrt's native code, dump them to log, along with any information gathered based on the tracing level. 
In order to see the dump, logging must initialized at `LogLevel.Trace` and the `AWS_CRT_MEMORY_TRACING` environment variable must be non-zero when module is loaded. Legal values are: * `AWS_CRT_MEMORY_TRACING=0`: No tracing * `AWS_CRT_MEMORY_TRACING=1`: Only track allocation sizes and total allocated * `AWS_CRT_MEMORY_TRACING=2`: Capture callstacks for each allocation """ return _awscrt.native_memory_dump() def join_all_native_threads(*, timeout_sec: float = -1.0) -> bool: """ Waits for all native threads to complete their join call. This can only be safely called from the main thread. This call may be required for native memory usage to reach zero. Args: timeout_sec (float): Number of seconds to wait before a timeout exception is raised. By default the wait is unbounded. Returns: bool: Returns whether threads could be joined before the timeout. """ return _awscrt.thread_join_all_managed(timeout_sec) def check_for_leaks(*, timeout_sec=10.0): """ Checks that all awscrt resources have been freed after a test. If any resources still exist, debugging info is printed and an exception is raised. Requirements: * `awscrt.NativeResource._track_lifetime = True`: must be set before test begins to ensure accurate tracking. * `AWS_CRT_MEMORY_TRACING=2`: environment variable that must be set before any awscrt modules are imported, to ensure accurate native leak checks. * `AWS_CRT_MEMORY_PRINT_SECRETS_OK=1`: optional environment variable that will print the full contents of leaked python objects. DO NOT SET THIS if the test results will be made public as it may result in secrets being leaked. """ ClientBootstrap.release_static_default() EventLoopGroup.release_static_default() DefaultHostResolver.release_static_default() if os.getenv('AWS_CRT_MEMORY_TRACING') != '2': raise RuntimeError("environment variable AWS_CRT_MEMORY_TRACING=2 must be set for accurate leak checks") if not NativeResource._track_lifetime: raise RuntimeError("awscrt.NativeResource._track_lifetime=True must be set for accurate leak checks") # Native resources might need a few more ticks to finish cleaning themselves up. wait_until = time.time() + timeout_sec while time.time() < wait_until: if not NativeResource._living and not native_memory_usage() > 0: return gc.collect() # join_all_native_threads() is sometimes required to get mem usage to 0 join_all_native_threads(timeout_sec=0.1) time.sleep(0.1) # Print out debugging info on leaking resources num_living_resources = len(NativeResource._living) if num_living_resources: leak_secrets_ok = os.getenv('AWS_CRT_MEMORY_PRINT_SECRETS_OK') == '1' if leak_secrets_ok: print("Leaking NativeResources:") else: print("Leaking NativeResources (set AWS_CRT_MEMORY_PRINT_SECRETS_OK=1 env var for more detailed report):") def _printobj(prefix, obj): # be sure not to accidentally print a dictionary with a password in it if leak_secrets_ok: s = str(obj) if len(s) > 1000: s = s[:1000] + '...TRUNCATED PRINT' print(prefix, s) else: print(prefix, type(obj)) for i in NativeResource._living: _printobj('-', i) # getrefcount(i) returns 4+ here, but 2 of those are due to debugging. # Don't show: # - 1 for WeakSet iterator due to this for-loop. # - 1 for getrefcount(i)'s reference. # But do show: # - 1 for item's self-reference. # - the rest are what's causing this leak. 
refcount = sys.getrefcount(i) - 2 # Gather list of referrers, but don't show those created by the act of iterating the WeakSet referrers = [] for r in gc.get_referrers(i): if isinstance(r, types.FrameType): frameinfo = inspect.getframeinfo(r) our_fault = (frameinfo.filename.endswith('_weakrefset.py') or frameinfo.filename.endswith('awscrt/_test.py')) if our_fault: continue referrers.append(r) print(' sys.getrefcount():', refcount) print(' gc.referrers():', len(referrers)) for r in referrers: if isinstance(r, types.FrameType): _printobj(' -', inspect.getframeinfo(r)) else: _printobj(' -', r) mem_bytes = native_memory_usage() if mem_bytes > 0: print('Leaking {} bytes native memory (enable Trace logging to see more)'.format(mem_bytes)) dump_native_memory() raise RuntimeError("awscrt leak check failed. {} NativeResource objects. {} bytes native memory".format( num_living_resources, mem_bytes)) aws-crt-python-0.20.4+dfsg/awscrt/auth.py000066400000000000000000000755351456575232400202650ustar00rootroot00000000000000""" AWS client-side authentication: standard credentials providers and signing. """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from awscrt import NativeResource import awscrt.exceptions from awscrt.http import HttpRequest, HttpProxyOptions from awscrt.io import ClientBootstrap, ClientTlsContext from concurrent.futures import Future import datetime from enum import IntEnum from typing import Optional, Sequence, Tuple class AwsCredentials(NativeResource): """ AwsCredentials are the public/private data needed to sign an authenticated AWS request. AwsCredentials are immutable. Args: access_key_id (str): Access key ID secret_access_key (str): Secret access key session_token (Optional[str]): Optional security token associated with the credentials. expiration (Optional[datetime.datetime]): Optional expiration datetime, that the credentials will no longer be valid past. Converted to UTC timezone and rounded down to nearest second. If not set, then credentials do not expire. Attributes: access_key_id (str): Access key ID secret_access_key (str): Secret access key session_token (Optional[str]): Security token associated with the credentials. None if not set. expiration (Optional[datetime.datetime]): Expiration datetime, that the credentials will no longer be valid past. None if credentials do not expire. Timezone is always UTC. 
""" __slots__ = () # C layer uses UINT64_MAX as timestamp for non-expiring credentials _NONEXPIRING_TIMESTAMP = 0xFFFFFFFFFFFFFFFF def __init__(self, access_key_id, secret_access_key, session_token=None, expiration=None): assert isinstance(access_key_id, str) assert isinstance(secret_access_key, str) assert isinstance(session_token, str) or session_token is None # C layer uses large int as timestamp for non-expiring credentials if expiration is None: expiration_timestamp = self._NONEXPIRING_TIMESTAMP else: expiration_timestamp = int(expiration.timestamp()) if expiration_timestamp < 0 or expiration_timestamp >= self._NONEXPIRING_TIMESTAMP: raise OverflowError("expiration datetime out of range") super().__init__() self._binding = _awscrt.credentials_new( access_key_id, secret_access_key, session_token, expiration_timestamp) @classmethod def _from_binding(cls, binding): """Construct from a pre-existing native object""" credentials = cls.__new__(cls) # avoid class's default constructor super(cls, credentials).__init__() # just invoke parent class's __init__() credentials._binding = binding return credentials @property def access_key_id(self): return _awscrt.credentials_access_key_id(self._binding) @property def secret_access_key(self): return _awscrt.credentials_secret_access_key(self._binding) @property def session_token(self): return _awscrt.credentials_session_token(self._binding) @property def expiration(self): timestamp = _awscrt.credentials_expiration_timestamp_seconds(self._binding) # C layer uses large int as timestamp for non-expiring credentials if timestamp == self._NONEXPIRING_TIMESTAMP: return None else: return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc) def __deepcopy__(self, memo): # AwsCredentials is immutable, so just return self. return self class AwsCredentialsProviderBase(NativeResource): # Pointless base class, kept for backwards compatibility. # AwsCredentialsProvider is (and always will be) the only subclass. # # Originally created with the thought that, when we supported # custom python providers, they would inherit from this class. # We ended up supporting custom python providers via # AwsCredentialsProvider.new_delegate() instead. pass class AwsCredentialsProvider(AwsCredentialsProviderBase): """ Credentials providers source the AwsCredentials needed to sign an authenticated AWS request. This class provides `new_X()` functions for several built-in provider types. To define a custom provider, use the :meth:`new_delegate()` function. """ __slots__ = () def __init__(self, binding): super().__init__() self._binding = binding @classmethod def new_default_chain(cls, client_bootstrap=None): """ Create the default provider chain used by most AWS SDKs. Generally: 1. Environment 2. Profile 3. (conditional, off by default) ECS 4. (conditional, on by default) EC2 Instance Metadata Args: client_bootstrap (Optional[ClientBootstrap]): Client bootstrap to use when initiating socket connection. If not set, uses the default static ClientBootstrap instead. Returns: AwsCredentialsProvider: """ assert isinstance(client_bootstrap, ClientBootstrap) or client_bootstrap is None if client_bootstrap is None: client_bootstrap = ClientBootstrap.get_or_create_static_default() binding = _awscrt.credentials_provider_new_chain_default(client_bootstrap) return cls(binding) @classmethod def new_static(cls, access_key_id, secret_access_key, session_token=None): """ Create a simple provider that just returns a fixed set of credentials. 
Args: access_key_id (str): Access key ID secret_access_key (str): Secret access key session_token (Optional[str]): Optional session token Returns: AwsCredentialsProvider: """ assert isinstance(access_key_id, str) assert isinstance(secret_access_key, str) assert isinstance(session_token, str) or session_token is None binding = _awscrt.credentials_provider_new_static(access_key_id, secret_access_key, session_token) return cls(binding) @classmethod def new_profile( cls, client_bootstrap=None, profile_name=None, config_filepath=None, credentials_filepath=None): """ Creates a provider that sources credentials from key-value profiles loaded from the aws credentials file. Args: client_bootstrap (Optional[ClientBootstrap]): Client bootstrap to use when initiating socket connection. If not set, uses the static default ClientBootstrap instead. profile_name (Optional[str]): Name of profile to use. If not set, uses value from AWS_PROFILE environment variable. If that is not set, uses value of "default" config_filepath (Optional[str]): Path to profile config file. If not set, uses value from AWS_CONFIG_FILE environment variable. If that is not set, uses value of "~/.aws/config" credentials_filepath (Optional[str]): Path to profile credentials file. If not set, uses value from AWS_SHARED_CREDENTIALS_FILE environment variable. If that is not set, uses value of "~/.aws/credentials" Returns: AwsCredentialsProvider: """ assert isinstance(client_bootstrap, ClientBootstrap) or client_bootstrap is None assert isinstance(profile_name, str) or profile_name is None assert isinstance(config_filepath, str) or config_filepath is None assert isinstance(credentials_filepath, str) or credentials_filepath is None if client_bootstrap is None: client_bootstrap = ClientBootstrap.get_or_create_static_default() binding = _awscrt.credentials_provider_new_profile( client_bootstrap, profile_name, config_filepath, credentials_filepath) return cls(binding) @classmethod def new_process(cls, profile_to_use=None): """ Creates a provider that sources credentials from running an external command or process. The command to run is sourced from a profile in the AWS config file, using the standard profile selection rules. The profile key the command is read from is "credential_process." Example:: [default] credential_process=/opt/amazon/bin/my-credential-fetcher --argsA=abc On successfully running the command, the output should be a json data with the following format:: { "Version": 1, "AccessKeyId": "accesskey", "SecretAccessKey": "secretAccessKey" "SessionToken": "....", "Expiration": "2019-05-29T00:21:43Z" } Version here identifies the command output format version. This provider is not part of the default provider chain. Args: profile_to_use (Optional[str]): Name of profile in which to look for credential_process. If not set, uses value from AWS_PROFILE environment variable. If that is not set, uses value of "default" Returns: AwsCredentialsProvider: """ binding = _awscrt.credentials_provider_new_process(profile_to_use) return cls(binding) @classmethod def new_environment(cls): """ Creates a provider that returns credentials sourced from environment variables. * AWS_ACCESS_KEY_ID * AWS_SECRET_ACCESS_KEY * AWS_SESSION_TOKEN Returns: AwsCredentialsProvider: """ binding = _awscrt.credentials_provider_new_environment() return cls(binding) @classmethod def new_chain(cls, providers): """ Creates a provider that sources credentials from an ordered sequence of providers. This provider uses the first set of credentials successfully queried. 
Providers are queried one at a time; a provider is not queried until the preceding provider has failed to source credentials. Args: providers (List[AwsCredentialsProvider]): List of credentials providers. Returns: AwsCredentialsProvider: """ binding = _awscrt.credentials_provider_new_chain(providers) return cls(binding) @classmethod def new_delegate(cls, get_credentials): """ Creates a provider that sources credentials from a custom synchronous callback. Args: get_credentials: Callable which takes no arguments and returns :class:`AwsCredentials`. Returns: AwsCredentialsProvider: """ # TODO: support async delegates assert callable(get_credentials) binding = _awscrt.credentials_provider_new_delegate(get_credentials) return cls(binding) @classmethod def new_cognito( cls, *, endpoint: str, identity: str, tls_ctx: awscrt.io.ClientTlsContext, logins: Optional[Sequence[Tuple[str, str]]] = None, custom_role_arn: Optional[str] = None, client_bootstrap: Optional[ClientBootstrap] = None, http_proxy_options: Optional[HttpProxyOptions] = None): """ Creates a provider that sources credentials from the AWS Cognito Identity service. Args: endpoint (str): Cognito Identity service regional endpoint to source credentials from. identity (str): Cognito identity to fetch credentials relative to. tls_ctx (ClientTlsContext): Client TLS context to use when querying cognito credentials by HTTP. logins (Optional[Sequence[tuple[str, str]]]): Sequence of tuples specifying pairs of identity provider name and token values, representing established login contexts for identity authentication purposes. custom_role_arn (Optional[str]): ARN of the role to be assumed when multiple roles were received in the token from the identity provider. client_bootstrap (Optional[ClientBootstrap]): Client bootstrap to use when initiating a socket connection. If not set, uses the static default ClientBootstrap instead. http_proxy_options (Optional[HttpProxyOptions]): Optional HTTP proxy options. If None is provided then an HTTP proxy is not used. Returns: AwsCredentialsProvider: """ assert isinstance(endpoint, str) assert isinstance(identity, str) assert isinstance(tls_ctx, ClientTlsContext) assert isinstance(custom_role_arn, str) or custom_role_arn is None assert isinstance(http_proxy_options, HttpProxyOptions) or http_proxy_options is None if client_bootstrap is None: client_bootstrap = ClientBootstrap.get_or_create_static_default() assert isinstance(client_bootstrap, ClientBootstrap) binding = _awscrt.credentials_provider_new_cognito( endpoint, identity, tls_ctx, client_bootstrap, logins, custom_role_arn, http_proxy_options) return cls(binding) @classmethod def new_x509( cls, *, endpoint: str, thing_name: str, role_alias: str, tls_ctx: awscrt.io.ClientTlsContext, client_bootstrap: Optional[ClientBootstrap] = None, http_proxy_options: Optional[HttpProxyOptions] = None): """ Creates a provider that sources credentials from IoT's X509 credentials service. Args: endpoint (str): X509 service regional endpoint to source credentials from. This is a per-account value that can be determined via the CLI: `aws iot describe-endpoint --endpoint-type iot:CredentialProvider` thing_name (str): The name of the IoT thing to use to fetch credentials. role_alias (str): The name of the role alias to fetch credentials through. tls_ctx (ClientTlsContext): The client TLS context to use when establishing the http connection to IoT's X509 credentials service. 
client_bootstrap (Optional[ClientBootstrap]): Client bootstrap to use when initiating a socket connection. If not set, uses the static default ClientBootstrap instead. http_proxy_options (Optional[HttpProxyOptions]): Optional HTTP proxy options. If None is provided then an HTTP proxy is not used. Returns: AwsCredentialsProvider: """ assert isinstance(endpoint, str) assert isinstance(thing_name, str) assert isinstance(role_alias, str) assert isinstance(tls_ctx, ClientTlsContext) assert isinstance(http_proxy_options, HttpProxyOptions) or http_proxy_options is None if client_bootstrap is None: client_bootstrap = ClientBootstrap.get_or_create_static_default() assert isinstance(client_bootstrap, ClientBootstrap) binding = _awscrt.credentials_provider_new_x509( endpoint, thing_name, role_alias, tls_ctx, client_bootstrap, http_proxy_options) return cls(binding) def get_credentials(self): """ Asynchronously fetch AwsCredentials. Returns: concurrent.futures.Future: A Future which will contain :class:`AwsCredentials` (or an exception) when the operation completes. The operation may complete on a different thread. """ future = Future() def _on_complete(error_code, binding): try: if error_code: future.set_exception(awscrt.exceptions.from_code(error_code)) else: credentials = AwsCredentials._from_binding(binding) future.set_result(credentials) except Exception as e: future.set_exception(e) try: _awscrt.credentials_provider_get_credentials(self._binding, _on_complete) except Exception as e: future.set_exception(e) return future class AwsSigningAlgorithm(IntEnum): """AWS signing algorithm enumeration.""" V4 = 0 """Signature Version 4""" V4_ASYMMETRIC = 1 """Signature Version 4 - Asymmetric""" V4_S3EXPRESS = 2 """Signature Version 4 - S3 Express""" class AwsSignatureType(IntEnum): """Which sort of signature should be computed from the signable.""" HTTP_REQUEST_HEADERS = 0 """ A signature for a full HTTP request should be computed, with header updates applied to the signing result. """ HTTP_REQUEST_QUERY_PARAMS = 1 """ A signature for a full HTTP request should be computed, with query param updates applied to the signing result. """ class AwsSignedBodyValue: """ Values for use with :attr:`AwsSigningConfig.signed_body_value`. Some services use special values (e.g. "UNSIGNED-PAYLOAD") when the body is not being signed in the usual way. """ EMPTY_SHA256 = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' """The SHA-256 of the empty string.""" UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD' """Unsigned payload option (not accepted by all services)""" STREAMING_AWS4_HMAC_SHA256_PAYLOAD = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' """Each payload chunk will be signed (not accepted by all services)""" STREAMING_AWS4_HMAC_SHA256_EVENTS = 'STREAMING-AWS4-HMAC-SHA256-EVENTS' """Each event will be signed (not accepted by all services)""" class AwsSignedBodyHeaderType(IntEnum): """ Controls if signing adds a header containing the canonical request's signed body value. See :attr:`AwsSigningConfig.signed_body_value`. """ NONE = 0 """Do not add a header.""" X_AMZ_CONTENT_SHA_256 = 1 """Add the "x-amz-content-sha-256" header with the canonical request's signed body value""" class AwsSigningConfig(NativeResource): """ Configuration for use in AWS-related signing. AwsSigningConfig is immutable. It is good practice to use a new config for each signature, or the date might get too old. Args: algorithm (AwsSigningAlgorithm): Which signing algorithm to use. 
signature_type (AwsSignatureType): Which sort of signature should be computed from the signable. credentials_provider (AwsCredentialsProvider): Credentials provider to fetch signing credentials with. If the algorithm is :attr:`AwsSigningAlgorithm.V4_ASYMMETRIC`, ECC-based credentials will be derived from the fetched credentials. region (str): If the algorithm is :attr:`AwsSigningAlgorithm.V4`, the region to sign against. If the algorithm is :attr:`AwsSigningAlgorithm.V4_ASYMMETRIC`, the value of the "X-amzn-region-set" header (added in signing). service (str): Name of service to sign a request for. date (Optional[datetime.datetime]): Date and time to use during the signing process. If None is provided then `datetime.datetime.now(datetime.timezone.utc)` is used. Naive dates (lacking timezone info) are assumed to be in local time. should_sign_header (Optional[Callable[[str], bool]]): Optional function to control which headers are a part of the canonical request. Skipping auth-required headers will result in an unusable signature. Headers injected by the signing process are not skippable. This function does not override the internal check function (x-amzn-trace-id, user-agent), but rather supplements it. In particular, a header will get signed if and only if it returns true to both the internal check (skips x-amzn-trace-id, user-agent) and this function (if defined). use_double_uri_encode (bool): Whether to double-encode the resource path when constructing the canonical request (assuming the path is already encoded). Default is True. All services except S3 use double encoding. should_normalize_uri_path (bool): Whether the resource paths are normalized when building the canonical request. Default is True. signed_body_value (Optional[str]): If set, this value is used as the canonical request's body value. Typically, this is the SHA-256 of the payload, written as lowercase hex. If this has been precalculated, it can be set here. Special values used by certain services can also be set (see :class:`AwsSignedBodyValue`). If `None` is passed (the default), the typical value will be calculated from the payload during signing. signed_body_header_type (AwsSignedBodyHeaderType): Controls if signing adds a header containing the canonical request's signed body value. Default is to not add a header. expiration_in_seconds (Optional[int]): If set, and signature_type is :attr:`AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS`, then signing will add "X-Amz-Expires" to the query string, equal to the value specified here. omit_session_token (bool): If set True, the "X-Amz-Security-Token" query param is omitted from the canonical request. The default False should be used for most services. 
""" __slots__ = ('_priv_should_sign_cb') _attributes = ( 'algorithm', 'signature_type', 'credentials_provider', 'region', 'service', 'date', 'should_sign_header', 'use_double_uri_encode', 'should_normalize_uri_path', 'signed_body_value', 'signed_body_header_type', 'expiration_in_seconds', 'omit_session_token', ) def __init__(self, algorithm=AwsSigningAlgorithm.V4, signature_type=AwsSignatureType.HTTP_REQUEST_HEADERS, credentials_provider=None, region="", service="", date=None, should_sign_header=None, use_double_uri_encode=True, should_normalize_uri_path=True, signed_body_value=None, signed_body_header_type=AwsSignedBodyHeaderType.NONE, expiration_in_seconds=None, omit_session_token=False, ): assert isinstance(algorithm, AwsSigningAlgorithm) assert isinstance(signature_type, AwsSignatureType) assert isinstance(credentials_provider, AwsCredentialsProvider) or credentials_provider is None assert isinstance(region, str) assert isinstance(service, str) assert callable(should_sign_header) or should_sign_header is None assert signed_body_value is None or (isinstance(signed_body_value, str) and len(signed_body_value) > 0) assert isinstance(signed_body_header_type, AwsSignedBodyHeaderType) assert expiration_in_seconds is None or expiration_in_seconds > 0 super().__init__() if date is None: date = datetime.datetime.now(datetime.timezone.utc) timestamp = date.timestamp() self._priv_should_sign_cb = should_sign_header if should_sign_header is not None: def should_sign_header_wrapper(name): return should_sign_header(name=name) else: should_sign_header_wrapper = None if expiration_in_seconds is None: # C layer uses 0 to indicate None expiration_in_seconds = 0 self._binding = _awscrt.signing_config_new( algorithm, signature_type, credentials_provider, region, service, date, timestamp, should_sign_header_wrapper, use_double_uri_encode, should_normalize_uri_path, signed_body_value, signed_body_header_type, expiration_in_seconds, omit_session_token) def replace(self, **kwargs): """ Return an AwsSigningConfig with the same attributes, except for those attributes given new values by whichever keyword arguments are specified. """ args = {x: kwargs.get(x, getattr(self, x)) for x in AwsSigningConfig._attributes} return AwsSigningConfig(**args) @property def algorithm(self): """AwsSigningAlgorithm: Which signing algorithm to use""" return AwsSigningAlgorithm(_awscrt.signing_config_get_algorithm(self._binding)) @property def signature_type(self): """AwsSignatureType: Which sort of signature should be computed from the signable.""" return AwsSignatureType(_awscrt.signing_config_get_signature_type(self._binding)) @property def credentials_provider(self): """ AwsCredentialsProvider: Credentials provider to fetch signing credentials with. If the algorithm is :attr:`AwsSigningAlgorithm.V4_ASYMMETRIC`, ECC-based credentials will be derived from the fetched credentials. """ return _awscrt.signing_config_get_credentials_provider(self._binding) @property def region(self): """ str: If signing algorithm is :attr:`AwsSigningAlgorithm.V4`, the region to sign against. If the algorithm is :attr:`AwsSigningAlgorithm.V4_ASYMMETRIC`, the value of the "X-amzn-region-set header" (added in signing). """ return _awscrt.signing_config_get_region(self._binding) @property def service(self): """str: Name of service to sign a request for""" return _awscrt.signing_config_get_service(self._binding) @property def date(self): """ datetime.datetime: Date and time to use during the signing process. 
If None is provided, then `datetime.datetime.now(datetime.timezone.utc)` at time of object construction is used. It is good practice to use a new config for each signature, or the date might get too old. """ return _awscrt.signing_config_get_date(self._binding) @property def should_sign_header(self): """ Optional[Callable[[str], bool]]: Optional function to control which headers are a part of the canonical request. Skipping auth-required headers will result in an unusable signature. Headers injected by the signing process are not skippable. This function does not override the internal check function (x-amzn-trace-id, user-agent), but rather supplements it. In particular, a header will get signed if and only if it returns true to both the internal check (skips x-amzn-trace-id, user-agent) and this function (if defined). """ return self._priv_should_sign_cb @property def use_double_uri_encode(self): """ bool: Whether to double-encode the resource path when constructing the canonical request (assuming the path is already encoded). By default, all services except S3 use double encoding. """ return _awscrt.signing_config_get_use_double_uri_encode(self._binding) @property def should_normalize_uri_path(self): """ bool: Whether the resource paths are normalized when building the canonical request. """ return _awscrt.signing_config_get_should_normalize_uri_path(self._binding) @property def signed_body_value(self): """ Optional[str]: What to use as the canonical request's body value. If `None` is set (the default), a value will be calculated from the payload during signing. Typically, this is the SHA-256 of the payload, written as lowercase hex. If this has been precalculated, it can be set here. Special values used by certain services can also be set (see :class:`AwsSignedBodyValue`). """ return _awscrt.signing_config_get_signed_body_value(self._binding) @property def signed_body_header_type(self): """ AwsSignedBodyHeaderType: Controls if signing adds a header containing the canonical request's signed body value. """ return AwsSignedBodyHeaderType(_awscrt.signing_config_get_signed_body_header_type(self._binding)) @property def expiration_in_seconds(self): """ Optional[int]: If set, and signature_type is :attr:`AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS`, then signing will add "X-Amz-Expires" to the query string, equal to the value specified here. Otherwise, this is None has no effect. """ expiration = _awscrt.signing_config_get_expiration_in_seconds(self._binding) # C layer uses 0 to indicate None return None if expiration == 0 else expiration @property def omit_session_token(self): """ bool: Whether the "X-Amz-Security-Token" query param is omitted from the canonical request. This should be False for most services. """ return _awscrt.signing_config_get_omit_session_token(self._binding) def aws_sign_request(http_request, signing_config): """ Perform AWS HTTP request signing. The :class:`awscrt.http.HttpRequest` is transformed asynchronously, according to the :class:`AwsSigningConfig`. When signing: 1. It is good practice to use a new config for each signature, or the date might get too old. 2. Do not add the following headers to requests before signing, they may be added by the signer: x-amz-content-sha256, X-Amz-Date, Authorization 3. Do not add the following query params to requests before signing, they may be added by the signer: X-Amz-Signature, X-Amz-Date, X-Amz-Credential, X-Amz-Algorithm, X-Amz-SignedHeaders Args: http_request (awscrt.http.HttpRequest): The HTTP request to sign. 
signing_config (AwsSigningConfig): Configuration for signing. Returns: concurrent.futures.Future: A Future whose result will be the signed :class:`awscrt.http.HttpRequest`. The future will contain an exception if the signing process fails. """ assert isinstance(http_request, HttpRequest) assert isinstance(signing_config, AwsSigningConfig) future = Future() def _on_complete(error_code): try: if error_code: future.set_exception(awscrt.exceptions.from_code(error_code)) else: future.set_result(http_request) except Exception as e: future.set_exception(e) _awscrt.sign_request_aws(http_request, signing_config, _on_complete) return future aws-crt-python-0.20.4+dfsg/awscrt/checksums.py000066400000000000000000000013451456575232400212750ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt def crc32(input: bytes, previous_crc32: int = 0) -> int: """ Perform a CRC32 (Ethernet, gzip) computation. If continuing to update a running CRC, pass its value into `previous_crc32`. Returns an unsigned 32-bit integer. """ return _awscrt.checksums_crc32(input, previous_crc32) def crc32c(input: bytes, previous_crc32c: int = 0) -> int: """ Perform a Castagnoli CRC32c (iSCSI) computation. If continuing to update a running CRC, pass its value into `previous_crc32c`. Returns an unsigned 32-bit integer. """ return _awscrt.checksums_crc32c(input, previous_crc32c) aws-crt-python-0.20.4+dfsg/awscrt/common.py000066400000000000000000000007031456575232400205750ustar00rootroot00000000000000""" Cross-platform library for `awscrt`. """ import _awscrt def get_cpu_group_count() -> int: """ Returns number of processor groups on the system. Useful for working with non-uniform memory access (NUMA) nodes. """ return _awscrt.get_cpu_group_count() def get_cpu_count_for_group(group_idx: int) -> int: """ Returns number of processors in a given group. """ return _awscrt.get_cpu_count_for_group(group_idx) aws-crt-python-0.20.4+dfsg/awscrt/crypto.py000066400000000000000000000104271456575232400206310ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from awscrt import NativeResource from typing import Union from enum import IntEnum class Hash: def __init__(self, native_handle): """ don't call me, I'm private """ self._hash = native_handle @staticmethod def sha1_new(): """ Creates a new instance of Hash, using the sha1 algorithm """ return Hash(native_handle=_awscrt.sha1_new()) @staticmethod def sha256_new(): """ Creates a new instance of Hash, using the sha256 algorithm """ return Hash(native_handle=_awscrt.sha256_new()) @staticmethod def md5_new(): """ Creates a new instance of Hash, using the md5 algorithm. 
""" return Hash(native_handle=_awscrt.md5_new()) def update(self, to_hash): _awscrt.hash_update(self._hash, to_hash) def digest(self, truncate_to=0): return _awscrt.hash_digest(self._hash, truncate_to) class HMAC: def __init__(self, native_handle): """ don't call me, I'm private """ self._hmac = native_handle @staticmethod def sha256_hmac_new(secret_key): """ Creates a new instance of HMAC, using SHA256 HMAC as the algorithm and secret_key as the secret """ return HMAC(native_handle=_awscrt.sha256_hmac_new(secret_key)) def update(self, to_hmac): _awscrt.hmac_update(self._hmac, to_hmac) def digest(self, truncate_to=0): return _awscrt.hmac_digest(self._hmac, truncate_to) class RSAEncryptionAlgorithm(IntEnum): """RSA Encryption Algorithm""" PKCS1_5 = 0 """ PKCSv1.5 padding """ OAEP_SHA256 = 1 """ OAEP padding with sha256 hash function """ OAEP_SHA512 = 2 """ OAEP padding with sha512 hash function """ class RSASignatureAlgorithm(IntEnum): """RSA Encryption Algorithm""" PKCS1_5_SHA256 = 0 """ PKCSv1.5 padding with sha256 hash function """ PSS_SHA256 = 1 """ PSS padding with sha256 hash function """ class RSA(NativeResource): def __init__(self, binding): super().__init__() self._binding = binding @staticmethod def new_private_key_from_pem_data(pem_data: Union[str, bytes, bytearray, memoryview]) -> 'RSA': """ Creates a new instance of private RSA key pair from pem data. Raises ValueError if pem does not have private key object. """ return RSA(binding=_awscrt.rsa_private_key_from_pem_data(pem_data)) @staticmethod def new_public_key_from_pem_data(pem_data: Union[str, bytes, bytearray, memoryview]) -> 'RSA': """ Creates a new instance of public RSA key pair from pem data. Raises ValueError if pem does not have public key object. """ return RSA(binding=_awscrt.rsa_public_key_from_pem_data(pem_data)) def encrypt(self, encryption_algorithm: RSAEncryptionAlgorithm, plaintext: Union[bytes, bytearray, memoryview]) -> bytes: """ Encrypts data using a given algorithm. """ return _awscrt.rsa_encrypt(self._binding, encryption_algorithm, plaintext) def decrypt(self, encryption_algorithm: RSAEncryptionAlgorithm, ciphertext: Union[bytes, bytearray, memoryview]) -> bytes: """ Decrypts data using a given algorithm. """ return _awscrt.rsa_decrypt(self._binding, encryption_algorithm, ciphertext) def sign(self, signature_algorithm: RSASignatureAlgorithm, digest: Union[bytes, bytearray, memoryview]) -> bytes: """ Signs data using a given algorithm. Note: function expects digest of the message, ex sha256 """ return _awscrt.rsa_sign(self._binding, signature_algorithm, digest) def verify(self, signature_algorithm: RSASignatureAlgorithm, digest: Union[bytes, bytearray, memoryview], signature: Union[bytes, bytearray, memoryview]) -> bool: """ Verifies signature against digest. Returns True if signature matches and False if not. """ return _awscrt.rsa_verify(self._binding, signature_algorithm, digest, signature) aws-crt-python-0.20.4+dfsg/awscrt/eventstream/000077500000000000000000000000001456575232400212705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/awscrt/eventstream/__init__.py000066400000000000000000000212371456575232400234060ustar00rootroot00000000000000""" event-stream library for `awscrt`. """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
from collections.abc import ByteString from enum import IntEnum from typing import Any from uuid import UUID __all__ = ['HeaderType', 'Header'] _BYTE_MIN = -2**7 _BYTE_MAX = 2**7 - 1 _INT16_MIN = -2**15 _INT16_MAX = 2**15 - 1 _INT32_MIN = -2**31 _INT32_MAX = 2**31 - 1 _INT64_MIN = -2**63 _INT64_MAX = 2**63 - 1 class HeaderType(IntEnum): """Supported types for the value within a Header""" BOOL_TRUE = 0 """Value is True. No actual value is transmitted on the wire.""" BOOL_FALSE = 1 """Value is False. No actual value is transmitted on the wire.""" BYTE = 2 """Value is signed 8-bit int.""" INT16 = 3 """Value is signed 16-bit int.""" INT32 = 4 """Value is signed 32-bit int.""" INT64 = 5 """Value is signed 64-bit int.""" BYTE_BUF = 6 """Value is raw bytes.""" STRING = 7 """Value is a str. Transmitted on the wire as utf-8""" TIMESTAMP = 8 """Value is a posix timestamp (seconds since Unix epoch). Transmitted on the wire as a 64-bit int""" UUID = 9 """Value is a UUID. Transmitted on the wire as 16 bytes""" def __format__(self, format_spec): # override so formatted string doesn't simply look like an int return str(self) class Header: """A header in an event-stream message. Each header has a name, value, and type. :class:`HeaderType` enumerates the supported value types. Create a header with one of the Header.from_X() functions. """ def __init__(self, name: str, value: Any, header_type: HeaderType): # do not call directly, use Header.from_xyz() methods. self._name = name self._value = value self._type = header_type @classmethod def from_bool(cls, name: str, value: bool) -> 'Header': """Create a Header of type :attr:`~HeaderType.BOOL_TRUE` or :attr:`~HeaderType.BOOL_FALSE`""" if value: return cls(name, True, HeaderType.BOOL_TRUE) else: return cls(name, False, HeaderType.BOOL_FALSE) @classmethod def from_byte(cls, name: str, value: int) -> 'Header': """Create a Header of type :attr:`~HeaderType.BYTE` The value must fit in an 8-bit signed int""" value = int(value) if value < _BYTE_MIN or value > _BYTE_MAX: raise ValueError("Value {} cannot fit in signed 8-bit byte".format(value)) return cls(name, value, HeaderType.BYTE) @classmethod def from_int16(cls, name: str, value: int) -> 'Header': """Create a Header of type :attr:`~HeaderType.INT16` The value must fit in an 16-bit signed int""" value = int(value) if value < _INT16_MIN or value > _INT16_MAX: raise ValueError("Value {} cannot fit in signed 16-bit int".format(value)) return cls(name, value, HeaderType.INT16) @classmethod def from_int32(cls, name: str, value: int) -> 'Header': """Create a Header of type :attr:`~HeaderType.INT32` The value must fit in an 32-bit signed int""" value = int(value) if value < _INT32_MIN or value > _INT32_MAX: raise ValueError("Value {} cannot fit in signed 32-bit int".format(value)) return cls(name, value, HeaderType.INT32) @classmethod def from_int64(cls, name: str, value: int) -> 'Header': """Create a Header of type :attr:`~HeaderType.INT64` The value must fit in an 64-bit signed int""" value = int(value) if value < _INT64_MIN or value > _INT64_MAX: raise ValueError("Value {} cannot fit in signed 64-bit int".format(value)) return cls(name, value, HeaderType.INT64) @classmethod def from_byte_buf(cls, name: str, value: ByteString) -> 'Header': """Create a Header of type :attr:`~HeaderType.BYTE_BUF` The value must be a bytes-like object""" return cls(name, value, HeaderType.BYTE_BUF) @classmethod def from_string(cls, name: str, value: str) -> 'Header': """Create a Header of type :attr:`~HeaderType.STRING`""" value = 
str(value) return cls(name, value, HeaderType.STRING) @classmethod def from_timestamp(cls, name: str, value: int) -> 'Header': """Create a Header of type :attr:`~HeaderType.TIMESTAMP` Value must be a posix timestamp (seconds since Unix epoch)""" value = int(value) if value < _INT64_MIN or value > _INT64_MAX: raise ValueError("Value {} exceeds timestamp limits".format(value)) return cls(name, value, HeaderType.TIMESTAMP) @classmethod def from_uuid(cls, name: str, value: UUID) -> 'Header': """Create a Header of type :attr:`~HeaderType.UUID` The value must be a UUID""" if not isinstance(value, UUID): raise TypeError("Value must be UUID, not {}".format(type(value))) return cls(name, value, HeaderType.UUID) @classmethod def _from_binding_tuple(cls, binding_tuple): # native code deals with a simplified tuple, rather than full class name, value, header_type = binding_tuple header_type = HeaderType(header_type) if header_type == HeaderType.UUID: value = UUID(bytes=value) return cls(name, value, header_type) def _as_binding_tuple(self): # native code deals with a simplified tuple, rather than full class if self._type == HeaderType.UUID: value = self._value.bytes else: value = self._value return (self._name, value, self._type) @property def name(self) -> str: """Header name""" return self._name @property def type(self) -> HeaderType: """Header type""" return self._type @property def value(self) -> Any: """Header value The header's type determines the value's type. Use the value_as_X() methods for type-checked queries.""" return self._value def _value_as(self, header_type: HeaderType) -> Any: if self._type != header_type: raise TypeError("Header type is {}, not {}".format(self._type, header_type)) return self._value def value_as_bool(self) -> bool: """Return bool value Raises an exception if type is not :attr:`~HeaderType.BOOL_TRUE` or :attr:`~HeaderType.BOOL_FALSE`""" if self._type == HeaderType.BOOL_TRUE: return True if self._type == HeaderType.BOOL_FALSE: return False raise TypeError( "Header type is {}, not {} or {}".format( self._type, HeaderType.BOOL_TRUE, HeaderType.BOOL_FALSE)) def value_as_byte(self) -> int: """Return value of 8-bit signed int Raises an exception if type is not :attr:`~HeaderType.BYTE`""" return self._value_as(HeaderType.BYTE) def value_as_int16(self) -> int: """Return value of 16-bit signed int Raises an exception if type is not :attr:`~HeaderType.INT16`""" return self._value_as(HeaderType.INT16) def value_as_int32(self) -> int: """Return value of 32-bit signed int Raises an exception if type is not :attr:`~HeaderType.INT32`""" return self._value_as(HeaderType.INT32) def value_as_int64(self) -> int: """Return value of 64-bit signed int Raises an exception if type is not :attr:`~HeaderType.INT64`""" return self._value_as(HeaderType.INT64) def value_as_byte_buf(self) -> ByteString: """Return value of bytes Raises an exception if type is not :attr:`~HeaderType.BYTE_BUF`""" return self._value_as(HeaderType.BYTE_BUF) def value_as_string(self) -> str: """Return value of string Raises an exception if type is not :attr:`~HeaderType.STRING`""" return self._value_as(HeaderType.STRING) def value_as_timestamp(self) -> int: """Return value of timestamp (seconds since Unix epoch) Raises an exception if type is not :attr:`~HeaderType.TIMESTAMP`""" return self._value_as(HeaderType.TIMESTAMP) def value_as_uuid(self) -> UUID: """Return value of UUID Raises an exception if type is not :attr:`~HeaderType.UUID`""" return self._value_as(HeaderType.UUID) def __str__(self): return "{}: {} 
<{}>".format( self._name, repr(self._value), self._type.name) def __repr__(self): return "{}({}, {}, {})".format( self.__class__.__name__, repr(self._name), repr(self._value), repr(self._type)) aws-crt-python-0.20.4+dfsg/awscrt/eventstream/rpc.py000066400000000000000000000574471456575232400224470ustar00rootroot00000000000000""" event-stream RPC (remote procedure call) protocol library for `awscrt`. """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from abc import ABC, abstractmethod from awscrt import NativeResource import awscrt.exceptions from awscrt.eventstream import Header from awscrt.io import ClientBootstrap, SocketOptions, TlsConnectionOptions from collections.abc import ByteString, Callable from concurrent.futures import Future from enum import IntEnum from functools import partial from typing import Optional, Sequence __all__ = [ 'MessageType', 'MessageFlag', 'ClientConnectionHandler', 'ClientConnection', 'ClientContinuation', 'ClientContinuationHandler', ] class MessageType(IntEnum): """Types of messages in the event-stream RPC protocol. The :attr:`~MessageType.APPLICATION_MESSAGE` and :attr:`~MessageType.APPLICATION_ERROR` types may only be sent on streams, and will never arrive as a protocol message (stream-id 0). For all other message types, they may only be sent as protocol messages (stream-id 0), and will never arrive as a stream message. Different message types expect specific headers and flags, consult documentation.""" APPLICATION_MESSAGE = 0 """Application message""" APPLICATION_ERROR = 1 """Application error""" PING = 2 """Ping""" PING_RESPONSE = 3 """Ping response""" CONNECT = 4 """Connect""" CONNECT_ACK = 5 """Connect acknowledgement If the :attr:`MessageFlag.CONNECTION_ACCEPTED` flag is not present, the connection has been rejected.""" PROTOCOL_ERROR = 6 """Protocol error""" INTERNAL_ERROR = 7 """Internal error""" def __format__(self, format_spec): # override so formatted string doesn't simply look like an int return str(self) class MessageFlag: """Flags for messages in the event-stream RPC protocol. Flags may be XORed together. Not all flags can be used with all message types, consult documentation. """ # TODO: when python 3.5 is dropped this class should inherit from IntFlag. # When doing this, be sure to update type-hints and callbacks to pass # MessageFlag instead of plain int. NONE = 0 """No flags""" CONNECTION_ACCEPTED = 0x1 """Connection accepted If this flag is absent from a :attr:`MessageType.CONNECT_ACK`, the connection has been rejected.""" TERMINATE_STREAM = 0x2 """Terminate stream This message may be used with any message type. The sender will close their connection after the message is written to the wire. The receiver will close their connection after delivering the message to the user.""" def __format__(self, format_spec): # override so formatted string doesn't simply look like an int return str(self) class ClientConnectionHandler(ABC): """Base class for handling connection events. Inherit from this class and override methods to handle connection events. All callbacks for this connection will be invoked on the same thread, and :meth:`on_connection_setup()` will always be the first callback invoked. """ @abstractmethod def on_connection_setup(self, connection, error, **kwargs) -> None: """Invoked upon completion of the setup attempt. If setup was successful, the connection is provided to the user. 
Note that the network connection stays alive until it is closed, even if no local references to the connection object remain. The user should store a reference to this connection, and call `connection.close()` when they are done with it to avoid leaking resources. Setup will always be the first callback invoked on the handler. If setup failed, no further callbacks will be invoked on this handler. Args: connection: The connection, if setup was successful, or None if setup failed. error: None, if setup was successful, or an Exception if setup failed. `**kwargs`: Forward compatibility kwargs. """ pass @abstractmethod def on_connection_shutdown(self, reason: Optional[Exception], **kwargs) -> None: """Invoked when the connection finishes shutting down. This event will not be invoked if connection setup failed. Args: reason: Reason will be None if the user initiated the shutdown, otherwise the reason will be an Exception. **kwargs: Forward compatibility kwargs. """ pass @abstractmethod def on_protocol_message( self, headers: Sequence[Header], payload: bytes, message_type: MessageType, flags: int, **kwargs) -> None: """Invoked when a message for the connection (stream-id 0) is received. Args: headers: Message headers. payload: Binary message payload. message_type: Message type. flags: Message flags. Values from :class:`MessageFlag` may be XORed together. Not all flags can be used with all message types, consult documentation. **kwargs: Forward compatibility kwargs. """ pass def _to_binding_msg_args(headers, payload, message_type, flags): """ Transform args that a python send-msg function would take, into args that a native send-msg function would take. """ # python functions for sending messages if headers is None: headers = [] else: headers = [i._as_binding_tuple() for i in headers] if payload is None: payload = b'' if flags is None: flags = MessageFlag.NONE return (headers, payload, message_type, flags) def _from_binding_msg_args(headers, payload, message_type, flags): """ Transform msg-received args that came from native, into msg-received args presented to python users. """ headers = [Header._from_binding_tuple(i) for i in headers] if payload is None: payload = b'' message_type = MessageType(message_type) return (headers, payload, message_type, flags) def _on_message_flush(bound_future, bound_callback, error_code): # invoked when a message is flushed (written to wire), or canceled due to connection error. e = awscrt.exceptions.from_code(error_code) if error_code else None try: if bound_callback: bound_callback(error=e) finally: # ensure future completes, even if user callback had unhandled exception if error_code: bound_future.set_exception(e) else: bound_future.set_result(None) class ClientConnection(NativeResource): """A client connection for the event-stream RPC protocol. Use :meth:`ClientConnection.connect()` to establish a new connection. Note that the network connection stays alive until it is closed, even if no local references to the connection object remain. The user should store a reference to any connections, and call :meth:`close()` when they are done with them to avoid leaking resources. Attributes: host_name (str): Remote host name. port (int): Remote port. shutdown_future (concurrent.futures.Future[None]): Completes when this connection has finished shutting down. Future will contain a result of None, or an exception indicating why shutdown occurred. 
""" __slots__ = ['host_name', 'port', 'shutdown_future', '_connect_future', '_handler'] def __init__(self, host_name, port, handler): # Do no instantiate directly, use static connect method super().__init__() self.host_name = host_name # type: str self.port = port # type: int self.shutdown_future = Future() # type: Future self.shutdown_future.set_running_or_notify_cancel() # prevent cancel self._connect_future = Future() # type: Future self._connect_future.set_running_or_notify_cancel() # prevent cancel self._handler = handler # type: ClientConnectionHandler @classmethod def connect( cls, *, handler: ClientConnectionHandler, host_name: str, port: int, bootstrap: ClientBootstrap = None, socket_options: Optional[SocketOptions] = None, tls_connection_options: Optional[TlsConnectionOptions] = None) -> 'concurrent.futures.Future': """Asynchronously establish a new ClientConnection. Args: handler: Handler for connection events. host_name: Connect to host. port: Connect to port. bootstrap: Client bootstrap to use when initiating socket connection. If None is provided, the default singleton is used. socket_options: Optional socket options. If None is provided, then default options are used. tls_connection_options: Optional TLS connection options. If None is provided, then the connection will be attempted over plain-text. Returns: concurrent.futures.Future: A Future which completes when the connection succeeds or fails. If successful, the Future will contain None. Otherwise it will contain an exception. If the connection is successful, it will be made available via the handler's on_connection_setup callback. Note that this network connection stays alive until it is closed, even if no local references to the connection object remain. The user should store a reference to any connections, and call :meth:`close()` when they are done with them to avoid leaking resources. """ if not socket_options: socket_options = SocketOptions() # Connection is not made available to user until setup callback fires connection = cls(host_name, port, handler) if not bootstrap: bootstrap = ClientBootstrap.get_or_create_static_default() # connection._binding is set within the following call */ _awscrt.event_stream_rpc_client_connection_connect( host_name, port, bootstrap, socket_options, tls_connection_options, connection) return connection._connect_future def _on_connection_setup(self, error_code): if error_code: connection = None error = awscrt.exceptions.from_code(error_code) else: connection = self error = None try: self._handler.on_connection_setup(connection=connection, error=error) finally: # ensure future completes, even if user callback had unhandled exception if error: self._connect_future.set_exception(error) else: self._connect_future.set_result(None) def _on_connection_shutdown(self, error_code): reason = awscrt.exceptions.from_code(error_code) if error_code else None try: self._handler.on_connection_shutdown(reason=reason) finally: # ensure future completes, even if user callback had unhandled exception if reason: self.shutdown_future.set_exception(reason) else: self.shutdown_future.set_result(None) def _on_protocol_message(self, headers, payload, message_type, flags): # transform from simple types to actual classes headers, payload, message_type, flags = _from_binding_msg_args(headers, payload, message_type, flags) self._handler.on_protocol_message( headers=headers, payload=payload, message_type=message_type, flags=flags) def close(self): """Close the connection. Shutdown is asynchronous. 
This call has no effect if the connection is already closed or closing. Note that, if the network connection hasn't already ended, `close()` MUST be called to avoid leaking resources. The network connection will not terminate simply because there are no references to the connection object. Returns: concurrent.futures.Future: This connection's :attr:`shutdown_future`, which completes when shutdown has finished. """ # TODO: let user pass their own exception/error-code/reason for closing _awscrt.event_stream_rpc_client_connection_close(self._binding) return self.shutdown_future def is_open(self): """ Returns: bool: True if this connection is open and usable, False otherwise. Check :attr:`shutdown_future` to know when the connection is completely finished shutting down. """ return _awscrt.event_stream_rpc_client_connection_is_open(self._binding) def send_protocol_message( self, *, headers: Optional[Sequence[Header]] = None, payload: Optional[ByteString] = None, message_type: MessageType, flags: Optional[int] = None, on_flush: Callable = None) -> 'concurrent.futures.Future': """Send a protocol message. Protocol messages use stream-id 0. Use the returned future, or the `on_flush` callback, to be informed when the message is successfully written to the wire, or fails to send. Keyword Args: headers: Message headers. payload: Binary message payload. message_type: Message type. flags: Message flags. Values from :class:`MessageFlag` may be XORed together. Not all flags can be used with all message types, consult documentation. on_flush: Callback invoked when the message is successfully written to the wire, or fails to send. The function should take the following arguments and return nothing: * `error` (Optional[Exception]): None if the message was successfully written to the wire, or an Exception if it failed to send. * `**kwargs` (dict): Forward compatibility kwargs. This callback is always invoked on the connection's event-loop thread. Returns: A future which completes with a result of None if the message is successfully written to the wire, or an exception if the message fails to send. """ future = Future() future.set_running_or_notify_cancel() # prevent cancel # native code deals with simplified types headers, payload, message_type, flags = _to_binding_msg_args(headers, payload, message_type, flags) _awscrt.event_stream_rpc_client_connection_send_protocol_message( self._binding, headers, payload, message_type, flags, partial(_on_message_flush, future, on_flush)) return future def new_stream(self, handler: 'ClientContinuationHandler') -> 'ClientContinuation': """ Create a new stream. The stream will send no data until :meth:`ClientContinuation.activate()` is called. Call activate() when you're ready for callbacks and events to fire. Args: handler: Handler to process continuation messages and state changes. Returns: The new continuation object. """ continuation = ClientContinuation(handler, self) continuation._binding = _awscrt.event_stream_rpc_client_connection_new_stream(self) return continuation class ClientContinuation(NativeResource): """ A continuation of messages on a given stream-id. Create with :meth:`ClientConnection.new_stream()`. The stream will send no data until :meth:`ClientContinuation.activate()` is called. Call activate() when you're ready for callbacks and events to fire. Attributes: connection (ClientConnection): This stream's connection. closed_future (concurrent.futures.Future) : Future which completes with a result of None when the continuation has closed. 
""" def __init__(self, handler, connection): # Do not instantiate directly, use ClientConnection.new_stream() super().__init__() self._handler = handler self.connection = connection # type: ClientConnection self.closed_future = Future() # type: Future self.closed_future.set_running_or_notify_cancel() # prevent cancel def activate( self, *, operation: str, headers: Sequence[Header] = None, payload: ByteString = None, message_type: MessageType, flags: int = None, on_flush: Callable = None): """ Activate the stream by sending its first message. Use the returned future, or the `on_flush` callback, to be informed when the message is successfully written to the wire, or fails to send. activate() may only be called once, use send_message() to write further messages on this stream-id. Keyword Args: operation: Operation name for this stream. headers: Message headers. payload: Binary message payload. message_type: Message type. flags: Message flags. Values from :class:`MessageFlag` may be XORed together. Not all flags can be used with all message types, consult documentation. on_flush: Callback invoked when the message is successfully written to the wire, or fails to send. The function should take the following arguments and return nothing: * `error` (Optional[Exception]): None if the message was successfully written to the wire, or an Exception if it failed to send. * `**kwargs` (dict): Forward compatibility kwargs. This callback is always invoked on the connection's event-loop thread. Returns: A future which completes with a result of None if the message is successfully written to the wire, or an exception if the message fails to send. """ flush_future = Future() flush_future.set_running_or_notify_cancel() # prevent cancel # native code deals with simplified types headers, payload, message_type, flags = _to_binding_msg_args(headers, payload, message_type, flags) _awscrt.event_stream_rpc_client_continuation_activate( self._binding, # don't give binding a reference to self until activate() is called. # this reference is used for invoking callbacks, and its existence # keeps the python object alive until the closed callback fires self, operation, headers, payload, message_type, flags, partial(_on_message_flush, flush_future, on_flush)) return flush_future def send_message( self, *, headers: Sequence[Header] = None, payload: ByteString = None, message_type: MessageType, flags: int = None, on_flush: Callable = None) -> 'concurrent.futures.Future': """ Send a continuation message. Use the returned future, or the `on_flush` callback, to be informed when the message is successfully written to the wire, or fails to send. Note that the the first message on a stream-id must be sent with activate(), send_message() is for all messages that follow. Keyword Args: operation: Operation name for this stream. headers: Message headers. payload: Binary message payload. message_type: Message type. flags: Message flags. Values from :class:`MessageFlag` may be XORed together. Not all flags can be used with all message types, consult documentation. on_flush: Callback invoked when the message is successfully written to the wire, or fails to send. The function should take the following arguments and return nothing: * `error` (Optional[Exception]): None if the message was successfully written to the wire, or an Exception if it failed to send. * `**kwargs` (dict): Forward compatibility kwargs. This callback is always invoked on the connection's event-loop thread. 
Returns: A future which completes with a result of None if the message is successfully written to the wire, or an exception if the message fails to send. """ future = Future() future.set_running_or_notify_cancel() # prevent cancel # native code deals with simplified types headers, payload, message_type, flags = _to_binding_msg_args(headers, payload, message_type, flags) _awscrt.event_stream_rpc_client_continuation_send_message( self._binding, headers, payload, message_type, flags, partial(_on_message_flush, future, on_flush)) return future def is_closed(self): return _awscrt.event_stream_rpc_client_continuation_is_closed(self._binding) def _on_continuation_closed(self): try: self._handler.on_continuation_closed() finally: # ensure future completes, even if user callback had unhandled exception self.closed_future.set_result(None) def _on_continuation_message(self, headers, payload, message_type, flags): # transform from simple types to actual classes headers, payload, message_type, flags = _from_binding_msg_args(headers, payload, message_type, flags) self._handler.on_continuation_message( headers=headers, payload=payload, message_type=message_type, flags=flags) class ClientContinuationHandler(ABC): """Base class for handling stream continuation events. Inherit from this class and override methods to handle events. All callbacks will be invoked on the same thread (the same thread used by the connection). A common pattern is to store the continuation within its handler. Example:: continuation_handler.continuation = connection.new_stream(continuation_handler) """ @abstractmethod def on_continuation_message( self, headers: Sequence[Header], payload: bytes, message_type: MessageType, flags: int, **kwargs) -> None: """Invoked when a message is received on this continuation. Args: headers: Message headers. payload: Binary message payload. message_type: Message type. flags: Message flags. Values from :class:`MessageFlag` may be XORed together. Not all flags can be used with all message types, consult documentation. **kwargs: Forward compatibility kwargs. """ pass @abstractmethod def on_continuation_closed(self, **kwargs) -> None: """Invoked when the continuation is closed. Once the continuation is closed, no more messages may be sent or received. The continuation is closed when a message is sent or received with the TERMINATE_STREAM flag, or when the connection shuts down. Args: **kwargs: Forward compatibility kwargs. """ pass aws-crt-python-0.20.4+dfsg/awscrt/exceptions.py000066400000000000000000000027301456575232400214700ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt def from_code(code): """Given an AWS Common Runtime error code, return an exception. Returns a common Python exception type, if it's appropriate. For example, `code=1` aka `AWS_ERROR_OOM` will result in `MemoryError`. Otherwise, an :class:`AwsCrtError` is returned. Args: code (int): error code. Returns: BaseException: """ builtin = _awscrt.get_corresponding_builtin_exception(code) if builtin: return builtin() name = _awscrt.get_error_name(code) msg = _awscrt.get_error_message(code) return AwsCrtError(code=code, name=name, message=msg) class AwsCrtError(Exception): """ Base exception class for AWS Common Runtime exceptions. Args: code (int): Int value of error. name (str): Name of error. message (str): Message about error. Attributes: code (int): Int value of error. name (str): Name of error. message (str): Message about error. 
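    A small sketch of how these exceptions are typically produced and inspected
    (the ``describe`` helper below is illustrative, not part of this module)::

        import awscrt.exceptions

        # code 1 (AWS_ERROR_OOM) maps to a builtin exception, per from_code()
        assert isinstance(awscrt.exceptions.from_code(1), MemoryError)

        def describe(error):
            # illustrative helper for logging failures surfaced by CRT futures
            if isinstance(error, awscrt.exceptions.AwsCrtError):
                return "{} ({}): {}".format(error.name, error.code, error.message)
            return repr(error)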
""" def __init__(self, code, name, message): self.code = code self.name = name self.message = message def __repr__(self): return "{0}(name={1}, message={2}, code={3})".format( self.__class__.__name__, repr(self.name), repr(self.message), self.code) def __str__(self): return "{}: {}".format(self.name, self.message) aws-crt-python-0.20.4+dfsg/awscrt/http.py000066400000000000000000000507651456575232400203010ustar00rootroot00000000000000""" HTTP All network operations in `awscrt.http` are asynchronous. """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from concurrent.futures import Future from awscrt import NativeResource import awscrt.exceptions from awscrt.io import ClientBootstrap, InputStream, TlsConnectionOptions, SocketOptions from enum import IntEnum class HttpVersion(IntEnum): """HTTP protocol version enumeration""" Unknown = 0 #: Unknown Http1_0 = 1 #: HTTP/1.0 Http1_1 = 2 #: HTTP/1.1 Http2 = 3 #: HTTP/2 class HttpConnectionBase(NativeResource): """Base for HTTP connection classes.""" __slots__ = ('_shutdown_future', '_version') def __init__(self): super().__init__() self._shutdown_future = Future() @property def shutdown_future(self): """ concurrent.futures.Future: Completes when this connection has finished shutting down. Future will contain a result of None, or an exception indicating why shutdown occurred. Note that the connection may have been garbage-collected before this future completes. """ return self._shutdown_future @property def version(self): """HttpVersion: Protocol used by this connection""" return self._version def close(self): """Close the connection. Shutdown is asynchronous. This call has no effect if the connection is already closing. Returns: concurrent.futures.Future: This connection's :attr:`shutdown_future`, which completes when shutdown has finished. """ _awscrt.http_connection_close(self._binding) return self.shutdown_future def is_open(self): """ Returns: bool: True if this connection is open and usable, False otherwise. Check :attr:`shutdown_future` to know when the connection is completely finished shutting down. """ return _awscrt.http_connection_is_open(self._binding) class HttpClientConnection(HttpConnectionBase): """ An HTTP client connection. Use :meth:`HttpClientConnection.new()` to establish a new connection. """ __slots__ = ('_host_name', '_port') @classmethod def new(cls, host_name, port, bootstrap=None, socket_options=None, tls_connection_options=None, proxy_options=None): """ Asynchronously establish a new HttpClientConnection. Args: host_name (str): Connect to host. port (int): Connect to port. bootstrap (Optional [ClientBootstrap]): Client bootstrap to use when initiating socket connection. If None is provided, the default singleton is used. socket_options (Optional[SocketOptions]): Optional socket options. If None is provided, then default options are used. tls_connection_options (Optional[TlsConnectionOptions]): Optional TLS connection options. If None is provided, then the connection will be attempted over plain-text. proxy_options (Optional[HttpProxyOptions]): Optional proxy options. If None is provided then a proxy is not used. Returns: concurrent.futures.Future: A Future which completes when connection succeeds or fails. If successful, the Future will contain a new :class:`HttpClientConnection`. Otherwise, it will contain an exception. 
""" assert isinstance(bootstrap, ClientBootstrap) or bootstrap is None assert isinstance(host_name, str) assert isinstance(port, int) assert isinstance(tls_connection_options, TlsConnectionOptions) or tls_connection_options is None assert isinstance(socket_options, SocketOptions) or socket_options is None assert isinstance(proxy_options, HttpProxyOptions) or proxy_options is None future = Future() try: if not socket_options: socket_options = SocketOptions() if not bootstrap: bootstrap = ClientBootstrap.get_or_create_static_default() connection = cls() connection._host_name = host_name connection._port = port def on_connection_setup(binding, error_code, http_version): if error_code == 0: connection._binding = binding connection._version = HttpVersion(http_version) future.set_result(connection) else: future.set_exception(awscrt.exceptions.from_code(error_code)) # on_shutdown MUST NOT reference the connection itself, just the shutdown_future within it. # Otherwise we create a circular reference that prevents the connection from getting GC'd. shutdown_future = connection.shutdown_future def on_shutdown(error_code): if error_code: shutdown_future.set_exception(awscrt.exceptions.from_code(error_code)) else: shutdown_future.set_result(None) _awscrt.http_client_connection_new( bootstrap, on_connection_setup, on_shutdown, host_name, port, socket_options, tls_connection_options, proxy_options) except Exception as e: future.set_exception(e) return future @property def host_name(self): """Remote hostname""" return self._host_name @property def port(self): """Remote port""" return self._port def request(self, request, on_response=None, on_body=None): """Create :class:`HttpClientStream` to carry out the request/response exchange. NOTE: The HTTP stream sends no data until :meth:`HttpClientStream.activate()` is called. Call activate() when you're ready for callbacks and events to fire. Args: request (HttpRequest): Definition for outgoing request. on_response: Optional callback invoked once main response headers are received. The function should take the following arguments and return nothing: * `http_stream` (:class:`HttpClientStream`): HTTP stream carrying out this request/response exchange. * `status_code` (int): Response status code. * `headers` (List[Tuple[str, str]]): Response headers as a list of (name,value) pairs. * `**kwargs` (dict): Forward compatibility kwargs. An exception raise by this function will cause the HTTP stream to end in error. This callback is always invoked on the connection's event-loop thread. on_body: Optional callback invoked 0+ times as response body data is received. The function should take the following arguments and return nothing: * `http_stream` (:class:`HttpClientStream`): HTTP stream carrying out this request/response exchange. * `chunk` (buffer): Response body data (not necessarily a whole "chunk" of chunked encoding). * `**kwargs` (dict): Forward-compatibility kwargs. An exception raise by this function will cause the HTTP stream to end in error. This callback is always invoked on the connection's event-loop thread. 
Returns: HttpClientStream: """ return HttpClientStream(self, request, on_response, on_body) class HttpStreamBase(NativeResource): """Base for HTTP stream classes""" __slots__ = ('_connection', '_completion_future', '_on_body_cb') def __init__(self, connection, on_body=None): super().__init__() self._connection = connection self._completion_future = Future() self._on_body_cb = on_body @property def connection(self): return self._connection @property def completion_future(self): return self._completion_future def _on_body(self, chunk): if self._on_body_cb: self._on_body_cb(http_stream=self, chunk=chunk) class HttpClientStream(HttpStreamBase): """HTTP stream that sends a request and receives a response. Create an HttpClientStream with :meth:`HttpClientConnection.request()`. NOTE: The HTTP stream sends no data until :meth:`HttpClientStream.activate()` is called. Call activate() when you're ready for callbacks and events to fire. Attributes: connection (HttpClientConnection): This stream's connection. completion_future (concurrent.futures.Future): Future that will contain the response status code (int) when the request/response exchange completes. If the exchange fails to complete, the Future will contain an exception indicating why it failed. """ __slots__ = ('_response_status_code', '_on_response_cb', '_on_body_cb', '_request') def __init__(self, connection, request, on_response=None, on_body=None): assert isinstance(connection, HttpClientConnection) assert isinstance(request, HttpRequest) assert callable(on_response) or on_response is None assert callable(on_body) or on_body is None super().__init__(connection, on_body) self._on_response_cb = on_response self._response_status_code = None # keep HttpRequest alive until stream completes self._request = request self._binding = _awscrt.http_client_stream_new(self, connection, request) @property def response_status_code(self): """int: The response status code. This is None until a response arrives.""" return self._response_status_code def activate(self): """Begin sending the request. The HTTP stream does nothing until this is called. Call activate() when you are ready for its callbacks and events to fire. """ _awscrt.http_client_stream_activate(self) def _on_response(self, status_code, name_value_pairs): self._response_status_code = status_code if self._on_response_cb: self._on_response_cb(http_stream=self, status_code=status_code, headers=name_value_pairs) def _on_complete(self, error_code): # done with HttpRequest, drop reference self._request = None if error_code == 0: self._completion_future.set_result(self._response_status_code) else: self._completion_future.set_exception(awscrt.exceptions.from_code(error_code)) class HttpMessageBase(NativeResource): """ Base for HttpRequest and HttpResponse classes. """ __slots__ = ('_headers', '_body_stream') def __init__(self, binding, headers, body_stream=None): assert isinstance(headers, HttpHeaders) super().__init__() self._binding = binding self._headers = headers self._body_stream = None if body_stream: self.body_stream = body_stream @property def headers(self): """HttpHeaders: Headers to send.""" return self._headers @property def body_stream(self): return self._body_stream @body_stream.setter def body_stream(self, stream): self._body_stream = InputStream.wrap(stream) _awscrt.http_message_set_body_stream(self._binding, self._body_stream) class HttpRequest(HttpMessageBase): """ Definition for an outgoing HTTP request. 
The request may be transformed (ex: signing the request) before its data is eventually sent. Args: method (str): HTTP request method (verb). Default value is "GET". path (str): HTTP path-and-query value. Default value is "/". headers (Optional[HttpHeaders]): Optional headers. If None specified, an empty :class:`HttpHeaders` is created. body_stream(Optional[Union[InputStream, io.IOBase]]): Optional body as binary stream. """ __slots__ = () def __init__(self, method='GET', path='/', headers=None, body_stream=None): assert isinstance(headers, HttpHeaders) or headers is None if headers is None: headers = HttpHeaders() binding = _awscrt.http_message_new_request(headers) super().__init__(binding, headers, body_stream) self.method = method self.path = path @classmethod def _from_bindings(cls, request_binding, headers_binding): """Construct HttpRequest and its HttpHeaders from pre-existing native objects""" # avoid class's default constructor # just invoke parent class's __init__() request = cls.__new__(cls) headers = HttpHeaders._from_binding(headers_binding) super(cls, request).__init__(request_binding, headers) return request @property def method(self): """str: HTTP request method (verb).""" return _awscrt.http_message_get_request_method(self._binding) @method.setter def method(self, method): _awscrt.http_message_set_request_method(self._binding, method) @property def path(self): """str: HTTP path-and-query value.""" return _awscrt.http_message_get_request_path(self._binding) @path.setter def path(self, path): return _awscrt.http_message_set_request_path(self._binding, path) class HttpHeaders(NativeResource): """ Collection of HTTP headers. A given header name may have multiple values. Header names are always treated in a case-insensitive manner. HttpHeaders can be iterated over as (name,value) pairs. Args: name_value_pairs (Optional[List[Tuple[str, str]]]): Construct from a collection of (name,value) pairs. """ __slots__ = () def __init__(self, name_value_pairs=None): super().__init__() self._binding = _awscrt.http_headers_new() if name_value_pairs: self.add_pairs(name_value_pairs) @classmethod def _from_binding(cls, binding): """Construct from a pre-existing native object""" headers = cls.__new__(cls) # avoid class's default constructor super(cls, headers).__init__() # just invoke parent class's __init__() headers._binding = binding return headers def add(self, name, value): """ Add a name-value pair. Args: name (str): Name. value (str): Value. """ assert isinstance(name, str) assert isinstance(value, str) _awscrt.http_headers_add(self._binding, name, value) def add_pairs(self, name_value_pairs): """ Add list of (name,value) pairs. Args: name_value_pairs (List[Tuple[str, str]]): List of (name,value) pairs. """ _awscrt.http_headers_add_pairs(self._binding, name_value_pairs) def set(self, name, value): """ Set a name-value pair, any existing values for the name are removed. Args: name (str): Name. value (str): Value. """ assert isinstance(name, str) assert isinstance(value, str) _awscrt.http_headers_set(self._binding, name, value) def get_values(self, name): """ Return an iterator over the values for this name. Args: name (str): Name. Returns: Iterator[Tuple[str, str]]: """ assert isinstance(name, str) name = name.lower() for i in range(_awscrt.http_headers_count(self._binding)): name_i, value_i = _awscrt.http_headers_get_index(self._binding, i) if name_i.lower() == name: yield value_i def get(self, name, default=None): """ Get the first value for this name, ignoring any additional values. 
Returns `default` if no values exist. Args: name (str): Name. default (Optional[str]): If `name` not found, this value is returned. Defaults to None. Returns: str: """ assert isinstance(name, str) return _awscrt.http_headers_get(self._binding, name, default) def remove(self, name): """ Remove all values for this name. Raises a KeyError if name not found. Args: name (str): Header name. """ assert isinstance(name, str) _awscrt.http_headers_remove(self._binding, name) def remove_value(self, name, value): """ Remove a specific value for this name. Raises a ValueError if value not found. Args: name (str): Name. value (str): Value. """ assert isinstance(name, str) assert isinstance(value, str) _awscrt.http_headers_remove_value(self._binding, name, value) def clear(self): """ Clear all headers. """ _awscrt.http_headers_clear(self._binding) def __iter__(self): """ Iterate over all (name,value) pairs. """ for i in range(_awscrt.http_headers_count(self._binding)): yield _awscrt.http_headers_get_index(self._binding, i) def __str__(self): return self.__class__.__name__ + "(" + str([pair for pair in self]) + ")" class HttpProxyConnectionType(IntEnum): """Proxy connection type enumeration""" Legacy = 0 """ Use the old connection establishment logic that would use: 1. Forwarding if not using TLS 2. Tunneling if using TLS """ Forwarding = 1 """ Establish a request forwarding connection to the proxy. In this case, TLS is not a valid option. """ Tunneling = 2 """Establish a tunneling connection through the proxy to the ultimate endpoint.""" class HttpProxyAuthenticationType(IntEnum): """Proxy authentication type enumeration.""" Nothing = 0 """No authentication""" Basic = 1 """Username and password""" class HttpProxyOptions: """ Proxy options for HTTP clients. Args: host_name (str): Name of the proxy server to connect through. port (int): Port number of the proxy server to connect through. tls_connection_options (Optional[TlsConnectionOptions]): Optional `TlsConnectionOptions` for the Local to Proxy connection. Must be distinct from the `TlsConnectionOptions` provided to the HTTP connection. auth_type (HttpProxyAuthenticationType): Type of proxy authentication to use. Default is :const:`HttpProxyAuthenticationType.Nothing`. auth_username (Optional[str]): Username to use when `auth_type` is :const:`HttpProxyAuthenticationType.Basic`. auth_password (Optional[str]): Username to use when `auth_type` is :const:`HttpProxyAuthenticationType.Basic`. connection_type (Optional[HttpProxyConnectionType): Type of proxy connection to make. Default is :const:`HttpProxyConnectionType.Legacy`. Attributes: host_name (str): Name of the proxy server to connect through. port (int): Port number of the proxy server to connect through. tls_connection_options (Optional[TlsConnectionOptions]): Optional `TlsConnectionOptions` for the Local to Proxy connection. Must be distinct from the `TlsConnectionOptions` provided to the HTTP connection. auth_type (HttpProxyAuthenticationType): Type of proxy authentication to use. auth_username (Optional[str]): Username to use when `auth_type` is :const:`HttpProxyAuthenticationType.Basic`. auth_password (Optional[str]): Username to use when `auth_type` is :const:`HttpProxyAuthenticationType.Basic`. connection_type (HttpProxyConnectionType): Type of proxy connection to make. 
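    A construction sketch (the proxy host, credentials, and target endpoint are
    placeholders, and ``tls_connection_options`` is assumed to be created elsewhere)::

        proxy_options = HttpProxyOptions(
            host_name='proxy.example.com',
            port=8080,
            connection_type=HttpProxyConnectionType.Tunneling,
            auth_type=HttpProxyAuthenticationType.Basic,
            auth_username='user',
            auth_password='password')

        connect_future = HttpClientConnection.new(
            host_name='example.com',
            port=443,
            tls_connection_options=tls_connection_options,  # assumed created elsewhere
            proxy_options=proxy_options)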
""" def __init__(self, host_name, port, tls_connection_options=None, auth_type=HttpProxyAuthenticationType.Nothing, auth_username=None, auth_password=None, connection_type=HttpProxyConnectionType.Legacy): self.host_name = host_name self.port = port self.tls_connection_options = tls_connection_options self.auth_type = auth_type self.auth_username = auth_username self.auth_password = auth_password self.connection_type = connection_type aws-crt-python-0.20.4+dfsg/awscrt/io.py000066400000000000000000000672171456575232400177310ustar00rootroot00000000000000""" I/O library for `awscrt`. All networking in `awscrt` is asynchronous. Long-running event-loop threads are used for concurrency. """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from awscrt import NativeResource from enum import IntEnum import threading from typing import Union class LogLevel(IntEnum): NoLogs = 0 #: Fatal = 1 #: Error = 2 #: Warn = 3 #: Info = 4 #: Debug = 5 #: Trace = 6 #: def init_logging(log_level, file_name): """Initialize logging in `awscrt`. Args: log_level (LogLevel): Display messages of this importance and higher. `LogLevel.NoLogs` will disable logging. file_name (str): Logging destination. To write to stdout or stderr pass 'stdout' or 'stderr' as strings. Otherwise, a file path is assumed. """ assert log_level is not None assert file_name is not None _awscrt.init_logging(log_level, file_name) class EventLoopGroup(NativeResource): """A collection of event-loops. An event-loop is a thread for doing async work, such as I/O. Classes that need to do async work will ask the EventLoopGroup for an event-loop to use. Args: num_threads (Optional[int]): Maximum number of event-loops to create. If unspecified, one is created for each processor on the machine. cpu_group (Optional[int]): Optional processor group to which all threads will be pinned. Useful for systems with non-uniform memory access (NUMA) nodes. If specified, the number of threads will be capped at the number of processors in the group. Attributes: shutdown_event (threading.Event): Signals when EventLoopGroup's threads have all finished shutting down. Shutdown begins when the EventLoopGroup object is destroyed. """ _static_event_loop_group = None _static_event_loop_group_lock = threading.Lock() __slots__ = ('shutdown_event') def __init__(self, num_threads=None, cpu_group=None): super().__init__() if num_threads is None: # C uses 0 to indicate defaults num_threads = 0 if cpu_group is None: is_pinned = False cpu_group = 0 else: is_pinned = True shutdown_event = threading.Event() def on_shutdown(): shutdown_event.set() self.shutdown_event = shutdown_event self._binding = _awscrt.event_loop_group_new(num_threads, is_pinned, cpu_group, on_shutdown) @staticmethod def get_or_create_static_default(): with EventLoopGroup._static_event_loop_group_lock: if EventLoopGroup._static_event_loop_group is None: EventLoopGroup._static_event_loop_group = EventLoopGroup() return EventLoopGroup._static_event_loop_group @staticmethod def release_static_default(): with EventLoopGroup._static_event_loop_group_lock: EventLoopGroup._static_event_loop_group = None class HostResolverBase(NativeResource): """DNS host resolver.""" __slots__ = () class DefaultHostResolver(HostResolverBase): """Default DNS host resolver. Args: event_loop_group (EventLoopGroup): EventLoopGroup to use. max_hosts(int): Max host names to cache. 
""" _static_host_resolver = None _static_host_resolver_lock = threading.Lock() __slots__ = () def __init__(self, event_loop_group, max_hosts=16): assert isinstance(event_loop_group, EventLoopGroup) super().__init__() self._binding = _awscrt.host_resolver_new_default(max_hosts, event_loop_group) @staticmethod def get_or_create_static_default(): with DefaultHostResolver._static_host_resolver_lock: if DefaultHostResolver._static_host_resolver is None: DefaultHostResolver._static_host_resolver = DefaultHostResolver( EventLoopGroup.get_or_create_static_default()) return DefaultHostResolver._static_host_resolver @staticmethod def release_static_default(): with DefaultHostResolver._static_host_resolver_lock: DefaultHostResolver._static_host_resolver = None class ClientBootstrap(NativeResource): """Handles creation and setup of client socket connections. Args: event_loop_group (EventLoopGroup): EventLoopGroup to use. host_resolver (HostResolverBase): DNS host resolver to use. Attributes: shutdown_event (threading.Event): Signals when the ClientBootstrap's internal resources finish shutting down. Shutdown begins when the ClientBootstrap object is destroyed. """ _static_client_bootstrap = None _static_client_bootstrap_lock = threading.Lock() __slots__ = ('shutdown_event') def __init__(self, event_loop_group, host_resolver): assert isinstance(event_loop_group, EventLoopGroup) assert isinstance(host_resolver, HostResolverBase) super().__init__() shutdown_event = threading.Event() def on_shutdown(): shutdown_event.set() self.shutdown_event = shutdown_event self._binding = _awscrt.client_bootstrap_new(event_loop_group, host_resolver, on_shutdown) @staticmethod def get_or_create_static_default(): with ClientBootstrap._static_client_bootstrap_lock: if ClientBootstrap._static_client_bootstrap is None: ClientBootstrap._static_client_bootstrap = ClientBootstrap( EventLoopGroup.get_or_create_static_default(), DefaultHostResolver.get_or_create_static_default()) return ClientBootstrap._static_client_bootstrap @staticmethod def release_static_default(): with ClientBootstrap._static_client_bootstrap_lock: ClientBootstrap._static_client_bootstrap = None def _read_binary_file(filepath): with open(filepath, mode='rb') as fh: contents = fh.read() return contents class SocketDomain(IntEnum): IPv4 = 0 #: IPv6 = 1 #: Local = 2 #: Unix domain sockets (at at least something like them) class SocketType(IntEnum): Stream = 0 """A streaming socket sends reliable messages over a two-way connection. This means TCP when used with `SocketDomain.IPv4/6`, and Unix domain sockets when used with `SocketDomain.Local`""" DGram = 1 """A datagram socket is connectionless and sends unreliable messages. This means UDP when used with `SocketDomain.IPv4/6`. `SocketDomain.Local` is not compatible with `DGram` """ class SocketOptions: """Socket options. Attributes: domain (SocketDomain): Socket domain. type (SocketType): Socket type. connect_timeout_ms (int): Connection timeout, in milliseconds. keep_alive (bool): If set True, periodically transmit keepalive messages for detecting a disconnected peer. keep_alive_timeout_secs (int): Duration, in seconds, between keepalive transmissions in idle condition. If 0, then a default value is used. keep_alive_interval_secs (int): Duration, in seconds, between keepalive retransmissions, if acknowledgement of previous keepalive transmission is not received. If 0, then a default value is used. 
keep_alive_max_probes (int): If set, sets the number of keepalive probes allowed to fail before a connection is considered lost. """ __slots__ = ( 'domain', 'type', 'connect_timeout_ms', 'keep_alive', 'keep_alive_timeout_secs', 'keep_alive_interval_secs', 'keep_alive_max_probes' ) def __init__(self): for slot in self.__slots__: setattr(self, slot, None) self.domain = SocketDomain.IPv6 self.type = SocketType.Stream self.connect_timeout_ms = 5000 self.keep_alive = False self.keep_alive_interval_secs = 0 self.keep_alive_timeout_secs = 0 self.keep_alive_max_probes = 0 class TlsVersion(IntEnum): SSLv3 = 0 #: TLSv1 = 1 #: TLSv1_1 = 2 #: TLSv1_2 = 3 #: TLSv1_3 = 4 #: DEFAULT = 128 #: class TlsCipherPref(IntEnum): """TLS Cipher Preference. Each TlsCipherPref represents an ordered list of TLS Ciphers to use when negotiating a TLS Connection. At present, the ability to configure arbitrary orderings of TLS Ciphers is not allowed, and only a curated list of vetted TlsCipherPref's are exposed.""" DEFAULT = 0 """The underlying platform's default TLS Cipher Preference ordering. This is usually the best option, as it will be automatically updated as the underlying OS or platform changes, and will always be supported on all platforms.""" PQ_TLSv1_0_2021_05 = 6 #: """A TLS Cipher Preference ordering that supports TLS 1.0 through TLS 1.3, and has Kyber Round 3 as its highest priority post-quantum key exchange algorithm. PQ algorithms in this preference list will always be used in hybrid mode, and will be combined with a classical ECDHE key exchange that is performed in addition to the PQ key exchange. This preference makes a best-effort to negotiate a PQ algorithm, but if the peer does not support any PQ algorithms the TLS connection will fall back to a single classical algorithm for key exchange (such as ECDHE or RSA). NIST has announced that they plan to eventually standardize Kyber. However, the NIST standardization process might introduce minor changes that could cause the final Kyber standard to differ from the Kyber Round 3 implementation available in this preference list.""" def is_supported(self): """Return whether this Cipher Preference is available in the underlying platform's TLS implementation""" return _awscrt.is_tls_cipher_supported(self.value) class TlsContextOptions: """Options to create a TLS context. The static `TlsContextOptions.create_X()` methods provide common TLS configurations. A default-initialized TlsContextOptions has `verify_peer` set True. Attributes: min_tls_ver (TlsVersion): Minimum TLS version to use. System defaults are used by default. cipher_pref (TlsCipherPref): The TLS Cipher Preference to use. System defaults are used by default. verify_peer (bool): Whether to validate the peer's x.509 certificate. alpn_list (Optional[List[str]]): If set, names to use in Application Layer Protocol Negotiation (ALPN). ALPN is not supported on all systems, see :meth:`is_alpn_available()`. This can be customized per connection, via :meth:`TlsConnectionOptions.set_alpn_list()`. 
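    A typical client-side mTLS setup sketch (the file paths are placeholders)::

        from awscrt.io import ClientTlsContext, TlsContextOptions

        options = TlsContextOptions.create_client_with_mtls_from_path(
            'certificate.pem', 'private_key.pem')
        options.override_default_trust_store_from_path(ca_filepath='root_ca.pem')
        options.alpn_list = ['h2', 'http/1.1']

        tls_ctx = ClientTlsContext(options)
        tls_connection_options = tls_ctx.new_connection_options()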
""" __slots__ = ( 'min_tls_ver', 'ca_dirpath', 'ca_buffer', 'cipher_pref', 'alpn_list', 'certificate_buffer', 'private_key_buffer', 'pkcs12_filepath', 'pkcs12_password', 'verify_peer', '_pkcs11_lib', '_pkcs11_user_pin', '_pkcs11_slot_id', '_pkcs11_token_label', '_pkcs11_private_key_label', '_pkcs11_cert_file_path', '_pkcs11_cert_file_contents', '_windows_cert_store_path', ) def __init__(self): for slot in self.__slots__: setattr(self, slot, None) self.min_tls_ver = TlsVersion.DEFAULT self.cipher_pref = TlsCipherPref.DEFAULT self.verify_peer = True @staticmethod def create_client_with_mtls_from_path(cert_filepath, pk_filepath): """ Create options configured for use with mutual TLS in client mode. Both files are treated as PKCS #7 PEM armored. They are loaded from disk and stored in buffers internally. Args: cert_filepath (str): Path to certificate file. pk_filepath (str): Path to private key file. Returns: TlsContextOptions: """ assert isinstance(cert_filepath, str) assert isinstance(pk_filepath, str) cert_buffer = _read_binary_file(cert_filepath) key_buffer = _read_binary_file(pk_filepath) return TlsContextOptions.create_client_with_mtls(cert_buffer, key_buffer) @staticmethod def create_client_with_mtls(cert_buffer, key_buffer): """ Create options configured for use with mutual TLS in client mode. Both buffers are treated as PKCS #7 PEM armored. Args: cert_buffer (bytes): Certificate contents key_buffer (bytes): Private key contents. Returns: TlsContextOptions: """ assert isinstance(cert_buffer, bytes) assert isinstance(key_buffer, bytes) opt = TlsContextOptions() opt.certificate_buffer = cert_buffer opt.private_key_buffer = key_buffer return opt @staticmethod def create_client_with_mtls_pkcs11(*, pkcs11_lib: 'Pkcs11Lib', user_pin: Union[str, None], slot_id: int = None, token_label: str = None, private_key_label: str = None, cert_file_path: str = None, cert_file_contents=None): """ Create options configured for use with mutual TLS in client mode, using a PKCS#11 library for private key operations. NOTE: This configuration only works on Unix devices. Keyword Args: pkcs11_lib (Pkcs11Lib): Use this PKCS#11 library user_pin (str): User PIN, for logging into the PKCS#11 token. Pass `None` to log into a token with a "protected authentication path". slot_id (Optional[int]): ID of slot containing PKCS#11 token. If not specified, the token will be chosen based on other criteria (such as token label). token_label (Optional[str]): Label of the PKCS#11 token to use. If not specified, the token will be chosen based on other criteria (such as slot ID). private_key_label (Optional[str]): Label of private key object on PKCS#11 token. If not specified, the key will be chosen based on other criteria (such as being the only available private key on the token). cert_file_path (Optional[str]): Use this X.509 certificate (path to file on disk). The certificate must be PEM-formatted. The certificate may be specified by other means instead (ex: `cert_file_contents`) cert_file_contents (Optional[Union[str, bytes, bytearray]]): Use this X.509 certificate (contents in memory). The certificate must be PEM-formatted. 
The certificate may be specified by other means instead (ex: `cert_file_path`) """ assert isinstance(pkcs11_lib, Pkcs11Lib) assert isinstance(user_pin, str) or user_pin is None assert isinstance(slot_id, int) or slot_id is None assert isinstance(token_label, str) or token_label is None assert isinstance(private_key_label, str) or private_key_label is None assert isinstance(cert_file_path, str) or cert_file_path is None # note: not validating cert_file_contents, because "bytes-like object" isn't a strict type opt = TlsContextOptions() opt._pkcs11_lib = pkcs11_lib opt._pkcs11_user_pin = user_pin opt._pkcs11_slot_id = slot_id opt._pkcs11_token_label = token_label opt._pkcs11_private_key_label = private_key_label opt._pkcs11_cert_file_path = cert_file_path opt._pkcs11_cert_file_contents = cert_file_contents return opt @staticmethod def create_client_with_mtls_pkcs12(pkcs12_filepath, pkcs12_password): """ Create options configured for use with mutual TLS in client mode. NOTE: This configuration only works on Apple devices. Args: pkcs12_filepath (str): Path to PKCS #12 file. The file is loaded from disk and stored internally. pkcs12_password (str): Password to PKCS #12 file. Returns: TlsContextOptions: """ assert isinstance(pkcs12_filepath, str) assert isinstance(pkcs12_password, str) opt = TlsContextOptions() opt.pkcs12_filepath = pkcs12_filepath opt.pkcs12_password = pkcs12_password return opt @staticmethod def create_client_with_mtls_windows_cert_store_path(cert_path): """ Create options configured for use with mutual TLS in client mode, using a certificate in a Windows certificate store. NOTE: This configuration only works on Windows devices. Args: cert_path (str): Path to certificate in a Windows certificate store. The path must use backslashes and end with the certificate's thumbprint. Example: ``CurrentUser\\MY\\A11F8A9B5DF5B98BA3508FBCA575D09570E0D2C6`` Returns: TlsContextOptions """ assert isinstance(cert_path, str) opt = TlsContextOptions() opt._windows_cert_store_path = cert_path return opt @staticmethod def create_server_from_path(cert_filepath, pk_filepath): """ Create options configured for use in server mode. Both files are treated as PKCS #7 PEM armored. They are loaded from disk and stored in buffers internally. Args: cert_filepath (str): Path to certificate file. pk_filepath (str): Path to private key file. Returns: TlsContextOptions: """ assert isinstance(cert_filepath, str) assert isinstance(pk_filepath, str) cert_buffer = _read_binary_file(cert_filepath) key_buffer = _read_binary_file(pk_filepath) return TlsContextOptions.create_server(cert_buffer, key_buffer) @staticmethod def create_server(cert_buffer, key_buffer): """ Create options configured for use in server mode. Both buffers are treated as PKCS #7 PEM armored. Args: cert_buffer (bytes): Certificate contents. key_buffer (bytes): Private key contents. Returns: TlsContextOptions: """ assert isinstance(cert_buffer, bytes) assert isinstance(key_buffer, bytes) opt = TlsContextOptions() opt.certificate_buffer = cert_buffer opt.private_key_buffer = key_buffer opt.verify_peer = False return opt @staticmethod def create_server_pkcs12(pkcs12_filepath, pkcs12_password): """ Create options configured for use in server mode. NOTE: This configuration only works on Apple devices. Args: pkcs12_filepath (str): Path to PKCS #12 file. pkcs12_password (str): Password to PKCS #12 file. 
Returns: TlsContextOptions: """ assert isinstance(pkcs12_filepath, str) assert isinstance(pkcs12_password, str) opt = TlsContextOptions() opt.pkcs12_filepath = pkcs12_filepath opt.pkcs12_password = pkcs12_password opt.verify_peer = False return opt def override_default_trust_store_from_path(self, ca_dirpath=None, ca_filepath=None): """Override default trust store. Args: ca_dirpath (Optional[str]): Path to directory containing trusted certificates, which will overrides the default trust store. Only supported on Unix. ca_filepath(Optional[str]): Path to file containing PEM armored chain of trusted CA certificates. """ assert isinstance(ca_dirpath, str) or ca_dirpath is None assert isinstance(ca_filepath, str) or ca_filepath is None if ca_filepath: ca_buffer = _read_binary_file(ca_filepath) self.override_default_trust_store(ca_buffer) self.ca_dirpath = ca_dirpath def override_default_trust_store(self, rootca_buffer): """Override default trust store. Args: rootca_buffer (bytes): PEM armored chain of trusted CA certificates. """ assert isinstance(rootca_buffer, bytes) self.ca_buffer = rootca_buffer class ClientTlsContext(NativeResource): """Client TLS context. A context is expensive, but can be used for the lifetime of the application by all outgoing connections that wish to use the same TLS configuration. Args: options (TlsContextOptions): Configuration options. """ __slots__ = () def __init__(self, options): assert isinstance(options, TlsContextOptions) super().__init__() self._binding = _awscrt.client_tls_ctx_new( options.min_tls_ver.value, options.cipher_pref.value, options.ca_dirpath, options.ca_buffer, _alpn_list_to_str(options.alpn_list), options.certificate_buffer, options.private_key_buffer, options.pkcs12_filepath, options.pkcs12_password, options.verify_peer, options._pkcs11_lib, options._pkcs11_user_pin, options._pkcs11_slot_id, options._pkcs11_token_label, options._pkcs11_private_key_label, options._pkcs11_cert_file_path, options._pkcs11_cert_file_contents, options._windows_cert_store_path, ) def new_connection_options(self): """Create a :class:`TlsConnectionOptions` that makes use of this TLS context. Returns: TlsConnectionOptions: """ return TlsConnectionOptions(self) class TlsConnectionOptions(NativeResource): """Connection-specific TLS options. Note that, while a TLS context is an expensive object, a :class:`TlsConnectionOptions` is cheap. Args: tls_ctx (ClientTlsContext): TLS context. A context can be shared by many connections. Attributes: tls_ctx (ClientTlsContext): TLS context. """ __slots__ = ('tls_ctx') def __init__(self, tls_ctx): assert isinstance(tls_ctx, ClientTlsContext) super().__init__() self.tls_ctx = tls_ctx self._binding = _awscrt.tls_connections_options_new_from_ctx(tls_ctx) def set_alpn_list(self, alpn_list): """Set names to use in Application Layer Protocol Negotiation (ALPN). This overrides any ALPN list on the TLS context, see :attr:`TlsContextOptions.alpn_list`. ALPN is not supported on all systems, see :meth:`is_alpn_available()`. Args: alpn_list (List[str]): List of protocol names. """ _awscrt.tls_connection_options_set_alpn_list(self, _alpn_list_to_str(alpn_list)) def set_server_name(self, server_name): """Set server name. Sets name for TLS Server Name Indication (SNI). Name is also used for x.509 validation. Args: server_name (str): Server name. 
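        A per-connection customization sketch (assuming ``tls_ctx`` is an existing
        :class:`ClientTlsContext`; the server name is a placeholder)::

            tls_connection_options = tls_ctx.new_connection_options()
            tls_connection_options.set_alpn_list(['http/1.1'])
            tls_connection_options.set_server_name('example.com')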
""" _awscrt.tls_connection_options_set_server_name(self, server_name) def _alpn_list_to_str(alpn_list): """ Transform ['h2', 'http/1.1'] -> "h2;http/1.1" None is returned if list is None or empty """ if alpn_list: assert not isinstance(alpn_list, str) return ';'.join(alpn_list) return None def is_alpn_available(): """Returns True if Application Layer Protocol Negotiation (ALPN) is supported on this system.""" return _awscrt.is_alpn_available() class InputStream(NativeResource): """InputStream allows `awscrt` native code to read from Python binary I/O classes. Args: stream (io.IOBase): Python binary I/O stream to wrap. """ __slots__ = ('_stream') # TODO: Implement IOBase interface so Python can read from this class as well. def __init__(self, stream): # duck-type instead of checking inheritance from IOBase. # At the least, stream must have read() if not callable(getattr(stream, 'read', None)): raise TypeError('I/O stream type expected') assert not isinstance(stream, InputStream) super().__init__() self._stream = stream self._binding = _awscrt.input_stream_new(self) def _read_into_memoryview(self, m): # Read into memoryview m. # Return number of bytes read, or None if no data available. try: # prefer the most efficient read methods, if hasattr(self._stream, 'readinto1'): return self._stream.readinto1(m) if hasattr(self._stream, 'readinto'): return self._stream.readinto(m) if hasattr(self._stream, 'read1'): data = self._stream.read1(len(m)) else: data = self._stream.read(len(m)) n = len(data) m[:n] = data return n except BlockingIOError: return None def _seek(self, offset, whence): return self._stream.seek(offset, whence) @classmethod def wrap(cls, stream, allow_none=False): """ Given some stream type, returns an :class:`InputStream`. Args: stream (Union[io.IOBase, InputStream, None]): Binary I/O stream to wrap. allow_none (bool): Whether to allow `stream` to be None. If False (default), and `stream` is None, an exception is raised. Returns: Union[InputStream, None]: If `stream` is already an :class:`InputStream`, it is returned. Otherwise, an :class:`InputStream` which wraps the `stream` is returned. If `allow_none` is True, and `stream` is None, then None is returned. """ if stream is None and allow_none: return None if isinstance(stream, InputStream): return stream return cls(stream) class Pkcs11Lib(NativeResource): """ Handle to a loaded PKCS#11 library. For most use cases, a single instance of :class:`Pkcs11Lib` should be used for the lifetime of your application. Keyword Args: file (str): Path to PKCS#11 library. behavior (Optional[InitializeFinalizeBehavior]): Specifies how `C_Initialize()` and `C_Finalize()` will be called on the PKCS#11 library (default is :attr:`InitializeFinalizeBehavior.DEFAULT`) """ class InitializeFinalizeBehavior(IntEnum): """ An enumeration. Controls how `C_Initialize()` and `C_Finalize()` are called on the PKCS#11 library. """ DEFAULT = 0 """ Relaxed behavior that accommodates most use cases. `C_Initialize()` is called on creation, and "already-initialized" errors are ignored. `C_Finalize()` is never called, just in case another part of your application is still using the PKCS#11 library. """ OMIT = 1 """ Skip calling `C_Initialize()` and `C_Finalize()`. Use this if your application has already initialized the PKCS#11 library, and you do not want `C_Initialize()` called again. """ STRICT = 2 """ `C_Initialize()` is called on creation and `C_Finalize()` is called on cleanup. 
If `C_Initialize()` reports that's it's already initialized, this is treated as an error. Use this if you need perfect cleanup (ex: running valgrind with --leak-check). """ def __init__(self, *, file: str, behavior: InitializeFinalizeBehavior = None): super().__init__() if behavior is None: behavior = Pkcs11Lib.InitializeFinalizeBehavior.DEFAULT self._binding = _awscrt.pkcs11_lib_new(file, behavior) aws-crt-python-0.20.4+dfsg/awscrt/mqtt.py000066400000000000000000001077661456575232400203130ustar00rootroot00000000000000""" MQTT All network operations in `awscrt.mqtt` are asynchronous. """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from concurrent.futures import Future from enum import IntEnum from inspect import signature from awscrt import NativeResource import awscrt.exceptions from awscrt.http import HttpProxyOptions, HttpRequest from awscrt.io import ClientBootstrap, ClientTlsContext, SocketOptions from dataclasses import dataclass from awscrt.mqtt5 import Client as Mqtt5Client class QoS(IntEnum): """Quality of Service enumeration [MQTT-4.3] """ AT_MOST_ONCE = 0 """QoS 0 - At most once delivery The message is delivered according to the capabilities of the underlying network. No response is sent by the receiver and no retry is performed by the sender. The message arrives at the receiver either once or not at all. """ AT_LEAST_ONCE = 1 """QoS 1 - At least once delivery This quality of service ensures that the message arrives at the receiver at least once. """ EXACTLY_ONCE = 2 """QoS 2 - Exactly once delivery This is the highest quality of service, for use when neither loss nor duplication of messages are acceptable. There is an increased overhead associated with this quality of service. Note that, while this client supports QoS 2, the AWS IoT Core server does not support QoS 2 at time of writing (May 2020). """ def to_mqtt5(self): from awscrt.mqtt5 import QoS as Mqtt5QoS """Convert a Mqtt3 QoS to Mqtt5 QoS """ return Mqtt5QoS(self.value) def _try_qos(qos_value): """Return None if the value cannot be converted to Qos (ex: 0x80 subscribe failure)""" try: return QoS(qos_value) except Exception: return None class ConnectReturnCode(IntEnum): """Connect return code enumeration. [MQTT-3.2.2.3] """ ACCEPTED = 0 """Connection Accepted.""" UNACCEPTABLE_PROTOCOL_VERSION = 1 """Connection Refused, unacceptable protocol version. The Server does not support the level of the MQTT protocol requested by the Client. """ IDENTIFIER_REJECTED = 2 """Connection Refused, identifier rejected. The Client identifier is correct UTF-8 but not allowed by the Server. """ SERVER_UNAVAILABLE = 3 """Connection Refused, Server unavailable. The Network Connection has been made but the MQTT service is unavailable. """ BAD_USERNAME_OR_PASSWORD = 4 """Connection Refused, bad user name or password. The data in the user name or password is malformed. """ NOT_AUTHORIZED = 5 """Connection Refused, not authorized. The Client is not authorized to connect. """ class Will: """A Will message is published by the server if a client is lost unexpectedly. The Will message is stored on the server when a client connects. It is published if the client connection is lost without the server receiving a DISCONNECT packet. [MQTT-3.1.2-8] Args: topic (str): Topic to publish Will message on. qos (QoS): QoS used when publishing the Will message. payload (bytes): Content of Will message. retain (bool): Whether the Will message is to be retained when it is published. 
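    A construction sketch (the topic and payload are placeholders)::

        will = Will(
            topic='my/client/lastwill',
            qos=QoS.AT_LEAST_ONCE,
            payload=b'client disconnected unexpectedly',
            retain=False)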
Attributes: topic (str): Topic to publish Will message on. qos (QoS): QoS used when publishing the Will message. payload (bytes): Content of Will message. retain (bool): Whether the Will message is to be retained when it is published. """ __slots__ = ('topic', 'qos', 'payload', 'retain') def __init__(self, topic, qos, payload, retain): self.topic = topic self.qos = qos self.payload = payload self.retain = retain @dataclass class OnConnectionSuccessData: """Dataclass containing data related to a on_connection_success Callback Args: return_code (ConnectReturnCode): Connect return. code received from the server. session_present (bool): True if the connection resumes an existing session. False if new session. Note that the server has forgotten all previous subscriptions if this is False. Subscriptions can be re-established via resubscribe_existing_topics() if the connection was a reconnection. """ return_code: ConnectReturnCode = None session_present: bool = False @dataclass class OnConnectionFailureData: """Dataclass containing data related to a on_connection_failure Callback Args: error (ConnectReturnCode): Error code with reason for connection failure """ error: awscrt.exceptions.AwsCrtError = None @dataclass class OnConnectionClosedData: """Dataclass containing data related to a on_connection_closed Callback. Currently unused. """ pass class Client(NativeResource): """MQTT client. Args: bootstrap (Optional [ClientBootstrap]): Client bootstrap to use when initiating new socket connections. If None is provided, the default singleton is used. tls_ctx (Optional[ClientTlsContext]): TLS context for secure socket connections. If None is provided, then an unencrypted connection is used. """ __slots__ = ('tls_ctx') def __init__(self, bootstrap=None, tls_ctx=None): assert isinstance(bootstrap, ClientBootstrap) or bootstrap is None assert tls_ctx is None or isinstance(tls_ctx, ClientTlsContext) super().__init__() self.tls_ctx = tls_ctx if not bootstrap: bootstrap = ClientBootstrap.get_or_create_static_default() self._binding = _awscrt.mqtt_client_new(bootstrap, tls_ctx) @dataclass class OperationStatisticsData: """Dataclass containing some simple statistics about the current state of the connection's queue of operations Args: incomplete_operation_count (int): total number of operations submitted to the connection that have not yet been completed. Unacked operations are a subset of this. incomplete_operation_size (int): total packet size of operations submitted to the connection that have not yet been completed. Unacked operations are a subset of this. unacked_operation_count (int): total number of operations that have been sent to the server and are waiting for a corresponding ACK before they can be completed. unacked_operation_size (int): total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before they can be completed. """ incomplete_operation_count: int = 0 incomplete_operation_size: int = 0 unacked_operation_count: int = 0 unacked_operation_size: int = 0 class Connection(NativeResource): """MQTT client connection. Args: client (Client): MQTT client to spawn connection from. host_name (str): Server name to connect to. port (int): Server port to connect to. client_id (str): ID to place in CONNECT packet. Must be unique across all devices/clients. If an ID is already in use, the other client will be disconnected. clean_session (bool): Whether or not to start a clean session with each reconnect. 
If True, the server will forget all subscriptions with each reconnect. Set False to request that the server resume an existing session or start a new session that may be resumed after a connection loss. The `session_present` bool in the connection callback informs whether an existing session was successfully resumed. If an existing session is resumed, the server remembers previous subscriptions and sends messages (with QoS1 or higher) that were published while the client was offline. on_connection_interrupted: Optional callback invoked whenever the MQTT connection is lost. The MQTT client will automatically attempt to reconnect. The function should take the following arguments return nothing: * `connection` (:class:`Connection`): This MQTT Connection. * `error` (:class:`awscrt.exceptions.AwsCrtError`): Exception which caused connection loss. * `**kwargs` (dict): Forward-compatibility kwargs. on_connection_resumed: Optional callback invoked whenever the MQTT connection is automatically resumed. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `return_code` (:class:`ConnectReturnCode`): Connect return code received from the server. * `session_present` (bool): True if resuming existing session. False if new session. Note that the server has forgotten all previous subscriptions if this is False. Subscriptions can be re-established via resubscribe_existing_topics(). * `**kwargs` (dict): Forward-compatibility kwargs. on_connection_success: Optional callback invoked whenever the connection successfully connects. This callback is invoked for every successful connect and every successful reconnect. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `callback_data` (:class:`OnConnectionSuccessData`): The data returned from the connection success. on_connection_failure: Optional callback invoked whenever the connection fails to connect. This callback is invoked for every failed connect and every failed reconnect. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `callback_data` (:class:`OnConnectionFailureData`): The data returned from the connection failure. on_connection_closed: Optional callback invoked whenever the connection has been disconnected and shutdown successfully. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `callback_data` (:class:`OnConnectionClosedData`): The data returned from the connection close. reconnect_min_timeout_secs (int): Minimum time to wait between reconnect attempts. Must be <= `reconnect_max_timeout_secs`. Wait starts at min and doubles with each attempt until max is reached. reconnect_max_timeout_secs (int): Maximum time to wait between reconnect attempts. Must be >= `reconnect_min_timeout_secs`. Wait starts at min and doubles with each attempt until max is reached. keep_alive_secs (int): The keep alive value, in seconds, to send in CONNECT packet. A PING will automatically be sent at this interval. The server will assume the connection is lost if no PING is received after 1.5X this value. This duration must be longer than ping_timeout_ms. ping_timeout_ms (int): Milliseconds to wait for ping response before client assumes the connection is invalid and attempts to reconnect. This duration must be shorter than `keep_alive_secs`. 
protocol_operation_timeout_ms (int): Milliseconds to wait for a response to an operation that requires a response by protocol. Set to zero to disable the timeout. Otherwise, the operation will fail if no response is received within this amount of time after the packet is written to the socket. This currently applies to PUBLISH (QoS>0) and UNSUBSCRIBE. will (Will): Will to send with CONNECT packet. The will is published by the server when its connection to the client is unexpectedly lost. username (str): Username to connect with. password (str): Password to connect with. socket_options (Optional[awscrt.io.SocketOptions]): Optional socket options. use_websockets (bool): If True, connect to MQTT over websockets. websocket_proxy_options (Optional[awscrt.http.HttpProxyOptions]): Optional proxy options for websocket connections. Deprecated, use `proxy_options` instead. websocket_handshake_transform: Optional function to transform websocket handshake request. If provided, function is called each time a websocket connection is attempted. The function may modify the HTTP request before it is sent to the server. See :class:`WebsocketHandshakeTransformArgs` for more info. Function should take the following arguments and return nothing: * `transform_args` (:class:`WebsocketHandshakeTransformArgs`): Contains HTTP request to be transformed. Function must call `transform_args.set_done()` when complete. * `**kwargs` (dict): Forward-compatibility kwargs. proxy_options (Optional[awscrt.http.HttpProxyOptions]): Optional proxy options for all connections. """ def __init__(self, client, host_name, port, client_id, clean_session=True, on_connection_interrupted=None, on_connection_resumed=None, reconnect_min_timeout_secs=5, reconnect_max_timeout_secs=60, keep_alive_secs=1200, ping_timeout_ms=3000, protocol_operation_timeout_ms=0, will=None, username=None, password=None, socket_options=None, use_websockets=False, websocket_proxy_options=None, websocket_handshake_transform=None, proxy_options=None, on_connection_success=None, on_connection_failure=None, on_connection_closed=None ): assert isinstance(client, Client) or isinstance(client, Mqtt5Client) assert callable(on_connection_interrupted) or on_connection_interrupted is None assert callable(on_connection_resumed) or on_connection_resumed is None assert isinstance(will, Will) or will is None assert isinstance(socket_options, SocketOptions) or socket_options is None assert isinstance(websocket_proxy_options, HttpProxyOptions) or websocket_proxy_options is None assert isinstance(proxy_options, HttpProxyOptions) or proxy_options is None assert callable(websocket_handshake_transform) or websocket_handshake_transform is None assert callable(on_connection_success) or on_connection_success is None assert callable(on_connection_failure) or on_connection_failure is None assert callable(on_connection_closed) or on_connection_closed is None if reconnect_min_timeout_secs > reconnect_max_timeout_secs: raise ValueError("'reconnect_min_timeout_secs' cannot exceed 'reconnect_max_timeout_secs'") if keep_alive_secs * 1000 <= ping_timeout_ms: raise ValueError("'keep_alive_secs' duration must be longer than 'ping_timeout_ms'") if proxy_options and websocket_proxy_options: raise ValueError("'websocket_proxy_options' has been deprecated in favor of 'proxy_options'. " "Both parameters may not be set.") super().__init__() # init-only self.client = client self._client_version = 5 if isinstance(client, Mqtt5Client) else 3 self._on_connection_interrupted_cb = on_connection_interrupted self._on_connection_resumed_cb = on_connection_resumed self._use_websockets = use_websockets self._ws_handshake_transform_cb = websocket_handshake_transform self._on_connection_success_cb = on_connection_success self._on_connection_failure_cb = on_connection_failure self._on_connection_closed_cb = on_connection_closed # may be changed at runtime; takes effect the next time connect/reconnect occurs self.client_id = client_id self.host_name = host_name self.port = port self.clean_session = clean_session self.reconnect_min_timeout_secs = reconnect_min_timeout_secs self.reconnect_max_timeout_secs = reconnect_max_timeout_secs self.keep_alive_secs = keep_alive_secs self.ping_timeout_ms = ping_timeout_ms self.protocol_operation_timeout_ms = protocol_operation_timeout_ms self.will = will self.username = username self.password = password self.socket_options = socket_options if socket_options else SocketOptions() self.proxy_options = proxy_options if proxy_options else websocket_proxy_options self._binding = _awscrt.mqtt_client_connection_new( self, client, use_websockets, self._client_version ) def _check_uses_old_message_callback_signature(self, callback): # The callback used to have fewer args. Detect callbacks that only accept the # old (topic, payload) args and no forward-compatibility **kwargs, so that # only those args are passed to them. callback_sig = signature(callback) try: # try new signature callback_sig.bind(topic='topic', payload='payload', dup=True, qos=QoS(1), retain=True) return False except TypeError: # try old signature callback_sig.bind(topic='topic', payload='payload') return True def _on_connection_interrupted(self, error_code): if self._on_connection_interrupted_cb: self._on_connection_interrupted_cb(connection=self, error=awscrt.exceptions.from_code(error_code)) def _on_connection_resumed(self, return_code, session_present): if self._on_connection_resumed_cb: self._on_connection_resumed_cb( connection=self, return_code=ConnectReturnCode(return_code), session_present=session_present) def _ws_handshake_transform(self, http_request_binding, http_headers_binding, native_userdata): if self._ws_handshake_transform_cb is None: _awscrt.mqtt_ws_handshake_transform_complete(None, native_userdata) return def _on_complete(f): _awscrt.mqtt_ws_handshake_transform_complete(f.exception(), native_userdata) future = Future() future.add_done_callback(_on_complete) http_request = HttpRequest._from_bindings(http_request_binding, http_headers_binding) transform_args = WebsocketHandshakeTransformArgs(self, http_request, future) try: self._ws_handshake_transform_cb(transform_args=transform_args) except Exception as e: # Call set_done() if the user failed to do so before the uncaught exception was raised; # there's a chance the callback wasn't callable and the user has no idea we tried to hand them the baton.
if not future.done(): transform_args.set_done(e) def _on_connection_closed(self): if self: if self._on_connection_closed_cb: data = OnConnectionClosedData() self._on_connection_closed_cb(connection=self, callback_data=data) def _on_connection_success(self, return_code, session_present): if self: if self._on_connection_success_cb: data = OnConnectionSuccessData( return_code=ConnectReturnCode(return_code), session_present=session_present) self._on_connection_success_cb(connection=self, callback_data=data) def _on_connection_failure(self, error_code): if self: if self._on_connection_failure_cb: data = OnConnectionFailureData(error=awscrt.exceptions.from_code(error_code)) self._on_connection_failure_cb(connection=self, callback_data=data) def connect(self): """Open the actual connection to the server (async). Returns: concurrent.futures.Future: Future which completes when connection succeeds or fails. If connection fails, Future will contain an exception. If connection succeeds, Future will contain a dict with the following members: * ['session_present'] (bool): is True if resuming existing session and False if new session. """ future = Future() def on_connect(error_code, return_code, session_present): if return_code: future.set_exception(Exception(ConnectReturnCode(return_code))) elif error_code: future.set_exception(awscrt.exceptions.from_code(error_code)) else: future.set_result(dict(session_present=session_present)) try: _awscrt.mqtt_client_connection_connect( self._binding, self.client_id, self.host_name, self.port, self.socket_options, self.client.tls_ctx, self.reconnect_min_timeout_secs, self.reconnect_max_timeout_secs, self.keep_alive_secs, self.ping_timeout_ms, self.protocol_operation_timeout_ms, self.will, self.username, self.password, self.clean_session, on_connect, self.proxy_options ) except Exception as e: future.set_exception(e) return future def reconnect(self): """DEPRECATED. awscrt.mqtt.ClientConnection automatically reconnects. To cease reconnect attempts, call disconnect(). To resume the connection, call connect(). """ future = Future() def on_connect(error_code, return_code, session_present): if return_code: future.set_exception(Exception(ConnectReturnCode(return_code))) elif error_code: future.set_exception(awscrt.exceptions.from_code(error_code)) else: future.set_result(dict(session_present=session_present)) try: _awscrt.mqtt_client_connection_reconnect(self._binding, on_connect) except Exception as e: future.set_exception(e) return future def disconnect(self): """Close the connection (async). Returns: concurrent.futures.Future: Future which completes when the connection is closed. The future will contain an empty dict. """ future = Future() def on_disconnect(): future.set_result(dict()) try: _awscrt.mqtt_client_connection_disconnect(self._binding, on_disconnect) except Exception as e: future.set_exception(e) return future def subscribe(self, topic, qos, callback=None): """Subscribe to a topic filter (async). The client sends a SUBSCRIBE packet and the server responds with a SUBACK. subscribe() may be called while the device is offline, though the async operation cannot complete successfully until the connection resumes. Once subscribed, `callback` is invoked each time a message matching the `topic` is received. It is possible for such messages to arrive before the SUBACK is received. Args: topic (str): Subscribe to this topic filter, which may include wildcards. qos (QoS): Maximum requested QoS that server may use when sending messages to the client. 
The server may grant a lower QoS in the SUBACK (see returned Future) callback: Optional callback invoked when message received. Function should take the following arguments and return nothing: * `topic` (str): Topic receiving message. * `payload` (bytes): Payload of message. * `dup` (bool): DUP flag. If True, this might be re-delivery of an earlier attempt to send the message. * `qos` (:class:`QoS`): Quality of Service used to deliver the message. * `retain` (bool): Retain flag. If True, the message was sent as a result of a new subscription being made by the client. * `**kwargs` (dict): Forward-compatibility kwargs. Returns: Tuple[concurrent.futures.Future, int]: Tuple containing a Future and the ID of the SUBSCRIBE packet. The Future completes when a SUBACK is received from the server. If successful, the Future will contain a dict with the following members: * ['packet_id'] (int): ID of the SUBSCRIBE packet being acknowledged. * ['topic'] (str): Topic filter of the SUBSCRIBE packet being acknowledged. * ['qos'] (:class:`QoS`): Maximum QoS that was granted by the server. This may be lower than the requested QoS. If unsuccessful, the Future contains an exception. The exception will be a :class:`SubscribeError` if a SUBACK was received in which the server rejected the subscription. Other exception types indicate other errors with the operation. """ future = Future() packet_id = 0 if callback: uses_old_signature = self._check_uses_old_message_callback_signature(callback) def callback_wrapper(topic, payload, dup, qos, retain): if uses_old_signature: callback(topic=topic, payload=payload) else: callback(topic=topic, payload=payload, dup=dup, qos=QoS(qos), retain=retain) else: callback_wrapper = None def suback(packet_id, topic, qos, error_code): if error_code: future.set_exception(awscrt.exceptions.from_code(error_code)) else: qos = _try_qos(qos) if qos is None: future.set_exception(SubscribeError(topic)) else: future.set_result(dict( packet_id=packet_id, topic=topic, qos=qos, )) try: assert callable(callback) or callback is None from awscrt.mqtt5 import QoS as Mqtt5QoS if (isinstance(qos, Mqtt5QoS)): qos = qos.to_mqtt3() assert isinstance(qos, QoS) packet_id = _awscrt.mqtt_client_connection_subscribe( self._binding, topic, qos.value, callback_wrapper, suback) except Exception as e: future.set_exception(e) return future, packet_id def on_message(self, callback): """Set callback to be invoked when ANY message is received. callback: Callback to invoke when message received, or None to disable. Function should take the following arguments and return nothing: * `topic` (str): Topic receiving message. * `payload` (bytes): Payload of message. * `dup` (bool): DUP flag. If True, this might be re-delivery of an earlier attempt to send the message. * `qos` (:class:`QoS`): Quality of Service used to deliver the message. * `retain` (bool): Retain flag. If True, the message was sent as a result of a new subscription being made by the client. * `**kwargs` (dict): Forward-compatibility kwargs. """ assert callable(callback) or callback is None if callback: uses_old_signature = self._check_uses_old_message_callback_signature(callback) def callback_wrapper(topic, payload, dup, qos, retain): if uses_old_signature: callback(topic=topic, payload=payload) else: callback(topic=topic, payload=payload, dup=dup, qos=QoS(qos), retain=retain) else: callback_wrapper = None _awscrt.mqtt_client_connection_on_message(self._binding, callback_wrapper) def unsubscribe(self, topic): """Unsubscribe from a topic filter (async). 
The client sends an UNSUBSCRIBE packet, and the server responds with an UNSUBACK. Args: topic (str): Unsubscribe from this topic filter. Returns: Tuple[concurrent.futures.Future, int]: Tuple containing a Future and the ID of the UNSUBSCRIBE packet. The Future completes when an UNSUBACK is received from the server. If successful, the Future will contain a dict with the following members: * ['packet_id'] (int): ID of the UNSUBSCRIBE packet being acknowledged. """ future = Future() packet_id = 0 def unsuback(packet_id, error_code): if error_code != 0: future.set_exception(awscrt.exceptions.from_code(error_code)) else: future.set_result(dict(packet_id=packet_id)) try: packet_id = _awscrt.mqtt_client_connection_unsubscribe(self._binding, topic, unsuback) except Exception as e: future.set_exception(e) return future, packet_id def resubscribe_existing_topics(self): """ Subscribe again to all current topics. This is to help when resuming a connection with a clean session. **Important**: Currently the resubscribe function does not take the AWS IoT Core maximum subscriptions per subscribe request quota into account. If the client has more subscriptions than the maximum, resubscribing must be done manually using the `subscribe()` function for each desired topic filter. The client will be disconnected by AWS IoT Core if the resubscribe exceeds the subscriptions per subscribe request quota. The AWS IoT Core maximum subscriptions per subscribe request quota is listed at the following URL: https://docs.aws.amazon.com/general/latest/gr/iot-core.html#genref_max_subscriptions_per_subscribe_request Returns: Tuple[concurrent.futures.Future, int]: Tuple containing a Future and the ID of the SUBSCRIBE packet. The Future completes when a SUBACK is received from the server. If successful, the Future will contain a dict with the following members: * ['packet_id']: ID of the SUBSCRIBE packet being acknowledged, or None if there were no topics to resubscribe to. * ['topics']: A list of (topic, qos) tuples, where qos will be None if the topic failed to resubscribe. If there were no topics to resubscribe to, then the list will be empty. """ packet_id = 0 future = Future() def on_suback(packet_id, topic_qos_tuples, error_code): if error_code: future.set_exception(awscrt.exceptions.from_code(error_code)) else: future.set_result(dict( packet_id=packet_id, topics=[(topic, _try_qos(qos)) for (topic, qos) in topic_qos_tuples], )) try: packet_id = _awscrt.mqtt_client_connection_resubscribe_existing_topics(self._binding, on_suback) if packet_id is None: # There were no topics to resubscribe to. future.set_result(dict(packet_id=None, topics=[])) except Exception as e: future.set_exception(e) return future, packet_id def publish(self, topic, payload, qos, retain=False): """Publish message (async). If the device is offline, the PUBLISH packet will be sent once the connection resumes. Args: topic (str): Topic name. payload (Union[str, bytes, bytearray]): Contents of message. qos (QoS): Quality of Service for delivering this message. retain (bool): If True, the server will store the message and its QoS so that it can be delivered to future subscribers whose subscriptions match its topic name. Returns: Tuple[concurrent.futures.Future, int]: Tuple containing a Future and the ID of the PUBLISH packet. The QoS determines when the Future completes: * For QoS 0, completes as soon as the packet is sent. * For QoS 1, completes when PUBACK is received. * For QoS 2, completes when PUBCOMP is received. 
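As an illustrative sketch (the variable names, topic, and payload are placeholders, not part of the API), a QoS 1 publish can be issued and awaited with ``publish_future, packet_id = connection.publish(topic='samples/test', payload=b'hello', qos=QoS.AT_LEAST_ONCE)`` followed by ``publish_future.result()``, which blocks until the PUBACK-driven completion described above.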
If successful, the Future will contain a dict with the following members: * ['packet_id'] (int): ID of the PUBLISH packet that is complete. """ future = Future() packet_id = 0 def puback(packet_id, error_code): if error_code != 0: future.set_exception(awscrt.exceptions.from_code(error_code)) else: future.set_result(dict(packet_id=packet_id)) try: from awscrt.mqtt5 import QoS as Mqtt5QoS if (isinstance(qos, Mqtt5QoS)): qos = qos.to_mqtt3() assert isinstance(qos, QoS) packet_id = _awscrt.mqtt_client_connection_publish(self._binding, topic, payload, qos.value, retain, puback) except Exception as e: future.set_exception(e) return future, packet_id def get_stats(self): """Queries the connection's internal statistics for incomplete operations. Returns: The (:class:`OperationStatisticsData`) containing the statistics """ result = _awscrt.mqtt_client_connection_get_stats(self._binding) return OperationStatisticsData(result[0], result[1], result[2], result[3]) class WebsocketHandshakeTransformArgs: """ Argument to a "websocket_handshake_transform" function. A websocket_handshake_transform function has signature: ``fn(transform_args: WebsocketHandshakeTransformArgs, **kwargs) -> None`` The function implementer may modify `transform_args.http_request` as desired. They MUST call `transform_args.set_done()` when complete, passing an exception if something went wrong. Failure to call `set_done()` will hang the application. The implementer may do asynchronous work before calling `transform_args.set_done()`, they are not required to call `set_done()` within the scope of the transform function. An example of async work would be to fetch credentials from another service, sign the request headers, and finally call `set_done()` to mark the transform complete. The default websocket handshake request uses path "/mqtt". All required headers are present, plus the optional header "Sec-WebSocket-Protocol: mqtt". Args: mqtt_connection (Connection): Connection this handshake is for. http_request (awscrt.http.HttpRequest): HTTP request for this handshake. done_future (concurrent.futures.Future): Future to complete when the :meth:`set_done()` is called. It will contain None if successful, or an exception will be set. Attributes: mqtt_connection (Connection): Connection this handshake is for. http_request (awscrt.http.HttpRequest): HTTP request for this handshake. """ def __init__(self, mqtt_connection, http_request, done_future): self.mqtt_connection = mqtt_connection self.http_request = http_request self._done_future = done_future def set_done(self, exception=None): """ Mark the transformation complete. If exception is passed in, the handshake is canceled. """ if exception is None: self._done_future.set_result(None) else: self._done_future.set_exception(exception) class SubscribeError(Exception): """ Subscription rejected by server. """ pass aws-crt-python-0.20.4+dfsg/awscrt/mqtt5.py000066400000000000000000002641651456575232400203750ustar00rootroot00000000000000""" MQTT5 """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
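# The websocket handshake transform contract (see WebsocketHandshakeTransformArgs in
# awscrt.mqtt and later in this module) is: optionally modify transform_args.http_request,
# then always call transform_args.set_done(), passing an exception on failure. A minimal
# sketch of such a callback, assuming an illustrative header name that is not part of this API:
#
#   def example_handshake_transform(transform_args, **kwargs):
#       try:
#           # e.g. add or overwrite a header on the outgoing handshake request
#           transform_args.http_request.headers.set('x-example-header', 'example-value')
#           transform_args.set_done()
#       except Exception as error:
#           transform_args.set_done(error)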
from typing import Any, Callable import _awscrt from concurrent.futures import Future from enum import IntEnum from awscrt import NativeResource, exceptions from awscrt.http import HttpProxyOptions, HttpRequest from awscrt.io import ClientBootstrap, SocketOptions, ClientTlsContext from dataclasses import dataclass from collections.abc import Sequence from inspect import signature class QoS(IntEnum): """MQTT message delivery quality of service. Enum values match `MQTT5 spec `__ encoding values. """ AT_MOST_ONCE = 0 """ The message is delivered according to the capabilities of the underlying network. No response is sent by the receiver and no retry is performed by the sender. The message arrives at the receiver either once or not at all. """ AT_LEAST_ONCE = 1 """ A level of service that ensures that the message arrives at the receiver at least once. """ EXACTLY_ONCE = 2 """ A level of service that ensures that the message arrives at the receiver exactly once. Note that this client does not currently support QoS 2 as of (August 2022) """ def to_mqtt3(self): from awscrt.mqtt import QoS as Mqtt3QoS """Convert a Mqtt5 QoS to Mqtt3 """ return Mqtt3QoS(self.value) def _try_qos(value): try: return QoS(value) except Exception: return None class ConnectReasonCode(IntEnum): """Server return code for connect attempts. Enum values match `MQTT5 spec `__ encoding values. """ SUCCESS = 0 """ Returned when the connection is accepted. """ UNSPECIFIED_ERROR = 128 """ Returned when the server has a failure but does not want to specify a reason or none of the other reason codes apply. """ MALFORMED_PACKET = 129 """ Returned when data in the CONNECT packet could not be correctly parsed by the server. """ PROTOCOL_ERROR = 130 """ Returned when data in the CONNECT packet does not conform to the MQTT5 specification requirements. """ IMPLEMENTATION_SPECIFIC_ERROR = 131 """ Returned when the CONNECT packet is valid but was not accepted by the server. """ UNSUPPORTED_PROTOCOL_VERSION = 132 """ Returned when the server does not support MQTT5 protocol version specified in the connection. """ CLIENT_IDENTIFIER_NOT_VALID = 133 """ Returned when the client identifier in the CONNECT packet is a valid string but not one that is allowed on the server. """ BAD_USERNAME_OR_PASSWORD = 134 """ Returned when the server does not accept the username and/or password specified by the client in the connection packet. """ NOT_AUTHORIZED = 135 """ Returned when the client is not authorized to connect to the server. """ SERVER_UNAVAILABLE = 136 """ Returned when the MQTT5 server is not available. """ SERVER_BUSY = 137 """ Returned when the server is too busy to make a connection. It is recommended that the client try again later. """ BANNED = 138 """ Returned when the client has been banned by the server. """ BAD_AUTHENTICATION_METHOD = 140 """ Returned when the authentication method used in the connection is either not supported on the server or it does not match the authentication method currently in use in the CONNECT packet. """ TOPIC_NAME_INVALID = 144 """ Returned when the Will topic name sent in the CONNECT packet is correctly formed, but is not accepted by the server. """ PACKET_TOO_LARGE = 149 """ Returned when the CONNECT packet exceeded the maximum permissible size on the server. """ QUOTA_EXCEEDED = 151 """ Returned when the quota limits set on the server have been met and/or exceeded. 
""" PAYLOAD_FORMAT_INVALID = 153 """ Returned when the Will payload in the CONNECT packet does not match the specified payload format indicator. """ RETAIN_NOT_SUPPORTED = 154 """ Returned when the server does not retain messages but the CONNECT packet on the client had Will retain enabled. """ QOS_NOT_SUPPORTED = 155 """ Returned when the server does not support the QOS setting set in the Will QOS in the CONNECT packet. """ USE_ANOTHER_SERVER = 156 """ Returned when the server is telling the client to temporarily use another server instead of the one they are trying to connect to. """ SERVER_MOVED = 157 """ Returned when the server is telling the client to permanently use another server instead of the one they are trying to connect to. """ CONNECTION_RATE_EXCEEDED = 159 """ Returned when the server connection rate limit has been exceeded. """ def _try_connect_reason_code(value): try: return ConnectReasonCode(value) except Exception: return None class DisconnectReasonCode(IntEnum): """Reason code inside DISCONNECT packets. Helps determine why a connection was terminated. Enum values match `MQTT5 spec `__ encoding values. """ NORMAL_DISCONNECTION = 0 """ Returned when the remote endpoint wishes to disconnect normally. Will not trigger the publish of a Will message if a Will message was configured on the connection. May be sent by the client or server. """ DISCONNECT_WITH_WILL_MESSAGE = 4 """ Returns when the client wants to disconnect but requires that the server publish the Will message configured on the connection. May only be sent by the client. """ UNSPECIFIED_ERROR = 128 """ Returned when the connection was closed but the sender does not want to specify a reason or none of the other reason codes apply. May be sent by the client or the server. """ MALFORMED_PACKET = 129 """ Indicates the remote endpoint received a packet that does not conform to the MQTT specification. May be sent by the client or the server. """ PROTOCOL_ERROR = 130 """ Returned when an unexpected or out-of-order packet was received by the remote endpoint. May be sent by the client or the server. """ IMPLEMENTATION_SPECIFIC_ERROR = 131 """ Returned when a valid packet was received by the remote endpoint, but could not be processed by the current implementation. May be sent by the client or the server. """ NOT_AUTHORIZED = 135 """ Returned when the remote endpoint received a packet that represented an operation that was not authorized within the current connection. May only be sent by the server. """ SERVER_BUSY = 137 """ Returned when the server is busy and cannot continue processing packets from the client. May only be sent by the server. """ SERVER_SHUTTING_DOWN = 139 """ Returned when the server is shutting down. May only be sent by the server. """ KEEP_ALIVE_TIMEOUT = 141 """ Returned when the server closes the connection because no packet from the client has been received in 1.5 times the KeepAlive time set when the connection was established. May only be sent by the server. """ SESSION_TAKEN_OVER = 142 """ Returned when the server has established another connection with the same client ID as a client's current connection, causing the current client to become disconnected. May only be sent by the server. """ TOPIC_FILTER_INVALID = 143 """ Returned when the topic filter name is correctly formed but not accepted by the server. May only be sent by the server. """ TOPIC_NAME_INVALID = 144 """ Returned when topic name is correctly formed, but is not accepted. May be sent by the client or the server. 
""" RECEIVE_MAXIMUM_EXCEEDED = 147 """ Returned when the remote endpoint reached a state where there were more in-progress QoS1+ publishes then the limit it established for itself when the connection was opened. May be sent by the client or the server. """ TOPIC_ALIAS_INVALID = 148 """ Returned when the remote endpoint receives a PUBLISH packet that contained a topic alias greater than the maximum topic alias limit that it established for itself when the connection was opened. May be sent by the client or the server. """ PACKET_TOO_LARGE = 149 """ Returned when the remote endpoint received a packet whose size was greater than the maximum packet size limit it established for itself when the connection was opened. May be sent by the client or the server. """ MESSAGE_RATE_TOO_HIGH = 150 """ Returned when the remote endpoint's incoming data rate was too high. May be sent by the client or the server. """ QUOTA_EXCEEDED = 151 """ Returned when an internal quota of the remote endpoint was exceeded. May be sent by the client or the server. """ ADMINISTRATIVE_ACTION = 152 """ Returned when the connection was closed due to an administrative action. May be sent by the client or the server. """ PAYLOAD_FORMAT_INVALID = 153 """ Returned when the remote endpoint received a packet where payload format did not match the format specified by the payload format indicator. May be sent by the client or the server. """ RETAIN_NOT_SUPPORTED = 154 """ Returned when the server does not support retained messages. May only be sent by the server. """ QOS_NOT_SUPPORTED = 155 """ Returned when the client sends a QoS that is greater than the maximum QoS established when the connection was opened. May only be sent by the server. """ USE_ANOTHER_SERVER = 156 """ Returned by the server to tell the client to temporarily use a different server. May only be sent by the server. """ SERVER_MOVED = 157 """ Returned by the server to tell the client to permanently use a different server. May only be sent by the server. """ SHARED_SUBSCRIPTIONS_NOT_SUPPORTED = 158 """ Returned by the server to tell the client that shared subscriptions are not supported on the server. May only be sent by the server. """ CONNECTION_RATE_EXCEEDED = 159 """ Returned when the server disconnects the client due to the connection rate being too high. May only be sent by the server. """ MAXIMUM_CONNECT_TIME = 160 """ Returned by the server when the maximum connection time authorized for the connection was exceeded. May only be sent by the server. """ SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED = 161 """ Returned by the server when it received a SUBSCRIBE packet with a subscription identifier, but the server does not support subscription identifiers. May only be sent by the server. """ WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED = 162 """ Returned by the server when it received a SUBSCRIBE packet with a wildcard topic filter, but the server does not support wildcard topic filters. May only be sent by the server. """ def _try_disconnect_reason_code(value): try: return DisconnectReasonCode(value) except Exception: return None class PubackReasonCode(IntEnum): """Reason code inside PUBACK packets that indicates the result of the associated PUBLISH request. Enum values match `MQTT5 spec `__ encoding values. """ SUCCESS = 0 """ Returned when the (QoS 1) publish was accepted by the recipient. May be sent by the client or the server. """ NO_MATCHING_SUBSCRIBERS = 16 """ Returned when the (QoS 1) publish was accepted but there were no matching subscribers. 
May only be sent by the server. """ UNSPECIFIED_ERROR = 128 """ Returned when the (QoS 1) publish was not accepted and the receiver does not want to specify a reason or none of the other reason codes apply. May be sent by the client or the server. """ IMPLEMENTATION_SPECIFIC_ERROR = 131 """ Returned when the (QoS 1) publish was valid but the receiver was not willing to accept it. May be sent by the client or the server. """ NOT_AUTHORIZED = 135 """ Returned when the (QoS 1) publish was not authorized by the receiver. May be sent by the client or the server. """ TOPIC_NAME_INVALID = 144 """ Returned when the topic name was valid but the receiver was not willing to accept it. May be sent by the client or the server. """ PACKET_IDENTIFIER_IN_USE = 145 """ Returned when the packet identifier used in the associated PUBLISH was already in use. This can indicate a mismatch in the session state between client and server. May be sent by the client or the server. """ QUOTA_EXCEEDED = 151 """ Returned when the associated PUBLISH failed because an internal quota on the recipient was exceeded. May be sent by the client or the server. """ PAYLOAD_FORMAT_INVALID = 153 """ Returned when the PUBLISH packet's payload format did not match its payload format indicator property. May be sent by the client or the server. """ def _try_puback_reason_code(value): try: return PubackReasonCode(value) except Exception: return None class SubackReasonCode(IntEnum): """Reason code inside SUBACK packet payloads. Enum values match `MQTT5 spec `__ encoding values. This will only be sent by the server and not the client. """ GRANTED_QOS_0 = 0 """ Returned when the subscription was accepted and the maximum QoS sent will be QoS 0. """ GRANTED_QOS_1 = 1 """ Returned when the subscription was accepted and the maximum QoS sent will be QoS 1. """ GRANTED_QOS_2 = 2 """ Returned when the subscription was accepted and the maximum QoS sent will be QoS 2. """ UNSPECIFIED_ERROR = 128 """ Returned when the connection was closed but the sender does not want to specify a reason or none of the other reason codes apply. """ IMPLEMENTATION_SPECIFIC_ERROR = 131 """ Returned when the subscription was valid but the server did not accept it. """ NOT_AUTHORIZED = 135 """ Returned when the client was not authorized to make the subscription on the server. """ TOPIC_FILTER_INVALID = 143 """ Returned when the subscription topic filter was correctly formed but not allowed for the client. """ PACKET_IDENTIFIER_IN_USE = 145 """ Returned when the packet identifier was already in use on the server. """ QUOTA_EXCEEDED = 151 """ Returned when a subscribe-related quota set on the server was exceeded. """ SHARED_SUBSCRIPTIONS_NOT_SUPPORTED = 158 """ Returned when the subscription's topic filter was a shared subscription and the server does not support shared subscriptions. """ SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED = 161 """ Returned when the SUBSCRIBE packet contained a subscription identifier and the server does not support subscription identifiers. """ WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED = 162 """ Returned when the subscription's topic filter contains a wildcard but the server does not support wildcard subscriptions. """ def _try_suback_reason_code(value): try: return SubackReasonCode(value) except Exception: return None class UnsubackReasonCode(IntEnum): """Reason codes inside UNSUBACK packet payloads that specify the results for each topic filter in the associated UNSUBSCRIBE packet. Enum values match `MQTT5 spec `__ encoding values. 
""" SUCCESS = 0 """ Returned when the unsubscribe was successful and the client is no longer subscribed to the topic filter on the server. """ NO_SUBSCRIPTION_EXISTED = 17 """ Returned when the topic filter did not match one of the client's existing topic filters on the server. """ UNSPECIFIED_ERROR = 128 """ Returned when the unsubscribe of the topic filter was not accepted and the server does not want to specify a reason or none of the other reason codes apply. """ IMPLEMENTATION_SPECIFIC_ERROR = 131 """ Returned when the topic filter was valid but the server does not accept an unsubscribe for it. """ NOT_AUTHORIZED = 135 """ Returned when the client was not authorized to unsubscribe from that topic filter on the server. """ TOPIC_NAME_INVALID = 144 """ Returned when the topic filter was correctly formed but is not allowed for the client on the server. """ PACKET_IDENTIFIER_IN_USE = 145 """ Returned when the packet identifier was already in use on the server. """ def _try_unsuback_reason_code(value): try: return UnsubackReasonCode(value) except Exception: return None class ClientSessionBehaviorType(IntEnum): """Controls how the mqtt client should behave with respect to MQTT sessions. """ DEFAULT = 0 """ Default client session behavior. Maps to CLEAN. """ CLEAN = 1 """ Always ask for a clean session when connecting """ REJOIN_POST_SUCCESS = 2 """ Always attempt to rejoin an existing session after an initial connection success. Session rejoin requires an appropriate non-zero session expiry interval in the client's CONNECT options. """ REJOIN_ALWAYS = 3 """ Always attempt to rejoin an existing session. Since the client does not support durable session persistence, this option is not guaranteed to be spec compliant because any unacknowledged qos1 publishes (which are part of the client session state) will not be present on the initial connection. Until we support durable session resumption, this option is technically spec-breaking, but useful. Always rejoin requires an appropriate non-zero session expiry interval in the client's CONNECT options. """ class PayloadFormatIndicator(IntEnum): """Optional property describing a PUBLISH payload's format. Enum values match `MQTT5 spec `__ encoding values. """ AWS_MQTT5_PFI_BYTES = 0 """ The payload is arbitrary binary data """ AWS_MQTT5_PFI_UTF8 = 1 """ The payload is a well-formed utf-8 string value. """ def _try_payload_format_indicator(value): try: return PayloadFormatIndicator(value) except Exception: return None class RetainHandlingType(IntEnum): """Configures how retained messages should be handled when subscribing with a topic filter that matches topics with associated retained messages. Enum values match `MQTT5 spec `_ encoding values. """ SEND_ON_SUBSCRIBE = 0 """ The server should always send all retained messages on topics that match a subscription's filter. """ SEND_ON_SUBSCRIBE_IF_NEW = 1 """ The server should send retained messages on topics that match the subscription's filter, but only for the first matching subscription, per session. """ DONT_SEND = 2 """ Subscriptions must not trigger any retained message publishes from the server. """ class RetainAndHandlingType(IntEnum): """DEPRECATED. `RetainAndHandlingType` is deprecated, please use `RetainHandlingType` """ SEND_ON_SUBSCRIBE = 0 """ The server should always send all retained messages on topics that match a subscription's filter. 
""" SEND_ON_SUBSCRIBE_IF_NEW = 1 """ The server should send retained messages on topics that match the subscription's filter, but only for the first matching subscription, per session. """ DONT_SEND = 2 """ Subscriptions must not trigger any retained message publishes from the server. """ class ExtendedValidationAndFlowControlOptions(IntEnum): """Additional controls for client behavior with respect to operation validation and flow control; these checks go beyond the MQTT5 spec to respect limits of specific MQTT brokers. """ NONE = 0 """ Do not do any additional validation or flow control """ AWS_IOT_CORE_DEFAULTS = 1 """ Apply additional client-side validation and operational flow control that respects the default AWS IoT Core limits. Currently applies the following additional validation: * No more than 8 subscriptions per SUBSCRIBE packet * Topics and topic filters have a maximum of 7 slashes (8 segments), not counting any AWS rules prefix * Topics must be 256 bytes or less in length * Client id must be 128 or less bytes in length Also applies the following flow control: * Outbound throughput throttled to 512KB/s * Outbound publish TPS throttled to 100 """ class ClientOperationQueueBehaviorType(IntEnum): """Controls how disconnects affect the queued and in-progress operations tracked by the client. Also controls how operations are handled while the client is not connected. In particular, if the client is not connected, then any operation that would be failed on disconnect (according to these rules) will be rejected. """ DEFAULT = 0 """ Default client operation queue behavior. Maps to FAIL_QOS0_PUBLISH_ON_DISCONNECT. """ FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT = 1 """ Re-queues QoS 1+ publishes on disconnect; un-acked publishes go to the front while unprocessed publishes stay in place. All other operations (QoS 0 publishes, subscribe, unsubscribe) are failed. """ FAIL_QOS0_PUBLISH_ON_DISCONNECT = 2 """ QoS 0 publishes that are not complete at the time of disconnection are failed. Un-acked QoS 1+ publishes are re-queued at the head of the line for immediate retransmission on a session resumption. All other operations are requeued in original order behind any retransmissions. """ FAIL_ALL_ON_DISCONNECT = 3 """ All operations that are not complete at the time of disconnection are failed, except operations that the MQTT5 spec requires to be retransmitted (un-acked QoS1+ publishes). """ class ExponentialBackoffJitterMode(IntEnum): """Controls how the reconnect delay is modified in order to smooth out the distribution of reconnection attempt timepoints for a large set of reconnecting clients. See `Exponential Backoff and Jitter `_ """ DEFAULT = 0 """ Maps to Full """ NONE = 1 """ Do not perform any randomization on the reconnect delay """ FULL = 2 """ Fully random between no delay and the current exponential backoff value. """ DECORRELATED = 3 """ Backoff is taken randomly from the interval between the base backoff interval and a scaling (greater than 1) of the current backoff value """ @dataclass class UserProperty: """MQTT5 User Property Args: name (str): Property name value (str): Property value """ name: str = None value: str = None def _init_user_properties(user_properties_tuples): if user_properties_tuples is None: return None return [UserProperty(name=name, value=value) for (name, value) in user_properties_tuples] class OutboundTopicAliasBehaviorType(IntEnum): """An enumeration that controls how the client applies topic aliasing to outbound publish packets. 
Topic alias behavior is described in `MQTT5 Topic Aliasing `_ """ DEFAULT = 0, """Maps to Disabled. This keeps the client from being broken (by default) if the broker topic aliasing implementation has a problem. """ MANUAL = 1, """ Outbound aliasing is the user's responsibility. Client will cache and use previously-established aliases if they fall within the negotiated limits of the connection. The user must still always submit a full topic in their publishes because disconnections disrupt topic alias mappings unpredictably. The client will properly use a requested alias when the most-recently-seen binding for a topic alias value matches the alias and topic in the publish packet. """ LRU = 2, """ (Recommended) The client will ignore any user-specified topic aliasing and instead use an LRU cache to drive alias usage. """ DISABLED = 3, """Completely disable outbound topic aliasing.""" class InboundTopicAliasBehaviorType(IntEnum): """An enumeration that controls whether or not the client allows the broker to send publishes that use topic aliasing. Topic alias behavior is described in `MQTT5 Topic Aliasing `_ """ DEFAULT = 0, """Maps to Disabled. This keeps the client from being broken (by default) if the broker topic aliasing implementation has a problem. """ ENABLED = 1, """Allow the server to send PUBLISH packets to the client that use topic aliasing""" DISABLED = 2, """Forbid the server from sending PUBLISH packets to the client that use topic aliasing""" @dataclass class TopicAliasingOptions: """ Configuration for all client topic aliasing behavior. Args: outbound_behavior (OutboundTopicAliasBehaviorType): Controls what kind of outbound topic aliasing behavior the client should attempt to use. If topic aliasing is not supported by the server, this setting has no effect and any attempts to directly manipulate the topic alias id in outbound publishes will be ignored. If left undefined, then outbound topic aliasing is disabled. outbound_cache_max_size (int): If outbound topic aliasing is set to LRU, this controls the maximum size of the cache. If outbound topic aliasing is set to LRU and this is zero or undefined, a sensible default is used (25). If outbound topic aliasing is not set to LRU, then this setting has no effect. inbound_behavior (InboundTopicAliasBehaviorType): Controls whether or not the client allows the broker to use topic aliasing when sending publishes. Even if inbound topic aliasing is enabled, it is up to the server to choose whether or not to use it. If left undefined, then inbound topic aliasing is disabled. inbound_cache_max_size (int): If inbound topic aliasing is enabled, this will control the size of the inbound alias cache. If inbound aliases are enabled and this is zero or undefined, then a sensible default will be used (25). If inbound aliases are disabled, this setting has no effect. Behaviorally, this value overrides anything present in the topic_alias_maximum field of the CONNECT packet options. """ outbound_behavior: OutboundTopicAliasBehaviorType = None outbound_cache_max_size: int = None inbound_behavior: InboundTopicAliasBehaviorType = None inbound_cache_max_size: int = None @dataclass class NegotiatedSettings: """ Mqtt behavior settings that are dynamically negotiated as part of the CONNECT/CONNACK exchange. 
While you can infer all of these values from a combination of: - defaults as specified in the mqtt5 spec - your CONNECT settings - the CONNACK from the broker the client instead does the combining for you and emits a NegotiatedSettings object with final, authoritative values. Negotiated settings are communicated with every successful connection establishment. Args: maximum_qos (QoS): The maximum QoS allowed for publishes on this connection instance session_expiry_interval_sec (int): The amount of time in seconds the server will retain the MQTT session after a disconnect. receive_maximum_from_server (int): The number of in-flight QoS 1 and QoS 2 publications the server is willing to process concurrently. maximum_packet_size_to_server (int): The maximum packet size the server is willing to accept. topic_alias_maximum_to_server (int): the maximum allowed topic alias value on publishes sent from client to server topic_alias_maximum_to_client (int): the maximum allowed topic alias value on publishes sent from server to client server_keep_alive_sec (int): The maximum amount of time in seconds between client packets. The client will use PINGREQs to ensure this limit is not breached. The server will disconnect the client for inactivity if no MQTT packet is received in a time interval equal to 1.5 x this value. retain_available (bool): Whether the server supports retained messages. wildcard_subscriptions_available (bool): Whether the server supports wildcard subscriptions. subscription_identifiers_available (bool): Whether the server supports subscription identifiers shared_subscriptions_available (bool): Whether the server supports shared subscriptions rejoined_session (bool): Whether the client has rejoined an existing session. client_id (str): The final client id in use by the newly-established connection. This will be the configured client id if one was given in the configuration, otherwise, if no client id was specified, this will be the client id assigned by the server. Reconnection attempts will always use the auto-assigned client id, allowing for auto-assigned session resumption. """ maximum_qos: QoS = None session_expiry_interval_sec: int = None receive_maximum_from_server: int = None maximum_packet_size_to_server: int = None topic_alias_maximum_to_server: int = None topic_alias_maximum_to_client: int = None server_keep_alive_sec: int = None retain_available: bool = None wildcard_subscriptions_available: bool = None subscription_identifiers_available: bool = None shared_subscriptions_available: bool = None rejoined_session: bool = None client_id: str = None @dataclass class ConnackPacket: """Data model of an `MQTT5 CONNACK `_ packet. Args: session_present (bool): True if the client rejoined an existing session on the server, false otherwise. reason_code (ConnectReasonCode): Indicates either success or the reason for failure for the connection attempt. session_expiry_interval_sec (int): A time interval, in seconds, that the server will persist this connection's MQTT session state for. If present, this value overrides any session expiry specified in the preceding CONNECT packet. receive_maximum (int): The maximum amount of in-flight QoS 1 or 2 messages that the server is willing to handle at once. If omitted or None, the limit is based on the valid MQTT packet id space (65535). maximum_qos (QoS): The maximum message delivery quality of service that the server will allow on this connection. retain_available (bool): Indicates whether the server supports retained messages. 
If None, retained messages are supported. maximum_packet_size (int): Specifies the maximum packet size, in bytes, that the server is willing to accept. If None, there is no limit beyond what is imposed by the MQTT spec itself. assigned_client_identifier (str): Specifies a client identifier assigned to this connection by the server. Only valid when the client id of the preceding CONNECT packet was left empty. topic_alias_maximum (int): The maximum allowed value for topic aliases in outbound publish packets. If 0 or None, then outbound topic aliasing is not allowed. reason_string (str): Additional diagnostic information about the result of the connection attempt. user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. wildcard_subscriptions_available (bool): Indicates whether the server supports wildcard subscriptions. If None, wildcard subscriptions are supported. subscription_identifiers_available (bool): Indicates whether the server supports subscription identifiers. If None, subscription identifiers are supported. shared_subscription_available (bool): Indicates whether the server supports shared subscription topic filters. If None, shared subscriptions are supported. server_keep_alive (int) : DEPRECATED. Please use `server_keep_alive_sec`. server_keep_alive_sec (int): Server-requested override of the keep alive interval, in seconds. If None, the keep alive value sent by the client should be used. response_information (str): A value that can be used in the creation of a response topic associated with this connection. MQTT5-based request/response is outside the purview of the MQTT5 spec and this client. server_reference (str): Property indicating an alternate server that the client may temporarily or permanently attempt to connect to instead of the configured endpoint. Will only be set if the reason code indicates another server may be used (ServerMoved, UseAnotherServer). """ session_present: bool = None reason_code: ConnectReasonCode = None session_expiry_interval_sec: int = None receive_maximum: int = None maximum_qos: QoS = None retain_available: bool = None maximum_packet_size: int = None assigned_client_identifier: str = None topic_alias_maximum: int = None reason_string: str = None user_properties: 'Sequence[UserProperty]' = None wildcard_subscriptions_available: bool = None subscription_identifiers_available: bool = None shared_subscription_available: bool = None server_keep_alive_sec: int = None response_information: str = None server_reference: str = None @property def server_keep_alive(self): return self.server_keep_alive_sec @server_keep_alive.setter def server_keep_alive(self, value): self.server_keep_alive_sec = value @dataclass class DisconnectPacket: """Data model of an `MQTT5 DISCONNECT `_ packet. Args: reason_code (DisconnectReasonCode): Value indicating the reason that the sender is closing the connection session_expiry_interval_sec (int): A change to the session expiry interval negotiated at connection time as part of the disconnect. Only valid for DISCONNECT packets sent from client to server. It is not valid to attempt to change session expiry from zero to a non-zero value. reason_string (str): Additional diagnostic information about the reason that the sender is closing the connection user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. 
server_reference (str): Property indicating an alternate server that the client may temporarily or permanently attempt to connect to instead of the configured endpoint. Will only be set if the reason code indicates another server may be used (ServerMoved, UseAnotherServer). """ reason_code: DisconnectReasonCode = DisconnectReasonCode.NORMAL_DISCONNECTION session_expiry_interval_sec: int = None reason_string: str = None user_properties: 'Sequence[UserProperty]' = None server_reference: str = None @dataclass class Subscription: """Configures a single subscription within a Subscribe operation Args: topic_filter (str): The topic filter to subscribe to qos (QoS): The maximum QoS on which the subscriber will accept publish messages no_local (bool): Whether the server will not send publishes to a client when that client was the one who sent the publish retain_as_published (bool): Whether messages sent due to this subscription keep the retain flag preserved on the message retain_handling_type (RetainHandlingType): Whether retained messages on matching topics be sent in reaction to this subscription """ topic_filter: str qos: QoS = QoS.AT_MOST_ONCE no_local: bool = False retain_as_published: bool = False retain_handling_type: RetainHandlingType or RetainAndHandlingType = RetainHandlingType.SEND_ON_SUBSCRIBE @dataclass class SubscribePacket: """Data model of an `MQTT5 SUBSCRIBE `_ packet. Args: subscriptions (Sequence[Subscription]): The list of topic filters that the client wishes to listen to subscription_identifier (int): The positive int to associate with all topic filters in this request. Publish packets that match a subscription in this request should include this identifier in the resulting message. user_properties (Sequence[UserProperty]): The list of MQTT5 user properties included with the packet. """ subscriptions: 'Sequence[Subscription]' subscription_identifier: int = None user_properties: 'Sequence[UserProperty]' = None @dataclass class SubackPacket: """Data model of an `MQTT5 SUBACK `_ packet. Args: reason_string (str): Additional diagnostic information about the result of the SUBSCRIBE attempt. user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. reason_codes (Sequence[SubackReasonCode]): List of reason codes indicating the result of each individual subscription entry in the associated SUBSCRIBE packet. """ reason_string: str = None user_properties: 'Sequence[UserProperty]' = None reason_codes: 'Sequence[SubackReasonCode]' = None @dataclass class UnsubscribePacket: """Data model of an `MQTT5 UNSUBSCRIBE `_ packet. Args: topic_filters (Sequence[str]): List of topic filters that the client wishes to unsubscribe from. user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. """ topic_filters: 'Sequence[str]' user_properties: 'Sequence[UserProperty]' = None @dataclass class UnsubackPacket: """Data model of an `MQTT5 UNSUBACK `_ packet. Args: reason_string (str): Additional diagnostic information about the result of the UNSUBSCRIBE attempt. user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. reason_codes (Sequence[DisconnectReasonCode]): A list of reason codes indicating the result of unsubscribing from each individual topic filter entry in the associated UNSUBSCRIBE packet. 
""" reason_string: str = None user_properties: 'Sequence[UserProperty]' = None reason_codes: 'Sequence[DisconnectReasonCode]' = None @dataclass class PublishPacket: """Data model of an `MQTT5 PUBLISH `_ packet Args: payload (Any): The payload of the publish message. qos (QoS): The MQTT quality of service associated with this PUBLISH packet. retain (bool): True if this is a retained message, false otherwise. topic (str): The topic associated with this PUBLISH packet. payload_format_indicator (PayloadFormatIndicator): Property specifying the format of the payload data. The mqtt5 client does not enforce or use this value in a meaningful way. message_expiry_interval_sec (int): Sent publishes - indicates the maximum amount of time allowed to elapse for message delivery before the server should instead delete the message (relative to a recipient). Received publishes - indicates the remaining amount of time (from the server's perspective) before the message would have been deleted relative to the subscribing client. If left None, indicates no expiration timeout. topic_alias (int): An integer value that is used to identify the Topic instead of using the Topic Name. On outbound publishes, this will only be used if the outbound topic aliasing behavior has been set to Manual. response_topic (str): Opaque topic string intended to assist with request/response implementations. Not internally meaningful to MQTT5 or this client. correlation_data (Any): Opaque binary data used to correlate between publish messages, as a potential method for request-response implementation. Not internally meaningful to MQTT5. subscription_identifiers (Sequence[int]): The subscription identifiers of all the subscriptions this message matched. content_type (str): Property specifying the content type of the payload. Not internally meaningful to MQTT5. user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. """ payload: Any = "" # Unicode objects are converted to C strings using 'utf-8' encoding qos: QoS = QoS.AT_MOST_ONCE retain: bool = False topic: str = "" payload_format_indicator: PayloadFormatIndicator = None message_expiry_interval_sec: int = None topic_alias: int = None response_topic: str = None correlation_data: Any = None # Unicode objects are converted to C strings using 'utf-8' encoding subscription_identifiers: 'Sequence[int]' = None # ignore attempts to set but provide in received packets content_type: str = None user_properties: 'Sequence[UserProperty]' = None @dataclass class PubackPacket: """Data model of an `MQTT5 PUBACK `_ packet Args: reason_code (PubackReasonCode): Success indicator or failure reason for the associated PUBLISH packet. reason_string (str): Additional diagnostic information about the result of the PUBLISH attempt. user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. """ reason_code: PubackReasonCode = None reason_string: str = None user_properties: 'Sequence[UserProperty]' = None @dataclass class ConnectPacket: """Data model of an `MQTT5 CONNECT `_ packet. Args: keep_alive_interval_sec (int): The maximum time interval, in seconds, that is permitted to elapse between the point at which the client finishes transmitting one MQTT packet and the point it starts sending the next. The client will use PINGREQ packets to maintain this property. If the responding CONNACK contains a keep alive property value, then that is the negotiated keep alive value. 
Otherwise, the keep alive sent by the client is the negotiated value. client_id (str): A unique string identifying the client to the server. Used to restore session state between connections. If left empty, the broker will auto-assign a unique client id. When reconnecting, the mqtt5 client will always use the auto-assigned client id. username (str): A string value that the server may use for client authentication and authorization. password (str): Opaque binary data that the server may use for client authentication and authorization. session_expiry_interval_sec (int): A time interval, in seconds, that the client requests the server to persist this connection's MQTT session state for. Has no meaning if the client has not been configured to rejoin sessions. Must be non-zero in order to successfully rejoin a session. If the responding CONNACK contains a session expiry property value, then that is the negotiated session expiry value. Otherwise, the session expiry sent by the client is the negotiated value. request_response_information (bool): If true, requests that the server send response information in the subsequent CONNACK. This response information may be used to set up request-response implementations over MQTT, but doing so is outside the scope of the MQTT5 spec and client. request_problem_information (bool): If true, requests that the server send additional diagnostic information (via response string or user properties) in DISCONNECT or CONNACK packets from the server. receive_maximum (int): Notifies the server of the maximum number of in-flight QoS 1 and 2 messages the client is willing to handle. If omitted or None, then no limit is requested. maximum_packet_size (int): Notifies the server of the maximum packet size the client is willing to handle. If omitted or None, then no limit beyond the natural limits of MQTT packet size is requested. will_delay_interval_sec (int): A time interval, in seconds, that the server should wait (for a session reconnection) before sending the will message associated with the connection's session. If omitted or None, the server will send the will when the associated session is destroyed. If the session is destroyed before a will delay interval has elapsed, then the will must be sent at the time of session destruction. will (PublishPacket): The definition of a message to be published when the connection's session is destroyed by the server or when the will delay interval has elapsed, whichever comes first. If None, then nothing will be sent. user_properties (Sequence[UserProperty]): List of MQTT5 user properties included with the packet. """ keep_alive_interval_sec: int = None client_id: str = None username: str = None password: str = None session_expiry_interval_sec: int = None request_response_information: bool = None request_problem_information: bool = None receive_maximum: int = None maximum_packet_size: int = None will_delay_interval_sec: int = None will: PublishPacket = None user_properties: 'Sequence[UserProperty]' = None class WebsocketHandshakeTransformArgs: """ Argument to a "websocket_handshake_transform" function. A websocket_handshake_transform function has signature: ``fn(transform_args: WebsocketHandshakeTransformArgs) -> None`` The function implementer may modify `transform_args.http_request` as desired. They MUST call `transform_args.set_done()` when complete, passing an exception if something went wrong. Failure to call `set_done()` will hang the application. 
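As a minimal sketch (the header name and value below are purely illustrative, not required by the client), a synchronous transform might look like::

    def websocket_handshake_transform(transform_args):
        try:
            # modify the outgoing handshake request as needed
            transform_args.http_request.headers.set("x-example-header", "example-value")
            transform_args.set_done()
        except Exception as e:
            # always complete the transform, passing the failure along
            transform_args.set_done(e)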
The implementer may do asynchronous work before calling `transform_args.set_done()`; they are not required to call `set_done()` within the scope of the transform function. An example of async work would be to fetch credentials from another service, sign the request headers, and finally call `set_done()` to mark the transform complete. The default websocket handshake request uses path "/mqtt". All required headers are present, plus the optional header "Sec-WebSocket-Protocol: mqtt". Args: client (Client): Client this handshake is for. http_request (awscrt.http.HttpRequest): HTTP request for this handshake. done_future (concurrent.futures.Future): Future to complete when :meth:`set_done()` is called. It will contain None if successful, or an exception will be set. Attributes: client (Client): Client this handshake is for. http_request (awscrt.http.HttpRequest): HTTP request for this handshake. """ def __init__(self, client, http_request, done_future): self.client = client self.http_request = http_request self._done_future = done_future def set_done(self, exception=None): """ Mark the transformation complete. If exception is passed in, the handshake is canceled. """ if exception is None: self._done_future.set_result(None) else: self._done_future.set_exception(exception) @dataclass class PublishReceivedData: """Dataclass containing data related to a Publish Received Callback Args: publish_packet (PublishPacket): Data model of an `MQTT5 PUBLISH `_ packet. """ publish_packet: PublishPacket = None @dataclass class OperationStatisticsData: """Dataclass containing some simple statistics about the current state of the client's queue of operations Args: incomplete_operation_count (int): total number of operations submitted to the client that have not yet been completed. Unacked operations are a subset of this. incomplete_operation_size (int): total packet size of operations submitted to the client that have not yet been completed. Unacked operations are a subset of this. unacked_operation_count (int): total number of operations that have been sent to the server and are waiting for a corresponding ACK before they can be completed. unacked_operation_size (int): total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before they can be completed. """ incomplete_operation_count: int = 0 incomplete_operation_size: int = 0 unacked_operation_count: int = 0 unacked_operation_size: int = 0 @dataclass class LifecycleStoppedData: """Dataclass containing results of a Stopped Lifecycle Event Currently Unused""" pass @dataclass class LifecycleAttemptingConnectData: """Dataclass containing results of an Attempting Connect Lifecycle Event Currently Unused""" pass @dataclass class LifecycleConnectSuccessData: """Dataclass containing results of a Connect Success Lifecycle Event Args: connack_packet (ConnackPacket): Data model of an `MQTT5 CONNACK `_ packet. negotiated_settings (NegotiatedSettings): Mqtt behavior settings that have been dynamically negotiated as part of the CONNECT/CONNACK exchange. """ connack_packet: ConnackPacket = None negotiated_settings: NegotiatedSettings = None @dataclass class LifecycleConnectFailureData: """Dataclass containing results of a Connect Failure Lifecycle Event Args: connack_packet (ConnackPacket): Data model of an `MQTT5 CONNACK `_ packet. exception (:class:`awscrt.exceptions.AwsCrtError`): Exception which caused the connection failure.
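A purely illustrative failure callback might log the exception and, when present, the CONNACK reason code::

    def on_lifecycle_connection_failure(failure_data: LifecycleConnectFailureData):
        print("Connection attempt failed:", failure_data.exception)
        if failure_data.connack_packet is not None:
            print("CONNACK reason code:", failure_data.connack_packet.reason_code)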
""" connack_packet: ConnackPacket = None exception: Exception = None @dataclass class LifecycleDisconnectData: """Dataclass containing results of a Disconnect Lifecycle Event Args: disconnect_packet (DisconnectPacket): Data model of an `MQTT5 DISCONNECT `_ packet. error_code (int): (:class:`awscrt.exceptions.AwsCrtError`): Exception which caused disconnection. """ disconnect_packet: DisconnectPacket = None exception: Exception = None @dataclass class PublishCompletionData: """Dataclass containing results of a Publish Args: puback (PubackPacket): On a successful completion of a QoS1 publish a PubackPacket will be included. """ puback: PubackPacket = None # This will be None on a QoS0 @dataclass class ClientOptions: """Configuration for the creation of MQTT5 clients Args: host_name (str): Host name of the MQTT server to connect to. port (int): Network port of the MQTT server to connect to. bootstrap (ClientBootstrap): The Client bootstrap used socket_options (SocketOptions): The socket properties of the underlying MQTT connections made by the client or None if defaults are used. tls_ctx (ClientTlsContext): The TLS context for secure socket connections. If None, then a plaintext connection will be used. http_proxy_options (HttpProxyOptions): The (tunneling) HTTP proxy usage when establishing MQTT connections websocket_handshake_transform (Callable[[WebsocketHandshakeTransformArgs],]): This callback allows a custom transformation of the HTTP request that acts as the websocket handshake. Websockets will be used if this is set to a valid transformation callback. To use websockets but not perform a transformation, just set this as a trivial completion callback. If None, the connection will be made with direct MQTT. connect_options (ConnectPacket): All configurable options with respect to the CONNECT packet sent by the client, including the will. These connect properties will be used for every connection attempt made by the client. session_behavior (ClientSessionBehaviorType): How the MQTT5 client should behave with respect to MQTT sessions. extended_validation_and_flow_control_options (ExtendedValidationAndFlowControlOptions): The additional controls for client behavior with respect to operation validation and flow control; these checks go beyond the base MQTT5 spec to respect limits of specific MQTT brokers. offline_queue_behavior (ClientOperationQueueBehaviorType): Returns how disconnects affect the queued and in-progress operations tracked by the client. Also controls how new operations are handled while the client is not connected. In particular, if the client is not connected, then any operation that would be failed on disconnect (according to these rules) will also be rejected. retry_jitter_mode (ExponentialBackoffJitterMode): How the reconnect delay is modified in order to smooth out the distribution of reconnection attempt timepoints for a large set of reconnecting clients. min_reconnect_delay_ms (int): The minimum amount of time to wait to reconnect after a disconnect. Exponential backoff is performed with jitter after each connection failure. max_reconnect_delay_ms (int): The maximum amount of time to wait to reconnect after a disconnect. Exponential backoff is performed with jitter after each connection failure. min_connected_time_to_reset_reconnect_delay_ms (int): The amount of time that must elapse with an established connection before the reconnect delay is reset to the minimum. This helps alleviate bandwidth-waste in fast reconnect cycles due to permission failures on operations. 
ping_timeout_ms (int): The time interval to wait after sending a PINGREQ for a PINGRESP to arrive. If one does not arrive, the client will close the current connection. connack_timeout_ms (int): The time interval to wait after sending a CONNECT request for a CONNACK to arrive. If one does not arrive, the connection will be shut down. ack_timeout_sec (int): The time interval to wait for an ack after sending a QoS 1+ PUBLISH, SUBSCRIBE, or UNSUBSCRIBE before failing the operation. topic_aliasing_options (TopicAliasingOptions): All configurable options with respect to client topic aliasing behavior. on_publish_callback_fn (Callable[[PublishReceivedData],]): Callback for all publish packets received by client. on_lifecycle_event_stopped_fn (Callable[[LifecycleStoppedData],]): Callback for Lifecycle Event Stopped. on_lifecycle_event_attempting_connect_fn (Callable[[LifecycleAttemptingConnectData],]): Callback for Lifecycle Event Attempting Connect. on_lifecycle_event_connection_success_fn (Callable[[LifecycleConnectSuccessData],]): Callback for Lifecycle Event Connection Success. on_lifecycle_event_connection_failure_fn (Callable[[LifecycleConnectFailureData],]): Callback for Lifecycle Event Connection Failure. on_lifecycle_event_disconnection_fn (Callable[[LifecycleDisconnectData],]): Callback for Lifecycle Event Disconnection. """ host_name: str port: int = None bootstrap: ClientBootstrap = None socket_options: SocketOptions = None tls_ctx: ClientTlsContext = None http_proxy_options: HttpProxyOptions = None websocket_handshake_transform: Callable[[WebsocketHandshakeTransformArgs], None] = None connect_options: ConnectPacket = None session_behavior: ClientSessionBehaviorType = None extended_validation_and_flow_control_options: ExtendedValidationAndFlowControlOptions = None offline_queue_behavior: ClientOperationQueueBehaviorType = None retry_jitter_mode: ExponentialBackoffJitterMode = None min_reconnect_delay_ms: int = None max_reconnect_delay_ms: int = None min_connected_time_to_reset_reconnect_delay_ms: int = None ping_timeout_ms: int = None connack_timeout_ms: int = None ack_timeout_sec: int = None topic_aliasing_options: TopicAliasingOptions = None on_publish_callback_fn: Callable[[PublishReceivedData], None] = None on_lifecycle_event_stopped_fn: Callable[[LifecycleStoppedData], None] = None on_lifecycle_event_attempting_connect_fn: Callable[[LifecycleAttemptingConnectData], None] = None on_lifecycle_event_connection_success_fn: Callable[[LifecycleConnectSuccessData], None] = None on_lifecycle_event_connection_failure_fn: Callable[[LifecycleConnectFailureData], None] = None on_lifecycle_event_disconnection_fn: Callable[[LifecycleDisconnectData], None] = None def _check_callback(callback): if callback is not None: try: callback_sig = signature(callback) callback_sig.bind(None) return callback except BaseException: raise TypeError( "Callable should take one argument") return None class _ClientCore: def __init__(self, client_options: ClientOptions): self._ws_handshake_transform_cb = _check_callback(client_options.websocket_handshake_transform) self._on_publish_cb = _check_callback(client_options.on_publish_callback_fn) self._on_lifecycle_stopped_cb = _check_callback(client_options.on_lifecycle_event_stopped_fn) self._on_lifecycle_attempting_connect_cb = _check_callback( client_options.on_lifecycle_event_attempting_connect_fn) self._on_lifecycle_connection_success_cb = _check_callback( client_options.on_lifecycle_event_connection_success_fn) self._on_lifecycle_connection_failure_cb = 
_check_callback( client_options.on_lifecycle_event_connection_failure_fn) self._on_lifecycle_disconnection_cb = _check_callback(client_options.on_lifecycle_event_disconnection_fn) def _ws_handshake_transform(self, http_request_binding, http_headers_binding, native_userdata): if self._ws_handshake_transform_cb is None: _awscrt.mqtt5_ws_handshake_transform_complete(None, native_userdata) return def _on_complete(f): _awscrt.mqtt5_ws_handshake_transform_complete(f.exception(), native_userdata) future = Future() future.add_done_callback(_on_complete) try: http_request = HttpRequest._from_bindings(http_request_binding, http_headers_binding) transform_args = WebsocketHandshakeTransformArgs(self, http_request, future) self._ws_handshake_transform_cb(transform_args=transform_args) except Exception as e: # Call set_done() if user failed to do so before uncaught exception was raised, # there's a chance the callback wasn't callable and user has no idea we tried to hand them the baton. if not future.done(): transform_args.set_done(e) def _on_publish( self, payload, qos, retain, topic, payload_format_indicator_exists, payload_format_indicator, message_expiry_interval_sec_exists, message_expiry_interval_sec, topic_alias_exists, topic_alias, response_topic, correlation_data, subscription_identifiers_tuples, content_type, user_properties_tuples): if self._on_publish_cb is None: return publish_packet = PublishPacket() publish_packet.topic = topic publish_packet.payload = payload publish_packet.qos = _try_qos(qos) publish_packet.retain = retain if payload_format_indicator_exists: publish_packet.payload_format_indicator = _try_payload_format_indicator(payload_format_indicator) if message_expiry_interval_sec_exists: publish_packet.message_expiry_interval_sec = message_expiry_interval_sec if topic_alias_exists: publish_packet.topic_alias = topic_alias publish_packet.response_topic = response_topic publish_packet.correlation_data = correlation_data if publish_packet.subscription_identifiers is not None: publish_packet.subscription_identifiers = [subscription_identifier for (subscription_identifier) in subscription_identifiers_tuples] publish_packet.content_type = content_type publish_packet.user_properties = _init_user_properties(user_properties_tuples) self._on_publish_cb(PublishReceivedData(publish_packet=publish_packet)) return def _on_lifecycle_stopped(self): if self._on_lifecycle_stopped_cb: self._on_lifecycle_stopped_cb(LifecycleStoppedData()) def _on_lifecycle_attempting_connect(self): if self._on_lifecycle_attempting_connect_cb: self._on_lifecycle_attempting_connect_cb(LifecycleAttemptingConnectData()) def _on_lifecycle_connection_success( self, connack_session_present, connack_reason_code, connack_session_expiry_interval_sec_exists, connack_session_expiry_interval_sec, connack_receive_maximum_exists, connack_receive_maximum, connack_maximum_qos_exists, connack_maximum_qos, connack_retain_available_exists, connack_retain_available, connack_maximum_packet_size_exists, connack_maximum_packet_size, connack_assigned_client_identifier, connack_topic_alias_maximum_exists, connack_topic_alias_maximum, connack_reason_string, connack_user_properties_tuples, connack_wildcard_subscriptions_available_exist, connack_wildcard_subscriptions_available, connack_subscription_identifiers_available_exists, connack_subscription_identifiers_available, connack_shared_subscriptions_available_exists, connack_shared_subscriptions_available, connack_server_keep_alive_exists, connack_server_keep_alive, connack_response_information, 
connack_server_reference, settings_maximum_qos, settings_session_expiry_interval_sec, settings_receive_maximum_from_server, settings_maximum_packet_size_to_server, settings_topic_alias_maximum_to_server, settings_topic_alias_maximum_to_client, settings_server_keep_alive, settings_retain_available, settings_wildcard_subscriptions_available, settings_subscription_identifiers_available, settings_shared_subscriptions_available, settings_rejoined_session, settings_client_id): if self._on_lifecycle_connection_success_cb is None: return connack_packet = ConnackPacket() connack_packet.session_present = connack_session_present connack_packet.reason_code = _try_connect_reason_code(connack_reason_code) if connack_session_expiry_interval_sec_exists: connack_packet.session_expiry_interval_sec = connack_session_expiry_interval_sec if connack_receive_maximum_exists: connack_packet.receive_maximum = connack_receive_maximum if connack_maximum_qos_exists: connack_packet.maximum_qos = _try_qos(connack_maximum_qos) if connack_retain_available_exists: connack_packet.retain_available = connack_retain_available if connack_maximum_packet_size_exists: connack_packet.maximum_packet_size = connack_maximum_packet_size connack_packet.assigned_client_identifier = connack_assigned_client_identifier if connack_topic_alias_maximum_exists: connack_packet.topic_alias_maximum = connack_topic_alias_maximum connack_packet.reason_string = connack_reason_string connack_packet.user_properties = _init_user_properties(connack_user_properties_tuples) if connack_wildcard_subscriptions_available_exist: connack_packet.wildcard_subscriptions_available = connack_wildcard_subscriptions_available if connack_subscription_identifiers_available_exists: connack_packet.subscription_identifiers_available = connack_subscription_identifiers_available if connack_shared_subscriptions_available_exists: connack_packet.shared_subscription_available = connack_shared_subscriptions_available if connack_server_keep_alive_exists: connack_packet.server_keep_alive_sec = connack_server_keep_alive connack_packet.response_information = connack_response_information connack_packet.server_reference = connack_server_reference negotiated_settings = NegotiatedSettings() negotiated_settings.maximum_qos = _try_qos(settings_maximum_qos) negotiated_settings.session_expiry_interval_sec = settings_session_expiry_interval_sec negotiated_settings.receive_maximum_from_server = settings_receive_maximum_from_server negotiated_settings.maximum_packet_size_to_server = settings_maximum_packet_size_to_server negotiated_settings.topic_alias_maximum_to_server = settings_topic_alias_maximum_to_server negotiated_settings.topic_alias_maximum_to_client = settings_topic_alias_maximum_to_client negotiated_settings.server_keep_alive_sec = settings_server_keep_alive negotiated_settings.retain_available = settings_retain_available negotiated_settings.wildcard_subscriptions_available = settings_wildcard_subscriptions_available negotiated_settings.subscription_identifiers_available = settings_subscription_identifiers_available negotiated_settings.shared_subscriptions_available = settings_shared_subscriptions_available negotiated_settings.rejoined_session = settings_rejoined_session negotiated_settings.client_id = settings_client_id self._on_lifecycle_connection_success_cb( LifecycleConnectSuccessData( connack_packet=connack_packet, negotiated_settings=negotiated_settings)) def _on_lifecycle_connection_failure( self, error_code, connack_packet_exists, connack_session_present, connack_reason_code, 
connack_session_expiry_interval_exists, connack_session_expiry_interval_sec, connack_receive_maximum_exists, connack_receive_maximum, connack_maximum_qos_exists, connack_maximum_qos, connack_retain_available_exists, connack_retain_available, connack_maximum_packet_size_exists, connack_maximum_packet_size, connack_assigned_client_identifier, connack_reason_string, connack_user_properties_tuples, connack_wildcard_subscriptions_available_exist, connack_wildcard_subscriptions_available, connack_subscription_identifiers_available_exists, connack_subscription_identifiers_available, connack_shared_subscriptions_available_exists, connack_shared_subscriptions_available, connack_server_keep_alive_exists, connack_server_keep_alive, connack_response_information, connack_server_reference): if self._on_lifecycle_connection_failure_cb is None: return if connack_packet_exists: connack_packet = ConnackPacket() connack_packet.session_present = connack_session_present connack_packet.reason_code = _try_connect_reason_code(connack_reason_code) if connack_session_expiry_interval_exists: connack_packet.session_expiry_interval_sec = connack_session_expiry_interval_sec if connack_receive_maximum_exists: connack_packet.receive_maximum = connack_receive_maximum if connack_maximum_qos_exists: connack_packet.maximum_qos = _try_qos(connack_maximum_qos) if connack_retain_available_exists: connack_packet.retain_available = connack_retain_available if connack_maximum_packet_size_exists: connack_packet.maximum_packet_size = connack_maximum_packet_size connack_packet.assigned_client_identifier = connack_assigned_client_identifier connack_packet.reason_string = connack_reason_string connack_packet.user_properties = _init_user_properties(connack_user_properties_tuples) if connack_wildcard_subscriptions_available_exist: connack_packet.wildcard_subscriptions_available = connack_wildcard_subscriptions_available if connack_subscription_identifiers_available_exists: connack_packet.subscription_identifiers_available = connack_subscription_identifiers_available if connack_shared_subscriptions_available_exists: connack_packet.shared_subscription_available = connack_shared_subscriptions_available if connack_server_keep_alive_exists: connack_packet.server_keep_alive_sec = connack_server_keep_alive connack_packet.response_information = connack_response_information connack_packet.server_reference = connack_server_reference self._on_lifecycle_connection_failure_cb( LifecycleConnectFailureData( connack_packet=connack_packet, exception=exceptions.from_code(error_code))) else: self._on_lifecycle_connection_failure_cb( LifecycleConnectFailureData( connack_packet=None, exception=exceptions.from_code(error_code))) def _on_lifecycle_disconnection( self, error_code, disconnect_packet_exists, reason_code, session_expiry_interval_sec_exists, session_expiry_interval_sec, reason_string, user_properties_tuples, server_reference): if self._on_lifecycle_disconnection_cb is None: return if disconnect_packet_exists: disconnect_packet = DisconnectPacket() disconnect_packet.reason_code = _try_disconnect_reason_code(reason_code) if session_expiry_interval_sec_exists: disconnect_packet.session_expiry_interval_sec = session_expiry_interval_sec disconnect_packet.reason_string = reason_string disconnect_packet.user_properties = _init_user_properties(user_properties_tuples) disconnect_packet.server_reference = server_reference self._on_lifecycle_disconnection_cb( LifecycleDisconnectData( disconnect_packet=disconnect_packet, 
exception=exceptions.from_code(error_code))) else: self._on_lifecycle_disconnection_cb( LifecycleDisconnectData( disconnect_packet=None, exception=exceptions.from_code(error_code))) @dataclass class _Mqtt5to3AdapterOptions: """This internal class stores the options that are required for creating a new Mqtt3 connection from the mqtt5 client Args: host_name (str): Host name of the MQTT server to connect to. port (int): Network port of the MQTT server to connect to. client_id (str): A unique string identifying the client to the server. Used to restore session state between connections. If left empty, the broker will auto-assign a unique client id. When reconnecting, the mqtt5 client will always use the auto-assigned client id. socket_options (SocketOptions): The socket properties of the underlying MQTT connections made by the client or None if defaults are used. min_reconnect_delay_ms (int): The minimum amount of time to wait to reconnect after a disconnect. Exponential backoff is performed with jitter after each connection failure. max_reconnect_delay_ms (int): The maximum amount of time to wait to reconnect after a disconnect. Exponential backoff is performed with jitter after each connection failure. ping_timeout_ms (int): The time interval to wait after sending a PINGREQ for a PINGRESP to arrive. If one does not arrive, the client will close the current connection. keep_alive_secs (int): The keep alive value, in seconds. A PING will automatically be sent at this interval. ack_timeout_secs (int): The time interval to wait for an ack after sending a QoS 1+ PUBLISH, SUBSCRIBE, or UNSUBSCRIBE before failing the operation. clean_session (bool): Whether or not to start a clean session with each reconnect. The default values are taken from awscrt.mqtt.Connection """ def __init__( self, host_name: str, port: int, client_id: str, socket_options: SocketOptions, min_reconnect_delay_ms: int, max_reconnect_delay_ms: int, ping_timeout_ms: int, keep_alive_secs: int, ack_timeout_secs: int, clean_session: bool): self.host_name = host_name self.port = port self.client_id = "" if client_id is None else client_id self.socket_options = socket_options self.min_reconnect_delay_ms = 5 if min_reconnect_delay_ms is None else min_reconnect_delay_ms self.max_reconnect_delay_ms: int = 60 if max_reconnect_delay_ms is None else max_reconnect_delay_ms self.ping_timeout_ms: int = 3000 if ping_timeout_ms is None else ping_timeout_ms self.keep_alive_secs: int = 1200 if keep_alive_secs is None else keep_alive_secs self.ack_timeout_secs: int = 0 if ack_timeout_secs is None else ack_timeout_secs self.clean_session: bool = True if clean_session is None else clean_session class Client(NativeResource): """This class wraps the aws-c-mqtt MQTT5 client to provide the basic MQTT5 pub/sub functionalities via the AWS Common Runtime. One Client class creates one connection. Args: client_options (ClientOptions): The ClientOptions dataclass used to configure the new Client.
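A minimal, illustrative construction (the endpoint, port, and client id below are placeholders; a real deployment would normally also supply TLS options and lifecycle callbacks via :class:`ClientOptions`)::

    from awscrt import mqtt5

    client_options = mqtt5.ClientOptions(
        host_name="broker.example.com",  # placeholder endpoint
        port=1883,
        connect_options=mqtt5.ConnectPacket(client_id="example-client-id"))
    client = mqtt5.Client(client_options)
    client.start()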
""" def __init__(self, client_options: ClientOptions): super().__init__() core = _ClientCore(client_options) bootstrap = client_options.bootstrap if not bootstrap: bootstrap = ClientBootstrap.get_or_create_static_default() connect_options = client_options.connect_options if not connect_options: connect_options = ConnectPacket() socket_options = client_options.socket_options if not socket_options: socket_options = SocketOptions() if not connect_options.will: is_will_none = True will = PublishPacket() else: is_will_none = False will = connect_options.will websocket_is_none = client_options.websocket_handshake_transform is None self.tls_ctx = client_options.tls_ctx self._binding = _awscrt.mqtt5_client_new(self, client_options.host_name, client_options.port, bootstrap, socket_options, client_options.tls_ctx, client_options.http_proxy_options, connect_options.client_id, connect_options.keep_alive_interval_sec, connect_options.username, connect_options.password, connect_options.session_expiry_interval_sec, connect_options.request_response_information, connect_options.request_problem_information, connect_options.receive_maximum, connect_options.maximum_packet_size, connect_options.will_delay_interval_sec, connect_options.user_properties, is_will_none, will.qos, will.payload, will.retain, will.topic, will.payload_format_indicator, will.message_expiry_interval_sec, will.topic_alias, will.response_topic, will.correlation_data, will.content_type, will.user_properties, client_options.session_behavior, client_options.extended_validation_and_flow_control_options, client_options.offline_queue_behavior, client_options.retry_jitter_mode, client_options.min_reconnect_delay_ms, client_options.max_reconnect_delay_ms, client_options.min_connected_time_to_reset_reconnect_delay_ms, client_options.ping_timeout_ms, client_options.ack_timeout_sec, client_options.topic_aliasing_options, websocket_is_none, core) # Store the options for adapter self.adapter_options = _Mqtt5to3AdapterOptions( host_name=client_options.host_name, port=client_options.port, client_id=connect_options.client_id, socket_options=socket_options, min_reconnect_delay_ms=client_options.min_reconnect_delay_ms, max_reconnect_delay_ms=client_options.max_reconnect_delay_ms, ping_timeout_ms=client_options.ping_timeout_ms, keep_alive_secs=connect_options.keep_alive_interval_sec, ack_timeout_secs=client_options.ack_timeout_sec, clean_session=( client_options.session_behavior < ClientSessionBehaviorType.REJOIN_ALWAYS if client_options.session_behavior else True)) def start(self): """Notifies the MQTT5 client that you want it maintain connectivity to the configured endpoint. The client will attempt to stay connected using the properties of the reconnect-related parameters in the mqtt5 client configuration. This is an asynchronous operation.""" _awscrt.mqtt5_client_start(self._binding) def stop(self, disconnect_packet: DisconnectPacket = None): """Notifies the MQTT5 client that you want it to end connectivity to the configured endpoint, disconnecting any existing connection and halting any reconnect attempts. This is an asynchronous operation. 
Args: disconnect_packet (DisconnectPacket): (optional) Properties of a DISCONNECT packet to send as part of the shutdown process """ is_disconnect_packet_none = disconnect_packet is None if is_disconnect_packet_none: disconnect_packet = DisconnectPacket() _awscrt.mqtt5_client_stop(self._binding, is_disconnect_packet_none, disconnect_packet.reason_code, disconnect_packet.session_expiry_interval_sec, disconnect_packet.reason_string, disconnect_packet.user_properties, disconnect_packet.server_reference) def publish(self, publish_packet: PublishPacket): """Tells the client to attempt to send a PUBLISH packet. Will return a future containing a PubAckResult if the publish is successful. The data in the PubAckResult varies depending on the QoS of the Publish. For QoS 0, the PubAckResult will not contain data. For QoS 1, the PubAckResult will contain a PubAckPacket. See PubAckResult class documentation for more info. Args: publish_packet (PublishPacket): PUBLISH packet to send to the server Returns: A future with a (:class:`PublishCompletionData`) """ future = Future() # TODO QoS 2 Pubcomp will be handled through the same callback in the future def puback(error_code, qos, reason_code, reason_string, user_properties_tuples): publish_completion_data = PublishCompletionData() puback_packet = PubackPacket() publish_completion_data.puback = puback_packet if error_code != 0: future.set_exception(exceptions.from_code(error_code)) else: if qos == 1: puback_packet.reason_code = _try_puback_reason_code(reason_code) puback_packet.reason_string = reason_string puback_packet.user_properties = _init_user_properties(user_properties_tuples) future.set_result(publish_completion_data) _awscrt.mqtt5_client_publish(self._binding, publish_packet.qos, publish_packet.payload, publish_packet.retain, publish_packet.topic, publish_packet.payload_format_indicator, publish_packet.message_expiry_interval_sec, publish_packet.topic_alias, publish_packet.response_topic, publish_packet.correlation_data, publish_packet.content_type, publish_packet.user_properties, puback) return future def subscribe(self, subscribe_packet: SubscribePacket): """Tells the client to attempt to subscribe to one or more topic filters. Args: subscribe_packet (SubscribePacket): SUBSCRIBE packet to send to the server Returns: A future with a (:class:`SubackPacket`) """ future = Future() def suback(error_code, reason_codes, reason_string, user_properties_tuples): suback_packet = SubackPacket() if error_code != 0: future.set_exception(exceptions.from_code(error_code)) else: suback_packet.reason_codes = [_try_suback_reason_code(reason_code) for (reason_code) in reason_codes] suback_packet.reason_string = reason_string suback_packet.user_properties = _init_user_properties(user_properties_tuples) future.set_result(suback_packet) _awscrt.mqtt5_client_subscribe(self._binding, subscribe_packet.subscriptions, subscribe_packet.subscription_identifier, subscribe_packet.user_properties, suback) return future def unsubscribe(self, unsubscribe_packet: UnsubscribePacket): """Tells the client to attempt to unsubscribe from one or more topic filters. 
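As a hedged sketch of a complete call (the topic filter is a placeholder), the returned future resolves with the UNSUBACK result::

    unsuback_future = client.unsubscribe(UnsubscribePacket(topic_filters=["example/topic"]))
    unsuback = unsuback_future.result(timeout=60)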
Args: unsubscribe_packet (UnsubscribePacket): UNSUBSCRIBE packet to send to the server Returns: A future with a (:class:`UnsubackPacket`) """ future = Future() def unsuback(error_code, reason_codes, reason_string, user_properties_tuples): unsuback_packet = UnsubackPacket() if error_code != 0: future.set_exception(exceptions.from_code(error_code)) else: unsuback_packet.reason_codes = [_try_unsuback_reason_code( reason_code) for (reason_code) in reason_codes] unsuback_packet.reason_string = reason_string unsuback_packet.user_properties = _init_user_properties(user_properties_tuples) future.set_result(unsuback_packet) _awscrt.mqtt5_client_unsubscribe(self._binding, unsubscribe_packet.topic_filters, unsubscribe_packet.user_properties, unsuback) return future def get_stats(self): """Queries the client's internal statistics for incomplete operations. Returns: The (:class:`OperationStatisticsData`) containing the statistics """ result = _awscrt.mqtt5_client_get_stats(self._binding) return OperationStatisticsData(result[0], result[1], result[2], result[3]) def new_connection(self, on_connection_interrupted=None, on_connection_resumed=None, on_connection_success=None, on_connection_failure=None, on_connection_closed=None): from awscrt.mqtt import Connection """ Returns a new Mqtt3 Connection Object wraps the Mqtt5 client. Args: on_connection_interrupted: Optional callback invoked whenever the MQTT connection is lost. The MQTT client will automatically attempt to reconnect. The function should take the following arguments return nothing: * `connection` (:class:`Connection`): This MQTT Connection. * `error` (:class:`awscrt.exceptions.AwsCrtError`): Exception which caused connection loss. * `**kwargs` (dict): Forward-compatibility kwargs. on_connection_resumed: Optional callback invoked whenever the MQTT connection is automatically resumed. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `return_code` (:class:`ConnectReturnCode`): Connect return code received from the server. * `session_present` (bool): True if resuming existing session. False if new session. Note that the server has forgotten all previous subscriptions if this is False. Subscriptions can be re-established via resubscribe_existing_topics(). * `**kwargs` (dict): Forward-compatibility kwargs. on_connection_success: Optional callback invoked whenever the connection successfully connects. This callback is invoked for every successful connect and every successful reconnect. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `callback_data` (:class:`OnConnectionSuccessData`): The data returned from the connection success. on_connection_failure: Optional callback invoked whenever the connection fails to connect. This callback is invoked for every failed connect and every failed reconnect. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `callback_data` (:class:`OnConnectionFailureData`): The data returned from the connection failure. on_connection_closed: Optional callback invoked whenever the connection has been disconnected and shutdown successfully. Function should take the following arguments and return nothing: * `connection` (:class:`Connection`): This MQTT Connection * `callback_data` (:class:`OnConnectionClosedData`): The data returned from the connection close. 
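As an illustrative sketch (assuming ``mqtt5_client`` is an already-configured :class:`Client`), the adapter can be used like this::

    connection = mqtt5_client.new_connection()
    connect_future = connection.connect()
    connect_future.result(timeout=60)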
Returns: The (:class:`Connection`) wrapper for the mqtt5 client """ return Connection( self, self.adapter_options.host_name, self.adapter_options.port, self.adapter_options.client_id, clean_session=self.adapter_options.clean_session, on_connection_interrupted=on_connection_interrupted, on_connection_resumed=on_connection_resumed, on_connection_success=on_connection_success, on_connection_failure=on_connection_failure, on_connection_closed=on_connection_closed, reconnect_min_timeout_secs=self.adapter_options.min_reconnect_delay_ms, reconnect_max_timeout_secs=self.adapter_options.max_reconnect_delay_ms, keep_alive_secs=self.adapter_options.keep_alive_secs, ping_timeout_ms=self.adapter_options.ping_timeout_ms, protocol_operation_timeout_ms=self.adapter_options.ack_timeout_secs * 1000, socket_options=self.adapter_options.socket_options, # For the arguments below, setting them to `None` means the underlying options from the mqtt5 client are used directly. will=None, username=None, password=None, # Similar to the options above, setting this to False defers to the mqtt5 client's websocket setup; # it does not necessarily mean that websockets are disabled. use_websockets=False, websocket_proxy_options=None, websocket_handshake_transform=None, proxy_options=None ) aws-crt-python-0.20.4+dfsg/awscrt/s3.py000066400000000000000000000765211456575232400176430ustar00rootroot00000000000000""" S3 client """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from concurrent.futures import Future from awscrt import NativeResource from awscrt.http import HttpRequest from awscrt.io import ClientBootstrap, TlsConnectionOptions from awscrt.auth import AwsCredentials, AwsCredentialsProvider, AwsSignatureType, AwsSignedBodyHeaderType, AwsSignedBodyValue, AwsSigningAlgorithm, AwsSigningConfig import awscrt.exceptions import threading from dataclasses import dataclass from typing import List, Optional, Tuple from enum import IntEnum class CrossProcessLock(NativeResource): """ Class representing an exclusive cross-process lock, scoped by `lock_scope_name` Recommended usage is to either explicitly call acquire() followed by release() when the lock is no longer required, or use this in a 'with' statement. acquire() will throw a RuntimeError with AWS_MUTEX_CALLER_NOT_OWNER as the error code if the lock could not be acquired. If the lock has not been explicitly released when the process exits, it will be released by the operating system. Keyword Args: lock_scope_name (str): Unique string identifying the caller holding the lock. """ def __init__(self, lock_scope_name): super().__init__() self._binding = _awscrt.s3_cross_process_lock_new(lock_scope_name) def acquire(self): _awscrt.s3_cross_process_lock_acquire(self._binding) def __enter__(self): self.acquire() def release(self): _awscrt.s3_cross_process_lock_release(self._binding) def __exit__(self, exc_type, exc_value, exc_tb): self.release() class S3RequestType(IntEnum): """The type of the AWS S3 request""" DEFAULT = 0 """ Default type, for all S3 request types other than :attr:`~S3RequestType.GET_OBJECT`/:attr:`~S3RequestType.PUT_OBJECT`. """ GET_OBJECT = 1 """ Get Object S3 request """ PUT_OBJECT = 2 """ Put Object S3 request """ class S3RequestTlsMode(IntEnum): """TLS mode for S3 request""" ENABLED = 0 """ Enable TLS for S3 request. """ DISABLED = 1 """ Disable TLS for S3 request.
""" class S3ChecksumAlgorithm(IntEnum): """ Checksum algorithm used to verify object integrity. https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html """ CRC32C = 1 """CRC32C""" CRC32 = 2 """CRC32""" SHA1 = 3 """SHA-1""" SHA256 = 4 """SHA-256""" class S3ChecksumLocation(IntEnum): """Where to put the checksum.""" HEADER = 1 """ Add checksum as a request header field. The checksum is calculated before any part of the request is sent to the server. """ TRAILER = 2 """ Add checksum as a request trailer field. The checksum is calculated as the body is streamed to the server, then added as a trailer field. This may be more efficient than HEADER, but can only be used with "streaming" requests that support it. """ @dataclass class S3ChecksumConfig: """Configures how the S3Client calculates and verifies checksums.""" algorithm: Optional[S3ChecksumAlgorithm] = None """ If set, the S3Client will calculate a checksum using this algorithm and add it to the request. If you set this, you must also set `location`. """ location: Optional[S3ChecksumLocation] = None """Where to put the request checksum.""" validate_response: bool = False """Whether to retrieve and validate response checksums.""" class S3Client(NativeResource): """S3 client Keyword Args: bootstrap (Optional [ClientBootstrap]): Client bootstrap to use when initiating socket connection. If None is provided, the default singleton is used. region (str): Region that the S3 bucket lives in. tls_mode (Optional[S3RequestTlsMode]): How TLS should be used while performing the request If this is :attr:`S3RequestTlsMode.ENABLED`: If `tls_connection_options` is set, then those TLS options will be used If `tls_connection_options` is unset, then default TLS options will be used If this is :attr:`S3RequestTlsMode.DISABLED`: No TLS options will be used, regardless of `tls_connection_options` value. signing_config (Optional[AwsSigningConfig]): Configuration for signing of the client. Use :func:`create_default_s3_signing_config()` to create the default config. If not set, a default config will be used with anonymous credentials and skip signing the request. If set: Credentials provider is required. Other configs are all optional, and will be default to what needs to sign the request for S3, only overrides when Non-zero/Not-empty is set. S3 Client will derive the right config for signing process based on this. Notes: 1. For SIGV4_S3EXPRESS, S3 client will use the credentials in the config to derive the S3 Express credentials that are used in the signing process. 2. Client may make modifications to signing config before passing it on to signer. credential_provider (Optional[AwsCredentialsProvider]): Deprecated, prefer `signing_config` instead. Credentials providers source the :class:`~awscrt.auth.AwsCredentials` needed to sign an authenticated AWS request. If None is provided, the request will not be signed. tls_connection_options (Optional[TlsConnectionOptions]): Optional TLS Options to be used for each connection, unless `tls_mode` is :attr:`S3RequestTlsMode.DISABLED` part_size (Optional[int]): Size, in bytes, of parts that files will be downloaded or uploaded in. Note: for :attr:`S3RequestType.PUT_OBJECT` request, client will adjust the part size to meet the service limits. (max number of parts per upload is 10,000, minimum upload part size is 5 MiB) multipart_upload_threshold (Optional[int]): The size threshold in bytes, for when to use multipart uploads. This only affects :attr:`S3RequestType.PUT_OBJECT` request. 
Uploads over this size will use the multipart upload strategy. Uploads this size or less will use a single request. If not set, the larger of `part_size` and 5 MiB will be used. throughput_target_gbps (Optional[float]): Throughput target in Gigabits per second (Gbps) that we are trying to reach. You can also use `get_recommended_throughput_target_gbps()` to get a recommended value for your system. 10.0 Gbps by default (may change in future) enable_s3express (Optional[bool]): To enable S3 Express support for the client. The typical usage for an S3 Express request is to set this to true and let the request be signed with `AwsSigningAlgorithm.V4_S3EXPRESS`, either from the client-level `signing_config` or the request-level override. memory_limit (Optional[int]): Memory limit, in bytes, of how much memory the client can use for buffering data for requests. Default values scale with target throughput and are currently between 2GiB and 8GiB (may change in future) """ __slots__ = ('shutdown_event', '_region') def __init__( self, *, bootstrap=None, region, tls_mode=None, signing_config=None, credential_provider=None, tls_connection_options=None, part_size=None, multipart_upload_threshold=None, throughput_target_gbps=None, enable_s3express=False, memory_limit=None): assert isinstance(bootstrap, ClientBootstrap) or bootstrap is None assert isinstance(region, str) assert isinstance(signing_config, AwsSigningConfig) or signing_config is None assert isinstance(credential_provider, AwsCredentialsProvider) or credential_provider is None assert isinstance(tls_connection_options, TlsConnectionOptions) or tls_connection_options is None assert isinstance(part_size, int) or part_size is None assert isinstance( throughput_target_gbps, int) or isinstance( throughput_target_gbps, float) or throughput_target_gbps is None assert isinstance(enable_s3express, bool) or enable_s3express is None if credential_provider and signing_config: raise ValueError("'credential_provider' has been deprecated in favor of 'signing_config'. " "Both parameters may not be set.") super().__init__() shutdown_event = threading.Event() def on_shutdown(): shutdown_event.set() self._region = region self.shutdown_event = shutdown_event if not bootstrap: bootstrap = ClientBootstrap.get_or_create_static_default() s3_client_core = _S3ClientCore( bootstrap, credential_provider, signing_config, tls_connection_options) # C layer uses 0 to indicate defaults if tls_mode is None: tls_mode = 0 if part_size is None: part_size = 0 if multipart_upload_threshold is None: multipart_upload_threshold = 0 if throughput_target_gbps is None: throughput_target_gbps = 0 if memory_limit is None: memory_limit = 0 self._binding = _awscrt.s3_client_new( bootstrap, signing_config, credential_provider, tls_connection_options, on_shutdown, region, tls_mode, part_size, multipart_upload_threshold, throughput_target_gbps, enable_s3express, memory_limit, s3_client_core) def make_request( self, *, type, request, operation_name=None, recv_filepath=None, send_filepath=None, signing_config=None, credential_provider=None, checksum_config=None, part_size=None, multipart_upload_threshold=None, on_headers=None, on_body=None, on_done=None, on_progress=None): """Create a request to the S3 server, :attr:`~S3RequestType.GET_OBJECT`/:attr:`~S3RequestType.PUT_OBJECT` requests are split into multi-part requests under the hood for acceleration.
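A hedged, minimal sketch of a whole-object download (the bucket, key, endpoint, and file name are placeholders, and ``s3_client`` is assumed to be an :class:`S3Client` that was created with an appropriate ``signing_config``)::

    from awscrt.http import HttpHeaders, HttpRequest

    headers = HttpHeaders([("host", "example-bucket.s3.us-west-2.amazonaws.com")])
    request = HttpRequest("GET", "/example-key", headers)
    s3_request = s3_client.make_request(
        type=S3RequestType.GET_OBJECT,
        request=request,
        recv_filepath="example-key.download")
    s3_request.finished_future.result()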
Keyword Args: type (S3RequestType): The type of S3 request passed in, :attr:`~S3RequestType.GET_OBJECT`/:attr:`~S3RequestType.PUT_OBJECT` can be accelerated request (HttpRequest): The overall outgoing API request for S3 operation. If the request body is a file, set send_filepath for better performance. operation_name(Optional[str]): Optional S3 operation name (e.g. "CreateBucket"). This will only be used when `type` is :attr:`~S3RequestType.DEFAULT`; it is automatically populated for other types. This name is used to fill out details in metrics and error reports. recv_filepath (Optional[str]): Optional file path. If set, the response body is written directly to a file and the `on_body` callback is not invoked. This should give better performance than writing to file from the `on_body` callback. send_filepath (Optional[str]): Optional file path. If set, the request body is read directly from a file and the request's `body_stream` is ignored. This should give better performance than reading a file from a stream. signing_config (Optional[AwsSigningConfig]): Configuration for signing of the request to override the configuration from client. Use :func:`create_default_s3_signing_config()` to create the default config. If None is provided, the client configuration will be used. If set: All fields are optional. The credentials will be resolve from client if not set. S3 Client will derive the right config for signing process based on this. Notes: 1. For SIGV4_S3EXPRESS, S3 client will use the credentials in the config to derive the S3 Express credentials that are used in the signing process. 2. Client may make modifications to signing config before passing it on to signer. credential_provider (Optional[AwsCredentialsProvider]): Deprecated, prefer `signing_config` instead. Credentials providers source the :class:`~awscrt.auth.AwsCredentials` needed to sign an authenticated AWS request, for this request only. If None is provided, the client configuration will be used. checksum_config (Optional[S3ChecksumConfig]): Optional checksum settings. part_size (Optional[int]): Size, in bytes, of parts that files will be downloaded or uploaded in. If not set, the part size configured for the client will be used. Note: for :attr:`S3RequestType.PUT_OBJECT` request, client will adjust the part size to meet the service limits. (max number of parts per upload is 10,000, minimum upload part size is 5 MiB) multipart_upload_threshold (Optional[int]): The size threshold in bytes, for when to use multipart uploads. This only affects :attr:`S3RequestType.PUT_OBJECT` request. Uploads over this size will use the multipart upload strategy. Uploads this size or less will use a single request. If set, this should be at least `part_size`. If not set, `part_size` adjusted by client will be used as the threshold. If both `part_size` and `multipart_upload_threshold` are not set, the values from `aws_s3_client_config` are used. on_headers: Optional callback invoked as the response received, and even the API request has been split into multiple parts, this callback will only be invoked once as it's just making one API request to S3. The function should take the following arguments and return nothing: * `status_code` (int): Response status code. * `headers` (List[Tuple[str, str]]): Response headers as a list of (name,value) pairs. * `**kwargs` (dict): Forward-compatibility kwargs. on_body: Optional callback invoked 0+ times as the response body received from S3 server. 
If simply writing to a file, use `recv_filepath` instead of `on_body` for better performance. The function should take the following arguments and return nothing: * `chunk` (buffer): Response body data (not necessarily a whole "chunk" of chunked encoding). * `offset` (int): The offset of the chunk started in the whole body. * `**kwargs` (dict): Forward-compatibility kwargs. on_done: Optional callback invoked when the request has finished the job. The function should take the following arguments and return nothing: * `error` (Optional[Exception]): None if the request was successfully sent and valid response received, or an Exception if it failed. * `error_headers` (Optional[List[Tuple[str, str]]]): If request failed because server side sent an unsuccessful response, the headers of the response is provided here. Else None will be returned. * `error_body` (Optional[bytes]): If request failed because server side sent an unsuccessful response, the body of the response is provided here. Else None will be returned. * `error_operation_name` (Optional[str]): If request failed because server side sent and unsuccessful response, this is the name of the S3 operation it was responding to. For example, if a :attr:`~S3RequestType.PUT_OBJECT` fails this could be "PutObject", "CreateMultipartUpload", "UploadPart", "CompleteMultipartUpload", or others. For :attr:`~S3RequestType.DEFAULT`, this is the `operation_name` passed to :meth:`S3Client.make_request()`. This will be None if the request failed for another reason, or the S3 operation name is unknown. * `status_code` (Optional[int]): HTTP response status code (if available). If request failed because server side sent an unsuccessful response, this is its status code. If the operation was successful, this is the final response's status code. If the operation failed for another reason, None is returned. * `did_validate_checksum` (bool): Was the server side checksum compared against a calculated checksum of the response body. This may be false even if :attr:`S3ChecksumConfig.validate_response` was set because the object was uploaded without a checksum, or downloaded differently from how it's uploaded. * `checksum_validation_algorithm` (Optional[S3ChecksumAlgorithm]): The checksum algorithm used to validate the response. * `**kwargs` (dict): Forward-compatibility kwargs. on_progress: Optional callback invoked when part of the transfer is done to report the progress. The function should take the following arguments and return nothing: * `progress` (int): Number of bytes of data that just get transferred * `**kwargs` (dict): Forward-compatibility kwargs. Returns: S3Request """ return S3Request( client=self, type=type, request=request, operation_name=operation_name, recv_filepath=recv_filepath, send_filepath=send_filepath, signing_config=signing_config, credential_provider=credential_provider, checksum_config=checksum_config, part_size=part_size, multipart_upload_threshold=multipart_upload_threshold, on_headers=on_headers, on_body=on_body, on_done=on_done, on_progress=on_progress, region=self._region) class S3Request(NativeResource): """S3 request Create a new S3Request with :meth:`S3Client.make_request()` Attributes: finished_future (concurrent.futures.Future): Future that will resolve when the s3 request has finished successfully. If the error happens, the Future will contain an exception indicating why it failed. 
Note: Future will set before on_done invoked shutdown_event (threading.Event): Signals when underlying threads and structures have all finished shutting down. Shutdown begins when the S3Request object is destroyed. """ __slots__ = ('_finished_future', 'shutdown_event') def __init__( self, *, client, type, request, operation_name=None, recv_filepath=None, send_filepath=None, signing_config=None, credential_provider=None, checksum_config=None, part_size=None, multipart_upload_threshold=None, on_headers=None, on_body=None, on_done=None, on_progress=None, region=None): assert isinstance(client, S3Client) assert isinstance(request, HttpRequest) assert callable(on_headers) or on_headers is None assert callable(on_body) or on_body is None assert callable(on_done) or on_done is None assert isinstance(part_size, int) or part_size is None assert isinstance(multipart_upload_threshold, int) or multipart_upload_threshold is None super().__init__() self._finished_future = Future() self.shutdown_event = threading.Event() # C layer uses 0 to indicate defaults if part_size is None: part_size = 0 if multipart_upload_threshold is None: multipart_upload_threshold = 0 checksum_algorithm = 0 checksum_location = 0 validate_response_checksum = False if checksum_config is not None: if checksum_config.algorithm is not None: checksum_algorithm = checksum_config.algorithm.value if checksum_config.location is not None: checksum_location = checksum_config.location.value validate_response_checksum = checksum_config.validate_response s3_request_core = _S3RequestCore( request, self._finished_future, self.shutdown_event, signing_config, credential_provider, on_headers, on_body, on_done, on_progress) self._binding = _awscrt.s3_client_make_meta_request( self, client, request, type, operation_name, signing_config, credential_provider, recv_filepath, send_filepath, region, checksum_algorithm, checksum_location, validate_response_checksum, part_size, multipart_upload_threshold, s3_request_core) @property def finished_future(self): return self._finished_future def cancel(self): _awscrt.s3_meta_request_cancel(self) class S3ResponseError(awscrt.exceptions.AwsCrtError): ''' An error response from S3. Subclasses :class:`awscrt.exceptions.AwsCrtError`. Attributes: status_code (int): HTTP response status code. headers (list[tuple[str, str]]): Headers from HTTP response. body (Optional[bytes]): Body of HTTP response (if any). This is usually XML. It may be None in the case of a HEAD response. operation_name (Optional[str]): Name of the S3 operation that failed (if known). For example, if a :attr:`~S3RequestType.PUT_OBJECT` fails this could be "PutObject", "CreateMultipartUpload", "UploadPart", "CompleteMultipartUpload", or others. For :attr:`~S3RequestType.DEFAULT`, this is the `operation_name` passed to :meth:`S3Client.make_request()`. If the S3 operation name is unknown, this will be None. code (int): CRT error code. name (str): CRT error name. message (str): CRT error message. 
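Illustrative handling sketch (``s3_request`` is assumed to be an :class:`S3Request` returned by :meth:`S3Client.make_request()`)::

    import awscrt.exceptions

    try:
        s3_request.finished_future.result()
    except S3ResponseError as e:
        print("S3 returned an error response:", e.status_code, e.operation_name)
    except awscrt.exceptions.AwsCrtError as e:
        print("Request failed:", e.name)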
''' def __init__(self, *, code: int, name: str, message: str, status_code: Optional[int] = None, headers: List[Tuple[str, str]] = None, body: Optional[bytes] = None, operation_name: Optional[str] = None): super().__init__(code, name, message) self.status_code = status_code self.headers = headers self.body = body self.operation_name = operation_name class _S3ClientCore: ''' Private class to keep all the related Python objects alive until the C layer cleans up the S3Client ''' def __init__(self, bootstrap, credential_provider=None, signing_config=None, tls_connection_options=None): self._bootstrap = bootstrap self._credential_provider = credential_provider self._signing_config = signing_config self._tls_connection_options = tls_connection_options class _S3RequestCore: ''' Private class to keep all the related Python objects alive until the C layer cleans up the S3Request ''' def __init__( self, request, finish_future, shutdown_event, signing_config=None, credential_provider=None, on_headers=None, on_body=None, on_done=None, on_progress=None): # Stores an exception raised in the on_headers or on_body callback so that we can re-raise it in the on_done callback self._python_callback_exception = None self._request = request self._signing_config = signing_config self._credential_provider = credential_provider self._on_headers_cb = on_headers self._on_body_cb = on_body self._on_done_cb = on_done self._on_progress_cb = on_progress self._finished_future = finish_future self._shutdown_event = shutdown_event def _on_headers(self, status_code, headers): if self._on_headers_cb: try: self._on_headers_cb(status_code=status_code, headers=headers) return True except BaseException as e: self._python_callback_exception = e return False def _on_body(self, chunk, offset): if self._on_body_cb: try: self._on_body_cb(chunk=chunk, offset=offset) return True except BaseException as e: self._python_callback_exception = e return False def _on_shutdown(self): self._shutdown_event.set() def _on_finish( self, error_code, status_code, error_headers, error_body, error_operation_name, did_validate_checksum, checksum_validation_algorithm): # If the C layer gives status_code 0, that means "unknown" if status_code == 0: status_code = None error = None if error_code: error = awscrt.exceptions.from_code(error_code) if isinstance(error, awscrt.exceptions.AwsCrtError): if (error.name == "AWS_ERROR_CRT_CALLBACK_EXCEPTION" and self._python_callback_exception is not None): error = self._python_callback_exception # If the failure was due to a response, make it into an S3ResponseError. # When the failure is due to a response, its headers are always included. 
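# (The original CRT error's code/name/message are preserved on the S3ResponseError constructed below,
# so callers can still treat it like any other AwsCrtError while also reading the HTTP status, headers,
# body, and operation name.)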
elif status_code is not None \ and error_headers is not None: error = S3ResponseError( code=error.code, name=error.name, message=error.message, status_code=status_code, headers=error_headers, body=error_body, operation_name=error_operation_name) self._finished_future.set_exception(error) else: self._finished_future.set_result(None) if checksum_validation_algorithm: checksum_validation_algorithm = S3ChecksumAlgorithm(checksum_validation_algorithm) else: checksum_validation_algorithm = None if self._on_done_cb: self._on_done_cb( error=error, error_headers=error_headers, error_body=error_body, error_operation_name=error_operation_name, status_code=status_code, did_validate_checksum=did_validate_checksum, checksum_validation_algorithm=checksum_validation_algorithm) def _on_progress(self, progress): if self._on_progress_cb: self._on_progress_cb(progress) def create_default_s3_signing_config(*, region: str, credential_provider: AwsCredentialsProvider, **kwargs): """Create a default `AwsSigningConfig` for the S3 service. Attributes: region (str): The region to sign against. credential_provider (AwsCredentialsProvider): Credentials provider to fetch signing credentials with. `**kwargs`: Forward-compatibility kwargs. Returns: AwsSigningConfig """ return AwsSigningConfig( algorithm=AwsSigningAlgorithm.V4, signature_type=AwsSignatureType.HTTP_REQUEST_HEADERS, service="s3", signed_body_header_type=AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256, signed_body_value=AwsSignedBodyValue.UNSIGNED_PAYLOAD, region=region, credentials_provider=credential_provider, use_double_uri_encode=False, should_normalize_uri_path=False, ) def get_ec2_instance_type(): """ First, this function checks whether it is running on EC2 by attempting to read DMI info, to avoid making IMDS calls. If it detects that it is on EC2 and was able to determine the instance type without a call to IMDS, it will return it. Otherwise, it will call IMDS and return the instance type from there. Note that in the case of the IMDS call, a new client stack is spun up using 1 background thread. The call is made synchronously with a 1-second timeout: it's not cheap. To make this cheaper, the underlying result is cached internally and will be freed when this module is unloaded. Returns: A string indicating the instance type, or None if it could not be determined. """ return _awscrt.s3_get_ec2_instance_type() def is_optimized_for_system(): """ Returns: True if the current build of this module has an optimized configuration for the current system. """ return _awscrt.s3_is_crt_s3_optimized_for_system() def get_optimized_platforms(): """ Returns: A list[str] of platform identifiers, such as EC2 instance types, for which the S3 client is pre-optimized and which have a recommended throughput_target_gbps. You can use `get_recommended_throughput_target_gbps()` to obtain the recommended throughput_target_gbps for those platforms. """ return _awscrt.s3_get_optimized_platforms() def get_recommended_throughput_target_gbps() -> Optional[float]: """ Returns: Recommended throughput, in gigabits per second, based on the detected system configuration. If the best throughput configuration is unknown, returns None. Use this as the S3Client's `throughput_target_gbps`. """ # Currently the CRT returns 0 if it was unable to make a good guess on configuration. Pre-known configs have this value set. Eventually, the CRT will make a full calculation based on NIC and CPU configuration, but until then, handle 0. 
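# (Illustrative usage, not part of this module: a caller might write
#     target = get_recommended_throughput_target_gbps()
#     client = S3Client(bootstrap=bootstrap, region=region, throughput_target_gbps=target or 10.0)
#  falling back to their own default when None is returned.)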
max_value = _awscrt.s3_get_recommended_throughput_target_gbps() if max_value > 0: return max_value else: return None aws-crt-python-0.20.4+dfsg/awscrt/websocket.py000066400000000000000000000677111456575232400213070ustar00rootroot00000000000000""" WebSocket - `RFC 6455 `_ Use the :func:`connect()` to establish a :class:`WebSocket` client connection. Note from the developer: This is a very low-level API, which forces the user to deal with things like data fragmentation. A higher-level API could easily be built on top of this. .. _authoring-callbacks: Authoring Callbacks ------------------- All network operations in `awscrt.websocket` are asynchronous. Callbacks are always invoked on the WebSocket's networking thread. You MUST NOT perform blocking network operations from any callback, or you will cause a deadlock. For example: do not send a frame, and then wait for that frame to complete, within a callback. The WebSocket cannot do work until your callback returns, so the thread will be stuck. You can send the frame from within the callback, just don't wait for it to complete within the callback. If you want to do blocking waits, do it from a thread you control, like the main thread. It's fine for the main thread to send a frame, and wait until it completes. All functions and methods in `awscrt.websocket` are thread-safe. They can be called from any mix of threads. .. _flow-control-reading: Flow Control (reading) ---------------------- By default, the WebSocket will read from the network as fast as it can hand you the data. You must prevent the WebSocket from reading data faster than you can process it, or memory usage could balloon until your application explodes. There are two ways to manage this. First, and simplest, is to process incoming data synchronously within the `on_incoming_frame` callbacks. Since callbacks are invoked on the WebSocket's networking thread, the WebSocket cannot read more data until the callback returns. Therefore, processing the data in a synchronous manner (i.e. writing to disk, printing to screen, etc) will naturally affect `TCP flow control `_, and prevent data from arriving too fast. However, you MUST NOT perform a blocking network operation from within the callback or you risk deadlock (see :ref:`authoring-callbacks`). The second, more complex, way requires you to manage the size of the read window. Do this if you are processing the data asynchronously (i.e. sending the data along on another network connection). Create the WebSocket with `manage_read_window` set true, and set `initial_read_window` to the number of bytes you are ready to receive right away. Whenever the read window reaches 0, you will stop receiving anything. The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, CONTINUATION). Call :meth:`WebSocket.increment_read_window()` to increase the window again keep frames flowing in. You only need to worry about the payload from "data" frames. The WebSocket automatically increments its window to account for any other incoming bytes, including other parts of a frame (opcode, payload-length, etc) and the payload of other frame types (PING, PONG, CLOSE). You'll probably want to do it like this: Pick the max amount of memory to buffer, and set this as the `initial_read_window`. When data arrives, the window has shrunk by that amount. Send this data along on the other network connection. When that data is done sending, call `increment_read_window()` by the amount you just finished sending. 
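For illustration, a relay that forwards incoming data to another connection might manage its window roughly like this (a sketch; `forward_async()` and its completion callback are hypothetical, not part of this module)::

    BUFFER_LIMIT = 4 * 1024 * 1024  # buffer at most ~4 MiB locally

    def on_incoming_frame_payload(data: OnIncomingFramePayloadData):
        # The read window has already shrunk by len(data.data) at this point.
        forward_async(data.data, on_done=on_forward_done)

    def on_forward_done(num_bytes_forwarded):
        # The bytes have left our buffer; open the window back up by that amount.
        websocket.increment_read_window(num_bytes_forwarded)

    # websocket comes from connect(..., manage_read_window=True,
    #                              initial_read_window=BUFFER_LIMIT, ...)
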
If you don't want to receive any data at first, set the `initial_read_window` to 0, and `increment_read_window()` when you're ready. Maintaining a larger window is better for overall throughput. .. _flow-control-writing: Flow Control (writing) ---------------------- You must also ensure that you do not continually send frames faster than the other side can read them, or memory usage could balloon until your application explodes. The simplest approach is to only send 1 frame at a time. Use the :meth:`WebSocket.send_frame()` `on_complete` callback to know when the send is complete. Then you can try and send another. A more complex, but higher throughput, way is to let multiple frames be in flight but have a cap. If the number of frames in flight, or bytes in flight, reaches your cap then wait until some frames complete before trying to send more. .. _api: API --- """ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import _awscrt from awscrt import NativeResource import awscrt.exceptions from awscrt.http import HttpProxyOptions, HttpRequest from awscrt.io import ClientBootstrap, TlsConnectionOptions, SocketOptions from dataclasses import dataclass from enum import IntEnum import sys from typing import Callable, Optional, Sequence, Tuple, Union class Opcode(IntEnum): """An opcode defines a frame's type. RFC 6455 classifies TEXT and BINARY as `data frames `_. A CONTINUATION frame "continues" the most recent data frame. All other opcodes are for `control frames `_. """ CONTINUATION = 0x0, """Continues the most recent TEXT or BINARY data frame. See `RFC 6455 section 5.4 - Fragmentation `_. """ TEXT = 0x1 """The data frame for sending text. The payload must contain UTF-8.""" BINARY = 0x2 """The data frame for sending binary.""" CLOSE = 0x8 """The control frame which is the final frame sent by an endpoint. The CLOSE frame may include a payload, but its format is very particular. See `RFC 6455 section 5.5.1 `_. """ PING = 0x9 """A control frame that may serve as either a keepalive or as a means to verify that the remote endpoint is still responsive. DO NOT manually send a PONG frame in response to a PING, the implementation does this automatically. A PING frame may include a payload. See `RFC 6455 section 5.5.2 `_. """ PONG = 0xA """The control frame that is the response to a PING frame. DO NOT manually send a PONG frame in response to a PING, the implementation does this automatically. See `RFC 6455 section 5.5.3 `_. """ def is_data_frame(self): """True if this is a "data frame" opcode. TEXT, BINARY, and CONTINUATION are "data frames". The rest are "control" frames. If the WebSocket was created with `manage_read_window`, then the read window shrinks as "data frames" are received. See :ref:`flow-control-reading` for a thorough explanation. """ return self.value in (Opcode.TEXT, Opcode.BINARY, Opcode.CONTINUATION) MAX_PAYLOAD_LENGTH = 0x7FFFFFFFFFFFFFFF """The maximum frame payload length allowed by RFC 6455""" @dataclass class OnConnectionSetupData: """Data passed to the `on_connection_setup` callback""" exception: Optional[Exception] = None """If the connection failed, this exception explains why. This is None if the connection succeeded.""" websocket: Optional['WebSocket'] = None """If the connection succeeded, here's the WebSocket. You should store this WebSocket somewhere (the connection will shut down if the class is garbage collected). This is None if the connection failed. 
""" handshake_response_status: Optional[int] = None """The HTTP response status-code, if you're interested. This is present if an HTTP response was received, regardless of whether the handshake was accepted or rejected. This always has the value 101 for successful connections. This is None if the connection failed before receiving an HTTP response. """ handshake_response_headers: Optional[Sequence[Tuple[str, str]]] = None """The HTTP response headers, if you're interested. These are present if an HTTP response was received, regardless of whether the handshake was accepted or rejected. This is None if the connection failed before receiving an HTTP response. """ handshake_response_body: bytes = None """The HTTP response body, if you're interested. This is only present if the server sent a full HTTP response rejecting the handshake. It is not present if the connection succeeded, or the connection failed for other reasons. """ @dataclass class OnConnectionShutdownData: """Data passed to the `on_connection_shutdown` callback""" exception: Optional[Exception] = None """If the connection shut down cleanly, this is None. If the connection shut down due to error, or an error occurs while shutting down, this exception explains why.""" @dataclass class IncomingFrame: """Describes the frame you are receiving. Used in `on_incoming_frame` callbacks """ opcode: Opcode """This frame's opcode.""" payload_length: int """This frame's payload length (in bytes).""" fin: bool """The FIN bit indicates whether this is the final fragment in a message. See `RFC 6455 section 5.4 - Fragmentation `_""" def is_data_frame(self): """True if this is a "data frame". TEXT, BINARY, and CONTINUATION are "data frames". The rest are "control frames". If the WebSocket was created with `manage_read_window`, then the read window shrinks as "data frames" are received. See :ref:`flow-control-reading` for a thorough explanation. """ return self.opcode.is_data_frame() @dataclass class OnIncomingFrameBeginData: """Data passed to the `on_incoming_frame_begin` callback. Each `on_incoming_frame_begin` call will be followed by 0+ `on_incoming_frame_payload` calls, followed by one `on_incoming_frame_complete` call.""" frame: IncomingFrame """Describes the frame you are starting to receive.""" @dataclass class OnIncomingFramePayloadData: """Data passed to the `on_incoming_frame_payload` callback. This callback will be invoked 0+ times. Each time, `data` will contain a bit more of the payload. Once all `frame.payload_length` bytes have been received (or the network connection is lost), the `on_incoming_frame_complete` callback will be invoked. If the WebSocket was created with `manage_read_window`, and this is a "data frame" (TEXT, BINARY, CONTINUATION), then the read window shrinks by `len(data)`. See :ref:`flow-control-reading` for a thorough explanation. 
""" frame: IncomingFrame """Describes the frame whose payload you are receiving.""" data: bytes """The next chunk of this frame's payload.""" @dataclass class OnIncomingFrameCompleteData: """Data passed to the `on_incoming_frame_complete` callback.""" frame: IncomingFrame """Describes the frame you are done receiving.""" exception: Optional[Exception] = None """If `exception` is set, then something went wrong processing the frame or the connection was lost before the frame was fully received.""" @dataclass class OnSendFrameCompleteData: """Data passed to the :meth:`WebSocket.send_frame()` `on_complete` callback.""" exception: Optional[Exception] = None """If `exception` is set, the connection was lost before this frame could be completely sent. If `exception` is None, the frame was successfully written to the OS socket. Note that this data may still be buffered in the OS, it has not necessarily left this machine or reached the other endpoint yet.""" class WebSocket(NativeResource): """A WebSocket connection. Use :meth:`connect()` to establish a new client connection. """ def __init__(self, binding): # Do not init a WebSocket directly, use websocket.connect() super().__init__() self._binding = binding def close(self): """Close the WebSocket asynchronously. You should call this when you are done with a healthy WebSocket, to ensure that it shuts down and cleans up. You don't need to call this on a WebSocket that has already shut down, or is in the middle of shutting down, but it is safe to do so. This function is idempotent. To determine when shutdown has completed, you can use the `on_shutdown_complete` callback (passed into :meth:`connect()`). """ _awscrt.websocket_close(self._binding) def send_frame( self, opcode: Opcode, payload: Optional[Union[str, bytes, bytearray, memoryview]] = None, *, fin: bool = True, on_complete: Optional[Callable[[OnSendFrameCompleteData], None]] = None, ): """Send a WebSocket frame asynchronously. See `RFC 6455 section 5 - Data Framing `_ for details on all frame types. This is a low-level API, which requires you to send the appropriate payload for each type of opcode. If you are not an expert, stick to sending :attr:`Opcode.TEXT` or :attr:`Opcode.BINARY` frames, and don't touch the FIN bit. See :ref:`flow-control-writing` to learn about limiting the amount of unsent data buffered in memory. Args: opcode: :class:`Opcode` for this frame. payload: Any `bytes-like object `_. `str` will always be encoded as UTF-8. It is fine to pass a `str` for a BINARY frame. None will result in an empty payload, the same as passing empty `bytes()` fin: The FIN bit indicates that this is the final fragment in a message. Do not set this False unless you understand `WebSocket fragmentation `_ on_complete: Optional callback, invoked when the frame has finished sending. Takes a single :class:`OnSendFrameCompleteData` argument. If :attr:`OnSendFrameCompleteData.exception` is set, the connection was lost before this frame could be completely sent. But if `exception` is None, the frame was successfully written to the OS socket. (This doesn't mean the other endpoint has received the data yet, or even guarantee that the data has left the machine yet, but it's on track to get there). Be sure to read about :ref:`authoring-callbacks`. """ def _on_complete(error_code): cbdata = OnSendFrameCompleteData() if error_code: cbdata.exception = awscrt.exceptions.from_code(error_code) # Do not let exceptions from the user's callback bubble up any further. 
try: if on_complete is not None: on_complete(cbdata) except BaseException: print("Exception in WebSocket.send_frame on_complete callback", file=sys.stderr) sys.excepthook(*sys.exc_info()) self.close() _awscrt.websocket_send_frame( self._binding, Opcode(opcode), # needless cast to ensure opcode is valid payload, fin, _on_complete) def increment_read_window(self, size: int): """Manually increment the read window by this many bytes, to continue receiving frames. See :ref:`flow-control-reading` for a thorough explanation. If the WebSocket was created without `manage_read_window`, this function does nothing. This function may be called from any thread. Args: size: in bytes """ if size < 0: raise ValueError("Increment size cannot be negative") _awscrt.websocket_increment_read_window(self._binding, size) class _WebSocketCore(NativeResource): # Private class that handles wrangling callback data from C -> Python. # This class is kept alive by C until the final callback occurs. # # The only reason this class inherits from NativeResource, # is so our tests will tell us if the memory leaks. def __init__(self, on_connection_setup, on_connection_shutdown, on_incoming_frame_begin, on_incoming_frame_payload, on_incoming_frame_complete): super().__init__() self._on_connection_setup_cb = on_connection_setup self._on_connection_shutdown_cb = on_connection_shutdown self._on_incoming_frame_begin_cb = on_incoming_frame_begin self._on_incoming_frame_payload_cb = on_incoming_frame_payload self._on_incoming_frame_complete_cb = on_incoming_frame_complete def _on_connection_setup( self, error_code, websocket_binding, handshake_response_status, handshake_response_headers, handshake_response_body): cbdata = OnConnectionSetupData() if error_code: cbdata.exception = awscrt.exceptions.from_code(error_code) else: cbdata.websocket = WebSocket(websocket_binding) cbdata.handshake_response_status = handshake_response_status cbdata.handshake_response_headers = handshake_response_headers cbdata.handshake_response_body = handshake_response_body # Do not let exceptions from the user's callback bubble up any further. try: self._on_connection_setup_cb(cbdata) except BaseException: print("Exception in WebSocket on_connection_setup callback", file=sys.stderr) sys.excepthook(*sys.exc_info()) if cbdata.websocket is not None: cbdata.websocket.close() def _on_connection_shutdown(self, error_code): cbdata = OnConnectionShutdownData() if error_code: cbdata.exception = awscrt.exceptions.from_code(error_code) # Do not let exceptions from the user's callback bubble up any further. 
try: if self._on_connection_shutdown_cb is not None: self._on_connection_shutdown_cb(cbdata) except BaseException: print("Exception in WebSocket on_connection_shutdown callback", file=sys.stderr) sys.excepthook(*sys.exc_info()) def _on_incoming_frame_begin(self, opcode_int, payload_length, fin): self._current_incoming_frame = IncomingFrame(Opcode(opcode_int), payload_length, fin) cbdata = OnIncomingFrameBeginData(self._current_incoming_frame) # Do not let exceptions from the user's callback bubble up any further: try: if self._on_incoming_frame_begin_cb is not None: self._on_incoming_frame_begin_cb(cbdata) except BaseException: print("Exception in WebSocket on_incoming_frame_begin callback", file=sys.stderr) sys.excepthook(*sys.exc_info()) return False # close websocket return True def _on_incoming_frame_payload(self, data): cbdata = OnIncomingFramePayloadData(self._current_incoming_frame, data) # Do not let exceptions from the user's callback bubble up any further: try: if self._on_incoming_frame_payload_cb is not None: self._on_incoming_frame_payload_cb(cbdata) except BaseException: print("Exception in WebSocket on_incoming_frame_payload callback", file=sys.stderr) sys.excepthook(*sys.exc_info()) return False # close websocket return True def _on_incoming_frame_complete(self, error_code): cbdata = OnIncomingFrameCompleteData(self._current_incoming_frame) if error_code: cbdata.exception = awscrt.exceptions.from_code(error_code) del self._current_incoming_frame # Do not let exceptions from the user's callback bubble up any further: try: if self._on_incoming_frame_complete_cb is not None: self._on_incoming_frame_complete_cb(cbdata) except BaseException: print("Exception in WebSocket on_incoming_frame_complete callback", file=sys.stderr) sys.excepthook(*sys.exc_info()) return False # close websocket return True def connect( *, host: str, port: Optional[int] = None, handshake_request: HttpRequest, bootstrap: Optional[ClientBootstrap] = None, socket_options: Optional[SocketOptions] = None, tls_connection_options: Optional[TlsConnectionOptions] = None, proxy_options: Optional[HttpProxyOptions] = None, manage_read_window: bool = False, initial_read_window: Optional[int] = None, on_connection_setup: Callable[[OnConnectionSetupData], None], on_connection_shutdown: Optional[Callable[[OnConnectionShutdownData], None]] = None, on_incoming_frame_begin: Optional[Callable[[OnIncomingFrameBeginData], None]] = None, on_incoming_frame_payload: Optional[Callable[[OnIncomingFramePayloadData], None]] = None, on_incoming_frame_complete: Optional[Callable[[OnIncomingFrameCompleteData], None]] = None, ): """Asynchronously establish a client WebSocket connection. The `on_connection_setup` callback is invoked once the connection has succeeded or failed. If successful, a :class:`WebSocket` will be provided in the :class:`OnConnectionSetupData`. You should store this WebSocket somewhere, so that you can continue using it (the connection will shut down if the class is garbage collected). The WebSocket will shut down after one of these things occur: * You call :meth:`WebSocket.close()` * You, or the server, sends a CLOSE frame. * The underlying socket shuts down. * All references to the WebSocket are dropped, causing it to be garbage collected. However, you should NOT rely on this behavior. You should call :meth:`~WebSocket.close()` when you are done with a healthy WebSocket, to ensure that it shuts down and cleans up. It is very easy to accidentally keep a reference around without realizing it. 
Be sure to read about :ref:`authoring-callbacks`. Args: host: Hostname to connect to. port: Port to connect to. If not specified, it defaults to port 443 when `tls_connection_options` is present, and port 80 otherwise. handshake_request: HTTP request for the initial WebSocket handshake. The request's method MUST be "GET", and the following headers are required:: Host: Upgrade: websocket Connection: Upgrade Sec-WebSocket-Key: Sec-WebSocket-Version: 13 You can use :meth:`create_handshake_request()` to make a valid WebSocket handshake request, modifying the path and headers to fit your needs, and then passing it here. bootstrap: Client bootstrap to use when initiating socket connection. If not specified, the default singleton is used. socket_options: Socket options. If not specified, default options are used. proxy_options: HTTP Proxy options. If not specified, no proxy is used. manage_read_window: Set true to manually manage the flow-control read window. If false (the default), data arrives as fast as possible. See :ref:`flow-control-reading` for a thorough explanation. initial_read_window: The initial size of the read window, in bytes. This must be set if `manage_read_window` is true, otherwise it is ignored. See :ref:`flow-control-reading` for a thorough explanation. An initial size of 0 will prevent any frames from arriving until :meth:`WebSocket.increment_read_window()` is called. on_connection_setup: Callback invoked when the connect completes. Takes a single :class:`OnConnectionSetupData` argument. If successful, :attr:`OnConnectionSetupData.websocket` will be set. You should store the :class:`WebSocket` somewhere, so you can use it to send data when you're ready. The other callbacks will be invoked as events occur, until the final `on_connection_shutdown` callback. If unsuccessful, :attr:`OnConnectionSetupData.exception` will be set, and no further callbacks will be invoked. If this callback raises an exception, the connection will shut down. on_connection_shutdown: Optional callback, invoked when a connection shuts down. Takes a single :class:`OnConnectionShutdownData` argument. This callback is never invoked if `on_connection_setup` reported an exception. on_incoming_frame_begin: Optional callback, invoked once at the start of each incoming frame. Takes a single :class:`OnIncomingFrameBeginData` argument. Each `on_incoming_frame_begin` call will be followed by 0+ `on_incoming_frame_payload` calls, followed by one `on_incoming_frame_complete` call. The "frame complete" callback is guaranteed to be invoked once for each "frame begin" callback, even if the connection is lost before the whole frame has been received. If this callback raises an exception, the connection will shut down. on_incoming_frame_payload: Optional callback, invoked 0+ times as payload data arrives. Takes a single :class:`OnIncomingFramePayloadData` argument. If `manage_read_window` is on, and this is a "data frame", then the read window shrinks accordingly. See :ref:`flow-control-reading` for a thorough explanation. If this callback raises an exception, the connection will shut down. on_incoming_frame_complete: Optional callback, invoked when the WebSocket is done processing an incoming frame. Takes a single :class:`OnIncomingFrameCompleteData` argument. If :attr:`OnIncomingFrameCompleteData.exception` is set, then something went wrong processing the frame or the connection was lost before the frame could be completed. If this callback raises an exception, the connection will shut down. 
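For illustration, a minimal connection could look roughly like this (a sketch; error handling and frame callbacks are omitted, and `HOST` is a placeholder hostname)::

    import queue

    setup_results = queue.Queue()

    def on_setup(data: OnConnectionSetupData):
        setup_results.put(data)

    request = create_handshake_request(host=HOST, path='/')
    connect(host=HOST, handshake_request=request, on_connection_setup=on_setup)

    # Wait on your own thread, never inside a callback.
    setup = setup_results.get()
    if setup.exception:
        raise setup.exception
    websocket = setup.websocket
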
""" if manage_read_window: if initial_read_window is None: raise ValueError("'initial_read_window' must be set if 'manage_read_window' is enabled") else: initial_read_window = 0 # value is ignored anyway if initial_read_window < 0: raise ValueError("'initial_read_window' cannot be negative") if port is None: port = 0 # C layer uses zero to indicate "defaults please" if bootstrap is None: bootstrap = ClientBootstrap.get_or_create_static_default() if socket_options is None: socket_options = SocketOptions() core = _WebSocketCore( on_connection_setup, on_connection_shutdown, on_incoming_frame_begin, on_incoming_frame_payload, on_incoming_frame_complete) _awscrt.websocket_client_connect( host, port, handshake_request, bootstrap, socket_options, tls_connection_options, proxy_options, manage_read_window, initial_read_window, core) def create_handshake_request(*, host: str, path: str = '/') -> HttpRequest: """Create an HTTP request with all the required fields for a WebSocket handshake. The method will be "GET", and the following headers are added:: Host: Upgrade: websocket Connection: Upgrade Sec-WebSocket-Key: Sec-WebSocket-Version: 13 You may can add headers, or modify the path, before using this request. Args: host: Value for "Host" header path: Path (and query) string. Defaults to "/". """ http_request_binding, http_headers_binding = _awscrt.websocket_create_handshake_request(host, path) return HttpRequest._from_bindings(http_request_binding, http_headers_binding) aws-crt-python-0.20.4+dfsg/builder.json000066400000000000000000000017151456575232400177550ustar00rootroot00000000000000{ "name": "aws-crt-python", "!cmake_args": [ "-DS2N_NO_PQ_ASM=ON" ], "env": { "AWS_CRT_BUILD_WARNINGS_ARE_ERRORS": "1" }, "hosts": { "manylinux": { "_comment": "Use existing compiler on manylinux. These are the images we use for release. We want to be sure things work with the defaults.", "needs_compiler": false }, "musllinux": { "_comment": "Use existing compiler on musllinux. These are the images we use for release. We want to be sure things work with the defaults.", "needs_compiler": false } }, "targets": { "android": { "enabled": false, "_comment": "disabled until we have a reason to support python on android" } }, "_comment": "build steps defined in: .builder/actions/aws_crt_python.py", "build_steps": [ "aws-crt-python" ], "upstream": [], "downstream": [] } aws-crt-python-0.20.4+dfsg/codebuild/000077500000000000000000000000001456575232400173625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/codebuild/CanaryWrapper.py000066400000000000000000000431621456575232400225200ustar00rootroot00000000000000# Python wrapper script for collecting Canary metrics, setting-up/tearing-down alarms, reporting metrics to Cloudwatch, # checking the alarms to ensure everything is correct at the end of the run, and pushing the log to S3 if successful. 
# Needs to be installed prior to running # Part of standard packages in Python 3.4+ import argparse import time import datetime # Dependencies in project folder from CanaryWrapper_Classes import * from CanaryWrapper_MetricFunctions import * # Code for command line argument parsing # ================================================================================ command_parser = argparse.ArgumentParser("CanaryWrapper") command_parser.add_argument("--canary_executable", type=str, required=True, help="The path to the canary executable (or program - like 'python3')") command_parser.add_argument("--canary_arguments", type=str, default="", help="The arguments to pass/launch the canary executable with") command_parser.add_argument("--git_hash", type=str, required=True, help="The Git commit hash that we are running the canary with") command_parser.add_argument("--git_repo_name", type=str, required=True, help="The name of the Git repository") command_parser.add_argument("--git_hash_as_namespace", type=bool, default=False, help="(OPTIONAL, default=False) If true, the git hash will be used as the name of the Cloudwatch namespace") command_parser.add_argument("--output_log_filepath", type=str, default="output.log", help="(OPTIONAL, default=output.log) The file to output log info to. Set to 'None' to disable") command_parser.add_argument("--output_to_console", type=bool, default=True, help="(OPTIONAL, default=True) If true, info will be output to the console") command_parser.add_argument("--cloudwatch_region", type=str, default="us-east-1", help="(OPTIONAL, default=us-east-1) The AWS region for Cloudwatch") command_parser.add_argument("--s3_bucket_name", type=str, default="canary-wrapper-folder", help="(OPTIONAL, default=canary-wrapper-folder) The name of the S3 bucket where success logs will be stored") command_parser.add_argument("--snapshot_wait_time", type=int, default=600, help="(OPTIONAL, default=600) The number of seconds between gathering and sending snapshot reports") command_parser.add_argument("--ticket_category", type=str, default="AWS", help="(OPTIONAL, default=AWS) The category to register the ticket under") command_parser.add_argument("--ticket_type", type=str, default="SDKs and Tools", help="(OPTIONAL, default='SDKs and Tools') The type to register the ticket under") command_parser.add_argument("--ticket_item", type=str, default="IoT SDK for CPP", help="(OPTIONAL, default='IoT SDK for CPP') The item to register the ticket under") command_parser.add_argument("--ticket_group", type=str, default="AWS IoT Device SDK", help="(OPTIONAL, default='AWS IoT Device SDK') The group to register the ticket under") command_parser.add_argument("--dependencies", type=str, default="", help="(OPTIONAL, default='') Any dependencies and their commit hashes. \ Current expected format is '(name or path);(hash);(next name or path);(hash);(etc...)'.") command_parser.add_argument("--lambda_name", type=str, default="iot-send-email-lambda", help="(OPTIONAL, default='CanarySendEmailLambda') The name of the Lambda used to send emails") command_parser.add_argument("--codebuild_log_path", type=str, default="", help="The CODEBUILD_LOG_PATH environment variable. 
Leave blank to ignore") command_parser_arguments = command_parser.parse_args() if (command_parser_arguments.output_log_filepath == "None"): command_parser_arguments.output_log_filepath = None if (command_parser_arguments.snapshot_wait_time <= 0): command_parser_arguments.snapshot_wait_time = 60 # Deal with possibly empty values in semi-critical commands/arguments if (command_parser_arguments.canary_executable == ""): print ("ERROR - required canary_executable is empty!", flush=True) exit (1) # cannot run without a canary executable if (command_parser_arguments.git_hash == ""): print ("ERROR - required git_hash is empty!", flush=True) exit (1) # cannot run without git hash if (command_parser_arguments.git_repo_name == ""): print ("ERROR - required git_repo_name is empty!", flush=True) exit (1) # cannot run without git repo name if (command_parser_arguments.git_hash_as_namespace is not True and command_parser_arguments.git_hash_as_namespace is not False): command_parser_arguments.git_hash_as_namespace = False if (command_parser_arguments.output_log_filepath == ""): command_parser_arguments.output_log_filepath = None if (command_parser_arguments.output_to_console != True and command_parser_arguments.output_to_console != False): command_parser_arguments.output_to_console = True if (command_parser_arguments.cloudwatch_region == ""): command_parser_arguments.cloudwatch_region = "us-east-1" if (command_parser_arguments.s3_bucket_name == ""): command_parser_arguments.s3_bucket_name = "canary-wrapper-folder" if (command_parser_arguments.ticket_category == ""): command_parser_arguments.ticket_category = "AWS" if (command_parser_arguments.ticket_type == ""): command_parser_arguments.ticket_type = "SDKs and Tools" if (command_parser_arguments.ticket_item == ""): command_parser_arguments.ticket_item = "IoT SDK for CPP" if (command_parser_arguments.ticket_group == ""): command_parser_arguments.ticket_group = "AWS IoT Device SDK" # ================================================================================ datetime_now = datetime.datetime.now() datetime_string = datetime_now.strftime("%d-%m-%Y/%H-%M-%S") print("Datetime string is: " + datetime_string, flush=True) # Make the snapshot class data_snapshot = DataSnapshot( git_hash=command_parser_arguments.git_hash, git_repo_name=command_parser_arguments.git_repo_name, datetime_string=datetime_string, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", output_log_filepath="output.txt", output_to_console=command_parser_arguments.output_to_console, cloudwatch_region="us-east-1", cloudwatch_make_dashboard=False, cloudwatch_teardown_alarms_on_complete=True, cloudwatch_teardown_dashboard_on_complete=True, s3_bucket_name=command_parser_arguments.s3_bucket_name, s3_bucket_upload_on_complete=True, lambda_name=command_parser_arguments.lambda_name, metric_frequency=command_parser_arguments.snapshot_wait_time) # Make sure nothing failed if (data_snapshot.abort_due_to_internal_error == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) exit(0) # Register metrics data_snapshot.register_metric( new_metric_name="total_cpu_usage", new_metric_function=get_metric_total_cpu_usage, new_metric_unit="Percent", new_metric_alarm_threshold=70, new_metric_reports_to_skip=1, new_metric_alarm_severity=5, is_percent=True) data_snapshot.register_metric( new_metric_name="total_memory_usage_value", 
new_metric_function=get_metric_total_memory_usage_value, new_metric_unit="Bytes") data_snapshot.register_metric( new_metric_name="total_memory_usage_percent", new_metric_function=get_metric_total_memory_usage_percent, new_metric_unit="Percent", new_metric_alarm_threshold=70, new_metric_reports_to_skip=0, new_metric_alarm_severity=5, is_percent=True) # Print diagnosis information data_snapshot.output_diagnosis_information(command_parser_arguments.dependencies) # Make the snapshot (metrics) monitor snapshot_monitor = SnapshotMonitor( wrapper_data_snapshot=data_snapshot, wrapper_metrics_wait_time=command_parser_arguments.snapshot_wait_time) # Make sure nothing failed if (snapshot_monitor.had_internal_error == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) exit(0) # Make the application monitor application_monitor = ApplicationMonitor( wrapper_application_path=command_parser_arguments.canary_executable, wrapper_application_arguments=command_parser_arguments.canary_arguments, wrapper_application_restart_on_finish=False, data_snapshot=data_snapshot # pass the data_snapshot for printing to the log ) # Make sure nothing failed if (application_monitor.error_has_occurred == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) exit(0) # For tracking if we stopped due to a metric alarm stopped_due_to_metric_alarm = False execution_sleep_time = 30 def execution_loop(): while True: snapshot_monitor.monitor_loop_function( time_passed=execution_sleep_time, psutil_process=application_monitor.application_process_psutil) application_monitor.monitor_loop_function( time_passed=execution_sleep_time) # Did a metric go into alarm? if (snapshot_monitor.has_cut_ticket == True): # Set that we had an 'internal error' so we go down the right code path snapshot_monitor.had_internal_error = True break # If an error has occurred or otherwise this thread needs to stop, then break the loop if (application_monitor.error_has_occurred == True or snapshot_monitor.had_internal_error == True): break time.sleep(execution_sleep_time) def application_thread(): start_email_body = "MQTT5 Short Running Canary Wrapper has started for " start_email_body += "\"" + command_parser_arguments.git_repo_name + "\" commit \"" + command_parser_arguments.git_hash + "\"" start_email_body += "\nThe wrapper will run for the length the MQTT5 Canary application is set to run for, which is determined by " start_email_body += "the arguments set. The arguments used for this run are listed below:" start_email_body += "\n Arguments: " + command_parser_arguments.canary_arguments snapshot_monitor.send_email(email_body=start_email_body, email_subject_text_append="Started") # Start the application going snapshot_monitor.start_monitoring() application_monitor.start_monitoring() # Allow the snapshot monitor to cut tickets snapshot_monitor.can_cut_ticket = True # Start the execution loop execution_loop() # Make sure everything is stopped snapshot_monitor.stop_monitoring() application_monitor.stop_monitoring() # Track whether this counts as an error (and therefore we should cleanup accordingly) or not wrapper_error_occurred = False # Finished Email send_finished_email = True finished_email_body = "MQTT5 Short Running Canary Wrapper has stopped." 
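# Work out why the run stopped (metric alarm, internal error, non-zero canary exit code, or a normal
# finish), cut a ticket where appropriate, and build the body of the wrap-up email accordingly.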
finished_email_body += "\n\n" try: # Find out why we stopped if (snapshot_monitor.had_internal_error == True): if (snapshot_monitor.has_cut_ticket == True): # We do not need to cut a ticket here - it's cut by the snapshot monitor! print ("ERROR - Snapshot monitor stopped due to metric in alarm!", flush=True) finished_email_body += "Failure due to required metrics being in alarm! A new ticket should have been cut!" finished_email_body += "\nMetrics in Alarm: " + str(snapshot_monitor.cloudwatch_current_alarms_triggered) wrapper_error_occurred = True else: print ("ERROR - Snapshot monitor stopped due to internal error!", flush=True) cut_ticket_using_cloudwatch( git_repo_name=command_parser_arguments.git_repo_name, git_hash=command_parser_arguments.git_hash, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1", ticket_description="Snapshot monitor stopped due to internal error! Reason info: " + snapshot_monitor.internal_error_reason, ticket_reason="Snapshot monitor stopped due to internal error", ticket_allow_duplicates=True, ticket_category=command_parser_arguments.ticket_category, ticket_item=command_parser_arguments.ticket_item, ticket_group=command_parser_arguments.ticket_group, ticket_type=command_parser_arguments.ticket_type, ticket_severity=4) wrapper_error_occurred = True finished_email_body += "Failure due to Snapshot monitor stopping due to an internal error." finished_email_body += " Reason given for error: " + snapshot_monitor.internal_error_reason elif (application_monitor.error_has_occurred == True): if (application_monitor.error_due_to_credentials == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) wrapper_error_occurred = True send_finished_email = False else: # Is the error something in the canary failed? if (application_monitor.error_code != 0): cut_ticket_using_cloudwatch( git_repo_name=command_parser_arguments.git_repo_name, git_hash=command_parser_arguments.git_hash, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1", ticket_description="The Short Running Canary exited with a non-zero exit code! This likely means something in the canary failed.", ticket_reason="The Short Running Canary exited with a non-zero exit code", ticket_allow_duplicates=True, ticket_category=command_parser_arguments.ticket_category, ticket_item=command_parser_arguments.ticket_item, ticket_group=command_parser_arguments.ticket_group, ticket_type=command_parser_arguments.ticket_type, ticket_severity=4) wrapper_error_occurred = True finished_email_body += "Failure due to MQTT5 application exiting with a non-zero exit code! This means something in the Canary application itself failed" else: print ("INFO - Stopping application. No error has occurred, application has stopped normally", flush=True) application_monitor.print_stdout() finished_email_body += "Short Running Canary finished successfully and run without errors!" 
wrapper_error_occurred = False else: print ("ERROR - Short Running Canary stopped due to unknown reason!", flush=True) cut_ticket_using_cloudwatch( git_repo_name=command_parser_arguments.git_repo_name, git_hash=command_parser_arguments.git_hash, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1", ticket_description="The Short Running Canary stopped for an unknown reason!", ticket_reason="The Short Running Canary stopped for unknown reason", ticket_allow_duplicates=True, ticket_category=command_parser_arguments.ticket_category, ticket_item=command_parser_arguments.ticket_item, ticket_group=command_parser_arguments.ticket_group, ticket_type=command_parser_arguments.ticket_type, ticket_severity=4) wrapper_error_occurred = True finished_email_body += "Failure due to unknown reason! This shouldn't happen and means something has gone wrong!" except Exception as e: print ("ERROR: Could not (possibly) cut ticket due to exception!") print ("Exception: " + str(e), flush=True) # Clean everything up and stop snapshot_monitor.cleanup_monitor(error_occurred=wrapper_error_occurred) application_monitor.cleanup_monitor(error_occurred=wrapper_error_occurred) print ("Short Running Canary finished!", flush=True) finished_email_body += "\n\nYou can find the log file for this run at the following S3 location: " finished_email_body += "https://s3.console.aws.amazon.com/s3/object/" finished_email_body += command_parser_arguments.s3_bucket_name finished_email_body += "?region=" + command_parser_arguments.cloudwatch_region finished_email_body += "&prefix=" + command_parser_arguments.git_repo_name + "/" + datetime_string + "/" if (wrapper_error_occurred == True): finished_email_body += "Failed_Logs/" finished_email_body += command_parser_arguments.git_hash + ".log" if (command_parser_arguments.codebuild_log_path != ""): print ("\n Codebuild log path: " + command_parser_arguments.codebuild_log_path + "\n") # Send the finish email if (send_finished_email == True): if (wrapper_error_occurred == True): snapshot_monitor.send_email(email_body=finished_email_body, email_subject_text_append="Had an error") else: snapshot_monitor.send_email(email_body=finished_email_body, email_subject_text_append="Finished") exit (application_monitor.error_code) # Start the application! application_thread() aws-crt-python-0.20.4+dfsg/codebuild/CanaryWrapper_Classes.py000066400000000000000000001672221456575232400242010ustar00rootroot00000000000000# Contains all of the classes that are shared across both the Canary Wrapper and the Persistent Canary Wrapper scripts # If a class can/is reused, then it should be in this file. 
# Needs to be installed prior to running import boto3 import psutil # Part of standard packages in Python 3.4+ import time import os import json import subprocess import zipfile import datetime # ================================================================================ # Class that holds metric data and has a few utility functions for getting that data in a format we can use for Cloudwatch class DataSnapshot_Metric(): def __init__(self, metric_name, metric_function, metric_dimensions=[], metric_unit="None", metric_alarm_threshold=None, metric_alarm_severity=6, git_hash="", git_repo_name="", reports_to_skip=0, is_percent=False): self.metric_name = metric_name self.metric_function = metric_function self.metric_dimensions = metric_dimensions self.metric_unit = metric_unit self.metric_alarm_threshold = metric_alarm_threshold self.metric_alarm_name = self.metric_name + "-" + git_repo_name + "-" + git_hash self.metric_alarm_description = 'Alarm for metric "' + self.metric_name + '" - git hash: ' + git_hash self.metric_value = None self.reports_to_skip = reports_to_skip self.metric_alarm_severity = metric_alarm_severity self.is_percent = is_percent # Gets the latest metric value from the metric_function callback def get_metric_value(self, psutil_process : psutil.Process): if not self.metric_function is None: self.metric_value = self.metric_function(psutil_process) return self.metric_value # Returns the data needed to send to Cloudwatch when posting metrics def get_metric_cloudwatch_dictionary(self): if (self.reports_to_skip > 0): self.reports_to_skip -= 1 return None # skips sending to Cloudwatch if (self.metric_value == None): return None # skips sending to Cloudwatch return { "MetricName": self.metric_name, "Dimensions": self.metric_dimensions, "Value": self.metric_value, "Unit": self.metric_unit } class DataSnapshot_Dashboard_Widget(): def __init__(self, widget_name, metric_namespace, metric_dimension, cloudwatch_region="us-east-1", widget_period=60) -> None: self.metric_list = [] self.region = cloudwatch_region self.widget_name = widget_name self.metric_namespace = metric_namespace self.metric_dimension = metric_dimension self.widget_period = widget_period def add_metric_to_widget(self, new_metric_name): try: self.metric_list.append(new_metric_name) except Exception as e: print ("[DataSnapshot_Dashboard] ERROR - could not add metric to dashboard widget due to exception!") print ("[DataSnapshot_Dashboard] Exception: " + str(e)) def remove_metric_from_widget(self, existing_metric_name): try: self.metric_list.remove(existing_metric_name) except Exception as e: print ("[DataSnapshot_Dashboard] ERROR - could not remove metric from dashboard widget due to exception!") print ("[DataSnapshot_Dashboard] Exception: " + str(e)) def get_widget_dictionary(self): metric_list_json = [] for metric_name in self.metric_list: metric_list_json.append([self.metric_namespace, metric_name, self.metric_dimension, metric_name]) return { "type":"metric", "properties" : { "metrics" : metric_list_json, "region": self.region, "title": self.widget_name, "period": self.widget_period, }, "width": 14, "height": 10 } # ================================================================================ # Class that keeps track of the metrics registered, sets up Cloudwatch and S3, and sends periodic reports # Is the backbone of the reporting operation class DataSnapshot(): def __init__(self, git_hash=None, git_repo_name=None, git_hash_as_namespace=False, git_fixed_namespace_text="mqtt5_canary", datetime_string=None, 
output_log_filepath=None, output_to_console=True, cloudwatch_region="us-east-1", cloudwatch_make_dashboard=False, cloudwatch_teardown_alarms_on_complete=True, cloudwatch_teardown_dashboard_on_complete=True, s3_bucket_name="canary-wrapper-bucket", s3_bucket_upload_on_complete=True, lambda_name="CanarySendEmailLambda", metric_frequency=None): # Setting initial values # ================== self.first_metric_call = True self.metrics = [] self.metrics_numbers = [] self.metric_report_number = 0 self.metric_report_non_zero_count = 4 # Needed so we can initialize Cloudwatch alarms, etc, outside of the init function # but before we start sending data. # This boolean tracks whether we have done the post-initialization prior to sending the first report. self.perform_final_initialization = True # Watched by the thread creating the snapshot. Will cause the thread(s) to abort and return an error. self.abort_due_to_internal_error = False self.abort_due_to_internal_error_reason = "" self.abort_due_to_internal_error_due_to_credentials = False self.git_hash = None self.git_repo_name = None self.git_hash_as_namespace = git_hash_as_namespace self.git_fixed_namespace_text = git_fixed_namespace_text self.git_metric_namespace = None self.cloudwatch_region = cloudwatch_region self.cloudwatch_client = None self.cloudwatch_make_dashboard = cloudwatch_make_dashboard self.cloudwatch_teardown_alarms_on_complete = cloudwatch_teardown_alarms_on_complete self.cloudwatch_teardown_dashboard_on_complete = cloudwatch_teardown_dashboard_on_complete self.cloudwatch_dashboard_name = "" self.cloudwatch_dashboard_widgets = [] self.s3_bucket_name = s3_bucket_name self.s3_client = None self.s3_bucket_upload_on_complete = s3_bucket_upload_on_complete self.output_to_file_filepath = output_log_filepath self.output_to_file = False self.output_file = None self.output_to_console = output_to_console self.lambda_client = None self.lambda_name = lambda_name self.datetime_string = datetime_string self.metric_frequency = metric_frequency # ================== # Check for valid credentials # ================== try: tmp_sts_client = boto3.client('sts') tmp_sts_client.get_caller_identity() except Exception as e: print ("[DataSnapshot] ERROR - AWS credentials are NOT valid!") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "AWS credentials are NOT valid!" self.abort_due_to_internal_error_due_to_credentials = True return # ================== # Git related stuff # ================== if (git_hash == None or git_repo_name == None): print("[DataSnapshot] ERROR - a Git hash and repository name are REQUIRED for the canary wrapper to run!") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "No Git hash and repository passed!" 
return self.git_hash = git_hash self.git_repo_name = git_repo_name if (self.git_hash_as_namespace == False): self.git_metric_namespace = self.git_fixed_namespace_text else: if (self.datetime_string == None): git_namespace_prepend_text = self.git_repo_name + "-" + self.git_hash else: git_namespace_prepend_text = self.git_repo_name + "/" + self.datetime_string + "-" + self.git_hash self.git_metric_namespace = git_namespace_prepend_text # ================== # Cloudwatch related stuff # ================== try: self.cloudwatch_client = boto3.client('cloudwatch', self.cloudwatch_region) self.cloudwatch_dashboard_name = self.git_metric_namespace except Exception as e: self.print_message("[DataSnapshot] ERROR - could not make Cloudwatch client due to exception!") self.print_message("[DataSnapshot] Exception: " + str(e)) self.cloudwatch_client = None self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not make Cloudwatch client!" return # ================== # S3 related stuff # ================== try: self.s3_client = boto3.client("s3") except Exception as e: self.print_message("[DataSnapshot] ERROR - could not make S3 client due to exception!") self.print_message("[DataSnapshot] Exception: " + str(e)) self.s3_client = None self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not make S3 client!" return # ================== # Lambda related stuff # ================== try: self.lambda_client = boto3.client("lambda", self.cloudwatch_region) except Exception as e: self.print_message("[DataSnapshot] ERROR - could not make Lambda client due to exception!") self.print_message("[DataSnapshot] Exception: " + str(e)) self.lambda_client = None self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not make Lambda client!" return # ================== # File output (logs) related stuff # ================== if (not output_log_filepath is None): self.output_to_file = True self.output_file = open(self.output_to_file_filepath, "w") else: self.output_to_file = False self.output_file = None # ================== self.print_message("[DataSnapshot] Data snapshot created!") # Cleans the class - closing any files, removing alarms, and sending data to S3. # Should be called at the end when you are totally finished shadowing metrics def cleanup(self, error_occurred=False): if (self.s3_bucket_upload_on_complete == True): self.export_result_to_s3_bucket(copy_output_log=True, log_is_error=error_occurred) self._cleanup_cloudwatch_alarms() if (self.cloudwatch_make_dashboard == True): self._cleanup_cloudwatch_dashboard() self.print_message("[DataSnapshot] Data snapshot cleaned!") if (self.output_file is not None): self.output_file.close() self.output_file = None # Utility function for printing messages def print_message(self, message): if self.output_to_file == True: self.output_file.write(message + "\n") if self.output_to_console == True: print(message, flush=True) # Utility function - adds the metric alarms to Cloudwatch. 
We do run this right before the first # collection of metrics so we can register metrics before we initialize Cloudwatch def _init_cloudwatch_pre_first_run(self): for metric in self.metrics: if (not metric.metric_alarm_threshold is None): self._add_cloudwatch_metric_alarm(metric) if (self.cloudwatch_make_dashboard == True): self._init_cloudwatch_pre_first_run_dashboard() # Utility function - adds the Cloudwatch Dashboard for the currently running data snapshot def _init_cloudwatch_pre_first_run_dashboard(self): try: # Remove the old dashboard if it exists before adding a new one self._cleanup_cloudwatch_dashboard() new_dashboard_widgets_array = [] for widget in self.cloudwatch_dashboard_widgets: new_dashboard_widgets_array.append(widget.get_widget_dictionary()) new_dashboard_body = { "start": "-PT1H", "widgets": new_dashboard_widgets_array, } new_dashboard_body_json = json.dumps(new_dashboard_body) self.cloudwatch_client.put_dashboard( DashboardName=self.cloudwatch_dashboard_name, DashboardBody= new_dashboard_body_json) self.print_message("[DataSnapshot] Added Cloudwatch dashboard successfully") except Exception as e: self.print_message("[DataSnapshot] ERROR - Cloudwatch client could not make dashboard due to exception!") self.print_message("[DataSnapshot] Exception: " + str(e)) self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Cloudwatch client could not make dashboard due to exception" return # Utility function - The function that adds each individual metric alarm. def _add_cloudwatch_metric_alarm(self, metric): if self.cloudwatch_client is None: self.print_message("[DataSnapshot] ERROR - Cloudwatch client not setup. Cannot register alarm") return try: self.cloudwatch_client.put_metric_alarm( AlarmName=metric.metric_alarm_name, AlarmDescription=metric.metric_alarm_description, MetricName=metric.metric_name, Namespace=self.git_metric_namespace, Statistic="Maximum", Dimensions=metric.metric_dimensions, Period=60, # How long (in seconds) is an evaluation period? EvaluationPeriods=120, # How many periods does it need to be invalid for? DatapointsToAlarm=1, # How many data points need to be invalid? 
                Threshold=metric.metric_alarm_threshold,
                ComparisonOperator="GreaterThanOrEqualToThreshold",
            )
        except Exception as e:
            self.print_message("[DataSnapshot] ERROR - could not register alarm for metric due to exception: " + metric.metric_name)
            self.print_message("[DataSnapshot] Exception: " + str(e))

    # Utility function - removes all the Cloudwatch alarms for the metrics
    def _cleanup_cloudwatch_alarms(self):
        if (self.cloudwatch_teardown_alarms_on_complete == True):
            try:
                for metric in self.metrics:
                    if (not metric.metric_alarm_threshold is None):
                        self.cloudwatch_client.delete_alarms(AlarmNames=[metric.metric_alarm_name])
            except Exception as e:
                self.print_message("[DataSnapshot] ERROR - could not delete alarms due to exception!")
                self.print_message("[DataSnapshot] Exception: " + str(e))

    # Utility function - removes all Cloudwatch dashboards created
    def _cleanup_cloudwatch_dashboard(self):
        if (self.cloudwatch_teardown_dashboard_on_complete == True):
            try:
                self.cloudwatch_client.delete_dashboards(DashboardNames=[self.cloudwatch_dashboard_name])
                self.print_message("[DataSnapshot] Cloudwatch Dashboards deleted successfully!")
            except Exception as e:
                self.print_message("[DataSnapshot] ERROR - dashboard cleaning function failed due to exception!")
                self.print_message("[DataSnapshot] Exception: " + str(e))
                self.abort_due_to_internal_error = True
                self.abort_due_to_internal_error_reason = "Cloudwatch dashboard cleaning function failed due to exception"
                return

    # Returns the results of the metric alarms. Will return a list containing tuples with the following structure:
    # [Boolean (False = the alarm is in the ALARM state), String (Name of the alarm that is in the ALARM state), int (severity of alarm)]
    # Currently this function will only return a list of failed alarms, so if the returned list is empty, then it means all
    # alarms did not get to the ALARM state in Cloudwatch for the registered metrics
    def get_cloudwatch_alarm_results(self):
        return self._check_cloudwatch_alarm_states()

    # Utility function - collects the metric alarm results and returns them in a list.
    def _check_cloudwatch_alarm_states(self):
        return_result_list = []
        tmp = None
        for metric in self.metrics:
            tmp = self._check_cloudwatch_alarm_state_metric(metric)
            if (tmp[1] != None):
                # Do not cut a ticket for the "Alive_Alarm" that we use to check if the Canary is running
                if ("Alive_Alarm" not in tmp[1]):
                    if (tmp[0] != True):
                        return_result_list.append(tmp)
        return return_result_list

    # Utility function - checks each individual alarm and returns a tuple with the following format:
    # [Boolean (False if the alarm is in the ALARM state, otherwise it is true), String (name of the alarm), Int (severity of alarm)]
    def _check_cloudwatch_alarm_state_metric(self, metric):
        alarms_response = self.cloudwatch_client.describe_alarms_for_metric(
            MetricName=metric.metric_name,
            Namespace=self.git_metric_namespace,
            Dimensions=metric.metric_dimensions)
        return_result = [True, None, metric.metric_alarm_severity]
        for metric_alarm_dict in alarms_response["MetricAlarms"]:
            if metric_alarm_dict["StateValue"] == "ALARM":
                return_result[0] = False
                return_result[1] = metric_alarm_dict["AlarmName"]
                break
        return return_result

    # Exports a file with the same name as the commit Git hash to an S3 bucket in a folder with the Git repo name.
    # By default, this file will only contain the Git hash.
    # If copy_output_log is true, then the output log will be copied into this file, which may be useful for debugging.
    def export_result_to_s3_bucket(self, copy_output_log=False, log_is_error=False):
        if (self.s3_client is None):
            self.print_message("[DataSnapshot] ERROR - No S3 client initialized! Cannot send log to S3")
            self.abort_due_to_internal_error = True
            self.abort_due_to_internal_error_reason = "S3 client not initialized and therefore cannot send log to S3"
            return

        s3_file = open(self.git_hash + ".log", "w")
        s3_file.write(self.git_hash)

        # Might be useful for debugging?
        if (copy_output_log == True and self.output_to_file == True):
            # Are we still writing? If so, then we need to close the file first so everything is written to it
            is_output_file_open_previously = False
            if (self.output_file != None):
                self.output_file.close()
                is_output_file_open_previously = True
            self.output_file = open(self.output_to_file_filepath, "r")

            s3_file.write("\n\nOUTPUT LOG\n")
            s3_file.write("==========================================================================================\n")
            output_file_lines = self.output_file.readlines()
            for line in output_file_lines:
                s3_file.write(line)

            self.output_file.close()
            # If we were writing to the output previously, then we need to re-open it in append mode so we can continue to write to it
            if (is_output_file_open_previously == True):
                self.output_file = open(self.output_to_file_filepath, "a")

        s3_file.close()

        # Upload to S3
        try:
            if (log_is_error == False):
                if (self.datetime_string == None):
                    self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/" + self.git_hash + ".log")
                else:
                    self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/" + self.datetime_string + "/" + self.git_hash + ".log")
            else:
                if (self.datetime_string == None):
                    self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/Failed_Logs/" + self.git_hash + ".log")
                else:
                    self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/Failed_Logs/" + self.datetime_string + "/" + self.git_hash + ".log")
            self.print_message("[DataSnapshot] Uploaded to S3!")
        except Exception as e:
            self.print_message("[DataSnapshot] ERROR - could not upload to S3 due to exception!")
            self.print_message("[DataSnapshot] Exception: " + str(e))
            self.abort_due_to_internal_error = True
            self.abort_due_to_internal_error_reason = "S3 client had exception and therefore could not upload log!"
            os.remove(self.git_hash + ".log")
            return

        # Delete the file when finished
        os.remove(self.git_hash + ".log")

    # Sends an email via a special lambda. The payload has to contain a message and a subject
    # * (REQUIRED) message is the message you want to send in the body of the email
    # * (REQUIRED) subject is the subject that the email will be sent with
    def lambda_send_email(self, message, subject):
        payload = {"Message": message, "Subject": subject}
        payload_string = json.dumps(payload)
        try:
            self.lambda_client.invoke(
                FunctionName=self.lambda_name,
                InvocationType="Event",
                ClientContext="MQTT Wrapper Script",
                Payload=payload_string
            )
        except Exception as e:
            self.print_message("[DataSnapshot] ERROR - could not send email via Lambda due to exception!")
            self.print_message("[DataSnapshot] Exception: " + str(e))
            self.abort_due_to_internal_error = True
            self.abort_due_to_internal_error_reason = "Lambda email function had an exception!"
            return

    # Registers a metric to be polled by the Snapshot.
    # * (REQUIRED) new_metric_name is the name of the metric.
Cloudwatch will use this name # * (REQUIRED) new_metric_function is expected to be a pointer to a Python function and will not work if you pass a value/object # * (OPTIONAL) new_metric_unit is the metric unit. There is a list of possible metric unit types on the Boto3 documentation for Cloudwatch # * (OPTIONAL) new_metric_alarm_threshold is the value that the metric has to exceed in order to be registered as an alarm # * (OPTIONAL) new_reports_to_skip is the number of reports this metric will return nothing, but will get it's value. # * Useful for CPU calculations that require deltas # * (OPTIONAL) new_metric_alarm_severity is the severity of the ticket if this alarm is triggered. A severity of 6+ means no ticket. # * (OPTIONAL) is_percent whether or not to display the metric as a percent when printing it (default=false) def register_metric(self, new_metric_name, new_metric_function, new_metric_unit="None", new_metric_alarm_threshold=None, new_metric_reports_to_skip=0, new_metric_alarm_severity=6, is_percent=False): new_metric_dimensions = [] if (self.git_hash_as_namespace == False): git_namespace_prepend_text = self.git_repo_name + "-" + self.git_hash new_metric_dimensions.append( {"Name": git_namespace_prepend_text, "Value": new_metric_name}) else: new_metric_dimensions.append( {"Name": "System_Metrics", "Value": new_metric_name}) new_metric = DataSnapshot_Metric( metric_name=new_metric_name, metric_function=new_metric_function, metric_dimensions=new_metric_dimensions, metric_unit=new_metric_unit, metric_alarm_threshold=new_metric_alarm_threshold, metric_alarm_severity=new_metric_alarm_severity, git_hash=self.git_hash, git_repo_name=self.git_repo_name, reports_to_skip=new_metric_reports_to_skip, is_percent=is_percent ) self.metrics.append(new_metric) # append an empty list so we can track it's metrics over time self.metrics_numbers.append([]) def register_dashboard_widget(self, new_widget_name, metrics_to_add=[], new_widget_period=60): # We need to know what metric dimension to get the metric(s) from metric_dimension_string = "" if (self.git_hash_as_namespace == False): metric_dimension_string = self.git_repo_name + "-" + self.git_hash else: metric_dimension_string = "System_Metrics" widget = self._find_cloudwatch_widget(name=new_widget_name) if (widget == None): widget = DataSnapshot_Dashboard_Widget( widget_name=new_widget_name, metric_namespace=self.git_metric_namespace, metric_dimension=metric_dimension_string, cloudwatch_region=self.cloudwatch_region, widget_period=new_widget_period) self.cloudwatch_dashboard_widgets.append(widget) for metric in metrics_to_add: self.register_metric_to_dashboard_widget(widget_name=new_widget_name, metric_name=metric) def register_metric_to_dashboard_widget(self, widget_name, metric_name, widget=None): if widget is None: widget = self._find_cloudwatch_widget(name=widget_name) if widget is None: print ("[DataSnapshot] ERROR - could not find widget with name: " + widget_name, flush=True) return # Adjust metric name so it has the git hash, repo, etc metric_name_formatted = metric_name widget.add_metric_to_widget(new_metric_name=metric_name_formatted) return def remove_metric_from_dashboard_widget(self, widget_name, metric_name, widget=None): if widget is None: widget = self._find_cloudwatch_widget(name=widget_name) if widget is None: print ("[DataSnapshot] ERROR - could not find widget with name: " + widget_name, flush=True) return widget.remove_metric_from_widget(existing_metric_name=metric_name) return def _find_cloudwatch_widget(self, name): result = 
None for widget in self.cloudwatch_dashboard_widgets: if widget.widget_name == name: return widget return result # Prints the metrics to the console def export_metrics_console(self): datetime_now = datetime.datetime.now() datetime_string = datetime_now.strftime("%d-%m-%Y/%H:%M:%S") self.print_message("\n[DataSnapshot] Metric report: " + str(self.metric_report_number) + " (" + datetime_string + ")") for metric in self.metrics: if (metric.is_percent == True): self.print_message(" " + metric.metric_name + " - value: " + str(metric.metric_value) + "%") else: self.print_message(" " + metric.metric_name + " - value: " + str(metric.metric_value)) self.print_message("") # Sends all registered metrics to Cloudwatch. # Does NOT need to called on loop. Call post_metrics on loop to send all the metrics as expected. # This is just the Cloudwatch part of that loop. def export_metrics_cloudwatch(self): if (self.cloudwatch_client == None): self.print_message("[DataSnapshot] Error - cannot export Cloudwatch metrics! Cloudwatch was not initialized.") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not export Cloudwatch metrics due to no Cloudwatch client initialized!" return self.print_message("[DataSnapshot] Preparing to send to Cloudwatch...") metrics_data = [] metric_data_tmp = None for metric in self.metrics: metric_data_tmp = metric.get_metric_cloudwatch_dictionary() if (not metric_data_tmp is None): metrics_data.append(metric_data_tmp) if (len(metrics_data) == 0): self.print_message("[DataSnapshot] INFO - no metric data to send. Skipping...") return try: self.cloudwatch_client.put_metric_data( Namespace=self.git_metric_namespace, MetricData=metrics_data) self.print_message("[DataSnapshot] Metrics sent to Cloudwatch.") except Exception as e: self.print_message("[DataSnapshot] Error - something when wrong posting cloudwatch metrics!") self.print_message("[DataSnapshot] Exception: " + str(e)) self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not export Cloudwatch metrics due to exception in Cloudwatch client!" return # Call this at a set interval to post the metrics to Cloudwatch, etc. # This is the function you want to call repeatedly after you have everything setup. def post_metrics(self, psutil_process : psutil.Process): if (self.perform_final_initialization == True): self.perform_final_initialization = False self._init_cloudwatch_pre_first_run() # Update the metric values internally for i in range(0, len(self.metrics)): metric_value = self.metrics[i].get_metric_value(psutil_process) self.metrics_numbers[i].insert(0, metric_value) # Only keep the last metric_report_non_zero_count results if (len(self.metrics_numbers[i]) > self.metric_report_non_zero_count): amount_to_delete = len(self.metrics_numbers[i]) - self.metric_report_non_zero_count del self.metrics_numbers[i][-amount_to_delete:] # If we have metric_report_non_zero_count amount of metrics, make sure there is at least one # non-zero. 
If it is all zero, then print a log so we can easily find it if (len(self.metrics_numbers[i]) == self.metric_report_non_zero_count): non_zero_found = False for j in range(0, len(self.metrics_numbers[i])): if (self.metrics_numbers[i][j] != 0.0 and self.metrics_numbers[i][j] != None): non_zero_found = True break if (non_zero_found == False): self.print_message("\n[DataSnapshot] METRIC ZERO ERROR!") self.print_message(f"[DataSnapshot] Metric index {i} has been zero for last {self.metric_report_non_zero_count} reports!") self.print_message("\n") self.metric_report_number += 1 self.export_metrics_console() self.export_metrics_cloudwatch() def output_diagnosis_information(self, dependencies_list): # Print general diagnosis information self.print_message("\n========== Canary Wrapper diagnosis information ==========") self.print_message("\nRunning Canary for repository: " + self.git_repo_name) self.print_message("\t Commit hash: " + self.git_hash) if not dependencies_list == "": self.print_message("\nDependencies:") dependencies_list = dependencies_list.split(";") dependencies_list_found_hash = False for i in range(0, len(dependencies_list)): # There's probably a better way to do this... if (dependencies_list_found_hash == True): dependencies_list_found_hash = False continue self.print_message("* " + dependencies_list[i]) if (i+1 < len(dependencies_list)): self.print_message("\t Commit hash: " + dependencies_list[i+1]) dependencies_list_found_hash = True else: self.print_message("\t Commit hash: Unknown") if (self.metric_frequency != None): self.print_message("\nMetric Snapshot Frequency: " + str(self.metric_frequency) + " seconds") self.print_message("\nMetrics:") for metric in self.metrics: self.print_message("* " + metric.metric_name) if metric.metric_alarm_threshold is not None: self.print_message("\t Alarm Threshold: " + str(metric.metric_alarm_threshold)) self.print_message("\t Alarm Severity: " + str(metric.metric_alarm_severity)) else: self.print_message("\t No alarm set for metric.") self.print_message("\n") self.print_message("==========================================================") self.print_message("\n") # ================================================================================ class SnapshotMonitor(): def __init__(self, wrapper_data_snapshot, wrapper_metrics_wait_time) -> None: self.data_snapshot = wrapper_data_snapshot self.had_internal_error = False self.error_due_to_credentials = False self.internal_error_reason = "" self.error_due_to_alarm = False self.can_cut_ticket = False self.has_cut_ticket = False # A list of all the alarms triggered in the last check, cached for later # NOTE - this is only the alarm names! Not the severity. This just makes it easier to process self.cloudwatch_current_alarms_triggered = [] # Check for errors if (self.data_snapshot.abort_due_to_internal_error == True): self.had_internal_error = True self.internal_error_reason = "Could not initialize DataSnapshot. Likely credentials are not setup!" 
if (self.data_snapshot.abort_due_to_internal_error_due_to_credentials == True): self.error_due_to_credentials = True self.data_snapshot.cleanup() return # How long to wait before posting a metric self.metric_post_timer = 0 self.metric_post_timer_time = wrapper_metrics_wait_time def register_metric(self, new_metric_name, new_metric_function, new_metric_unit="None", new_metric_alarm_threshold=None, new_metric_reports_to_skip=0, new_metric_alarm_severity=6): try: self.data_snapshot.register_metric( new_metric_name=new_metric_name, new_metric_function=new_metric_function, new_metric_unit=new_metric_unit, new_metric_alarm_threshold=new_metric_alarm_threshold, new_metric_reports_to_skip=new_metric_reports_to_skip, new_metric_alarm_severity=new_metric_alarm_severity) except Exception as e: self.print_message("[SnaptshotMonitor] ERROR - could not register metric in data snapshot due to exception!") self.print_message("[SnaptshotMonitor] Exception: " + str(e)) self.had_internal_error = True self.internal_error_reason = "Could not register metric in data snapshot due to exception" return def register_dashboard_widget(self, new_widget_name, metrics_to_add=[], widget_period=60): self.data_snapshot.register_dashboard_widget(new_widget_name=new_widget_name, metrics_to_add=metrics_to_add, new_widget_period=widget_period) def output_diagnosis_information(self, dependencies=""): self.data_snapshot.output_diagnosis_information(dependencies_list=dependencies) def check_alarms_for_new_alarms(self, triggered_alarms): if len(triggered_alarms) > 0: self.data_snapshot.print_message( "WARNING - One or more alarms are in state of ALARM") old_alarms_still_active = [] new_alarms = [] new_alarms_highest_severity = 6 new_alarm_found = True new_alarm_ticket_description = "Canary has metrics in ALARM state!\n\nMetrics in alarm:\n" for triggered_alarm in triggered_alarms: new_alarm_found = True # Is this a new alarm? for old_alarm_name in self.cloudwatch_current_alarms_triggered: if (old_alarm_name == triggered_alarm[1]): new_alarm_found = False old_alarms_still_active.append(triggered_alarm[1]) new_alarm_ticket_description += "* (STILL IN ALARM) " + triggered_alarm[1] + "\n" new_alarm_ticket_description += "\tSeverity: " + str(triggered_alarm[2]) new_alarm_ticket_description += "\n" break # If it is a new alarm, then add it to our list so we can cut a new ticket if (new_alarm_found == True): self.data_snapshot.print_message(' (NEW) Alarm with name "' + triggered_alarm[1] + '" is in the ALARM state!') new_alarms.append(triggered_alarm[1]) if (triggered_alarm[2] < new_alarms_highest_severity): new_alarms_highest_severity = triggered_alarm[2] new_alarm_ticket_description += "* " + triggered_alarm[1] + "\n" new_alarm_ticket_description += "\tSeverity: " + str(triggered_alarm[2]) new_alarm_ticket_description += "\n" if len(new_alarms) > 0: if (self.can_cut_ticket == True): cut_ticket_using_cloudwatch( git_repo_name=self.data_snapshot.git_repo_name, git_hash=self.data_snapshot.git_hash, git_hash_as_namespace=False, git_fixed_namespace_text=self.data_snapshot.git_fixed_namespace_text, cloudwatch_region="us-east-1", ticket_description="New metric(s) went into alarm for the Canary! 
Metrics in alarm: " + str(new_alarms), ticket_reason="New metric(s) went into alarm", ticket_allow_duplicates=True, ticket_category="AWS", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_type="SDKs and Tools", ticket_severity=4) self.has_cut_ticket = True # Cache the new alarms and the old alarms self.cloudwatch_current_alarms_triggered = old_alarms_still_active + new_alarms else: self.cloudwatch_current_alarms_triggered.clear() def monitor_loop_function(self, psutil_process : psutil.Process, time_passed=30): # Check for internal errors if (self.data_snapshot.abort_due_to_internal_error == True): self.had_internal_error = True self.internal_error_reason = "Data Snapshot internal error: " + self.data_snapshot.abort_due_to_internal_error_reason return try: # Poll the metric alarms if (self.had_internal_error == False): # Get a report of all the alarms that might have been set to an alarm state triggered_alarms = self.data_snapshot.get_cloudwatch_alarm_results() self.check_alarms_for_new_alarms(triggered_alarms) except Exception as e: self.print_message("[SnaptshotMonitor] ERROR - exception occurred checking metric alarms!") self.print_message("[SnaptshotMonitor] (Likely session credentials expired)") self.had_internal_error = True self.internal_error_reason = "Exception occurred checking metric alarms! Likely session credentials expired" return if (self.metric_post_timer <= 0): if (self.had_internal_error == False): try: self.data_snapshot.post_metrics(psutil_process) except Exception as e: self.print_message("[SnaptshotMonitor] ERROR - exception occurred posting metrics!") self.print_message("[SnaptshotMonitor] (Likely session credentials expired)") print (e, flush=True) self.had_internal_error = True self.internal_error_reason = "Exception occurred posting metrics! 
Likely session credentials expired" return # reset the timer self.metric_post_timer += self.metric_post_timer_time # Gather and post the metrics self.metric_post_timer -= time_passed def send_email(self, email_body, email_subject_text_append=None): if (email_subject_text_append != None): self.data_snapshot.lambda_send_email(email_body, "Canary: " + self.data_snapshot.git_repo_name + ":" + self.data_snapshot.git_hash + " - " + email_subject_text_append) else: self.data_snapshot.lambda_send_email(email_body, "Canary: " + self.data_snapshot.git_repo_name + ":" + self.data_snapshot.git_hash) def stop_monitoring(self): # Stub - just added for consistency pass def start_monitoring(self): # Stub - just added for consistency pass def restart_monitoring(self): # Stub - just added for consistency pass def cleanup_monitor(self, error_occurred=False): self.data_snapshot.cleanup(error_occurred=error_occurred) def print_message(self, message): if (self.data_snapshot != None): self.data_snapshot.print_message(message) else: print(message, flush=True) # ================================================================================ class ApplicationMonitor(): def __init__(self, wrapper_application_path, wrapper_application_arguments, wrapper_application_restart_on_finish=True, data_snapshot=None) -> None: self.application_process = None self.application_process_psutil = None self.error_has_occurred = False self.error_due_to_credentials = False self.error_reason = "" self.error_code = 0 self.wrapper_application_path = wrapper_application_path self.wrapper_application_arguments = wrapper_application_arguments self.wrapper_application_restart_on_finish = wrapper_application_restart_on_finish self.data_snapshot=data_snapshot self.stdout_file_path = "Canary_Stdout_File.txt" def start_monitoring(self): self.print_message("[ApplicationMonitor] Starting to monitor application...") if (self.application_process == None): try: canary_command = self.wrapper_application_path + " " + self.wrapper_application_arguments self.application_process = subprocess.Popen(canary_command + " | tee " + self.stdout_file_path, shell=True) self.application_process_psutil = psutil.Process(self.application_process.pid) self.print_message ("[ApplicationMonitor] Application started...") except Exception as e: self.print_message ("[ApplicationMonitor] ERROR - Could not launch Canary/Application due to exception!") self.print_message ("[ApplicationMonitor] Exception: " + str(e)) self.error_has_occurred = True self.error_reason = "Could not launch Canary/Application due to exception" self.error_code = 1 return else: self.print_message("[ApplicationMonitor] ERROR - Monitor already has an application process! 
Cannot monitor two applications with one monitor class!") def restart_monitoring(self): self.print_message ("[ApplicationMonitor] Restarting monitor application...") if (self.application_process != None): try: self.stop_monitoring() self.start_monitoring() self.print_message("\n[ApplicationMonitor] Restarted monitor application!") self.print_message("================================================================================") except Exception as e: self.print_message("[ApplicationMonitor] ERROR - Could not restart Canary/Application due to exception!") self.print_message("[ApplicationMonitor] Exception: " + str(e)) self.error_has_occurred = True self.error_reason = "Could not restart Canary/Application due to exception" self.error_code = 1 return else: self.print_message("[ApplicationMonitor] ERROR - Application process restart called but process is/was not running!") self.error_has_occurred = True self.error_reason = "Could not restart Canary/Application due to application process not being started initially" self.error_code = 1 return def stop_monitoring(self): self.print_message ("[ApplicationMonitor] Stopping monitor application...") if (not self.application_process == None): self.application_process.terminate() self.application_process.wait() self.print_message ("[ApplicationMonitor] Stopped monitor application!") self.application_process = None self.print_stdout() else: self.print_message ("[ApplicationMonitor] ERROR - cannot stop monitor application because no process is found!") def print_stdout(self): # Print the STDOUT file if (os.path.isfile(self.stdout_file_path)): self.print_message("Just finished Application STDOUT: ") with open(self.stdout_file_path, "r") as stdout_file: self.print_message(stdout_file.read()) os.remove(self.stdout_file_path) def monitor_loop_function(self, time_passed=30): if (self.application_process != None): application_process_return_code = None try: application_process_return_code = self.application_process.poll() except Exception as e: self.print_message("[ApplicationMonitor] ERROR - exception occurred while trying to poll application status!") self.print_message("[ApplicationMonitor] Exception: " + str(e)) self.error_has_occurred = True self.error_reason = "Exception when polling application status" self.error_code = 1 return # If it is not none, then the application finished if (application_process_return_code != None): self.print_message("[ApplicationMonitor] Monitor application has stopped! Processing result...") if (application_process_return_code != 0): self.print_message("[ApplicationMonitor] ERROR - Something Crashed in Canary/Application!") self.print_message("[ApplicationMonitor] Error code: " + str(application_process_return_code)) self.error_has_occurred = True self.error_reason = "Canary application crashed!" self.error_code = application_process_return_code else: # Should we restart? if (self.wrapper_application_restart_on_finish == True): self.print_message("[ApplicationMonitor] NOTE - Canary finished running and is restarting...") self.restart_monitoring() else: self.print_message("[ApplicationMonitor] Monitor application has stopped and monitor is not supposed to restart... 
Finishing...") self.error_has_occurred = True self.error_reason = "Canary Application Finished" self.error_code = 0 else: self.print_message("[ApplicationMonitor] Monitor application is still running...") def cleanup_monitor(self, error_occurred=False): pass def print_message(self, message): if (self.data_snapshot != None): self.data_snapshot.print_message(message) else: print(message, flush=True) # ================================================================================ class S3Monitor(): def __init__(self, s3_bucket_name, s3_file_name, s3_file_name_in_zip, canary_local_application_path, data_snapshot) -> None: self.s3_client = None self.s3_current_object_version_id = None self.s3_current_object_last_modified = None self.s3_bucket_name = s3_bucket_name self.s3_file_name = s3_file_name self.s3_file_name_only_path, self.s3_file_name_only_extension = os.path.splitext(s3_file_name) self.data_snapshot = data_snapshot self.canary_local_application_path = canary_local_application_path self.s3_file_name_in_zip = s3_file_name_in_zip self.s3_file_name_in_zip_only_path = None self.s3_file_name_in_zip_only_extension = None if (self.s3_file_name_in_zip != None): self.s3_file_name_in_zip_only_path, self.s3_file_name_in_zip_only_extension = os.path.splitext(s3_file_name_in_zip) self.s3_file_needs_replacing = False self.had_internal_error = False self.error_due_to_credentials = False self.internal_error_reason = "" # Check for valid credentials # ================== try: tmp_sts_client = boto3.client('sts') tmp_sts_client.get_caller_identity() except Exception as e: self.print_message("[S3Monitor] ERROR - (S3 Check) AWS credentials are NOT valid!") self.had_internal_error = True self.error_due_to_credentials = True self.internal_error_reason = "AWS credentials are NOT valid!" 
return # ================== try: self.s3_client = boto3.client("s3") except Exception as e: self.print_message("[S3Monitor] ERROR - (S3 Check) Could not make S3 client") self.had_internal_error = True self.internal_error_reason = "Could not make S3 client for S3 Monitor" return def check_for_file_change(self): try: version_check_response = self.s3_client.list_object_versions( Bucket=self.s3_bucket_name, Prefix=self.s3_file_name_only_path) if "Versions" in version_check_response: for version in version_check_response["Versions"]: if (version["IsLatest"] == True): if (version["VersionId"] != self.s3_current_object_version_id or version["LastModified"] != self.s3_current_object_last_modified): self.print_message("[S3Monitor] Found new version of Canary/Application in S3!") self.print_message("[S3Monitor] Changing running Canary/Application to new one...") # Will be checked by thread to trigger replacing the file self.s3_file_needs_replacing = True self.s3_current_object_version_id = version["VersionId"] self.s3_current_object_last_modified = version["LastModified"] return except Exception as e: self.print_message("[S3Monitor] ERROR - Could not check for new version of file in S3 due to exception!") self.print_message("[S3Monitor] Exception: " + str(e)) self.had_internal_error = True self.internal_error_reason = "Could not check for S3 file due to exception in S3 client" def replace_current_file_for_new_file(self): try: self.print_message("[S3Monitor] Making directory...") if not os.path.exists("tmp"): os.makedirs("tmp") except Exception as e: self.print_message ("[S3Monitor] ERROR - could not make tmp directory to place S3 file into!") self.had_internal_error = True self.internal_error_reason = "Could not make TMP folder for S3 file download" return # Download the file new_file_path = "tmp/new_file" + self.s3_file_name_only_extension try: self.print_message("[S3Monitor] Downloading file...") s3_resource = boto3.resource("s3") s3_resource.meta.client.download_file(self.s3_bucket_name, self.s3_file_name, new_file_path) except Exception as e: self.print_message("[S3Monitor] ERROR - could not download latest S3 file into TMP folder!") self.had_internal_error = True self.internal_error_reason = "Could not download latest S3 file into TMP folder" return # Is it a zip file? if (self.s3_file_name_in_zip != None): self.print_message("[S3Monitor] New file is zip file. Unzipping...") # Unzip it! with zipfile.ZipFile(new_file_path, 'r') as zip_file: zip_file.extractall("tmp/new_file_zip") new_file_path = "tmp/new_file_zip/" + self.s3_file_name_in_zip_only_path + self.s3_file_name_in_zip_only_extension try: # is there a file already present there? 
if os.path.exists(self.canary_local_application_path) == True: os.remove(self.canary_local_application_path) self.print_message("[S3Monitor] Moving file...") os.replace(new_file_path, self.canary_local_application_path) self.print_message("[S3Monitor] Getting execution rights...") os.system("chmod u+x " + self.canary_local_application_path) except Exception as e: self.print_message("[S3Monitor] ERROR - could not move file into local application path due to exception!") self.print_message("[S3Monitor] Exception: " + str(e)) self.had_internal_error = True self.internal_error_reason = "Could not move file into local application path" return self.print_message("[S3Monitor] New file downloaded and moved into correct location!") self.s3_file_needs_replacing = False def stop_monitoring(self): # Stub - just added for consistency pass def start_monitoring(self): # Stub - just added for consistency pass def restart_monitoring(self): # Stub - just added for consistency pass def cleanup_monitor(self): # Stub - just added for consistency pass def monitor_loop_function(self, time_passed=30): self.check_for_file_change() def print_message(self, message): if (self.data_snapshot != None): self.data_snapshot.print_message(message) else: print(message, flush=True) # ================================================================================ # Cuts a ticket to SIM using a temporary Cloudwatch metric that is quickly created, triggered, and destroyed. # Can be called in any thread - creates its own Cloudwatch client and any data it needs is passed in. # # See (https://w.amazon.com/bin/view/CloudWatchAlarms/Internal/CloudWatchAlarmsSIMTicketing) for more details # on how the alarm is sent using Cloudwatch. def cut_ticket_using_cloudwatch( ticket_description="Description here!", ticket_reason="Reason here!", ticket_severity=5, ticket_category="AWS", ticket_type="SDKs and Tools", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_allow_duplicates=False, git_repo_name="REPO NAME", git_hash="HASH", git_hash_as_namespace=False, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1"): git_metric_namespace = "" if (git_hash_as_namespace == False): git_metric_namespace = git_fixed_namespace_text else: git_namespace_prepend_text = git_repo_name + "-" + git_hash git_metric_namespace = git_namespace_prepend_text try: cloudwatch_client = boto3.client('cloudwatch', cloudwatch_region) ticket_alarm_name = git_repo_name + "-" + git_hash + "-AUTO-TICKET" except Exception as e: print ("ERROR - could not create Cloudwatch client to make ticket metric alarm due to exception!") print ("Exception: " + str(e), flush=True) return new_metric_dimensions = [] if (git_hash_as_namespace == False): git_namespace_prepend_text = git_repo_name + "-" + git_hash new_metric_dimensions.append( {"Name": git_namespace_prepend_text, "Value": ticket_alarm_name}) else: new_metric_dimensions.append( {"Name": "System_Metrics", "Value": ticket_alarm_name}) ticket_arn = f"arn:aws:cloudwatch::cwa-internal:ticket:{ticket_severity}:{ticket_category}:{ticket_type}:{ticket_item}:{ticket_group}:" if (ticket_allow_duplicates == True): # use "DO-NOT-DEDUPE" so we can run the same commit again and it will cut another ticket. 
ticket_arn += "DO-NOT-DEDUPE" # In the ticket ARN, all spaces need to be replaced with + ticket_arn = ticket_arn.replace(" ", "+") ticket_alarm_description = f"AUTO CUT CANARY WRAPPER TICKET\n\nREASON: {ticket_reason}\n\nDESCRIPTION: {ticket_description}\n\n" # Register a metric alarm so it can auto-cut a ticket for us try: cloudwatch_client.put_metric_alarm( AlarmName=ticket_alarm_name, AlarmDescription=ticket_alarm_description, MetricName=ticket_alarm_name, Namespace=git_metric_namespace, Statistic="Maximum", Dimensions=new_metric_dimensions, Period=60, # How long (in seconds) is an evaluation period? EvaluationPeriods=1, # How many periods does it need to be invalid for? DatapointsToAlarm=1, # How many data points need to be invalid? Threshold=1, ComparisonOperator="GreaterThanOrEqualToThreshold", # The data above does not really matter - it just needs to be valid input data. # This is the part that tells Cloudwatch to cut the ticket AlarmActions=[ticket_arn] ) except Exception as e: print ("ERROR - could not create ticket metric alarm due to exception!") print ("Exception: " + str(e), flush=True) return # Trigger the alarm so it cuts the ticket try: cloudwatch_client.set_alarm_state( AlarmName=ticket_alarm_name, StateValue="ALARM", StateReason="AUTO TICKET CUT") except Exception as e: print ("ERROR - could not cut ticket due to exception!") print ("Exception: " + str(e), flush=True) return print("Waiting for ticket metric to trigger...", flush=True) # Wait a little bit (2 seconds)... time.sleep(2) # Remove the metric print("Removing ticket metric...", flush=True) cloudwatch_client.delete_alarms(AlarmNames=[ticket_alarm_name]) print ("Finished cutting ticket via Cloudwatch!", flush=True) return # A helper function that gets the majority of the ticket information from the arguments result from argparser. def cut_ticket_using_cloudwatch_from_args( ticket_description="", ticket_reason="", ticket_severity=6, arguments=None): # Do not cut a ticket for a severity of 6+ if (ticket_severity >= 6): return cut_ticket_using_cloudwatch( ticket_description=ticket_description, ticket_reason=ticket_reason, ticket_severity=ticket_severity, ticket_category=arguments.ticket_category, ticket_type=arguments.ticket_type, ticket_item=arguments.ticket_item, ticket_group=arguments.ticket_group, ticket_allow_duplicates=False, git_repo_name=arguments.git_repo_name, git_hash=arguments.git_hash, git_hash_as_namespace=arguments.git_hash_as_namespace) aws-crt-python-0.20.4+dfsg/codebuild/CanaryWrapper_MetricFunctions.py000066400000000000000000000033661456575232400257160ustar00rootroot00000000000000# Contains all of the metric reporting functions for the Canary Wrappers # Needs to be installed prior to running import psutil cache_cpu_psutil_process = None def get_metric_total_cpu_usage(psutil_process : psutil.Process): global cache_cpu_psutil_process try: if (psutil_process == None): print ("ERROR - No psutil.process passed! Cannot gather metric!", flush=True) return None # We always need to skip the first CPU poll if (cache_cpu_psutil_process != psutil_process): psutil.cpu_percent(interval=None) cache_cpu_psutil_process = psutil_process return None return psutil.cpu_percent(interval=None) except Exception as e: print ("ERROR - exception occurred gathering metrics!") print ("Exception: " + str(e), flush=True) return None # Note: This value is in BYTES. def get_metric_total_memory_usage_value(psutil_process : psutil.Process): try: if (psutil_process == None): print ("ERROR - No psutil.process passed! 
Cannot gather metric!", flush=True) return None return psutil.virtual_memory()[3] except Exception as e: print ("ERROR - exception occurred gathering metrics!") print ("Exception: " + str(e), flush=True) return None def get_metric_total_memory_usage_percent(psutil_process : psutil.Process): try: if (psutil_process == None): print ("ERROR - No psutil.process passed! Cannot gather metric!", flush=True) return None return psutil.virtual_memory()[2] except Exception as e: print ("ERROR - exception occurred gathering metrics!") print ("Exception: " + str(e), flush=True) return None aws-crt-python-0.20.4+dfsg/codebuild/cd/000077500000000000000000000000001456575232400177505ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/codebuild/cd/manylinux-x64-build.yml000066400000000000000000000021251456575232400242330ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 phases: install: commands: pre_build: commands: - export CC=gcc - cd aws-crt-python - /opt/python/cp37-cp37m/bin/python ./continuous-delivery/update-version.py build: commands: - echo Build started on `date` - /opt/python/cp37-cp37m/bin/python setup.py sdist bdist_wheel - auditwheel repair --plat manylinux1_x86_64 dist/awscrt-*cp37-cp37m-linux_x86_64.whl - /opt/python/cp38-cp38/bin/python setup.py sdist bdist_wheel - auditwheel repair --plat manylinux1_x86_64 dist/awscrt-*cp38-cp38-linux_x86_64.whl - /opt/python/cp39-cp39/bin/python setup.py sdist bdist_wheel - auditwheel repair --plat manylinux1_x86_64 dist/awscrt-*cp39-cp39-linux_x86_64.whl # python 3.9 is the last version manylinux1 will ever receive - cp -r wheelhouse ../dist - cp dist/*.tar.gz ../dist/ post_build: commands: - echo Build completed on `date` artifacts: files: - 'dist/*' aws-crt-python-0.20.4+dfsg/codebuild/cd/manylinux-x86-build.yml000066400000000000000000000020471456575232400242420ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 phases: install: commands: pre_build: commands: - export CC=gcc - cd aws-crt-python - /opt/python/cp37-cp37m/bin/python ./continuous-delivery/update-version.py build: commands: - echo Build started on `date` - /opt/python/cp37-cp37m/bin/python setup.py sdist bdist_wheel - auditwheel repair --plat manylinux1_i686 dist/awscrt-*cp37-cp37m-linux_i686.whl - /opt/python/cp38-cp38/bin/python setup.py sdist bdist_wheel - auditwheel repair --plat manylinux1_i686 dist/awscrt-*cp38-cp38-linux_i686.whl - /opt/python/cp39-cp39/bin/python setup.py sdist bdist_wheel - auditwheel repair --plat manylinux1_i686 dist/awscrt-*cp39-cp39-linux_i686.whl # python 3.9 is the last version manylinux1 will ever receive - cp -r wheelhouse ../dist post_build: commands: - echo Build completed on `date` artifacts: files: - 'dist/*' aws-crt-python-0.20.4+dfsg/codebuild/cd/manylinux1-tee.yml000066400000000000000000000014231456575232400233530ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 phases: install: commands: pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - mkdir $CODEBUILD_SRC_DIR/dist - cp -r $CODEBUILD_SRC_DIR_manylinux1_x86_64/dist/* $CODEBUILD_SRC_DIR/dist/ - cp -r $CODEBUILD_SRC_DIR_manylinux1_x86/dist/* $CODEBUILD_SRC_DIR/dist/ - cp -r 
$CODEBUILD_SRC_DIR_manylinux2014_arm/* $CODEBUILD_SRC_DIR/dist/ - cp -r $CODEBUILD_SRC_DIR_manylinux2014_x86_64/* $CODEBUILD_SRC_DIR/dist/ - ls $CODEBUILD_SRC_DIR/dist/ post_build: commands: - echo Build completed on `date` artifacts: files: - 'dist/*' aws-crt-python-0.20.4+dfsg/codebuild/cd/publish_to_prod_pypi.yml000066400000000000000000000016601456575232400247330ustar00rootroot00000000000000version: 0.2 # this image assumes Ubuntu 14.04 base image phases: install: commands: - sudo apt-get update -y - sudo apt-get install python3 python3-pip -y - python3 -m pip install --user --upgrade pip - python3 -m pip install --user --upgrade twine setuptools wheel boto3 PyOpenSSL six pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - mkdir dist - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_windows/dist/* dist/ - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_manylinux1/dist/* dist/ - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_musllinux1_1/dist/* dist/ - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_osx/* dist/ - ls -la dist/ - cd aws-crt-python - python3 continuous-delivery/pull-pypirc.py prod - python3 -m twine upload -r pypi ../dist/* post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/codebuild/cd/publish_to_test_pypi.yml000066400000000000000000000016651456575232400247530ustar00rootroot00000000000000version: 0.2 # this image assumes Ubuntu 14.04 base image phases: install: commands: - sudo apt-get update -y - sudo apt-get install python3 python3-pip -y - python3 -m pip install --user --upgrade pip - python3 -m pip install --user --upgrade twine setuptools wheel boto3 PyOpenSSL six pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - mkdir dist - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_windows/dist/* dist/ - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_manylinux1/dist/* dist/ - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_musllinux1_1/dist/* dist/ - cp -rv $CODEBUILD_SRC_DIR_aws_crt_python_osx/* dist/ - ls -la dist/ - cd aws-crt-python - python3 continuous-delivery/pull-pypirc.py alpha - python3 -m twine upload -r testpypi ../dist/* post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/codebuild/cd/test_prod_pypi.yml000066400000000000000000000011771456575232400235450ustar00rootroot00000000000000version: 0.2 # this image assumes Ubuntu phases: install: commands: - sudo apt-get update -y - sudo apt-get install python3 python3-pip -y - python3 -m pip install --upgrade pip pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - cd aws-crt-python - CURRENT_TAG_VERSION=$(git describe --tags | cut -f2 -dv) - python3 continuous-delivery/pip-install-with-retry.py --no-cache-dir --user awscrt==$CURRENT_TAG_VERSION - python3 continuous-delivery/test-pip-install.py post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/codebuild/cd/test_test_pypi.yml000066400000000000000000000012601456575232400235510ustar00rootroot00000000000000version: 0.2 # this image assumes Ubuntu base image phases: install: commands: - sudo apt-get update -y - sudo apt-get install python3 python3-pip -y - python3 -m pip install --upgrade pip pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - cd aws-crt-python - CURRENT_TAG_VERSION=$(git describe --tags | cut -f2 -dv) - python3 continuous-delivery/pip-install-with-retry.py --no-cache-dir -i https://testpypi.python.org/simple --user awscrt==$CURRENT_TAG_VERSION - python3 
continuous-delivery/test-pip-install.py post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/codebuild/cd/test_version_exists.yml000066400000000000000000000011771456575232400246240ustar00rootroot00000000000000version: 0.2 #this build spec assumes the ubuntu 14.04 trusty image #this build run simply verifies we haven't published something at this tag yet. #if we have we fail the build and stop the pipeline, if we haven't we allow the pipeline to run. phases: install: commands: - sudo apt-get update -y - sudo apt-get install python3 python3-pip -y - pip3 install --upgrade setuptools pre_build: commands: build: commands: - echo Build started on `date` - cd aws-crt-python - bash ./continuous-delivery/test-version-exists post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/codebuild/cd/windows-tee.yml000066400000000000000000000010231456575232400227340ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 phases: install: commands: pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - mkdir dist - cp -r $CODEBUILD_SRC_DIR_aws_crt_python_win64/* dist/ - cp -r $CODEBUILD_SRC_DIR_aws_crt_python_win32/* dist/ post_build: commands: - echo Build completed on `date` artifacts: files: - 'dist/*' aws-crt-python-0.20.4+dfsg/codebuild/linux-integration-tests.sh000077500000000000000000000006151456575232400245430ustar00rootroot00000000000000#!/bin/bash set -euxo pipefail if test -f "/tmp/setup_proxy_test_env.sh"; then source /tmp/setup_proxy_test_env.sh fi env git submodule update --init # build package cd $CODEBUILD_SRC_DIR export AWS_TEST_S3=YES python -m pip install --upgrade --requirement requirements-dev.txt python -m pip install --verbose . python -m unittest discover --failfast --verbose 2>&1 | tee /tmp/tests.log aws-crt-python-0.20.4+dfsg/codebuild/linux-integration-tests.yml000066400000000000000000000017441456575232400247330ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 env: shell: bash variables: BUILDER_VERSION: v0.9.44 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-crt-python phases: install: commands: - add-apt-repository ppa:ubuntu-toolchain-r/test - apt-get update -y - apt-get install gcc-7 cmake ninja-build python3 python3-pip -y pre_build: commands: - export CC=gcc-7 build: commands: - echo Build started on `date` - git submodule update --init # Build library and test - python3 -c "from urllib.request import urlretrieve; urlretrieve('$BUILDER_HOST/$BUILDER_SOURCE/$BUILDER_VERSION/builder.pyz?run=$CODEBUILD_BUILD_ID', 'builder.pyz')" - python3 builder.pyz build --project aws-crt-python downstream post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/codebuild/mqtt5-python-canary-test.sh000077500000000000000000000005251456575232400245440ustar00rootroot00000000000000#!/bin/bash set -euxo pipefail env git submodule update --init # build package cd $CODEBUILD_SRC_DIR export AWS_TEST_S3=YES python -m pip install --verbose . 
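# Illustrative sketch: the CanaryWrapper.py invocation below relies on GIT_HASH, PACKAGE_NAME, and
# CODEBUILD_LOG_PATH being exported by the surrounding CodeBuild environment (see
# codebuild/mqtt5-python-canary-test.yml). For a hypothetical local run, comparable values could be
# set by hand before invoking this script, for example:
#   export GIT_HASH=$(git rev-parse HEAD)
#   export PACKAGE_NAME=aws-crt-python
#   export CODEBUILD_LOG_PATH=/tmp/canary_codebuild.log   # made-up example path, not from the original setup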
python codebuild/CanaryWrapper.py --canary_executable 'python test/mqtt5_canary.py' --git_hash ${GIT_HASH} --git_repo_name $PACKAGE_NAME --codebuild_log_path $CODEBUILD_LOG_PATH aws-crt-python-0.20.4+dfsg/codebuild/mqtt5-python-canary-test.yml000066400000000000000000000026621456575232400247340ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 env: shell: bash variables: CANARY_DURATION: 25200 CANARY_THREADS: 3 CANARY_TPS: 50 CANARY_CLIENT_COUNT: 10 CANARY_LOG_FILE: 'canary_log.txt' CANARY_LOG_LEVEL: 'ERROR' BUILDER_VERSION: v0.9.21 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-crt-python CANARY_TEST_EXE: 'python -m unittest --failfast --verbose 2>&1 | tee /tmp/tests.log test.test_mqtt5_canary' CANARY_SERVER_ARN: Mqtt5MosquittoSever phases: install: commands: - add-apt-repository ppa:ubuntu-toolchain-r/test - apt-get update -y - apt-get install gcc-7 cmake ninja-build python3 -y - python3 -m pip install psutil - python3 -m pip install boto3 pre_build: commands: - export CC=gcc-7 build: commands: - echo Build started on `date` - source ./codebuild/mqtt5_test_setup.sh s3://aws-crt-test-stuff/TestIotProdMQTT5EnvironmentVariables.txt us-east-1 - export ENDPOINT=$(aws secretsmanager get-secret-value --secret-id "$CANARY_SERVER_ARN" --query "SecretString" | cut -f2 -d":" | sed -e 's/[\\\"\}]//g') - export GIT_HASH=$(git rev-parse HEAD) - $CODEBUILD_SRC_DIR/codebuild/mqtt5-python-canary-test.sh post_build: commands: - echo Build completed on `date`aws-crt-python-0.20.4+dfsg/codebuild/mqtt5_test_setup.sh000077500000000000000000000130241456575232400232520ustar00rootroot00000000000000#!/bin/sh # Get the S3 URL containing all of the MQTT5 testing environment variables passed in to the bash script testing_env_bucket=$1 region=$2 # Make sure we have something: if [ "${testing_env_bucket}" != "" ] && [ "${region}" != "" ]; then echo "S3 bucket for environment variables found and region" else echo "Could not get S3 bucket for environment variables and/or region." echo "You need to run this script and pass the S3 URL of the file containing" echo "all of the environment variables to set, as well as the secrets for certificates and private keys" echo "" echo "Example: mqtt5_test_setup.sh s3:/// " echo "" echo "When finished, run 'cleanup' to remove the files downloaded:" echo "" echo "Example: mqtt5_test_setup.sh s3:/// cleanup" echo "" return 1 fi # Is this just a request to clean up? # NOTE: This blindly assumes there is a environment_files.txt file if [ "${region}" != "cleanup" ]; then sleep 0.1 # we have to do something to do an else... else echo "Undoing environment variables" unset $(grep -v '^#' ${PWD}/environment_files.txt | xargs | cut -d "=" -f 1) unset AWS_TEST_MQTT5_CERTIFICATE_FILE unset AWS_TEST_MQTT5_KEY_FILE unset AWS_TEST_MQTT5_IOT_CERTIFICATE_PATH unset AWS_TEST_MQTT5_IOT_KEY_PATH echo "Cleaning up resources..." rm "${PWD}/environment_files.txt" rm "${PWD}/crt_certificate.pem" rm "${PWD}/crt_privatekey.pem" rm "${PWD}/iot_certificate.pem" rm "${PWD}/iot_privatekey.pem" echo "Success!" return 0 fi # Get the file from S3 aws s3 cp ${testing_env_bucket} ${PWD}/environment_files.txt testing_env_file=$( cat environment_files.txt ) # Make sure we have data of some form if [ "${testing_env_file}" != "" ]; then echo "Environment variables secret found" else echo "Could not get environment variables from secrets!" 
return 1 fi # Make all the variables in mqtt5_environment_variables.txt exported # so we can run MQTT5 tests export $(grep -v '^#' environment_files.txt | xargs) # CRT/non-builder certificate and key processing # Get the certificate and key secrets (dumps straight to a file) crt_cert_file=$(aws secretsmanager get-secret-value --secret-id "${AWS_TEST_MQTT5_CERTIFICATE_FILE_SECRET}" --query "SecretString" --region ${region} | cut -f2 -d":" | cut -f2 -d\") && echo -e "$crt_cert_file" > ${PWD}/crt_certificate.pem crt_key_file=$(aws secretsmanager get-secret-value --secret-id "${AWS_TEST_MQTT5_KEY_FILE_SECRET}" --query "SecretString" --region ${region} | cut -f2 -d":" | cut -f2 -d\") && echo -e "$crt_key_file" > ${PWD}/crt_privatekey.pem # Does the certificate file have data? If not, then abort! if [ "${crt_cert_file}" != "" ]; then echo "CRT Certificate secret found" else echo "Could not get CRT certificate from secrets!" # Clean up... unset $(grep -v '^#' environment_files.txt | xargs | cut -d "=" -f 1) rm "${PWD}/environment_files.txt" rm "${PWD}/crt_certificate.pem" rm "${PWD}/crt_privatekey.pem" return 1 fi # Does the private key file have data? If not, then abort! if [ "${crt_key_file}" != "" ]; then echo "CRT Private key secret found" else echo "Could not get CRT private key from secrets!" # Clean up... unset $(grep -v '^#' environment_files.txt | xargs | cut -d "=" -f 1) rm "${PWD}/environment_files.txt" rm "${PWD}/crt_certificate.pem" rm "${PWD}/crt_privatekey.pem" return 1 fi # Set the certificate and key paths (absolute paths for best compatibility) export AWS_TEST_MQTT5_CERTIFICATE_FILE="${PWD}/crt_certificate.pem" export AWS_TEST_MQTT5_KEY_FILE="${PWD}/crt_privatekey.pem" # IoT/Builder certificate and key processing # Get the certificate and key secrets (dumps straight to a file) iot_cert_file=$(aws secretsmanager get-secret-value --secret-id "${AWS_TEST_MQTT5_IOT_CERTIFICATE_PATH_SECRET}" --query "SecretString" --region ${region} | cut -f2 -d":" | cut -f2 -d\") && echo -e "$iot_cert_file" > ./iot_certificate.pem iot_key_file=$(aws secretsmanager get-secret-value --secret-id "${AWS_TEST_MQTT5_IOT_KEY_PATH_SECRET}" --query "SecretString" --region ${region} | cut -f2 -d":" | cut -f2 -d\") && echo -e "$iot_key_file" > ./iot_privatekey.pem # Does the certificate file have data? If not, then abort! if [ "${iot_cert_file}" != "" ]; then echo "IoT Certificate secret found" else echo "Could not get IoT certificate from secrets!" # Clean up... unset $(grep -v '^#' environment_files.txt | xargs | cut -d "=" -f 1) unset AWS_TEST_MQTT5_CERTIFICATE_FILE unset AWS_TEST_MQTT5_KEY_FILE rm "${PWD}/environment_files.txt" rm "${PWD}/crt_certificate.pem" rm "${PWD}/crt_privatekey.pem" rm "${PWD}/iot_certificate.pem" rm "${PWD}/iot_privatekey.pem" return 1 fi # Does the private key file have data? If not, then abort! if [ "${iot_key_file}" != "" ]; then echo "IoT Private key secret found" else echo "Could not get IoT private key from secrets!" # Clean up... unset $(grep -v '^#' environment_files.txt | xargs | cut -d "=" -f 1) unset AWS_TEST_MQTT5_CERTIFICATE_FILE unset AWS_TEST_MQTT5_KEY_FILE rm "${PWD}/environment_files.txt" rm "${PWD}/crt_certificate.pem" rm "${PWD}/crt_privatekey.pem" rm "${PWD}/iot_certificate.pem" rm "${PWD}/iot_privatekey.pem" return 1 fi # Set IoT certificate and key paths export AWS_TEST_MQTT5_IOT_CERTIFICATE_PATH="${PWD}/iot_certificate.pem" export AWS_TEST_MQTT5_IOT_KEY_PATH="${PWD}/iot_privatekey.pem" # Everything is set echo "Success: Environment variables set!" 
return 0aws-crt-python-0.20.4+dfsg/continuous-delivery/000077500000000000000000000000001456575232400214575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-manylinux2014-aarch64-jenkins.sh000077500000000000000000000011731456575232400316220ustar00rootroot00000000000000#!/bin/bash #run build-wheels script in manylinux2014 docker image set -ex DOCKER_IMAGE=123124136734.dkr.ecr.us-east-1.amazonaws.com/aws-crt-manylinux2014-aarch64:latest $(aws --region us-east-1 ecr get-login --no-include-email) docker pull $DOCKER_IMAGE # NOTE: run as current user to avoid git "dubious ownership" error, # and so that output artifacts don't belong to "root" docker run --rm \ --mount type=bind,source=`pwd`,target=/aws-crt-python \ --user "$(id -u):$(id -g)" \ --workdir /aws-crt-python \ --entrypoint /bin/bash \ $DOCKER_IMAGE \ continuous-delivery/build-wheels-manylinux2014-aarch64.sh aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-manylinux2014-aarch64.sh000077500000000000000000000020511456575232400301570ustar00rootroot00000000000000#!/bin/bash #assumes image based on manylinux2014 + extras (cmake3, libcrypto, etc) set -ex /opt/python/cp39-cp39/bin/python ./continuous-delivery/update-version.py /opt/python/cp37-cp37m/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_aarch64 dist/awscrt-*cp37*.whl /opt/python/cp38-cp38/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_aarch64 dist/awscrt-*cp38*.whl /opt/python/cp39-cp39/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_aarch64 dist/awscrt-*cp39*.whl /opt/python/cp310-cp310/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_aarch64 dist/awscrt-*cp310*.whl /opt/python/cp311-cp311/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_aarch64 dist/awscrt-*cp311*.whl # Don't need to build wheels for Python 3.12 and later. # The 3.11 wheel uses the stable ABI, so it works with newer versions too. 
rm dist/*.whl cp -rv wheelhouse/* dist/ #now you just need to run twine (that's in a different script) aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-manylinux2014-x86_64-jenkins.sh000077500000000000000000000011661456575232400313320ustar00rootroot00000000000000#!/bin/bash #run build-wheels script in manylinux2014 docker image set -ex DOCKER_IMAGE=123124136734.dkr.ecr.us-east-1.amazonaws.com/aws-crt-manylinux2014-x64:latest $(aws --region us-east-1 ecr get-login --no-include-email) docker pull $DOCKER_IMAGE # NOTE: run as current user to avoid git "dubious ownership" error, # and so that output artifacts don't belong to "root" docker run --rm \ --mount type=bind,source=`pwd`,target=/aws-crt-python \ --user "$(id -u):$(id -g)" \ --workdir /aws-crt-python \ --entrypoint /bin/bash \ $DOCKER_IMAGE \ continuous-delivery/build-wheels-manylinux2014-x86_64.sh aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-manylinux2014-x86_64.sh000077500000000000000000000020441456575232400276670ustar00rootroot00000000000000#!/bin/bash #assumes image based on manylinux2014 + extras (cmake3, libcrypto, etc) set -ex /opt/python/cp39-cp39/bin/python ./continuous-delivery/update-version.py /opt/python/cp37-cp37m/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_x86_64 dist/awscrt-*cp37*.whl /opt/python/cp38-cp38/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_x86_64 dist/awscrt-*cp38*.whl /opt/python/cp39-cp39/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_x86_64 dist/awscrt-*cp39*.whl /opt/python/cp310-cp310/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_x86_64 dist/awscrt-*cp310*.whl /opt/python/cp311-cp311/bin/python setup.py sdist bdist_wheel auditwheel repair --plat manylinux2014_x86_64 dist/awscrt-*cp311*.whl # Don't need to build wheels for Python 3.12 and later. # The 3.11 wheel uses the stable ABI, so it works with newer versions too. 
rm dist/*.whl cp -rv wheelhouse/* dist/ #now you just need to run twine (that's in a different script) aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-musllinux-1-1-aarch64-jenkins.sh000077500000000000000000000011731456575232400316230ustar00rootroot00000000000000#!/bin/bash #run build-wheels script in musllinux_1_1 docker image set -ex DOCKER_IMAGE=123124136734.dkr.ecr.us-east-1.amazonaws.com/aws-crt-musllinux-1-1-aarch64:latest $(aws --region us-east-1 ecr get-login --no-include-email) docker pull $DOCKER_IMAGE # NOTE: run as current user to avoid git "dubious ownership" error, # and so that output artifacts don't belong to "root" docker run --rm \ --mount type=bind,source=`pwd`,target=/aws-crt-python \ --user "$(id -u):$(id -g)" \ --workdir /aws-crt-python \ --entrypoint /bin/bash \ $DOCKER_IMAGE \ continuous-delivery/build-wheels-musllinux-1-1-aarch64.sh aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-musllinux-1-1-aarch64.sh000077500000000000000000000020071456575232400301610ustar00rootroot00000000000000#!/bin/bash #assumes image based on musllinux_1_1 set -ex /opt/python/cp39-cp39/bin/python ./continuous-delivery/update-version.py /opt/python/cp37-cp37m/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_aarch64 dist/awscrt-*cp37*.whl /opt/python/cp38-cp38/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_aarch64 dist/awscrt-*cp38*.whl /opt/python/cp39-cp39/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_aarch64 dist/awscrt-*cp39*.whl /opt/python/cp310-cp310/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_aarch64 dist/awscrt-*cp310*.whl /opt/python/cp311-cp311/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_aarch64 dist/awscrt-*cp311*.whl # Don't need to build wheels for Python 3.12 and later. # The 3.11 wheel uses the stable ABI, so it works with newer versions too. 
rm dist/*.whl cp -rv wheelhouse/* dist/ #now you just need to run twine (that's in a different script) aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-musllinux-1-1-x86_64-jenkins.sh000077500000000000000000000011661456575232400313330ustar00rootroot00000000000000#!/bin/bash #run build-wheels script in musllinux_1_1 docker image set -ex DOCKER_IMAGE=123124136734.dkr.ecr.us-east-1.amazonaws.com/aws-crt-musllinux-1-1-x64:latest $(aws --region us-east-1 ecr get-login --no-include-email) docker pull $DOCKER_IMAGE # NOTE: run as current user to avoid git "dubious ownership" error, # and so that output artifacts don't belong to "root" docker run --rm \ --mount type=bind,source=`pwd`,target=/aws-crt-python \ --user "$(id -u):$(id -g)" \ --workdir /aws-crt-python \ --entrypoint /bin/bash \ $DOCKER_IMAGE \ continuous-delivery/build-wheels-musllinux-1-1-x86_64.sh aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-musllinux-1-1-x86_64.sh000077500000000000000000000020211456575232400276630ustar00rootroot00000000000000#!/bin/bash #assumes image based on musllinux_1_1 + extras (pip) set -ex /opt/python/cp39-cp39/bin/python ./continuous-delivery/update-version.py /opt/python/cp37-cp37m/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_x86_64 dist/awscrt-*cp37*.whl /opt/python/cp38-cp38/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_x86_64 dist/awscrt-*cp38*.whl /opt/python/cp39-cp39/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_x86_64 dist/awscrt-*cp39*.whl /opt/python/cp310-cp310/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_x86_64 dist/awscrt-*cp310*.whl /opt/python/cp311-cp311/bin/python setup.py sdist bdist_wheel auditwheel repair --plat musllinux_1_1_x86_64 dist/awscrt-*cp311*.whl # Don't need to build wheels for Python 3.12 and later. # The 3.11 wheel uses the stable ABI, so it works with newer versions too. rm dist/*.whl cp -rv wheelhouse/* dist/ #now you just need to run twine (that's in a different script) aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-osx.sh000077500000000000000000000017031456575232400252120ustar00rootroot00000000000000#!/bin/bash #before running this, you'll need cmake3 and a compiler. These python versions are just #using the default python installers from python.org. Each version needs updated pip, wheel, and setuptools set -ex /Library/Frameworks/Python.framework/Versions/3.9/bin/python3 ./continuous-delivery/update-version.py /Library/Frameworks/Python.framework/Versions/3.7/bin/python3 setup.py sdist bdist_wheel /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 setup.py sdist bdist_wheel /Library/Frameworks/Python.framework/Versions/3.9/bin/python3 setup.py sdist bdist_wheel /Library/Frameworks/Python.framework/Versions/3.10/bin/python3 setup.py sdist bdist_wheel /Library/Frameworks/Python.framework/Versions/3.11/bin/python3 setup.py sdist bdist_wheel # Don't need to build wheels for Python 3.12 and later. # The 3.11 wheel uses the stable ABI, so it works with newer versions too. 
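# Unlike the Linux scripts, there is no auditwheel-style repair step here, so the
# wheels produced by bdist_wheel stay in dist/ as-is.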
#now you just need to run twine (that's in a different script) aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-win32.bat000066400000000000000000000011651456575232400254760ustar00rootroot00000000000000 "C:\Program Files (x86)\Python39-32\python.exe" .\continuous-delivery\update-version.py || goto error "C:\Program Files (x86)\Python37-32\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files (x86)\Python38-32\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files (x86)\Python39-32\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files (x86)\Python310-32\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files (x86)\Python311-32\python.exe" setup.py sdist bdist_wheel || goto error goto :EOF :error echo Failed with error #%errorlevel%. exit /b %errorlevel% aws-crt-python-0.20.4+dfsg/continuous-delivery/build-wheels-win64.bat000066400000000000000000000010741456575232400255020ustar00rootroot00000000000000"C:\Program Files\Python39\python.exe" continuous-delivery\update-version.py || goto error "C:\Program Files\Python37\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files\Python38\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files\Python39\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files\Python310\python.exe" setup.py sdist bdist_wheel || goto error "C:\Program Files\Python311\python.exe" setup.py sdist bdist_wheel || goto error goto :EOF :error echo Failed with error #%errorlevel%. exit /b %errorlevel% aws-crt-python-0.20.4+dfsg/continuous-delivery/pip-install-with-retry.py000066400000000000000000000021771456575232400264100ustar00rootroot00000000000000import time import sys import subprocess DOCS = """Given cmdline args, executes: python3 -m pip install [args...] Keeps retrying until the new version becomes available in pypi (or we time out)""" if len(sys.argv) < 2: sys.exit(DOCS) RETRY_INTERVAL_SECS = 10 GIVE_UP_AFTER_SECS = 60 * 15 pip_install_args = [sys.executable, '-m', 'pip', 'install'] + sys.argv[1:] start_time = time.time() while True: print(subprocess.list2cmdline(pip_install_args)) result = subprocess.run(pip_install_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout = result.stdout.decode().strip() if stdout: print(stdout) if result.returncode == 0: # success sys.exit(0) if "could not find a version" in stdout.lower(): elapsed_secs = time.time() - start_time if elapsed_secs < GIVE_UP_AFTER_SECS: # try again print("Retrying in", RETRY_INTERVAL_SECS, "secs...") time.sleep(RETRY_INTERVAL_SECS) continue else: print("Giving up on retries after", int(elapsed_secs), "total secs.") # fail sys.exit(result.returncode) aws-crt-python-0.20.4+dfsg/continuous-delivery/pull-pypirc.py000066400000000000000000000025121456575232400243110ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import boto3 import base64 import os import argparse def get_secret(stage): secret_name = '{}/aws-crt-python/.pypirc'.format(stage) region_name = 'us-east-1' # Create a Secrets Manager client session = boto3.session.Session() client = session.client( service_name='secretsmanager', region_name=region_name ) secret = None get_secret_value_response = client.get_secret_value(SecretId=secret_name) # Decrypts secret using the associated KMS CMK. # Depending on whether the secret is a string or binary, one of these fields will be populated. 
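    # SecretString can be written out directly; SecretBinary comes back base64-encoded,
    # so it is decoded below before being written to ~/.pypirc.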
if 'SecretString' in get_secret_value_response: secret = get_secret_value_response['SecretString'] else: decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary']) secret = decoded_binary_secret with open(os.path.join(os.path.expanduser('~/'), '.pypirc'), 'w') as f: f.write(secret) print('.pypirc written to {}'.format(os.path.join(os.path.expanduser('~/'), '.pypirc'))) parser = argparse.ArgumentParser() parser.add_argument('stage', help='Stage to deploy the pypi package to (e.g. alpha, prod, etc...)', type=str) args = parser.parse_args() get_secret(args.stage) aws-crt-python-0.20.4+dfsg/continuous-delivery/sanity-check-test-pypi.bat000066400000000000000000000007261456575232400264720ustar00rootroot00000000000000FOR /F "delims=" %%A in ('git describe --tags') do ( set TAG_VERSION=%%A ) set CURRENT_VERSION=%TAG_VERSION:v=% "C:\Program Files\Python37\python.exe" continuous-delivery\pip-install-with-retry.py --no-cache-dir -i https://testpypi.python.org/simple --user awscrt==%CURRENT_VERSION% || goto error "C:\Program Files\Python37\python.exe" continuous-delivery\test-pip-install.py || goto error goto :EOF :error echo Failed with error #%errorlevel%. exit /b %errorlevel% aws-crt-python-0.20.4+dfsg/continuous-delivery/sanity-check-test-pypi.sh000066400000000000000000000004141456575232400263300ustar00rootroot00000000000000#!/bin/bash set -ex CURRENT_TAG_VERSION=$(git describe --tags | cut -f2 -dv) python3 continuous-delivery/pip-install-with-retry.py --no-cache-dir -i https://testpypi.python.org/simple --user awscrt==$CURRENT_TAG_VERSION python3 continuous-delivery/test-pip-install.py aws-crt-python-0.20.4+dfsg/continuous-delivery/test-pip-install.py000066400000000000000000000004161456575232400252430ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. import awscrt.io print('if the next statement does not explode, the pip install was successful') print('Is alpn supported? {}', awscrt.io.is_alpn_available()) aws-crt-python-0.20.4+dfsg/continuous-delivery/test-version-exists000077500000000000000000000014401456575232400253630ustar00rootroot00000000000000#!/usr/bin/env bash set -ex #force a failure if there's no tag git describe --tags # now get the tag CURRENT_TAG=$(git describe --tags | cut -f2 -dv) # convert v0.2.12-2-g50254a9 to 0.2.12 CURRENT_TAG_VERSION=$(git describe --tags | cut -f1 -d'-' | cut -f2 -dv) # if there's a hash on the tag, then this is not a release tagged commit if [ "$CURRENT_TAG" != "$CURRENT_TAG_VERSION" ]; then echo "Current tag version is not a release tag, cut a new release if you want to publish." exit 1 fi if python3 -m pip install --no-cache-dir -vvv awscrt==$CURRENT_TAG_VERSION; then echo "$CURRENT_TAG_VERSION is already in pypi, cut a new tag if you want to upload another version." exit 1 fi echo "$CURRENT_TAG_VERSION currently does not exist in pypi, allowing pipeline to continue." 
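# The tag is not on PyPI yet, so exit successfully and let the release pipeline continue.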
exit 0 aws-crt-python-0.20.4+dfsg/continuous-delivery/update-version.py000066400000000000000000000010741456575232400250000ustar00rootroot00000000000000#!/usr/bin/env python3 import os import re import subprocess tag = subprocess.check_output(['git', 'describe', '--tags']) # strip the leading v version = str(tag[1:].strip(), 'utf8') init_path = os.path.join(os.path.dirname(__file__), '..', 'awscrt', '__init__.py') print("Updating awscrt.__version__ to version {}".format(version)) contents = None with open(init_path, 'r+') as init_py: contents = init_py.read() contents = re.sub(r"__version__ = '[^']+'", f"__version__ = '{version}'", contents) with open(init_path, 'w') as init_py: init_py.write(contents) aws-crt-python-0.20.4+dfsg/crt/000077500000000000000000000000001456575232400162205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/CMakeLists.txt000066400000000000000000000045771456575232400207750ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. cmake_minimum_required(VERSION 3.1) # This CMakeLists.txt exists so we can build all the C libraries we depend on # simultaneously. This is much faster than building dependencies one at a time. # # This CMakeLists.txt does NOT build the Python extension itself. # We let setuptools handle that. project(aws-crt-dependencies) # Note: set() calls must use CACHE, and must be called before the option() they're overriding, # or they won't work right on CMake 3.12 and below. # see: https://cmake.org/cmake/help/v3.13/policy/CMP0077.html # This magic lets us build everything all at once set(IN_SOURCE_BUILD ON CACHE BOOL "") list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/aws-c-common/cmake) include(AwsFindPackage) # Build dependencies as static libs set(BUILD_SHARED_LIBS OFF CACHE BOOL "") set(CMAKE_POSITION_INDEPENDENT_CODE ON CACHE BOOL "") # Don't build the dependencies' tests set(BUILD_TESTING OFF CACHE BOOL "") include(CTest) # On Unix we use S2N for TLS and AWS-LC crypto. 
# (On Windows and Apple we use the default OS libraries) if(UNIX AND NOT APPLE) option(USE_OPENSSL "Set this if you want to use your system's OpenSSL compatible libcrypto" OFF) if(NOT USE_OPENSSL) set(DISABLE_GO ON CACHE BOOL "Build without using Go, we don't want the extra dependency") set(BUILD_LIBSSL OFF CACHE BOOL "Don't need libssl, only need libcrypto") if(CMAKE_C_COMPILER_ID MATCHES "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_LESS "5.0") set(DISABLE_PERL OFF CACHE BOOL "Build with Perl to avoid using pre-compiled binary with AVX512") set(MY_ASSEMBLER_IS_TOO_OLD_FOR_512AVX ON CACHE BOOL "Disable AVX512 on old GCC that not supports it") else() set(DISABLE_PERL ON CACHE BOOL "Build without using Perl, we don't want the extra dependency") endif() add_subdirectory(aws-lc) endif() set(UNSAFE_TREAT_WARNINGS_AS_ERRORS OFF CACHE BOOL "") add_subdirectory(s2n) endif() add_subdirectory(aws-c-common) add_subdirectory(aws-c-sdkutils) add_subdirectory(aws-c-cal) add_subdirectory(aws-c-io) add_subdirectory(aws-checksums) add_subdirectory(aws-c-compression) add_subdirectory(aws-c-event-stream) add_subdirectory(aws-c-http) add_subdirectory(aws-c-auth) add_subdirectory(aws-c-mqtt) add_subdirectory(aws-c-s3) aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/000077500000000000000000000000001456575232400201715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.builder/000077500000000000000000000000001456575232400216755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.builder/action/000077500000000000000000000000001456575232400231525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.builder/action/auth-ci-prep.py000066400000000000000000000005361456575232400260260ustar00rootroot00000000000000import Builder import json import os import re import subprocess import sys class AuthCiPrep(Builder.Action): def run(self, env): env.shell.setenv("AWS_TESTING_COGNITO_IDENTITY", env.shell.get_secret("aws-c-auth-testing/cognito-identity"), quiet=True) actions = [] return Builder.Script(actions, name='auth-ci-prep') aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.clang-format000066400000000000000000000031611456575232400225450ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false 
SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.clang-tidy000066400000000000000000000013651456575232400222320ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter,-readability-isolate-declaration' WarningsAsErrors: '*' HeaderFilterRegex: '.*(? packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-app-verifier: runs-on: 
windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-auth\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-auth\build\aws-c-auth osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. downstream: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400266720ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400303350ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.github/workflows/codecov.yml000066400000000000000000000015331456575232400257350ustar00rootroot00000000000000name: Code coverage check on: push: env: BUILDER_VERSION: v0.9.55 BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net BUILDER_SOURCE: releases PACKAGE_NAME: aws-c-auth AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: codecov-linux: runs-on: ubuntu-22.04 steps: - name: Checkout Sources uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --compiler=gcc-9 --cmake-extra=-DASSERT_LOCK_HELD=ON --coverage aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400312250ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.github/workflows/stale_issue.yml000066400000000000000000000046321456575232400266360ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job permissions: issues: write pull-requests: write steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deservers. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. 
An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/.gitignore000066400000000000000000000010271456575232400221610ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/CMakeLists.txt000066400000000000000000000064671456575232400227460ustar00rootroot00000000000000 cmake_minimum_required(VERSION 3.1) project(aws-c-auth C) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(AwsFindPackage) option(BUILD_RELOCATABLE_BINARIES "Build Relocatable Binaries, this will turn off features that will fail on older kernels than used for the build." 
OFF) file(GLOB AWS_AUTH_ROOT_HEADERS "include/aws/auth/*.h" ) file(GLOB AWS_AUTH_PRIVATE_HEADERS "include/aws/auth/private/*.h" ) file(GLOB AWS_AUTH_ROOT_SRC "source/*.c" ) if (WIN32) if (MSVC) source_group("Header Files\\aws\\auth" FILES ${AWS_AUTH_HEADERS}) source_group("Source Files" FILES ${AWS_AUTH_SRC}) endif () endif() file(GLOB AUTH_HEADERS ${AWS_AUTH_ROOT_HEADERS} ${AWS_AUTH_PRIVATE_HEADERS} ) file(GLOB AUTH_SRC ${AWS_AUTH_ROOT_SRC} ) add_library(${PROJECT_NAME} ${LIBTYPE} ${AUTH_HEADERS} ${AUTH_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_AUTH") aws_check_headers(${PROJECT_NAME} ${AWS_AUTH_ROOT_HEADERS}) aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_compile_definitions(${PROJECT_NAME} PRIVATE -DCJSON_HIDE_SYMBOLS) if (BUILD_RELOCATABLE_BINARIES) target_compile_definitions(${PROJECT_NAME} PRIVATE "-DCOMPAT_MODE") endif() target_include_directories(${PROJECT_NAME} PUBLIC $ $) aws_use_package(aws-c-sdkutils) aws_use_package(aws-c-cal) aws_use_package(aws-c-http) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) install(FILES ${AWS_AUTH_ROOT_HEADERS} DESTINATION "include/aws/auth" COMPONENT Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}/" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) include(CTest) if (BUILD_TESTING) add_subdirectory(tests) endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/CODE_OF_CONDUCT.md000066400000000000000000000004671456575232400227770ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/CONTRIBUTING.md000066400000000000000000000067431456575232400224340ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-auth/issues), or [recently closed](https://github.com/awslabs/aws-c-auth/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. 
Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-auth/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-auth/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/LICENSE000066400000000000000000000261361456575232400212060ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/NOTICE000066400000000000000000000001631456575232400210750ustar00rootroot00000000000000AWS C Auth Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/README.md000066400000000000000000000057061456575232400214600ustar00rootroot00000000000000## AWS C Auth C99 library implementation of AWS client-side authentication: standard credentials providers and signing. From a cryptographic perspective, only functions with the suffix "_constant_time" should be considered constant time. ## License This library is licensed under the Apache 2.0 License. ## Usage ### Building CMake 3.1+ is required to build. `` must be an absolute path in the following instructions. 
#### Linux-Only Dependencies If you are building on Linux, you will need to build aws-lc and s2n-tls first. ``` git clone git@github.com:awslabs/aws-lc.git cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-lc/build --target install git clone git@github.com:aws/s2n-tls.git cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build s2n-tls/build --target install ``` #### Building aws-c-auth and Remaining Dependencies ``` git clone git@github.com:awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-c-cal.git cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-cal/build --target install git clone git@github.com:awslabs/aws-c-io.git cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-io/build --target install git clone git@github.com:awslabs/aws-c-compression.git cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-compression/build --target install git clone git@github.com:awslabs/aws-c-http.git cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-http/build --target install git clone git@github.com:awslabs/aws-c-auth.git cmake -S aws-c-auth -B aws-c-auth/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-auth/build --target install ``` ### Testing Certain tests require a specific environment setup in order to run successfully. This may be a specific execution environment (EC2, ECS, etc...) or it may require certain environment variables to be set that configure properties (often sensitive materials, like keys). Whether or not these tests are enabled is controlled by certain CMAKE properties: * AWS_BUILDING_ON_EC2 - indicates real IMDS credentials provider test(s) should run * AWS_BUILDING_ON_ECS - indciates real ECS credentials provider tests(s) should run * AWS_HAS_CI_ENVIRONMENT - indicates that all tests that require environmentally injected secrets/properties should run Environment properties are injected by CRT builder process via the custom builder step defined in `./.builder/action/aws-c-auth-test.py` aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/builder.json000066400000000000000000000010441456575232400225110ustar00rootroot00000000000000{ "name": "aws-c-auth", "targets": { "android": { "enabled": false, "_comment": "disabled until we need to support it. LibCrypto needs to be configured on build machine." } }, "upstream": [ { "name": "aws-c-http" }, { "name": "aws-c-cal" }, { "name": "aws-c-sdkutils" } ], "downstream": [ { "name": "aws-c-s3" } ], "+cmake_args": [ "-DAWS_HAS_CI_ENVIRONMENT=ON" ], "test_steps": [ "auth-ci-prep", "test" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/cmake/000077500000000000000000000000001456575232400212515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/cmake/aws-c-auth-config.cmake000066400000000000000000000012561456575232400254730ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-common) find_dependency(aws-c-cal) find_dependency(aws-c-io) find_dependency(aws-c-http) find_dependency(aws-c-sdkutils) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. 
if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/format-check.sh000077500000000000000000000007631456575232400231010ustar00rootroot00000000000000#!/bin/bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/000077500000000000000000000000001456575232400216145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/000077500000000000000000000000001456575232400224065ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/000077500000000000000000000000001456575232400233475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/auth.h000066400000000000000000000060221456575232400244610ustar00rootroot00000000000000#ifndef AWS_AUTH_AUTH_H #define AWS_AUTH_AUTH_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_AUTH_PACKAGE_ID 6 /** * Auth-specific error codes */ enum aws_auth_errors { AWS_AUTH_PROFILE_PARSE_RECOVERABLE_ERROR = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE, AWS_AUTH_PROFILE_PARSE_FATAL_ERROR = AWS_ERROR_SDKUTILS_PARSE_FATAL, AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_AUTH_PACKAGE_ID), AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION, AWS_AUTH_SIGNING_NO_CREDENTIALS, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER, AWS_AUTH_SIGNING_INVALID_CONFIGURATION, AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_ENVIRONMENT, AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_DELEGATE, AWS_AUTH_CREDENTIALS_PROVIDER_PROFILE_SOURCE_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_IMDS_SOURCE_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE, AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE, AWS_AUTH_CREDENTIALS_PROVIDER_ECS_SOURCE_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_X509_SOURCE_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_PROCESS_SOURCE_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE, AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE, AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE, AWS_AUTH_SIGNING_INVALID_CREDENTIALS, AWS_AUTH_CANONICAL_REQUEST_MISMATCH, AWS_AUTH_SIGV4A_SIGNATURE_VALIDATION_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_COGNITO_SOURCE_FAILURE, AWS_AUTH_CREDENTIALS_PROVIDER_DELEGATE_FAILURE, AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE, AWS_AUTH_SSO_TOKEN_INVALID, AWS_AUTH_SSO_TOKEN_EXPIRED, AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE, AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE, AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE, AWS_AUTH_ERROR_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_AUTH_PACKAGE_ID) }; /** * Auth-specific logging subjects */ enum aws_auth_log_subject { AWS_LS_AUTH_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_AUTH_PACKAGE_ID), AWS_LS_AUTH_PROFILE, AWS_LS_AUTH_CREDENTIALS_PROVIDER, 
AWS_LS_AUTH_SIGNING, AWS_LS_IMDS_CLIENT, AWS_LS_AUTH_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_AUTH_PACKAGE_ID) }; AWS_EXTERN_C_BEGIN /** * Initializes internal datastructures used by aws-c-auth. * Must be called before using any functionality in aws-c-auth. * * @param allocator memory allocator to use for any module-level memory allocation */ AWS_AUTH_API void aws_auth_library_init(struct aws_allocator *allocator); /** * Clean up internal datastructures used by aws-c-auth. * Must not be called until application is done using functionality in aws-c-auth. */ AWS_AUTH_API void aws_auth_library_clean_up(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_AUTH_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/aws_imds_client.h000066400000000000000000000421071456575232400266700ustar00rootroot00000000000000#ifndef AWS_AUTH_IMDS_CLIENT_H #define AWS_AUTH_IMDS_CLIENT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL typedef void(aws_imds_client_shutdown_completed_fn)(void *user_data); /** * Optional callback and user data to be invoked when an imds client has fully shut down */ struct aws_imds_client_shutdown_options { aws_imds_client_shutdown_completed_fn *shutdown_callback; void *shutdown_user_data; }; /** * Configuration options when creating an imds client */ struct aws_imds_client_options { /* * Completion callback to be invoked when the client has fully shut down */ struct aws_imds_client_shutdown_options shutdown_options; /* * Client bootstrap to use when this client makes network connections */ struct aws_client_bootstrap *bootstrap; /* * Retry strategy instance that governs how failed requests are retried */ struct aws_retry_strategy *retry_strategy; /* * What version of the imds protocol to use * * Defaults to IMDS_PROTOCOL_V2 */ enum aws_imds_protocol_version imds_version; /* * If true, fallback from v2 to v1 will be disabled for all cases */ bool ec2_metadata_v1_disabled; /* * Table holding all cross-system functional dependencies for an imds client. 
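 * (Editorial note: aws_auth_http_system_vtable is declared in aws/auth/private/credentials_utils.h and
 * bundles the HTTP connection-manager and stream functions the client calls, so tests can swap in mocks.)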
* * For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; }; /* * Standard callback for instance metadata queries */ typedef void( aws_imds_client_on_get_resource_callback_fn)(const struct aws_byte_buf *resource, int error_code, void *user_data); /** * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-categories.html */ struct aws_imds_iam_profile { struct aws_date_time last_updated; struct aws_byte_cursor instance_profile_arn; struct aws_byte_cursor instance_profile_id; }; /** * Block of per-instance EC2-specific data * * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html */ struct aws_imds_instance_info { /* an array of aws_byte_cursor */ struct aws_array_list marketplace_product_codes; struct aws_byte_cursor availability_zone; struct aws_byte_cursor private_ip; struct aws_byte_cursor version; struct aws_byte_cursor instance_id; /* an array of aws_byte_cursor */ struct aws_array_list billing_products; struct aws_byte_cursor instance_type; struct aws_byte_cursor account_id; struct aws_byte_cursor image_id; struct aws_date_time pending_time; struct aws_byte_cursor architecture; struct aws_byte_cursor kernel_id; struct aws_byte_cursor ramdisk_id; struct aws_byte_cursor region; }; /* the item typed stored in array is pointer to aws_byte_cursor */ typedef void( aws_imds_client_on_get_array_callback_fn)(const struct aws_array_list *array, int error_code, void *user_data); typedef void(aws_imds_client_on_get_credentials_callback_fn)( const struct aws_credentials *credentials, int error_code, void *user_data); typedef void(aws_imds_client_on_get_iam_profile_callback_fn)( const struct aws_imds_iam_profile *iam_profile_info, int error_code, void *user_data); typedef void(aws_imds_client_on_get_instance_info_callback_fn)( const struct aws_imds_instance_info *instance_info, int error_code, void *user_data); /** * AWS EC2 Metadata Client is used to retrieve AWS EC2 Instance Metadata info. 
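 *
 * A minimal usage sketch (editorial addition, not part of the original header; "bootstrap", "allocator",
 * "on_instance_id" and "user_data" are assumed to exist already, and error handling is omitted):
 *
 *   struct aws_imds_client_options options = {
 *       .bootstrap = bootstrap,             // client bootstrap created elsewhere
 *       .imds_version = IMDS_PROTOCOL_V2,
 *   };
 *   struct aws_imds_client *client = aws_imds_client_new(allocator, &options);
 *   // on_instance_id is a user-supplied aws_imds_client_on_get_resource_callback_fn
 *   aws_imds_client_get_instance_id(client, on_instance_id, user_data);
 *   // ... once all queries have completed:
 *   aws_imds_client_release(client);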
*/ struct aws_imds_client; AWS_EXTERN_C_BEGIN /** * Creates a new imds client * * @param allocator memory allocator to use for creation and queries * @param options configuration options for the imds client * * @return a newly-constructed imds client, or NULL on failure */ AWS_AUTH_API struct aws_imds_client *aws_imds_client_new( struct aws_allocator *allocator, const struct aws_imds_client_options *options); /** * Increments the ref count on the client * * @param client imds client to acquire a reference to */ AWS_AUTH_API void aws_imds_client_acquire(struct aws_imds_client *client); /** * Decrements the ref count on the client * * @param client imds client to release a reference to */ AWS_AUTH_API void aws_imds_client_release(struct aws_imds_client *client); /** * Queries a generic resource (string) from the ec2 instance metadata document * * @param client imds client to use for the query * @param resource_path path of the resource to query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_resource_async( struct aws_imds_client *client, struct aws_byte_cursor resource_path, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the ami id of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_ami_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the ami launch index of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_ami_launch_index( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the ami manifest path of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_ami_manifest_path( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the list of ancestor ami ids of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_ancestor_ami_ids( struct aws_imds_client *client, aws_imds_client_on_get_array_callback_fn callback, void *user_data); /** * Gets the instance-action of the ec2 instance from the 
instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_instance_action( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the instance id of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_instance_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the instance type of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_instance_type( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the mac address of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_mac_address( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the private ip address of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_private_ip_address( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the availability zone of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_availability_zone( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the product codes of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_product_codes( struct aws_imds_client *client, 
aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the public key of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_public_key( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the ramdisk id of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_ramdisk_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the reservation id of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_reservation_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the list of the security groups of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_security_groups( struct aws_imds_client *client, aws_imds_client_on_get_array_callback_fn callback, void *user_data); /** * Gets the list of block device mappings of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_block_device_mapping( struct aws_imds_client *client, aws_imds_client_on_get_array_callback_fn callback, void *user_data); /** * Gets the attached iam role of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_attached_iam_role( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets temporary credentials based on the attached iam role of the ec2 instance * * @param client imds client to use for the query * @param iam_role_name iam role name to get temporary credentials through * @param callback callback function to invoke on query success or failure * @param user_data 
opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_credentials( struct aws_imds_client *client, struct aws_byte_cursor iam_role_name, aws_imds_client_on_get_credentials_callback_fn callback, void *user_data); /** * Gets the iam profile information of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_iam_profile( struct aws_imds_client *client, aws_imds_client_on_get_iam_profile_callback_fn callback, void *user_data); /** * Gets the user data of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_user_data( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the signature of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_instance_signature( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data); /** * Gets the instance information data block of the ec2 instance from the instance metadata document * * @param client imds client to use for the query * @param callback callback function to invoke on query success or failure * @param user_data opaque data to invoke the completion callback with * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_imds_client_get_instance_info( struct aws_imds_client *client, aws_imds_client_on_get_instance_info_callback_fn callback, void *user_data); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_IMDS_CLIENT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/credentials.h000066400000000000000000001161631456575232400260250ustar00rootroot00000000000000#ifndef AWS_AUTH_CREDENTIALS_H #define AWS_AUTH_CREDENTIALS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_client_bootstrap; struct aws_auth_http_system_vtable; struct aws_credentials; struct aws_credentials_provider; struct aws_ecc_key_pair; struct aws_string; extern const uint16_t aws_sts_assume_role_default_duration_secs; /* * Signature for the credentials sourcing callback */ typedef void(aws_on_get_credentials_callback_fn)(struct aws_credentials *credentials, int error_code, void *user_data); typedef int(aws_credentials_provider_get_credentials_fn)( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data); typedef void(aws_credentials_provider_destroy_fn)(struct aws_credentials_provider *provider); /* * Common function table that all credentials provider implementations must support */ struct aws_credentials_provider_vtable { aws_credentials_provider_get_credentials_fn *get_credentials; aws_credentials_provider_destroy_fn *destroy; }; typedef void(aws_credentials_provider_shutdown_completed_fn)(void *user_data); /* * All credentials providers support an optional shutdown callback that * gets invoked, with appropriate user data, when the resources used by the provider * are no longer in use. For example, the imds provider uses this to * signal when it is no longer using the client bootstrap used in its * internal connection manager. */ struct aws_credentials_provider_shutdown_options { aws_credentials_provider_shutdown_completed_fn *shutdown_callback; void *shutdown_user_data; }; /** * A baseclass for credentials providers. A credentials provider is an object that has an asynchronous * query function for retrieving AWS credentials. * * Ref-counted. Thread-safe. */ struct aws_credentials_provider { struct aws_credentials_provider_vtable *vtable; struct aws_allocator *allocator; struct aws_credentials_provider_shutdown_options shutdown_options; void *impl; struct aws_atomic_var ref_count; }; /* * Config structs for creating all the different credentials providers */ /** * Configuration options for a provider that returns a fixed set of credentials */ struct aws_credentials_provider_static_options { struct aws_credentials_provider_shutdown_options shutdown_options; struct aws_byte_cursor access_key_id; struct aws_byte_cursor secret_access_key; struct aws_byte_cursor session_token; }; /** * Configuration options for a provider that returns credentials based on environment variable values */ struct aws_credentials_provider_environment_options { struct aws_credentials_provider_shutdown_options shutdown_options; }; /** * Configuration options for a provider that sources credentials from the aws config and credentials files * (by default ~/.aws/config and ~/.aws/credentials) */ struct aws_credentials_provider_profile_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Override of what profile to use to source credentials from ('default' by default) */ struct aws_byte_cursor profile_name_override; /* * Override path to the profile config file (~/.aws/config by default) */ struct aws_byte_cursor config_file_name_override; /* * Override path to the profile credentials file (~/.aws/credentials by default) */ struct aws_byte_cursor credentials_file_name_override; /** * (Optional) * Use a cached merged profile collection. A merge collection has both config file * (~/.aws/config) and credentials file based profile collection (~/.aws/credentials) using * `aws_profile_collection_new_from_merge`. 
* If this option is provided, `config_file_name_override` and `credentials_file_name_override` will be ignored. */ struct aws_profile_collection *profile_collection_cached; /* * Bootstrap to use for any network connections made while sourcing credentials (for example, * a profile that uses assume-role will need to hit STS) */ struct aws_client_bootstrap *bootstrap; /* * Client TLS context to use for any secure network connections made while sourcing credentials * (for example, a profile that uses assume-role will need to hit STS). * * If a TLS context is needed, and you did not pass one in, it will be created automatically. * However, you are encouraged to pass in a shared one since these are expensive objects. * If using BYO_CRYPTO, you must provide the TLS context since it cannot be created automatically. */ struct aws_tls_ctx *tls_ctx; /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; }; /** * Configuration options for a provider that functions as a caching decorator. Credentials sourced through this * provider will be cached within it until their expiration time. When the cached credentials expire, new * credentials will be fetched when next queried. */ struct aws_credentials_provider_cached_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * The provider to cache credentials query results from */ struct aws_credentials_provider *source; /* * An optional expiration time period for sourced credentials. For a given set of cached credentials, * the refresh time period will be the minimum of this time and any expiration timestamp on the credentials * themselves. */ uint64_t refresh_time_in_milliseconds; /* For mocking, leave NULL otherwise */ aws_io_clock_fn *high_res_clock_fn; aws_io_clock_fn *system_clock_fn; }; /** * Configuration options for a provider that queries, in order, a list of providers. This provider uses the * first set of credentials successfully queried. Providers are queried one at a time; a provider is not queried * until the preceding provider has failed to source credentials. */ struct aws_credentials_provider_chain_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Pointer to an array of credentials providers to use */ struct aws_credentials_provider **providers; /* * Number of elements in the array of credentials providers */ size_t provider_count; }; /* * EC2 IMDS_V1 takes one http request to get resource, while IMDS_V2 takes one more token (Http PUT) request * to get secure token used in following request. */ enum aws_imds_protocol_version { /** * Defaults to IMDS_PROTOCOL_V2. It can be set to either one and IMDS Client * will figure out (by looking at response code) which protocol an instance * is using. But a more clear setting will reduce unnecessary network request. */ IMDS_PROTOCOL_V2, IMDS_PROTOCOL_V1, }; /** * Configuration options for the provider that sources credentials from ec2 instance metadata */ struct aws_credentials_provider_imds_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Connection bootstrap to use for any network connections made while sourcing credentials */ struct aws_client_bootstrap *bootstrap; /* * Which version of the imds query protocol to use. 
*/ enum aws_imds_protocol_version imds_version; /* * If true, fallback from v2 to v1 will be disabled for all cases */ bool ec2_metadata_v1_disabled; /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; }; /* * Configuration options for the provider that sources credentials from ECS container metadata * * ECS creds provider can be used to access creds via either * relative uri to a fixed endpoint http://169.254.170.2, * or via a full uri specified by environment variables: * AWS_CONTAINER_CREDENTIALS_RELATIVE_URI * AWS_CONTAINER_CREDENTIALS_FULL_URI * AWS_CONTAINER_AUTHORIZATION_TOKEN * If both relative uri and absolute uri are set, relative uri * has higher priority. Token is used in auth header but only for * absolute uri. * While above information is used in request only, endpoint info * is needed when creating ecs provider to initiate the connection * manager, more specifically, host and http scheme (tls or not) * from endpoint are needed. */ struct aws_credentials_provider_ecs_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Connection bootstrap to use for any network connections made while sourcing credentials */ struct aws_client_bootstrap *bootstrap; /* * Host to query credentials from */ struct aws_byte_cursor host; /* * Http path and query string for the credentials query */ struct aws_byte_cursor path_and_query; /* * Authorization token to include in the credentials query */ struct aws_byte_cursor auth_token; /* * Client TLS context to use when making query. * If set, port 443 is used. If NULL, port 80 is used. */ struct aws_tls_ctx *tls_ctx; /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; /* * Port to query credentials from. If zero, 80/443 will be used based on whether or not tls is enabled. */ uint32_t port; }; /** * Configuration options for the X509 credentials provider * * The x509 credentials provider sources temporary credentials from AWS IoT Core using TLS mutual authentication. * See details: https://docs.aws.amazon.com/iot/latest/developerguide/authorizing-direct-aws.html * An end to end demo with detailed steps can be found here: * https://aws.amazon.com/blogs/security/how-to-eliminate-the-need-for-hardcoded-aws-credentials-in-devices-by-using-the-aws-iot-credentials-provider/ */ struct aws_credentials_provider_x509_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Connection bootstrap to use for any network connections made while sourcing credentials */ struct aws_client_bootstrap *bootstrap; /* TLS connection options that have been initialized with your x509 certificate and private key */ const struct aws_tls_connection_options *tls_connection_options; /* IoT thing name you registered with AWS IOT for your device, it will be used in http request header */ struct aws_byte_cursor thing_name; /* Iot role alias you created with AWS IoT for your IAM role, it will be used in http request path */ struct aws_byte_cursor role_alias; /** * Per-account X509 credentials sourcing endpoint. 
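 * (Editorial note, hedged: this is the account-specific AWS IoT credential provider endpoint, typically
 * retrieved via the DescribeEndpoint API with endpoint type "iot:CredentialProvider", and generally of the
 * form "xxxxxxxx.credentials.iot.<region>.amazonaws.com".)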
*/ struct aws_byte_cursor endpoint; /** * (Optional) Http proxy configuration for the http request that fetches credentials */ const struct aws_http_proxy_options *proxy_options; /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; }; /** * Configuration options for the STS web identity provider * * Sts with web identity credentials provider sources a set of temporary security credentials for users who have been * authenticated in a mobile or web application with a web identity provider. * Example providers include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible * identity provider like Elastic Kubernetes Service * https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html * The required parameters used in the request (region, roleArn, sessionName, tokenFilePath) are automatically resolved * by SDK from envrionment variables or config file if not set. --------------------------------------------------------------------------------- | Parameter | Environment Variable Name | Config File Property Name | ---------------------------------------------------------------------------------- | region | AWS_DEFAULT_REGION | region | | role_arn | AWS_ROLE_ARN | role_arn | | role_session_name | AWS_ROLE_SESSION_NAME | role_session_name | | token_file_path | AWS_WEB_IDENTITY_TOKEN_FILE | web_identity_token_file | |--------------------------------------------------------------------------------| * The order of resolution is the following * 1. Parameters * 2. Environment Variables * 3. Config File */ struct aws_credentials_provider_sts_web_identity_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Connection bootstrap to use for any network connections made while sourcing credentials */ struct aws_client_bootstrap *bootstrap; /** * (Optional) * Use a cached config profile collection. You can also pass a merged collection. */ struct aws_profile_collection *config_profile_collection_cached; /* * Client TLS context to use when querying STS web identity provider. * Required. */ struct aws_tls_ctx *tls_ctx; /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; /* * (Optional) * Override of what profile to use, if not set, 'default' will be used. */ struct aws_byte_cursor profile_name_override; /* * (Optional) * Override of region, if not set, it will be resolved from env or profile. */ struct aws_byte_cursor region; /* * (Optional) * Override of role_arn, if not set, it will be resolved from env or profile. */ struct aws_byte_cursor role_arn; /* * (Optional) * Override of role_session_name, if not set, it will be resolved from env or profile. */ struct aws_byte_cursor role_session_name; /* * (Optional) * Override of token_file_path, if not set, it will be resolved from env or profile. */ struct aws_byte_cursor token_file_path; }; /* * Configuration for the SSOCredentialsProvider that sends a GetRoleCredentialsRequest to the AWS Single * Sign-On Service to maintain short-lived sessions to use for authentication. 
* * https://docs.aws.amazon.com/sdkref/latest/guide/feature-sso-credentials.html */ struct aws_credentials_provider_sso_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Override of what profile to use to source credentials from ('default' by default) */ struct aws_byte_cursor profile_name_override; /* * Override path to the profile config file (~/.aws/config by default) */ struct aws_byte_cursor config_file_name_override; /** * (Optional) * Use a cached config profile collection. You can also pass a merged collection. * config_file_name_override will be ignored if this option is provided. */ struct aws_profile_collection *config_file_cached; /* * Connection bootstrap to use for any network connections made while sourcing credentials * Required. */ struct aws_client_bootstrap *bootstrap; /* * Client TLS context to use when querying SSO provider. * Required. */ struct aws_tls_ctx *tls_ctx; /* For mocking, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; aws_io_clock_fn *system_clock_fn; }; /** * Configuration options for the STS credentials provider */ struct aws_credentials_provider_sts_options { /* * Connection bootstrap to use for any network connections made while sourcing credentials */ struct aws_client_bootstrap *bootstrap; /* * Client TLS context to use when querying STS. * Required. */ struct aws_tls_ctx *tls_ctx; /* * Credentials provider to be used to sign the requests made to STS to fetch credentials. */ struct aws_credentials_provider *creds_provider; /* * Arn of the role to assume by fetching credentials for */ struct aws_byte_cursor role_arn; /* * Assumed role session identifier to be associated with the sourced credentials */ struct aws_byte_cursor session_name; /* * How long sourced credentials should remain valid for, in seconds. 900 is the minimum allowed value. */ uint16_t duration_seconds; /** * (Optional) Http proxy configuration for the AssumeRole http request that fetches credentials */ const struct aws_http_proxy_options *http_proxy_options; struct aws_credentials_provider_shutdown_options shutdown_options; /* For mocking, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; aws_io_clock_fn *system_clock_fn; }; /** * * Configuration options for the process credentials provider * * The process credentials provider sources credentials from running a command or process. * The command to run is sourced from a profile in the AWS config file, using the standard * profile selection rules. The profile key the command is read from is "credential_process." * E.g.: * [default] * credential_process=/opt/amazon/bin/my-credential-fetcher --argsA=abc * On successfully running the command, the output should be a json data with the following * format: * { "Version": 1, "AccessKeyId": "accesskey", "SecretAccessKey": "secretAccessKey" "SessionToken": "....", "Expiration": "2019-05-29T00:21:43Z" } * Version here identifies the command output format version. */ struct aws_credentials_provider_process_options { struct aws_credentials_provider_shutdown_options shutdown_options; /** * In which profile name to look for credential_process, * if not provided, we will try environment variable: AWS_PROFILE. */ struct aws_byte_cursor profile_to_use; /** * (Optional) * Use a cached config profile collection. You can also pass a merged collection. */ struct aws_profile_collection *config_profile_collection_cached; }; /** * Configuration options for the default credentials provider chain. 
*/ struct aws_credentials_provider_chain_default_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Connection bootstrap to use for any network connections made while sourcing credentials */ struct aws_client_bootstrap *bootstrap; /* * Client TLS context to use for any secure network connections made while sourcing credentials. * * If not provided the default chain will construct a new one, but these * are expensive objects so you are encouraged to pass in a shared one. * * Must be provided if using BYO_CRYPTO. */ struct aws_tls_ctx *tls_ctx; /** * (Optional) * Use a cached merged profile collection. A merge collection has both config file * (~/.aws/config) and credentials file based profile collection (~/.aws/credentials) using * `aws_profile_collection_new_from_merge`. * If this option is provided, `config_file_name_override` and `credentials_file_name_override` will be ignored. */ struct aws_profile_collection *profile_collection_cached; /* * (Optional) * Override of what profile to use, if not set, 'default' will be used. */ struct aws_byte_cursor profile_name_override; /* * (Optional) * If enabled, the Environment Credentials Provider is not added to the chain. */ bool skip_environment_credentials_provider; }; typedef int(aws_credentials_provider_delegate_get_credentials_fn)( void *delegate_user_data, aws_on_get_credentials_callback_fn callback, void *callback_user_data); /** * Configuration options for the delegate credentials provider. */ struct aws_credentials_provider_delegate_options { struct aws_credentials_provider_shutdown_options shutdown_options; /** * Delegated get_credentials() callback. */ aws_credentials_provider_delegate_get_credentials_fn *get_credentials; /** * User data for delegated callbacks. */ void *delegate_user_data; }; /** * A (string) pair defining an identity provider and a valid login token sourced from it. */ struct aws_cognito_identity_provider_token_pair { /** * Name of an identity provider */ struct aws_byte_cursor identity_provider_name; /** * Valid login token source from the identity provider */ struct aws_byte_cursor identity_provider_token; }; /** * Configuration options needed to create a Cognito-based Credentials Provider */ struct aws_credentials_provider_cognito_options { struct aws_credentials_provider_shutdown_options shutdown_options; /** * Cognito service regional endpoint to source credentials from. */ struct aws_byte_cursor endpoint; /** * Cognito identity to fetch credentials relative to. */ struct aws_byte_cursor identity; /** * Optional set of identity provider token pairs to allow for authenticated identity access. */ struct aws_cognito_identity_provider_token_pair *logins; size_t login_count; /** * Optional ARN of the role to be assumed when multiple roles were received in the token from the identity provider. */ struct aws_byte_cursor *custom_role_arn; /* * Connection bootstrap to use for network connections made while sourcing credentials */ struct aws_client_bootstrap *bootstrap; /* * Client TLS context to use when querying cognito credentials. * Required. 
*/ struct aws_tls_ctx *tls_ctx; /** * (Optional) Http proxy configuration for the http request that fetches credentials */ const struct aws_http_proxy_options *http_proxy_options; /* For mocking the http layer in tests, leave NULL otherwise */ struct aws_auth_http_system_vtable *function_table; }; AWS_EXTERN_C_BEGIN /* * Credentials APIs * * expiration_timepoint_seconds is the timepoint, in seconds since epoch, that the credentials will no longer * be valid. For credentials that do not expire, use UINT64_MAX. */ /** * Creates a new set of aws credentials * * @param allocator memory allocator to use * @param access_key_id_cursor value for the aws access key id field * @param secret_access_key_cursor value for the secret access key field * @param session_token_cursor (optional) security token associated with the credentials * @param expiration_timepoint_seconds timepoint, in seconds since epoch, that the credentials will no longer * be valid past. For credentials that do not expire, use UINT64_MAX * * @return a valid credentials object, or NULL */ AWS_AUTH_API struct aws_credentials *aws_credentials_new( struct aws_allocator *allocator, struct aws_byte_cursor access_key_id_cursor, struct aws_byte_cursor secret_access_key_cursor, struct aws_byte_cursor session_token_cursor, uint64_t expiration_timepoint_seconds); /** * Creates a new set of aws anonymous credentials. * Use Anonymous credentials, when you want to skip the signing process. * * @param allocator memory allocator to use * * @return a valid credentials object, or NULL */ AWS_AUTH_API struct aws_credentials *aws_credentials_new_anonymous(struct aws_allocator *allocator); /** * Creates a new set of AWS credentials * * @param allocator memory allocator to use * @param access_key_id value for the aws access key id field * @param secret_access_key value for the secret access key field * @param session_token (optional) security token associated with the credentials * @param expiration_timepoint_seconds timepoint, in seconds since epoch, that the credentials will no longer * be valid past. For credentials that do not expire, use UINT64_MAX * * @return a valid credentials object, or NULL */ AWS_AUTH_API struct aws_credentials *aws_credentials_new_from_string( struct aws_allocator *allocator, const struct aws_string *access_key_id, const struct aws_string *secret_access_key, const struct aws_string *session_token, uint64_t expiration_timepoint_seconds); /** * Creates a set of AWS credentials that includes an ECC key pair. These credentials do not have a value for * the secret access key; the ecc key takes over that field's role in sigv4a signing. * * @param allocator memory allocator to use for all memory allocation * @param access_key_id access key id for the credential set * @param ecc_key ecc key to use during signing when using these credentials * @param session_token (optional) session token associated with the credentials * @param expiration_timepoint_in_seconds (optional) if session-based, time at which these credentials expire * @return a new pair of AWS credentials, or NULL */ AWS_AUTH_API struct aws_credentials *aws_credentials_new_ecc( struct aws_allocator *allocator, struct aws_byte_cursor access_key_id, struct aws_ecc_key_pair *ecc_key, struct aws_byte_cursor session_token, uint64_t expiration_timepoint_in_seconds); /* * Takes a pair of AWS credentials and performs the sigv4a key expansion algorithm to generate a unique * ecc P256 key pair based on the credentials. The ecc key is written to the buffer in DER format. 
* * Sigv4a signing takes the raw DER-encoded ecc key as an optional parameter in signing (if not present, * key expansion will be done for the caller before signing). */ AWS_AUTH_API struct aws_credentials *aws_credentials_new_ecc_from_aws_credentials( struct aws_allocator *allocator, const struct aws_credentials *credentials); /** * Add a reference to some credentials * * @param credentials credentials to increment the ref count on */ AWS_AUTH_API void aws_credentials_acquire(const struct aws_credentials *credentials); /** * Remove a reference to some credentials * * @param credentials credentials to decrement the ref count on */ AWS_AUTH_API void aws_credentials_release(const struct aws_credentials *credentials); /** * Get the AWS access key id from a set of credentials * * @param credentials credentials to get the access key id from * @return a byte cursor to the access key id */ AWS_AUTH_API struct aws_byte_cursor aws_credentials_get_access_key_id(const struct aws_credentials *credentials); /** * Get the AWS secret access key from a set of credentials * * @param credentials credentials to get the secret access key from * @return a byte cursor to the secret access key */ AWS_AUTH_API struct aws_byte_cursor aws_credentials_get_secret_access_key(const struct aws_credentials *credentials); /** * Get the AWS session token from a set of credentials * * @param credentials credentials to get the session token from * @return a byte cursor to the session token or an empty byte cursor if there is no session token */ AWS_AUTH_API struct aws_byte_cursor aws_credentials_get_session_token(const struct aws_credentials *credentials); /** * Get the expiration timepoint (in seconds since epoch) associated with a set of credentials * * @param credentials credentials to get the expiration timepoint for * @return the time, in seconds since epoch, the credentials will expire; UINT64_MAX for credentials * without a specific expiration time */ AWS_AUTH_API uint64_t aws_credentials_get_expiration_timepoint_seconds(const struct aws_credentials *credentials); /** * Get the elliptic curve key associated with this set of credentials * @param credentials credentials to get the the elliptic curve key for * @return the elliptic curve key associated with the credentials, or NULL if no key is associated with * these credentials */ AWS_AUTH_API struct aws_ecc_key_pair *aws_credentials_get_ecc_key_pair(const struct aws_credentials *credentials); /** * If credentials are anonymous, then the signing process is skipped. * * @param credentials credentials to check * * @return true if the credentials are anonymous; false otherwise. */ AWS_AUTH_API bool aws_credentials_is_anonymous(const struct aws_credentials *credentials); /** * Derives an ecc key pair (based on the nist P256 curve) from the access key id and secret access key components * of a set of AWS credentials using an internal key derivation specification. Used to perform sigv4a signing in * the hybrid mode based on AWS credentials. 
* * @param allocator memory allocator to use for all memory allocation * @param credentials AWS credentials to derive the ECC key from using the AWS sigv4a key deriviation specification * @return a new ecc key pair or NULL on failure */ AWS_AUTH_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials( struct aws_allocator *allocator, const struct aws_credentials *credentials); /* * Credentials provider APIs */ /** * Release a reference to a credentials provider * * @param provider provider to decrement the ref count on */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_release(struct aws_credentials_provider *provider); /* * Add a reference to a credentials provider * * @param provider provider to increment the ref count on */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_acquire(struct aws_credentials_provider *provider); /* * Async function for retrieving credentials from a provider * * @param provider credentials provider to source from * @param callback completion callback to invoke when the fetch has completed or failed * @param user_data user data to pass to the completion callback * * @return AWS_OP_SUCCESS if the fetch was successfully started, AWS_OP_ERR otherwise. The completion * callback will only be invoked if-and-only-if the return value was AWS_OP_SUCCESS. * */ AWS_AUTH_API int aws_credentials_provider_get_credentials( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data); /* * Credentials provider variant creation */ /** * Creates a simple provider that just returns a fixed set of credentials * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_static( struct aws_allocator *allocator, const struct aws_credentials_provider_static_options *options); /** * Creates a simple anonymous credentials provider * * @param allocator memory allocator to use for all memory allocation * @param shutdown_options an optional shutdown callback that gets * invoked when the resources used by the provider are no longer in use. * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_anonymous( struct aws_allocator *allocator, const struct aws_credentials_provider_shutdown_options *shutdown_options); /** * Creates a provider that returns credentials sourced from the environment variables: * * AWS_ACCESS_KEY_ID * AWS_SECRET_ACCESS_KEY * AWS_SESSION_TOKEN * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_environment( struct aws_allocator *allocator, const struct aws_credentials_provider_environment_options *options); /** * Creates a provider that functions as a caching decorating of another provider. * * For example, the default chain is implemented as: * * CachedProvider -> ProviderChain(EnvironmentProvider -> ProfileProvider -> ECS/EC2IMD etc...) 
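 *
 * A minimal wrapping sketch (editorial addition; "source_provider" and "allocator" are assumed to already
 * exist, and error handling is omitted):
 *
 *   struct aws_credentials_provider_cached_options cached_options = {
 *       .source = source_provider,
 *       .refresh_time_in_milliseconds = 15 * 60 * 1000,
 *   };
 *   struct aws_credentials_provider *cached_provider =
 *       aws_credentials_provider_new_cached(allocator, &cached_options);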
* * A reference is taken on the target provider * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_cached( struct aws_allocator *allocator, const struct aws_credentials_provider_cached_options *options); /** * Creates a provider that sources credentials from key-value profiles loaded from the aws credentials * file ("~/.aws/credentials" by default) and the aws config file ("~/.aws/config" by * default) * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_profile( struct aws_allocator *allocator, const struct aws_credentials_provider_profile_options *options); /** * Creates a provider that assumes an IAM role via. STS AssumeRole() API. This provider will fetch new credentials * upon each call to aws_credentials_provider_get_credentials(). * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_sts( struct aws_allocator *allocator, const struct aws_credentials_provider_sts_options *options); /** * Creates a provider that sources credentials from an ordered sequence of providers, with the overall result * being from the first provider to return a valid set of credentials * * References are taken on all supplied providers * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_chain( struct aws_allocator *allocator, const struct aws_credentials_provider_chain_options *options); /** * Creates a provider that sources credentials from the ec2 instance metadata service * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_imds( struct aws_allocator *allocator, const struct aws_credentials_provider_imds_options *options); /** * Creates a provider that sources credentials from the ecs role credentials service * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_ecs( struct aws_allocator *allocator, const struct aws_credentials_provider_ecs_options *options); /** * Creates a provider that sources credentials from IoT Core * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. 
*/ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_x509( struct aws_allocator *allocator, const struct aws_credentials_provider_x509_options *options); /** * Creates a provider that sources credentials from STS using AssumeRoleWithWebIdentity * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_sts_web_identity( struct aws_allocator *allocator, const struct aws_credentials_provider_sts_web_identity_options *options); /** * Creates a provider that sources credentials from SSO using a SSOToken. * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_sso( struct aws_allocator *allocator, const struct aws_credentials_provider_sso_options *options); /* * Creates a provider that sources credentials from running an external command or process * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_process( struct aws_allocator *allocator, const struct aws_credentials_provider_process_options *options); /** * Create a credentials provider depends on provided vtable to fetch the credentials. * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_delegate( struct aws_allocator *allocator, const struct aws_credentials_provider_delegate_options *options); /** * Creates a provider that sources credentials from the Cognito-Identity service via an * invocation of the GetCredentialsForIdentity API call. * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_cognito( struct aws_allocator *allocator, const struct aws_credentials_provider_cognito_options *options); /** * Creates a cognito-based provider that has a caching layer wrapped around it * * @param allocator memory allocator to use for all memory allocation * @param options cognito-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_cognito_caching( struct aws_allocator *allocator, const struct aws_credentials_provider_cognito_options *options); /** * Creates the default provider chain used by most AWS SDKs. * * Generally: * * (1) Environment * (2) Profile * (3) STS web identity * (4) (conditional, off by default) ECS * (5) (conditional, on by default) EC2 Instance Metadata * * Support for environmental control of the default provider chain is not yet * implemented. 
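 *
 * A minimal usage sketch (editorial addition; "bootstrap", "tls_ctx", "allocator", "on_credentials_acquired"
 * and "user_data" are assumed to be created elsewhere, and error handling is omitted):
 *
 *   struct aws_credentials_provider_chain_default_options options = {
 *       .bootstrap = bootstrap,
 *       .tls_ctx = tls_ctx,
 *   };
 *   struct aws_credentials_provider *provider =
 *       aws_credentials_provider_new_chain_default(allocator, &options);
 *   // on_credentials_acquired is a user-supplied aws_on_get_credentials_callback_fn
 *   aws_credentials_provider_get_credentials(provider, on_credentials_acquired, user_data);
 *   // ... once the callback has fired and the provider is no longer needed:
 *   aws_credentials_provider_release(provider);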
* * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_credentials_provider_new_chain_default( struct aws_allocator *allocator, const struct aws_credentials_provider_chain_default_options *options); AWS_AUTH_API extern const struct aws_auth_http_system_vtable *g_aws_credentials_provider_http_function_table; AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_CREDENTIALS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/exports.h000066400000000000000000000016271456575232400252320ustar00rootroot00000000000000#ifndef AWS_AUTH_EXPORTS_H #define AWS_AUTH_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_AUTH_USE_IMPORT_EXPORT # ifdef AWS_AUTH_EXPORTS # define AWS_AUTH_API __declspec(dllexport) # else # define AWS_AUTH_API __declspec(dllimport) # endif /* AWS_AUTH_EXPORTS */ # else # define AWS_AUTH_API # endif /*USE_IMPORT_EXPORT */ #else # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_AUTH_USE_IMPORT_EXPORT) && defined(AWS_AUTH_EXPORTS) # define AWS_AUTH_API __attribute__((visibility("default"))) # else # define AWS_AUTH_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */ #endif /* AWS_AUTH_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/000077500000000000000000000000001456575232400250215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/aws_profile.h000066400000000000000000000011061456575232400275020ustar00rootroot00000000000000#ifndef AWS_AUTH_AWS_PROFILE_H #define AWS_AUTH_AWS_PROFILE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_EXTERN_C_BEGIN /** * Returns a set of credentials associated with a profile, based on the properties within the profile */ AWS_AUTH_API struct aws_credentials *aws_credentials_new_from_profile( struct aws_allocator *allocator, const struct aws_profile *profile); AWS_EXTERN_C_END #endif /* AWS_AUTH_AWS_PROFILE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/aws_signing.h000066400000000000000000000101661456575232400275060ustar00rootroot00000000000000#ifndef AWS_AUTH_SIGNING_SIGV4_H #define AWS_AUTH_SIGNING_SIGV4_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct aws_ecc_key_pair; struct aws_signable; struct aws_signing_config_aws; struct aws_signing_result; /* * Private signing API * * Technically this could be folded directly into signing.c but it's useful to be able * to call the individual stages of the signing process for testing. */ /* * A structure that contains all the state related to signing a request for AWS. We pass * this around rather than a million parameters. 
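 *
 * (Editorial sketch of how a signer drives the stages declared below, per the ordering documented later in
 * this header; "allocator", "config", "signable", "on_complete" and "userdata" are assumed to exist and
 * error handling is omitted:
 *
 *   struct aws_signing_state_aws *state =
 *       aws_signing_state_new(allocator, config, signable, on_complete, userdata);
 *   aws_signing_build_canonical_request(state);
 *   aws_signing_build_string_to_sign(state);
 *   aws_signing_build_authorization_value(state);
 *   // ... and aws_signing_state_destroy(state) once the signing result has been consumed.)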
*/ struct aws_signing_state_aws { struct aws_allocator *allocator; const struct aws_signable *signable; aws_signing_complete_fn *on_complete; void *userdata; struct aws_signing_config_aws config; struct aws_byte_buf config_string_buffer; struct aws_signing_result result; int error_code; /* persistent, constructed values that are either/or * (1) consumed by later stages of the signing process, * (2) used in multiple places */ struct aws_byte_buf canonical_request; struct aws_byte_buf string_to_sign; struct aws_byte_buf signed_headers; struct aws_byte_buf canonical_header_block; struct aws_byte_buf payload_hash; struct aws_byte_buf credential_scope; struct aws_byte_buf access_credential_scope; struct aws_byte_buf date; struct aws_byte_buf signature; /* The "payload" to be used in the string-to-sign. * For a normal HTTP request, this is the hashed canonical-request. * But for other types of signing (i.e chunk, event) it's something else. */ struct aws_byte_buf string_to_sign_payload; /* temp buf for writing out strings */ struct aws_byte_buf scratch_buf; char expiration_array[32]; /* serialization of the pre-signing expiration duration value */ }; AWS_EXTERN_C_BEGIN AWS_AUTH_API struct aws_signing_state_aws *aws_signing_state_new( struct aws_allocator *allocator, const struct aws_signing_config_aws *config, const struct aws_signable *signable, aws_signing_complete_fn *on_complete, void *userdata); AWS_AUTH_API void aws_signing_state_destroy(struct aws_signing_state_aws *state); /* * A set of functions that together performs the AWS signing process based * on the algorithm and signature type requested in the shared config. * * These must be called (presumably by the signer) in sequential order: * * (1) aws_signing_build_canonical_request * (2) aws_signing_build_string_to_sign * (3) aws_signing_build_authorization_value */ AWS_AUTH_API int aws_signing_build_canonical_request(struct aws_signing_state_aws *state); AWS_AUTH_API int aws_signing_build_string_to_sign(struct aws_signing_state_aws *state); AWS_AUTH_API int aws_signing_build_authorization_value(struct aws_signing_state_aws *state); /* * Named constants particular to the sigv4 signing algorithm. Can be moved to a public header * as needed. */ AWS_AUTH_API extern const struct aws_string *g_aws_signing_content_header_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_algorithm_query_param_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_credential_query_param_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_date_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_signed_headers_query_param_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_security_token_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_s3session_token_name; AWS_AUTH_API extern const struct aws_string *g_signature_type_sigv4a_http_request; /** * Initializes the internal table of headers that should not be signed */ AWS_AUTH_API int aws_signing_init_signing_tables(struct aws_allocator *allocator); /** * Cleans up the internal table of headers that should not be signed */ AWS_AUTH_API void aws_signing_clean_up_signing_tables(void); AWS_EXTERN_C_END #endif /* AWS_AUTH_SIGNING_SIGV4_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/credentials_utils.h000066400000000000000000000144061456575232400307140ustar00rootroot00000000000000#ifndef AWS_AUTH_CREDENTIALS_PRIVATE_H #define AWS_AUTH_CREDENTIALS_PRIVATE_H /** * Copyright Amazon.com, Inc. or its affiliates. 
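/*
 * Test-style sketch (illustrative only): the staged signing functions declared
 * above are normally driven by the signer itself, but for unit tests they can
 * be invoked directly, in the documented order, on a freshly created signing
 * state. Construction of the config and signable is elided here.
 */
#include <aws/auth/private/aws_signing.h>

static int s_run_signing_stages(
    struct aws_allocator *allocator,
    const struct aws_signing_config_aws *config,
    const struct aws_signable *signable,
    aws_signing_complete_fn *on_complete,
    void *userdata) {

    struct aws_signing_state_aws *state =
        aws_signing_state_new(allocator, config, signable, on_complete, userdata);
    if (state == NULL) {
        return AWS_OP_ERR;
    }

    int result = AWS_OP_ERR;
    /* required order: canonical request -> string to sign -> authorization value */
    if (aws_signing_build_canonical_request(state) == AWS_OP_SUCCESS &&
        aws_signing_build_string_to_sign(state) == AWS_OP_SUCCESS &&
        aws_signing_build_authorization_value(state) == AWS_OP_SUCCESS) {
        result = AWS_OP_SUCCESS;
    }

    aws_signing_state_destroy(state);
    return result;
}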
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include struct aws_http_connection; struct aws_http_connection_manager; struct aws_http_make_request_options; struct aws_http_stream; struct aws_json_value; /* * Internal struct tracking an asynchronous credentials query. * Used by both the cached provider and the test mocks. * */ struct aws_credentials_query { struct aws_linked_list_node node; struct aws_credentials_provider *provider; aws_on_get_credentials_callback_fn *callback; void *user_data; }; typedef struct aws_http_connection_manager *(aws_http_connection_manager_new_fn)( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options); typedef void(aws_http_connection_manager_release_fn)(struct aws_http_connection_manager *manager); typedef void(aws_http_connection_manager_acquire_connection_fn)( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data); typedef int(aws_http_connection_manager_release_connection_fn)( struct aws_http_connection_manager *manager, struct aws_http_connection *connection); typedef struct aws_http_stream *(aws_http_connection_make_request_fn)( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); typedef int(aws_http_stream_activate_fn)(struct aws_http_stream *stream); typedef struct aws_http_connection *(aws_http_stream_get_connection_fn)(const struct aws_http_stream *stream); typedef int(aws_http_stream_get_incoming_response_status_fn)(const struct aws_http_stream *stream, int *out_status); typedef void(aws_http_stream_release_fn)(struct aws_http_stream *stream); typedef void(aws_http_connection_close_fn)(struct aws_http_connection *connection); /* * Table of all downstream http functions used by the credentials providers that make http calls. Allows for simple * mocking. */ struct aws_auth_http_system_vtable { aws_http_connection_manager_new_fn *aws_http_connection_manager_new; aws_http_connection_manager_release_fn *aws_http_connection_manager_release; aws_http_connection_manager_acquire_connection_fn *aws_http_connection_manager_acquire_connection; aws_http_connection_manager_release_connection_fn *aws_http_connection_manager_release_connection; aws_http_connection_make_request_fn *aws_http_connection_make_request; aws_http_stream_activate_fn *aws_http_stream_activate; aws_http_stream_get_connection_fn *aws_http_stream_get_connection; aws_http_stream_get_incoming_response_status_fn *aws_http_stream_get_incoming_response_status; aws_http_stream_release_fn *aws_http_stream_release; aws_http_connection_close_fn *aws_http_connection_close; int (*aws_high_res_clock_get_ticks)(uint64_t *timestamp); }; enum aws_parse_credentials_expiration_format { AWS_PCEF_STRING_ISO_8601_DATE, AWS_PCEF_NUMBER_UNIX_EPOCH, AWS_PCEF_NUMBER_UNIX_EPOCH_MS, }; struct aws_parse_credentials_from_json_doc_options { const char *access_key_id_name; const char *secret_access_key_name; const char *token_name; const char *expiration_name; const char *top_level_object_name; enum aws_parse_credentials_expiration_format expiration_format; bool token_required; bool expiration_required; }; AWS_EXTERN_C_BEGIN /* * Misc. 
credentials-related APIs */ AWS_AUTH_API void aws_credentials_query_init( struct aws_credentials_query *query, struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn *callback, void *user_data); AWS_AUTH_API void aws_credentials_query_clean_up(struct aws_credentials_query *query); AWS_AUTH_API void aws_credentials_provider_init_base( struct aws_credentials_provider *provider, struct aws_allocator *allocator, struct aws_credentials_provider_vtable *vtable, void *impl); AWS_AUTH_API void aws_credentials_provider_destroy(struct aws_credentials_provider *provider); AWS_AUTH_API void aws_credentials_provider_invoke_shutdown_callback(struct aws_credentials_provider *provider); /** * This API is used internally to parse credentials from json document. * It _ONLY_ parses the first level of json structure. json document like * this will produce a valid credentials: { "accessKeyId" : "...", "secretAccessKey" : "...", "Token" : "...", "expiration" : "2019-05-29T00:21:43Z" } * but json document like this won't: { "credentials": { "accessKeyId" : "...", "secretAccessKey" : "...", "sessionToken" : "...", "expiration" : "2019-05-29T00:21:43Z" } } * In general, the keys' names of credentials in json document are: * "AccessKeyId", "SecretAccessKey", "Token" and "Expiration", * but there are cases services use different keys like "sessionToken". * A valid credentials must have "access key" and "secrete access key". * For some services, token and expiration are not required. * So in this API, the keys are provided by callers and this API will * performe a case insensitive search. */ AWS_AUTH_API struct aws_credentials *aws_parse_credentials_from_aws_json_object( struct aws_allocator *allocator, struct aws_json_value *document_root, const struct aws_parse_credentials_from_json_doc_options *options); /** * This API is similar to aws_parse_credentials_from_aws_json_object, * except it accpets a char buffer json document as it's input. */ AWS_AUTH_API struct aws_credentials *aws_parse_credentials_from_json_document( struct aws_allocator *allocator, struct aws_byte_cursor json_document, const struct aws_parse_credentials_from_json_doc_options *options); AWS_AUTH_API enum aws_retry_error_type aws_credentials_provider_compute_retry_error_type(int response_code, int error_code); /* * Loads an aws config profile collection */ AWS_AUTH_API struct aws_profile_collection *aws_load_profile_collection_from_config_file( struct aws_allocator *allocator, struct aws_byte_cursor config_file_name_override); AWS_EXTERN_C_END #endif /* AWS_AUTH_CREDENTIALS_PRIVATE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/key_derivation.h000066400000000000000000000027271456575232400302160ustar00rootroot00000000000000#ifndef AWS_AUTH_KEY_DERIVATION_H #define AWS_AUTH_KEY_DERIVATION_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_byte_buf; AWS_EXTERN_C_BEGIN /* * Some utility functions used while deriving an ecc key from aws credentials. * * The functions operate on the raw bytes of a buffer, treating them as a (base 255) big-endian * integer. */ /** * Compares two byte buffers lexically. The buffers must be of equal size. Lexical comparison from front-to-back * corresponds to arithmetic comparison when the byte sequences are considered to be big-endian large integers. 
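/*
 * Usage sketch (illustrative only): parsing a flat credentials JSON document
 * with the options structure declared above. The key names below mirror the
 * common "AccessKeyId"/"SecretAccessKey"/"Token"/"Expiration" layout described
 * in the comments; adjust them for services that use different keys.
 */
#include <aws/auth/private/credentials_utils.h>
#include <aws/common/byte_buf.h>

static struct aws_credentials *s_parse_flat_credentials_json(
    struct aws_allocator *allocator,
    const char *json_text) {

    struct aws_parse_credentials_from_json_doc_options options = {
        .access_key_id_name = "AccessKeyId",
        .secret_access_key_name = "SecretAccessKey",
        .token_name = "Token",
        .expiration_name = "Expiration",
        .expiration_format = AWS_PCEF_STRING_ISO_8601_DATE,
        .token_required = false,
        .expiration_required = false,
    };

    /* key lookup is case-insensitive and only the top level of the document is inspected */
    return aws_parse_credentials_from_json_document(
        allocator, aws_byte_cursor_from_c_str(json_text), &options);
}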
* The output parameter comparison_result is set to: * -1 if lhs_raw_be_bigint < rhs_raw_be_bigint * 0 if lhs_raw_be_bigint == rhs_raw_be_bigint * 1 if lhs_raw_be_bigint > rhs_raw_be_bigint * * @return AWS_OP_SUCCESS or AWS_OP_ERR * * This is a constant-time operation. */ AWS_AUTH_API int aws_be_bytes_compare_constant_time( const struct aws_byte_buf *lhs_raw_be_bigint, const struct aws_byte_buf *rhs_raw_be_bigint, int *comparison_result); /** * Adds one to a big integer represented as a sequence of bytes (in big-endian order). A maximal (unsigned) value * will roll over to 0. * * This is a constant-time operation. */ AWS_AUTH_API void aws_be_bytes_add_one_constant_time(struct aws_byte_buf *raw_be_bigint); AWS_EXTERN_C_END #endif /* AWS_AUTH_KEY_DERIVATION_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/sigv4_http_request.h000066400000000000000000000004601456575232400310350ustar00rootroot00000000000000#ifndef AWS_AUTH_SIGV4_HTTP_REQUEST_H #define AWS_AUTH_SIGV4_HTTP_REQUEST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_EXTERN_C_BEGIN AWS_EXTERN_C_END #endif /* AWS_AUTH_SIGV4_HTTP_REQUEST_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/sso_token_providers.h000066400000000000000000000073761456575232400313100ustar00rootroot00000000000000#ifndef AWS_AUTH_TOKEN_PROVIDERS_PRIVATE_H #define AWS_AUTH_TOKEN_PROVIDERS_PRIVATE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * Configuration options for a provider that sources sso token information from the aws profile (by default * ~/.aws/config) and token from ~/.aws/sso/cache/.json. */ struct aws_token_provider_sso_profile_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Override of what profile to use to source credentials from ('default' by default) */ struct aws_byte_cursor profile_name_override; /* * Override path to the profile config file (~/.aws/config by default) */ struct aws_byte_cursor config_file_name_override; /** * (Optional) * Use a cached config profile collection. You can also pass a merged collection. * config_file_name_override will be ignored if this option is provided. */ struct aws_profile_collection *config_file_cached; /* For mocking, leave NULL otherwise */ aws_io_clock_fn *system_clock_fn; }; /** * Configuration options for a provider that sources sso token information from the aws profile (by default * ~/.aws/config) and token from ~/.aws/sso/cache/.json. */ struct aws_token_provider_sso_session_options { struct aws_credentials_provider_shutdown_options shutdown_options; /* * Override of what profile to use to source credentials from ('default' by default) */ struct aws_byte_cursor profile_name_override; /* * Override path to the profile config file (~/.aws/config by default) */ struct aws_byte_cursor config_file_name_override; /** * (Optional) * Use a cached config profile collection. You can also pass a merged collection. * config_file_name_override will be ignored if this option is provided. */ struct aws_profile_collection *config_file_cached; /* * Connection bootstrap to use for any network connections made */ struct aws_client_bootstrap *bootstrap; /* * Client TLS context to use for any network connections made. 
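/*
 * Usage sketch (illustrative only): comparing two equal-length big-endian byte
 * buffers and incrementing one of them with the constant-time helpers declared
 * above.
 */
#include <aws/auth/private/key_derivation.h>
#include <aws/common/byte_buf.h>

static int s_advance_candidate_key(struct aws_byte_buf *candidate, const struct aws_byte_buf *limit) {
    int comparison = 0;
    /* both buffers must be the same length for the lexical comparison to be meaningful */
    if (aws_be_bytes_compare_constant_time(candidate, limit, &comparison)) {
        return AWS_OP_ERR;
    }

    if (comparison < 0) {
        /* treat the buffer as a big-endian integer and add one; a maximal value wraps to zero */
        aws_be_bytes_add_one_constant_time(candidate);
    }

    return AWS_OP_SUCCESS;
}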
*/ struct aws_tls_ctx *tls_ctx; /* For mocking, leave NULL otherwise */ aws_io_clock_fn *system_clock_fn; }; AWS_EXTERN_C_BEGIN /** * Creates a provider that sources sso token based credentials from key-value profiles loaded from the aws * config("~/.aws/config" by default) and ~/.aws/sso/cache/.json * This is the legacy way which doesn't support refreshing credentials. * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_token_provider_new_sso_profile( struct aws_allocator *allocator, const struct aws_token_provider_sso_profile_options *options); /** * Creates a provider that sources sso token based credentials from key-value profiles loaded from the aws * config("~/.aws/config" by default) and ~/.aws/sso/cache/.json * Note: Token refresh is not currently supported * * @param allocator memory allocator to use for all memory allocation * @param options provider-specific configuration options * * @return the newly-constructed credentials provider, or NULL if an error occurred. */ AWS_AUTH_API struct aws_credentials_provider *aws_token_provider_new_sso_session( struct aws_allocator *allocator, const struct aws_token_provider_sso_session_options *options); AWS_EXTERN_C_END #endif /* AWS_AUTH_TOKEN_PROVIDERS_PRIVATE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/private/sso_token_utils.h000066400000000000000000000034011456575232400304140ustar00rootroot00000000000000#ifndef AWS_AUTH_TOKEN_PRIVATE_H #define AWS_AUTH_TOKEN_PRIVATE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* structure to represent a parsed sso token */ struct aws_sso_token { struct aws_allocator *allocator; struct aws_string *access_token; struct aws_date_time expiration; }; AWS_EXTERN_C_BEGIN /* Construct token path which is ~/.aws/sso/cache/.json */ AWS_AUTH_API struct aws_string *aws_construct_sso_token_path(struct aws_allocator *allocator, const struct aws_string *input); AWS_AUTH_API void aws_sso_token_destroy(struct aws_sso_token *token); /* Parse `aws_sso_token` from the give file path */ AWS_AUTH_API struct aws_sso_token *aws_sso_token_new_from_file(struct aws_allocator *allocator, const struct aws_string *file_path); /** * Creates a set of AWS credentials based on a token with expiration. 
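/*
 * Usage sketch (illustrative only): resolving the cached sso token file and
 * loading it with the helpers declared above. Whether the input to
 * aws_construct_sso_token_path() is the sso session name or the start url is
 * an assumption here; confirm against the sso token provider sources.
 */
#include <aws/auth/private/sso_token_utils.h>
#include <aws/common/string.h>

static struct aws_sso_token *s_load_cached_sso_token(struct aws_allocator *allocator, const char *session_name) {
    struct aws_string *session = aws_string_new_from_c_str(allocator, session_name);
    if (session == NULL) {
        return NULL;
    }

    struct aws_sso_token *token = NULL;
    struct aws_string *token_path = aws_construct_sso_token_path(allocator, session);
    if (token_path != NULL) {
        /* parses the access token and expiration from the cached json file */
        token = aws_sso_token_new_from_file(allocator, token_path);
        aws_string_destroy(token_path);
    }

    aws_string_destroy(session);
    return token; /* caller releases with aws_sso_token_destroy() */
}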
* * @param allocator memory allocator to use for all memory allocation * @param token token for the credentials * @param expiration_timepoint_in_seconds time at which these credentials expire * @return a new pair of AWS credentials, or NULL */ AWS_AUTH_API struct aws_credentials *aws_credentials_new_token( struct aws_allocator *allocator, struct aws_byte_cursor token, uint64_t expiration_timepoint_in_seconds); /** * Get the token from a set of AWS credentials * * @param credentials credentials to get the token from * @return a byte cursor to the token or an empty byte cursor if there is no token */ AWS_AUTH_API struct aws_byte_cursor aws_credentials_get_token(const struct aws_credentials *credentials); AWS_EXTERN_C_END #endif /* AWS_AUTH_TOKEN_PRIVATE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/signable.h000066400000000000000000000205061456575232400253070ustar00rootroot00000000000000#ifndef AWS_AUTH_SIGNABLE_H #define AWS_AUTH_SIGNABLE_H #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_http_message; struct aws_http_headers; struct aws_input_stream; struct aws_signable; struct aws_string; /* * While not referenced directly in this file, this is the structure expected to be in the property lists */ struct aws_signable_property_list_pair { struct aws_byte_cursor name; struct aws_byte_cursor value; }; typedef int(aws_signable_get_property_fn)( const struct aws_signable *signable, const struct aws_string *name, struct aws_byte_cursor *out_value); typedef int(aws_signable_get_property_list_fn)( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_list); typedef int(aws_signable_get_payload_stream_fn)( const struct aws_signable *signable, struct aws_input_stream **out_input_stream); typedef void(aws_signable_destroy_fn)(struct aws_signable *signable); struct aws_signable_vtable { aws_signable_get_property_fn *get_property; aws_signable_get_property_list_fn *get_property_list; aws_signable_get_payload_stream_fn *get_payload_stream; aws_signable_destroy_fn *destroy; }; /** * Signable is a generic interface for any kind of object that can be cryptographically signed. * * Like signing_result, the signable interface presents * * (1) Properties - A set of key-value pairs * (2) Property Lists - A set of named key-value pair lists * * as well as * * (3) A message payload modeled as a stream * * When creating a signable "subclass" the query interface should map to retrieving * the properties of the underlying object needed by signing algorithms that can operate on it. * * As an example, if a signable implementation wrapped an http request, you would query * request elements like method and uri from the property interface, headers would be queried * via the property list interface, and the request body would map to the payload stream. * * String constants that map to agreed on keys for particular signable types * ("METHOD", "URI", "HEADERS", etc...) are exposed in appropriate header files. */ struct aws_signable { struct aws_allocator *allocator; void *impl; struct aws_signable_vtable *vtable; }; AWS_EXTERN_C_BEGIN /** * Cleans up and frees all resources associated with a signable instance * * @param signable signable object to destroy */ AWS_AUTH_API void aws_signable_destroy(struct aws_signable *signable); /** * Retrieves a property (key-value pair) from a signable. Global property name constants are * included below. 
* * @param signable signable object to retrieve a property from * @param name name of the property to query * @param out_value output parameter for the property's value * * @return AWS_OP_SUCCESS if the property was successfully fetched, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_signable_get_property( const struct aws_signable *signable, const struct aws_string *name, struct aws_byte_cursor *out_value); /** * Retrieves a named property list (list of key-value pairs) from a signable. Global property list name * constants are included below. * * @param signable signable object to retrieve a property list from * @param name name of the property list to fetch * @param out_property_list output parameter for the fetched property list * * @return AWS_OP_SUCCESS if the property list was successfully fetched, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_signable_get_property_list( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_property_list); /** * Retrieves the signable's message payload as a stream. * * @param signable signable to get the payload of * @param out_input_stream output parameter for the payload stream * * @return AWS_OP_SUCCESS if successful, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_signable_get_payload_stream(const struct aws_signable *signable, struct aws_input_stream **out_input_stream); /* * Some global property and property-list name constants */ /** * Name of the property list that wraps the headers of an http request */ AWS_AUTH_API extern const struct aws_string *g_aws_http_headers_property_list_name; /** * Name of the property list that wraps the query params of an http request. Only used by signing_result. * For input to a http signing algorithm, query params are assumed to be part of the uri. */ AWS_AUTH_API extern const struct aws_string *g_aws_http_query_params_property_list_name; /** * Name of the property that holds the method of an http request */ AWS_AUTH_API extern const struct aws_string *g_aws_http_method_property_name; /** * Name of the property that holds the URI of an http request */ AWS_AUTH_API extern const struct aws_string *g_aws_http_uri_property_name; /** * Name of the property that holds the signature value. This is always added to signing results. * Depending on the requested signature type, the signature may be padded or encoded differently: * (1) Header - hex encoding of the binary signature value * (2) QueryParam - hex encoding of the binary signature value * (3) Chunk/Sigv4 - hex encoding of the binary signature value * (4) Chunk/Sigv4a - fixed-size-rhs-padded (with AWS_SIGV4A_SIGNATURE_PADDING_BYTE) hex encoding of the * binary signature value * (5) Event - binary signature value (NYI) */ AWS_AUTH_API extern const struct aws_string *g_aws_signature_property_name; /** * Name of the property that holds the (hex-encoded) signature value of the signing event that preceded this one. * This property must appear on signables that represent chunks or events. */ AWS_AUTH_API extern const struct aws_string *g_aws_previous_signature_property_name; /** * Name of the property that holds the canonical request associated with this signable. * This property must appear on signables that represent an http request's canonical request. */ AWS_AUTH_API extern const struct aws_string *g_aws_canonical_request_property_name; /* * Common signable constructors */ /** * Creates a signable wrapper around an http request. 
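/*
 * Usage sketch (illustrative only): reading the method, uri, and header list
 * back out of a signable through the generic property interface, using the
 * global property-name constants declared above.
 */
#include <aws/auth/signable.h>
#include <aws/common/array_list.h>

static int s_inspect_signable_request_shape(const struct aws_signable *signable) {
    struct aws_byte_cursor method;
    struct aws_byte_cursor uri;
    struct aws_array_list *headers = NULL;

    if (aws_signable_get_property(signable, g_aws_http_method_property_name, &method) ||
        aws_signable_get_property(signable, g_aws_http_uri_property_name, &uri) ||
        aws_signable_get_property_list(signable, g_aws_http_headers_property_list_name, &headers)) {
        return AWS_OP_ERR;
    }

    /* each list entry is an aws_signable_property_list_pair (name/value byte cursors) */
    size_t header_count = (headers != NULL) ? aws_array_list_length(headers) : 0;
    (void)method;
    (void)uri;
    (void)header_count;
    return AWS_OP_SUCCESS;
}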
* * @param allocator memory allocator to use to create the signable * @param request http request to create a signable for * * @return the new signable object, or NULL if failure */ AWS_AUTH_API struct aws_signable *aws_signable_new_http_request(struct aws_allocator *allocator, struct aws_http_message *request); /** * Creates a signable that represents a unit of chunked encoding within an http request. * This can also be used for Transcribe event signing with encoded payload as chunk_data. * * @param allocator memory allocator use to create the signable * @param chunk_data stream representing the data in the chunk; it should be in its final, encoded form * @param previous_signature the signature computed in the most recent signing that preceded this one. It can be * found by copying the "signature" property from the signing_result of that most recent signing. * * @return the new signable object, or NULL if failure */ AWS_AUTH_API struct aws_signable *aws_signable_new_chunk( struct aws_allocator *allocator, struct aws_input_stream *chunk_data, struct aws_byte_cursor previous_signature); /** * Creates a signable wrapper around a set of headers. * * @param allocator memory allocator use to create the signable * @param trailing_headers http headers to create a signable for * @param previous_signature the signature computed in the most recent signing that preceded this one. It can be * found by copying the "signature" property from the signing_result of that most recent signing. * * @return the new signable object, or NULL if failure */ AWS_AUTH_API struct aws_signable *aws_signable_new_trailing_headers( struct aws_allocator *allocator, struct aws_http_headers *trailing_headers, struct aws_byte_cursor previous_signature); /** * Creates a signable that represents a pre-computed canonical request from an http request * @param allocator memory allocator use to create the signable * @param canonical_request text of the canonical request * @return the new signable object, or NULL if failure */ AWS_AUTH_API struct aws_signable *aws_signable_new_canonical_request( struct aws_allocator *allocator, struct aws_byte_cursor canonical_request); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_SIGNABLE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/signing.h000066400000000000000000000124071456575232400251620ustar00rootroot00000000000000#ifndef AWS_AUTH_SIGNER_H #define AWS_AUTH_SIGNER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_ecc_key_pair; struct aws_signable; struct aws_signing_result; /** * Gets called by the signing function when the signing is complete. * * Note that result will be destroyed after this function returns, so either copy it, * or do all necessary adjustments inside the callback. * * When performing event or chunk signing, you will need to copy out the signature value in order * to correctly configure the signable that wraps the event or chunk you want signed next. The signature is * found in the "signature" property on the signing result. This value must be added as the * "previous-signature" property on the next signable. */ typedef void(aws_signing_complete_fn)(struct aws_signing_result *result, int error_code, void *userdata); AWS_EXTERN_C_BEGIN /* * Takes a signable object and a configuration struct and computes the changes to the signable necessary * for compliance with the signer's signing algorithm. 
* * This signing function currently supports only the sigv4 algorithm. * * When using this signing function to sign AWS http requests: * * (1) Do not add the following headers to requests before signing: * x-amz-content-sha256, * X-Amz-Date, * Authorization * * (2) Do not add the following query params to requests before signing: * X-Amz-Signature, * X-Amz-Date, * X-Amz-Credential, * X-Amz-Algorithm, * X-Amz-SignedHeaders * * The signing result will tell exactly what header and/or query params to add to the request * to become a fully-signed AWS http request. * * * When using this signing function to sign chunks: * * (1) Use aws_signable_new_chunk() to create the signable object representing the chunk * * The signing result will include the chunk's signature as the "signature" property. * * */ /** * (Asynchronous) entry point to sign something (a request, a chunk, an event) with an AWS signing process. * Depending on the configuration, the signing process may or may not complete synchronously. * * @param allocator memory allocator to use throughout the signing process * @param signable the thing to be signed. See signable.h for common constructors for signables that * wrap different types. * @param base_config pointer to a signing configuration, currently this must be of type aws_signing_config_aws * @param on_complete completion callback to be invoked when signing has finished * @param user_data opaque user data that will be passed to the completion callback * * @return AWS_OP_SUCCESS if the signing attempt was *initiated* successfully, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_sign_request_aws( struct aws_allocator *allocator, const struct aws_signable *signable, const struct aws_signing_config_base *base_config, aws_signing_complete_fn *on_complete, void *userdata); /** * Test-only API used for cross-library signing verification tests * * Verifies: * (1) The canonical request generated during sigv4a signing of the request matches what is passed in * (2) The signature passed in is a valid ECDSA signature of the hashed string-to-sign derived from the * canonical request * * @param allocator memory allocator to use throughout the signing verification process * @param signable the thing to be signed. See signable.h for common constructors for signables that * wrap different types. * @param base_config pointer to a signing configuration, currently this must be of type aws_signing_config_aws * @param expected_canonical_request_cursor expected result when building the canonical request * @param signature_cursor the actual signature computed from a previous signing of the signable * @param ecc_key_pub_x the x coordinate of the public part of the ecc key to verify the signature * @param ecc_key_pub_y the y coordinate of the public part of the ecc key to verify the signature * * @return AWS_OP_SUCCESS if the signing attempt was *initiated* successfully, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_verify_sigv4a_signing( struct aws_allocator *allocator, const struct aws_signable *signable, const struct aws_signing_config_base *base_config, struct aws_byte_cursor expected_canonical_request_cursor, struct aws_byte_cursor signature_cursor, struct aws_byte_cursor ecc_key_pub_x, struct aws_byte_cursor ecc_key_pub_y); /** * Another helper function to check a computed sigv4a signature. 
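/*
 * Usage sketch (illustrative only): a completion callback that copies the
 * "signature" property out of the signing result before the result is
 * destroyed, as required when that value will seed a subsequent chunk or event
 * signing. The capture struct is hypothetical scaffolding.
 */
#include <aws/auth/signing.h>
#include <aws/auth/signing_result.h>
#include <aws/common/string.h>

struct s_signing_capture {
    struct aws_allocator *allocator;
    struct aws_string *previous_signature;
    int error_code;
};

static void s_on_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) {
    struct s_signing_capture *capture = userdata;
    capture->error_code = error_code;
    if (error_code != AWS_ERROR_SUCCESS) {
        return;
    }

    struct aws_string *signature = NULL;
    aws_signing_result_get_property(result, g_aws_signature_property_name, &signature);
    if (signature != NULL) {
        /* the result is destroyed after this callback returns, so keep our own copy */
        capture->previous_signature = aws_string_new_from_string(capture->allocator, signature);
    }
}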
*/ AWS_AUTH_API int aws_validate_v4a_authorization_value( struct aws_allocator *allocator, struct aws_ecc_key_pair *ecc_key, struct aws_byte_cursor string_to_sign_cursor, struct aws_byte_cursor signature_value_cursor); /** * Removes any padding added to the end of a sigv4a signature. Signature must be hex-encoded. * @param signature signature to remove padding from * @return cursor that ranges over only the valid hex encoding of the sigv4a signature */ AWS_AUTH_API struct aws_byte_cursor aws_trim_padded_sigv4a_signature(struct aws_byte_cursor signature); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_SIGNER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/signing_config.h000066400000000000000000000251561456575232400265140ustar00rootroot00000000000000#ifndef AWS_AUTH_SIGNING_CONFIG_H #define AWS_AUTH_SIGNING_CONFIG_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_credentials; typedef bool(aws_should_sign_header_fn)(const struct aws_byte_cursor *name, void *userdata); /** * A primitive RTTI indicator for signing configuration structs * * There must be one entry per config structure type and it's a fatal error * to put the wrong value in the "config_type" member of your config structure. */ enum aws_signing_config_type { AWS_SIGNING_CONFIG_AWS = 1 }; /** * All signing configuration structs must match this by having * the config_type member as the first member. */ struct aws_signing_config_base { enum aws_signing_config_type config_type; }; /** * What version of the AWS signing process should we use. */ enum aws_signing_algorithm { AWS_SIGNING_ALGORITHM_V4, AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC, AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, }; /** * What sort of signature should be computed from the signable? */ enum aws_signature_type { /** * A signature for a full http request should be computed, with header updates applied to the signing result. */ AWS_ST_HTTP_REQUEST_HEADERS, /** * A signature for a full http request should be computed, with query param updates applied to the signing result. */ AWS_ST_HTTP_REQUEST_QUERY_PARAMS, /** * Compute a signature for a payload chunk. The signable's input stream should be the chunk data and the * signable should contain the most recent signature value (either the original http request or the most recent * chunk) in the "previous-signature" property. */ AWS_ST_HTTP_REQUEST_CHUNK, /** * Compute a signature for an event stream event. The signable's input stream should be the encoded event-stream * message (headers + payload), the signable should contain the most recent signature value (either the original * http request or the most recent event) in the "previous-signature" property. * * This option is only supported for Sigv4 for now. */ AWS_ST_HTTP_REQUEST_EVENT, /** * Compute a signature for an http request via it's already-computed canonical request. Only the authorization * signature header is added to the signing result. */ AWS_ST_CANONICAL_REQUEST_HEADERS, /** * Compute a signature for an http request via it's already-computed canonical request. Only the authorization * signature query param is added to the signing result. */ AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS, /** * Compute a signature for the trailing headers. * the signable should contain the most recent signature value (either the original http request or the most recent * chunk) in the "previous-signature" property. 
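/*
 * Usage sketch (illustrative only): preparing a chunk for
 * AWS_ST_HTTP_REQUEST_CHUNK signing. aws_input_stream_new_from_cursor() is
 * assumed from aws-c-io, and the ownership/lifetime of the stream relative to
 * the signable is an assumption to confirm against the signable sources; the
 * previous signature comes from the prior signing result as described above.
 */
#include <aws/auth/signable.h>
#include <aws/io/stream.h>

static struct aws_signable *s_make_chunk_signable(
    struct aws_allocator *allocator,
    struct aws_byte_cursor encoded_chunk,
    struct aws_byte_cursor previous_signature) {

    struct aws_input_stream *chunk_stream = aws_input_stream_new_from_cursor(allocator, &encoded_chunk);
    if (chunk_stream == NULL) {
        return NULL;
    }

    /* wraps the encoded chunk data and the prior signature ("previous-signature" property) */
    struct aws_signable *chunk_signable = aws_signable_new_chunk(allocator, chunk_stream, previous_signature);
    if (chunk_signable == NULL) {
        aws_input_stream_release(chunk_stream);
    }
    return chunk_signable;
}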
*/ AWS_ST_HTTP_REQUEST_TRAILING_HEADERS }; /** * The SHA-256 of an empty string: * 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' * For use with `aws_signing_config_aws.signed_body_value`. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_empty_sha256; /** * 'UNSIGNED-PAYLOAD' * For use with `aws_signing_config_aws.signed_body_value`. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_unsigned_payload; /** * 'STREAMING-UNSIGNED-PAYLOAD-TRAILER' * For use with `aws_signing_config_aws.signed_body_value`. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_unsigned_payload_trailer; /** * 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' * For use with `aws_signing_config_aws.signed_body_value`. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload; /** * 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER' * For use with `aws_signing_config_aws.signed_body_value`. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload_trailer; /** * 'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD' * For use with `aws_signing_config_aws.signed_body_value`. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload; /** * 'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER' * For use with `aws_signing_config_aws.signed_body_value`. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload_trailer; /** * 'STREAMING-AWS4-HMAC-SHA256-EVENTS' * For use with `aws_signing_config_aws.signed_body_value`. * * Event signing is only supported for Sigv4 for now. */ AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_events; /** * Controls if signing adds a header containing the canonical request's body value */ enum aws_signed_body_header_type { /** * Do not add a header */ AWS_SBHT_NONE, /** * Add the "x-amz-content-sha256" header with the canonical request's body value */ AWS_SBHT_X_AMZ_CONTENT_SHA256, }; /** * A configuration structure for use in AWS-related signing. Currently covers sigv4 only, but is not required to. */ struct aws_signing_config_aws { /** * What kind of config structure is this? */ enum aws_signing_config_type config_type; /** * What signing algorithm to use. */ enum aws_signing_algorithm algorithm; /** * What sort of signature should be computed? */ enum aws_signature_type signature_type; /* * Region-related configuration * (1) If Sigv4, the region to sign against * (2) If Sigv4a, the value of the X-amzn-region-set header (added in signing) */ struct aws_byte_cursor region; /** * name of service to sign a request for */ struct aws_byte_cursor service; /** * Raw date to use during the signing process. */ struct aws_date_time date; /** * Optional function to control which headers are a part of the canonical request. * Skipping auth-required headers will result in an unusable signature. Headers injected by the signing process * are not skippable. * * This function does not override the internal check function (x-amzn-trace-id, user-agent), but rather * supplements it. In particular, a header will get signed if and only if it returns true to both * the internal check (skips x-amzn-trace-id, user-agent) and this function (if defined). */ aws_should_sign_header_fn *should_sign_header; void *should_sign_header_ud; /* * Put all flags in here at the end. 
If this grows, stay aware of bit-space overflow and ABI compatibilty. */ struct { /** * We assume the uri will be encoded once in preparation for transmission. Certain services * do not decode before checking signature, requiring us to actually double-encode the uri in the canonical * request in order to pass a signature check. */ uint32_t use_double_uri_encode : 1; /** * Controls whether or not the uri paths should be normalized when building the canonical request */ uint32_t should_normalize_uri_path : 1; /** * Controls whether "X-Amz-Security-Token" is omitted from the canonical request. * "X-Amz-Security-Token" is added during signing, as a header or * query param, when credentials have a session token. * If false (the default), this parameter is included in the canonical request. * If true, this parameter is still added, but omitted from the canonical request. */ uint32_t omit_session_token : 1; } flags; /** * Optional string to use as the canonical request's body value. * If string is empty, a value will be calculated from the payload during signing. * Typically, this is the SHA-256 of the (request/chunk/event) payload, written as lowercase hex. * If this has been precalculated, it can be set here. Special values used by certain services can also be set * (e.g. "UNSIGNED-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-EVENTS"). */ struct aws_byte_cursor signed_body_value; /** * Controls what body "hash" header, if any, should be added to the canonical request and the signed request: * AWS_SBHT_NONE - no header should be added * AWS_SBHT_X_AMZ_CONTENT_SHA256 - the body "hash" should be added in the X-Amz-Content-Sha256 header */ enum aws_signed_body_header_type signed_body_header; /* * Signing key control: * * If "credentials" is valid: * use it * Else if "credentials_provider" is valid * query credentials from the provider * If sigv4a is being used * use the ecc-based credentials derived from the query result * Else * use the query result * Else * fail * */ /* * AWS Credentials to sign with. If Sigv4a is the algorithm and the credentials supplied are not ecc-based, * a temporary ecc-based credentials object will be built and used instead. */ const struct aws_credentials *credentials; /* * AWS credentials provider to fetch credentials from. If the signing algorithm is asymmetric sigv4, then the * ecc-based credentials will be derived from the fetched credentials. */ struct aws_credentials_provider *credentials_provider; /** * If non-zero and the signing transform is query param, then signing will add X-Amz-Expires to the query * string, equal to the value specified here. If this value is zero or if header signing is being used then * this parameter has no effect. */ uint64_t expiration_in_seconds; }; AWS_EXTERN_C_BEGIN /** * Returns a c-string that describes the supplied signing algorithm * * @param algorithm signing algorithm to get a friendly string name for * * @return friendly string name of the supplied algorithm, or "Unknown" if the algorithm is not recognized */ AWS_AUTH_API const char *aws_signing_algorithm_to_string(enum aws_signing_algorithm algorithm); /** * Checks a signing configuration for invalid settings combinations. 
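/*
 * Usage sketch (illustrative only): filling out a header-based sigv4
 * configuration and kicking off asynchronous signing of an http request via
 * aws_sign_request_aws(). Region and service values are placeholders, the
 * validation helper is the one declared just below, and whether the signable
 * must outlive the asynchronous operation is an assumption to verify.
 */
#include <aws/auth/signable.h>
#include <aws/auth/signing.h>
#include <aws/auth/signing_config.h>
#include <aws/common/byte_buf.h>
#include <aws/common/date_time.h>

static int s_sign_http_request(
    struct aws_allocator *allocator,
    struct aws_http_message *request,
    struct aws_credentials_provider *credentials_provider,
    aws_signing_complete_fn *on_complete,
    void *userdata) {

    struct aws_signable *signable = aws_signable_new_http_request(allocator, request);
    if (signable == NULL) {
        return AWS_OP_ERR;
    }

    struct aws_signing_config_aws config;
    AWS_ZERO_STRUCT(config);
    config.config_type = AWS_SIGNING_CONFIG_AWS;
    config.algorithm = AWS_SIGNING_ALGORITHM_V4;
    config.signature_type = AWS_ST_HTTP_REQUEST_HEADERS;
    config.region = aws_byte_cursor_from_c_str("us-east-1"); /* placeholder */
    config.service = aws_byte_cursor_from_c_str("s3");       /* placeholder */
    config.credentials_provider = credentials_provider;
    config.signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256;
    aws_date_time_init_now(&config.date);

    int result = AWS_OP_ERR;
    if (aws_validate_aws_signing_config_aws(&config) == AWS_OP_SUCCESS) {
        /* on success, on_complete eventually receives the signing result to apply to the request */
        result = aws_sign_request_aws(
            allocator, signable, (const struct aws_signing_config_base *)&config, on_complete, userdata);
    }

    /* destroyed here only on the synchronous failure path (see lifetime caveat above) */
    if (result != AWS_OP_SUCCESS) {
        aws_signable_destroy(signable);
    }
    return result;
}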
* * @param config signing configuration to validate * * @return - AWS_OP_SUCCESS if the configuration is valid, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_validate_aws_signing_config_aws(const struct aws_signing_config_aws *config); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_SIGNING_CONFIG_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/include/aws/auth/signing_result.h000066400000000000000000000133061456575232400265570ustar00rootroot00000000000000#ifndef AWS_AUTH_SIGNING_RESULT_H #define AWS_AUTH_SIGNING_RESULT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_array_list; struct aws_byte_cursor; struct aws_http_message; struct aws_string; struct aws_signing_result_property { struct aws_string *name; struct aws_string *value; }; /** * A structure for tracking all the signer-requested changes to a signable. Interpreting * these changes is signing-algorithm specific. * * A signing result consists of * * (1) Properties - A set of key-value pairs * (2) Property Lists - A set of named key-value pair lists * * The hope is that these two generic structures are enough to model the changes required * by any generic message-signing algorithm. * * Note that the key-value pairs of a signing_result are different types (but same intent) as * the key-value pairs in the signable interface. This is because the signing result stands alone * and owns its own copies of all values, whereas a signable can wrap an existing object and thus * use non-owning references (like byte cursors) if appropriate to its implementation. */ struct aws_signing_result { struct aws_allocator *allocator; struct aws_hash_table properties; struct aws_hash_table property_lists; }; AWS_EXTERN_C_BEGIN /** * Initialize a signing result to its starting state * * @param result signing result to initialize * @param allocator allocator to use for all memory allocation * * @return AWS_OP_SUCCESS if initialization was successful, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_signing_result_init(struct aws_signing_result *result, struct aws_allocator *allocator); /** * Clean up all resources held by the signing result * * @param result signing result to clean up resources for */ AWS_AUTH_API void aws_signing_result_clean_up(struct aws_signing_result *result); /** * Sets the value of a property on a signing result * * @param result signing result to modify * @param property_name name of the property to set * @param property_value value that the property should assume * * @return AWS_OP_SUCCESS if the set was successful, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_signing_result_set_property( struct aws_signing_result *result, const struct aws_string *property_name, const struct aws_byte_cursor *property_value); /** * Gets the value of a property on a signing result * * @param result signing result to query from * @param property_name name of the property to query the value of * @param out_property_value output parameter for the property value * * @return AWS_OP_SUCCESS if the get was successful, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_signing_result_get_property( const struct aws_signing_result *result, const struct aws_string *property_name, struct aws_string **out_property_value); /** * Adds a key-value pair to a named property list. If the named list does not yet exist, it will be created as * an empty list before the pair is added. No uniqueness checks are made against existing pairs. 
* * @param result signing result to modify * @param list_name name of the list to add the property key-value pair to * @param property_name key value of the key-value pair to append * @param property_value property value of the key-value pair to append * * @return AWS_OP_SUCCESS if the operation was successful, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_signing_result_append_property_list( struct aws_signing_result *result, const struct aws_string *list_name, const struct aws_byte_cursor *property_name, const struct aws_byte_cursor *property_value); /** * Gets a named property list on the signing result. If the list does not exist, *out_list will be set to null * * @param result signing result to query * @param list_name name of the list of key-value pairs to get * @param out_list output parameter for the list of key-value pairs * */ AWS_AUTH_API void aws_signing_result_get_property_list( const struct aws_signing_result *result, const struct aws_string *list_name, struct aws_array_list **out_list); /** * Looks for a property within a named property list on the signing result. If the list does not exist, or the property * does not exist within the list, *out_value will be set to NULL. * * @param result signing result to query * @param list_name name of the list of key-value pairs to search through for the property * @param property_name name of the property to search for within the list * @param out_value output parameter for the property value, if found * */ AWS_AUTH_API void aws_signing_result_get_property_value_in_property_list( const struct aws_signing_result *result, const struct aws_string *list_name, const struct aws_string *property_name, struct aws_string **out_value); /* * Specific implementation that applies a signing result to a mutable http request * * @param request http request to apply the signing result to * @param allocator memory allocator to use for all memory allocation * @param result signing result to apply to the request * * @return AWS_OP_SUCCESS if the application operation was successful, AWS_OP_ERR otherwise */ AWS_AUTH_API int aws_apply_signing_result_to_http_request( struct aws_http_message *request, struct aws_allocator *allocator, const struct aws_signing_result *result); AWS_AUTH_API extern const struct aws_string *g_aws_signing_authorization_header_name; AWS_AUTH_API extern const struct aws_string *g_aws_signing_authorization_query_param_name; AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_AUTH_SIGNING_RESULT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/000077500000000000000000000000001456575232400214715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/auth.c000066400000000000000000000166041456575232400226050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
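/*
 * Usage sketch (illustrative only): applying a signing result to the original
 * request and, separately, pulling the generated Authorization header back out
 * of the result's header property list using the constants declared above.
 */
#include <aws/auth/signable.h>
#include <aws/auth/signing_result.h>

static int s_apply_and_inspect_result(
    struct aws_http_message *request,
    struct aws_allocator *allocator,
    const struct aws_signing_result *result) {

    if (aws_apply_signing_result_to_http_request(request, allocator, result)) {
        return AWS_OP_ERR;
    }

    struct aws_string *authorization_value = NULL;
    aws_signing_result_get_property_value_in_property_list(
        result,
        g_aws_http_headers_property_list_name,
        g_aws_signing_authorization_header_name,
        &authorization_value);

    /* authorization_value is NULL if the list or header is absent; it is assumed to be
     * owned by the result and must not be freed by the caller */
    (void)authorization_value;
    return AWS_OP_SUCCESS;
}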
*/ #include #include #include #include #include #include #include #define AWS_DEFINE_ERROR_INFO_AUTH(CODE, STR) AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-auth") /* clang-format off */ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM, "Attempt to sign an http request with an unsupported version of the AWS signing protocol"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION, "Attempt to sign an http request with a signing configuration unrecognized by the invoked signer"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_NO_CREDENTIALS, "Attempt to sign an http request without credentials"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM, "Attempt to sign an http request that includes a query param that signing may add"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER, "Attempt to sign an http request that includes a header that signing may add"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_INVALID_CONFIGURATION, "Attempt to sign an http request with an invalid signing configuration"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_ENVIRONMENT, "Valid credentials could not be sourced from process environment"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_DELEGATE, "Valid credentials could not be sourced from the provided vtable"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_PROFILE_SOURCE_FAILURE, "Valid credentials could not be sourced by a profile provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_IMDS_SOURCE_FAILURE, "Valid credentials could not be sourced by the IMDS provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE, "Valid credentials could not be sourced by the STS provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE, "Unsuccessful status code returned from credentials-fetching http request"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE, "Invalid response document encountered while querying credentials via http"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_ECS_SOURCE_FAILURE, "Valid credentials could not be sourced by the ECS provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_X509_SOURCE_FAILURE, "Valid credentials could not be sourced by the X509 provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_PROCESS_SOURCE_FAILURE, "Valid credentials could not be sourced by the process provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE, "Valid credentials could not be sourced by the sts web identity provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE, "Attempt to sign using an unusupported signature type"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE, "Attempt to sign a streaming item without supplying a previous signature"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGNING_INVALID_CREDENTIALS, "Attempt to perform a signing operation with invalid credentials"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CANONICAL_REQUEST_MISMATCH, "Expected canonical request did not match the computed canonical request"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SIGV4A_SIGNATURE_VALIDATION_FAILURE, "The supplied sigv4a signature was not a valid signature for the hashed string to sign"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_COGNITO_SOURCE_FAILURE, 
"Valid credentials could not be sourced by the cognito provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_DELEGATE_FAILURE, "Valid credentials could not be sourced by the delegate provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE, "Valid token could not be sourced by the sso token provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SSO_TOKEN_INVALID, "Token sourced by the sso token provider is invalid."), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_SSO_TOKEN_EXPIRED, "Token sourced by the sso token provider is expired."), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE, "Valid credentials could not be sourced by the sso credentials provider"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE, "Failed to source the IMDS resource"), AWS_DEFINE_ERROR_INFO_AUTH( AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE, "Failed to resolve credentials because the profile contains a cycle in the assumeRole chain.") }; /* clang-format on */ static struct aws_error_info_list s_error_list = { .error_list = s_errors, .count = sizeof(s_errors) / sizeof(struct aws_error_info), }; static struct aws_log_subject_info s_auth_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO( AWS_LS_AUTH_GENERAL, "AuthGeneral", "Subject for aws-c-auth logging that defies categorization."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_AUTH_PROFILE, "AuthProfile", "Subject for config profile related logging."), DEFINE_LOG_SUBJECT_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "AuthCredentialsProvider", "Subject for credentials provider related logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_AUTH_SIGNING, "AuthSigning", "Subject for AWS request signing logging."), }; static struct aws_log_subject_info_list s_auth_log_subject_list = { .subject_list = s_auth_log_subject_infos, .count = AWS_ARRAY_SIZE(s_auth_log_subject_infos), }; static bool s_library_initialized = false; static struct aws_allocator *s_library_allocator = NULL; void aws_auth_library_init(struct aws_allocator *allocator) { if (s_library_initialized) { return; } if (allocator) { s_library_allocator = allocator; } else { s_library_allocator = aws_default_allocator(); } aws_sdkutils_library_init(s_library_allocator); aws_cal_library_init(s_library_allocator); aws_http_library_init(s_library_allocator); aws_register_error_info(&s_error_list); aws_register_log_subject_info_list(&s_auth_log_subject_list); AWS_FATAL_ASSERT(aws_signing_init_signing_tables(allocator) == AWS_OP_SUCCESS); s_library_initialized = true; } void aws_auth_library_clean_up(void) { if (!s_library_initialized) { return; } s_library_initialized = false; aws_signing_clean_up_signing_tables(); aws_unregister_log_subject_info_list(&s_auth_log_subject_list); aws_unregister_error_info(&s_error_list); aws_http_library_clean_up(); aws_cal_library_clean_up(); aws_sdkutils_library_clean_up(); s_library_allocator = NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/aws_imds_client.c000066400000000000000000002145471456575232400250160ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) # pragma warning(disable : 4232) #endif /* _MSC_VER */ /* instance role credentials body response is currently ~ 1300 characters + name length */ #define IMDS_RESPONSE_SIZE_INITIAL 2048 #define IMDS_RESPONSE_TOKEN_SIZE_INITIAL 64 #define IMDS_RESPONSE_SIZE_LIMIT 65535 #define IMDS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2 #define IMDS_DEFAULT_RETRIES 1 AWS_STATIC_STRING_FROM_LITERAL(s_imds_host, "169.254.169.254"); enum imds_token_state { AWS_IMDS_TS_INVALID, AWS_IMDS_TS_VALID, AWS_IMDS_TS_UPDATE_IN_PROGRESS, }; enum imds_token_copy_result { /* Token is valid and copied to requester */ AWS_IMDS_TCR_SUCCESS, /* Token is updating, so requester is added in waiting queue */ AWS_IMDS_TCR_WAITING_IN_QUEUE, /* unexpected error,like mem allocation error */ AWS_IMDS_TCR_UNEXPECTED_ERROR, }; struct imds_token_query { struct aws_linked_list_node node; void *user_data; }; struct aws_imds_client { struct aws_allocator *allocator; struct aws_http_connection_manager *connection_manager; struct aws_retry_strategy *retry_strategy; const struct aws_auth_http_system_vtable *function_table; struct aws_imds_client_shutdown_options shutdown_options; /* will be set to true by default, means using IMDS V2 */ bool token_required; struct aws_byte_buf cached_token; uint64_t cached_token_expiration_timestamp; enum imds_token_state token_state; struct aws_linked_list pending_queries; struct aws_mutex token_lock; struct aws_condition_variable token_signal; bool ec2_metadata_v1_disabled; struct aws_atomic_var ref_count; }; static void s_aws_imds_client_destroy(struct aws_imds_client *client) { if (!client) { return; } /** * s_aws_imds_client_destroy is only called after all in-flight requests are finished, * thus nothing is going to try and access retry_strategy again at this point. 
*/ aws_retry_strategy_release(client->retry_strategy); aws_condition_variable_clean_up(&client->token_signal); aws_mutex_clean_up(&client->token_lock); aws_byte_buf_clean_up(&client->cached_token); client->function_table->aws_http_connection_manager_release(client->connection_manager); /* freeing the client takes place in the shutdown callback below */ } static void s_on_connection_manager_shutdown(void *user_data) { struct aws_imds_client *client = user_data; if (client && client->shutdown_options.shutdown_callback) { client->shutdown_options.shutdown_callback(client->shutdown_options.shutdown_user_data); } aws_mem_release(client->allocator, client); } void aws_imds_client_release(struct aws_imds_client *client) { if (!client) { return; } size_t old_value = aws_atomic_fetch_sub(&client->ref_count, 1); if (old_value == 1) { s_aws_imds_client_destroy(client); } } void aws_imds_client_acquire(struct aws_imds_client *client) { aws_atomic_fetch_add(&client->ref_count, 1); } struct aws_imds_client *aws_imds_client_new( struct aws_allocator *allocator, const struct aws_imds_client_options *options) { if (!options->bootstrap) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Client bootstrap is required for querying IMDS"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_imds_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_imds_client)); if (!client) { return NULL; } if (aws_mutex_init(&client->token_lock)) { goto on_error; } if (aws_condition_variable_init(&client->token_signal)) { goto on_error; } if (aws_byte_buf_init(&client->cached_token, allocator, IMDS_RESPONSE_TOKEN_SIZE_INITIAL)) { goto on_error; } aws_linked_list_init(&client->pending_queries); aws_atomic_store_int(&client->ref_count, 1); client->allocator = allocator; client->function_table = options->function_table ? options->function_table : g_aws_credentials_provider_http_function_table; client->token_required = options->imds_version == IMDS_PROTOCOL_V1 ? false : true; client->ec2_metadata_v1_disabled = options->ec2_metadata_v1_disabled; client->shutdown_options = options->shutdown_options; struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV4; socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert( IMDS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); struct aws_http_connection_manager_options manager_options; AWS_ZERO_STRUCT(manager_options); manager_options.bootstrap = options->bootstrap; manager_options.initial_window_size = IMDS_RESPONSE_SIZE_LIMIT; manager_options.socket_options = &socket_options; manager_options.tls_connection_options = NULL; manager_options.host = aws_byte_cursor_from_string(s_imds_host); manager_options.port = 80; manager_options.max_connections = 10; manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown; manager_options.shutdown_complete_user_data = client; client->connection_manager = client->function_table->aws_http_connection_manager_new(allocator, &manager_options); if (!client->connection_manager) { goto on_error; } if (options->retry_strategy) { client->retry_strategy = options->retry_strategy; aws_retry_strategy_acquire(client->retry_strategy); } else { struct aws_exponential_backoff_retry_options retry_options = { .el_group = options->bootstrap->event_loop_group, .max_retries = IMDS_DEFAULT_RETRIES, }; /* exponential backoff is plenty here. 
We're hitting a local endpoint and do not run the risk of bringing * down more than the local VM. */ client->retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &retry_options); } if (!client->retry_strategy) { goto on_error; } return client; on_error: s_aws_imds_client_destroy(client); return NULL; } /* * Tracking structure for each outstanding async query to an imds client */ struct imds_user_data { /* immutable post-creation */ struct aws_allocator *allocator; struct aws_imds_client *client; aws_imds_client_on_get_resource_callback_fn *original_callback; void *original_user_data; /* mutable */ struct aws_http_connection *connection; struct aws_http_message *request; struct aws_byte_buf current_result; struct aws_byte_buf imds_token; struct aws_string *resource_path; struct aws_retry_token *retry_token; /* * initial value is copy of client->token_required, * will be adapted according to response. */ bool imds_token_required; /* Indicate the request is a fallback from a failure call. */ bool is_fallback_request; bool is_imds_token_request; bool ec2_metadata_v1_disabled; int status_code; int error_code; struct aws_atomic_var ref_count; }; static void s_user_data_destroy(struct imds_user_data *user_data) { if (user_data == NULL) { return; } struct aws_imds_client *client = user_data->client; if (user_data->connection) { client->function_table->aws_http_connection_manager_release_connection( client->connection_manager, user_data->connection); } aws_byte_buf_clean_up(&user_data->current_result); aws_byte_buf_clean_up(&user_data->imds_token); aws_string_destroy(user_data->resource_path); if (user_data->request) { aws_http_message_destroy(user_data->request); } aws_retry_token_release(user_data->retry_token); aws_imds_client_release(client); aws_mem_release(user_data->allocator, user_data); } static struct imds_user_data *s_user_data_new( struct aws_imds_client *client, struct aws_byte_cursor resource_path, aws_imds_client_on_get_resource_callback_fn *callback, void *user_data) { struct imds_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_user_data)); if (!wrapped_user_data) { goto on_error; } wrapped_user_data->allocator = client->allocator; wrapped_user_data->client = client; aws_imds_client_acquire(client); wrapped_user_data->original_user_data = user_data; wrapped_user_data->original_callback = callback; if (aws_byte_buf_init(&wrapped_user_data->current_result, client->allocator, IMDS_RESPONSE_SIZE_INITIAL)) { goto on_error; } if (aws_byte_buf_init(&wrapped_user_data->imds_token, client->allocator, IMDS_RESPONSE_TOKEN_SIZE_INITIAL)) { goto on_error; } wrapped_user_data->resource_path = aws_string_new_from_array(client->allocator, resource_path.ptr, resource_path.len); if (!wrapped_user_data->resource_path) { goto on_error; } wrapped_user_data->imds_token_required = client->token_required; wrapped_user_data->ec2_metadata_v1_disabled = client->ec2_metadata_v1_disabled; aws_atomic_store_int(&wrapped_user_data->ref_count, 1); return wrapped_user_data; on_error: s_user_data_destroy(wrapped_user_data); return NULL; } static void s_user_data_acquire(struct imds_user_data *user_data) { if (user_data == NULL) { return; } aws_atomic_fetch_add(&user_data->ref_count, 1); } static void s_user_data_release(struct imds_user_data *user_data) { if (!user_data) { return; } size_t old_value = aws_atomic_fetch_sub(&user_data->ref_count, 1); if (old_value == 1) { s_user_data_destroy(user_data); } } static void s_reset_scratch_user_data(struct 
imds_user_data *user_data) { user_data->current_result.len = 0; user_data->status_code = 0; if (user_data->request) { aws_http_message_destroy(user_data->request); user_data->request = NULL; } } static enum imds_token_copy_result s_copy_token_safely(struct imds_user_data *user_data); static void s_update_token_safely( struct aws_imds_client *client, struct aws_byte_buf *token, bool token_required, uint64_t expire_timestamp); static void s_query_complete(struct imds_user_data *user_data); static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data); static void s_on_retry_token_acquired(struct aws_retry_strategy *, int, struct aws_retry_token *, void *); static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)data; struct imds_user_data *imds_user_data = user_data; struct aws_imds_client *client = imds_user_data->client; if (data->len + imds_user_data->current_result.len > IMDS_RESPONSE_SIZE_LIMIT) { client->function_table->aws_http_connection_close(imds_user_data->connection); AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client query response exceeded maximum allowed length", (void *)client); return aws_raise_error(AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE); } if (aws_byte_buf_append_dynamic(&imds_user_data->current_result, data)) { client->function_table->aws_http_connection_close(imds_user_data->connection); AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client query error appending response", (void *)client); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_on_incoming_headers_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)header_array; (void)num_headers; if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) { return AWS_OP_SUCCESS; } struct imds_user_data *imds_user_data = user_data; struct aws_imds_client *client = imds_user_data->client; if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { if (imds_user_data->status_code == 0) { if (client->function_table->aws_http_stream_get_incoming_response_status( stream, &imds_user_data->status_code)) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to get http status code", (void *)client); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client query received http status code %d for requester %p.", (void *)client, imds_user_data->status_code, user_data); } } return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_imds_host_header, "Host"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_accept_header, "Accept"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_accept_header_value, "*/*"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_user_agent_header, "User-Agent"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_user_agent_header_value, "aws-sdk-crt/aws-imds-client"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_h1_0_keep_alive_header, "Connection"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_h1_0_keep_alive_header_value, "keep-alive"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_resource_path, "/latest/api/token"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_ttl_header, "x-aws-ec2-metadata-token-ttl-seconds"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_header, "x-aws-ec2-metadata-token"); AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_ttl_default_value, "21600"); /* s_imds_token_ttl_default_value - 5secs for refreshing the cached token */ static const uint64_t s_imds_token_ttl_secs = 21595; static void 
s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data); static int s_make_imds_http_query( struct imds_user_data *user_data, const struct aws_byte_cursor *verb, const struct aws_byte_cursor *uri, const struct aws_http_header *headers, size_t header_count) { AWS_FATAL_ASSERT(user_data->connection); struct aws_imds_client *client = user_data->client; struct aws_http_stream *stream = NULL; struct aws_http_message *request = aws_http_message_new_request(user_data->allocator); if (request == NULL) { return AWS_OP_ERR; } if (headers && aws_http_message_add_header_array(request, headers, header_count)) { goto on_error; } struct aws_http_header host_header = { .name = aws_byte_cursor_from_string(s_imds_host_header), .value = aws_byte_cursor_from_string(s_imds_host), }; if (aws_http_message_add_header(request, host_header)) { goto on_error; } struct aws_http_header accept_header = { .name = aws_byte_cursor_from_string(s_imds_accept_header), .value = aws_byte_cursor_from_string(s_imds_accept_header_value), }; if (aws_http_message_add_header(request, accept_header)) { goto on_error; } struct aws_http_header user_agent_header = { .name = aws_byte_cursor_from_string(s_imds_user_agent_header), .value = aws_byte_cursor_from_string(s_imds_user_agent_header_value), }; if (aws_http_message_add_header(request, user_agent_header)) { goto on_error; } struct aws_http_header keep_alive_header = { .name = aws_byte_cursor_from_string(s_imds_h1_0_keep_alive_header), .value = aws_byte_cursor_from_string(s_imds_h1_0_keep_alive_header_value), }; if (aws_http_message_add_header(request, keep_alive_header)) { goto on_error; } if (aws_http_message_set_request_method(request, *verb)) { goto on_error; } if (aws_http_message_set_request_path(request, *uri)) { goto on_error; } user_data->request = request; struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .on_response_headers = s_on_incoming_headers_fn, .on_response_header_block_done = NULL, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, .response_first_byte_timeout_ms = 1000, .user_data = user_data, .request = request, }; /* for test with mocking http stack where make request finishes immediately and releases client before stream activate call */ s_user_data_acquire(user_data); stream = client->function_table->aws_http_connection_make_request(user_data->connection, &request_options); if (!stream || client->function_table->aws_http_stream_activate(stream)) { goto on_error; } s_user_data_release(user_data); return AWS_OP_SUCCESS; on_error: user_data->client->function_table->aws_http_stream_release(stream); aws_http_message_destroy(request); user_data->request = NULL; s_user_data_release(user_data); return AWS_OP_ERR; } /* * Process the http response from the token put request. */ static void s_client_on_token_response(struct imds_user_data *user_data) { /* Gets 400 means token is required but the request itself failed. 
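 *
 * A condensed view of the branches implemented in this function (restating the
 * code below, not adding behavior):
 *
 *     400 Bad Request            -> a token is required, but this request failed;
 *                                   publish "no token, token still required"
 *     200 OK, non-empty body     -> trim the body, cache it as the token, and
 *                                   expire it s_imds_token_ttl_secs from now
 *     anything else, v1 disabled -> publish "no token, token still required"
 *     anything else, v1 allowed  -> fall back to IMDSv1 (token no longer required)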
*/ if (user_data->status_code == AWS_HTTP_STATUS_CODE_400_BAD_REQUEST) { s_update_token_safely(user_data->client, NULL, true, 0 /*expire_timestamp*/); return; } if (user_data->status_code == AWS_HTTP_STATUS_CODE_200_OK && user_data->current_result.len != 0) { AWS_LOGF_DEBUG(AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client has fetched the token", (void *)user_data->client); struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&(user_data->current_result)); aws_byte_cursor_trim_pred(&cursor, aws_char_is_space); aws_byte_buf_reset(&user_data->imds_token, true /*zero contents*/); if (aws_byte_buf_append_and_update(&user_data->imds_token, &cursor)) { s_update_token_safely(user_data->client, NULL /*token*/, true /*token_required*/, 0 /*expire_timestamp*/); return; } /* The token was ALWAYS last for 6 hours, 21600 secs. Use current timestamp plus 21595 secs as the expiration * timestamp for current token */ uint64_t current = 0; user_data->client->function_table->aws_high_res_clock_get_ticks(¤t); uint64_t expire_timestamp = aws_add_u64_saturating( current, aws_timestamp_convert(s_imds_token_ttl_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); AWS_ASSERT(cursor.len != 0); s_update_token_safely(user_data->client, &user_data->imds_token, true /*token_required*/, expire_timestamp); } else if (user_data->ec2_metadata_v1_disabled) { AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to fetch token for requester %p, and fall back to v1 is disabled." "Received response status code: %d", (void *)user_data->client, (void *)user_data, user_data->status_code); s_update_token_safely(user_data->client, NULL /*token*/, true /*token_required*/, 0 /*expire_timestamp*/); } else { /* Request failed; falling back to insecure request. * TODO: The retryable error (503 throttle) will also fall back to v1. Instead, we should just resend the token * request. */ AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to fetch token for requester %p, fall back to v1 for the same " "requester. Received response status code: %d", (void *)user_data->client, (void *)user_data, user_data->status_code); s_update_token_safely(user_data->client, NULL /*token*/, false /* token_required*/, 0 /*expire_timestamp*/); } } static int s_client_start_query_token(struct aws_imds_client *client) { struct imds_user_data *user_data = s_user_data_new(client, aws_byte_cursor_from_c_str(""), NULL, (void *)client); if (!user_data) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to query token with error: %s.", (void *)client, aws_error_str(aws_last_error())); return AWS_OP_ERR; } user_data->is_imds_token_request = true; if (aws_retry_strategy_acquire_retry_token( client->retry_strategy, NULL, s_on_retry_token_acquired, user_data, 100)) { s_user_data_release(user_data); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* Make an http request to put a ttl and hopefully get a token back. */ static void s_client_do_query_token(struct imds_user_data *user_data) { /* start query token for imds client */ struct aws_byte_cursor uri = aws_byte_cursor_from_string(s_imds_token_resource_path); /* Hard-coded 6 hour TTL for the token. 
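 *
 * For reference, the token request built below is effectively (sketch only;
 * values come from the string constants defined earlier in this file):
 *
 *     PUT /latest/api/token
 *     Host: 169.254.169.254
 *     x-aws-ec2-metadata-token-ttl-seconds: 21600
 *
 * A 200 response carries the token in the body; subsequent metadata requests
 * send it back via the x-aws-ec2-metadata-token header.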
*/ struct aws_http_header token_ttl_header = { .name = aws_byte_cursor_from_string(s_imds_token_ttl_header), .value = aws_byte_cursor_from_string(s_imds_token_ttl_default_value), }; struct aws_http_header headers[] = { token_ttl_header, }; struct aws_byte_cursor verb = aws_byte_cursor_from_c_str("PUT"); if (s_make_imds_http_query(user_data, &verb, &uri, headers, AWS_ARRAY_SIZE(headers))) { user_data->error_code = aws_last_error(); if (user_data->error_code == AWS_ERROR_SUCCESS) { user_data->error_code = AWS_ERROR_UNKNOWN; } s_query_complete(user_data); } } /* * Make the http request to fetch the resource */ static void s_do_query_resource(struct imds_user_data *user_data) { struct aws_http_header token_header = { .name = aws_byte_cursor_from_string(s_imds_token_header), .value = aws_byte_cursor_from_buf(&user_data->imds_token), }; struct aws_http_header headers[] = { token_header, }; size_t headers_count = 0; struct aws_http_header *headers_array_ptr = NULL; if (user_data->imds_token_required) { headers_count = 1; headers_array_ptr = headers; } struct aws_byte_cursor verb = aws_byte_cursor_from_c_str("GET"); struct aws_byte_cursor path_cursor = aws_byte_cursor_from_string(user_data->resource_path); if (s_make_imds_http_query(user_data, &verb, &path_cursor, headers_array_ptr, headers_count)) { user_data->error_code = aws_last_error(); if (user_data->error_code == AWS_ERROR_SUCCESS) { user_data->error_code = AWS_ERROR_UNKNOWN; } s_query_complete(user_data); } } int s_get_resource_async_with_imds_token(struct imds_user_data *user_data); static void s_query_complete(struct imds_user_data *user_data) { if (user_data->is_imds_token_request) { s_client_on_token_response(user_data); s_user_data_release(user_data); return; } if (user_data->status_code == AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED) { struct aws_imds_client *client = user_data->client; aws_mutex_lock(&client->token_lock); if (aws_byte_buf_eq(&user_data->imds_token, &client->cached_token)) { /* If the token used matches the cached token, that means the cached token is invalid. */ client->token_state = AWS_IMDS_TS_INVALID; AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client's cached token is invalidated by requester %p.", (void *)client, (void *)user_data); } /* let following requests use token as it's required. */ client->token_required = true; aws_mutex_unlock(&client->token_lock); if (!user_data->imds_token_required && !user_data->is_fallback_request) { AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to fetch resource via V1, try to use V2. requester %p.", (void *)user_data->client, (void *)user_data); /* V1 request, fallback to V2 and try again. */ s_reset_scratch_user_data(user_data); user_data->is_fallback_request = true; aws_retry_token_release(user_data->retry_token); /* Try V2 now. */ if (s_get_resource_async_with_imds_token(user_data)) { s_user_data_release(user_data); } return; } else { /* Not retirable error. */ AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to fetch resource. Server response 401 UNAUTHORIZED. requester %p.", (void *)user_data->client, (void *)user_data); user_data->error_code = AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE; } } /* TODO: if server sent out error, we will still report as succeed with the error body received from server. */ /* TODO: retry for 503 throttle. */ user_data->original_callback( user_data->error_code ? 
NULL : &user_data->current_result, user_data->error_code, user_data->original_user_data); s_user_data_release(user_data); } static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) { struct imds_user_data *imds_user_data = user_data; imds_user_data->connection = connection; if (!connection) { AWS_LOGF_WARN( AWS_LS_IMDS_CLIENT, "id=%p: IMDS Client failed to acquire a connection, error code %d(%s)", (void *)imds_user_data->client, error_code, aws_error_str(error_code)); imds_user_data->error_code = error_code; s_query_complete(imds_user_data); return; } if (imds_user_data->is_imds_token_request) { s_client_do_query_token(imds_user_data); } else { s_do_query_resource(imds_user_data); } } static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { (void)token; struct imds_user_data *imds_user_data = user_data; struct aws_imds_client *client = imds_user_data->client; if (!error_code) { client->function_table->aws_http_connection_manager_acquire_connection( client->connection_manager, s_on_acquire_connection, user_data); } else { AWS_LOGF_WARN( AWS_LS_IMDS_CLIENT, "id=%p: IMDS Client failed to retry the request with error code %d(%s)", (void *)client, error_code, aws_error_str(error_code)); imds_user_data->error_code = error_code; s_query_complete(imds_user_data); } } static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { struct imds_user_data *imds_user_data = user_data; struct aws_imds_client *client = imds_user_data->client; aws_http_message_destroy(imds_user_data->request); imds_user_data->request = NULL; imds_user_data->connection = NULL; struct aws_http_connection *connection = client->function_table->aws_http_stream_get_connection(stream); client->function_table->aws_http_stream_release(stream); client->function_table->aws_http_connection_manager_release_connection(client->connection_manager, connection); /* on encountering error, see if we could try again */ /* TODO: check the status code as well? 
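 *
 * As currently written, any stream-level error_code is treated as transient and
 * re-scheduled on the existing retry token, roughly:
 *
 *     aws_retry_strategy_schedule_retry(
 *         imds_user_data->retry_token, AWS_RETRY_ERROR_TYPE_TRANSIENT, s_on_retry_ready, user_data);
 *
 * while a clean completion records success on that token. A completed response
 * with an HTTP error status does not trigger a retry in this callback, which is
 * what the TODO above refers to.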
*/ if (error_code) { AWS_LOGF_WARN( AWS_LS_IMDS_CLIENT, "id=%p: Stream completed with error code %d(%s)", (void *)client, error_code, aws_error_str(error_code)); if (!aws_retry_strategy_schedule_retry( imds_user_data->retry_token, AWS_RETRY_ERROR_TYPE_TRANSIENT, s_on_retry_ready, user_data)) { AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "id=%p: Stream completed, retrying the last request on a new connection.", (void *)client); return; } else { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "id=%p: Stream completed, retries have been exhausted.", (void *)client); imds_user_data->error_code = error_code; } } else if (aws_retry_token_record_success(imds_user_data->retry_token)) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "id=%p: Error while recording successful retry: %s", (void *)client, aws_error_str(aws_last_error())); } s_query_complete(imds_user_data); } static void s_on_retry_token_acquired( struct aws_retry_strategy *strategy, int error_code, struct aws_retry_token *token, void *user_data) { (void)strategy; struct imds_user_data *imds_user_data = user_data; struct aws_imds_client *client = imds_user_data->client; if (!error_code) { AWS_LOGF_DEBUG(AWS_LS_IMDS_CLIENT, "id=%p: IMDS Client successfully acquired retry token.", (void *)client); imds_user_data->retry_token = token; client->function_table->aws_http_connection_manager_acquire_connection( client->connection_manager, s_on_acquire_connection, imds_user_data); } else { AWS_LOGF_WARN( AWS_LS_IMDS_CLIENT, "id=%p: IMDS Client failed to acquire retry token, error code %d(%s)", (void *)client, error_code, aws_error_str(error_code)); imds_user_data->error_code = error_code; s_query_complete(imds_user_data); } } static void s_complete_pending_queries( struct aws_imds_client *client, struct aws_linked_list *queries, bool token_required, struct aws_byte_buf *token) { /* poll swapped out pending queries if there is any */ while (!aws_linked_list_empty(queries)) { struct aws_linked_list_node *node = aws_linked_list_pop_back(queries); struct imds_token_query *query = AWS_CONTAINER_OF(node, struct imds_token_query, node); struct imds_user_data *requester = query->user_data; aws_mem_release(client->allocator, query); bool should_continue = true; if (requester->imds_token_required && !token_required) { if (requester->is_fallback_request) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to fetch resource without token, and also failed to fetch token. " "requester %p.", (void *)requester->client, (void *)requester); requester->error_code = AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE; should_continue = false; } else { AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to fetch token, fallback to v1. 
requester %p.", (void *)requester->client, (void *)requester); requester->is_fallback_request = true; } } requester->imds_token_required = token_required; if (token) { aws_byte_buf_reset(&requester->imds_token, true); struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(token); if (aws_byte_buf_append_dynamic(&requester->imds_token, &cursor)) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to copy IMDS token for requester %p.", (void *)client, (void *)requester); should_continue = false; } } else if (token_required) { requester->error_code = AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE; should_continue = false; } if (should_continue && aws_retry_strategy_acquire_retry_token( client->retry_strategy, NULL, s_on_retry_token_acquired, requester, 100)) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to allocate retry token for requester %p to send resource request.", (void *)client, (void *)requester); should_continue = false; } if (!should_continue) { if (requester->error_code == AWS_ERROR_SUCCESS) { requester->error_code = aws_last_error() == AWS_ERROR_SUCCESS ? AWS_ERROR_UNKNOWN : aws_last_error(); } s_query_complete(requester); } } } static enum imds_token_copy_result s_copy_token_safely(struct imds_user_data *user_data) { struct aws_imds_client *client = user_data->client; enum imds_token_copy_result ret = AWS_IMDS_TCR_UNEXPECTED_ERROR; struct aws_linked_list pending_queries; aws_linked_list_init(&pending_queries); uint64_t current = 0; user_data->client->function_table->aws_high_res_clock_get_ticks(¤t); aws_mutex_lock(&client->token_lock); if (client->token_state == AWS_IMDS_TS_VALID) { if (current > client->cached_token_expiration_timestamp) { /* The cached token expired. Switch the state */ client->token_state = AWS_IMDS_TS_INVALID; AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client's cached token expired. 
Fetching new token for requester %p.", (void *)client, (void *)user_data); } else { aws_byte_buf_reset(&user_data->imds_token, true); struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&client->cached_token); if (aws_byte_buf_append_dynamic(&user_data->imds_token, &cursor)) { ret = AWS_IMDS_TCR_UNEXPECTED_ERROR; } else { ret = AWS_IMDS_TCR_SUCCESS; } } } if (client->token_state != AWS_IMDS_TS_VALID) { ret = AWS_IMDS_TCR_WAITING_IN_QUEUE; struct imds_token_query *query = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_token_query)); query->user_data = user_data; aws_linked_list_push_back(&client->pending_queries, &query->node); if (client->token_state == AWS_IMDS_TS_INVALID) { if (s_client_start_query_token(client)) { ret = AWS_IMDS_TCR_UNEXPECTED_ERROR; aws_linked_list_swap_contents(&pending_queries, &client->pending_queries); } else { client->token_state = AWS_IMDS_TS_UPDATE_IN_PROGRESS; } } } aws_mutex_unlock(&client->token_lock); s_complete_pending_queries(client, &pending_queries, true, NULL); switch (ret) { case AWS_IMDS_TCR_SUCCESS: AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client copied token to requester %p successfully.", (void *)client, (void *)user_data); break; case AWS_IMDS_TCR_WAITING_IN_QUEUE: AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client's token is invalid and is now updating.", (void *)client); break; case AWS_IMDS_TCR_UNEXPECTED_ERROR: AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client encountered unexpected error when processing token query for requester %p, error: " "%s.", (void *)client, (void *)user_data, aws_error_str(aws_last_error())); break; } return ret; } /** * Once a requester returns from a token request, it should call this function to unblock all other * waiting requesters. When the token parameter is NULL, it means the token request failed, and we need * a new requester to acquire the token again.
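 *
 * In effect, the token-fetch path (s_client_on_token_response above) invokes this
 * in one of three ways (condensed sketch):
 *
 *     // success: publish the trimmed token plus its expiration timestamp
 *     s_update_token_safely(client, &user_data->imds_token, true, expire_timestamp);
 *
 *     // hard failure (400, or fetch failed with v1 disabled): no token, v2 still required
 *     s_update_token_safely(client, NULL, true, 0);
 *
 *     // failure with v1 fallback allowed: no token, and a token is no longer required
 *     s_update_token_safely(client, NULL, false, 0);
 *
 * In every case the pending-query list is swapped out while the token lock is held
 * and the waiting requesters are completed afterwards via s_complete_pending_queries().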
*/ static void s_update_token_safely( struct aws_imds_client *client, struct aws_byte_buf *token, bool token_required, uint64_t expire_timestamp) { AWS_FATAL_ASSERT(client); bool updated = false; struct aws_linked_list pending_queries; aws_linked_list_init(&pending_queries); aws_mutex_lock(&client->token_lock); client->token_required = token_required; if (token) { aws_byte_buf_reset(&client->cached_token, true); struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(token); if (aws_byte_buf_append_dynamic(&client->cached_token, &cursor) == AWS_OP_SUCCESS) { client->token_state = AWS_IMDS_TS_VALID; client->cached_token_expiration_timestamp = expire_timestamp; updated = true; } } else { client->token_state = AWS_IMDS_TS_INVALID; } aws_linked_list_swap_contents(&pending_queries, &client->pending_queries); aws_mutex_unlock(&client->token_lock); s_complete_pending_queries(client, &pending_queries, token_required, token); if (updated) { AWS_LOGF_DEBUG( AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client updated the cached token successfully.", (void *)client); } else { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to update the token from IMDS.", (void *)client); } } int s_get_resource_async_with_imds_token(struct imds_user_data *user_data) { enum imds_token_copy_result res = s_copy_token_safely(user_data); if (res == AWS_IMDS_TCR_UNEXPECTED_ERROR) { return AWS_OP_ERR; } if (res == AWS_IMDS_TCR_WAITING_IN_QUEUE) { return AWS_OP_SUCCESS; } if (aws_retry_strategy_acquire_retry_token( user_data->client->retry_strategy, NULL, s_on_retry_token_acquired, user_data, 100)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_imds_client_get_resource_async( struct aws_imds_client *client, struct aws_byte_cursor resource_path, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { struct imds_user_data *wrapped_user_data = s_user_data_new(client, resource_path, callback, user_data); if (wrapped_user_data == NULL) { goto error; } if (!wrapped_user_data->imds_token_required) { if (aws_retry_strategy_acquire_retry_token( client->retry_strategy, NULL, s_on_retry_token_acquired, wrapped_user_data, 100)) { goto error; } } else if (s_get_resource_async_with_imds_token(wrapped_user_data)) { goto error; } return AWS_OP_SUCCESS; error: s_user_data_release(wrapped_user_data); return AWS_OP_ERR; } /** * Higher level API definitions to get specific IMDS info * Reference: * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-categories.html * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/util/EC2MetadataUtils.html * https://github.com/aws/aws-sdk-java-v2/blob/25f640c3b4f2e339c93a7da1494ab3310e128248/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/EC2MetadataUtils.java * IMDS client only implements resource acquisition that needs one resource request. * Complicated resource like network interface information defined in Java V2 SDK is not implemented here. * To get a full map of network interface information, we need more than ten requests, but sometimes we only care about * one or two of them. 
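 *
 * Typical usage of the getters defined below (illustrative sketch; the callback
 * name is made up, but its signature matches aws_imds_client_on_get_resource_callback_fn
 * as used throughout this file):
 *
 *     static void s_on_instance_id(const struct aws_byte_buf *resource, int error_code, void *user_data) {
 *         if (error_code == AWS_ERROR_SUCCESS && resource != NULL) {
 *             // resource holds the raw bytes of /latest/meta-data/instance-id
 *         }
 *     }
 *
 *     aws_imds_client_get_instance_id(client, s_on_instance_id, NULL);
 *
 * The byte buffer passed to the callback is released once the callback returns,
 * so callers that need the value afterwards should copy it out.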
*/ static struct aws_byte_cursor s_instance_identity_document = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("instance-identity/document"); static struct aws_byte_cursor s_instance_identity_signature = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("instance-identity/signature"); static struct aws_byte_cursor s_ec2_metadata_root = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/meta-data"); static struct aws_byte_cursor s_ec2_credentials_root = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/meta-data/iam/security-credentials/"); static struct aws_byte_cursor s_ec2_userdata_root = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/user-data/"); static struct aws_byte_cursor s_ec2_dynamicdata_root = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/dynamic/"); struct imds_get_array_user_data { struct aws_allocator *allocator; aws_imds_client_on_get_array_callback_fn *callback; void *user_data; }; struct imds_get_credentials_user_data { struct aws_allocator *allocator; aws_imds_client_on_get_credentials_callback_fn *callback; void *user_data; }; struct imds_get_iam_user_data { struct aws_allocator *allocator; aws_imds_client_on_get_iam_profile_callback_fn *callback; void *user_data; }; struct imds_get_instance_user_data { struct aws_allocator *allocator; aws_imds_client_on_get_instance_info_callback_fn *callback; void *user_data; }; static void s_process_array_resource(const struct aws_byte_buf *resource, int error_code, void *user_data) { struct imds_get_array_user_data *wrapped_user_data = user_data; struct aws_array_list resource_array; AWS_ZERO_STRUCT(resource_array); if (resource && !error_code) { struct aws_byte_cursor resource_cursor = aws_byte_cursor_from_buf(resource); if (aws_array_list_init_dynamic( &resource_array, wrapped_user_data->allocator, 10, sizeof(struct aws_byte_cursor))) { goto on_finish; } aws_byte_cursor_split_on_char(&resource_cursor, '\n', &resource_array); } on_finish: wrapped_user_data->callback(&resource_array, error_code, wrapped_user_data->user_data); aws_array_list_clean_up_secure(&resource_array); aws_mem_release(wrapped_user_data->allocator, wrapped_user_data); } static void s_process_credentials_resource(const struct aws_byte_buf *resource, int error_code, void *user_data) { struct imds_get_credentials_user_data *wrapped_user_data = user_data; struct aws_credentials *credentials = NULL; struct aws_byte_buf json_data; AWS_ZERO_STRUCT(json_data); if (!resource || error_code) { goto on_finish; } if (aws_byte_buf_init_copy(&json_data, wrapped_user_data->allocator, resource)) { goto on_finish; } if (aws_byte_buf_append_null_terminator(&json_data)) { goto on_finish; } struct aws_parse_credentials_from_json_doc_options parse_options = { .access_key_id_name = "AccessKeyId", .secret_access_key_name = "SecretAccessKey", .token_name = "Token", .expiration_name = "Expiration", .token_required = true, .expiration_required = true, }; credentials = aws_parse_credentials_from_json_document( wrapped_user_data->allocator, aws_byte_cursor_from_buf(&json_data), &parse_options); on_finish: wrapped_user_data->callback(credentials, error_code, wrapped_user_data->user_data); aws_credentials_release(credentials); aws_byte_buf_clean_up_secure(&json_data); aws_mem_release(wrapped_user_data->allocator, wrapped_user_data); } /** * { "LastUpdated" : "2020-06-03T20:42:19Z", "InstanceProfileArn" : "arn:aws:iam::030535792909:instance-profile/CloudWatchAgentServerRole", "InstanceProfileId" : "AIPAQOHATHEGTGNQ5THQB" } */ static int s_parse_iam_profile(struct aws_json_value *document_root, struct aws_imds_iam_profile 
*dest) { bool success = false; struct aws_byte_cursor last_updated_cursor; struct aws_json_value *last_updated = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("LastUpdated")); if (last_updated == NULL) { last_updated = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("lastupdated")); } if (!aws_json_value_is_string(last_updated) || (aws_json_value_get_string(last_updated, &last_updated_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse LastUpdated from Json document for iam profile."); goto done; } struct aws_byte_cursor profile_arn_cursor; struct aws_json_value *profile_arn = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceProfileArn")); if (profile_arn == NULL) { profile_arn = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceprofilearn")); } if (!aws_json_value_is_string(profile_arn) || (aws_json_value_get_string(profile_arn, &profile_arn_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse InstanceProfileArn from Json document for iam profile."); goto done; } struct aws_byte_cursor profile_id_cursor; struct aws_json_value *profile_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceProfileId")); if (profile_id == NULL) { profile_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceprofileid")); } if (!aws_json_value_is_string(profile_id) || (aws_json_value_get_string(profile_id, &profile_id_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse InstanceProfileId from Json document for iam profile."); goto done; } if (last_updated_cursor.len == 0 || profile_arn_cursor.len == 0 || profile_id_cursor.len == 0) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Parsed an unexpected Json document fro iam profile."); goto done; } if (aws_date_time_init_from_str_cursor(&dest->last_updated, &last_updated_cursor, AWS_DATE_FORMAT_ISO_8601)) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "LastUpdate in iam profile Json document is not a valid ISO_8601 date string."); goto done; } dest->instance_profile_arn = profile_arn_cursor; dest->instance_profile_id = profile_id_cursor; success = true; done: return success ? 
AWS_OP_ERR : AWS_OP_SUCCESS; } static void s_process_iam_profile(const struct aws_byte_buf *resource, int error_code, void *user_data) { struct imds_get_iam_user_data *wrapped_user_data = user_data; struct aws_json_value *document_root = NULL; struct aws_imds_iam_profile iam; AWS_ZERO_STRUCT(iam); struct aws_byte_buf json_data; AWS_ZERO_STRUCT(json_data); if (!resource || error_code) { goto on_finish; } if (aws_byte_buf_init_copy(&json_data, wrapped_user_data->allocator, resource)) { goto on_finish; } if (aws_byte_buf_append_null_terminator(&json_data)) { goto on_finish; } struct aws_byte_cursor json_data_cursor = aws_byte_cursor_from_buf(&json_data); document_root = aws_json_value_new_from_string(aws_default_allocator(), json_data_cursor); if (document_root == NULL) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse document as Json document for iam profile."); goto on_finish; } if (s_parse_iam_profile(document_root, &iam)) { goto on_finish; } on_finish: wrapped_user_data->callback(&iam, error_code, wrapped_user_data->user_data); aws_byte_buf_clean_up_secure(&json_data); aws_mem_release(wrapped_user_data->allocator, wrapped_user_data); if (document_root != NULL) { aws_json_value_destroy(document_root); } } /** * { "accountId" : "030535792909", "architecture" : "x86_64", "availabilityZone" : "us-west-2a", "billingProducts" : null, ------------>array "devpayProductCodes" : null, ----------->deprecated "marketplaceProductCodes" : null, -------->array "imageId" : "ami-5b70e323", "instanceId" : "i-022a93b5e640c0248", "instanceType" : "c4.8xlarge", "kernelId" : null, "pendingTime" : "2020-05-27T08:41:17Z", "privateIp" : "172.31.22.164", "ramdiskId" : null, "region" : "us-west-2", "version" : "2017-09-30" } */ static int s_parse_instance_info(struct aws_json_value *document_root, struct aws_imds_instance_info *dest) { bool success = false; struct aws_byte_cursor account_id_cursor; struct aws_json_value *account_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("accountId")); if (account_id == NULL) { account_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("accountid")); if (account_id == NULL) { account_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("AccountId")); } } if (!aws_json_value_is_string(account_id) || (aws_json_value_get_string(account_id, &account_id_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse accountId from Json document for ec2 instance info."); goto done; } dest->account_id = account_id_cursor; struct aws_byte_cursor architecture_cursor; struct aws_json_value *architecture = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("architecture")); if (architecture == NULL) { architecture = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("Architecture")); } if (!aws_json_value_is_string(architecture) || (aws_json_value_get_string(architecture, &architecture_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse architecture from Json document for ec2 instance info."); goto done; } dest->architecture = architecture_cursor; struct aws_byte_cursor availability_zone_cursor; struct aws_json_value *availability_zone = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("availabilityZone")); if (availability_zone == NULL) { availability_zone = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("availabilityzone")); if (availability_zone == NULL) { 
availability_zone = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("AvailabilityZone")); } } if (!aws_json_value_is_string(availability_zone) || (aws_json_value_get_string(availability_zone, &availability_zone_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "Failed to parse availabilityZone from Json document for ec2 instance info."); goto done; } dest->availability_zone = availability_zone_cursor; struct aws_byte_cursor billing_products_cursor; struct aws_json_value *billing_products = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("billingProducts")); if (billing_products == NULL) { billing_products = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("billingproducts")); if (billing_products == NULL) { billing_products = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("BillingProducts")); } } if (aws_json_value_is_array(billing_products)) { struct aws_json_value *element; for (size_t i = 0; i < aws_json_get_array_size(billing_products); i++) { element = aws_json_get_array_element(billing_products, i); if (aws_json_value_is_string(element) && aws_json_value_get_string(element, &billing_products_cursor) != AWS_OP_ERR) { struct aws_byte_cursor item = billing_products_cursor; aws_array_list_push_back(&dest->billing_products, (const void *)&item); } } } struct aws_byte_cursor marketplace_product_codes_cursor; struct aws_json_value *marketplace_product_codes = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("marketplaceProductCodes")); if (marketplace_product_codes == NULL) { marketplace_product_codes = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("marketplaceproductcodes")); if (marketplace_product_codes == NULL) { marketplace_product_codes = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("MarketplaceProductCodes")); } } if (aws_json_value_is_array(marketplace_product_codes)) { struct aws_json_value *element; for (size_t i = 0; i < aws_json_get_array_size(marketplace_product_codes); i++) { element = aws_json_get_array_element(marketplace_product_codes, i); if (aws_json_value_is_string(element) && aws_json_value_get_string(element, &marketplace_product_codes_cursor) != AWS_OP_ERR) { struct aws_byte_cursor item = marketplace_product_codes_cursor; aws_array_list_push_back(&dest->billing_products, (const void *)&item); } } } struct aws_byte_cursor image_id_cursor; struct aws_json_value *image_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("imageId")); if (image_id == NULL) { image_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("imageid")); if (image_id == NULL) { image_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("ImageId")); } } if (aws_json_value_is_string(image_id) && (aws_json_value_get_string(image_id, &image_id_cursor) != AWS_OP_ERR)) { dest->image_id = image_id_cursor; } struct aws_byte_cursor instance_id_cursor; struct aws_json_value *instance_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceId")); if (instance_id == NULL) { instance_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceid")); if (instance_id == NULL) { instance_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceId")); } } if (!aws_json_value_is_string(instance_id) || (aws_json_value_get_string(instance_id, &instance_id_cursor) == 
AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse instanceId from Json document for ec2 instance info."); goto done; } dest->instance_id = instance_id_cursor; struct aws_byte_cursor instance_type_cursor; struct aws_json_value *instance_type = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceType")); if (instance_type == NULL) { instance_type = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instancetype")); if (instance_type == NULL) { instance_type = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceType")); } } if (!aws_json_value_is_string(instance_type) || (aws_json_value_get_string(instance_type, &instance_type_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse instanceType from Json document for ec2 instance info."); goto done; } dest->instance_type = instance_type_cursor; struct aws_byte_cursor kernel_id_cursor; struct aws_json_value *kernel_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("kernelId")); if (kernel_id == NULL) { kernel_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("kernelid")); if (kernel_id == NULL) { kernel_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("KernelId")); } } if (aws_json_value_is_string(kernel_id) && (aws_json_value_get_string(kernel_id, &kernel_id_cursor) != AWS_OP_ERR)) { dest->kernel_id = kernel_id_cursor; } struct aws_byte_cursor private_ip_cursor; struct aws_json_value *private_ip = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("privateIp")); if (private_ip == NULL) { private_ip = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("privateip")); if (private_ip == NULL) { private_ip = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("PrivateIp")); } } if (aws_json_value_is_string(private_ip) && (aws_json_value_get_string(private_ip, &private_ip_cursor) != AWS_OP_ERR)) { dest->private_ip = private_ip_cursor; } struct aws_byte_cursor ramdisk_id_cursor; struct aws_json_value *ramdisk_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("ramdiskId")); if (ramdisk_id == NULL) { ramdisk_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("ramdiskid")); if (ramdisk_id == NULL) { ramdisk_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("RamdiskId")); } } if (aws_json_value_is_string(ramdisk_id) && (aws_json_value_get_string(ramdisk_id, &ramdisk_id_cursor) != AWS_OP_ERR)) { dest->ramdisk_id = ramdisk_id_cursor; } struct aws_byte_cursor region_cursor; struct aws_json_value *region = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("region")); if (region == NULL) { region = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("Region")); } if (!aws_json_value_is_string(region) || (aws_json_value_get_string(region, ®ion_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse region from Json document for ec2 instance info."); goto done; } dest->region = region_cursor; struct aws_byte_cursor version_cursor; struct aws_json_value *version = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("version")); if (version == NULL) { version = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("Version")); } if (!aws_json_value_is_string(version) || (aws_json_value_get_string(version, 
&version_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse version from Json document for ec2 instance info."); goto done; } dest->version = version_cursor; struct aws_byte_cursor pending_time_cursor; struct aws_json_value *pending_time = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("pendingTime")); if (pending_time == NULL) { pending_time = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("pendingtime")); if (pending_time == NULL) { pending_time = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("PendingTime")); } } if (!aws_json_value_is_string(pending_time) || (aws_json_value_get_string(pending_time, &pending_time_cursor) == AWS_OP_ERR)) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse pendingTime from Json document for ec2 instance info."); goto done; } if (aws_date_time_init_from_str_cursor(&dest->pending_time, &pending_time_cursor, AWS_DATE_FORMAT_ISO_8601)) { AWS_LOGF_ERROR( AWS_LS_IMDS_CLIENT, "pendingTime in instance info Json document is not a valid ISO_8601 date string."); goto done; } success = true; done: return success ? AWS_OP_ERR : AWS_OP_SUCCESS; } static void s_process_instance_info(const struct aws_byte_buf *resource, int error_code, void *user_data) { struct imds_get_instance_user_data *wrapped_user_data = user_data; struct aws_imds_instance_info instance_info; AWS_ZERO_STRUCT(instance_info); struct aws_byte_buf json_data; AWS_ZERO_STRUCT(json_data); struct aws_json_value *document_root = NULL; if (aws_array_list_init_dynamic( &instance_info.billing_products, wrapped_user_data->allocator, 10, sizeof(struct aws_byte_cursor))) { goto on_finish; } if (aws_array_list_init_dynamic( &instance_info.marketplace_product_codes, wrapped_user_data->allocator, 10, sizeof(struct aws_byte_cursor))) { goto on_finish; } if (!resource || error_code) { goto on_finish; } if (aws_byte_buf_init_copy(&json_data, wrapped_user_data->allocator, resource)) { goto on_finish; } if (aws_byte_buf_append_null_terminator(&json_data)) { goto on_finish; } struct aws_byte_cursor json_data_cursor = aws_byte_cursor_from_buf(&json_data); document_root = aws_json_value_new_from_string(aws_default_allocator(), json_data_cursor); if (document_root == NULL) { AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse document as Json document for ec2 instance info."); goto on_finish; } if (s_parse_instance_info(document_root, &instance_info)) { goto on_finish; } on_finish: wrapped_user_data->callback(&instance_info, error_code, wrapped_user_data->user_data); aws_array_list_clean_up_secure(&instance_info.billing_products); aws_array_list_clean_up_secure(&instance_info.marketplace_product_codes); aws_byte_buf_clean_up_secure(&json_data); aws_mem_release(wrapped_user_data->allocator, wrapped_user_data); if (document_root != NULL) { aws_json_value_destroy(document_root); } } static int s_aws_imds_get_resource( struct aws_imds_client *client, struct aws_byte_cursor path, struct aws_byte_cursor name, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { struct aws_byte_buf resource; if (aws_byte_buf_init_copy_from_cursor(&resource, client->allocator, path)) { return AWS_OP_ERR; } if (aws_byte_buf_append_dynamic(&resource, &name)) { goto error; } if (aws_imds_client_get_resource_async(client, aws_byte_cursor_from_buf(&resource), callback, user_data)) { goto error; } aws_byte_buf_clean_up(&resource); return AWS_OP_SUCCESS; error: aws_byte_buf_clean_up(&resource); return AWS_OP_ERR; } int 
s_aws_imds_get_converted_resource( struct aws_imds_client *client, struct aws_byte_cursor path, struct aws_byte_cursor name, aws_imds_client_on_get_resource_callback_fn conversion_fn, void *user_data) { return s_aws_imds_get_resource(client, path, name, conversion_fn, user_data); } int aws_imds_client_get_ami_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ami-id"), callback, user_data); } int aws_imds_client_get_ami_launch_index( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ami-launch-index"), callback, user_data); } int aws_imds_client_get_ami_manifest_path( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ami-manifest-path"), callback, user_data); } int aws_imds_client_get_ancestor_ami_ids( struct aws_imds_client *client, aws_imds_client_on_get_array_callback_fn callback, void *user_data) { struct imds_get_array_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_array_user_data)); if (!wrapped_user_data) { return AWS_OP_ERR; } wrapped_user_data->allocator = client->allocator; wrapped_user_data->callback = callback; wrapped_user_data->user_data = user_data; return s_aws_imds_get_converted_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ancestor-ami-ids"), s_process_array_resource, wrapped_user_data); } int aws_imds_client_get_instance_action( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/instance-action"), callback, user_data); } int aws_imds_client_get_instance_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/instance-id"), callback, user_data); } int aws_imds_client_get_instance_type( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/instance-type"), callback, user_data); } int aws_imds_client_get_mac_address( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/mac"), callback, user_data); } int aws_imds_client_get_private_ip_address( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/local-ipv4"), callback, user_data); } int aws_imds_client_get_availability_zone( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/placement/availability-zone"), callback, user_data); } int aws_imds_client_get_product_codes( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return 
s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/product-codes"), callback, user_data); } int aws_imds_client_get_public_key( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/public-keys/0/openssh-key"), callback, user_data); } int aws_imds_client_get_ramdisk_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ramdisk-id"), callback, user_data); } int aws_imds_client_get_reservation_id( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/reservation-id"), callback, user_data); } int aws_imds_client_get_security_groups( struct aws_imds_client *client, aws_imds_client_on_get_array_callback_fn callback, void *user_data) { struct imds_get_array_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_array_user_data)); if (!wrapped_user_data) { return AWS_OP_ERR; } wrapped_user_data->allocator = client->allocator; wrapped_user_data->callback = callback; wrapped_user_data->user_data = user_data; return s_aws_imds_get_converted_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/security-groups"), s_process_array_resource, wrapped_user_data); } int aws_imds_client_get_block_device_mapping( struct aws_imds_client *client, aws_imds_client_on_get_array_callback_fn callback, void *user_data) { struct imds_get_array_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_array_user_data)); if (!wrapped_user_data) { return AWS_OP_ERR; } wrapped_user_data->allocator = client->allocator; wrapped_user_data->callback = callback; wrapped_user_data->user_data = user_data; return s_aws_imds_get_converted_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/block-device-mapping"), s_process_array_resource, wrapped_user_data); } int aws_imds_client_get_attached_iam_role( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/iam/security-credentials/"), callback, user_data); } int aws_imds_client_get_credentials( struct aws_imds_client *client, struct aws_byte_cursor iam_role_name, aws_imds_client_on_get_credentials_callback_fn callback, void *user_data) { struct imds_get_credentials_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_credentials_user_data)); if (!wrapped_user_data) { return AWS_OP_ERR; } wrapped_user_data->allocator = client->allocator; wrapped_user_data->callback = callback; wrapped_user_data->user_data = user_data; return s_aws_imds_get_converted_resource( client, s_ec2_credentials_root, iam_role_name, s_process_credentials_resource, wrapped_user_data); } int aws_imds_client_get_iam_profile( struct aws_imds_client *client, aws_imds_client_on_get_iam_profile_callback_fn callback, void *user_data) { struct imds_get_iam_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_iam_user_data)); if (!wrapped_user_data) { return AWS_OP_ERR; } wrapped_user_data->allocator = client->allocator; wrapped_user_data->callback = 
callback; wrapped_user_data->user_data = user_data; return s_aws_imds_get_converted_resource( client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/iam/info"), s_process_iam_profile, wrapped_user_data); } int aws_imds_client_get_user_data( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource(client, s_ec2_userdata_root, aws_byte_cursor_from_c_str(""), callback, user_data); } int aws_imds_client_get_instance_signature( struct aws_imds_client *client, aws_imds_client_on_get_resource_callback_fn callback, void *user_data) { return s_aws_imds_get_resource(client, s_ec2_dynamicdata_root, s_instance_identity_signature, callback, user_data); } int aws_imds_client_get_instance_info( struct aws_imds_client *client, aws_imds_client_on_get_instance_info_callback_fn callback, void *user_data) { struct imds_get_instance_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_instance_user_data)); if (!wrapped_user_data) { return AWS_OP_ERR; } wrapped_user_data->allocator = client->allocator; wrapped_user_data->callback = callback; wrapped_user_data->user_data = user_data; return s_aws_imds_get_converted_resource( client, s_ec2_dynamicdata_root, s_instance_identity_document, s_process_instance_info, wrapped_user_data); } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/aws_profile.c000066400000000000000000000030051456575232400241450ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include static const struct aws_string *s_profile_get_property_value( const struct aws_profile *profile, const struct aws_string *property_name) { const struct aws_profile_property *property = aws_profile_get_property(profile, property_name); if (property == NULL) { return NULL; } return aws_profile_property_get_value(property); } AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_profile_var, "aws_access_key_id"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_profile_var, "aws_secret_access_key"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_profile_var, "aws_session_token"); struct aws_credentials *aws_credentials_new_from_profile( struct aws_allocator *allocator, const struct aws_profile *profile) { const struct aws_string *access_key = s_profile_get_property_value(profile, s_access_key_id_profile_var); const struct aws_string *secret_key = s_profile_get_property_value(profile, s_secret_access_key_profile_var); if (access_key == NULL || secret_key == NULL) { return NULL; } const struct aws_string *session_token = s_profile_get_property_value(profile, s_session_token_profile_var); return aws_credentials_new_from_string(allocator, access_key, secret_key, session_token, UINT64_MAX); } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/aws_signing.c000066400000000000000000002715501456575232400241570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) #endif /* _MSC_VER */ /* * A bunch of initial size values for various buffers used throughout the signing process * * We want them to be sufficient-but-not-wasting-significant-amounts-of-memory for "most" * requests. 
The body read buffer is an exception since it will just be holding windows rather than * the entire thing. */ #define BODY_READ_BUFFER_SIZE 4096 #define CANONICAL_REQUEST_STARTING_SIZE 1024 #define STRING_TO_SIGN_STARTING_SIZE 256 #define SIGNED_HEADERS_STARTING_SIZE 256 #define CANONICAL_HEADER_BLOCK_STARTING_SIZE 1024 #define AUTHORIZATION_VALUE_STARTING_SIZE 512 #define PAYLOAD_HASH_STARTING_SIZE (AWS_SHA256_LEN * 2) #define CREDENTIAL_SCOPE_STARTING_SIZE 128 #define ACCESS_CREDENTIAL_SCOPE_STARTING_SIZE 149 #define SCRATCH_BUF_STARTING_SIZE 256 #define MAX_AUTHORIZATION_HEADER_COUNT 4 #define MAX_AUTHORIZATION_QUERY_PARAM_COUNT 6 #define DEFAULT_PATH_COMPONENT_COUNT 10 #define CANONICAL_REQUEST_SPLIT_OVER_ESTIMATE 20 #define HEX_ENCODED_SIGNATURE_OVER_ESTIMATE 256 #define MAX_ECDSA_P256_SIGNATURE_AS_BINARY_LENGTH 72 #define MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH (MAX_ECDSA_P256_SIGNATURE_AS_BINARY_LENGTH * 2) #define AWS_SIGV4A_SIGNATURE_PADDING_BYTE ('*') AWS_STRING_FROM_LITERAL(g_aws_signing_content_header_name, "x-amz-content-sha256"); AWS_STRING_FROM_LITERAL(g_aws_signing_authorization_header_name, "Authorization"); AWS_STRING_FROM_LITERAL(g_aws_signing_authorization_query_param_name, "X-Amz-Signature"); AWS_STRING_FROM_LITERAL(g_aws_signing_algorithm_query_param_name, "X-Amz-Algorithm"); AWS_STRING_FROM_LITERAL(g_aws_signing_credential_query_param_name, "X-Amz-Credential"); AWS_STRING_FROM_LITERAL(g_aws_signing_date_name, "X-Amz-Date"); AWS_STRING_FROM_LITERAL(g_aws_signing_signed_headers_query_param_name, "X-Amz-SignedHeaders"); AWS_STRING_FROM_LITERAL(g_aws_signing_security_token_name, "X-Amz-Security-Token"); AWS_STRING_FROM_LITERAL(g_aws_signing_s3session_token_name, "X-Amz-S3session-Token"); AWS_STRING_FROM_LITERAL(g_aws_signing_expires_query_param_name, "X-Amz-Expires"); AWS_STRING_FROM_LITERAL(g_aws_signing_region_set_name, "X-Amz-Region-Set"); AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4_http_request, "AWS4-HMAC-SHA256"); AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4_s3_chunked_payload, "AWS4-HMAC-SHA256-PAYLOAD"); AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4a_s3_chunked_payload, "AWS4-ECDSA-P256-SHA256-PAYLOAD"); AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4_s3_chunked_trailer_payload, "AWS4-HMAC-SHA256-TRAILER"); AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4a_s3_chunked_trailer_payload, "AWS4-ECDSA-P256-SHA256-TRAILER"); /* aws-related query param and header tables */ static struct aws_hash_table s_forbidden_headers; static struct aws_hash_table s_forbidden_params; static struct aws_hash_table s_skipped_headers; static struct aws_byte_cursor s_amzn_trace_id_header_name; static struct aws_byte_cursor s_user_agent_header_name; static struct aws_byte_cursor s_connection_header_name; static struct aws_byte_cursor s_sec_websocket_key_header_name; static struct aws_byte_cursor s_sec_websocket_protocol_header_name; static struct aws_byte_cursor s_sec_websocket_version_header_name; static struct aws_byte_cursor s_upgrade_header_name; static struct aws_byte_cursor s_amz_content_sha256_header_name; static struct aws_byte_cursor s_amz_date_header_name; static struct aws_byte_cursor s_authorization_header_name; static struct aws_byte_cursor s_region_set_header_name; static struct aws_byte_cursor s_amz_security_token_header_name; static struct aws_byte_cursor s_amz_s3session_token_header_name; static struct aws_byte_cursor s_amz_signature_param_name; static struct aws_byte_cursor s_amz_date_param_name; static struct aws_byte_cursor 
s_amz_credential_param_name; static struct aws_byte_cursor s_amz_algorithm_param_name; static struct aws_byte_cursor s_amz_signed_headers_param_name; static struct aws_byte_cursor s_amz_security_token_param_name; static struct aws_byte_cursor s_amz_expires_param_name; static struct aws_byte_cursor s_amz_region_set_param_name; /* * Build a set of library-static tables for quick lookup. * * Construction errors are considered fatal. */ int aws_signing_init_signing_tables(struct aws_allocator *allocator) { if (aws_hash_table_init( &s_skipped_headers, allocator, 10, aws_hash_byte_cursor_ptr_ignore_case, (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case, NULL, NULL)) { return AWS_OP_ERR; } s_amzn_trace_id_header_name = aws_byte_cursor_from_c_str("x-amzn-trace-id"); if (aws_hash_table_put(&s_skipped_headers, &s_amzn_trace_id_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_user_agent_header_name = aws_byte_cursor_from_c_str("User-Agent"); if (aws_hash_table_put(&s_skipped_headers, &s_user_agent_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_connection_header_name = aws_byte_cursor_from_c_str("connection"); if (aws_hash_table_put(&s_skipped_headers, &s_connection_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_connection_header_name = aws_byte_cursor_from_c_str("expect"); if (aws_hash_table_put(&s_skipped_headers, &s_connection_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_sec_websocket_key_header_name = aws_byte_cursor_from_c_str("sec-websocket-key"); if (aws_hash_table_put(&s_skipped_headers, &s_sec_websocket_key_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_sec_websocket_protocol_header_name = aws_byte_cursor_from_c_str("sec-websocket-protocol"); if (aws_hash_table_put(&s_skipped_headers, &s_sec_websocket_protocol_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_sec_websocket_version_header_name = aws_byte_cursor_from_c_str("sec-websocket-version"); if (aws_hash_table_put(&s_skipped_headers, &s_sec_websocket_version_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_upgrade_header_name = aws_byte_cursor_from_c_str("upgrade"); if (aws_hash_table_put(&s_skipped_headers, &s_upgrade_header_name, NULL, NULL)) { return AWS_OP_ERR; } if (aws_hash_table_init( &s_forbidden_headers, allocator, 10, aws_hash_byte_cursor_ptr_ignore_case, (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case, NULL, NULL)) { return AWS_OP_ERR; } s_amz_content_sha256_header_name = aws_byte_cursor_from_string(g_aws_signing_content_header_name); if (aws_hash_table_put(&s_forbidden_headers, &s_amz_content_sha256_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_date_header_name = aws_byte_cursor_from_string(g_aws_signing_date_name); if (aws_hash_table_put(&s_forbidden_headers, &s_amz_date_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_authorization_header_name = aws_byte_cursor_from_string(g_aws_signing_authorization_header_name); if (aws_hash_table_put(&s_forbidden_headers, &s_authorization_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_region_set_header_name = aws_byte_cursor_from_string(g_aws_signing_region_set_name); if (aws_hash_table_put(&s_forbidden_headers, &s_region_set_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_security_token_header_name = aws_byte_cursor_from_string(g_aws_signing_security_token_name); if (aws_hash_table_put(&s_forbidden_headers, &s_amz_security_token_header_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_s3session_token_header_name = aws_byte_cursor_from_string(g_aws_signing_s3session_token_name); if (aws_hash_table_put(&s_forbidden_headers, 
&s_amz_s3session_token_header_name, NULL, NULL)) { return AWS_OP_ERR; } if (aws_hash_table_init( &s_forbidden_params, allocator, 10, aws_hash_byte_cursor_ptr_ignore_case, (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case, NULL, NULL)) { return AWS_OP_ERR; } s_amz_signature_param_name = aws_byte_cursor_from_string(g_aws_signing_authorization_query_param_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_signature_param_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_date_param_name = aws_byte_cursor_from_string(g_aws_signing_date_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_date_param_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_credential_param_name = aws_byte_cursor_from_string(g_aws_signing_credential_query_param_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_credential_param_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_algorithm_param_name = aws_byte_cursor_from_string(g_aws_signing_algorithm_query_param_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_algorithm_param_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_signed_headers_param_name = aws_byte_cursor_from_string(g_aws_signing_signed_headers_query_param_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_signed_headers_param_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_security_token_param_name = aws_byte_cursor_from_string(g_aws_signing_security_token_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_security_token_param_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_expires_param_name = aws_byte_cursor_from_string(g_aws_signing_expires_query_param_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_expires_param_name, NULL, NULL)) { return AWS_OP_ERR; } s_amz_region_set_param_name = aws_byte_cursor_from_string(g_aws_signing_region_set_name); if (aws_hash_table_put(&s_forbidden_params, &s_amz_region_set_param_name, NULL, NULL)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_signing_clean_up_signing_tables(void) { aws_hash_table_clean_up(&s_skipped_headers); aws_hash_table_clean_up(&s_forbidden_headers); aws_hash_table_clean_up(&s_forbidden_params); } static bool s_is_header_based_signature_value(enum aws_signature_type signature_type) { switch (signature_type) { case AWS_ST_HTTP_REQUEST_HEADERS: case AWS_ST_CANONICAL_REQUEST_HEADERS: return true; default: return false; } } static bool s_is_query_param_based_signature_value(enum aws_signature_type signature_type) { switch (signature_type) { case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: case AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS: return true; default: return false; } } static int s_get_signature_type_cursor(struct aws_signing_state_aws *state, struct aws_byte_cursor *cursor) { switch (state->config.signature_type) { case AWS_ST_HTTP_REQUEST_HEADERS: case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: case AWS_ST_CANONICAL_REQUEST_HEADERS: case AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS: if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { *cursor = aws_byte_cursor_from_string(g_signature_type_sigv4a_http_request); } else { *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_http_request); } break; case AWS_ST_HTTP_REQUEST_CHUNK: case AWS_ST_HTTP_REQUEST_EVENT: if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4a_s3_chunked_payload); } else { *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_payload); } break; case AWS_ST_HTTP_REQUEST_TRAILING_HEADERS: if (state->config.algorithm == 
AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4a_s3_chunked_trailer_payload); } else { *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_trailer_payload); } break; default: return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE); } return AWS_OP_SUCCESS; } static int s_append_sts_signature_type(struct aws_signing_state_aws *state, struct aws_byte_buf *dest) { struct aws_byte_cursor algorithm_cursor; if (s_get_signature_type_cursor(state, &algorithm_cursor)) { return AWS_OP_ERR; } return aws_byte_buf_append_dynamic(dest, &algorithm_cursor); } /* * signing state management */ struct aws_signing_state_aws *aws_signing_state_new( struct aws_allocator *allocator, const struct aws_signing_config_aws *config, const struct aws_signable *signable, aws_signing_complete_fn *on_complete, void *userdata) { if (aws_validate_aws_signing_config_aws(config)) { return NULL; } struct aws_signing_state_aws *state = aws_mem_calloc(allocator, 1, sizeof(struct aws_signing_state_aws)); if (!state) { return NULL; } state->allocator = allocator; /* Make our own copy of the signing config */ state->config = *config; if (state->config.credentials_provider != NULL) { aws_credentials_provider_acquire(state->config.credentials_provider); } if (state->config.credentials != NULL) { aws_credentials_acquire(state->config.credentials); } if (aws_byte_buf_init_cache_and_update_cursors( &state->config_string_buffer, allocator, &state->config.region, &state->config.service, &state->config.signed_body_value, NULL /*end*/)) { goto on_error; } state->signable = signable; state->on_complete = on_complete; state->userdata = userdata; if (aws_signing_result_init(&state->result, allocator)) { goto on_error; } if (aws_byte_buf_init(&state->canonical_request, allocator, CANONICAL_REQUEST_STARTING_SIZE) || aws_byte_buf_init(&state->string_to_sign, allocator, STRING_TO_SIGN_STARTING_SIZE) || aws_byte_buf_init(&state->signed_headers, allocator, SIGNED_HEADERS_STARTING_SIZE) || aws_byte_buf_init(&state->canonical_header_block, allocator, CANONICAL_HEADER_BLOCK_STARTING_SIZE) || aws_byte_buf_init(&state->payload_hash, allocator, PAYLOAD_HASH_STARTING_SIZE) || aws_byte_buf_init(&state->credential_scope, allocator, CREDENTIAL_SCOPE_STARTING_SIZE) || aws_byte_buf_init(&state->access_credential_scope, allocator, ACCESS_CREDENTIAL_SCOPE_STARTING_SIZE) || aws_byte_buf_init(&state->date, allocator, AWS_DATE_TIME_STR_MAX_LEN) || aws_byte_buf_init(&state->signature, allocator, PAYLOAD_HASH_STARTING_SIZE) || aws_byte_buf_init(&state->string_to_sign_payload, allocator, PAYLOAD_HASH_STARTING_SIZE) || aws_byte_buf_init(&state->scratch_buf, allocator, SCRATCH_BUF_STARTING_SIZE)) { goto on_error; } snprintf( state->expiration_array, AWS_ARRAY_SIZE(state->expiration_array), "%" PRIu64 "", config->expiration_in_seconds); return state; on_error: aws_signing_state_destroy(state); return NULL; } void aws_signing_state_destroy(struct aws_signing_state_aws *state) { aws_signing_result_clean_up(&state->result); aws_credentials_provider_release(state->config.credentials_provider); aws_credentials_release(state->config.credentials); aws_byte_buf_clean_up(&state->config_string_buffer); aws_byte_buf_clean_up(&state->canonical_request); aws_byte_buf_clean_up(&state->string_to_sign); aws_byte_buf_clean_up(&state->signed_headers); aws_byte_buf_clean_up(&state->canonical_header_block); aws_byte_buf_clean_up(&state->payload_hash); aws_byte_buf_clean_up(&state->credential_scope); 
aws_byte_buf_clean_up(&state->access_credential_scope); aws_byte_buf_clean_up(&state->date); aws_byte_buf_clean_up(&state->signature); aws_byte_buf_clean_up(&state->string_to_sign_payload); aws_byte_buf_clean_up(&state->scratch_buf); aws_mem_release(state->allocator, state); } /* * canonical request utility functions: * * various appends, conversion/encoding, etc... * */ static int s_append_canonical_method(struct aws_signing_state_aws *state) { const struct aws_signable *signable = state->signable; struct aws_byte_buf *buffer = &state->canonical_request; struct aws_byte_cursor method_cursor; aws_signable_get_property(signable, g_aws_http_method_property_name, &method_cursor); if (aws_byte_buf_append_dynamic(buffer, &method_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(buffer, '\n')) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_append_with_lookup( struct aws_byte_buf *dst, const struct aws_byte_cursor *src, const uint8_t *lookup_table) { if (aws_byte_buf_reserve_relative(dst, src->len)) { return AWS_OP_ERR; } if (aws_byte_buf_append_with_lookup(dst, src, lookup_table)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * A function that builds a normalized path (removes redundant '/' characters, '.' components, and properly pops off * components in response '..' components) * * We use a simple algorithm to do this: * * First split the path into components * Then, using a secondary stack of components, build the final path by pushing and popping (on '..') components * on the stack. The final path is then the concatenation of the secondary stack. */ static int s_append_normalized_path( const struct aws_byte_cursor *raw_path, struct aws_allocator *allocator, struct aws_byte_buf *dest) { struct aws_array_list raw_split; AWS_ZERO_STRUCT(raw_split); struct aws_array_list normalized_split; AWS_ZERO_STRUCT(normalized_split); int result = AWS_OP_ERR; if (aws_array_list_init_dynamic( &raw_split, allocator, DEFAULT_PATH_COMPONENT_COUNT, sizeof(struct aws_byte_cursor))) { goto cleanup; } if (aws_byte_cursor_split_on_char(raw_path, '/', &raw_split)) { goto cleanup; } const size_t raw_split_count = aws_array_list_length(&raw_split); if (aws_array_list_init_dynamic(&normalized_split, allocator, raw_split_count, sizeof(struct aws_byte_cursor))) { goto cleanup; } /* * Iterate the raw split to build a list of path components that make up the * normalized path */ for (size_t i = 0; i < raw_split_count; ++i) { struct aws_byte_cursor path_component; AWS_ZERO_STRUCT(path_component); if (aws_array_list_get_at(&raw_split, &path_component, i)) { goto cleanup; } if (path_component.len == 0 || (path_component.len == 1 && *path_component.ptr == '.')) { /* '.' and '' contribute nothing to a normalized path */ continue; } if (path_component.len == 2 && *path_component.ptr == '.' && *(path_component.ptr + 1) == '.') { /* '..' 
causes us to remove the last valid path component */ aws_array_list_pop_back(&normalized_split); } else { aws_array_list_push_back(&normalized_split, &path_component); } } /* * Special case preserve whether or not the path ended with a '/' */ bool ends_with_slash = raw_path->len > 0 && raw_path->ptr[raw_path->len - 1] == '/'; /* * Paths always start with a single '/' */ if (aws_byte_buf_append_byte_dynamic(dest, '/')) { goto cleanup; } /* * build the final normalized path from the normalized split by joining * the components together with '/' */ const size_t normalized_split_count = aws_array_list_length(&normalized_split); for (size_t i = 0; i < normalized_split_count; ++i) { struct aws_byte_cursor normalized_path_component; AWS_ZERO_STRUCT(normalized_path_component); if (aws_array_list_get_at(&normalized_split, &normalized_path_component, i)) { goto cleanup; } if (aws_byte_buf_append_dynamic(dest, &normalized_path_component)) { goto cleanup; } if (i + 1 < normalized_split_count || ends_with_slash) { if (aws_byte_buf_append_byte_dynamic(dest, '/')) { goto cleanup; } } } result = AWS_OP_SUCCESS; cleanup: aws_array_list_clean_up(&raw_split); aws_array_list_clean_up(&normalized_split); return result; } static int s_append_canonical_path(const struct aws_uri *uri, struct aws_signing_state_aws *state) { const struct aws_signing_config_aws *config = &state->config; struct aws_byte_buf *canonical_request_buffer = &state->canonical_request; struct aws_allocator *allocator = state->allocator; int result = AWS_OP_ERR; /* * Put this at function global scope so that it gets cleaned up even though it's only used inside * a single branch. Allows error handling and cleanup to follow the pattern established * throughout this file. */ struct aws_byte_buf normalized_path; AWS_ZERO_STRUCT(normalized_path); /* * We assume the request's uri path has already been encoded once (in order to go out on the wire). * Some services do not decode the path before performing the sig v4 calculation, resulting in the * service actually performing sigv4 on a double-encoding of the path. In order to match those * services, we must double encode in our calculation as well. */ if (config->flags.use_double_uri_encode) { struct aws_byte_cursor path_cursor; /* * We need to transform the the normalized path, so we can't just append it into the canonical * request. Instead we append it into a temporary buffer and perform the transformation from * it. * * All this does is skip the temporary normalized path in the case where we don't need to * double encode. 
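 *
 * Illustrative example (hypothetical values, not taken from this file): a raw path
 * segment "my file" is already encoded once as "my%20file" by the time it reaches the
 * signer; encoding it a second time turns each '%' into "%25", so the canonical
 * request contains "my%2520file" for services that expect the double-encoded form.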
*/ if (config->flags.should_normalize_uri_path) { if (aws_byte_buf_init(&normalized_path, state->allocator, uri->path.len)) { goto cleanup; } if (s_append_normalized_path(&uri->path, allocator, &normalized_path)) { goto cleanup; } path_cursor = aws_byte_cursor_from_buf(&normalized_path); } else { path_cursor = uri->path; } if (aws_byte_buf_append_encoding_uri_path(canonical_request_buffer, &path_cursor)) { goto cleanup; } } else { /* * If we don't need to perform any kind of transformation on the normalized path, just append it directly * into the canonical request buffer */ if (config->flags.should_normalize_uri_path) { if (s_append_normalized_path(&uri->path, allocator, canonical_request_buffer)) { goto cleanup; } } else { if (aws_byte_buf_append_dynamic(canonical_request_buffer, &uri->path)) { goto cleanup; } } } if (aws_byte_buf_append_byte_dynamic(canonical_request_buffer, '\n')) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up(&normalized_path); return result; } /* * URI-encoded query params are compared first by key, then by value */ int s_canonical_query_param_comparator(const void *lhs, const void *rhs) { const struct aws_uri_param *left_param = lhs; const struct aws_uri_param *right_param = rhs; int key_compare = aws_byte_cursor_compare_lexical(&left_param->key, &right_param->key); if (key_compare != 0) { return key_compare; } return aws_byte_cursor_compare_lexical(&left_param->value, &right_param->value); } /* * We need to sort the headers in a stable fashion, but the default sorting methods available in the c library are not * guaranteed to be stable. We can make the sort stable by instead sorting a wrapper object that includes the original * index of the wrapped object and using that index to break lexical ties. * * We sort a copy of the header (rather than pointers) so that we can easily inject secondary headers into * the canonical request. */ struct stable_header { struct aws_signable_property_list_pair header; size_t original_index; }; int s_canonical_header_comparator(const void *lhs, const void *rhs) { const struct stable_header *left_header = lhs; const struct stable_header *right_header = rhs; int result = aws_byte_cursor_compare_lookup( &left_header->header.name, &right_header->header.name, aws_lookup_table_to_lower_get()); if (result != 0) { return result; } /* they're the same header, use the original index to keep the sort stable */ if (left_header->original_index < right_header->original_index) { return -1; } /* equality should never happen */ AWS_ASSERT(left_header->original_index > right_header->original_index); return 1; } /** * Given URI-encoded query param, write it to canonical buffer. */ static int s_append_canonical_query_param(struct aws_uri_param *encoded_param, struct aws_byte_buf *buffer) { if (aws_byte_buf_append_dynamic(buffer, &encoded_param->key)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(buffer, '=')) { return AWS_OP_ERR; } if (aws_byte_buf_append_dynamic(buffer, &encoded_param->value)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /** * Given unencoded authorization query param: * Add it, URI-encoded to final signing result (to be added to signable later). 
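 * For instance (hypothetical values): the unencoded pair
 *   ("X-Amz-SignedHeaders", "host;x-amz-date")
 * would be stored in the signing result as
 *   ("X-Amz-SignedHeaders", "host%3Bx-amz-date")
 * since ';' is not an unreserved character and gets percent-encoded.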
*/ static int s_add_query_param_to_signing_result( struct aws_signing_state_aws *state, const struct aws_uri_param *unencoded_param) { /* URI-Encode, and add to final signing result */ state->scratch_buf.len = 0; if (aws_byte_buf_append_encoding_uri_param(&state->scratch_buf, &unencoded_param->key)) { return AWS_OP_ERR; } size_t key_len = state->scratch_buf.len; if (aws_byte_buf_append_encoding_uri_param(&state->scratch_buf, &unencoded_param->value)) { return AWS_OP_ERR; } struct aws_byte_cursor encoded_val = aws_byte_cursor_from_buf(&state->scratch_buf); struct aws_byte_cursor encoded_key = aws_byte_cursor_advance(&encoded_val, key_len); if (aws_signing_result_append_property_list( &state->result, g_aws_http_query_params_property_list_name, &encoded_key, &encoded_val)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /** * Given unencoded authorization query param: * 1) Add it to list of all unencoded query params (to be canonicalized later). * 2) Add it, URI-encoded to final signing result (to be added to signable later). */ static int s_add_authorization_query_param( struct aws_signing_state_aws *state, struct aws_array_list *unencoded_auth_params, const struct aws_uri_param *unencoded_auth_param) { /* Add to unencoded list */ if (aws_array_list_push_back(unencoded_auth_params, unencoded_auth_param)) { return AWS_OP_ERR; } return s_add_query_param_to_signing_result(state, unencoded_auth_param); } /* * Checks the header against both an internal skip list as well as an optional user-supplied filter * function. Only sign the header if both functions allow it. */ static bool s_should_sign_header(struct aws_signing_state_aws *state, struct aws_byte_cursor *name) { if (state->config.should_sign_header) { if (!state->config.should_sign_header(name, state->config.should_sign_header_ud)) { return false; } } struct aws_hash_element *element = NULL; if (aws_hash_table_find(&s_skipped_headers, name, &element) == AWS_OP_ERR || element != NULL) { return false; } return true; } /* * If the auth type was query param then this function adds all the required query params and values with the * exception of X-Amz-Signature (because we're still computing its value) Parameters are added to both the * canonical request and the final signing result. 
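 *
 * Concretely, the parameters handled below are X-Amz-Algorithm, X-Amz-Credential,
 * X-Amz-Date, X-Amz-SignedHeaders, X-Amz-Expires (when an expiration is configured),
 * the session token (X-Amz-Security-Token, or X-Amz-S3session-Token for S3 Express),
 * and X-Amz-Region-Set for sigv4a.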
*/ static int s_add_authorization_query_params( struct aws_signing_state_aws *state, struct aws_array_list *unencoded_query_params) { if (state->config.signature_type != AWS_ST_HTTP_REQUEST_QUERY_PARAMS) { return AWS_OP_SUCCESS; } int result = AWS_OP_ERR; /* X-Amz-Algorithm */ struct aws_uri_param algorithm_param = { .key = aws_byte_cursor_from_string(g_aws_signing_algorithm_query_param_name), }; if (s_get_signature_type_cursor(state, &algorithm_param.value)) { goto done; } if (s_add_authorization_query_param(state, unencoded_query_params, &algorithm_param)) { goto done; } /* X-Amz-Credential */ struct aws_uri_param credential_param = { .key = aws_byte_cursor_from_string(g_aws_signing_credential_query_param_name), .value = aws_byte_cursor_from_buf(&state->access_credential_scope), }; if (s_add_authorization_query_param(state, unencoded_query_params, &credential_param)) { goto done; } /* X-Amz-Date */ struct aws_uri_param date_param = { .key = aws_byte_cursor_from_string(g_aws_signing_date_name), .value = aws_byte_cursor_from_buf(&state->date), }; if (s_add_authorization_query_param(state, unencoded_query_params, &date_param)) { goto done; } /* X-Amz-SignedHeaders */ struct aws_uri_param signed_headers_param = { .key = aws_byte_cursor_from_string(g_aws_signing_signed_headers_query_param_name), .value = aws_byte_cursor_from_buf(&state->signed_headers), }; if (s_add_authorization_query_param(state, unencoded_query_params, &signed_headers_param)) { goto done; } /* X-Amz-Expires */ uint64_t expiration_in_seconds = state->config.expiration_in_seconds; if (expiration_in_seconds > 0) { struct aws_uri_param expires_param = { .key = aws_byte_cursor_from_string(g_aws_signing_expires_query_param_name), .value = aws_byte_cursor_from_c_str(state->expiration_array), }; if (s_add_authorization_query_param(state, unencoded_query_params, &expires_param)) { goto done; } } /* X-Amz-*-token */ /* We have different token between S3Express and other signing, which needs different token header name */ struct aws_byte_cursor token_header_name; if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { /* X-Amz-S3session-Token */ token_header_name = s_amz_s3session_token_header_name; } else { /* X-Amz-Security-Token */ token_header_name = s_amz_security_token_header_name; } struct aws_byte_cursor session_token_cursor = aws_credentials_get_session_token(state->config.credentials); if (session_token_cursor.len > 0) { struct aws_uri_param security_token_param = { .key = token_header_name, .value = session_token_cursor, }; /* If omit_session_token is true, then security token is added to the * final signing result, but is treated as "unsigned" and does not * contribute to the authorization signature */ if (state->config.flags.omit_session_token) { if (s_add_query_param_to_signing_result(state, &security_token_param)) { goto done; } } else { if (s_add_authorization_query_param(state, unencoded_query_params, &security_token_param)) { goto done; } } } /* X-Amz-Region-Set */ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { struct aws_uri_param region_set_param = { .key = aws_byte_cursor_from_string(g_aws_signing_region_set_name), .value = state->config.region, }; if (s_add_authorization_query_param(state, unencoded_query_params, ®ion_set_param)) { goto done; } } /* NOTE: Update MAX_AUTHORIZATION_QUERY_PARAM_COUNT if more params added */ result = AWS_OP_SUCCESS; done: return result; } static int s_validate_query_params(struct aws_array_list *unencoded_query_params) { const size_t param_count = 
aws_array_list_length(unencoded_query_params); for (size_t i = 0; i < param_count; ++i) { struct aws_uri_param param; AWS_ZERO_STRUCT(param); aws_array_list_get_at(unencoded_query_params, ¶m, i); struct aws_hash_element *forbidden_element = NULL; aws_hash_table_find(&s_forbidden_params, ¶m.key, &forbidden_element); if (forbidden_element != NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "AWS authorization query param \"" PRInSTR "\" found in request while signing", AWS_BYTE_CURSOR_PRI(param.key)); return aws_raise_error(AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM); } } return AWS_OP_SUCCESS; } /** * Apply or remove URI-encoding to each aws_uri_param in a list. * (new strings are added to temp_strings) * Append function must grow buffer if necessary. */ static int s_transform_query_params( struct aws_signing_state_aws *state, struct aws_array_list *param_list, struct aws_array_list *temp_strings, int (*byte_buf_append_dynamic_param_fn)(struct aws_byte_buf *, const struct aws_byte_cursor *)) { const size_t param_count = aws_array_list_length(param_list); struct aws_uri_param *param = NULL; for (size_t i = 0; i < param_count; ++i) { aws_array_list_get_at_ptr(param_list, (void **)¶m, i); /* encode/decode key and save string */ state->scratch_buf.len = 0; if (byte_buf_append_dynamic_param_fn(&state->scratch_buf, ¶m->key)) { return AWS_OP_ERR; } struct aws_string *key_str = aws_string_new_from_buf(state->allocator, &state->scratch_buf); if (!key_str) { return AWS_OP_ERR; } if (aws_array_list_push_back(temp_strings, &key_str)) { aws_string_destroy(key_str); return AWS_OP_ERR; } /* encode/decode value and save string */ state->scratch_buf.len = 0; if (byte_buf_append_dynamic_param_fn(&state->scratch_buf, ¶m->value)) { return AWS_OP_ERR; } struct aws_string *value_str = aws_string_new_from_buf(state->allocator, &state->scratch_buf); if (!value_str) { return AWS_OP_ERR; } if (aws_array_list_push_back(temp_strings, &value_str)) { aws_string_destroy(value_str); return AWS_OP_ERR; } /* save encoded/decoded param */ param->key = aws_byte_cursor_from_string(key_str); param->value = aws_byte_cursor_from_string(value_str); } return AWS_OP_SUCCESS; } /* * Adds the full canonical query string to the canonical request. * Note that aws-c-auth takes query params from the URI, so they should already be URI-encoded. * To ensure that the signature uses "canonical" URI-encoding, we decode and then re-encode the params. */ static int s_append_canonical_query_string(struct aws_uri *uri, struct aws_signing_state_aws *state) { struct aws_allocator *allocator = state->allocator; struct aws_byte_buf *canonical_request_buffer = &state->canonical_request; int result = AWS_OP_ERR; struct aws_array_list query_params; AWS_ZERO_STRUCT(query_params); struct aws_array_list temp_strings; AWS_ZERO_STRUCT(temp_strings); /* Determine max number of query parameters. 
* If none, skip to end of function */ size_t max_param_count = 0; struct aws_uri_param param_i; AWS_ZERO_STRUCT(param_i); while (aws_uri_query_string_next_param(uri, ¶m_i)) { ++max_param_count; } if (state->config.signature_type == AWS_ST_HTTP_REQUEST_QUERY_PARAMS) { max_param_count += MAX_AUTHORIZATION_QUERY_PARAM_COUNT; } if (max_param_count == 0) { goto finish; } /* Allocate storage for mutable list of query params */ if (aws_array_list_init_dynamic(&query_params, allocator, max_param_count, sizeof(struct aws_uri_param))) { goto cleanup; } /* Allocate storage for both the decoded, and re-encoded, key and value strings */ if (aws_array_list_init_dynamic( &temp_strings, state->allocator, max_param_count * 4, sizeof(struct aws_string *))) { goto cleanup; } /* Get existing query params */ if (aws_uri_query_string_params(uri, &query_params)) { goto cleanup; } /* Remove URI-encoding */ if (s_transform_query_params(state, &query_params, &temp_strings, aws_byte_buf_append_decoding_uri)) { goto cleanup; } /* Validate existing query params */ if (s_validate_query_params(&query_params)) { goto cleanup; } /* Add authorization query params */ if (s_add_authorization_query_params(state, &query_params)) { goto cleanup; } /* Apply canonical URI-encoding to the query params */ if (s_transform_query_params(state, &query_params, &temp_strings, aws_byte_buf_append_encoding_uri_param)) { goto cleanup; } const size_t param_count = aws_array_list_length(&query_params); /* Sort the encoded params and append to canonical request */ qsort(query_params.data, param_count, sizeof(struct aws_uri_param), s_canonical_query_param_comparator); for (size_t i = 0; i < param_count; ++i) { struct aws_uri_param param; AWS_ZERO_STRUCT(param); if (aws_array_list_get_at(&query_params, ¶m, i)) { goto cleanup; } if (s_append_canonical_query_param(¶m, canonical_request_buffer)) { goto cleanup; } if (i + 1 < param_count) { if (aws_byte_buf_append_byte_dynamic(canonical_request_buffer, '&')) { goto cleanup; } } } finish: if (aws_byte_buf_append_byte_dynamic(canonical_request_buffer, '\n')) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_array_list_clean_up(&query_params); if (aws_array_list_is_valid(&temp_strings)) { const size_t string_count = aws_array_list_length(&temp_strings); for (size_t i = 0; i < string_count; ++i) { struct aws_string *string = NULL; aws_array_list_get_at(&temp_strings, &string, i); aws_string_destroy(string); } aws_array_list_clean_up(&temp_strings); } return result; } /* * It is unclear from the spec (and not resolved by the tests) whether other forms of whitespace (\t \v) should be * included in the trimming done to headers */ static bool s_is_space(uint8_t value) { return aws_isspace(value); } /* * Appends a single header key-value pair to the canonical request. Multi-line and repeat headers make this more * complicated than you'd expect. * * We call this function on a sorted collection, so header repeats are guaranteed to be consecutive. * * In particular, there are two cases: * (1) This is a header whose name hasn't been seen before, in which case we start a new line and append both name and * value. (2) This is a header we've previously seen, just append the value. * * The fact that we can't '\n' until we've moved to a new header name also complicates the logic. * * This function appends to a state buffer rather than the canonical request. 
This allows us to calculate the signed * headers (so that it can go into the query param if needed) before the query params are put into the canonical * request. */ static int s_append_canonical_header( struct aws_signing_state_aws *state, struct aws_signable_property_list_pair *header, const struct aws_byte_cursor *last_seen_header_name) { struct aws_byte_buf *canonical_header_buffer = &state->canonical_header_block; struct aws_byte_buf *signed_headers_buffer = &state->signed_headers; const uint8_t *to_lower_table = aws_lookup_table_to_lower_get(); /* * Write to the signed_headers shared state for later use, copy * to canonical header buffer as well */ if (last_seen_header_name == NULL || aws_byte_cursor_compare_lookup(last_seen_header_name, &header->name, aws_lookup_table_to_lower_get()) != 0) { /* * The headers arrive in sorted order, so we know we've never seen this header before */ if (last_seen_header_name) { /* * there's a previous header, add appropriate separator in both canonical header buffer * and signed headers buffer */ if (aws_byte_buf_append_byte_dynamic(canonical_header_buffer, '\n')) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(signed_headers_buffer, ';')) { return AWS_OP_ERR; } } /* add it to the signed headers buffer */ if (s_append_with_lookup(signed_headers_buffer, &header->name, to_lower_table)) { return AWS_OP_ERR; } /* add it to the canonical header buffer */ if (s_append_with_lookup(canonical_header_buffer, &header->name, to_lower_table)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(canonical_header_buffer, ':')) { return AWS_OP_ERR; } } else { /* we've seen this header before, add a comma before appending the value */ if (aws_byte_buf_append_byte_dynamic(canonical_header_buffer, ',')) { return AWS_OP_ERR; } } /* * This is the unsafe, non-append write of the header value where consecutive whitespace * is squashed into a single space. Since this can only shrink the value length and we've * already reserved enough to hold the value, we can do raw buffer writes safely without * worrying about capacity. 
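 *
 * A sketch of the overall effect (hypothetical headers, not from the test suite): given
 *   My-Header: a   b   c
 *   My-Header: d
 * the canonical header block ends up containing the single line
 *   my-header:a b c,d
 * and "my-header" appears exactly once in the signed headers list.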
*/ struct aws_byte_cursor trimmed_value = aws_byte_cursor_trim_pred(&header->value, s_is_space); /* raw, unsafe write loop */ bool in_space = false; uint8_t *start_ptr = trimmed_value.ptr; uint8_t *end_ptr = trimmed_value.ptr + trimmed_value.len; uint8_t *dest_ptr = canonical_header_buffer->buffer + canonical_header_buffer->len; while (start_ptr < end_ptr) { uint8_t value = *start_ptr; bool is_space = s_is_space(value); if (is_space) { value = ' '; } if (!is_space || !in_space) { *dest_ptr++ = value; ++canonical_header_buffer->len; } in_space = is_space; ++start_ptr; } return AWS_OP_SUCCESS; } /* Add header to stable_header_list to be canonicalized, and also to final signing result */ static int s_add_authorization_header( struct aws_signing_state_aws *state, struct aws_array_list *stable_header_list, size_t *out_required_capacity, struct aws_byte_cursor name, struct aws_byte_cursor value) { /* Add to stable_header_list to be canonicalized */ struct stable_header stable_header = { .original_index = aws_array_list_length(stable_header_list), .header = { .name = name, .value = value, }, }; if (aws_array_list_push_back(stable_header_list, &stable_header)) { return AWS_OP_ERR; } /* Add to signing result */ if (aws_signing_result_append_property_list(&state->result, g_aws_http_headers_property_list_name, &name, &value)) { return AWS_OP_ERR; } *out_required_capacity += name.len + value.len; return AWS_OP_SUCCESS; } /* * Builds the list of header name-value pairs to be added to the canonical request. The list members are * actually the header wrapper structs that allow for stable sorting. * * Takes the original request headers, adds X-Amz-Date, and optionally, x-amz-content-sha256 * * If we add filtering/exclusion support, this is where it would go */ static int s_build_canonical_stable_header_list( struct aws_signing_state_aws *state, struct aws_array_list *stable_header_list, size_t *out_required_capacity) { AWS_ASSERT(aws_array_list_length(stable_header_list) == 0); *out_required_capacity = 0; const struct aws_signable *signable = state->signable; /* * request headers */ struct aws_array_list *signable_header_list = NULL; if (aws_signable_get_property_list(signable, g_aws_http_headers_property_list_name, &signable_header_list)) { return AWS_OP_ERR; } const size_t signable_header_count = aws_array_list_length(signable_header_list); for (size_t i = 0; i < signable_header_count; ++i) { struct stable_header header_wrapper; AWS_ZERO_STRUCT(header_wrapper); header_wrapper.original_index = i; if (aws_array_list_get_at(signable_header_list, &header_wrapper.header, i)) { return AWS_OP_ERR; } struct aws_byte_cursor *header_name_cursor = &header_wrapper.header.name; if (!s_should_sign_header(state, header_name_cursor)) { continue; } *out_required_capacity += header_wrapper.header.name.len + header_wrapper.header.value.len; if (aws_array_list_push_back(stable_header_list, &header_wrapper)) { return AWS_OP_ERR; } } /* If doing HEADERS signature type, add required X-Amz-*** headers. * NOTE: For QUERY_PARAMS signature type, X-Amz-*** params are added to query string instead. 
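 *
 * The headers that may be appended here are the session token header
 * (X-Amz-Security-Token, or X-Amz-S3session-Token for S3 Express), X-Amz-Date,
 * X-Amz-Region-Set (sigv4a only), and x-amz-content-sha256 when the signed body
 * header is requested.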
*/ if (state->config.signature_type == AWS_ST_HTTP_REQUEST_HEADERS) { /* * X-Amz-*-Token */ /* We have different token between S3Express and other signing, which needs different token header name */ struct aws_byte_cursor token_header_name; if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { /* X-Amz-S3session-Token */ token_header_name = s_amz_s3session_token_header_name; } else { /* X-Amz-Security-Token */ token_header_name = s_amz_security_token_header_name; } struct aws_byte_cursor session_token_cursor = aws_credentials_get_session_token(state->config.credentials); if (session_token_cursor.len > 0) { /* Note that if omit_session_token is true, it is added to final * signing result but NOT included in canonicalized headers. */ if (state->config.flags.omit_session_token) { if (aws_signing_result_append_property_list( &state->result, g_aws_http_headers_property_list_name, &token_header_name, &session_token_cursor)) { return AWS_OP_ERR; } } else { if (s_add_authorization_header( state, stable_header_list, out_required_capacity, token_header_name, session_token_cursor)) { return AWS_OP_ERR; } } } /* * X-Amz-Date */ if (s_add_authorization_header( state, stable_header_list, out_required_capacity, s_amz_date_header_name, aws_byte_cursor_from_buf(&state->date))) { return AWS_OP_ERR; } *out_required_capacity += g_aws_signing_date_name->len + state->date.len; /* * x-amz-region-set */ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { if (s_add_authorization_header( state, stable_header_list, out_required_capacity, aws_byte_cursor_from_string(g_aws_signing_region_set_name), state->config.region)) { return AWS_OP_ERR; } } /* * x-amz-content-sha256 (optional) */ if (state->config.signed_body_header == AWS_SBHT_X_AMZ_CONTENT_SHA256) { if (s_add_authorization_header( state, stable_header_list, out_required_capacity, s_amz_content_sha256_header_name, aws_byte_cursor_from_buf(&state->payload_hash))) { return AWS_OP_ERR; } } /* NOTE: Update MAX_AUTHORIZATION_HEADER_COUNT if more headers added */ } *out_required_capacity += aws_array_list_length(stable_header_list) * 2; /* ':' + '\n' per header */ return AWS_OP_SUCCESS; } static int s_validate_signable_header_list(struct aws_array_list *header_list) { const size_t header_count = aws_array_list_length(header_list); for (size_t i = 0; i < header_count; ++i) { struct aws_signable_property_list_pair header; AWS_ZERO_STRUCT(header); aws_array_list_get_at(header_list, &header, i); struct aws_hash_element *forbidden_element = NULL; aws_hash_table_find(&s_forbidden_headers, &header.name, &forbidden_element); if (forbidden_element != NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "AWS authorization header \"" PRInSTR "\" found in request while signing", AWS_BYTE_CURSOR_PRI(header.name)); return aws_raise_error(AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER); } } return AWS_OP_SUCCESS; } static int s_canonicalize_headers(struct aws_signing_state_aws *state) { const struct aws_signable *signable = state->signable; struct aws_allocator *allocator = state->allocator; struct aws_byte_buf *header_buffer = &state->canonical_header_block; AWS_ASSERT(header_buffer->len == 0); int result = AWS_OP_ERR; struct aws_array_list *signable_header_list = NULL; if (aws_signable_get_property_list(signable, g_aws_http_headers_property_list_name, &signable_header_list)) { return AWS_OP_ERR; } if (s_validate_signable_header_list(signable_header_list)) { return AWS_OP_ERR; } const size_t signable_header_count = aws_array_list_length(signable_header_list); /* 
Overestimate capacity to avoid re-allocation */ size_t headers_reserve_count = signable_header_count + MAX_AUTHORIZATION_HEADER_COUNT; struct aws_array_list headers; if (aws_array_list_init_dynamic(&headers, allocator, headers_reserve_count, sizeof(struct stable_header))) { return AWS_OP_ERR; } size_t header_buffer_reserve_size = 0; if (s_build_canonical_stable_header_list(state, &headers, &header_buffer_reserve_size)) { goto on_cleanup; } /* * Make sure there's enough room in the request buffer to hold a conservative overestimate of the room * needed for canonical headers. There are places we'll be using an append function that does not resize. */ if (aws_byte_buf_reserve(header_buffer, header_buffer_reserve_size)) { return AWS_OP_ERR; } const size_t header_count = aws_array_list_length(&headers); /* Sort the arraylist via lowercase header name and original position */ qsort(headers.data, header_count, sizeof(struct stable_header), s_canonical_header_comparator); /* Iterate the sorted list, writing the canonical representation into the request */ struct aws_byte_cursor *last_seen_header_name = NULL; for (size_t i = 0; i < header_count; ++i) { struct stable_header *wrapper = NULL; if (aws_array_list_get_at_ptr(&headers, (void **)&wrapper, i)) { goto on_cleanup; } if (s_append_canonical_header(state, &wrapper->header, last_seen_header_name)) { goto on_cleanup; } last_seen_header_name = &wrapper->header.name; } /* check for count greater than zero in case someone attempts to canonicalize an empty list of trailing headers */ /* There's always at least one header entry (X-Amz-Date), end the last one */ if (header_count > 0) { if (aws_byte_buf_append_byte_dynamic(header_buffer, '\n')) { return AWS_OP_ERR; } } result = AWS_OP_SUCCESS; on_cleanup: aws_array_list_clean_up(&headers); return result; } static int s_append_signed_headers(struct aws_signing_state_aws *state) { struct aws_byte_buf *header_buffer = &state->canonical_header_block; struct aws_byte_buf *signed_headers_buffer = &state->signed_headers; if (aws_byte_buf_append_byte_dynamic(header_buffer, '\n')) { return AWS_OP_ERR; } struct aws_byte_cursor signed_headers_cursor = aws_byte_cursor_from_buf(signed_headers_buffer); if (aws_byte_buf_append_dynamic(header_buffer, &signed_headers_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(header_buffer, '\n')) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Top-level-ish function to write the canonical header set into a buffer as well as the signed header names * into a separate buffer. We do this very early in the canonical request construction process so that the * query params processing has the signed header names available to it. */ static int s_build_canonical_headers(struct aws_signing_state_aws *state) { if (s_canonicalize_headers(state)) { return AWS_OP_ERR; } if (s_append_signed_headers(state)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Computes the canonical request payload value. 
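 *
 * The resulting value is either the hex encoding of the SHA-256 of the request body
 * stream, or, when the config supplies a signed_body_value (for example a literal
 * such as "UNSIGNED-PAYLOAD"), that value copied verbatim.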
*/ static int s_build_canonical_payload(struct aws_signing_state_aws *state) { const struct aws_signable *signable = state->signable; struct aws_allocator *allocator = state->allocator; struct aws_byte_buf *payload_hash_buffer = &state->payload_hash; AWS_ASSERT(payload_hash_buffer->len == 0); struct aws_byte_buf body_buffer; AWS_ZERO_STRUCT(body_buffer); struct aws_byte_buf digest_buffer; AWS_ZERO_STRUCT(digest_buffer); struct aws_hash *hash = NULL; int result = AWS_OP_ERR; if (state->config.signed_body_value.len == 0) { /* No value provided by user, so we must calculate it */ hash = aws_sha256_new(allocator); if (hash == NULL) { return AWS_OP_ERR; } if (aws_byte_buf_init(&body_buffer, allocator, BODY_READ_BUFFER_SIZE) || aws_byte_buf_init(&digest_buffer, allocator, AWS_SHA256_LEN)) { goto on_cleanup; } struct aws_input_stream *payload_stream = NULL; if (aws_signable_get_payload_stream(signable, &payload_stream)) { goto on_cleanup; } if (payload_stream != NULL) { if (aws_input_stream_seek(payload_stream, 0, AWS_SSB_BEGIN)) { goto on_cleanup; } struct aws_stream_status payload_status; AWS_ZERO_STRUCT(payload_status); while (!payload_status.is_end_of_stream) { /* reset the temporary body buffer; we can calculate the hash in window chunks */ body_buffer.len = 0; if (aws_input_stream_read(payload_stream, &body_buffer)) { goto on_cleanup; } if (body_buffer.len > 0) { struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&body_buffer); aws_hash_update(hash, &body_cursor); } if (aws_input_stream_get_status(payload_stream, &payload_status)) { goto on_cleanup; } } /* reset the input stream for sending */ if (aws_input_stream_seek(payload_stream, 0, AWS_SSB_BEGIN)) { goto on_cleanup; } } if (aws_hash_finalize(hash, &digest_buffer, 0)) { goto on_cleanup; } struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest_buffer); if (aws_hex_encode_append_dynamic(&digest_cursor, payload_hash_buffer)) { goto on_cleanup; } } else { /* Use value provided in config */ if (aws_byte_buf_append_dynamic(payload_hash_buffer, &state->config.signed_body_value)) { goto on_cleanup; } } result = AWS_OP_SUCCESS; on_cleanup: aws_byte_buf_clean_up(&digest_buffer); aws_byte_buf_clean_up(&body_buffer); if (hash) { aws_hash_destroy(hash); } return result; } /* * Copies the previously-computed payload hash into the canonical request buffer */ static int s_append_canonical_payload_hash(struct aws_signing_state_aws *state) { struct aws_byte_buf *canonical_request_buffer = &state->canonical_request; struct aws_byte_buf *payload_hash_buffer = &state->payload_hash; /* * Copy the hex-encoded payload hash into the canonical request */ struct aws_byte_cursor payload_hash_cursor = aws_byte_cursor_from_buf(payload_hash_buffer); if (aws_byte_buf_append_dynamic(canonical_request_buffer, &payload_hash_cursor)) { return AWS_OP_ERR; } /* Sigv4 spec claims a newline should be included after the payload, but the implementation doesn't do this */ return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_credential_scope_sigv4_terminator, "aws4_request"); static int s_append_credential_scope_terminator(enum aws_signing_algorithm algorithm, struct aws_byte_buf *dest) { struct aws_byte_cursor terminator_cursor; switch (algorithm) { case AWS_SIGNING_ALGORITHM_V4: case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: terminator_cursor = aws_byte_cursor_from_string(s_credential_scope_sigv4_terminator); break; default: return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM); } return 
aws_byte_buf_append_dynamic(dest, &terminator_cursor); } /* * Builds the credential scope string by appending a bunch of things together: * Date, region, service, algorithm terminator */ static int s_build_credential_scope(struct aws_signing_state_aws *state) { AWS_ASSERT(state->credential_scope.len == 0); const struct aws_signing_config_aws *config = &state->config; struct aws_byte_buf *dest = &state->credential_scope; /* * date output uses the non-dynamic append, so make sure there's enough room first */ if (aws_byte_buf_reserve_relative(dest, AWS_DATE_TIME_STR_MAX_LEN)) { return AWS_OP_ERR; } if (aws_date_time_to_utc_time_short_str(&config->date, AWS_DATE_FORMAT_ISO_8601_BASIC, dest)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '/')) { return AWS_OP_ERR; } if (config->algorithm != AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { if (aws_byte_buf_append_dynamic(dest, &config->region)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '/')) { return AWS_OP_ERR; } } if (aws_byte_buf_append_dynamic(dest, &config->service)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '/')) { return AWS_OP_ERR; } if (s_append_credential_scope_terminator(state->config.algorithm, dest)) { return AWS_OP_ERR; } /* While we're at it, build the accesskey/credential scope string which is used during query param signing*/ struct aws_byte_cursor access_key_cursor = aws_credentials_get_access_key_id(state->config.credentials); if (aws_byte_buf_append_dynamic(&state->access_credential_scope, &access_key_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(&state->access_credential_scope, '/')) { return AWS_OP_ERR; } struct aws_byte_cursor credential_scope_cursor = aws_byte_cursor_from_buf(&state->credential_scope); if (aws_byte_buf_append_dynamic(&state->access_credential_scope, &credential_scope_cursor)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Hashes the canonical request and stores its hex representation */ static int s_build_canonical_request_hash(struct aws_signing_state_aws *state) { struct aws_allocator *allocator = state->allocator; struct aws_byte_buf *dest = &state->string_to_sign_payload; int result = AWS_OP_ERR; struct aws_byte_buf digest_buffer; AWS_ZERO_STRUCT(digest_buffer); if (aws_byte_buf_init(&digest_buffer, allocator, AWS_SHA256_LEN)) { goto cleanup; } struct aws_byte_cursor canonical_request_cursor = aws_byte_cursor_from_buf(&state->canonical_request); if (aws_sha256_compute(allocator, &canonical_request_cursor, &digest_buffer, 0)) { goto cleanup; } struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest_buffer); if (aws_hex_encode_append_dynamic(&digest_cursor, dest)) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up(&digest_buffer); return result; } /** * Note that there is no canonical request for event signing. * The string to sign for events is detailed here: * https://docs.aws.amazon.com/transcribe/latest/dg/streaming-http2.html * * String stringToSign = * "AWS4-HMAC-SHA256" + * "\n" + * DateTime + * "\n" + * Keypath + * "\n" + * Hex(priorSignature) + * "\n" + * HexHash(nonSignatureHeaders) + * "\n" + * HexHash(payload); * * This function will build the string_to_sign_payload, * aka "everything after the Keypath line in the string to sign". 
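/*
 * Illustrative shape of what this function produces (the hex values are placeholders):
 *
 *   <hex of prior signature>\n
 *   <hex sha256 of the event-stream-encoded ":date" header>\n
 *   <hex sha256 of the event payload>
 *
 * The algorithm, DateTime, and Keypath lines listed above are prepended elsewhere
 * when the complete string to sign is assembled.
 */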
*/ static int s_build_string_to_sign_payload_for_event(struct aws_signing_state_aws *state) { int result = AWS_OP_ERR; struct aws_byte_buf *dest = &state->string_to_sign_payload; /* * Hex(priorSignature) + "\n" * * Fortunately, the prior signature is already hex. */ struct aws_byte_cursor prev_signature_cursor; AWS_ZERO_STRUCT(prev_signature_cursor); if (aws_signable_get_property(state->signable, g_aws_previous_signature_property_name, &prev_signature_cursor)) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Event signable missing previous signature property", (void *)state->signable); return aws_raise_error(AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE); } /* strip any padding (AWS_SIGV4A_SIGNATURE_PADDING_BYTE) from the previous signature */ prev_signature_cursor = aws_trim_padded_sigv4a_signature(prev_signature_cursor); if (aws_byte_buf_append_dynamic(dest, &prev_signature_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { return AWS_OP_ERR; } /* * HexHash(nonSignatureHeaders) + "\n" * * nonSignatureHeaders is just the ":date" header. * We need to encode these headers in event-stream format, as described here: * https://docs.aws.amazon.com/transcribe/latest/dg/streaming-setting-up.html * * | Header Name Length | Header Name | Header Value Type | Header Value Length | Header Value | * | 1 byte | N bytes | 1 byte | 2 bytes | N bytes | */ struct aws_byte_buf date_buffer; AWS_ZERO_STRUCT(date_buffer); struct aws_byte_buf digest_buffer; AWS_ZERO_STRUCT(digest_buffer); if (aws_byte_buf_init(&date_buffer, state->allocator, 15)) { goto cleanup; } struct aws_byte_cursor header_name = aws_byte_cursor_from_c_str(":date"); AWS_FATAL_ASSERT(aws_byte_buf_write_u8(&date_buffer, (uint8_t)header_name.len)); if (aws_byte_buf_append_dynamic(&date_buffer, &header_name)) { goto cleanup; } /* Type of timestamp header */ AWS_FATAL_ASSERT(aws_byte_buf_write_u8(&date_buffer, 8 /*AWS_EVENT_STREAM_HEADER_TIMESTAMP*/)); AWS_FATAL_ASSERT(aws_byte_buf_write_be64(&date_buffer, (int64_t)aws_date_time_as_millis(&state->config.date))); /* calculate sha 256 of encoded buffer */ if (aws_byte_buf_init(&digest_buffer, state->allocator, AWS_SHA256_LEN)) { goto cleanup; } struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(&date_buffer); if (aws_sha256_compute(state->allocator, &date_cursor, &digest_buffer, 0)) { goto cleanup; } struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest_buffer); if (aws_hex_encode_append_dynamic(&digest_cursor, dest)) { goto cleanup; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { goto cleanup; } /* * HexHash(payload); * * The payload was already hashed in an earlier stage */ struct aws_byte_cursor current_chunk_hash_cursor = aws_byte_cursor_from_buf(&state->payload_hash); if (aws_byte_buf_append_dynamic(dest, ¤t_chunk_hash_cursor)) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up(&date_buffer); aws_byte_buf_clean_up(&digest_buffer); return result; } static int s_build_canonical_request_body_chunk(struct aws_signing_state_aws *state) { struct aws_byte_buf *dest = &state->string_to_sign_payload; /* previous signature + \n */ struct aws_byte_cursor prev_signature_cursor; AWS_ZERO_STRUCT(prev_signature_cursor); if (aws_signable_get_property(state->signable, g_aws_previous_signature_property_name, &prev_signature_cursor)) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Chunk signable missing previous signature property", (void *)state->signable); return aws_raise_error(AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE); 
} /* strip any padding (AWS_SIGV4A_SIGNATURE_PADDING_BYTE) from the previous signature */ prev_signature_cursor = aws_trim_padded_sigv4a_signature(prev_signature_cursor); if (aws_byte_buf_append_dynamic(dest, &prev_signature_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { return AWS_OP_ERR; } /* empty hash + \n */ if (aws_byte_buf_append_dynamic(dest, &g_aws_signed_body_value_empty_sha256)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { return AWS_OP_ERR; } /* current hash */ struct aws_byte_cursor current_chunk_hash_cursor = aws_byte_cursor_from_buf(&state->payload_hash); if (aws_byte_buf_append_dynamic(dest, &current_chunk_hash_cursor)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_build_canonical_request_trailing_headers(struct aws_signing_state_aws *state) { struct aws_byte_buf *dest = &state->string_to_sign_payload; /* previous signature + \n */ struct aws_byte_cursor prev_signature_cursor; AWS_ZERO_STRUCT(prev_signature_cursor); if (aws_signable_get_property(state->signable, g_aws_previous_signature_property_name, &prev_signature_cursor)) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) trailing_headers signable missing previous signature property", (void *)state->signable); return aws_raise_error(AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE); } /* strip any padding (AWS_SIGV4A_SIGNATURE_PADDING_BYTE) from the previous signature */ prev_signature_cursor = aws_trim_padded_sigv4a_signature(prev_signature_cursor); if (aws_byte_buf_append_dynamic(dest, &prev_signature_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { return AWS_OP_ERR; } /* current hash */ if (s_canonicalize_headers(state)) { return AWS_OP_ERR; } struct aws_byte_cursor header_block_cursor = aws_byte_cursor_from_buf(&state->canonical_header_block); if (aws_byte_buf_append_dynamic(&state->canonical_request, &header_block_cursor)) { return AWS_OP_ERR; } if (s_build_canonical_request_hash(state)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Builds a sigv4-signed canonical request and its hashed value */ static int s_build_canonical_request_sigv4(struct aws_signing_state_aws *state) { AWS_ASSERT(state->canonical_request.len == 0); AWS_ASSERT(state->payload_hash.len > 0); int result = AWS_OP_ERR; struct aws_uri uri; AWS_ZERO_STRUCT(uri); struct aws_byte_cursor uri_cursor; if (aws_signable_get_property(state->signable, g_aws_http_uri_property_name, &uri_cursor)) { return AWS_OP_ERR; } if (aws_uri_init_parse(&uri, state->allocator, &uri_cursor)) { goto cleanup; } if (s_build_canonical_headers(state)) { goto cleanup; } if (s_append_canonical_method(state)) { goto cleanup; } if (s_append_canonical_path(&uri, state)) { goto cleanup; } if (s_append_canonical_query_string(&uri, state)) { goto cleanup; } struct aws_byte_cursor header_block_cursor = aws_byte_cursor_from_buf(&state->canonical_header_block); if (aws_byte_buf_append_dynamic(&state->canonical_request, &header_block_cursor)) { goto cleanup; } if (s_append_canonical_payload_hash(state)) { goto cleanup; } if (s_build_canonical_request_hash(state)) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_uri_clean_up(&uri); return result; } /* * The canonical header list is the next-to-the-last line on the canonical request, so split by lines and take * the penultimate value.
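*
* For reference, a sigv4 canonical request is laid out roughly as:
*
*   HTTPMethod \n CanonicalPath \n CanonicalQueryString \n CanonicalHeaders \n SignedHeaders \n HashedPayload
*
* which is why the signed-headers list is the second-to-last '\n'-delimited token.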
*/ static struct aws_byte_cursor s_get_signed_headers_from_canonical_request( struct aws_allocator *allocator, struct aws_byte_cursor canonical_request) { struct aws_byte_cursor header_cursor; AWS_ZERO_STRUCT(header_cursor); struct aws_array_list splits; AWS_ZERO_STRUCT(splits); if (aws_array_list_init_dynamic( &splits, allocator, CANONICAL_REQUEST_SPLIT_OVER_ESTIMATE, sizeof(struct aws_byte_cursor))) { return header_cursor; } if (aws_byte_cursor_split_on_char(&canonical_request, '\n', &splits)) { goto done; } size_t split_count = aws_array_list_length(&splits); if (split_count > 1) { aws_array_list_get_at(&splits, &header_cursor, split_count - 2); } done: aws_array_list_clean_up(&splits); return header_cursor; } /* * Fill in the signing state values needed by later stages that computing the canonical request would have done. */ static int s_apply_existing_canonical_request(struct aws_signing_state_aws *state) { struct aws_byte_cursor canonical_request_cursor; AWS_ZERO_STRUCT(canonical_request_cursor); if (aws_signable_get_property(state->signable, g_aws_canonical_request_property_name, &canonical_request_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_dynamic(&state->canonical_request, &canonical_request_cursor)) { return AWS_OP_ERR; } struct aws_byte_cursor signed_headers_cursor = s_get_signed_headers_from_canonical_request(state->allocator, canonical_request_cursor); if (aws_byte_buf_append_dynamic(&state->signed_headers, &signed_headers_cursor)) { return AWS_OP_ERR; } if (s_build_canonical_request_hash(state)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Top-level canonical request construction function. * For signature types not associated directly with an http request (chunks, events), this calculates the * string-to-sign payload that replaces the hashed canonical request in those signing procedures. */ int aws_signing_build_canonical_request(struct aws_signing_state_aws *state) { if (aws_date_time_to_utc_time_str(&state->config.date, AWS_DATE_FORMAT_ISO_8601_BASIC, &state->date)) { return AWS_OP_ERR; } if (s_build_canonical_payload(state)) { return AWS_OP_ERR; } if (s_build_credential_scope(state)) { return AWS_OP_ERR; } switch (state->config.signature_type) { case AWS_ST_HTTP_REQUEST_HEADERS: case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: return s_build_canonical_request_sigv4(state); case AWS_ST_HTTP_REQUEST_CHUNK: return s_build_canonical_request_body_chunk(state); case AWS_ST_HTTP_REQUEST_EVENT: return s_build_string_to_sign_payload_for_event(state); case AWS_ST_HTTP_REQUEST_TRAILING_HEADERS: return s_build_canonical_request_trailing_headers(state); case AWS_ST_CANONICAL_REQUEST_HEADERS: case AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS: return s_apply_existing_canonical_request(state); default: return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE); } } /* * Top-level function for computing the string-to-sign in an AWS signing process. */ int aws_signing_build_string_to_sign(struct aws_signing_state_aws *state) { /* We must have a canonical request and the credential scope. 
We must not have the string to sign */ AWS_ASSERT(state->string_to_sign_payload.len > 0); AWS_ASSERT(state->credential_scope.len > 0); AWS_ASSERT(state->string_to_sign.len == 0); struct aws_byte_buf *dest = &state->string_to_sign; if (s_append_sts_signature_type(state, dest)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { return AWS_OP_ERR; } /* date_time output uses raw array writes, so ensure there's enough room beforehand */ if (aws_byte_buf_reserve_relative(dest, AWS_DATE_TIME_STR_MAX_LEN)) { return AWS_OP_ERR; } struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(&state->date); if (aws_byte_buf_append_dynamic(dest, &date_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { return AWS_OP_ERR; } struct aws_byte_cursor credential_scope_cursor = aws_byte_cursor_from_buf(&state->credential_scope); if (aws_byte_buf_append_dynamic(dest, &credential_scope_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '\n')) { return AWS_OP_ERR; } struct aws_byte_cursor sts_payload_cursor = aws_byte_cursor_from_buf(&state->string_to_sign_payload); if (aws_byte_buf_append_dynamic(dest, &sts_payload_cursor)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Signature calculation utility functions */ AWS_STATIC_STRING_FROM_LITERAL(s_secret_key_prefix, "AWS4"); /* * Computes the key to sign with as a function of the secret access key in the credentials and * the components of the credential scope: date, region, service, algorithm terminator */ static int s_compute_sigv4_signing_key(struct aws_signing_state_aws *state, struct aws_byte_buf *dest) { /* dest should be empty */ AWS_ASSERT(dest->len == 0); const struct aws_signing_config_aws *config = &state->config; struct aws_allocator *allocator = state->allocator; int result = AWS_OP_ERR; struct aws_byte_buf secret_key; AWS_ZERO_STRUCT(secret_key); struct aws_byte_buf output; AWS_ZERO_STRUCT(output); struct aws_byte_buf date_buf; AWS_ZERO_STRUCT(date_buf); struct aws_byte_cursor secret_access_key_cursor = aws_credentials_get_secret_access_key(state->config.credentials); if (aws_byte_buf_init(&secret_key, allocator, s_secret_key_prefix->len + secret_access_key_cursor.len) || aws_byte_buf_init(&output, allocator, AWS_SHA256_LEN) || aws_byte_buf_init(&date_buf, allocator, AWS_DATE_TIME_STR_MAX_LEN)) { goto cleanup; } /* * Prep Key */ struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_secret_key_prefix); if (aws_byte_buf_append_dynamic(&secret_key, &prefix_cursor) || aws_byte_buf_append_dynamic(&secret_key, &secret_access_key_cursor)) { goto cleanup; } /* * Prep date */ if (aws_date_time_to_utc_time_short_str(&config->date, AWS_DATE_FORMAT_ISO_8601_BASIC, &date_buf)) { goto cleanup; } struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(&date_buf); struct aws_byte_cursor secret_key_cursor = aws_byte_cursor_from_buf(&secret_key); if (aws_sha256_hmac_compute(allocator, &secret_key_cursor, &date_cursor, &output, 0)) { goto cleanup; } struct aws_byte_cursor chained_key_cursor = aws_byte_cursor_from_buf(&output); output.len = 0; /* necessary evil part 1*/ if (aws_sha256_hmac_compute(allocator, &chained_key_cursor, &config->region, &output, 0)) { goto cleanup; } chained_key_cursor = aws_byte_cursor_from_buf(&output); output.len = 0; /* necessary evil part 2 */ if (aws_sha256_hmac_compute(allocator, &chained_key_cursor, &config->service, &output, 0)) { goto cleanup; } chained_key_cursor = aws_byte_cursor_from_buf(&output); struct aws_byte_cursor 
scope_terminator_cursor = aws_byte_cursor_from_string(s_credential_scope_sigv4_terminator); if (aws_sha256_hmac_compute(allocator, &chained_key_cursor, &scope_terminator_cursor, dest, 0)) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up_secure(&secret_key); aws_byte_buf_clean_up(&output); aws_byte_buf_clean_up(&date_buf); return result; } /* * Calculates the hex-encoding of the final signature value from the sigv4 signing process */ static int s_calculate_sigv4_signature_value(struct aws_signing_state_aws *state) { struct aws_allocator *allocator = state->allocator; int result = AWS_OP_ERR; struct aws_byte_buf key; AWS_ZERO_STRUCT(key); struct aws_byte_buf digest; AWS_ZERO_STRUCT(digest); if (aws_byte_buf_init(&key, allocator, AWS_SHA256_LEN) || aws_byte_buf_init(&digest, allocator, AWS_SHA256_LEN)) { goto cleanup; } if (s_compute_sigv4_signing_key(state, &key)) { goto cleanup; } struct aws_byte_cursor key_cursor = aws_byte_cursor_from_buf(&key); struct aws_byte_cursor string_to_sign_cursor = aws_byte_cursor_from_buf(&state->string_to_sign); if (aws_sha256_hmac_compute(allocator, &key_cursor, &string_to_sign_cursor, &digest, 0)) { goto cleanup; } struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest); if (aws_hex_encode_append_dynamic(&digest_cursor, &state->signature)) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up(&key); aws_byte_buf_clean_up(&digest); return result; } /* * Calculates the hex-encoding of the final signature value from the sigv4a signing process */ static int s_calculate_sigv4a_signature_value(struct aws_signing_state_aws *state) { struct aws_allocator *allocator = state->allocator; int result = AWS_OP_ERR; struct aws_byte_buf ecdsa_digest; AWS_ZERO_STRUCT(ecdsa_digest); struct aws_byte_buf sha256_digest; AWS_ZERO_STRUCT(sha256_digest); struct aws_ecc_key_pair *ecc_key = aws_credentials_get_ecc_key_pair(state->config.credentials); if (ecc_key == NULL) { return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CREDENTIALS); } if (aws_byte_buf_init(&ecdsa_digest, allocator, aws_ecc_key_pair_signature_length(ecc_key)) || aws_byte_buf_init(&sha256_digest, allocator, AWS_SHA256_LEN)) { goto cleanup; } struct aws_byte_cursor string_to_sign_cursor = aws_byte_cursor_from_buf(&state->string_to_sign); if (aws_sha256_compute(allocator, &string_to_sign_cursor, &sha256_digest, 0)) { goto cleanup; } struct aws_byte_cursor sha256_digest_cursor = aws_byte_cursor_from_buf(&sha256_digest); if (aws_ecc_key_pair_sign_message(ecc_key, &sha256_digest_cursor, &ecdsa_digest)) { goto cleanup; } struct aws_byte_cursor ecdsa_digest_cursor = aws_byte_cursor_from_buf(&ecdsa_digest); if (aws_hex_encode_append_dynamic(&ecdsa_digest_cursor, &state->signature)) { goto cleanup; } result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up(&ecdsa_digest); aws_byte_buf_clean_up(&sha256_digest); return result; } /* * Appends a final signature value to a buffer based on the requested signing algorithm */ int s_calculate_signature_value(struct aws_signing_state_aws *state) { switch (state->config.algorithm) { case AWS_SIGNING_ALGORITHM_V4: case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: return s_calculate_sigv4_signature_value(state); case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: return s_calculate_sigv4a_signature_value(state); default: return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM); } } static int s_add_signature_property_to_result_set(struct aws_signing_state_aws *state) { int result = AWS_OP_ERR; struct aws_byte_buf final_signature_buffer; 
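/*
 * Scratch buffer for the final signature property value. Note: for sigv4a chunk and trailing-header signatures
 * the hex signature written below gets right-padded with AWS_SIGV4A_SIGNATURE_PADDING_BYTE out to
 * MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH, presumably so downstream framing can rely on a fixed-width value.
 */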
AWS_ZERO_STRUCT(final_signature_buffer); if (aws_byte_buf_init(&final_signature_buffer, state->allocator, HEX_ENCODED_SIGNATURE_OVER_ESTIMATE)) { return AWS_OP_ERR; } struct aws_byte_cursor signature_value = aws_byte_cursor_from_buf(&state->signature); if (aws_byte_buf_append_dynamic(&final_signature_buffer, &signature_value)) { goto cleanup; } if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC && (state->config.signature_type == AWS_ST_HTTP_REQUEST_CHUNK || state->config.signature_type == AWS_ST_HTTP_REQUEST_TRAILING_HEADERS)) { if (aws_byte_buf_reserve(&final_signature_buffer, MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH)) { goto cleanup; } if (signature_value.len < MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH) { size_t padding_byte_count = MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH - signature_value.len; if (!aws_byte_buf_write_u8_n( &final_signature_buffer, AWS_SIGV4A_SIGNATURE_PADDING_BYTE, padding_byte_count)) { goto cleanup; } } } signature_value = aws_byte_cursor_from_buf(&final_signature_buffer); if (aws_signing_result_set_property(&state->result, g_aws_signature_property_name, &signature_value)) { return AWS_OP_ERR; } result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up(&final_signature_buffer); return result; } /* * Adds the appropriate authorization header or query param to the signing result */ static int s_add_authorization_to_result( struct aws_signing_state_aws *state, struct aws_byte_buf *authorization_value) { struct aws_byte_cursor name; struct aws_byte_cursor value = aws_byte_cursor_from_buf(authorization_value); if (s_is_header_based_signature_value(state->config.signature_type)) { name = aws_byte_cursor_from_string(g_aws_signing_authorization_header_name); if (aws_signing_result_append_property_list( &state->result, g_aws_http_headers_property_list_name, &name, &value)) { return AWS_OP_ERR; } } if (s_is_query_param_based_signature_value(state->config.signature_type)) { name = aws_byte_cursor_from_string(g_aws_signing_authorization_query_param_name); if (aws_signing_result_append_property_list( &state->result, g_aws_http_query_params_property_list_name, &name, &value)) { return AWS_OP_ERR; } } /* * Unconditionally add the signature value as a top-level property. */ if (s_add_signature_property_to_result_set(state)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_credential_prefix, " Credential="); AWS_STATIC_STRING_FROM_LITERAL(s_signed_headers_prefix, ", SignedHeaders="); AWS_STATIC_STRING_FROM_LITERAL(s_signature_prefix, ", Signature="); /* * The Authorization has a lot more than just the final signature value in it. This function appends all those * other values together ala: * * "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, * Signature=" * * The final header value is this with the signature value appended to the end. 
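*
* Query-param based signing skips this preamble entirely; in that case only the signature value itself is added
* to the signing result (see aws_signing_build_authorization_value below).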
*/ static int s_append_authorization_header_preamble(struct aws_signing_state_aws *state, struct aws_byte_buf *dest) { if (s_append_sts_signature_type(state, dest)) { return AWS_OP_ERR; } struct aws_byte_cursor credential_cursor = aws_byte_cursor_from_string(s_credential_prefix); if (aws_byte_buf_append_dynamic(dest, &credential_cursor)) { return AWS_OP_ERR; } struct aws_byte_cursor access_key_cursor = aws_credentials_get_access_key_id(state->config.credentials); if (aws_byte_buf_append_dynamic(dest, &access_key_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(dest, '/')) { return AWS_OP_ERR; } struct aws_byte_cursor credential_scope_cursor = aws_byte_cursor_from_buf(&state->credential_scope); if (aws_byte_buf_append_dynamic(dest, &credential_scope_cursor)) { return AWS_OP_ERR; } struct aws_byte_cursor signed_headers_prefix_cursor = aws_byte_cursor_from_string(s_signed_headers_prefix); if (aws_byte_buf_append_dynamic(dest, &signed_headers_prefix_cursor)) { return AWS_OP_ERR; } struct aws_byte_cursor signed_headers_cursor = aws_byte_cursor_from_buf(&state->signed_headers); if (aws_byte_buf_append_dynamic(dest, &signed_headers_cursor)) { return AWS_OP_ERR; } struct aws_byte_cursor signature_prefix_cursor = aws_byte_cursor_from_string(s_signature_prefix); if (aws_byte_buf_append_dynamic(dest, &signature_prefix_cursor)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Top-level function for constructing the final authorization header/query-param and adding it to the * signing result. */ int aws_signing_build_authorization_value(struct aws_signing_state_aws *state) { AWS_ASSERT(state->string_to_sign.len > 0); AWS_ASSERT(state->credential_scope.len > 0); int result = AWS_OP_ERR; struct aws_byte_buf authorization_value; if (aws_byte_buf_init(&authorization_value, state->allocator, AUTHORIZATION_VALUE_STARTING_SIZE)) { goto cleanup; } if (s_is_header_based_signature_value(state->config.signature_type) && s_append_authorization_header_preamble(state, &authorization_value)) { goto cleanup; } if (s_calculate_signature_value(state)) { goto cleanup; } struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_buf(&state->signature); if (aws_byte_buf_append_dynamic(&authorization_value, &signature_cursor)) { goto cleanup; } if (s_add_authorization_to_result(state, &authorization_value)) { goto cleanup; } AWS_LOGF_INFO( AWS_LS_AUTH_SIGNING, "(id=%p) Http request successfully built final authorization value via algorithm %s, with contents " "\n" PRInSTR "\n", (void *)state->signable, aws_signing_algorithm_to_string(state->config.algorithm), AWS_BYTE_BUF_PRI(authorization_value)); result = AWS_OP_SUCCESS; cleanup: aws_byte_buf_clean_up(&authorization_value); return result; } int aws_validate_v4a_authorization_value( struct aws_allocator *allocator, struct aws_ecc_key_pair *ecc_key, struct aws_byte_cursor string_to_sign_cursor, struct aws_byte_cursor signature_value_cursor) { AWS_LOGF_DEBUG( AWS_LS_AUTH_SIGNING, "(id=%p) Verifying v4a auth value: \n" PRInSTR "\n\nusing string-to-sign: \n" PRInSTR "\n\n", (void *)ecc_key, AWS_BYTE_CURSOR_PRI(signature_value_cursor), AWS_BYTE_CURSOR_PRI(string_to_sign_cursor)); signature_value_cursor = aws_trim_padded_sigv4a_signature(signature_value_cursor); size_t binary_length = 0; if (aws_hex_compute_decoded_len(signature_value_cursor.len, &binary_length)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; struct aws_byte_buf binary_signature; AWS_ZERO_STRUCT(binary_signature); struct aws_byte_buf sha256_digest; AWS_ZERO_STRUCT(sha256_digest); if 
(aws_byte_buf_init(&binary_signature, allocator, binary_length) || aws_byte_buf_init(&sha256_digest, allocator, AWS_SHA256_LEN)) { goto done; } if (aws_hex_decode(&signature_value_cursor, &binary_signature)) { goto done; } if (aws_sha256_compute(allocator, &string_to_sign_cursor, &sha256_digest, 0)) { goto done; } struct aws_byte_cursor binary_signature_cursor = aws_byte_cursor_from_array(binary_signature.buffer, binary_signature.len); struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&sha256_digest); if (aws_ecc_key_pair_verify_signature(ecc_key, &digest_cursor, &binary_signature_cursor)) { goto done; } result = AWS_OP_SUCCESS; done: aws_byte_buf_clean_up(&binary_signature); aws_byte_buf_clean_up(&sha256_digest); return result; } int aws_verify_sigv4a_signing( struct aws_allocator *allocator, const struct aws_signable *signable, const struct aws_signing_config_base *base_config, struct aws_byte_cursor expected_canonical_request_cursor, struct aws_byte_cursor signature_cursor, struct aws_byte_cursor ecc_key_pub_x, struct aws_byte_cursor ecc_key_pub_y) { int result = AWS_OP_ERR; if (base_config->config_type != AWS_SIGNING_CONFIG_AWS) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signing config is not an AWS signing config"); return aws_raise_error(AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION); } if (aws_validate_aws_signing_config_aws((void *)base_config)) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signing config failed validation"); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } const struct aws_signing_config_aws *config = (void *)base_config; if (config->algorithm != AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signing algorithm is not V4_ASYMMETRIC"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (config->credentials == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "AWS credentials were not provided/null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_signing_state_aws *signing_state = aws_signing_state_new(allocator, config, signable, NULL, NULL); if (!signing_state) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to create new signing state"); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_AUTH_SIGNING, "(id=%p) Verifying v4a signature: \n" PRInSTR "\n\nagainst expected canonical request: \n" PRInSTR "\n\nusing ecc key:\n X:" PRInSTR "\n Y:" PRInSTR "\n\n", (void *)signable, AWS_BYTE_CURSOR_PRI(signature_cursor), AWS_BYTE_CURSOR_PRI(expected_canonical_request_cursor), AWS_BYTE_CURSOR_PRI(ecc_key_pub_x), AWS_BYTE_CURSOR_PRI(ecc_key_pub_y)); struct aws_ecc_key_pair *verification_key = aws_ecc_key_new_from_hex_coordinates(allocator, AWS_CAL_ECDSA_P256, ecc_key_pub_x, ecc_key_pub_y); if (verification_key == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to create an ECC key from provided coordinates"); goto done; } if (aws_credentials_get_ecc_key_pair(signing_state->config.credentials) == NULL) { struct aws_credentials *ecc_credentials = aws_credentials_new_ecc_from_aws_credentials(allocator, signing_state->config.credentials); aws_credentials_release(signing_state->config.credentials); signing_state->config.credentials = ecc_credentials; if (signing_state->config.credentials == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to create ECC from provided credentials"); goto done; } } if (aws_signing_build_canonical_request(signing_state)) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to canonicalize request for signing"); goto done; } struct aws_byte_cursor canonical_request_cursor = 
aws_byte_cursor_from_buf(&signing_state->canonical_request); if (aws_byte_cursor_compare_lexical(&expected_canonical_request_cursor, &canonical_request_cursor) != 0) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Canonicalized request and expected canonical request do not match"); aws_raise_error(AWS_AUTH_CANONICAL_REQUEST_MISMATCH); goto done; } if (aws_signing_build_string_to_sign(signing_state)) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to build string to sign from canonical request"); goto done; } if (aws_validate_v4a_authorization_value( allocator, verification_key, aws_byte_cursor_from_buf(&signing_state->string_to_sign), signature_cursor)) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signature does not validate"); aws_raise_error(AWS_AUTH_SIGV4A_SIGNATURE_VALIDATION_FAILURE); goto done; } result = AWS_OP_SUCCESS; done: if (verification_key) { aws_ecc_key_pair_release(verification_key); } aws_signing_state_destroy(signing_state); return result; } static bool s_is_padding_byte(uint8_t byte) { return byte == AWS_SIGV4A_SIGNATURE_PADDING_BYTE; } struct aws_byte_cursor aws_trim_padded_sigv4a_signature(struct aws_byte_cursor signature) { return aws_byte_cursor_trim_pred(&signature, s_is_padding_byte); } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials.c000066400000000000000000000353411456575232400241400ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include /* aws ecc identity which contains the data needed to sign a Sigv4a AWS request */ struct aws_ecc_identity { struct aws_string *access_key_id; struct aws_string *session_token; struct aws_ecc_key_pair *ecc_key; }; /* aws credentials identity which contains the data needed to sign an authenticated AWS request */ struct aws_credentials_identity { struct aws_string *access_key_id; struct aws_string *secret_access_key; struct aws_string *session_token; }; /* aws_token identity contains only a token to represent token only identities like a bearer token. */ struct aws_token_identity { struct aws_string *token; }; enum aws_identity_type { AWS_CREDENTIALS_IDENTITY, TOKEN_IDENTITY, ANONYMOUS_IDENTITY, ECC_IDENTITY, }; /* * A structure that wraps the different types of credentials that the customer can provider to establish their * identity. */ struct aws_credentials { struct aws_allocator *allocator; struct aws_atomic_var ref_count; /* * A timepoint, in seconds since epoch, at which the credentials should no longer be used because they * will have expired. * * * The primary purpose of this value is to allow providers to communicate to the caching provider any * additional constraints on how the sourced credentials should be used (STS). After refreshing the cached * credentials, the caching provider uses the following calculation to determine the next requery time: * * next_requery_time = now + cached_expiration_config; * if (cached_creds->expiration_timepoint_seconds < next_requery_time) { * next_requery_time = cached_creds->expiration_timepoint_seconds; * * The cached provider may, at its discretion, use a smaller requery time to avoid edge-case scenarios where * credential expiration becomes a race condition. 
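*
* For example, with a 15 minute cached_expiration_config but credentials that expire 5 minutes from now, the
* next requery is clamped to the 5 minute mark rather than waiting the full 15 minutes.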
* * The following leaf providers always set this value to UINT64_MAX (indefinite): * static * environment * imds * profile_config* * * * - profile_config may invoke sts which will use a non-max value * * The following leaf providers set this value to a sensible timepoint: * sts - value is based on current time + options->duration_seconds * */ uint64_t expiration_timepoint_seconds; enum aws_identity_type identity_type; union { struct aws_credentials_identity credentials_identity; struct aws_token_identity token_identity; struct aws_ecc_identity ecc_identity; } identity; }; /* * Credentials API implementations */ struct aws_credentials *aws_credentials_new( struct aws_allocator *allocator, struct aws_byte_cursor access_key_id_cursor, struct aws_byte_cursor secret_access_key_cursor, struct aws_byte_cursor session_token_cursor, uint64_t expiration_timepoint_seconds) { if (access_key_id_cursor.ptr == NULL || access_key_id_cursor.len == 0) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (secret_access_key_cursor.ptr == NULL || secret_access_key_cursor.len == 0) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_credentials *credentials = aws_mem_acquire(allocator, sizeof(struct aws_credentials)); if (credentials == NULL) { return NULL; } AWS_ZERO_STRUCT(*credentials); credentials->allocator = allocator; aws_atomic_init_int(&credentials->ref_count, 1); credentials->identity_type = AWS_CREDENTIALS_IDENTITY; struct aws_credentials_identity *credentials_identity = &credentials->identity.credentials_identity; credentials_identity->access_key_id = aws_string_new_from_array(allocator, access_key_id_cursor.ptr, access_key_id_cursor.len); if (credentials_identity->access_key_id == NULL) { goto error; } credentials_identity->secret_access_key = aws_string_new_from_array(allocator, secret_access_key_cursor.ptr, secret_access_key_cursor.len); if (credentials_identity->secret_access_key == NULL) { goto error; } if (session_token_cursor.ptr != NULL && session_token_cursor.len > 0) { credentials_identity->session_token = aws_string_new_from_array(allocator, session_token_cursor.ptr, session_token_cursor.len); if (credentials_identity->session_token == NULL) { goto error; } } credentials->expiration_timepoint_seconds = expiration_timepoint_seconds; return credentials; error: aws_credentials_release(credentials); return NULL; } struct aws_credentials *aws_credentials_new_anonymous(struct aws_allocator *allocator) { struct aws_credentials *credentials = aws_mem_calloc(allocator, 1, sizeof(struct aws_credentials)); credentials->allocator = allocator; credentials->identity_type = ANONYMOUS_IDENTITY; aws_atomic_init_int(&credentials->ref_count, 1); credentials->expiration_timepoint_seconds = UINT64_MAX; return credentials; } static void s_aws_credentials_destroy(struct aws_credentials *credentials) { if (credentials == NULL) { return; } switch (credentials->identity_type) { case AWS_CREDENTIALS_IDENTITY: aws_string_destroy(credentials->identity.credentials_identity.access_key_id); aws_string_destroy_secure(credentials->identity.credentials_identity.secret_access_key); aws_string_destroy_secure(credentials->identity.credentials_identity.session_token); break; case ECC_IDENTITY: aws_string_destroy(credentials->identity.ecc_identity.access_key_id); aws_string_destroy_secure(credentials->identity.ecc_identity.session_token); aws_ecc_key_pair_release(credentials->identity.ecc_identity.ecc_key); break; case TOKEN_IDENTITY: 
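/* token identities own only the bearer token string; wipe it securely on destruction */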
aws_string_destroy_secure(credentials->identity.token_identity.token); break; case ANONYMOUS_IDENTITY: break; } aws_mem_release(credentials->allocator, credentials); } void aws_credentials_acquire(const struct aws_credentials *credentials) { if (credentials == NULL) { return; } aws_atomic_fetch_add((struct aws_atomic_var *)&credentials->ref_count, 1); } void aws_credentials_release(const struct aws_credentials *credentials) { if (credentials == NULL) { return; } size_t old_value = aws_atomic_fetch_sub((struct aws_atomic_var *)&credentials->ref_count, 1); if (old_value == 1) { s_aws_credentials_destroy((struct aws_credentials *)credentials); } } static struct aws_byte_cursor s_empty_token_cursor = { .ptr = NULL, .len = 0, }; struct aws_byte_cursor aws_credentials_get_access_key_id(const struct aws_credentials *credentials) { switch (credentials->identity_type) { case AWS_CREDENTIALS_IDENTITY: if (credentials->identity.credentials_identity.access_key_id != NULL) { return aws_byte_cursor_from_string(credentials->identity.credentials_identity.access_key_id); } break; case ECC_IDENTITY: if (credentials->identity.ecc_identity.access_key_id != NULL) { return aws_byte_cursor_from_string(credentials->identity.ecc_identity.access_key_id); } break; default: break; } return s_empty_token_cursor; } struct aws_byte_cursor aws_credentials_get_secret_access_key(const struct aws_credentials *credentials) { switch (credentials->identity_type) { case AWS_CREDENTIALS_IDENTITY: if (credentials->identity.credentials_identity.secret_access_key != NULL) { return aws_byte_cursor_from_string(credentials->identity.credentials_identity.secret_access_key); } break; default: break; } return s_empty_token_cursor; } struct aws_byte_cursor aws_credentials_get_session_token(const struct aws_credentials *credentials) { switch (credentials->identity_type) { case AWS_CREDENTIALS_IDENTITY: if (credentials->identity.credentials_identity.session_token != NULL) { return aws_byte_cursor_from_string(credentials->identity.credentials_identity.session_token); } break; case ECC_IDENTITY: if (credentials->identity.ecc_identity.session_token != NULL) { return aws_byte_cursor_from_string(credentials->identity.ecc_identity.session_token); } break; default: break; } return s_empty_token_cursor; } struct aws_byte_cursor aws_credentials_get_token(const struct aws_credentials *credentials) { switch (credentials->identity_type) { case TOKEN_IDENTITY: if (credentials->identity.token_identity.token != NULL) { return aws_byte_cursor_from_string(credentials->identity.token_identity.token); } break; default: break; } return s_empty_token_cursor; } uint64_t aws_credentials_get_expiration_timepoint_seconds(const struct aws_credentials *credentials) { return credentials->expiration_timepoint_seconds; } struct aws_ecc_key_pair *aws_credentials_get_ecc_key_pair(const struct aws_credentials *credentials) { if (credentials->identity_type == ECC_IDENTITY) { return credentials->identity.ecc_identity.ecc_key; } return NULL; } bool aws_credentials_is_anonymous(const struct aws_credentials *credentials) { AWS_PRECONDITION(credentials); return credentials->identity_type == ANONYMOUS_IDENTITY; } struct aws_credentials *aws_credentials_new_from_string( struct aws_allocator *allocator, const struct aws_string *access_key_id, const struct aws_string *secret_access_key, const struct aws_string *session_token, uint64_t expiration_timepoint_seconds) { struct aws_byte_cursor access_key_cursor = aws_byte_cursor_from_string(access_key_id); struct aws_byte_cursor 
secret_access_key_cursor = aws_byte_cursor_from_string(secret_access_key); struct aws_byte_cursor session_token_cursor; AWS_ZERO_STRUCT(session_token_cursor); if (session_token) { session_token_cursor = aws_byte_cursor_from_string(session_token); } return aws_credentials_new( allocator, access_key_cursor, secret_access_key_cursor, session_token_cursor, expiration_timepoint_seconds); } struct aws_credentials *aws_credentials_new_ecc( struct aws_allocator *allocator, struct aws_byte_cursor access_key_id, struct aws_ecc_key_pair *ecc_key, struct aws_byte_cursor session_token, uint64_t expiration_timepoint_in_seconds) { if (access_key_id.len == 0 || ecc_key == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Provided credentials do not have a valid access_key_id or ecc_key"); return NULL; } struct aws_credentials *credentials = aws_mem_calloc(allocator, 1, sizeof(struct aws_credentials)); if (credentials == NULL) { return NULL; } credentials->allocator = allocator; credentials->expiration_timepoint_seconds = expiration_timepoint_in_seconds; aws_atomic_init_int(&credentials->ref_count, 1); aws_ecc_key_pair_acquire(ecc_key); credentials->identity_type = ECC_IDENTITY; credentials->identity.ecc_identity.ecc_key = ecc_key; credentials->identity.ecc_identity.access_key_id = aws_string_new_from_array(allocator, access_key_id.ptr, access_key_id.len); if (credentials->identity.ecc_identity.access_key_id == NULL) { goto on_error; } if (session_token.ptr != NULL && session_token.len > 0) { credentials->identity.ecc_identity.session_token = aws_string_new_from_array(allocator, session_token.ptr, session_token.len); if (credentials->identity.ecc_identity.session_token == NULL) { goto on_error; } } return credentials; on_error: s_aws_credentials_destroy(credentials); return NULL; } struct aws_credentials *aws_credentials_new_ecc_from_aws_credentials( struct aws_allocator *allocator, const struct aws_credentials *credentials) { struct aws_ecc_key_pair *ecc_key = aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials(allocator, credentials); if (ecc_key == NULL) { return NULL; } struct aws_credentials *ecc_credentials = aws_credentials_new_ecc( allocator, aws_credentials_get_access_key_id(credentials), ecc_key, aws_credentials_get_session_token(credentials), aws_credentials_get_expiration_timepoint_seconds(credentials)); aws_ecc_key_pair_release(ecc_key); return ecc_credentials; } struct aws_credentials *aws_credentials_new_token( struct aws_allocator *allocator, struct aws_byte_cursor token, uint64_t expiration_timepoint_in_seconds) { if (token.ptr == NULL || token.len == 0) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_credentials *credentials = aws_mem_calloc(allocator, 1, sizeof(struct aws_credentials)); credentials->allocator = allocator; aws_atomic_init_int(&credentials->ref_count, 1); credentials->identity_type = TOKEN_IDENTITY; struct aws_token_identity *token_identity = &credentials->identity.token_identity; token_identity->token = aws_string_new_from_array(allocator, token.ptr, token.len); credentials->expiration_timepoint_seconds = expiration_timepoint_in_seconds; return credentials; } /* * global credentials provider APIs */ void aws_credentials_provider_destroy(struct aws_credentials_provider *provider) { if (provider != NULL) { provider->vtable->destroy(provider); } } struct aws_credentials_provider *aws_credentials_provider_release(struct aws_credentials_provider *provider) { if (provider == NULL) { return NULL; } size_t old_value = 
aws_atomic_fetch_sub(&provider->ref_count, 1); if (old_value == 1) { aws_credentials_provider_destroy(provider); } return NULL; } struct aws_credentials_provider *aws_credentials_provider_acquire(struct aws_credentials_provider *provider) { if (provider == NULL) { return NULL; } aws_atomic_fetch_add(&provider->ref_count, 1); return provider; } int aws_credentials_provider_get_credentials( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { AWS_ASSERT(provider->vtable->get_credentials); return provider->vtable->get_credentials(provider, callback, user_data); } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_anonymous.c000066400000000000000000000040131456575232400301520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include static int s_anonymous_credentials_provider_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials *credentials = provider->impl; AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Anonymous credentials provider successfully sourced credentials", (void *)provider); callback(credentials, AWS_ERROR_SUCCESS, user_data); return AWS_OP_SUCCESS; } static void s_anonymous_credentials_provider_destroy(struct aws_credentials_provider *provider) { struct aws_credentials *credentials = provider->impl; aws_credentials_release(credentials); aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_anonymous_vtable = { .get_credentials = s_anonymous_credentials_provider_get_credentials_async, .destroy = s_anonymous_credentials_provider_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_anonymous( struct aws_allocator *allocator, const struct aws_credentials_provider_shutdown_options *shutdown_options) { struct aws_credentials_provider *provider = aws_mem_calloc(allocator, 1, sizeof(struct aws_credentials_provider)); struct aws_credentials *credentials = aws_credentials_new_anonymous(allocator); if (credentials == NULL) { goto on_new_credentials_failure; } aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_anonymous_vtable, credentials); if (shutdown_options) { provider->shutdown_options = *shutdown_options; } return provider; on_new_credentials_failure: aws_mem_release(allocator, provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_cached.c000066400000000000000000000247251456575232400273450ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include /* ToDo: credentials expiration environment overrides AWS_STATIC_STRING_FROM_LITERAL(s_credential_expiration_env_var, "AWS_CREDENTIAL_EXPIRATION"); */ #define REFRESH_CREDENTIALS_EARLY_DURATION_SECONDS 10 struct aws_credentials_provider_cached { struct aws_credentials_provider *source; struct aws_credentials *cached_credentials; struct aws_mutex lock; uint64_t refresh_interval_in_ns; uint64_t next_refresh_time; aws_io_clock_fn *high_res_clock_fn; aws_io_clock_fn *system_clock_fn; struct aws_linked_list pending_queries; }; static void s_aws_credentials_query_list_notify_and_clean_up( struct aws_linked_list *query_list, struct aws_allocator *allocator, struct aws_credentials *credentials, int error_code) { while (!aws_linked_list_empty(query_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(query_list); struct aws_credentials_query *query = AWS_CONTAINER_OF(node, struct aws_credentials_query, node); query->callback(credentials, error_code, query->user_data); aws_credentials_query_clean_up(query); aws_mem_release(allocator, query); } } static void s_swap_cached_credentials( struct aws_credentials_provider *provider, struct aws_credentials *new_credentials) { struct aws_credentials_provider_cached *cached_provider = provider->impl; aws_credentials_release(cached_provider->cached_credentials); cached_provider->cached_credentials = new_credentials; if (cached_provider->cached_credentials != NULL) { aws_credentials_acquire(cached_provider->cached_credentials); AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider succesfully sourced credentials on refresh", (void *)provider); } else { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider was unable to source credentials on refresh", (void *)provider); } } static void s_cached_credentials_provider_get_credentials_async_callback( struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_credentials_provider *provider = user_data; struct aws_credentials_provider_cached *impl = provider->impl; aws_mutex_lock(&impl->lock); /* * Move pending queries so that we can do notifications outside the lock */ struct aws_linked_list pending_queries; aws_linked_list_init(&pending_queries); aws_linked_list_swap_contents(&pending_queries, &impl->pending_queries); uint64_t next_refresh_time_in_ns = UINT64_MAX; uint64_t high_res_now = 0; if (!impl->high_res_clock_fn(&high_res_now)) { if (impl->refresh_interval_in_ns > 0) { next_refresh_time_in_ns = high_res_now + impl->refresh_interval_in_ns; } uint64_t credentials_expiration_timepoint_seconds = UINT64_MAX; if (credentials != NULL) { credentials_expiration_timepoint_seconds = aws_credentials_get_expiration_timepoint_seconds(credentials); } /* * If the sourced credentials have an explicit expiration time, we should always use that time * rather than the much cruder, mechanical refresh setting on the caching wrapper. 
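*
* The requery below is also scheduled REFRESH_CREDENTIALS_EARLY_DURATION_SECONDS (10 seconds) ahead of the
* reported expiration, so requests signed just before the refresh are less likely to race an expiring credential.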
*/ if (credentials_expiration_timepoint_seconds < UINT64_MAX) { uint64_t system_now = 0; if (!impl->system_clock_fn(&system_now)) { uint64_t system_now_seconds = aws_timestamp_convert(system_now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); if (credentials_expiration_timepoint_seconds >= system_now_seconds + REFRESH_CREDENTIALS_EARLY_DURATION_SECONDS) { next_refresh_time_in_ns = high_res_now; next_refresh_time_in_ns += aws_timestamp_convert( credentials_expiration_timepoint_seconds - system_now_seconds - REFRESH_CREDENTIALS_EARLY_DURATION_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); } } } } impl->next_refresh_time = next_refresh_time_in_ns; AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider next refresh time set to %" PRIu64, (void *)provider, impl->next_refresh_time); s_swap_cached_credentials(provider, credentials); aws_mutex_unlock(&impl->lock); AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider notifying pending queries of new credentials", (void *)provider); s_aws_credentials_query_list_notify_and_clean_up(&pending_queries, provider->allocator, credentials, error_code); } static int s_cached_credentials_provider_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_cached *impl = provider->impl; uint64_t current_time = 0; impl->high_res_clock_fn(&current_time); bool should_submit_query = false; bool perform_callback = false; struct aws_credentials *credentials = NULL; aws_mutex_lock(&impl->lock); if (impl->cached_credentials != NULL && current_time < impl->next_refresh_time) { perform_callback = true; credentials = impl->cached_credentials; aws_credentials_acquire(credentials); } else { struct aws_credentials_query *query = aws_mem_acquire(provider->allocator, sizeof(struct aws_credentials_query)); if (query != NULL) { aws_credentials_query_init(query, provider, callback, user_data); should_submit_query = aws_linked_list_empty(&impl->pending_queries); aws_linked_list_push_back(&impl->pending_queries, &query->node); } else { perform_callback = true; } } aws_mutex_unlock(&impl->lock); if (should_submit_query) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider has expired credentials. Requerying.", (void *)provider); aws_credentials_provider_get_credentials( impl->source, s_cached_credentials_provider_get_credentials_async_callback, provider); } else if (!perform_callback) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider has expired credentials. Waiting on existing query.", (void *)provider); } if (perform_callback) { if (credentials != NULL) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider successfully sourced from cache", (void *)provider); } else { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Cached credentials provider failed to source credentials while skipping requery", (void *)provider); } callback(credentials, (credentials != NULL) ?
AWS_ERROR_SUCCESS : aws_last_error(), user_data); aws_credentials_release(credentials); } return AWS_OP_SUCCESS; } static void s_cached_credentials_provider_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_cached *impl = provider->impl; if (impl == NULL) { return; } aws_credentials_provider_release(impl->source); /* Invoke our own shutdown callback */ aws_credentials_provider_invoke_shutdown_callback(provider); if (impl->cached_credentials != NULL) { aws_credentials_release(impl->cached_credentials); } aws_mutex_clean_up(&impl->lock); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_cached_vtable = { .get_credentials = s_cached_credentials_provider_get_credentials_async, .destroy = s_cached_credentials_provider_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_cached( struct aws_allocator *allocator, const struct aws_credentials_provider_cached_options *options) { AWS_ASSERT(options->source != NULL); struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_cached *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_cached)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_cached_vtable, impl); if (aws_mutex_init(&impl->lock)) { goto on_error; } aws_linked_list_init(&impl->pending_queries); impl->source = options->source; aws_credentials_provider_acquire(impl->source); if (options->refresh_time_in_milliseconds > 0) { impl->refresh_interval_in_ns = aws_timestamp_convert( options->refresh_time_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); } else { /* * TODO: query AWS_CREDENTIAL_EXPIRATION for a refresh override * * This must be an ISO 8601 time interval which we don't have a parser for yet (one could be cobbled * together from the existing timestamp parser). Does not seem important enough to get bogged down in atm. * Punting for now. */ impl->refresh_interval_in_ns = 0; } if (options->high_res_clock_fn != NULL) { impl->high_res_clock_fn = options->high_res_clock_fn; } else { impl->high_res_clock_fn = &aws_high_res_clock_get_ticks; } if (options->system_clock_fn != NULL) { impl->system_clock_fn = options->system_clock_fn; } else { impl->system_clock_fn = &aws_sys_clock_get_ticks; } provider->shutdown_options = options->shutdown_options; return provider; on_error: aws_credentials_provider_destroy(provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_chain.c000066400000000000000000000146431456575232400272160ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include struct aws_credentials_provider_chain_impl { struct aws_array_list providers; }; struct aws_credentials_provider_chain_user_data { struct aws_allocator *allocator; struct aws_credentials_provider *provider_chain; size_t current_provider_index; aws_on_get_credentials_callback_fn *original_callback; void *original_user_data; }; static void s_aws_provider_chain_member_callback(struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_credentials_provider_chain_user_data *wrapped_user_data = user_data; struct aws_credentials_provider *provider = wrapped_user_data->provider_chain; struct aws_credentials_provider_chain_impl *impl = provider->impl; size_t provider_count = aws_array_list_length(&impl->providers); if (credentials != NULL || wrapped_user_data->current_provider_index + 1 >= provider_count) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Credentials provider chain callback terminating on index %zu, with %s credentials and error code " "%d", (void *)provider, wrapped_user_data->current_provider_index + 1, (credentials != NULL) ? "valid" : "invalid", error_code); goto on_terminate_chain; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Credentials provider chain callback %zu invoked with %s credentials and error code %d", (void *)provider, wrapped_user_data->current_provider_index + 1, (credentials != NULL) ? "valid" : "invalid", error_code); wrapped_user_data->current_provider_index++; /* * TODO: Immutable data, shouldn't need a lock, but we might need a fence and we don't have one atm */ struct aws_credentials_provider *next_provider = NULL; if (aws_array_list_get_at(&impl->providers, &next_provider, wrapped_user_data->current_provider_index)) { goto on_terminate_chain; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Credentials provider chain invoking chain member #%zu", (void *)provider, wrapped_user_data->current_provider_index); aws_credentials_provider_get_credentials(next_provider, s_aws_provider_chain_member_callback, wrapped_user_data); return; on_terminate_chain: wrapped_user_data->original_callback(credentials, error_code, wrapped_user_data->original_user_data); aws_credentials_provider_release(provider); aws_mem_release(wrapped_user_data->allocator, wrapped_user_data); } static int s_credentials_provider_chain_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_chain_impl *impl = provider->impl; struct aws_credentials_provider *first_provider = NULL; if (aws_array_list_get_at(&impl->providers, &first_provider, 0)) { return AWS_OP_ERR; } struct aws_credentials_provider_chain_user_data *wrapped_user_data = aws_mem_acquire(provider->allocator, sizeof(struct aws_credentials_provider_chain_user_data)); if (wrapped_user_data == NULL) { return AWS_OP_ERR; } AWS_ZERO_STRUCT(*wrapped_user_data); wrapped_user_data->allocator = provider->allocator; wrapped_user_data->provider_chain = provider; wrapped_user_data->current_provider_index = 0; wrapped_user_data->original_user_data = user_data; wrapped_user_data->original_callback = callback; aws_credentials_provider_acquire(provider); AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Credentials provider chain get credentials dispatch", (void *)provider); aws_credentials_provider_get_credentials(first_provider, s_aws_provider_chain_member_callback, wrapped_user_data); return AWS_OP_SUCCESS; } static void 
s_credentials_provider_chain_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_chain_impl *impl = provider->impl; if (impl == NULL) { return; } size_t provider_count = aws_array_list_length(&impl->providers); for (size_t i = 0; i < provider_count; ++i) { struct aws_credentials_provider *chain_member = NULL; if (aws_array_list_get_at(&impl->providers, &chain_member, i)) { continue; } aws_credentials_provider_release(chain_member); } /* Invoke our own shutdown callback */ aws_credentials_provider_invoke_shutdown_callback(provider); aws_array_list_clean_up(&impl->providers); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_chain_vtable = { .get_credentials = s_credentials_provider_chain_get_credentials_async, .destroy = s_credentials_provider_chain_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_chain( struct aws_allocator *allocator, const struct aws_credentials_provider_chain_options *options) { if (options->provider_count == 0) { return NULL; } struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_chain_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_chain_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_chain_vtable, impl); if (aws_array_list_init_dynamic( &impl->providers, allocator, options->provider_count, sizeof(struct aws_credentials_provider *))) { goto on_error; } for (size_t i = 0; i < options->provider_count; ++i) { struct aws_credentials_provider *sub_provider = options->providers[i]; if (aws_array_list_push_back(&impl->providers, &sub_provider)) { goto on_error; } aws_credentials_provider_acquire(sub_provider); } provider->shutdown_options = options->shutdown_options; return provider; on_error: aws_credentials_provider_destroy(provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_cognito.c000066400000000000000000000731411456575232400275740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define COGNITO_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 5 #define COGNITO_MAX_RETRIES 8 #define HTTP_REQUEST_BODY_INITIAL_SIZE 1024 #define HTTP_RESPONSE_BODY_INITIAL_SIZE 4096 static void s_on_connection_manager_shutdown(void *user_data); static void s_on_connection_setup_fn(struct aws_http_connection *connection, int error_code, void *user_data); struct aws_cognito_login { struct aws_byte_cursor identity_provider_name; struct aws_byte_cursor identity_provider_token; struct aws_byte_buf login_buffer; }; static int s_aws_cognito_login_init( struct aws_cognito_login *login, struct aws_allocator *allocator, struct aws_byte_cursor identity_provider_name, struct aws_byte_cursor identity_provider_token) { AWS_ZERO_STRUCT(*login); login->identity_provider_name = identity_provider_name; login->identity_provider_token = identity_provider_token; return aws_byte_buf_init_cache_and_update_cursors( &login->login_buffer, allocator, &login->identity_provider_name, &login->identity_provider_token, NULL); } static void s_aws_cognito_login_clean_up(struct aws_cognito_login *login) { aws_byte_buf_clean_up(&login->login_buffer); AWS_ZERO_STRUCT(*login); } struct aws_credentials_provider_cognito_impl { struct aws_http_connection_manager *connection_manager; struct aws_retry_strategy *retry_strategy; const struct aws_auth_http_system_vtable *function_table; struct aws_string *endpoint; struct aws_string *identity; struct aws_array_list logins; struct aws_string *custom_role_arn; }; struct cognito_user_data { struct aws_allocator *allocator; struct aws_credentials_provider *provider; aws_on_get_credentials_callback_fn *original_callback; void *original_user_data; struct aws_http_connection *connection; struct aws_http_message *get_credentials_request; struct aws_byte_buf request_body_buffer; struct aws_input_stream *request_body_stream; struct aws_retry_token *retry_token; struct aws_credentials *credentials; struct aws_byte_buf response_body; }; static void s_user_data_reset(struct cognito_user_data *user_data) { aws_byte_buf_clean_up(&user_data->request_body_buffer); user_data->request_body_stream = aws_input_stream_release(user_data->request_body_stream); user_data->get_credentials_request = aws_http_message_release(user_data->get_credentials_request); struct aws_credentials_provider_cognito_impl *impl = user_data->provider->impl; if (user_data->connection != NULL) { impl->function_table->aws_http_connection_manager_release_connection( impl->connection_manager, user_data->connection); user_data->connection = NULL; } aws_byte_buf_reset(&user_data->response_body, false); } static void s_user_data_destroy(struct cognito_user_data *user_data) { if (user_data == NULL) { return; } s_user_data_reset(user_data); aws_byte_buf_clean_up(&user_data->response_body); aws_retry_token_release(user_data->retry_token); aws_credentials_provider_release(user_data->provider); aws_credentials_release(user_data->credentials); aws_mem_release(user_data->allocator, user_data); } static struct cognito_user_data *s_user_data_new( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_allocator *allocator = provider->allocator; struct cognito_user_data *cognito_user_data = aws_mem_calloc(allocator, 1, sizeof(struct cognito_user_data)); cognito_user_data->allocator = allocator; aws_byte_buf_init(&cognito_user_data->response_body, 
cognito_user_data->allocator, HTTP_RESPONSE_BODY_INITIAL_SIZE); cognito_user_data->provider = aws_credentials_provider_acquire(provider); cognito_user_data->original_callback = callback; cognito_user_data->original_user_data = user_data; return cognito_user_data; } static void s_finalize_credentials_query(struct cognito_user_data *user_data, int error_code) { AWS_FATAL_ASSERT(user_data != NULL); if (user_data->credentials == NULL && error_code == AWS_ERROR_SUCCESS) { error_code = AWS_AUTH_CREDENTIALS_PROVIDER_COGNITO_SOURCE_FAILURE; } (user_data->original_callback)(user_data->credentials, error_code, user_data->original_user_data); s_user_data_destroy(user_data); } /* Keys per Cognito-Identity service model */ AWS_STATIC_STRING_FROM_LITERAL(s_credentials_key, "Credentials"); AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_name, "AccessKeyId"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_name, "SecretKey"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_name, "SessionToken"); AWS_STATIC_STRING_FROM_LITERAL(s_expiration_name, "Expiration"); static int s_parse_credentials_from_response(struct cognito_user_data *user_data) { int result = AWS_OP_ERR; struct aws_json_value *response_document = aws_json_value_new_from_string(user_data->allocator, aws_byte_cursor_from_buf(&user_data->response_body)); if (response_document == NULL) { goto done; } struct aws_json_value *credentials_entry = aws_json_value_get_from_object(response_document, aws_byte_cursor_from_string(s_credentials_key)); if (credentials_entry == NULL) { goto done; } struct aws_parse_credentials_from_json_doc_options credentials_parse_options = { .access_key_id_name = aws_string_c_str(s_access_key_id_name), .secret_access_key_name = aws_string_c_str(s_secret_access_key_name), .token_name = aws_string_c_str(s_session_token_name), .expiration_name = aws_string_c_str(s_expiration_name), .expiration_format = AWS_PCEF_NUMBER_UNIX_EPOCH, .token_required = true, .expiration_required = true, }; user_data->credentials = aws_parse_credentials_from_aws_json_object(user_data->allocator, credentials_entry, &credentials_parse_options); if (user_data->credentials == NULL) { goto done; } result = AWS_OP_SUCCESS; done: aws_json_value_destroy(response_document); if (result != AWS_OP_SUCCESS) { aws_raise_error(AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE); } return result; } static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { (void)token; struct cognito_user_data *provider_user_data = user_data; if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider retry task failed: %s", (void *)provider_user_data->provider, aws_error_str(error_code)); s_finalize_credentials_query(user_data, error_code); return; } s_user_data_reset(provider_user_data); struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl; impl->function_table->aws_http_connection_manager_acquire_connection( impl->connection_manager, s_on_connection_setup_fn, provider_user_data); } static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { struct cognito_user_data *provider_user_data = user_data; struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl; int http_response_code = 0; impl->function_table->aws_http_stream_get_incoming_response_status(stream, &http_response_code); if (http_response_code != 200) { error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE; } 
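    /* The stream is no longer needed past this point; only the captured status code and error code
     * drive the parse/retry/finalize logic below. */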
impl->function_table->aws_http_stream_release(stream); AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): GetCredentialsForIdentity call completed with http status %d", (void *)provider_user_data->provider, http_response_code); if (http_response_code == AWS_HTTP_STATUS_CODE_200_OK) { aws_retry_token_record_success(provider_user_data->retry_token); if (s_parse_credentials_from_response(provider_user_data) == AWS_OP_SUCCESS) { s_finalize_credentials_query(user_data, AWS_ERROR_SUCCESS); return; } AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to parse GetCredentialsForIdentity response", (void *)provider_user_data->provider); error_code = AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE; } /* Success path is done, error-only from here on out */ /* Unsure if this should be unconditional or a function of status code. STS does this unconditionally. */ impl->function_table->aws_http_connection_close(provider_user_data->connection); enum aws_retry_error_type error_type = aws_credentials_provider_compute_retry_error_type(http_response_code, error_code); bool can_retry = http_response_code == 0 || error_type != AWS_RETRY_ERROR_TYPE_CLIENT_ERROR; if (!can_retry) { s_finalize_credentials_query(user_data, error_code); return; } if (aws_retry_strategy_schedule_retry( provider_user_data->retry_token, error_type, s_on_retry_ready, provider_user_data)) { error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to schedule retry: %s", (void *)provider_user_data->provider, aws_error_str(error_code)); s_finalize_credentials_query(user_data, error_code); return; } } static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct cognito_user_data *provider_user_data = user_data; return aws_byte_buf_append_dynamic(&provider_user_data->response_body, data); } AWS_STATIC_STRING_FROM_LITERAL(s_identity_id_key, "IdentityId"); AWS_STATIC_STRING_FROM_LITERAL(s_custom_role_arn_key, "CustomRoleArn"); AWS_STATIC_STRING_FROM_LITERAL(s_logins_key, "Logins"); int s_create_get_credentials_for_identity_body_buffer( struct aws_byte_buf *buffer, struct cognito_user_data *provider_user_data) { struct aws_allocator *allocator = provider_user_data->allocator; struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl; int result = AWS_OP_ERR; struct aws_json_value *json_body = aws_json_value_new_object(allocator); if (json_body == NULL) { return AWS_OP_ERR; } struct aws_json_value *identity_string = aws_json_value_new_string(allocator, aws_byte_cursor_from_string(impl->identity)); if (identity_string == NULL) { goto done; } if (aws_json_value_add_to_object(json_body, aws_byte_cursor_from_string(s_identity_id_key), identity_string)) { aws_json_value_destroy(identity_string); goto done; } if (impl->custom_role_arn != NULL) { struct aws_json_value *custom_role_arn_string = aws_json_value_new_string(allocator, aws_byte_cursor_from_string(impl->custom_role_arn)); if (custom_role_arn_string == NULL) { goto done; } if (aws_json_value_add_to_object( json_body, aws_byte_cursor_from_string(s_custom_role_arn_key), custom_role_arn_string)) { aws_json_value_destroy(custom_role_arn_string); goto done; } } size_t login_count = aws_array_list_length(&impl->logins); if (login_count > 0) { struct aws_json_value *logins = aws_json_value_new_object(allocator); if (logins == NULL) { goto done; } if 
(aws_json_value_add_to_object(json_body, aws_byte_cursor_from_string(s_logins_key), logins)) { aws_json_value_destroy(logins); goto done; } for (size_t i = 0; i < login_count; ++i) { struct aws_cognito_login login; if (aws_array_list_get_at(&impl->logins, &login, i)) { goto done; } struct aws_json_value *login_value_string = aws_json_value_new_string(allocator, login.identity_provider_token); if (login_value_string == NULL) { goto done; } if (aws_json_value_add_to_object(logins, login.identity_provider_name, login_value_string)) { aws_json_value_destroy(login_value_string); goto done; } } } if (aws_byte_buf_append_json_string(json_body, buffer)) { goto done; } result = AWS_OP_SUCCESS; done: aws_json_value_destroy(json_body); return result; } static struct aws_http_header s_content_type_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-type"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/x-amz-json-1.1"), }; AWS_STATIC_STRING_FROM_LITERAL(s_get_credentials_for_identity_path, "/"); static struct aws_http_header s_x_amz_target_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X-Amz-Target"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AWSCognitoIdentityService.GetCredentialsForIdentity"), }; static int s_create_get_credentials_for_identity_request(struct cognito_user_data *provider_user_data) { struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl; struct aws_byte_buf body_buffer; AWS_ZERO_STRUCT(body_buffer); struct aws_input_stream *body_stream = NULL; struct aws_http_message *request = aws_http_message_new_request(provider_user_data->allocator); if (request == NULL) { return AWS_OP_ERR; } if (aws_http_message_set_request_method(request, aws_http_method_post)) { goto on_error; } if (aws_http_message_set_request_path(request, aws_byte_cursor_from_string(s_get_credentials_for_identity_path))) { goto on_error; } struct aws_http_header host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"), .value = aws_byte_cursor_from_string(impl->endpoint), }; if (aws_http_message_add_header(request, host_header)) { goto on_error; } if (aws_http_message_add_header(request, s_content_type_header)) { goto on_error; } if (aws_http_message_add_header(request, s_x_amz_target_header)) { goto on_error; } if (aws_byte_buf_init(&body_buffer, provider_user_data->allocator, HTTP_REQUEST_BODY_INITIAL_SIZE)) { goto on_error; } if (s_create_get_credentials_for_identity_body_buffer(&body_buffer, provider_user_data)) { goto on_error; } char content_length[21]; AWS_ZERO_ARRAY(content_length); snprintf(content_length, sizeof(content_length), "%" PRIu64, (uint64_t)body_buffer.len); struct aws_http_header content_length_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = aws_byte_cursor_from_c_str(content_length), }; if (aws_http_message_add_header(request, content_length_header)) { goto on_error; } struct aws_byte_cursor payload_cur = aws_byte_cursor_from_buf(&body_buffer); body_stream = aws_input_stream_new_from_cursor(provider_user_data->allocator, &payload_cur); if (body_stream == NULL) { goto on_error; } aws_http_message_set_body_stream(request, body_stream); provider_user_data->get_credentials_request = request; provider_user_data->request_body_buffer = body_buffer; provider_user_data->request_body_stream = body_stream; return AWS_OP_SUCCESS; on_error: aws_byte_buf_clean_up(&body_buffer); aws_input_stream_release(body_stream); aws_http_message_release(request); return AWS_OP_ERR; } static void 
s_on_connection_setup_fn(struct aws_http_connection *connection, int error_code, void *user_data) { struct cognito_user_data *wrapped_user_data = user_data; struct aws_http_stream *stream = NULL; struct aws_credentials_provider_cognito_impl *impl = wrapped_user_data->provider->impl; if (connection == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to acquire http connection: %s", (void *)wrapped_user_data->provider, aws_error_debug_str(error_code)); goto on_error; } wrapped_user_data->connection = connection; if (s_create_get_credentials_for_identity_request(wrapped_user_data)) { error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to create http request: %s", (void *)wrapped_user_data->provider, aws_error_debug_str(error_code)); goto on_error; } struct aws_http_make_request_options options = { .user_data = user_data, .request = wrapped_user_data->get_credentials_request, .self_size = sizeof(struct aws_http_make_request_options), .on_response_headers = NULL, .on_response_header_block_done = NULL, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, }; stream = impl->function_table->aws_http_connection_make_request(connection, &options); if (!stream) { error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to create http stream: %s", (void *)wrapped_user_data->provider, aws_error_debug_str(error_code)); goto on_error; } if (impl->function_table->aws_http_stream_activate(stream)) { error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to activate http stream: %s", (void *)wrapped_user_data->provider, aws_error_debug_str(error_code)); goto on_error; } return; on_error: impl->function_table->aws_http_stream_release(stream); s_finalize_credentials_query(wrapped_user_data, error_code); } static void s_on_retry_token_acquired( struct aws_retry_strategy *strategy, int error_code, struct aws_retry_token *token, void *user_data) { (void)strategy; struct cognito_user_data *wrapped_user_data = user_data; if (token == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to acquire retry token: %s", (void *)wrapped_user_data->provider, aws_error_debug_str(error_code)); s_finalize_credentials_query(wrapped_user_data, error_code); return; } wrapped_user_data->retry_token = token; struct aws_credentials_provider_cognito_impl *impl = wrapped_user_data->provider->impl; impl->function_table->aws_http_connection_manager_acquire_connection( impl->connection_manager, s_on_connection_setup_fn, wrapped_user_data); } static int s_credentials_provider_cognito_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_cognito_impl *impl = provider->impl; struct cognito_user_data *wrapped_user_data = s_user_data_new(provider, callback, user_data); if (wrapped_user_data == NULL) { goto on_error; } if (aws_retry_strategy_acquire_retry_token( impl->retry_strategy, NULL, s_on_retry_token_acquired, wrapped_user_data, 100)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to acquire retry token with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } return AWS_OP_SUCCESS; on_error: 
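    /* s_user_data_destroy() is NULL-safe, so this also covers the case where allocating the wrapped
     * user data itself failed; it releases the provider reference and any partially built state. */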
    s_user_data_destroy(wrapped_user_data);

    return AWS_OP_ERR;
}

static void s_credentials_provider_cognito_destroy(struct aws_credentials_provider *provider) {
    struct aws_credentials_provider_cognito_impl *impl = provider->impl;
    if (impl == NULL) {
        return;
    }

    /* aws_http_connection_manager_release will eventually lead to a call of s_on_connection_manager_shutdown,
     * which releases the memory for provider and impl. */
    if (impl->connection_manager) {
        impl->function_table->aws_http_connection_manager_release(impl->connection_manager);
    } else {
        /* If provider setup failed halfway through, connection_manager might not exist.
         * In this case invoke shutdown completion callback directly to finish cleanup */
        s_on_connection_manager_shutdown(provider);
    }

    /* freeing the provider takes place in the shutdown callback below */
}

static struct aws_credentials_provider_vtable s_aws_credentials_provider_cognito_vtable = {
    .get_credentials = s_credentials_provider_cognito_get_credentials_async,
    .destroy = s_credentials_provider_cognito_destroy,
};

static void s_on_connection_manager_shutdown(void *user_data) {
    struct aws_credentials_provider *provider = user_data;

    aws_credentials_provider_invoke_shutdown_callback(provider);

    struct aws_credentials_provider_cognito_impl *impl = provider->impl;

    aws_retry_strategy_release(impl->retry_strategy);

    aws_string_destroy(impl->endpoint);
    aws_string_destroy(impl->identity);
    aws_string_destroy(impl->custom_role_arn);

    for (size_t i = 0; i < aws_array_list_length(&impl->logins); ++i) {
        struct aws_cognito_login login;
        if (aws_array_list_get_at(&impl->logins, &login, i)) {
            continue;
        }

        s_aws_cognito_login_clean_up(&login);
    }

    aws_array_list_clean_up(&impl->logins);

    aws_mem_release(provider->allocator, provider);
}

static int s_validate_options(const struct aws_credentials_provider_cognito_options *options) {
    if (options == NULL) {
        return AWS_OP_ERR;
    }

    if (options->tls_ctx == NULL) {
        AWS_LOGF_ERROR(
            AWS_LS_AUTH_CREDENTIALS_PROVIDER,
            "(static) Cognito credentials provider options must include a TLS context");
        return AWS_OP_ERR;
    }

    if (options->bootstrap == NULL) {
        AWS_LOGF_ERROR(
            AWS_LS_AUTH_CREDENTIALS_PROVIDER,
            "(static) Cognito credentials provider options must include a client bootstrap");
        return AWS_OP_ERR;
    }

    if (options->endpoint.len == 0) {
        AWS_LOGF_ERROR(
            AWS_LS_AUTH_CREDENTIALS_PROVIDER,
            "(static) Cognito credentials provider options must have a non-empty endpoint");
        return AWS_OP_ERR;
    }

    if (options->identity.len == 0) {
        AWS_LOGF_ERROR(
            AWS_LS_AUTH_CREDENTIALS_PROVIDER,
            "(static) Cognito credentials provider options must have a non-empty identity");
        return AWS_OP_ERR;
    }

    return AWS_OP_SUCCESS;
}

struct aws_credentials_provider *aws_credentials_provider_new_cognito(
    struct aws_allocator *allocator,
    const struct aws_credentials_provider_cognito_options *options) {
    struct aws_credentials_provider *provider = NULL;
    struct aws_credentials_provider_cognito_impl *impl = NULL;

    if (s_validate_options(options)) {
        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        return NULL;
    }

    aws_mem_acquire_many(
        allocator,
        2,
        &provider,
        sizeof(struct aws_credentials_provider),
        &impl,
        sizeof(struct aws_credentials_provider_cognito_impl));

    if (!provider) {
        return NULL;
    }

    AWS_ZERO_STRUCT(*provider);
    AWS_ZERO_STRUCT(*impl);

    aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_cognito_vtable, impl);

    struct aws_tls_connection_options tls_connection_options;
    AWS_ZERO_STRUCT(tls_connection_options);
    aws_tls_connection_options_init_from_ctx(&tls_connection_options,
options->tls_ctx); struct aws_byte_cursor host = options->endpoint; if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &host)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to create tls connection options with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV4; socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert( COGNITO_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); struct aws_http_connection_manager_options manager_options; AWS_ZERO_STRUCT(manager_options); manager_options.bootstrap = options->bootstrap; manager_options.initial_window_size = SIZE_MAX; manager_options.socket_options = &socket_options; manager_options.host = options->endpoint; manager_options.port = 443; manager_options.max_connections = 2; manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown; manager_options.shutdown_complete_user_data = provider; manager_options.tls_connection_options = &tls_connection_options; manager_options.proxy_options = options->http_proxy_options; impl->function_table = options->function_table; if (impl->function_table == NULL) { impl->function_table = g_aws_credentials_provider_http_function_table; } impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options); if (impl->connection_manager == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to create http connection manager with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } impl->endpoint = aws_string_new_from_cursor(allocator, &options->endpoint); impl->identity = aws_string_new_from_cursor(allocator, &options->identity); if (options->custom_role_arn != NULL) { impl->custom_role_arn = aws_string_new_from_cursor(allocator, options->custom_role_arn); } aws_array_list_init_dynamic(&impl->logins, allocator, options->login_count, sizeof(struct aws_cognito_login)); for (size_t i = 0; i < options->login_count; ++i) { struct aws_cognito_identity_provider_token_pair *login_token_pair = &options->logins[i]; struct aws_cognito_login login; if (s_aws_cognito_login_init( &login, allocator, login_token_pair->identity_provider_name, login_token_pair->identity_provider_token)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to initialize login entry with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } aws_array_list_push_back(&impl->logins, &login); } struct aws_standard_retry_options retry_options = { .backoff_retry_options = { .el_group = options->bootstrap->event_loop_group, .max_retries = COGNITO_MAX_RETRIES, }, }; impl->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options); if (!impl->retry_strategy) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Cognito credentials provider failed to create a retry strategy with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } provider->shutdown_options = options->shutdown_options; aws_tls_connection_options_clean_up(&tls_connection_options); return provider; on_error: aws_tls_connection_options_clean_up(&tls_connection_options); aws_credentials_provider_destroy(provider); 
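    /* provider/impl memory is reclaimed through the vtable destroy above (connection manager release
     * followed by the shutdown callback), so nothing else needs to be freed here. */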
return NULL; } /*************************************************************************/ #define DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS (15 * 60 * 1000) /* * Cognito provider with caching implementation */ struct aws_credentials_provider *aws_credentials_provider_new_cognito_caching( struct aws_allocator *allocator, const struct aws_credentials_provider_cognito_options *options) { struct aws_credentials_provider *cognito_provider = NULL; struct aws_credentials_provider *caching_provider = NULL; cognito_provider = aws_credentials_provider_new_cognito(allocator, options); if (cognito_provider == NULL) { goto on_error; } struct aws_credentials_provider_cached_options cached_options = { .source = cognito_provider, .refresh_time_in_milliseconds = DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS, }; caching_provider = aws_credentials_provider_new_cached(allocator, &cached_options); if (caching_provider == NULL) { goto on_error; } aws_credentials_provider_release(cognito_provider); return caching_provider; on_error: aws_credentials_provider_release(caching_provider); aws_credentials_provider_release(cognito_provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_default_chain.c000066400000000000000000000422011456575232400307110ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #define DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS (15 * 60 * 1000) #if defined(_MSC_VER) # pragma warning(disable : 4204) /* * For designated initialization: .providers = providers, * of aws_credentials_provider_chain_options in function * aws_credentials_provider_new_chain_default */ # pragma warning(disable : 4221) #endif /* _MSC_VER */ AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_relative_uri, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_full_uri, "AWS_CONTAINER_CREDENTIALS_FULL_URI"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_token, "AWS_CONTAINER_AUTHORIZATION_TOKEN"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_host, "169.254.170.2"); AWS_STATIC_STRING_FROM_LITERAL(s_ec2_creds_env_disable, "AWS_EC2_METADATA_DISABLED"); /** * ECS and IMDS credentials providers are mutually exclusive, * ECS has higher priority */ static struct aws_credentials_provider *s_aws_credentials_provider_new_ecs_or_imds( struct aws_allocator *allocator, const struct aws_credentials_provider_shutdown_options *shutdown_options, struct aws_client_bootstrap *bootstrap, struct aws_tls_ctx *tls_ctx) { struct aws_byte_cursor auth_token_cursor; AWS_ZERO_STRUCT(auth_token_cursor); struct aws_credentials_provider *ecs_or_imds_provider = NULL; struct aws_string *ecs_relative_uri = NULL; struct aws_string *ecs_full_uri = NULL; struct aws_string *ec2_imds_disable = NULL; struct aws_string *ecs_token = NULL; if (aws_get_environment_value(allocator, s_ecs_creds_env_relative_uri, &ecs_relative_uri) != AWS_OP_SUCCESS || aws_get_environment_value(allocator, s_ecs_creds_env_full_uri, &ecs_full_uri) != AWS_OP_SUCCESS || aws_get_environment_value(allocator, s_ec2_creds_env_disable, &ec2_imds_disable) != AWS_OP_SUCCESS || aws_get_environment_value(allocator, s_ecs_creds_env_token, &ecs_token) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed reading environment variables during default credentials provider chain initialization."); goto clean_up; } if (ecs_token && ecs_token->len) { auth_token_cursor = 
aws_byte_cursor_from_string(ecs_token); } /* * ToDo: the uri choice logic should be done in the ecs provider init logic. As it stands, it's a nightmare * to try and use the ecs provider anywhere outside the default chain. */ if (ecs_relative_uri && ecs_relative_uri->len) { struct aws_credentials_provider_ecs_options ecs_options = { .shutdown_options = *shutdown_options, .bootstrap = bootstrap, .host = aws_byte_cursor_from_string(s_ecs_host), .path_and_query = aws_byte_cursor_from_string(ecs_relative_uri), .tls_ctx = NULL, .auth_token = auth_token_cursor, }; ecs_or_imds_provider = aws_credentials_provider_new_ecs(allocator, &ecs_options); } else if (ecs_full_uri && ecs_full_uri->len) { struct aws_uri uri; struct aws_byte_cursor uri_cstr = aws_byte_cursor_from_string(ecs_full_uri); if (AWS_OP_ERR == aws_uri_init_parse(&uri, allocator, &uri_cstr)) { goto clean_up; } struct aws_byte_cursor path_and_query = uri.path_and_query; if (path_and_query.len == 0) { path_and_query = aws_byte_cursor_from_c_str("/"); } struct aws_credentials_provider_ecs_options ecs_options = { .shutdown_options = *shutdown_options, .bootstrap = bootstrap, .host = uri.host_name, .path_and_query = path_and_query, .tls_ctx = aws_byte_cursor_eq_c_str_ignore_case(&(uri.scheme), "HTTPS") ? tls_ctx : NULL, .auth_token = auth_token_cursor, .port = uri.port, }; ecs_or_imds_provider = aws_credentials_provider_new_ecs(allocator, &ecs_options); aws_uri_clean_up(&uri); } else if (ec2_imds_disable == NULL || aws_string_eq_c_str_ignore_case(ec2_imds_disable, "false")) { struct aws_credentials_provider_imds_options imds_options = { .shutdown_options = *shutdown_options, .bootstrap = bootstrap, }; ecs_or_imds_provider = aws_credentials_provider_new_imds(allocator, &imds_options); } clean_up: aws_string_destroy(ecs_relative_uri); aws_string_destroy(ecs_full_uri); aws_string_destroy(ec2_imds_disable); aws_string_destroy(ecs_token); return ecs_or_imds_provider; } struct default_chain_callback_data { struct aws_allocator *allocator; struct aws_credentials_provider *default_chain_provider; aws_on_get_credentials_callback_fn *original_callback; void *original_user_data; }; static struct default_chain_callback_data *s_create_callback_data( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn *callback, void *user_data) { struct default_chain_callback_data *callback_data = aws_mem_calloc(provider->allocator, 1, sizeof(struct default_chain_callback_data)); if (callback_data == NULL) { return NULL; } callback_data->allocator = provider->allocator; callback_data->default_chain_provider = provider; callback_data->original_callback = callback; callback_data->original_user_data = user_data; aws_credentials_provider_acquire(provider); return callback_data; } static void s_destroy_callback_data(struct default_chain_callback_data *callback_data) { aws_credentials_provider_release(callback_data->default_chain_provider); aws_mem_release(callback_data->allocator, callback_data); } struct aws_credentials_provider_default_chain_impl { struct aws_atomic_var shutdowns_remaining; struct aws_credentials_provider *cached_provider; }; static void s_aws_provider_default_chain_callback( struct aws_credentials *credentials, int error_code, void *user_data) { struct default_chain_callback_data *callback_data = user_data; struct aws_credentials_provider *provider = callback_data->default_chain_provider; if (credentials != NULL) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Default chain credentials provider successfully sourced 
credentials", (void *)provider); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Default chain credentials provider failed to source credentials with error %d(%s)", (void *)provider, error_code, aws_error_debug_str(error_code)); } callback_data->original_callback(credentials, error_code, callback_data->original_user_data); s_destroy_callback_data(callback_data); } static int s_credentials_provider_default_chain_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_default_chain_impl *impl = provider->impl; AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Credentials provider chain get credentials dispatch", (void *)provider); struct default_chain_callback_data *callback_data = s_create_callback_data(provider, callback, user_data); if (callback_data == NULL) { return AWS_OP_ERR; } int result = aws_credentials_provider_get_credentials( impl->cached_provider, s_aws_provider_default_chain_callback, callback_data); if (result != AWS_OP_SUCCESS) { s_destroy_callback_data(callback_data); } return result; } static void s_on_sub_provider_shutdown_completed(void *user_data) { struct aws_credentials_provider *provider = user_data; struct aws_credentials_provider_default_chain_impl *impl = provider->impl; size_t remaining = aws_atomic_fetch_sub(&impl->shutdowns_remaining, 1); if (remaining != 1) { return; } /* Invoke our own shutdown callback */ aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static void s_credentials_provider_default_chain_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_default_chain_impl *impl = provider->impl; if (impl == NULL) { return; } aws_credentials_provider_release(impl->cached_provider); s_on_sub_provider_shutdown_completed(provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_default_chain_vtable = { .get_credentials = s_credentials_provider_default_chain_get_credentials_async, .destroy = s_credentials_provider_default_chain_destroy, }; /* * Default provider chain implementation */ struct aws_credentials_provider *aws_credentials_provider_new_chain_default( struct aws_allocator *allocator, const struct aws_credentials_provider_chain_default_options *options) { struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_default_chain_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_default_chain_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_default_chain_vtable, impl); provider->shutdown_options = options->shutdown_options; /* 1 shutdown call from the provider's destroy itself */ aws_atomic_init_int(&impl->shutdowns_remaining, 1); struct aws_credentials_provider_shutdown_options sub_provider_shutdown_options; AWS_ZERO_STRUCT(sub_provider_shutdown_options); sub_provider_shutdown_options.shutdown_callback = s_on_sub_provider_shutdown_completed; sub_provider_shutdown_options.shutdown_user_data = provider; struct aws_tls_ctx *tls_ctx = NULL; struct aws_credentials_provider *environment_provider = NULL; struct aws_credentials_provider *profile_provider = NULL; struct aws_credentials_provider *process_provider = NULL; struct aws_credentials_provider *sts_provider 
= NULL; struct aws_credentials_provider *ecs_or_imds_provider = NULL; struct aws_credentials_provider *chain_provider = NULL; struct aws_credentials_provider *cached_provider = NULL; if (options->tls_ctx) { tls_ctx = aws_tls_ctx_acquire(options->tls_ctx); } else { #ifdef BYO_CRYPTO aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "TLS context must be provided to credentials provider."); goto on_error; #else AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): TLS context not provided, initializing a new one for credentials provider.", (void *)provider); struct aws_tls_ctx_options tls_options; aws_tls_ctx_options_init_default_client(&tls_options, allocator); tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options); aws_tls_ctx_options_clean_up(&tls_options); if (!tls_ctx) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create a TLS context with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } #endif /* BYO_CRYPTO */ } enum { providers_size = 5 }; struct aws_credentials_provider *providers[providers_size]; AWS_ZERO_ARRAY(providers); size_t index = 0; /* Providers that touch fast local resources... */ if (!options->skip_environment_credentials_provider) { struct aws_credentials_provider_environment_options environment_options; AWS_ZERO_STRUCT(environment_options); environment_provider = aws_credentials_provider_new_environment(allocator, &environment_options); if (environment_provider == NULL) { goto on_error; } providers[index++] = environment_provider; } /* Providers that will make a network call only if the relevant configuration is present... */ struct aws_credentials_provider_profile_options profile_options; AWS_ZERO_STRUCT(profile_options); profile_options.bootstrap = options->bootstrap; profile_options.tls_ctx = tls_ctx; profile_options.shutdown_options = sub_provider_shutdown_options; profile_options.profile_collection_cached = options->profile_collection_cached; profile_options.profile_name_override = options->profile_name_override; profile_provider = aws_credentials_provider_new_profile(allocator, &profile_options); if (profile_provider != NULL) { providers[index++] = profile_provider; /* 1 shutdown call from the profile provider's shutdown */ aws_atomic_fetch_add(&impl->shutdowns_remaining, 1); } struct aws_credentials_provider_sts_web_identity_options sts_options; AWS_ZERO_STRUCT(sts_options); sts_options.bootstrap = options->bootstrap; sts_options.tls_ctx = tls_ctx; sts_options.shutdown_options = sub_provider_shutdown_options; sts_options.config_profile_collection_cached = options->profile_collection_cached; sts_options.profile_name_override = options->profile_name_override; sts_provider = aws_credentials_provider_new_sts_web_identity(allocator, &sts_options); if (sts_provider != NULL) { providers[index++] = sts_provider; /* 1 shutdown call from the web identity provider's shutdown */ aws_atomic_fetch_add(&impl->shutdowns_remaining, 1); } struct aws_credentials_provider_process_options process_options; AWS_ZERO_STRUCT(process_options); process_options.shutdown_options = sub_provider_shutdown_options; process_options.config_profile_collection_cached = options->profile_collection_cached; process_options.profile_to_use = options->profile_name_override; process_provider = aws_credentials_provider_new_process(allocator, &process_options); if (process_provider != NULL) { providers[index++] = process_provider; /* 1 shutdown call from the process provider's shutdown */ 
aws_atomic_fetch_add(&impl->shutdowns_remaining, 1); } /* Providers that will always make a network call unless explicitly disabled... */ ecs_or_imds_provider = s_aws_credentials_provider_new_ecs_or_imds( allocator, &sub_provider_shutdown_options, options->bootstrap, tls_ctx); if (ecs_or_imds_provider != NULL) { providers[index++] = ecs_or_imds_provider; /* 1 shutdown call from the imds or ecs provider's shutdown */ aws_atomic_fetch_add(&impl->shutdowns_remaining, 1); } AWS_FATAL_ASSERT(index <= providers_size); struct aws_credentials_provider_chain_options chain_options = { .provider_count = index, .providers = providers, }; chain_provider = aws_credentials_provider_new_chain(allocator, &chain_options); if (chain_provider == NULL) { goto on_error; } /* * Transfer ownership */ aws_credentials_provider_release(environment_provider); aws_credentials_provider_release(profile_provider); aws_credentials_provider_release(process_provider); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(ecs_or_imds_provider); struct aws_credentials_provider_cached_options cached_options = { .source = chain_provider, .refresh_time_in_milliseconds = DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS, }; cached_provider = aws_credentials_provider_new_cached(allocator, &cached_options); if (cached_provider == NULL) { goto on_error; } /* * Transfer ownership */ aws_credentials_provider_release(chain_provider); impl->cached_provider = cached_provider; /* Subproviders have their own reference to the tls_ctx now */ aws_tls_ctx_release(tls_ctx); return provider; on_error: /* * Have to be a bit more careful than normal with this clean up pattern since the chain/cache will * recursively destroy the other providers via ref release. * * Technically, the cached_provider can never be non-null here, but let's handle it anyways * in case someone does something weird in the future. */ if (cached_provider) { aws_credentials_provider_release(cached_provider); } else if (chain_provider) { aws_credentials_provider_release(chain_provider); } else { aws_credentials_provider_release(ecs_or_imds_provider); aws_credentials_provider_release(profile_provider); aws_credentials_provider_release(process_provider); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(environment_provider); } aws_tls_ctx_release(tls_ctx); aws_mem_release(allocator, provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_delegate.c000066400000000000000000000040411456575232400276750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
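 *
 * Illustrative usage sketch, not part of the original source: my_app_state and
 * s_my_delegate_get_credentials are hypothetical application code; the option
 * field names mirror how they are consumed below.
 *
 *     static int s_my_delegate_get_credentials(
 *         void *delegate_user_data,
 *         aws_on_get_credentials_callback_fn callback,
 *         void *callback_user_data) {
 *         struct my_app_state *state = delegate_user_data;
 *         callback(state->credentials, AWS_ERROR_SUCCESS, callback_user_data);
 *         return AWS_OP_SUCCESS;
 *     }
 *
 *     struct aws_credentials_provider_delegate_options options = {
 *         .get_credentials = s_my_delegate_get_credentials,
 *         .delegate_user_data = &app_state,
 *     };
 *     struct aws_credentials_provider *provider = aws_credentials_provider_new_delegate(allocator, &options);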
*/ #include struct aws_credentials_provider_delegate_impl { aws_credentials_provider_delegate_get_credentials_fn *get_credentials; void *user_data; }; static int s_credentials_provider_delegate_get_credentials( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *callback_user_data) { struct aws_credentials_provider_delegate_impl *impl = provider->impl; return impl->get_credentials(impl->user_data, callback, callback_user_data); } static void s_credentials_provider_delegate_destroy(struct aws_credentials_provider *provider) { aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_credentials_provider_delegate_vtable = { .get_credentials = s_credentials_provider_delegate_get_credentials, .destroy = s_credentials_provider_delegate_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_delegate( struct aws_allocator *allocator, const struct aws_credentials_provider_delegate_options *options) { AWS_ASSERT(options); AWS_ASSERT(options->get_credentials); struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_delegate_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_delegate_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_credentials_provider_delegate_vtable, impl); provider->shutdown_options = options->shutdown_options; impl->get_credentials = options->get_credentials; impl->user_data = options->delegate_user_data; return provider; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_ecs.c000066400000000000000000000515371456575232400267110ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
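 *
 * Illustrative usage sketch, not part of the original source: the host and path values
 * are placeholders (the relative-URI form used by the default chain); field names match
 * how the options are consumed in aws_credentials_provider_new_ecs below.
 *
 *     struct aws_credentials_provider_ecs_options options = {
 *         .bootstrap = bootstrap,
 *         .host = aws_byte_cursor_from_c_str("169.254.170.2"),
 *         .path_and_query = aws_byte_cursor_from_c_str("/v2/credentials/placeholder-guid"),
 *         .tls_ctx = NULL, // plain HTTP is used for the link-local ECS endpoint
 *     };
 *     struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options);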
 */

#include #include #include #include #include #include #include #include #include #include #include #include #include

#if defined(_MSC_VER)
#    pragma warning(disable : 4204)
#    pragma warning(disable : 4232)
#endif /* _MSC_VER */

/* ecs task role credentials body response is currently ~ 1300 characters + name length */
#define ECS_RESPONSE_SIZE_INITIAL 2048
#define ECS_RESPONSE_SIZE_LIMIT 10000

#define ECS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2

static void s_on_connection_manager_shutdown(void *user_data);

struct aws_credentials_provider_ecs_impl {
    struct aws_http_connection_manager *connection_manager;
    const struct aws_auth_http_system_vtable *function_table;
    struct aws_string *host;
    struct aws_string *path_and_query;
    struct aws_string *auth_token;
};

/*
 * Tracking structure for each outstanding async query to an ecs provider
 */
struct aws_credentials_provider_ecs_user_data {
    /* immutable post-creation */
    struct aws_allocator *allocator;
    struct aws_credentials_provider *ecs_provider;
    aws_on_get_credentials_callback_fn *original_callback;
    void *original_user_data;

    /* mutable */
    struct aws_http_connection *connection;
    struct aws_http_message *request;
    struct aws_byte_buf current_result;
    int status_code;
    int error_code;
};

static void s_aws_credentials_provider_ecs_user_data_destroy(struct aws_credentials_provider_ecs_user_data *user_data) {
    if (user_data == NULL) {
        return;
    }

    struct aws_credentials_provider_ecs_impl *impl = user_data->ecs_provider->impl;

    if (user_data->connection) {
        impl->function_table->aws_http_connection_manager_release_connection(
            impl->connection_manager, user_data->connection);
    }

    aws_byte_buf_clean_up(&user_data->current_result);

    if (user_data->request) {
        aws_http_message_destroy(user_data->request);
    }

    aws_credentials_provider_release(user_data->ecs_provider);
    aws_mem_release(user_data->allocator, user_data);
}

static struct aws_credentials_provider_ecs_user_data *s_aws_credentials_provider_ecs_user_data_new(
    struct aws_credentials_provider *ecs_provider,
    aws_on_get_credentials_callback_fn callback,
    void *user_data) {
    struct aws_credentials_provider_ecs_user_data *wrapped_user_data =
        aws_mem_calloc(ecs_provider->allocator, 1, sizeof(struct aws_credentials_provider_ecs_user_data));
    if (wrapped_user_data == NULL) {
        goto on_error;
    }

    wrapped_user_data->allocator = ecs_provider->allocator;
    wrapped_user_data->ecs_provider = ecs_provider;
    aws_credentials_provider_acquire(ecs_provider);
    wrapped_user_data->original_user_data = user_data;
    wrapped_user_data->original_callback = callback;

    if (aws_byte_buf_init(&wrapped_user_data->current_result, ecs_provider->allocator, ECS_RESPONSE_SIZE_INITIAL)) {
        goto on_error;
    }

    return wrapped_user_data;

on_error:

    s_aws_credentials_provider_ecs_user_data_destroy(wrapped_user_data);

    return NULL;
}

static void s_aws_credentials_provider_ecs_user_data_reset_response(
    struct aws_credentials_provider_ecs_user_data *ecs_user_data) {
    ecs_user_data->current_result.len = 0;
    ecs_user_data->status_code = 0;

    if (ecs_user_data->request) {
        aws_http_message_destroy(ecs_user_data->request);
        ecs_user_data->request = NULL;
    }
}

/*
 * In general, the ECS document looks something like:
 * {
 *   "Code" : "Success",
 *   "LastUpdated" : "2019-05-28T18:03:09Z",
 *   "Type" : "AWS-HMAC",
 *   "AccessKeyId" : "...",
 *   "SecretAccessKey" : "...",
 *   "Token" : "...",
 *   "Expiration" : "2019-05-29T00:21:43Z"
 * }
 *
 * No matter the result, this always gets called assuming that ecs_user_data is successfully allocated
 */
static void s_ecs_finalize_get_credentials_query(struct
aws_credentials_provider_ecs_user_data *ecs_user_data) { /* Try to build credentials from whatever, if anything, was in the result */ struct aws_credentials *credentials = NULL; struct aws_parse_credentials_from_json_doc_options parse_options = { .access_key_id_name = "AccessKeyId", .secret_access_key_name = "SecretAccessKey", .token_name = "Token", .expiration_name = "Expiration", .token_required = true, .expiration_required = true, }; if (aws_byte_buf_append_null_terminator(&ecs_user_data->current_result) == AWS_OP_SUCCESS) { credentials = aws_parse_credentials_from_json_document( ecs_user_data->allocator, aws_byte_cursor_from_buf(&ecs_user_data->current_result), &parse_options); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider failed to add null terminating char to resulting buffer.", (void *)ecs_user_data->ecs_provider); } if (credentials != NULL) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider successfully queried instance role credentials", (void *)ecs_user_data->ecs_provider); } else { /* no credentials, make sure we have a valid error to report */ if (ecs_user_data->error_code == AWS_ERROR_SUCCESS) { ecs_user_data->error_code = aws_last_error(); if (ecs_user_data->error_code == AWS_ERROR_SUCCESS) { ecs_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_ECS_SOURCE_FAILURE; } } AWS_LOGF_WARN( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider failed to query instance role credentials with error %d(%s)", (void *)ecs_user_data->ecs_provider, ecs_user_data->error_code, aws_error_str(ecs_user_data->error_code)); } /* pass the credentials back */ ecs_user_data->original_callback(credentials, ecs_user_data->error_code, ecs_user_data->original_user_data); /* clean up */ s_aws_credentials_provider_ecs_user_data_destroy(ecs_user_data); aws_credentials_release(credentials); } static int s_ecs_on_incoming_body_fn( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data; struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl; AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider received %zu response bytes", (void *)ecs_user_data->ecs_provider, data->len); if (data->len + ecs_user_data->current_result.len > ECS_RESPONSE_SIZE_LIMIT) { impl->function_table->aws_http_connection_close(ecs_user_data->connection); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider query response exceeded maximum allowed length", (void *)ecs_user_data->ecs_provider); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (aws_byte_buf_append_dynamic(&ecs_user_data->current_result, data)) { impl->function_table->aws_http_connection_close(ecs_user_data->connection); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider query error appending response", (void *)ecs_user_data->ecs_provider); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_ecs_on_incoming_headers_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)header_array; (void)num_headers; if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) { return AWS_OP_SUCCESS; } struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data; if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { if 
(ecs_user_data->status_code == 0) { struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl; if (impl->function_table->aws_http_stream_get_incoming_response_status( stream, &ecs_user_data->status_code)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider failed to get http status code", (void *)ecs_user_data->ecs_provider); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) ECS credentials provider query received http status code %d", (void *)ecs_user_data->ecs_provider, ecs_user_data->status_code); } } return AWS_OP_SUCCESS; } static void s_ecs_query_task_role_credentials(struct aws_credentials_provider_ecs_user_data *ecs_user_data); static void s_ecs_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data; aws_http_message_destroy(ecs_user_data->request); ecs_user_data->request = NULL; struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl; impl->function_table->aws_http_stream_release(stream); /* * On anything other than a 200, nullify the response and pretend there was * an error */ if (ecs_user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || error_code != AWS_OP_SUCCESS) { ecs_user_data->current_result.len = 0; if (error_code != AWS_OP_SUCCESS) { ecs_user_data->error_code = error_code; } else { ecs_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE; } } s_ecs_finalize_get_credentials_query(ecs_user_data); } AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_header, "Accept"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_header_value, "application/json"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_user_agent_header, "User-Agent"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_user_agent_header_value, "aws-sdk-crt/ecs-credentials-provider"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_authorization_header, "Authorization"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_encoding_header, "Accept-Encoding"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_encoding_header_value, "identity"); AWS_STATIC_STRING_FROM_LITERAL(s_ecs_host_header, "Host"); static int s_make_ecs_http_query( struct aws_credentials_provider_ecs_user_data *ecs_user_data, struct aws_byte_cursor *uri) { AWS_FATAL_ASSERT(ecs_user_data->connection); struct aws_http_stream *stream = NULL; struct aws_http_message *request = aws_http_message_new_request(ecs_user_data->allocator); if (request == NULL) { return AWS_OP_ERR; } struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl; struct aws_http_header host_header = { .name = aws_byte_cursor_from_string(s_ecs_host_header), .value = aws_byte_cursor_from_string(impl->host), }; if (aws_http_message_add_header(request, host_header)) { goto on_error; } if (impl->auth_token != NULL) { struct aws_http_header auth_header = { .name = aws_byte_cursor_from_string(s_ecs_authorization_header), .value = aws_byte_cursor_from_string(impl->auth_token), }; if (aws_http_message_add_header(request, auth_header)) { goto on_error; } } struct aws_http_header accept_header = { .name = aws_byte_cursor_from_string(s_ecs_accept_header), .value = aws_byte_cursor_from_string(s_ecs_accept_header_value), }; if (aws_http_message_add_header(request, accept_header)) { goto on_error; } struct aws_http_header accept_encoding_header = { .name = aws_byte_cursor_from_string(s_ecs_accept_encoding_header), .value = aws_byte_cursor_from_string(s_ecs_accept_encoding_header_value), 
}; if (aws_http_message_add_header(request, accept_encoding_header)) { goto on_error; } struct aws_http_header user_agent_header = { .name = aws_byte_cursor_from_string(s_ecs_user_agent_header), .value = aws_byte_cursor_from_string(s_ecs_user_agent_header_value), }; if (aws_http_message_add_header(request, user_agent_header)) { goto on_error; } if (aws_http_message_set_request_path(request, *uri)) { goto on_error; } if (aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"))) { goto on_error; } ecs_user_data->request = request; struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .on_response_headers = s_ecs_on_incoming_headers_fn, .on_response_header_block_done = NULL, .on_response_body = s_ecs_on_incoming_body_fn, .on_complete = s_ecs_on_stream_complete_fn, .user_data = ecs_user_data, .request = request, }; stream = impl->function_table->aws_http_connection_make_request(ecs_user_data->connection, &request_options); if (!stream) { goto on_error; } if (impl->function_table->aws_http_stream_activate(stream)) { goto on_error; } return AWS_OP_SUCCESS; on_error: impl->function_table->aws_http_stream_release(stream); aws_http_message_destroy(request); ecs_user_data->request = NULL; return AWS_OP_ERR; } static void s_ecs_query_task_role_credentials(struct aws_credentials_provider_ecs_user_data *ecs_user_data) { AWS_FATAL_ASSERT(ecs_user_data->connection); struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl; /* "Clear" the result */ s_aws_credentials_provider_ecs_user_data_reset_response(ecs_user_data); struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_string(impl->path_and_query); if (s_make_ecs_http_query(ecs_user_data, &uri_cursor) == AWS_OP_ERR) { s_ecs_finalize_get_credentials_query(ecs_user_data); } } static void s_ecs_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data; if (connection == NULL) { AWS_LOGF_WARN( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: ECS provider failed to acquire a connection, error code %d(%s)", (void *)ecs_user_data->ecs_provider, error_code, aws_error_str(error_code)); ecs_user_data->error_code = error_code; s_ecs_finalize_get_credentials_query(ecs_user_data); return; } ecs_user_data->connection = connection; s_ecs_query_task_role_credentials(ecs_user_data); } static int s_credentials_provider_ecs_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_ecs_impl *impl = provider->impl; struct aws_credentials_provider_ecs_user_data *wrapped_user_data = s_aws_credentials_provider_ecs_user_data_new(provider, callback, user_data); if (wrapped_user_data == NULL) { goto error; } impl->function_table->aws_http_connection_manager_acquire_connection( impl->connection_manager, s_ecs_on_acquire_connection, wrapped_user_data); return AWS_OP_SUCCESS; error: s_aws_credentials_provider_ecs_user_data_destroy(wrapped_user_data); return AWS_OP_ERR; } static void s_credentials_provider_ecs_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_ecs_impl *impl = provider->impl; if (impl == NULL) { return; } aws_string_destroy(impl->path_and_query); aws_string_destroy(impl->auth_token); aws_string_destroy(impl->host); /* aws_http_connection_manager_release will eventually leads to call of s_on_connection_manager_shutdown, * which will do 
memory release for provider and impl. So We should be freeing impl * related memory first, then call aws_http_connection_manager_release. */ if (impl->connection_manager) { impl->function_table->aws_http_connection_manager_release(impl->connection_manager); } else { /* If provider setup failed halfway through, connection_manager might not exist. * In this case invoke shutdown completion callback directly to finish cleanup */ s_on_connection_manager_shutdown(provider); } /* freeing the provider takes place in the shutdown callback below */ } static struct aws_credentials_provider_vtable s_aws_credentials_provider_ecs_vtable = { .get_credentials = s_credentials_provider_ecs_get_credentials_async, .destroy = s_credentials_provider_ecs_destroy, }; static void s_on_connection_manager_shutdown(void *user_data) { struct aws_credentials_provider *provider = user_data; aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } struct aws_credentials_provider *aws_credentials_provider_new_ecs( struct aws_allocator *allocator, const struct aws_credentials_provider_ecs_options *options) { struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_ecs_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_ecs_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_ecs_vtable, impl); struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); if (options->tls_ctx) { aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx); struct aws_byte_cursor host = options->host; if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &host)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create a tls connection options with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } } struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV4; socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert( ECS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); struct aws_http_connection_manager_options manager_options; AWS_ZERO_STRUCT(manager_options); manager_options.bootstrap = options->bootstrap; manager_options.initial_window_size = ECS_RESPONSE_SIZE_LIMIT; manager_options.socket_options = &socket_options; manager_options.host = options->host; if (options->port == 0) { manager_options.port = options->tls_ctx ? 443 : 80; } else { manager_options.port = options->port; } manager_options.max_connections = 2; manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown; manager_options.shutdown_complete_user_data = provider; manager_options.tls_connection_options = options->tls_ctx ? 
&tls_connection_options : NULL; impl->function_table = options->function_table; if (impl->function_table == NULL) { impl->function_table = g_aws_credentials_provider_http_function_table; } impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options); if (impl->connection_manager == NULL) { goto on_error; } if (options->auth_token.len != 0) { impl->auth_token = aws_string_new_from_cursor(allocator, &options->auth_token); if (impl->auth_token == NULL) { goto on_error; } } impl->path_and_query = aws_string_new_from_cursor(allocator, &options->path_and_query); if (impl->path_and_query == NULL) { goto on_error; } impl->host = aws_string_new_from_cursor(allocator, &options->host); if (impl->host == NULL) { goto on_error; } provider->shutdown_options = options->shutdown_options; aws_tls_connection_options_clean_up(&tls_connection_options); return provider; on_error: aws_tls_connection_options_clean_up(&tls_connection_options); aws_credentials_provider_destroy(provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_environment.c000066400000000000000000000056471456575232400305040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_env_var, "AWS_ACCESS_KEY_ID"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_env_var, "AWS_SECRET_ACCESS_KEY"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_env_var, "AWS_SESSION_TOKEN"); static int s_credentials_provider_environment_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_allocator *allocator = provider->allocator; struct aws_string *access_key_id = NULL; struct aws_string *secret_access_key = NULL; struct aws_string *session_token = NULL; struct aws_credentials *credentials = NULL; int error_code = AWS_ERROR_SUCCESS; aws_get_environment_value(allocator, s_access_key_id_env_var, &access_key_id); aws_get_environment_value(allocator, s_secret_access_key_env_var, &secret_access_key); aws_get_environment_value(allocator, s_session_token_env_var, &session_token); if (access_key_id != NULL && access_key_id->len > 0 && secret_access_key != NULL && secret_access_key->len > 0) { credentials = aws_credentials_new_from_string(allocator, access_key_id, secret_access_key, session_token, UINT64_MAX); if (credentials == NULL) { error_code = aws_last_error(); } } else { error_code = AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_ENVIRONMENT; } callback(credentials, error_code, user_data); aws_credentials_release(credentials); aws_string_destroy(session_token); aws_string_destroy(secret_access_key); aws_string_destroy(access_key_id); return AWS_OP_SUCCESS; } static void s_credentials_provider_environment_destroy(struct aws_credentials_provider *provider) { aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_environment_vtable = { .get_credentials = s_credentials_provider_environment_get_credentials_async, .destroy = s_credentials_provider_environment_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_environment( struct aws_allocator *allocator, const struct aws_credentials_provider_environment_options *options) { struct aws_credentials_provider *provider = 
aws_mem_acquire(allocator, sizeof(struct aws_credentials_provider)); if (provider == NULL) { return NULL; } AWS_ZERO_STRUCT(*provider); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_environment_vtable, NULL); provider->shutdown_options = options->shutdown_options; return provider; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_imds.c000066400000000000000000000155151456575232400270670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) #endif /* _MSC_VER */ struct aws_credentials_provider_imds_impl { struct aws_imds_client *client; }; static int s_credentials_provider_imds_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data); static void s_on_imds_client_shutdown(void *user_data); static void s_credentials_provider_imds_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_imds_impl *impl = provider->impl; if (impl == NULL) { return; } if (impl->client) { /* release IMDS client, cleanup will finish when its shutdown callback fires */ aws_imds_client_release(impl->client); } else { /* If provider setup failed halfway through, IMDS client might not exist. * In this case invoke shutdown completion callback directly to finish cleanup */ s_on_imds_client_shutdown(provider); } } static void s_on_imds_client_shutdown(void *user_data) { struct aws_credentials_provider *provider = user_data; aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_imds_vtable = { .get_credentials = s_credentials_provider_imds_get_credentials_async, .destroy = s_credentials_provider_imds_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_imds( struct aws_allocator *allocator, const struct aws_credentials_provider_imds_options *options) { if (!options->bootstrap) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Client bootstrap is required for querying IMDS"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_imds_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_imds_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_imds_vtable, impl); struct aws_imds_client_options client_options = { .bootstrap = options->bootstrap, .function_table = options->function_table, .imds_version = options->imds_version, .ec2_metadata_v1_disabled = options->ec2_metadata_v1_disabled, .shutdown_options = { .shutdown_callback = s_on_imds_client_shutdown, .shutdown_user_data = provider, }, }; impl->client = aws_imds_client_new(allocator, &client_options); if (!impl->client) { goto on_error; } provider->shutdown_options = options->shutdown_options; return provider; on_error: aws_credentials_provider_destroy(provider); return NULL; } /* * Tracking structure for each outstanding async query to an imds provider */ struct imds_provider_user_data { /* immutable post-creation */ struct aws_allocator *allocator; struct aws_credentials_provider 
*imds_provider; aws_on_get_credentials_callback_fn *original_callback; struct aws_byte_buf role; void *original_user_data; }; static void s_imds_provider_user_data_destroy(struct imds_provider_user_data *user_data) { if (user_data == NULL) { return; } aws_byte_buf_clean_up(&user_data->role); aws_credentials_provider_release(user_data->imds_provider); aws_mem_release(user_data->allocator, user_data); } static struct imds_provider_user_data *s_imds_provider_user_data_new( struct aws_credentials_provider *imds_provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct imds_provider_user_data *wrapped_user_data = aws_mem_calloc(imds_provider->allocator, 1, sizeof(struct imds_provider_user_data)); if (wrapped_user_data == NULL) { goto on_error; } if (aws_byte_buf_init(&wrapped_user_data->role, imds_provider->allocator, 100)) { goto on_error; } wrapped_user_data->allocator = imds_provider->allocator; wrapped_user_data->imds_provider = imds_provider; aws_credentials_provider_acquire(imds_provider); wrapped_user_data->original_user_data = user_data; wrapped_user_data->original_callback = callback; return wrapped_user_data; on_error: s_imds_provider_user_data_destroy(wrapped_user_data); return NULL; } static void s_on_get_credentials(const struct aws_credentials *credentials, int error_code, void *user_data) { (void)error_code; struct imds_provider_user_data *wrapped_user_data = user_data; wrapped_user_data->original_callback( (struct aws_credentials *)credentials, error_code, wrapped_user_data->original_user_data); s_imds_provider_user_data_destroy(wrapped_user_data); } static void s_on_get_role(const struct aws_byte_buf *role, int error_code, void *user_data) { struct imds_provider_user_data *wrapped_user_data = user_data; if (!role || error_code || role->len == 0) { goto on_error; } struct aws_byte_cursor role_cursor = aws_byte_cursor_from_buf(role); if (aws_byte_buf_append_dynamic(&wrapped_user_data->role, &role_cursor)) { goto on_error; } struct aws_credentials_provider_imds_impl *impl = wrapped_user_data->imds_provider->impl; if (aws_imds_client_get_credentials( impl->client, aws_byte_cursor_from_buf(&wrapped_user_data->role), s_on_get_credentials, user_data)) { goto on_error; } return; on_error: wrapped_user_data->original_callback( NULL, AWS_AUTH_CREDENTIALS_PROVIDER_IMDS_SOURCE_FAILURE, wrapped_user_data->original_user_data); s_imds_provider_user_data_destroy(wrapped_user_data); } static int s_credentials_provider_imds_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_imds_impl *impl = provider->impl; struct imds_provider_user_data *wrapped_user_data = s_imds_provider_user_data_new(provider, callback, user_data); if (wrapped_user_data == NULL) { goto error; } if (aws_imds_client_get_attached_iam_role(impl->client, s_on_get_role, wrapped_user_data)) { goto error; } return AWS_OP_SUCCESS; error: s_imds_provider_user_data_destroy(wrapped_user_data); return AWS_OP_ERR; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_process.c000066400000000000000000000210661456575232400276070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) #endif /* _MSC_VER */ struct aws_credentials_provider_process_impl { struct aws_string *command; }; static int s_get_credentials_from_process( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_process_impl *impl = provider->impl; struct aws_credentials *credentials = NULL; struct aws_run_command_options options = { .command = aws_string_c_str(impl->command), }; struct aws_run_command_result result; if (aws_run_command_result_init(provider->allocator, &result)) { goto on_finish; } if (aws_run_command(provider->allocator, &options, &result) || result.ret_code || !result.std_out) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Failed to source credentials from running process credentials provider with command: %s, err:%s", (void *)provider, aws_string_c_str(impl->command), aws_error_str(aws_last_error())); goto on_finish; } struct aws_parse_credentials_from_json_doc_options parse_options = { .access_key_id_name = "AccessKeyId", .secret_access_key_name = "SecretAccessKey", .token_name = "SessionToken", .expiration_name = "Expiration", .token_required = false, .expiration_required = false, }; credentials = aws_parse_credentials_from_json_document( provider->allocator, aws_byte_cursor_from_string(result.std_out), &parse_options); if (!credentials) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Process credentials provider failed to parse credentials from command output (output is not " "logged in case sensitive information).", (void *)provider); goto on_finish; } AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Process credentials provider successfully sourced credentials.", (void *)provider); on_finish: ; int error_code = AWS_ERROR_SUCCESS; if (credentials == NULL) { error_code = aws_last_error(); if (error_code == AWS_ERROR_SUCCESS) { error_code = AWS_AUTH_CREDENTIALS_PROVIDER_PROCESS_SOURCE_FAILURE; } } callback(credentials, error_code, user_data); aws_run_command_result_cleanup(&result); aws_credentials_release(credentials); return AWS_OP_SUCCESS; } static void s_credentials_provider_process_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_process_impl *impl = provider->impl; if (impl) { aws_string_destroy_secure(impl->command); } aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } AWS_STATIC_STRING_FROM_LITERAL(s_credentials_process, "credential_process"); static struct aws_profile_collection *s_load_profile(struct aws_allocator *allocator) { struct aws_profile_collection *config_profiles = NULL; struct aws_string *config_file_path = NULL; config_file_path = aws_get_config_file_path(allocator, NULL); if (!config_file_path) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve config file path during process credentials provider initialization: %s", aws_error_str(aws_last_error())); goto on_done; } config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG); if (config_profiles != NULL) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Successfully built config profile collection from file at (%s)", aws_string_c_str(config_file_path)); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to build config profile collection from file at (%s) : %s", 
aws_string_c_str(config_file_path), aws_error_str(aws_last_error())); goto on_done; } on_done: aws_string_destroy(config_file_path); return config_profiles; } static void s_check_or_get_with_profile_config( struct aws_allocator *allocator, const struct aws_profile *profile, const struct aws_string *config_key, struct aws_byte_buf *target) { if (!allocator || !profile || !config_key || !target) { return; } if (!target->len) { aws_byte_buf_clean_up(target); const struct aws_profile_property *property = aws_profile_get_property(profile, config_key); if (property) { aws_byte_buf_init_copy_from_cursor( target, allocator, aws_byte_cursor_from_string(aws_profile_property_get_value(property))); } } } static struct aws_byte_cursor s_stderr_redirect_to_stdout = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" 2>&1"); static struct aws_string *s_get_command( struct aws_allocator *allocator, const struct aws_credentials_provider_process_options *options) { struct aws_byte_buf command_buf; AWS_ZERO_STRUCT(command_buf); struct aws_string *command = NULL; struct aws_profile_collection *config_profiles = NULL; struct aws_string *profile_name = NULL; const struct aws_profile *profile = NULL; if (options->config_profile_collection_cached) { config_profiles = aws_profile_collection_acquire(options->config_profile_collection_cached); } else { config_profiles = s_load_profile(allocator); } profile_name = aws_get_profile_name(allocator, &options->profile_to_use); if (config_profiles && profile_name) { profile = aws_profile_collection_get_profile(config_profiles, profile_name); } if (!profile) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve config profile during process credentials provider initialization."); goto on_finish; } s_check_or_get_with_profile_config(allocator, profile, s_credentials_process, &command_buf); if (!command_buf.len) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve credentials_process command during process credentials provider initialization."); goto on_finish; } if (aws_byte_buf_append_dynamic(&command_buf, &s_stderr_redirect_to_stdout)) { goto on_finish; } command = aws_string_new_from_array(allocator, command_buf.buffer, command_buf.len); if (!command) { goto on_finish; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Successfully loaded credentials_process command for process credentials provider."); on_finish: aws_string_destroy(profile_name); aws_profile_collection_release(config_profiles); aws_byte_buf_clean_up_secure(&command_buf); return command; } static struct aws_credentials_provider_vtable s_aws_credentials_provider_process_vtable = { .get_credentials = s_get_credentials_from_process, .destroy = s_credentials_provider_process_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_process( struct aws_allocator *allocator, const struct aws_credentials_provider_process_options *options) { struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_process_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_process_impl)); if (!provider) { goto on_error; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); impl->command = s_get_command(allocator, options); if (!impl->command) { goto on_error; } aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_process_vtable, impl); provider->shutdown_options = options->shutdown_options; AWS_LOGF_TRACE( 
AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Successfully initializing a process credentials provider.", (void *)provider); return provider; on_error: aws_mem_release(allocator, provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_profile.c000066400000000000000000000517031456575232400275720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #ifdef _MSC_VER /* allow non-constant declared initializers. */ # pragma warning(disable : 4204) #endif /* * Profile provider implementation */ AWS_STRING_FROM_LITERAL(s_role_arn_name, "role_arn"); AWS_STRING_FROM_LITERAL(s_role_session_name_name, "role_session_name"); AWS_STRING_FROM_LITERAL(s_credential_source_name, "credential_source"); AWS_STRING_FROM_LITERAL(s_source_profile_name, "source_profile"); AWS_STRING_FROM_LITERAL(s_access_key_id_profile_var, "aws_access_key_id"); AWS_STRING_FROM_LITERAL(s_secret_access_key_profile_var, "aws_secret_access_key"); static struct aws_byte_cursor s_default_session_name_pfx = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-common-runtime-profile-config"); static struct aws_byte_cursor s_ec2_imds_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Ec2InstanceMetadata"); static struct aws_byte_cursor s_environment_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Environment"); #define MAX_SESSION_NAME_LEN ((size_t)64) struct aws_credentials_provider_profile_file_impl { struct aws_string *config_file_path; struct aws_string *credentials_file_path; struct aws_string *profile_name; struct aws_profile_collection *profile_collection_cached; }; static int s_profile_file_credentials_provider_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_profile_file_impl *impl = provider->impl; struct aws_credentials *credentials = NULL; struct aws_profile_collection *merged_profiles = NULL; if (impl->profile_collection_cached) { /* Use cached profile collection */ merged_profiles = aws_profile_collection_acquire(impl->profile_collection_cached); } else { /* * Parse config file from file, if it exists */ struct aws_profile_collection *config_profiles = aws_profile_collection_new_from_file(provider->allocator, impl->config_file_path, AWS_PST_CONFIG); if (config_profiles != NULL) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Profile credentials provider successfully built config profile collection from file at (%s)", (void *)provider, aws_string_c_str(impl->config_file_path)); } else { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Profile credentials provider failed to build config profile collection from file at (%s)", (void *)provider, aws_string_c_str(impl->config_file_path)); } /* * Parse credentials file, if it exists */ struct aws_profile_collection *credentials_profiles = aws_profile_collection_new_from_file(provider->allocator, impl->credentials_file_path, AWS_PST_CREDENTIALS); if (credentials_profiles != NULL) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Profile credentials provider successfully built credentials profile collection from file at " "(%s)", (void *)provider, aws_string_c_str(impl->credentials_file_path)); } else { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Profile credentials provider failed to build credentials profile collection from file at (%s)", (void 
*)provider, aws_string_c_str(impl->credentials_file_path)); } /* * Merge the (up to) two sources into a single unified profile */ merged_profiles = aws_profile_collection_new_from_merge(provider->allocator, config_profiles, credentials_profiles); aws_profile_collection_release(config_profiles); aws_profile_collection_release(credentials_profiles); } if (merged_profiles != NULL) { const struct aws_profile *profile = aws_profile_collection_get_profile(merged_profiles, impl->profile_name); if (profile != NULL) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Profile credentials provider attempting to pull credentials from profile \"%s\"", (void *)provider, aws_string_c_str(impl->profile_name)); credentials = aws_credentials_new_from_profile(provider->allocator, profile); } else { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Profile credentials provider could not find a profile named \"%s\"", (void *)provider, aws_string_c_str(impl->profile_name)); } } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Profile credentials provider failed to merge config and credentials profile collections", (void *)provider); } int error_code = AWS_ERROR_SUCCESS; if (credentials == NULL) { error_code = aws_last_error(); if (error_code == AWS_ERROR_SUCCESS) { error_code = AWS_AUTH_CREDENTIALS_PROVIDER_PROFILE_SOURCE_FAILURE; } } callback(credentials, error_code, user_data); /* * clean up */ aws_credentials_release(credentials); aws_profile_collection_release(merged_profiles); return AWS_OP_SUCCESS; } static void s_profile_file_credentials_provider_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_profile_file_impl *impl = provider->impl; if (impl == NULL) { return; } aws_string_destroy(impl->config_file_path); aws_string_destroy(impl->credentials_file_path); aws_string_destroy(impl->profile_name); aws_profile_collection_release(impl->profile_collection_cached); aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_profile_file_vtable = { .get_credentials = s_profile_file_credentials_provider_get_credentials_async, .destroy = s_profile_file_credentials_provider_destroy, }; /* load a purely config/credentials file based provider. 
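 * The provider only captures the resolved file paths, the profile name, and (optionally) a cached
 * profile collection; each get_credentials call then either reuses the cached collection or
 * re-parses and merges the config and credentials files before pulling credentials from the
 * named profile.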
*/ static struct aws_credentials_provider *s_create_profile_based_provider( struct aws_allocator *allocator, struct aws_string *credentials_file_path, struct aws_string *config_file_path, const struct aws_string *profile_name, struct aws_profile_collection *profile_collection_cached) { struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_profile_file_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_profile_file_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_profile_file_vtable, impl); if (credentials_file_path) { impl->credentials_file_path = aws_string_clone_or_reuse(allocator, credentials_file_path); } if (config_file_path) { impl->config_file_path = aws_string_clone_or_reuse(allocator, config_file_path); } impl->profile_name = aws_string_clone_or_reuse(allocator, profile_name); impl->profile_collection_cached = aws_profile_collection_acquire(profile_collection_cached); return provider; } static struct aws_credentials_provider *s_credentials_provider_new_profile_internal( struct aws_allocator *allocator, const struct aws_credentials_provider_profile_options *options, struct aws_hash_table *source_profiles_table); /* use the selected property that specifies a role_arn to load an STS based provider. */ static struct aws_credentials_provider *s_create_sts_based_provider( struct aws_allocator *allocator, const struct aws_profile_property *role_arn_property, const struct aws_profile *profile, const struct aws_credentials_provider_profile_options *options, struct aws_profile_collection *merged_profiles, struct aws_hash_table *source_profiles_table) { struct aws_credentials_provider *provider = NULL; AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: profile %s has role_arn property is set to %s, attempting to " "create an STS credentials provider.", aws_string_c_str(aws_profile_get_name(profile)), aws_string_c_str(aws_profile_property_get_value(role_arn_property))); const struct aws_profile_property *source_profile_property = aws_profile_get_property(profile, s_source_profile_name); const struct aws_profile_property *credential_source_property = aws_profile_get_property(profile, s_credential_source_name); const struct aws_profile_property *role_session_name = aws_profile_get_property(profile, s_role_session_name_name); char session_name_array[MAX_SESSION_NAME_LEN + 1]; AWS_ZERO_ARRAY(session_name_array); if (role_session_name) { size_t to_write = aws_profile_property_get_value(role_session_name)->len; if (to_write > MAX_SESSION_NAME_LEN) { AWS_LOGF_WARN( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: session_name property is %d bytes long, " "but the max is %d. Truncating", (int)aws_profile_property_get_value(role_session_name)->len, (int)MAX_SESSION_NAME_LEN); to_write = MAX_SESSION_NAME_LEN; } memcpy(session_name_array, aws_string_bytes(aws_profile_property_get_value(role_session_name)), to_write); } else { memcpy(session_name_array, s_default_session_name_pfx.ptr, s_default_session_name_pfx.len); snprintf( session_name_array + s_default_session_name_pfx.len, sizeof(session_name_array) - s_default_session_name_pfx.len, "-%d", aws_get_pid()); } AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: computed session_name as %s", session_name_array); /* Automatically create a TLS context if necessary. 
We'd prefer that users pass one in, but can't force * them to because aws_credentials_provider_profile_options didn't always have a tls_ctx member. */ struct aws_tls_ctx *tls_ctx = NULL; if (options->tls_ctx) { tls_ctx = aws_tls_ctx_acquire(options->tls_ctx); } else { #ifdef BYO_CRYPTO AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a TLS context must be provided to query STS"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; #else AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "TLS context not provided, initializing a new one for querying STS"); struct aws_tls_ctx_options tls_options; aws_tls_ctx_options_init_default_client(&tls_options, allocator); tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options); aws_tls_ctx_options_clean_up(&tls_options); if (!tls_ctx) { goto done; } #endif } struct aws_credentials_provider_sts_options sts_options = { .bootstrap = options->bootstrap, .tls_ctx = tls_ctx, .role_arn = aws_byte_cursor_from_string(aws_profile_property_get_value(role_arn_property)), .session_name = aws_byte_cursor_from_c_str(session_name_array), .duration_seconds = 0, .function_table = options->function_table, }; if (source_profile_property) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: source_profile set to %s", aws_string_c_str(aws_profile_property_get_value(source_profile_property))); struct aws_credentials_provider_profile_options profile_provider_options = *options; profile_provider_options.profile_name_override = aws_byte_cursor_from_string(aws_profile_property_get_value(source_profile_property)); /* reuse profile collection instead of reading it again */ profile_provider_options.profile_collection_cached = merged_profiles; sts_options.creds_provider = s_credentials_provider_new_profile_internal(allocator, &profile_provider_options, source_profiles_table); if (!sts_options.creds_provider) { goto done; } provider = aws_credentials_provider_new_sts(allocator, &sts_options); aws_credentials_provider_release(sts_options.creds_provider); if (!provider) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: failed to load STS credentials provider"); } } else if (credential_source_property) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: credential_source property set to %s", aws_string_c_str(aws_profile_property_get_value(credential_source_property))); if (aws_string_eq_byte_cursor_ignore_case( aws_profile_property_get_value(credential_source_property), &s_ec2_imds_name)) { struct aws_credentials_provider_imds_options imds_options = { .bootstrap = options->bootstrap, .function_table = options->function_table, }; struct aws_credentials_provider *imds_provider = aws_credentials_provider_new_imds(allocator, &imds_options); if (!imds_provider) { goto done; } sts_options.creds_provider = imds_provider; provider = aws_credentials_provider_new_sts(allocator, &sts_options); aws_credentials_provider_release(imds_provider); } else if (aws_string_eq_byte_cursor_ignore_case( aws_profile_property_get_value(credential_source_property), &s_environment_name)) { struct aws_credentials_provider_environment_options env_options; AWS_ZERO_STRUCT(env_options); struct aws_credentials_provider *env_provider = aws_credentials_provider_new_environment(allocator, &env_options); if (!env_provider) { goto done; } sts_options.creds_provider = env_provider; provider = aws_credentials_provider_new_sts(allocator, &sts_options); aws_credentials_provider_release(env_provider); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: invalid credential_source 
property: %s", aws_string_c_str(aws_profile_property_get_value(credential_source_property))); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } done: aws_tls_ctx_release(tls_ctx); return provider; } static struct aws_credentials_provider *s_credentials_provider_new_profile_internal( struct aws_allocator *allocator, const struct aws_credentials_provider_profile_options *options, struct aws_hash_table *source_profiles_table) { struct aws_credentials_provider *provider = NULL; struct aws_profile_collection *config_profiles = NULL; struct aws_profile_collection *credentials_profiles = NULL; struct aws_profile_collection *merged_profiles = NULL; struct aws_string *credentials_file_path = NULL; struct aws_string *config_file_path = NULL; struct aws_string *profile_name = NULL; bool first_profile_in_chain = false; if (source_profiles_table == NULL) { source_profiles_table = aws_mem_calloc(allocator, 1, sizeof(struct aws_hash_table)); first_profile_in_chain = true; /* source_profiles_table is an hashtable of (char *) -> NULL to detect recursion loop */ if (aws_hash_table_init( source_profiles_table, allocator, 3, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL)) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "hash_table_init failed"); goto on_finished; } } profile_name = aws_get_profile_name(allocator, &options->profile_name_override); if (!profile_name) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: Profile credentials parser failed to resolve profile name"); goto on_finished; } if (options->profile_collection_cached) { /* Use cached profile collection */ merged_profiles = aws_profile_collection_acquire(options->profile_collection_cached); } else { /* Load profile collection from files */ credentials_file_path = aws_get_credentials_file_path(allocator, &options->credentials_file_name_override); if (!credentials_file_path) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: Profile credentials parser failed resolve credentials file path"); goto on_finished; } config_file_path = aws_get_config_file_path(allocator, &options->config_file_name_override); if (!config_file_path) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: Profile credentials parser failed resolve config file path"); goto on_finished; } config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG); credentials_profiles = aws_profile_collection_new_from_file(allocator, credentials_file_path, AWS_PST_CREDENTIALS); if (!(config_profiles || credentials_profiles)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: Profile credentials parser could not load or parse" " a credentials or config file."); goto on_finished; } merged_profiles = aws_profile_collection_new_from_merge(allocator, config_profiles, credentials_profiles); } const struct aws_profile *profile = aws_profile_collection_get_profile(merged_profiles, profile_name); if (!profile) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: Profile credentials provider could not load" " a profile at %s.", aws_string_c_str(profile_name)); goto on_finished; } const struct aws_profile_property *role_arn_property = aws_profile_get_property(profile, s_role_arn_name); bool profile_contains_access_key = aws_profile_get_property(profile, s_access_key_id_profile_var) != NULL; bool profile_contains_secret_access_key = aws_profile_get_property(profile, s_secret_access_key_profile_var) != NULL; bool profile_contains_credentials = profile_contains_access_key || profile_contains_secret_access_key; 
struct aws_hash_element *element = NULL; if (aws_hash_table_find(source_profiles_table, (void *)aws_string_c_str(profile_name), &element) == AWS_OP_ERR) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "hash_table_find failed"); goto on_finished; } if (element != NULL) { /* self-reference chain of length 1 is allowed with static credentials */ if (aws_hash_table_get_entry_count(source_profiles_table) > 1 || !profile_contains_credentials) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "assumeRole chain contains a circular reference"); aws_raise_error(AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE); goto on_finished; } } aws_hash_table_put(source_profiles_table, (void *)aws_string_c_str(profile_name), NULL, 0); if (role_arn_property && (first_profile_in_chain || !profile_contains_credentials)) { provider = s_create_sts_based_provider( allocator, role_arn_property, profile, options, merged_profiles, source_profiles_table); } else { provider = s_create_profile_based_provider( allocator, credentials_file_path, config_file_path, profile_name, options->profile_collection_cached); } on_finished: aws_profile_collection_release(config_profiles); aws_profile_collection_release(credentials_profiles); aws_profile_collection_release(merged_profiles); aws_string_destroy(credentials_file_path); aws_string_destroy(config_file_path); aws_string_destroy(profile_name); if (first_profile_in_chain) { aws_hash_table_clean_up(source_profiles_table); aws_mem_release(allocator, source_profiles_table); } if (provider) { provider->shutdown_options = options->shutdown_options; } return provider; } struct aws_credentials_provider *aws_credentials_provider_new_profile( struct aws_allocator *allocator, const struct aws_credentials_provider_profile_options *options) { return s_credentials_provider_new_profile_internal(allocator, options, NULL); } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_sso.c000066400000000000000000001022621456575232400267330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) #endif /* _MSC_VER */ #define SSO_RESPONSE_SIZE_INITIAL 2048 #define SSO_RESPONSE_SIZE_LIMIT 10000 #define SSO_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2 #define SSO_MAX_ATTEMPTS 3 #define SSO_RETRY_TIMEOUT_MS 100 struct aws_credentials_provider_sso_impl { struct aws_http_connection_manager *connection_manager; const struct aws_auth_http_system_vtable *function_table; struct aws_string *endpoint; struct aws_string *sso_account_id; struct aws_string *sso_role_name; struct aws_credentials_provider *token_provider; struct aws_retry_strategy *retry_strategy; }; /** * aws_sso_query_context - context for each outstanding SSO query. */ struct aws_sso_query_context { /* immutable post-creation */ struct aws_allocator *allocator; struct aws_credentials_provider *provider; aws_on_get_credentials_callback_fn *original_callback; void *original_user_data; /* mutable */ struct aws_http_connection *connection; struct aws_http_message *request; struct aws_byte_buf payload; struct aws_retry_token *retry_token; struct aws_byte_buf path_and_query; struct aws_string *token; int status_code; int error_code; }; /* called in between retries. 
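 * Each attempt owns an HTTP request, a pooled connection, a bearer token string, and the last
 * status/error code; those are released and zeroed here so a retry starts from a clean slate,
 * while the response payload buffer and the retry token are kept for the lifetime of the query.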
*/ static void s_sso_query_context_reset_request_specific_data(struct aws_sso_query_context *sso_query_context) { if (sso_query_context->request) { aws_http_message_release(sso_query_context->request); sso_query_context->request = NULL; } if (sso_query_context->connection) { struct aws_credentials_provider_sso_impl *provider_impl = sso_query_context->provider->impl; int result = provider_impl->function_table->aws_http_connection_manager_release_connection( provider_impl->connection_manager, sso_query_context->connection); (void)result; AWS_ASSERT(result == AWS_OP_SUCCESS); sso_query_context->connection = NULL; } if (sso_query_context->token) { aws_string_destroy_secure(sso_query_context->token); sso_query_context->token = NULL; } sso_query_context->status_code = 0; sso_query_context->error_code = 0; } static void s_sso_query_context_destroy(struct aws_sso_query_context *sso_query_context) { if (sso_query_context == NULL) { return; } s_sso_query_context_reset_request_specific_data(sso_query_context); aws_byte_buf_clean_up(&sso_query_context->payload); aws_byte_buf_clean_up(&sso_query_context->path_and_query); aws_credentials_provider_release(sso_query_context->provider); aws_retry_token_release(sso_query_context->retry_token); aws_mem_release(sso_query_context->allocator, sso_query_context); } static struct aws_sso_query_context *s_sso_query_context_new( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_sso_impl *impl = provider->impl; struct aws_sso_query_context *sso_query_context = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_sso_query_context)); sso_query_context->allocator = provider->allocator; sso_query_context->provider = aws_credentials_provider_acquire(provider); sso_query_context->original_user_data = user_data; sso_query_context->original_callback = callback; /* construct path and query */ struct aws_byte_cursor account_id_cursor = aws_byte_cursor_from_string(impl->sso_account_id); struct aws_byte_cursor role_name_cursor = aws_byte_cursor_from_string(impl->sso_role_name); struct aws_byte_cursor path_cursor = aws_byte_cursor_from_c_str("/federation/credentials?account_id="); struct aws_byte_cursor role_name_param_cursor = aws_byte_cursor_from_c_str("&role_name="); if (aws_byte_buf_init_copy_from_cursor(&sso_query_context->path_and_query, provider->allocator, path_cursor) || aws_byte_buf_append_encoding_uri_param(&sso_query_context->path_and_query, &account_id_cursor) || aws_byte_buf_append_dynamic(&sso_query_context->path_and_query, &role_name_param_cursor) || aws_byte_buf_append_encoding_uri_param(&sso_query_context->path_and_query, &role_name_cursor)) { goto on_error; } if (aws_byte_buf_init(&sso_query_context->payload, provider->allocator, SSO_RESPONSE_SIZE_INITIAL)) { goto on_error; } return sso_query_context; on_error: s_sso_query_context_destroy(sso_query_context); return NULL; } /* * No matter the result, this always gets called assuming that sso_query_context is successfully allocated */ static void s_finalize_get_credentials_query(struct aws_sso_query_context *sso_query_context) { struct aws_credentials *credentials = NULL; if (sso_query_context->error_code == AWS_ERROR_SUCCESS) { /* parse credentials */ struct aws_parse_credentials_from_json_doc_options parse_options = { .access_key_id_name = "accessKeyId", .secret_access_key_name = "secretAccessKey", .token_name = "sessionToken", .expiration_name = "expiration", .top_level_object_name = "roleCredentials", 
.token_required = true, .expiration_required = true, .expiration_format = AWS_PCEF_NUMBER_UNIX_EPOCH_MS, }; credentials = aws_parse_credentials_from_json_document( sso_query_context->allocator, aws_byte_cursor_from_buf(&sso_query_context->payload), &parse_options); } if (credentials) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) successfully queried credentials", (void *)sso_query_context->provider); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to query credentials", (void *)sso_query_context->provider); if (sso_query_context->error_code == AWS_ERROR_SUCCESS) { sso_query_context->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE; } } /* pass the credentials back */ sso_query_context->original_callback( credentials, sso_query_context->error_code, sso_query_context->original_user_data); /* clean up */ s_sso_query_context_destroy(sso_query_context); aws_credentials_release(credentials); } static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data); static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_sso_query_context *sso_query_context = user_data; struct aws_credentials_provider_sso_impl *impl = sso_query_context->provider->impl; impl->function_table->aws_http_stream_release(stream); /* set error code */ sso_query_context->error_code = error_code; impl->function_table->aws_http_stream_get_incoming_response_status(stream, &sso_query_context->status_code); if (error_code == AWS_OP_SUCCESS && sso_query_context->status_code != AWS_HTTP_STATUS_CODE_200_OK) { sso_query_context->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE; } /* * If we can retry the request based on error response or http status code failure, retry it, otherwise, call the * finalize function. */ if (error_code || sso_query_context->status_code != AWS_HTTP_STATUS_CODE_200_OK) { enum aws_retry_error_type error_type = aws_credentials_provider_compute_retry_error_type(sso_query_context->status_code, error_code); /* don't retry client errors at all. 
*/ if (error_type != AWS_RETRY_ERROR_TYPE_CLIENT_ERROR) { if (aws_retry_strategy_schedule_retry( sso_query_context->retry_token, error_type, s_on_retry_ready, sso_query_context) == AWS_OP_SUCCESS) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): successfully scheduled a retry", (void *)sso_query_context->provider); return; } AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to schedule retry: %s", (void *)sso_query_context->provider, aws_error_str(aws_last_error())); sso_query_context->error_code = aws_last_error(); } } else { int result = aws_retry_token_record_success(sso_query_context->retry_token); (void)result; AWS_ASSERT(result == AWS_ERROR_SUCCESS); } s_finalize_get_credentials_query(sso_query_context); } static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *body, void *user_data) { (void)stream; struct aws_sso_query_context *sso_query_context = user_data; AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) received %zu response bytes", (void *)sso_query_context->provider, body->len); if (body->len + sso_query_context->payload.len > SSO_RESPONSE_SIZE_LIMIT) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) response exceeded maximum allowed length", (void *)sso_query_context->provider); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (aws_byte_buf_append_dynamic(&sso_query_context->payload, body)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) error appending response payload: %s", (void *)sso_query_context->provider, aws_error_str(aws_last_error())); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* Request headers. */ AWS_STATIC_STRING_FROM_LITERAL(s_sso_token_header, "x-amz-sso_bearer_token"); AWS_STATIC_STRING_FROM_LITERAL(s_sso_user_agent_header, "User-Agent"); AWS_STATIC_STRING_FROM_LITERAL(s_sso_user_agent_header_value, "aws-sdk-crt/sso-credentials-provider"); static void s_query_credentials(struct aws_sso_query_context *sso_query_context) { AWS_FATAL_ASSERT(sso_query_context->connection); struct aws_http_stream *stream = NULL; struct aws_credentials_provider_sso_impl *impl = sso_query_context->provider->impl; sso_query_context->request = aws_http_message_new_request(sso_query_context->allocator); if (sso_query_context->request == NULL) { goto on_error; } struct aws_http_header auth_header = { .name = aws_byte_cursor_from_string(s_sso_token_header), .value = aws_byte_cursor_from_string(sso_query_context->token), }; struct aws_http_header host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = aws_byte_cursor_from_string(impl->endpoint), }; struct aws_http_header user_agent_header = { .name = aws_byte_cursor_from_string(s_sso_user_agent_header), .value = aws_byte_cursor_from_string(s_sso_user_agent_header_value), }; if (aws_http_message_add_header(sso_query_context->request, auth_header) || aws_http_message_add_header(sso_query_context->request, host_header) || aws_http_message_add_header(sso_query_context->request, user_agent_header)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to add http header with error: %s", (void *)sso_query_context->provider, aws_error_debug_str(aws_last_error())); goto on_error; } if (aws_http_message_set_request_method(sso_query_context->request, aws_http_method_get)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to set request method with error: %s", (void *)sso_query_context->provider, aws_error_debug_str(aws_last_error())); goto on_error; } if 
(aws_http_message_set_request_path( sso_query_context->request, aws_byte_cursor_from_buf(&sso_query_context->path_and_query))) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to set request path with error: %s", (void *)sso_query_context->provider, aws_error_debug_str(aws_last_error())); goto on_error; } struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .on_response_headers = NULL, .on_response_header_block_done = NULL, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, .user_data = sso_query_context, .request = sso_query_context->request, }; stream = impl->function_table->aws_http_connection_make_request(sso_query_context->connection, &request_options); if (!stream) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to make request with error: %s", (void *)sso_query_context->provider, aws_error_debug_str(aws_last_error())); goto on_error; } if (impl->function_table->aws_http_stream_activate(stream)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to activate the stream with error: %s", (void *)sso_query_context->provider, aws_error_debug_str(aws_last_error())); goto on_error; } return; on_error: sso_query_context->error_code = aws_last_error(); impl->function_table->aws_http_stream_release(stream); s_finalize_get_credentials_query(sso_query_context); } static void s_on_get_token_callback(struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_sso_query_context *sso_query_context = user_data; if (error_code) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: failed to acquire a token, error code %d(%s)", (void *)sso_query_context->provider, error_code, aws_error_str(error_code)); sso_query_context->error_code = error_code; s_finalize_get_credentials_query(sso_query_context); return; } struct aws_byte_cursor token = aws_credentials_get_token(credentials); AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): successfully accquired a token", (void *)sso_query_context->provider); sso_query_context->token = aws_string_new_from_cursor(sso_query_context->allocator, &token); s_query_credentials(sso_query_context); } static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_sso_query_context *sso_query_context = user_data; if (error_code) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: failed to acquire a connection, error code %d(%s)", (void *)sso_query_context->provider, error_code, aws_error_str(error_code)); sso_query_context->error_code = error_code; s_finalize_get_credentials_query(sso_query_context); return; } AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): successfully accquired a connection", (void *)sso_query_context->provider); sso_query_context->connection = connection; struct aws_credentials_provider_sso_impl *impl = sso_query_context->provider->impl; if (aws_credentials_provider_get_credentials(impl->token_provider, s_on_get_token_callback, user_data)) { int last_error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: failed to get a token, error code %d(%s)", (void *)sso_query_context->provider, last_error_code, aws_error_str(last_error_code)); sso_query_context->error_code = last_error_code; s_finalize_get_credentials_query(sso_query_context); } } /* called for each retry. 
*/ static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { (void)token; struct aws_sso_query_context *sso_query_context = user_data; struct aws_credentials_provider_sso_impl *impl = sso_query_context->provider->impl; if (error_code) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to schedule retry with error: %s", (void *)sso_query_context->provider, aws_error_debug_str(error_code)); sso_query_context->error_code = error_code; s_finalize_get_credentials_query(sso_query_context); return; } /* clear the result from previous attempt */ s_sso_query_context_reset_request_specific_data(sso_query_context); impl->function_table->aws_http_connection_manager_acquire_connection( impl->connection_manager, s_on_acquire_connection, sso_query_context); } static void s_on_retry_token_acquired( struct aws_retry_strategy *strategy, int error_code, struct aws_retry_token *token, void *user_data) { struct aws_sso_query_context *sso_query_context = user_data; (void)strategy; if (error_code) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to acquire retry token: %s", (void *)sso_query_context->provider, aws_error_debug_str(error_code)); sso_query_context->error_code = error_code; s_finalize_get_credentials_query(sso_query_context); return; } sso_query_context->retry_token = token; struct aws_credentials_provider_sso_impl *impl = sso_query_context->provider->impl; impl->function_table->aws_http_connection_manager_acquire_connection( impl->connection_manager, s_on_acquire_connection, user_data); } static int s_credentials_provider_sso_get_credentials( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_sso_impl *impl = provider->impl; struct aws_sso_query_context *sso_query_context = s_sso_query_context_new(provider, callback, user_data); if (sso_query_context == NULL) { return AWS_OP_ERR; } if (aws_retry_strategy_acquire_retry_token( impl->retry_strategy, NULL, s_on_retry_token_acquired, sso_query_context, SSO_RETRY_TIMEOUT_MS)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to acquire retry token: %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } return AWS_OP_SUCCESS; on_error: s_sso_query_context_destroy(sso_query_context); return AWS_OP_ERR; } static void s_on_connection_manager_shutdown(void *user_data) { struct aws_credentials_provider *provider = user_data; aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static void s_credentials_provider_sso_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_sso_impl *impl = provider->impl; if (impl == NULL) { return; } aws_string_destroy(impl->endpoint); aws_string_destroy(impl->sso_account_id); aws_string_destroy(impl->sso_role_name); aws_retry_strategy_release(impl->retry_strategy); aws_credentials_provider_release(impl->token_provider); /* aws_http_connection_manager_release will eventually leads to call of s_on_connection_manager_shutdown, * which will do memory release for provider and impl. So We should be freeing impl * related memory first, then call aws_http_connection_manager_release. */ if (impl->connection_manager) { impl->function_table->aws_http_connection_manager_release(impl->connection_manager); } else { /* If provider setup failed halfway through, connection_manager might not exist. 
 * In this case invoke shutdown completion callback directly to finish cleanup */
        s_on_connection_manager_shutdown(provider);
    }
}

static struct aws_credentials_provider_vtable s_aws_credentials_provider_sso_vtable = {
    .get_credentials = s_credentials_provider_sso_get_credentials,
    .destroy = s_credentials_provider_sso_destroy,
};

static int s_construct_sso_portal_endpoint(
    struct aws_allocator *allocator,
    struct aws_byte_buf *out_endpoint,
    const struct aws_string *region) {
    AWS_PRECONDITION(allocator);
    AWS_PRECONDITION(out_endpoint);
    if (!region) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }
    aws_byte_buf_clean_up(out_endpoint);

    struct aws_byte_cursor sso_prefix = aws_byte_cursor_from_c_str("portal.sso.");
    struct aws_byte_cursor region_cursor = aws_byte_cursor_from_string(region);
    struct aws_byte_cursor amazonaws_cursor = aws_byte_cursor_from_c_str(".amazonaws.com");
    struct aws_byte_cursor cn_cursor = aws_byte_cursor_from_c_str(".cn");

    if (aws_byte_buf_init_copy_from_cursor(out_endpoint, allocator, sso_prefix) ||
        aws_byte_buf_append_dynamic(out_endpoint, &region_cursor) ||
        aws_byte_buf_append_dynamic(out_endpoint, &amazonaws_cursor)) {
        goto on_error;
    }

    if (aws_string_eq_c_str_ignore_case(region, "cn-north-1") ||
        aws_string_eq_c_str_ignore_case(region, "cn-northwest-1")) {
        if (aws_byte_buf_append_dynamic(out_endpoint, &cn_cursor)) {
            goto on_error;
        }
    }
    return AWS_OP_SUCCESS;

on_error:
    aws_byte_buf_clean_up(out_endpoint);
    return AWS_OP_ERR;
}

AWS_STATIC_STRING_FROM_LITERAL(s_sso_account_id, "sso_account_id");
AWS_STATIC_STRING_FROM_LITERAL(s_sso_region, "sso_region");
AWS_STATIC_STRING_FROM_LITERAL(s_sso_role_name, "sso_role_name");
AWS_STATIC_STRING_FROM_LITERAL(s_sso_session, "sso_session");

struct sso_parameters {
    struct aws_allocator *allocator;
    struct aws_byte_buf endpoint;
    struct aws_string *sso_account_id;
    struct aws_string *sso_role_name;
    struct aws_credentials_provider *token_provider;
};

static void s_parameters_destroy(struct sso_parameters *parameters) {
    if (!parameters) {
        return;
    }
    aws_byte_buf_clean_up(&parameters->endpoint);
    aws_string_destroy(parameters->sso_account_id);
    aws_string_destroy(parameters->sso_role_name);
    aws_credentials_provider_release(parameters->token_provider);
    aws_mem_release(parameters->allocator, parameters);
}

/**
 * Read the config file and construct a profile or sso_session token provider, depending on whether the
 * sso_session property is present.
 *
 * If the profile contains an sso_session property, a valid config example is as follows.
 * [profile sso-profile]
 * sso_session = dev
 * sso_account_id = 012345678901
 * sso_role_name = SampleRole
 *
 * [sso-session dev]
 * sso_region = us-east-1
 * sso_start_url = https://d-abc123.awsapps.com/start
 *
 * If the profile doesn't contain sso_session, a valid legacy config example is as follows.
* [profile sso-profile] * sso_account_id = 012345678901 * sso_region = us-east-1 * sso_role_name = SampleRole * sso_start_url = https://d-abc123.awsapps.com/start-beta */ static struct sso_parameters *s_parameters_new( struct aws_allocator *allocator, const struct aws_credentials_provider_sso_options *options) { struct sso_parameters *parameters = aws_mem_calloc(allocator, 1, sizeof(struct sso_parameters)); parameters->allocator = allocator; struct aws_profile_collection *config_profile_collection = NULL; struct aws_string *profile_name = NULL; bool success = false; profile_name = aws_get_profile_name(allocator, &options->profile_name_override); if (!profile_name) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso: failed to resolve profile name"); goto on_finish; } if (options->config_file_cached) { /* Use cached config file */ config_profile_collection = aws_profile_collection_acquire(options->config_file_cached); } else { /* load config file */ config_profile_collection = aws_load_profile_collection_from_config_file(allocator, options->config_file_name_override); } if (!config_profile_collection) { goto on_finish; } const struct aws_profile *profile = aws_profile_collection_get_profile(config_profile_collection, profile_name); if (!profile) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso: failed to load \"%s\" profile", aws_string_c_str(profile_name)); goto on_finish; } const struct aws_profile_property *sso_account_id = aws_profile_get_property(profile, s_sso_account_id); const struct aws_profile_property *sso_role_name = aws_profile_get_property(profile, s_sso_role_name); const struct aws_profile_property *sso_region = NULL; if (!sso_account_id) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso: sso_account_id is missing"); aws_raise_error(AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE); goto on_finish; } if (!sso_role_name) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso: sso_role_name is missing"); aws_raise_error(AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE); goto on_finish; } const struct aws_profile_property *sso_session_property = aws_profile_get_property(profile, s_sso_session); /* create the appropriate token provider based on sso_session property is available or not */ if (sso_session_property) { /* construct sso_session token provider */ struct aws_token_provider_sso_session_options token_provider_options = { .config_file_name_override = options->config_file_name_override, .config_file_cached = config_profile_collection, .profile_name_override = options->profile_name_override, .bootstrap = options->bootstrap, .tls_ctx = options->tls_ctx, .system_clock_fn = options->system_clock_fn, }; parameters->token_provider = aws_token_provider_new_sso_session(allocator, &token_provider_options); if (!parameters->token_provider) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso: unable to create a sso token provider"); aws_raise_error(AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE); goto on_finish; } sso_region = aws_profile_get_property( aws_profile_collection_get_section( config_profile_collection, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, aws_profile_property_get_value(sso_session_property)), s_sso_region); } else { /* construct profile token provider */ struct aws_token_provider_sso_profile_options token_provider_options = { .config_file_name_override = options->config_file_name_override, .config_file_cached = config_profile_collection, .profile_name_override = options->profile_name_override, .system_clock_fn = options->system_clock_fn, }; 
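        /*
         * In this legacy layout the region comes straight from the profile (see the example in the
         * block comment above, sso_region = us-east-1), so s_construct_sso_portal_endpoint() later
         * resolves the endpoint to "portal.sso.us-east-1.amazonaws.com"; for cn-north-1 or
         * cn-northwest-1 a ".cn" suffix is appended, e.g. "portal.sso.cn-north-1.amazonaws.com.cn".
         */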
parameters->token_provider = aws_token_provider_new_sso_profile(allocator, &token_provider_options); if (!parameters->token_provider) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso: unable to create a profile token provider"); aws_raise_error(AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE); goto on_finish; } sso_region = aws_profile_get_property(profile, s_sso_region); } if (!sso_region) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso: sso_region is missing"); aws_raise_error(AWS_AUTH_CREDENTIALS_PROVIDER_SSO_SOURCE_FAILURE); goto on_finish; } parameters->sso_account_id = aws_string_new_from_string(allocator, aws_profile_property_get_value(sso_account_id)); parameters->sso_role_name = aws_string_new_from_string(allocator, aws_profile_property_get_value(sso_role_name)); /* determine endpoint */ if (s_construct_sso_portal_endpoint(allocator, &parameters->endpoint, aws_profile_property_get_value(sso_region))) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to construct sso endpoint"); goto on_finish; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Successfully loaded all required parameters for sso credentials provider."); success = true; on_finish: if (!success) { s_parameters_destroy(parameters); parameters = NULL; } aws_string_destroy(profile_name); aws_profile_collection_release(config_profile_collection); return parameters; } struct aws_credentials_provider *aws_credentials_provider_new_sso( struct aws_allocator *allocator, const struct aws_credentials_provider_sso_options *options) { struct sso_parameters *parameters = s_parameters_new(allocator, options); if (!parameters) { return NULL; } struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_sso_impl *impl = NULL; struct aws_tls_connection_options tls_connection_options; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_sso_impl)); AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); AWS_ZERO_STRUCT(tls_connection_options); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_sso_vtable, impl); if (!options->tls_ctx) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): a TLS context must be provided", (void *)provider); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } if (!options->bootstrap) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): a bootstrap instance must be provided", (void *)provider); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx); struct aws_byte_cursor host = aws_byte_cursor_from_buf(&parameters->endpoint); if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &host)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create a tls connection options with error %s", (void *)provider, aws_error_str(aws_last_error())); goto on_error; } struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV4; socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert( SSO_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); struct aws_http_connection_manager_options manager_options; AWS_ZERO_STRUCT(manager_options); manager_options.bootstrap = options->bootstrap; manager_options.initial_window_size = SSO_RESPONSE_SIZE_LIMIT; manager_options.socket_options =
&socket_options; manager_options.host = host; manager_options.port = 443; manager_options.max_connections = 2; manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown; manager_options.shutdown_complete_user_data = provider; manager_options.tls_connection_options = &tls_connection_options; impl->function_table = options->function_table; if (impl->function_table == NULL) { impl->function_table = g_aws_credentials_provider_http_function_table; } impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options); if (impl->connection_manager == NULL) { goto on_error; } impl->token_provider = aws_credentials_provider_acquire(parameters->token_provider); impl->endpoint = aws_string_new_from_buf(allocator, &parameters->endpoint); impl->sso_account_id = aws_string_new_from_string(allocator, parameters->sso_account_id); impl->sso_role_name = aws_string_new_from_string(allocator, parameters->sso_role_name); provider->shutdown_options = options->shutdown_options; struct aws_standard_retry_options retry_options = { .backoff_retry_options = { .el_group = options->bootstrap->event_loop_group, .max_retries = SSO_MAX_ATTEMPTS, }, }; impl->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options); if (!impl->retry_strategy) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create a retry strategy with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } s_parameters_destroy(parameters); aws_tls_connection_options_clean_up(&tls_connection_options); return provider; on_error: aws_credentials_provider_destroy(provider); s_parameters_destroy(parameters); aws_tls_connection_options_clean_up(&tls_connection_options); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_static.c000066400000000000000000000043231456575232400274150ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0.
*/ #include #include static int s_static_credentials_provider_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials *credentials = provider->impl; AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Static credentials provider successfully sourced credentials", (void *)provider); callback(credentials, AWS_ERROR_SUCCESS, user_data); return AWS_OP_SUCCESS; } static void s_static_credentials_provider_destroy(struct aws_credentials_provider *provider) { struct aws_credentials *credentials = provider->impl; aws_credentials_release(credentials); aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } /* * shared across all providers that do not need to do anything special on shutdown */ static struct aws_credentials_provider_vtable s_aws_credentials_provider_static_vtable = { .get_credentials = s_static_credentials_provider_get_credentials_async, .destroy = s_static_credentials_provider_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_static( struct aws_allocator *allocator, const struct aws_credentials_provider_static_options *options) { struct aws_credentials_provider *provider = aws_mem_acquire(allocator, sizeof(struct aws_credentials_provider)); if (provider == NULL) { return NULL; } AWS_ZERO_STRUCT(*provider); struct aws_credentials *credentials = aws_credentials_new( allocator, options->access_key_id, options->secret_access_key, options->session_token, UINT64_MAX); if (credentials == NULL) { goto on_new_credentials_failure; } aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_static_vtable, credentials); provider->shutdown_options = options->shutdown_options; return provider; on_new_credentials_failure: aws_mem_release(allocator, provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_sts.c000066400000000000000000000746231456575232400267510ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER /* allow non-constant declared initializers. 
*/ # pragma warning(disable : 4204) /* allow passing of address of automatic variable */ # pragma warning(disable : 4221) /* function pointer to dll symbol */ # pragma warning(disable : 4232) #endif static int s_sts_xml_on_AssumeRoleResponse_child(struct aws_xml_node *, void *); static int s_sts_xml_on_AssumeRoleResult_child(struct aws_xml_node *, void *); static int s_sts_xml_on_Credentials_child(struct aws_xml_node *, void *); static struct aws_http_header s_host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts.amazonaws.com"), }; static struct aws_http_header s_content_type_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-type"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/x-www-form-urlencoded"), }; static struct aws_byte_cursor s_content_length = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-length"); static struct aws_byte_cursor s_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); static struct aws_byte_cursor s_signing_region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-east-1"); static struct aws_byte_cursor s_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts"); static const int s_max_retries = 8; const uint16_t aws_sts_assume_role_default_duration_secs = 900; struct aws_credentials_provider_sts_impl { struct aws_http_connection_manager *connection_manager; struct aws_string *assume_role_profile; struct aws_string *role_session_name; uint16_t duration_seconds; struct aws_credentials_provider *provider; struct aws_credentials_provider_shutdown_options source_shutdown_options; const struct aws_auth_http_system_vtable *function_table; struct aws_retry_strategy *retry_strategy; aws_io_clock_fn *system_clock_fn; }; struct sts_creds_provider_user_data { struct aws_allocator *allocator; struct aws_credentials_provider *provider; struct aws_credentials *credentials; struct aws_string *access_key_id; struct aws_string *secret_access_key; struct aws_string *session_token; aws_on_get_credentials_callback_fn *callback; struct aws_http_connection *connection; struct aws_byte_buf payload_body; struct aws_input_stream *input_stream; struct aws_signable *signable; struct aws_signing_config_aws signing_config; struct aws_http_message *message; struct aws_byte_buf output_buf; struct aws_retry_token *retry_token; int error_code; void *user_data; }; static void s_reset_request_specific_data(struct sts_creds_provider_user_data *user_data) { if (user_data->connection) { struct aws_credentials_provider_sts_impl *provider_impl = user_data->provider->impl; provider_impl->function_table->aws_http_connection_manager_release_connection( provider_impl->connection_manager, user_data->connection); user_data->connection = NULL; } if (user_data->signable) { aws_signable_destroy(user_data->signable); user_data->signable = NULL; } if (user_data->input_stream) { aws_input_stream_destroy(user_data->input_stream); user_data->input_stream = NULL; } aws_byte_buf_clean_up(&user_data->payload_body); if (user_data->message) { aws_http_message_destroy(user_data->message); user_data->message = NULL; } aws_byte_buf_clean_up(&user_data->output_buf); aws_string_destroy(user_data->access_key_id); user_data->access_key_id = NULL; aws_string_destroy_secure(user_data->secret_access_key); user_data->secret_access_key = NULL; aws_string_destroy(user_data->session_token); user_data->session_token = NULL; } static void s_clean_up_user_data(struct sts_creds_provider_user_data *user_data) { 
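/* Final teardown for a single AssumeRole query: deliver the outcome to the caller's callback,
 * then drop everything the request held (credentials, per-request state, provider reference,
 * retry token) and free the wrapper itself. */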
user_data->callback(user_data->credentials, user_data->error_code, user_data->user_data); aws_credentials_release(user_data->credentials); s_reset_request_specific_data(user_data); aws_credentials_provider_release(user_data->provider); aws_retry_token_release(user_data->retry_token); aws_mem_release(user_data->allocator, user_data); } static int s_write_body_to_buffer(struct aws_credentials_provider *provider, struct aws_byte_buf *body) { struct aws_credentials_provider_sts_impl *provider_impl = provider->impl; struct aws_byte_cursor working_cur = aws_byte_cursor_from_c_str("Version=2011-06-15&Action=AssumeRole&RoleArn="); if (aws_byte_buf_append_dynamic(body, &working_cur)) { return AWS_OP_ERR; } struct aws_byte_cursor role_cur = aws_byte_cursor_from_string(provider_impl->assume_role_profile); if (aws_byte_buf_append_encoding_uri_param(body, &role_cur)) { return AWS_OP_ERR; } working_cur = aws_byte_cursor_from_c_str("&RoleSessionName="); if (aws_byte_buf_append_dynamic(body, &working_cur)) { return AWS_OP_ERR; } struct aws_byte_cursor session_cur = aws_byte_cursor_from_string(provider_impl->role_session_name); if (aws_byte_buf_append_encoding_uri_param(body, &session_cur)) { return AWS_OP_ERR; } working_cur = aws_byte_cursor_from_c_str("&DurationSeconds="); if (aws_byte_buf_append_dynamic(body, &working_cur)) { return AWS_OP_ERR; } char duration_seconds[6]; AWS_ZERO_ARRAY(duration_seconds); snprintf(duration_seconds, sizeof(duration_seconds), "%" PRIu16, provider_impl->duration_seconds); working_cur = aws_byte_cursor_from_c_str(duration_seconds); if (aws_byte_buf_append_dynamic(body, &working_cur)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct sts_creds_provider_user_data *provider_user_data = user_data; return aws_byte_buf_append_dynamic(&provider_user_data->output_buf, data); } /* parse doc of form accessKeyId secretKey sessionToken ... more stuff we don't care about. ... 
more stuff we don't care about */ static int s_sts_xml_on_root(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleResponse")) { return aws_xml_node_traverse(node, s_sts_xml_on_AssumeRoleResponse_child, user_data); } return AWS_OP_SUCCESS; } static int s_sts_xml_on_AssumeRoleResponse_child(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleResult")) { return aws_xml_node_traverse(node, s_sts_xml_on_AssumeRoleResult_child, user_data); } return AWS_OP_SUCCESS; } static int s_sts_xml_on_AssumeRoleResult_child(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) { return aws_xml_node_traverse(node, s_sts_xml_on_Credentials_child, user_data); } return AWS_OP_SUCCESS; } static int s_sts_xml_on_Credentials_child(struct aws_xml_node *node, void *user_data) { struct sts_creds_provider_user_data *provider_user_data = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor credential_data; AWS_ZERO_STRUCT(credential_data); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AccessKeyId")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } provider_user_data->access_key_id = aws_string_new_from_cursor(provider_user_data->allocator, &credential_data); AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Read AccessKeyId %s", (void *)provider_user_data->provider, aws_string_c_str(provider_user_data->access_key_id)); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SecretAccessKey")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } provider_user_data->secret_access_key = aws_string_new_from_cursor(provider_user_data->allocator, &credential_data); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SessionToken")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } provider_user_data->session_token = aws_string_new_from_cursor(provider_user_data->allocator, &credential_data); } return AWS_OP_SUCCESS; } static void s_start_make_request( struct aws_credentials_provider *provider, struct sts_creds_provider_user_data *provider_user_data); static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { (void)token; struct sts_creds_provider_user_data *provider_user_data = user_data; if (!error_code) { s_start_make_request(provider_user_data->provider, provider_user_data); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): retry task failed: %s", (void *)provider_user_data->provider, aws_error_str(aws_last_error())); s_clean_up_user_data(provider_user_data); } } /* called upon completion of http request */ static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { int http_response_code = 0; struct sts_creds_provider_user_data *provider_user_data = user_data; struct aws_credentials_provider_sts_impl *provider_impl = provider_user_data->provider->impl; provider_user_data->error_code = error_code; if (provider_impl->function_table->aws_http_stream_get_incoming_response_status(stream, &http_response_code)) { goto finish; } if (http_response_code != 200) { provider_user_data->error_code = 
AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE; } provider_impl->function_table->aws_http_stream_release(stream); AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): AssumeRole call completed with http status %d", (void *)provider_user_data->provider, http_response_code); if (error_code || http_response_code != AWS_HTTP_STATUS_CODE_200_OK) { /* prevent connection reuse. */ provider_impl->function_table->aws_http_connection_close(provider_user_data->connection); enum aws_retry_error_type error_type = aws_credentials_provider_compute_retry_error_type(http_response_code, error_code); s_reset_request_specific_data(provider_user_data); /* don't retry client errors at all. */ if (error_type != AWS_RETRY_ERROR_TYPE_CLIENT_ERROR) { if (aws_retry_strategy_schedule_retry( provider_user_data->retry_token, error_type, s_on_retry_ready, provider_user_data)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to schedule retry: %s", (void *)provider_user_data->provider, aws_error_str(aws_last_error())); goto finish; } return; } } if (!error_code && http_response_code == AWS_HTTP_STATUS_CODE_200_OK) { /* update the book keeping so we can let the retry strategy make determinations about when the service is * healthy after an outage. */ if (aws_retry_token_record_success(provider_user_data->retry_token)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to register operation success: %s", (void *)provider_user_data->provider, aws_error_str(aws_last_error())); goto finish; } uint64_t now = UINT64_MAX; if (provider_impl->system_clock_fn(&now) != AWS_OP_SUCCESS) { goto finish; } uint64_t now_seconds = aws_timestamp_convert(now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_buf(&provider_user_data->output_buf), .on_root_encountered = s_sts_xml_on_root, .user_data = provider_user_data, }; if (aws_xml_parse(provider_user_data->provider->allocator, &options)) { provider_user_data->error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): credentials parsing failed with error %s", (void *)provider_user_data->credentials, aws_error_debug_str(provider_user_data->error_code)); provider_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE; goto finish; } if (provider_user_data->access_key_id && provider_user_data->secret_access_key && provider_user_data->session_token) { provider_user_data->credentials = aws_credentials_new_from_string( provider_user_data->allocator, provider_user_data->access_key_id, provider_user_data->secret_access_key, provider_user_data->session_token, now_seconds + provider_impl->duration_seconds); } if (provider_user_data->credentials == NULL) { provider_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE; AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): credentials document was corrupted, treating as an error.", (void *)provider_user_data->provider); } } finish: s_clean_up_user_data(provider_user_data); } /* called upon acquiring a connection from the pool */ static void s_on_connection_setup_fn(struct aws_http_connection *connection, int error_code, void *user_data) { struct sts_creds_provider_user_data *provider_user_data = user_data; struct aws_credentials_provider_sts_impl *provider_impl = provider_user_data->provider->impl; struct aws_http_stream *stream = NULL; AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): connection returned with error code %d", (void 
*)provider_user_data->provider, error_code); if (error_code) { aws_raise_error(error_code); goto error; } provider_user_data->connection = connection; if (aws_byte_buf_init(&provider_user_data->output_buf, provider_impl->provider->allocator, 2048)) { goto error; } struct aws_http_make_request_options options = { .user_data = user_data, .request = provider_user_data->message, .self_size = sizeof(struct aws_http_make_request_options), .on_response_headers = NULL, .on_response_header_block_done = NULL, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, }; stream = provider_impl->function_table->aws_http_connection_make_request(connection, &options); if (!stream) { goto error; } if (provider_impl->function_table->aws_http_stream_activate(stream)) { goto error; } return; error: provider_impl->function_table->aws_http_stream_release(stream); s_clean_up_user_data(provider_user_data); } /* called once sigv4 signing is complete. */ void s_on_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) { struct sts_creds_provider_user_data *provider_user_data = userdata; struct aws_credentials_provider_sts_impl *sts_impl = provider_user_data->provider->impl; AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): signing completed with error code %d", (void *)provider_user_data->provider, error_code); if (error_code) { provider_user_data->error_code = error_code; aws_raise_error(error_code); goto error; } if (aws_apply_signing_result_to_http_request( provider_user_data->message, provider_user_data->provider->allocator, result)) { goto error; } sts_impl->function_table->aws_http_connection_manager_acquire_connection( sts_impl->connection_manager, s_on_connection_setup_fn, provider_user_data); return; error: s_clean_up_user_data(provider_user_data); } static void s_start_make_request( struct aws_credentials_provider *provider, struct sts_creds_provider_user_data *provider_user_data) { provider_user_data->message = aws_http_message_new_request(provider->allocator); if (!provider_user_data->message) { goto error; } if (aws_http_message_add_header(provider_user_data->message, s_host_header)) { goto error; } if (aws_http_message_add_header(provider_user_data->message, s_content_type_header)) { goto error; } if (aws_byte_buf_init(&provider_user_data->payload_body, provider->allocator, 256)) { goto error; } if (s_write_body_to_buffer(provider, &provider_user_data->payload_body)) { goto error; } char content_length[21]; AWS_ZERO_ARRAY(content_length); snprintf(content_length, sizeof(content_length), "%" PRIu64, (uint64_t)provider_user_data->payload_body.len); struct aws_http_header content_len_header = { .name = s_content_length, .value = aws_byte_cursor_from_c_str(content_length), }; if (aws_http_message_add_header(provider_user_data->message, content_len_header)) { goto error; } struct aws_byte_cursor payload_cur = aws_byte_cursor_from_buf(&provider_user_data->payload_body); provider_user_data->input_stream = aws_input_stream_new_from_cursor(provider_user_data->provider->allocator, &payload_cur); if (!provider_user_data->input_stream) { goto error; } aws_http_message_set_body_stream(provider_user_data->message, provider_user_data->input_stream); if (aws_http_message_set_request_method(provider_user_data->message, aws_http_method_post)) { goto error; } if (aws_http_message_set_request_path(provider_user_data->message, s_path)) { goto error; } provider_user_data->signable = aws_signable_new_http_request(provider->allocator, 
provider_user_data->message); if (!provider_user_data->signable) { goto error; } struct aws_credentials_provider_sts_impl *impl = provider->impl; provider_user_data->signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4; provider_user_data->signing_config.signature_type = AWS_ST_HTTP_REQUEST_HEADERS; provider_user_data->signing_config.signed_body_header = AWS_SBHT_NONE; provider_user_data->signing_config.config_type = AWS_SIGNING_CONFIG_AWS; provider_user_data->signing_config.credentials_provider = impl->provider; aws_date_time_init_now(&provider_user_data->signing_config.date); provider_user_data->signing_config.region = s_signing_region; provider_user_data->signing_config.service = s_service_name; provider_user_data->signing_config.flags.use_double_uri_encode = false; if (aws_sign_request_aws( provider->allocator, provider_user_data->signable, (struct aws_signing_config_base *)&provider_user_data->signing_config, s_on_signing_complete, provider_user_data)) { goto error; } return; error: AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): error occurred while creating an http request for signing: %s", (void *)provider_user_data->provider, aws_error_debug_str(aws_last_error())); if (provider_user_data) { s_clean_up_user_data(provider_user_data); } else { provider_user_data->callback(NULL, provider_user_data->error_code, provider_user_data->user_data); } } static void s_on_retry_token_acquired( struct aws_retry_strategy *strategy, int error_code, struct aws_retry_token *token, void *user_data) { (void)strategy; struct sts_creds_provider_user_data *provider_user_data = user_data; if (!error_code) { provider_user_data->retry_token = token; s_start_make_request(provider_user_data->provider, provider_user_data); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to acquire retry token: %s", (void *)provider_user_data->provider, aws_error_debug_str(error_code)); s_clean_up_user_data(provider_user_data); } } static int s_sts_get_creds( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_sts_impl *impl = provider->impl; AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): fetching credentials", (void *)provider); struct sts_creds_provider_user_data *provider_user_data = aws_mem_calloc(provider->allocator, 1, sizeof(struct sts_creds_provider_user_data)); if (!provider_user_data) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): error occurred while allocating memory: %s", (void *)provider, aws_error_debug_str(aws_last_error())); callback(NULL, aws_last_error(), user_data); return AWS_OP_ERR; } provider_user_data->allocator = provider->allocator; provider_user_data->provider = provider; aws_credentials_provider_acquire(provider); provider_user_data->callback = callback; provider_user_data->user_data = user_data; if (aws_retry_strategy_acquire_retry_token( impl->retry_strategy, NULL, s_on_retry_token_acquired, provider_user_data, 100)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to acquire retry token: %s", (void *)provider_user_data->provider, aws_error_debug_str(aws_last_error())); callback(NULL, aws_last_error(), user_data); s_clean_up_user_data(user_data); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_on_credentials_provider_shutdown(void *user_data) { struct aws_credentials_provider *provider = user_data; if (provider == NULL) { return; } struct aws_credentials_provider_sts_impl *impl = provider->impl; if (impl == NULL) { 
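/* defensive: nothing to tear down if an impl was never attached */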
return; } /* The wrapped provider has shut down, invoke its shutdown callback if there was one */ if (impl->source_shutdown_options.shutdown_callback != NULL) { impl->source_shutdown_options.shutdown_callback(impl->source_shutdown_options.shutdown_user_data); } /* Invoke our own shutdown callback */ aws_credentials_provider_invoke_shutdown_callback(provider); aws_string_destroy(impl->role_session_name); aws_string_destroy(impl->assume_role_profile); aws_mem_release(provider->allocator, provider); } void s_destroy(struct aws_credentials_provider *provider) { AWS_LOGF_TRACE(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): cleaning up credentials provider", (void *)provider); struct aws_credentials_provider_sts_impl *sts_impl = provider->impl; if (sts_impl->connection_manager) { sts_impl->function_table->aws_http_connection_manager_release(sts_impl->connection_manager); } aws_retry_strategy_release(sts_impl->retry_strategy); aws_credentials_provider_release(sts_impl->provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_sts_vtable = { .get_credentials = s_sts_get_creds, .destroy = s_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_sts( struct aws_allocator *allocator, const struct aws_credentials_provider_sts_options *options) { if (!options->bootstrap) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a client bootstrap is necessary for quering STS"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (!options->tls_ctx) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a TLS context is necessary for querying STS"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_sts_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_sts_impl)); AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: creating STS credentials provider"); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_sts_vtable, impl); impl->function_table = g_aws_credentials_provider_http_function_table; if (options->function_table) { impl->function_table = options->function_table; } struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); if (!options->creds_provider) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): A credentials provider must be specified", (void *)provider); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto cleanup_provider; } impl->role_session_name = aws_string_new_from_array(allocator, options->session_name.ptr, options->session_name.len); if (!impl->role_session_name) { goto cleanup_provider; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): using session_name %s", (void *)provider, aws_string_c_str(impl->role_session_name)); impl->assume_role_profile = aws_string_new_from_array(allocator, options->role_arn.ptr, options->role_arn.len); if (!impl->assume_role_profile) { goto cleanup_provider; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): using assume_role_arn %s", (void *)provider, aws_string_c_str(impl->assume_role_profile)); impl->duration_seconds = options->duration_seconds; if (options->system_clock_fn != NULL) { impl->system_clock_fn = options->system_clock_fn; } else { impl->system_clock_fn = aws_sys_clock_get_ticks; } /* minimum for STS is 900 
seconds*/ if (impl->duration_seconds < aws_sts_assume_role_default_duration_secs) { impl->duration_seconds = aws_sts_assume_role_default_duration_secs; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): using credentials duration %" PRIu16, (void *)provider, impl->duration_seconds); impl->provider = options->creds_provider; aws_credentials_provider_acquire(impl->provider); aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx); if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &s_host_header.value)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create a tls connection options with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto cleanup_provider; } struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV6, .connect_timeout_ms = 3000, }; struct aws_http_connection_manager_options connection_manager_options = { .bootstrap = options->bootstrap, .host = s_host_header.value, .initial_window_size = SIZE_MAX, .max_connections = 2, .port = 443, .socket_options = &socket_options, .tls_connection_options = &tls_connection_options, .proxy_options = options->http_proxy_options, }; impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &connection_manager_options); if (!impl->connection_manager) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create a connection manager with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto cleanup_provider; } /* * Save the wrapped provider's shutdown callback and then swap it with our own. */ impl->source_shutdown_options = impl->provider->shutdown_options; impl->provider->shutdown_options.shutdown_callback = s_on_credentials_provider_shutdown; impl->provider->shutdown_options.shutdown_user_data = provider; provider->shutdown_options = options->shutdown_options; struct aws_standard_retry_options retry_options = { .backoff_retry_options = { .el_group = options->bootstrap->event_loop_group, .max_retries = s_max_retries, }, }; impl->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options); if (!impl->retry_strategy) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create a retry strategy with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto cleanup_provider; } aws_tls_connection_options_clean_up(&tls_connection_options); return provider; cleanup_provider: aws_tls_connection_options_clean_up(&tls_connection_options); aws_credentials_provider_release(provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_sts_web_identity.c000066400000000000000000001276761456575232400315260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) # pragma warning(disable : 4232) #endif /* _MSC_VER */ #define STS_WEB_IDENTITY_RESPONSE_SIZE_INITIAL 2048 #define STS_WEB_IDENTITY_RESPONSE_SIZE_LIMIT 10000 #define STS_WEB_IDENTITY_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2 #define STS_WEB_IDENTITY_CREDS_DEFAULT_DURATION_SECONDS 900 #define STS_WEB_IDENTITY_MAX_ATTEMPTS 3 static void s_on_connection_manager_shutdown(void *user_data); static int s_stswebid_error_xml_on_Error_child(struct aws_xml_node *, void *); static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResponse_child(struct aws_xml_node *, void *); static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResult_child(struct aws_xml_node *, void *); static int s_stswebid_200_xml_on_Credentials_child(struct aws_xml_node *, void *); struct aws_credentials_provider_sts_web_identity_impl { struct aws_http_connection_manager *connection_manager; const struct aws_auth_http_system_vtable *function_table; struct aws_string *role_arn; struct aws_string *role_session_name; struct aws_string *token_file_path; }; /* * Tracking structure for each outstanding async query to an sts_web_identity provider */ struct sts_web_identity_user_data { /* immutable post-creation */ struct aws_allocator *allocator; struct aws_credentials_provider *sts_web_identity_provider; aws_on_get_credentials_callback_fn *original_callback; void *original_user_data; /* mutable */ struct aws_http_connection *connection; struct aws_http_message *request; struct aws_byte_buf response; struct aws_string *access_key_id; struct aws_string *secret_access_key; struct aws_string *session_token; uint64_t expiration_timepoint_in_seconds; struct aws_byte_buf payload_buf; int status_code; int error_code; int attempt_count; }; static void s_user_data_reset_request_and_response(struct sts_web_identity_user_data *user_data) { aws_byte_buf_reset(&user_data->response, true /*zero out*/); aws_byte_buf_reset(&user_data->payload_buf, true /*zero out*/); user_data->status_code = 0; if (user_data->request) { aws_input_stream_destroy(aws_http_message_get_body_stream(user_data->request)); } aws_http_message_destroy(user_data->request); user_data->request = NULL; aws_string_destroy(user_data->access_key_id); user_data->access_key_id = NULL; aws_string_destroy_secure(user_data->secret_access_key); user_data->secret_access_key = NULL; aws_string_destroy_secure(user_data->session_token); user_data->session_token = NULL; } static void s_user_data_destroy(struct sts_web_identity_user_data *user_data) { if (user_data == NULL) { return; } struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl; if (user_data->connection) { impl->function_table->aws_http_connection_manager_release_connection( impl->connection_manager, user_data->connection); } s_user_data_reset_request_and_response(user_data); aws_byte_buf_clean_up(&user_data->response); aws_string_destroy(user_data->access_key_id); aws_string_destroy_secure(user_data->secret_access_key); aws_string_destroy_secure(user_data->session_token); aws_byte_buf_clean_up(&user_data->payload_buf); aws_credentials_provider_release(user_data->sts_web_identity_provider); aws_mem_release(user_data->allocator, user_data); } static struct sts_web_identity_user_data *s_user_data_new( struct aws_credentials_provider 
*sts_web_identity_provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct sts_web_identity_user_data *wrapped_user_data = aws_mem_calloc(sts_web_identity_provider->allocator, 1, sizeof(struct sts_web_identity_user_data)); if (wrapped_user_data == NULL) { goto on_error; } wrapped_user_data->allocator = sts_web_identity_provider->allocator; wrapped_user_data->sts_web_identity_provider = sts_web_identity_provider; aws_credentials_provider_acquire(sts_web_identity_provider); wrapped_user_data->original_user_data = user_data; wrapped_user_data->original_callback = callback; if (aws_byte_buf_init( &wrapped_user_data->response, sts_web_identity_provider->allocator, STS_WEB_IDENTITY_RESPONSE_SIZE_INITIAL)) { goto on_error; } if (aws_byte_buf_init(&wrapped_user_data->payload_buf, sts_web_identity_provider->allocator, 1024)) { goto on_error; } return wrapped_user_data; on_error: s_user_data_destroy(wrapped_user_data); return NULL; } /* * In general, the STS_WEB_IDENTITY response document looks something like: amzn1.account.AF6RHO7KZU5XRVQJGXK6HB56KR2A client.5498841531868486423.1548@apps.example.com arn:aws:sts::123456789012:assumed-role/FederatedWebIdentityRole/app1 AROACLKWSDQRAOEXAMPLE:app1 AQoDYXdzEE0a8ANXXXXXXXXNO1ewxE5TijQyp+IEXAMPLE wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY 2014-10-24T23:00:23Z ASgeIAIOSFODNN7EXAMPLE www.amazon.com ad4156e9-bce1-11e2-82e6-6b6efEXAMPLE Error Response looks like: ExceptionName XXX YYY 4442587FB7D0A2F9 */ static int s_stswebid_error_xml_on_root(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Error")) { return aws_xml_node_traverse(node, s_stswebid_error_xml_on_Error_child, user_data); } return AWS_OP_SUCCESS; } static int s_stswebid_error_xml_on_Error_child(struct aws_xml_node *node, void *user_data) { bool *get_retryable_error = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Code")) { struct aws_byte_cursor data_cursor = {0}; if (aws_xml_node_as_body(node, &data_cursor)) { return AWS_OP_ERR; } if (aws_byte_cursor_eq_c_str_ignore_case(&data_cursor, "IDPCommunicationError") || aws_byte_cursor_eq_c_str_ignore_case(&data_cursor, "InvalidIdentityToken")) { *get_retryable_error = true; } } return AWS_OP_SUCCESS; } static bool s_parse_retryable_error_from_response(struct aws_allocator *allocator, struct aws_byte_buf *response) { bool get_retryable_error = false; struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_buf(response), .on_root_encountered = s_stswebid_error_xml_on_root, .user_data = &get_retryable_error, }; if (aws_xml_parse(allocator, &options)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse xml error response for sts web identity with error %s", aws_error_str(aws_last_error())); return false; } return get_retryable_error; } static int s_stswebid_200_xml_on_root(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResponse")) { return aws_xml_node_traverse(node, s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResponse_child, user_data); } return AWS_OP_SUCCESS; } static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResponse_child( struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if 
(aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResult")) { return aws_xml_node_traverse(node, s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResult_child, user_data); } return AWS_OP_SUCCESS; } static int s_stswebid_200_xml_on_AssumeRoleWithWebIdentityResult_child( struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) { return aws_xml_node_traverse(node, s_stswebid_200_xml_on_Credentials_child, user_data); } return AWS_OP_SUCCESS; } static int s_stswebid_200_xml_on_Credentials_child(struct aws_xml_node *node, void *user_data) { struct sts_web_identity_user_data *query_user_data = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor credential_data; AWS_ZERO_STRUCT(credential_data); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AccessKeyId")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } query_user_data->access_key_id = aws_string_new_from_cursor(query_user_data->allocator, &credential_data); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SecretAccessKey")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } query_user_data->secret_access_key = aws_string_new_from_cursor(query_user_data->allocator, &credential_data); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SessionToken")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } query_user_data->session_token = aws_string_new_from_cursor(query_user_data->allocator, &credential_data); } /* As long as we parsed a usable expiration, use it, otherwise use * the existing one: now + 900s, initialized before parsing.
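 * (The fallback value, now + STS_WEB_IDENTITY_CREDS_DEFAULT_DURATION_SECONDS, is set in
 * s_parse_credentials_from_response before the XML parse runs.)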
*/ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Expiration")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } if (credential_data.len != 0) { struct aws_date_time expiration; if (aws_date_time_init_from_str_cursor(&expiration, &credential_data, AWS_DATE_FORMAT_ISO_8601) == AWS_OP_SUCCESS) { query_user_data->expiration_timepoint_in_seconds = (uint64_t)aws_date_time_as_epoch_secs(&expiration); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse time string from sts web identity xml response: %s", aws_error_str(aws_last_error())); return AWS_OP_ERR; } } } return AWS_OP_SUCCESS; } static struct aws_credentials *s_parse_credentials_from_response( struct sts_web_identity_user_data *query_user_data, struct aws_byte_buf *response) { struct aws_credentials *credentials = NULL; if (!response || response->len == 0) { goto on_finish; } uint64_t now = UINT64_MAX; if (aws_sys_clock_get_ticks(&now) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to get sys clock for sts web identity credentials provider to parse error information."); goto on_finish; } uint64_t now_seconds = aws_timestamp_convert(now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); query_user_data->expiration_timepoint_in_seconds = now_seconds + STS_WEB_IDENTITY_CREDS_DEFAULT_DURATION_SECONDS; struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_buf(response), .on_root_encountered = s_stswebid_200_xml_on_root, .user_data = query_user_data, }; if (aws_xml_parse(query_user_data->allocator, &options)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse xml response for sts web identity with error: %s", aws_error_str(aws_last_error())); goto on_finish; } if (!query_user_data->access_key_id || !query_user_data->secret_access_key) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "STS web identity not found in XML response."); goto on_finish; } credentials = aws_credentials_new( query_user_data->allocator, aws_byte_cursor_from_string(query_user_data->access_key_id), aws_byte_cursor_from_string(query_user_data->secret_access_key), aws_byte_cursor_from_string(query_user_data->session_token), query_user_data->expiration_timepoint_in_seconds); if (credentials == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to create credentials for sts web identity"); goto on_finish; } on_finish: if (credentials == NULL) { /* Give a useful error (aws_last_error() might be AWS_ERROR_INVALID_ARGUMENT, which isn't too helpful) */ query_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE; } return credentials; } /* * No matter the result, this always gets called assuming that user_data is successfully allocated */ static void s_finalize_get_credentials_query(struct sts_web_identity_user_data *user_data) { /* Try to build credentials from whatever, if anything, was in the result */ struct aws_credentials *credentials = NULL; if (user_data->status_code == AWS_HTTP_STATUS_CODE_200_OK) { credentials = s_parse_credentials_from_response(user_data, &user_data->response); } if (credentials != NULL) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) STS_WEB_IDENTITY credentials provider successfully queried credentials", (void *)user_data->sts_web_identity_provider); } else { AWS_LOGF_WARN( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) STS_WEB_IDENTITY credentials provider failed to query credentials", (void *)user_data->sts_web_identity_provider); if (user_data->error_code == AWS_ERROR_SUCCESS) 
{ user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE; } } /* pass the credentials back */ user_data->original_callback(credentials, user_data->error_code, user_data->original_user_data); /* clean up */ s_user_data_destroy(user_data); aws_credentials_release(credentials); } static int s_on_incoming_body_fn( struct aws_http_stream *stream, const struct aws_byte_cursor *body, void *wrapped_user_data) { (void)stream; struct sts_web_identity_user_data *user_data = wrapped_user_data; struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl; AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) STS_WEB_IDENTITY credentials provider received %zu response bytes", (void *)user_data->sts_web_identity_provider, body->len); if (body->len + user_data->response.len > STS_WEB_IDENTITY_RESPONSE_SIZE_LIMIT) { impl->function_table->aws_http_connection_close(user_data->connection); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) STS_WEB_IDENTITY credentials provider query response exceeded maximum allowed length", (void *)user_data->sts_web_identity_provider); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (aws_byte_buf_append_dynamic(&user_data->response, body)) { impl->function_table->aws_http_connection_close(user_data->connection); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) STS_WEB_IDENTITY credentials provider query error appending response: %s", (void *)user_data->sts_web_identity_provider, aws_error_str(aws_last_error())); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_on_incoming_headers_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *wrapped_user_data) { (void)header_array; (void)num_headers; if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) { return AWS_OP_SUCCESS; } struct sts_web_identity_user_data *user_data = wrapped_user_data; if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { if (user_data->status_code == 0) { struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl; if (impl->function_table->aws_http_stream_get_incoming_response_status(stream, &user_data->status_code)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) STS_WEB_IDENTITY credentials provider failed to get http status code: %s", (void *)user_data->sts_web_identity_provider, aws_error_str(aws_last_error())); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) STS_WEB_IDENTITY credentials provider query received http status code %d", (void *)user_data->sts_web_identity_provider, user_data->status_code); } } return AWS_OP_SUCCESS; } static void s_query_credentials(struct sts_web_identity_user_data *user_data); static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *data) { struct sts_web_identity_user_data *user_data = data; struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl; struct aws_http_connection *connection = impl->function_table->aws_http_stream_get_connection(stream); impl->function_table->aws_http_stream_release(stream); impl->function_table->aws_http_connection_manager_release_connection(impl->connection_manager, connection); /* * On anything other than a 200, if we can retry the request based on * error response, retry it, otherwise, call the finalize function. 
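 * Retries stop once attempt_count reaches STS_WEB_IDENTITY_MAX_ATTEMPTS, and are only attempted
 * when a non-empty error body names IDPCommunicationError or InvalidIdentityToken.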
*/ if (user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || error_code != AWS_OP_SUCCESS) { if (++user_data->attempt_count < STS_WEB_IDENTITY_MAX_ATTEMPTS && user_data->response.len) { if (s_parse_retryable_error_from_response(user_data->allocator, &user_data->response)) { s_query_credentials(user_data); return; } } } s_finalize_get_credentials_query(user_data); } static struct aws_http_header s_host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts.amazonaws.com"), }; static struct aws_http_header s_content_type_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-type"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/x-www-form-urlencoded"), }; static struct aws_http_header s_api_version_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-api-version"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("2011-06-15"), }; static struct aws_http_header s_accept_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("*/*"), }; static struct aws_http_header s_user_agent_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("User-Agent"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-sdk-crt/sts-web-identity-credentials-provider"), }; static struct aws_http_header s_keep_alive_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("keep-alive"), }; static struct aws_byte_cursor s_content_length = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-length"); static struct aws_byte_cursor s_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); static int s_make_sts_web_identity_http_query( struct sts_web_identity_user_data *user_data, struct aws_byte_cursor *body_cursor) { AWS_FATAL_ASSERT(user_data->connection); struct aws_http_stream *stream = NULL; struct aws_input_stream *input_stream = NULL; struct aws_http_message *request = aws_http_message_new_request(user_data->allocator); if (request == NULL) { return AWS_OP_ERR; } struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl; char content_length[21]; AWS_ZERO_ARRAY(content_length); snprintf(content_length, sizeof(content_length), "%" PRIu64, (uint64_t)body_cursor->len); struct aws_http_header content_len_header = { .name = s_content_length, .value = aws_byte_cursor_from_c_str(content_length), }; if (aws_http_message_add_header(request, content_len_header)) { goto on_error; } if (aws_http_message_add_header(request, s_content_type_header)) { goto on_error; } if (aws_http_message_add_header(request, s_host_header)) { goto on_error; } if (aws_http_message_add_header(request, s_api_version_header)) { goto on_error; } if (aws_http_message_add_header(request, s_accept_header)) { goto on_error; } if (aws_http_message_add_header(request, s_user_agent_header)) { goto on_error; } if (aws_http_message_add_header(request, s_keep_alive_header)) { goto on_error; } input_stream = aws_input_stream_new_from_cursor(user_data->allocator, body_cursor); if (!input_stream) { goto on_error; } aws_http_message_set_body_stream(request, input_stream); if (aws_http_message_set_request_path(request, s_path)) { goto on_error; } if (aws_http_message_set_request_method(request, aws_http_method_post)) { goto on_error; } user_data->request = request; struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .on_response_headers = s_on_incoming_headers_fn, 
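/* headers are inspected only to capture the HTTP status code; the body callback accumulates the XML response for later parsing */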
.on_response_header_block_done = NULL, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, .user_data = user_data, .request = request, }; stream = impl->function_table->aws_http_connection_make_request(user_data->connection, &request_options); if (!stream) { goto on_error; } if (impl->function_table->aws_http_stream_activate(stream)) { goto on_error; } return AWS_OP_SUCCESS; on_error: impl->function_table->aws_http_stream_release(stream); aws_input_stream_destroy(input_stream); aws_http_message_destroy(request); user_data->request = NULL; return AWS_OP_ERR; } static void s_query_credentials(struct sts_web_identity_user_data *user_data) { AWS_FATAL_ASSERT(user_data->connection); struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl; /* "Clear" the result */ s_user_data_reset_request_and_response(user_data); /* * Calculate body message: * "Action=AssumeRoleWithWebIdentity" * + "&Version=2011-06-15" * + "&RoleSessionName=" + url_encode(role_session_name) * + "&RoleArn=" + url_encode(role_arn) * + "&WebIdentityToken=" + url_encode(token); */ struct aws_byte_buf token_buf; bool success = false; AWS_ZERO_STRUCT(token_buf); struct aws_byte_cursor work_cursor = aws_byte_cursor_from_c_str("Action=AssumeRoleWithWebIdentity&Version=2011-06-15&RoleArn="); if (aws_byte_buf_append_dynamic(&user_data->payload_buf, &work_cursor)) { goto on_finish; } work_cursor = aws_byte_cursor_from_string(impl->role_arn); if (aws_byte_buf_append_encoding_uri_param(&user_data->payload_buf, &work_cursor)) { goto on_finish; } work_cursor = aws_byte_cursor_from_c_str("&RoleSessionName="); if (aws_byte_buf_append_dynamic(&user_data->payload_buf, &work_cursor)) { goto on_finish; } work_cursor = aws_byte_cursor_from_string(impl->role_session_name); if (aws_byte_buf_append_encoding_uri_param(&user_data->payload_buf, &work_cursor)) { goto on_finish; } work_cursor = aws_byte_cursor_from_c_str("&WebIdentityToken="); if (aws_byte_buf_append_dynamic(&user_data->payload_buf, &work_cursor)) { goto on_finish; } if (aws_byte_buf_init_from_file(&token_buf, user_data->allocator, aws_string_c_str(impl->token_file_path))) { goto on_finish; } work_cursor = aws_byte_cursor_from_buf(&token_buf); if (aws_byte_buf_append_encoding_uri_param(&user_data->payload_buf, &work_cursor)) { goto on_finish; } struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&user_data->payload_buf); if (s_make_sts_web_identity_http_query(user_data, &body_cursor) == AWS_OP_ERR) { goto on_finish; } success = true; on_finish: aws_byte_buf_clean_up(&token_buf); if (!success) { s_finalize_get_credentials_query(user_data); } } static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *data) { struct sts_web_identity_user_data *user_data = data; if (connection == NULL) { AWS_LOGF_WARN( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: STS_WEB_IDENTITY provider failed to acquire a connection, error code %d(%s)", (void *)user_data->sts_web_identity_provider, error_code, aws_error_str(error_code)); s_finalize_get_credentials_query(user_data); return; } user_data->connection = connection; s_query_credentials(user_data); } static int s_credentials_provider_sts_web_identity_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_sts_web_identity_impl *impl = provider->impl; struct sts_web_identity_user_data *wrapped_user_data = s_user_data_new(provider, 
callback, user_data); if (wrapped_user_data == NULL) { goto error; } impl->function_table->aws_http_connection_manager_acquire_connection( impl->connection_manager, s_on_acquire_connection, wrapped_user_data); return AWS_OP_SUCCESS; error: s_user_data_destroy(wrapped_user_data); return AWS_OP_ERR; } static void s_credentials_provider_sts_web_identity_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_sts_web_identity_impl *impl = provider->impl; if (impl == NULL) { return; } aws_string_destroy(impl->role_arn); aws_string_destroy(impl->role_session_name); aws_string_destroy(impl->token_file_path); /* aws_http_connection_manager_release will eventually leads to call of s_on_connection_manager_shutdown, * which will do memory release for provider and impl. So We should be freeing impl * related memory first, then call aws_http_connection_manager_release. */ if (impl->connection_manager) { impl->function_table->aws_http_connection_manager_release(impl->connection_manager); } else { /* If provider setup failed halfway through, connection_manager might not exist. * In this case invoke shutdown completion callback directly to finish cleanup */ s_on_connection_manager_shutdown(provider); } /* freeing the provider takes place in the shutdown callback below */ } static struct aws_credentials_provider_vtable s_aws_credentials_provider_sts_web_identity_vtable = { .get_credentials = s_credentials_provider_sts_web_identity_get_credentials_async, .destroy = s_credentials_provider_sts_web_identity_destroy, }; static void s_on_connection_manager_shutdown(void *user_data) { struct aws_credentials_provider *provider = user_data; aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } AWS_STATIC_STRING_FROM_LITERAL(s_region_config, "region"); AWS_STATIC_STRING_FROM_LITERAL(s_region_env, "AWS_DEFAULT_REGION"); AWS_STATIC_STRING_FROM_LITERAL(s_role_arn_config, "role_arn"); AWS_STATIC_STRING_FROM_LITERAL(s_role_arn_env, "AWS_ROLE_ARN"); AWS_STATIC_STRING_FROM_LITERAL(s_role_session_name_config, "role_session_name"); AWS_STATIC_STRING_FROM_LITERAL(s_role_session_name_env, "AWS_ROLE_SESSION_NAME"); AWS_STATIC_STRING_FROM_LITERAL(s_token_file_path_config, "web_identity_token_file"); AWS_STATIC_STRING_FROM_LITERAL(s_token_file_path_env, "AWS_WEB_IDENTITY_TOKEN_FILE"); struct sts_web_identity_parameters { struct aws_allocator *allocator; /* region is actually used to construct endpoint */ struct aws_byte_buf endpoint; struct aws_byte_buf role_arn; struct aws_byte_buf role_session_name; struct aws_byte_buf token_file_path; }; struct aws_profile_collection *s_load_profile(struct aws_allocator *allocator) { struct aws_profile_collection *config_profiles = NULL; struct aws_string *config_file_path = NULL; config_file_path = aws_get_config_file_path(allocator, NULL); if (!config_file_path) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve config file path during sts web identity provider initialization: %s", aws_error_str(aws_last_error())); goto on_error; } config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG); if (config_profiles != NULL) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Successfully built config profile collection from file at (%s)", aws_string_c_str(config_file_path)); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to build config profile collection from file at (%s) : %s", aws_string_c_str(config_file_path), 
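/*
 * Editorial sketch (values are hypothetical, not from the original source): the provider resolves its inputs
 * from explicit options first, then the environment variables above (AWS_DEFAULT_REGION, AWS_ROLE_ARN,
 * AWS_ROLE_SESSION_NAME, AWS_WEB_IDENTITY_TOKEN_FILE), and finally the shared config profile keys declared
 * above, e.g. a profile such as:
 *
 *   [default]
 *   region = us-west-2
 *   role_arn = arn:aws:iam::123456789012:role/demo
 *   role_session_name = my-session
 *   web_identity_token_file = /var/run/secrets/token
 *
 * If no role session name is found in any source, s_generate_uuid_to_buf() below supplies one.
 */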
aws_error_str(aws_last_error())); goto on_error; } aws_string_destroy(config_file_path); return config_profiles; on_error: aws_string_destroy(config_file_path); aws_profile_collection_destroy(config_profiles); return NULL; } static struct aws_byte_cursor s_dot_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("."); static struct aws_byte_cursor s_amazonaws_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".amazonaws.com"); static struct aws_byte_cursor s_cn_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".cn"); AWS_STATIC_STRING_FROM_LITERAL(s_sts_service_name, "sts"); static int s_construct_endpoint( struct aws_allocator *allocator, struct aws_byte_buf *endpoint, const struct aws_string *region, const struct aws_string *service_name) { if (!allocator || !endpoint || !region || !service_name) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } aws_byte_buf_clean_up(endpoint); struct aws_byte_cursor service_cursor = aws_byte_cursor_from_string(service_name); if (aws_byte_buf_init_copy_from_cursor(endpoint, allocator, service_cursor)) { goto on_error; } if (aws_byte_buf_append_dynamic(endpoint, &s_dot_cursor)) { goto on_error; } struct aws_byte_cursor region_cursor; region_cursor = aws_byte_cursor_from_array(region->bytes, region->len); if (aws_byte_buf_append_dynamic(endpoint, &region_cursor)) { goto on_error; } if (aws_byte_buf_append_dynamic(endpoint, &s_amazonaws_cursor)) { goto on_error; } if (aws_string_eq_c_str_ignore_case(region, "cn-north-1") || aws_string_eq_c_str_ignore_case(region, "cn-northwest-1")) { if (aws_byte_buf_append_dynamic(endpoint, &s_cn_cursor)) { goto on_error; } } return AWS_OP_SUCCESS; on_error: aws_byte_buf_clean_up(endpoint); return AWS_OP_ERR; } static int s_generate_uuid_to_buf(struct aws_allocator *allocator, struct aws_byte_buf *dst) { if (!allocator || !dst) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_uuid uuid; if (aws_uuid_init(&uuid)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to initialize a uuid struct: %s", aws_error_str(aws_last_error())); return aws_last_error(); } char uuid_str[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_array(uuid_str, sizeof(uuid_str)); uuid_buf.len = 0; if (aws_uuid_to_str(&uuid, &uuid_buf)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to stringify uuid: %s", aws_error_str(aws_last_error())); return aws_last_error(); } if (aws_byte_buf_init_copy(dst, allocator, &uuid_buf)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to generate role session name during sts web identity provider initialization: %s", aws_error_str(aws_last_error())); return aws_last_error(); } return AWS_OP_SUCCESS; } static struct aws_string *s_check_or_get_with_env( struct aws_allocator *allocator, const struct aws_string *env_key, struct aws_byte_cursor option) { AWS_ASSERT(allocator); struct aws_string *out = NULL; if (option.len) { out = aws_string_new_from_cursor(allocator, &option); } else { aws_get_environment_value(allocator, env_key, &out); } return out; } static void s_check_or_get_with_profile_config( struct aws_allocator *allocator, const struct aws_profile *profile, struct aws_string **target, const struct aws_string *config_key) { if (!allocator || !profile || !config_key) { return; } if ((!(*target) || !(*target)->len)) { if (*target) { aws_string_destroy(*target); } const struct aws_profile_property *property = aws_profile_get_property(profile, config_key); if (property) { *target = aws_string_new_from_string(allocator,
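/*
 * Editorial example (assumption, not in the original source): s_construct_endpoint() above yields
 * "sts.us-east-1.amazonaws.com" for region "us-east-1", and "sts.cn-north-1.amazonaws.com.cn" for the China
 * regions that take the extra ".cn" suffix.
 */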
aws_profile_property_get_value(property)); } } } static void s_parameters_destroy(struct sts_web_identity_parameters *parameters) { if (!parameters) { return; } aws_byte_buf_clean_up(&parameters->endpoint); aws_byte_buf_clean_up(&parameters->role_arn); aws_byte_buf_clean_up(&parameters->role_session_name); aws_byte_buf_clean_up(&parameters->token_file_path); aws_mem_release(parameters->allocator, parameters); } static struct sts_web_identity_parameters *s_parameters_new( struct aws_allocator *allocator, const struct aws_credentials_provider_sts_web_identity_options *options) { struct sts_web_identity_parameters *parameters = aws_mem_calloc(allocator, 1, sizeof(struct sts_web_identity_parameters)); if (parameters == NULL) { return NULL; } parameters->allocator = allocator; bool success = false; struct aws_string *region = s_check_or_get_with_env(allocator, s_region_env, options->region); struct aws_string *role_arn = s_check_or_get_with_env(allocator, s_role_arn_env, options->role_arn); struct aws_string *role_session_name = s_check_or_get_with_env(allocator, s_role_session_name_env, options->role_session_name); struct aws_string *token_file_path = s_check_or_get_with_env(allocator, s_token_file_path_env, options->token_file_path); /** * Check the config profile if region, role_arn, token_file_path or role_session_name was not resolved from an * environment variable. The role session name can also be generated by us using a uuid if it is not found in * either source. */ struct aws_profile_collection *config_profile = NULL; struct aws_string *profile_name = NULL; const struct aws_profile *profile = NULL; bool get_all_parameters = (region && region->len && role_arn && role_arn->len && token_file_path && token_file_path->len); if (!get_all_parameters) { if (options->config_profile_collection_cached) { /* Use cached profile collection */ config_profile = aws_profile_collection_acquire(options->config_profile_collection_cached); } else { /* Load profile collection from files */ config_profile = s_load_profile(allocator); if (!config_profile) { goto on_finish; } } profile_name = aws_get_profile_name(allocator, &options->profile_name_override); profile = aws_profile_collection_get_profile(config_profile, profile_name); if (!profile) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve either region, role arn or token file path during sts web identity provider " "initialization."); goto on_finish; } else { s_check_or_get_with_profile_config(allocator, profile, &region, s_region_config); s_check_or_get_with_profile_config(allocator, profile, &role_arn, s_role_arn_config); s_check_or_get_with_profile_config(allocator, profile, &role_session_name, s_role_session_name_config); s_check_or_get_with_profile_config(allocator, profile, &token_file_path, s_token_file_path_config); } } /* determine endpoint */ if (s_construct_endpoint(allocator, &parameters->endpoint, region, s_sts_service_name)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to construct sts endpoint; the region is probably missing."); goto on_finish; } /* determine role_arn */ if (!role_arn || !role_arn->len || aws_byte_buf_init_copy_from_cursor(&parameters->role_arn, allocator, aws_byte_cursor_from_string(role_arn))) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve role arn during sts web identity provider initialization."); goto on_finish; } /* determine token_file_path */ if (!token_file_path || !token_file_path->len || aws_byte_buf_init_copy_from_cursor( &parameters->token_file_path, allocator,
aws_byte_cursor_from_string(token_file_path))) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve token file path during sts web identity provider initialization."); goto on_finish; } /* determine role_session_name */ if (role_session_name && role_session_name->len) { if (aws_byte_buf_init_copy_from_cursor( &parameters->role_session_name, allocator, aws_byte_cursor_from_string(role_session_name))) { goto on_finish; } } else if (s_generate_uuid_to_buf(allocator, &parameters->role_session_name)) { goto on_finish; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Successfully loaded all required parameters for sts web identity credentials provider."); success = true; on_finish: aws_string_destroy(region); aws_string_destroy(role_arn); aws_string_destroy(role_session_name); aws_string_destroy(token_file_path); aws_string_destroy(profile_name); aws_profile_collection_release(config_profile); if (!success) { s_parameters_destroy(parameters); parameters = NULL; } return parameters; } struct aws_credentials_provider *aws_credentials_provider_new_sts_web_identity( struct aws_allocator *allocator, const struct aws_credentials_provider_sts_web_identity_options *options) { struct sts_web_identity_parameters *parameters = s_parameters_new(allocator, options); if (!parameters) { return NULL; } struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_sts_web_identity_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_sts_web_identity_impl)); if (!provider) { goto on_error; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_sts_web_identity_vtable, impl); if (!options->tls_ctx) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a TLS context must be provided to the STS web identity credentials provider"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx); struct aws_byte_cursor host = aws_byte_cursor_from_buf(&parameters->endpoint); if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &host)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to create tls connection options with error %s", (void *)provider, aws_error_str(aws_last_error())); goto on_error; } struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV4; socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert( STS_WEB_IDENTITY_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); struct aws_http_connection_manager_options manager_options; AWS_ZERO_STRUCT(manager_options); manager_options.bootstrap = options->bootstrap; manager_options.initial_window_size = STS_WEB_IDENTITY_RESPONSE_SIZE_LIMIT; manager_options.socket_options = &socket_options; manager_options.host = host; manager_options.port = 443; manager_options.max_connections = 2; manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown; manager_options.shutdown_complete_user_data = provider; manager_options.tls_connection_options = &tls_connection_options; impl->function_table = options->function_table; if (impl->function_table == NULL) { impl->function_table =
g_aws_credentials_provider_http_function_table; } impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options); if (impl->connection_manager == NULL) { goto on_error; } impl->role_arn = aws_string_new_from_array(allocator, parameters->role_arn.buffer, parameters->role_arn.len); if (impl->role_arn == NULL) { goto on_error; } impl->role_session_name = aws_string_new_from_array(allocator, parameters->role_session_name.buffer, parameters->role_session_name.len); if (impl->role_session_name == NULL) { goto on_error; } impl->token_file_path = aws_string_new_from_array(allocator, parameters->token_file_path.buffer, parameters->token_file_path.len); if (impl->token_file_path == NULL) { goto on_error; } provider->shutdown_options = options->shutdown_options; s_parameters_destroy(parameters); aws_tls_connection_options_clean_up(&tls_connection_options); return provider; on_error: aws_credentials_provider_destroy(provider); s_parameters_destroy(parameters); aws_tls_connection_options_clean_up(&tls_connection_options); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_provider_x509.c000066400000000000000000000536721456575232400266460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) # pragma warning(disable : 4232) #endif /* _MSC_VER */ /* IoT Core credentials body response is currently ~ 1100 Bytes*/ #define X509_RESPONSE_SIZE_INITIAL 1024 #define X509_RESPONSE_SIZE_LIMIT 2048 #define X509_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2 struct aws_credentials_provider_x509_impl { struct aws_http_connection_manager *connection_manager; const struct aws_auth_http_system_vtable *function_table; struct aws_byte_buf thing_name; struct aws_byte_buf role_alias_path; struct aws_byte_buf endpoint; struct aws_tls_connection_options tls_connection_options; }; /* * Tracking structure for each outstanding async query to an x509 provider */ struct aws_credentials_provider_x509_user_data { /* immutable post-creation */ struct aws_allocator *allocator; struct aws_credentials_provider *x509_provider; aws_on_get_credentials_callback_fn *original_callback; void *original_user_data; /* mutable */ struct aws_http_connection *connection; struct aws_http_message *request; struct aws_byte_buf response; int status_code; int error_code; }; static void s_aws_credentials_provider_x509_user_data_destroy( struct aws_credentials_provider_x509_user_data *user_data) { if (user_data == NULL) { return; } struct aws_credentials_provider_x509_impl *impl = user_data->x509_provider->impl; if (user_data->connection) { impl->function_table->aws_http_connection_manager_release_connection( impl->connection_manager, user_data->connection); } aws_byte_buf_clean_up(&user_data->response); if (user_data->request) { aws_http_message_destroy(user_data->request); } aws_credentials_provider_release(user_data->x509_provider); aws_mem_release(user_data->allocator, user_data); } static struct aws_credentials_provider_x509_user_data *s_aws_credentials_provider_x509_user_data_new( struct aws_credentials_provider *x509_provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_x509_user_data *wrapped_user_data = aws_mem_calloc(x509_provider->allocator, 1, sizeof(struct 
aws_credentials_provider_x509_user_data)); if (wrapped_user_data == NULL) { goto on_error; } wrapped_user_data->allocator = x509_provider->allocator; wrapped_user_data->x509_provider = x509_provider; aws_credentials_provider_acquire(x509_provider); wrapped_user_data->original_user_data = user_data; wrapped_user_data->original_callback = callback; if (aws_byte_buf_init(&wrapped_user_data->response, x509_provider->allocator, X509_RESPONSE_SIZE_INITIAL)) { goto on_error; } return wrapped_user_data; on_error: s_aws_credentials_provider_x509_user_data_destroy(wrapped_user_data); return NULL; } static void s_aws_credentials_provider_x509_user_data_reset_response( struct aws_credentials_provider_x509_user_data *x509_user_data) { x509_user_data->response.len = 0; x509_user_data->status_code = 0; if (x509_user_data->request) { aws_http_message_destroy(x509_user_data->request); x509_user_data->request = NULL; } } /* * In general, the returned json document looks something like: { "credentials": { "accessKeyId" : "...", "secretAccessKey" : "...", "sessionToken" : "...", "expiration" : "2019-05-29T00:21:43Z" } } */ static struct aws_credentials *s_parse_credentials_from_iot_core_document( struct aws_allocator *allocator, struct aws_byte_buf *document) { struct aws_credentials *credentials = NULL; struct aws_json_value *document_root = NULL; if (aws_byte_buf_append_null_terminator(document)) { goto done; } struct aws_byte_cursor document_cursor = aws_byte_cursor_from_buf(document); document_root = aws_json_value_new_from_string(allocator, document_cursor); if (document_root == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse IoT Core response as Json document."); goto done; } /* * pull out the root "Credentials" components */ struct aws_json_value *creds = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("credentials")); if (!aws_json_value_is_object(creds)) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse credentials from IoT Core response."); goto done; } struct aws_parse_credentials_from_json_doc_options parse_options = { .access_key_id_name = "accessKeyId", .secret_access_key_name = "secretAccessKey", .token_name = "sessionToken", .expiration_name = "expiration", .expiration_format = AWS_PCEF_STRING_ISO_8601_DATE, .token_required = true, .expiration_required = false, }; credentials = aws_parse_credentials_from_aws_json_object(allocator, creds, &parse_options); if (!credentials) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "X509 credentials provider failed to parse credentials"); } done: if (document_root != NULL) { aws_json_value_destroy(document_root); } return credentials; } /* * No matter the result, this always gets called assuming that x509_user_data is successfully allocated */ static void s_x509_finalize_get_credentials_query(struct aws_credentials_provider_x509_user_data *x509_user_data) { /* Try to build credentials from whatever, if anything, was in the result */ struct aws_credentials *credentials = s_parse_credentials_from_iot_core_document(x509_user_data->allocator, &x509_user_data->response); if (credentials != NULL) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) X509 credentials provider successfully queried credentials", (void *)x509_user_data->x509_provider); } else { if (x509_user_data->error_code == AWS_ERROR_SUCCESS) { x509_user_data->error_code = aws_last_error(); if (x509_user_data->error_code == AWS_ERROR_SUCCESS) { x509_user_data->error_code = 
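/*
 * Editorial sketch (not part of the original source): a successfully parsed document becomes an aws_credentials
 * object whose fields callers can read back through the public getters, e.g.
 *
 *   struct aws_byte_cursor akid = aws_credentials_get_access_key_id(credentials);
 *   struct aws_byte_cursor secret = aws_credentials_get_secret_access_key(credentials);
 *   struct aws_byte_cursor token = aws_credentials_get_session_token(credentials);
 *   uint64_t expires = aws_credentials_get_expiration_timepoint_seconds(credentials);
 *
 * The getter names are taken from aws-c-auth's credentials API; treat this as illustrative usage only.
 */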
AWS_AUTH_CREDENTIALS_PROVIDER_X509_SOURCE_FAILURE; } } AWS_LOGF_WARN( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) X509 credentials provider failed to query credentials with error %d(%s)", (void *)x509_user_data->x509_provider, x509_user_data->error_code, aws_error_str(x509_user_data->error_code)); } /* pass the credentials back */ x509_user_data->original_callback(credentials, x509_user_data->error_code, x509_user_data->original_user_data); /* clean up */ s_aws_credentials_provider_x509_user_data_destroy(x509_user_data); aws_credentials_release(credentials); } static int s_x509_on_incoming_body_fn( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct aws_credentials_provider_x509_user_data *x509_user_data = user_data; struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl; AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) X509 credentials provider received %zu response bytes", (void *)x509_user_data->x509_provider, data->len); if (data->len + x509_user_data->response.len > X509_RESPONSE_SIZE_LIMIT) { impl->function_table->aws_http_connection_close(x509_user_data->connection); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) X509 credentials provider query response exceeded maximum allowed length", (void *)x509_user_data->x509_provider); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (aws_byte_buf_append_dynamic(&x509_user_data->response, data)) { impl->function_table->aws_http_connection_close(x509_user_data->connection); AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) X509 credentials provider query error appending response", (void *)x509_user_data->x509_provider); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_x509_on_incoming_headers_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)header_array; (void)num_headers; if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) { return AWS_OP_SUCCESS; } struct aws_credentials_provider_x509_user_data *x509_user_data = user_data; if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { if (x509_user_data->status_code == 0) { struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl; if (impl->function_table->aws_http_stream_get_incoming_response_status( stream, &x509_user_data->status_code)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) X509 credentials provider failed to get http status code", (void *)x509_user_data->x509_provider); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) X509 credentials provider query received http status code %d", (void *)x509_user_data->x509_provider, x509_user_data->status_code); } } return AWS_OP_SUCCESS; } static void s_x509_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_credentials_provider_x509_user_data *x509_user_data = user_data; aws_http_message_destroy(x509_user_data->request); x509_user_data->request = NULL; struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl; impl->function_table->aws_http_stream_release(stream); /* * On anything other than a 200, nullify the response and pretend there was * an error */ if (x509_user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || error_code != AWS_OP_SUCCESS) { x509_user_data->response.len = 0; if (error_code != AWS_OP_SUCCESS) { x509_user_data->error_code = error_code; } else 
{ x509_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE; } } s_x509_finalize_get_credentials_query(x509_user_data); } AWS_STATIC_STRING_FROM_LITERAL(s_x509_accept_header, "Accept"); AWS_STATIC_STRING_FROM_LITERAL(s_x509_accept_header_value, "*/*"); AWS_STATIC_STRING_FROM_LITERAL(s_x509_user_agent_header, "User-Agent"); AWS_STATIC_STRING_FROM_LITERAL(s_x509_user_agent_header_value, "aws-sdk-crt/x509-credentials-provider"); AWS_STATIC_STRING_FROM_LITERAL(s_x509_h1_0_keep_alive_header, "Connection"); AWS_STATIC_STRING_FROM_LITERAL(s_x509_h1_0_keep_alive_header_value, "keep-alive"); AWS_STATIC_STRING_FROM_LITERAL(s_x509_thing_name_header, "x-amzn-iot-thingname"); AWS_STATIC_STRING_FROM_LITERAL(s_x509_host_header, "Host"); static int s_make_x509_http_query( struct aws_credentials_provider_x509_user_data *x509_user_data, struct aws_byte_cursor *request_path) { AWS_FATAL_ASSERT(x509_user_data->connection); struct aws_http_stream *stream = NULL; struct aws_http_message *request = aws_http_message_new_request(x509_user_data->allocator); if (request == NULL) { return AWS_OP_ERR; } struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl; struct aws_http_header thing_name_header = { .name = aws_byte_cursor_from_string(s_x509_thing_name_header), .value = aws_byte_cursor_from_buf(&impl->thing_name), }; if (aws_http_message_add_header(request, thing_name_header)) { goto on_error; } struct aws_http_header accept_header = { .name = aws_byte_cursor_from_string(s_x509_accept_header), .value = aws_byte_cursor_from_string(s_x509_accept_header_value), }; if (aws_http_message_add_header(request, accept_header)) { goto on_error; } struct aws_http_header user_agent_header = { .name = aws_byte_cursor_from_string(s_x509_user_agent_header), .value = aws_byte_cursor_from_string(s_x509_user_agent_header_value), }; if (aws_http_message_add_header(request, user_agent_header)) { goto on_error; } struct aws_http_header keep_alive_header = { .name = aws_byte_cursor_from_string(s_x509_h1_0_keep_alive_header), .value = aws_byte_cursor_from_string(s_x509_h1_0_keep_alive_header_value), }; if (aws_http_message_add_header(request, keep_alive_header)) { goto on_error; } struct aws_http_header host_header = { .name = aws_byte_cursor_from_string(s_x509_host_header), .value = aws_byte_cursor_from_buf(&impl->endpoint), }; if (aws_http_message_add_header(request, host_header)) { goto on_error; } if (aws_http_message_set_request_path(request, *request_path)) { goto on_error; } if (aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"))) { goto on_error; } x509_user_data->request = request; struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .on_response_headers = s_x509_on_incoming_headers_fn, .on_response_header_block_done = NULL, .on_response_body = s_x509_on_incoming_body_fn, .on_complete = s_x509_on_stream_complete_fn, .user_data = x509_user_data, .request = request, }; stream = impl->function_table->aws_http_connection_make_request(x509_user_data->connection, &request_options); if (!stream) { goto on_error; } if (impl->function_table->aws_http_stream_activate(stream)) { goto on_error; } return AWS_OP_SUCCESS; on_error: impl->function_table->aws_http_stream_release(stream); aws_http_message_destroy(request); x509_user_data->request = NULL; return AWS_OP_ERR; } static void s_x509_query_credentials(struct aws_credentials_provider_x509_user_data *x509_user_data) { AWS_FATAL_ASSERT(x509_user_data->connection); struct 
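/*
 * Editorial sketch (header order and placeholders are assumptions, not from the original source):
 * s_make_x509_http_query() above produces a request of roughly this shape, with the path built later from the
 * configured role alias:
 *
 *   GET /role-aliases/<role-alias>/credentials HTTP/1.1
 *   Host: <iot credentials endpoint>
 *   x-amzn-iot-thingname: <thing name>
 *   User-Agent: aws-sdk-crt/x509-credentials-provider
 *   Connection: keep-alive
 */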
aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl; /* "Clear" the result */ s_aws_credentials_provider_x509_user_data_reset_response(x509_user_data); struct aws_byte_cursor request_path_cursor = aws_byte_cursor_from_buf(&impl->role_alias_path); if (s_make_x509_http_query(x509_user_data, &request_path_cursor) == AWS_OP_ERR) { s_x509_finalize_get_credentials_query(x509_user_data); } } static void s_x509_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_credentials_provider_x509_user_data *x509_user_data = user_data; if (connection == NULL) { AWS_LOGF_WARN( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: X509 provider failed to acquire a connection, error code %d(%s)", (void *)x509_user_data->x509_provider, error_code, aws_error_str(error_code)); x509_user_data->error_code = error_code; s_x509_finalize_get_credentials_query(x509_user_data); return; } x509_user_data->connection = connection; s_x509_query_credentials(x509_user_data); } static int s_credentials_provider_x509_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_x509_impl *impl = provider->impl; struct aws_credentials_provider_x509_user_data *wrapped_user_data = s_aws_credentials_provider_x509_user_data_new(provider, callback, user_data); if (wrapped_user_data == NULL) { goto error; } impl->function_table->aws_http_connection_manager_acquire_connection( impl->connection_manager, s_x509_on_acquire_connection, wrapped_user_data); return AWS_OP_SUCCESS; error: s_aws_credentials_provider_x509_user_data_destroy(wrapped_user_data); return AWS_OP_ERR; } static void s_credentials_provider_x509_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_x509_impl *impl = provider->impl; if (impl == NULL) { return; } aws_byte_buf_clean_up(&impl->thing_name); aws_byte_buf_clean_up(&impl->role_alias_path); aws_byte_buf_clean_up(&impl->endpoint); aws_tls_connection_options_clean_up(&impl->tls_connection_options); /* aws_http_connection_manager_release will eventually leads to call of s_on_connection_manager_shutdown, * which will do memory release for provider and impl. So We should be freeing impl * related memory first, then call aws_http_connection_manager_release. 
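 * Editorial addition: the intended teardown sequence is roughly
 *   aws_credentials_provider_release() -> s_credentials_provider_x509_destroy()
 *     -> aws_http_connection_manager_release() -> (async) s_on_connection_manager_shutdown()
 *       -> aws_credentials_provider_invoke_shutdown_callback() -> aws_mem_release(provider),
 * which is why the impl's buffers must be cleaned up before the manager release is issued.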
*/ impl->function_table->aws_http_connection_manager_release(impl->connection_manager); /* freeing the provider takes place in the shutdown callback below */ } static struct aws_credentials_provider_vtable s_aws_credentials_provider_x509_vtable = { .get_credentials = s_credentials_provider_x509_get_credentials_async, .destroy = s_credentials_provider_x509_destroy, }; static void s_on_connection_manager_shutdown(void *user_data) { struct aws_credentials_provider *provider = user_data; aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } struct aws_credentials_provider *aws_credentials_provider_new_x509( struct aws_allocator *allocator, const struct aws_credentials_provider_x509_options *options) { struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_x509_impl *impl = NULL; if (options->tls_connection_options == NULL || options->thing_name.len == 0 || options->role_alias.len == 0) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "To create an X.509 creds provider, a tls_connection_options, an IoT thing name and an IAM role alias are " "required."); goto on_error; } aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_x509_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_x509_vtable, impl); if (aws_tls_connection_options_copy(&impl->tls_connection_options, options->tls_connection_options)) { goto on_error; } struct aws_byte_cursor server_name = options->endpoint; if (aws_tls_connection_options_set_server_name(&impl->tls_connection_options, allocator, &(server_name))) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to set tls connection options's server name with error %s", (void *)provider, aws_error_debug_str(aws_last_error())); goto on_error; } struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV4; socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert( X509_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); struct aws_http_connection_manager_options manager_options; AWS_ZERO_STRUCT(manager_options); manager_options.bootstrap = options->bootstrap; manager_options.initial_window_size = X509_RESPONSE_SIZE_LIMIT; manager_options.socket_options = &socket_options; manager_options.host = options->endpoint; manager_options.port = 443; manager_options.max_connections = 2; manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown; manager_options.shutdown_complete_user_data = provider; manager_options.tls_connection_options = &impl->tls_connection_options; manager_options.proxy_options = options->proxy_options; impl->function_table = options->function_table; if (impl->function_table == NULL) { impl->function_table = g_aws_credentials_provider_http_function_table; } impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options); if (impl->connection_manager == NULL) { goto on_error; } if (aws_byte_buf_init_copy_from_cursor(&impl->thing_name, allocator, options->thing_name)) { goto on_error; } if (aws_byte_buf_init_copy_from_cursor(&impl->endpoint, allocator, options->endpoint)) { goto on_error; } /* the expected path is "/role-aliases//credentials" */ struct aws_byte_cursor 
prefix_cursor = aws_byte_cursor_from_c_str("/role-aliases/"); if (aws_byte_buf_init_copy_from_cursor(&impl->role_alias_path, allocator, prefix_cursor)) { goto on_error; } if (aws_byte_buf_append_dynamic(&impl->role_alias_path, &options->role_alias)) { goto on_error; } struct aws_byte_cursor creds_cursor = aws_byte_cursor_from_c_str("/credentials"); if (aws_byte_buf_append_dynamic(&impl->role_alias_path, &creds_cursor)) { goto on_error; } provider->shutdown_options = options->shutdown_options; return provider; on_error: aws_credentials_provider_destroy(provider); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/credentials_utils.c000066400000000000000000000325111456575232400253540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4232) #endif /* _MSC_VER */ static struct aws_auth_http_system_vtable s_default_function_table = { .aws_http_connection_manager_new = aws_http_connection_manager_new, .aws_http_connection_manager_release = aws_http_connection_manager_release, .aws_http_connection_manager_acquire_connection = aws_http_connection_manager_acquire_connection, .aws_http_connection_manager_release_connection = aws_http_connection_manager_release_connection, .aws_http_connection_make_request = aws_http_connection_make_request, .aws_http_stream_activate = aws_http_stream_activate, .aws_http_stream_get_connection = aws_http_stream_get_connection, .aws_http_stream_get_incoming_response_status = aws_http_stream_get_incoming_response_status, .aws_http_stream_release = aws_http_stream_release, .aws_http_connection_close = aws_http_connection_close, .aws_high_res_clock_get_ticks = aws_high_res_clock_get_ticks, }; const struct aws_auth_http_system_vtable *g_aws_credentials_provider_http_function_table = &s_default_function_table; void aws_credentials_query_init( struct aws_credentials_query *query, struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn *callback, void *user_data) { AWS_ZERO_STRUCT(*query); query->provider = provider; query->user_data = user_data; query->callback = callback; aws_credentials_provider_acquire(provider); } void aws_credentials_query_clean_up(struct aws_credentials_query *query) { if (query != NULL) { aws_credentials_provider_release(query->provider); } } void aws_credentials_provider_init_base( struct aws_credentials_provider *provider, struct aws_allocator *allocator, struct aws_credentials_provider_vtable *vtable, void *impl) { provider->allocator = allocator; provider->vtable = vtable; provider->impl = impl; aws_atomic_init_int(&provider->ref_count, 1); } void aws_credentials_provider_invoke_shutdown_callback(struct aws_credentials_provider *provider) { if (provider && provider->shutdown_options.shutdown_callback) { provider->shutdown_options.shutdown_callback(provider->shutdown_options.shutdown_user_data); } } static bool s_parse_expiration_value_from_json_object( struct aws_json_value *value, const struct aws_parse_credentials_from_json_doc_options *options, uint64_t *expiration_timepoint_in_seconds) { if (value == NULL) { AWS_LOGF_INFO(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "No credentials Expiration field in Json document."); return false; } struct aws_byte_cursor expiration_cursor = { .ptr = NULL, .len = 0, }; switch (options->expiration_format) { case AWS_PCEF_STRING_ISO_8601_DATE: { if 
(aws_json_value_get_string(value, &expiration_cursor)) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Unabled to extract credentials Expiration field from Json document."); return false; } if (expiration_cursor.len == 0) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Parsed a credentials json document with empty expiration."); return false; } struct aws_date_time expiration; if (aws_date_time_init_from_str_cursor(&expiration, &expiration_cursor, AWS_DATE_FORMAT_ISO_8601)) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "credentials Expiration in Json document is not a valid ISO_8601 date string."); return false; } *expiration_timepoint_in_seconds = (uint64_t)aws_date_time_as_epoch_secs(&expiration); return true; } case AWS_PCEF_NUMBER_UNIX_EPOCH: { double expiration_value = 0; if (aws_json_value_get_number(value, &expiration_value)) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Unabled to extract credentials Expiration field from Json document."); return false; } *expiration_timepoint_in_seconds = (uint64_t)expiration_value; return true; } case AWS_PCEF_NUMBER_UNIX_EPOCH_MS: { double expiration_value_ms = 0; if (aws_json_value_get_number(value, &expiration_value_ms)) { AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Unabled to extract credentials Expiration field from Json document."); return false; } *expiration_timepoint_in_seconds = aws_timestamp_convert((uint64_t)expiration_value_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, NULL); return true; } default: return false; } } struct aws_credentials *aws_parse_credentials_from_aws_json_object( struct aws_allocator *allocator, struct aws_json_value *document_root, const struct aws_parse_credentials_from_json_doc_options *options) { AWS_FATAL_ASSERT(allocator); AWS_FATAL_ASSERT(document_root); AWS_FATAL_ASSERT(options); AWS_FATAL_ASSERT(options->access_key_id_name); AWS_FATAL_ASSERT(options->secret_access_key_name); if (options->token_required) { AWS_FATAL_ASSERT(options->token_name); } if (options->expiration_required) { AWS_FATAL_ASSERT(options->expiration_name); } struct aws_credentials *credentials = NULL; struct aws_json_value *access_key_id = NULL; struct aws_json_value *secrete_access_key = NULL; struct aws_json_value *token = NULL; struct aws_json_value *creds_expiration = NULL; bool parse_error = true; /* * Pull out the credentials components */ struct aws_byte_cursor access_key_id_cursor; access_key_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str((char *)options->access_key_id_name)); if (!aws_json_value_is_string(access_key_id) || aws_json_value_get_string(access_key_id, &access_key_id_cursor) == AWS_OP_ERR) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse AccessKeyId from Json document."); goto done; } struct aws_byte_cursor secrete_access_key_cursor; secrete_access_key = aws_json_value_get_from_object( document_root, aws_byte_cursor_from_c_str((char *)options->secret_access_key_name)); if (!aws_json_value_is_string(secrete_access_key) || aws_json_value_get_string(secrete_access_key, &secrete_access_key_cursor) == AWS_OP_ERR) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse SecretAccessKey from Json document."); goto done; } struct aws_byte_cursor token_cursor; if (options->token_name) { token = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str((char *)options->token_name)); if (!aws_json_value_is_string(token) || aws_json_value_get_string(token, &token_cursor) == AWS_OP_ERR) { if (options->token_required) { 
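/*
 * Editorial examples for s_parse_expiration_value_from_json_object() above (field names and values are
 * hypothetical): the three supported expiration encodings look like
 *   "Expiration": "2019-05-29T00:21:43Z"   (AWS_PCEF_STRING_ISO_8601_DATE)
 *   "Expiration": 1559089303               (AWS_PCEF_NUMBER_UNIX_EPOCH, seconds)
 *   "Expiration": 1559089303000            (AWS_PCEF_NUMBER_UNIX_EPOCH_MS, milliseconds, converted to seconds)
 */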
AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse Token from Json document."); goto done; } } } // needed to avoid uninitialized local variable error uint64_t expiration_timepoint_in_seconds = UINT64_MAX; if (options->expiration_name) { creds_expiration = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str((char *)options->expiration_name)); if (!s_parse_expiration_value_from_json_object(creds_expiration, options, &expiration_timepoint_in_seconds)) { if (options->expiration_required) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse Expiration from Json document."); goto done; } } } /* * Build the credentials */ if (access_key_id_cursor.len == 0 || secrete_access_key_cursor.len == 0) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Parsed an unexpected credentials json document, either access key, secret key is empty."); goto done; } struct aws_byte_cursor session_token_cursor; AWS_ZERO_STRUCT(session_token_cursor); if (token) { aws_json_value_get_string(token, &session_token_cursor); if (options->token_required && session_token_cursor.len == 0) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Parsed an unexpected credentials json document with empty token."); goto done; } } credentials = aws_credentials_new( allocator, access_key_id_cursor, secrete_access_key_cursor, session_token_cursor, expiration_timepoint_in_seconds); if (credentials == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to allocate memory for credentials."); parse_error = false; goto done; } done: if (parse_error) { aws_raise_error(AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE); } return credentials; } struct aws_credentials *aws_parse_credentials_from_json_document( struct aws_allocator *allocator, struct aws_byte_cursor document, const struct aws_parse_credentials_from_json_doc_options *options) { struct aws_credentials *credentials = NULL; struct aws_json_value *document_root = aws_json_value_new_from_string(allocator, document); if (document_root == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse document as Json document."); return NULL; } struct aws_json_value *top_level_object = NULL; if (options->top_level_object_name) { top_level_object = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str(options->top_level_object_name)); if (!top_level_object) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "failed to parse top level object in json document."); goto done; } } credentials = aws_parse_credentials_from_aws_json_object( allocator, top_level_object ? top_level_object : document_root, options); done: aws_json_value_destroy(document_root); return credentials; } static bool s_is_transient_network_error(int error_code) { return error_code == AWS_ERROR_HTTP_CONNECTION_CLOSED || error_code == AWS_ERROR_HTTP_SERVER_CLOSED || error_code == AWS_IO_SOCKET_CLOSED || error_code == AWS_IO_SOCKET_CONNECT_ABORTED || error_code == AWS_IO_SOCKET_CONNECTION_REFUSED || error_code == AWS_IO_SOCKET_NETWORK_DOWN || error_code == AWS_IO_DNS_QUERY_FAILED || error_code == AWS_IO_DNS_NO_ADDRESS_FOR_HOST || error_code == AWS_IO_SOCKET_TIMEOUT || error_code == AWS_IO_TLS_NEGOTIATION_TIMEOUT || error_code == AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT; } enum aws_retry_error_type aws_credentials_provider_compute_retry_error_type(int response_code, int error_code) { enum aws_retry_error_type error_type = response_code >= 400 && response_code < 500 ? 
AWS_RETRY_ERROR_TYPE_CLIENT_ERROR : AWS_RETRY_ERROR_TYPE_SERVER_ERROR; if (s_is_transient_network_error(error_code)) { error_type = AWS_RETRY_ERROR_TYPE_TRANSIENT; } /* server throttling us is retryable */ if (response_code == AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS) { /* force a new connection on this. */ error_type = AWS_RETRY_ERROR_TYPE_THROTTLING; } return error_type; } struct aws_profile_collection *aws_load_profile_collection_from_config_file( struct aws_allocator *allocator, struct aws_byte_cursor config_file_name_override) { struct aws_profile_collection *config_profiles = NULL; struct aws_string *config_file_path = NULL; config_file_path = aws_get_config_file_path(allocator, &config_file_name_override); if (!config_file_path) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to resolve config file path: %s", aws_error_str(aws_last_error())); return NULL; } config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG); if (config_profiles != NULL) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Successfully built config profile collection from file at (%s)", aws_string_c_str(config_file_path)); } else { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to build config profile collection from file at (%s) : %s", aws_string_c_str(config_file_path), aws_error_str(aws_last_error())); } aws_string_destroy(config_file_path); return config_profiles; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/key_derivation.c000066400000000000000000000311521456575232400246530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include /* * The maximum number of iterations we will attempt to derive a valid ecc key for. The probability that this counter * value ever gets reached is vanishingly low -- with reasonable uniformity/independence assumptions, it's * approximately * * 2 ^ (-32 * 254) */ #define MAX_KEY_DERIVATION_COUNTER_VALUE 254 /* * The encoding (32-bit, big-endian) of the prefix to the FixedInputString when fed to the hmac function, per * the sigv4a key derivation specification. */ AWS_STATIC_STRING_FROM_LITERAL(s_1_as_four_bytes_be, "\x00\x00\x00\x01"); /* * The encoding (32-bit, big-endian) of the "Length" component of the sigv4a key derivation specification */ AWS_STATIC_STRING_FROM_LITERAL(s_256_as_four_bytes_be, "\x00\x00\x01\x00"); AWS_STRING_FROM_LITERAL(g_signature_type_sigv4a_http_request, "AWS4-ECDSA-P256-SHA256"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_buffer_prefix, "AWS4A"); /* * This constructs the fixed input byte sequence of the Sigv4a key derivation specification. It also includes the * value (0x01 as a 32-bit big endian value) that is pre-pended to the fixed input before invoking the hmac to * generate the candidate key value. 
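 * (Editorial worked example for the sizing formula given below: with the well-known documentation access key id
 * AKIAIOSFODNN7EXAMPLE, which is 20 characters long, the required capacity is 32 + 20 = 52 bytes.)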
* * The final output looks like * * 0x00000001 || "AWS4-ECDSA-P256-SHA256" || 0x00 || AccessKeyId || CounterValue as uint8_t || 0x00000100 (Length) * * From this, we can determine the necessary buffer capacity when setting up the fixed input buffer: * * 4 + 22 + 1 + len(AccessKeyId) + 1 + 4 = 32 + len(AccessKeyId) */ static int s_aws_build_fixed_input_buffer( struct aws_byte_buf *fixed_input, const struct aws_credentials *credentials, const uint8_t counter) { if (counter == 0 || counter > MAX_KEY_DERIVATION_COUNTER_VALUE) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!aws_byte_buf_is_valid(fixed_input)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } aws_byte_buf_reset(fixed_input, false); /* * A placeholder value that's not actually part of the fixed input string in the spec, but is always this value * and is always the first byte of the hmac-ed string. */ struct aws_byte_cursor one_cursor = aws_byte_cursor_from_string(s_1_as_four_bytes_be); if (aws_byte_buf_append_dynamic(fixed_input, &one_cursor)) { return AWS_OP_ERR; } struct aws_byte_cursor sigv4a_algorithm_cursor = aws_byte_cursor_from_string(g_signature_type_sigv4a_http_request); if (aws_byte_buf_append(fixed_input, &sigv4a_algorithm_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(fixed_input, 0)) { return AWS_OP_ERR; } struct aws_byte_cursor access_key_cursor = aws_credentials_get_access_key_id(credentials); if (aws_byte_buf_append(fixed_input, &access_key_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append_byte_dynamic(fixed_input, counter)) { return AWS_OP_ERR; } struct aws_byte_cursor encoded_bit_length_cursor = aws_byte_cursor_from_string(s_256_as_four_bytes_be); if (aws_byte_buf_append_dynamic(fixed_input, &encoded_bit_length_cursor)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * aws_be_bytes_compare_constant_time() and aws_be_bytes_add_one_constant_time() are constant-time arithmetic functions * that operate on raw bytes as if they were unbounded integers in a big-endian base 256 format. */ /* * In the following function gt and eq are updated together. After each update, the variables will be * in one of the following states: * * (1) gt is 0, eq is 1, and from an ordering perspective, lhs == rhs, as checked "so far" * (2) gt is 1, eq is 0, (lhs > rhs) * (3) gt is 0, eq is 0, (lhs < rhs) * * States (2) and (3) are terminal states that cannot be exited since eq is 0 and is the and-wise mask of all * subsequent gt updates. Similarly, once eq is zero it cannot ever become non-zero. * * Intuitively these ideas match the standard way of comparing magnitudes by considering digit count and * digits from most significant to least significant. * * Let l and r be the two digits that we are * comparing between lhs and rhs. Assume 0 <= l, r <= 255, each held in a 32-bit integer. * * gt is maintained by the following bit trick: * * l > r <=> * (r - l) < 0 <=> * (r - l) as an int32 has the high bit set <=> * ((r - l) >> 31) & 0x01 == 1 * * eq is maintained by the following bit trick: * * l == r <=> * l ^ r == 0 <=> * (l ^ r) - 1 == -1 <=> * (((l ^ r) - 1) >> 31) & 0x01 == 1 * * We apply the volatile type modifier to attempt to prevent all early-out optimizations that a compiler might * apply if it performed constraint-based reasoning on the logic.
This is based on treating volatile * semantically as "this value can change underneath you at any time so you always have to re-read it and cannot * reason statically about program behavior when it reaches a certain value (like 0)" */ /** * Compares two large unsigned integers in a raw byte format. * The two operands *must* be the same size (simplifies the problem significantly). * * The output parameter comparison_result is set to: * -1 if lhs_raw_be_bigint < rhs_raw_be_bigint * 0 if lhs_raw_be_bigint == rhs_raw_be_bigint * 1 if lhs_raw_be_bigint > rhs_raw_be_bigint */ int aws_be_bytes_compare_constant_time( const struct aws_byte_buf *lhs_raw_be_bigint, const struct aws_byte_buf *rhs_raw_be_bigint, int *comparison_result) { AWS_FATAL_PRECONDITION(aws_byte_buf_is_valid(lhs_raw_be_bigint)); AWS_FATAL_PRECONDITION(aws_byte_buf_is_valid(rhs_raw_be_bigint)); /* * We only need to support comparing byte sequences of the same length here */ const size_t lhs_len = lhs_raw_be_bigint->len; if (lhs_len != rhs_raw_be_bigint->len) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } volatile uint8_t gt = 0; volatile uint8_t eq = 1; const uint8_t *lhs_raw_bytes = lhs_raw_be_bigint->buffer; const uint8_t *rhs_raw_bytes = rhs_raw_be_bigint->buffer; for (size_t i = 0; i < lhs_len; ++i) { volatile int32_t lhs_digit = (int32_t)lhs_raw_bytes[i]; volatile int32_t rhs_digit = (int32_t)rhs_raw_bytes[i]; /* * For each digit, check for a state (1) => (2) ie lhs > rhs, or (1) => (3) ie lhs < rhs transition * based on comparing the two digits in constant time using the ideas explained in the giant comment * block above this function. */ gt |= ((rhs_digit - lhs_digit) >> 31) & eq; eq &= (((lhs_digit ^ rhs_digit) - 1) >> 31) & 0x01; } *comparison_result = gt + gt + eq - 1; return AWS_OP_SUCCESS; } /** * Adds one to a large unsigned integer represented by a sequence of bytes. * * A maximal value will roll over to zero. This does not affect the correctness of the users * of this function. 
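 * Editorial examples: {0x01, 0xFF} becomes {0x02, 0x00} after the carry propagates, and the maximal value
 * {0xFF, 0xFF} rolls over to {0x00, 0x00} as described above.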
*/ void aws_be_bytes_add_one_constant_time(struct aws_byte_buf *raw_be_bigint) { AWS_FATAL_PRECONDITION(aws_byte_buf_is_valid(raw_be_bigint)); const size_t byte_count = raw_be_bigint->len; volatile uint32_t carry = 1; uint8_t *raw_bytes = raw_be_bigint->buffer; for (size_t i = 0; i < byte_count; ++i) { const size_t index = byte_count - i - 1; volatile uint32_t current_digit = raw_bytes[index]; current_digit += carry; carry = (current_digit >> 8) & 0x01; raw_bytes[index] = (uint8_t)(current_digit & 0xFF); } } /* clang-format off */ /* In the spec, this is N-2 */ static uint8_t s_n_minus_2[32] = { 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84, 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x4F, }; /* clang-format on */ enum aws_key_derivation_result { AKDR_SUCCESS, AKDR_NEXT_COUNTER, AKDR_FAILURE, }; static enum aws_key_derivation_result s_aws_derive_ecc_private_key( struct aws_byte_buf *private_key_value, const struct aws_byte_buf *k0) { AWS_FATAL_ASSERT(k0->len == aws_ecc_key_coordinate_byte_size_from_curve_name(AWS_CAL_ECDSA_P256)); aws_byte_buf_reset(private_key_value, false); struct aws_byte_buf s_n_minus_2_buf = { .allocator = NULL, .buffer = s_n_minus_2, .capacity = AWS_ARRAY_SIZE(s_n_minus_2), .len = AWS_ARRAY_SIZE(s_n_minus_2), }; int comparison_result = 0; if (aws_be_bytes_compare_constant_time(k0, &s_n_minus_2_buf, &comparison_result)) { return AKDR_FAILURE; } if (comparison_result > 0) { return AKDR_NEXT_COUNTER; } struct aws_byte_cursor k0_cursor = aws_byte_cursor_from_buf(k0); if (aws_byte_buf_append(private_key_value, &k0_cursor)) { return AKDR_FAILURE; } aws_be_bytes_add_one_constant_time(private_key_value); return AKDR_SUCCESS; } static int s_init_secret_buf( struct aws_byte_buf *secret_buf, struct aws_allocator *allocator, const struct aws_credentials *credentials) { struct aws_byte_cursor secret_access_key_cursor = aws_credentials_get_secret_access_key(credentials); size_t secret_buffer_length = secret_access_key_cursor.len + s_secret_buffer_prefix->len; if (aws_byte_buf_init(secret_buf, allocator, secret_buffer_length)) { return AWS_OP_ERR; } struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_secret_buffer_prefix); if (aws_byte_buf_append(secret_buf, &prefix_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append(secret_buf, &secret_access_key_cursor)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials( struct aws_allocator *allocator, const struct aws_credentials *credentials) { if (allocator == NULL || credentials == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_ecc_key_pair *ecc_key_pair = NULL; struct aws_byte_buf fixed_input; AWS_ZERO_STRUCT(fixed_input); struct aws_byte_buf fixed_input_hmac_digest; AWS_ZERO_STRUCT(fixed_input_hmac_digest); struct aws_byte_buf private_key_buf; AWS_ZERO_STRUCT(private_key_buf); struct aws_byte_buf secret_buf; AWS_ZERO_STRUCT(secret_buf); size_t access_key_length = aws_credentials_get_access_key_id(credentials).len; /* * This value is calculated based on the format of the fixed input string as described above at * the definition of s_aws_build_fixed_input_buffer() */ size_t required_fixed_input_capacity = 32 + access_key_length; if (aws_byte_buf_init(&fixed_input, allocator, required_fixed_input_capacity)) { goto done; } if (aws_byte_buf_init(&fixed_input_hmac_digest, allocator, AWS_SHA256_LEN)) { goto done; } size_t 
key_length = aws_ecc_key_coordinate_byte_size_from_curve_name(AWS_CAL_ECDSA_P256); AWS_FATAL_ASSERT(key_length == AWS_SHA256_LEN); if (aws_byte_buf_init(&private_key_buf, allocator, key_length)) { goto done; } if (s_init_secret_buf(&secret_buf, allocator, credentials)) { goto done; } struct aws_byte_cursor secret_cursor = aws_byte_cursor_from_buf(&secret_buf); uint8_t counter = 1; enum aws_key_derivation_result result = AKDR_NEXT_COUNTER; while ((result == AKDR_NEXT_COUNTER) && (counter <= MAX_KEY_DERIVATION_COUNTER_VALUE)) { if (s_aws_build_fixed_input_buffer(&fixed_input, credentials, counter++)) { break; } aws_byte_buf_reset(&fixed_input_hmac_digest, true); struct aws_byte_cursor fixed_input_cursor = aws_byte_cursor_from_buf(&fixed_input); if (aws_sha256_hmac_compute(allocator, &secret_cursor, &fixed_input_cursor, &fixed_input_hmac_digest, 0)) { break; } result = s_aws_derive_ecc_private_key(&private_key_buf, &fixed_input_hmac_digest); } if (result == AKDR_SUCCESS) { struct aws_byte_cursor private_key_cursor = aws_byte_cursor_from_buf(&private_key_buf); ecc_key_pair = aws_ecc_key_pair_new_from_private_key(allocator, AWS_CAL_ECDSA_P256, &private_key_cursor); } done: aws_byte_buf_clean_up_secure(&secret_buf); aws_byte_buf_clean_up_secure(&private_key_buf); aws_byte_buf_clean_up_secure(&fixed_input_hmac_digest); aws_byte_buf_clean_up(&fixed_input); return ecc_key_pair; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/signable.c000066400000000000000000000114601456575232400234230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_signable_destroy(struct aws_signable *signable) { if (signable == NULL) { return; } if (signable->vtable != NULL) { signable->vtable->destroy(signable); } } int aws_signable_get_property( const struct aws_signable *signable, const struct aws_string *name, struct aws_byte_cursor *out_value) { AWS_ASSERT(signable && signable->vtable && signable->vtable->get_property); return signable->vtable->get_property(signable, name, out_value); } int aws_signable_get_property_list( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_property_list) { AWS_ASSERT(signable && signable->vtable && signable->vtable->get_property_list); return signable->vtable->get_property_list(signable, name, out_property_list); } int aws_signable_get_payload_stream(const struct aws_signable *signable, struct aws_input_stream **out_input_stream) { AWS_ASSERT(signable && signable->vtable && signable->vtable->get_payload_stream); return signable->vtable->get_payload_stream(signable, out_input_stream); } AWS_STRING_FROM_LITERAL(g_aws_http_headers_property_list_name, "headers"); AWS_STRING_FROM_LITERAL(g_aws_http_query_params_property_list_name, "params"); AWS_STRING_FROM_LITERAL(g_aws_http_method_property_name, "method"); AWS_STRING_FROM_LITERAL(g_aws_http_uri_property_name, "uri"); AWS_STRING_FROM_LITERAL(g_aws_signature_property_name, "signature"); AWS_STRING_FROM_LITERAL(g_aws_previous_signature_property_name, "previous-signature"); AWS_STRING_FROM_LITERAL(g_aws_canonical_request_property_name, "canonical-request"); /* * This is a simple aws_signable wrapper implementation for AWS's canonical representation of an http request */ struct aws_signable_canonical_request_impl { struct aws_string *canonical_request; }; static int s_aws_signable_canonical_request_get_property( const struct aws_signable *signable, const struct aws_string *name, 
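/*
 * Editorial usage sketch (illustrative only, not part of the original source): a signer holding this signable
 * retrieves the canonical request text through the generic property interface defined above:
 *
 *   struct aws_byte_cursor canonical_request;
 *   if (aws_signable_get_property(signable, g_aws_canonical_request_property_name, &canonical_request) ==
 *       AWS_OP_SUCCESS) {
 *       // canonical_request now points at the stored canonical request string
 *   }
 */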
struct aws_byte_cursor *out_value) { struct aws_signable_canonical_request_impl *impl = signable->impl; AWS_ZERO_STRUCT(*out_value); /* * uri and method can be queried directly from the wrapper request */ if (aws_string_eq(name, g_aws_canonical_request_property_name)) { *out_value = aws_byte_cursor_from_string(impl->canonical_request); } else { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_aws_signable_canonical_request_get_property_list( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_list) { (void)signable; (void)name; (void)out_list; *out_list = NULL; return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } static int s_aws_signable_canonical_request_get_payload_stream( const struct aws_signable *signable, struct aws_input_stream **out_input_stream) { (void)signable; *out_input_stream = NULL; return AWS_OP_SUCCESS; } static void s_aws_signable_canonical_request_destroy(struct aws_signable *signable) { if (signable == NULL) { return; } struct aws_signable_canonical_request_impl *impl = signable->impl; if (impl == NULL) { return; } aws_string_destroy(impl->canonical_request); aws_mem_release(signable->allocator, signable); } static struct aws_signable_vtable s_signable_canonical_request_vtable = { .get_property = s_aws_signable_canonical_request_get_property, .get_property_list = s_aws_signable_canonical_request_get_property_list, .get_payload_stream = s_aws_signable_canonical_request_get_payload_stream, .destroy = s_aws_signable_canonical_request_destroy, }; struct aws_signable *aws_signable_new_canonical_request( struct aws_allocator *allocator, struct aws_byte_cursor canonical_request) { struct aws_signable *signable = NULL; struct aws_signable_canonical_request_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_canonical_request_impl)); if (signable == NULL || impl == NULL) { return NULL; } AWS_ZERO_STRUCT(*signable); AWS_ZERO_STRUCT(*impl); signable->allocator = allocator; signable->vtable = &s_signable_canonical_request_vtable; signable->impl = impl; impl->canonical_request = aws_string_new_from_array(allocator, canonical_request.ptr, canonical_request.len); if (impl->canonical_request == NULL) { goto on_error; } return signable; on_error: aws_signable_destroy(signable); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/signable_chunk.c000066400000000000000000000062551456575232400246210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include /* * This is a simple aws_signable wrapper implementation for an s3 chunk */ struct aws_signable_chunk_impl { struct aws_input_stream *chunk_data; struct aws_string *previous_signature; }; static int s_aws_signable_chunk_get_property( const struct aws_signable *signable, const struct aws_string *name, struct aws_byte_cursor *out_value) { struct aws_signable_chunk_impl *impl = signable->impl; AWS_ZERO_STRUCT(*out_value); /* * uri and method can be queried directly from the wrapper request */ if (aws_string_eq(name, g_aws_previous_signature_property_name)) { *out_value = aws_byte_cursor_from_string(impl->previous_signature); } else { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_aws_signable_chunk_get_property_list( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_list) { (void)signable; (void)name; (void)out_list; return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } static int s_aws_signable_chunk_get_payload_stream( const struct aws_signable *signable, struct aws_input_stream **out_input_stream) { struct aws_signable_chunk_impl *impl = signable->impl; *out_input_stream = impl->chunk_data; return AWS_OP_SUCCESS; } static void s_aws_signable_chunk_destroy(struct aws_signable *signable) { if (signable == NULL) { return; } struct aws_signable_chunk_impl *impl = signable->impl; if (impl == NULL) { return; } aws_input_stream_release(impl->chunk_data); aws_string_destroy(impl->previous_signature); aws_mem_release(signable->allocator, signable); } static struct aws_signable_vtable s_signable_chunk_vtable = { .get_property = s_aws_signable_chunk_get_property, .get_property_list = s_aws_signable_chunk_get_property_list, .get_payload_stream = s_aws_signable_chunk_get_payload_stream, .destroy = s_aws_signable_chunk_destroy, }; struct aws_signable *aws_signable_new_chunk( struct aws_allocator *allocator, struct aws_input_stream *chunk_data, struct aws_byte_cursor previous_signature) { struct aws_signable *signable = NULL; struct aws_signable_chunk_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_chunk_impl)); if (signable == NULL || impl == NULL) { return NULL; } AWS_ZERO_STRUCT(*signable); AWS_ZERO_STRUCT(*impl); signable->allocator = allocator; signable->vtable = &s_signable_chunk_vtable; signable->impl = impl; impl->chunk_data = aws_input_stream_acquire(chunk_data); impl->previous_signature = aws_string_new_from_array(allocator, previous_signature.ptr, previous_signature.len); if (impl->previous_signature == NULL) { goto on_error; } return signable; on_error: aws_signable_destroy(signable); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/signable_http_request.c000066400000000000000000000076711456575232400262430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include /* * This is a simple aws_signable wrapper implementation for the aws_http_message struct */ struct aws_signable_http_request_impl { struct aws_http_message *request; struct aws_array_list headers; }; static int s_aws_signable_http_request_get_property( const struct aws_signable *signable, const struct aws_string *name, struct aws_byte_cursor *out_value) { struct aws_signable_http_request_impl *impl = signable->impl; AWS_ZERO_STRUCT(*out_value); /* * uri and method can be queried directly from the wrapper request */ if (aws_string_eq(name, g_aws_http_uri_property_name)) { aws_http_message_get_request_path(impl->request, out_value); } else if (aws_string_eq(name, g_aws_http_method_property_name)) { aws_http_message_get_request_method(impl->request, out_value); } else { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_aws_signable_http_request_get_property_list( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_list) { struct aws_signable_http_request_impl *impl = signable->impl; *out_list = NULL; if (aws_string_eq(name, g_aws_http_headers_property_list_name)) { *out_list = &impl->headers; } else { return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return AWS_OP_SUCCESS; } static int s_aws_signable_http_request_get_payload_stream( const struct aws_signable *signable, struct aws_input_stream **out_input_stream) { struct aws_signable_http_request_impl *impl = signable->impl; *out_input_stream = aws_http_message_get_body_stream(impl->request); return AWS_OP_SUCCESS; } static void s_aws_signable_http_request_destroy(struct aws_signable *signable) { if (signable == NULL) { return; } struct aws_signable_http_request_impl *impl = signable->impl; if (impl == NULL) { return; } aws_http_message_release(impl->request); aws_array_list_clean_up(&impl->headers); aws_mem_release(signable->allocator, signable); } static struct aws_signable_vtable s_signable_http_request_vtable = { .get_property = s_aws_signable_http_request_get_property, .get_property_list = s_aws_signable_http_request_get_property_list, .get_payload_stream = s_aws_signable_http_request_get_payload_stream, .destroy = s_aws_signable_http_request_destroy, }; struct aws_signable *aws_signable_new_http_request(struct aws_allocator *allocator, struct aws_http_message *request) { struct aws_signable *signable = NULL; struct aws_signable_http_request_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_http_request_impl)); AWS_ZERO_STRUCT(*signable); AWS_ZERO_STRUCT(*impl); signable->allocator = allocator; signable->vtable = &s_signable_http_request_vtable; signable->impl = impl; /* * Copy the headers since they're not different types */ size_t header_count = aws_http_message_get_header_count(request); if (aws_array_list_init_dynamic( &impl->headers, allocator, header_count, sizeof(struct aws_signable_property_list_pair))) { goto on_error; } for (size_t i = 0; i < header_count; ++i) { struct aws_http_header header; aws_http_message_get_header(request, &header, i); struct aws_signable_property_list_pair property = {.name = header.name, .value = header.value}; aws_array_list_push_back(&impl->headers, &property); } impl->request = aws_http_message_acquire(request); return signable; on_error: aws_signable_destroy(signable); return NULL; } 
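/*
 * Illustrative usage sketch -- not part of the original source. It only uses functions and
 * property names already defined above (aws_signable_new_http_request, aws_signable_get_property,
 * aws_signable_get_property_list, aws_signable_destroy); the allocator and request are assumed
 * to already exist and error handling is elided.
 *
 *     struct aws_signable *signable = aws_signable_new_http_request(allocator, request);
 *
 *     struct aws_byte_cursor method;
 *     aws_signable_get_property(signable, g_aws_http_method_property_name, &method);
 *
 *     struct aws_array_list *headers = NULL;
 *     aws_signable_get_property_list(signable, g_aws_http_headers_property_list_name, &headers);
 *
 *     aws_signable_destroy(signable);
 */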
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/signable_trailer.c000066400000000000000000000104301456575232400251410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct aws_signable_trailing_headers_impl { struct aws_http_headers *trailing_headers; struct aws_array_list headers; struct aws_string *previous_signature; }; static int s_aws_signable_trailing_headers_get_property( const struct aws_signable *signable, const struct aws_string *name, struct aws_byte_cursor *out_value) { struct aws_signable_trailing_headers_impl *impl = signable->impl; AWS_ZERO_STRUCT(*out_value); /* * uri and method can be queried directly from the wrapper request */ if (aws_string_eq(name, g_aws_previous_signature_property_name)) { *out_value = aws_byte_cursor_from_string(impl->previous_signature); } else { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_aws_signable_trailing_headers_get_property_list( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_list) { (void)signable; (void)name; (void)out_list; struct aws_signable_trailing_headers_impl *impl = signable->impl; *out_list = NULL; if (aws_string_eq(name, g_aws_http_headers_property_list_name)) { *out_list = &impl->headers; } else { return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return AWS_OP_SUCCESS; } static int s_aws_signable_trailing_headers_get_payload_stream( const struct aws_signable *signable, struct aws_input_stream **out_input_stream) { (void)signable; *out_input_stream = NULL; return AWS_OP_SUCCESS; } static void s_aws_signable_trailing_headers_destroy(struct aws_signable *signable) { if (signable == NULL) { return; } struct aws_signable_trailing_headers_impl *impl = signable->impl; if (impl == NULL) { return; } aws_http_headers_release(impl->trailing_headers); aws_string_destroy(impl->previous_signature); aws_array_list_clean_up(&impl->headers); aws_mem_release(signable->allocator, signable); } static struct aws_signable_vtable s_signable_trailing_headers_vtable = { .get_property = s_aws_signable_trailing_headers_get_property, .get_property_list = s_aws_signable_trailing_headers_get_property_list, .get_payload_stream = s_aws_signable_trailing_headers_get_payload_stream, .destroy = s_aws_signable_trailing_headers_destroy, }; struct aws_signable *aws_signable_new_trailing_headers( struct aws_allocator *allocator, struct aws_http_headers *trailing_headers, struct aws_byte_cursor previous_signature) { struct aws_signable *signable = NULL; struct aws_signable_trailing_headers_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_trailing_headers_impl)); AWS_ZERO_STRUCT(*signable); AWS_ZERO_STRUCT(*impl); /* Keep the headers alive. We're referencing the underlying strings. */ aws_http_headers_acquire(trailing_headers); impl->trailing_headers = trailing_headers; signable->allocator = allocator; signable->vtable = &s_signable_trailing_headers_vtable; signable->impl = impl; /* * Convert headers list to aws_signable_property_list_pair arraylist since they're not different types. 
*/ size_t header_count = aws_http_headers_count(trailing_headers); if (aws_array_list_init_dynamic( &impl->headers, allocator, header_count, sizeof(struct aws_signable_property_list_pair))) { goto on_error; } for (size_t i = 0; i < header_count; ++i) { struct aws_http_header header; aws_http_headers_get_index(trailing_headers, i, &header); struct aws_signable_property_list_pair property = {.name = header.name, .value = header.value}; aws_array_list_push_back(&impl->headers, &property); } impl->previous_signature = aws_string_new_from_array(allocator, previous_signature.ptr, previous_signature.len); if (impl->previous_signature == NULL) { goto on_error; } return signable; on_error: aws_signable_destroy(signable); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/signing.c000066400000000000000000000140761456575232400233030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /* * Aws signing implementation */ static int s_aws_last_error_or_unknown(void) { int last_error = aws_last_error(); if (last_error == AWS_ERROR_SUCCESS) { last_error = AWS_ERROR_UNKNOWN; } return last_error; } static void s_perform_signing(struct aws_signing_state_aws *state) { struct aws_signing_result *result = NULL; if (state->error_code != AWS_ERROR_SUCCESS) { goto done; } if (aws_credentials_is_anonymous(state->config.credentials)) { result = &state->result; goto done; } if (aws_signing_build_canonical_request(state)) { state->error_code = s_aws_last_error_or_unknown(); AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Signing failed to build canonical request via algorithm %s, error %d(%s)", (void *)state->signable, aws_signing_algorithm_to_string(state->config.algorithm), state->error_code, aws_error_debug_str(state->error_code)); goto done; } AWS_LOGF_INFO( AWS_LS_AUTH_SIGNING, "(id=%p) Signing successfully built canonical request for algorithm %s, with contents \n" PRInSTR "\n", (void *)state->signable, aws_signing_algorithm_to_string(state->config.algorithm), AWS_BYTE_BUF_PRI(state->canonical_request)); if (aws_signing_build_string_to_sign(state)) { state->error_code = s_aws_last_error_or_unknown(); AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Signing failed to build string-to-sign via algorithm %s, error %d(%s)", (void *)state->signable, aws_signing_algorithm_to_string(state->config.algorithm), state->error_code, aws_error_debug_str(state->error_code)); goto done; } AWS_LOGF_INFO( AWS_LS_AUTH_SIGNING, "(id=%p) Signing successfully built string-to-sign via algorithm %s, with contents \n" PRInSTR "\n", (void *)state->signable, aws_signing_algorithm_to_string(state->config.algorithm), AWS_BYTE_BUF_PRI(state->string_to_sign)); if (aws_signing_build_authorization_value(state)) { state->error_code = s_aws_last_error_or_unknown(); AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Signing failed to build final authorization value via algorithm %s", (void *)state->signable, aws_signing_algorithm_to_string(state->config.algorithm)); goto done; } result = &state->result; done: state->on_complete(result, state->error_code, state->userdata); aws_signing_state_destroy(state); } static void s_aws_signing_on_get_credentials(struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_signing_state_aws *state = user_data; if (!credentials) { if (error_code == AWS_ERROR_SUCCESS) { error_code = AWS_ERROR_UNKNOWN; } /* Log the credentials sourcing error */ AWS_LOGF_ERROR( 
AWS_LS_AUTH_SIGNING, "(id=%p) Credentials Provider failed to source credentials with error %d(%s)", (void *)state->signable, error_code, aws_error_debug_str(error_code)); state->error_code = AWS_AUTH_SIGNING_NO_CREDENTIALS; } else { if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC && !aws_credentials_is_anonymous(credentials)) { state->config.credentials = aws_credentials_new_ecc_from_aws_credentials(state->allocator, credentials); if (state->config.credentials == NULL) { state->error_code = AWS_AUTH_SIGNING_NO_CREDENTIALS; } } else { state->config.credentials = credentials; aws_credentials_acquire(credentials); } } s_perform_signing(state); } int aws_sign_request_aws( struct aws_allocator *allocator, const struct aws_signable *signable, const struct aws_signing_config_base *base_config, aws_signing_complete_fn *on_complete, void *userdata) { AWS_PRECONDITION(base_config); if (base_config->config_type != AWS_SIGNING_CONFIG_AWS) { return aws_raise_error(AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION); } const struct aws_signing_config_aws *config = (void *)base_config; struct aws_signing_state_aws *signing_state = aws_signing_state_new(allocator, config, signable, on_complete, userdata); if (!signing_state) { return AWS_OP_ERR; } if (signing_state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { if (signing_state->config.credentials != NULL && !aws_credentials_is_anonymous(signing_state->config.credentials)) { /* * If these are regular credentials, try to derive ecc-based ones */ if (aws_credentials_get_ecc_key_pair(signing_state->config.credentials) == NULL) { struct aws_credentials *ecc_credentials = aws_credentials_new_ecc_from_aws_credentials(allocator, signing_state->config.credentials); aws_credentials_release(signing_state->config.credentials); signing_state->config.credentials = ecc_credentials; if (signing_state->config.credentials == NULL) { goto on_error; } } } } bool can_sign_immediately = signing_state->config.credentials != NULL; if (can_sign_immediately) { s_perform_signing(signing_state); } else { if (aws_credentials_provider_get_credentials( signing_state->config.credentials_provider, s_aws_signing_on_get_credentials, signing_state)) { goto on_error; } } return AWS_OP_SUCCESS; on_error: aws_signing_state_destroy(signing_state); return AWS_OP_ERR; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/signing_config.c000066400000000000000000000162101456575232400246200ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include const struct aws_byte_cursor g_aws_signed_body_value_empty_sha256 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); const struct aws_byte_cursor g_aws_signed_body_value_unsigned_payload = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("UNSIGNED-PAYLOAD"); const struct aws_byte_cursor g_aws_signed_body_value_streaming_unsigned_payload_trailer = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-UNSIGNED-PAYLOAD-TRAILER"); const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"); const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload_trailer = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"); const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"); const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload_trailer = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"); const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_events = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-HMAC-SHA256-EVENTS"); const char *aws_signing_algorithm_to_string(enum aws_signing_algorithm algorithm) { switch (algorithm) { case AWS_SIGNING_ALGORITHM_V4: return "SigV4"; case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: return "SigV4S3Express"; case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: return "SigV4Asymmetric"; default: break; } return "Unknown"; } int aws_validate_aws_signing_config_aws(const struct aws_signing_config_aws *config) { if (config == NULL) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "AWS signing config is null"); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } if (config->signature_type == AWS_ST_HTTP_REQUEST_EVENT && config->algorithm != AWS_SIGNING_ALGORITHM_V4) { /* * Not supported yet. * * Need to determine if the Transcribe service supports Sigv4a and how to test it. * Transcribe's examples are insufficient. */ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "(id=%p) Event signing is only supported for Sigv4 yet", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } if (config->signature_type != AWS_ST_HTTP_REQUEST_HEADERS && config->signature_type != AWS_ST_HTTP_REQUEST_QUERY_PARAMS) { /* * If we're not signing the full request then it's critical that the credentials we're using are the same * credentials used on the original request. If we're using a provider to fetch credentials then that is * not guaranteed. For now, force users to always pass in credentials when signing events or chunks. * * The correct long-term solution would be to add a way to pass the credentials used in the initial * signing back to the user in the completion callback. Then the user could supply those credentials * to all subsequent chunk/event signings. The fact that we don't do that yet doesn't invalidate this check. 
*/ if (config->credentials == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Chunk/event signing config must contain explicit credentials", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } } if (config->region.len == 0) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "(id=%p) Signing config is missing a region identifier", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } if (config->service.len == 0) { AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "(id=%p) Signing config is missing a service identifier", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } switch (config->algorithm) { case AWS_SIGNING_ALGORITHM_V4: if (config->credentials == NULL && config->credentials_provider == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Sigv4 signing config is missing a credentials provider or credentials", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } if (config->credentials != NULL && !aws_credentials_is_anonymous(config->credentials)) { if (aws_credentials_get_access_key_id(config->credentials).len == 0 || aws_credentials_get_secret_access_key(config->credentials).len == 0) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Sigv4 signing configured with invalid credentials", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CREDENTIALS); } } break; case AWS_SIGNING_ALGORITHM_V4_S3EXPRESS: if (config->credentials == NULL && config->credentials_provider == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Sigv4 S3 Express signing config is missing a credentials provider or credentials", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } if (config->credentials != NULL) { if (aws_credentials_is_anonymous(config->credentials) || aws_credentials_get_access_key_id(config->credentials).len == 0 || aws_credentials_get_secret_access_key(config->credentials).len == 0 || aws_credentials_get_session_token(config->credentials).len == 0) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Sigv4 S3 Express signing configured with invalid credentials", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CREDENTIALS); } } break; case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC: if (config->credentials == NULL && config->credentials_provider == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_SIGNING, "(id=%p) Sigv4 asymmetric signing config is missing a credentials provider or credentials", (void *)config); return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } break; default: return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/signing_result.c000066400000000000000000000154371456575232400247030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #define INITIAL_SIGNING_RESULT_PROPERTIES_SIZE 10 #define INITIAL_SIGNING_RESULT_PROPERTY_LISTS_TABLE_SIZE 10 #define INITIAL_SIGNING_RESULT_PROPERTY_LIST_SIZE 10 static void s_aws_signing_result_property_clean_up(struct aws_signing_result_property *pair) { aws_string_destroy(pair->name); aws_string_destroy(pair->value); } static void s_aws_hash_callback_property_list_destroy(void *value) { struct aws_array_list *property_list = value; size_t property_count = aws_array_list_length(property_list); for (size_t i = 0; i < property_count; ++i) { struct aws_signing_result_property property; AWS_ZERO_STRUCT(property); if (aws_array_list_get_at(property_list, &property, i)) { continue; } s_aws_signing_result_property_clean_up(&property); } struct aws_allocator *allocator = property_list->alloc; aws_array_list_clean_up(property_list); aws_mem_release(allocator, property_list); } int aws_signing_result_init(struct aws_signing_result *result, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*result); result->allocator = allocator; if (aws_hash_table_init( &result->properties, allocator, INITIAL_SIGNING_RESULT_PROPERTIES_SIZE, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, aws_hash_callback_string_destroy) || aws_hash_table_init( &result->property_lists, allocator, INITIAL_SIGNING_RESULT_PROPERTY_LISTS_TABLE_SIZE, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, s_aws_hash_callback_property_list_destroy)) { goto on_error; } return AWS_OP_SUCCESS; on_error: aws_signing_result_clean_up(result); return AWS_OP_ERR; } void aws_signing_result_clean_up(struct aws_signing_result *result) { aws_hash_table_clean_up(&result->properties); aws_hash_table_clean_up(&result->property_lists); } int aws_signing_result_set_property( struct aws_signing_result *result, const struct aws_string *property_name, const struct aws_byte_cursor *property_value) { struct aws_string *name = NULL; struct aws_string *value = NULL; name = aws_string_new_from_string(result->allocator, property_name); value = aws_string_new_from_array(result->allocator, property_value->ptr, property_value->len); if (name == NULL || value == NULL) { goto on_error; } if (aws_hash_table_put(&result->properties, name, value, NULL)) { goto on_error; } return AWS_OP_SUCCESS; on_error: aws_string_destroy(name); aws_string_destroy(value); return AWS_OP_ERR; } int aws_signing_result_get_property( const struct aws_signing_result *result, const struct aws_string *property_name, struct aws_string **out_property_value) { struct aws_hash_element *element = NULL; aws_hash_table_find(&result->properties, property_name, &element); *out_property_value = NULL; if (element != NULL) { *out_property_value = element->value; } return AWS_OP_SUCCESS; } static struct aws_array_list *s_get_or_create_property_list( struct aws_signing_result *result, const struct aws_string *list_name) { struct aws_hash_element *element = NULL; aws_hash_table_find(&result->property_lists, list_name, &element); if (element != NULL) { return element->value; } struct aws_array_list *properties = aws_mem_acquire(result->allocator, sizeof(struct aws_array_list)); if (properties == NULL) { return NULL; } AWS_ZERO_STRUCT(*properties); struct aws_string *name_copy = aws_string_new_from_string(result->allocator, list_name); if (name_copy == NULL) { goto on_error; } if (aws_array_list_init_dynamic( properties, result->allocator, INITIAL_SIGNING_RESULT_PROPERTY_LIST_SIZE, sizeof(struct 
aws_signing_result_property))) { goto on_error; } if (aws_hash_table_put(&result->property_lists, name_copy, properties, NULL)) { goto on_error; } return properties; on_error: aws_string_destroy(name_copy); aws_array_list_clean_up(properties); aws_mem_release(result->allocator, properties); return NULL; } int aws_signing_result_append_property_list( struct aws_signing_result *result, const struct aws_string *list_name, const struct aws_byte_cursor *property_name, const struct aws_byte_cursor *property_value) { struct aws_array_list *properties = s_get_or_create_property_list(result, list_name); if (properties == NULL) { return AWS_OP_ERR; } struct aws_string *name = NULL; struct aws_string *value = NULL; name = aws_string_new_from_array(result->allocator, property_name->ptr, property_name->len); value = aws_string_new_from_array(result->allocator, property_value->ptr, property_value->len); struct aws_signing_result_property property; property.name = name; property.value = value; if (aws_array_list_push_back(properties, &property)) { goto on_error; } return AWS_OP_SUCCESS; on_error: aws_string_destroy(name); aws_string_destroy(value); return AWS_OP_ERR; } void aws_signing_result_get_property_list( const struct aws_signing_result *result, const struct aws_string *list_name, struct aws_array_list **out_list) { *out_list = NULL; struct aws_hash_element *element = NULL; aws_hash_table_find(&result->property_lists, list_name, &element); if (element != NULL) { *out_list = element->value; } } void aws_signing_result_get_property_value_in_property_list( const struct aws_signing_result *result, const struct aws_string *list_name, const struct aws_string *property_name, struct aws_string **out_value) { *out_value = NULL; struct aws_array_list *property_list = NULL; aws_signing_result_get_property_list(result, list_name, &property_list); if (property_list == NULL) { return; } size_t pair_count = aws_array_list_length(property_list); for (size_t i = 0; i < pair_count; ++i) { struct aws_signing_result_property pair; AWS_ZERO_STRUCT(pair); if (aws_array_list_get_at(property_list, &pair, i)) { continue; } if (pair.name == NULL) { continue; } if (aws_string_eq_ignore_case(property_name, pair.name)) { *out_value = pair.value; break; } } } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/sigv4_http_request.c000066400000000000000000000122551456575232400255050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4204) #endif /* _MSC_VER */ #define DEFAULT_QUERY_PARAM_COUNT 10 /* * Uses the signing result to rebuild the request's URI. If the signing was not done via * query params, then this ends up doing nothing. */ static int s_build_request_uri( struct aws_allocator *allocator, struct aws_http_message *request, const struct aws_signing_result *signing_result) { /* first let's see if we need to do anything at all */ struct aws_array_list *result_param_list = NULL; aws_signing_result_get_property_list( signing_result, g_aws_http_query_params_property_list_name, &result_param_list); if (result_param_list == NULL) { return AWS_OP_SUCCESS; } /* * There are query params to apply. 
Use the following algorithm: * * (1) Take the old uri and parse it into a URI structure * (2) Make a new URI builder and add the old URI's components to it * (3) Add the signing query params to the builder * (4) Use the builder to make a new URI */ int result = AWS_OP_ERR; size_t signed_query_param_count = aws_array_list_length(result_param_list); struct aws_uri old_uri; AWS_ZERO_STRUCT(old_uri); struct aws_uri new_uri; AWS_ZERO_STRUCT(new_uri); struct aws_uri_builder_options new_uri_builder; AWS_ZERO_STRUCT(new_uri_builder); struct aws_array_list query_params; AWS_ZERO_STRUCT(query_params); struct aws_byte_cursor old_path; aws_http_message_get_request_path(request, &old_path); /* start with the old uri and parse it */ if (aws_uri_init_parse(&old_uri, allocator, &old_path)) { goto done; } /* pull out the old query params */ if (aws_array_list_init_dynamic( &query_params, allocator, DEFAULT_QUERY_PARAM_COUNT, sizeof(struct aws_uri_param))) { goto done; } if (aws_uri_query_string_params(&old_uri, &query_params)) { goto done; } /* initialize a builder for the new uri matching the old uri */ new_uri_builder.host_name = old_uri.host_name; new_uri_builder.path = old_uri.path; new_uri_builder.port = old_uri.port; new_uri_builder.scheme = old_uri.scheme; new_uri_builder.query_params = &query_params; /* and now add any signing query params */ for (size_t i = 0; i < signed_query_param_count; ++i) { struct aws_signing_result_property source_param; if (aws_array_list_get_at(result_param_list, &source_param, i)) { goto done; } struct aws_uri_param signed_param; signed_param.key = aws_byte_cursor_from_string(source_param.name); signed_param.value = aws_byte_cursor_from_string(source_param.value); aws_array_list_push_back(&query_params, &signed_param); } /* create the new uri */ if (aws_uri_init_from_builder_options(&new_uri, allocator, &new_uri_builder)) { goto done; } /* copy the full string */ struct aws_byte_cursor new_uri_cursor = aws_byte_cursor_from_buf(&new_uri.uri_str); if (aws_http_message_set_request_path(request, new_uri_cursor)) { goto done; } result = AWS_OP_SUCCESS; done: aws_array_list_clean_up(&query_params); aws_uri_clean_up(&new_uri); aws_uri_clean_up(&old_uri); return result; } /* * Takes a mutable http request and adds all the additional query params and/or headers generated by the * signing process. 
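 *
 * Illustrative usage sketch -- not part of the original source. Inside a signing completion
 * callback that receives an aws_signing_result, a caller would typically do something like:
 *
 *     if (aws_apply_signing_result_to_http_request(request, allocator, result)) {
 *         // the result could not be applied to the request; handle the error
 *     }
 *
 * after which the request carries the signed headers and/or query string.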
*/ int aws_apply_signing_result_to_http_request( struct aws_http_message *request, struct aws_allocator *allocator, const struct aws_signing_result *result) { /* uri/query params */ if (s_build_request_uri(allocator, request, result)) { return AWS_OP_ERR; } /* headers */ size_t signing_header_count = 0; struct aws_array_list *result_header_list = NULL; aws_signing_result_get_property_list(result, g_aws_http_headers_property_list_name, &result_header_list); if (result_header_list != NULL) { signing_header_count = aws_array_list_length(result_header_list); } for (size_t i = 0; i < signing_header_count; ++i) { struct aws_signing_result_property source_header; AWS_ZERO_STRUCT(source_header); if (aws_array_list_get_at(result_header_list, &source_header, i)) { return AWS_OP_ERR; } if (source_header.name == NULL || source_header.value == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_http_header dest_header = { .name = aws_byte_cursor_from_string(source_header.name), .value = aws_byte_cursor_from_string(source_header.value), }; aws_http_message_add_header(request, dest_header); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/sso_token_utils.c000066400000000000000000000133771456575232400250740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4232) #endif /* _MSC_VER */ struct aws_string *aws_construct_sso_token_path(struct aws_allocator *allocator, const struct aws_string *input) { AWS_PRECONDITION(input); struct aws_string *sso_token_path_str = NULL; struct aws_string *home_directory = aws_get_home_directory(allocator); if (!home_directory) { return NULL; } struct aws_byte_cursor home_dir_cursor = aws_byte_cursor_from_string(home_directory); struct aws_byte_cursor input_cursor = aws_byte_cursor_from_string(input); struct aws_byte_cursor json_cursor = aws_byte_cursor_from_c_str(".json"); struct aws_byte_buf sso_token_path_buf; AWS_ZERO_STRUCT(sso_token_path_buf); struct aws_byte_buf sha1_buf; AWS_ZERO_STRUCT(sha1_buf); /* append home directory */ if (aws_byte_buf_init_copy_from_cursor(&sso_token_path_buf, allocator, home_dir_cursor)) { goto cleanup; } /* append sso cache directory */ struct aws_byte_cursor sso_cache_dir_cursor = aws_byte_cursor_from_c_str("/.aws/sso/cache/"); if (aws_byte_buf_append_dynamic(&sso_token_path_buf, &sso_cache_dir_cursor)) { goto cleanup; } /* append hex encoded sha1 of input */ if (aws_byte_buf_init(&sha1_buf, allocator, AWS_SHA1_LEN) || aws_sha1_compute(allocator, &input_cursor, &sha1_buf, 0)) { goto cleanup; } struct aws_byte_cursor sha1_cursor = aws_byte_cursor_from_buf(&sha1_buf); if (aws_hex_encode_append_dynamic(&sha1_cursor, &sso_token_path_buf)) { goto cleanup; } /* append .json */ if (aws_byte_buf_append_dynamic(&sso_token_path_buf, &json_cursor)) { goto cleanup; } /* use platform-specific directory separator. 
*/ aws_normalize_directory_separator(&sso_token_path_buf); sso_token_path_str = aws_string_new_from_buf(allocator, &sso_token_path_buf); AWS_LOGF_INFO( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "successfully constructed token path: %s", aws_string_c_str(sso_token_path_str)); cleanup: aws_byte_buf_clean_up(&sso_token_path_buf); aws_byte_buf_clean_up(&sha1_buf); aws_string_destroy(home_directory); return sso_token_path_str; } void aws_sso_token_destroy(struct aws_sso_token *sso_token) { if (sso_token == NULL) { return; } aws_string_destroy(sso_token->access_token); aws_mem_release(sso_token->allocator, sso_token); } struct aws_sso_token *aws_sso_token_new_from_file(struct aws_allocator *allocator, const struct aws_string *file_path) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(file_path); bool success = false; struct aws_sso_token *token = aws_mem_calloc(allocator, 1, sizeof(struct aws_sso_token)); token->allocator = allocator; struct aws_byte_buf file_contents_buf; AWS_ZERO_STRUCT(file_contents_buf); struct aws_json_value *document_root = NULL; if (aws_byte_buf_init_from_file(&file_contents_buf, allocator, aws_string_c_str(file_path))) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso token: failed to load token file %s", aws_string_c_str(file_path)); goto cleanup; } struct aws_byte_cursor document_cursor = aws_byte_cursor_from_buf(&file_contents_buf); document_root = aws_json_value_new_from_string(allocator, document_cursor); if (document_root == NULL) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso token: failed to parse sso token file %s", aws_string_c_str(file_path)); aws_raise_error(AWS_AUTH_SSO_TOKEN_INVALID); goto cleanup; } struct aws_byte_cursor access_token_cursor; struct aws_json_value *access_token = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("accessToken")); if (!aws_json_value_is_string(access_token) || aws_json_value_get_string(access_token, &access_token_cursor)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso token: failed to parse accessToken from %s", aws_string_c_str(file_path)); aws_raise_error(AWS_AUTH_SSO_TOKEN_INVALID); goto cleanup; } struct aws_byte_cursor expires_at_cursor; struct aws_json_value *expires_at = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("expiresAt")); if (!aws_json_value_is_string(expires_at) || aws_json_value_get_string(expires_at, &expires_at_cursor)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso token: failed to parse expiresAt from %s", aws_string_c_str(file_path)); aws_raise_error(AWS_AUTH_SSO_TOKEN_INVALID); goto cleanup; } struct aws_date_time expiration; if (aws_date_time_init_from_str_cursor(&expiration, &expires_at_cursor, AWS_DATE_FORMAT_ISO_8601)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso token: expiresAt '" PRInSTR "' in %s is not a valid ISO-8601 date string", AWS_BYTE_CURSOR_PRI(expires_at_cursor), aws_string_c_str(file_path)); aws_raise_error(AWS_AUTH_SSO_TOKEN_INVALID); goto cleanup; } token->access_token = aws_string_new_from_cursor(allocator, &access_token_cursor); token->expiration = expiration; success = true; cleanup: aws_json_value_destroy(document_root); aws_byte_buf_clean_up(&file_contents_buf); if (!success) { aws_sso_token_destroy(token); token = NULL; } return token; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/token_provider_sso_profile.c000066400000000000000000000147101456575232400272760ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #ifdef _MSC_VER /* allow non-constant declared initializers. */ # pragma warning(disable : 4204) #endif /* * sso-token profile provider implementation */ struct aws_token_provider_profile_impl { struct aws_string *sso_token_file_path; aws_io_clock_fn *system_clock_fn; }; static int s_token_provider_profile_get_token( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_token_provider_profile_impl *impl = provider->impl; struct aws_sso_token *sso_token = NULL; struct aws_credentials *credentials = NULL; int result = AWS_OP_ERR; sso_token = aws_sso_token_new_from_file(provider->allocator, impl->sso_token_file_path); if (!sso_token) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to get sso token from file", (void *)provider); goto done; } /* check token expiration. */ uint64_t now_ns = UINT64_MAX; if (impl->system_clock_fn(&now_ns) != AWS_OP_SUCCESS) { goto done; } if (aws_date_time_as_nanos(&sso_token->expiration) <= now_ns) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) cached sso token is expired.", (void *)provider); aws_raise_error(AWS_AUTH_SSO_TOKEN_EXPIRED); goto done; } credentials = aws_credentials_new_token( provider->allocator, aws_byte_cursor_from_string(sso_token->access_token), (uint64_t)aws_date_time_as_epoch_secs(&sso_token->expiration)); if (!credentials) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Unable to construct credentials.", (void *)provider); goto done; } callback(credentials, AWS_OP_SUCCESS, user_data); result = AWS_OP_SUCCESS; done: aws_sso_token_destroy(sso_token); aws_credentials_release(credentials); return result; } static void s_token_provider_profile_destroy(struct aws_credentials_provider *provider) { struct aws_token_provider_profile_impl *impl = provider->impl; if (impl == NULL) { return; } aws_string_destroy(impl->sso_token_file_path); aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_token_provider_profile_vtable = { .get_credentials = s_token_provider_profile_get_token, .destroy = s_token_provider_profile_destroy, }; AWS_STRING_FROM_LITERAL(s_profile_sso_start_url_name, "sso_start_url"); static struct aws_string *s_construct_profile_sso_token_path( struct aws_allocator *allocator, const struct aws_token_provider_sso_profile_options *options) { struct aws_profile_collection *config_collection = NULL; struct aws_string *profile_name = NULL; struct aws_string *sso_token_path = NULL; profile_name = aws_get_profile_name(allocator, &options->profile_name_override); if (!profile_name) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-profile: failed to resolve profile name"); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } if (options->config_file_cached) { /* Use cached config file */ config_collection = aws_profile_collection_acquire(options->config_file_cached); } else { /* load config file */ config_collection = aws_load_profile_collection_from_config_file(allocator, options->config_file_name_override); } if (!config_collection) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-profile: could not load or parse" " a config file."); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } const struct aws_profile *profile = 
aws_profile_collection_get_profile(config_collection, profile_name); if (!profile) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-profile: could not load" " a profile at %s.", aws_string_c_str(profile_name)); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } const struct aws_profile_property *sso_start_url_property = aws_profile_get_property(profile, s_profile_sso_start_url_name); if (!sso_start_url_property) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-profile: failed to find sso_start_url"); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } sso_token_path = aws_construct_sso_token_path(allocator, aws_profile_property_get_value(sso_start_url_property)); if (!sso_token_path) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-profile: failed to construct token path"); goto cleanup; } cleanup: aws_string_destroy(profile_name); aws_profile_collection_release(config_collection); return sso_token_path; } struct aws_credentials_provider *aws_token_provider_new_sso_profile( struct aws_allocator *allocator, const struct aws_token_provider_sso_profile_options *options) { struct aws_string *token_path = s_construct_profile_sso_token_path(allocator, options); if (!token_path) { return NULL; } struct aws_credentials_provider *provider = NULL; struct aws_token_provider_profile_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_token_provider_profile_impl)); AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_token_provider_profile_vtable, impl); impl->sso_token_file_path = aws_string_new_from_string(allocator, token_path); provider->shutdown_options = options->shutdown_options; if (options->system_clock_fn) { impl->system_clock_fn = options->system_clock_fn; } else { impl->system_clock_fn = aws_sys_clock_get_ticks; } aws_string_destroy(token_path); return provider; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/source/token_provider_sso_session.c000066400000000000000000000226451456575232400273270ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #ifdef _MSC_VER /* allow non-constant declared initializers. */ # pragma warning(disable : 4204) #endif /* * sso-session token provider implementation */ struct aws_token_provider_sso_session_impl { struct aws_string *sso_token_file_path; aws_io_clock_fn *system_clock_fn; }; static int s_token_provider_sso_session_get_token( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_token_provider_sso_session_impl *impl = provider->impl; struct aws_sso_token *sso_token = NULL; struct aws_credentials *credentials = NULL; int result = AWS_OP_ERR; sso_token = aws_sso_token_new_from_file(provider->allocator, impl->sso_token_file_path); if (!sso_token) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) failed to get sso token from file.", (void *)provider); goto done; } /* check token expiration. 
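 *
 * (Illustrative note, not part of the original source.) The comparison below is done in
 * nanoseconds against impl->system_clock_fn, which defaults to aws_sys_clock_get_ticks but
 * can be overridden through the provider options, presumably so callers and tests can
 * supply their own clock.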
*/ uint64_t now_ns = UINT64_MAX; if (impl->system_clock_fn(&now_ns) != AWS_OP_SUCCESS) { goto done; } if (aws_date_time_as_nanos(&sso_token->expiration) <= now_ns) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) cached sso token is expired.", (void *)provider); aws_raise_error(AWS_AUTH_SSO_TOKEN_EXPIRED); goto done; } /* TODO: Refresh token if it is within refresh window and refreshable */ credentials = aws_credentials_new_token( provider->allocator, aws_byte_cursor_from_string(sso_token->access_token), (uint64_t)aws_date_time_as_epoch_secs(&sso_token->expiration)); if (!credentials) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p) Unable to construct credentials.", (void *)provider); goto done; } callback(credentials, AWS_OP_SUCCESS, user_data); result = AWS_OP_SUCCESS; done: aws_sso_token_destroy(sso_token); aws_credentials_release(credentials); return result; } static void s_token_provider_sso_session_destroy(struct aws_credentials_provider *provider) { struct aws_token_provider_sso_session_impl *impl = provider->impl; if (impl == NULL) { return; } aws_string_destroy(impl->sso_token_file_path); aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_token_provider_sso_session_vtable = { .get_credentials = s_token_provider_sso_session_get_token, .destroy = s_token_provider_sso_session_destroy, }; AWS_STRING_FROM_LITERAL(s_sso_session_name, "sso_session"); AWS_STRING_FROM_LITERAL(s_sso_region_name, "sso_region"); AWS_STRING_FROM_LITERAL(s_sso_start_url_name, "sso_start_url"); /** * Parses the config file to validate and construct a token path. A valid profile with sso session is as follow * [profile sso-profile] * sso_session = dev * sso_account_id = 012345678901 * sso_role_name = SampleRole * * [sso-session dev] * sso_region = us-east-1 * sso_start_url = https://d-abc123.awsapps.com/start */ static struct aws_string *s_verify_config_and_construct_sso_token_path( struct aws_allocator *allocator, const struct aws_token_provider_sso_session_options *options) { struct aws_profile_collection *config_collection = NULL; struct aws_string *profile_name = NULL; struct aws_string *sso_token_path = NULL; profile_name = aws_get_profile_name(allocator, &options->profile_name_override); if (!profile_name) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso-session: token provider failed to resolve profile name"); goto cleanup; } if (options->config_file_cached) { /* Use cached config file */ config_collection = aws_profile_collection_acquire(options->config_file_cached); } else { /* load config file */ config_collection = aws_load_profile_collection_from_config_file(allocator, options->config_file_name_override); } if (!config_collection) { goto cleanup; } const struct aws_profile *profile = aws_profile_collection_get_profile(config_collection, profile_name); if (!profile) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "sso-session: token provider could not load" " a profile at %s.", aws_string_c_str(profile_name)); goto cleanup; } const struct aws_profile_property *sso_session_property = aws_profile_get_property(profile, s_sso_session_name); if (!sso_session_property) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-session: token provider could not find an sso-session at profile %s", aws_string_c_str(profile_name)); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } const struct aws_string *sso_session_name = 
aws_profile_property_get_value(sso_session_property); /* parse sso_session */ const struct aws_profile *session_profile = aws_profile_collection_get_section(config_collection, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, sso_session_name); if (!session_profile) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-session: failed to find an sso-session"); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } const struct aws_profile_property *sso_region_property = aws_profile_get_property(session_profile, s_sso_region_name); const struct aws_profile_property *sso_start_url_property = aws_profile_get_property(session_profile, s_sso_start_url_name); if (!sso_region_property) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-session: failed to find sso_region in sso-session"); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } if (!sso_start_url_property) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-session: failed to find sso_start_url in sso-session"); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } /* Verify sso_region & start_url are the same in profile section if they exist */ const struct aws_string *sso_region = aws_profile_property_get_value(sso_region_property); const struct aws_string *sso_start_url = aws_profile_property_get_value(sso_start_url_property); const struct aws_profile_property *profile_sso_region_property = aws_profile_get_property(profile, s_sso_region_name); const struct aws_profile_property *profile_sso_start_url_property = aws_profile_get_property(profile, s_sso_start_url_name); if (profile_sso_region_property && !aws_string_eq(sso_region, aws_profile_property_get_value(profile_sso_region_property))) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-session: profile & sso-session have different value for sso_region"); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } if (profile_sso_start_url_property && !aws_string_eq(sso_start_url, aws_profile_property_get_value(profile_sso_start_url_property))) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "token-provider-sso-session: profile & sso-session have different value for sso_start_url"); aws_raise_error(AWS_AUTH_SSO_TOKEN_PROVIDER_SOURCE_FAILURE); goto cleanup; } sso_token_path = aws_construct_sso_token_path(allocator, sso_session_name); cleanup: aws_string_destroy(profile_name); aws_profile_collection_release(config_collection); return sso_token_path; } struct aws_credentials_provider *aws_token_provider_new_sso_session( struct aws_allocator *allocator, const struct aws_token_provider_sso_session_options *options) { /* Currently, they are not used but they will be required when we implement the refresh token functionality. 
*/ AWS_ASSERT(options->bootstrap); AWS_ASSERT(options->tls_ctx); struct aws_string *token_path = s_verify_config_and_construct_sso_token_path(allocator, options); if (!token_path) { return NULL; } struct aws_credentials_provider *provider = NULL; struct aws_token_provider_sso_session_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_token_provider_sso_session_impl)); AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_credentials_provider_init_base(provider, allocator, &s_aws_token_provider_sso_session_vtable, impl); impl->sso_token_file_path = aws_string_new_from_string(allocator, token_path); provider->shutdown_options = options->shutdown_options; if (options->system_clock_fn) { impl->system_clock_fn = options->system_clock_fn; } else { impl->system_clock_fn = aws_sys_clock_get_ticks; } aws_string_destroy(token_path); return provider; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/000077500000000000000000000000001456575232400213335ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/CMakeLists.txt000066400000000000000000000353711456575232400241040ustar00rootroot00000000000000include(AwsLibFuzzer) include(AwsTestHarness) enable_testing() file(GLOB TEST_SRC "*.c") file(GLOB TEST_HDRS "*.h") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) add_test_case(credentials_create_destroy_test) add_test_case(anonymous_credentials_create_destroy_test) add_test_case(static_credentials_provider_basic_test) add_test_case(anonymous_credentials_provider_basic_test) add_test_case(environment_credentials_provider_basic_test) add_test_case(environment_credentials_provider_empty_env_test) add_test_case(environment_credentials_provider_negative_test) add_test_case(cached_credentials_provider_elapsed_test) add_test_case(cached_credentials_provider_expired_test) add_test_case(cached_credentials_provider_queued_async_test) add_test_case(profile_credentials_provider_new_destroy_defaults_test) add_test_case(profile_credentials_provider_cached_test) add_test_case(profile_credentials_provider_default_test) add_test_case(profile_credentials_provider_nondefault_test) add_test_case(profile_credentials_provider_environment_test) add_test_case(credentials_provider_first_in_chain_test) add_test_case(credentials_provider_second_in_chain_test) add_test_case(credentials_provider_null_chain_test) add_net_test_case(credentials_provider_default_basic_test) add_net_test_case(credentials_provider_default_manual_tls_test) add_net_test_case(credentials_provider_default_chain_disable_environment_test) add_test_case(credentials_provider_imds_new_destroy) add_test_case(credentials_provider_imds_connect_failure) add_test_case(credentials_provider_imds_token_request_failure) add_test_case(credentials_provider_imds_role_name_request_failure) add_test_case(credentials_provider_imds_role_request_failure) add_test_case(credentials_provider_imds_bad_document_failure) add_test_case(credentials_provider_imds_secure_success) add_test_case(credentials_provider_imds_connection_closed_success) add_test_case(credentials_provider_imds_insecure_success) add_test_case(credentials_provider_imds_insecure_then_secure_success) add_test_case(credentials_provider_imds_success_multi_part_role_name) add_test_case(credentials_provider_imds_success_multi_part_doc) add_test_case(credentials_provider_imds_real_new_destroy) if(AWS_BUILDING_ON_EC2) add_test_case(credentials_provider_imds_real_success) endif() add_test_case(credentials_provider_ecs_new_destroy) 
add_test_case(credentials_provider_ecs_connect_failure) add_test_case(credentials_provider_ecs_request_failure) add_test_case(credentials_provider_ecs_bad_document_failure) add_test_case(credentials_provider_ecs_basic_success) add_test_case(credentials_provider_ecs_no_auth_token_success) add_test_case(credentials_provider_ecs_success_multi_part_doc) add_test_case(credentials_provider_ecs_real_new_destroy) if(AWS_BUILDING_ON_ECS) add_test_case(credentials_provider_ecs_real_success) endif() add_test_case(credentials_provider_x509_new_destroy) add_test_case(credentials_provider_x509_connect_failure) add_test_case(credentials_provider_x509_request_failure) add_test_case(credentials_provider_x509_bad_document_failure) add_test_case(credentials_provider_x509_basic_success) add_test_case(credentials_provider_x509_success_multi_part_doc) add_test_case(credentials_provider_x509_real_new_destroy) add_net_test_case(credentials_provider_sts_web_identity_new_destroy_from_parameters) add_net_test_case(credentials_provider_sts_web_identity_new_destroy_from_env) add_net_test_case(credentials_provider_sts_web_identity_new_destroy_from_config) add_net_test_case(credentials_provider_sts_web_identity_new_destroy_from_cached_config) add_net_test_case(credentials_provider_sts_web_identity_new_failed_without_env_and_config) add_net_test_case(credentials_provider_sts_web_identity_connect_failure) add_net_test_case(credentials_provider_sts_web_identity_request_failure) add_net_test_case(credentials_provider_sts_web_identity_bad_document_failure) add_net_test_case(credentials_provider_sts_web_identity_test_retry_error1) add_net_test_case(credentials_provider_sts_web_identity_test_retry_error2) add_net_test_case(credentials_provider_sts_web_identity_basic_success_env) add_net_test_case(credentials_provider_sts_web_identity_basic_success_config) add_net_test_case(credentials_provider_sts_web_identity_success_multi_part_doc) add_net_test_case(credentials_provider_sts_web_identity_real_new_destroy) add_net_test_case(credentials_provider_sts_direct_config_succeeds) add_net_test_case(credentials_provider_sts_direct_config_succeeds_after_retry) add_net_test_case(credentials_provider_sts_direct_config_invalid_doc) add_net_test_case(credentials_provider_sts_direct_config_connection_failed) add_net_test_case(credentials_provider_sts_direct_config_service_fails) add_net_test_case(credentials_provider_sts_from_profile_config_succeeds) add_net_test_case(credentials_provider_sts_from_profile_config_with_chain) add_net_test_case(credentials_provider_sts_from_profile_config_with_chain_and_profile_creds) add_net_test_case(credentials_provider_sts_from_profile_config_with_chain_and_partial_profile_creds) add_net_test_case(credentials_provider_sts_from_self_referencing_profile) add_net_test_case(credentials_provider_sts_from_profile_config_with_chain_cycle) add_net_test_case(credentials_provider_sts_from_profile_config_with_chain_cycle_and_profile_creds) add_net_test_case(credentials_provider_sts_from_profile_config_manual_tls_succeeds) add_net_test_case(credentials_provider_sts_from_profile_config_environment_succeeds) add_net_test_case(credentials_provider_sts_cache_expiration_conflict) add_test_case(credentials_provider_process_new_destroy_from_config) add_test_case(credentials_provider_process_new_destroy_from_config_without_token) add_test_case(credentials_provider_process_new_failed) add_test_case(credentials_provider_process_bad_command) add_test_case(credentials_provider_process_incorrect_command_output) 
add_test_case(credentials_provider_process_basic_success) add_test_case(credentials_provider_process_basic_success_cached) add_net_test_case(credentials_provider_cognito_new_destroy) add_net_test_case(credentials_provider_cognito_failure_connect) add_net_test_case(credentials_provider_cognito_failure_request) add_net_test_case(credentials_provider_cognito_failure_bad_document) add_net_test_case(credentials_provider_cognito_success) add_net_test_case(credentials_provider_cognito_success_after_retry) if(AWS_HAS_CI_ENVIRONMENT) add_net_test_case(credentials_provider_cognito_success_unauthenticated) endif() add_test_case(sso_token_provider_profile_invalid_profile_test) add_test_case(sso_token_provider_profile_valid_profile_test) add_net_test_case(sso_token_provider_sso_session_invalid_config_test) add_net_test_case(sso_token_provider_sso_session_valid_config_test) add_net_test_case(sso_token_provider_sso_session_basic_success) add_net_test_case(sso_token_provider_sso_session_config_file_cached) add_net_test_case(sso_token_provider_sso_session_expired_token) add_test_case(sso_token_provider_profile_basic_success) add_test_case(sso_token_provider_profile_cached_config_file) add_test_case(sso_token_provider_profile_expired_token) add_test_case(parse_token_location_url_test) add_test_case(parse_token_location_session_test) add_test_case(parse_sso_token_valid) add_test_case(parse_sso_token_invalid) add_test_case(parse_sso_token_invalid_missing_access_token) add_test_case(parse_sso_token_missing_expires_at) add_test_case(parse_sso_token_invalid_expires_at) add_net_test_case(credentials_provider_sso_failed_invalid_config) add_net_test_case(credentials_provider_sso_create_destroy_valid_config) add_net_test_case(credentials_provider_sso_connect_failure) add_net_test_case(credentials_provider_sso_failure_token_missing) add_net_test_case(credentials_provider_sso_failure_token_expired) add_net_test_case(credentials_provider_sso_failure_token_empty) add_net_test_case(credentials_provider_sso_request_failure) add_net_test_case(credentials_provider_sso_bad_response) add_net_test_case(credentials_provider_sso_retryable_error) add_net_test_case(credentials_provider_sso_basic_success) add_net_test_case(credentials_provider_sso_basic_success_cached_config_file) add_net_test_case(credentials_provider_sso_basic_success_profile) add_net_test_case(credentials_provider_sso_basic_success_profile_cached_config_file) add_net_test_case(credentials_provider_sso_basic_success_after_failure) add_test_case(imds_client_new_release) add_test_case(imds_client_connect_failure) add_test_case(imds_client_token_request_failure) add_test_case(imds_client_insecure_fallback_request_failure) add_test_case(imds_client_v1_fallback_disabled_failure) add_test_case(imds_client_resource_request_failure) add_test_case(imds_client_resource_request_success) add_test_case(imds_client_insecure_resource_request_success) add_test_case(imds_client_insecure_then_secure_resource_request_success) add_test_case(imds_client_multiple_resource_requests_random_responses_finally_all_success) add_test_case(imds_client_get_ami_id_success) add_test_case(imds_client_get_ancestor_ami_ids_success) add_test_case(imds_client_get_iam_profile_success) add_test_case(imds_client_get_instance_info_success) add_test_case(imds_client_get_credentials_success) add_test_case(imds_client_cache_token_refresh) if(AWS_BUILDING_ON_EC2) add_test_case(imds_client_real_success) endif() add_test_case(config_file_path_override_test) add_test_case(config_file_path_environment_test) 
add_test_case(credentials_file_path_override_test) add_test_case(credentials_file_path_environment_test) add_test_case(profile_override_test) add_test_case(profile_environment_test) add_test_case(sigv4_skip_xray_header_test) add_test_case(sigv4_skip_user_agent_header_test) add_test_case(sigv4_skip_custom_header_test) add_test_case(sigv4_fail_date_header_test) add_test_case(sigv4_fail_content_header_test) add_test_case(sigv4_fail_authorization_header_test) add_test_case(sigv4_fail_signature_param_test) add_test_case(sigv4_fail_date_param_test) add_test_case(sigv4_fail_credential_param_test) add_test_case(sigv4_fail_algorithm_param_test) add_test_case(sigv4_fail_signed_headers_param_test) add_test_case(signer_null_credentials_test) add_test_case(signer_anonymous_credentials_test) add_test_case(signer_anonymous_credentials_provider_test) add_test_case(sigv4_chunked_signing_test) add_test_case(sigv4_event_signing_test) add_test_case(sigv4a_chunked_signing_test) add_test_case(sigv4_trailing_headers_signing_test) add_test_case(sigv4a_trailing_headers_signing_test) add_test_case(credentials_derive_ecc_key_fixed) add_test_case(credentials_new_ecc_fixed) add_test_case(credentials_derive_ecc_key_long_access_key) add_test_case(be_sequence_add_one) add_test_case(be_sequence_compare) add_test_case(sigv4a_get_header_key_duplicate_test) add_test_case(sigv4a_get_header_value_multiline_test) add_test_case(sigv4a_get_header_value_order_test) add_test_case(sigv4a_get_header_value_trim_test) add_test_case(sigv4a_get_unreserved_test) add_test_case(sigv4a_get_utf8_test) add_test_case(sigv4a_get_vanilla_test) add_test_case(sigv4a_get_vanilla_with_session_token_test) add_test_case(sigv4a_get_vanilla_empty_query_key_test) add_test_case(sigv4a_get_vanilla_query_test) add_test_case(sigv4a_get_vanilla_query_order_key_case_test) add_test_case(sigv4a_get_vanilla_query_order_encoded_test) add_test_case(sigv4a_get_vanilla_unreserved_test) add_test_case(sigv4a_get_vanilla_utf8_query_test) add_test_case(sigv4a_post_header_key_case_test) add_test_case(sigv4a_post_header_key_sort_test) add_test_case(sigv4a_post_header_value_case_test) add_test_case(sigv4a_post_vanilla_test) add_test_case(sigv4a_post_vanilla_empty_query_value_test) add_test_case(sigv4a_post_vanilla_query_test) add_test_case(sigv4a_post_x_www_form_urlencoded_test) add_test_case(sigv4a_post_x_www_form_urlencoded_parameters_test) add_test_case(sigv4a_post_sts_header_after_test) add_test_case(sigv4a_post_sts_header_before_test) add_test_case(sigv4a_get_relative_normalized_test) add_test_case(sigv4a_get_relative_unnormalized_test) add_test_case(sigv4a_get_relative_relative_normalized_test) add_test_case(sigv4a_get_relative_relative_unnormalized_test) add_test_case(sigv4a_get_slash_normalized_test) add_test_case(sigv4a_get_slash_unnormalized_test) add_test_case(sigv4a_get_slash_dot_slash_normalized_test) add_test_case(sigv4a_get_slash_dot_slash_unnormalized_test) add_test_case(sigv4a_get_slash_pointless_dot_normalized_test) add_test_case(sigv4a_get_slash_pointless_dot_unnormalized_test) add_test_case(sigv4a_get_slashes_normalized_test) add_test_case(sigv4a_get_slashes_unnormalized_test) add_test_case(sigv4a_get_space_normalized_test) add_test_case(sigv4a_get_space_unnormalized_test) add_test_case(sigv4_get_header_key_duplicate_test) add_test_case(sigv4_get_header_value_multiline_test) add_test_case(sigv4_get_header_value_order_test) add_test_case(sigv4_get_header_value_trim_test) add_test_case(sigv4_get_unreserved_test) add_test_case(sigv4_get_utf8_test) 
add_test_case(sigv4_get_vanilla_test) add_test_case(sigv4_get_vanilla_with_session_token_test) add_test_case(sigv4_get_vanilla_empty_query_key_test) add_test_case(sigv4_get_vanilla_query_test) add_test_case(sigv4_get_vanilla_query_order_key_case_test) add_test_case(sigv4_get_vanilla_query_order_encoded_test) add_test_case(sigv4_get_vanilla_unreserved_test) add_test_case(sigv4_get_vanilla_utf8_query_test) add_test_case(sigv4_post_header_key_case_test) add_test_case(sigv4_post_header_key_sort_test) add_test_case(sigv4_post_header_value_case_test) add_test_case(sigv4_post_vanilla_test) add_test_case(sigv4_post_vanilla_empty_query_value_test) add_test_case(sigv4_post_vanilla_query_test) add_test_case(sigv4_post_x_www_form_urlencoded_test) add_test_case(sigv4_post_x_www_form_urlencoded_parameters_test) add_test_case(sigv4_post_sts_header_after_test) add_test_case(sigv4_post_sts_header_before_test) add_test_case(sigv4_get_relative_normalized_test) add_test_case(sigv4_get_relative_unnormalized_test) add_test_case(sigv4_get_relative_relative_normalized_test) add_test_case(sigv4_get_relative_relative_unnormalized_test) add_test_case(sigv4_get_slash_normalized_test) add_test_case(sigv4_get_slash_unnormalized_test) add_test_case(sigv4_get_slash_dot_slash_normalized_test) add_test_case(sigv4_get_slash_dot_slash_unnormalized_test) add_test_case(sigv4_get_slash_pointless_dot_normalized_test) add_test_case(sigv4_get_slash_pointless_dot_unnormalized_test) add_test_case(sigv4_get_slashes_normalized_test) add_test_case(sigv4_get_slashes_unnormalized_test) add_test_case(sigv4_get_space_normalized_test) add_test_case(sigv4_get_space_unnormalized_test) set(TEST_BINARY_NAME ${PROJECT_NAME}-tests) generate_test_driver(${TEST_BINARY_NAME}) # sigv4 test suite files add_custom_command(TARGET ${TEST_BINARY_NAME} PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/aws-signing-test-suite $) file(GLOB FUZZ_TESTS "fuzz/*.c") aws_add_fuzz_tests("${FUZZ_TESTS}" "" "${CMAKE_CURRENT_SOURCE_DIR}/fuzz/corpus") aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/000077500000000000000000000000001456575232400256655ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/000077500000000000000000000000001456575232400262165ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicate/000077500000000000000000000000001456575232400327615ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400352700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicate{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002721456575232400403070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicateGET / host:example.amazonaws.com my-header1:value2,value2,value1 x-amz-date:20150830T123600Z host;my-header1;x-amz-date 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400366610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicatec9d5ea9f3f72853aea855b47ea873832890dbdd183b4468f858259531a5138eaheader-signed-request.txt000066400000000000000000000005211456575232400376260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicateGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value2 My-Header1:value2 My-Header1:value1 X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date, Signature=c9d5ea9f3f72853aea855b47ea873832890dbdd183b4468f858259531a5138ea header-string-to-sign.txt000066400000000000000000000002121456575232400375500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicateAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request dc7f04a3abfde8d472b0ab1a418b741b7c67174dad1551b4117b15527fbe966cquery-canonical-request.txt000066400000000000000000000005251456575232400402250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicateGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:value2,value2,value1 host;my-header1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400365760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicate3349ee0b81b4b589da0ff28a395c3591e04de515651dd74f298fa992d1507a97query-signed-request.txt000066400000000000000000000005651456575232400375530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicateGET /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Signature=3349ee0b81b4b589da0ff28a395c3591e04de515651dd74f298fa992d1507a97 HTTP/1.1 Host:example.amazonaws.com My-Header1:value2 My-Header1:value2 My-Header1:value1 query-string-to-sign.txt000066400000000000000000000002121456575232400374650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicateAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request fe8b58fb44117d598520befc07c144a5699c661a8db78f9ce4caee1655dec813request.txt000066400000000000000000000001401456575232400351260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-key-duplicateGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value2 My-Header1:value2 My-Header1:value1 
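The get-header-key-duplicate files above fit together in a fixed pipeline: request.txt is canonicalized into header-canonical-request.txt, the SHA-256 of that canonical request becomes the last line of header-string-to-sign.txt, and the string to sign is HMAC-signed with a key derived from the secret key in context.json to produce header-signature.txt. The following sketch (illustrative Python, not part of the test suite) re-derives that final step from values quoted in the fixtures, assuming the standard AWS4-HMAC-SHA256 key-derivation chain.

# Minimal SigV4 signing sketch; uses only values quoted in context.json and
# header-string-to-sign.txt above. Not aws-c-auth code.
import hashlib
import hmac

def hmac_sha256(key: bytes, msg: str) -> bytes:
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

secret_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"      # context.json
string_to_sign = (                                            # header-string-to-sign.txt
    "AWS4-HMAC-SHA256\n"
    "20150830T123600Z\n"
    "20150830/us-east-1/service/aws4_request\n"
    "dc7f04a3abfde8d472b0ab1a418b741b7c67174dad1551b4117b15527fbe966c"
)

# Signing key: chained HMACs over date, region, service and the literal "aws4_request".
k_date = hmac_sha256(("AWS4" + secret_key).encode("utf-8"), "20150830")
k_region = hmac_sha256(k_date, "us-east-1")
k_service = hmac_sha256(k_region, "service")
k_signing = hmac_sha256(k_service, "aws4_request")

signature = hmac.new(k_signing, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
print(signature)                        # expected to match header-signature.txt
print(hashlib.sha256(b"").hexdigest())  # the recurring e3b0c442... empty-payload hash

The trailing e3b0c442... value at the end of every canonical request in this suite is nothing case-specific; it is simply the SHA-256 of an empty request body, since none of these GET cases sign a payload.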
get-header-value-multiline/000077500000000000000000000000001456575232400332565ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400356440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multiline{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002721456575232400406630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilineGET / host:example.amazonaws.com my-header1:value1 value2 value3 x-amz-date:20150830T123600Z host;my-header1;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400372350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilinecfd34249e4b1c8d6b91ef74165d41a32e5fab3306300901bb65a51a73575eefdheader-signed-request.txt000066400000000000000000000005021456575232400402010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilineGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 value2 value3 X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date, Signature=cfd34249e4b1c8d6b91ef74165d41a32e5fab3306300901bb65a51a73575eefd header-string-to-sign.txt000066400000000000000000000002121456575232400401240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilineAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request e99419459a677bc11de234014be3c4e72c1ea5b454ceb58b613061f5d7a162e8query-canonical-request.txt000066400000000000000000000005251456575232400406010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilineGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:value1 value2 value3 host;my-header1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400371520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilinee6f5def831211aca02987a44b96826706278c7bc078112ae0263659c5b2f2d56query-signed-request.txt000066400000000000000000000005461456575232400401260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilineGET /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Signature=e6f5def831211aca02987a44b96826706278c7bc078112ae0263659c5b2f2d56 HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 value2 value3 
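The query-* fixtures in each case directory exercise the presigned-URL flavor of SigV4: instead of an Authorization header, the algorithm, credential scope, date, expiry and signed-header list are carried as X-Amz-* query parameters, and the computed signature is appended as X-Amz-Signature. The helper below is a hypothetical illustration (not the library's URL builder) of how the query string at the top of query-canonical-request.txt is assembled; the %2F and %3B escapes in the fixtures come from percent-encoding the credential scope and the signed-header list.

# Hypothetical helper, not from aws-c-auth: builds the X-Amz-* parameters that the
# query-canonical-request.txt fixtures start from.
from urllib.parse import quote

def presign_query(access_key_id: str, scope: str, amz_date: str,
                  expires: int, signed_headers: str) -> str:
    params = [
        ("X-Amz-Algorithm", "AWS4-HMAC-SHA256"),
        ("X-Amz-Credential", f"{access_key_id}/{scope}"),
        ("X-Amz-Date", amz_date),
        ("X-Amz-Expires", str(expires)),
        ("X-Amz-SignedHeaders", signed_headers),
    ]
    # Canonical query strings are sorted by key; these happen to be sorted already.
    return "&".join(f"{key}={quote(value, safe='')}" for key, value in params)

print(presign_query("AKIDEXAMPLE", "20150830/us-east-1/service/aws4_request",
                    "20150830T123600Z", 3600, "host;my-header1"))

Only the canonical form has to be sorted; query-signed-request.txt emits X-Amz-Expires after X-Amz-SignedHeaders in the final URL and simply appends X-Amz-Signature at the end.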
query-string-to-sign.txt000066400000000000000000000002121456575232400400410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilineAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 0e73c10e35324b4d215da4bb70be61d13a3d30d569be4ed6e8fd8948965341carequest.txt000066400000000000000000000001211456575232400355010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-multilineGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 value2 value3 aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-order/000077500000000000000000000000001456575232400324465ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400347550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-order{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003011456575232400377650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-orderGET / host:example.amazonaws.com my-header1:value4,value1,value3,value2 x-amz-date:20150830T123600Z host;my-header1;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400363460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-order08c7e5a9acfcfeb3ab6b2185e75ce8b1deb5e634ec47601a50643f830c755c01header-signed-request.txt000066400000000000000000000005431456575232400373170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-orderGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value4 My-Header1:value1 My-Header1:value3 My-Header1:value2 X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date, Signature=08c7e5a9acfcfeb3ab6b2185e75ce8b1deb5e634ec47601a50643f830c755c01 header-string-to-sign.txt000066400000000000000000000002121456575232400372350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-orderAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 31ce73cd3f3d9f66977ad3dd957dc47af14df92fcd8509f59b349e9137c58b86query-canonical-request.txt000066400000000000000000000005341456575232400377120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-orderGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:value4,value1,value3,value2 host;my-header1 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400362630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-order313720e71ca6202fdcfa9b20f88de01a4eb0638a83c833b1c184359a4eda864equery-signed-request.txt000066400000000000000000000006071456575232400372350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-orderGET /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Signature=313720e71ca6202fdcfa9b20f88de01a4eb0638a83c833b1c184359a4eda864e HTTP/1.1 Host:example.amazonaws.com My-Header1:value4 My-Header1:value1 My-Header1:value3 My-Header1:value2 query-string-to-sign.txt000066400000000000000000000002121456575232400371520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-orderAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request f4d3f13084ba7664111670ce26458291d3e0c620acd9384f8cd6b60d8e83423erequest.txt000066400000000000000000000001621456575232400346170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-orderGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value4 My-Header1:value1 My-Header1:value3 My-Header1:value2 aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trim/000077500000000000000000000000001456575232400323065ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400346150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trim{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003121456575232400376270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimGET / host:example.amazonaws.com my-header1:value1 my-header2:"a b c" x-amz-date:20150830T123600Z host;my-header1;my-header2;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400362060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimacc3ed3afb60bb290fc8d2dd0098b9911fcaa05412b367055dee359757a9c736header-signed-request.txt000066400000000000000000000005211456575232400371530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimGET / HTTP/1.1 Host:example.amazonaws.com My-Header1: value1 My-Header2: "a b c" X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;my-header1;my-header2;x-amz-date, Signature=acc3ed3afb60bb290fc8d2dd0098b9911fcaa05412b367055dee359757a9c736 
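The four get-header-* cases above (duplicate, multiline, order, trim) all target header canonicalization: names are lowercased and sorted, values are trimmed, and repeated headers keep their arrival order and are joined with commas, which is why my-header1:value2,value2,value1 is signed as-is rather than sorted. The sketch below is a minimal reading of those fixtures (the whitespace-collapsing is inferred from the trim and multiline cases), not the signer in aws-c-auth.

# Illustrative canonical-header builder; a sketch inferred from the fixtures above,
# not aws-c-auth code. Lowercase names, trim/collapse values, comma-join duplicates
# in arrival order, then sort the names.
import re

def canonical_headers(headers: list[tuple[str, str]]) -> tuple[str, str]:
    merged: dict[str, list[str]] = {}
    for name, value in headers:
        cleaned = re.sub(r"\s+", " ", value.strip())   # trim ends, collapse inner whitespace
        merged.setdefault(name.lower(), []).append(cleaned)
    names = sorted(merged)
    canonical = "".join(f"{name}:{','.join(merged[name])}\n" for name in names)
    return canonical, ";".join(names)

canonical, signed_headers = canonical_headers([
    ("Host", "example.amazonaws.com"),
    ("My-Header1", "value2"),
    ("My-Header1", "value2"),
    ("My-Header1", "value1"),
    ("X-Amz-Date", "20150830T123600Z"),
])
print(canonical)       # header block of get-header-key-duplicate's canonical request
print(signed_headers)  # host;my-header1;x-amz-date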
header-string-to-sign.txt000066400000000000000000000002121456575232400370750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request a726db9b0df21c14f559d0a978e563112acb1b9e05476f0a6a1c7d68f28605c7query-canonical-request.txt000066400000000000000000000005621456575232400375530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host%3Bmy-header1%3Bmy-header2 host:example.amazonaws.com my-header1:value1 my-header2:"a b c" host;my-header1;my-header2 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400361230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trime7bb0fd515e125e1aec2ecc4c0c17484fb06f6846b927c35e46005dd3df3acd4query-signed-request.txt000066400000000000000000000005671456575232400371020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimGET /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1%3Bmy-header2&X-Amz-Expires=3600&X-Amz-Signature=e7bb0fd515e125e1aec2ecc4c0c17484fb06f6846b927c35e46005dd3df3acd4 HTTP/1.1 Host:example.amazonaws.com My-Header1: value1 My-Header2: "a b c" query-string-to-sign.txt000066400000000000000000000002121456575232400370120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 84c9e353b6161b689210977f93b93e6a7182f9ecb2ceae8af8c3d86b080a88aerequest.txt000066400000000000000000000001251456575232400344560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-header-value-trimGET / HTTP/1.1 Host:example.amazonaws.com My-Header1: value1 My-Header2: "a b c" aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalized/000077500000000000000000000000001456575232400327505ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400352570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002171456575232400402750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizedGET / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400366500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalized5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31header-signed-request.txt000066400000000000000000000004321456575232400376160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizedGET /example/.. HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31 header-string-to-sign.txt000066400000000000000000000002121456575232400375370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb579772317eb040ac9ed261061d46c1f17a8133879d6129b6e1c25292927e63query-canonical-request.txt000066400000000000000000000004351456575232400402140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizedGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400365650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizede93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865dquery-signed-request.txt000066400000000000000000000004741456575232400375410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizedGET /example/..?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=e93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865d HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400374540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb7705b4aa3cb8e8f5e1e0b3d4c0b64030797a313c8ceee43e33117cc43eadc5request.txt000066400000000000000000000000641456575232400351220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-normalizedGET /example/.. 
HTTP/1.1 Host:example.amazonaws.com get-relative-relative-normalized/000077500000000000000000000000001456575232400345025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400370700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002171456575232400421060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizedGET / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400404610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalized5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31header-signed-request.txt000066400000000000000000000004471456575232400414350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizedGET /example1/example2/../.. HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31 header-string-to-sign.txt000066400000000000000000000002121456575232400413500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb579772317eb040ac9ed261061d46c1f17a8133879d6129b6e1c25292927e63query-canonical-request.txt000066400000000000000000000004351456575232400420250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizedGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400403760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizede93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865dquery-signed-request.txt000066400000000000000000000005111456575232400413420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizedGET /example1/example2/../..?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=e93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865d HTTP/1.1 Host:example.amazonaws.com 
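Each context.json in this suite carries a "normalize" flag. In the *-normalized cases the path is reduced to its dot-segment-free form before it reaches the canonical request (GET /example/.. and GET /example1/example2/../.. both canonicalize to GET /), while the *-unnormalized twins that follow sign the path exactly as sent. The sketch below is a simplified normalizer covering only the shapes these fixtures exercise (".", "..", and doubled slashes); it is not the library's full RFC 3986 handling.

# Simplified path normalization matching the *-normalized fixtures in this suite;
# the *-unnormalized cases skip this step. Illustration only, not library code
# (for example, trailing slashes are not preserved here).
def normalize_path(path: str) -> str:
    stack: list[str] = []
    for segment in path.split("/"):
        if segment in ("", "."):
            continue                 # drop empty ("//") and "." segments
        if segment == "..":
            if stack:
                stack.pop()          # ".." removes the previous segment
            continue
        stack.append(segment)
    return "/" + "/".join(stack)

for raw in ("/example/..", "/example1/example2/../..", "/./", "//", "/./example"):
    print(raw, "->", normalize_path(raw))   # all but the last collapse to "/"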
query-string-to-sign.txt000066400000000000000000000002121456575232400412650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb7705b4aa3cb8e8f5e1e0b3d4c0b64030797a313c8ceee43e33117cc43eadc5request.txt000066400000000000000000000001011456575232400367230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-normalizedGET /example1/example2/../.. HTTP/1.1 Host:example.amazonaws.com get-relative-relative-unnormalized/000077500000000000000000000000001456575232400350455ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004771456575232400374340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002461456575232400424530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedGET /example1/example2/../.. host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400410240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizeddc33e0856fd4baca4d7aa2146c38958283844764f38c74252a333df5e613003bheader-signed-request.txt000066400000000000000000000004471456575232400420000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedGET /example1/example2/../.. HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=dc33e0856fd4baca4d7aa2146c38958283844764f38c74252a333df5e613003b header-string-to-sign.txt000066400000000000000000000002121456575232400417130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 76115030c0f3ff06c20fdff5ceb6d5e0b835a1743e00b94fea7c7f381269437bquery-canonical-request.txt000066400000000000000000000004641456575232400423720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedGET /example1/example2/../.. 
X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400407410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedb45db0bfd1cf15003493b733e33aa208dd981bd0e63037a6ed953f71d2118a16query-signed-request.txt000066400000000000000000000005111456575232400417050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedGET /example1/example2/../..?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=b45db0bfd1cf15003493b733e33aa208dd981bd0e63037a6ed953f71d2118a16 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400416300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 13b7758115ec070fe36a7ae9d754154ad73f6ee9a5eac022494857d0e7effc18request.txt000066400000000000000000000001011456575232400372660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-relative-unnormalizedGET /example1/example2/../.. HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalized/000077500000000000000000000000001456575232400333135ustar00rootroot00000000000000context.json000066400000000000000000000004771456575232400356230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002311456575232400406340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedGET /example/.. host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400372130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedeca7ead57bb5aa5c8e28007acd4ff04e1ff9a0ff3b237ec1554a184887ff9282header-signed-request.txt000066400000000000000000000004321456575232400401610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedGET /example/.. 
HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=eca7ead57bb5aa5c8e28007acd4ff04e1ff9a0ff3b237ec1554a184887ff9282 header-string-to-sign.txt000066400000000000000000000002121456575232400401020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 0511f456aa502b456d135fcb9d749374a55228f9dbeedda1eacf659e05b0615bquery-canonical-request.txt000066400000000000000000000004471456575232400405620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedGET /example/.. X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400371300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedcbcb213b928a077e43275df47b500f1dfaa864ab3f5a18f6b95f4ff0938167eequery-signed-request.txt000066400000000000000000000004741456575232400401040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedGET /example/..?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=cbcb213b928a077e43275df47b500f1dfaa864ab3f5a18f6b95f4ff0938167ee HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400400170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 6e02e2dba21d6f269bae339b86ff27a4b7bbc9d88c482abccbfbf8a49d602482request.txt000066400000000000000000000000641456575232400354650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-relative-unnormalizedGET /example/.. 
HTTP/1.1 Host:example.amazonaws.com get-slash-dot-slash-normalized/000077500000000000000000000000001456575232400340645ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400364520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002171456575232400414700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizedGET / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400400430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalized5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31header-signed-request.txt000066400000000000000000000004221456575232400410100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizedGET /./ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31 header-string-to-sign.txt000066400000000000000000000002121456575232400407320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb579772317eb040ac9ed261061d46c1f17a8133879d6129b6e1c25292927e63query-canonical-request.txt000066400000000000000000000004351456575232400414070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizedGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400377600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizede93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865dquery-signed-request.txt000066400000000000000000000004641456575232400407330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizedGET /./?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=e93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865d HTTP/1.1 Host:example.amazonaws.com 
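CMakeLists.txt above copies aws-signing-test-suite next to the generated test binary, and each directory such as get-slash-dot-slash-normalized is a self-contained case: context.json holds the signing inputs, request.txt the raw request, and the remaining files the expected canonical request, string to sign, signature and signed request for both the header and query flavors. The snippet below sketches how one such directory could be consumed by a checker; it is purely illustrative (the real consumers are the C sigv4_* test cases registered above), and sign_request is a hypothetical callable, not an existing API.

# Illustrative fixture-driven check; sign_request is a hypothetical signer, and the
# real consumers of these directories are the C sigv4_* tests, not this snippet.
import json
from pathlib import Path

def check_case(case_dir: Path, sign_request) -> bool:
    context = json.loads((case_dir / "context.json").read_text())
    raw_request = (case_dir / "request.txt").read_text()
    expected = (case_dir / "header-signature.txt").read_text().strip()
    return sign_request(raw_request, context) == expected

# Example wiring, relative to the copied suite:
# check_case(Path("aws-signing-test-suite/v4/get-slash-dot-slash-normalized"), my_signer)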
query-string-to-sign.txt000066400000000000000000000002121456575232400406470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb7705b4aa3cb8e8f5e1e0b3d4c0b64030797a313c8ceee43e33117cc43eadc5request.txt000066400000000000000000000000541456575232400363140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-normalizedGET /./ HTTP/1.1 Host:example.amazonaws.com get-slash-dot-slash-unnormalized/000077500000000000000000000000001456575232400344275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004771456575232400370160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002211456575232400420260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizedGET /./ host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400404060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalized68714168e6557f8f2de0ef956fc24dc2593a4bd2961f8df51898d8a134695145header-signed-request.txt000066400000000000000000000004221456575232400413530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizedGET /./ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=68714168e6557f8f2de0ef956fc24dc2593a4bd2961f8df51898d8a134695145 header-string-to-sign.txt000066400000000000000000000002121456575232400412750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request d67825e2268bd77a97c7688b8d72c31a3c1855b309808505ba0a9747d2465aa7query-canonical-request.txt000066400000000000000000000004371456575232400417540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizedGET /./ X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400403230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizeda9b13e8f3484d9505bf1a6f347219f8f35b0fe8f128ceea597efc146a3dfe90cquery-signed-request.txt000066400000000000000000000004641456575232400412760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizedGET /./?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=a9b13e8f3484d9505bf1a6f347219f8f35b0fe8f128ceea597efc146a3dfe90c HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400412120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 21b2da17873f30c1a7410efdc271738b318ccc2c9c6fbe1289fc242aeb1a8ae1request.txt000066400000000000000000000000541456575232400366570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-dot-slash-unnormalizedGET /./ HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalized/000077500000000000000000000000001456575232400322475ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400345560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002171456575232400375740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizedGET / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400361470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalized5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31header-signed-request.txt000066400000000000000000000004211456575232400371130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizedGET // HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31 header-string-to-sign.txt000066400000000000000000000002121456575232400370360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
bb579772317eb040ac9ed261061d46c1f17a8133879d6129b6e1c25292927e63query-canonical-request.txt000066400000000000000000000004351456575232400375130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizedGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400360640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizede93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865dquery-signed-request.txt000066400000000000000000000004631456575232400370360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizedGET //?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=e93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865d HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400367530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb7705b4aa3cb8e8f5e1e0b3d4c0b64030797a313c8ceee43e33117cc43eadc5request.txt000066400000000000000000000000531456575232400344170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-normalizedGET // HTTP/1.1 Host:example.amazonaws.com get-slash-pointless-dot-normalized/000077500000000000000000000000001456575232400347725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400373600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002261456575232400423760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedGET /example host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400407510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedef75d96142cf21edca26f06005da7988e4f8dc83a165a80865db7089db637ec5header-signed-request.txt000066400000000000000000000004311456575232400417160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedGET /./example HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, 
SignedHeaders=host;x-amz-date, Signature=ef75d96142cf21edca26f06005da7988e4f8dc83a165a80865db7089db637ec5 header-string-to-sign.txt000066400000000000000000000002121456575232400416400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 214d50c111a8edc4819da6a636336472c916b5240f51e9a51b5c3305180cf702query-canonical-request.txt000066400000000000000000000004441456575232400423150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedGET /example X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400406660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalized35034b1a0bdd969f346975386daf8aedfd4976573b8348cf4f67eaa41c5857dequery-signed-request.txt000066400000000000000000000004731456575232400416410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedGET /./example?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=35034b1a0bdd969f346975386daf8aedfd4976573b8348cf4f67eaa41c5857de HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400415550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 88dd73bb06de69cb042dbb82cadbb4fce1a2623615d94520c271614abf94e738request.txt000066400000000000000000000000631456575232400372220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-normalizedGET /./example HTTP/1.1 Host:example.amazonaws.com get-slash-pointless-dot-unnormalized/000077500000000000000000000000001456575232400353355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004771456575232400377240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002301456575232400427340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedGET /./example host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400413140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedbeb03f223f7deae4146464f06e29eebbee9c8afbe15c290cf07aa8b119e14cffheader-signed-request.txt000066400000000000000000000004311456575232400422610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedGET /./example HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=beb03f223f7deae4146464f06e29eebbee9c8afbe15c290cf07aa8b119e14cff header-string-to-sign.txt000066400000000000000000000002121456575232400422030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 73895e0e829507e28e39fd24669aedc2434a8e179e547e3c075b42921f952cdbquery-canonical-request.txt000066400000000000000000000004461456575232400426620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedGET /./example X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400412310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalized72b11f16d9530b18204bfb71f2d6ab085894c0ed8d352730a6e76234c58b5e10query-signed-request.txt000066400000000000000000000004731456575232400422040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedGET /./example?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=72b11f16d9530b18204bfb71f2d6ab085894c0ed8d352730a6e76234c58b5e10 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400421200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request cb23444939471187ac1f0fb25d437337af0d5f48b5ae0ede5baa3727c8c6af92request.txt000066400000000000000000000000631456575232400375650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-pointless-dot-unnormalizedGET /./example HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalized/000077500000000000000000000000001456575232400326125ustar00rootroot00000000000000context.json000066400000000000000000000004771456575232400351220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": 
"wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002201456575232400401310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedGET // host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400365120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedc88bcd3d312d75078c0cd961d6deae3f4c754924b01669efcfcb439fd5e5b76eheader-signed-request.txt000066400000000000000000000004211456575232400374560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedGET // HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=c88bcd3d312d75078c0cd961d6deae3f4c754924b01669efcfcb439fd5e5b76e header-string-to-sign.txt000066400000000000000000000002121456575232400374010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 80cb39203d058af815de2b79250ff56e1b73eb9b4718c86556cdc6f150c5d209query-canonical-request.txt000066400000000000000000000004361456575232400400570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedGET // X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400364270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedb2a9542809db687769200f56a844a03f2bd0291d6eb90232d5101c6a579446cdquery-signed-request.txt000066400000000000000000000004631456575232400374010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedGET //?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=b2a9542809db687769200f56a844a03f2bd0291d6eb90232d5101c6a579446cd HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400373160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request e10a01494cafa1f2207c4de80c69b7a36d6f2d282aa63ee64a5ca50e175cf730request.txt000066400000000000000000000000531456575232400347620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slash-unnormalizedGET // HTTP/1.1 Host:example.amazonaws.com 
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalized/000077500000000000000000000000001456575232400325775ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400351060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002271456575232400401250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedGET /example/ host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400364770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalized9a624bd73a37c9a373b5312afbebe7a714a789de108f0bdfe846570885f57e84header-signed-request.txt000066400000000000000000000004321456575232400374450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedGET //example// HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=9a624bd73a37c9a373b5312afbebe7a714a789de108f0bdfe846570885f57e84 header-string-to-sign.txt000066400000000000000000000002121456575232400373660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request cb96b4ac96d501f7c5c15bc6d67b3035061cfced4af6585ad927f7e6c985c015query-canonical-request.txt000066400000000000000000000004451456575232400400440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedGET /example/ X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400364140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedc1834e8fb0307243711f0f907f6ab7311ed300d87f13792d7ee4da89ab93e082query-signed-request.txt000066400000000000000000000004741456575232400373700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedGET //example//?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=c1834e8fb0307243711f0f907f6ab7311ed300d87f13792d7ee4da89ab93e082 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400373030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedAWS4-HMAC-SHA256 
20150830T123600Z 20150830/us-east-1/service/aws4_request 1a3e3fe19998c62136ed2ff9c8531973a46d4a21b336f58e8fcad185ec64d642request.txt000066400000000000000000000000641456575232400347510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-normalizedGET //example// HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalized/000077500000000000000000000000001456575232400331425ustar00rootroot00000000000000context.json000066400000000000000000000004771456575232400354520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002311456575232400404630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalizedGET //example// host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400370420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalized87cca117541a147f6df867677d98a7d80dff226d2bfca9e4ffa899665623c7e5header-signed-request.txt000066400000000000000000000004321456575232400400100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalizedGET //example// HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=87cca117541a147f6df867677d98a7d80dff226d2bfca9e4ffa899665623c7e5 header-string-to-sign.txt000066400000000000000000000002121456575232400377310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 528ec3105ee1f34ab014bb0a1a45da0ed2742a4fea3555149e5b4d5d201eb240query-canonical-request.txt000066400000000000000000000004471456575232400404110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalizedGET //example// X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400367570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalized822a5a9ba28072c5ab2bb4a6307d0c88276d40e49fec6b724c03fb4d4ba60fc2query-signed-request.txt000066400000000000000000000004741456575232400377330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalizedGET 
//example//?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=822a5a9ba28072c5ab2bb4a6307d0c88276d40e49fec6b724c03fb4d4ba60fc2 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400376460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 82241114f9a6320f054333c890952474aa7a0069d28a4326c3a8d95bbecc69eerequest.txt000066400000000000000000000000641456575232400353140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-slashes-unnormalizedGET //example// HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalized/000077500000000000000000000000001456575232400322305ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400345370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002371456575232400375570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalizedGET /example%20space/ host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400361300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalized652487583200325589f1fba4c7e578f72c47cb61beeca81406b39ddec1366741header-signed-request.txt000066400000000000000000000004361456575232400371020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=652487583200325589f1fba4c7e578f72c47cb61beeca81406b39ddec1366741 header-string-to-sign.txt000066400000000000000000000002121456575232400370170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 63ee75631ed7234ae61b5f736dfc7754cdccfedbff4b5128a915706ee9390d86query-canonical-request.txt000066400000000000000000000004551456575232400374760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalizedGET /example%20space/ X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400360450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalized7a1f416954786484c9824d93c1f26ef64acb9b1b6c9154d08c9f07d0e394abf6query-signed-request.txt000066400000000000000000000005001456575232400370070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalizedGET /example space/?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=7a1f416954786484c9824d93c1f26ef64acb9b1b6c9154d08c9f07d0e394abf6 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400367340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request cc3dd817bd405e51225826ec4934a96d065d6af5b6b0163c7a5abbd26a84519erequest.txt000066400000000000000000000000701456575232400343770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-normalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalized/000077500000000000000000000000001456575232400325735ustar00rootroot00000000000000context.json000066400000000000000000000004771456575232400351030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002371456575232400401220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalizedGET /example%20space/ host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400364730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalized652487583200325589f1fba4c7e578f72c47cb61beeca81406b39ddec1366741header-signed-request.txt000066400000000000000000000004361456575232400374450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=652487583200325589f1fba4c7e578f72c47cb61beeca81406b39ddec1366741 header-string-to-sign.txt000066400000000000000000000002121456575232400373620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
63ee75631ed7234ae61b5f736dfc7754cdccfedbff4b5128a915706ee9390d86query-canonical-request.txt000066400000000000000000000004551456575232400400410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalizedGET /example%20space/ X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400364100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalized7a1f416954786484c9824d93c1f26ef64acb9b1b6c9154d08c9f07d0e394abf6query-signed-request.txt000066400000000000000000000005001456575232400373520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalizedGET /example space/?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=7a1f416954786484c9824d93c1f26ef64acb9b1b6c9154d08c9f07d0e394abf6 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400372770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalizedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request cc3dd817bd405e51225826ec4934a96d065d6af5b6b0163c7a5abbd26a84519erequest.txt000066400000000000000000000000701456575232400347420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-space-unnormalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreserved/000077500000000000000000000000001456575232400311555ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400334640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreserved{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003211456575232400364760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400350550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreserved07ef7494c76fa4850883e2b006601f940f8a34d404d0cfa977f52a65bbf5f24fheader-signed-request.txt000066400000000000000000000005221456575232400360230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z 
Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=07ef7494c76fa4850883e2b006601f940f8a34d404d0cfa977f52a65bbf5f24f header-string-to-sign.txt000066400000000000000000000002121456575232400357440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreservedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 6a968768eefaa713e2a6b16b589a8ea192661f098f37349f4e2c0082757446f9query-canonical-request.txt000066400000000000000000000005371456575232400364240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400347720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreserved95968482db1b9e0fadef6efc1bd24689f77c77d9ef56919c96a28cc92e0d6005query-signed-request.txt000066400000000000000000000005641456575232400357460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=95968482db1b9e0fadef6efc1bd24689f77c77d9ef56919c96a28cc92e0d6005 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400356610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreservedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 3134f2fbc6ef58f34b74b01643a159bfccbc121ea4288a4b75e65bc805d08219aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-unreserved/request.txt000066400000000000000000000001541456575232400334060ustar00rootroot00000000000000GET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8/000077500000000000000000000000001456575232400276615ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8/context.json000066400000000000000000000004761456575232400322470ustar00rootroot00000000000000{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002301456575232400352010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8GET /%E1%88%B4 host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400335610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf88318018e0b0f223aa2bbf98705b62bb787dc9c0e678f255a891fd03141be5d85header-signed-request.txt000066400000000000000000000004231456575232400345270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8GET /ሴ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=8318018e0b0f223aa2bbf98705b62bb787dc9c0e678f255a891fd03141be5d85 header-string-to-sign.txt000066400000000000000000000002121456575232400344500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8AWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 2a0a97d02205e45ce2e994789806b19270cfbbb0921b278ccf58f5249ac42102query-canonical-request.txt000066400000000000000000000004461456575232400351270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8GET /%E1%88%B4 X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400334760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf810eae3f14a260bd3911cc6d008d3c576d143b05b62f09782a7a4b37f52178e44query-signed-request.txt000066400000000000000000000004651456575232400344520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8GET /ሴ?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=10eae3f14a260bd3911cc6d008d3c576d143b05b62f09782a7a4b37f52178e44 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400343650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8AWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request d8f78e05d0d9bb1c16ae1e60ed405de76cfb8fdcd3e414ffe65fad563b996d66aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-utf8/request.txt000066400000000000000000000000551456575232400321120ustar00rootroot00000000000000GET /ሴ HTTP/1.1 Host:example.amazonaws.com get-vanilla-empty-query-key/000077500000000000000000000000001456575232400334275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400360150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-key{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" 
}header-canonical-request.txt000066400000000000000000000002341456575232400410320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keyGET / Param1=value1 host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400374060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keya67d582fa61cc504c4bae71f336f98b97f1ea3c7a6bfe1b6e45aec72011b9aebheader-signed-request.txt000066400000000000000000000004361456575232400403600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keyGET /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=a67d582fa61cc504c4bae71f336f98b97f1ea3c7a6bfe1b6e45aec72011b9aeb header-string-to-sign.txt000066400000000000000000000002121456575232400402750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keyAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 1e24db194ed7d0eec2de28d7369675a243488e08526e8c1c73571282f7c517abquery-canonical-request.txt000066400000000000000000000004531456575232400407520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keyGET / Param1=value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400373230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-key49096700cbbaa5753443850f40df10f904fc2fdb544dc9512203cc77c471a9dequery-signed-request.txt000066400000000000000000000005001456575232400402650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keyGET /?Param1=value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=49096700cbbaa5753443850f40df10f904fc2fdb544dc9512203cc77c471a9de HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400402120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keyAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 74828b608f939673ed5a839190452a943b0178760f258da766209b21f8ca3f86request.txt000066400000000000000000000000701456575232400356550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-empty-query-keyGET /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com 
get-vanilla-query-order-encoded/000077500000000000000000000000001456575232400342155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400366030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encoded{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002731456575232400416230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedGET / %E1%88%B4=Value1&Param=Value2&Param-3=Value3 host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400401740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encoded371d3713e185cc334048618a97f809c9ffe339c62934c032af5a0e595648fcacheader-signed-request.txt000066400000000000000000000004751456575232400411510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedGET /?Param-3=Value3&Param=Value2&%E1%88%B4=Value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=371d3713e185cc334048618a97f809c9ffe339c62934c032af5a0e595648fcac header-string-to-sign.txt000066400000000000000000000002121456575232400410630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 868294f5c38bd141c4972a373a76654f1418a8e4fc18b2e7903ae45e8ae0ec71query-canonical-request.txt000066400000000000000000000005121456575232400415340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedGET / %E1%88%B4=Value1&Param=Value2&Param-3=Value3&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400401110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedc5f1848ceec943ac2ca68ee720460c23aaae30a2300586597ada94c4a65e4787query-signed-request.txt000066400000000000000000000005371456575232400410650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedGET /?Param-3=Value3&Param=Value2&%E1%88%B4=Value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=c5f1848ceec943ac2ca68ee720460c23aaae30a2300586597ada94c4a65e4787 HTTP/1.1 
Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400410000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 9808d6cdc8245c74e705c68350fc6a62b91778cdd613890e5d177998c02c6f53request.txt000066400000000000000000000001271456575232400364460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-encodedGET /?Param-3=Value3&Param=Value2&%E1%88%B4=Value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-order-key-case/000077500000000000000000000000001456575232400343155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400367030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-case{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002521456575232400417200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseGET / Param1=value1&Param2=value2 host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400402740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseb97d918cfa904a5beff61c982a1b6f458b799221646efd99d3219ec94cdf2500header-signed-request.txt000066400000000000000000000004541456575232400412460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseGET /?Param2=value2&Param1=value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=b97d918cfa904a5beff61c982a1b6f458b799221646efd99d3219ec94cdf2500 header-string-to-sign.txt000066400000000000000000000002121456575232400411630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 816cd5b414d056048ba4f7c5386d6e0533120fb1fcfa93762cf0fc39e2cf19e0query-canonical-request.txt000066400000000000000000000004711456575232400416400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseGET / Param1=value1&Param2=value2&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400402110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-case86012e2c9ad4d77369f5d81c11f75158aae4f895a085212cc6d3f923d300bed5query-signed-request.txt000066400000000000000000000005161456575232400411620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseGET /?Param2=value2&Param1=value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=86012e2c9ad4d77369f5d81c11f75158aae4f895a085212cc6d3f923d300bed5 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400411000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request b82878ecb2ab7ad194b9fe79b2946c2a36ee1627a219408089b2d774c1a0cedbrequest.txt000066400000000000000000000001061456575232400365430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key-caseGET /?Param2=value2&Param1=value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-order-key/000077500000000000000000000000001456575232400334045ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400357720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-key{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }request.txt000066400000000000000000000001061456575232400356320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-keyGET /?Param1=value2&Param1=Value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-order-value/000077500000000000000000000000001456575232400337305ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400363160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-value{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }request.txt000066400000000000000000000001061456575232400361560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-order-valueGET /?Param1=value2&Param1=value1 HTTP/1.1 Host:example.amazonaws.com 
get-vanilla-query-unreserved/000077500000000000000000000000001456575232400336655ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400362530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreserved{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000004241456575232400412710ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreservedGET / -._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400376440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreserved9c3e54bfcdf0b19771a7f523ee5669cdf59bc7cc0884027167c21bb143a40197header-signed-request.txt000066400000000000000000000006261456575232400406170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreservedGET /?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=9c3e54bfcdf0b19771a7f523ee5669cdf59bc7cc0884027167c21bb143a40197 header-string-to-sign.txt000066400000000000000000000002121456575232400405330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreservedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request c30d4703d9f799439be92736156d47ccfb2d879ddf56f5befa6d1d6aab979177query-canonical-request.txt000066400000000000000000000006431456575232400412110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreservedGET / -._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400375610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreserved8e76a88a7433637b12778d5592799b29ad21ecd6cf6325051c21d86f0acda2bfquery-signed-request.txt000066400000000000000000000006701456575232400405330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreservedGET 
/?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=8e76a88a7433637b12778d5592799b29ad21ecd6cf6325051c21d86f0acda2bf HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400404500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreservedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request c77f27ef2b499fbfee3f0b3c88bba7057b2b31ee9b62047078a5e0d6be91fd58request.txt000066400000000000000000000002601456575232400361140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query-unreservedGET /?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query/000077500000000000000000000000001456575232400315645ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400340730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002171456575232400371110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-queryGET / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400354640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-query5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31header-signed-request.txt000066400000000000000000000004201456575232400364270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-queryGET / HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31 header-string-to-sign.txt000066400000000000000000000002121456575232400363530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-queryAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb579772317eb040ac9ed261061d46c1f17a8133879d6129b6e1c25292927e63query-canonical-request.txt000066400000000000000000000004351456575232400370300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-queryGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host 
host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400354010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-querye93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865dquery-signed-request.txt000066400000000000000000000004621456575232400363520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-queryGET /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=e93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865d HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400362700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-queryAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb7705b4aa3cb8e8f5e1e0b3d4c0b64030797a313c8ceee43e33117cc43eadc5request.txt000066400000000000000000000000521456575232400337330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-queryGET / HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-query/000077500000000000000000000000001456575232400324505ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400347570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-query{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002341456575232400377740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-queryGET / %E1%88%B4=bar host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400363500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-query2cdec8eed098649ff3a119c94853b13c643bcf08f8b0a1d91e12c9027818dd04header-signed-request.txt000066400000000000000000000004301456575232400373140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-queryGET /?ሴ=bar HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=2cdec8eed098649ff3a119c94853b13c643bcf08f8b0a1d91e12c9027818dd04 header-string-to-sign.txt000066400000000000000000000002121456575232400372370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-queryAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
eb30c5bed55734080471a834cc727ae56beb50e5f39d1bff6d0d38cb192a7073query-canonical-request.txt000066400000000000000000000004531456575232400377140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-queryGET / %E1%88%B4=bar&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400362650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-query0bdd809b1519ac4f0c1dc3540e2cc46bd0c7f778eda408b2ebf3b913d21ff600query-signed-request.txt000066400000000000000000000004721456575232400372370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-queryGET /?ሴ=bar&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=0bdd809b1519ac4f0c1dc3540e2cc46bd0c7f778eda408b2ebf3b913d21ff600 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400371540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-queryAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request b2e8ae8e48ec880defebc6f6dc1d9fbc9f1856b33146f2943060ca71a50a7a77request.txt000066400000000000000000000000621456575232400346200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-utf8-queryGET /?ሴ=bar HTTP/1.1 Host:example.amazonaws.com get-vanilla-with-session-token/000077500000000000000000000000001456575232400341125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000006231456575232400364720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-token{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", "token": "6e86291e8372ff2a2260956d9b8aae1d763fbf315fa00fa31553b73ebf194267" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003721456575232400415200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-tokenGET / host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-security-token:6e86291e8372ff2a2260956d9b8aae1d763fbf315fa00fa31553b73ebf194267 host;x-amz-date;x-amz-security-token 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400400710ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-token07ec1639c89043aa0e3e2de82b96708f198cceab042d4a97044c66dd9f74e7f8header-signed-request.txt000066400000000000000000000005731456575232400410450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-tokenGET / HTTP/1.1 Host:example.amazonaws.com X-Amz-Security-Token:6e86291e8372ff2a2260956d9b8aae1d763fbf315fa00fa31553b73ebf194267 X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=07ec1639c89043aa0e3e2de82b96708f198cceab042d4a97044c66dd9f74e7f8 header-string-to-sign.txt000066400000000000000000000002121456575232400407600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-tokenAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 067b36aa60031588cea4a4cde1f21215227a047690c72247f1d70b32fbbfad2bquery-canonical-request.txt000066400000000000000000000005631456575232400414370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-tokenGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Security-Token=6e86291e8372ff2a2260956d9b8aae1d763fbf315fa00fa31553b73ebf194267&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400400060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-token7ff2b50b376cb4d151970630573d6291dc128cc5c2a12ffb237f73cc53f67b6cquery-signed-request.txt000066400000000000000000000006101456575232400407520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-tokenGET /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Security-Token=6e86291e8372ff2a2260956d9b8aae1d763fbf315fa00fa31553b73ebf194267&X-Amz-Signature=7ff2b50b376cb4d151970630573d6291dc128cc5c2a12ffb237f73cc53f67b6c HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400406750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-tokenAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request cb30434103085aa9e73780fc60dc9a9df818fd2a7b1de12a9f6f4d791f898761request.txt000066400000000000000000000000521456575232400363400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla-with-session-tokenGET / HTTP/1.1 Host:example.amazonaws.com 
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla/000077500000000000000000000000001456575232400304215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla/context.json000066400000000000000000000004761456575232400330070ustar00rootroot00000000000000{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002171456575232400357460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanillaGET / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400343210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31header-signed-request.txt000066400000000000000000000004201456575232400352640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanillaGET / HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31 header-string-to-sign.txt000066400000000000000000000002121456575232400352100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanillaAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request bb579772317eb040ac9ed261061d46c1f17a8133879d6129b6e1c25292927e63query-canonical-request.txt000066400000000000000000000004351456575232400356650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanillaGET / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400342360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanillae93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865dquery-signed-request.txt000066400000000000000000000004621456575232400352070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanillaGET /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=e93c787ed7f371d5c6b165c1b38ede9550f4dce4144713e844b25b7192d3865d HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400351250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanillaAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
bb7705b4aa3cb8e8f5e1e0b3d4c0b64030797a313c8ceee43e33117cc43eadc5aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/get-vanilla/request.txt000066400000000000000000000000521456575232400326470ustar00rootroot00000000000000GET / HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-case/000077500000000000000000000000001456575232400321305ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400344370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-case{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002201456575232400374470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-casePOST / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400360300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-case5da7c1a2acd57cee7505fc6676e4e544621c30862966e37dddb68e92efbe5d6bheader-signed-request.txt000066400000000000000000000004211456575232400367740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-casePOST / HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5da7c1a2acd57cee7505fc6676e4e544621c30862966e37dddb68e92efbe5d6b header-string-to-sign.txt000066400000000000000000000002121456575232400367170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-caseAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 553f88c9e4d10fc9e109e2aeb65f030801b70c2f6468faca261d401ae622fc87query-canonical-request.txt000066400000000000000000000004361456575232400373750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-casePOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400357450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-case2ce6e6d2e0cf2f9d1b55fafec88cd20574c31dc2e7631979f71ba2310083e95bquery-signed-request.txt000066400000000000000000000004631456575232400367170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-casePOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=2ce6e6d2e0cf2f9d1b55fafec88cd20574c31dc2e7631979f71ba2310083e95b HTTP/1.1 
Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400366340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-caseAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request d27fc6fe1afc1d88b248c5ae9194ec0943a693dd6d81d8d815c88a369eb0471erequest.txt000066400000000000000000000000531456575232400343000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-casePOST / HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sort/000077500000000000000000000000001456575232400322045ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400345130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sort{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002551456575232400375330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortPOST / host:example.amazonaws.com my-header1:value1 x-amz-date:20150830T123600Z host;my-header1;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400361040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortc5410059b04c1ee005303aed430f6e6645f61f4dc9e1461ec8f8916fdf18852cheader-signed-request.txt000066400000000000000000000004561456575232400370600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortPOST / HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date, Signature=c5410059b04c1ee005303aed430f6e6645f61f4dc9e1461ec8f8916fdf18852c header-string-to-sign.txt000066400000000000000000000002121456575232400367730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 9368318c2967cf6de74404b30c65a91e8f6253e0a8659d6d5319f1a812f87d65query-canonical-request.txt000066400000000000000000000005101456575232400374420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortPOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:value1 host;my-header1 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400360210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortc09d07e0d55871f10f2a6d350d994acf6825a3cae70673d7def55616e6119dd7query-signed-request.txt000066400000000000000000000005221456575232400367670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortPOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Signature=c09d07e0d55871f10f2a6d350d994acf6825a3cae70673d7def55616e6119dd7 HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 query-string-to-sign.txt000066400000000000000000000002121456575232400367100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 1295ed77e4ef8b18b32815e493e0b0f78ee47615c0ecbebfec7c75709eb58c88request.txt000066400000000000000000000000751456575232400343600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-key-sortPOST / HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-case/000077500000000000000000000000001456575232400324545ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400347630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-case{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002551456575232400400030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-casePOST / host:example.amazonaws.com my-header1:VALUE1 x-amz-date:20150830T123600Z host;my-header1;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400363540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-casecdbc9802e29d2942e5e10b5bccfdd67c5f22c7c4e8ae67b53629efa58b974b7dheader-signed-request.txt000066400000000000000000000004561456575232400373300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-casePOST / HTTP/1.1 Host:example.amazonaws.com My-Header1:VALUE1 X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date, Signature=cdbc9802e29d2942e5e10b5bccfdd67c5f22c7c4e8ae67b53629efa58b974b7d header-string-to-sign.txt000066400000000000000000000002121456575232400372430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-caseAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
d51ced243e649e3de6ef63afbbdcbca03131a21a7103a1583706a64618606a93query-canonical-request.txt000066400000000000000000000005101456575232400377120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-casePOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:VALUE1 host;my-header1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400362710ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-case0698bc9e4f7f9139065ba2909c0e99da257e8e2a42492a097bcd46d792391fedquery-signed-request.txt000066400000000000000000000005221456575232400372370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-casePOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Signature=0698bc9e4f7f9139065ba2909c0e99da257e8e2a42492a097bcd46d792391fed HTTP/1.1 Host:example.amazonaws.com My-Header1:VALUE1 query-string-to-sign.txt000066400000000000000000000002121456575232400371600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-caseAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 9d4d1de88f0149695a91a01275fc72dc00fb50bdc78e424f012ec860d5f8c41drequest.txt000066400000000000000000000000751456575232400346300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-header-value-casePOST / HTTP/1.1 Host:example.amazonaws.com My-Header1:VALUE1 aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-after/000077500000000000000000000000001456575232400323375ustar00rootroot00000000000000context.json000066400000000000000000000013041456575232400346350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-after{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", "token": "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z", "omit_session_token": true } header-canonical-request.txt000066400000000000000000000002201456575232400376560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-afterPOST / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400362370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-after5da7c1a2acd57cee7505fc6676e4e544621c30862966e37dddb68e92efbe5d6bheader-signed-request.txt000066400000000000000000000011671456575232400372130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-afterPOST / HTTP/1.1 Host:example.amazonaws.com X-Amz-Security-Token:AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA== X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5da7c1a2acd57cee7505fc6676e4e544621c30862966e37dddb68e92efbe5d6b header-string-to-sign.txt000066400000000000000000000002121456575232400371260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-afterAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 553f88c9e4d10fc9e109e2aeb65f030801b70c2f6468faca261d401ae622fc87query-canonical-request.txt000066400000000000000000000004361456575232400376040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-afterPOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400361540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-after2ce6e6d2e0cf2f9d1b55fafec88cd20574c31dc2e7631979f71ba2310083e95bquery-signed-request.txt000066400000000000000000000012731456575232400371260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-afterPOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Security-Token=AQoDYXdzEPT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI%2FqkPpKPi%2FkMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d%2Bxo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz%2BscqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR%2FoLxBA%3D%3D&X-Amz-Signature=2ce6e6d2e0cf2f9d1b55fafec88cd20574c31dc2e7631979f71ba2310083e95b HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400370430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-afterAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
d27fc6fe1afc1d88b248c5ae9194ec0943a693dd6d81d8d815c88a369eb0471erequest.txt000066400000000000000000000000531456575232400345070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-afterPOST / HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-before/000077500000000000000000000000001456575232400325005ustar00rootroot00000000000000context.json000066400000000000000000000013051456575232400347770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-before{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", "token": "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z", "omit_session_token": false } header-canonical-request.txt000066400000000000000000000010131456575232400400200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-beforePOST / host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-security-token:AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA== host;x-amz-date;x-amz-security-token e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400364000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-before85d96828115b5dc0cfc3bd16ad9e210dd772bbebba041836c64533a82be05eadheader-signed-request.txt000066400000000000000000000012141456575232400373450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-beforePOST / HTTP/1.1 Host:example.amazonaws.com X-Amz-Security-Token:AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA== X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=85d96828115b5dc0cfc3bd16ad9e210dd772bbebba041836c64533a82be05ead header-string-to-sign.txt000066400000000000000000000002121456575232400372670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-beforeAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
c237e1b440d4c63c32ca95b5b99481081cb7b13c7e40434868e71567c1a882f6query-canonical-request.txt000066400000000000000000000012461456575232400377450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-beforePOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Security-Token=AQoDYXdzEPT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI%2FqkPpKPi%2FkMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d%2Bxo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz%2BscqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR%2FoLxBA%3D%3D&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400363150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-before46c2e3c63c1dbe7d39f8ada16fe7f001c1f56c5791441565323677f96308871cquery-signed-request.txt000066400000000000000000000012731456575232400372670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-beforePOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Security-Token=AQoDYXdzEPT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI%2FqkPpKPi%2FkMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d%2Bxo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz%2BscqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR%2FoLxBA%3D%3D&X-Amz-Signature=46c2e3c63c1dbe7d39f8ada16fe7f001c1f56c5791441565323677f96308871c HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400372040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-beforeAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 37f7ba7a2ec89e8b29a0d637bcc70d8e8d0fdba854ddfc5bc06a12398cfe8605request.txt000066400000000000000000000000531456575232400346500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-sts-header-beforePOST / HTTP/1.1 Host:example.amazonaws.com post-vanilla-empty-query-value/000077500000000000000000000000001456575232400341615ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004761456575232400365470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-value{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002351456575232400415650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valuePOST / Param1=value1 
host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400401400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-value28038455d6de14eafc1f9222cf5aa6f1a96197d7deb8263271d420d138af7f11header-signed-request.txt000066400000000000000000000004371456575232400411130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valuePOST /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=28038455d6de14eafc1f9222cf5aa6f1a96197d7deb8263271d420d138af7f11 header-string-to-sign.txt000066400000000000000000000002121456575232400410270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valueAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 9d659678c1756bb3113e2ce898845a0a79dbbc57b740555917687f1b3340fbbdquery-canonical-request.txt000066400000000000000000000004541456575232400415050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valuePOST / Param1=value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400400550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valuea9ef26247f293bffe40eeff86a0480ba8c14d31503b2e0c3399e3a16bf1a3682query-signed-request.txt000066400000000000000000000005011456575232400410200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valuePOST /?Param1=value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=a9ef26247f293bffe40eeff86a0480ba8c14d31503b2e0c3399e3a16bf1a3682 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400407440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valueAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 98241d1c6f938789d34b92e16e08b7fc8fdaa774c5437ba30d505eedae43dd2brequest.txt000066400000000000000000000000711456575232400364100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-empty-query-valuePOST /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-query/000077500000000000000000000000001456575232400317725ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400343010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-query{ "credentials": { "access_key_id": 
"AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002351456575232400373170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-queryPOST / Param1=value1 host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400356720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-query28038455d6de14eafc1f9222cf5aa6f1a96197d7deb8263271d420d138af7f11header-signed-request.txt000066400000000000000000000004371456575232400366450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-queryPOST /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=28038455d6de14eafc1f9222cf5aa6f1a96197d7deb8263271d420d138af7f11 header-string-to-sign.txt000066400000000000000000000002121456575232400365610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-queryAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 9d659678c1756bb3113e2ce898845a0a79dbbc57b740555917687f1b3340fbbdquery-canonical-request.txt000066400000000000000000000004541456575232400372370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-queryPOST / Param1=value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400356070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-querya9ef26247f293bffe40eeff86a0480ba8c14d31503b2e0c3399e3a16bf1a3682query-signed-request.txt000066400000000000000000000005011456575232400365520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-queryPOST /?Param1=value1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=a9ef26247f293bffe40eeff86a0480ba8c14d31503b2e0c3399e3a16bf1a3682 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400364760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-queryAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 98241d1c6f938789d34b92e16e08b7fc8fdaa774c5437ba30d505eedae43dd2brequest.txt000066400000000000000000000000711456575232400341420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla-queryPOST /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com 
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla/000077500000000000000000000000001456575232400306275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla/context.json000066400000000000000000000004761456575232400332150ustar00rootroot00000000000000{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000002201456575232400361460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanillaPOST / host:example.amazonaws.com x-amz-date:20150830T123600Z host;x-amz-date e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000001001456575232400345270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla5da7c1a2acd57cee7505fc6676e4e544621c30862966e37dddb68e92efbe5d6bheader-signed-request.txt000066400000000000000000000004211456575232400354730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanillaPOST / HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5da7c1a2acd57cee7505fc6676e4e544621c30862966e37dddb68e92efbe5d6b header-string-to-sign.txt000066400000000000000000000002121456575232400354160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanillaAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 553f88c9e4d10fc9e109e2aeb65f030801b70c2f6468faca261d401ae622fc87query-canonical-request.txt000066400000000000000000000004361456575232400360740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanillaPOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000001001456575232400344440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla2ce6e6d2e0cf2f9d1b55fafec88cd20574c31dc2e7631979f71ba2310083e95bquery-signed-request.txt000066400000000000000000000004631456575232400354160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanillaPOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Signature=2ce6e6d2e0cf2f9d1b55fafec88cd20574c31dc2e7631979f71ba2310083e95b HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002121456575232400353330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanillaAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 
d27fc6fe1afc1d88b248c5ae9194ec0943a693dd6d81d8d815c88a369eb0471eaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-vanilla/request.txt000066400000000000000000000000531456575232400330560ustar00rootroot00000000000000POST / HTTP/1.1 Host:example.amazonaws.com post-x-www-form-urlencoded-parameters/000077500000000000000000000000001456575232400354375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004751456575232400400240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parameters{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": true, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000005471456575232400430510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parametersPOST / content-length:13 content-type:application/x-www-form-urlencoded; charset=utf-8 host:example.amazonaws.com x-amz-content-sha256:9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6e x-amz-date:20150830T123600Z content-length;content-type;host;x-amz-content-sha256;x-amz-date 9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6eheader-signature.txt000066400000000000000000000001001456575232400414160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parameters328d1b9eaadca9f5818ef05e8392801e091653bafec24fcab71e7344e7f51422header-signed-request.txt000066400000000000000000000007651456575232400423750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parametersPOST / HTTP/1.1 Content-Type:application/x-www-form-urlencoded; charset=utf-8 Host:example.amazonaws.com Content-Length:13 X-Amz-Date:20150830T123600Z x-amz-content-sha256:9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6e Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=328d1b9eaadca9f5818ef05e8392801e091653bafec24fcab71e7344e7f51422 Param1=value1header-string-to-sign.txt000066400000000000000000000002121456575232400423050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parametersAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request a89f1a5b53e37702ee6363ce1da3ce8f54386f3c8f352ae652153c2982a0bc4dquery-canonical-request.txt000066400000000000000000000006521456575232400427630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parametersPOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=content-length%3Bcontent-type%3Bhost content-length:13 content-type:application/x-www-form-urlencoded; charset=utf-8 host:example.amazonaws.com content-length;content-type;host 
9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6equery-signature.txt000066400000000000000000000001001456575232400413330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parameters0dbeb9b026c7b6675f266b8427efec9b4fa8b1f6ef1477d717aea231106eab4dquery-signed-request.txt000066400000000000000000000006601456575232400423040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parametersPOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=content-length%3Bcontent-type%3Bhost&X-Amz-Expires=3600&X-Amz-Signature=0dbeb9b026c7b6675f266b8427efec9b4fa8b1f6ef1477d717aea231106eab4d HTTP/1.1 Content-Type:application/x-www-form-urlencoded; charset=utf-8 Host:example.amazonaws.com Content-Length:13 Param1=value1query-string-to-sign.txt000066400000000000000000000002121456575232400422220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parametersAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request 32192353b8ae6d857fac1c71cda066e6af2d3a3127ea7ac1fee22621ab4f05dcrequest.txt000066400000000000000000000002111456575232400376620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded-parametersPOST / HTTP/1.1 Content-Type:application/x-www-form-urlencoded; charset=utf-8 Host:example.amazonaws.com Content-Length:13 Param1=value1post-x-www-form-urlencoded/000077500000000000000000000000001456575232400332765ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4context.json000066400000000000000000000004751456575232400356630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": true, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000005301456575232400407000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedPOST / content-length:13 content-type:application/x-www-form-urlencoded host:example.amazonaws.com x-amz-content-sha256:9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6e x-amz-date:20150830T123600Z content-length;content-type;host;x-amz-content-sha256;x-amz-date 9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6eheader-signature.txt000066400000000000000000000001001456575232400372550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedd3875051da38690788ef43de4db0d8f280229d82040bfac253562e56c3f20e0bheader-signed-request.txt000066400000000000000000000007461456575232400402330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedPOST / HTTP/1.1 Content-Type:application/x-www-form-urlencoded Host:example.amazonaws.com Content-Length:13 X-Amz-Date:20150830T123600Z 
x-amz-content-sha256:9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6e Authorization:AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=d3875051da38690788ef43de4db0d8f280229d82040bfac253562e56c3f20e0b Param1=value1header-string-to-sign.txt000066400000000000000000000002121456575232400401440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request b1edd1d03544c25390e32085d55b57acc9a3961bb59415ff86c45c3d89d16cfbquery-canonical-request.txt000066400000000000000000000006331456575232400406210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedPOST / X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=content-length%3Bcontent-type%3Bhost content-length:13 content-type:application/x-www-form-urlencoded host:example.amazonaws.com content-length;content-type;host 9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6equery-signature.txt000066400000000000000000000001001456575232400371720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencoded89a40deed0f26f9461242825a082d2222717248abc7ab41f552ad84a94ad46e9query-signed-request.txt000066400000000000000000000006411456575232400401420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedPOST /?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fus-east-1%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=content-length%3Bcontent-type%3Bhost&X-Amz-Expires=3600&X-Amz-Signature=89a40deed0f26f9461242825a082d2222717248abc7ab41f552ad84a94ad46e9 HTTP/1.1 Content-Type:application/x-www-form-urlencoded Host:example.amazonaws.com Content-Length:13 Param1=value1query-string-to-sign.txt000066400000000000000000000002121456575232400400610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedAWS4-HMAC-SHA256 20150830T123600Z 20150830/us-east-1/service/aws4_request ee5059a7c437165a28d0e775e6498be428761255d657d8c04cb1baa41de6514crequest.txt000066400000000000000000000001721456575232400355270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4/post-x-www-form-urlencodedPOST / HTTP/1.1 Content-Type:application/x-www-form-urlencoded Host:example.amazonaws.com Content-Length:13 Param1=value1aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/000077500000000000000000000000001456575232400263575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicate/000077500000000000000000000000001456575232400331225ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400354310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicate{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": 
"us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003461456575232400404520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicateGET / host:example.amazonaws.com my-header1:value2,value2,value1 x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;my-header1;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002161456575232400370320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicate304502204862ad283a21f883fc12f1156a6f3fcdbba13d1847e58aa5eb37c666477ea06b022100ee439fac0a975c9a6605b1fa44ad7b654a1f8ac6e868e4e1069a1b3aa35d8113header-signed-request.txt000066400000000000000000000007071456575232400377750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicateGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value2 My-Header1:value2 My-Header1:value1 X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date;x-amz-region-set, Signature=30450220331da6dfebb0d19e5e161b1efa389ccb83cadb60bc71f6791ef71ac6054c44de0221008588b7d5c9f7a79ca9c02a02efbd0f540cda242a64ca1452aa914e050b517724 header-string-to-sign.txt000066400000000000000000000002061456575232400377140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicateAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 30f1f7b639b7fd5982a0f700e6d23bf7bb24f2f1d9e1314005bf22130da61cdfpublic-key.json000066400000000000000000000002271456575232400360030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicate{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005521456575232400403660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicateGET / X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:value2,value2,value1 host;my-header1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400367470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicate3045022100e1e03fa80218cb464404dc4badbdf443f5f5bb504840a2b3803565a28be0388d022063399d4fb8b974906e87a8d67bf7e95334d9dc620817ef92eb2bc6affb1c6a10query-signed-request.txt000066400000000000000000000007301456575232400377060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicateGET 
/?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3045022100b6fa5712e476c23f411995e85747f62e9a25bdd718aac8692b3744dba106e1720220331a1e84a25e69c1ef07e63b3b76e1e78100f2ee020a3e66f127240901468912 HTTP/1.1 Host:example.amazonaws.com My-Header1:value2 My-Header1:value2 My-Header1:value1 query-string-to-sign.txt000066400000000000000000000002061456575232400376310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicateAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request a6e21a0099c98cbb4ec73928a08e8b116dfd634c471a8c03c4007b5258b664earequest.txt000066400000000000000000000001401456575232400352670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-key-duplicateGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value2 My-Header1:value2 My-Header1:value1 get-header-value-multiline/000077500000000000000000000000001456575232400334175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4acontext.json000066400000000000000000000004761456575232400360050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multiline{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003461456575232400410260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multilineGET / host:example.amazonaws.com my-header1:value1 value2 value3 x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;my-header1;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002161456575232400374060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multiline304502206a2d7c3572ebcb9a1f34e5fa744250c3fb2f403a0a7e4b0bc196286846a996a8022100d94aacba36ff453394df27966e9cccd7c6065457b6fe828aebe107307cc5e8e9header-signed-request.txt000066400000000000000000000006701456575232400403500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multilineGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 value2 value3 X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date;x-amz-region-set, Signature=3045022060b9f2f480a395bf34aa42074697f923c2355e26970987461ce904a6a2eeef52022100fc7ef73838bbfba208a8cf8f3edbe1c8879be7853b677f492db93c30df6fbe02 header-string-to-sign.txt000066400000000000000000000002061456575232400402700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multilineAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 
3579d4751dd7db30860b89a17b53647c70fd8363ec485836dbf68cfd22313398public-key.json000066400000000000000000000002271456575232400363570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multiline{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005521456575232400407420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multilineGET / X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:value1 value2 value3 host;my-header1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400373230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multiline30450220159829015456fd111a27c4e0d6a555da894aefd23c96c5eb915b94bf6788499a022100fd48a0b97d1c32a11ee33873f87953a3ef410978f226b4a708acb9e582828fdequery-signed-request.txt000066400000000000000000000007111456575232400402610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multilineGET /?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3045022039c91f52792bd02ebcd8959326e022c60a8a7f4febed58dbea23cb295df8dc9e022100b95d4fcdd93090a54badb5ccb0ec5d38e7b20ff81204f48ceda327b9b155c14d HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 value2 value3 query-string-to-sign.txt000066400000000000000000000002061456575232400402050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multilineAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 960c3db8a6ce71c62c4bc13e5f2a72231a8f1a644d52e688ac1524ecd4b09643request.txt000066400000000000000000000001211456575232400356420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-multilineGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value1 value2 value3 aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-order/000077500000000000000000000000001456575232400326075ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400351160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-order{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003551456575232400401370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-orderGET / host:example.amazonaws.com my-header1:value4,value1,value3,value2 x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 
host;my-header1;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002161456575232400365170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-order3045022005e72f83b232f1fa01d7344267d44a46fa4c193bc3d0ad256e89f92a561eb8fa022100c62a85a22a6018463df69c97da07cd189185dadeede293c659e15514cd28a0c9header-signed-request.txt000066400000000000000000000007271456575232400374640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-orderGET / HTTP/1.1 Host:example.amazonaws.com My-Header1:value4 My-Header1:value1 My-Header1:value3 My-Header1:value2 X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;my-header1;x-amz-date;x-amz-region-set, Signature=304402203410301a4cfc805996d0ac5305374d7cdff4e2564fcbd6f6cfed73b227966046022046e36b2ede1f78d2b68ea4534ae59da3de089e58f67ae08490411dfabd77f36e header-string-to-sign.txt000066400000000000000000000002061456575232400374010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-orderAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request e1c3b5d34632ffff080330b3bc31906c8988bf1683f4af689ef3f1811952df36public-key.json000066400000000000000000000002271456575232400354700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-order{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005611456575232400400530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-orderGET / X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host%3Bmy-header1 host:example.amazonaws.com my-header1:value4,value1,value3,value2 host;my-header1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400364340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-order3045022044e0c722f2d02673c326d0ac38aae57e2877e158b700de702e5dd082be680f65022100841bb5007fb5ece1078add2bf38e84f1b7ea13e67b935d1fd021014eedee46a4query-signed-request.txt000066400000000000000000000007501456575232400373750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-header-value-orderGET /?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host%3Bmy-header1&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=304402200c9f3470f63b573d7c7b8838d3acd8c527e4e5c3b990050c786f074107b12cf70220021b788568d7947e6ad6044e7cc6488dcdb4ff08f15e162ebd2537cc292ed168 HTTP/1.1 Host:example.amazonaws.com My-Header1:value4 My-Header1:value1 My-Header1:value3 My-Header1:value2 
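Two relationships tie each fixture set together: the trailing hash on every GET canonical request is the SHA-256 of an empty payload, and the last line of each string-to-sign should be the SHA-256 of the corresponding canonical request. The check below re-assembles get-header-key-duplicate's header canonical request with assumed newline placement (an empty canonical query-string line and a blank line after the header block), so it is a sketch rather than a byte-exact copy of the fixture file:

import hashlib

# The payload hash ending every GET canonical request above is the SHA-256 of an empty body.
assert hashlib.sha256(b"").hexdigest() == \
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

# Re-assembled header canonical request for get-header-key-duplicate (newline placement assumed).
canonical_request = "\n".join([
    "GET",
    "/",
    "",                                     # empty canonical query string
    "host:example.amazonaws.com",
    "my-header1:value2,value2,value1",
    "x-amz-date:20150830T123600Z",
    "x-amz-region-set:us-east-1",
    "",                                     # blank line closing the header block
    "host;my-header1;x-amz-date;x-amz-region-set",
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
])
# If the reconstruction is byte-exact, this prints the hash on the last line of
# header-string-to-sign.txt: 30f1f7b639b7fd5982a0f700e6d23bf7bb24f2f1d9e1314005bf22130da61cdf
print(hashlib.sha256(canonical_request.encode("utf-8")).hexdigest())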
v4a/get-header-value-trim/  (normalize: true)
    request.txt: GET / with My-Header1: value1 and My-Header2: "a b c" carrying whitespace around the values; the canonical requests trim them to my-header1:value1 and my-header2:"a b c".

v4a/get-relative-normalized/  (normalize: true)
    request.txt: GET /example/.. ; canonical URI: / (the signed requests keep the original /example/.. path).

v4a/get-relative-relative-normalized/  (normalize: true)
    request.txt: GET /example1/example2/../.. ; canonical URI: /

v4a/get-relative-relative-unnormalized/  (normalize: false)
    request.txt: GET /example1/example2/../.. ; canonical URI: /example1/example2/../.. (signed verbatim).

v4a/get-relative-unnormalized/  (normalize: false)
    request.txt: GET /example/.. ; canonical URI: /example/..

v4a/get-slash-dot-slash-normalized/  (normalize: true)
    request.txt: GET /./ ; canonical URI: /

v4a/get-slash-dot-slash-unnormalized/  (normalize: false)
    request.txt: GET /./ ; canonical URI: /./

v4a/get-slash-normalized/  (normalize: true)
    request.txt: GET // ; canonical URI: /
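The normalized/unnormalized pairs in this part of the suite differ only in the normalize flag of context.json: when it is true, dot segments are removed before the canonical URI is built; when it is false, the raw request path is signed as-is. A rough illustration follows; posixpath.normpath is only a stand-in for dot-segment removal, not the CRT's own canonicalization, and it does not reproduce the slash collapsing seen in the // and //example// cases below.

import posixpath

# Dot-segment removal as exercised by the *-normalized cases; the *-unnormalized
# cases skip this step and sign the raw path.
for raw in ["/example/..", "/example1/example2/../..", "/./", "/./example"]:
    print(raw, "->", posixpath.normpath(raw))
# Prints "/" for the first three paths and "/example" for the last.  Per the
# canonical requests in this suite, "//" also normalizes to "/" and "//example//"
# to "/example/", which posixpath.normpath does not reproduce.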
v4a/get-slash-pointless-dot-normalized/  (normalize: true)
    request.txt: GET /./example ; canonical URI: /example

v4a/get-slash-pointless-dot-unnormalized/  (normalize: false)
    request.txt: GET /./example ; canonical URI: /./example

v4a/get-slash-unnormalized/  (normalize: false)
    request.txt: GET // ; canonical URI: //

v4a/get-slashes-normalized/  (normalize: true)
    request.txt: GET //example// ; canonical URI: /example/ (the header-signed request keeps //example//).
header-string-to-sign.txt000066400000000000000000000002061456575232400375320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-normalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request ea6c9c4bc1e85b94f2579cebbc85a84c3f8eaa055c006697555f074dd68509a6public-key.json000066400000000000000000000002271456575232400356210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-normalized{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000004721456575232400402050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-normalizedGET /example/ X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400365650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-normalized3045022100dfd2281b396d138207f0e58889bd6cc4d95ffd8d64e6f37ef2a9f2a15cab6944022060d05a747cd4e3c49cb50bf58d834ae917cd1ebd1524a352fb64f4d54f9ba279query-signed-request.txt000066400000000000000000000006371456575232400375320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-normalizedGET //example//?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3045022100ec452ebb2ae12447edb19c7c1824c5b5921650498ed70c3366d9bce4875f0095022007c29a38626d79722aa8f8882ea96003eaf2a135c4bed3394f24a068fded99f1 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400374470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-normalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request ab3f7b8af0ce16e0faf508160fb13d890874992d74f36214ae9eec7437361f2brequest.txt000066400000000000000000000000641456575232400351120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-normalizedGET //example// HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalized/000077500000000000000000000000001456575232400333035ustar00rootroot00000000000000context.json000066400000000000000000000004771456575232400356130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" 
}header-canonical-request.txt000066400000000000000000000003051456575232400406260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalizedGET //example// host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002141456575232400372110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalized3044022039ccb6afb5f0b574cfc8fec2e9158269bbe28f9e5747acc032d1ea17617f8b2002204933ee3e4ad4ec425c90593a68db99fcece29ae45906dec40c2204290cedf8bdheader-signed-request.txt000066400000000000000000000006201456575232400401500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalizedGET //example// HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=304502202450e5abfccb424d5a0e2d5d1a084e76abc5290ae567a97f5174bfed575fd519022100a5e743040a2bdd3cd7889070e1d190d5782e7591a715370f9bea04dc2702dbab header-string-to-sign.txt000066400000000000000000000002061456575232400400750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request bf8c459a6a7f3879f55bff41e4dca65f69df4628456904e47f83013c0deb7276public-key.json000066400000000000000000000002271456575232400361640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalized{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000004741456575232400405520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalizedGET //example// X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400371300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalized3045022100fa1d3e99997a184d59e23fb8bfeca08a5f94f2e7b89b0320d11deacab3593f0f022009c0785ec02c5dda370a3eb714feb51e3b0d56df674571bf7146bf8277897e65query-signed-request.txt000066400000000000000000000006411456575232400400700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalizedGET //example//?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3046022100b3e1640be9e64cd28d1ff4a1f157e9f3c751545d6e0a1f1c4e380fee08e8745b022100d9b667f2d38600993703f88b246b53ebb85faecfaf2de27cf39e43957e90d492 HTTP/1.1 Host:example.amazonaws.com 
query-string-to-sign.txt000066400000000000000000000002061456575232400400120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 660b4a6f0bd53b287cefb21cdf69c1574303de44d2e9f7759b5379b428b70157request.txt000066400000000000000000000000641456575232400354550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-slashes-unnormalizedGET //example// HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalized/000077500000000000000000000000001456575232400323715ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400347000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003131456575232400377130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalizedGET /example%20space/ host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002161456575232400363010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalized304502202292b4f0e4f1495b9ceb3b77349efeffdce42cb5461f810db3983005c1e772ca022100b4b6175661196a82f5b59968315517e1ffdde58d2e3eba10d3bfe171cde625c4header-signed-request.txt000066400000000000000000000006261456575232400372440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=3046022100e8e0ec99a8d287073e078543fdbdeba108cdef097a52c7556874076072350200022100e59d9e0f48209bea00faa47d485d850eb72747db83927a2a43b5ebab5ca2b545 header-string-to-sign.txt000066400000000000000000000002061456575232400371630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 1bbd1a746b47d963b21f6e6783c689c038162ccc3f8a69abeda218a178ee4d19public-key.json000066400000000000000000000002271456575232400352520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalized{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005021456575232400376300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalizedGET /example%20space/ 
X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400362160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalized3045022100fc068fdda7586bec9ac9ed341e7a81bf1e1f8c60fee6e22165868ecac62e404d02200620b5d68b9be75210336618fc0a884439ce1ae93085adb03e27f1ce7ca4912equery-signed-request.txt000066400000000000000000000006411456575232400371560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalizedGET /example space/?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=304402200bc73964284c5a0367aec4bc4c3e80c7425388c8f5ac963a3e0cc9437549bd2b022077b25c48bed62c31f1cc4bf78d56089953806d8132ccebf23907c9b4445db86c HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400371000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 6a2f4ddc0e28ca4b91abed80c45ad628fc63d8a4cd9c443869d5dd6cf07235edrequest.txt000066400000000000000000000000701456575232400345400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-normalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalized/000077500000000000000000000000001456575232400327345ustar00rootroot00000000000000context.json000066400000000000000000000004771456575232400352440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalized{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": false, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003131456575232400402560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalizedGET /example%20space/ host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002201456575232400366370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalized3046022100881d1dce37131cd3248fa4cfb63f050e8985c3de5a58f654a8b263d52ad576180221009316e13742504474cf33631bb5f2fa89f4990c911ad317be831a6de015cf4054header-signed-request.txt000066400000000000000000000006261456575232400376070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 
Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=3046022100a26695d793a8e7d2ed2a3495402e9b4bf8e5b05e767d303f57d9515f617ebae5022100c4b89f5648567c08b6d173b6d733f5c012bb615cd38039109f64db56efecc851 header-string-to-sign.txt000066400000000000000000000002061456575232400375260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 1bbd1a746b47d963b21f6e6783c689c038162ccc3f8a69abeda218a178ee4d19public-key.json000066400000000000000000000002271456575232400356150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalized{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005021456575232400401730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalizedGET /example%20space/ X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400365610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalized304502201825fe5d62f321a740cbe590ce0c0c17045eed9acbbbd574d3cefbec349d06c5022100bcd482f85374b24d9f1f5000d2de7e12a4e0c8f5277e73d29c41ecc45390e731query-signed-request.txt000066400000000000000000000006411456575232400375210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalizedGET /example space/?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=304402207c82fe84466237eda8666f893df1d96a82da91509bb11252d82af8744bb2155702200405ec0f65fd883703a556c6d7d74cfb36a2cbbd3dabd884957809e6c6856325 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400374430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalizedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 6a2f4ddc0e28ca4b91abed80c45ad628fc63d8a4cd9c443869d5dd6cf07235edrequest.txt000066400000000000000000000000701456575232400351030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-space-unnormalizedGET /example space/ HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreserved/000077500000000000000000000000001456575232400313165ustar00rootroot00000000000000context.json000066400000000000000000000004761456575232400336250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreserved{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, 
"region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003751456575232400366500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002161456575232400352260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreserved304502204303d34750ced762f4e5af2c61ba70c9f5830d8a5c7fc418a02823d61673ca32022100c26cf8f38007155cb8bd3e684697063ad8f421a91be796d78b14b126e25d3680header-signed-request.txt000066400000000000000000000007061456575232400361700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=304402206a228bcbe26141c2c5065525b98ea00b793470e04e655006269f740e320215930220160015e35502fa89a96f5fed59981af0896b60ecea6a68606a0be368bca73f2c header-string-to-sign.txt000066400000000000000000000002061456575232400361100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreservedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request b48c0f7d7cdaa2cd05e4b789c913063becd96ccace5296a334c950040e58bcacpublic-key.json000066400000000000000000000002271456575232400341770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreserved{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005641456575232400365650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002161456575232400351430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreserved3045022100a8e6e2918ceb211f77810150d828be217af238ca95db6f88264d668ffacefb8b02204122d651c775a3ed8f40f3bfe37a2c8fec28cd53027b599eddd7115e06ca4bb0query-signed-request.txt000066400000000000000000000007311456575232400361030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreservedGET 
/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3046022100e9480cf54bfa566c6225af6986818e36a79525af244316551c55f902494d774f0221008f15178ec18c589b3704ccfc9cd7daa410f148407f75bc5777afcca28470cfbb HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400360250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreservedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request a967a5035e5dc574f94fb9f0de0faf9d56e889c26d9a65d7d0a15d89690280d1request.txt000066400000000000000000000001541456575232400334700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-unreservedGET /-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz HTTP/1.1 Host:example.amazonaws.com aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8/000077500000000000000000000000001456575232400300225ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8/context.json000066400000000000000000000004761456575232400324100ustar00rootroot00000000000000{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003041456575232400353440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8GET /%E1%88%B4 host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002141456575232400337300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf83044022009b73628ce2929e224b781c591bdc93fc3381cef5f63992eefb82a286276fa0902203cbc6071d1be7b8c91ec0a04b921881c48cbea2a0a86481f6fa7a983e36190c0header-signed-request.txt000066400000000000000000000006111456575232400346670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8GET /ሴ HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=304502210082c462efc22f340d7d20b19e0c9b38400e9395bedd9269e01e959ef915ca2846022050d38df1572786ef8befe01d888335c15b0f17fb1d795858fa3cb79e060ba375 header-string-to-sign.txt000066400000000000000000000002061456575232400346140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8AWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 7c8a7084825e715f0e217a7470ce3611bcfedb5b70329d9b36bba5cefd39c11aaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8/public-key.json000066400000000000000000000002271456575232400327620ustar00rootroot00000000000000{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", 
"Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000004731456575232400352700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8GET /%E1%88%B4 X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002201456575232400336420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf830460221008cf2ac3d46b7529fe7b73add56626a546fc9372b458e197dbd355198465e0918022100c059b86e01bd11d31c5fe19ee7c73fe3b8058ad2cebe2fe74c779c4a6a95684aquery-signed-request.txt000066400000000000000000000006301456575232400346050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8GET /ሴ?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3045022100a6a54a9d29f463be9ee0822246866fdbf4d71ab2f0c1dd70da52bd3d4539ce6802204fb18b4ddab0e97af74ecb42e33d6d2f2ac2ae0cee9509f649506f1cd2954899 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400345310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8AWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request b77c78c8276c5eac930db70b21b69b34d0c2560e1a963ab28e28481ef6a4ff66aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-utf8/request.txt000066400000000000000000000000551456575232400322530ustar00rootroot00000000000000GET /ሴ HTTP/1.1 Host:example.amazonaws.com get-vanilla-empty-query-key/000077500000000000000000000000001456575232400335705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4acontext.json000066400000000000000000000004761456575232400361560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-key{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003101456575232400411660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-keyGET / Param1=value1 host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002141456575232400375550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-key3044022075e5fe54105695e4af6f5b59ff39f4f1d84de3bfdd546a6b9212936a78e743ac02204f25f3b9826688eb9743088f426eabd6b306432fd2c4c5c5aa2bbb9058267542header-signed-request.txt000066400000000000000000000006241456575232400405200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-keyGET /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=304502210093d40fe05684ddadfadfafc4b8565d18ca94d590fc897577224bf09696bbde6102206f12bb5e2503781303cf6880c0a2136d4b820221eb9d6ac5ad61378b16b8ea5d header-string-to-sign.txt000066400000000000000000000002061456575232400404410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-keyAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 894f4237e92aae973c992da1d1f39d7a5913a23e9f7cbcf085e9550685eb498apublic-key.json000066400000000000000000000002271456575232400365300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-key{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005001456575232400411040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-keyGET / Param1=value1&X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002201456575232400374670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-key30460221009a9e99aefe8effed47320e6e9f54cb943e2f72ce638ed7aae2a6711240304b82022100a90e99f053dd51912a6993299229e19aebbd81e68d4d24a1bdedcc6717c9f456query-signed-request.txt000066400000000000000000000006451456575232400404400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-keyGET /?Param1=value1&X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3046022100d0e421c8dcfbc809d91211cb4588940f6567729d5f9542803c4121704ec85bf70221009bc7b39b796dc0b4436a08d0320e2e89a2a47724432a1060029a55ae05dc2e83 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400403560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-keyAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 
07551f1d699afeb50d6811a527ab7b0270b60448ea27d8cbccb9750d68287b3frequest.txt000066400000000000000000000000701456575232400360160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-empty-query-keyGET /?Param1=value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-order-encoded/000077500000000000000000000000001456575232400343565ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4acontext.json000066400000000000000000000004761456575232400367440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encoded{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003471456575232400417660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encodedGET / %E1%88%B4=Value1&Param=Value2&Param-3=Value3 host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002141456575232400403430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encoded304402201c9d8aa856d87542e2fc29ccb3841bf604d7582a8ab2ca8e6c9e21c98693f35502207052f84ecd1dc8b6468a536e9d1bdc990cf14bae8142c1cc945f15bcf0e87613header-signed-request.txt000066400000000000000000000006631456575232400413110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encodedGET /?Param-3=Value3&Param=Value2&%E1%88%B4=Value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=3045022100cef3137947f52af13fe5591d4ddcff0260176e7b2a668b43ac8f0cba2e489fe502203b4fe53c9fe3e8925643fe0e4c3ec0b3ea6262917bf7300d2e2abb399078e6de header-string-to-sign.txt000066400000000000000000000002061456575232400412270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encodedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 55568a29040b287c72015a5ab482d9aaceeb1e9881f3bc946378bc3d6079f6a5public-key.json000066400000000000000000000002271456575232400373160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encoded{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005371456575232400417040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encodedGET / 
%E1%88%B4=Value1&Param=Value2&Param-3=Value3&X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002201456575232400402550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encoded3046022100dcc853d7fbd59ceebd1ac57d404b979a7252cf88c79dcc6d58167902750235fd022100ffa98128e35b03aa434287165980f78533e9e6dfc80373cea82b047f4f0554eequery-signed-request.txt000066400000000000000000000007041456575232400412220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encodedGET /?Param-3=Value3&Param=Value2&%E1%88%B4=Value1&X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=3046022100a1244805f7fd13f1c5f4824fe85052857f2c5b6820829562e3853715966a4f7102210091a4e9e79fd79fc7d4d580061730822d3228d15d584b7c5c16a4c490690740c5 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400411440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encodedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 56d9ab07692614338f2aa44f1018ad4102b19305fba8b83383d2d7c6ca816614request.txt000066400000000000000000000001271456575232400366070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-encodedGET /?Param-3=Value3&Param=Value2&%E1%88%B4=Value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-order-key-case/000077500000000000000000000000001456575232400344565ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4acontext.json000066400000000000000000000004761456575232400370440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-case{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000003261456575232400420630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-caseGET / Param1=value1&Param2=value2 host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855header-signature.txt000066400000000000000000000002161456575232400404450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-case3045022100b342b0d62487d235a5baf82e970102670a8d7af34c61f086125d204a90724d9e022070304e846de3de228c57e528b61f4addfdb5fa788a8a452e483c6a286d8803f5header-signed-request.txt000066400000000000000000000006421456575232400414060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-caseGET /?Param2=value2&Param1=value1 HTTP/1.1 Host:example.amazonaws.com X-Amz-Date:20150830T123600Z X-Amz-Region-Set:us-east-1 Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=30450220394fa0699225f403514cb4db03b7c479f64c5f9864bf520b871bb23a054d6517022100d4445c909fc62a43c6c485a8582621e0e14c6ef1e4218072bc2dcaffc1188f18 header-string-to-sign.txt000066400000000000000000000002061456575232400413270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-caseAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request cc07b29e0d0f0b2d6aa296621a5608fd9c2271159b9b2f737f682704ebb96482public-key.json000066400000000000000000000002271456575232400374160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-case{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000005161456575232400420010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-caseGET / Param1=value1&Param2=value2&X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=host host:example.amazonaws.com host e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855query-signature.txt000066400000000000000000000002141456575232400403600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-case304402207c976c47b1bcf7333ad10fcf78498bb14a368a6cd8f58703d47c9e91c4468c13022059a3715bafae9410271bf3355ccd0bbc622fe90f3c0168203d83323297789de5query-signed-request.txt000066400000000000000000000006571456575232400413310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-caseGET /?Param2=value2&Param1=value1&X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=304402202cbada4314f8da2b9dfa447db936428ad13f522f007110427e5c6c4dbf93762f02205ad19e24495835a7242c546eeec3dbdfcd538361fd9a67f99610a8adab3411e9 HTTP/1.1 Host:example.amazonaws.com query-string-to-sign.txt000066400000000000000000000002061456575232400412440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-caseAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 
c688584c3dbae2868c4911c825239f2c9375e66b9962f21db60b9b2fcd75bf45request.txt000066400000000000000000000001061456575232400367040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key-caseGET /?Param2=value2&Param1=value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-order-key/000077500000000000000000000000001456575232400335455ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4acontext.json000066400000000000000000000004761456575232400361330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-key{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }request.txt000066400000000000000000000001061456575232400357730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-keyGET /?Param1=value2&Param1=Value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-order-value/000077500000000000000000000000001456575232400340715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4acontext.json000066400000000000000000000004761456575232400364570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-value{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }request.txt000066400000000000000000000001061456575232400363170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-order-valueGET /?Param1=value2&Param1=value1 HTTP/1.1 Host:example.amazonaws.com get-vanilla-query-unreserved/000077500000000000000000000000001456575232400340265ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4acontext.json000066400000000000000000000004761456575232400364140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-unreserved{ "credentials": { "access_key_id": "AKIDEXAMPLE", "secret_access_key": "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "expiration_in_seconds": 3600, "normalize": true, "region": "us-east-1", "service": "service", "sign_body": false, "timestamp": "2015-08-30T12:36:00Z" }header-canonical-request.txt000066400000000000000000000005001456575232400414250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/get-vanilla-query-unreservedGET / -._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz host:example.amazonaws.com x-amz-date:20150830T123600Z x-amz-region-set:us-east-1 host;x-amz-date;x-amz-region-set 
The files below belong to the SigV4a signing test suite vendored at crt/aws-c-auth/tests/aws-signing-test-suite/v4a/. Every test-case directory uses the same layout: context.json holds the signing parameters (static test credentials AKIDEXAMPLE / wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY, region us-east-1, service "service", timestamp 2015-08-30T12:36:00Z, expiration_in_seconds 3600, normalize true, sign_body false unless noted) and request.txt the unsigned HTTP request. For both the header-signing and the query-signing ("presigned") variants the directory then carries the expected *-canonical-request.txt, *-string-to-sign.txt, *-signature.txt (a hex-encoded DER ECDSA signature) and *-signed-request.txt, plus public-key.json with the P-256 public key shared by all cases (X: b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1, Y: 865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518). Each string-to-sign has the form

    AWS4-ECDSA-P256-SHA256
    20150830T123600Z
    20150830/service/aws4_request
    <SHA-256 hex digest of the matching canonical request>

and, because sign_body is false in most cases, the payload-hash line of the canonical request is e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 (the SHA-256 of an empty body).

GET test cases:

get-vanilla-query-unreserved/
    request.txt: GET with a query parameter whose key and value are both the full unreserved character set (-._~ plus digits and upper/lower-case letters), Host:example.amazonaws.com. The header variant signs host;x-amz-date;x-amz-region-set; the query variant instead appends X-Amz-Algorithm, X-Amz-Credential, X-Amz-Date, X-Amz-Expires=3600, X-Amz-Region-Set, X-Amz-SignedHeaders=host and the resulting X-Amz-Signature to the query string.

get-vanilla-query/
    request.txt: GET / HTTP/1.1, Host:example.amazonaws.com (no query parameters); otherwise the same treatment as the case above.

get-vanilla-utf8-query/
    request.txt: GET /?ሴ=bar HTTP/1.1; the canonical request percent-encodes the UTF-8 key as %E1%88%B4.

get-vanilla-with-session-token/
    context.json adds a session token (6e86291e8372ff2a2260956d9b8aae1d763fbf315fa00fa31553b73ebf194267). The token is signed: the header variant covers host;x-amz-date;x-amz-region-set;x-amz-security-token, and the query variant includes X-Amz-Security-Token among the signed query parameters.
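The header-signing halves of the GET cases above map onto this package's public signing API. A minimal sketch (not the repository's own test harness) of driving a SigV4a header signature from one context.json / request.txt pair; the parameter values are copied from the vectors, everything else is illustrative:

    import datetime

    from awscrt.auth import (AwsCredentialsProvider, AwsSignatureType,
                             AwsSigningAlgorithm, AwsSigningConfig, aws_sign_request)
    from awscrt.http import HttpHeaders, HttpRequest

    # Static test credentials and signing parameters from context.json.
    credentials = AwsCredentialsProvider.new_static(
        access_key_id='AKIDEXAMPLE',
        secret_access_key='wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY')

    config = AwsSigningConfig(
        algorithm=AwsSigningAlgorithm.V4_ASYMMETRIC,           # SigV4a (ECDSA P-256)
        signature_type=AwsSignatureType.HTTP_REQUEST_HEADERS,  # emit an Authorization header
        credentials_provider=credentials,
        region='us-east-1',
        service='service',
        date=datetime.datetime(2015, 8, 30, 12, 36, tzinfo=datetime.timezone.utc))

    # The unsigned request from request.txt (get-vanilla-query and get-vanilla use GET /).
    request = HttpRequest('GET', '/', HttpHeaders([('Host', 'example.amazonaws.com')]))

    # aws_sign_request returns a Future that resolves to the signed request.
    signed = aws_sign_request(request, config).result()
    for name, value in signed.headers:
        print(f'{name}: {value}')   # X-Amz-Date, X-Amz-Region-Set, Authorization, ...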
get-vanilla/
    request.txt: GET / HTTP/1.1, Host:example.amazonaws.com. Besides the header-*/query-* pairs this directory also keeps top-level canonical-request.txt and string-to-sign.txt copies of the header-signing artifacts. The signed request carries an Authorization header of the form

    Authorization:AWS4-ECDSA-P256-SHA256 Credential=AKIDEXAMPLE/20150830/service/aws4_request, SignedHeaders=host;x-amz-date;x-amz-region-set, Signature=<hex-encoded DER ECDSA signature>

POST test cases:

post-header-key-case/
    request.txt: POST / HTTP/1.1, Host:example.amazonaws.com; the signed-header set and canonical request match get-vanilla apart from the method.

post-header-key-sort/
    request.txt adds My-Header1:value1; the canonical request lower-cases and sorts the signed headers as host;my-header1;x-amz-date;x-amz-region-set, and the query variant signs X-Amz-SignedHeaders=host%3Bmy-header1.

post-header-value-case/
    request.txt adds My-Header1:VALUE1; the header value keeps its case in the canonical request (my-header1:VALUE1).

post-sts-header-after/
    context.json carries a long STS session token and "omit_session_token": true, so the token is excluded from signing: it appears in neither the canonical request nor SignedHeaders, yet is still present in the signed output (as an X-Amz-Security-Token header in header-signed-request.txt and as an unsigned X-Amz-Security-Token query parameter in query-signed-request.txt).

post-sts-header-before/
    Same STS token with "omit_session_token": false; here x-amz-security-token is part of the canonical request and SignedHeaders, and X-Amz-Security-Token is a signed query parameter in the query variant.
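Throughout the suite the last line of each *-string-to-sign.txt is the SHA-256 hex digest of the matching *-canonical-request.txt. A short illustrative check (file names as used above; not part of the test suite itself):

    import hashlib

    def string_to_sign(canonical_request: bytes,
                       amz_date: str = '20150830T123600Z',
                       credential_scope: str = '20150830/service/aws4_request') -> str:
        """Rebuild the SigV4a string-to-sign for a canonical request."""
        return '\n'.join([
            'AWS4-ECDSA-P256-SHA256',   # fixed algorithm label for SigV4a
            amz_date,
            credential_scope,
            hashlib.sha256(canonical_request).hexdigest(),
        ])

    with open('header-canonical-request.txt', 'rb') as f:
        print(string_to_sign(f.read()))   # expected to match header-string-to-sign.txt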
post-vanilla-empty-query-value/
    request.txt: POST /?Param1=value1 HTTP/1.1, Host:example.amazonaws.com; Param1=value1 sorts ahead of the X-Amz-* parameters in the canonical query string of the query variant.
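The query-* files in each directory come from the presigned flavor of the same signing operation. In the style of the earlier sketch (illustrative only), the differences reduce to the signature type, the expiration from context.json and, for the STS cases, the omit_session_token flag:

    import datetime

    from awscrt.auth import (AwsCredentialsProvider, AwsSignatureType,
                             AwsSigningAlgorithm, AwsSigningConfig)

    credentials = AwsCredentialsProvider.new_static(
        access_key_id='AKIDEXAMPLE',
        secret_access_key='wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY')

    query_config = AwsSigningConfig(
        algorithm=AwsSigningAlgorithm.V4_ASYMMETRIC,
        signature_type=AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS,  # sign via X-Amz-* query params
        credentials_provider=credentials,
        region='us-east-1',
        service='service',
        date=datetime.datetime(2015, 8, 30, 12, 36, tzinfo=datetime.timezone.utc),
        expiration_in_seconds=3600,   # surfaces as X-Amz-Expires=3600
        omit_session_token=False)     # True corresponds to the post-sts-header-after behaviour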
post-vanilla-query/
    request.txt: POST /?Param1=value1 HTTP/1.1, Host:example.amazonaws.com; same canonical form as post-vanilla-empty-query-value.

post-vanilla/
    request.txt: POST / HTTP/1.1, Host:example.amazonaws.com; the POST counterpart of get-vanilla.

post-x-www-form-urlencoded-parameters/
    context.json sets "sign_body": true. request.txt: POST / with Content-Type:application/x-www-form-urlencoded; charset=utf-8, Content-Length:13 and body "Param1=value1". The canonical request therefore uses the body's SHA-256 (9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6e) as its payload hash and, in the header variant, signs content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-region-set, adding an x-amz-content-sha256 header to the signed request. The query variant signs content-length;content-type;host.

post-x-www-form-urlencoded/
    Same request, body and "sign_body": true parameters, except that Content-Type is application/x-www-form-urlencoded without the charset parameter; the canonical request and SignedHeaders otherwise match the case above.
Signature=3045022100955bbc0f0b9d4284719808642167f7d5ea4a72f8c296b75b442898c8b81cd7e502202425fb97620fa9a20344d22b248dd00db07524fc39b5ad77533e00b277331241 Param1=value1header-string-to-sign.txt000066400000000000000000000002061456575232400403100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/post-x-www-form-urlencodedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request ba7fa291aefad463b308fdc89b33d852aea9fbcfa151d820117bd8189066cb91public-key.json000066400000000000000000000002271456575232400363770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/post-x-www-form-urlencoded{ "X":"b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1", "Y":"865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518" } query-canonical-request.txt000066400000000000000000000006601456575232400407620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/post-x-www-form-urlencodedPOST / X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-SignedHeaders=content-length%3Bcontent-type%3Bhost content-length:13 content-type:application/x-www-form-urlencoded host:example.amazonaws.com content-length;content-type;host 9095672bbd1f56dfc5b65f3e153adc8731a4a654192329106275f4c7b24d0b6equery-signature.txt000066400000000000000000000002141456575232400373410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/post-x-www-form-urlencoded304402205e21d399153f879b0f32c38742d12941299467bfdc213224cc76a97acffd6ec6022035a9be37a38c19e98e5cfc536fabe885e82fbf999081234b364cab5cacff8733query-signed-request.txt000066400000000000000000000010041456575232400402750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/post-x-www-form-urlencodedPOST /?X-Amz-Algorithm=AWS4-ECDSA-P256-SHA256&X-Amz-Credential=AKIDEXAMPLE%2F20150830%2Fservice%2Faws4_request&X-Amz-Date=20150830T123600Z&X-Amz-SignedHeaders=content-length%3Bcontent-type%3Bhost&X-Amz-Expires=3600&X-Amz-Region-Set=us-east-1&X-Amz-Signature=30450221008d8a6aa0bc3f651e6c14c52e9e24dbca58964641c9cb6e55169f9dc74766ae3d022016126756ce1523ac972f66f6bf6e981f44572d3c8916f1f43d428fb2caa0e1ea HTTP/1.1 Content-Type:application/x-www-form-urlencoded Host:example.amazonaws.com Content-Length:13 Param1=value1query-string-to-sign.txt000066400000000000000000000002061456575232400402250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/post-x-www-form-urlencodedAWS4-ECDSA-P256-SHA256 20150830T123600Z 20150830/service/aws4_request 4e4122984d30d13170a298ece62cc30f8da12578fb3b482616b1f11036b13934request.txt000066400000000000000000000001721456575232400356700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws-signing-test-suite/v4a/post-x-www-form-urlencodedPOST / HTTP/1.1 Content-Type:application/x-www-form-urlencoded Host:example.amazonaws.com Content-Length:13 Param1=value1aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws_imds_client_test.c000066400000000000000000001624231456575232400257120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) # pragma warning(disable : 4244) #endif /* _MSC_VER */ #define IMDS_CLIENT_MAX_REQUESTS 15 struct aws_mock_imds_client_tester { struct aws_byte_buf request_uris[IMDS_CLIENT_MAX_REQUESTS]; struct aws_array_list response_data_callbacks[IMDS_CLIENT_MAX_REQUESTS]; struct aws_allocator *allocator; int current_request; int response_code[IMDS_CLIENT_MAX_REQUESTS]; int token_request_idx; int error_code; uint64_t timestamp; bool is_connection_acquire_successful; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_event_loop_group *el_group; struct aws_client_bootstrap *bootstrap; bool has_received_resource_callback; bool has_received_shutdown_callback; bool token_ttl_header_exist[IMDS_CLIENT_MAX_REQUESTS]; bool token_ttl_header_expected[IMDS_CLIENT_MAX_REQUESTS]; bool token_header_exist[IMDS_CLIENT_MAX_REQUESTS]; bool token_header_expected[IMDS_CLIENT_MAX_REQUESTS]; bool alternate_closed_connections; struct aws_byte_buf resource; int successful_requests; }; static struct aws_mock_imds_client_tester s_tester; static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_shutdown_callback = true; aws_mutex_unlock(&s_tester.lock); aws_condition_variable_notify_one(&s_tester.signal); } static bool s_has_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_tester.has_received_shutdown_callback; } static void s_aws_wait_for_imds_client_shutdown_callback(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_has_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static struct aws_http_connection_manager *s_aws_http_connection_manager_new_mock( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { (void)allocator; (void)options; return (struct aws_http_connection_manager *)1; } static void s_aws_http_connection_manager_release_mock(struct aws_http_connection_manager *manager) { (void)manager; s_on_shutdown_complete(NULL); } static void s_aws_http_connection_manager_acquire_connection_mock( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; if (s_tester.is_connection_acquire_successful) { callback((struct aws_http_connection *)1, AWS_OP_SUCCESS, user_data); } else { aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_OP_ERR, user_data); } } static int s_aws_http_connection_manager_release_connection_mock( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { (void)manager; (void)connection; return AWS_OP_SUCCESS; } static void s_invoke_mock_request_callbacks( const struct aws_http_make_request_options *options, struct aws_array_list *data_callbacks) { size_t data_callback_count = aws_array_list_length(data_callbacks); struct aws_http_header headers[1]; AWS_ZERO_ARRAY(headers); headers[0].name = aws_byte_cursor_from_c_str("some-header"); headers[0].value = aws_byte_cursor_from_c_str("value"); options->on_response_headers( (struct aws_http_stream *)1, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data); if (options->on_response_header_block_done) { options->on_response_header_block_done( (struct aws_http_stream *)1, data_callback_count > 0, options->user_data); } for (size_t i = 0; i < 
data_callback_count; ++i) { struct aws_byte_cursor data_callback_cursor; if (aws_array_list_get_at(data_callbacks, &data_callback_cursor, i)) { continue; } options->on_response_body((struct aws_http_stream *)1, &data_callback_cursor, options->user_data); } if (!s_tester.alternate_closed_connections) { options->on_complete((struct aws_http_stream *)1, AWS_ERROR_SUCCESS, options->user_data); } else { options->on_complete( (struct aws_http_stream *)1, ((uint8_t)s_tester.current_request & 0x01) ? AWS_ERROR_HTTP_CONNECTION_CLOSED : AWS_ERROR_SUCCESS, options->user_data); } } static void s_validate_token_ttl_header(const struct aws_http_message *request); static void s_validate_token_header(const struct aws_http_message *request); static struct aws_http_stream *s_aws_http_connection_make_request_mock( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); aws_http_message_get_request_path(options->request, &path); if (s_tester.current_request == s_tester.token_request_idx) { /* verify token ttl header */ s_validate_token_ttl_header(options->request); } else if (s_tester.current_request > s_tester.token_request_idx) { /* verify token header */ s_validate_token_header(options->request); } int idx = s_tester.current_request++; aws_byte_buf_append_dynamic(&(s_tester.request_uris[idx]), &path); s_invoke_mock_request_callbacks(options, &s_tester.response_data_callbacks[idx]); return (struct aws_http_stream *)1; } static int s_aws_http_stream_activate_mock(struct aws_http_stream *stream) { (void)stream; return AWS_OP_SUCCESS; } static struct aws_http_connection *s_aws_http_stream_get_connection_mock(const struct aws_http_stream *stream) { (void)stream; return (struct aws_http_connection *)1; } static int s_aws_http_stream_get_incoming_response_status_mock( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; if (s_tester.response_code[s_tester.current_request - 1] != 0) { *out_status_code = s_tester.response_code[s_tester.current_request - 1]; } else { *out_status_code = AWS_HTTP_STATUS_CODE_200_OK; } return AWS_OP_SUCCESS; } static void s_aws_http_stream_release_mock(struct aws_http_stream *stream) { (void)stream; } static void s_aws_http_connection_close_mock(struct aws_http_connection *connection) { (void)connection; } static int s_aws_high_res_clock_get_ticks_mock(uint64_t *timestamp) { *timestamp = s_tester.timestamp; return AWS_OP_SUCCESS; } static struct aws_auth_http_system_vtable s_mock_function_table = { .aws_http_connection_manager_new = s_aws_http_connection_manager_new_mock, .aws_http_connection_manager_release = s_aws_http_connection_manager_release_mock, .aws_http_connection_manager_acquire_connection = s_aws_http_connection_manager_acquire_connection_mock, .aws_http_connection_manager_release_connection = s_aws_http_connection_manager_release_connection_mock, .aws_http_connection_make_request = s_aws_http_connection_make_request_mock, .aws_http_stream_activate = s_aws_http_stream_activate_mock, .aws_http_stream_get_connection = s_aws_http_stream_get_connection_mock, .aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_incoming_response_status_mock, .aws_http_stream_release = s_aws_http_stream_release_mock, .aws_http_connection_close = s_aws_http_connection_close_mock, .aws_high_res_clock_get_ticks = s_aws_high_res_clock_get_ticks_mock, }; static int s_aws_imds_tester_init(struct aws_allocator *allocator) { 
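/* Reset the shared tester state and (re)create the event loop group, client bootstrap,
 * and per-request buffers that the mocked IMDS tests below rely on. */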
aws_auth_library_init(allocator); AWS_ZERO_STRUCT(s_tester); s_tester.allocator = allocator; for (size_t i = 0; i < IMDS_CLIENT_MAX_REQUESTS; i++) { if (aws_array_list_init_dynamic( &s_tester.response_data_callbacks[i], allocator, 10, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_buf_init(&s_tester.request_uris[i], allocator, 100)) { return AWS_OP_ERR; } } if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } /* default to everything successful */ s_tester.is_connection_acquire_successful = true; s_tester.el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .user_data = NULL, .host_resolution_config = NULL, .host_resolver = NULL, .on_shutdown_complete = NULL, }; s_tester.bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(s_tester.bootstrap); if (aws_byte_buf_init(&s_tester.resource, allocator, 256)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_aws_imds_tester_cleanup(void) { for (size_t i = 0; i < IMDS_CLIENT_MAX_REQUESTS; i++) { aws_array_list_clean_up(&s_tester.response_data_callbacks[i]); aws_byte_buf_clean_up(&s_tester.request_uris[i]); } aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_client_bootstrap_release(s_tester.bootstrap); aws_event_loop_group_release(s_tester.el_group); aws_byte_buf_clean_up(&s_tester.resource); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } static bool s_has_tester_received_resource_callback(void *user_data) { (void)user_data; return s_tester.has_received_resource_callback; } static void s_aws_wait_for_resource_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_has_tester_received_resource_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_resource_callback(const struct aws_byte_buf *resource, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_resource_callback = true; s_tester.error_code = error_code; if (resource && resource->len) { struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(resource); aws_byte_buf_append_dynamic(&s_tester.resource, &cursor); s_tester.successful_requests++; } aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_imds_client_new_release(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_new_release, s_imds_client_new_release); AWS_STATIC_STRING_FROM_LITERAL(s_ec2_metadata_root, "/latest/meta-data"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_imds_token_uri, "/latest/api/token"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_imds_resource_uri, "/latest/meta-data/iam/security-credentials/test-role"); 
AWS_STATIC_STRING_FROM_LITERAL(s_test_imds_token, "A00XXF3H00ZZ=="); static int s_imds_client_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.is_connection_acquire_successful = false; struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_ec2_metadata_root), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_tester.has_received_resource_callback == true); ASSERT_TRUE(s_tester.resource.len == 0); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_connect_failure, s_imds_client_connect_failure); static void s_validate_token_ttl_header(const struct aws_http_message *request) { const struct aws_http_headers *headers = aws_http_message_get_const_headers(request); struct aws_byte_cursor ttl_header = aws_byte_cursor_from_c_str("x-aws-ec2-metadata-token-ttl-seconds"); struct aws_byte_cursor ttl_value; int ret = aws_http_headers_get(headers, ttl_header, &ttl_value); if (ret == AWS_OP_SUCCESS) { s_tester.token_ttl_header_exist[s_tester.current_request] = true; if (aws_byte_cursor_eq_c_str_ignore_case(&ttl_value, "21600")) { s_tester.token_ttl_header_expected[s_tester.current_request] = true; } else { s_tester.token_ttl_header_expected[s_tester.current_request] = false; } } else { s_tester.token_ttl_header_exist[s_tester.current_request] = false; } } static void s_validate_token_header(const struct aws_http_message *request) { const struct aws_http_headers *headers = aws_http_message_get_const_headers(request); struct aws_byte_cursor token_header = aws_byte_cursor_from_c_str("x-aws-ec2-metadata-token"); struct aws_byte_cursor token_value; int ret = aws_http_headers_get(headers, token_header, &token_value); if (ret == AWS_OP_SUCCESS) { s_tester.token_header_exist[s_tester.current_request] = true; if (aws_byte_cursor_eq_c_str_ignore_case(&token_value, "A00XXF3H00ZZ==")) { s_tester.token_header_expected[s_tester.current_request] = true; } else { s_tester.token_header_expected[s_tester.current_request] = false; } } else { s_tester.token_header_exist[s_tester.current_request] = false; } } static int s_validate_uri_path_and_resource(int expected_requests, bool get_resource) { ASSERT_UINT_EQUALS(expected_requests, s_tester.current_request); int idx = s_tester.token_request_idx; if (s_tester.current_request >= 1) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, s_expected_imds_token_uri->bytes, s_expected_imds_token_uri->len); } idx++; if (s_tester.current_request >= 2) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, s_expected_imds_resource_uri->bytes, s_expected_imds_resource_uri->len); } ASSERT_TRUE(s_tester.has_received_resource_callback == true); if (get_resource) { ASSERT_TRUE(s_tester.resource.len != 0); } else { ASSERT_TRUE(s_tester.resource.len == 0); } return 0; } static int s_validate_uri_path(int expected_requests, struct aws_byte_cursor resource_uri) { 
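/* Verify that the mocked requests were issued in order: the token request first,
 * followed by a request for the supplied resource URI. */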
ASSERT_UINT_EQUALS(expected_requests, s_tester.current_request); int idx = s_tester.token_request_idx; if (s_tester.current_request >= 1) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, s_expected_imds_token_uri->bytes, s_expected_imds_token_uri->len); } idx++; if (s_tester.current_request >= 2) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, resource_uri.ptr, resource_uri.len); } return 0; } static int s_validate_uri_path_and_excpected_resource( int expected_requests, struct aws_byte_cursor resource_uri, struct aws_byte_cursor expected_resource) { ASSERT_UINT_EQUALS(expected_requests, s_tester.current_request); int idx = s_tester.token_request_idx; if (s_tester.current_request >= 1) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, s_expected_imds_token_uri->bytes, s_expected_imds_token_uri->len); } idx++; if (s_tester.current_request >= 2) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, resource_uri.ptr, resource_uri.len); } ASSERT_TRUE(s_tester.has_received_resource_callback == true); ASSERT_BIN_ARRAYS_EQUALS( s_tester.resource.buffer, s_tester.resource.len, expected_resource.ptr, expected_resource.len); return 0; } static int s_imds_client_token_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_400_BAD_REQUEST; struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_ec2_metadata_root), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_tester.has_received_resource_callback == true); ASSERT_TRUE(s_tester.resource.len == 0); ASSERT_TRUE(s_validate_uri_path_and_resource(1, false /*no resource*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_token_request_failure, s_imds_client_token_request_failure); static int s_imds_client_insecure_fallback_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); /* secure data flow is not supported, fallback to insecurity */ s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_403_FORBIDDEN; /* Insecurity fails as well */ s_tester.response_code[1] = AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED; struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_expected_imds_resource_uri), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_tester.has_received_resource_callback == 
true); ASSERT_TRUE(s_tester.resource.len == 0); ASSERT_TRUE(s_validate_uri_path_and_resource(2, false /*no resource*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_UINT_EQUALS(s_tester.error_code, AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_insecure_fallback_request_failure, s_imds_client_insecure_fallback_request_failure); AWS_STATIC_STRING_FROM_LITERAL( s_good_response, "{\"AccessKeyId\":\"SuccessfulAccessKey\", \n \"SecretAccessKey\":\"SuccessfulSecret\", \n " "\"Token\":\"TokenSuccess\", \n \"Expiration\":\"2020-02-25T06:03:31Z\"}"); AWS_STATIC_STRING_FROM_LITERAL(s_access_key, "SuccessfulAccessKey"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_key, "SuccessfulSecret"); AWS_STATIC_STRING_FROM_LITERAL(s_token, "TokenSuccess"); AWS_STATIC_STRING_FROM_LITERAL(s_expiration, "2020-02-25T06:03:31Z"); static int s_imds_client_v1_fallback_disabled_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); /* secure data flow is not supported */ s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_403_FORBIDDEN; /* v1 would have worked if fallback was not disabled */ struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .ec2_metadata_v1_disabled = true, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_expected_imds_resource_uri), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_tester.has_received_resource_callback == true); ASSERT_TRUE(s_tester.resource.len == 0); /* There was no fallback so we didn't get any resource */ ASSERT_TRUE(s_validate_uri_path_and_resource(1, false /*no resource*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_UINT_EQUALS(s_tester.error_code, AWS_AUTH_IMDS_CLIENT_SOURCE_FAILURE); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_v1_fallback_disabled_failure, s_imds_client_v1_fallback_disabled_failure); static int s_imds_client_resource_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; 
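/* Only the token response is queued above; the follow-up resource request completes
 * with an empty body, so the callback should report no resource. */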
struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_expected_imds_resource_uri), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_validate_uri_path_and_resource(2, false /*no resource*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_resource_request_failure, s_imds_client_resource_request_failure); static int s_imds_client_resource_request_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_expected_imds_resource_uri), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_validate_uri_path_and_resource(2, true /*got resource*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&s_tester.resource), s_good_response); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_resource_request_success, s_imds_client_resource_request_success); static int s_imds_client_insecure_resource_request_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_403_FORBIDDEN; struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_expected_imds_resource_uri), 
s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_validate_uri_path_and_resource(2, true /*got resource*/) == 0); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_FALSE(s_tester.token_header_exist[1]); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&s_tester.resource), s_good_response); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_insecure_resource_request_success, s_imds_client_insecure_resource_request_success); static int s_imds_client_insecure_then_secure_resource_request_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.token_request_idx = 1; s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED; struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .imds_version = IMDS_PROTOCOL_V1, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_expected_imds_resource_uri), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_tester.has_received_resource_callback == true); ASSERT_TRUE(s_validate_uri_path_and_resource(3, true /*no creds*/) == 0); ASSERT_FALSE(s_tester.token_ttl_header_exist[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_ttl_header_expected[1]); ASSERT_FALSE(s_tester.token_header_exist[1]); ASSERT_FALSE(s_tester.token_ttl_header_exist[2]); ASSERT_TRUE(s_tester.token_header_exist[2]); ASSERT_TRUE(s_tester.token_header_expected[2]); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&s_tester.resource), s_good_response); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE( imds_client_insecure_then_secure_resource_request_success, s_imds_client_insecure_then_secure_resource_request_success); static int s_aws_http_stream_get_multiple_incoming_response_status_mock( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; /* randomly return 403/200 */ uint32_t rand_output; int ret[2] = {AWS_HTTP_STATUS_CODE_200_OK, AWS_HTTP_STATUS_CODE_403_FORBIDDEN}; aws_device_random_u32(&rand_output); *out_status_code = ret[rand_output % 2]; return AWS_OP_SUCCESS; } static struct aws_http_stream *s_aws_http_connection_make_multiple_requests_mock( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); aws_http_message_get_request_path(options->request, 
&path); if (aws_byte_cursor_eq_c_str_ignore_case(&path, "/latest/api/token")) { s_validate_token_ttl_header(options->request); s_invoke_mock_request_callbacks(options, &s_tester.response_data_callbacks[0]); } else { s_validate_token_header(options->request); s_invoke_mock_request_callbacks(options, &s_tester.response_data_callbacks[1]); } return (struct aws_http_stream *)1; } static bool s_has_tester_received_expected_resources(void *user_data) { return s_tester.successful_requests == (*(int *)user_data); } static void s_aws_wait_for_all_resources(int expected_resources_cnt) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_expected_resources, &expected_resources_cnt); aws_mutex_unlock(&s_tester.lock); } static void s_multiple_request_get_resource_callback( const struct aws_byte_buf *resource, int error_code, void *user_data) { (void)user_data; (void)error_code; aws_mutex_lock(&s_tester.lock); s_tester.has_received_resource_callback = true; if (resource && resource->len) { struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(resource); aws_byte_buf_reset(&s_tester.resource, true); aws_byte_buf_append_dynamic(&s_tester.resource, &cursor); s_tester.successful_requests++; } aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_imds_client_multiple_resource_requests_random_responses_finally_all_success( struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .imds_version = IMDS_PROTOCOL_V2, }; options.function_table->aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_multiple_incoming_response_status_mock; options.function_table->aws_http_connection_make_request = s_aws_http_connection_make_multiple_requests_mock; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); for (size_t i = 0; i < 5000; i++) { aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_expected_imds_resource_uri), s_multiple_request_get_resource_callback, NULL); } s_aws_wait_for_all_resources(5000); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&s_tester.resource), s_good_response); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE( imds_client_multiple_resource_requests_random_responses_finally_all_success, s_imds_client_multiple_resource_requests_random_responses_finally_all_success); static int s_imds_client_real_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; struct aws_logger logger; ASSERT_SUCCESS(aws_logger_init_standard(&logger, allocator, 
&logger_options)); aws_logger_set(&logger); s_aws_imds_tester_init(allocator); struct aws_host_resolver_default_options resolver_options = { .el_group = s_tester.el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_resource_async( client, aws_byte_cursor_from_string(s_ec2_metadata_root), s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); aws_auth_library_clean_up(); aws_logger_set(NULL); aws_logger_clean_up(&logger); return 0; } AWS_TEST_CASE(imds_client_real_success, s_imds_client_real_success); AWS_STATIC_STRING_FROM_LITERAL(s_test_ami_id, "ami-5b70e32"); static int s_imds_client_get_ami_id_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_test_ami_id); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_ami_id(client, s_get_resource_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE( s_validate_uri_path_and_excpected_resource( 2, aws_byte_cursor_from_c_str("/latest/meta-data/ami-id"), good_response_cursor) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&s_tester.resource), s_test_ami_id); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_get_ami_id_success, s_imds_client_get_ami_id_success); AWS_STATIC_STRING_FROM_LITERAL(s_test_ancestor_ami_ids, "ami-5b70e32\nami-5b70e33\nami-5b70e34"); AWS_STATIC_STRING_FROM_LITERAL(s_test_ancestor_ami_id1, "ami-5b70e32"); AWS_STATIC_STRING_FROM_LITERAL(s_test_ancestor_ami_id2, "ami-5b70e33"); AWS_STATIC_STRING_FROM_LITERAL(s_test_ancestor_ami_id3, "ami-5b70e34"); static struct aws_byte_cursor s_newline_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\n"); static int 
s_assert_get_ancestor_ami_ids(const struct aws_array_list *array) { s_tester.has_received_resource_callback = true; size_t len = aws_array_list_length(array); ASSERT_TRUE(len == 3); struct aws_byte_cursor cursor[3]; for (size_t i = 0; i < len; i++) { aws_array_list_get_at(array, &cursor[i], i); aws_byte_buf_append_dynamic(&s_tester.resource, &cursor[i]); aws_byte_buf_append_dynamic(&s_tester.resource, &s_newline_cursor); } s_tester.resource.len--; ASSERT_CURSOR_VALUE_STRING_EQUALS(cursor[0], s_test_ancestor_ami_id1); ASSERT_CURSOR_VALUE_STRING_EQUALS(cursor[1], s_test_ancestor_ami_id2); ASSERT_CURSOR_VALUE_STRING_EQUALS(cursor[2], s_test_ancestor_ami_id3); if (array) { s_tester.successful_requests++; } return 0; } static void s_get_ancestor_ami_ids_callback(const struct aws_array_list *array, int error_code, void *user_data) { (void)user_data; (void)error_code; aws_mutex_lock(&s_tester.lock); s_assert_get_ancestor_ami_ids(array); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_imds_client_get_ancestor_ami_ids_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_test_ancestor_ami_ids); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_ancestor_ami_ids(client, s_get_ancestor_ami_ids_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE( s_validate_uri_path_and_excpected_resource( 2, aws_byte_cursor_from_c_str("/latest/meta-data/ancestor-ami-ids"), good_response_cursor) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&s_tester.resource), s_test_ancestor_ami_ids); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_get_ancestor_ami_ids_success, s_imds_client_get_ancestor_ami_ids_success); AWS_STATIC_STRING_FROM_LITERAL( s_iam_profile, "{\"LastUpdated\" : \"2020-06-03T20:42:19Z\", \n " "\"InstanceProfileArn\" : \"arn:aws:iam::030535792909:instance-profile/CloudWatchAgentServerRole\", \n " "\"InstanceProfileId\" : \"AIPAQOHATHEGTGNQ5THQB\"}"); AWS_STATIC_STRING_FROM_LITERAL(s_test_last_updated, "2020-06-03T20:42:19Z"); AWS_STATIC_STRING_FROM_LITERAL( s_test_profile_arn, "arn:aws:iam::030535792909:instance-profile/CloudWatchAgentServerRole"); AWS_STATIC_STRING_FROM_LITERAL(s_test_profile_id, "AIPAQOHATHEGTGNQ5THQB"); static int s_assert_get_iam_profile(const struct aws_imds_iam_profile *iam) { s_tester.has_received_resource_callback = true; 
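/* The parsed profile fields should match the canned s_iam_profile JSON declared above. */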
ASSERT_CURSOR_VALUE_STRING_EQUALS(iam->instance_profile_arn, s_test_profile_arn); ASSERT_CURSOR_VALUE_STRING_EQUALS(iam->instance_profile_id, s_test_profile_id); struct aws_byte_buf buf; aws_byte_buf_init(&buf, s_tester.allocator, 100); aws_date_time_to_utc_time_str(&iam->last_updated, AWS_DATE_FORMAT_ISO_8601, &buf); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&buf), s_test_last_updated); aws_byte_buf_clean_up(&buf); if (iam) { s_tester.successful_requests++; } return 0; } static void s_get_iam_profile_callback(const struct aws_imds_iam_profile *iam, int error_code, void *user_data) { (void)user_data; (void)error_code; aws_mutex_lock(&s_tester.lock); s_assert_get_iam_profile(iam); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_imds_client_get_iam_profile_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_iam_profile); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_iam_profile(client, s_get_iam_profile_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_validate_uri_path(2, aws_byte_cursor_from_c_str("/latest/meta-data/iam/info")) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_get_iam_profile_success, s_imds_client_get_iam_profile_success); AWS_STATIC_STRING_FROM_LITERAL( s_instance_info, "{\"accountId\" : \"030535792909\", \n" "\"architecture\" : \"x86_64\", \n" "\"availabilityZone\" : \"us-west-2a\", \n" "\"billingProducts\" : [\"1234\", \"abcd\"], \n" "\"devpayProductCodes\" : null, \n" "\"marketplaceProductCodes\" : null, \n" "\"imageId\" : \"ami-5b70e323\", \n" "\"instanceId\" : \"i-022a93b5e640c0248\", \n" "\"instanceType\" : \"c4.8xlarge\", \n" "\"kernelId\" : null, \n" "\"pendingTime\" : \"2020-05-27T08:41:17Z\", \n" "\"privateIp\" : \"172.31.22.164\", \n" "\"ramdiskId\" : null, \n" "\"region\" : \"us-west-2\", \n" "\"version\" : \"2017-09-30\" \n}"); AWS_STATIC_STRING_FROM_LITERAL(s_account_id, "030535792909"); AWS_STATIC_STRING_FROM_LITERAL(s_architecture, "x86_64"); AWS_STATIC_STRING_FROM_LITERAL(s_availability_zone, "us-west-2a"); AWS_STATIC_STRING_FROM_LITERAL(s_image_id, "ami-5b70e323"); AWS_STATIC_STRING_FROM_LITERAL(s_instance_id, "i-022a93b5e640c0248"); AWS_STATIC_STRING_FROM_LITERAL(s_instance_type, "c4.8xlarge"); AWS_STATIC_STRING_FROM_LITERAL(s_pending_time, "2020-05-27T08:41:17Z"); 
AWS_STATIC_STRING_FROM_LITERAL(s_private_ip, "172.31.22.164"); AWS_STATIC_STRING_FROM_LITERAL(s_region, "us-west-2"); AWS_STATIC_STRING_FROM_LITERAL(s_version, "2017-09-30"); AWS_STATIC_STRING_FROM_LITERAL(s_billing_product1, "1234"); AWS_STATIC_STRING_FROM_LITERAL(s_billing_product2, "abcd"); static int s_assert_get_instance_info(const struct aws_imds_instance_info *instance) { s_tester.has_received_resource_callback = true; ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->account_id, s_account_id); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->architecture, s_architecture); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->availability_zone, s_availability_zone); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->image_id, s_image_id); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->instance_id, s_instance_id); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->instance_type, s_instance_type); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->private_ip, s_private_ip); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->region, s_region); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->version, s_version); ASSERT_CURSOR_VALUE_STRING_EQUALS(instance->availability_zone, s_availability_zone); ASSERT_TRUE(aws_array_list_length(&instance->billing_products) == 2); ASSERT_TRUE(aws_array_list_length(&instance->marketplace_product_codes) == 0); struct aws_byte_cursor cursor[2]; for (size_t i = 0; i < 2; i++) { aws_array_list_get_at(&instance->billing_products, &cursor[i], i); } ASSERT_CURSOR_VALUE_STRING_EQUALS(cursor[0], s_billing_product1); ASSERT_CURSOR_VALUE_STRING_EQUALS(cursor[1], s_billing_product2); struct aws_byte_buf buf; aws_byte_buf_init(&buf, s_tester.allocator, 100); aws_date_time_to_utc_time_str(&instance->pending_time, AWS_DATE_FORMAT_ISO_8601, &buf); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&buf), s_pending_time); aws_byte_buf_clean_up(&buf); if (instance) { s_tester.successful_requests++; } return 0; } static void s_get_instance_info_callback( const struct aws_imds_instance_info *instance, int error_code, void *user_data) { (void)user_data; (void)error_code; aws_mutex_lock(&s_tester.lock); s_assert_get_instance_info(instance); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_imds_client_get_instance_info_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_instance_info); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_instance_info(client, s_get_instance_info_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE(s_validate_uri_path(2, aws_byte_cursor_from_c_str("/latest/dynamic/instance-identity/document")) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); 
aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_get_instance_info_success, s_imds_client_get_instance_info_success); static int s_assert_get_credentials_info(const struct aws_credentials *creds) { s_tester.has_received_resource_callback = true; ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(creds), s_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_secret_access_key(creds), s_secret_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(creds), s_token); struct aws_byte_buf buf; aws_byte_buf_init(&buf, s_tester.allocator, 100); struct aws_date_time date; aws_date_time_init_epoch_secs(&date, aws_credentials_get_expiration_timepoint_seconds(creds)); aws_date_time_to_utc_time_str(&date, AWS_DATE_FORMAT_ISO_8601, &buf); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_byte_cursor_from_buf(&buf), s_expiration); aws_byte_buf_clean_up(&buf); if (creds) { s_tester.successful_requests++; } return 0; } static void s_get_credentails_callback(const struct aws_credentials *creds, int error_code, void *user_data) { (void)user_data; (void)error_code; aws_mutex_lock(&s_tester.lock); s_assert_get_credentials_info(creds); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_imds_client_get_credentials_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor); struct aws_imds_client_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_imds_client *client = aws_imds_client_new(allocator, &options); aws_imds_client_get_credentials(client, aws_byte_cursor_from_c_str("test_role"), s_get_credentails_callback, NULL); s_aws_wait_for_resource_result(); ASSERT_TRUE( s_validate_uri_path(2, aws_byte_cursor_from_c_str("/latest/meta-data/iam/security-credentials/test_role")) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); aws_imds_client_release(client); s_aws_wait_for_imds_client_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(allocator, client); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(imds_client_get_credentials_success, s_imds_client_get_credentials_success); static int s_imds_client_cache_token_refresh(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.timestamp = 0; struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); 
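/* Queue five mocked responses: a token, two resources served while that token is still
 * cached, then a fresh token and a final resource after the cached token expires. */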
    aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor);
    aws_array_list_push_back(&s_tester.response_data_callbacks[1], &good_response_cursor);
    aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor);
    aws_array_list_push_back(&s_tester.response_data_callbacks[3], &test_token_cursor);
    aws_array_list_push_back(&s_tester.response_data_callbacks[4], &good_response_cursor);

    struct aws_imds_client_options options = {
        .bootstrap = s_tester.bootstrap,
        .function_table = &s_mock_function_table,
        .shutdown_options =
            {
                .shutdown_callback = s_on_shutdown_complete,
                .shutdown_user_data = NULL,
            },
    };

    struct aws_imds_client *client = aws_imds_client_new(allocator, &options);

    /* 1. Request a resource. */
    aws_imds_client_get_credentials(client, aws_byte_cursor_from_c_str("test_role"), s_get_credentails_callback, NULL);
    s_aws_wait_for_resource_result();
    /* Two requests so far: one to get the token, one to fetch the resource. */
    ASSERT_UINT_EQUALS(2, s_tester.current_request);
    ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, s_tester.error_code);

    /* 2. Request another resource without changing the timestamp, so the cached token is reused. */
    s_tester.has_received_resource_callback = false;
    aws_imds_client_get_credentials(client, aws_byte_cursor_from_c_str("test_role"), s_get_credentails_callback, NULL);
    s_aws_wait_for_resource_result();
    /* Three requests so far: only one more was needed to fetch the resource, because the cached token was reused. */
    ASSERT_UINT_EQUALS(3, s_tester.current_request);
    ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, s_tester.error_code);

    /* 3. Advance the clock past the token's TTL and request another resource; the cached token is now expired, so a
     * fresh token must be fetched first. */
    s_tester.has_received_resource_callback = false;
    s_tester.timestamp = aws_timestamp_convert(21600, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
    aws_imds_client_get_credentials(client, aws_byte_cursor_from_c_str("test_role"), s_get_credentails_callback, NULL);
    s_aws_wait_for_resource_result();
    /* Five requests so far: two more were needed, one to refresh the token and one to fetch the resource. */
    ASSERT_UINT_EQUALS(5, s_tester.current_request);
    ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, s_tester.error_code);

    aws_imds_client_release(client);
    s_aws_wait_for_imds_client_shutdown_callback();

    /* Because we mock the http connection manager, we never get a callback back from it */
    aws_mem_release(allocator, client);

    ASSERT_SUCCESS(s_aws_imds_tester_cleanup());
    return 0;
}

AWS_TEST_CASE(imds_client_cache_token_refresh, s_imds_client_cache_token_refresh);
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/aws_profile_tests.c000066400000000000000000000100461456575232400252340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0.
*/ #include #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_config_override_path, "/tmp/.aws/config"); #ifdef _WIN32 AWS_STATIC_STRING_FROM_LITERAL(s_config_override_path_result, "\\tmp\\.aws\\config"); #else AWS_STATIC_STRING_FROM_LITERAL(s_config_override_path_result, "/tmp/.aws/config"); #endif /* _WIN32 */ static int s_config_file_path_override_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor override_cursor = aws_byte_cursor_from_string(s_config_override_path); struct aws_string *path = aws_get_config_file_path(allocator, &override_cursor); ASSERT_TRUE(aws_string_compare(path, s_config_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(config_file_path_override_test, s_config_file_path_override_test); AWS_STATIC_STRING_FROM_LITERAL(s_config_env_var, "AWS_CONFIG_FILE"); static int s_config_file_path_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_set_environment_value(s_config_env_var, s_config_override_path); struct aws_string *path = aws_get_config_file_path(allocator, NULL); ASSERT_TRUE(aws_string_compare(path, s_config_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(config_file_path_environment_test, s_config_file_path_environment_test); AWS_STATIC_STRING_FROM_LITERAL(s_credentials_override_path, "/tmp/.aws/credentials"); #ifdef _WIN32 AWS_STATIC_STRING_FROM_LITERAL(s_credentials_override_path_result, "\\tmp\\.aws\\credentials"); #else AWS_STATIC_STRING_FROM_LITERAL(s_credentials_override_path_result, "/tmp/.aws/credentials"); #endif /* _WIN32 */ static int s_credentials_file_path_override_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor override_cursor = aws_byte_cursor_from_string(s_credentials_override_path); struct aws_string *path = aws_get_credentials_file_path(allocator, &override_cursor); ASSERT_TRUE(aws_string_compare(path, s_credentials_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(credentials_file_path_override_test, s_credentials_file_path_override_test); AWS_STATIC_STRING_FROM_LITERAL(s_credentials_env_var, "AWS_SHARED_CREDENTIALS_FILE"); static int s_credentials_file_path_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_set_environment_value(s_credentials_env_var, s_credentials_override_path); struct aws_string *path = aws_get_credentials_file_path(allocator, NULL); ASSERT_TRUE(aws_string_compare(path, s_credentials_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(credentials_file_path_environment_test, s_credentials_file_path_environment_test); AWS_STATIC_STRING_FROM_LITERAL(s_profile_env_var, "AWS_PROFILE"); AWS_STATIC_STRING_FROM_LITERAL(s_profile_override, "NotTheDefault"); static int s_profile_override_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor override_cursor = aws_byte_cursor_from_string(s_profile_override); struct aws_string *profile_name = aws_get_profile_name(allocator, &override_cursor); ASSERT_TRUE(aws_string_compare(profile_name, s_profile_override) == 0); aws_string_destroy(profile_name); return 0; } AWS_TEST_CASE(profile_override_test, s_profile_override_test); static int s_profile_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_set_environment_value(s_profile_env_var, s_profile_override); struct aws_string *profile_name = aws_get_profile_name(allocator, NULL); ASSERT_TRUE(aws_string_compare(profile_name, 
s_profile_override) == 0); aws_string_destroy(profile_name); return 0; } AWS_TEST_CASE(profile_environment_test, s_profile_environment_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_cognito_tests.c000066400000000000000000000517401456575232400306610ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include struct aws_mock_web_credential_provider_tester { struct aws_byte_buf request_uri; struct aws_array_list response_data_callbacks; bool is_connection_acquire_successful; bool is_request_successful; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; bool has_received_shutdown_callback; int error_code; struct aws_tls_ctx *ctx; struct aws_tls_connection_options tls_connection_options; struct aws_event_loop_group *el_group; struct aws_host_resolver *resolver; struct aws_client_bootstrap *bootstrap; struct aws_http_connection_manager *mock_manager; struct aws_http_connection *mock_connection; struct aws_http_stream *mock_stream; size_t current_request_attempt_number; struct aws_http_make_request_options request_callback_options; void (*manager_destructor_fn)(void *); void *manager_destructor_user_data; }; static struct aws_mock_web_credential_provider_tester s_tester; static struct aws_http_connection_manager *s_aws_http_connection_manager_new_mock( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { (void)allocator; (void)options; s_tester.manager_destructor_fn = options->shutdown_complete_callback; s_tester.manager_destructor_user_data = options->shutdown_complete_user_data; return s_tester.mock_manager; } static void s_aws_http_connection_manager_release_mock(struct aws_http_connection_manager *manager) { (void)manager; s_tester.manager_destructor_fn(s_tester.manager_destructor_user_data); } static void s_aws_http_connection_manager_acquire_connection_mock( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; (void)callback; (void)user_data; if (s_tester.is_connection_acquire_successful) { callback(s_tester.mock_connection, AWS_ERROR_SUCCESS, user_data); } else { aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_ERROR_HTTP_UNKNOWN, user_data); } } static int s_aws_http_connection_manager_release_connection_mock( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { (void)manager; (void)connection; return AWS_OP_SUCCESS; } static void s_invoke_mock_request_callbacks( const struct aws_http_make_request_options *options, struct aws_array_list *data_callbacks, bool is_request_successful) { size_t data_callback_count = aws_array_list_length(data_callbacks); struct aws_http_header headers[1]; AWS_ZERO_ARRAY(headers); headers[0].name = aws_byte_cursor_from_c_str("some-header"); headers[0].value = aws_byte_cursor_from_c_str("value"); if (options->on_response_headers) { options->on_response_headers(s_tester.mock_stream, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data); } if (options->on_response_header_block_done) { options->on_response_header_block_done(s_tester.mock_stream, data_callback_count > 0, options->user_data); } size_t response_count = aws_array_list_length(&s_tester.response_data_callbacks); if 
(response_count > 0) { size_t response_body_index = aws_min_size(s_tester.current_request_attempt_number, response_count - 1); struct aws_byte_cursor data_callback_cursor; aws_array_list_get_at(data_callbacks, &data_callback_cursor, response_body_index); options->on_response_body(s_tester.mock_stream, &data_callback_cursor, options->user_data); } ++s_tester.current_request_attempt_number; options->on_complete( s_tester.mock_stream, is_request_successful ? AWS_ERROR_SUCCESS : AWS_ERROR_HTTP_UNKNOWN, options->user_data); } static struct aws_http_stream *s_aws_http_connection_make_request_mock( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); aws_http_message_get_request_path(options->request, &path); aws_byte_buf_append_dynamic(&s_tester.request_uri, &path); s_tester.request_callback_options = *options; return s_tester.mock_stream; } static int s_aws_http_stream_activate_mock(struct aws_http_stream *stream) { (void)stream; s_invoke_mock_request_callbacks( &s_tester.request_callback_options, &s_tester.response_data_callbacks, s_tester.is_request_successful); return AWS_OP_SUCCESS; } static int s_aws_http_stream_get_incoming_response_status_mock( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; if (s_tester.is_request_successful) { *out_status_code = 200; } else { *out_status_code = 400; } return AWS_OP_SUCCESS; } static void s_aws_http_stream_release_mock(struct aws_http_stream *stream) { (void)stream; } static void s_aws_http_connection_close_mock(struct aws_http_connection *connection) { (void)connection; } static struct aws_auth_http_system_vtable s_mock_function_table = { .aws_http_connection_manager_new = s_aws_http_connection_manager_new_mock, .aws_http_connection_manager_release = s_aws_http_connection_manager_release_mock, .aws_http_connection_manager_acquire_connection = s_aws_http_connection_manager_acquire_connection_mock, .aws_http_connection_manager_release_connection = s_aws_http_connection_manager_release_connection_mock, .aws_http_connection_make_request = s_aws_http_connection_make_request_mock, .aws_http_stream_activate = s_aws_http_stream_activate_mock, .aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_incoming_response_status_mock, .aws_http_stream_release = s_aws_http_stream_release_mock, .aws_http_connection_close = s_aws_http_connection_close_mock}; static int s_aws_cognito_tester_init(struct aws_allocator *allocator) { aws_auth_library_init(allocator); if (aws_array_list_init_dynamic(&s_tester.response_data_callbacks, allocator, 10, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_buf_init(&s_tester.request_uri, allocator, 100)) { return AWS_OP_ERR; } if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } s_tester.el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = s_tester.el_group, .max_entries = 8, }; s_tester.resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .host_resolver = s_tester.resolver, }; s_tester.bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); AWS_ZERO_STRUCT(s_tester.tls_connection_options); struct aws_tls_ctx_options tls_options; 
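    /* Build a default client TLS context and derive connection options from it; the tests below pass this context
     * to the Cognito provider via options.tls_ctx. */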
aws_tls_ctx_options_init_default_client(&tls_options, allocator); s_tester.ctx = aws_tls_client_ctx_new(allocator, &tls_options); ASSERT_NOT_NULL(s_tester.ctx); aws_tls_ctx_options_clean_up(&tls_options); aws_tls_connection_options_init_from_ctx(&s_tester.tls_connection_options, s_tester.ctx); /* default to everything successful */ s_tester.is_connection_acquire_successful = true; s_tester.is_request_successful = true; /* I hate using 1 for mocks, let's instead point at valid addresses */ s_tester.mock_manager = (void *)(&s_tester.mock_manager); s_tester.mock_connection = (void *)(&s_tester.mock_connection); s_tester.mock_stream = (void *)(&s_tester.mock_stream); return AWS_OP_SUCCESS; } static void s_aws_cognito_tester_cleanup(void) { aws_array_list_clean_up(&s_tester.response_data_callbacks); aws_byte_buf_clean_up(&s_tester.request_uri); aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_credentials_release(s_tester.credentials); aws_client_bootstrap_release(s_tester.bootstrap); aws_host_resolver_release(s_tester.resolver); aws_event_loop_group_release(s_tester.el_group); aws_tls_ctx_release(s_tester.ctx); s_tester.ctx = NULL; aws_tls_connection_options_clean_up(&s_tester.tls_connection_options); aws_auth_library_clean_up(); } static bool s_has_tester_received_credentials_callback(void *user_data) { (void)user_data; return s_tester.has_received_credentials_callback; } static void s_aws_wait_for_credentials_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_credentials_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = true; s_tester.error_code = error_code; s_tester.credentials = credentials; aws_credentials_acquire(credentials); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_credentials_provider_cognito_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_cognito_tester_init(allocator); struct aws_credentials_provider_cognito_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .endpoint = aws_byte_cursor_from_c_str("somewhere.amazonaws.com"), .identity = aws_byte_cursor_from_c_str("someone"), .tls_ctx = s_tester.ctx, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_cognito(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_release(provider); s_aws_cognito_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(credentials_provider_cognito_new_destroy, s_credentials_provider_cognito_new_destroy); static int s_credentials_provider_cognito_failure_connect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_cognito_tester_init(allocator); s_tester.is_connection_acquire_successful = false; struct aws_credentials_provider_cognito_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .endpoint = aws_byte_cursor_from_c_str("somewhere.amazonaws.com"), .identity = aws_byte_cursor_from_c_str("someone"), .tls_ctx = s_tester.ctx, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_cognito(allocator, &options); ASSERT_NOT_NULL(provider); ASSERT_SUCCESS(aws_credentials_provider_get_credentials(provider, 
s_get_credentials_callback, NULL)); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.error_code == AWS_ERROR_HTTP_UNKNOWN); ASSERT_TRUE(s_tester.credentials == NULL); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_cognito_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(credentials_provider_cognito_failure_connect, s_credentials_provider_cognito_failure_connect_fn); static int s_credentials_provider_cognito_failure_request_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_cognito_tester_init(allocator); s_tester.is_request_successful = false; struct aws_credentials_provider_cognito_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .endpoint = aws_byte_cursor_from_c_str("somewhere.amazonaws.com"), .identity = aws_byte_cursor_from_c_str("someone"), .tls_ctx = s_tester.ctx, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_cognito(allocator, &options); ASSERT_NOT_NULL(provider); ASSERT_SUCCESS(aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL)); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.error_code == AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE); ASSERT_TRUE(s_tester.credentials == NULL); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_cognito_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(credentials_provider_cognito_failure_request, s_credentials_provider_cognito_failure_request_fn); AWS_STATIC_STRING_FROM_LITERAL(s_bad_document_response, "{\"NotTheExpectedDocumentFormat\":\"Error\"}"); static int s_credentials_provider_cognito_failure_bad_document_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_cognito_tester_init(allocator); struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_bad_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &bad_document_cursor); struct aws_credentials_provider_cognito_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .endpoint = aws_byte_cursor_from_c_str("somewhere.amazonaws.com"), .identity = aws_byte_cursor_from_c_str("someone"), .tls_ctx = s_tester.ctx, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_cognito(allocator, &options); ASSERT_NOT_NULL(provider); ASSERT_SUCCESS(aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL)); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.error_code != AWS_ERROR_SUCCESS); ASSERT_TRUE(s_tester.credentials == NULL); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_cognito_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_cognito_failure_bad_document, s_credentials_provider_cognito_failure_bad_document_fn); AWS_STATIC_STRING_FROM_LITERAL( s_good_document_response, "{\"Credentials\":{\"AccessKeyId\":\"SomeAccessKeyIdValue\",\"SecretKey\":\"SomeSecretKeyValue\",\"SessionToken\":" "\"SomeSessionTokenValue\",\"Expiration\":1663003154}}"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_access_key_id, "SomeAccessKeyIdValue"); 
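/* The s_expected_* literals below mirror the corresponding fields of s_good_document_response above;
 * s_verify_credentials() compares each credential cursor against them and only checks that the parsed
 * Expiration epoch value is positive. */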
AWS_STATIC_STRING_FROM_LITERAL(s_expected_secret_access_key, "SomeSecretKeyValue"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_session_token, "SomeSessionTokenValue"); static int s_verify_credentials(struct aws_credentials *credentials) { struct aws_byte_cursor access_key_id = aws_credentials_get_access_key_id(credentials); ASSERT_BIN_ARRAYS_EQUALS( s_expected_access_key_id->bytes, s_expected_access_key_id->len, access_key_id.ptr, access_key_id.len); struct aws_byte_cursor secret_access_key = aws_credentials_get_secret_access_key(credentials); ASSERT_BIN_ARRAYS_EQUALS( s_expected_secret_access_key->bytes, s_expected_secret_access_key->len, secret_access_key.ptr, secret_access_key.len); struct aws_byte_cursor session_token = aws_credentials_get_session_token(credentials); ASSERT_BIN_ARRAYS_EQUALS( s_expected_session_token->bytes, s_expected_session_token->len, session_token.ptr, session_token.len); uint64_t expiration = aws_credentials_get_expiration_timepoint_seconds(credentials); ASSERT_TRUE(expiration > 0); return AWS_OP_SUCCESS; } static int s_credentials_provider_cognito_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_cognito_tester_init(allocator); struct aws_byte_cursor good_document_cursor = aws_byte_cursor_from_string(s_good_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_document_cursor); struct aws_credentials_provider_cognito_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .endpoint = aws_byte_cursor_from_c_str("somewhere.amazonaws.com"), .identity = aws_byte_cursor_from_c_str("someone"), .tls_ctx = s_tester.ctx, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_cognito(allocator, &options); ASSERT_NOT_NULL(provider); ASSERT_SUCCESS(aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL)); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(s_tester.credentials != NULL); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_cognito_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(credentials_provider_cognito_success, s_credentials_provider_cognito_success_fn); static int s_credentials_provider_cognito_success_after_retry_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_cognito_tester_init(allocator); /* crummy response followed by a good one. 
Verifies basic retry flow */ struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_bad_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &bad_document_cursor); struct aws_byte_cursor good_document_cursor = aws_byte_cursor_from_string(s_good_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_document_cursor); struct aws_credentials_provider_cognito_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .endpoint = aws_byte_cursor_from_c_str("somewhere.amazonaws.com"), .identity = aws_byte_cursor_from_c_str("someone"), .tls_ctx = s_tester.ctx, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_cognito(allocator, &options); ASSERT_NOT_NULL(provider); ASSERT_SUCCESS(aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL)); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(s_tester.credentials != NULL); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_cognito_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(credentials_provider_cognito_success_after_retry, s_credentials_provider_cognito_success_after_retry_fn); AWS_STATIC_STRING_FROM_LITERAL(s_cognito_identity_environment_variable, "AWS_TESTING_COGNITO_IDENTITY"); static int s_credentials_provider_cognito_success_unauthenticated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_cognito_tester_init(allocator); struct aws_string *identity = NULL; ASSERT_SUCCESS(aws_get_environment_value(allocator, s_cognito_identity_environment_variable, &identity)); ASSERT_NOT_NULL(identity); struct aws_credentials_provider_cognito_options options = { .bootstrap = s_tester.bootstrap, .endpoint = aws_byte_cursor_from_c_str("cognito-identity.us-east-1.amazonaws.com"), .identity = aws_byte_cursor_from_string(identity), .tls_ctx = s_tester.ctx, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_cognito(allocator, &options); ASSERT_NOT_NULL(provider); ASSERT_SUCCESS(aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL)); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(s_tester.credentials != NULL); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_cognito_tester_cleanup(); aws_string_destroy(identity); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_cognito_success_unauthenticated, s_credentials_provider_cognito_success_unauthenticated_fn); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_ecs_tests.c000066400000000000000000000645651456575232400300020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct aws_mock_ecs_tester { struct aws_byte_buf request_uri; struct aws_array_list response_data_callbacks; bool is_connection_acquire_successful; bool is_request_successful; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; bool has_received_shutdown_callback; uint32_t selected_port; int error_code; }; static struct aws_mock_ecs_tester s_tester; static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_shutdown_callback = true; aws_mutex_unlock(&s_tester.lock); aws_condition_variable_notify_one(&s_tester.signal); } static bool s_has_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_tester.has_received_shutdown_callback; } static void s_aws_wait_for_provider_shutdown_callback(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_has_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static struct aws_http_connection_manager *s_aws_http_connection_manager_new_mock( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { (void)allocator; (void)options; aws_mutex_lock(&s_tester.lock); s_tester.selected_port = options->port; aws_mutex_unlock(&s_tester.lock); return (struct aws_http_connection_manager *)1; } static void s_aws_http_connection_manager_release_mock(struct aws_http_connection_manager *manager) { (void)manager; s_on_shutdown_complete(NULL); } static void s_aws_http_connection_manager_acquire_connection_mock( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; (void)callback; (void)user_data; if (s_tester.is_connection_acquire_successful) { callback((struct aws_http_connection *)1, AWS_OP_SUCCESS, user_data); } else { aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_OP_ERR, user_data); } } static int s_aws_http_connection_manager_release_connection_mock( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { (void)manager; (void)connection; return AWS_OP_SUCCESS; } static void s_invoke_mock_request_callbacks( const struct aws_http_make_request_options *options, struct aws_array_list *data_callbacks, bool is_request_successful) { size_t data_callback_count = aws_array_list_length(data_callbacks); struct aws_http_header headers[1]; AWS_ZERO_ARRAY(headers); headers[0].name = aws_byte_cursor_from_c_str("some-header"); headers[0].value = aws_byte_cursor_from_c_str("value"); options->on_response_headers( (struct aws_http_stream *)1, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data); if (options->on_response_header_block_done) { options->on_response_header_block_done( (struct aws_http_stream *)1, data_callback_count > 0, options->user_data); } for (size_t i = 0; i < data_callback_count; ++i) { struct aws_byte_cursor data_callback_cursor; if (aws_array_list_get_at(data_callbacks, &data_callback_cursor, i)) { continue; } options->on_response_body((struct aws_http_stream *)1, &data_callback_cursor, options->user_data); } options->on_complete( (struct aws_http_stream *)1, is_request_successful ? 
AWS_ERROR_SUCCESS : AWS_ERROR_HTTP_UNKNOWN, options->user_data); } static struct aws_http_stream *s_aws_http_connection_make_request_mock( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); aws_http_message_get_request_path(options->request, &path); aws_byte_buf_append_dynamic(&s_tester.request_uri, &path); s_invoke_mock_request_callbacks(options, &s_tester.response_data_callbacks, s_tester.is_request_successful); return (struct aws_http_stream *)1; } static int s_aws_http_stream_activate_mock(struct aws_http_stream *stream) { (void)stream; return AWS_OP_SUCCESS; } static int s_aws_http_stream_get_incoming_response_status_mock( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; *out_status_code = AWS_HTTP_STATUS_CODE_200_OK; return AWS_OP_SUCCESS; } static void s_aws_http_stream_release_mock(struct aws_http_stream *stream) { (void)stream; } static void s_aws_http_connection_close_mock(struct aws_http_connection *connection) { (void)connection; } static struct aws_auth_http_system_vtable s_mock_function_table = { .aws_http_connection_manager_new = s_aws_http_connection_manager_new_mock, .aws_http_connection_manager_release = s_aws_http_connection_manager_release_mock, .aws_http_connection_manager_acquire_connection = s_aws_http_connection_manager_acquire_connection_mock, .aws_http_connection_manager_release_connection = s_aws_http_connection_manager_release_connection_mock, .aws_http_connection_make_request = s_aws_http_connection_make_request_mock, .aws_http_stream_activate = s_aws_http_stream_activate_mock, .aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_incoming_response_status_mock, .aws_http_stream_release = s_aws_http_stream_release_mock, .aws_http_connection_close = s_aws_http_connection_close_mock}; static int s_aws_ecs_tester_init(struct aws_allocator *allocator) { if (aws_array_list_init_dynamic(&s_tester.response_data_callbacks, allocator, 10, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_buf_init(&s_tester.request_uri, allocator, 100)) { return AWS_OP_ERR; } if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } aws_auth_library_init(allocator); /* default to everything successful */ s_tester.is_connection_acquire_successful = true; s_tester.is_request_successful = true; return AWS_OP_SUCCESS; } static void s_aws_ecs_tester_cleanup(void) { aws_array_list_clean_up(&s_tester.response_data_callbacks); aws_byte_buf_clean_up(&s_tester.request_uri); aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_credentials_release(s_tester.credentials); aws_auth_library_clean_up(); } static bool s_has_tester_received_credentials_callback(void *user_data) { (void)user_data; return s_tester.has_received_credentials_callback; } static void s_aws_wait_for_credentials_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_credentials_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = true; s_tester.error_code = error_code; s_tester.credentials = credentials; 
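    /* Hold a reference so the credentials stay valid after the provider's callback returns;
     * s_aws_ecs_tester_cleanup() releases it. */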
aws_credentials_acquire(credentials); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_credentials_provider_ecs_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_ecs_tester_init(allocator); struct aws_credentials_provider_ecs_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_ecs_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_new_destroy, s_credentials_provider_ecs_new_destroy); static int s_credentials_provider_ecs_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_ecs_tester_init(allocator); s_tester.is_connection_acquire_successful = false; struct aws_credentials_provider_ecs_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); ASSERT_UINT_EQUALS(80, s_tester.selected_port); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_ecs_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_connect_failure, s_credentials_provider_ecs_connect_failure); AWS_STATIC_STRING_FROM_LITERAL(s_expected_ecs_relative_uri, "/path/to/resource/?a=b&c=d"); static int s_credentials_provider_ecs_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_ecs_tester_init(allocator); s_tester.is_request_successful = false; struct aws_credentials_provider_ecs_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); 
s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_ecs_relative_uri->bytes, s_expected_ecs_relative_uri->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); ASSERT_UINT_EQUALS(80, s_tester.selected_port); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_ecs_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_request_failure, s_credentials_provider_ecs_request_failure); AWS_STATIC_STRING_FROM_LITERAL(s_bad_document_response, "{\"NotTheExpectedDocumentFormat\":\"Error\"}"); static int s_credentials_provider_ecs_bad_document_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_ecs_tester_init(allocator); struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_bad_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &bad_document_cursor); struct aws_credentials_provider_ecs_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), .port = 555, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_ecs_relative_uri->bytes, s_expected_ecs_relative_uri->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); ASSERT_UINT_EQUALS(555, s_tester.selected_port); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_ecs_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_bad_document_failure, s_credentials_provider_ecs_bad_document_failure); AWS_STATIC_STRING_FROM_LITERAL( s_good_response, "{\"AccessKeyId\":\"SuccessfulAccessKey\", \n \"SecretAccessKey\":\"SuccessfulSecret\", \n " "\"Token\":\"TokenSuccess\", \n \"Expiration\":\"2020-02-25T06:03:31Z\"}"); AWS_STATIC_STRING_FROM_LITERAL(s_good_access_key_id, "SuccessfulAccessKey"); AWS_STATIC_STRING_FROM_LITERAL(s_good_secret_access_key, "SuccessfulSecret"); AWS_STATIC_STRING_FROM_LITERAL(s_good_session_token, "TokenSuccess"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_expiration, "2020-02-25T06:03:31Z"); static int s_do_ecs_success_test( struct aws_allocator *allocator, struct aws_credentials_provider_ecs_options *options) { struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); 
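    /* Under the lock, check both the relative URI the mock captured and the credentials parsed from the mocked
     * response body. */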
ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_ecs_relative_uri->bytes, s_expected_ecs_relative_uri->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(s_tester.credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(s_tester.credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(s_tester.credentials), s_good_session_token); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_response_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_TRUE( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == (uint64_t)expiration.timestamp); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); return AWS_OP_SUCCESS; } static int s_credentials_provider_ecs_basic_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_ecs_tester_init(allocator); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor); struct aws_credentials_provider_ecs_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), }; ASSERT_SUCCESS(s_do_ecs_success_test(allocator, &options)); s_aws_ecs_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_basic_success, s_credentials_provider_ecs_basic_success); static int s_credentials_provider_ecs_no_auth_token_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_ecs_tester_init(allocator); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor); struct aws_credentials_provider_ecs_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), }; ASSERT_SUCCESS(s_do_ecs_success_test(allocator, &options)); s_aws_ecs_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_no_auth_token_success, s_credentials_provider_ecs_no_auth_token_success); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_first_part, "{\"AccessKeyId\":\"SuccessfulAccessKey\", \n \"Secret"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_second_part, "AccessKey\":\"SuccessfulSecret\", \n \"Token\":\"Token"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_third_part, "Success\", \n \"Expiration\":\"2020-02-25T06:03:31Z\"}"); static int s_credentials_provider_ecs_success_multi_part_doc(struct aws_allocator 
*allocator, void *ctx) { (void)ctx; s_aws_ecs_tester_init(allocator); struct aws_byte_cursor good_response_cursor1 = aws_byte_cursor_from_string(s_good_response_first_part); struct aws_byte_cursor good_response_cursor2 = aws_byte_cursor_from_string(s_good_response_second_part); struct aws_byte_cursor good_response_cursor3 = aws_byte_cursor_from_string(s_good_response_third_part); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor1); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor2); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor3); struct aws_credentials_provider_ecs_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_ecs_relative_uri->bytes, s_expected_ecs_relative_uri->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(s_tester.credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(s_tester.credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(s_tester.credentials), s_good_session_token); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_response_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_TRUE( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == (uint64_t)expiration.timestamp); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_ecs_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_success_multi_part_doc, s_credentials_provider_ecs_success_multi_part_doc); static int s_credentials_provider_ecs_real_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); s_aws_ecs_tester_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_credentials_provider_ecs_options options = { .bootstrap = bootstrap, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, 
.shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); s_aws_ecs_tester_cleanup(); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_real_new_destroy, s_credentials_provider_ecs_real_new_destroy); static int s_credentials_provider_ecs_real_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); s_aws_ecs_tester_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_credentials_provider_ecs_options options = { .bootstrap = bootstrap, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .host = aws_byte_cursor_from_c_str("www.xxx123321testmocknonexsitingawsservice.com"), .path_and_query = aws_byte_cursor_from_c_str("/path/to/resource/?a=b&c=d"), .auth_token = aws_byte_cursor_from_c_str("test-token-1234-abcd"), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_ecs(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_mutex_lock(&s_tester.lock); ASSERT_TRUE(s_tester.credentials != NULL); aws_mutex_unlock(&s_tester.lock); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_ecs_tester_cleanup(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE(credentials_provider_ecs_real_success, s_credentials_provider_ecs_real_success); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_imds_tests.c000066400000000000000000001264431456575232400301560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include /** * The max requests SDK could make to IMDS V2 should be 4. * 1. query_role_name -> Get 401, unauthorized, then switch to secure way. * 2. query_token * 3. query role name * 4. query role * * By default, the requests made is 3. * 1. query_token (unless gets 400, no matter succeed or not, next step is query role name w/o token) * 2. query role name. * 3. query role * * Well, IMDS could act crazy then client would keep switching between secure and insecure way. 
* We will not handle this extreme case. */ #define IMDS_MAX_REQUESTS (8) struct aws_mock_imds_tester { struct aws_allocator *allocator; struct aws_byte_buf request_uris[IMDS_MAX_REQUESTS]; struct aws_array_list response_data_callbacks[IMDS_MAX_REQUESTS]; int current_request; int response_code[IMDS_MAX_REQUESTS]; int token_request_idx; bool is_connection_acquire_successful; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; struct aws_event_loop_group *el_group; struct aws_client_bootstrap *bootstrap; bool has_received_credentials_callback; bool has_received_shutdown_callback; int error_code; bool token_ttl_header_exist[IMDS_MAX_REQUESTS]; bool token_ttl_header_expected[IMDS_MAX_REQUESTS]; bool token_header_exist[IMDS_MAX_REQUESTS]; bool token_header_expected[IMDS_MAX_REQUESTS]; bool alternate_closed_connections; }; static struct aws_mock_imds_tester s_tester; struct aws_credentials_provider_imds_impl { struct aws_imds_client *client; }; static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_shutdown_callback = true; aws_mutex_unlock(&s_tester.lock); aws_condition_variable_notify_one(&s_tester.signal); } static bool s_has_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_tester.has_received_shutdown_callback; } static void s_aws_wait_for_provider_shutdown_callback(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_has_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static struct aws_http_connection_manager *s_aws_http_connection_manager_new_mock( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { (void)allocator; (void)options; return (struct aws_http_connection_manager *)1; } static void s_aws_http_connection_manager_release_mock(struct aws_http_connection_manager *manager) { (void)manager; s_on_shutdown_complete(NULL); } static void s_aws_http_connection_manager_acquire_connection_mock( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; if (s_tester.is_connection_acquire_successful) { callback((struct aws_http_connection *)1, AWS_OP_SUCCESS, user_data); } else { aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_OP_ERR, user_data); } } static int s_aws_http_connection_manager_release_connection_mock( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { (void)manager; (void)connection; return AWS_OP_SUCCESS; } static void s_invoke_mock_request_callbacks( const struct aws_http_make_request_options *options, struct aws_array_list *data_callbacks) { size_t data_callback_count = aws_array_list_length(data_callbacks); struct aws_http_header headers[1]; AWS_ZERO_ARRAY(headers); headers[0].name = aws_byte_cursor_from_c_str("some-header"); headers[0].value = aws_byte_cursor_from_c_str("value"); options->on_response_headers( (struct aws_http_stream *)1, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data); if (options->on_response_header_block_done) { options->on_response_header_block_done( (struct aws_http_stream *)1, data_callback_count > 0, options->user_data); } for (size_t i = 0; i < data_callback_count; ++i) { struct aws_byte_cursor data_callback_cursor; if (aws_array_list_get_at(data_callbacks, &data_callback_cursor, i)) { continue; } options->on_response_body((struct 
aws_http_stream *)1, &data_callback_cursor, options->user_data); } if (!s_tester.alternate_closed_connections) { options->on_complete((struct aws_http_stream *)1, AWS_ERROR_SUCCESS, options->user_data); } else { options->on_complete( (struct aws_http_stream *)1, ((uint8_t)s_tester.current_request & 0x01) ? AWS_ERROR_HTTP_CONNECTION_CLOSED : AWS_ERROR_SUCCESS, options->user_data); } } static void s_validate_token_ttl_header(const struct aws_http_message *request); static void s_validate_token_header(const struct aws_http_message *request); static struct aws_http_stream *s_aws_http_connection_make_request_mock( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); aws_http_message_get_request_path(options->request, &path); if (s_tester.current_request == s_tester.token_request_idx) { /* verify token ttl header */ s_validate_token_ttl_header(options->request); } else if (s_tester.current_request > s_tester.token_request_idx) { /* verify token header */ s_validate_token_header(options->request); } int idx = s_tester.current_request++; aws_byte_buf_append_dynamic(&(s_tester.request_uris[idx]), &path); s_invoke_mock_request_callbacks(options, &s_tester.response_data_callbacks[idx]); return (struct aws_http_stream *)1; } static int s_aws_http_stream_activate_mock(struct aws_http_stream *stream) { (void)stream; return AWS_OP_SUCCESS; } static struct aws_http_connection *s_aws_http_stream_get_connection_mock(const struct aws_http_stream *stream) { (void)stream; return (struct aws_http_connection *)1; } static int s_aws_http_stream_get_incoming_response_status_mock( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; if (s_tester.response_code[s_tester.current_request - 1] != 0) { *out_status_code = s_tester.response_code[s_tester.current_request - 1]; } else { *out_status_code = AWS_HTTP_STATUS_CODE_200_OK; } return AWS_OP_SUCCESS; } static void s_aws_http_stream_release_mock(struct aws_http_stream *stream) { (void)stream; } static void s_aws_http_connection_close_mock(struct aws_http_connection *connection) { (void)connection; } static int s_aws_high_res_clock_get_ticks_mock(uint64_t *timestamp) { return aws_high_res_clock_get_ticks(timestamp); } static struct aws_auth_http_system_vtable s_mock_function_table = { .aws_http_connection_manager_new = s_aws_http_connection_manager_new_mock, .aws_http_connection_manager_release = s_aws_http_connection_manager_release_mock, .aws_http_connection_manager_acquire_connection = s_aws_http_connection_manager_acquire_connection_mock, .aws_http_connection_manager_release_connection = s_aws_http_connection_manager_release_connection_mock, .aws_http_connection_make_request = s_aws_http_connection_make_request_mock, .aws_http_stream_activate = s_aws_http_stream_activate_mock, .aws_http_stream_get_connection = s_aws_http_stream_get_connection_mock, .aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_incoming_response_status_mock, .aws_http_stream_release = s_aws_http_stream_release_mock, .aws_http_connection_close = s_aws_http_connection_close_mock, .aws_high_res_clock_get_ticks = s_aws_high_res_clock_get_ticks_mock, }; static int s_aws_imds_tester_init(struct aws_allocator *allocator) { aws_auth_library_init(allocator); AWS_ZERO_STRUCT(s_tester); for (size_t i = 0; i < IMDS_MAX_REQUESTS; i++) { if (aws_array_list_init_dynamic( &s_tester.response_data_callbacks[i], allocator, 10, 
sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_buf_init(&s_tester.request_uris[i], allocator, 100)) { return AWS_OP_ERR; } } s_tester.allocator = allocator; if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } /* default to everything successful */ s_tester.is_connection_acquire_successful = true; s_tester.el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .user_data = NULL, .host_resolution_config = NULL, .host_resolver = NULL, .on_shutdown_complete = NULL, }; s_tester.bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(s_tester.bootstrap); return AWS_OP_SUCCESS; } static int s_aws_imds_tester_cleanup(void) { for (size_t i = 0; i < IMDS_MAX_REQUESTS; i++) { aws_array_list_clean_up(&s_tester.response_data_callbacks[i]); aws_byte_buf_clean_up(&s_tester.request_uris[i]); } aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_credentials_release(s_tester.credentials); aws_client_bootstrap_release(s_tester.bootstrap); aws_event_loop_group_release(s_tester.el_group); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } static bool s_has_tester_received_credentials_callback(void *user_data) { (void)user_data; return s_tester.has_received_credentials_callback; } static void s_aws_wait_for_credentials_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_credentials_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = true; s_tester.error_code = error_code; s_tester.credentials = credentials; aws_credentials_acquire(credentials); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_credentials_provider_imds_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(credentials_provider_imds_new_destroy, s_credentials_provider_imds_new_destroy); static int s_credentials_provider_imds_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.is_connection_acquire_successful = false; struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = 
aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(credentials_provider_imds_connect_failure, s_credentials_provider_imds_connect_failure); AWS_STATIC_STRING_FROM_LITERAL(s_expected_imds_token_uri, "/latest/api/token"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_imds_base_uri, "/latest/meta-data/iam/security-credentials/"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_imds_role_uri, "/latest/meta-data/iam/security-credentials/test-role"); AWS_STATIC_STRING_FROM_LITERAL(s_test_role_response, "test-role"); AWS_STATIC_STRING_FROM_LITERAL(s_test_imds_token, "A00XXF3H00ZZ=="); static void s_validate_token_ttl_header(const struct aws_http_message *request) { const struct aws_http_headers *headers = aws_http_message_get_const_headers(request); struct aws_byte_cursor ttl_header = aws_byte_cursor_from_c_str("x-aws-ec2-metadata-token-ttl-seconds"); struct aws_byte_cursor ttl_value; int ret = aws_http_headers_get(headers, ttl_header, &ttl_value); if (ret == AWS_OP_SUCCESS) { s_tester.token_ttl_header_exist[s_tester.current_request] = true; if (aws_byte_cursor_eq_c_str_ignore_case(&ttl_value, "21600")) { s_tester.token_ttl_header_expected[s_tester.current_request] = true; } else { s_tester.token_ttl_header_expected[s_tester.current_request] = false; } } else { s_tester.token_ttl_header_exist[s_tester.current_request] = false; } } static void s_validate_token_header(const struct aws_http_message *request) { const struct aws_http_headers *headers = aws_http_message_get_const_headers(request); struct aws_byte_cursor token_header = aws_byte_cursor_from_c_str("x-aws-ec2-metadata-token"); struct aws_byte_cursor token_value; int ret = aws_http_headers_get(headers, token_header, &token_value); if (ret == AWS_OP_SUCCESS) { s_tester.token_header_exist[s_tester.current_request] = true; if (aws_byte_cursor_eq_c_str_ignore_case(&token_value, "A00XXF3H00ZZ==")) { s_tester.token_header_expected[s_tester.current_request] = true; } else { s_tester.token_header_expected[s_tester.current_request] = false; } } else { s_tester.token_header_exist[s_tester.current_request] = false; } } static int s_validate_uri_path_and_creds(int expected_requests, bool get_credentials) { ASSERT_UINT_EQUALS(expected_requests, s_tester.current_request); int idx = s_tester.token_request_idx; if (s_tester.current_request >= 1) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, s_expected_imds_token_uri->bytes, s_expected_imds_token_uri->len); } idx++; if (s_tester.current_request >= 2) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, s_expected_imds_base_uri->bytes, s_expected_imds_base_uri->len); } idx++; if (s_tester.current_request >= 3) { ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uris[idx].buffer, s_tester.request_uris[idx].len, s_expected_imds_role_uri->bytes, s_expected_imds_role_uri->len); } 
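    /*
     * The checks above walk the recorded request URIs starting at token_request_idx: for however
     * many requests were actually made, they must match the token URI (/latest/api/token), the
     * role-name listing URI (/latest/meta-data/iam/security-credentials/), and the role-specific
     * credentials URI, in that order. What remains is to confirm that the credentials callback
     * fired and whether credentials were produced, per the get_credentials flag.
     */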
ASSERT_TRUE(s_tester.has_received_credentials_callback == true); if (get_credentials) { ASSERT_TRUE(s_tester.credentials != NULL); } else { ASSERT_TRUE(s_tester.credentials == NULL); } return 0; } static int s_credentials_provider_imds_token_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_400_BAD_REQUEST; struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_validate_uri_path_and_creds(1, false /*no creds*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(credentials_provider_imds_token_request_failure, s_credentials_provider_imds_token_request_failure); static int s_credentials_provider_imds_role_name_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_validate_uri_path_and_creds(2, false /*no creds*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE( credentials_provider_imds_role_name_request_failure, s_credentials_provider_imds_role_name_request_failure); static int s_credentials_provider_imds_role_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); 
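    /*
     * response_data_callbacks[i] holds the body chunks that the mocked connection returns for the
     * i-th request; an index that is left empty simulates a response with no usable body. In this
     * case only the token (request 0) and the role name (request 1) are given bodies below, so the
     * final credentials request yields an empty document and the overall fetch fails.
     */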
aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor test_role_cursor = aws_byte_cursor_from_string(s_test_role_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_role_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_validate_uri_path_and_creds(3, false /*no creds*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); ASSERT_FALSE(s_tester.token_ttl_header_exist[2]); ASSERT_TRUE(s_tester.token_header_exist[2]); ASSERT_TRUE(s_tester.token_header_expected[2]); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(credentials_provider_imds_role_request_failure, s_credentials_provider_imds_role_request_failure); AWS_STATIC_STRING_FROM_LITERAL(s_bad_document_response, "{\"NotTheExpectedDocumentFormat\":\"Error\"}"); static int s_credentials_provider_imds_bad_document_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor test_role_cursor = aws_byte_cursor_from_string(s_test_role_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_role_cursor); struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_bad_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &bad_document_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_validate_uri_path_and_creds(3, false /*no creds*/) == 0); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(credentials_provider_imds_bad_document_failure, 
s_credentials_provider_imds_bad_document_failure); AWS_STATIC_STRING_FROM_LITERAL( s_good_response, "{\"AccessKeyId\":\"SuccessfulAccessKey\", \n \"SecretAccessKey\":\"SuccessfulSecret\", \n " "\"Token\":\"TokenSuccess\", \n \"Expiration\":\"2020-02-25T06:03:31Z\"}"); AWS_STATIC_STRING_FROM_LITERAL(s_good_access_key_id, "SuccessfulAccessKey"); AWS_STATIC_STRING_FROM_LITERAL(s_good_secret_access_key, "SuccessfulSecret"); AWS_STATIC_STRING_FROM_LITERAL(s_good_session_token, "TokenSuccess"); static int s_verify_credentials(struct aws_credentials *credentials) { ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_secret_access_key(credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(credentials), s_good_session_token); return AWS_OP_SUCCESS; } static int s_credentials_provider_imds_secure_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor test_role_cursor = aws_byte_cursor_from_string(s_test_role_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_role_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_validate_uri_path_and_creds(3, true /*got creds*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_header_exist[1]); ASSERT_TRUE(s_tester.token_header_expected[1]); ASSERT_FALSE(s_tester.token_ttl_header_exist[2]); ASSERT_TRUE(s_tester.token_header_exist[2]); ASSERT_TRUE(s_tester.token_header_expected[2]); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE(credentials_provider_imds_secure_success, s_credentials_provider_imds_secure_success); static int s_credentials_provider_imds_connection_closed_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.alternate_closed_connections = true; struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); 
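    /*
     * With alternate_closed_connections set, s_invoke_mock_request_callbacks completes every other
     * request with AWS_ERROR_HTTP_CONNECTION_CLOSED instead of AWS_ERROR_SUCCESS (based on the
     * parity of current_request), exercising the client's handling of connections that close
     * mid-operation. Bodies that land on a "closed" attempt are therefore queued twice below, once
     * for the failed attempt and once for the replay.
     */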
/* this one will fail, replay the body. */ aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_token_cursor); struct aws_byte_cursor test_role_cursor = aws_byte_cursor_from_string(s_test_role_response); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &test_role_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[3], &good_response_cursor); /* this one will fail, replay the body. */ aws_array_list_push_back(&s_tester.response_data_callbacks[4], &good_response_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE( credentials_provider_imds_connection_closed_success, s_credentials_provider_imds_connection_closed_success); static int s_credentials_provider_imds_insecure_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_403_FORBIDDEN; struct aws_byte_cursor test_role_cursor = aws_byte_cursor_from_string(s_test_role_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_role_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_validate_uri_path_and_creds(3, true /*got creds*/) == 0); ASSERT_TRUE(s_tester.token_ttl_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_expected[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_FALSE(s_tester.token_ttl_header_exist[1]); ASSERT_FALSE(s_tester.token_header_exist[1]); ASSERT_FALSE(s_tester.token_ttl_header_exist[2]); ASSERT_FALSE(s_tester.token_header_exist[2]); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } 
AWS_TEST_CASE(credentials_provider_imds_insecure_success, s_credentials_provider_imds_insecure_success); static int s_credentials_provider_imds_insecure_then_secure_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); s_tester.token_request_idx = 1; s_tester.response_code[0] = AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED; struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_token_cursor); struct aws_byte_cursor test_role_cursor = aws_byte_cursor_from_string(s_test_role_response); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &test_role_cursor); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks[3], &good_response_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .imds_version = IMDS_PROTOCOL_V1, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_validate_uri_path_and_creds(4, true /*got creds*/) == 0); ASSERT_FALSE(s_tester.token_ttl_header_exist[0]); ASSERT_FALSE(s_tester.token_header_exist[0]); ASSERT_TRUE(s_tester.token_ttl_header_exist[1]); ASSERT_TRUE(s_tester.token_ttl_header_expected[1]); ASSERT_FALSE(s_tester.token_header_exist[1]); ASSERT_FALSE(s_tester.token_ttl_header_exist[2]); ASSERT_TRUE(s_tester.token_header_exist[2]); ASSERT_TRUE(s_tester.token_header_expected[2]); ASSERT_FALSE(s_tester.token_ttl_header_exist[3]); ASSERT_TRUE(s_tester.token_header_exist[3]); ASSERT_TRUE(s_tester.token_header_expected[3]); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE( credentials_provider_imds_insecure_then_secure_success, s_credentials_provider_imds_insecure_then_secure_success); AWS_STATIC_STRING_FROM_LITERAL(s_test_role_response_first_half, "test-"); AWS_STATIC_STRING_FROM_LITERAL(s_test_role_response_second_half, "role"); static int s_credentials_provider_imds_success_multi_part_role_name(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor test_role_cursor1 = aws_byte_cursor_from_string(s_test_role_response_first_half); struct aws_byte_cursor test_role_cursor2 = aws_byte_cursor_from_string(s_test_role_response_second_half); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_role_cursor1); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_role_cursor2); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); 
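    /*
     * s_invoke_mock_request_callbacks delivers each cursor queued for a request index as its own
     * on_response_body callback, so pushing the role name above in two halves verifies that the
     * client correctly accumulates a body that arrives in multiple chunks.
     */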
aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); s_validate_uri_path_and_creds(3, true); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); return 0; } AWS_TEST_CASE( credentials_provider_imds_success_multi_part_role_name, s_credentials_provider_imds_success_multi_part_role_name); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_first_part, "{\"AccessKeyId\":\"SuccessfulAccessKey\", \n \"Secret"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_second_part, "AccessKey\":\"SuccessfulSecr"); AWS_STATIC_STRING_FROM_LITERAL( s_good_response_third_part, "et\", \n \"Token\":\"TokenSuccess\"\n, \"Expiration\":\"2020-02-25T06:03:31Z\"}"); static int s_credentials_provider_imds_success_multi_part_doc(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_imds_tester_init(allocator); struct aws_byte_cursor test_token_cursor = aws_byte_cursor_from_string(s_test_imds_token); aws_array_list_push_back(&s_tester.response_data_callbacks[0], &test_token_cursor); struct aws_byte_cursor test_role_cursor = aws_byte_cursor_from_string(s_test_role_response); aws_array_list_push_back(&s_tester.response_data_callbacks[1], &test_role_cursor); struct aws_byte_cursor good_response_cursor1 = aws_byte_cursor_from_string(s_good_response_first_part); struct aws_byte_cursor good_response_cursor2 = aws_byte_cursor_from_string(s_good_response_second_part); struct aws_byte_cursor good_response_cursor3 = aws_byte_cursor_from_string(s_good_response_third_part); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor1); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor2); aws_array_list_push_back(&s_tester.response_data_callbacks[2], &good_response_cursor3); struct aws_credentials_provider_imds_options options = { .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); s_validate_uri_path_and_creds(3, true); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ struct aws_credentials_provider_imds_impl *impl = provider->impl; aws_mem_release(provider->allocator, impl->client); aws_mem_release(provider->allocator, provider); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); 
return 0; } AWS_TEST_CASE(credentials_provider_imds_success_multi_part_doc, s_credentials_provider_imds_success_multi_part_doc); static int s_credentials_provider_imds_real_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; struct aws_logger logger; ASSERT_SUCCESS(aws_logger_init_standard(&logger, allocator, &logger_options)); aws_logger_set(&logger); s_aws_imds_tester_init(allocator); struct aws_host_resolver_default_options resolver_options = { .el_group = s_tester.el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_credentials_provider_imds_options options = { .bootstrap = bootstrap, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); aws_auth_library_clean_up(); aws_logger_set(NULL); aws_logger_clean_up(&logger); return 0; } AWS_TEST_CASE(credentials_provider_imds_real_new_destroy, s_credentials_provider_imds_real_new_destroy); static int s_credentials_provider_imds_real_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; struct aws_logger logger; ASSERT_SUCCESS(aws_logger_init_standard(&logger, allocator, &logger_options)); aws_logger_set(&logger); s_aws_imds_tester_init(allocator); struct aws_host_resolver_default_options resolver_options = { .el_group = s_tester.el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_credentials_provider_imds_options options = { .bootstrap = bootstrap, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_tester.credentials != NULL); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); ASSERT_SUCCESS(s_aws_imds_tester_cleanup()); aws_auth_library_clean_up(); aws_logger_set(NULL); aws_logger_clean_up(&logger); return 0; } AWS_TEST_CASE(credentials_provider_imds_real_success, s_credentials_provider_imds_real_success); 
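
/*
 * Note on the mock-driven cases above: aside from credentials_provider_imds_real_new_destroy and
 * credentials_provider_imds_real_success, every test follows the same skeleton and differs only in
 * the queued response bodies, response codes, and header expectations. A minimal sketch of that
 * skeleton (the test name and body literal below are illustrative placeholders, not an actual test
 * in this suite):
 *
 *     static int s_example_imds_case(struct aws_allocator *allocator, void *ctx) {
 *         (void)ctx;
 *         s_aws_imds_tester_init(allocator);            // zero s_tester, create bootstrap
 *
 *         // queue one or more body chunks per expected request index
 *         struct aws_byte_cursor body = aws_byte_cursor_from_c_str("...");
 *         aws_array_list_push_back(&s_tester.response_data_callbacks[0], &body);
 *
 *         struct aws_credentials_provider_imds_options options = {
 *             .bootstrap = s_tester.bootstrap,
 *             .function_table = &s_mock_function_table, // inject the HTTP mocks defined above
 *             .shutdown_options = {.shutdown_callback = s_on_shutdown_complete},
 *         };
 *         struct aws_credentials_provider *provider = aws_credentials_provider_new_imds(allocator, &options);
 *
 *         aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL);
 *         s_aws_wait_for_credentials_result();          // block on the tester condition variable
 *
 *         // ... assertions against s_tester ...
 *
 *         aws_credentials_provider_release(provider);
 *         s_aws_wait_for_provider_shutdown_callback();
 *         // because the connection manager is mocked, each case frees impl->client and the
 *         // provider memory by hand, as noted in the comments repeated above
 *         ASSERT_SUCCESS(s_aws_imds_tester_cleanup());
 *         return 0;
 *     }
 */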
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_process_tests.c000066400000000000000000000464401456575232400306760ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "shared_credentials_test_definitions.h" #include #include #include #include #include #include #include #include #include static struct aws_mock_process_tester { struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; bool has_received_shutdown_callback; int error_code; } s_tester; static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_shutdown_callback = true; aws_mutex_unlock(&s_tester.lock); aws_condition_variable_notify_one(&s_tester.signal); } static bool s_has_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_tester.has_received_shutdown_callback; } static void s_aws_wait_for_provider_shutdown_callback(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_has_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_tester.lock); } AWS_STATIC_STRING_FROM_LITERAL(s_credentials_process_profile, "foo"); static int s_aws_process_test_init_config_profile( struct aws_allocator *allocator, const struct aws_string *config_contents) { struct aws_string *config_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(config_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(config_file_path_str, config_contents) == AWS_OP_SUCCESS); ASSERT_TRUE( aws_set_environment_value(s_default_config_path_env_variable_name, config_file_path_str) == AWS_OP_SUCCESS); ASSERT_TRUE( aws_set_environment_value(s_default_profile_env_variable_name, s_credentials_process_profile) == AWS_OP_SUCCESS); aws_string_destroy(config_file_path_str); return AWS_OP_SUCCESS; } static int s_aws_process_tester_init(struct aws_allocator *allocator) { aws_auth_library_init(allocator); if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_aws_process_tester_cleanup(void) { aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_credentials_release(s_tester.credentials); aws_auth_library_clean_up(); } static bool s_has_tester_received_credentials_callback(void *user_data) { (void)user_data; return s_tester.has_received_credentials_callback; } static void s_aws_wait_for_credentials_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_credentials_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = true; s_tester.credentials = credentials; s_tester.error_code = error_code; if (credentials != NULL) { aws_credentials_acquire(credentials); } aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } #ifdef _WIN32 AWS_STATIC_STRING_FROM_LITERAL( s_test_command, "echo {\"Version\": 1, \"AccessKeyId\": \"AccessKey123\", " "\"SecretAccessKey\": \"SecretAccessKey321\", \"SessionToken\":\"TokenSuccess\", " 
"\"Expiration\":\"2020-02-25T06:03:31Z\"}"); #else AWS_STATIC_STRING_FROM_LITERAL( s_test_command, "echo '{\"Version\": 1, \"AccessKeyId\": \"AccessKey123\", " "\"SecretAccessKey\": \"SecretAccessKey321\", \"SessionToken\":\"TokenSuccess\", " "\"Expiration\":\"2020-02-25T06:03:31Z\"}'"); #endif #ifdef _WIN32 AWS_STATIC_STRING_FROM_LITERAL( s_test_command_without_token, "echo {\"Version\": 1, \"AccessKeyId\": \"AccessKey123\", " "\"SecretAccessKey\": \"SecretAccessKey321\", " "\"Expiration\":\"2020-02-25T06:03:31Z\"}"); #else AWS_STATIC_STRING_FROM_LITERAL( s_test_command_without_token, "echo '{\"Version\": 1, \"AccessKeyId\": \"AccessKey123\", " "\"SecretAccessKey\": \"SecretAccessKey321\", " "\"Expiration\":\"2020-02-25T06:03:31Z\"}'"); #endif AWS_STATIC_STRING_FROM_LITERAL(s_bad_test_command, "/i/dont/know/what/is/this/command"); AWS_STATIC_STRING_FROM_LITERAL(s_bad_command_output, "echo \"Hello, World!\""); AWS_STATIC_STRING_FROM_LITERAL(s_good_access_key_id, "AccessKey123"); AWS_STATIC_STRING_FROM_LITERAL(s_good_secret_access_key, "SecretAccessKey321"); AWS_STATIC_STRING_FROM_LITERAL(s_good_session_token, "TokenSuccess"); AWS_STATIC_STRING_FROM_LITERAL(s_good_expiration, "2020-02-25T06:03:31Z"); AWS_STATIC_STRING_FROM_LITERAL( s_process_config_file_contents, "[profile default]\n" "region=us-east-1\n" "[profile foo]\n" "region=us-west-2\n" "credential_process="); static int s_credentials_provider_process_helper( struct aws_string *config_file_contents, struct aws_allocator *allocator) { s_aws_process_tester_init(allocator); s_aws_process_test_init_config_profile(allocator, config_file_contents); struct aws_credentials_provider_process_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .profile_to_use = aws_byte_cursor_from_string(s_credentials_process_profile), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_process(allocator, &options); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_process_tester_cleanup(); return 0; } static int s_credentials_provider_process_new_destroy_from_config(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_process_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(s_test_command); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); ASSERT_SUCCESS(s_credentials_provider_process_helper(config_file_contents, allocator)); aws_string_destroy(config_file_contents); return 0; } AWS_TEST_CASE( credentials_provider_process_new_destroy_from_config, s_credentials_provider_process_new_destroy_from_config); static int s_credentials_provider_process_new_destroy_from_config_without_token( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_process_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct 
aws_byte_cursor cursor = aws_byte_cursor_from_string(s_test_command_without_token); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); ASSERT_SUCCESS(s_credentials_provider_process_helper(config_file_contents, allocator)); aws_string_destroy(config_file_contents); return 0; } AWS_TEST_CASE( credentials_provider_process_new_destroy_from_config_without_token, s_credentials_provider_process_new_destroy_from_config_without_token); AWS_STATIC_STRING_FROM_LITERAL( s_process_config_file_no_process_contents, "[profile default]\n" "region=us-east-1\n" "[profile foo]\n" "region=us-west-2\n"); static int s_credentials_provider_process_new_failed(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_process_tester_init(allocator); s_aws_process_test_init_config_profile(allocator, s_process_config_file_no_process_contents); struct aws_credentials_provider_process_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .profile_to_use = aws_byte_cursor_from_string(s_credentials_process_profile), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_process(allocator, &options); ASSERT_NULL(provider); s_aws_process_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_process_new_failed, s_credentials_provider_process_new_failed); static int s_credentials_provider_process_bad_command(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_process_tester_init(allocator); struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_process_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(s_bad_test_command); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); s_aws_process_test_init_config_profile(allocator, config_file_contents); aws_string_destroy(config_file_contents); struct aws_credentials_provider_process_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .profile_to_use = aws_byte_cursor_from_string(s_credentials_process_profile), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_process(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_process_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_process_bad_command, s_credentials_provider_process_bad_command); static int 
s_credentials_provider_process_incorrect_command_output(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_process_tester_init(allocator); struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_process_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(s_bad_command_output); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); s_aws_process_test_init_config_profile(allocator, config_file_contents); aws_string_destroy(config_file_contents); struct aws_credentials_provider_process_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .profile_to_use = aws_byte_cursor_from_string(s_credentials_process_profile), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_process(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_process_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_process_incorrect_command_output, s_credentials_provider_process_incorrect_command_output); static int s_verify_credentials(struct aws_credentials *credentials) { ASSERT_NOT_NULL(credentials); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_secret_access_key(credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(credentials), s_good_session_token); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_TRUE( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == (uint64_t)expiration.timestamp); return AWS_OP_SUCCESS; } static int s_credentials_provider_process_basic_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_process_tester_init(allocator); struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_process_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(s_test_command); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); s_aws_process_test_init_config_profile(allocator, config_file_contents); 
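    /*
     * At this point the test has written a temporary config file and pointed the default
     * config-path and profile environment variables (s_default_config_path_env_variable_name and
     * s_default_profile_env_variable_name) at it via s_aws_process_test_init_config_profile. The
     * [profile foo] section ends with a credential_process entry built from s_test_command, so on
     * non-Windows platforms the file looks roughly like:
     *
     *     [profile foo]
     *     region=us-west-2
     *     credential_process=echo '{"Version": 1, "AccessKeyId": "AccessKey123", ...}'
     *
     * The provider is expected to run that command and parse its JSON output into credentials.
     */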
aws_string_destroy(config_file_contents); struct aws_credentials_provider_process_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .profile_to_use = aws_byte_cursor_from_string(s_credentials_process_profile), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_process(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_process_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_process_basic_success, s_credentials_provider_process_basic_success); static int s_credentials_provider_process_basic_success_cached(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_process_tester_init(allocator); struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_process_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(s_test_command); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); s_aws_process_test_init_config_profile(allocator, config_file_contents); aws_string_destroy(config_file_contents); struct aws_profile_collection *profile_collection = NULL; struct aws_string *config_file_path; aws_get_environment_value(allocator, s_default_config_path_env_variable_name, &config_file_path); profile_collection = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG); /* Update profile and config file */ aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); cursor = aws_byte_cursor_from_string(s_bad_test_command); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); if (aws_create_profile_file(config_file_path, config_file_contents)) { return AWS_OP_ERR; } aws_string_destroy(config_file_contents); aws_byte_buf_clean_up(&content_buf); /* provider should used the cached credentials */ struct aws_credentials_provider_process_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .profile_to_use = aws_byte_cursor_from_string(s_credentials_process_profile), .config_profile_collection_cached = profile_collection, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_process(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); 
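    /*
     * Getting valid credentials here proves that the provider honored
     * config_profile_collection_cached: the config file on disk was rewritten above to use
     * s_bad_test_command after the profile collection had been loaded, so the credentials can only
     * have come from the cached collection rather than from re-reading the file.
     */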
aws_string_destroy(config_file_path); aws_profile_collection_release(profile_collection); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_process_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_process_basic_success_cached, s_credentials_provider_process_basic_success_cached); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_sso_tests.c000066400000000000000000001163021456575232400300170ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "credentials_provider_utils.h" #include "shared_credentials_test_definitions.h" #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_sso_profile, "sso"); static int s_aws_credentials_provider_sso_test_init_config_profile( struct aws_allocator *allocator, const struct aws_string *config_contents) { struct aws_string *config_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(config_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(config_file_path_str, config_contents) == AWS_OP_SUCCESS); ASSERT_TRUE( aws_set_environment_value(s_default_config_path_env_variable_name, config_file_path_str) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_set_environment_value(s_default_profile_env_variable_name, s_sso_profile) == AWS_OP_SUCCESS); aws_string_destroy(config_file_path_str); return AWS_OP_SUCCESS; } /* start_url should be same in `s_sso_profile_start_url` and `s_sso_profile_config_contents` */ AWS_STATIC_STRING_FROM_LITERAL(s_sso_profile_start_url, "https://d-123.awsapps.com/start"); AWS_STATIC_STRING_FROM_LITERAL( s_sso_profile_config_contents, "[profile sso]\n" "sso_start_url = https://d-123.awsapps.com/start\n" "sso_region = us-west-2\n" "sso_account_id = 123\n" "sso_role_name = roleName\n"); /* session name should be same in both `s_sso_session_name` and `s_sso_session_config_contents`*/ AWS_STATIC_STRING_FROM_LITERAL(s_sso_session_name, "session"); AWS_STATIC_STRING_FROM_LITERAL( s_sso_session_config_contents, "[profile sso]\n" "sso_start_url = https://d-123.awsapps.com/start\n" "sso_region = us-west-2\n" "sso_account_id = 123\n" "sso_role_name = roleName\n" "sso_session = session\n" "[sso-session session]\n" "sso_start_url = https://d-123.awsapps.com/start\n" "sso_region = us-west-2\n"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_sso_request_path, "/federation/credentials?account_id=123&role_name=roleName"); static int s_credentials_provider_sso_failed_invalid_config(struct aws_allocator *allocator, void *ctx) { (void)ctx; const struct { const char *name; const char *text; } invalid_config_examples[] = { {"empty", ""}, {"profile without any sso config", "[profile sso]\naccessKey=access"}, {"profile without role_name", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_region=us-west-2\n"}, {"profile without account_id", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_region=us-west-2\n" "sso_role_name=roleName\n"}, {"profile without region", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_account_id=123\n" "sso_role_name=roleName\n"}, {"profile without start_url", "[profile sso]\n" "accessKey=access\n" "sso_region=us-west-2\n" "sso_account_id=123\n" "sso_role_name=roleName\n"}, {"profile with invalid session", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" 
"sso_region=us-west-2\n" "sso_account_id=123\n" "sso_role_name=roleName\n" "sso_session = session\n" "[sso-session session]\n"}, {"session without start_url", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_region=us-west-2\n" "sso_account_id=123\n" "sso_role_name=roleName\n" "sso_session = session\n" "[sso-session session]\n" "sso_region = us-west-2\n"}, {"session without region", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_region=us-west-2\n" "sso_account_id=123\n" "sso_role_name=roleName\n" "sso_session = session\n" "[sso-session session]\n" "sso_start_url = https://d-123.awsapps.com/start\n"}, {"session with different region", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_region=us-east-1\n" "sso_account_id=123\n" "sso_role_name=roleName\n" "sso_session = session\n" "[sso-session session]\n" "sso_start_url = https://d-123.awsapps.com/start\n" "sso_region = us-west-2\n"}, {"session with different start-url", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_region=us-west-2\n" "sso_account_id=123\n" "sso_role_name=roleName\n" "sso_session = session\n" "[sso-session session]\n" "sso_start_url = https://d-321.awsapps.com/start\n" "sso_region = us-west-2\n"}, }; aws_credentials_provider_http_mock_tester_init(allocator); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, }; for (int i = 0; i < AWS_ARRAY_SIZE(invalid_config_examples); i++) { printf("invalid config example [%d]: %s\n", i, invalid_config_examples[i].name); struct aws_string *content = aws_string_new_from_c_str(allocator, invalid_config_examples[i].text); ASSERT_TRUE(content != NULL); s_aws_credentials_provider_sso_test_init_config_profile(allocator, content); aws_string_destroy(content); struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NULL(provider); } aws_credentials_provider_http_mock_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_sso_failed_invalid_config, s_credentials_provider_sso_failed_invalid_config); static int s_credentials_provider_sso_create_destroy_valid_config(struct aws_allocator *allocator, void *ctx) { (void)ctx; const struct { const char *name; const char *text; } valid_config_examples[] = { {"profile", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_account_id=123\n" "sso_region=us-west-2\n" "sso_role_name=roleName\n"}, {"session", "[profile sso]\n" "accessKey=access\n" "sso_account_id=123\n" "sso_role_name=roleName\n" "sso_session = session\n" "[sso-session session]\n" "sso_start_url = https://d-123.awsapps.com/start\n" "sso_region = us-west-2\n"}, {"session with profile", "[profile sso]\n" "accessKey=access\n" "sso_start_url=https://d-123.awsapps.com/start\n" "sso_region=us-west-2\n" "sso_account_id=123\n" "sso_role_name=roleName\n" "sso_session = session\n" "[sso-session session]\n" "sso_start_url = https://d-123.awsapps.com/start\n" "sso_region = us-west-2\n"}, }; aws_credentials_provider_http_mock_tester_init(allocator); struct aws_credentials_provider_sso_options options = { 
.bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, }; for (int i = 0; i < AWS_ARRAY_SIZE(valid_config_examples); i++) { printf("valid config example [%d]: %s\n", i, valid_config_examples[i].name); struct aws_string *content = aws_string_new_from_c_str(allocator, valid_config_examples[i].text); ASSERT_TRUE(content != NULL); s_aws_credentials_provider_sso_test_init_config_profile(allocator, content); aws_string_destroy(content); struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_release(provider); } aws_credentials_provider_http_mock_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sso_create_destroy_valid_config, s_credentials_provider_sso_create_destroy_valid_config); AWS_STATIC_STRING_FROM_LITERAL( s_good_response, "{\"roleCredentials\": {\"accessKeyId\": \"SuccessfulAccessKey\",\"secretAccessKey\": " "\"SuccessfulSecret\",\"sessionToken\": \"SuccessfulToken\",\"expiration\": 1678574216000}}"); AWS_STATIC_STRING_FROM_LITERAL(s_good_access_key_id, "SuccessfulAccessKey"); AWS_STATIC_STRING_FROM_LITERAL(s_good_secret_access_key, "SuccessfulSecret"); AWS_STATIC_STRING_FROM_LITERAL(s_good_session_token, "SuccessfulToken"); static int s_good_response_expiration = 1678574216; static int s_verify_credentials(bool request_made, bool got_credentials, int expected_attempts) { ASSERT_TRUE(credentials_provider_http_mock_tester.has_received_credentials_callback); if (got_credentials) { ASSERT_TRUE(credentials_provider_http_mock_tester.credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_access_key_id(credentials_provider_http_mock_tester.credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(credentials_provider_http_mock_tester.credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_session_token(credentials_provider_http_mock_tester.credentials), s_good_session_token); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(credentials_provider_http_mock_tester.credentials), s_good_response_expiration); } else { ASSERT_TRUE(credentials_provider_http_mock_tester.error_code); ASSERT_TRUE(credentials_provider_http_mock_tester.credentials == NULL); } if (request_made) { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_byte_cursor_from_buf(&credentials_provider_http_mock_tester.request_path), s_expected_sso_request_path); } ASSERT_INT_EQUALS(credentials_provider_http_mock_tester.attempts, expected_attempts); return AWS_OP_SUCCESS; } static int s_credentials_provider_sso_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); credentials_provider_http_mock_tester.is_connection_acquire_successful = false; s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = 
aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(false /*no request*/, false /*get creds*/, 0 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_sso_connect_failure, s_credentials_provider_sso_connect_failure); static int s_credentials_provider_sso_failure_token_missing(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); credentials_provider_http_mock_tester.is_request_successful = false; /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(false /*no request*/, false /*get creds*/, 0 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); return 0; } AWS_TEST_CASE(credentials_provider_sso_failure_token_missing, s_credentials_provider_sso_failure_token_missing); AWS_STATIC_STRING_FROM_LITERAL( s_sso_token, "{\"accessToken\": \"ValidAccessToken\",\"expiresAt\": \"2015-03-12T05:35:19Z\"}"); static uint64_t s_sso_token_expiration_s = 1426138519; static int s_credentials_provider_sso_failure_token_expired(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); credentials_provider_http_mock_tester.is_request_successful = false; /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); uint64_t nano_expiration = aws_timestamp_convert(s_sso_token_expiration_s + 100, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); mock_aws_set_system_time(nano_expiration); struct aws_credentials_provider_sso_options 
options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(false /*no request*/, false /*get creds*/, 0 /*expected attempts*/)); ASSERT_INT_EQUALS(credentials_provider_http_mock_tester.error_code, AWS_AUTH_SSO_TOKEN_EXPIRED); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE(credentials_provider_sso_failure_token_expired, s_credentials_provider_sso_failure_token_expired); AWS_STATIC_STRING_FROM_LITERAL(s_sso_empty_token, "{\"accessToken\": \"\",\"expiresAt\": \"2015-03-12T05:35:19Z\"}"); static int s_credentials_provider_sso_failure_token_empty(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); credentials_provider_http_mock_tester.is_request_successful = false; /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_empty_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(false /*no request*/, false /*get creds*/, 0 /*expected attempts*/)); ASSERT_INT_EQUALS(credentials_provider_http_mock_tester.error_code, AWS_ERROR_INVALID_ARGUMENT); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE(credentials_provider_sso_failure_token_empty, s_credentials_provider_sso_failure_token_empty); static int 
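/* Next case: the mocked connection returns HTTP 400, so the fetch must fail after a single attempt with no credentials delivered. */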
s_credentials_provider_sso_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); credentials_provider_http_mock_tester.is_request_successful = false; credentials_provider_http_mock_tester.response_code = AWS_HTTP_STATUS_CODE_400_BAD_REQUEST; /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, false /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE(credentials_provider_sso_request_failure, s_credentials_provider_sso_request_failure); AWS_STATIC_STRING_FROM_LITERAL(s_bad_json_response, "{ \"accessKey\": \"bad\"}"); static int s_credentials_provider_sso_bad_response(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); struct aws_byte_cursor bad_json_cursor = aws_byte_cursor_from_string(s_bad_json_response); aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &bad_json_cursor); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = 
aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, false /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE(credentials_provider_sso_bad_response, s_credentials_provider_sso_bad_response); static int s_credentials_provider_sso_retryable_error(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); credentials_provider_http_mock_tester.response_code = AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR; /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); struct aws_byte_cursor bad_json_cursor = aws_byte_cursor_from_string(s_bad_json_response); aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &bad_json_cursor); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, false /*get creds*/, 4 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE(credentials_provider_sso_retryable_error, s_credentials_provider_sso_retryable_error); static int s_credentials_provider_sso_basic_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); 
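/* Standard success-path setup: $HOME is redirected to a scratch directory and a cached SSO token file is written at the path derived from the sso-session name. */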
s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); /* set the response */ struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &good_response_cursor); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, true /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE(credentials_provider_sso_basic_success, s_credentials_provider_sso_basic_success); AWS_STATIC_STRING_FROM_LITERAL(s_invalid_config, "invalid config"); static int s_credentials_provider_sso_basic_success_cached_config_file(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_invalid_config); struct aws_byte_buf profile_buffer = aws_byte_buf_from_c_str(aws_string_c_str(s_sso_session_config_contents)); struct aws_profile_collection *config_collection = aws_profile_collection_new_from_buffer(allocator, &profile_buffer, AWS_PST_CONFIG); /* set the response */ struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &good_response_cursor); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .config_file_cached = config_collection, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); 
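/* The config file on disk was deliberately overwritten with s_invalid_config, so the profile can only be resolved through the collection passed via .config_file_cached. */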
aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, true /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); aws_profile_collection_release(config_collection); return 0; } AWS_TEST_CASE( credentials_provider_sso_basic_success_cached_config_file, s_credentials_provider_sso_basic_success_cached_config_file); static int s_credentials_provider_sso_basic_success_profile(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_profile_start_url); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_profile_config_contents); /* set the response */ struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &good_response_cursor); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, true /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE(credentials_provider_sso_basic_success_profile, s_credentials_provider_sso_basic_success_profile); static int s_credentials_provider_sso_basic_success_profile_cached_config_file( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_profile_start_url); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_invalid_config); struct aws_byte_buf profile_buffer = 
aws_byte_buf_from_c_str(aws_string_c_str(s_sso_profile_config_contents)); struct aws_profile_collection *config_collection = aws_profile_collection_new_from_buffer(allocator, &profile_buffer, AWS_PST_CONFIG); /* set the response */ struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &good_response_cursor); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .config_file_cached = config_collection, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, true /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_profile_collection_release(config_collection); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE( credentials_provider_sso_basic_success_profile_cached_config_file, s_credentials_provider_sso_basic_success_profile_cached_config_file); static int s_credentials_provider_sso_basic_success_after_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_credentials_provider_http_mock_tester_init(allocator); credentials_provider_http_mock_tester.failure_count = 2; credentials_provider_http_mock_tester.failure_response_code = AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR; /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); s_aws_credentials_provider_sso_test_init_config_profile(allocator, s_sso_session_config_contents); /* set the response */ struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &good_response_cursor); mock_aws_set_system_time(0); struct aws_credentials_provider_sso_options options = { .bootstrap = credentials_provider_http_mock_tester.bootstrap, .tls_ctx = credentials_provider_http_mock_tester.tls_ctx, .function_table = &aws_credentials_provider_http_mock_function_table, .shutdown_options = { .shutdown_callback = aws_credentials_provider_http_mock_on_shutdown_complete, .shutdown_user_data = NULL, }, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sso(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_get_credentials( provider, 
aws_credentials_provider_http_mock_get_credentials_callback, NULL); aws_credentials_provider_http_mock_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(true /*request made*/, true /*get creds*/, 3 /*expected attempts*/)); aws_credentials_provider_release(provider); aws_credentials_provider_http_mock_wait_for_shutdown_callback(); aws_credentials_provider_http_mock_tester_cleanup(); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); return 0; } AWS_TEST_CASE( credentials_provider_sso_basic_success_after_failure, s_credentials_provider_sso_basic_success_after_failure); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_sts_tests.c000066400000000000000000001726521456575232400300360ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "aws/common/byte_buf.h" #include "credentials_provider_utils.h" #include "shared_credentials_test_definitions.h" struct aws_mock_http_request { struct aws_byte_buf path; struct aws_byte_buf method; struct aws_byte_buf host_header; struct aws_byte_buf body; bool had_auth_header; int response_code; }; #define MAX_REQUEST 10 struct aws_mock_sts_tester { struct aws_allocator *allocator; struct aws_mock_http_request mocked_requests[MAX_REQUEST]; int num_request; int mock_response_code; int mock_failure_code; size_t fail_operations; struct aws_byte_buf mock_body; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; int error_code; bool fail_connection; struct aws_event_loop_group *el_group; struct aws_host_resolver *resolver; struct aws_client_bootstrap *bootstrap; struct aws_tls_ctx *tls_ctx; }; static struct aws_mock_sts_tester s_tester; static struct aws_http_connection_manager *s_aws_http_connection_manager_new_mock( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { (void)allocator; (void)options; return (struct aws_http_connection_manager *)1; } static void s_aws_http_connection_manager_release_mock(struct aws_http_connection_manager *manager) { (void)manager; } static void s_aws_http_connection_manager_acquire_connection_mock( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; (void)callback; (void)user_data; if (!s_tester.fail_connection) { callback((struct aws_http_connection *)1, AWS_OP_SUCCESS, user_data); } else { aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_OP_ERR, user_data); } } static int s_aws_http_connection_manager_release_connection_mock( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { (void)manager; (void)connection; return AWS_OP_SUCCESS; } static void s_invoke_mock_request_callbacks( const struct aws_http_make_request_options *options, bool is_request_successful) { struct aws_http_header headers[1]; AWS_ZERO_ARRAY(headers); headers[0].name = aws_byte_cursor_from_c_str("some-header"); headers[0].value = aws_byte_cursor_from_c_str("value"); if (options->on_response_headers) { options->on_response_headers( (struct aws_http_stream *)1, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data); } if (options->on_response_header_block_done) { 
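/* drive the remainder of the response synchronously: header-block-done, body chunk, then stream completion */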
options->on_response_header_block_done((struct aws_http_stream *)1, true, options->user_data); } struct aws_byte_cursor data_callback_cur = aws_byte_cursor_from_buf(&s_tester.mock_body); options->on_response_body((struct aws_http_stream *)1, &data_callback_cur, options->user_data); options->on_complete( (struct aws_http_stream *)1, is_request_successful ? AWS_ERROR_SUCCESS : AWS_ERROR_HTTP_UNKNOWN, options->user_data); } static struct aws_http_stream *s_aws_http_connection_make_request_mock( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_mock_http_request *mocked_request = &s_tester.mocked_requests[s_tester.num_request++]; AWS_ZERO_STRUCT(*mocked_request); struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); aws_http_message_get_request_path(options->request, &path); aws_byte_buf_clean_up(&mocked_request->path); aws_byte_buf_init_copy_from_cursor(&mocked_request->path, s_tester.allocator, path); struct aws_byte_cursor method; AWS_ZERO_STRUCT(method); aws_http_message_get_request_method(options->request, &method); aws_byte_buf_clean_up(&mocked_request->method); aws_byte_buf_init_copy_from_cursor(&mocked_request->method, s_tester.allocator, method); size_t header_count = aws_http_message_get_header_count(options->request); for (size_t i = 0; i < header_count; ++i) { struct aws_http_header header; AWS_ZERO_STRUCT(header); aws_http_message_get_header(options->request, &header, i); if (aws_byte_cursor_eq_c_str_ignore_case(&header.name, "host")) { aws_byte_buf_clean_up(&mocked_request->host_header); aws_byte_buf_init_copy_from_cursor(&mocked_request->host_header, s_tester.allocator, header.value); } if (aws_byte_cursor_eq_c_str_ignore_case(&header.name, "authorization")) { mocked_request->had_auth_header = true; } } struct aws_input_stream *input_stream = aws_http_message_get_body_stream(options->request); int64_t body_len = 0; aws_input_stream_get_length(input_stream, &body_len); aws_byte_buf_clean_up(&mocked_request->body); aws_byte_buf_init(&mocked_request->body, s_tester.allocator, (size_t)body_len); aws_input_stream_read(input_stream, &mocked_request->body); bool fail_request = false; if (s_tester.fail_operations) { fail_request = true; s_tester.fail_operations--; mocked_request->response_code = s_tester.mock_failure_code; } else { mocked_request->response_code = s_tester.mock_response_code; } s_invoke_mock_request_callbacks(options, !fail_request); return (struct aws_http_stream *)1; } static int s_aws_http_stream_get_incoming_response_status_mock( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; *out_status_code = s_tester.mocked_requests[s_tester.num_request - 1].response_code; return AWS_OP_SUCCESS; } static int s_aws_http_stream_activate_mock(struct aws_http_stream *stream) { (void)stream; return AWS_OP_SUCCESS; } static void s_aws_http_stream_release_mock(struct aws_http_stream *stream) { (void)stream; } static void s_aws_http_connection_close_mock(struct aws_http_connection *connection) { (void)connection; } static struct aws_auth_http_system_vtable s_mock_function_table = { .aws_http_connection_manager_new = s_aws_http_connection_manager_new_mock, .aws_http_connection_manager_release = s_aws_http_connection_manager_release_mock, .aws_http_connection_manager_acquire_connection = s_aws_http_connection_manager_acquire_connection_mock, .aws_http_connection_manager_release_connection = s_aws_http_connection_manager_release_connection_mock, 
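/* the remaining entries route request creation, stream activation, status queries, stream release, and connection close through the mocks above */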
.aws_http_connection_make_request = s_aws_http_connection_make_request_mock, .aws_http_stream_activate = s_aws_http_stream_activate_mock, .aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_incoming_response_status_mock, .aws_http_stream_release = s_aws_http_stream_release_mock, .aws_http_connection_close = s_aws_http_connection_close_mock}; static int s_aws_sts_tester_init(struct aws_allocator *allocator) { AWS_ZERO_STRUCT(s_tester); s_tester.allocator = allocator; aws_auth_library_init(allocator); if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } s_tester.el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = s_tester.el_group, .max_entries = 8, }; s_tester.resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .host_resolver = s_tester.resolver, }; s_tester.bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_tls_ctx_options tls_options; aws_tls_ctx_options_init_default_client(&tls_options, allocator); s_tester.tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options); ASSERT_NOT_NULL(s_tester.tls_ctx); aws_tls_ctx_options_clean_up(&tls_options); return AWS_OP_SUCCESS; } static void s_cleanup_creds_callback_data(void) { aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = false; if (s_tester.credentials) { aws_credentials_release(s_tester.credentials); s_tester.credentials = NULL; } for (int i = 0; i < MAX_REQUEST; i++) { aws_byte_buf_clean_up(&s_tester.mocked_requests[i].path); aws_byte_buf_clean_up(&s_tester.mocked_requests[i].method); aws_byte_buf_clean_up(&s_tester.mocked_requests[i].host_header); aws_byte_buf_clean_up(&s_tester.mocked_requests[i].body); } aws_mutex_unlock(&s_tester.lock); } static int s_aws_sts_tester_cleanup(void) { s_cleanup_creds_callback_data(); aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_byte_buf_clean_up(&s_tester.mock_body); aws_client_bootstrap_release(s_tester.bootstrap); aws_host_resolver_release(s_tester.resolver); aws_event_loop_group_release(s_tester.el_group); aws_tls_ctx_release(s_tester.tls_ctx); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } static bool s_has_tester_received_credentials_callback(void *user_data) { (void)user_data; return s_tester.has_received_credentials_callback; } static void s_aws_wait_for_credentials_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_credentials_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = true; s_tester.error_code = error_code; s_tester.credentials = credentials; aws_credentials_acquire(credentials); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static struct aws_byte_cursor s_access_key_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("accessKey12345"); static struct aws_byte_cursor s_secret_key_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("secretKey12345"); static struct aws_byte_cursor s_session_token_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sessionToken123456789"); static 
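/* canned STS fixtures: the role ARN and session name used as inputs, a successful AssumeRole response document, and the expected AssumeRole request body */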
struct aws_byte_cursor s_role_arn_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:aws:iam::67895:role/test_role"); static struct aws_byte_cursor s_session_name_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test_session"); static const char *success_creds_doc = "\n" " \n" " \n" " \n" " \n" " accessKeyIdResp\n" " secretKeyResp\n" " sessionTokenResp\n" " \n" " \n" " ... a bunch of other stuff we don't care about\n" " \n" " ... more stuff we don't care about\n" " \n" ""; static struct aws_byte_cursor s_expected_payload = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A67895%" "3Arole%2Ftest_role&RoleSessionName=test_session&DurationSeconds=900"); AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_response, "accessKeyIdResp"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_response, "secretKeyResp"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_response, "sessionTokenResp"); static int s_verify_credentials(struct aws_credentials *credentials) { ASSERT_NOT_NULL(credentials); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(credentials), s_access_key_id_response); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_secret_access_key(credentials), s_secret_access_key_response); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(credentials), s_session_token_response); return AWS_OP_SUCCESS; } static int s_credentials_provider_sts_direct_config_succeeds_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_tester_init(allocator); struct aws_credentials_provider_static_options static_options = { .access_key_id = s_access_key_cur, .secret_access_key = s_secret_key_cur, .session_token = s_session_token_cur, }; struct aws_credentials_provider *static_provider = aws_credentials_provider_new_static(allocator, &static_options); struct aws_credentials_provider_sts_options options = { .creds_provider = static_provider, .bootstrap = s_tester.bootstrap, .tls_ctx = s_tester.tls_ctx, .role_arn = s_role_arn_cur, .session_name = s_session_name_cur, .duration_seconds = 0, .function_table = &s_mock_function_table, .system_clock_fn = mock_aws_get_system_time, }; mock_aws_set_system_time(0); s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *sts_provider = aws_credentials_provider_new_sts(allocator, &options); aws_credentials_provider_get_credentials(sts_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); ASSERT_TRUE(aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == 900); const char *expected_method = "POST"; ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[0].method.buffer, s_tester.mocked_requests[0].method.len); const char *expected_path = "/"; ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[0].path.buffer, s_tester.mocked_requests[0].path.len); ASSERT_TRUE(s_tester.mocked_requests[0].had_auth_header); const char *expected_host_header = "sts.amazonaws.com"; ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[0].host_header.buffer, s_tester.mocked_requests[0].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_payload.ptr, s_expected_payload.len, s_tester.mocked_requests[0].body.buffer, s_tester.mocked_requests[0].body.len); aws_credentials_provider_release(sts_provider); 
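/* drop both test-held references: the STS provider above, then the static source provider it was built from */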
aws_credentials_provider_release(static_provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(credentials_provider_sts_direct_config_succeeds, s_credentials_provider_sts_direct_config_succeeds_fn) static int s_credentials_provider_sts_direct_config_succeeds_after_retry_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_tester_init(allocator); struct aws_credentials_provider_static_options static_options = { .access_key_id = s_access_key_cur, .secret_access_key = s_secret_key_cur, .session_token = s_session_token_cur, }; struct aws_credentials_provider *static_provider = aws_credentials_provider_new_static(allocator, &static_options); struct aws_credentials_provider_sts_options options = { .creds_provider = static_provider, .bootstrap = s_tester.bootstrap, .tls_ctx = s_tester.tls_ctx, .role_arn = s_role_arn_cur, .session_name = s_session_name_cur, .duration_seconds = 0, .function_table = &s_mock_function_table, .system_clock_fn = mock_aws_get_system_time, }; mock_aws_set_system_time(0); s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; s_tester.mock_failure_code = 429; s_tester.fail_operations = 2; struct aws_credentials_provider *sts_provider = aws_credentials_provider_new_sts(allocator, &options); aws_credentials_provider_get_credentials(sts_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); ASSERT_TRUE(aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == 900); const char *expected_method = "POST"; ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[0].method.buffer, s_tester.mocked_requests[0].method.len); const char *expected_path = "/"; ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[0].path.buffer, s_tester.mocked_requests[0].path.len); ASSERT_TRUE(s_tester.mocked_requests[0].had_auth_header); const char *expected_host_header = "sts.amazonaws.com"; ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[0].host_header.buffer, s_tester.mocked_requests[0].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_payload.ptr, s_expected_payload.len, s_tester.mocked_requests[0].body.buffer, s_tester.mocked_requests[0].body.len); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(static_provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_direct_config_succeeds_after_retry, s_credentials_provider_sts_direct_config_succeeds_after_retry_fn) static const char *malformed_creds_doc = "\n" " \n" " \n" " \n" " accessKeyIdResp\n" " "; static int s_credentials_provider_sts_direct_config_invalid_doc_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_tester_init(allocator); struct aws_credentials_provider_static_options static_options = { .access_key_id = s_access_key_cur, .secret_access_key = s_secret_key_cur, .session_token = s_session_token_cur, }; struct aws_credentials_provider *static_provider = aws_credentials_provider_new_static(allocator, &static_options); struct aws_credentials_provider_sts_options options = { .creds_provider = static_provider, .bootstrap = s_tester.bootstrap, .tls_ctx = s_tester.tls_ctx, .role_arn = s_role_arn_cur, .session_name = s_session_name_cur, .duration_seconds = 0, .function_table = &s_mock_function_table, 
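/* the mocked system clock (set to 0 below) keeps time-based checks deterministic */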
.system_clock_fn = mock_aws_get_system_time, }; mock_aws_set_system_time(0); s_tester.mock_body = aws_byte_buf_from_c_str(malformed_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *sts_provider = aws_credentials_provider_new_sts(allocator, &options); aws_credentials_provider_get_credentials(sts_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_NULL(s_tester.credentials); const char *expected_method = "POST"; ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[0].method.buffer, s_tester.mocked_requests[0].method.len); const char *expected_path = "/"; ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[0].path.buffer, s_tester.mocked_requests[0].path.len); ASSERT_TRUE(s_tester.mocked_requests[0].had_auth_header); const char *expected_host_header = "sts.amazonaws.com"; ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[0].host_header.buffer, s_tester.mocked_requests[0].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_payload.ptr, s_expected_payload.len, s_tester.mocked_requests[0].body.buffer, s_tester.mocked_requests[0].body.len); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(static_provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_direct_config_invalid_doc, s_credentials_provider_sts_direct_config_invalid_doc_fn) static int s_credentials_provider_sts_direct_config_connection_failed_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_tester_init(allocator); struct aws_credentials_provider_static_options static_options = { .access_key_id = s_access_key_cur, .secret_access_key = s_secret_key_cur, .session_token = s_session_token_cur, }; struct aws_credentials_provider *static_provider = aws_credentials_provider_new_static(allocator, &static_options); struct aws_credentials_provider_sts_options options = { .creds_provider = static_provider, .bootstrap = s_tester.bootstrap, .tls_ctx = s_tester.tls_ctx, .role_arn = s_role_arn_cur, .session_name = s_session_name_cur, .duration_seconds = 0, .function_table = &s_mock_function_table, .system_clock_fn = mock_aws_get_system_time, }; mock_aws_set_system_time(0); s_tester.fail_connection = true; struct aws_credentials_provider *sts_provider = aws_credentials_provider_new_sts(allocator, &options); aws_credentials_provider_get_credentials(sts_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_NULL(s_tester.credentials); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(static_provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_direct_config_connection_failed, s_credentials_provider_sts_direct_config_connection_failed_fn) static int s_credentials_provider_sts_direct_config_service_fails_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_tester_init(allocator); struct aws_credentials_provider_static_options static_options = { .access_key_id = s_access_key_cur, .secret_access_key = s_secret_key_cur, .session_token = s_session_token_cur, }; struct aws_credentials_provider *static_provider = aws_credentials_provider_new_static(allocator, &static_options); struct aws_credentials_provider_sts_options options = { .creds_provider = static_provider, .bootstrap = s_tester.bootstrap, .tls_ctx 
= s_tester.tls_ctx, .role_arn = s_role_arn_cur, .session_name = s_session_name_cur, .duration_seconds = 0, .function_table = &s_mock_function_table, .system_clock_fn = mock_aws_get_system_time, }; mock_aws_set_system_time(0); s_tester.mock_response_code = 529; struct aws_credentials_provider *sts_provider = aws_credentials_provider_new_sts(allocator, &options); aws_credentials_provider_get_credentials(sts_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_NULL(s_tester.credentials); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(static_provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_direct_config_service_fails, s_credentials_provider_sts_direct_config_service_fails_fn) static const char *s_soure_profile_config_file = "[default]\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "\n" "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "source_profile=default\n" "role_session_name=test_session"; static const char *s_soure_profile_chain_config_file = "[default]\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "\n" "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "source_profile=roletest2\n" "role_session_name=test_session\n" "[roletest2]\n" "role_arn=arn:aws:iam::67896:role/test_role\n" "source_profile=roletest3\n" "role_session_name=test_session2\n" "[roletest3]\n" "role_arn=arn:aws:iam::67897:role/test_role\n" "source_profile=default\n" "role_session_name=test_session3\n"; static int s_credentials_provider_sts_from_profile_config_with_chain_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_soure_profile_chain_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NOT_NULL(provider); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); ASSERT_INT_EQUALS(3, s_tester.num_request); static struct aws_byte_cursor s_expected_request_body[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A67897%" "3Arole%2Ftest_role&RoleSessionName=test_session3&DurationSeconds=900"), 
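/* expected bodies run innermost role first: roletest3 (whose source_profile is the static default profile), then roletest2, then the requested roletest profile */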
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A67896%" "3Arole%2Ftest_role&RoleSessionName=test_session2&DurationSeconds=900"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A67895%" "3Arole%2Ftest_role&RoleSessionName=test_session&DurationSeconds=900"), }; const char *expected_method = "POST"; const char *expected_path = "/"; const char *expected_host_header = "sts.amazonaws.com"; for (int i = 0; i < s_tester.num_request; i++) { ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[i].method.buffer, s_tester.mocked_requests[i].method.len); ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[i].path.buffer, s_tester.mocked_requests[i].path.len); ASSERT_TRUE(s_tester.mocked_requests[i].had_auth_header); ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[i].host_header.buffer, s_tester.mocked_requests[i].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_body[i].ptr, s_expected_request_body[i].len, s_tester.mocked_requests[i].body.buffer, s_tester.mocked_requests[i].body.len); } aws_credentials_provider_release(provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_with_chain, s_credentials_provider_sts_from_profile_config_with_chain_fn) static const char *s_soure_profile_chain_and_profile_config_file = "[default]\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "\n" "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "source_profile=roletest2\n" "role_session_name=test_session\n" "[roletest2]\n" "role_arn=arn:aws:iam::67896:role/test_role\n" "source_profile=roletest3\n" "role_session_name=test_session2\n" "[roletest3]\n" "role_arn=arn:aws:iam::67897:role/test_role\n" "source_profile=default\n" "role_session_name=test_session3\n" "aws_access_key_id = BLAH\n" "aws_secret_access_key = BLAHBLAH\n"; static int s_credentials_provider_sts_from_profile_config_with_chain_and_profile_creds_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_soure_profile_chain_and_profile_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NOT_NULL(provider); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); 
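/* roletest3 defines static credentials of its own, so chain resolution stops there; only two AssumeRole requests (roletest2, then roletest) are expected */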
aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); ASSERT_INT_EQUALS(2, s_tester.num_request); static struct aws_byte_cursor s_expected_request_body[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A67896%" "3Arole%2Ftest_role&RoleSessionName=test_session2&DurationSeconds=900"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A67895%" "3Arole%2Ftest_role&RoleSessionName=test_session&DurationSeconds=900"), }; const char *expected_method = "POST"; const char *expected_path = "/"; const char *expected_host_header = "sts.amazonaws.com"; for (int i = 0; i < s_tester.num_request; i++) { ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[i].method.buffer, s_tester.mocked_requests[i].method.len); ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[i].path.buffer, s_tester.mocked_requests[i].path.len); ASSERT_TRUE(s_tester.mocked_requests[i].had_auth_header); ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[i].host_header.buffer, s_tester.mocked_requests[i].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_body[i].ptr, s_expected_request_body[i].len, s_tester.mocked_requests[i].body.buffer, s_tester.mocked_requests[i].body.len); } aws_credentials_provider_release(provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_with_chain_and_profile_creds, s_credentials_provider_sts_from_profile_config_with_chain_and_profile_creds_fn) static const char *s_soure_profile_chain_and_partial_profile_config_file = "[default]\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "\n" "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "source_profile=roletest2\n" "role_session_name=test_session\n" "[roletest2]\n" "role_arn=arn:aws:iam::67896:role/test_role\n" "source_profile=roletest3\n" "role_session_name=test_session2\n" "[roletest3]\n" "role_arn=arn:aws:iam::67897:role/test_role\n" "source_profile=default\n" "role_session_name=test_session3\n" "aws_access_key_id = BLAH\n"; static int s_credentials_provider_sts_from_profile_config_with_chain_and_partial_profile_creds_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_soure_profile_chain_and_partial_profile_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, .function_table = 
&s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NOT_NULL(provider); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_NULL(s_tester.credentials); ASSERT_INT_EQUALS(s_tester.error_code, AWS_AUTH_SIGNING_NO_CREDENTIALS); aws_credentials_provider_release(provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_with_chain_and_partial_profile_creds, s_credentials_provider_sts_from_profile_config_with_chain_and_partial_profile_creds_fn) static const char *s_soure_profile_self_assume_role_config_file = "[default]\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "\n" "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "source_profile=roletest\n" "role_session_name=test_session\n" "aws_access_key_id = BLAH\n" "aws_secret_access_key = BLAHBLAH\n"; static int s_credentials_provider_sts_from_self_referencing_profile_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_soure_profile_self_assume_role_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NOT_NULL(provider); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); ASSERT_INT_EQUALS(1, s_tester.num_request); static struct aws_byte_cursor s_expected_request_body[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A67895%" "3Arole%2Ftest_role&RoleSessionName=test_session&DurationSeconds=900"), }; const char *expected_method = "POST"; const char *expected_path = "/"; const char *expected_host_header = "sts.amazonaws.com"; for (int i = 0; i < s_tester.num_request; i++) { ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[i].method.buffer, s_tester.mocked_requests[i].method.len); ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[i].path.buffer, 
s_tester.mocked_requests[i].path.len); ASSERT_TRUE(s_tester.mocked_requests[i].had_auth_header); ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[i].host_header.buffer, s_tester.mocked_requests[i].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_body[i].ptr, s_expected_request_body[i].len, s_tester.mocked_requests[i].body.buffer, s_tester.mocked_requests[i].body.len); } aws_credentials_provider_release(provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_from_self_referencing_profile, s_credentials_provider_sts_from_self_referencing_profile_fn) static const char *s_soure_profile_chain_cycle_config_file = "[default]\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "\n" "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "source_profile=roletest2\n" "role_session_name=test_session\n" "[roletest2]\n" "role_arn=arn:aws:iam::67896:role/test_role\n" "source_profile=roletest3\n" "role_session_name=test_session2\n" "[roletest3]\n" "role_arn=arn:aws:iam::67897:role/test_role\n" "source_profile=roletest2\n" "role_session_name=test_session3\n"; static int s_credentials_provider_sts_from_profile_config_with_chain_cycle_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_soure_profile_chain_cycle_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NULL(provider); ASSERT_INT_EQUALS(AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE, aws_last_error()); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_with_chain_cycle, s_credentials_provider_sts_from_profile_config_with_chain_cycle_fn) static const char *s_soure_profile_chain_cycle_and_static_creds_config_file = "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "source_profile=roletest2\n" "role_session_name=test_session\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "[roletest2]\n" "role_arn=arn:aws:iam::67896:role/test_role\n" "source_profile=roletest3\n" "role_session_name=test_session2\n" "[roletest3]\n" "role_arn=arn:aws:iam::67897:role/test_role\n" "source_profile=roletest\n" "role_session_name=test_session3\n"; static int 
s_credentials_provider_sts_from_profile_config_with_chain_cycle_and_profile_creds_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_soure_profile_chain_cycle_and_static_creds_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, .function_table = &s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NULL(provider); ASSERT_INT_EQUALS(AWS_AUTH_PROFILE_STS_CREDENTIALS_PROVIDER_CYCLE_FAILURE, aws_last_error()); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_with_chain_cycle_and_profile_creds, s_credentials_provider_sts_from_profile_config_with_chain_cycle_and_profile_creds_fn) static int s_credentials_provider_sts_from_profile_config_succeeds( struct aws_allocator *allocator, void *ctx, bool manual_tls) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_soure_profile_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, /* tls_ctx is optional, test it both ways */ .tls_ctx = manual_tls ? 
s_tester.tls_ctx : NULL, .function_table = &s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NOT_NULL(provider); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); const char *expected_method = "POST"; ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[0].method.buffer, s_tester.mocked_requests[0].method.len); const char *expected_path = "/"; ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[0].path.buffer, s_tester.mocked_requests[0].path.len); ASSERT_TRUE(s_tester.mocked_requests[0].had_auth_header); const char *expected_host_header = "sts.amazonaws.com"; ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[0].host_header.buffer, s_tester.mocked_requests[0].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_payload.ptr, s_expected_payload.len, s_tester.mocked_requests[0].body.buffer, s_tester.mocked_requests[0].body.len); aws_credentials_provider_release(provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } static int s_credentials_provider_sts_from_profile_config_succeeds_fn(struct aws_allocator *allocator, void *ctx) { return s_credentials_provider_sts_from_profile_config_succeeds(allocator, ctx, false /*manual_tls*/); } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_succeeds, s_credentials_provider_sts_from_profile_config_succeeds_fn) static int credentials_provider_sts_from_profile_config_manual_tls_succeeds_fn( struct aws_allocator *allocator, void *ctx) { return s_credentials_provider_sts_from_profile_config_succeeds(allocator, ctx, true /*manual_tls*/); } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_manual_tls_succeeds, credentials_provider_sts_from_profile_config_manual_tls_succeeds_fn) static const char *s_env_source_config_file = "[default]\n" "aws_access_key_id=BLAHBLAH\n" "aws_secret_access_key=BLAHBLAHBLAH\n" "\n" "[roletest]\n" "role_arn=arn:aws:iam::67895:role/test_role\n" "credential_source=Environment\n" "role_session_name=test_session"; AWS_STRING_FROM_LITERAL(s_env_access_key_val, "EnvAccessKeyId"); AWS_STRING_FROM_LITERAL(s_env_secret_access_key_val, "EnvSecretAccessKeyId"); static int s_credentials_provider_sts_from_profile_config_environment_succeeds_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_unset_environment_value(s_default_profile_env_variable_name); aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); aws_set_environment_value(s_access_key_id_env_var, s_env_access_key_val); aws_set_environment_value(s_secret_access_key_env_var, s_env_secret_access_key_val); s_aws_sts_tester_init(allocator); struct aws_string *config_contents = aws_string_new_from_c_str(allocator, s_env_source_config_file); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(creds_file_str, config_contents)); aws_string_destroy(config_contents); struct 
aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_c_str("roletest"), .bootstrap = s_tester.bootstrap, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, }; s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NOT_NULL(provider); aws_string_destroy(creds_file_str); aws_string_destroy(config_file_str); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); const char *expected_method = "POST"; ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[0].method.buffer, s_tester.mocked_requests[0].method.len); const char *expected_path = "/"; ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[0].path.buffer, s_tester.mocked_requests[0].path.len); ASSERT_TRUE(s_tester.mocked_requests[0].had_auth_header); const char *expected_host_header = "sts.amazonaws.com"; ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[0].host_header.buffer, s_tester.mocked_requests[0].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_payload.ptr, s_expected_payload.len, s_tester.mocked_requests[0].body.buffer, s_tester.mocked_requests[0].body.len); aws_credentials_provider_release(provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( credentials_provider_sts_from_profile_config_environment_succeeds, s_credentials_provider_sts_from_profile_config_environment_succeeds_fn) #define HIGH_RES_BASE_TIME_NS 101000000000ULL /* * In this test, we set up a cached provider with a longer and out-of-sync refresh period than the sts * provider that it wraps. We verify that the cached provider factors in the shorter-lived sts credentials * properly and refreshes when the credentials expire and not when the cache would expire. 
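 * Concretely (added note, based on the values used below): the mocked STS response yields credentials that
 * expire 900 seconds after the mocked system time, while the cached provider's refresh window is set to
 * 1200 seconds, so a fetch made at t=901 must go back through the STS provider (new expiration of 1801)
 * even though the cache itself would not yet have expired.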
*/ static int s_credentials_provider_sts_cache_expiration_conflict(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_tester_init(allocator); struct aws_credentials_provider_static_options static_options = { .access_key_id = s_access_key_cur, .secret_access_key = s_secret_key_cur, .session_token = s_session_token_cur, }; struct aws_credentials_provider *static_provider = aws_credentials_provider_new_static(allocator, &static_options); struct aws_credentials_provider_sts_options options = { .creds_provider = static_provider, .bootstrap = s_tester.bootstrap, .tls_ctx = s_tester.tls_ctx, .role_arn = s_role_arn_cur, .session_name = s_session_name_cur, .duration_seconds = 0, .function_table = &s_mock_function_table, .system_clock_fn = mock_aws_get_system_time, }; /* make sure high res time and system time are sufficiently diverged that a mistake in the * respective calculations would fail the test */ mock_aws_set_system_time(0); mock_aws_set_high_res_time(HIGH_RES_BASE_TIME_NS); s_tester.mock_body = aws_byte_buf_from_c_str(success_creds_doc); s_tester.mock_response_code = 200; struct aws_credentials_provider *sts_provider = aws_credentials_provider_new_sts(allocator, &options); struct aws_credentials_provider_cached_options cached_options = { .system_clock_fn = mock_aws_get_system_time, .high_res_clock_fn = mock_aws_get_high_res_time, .refresh_time_in_milliseconds = 1200 * 1000, .source = sts_provider, }; struct aws_credentials_provider *cached_provider = aws_credentials_provider_new_cached(allocator, &cached_options); aws_credentials_provider_get_credentials(cached_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials(s_tester.credentials)); ASSERT_TRUE(aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == 900); const char *expected_method = "POST"; ASSERT_BIN_ARRAYS_EQUALS( expected_method, strlen(expected_method), s_tester.mocked_requests[0].method.buffer, s_tester.mocked_requests[0].method.len); const char *expected_path = "/"; ASSERT_BIN_ARRAYS_EQUALS( expected_path, strlen(expected_path), s_tester.mocked_requests[0].path.buffer, s_tester.mocked_requests[0].path.len); ASSERT_TRUE(s_tester.mocked_requests[0].had_auth_header); const char *expected_host_header = "sts.amazonaws.com"; ASSERT_BIN_ARRAYS_EQUALS( expected_host_header, strlen(expected_host_header), s_tester.mocked_requests[0].host_header.buffer, s_tester.mocked_requests[0].host_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_payload.ptr, s_expected_payload.len, s_tester.mocked_requests[0].body.buffer, s_tester.mocked_requests[0].body.len); /* advance each time to a little before expiration, verify we get creds with the same expiration */ uint64_t eight_hundred_seconds_in_ns = aws_timestamp_convert(800, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); mock_aws_set_system_time(eight_hundred_seconds_in_ns); mock_aws_set_high_res_time(HIGH_RES_BASE_TIME_NS + eight_hundred_seconds_in_ns); s_cleanup_creds_callback_data(); aws_credentials_provider_get_credentials(cached_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == 900); /* advance each time to after expiration but before cached provider timeout, verify we get new creds */ uint64_t nine_hundred_and_one_seconds_in_ns = aws_timestamp_convert(901, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); mock_aws_set_system_time(nine_hundred_and_one_seconds_in_ns); 
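/* Added note: at t=901 the previously cached credentials (which expire at t=900) are stale, so the
 * get_credentials call below is expected to hit the mocked STS endpoint again; the assertion that
 * follows checks for the refreshed expiration of 901 + 900 = 1801. */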
mock_aws_set_high_res_time(HIGH_RES_BASE_TIME_NS + nine_hundred_and_one_seconds_in_ns); s_cleanup_creds_callback_data(); aws_credentials_provider_get_credentials(cached_provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == 1801); aws_credentials_provider_release(cached_provider); aws_credentials_provider_release(sts_provider); aws_credentials_provider_release(static_provider); ASSERT_SUCCESS(s_aws_sts_tester_cleanup()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(credentials_provider_sts_cache_expiration_conflict, s_credentials_provider_sts_cache_expiration_conflict) aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_sts_web_identity_tests.c000066400000000000000000001401601456575232400325710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "shared_credentials_test_definitions.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct aws_mock_sts_web_identity_tester { struct aws_tls_ctx *tls_ctx; struct aws_byte_buf request_body; struct aws_array_list response_data_callbacks; bool is_connection_acquire_successful; bool is_request_successful; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; bool has_received_shutdown_callback; int attempts; int response_code; int error_code; } s_tester; static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_shutdown_callback = true; aws_mutex_unlock(&s_tester.lock); aws_condition_variable_notify_one(&s_tester.signal); } static bool s_has_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_tester.has_received_shutdown_callback; } static void s_aws_wait_for_provider_shutdown_callback(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_has_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_tester.lock); } struct mock_connection_manager { struct aws_allocator *allocator; aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback; void *shutdown_complete_user_data; }; static struct aws_http_connection_manager *s_aws_http_connection_manager_new_mock( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { struct mock_connection_manager *mock_manager = aws_mem_calloc(allocator, 1, sizeof(struct mock_connection_manager)); mock_manager->allocator = allocator; mock_manager->shutdown_complete_callback = options->shutdown_complete_callback; mock_manager->shutdown_complete_user_data = options->shutdown_complete_user_data; return (struct aws_http_connection_manager *)mock_manager; } static void s_aws_http_connection_manager_release_mock(struct aws_http_connection_manager *manager) { struct mock_connection_manager *mock_manager = (struct mock_connection_manager *)manager; mock_manager->shutdown_complete_callback(mock_manager->shutdown_complete_user_data); aws_mem_release(mock_manager->allocator, mock_manager); } static void s_aws_http_connection_manager_acquire_connection_mock( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; 
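/* Added note: connection acquisition mock. Depending on s_tester.is_connection_acquire_successful, the
 * setup callback is invoked synchronously with either a dummy connection handle or NULL plus an HTTP
 * error, so no real network connection is ever created. */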
    (void)callback;
    (void)user_data;

    if (s_tester.is_connection_acquire_successful) {
        callback((struct aws_http_connection *)1, AWS_OP_SUCCESS, user_data);
    } else {
        aws_raise_error(AWS_ERROR_HTTP_UNKNOWN);
        callback(NULL, AWS_OP_ERR, user_data);
    }
}

static int s_aws_http_connection_manager_release_connection_mock(
    struct aws_http_connection_manager *manager,
    struct aws_http_connection *connection) {
    (void)manager;
    (void)connection;
    return AWS_OP_SUCCESS;
}

static void s_invoke_mock_request_callbacks(
    const struct aws_http_make_request_options *options,
    struct aws_array_list *data_callbacks,
    bool is_request_successful) {

    size_t data_callback_count = aws_array_list_length(data_callbacks);

    struct aws_http_header headers[1];
    AWS_ZERO_ARRAY(headers);
    headers[0].name = aws_byte_cursor_from_c_str("some-header");
    headers[0].value = aws_byte_cursor_from_c_str("value");

    options->on_response_headers(
        (struct aws_http_stream *)1, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data);

    if (options->on_response_header_block_done) {
        options->on_response_header_block_done(
            (struct aws_http_stream *)1, data_callback_count > 0, options->user_data);
    }

    for (size_t i = 0; i < data_callback_count; ++i) {
        struct aws_byte_cursor data_callback_cursor;
        if (aws_array_list_get_at(data_callbacks, &data_callback_cursor, i)) {
            continue;
        }

        options->on_response_body((struct aws_http_stream *)1, &data_callback_cursor, options->user_data);
    }

    options->on_complete(
        (struct aws_http_stream *)1,
        is_request_successful ? AWS_ERROR_SUCCESS : AWS_ERROR_HTTP_UNKNOWN,
        options->user_data);
}

static struct aws_http_stream *s_aws_http_connection_make_request_mock(
    struct aws_http_connection *client_connection,
    const struct aws_http_make_request_options *options) {
    (void)client_connection;
    (void)options;

    struct aws_byte_cursor path;
    AWS_ZERO_STRUCT(path);

    struct aws_input_stream *body_stream = aws_http_message_get_body_stream(options->request);
    struct aws_allocator *allocator = s_tester.request_body.allocator;
    aws_byte_buf_clean_up(&s_tester.request_body);
    aws_byte_buf_init(&s_tester.request_body, allocator, 256);
    aws_input_stream_read(body_stream, &s_tester.request_body);

    s_invoke_mock_request_callbacks(options, &s_tester.response_data_callbacks, s_tester.is_request_successful);

    s_tester.attempts++;

    return (struct aws_http_stream *)1;
}

static int s_aws_http_stream_activate_mock(struct aws_http_stream *stream) {
    (void)stream;
    return AWS_OP_SUCCESS;
}

static int s_aws_http_stream_get_incoming_response_status_mock(
    const struct aws_http_stream *stream,
    int *out_status_code) {
    (void)stream;

    if (s_tester.response_code) {
        *out_status_code = s_tester.response_code;
    } else {
        *out_status_code = AWS_HTTP_STATUS_CODE_200_OK;
    }

    return AWS_OP_SUCCESS;
}

static void s_aws_http_stream_release_mock(struct aws_http_stream *stream) {
    (void)stream;
}

static void s_aws_http_connection_close_mock(struct aws_http_connection *connection) {
    (void)connection;
}

static struct aws_http_connection *s_aws_http_stream_get_connection_mock(const struct aws_http_stream *stream) {
    (void)stream;
    return (struct aws_http_connection *)1;
}

static struct aws_auth_http_system_vtable s_mock_function_table = {
    .aws_http_connection_manager_new = s_aws_http_connection_manager_new_mock,
    .aws_http_connection_manager_release = s_aws_http_connection_manager_release_mock,
    .aws_http_connection_manager_acquire_connection = s_aws_http_connection_manager_acquire_connection_mock,
    .aws_http_connection_manager_release_connection = s_aws_http_connection_manager_release_connection_mock,
.aws_http_connection_make_request = s_aws_http_connection_make_request_mock, .aws_http_stream_activate = s_aws_http_stream_activate_mock, .aws_http_stream_get_connection = s_aws_http_stream_get_connection_mock, .aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_incoming_response_status_mock, .aws_http_stream_release = s_aws_http_stream_release_mock, .aws_http_connection_close = s_aws_http_connection_close_mock}; AWS_STATIC_STRING_FROM_LITERAL(s_sts_web_identity_foo_profile, "foo"); AWS_STATIC_STRING_FROM_LITERAL(s_sts_web_identity_region_env, "AWS_DEFAULT_REGION"); AWS_STATIC_STRING_FROM_LITERAL(s_sts_web_identity_role_arn_env, "AWS_ROLE_ARN"); AWS_STATIC_STRING_FROM_LITERAL(s_sts_web_identity_role_session_name_env, "AWS_ROLE_SESSION_NAME"); AWS_STATIC_STRING_FROM_LITERAL(s_sts_web_identity_token_file_path_env, "AWS_WEB_IDENTITY_TOKEN_FILE"); AWS_STATIC_STRING_FROM_LITERAL(s_sts_web_identity_token_contents, "my-test-token-contents-123-abc-xyz"); static int s_aws_sts_web_identity_test_unset_env_parameters(void) { ASSERT_TRUE(aws_unset_environment_value(s_sts_web_identity_region_env) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_unset_environment_value(s_sts_web_identity_role_arn_env) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_unset_environment_value(s_sts_web_identity_role_session_name_env) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_unset_environment_value(s_sts_web_identity_token_file_path_env) == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } static int s_aws_sts_web_identity_test_init_env_parameters( struct aws_allocator *allocator, const char *region, const char *role_arn, const char *role_session_name, const char *web_identity_token_file) { struct aws_string *region_str = aws_string_new_from_c_str(allocator, region); ASSERT_TRUE(region_str != NULL); ASSERT_TRUE(aws_set_environment_value(s_sts_web_identity_region_env, region_str) == AWS_OP_SUCCESS); aws_string_destroy(region_str); struct aws_string *role_arn_str = aws_string_new_from_c_str(allocator, role_arn); ASSERT_TRUE(role_arn_str != NULL); ASSERT_TRUE(aws_set_environment_value(s_sts_web_identity_role_arn_env, role_arn_str) == AWS_OP_SUCCESS); aws_string_destroy(role_arn_str); struct aws_string *role_session_name_str = aws_string_new_from_c_str(allocator, role_session_name); ASSERT_TRUE(role_session_name_str != NULL); ASSERT_TRUE( aws_set_environment_value(s_sts_web_identity_role_session_name_env, role_session_name_str) == AWS_OP_SUCCESS); aws_string_destroy(role_session_name_str); struct aws_string *web_identity_token_file_str = aws_string_new_from_c_str(allocator, web_identity_token_file); ASSERT_TRUE(web_identity_token_file_str != NULL); ASSERT_TRUE( aws_set_environment_value(s_sts_web_identity_token_file_path_env, web_identity_token_file_str) == AWS_OP_SUCCESS); aws_string_destroy(web_identity_token_file_str); return AWS_OP_SUCCESS; } static int s_aws_sts_web_identity_test_init_config_profile( struct aws_allocator *allocator, const struct aws_string *config_contents) { struct aws_string *config_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(config_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(config_file_path_str, config_contents) == AWS_OP_SUCCESS); ASSERT_TRUE( aws_set_environment_value(s_default_config_path_env_variable_name, config_file_path_str) == AWS_OP_SUCCESS); ASSERT_TRUE( aws_set_environment_value(s_default_profile_env_variable_name, s_sts_web_identity_foo_profile) == AWS_OP_SUCCESS); aws_string_destroy(config_file_path_str); return AWS_OP_SUCCESS; } static int 
s_aws_sts_web_identity_tester_init(struct aws_allocator *allocator) { aws_auth_library_init(allocator); struct aws_tls_ctx_options tls_options; aws_tls_ctx_options_init_default_client(&tls_options, allocator); s_tester.tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options); ASSERT_NOT_NULL(s_tester.tls_ctx); if (aws_array_list_init_dynamic(&s_tester.response_data_callbacks, allocator, 10, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_buf_init(&s_tester.request_body, allocator, 256)) { return AWS_OP_ERR; } if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } /* default to everything successful */ s_tester.is_connection_acquire_successful = true; s_tester.is_request_successful = true; return AWS_OP_SUCCESS; } static void s_aws_sts_web_identity_tester_cleanup(void) { aws_tls_ctx_release(s_tester.tls_ctx); aws_array_list_clean_up(&s_tester.response_data_callbacks); aws_byte_buf_clean_up(&s_tester.request_body); aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_credentials_release(s_tester.credentials); aws_auth_library_clean_up(); } static bool s_has_tester_received_credentials_callback(void *user_data) { (void)user_data; return s_tester.has_received_credentials_callback; } static void s_aws_wait_for_credentials_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_credentials_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = true; s_tester.credentials = credentials; s_tester.error_code = error_code; if (credentials != NULL) { aws_credentials_acquire(credentials); } aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_credentials_provider_sts_web_identity_new_destroy_from_parameters( struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .region = aws_byte_cursor_from_c_str("us-east-1"), .role_arn = aws_byte_cursor_from_c_str("arn:aws:iam::1234567890:role/test-arn"), .role_session_name = aws_byte_cursor_from_c_str("9876543210"), .token_file_path = aws_byte_cursor_from_string(token_file_path_str), }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); ASSERT_NOT_NULL(provider); aws_string_destroy(token_file_path_str); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_new_destroy_from_parameters, s_credentials_provider_sts_web_identity_new_destroy_from_parameters); static int 
s_credentials_provider_sts_web_identity_new_destroy_from_env(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_new_destroy_from_env, s_credentials_provider_sts_web_identity_new_destroy_from_env); AWS_STATIC_STRING_FROM_LITERAL( s_sts_web_identity_config_file_contents, "[profile default]\n" "region=us-east-1\n" "role_arn=arn:aws:iam::1111111111:role/test-arn\n" "role_session_name=2222222222\n" "web_identity_token_file=/some/unreachable/path/toklen_file\n" "[profile foo]\n" "region=us-west-2\n" "role_arn=arn:aws:iam::3333333333:role/test-arn\n" "role_session_name=4444444444\n" "web_identity_token_file="); static int s_credentials_provider_sts_web_identity_new_destroy_from_config(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_sts_web_identity_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(token_file_path_str); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); aws_string_destroy(token_file_path_str); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); s_aws_sts_web_identity_test_init_config_profile(allocator, config_file_contents); aws_string_destroy(config_file_contents); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_release(provider); 
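/* Added note: releasing the provider kicks off an asynchronous shutdown; wait for the shutdown callback
 * registered in the options above before tearing down the tester. */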
s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_new_destroy_from_config, s_credentials_provider_sts_web_identity_new_destroy_from_config); static int s_credentials_provider_sts_web_identity_new_destroy_from_cached_config( struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); /* create a config file */ struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_sts_web_identity_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(token_file_path_str); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); aws_string_destroy(token_file_path_str); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); s_aws_sts_web_identity_test_init_config_profile(allocator, config_file_contents); aws_string_destroy(config_file_contents); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; /* read the config files */ struct aws_profile_collection *config_profiles = NULL; struct aws_string *config_file_path = NULL; ASSERT_TRUE( aws_get_environment_value(allocator, s_default_config_path_env_variable_name, &config_file_path) == AWS_OP_SUCCESS); config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG); ASSERT_NOT_NULL(config_profiles); options.config_profile_collection_cached = config_profiles; /* unset environment and config file*/ struct aws_string *empty_content = aws_string_new_from_c_str(allocator, ""); ASSERT_TRUE(empty_content != NULL); s_aws_sts_web_identity_test_init_config_profile(allocator, empty_content); aws_string_destroy(empty_content); s_aws_sts_web_identity_test_unset_env_parameters(); ASSERT_TRUE(aws_unset_environment_value(s_default_profile_env_variable_name) == AWS_OP_SUCCESS); /* assert we can create sts web identity from cached config file */ struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); aws_string_destroy(config_file_path); aws_profile_collection_release(config_profiles); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_new_destroy_from_cached_config, s_credentials_provider_sts_web_identity_new_destroy_from_cached_config); static int s_credentials_provider_sts_web_identity_new_failed_without_env_and_config( struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); struct aws_string 
*empty_content = aws_string_new_from_c_str(allocator, ""); ASSERT_TRUE(empty_content != NULL); s_aws_sts_web_identity_test_init_config_profile(allocator, empty_content); aws_string_destroy(empty_content); s_aws_sts_web_identity_test_unset_env_parameters(); ASSERT_TRUE(aws_unset_environment_value(s_default_profile_env_variable_name) == AWS_OP_SUCCESS); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); ASSERT_TRUE(provider == NULL); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_new_failed_without_env_and_config, s_credentials_provider_sts_web_identity_new_failed_without_env_and_config); AWS_STATIC_STRING_FROM_LITERAL( s_expected_sts_web_identity_body_message, "Action=AssumeRoleWithWebIdentity&Version=2011-06-15" "&RoleArn=arn%3Aaws%3Aiam%3A%3A1234567890%3Arole%2Ftest-arn&RoleSessionName=9876543210&WebIdentityToken=my-test-" "token-contents-123-abc-xyz"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_sts_web_identity_body_message_config, "Action=AssumeRoleWithWebIdentity&Version=2011-06-15" "&RoleArn=arn%3Aaws%3Aiam%3A%3A3333333333%3Arole%2Ftest-arn&RoleSessionName=4444444444&WebIdentityToken=my-test-" "token-contents-123-abc-xyz"); AWS_STATIC_STRING_FROM_LITERAL( s_good_response, "" " " " " " arn:aws:sts::123456789012:assumed-role/FederatedWebIdentityRole/app1" " AROACLKWSDQRAOEXAMPLE:app1" " " " " " TokenSuccess" " SuccessfulSecret" " 2020-02-25T06:03:31Z" " SuccessfulAccessKey" " " " www.amazon.com" " " " " " ad4156e9-bce1-11e2-82e6-6b6efEXAMPLE" " " ""); AWS_STATIC_STRING_FROM_LITERAL(s_good_access_key_id, "SuccessfulAccessKey"); AWS_STATIC_STRING_FROM_LITERAL(s_good_secret_access_key, "SuccessfulSecret"); AWS_STATIC_STRING_FROM_LITERAL(s_good_session_token, "TokenSuccess"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_expiration, "2020-02-25T06:03:31Z"); static int s_verify_credentials(bool request_made, bool from_config, bool got_credentials, int expected_attempts) { if (request_made) { if (from_config) { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_byte_cursor_from_buf(&s_tester.request_body), s_expected_sts_web_identity_body_message_config); } else { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_byte_cursor_from_buf(&s_tester.request_body), s_expected_sts_web_identity_body_message); } } ASSERT_TRUE(s_tester.has_received_credentials_callback); if (got_credentials) { ASSERT_TRUE(s_tester.credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_access_key_id(s_tester.credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(s_tester.credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_session_token(s_tester.credentials), s_good_session_token); } else { ASSERT_TRUE(s_tester.credentials == NULL); } ASSERT_TRUE(s_tester.attempts == expected_attempts); return AWS_OP_SUCCESS; } static int s_credentials_provider_sts_web_identity_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_tester.is_connection_acquire_successful = false; s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = 
aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials( false /*no request*/, false /*from config*/, false /*get creds*/, 0 /*expected attempts*/)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_connect_failure, s_credentials_provider_sts_web_identity_connect_failure); static int s_credentials_provider_sts_web_identity_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_tester.is_request_successful = false; s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials( true /*request made*/, false /*from config*/, false /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_request_failure, s_credentials_provider_sts_web_identity_request_failure); AWS_STATIC_STRING_FROM_LITERAL( s_bad_document_response, "Test"); static int s_credentials_provider_sts_web_identity_bad_document_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", 
"arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_bad_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &bad_document_cursor); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials( true /*request made*/, false /*from config*/, false /*get creds*/, 1 /*expected attempts*/)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_bad_document_failure, s_credentials_provider_sts_web_identity_bad_document_failure); AWS_STATIC_STRING_FROM_LITERAL( s_retryable_error_response_1, "" "IDPCommunicationError" "XXX" "YYY" "4442587FB7D0A2F9" ""); AWS_STATIC_STRING_FROM_LITERAL( s_retryable_error_response_2, "" "InvalidIdentityToken" "XXX" "YYY" "4442587FB7D0A2F9" ""); static int s_credentials_provider_sts_web_identity_test_retry_error1(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_tester.response_code = AWS_HTTP_STATUS_CODE_400_BAD_REQUEST; s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_retryable_error_response_1); aws_array_list_push_back(&s_tester.response_data_callbacks, &bad_document_cursor); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials( true /*request made*/, false /*from config*/, false /*get creds*/, 3 /*expected attempts*/)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_test_retry_error1, s_credentials_provider_sts_web_identity_test_retry_error1); static int s_credentials_provider_sts_web_identity_test_retry_error2(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_tester.response_code = AWS_HTTP_STATUS_CODE_400_BAD_REQUEST; 
s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_retryable_error_response_2); aws_array_list_push_back(&s_tester.response_data_callbacks, &bad_document_cursor); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials( true /*request made*/, false /*from config*/, false /*get creds*/, 3 /*expected attempts*/)); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_test_retry_error2, s_credentials_provider_sts_web_identity_test_retry_error2); static int s_credentials_provider_sts_web_identity_basic_success_env(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials( true /*request made*/, false /*from config*/, true /*get creds*/, 1 /*expected attempts*/)); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_response_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials), (uint64_t)expiration.timestamp); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } 
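/* Added note: registers the test above, which drives a single request purely from environment variables
 * and checks the credential expiration against the ISO-8601 timestamp embedded in the mocked response. */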
AWS_TEST_CASE( credentials_provider_sts_web_identity_basic_success_env, s_credentials_provider_sts_web_identity_basic_success_env); static int s_credentials_provider_sts_web_identity_basic_success_config(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); struct aws_byte_buf content_buf; struct aws_byte_buf existing_content = aws_byte_buf_from_c_str(aws_string_c_str(s_sts_web_identity_config_file_contents)); aws_byte_buf_init_copy(&content_buf, allocator, &existing_content); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(token_file_path_str); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); cursor = aws_byte_cursor_from_c_str("\n"); ASSERT_TRUE(aws_byte_buf_append_dynamic(&content_buf, &cursor) == AWS_OP_SUCCESS); aws_string_destroy(token_file_path_str); struct aws_string *config_file_contents = aws_string_new_from_array(allocator, content_buf.buffer, content_buf.len); ASSERT_TRUE(config_file_contents != NULL); aws_byte_buf_clean_up(&content_buf); s_aws_sts_web_identity_test_init_config_profile(allocator, config_file_contents); aws_string_destroy(config_file_contents); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS( s_verify_credentials(true /*request made*/, true /*from config*/, true /*get creds*/, 1 /*expected attempts*/)); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_response_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials), (uint64_t)expiration.timestamp); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_basic_success_config, s_credentials_provider_sts_web_identity_basic_success_config); AWS_STATIC_STRING_FROM_LITERAL( s_good_response_first_part, "" " " " " " arn:aws:sts::123456789012:assumed-role/FederatedWebIdentityRole/app1" " AROACLKWSDQRAOEXAMPLE:app1" " " " "); AWS_STATIC_STRING_FROM_LITERAL( s_good_response_second_part, " TokenSuccess" " SuccessfulSecret" " 2020-02-25T06:03:31Z" " SuccessfulAccessKey" " " " www.amazon.com" " " " "); AWS_STATIC_STRING_FROM_LITERAL( s_good_response_third_part, " ad4156e9-bce1-11e2-82e6-6b6efEXAMPLE" " " ""); static int s_credentials_provider_sts_web_identity_success_multi_part_doc(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_sts_web_identity_tester_init(allocator); 
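/* Added note: this variant delivers the same successful response document in three separate body
 * callbacks to verify that credentials parsing copes with a multi-part payload. */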
s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); struct aws_byte_cursor good_response_cursor1 = aws_byte_cursor_from_string(s_good_response_first_part); struct aws_byte_cursor good_response_cursor2 = aws_byte_cursor_from_string(s_good_response_second_part); struct aws_byte_cursor good_response_cursor3 = aws_byte_cursor_from_string(s_good_response_third_part); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor1); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor2); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor3); struct aws_credentials_provider_sts_web_identity_options options = { .bootstrap = NULL, .tls_ctx = s_tester.tls_ctx, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_SUCCESS(s_verify_credentials( true /*request made*/, false /*from config*/, true /*get creds*/, 1 /*expected attempts*/)); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_response_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials), (uint64_t)expiration.timestamp); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_sts_web_identity_tester_cleanup(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_success_multi_part_doc, s_credentials_provider_sts_web_identity_success_multi_part_doc); static int s_credentials_provider_sts_web_identity_real_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); s_aws_sts_web_identity_test_unset_env_parameters(); struct aws_string *token_file_path_str = aws_create_process_unique_file_name(allocator); ASSERT_TRUE(token_file_path_str != NULL); ASSERT_TRUE(aws_create_profile_file(token_file_path_str, s_sts_web_identity_token_contents) == AWS_OP_SUCCESS); s_aws_sts_web_identity_test_init_env_parameters( allocator, "us-east-1", "arn:aws:iam::1234567890:role/test-arn", "9876543210", aws_string_c_str(token_file_path_str)); aws_string_destroy(token_file_path_str); s_aws_sts_web_identity_tester_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct 
aws_credentials_provider_sts_web_identity_options options = { .bootstrap = bootstrap, .tls_ctx = s_tester.tls_ctx, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_sts_web_identity(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); s_aws_sts_web_identity_tester_cleanup(); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE( credentials_provider_sts_web_identity_real_new_destroy, s_credentials_provider_sts_web_identity_real_new_destroy); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_utils.c000066400000000000000000000702521456575232400271340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "credentials_provider_utils.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME"); /* * Support for async get testing */ void aws_get_credentials_test_callback_result_init( struct aws_get_credentials_test_callback_result *result, int required_count) { AWS_ZERO_STRUCT(*result); aws_mutex_init(&result->sync); aws_condition_variable_init(&result->signal); result->required_count = required_count; } void aws_get_credentials_test_callback_result_clean_up(struct aws_get_credentials_test_callback_result *result) { if (result->credentials) { aws_credentials_release(result->credentials); } aws_condition_variable_clean_up(&result->signal); aws_mutex_clean_up(&result->sync); } void aws_test_get_credentials_async_callback(struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_get_credentials_test_callback_result *result = (struct aws_get_credentials_test_callback_result *)user_data; aws_mutex_lock(&result->sync); result->count++; result->last_error = error_code; if (result->credentials != NULL) { aws_credentials_release(result->credentials); } result->credentials = credentials; if (credentials != NULL) { aws_credentials_acquire(credentials); } aws_condition_variable_notify_one(&result->signal); aws_mutex_unlock(&result->sync); } static bool s_sync_credentials_predicate(void *context) { struct aws_get_credentials_test_callback_result *result = (struct aws_get_credentials_test_callback_result *)context; return result->count == result->required_count; } void aws_wait_on_credentials_callback(struct aws_get_credentials_test_callback_result *result) { bool done = false; while (!done) { aws_mutex_lock(&result->sync); aws_condition_variable_wait_pred(&result->signal, &result->sync, s_sync_credentials_predicate, result); done = result->count == result->required_count; aws_mutex_unlock(&result->sync); } } /* * Mock provider */ struct aws_credentials_provider_mock_impl { struct aws_array_list results; size_t next_result; }; static int s_mock_credentials_provider_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_mock_impl *impl = (struct aws_credentials_provider_mock_impl *)provider->impl; if (impl->next_result < 
aws_array_list_length(&impl->results)) { struct get_credentials_mock_result result; if (aws_array_list_get_at(&impl->results, &result, impl->next_result)) { AWS_FATAL_ASSERT(false); } else { callback(result.credentials, result.error_code, user_data); } impl->next_result++; } else { AWS_FATAL_ASSERT(false); } return AWS_OP_SUCCESS; } static void s_mock_credentials_provider_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_mock_impl *impl = (struct aws_credentials_provider_mock_impl *)provider->impl; aws_credentials_provider_invoke_shutdown_callback(provider); aws_array_list_clean_up(&impl->results); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_mock_vtable = { .get_credentials = s_mock_credentials_provider_get_credentials_async, .destroy = s_mock_credentials_provider_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_mock( struct aws_allocator *allocator, struct get_credentials_mock_result *results, size_t result_count, struct aws_credentials_provider_shutdown_options *shutdown_options) { struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_mock_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_mock_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); if (aws_array_list_init_dynamic( &impl->results, allocator, result_count, sizeof(struct get_credentials_mock_result))) { goto on_init_result_list_failure; } for (size_t i = 0; i < result_count; ++i) { aws_array_list_push_back(&impl->results, results + i); } provider->allocator = allocator; provider->vtable = &s_aws_credentials_provider_mock_vtable; provider->impl = impl; if (shutdown_options) { provider->shutdown_options = *shutdown_options; } aws_atomic_store_int(&provider->ref_count, 1); return provider; on_init_result_list_failure: aws_mem_release(allocator, provider); return NULL; } /* * Mock async provider */ struct aws_credentials_provider_mock_async_impl { struct aws_event_loop_group *event_loop_group; struct aws_mutex sync; struct aws_array_list queries; struct aws_array_list mock_results; size_t next_result; }; static int s_async_mock_credentials_provider_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { struct aws_credentials_provider_mock_async_impl *impl = (struct aws_credentials_provider_mock_async_impl *)provider->impl; aws_mutex_lock(&impl->sync); struct aws_credentials_query query; AWS_ZERO_STRUCT(query); aws_credentials_query_init(&query, provider, callback, user_data); aws_array_list_push_back(&impl->queries, &query); aws_mutex_unlock(&impl->sync); return AWS_OP_SUCCESS; } static void s_async_mock_credentials_provider_destroy(struct aws_credentials_provider *provider) { struct aws_credentials_provider_mock_async_impl *impl = (struct aws_credentials_provider_mock_async_impl *)provider->impl; aws_array_list_clean_up(&impl->queries); aws_array_list_clean_up(&impl->mock_results); aws_event_loop_group_release(impl->event_loop_group); aws_mutex_clean_up(&impl->sync); aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_mock_async_vtable = { .get_credentials = s_async_mock_credentials_provider_get_credentials_async, .destroy 
= s_async_mock_credentials_provider_destroy, }; static void s_async_mock_credentials_provider_fire_callbacks_task( struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct aws_credentials_provider *provider = arg; struct aws_credentials_provider_mock_async_impl *impl = provider->impl; aws_mem_release(provider->allocator, task); aws_mutex_lock(&impl->sync); /* * We need to make all of our callbacks outside the lock, in order to avoid deadlock * To make that easier, we keep this array list around and swap the callbacks we need to make into it */ struct aws_array_list temp_queries; AWS_FATAL_ASSERT( aws_array_list_init_dynamic(&temp_queries, impl->queries.alloc, 10, sizeof(struct aws_credentials_query)) == AWS_OP_SUCCESS); struct get_credentials_mock_result result; AWS_ZERO_STRUCT(result); size_t callback_count = aws_array_list_length(&impl->queries); if (callback_count != 0) { size_t result_count = aws_array_list_length(&impl->mock_results); if (impl->next_result >= result_count || aws_array_list_get_at(&impl->mock_results, &result, impl->next_result)) { AWS_FATAL_ASSERT(false); } impl->next_result++; /* * move the callbacks we need to complete into the temporary list so that we can * safely use them outside the lock (we cannot safely use impl->queries outside the lock) */ aws_array_list_swap_contents(&impl->queries, &temp_queries); } aws_mutex_unlock(&impl->sync); /* make the callbacks, not holding the lock */ for (size_t i = 0; i < callback_count; ++i) { struct aws_credentials_query query; AWS_ZERO_STRUCT(query); if (aws_array_list_get_at(&temp_queries, &query, i)) { continue; } AWS_FATAL_ASSERT(query.callback != NULL); query.callback(result.credentials, result.error_code, query.user_data); aws_credentials_query_clean_up(&query); } aws_array_list_clean_up(&temp_queries); aws_credentials_provider_release(provider); } void aws_credentials_provider_mock_async_fire_callbacks(struct aws_credentials_provider *provider) { struct aws_credentials_provider_mock_async_impl *impl = provider->impl; struct aws_task *task = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_task)); AWS_FATAL_ASSERT(task); aws_task_init( task, s_async_mock_credentials_provider_fire_callbacks_task, provider, "async_mock_credentials_provider_fire_callbacks_task"); /* keep provider alive until task runs */ aws_credentials_provider_acquire(provider); struct aws_event_loop *loop = aws_event_loop_group_get_next_loop(impl->event_loop_group); aws_event_loop_schedule_task_now(loop, task); } struct aws_credentials_provider *aws_credentials_provider_new_mock_async( struct aws_allocator *allocator, struct get_credentials_mock_result *results, size_t result_count, struct aws_event_loop_group *elg, struct aws_credentials_provider_shutdown_options *shutdown_options) { struct aws_credentials_provider *provider = NULL; struct aws_credentials_provider_mock_async_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_credentials_provider), &impl, sizeof(struct aws_credentials_provider_mock_async_impl)); if (!provider) { return NULL; } AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); if (aws_mutex_init(&impl->sync)) { goto on_lock_init_failure; } if (aws_array_list_init_dynamic(&impl->queries, allocator, 10, sizeof(struct aws_credentials_query))) { goto on_query_list_init_failure; } if (aws_array_list_init_dynamic( &impl->mock_results, allocator, result_count, sizeof(struct get_credentials_mock_result))) { goto on_mock_result_list_init_failure; } for (size_t i = 0; i < 
result_count; ++i) { aws_array_list_push_back(&impl->mock_results, results + i); } impl->event_loop_group = aws_event_loop_group_acquire(elg); provider->allocator = allocator; provider->vtable = &s_aws_credentials_provider_mock_async_vtable; provider->impl = impl; if (shutdown_options) { provider->shutdown_options = *shutdown_options; } aws_atomic_store_int(&provider->ref_count, 1); return provider; on_mock_result_list_init_failure: aws_array_list_clean_up(&impl->queries); on_query_list_init_failure: aws_mutex_clean_up(&impl->sync); on_lock_init_failure: aws_mem_release(allocator, provider); return NULL; } /* * mock system clock */ static struct aws_mutex system_clock_sync = AWS_MUTEX_INIT; static uint64_t system_clock_time = 0; int mock_aws_get_system_time(uint64_t *current_time) { aws_mutex_lock(&system_clock_sync); *current_time = system_clock_time; aws_mutex_unlock(&system_clock_sync); return AWS_OP_SUCCESS; } void mock_aws_set_system_time(uint64_t current_time) { aws_mutex_lock(&system_clock_sync); system_clock_time = current_time; aws_mutex_unlock(&system_clock_sync); } /* * mock high res clock */ static struct aws_mutex high_res_clock_sync = AWS_MUTEX_INIT; static uint64_t high_res_clock_time = 0; int mock_aws_get_high_res_time(uint64_t *current_time) { aws_mutex_lock(&high_res_clock_sync); *current_time = high_res_clock_time; aws_mutex_unlock(&high_res_clock_sync); return AWS_OP_SUCCESS; } void mock_aws_set_high_res_time(uint64_t current_time) { aws_mutex_lock(&high_res_clock_sync); high_res_clock_time = current_time; aws_mutex_unlock(&high_res_clock_sync); } /* * Null provider impl */ static int s_credentials_provider_null_get_credentials_async( struct aws_credentials_provider *provider, aws_on_get_credentials_callback_fn callback, void *user_data) { (void)provider; callback(NULL, AWS_ERROR_UNKNOWN, user_data); return AWS_OP_SUCCESS; } static void s_credentials_provider_null_destroy(struct aws_credentials_provider *provider) { aws_credentials_provider_invoke_shutdown_callback(provider); aws_mem_release(provider->allocator, provider); } static struct aws_credentials_provider_vtable s_aws_credentials_provider_null_vtable = { .get_credentials = s_credentials_provider_null_get_credentials_async, .destroy = s_credentials_provider_null_destroy, }; struct aws_credentials_provider *aws_credentials_provider_new_null( struct aws_allocator *allocator, struct aws_credentials_provider_shutdown_options *shutdown_options) { struct aws_credentials_provider *provider = (struct aws_credentials_provider *)aws_mem_acquire(allocator, sizeof(struct aws_credentials_provider)); if (provider == NULL) { return NULL; } AWS_ZERO_STRUCT(*provider); provider->allocator = allocator; provider->vtable = &s_aws_credentials_provider_null_vtable; provider->impl = NULL; if (shutdown_options) { provider->shutdown_options = *shutdown_options; } aws_atomic_store_int(&provider->ref_count, 1); return provider; } int aws_create_directory_components(struct aws_allocator *allocator, const struct aws_string *path) { const char local_platform_separator = aws_get_platform_directory_separator(); /* Create directory components and ensure use of platform separator at the same time. 
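     * For example, for a path like "home-1234/sub/creds", aws_directory_create() is invoked for
     * "home-1234" and then "home-1234/sub" (with separators normalized to the platform's); the
     * trailing "creds" component is only created when the path ends in a directory separator.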
*/ for (size_t i = 0; i < path->len; ++i) { if (aws_is_any_directory_separator((char)path->bytes[i])) { ((char *)path->bytes)[i] = local_platform_separator; struct aws_string *segment = aws_string_new_from_array(allocator, path->bytes, i); int rc = aws_directory_create(segment); aws_string_destroy(segment); if (rc != AWS_OP_SUCCESS) { return rc; } } } return AWS_OP_SUCCESS; } int aws_create_random_home_directory(struct aws_allocator *allocator, struct aws_string **out_path) { struct aws_byte_buf path_buf; ASSERT_SUCCESS(aws_byte_buf_init(&path_buf, allocator, 256)); struct aws_byte_cursor prefix = aws_byte_cursor_from_c_str("./home-"); ASSERT_SUCCESS(aws_byte_buf_append(&path_buf, &prefix)); struct aws_uuid uuid; ASSERT_SUCCESS(aws_uuid_init(&uuid)); ASSERT_SUCCESS(aws_uuid_to_str(&uuid, &path_buf)); ASSERT_SUCCESS(aws_byte_buf_append_byte_dynamic(&path_buf, '/')); struct aws_string *path_str = aws_string_new_from_buf(allocator, &path_buf); ASSERT_SUCCESS(aws_create_directory_components(allocator, path_str)); ASSERT_SUCCESS(aws_set_environment_value(s_home_env_var, path_str)); aws_byte_buf_clean_up(&path_buf); *out_path = path_str; return AWS_OP_SUCCESS; } /* * Mocked HTTP connection manager for tests */ struct aws_auth_http_system_vtable aws_credentials_provider_http_mock_function_table = { .aws_http_connection_manager_new = aws_credentials_provider_http_mock_connection_manager_new, .aws_http_connection_manager_release = aws_credentials_provider_http_mock_connection_manager_release, .aws_http_connection_manager_acquire_connection = aws_credentials_provider_http_mock_connection_manager_acquire_connection, .aws_http_connection_manager_release_connection = aws_credentials_provider_http_mock_connection_manager_release_connection, .aws_http_connection_make_request = aws_credentials_provider_http_mock_make_request, .aws_http_stream_activate = aws_credentials_provider_http_mock_stream_activate, .aws_http_stream_get_connection = aws_credentials_provider_http_mock_stream_get_connection, .aws_http_stream_get_incoming_response_status = aws_credentials_provider_http_mock_stream_get_incoming_response_status, .aws_http_stream_release = aws_credentials_provider_http_mock_stream_release, .aws_http_connection_close = aws_credentials_provider_http_mock_connection_close}; struct aws_credentials_provider_http_mock_tester credentials_provider_http_mock_tester; int aws_credentials_provider_http_mock_tester_init(struct aws_allocator *allocator) { aws_auth_library_init(allocator); AWS_ZERO_STRUCT(credentials_provider_http_mock_tester); struct aws_tls_ctx_options tls_ctx_options; aws_tls_ctx_options_init_default_client(&tls_ctx_options, allocator); credentials_provider_http_mock_tester.tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); ASSERT_NOT_NULL(credentials_provider_http_mock_tester.tls_ctx); credentials_provider_http_mock_tester.el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = credentials_provider_http_mock_tester.el_group, .max_entries = 8, }; credentials_provider_http_mock_tester.resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = credentials_provider_http_mock_tester.el_group, .host_resolver = credentials_provider_http_mock_tester.resolver, }; credentials_provider_http_mock_tester.bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); if (aws_array_list_init_dynamic( 
&credentials_provider_http_mock_tester.response_data_callbacks, allocator, 10, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_buf_init(&credentials_provider_http_mock_tester.request_path, allocator, 256)) { return AWS_OP_ERR; } if (aws_byte_buf_init(&credentials_provider_http_mock_tester.request_body, allocator, 256)) { return AWS_OP_ERR; } if (aws_mutex_init(&credentials_provider_http_mock_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&credentials_provider_http_mock_tester.signal)) { return AWS_OP_ERR; } /* default to everything successful */ credentials_provider_http_mock_tester.is_connection_acquire_successful = true; credentials_provider_http_mock_tester.is_request_successful = true; return AWS_OP_SUCCESS; } void aws_credentials_provider_http_mock_tester_cleanup(void) { aws_tls_ctx_release(credentials_provider_http_mock_tester.tls_ctx); aws_client_bootstrap_release(credentials_provider_http_mock_tester.bootstrap); aws_host_resolver_release(credentials_provider_http_mock_tester.resolver); aws_event_loop_group_release(credentials_provider_http_mock_tester.el_group); aws_array_list_clean_up(&credentials_provider_http_mock_tester.response_data_callbacks); aws_byte_buf_clean_up(&credentials_provider_http_mock_tester.request_path); aws_byte_buf_clean_up(&credentials_provider_http_mock_tester.request_body); aws_condition_variable_clean_up(&credentials_provider_http_mock_tester.signal); aws_mutex_clean_up(&credentials_provider_http_mock_tester.lock); aws_credentials_release(credentials_provider_http_mock_tester.credentials); aws_auth_library_clean_up(); } void aws_credentials_provider_http_mock_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&credentials_provider_http_mock_tester.lock); credentials_provider_http_mock_tester.has_received_shutdown_callback = true; aws_mutex_unlock(&credentials_provider_http_mock_tester.lock); aws_condition_variable_notify_one(&credentials_provider_http_mock_tester.signal); } bool aws_credentials_provider_http_mock_has_received_shutdown_callback(void *user_data) { (void)user_data; return credentials_provider_http_mock_tester.has_received_shutdown_callback; } void aws_credentials_provider_http_mock_wait_for_shutdown_callback(void) { aws_mutex_lock(&credentials_provider_http_mock_tester.lock); aws_condition_variable_wait_pred( &credentials_provider_http_mock_tester.signal, &credentials_provider_http_mock_tester.lock, aws_credentials_provider_http_mock_has_received_shutdown_callback, NULL); aws_mutex_unlock(&credentials_provider_http_mock_tester.lock); } struct mock_connection_manager { struct aws_allocator *allocator; aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback; void *shutdown_complete_user_data; }; struct aws_http_connection_manager *aws_credentials_provider_http_mock_connection_manager_new( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { struct mock_connection_manager *mock_manager = aws_mem_calloc(allocator, 1, sizeof(struct mock_connection_manager)); mock_manager->allocator = allocator; mock_manager->shutdown_complete_callback = options->shutdown_complete_callback; mock_manager->shutdown_complete_user_data = options->shutdown_complete_user_data; return (struct aws_http_connection_manager *)mock_manager; } void aws_credentials_provider_http_mock_connection_manager_release(struct aws_http_connection_manager *manager) { struct mock_connection_manager *mock_manager = (struct mock_connection_manager *)manager; 
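    /*
     * The mock has no real connection pool to wind down, so "release" simply fires the
     * shutdown-complete callback captured at construction time and frees the mock manager.
     */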
mock_manager->shutdown_complete_callback(mock_manager->shutdown_complete_user_data); aws_mem_release(mock_manager->allocator, mock_manager); } void aws_credentials_provider_http_mock_connection_manager_acquire_connection( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; (void)callback; (void)user_data; if (credentials_provider_http_mock_tester.is_connection_acquire_successful) { callback((struct aws_http_connection *)1, AWS_OP_SUCCESS, user_data); } else { aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_OP_ERR, user_data); } } int aws_credentials_provider_http_mock_connection_manager_release_connection( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { (void)manager; (void)connection; return AWS_OP_SUCCESS; } void aws_credentials_provider_http_mock_invoke_request_callbacks( const struct aws_http_make_request_options *options, struct aws_array_list *data_callbacks, bool is_request_successful) { size_t data_callback_count = aws_array_list_length(data_callbacks); struct aws_http_header headers[1]; AWS_ZERO_ARRAY(headers); headers[0].name = aws_byte_cursor_from_c_str("some-header"); headers[0].value = aws_byte_cursor_from_c_str("value"); if (options->on_response_headers) { options->on_response_headers( (struct aws_http_stream *)1, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data); } if (options->on_response_header_block_done) { options->on_response_header_block_done( (struct aws_http_stream *)1, data_callback_count > 0, options->user_data); } for (size_t i = 0; i < data_callback_count; ++i) { struct aws_byte_cursor data_callback_cursor; if (aws_array_list_get_at(data_callbacks, &data_callback_cursor, i)) { continue; } options->on_response_body((struct aws_http_stream *)1, &data_callback_cursor, options->user_data); } options->on_complete( (struct aws_http_stream *)1, is_request_successful ? 
AWS_ERROR_SUCCESS : AWS_ERROR_HTTP_UNKNOWN, options->user_data); } struct aws_http_stream *aws_credentials_provider_http_mock_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); struct aws_input_stream *body_stream = aws_http_message_get_body_stream(options->request); struct aws_allocator *allocator = credentials_provider_http_mock_tester.request_body.allocator; aws_byte_buf_clean_up(&credentials_provider_http_mock_tester.request_body); aws_byte_buf_init(&credentials_provider_http_mock_tester.request_body, allocator, 256); if (body_stream) { aws_input_stream_read(body_stream, &credentials_provider_http_mock_tester.request_body); } aws_byte_buf_clean_up(&credentials_provider_http_mock_tester.request_path); struct aws_byte_cursor request_path_cursor; aws_http_message_get_request_path(options->request, &request_path_cursor); aws_byte_buf_init_copy_from_cursor( &credentials_provider_http_mock_tester.request_path, allocator, request_path_cursor); credentials_provider_http_mock_tester.attempts++; credentials_provider_http_mock_tester.request_options = *options; return (struct aws_http_stream *)1; } int aws_credentials_provider_http_mock_stream_activate(struct aws_http_stream *stream) { (void)stream; aws_credentials_provider_http_mock_invoke_request_callbacks( &credentials_provider_http_mock_tester.request_options, &credentials_provider_http_mock_tester.response_data_callbacks, credentials_provider_http_mock_tester.is_request_successful); return AWS_OP_SUCCESS; } int aws_credentials_provider_http_mock_stream_get_incoming_response_status( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; if (credentials_provider_http_mock_tester.failure_count) { credentials_provider_http_mock_tester.failure_count--; *out_status_code = credentials_provider_http_mock_tester.failure_response_code; } else if (credentials_provider_http_mock_tester.response_code) { *out_status_code = credentials_provider_http_mock_tester.response_code; } else { *out_status_code = AWS_HTTP_STATUS_CODE_200_OK; } return AWS_OP_SUCCESS; } void aws_credentials_provider_http_mock_stream_release(struct aws_http_stream *stream) { (void)stream; } void aws_credentials_provider_http_mock_connection_close(struct aws_http_connection *connection) { (void)connection; } struct aws_http_connection *aws_credentials_provider_http_mock_stream_get_connection( const struct aws_http_stream *stream) { (void)stream; return (struct aws_http_connection *)1; } bool aws_credentials_provider_http_mock_has_received_credentials_callback(void *user_data) { (void)user_data; return credentials_provider_http_mock_tester.has_received_credentials_callback; } void aws_credentials_provider_http_mock_wait_for_credentials_result(void) { aws_mutex_lock(&credentials_provider_http_mock_tester.lock); aws_condition_variable_wait_pred( &credentials_provider_http_mock_tester.signal, &credentials_provider_http_mock_tester.lock, aws_credentials_provider_http_mock_has_received_credentials_callback, NULL); aws_mutex_unlock(&credentials_provider_http_mock_tester.lock); } void aws_credentials_provider_http_mock_get_credentials_callback( struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&credentials_provider_http_mock_tester.lock); credentials_provider_http_mock_tester.has_received_credentials_callback = true; credentials_provider_http_mock_tester.credentials 
= credentials; credentials_provider_http_mock_tester.error_code = error_code; if (credentials != NULL) { aws_credentials_acquire(credentials); } aws_condition_variable_notify_one(&credentials_provider_http_mock_tester.signal); aws_mutex_unlock(&credentials_provider_http_mock_tester.lock); } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_utils.h000066400000000000000000000162071456575232400271410ustar00rootroot00000000000000#ifndef AWS_AUTH_CREDENTIALS_PROVIDER_MOCK_H #define AWS_AUTH_CREDENTIALS_PROVIDER_MOCK_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include struct aws_credentials; struct aws_credentials_provider; struct aws_credentials_provider_shutdown_options; struct aws_event_loop_group; struct aws_string; /* * This file contains a number of helper functions and data structures * that let us verify async behavior within the credentials provider. * * It includes multiple provider mocks (one synchronous, one background-thread * based and externally controllable), a synchronizing controller that uses * concurrency primitives to ensure we can perform operations at troublesome * time points (freeze the cached background query so that we can queue up * multiple pending queries, for example), and misc supporting functions like * time function mocks. */ /* * test helper struct to correctly wait on async credentials callbacks */ struct aws_get_credentials_test_callback_result { struct aws_mutex sync; struct aws_condition_variable signal; struct aws_credentials *credentials; int count; int required_count; int last_error; }; void aws_get_credentials_test_callback_result_init( struct aws_get_credentials_test_callback_result *result, int required_count); void aws_get_credentials_test_callback_result_clean_up(struct aws_get_credentials_test_callback_result *result); void aws_wait_on_credentials_callback(struct aws_get_credentials_test_callback_result *result); void aws_test_get_credentials_async_callback(struct aws_credentials *credentials, int error_code, void *user_data); struct get_credentials_mock_result { int error_code; struct aws_credentials *credentials; }; /* * Mock credentials provider, synchronous */ struct aws_credentials_provider *aws_credentials_provider_new_mock( struct aws_allocator *allocator, struct get_credentials_mock_result *results, size_t result_count, struct aws_credentials_provider_shutdown_options *shutdown_options); struct aws_credentials_provider *aws_credentials_provider_new_mock_async( struct aws_allocator *allocator, struct get_credentials_mock_result *results, size_t result_count, struct aws_event_loop_group *elg, struct aws_credentials_provider_shutdown_options *shutdown_options); /* If any pending queries, deliver the next mock-result to all of them from another thread. * If no pending queries, nothing happens. */ void aws_credentials_provider_mock_async_fire_callbacks(struct aws_credentials_provider *provider); /* * Simple global clock mocks */ int mock_aws_get_system_time(uint64_t *current_time); void mock_aws_set_system_time(uint64_t current_time); int mock_aws_get_high_res_time(uint64_t *current_time); void mock_aws_set_high_res_time(uint64_t current_time); /* * Credentials provider that always returns NULL. Useful for chain tests. 
*/ struct aws_credentials_provider *aws_credentials_provider_new_null( struct aws_allocator *allocator, struct aws_credentials_provider_shutdown_options *shutdown_options); /** * Create the directory components of @path: * - if @path ends in a path separator, create every directory component; * - else, stop at the last path separator (parent directory of @path). */ int aws_create_directory_components(struct aws_allocator *allocator, const struct aws_string *path); /** * Create a new directory (under current working dir) and set $HOME env variable. */ int aws_create_random_home_directory(struct aws_allocator *allocator, struct aws_string **out_path); /** * Mocked HTTP connection manager for tests */ struct aws_credentials_provider_http_mock_tester { struct aws_tls_ctx *tls_ctx; struct aws_event_loop_group *el_group; struct aws_host_resolver *resolver; struct aws_client_bootstrap *bootstrap; struct aws_byte_buf request_path; struct aws_byte_buf request_body; struct aws_http_make_request_options request_options; struct aws_array_list response_data_callbacks; bool is_connection_acquire_successful; bool is_request_successful; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; bool has_received_shutdown_callback; int attempts; int response_code; int error_code; int failure_response_code; int failure_count; }; extern struct aws_credentials_provider_http_mock_tester credentials_provider_http_mock_tester; int aws_credentials_provider_http_mock_tester_init(struct aws_allocator *allocator); void aws_credentials_provider_http_mock_tester_cleanup(void); void aws_credentials_provider_http_mock_on_shutdown_complete(void *user_data); bool aws_credentials_provider_http_mock_has_received_shutdown_callback(void *user_data); void aws_credentials_provider_http_mock_wait_for_shutdown_callback(void); struct aws_http_connection_manager *aws_credentials_provider_http_mock_connection_manager_new( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options); void aws_credentials_provider_http_mock_connection_manager_release(struct aws_http_connection_manager *manager); void aws_credentials_provider_http_mock_connection_manager_acquire_connection( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data); int aws_credentials_provider_http_mock_connection_manager_release_connection( struct aws_http_connection_manager *manager, struct aws_http_connection *connection); void aws_credentials_provider_http_mock_invoke_request_callbacks( const struct aws_http_make_request_options *options, struct aws_array_list *data_callbacks, bool is_request_successful); struct aws_http_stream *aws_credentials_provider_http_mock_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); int aws_credentials_provider_http_mock_stream_activate(struct aws_http_stream *stream); int aws_credentials_provider_http_mock_stream_get_incoming_response_status( const struct aws_http_stream *stream, int *out_status_code); void aws_credentials_provider_http_mock_stream_release(struct aws_http_stream *stream); void aws_credentials_provider_http_mock_connection_close(struct aws_http_connection *connection); struct aws_http_connection *aws_credentials_provider_http_mock_stream_get_connection( const struct aws_http_stream *stream); bool aws_credentials_provider_http_mock_has_received_credentials_callback(void *user_data); 
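/*
 * Rough usage sketch (hypothetical test, not part of this suite): a provider test built on the
 * mocked HTTP stack below generally follows this pattern. The provider under test and its
 * options ("some_provider_options", "provider") are placeholders; creation of the provider
 * itself is elided.
 *
 *   aws_credentials_provider_http_mock_tester_init(allocator);
 *   struct aws_byte_cursor body = aws_byte_cursor_from_c_str("{...response document...}");
 *   aws_array_list_push_back(&credentials_provider_http_mock_tester.response_data_callbacks, &body);
 *
 *   // point the provider at the mock vtable so no real network I/O happens
 *   some_provider_options.function_table = &aws_credentials_provider_http_mock_function_table;
 *   some_provider_options.shutdown_options.shutdown_callback =
 *       aws_credentials_provider_http_mock_on_shutdown_complete;
 *
 *   aws_credentials_provider_get_credentials(
 *       provider, aws_credentials_provider_http_mock_get_credentials_callback, NULL);
 *   aws_credentials_provider_http_mock_wait_for_credentials_result();
 *
 *   aws_credentials_provider_release(provider);
 *   aws_credentials_provider_http_mock_wait_for_shutdown_callback();
 *   aws_credentials_provider_http_mock_tester_cleanup();
 */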
void aws_credentials_provider_http_mock_wait_for_credentials_result(void); void aws_credentials_provider_http_mock_get_credentials_callback( struct aws_credentials *credentials, int error_code, void *user_data); extern struct aws_auth_http_system_vtable aws_credentials_provider_http_mock_function_table; #endif /* AWS_AUTH_CREDENTIALS_PROVIDER_MOCK_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_provider_x509_tests.c000066400000000000000000000577121456575232400277310ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct aws_mock_x509_tester { struct aws_byte_buf request_uri; struct aws_array_list response_data_callbacks; bool is_connection_acquire_successful; bool is_request_successful; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; bool has_received_shutdown_callback; int error_code; struct aws_tls_ctx *ctx; struct aws_tls_connection_options tls_connection_options; }; static struct aws_mock_x509_tester s_tester; static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_shutdown_callback = true; aws_mutex_unlock(&s_tester.lock); aws_condition_variable_notify_one(&s_tester.signal); } static bool s_has_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_tester.has_received_shutdown_callback; } static void s_aws_wait_for_provider_shutdown_callback(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_has_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static struct aws_http_connection_manager *s_aws_http_connection_manager_new_mock( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { (void)allocator; (void)options; return (struct aws_http_connection_manager *)1; } static void s_aws_http_connection_manager_release_mock(struct aws_http_connection_manager *manager) { (void)manager; s_on_shutdown_complete(NULL); } static void s_aws_http_connection_manager_acquire_connection_mock( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { (void)manager; (void)callback; (void)user_data; if (s_tester.is_connection_acquire_successful) { callback((struct aws_http_connection *)1, AWS_OP_SUCCESS, user_data); } else { aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_OP_ERR, user_data); } } static int s_aws_http_connection_manager_release_connection_mock( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { (void)manager; (void)connection; return AWS_OP_SUCCESS; } static void s_invoke_mock_request_callbacks( const struct aws_http_make_request_options *options, struct aws_array_list *data_callbacks, bool is_request_successful) { size_t data_callback_count = aws_array_list_length(data_callbacks); struct aws_http_header headers[1]; AWS_ZERO_ARRAY(headers); headers[0].name = aws_byte_cursor_from_c_str("some-header"); headers[0].value = aws_byte_cursor_from_c_str("value"); options->on_response_headers( (struct aws_http_stream *)1, AWS_HTTP_HEADER_BLOCK_MAIN, headers, 1, options->user_data); if (options->on_response_header_block_done) { 
options->on_response_header_block_done( (struct aws_http_stream *)1, data_callback_count > 0, options->user_data); } for (size_t i = 0; i < data_callback_count; ++i) { struct aws_byte_cursor data_callback_cursor; if (aws_array_list_get_at(data_callbacks, &data_callback_cursor, i)) { continue; } options->on_response_body((struct aws_http_stream *)1, &data_callback_cursor, options->user_data); } options->on_complete( (struct aws_http_stream *)1, is_request_successful ? AWS_ERROR_SUCCESS : AWS_ERROR_HTTP_UNKNOWN, options->user_data); } static struct aws_http_stream *s_aws_http_connection_make_request_mock( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { (void)client_connection; (void)options; struct aws_byte_cursor path; AWS_ZERO_STRUCT(path); aws_http_message_get_request_path(options->request, &path); aws_byte_buf_append_dynamic(&s_tester.request_uri, &path); s_invoke_mock_request_callbacks(options, &s_tester.response_data_callbacks, s_tester.is_request_successful); return (struct aws_http_stream *)1; } static int s_aws_http_stream_activate_mock(struct aws_http_stream *stream) { (void)stream; return AWS_OP_SUCCESS; } static int s_aws_http_stream_get_incoming_response_status_mock( const struct aws_http_stream *stream, int *out_status_code) { (void)stream; *out_status_code = 200; return AWS_OP_SUCCESS; } static void s_aws_http_stream_release_mock(struct aws_http_stream *stream) { (void)stream; } static void s_aws_http_connection_close_mock(struct aws_http_connection *connection) { (void)connection; } static struct aws_auth_http_system_vtable s_mock_function_table = { .aws_http_connection_manager_new = s_aws_http_connection_manager_new_mock, .aws_http_connection_manager_release = s_aws_http_connection_manager_release_mock, .aws_http_connection_manager_acquire_connection = s_aws_http_connection_manager_acquire_connection_mock, .aws_http_connection_manager_release_connection = s_aws_http_connection_manager_release_connection_mock, .aws_http_connection_make_request = s_aws_http_connection_make_request_mock, .aws_http_stream_activate = s_aws_http_stream_activate_mock, .aws_http_stream_get_incoming_response_status = s_aws_http_stream_get_incoming_response_status_mock, .aws_http_stream_release = s_aws_http_stream_release_mock, .aws_http_connection_close = s_aws_http_connection_close_mock}; static int s_aws_x509_tester_init(struct aws_allocator *allocator) { aws_auth_library_init(allocator); if (aws_array_list_init_dynamic(&s_tester.response_data_callbacks, allocator, 10, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_buf_init(&s_tester.request_uri, allocator, 100)) { return AWS_OP_ERR; } if (aws_mutex_init(&s_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_tester.signal)) { return AWS_OP_ERR; } AWS_ZERO_STRUCT(s_tester.tls_connection_options); struct aws_tls_ctx_options tls_options; aws_tls_ctx_options_init_default_client(&tls_options, allocator); s_tester.ctx = aws_tls_client_ctx_new(allocator, &tls_options); aws_tls_ctx_options_clean_up(&tls_options); aws_tls_connection_options_init_from_ctx(&s_tester.tls_connection_options, s_tester.ctx); /* default to everything successful */ s_tester.is_connection_acquire_successful = true; s_tester.is_request_successful = true; return AWS_OP_SUCCESS; } static void s_aws_x509_tester_cleanup(void) { aws_array_list_clean_up(&s_tester.response_data_callbacks); aws_byte_buf_clean_up(&s_tester.request_uri); aws_condition_variable_clean_up(&s_tester.signal); 
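    /*
     * s_get_credentials_callback acquires a reference on any credentials it receives, so the
     * cleanup below releases that reference along with the TLS context and connection options
     * created in s_aws_x509_tester_init.
     */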
aws_mutex_clean_up(&s_tester.lock); aws_credentials_release(s_tester.credentials); aws_tls_ctx_release(s_tester.ctx); s_tester.ctx = NULL; aws_tls_connection_options_clean_up(&s_tester.tls_connection_options); aws_auth_library_clean_up(); } static bool s_has_tester_received_credentials_callback(void *user_data) { (void)user_data; return s_tester.has_received_credentials_callback; } static void s_aws_wait_for_credentials_result(void) { aws_mutex_lock(&s_tester.lock); aws_condition_variable_wait_pred( &s_tester.signal, &s_tester.lock, s_has_tester_received_credentials_callback, NULL); aws_mutex_unlock(&s_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_tester.lock); s_tester.has_received_credentials_callback = true; s_tester.error_code = error_code; s_tester.credentials = credentials; aws_credentials_acquire(credentials); aws_condition_variable_notify_one(&s_tester.signal); aws_mutex_unlock(&s_tester.lock); } static int s_credentials_provider_x509_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_x509_tester_init(allocator); struct aws_credentials_provider_x509_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .endpoint = aws_byte_cursor_from_c_str("c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com"), .thing_name = aws_byte_cursor_from_c_str("my_iot_thing_name"), .role_alias = aws_byte_cursor_from_c_str("my_test_role_alias"), .tls_connection_options = &s_tester.tls_connection_options, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_x509(allocator, &options); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_x509_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_x509_new_destroy, s_credentials_provider_x509_new_destroy); static int s_credentials_provider_x509_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_x509_tester_init(allocator); s_tester.is_connection_acquire_successful = false; struct aws_credentials_provider_x509_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .endpoint = aws_byte_cursor_from_c_str("c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com"), .thing_name = aws_byte_cursor_from_c_str("my_iot_thing_name"), .role_alias = aws_byte_cursor_from_c_str("my_test_role_alias"), .tls_connection_options = &s_tester.tls_connection_options, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_x509(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_x509_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_x509_connect_failure, s_credentials_provider_x509_connect_failure); 
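/*
 * The x509 provider is expected to request "/role-aliases/<role_alias>/credentials"; with the
 * role_alias "my_test_role_alias" used by every test in this file, the expected path is the
 * literal below, and the request-failure and success tests assert that the URI recorded by the
 * mocked aws_http_connection_make_request matches it exactly.
 */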
AWS_STATIC_STRING_FROM_LITERAL(s_expected_x509_role_alias_path, "/role-aliases/my_test_role_alias/credentials"); static int s_credentials_provider_x509_request_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_x509_tester_init(allocator); s_tester.is_request_successful = false; struct aws_credentials_provider_x509_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .endpoint = aws_byte_cursor_from_c_str("c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com"), .thing_name = aws_byte_cursor_from_c_str("my_iot_thing_name"), .role_alias = aws_byte_cursor_from_c_str("my_test_role_alias"), .tls_connection_options = &s_tester.tls_connection_options, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_x509(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_x509_role_alias_path->bytes, s_expected_x509_role_alias_path->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_x509_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_x509_request_failure, s_credentials_provider_x509_request_failure); AWS_STATIC_STRING_FROM_LITERAL(s_bad_document_response, "{\"NotTheExpectedDocumentFormat\":\"Error\"}"); static int s_credentials_provider_x509_bad_document_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_x509_tester_init(allocator); struct aws_byte_cursor bad_document_cursor = aws_byte_cursor_from_string(s_bad_document_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &bad_document_cursor); struct aws_credentials_provider_x509_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .endpoint = aws_byte_cursor_from_c_str("c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com"), .thing_name = aws_byte_cursor_from_c_str("my_iot_thing_name"), .role_alias = aws_byte_cursor_from_c_str("my_test_role_alias"), .tls_connection_options = &s_tester.tls_connection_options, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_x509(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_x509_role_alias_path->bytes, s_expected_x509_role_alias_path->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials == NULL); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_x509_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_x509_bad_document_failure, s_credentials_provider_x509_bad_document_failure); AWS_STATIC_STRING_FROM_LITERAL( s_good_response, "{\"Credentials\": 
{\"AccessKeyId\":\"SuccessfulAccessKey\", \n \"SecretAccessKey\":\"SuccessfulSecret\", \n " "\"SessionToken\":\"TokenSuccess\", \n \"Expiration\":\"2020-02-25T06:03:31Z\"}}"); AWS_STATIC_STRING_FROM_LITERAL(s_good_access_key_id, "SuccessfulAccessKey"); AWS_STATIC_STRING_FROM_LITERAL(s_good_secret_access_key, "SuccessfulSecret"); AWS_STATIC_STRING_FROM_LITERAL(s_good_session_token, "TokenSuccess"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_expiration, "2020-02-25T06:03:31Z"); static int s_credentials_provider_x509_basic_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_x509_tester_init(allocator); struct aws_byte_cursor good_response_cursor = aws_byte_cursor_from_string(s_good_response); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor); struct aws_credentials_provider_x509_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .endpoint = aws_byte_cursor_from_c_str("c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com"), .thing_name = aws_byte_cursor_from_c_str("my_iot_thing_name"), .role_alias = aws_byte_cursor_from_c_str("my_test_role_alias"), .tls_connection_options = &s_tester.tls_connection_options, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_x509(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_x509_role_alias_path->bytes, s_expected_x509_role_alias_path->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(s_tester.credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(s_tester.credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(s_tester.credentials), s_good_session_token); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_response_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_TRUE( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == (uint64_t)expiration.timestamp); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_x509_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_x509_basic_success, s_credentials_provider_x509_basic_success); AWS_STATIC_STRING_FROM_LITERAL( s_good_response_first_part, "{\"Credentials\": {\"AccessKeyId\":\"SuccessfulAccessKey\", \n \"Secret"); AWS_STATIC_STRING_FROM_LITERAL( s_good_response_second_part, "AccessKey\":\"SuccessfulSecret\", \n \"SessionToken\":\"Token"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response_third_part, "Success\", \n \"Expiration\":\"2020-02-25T06:03:31Z\"}}"); static int s_credentials_provider_x509_success_multi_part_doc(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_x509_tester_init(allocator); struct aws_byte_cursor good_response_cursor1 = aws_byte_cursor_from_string(s_good_response_first_part); struct aws_byte_cursor good_response_cursor2 = 
aws_byte_cursor_from_string(s_good_response_second_part); struct aws_byte_cursor good_response_cursor3 = aws_byte_cursor_from_string(s_good_response_third_part); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor1); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor2); aws_array_list_push_back(&s_tester.response_data_callbacks, &good_response_cursor3); struct aws_credentials_provider_x509_options options = { .bootstrap = NULL, .function_table = &s_mock_function_table, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .endpoint = aws_byte_cursor_from_c_str("c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com"), .thing_name = aws_byte_cursor_from_c_str("my_iot_thing_name"), .role_alias = aws_byte_cursor_from_c_str("my_test_role_alias"), .tls_connection_options = &s_tester.tls_connection_options, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_x509(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); ASSERT_BIN_ARRAYS_EQUALS( s_tester.request_uri.buffer, s_tester.request_uri.len, s_expected_x509_role_alias_path->bytes, s_expected_x509_role_alias_path->len); ASSERT_TRUE(s_tester.has_received_credentials_callback == true); ASSERT_TRUE(s_tester.credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(s_tester.credentials), s_good_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(s_tester.credentials), s_good_secret_access_key); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(s_tester.credentials), s_good_session_token); struct aws_date_time expiration; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_good_response_expiration); aws_date_time_init_from_str_cursor(&expiration, &date_cursor, AWS_DATE_FORMAT_ISO_8601); ASSERT_TRUE( aws_credentials_get_expiration_timepoint_seconds(s_tester.credentials) == (uint64_t)expiration.timestamp); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); /* Because we mock the http connection manager, we never get a callback back from it */ aws_mem_release(provider->allocator, provider); s_aws_x509_tester_cleanup(); return 0; } AWS_TEST_CASE(credentials_provider_x509_success_multi_part_doc, s_credentials_provider_x509_success_multi_part_doc); static int s_credentials_provider_x509_real_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; struct aws_logger logger; ASSERT_SUCCESS(aws_logger_init_standard(&logger, allocator, &logger_options)); aws_logger_set(&logger); s_aws_x509_tester_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_credentials_provider_x509_options options = { .bootstrap = bootstrap, .shutdown_options = { .shutdown_callback = 
s_on_shutdown_complete, .shutdown_user_data = NULL, }, .endpoint = aws_byte_cursor_from_c_str("c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com"), .thing_name = aws_byte_cursor_from_c_str("my_iot_thing_name"), .role_alias = aws_byte_cursor_from_c_str("my_test_role_alias"), .tls_connection_options = &s_tester.tls_connection_options, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_x509(allocator, &options); aws_credentials_provider_get_credentials(provider, s_get_credentials_callback, NULL); s_aws_wait_for_credentials_result(); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); s_aws_x509_tester_cleanup(); aws_auth_library_clean_up(); aws_logger_set(NULL); aws_logger_clean_up(&logger); return 0; } AWS_TEST_CASE(credentials_provider_x509_real_new_destroy, s_credentials_provider_x509_real_new_destroy); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/credentials_tests.c000066400000000000000000001623001456575232400252200ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "shared_credentials_test_definitions.h" #ifdef _MSC_VER # pragma warning(disable : 4996) #endif AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_test_value, "My Access Key"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_test_value, "SekritKey"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_test_value, "Some Session Token"); static int s_credentials_create_destroy_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials *credentials = aws_credentials_new_from_string( allocator, s_access_key_id_test_value, s_secret_access_key_test_value, s_session_token_test_value, UINT64_MAX); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(credentials), s_access_key_id_test_value); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(credentials), s_secret_access_key_test_value); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(credentials), s_session_token_test_value); aws_credentials_release(credentials); return 0; } AWS_TEST_CASE(credentials_create_destroy_test, s_credentials_create_destroy_test); static int s_anonymous_credentials_create_destroy_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials *credentials = aws_credentials_new_anonymous(allocator); ASSERT_NOT_NULL(credentials); ASSERT_TRUE(aws_credentials_is_anonymous(credentials)); ASSERT_NULL(aws_credentials_get_access_key_id(credentials).ptr); ASSERT_NULL(aws_credentials_get_secret_access_key(credentials).ptr); ASSERT_NULL(aws_credentials_get_session_token(credentials).ptr); aws_credentials_release(credentials); return 0; } AWS_TEST_CASE(anonymous_credentials_create_destroy_test, s_anonymous_credentials_create_destroy_test); struct aws_credentials_shutdown_checker { struct aws_mutex lock; struct aws_condition_variable signal; bool is_shutdown_complete; }; static struct aws_credentials_shutdown_checker s_shutdown_checker; static void s_aws_credentials_shutdown_checker_init(void) { AWS_ZERO_STRUCT(s_shutdown_checker); aws_mutex_init(&s_shutdown_checker.lock); aws_condition_variable_init(&s_shutdown_checker.signal); } static void 
s_aws_credentials_shutdown_checker_clean_up(void) { aws_mutex_clean_up(&s_shutdown_checker.lock); aws_condition_variable_clean_up(&s_shutdown_checker.signal); } static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_shutdown_checker.lock); s_shutdown_checker.is_shutdown_complete = true; aws_mutex_unlock(&s_shutdown_checker.lock); aws_condition_variable_notify_one(&s_shutdown_checker.signal); } static bool s_has_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_shutdown_checker.is_shutdown_complete; } static void s_aws_wait_for_provider_shutdown_callback(void) { aws_mutex_lock(&s_shutdown_checker.lock); aws_condition_variable_wait_pred( &s_shutdown_checker.signal, &s_shutdown_checker.lock, s_has_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_shutdown_checker.lock); } /* * Helper function that takes a provider, expected results from a credentials query, * and uses the provider testing utils to query the results */ static int s_do_basic_provider_test( struct aws_credentials_provider *provider, int expected_calls, const struct aws_string *expected_access_key_id, const struct aws_string *expected_secret_access_key, const struct aws_string *expected_session_token) { struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, expected_calls); int get_async_result = aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results); ASSERT_TRUE(get_async_result == AWS_OP_SUCCESS); aws_wait_on_credentials_callback(&callback_results); ASSERT_TRUE(callback_results.count == expected_calls); if (callback_results.credentials != NULL && !aws_credentials_is_anonymous(callback_results.credentials)) { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_access_key_id(callback_results.credentials), expected_access_key_id); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(callback_results.credentials), expected_secret_access_key); if (expected_session_token != NULL) { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_session_token(callback_results.credentials), expected_session_token); } else { ASSERT_TRUE(aws_credentials_get_session_token(callback_results.credentials).len == 0); } } else { ASSERT_TRUE(expected_access_key_id == NULL); ASSERT_TRUE(expected_secret_access_key == NULL); ASSERT_TRUE(expected_session_token == NULL); } aws_get_credentials_test_callback_result_clean_up(&callback_results); return AWS_OP_SUCCESS; } static int s_static_credentials_provider_basic_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials_provider_static_options options = { .access_key_id = aws_byte_cursor_from_string(s_access_key_id_test_value), .secret_access_key = aws_byte_cursor_from_string(s_secret_access_key_test_value), .session_token = aws_byte_cursor_from_string(s_session_token_test_value), .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; s_aws_credentials_shutdown_checker_init(); struct aws_credentials_provider *provider = aws_credentials_provider_new_static(allocator, &options); ASSERT_TRUE( s_do_basic_provider_test( provider, 1, s_access_key_id_test_value, s_secret_access_key_test_value, s_session_token_test_value) == AWS_OP_SUCCESS); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); return 0; } 
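/*
 * Most provider tests in this file share the same lifecycle: initialize the shutdown checker, construct a
 * provider, query it through s_do_basic_provider_test(), release the provider, and then block in
 * s_aws_wait_for_provider_shutdown_callback() so the test does not tear down shared state while the
 * provider's asynchronous shutdown is still in flight.
 */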
AWS_TEST_CASE(static_credentials_provider_basic_test, s_static_credentials_provider_basic_test); static int s_anonymous_credentials_provider_basic_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials_provider_shutdown_options shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }; s_aws_credentials_shutdown_checker_init(); struct aws_credentials_provider *provider = aws_credentials_provider_new_anonymous(allocator, &shutdown_options); ASSERT_TRUE(s_do_basic_provider_test(provider, 1, NULL, NULL, NULL) == AWS_OP_SUCCESS); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); /* Check that NULL works for the optional shutdown options */ provider = aws_credentials_provider_new_anonymous(allocator, NULL); aws_credentials_provider_release(provider); return 0; } AWS_TEST_CASE(anonymous_credentials_provider_basic_test, s_anonymous_credentials_provider_basic_test); static int s_environment_credentials_provider_basic_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_credentials_shutdown_checker_init(); aws_set_environment_value(s_access_key_id_env_var, s_access_key_id_test_value); aws_set_environment_value(s_secret_access_key_env_var, s_secret_access_key_test_value); aws_set_environment_value(s_session_token_env_var, s_session_token_test_value); struct aws_credentials_provider_environment_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_environment(allocator, &options); ASSERT_TRUE( s_do_basic_provider_test( provider, 1, s_access_key_id_test_value, s_secret_access_key_test_value, s_session_token_test_value) == AWS_OP_SUCCESS); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); return 0; } AWS_TEST_CASE(environment_credentials_provider_basic_test, s_environment_credentials_provider_basic_test); static int s_environment_credentials_provider_empty_env_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_credentials_shutdown_checker_init(); struct aws_string *empty = aws_string_new_from_c_str(allocator, ""); aws_set_environment_value(s_access_key_id_env_var, empty); aws_set_environment_value(s_secret_access_key_env_var, empty); aws_set_environment_value(s_session_token_env_var, empty); struct aws_credentials_provider_environment_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_environment(allocator, &options); /* Instead of getting an empty credentials, should just fail to fetch credentials */ ASSERT_TRUE(s_do_basic_provider_test(provider, 1, NULL, NULL, NULL) == AWS_OP_SUCCESS); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); aws_string_destroy(empty); s_aws_credentials_shutdown_checker_clean_up(); return 0; } AWS_TEST_CASE(environment_credentials_provider_empty_env_test, s_environment_credentials_provider_empty_env_test); static int s_do_environment_credentials_provider_failure(struct aws_allocator *allocator) { s_aws_credentials_shutdown_checker_init(); aws_unset_environment_value(s_access_key_id_env_var); aws_unset_environment_value(s_secret_access_key_env_var); 
aws_unset_environment_value(s_session_token_env_var); struct aws_credentials_provider_environment_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_environment(allocator, &options); ASSERT_TRUE(s_do_basic_provider_test(provider, 1, NULL, NULL, NULL) == AWS_OP_SUCCESS); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); return 0; } /* * Set of related tests that all check and make sure that if you don't specify enough * of the credentials data in the environment, you get nothing when you query an * environment provider. */ static int s_environment_credentials_provider_negative_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* nothing in the environment */ ASSERT_TRUE(s_do_environment_credentials_provider_failure(allocator) == 0); /* access key only shouldn't work */ aws_set_environment_value(s_access_key_id_env_var, s_access_key_id_test_value); ASSERT_TRUE(s_do_environment_credentials_provider_failure(allocator) == 0); /* secret key only shouldn't work either */ aws_unset_environment_value(s_access_key_id_env_var); aws_set_environment_value(s_secret_access_key_env_var, s_secret_access_key_test_value); ASSERT_TRUE(s_do_environment_credentials_provider_failure(allocator) == 0); return 0; } AWS_TEST_CASE(environment_credentials_provider_negative_test, s_environment_credentials_provider_negative_test); #define TEST_CACHE_REFRESH_TIME_MS 10000 AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_1, "AccessKey1"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_1, "SecretKey1"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_1, "SessionToken1"); AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_2, "AccessKey2"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_2, "SecretKey2"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_2, "SessionToken2"); int s_wait_for_get_credentials(struct aws_get_credentials_test_callback_result *callback_results) { aws_wait_on_credentials_callback(callback_results); return 0; } int s_invoke_get_credentials( struct aws_credentials_provider *provider, struct aws_get_credentials_test_callback_result *callback_results, int call_count) { aws_get_credentials_test_callback_result_init(callback_results, call_count); for (int i = 0; i < call_count; ++i) { int get_async_result = aws_credentials_provider_get_credentials( provider, aws_test_get_credentials_async_callback, callback_results); ASSERT_TRUE(get_async_result == AWS_OP_SUCCESS); } return 0; } #define ASYNC_TEST_DELAY_NS 1000000 int s_wait_for_get_credentials_with_mock_async_provider( struct aws_get_credentials_test_callback_result *callback_results, struct aws_credentials_provider *mock_async_provider) { /* Mock provider already has credentials, but won't invoke any * get-credentials callbacks until this function is called. * The callbacks will fire on another thread. 
*/ aws_credentials_provider_mock_async_fire_callbacks(mock_async_provider); /* Wait for all queued get-credentials callbacks to fire */ aws_wait_on_credentials_callback(callback_results); return 0; } static int s_verify_callback_status( struct aws_get_credentials_test_callback_result *results, int expected_call_count, const struct aws_string *expected_access_key_id, const struct aws_string *expected_secret_access_key, const struct aws_string *expected_session_token) { aws_mutex_lock(&results->sync); ASSERT_TRUE(results->count == expected_call_count); if (expected_access_key_id == NULL) { ASSERT_NULL(results->credentials); } else { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_access_key_id(results->credentials), expected_access_key_id); } if (expected_secret_access_key == NULL) { ASSERT_NULL(results->credentials); } else { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(results->credentials), expected_secret_access_key); } if (expected_session_token != NULL) { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_session_token(results->credentials), expected_session_token); } aws_mutex_unlock(&results->sync); return 0; } static int s_cached_credentials_provider_elapsed_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; mock_aws_set_system_time(0); mock_aws_set_high_res_time(1); s_aws_credentials_shutdown_checker_init(); struct aws_credentials *first_creds = aws_credentials_new_from_string( allocator, s_access_key_id_1, s_secret_access_key_1, s_session_token_1, UINT64_MAX); struct aws_credentials *second_creds = aws_credentials_new_from_string( allocator, s_access_key_id_2, s_secret_access_key_2, s_session_token_2, UINT64_MAX); struct get_credentials_mock_result mock_results[] = { {.error_code = 0, .credentials = first_creds}, {.error_code = 0, .credentials = second_creds}, }; struct aws_credentials_provider_shutdown_options shutdown_options = { .shutdown_callback = NULL, .shutdown_user_data = NULL, }; struct aws_credentials_provider *mock_provider = aws_credentials_provider_new_mock(allocator, mock_results, 2, &shutdown_options); struct aws_credentials_provider_cached_options options; AWS_ZERO_STRUCT(options); options.source = mock_provider; options.refresh_time_in_milliseconds = TEST_CACHE_REFRESH_TIME_MS; options.high_res_clock_fn = mock_aws_get_high_res_time; options.system_clock_fn = mock_aws_get_system_time; options.shutdown_options.shutdown_callback = s_on_shutdown_complete; options.shutdown_options.shutdown_user_data = NULL; struct aws_credentials_provider *cached_provider = aws_credentials_provider_new_cached(allocator, &options); aws_credentials_provider_release(mock_provider); struct aws_get_credentials_test_callback_result callback_results; ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Invoke a couple more times to verify the mock isn't getting called */ aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); aws_get_credentials_test_callback_result_clean_up(&callback_results); 
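/* Third fetch: the mocked clocks have not been advanced yet, so the cached credentials should be served again without re-querying the mock source. */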
ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Advance time, but not enough to cause a cache expiration, verify everything's the same */ uint64_t refresh_in_ns = aws_timestamp_convert(TEST_CACHE_REFRESH_TIME_MS, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); uint64_t now = 0; mock_aws_get_high_res_time(&now); mock_aws_set_high_res_time(now + refresh_in_ns - 1); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Advance time enough to cause cache expiration, verify we get the second set of mocked credentials */ mock_aws_set_high_res_time(now + refresh_in_ns); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_2, s_secret_access_key_2, s_session_token_2) == 0); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(cached_provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); aws_credentials_release(second_creds); aws_credentials_release(first_creds); return 0; } AWS_TEST_CASE(cached_credentials_provider_elapsed_test, s_cached_credentials_provider_elapsed_test); #define TEST_CACHED_CREDENTIALS_EXPIRATION_TIMEPOINT 3600 static int s_cached_credentials_provider_expired_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; mock_aws_set_system_time(0); mock_aws_set_high_res_time(1); s_aws_credentials_shutdown_checker_init(); struct aws_credentials *first_creds = aws_credentials_new_from_string( allocator, s_access_key_id_1, s_secret_access_key_1, s_session_token_1, TEST_CACHED_CREDENTIALS_EXPIRATION_TIMEPOINT); struct aws_credentials *second_creds = aws_credentials_new_from_string( allocator, s_access_key_id_2, s_secret_access_key_2, s_session_token_2, TEST_CACHED_CREDENTIALS_EXPIRATION_TIMEPOINT * 2); struct get_credentials_mock_result mock_results[] = { {.error_code = 0, .credentials = first_creds}, {.error_code = 0, .credentials = second_creds}, }; struct aws_credentials_provider_shutdown_options shutdown_options; AWS_ZERO_STRUCT(shutdown_options); struct aws_credentials_provider *mock_provider = aws_credentials_provider_new_mock(allocator, mock_results, 2, &shutdown_options); struct aws_credentials_provider_cached_options options; AWS_ZERO_STRUCT(options); options.source = mock_provider; options.refresh_time_in_milliseconds = TEST_CACHE_REFRESH_TIME_MS; options.high_res_clock_fn = mock_aws_get_high_res_time; options.system_clock_fn = mock_aws_get_system_time; options.shutdown_options.shutdown_callback = s_on_shutdown_complete; options.shutdown_options.shutdown_user_data = NULL; struct aws_credentials_provider *cached_provider = aws_credentials_provider_new_cached(allocator, &options); aws_credentials_provider_release(mock_provider); struct aws_get_credentials_test_callback_result callback_results; 
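/* First fetch goes through to the mock source and primes the cache with the first credential set. */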
ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Invoke a couple more times to verify the mock isn't getting called */ aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Advance time enough to cause a refresh from the caching provider's perspective, but not enough to expire the * actual credentials. Nothing should change because the credential's expiration takes priority. */ uint64_t provider_refresh_in_ns = aws_timestamp_convert(TEST_CACHE_REFRESH_TIME_MS, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); uint64_t now = 0; mock_aws_get_high_res_time(&now); mock_aws_set_high_res_time(now + provider_refresh_in_ns); mock_aws_get_system_time(&now); mock_aws_set_system_time(now + provider_refresh_in_ns); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Advance time enough to trigger credentials expiration, verify we get the second set of mocked credentials */ uint64_t credential_expiration_in_ns = aws_timestamp_convert( TEST_CACHED_CREDENTIALS_EXPIRATION_TIMEPOINT, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); mock_aws_get_high_res_time(&now); mock_aws_set_high_res_time(now + credential_expiration_in_ns); mock_aws_get_system_time(&now); mock_aws_set_system_time(now + credential_expiration_in_ns); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 1) == 0); ASSERT_TRUE(s_wait_for_get_credentials(&callback_results) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 1, s_access_key_id_2, s_secret_access_key_2, s_session_token_2) == 0); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(cached_provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); aws_credentials_release(second_creds); aws_credentials_release(first_creds); return 0; } AWS_TEST_CASE(cached_credentials_provider_expired_test, s_cached_credentials_provider_expired_test); static int s_cached_credentials_provider_queued_async_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); s_aws_credentials_shutdown_checker_init(); mock_aws_set_system_time(0); mock_aws_set_high_res_time(1); struct aws_credentials *first_creds = aws_credentials_new_from_string( allocator, s_access_key_id_1, s_secret_access_key_1, s_session_token_1, UINT64_MAX); 
struct aws_credentials *second_creds = aws_credentials_new_from_string( allocator, s_access_key_id_2, s_secret_access_key_2, s_session_token_2, UINT64_MAX); struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct get_credentials_mock_result mock_results[] = { {.error_code = 0, .credentials = first_creds}, {.error_code = 0, .credentials = second_creds}, }; struct aws_credentials_provider_shutdown_options shutdown_options; AWS_ZERO_STRUCT(shutdown_options); struct aws_credentials_provider *mock_provider = aws_credentials_provider_new_mock_async(allocator, mock_results, 2, event_loop_group, &shutdown_options); struct aws_credentials_provider_cached_options options; AWS_ZERO_STRUCT(options); options.source = mock_provider; options.refresh_time_in_milliseconds = TEST_CACHE_REFRESH_TIME_MS; options.high_res_clock_fn = mock_aws_get_high_res_time; options.system_clock_fn = mock_aws_get_system_time; options.shutdown_options.shutdown_callback = s_on_shutdown_complete; options.shutdown_options.shutdown_user_data = NULL; struct aws_credentials_provider *cached_provider = aws_credentials_provider_new_cached(allocator, &options); aws_credentials_provider_release(mock_provider); struct aws_get_credentials_test_callback_result callback_results; ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 2) == 0); ASSERT_TRUE(s_wait_for_get_credentials_with_mock_async_provider(&callback_results, mock_provider) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 2, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Invoke a couple more times to verify the mock isn't getting called */ aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 2) == 0); ASSERT_TRUE(s_wait_for_get_credentials_with_mock_async_provider(&callback_results, mock_provider) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 2, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 2) == 0); ASSERT_TRUE(s_wait_for_get_credentials_with_mock_async_provider(&callback_results, mock_provider) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 2, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Advance time, but not enough to cause a cache expiration, verify everything's the same */ uint64_t refresh_in_ns = aws_timestamp_convert(TEST_CACHE_REFRESH_TIME_MS, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); uint64_t now = 0; mock_aws_get_high_res_time(&now); mock_aws_set_high_res_time(now + refresh_in_ns - 1); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 2) == 0); ASSERT_TRUE(s_wait_for_get_credentials_with_mock_async_provider(&callback_results, mock_provider) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 2, s_access_key_id_1, s_secret_access_key_1, s_session_token_1) == 0); /* * Advance time enough to cause cache expiration, verify we get the second set of mocked credentials */ mock_aws_set_high_res_time(now + refresh_in_ns); aws_get_credentials_test_callback_result_clean_up(&callback_results); ASSERT_TRUE(s_invoke_get_credentials(cached_provider, &callback_results, 2) == 0); 
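/* The refresh window has now elapsed, so these queued fetches should go back to the async mock and come back with the second credential set. */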
ASSERT_TRUE(s_wait_for_get_credentials_with_mock_async_provider(&callback_results, mock_provider) == 0); ASSERT_TRUE( s_verify_callback_status(&callback_results, 2, s_access_key_id_2, s_secret_access_key_2, s_session_token_2) == 0); aws_credentials_provider_release(cached_provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_release(second_creds); aws_credentials_release(first_creds); aws_event_loop_group_release(event_loop_group); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE(cached_credentials_provider_queued_async_test, s_cached_credentials_provider_queued_async_test); static int s_profile_credentials_provider_new_destroy_defaults_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; s_aws_credentials_shutdown_checker_init(); struct aws_credentials_provider_profile_options options; AWS_ZERO_STRUCT(options); options.shutdown_options.shutdown_callback = s_on_shutdown_complete; options.shutdown_options.shutdown_user_data = NULL; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); aws_credentials_provider_release(provider); if (provider) { s_aws_wait_for_provider_shutdown_callback(); } s_aws_credentials_shutdown_checker_clean_up(); return 0; } AWS_TEST_CASE( profile_credentials_provider_new_destroy_defaults_test, s_profile_credentials_provider_new_destroy_defaults_test); AWS_STATIC_STRING_FROM_LITERAL(s_config_file_path, "~derp/.aws/config"); AWS_STATIC_STRING_FROM_LITERAL(s_credentials_file_path, "/Ithink/globalpaths/arebroken/.aws/credentials"); AWS_STATIC_STRING_FROM_LITERAL(s_profile_name, "notdefault"); static int s_profile_credentials_provider_new_destroy_overrides_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; s_aws_credentials_shutdown_checker_init(); struct aws_credentials_provider_profile_options options; AWS_ZERO_STRUCT(options); options.config_file_name_override = aws_byte_cursor_from_string(s_config_file_path); options.credentials_file_name_override = aws_byte_cursor_from_string(s_credentials_file_path); options.profile_name_override = aws_byte_cursor_from_string(s_profile_name); options.shutdown_options.shutdown_callback = s_on_shutdown_complete; options.shutdown_options.shutdown_user_data = NULL; struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); return 0; } AWS_TEST_CASE( profile_credentials_provider_new_destroy_overrides_test, s_profile_credentials_provider_new_destroy_overrides_test); typedef int(s_verify_credentials_callback_fn)(struct aws_get_credentials_test_callback_result *callback_results); static int s_do_credentials_provider_profile_test( struct aws_allocator *allocator, const struct aws_string *config_file_path, const struct aws_string *config_contents, const struct aws_string *creds_file_path, const struct aws_string *credentials_contents, struct aws_credentials_provider_profile_options *options, s_verify_credentials_callback_fn verifier, bool reset_environment) { s_aws_credentials_shutdown_checker_init(); int result = AWS_OP_ERR; if (reset_environment) { /* Zero out all of the environment variables, just in case the user has it set (other tests may re-set it) */ aws_unset_environment_value(s_default_profile_env_variable_name); 
aws_unset_environment_value(s_default_config_path_env_variable_name); aws_unset_environment_value(s_default_credentials_path_env_variable_name); } if (aws_create_profile_file(config_file_path, config_contents) || aws_create_profile_file(creds_file_path, credentials_contents)) { return AWS_OP_ERR; } struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, options); if (provider == NULL) { return AWS_OP_ERR; } struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); int get_async_result = aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results); if (get_async_result == AWS_OP_SUCCESS) { aws_wait_on_credentials_callback(&callback_results); result = verifier(&callback_results); } aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); return result; } AWS_STATIC_STRING_FROM_LITERAL( s_config_contents, "[profile default]\naws_access_key_id=fake_access_key\naws_secret_access_key=fake_secret_key\n"); AWS_STATIC_STRING_FROM_LITERAL( s_config_contents2, "[profile default]\naws_access_key_id=fake_access_key2\naws_secret_access_key=fake_secret_key2\n"); AWS_STATIC_STRING_FROM_LITERAL( s_credentials_contents, "[foo]\naws_access_key_id=foo_access\naws_secret_access_key=foo_secret\naws_session_token=foo_session\n"); AWS_STATIC_STRING_FROM_LITERAL( s_credentials_contents2, "[foo]\naws_access_key_id=foo_access2\naws_secret_access_key=foo_secret2\naws_session_token=foo_session2\n"); AWS_STATIC_STRING_FROM_LITERAL(s_fake_access, "fake_access_key"); AWS_STATIC_STRING_FROM_LITERAL(s_fake_secret, "fake_secret_key"); int s_verify_default_credentials_callback(struct aws_get_credentials_test_callback_result *callback_results) { ASSERT_TRUE(callback_results->count == 1); ASSERT_TRUE(callback_results->credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(callback_results->credentials), s_fake_access); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(callback_results->credentials), s_fake_secret); ASSERT_TRUE(aws_credentials_get_session_token(callback_results->credentials).len == 0); return AWS_OP_SUCCESS; } static int s_profile_credentials_provider_default_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; ASSERT_SUCCESS(s_do_credentials_provider_profile_test( allocator, config_file_str, s_config_contents, creds_file_str, s_credentials_contents, &options, s_verify_default_credentials_callback, true)); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); return AWS_OP_SUCCESS; } AWS_TEST_CASE(profile_credentials_provider_default_test, s_profile_credentials_provider_default_test); static int s_profile_credentials_provider_cached_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *config_file_str = 
aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); if (aws_create_profile_file(config_file_str, s_config_contents) || aws_create_profile_file(creds_file_str, s_credentials_contents)) { return AWS_OP_ERR; } struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; /* Read the config files */ struct aws_profile_collection *config_profiles = NULL; struct aws_profile_collection *credentials_profiles = NULL; struct aws_profile_collection *merged_profiles = NULL; struct aws_string *credentials_file_path = NULL; struct aws_string *config_file_path = NULL; struct aws_string *profile_name = NULL; credentials_file_path = aws_get_credentials_file_path(allocator, &options.credentials_file_name_override); ASSERT_NOT_NULL(credentials_file_path); config_file_path = aws_get_config_file_path(allocator, &options.config_file_name_override); ASSERT_NOT_NULL(config_file_path); profile_name = aws_get_profile_name(allocator, &options.profile_name_override); ASSERT_NOT_NULL(profile_name); config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG); ASSERT_NOT_NULL(config_profiles); credentials_profiles = aws_profile_collection_new_from_file(allocator, credentials_file_path, AWS_PST_CREDENTIALS); ASSERT_NOT_NULL(credentials_profiles); merged_profiles = aws_profile_collection_new_from_merge(allocator, config_profiles, credentials_profiles); ASSERT_NOT_NULL(merged_profiles); aws_profile_collection_release(config_profiles); aws_profile_collection_release(credentials_profiles); options.profile_collection_cached = merged_profiles; s_aws_credentials_shutdown_checker_init(); /* Update profile and config file */ if (aws_create_profile_file(config_file_str, s_config_contents2) || aws_create_profile_file(creds_file_str, s_credentials_contents2)) { return AWS_OP_ERR; } struct aws_credentials_provider *provider = aws_credentials_provider_new_profile(allocator, &options); ASSERT_NOT_NULL(provider); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_SUCCESS( aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_wait_on_credentials_callback(&callback_results); ASSERT_SUCCESS(s_verify_default_credentials_callback(&callback_results)); aws_get_credentials_test_callback_result_clean_up(&callback_results); /* Fetch the credentials again */ aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_SUCCESS( aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_wait_on_credentials_callback(&callback_results); /* assert that credentials are not changed */ ASSERT_SUCCESS(s_verify_default_credentials_callback(&callback_results)); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); aws_profile_collection_release(merged_profiles); aws_string_destroy(credentials_file_path); aws_string_destroy(config_file_path); 
aws_string_destroy(profile_name); return AWS_OP_SUCCESS; } AWS_TEST_CASE(profile_credentials_provider_cached_test, s_profile_credentials_provider_cached_test); AWS_STATIC_STRING_FROM_LITERAL(s_foo_profile, "foo"); AWS_STATIC_STRING_FROM_LITERAL(s_foo_access, "foo_access"); AWS_STATIC_STRING_FROM_LITERAL(s_foo_secret, "foo_secret"); AWS_STATIC_STRING_FROM_LITERAL(s_foo_session, "foo_session"); int s_verify_nondefault_credentials_callback(struct aws_get_credentials_test_callback_result *callback_results) { ASSERT_TRUE(callback_results->count == 1); ASSERT_TRUE(callback_results->credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(callback_results->credentials), s_foo_access); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(callback_results->credentials), s_foo_secret); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(callback_results->credentials), s_foo_session); return AWS_OP_SUCCESS; } static int s_profile_credentials_provider_nondefault_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); struct aws_credentials_provider_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .credentials_file_name_override = aws_byte_cursor_from_string(creds_file_str), .profile_name_override = aws_byte_cursor_from_string(s_foo_profile), .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; ASSERT_SUCCESS(s_do_credentials_provider_profile_test( allocator, config_file_str, s_config_contents, creds_file_str, s_credentials_contents, &options, s_verify_nondefault_credentials_callback, true)); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); return AWS_OP_SUCCESS; } AWS_TEST_CASE(profile_credentials_provider_nondefault_test, s_profile_credentials_provider_nondefault_test); static int s_profile_credentials_provider_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* * Force a profile switch via environment variable */ aws_set_environment_value(s_default_profile_env_variable_name, s_foo_profile); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_string *creds_file_str = aws_create_process_unique_file_name(allocator); /* * Redirect config and credentials files by environment */ aws_set_environment_value(s_default_config_path_env_variable_name, config_file_str); aws_set_environment_value(s_default_credentials_path_env_variable_name, creds_file_str); struct aws_credentials_provider_profile_options options = { .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; ASSERT_SUCCESS(s_do_credentials_provider_profile_test( allocator, config_file_str, s_config_contents, creds_file_str, s_credentials_contents, &options, s_verify_nondefault_credentials_callback, false)); aws_string_destroy(config_file_str); aws_string_destroy(creds_file_str); return AWS_OP_SUCCESS; } AWS_TEST_CASE(profile_credentials_provider_environment_test, s_profile_credentials_provider_environment_test); AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_value1, "Access1"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_value1, "Secret1"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_value1, "Session1"); AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_value2, 
"Access2"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_value2, "Secret2"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_value2, "Session2"); static int s_do_provider_chain_test( struct aws_allocator *allocator, struct aws_credentials_provider *provider1, struct aws_credentials_provider *provider2, s_verify_credentials_callback_fn verifier) { s_aws_credentials_shutdown_checker_init(); struct aws_credentials_provider *providers[2] = {provider1, provider2}; struct aws_credentials_provider_chain_options options; AWS_ZERO_STRUCT(options); options.providers = providers; options.provider_count = 2; options.shutdown_options.shutdown_callback = s_on_shutdown_complete; options.shutdown_options.shutdown_user_data = NULL; struct aws_credentials_provider *provider_chain = aws_credentials_provider_new_chain(allocator, &options); aws_credentials_provider_release(provider1); aws_credentials_provider_release(provider2); if (provider_chain == NULL) { return 0; } struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); int get_async_result = aws_credentials_provider_get_credentials( provider_chain, aws_test_get_credentials_async_callback, &callback_results); int verification_result = AWS_OP_ERR; if (get_async_result == AWS_OP_SUCCESS) { aws_wait_on_credentials_callback(&callback_results); verification_result = verifier(&callback_results); } aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(provider_chain); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); return verification_result; } int s_verify_first_credentials_callback(struct aws_get_credentials_test_callback_result *callback_results) { ASSERT_TRUE(callback_results->count == 1); ASSERT_TRUE(callback_results->credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_access_key_id(callback_results->credentials), s_access_key_id_value1); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_secret_access_key(callback_results->credentials), s_secret_access_key_value1); ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_credentials_get_session_token(callback_results->credentials), s_session_token_value1); return AWS_OP_SUCCESS; } static int s_credentials_provider_first_in_chain_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials_provider_static_options options1 = { .access_key_id = aws_byte_cursor_from_string(s_access_key_id_value1), .secret_access_key = aws_byte_cursor_from_string(s_secret_access_key_value1), .session_token = aws_byte_cursor_from_string(s_session_token_value1), }; struct aws_credentials_provider_static_options options2 = { .access_key_id = aws_byte_cursor_from_string(s_access_key_id_value2), .secret_access_key = aws_byte_cursor_from_string(s_secret_access_key_value2), .session_token = aws_byte_cursor_from_string(s_session_token_value2), }; return s_do_provider_chain_test( allocator, aws_credentials_provider_new_static(allocator, &options1), aws_credentials_provider_new_static(allocator, &options2), s_verify_first_credentials_callback); } AWS_TEST_CASE(credentials_provider_first_in_chain_test, s_credentials_provider_first_in_chain_test); AWS_STATIC_STRING_FROM_LITERAL(s_access2, "Access2"); AWS_STATIC_STRING_FROM_LITERAL(s_secret2, "Secret2"); AWS_STATIC_STRING_FROM_LITERAL(s_session2, "Session2"); int s_verify_second_credentials_callback(struct aws_get_credentials_test_callback_result *callback_results) { 
ASSERT_TRUE(callback_results->count == 1); ASSERT_TRUE(callback_results->credentials != NULL); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_access_key_id(callback_results->credentials), s_access2); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_secret_access_key(callback_results->credentials), s_secret2); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_session_token(callback_results->credentials), s_session2); return AWS_OP_SUCCESS; } static int s_credentials_provider_second_in_chain_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials_provider_shutdown_options null_options; AWS_ZERO_STRUCT(null_options); struct aws_credentials_provider_static_options options = { .access_key_id = aws_byte_cursor_from_string(s_access_key_id_value2), .secret_access_key = aws_byte_cursor_from_string(s_secret_access_key_value2), .session_token = aws_byte_cursor_from_string(s_session_token_value2), }; return s_do_provider_chain_test( allocator, aws_credentials_provider_new_null(allocator, &null_options), aws_credentials_provider_new_static(allocator, &options), s_verify_second_credentials_callback); } AWS_TEST_CASE(credentials_provider_second_in_chain_test, s_credentials_provider_second_in_chain_test); int s_verify_null_credentials_callback(struct aws_get_credentials_test_callback_result *callback_results) { ASSERT_TRUE(callback_results->count == 1); ASSERT_TRUE(callback_results->credentials == NULL); return AWS_OP_SUCCESS; } static int s_credentials_provider_null_chain_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials_provider_shutdown_options null_options; AWS_ZERO_STRUCT(null_options); return s_do_provider_chain_test( allocator, aws_credentials_provider_new_null(allocator, &null_options), aws_credentials_provider_new_null(allocator, &null_options), s_verify_null_credentials_callback); } AWS_TEST_CASE(credentials_provider_null_chain_test, s_credentials_provider_null_chain_test); static int s_credentials_provider_default_test(struct aws_allocator *allocator, bool manual_tls) { aws_auth_library_init(allocator); s_aws_credentials_shutdown_checker_init(); /* * Do a basic environment provider test, but use the default provider chain */ aws_set_environment_value(s_access_key_id_env_var, s_access_key_id_test_value); aws_set_environment_value(s_secret_access_key_env_var, s_secret_access_key_test_value); aws_set_environment_value(s_session_token_env_var, s_session_token_test_value); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 4, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .host_resolver = resolver, .on_shutdown_complete = NULL, .host_resolution_config = NULL, .user_data = NULL, .event_loop_group = el_group, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(bootstrap); struct aws_tls_ctx *tls_ctx = NULL; if (manual_tls) { struct aws_tls_ctx_options tls_options; aws_tls_ctx_options_init_default_client(&tls_options, allocator); tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options); ASSERT_NOT_NULL(tls_ctx); aws_tls_ctx_options_clean_up(&tls_options); } struct aws_credentials_provider_chain_default_options options = { .bootstrap = bootstrap, .tls_ctx = tls_ctx, .shutdown_options = { .shutdown_callback = 
s_on_shutdown_complete, .shutdown_user_data = NULL, }, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_chain_default(allocator, &options); /* release tls_ctx early to prove that provider acquired a reference */ if (tls_ctx) { aws_tls_ctx_release(tls_ctx); tls_ctx = NULL; } ASSERT_TRUE( s_do_basic_provider_test( provider, 1, s_access_key_id_test_value, s_secret_access_key_test_value, s_session_token_test_value) == AWS_OP_SUCCESS); /* * Verify that there's some caching before the environment by modifying the environment and requerying */ aws_set_environment_value(s_access_key_id_env_var, s_access_key_id_1); aws_set_environment_value(s_secret_access_key_env_var, s_secret_access_key_1); aws_set_environment_value(s_session_token_env_var, s_session_token_1); ASSERT_TRUE( s_do_basic_provider_test( provider, 1, s_access_key_id_test_value, s_secret_access_key_test_value, s_session_token_test_value) == AWS_OP_SUCCESS); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_auth_library_clean_up(); return 0; } static int s_credentials_provider_default_basic_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_credentials_provider_default_test(allocator, false /*manual_tls*/); } AWS_TEST_CASE(credentials_provider_default_basic_test, s_credentials_provider_default_basic_test); static int s_credentials_provider_default_manual_tls_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_credentials_provider_default_test(allocator, true /*manual_tls*/); } AWS_TEST_CASE(credentials_provider_default_manual_tls_test, s_credentials_provider_default_manual_tls_test); static int s_credentials_provider_default_chain_disable_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); s_aws_credentials_shutdown_checker_init(); /* * Set the environment variable values, but make sure they are not used when environment credentials provider is * disabled. 
*/ aws_set_environment_value(s_access_key_id_env_var, s_access_key_id_test_value); aws_set_environment_value(s_secret_access_key_env_var, s_secret_access_key_test_value); aws_set_environment_value(s_session_token_env_var, s_session_token_test_value); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 4, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .host_resolver = resolver, .on_shutdown_complete = NULL, .host_resolution_config = NULL, .user_data = NULL, .event_loop_group = el_group, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(bootstrap); struct aws_byte_buf config_profile_collection_buf; AWS_ZERO_STRUCT(config_profile_collection_buf); /* Override profile with an empty buffer to prevent sourcing valid credentials from the profile */ struct aws_profile_collection *config_profile_collection = aws_profile_collection_new_from_buffer(allocator, &config_profile_collection_buf, AWS_PST_CONFIG); ASSERT_NOT_NULL(config_profile_collection); struct aws_credentials_provider_chain_default_options options = { .bootstrap = bootstrap, .shutdown_options = { .shutdown_callback = s_on_shutdown_complete, .shutdown_user_data = NULL, }, .skip_environment_credentials_provider = true, .profile_collection_cached = config_profile_collection, }; struct aws_credentials_provider *provider = aws_credentials_provider_new_chain_default(allocator, &options); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); int get_async_result = aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results); ASSERT_TRUE(get_async_result == AWS_OP_SUCCESS); aws_wait_on_credentials_callback(&callback_results); /* Assert that no credentials were sourced from the environment */ ASSERT_NULL(callback_results.credentials); ASSERT_TRUE(callback_results.last_error != AWS_OP_SUCCESS); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_credentials_shutdown_checker_clean_up(); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_profile_collection_release(config_profile_collection); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE( credentials_provider_default_chain_disable_environment_test, s_credentials_provider_default_chain_disable_environment_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/000077500000000000000000000000001456575232400223315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/000077500000000000000000000000001456575232400236445ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/000077500000000000000000000000001456575232400300235ustar00rootroot0000000000000011cd8328d88b2c5080756a1090abceaf087d8b18000066400000000000000000000000121456575232400352320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profileooc 
eflifM2a09998d72f4dc2dfb21a20d285a9dab2efc5f0b000066400000000000000000000000021456575232400356070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profileoc84a516841ba77a5b4648de2cd0dfcb30ea46dbb4000066400000000000000000000000011456575232400355220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profileca5d8336f1bc6fbc70180b2926691f89300355aa2000066400000000000000000000000031456575232400350720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profileoocaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/adjacent_comment000066400000000000000000000001011456575232400332310ustar00rootroot00000000000000[profile foo]; Adjacent semicolons [profile bar]# Adjacent pound aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/blank_lines000066400000000000000000000000711456575232400322250ustar00rootroot00000000000000[profile foo] name = value [profile bar] continuation_reseton_new_profile000066400000000000000000000000651456575232400365320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[profile foo] name = value [profile foo] -continuedaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/continued_property000066400000000000000000000000401456575232400336740ustar00rootroot00000000000000[bar] name = value -continued continued_property_pound_comment000066400000000000000000000001271456575232400365520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[foo] name = value -continued # Comment [bar] name2 = value2 -continued ; Comment continued_property_trim000066400000000000000000000000461456575232400346560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[foo] name = value -continued aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/default_profile000066400000000000000000000000701456575232400331070ustar00rootroot00000000000000[default] name2 = value2 [profile default] name = valueaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/duplicate_profiles000066400000000000000000000000511456575232400336170ustar00rootroot00000000000000[foo] name = value [foo] name2 = value2 duplicate_properties000066400000000000000000000001041456575232400341100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[foo] name = value name2 = value2 name = value2 [foo] name2 = valuee700967c6b4f6efa44f1e1573e2ae868623800ec000066400000000000000000000000061456575232400352550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profileooc fMaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/early_property000066400000000000000000000000361456575232400330250ustar00rootroot00000000000000bad=value [default] good=valueaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/empty_comment000066400000000000000000000000371456575232400326260ustar00rootroot00000000000000; [profile foo]; name = value ;aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/empty_profile000066400000000000000000000000161456575232400326210ustar00rootroot00000000000000[profile foo] 
empty_profile_whitespace000066400000000000000000000000171456575232400347570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[profile foo ] aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/empty_property000066400000000000000000000000301456575232400330410ustar00rootroot00000000000000[profile foobar] name = aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/empty_sub_property000066400000000000000000000000301456575232400337120ustar00rootroot00000000000000[default] s3 = name = equal_containing_property000066400000000000000000000000341456575232400351500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[profile foo] name = val=ue illegal_continuation1000066400000000000000000000000411456575232400341460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[default] s3 = badcontinuation illegal_continuation2000066400000000000000000000000441456575232400341520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[default] s3 = ^^badcontinuation illegal_continuation3000066400000000000000000000000301456575232400341460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[default] s3 = =value aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/missing_assignment000066400000000000000000000000201456575232400336370ustar00rootroot00000000000000[default] bad aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/missing_bracket000066400000000000000000000000251456575232400331070ustar00rootroot00000000000000[default good=value missing_property_key000066400000000000000000000000271456575232400341530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[default] ; hello =bad aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/mixed_comment000066400000000000000000000001051456575232400325720ustar00rootroot00000000000000# Comment [profile foo] ; Comment name = value # Comment with ; sign aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/multi_sub_property000066400000000000000000000000741456575232400337160ustar00rootroot00000000000000[profile default] s3 = name = value name2 = value2 multiline_continued_property000066400000000000000000000000601456575232400357010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[baz] name = value -continued -and-continuedmultiple_empty_profile000066400000000000000000000000521456575232400344550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[profile bar] [profile foo] [profile baz] aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/multiple_profile000066400000000000000000000000701456575232400333160ustar00rootroot00000000000000[profile foo] name = value [profile bar] name2 = value2 aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/multiple_property000066400000000000000000000000521456575232400335420ustar00rootroot00000000000000[profile foo] name = value name2 = value2 
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/pound_comment000066400000000000000000000001051456575232400326110ustar00rootroot00000000000000# Comment [profile foo] # Comment name = value # Comment with # sign aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/semicolon_comment000066400000000000000000000001051456575232400334540ustar00rootroot00000000000000; Comment [profile foo] ; Comment name = value ; Comment with ; sign aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/simple_property000066400000000000000000000000321456575232400331760ustar00rootroot00000000000000[profile foo] name = valueaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/sub_property000066400000000000000000000000421456575232400324770ustar00rootroot00000000000000[profile foo] s3 = name = value aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/tab_empty_profile000066400000000000000000000000151456575232400334460ustar00rootroot00000000000000[profile foo]aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile/trimmable_property000066400000000000000000000000351456575232400336640ustar00rootroot00000000000000[profile foo] name = value value_adjacent_comment000066400000000000000000000001131456575232400343510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_config_profile[foo] name = value; Adjacent semicolons name2 = value# Adjacent pound signsaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/000077500000000000000000000000001456575232400310535ustar00rootroot0000000000000011cd8328d88b2c5080756a1090abceaf087d8b18000066400000000000000000000000121456575232400362620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profileooc eflifM2a09998d72f4dc2dfb21a20d285a9dab2efc5f0b000066400000000000000000000000021456575232400366370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profileoc84a516841ba77a5b4648de2cd0dfcb30ea46dbb4000066400000000000000000000000011456575232400365520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profileca5d8336f1bc6fbc70180b2926691f89300355aa2000066400000000000000000000000031456575232400361220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profileoocadjacent_comment000066400000000000000000000001011456575232400342020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo]; Adjacent semicolons [profile bar]# Adjacent pound aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/blank_lines000066400000000000000000000000711456575232400332550ustar00rootroot00000000000000[profile foo] name = value [profile bar] continuation_reseton_new_profile000066400000000000000000000000651456575232400375620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo] name = value [profile foo] -continuedcontinued_property000066400000000000000000000000401456575232400346450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[bar] name = value -continued 
continued_property_pound_comment000066400000000000000000000001271456575232400376020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[foo] name = value -continued # Comment [bar] name2 = value2 -continued ; Comment continued_property_trim000066400000000000000000000000461456575232400357060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[foo] name = value -continued default_profile000066400000000000000000000000701456575232400340600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default] name2 = value2 [profile default] name = valueduplicate_profiles000066400000000000000000000000511456575232400345700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[foo] name = value [foo] name2 = value2 duplicate_properties000066400000000000000000000001041456575232400351400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[foo] name = value name2 = value2 name = value2 [foo] name2 = valuee700967c6b4f6efa44f1e1573e2ae868623800ec000066400000000000000000000000061456575232400363050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profileooc fMaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/early_property000066400000000000000000000000361456575232400340550ustar00rootroot00000000000000bad=value [default] good=valueaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/empty_comment000066400000000000000000000000371456575232400336560ustar00rootroot00000000000000; [profile foo]; name = value ;aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/empty_profile000066400000000000000000000000161456575232400336510ustar00rootroot00000000000000[profile foo] empty_profile_whitespace000066400000000000000000000000171456575232400360070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo ] aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/empty_property000066400000000000000000000000301456575232400340710ustar00rootroot00000000000000[profile foobar] name = empty_sub_property000066400000000000000000000000301456575232400346630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default] s3 = name = equal_containing_property000066400000000000000000000000341456575232400362000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo] name = val=ue illegal_continuation1000066400000000000000000000000411456575232400351760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default] s3 = badcontinuation illegal_continuation2000066400000000000000000000000441456575232400352020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default] s3 = ^^badcontinuation illegal_continuation3000066400000000000000000000000301456575232400351760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default] s3 = =value 
missing_assignment000066400000000000000000000000201456575232400346100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default] bad missing_bracket000066400000000000000000000000251456575232400340600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default good=value missing_property_key000066400000000000000000000000271456575232400352030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[default] ; hello =bad aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/mixed_comment000066400000000000000000000001051456575232400336220ustar00rootroot00000000000000# Comment [profile foo] ; Comment name = value # Comment with ; sign multi_sub_property000066400000000000000000000000741456575232400346670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile default] s3 = name = value name2 = value2 multiline_continued_property000066400000000000000000000000601456575232400367310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[baz] name = value -continued -and-continuedmultiple_empty_profile000066400000000000000000000000521456575232400355050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile bar] [profile foo] [profile baz] multiple_profile000066400000000000000000000000701456575232400342670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo] name = value [profile bar] name2 = value2 multiple_property000066400000000000000000000000521456575232400345130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo] name = value name2 = value2 aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/pound_comment000066400000000000000000000001051456575232400336410ustar00rootroot00000000000000# Comment [profile foo] # Comment name = value # Comment with # sign semicolon_comment000066400000000000000000000001051456575232400344250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile; Comment [profile foo] ; Comment name = value ; Comment with ; sign simple_property000066400000000000000000000000321456575232400341470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo] name = valueaws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile/sub_property000066400000000000000000000000421456575232400335270ustar00rootroot00000000000000[profile foo] s3 = name = value tab_empty_profile000066400000000000000000000000151456575232400344170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo]trimmable_property000066400000000000000000000000351456575232400346350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[profile foo] name = value value_adjacent_comment000066400000000000000000000001131456575232400354010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/corpus/parse_credentials_profile[foo] name = value; Adjacent 
semicolons name2 = value# Adjacent pound signs
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/parse_config_profile.c000066400000000000000000000013521456575232400266550ustar00rootroot00000000000000
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include #include #include

/* NOLINTNEXTLINE(readability-identifier-naming) */
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
    struct aws_allocator *allocator = aws_default_allocator();
    struct aws_byte_buf buffer;
    buffer.allocator = NULL;
    buffer.buffer = (uint8_t *)data;
    buffer.capacity = size;
    buffer.len = size;
    struct aws_profile_collection *profile_set =
        aws_profile_collection_new_from_buffer(allocator, &buffer, AWS_PST_CONFIG);
    aws_profile_collection_destroy(profile_set);
    return 0;
}
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/fuzz/parse_credentials_profile.c000066400000000000000000000013571456575232400277120ustar00rootroot00000000000000
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include #include #include

/* NOLINTNEXTLINE(readability-identifier-naming) */
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
    struct aws_allocator *allocator = aws_default_allocator();
    struct aws_byte_buf buffer;
    buffer.allocator = NULL;
    buffer.buffer = (uint8_t *)data;
    buffer.capacity = size;
    buffer.len = size;
    struct aws_profile_collection *profile_set =
        aws_profile_collection_new_from_buffer(allocator, &buffer, AWS_PST_CREDENTIALS);
    aws_profile_collection_destroy(profile_set);
    return 0;
}
aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/key_derivation_tests.c000066400000000000000000000273251456575232400257460ustar00rootroot00000000000000
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
*/ #include #include #include #include #include #include struct aws_be_add_one_test { uint8_t *input; size_t input_length; uint8_t *expected_output; size_t expected_output_length; }; static uint8_t add_one_input_1[] = {0x00, 0x00, 0x00}; static uint8_t add_one_expected_output_1[] = {0x00, 0x00, 0x01}; static uint8_t add_one_input_2[] = {0x00, 0x00, 0xFF}; static uint8_t add_one_expected_output_2[] = {0x00, 0x01, 0x00}; static uint8_t add_one_input_3[] = {0x00, 0xFF, 0xFF}; static uint8_t add_one_expected_output_3[] = {0x01, 0x00, 0x00}; static uint8_t add_one_input_4[] = {0xFF, 0xFF, 0xFF, 0xFF}; static uint8_t add_one_expected_output_4[] = {0x00, 0x00, 0x00, 0x00}; static struct aws_be_add_one_test s_be_add_one_test_cases[] = { { .input = add_one_input_1, .input_length = AWS_ARRAY_SIZE(add_one_input_1), .expected_output = add_one_expected_output_1, .expected_output_length = AWS_ARRAY_SIZE(add_one_expected_output_1), }, { .input = add_one_input_2, .input_length = AWS_ARRAY_SIZE(add_one_input_2), .expected_output = add_one_expected_output_2, .expected_output_length = AWS_ARRAY_SIZE(add_one_expected_output_2), }, { .input = add_one_input_3, .input_length = AWS_ARRAY_SIZE(add_one_input_3), .expected_output = add_one_expected_output_3, .expected_output_length = AWS_ARRAY_SIZE(add_one_expected_output_3), }, { .input = add_one_input_4, .input_length = AWS_ARRAY_SIZE(add_one_input_4), .expected_output = add_one_expected_output_4, .expected_output_length = AWS_ARRAY_SIZE(add_one_expected_output_4), }, }; static int s_be_sequence_add_one(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; for (size_t i = 0; i < AWS_ARRAY_SIZE(s_be_add_one_test_cases); ++i) { struct aws_be_add_one_test *test_case = &s_be_add_one_test_cases[i]; struct aws_byte_buf input = { .len = test_case->input_length, .buffer = test_case->input, .capacity = test_case->input_length, .allocator = NULL, }; aws_be_bytes_add_one_constant_time(&input); ASSERT_BIN_ARRAYS_EQUALS( test_case->expected_output, test_case->expected_output_length, input.buffer, input.len); } return 0; } AWS_TEST_CASE(be_sequence_add_one, s_be_sequence_add_one); struct aws_be_compare_test { uint8_t *lhs; size_t lhs_length; uint8_t *rhs; size_t rhs_length; int expected_return_value; int expected_result; }; static uint8_t compare_lhs_bad[] = {0x00, 0x00, 0x00}; static uint8_t compare_rhs_bad[] = {0x00, 0x00, 0x01, 0xFF}; static uint8_t compare_lhs_1[] = {0x00, 0x00, 0x00}; static uint8_t compare_rhs_1[] = {0x00, 0x00, 0x01}; static uint8_t compare_lhs_2[] = {0xAB, 0xCD, 0x80, 0xFF, 0x01, 0x0A}; static uint8_t compare_rhs_2[] = {0xAB, 0xCD, 0x80, 0xFF, 0x01, 0x0A}; static uint8_t compare_lhs_3[] = {0xFF, 0xCD, 0x80, 0xFF, 0x01, 0x0A}; static uint8_t compare_rhs_3[] = {0xFE, 0xCD, 0x80, 0xFF, 0x01, 0x0A}; static struct aws_be_compare_test s_be_compare_test_cases[] = { /* * Failure cases */ { .lhs = compare_lhs_bad, .lhs_length = AWS_ARRAY_SIZE(compare_lhs_bad), .rhs = compare_rhs_bad, .rhs_length = AWS_ARRAY_SIZE(compare_rhs_bad), .expected_return_value = AWS_OP_ERR, .expected_result = 0, }, /* * Success cases */ { .lhs = compare_lhs_1, .lhs_length = AWS_ARRAY_SIZE(compare_lhs_1), .rhs = compare_rhs_1, .rhs_length = AWS_ARRAY_SIZE(compare_rhs_1), .expected_return_value = AWS_OP_SUCCESS, .expected_result = -1, }, { .lhs = compare_lhs_2, .lhs_length = AWS_ARRAY_SIZE(compare_lhs_2), .rhs = compare_rhs_2, .rhs_length = AWS_ARRAY_SIZE(compare_rhs_2), .expected_return_value = AWS_OP_SUCCESS, .expected_result = 0, }, { .lhs = compare_lhs_3, 
.lhs_length = AWS_ARRAY_SIZE(compare_lhs_3), .rhs = compare_rhs_3, .rhs_length = AWS_ARRAY_SIZE(compare_rhs_3), .expected_return_value = AWS_OP_SUCCESS, .expected_result = 1, }, }; static int s_be_sequence_compare(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; for (size_t i = 0; i < AWS_ARRAY_SIZE(s_be_compare_test_cases); ++i) { struct aws_be_compare_test *test_case = &s_be_compare_test_cases[i]; struct aws_byte_buf lhs = { .len = test_case->lhs_length, .buffer = test_case->lhs, .capacity = test_case->lhs_length, .allocator = NULL, }; struct aws_byte_buf rhs = { .len = test_case->rhs_length, .buffer = test_case->rhs, .capacity = test_case->rhs_length, .allocator = NULL, }; int comparison_result = 0; int result = aws_be_bytes_compare_constant_time(&lhs, &rhs, &comparison_result); ASSERT_INT_EQUALS(test_case->expected_return_value, result); if (result == AWS_OP_SUCCESS) { ASSERT_INT_EQUALS(test_case->expected_result, comparison_result); } int swapped_comparison_result = 0; int swapped_result = aws_be_bytes_compare_constant_time(&rhs, &lhs, &swapped_comparison_result); ASSERT_INT_EQUALS(test_case->expected_return_value, swapped_result); if (swapped_result == AWS_OP_SUCCESS) { ASSERT_INT_EQUALS(-test_case->expected_result, swapped_comparison_result); } } return 0; } AWS_TEST_CASE(be_sequence_compare, s_be_sequence_compare); AWS_STATIC_STRING_FROM_LITERAL(s_ecc_derive_fixed_access_key_id_test_value, "AKISORANDOMAASORANDOM"); AWS_STATIC_STRING_FROM_LITERAL( s_ecc_derive_fixed_secret_access_key_test_value, "q+jcrXGc+0zWN6uzclKVhvMmUsIfRPa4rlRandom"); /* * Values derived in synchronicity with Golang and IAM implementations */ #ifndef __APPLE__ AWS_STATIC_STRING_FROM_LITERAL( s_expected_fixed_pub_x, "15d242ceebf8d8169fd6a8b5a746c41140414c3b07579038da06af89190fffcb"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_fixed_pub_y, "0515242cedd82e94799482e4c0514b505afccf2c0c98d6a553bf539f424c5ec0"); #endif /* __APPLE__ */ AWS_STATIC_STRING_FROM_LITERAL( s_expected_fixed_private_key, "7fd3bd010c0d9c292141c2b77bfbde1042c92e6836fff749d1269ec890fca1bd"); static int s_verify_fixed_ecc_key_public(struct aws_ecc_key_pair *key, struct aws_allocator *allocator) { #ifdef __APPLE__ (void)key; (void)allocator; #else aws_ecc_key_pair_derive_public_key(key); struct aws_byte_cursor pub_x_cursor; AWS_ZERO_STRUCT(pub_x_cursor); struct aws_byte_cursor pub_y_cursor; AWS_ZERO_STRUCT(pub_y_cursor); aws_ecc_key_pair_get_public_key(key, &pub_x_cursor, &pub_y_cursor); struct aws_byte_buf pub_coord_x; ASSERT_SUCCESS(aws_byte_buf_init(&pub_coord_x, allocator, 128)); ASSERT_SUCCESS(aws_hex_encode(&pub_x_cursor, &pub_coord_x)); pub_coord_x.len -= 1; ASSERT_BIN_ARRAYS_EQUALS( s_expected_fixed_pub_x->bytes, s_expected_fixed_pub_x->len, pub_coord_x.buffer, pub_coord_x.len); struct aws_byte_buf pub_coord_y; ASSERT_SUCCESS(aws_byte_buf_init(&pub_coord_y, allocator, 128)); ASSERT_SUCCESS(aws_hex_encode(&pub_y_cursor, &pub_coord_y)); pub_coord_y.len -= 1; ASSERT_BIN_ARRAYS_EQUALS( s_expected_fixed_pub_y->bytes, s_expected_fixed_pub_y->len, pub_coord_y.buffer, pub_coord_y.len); aws_byte_buf_clean_up(&pub_coord_x); aws_byte_buf_clean_up(&pub_coord_y); #endif /* __APPLE__ */ return AWS_OP_SUCCESS; } static int s_verify_fixed_ecc_key_private(struct aws_ecc_key_pair *key, struct aws_allocator *allocator) { struct aws_byte_cursor private_key_cursor; AWS_ZERO_STRUCT(private_key_cursor); aws_ecc_key_pair_get_private_key(key, &private_key_cursor); struct aws_byte_buf private_buf; 
ASSERT_SUCCESS(aws_byte_buf_init(&private_buf, allocator, 128)); ASSERT_SUCCESS(aws_hex_encode(&private_key_cursor, &private_buf)); private_buf.len -= 1; ASSERT_BIN_ARRAYS_EQUALS( s_expected_fixed_private_key->bytes, s_expected_fixed_private_key->len, private_buf.buffer, private_buf.len); aws_byte_buf_clean_up(&private_buf); return AWS_OP_SUCCESS; } static int s_credentials_derive_ecc_key_fixed(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_credentials *creds = aws_credentials_new_from_string( allocator, s_ecc_derive_fixed_access_key_id_test_value, s_ecc_derive_fixed_secret_access_key_test_value, NULL, UINT64_MAX); struct aws_ecc_key_pair *derived_key = aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials(allocator, creds); ASSERT_TRUE(derived_key != NULL); ASSERT_SUCCESS(s_verify_fixed_ecc_key_public(derived_key, allocator)); ASSERT_SUCCESS(s_verify_fixed_ecc_key_private(derived_key, allocator)); aws_ecc_key_pair_release(derived_key); aws_credentials_release(creds); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE(credentials_derive_ecc_key_fixed, s_credentials_derive_ecc_key_fixed); static int s_credentials_new_ecc_fixed(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_credentials *creds = aws_credentials_new_from_string( allocator, s_ecc_derive_fixed_access_key_id_test_value, s_ecc_derive_fixed_secret_access_key_test_value, NULL, UINT64_MAX); struct aws_credentials *derived_credentials = aws_credentials_new_ecc_from_aws_credentials(allocator, creds); ASSERT_TRUE(derived_credentials != NULL); struct aws_ecc_key_pair *derived_key = aws_credentials_get_ecc_key_pair(derived_credentials); ASSERT_SUCCESS(s_verify_fixed_ecc_key_public(derived_key, allocator)); ASSERT_SUCCESS(s_verify_fixed_ecc_key_private(derived_key, allocator)); aws_credentials_release(derived_credentials); aws_credentials_release(creds); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE(credentials_new_ecc_fixed, s_credentials_new_ecc_fixed); AWS_STATIC_STRING_FROM_LITERAL( s_ecc_derive_long_access_key_id_test_value, "AKISORANDOMAASORANDOMFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFf"); AWS_STATIC_STRING_FROM_LITERAL( s_ecc_derive_long_secret_access_key_test_value, "q+jcrXGc+0zWN6uzclKVhvMmUsIfRPa4rlRandom"); static int s_credentials_derive_ecc_key_long_access_key(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_credentials *creds = aws_credentials_new_from_string( allocator, s_ecc_derive_long_access_key_id_test_value, s_ecc_derive_long_secret_access_key_test_value, NULL, UINT64_MAX); struct aws_ecc_key_pair *derived_key = aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials(allocator, creds); ASSERT_TRUE(derived_key != NULL); aws_ecc_key_pair_release(derived_key); aws_credentials_release(creds); aws_auth_library_clean_up(); return 0; } AWS_TEST_CASE(credentials_derive_ecc_key_long_access_key, s_credentials_derive_ecc_key_long_access_key); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/seed_tests.py000066400000000000000000000110241456575232400240450ustar00rootroot00000000000000# # Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # import os import copy import argparse import pdb import json base_context = { "region":"us-east-1", "service":"service", "timestamp":"2015-08-30T12:36:00Z", "expiration_in_seconds":3600, "credentials": { "access_key_id" : "AKIDEXAMPLE", "secret_access_key" : "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" }, "sign_body": False } def parse_arguments(): parser = argparse.ArgumentParser(description="Sigv4 Test Suite Generation Script") parser.add_argument("source_dir", action="store") parser.add_argument("dest_dir", action="store") args = vars( parser.parse_args() ) return args def merge_dicts(source, destination): for key, value in source.items(): if isinstance(value, dict): # get node or create one node = destination.setdefault(key, {}) merge_dicts(value, node) else: destination[key] = value return destination def generate_test_case(source_dir, dest_dir, test_name, context_map): source_request_filename = os.path.join(source_dir, test_name + ".req") if not os.path.exists(source_request_filename): return if not os.path.exists(dest_dir): os.makedirs(dest_dir) dest_request_filename = os.path.join(dest_dir, "request.txt") with open(source_request_filename, "r") as source_file: with open(dest_request_filename, "w") as dest_file: for _, line in enumerate(source_file): if not line.startswith("X-Amz-Date"): dest_file.write(line) test_context = copy.deepcopy(base_context) test_context = merge_dicts(context_map, test_context) context_contents = json.dumps(test_context, sort_keys=True, indent=4) dest_context_filename = os.path.join(dest_dir, "context.json") context_file = open(dest_context_filename,"w") context_file.write(context_contents) context_file.close() return normalized_context = { "normalize" : True } unnormalized_context = { "normalize" : False } token_context = { "normalize" : True, "credentials" : { "token" : "6e86291e8372ff2a2260956d9b8aae1d763fbf315fa00fa31553b73ebf194267" } } sign_body_context = { "normalize": True, "sign_body": True } def generate_tests(source_dir, dest_dir, suffix, default_context_map): for root_dir, dir_names, file_names in os.walk( source_dir ): if root_dir == source_dir: for dir_name in dir_names: test_case_source_dir = os.path.join(root_dir, dir_name) context_map = default_context_map if dir_name == "get-vanilla-with-session-token": context_map = token_context elif dir_name.startswith('post-x-www-form'): context_map = sign_body_context v4_test_case_dest_dir = os.path.join(dest_dir, "v4", dir_name + suffix) v4a_test_case_dest_dir = os.path.join(dest_dir, "v4a", dir_name + suffix) generate_test_case(test_case_source_dir, v4_test_case_dest_dir, dir_name, context_map) generate_test_case(test_case_source_dir, v4a_test_case_dest_dir, dir_name, context_map) return def main(): args = parse_arguments() source_dir = args["source_dir"] if not os.path.exists(source_dir): print("Source directory {0} does not exist".format(source_dir)) return dest_dir = args["dest_dir"] if not os.path.exists(dest_dir): os.makedirs(dest_dir) generate_tests(source_dir, dest_dir, "", normalized_context) post_sts_token_dir = 
os.path.join(source_dir, "post-sts-token") generate_tests(post_sts_token_dir, dest_dir, "", normalized_context) normalize_dir = os.path.join(source_dir, "normalize-path") generate_tests(normalize_dir, dest_dir, "-normalized", normalized_context) generate_tests(normalize_dir, dest_dir, "-unnormalized", unnormalized_context) main() aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/shared_credentials_test_definitions.h000066400000000000000000000051521456575232400307640ustar00rootroot00000000000000#ifndef SHARED_CREDENTIALS_TEST_DEFINITIONS_H #define SHARED_CREDENTIALS_TEST_DEFINITIONS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #ifdef _MSC_VER /* fopen, fprintf etc... */ # pragma warning(push) # pragma warning(disable : 4996) #endif AWS_STATIC_STRING_FROM_LITERAL(s_default_profile_env_variable_name, "AWS_PROFILE"); AWS_STATIC_STRING_FROM_LITERAL(s_default_config_path_env_variable_name, "AWS_CONFIG_FILE"); AWS_STATIC_STRING_FROM_LITERAL(s_default_credentials_path_env_variable_name, "AWS_SHARED_CREDENTIALS_FILE"); AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_env_var, "AWS_ACCESS_KEY_ID"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_env_var, "AWS_SECRET_ACCESS_KEY"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_env_var, "AWS_SESSION_TOKEN"); static struct aws_string *aws_create_process_unique_file_name(struct aws_allocator *allocator) { char file_name_storage[64] = {0}; struct aws_byte_buf filename_buf = aws_byte_buf_from_empty_array(file_name_storage, sizeof(file_name_storage)); #ifndef WIN32 AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_cursor(&filename_buf, aws_byte_cursor_from_c_str("./"))); #endif AWS_FATAL_ASSERT( aws_byte_buf_write_from_whole_cursor(&filename_buf, aws_byte_cursor_from_c_str("config_creds_test"))); struct aws_uuid uuid; AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &filename_buf) == AWS_OP_SUCCESS); return aws_string_new_from_array(allocator, filename_buf.buffer, filename_buf.len); } static int aws_create_profile_file(const struct aws_string *file_name, const struct aws_string *file_contents) { /* avoid compiler warning if some files include this header but don't actually use those variables */ (void)s_default_profile_env_variable_name; (void)s_default_config_path_env_variable_name; (void)s_default_credentials_path_env_variable_name; (void)s_access_key_id_env_var; (void)s_secret_access_key_env_var; (void)s_session_token_env_var; FILE *fp = fopen(aws_string_c_str(file_name), "w"); if (fp == NULL) { return aws_translate_and_raise_io_error(errno); } int result = fprintf(fp, "%s", aws_string_c_str(file_contents)); fclose(fp); if (result < 0) { return aws_translate_and_raise_io_error(errno); } return AWS_OP_SUCCESS; } #ifdef _MSC_VER # pragma warning(pop) #endif #endif /* SHARED_CREDENTIALS_TEST_DEFINITIONS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/sigv4_signing_tests.c000066400000000000000000002251331456575232400255010ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "credentials_provider_utils.h" #include "test_signable.h" #if defined(_MSC_VER) # pragma warning(disable : 4996) /* deprecation */ #endif AWS_STATIC_STRING_FROM_LITERAL(s_header_canonical_request_filename, "header-canonical-request.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_header_string_to_sign_filename, "header-string-to-sign.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_header_signed_request_filename, "header-signed-request.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_header_signature_filename, "header-signature.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_query_canonical_request_filename, "query-canonical-request.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_query_string_to_sign_filename, "query-string-to-sign.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_query_signed_request_filename, "query-signed-request.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_query_signature_filename, "query-signature.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_public_key_filename, "public-key.json"); static const struct aws_string *s_get_canonical_request_filename(enum aws_signature_type signature_type) { switch (signature_type) { case AWS_ST_HTTP_REQUEST_HEADERS: return s_header_canonical_request_filename; case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: return s_query_canonical_request_filename; default: return NULL; } } static const struct aws_string *s_get_string_to_sign_filename(enum aws_signature_type signature_type) { switch (signature_type) { case AWS_ST_HTTP_REQUEST_HEADERS: return s_header_string_to_sign_filename; case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: return s_query_string_to_sign_filename; default: return NULL; } } static const struct aws_string *s_get_signed_request_filename(enum aws_signature_type signature_type) { switch (signature_type) { case AWS_ST_HTTP_REQUEST_HEADERS: return s_header_signed_request_filename; case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: return s_query_signed_request_filename; default: return NULL; } } static const struct aws_string *s_get_signature_filename(enum aws_signature_type signature_type) { switch (signature_type) { case AWS_ST_HTTP_REQUEST_HEADERS: return s_header_signature_filename; case AWS_ST_HTTP_REQUEST_QUERY_PARAMS: return s_query_signature_filename; default: return NULL; } } struct v4_test_case_contents { struct aws_allocator *allocator; struct aws_byte_buf context; struct aws_byte_buf request; struct aws_byte_buf public_key; struct aws_byte_buf expected_canonical_request; struct aws_byte_buf expected_string_to_sign; struct aws_byte_buf sample_signed_request; struct aws_byte_buf sample_signature; }; static void s_strip_windows_line_endings(struct aws_byte_buf *buffer) { size_t write_index = 0; for (size_t read_index = 0; read_index < buffer->len; ++read_index) { char current = buffer->buffer[read_index]; if (current != '\r') { buffer->buffer[write_index++] = current; } } buffer->len = write_index; } static int s_load_test_case_file( struct aws_allocator *allocator, const char *parent_folder, const char *test_name, const char *filename, struct aws_byte_buf *buffer) { char path[1024]; snprintf(path, AWS_ARRAY_SIZE(path), "./%s/%s/%s", parent_folder, test_name, filename); return aws_byte_buf_init_from_file(buffer, allocator, path); } static int s_v4_test_case_context_init_from_file_set( struct v4_test_case_contents *contents, struct aws_allocator *allocator, const char *parent_folder, const char *test_name, enum aws_signature_type 
signature_type) { AWS_ZERO_STRUCT(*contents); contents->allocator = allocator; /* required files */ if (s_load_test_case_file(allocator, parent_folder, test_name, "request.txt", &contents->request) || s_load_test_case_file(allocator, parent_folder, test_name, "context.json", &contents->context)) { return AWS_OP_ERR; } s_strip_windows_line_endings(&contents->request); s_load_test_case_file( allocator, parent_folder, test_name, (const char *)s_public_key_filename->bytes, &contents->public_key); s_load_test_case_file( allocator, parent_folder, test_name, aws_string_c_str(s_get_canonical_request_filename(signature_type)), &contents->expected_canonical_request); s_strip_windows_line_endings(&contents->expected_canonical_request); s_load_test_case_file( allocator, parent_folder, test_name, aws_string_c_str(s_get_string_to_sign_filename(signature_type)), &contents->expected_string_to_sign); s_strip_windows_line_endings(&contents->expected_string_to_sign); s_load_test_case_file( allocator, parent_folder, test_name, aws_string_c_str(s_get_signed_request_filename(signature_type)), &contents->sample_signed_request); s_strip_windows_line_endings(&contents->sample_signed_request); s_load_test_case_file( allocator, parent_folder, test_name, aws_string_c_str(s_get_signature_filename(signature_type)), &contents->sample_signature); s_strip_windows_line_endings(&contents->sample_signature); return AWS_OP_SUCCESS; } static void s_v4_test_case_contents_clean_up(struct v4_test_case_contents *contents) { if (contents->allocator) { aws_byte_buf_clean_up(&contents->request); aws_byte_buf_clean_up(&contents->context); aws_byte_buf_clean_up(&contents->public_key); aws_byte_buf_clean_up(&contents->expected_canonical_request); aws_byte_buf_clean_up(&contents->expected_string_to_sign); aws_byte_buf_clean_up(&contents->sample_signed_request); aws_byte_buf_clean_up(&contents->sample_signature); contents->allocator = NULL; } } struct v4_test_context { struct aws_allocator *allocator; enum aws_signing_algorithm algorithm; struct v4_test_case_contents test_case_data; struct aws_string *region_config; struct aws_string *service; struct aws_string *timestamp; struct aws_credentials *credentials; bool should_normalize; bool should_sign_body; uint64_t expiration_in_seconds; struct aws_input_stream *payload_stream; struct aws_ecc_key_pair *signing_key; struct aws_ecc_key_pair *verification_key; struct aws_signable *signable; struct aws_signing_config_aws *config; struct aws_mutex lock; struct aws_condition_variable signal; bool done; struct aws_signing_state_aws *signing_state; struct aws_http_message *request; bool should_generate_test_case; struct aws_string *canonical_signing_auth_value; bool omit_session_token; }; static void s_v4_test_context_clean_up(struct v4_test_context *context) { s_v4_test_case_contents_clean_up(&context->test_case_data); aws_http_message_release(context->request); aws_input_stream_destroy(context->payload_stream); aws_ecc_key_pair_release(context->signing_key); aws_ecc_key_pair_release(context->verification_key); aws_string_destroy(context->region_config); aws_string_destroy(context->service); aws_string_destroy(context->timestamp); aws_credentials_release(context->credentials); aws_mutex_clean_up(&context->lock); aws_condition_variable_clean_up(&context->signal); aws_signing_state_destroy(context->signing_state); aws_mem_release(context->allocator, context->config); aws_signable_destroy(context->signable); aws_string_destroy(context->canonical_signing_auth_value); } 
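/*
 * The string literals below name the keys of each test case's context.json document. Those JSON files are
 * produced by tests/seed_tests.py (earlier in this archive), whose base_context carries the same keys:
 * region, service, timestamp, expiration_in_seconds, sign_body, normalize, and a nested credentials object
 * with access_key_id, secret_access_key, and an optional token.
 *
 * For orientation only (a minimal sketch, not taken from this file): outside of these internal tests, callers
 * normally drive signing through the public entry point rather than aws_signing_state_aws, roughly:
 *
 *   struct aws_signable *signable = aws_signable_new_http_request(allocator, request);
 *   struct aws_signing_config_aws config = {
 *       .config_type = AWS_SIGNING_CONFIG_AWS,
 *       .algorithm = AWS_SIGNING_ALGORITHM_V4,
 *       .signature_type = AWS_ST_HTTP_REQUEST_HEADERS,
 *       .region = aws_byte_cursor_from_c_str("us-east-1"),
 *       .service = aws_byte_cursor_from_c_str("service"),
 *       .credentials = credentials,
 *   };
 *   aws_date_time_init_now(&config.date);
 *   aws_sign_request_aws(
 *       allocator, signable, (struct aws_signing_config_base *)&config, s_on_signing_complete, &context);
 *
 * The completion callback then applies the result with aws_apply_signing_result_to_http_request(), just as
 * s_on_signing_complete() does further below.
 */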
AWS_STATIC_STRING_FROM_LITERAL(s_empty_empty_string, "\0"); AWS_STATIC_STRING_FROM_LITERAL(s_credentials_name, "credentials"); AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_name, "access_key_id"); AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_name, "secret_access_key"); AWS_STATIC_STRING_FROM_LITERAL(s_session_token_name, "token"); AWS_STATIC_STRING_FROM_LITERAL(s_region_name, "region"); AWS_STATIC_STRING_FROM_LITERAL(s_service_name, "service"); AWS_STATIC_STRING_FROM_LITERAL(s_timestamp_name, "timestamp"); AWS_STATIC_STRING_FROM_LITERAL(s_normalize_name, "normalize"); AWS_STATIC_STRING_FROM_LITERAL(s_body_name, "sign_body"); AWS_STATIC_STRING_FROM_LITERAL(s_expiration_name, "expiration_in_seconds"); AWS_STATIC_STRING_FROM_LITERAL(s_omit_token_name, "omit_session_token"); static int s_v4_test_context_parse_context_file(struct v4_test_context *context) { struct aws_byte_buf *document = &context->test_case_data.context; struct aws_json_value *document_root = NULL; int result = AWS_OP_ERR; struct aws_byte_cursor null_terminator_cursor = aws_byte_cursor_from_string(s_empty_empty_string); if (aws_byte_buf_append_dynamic(document, &null_terminator_cursor)) { goto done; } struct aws_byte_cursor document_buffer_cursor = aws_byte_cursor_from_buf(document); document_root = aws_json_value_new_from_string(aws_default_allocator(), document_buffer_cursor); if (document_root == NULL) { goto done; } struct aws_json_value *credentials_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_credentials_name)); AWS_FATAL_ASSERT(credentials_node != NULL); /* * Pull out the three credentials components */ struct aws_json_value *access_key_id = aws_json_value_get_from_object(credentials_node, aws_byte_cursor_from_string(s_access_key_id_name)); struct aws_json_value *secret_access_key = aws_json_value_get_from_object(credentials_node, aws_byte_cursor_from_string(s_secret_access_key_name)); struct aws_json_value *session_token = aws_json_value_get_from_object(credentials_node, aws_byte_cursor_from_string(s_session_token_name)); struct aws_byte_cursor access_key_id_cursor; if (!aws_json_value_is_string(access_key_id) || aws_json_value_get_string(access_key_id, &access_key_id_cursor) == AWS_OP_ERR) { goto done; } struct aws_byte_cursor secret_access_key_cursor; AWS_ZERO_STRUCT(secret_access_key_cursor); struct aws_byte_cursor session_token_cursor; AWS_ZERO_STRUCT(session_token_cursor); if (aws_json_value_is_string(session_token)) { aws_json_value_get_string(session_token, &session_token_cursor); } if (aws_json_value_is_string(secret_access_key)) { aws_json_value_get_string(secret_access_key, &secret_access_key_cursor); } if (context->signing_key == NULL) { context->credentials = aws_credentials_new( context->allocator, access_key_id_cursor, secret_access_key_cursor, session_token_cursor, UINT64_MAX); context->signing_key = aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials(context->allocator, context->credentials); } else { context->credentials = aws_credentials_new_ecc( context->allocator, access_key_id_cursor, context->signing_key, session_token_cursor, UINT64_MAX); context->signing_key = aws_credentials_get_ecc_key_pair(context->credentials); aws_ecc_key_pair_acquire(context->signing_key); } AWS_FATAL_ASSERT(context->credentials != NULL); struct aws_json_value *region_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_region_name)); struct aws_byte_cursor region_node_cursor; if (region_node == NULL || !aws_json_value_is_string(region_node) || 
aws_json_value_get_string(region_node, ®ion_node_cursor) == AWS_OP_ERR) { goto done; } context->region_config = aws_string_new_from_cursor(context->allocator, ®ion_node_cursor); if (context->region_config == NULL) { goto done; } struct aws_json_value *service_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_service_name)); struct aws_byte_cursor service_node_cursor; if (service_node == NULL || !aws_json_value_is_string(service_node) || aws_json_value_get_string(service_node, &service_node_cursor) == AWS_OP_ERR) { goto done; } context->service = aws_string_new_from_cursor(context->allocator, &service_node_cursor); if (context->service == NULL) { goto done; } struct aws_json_value *timestamp_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_timestamp_name)); struct aws_byte_cursor timestamp_node_cursor; if (timestamp_node == NULL || !aws_json_value_is_string(timestamp_node) || aws_json_value_get_string(timestamp_node, ×tamp_node_cursor) == AWS_OP_ERR) { goto done; } context->timestamp = aws_string_new_from_cursor(context->allocator, ×tamp_node_cursor); if (context->timestamp == NULL) { goto done; } struct aws_json_value *normalize_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_normalize_name)); if (normalize_node == NULL || !aws_json_value_is_boolean(normalize_node)) { goto done; } aws_json_value_get_boolean(normalize_node, &context->should_normalize); struct aws_json_value *body_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_body_name)); if (body_node == NULL || !aws_json_value_is_boolean(body_node)) { goto done; } aws_json_value_get_boolean(body_node, &context->should_sign_body); struct aws_json_value *expiration_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_expiration_name)); if (expiration_node == NULL || !aws_json_value_is_number(expiration_node)) { goto done; } double expiration_in_seconds_double = 0; aws_json_value_get_number(expiration_node, &expiration_in_seconds_double); context->expiration_in_seconds = (uint64_t)expiration_in_seconds_double; struct aws_json_value *omit_token_node = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_string(s_omit_token_name)); if (omit_token_node != NULL && aws_json_value_is_boolean(omit_token_node)) { aws_json_value_get_boolean(omit_token_node, &context->omit_session_token); } result = AWS_OP_SUCCESS; done: if (document_root != NULL) { aws_json_value_destroy(document_root); } return result; } static int s_parse_request( struct aws_allocator *allocator, struct aws_byte_cursor request_cursor, struct aws_http_message **out_request, struct aws_input_stream **out_body_stream) { int result = AWS_OP_ERR; *out_request = NULL; *out_body_stream = NULL; struct aws_array_list request_lines; AWS_ZERO_STRUCT(request_lines); if (aws_array_list_init_dynamic(&request_lines, allocator, 10, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } struct aws_input_stream *body_stream = NULL; struct aws_http_message *request = aws_http_message_new_request(allocator); if (request == NULL) { goto done; } struct aws_array_list header_set; AWS_ZERO_STRUCT(header_set); if (aws_array_list_init_dynamic(&header_set, allocator, 10, sizeof(struct aws_signable_property_list_pair))) { goto done; } if (aws_byte_cursor_split_on_char(&request_cursor, '\n', &request_lines)) { goto done; } size_t line_count = aws_array_list_length(&request_lines); if (line_count == 0) { goto done; } struct 
aws_byte_cursor first_line; AWS_ZERO_STRUCT(first_line); if (aws_array_list_get_at(&request_lines, &first_line, 0)) { goto done; } struct aws_byte_cursor method_cursor; AWS_ZERO_STRUCT(method_cursor); if (!aws_byte_cursor_next_split(&first_line, ' ', &method_cursor)) { goto done; } aws_http_message_set_request_method(request, method_cursor); aws_byte_cursor_advance(&first_line, method_cursor.len + 1); /* not safe in general, but all test cases end in " HTTP/1.1" */ struct aws_byte_cursor uri_cursor = first_line; uri_cursor.len -= 9; aws_http_message_set_request_path(request, uri_cursor); /* headers */ size_t line_index = 1; for (; line_index < line_count; ++line_index) { struct aws_byte_cursor current_line; AWS_ZERO_STRUCT(current_line); if (aws_array_list_get_at(&request_lines, ¤t_line, line_index)) { goto done; } if (current_line.len == 0) { /* empty line = end of headers */ break; } if (isspace(*current_line.ptr)) { /* multi-line header, append the entire line to the most recent header's value */ size_t current_header_count = aws_array_list_length(&header_set); AWS_FATAL_ASSERT(current_header_count > 0); struct aws_signable_property_list_pair *current_header; if (aws_array_list_get_at_ptr(&header_set, (void **)¤t_header, current_header_count - 1)) { goto done; } current_header->value.len = (current_line.ptr + current_line.len) - current_header->value.ptr; } else { /* new header, parse it and add to the header set */ struct aws_signable_property_list_pair current_header; AWS_ZERO_STRUCT(current_header); if (!aws_byte_cursor_next_split(¤t_line, ':', ¤t_header.name)) { goto done; } aws_byte_cursor_advance(¤t_line, current_header.name.len + 1); current_header.value = current_line; aws_array_list_push_back(&header_set, ¤t_header); } } size_t header_count = aws_array_list_length(&header_set); for (size_t i = 0; i < header_count; ++i) { struct aws_signable_property_list_pair property_header; aws_array_list_get_at(&header_set, &property_header, i); struct aws_http_header header = { .name = property_header.name, .value = property_header.value, }; aws_http_message_add_header(request, header); } /* body */ struct aws_byte_cursor body_cursor; AWS_ZERO_STRUCT(body_cursor); if (line_index + 1 < line_count) { if (aws_array_list_get_at(&request_lines, &body_cursor, line_index + 1)) { goto done; } /* body length is the end of the whole request (pointer) minus the start of the body pointer */ body_cursor.len = (request_cursor.ptr + request_cursor.len - body_cursor.ptr); body_stream = aws_input_stream_new_from_cursor(allocator, &body_cursor); if (body_stream == NULL) { goto done; } aws_http_message_set_body_stream(request, body_stream); } result = AWS_OP_SUCCESS; done: aws_array_list_clean_up(&request_lines); aws_array_list_clean_up(&header_set); if (result == AWS_OP_ERR) { aws_http_message_release(request); aws_input_stream_destroy(body_stream); } else { *out_request = request; *out_body_stream = body_stream; } return result; } static int s_v4_test_context_init_signing_config( struct v4_test_context *context, enum aws_signature_type signature_type) { context->signable = aws_signable_new_http_request(context->allocator, context->request); context->config = aws_mem_calloc(context->allocator, 1, sizeof(struct aws_signing_config_aws)); if (context->config == NULL) { return AWS_OP_ERR; } context->config->config_type = AWS_SIGNING_CONFIG_AWS; context->config->algorithm = context->algorithm; context->config->signature_type = signature_type; context->config->region = 
aws_byte_cursor_from_string(context->region_config); context->config->service = aws_byte_cursor_from_string(context->service); context->config->flags.use_double_uri_encode = true; context->config->flags.should_normalize_uri_path = context->should_normalize; context->config->flags.omit_session_token = context->omit_session_token; /* ToDo: make the tests more fine-grained now that we have updated payload signing controls */ if (context->should_sign_body) { context->config->signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256; } else { context->config->signed_body_value = g_aws_signed_body_value_empty_sha256; } context->config->credentials = context->credentials; context->config->expiration_in_seconds = context->expiration_in_seconds; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(context->timestamp); if (aws_date_time_init_from_str_cursor(&context->config->date, &date_cursor, AWS_DATE_FORMAT_ISO_8601)) { return AWS_OP_ERR; } context->signing_state = aws_signing_state_new(context->allocator, context->config, context->signable, NULL, NULL); if (context->signing_state == NULL) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_v4_test_context_parse_verification_key(struct v4_test_context *context) { AWS_FATAL_ASSERT(context->signing_key != NULL); if (context->test_case_data.public_key.len == 0) { context->verification_key = context->signing_key; aws_ecc_key_pair_acquire(context->signing_key); aws_ecc_key_pair_derive_public_key(context->verification_key); return AWS_OP_SUCCESS; } struct aws_byte_buf pub_x_buffer; AWS_ZERO_STRUCT(pub_x_buffer); struct aws_byte_buf pub_y_buffer; AWS_ZERO_STRUCT(pub_y_buffer); struct aws_byte_buf *document = &context->test_case_data.public_key; struct aws_json_value *document_root = NULL; int result = AWS_OP_ERR; struct aws_byte_cursor null_terminator_cursor = aws_byte_cursor_from_string(s_empty_empty_string); if (aws_byte_buf_append_dynamic(document, &null_terminator_cursor)) { goto done; } struct aws_byte_cursor document_cursor = aws_byte_cursor_from_buf(document); document_root = aws_json_value_new_from_string(aws_default_allocator(), document_cursor); if (document_root == NULL) { goto done; } /* * Pull out the three credentials components */ struct aws_json_value *pub_x = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("X")); struct aws_json_value *pub_y = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("Y")); if (!aws_json_value_is_string(pub_x) || !aws_json_value_is_string(pub_y)) { goto done; } struct aws_byte_cursor pub_x_hex_cursor; struct aws_byte_cursor pub_y_hex_cursor; aws_json_value_get_string(pub_x, &pub_x_hex_cursor); aws_json_value_get_string(pub_y, &pub_y_hex_cursor); size_t pub_x_length = 0; size_t pub_y_length = 0; if (aws_hex_compute_decoded_len(pub_x_hex_cursor.len, &pub_x_length) || aws_hex_compute_decoded_len(pub_y_hex_cursor.len, &pub_y_length)) { goto done; } if (aws_byte_buf_init(&pub_x_buffer, context->allocator, pub_x_length) || aws_byte_buf_init(&pub_y_buffer, context->allocator, pub_y_length)) { goto done; } if (aws_hex_decode(&pub_x_hex_cursor, &pub_x_buffer) || aws_hex_decode(&pub_y_hex_cursor, &pub_y_buffer)) { goto done; } struct aws_byte_cursor pub_x_cursor = aws_byte_cursor_from_buf(&pub_x_buffer); struct aws_byte_cursor pub_y_cursor = aws_byte_cursor_from_buf(&pub_y_buffer); context->verification_key = aws_ecc_key_pair_new_from_public_key(context->allocator, AWS_CAL_ECDSA_P256, &pub_x_cursor, &pub_y_cursor); AWS_FATAL_ASSERT(context->verification_key != 
NULL); result = AWS_OP_SUCCESS; done: if (document_root) { aws_json_value_destroy(document_root); } aws_byte_buf_clean_up(&pub_x_buffer); aws_byte_buf_clean_up(&pub_y_buffer); return result; } AWS_STATIC_STRING_FROM_LITERAL(s_generate_test_env_var_name, "GENERATE_TEST_CASES"); static int s_v4_test_context_init( struct v4_test_context *context, struct aws_allocator *allocator, const char *parent_folder, const char *test_name, enum aws_signing_algorithm algorithm, enum aws_signature_type signature_type) { AWS_ZERO_STRUCT(*context); context->allocator = allocator; context->algorithm = algorithm; struct aws_string *should_generate = NULL; ASSERT_SUCCESS(aws_get_environment_value(allocator, s_generate_test_env_var_name, &should_generate)); context->should_generate_test_case = should_generate != NULL; aws_string_destroy(should_generate); if (s_v4_test_case_context_init_from_file_set( &context->test_case_data, allocator, parent_folder, test_name, signature_type)) { return AWS_OP_ERR; } if (s_v4_test_context_parse_context_file(context)) { return AWS_OP_ERR; } if (s_parse_request( context->allocator, aws_byte_cursor_from_buf(&context->test_case_data.request), &context->request, &context->payload_stream)) { return AWS_OP_ERR; } if (s_v4_test_context_init_signing_config(context, signature_type)) { return AWS_OP_ERR; } if (s_v4_test_context_parse_verification_key(context)) { return AWS_OP_ERR; } if (aws_mutex_init(&context->lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&context->signal)) { return AWS_OP_ERR; } context->done = false; return AWS_OP_SUCCESS; } bool s_is_signing_complete_predicate(void *userdata) { struct v4_test_context *context = userdata; return context->done; } void s_wait_on_signing_complete(struct v4_test_context *context) { aws_mutex_lock(&context->lock); if (!context->done) { aws_condition_variable_wait_pred(&context->signal, &context->lock, s_is_signing_complete_predicate, context); } aws_mutex_unlock(&context->lock); } static void s_on_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) { AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); struct v4_test_context *context = userdata; aws_apply_signing_result_to_http_request(context->request, context->allocator, result); struct aws_string *auth_value = NULL; aws_signing_result_get_property(result, g_aws_signature_property_name, &auth_value); struct aws_byte_cursor auth_value_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_string(auth_value)); context->canonical_signing_auth_value = aws_string_new_from_cursor(context->allocator, &auth_value_cursor); /* Mark results complete */ aws_mutex_lock(&context->lock); context->done = true; aws_condition_variable_notify_one(&context->signal); aws_mutex_unlock(&context->lock); } static int s_write_test_file( const char *parent_folder, const char *test_name, const struct aws_string *filename, const struct aws_byte_buf *contents) { char path[1024]; snprintf(path, AWS_ARRAY_SIZE(path), "./%s/%s/%s", parent_folder, test_name, aws_string_c_str(filename)); FILE *fp = fopen(path, "w"); if (fp == NULL) { return AWS_OP_ERR; } struct aws_byte_cursor cursor = aws_byte_cursor_from_array(contents->buffer, contents->len); fprintf(fp, PRInSTR, AWS_BYTE_CURSOR_PRI(cursor)); fclose(fp); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_public_key_prefix_json, "{\n"); AWS_STATIC_STRING_FROM_LITERAL(s_public_key_x_json, " \"X\":\""); AWS_STATIC_STRING_FROM_LITERAL(s_public_key_y_json, "\",\n \"Y\":\""); 
AWS_STATIC_STRING_FROM_LITERAL(s_public_key_suffix_json, "\"\n}\n"); int s_write_public_key_file(struct v4_test_context *context, const char *parent_folder, const char *test_name) { AWS_FATAL_ASSERT(context->verification_key != NULL); int result = AWS_OP_ERR; struct aws_byte_buf key_buffer; AWS_ZERO_STRUCT(key_buffer); if (aws_byte_buf_init(&key_buffer, context->allocator, 256)) { goto done; } struct aws_byte_cursor pub_x_cursor; AWS_ZERO_STRUCT(pub_x_cursor); struct aws_byte_cursor pub_y_cursor; AWS_ZERO_STRUCT(pub_y_cursor); aws_ecc_key_pair_derive_public_key(context->verification_key); aws_ecc_key_pair_get_public_key(context->verification_key, &pub_x_cursor, &pub_y_cursor); struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_public_key_prefix_json); aws_byte_buf_append_dynamic(&key_buffer, &prefix_cursor); struct aws_byte_cursor x_cursor = aws_byte_cursor_from_string(s_public_key_x_json); aws_byte_buf_append_dynamic(&key_buffer, &x_cursor); aws_hex_encode_append_dynamic(&pub_x_cursor, &key_buffer); struct aws_byte_cursor y_cursor = aws_byte_cursor_from_string(s_public_key_y_json); aws_byte_buf_append_dynamic(&key_buffer, &y_cursor); aws_hex_encode_append_dynamic(&pub_y_cursor, &key_buffer); struct aws_byte_cursor suffix_cursor = aws_byte_cursor_from_string(s_public_key_suffix_json); aws_byte_buf_append_dynamic(&key_buffer, &suffix_cursor); result = s_write_test_file(parent_folder, test_name, s_public_key_filename, &key_buffer); done: aws_byte_buf_clean_up(&key_buffer); return result; } static int s_generate_test_case( struct v4_test_context *test_context, const char *parent_folder, const char *test_name) { { struct aws_signing_state_aws *signing_state = test_context->signing_state; /* Generate public key file */ if (test_context->algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC && test_context->config->signature_type == AWS_ST_HTTP_REQUEST_HEADERS) { ASSERT_SUCCESS(s_write_public_key_file(test_context, parent_folder, test_name)); } /* 1a - generate canonical request */ ASSERT_TRUE(aws_signing_build_canonical_request(signing_state) == AWS_OP_SUCCESS); ASSERT_SUCCESS(s_write_test_file( parent_folder, test_name, s_get_canonical_request_filename(test_context->config->signature_type), &signing_state->canonical_request)); /* 1b- generate string to sign */ ASSERT_TRUE(aws_signing_build_string_to_sign(signing_state) == AWS_OP_SUCCESS); ASSERT_SUCCESS(s_write_test_file( parent_folder, test_name, s_get_string_to_sign_filename(test_context->config->signature_type), &signing_state->string_to_sign)); } return AWS_OP_SUCCESS; } static int s_check_piecewise_test_case(struct v4_test_context *test_context) { struct aws_signing_state_aws *signing_state = test_context->signing_state; /* 1a - validate canonical request */ ASSERT_TRUE(aws_signing_build_canonical_request(signing_state) == AWS_OP_SUCCESS); ASSERT_BIN_ARRAYS_EQUALS( test_context->test_case_data.expected_canonical_request.buffer, test_context->test_case_data.expected_canonical_request.len, signing_state->canonical_request.buffer, signing_state->canonical_request.len); /* 1b- validate string to sign */ ASSERT_TRUE(aws_signing_build_string_to_sign(signing_state) == AWS_OP_SUCCESS); ASSERT_BIN_ARRAYS_EQUALS( test_context->test_case_data.expected_string_to_sign.buffer, test_context->test_case_data.expected_string_to_sign.len, signing_state->string_to_sign.buffer, signing_state->string_to_sign.len); /* authorization values checked in the end-to-end tests */ return AWS_OP_SUCCESS; } static int s_do_sigv4_test_piecewise( struct 
aws_allocator *allocator, const char *parent_folder, const char *test_name, enum aws_signing_algorithm algorithm, enum aws_signature_type signature_type) { struct v4_test_context test_context; AWS_ZERO_STRUCT(test_context); ASSERT_SUCCESS( s_v4_test_context_init(&test_context, allocator, parent_folder, test_name, algorithm, signature_type)); if (test_context.should_generate_test_case) { ASSERT_SUCCESS(s_generate_test_case(&test_context, parent_folder, test_name)); } else { ASSERT_SUCCESS(s_check_piecewise_test_case(&test_context)); } s_v4_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } static int s_write_signed_request_to_file( struct v4_test_context *test_context, const char *parent_folder, const char *test_name, const struct aws_string *filename) { char path[1024]; snprintf(path, AWS_ARRAY_SIZE(path), "./%s/%s/%s", parent_folder, test_name, aws_string_c_str(filename)); FILE *fp = fopen(path, "w"); if (fp == NULL) { return AWS_OP_ERR; } struct aws_byte_cursor method_cursor; ASSERT_SUCCESS(aws_http_message_get_request_method(test_context->request, &method_cursor)); struct aws_byte_cursor path_cursor; ASSERT_SUCCESS(aws_http_message_get_request_path(test_context->request, &path_cursor)); fprintf( fp, PRInSTR " " PRInSTR " HTTP/1.1\n", AWS_BYTE_CURSOR_PRI(method_cursor), AWS_BYTE_CURSOR_PRI(path_cursor)); size_t header_count = aws_http_message_get_header_count(test_context->request); for (size_t i = 0; i < header_count; ++i) { struct aws_http_header header; AWS_ZERO_STRUCT(header); ASSERT_SUCCESS(aws_http_message_get_header(test_context->request, &header, i)); fprintf(fp, PRInSTR ":" PRInSTR "\n", AWS_BYTE_CURSOR_PRI(header.name), AWS_BYTE_CURSOR_PRI(header.value)); } fprintf(fp, "\n"); if (test_context->payload_stream) { int64_t stream_length = 0; ASSERT_SUCCESS(aws_input_stream_get_length(test_context->payload_stream, &stream_length)); struct aws_byte_buf stream_buf; ASSERT_SUCCESS(aws_byte_buf_init(&stream_buf, test_context->allocator, (size_t)stream_length)); ASSERT_SUCCESS(aws_input_stream_seek(test_context->payload_stream, 0, AWS_SSB_BEGIN)); ASSERT_SUCCESS(aws_input_stream_read(test_context->payload_stream, &stream_buf)); ASSERT_TRUE(stream_buf.len == (size_t)stream_length); fprintf(fp, PRInSTR, AWS_BYTE_BUF_PRI(stream_buf)); aws_byte_buf_clean_up(&stream_buf); } fclose(fp); return AWS_OP_SUCCESS; } static int s_check_header_value(struct aws_http_message *request, struct aws_http_header *expected_header) { size_t header_count = aws_http_message_get_header_count(request); for (size_t i = 0; i < header_count; ++i) { struct aws_http_header header; AWS_ZERO_STRUCT(header); ASSERT_SUCCESS(aws_http_message_get_header(request, &header, i)); if (aws_byte_cursor_eq_ignore_case(&header.name, &expected_header->name)) { if (aws_byte_cursor_eq(&header.value, &expected_header->value)) { aws_http_message_erase_header(request, i); return AWS_OP_SUCCESS; } } } ASSERT_TRUE(false); } static int s_check_query_authorization( struct v4_test_context *test_context, struct aws_byte_cursor signed_path, struct aws_byte_cursor expected_path) { struct aws_uri signed_uri; ASSERT_SUCCESS(aws_uri_init_parse(&signed_uri, test_context->allocator, &signed_path)); struct aws_uri expected_uri; ASSERT_SUCCESS(aws_uri_init_parse(&expected_uri, test_context->allocator, &expected_path)); ASSERT_BIN_ARRAYS_EQUALS(signed_uri.path.ptr, signed_uri.path.len, expected_uri.path.ptr, expected_uri.path.len); struct aws_array_list signed_params; ASSERT_SUCCESS( aws_array_list_init_dynamic(&signed_params, 
test_context->allocator, 10, sizeof(struct aws_uri_param))); ASSERT_SUCCESS(aws_uri_query_string_params(&signed_uri, &signed_params)); struct aws_array_list expected_params; ASSERT_SUCCESS( aws_array_list_init_dynamic(&expected_params, test_context->allocator, 10, sizeof(struct aws_uri_param))); ASSERT_SUCCESS(aws_uri_query_string_params(&expected_uri, &expected_params)); ASSERT_TRUE(aws_array_list_length(&signed_params) == aws_array_list_length(&expected_params)); struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_string(g_aws_signing_authorization_query_param_name); size_t signed_param_count = aws_array_list_length(&signed_params); for (size_t i = 0; i < signed_param_count; ++i) { struct aws_uri_param signed_param; aws_array_list_get_at(&signed_params, &signed_param, i); if (aws_byte_cursor_eq_ignore_case(&signed_param.key, &signature_cursor) && test_context->algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { ASSERT_SUCCESS(aws_validate_v4a_authorization_value( test_context->allocator, test_context->verification_key, aws_byte_cursor_from_buf(&test_context->test_case_data.expected_string_to_sign), signed_param.value)); } else { bool found = false; for (size_t j = 0; j < signed_param_count; ++j) { struct aws_uri_param expected_param; aws_array_list_get_at(&expected_params, &expected_param, j); if (aws_byte_cursor_eq_ignore_case(&signed_param.key, &expected_param.key)) { ASSERT_TRUE(aws_byte_cursor_eq_ignore_case(&signed_param.value, &expected_param.value)); found = true; break; } } ASSERT_TRUE(found); } } aws_uri_clean_up(&signed_uri); aws_uri_clean_up(&expected_uri); aws_array_list_clean_up(&signed_params); aws_array_list_clean_up(&expected_params); return AWS_OP_SUCCESS; } static int s_get_authorization_pair( const struct aws_byte_cursor *authorization_value, const struct aws_byte_cursor name, struct aws_byte_cursor value_end, struct aws_byte_cursor *value_out) { struct aws_byte_cursor value_start_cursor; AWS_ZERO_STRUCT(value_start_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(authorization_value, &name, &value_start_cursor)); aws_byte_cursor_advance(&value_start_cursor, name.len); struct aws_byte_cursor value_end_cursor; AWS_ZERO_STRUCT(value_end_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&value_start_cursor, &value_end, &value_end_cursor)); *value_out = value_start_cursor; value_out->len = value_end_cursor.ptr - value_start_cursor.ptr; return AWS_OP_SUCCESS; } static int s_compare_authorization_pair( const struct aws_byte_cursor *signed_value, const struct aws_byte_cursor *expected_value, const struct aws_byte_cursor name) { struct aws_byte_cursor signed_pair_value; AWS_ZERO_STRUCT(signed_pair_value); ASSERT_SUCCESS(s_get_authorization_pair(signed_value, name, aws_byte_cursor_from_c_str(", "), &signed_pair_value)); struct aws_byte_cursor expected_pair_value; AWS_ZERO_STRUCT(expected_pair_value); ASSERT_SUCCESS( s_get_authorization_pair(expected_value, name, aws_byte_cursor_from_c_str(", "), &expected_pair_value)); ASSERT_BIN_ARRAYS_EQUALS( signed_pair_value.ptr, signed_pair_value.len, expected_pair_value.ptr, expected_pair_value.len); return AWS_OP_SUCCESS; } static int s_check_header_authorization( struct v4_test_context *test_context, struct aws_http_header *header, struct aws_http_message *expected_request) { struct aws_byte_cursor signed_authorization_value = header->value; struct aws_byte_cursor expected_authorization_value; AWS_ZERO_STRUCT(expected_authorization_value); size_t expected_header_count = aws_http_message_get_header_count(expected_request); 
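    /* Locate the Authorization header in the expected (reference) request; its algorithm,
     * "Credential=", and "SignedHeaders=" components are compared piecewise below, while the
     * "Signature=" values are each verified against the expected string-to-sign rather than
     * compared byte-for-byte, since sigv4a (ECDSA) signatures are not deterministic. */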
for (size_t i = 0; i < expected_header_count; ++i) { struct aws_http_header expected_header; AWS_ZERO_STRUCT(expected_header); if (aws_http_message_get_header(expected_request, &expected_header, i)) { continue; } if (aws_byte_cursor_eq_c_str_ignore_case(&expected_header.name, "Authorization")) { expected_authorization_value = expected_header.value; break; } } ASSERT_TRUE(expected_authorization_value.len > 0); struct aws_byte_cursor space_cursor = aws_byte_cursor_from_c_str(" "); struct aws_byte_cursor signed_space_cursor; AWS_ZERO_STRUCT(signed_space_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&signed_authorization_value, &space_cursor, &signed_space_cursor)); struct aws_byte_cursor signed_algorithm_cursor = { .ptr = signed_authorization_value.ptr, .len = signed_space_cursor.ptr - signed_authorization_value.ptr, }; struct aws_byte_cursor expected_space_cursor; AWS_ZERO_STRUCT(expected_space_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&expected_authorization_value, &space_cursor, &expected_space_cursor)); struct aws_byte_cursor expected_algorithm_cursor = { .ptr = expected_authorization_value.ptr, .len = expected_space_cursor.ptr - expected_authorization_value.ptr, }; ASSERT_BIN_ARRAYS_EQUALS( signed_algorithm_cursor.ptr, signed_algorithm_cursor.len, expected_algorithm_cursor.ptr, expected_algorithm_cursor.len); ASSERT_SUCCESS(s_compare_authorization_pair( &signed_authorization_value, &expected_authorization_value, aws_byte_cursor_from_c_str("Credential="))); ASSERT_SUCCESS(s_compare_authorization_pair( &signed_authorization_value, &expected_authorization_value, aws_byte_cursor_from_c_str("SignedHeaders="))); struct aws_byte_buf *string_to_sign = &test_context->test_case_data.expected_string_to_sign; struct aws_byte_cursor signature_key_cursor = aws_byte_cursor_from_c_str("Signature="); struct aws_byte_cursor signed_signature_value; AWS_ZERO_STRUCT(signed_signature_value); ASSERT_SUCCESS( aws_byte_cursor_find_exact(&signed_authorization_value, &signature_key_cursor, &signed_signature_value)); aws_byte_cursor_advance(&signed_signature_value, signature_key_cursor.len); ASSERT_SUCCESS(aws_validate_v4a_authorization_value( test_context->allocator, test_context->verification_key, aws_byte_cursor_from_buf(string_to_sign), signed_signature_value)); struct aws_byte_cursor expected_signature_value; AWS_ZERO_STRUCT(expected_signature_value); ASSERT_SUCCESS( aws_byte_cursor_find_exact(&expected_authorization_value, &signature_key_cursor, &expected_signature_value)); aws_byte_cursor_advance(&expected_signature_value, signature_key_cursor.len); ASSERT_SUCCESS(aws_validate_v4a_authorization_value( test_context->allocator, test_context->verification_key, aws_byte_cursor_from_buf(string_to_sign), expected_signature_value)); return AWS_OP_SUCCESS; } static int s_check_signed_request(struct v4_test_context *test_context, struct aws_byte_buf *expected_request_buffer) { struct aws_http_message *expected_request = NULL; struct aws_input_stream *body_stream = NULL; ASSERT_SUCCESS(s_parse_request( test_context->allocator, aws_byte_cursor_from_buf(expected_request_buffer), &expected_request, &body_stream)); ASSERT_NOT_NULL(expected_request); /* method */ struct aws_byte_cursor signed_method; AWS_ZERO_STRUCT(signed_method); aws_http_message_get_request_method(test_context->request, &signed_method); struct aws_byte_cursor expected_method; AWS_ZERO_STRUCT(expected_method); aws_http_message_get_request_method(expected_request, &expected_method); ASSERT_BIN_ARRAYS_EQUALS(expected_method.ptr, 
expected_method.len, signed_method.ptr, signed_method.len); /* path + query string */ struct aws_byte_cursor signed_path; AWS_ZERO_STRUCT(signed_path); aws_http_message_get_request_path(test_context->request, &signed_path); struct aws_byte_cursor expected_path; AWS_ZERO_STRUCT(expected_path); aws_http_message_get_request_path(expected_request, &expected_path); if (test_context->config->signature_type == AWS_ST_HTTP_REQUEST_QUERY_PARAMS) { ASSERT_SUCCESS(s_check_query_authorization(test_context, signed_path, expected_path)); } else { ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, signed_path.ptr, signed_path.len); } /* headers */ size_t signed_header_count = aws_http_message_get_header_count(test_context->request); size_t expected_header_count = aws_http_message_get_header_count(expected_request); ASSERT_TRUE(signed_header_count == expected_header_count); for (size_t i = 0; i < signed_header_count; ++i) { struct aws_http_header header; AWS_ZERO_STRUCT(header); if (aws_http_message_get_header(test_context->request, &header, i)) { continue; } if (test_context->config->signature_type == AWS_ST_HTTP_REQUEST_HEADERS && aws_byte_cursor_eq_c_str_ignore_case(&header.name, "Authorization") && test_context->algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) { ASSERT_SUCCESS(s_check_header_authorization(test_context, &header, expected_request)); } else { ASSERT_SUCCESS(s_check_header_value(expected_request, &header)); } } aws_http_message_release(expected_request); aws_input_stream_destroy(body_stream); return AWS_OP_SUCCESS; } static int s_do_sigv4_test_end_to_end( struct aws_allocator *allocator, const char *parent_folder, const char *test_name, enum aws_signing_algorithm algorithm, enum aws_signature_type signature_type) { struct v4_test_context test_context; AWS_ZERO_STRUCT(test_context); ASSERT_SUCCESS( s_v4_test_context_init(&test_context, allocator, parent_folder, test_name, algorithm, signature_type)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, test_context.signable, (void *)test_context.config, s_on_signing_complete, &test_context)); s_wait_on_signing_complete(&test_context); if (test_context.should_generate_test_case) { ASSERT_SUCCESS(s_write_signed_request_to_file( &test_context, parent_folder, test_name, s_get_signed_request_filename(signature_type))); } else { ASSERT_SUCCESS(s_check_signed_request(&test_context, &test_context.test_case_data.sample_signed_request)); } s_v4_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } static int s_write_signature_to_file( struct v4_test_context *test_context, const char *parent_folder, const char *test_name, const struct aws_string *filename) { char path[1024]; snprintf(path, AWS_ARRAY_SIZE(path), "./%s/%s/%s", parent_folder, test_name, aws_string_c_str(filename)); FILE *fp = fopen(path, "w"); if (fp == NULL) { return AWS_OP_ERR; } struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_string(test_context->canonical_signing_auth_value); fprintf(fp, PRInSTR, AWS_BYTE_CURSOR_PRI(signature_cursor)); fclose(fp); return AWS_OP_SUCCESS; } static int s_do_sigv4_test_canonical_only( struct aws_allocator *allocator, const char *parent_folder, const char *test_name, enum aws_signing_algorithm algorithm, enum aws_signature_type signature_type) { struct v4_test_context test_context; AWS_ZERO_STRUCT(test_context); ASSERT_SUCCESS( s_v4_test_context_init(&test_context, allocator, parent_folder, test_name, algorithm, signature_type)); /* replace the http request signable with a canonical request signable */ 
aws_signable_destroy(test_context.signable); test_context.signable = aws_signable_new_canonical_request( allocator, aws_byte_cursor_from_buf(&test_context.test_case_data.expected_canonical_request)); if (signature_type == AWS_ST_HTTP_REQUEST_QUERY_PARAMS) { test_context.config->signature_type = AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS; } else { test_context.config->signature_type = AWS_ST_CANONICAL_REQUEST_HEADERS; } ASSERT_SUCCESS(aws_sign_request_aws( allocator, test_context.signable, (void *)test_context.config, s_on_signing_complete, &test_context)); s_wait_on_signing_complete(&test_context); if (test_context.should_generate_test_case) { ASSERT_SUCCESS(s_write_signature_to_file( &test_context, parent_folder, test_name, s_get_signature_filename(signature_type))); } else { if (algorithm == AWS_SIGNING_ALGORITHM_V4) { ASSERT_BIN_ARRAYS_EQUALS( test_context.test_case_data.sample_signature.buffer, test_context.test_case_data.sample_signature.len, test_context.canonical_signing_auth_value->bytes, test_context.canonical_signing_auth_value->len); } else { ASSERT_SUCCESS(aws_validate_v4a_authorization_value( test_context.allocator, test_context.verification_key, aws_byte_cursor_from_buf(&test_context.test_case_data.expected_string_to_sign), aws_byte_cursor_from_string(test_context.canonical_signing_auth_value))); } } s_v4_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } static int s_do_sigv4_test_signing( struct aws_allocator *allocator, const char *parent_folder, const char *test_name, enum aws_signing_algorithm algorithm, enum aws_signature_type signature_type) { ASSERT_SUCCESS(s_do_sigv4_test_piecewise(allocator, parent_folder, test_name, algorithm, signature_type)); ASSERT_SUCCESS(s_do_sigv4_test_end_to_end(allocator, parent_folder, test_name, algorithm, signature_type)); ASSERT_SUCCESS(s_do_sigv4_test_canonical_only(allocator, parent_folder, test_name, algorithm, signature_type)); return AWS_OP_SUCCESS; } static int s_do_sigv4a_test_case(struct aws_allocator *allocator, const char *test_name, const char *parent_folder) { /* Set up everything */ aws_auth_library_init(allocator); ASSERT_SUCCESS(s_do_sigv4_test_signing( allocator, parent_folder, test_name, AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC, AWS_ST_HTTP_REQUEST_HEADERS)); ASSERT_SUCCESS(s_do_sigv4_test_signing( allocator, parent_folder, test_name, AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC, AWS_ST_HTTP_REQUEST_QUERY_PARAMS)); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } #define DECLARE_SIGV4A_TEST_SUITE_CASE(test_name, test_name_string) \ static int s_sigv4a_##test_name##_test(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return s_do_sigv4a_test_case(allocator, test_name_string, "./v4a"); \ } \ AWS_TEST_CASE(sigv4a_##test_name##_test, s_sigv4a_##test_name##_test); DECLARE_SIGV4A_TEST_SUITE_CASE(get_header_key_duplicate, "get-header-key-duplicate"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_header_value_multiline, "get-header-value-multiline"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_header_value_order, "get-header-value-order"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_header_value_trim, "get-header-value-trim"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_unreserved, "get-unreserved"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_utf8, "get-utf8"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla, "get-vanilla"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla_empty_query_key, "get-vanilla-empty-query-key"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla_query, "get-vanilla-query"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla_query_order_key_case, 
"get-vanilla-query-order-key-case"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla_query_order_encoded, "get-vanilla-query-order-encoded"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla_unreserved, "get-vanilla-query-unreserved"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla_utf8_query, "get-vanilla-utf8-query"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_header_key_case, "post-header-key-case"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_header_key_sort, "post-header-key-sort"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_header_value_case, "post-header-value-case"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_vanilla, "post-vanilla"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_vanilla_empty_query_value, "post-vanilla-empty-query-value"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_vanilla_query, "post-vanilla-query"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_x_www_form_urlencoded, "post-x-www-form-urlencoded"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_x_www_form_urlencoded_parameters, "post-x-www-form-urlencoded-parameters"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_vanilla_with_session_token, "get-vanilla-with-session-token"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_sts_header_after, "post-sts-header-after"); DECLARE_SIGV4A_TEST_SUITE_CASE(post_sts_header_before, "post-sts-header-before"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_relative_normalized, "get-relative-normalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_relative_unnormalized, "get-relative-unnormalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_relative_relative_normalized, "get-relative-relative-normalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_relative_relative_unnormalized, "get-relative-relative-unnormalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slash_normalized, "get-slash-normalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slash_unnormalized, "get-slash-unnormalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slash_dot_slash_normalized, "get-slash-dot-slash-normalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slash_dot_slash_unnormalized, "get-slash-dot-slash-unnormalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slash_pointless_dot_normalized, "get-slash-pointless-dot-normalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slash_pointless_dot_unnormalized, "get-slash-pointless-dot-unnormalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slashes_normalized, "get-slashes-normalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_slashes_unnormalized, "get-slashes-unnormalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_space_normalized, "get-space-normalized"); DECLARE_SIGV4A_TEST_SUITE_CASE(get_space_unnormalized, "get-space-unnormalized"); static int s_do_sigv4_test_case(struct aws_allocator *allocator, const char *test_name, const char *parent_folder) { /* Set up everything */ aws_auth_library_init(allocator); ASSERT_SUCCESS(s_do_sigv4_test_signing( allocator, parent_folder, test_name, AWS_SIGNING_ALGORITHM_V4, AWS_ST_HTTP_REQUEST_HEADERS)); ASSERT_SUCCESS(s_do_sigv4_test_signing( allocator, parent_folder, test_name, AWS_SIGNING_ALGORITHM_V4, AWS_ST_HTTP_REQUEST_QUERY_PARAMS)); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } #define DECLARE_SIGV4_TEST_SUITE_CASE(test_name, test_name_string) \ static int s_sigv4_##test_name##_test(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return s_do_sigv4_test_case(allocator, test_name_string, "./v4"); \ } \ AWS_TEST_CASE(sigv4_##test_name##_test, s_sigv4_##test_name##_test); DECLARE_SIGV4_TEST_SUITE_CASE(get_header_key_duplicate, "get-header-key-duplicate"); DECLARE_SIGV4_TEST_SUITE_CASE(get_header_value_multiline, "get-header-value-multiline"); 
DECLARE_SIGV4_TEST_SUITE_CASE(get_header_value_order, "get-header-value-order"); DECLARE_SIGV4_TEST_SUITE_CASE(get_header_value_trim, "get-header-value-trim"); DECLARE_SIGV4_TEST_SUITE_CASE(get_unreserved, "get-unreserved"); DECLARE_SIGV4_TEST_SUITE_CASE(get_utf8, "get-utf8"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla, "get-vanilla"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla_empty_query_key, "get-vanilla-empty-query-key"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla_query, "get-vanilla-query"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla_query_order_key_case, "get-vanilla-query-order-key-case"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla_query_order_encoded, "get-vanilla-query-order-encoded"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla_unreserved, "get-vanilla-query-unreserved"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla_utf8_query, "get-vanilla-utf8-query"); DECLARE_SIGV4_TEST_SUITE_CASE(post_header_key_case, "post-header-key-case"); DECLARE_SIGV4_TEST_SUITE_CASE(post_header_key_sort, "post-header-key-sort"); DECLARE_SIGV4_TEST_SUITE_CASE(post_header_value_case, "post-header-value-case"); DECLARE_SIGV4_TEST_SUITE_CASE(post_vanilla, "post-vanilla"); DECLARE_SIGV4_TEST_SUITE_CASE(post_vanilla_empty_query_value, "post-vanilla-empty-query-value"); DECLARE_SIGV4_TEST_SUITE_CASE(post_vanilla_query, "post-vanilla-query"); DECLARE_SIGV4_TEST_SUITE_CASE(post_x_www_form_urlencoded, "post-x-www-form-urlencoded"); DECLARE_SIGV4_TEST_SUITE_CASE(post_x_www_form_urlencoded_parameters, "post-x-www-form-urlencoded-parameters"); DECLARE_SIGV4_TEST_SUITE_CASE(get_vanilla_with_session_token, "get-vanilla-with-session-token"); DECLARE_SIGV4_TEST_SUITE_CASE(post_sts_header_after, "post-sts-header-after"); DECLARE_SIGV4_TEST_SUITE_CASE(post_sts_header_before, "post-sts-header-before"); DECLARE_SIGV4_TEST_SUITE_CASE(get_relative_normalized, "get-relative-normalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_relative_unnormalized, "get-relative-unnormalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_relative_relative_normalized, "get-relative-relative-normalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_relative_relative_unnormalized, "get-relative-relative-unnormalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slash_normalized, "get-slash-normalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slash_unnormalized, "get-slash-unnormalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slash_dot_slash_normalized, "get-slash-dot-slash-normalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slash_dot_slash_unnormalized, "get-slash-dot-slash-unnormalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slash_pointless_dot_normalized, "get-slash-pointless-dot-normalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slash_pointless_dot_unnormalized, "get-slash-pointless-dot-unnormalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slashes_normalized, "get-slashes-normalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_slashes_unnormalized, "get-slashes-unnormalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_space_normalized, "get-space-normalized"); DECLARE_SIGV4_TEST_SUITE_CASE(get_space_unnormalized, "get-space-unnormalized"); AWS_STATIC_STRING_FROM_LITERAL(s_test_suite_service, "service"); AWS_STATIC_STRING_FROM_LITERAL(s_test_suite_region, "us-east-1"); AWS_STATIC_STRING_FROM_LITERAL(s_test_suite_access_key_id, "AKIDEXAMPLE"); AWS_STATIC_STRING_FROM_LITERAL(s_test_suite_secret_access_key, "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"); AWS_STATIC_STRING_FROM_LITERAL(s_test_suite_date, "2015-08-30T12:36:00Z"); static int s_do_header_skip_test( struct aws_allocator *allocator, aws_should_sign_header_fn 
*should_sign, const struct aws_string *request_contents, const struct aws_string *expected_canonical_request) { aws_auth_library_init(allocator); struct aws_credentials *credentials = aws_credentials_new_from_string( allocator, s_test_suite_access_key_id, s_test_suite_secret_access_key, NULL, UINT64_MAX); ASSERT_NOT_NULL(credentials); struct aws_signing_config_aws config; AWS_ZERO_STRUCT(config); config.credentials = credentials; config.algorithm = AWS_SIGNING_ALGORITHM_V4; config.signature_type = AWS_ST_HTTP_REQUEST_HEADERS; config.region = aws_byte_cursor_from_string(s_test_suite_region); config.service = aws_byte_cursor_from_string(s_test_suite_service); config.should_sign_header = should_sign; struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_test_suite_date); ASSERT_SUCCESS(aws_date_time_init_from_str_cursor(&config.date, &date_cursor, AWS_DATE_FORMAT_ISO_8601)); struct aws_http_message *message = NULL; struct aws_input_stream *body_stream = NULL; ASSERT_SUCCESS(s_parse_request(allocator, aws_byte_cursor_from_string(request_contents), &message, &body_stream)); struct aws_signable *signable = aws_signable_new_http_request(allocator, message); /* release reference to message early, to ensure signable keeps it alive */ aws_http_message_release(message); struct aws_signing_state_aws *signing_state = aws_signing_state_new(allocator, &config, signable, NULL, NULL); ASSERT_NOT_NULL(signing_state); ASSERT_SUCCESS(aws_signing_build_canonical_request(signing_state)); ASSERT_BIN_ARRAYS_EQUALS( expected_canonical_request->bytes, expected_canonical_request->len, signing_state->canonical_request.buffer, signing_state->canonical_request.len); aws_input_stream_destroy(body_stream); aws_signing_state_destroy(signing_state); aws_credentials_release(credentials); aws_signable_destroy(signable); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL( s_skip_xray_header_request, "GET / HTTP/1.1\n" "Host:example.amazonaws.com\n" "x-amzn-trace-id:fsdbofdshfdsjkjhfs\n\n"); AWS_STATIC_STRING_FROM_LITERAL( s_skip_xray_header_expected_canonical_request, "GET\n" "/\n" "\n" "host:example.amazonaws.com\n" "x-amz-date:20150830T123600Z\n" "\n" "host;x-amz-date\n" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); static int s_sigv4_skip_xray_header_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_header_skip_test( allocator, NULL, s_skip_xray_header_request, s_skip_xray_header_expected_canonical_request); } AWS_TEST_CASE(sigv4_skip_xray_header_test, s_sigv4_skip_xray_header_test); AWS_STATIC_STRING_FROM_LITERAL( s_skip_user_agent_header_request, "GET / HTTP/1.1\n" "User-agent:c sdk v1.0\n" "Host:example.amazonaws.com\n\n"); AWS_STATIC_STRING_FROM_LITERAL( s_skip_user_agent_header_expected_canonical_request, "GET\n" "/\n" "\n" "host:example.amazonaws.com\n" "x-amz-date:20150830T123600Z\n" "\n" "host;x-amz-date\n" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); static int s_sigv4_skip_user_agent_header_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_header_skip_test( allocator, NULL, s_skip_user_agent_header_request, s_skip_user_agent_header_expected_canonical_request); } AWS_TEST_CASE(sigv4_skip_user_agent_header_test, s_sigv4_skip_user_agent_header_test); AWS_STATIC_STRING_FROM_LITERAL( s_skip_custom_header_request, "GET / HTTP/1.1\n" "MyHeader:Blahblah\n" "Host:example.amazonaws.com\n" "AnotherHeader:Oof\n\n"); AWS_STATIC_STRING_FROM_LITERAL( 
s_skip_custom_header_expected_canonical_request, "GET\n" "/\n" "\n" "host:example.amazonaws.com\n" "x-amz-date:20150830T123600Z\n" "\n" "host;x-amz-date\n" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); static bool s_should_sign_header(const struct aws_byte_cursor *name, void *userdata) { (void)userdata; struct aws_byte_cursor my_header_cursor = aws_byte_cursor_from_c_str("myheader"); struct aws_byte_cursor another_header_cursor = aws_byte_cursor_from_c_str("anOtherHeader"); if (aws_byte_cursor_eq_ignore_case(name, &my_header_cursor) || aws_byte_cursor_eq_ignore_case(name, &another_header_cursor)) { return false; } return true; } static int s_sigv4_skip_custom_header_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_header_skip_test( allocator, s_should_sign_header, s_skip_custom_header_request, s_skip_custom_header_expected_canonical_request); } AWS_TEST_CASE(sigv4_skip_custom_header_test, s_sigv4_skip_custom_header_test); static int s_do_forbidden_header_param_test( struct aws_allocator *allocator, const struct aws_string *request_contents, enum aws_auth_errors expected_error) { aws_auth_library_init(allocator); struct aws_credentials *credentials = aws_credentials_new_from_string( allocator, s_test_suite_access_key_id, s_test_suite_secret_access_key, NULL, UINT64_MAX); ASSERT_NOT_NULL(credentials); struct aws_signing_config_aws config; AWS_ZERO_STRUCT(config); config.credentials = credentials; config.algorithm = AWS_SIGNING_ALGORITHM_V4; config.signature_type = AWS_ST_HTTP_REQUEST_HEADERS; config.region = aws_byte_cursor_from_string(s_test_suite_region); config.service = aws_byte_cursor_from_string(s_test_suite_service); struct aws_byte_cursor date_cursor = aws_byte_cursor_from_string(s_test_suite_date); ASSERT_SUCCESS(aws_date_time_init_from_str_cursor(&config.date, &date_cursor, AWS_DATE_FORMAT_ISO_8601)); struct aws_http_message *message = NULL; struct aws_input_stream *body_stream = NULL; ASSERT_SUCCESS(s_parse_request(allocator, aws_byte_cursor_from_string(request_contents), &message, &body_stream)); struct aws_signable *signable = aws_signable_new_http_request(allocator, message); struct aws_signing_state_aws *signing_state = aws_signing_state_new(allocator, &config, signable, NULL, NULL); ASSERT_NOT_NULL(signing_state); ASSERT_FAILS(aws_signing_build_canonical_request(signing_state)); ASSERT_TRUE(aws_last_error() == expected_error); aws_input_stream_destroy(body_stream); aws_http_message_release(message); aws_signing_state_destroy(signing_state); aws_credentials_release(credentials); aws_signable_destroy(signable); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL( s_amz_date_header_request, "GET / HTTP/1.1\n" "Host:example.amazonaws.com\n" "X-Amz-Date:20150830T123600Z"); static int s_sigv4_fail_date_header_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_amz_date_header_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER); } AWS_TEST_CASE(sigv4_fail_date_header_test, s_sigv4_fail_date_header_test); AWS_STATIC_STRING_FROM_LITERAL( s_amz_content_sha256_header_request, "GET / HTTP/1.1\n" "Host:example.amazonaws.com\n" "x-amz-content-sha256:lieslieslies"); static int s_sigv4_fail_content_header_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_amz_content_sha256_header_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER); } AWS_TEST_CASE(sigv4_fail_content_header_test, 
s_sigv4_fail_content_header_test); AWS_STATIC_STRING_FROM_LITERAL( s_authorization_header_request, "GET / HTTP/1.1\n" "Host:example.amazonaws.com\n" "Authorization:lieslieslies"); static int s_sigv4_fail_authorization_header_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_authorization_header_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER); } AWS_TEST_CASE(sigv4_fail_authorization_header_test, s_sigv4_fail_authorization_header_test); AWS_STATIC_STRING_FROM_LITERAL( s_amz_signature_param_request, "GET /?X-Amz-Signature=Something HTTP/1.1\n" "Host:example.amazonaws.com\n"); static int s_sigv4_fail_signature_param_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_amz_signature_param_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM); } AWS_TEST_CASE(sigv4_fail_signature_param_test, s_sigv4_fail_signature_param_test); AWS_STATIC_STRING_FROM_LITERAL( s_amz_date_param_request, "GET /?X-Amz-Date=Tomorrow HTTP/1.1\n" "Host:example.amazonaws.com\n"); static int s_sigv4_fail_date_param_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_amz_date_param_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM); } AWS_TEST_CASE(sigv4_fail_date_param_test, s_sigv4_fail_date_param_test); AWS_STATIC_STRING_FROM_LITERAL( s_amz_credential_param_request, "GET /?X-Amz-Credential=TopSekrit HTTP/1.1\n" "Host:example.amazonaws.com\n"); static int s_sigv4_fail_credential_param_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_amz_credential_param_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM); } AWS_TEST_CASE(sigv4_fail_credential_param_test, s_sigv4_fail_credential_param_test); AWS_STATIC_STRING_FROM_LITERAL( s_amz_algorithm_param_request, "GET /?X-Amz-Algorithm=BubbleSort HTTP/1.1\n" "Host:example.amazonaws.com\n"); static int s_sigv4_fail_algorithm_param_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_amz_algorithm_param_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM); } AWS_TEST_CASE(sigv4_fail_algorithm_param_test, s_sigv4_fail_algorithm_param_test); AWS_STATIC_STRING_FROM_LITERAL( s_amz_signed_headers_param_request, "GET /?X-Amz-SignedHeaders=User-Agent HTTP/1.1\n" "Host:example.amazonaws.com\n"); static int s_sigv4_fail_signed_headers_param_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_do_forbidden_header_param_test( allocator, s_amz_signed_headers_param_request, AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM); } AWS_TEST_CASE(sigv4_fail_signed_headers_param_test, s_sigv4_fail_signed_headers_param_test); struct null_credentials_state { struct aws_signing_result *result; int error_code; }; static void s_null_credentials_on_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) { struct null_credentials_state *state = userdata; state->result = result; state->error_code = error_code; } static int s_signer_null_credentials_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct get_credentials_mock_result results = { .credentials = NULL, .error_code = AWS_AUTH_SIGNING_NO_CREDENTIALS, }; struct aws_http_message *request = aws_http_message_new_request(allocator); struct aws_signable *signable = aws_signable_new_http_request(allocator, request); struct aws_signing_config_aws config = { .config_type = 
AWS_SIGNING_CONFIG_AWS, .algorithm = AWS_SIGNING_ALGORITHM_V4, .signature_type = AWS_ST_HTTP_REQUEST_HEADERS, .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-east-1"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("elasticdohickeyservice"), }; config.credentials_provider = aws_credentials_provider_new_mock(allocator, &results, 1, NULL); aws_date_time_init_now(&config.date); struct null_credentials_state state; AWS_ZERO_STRUCT(state); ASSERT_SUCCESS(aws_sign_request_aws( allocator, signable, (struct aws_signing_config_base *)&config, s_null_credentials_on_signing_complete, &state)); ASSERT_PTR_EQUALS(NULL, state.result); ASSERT_INT_EQUALS(AWS_AUTH_SIGNING_NO_CREDENTIALS, state.error_code); aws_credentials_provider_release(config.credentials_provider); aws_signable_destroy(signable); aws_http_message_release(request); return AWS_OP_SUCCESS; } AWS_TEST_CASE(signer_null_credentials_test, s_signer_null_credentials_test); static void s_anonymous_credentials_on_signing_complete( struct aws_signing_result *result, int error_code, void *userdata) { struct null_credentials_state *state = userdata; state->result = result; state->error_code = error_code; AWS_FATAL_ASSERT(result->properties.p_impl != NULL); AWS_FATAL_ASSERT(result->property_lists.p_impl != NULL); } static int s_signer_anonymous_credentials_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_credentials *credentials = aws_credentials_new_anonymous(allocator); ASSERT_NOT_NULL(credentials); struct aws_http_message *request = aws_http_message_new_request(allocator); struct aws_signable *signable = aws_signable_new_http_request(allocator, request); struct aws_signing_config_aws config = { .config_type = AWS_SIGNING_CONFIG_AWS, .algorithm = AWS_SIGNING_ALGORITHM_V4, .signature_type = AWS_ST_HTTP_REQUEST_HEADERS, .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-east-1"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("elasticdohickeyservice"), .credentials = credentials, }; struct null_credentials_state state; AWS_ZERO_STRUCT(state); ASSERT_SUCCESS(aws_sign_request_aws( allocator, signable, (struct aws_signing_config_base *)&config, s_anonymous_credentials_on_signing_complete, &state)); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, state.error_code); ASSERT_NOT_NULL(state.result); aws_credentials_release(credentials); aws_signable_destroy(signable); aws_http_message_release(request); return AWS_OP_SUCCESS; } AWS_TEST_CASE(signer_anonymous_credentials_test, s_signer_anonymous_credentials_test); static int s_signer_anonymous_credentials_provider_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct get_credentials_mock_result mock_result = { .credentials = aws_credentials_new_anonymous(allocator), .error_code = AWS_ERROR_SUCCESS, }; struct aws_http_message *request = aws_http_message_new_request(allocator); struct aws_signable *signable = aws_signable_new_http_request(allocator, request); struct aws_signing_config_aws config = { .config_type = AWS_SIGNING_CONFIG_AWS, .algorithm = AWS_SIGNING_ALGORITHM_V4, .signature_type = AWS_ST_HTTP_REQUEST_HEADERS, .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-east-1"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("elasticdohickeyservice"), }; config.credentials_provider = aws_credentials_provider_new_mock(allocator, &mock_result, 1, NULL); struct null_credentials_state state; AWS_ZERO_STRUCT(state); ASSERT_SUCCESS(aws_sign_request_aws( allocator, signable, (struct aws_signing_config_base *)&config, s_anonymous_credentials_on_signing_complete, &state)); 
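    /* With anonymous credentials sourced from the mock provider, signing should complete with
     * AWS_ERROR_SUCCESS and a non-null signing result, which the assertions below verify. */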
ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, state.error_code); ASSERT_NOT_NULL(state.result); aws_credentials_release(mock_result.credentials); aws_credentials_provider_release(config.credentials_provider); aws_signable_destroy(signable); aws_http_message_release(request); return AWS_OP_SUCCESS; } AWS_TEST_CASE(signer_anonymous_credentials_provider_test, s_signer_anonymous_credentials_provider_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/sso_token_util_tests.c000066400000000000000000000152571456575232400257740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "shared_credentials_test_definitions.h" #include static int s_parse_token_location_url_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_string *start_url = aws_string_new_from_c_str(allocator, "https://d-92671207e4.awsapps.com/start"); struct aws_string *token_path = aws_construct_sso_token_path(allocator, start_url); struct aws_byte_cursor token_cursor = aws_byte_cursor_from_string(token_path); struct aws_byte_cursor expected_token_cursor = aws_byte_cursor_from_c_str("13f9d35043871d073ab260e020f0ffde092cb14b.json"); struct aws_byte_cursor find_cursor; ASSERT_SUCCESS(aws_byte_cursor_find_exact(&token_cursor, &expected_token_cursor, &find_cursor)); aws_string_destroy(start_url); aws_string_destroy(token_path); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_token_location_url_test, s_parse_token_location_url_test); static int s_parse_token_location_session_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_string *session = aws_string_new_from_c_str(allocator, "admin"); struct aws_string *token_path = aws_construct_sso_token_path(allocator, session); struct aws_byte_cursor token_cursor = aws_byte_cursor_from_string(token_path); struct aws_byte_cursor expected_token_cursor = aws_byte_cursor_from_c_str("d033e22ae348aeb5660fc2140aec35850c4da997.json"); struct aws_byte_cursor find_cursor; ASSERT_SUCCESS(aws_byte_cursor_find_exact(&token_cursor, &expected_token_cursor, &find_cursor)); aws_string_destroy(session); aws_string_destroy(token_path); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_token_location_session_test, s_parse_token_location_session_test); AWS_STATIC_STRING_FROM_LITERAL( s_valid_token_json, "{\"accessToken\": \"string\",\"expiresAt\": \"2019-11-14T04:05:45Z\",\"refreshToken\": \"string\",\"clientId\": " "\"123321\",\"clientSecret\": \"ABCDE123\",\"registrationExpiresAt\": " "\"2022-03-06T19:53:17Z\",\"region\": \"us-west-2\",\"startUrl\": \"https://d-abc123.awsapps.com/start\"}"); static int s_parse_sso_token_valid(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_string *file_path = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(file_path, s_valid_token_json)); struct aws_sso_token *sso_token = aws_sso_token_new_from_file(allocator, file_path); ASSERT_TRUE(aws_string_eq_c_str(sso_token->access_token, "string")); ASSERT_INT_EQUALS((uint64_t)aws_date_time_as_epoch_secs(&sso_token->expiration), 1573704345); aws_string_destroy(file_path); aws_sso_token_destroy(sso_token); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_sso_token_valid, s_parse_sso_token_valid); AWS_STATIC_STRING_FROM_LITERAL(s_invalid_token_json, "invalid json"); 
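/* Negative cases: a cached token file that is not valid JSON, or that is missing or carries a
 * malformed "accessToken"/"expiresAt" field, must fail to parse and raise
 * AWS_AUTH_SSO_TOKEN_INVALID. */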
static int s_parse_sso_token_invalid(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_string *file_path = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(file_path, s_invalid_token_json)); ASSERT_NULL(aws_sso_token_new_from_file(allocator, file_path)); ASSERT_INT_EQUALS(AWS_AUTH_SSO_TOKEN_INVALID, aws_last_error()); aws_string_destroy(file_path); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_sso_token_invalid, s_parse_sso_token_invalid); AWS_STATIC_STRING_FROM_LITERAL( s_missing_access_token_json, "{\"expiresAt\": \"2019-11-14T04:05:45Z\",\"refreshToken\": \"string\",\"clientId\": " "\"123321\",\"clientSecret\": \"ABCDE123\",\"registrationExpiresAt\": " "\"2022-03-06T19:53:17Z\",\"region\": \"us-west-2\",\"startUrl\": \"https://d-abc123.awsapps.com/start\"}"); static int s_parse_sso_token_invalid_missing_access_token(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_string *file_path = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(file_path, s_missing_access_token_json)); ASSERT_NULL(aws_sso_token_new_from_file(allocator, file_path)); ASSERT_INT_EQUALS(AWS_AUTH_SSO_TOKEN_INVALID, aws_last_error()); aws_string_destroy(file_path); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_sso_token_invalid_missing_access_token, s_parse_sso_token_invalid_missing_access_token); AWS_STATIC_STRING_FROM_LITERAL( s_missing_expires_at_json, "{\"accessToken\": \"string\",\"refreshToken\": \"string\",\"clientId\": " "\"123321\",\"clientSecret\": \"ABCDE123\",\"registrationExpiresAt\": " "\"2022-03-06T19:53:17Z\",\"region\": \"us-west-2\",\"startUrl\": \"https://d-abc123.awsapps.com/start\"}"); static int s_parse_sso_token_missing_expires_at(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_string *file_path = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(file_path, s_missing_expires_at_json)); ASSERT_NULL(aws_sso_token_new_from_file(allocator, file_path)); ASSERT_INT_EQUALS(AWS_AUTH_SSO_TOKEN_INVALID, aws_last_error()); aws_string_destroy(file_path); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_sso_token_missing_expires_at, s_parse_sso_token_missing_expires_at); AWS_STATIC_STRING_FROM_LITERAL( s_invalid_expires_at_json, "{\"accessToken\": \"string\",\"expiresAt\": \"1234567\",\"refreshToken\": \"string\",\"clientId\": " "\"123321\",\"clientSecret\": \"ABCDE123\",\"registrationExpiresAt\": " "\"2022-03-06T19:53:17Z\",\"region\": \"us-west-2\",\"startUrl\": \"https://d-abc123.awsapps.com/start\"}"); static int s_parse_sso_token_invalid_expires_at(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct aws_string *file_path = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(file_path, s_invalid_expires_at_json)); ASSERT_NULL(aws_sso_token_new_from_file(allocator, file_path)); ASSERT_INT_EQUALS(AWS_AUTH_SSO_TOKEN_INVALID, aws_last_error()); aws_string_destroy(file_path); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_sso_token_invalid_expires_at, s_parse_sso_token_invalid_expires_at); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/test_chunked_signing.c000066400000000000000000001516351456575232400257100ustar00rootroot00000000000000/** * Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * The chunked signing test is built using the complete chunked signing example in the s3 docs: * https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html * */ AWS_STATIC_STRING_FROM_LITERAL(s_integration_chunked_access_key_id, "example"); AWS_STATIC_STRING_FROM_LITERAL(s_integration_chunked_secret_access_key, "example"); AWS_STATIC_STRING_FROM_LITERAL(s_chunked_access_key_id, "AKIAIOSFODNN7EXAMPLE"); AWS_STATIC_STRING_FROM_LITERAL(s_chunked_secret_access_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"); AWS_STATIC_STRING_FROM_LITERAL(s_chunked_test_region, "us-east-1"); AWS_STATIC_STRING_FROM_LITERAL(s_chunked_test_service, "s3"); AWS_STATIC_STRING_FROM_LITERAL(s_chunked_test_date, "Fri, 24 May 2013 00:00:00 GMT"); static struct aws_http_header s_host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3.amazonaws.com"), }; static struct aws_http_header s_integration_host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("example_bucket"), }; static struct aws_http_header s_storage_class_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("REDUCED_REDUNDANCY"), }; static struct aws_http_header s_content_encoding_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-chunked"), }; static struct aws_http_header s_decoded_length_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-decoded-content-length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("66560"), }; static struct aws_http_header s_integration_decoded_length_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-decoded-content-length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1"), }; static struct aws_http_header s_content_length_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("66824"), }; static struct aws_http_header s_trailer_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-trailer"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("first,second,third"), }; static struct aws_http_header s_integration_content_length_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("535"), }; static struct aws_http_header s_integration_trailer_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-trailer"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-crc32c"), }; AWS_STATIC_STRING_FROM_LITERAL(s_chunked_test_path, "/examplebucket/chunkObject.txt"); AWS_STATIC_STRING_FROM_LITERAL(s_integration_test_path, "/john_is_the_bees_knees_1_byte"); static struct aws_http_message *s_build_chunked_test_request(struct aws_allocator *allocator) { struct aws_http_message *request = aws_http_message_new_request(allocator); aws_http_message_add_header(request, s_host_header); aws_http_message_add_header(request, s_storage_class_header); aws_http_message_add_header(request, s_content_encoding_header); aws_http_message_add_header(request, s_decoded_length_header); aws_http_message_add_header(request, 
s_content_length_header); aws_http_message_set_request_method(request, aws_http_method_put); aws_http_message_set_request_path(request, aws_byte_cursor_from_string(s_chunked_test_path)); return request; } static struct aws_http_message *s_build_trailing_headers_test_request(struct aws_allocator *allocator) { struct aws_http_message *request = aws_http_message_new_request(allocator); aws_http_message_add_header(request, s_host_header); aws_http_message_add_header(request, s_storage_class_header); aws_http_message_add_header(request, s_content_encoding_header); aws_http_message_add_header(request, s_decoded_length_header); aws_http_message_add_header(request, s_content_length_header); aws_http_message_add_header(request, s_trailer_header); aws_http_message_set_request_method(request, aws_http_method_put); aws_http_message_set_request_path(request, aws_byte_cursor_from_string(s_chunked_test_path)); return request; } static struct aws_http_message *s_build_integration_test_request(struct aws_allocator *allocator) { struct aws_http_message *request = aws_http_message_new_request(allocator); aws_http_message_add_header(request, s_content_encoding_header); aws_http_message_add_header(request, s_integration_host_header); aws_http_message_add_header(request, s_integration_decoded_length_header); aws_http_message_add_header(request, s_integration_content_length_header); aws_http_message_add_header(request, s_integration_trailer_header); aws_http_message_set_request_method(request, aws_http_method_put); aws_http_message_set_request_path(request, aws_byte_cursor_from_string(s_integration_test_path)); return request; } static int s_initialize_request_signing_config( struct aws_signing_config_aws *config, struct aws_credentials *credentials) { config->config_type = AWS_SIGNING_CONFIG_AWS; config->algorithm = AWS_SIGNING_ALGORITHM_V4; config->signature_type = AWS_ST_HTTP_REQUEST_HEADERS; config->region = aws_byte_cursor_from_string(s_chunked_test_region); config->service = aws_byte_cursor_from_string(s_chunked_test_service); struct aws_byte_cursor chunked_test_date_cursor = aws_byte_cursor_from_string(s_chunked_test_date); if (aws_date_time_init_from_str_cursor(&config->date, &chunked_test_date_cursor, AWS_DATE_FORMAT_RFC822)) { return AWS_OP_ERR; } config->flags.use_double_uri_encode = false; config->flags.should_normalize_uri_path = true; config->signed_body_value = g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload; config->signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256; config->credentials = credentials; return AWS_OP_SUCCESS; } static int s_initialize_chunk_signing_config( struct aws_signing_config_aws *config, struct aws_credentials *credentials) { config->config_type = AWS_SIGNING_CONFIG_AWS; config->algorithm = AWS_SIGNING_ALGORITHM_V4; config->signature_type = AWS_ST_HTTP_REQUEST_CHUNK; config->region = aws_byte_cursor_from_string(s_chunked_test_region); config->service = aws_byte_cursor_from_string(s_chunked_test_service); struct aws_byte_cursor chunked_test_date_cursor = aws_byte_cursor_from_string(s_chunked_test_date); if (aws_date_time_init_from_str_cursor(&config->date, &chunked_test_date_cursor, AWS_DATE_FORMAT_RFC822)) { return AWS_OP_ERR; } config->flags.use_double_uri_encode = false; config->flags.should_normalize_uri_path = true; config->signed_body_header = AWS_SBHT_NONE; config->credentials = credentials; return AWS_OP_SUCCESS; } static int s_initialize_trailing_headers_signing_config( struct aws_signing_config_aws *config, struct aws_credentials *credentials) { 
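    /* Mirrors the chunk signing config above; the only intended difference is the
     * AWS_ST_HTTP_REQUEST_TRAILING_HEADERS signature type. */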
config->config_type = AWS_SIGNING_CONFIG_AWS; config->algorithm = AWS_SIGNING_ALGORITHM_V4; config->signature_type = AWS_ST_HTTP_REQUEST_TRAILING_HEADERS; config->region = aws_byte_cursor_from_string(s_chunked_test_region); config->service = aws_byte_cursor_from_string(s_chunked_test_service); struct aws_byte_cursor chunked_test_date_cursor = aws_byte_cursor_from_string(s_chunked_test_date); if (aws_date_time_init_from_str_cursor(&config->date, &chunked_test_date_cursor, AWS_DATE_FORMAT_RFC822)) { return AWS_OP_ERR; } config->flags.use_double_uri_encode = false; config->flags.should_normalize_uri_path = true; config->signed_body_header = AWS_SBHT_NONE; config->credentials = credentials; return AWS_OP_SUCCESS; } struct chunked_signing_tester { struct aws_credentials *credentials; struct aws_ecc_key_pair *verification_key; struct aws_http_message *request; struct aws_signable *request_signable; struct aws_http_message *integration_request; struct aws_signable *integration_request_signable; struct aws_http_message *trailing_request; struct aws_signable *trailing_request_signable; struct aws_signing_config_aws request_signing_config; struct aws_signing_config_aws chunk_signing_config; struct aws_signing_config_aws trailing_headers_signing_config; struct aws_byte_buf chunk1; struct aws_byte_buf chunk2; struct aws_byte_buf integration_chunk; struct aws_input_stream *chunk1_stream; struct aws_input_stream *chunk2_stream; struct aws_input_stream *integration_chunk_stream; struct aws_http_headers *trailing_headers; struct aws_http_headers *integration_trailing_headers; struct aws_mutex mutex; bool request_completed; struct aws_condition_variable c_var; struct aws_byte_buf request_authorization_header; struct aws_byte_buf last_signature; }; #define CHUNK1_SIZE 65536 #define CHUNK2_SIZE 1024 AWS_STATIC_STRING_FROM_LITERAL( s_chunked_test_ecc_pub_x, "18b7d04643359f6ec270dcbab8dce6d169d66ddc9778c75cfb08dfdb701637ab"); AWS_STATIC_STRING_FROM_LITERAL( s_chunked_test_ecc_pub_y, "fa36b35e4fe67e3112261d2e17a956ef85b06e44712d2850bcd3c2161e9993f2"); static struct aws_http_headers *s_trailing_headers_new(struct aws_allocator *allocator) { struct aws_http_headers *trailing_headers = aws_http_headers_new(allocator); const struct aws_http_header trailer1 = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("first"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1st"), }; const struct aws_http_header trailer2 = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("second"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("2nd"), }; const struct aws_http_header trailer3 = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("third"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("3rd"), }; aws_http_headers_add_header(trailing_headers, &trailer1); aws_http_headers_add_header(trailing_headers, &trailer2); aws_http_headers_add_header(trailing_headers, &trailer3); return trailing_headers; } static int s_chunked_signing_tester_init(struct aws_allocator *allocator, struct chunked_signing_tester *tester) { tester->credentials = aws_credentials_new_from_string( allocator, s_chunked_access_key_id, s_chunked_secret_access_key, NULL, UINT64_MAX); tester->verification_key = aws_ecc_key_new_from_hex_coordinates( allocator, AWS_CAL_ECDSA_P256, aws_byte_cursor_from_string(s_chunked_test_ecc_pub_x), aws_byte_cursor_from_string(s_chunked_test_ecc_pub_y)); tester->request = s_build_chunked_test_request(allocator); tester->request_signable = aws_signable_new_http_request(allocator, tester->request); tester->trailing_request = 
s_build_trailing_headers_test_request(allocator); tester->trailing_request_signable = aws_signable_new_http_request(allocator, tester->trailing_request); tester->integration_request = s_build_integration_test_request(allocator); tester->integration_request_signable = aws_signable_new_http_request(allocator, tester->integration_request); AWS_ZERO_STRUCT(tester->request_signing_config); ASSERT_SUCCESS(s_initialize_request_signing_config(&tester->request_signing_config, tester->credentials)); ASSERT_SUCCESS(s_initialize_chunk_signing_config(&tester->chunk_signing_config, tester->credentials)); ASSERT_SUCCESS( s_initialize_trailing_headers_signing_config(&tester->trailing_headers_signing_config, tester->credentials)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->request_authorization_header, allocator, 512)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->last_signature, allocator, 128)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->chunk1, allocator, CHUNK1_SIZE)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&tester->chunk1, 'a', CHUNK1_SIZE)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->chunk2, allocator, CHUNK2_SIZE)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&tester->chunk2, 'a', CHUNK2_SIZE)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->integration_chunk, allocator, 1)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&tester->integration_chunk, 'a', 1)); tester->trailing_headers = s_trailing_headers_new(allocator); tester->integration_trailing_headers = aws_http_headers_new(allocator); const struct aws_http_header checksum = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-crc32c"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("wdBDMA=="), }; aws_http_headers_add_header(tester->integration_trailing_headers, &checksum); struct aws_byte_cursor chunk1_cursor = aws_byte_cursor_from_buf(&tester->chunk1); tester->chunk1_stream = aws_input_stream_new_from_cursor(allocator, &chunk1_cursor); struct aws_byte_cursor chunk2_cursor = aws_byte_cursor_from_buf(&tester->chunk2); tester->chunk2_stream = aws_input_stream_new_from_cursor(allocator, &chunk2_cursor); struct aws_byte_cursor integration_chunk_cursor = aws_byte_cursor_from_buf(&tester->integration_chunk); tester->integration_chunk_stream = aws_input_stream_new_from_cursor(allocator, &integration_chunk_cursor); aws_mutex_init(&tester->mutex); tester->c_var = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; tester->request_completed = false; return AWS_OP_SUCCESS; } static void s_chunked_signing_tester_cleanup(struct chunked_signing_tester *tester) { aws_signable_destroy(tester->request_signable); aws_http_message_release(tester->request); aws_signable_destroy(tester->integration_request_signable); aws_signable_destroy(tester->trailing_request_signable); aws_http_message_release(tester->trailing_request); aws_http_message_release(tester->integration_request); aws_credentials_release(tester->credentials); aws_ecc_key_pair_release(tester->verification_key); aws_byte_buf_clean_up(&tester->request_authorization_header); aws_byte_buf_clean_up(&tester->last_signature); aws_byte_buf_clean_up(&tester->integration_chunk); aws_byte_buf_clean_up(&tester->chunk1); aws_byte_buf_clean_up(&tester->chunk2); aws_http_headers_release(tester->integration_trailing_headers); aws_http_headers_release(tester->trailing_headers); aws_input_stream_destroy(tester->integration_chunk_stream); aws_input_stream_destroy(tester->chunk1_stream); aws_input_stream_destroy(tester->chunk2_stream); aws_mutex_clean_up(&tester->mutex); } static void s_on_request_signing_complete(struct 
aws_signing_result *result, int error_code, void *userdata) { (void)error_code; struct chunked_signing_tester *tester = userdata; struct aws_array_list *headers = NULL; aws_signing_result_get_property_list(result, g_aws_http_headers_property_list_name, &headers); struct aws_byte_cursor auth_header_name = aws_byte_cursor_from_string(g_aws_signing_authorization_header_name); struct aws_byte_cursor auth_header_value; AWS_ZERO_STRUCT(auth_header_value); for (size_t i = 0; i < aws_array_list_length(headers); ++i) { struct aws_signing_result_property pair; AWS_ZERO_STRUCT(pair); if (aws_array_list_get_at(headers, &pair, i)) { continue; } if (pair.name == NULL) { continue; } struct aws_byte_cursor pair_name_cursor = aws_byte_cursor_from_string(pair.name); if (aws_byte_cursor_eq_ignore_case(&pair_name_cursor, &auth_header_name)) { auth_header_value = aws_byte_cursor_from_string(pair.value); break; } } aws_byte_buf_append_dynamic(&tester->request_authorization_header, &auth_header_value); struct aws_string *signature = NULL; aws_signing_result_get_property(result, g_aws_signature_property_name, &signature); struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_string(signature); aws_byte_buf_append_dynamic(&tester->last_signature, &signature_cursor); } static void s_on_integration_request_signing_complete( struct aws_signing_result *result, int error_code, void *userdata) { (void)error_code; struct chunked_signing_tester *tester = userdata; struct aws_array_list *headers = NULL; aws_signing_result_get_property_list(result, g_aws_http_headers_property_list_name, &headers); struct aws_byte_cursor auth_header_name = aws_byte_cursor_from_string(g_aws_signing_authorization_header_name); struct aws_byte_cursor auth_header_value; AWS_ZERO_STRUCT(auth_header_value); for (size_t i = 0; i < aws_array_list_length(headers); ++i) { struct aws_signing_result_property pair; AWS_ZERO_STRUCT(pair); if (aws_array_list_get_at(headers, &pair, i)) { continue; } if (pair.name == NULL) { continue; } struct aws_byte_cursor pair_name_cursor = aws_byte_cursor_from_string(pair.name); if (aws_byte_cursor_eq_ignore_case(&pair_name_cursor, &auth_header_name)) { auth_header_value = aws_byte_cursor_from_string(pair.value); break; } } aws_byte_buf_append_dynamic(&tester->request_authorization_header, &auth_header_value); struct aws_string *signature = NULL; aws_signing_result_get_property(result, g_aws_signature_property_name, &signature); struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_string(signature); aws_byte_buf_append_dynamic(&tester->last_signature, &signature_cursor); aws_apply_signing_result_to_http_request(tester->integration_request, aws_default_allocator(), result); } static void s_on_chunk_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) { (void)error_code; struct chunked_signing_tester *tester = userdata; tester->last_signature.len = 0; struct aws_string *signature = NULL; aws_signing_result_get_property(result, g_aws_signature_property_name, &signature); struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_string(signature); aws_byte_buf_append_dynamic(&tester->last_signature, &signature_cursor); } /* There is an error in the s3 docs where they list the authorization header value: it is missing a space between * the ',' and 'SignedHeaders=' as well as a space between the ',' and 'Signature=' */ AWS_STATIC_STRING_FROM_LITERAL( s_expected_request_authorization_header, "AWS4-HMAC-SHA256 
Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, " "SignedHeaders=content-encoding;content-length;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-" "amz-storage-class, Signature=4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_request_signature, "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_first_chunk_signature, "ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_second_chunk_signature, "0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_final_chunk_signature, "b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_trailing_headers_signature, "df5735bd9f3295cd9386572292562fefc93ba94e80a0a1ddcbd652c4e0a75e6c"); static int s_sigv4_chunked_signing_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct chunked_signing_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(s_chunked_signing_tester_init(allocator, &tester)); /* Sign the base request and check the signature and authorization header */ ASSERT_SUCCESS(aws_sign_request_aws( allocator, tester.request_signable, (void *)&tester.request_signing_config, s_on_request_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_authorization_header->bytes, s_expected_request_authorization_header->len, tester.request_authorization_header.buffer, tester.request_authorization_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_signature->bytes, s_expected_request_signature->len, tester.last_signature.buffer, tester.last_signature.len); /* Make and sign the first chunk */ struct aws_signable *first_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk1_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, first_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_first_chunk_signature->bytes, s_expected_first_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(first_chunk_signable); /* Make and sign the second chunk */ struct aws_signable *second_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk2_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, second_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_second_chunk_signature->bytes, s_expected_second_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(second_chunk_signable); /* Make and sign the final, empty chunk */ struct aws_signable *final_chunk_signable = aws_signable_new_chunk(allocator, NULL, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, final_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_final_chunk_signature->bytes, s_expected_final_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(final_chunk_signable); s_chunked_signing_tester_cleanup(&tester); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } 
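/* Registration of the SigV4 chunked-signing test above. The trailing-headers variant that follows repeats the same request -> chunk1 -> chunk2 -> empty-final-chunk flow and then signs a trailing-headers signable, checking each step against the fixed expected signatures. */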
AWS_TEST_CASE(sigv4_chunked_signing_test, s_sigv4_chunked_signing_test); static int s_sigv4_trailing_headers_signing_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct chunked_signing_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(s_chunked_signing_tester_init(allocator, &tester)); /* Sign the base request and check the signature and authorization header */ ASSERT_SUCCESS(aws_sign_request_aws( allocator, tester.request_signable, (void *)&tester.request_signing_config, s_on_request_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_authorization_header->bytes, s_expected_request_authorization_header->len, tester.request_authorization_header.buffer, tester.request_authorization_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_signature->bytes, s_expected_request_signature->len, tester.last_signature.buffer, tester.last_signature.len); /* Make and sign the first chunk */ struct aws_signable *first_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk1_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, first_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_first_chunk_signature->bytes, s_expected_first_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(first_chunk_signable); /* Make and sign the second chunk */ struct aws_signable *second_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk2_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, second_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_second_chunk_signature->bytes, s_expected_second_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(second_chunk_signable); /* Make and sign the final, empty chunk */ struct aws_signable *final_chunk_signable = aws_signable_new_chunk(allocator, NULL, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, final_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_final_chunk_signature->bytes, s_expected_final_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(final_chunk_signable); /* Make and sign the trailing headers */ struct aws_http_headers *trailing_headers = s_trailing_headers_new(allocator); struct aws_signable *trailing_headers_signable = aws_signable_new_trailing_headers( allocator, trailing_headers, aws_byte_cursor_from_buf(&tester.last_signature)); /* test aws_signable_new_trailing_headers properly acquires trailing_headers */ aws_http_headers_release(trailing_headers); ASSERT_SUCCESS(aws_sign_request_aws( allocator, trailing_headers_signable, (void *)&tester.trailing_headers_signing_config, s_on_chunk_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_trailing_headers_signature->bytes, s_expected_trailing_headers_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(trailing_headers_signable); s_chunked_signing_tester_cleanup(&tester); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sigv4_trailing_headers_signing_test, s_sigv4_trailing_headers_signing_test); 
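/* Expected canonical requests and per-chunk string-to-sign fragments for the SigV4A (ECDSA-P256) tests below. Because ECDSA signatures differ on every run, those tests do not compare against fixed signature strings; instead they rebuild each chunk's string-to-sign from the previous (run-dependent) signature plus these pre/post fragments and verify the result against the test ECC public key via aws_validate_v4a_authorization_value(). */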
AWS_STATIC_STRING_FROM_LITERAL( s_chunked_expected_canonical_request_cursor, "PUT\n" "/examplebucket/chunkObject.txt\n" "\n" "content-encoding:aws-chunked\n" "content-length:66824\n" "host:s3.amazonaws.com\n" "x-amz-content-sha256:STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD\n" "x-amz-date:20130524T000000Z\n" "x-amz-decoded-content-length:66560\n" "x-amz-region-set:us-east-1\n" "x-amz-storage-class:REDUCED_REDUNDANCY\n" "\n" "content-encoding;content-length;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-region-" "set;x-amz-storage-class\n" "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"); AWS_STATIC_STRING_FROM_LITERAL( s_chunked_expected_trailing_headers_canonical_request_cursor, "PUT\n" "/examplebucket/chunkObject.txt\n" "\n" "content-encoding:aws-chunked\n" "content-length:66824\n" "host:s3.amazonaws.com\n" "x-amz-content-sha256:STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER\n" "x-amz-date:20130524T000000Z\n" "x-amz-decoded-content-length:66560\n" "x-amz-region-set:us-east-1\n" "x-amz-storage-class:REDUCED_REDUNDANCY\n" "x-amz-trailer:first,second,third\n" "\n" "content-encoding;content-length;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-region-" "set;x-amz-storage-class;x-amz-trailer\n" "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"); AWS_STATIC_STRING_FROM_LITERAL( s_chunk_sts_pre_signature, "AWS4-ECDSA-P256-SHA256-PAYLOAD\n" "20130524T000000Z\n" "20130524/s3/aws4_request\n"); AWS_STATIC_STRING_FROM_LITERAL( s_chunk1_sts_post_signature, "\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n" "bf718b6f653bebc184e1479f1935b8da974d701b893afcf49e701f3e2f9f9c5a"); AWS_STATIC_STRING_FROM_LITERAL( s_chunk2_sts_post_signature, "\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n" "2edc986847e209b4016e141a6dc8716d3207350f416969382d431539bf292e4a"); AWS_STATIC_STRING_FROM_LITERAL( s_chunk3_sts_post_signature, "\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n" "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); AWS_STATIC_STRING_FROM_LITERAL( s_trailing_headers_expected_sts_pre_signature, "AWS4-ECDSA-P256-SHA256-TRAILER\n" "20130524T000000Z\n" "20130524/s3/aws4_request\n"); AWS_STATIC_STRING_FROM_LITERAL( s_trailing_headers_expected_sts_post_signature, "\n83d8f190334fb741bc8daf73c891689d320bd8017756bc730c540021ed48001f"); static int s_sigv4a_chunked_signing_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct chunked_signing_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(s_chunked_signing_tester_init(allocator, &tester)); tester.request_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; tester.request_signing_config.signed_body_value = g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload; tester.chunk_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; /* Sign the base request */ ASSERT_SUCCESS(aws_sign_request_aws( allocator, tester.request_signable, (void *)&tester.request_signing_config, s_on_request_signing_complete, &tester)); struct aws_byte_cursor signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* * Validate the request signature */ ASSERT_SUCCESS(aws_verify_sigv4a_signing( allocator, tester.request_signable, (void *)&tester.request_signing_config, aws_byte_cursor_from_string(s_chunked_expected_canonical_request_cursor), signature_cursor, aws_byte_cursor_from_string(s_chunked_test_ecc_pub_x), 
aws_byte_cursor_from_string(s_chunked_test_ecc_pub_y))); /* Manually build the first chunk string-to-sign since it's based on a signature that varies per run */ struct aws_byte_buf chunk_string_to_sign; ASSERT_SUCCESS(aws_byte_buf_init(&chunk_string_to_sign, allocator, 512)); struct aws_byte_cursor chunk_sts_pre_signature = aws_byte_cursor_from_string(s_chunk_sts_pre_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_pre_signature)); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &signature_cursor)); struct aws_byte_cursor chunk_sts_post_signature = aws_byte_cursor_from_string(s_chunk1_sts_post_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_post_signature)); /* Make and sign the first chunk */ struct aws_signable *first_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk1_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, first_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); struct aws_byte_cursor chunk_signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* Verify the first chunk's signature */ ASSERT_SUCCESS(aws_validate_v4a_authorization_value( allocator, tester.verification_key, aws_byte_cursor_from_buf(&chunk_string_to_sign), chunk_signature_cursor)); aws_signable_destroy(first_chunk_signable); /* Manually build the second chunk string-to-sign since it's based on a signature that varies per run */ chunk_string_to_sign.len = 0; chunk_sts_pre_signature = aws_byte_cursor_from_string(s_chunk_sts_pre_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_pre_signature)); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_signature_cursor)); chunk_sts_post_signature = aws_byte_cursor_from_string(s_chunk2_sts_post_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_post_signature)); /* Make and sign the second chunk */ struct aws_signable *second_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk2_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, second_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); chunk_signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* Verify the second chunk's signature */ ASSERT_SUCCESS(aws_validate_v4a_authorization_value( allocator, tester.verification_key, aws_byte_cursor_from_buf(&chunk_string_to_sign), chunk_signature_cursor)); aws_signable_destroy(second_chunk_signable); /* Manually build the final chunk string-to-sign since it's based on a signature that varies per run */ chunk_string_to_sign.len = 0; chunk_sts_pre_signature = aws_byte_cursor_from_string(s_chunk_sts_pre_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_pre_signature)); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_signature_cursor)); chunk_sts_post_signature = aws_byte_cursor_from_string(s_chunk3_sts_post_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_post_signature)); /* Make and sign the final, empty chunk */ struct aws_signable *final_chunk_signable = aws_signable_new_chunk(allocator, NULL, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, final_chunk_signable, (void 
*)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); chunk_signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* Verify the final chunk's signature */ ASSERT_SUCCESS(aws_validate_v4a_authorization_value( allocator, tester.verification_key, aws_byte_cursor_from_buf(&chunk_string_to_sign), chunk_signature_cursor)); aws_signable_destroy(final_chunk_signable); aws_byte_buf_clean_up(&chunk_string_to_sign); s_chunked_signing_tester_cleanup(&tester); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sigv4a_chunked_signing_test, s_sigv4a_chunked_signing_test); static int s_sigv4a_trailing_headers_signing_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct chunked_signing_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(s_chunked_signing_tester_init(allocator, &tester)); tester.request_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; tester.request_signing_config.signed_body_value = g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload_trailer; tester.chunk_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; tester.trailing_headers_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; /* Sign the base request */ ASSERT_SUCCESS(aws_sign_request_aws( allocator, tester.trailing_request_signable, (void *)&tester.request_signing_config, s_on_request_signing_complete, &tester)); struct aws_byte_cursor signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* * Validate the request signature */ ASSERT_SUCCESS(aws_verify_sigv4a_signing( allocator, tester.trailing_request_signable, (void *)&tester.request_signing_config, aws_byte_cursor_from_string(s_chunked_expected_trailing_headers_canonical_request_cursor), signature_cursor, aws_byte_cursor_from_string(s_chunked_test_ecc_pub_x), aws_byte_cursor_from_string(s_chunked_test_ecc_pub_y))); /* Manually build the first chunk string-to-sign since it's based on a signature that varies per run */ struct aws_byte_buf chunk_string_to_sign; ASSERT_SUCCESS(aws_byte_buf_init(&chunk_string_to_sign, allocator, 512)); struct aws_byte_cursor chunk_sts_pre_signature = aws_byte_cursor_from_string(s_chunk_sts_pre_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_pre_signature)); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &signature_cursor)); struct aws_byte_cursor chunk_sts_post_signature = aws_byte_cursor_from_string(s_chunk1_sts_post_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_post_signature)); /* Make and sign the first chunk */ struct aws_signable *first_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk1_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, first_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); struct aws_byte_cursor chunk_signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* Verify the first chunk's signature */ ASSERT_SUCCESS(aws_validate_v4a_authorization_value( allocator, tester.verification_key, aws_byte_cursor_from_buf(&chunk_string_to_sign), chunk_signature_cursor)); aws_signable_destroy(first_chunk_signable); /* Manually build the second chunk string-to-sign since it's based on a signature that varies per run */ chunk_string_to_sign.len = 0; chunk_sts_pre_signature = 
aws_byte_cursor_from_string(s_chunk_sts_pre_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_pre_signature)); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_signature_cursor)); chunk_sts_post_signature = aws_byte_cursor_from_string(s_chunk2_sts_post_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_post_signature)); /* Make and sign the second chunk */ struct aws_signable *second_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk2_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, second_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); chunk_signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* Verify the second chunk's signature */ ASSERT_SUCCESS(aws_validate_v4a_authorization_value( allocator, tester.verification_key, aws_byte_cursor_from_buf(&chunk_string_to_sign), chunk_signature_cursor)); aws_signable_destroy(second_chunk_signable); /* Manually build the final chunk string-to-sign since it's based on a signature that varies per run */ chunk_string_to_sign.len = 0; chunk_sts_pre_signature = aws_byte_cursor_from_string(s_chunk_sts_pre_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_pre_signature)); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_signature_cursor)); chunk_sts_post_signature = aws_byte_cursor_from_string(s_chunk3_sts_post_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_post_signature)); /* Make and sign the final, empty chunk */ struct aws_signable *final_chunk_signable = aws_signable_new_chunk(allocator, NULL, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, final_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); chunk_signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); /* Verify the final chunk's signature */ ASSERT_SUCCESS(aws_validate_v4a_authorization_value( allocator, tester.verification_key, aws_byte_cursor_from_buf(&chunk_string_to_sign), chunk_signature_cursor)); aws_signable_destroy(final_chunk_signable); chunk_string_to_sign.len = 0; chunk_sts_pre_signature = aws_byte_cursor_from_string(s_trailing_headers_expected_sts_pre_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_pre_signature)); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_signature_cursor)); chunk_sts_post_signature = aws_byte_cursor_from_string(s_trailing_headers_expected_sts_post_signature); ASSERT_SUCCESS(aws_byte_buf_append(&chunk_string_to_sign, &chunk_sts_post_signature)); struct aws_signable *trailing_headers_signable = aws_signable_new_trailing_headers( allocator, tester.trailing_headers, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, trailing_headers_signable, (void *)&tester.trailing_headers_signing_config, s_on_chunk_signing_complete, &tester)); struct aws_byte_cursor trailing_headers_signature_cursor = aws_trim_padded_sigv4a_signature(aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_validate_v4a_authorization_value( allocator, tester.verification_key, aws_byte_cursor_from_buf(&chunk_string_to_sign), trailing_headers_signature_cursor)); aws_signable_destroy(trailing_headers_signable); 
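/* All chunk and trailing-header signatures have been verified against the ECDSA public key; what remains is releasing the string-to-sign buffer and the tester resources. */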
aws_byte_buf_clean_up(&chunk_string_to_sign); s_chunked_signing_tester_cleanup(&tester); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sigv4a_trailing_headers_signing_test, s_sigv4a_trailing_headers_signing_test); static int s_on_incoming_headers_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { struct chunked_signing_tester *tester = user_data; (void)tester; (void)stream; /* Ignore informational headers */ if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) { return AWS_OP_SUCCESS; } int status = 0; aws_http_stream_get_incoming_response_status(stream, &status); fprintf(stdout, "Response Status: %d\n", status); for (size_t i = 0; i < num_headers; ++i) { fwrite(header_array[i].name.ptr, 1, header_array[i].name.len, stdout); fprintf(stdout, ": "); fwrite(header_array[i].value.ptr, 1, header_array[i].value.len, stdout); fprintf(stdout, "\n"); } return AWS_OP_SUCCESS; } static int s_on_incoming_header_block_done_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)stream; (void)header_block; (void)user_data; return AWS_OP_SUCCESS; } static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { (void)error_code; (void)user_data; aws_http_stream_release(stream); } static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)user_data; fwrite(data->ptr, 1, data->len, stdout); return AWS_OP_SUCCESS; } void s_log_headers(struct aws_http_message *request) { struct aws_http_header header; AWS_ZERO_STRUCT(header); for (size_t i = 0; i < aws_http_message_get_header_count(request); ++i) { aws_http_message_get_header(request, &header, i); fwrite(header.name.ptr, 1, header.name.len, stdout); fprintf(stdout, ": "); fwrite(header.value.ptr, 1, header.value.len, stdout); fprintf(stdout, "\n"); } } void s_send_request(struct aws_http_connection *connection, int error_code, void *user_data) { (void)error_code; struct chunked_signing_tester *tester = user_data; struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .user_data = tester, .request = tester->integration_request, .on_response_headers = s_on_incoming_headers_fn, .on_response_header_block_done = s_on_incoming_header_block_done_fn, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, }; struct aws_http_stream *stream = aws_http_connection_make_request(connection, &opt); aws_http_stream_activate(stream); } static void s_on_client_connection_shutdown(struct aws_http_connection *connection, int error_code, void *user_data) { (void)error_code; (void)connection; struct chunked_signing_tester *tester = user_data; aws_mutex_lock(&tester->mutex); tester->request_completed = true; aws_mutex_unlock(&tester->mutex); aws_condition_variable_notify_all(&tester->c_var); } static bool s_completion_predicate(void *arg) { struct chunked_signing_tester *tester = arg; return tester->request_completed; } static int s_sigv4a_trailing_header_integration_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct chunked_signing_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(s_chunked_signing_tester_init(allocator, &tester)); tester.request_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; tester.request_signing_config.signed_body_value = 
g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload_trailer; tester.chunk_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; tester.trailing_headers_signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; AWS_ZERO_STRUCT(tester.request_signing_config.date); AWS_ZERO_STRUCT(tester.chunk_signing_config.date); AWS_ZERO_STRUCT(tester.trailing_headers_signing_config.date); aws_date_time_init_now(&tester.request_signing_config.date); tester.chunk_signing_config.date = tester.request_signing_config.date; tester.trailing_headers_signing_config.date = tester.request_signing_config.date; // aws_date_time_init_now(&tester.chunk_signing_config.date); // aws_date_time_init_now(&tester.trailing_headers_signing_config.date); struct aws_credentials *credentials = aws_credentials_new_from_string( allocator, s_integration_chunked_access_key_id, s_integration_chunked_secret_access_key, NULL, UINT64_MAX); tester.request_signing_config.credentials = credentials; tester.chunk_signing_config.credentials = credentials; tester.trailing_headers_signing_config.credentials = credentials; ASSERT_SUCCESS(aws_sign_request_aws( allocator, tester.integration_request_signable, (void *)&tester.request_signing_config, s_on_integration_request_signing_complete, &tester)); struct aws_signable *first_chunk_signable = aws_signable_new_chunk( allocator, tester.integration_chunk_stream, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, first_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); struct aws_byte_buf first_chunk_signature; AWS_ZERO_STRUCT(first_chunk_signature); aws_byte_buf_init_copy(&first_chunk_signature, aws_default_allocator(), &tester.last_signature); /* Make and sign the final, empty chunk */ struct aws_signable *final_chunk_signable = aws_signable_new_chunk(allocator, NULL, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, final_chunk_signable, (void *)&tester.chunk_signing_config, s_on_chunk_signing_complete, &tester)); struct aws_byte_buf final_chunk_signature; AWS_ZERO_STRUCT(final_chunk_signature); aws_byte_buf_init_copy(&final_chunk_signature, aws_default_allocator(), &tester.last_signature); struct aws_signable *trailing_headers_signable = aws_signable_new_trailing_headers( allocator, tester.integration_trailing_headers, aws_byte_cursor_from_buf(&tester.last_signature)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, trailing_headers_signable, (void *)&tester.trailing_headers_signing_config, s_on_chunk_signing_complete, &tester)); struct aws_byte_buf trailing_header_signature; AWS_ZERO_STRUCT(trailing_header_signature); aws_byte_buf_init_copy(&trailing_header_signature, aws_default_allocator(), &tester.last_signature); struct aws_byte_cursor pre_chunk = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1;chunk-signature="); struct aws_byte_cursor first_chunk = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\r\na\r\n0;chunk-signature="); struct aws_byte_cursor trailer_chunk = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\r\nx-amz-checksum-crc32c:wdBDMA==\r\nx-amz-trailer-signature:"); struct aws_byte_cursor carriage_return = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\r\n\r\n"); struct aws_byte_cursor first_chunk_signature_cursor = aws_byte_cursor_from_buf(&first_chunk_signature); struct aws_byte_cursor final_chunk_signature_cursor = aws_byte_cursor_from_buf(&final_chunk_signature); struct aws_byte_cursor trailing_header_signature_cursor = 
aws_byte_cursor_from_buf(&trailing_header_signature); AWS_ASSERT(aws_byte_cursor_is_valid(&pre_chunk)); AWS_ASSERT(aws_byte_cursor_is_valid(&first_chunk)); AWS_ASSERT(aws_byte_cursor_is_valid(&trailer_chunk)); AWS_ASSERT(aws_byte_cursor_is_valid(&carriage_return)); AWS_ASSERT(aws_byte_cursor_is_valid(&first_chunk_signature_cursor)); AWS_ASSERT(aws_byte_cursor_is_valid(&final_chunk_signature_cursor)); AWS_ASSERT(aws_byte_cursor_is_valid(&trailing_header_signature_cursor)); size_t body_buf_len = pre_chunk.len + first_chunk.len + trailer_chunk.len + carriage_return.len + first_chunk_signature_cursor.len + final_chunk_signature_cursor.len + trailing_header_signature_cursor.len; struct aws_byte_buf body_buffer; aws_byte_buf_init(&body_buffer, aws_default_allocator(), body_buf_len); aws_byte_buf_append(&body_buffer, &pre_chunk); aws_byte_buf_append(&body_buffer, &first_chunk_signature_cursor); aws_byte_buf_append(&body_buffer, &first_chunk); aws_byte_buf_append(&body_buffer, &final_chunk_signature_cursor); aws_byte_buf_append(&body_buffer, &trailer_chunk); aws_byte_buf_append(&body_buffer, &trailing_header_signature_cursor); aws_byte_buf_append(&body_buffer, &carriage_return); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&body_buffer); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(aws_default_allocator(), &body_cursor); aws_http_message_set_body_stream(tester.integration_request, body_stream); struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; struct aws_byte_cursor host = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("example_bucket"); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .connect_timeout_ms = 3000, .keep_alive_timeout_sec = 0, .keepalive = false, .keep_alive_interval_sec = 0, }; struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); client_options.on_setup = s_send_request; client_options.on_shutdown = s_on_client_connection_shutdown; client_options.user_data = &tester; client_options.allocator = aws_default_allocator(); client_options.host_name = host; client_options.bootstrap = bootstrap; client_options.socket_options = &socket_options; client_options.port = 80; AWS_LOGF_INFO(AWS_LS_AUTH_SIGNING, "Headers"); s_log_headers(tester.integration_request); AWS_LOGF_INFO(AWS_LS_AUTH_SIGNING, "Request Body\n" PRInSTR "\n", AWS_BYTE_BUF_PRI(body_buffer)); aws_http_client_connect(&client_options); aws_mutex_lock(&tester.mutex); aws_condition_variable_wait_pred(&tester.c_var, &tester.mutex, s_completion_predicate, &tester); aws_mutex_unlock(&tester.mutex); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sigv4a_trailing_header_integration_test, s_sigv4a_trailing_header_integration_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/test_event_signing.c000066400000000000000000000352151456575232400254030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_event_access_key_id, "access"); AWS_STATIC_STRING_FROM_LITERAL(s_event_secret_access_key, "secret"); AWS_STATIC_STRING_FROM_LITERAL(s_event_test_region, "us-east-1"); AWS_STATIC_STRING_FROM_LITERAL(s_event_test_service, "demo"); AWS_STATIC_STRING_FROM_LITERAL(s_event_request_date, "Fri, 16 Jan 1981 06:30:00 GMT"); AWS_STATIC_STRING_FROM_LITERAL(s_event_test_date, "Fri, 16 Jan 1981 06:30:01 GMT"); AWS_STATIC_STRING_FROM_LITERAL(s_event_test_date_2, "Fri, 16 Jan 1981 06:30:02 GMT"); AWS_STATIC_STRING_FROM_LITERAL(s_event_test_date_3, "Fri, 16 Jan 1981 06:30:03 GMT"); AWS_STATIC_STRING_FROM_LITERAL(s_event_test_date_4, "Fri, 16 Jan 1981 06:30:04 GMT"); static struct aws_http_header s_host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("demo.us-east-1.amazonaws.com"), }; static struct aws_http_header s_content_encoding_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/vnd.amazon.eventstream"), }; AWS_STATIC_STRING_FROM_LITERAL(s_event_test_path, "/streaming"); static struct aws_http_message *s_build_event_request(struct aws_allocator *allocator) { struct aws_http_message *request = aws_http_message_new_request(allocator); aws_http_message_add_header(request, s_host_header); aws_http_message_add_header(request, s_content_encoding_header); aws_http_message_set_request_method(request, aws_http_method_post); aws_http_message_set_request_path(request, aws_byte_cursor_from_string(s_event_test_path)); return request; } static int s_initialize_request_signing_config( struct aws_signing_config_aws *config, struct aws_credentials *credentials) { config->config_type = AWS_SIGNING_CONFIG_AWS; config->algorithm = AWS_SIGNING_ALGORITHM_V4; config->signature_type = AWS_ST_HTTP_REQUEST_HEADERS; config->region = aws_byte_cursor_from_string(s_event_test_region); config->service = aws_byte_cursor_from_string(s_event_test_service); struct aws_byte_cursor event_test_date_cursor = aws_byte_cursor_from_string(s_event_request_date); if (aws_date_time_init_from_str_cursor(&config->date, &event_test_date_cursor, AWS_DATE_FORMAT_RFC822)) { return AWS_OP_ERR; } config->flags.use_double_uri_encode = false; config->flags.should_normalize_uri_path = true; config->signed_body_value = g_aws_signed_body_value_streaming_aws4_hmac_sha256_events; config->signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256; config->credentials = credentials; return AWS_OP_SUCCESS; } static int s_initialize_event_signing_config( struct aws_signing_config_aws *config, struct aws_credentials *credentials) { config->config_type = AWS_SIGNING_CONFIG_AWS; config->algorithm = AWS_SIGNING_ALGORITHM_V4; config->signature_type = AWS_ST_HTTP_REQUEST_EVENT; config->region = aws_byte_cursor_from_string(s_event_test_region); config->service = aws_byte_cursor_from_string(s_event_test_service); config->flags.use_double_uri_encode = false; config->flags.should_normalize_uri_path = true; config->signed_body_header = AWS_SBHT_NONE; config->credentials = credentials; return AWS_OP_SUCCESS; } struct event_signing_tester { struct aws_credentials *credentials; struct aws_http_message *request; struct aws_signable *request_signable; struct aws_signing_config_aws request_signing_config; struct aws_signing_config_aws 
event_signing_config; struct aws_byte_buf chunk1; struct aws_byte_buf chunk2; struct aws_byte_buf chunk3; struct aws_input_stream *chunk1_stream; struct aws_input_stream *chunk2_stream; struct aws_input_stream *chunk3_stream; struct aws_mutex mutex; bool request_completed; struct aws_byte_buf request_authorization_header; struct aws_byte_buf last_signature; }; #define EVENT_SIZE 1 static int s_event_signing_tester_init(struct aws_allocator *allocator, struct event_signing_tester *tester) { tester->credentials = aws_credentials_new_from_string(allocator, s_event_access_key_id, s_event_secret_access_key, NULL, UINT64_MAX); tester->request = s_build_event_request(allocator); tester->request_signable = aws_signable_new_http_request(allocator, tester->request); AWS_ZERO_STRUCT(tester->request_signing_config); ASSERT_SUCCESS(s_initialize_request_signing_config(&tester->request_signing_config, tester->credentials)); ASSERT_SUCCESS(s_initialize_event_signing_config(&tester->event_signing_config, tester->credentials)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->request_authorization_header, allocator, 512)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->last_signature, allocator, 128)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->chunk1, allocator, EVENT_SIZE)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&tester->chunk1, 'A', EVENT_SIZE)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->chunk2, allocator, EVENT_SIZE)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&tester->chunk2, 'B', EVENT_SIZE)); ASSERT_SUCCESS(aws_byte_buf_init(&tester->chunk3, allocator, EVENT_SIZE)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&tester->chunk3, 'C', EVENT_SIZE)); struct aws_byte_cursor chunk1_cursor = aws_byte_cursor_from_buf(&tester->chunk1); tester->chunk1_stream = aws_input_stream_new_from_cursor(allocator, &chunk1_cursor); struct aws_byte_cursor chunk2_cursor = aws_byte_cursor_from_buf(&tester->chunk2); tester->chunk2_stream = aws_input_stream_new_from_cursor(allocator, &chunk2_cursor); struct aws_byte_cursor chunk3_cursor = aws_byte_cursor_from_buf(&tester->chunk3); tester->chunk3_stream = aws_input_stream_new_from_cursor(allocator, &chunk3_cursor); aws_mutex_init(&tester->mutex); tester->request_completed = false; return AWS_OP_SUCCESS; } static void s_event_signing_tester_cleanup(struct event_signing_tester *tester) { aws_signable_destroy(tester->request_signable); aws_http_message_release(tester->request); aws_credentials_release(tester->credentials); aws_byte_buf_clean_up(&tester->request_authorization_header); aws_byte_buf_clean_up(&tester->last_signature); aws_byte_buf_clean_up(&tester->chunk1); aws_byte_buf_clean_up(&tester->chunk2); aws_byte_buf_clean_up(&tester->chunk3); aws_input_stream_destroy(tester->chunk1_stream); aws_input_stream_destroy(tester->chunk2_stream); aws_input_stream_destroy(tester->chunk3_stream); aws_mutex_clean_up(&tester->mutex); } static void s_on_request_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) { AWS_FATAL_ASSERT(error_code == 0); struct event_signing_tester *tester = userdata; struct aws_array_list *headers = NULL; aws_signing_result_get_property_list(result, g_aws_http_headers_property_list_name, &headers); struct aws_byte_cursor auth_header_name = aws_byte_cursor_from_string(g_aws_signing_authorization_header_name); struct aws_byte_cursor auth_header_value; AWS_ZERO_STRUCT(auth_header_value); for (size_t i = 0; i < aws_array_list_length(headers); ++i) { struct aws_signing_result_property pair; AWS_ZERO_STRUCT(pair); if (aws_array_list_get_at(headers, &pair, i)) { 
continue; } if (pair.name == NULL) { continue; } struct aws_byte_cursor pair_name_cursor = aws_byte_cursor_from_string(pair.name); if (aws_byte_cursor_eq_ignore_case(&pair_name_cursor, &auth_header_name)) { auth_header_value = aws_byte_cursor_from_string(pair.value); break; } } aws_byte_buf_append_dynamic(&tester->request_authorization_header, &auth_header_value); struct aws_string *signature = NULL; aws_signing_result_get_property(result, g_aws_signature_property_name, &signature); struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_string(signature); aws_byte_buf_append_dynamic(&tester->last_signature, &signature_cursor); } static void s_on_event_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) { (void)error_code; struct event_signing_tester *tester = userdata; tester->last_signature.len = 0; struct aws_string *signature = NULL; aws_signing_result_get_property(result, g_aws_signature_property_name, &signature); struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_string(signature); aws_byte_buf_append_dynamic(&tester->last_signature, &signature_cursor); } AWS_STATIC_STRING_FROM_LITERAL( s_expected_request_authorization_header, "AWS4-HMAC-SHA256 Credential=access/19810116/us-east-1/demo/aws4_request, " "SignedHeaders=content-encoding;host;x-amz-content-sha256;x-amz-date, " "Signature=e1d8e8c8815e60969f2a34765c9a15945ffc0badbaa4b7e3b163ea19131e949b"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_request_signature, "e1d8e8c8815e60969f2a34765c9a15945ffc0badbaa4b7e3b163ea19131e949b"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_first_chunk_signature, "7aabf85b765e6a4d0d500b6e968657b14726fa3e1eb7e839302728ffd77629a5"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_second_chunk_signature, "f72aa9642f571d24a6e1ae42f10f073ad9448d8a028b6bcd82da081335adda02"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_third_chunk_signature, "632af120435b57ec241d8bfbb12e496dfd5e2730a1a02ac0ab6eaa230ae02e9a"); AWS_STATIC_STRING_FROM_LITERAL( s_expected_final_chunk_signature, "c6f679ddb3af68f5e82f0cf6761244cb2338cf11e7d01a24130aea1b7c17e53e"); static int s_sigv4_event_signing_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); struct event_signing_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(s_event_signing_tester_init(allocator, &tester)); /* Sign the base request and check the signature and authorization header */ ASSERT_SUCCESS(aws_sign_request_aws( allocator, tester.request_signable, (void *)&tester.request_signing_config, s_on_request_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_authorization_header->bytes, s_expected_request_authorization_header->len, tester.request_authorization_header.buffer, tester.request_authorization_header.len); ASSERT_BIN_ARRAYS_EQUALS( s_expected_request_signature->bytes, s_expected_request_signature->len, tester.last_signature.buffer, tester.last_signature.len); /* Make and sign the first chunk */ struct aws_signable *first_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk1_stream, aws_byte_cursor_from_buf(&tester.last_signature)); struct aws_byte_cursor event_test_date_cursor = aws_byte_cursor_from_string(s_event_test_date); ASSERT_SUCCESS(aws_date_time_init_from_str_cursor( &tester.event_signing_config.date, &event_test_date_cursor, AWS_DATE_FORMAT_RFC822)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, first_chunk_signable, (void *)&tester.event_signing_config, s_on_event_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( 
s_expected_first_chunk_signature->bytes, s_expected_first_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(first_chunk_signable); /* Make and sign the second chunk */ struct aws_signable *second_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk2_stream, aws_byte_cursor_from_buf(&tester.last_signature)); event_test_date_cursor = aws_byte_cursor_from_string(s_event_test_date_2); ASSERT_SUCCESS(aws_date_time_init_from_str_cursor( &tester.event_signing_config.date, &event_test_date_cursor, AWS_DATE_FORMAT_RFC822)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, second_chunk_signable, (void *)&tester.event_signing_config, s_on_event_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_second_chunk_signature->bytes, s_expected_second_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(second_chunk_signable); /* Make and sign the third chunk */ struct aws_signable *third_chunk_signable = aws_signable_new_chunk(allocator, tester.chunk3_stream, aws_byte_cursor_from_buf(&tester.last_signature)); event_test_date_cursor = aws_byte_cursor_from_string(s_event_test_date_3); ASSERT_SUCCESS(aws_date_time_init_from_str_cursor( &tester.event_signing_config.date, &event_test_date_cursor, AWS_DATE_FORMAT_RFC822)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, third_chunk_signable, (void *)&tester.event_signing_config, s_on_event_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_third_chunk_signature->bytes, s_expected_third_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(third_chunk_signable); /* Make and sign the final, empty chunk */ struct aws_signable *final_chunk_signable = aws_signable_new_chunk(allocator, NULL, aws_byte_cursor_from_buf(&tester.last_signature)); event_test_date_cursor = aws_byte_cursor_from_string(s_event_test_date_4); ASSERT_SUCCESS(aws_date_time_init_from_str_cursor( &tester.event_signing_config.date, &event_test_date_cursor, AWS_DATE_FORMAT_RFC822)); ASSERT_SUCCESS(aws_sign_request_aws( allocator, final_chunk_signable, (void *)&tester.event_signing_config, s_on_event_signing_complete, &tester)); ASSERT_BIN_ARRAYS_EQUALS( s_expected_final_chunk_signature->bytes, s_expected_final_chunk_signature->len, tester.last_signature.buffer, tester.last_signature.len); aws_signable_destroy(final_chunk_signable); s_event_signing_tester_cleanup(&tester); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sigv4_event_signing_test, s_sigv4_event_signing_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/test_signable.c000066400000000000000000000065551456575232400243350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "test_signable.h" #include #include struct aws_signable_test_impl { struct aws_input_stream *payload; struct aws_array_list headers; struct aws_byte_cursor uri; struct aws_byte_cursor method; }; static int s_aws_signable_test_get_property( const struct aws_signable *signable, const struct aws_string *name, struct aws_byte_cursor *out_value) { struct aws_signable_test_impl *impl = signable->impl; AWS_ZERO_STRUCT(*out_value); if (aws_string_eq(name, g_aws_http_uri_property_name)) { *out_value = impl->uri; } else if (aws_string_eq(name, g_aws_http_method_property_name)) { *out_value = impl->method; } else { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_aws_signable_test_get_property_list( const struct aws_signable *signable, const struct aws_string *name, struct aws_array_list **out_list) { struct aws_signable_test_impl *impl = signable->impl; *out_list = NULL; if (aws_string_eq(name, g_aws_http_headers_property_list_name)) { *out_list = &impl->headers; } else { return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return AWS_OP_SUCCESS; } static int s_aws_signable_test_get_payload_stream( const struct aws_signable *signable, struct aws_input_stream **out_input_stream) { struct aws_signable_test_impl *impl = signable->impl; *out_input_stream = impl->payload; return AWS_OP_SUCCESS; } static void s_aws_signable_test_destroy(struct aws_signable *signable) { if (signable == NULL) { return; } struct aws_signable_test_impl *impl = signable->impl; if (impl != NULL) { aws_array_list_clean_up(&impl->headers); } aws_mem_release(signable->allocator, signable); } static struct aws_signable_vtable s_signable_test_vtable = { .get_property = s_aws_signable_test_get_property, .get_property_list = s_aws_signable_test_get_property_list, .get_payload_stream = s_aws_signable_test_get_payload_stream, .destroy = s_aws_signable_test_destroy, }; struct aws_signable *aws_signable_new_test( struct aws_allocator *allocator, struct aws_byte_cursor *method, struct aws_byte_cursor *uri, struct aws_signable_property_list_pair *headers, size_t header_count, struct aws_input_stream *body_stream) { struct aws_signable *signable = NULL; struct aws_signable_test_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_test_impl)); AWS_ZERO_STRUCT(*signable); AWS_ZERO_STRUCT(*impl); signable->allocator = allocator; signable->vtable = &s_signable_test_vtable; signable->impl = impl; if (aws_array_list_init_dynamic( &impl->headers, allocator, header_count, sizeof(struct aws_signable_property_list_pair))) { goto on_error; } for (size_t i = 0; i < header_count; ++i) { aws_array_list_push_back(&impl->headers, &headers[i]); } impl->payload = body_stream; impl->method = *method; impl->uri = *uri; return signable; on_error: aws_signable_destroy(signable); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/test_signable.h000066400000000000000000000011731456575232400243310ustar00rootroot00000000000000#ifndef AWS_AUTH_TEST_SIGNABLE_H #define AWS_AUTH_TEST_SIGNABLE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include struct aws_byte_cursor; struct aws_input_stream; struct aws_signable; struct aws_signable_property_list_pair; struct aws_signable *aws_signable_new_test( struct aws_allocator *allocator, struct aws_byte_cursor *method, struct aws_byte_cursor *uri, struct aws_signable_property_list_pair *headers, size_t header_count, struct aws_input_stream *body_stream); #endif /* AWS_AUTH_TEST_SIGNABLE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-auth/tests/token_provider_sso_tests.c000066400000000000000000000706641456575232400266540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include "shared_credentials_test_definitions.h" struct sso_session_profile_example { const char *name; struct aws_byte_cursor text; }; static int s_sso_token_provider_profile_invalid_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); const struct sso_session_profile_example invalid_profile_examples[] = { { .name = "No config", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "[profile default]\naws_access_key_id=fake_access_key\naws_secret_access_key=fake_secret_key\n"), }, { .name = "No sso_start_url", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[profile " "default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_region=us-east-1\n"), }, { .name = "only sso_session", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[profile " "default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_session=dev\n[sso-session " "dev]\nsso_start_url=url\nsso_region=us-east-1"), }, }; struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_token_provider_sso_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), }; for (size_t i = 0; i < AWS_ARRAY_SIZE(invalid_profile_examples); ++i) { printf("invalid example [%zu]: %s\n", i, invalid_profile_examples[i].name); struct aws_string *config_contents = aws_string_new_from_cursor(allocator, &invalid_profile_examples[i].text); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, config_contents)); ASSERT_NULL(aws_token_provider_new_sso_profile(allocator, &options)); aws_string_destroy(config_contents); } aws_string_destroy(config_file_str); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_profile_invalid_profile_test, s_sso_token_provider_profile_invalid_profile_test); static int s_sso_token_provider_profile_valid_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_auth_library_init(allocator); static struct sso_session_profile_example s_valid_profile_examples[] = { { .name = "profile", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_region=us-east-" "1\nsso_start_url=url"), }, { .name = "with sso_session", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_region=us-east-1\nsso_start_url=url\nsso_" "session=dev\n[sso-session dev]\nsso_region=us-east-" "1\nsso_start_url=url2"), }, }; struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_token_provider_sso_profile_options options = { .config_file_name_override = 
aws_byte_cursor_from_string(config_file_str), }; for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_profile_examples); ++i) { printf("valid example [%zu]: %s\n", i, s_valid_profile_examples[i].name); struct aws_string *config_contents = aws_string_new_from_cursor(allocator, &s_valid_profile_examples[i].text); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, config_contents)); struct aws_credentials_provider *provider = aws_token_provider_new_sso_profile(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_release(provider); aws_string_destroy(config_contents); } aws_string_destroy(config_file_str); aws_auth_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_profile_valid_profile_test, s_sso_token_provider_profile_valid_profile_test); static struct aws_mock_token_provider_sso_tester { struct aws_tls_ctx *tls_ctx; struct aws_event_loop_group *el_group; struct aws_host_resolver *resolver; struct aws_client_bootstrap *bootstrap; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_credentials *credentials; bool has_received_credentials_callback; bool has_received_shutdown_callback; int error_code; } s_tester; static int s_aws_mock_token_provider_sso_tester_init(struct aws_allocator *allocator) { aws_auth_library_init(allocator); AWS_ZERO_STRUCT(s_tester); struct aws_tls_ctx_options tls_ctx_options; aws_tls_ctx_options_init_default_client(&tls_ctx_options, allocator); s_tester.tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); ASSERT_NOT_NULL(s_tester.tls_ctx); s_tester.el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = s_tester.el_group, .max_entries = 8, }; s_tester.resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.el_group, .host_resolver = s_tester.resolver, }; s_tester.bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); return AWS_OP_SUCCESS; } void s_aws_mock_token_provider_sso_tester_cleanup(void) { aws_tls_ctx_release(s_tester.tls_ctx); aws_client_bootstrap_release(s_tester.bootstrap); aws_host_resolver_release(s_tester.resolver); aws_event_loop_group_release(s_tester.el_group); aws_condition_variable_clean_up(&s_tester.signal); aws_mutex_clean_up(&s_tester.lock); aws_credentials_release(s_tester.credentials); aws_auth_library_clean_up(); } static int s_sso_token_provider_sso_session_invalid_config_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); const struct sso_session_profile_example invalid_config_examples[] = { { .name = "no sso-session", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_region=us-east-" "1\nsso_start_url=url"), }, { .name = "sso_session with without sso_region", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[profile " "default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_session=dev\n[sso-session " "dev]\nsso_start_url=url"), }, { .name = "sso_session with without sso_start_url", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[profile " "default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_session=dev\n[sso-session " "dev]\nsso_region=us-east-1"), }, { .name = "sso_session with different profile region", .text = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "[profile " "default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_session=dev\nsso_region=us-west-" "1\nsso_start_url=url\n[sso-session dev]\nsso_region=us-east-1\nsso_start_url=url"), }, { .name = "sso_session with different profile start url", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "[profile " "default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_session=dev\nsso_region=us-east-" "1\nsso_start_url=url\n[sso-session dev]\nsso_region=us-east-1\nsso_start_url=url2"), }, { .name = "different sso_session name", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_region=us-east-1\nsso_start_url=url\nsso_" "session=dev\n[sso-session dev2]\nsso_region=us-east-" "1\nsso_start_url=url"), }, }; struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_token_provider_sso_session_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .tls_ctx = s_tester.tls_ctx, .bootstrap = s_tester.bootstrap, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(invalid_config_examples); ++i) { printf("invalid example [%zu]: %s\n", i, invalid_config_examples[i].name); struct aws_string *config_contents = aws_string_new_from_cursor(allocator, &invalid_config_examples[i].text); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, config_contents)); ASSERT_NULL(aws_token_provider_new_sso_session(allocator, &options)); aws_string_destroy(config_contents); } aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_sso_session_invalid_config_test, s_sso_token_provider_sso_session_invalid_config_test); static int s_sso_token_provider_sso_session_valid_config_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); static struct sso_session_profile_example s_valid_profile_examples[] = { { .name = "sso-session", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_" "session=dev\n[sso-session dev]\nsso_region=us-east-" "1\nsso_start_url=url"), }, { .name = "with profile sso_region", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_region=us-east-1\nsso_" "session=dev\n[sso-session dev]\nsso_region=us-east-" "1\nsso_start_url=url"), }, { .name = "with profile sso_start_url", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_start_url=url\nsso_" "session=dev\n[sso-session dev]\nsso_region=us-east-" "1\nsso_start_url=url"), }, { .name = "with profile sso_region and sso_start_url", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "[default]\naws_access_key_id=fake_access_key\naws_secret_" "access_key=fake_secret_key\nsso_region=us-east-1\nsso_start_url=url\nsso_" "session=dev\n[sso-session dev]\nsso_region=us-east-" "1\nsso_start_url=url"), }, }; struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); struct aws_token_provider_sso_session_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .tls_ctx = s_tester.tls_ctx, .bootstrap = s_tester.bootstrap, }; for (size_t i = 0; i < 
AWS_ARRAY_SIZE(s_valid_profile_examples); ++i) { printf("valid example [%zu]: %s\n", i, s_valid_profile_examples[i].name); struct aws_string *config_contents = aws_string_new_from_cursor(allocator, &s_valid_profile_examples[i].text); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, config_contents)); struct aws_credentials_provider *provider = aws_token_provider_new_sso_session(allocator, &options); ASSERT_NOT_NULL(provider); aws_credentials_provider_release(provider); aws_string_destroy(config_contents); } aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_sso_session_valid_config_test, s_sso_token_provider_sso_session_valid_config_test); /* start_url should be same in `s_sso_profile_start_url` and `s_sso_profile_config_contents` */ AWS_STATIC_STRING_FROM_LITERAL(s_sso_profile_start_url, "https://d-123.awsapps.com/start"); AWS_STATIC_STRING_FROM_LITERAL( s_sso_profile_config_contents, "[default]\n" "sso_start_url = https://d-123.awsapps.com/start\n"); /* session name should be same in both `s_sso_session_name` and `s_sso_session_config_contents`*/ AWS_STATIC_STRING_FROM_LITERAL(s_sso_session_name, "session"); AWS_STATIC_STRING_FROM_LITERAL( s_sso_session_config_contents, "[default]\n" "sso_session = session\n" "[sso-session session]\n" "sso_start_url = https://d-123.awsapps.com/start\n" "sso_region = us-west-2\n"); AWS_STATIC_STRING_FROM_LITERAL( s_sso_token, "{\"accessToken\": \"ValidAccessToken\",\"expiresAt\": \"2015-03-12T05:35:19Z\"}"); AWS_STATIC_STRING_FROM_LITERAL(s_invalid_config, "invalid config"); AWS_STATIC_STRING_FROM_LITERAL(s_good_token, "ValidAccessToken"); static uint64_t s_token_expiration_s = 1426138519; static int s_sso_token_provider_sso_session_basic_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, s_sso_session_config_contents)); mock_aws_set_system_time(0); struct aws_token_provider_sso_session_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .tls_ctx = s_tester.tls_ctx, .bootstrap = s_tester.bootstrap, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_token_provider_new_sso_session(allocator, &options); ASSERT_NOT_NULL(provider); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_SUCCESS( aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_wait_on_credentials_callback(&callback_results); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_token(callback_results.credentials), s_good_token); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(callback_results.credentials), s_token_expiration_s); aws_get_credentials_test_callback_result_clean_up(&callback_results); 
aws_credentials_provider_release(provider); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_sso_session_basic_success, s_sso_token_provider_sso_session_basic_success); static int s_sso_token_provider_sso_session_config_file_cached(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, s_invalid_config)); struct aws_byte_buf profile_buffer = aws_byte_buf_from_c_str(aws_string_c_str(s_sso_session_config_contents)); struct aws_profile_collection *config_collection = aws_profile_collection_new_from_buffer(allocator, &profile_buffer, AWS_PST_CONFIG); mock_aws_set_system_time(0); struct aws_token_provider_sso_session_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .config_file_cached = config_collection, .tls_ctx = s_tester.tls_ctx, .bootstrap = s_tester.bootstrap, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_token_provider_new_sso_session(allocator, &options); ASSERT_NOT_NULL(provider); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_SUCCESS( aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_wait_on_credentials_callback(&callback_results); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_token(callback_results.credentials), s_good_token); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(callback_results.credentials), s_token_expiration_s); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(provider); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); aws_profile_collection_release(config_collection); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_sso_session_config_file_cached, s_sso_token_provider_sso_session_config_file_cached); static int s_sso_token_provider_sso_session_expired_token(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_session_name); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, 
s_sso_session_config_contents)); uint64_t nano_expiration = aws_timestamp_convert(s_token_expiration_s + 1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); mock_aws_set_system_time(nano_expiration); struct aws_token_provider_sso_session_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .tls_ctx = s_tester.tls_ctx, .bootstrap = s_tester.bootstrap, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_token_provider_new_sso_session(allocator, &options); ASSERT_NOT_NULL(provider); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_ERROR( AWS_AUTH_SSO_TOKEN_EXPIRED, aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_credentials_provider_release(provider); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_sso_session_expired_token, s_sso_token_provider_sso_session_expired_token); static int s_sso_token_provider_profile_basic_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_profile_start_url); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, s_sso_profile_config_contents)); mock_aws_set_system_time(0); struct aws_token_provider_sso_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_token_provider_new_sso_profile(allocator, &options); ASSERT_NOT_NULL(provider); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_SUCCESS( aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_wait_on_credentials_callback(&callback_results); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_token(callback_results.credentials), s_good_token); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(callback_results.credentials), s_token_expiration_s); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(provider); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_profile_basic_success, s_sso_token_provider_profile_basic_success); static int s_sso_token_provider_profile_cached_config_file(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; 
ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_profile_start_url); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, s_invalid_config)); struct aws_byte_buf profile_buffer = aws_byte_buf_from_c_str(aws_string_c_str(s_sso_profile_config_contents)); struct aws_profile_collection *config_collection = aws_profile_collection_new_from_buffer(allocator, &profile_buffer, AWS_PST_CONFIG); mock_aws_set_system_time(0); struct aws_token_provider_sso_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .config_file_cached = config_collection, .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_token_provider_new_sso_profile(allocator, &options); ASSERT_NOT_NULL(provider); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_SUCCESS( aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_wait_on_credentials_callback(&callback_results); ASSERT_CURSOR_VALUE_STRING_EQUALS(aws_credentials_get_token(callback_results.credentials), s_good_token); ASSERT_INT_EQUALS( aws_credentials_get_expiration_timepoint_seconds(callback_results.credentials), s_token_expiration_s); aws_get_credentials_test_callback_result_clean_up(&callback_results); aws_credentials_provider_release(provider); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); aws_profile_collection_release(config_collection); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_profile_cached_config_file, s_sso_token_provider_profile_cached_config_file); static int s_sso_token_provider_profile_expired_token(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_aws_mock_token_provider_sso_tester_init(allocator); /* redirect $HOME */ struct aws_string *tmp_home; ASSERT_SUCCESS(aws_create_random_home_directory(allocator, &tmp_home)); /* create token file */ struct aws_string *token_path = aws_construct_sso_token_path(allocator, s_sso_profile_start_url); ASSERT_NOT_NULL(token_path); ASSERT_SUCCESS(aws_create_directory_components(allocator, token_path)); ASSERT_SUCCESS(aws_create_profile_file(token_path, s_sso_token)); struct aws_string *config_file_str = aws_create_process_unique_file_name(allocator); ASSERT_SUCCESS(aws_create_profile_file(config_file_str, s_sso_profile_config_contents)); uint64_t nano_expiration = aws_timestamp_convert(s_token_expiration_s + 100, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); mock_aws_set_system_time(nano_expiration); struct aws_token_provider_sso_profile_options options = { .config_file_name_override = aws_byte_cursor_from_string(config_file_str), .system_clock_fn = mock_aws_get_system_time, }; struct aws_credentials_provider *provider = aws_token_provider_new_sso_profile(allocator, &options); ASSERT_NOT_NULL(provider); struct aws_get_credentials_test_callback_result callback_results; aws_get_credentials_test_callback_result_init(&callback_results, 1); ASSERT_ERROR( 
AWS_AUTH_SSO_TOKEN_EXPIRED, aws_credentials_provider_get_credentials(provider, aws_test_get_credentials_async_callback, &callback_results)); aws_credentials_provider_release(provider); aws_directory_delete(tmp_home, true); aws_string_destroy(tmp_home); aws_string_destroy(token_path); aws_string_destroy(config_file_str); s_aws_mock_token_provider_sso_tester_cleanup(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sso_token_provider_profile_expired_token, s_sso_token_provider_profile_expired_token); aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/000077500000000000000000000000001456575232400177675ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/.clang-format000066400000000000000000000031611456575232400223430ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/.clang-tidy000066400000000000000000000013361456575232400220260ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter' WarningsAsErrors: '*' HeaderFilterRegex: '.*(? 
packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} clang-sanitizers: runs-on: ubuntu-20.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON #Warning: this ci relies on ubuntu pulling correct version of openssl #Current version (18.04) pulls 1.1.1, but 22.04+ will pull in 3.0 linux-openssl-static: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --variant=openssl --cmake-extra=-DUSE_OPENSSL=ON #ubuntu 22.04 defaults to openssl3 version by default when installing #libssl-dev package. Hence we can rely on OS version to pull in openssl3. 
linux-openssl3-static: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-ubuntu-22-x64 build -p ${{ env.PACKAGE_NAME }} --variant=openssl --cmake-extra=-DUSE_OPENSSL=ON linux-openssl-shared: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --variant=openssl --cmake-extra=-DUSE_OPENSSL=ON --cmake-extra=-DBUILD_SHARED_LIBS=ON linux-boringssl: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --variant=boringssl --cmake-extra=-DUSE_OPENSSL=ON windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-cal\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-cal\build\aws-c-cal osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ 
env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} osx-min-deployment-target: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DCMAKE_OSX_DEPLOYMENT_TARGET=10.9 openbsd: runs-on: macos-12 # macos's virtual machine is faster than ubuntu's steps: - uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers uses: cross-platform-actions/action@v0.10.0 with: operating_system: openbsd architecture: x86-64 version: '7.2' shell: bash run: | sudo pkg_add py3-urllib3 python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. downstream: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} byo-crypto: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --variant=no-tests --cmake-extra=-DBYO_CRYPTO=ON aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400264700ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400301330ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400310230ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/.github/workflows/stale_issue.yml000066400000000000000000000046321456575232400264340ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job permissions: issues: write pull-requests: write steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deservers. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. 
dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/.gitignore000066400000000000000000000010401456575232400217520ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app .vscode/ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/CMakeLists.txt000066400000000000000000000134751456575232400225410ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. cmake_minimum_required(VERSION 3.0) project(aws-c-cal LANGUAGES C VERSION 0.1.0) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() set(CMAKE_FIND_PACKAGE_PREFER_CONFIG TRUE) option(BYO_CRYPTO "Set this if you want to provide your own cryptography implementation. This will cause the defaults to not be compiled." OFF) option(USE_OPENSSL "Set this if you want to use your system's OpenSSL 1.0.2/1.1.1 compatible libcrypto" OFF) option(AWS_USE_CRYPTO_SHARED_LIBS "Force c-cal to use shared libs in Findcrypto" OFF) if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake/modules") include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(AwsFindPackage) file(GLOB AWS_CAL_HEADERS "include/aws/cal/*.h" ) file(GLOB AWS_CAL_SRC "source/*.c" ) if (WIN32) if (NOT BYO_CRYPTO) file(GLOB AWS_CAL_OS_SRC "source/windows/*.c" ) if (AWS_SUPPORT_WIN7) set(PLATFORM_LIBS bcrypt) else() set(PLATFORM_LIBS ncrypt) endif() endif() if (MSVC) source_group("Header Files\\aws\\cal" FILES ${AWS_CAL_HEADERS}) source_group("Source Files" FILES ${AWS_CAL_SRC}) source_group("Source Files\\windows" FILES ${AWS_CAL_OS_SRC}) endif () elseif (APPLE) if (NOT BYO_CRYPTO) file(GLOB AWS_CAL_OS_SRC "source/darwin/*.c" ) find_library(SECURITY_LIB Security) if (NOT SECURITY_LIB) message(FATAL_ERROR "Security Framework not found") endif () list(APPEND PLATFORM_LIBS "-framework Security") endif() else () if (NOT BYO_CRYPTO) file(GLOB AWS_CAL_OS_SRC "source/unix/*.c" ) if (USE_OPENSSL AND NOT ANDROID) find_package(OpenSSL REQUIRED) find_package(Threads REQUIRED) set(PLATFORM_LIBS OpenSSL::Crypto Threads::Threads) message(STATUS "Using libcrypto from system: ${OPENSSL_CRYPTO_LIBRARY}") elseif(NOT USE_OPENSSL AND IN_SOURCE_BUILD) if (TARGET crypto) message(STATUS "Using libcrypto from AWS-LC") else() message(FATAL_ERROR "Target crypto is not defined, failed 
to find libcrypto.") endif() set(PLATFORM_LIBS crypto) else() # note aws_use_package() does this for you, except it appends to the public link targets # which we probably don't want for this case where we want the crypto dependency private if (IN_SOURCE_BUILD) set(PLATFORM_LIBS crypto) else() find_package(crypto REQUIRED) set(PLATFORM_LIBS AWS::crypto) endif() endif() endif() endif() file(GLOB CAL_HEADERS ${AWS_CAL_HEADERS} ) file(GLOB CAL_SRC ${AWS_CAL_SRC} ${AWS_CAL_OS_SRC} ) add_library(${PROJECT_NAME} ${CAL_SRC}) aws_set_common_properties(${PROJECT_NAME} NO_WEXTRA) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_CAL") aws_add_sanitizers(${PROJECT_NAME}) aws_use_package(aws-c-common) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS} ${PLATFORM_LIBS}) if (BYO_CRYPTO) target_compile_definitions(${PROJECT_NAME} PRIVATE -DBYO_CRYPTO) endif() # Our ABI is not yet stable set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_include_directories(${PROJECT_NAME} PUBLIC $ $) # When we install, the generated header will be at the INSTALL_INTERFACE:include location, # but at build time we need to explicitly include this here target_include_directories(${PROJECT_NAME} PUBLIC $) aws_prepare_shared_lib_exports(${PROJECT_NAME}) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) aws_check_headers(${PROJECT_NAME} ${AWS_CAL_HEADERS}) install(FILES ${AWS_CAL_HEADERS} DESTINATION "include/aws/cal" COMPONENT Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}/" NAMESPACE AWS:: COMPONENT Development) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake" COMPONENT Development) list(APPEND EXPORT_MODULES "cmake/modules/Findcrypto.cmake" ) install(FILES ${EXPORT_MODULES} DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/modules" COMPONENT Development) if (NOT CMAKE_CROSSCOMPILING AND NOT BYO_CRYPTO) include(CTest) if (BUILD_TESTING) add_subdirectory(bin/sha256_profile) add_subdirectory(bin/produce_x_platform_fuzz_corpus) add_subdirectory(bin/run_x_platform_fuzz_corpus) add_subdirectory(tests) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/CODE_OF_CONDUCT.md000066400000000000000000000004651456575232400225730ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/CONTRIBUTING.md000066400000000000000000000067371456575232400222350ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. 
When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-cal/issues), or [recently closed](https://github.com/awslabs/aws-c-cal/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-cal/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-cal/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/LICENSE000066400000000000000000000236361456575232400210060ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/NOTICE000066400000000000000000000001621456575232400206720ustar00rootroot00000000000000AWS C Cal Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/README.md000066400000000000000000000061211456575232400212460ustar00rootroot00000000000000## AWS C Cal AWS Crypto Abstraction Layer: Cross-Platform, C99 wrapper for cryptography primitives. ## License This library is licensed under the Apache 2.0 License. ## Supported Platforms * Windows (Vista and Later) * Apple * Unix (via OpenSSL-compatible libcrypto) ## Build Instructions CMake 3.0+ is required to build. `` must be an absolute path in the following instructions. #### Linux-Only Dependencies If you are building on Linux, there are several options for crypto libraries. The preferred choice is aws-lc, which can be built as follows. ``` git clone git@github.com:awslabs/aws-lc.git cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-lc/build --target install ``` Alternatively, OpenSSL versions 1.0.2 or 1.1.1, or BoringSSL at commit 9939e14 (other commits are not tested and not guaranteed to work), can be used. To build against OpenSSL or BoringSSL, specify -DUSE_OPENSSL=ON.
Typical OpenSSL flags can be used to help the project locate artifacts (-DLibCrypto_INCLUDE_DIR and -DLibCrypto_STATIC_LIBRARY). #### Building aws-c-cal and Remaining Dependencies ``` git clone git@github.com:awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-c-cal.git cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-cal/build --target install ``` ## Currently provided algorithms ### Hashes #### MD5 ##### Streaming ```` struct aws_hash *hash = aws_md5_new(allocator); aws_hash_update(hash, &your_buffer); aws_hash_finalize(hash, &output_buffer, 0); aws_hash_destroy(hash); ```` ##### One-Shot ```` aws_md5_compute(allocator, &your_buffer, &output_buffer, 0); ```` #### SHA256 ##### Streaming ```` struct aws_hash *hash = aws_sha256_new(allocator); aws_hash_update(hash, &your_buffer); aws_hash_finalize(hash, &output_buffer, 0); aws_hash_destroy(hash); ```` ##### One-Shot ```` aws_sha256_compute(allocator, &your_buffer, &output_buffer, 0); ```` ### HMAC #### SHA256 HMAC ##### Streaming ```` struct aws_hmac *hmac = aws_sha256_hmac_new(allocator, &secret_buf); aws_hmac_update(hmac, &your_buffer); aws_hmac_finalize(hmac, &output_buffer, 0); aws_hmac_destroy(hmac); ```` ##### One-Shot ```` aws_sha256_hmac_compute(allocator, &secret_buf, &your_buffer, &output_buffer, 0); ```` ## FAQ ### I want more algorithms, what do I do? Great! So do we! At a minimum, file an issue letting us know. If you want to file a Pull Request, we'd be happy to review and merge it when it's ready. ### Who should consume this package directly? Are you writing C directly? Then you should. Are you using any other programming language? This functionality will be exposed via the language-specific CRT packages. ### I found a security vulnerability in this package. What do I do? Because this package performs cryptographic operations, please don't file a public issue. Instead, email aws-sdk-common-runtime@amazon.com, and we'll work with you directly.
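To see the one-shot API above in context, here is a minimal sketch of a complete program (not part of the upstream sources) that hashes a string with SHA256 and prints the digest as hex. It assumes the headers and helpers used elsewhere in this repository (`aws/cal/cal.h`, `aws/cal/hash.h`, `aws/common/byte_buf.h`, `aws_default_allocator()`, `AWS_SHA256_LEN`); adjust include and link paths to match your install prefix.

````
/* Hypothetical example program (not shipped with this repo): one-shot SHA256
 * of a string, printed as hex. */
#include <aws/cal/cal.h>
#include <aws/cal/hash.h>

#include <aws/common/byte_buf.h>

#include <stdio.h>

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();
    aws_cal_library_init(allocator);

    struct aws_byte_cursor input = aws_byte_cursor_from_c_str("hello aws-c-cal");

    /* AWS_SHA256_LEN is the digest size in bytes; the output buffer needs room for it. */
    struct aws_byte_buf digest;
    aws_byte_buf_init(&digest, allocator, AWS_SHA256_LEN);

    if (aws_sha256_compute(allocator, &input, &digest, 0) == AWS_OP_SUCCESS) {
        for (size_t i = 0; i < digest.len; ++i) {
            printf("%02x", digest.buffer[i]);
        }
        printf("\n");
    }

    aws_byte_buf_clean_up(&digest);
    aws_cal_library_clean_up();
    return 0;
}
````

Link against the installed libraries (for example `-laws-c-cal -laws-c-common`, plus whichever libcrypto variant your build selected); the streaming and HMAC variants shown earlier follow the same init/compute/clean-up pattern.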
aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/000077500000000000000000000000001456575232400205375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/produce_x_platform_fuzz_corpus/000077500000000000000000000000001456575232400271045ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/produce_x_platform_fuzz_corpus/CMakeLists.txt000066400000000000000000000016661456575232400316550ustar00rootroot00000000000000 project(produce_x_platform_fuzz_corpus C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB PROFILE_SRC "*.c" ) set(PROFILE_PROJECT_NAME produce_x_platform_fuzz_corpus) add_executable(${PROFILE_PROJECT_NAME} ${PROFILE_SRC}) aws_set_common_properties(${PROFILE_PROJECT_NAME}) target_include_directories(${PROFILE_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${PROFILE_PROJECT_NAME} PRIVATE aws-c-cal) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " produce_x_platform_fuzz_corpus will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${PROFILE_PROJECT_NAME} EXPORT ${PROFILE_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime)aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/produce_x_platform_fuzz_corpus/main.c000066400000000000000000000161731456575232400302040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include struct produce_corpus_ctx { struct aws_allocator *allocator; const char *root_path; }; static struct aws_cli_option s_long_options[] = { {"output-path", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'o'}, {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_usage(int exit_code) { fprintf(stderr, "usage: produce_x_platform_fuzz_corpus [options]\n"); fprintf(stderr, "\n Options:\n\n"); fprintf( stderr, " --output-path DIRECTORY: path to output corpus to, default is the current working directory.\n"); fprintf(stderr, " -h, --help\n"); fprintf(stderr, " Display this message and quit.\n"); exit(exit_code); } static void s_parse_options(int argc, char **argv, struct produce_corpus_ctx *ctx) { while (true) { int option_index = 0; int c = aws_cli_getopt_long(argc, argv, "o:h", s_long_options, &option_index); if (c == -1) { break; } switch (c) { case 0: /* getopt_long() returns 0 if an option.flag is non-null */ break; case 'o': ctx->root_path = aws_cli_optarg; break; case 'h': s_usage(0); break; default: fprintf(stderr, "Unknown option\n"); s_usage(1); } } } /** * Runs thousands of ECDSA signatures, and dumps them out to a file. This assumes the same public key and * message to sign scheme is used by the verifying program. 
*/ int main(int argc, char *argv[]) { struct aws_allocator *allocator = aws_default_allocator(); aws_cal_library_init(allocator); struct produce_corpus_ctx ctx = { .allocator = allocator, }; s_parse_options(argc, argv, &ctx); struct aws_byte_buf output_path; aws_byte_buf_init(&output_path, allocator, 1024); struct aws_byte_cursor sub_dir_cur; if (ctx.root_path) { struct aws_byte_cursor root_path = aws_byte_cursor_from_c_str(ctx.root_path); aws_byte_buf_append_dynamic(&output_path, &root_path); if (root_path.ptr[root_path.len - 1] != AWS_PATH_DELIM) { aws_byte_buf_append_byte_dynamic(&output_path, (uint8_t)AWS_PATH_DELIM); } } #ifdef _WIN32 sub_dir_cur = aws_byte_cursor_from_c_str("windows\\"); #elif __APPLE__ sub_dir_cur = aws_byte_cursor_from_c_str("darwin/"); #else sub_dir_cur = aws_byte_cursor_from_c_str("unix/"); #endif aws_byte_buf_append_dynamic(&output_path, &sub_dir_cur); struct aws_string *directory = aws_string_new_from_buf(allocator, &output_path); aws_directory_create(directory); aws_string_destroy(directory); struct aws_byte_cursor file_name = aws_byte_cursor_from_c_str("p256_sig_corpus.txt"); aws_byte_buf_append_dynamic(&output_path, &file_name); struct aws_string *path = aws_string_new_from_buf(allocator, &output_path); struct aws_string *mode = aws_string_new_from_c_str(allocator, "w"); FILE *output_file = aws_fopen_safe(path, mode); if (!output_file) { fprintf( stderr, "Error %s, while opening file to: %s\n", aws_error_debug_str(aws_last_error()), aws_string_c_str(path)); exit(-1); } aws_string_destroy(mode); aws_string_destroy(path); aws_byte_buf_clean_up(&output_path); /* use pre-built private/pub key pairs, we'll fuzz via the input. */ uint8_t d[] = { 0x51, 0x9b, 0x42, 0x3d, 0x71, 0x5f, 0x8b, 0x58, 0x1f, 0x4f, 0xa8, 0xee, 0x59, 0xf4, 0x77, 0x1a, 0x5b, 0x44, 0xc8, 0x13, 0x0b, 0x4e, 0x3e, 0xac, 0xca, 0x54, 0xa5, 0x6d, 0xda, 0x72, 0xb4, 0x64, }; struct aws_byte_cursor private_key = aws_byte_cursor_from_array(d, sizeof(d)); uint8_t x[] = { 0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc, 0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83, }; uint8_t y[] = { 0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0, 0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); struct aws_ecc_key_pair *signing_key = aws_ecc_key_pair_new_from_private_key(allocator, AWS_CAL_ECDSA_P256, &private_key); struct aws_ecc_key_pair *verifying_key = aws_ecc_key_pair_new_from_public_key(allocator, AWS_CAL_ECDSA_P256, &pub_x, &pub_y); struct aws_byte_buf raw_buf; aws_byte_buf_init(&raw_buf, allocator, 1024); size_t max_iterations = 10000; size_t count = 0; struct aws_byte_cursor to_append = aws_byte_cursor_from_c_str("a"); struct aws_byte_buf to_sign; aws_byte_buf_init(&to_sign, allocator, AWS_SHA256_LEN); struct aws_byte_buf signature_output; aws_byte_buf_init(&signature_output, allocator, aws_ecc_key_pair_signature_length(signing_key)); struct aws_byte_buf hex_buf; aws_byte_buf_init(&hex_buf, allocator, 1024); for (; count < max_iterations; ++count) { struct aws_byte_cursor hash_input = aws_byte_cursor_from_buf(&raw_buf); aws_sha256_compute(allocator, &hash_input, &to_sign, 0); struct aws_byte_cursor signing_cur = aws_byte_cursor_from_buf(&to_sign); int signing_val = 
aws_ecc_key_pair_sign_message(signing_key, &signing_cur, &signature_output); (void)signing_val; struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_output); int verify_val = aws_ecc_key_pair_verify_signature(verifying_key, &signing_cur, &signature_cur); aws_hex_encode(&signature_cur, &hex_buf); struct aws_byte_cursor hex_encoded_cur = aws_byte_cursor_from_buf(&hex_buf); if (verify_val != AWS_OP_SUCCESS) { fprintf( stderr, "Signature: \"" PRInSTR "\" was produced but could not be verified\n", AWS_BYTE_CURSOR_PRI(hex_encoded_cur)); } fprintf(output_file, PRInSTR "\n", AWS_BYTE_CURSOR_PRI(hex_encoded_cur)); aws_byte_buf_append_dynamic(&raw_buf, &to_append); aws_byte_buf_reset(&hex_buf, true); aws_byte_buf_reset(&to_sign, true); aws_byte_buf_reset(&signature_output, true); } aws_byte_buf_clean_up(&hex_buf); aws_byte_buf_clean_up(&signature_output); aws_byte_buf_clean_up(&raw_buf); aws_ecc_key_pair_release(verifying_key); aws_ecc_key_pair_release(signing_key); fclose(output_file); aws_cal_library_clean_up(); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/run_x_platform_fuzz_corpus/000077500000000000000000000000001456575232400262475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/run_x_platform_fuzz_corpus/CMakeLists.txt000066400000000000000000000016521456575232400310130ustar00rootroot00000000000000 project(run_x_platform_fuzz_corpus C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB PROFILE_SRC "*.c" ) set(PROFILE_PROJECT_NAME run_x_platform_fuzz_corpus) add_executable(${PROFILE_PROJECT_NAME} ${PROFILE_SRC}) aws_set_common_properties(${PROFILE_PROJECT_NAME}) target_include_directories(${PROFILE_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${PROFILE_PROJECT_NAME} PRIVATE aws-c-cal) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " run_x_platform_fuzz_corpus will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${PROFILE_PROJECT_NAME} EXPORT ${PROFILE_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime)aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/run_x_platform_fuzz_corpus/main.c000066400000000000000000000227151456575232400273460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include struct run_corpus_ctx { struct aws_allocator *allocator; const char *root_path; }; static struct aws_cli_option s_long_options[] = { {"corpus-path", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'o'}, {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_usage(int exit_code) { fprintf(stderr, "usage: run_x_platform_fuzz_corpus [options]\n"); fprintf(stderr, "\n Options:\n\n"); fprintf( stderr, " --corpus-path DIRECTORY: path to scan for corpus files default is the current working directory.\n"); fprintf(stderr, " -h, --help\n"); fprintf(stderr, " Display this message and quit.\n"); exit(exit_code); } static void s_parse_options(int argc, char **argv, struct run_corpus_ctx *ctx) { while (true) { int option_index = 0; int c = aws_cli_getopt_long(argc, argv, "o:h", s_long_options, &option_index); if (c == -1) { break; } switch (c) { case 0: /* getopt_long() returns 0 if an option.flag is non-null */ break; case 'o': ctx->root_path = aws_cli_optarg; break; case 'h': s_usage(0); break; default: fprintf(stderr, "Unknown option\n"); s_usage(1); } } } /** * Attempts to load a corpus directory. If it's successful, it loads each platform's ECDSA corpus, and makes sure * it can actually verify the signatures in it provided the same key and message to sign are used as those used * to produce the signature. */ int main(int argc, char *argv[]) { struct aws_allocator *allocator = aws_default_allocator(); aws_cal_library_init(allocator); struct run_corpus_ctx ctx = { .allocator = allocator, }; s_parse_options(argc, argv, &ctx); uint8_t x[] = { 0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc, 0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83, }; uint8_t y[] = { 0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0, 0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); struct aws_ecc_key_pair *verifying_key = aws_ecc_key_pair_new_from_public_key(allocator, AWS_CAL_ECDSA_P256, &pub_x, &pub_y); struct aws_byte_buf scan_path; aws_byte_buf_init(&scan_path, allocator, 1024); if (ctx.root_path) { struct aws_byte_cursor root_path = aws_byte_cursor_from_c_str(ctx.root_path); aws_byte_buf_append_dynamic(&scan_path, &root_path); /* if (root_path.ptr[root_path.len - 1] != AWS_PATH_DELIM) { aws_byte_buf_append_byte_dynamic(&scan_path, (uint8_t)AWS_PATH_DELIM); }*/ } struct aws_string *scan_path_str = aws_string_new_from_buf(allocator, &scan_path); struct aws_directory_iterator *dir_iter = aws_directory_entry_iterator_new(allocator, scan_path_str); if (!dir_iter) { fprintf(stderr, "Unable to load fuzz corpus from %s\n", aws_string_c_str(scan_path_str)); exit(-1); } struct aws_byte_cursor corpus_file_name = aws_byte_cursor_from_c_str("p256_sig_corpus.txt"); size_t corpus_runs = 0; const struct aws_directory_entry *entry = aws_directory_entry_iterator_get_value(dir_iter); while (entry) { struct aws_string *corpus_file = NULL; if (entry->file_type & AWS_FILE_TYPE_DIRECTORY) { struct aws_string *potential_corpus_path = aws_string_new_from_cursor(allocator, &entry->path); struct 
aws_directory_iterator *potential_corpus_dir = aws_directory_entry_iterator_new(allocator, potential_corpus_path); if (potential_corpus_dir) { const struct aws_directory_entry *corpus_file_candidate = aws_directory_entry_iterator_get_value(potential_corpus_dir); while (corpus_file_candidate) { struct aws_byte_cursor find_unused; if (aws_byte_cursor_find_exact( &corpus_file_candidate->relative_path, &corpus_file_name, &find_unused) == AWS_OP_SUCCESS) { corpus_file = aws_string_new_from_cursor(allocator, &corpus_file_candidate->path); break; } if (aws_directory_entry_iterator_next(potential_corpus_dir) != AWS_OP_SUCCESS) { break; } corpus_file_candidate = aws_directory_entry_iterator_get_value(potential_corpus_dir); } aws_directory_entry_iterator_destroy(potential_corpus_dir); } aws_string_destroy(potential_corpus_path); } if (corpus_file) { corpus_runs++; fprintf(stdout, "Running corpus file found at %s:\n\n", aws_string_c_str(corpus_file)); struct aws_string *mode = aws_string_new_from_c_str(allocator, "r"); FILE *corpus_input_file = aws_fopen_safe(corpus_file, mode); if (!corpus_input_file) { fprintf(stderr, "Unable to open file at %s\n", aws_string_c_str(corpus_file)); exit(-1); } struct aws_byte_buf hex_decoded_buf; aws_byte_buf_init(&hex_decoded_buf, allocator, 1024); struct aws_byte_cursor to_append = aws_byte_cursor_from_c_str("a"); struct aws_byte_buf signed_value; aws_byte_buf_init(&signed_value, allocator, AWS_SHA256_LEN); struct aws_byte_buf to_hash; aws_byte_buf_init(&to_hash, allocator, 1024); char line_buf[1024]; AWS_ZERO_ARRAY(line_buf); size_t signatures_processed = 0; size_t signatures_failed = 0; while (fgets(line_buf, 1024, corpus_input_file)) { /* -1 to strip off the newline delimiter */ struct aws_byte_cursor line_cur = aws_byte_cursor_from_c_str(line_buf); line_cur.len -= 1; if (aws_hex_decode(&line_cur, &hex_decoded_buf) != AWS_OP_SUCCESS) { fprintf( stderr, "Invalid line in file detected. 
Could not hex decode.\n Line is " PRInSTR "\n", AWS_BYTE_CURSOR_PRI(line_cur)); exit(-1); } struct aws_byte_cursor to_hash_cur = aws_byte_cursor_from_buf(&to_hash); aws_sha256_compute(allocator, &to_hash_cur, &signed_value, 0); struct aws_byte_cursor signed_value_cur = aws_byte_cursor_from_buf(&signed_value); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&hex_decoded_buf); if (aws_ecc_key_pair_verify_signature(verifying_key, &signed_value_cur, &signature_cur)) { struct aws_byte_buf hex_encoded_sha; aws_byte_buf_init(&hex_encoded_sha, allocator, 1024); aws_hex_encode(&signed_value_cur, &hex_encoded_sha); struct aws_byte_cursor failed_sha = aws_byte_cursor_from_buf(&hex_encoded_sha); fprintf( stderr, "Failed to validate signature\n signature: " PRInSTR "\n message_signed: " PRInSTR "\n\n", AWS_BYTE_CURSOR_PRI(line_cur), AWS_BYTE_CURSOR_PRI(failed_sha)); signatures_failed++; aws_byte_buf_clean_up(&hex_encoded_sha); } aws_byte_buf_reset(&hex_decoded_buf, true); aws_byte_buf_reset(&signed_value, true); aws_byte_buf_append_dynamic(&to_hash, &to_append); AWS_ZERO_ARRAY(line_buf); signatures_processed++; } fprintf( stdout, "Corpus %d verification complete with %d failures out of %d signatures processed\n\n", (int)corpus_runs, (int)signatures_failed, (int)signatures_processed); aws_byte_buf_clean_up(&hex_decoded_buf); aws_byte_buf_clean_up(&to_hash); aws_byte_buf_clean_up(&signed_value); fclose(corpus_input_file); aws_string_destroy(mode); } aws_string_destroy(corpus_file); if (aws_directory_entry_iterator_next(dir_iter)) { break; } entry = aws_directory_entry_iterator_get_value(dir_iter); } aws_directory_entry_iterator_destroy(dir_iter); aws_string_destroy(scan_path_str); aws_byte_buf_clean_up(&scan_path); aws_ecc_key_pair_release(verifying_key); aws_cal_library_clean_up(); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/sha256_profile/000077500000000000000000000000001456575232400232675ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/sha256_profile/CMakeLists.txt000066400000000000000000000016061456575232400260320ustar00rootroot00000000000000 project(sha256_profile C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB PROFILE_SRC "*.c" ) set(PROFILE_PROJECT_NAME sha256_profile) add_executable(${PROFILE_PROJECT_NAME} ${PROFILE_SRC}) aws_set_common_properties(${PROFILE_PROJECT_NAME}) target_include_directories(${PROFILE_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${PROFILE_PROJECT_NAME} PRIVATE aws-c-cal) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " sha256_profile will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${PROFILE_PROJECT_NAME} EXPORT ${PROFILE_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime)aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/bin/sha256_profile/main.c000066400000000000000000000131631456575232400243630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include static void s_profile_streaming_hash_at_chunk_size( struct aws_allocator *allocator, struct aws_byte_cursor to_hash, size_t chunk_size, size_t alignment) { struct aws_hash *hash_impl = aws_sha256_new(allocator); AWS_FATAL_ASSERT(hash_impl); struct aws_byte_buf output_buf; AWS_FATAL_ASSERT( !aws_byte_buf_init(&output_buf, allocator, AWS_SHA256_LEN) && "allocation of output buffer failed!"); struct aws_byte_cursor to_hash_seeked = to_hash; uint64_t start = 0; AWS_FATAL_ASSERT(!aws_high_res_clock_get_ticks(&start) && "clock get ticks failed."); if (alignment) { size_t alignment_miss = (uintptr_t)to_hash_seeked.ptr % alignment; struct aws_byte_cursor unaligned_chunk = aws_byte_cursor_advance(&to_hash_seeked, alignment_miss); AWS_FATAL_ASSERT(!aws_hash_update(hash_impl, &unaligned_chunk) && "hash compute of unaligned chunk failed"); } while (to_hash_seeked.len) { size_t remaining = chunk_size > to_hash_seeked.len ? to_hash_seeked.len : chunk_size; struct aws_byte_cursor chunk_to_process = aws_byte_cursor_advance(&to_hash_seeked, remaining); AWS_FATAL_ASSERT(!aws_hash_update(hash_impl, &chunk_to_process) && "hash compute of chunk failed"); } AWS_FATAL_ASSERT(!aws_hash_finalize(hash_impl, &output_buf, 0) && "hash finalize failed"); uint64_t end = 0; AWS_FATAL_ASSERT(!aws_high_res_clock_get_ticks(&end) && "clock get ticks failed"); fprintf(stdout, "SHA256 streaming computation took %" PRIu64 "ns\n", end - start); aws_byte_buf_clean_up(&output_buf); aws_hash_destroy(hash_impl); } static void s_profile_oneshot_hash(struct aws_allocator *allocator, struct aws_byte_cursor to_hash) { struct aws_byte_buf output_buf; AWS_FATAL_ASSERT( !aws_byte_buf_init(&output_buf, allocator, AWS_SHA256_LEN) && "allocation of output buffer failed!"); uint64_t start = 0; AWS_FATAL_ASSERT(!aws_high_res_clock_get_ticks(&start) && "clock get ticks failed."); AWS_FATAL_ASSERT(!aws_sha256_compute(allocator, &to_hash, &output_buf, 0) && "Hash computation failed"); uint64_t end = 0; AWS_FATAL_ASSERT(!aws_high_res_clock_get_ticks(&end) && "clock get ticks failed"); fprintf(stdout, "SHA256 oneshot computation took %" PRIu64 "ns\n", end - start); aws_byte_buf_clean_up(&output_buf); } static void s_run_profiles(struct aws_allocator *allocator, size_t to_hash_size, const char *profile_name) { fprintf(stdout, "********************* SHA256 Profile %s ************************************\n\n", profile_name); struct aws_byte_buf to_hash; AWS_FATAL_ASSERT(!aws_byte_buf_init(&to_hash, allocator, to_hash_size) && "failed to allocate buffer for hashing"); AWS_FATAL_ASSERT(!aws_device_random_buffer(&to_hash) && "reading random data failed"); struct aws_byte_cursor to_hash_cur = aws_byte_cursor_from_buf(&to_hash); fprintf(stdout, "********************* Chunked/Alignment Runs *********************************\n\n"); fprintf(stdout, "****** 128 byte chunks ******\n\n"); fprintf(stdout, "8-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 128, 8); fprintf(stdout, "16-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 128, 16); fprintf(stdout, "64-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 128, 64); fprintf(stdout, "128-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 128, 128); fprintf(stdout, "\n****** 256 byte chunks ******\n\n"); fprintf(stdout, "8-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 256, 8); fprintf(stdout, 
"16-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 256, 16); fprintf(stdout, "64-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 256, 64); fprintf(stdout, "128-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 256, 128); fprintf(stdout, "\n******* 512 byte chunks *****\n\n"); fprintf(stdout, "8-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 512, 8); fprintf(stdout, "16-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 512, 16); fprintf(stdout, "64-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 512, 64); fprintf(stdout, "128-byte alignment:\n"); s_profile_streaming_hash_at_chunk_size(allocator, to_hash_cur, 512, 128); fprintf(stdout, "\n********************** Oneshot Run *******************************************\n\n"); s_profile_oneshot_hash(allocator, to_hash_cur); fprintf(stdout, "\n\n"); aws_byte_buf_clean_up(&to_hash); } int main(void) { struct aws_allocator *allocator = aws_default_allocator(); aws_cal_library_init(allocator); struct aws_hash *hash_impl = aws_sha256_new(allocator); fprintf(stdout, "Starting profile run for Sha256 using implementation %s\n\n", hash_impl->vtable->provider); s_run_profiles(allocator, 1024, "1 KB"); s_run_profiles(allocator, 1024 * 64, "64 KB"); s_run_profiles(allocator, 1024 * 128, "128 KB"); s_run_profiles(allocator, 1024 * 512, "512 KB"); aws_hash_destroy(hash_impl); aws_cal_library_clean_up(); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/builder.json000066400000000000000000000036741456575232400223220ustar00rootroot00000000000000{ "name": "aws-c-cal", "upstream": [ { "name": "aws-c-common" } ], "downstream": [ { "name": "aws-c-io" }, { "name": "aws-c-auth" } ], "targets": { "linux": { "upstream": [ { "name": "aws-lc" } ] }, "android": { "upstream": [ { "name": "aws-lc" } ] }, "openbsd": { "upstream": [ { "name": "aws-lc" } ] } }, "variants": { "openssl": { "hosts": { "ubuntu": { "packages": [ "libssl-dev" ] } }, "targets": { "linux": { "!upstream": [ { "name": "aws-c-common" } ] } } }, "boringssl": { "hosts": { "ubuntu": { "packages": [ "golang-go" ] } }, "targets": { "linux": { "!upstream": [ { "name": "aws-c-common" }, { "name": "boringssl", "commit": "9939e14" } ] } } }, "no-tests": { "!test_steps": [] } }, "test_steps": [ "test", [ "{install_dir}/bin/sha256_profile" ], "{install_dir}/bin/run_x_platform_fuzz_corpus --corpus-path {source_dir}/ecdsa-fuzz-corpus" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/cmake/000077500000000000000000000000001456575232400210475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/cmake/aws-c-cal-config.cmake000066400000000000000000000021611456575232400250630ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-common) if (NOT @BYO_CRYPTO@ AND NOT WIN32 AND NOT APPLE) # if NOT BYO_CRYPTO AND NOT WIN32 AND NOT APPLE if (@USE_OPENSSL@ AND NOT ANDROID) # if USE_OPENSSL AND NOT ANDROID # aws-c-cal has been built with a dependency on OpenSSL::Crypto, # therefore consumers of this library have a dependency on OpenSSL and must have it found find_dependency(OpenSSL REQUIRED) find_dependency(Threads REQUIRED) else() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/modules") find_dependency(crypto) endif() endif() macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow 
BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/cmake/modules/000077500000000000000000000000001456575232400225175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/cmake/modules/Findcrypto.cmake000066400000000000000000000104221456575232400256410ustar00rootroot00000000000000# - Try to find LibCrypto include dirs and libraries # # Usage of this module as follows: # # find_package(crypto) # # Variables used by this module, they can change the default behaviour and need # to be set before calling find_package: # # Variables defined by this module: # # crypto_FOUND System has libcrypto, include and library dirs found # crypto_INCLUDE_DIR The crypto include directories. # crypto_LIBRARY The crypto library, depending on the value of BUILD_SHARED_LIBS. # crypto_SHARED_LIBRARY The path to libcrypto.so # crypto_STATIC_LIBRARY The path to libcrypto.a # crypto_STATIC_LIBRARY The path to libcrypto.a # the next branch exists purely for cmake compatibility with versions older than 3.15. Please do not remove it before # we baseline on a newer version. It does not like duplicate target declarations. Work around that by checking it isn't # defined first. if (TARGET crypto OR TARGET AWS::crypto) if (TARGET crypto) set(TARGET_NAME "crypto") else() set(TARGET_NAME "AWS::crypto") endif() get_target_property(crypto_INCLUDE_DIR ${TARGET_NAME} INTERFACE_INCLUDE_DIRECTORIES) message(STATUS "aws-c-cal found target: ${TARGET_NAME}") message(STATUS "crypto Include Dir: ${crypto_INCLUDE_DIR}") set(CRYPTO_FOUND true) set(crypto_FOUND true) else() find_path(crypto_INCLUDE_DIR NAMES openssl/crypto.h HINTS ${CMAKE_PREFIX_PATH}/include ${CMAKE_INSTALL_PREFIX}/include ) find_library(crypto_SHARED_LIBRARY NAMES libcrypto.so libcrypto.dylib HINTS ${CMAKE_PREFIX_PATH}/build/crypto ${CMAKE_PREFIX_PATH}/build ${CMAKE_PREFIX_PATH} ${CMAKE_PREFIX_PATH}/lib64 ${CMAKE_PREFIX_PATH}/lib ${CMAKE_INSTALL_PREFIX}/build/crypto ${CMAKE_INSTALL_PREFIX}/build ${CMAKE_INSTALL_PREFIX} ${CMAKE_INSTALL_PREFIX}/lib64 ${CMAKE_INSTALL_PREFIX}/lib ) find_library(crypto_STATIC_LIBRARY NAMES libcrypto.a HINTS ${CMAKE_PREFIX_PATH}/build/crypto ${CMAKE_PREFIX_PATH}/build ${CMAKE_PREFIX_PATH} ${CMAKE_PREFIX_PATH}/lib64 ${CMAKE_PREFIX_PATH}/lib ${CMAKE_INSTALL_PREFIX}/build/crypto ${CMAKE_INSTALL_PREFIX}/build ${CMAKE_INSTALL_PREFIX} ${CMAKE_INSTALL_PREFIX}/lib64 ${CMAKE_INSTALL_PREFIX}/lib ) if (BUILD_SHARED_LIBS OR AWS_USE_CRYPTO_SHARED_LIBS) if (crypto_SHARED_LIBRARY) set(crypto_LIBRARY ${crypto_SHARED_LIBRARY}) else() set(crypto_LIBRARY ${crypto_STATIC_LIBRARY}) endif() else() if (crypto_STATIC_LIBRARY) set(crypto_LIBRARY ${crypto_STATIC_LIBRARY}) else() set(crypto_LIBRARY ${crypto_SHARED_LIBRARY}) endif() endif() include(FindPackageHandleStandardArgs) find_package_handle_standard_args(crypto DEFAULT_MSG crypto_LIBRARY crypto_INCLUDE_DIR ) mark_as_advanced( crypto_ROOT_DIR crypto_INCLUDE_DIR crypto_LIBRARY crypto_SHARED_LIBRARY crypto_STATIC_LIBRARY ) # some versions of cmake have a super esoteric bug around capitalization differences between # find dependency and find package, just avoid that here by checking and # setting both. 
if(CRYPTO_FOUND OR crypto_FOUND) set(CRYPTO_FOUND true) set(crypto_FOUND true) message(STATUS "LibCrypto Include Dir: ${crypto_INCLUDE_DIR}") message(STATUS "LibCrypto Shared Lib: ${crypto_SHARED_LIBRARY}") message(STATUS "LibCrypto Static Lib: ${crypto_STATIC_LIBRARY}") if (NOT TARGET AWS::crypto AND (EXISTS "${crypto_LIBRARY}") ) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) add_library(AWS::crypto UNKNOWN IMPORTED) set_target_properties(AWS::crypto PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${crypto_INCLUDE_DIR}") set_target_properties(AWS::crypto PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES "C" IMPORTED_LOCATION "${crypto_LIBRARY}") add_dependencies(AWS::crypto Threads::Threads) endif() endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/cmake/modules/aws-lc.cmake000066400000000000000000000026161456575232400247140ustar00rootroot00000000000000# This can be used by downstream consumers (most likely CRT libraries) # to build and embed aws-lc as libcrypto into the final library/binary # This will create a crypto lib that is findable via find_package(LibCrypto) # and is compatible with s2n and aws-c-cal file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/aws-lc) execute_process( COMMAND ${CMAKE_COMMAND} -G ${CMAKE_GENERATOR} -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_TESTING=OFF -DBUILD_LIBSSL=OFF -DDISABLE_GO=ON # disables codegen -DDISABLE_PERL=ON # disables codegen -DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS} ${CMAKE_CURRENT_SOURCE_DIR}/crt/aws-lc WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/aws-lc RESULT_VARIABLE BUILD_AWSLC_EXIT_CODE ) if (NOT ${BUILD_AWSLC_EXIT_CODE} EQUAL 0) message(FATAL_ERROR "Failed to configure aws-lc") endif() execute_process( COMMAND ${CMAKE_COMMAND} --build ${CMAKE_CURRENT_BINARY_DIR}/aws-lc --config ${CMAKE_BUILD_TYPE} --target install RESULT_VARIABLE BUILD_AWSLC_EXIT_CODE ) if (NOT ${BUILD_AWSLC_EXIT_CODE} EQUAL 0) message(FATAL_ERROR "Failed to build aws-lc") endif() list(APPEND CMAKE_PREFIX_PATH "${CMAKE_CURRENT_BINARY_DIR}/aws-lc") aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/ecdsa-fuzz-corpus/000077500000000000000000000000001456575232400233535ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/ecdsa-fuzz-corpus/darwin/000077500000000000000000000000001456575232400246375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/ecdsa-fuzz-corpus/darwin/p256_sig_corpus.txt000066400000000000000000053506141456575232400303470ustar00rootroot000000000000003045022100fed84a5cb174b87dfe614e6c33751fe4b4525bae34be07547dda457bd4a0e37502203db36831b1a5f86c7e96aa59086109a0fc2feeadeeb617861bb674528febeba9 3044022038d00f05d8926e90aaa001c2365affbcab2a80953cfe1387900f632a224bd57802203767b77edfb10a38c70b21776aa36fcd7166e239c812a0e4e2cfc1371cd154e0 304402203f49634c94cc39468900bc5cbc5ffe88c86b914928076ca117a105f7e353db19022021cee4144ebf9f88368398620ecfb3df694aff2382684d094fdd65564fb0e7b3 3046022100ab41bb352c722624ed29eaaa07baee4bf31a666d098bb5d854531d98a7acdcea022100dbbff16edfd0bb922af2f652f86b0668d733fe887aebae53edc6319d07b76914 304402207087393c1e3f0b60dc60c54a94abae23ebac788cc2daf5bb82ddd3cc3098513f0220066427de4c362740c60ef4b779cad51e87090cbbd9f80e456a00f7811dc208a9 30460221009d07bf2ff2829bb0f2fe85ce3a9193fa39b048acf6d0ae92e9f3ceb8adf837c8022100cecd7698a7f959e718c1867594a1b55a1244d096b7e9f72b9c253782f5c2f376 
30450221008f3ddd678906cab4424c758299e87fa1b226f23a72dcaf32820631fb6f56bfd902201be940f7c9e16c7ba704b48cf74164035d2575d9703a77ed6b0199a1097a2df8 30440220606a8ba23025e4896e7134edef987f99f1b8da0af76295f514c24033f15a0eac0220437815f173266900d969b67b7dd937221adfdabad4dd3b2a89aa2f904db1f515 3046022100b9c320a510a1e06abb8e073e76917cf241719d05cd45c58b400c8cbb913fa53402210086fea792c78eee195af6befb4bde7767ecde5598ca7ece4f6348b4cfa39574a2 3044022041977d39485aef959df396e2308fc312d8a3f3330138d8b94c06f6df14bd4b8302201f4f0776fef4f938588fa72a6f529bf99d7a38c3f0becc3b2666edfb527b2644 304402200cb2258b16d94df2f02b2a38f5974bd9961c94deae42caa3bf8a65c180d34a660220572cb98012e2305977859cc212a827426a7dfb7a5a0b96160ec4e460a07e98f2 3046022100b7f71b74a61838f505443869c7b5064a45b8e2494b4438dccfe07b0711046297022100ab0819aa6b2c2848ecf2ad4f11b88a9a877c9a2f7bac217a3450287cd077c5f0 3046022100e9dda288d9c386c62a01dffdf4e59256351fc68baeae36dd05dad7f64c34794a022100ed9110f6e25c452fec9e440908440a38a50352e48471ff9b3e49966d99b0ea0e 304402204f29962310845e902f6136151f45f369a2827bb966ab5d8d62fea4b2892f1a2602203a98d5b67915220fff66c9de6dfd4860683ab08af82ea6e64061aebeab50c840 304502204ccc8ca8303072b869870189f92e5c5687c11ab76bda9e8ac2bc1f66a5c551f6022100a76db08ef66551d863d6bdb292877f885cc240cfc11f78485cd3e2005fa85bdc 30450220087265eeed07a8410bef5962cf99102f8a53183fa959342b849e704559902ff50221009c8b0ed16f5dfd635f353bbd6d5ef6c51fbe92c9cd6c74d8f46eb8547bf75862 30450220061ff65f3814d242c95441691767fded4c7fbdef5d0c79c3a50c11bdbfa69279022100e4e5eceb9412374abd0dc1dbe4ae263f8c4a30052dbe2d2a83a89f5165c15870 3046022100e01f3b38eb5b03c240b67b440f75d697cee54a378db6039c798a88413cae5260022100d4c6c93ffcc5239fe8926dd6e10b7bbbf09af71ad83a6a16954f7ca320f6ab85 3045022100a6d69ef5cad2a81189d70208a8c1e5c1ff06d4b2994d98061a4532a7524960e9022060e8bf09e71cd4829e2b326357afa0f57e337ada90c90aeb1c25c6965d0e0883 3045022100ae4cec07d1bf9b9ad3669e5765e5850ea55b666922385a820012d0231822746302203e39a89fb40ec79b97835a1f2f3dc1346142f9c88ac6b9492b1d92a0897ca591 3045022100e462ca2ae31690c4381197762717160c45eb1aface1b57221658bdb632fedd6d0220033b3939952b4f7cf61883b3184012f1e4c5e570a9f42a4f89a6393516410680 304502207fb0b780d982e62969230fd445e4bf2e93b18d47f8d91a619af354efd50fbc79022100c31fdf4a5d135f7895fdc0c9e116900f786356c476ae322cf8011f18fcac2eb9 3045022100a516a7e922b5f6c3a2f6c1ce4f90745f6169a9c773b966670c8458dcd436836a022042f97ba8f9eb7ad757b546f708c7ec9a504c84478e6c0309cd537f1b859efdca 30450220195cced7567f8808a075e132d539236651119e75adcb2ff89b7c65bd98c4b096022100f4b7026daeaebfd2ccb1f8469aaabba288d80f4561600d150e12c72aef1da7d2 3045022100fbd7f4d77ecba78c3fe6bdf7d430689393dc57c5903149ae910735136f649d7c02205839b1c79416e3c9a07e1f53c0a59333a70f0ef986c9081dc64d5c54182bbdaa 304502204c820fda57842dbeb5fe7ba40dab159c41ef19db2f13a9b602c806326ce2436f022100dea8c999f500ffed824474db6a9569686bac0e52bd92734cf3ff84b5535e221c 3044022055a2fed929d142358d88ae3fc258a6c3870e1d149c0dcf21177c36444f84801d0220590c3caa0f4489fa3ea5aae584d97877fb7a0068736cd88d1b9ef35b60d220f9 3045022100f0790a44ef83886f0da839e887c6a5697661f18bb3c10e4062857baa0a00b8af02203b1fabb98107358785996631ba7024aadc8c88a41d3828c558e17392e2384a3d 3046022100b93ec2dd478fd9aaf4cbf52c2a1f070564b4c434dc9350032ae3302efad22b12022100ab33898776c3ad5e31013c6c43b73fc4dd153e1eb6fc8b66abc140a21ab701f4 30440220550049c319ba0d5e7e0a4cf54a3ed11834c8bd69a205ceee594aff809183c49402205ead88487cc6baac657a800bba741f5cac77d2236ba5437ceb06a4da0e84772d 
3046022100f96b4dc3ca912ce3c0c400fd943ca110ddb71e2f2932fbc5c12fdaa7008578c30221009b50631ea4e04b4acca6ee04176c6c55216fe3af046f8a8ca6ff0ba328461c5b 3045022100f2d77c74ab2d9d0bce49f70ccd396290788b3f10647041331bd991dcec3a540702202492c9c5ef6f0f57255ba9bc50d2b68bc0fb46676db82aad6b81129eb7580cba 3045022037d0a67728e2f5197f3721d7c1b616e4583d4e1ce483de3f044198b095eaac250221008d45beda5c928f69ec8267c1525db5f2902797c73885fd3ab6ed518074e4ab83 3046022100c3ce091e1043400ed466330dc2f2b8ed6d66dba132c29f1feb1b3dd627112f2d022100b77a1b7cc20edd783792ae1a174c71029d641bdda8ab12ceeb02461ccd89c421 3045022100d7566943105bc467d6c0cbf279993016f44b22d8660dbc43f7857236db683c65022076ae617d7efe72422b512d8e5b74c6f1225958fefc26ffe5fbb8757651b210d2 304602210089433d90eed8a73a3fbb3750b8a649c94dd6fde5f47470378e84ac47f2f5573a022100e7cbd0ec566e5b6550493de482019e6f6c7009a5fdf27493821033ceec13bc65 3046022100f13e57c96e91a75cd0aa074a0604c4cecb67a1e621b13eefd2199b9289c76bd1022100c0459d8832ff2e32db11fd14a38013a6c05d8f5b44a672c7ed9d113de45cfee2 3046022100d0b9339e7e7ce9bacad66f93a3e49aab47fc3a25cf8b5cf57fa00f1920ef487a022100d3f36891aee1cd63a02bc3a05f3a97ee3b23f86281e593ee5972790f139672f9 3045022100b4034ff81a4416720defd09631c8c3545676ad6f7b0a55433e8e4890ad0042fa0220567af0f12d46acbc4bc404053553317a3afe9ae608aabf9211b31b62ede06dde 304502203d098521818224c3e5a12770ae256e1d8a59eb6acf2646853401d5ee0449da5002210096561e74bb8e99d69d009a7edb1324879cd92fd0b2665615b79a42e802ee7ea4 3046022100dd58d9a6cdf650378f350dafbba510ec8570396d4ed96c6c4a5128300ba86f0c022100d5ffc0753d739d5c65bbbc91c6ccb909106eb9ac00b0861b74a42d9c4fff6e4f 30450221009d8a0738e82a753b7e1647899cd19d7af73b5269b64ae61f09e83f9ef5e9f61c022079941b74ab784a54b4d38576c6fd7d09072d2e908c8bed3a920dad69815846e5 304502204ff4c20445bae3b8bf5da06f592a1f16880f3993ddf70d3794ac7c6940257a75022100e24c0eaa903f37dcf809f3df94117227d17b701c10c8a2f2260de5346c048b6c 3045022100949206ed7fbaad7cbf8dd47ce76400527d06929d39f43852cc86c7784c78884602204c8723477f427a6ea69a3a3f81e6feabdf1c6d6585d57752c3d86b644841848d 3045022100cd4c40f856fd5f6865bea1a1ee6e7816e20c0762f3948132e06c7a4b5cda60b502201f8561abcec46e7573ffa729cb783048a2616648d61c3c6ef720ab9f737be6bd 3044022071e3915765b25ac9f0e615d80bf63516729fe363f8a8cad8d7973f27377b2e3d0220662da042a513c2b17fb95344aa10a193e40c9352d4b581601bd1b9bc75c4ba99 3046022100c4c7d7d21264b485cc4e95a1020eb48355e6aa8f356dc2ecd16a09f91f28ac480221008b22cc599a0d86d00c0ebb6940a3acd5873c91f6ac7e5a95c003be27727d02f8 304502200f529ce86bb7d3349c031515e8ee9214da3387524c68e940e11df13836b7a5f9022100abb3512213e7f33b8cff9bb9d57dad2c18175810a027954a64d8100747dd32f4 30460221009ae19f03d828307c1634e1130eaf05adf9a595199e259b23d2f562617c3c2ffc02210095ceadd66aa51287fa28d87b3165fa6985255ec090d2900675efb5dc7e2c2d24 304402200f394156480afdca761c0d834f569b4169ca90c3a9fba10fed27561282d238cc022012da3c60374857d819afc1e5d6cc790ddd14bab3d01441850f6d4c1d4ca316e4 30460221009bdd4ce38790c8041123e4b71415d81a018f6aea8bd780e22be8f198a00f9065022100d151073ad73c926e175094fe91f87a46da75cb0f347848676597a4ae8ca0e700 3046022100f8448476771803611e3bf6309d069248d42903665a3692054f10e41667f02ee6022100ed2b2dd78c0df62b6d223e323bde34245dab66baa2806ff12b0544cab1dfb792 3045022100c12b8286d618857aede606b017d720ed1ac871f369c45dfae2ef7ea177a7326102204a000567bee4955b36407579baaf5daaa8db7ce81892e1be96a94d61d1fb8138 304502202065ae2e967cc00a4fc9876c60c7feecf3b2af1a5330016ea3021f32a11bf678022100d17803a79a91eb70023da1f3b02b0d81ea6250a8a2012f50c2963d30c67fe659 
3045022100ce56bcce575e52852f592a6000b4b76f79f4f0c4d8fd11a05b26fcefe2b03a29022018349f96041579cbfde83d0a0582fafb6e976acb56b74092845b6e0ebd131f92 3045022100b80db5bb3ec02d747cd7b29d8430d7e2072c41e04c41951fb20c8b476c09335602203c897836c51794eb439a75f240f171b344347043eadcf1b7f229420a75a6d117 3044022002f905c02dd86a7cb096d0d8592a444de3f6b140da408134ef8c4a6bea78bd0a02207d677df7952ebb8b507762f06d8a7c22b7f85f8f954c7860809125398e8c72f4 304502201b10f68e457143d1abc60e65bd837494c5e233963a60988c737584761056086d022100fd46100921fe76ce53fef51c05331c113ac01e84b67e8ba746188abfa6693e4d 3046022100ad67bcb78fb1af98b50d775166fbdf448040a0d152036e9617f3f3486df81371022100c8e7ae8d90b15c50541758943d308df921aff6cce511afd44c692e52cf6019c1 30450221009ff06c2bc0562f33339087db713e9fefe25744ac1704cc3351b41b2792b2530402202d1dfa9c0ce1dbcfe42e7c69ad8fe60520a6f91c0104371866ab87956e286caa 30450220175f3f7fe5ad68687e5c97a92b26e87e9fdd5a34c6d398fe39d376423246cacc022100b3c54c32ee4790743b8f133b3051a4558a27e1edb9ae3dbe6329b1e95c23c606 3046022100a1668bbdf6675924ac0eb20f3274ae4847963e278efbf1a18a6303df137f0519022100c34fcb21c9b90b2ef972e39e05e46d48d9fbce89437c97673a459d8d32bd7a58 304502204e032dbc245ed6f0ee2f1d33a5904bf0fb61e81c6791cceb0767511427b46e8a022100cfe3578ce9b5a50489c6913385aa17e63f5ff6606bb31029c8ed68898f7cf799 30450221009ee06be84c564c80feb38e6ae2ef6119025c3594c5b6e97e9479d80e2cba7d1902207df3d17e37e4fe2578a432b2f5ff9097dd4a161020bd109ba66ea1f66cb44b72 3046022100d36adc340acbc7d2aab1de5ffaf7741cb5e1b8f214d5aecfc840a5a63ec1244a022100ba720d4b0ef84f75d1a0c190a4c488b3e858f5d4c06c3defb1585d729d257a8c 30460221009819df211047c219f72e00c6cf4b49ebf6f1a9c3b4ec749425b01c80ab61524c022100a00f41a4ebc7e17aca7690ec035f88f56340c799c513700526c0db8ae4cf5486 3045022100b240be485e9478386e8bdc1e0f28dd7e7fd3e91b4410edaea0fa452a734b18f602207ce260f66bb455b00f5806b52a977b07cd8a4327e25d1da61901816becf92e91 30460221008a7d880d4bd74cc701ca3d86257202e609e75b1f61196095eb2f378b3a78b824022100a49df35071eafbc6fd7cfc75e9fd440440ce5959e61ddbdd4c85d55693e04035 304402206984d405598e2d595e2412e219edecc55ec257d8d006bc0bbad12763e286ca4a02203b214163b87cfaaae34cc577267f62dbaff5a51196ef3b38383dfcfd12371a70 304502207f391acae572c5095e673a19ea8bc4ef4febb2a8a0f2cd0a9c3acfad9010361a022100a74762bdd3749b030bf3600409b9cb3fe7698c07f9f92251029a6bb31c31c233 3046022100af14afb46322e27fd2e8463e11f06e097960bce5d3236538c01d9daebd2ee8fe022100e106d22263c3fe37bc19b7b576cec67622fd7d030ee208ca879c3371e8cec84e 3046022100fdb41b2cfb68179fa55d1c44403e5a738f1301ca2bdf25496d09aec8f7460c1e022100848958f686f450d4dfeaef303560374ae06faf6f77ba36522a97975b5a458c7c 3045022100f8a0be7cef4108e164162c3f6d60ee414791ee55274406dfde3c5b7228c7174302202310e75e3354212d51ad89ec2a8230b3ce6affb14998d14a460db1d84d0715d4 3045022100e0f583482b7aa90e2b979ca3033998789d3c154ca303c92fc860164746360f81022031c6b5865920b69a9b81499896d26135ca25e4bae1134ac98b48bb908a9eed45 304502207196111573a7b2cfaa9b95b925a1e5a6ac2e907a60c90e2ac0d6d64bef8d7a71022100e3b7d0408d88613a8097a8b28f9761d9eb131b3472007e8257f5179155270e93 3045022100b9fc0c7bd29381b5b825df99f7a0ff3724cde010a0db937ee0d7f70bdf504dde022036405b9416ca2f716dcd4cc4d362ac8893469648cbcccf123f0e507a50bf944e 3045022100fe100228dad158f4171d75097def8b4195adf8f18d19c4e17ae4c29f7661cd550220639cee60c52440a43da9462f3a955cbe743a41434be15fe87cd939d34ef5ff87 3045022100fbaabc9152b9901f674f525496bc36a1cbddf664b2d8c47abd09aed2a59cb7960220194e641d07d63bbd9f67ed8d30342861ff55be4bde5d1da56024bbdbb8e2e5f6 
304402203ec51c70ced262f22e82ed2143c4b3d6d79432f16bff71dbccfa4f97088d44c2022017ad8b5925db71eeb28c8f9466f502100f810f76e79b1b2dff4f58d76a5b2104 3046022100e319b2cb9e9b6d1ffbd33ac59d5c605dfe1984ebd497518db8a1d9901c7cc422022100d2f7ab4429cbe567f4f7c1fa623d6ec7ac7dec9c2bada803b39360cfe80d2e7c 30450220025e26eaf56e9335e52b0f96d159dfc4eaa4c5501cfc73b91349378437af5c1a022100c86ad6a478704fdb920e837de0fd285924dedec3c1bf3855033f25be4acc0f3e 304402204f4ccd8277692c5e02ed3a30e03a3f217293ac6ed377a366feba4beb908919ed022031293f0adbb69bf26d604612af8345b59166165c433193367591279abc6d0541 3045022100d792769935bdf225b07d143879669019f97fca87d7a64324fd7f3429c9991889022021d73be2c356d805dcbcec02a8ce0b8127892300d24ce1932d69b5e04ffc060c 30450220755c6a40a5ae8a071d8b5ac9954bc1e2979178190d422d753d47be9914a246e0022100ac022d9ac84bb9d0e8733503dc30bcebc7441f28f29246e4dbd14e7d57712c4f 304402202faca724f71044049e57ddcb64c353eeb4d56779db26105a02e6113920e9666b0220431c8a41ef288e193a4a877058667eb83a3eebfbc983abbe214a9b540caf87c9 30440220332a45e9f304132860eaed88fe15fa1a9bf492fd92ef89aa2bd4f16045b70df202204cabb7d21ed4fc2dbff80ebd75a4c9dde98aeddbaaeb515fecb50a84b60294a6 3045022100fdd2561738aefca4b8bfd32997bffddbe7c9ca36b9f498f56270ff522f2183b8022063d94c89cd92bf6e83d9e190d60c56c97f8d060568e2c7d1d6aebe35970ebcb8 30450220349e9e216b8064836a3eb64d7f5c5a7e7cb018c22f7ef85bbc25b1cba28f66d1022100c0d61e3fedafc4e40d842e4db45bc05b6edde072c642e2d9923f1a6b5c1780b6 3045022015887dad4ce9ed2aaf2ee2ed093db8d04771964849c18c860ebc9a9667519b82022100d84b3f0052fbd4cd4bfcb85462f6474e99c4efd38765d9559c48256548ed253d 30450221009b9d94419ef688008147dedba520b966a1c650ec61d95790e23d767d675e261f022023b2f665652ffc7846cfb7587aa3cd43a298cd7dfbd026d62fc6d570500e7bcc 3045022100de8ee9bf604dfd18aa1722cb41d475be2201e7c3e612e3919a38b395f72d73b702200bae1b2a719e63d4e9356abb3bcaa5fbe55d9581a11c4f77e158a86d44087032 304502200225430b1166d69790d73fb6b1c55f6cf238023e56962cda7fdec7fedf899b57022100b1f4aaaa489420ca2db4a2f9facdd2cb1a8308e82af4947b76a5490688a6199c 304502202f758fb63a23d43473a128ee2996920c3fce7bddb44ade336875b1271d4f8878022100836cece34a1b229754687854c5463a50139267be06ee8396b91389bb673251be 30450220369936ab23cee132e6d4326742b7a68944549d087dfbc0cce8f478b2cba4b48802210089773f0d1a7bed5bc1804f97d8612b0e34c30a76dc735bd431e76919f5ca0687 304502205ec7a88be3e88a32e9b646067ca3df9bdeacf66f3207a3aab1df488ebc0316cb0221008ffe0e83d1fb476b5e1d40c44c36af78462a35f73d3c9605bfdcc3ed7a4bc80e 3045022100b405dd7f3f532f6f350e46cccfad87d18129c7659657328570fe4964fff83d980220724a61e3463410fb441a4bf62c87271a086a1db0513f230adcf74fc37198e76b 3045022038549a7e6d47b8651e6f3f179a8e2be3993b3dc713c0b02f34602e4e386d86ff022100ba017c553cbc789c4f06bae85ae821ac38a86c38169d084a1df3250fac9c9a81 304402206b6eafef8c64393c8290fc1a0c6c55c82fad5a7a69d0433fac9ae425a6ff5137022079906f5218104517c5267bfaf518f24c5b0b4e02d72c5526549ad05498d71ad5 304502210092cc4c10908475c02136b38e58aa19d1bc3a17a30e709d93c3bb9db904f4ecf2022000ee67d9f623677e6a0d6f9789131a3d37366cb865f8539e7779ddece8e19ec0 304502203ff1b79205c943fc50f8a182922aa992618074fb2f3c6d2832b4185fcf243a1e022100f81d3965eb55a74c6ca7cc2d8a8fad22285d6e25d3ad20378d4da95a3f31e26c 3046022100e4ebdc5ad0e80fecd3ad4efbdff8ef45aff99332c4b29a808651ab1fff8fd768022100cd8c14b5af3dd50936da5bfd1d6d2c1f96fd4b5be69144922b9f519a36039f84 304502205a0108ba23e919713cbe370e28cd14f258d404bd4e945a6870a026630de00905022100bf8a5c027f998af8b7177f5372882f376df09a15bb941e3780d082277c367366 
3046022100d1c2001326b452d07f8a0f0074f0f1d046779048f51037d45e31b951e6288ba9022100b249d3167b12acfa33cd8ea35f94fa9fdb3dfcd219d06293ab4fb98fbfeca572 3045022100d87c7f271be33d4f1aa47a5a4ded36248b27eaed734dc697a35fe4e9ea4fcd1e022028f8a346a5cab3af8b8843886ee2100dddafd6b9ae689a06a23435e1088352df 3045022051a77e0c64acfe8429d81a399d107110e5c4a9425a54910438813c4399abcc11022100dd7b39946eda89fca1cb9cd05d85d67365113b5b9a23e7ee5ed4a454f2d03961 3045022100de9e6554be5ab967cbc233cf51287786250bfe46e45cf19fbe074db043002f7102207deb42d40fd3a4ee68ffc06395dae61a8807611136b8067c9308b4f42ff87146 3045022072ac8c320817d8c0f98e47980c00131487e51906bf191b0e8a2844562a14f71d022100d0f2d2abfb3d2a9a358de412d33470fa183ac3f998c9ba913f8df36bb0f18b31 3046022100fcded095161caff5019b07088673a35128b2be2e4ec913ec39326e549065afad022100e115e59ccc89ee212ba002f095c85e2330bf9b4d364c516c7e5fb38addfbd84c 304502202f4f261bbf43d70cadc4477bc4ece5ff45001d12afec5217342c41dd8c3361ec022100a0539a17f3cc157a1177e444cff3d53d1e5ffb8a804cb357095ba2a552141e18 3046022100b143ceb21eff9fef69300c2a60dfd6e2c2ba8e750b20ce980bee0d42ca68f518022100bdad8da34f8201d817bce907c5a3d64bb2f018bde3e975300237d271308f2099 30440220795e0ce18169bd238be15e08438a117cbde24f786dc1198e652ae4a1715d665f022079a42f612b7b0acd204ccb6b2e8457f242cf1cd615ccbd6e9277d9264cacc2ca 3046022100c507d94fa8a9a5cbf112f2823f115f6c281848760890db373397744c26e5fc3b022100c584add2a13ddc9996997697fbd7294ec3a0a8eb88fbf1fabd8dae8e5f67b2d2 304502203d116602dfb63a32e67416274af250a070db864164767b65e30e2ea2638ed1e3022100ca7a58ca3e731a78ea6bed656f87a88f8bc3bc7ae3bcd7377b36a6699d358184 3045022100907885f099c85c9ef75323455ed562f491bee7691e2bcb3544c938b32ad08660022022123bcf1d72eaaa1ddbefc77780016dbabd0f00025ef8d6893b92931450dcb7 3045022100fb1233f84faa6bc08723bae790aa1be6c6fabf5c96ad1ad489c39bf58d8cd16b02205da3415dab6e4ff5e792fc56e121ac99c4f3bc4c8c1df5f0bf24dcaa7076ecb4 304502204564b530798dc9827b746a1eab9d834221437e5da341a872f9476eb9accfa85d022100a187d769eda733514c75bf9b41c61e86e67b62dcc15dc28028bbfc69a0f733a5 304402203e1297b785d7c6f2b072dd9ccc3666d15615c5234e64999b266d09c722eaf72402201a494da7d534beb1efe2c9aa1bbcb1c3e433d43751e82750398f72c1459da786 3045022100c0000bfc56b68ba43396b4db87bf62bb355bdb3d254b591253e9aa5eff9d698f0220588c5f8226892be656dec5ebce3e2ba5b2acaf323706dfe711476742681ba053 3045022100f70c23fcba9f8629e3f95ded09de0592207c7aea59c7054613f95398f5d54edc0220687406e2775a8b74ac26975d5e4c3fb5c831e8838e372cd2054761e81234e7ee 3045022100bbecb201cb0d270683f48e1340e42067827de898ddf24ad14512fce31864c7120220404d702e2afecba29eb55a50a2e781bc7755e6bac10d614e516db8b6e9f571f7 304402206162a097dff88fa774a7984ae0be149c770ac972e2472133a001a821afe9add502203a792dfcb35ccf64e4ba4ef2b2a7e688a3fad0e03c39eb0fe1051c7bfb8e3c40 304502200a0a659936cc94c4f7b535fc12b5ef6b97ac76977d545f7c5df18baecfdb2ce3022100b3183abfc3f47b520350748b2b2d74e87b00d29a62c12689ceb251adb43be350 3045022062a71b7354b6c7706e9e03c71ea5ee4c3c2c70d0ad56505b073240c3600e9cf80221009c785cdfaf8576bd1529c5c0d7b578dec69109fb4548d7dc611de72d725ccec5 3046022100a1cd4b5e61575544a98897044d225d57ba60381471d1f0da5cf44432d1b7eafe022100fe94338dabc1e17fea99cc618e32d6260752dbcb1eced2d8da2a4a8ad1aba356 304502206cec8b59bbbe9e57a88367dc69debbcc44b6817909968240ada1da84e6b6c44c022100aecb3b78388e115278f1b3aa05ee7747a0f76b3448acc926d6a392102f460256 30450220252ec5526f4f77a591dc3c952122e2c92656a73652bd4c145724d7e2eb10576c0221009e338bcaff50ece7cea4167103b764e36269cced84b32182442eb1c7a6d15375 
304602210081fb8e20c35f237dde34abb5bb1ef4c634cc36a9b34a0eafd3734d2848efd447022100c20a07bf4f0b7c936d68fc4a58a65f783eab11756ae0d3c301e824e881dfe1ce 3044022073cd1c9f5fbba8ec7e4e9f8df7496ad4b068130cd4141fc046fa6a9921f6a8be02205701e9237c852772a1a03b3f8250346385485a5167af65d755133182241fac81 3044022019400a996f773000141247076d70a0bf7dd927599114b402ef675485aab6aacb022060f4a7c1e5dd0a34522e0fa85a4eab2369c5054bfd1b571f6757d216fc084e2f 3046022100996c828af8ab75be1b8fc02b5706fc73af90803c8b98d43d1fb1934bfe337925022100f06a5bbd2d9d8af21c348defa98f1eadecbfcd9431f0ed47a8ee717ec9e7b3bb 3045022100cf41053c9d85ffdfbf6369b853ee63d4a940ca75e427a4d55330ac0fa7bbc6b702202c6041ba61c001683b9f3abaf371c2fd7dfc330be1b07df934c3ac7330c358bf 30440220031f829780509c627c1b043751b21664bf89fa50eefe86c913e89eed257e11ae022017711405a3b3d4ecfa8fea7de7baeec438c6dc41a39cff5af2b0cac662b2cbc7 30460221009f54c48b3e318e46284f931d5dbff21a0016355056956c70e4e60d8a0b2d3f9e022100ec0269764930c88f0a4d4486f9bea86bc896021b8d87434f84c165e4fe18147c 3045022100bdd2694410b7dd4828aee46d3ea5dc074ece6787714d9a2a1272b09d0d52d08802201a66e312474e3f7f0b97f7e2b82e6beb83a702e4c900437cd676f6a8a0a33a06 3046022100d2e023830b5646918b544f5a2c9c1ea82012bf6a4cd22b384bff7e1c94c3907902210093a4eb9a31eb85a43e486042e2ed233c1daf1e0a3c6ce5743cc914bebed6fee6 3044022049d048db442dc09b8d94d4a63ab733ecc846c2b045c76420e543f91d8326292d02200acd69ff15922ac38d539f2c6a7f5de4e1eb25ac7cbee6e35de613b939ffeebb 304402201759d244bbf68b4c8f3ee4ac49b3a36e190f7c06f85ebf8eeee579766644093c02205a8f8cd95d3c9883e8282a0d7f688864e8ca1aae6677e114639bd046c8f83a1c 304502201abc827151892077f06dde56a552f0efb2709961bd3a50dbd7f524d88655728102210085adea88f22c9c4fc01523765c9630d5deab88d9b86c85950f072045ee800bf1 304402207c4b4508e8a73b942bdfd117a41c2955f2c363056e1f987ad45654cf15f2be4e022025d31e222969eb6ed1820c6ebd88ba98031b9a9a51bcf58d16dcb139d3c21fdf 304402203c4815ed16147ad2eb84fb2f4ca3ac33b12d26a02223231d99750de4dd9eee6e022011b0eab92fd59dbf4dc9f1ea79e77dc67493503c60a4b46c5b0297f0b814617d 3045022100f6a12561e1d39fe11047b26f27112446eb2ea2edc84a1bafd9c01468e0c40f1402201df0e007b1b15e79f5397f97758275744739e5392a79792d6bbfe70ce4289350 3045022100ad93cd0816ac1183d00b1ad408ecfc2d91cbb10069c0cb784ecd0289997a217e02201cc35182a09da95dcb41f048a7cf8f8f8eb197bdb4152a1720cf5268c017ced9 304402202f194d0ac4d70b008ae0aa6545990fa32cf801e28050b72a93c905b1a25e0878022017589c3185150757e916bf2f1fc75de026ce834db43117aaaa4bb436f1958e5b 30450220138070425b0b51969a8dc64430bc95de0d11e6b572fc3fcaa1440d01ecbae47c022100edd072ed0aa7dcb55caa077b2bb7f1878379372c0a1b77d54b02aeff53d737b2 304502204c17e1272ee398e0c50fbdbc0238b0d65c91e8f8eba1fecd68456f88b0f9fadd022100fc403f91961b8ae5da5dbc0fa385a9f837f264b0ea2cb1faac874e9e36e0bcfd 304502204dceb0defc67a23573cd45c54205163de19ee52f0adbe6afda7067eee61e94ae022100cb1915a7b9517b0108317ce754301ba64cdbf9bf1e6bee30cbc71b8100335b73 3046022100d340f1f187d8ccf1c94e02a9950ef558a4ca4c90e46f8b6727fcb4ee979ca4ec022100c4a68cce8725d639be3ce063a59da125b6c73d779663ea4f99b4fc1d3ec4af7e 3046022100b5ecdabb8fcc05a6393df3c78f5189da31443c84639c3094874e9a02732b205a022100f89a0275096bc95c760840aef0bacd39fd071dd6be3502d3ebe58c18f12e3bc4 30450221009bf1617f209c3e9b218acee71ec83681e765e09322e8412dc27add360c867e3702206a01b9955a6b62d68c46340f37ee85e090d9ab367902d930f56b010d8d53d659 3046022100d0f9b35fae14c907023f051d4ef25e92b873f0f1b1fae1f4713e999aeae3ca63022100a3caccbd7b02de052036fdaee7f12a7e5649963518d040a8425ea00ce14c402e 
3046022100809e5fc0e5b358d82da2bea4ab75f2241e90534456e66db3268a0ec9f8cd924b022100be1b8794cc326cde7777e5848b695bf31b9f6589108741cd41bee01961735042 3045022055a5fd874be9deff7014febe708b1fe2ebb56c7bbe25cbc3e7c40ea323293284022100f1805e2695969badfb5998b59284b5e50455dedaba0cacecd9e9a005dc7644f5 30450220175f3478aa471842823371baa003c5a88a7b388ee20380dc0aa1bfc64aed1b6f022100e5530785ab4413fcb62702434aaaceb01e04e9c66b8ee3f8f0607c042c2f85cd 304502204a158779f0929fa160f27da895be8930ededb1ee554a65e01a1df531f0cf1a53022100a5fc7a94468317260b753b07c2099071058c0f0a4218dc714c9154ee413cfb3a 30460221008b37c0838e8707a9d4689681a0686e6914573fbcc33acb1521bd4c6ff45f4d2102210090d38fc595791a49807f3343557f7ebcfa1b1fbfadcb762c1ca209390a1c7943 3045022100e989948403340ae5132e1533d258b9bd07581ac1a1c075f8ca7a7b79481ca8f302200e0c0335e1472e49d7f2f2548a61d4950d30605266ca38cc04abbcffe964ae0c 304502203077c75d7710685478f46e2932c8d28b0e3cce262a1240c78cf1b1cb731b84e50221008733c0fb064bacfa5e927599b6eceabafa6670ab87dee221a54824199abfda6a 304502200e38cfb0b62f60426e51df4ba6be54f3637402e7bd72faac12c2f8bb77f87b7c022100cd068ac9d0add3891b548d4ae6188911ba802ffd3a3ef90ef982d73bb437eda2 30460221009c396b228e22d01e913ae6c82fb72a294cb3f8df99f21f17e0deb159e58b930b02210097fe28847085ffcddcfc49e2bb804659c82e52703340e45e7c9f64429f6907e0 3046022100e45453ae593ef472adb64ea50b36ff2823a334221162efc93d7aee00ad920f07022100f7cd8d26913e7b7d332e91fa60dd2e4b2367209fde2022d12d3b7e92515a11bb 304502202d9090d62d5a3888632a3129012c232907b8d215d2b198f259fec7ec3ba8fb140221008a62db1d3a5c4cb8731a592c7b5231e61202f51459a820ff2af44bfe6fb67314 304402203e253d2373668ebef6eb5fddc805cc59db22bd5a2cab9e6cec2e48e4d50ea58502203b627f1e35810e4cbe644f1e42a4340174996a517093b5dbf6a47ef11580cbcf 3045022100f06910d5f1d456bd72fec5610731b4894f895a6a188cf487e583bb31c8103c5002204440a23a815ab30ca9ff743b815a12afc842f8125e4b7259344f018cab01d528 3045022078c85cd53d7f9f2efac55aedf780e60be059bab8118498a775962fd9714f130c022100cedae4b4e752b017c72efdfec12b08f2687358e6f7516decb744ef212b1202f6 3046022100980842a9c38d40497c8b0a27e454c5e44a053b4019d57344aedffa4592ccd611022100cd493774b0b630981dc0059901b3ffcfaf88cb9c3cf0f70ca4c8899422d92f0a 304402204b5cab3cb8f65b0f9dc26c14f454a0c74fc32d6e975437f4cb2a933d0b88f67c02204c37aa1ed13dd3537e1e182a6c82803b71ed41556630a4dc37125bab546a6839 3045022100a53f255cfb3b8323e8eb79100414b9a6086f538a575bf89b43976f59ea7729c402202204651fa2abf2f982a9d8bd8287d96d4eb850aa48161dad834afa5594b5c76a 3045022100cdbfe939f3aab9120abfc7c2f3b749e2fd1fbec918d340c18cc4da016a2ae49d022074e98b36813d766f93d62d4e56e7a573a35b5533c85c5c02bbe101f7d03b562a 304402206a6e640f05ee81bc2e36fc291f7dbaed2be605e8a45acaed8eb38f21f576edbb022069781746758f83a17a2f93e56e78e5bbdbe86a9e131846755daec90e5f0aee13 304402207e63a76c07bff7e27011facf1ce124269d5e73c1c5abb038a8152098e032320502203fbc1ca1a2cab09054f548cc8c0fd60f4db4103ac33e841e4fea5ef7cb4a2f23 30460221009ae07d5387a6569306b38eab869ffa0782051d30ca8a5a7f72424c119e31814e022100d09d7dd0e0598ed21c34a946c74aa189e97560f426f2bbe3c8a02a088ca1ede0 304402204cc07d413cd61ed70fc90b60a7dc2a8e8094c786ceecb13f8b4b9294eed18ec80220140e91a308ad982e7edd026b7bcfdcd2040305187ad6cda26807ff13435cba33 3043021f2a848ad0115c36fe928e921a2d580615a79229b06556e5bb14ec8711efa84c02202f392977752520a57e0c6fa9faaab4cc5041eebfce7d5893d3414bfb3958fd32 304402202bafc658c2e3487f6626c8a38a56cc44b66f9b6342f1b21d9f015c400880e810022050a22e70748baeab83086536f5c60c41d14d35395cd6766f533b6910709ddc02 
304602210097768884df0a17f29f7e11e0740881859a4dd791ffc002b42808c030d7250cc20221009032374e4ec8b5d00119ee798aa67c425bebe0ed8857416e8d4aea19636a3fe5 3046022100c9b1d32aa32aceb77a2d65c5821e61d7e014c65eac49028cb492977796961af3022100f45ecdcce85817bb4a0fa80071fe6106302a2c1c9e38f76de3fdebb8cc0d1a5f 3045022072b414fc7664f711ddccf34e6cef02aefde9be1cf5ee635af728d84d5ac20beb022100b99ead59fb23893b509d6d8eb3fe10db94f8acd2b1eae7c4b5956d8ad73022cc 304502200a480501322673b04afcac911a282ee54cc27751779d457d11f40dafc119d93e022100e069c3983ab6710df60e678a3863defc54dfc45718bd16806cade6bf0a85f0e2 3045022100d1fd48becb964c3b829c2a59f3446ef34f55bd6586908a30d5eddf3e34657cdd0220706233eafabc4bc9ea4d0570770b917dce55eafb288cea8987c7370bec6e7a73 3045022100d29cb9bb8ff74f5ff5b4f9c53614a104d8c31c3f0a346bacb58b73e8c2a4200602206df7393f85c14f52395d561a13e9bf4a6431397fbd4e8e80b2b9f37ef7dbc7c4 3046022100ebacfee296361d900902b888da0e89e0753b2a7163d4c9d8b9eb1a75ce3879fe022100e04f755a022c5d327ffe57f742a31ac413554f81e02f003319c93df154877d59 30450220415885756c5bbb953688c25b334d93f8cb6c884d6ead537c116cd236aae139fa022100b04b6e910f12efc31ce57f79f21e03548a0f2fccfdad95243ea9cccfd5e807b6 30450220544046531b9be7ab3c468aa878fc2d9018c2b92b3173d8ae8aece4318cf74e4b022100f2595fec6c3ee74219003bb99b142ab75da7462d2b8ce2efe864fb1a6970c506 304502206e17274ebec55abc7dc8245d2a2c4db006cfb46ab2e6853a24fa88138603e595022100bd3fc420993c91a39ed9766fb58b123a26e1f226fd609e132d1bdec564bfcda4 3045022054b837c53b4cb13e6e3700369d79a371baf5ac4c89fc3645a7b1edefdf3e057d022100ebf5b0997e26d4c3e6bc49f99ce530fc2d452f037360b6cbc2a60a240eacff7e 3046022100d6158cfea49a196ffb8322b9f5fe40441ee94c6550f9f3d88bf463ef483dd79a022100f267353e91979c79d7368ce59c2cb156f85e57d72ad97d28c03aca835c525959 304402204846d3ec1eba1ce7560c2a233e59c578c8df9287136a392263f61af7fc0a3bf30220025f5541b9e7a5929c3cfbd78944a7783fc38c54fa774b9930335e7eb66a00ef 3045022022a5933dc98cf109c1cd11b360e04169e8c9659be102d32fe41a750ad3ecdc5a022100ad764720eee4981106d04e9a3f28d94a1ca92c1635fe82cb03d2e3323c63c838 304502203f60b2c88e1dfcccd9fac166aa2033340efe4b5eb8576dff6186db4c1a45091b022100e6a8af29008bfb5167c7303cb3d656912e4dd29d5f624d334a3ba21ff8b7540f 3045022100f9688801f1067086535ee83121a2918a481f420540e8c865a39a643c50b1a08f022031d27ea1d43def649a0932b2729978784ebd018539abe8a83b96e99de4150032 3045022100ffea7e41199e042e2dc1df5996f9f5eadda9a4090800bda1ff9cf532bc96e8e0022000a784bcbd57c0cc8da4412cd3c2ced6e4e51f813fc835155f326abaeec94cd3 3045022069a86c3d6925e4013b67861087ce04b50c79289084fa9e09a4a07588fd99e8ec022100c9178e346ac4eff3e24a681a51abe57837264afc171e30802f9e4d8445d475e4 3046022100998260b597334ccc02304a6252c4ee14e6f0e527a9356595ddd61df13c664332022100b6cdebcd363ecbb6c60afd27149801d6febef0d0e90f51d10e6598c1a9476c7c 304602210083d39b13bf03f82ba64754e98f730f4a0f66f9cc2bc6f66baddb4933e3974c34022100dc9196b84f1819e8aa9ae459169bcbf4fc4f5a33591b2e58ed06f755e1af512c 3045022025938f0661f399993c36bd74b3bd3f06b2bc1e9568b61af72823fae7f1c0816b022100b06457b1c0160688007d5497e0af047ea09fdf70856ae55c782a6020071a4367 3046022100b59fedb91b5f49aa2b9f21500b88227e77aa95d7f75b6b5ef1bb44f63efd23cd022100d376e6a90ca175c452fa176ff7aa2ef9cbf698465433f1fb65d9866e10e4f139 3046022100a315979963705110d7cbadd0e6c4d9bb4b95ac6c6d6c81e730e85433173030d2022100fb9b2a12255486e09888f2f4331078cebc0f090c39bc49ad9cd00a3746956006 3046022100b25eaf412120360611c2aa94745c10c2ba907faca6ed365a8617d9bac2642eca022100de02887faa944c37e333362b27bae8f2d153647f9af1966153da3d91fe6f9078 
3045022100d535782da3cca57e5e14c8d6c0f9fd360a1acdcb54838e9d384f3d442bf3a60802202dce3ffe8507566f9f473635fbbae27da1f0931dfeef75fcc80eccea3e1a58bb 3046022100fe931136e36a166bf2647a113449de73dc2d673676f571c717a1a5038bba85be022100ac8527d14b8d6d651bc89fe77760b2db1772b638279ca72d1baa658460adf6c0 3046022100d8fd67cdf6284ae5bed5bc03692bbdf64b33e47dfac746c8f0605a105c1ab53f022100a683f2cfb184f340872248f9df59b1a7dbb968a05a25f30e2d04397d15239b64 3046022100bf9e390d2da4114f3cfb5b54256c54263b90b89c6adbbd82fade74f2ea201a9f022100d1e220d51de58781cc4db94691e5dd728a6e045c2c86678b5f431fa506b35bb8 3045022014caae43f63542703012790651516f520135d889555f7b666365c1de4a598a00022100f2c817528d9734ffdd49afebb2c192a4d16a13be32a4a61d41ae9ff23508ad50 304402207ef2f848aa442929eae8970a17abe849836755ac834822d0fe9b94a8ab15b94c022067ee3af579a3c39789d96342b702f466c57a11bf7e6d8f8062f784a681522653 30450221009a1a122318bb93a9d0abcee489be292cde764a5e22136d3034800aaedee923980220008cb074b804d241b27a8d9a75a023a63050908de01362f1f072d54c65cc21db 3045022044f28ff6b65d0a4d75723b81fa925182cab440ae015ed7abbe54b942e29ad1ce022100ea0959744e30f59cd35fbe9d83c124b96159d93edfe144a21bd7440fd7550466 3045022100a10fc178143ff09ba0dfe11637eddf9cf75ab16f87197cc65bd83b80a2e95c7002206b7b57113439066e13611c3ceec4619ebac2b55aac0b3d5d9f7be3a784ca8661 3044022029c4f2df8157f8664197f17d74fc3e54437d6ce41d3d2612c7a327ae34c9149802203722abdb9a0d50860301235438914f97486b716638419128cbfce0c0e8ff0928 30440220088c3a518a48e9f88f0f7c8b4b66708ffec572adffe4088b0bc0794b80fa108402203acf6838f6c811ef09230b97f2c523f64ec8d960178ed1801f25802fc76cf9ba 3045022100d3bbe0679b893c12a49805c403d042c5dd40d966d95c0b2218f53b0a841c6857022002124bce904e285a982d4bea8bb5c27441711b84bb9724d2d747c9aa1f5da432 30460221009fb19e4c7b3e85d09592541302e28bdf4e798b19a8ce243f2645d9b3ae29d772022100f072ab62411cd8422a214edff7dd445d40652602785074b247dcf1dd5b3696b4 30460221008aa15f7ad1d42d925ef5e1039c08d7ac1a93a2aca24f6166ad32ffc58e5efd9e022100c698ed9cda77d5d2911ca2fe7099eac89943f1eb1cff3f93920d8cef4fb167c0 304502206863b83a31cf3260f67758b1e62496191a329f2437b691a8aa0b819bf1eaf8a7022100a87d972620cd213cfee482f3a341b4e75fbb11dbed342eacf65a6664f1fe54f8 304402206c781c7cbf899d0ee9777022e7e55f4c2513b0510b703b3ed7388234ce2950e30220426828a88da24142c9724a00134289a6986d3ac74dbf9ea19ad3ba9994ab589c 30450221009d4aeea66fac4ebffa9ca045088ce2bf835f4d6fed748b73700f07c56b740eb9022061481d8806ac92209ac968b378bdad9c82eaced20575feb12244558e8c676eb3 3045022100a222e44ecadcda550c2a65ab7707e0a8b248839a0b20c41b6aef9e51d94885d10220761c7d8403a9d97d8e325cf6bba8917783bdcb85526f33080069cd44e5772847 3046022100940546682eec857afe829ae93216c74dbe1be3b942220b33d225c327af49157c022100a4346086ba90daf16fdc1de744ad98ca538bb6d273c533cdfd421f6734b5a63f 304402200b0c5cc49d891637b07e383bc435fa2959e48e8616aa10ba71c6684fb2b02a3f02203af91e99a6dd76cfd02d1932a92f162315227ae048d401a5c44d4b0ba46a4c22 30450221009526428eb66de185a5cdf1e2c617f87f90c292ae4a9e797602666c95aeea648a0220202c88b4978df2514cc668c3cac283fcd491a0c67b7c160c2100ac50069240b6 304502206c33cef12c3d3958634731bbf13cc78885a866c136e45fa2e81c11cacf105d27022100f390b189c77fe22b2cbb452702e34936188f70e90e4a459effed35e7ca63e607 304502210092f1633a7c4319e7ddaf9a929a15b62917e22108af937e7af4142ab630622a3402201c82d0d0c27a3dc531f30e8332053221a3138b33839f2cc6cf5eb200d0cade73 30460221008942823324823bba0c945f669e8046b8883532d728aa00012c066a4252886bd4022100a97a2ee71bdd1efed6e1b9ba31d99bc021163f150320adac0c980e5faa688c9f 
304402200a4d539c2aeab908e08502f5a60f115ebf9f06eff7c7b365e6e660047b6bea0402206cc5cf1865eed4e1eee2a680a7e16b9fc834e36131b25f8866b6bd5561d14fe2 3045022100a5d41f56b22501d6e8341c3ba728557469f3fa5a76c62dc60eae6c6f9a399b1e02202eca96b6a57ee3df9562ae8c1196cba63cfebe81a83ac59e3cf797686992377a 304502203c092a13e8021a96e42ce47cd7d0d78364134db5b6ef72fcd03749149ec3b41f022100a8f3c6c50785771884ffc6480a68c6e525ff94b4b28d684685c37e1b71c258c4 304502206f73cb80057ea4d35e688efb14ac280a4b727cf2ab1edff2a140e96f5c624658022100b573a87d7b1555606053ce0cae70a30ca4429ac4eff83caf56d9715f3092e7aa 30450221009196945bb42304309f88d7923f3b5e1f8113fde64139d10ec65830ae6e7ab92702202843e60913753a202ec684e22611c2ee7dba62a40664de6f92135102afffa18d 3046022100be8127b5068867ce86ec6017fe7d6f229203e5aa0c41cab64b0a62f8a0963753022100e7a46265ae2fc9e7e1257504f24a8dd11c2cba504cce1460b87d914573cbc6db 304502205abbdca63dcb817b7a9fda91893d9219c9a0d3cadc588bf1b438e38aae8673490221009a88770b1290a2332d8fa328febae30d42d1870073a918456794a5accf73f561 3045022021fa499f670767dcff6aed1a05f9f185e3fd0c8bd5c6ac04ff37bd7d1820555f022100d8ee06466df8fb2a6a08e8725eaa5f9b1ac02199411e81ecfa7378c636ba46a8 3046022100ffb61faec9371e67611740374348b9c96c70da294d4792e1462cf8795bc35fbd0221009232fdca1be74916603e34227a7ca071f5675f4ac7815b56425e9802d27fedff 3046022100fda104d8cdb1bdb148f1d30c5acc9fdaffbbc4b518f5f08e4818634bfe76e12a02210094145acac361037287b22851901e8e5771db408d879b5612c27c52fa5b2ec9cd 3044022040d4c48ae417b6e0b86efafc1b655f65bb741cb1f785ab4bad81f22f43c5c46102206c638545f1dda8614a4ae95d6cae6db9aaa256f6321917b8bc353d3a2d169de0 304402204f754aa2722f8194183a46630b5f245e50eb8aa4f5e01cd3e311d298e72990fe0220476f3ade78a2c437d6d3d1546b99cfa2fe0f1bb17cffafd24d09b23288b6bc43 304502201fd4c64ac31c3998b65a319e35a4c26eb256496a42f393ff8f4403a352a09c06022100f6269ff1f2a6842c75da65b29a32fdd0538635027367cafa158c504977ce88d5 30450220797867a04cdfe5077b27e7d0d8de6e05a2e75808258a9b26ed3a1d830f289dcb0221008c63c0f4a64fa740458b2f3b57d4235257ef8498712b1cf762afc18d356d7110 30460221009d3251f70757d28f220be1a8f818fe0f73acb2533e7a52dd8b5e6c121aa86aee022100bc862b7a37c02b4af8c721cf140223ef2ef94b349d14164348cf5c44b31068d3 3044022005e1d9d8d5d7dfbd3cafda7ab61e0ec0357b8dc7332a7df207ef7f4d21125cfe02200d681a3331cca2391f2f859b7fc82d093b4aabc141a6781e9805a1db7ec7c452 3045022100c3f1dd5d3613189b4a779100809a85788b9b54776bcc4942bbcc7610d7c1c707022011cd77912f9446c1c6e70e9ce4bdfa083b9faabf31fd53ac3fa15832407707ef 3046022100d279fe06a77000c40f7fcd2f5614e273273c95a64ec5eed3ed4fc571744e4efe022100c09ea3d08094a672ec7b4942639c384f262020d9f5f6dd2846c28c7db7b64ff5 3045022100e3ed180ebd791b14a977a0bc5ff6f4c03f2751cc6694cf18619f44c421cbe8ce02204c22f368bea3f73df604f851938bb14cd493874e6c8afb72cc9aac73dfdcdf0e 3044022006675390e4d5a6606dc7d42bba4f579f41a80fb977b17a3b36edf0f5fe53295b02200a3231fd8ecfe279eca92bd3f0cf0bd1cfc40a3dac836d7c8124c3929859b885 3044022014e5787070d29e3eef0ef10fa9fc2906fdc3f0b477b1e9625e560949169fb79e02206e9fee11b687add5af9f8d3961e2a2cc01fbb453dd853ab4b94e6df474ed6482 3045022100f18992e54ddc38f217ed35362102d0be6ddd00f7a87e8cc06e1165a9d00487360220023ac1a9e74389eb5a328a8be8a0d7d44e97584b3e568c270b3f35f66b2edd0f 3045022100fc38a0f7b3b7ff8c081bbb8f68c7551340e049db6568b040d66bc100a32dc35602203e34000068bfb234e4f6bcda2c9544d47e751d3f1f48ee28be4f64e8420e0da7 3045022100b6ba0a6bdf40955beb490414a855740c6038cd06025f203b434904d254835cf90220412c6da2270381fb423985cd6d856f002b30d84b204974547b61d8fe2e8854b4 
3044022028cda22261ebf2be268a81ee3f8ef59b2f332eed3eae7343be3c9f0195fd539402207b5e09fd3fdd27f2e9e5b3ee9975c0205138161e6f1a7d4715bb7b77ee19bfbc 3046022100cd080c35b6790bcaf766c1f381ba5c1448cba1ca8eed72fdcdafb50837a8f642022100dbf553e5916855de6f3720d122ba84d818eea60a4604c2b41123f61516dbeeed 30460221008ad4647d8456c3ec2ae1e716368c1ae8e7ba49330befbeb73b8b86593a3c311c022100ea5d1feb903be4768b8ef1cfd25ee82989f37bc288978bef42dd922235131512 304502202d68252758b251922f1e56ee6bbad3b43b8887266f6e733f425e5fbe6501e5ea022100ce17293c7c1840966deb279a9784d9d6a0902eb679d3413d2996d2ba592f6c14 3045022100913d08bf1d168a39e0c5408511647d8ef382fed0787ddac02fd091ab9313ead702205d67042ae959015438a13a750aa7dae8c2f47c9b4b6dcbfec224392d5a37a765 3046022100fec5b15fac57f1165a161f75fd750514c81e04cefe08d66667069545ba23e445022100cb2a1261f8fe79aa67fd9bcd9a5601e72632cd9227e1a7c23bd1c354d5412bd6 3046022100dcac8abe78db1fe14930c026f6dee053821e863f5edb823138849abafb380fa0022100c49613141cccc0d8681cdc0e0f2078d1921d48ca363ee04a4306ad2cede30aa6 3045022100bd81db829a64bc26bcfe6f9f40446198c8830863bd07327c78965dd5f8fc74930220405838465a96bcca54d8f473683c5c74e07c76c659ab3ea5b6bd34ee7d5a72b4 3046022100b0e902fad1841eb94867bb1c3b383541ee1f15117c0a68b75da5e985b903df39022100a5d79f78a485b3cfe2e6161bb11f88eac49128a64ee0b0a51f91688049e23780 30460221008f384b4b7b7cc7154ba1e952fd65b225959bf22f3ec9185b1c91db479b02ba9c022100cd1816ac3b3f04ae49512c15b52a0049fb9b53073131fa5c9f2e0d1caacfabac 3044022047882833efe8854a87a185422e6970598efc717edf7b45e964360d3cc5a4237602203d9a70b44b550832fb79547792f0a019b7f1a3b656105cadbf2fb86009933aa8 30440220274d0ef1cd35e6ecbfb3205f9f679970d66445b58a995d197f090d0d6884f28602205967d57a848d76b1aa69d41ee504529cdb1c0170eccf3a60e6c3479a215df7e7 3046022100d3bb7ff656e8c01bb7ca374d9b87827af79fe2790a445204b2fd73daab3fb48d02210089b7a3e9b7092aea9cf308fcad8ec898d14a00c65f6b97554575bf889f2045d1 304502210087abc8dedb4af775c2ad0c7634ea60b6f64279c9527fab23f32c2d199225fc4102202af482b28637a9c5ffba8203d96b3a8bf6659e837478ddac01bb386a14c30ba6 3045022100ed560ebf9e5af39066eae8b01ac94bef0370d6a6ee51aa2ca8999c93ba7673f802204eed14b524a4912130593d0adc19d51eb8c544782a637fedd2942d3091e91308 3046022100f7f75b06e782bdc99d5ee33682ed0d9b1de0c6f9ff50e4a0a1fc37f8a906b50b022100b20f4ea4db321d830dcd28bcebd06d11bdda1148212f5ce4e1b8c15a8033f4c9 3046022100d4479bb6ae1cd1d41c6ef072622d903824fabaf4f892fbbe6218e31a0d509a280221008d20b2881c87d6f3de0ad0f5c918b9912c113153ce02dbc1727f220473368f6a 3046022100f89d5c4a241fb518a1b46c6928f0f848f8941ef8752b5fd9a7587414436ea55e0221009225cfc754fca53e8ef4c0541d6983d17997f5f2bd3cbe11f99862ccd41c573a 3045022100dddb3645b6665f18f1654a76f452aa6b5846c8a5845d68fb986bf49ded0fc91e0220085174b1b07163122ff74be67a5c826f7a25582ca9b31fea6587a400bf7dfc59 304502204fd1f2cef7d660ce264323711a92caad9efd7a86ce4e777492796a6a63e5b63b022100bce117dc499551687c9ecdeec45f1b4265a820cdbfdc6a878208a0976da12ae6 3044022033ec54c19b526d1f7f5fabdea32ae66ad2d8dd323839ee3c5c9d474f1ec7d0b002204db437670ee3c86a478b92747c1b871edebb0fa033708fccb2738a6d4ea018e5 3045022100ae0dd609a2ecf360a3a6d23728706733c637be19566ea3dbb84f5e6180e195ea022034f32a5afb4d0aabeda717ed6bb18d5c821eb5fe65363bac12c18be00fd14f35 30450221008334adc484610190788219776336d1a0f0ceaca3181be6651c0c4463da285f7e02205bd61dd434305e9f4853572a012bb7da35818f3611808d6b5e68f4aca3ba0364 3045022007cf4343c9739d07b7a1c59cb84ed5196604bf3df881b507e3220e9307fdbf80022100bc047e9b2cf106901f157c7b42698ea32d903c2e53d6680887da573687ea3b21 
30450220334ac5ad144a32faeacb865eb33aeba91aa8bcaf90d809d2475d9ec7344dd2b5022100ae6b23e4ae4bc15079fc330b3de63f7a2e9d3084f48e3cd3a70cfd41139ac82f 30440220367f4f8d23c2a480b8f9c1eafd752020540198c663626c730bf5cf960366cd33022029aff69833f90f591dff96e9dcb79c7937fda4d1544786781c05c378b13cd400 304402207dc2a08b41c8c31b6b0fa6c4e60e073f26e47a9c189178fe8e2211e66ad6920c02207943f7c1f9d7a629ec39c15d9bfa91e93136d9139613c0ee8db3a2a714953fbe 304502202a42c00274a559b2e24155d3ee2b5050623d1e45586de91280fb87b4a7e4bc71022100c6a163a26ca57df298224d95d2a2d5f95659df76767b1c188a4ad92c5b7c3b98 304402203840cf8aa4d61e773d491876233cf842b534f90f0e7f3a418e356ec1870debc90220743cb5d2b7ed81177ea8f707a6753ee0f13cadd4217c1648f7e888d4ee633301 3046022100fc5c4043b674771a260eac1d16a89bb4f275e2374f164b2af08d72851f540c32022100f949579950596562c5e48f90d48f9109cdabaad2453f63e984e154304aee701a 3045022047689b7e2e9c3d06459b1772e07889c19d2ea4f420262a348102cb0b37f0dbab022100dbc22e6df49f7874a7ee640d366b66c3bb5fcdfaf0ce48566d9ac4b32f5484a7 304502207b393c85b961d489a7babac21f4fda13d3cad731c5a4b62321829594ebb475a5022100f2559b10f9bd59d73021bcebd6d479c95b6d95ac166d7a1e0fd600495d15b135 3044022069c4596026ccaacc1955fb82635027c52eadf88609643007267631d2b8a685f9022029ca5d1777696a4ffab1b60c692d9a5b54f97c330c9a4bca78dc09448c628ac1 3045022100ca274020bae187f262f0799dba58d34631ceb07c6df57e319963fa2a752f3a1502200b54e2c20ce60c69b3baaacd6ce4770677e629f757bd22db4688c645e84b60a3 304402205745b3e46727d914af0527667631ec8cf607bc9b1b8fe598e0d289e2b54fef6802202a0525ecf1667782e582afc2db5210b73351d894ea17296097e7c39d26a59227 30440220357dd8d4b0bdbcdd155066e9390528b9b2b5e0bac42ed7ede4ede49ddd761c9f02200d61a666a45f8b9a3c75124358fd14c9307eb3793124f9df42de34487ed9937e 304402207271770a224539e32ea6ed3ac5b4f262f6b09af6c61f68e232be3aa6f11287c4022005c294da94218791555355d9d42d4b0978a822a177bdcc959fdf4e6512ca30b5 3046022100f45093f627a606e04681d030e4d554f0cdcee6d57584ec91ee07c1b718ba609b022100b8999f61fbcc9fe5e7f6f941880c189b5ab61a1eebd71c2ac0327b7db2b8255c 3046022100af311156819e9672b423eea101fa61369c860e5b227566cbb69d5d80023b70b6022100812551b042c73d6ce40eb95dc0b3669502818fa982d89513b34051bd0a8121a5 304402206f73a1b800fee9d5bcd3580d1e0d2bbcafdedcdc7687224462cd800cb32728070220776da76b4f286a6aa2830ecaa372de1638685d9e810921127529683df11119d5 3045022006bb32959f4d61ee291a7d37babab58293aacb13882436a9853e75fb129c1d12022100d4f9db8c5481643af135b86ce60a02776c787570b3fe3c321e990aac22cc7d9a 3045022100f9178c7c358cdda723db2afb8b4663fe287bcbc1bd916a3e7253b53f5bf4a77302202cce541cbf322bfd6252e5caedb8ae168950f0b4f7643900dd289c1be1f1f532 304602210080880cc3d93158adf77d5005029c5999091e14844e72f4d94aaf9a7e87e77cc7022100adf9c7e5f846fab10334cbb8f6db58868612da7fc49b30cbb413ab0644788a68 3045022100d459a1a99a6c55e225518f9b4f0352a0fda3cd8434eb818e6651f036d53dbe6102204b6f3a05907205659f74bef4194084b89e38a64dbf999e86822afd9bc92793a5 304602210087ac5853d5f5bc899c11ab5fae307bc984ff7ac707d7eab73717cd1c7d84352d0221008fe2b5405b7f6abbe005811a171740f1191104bbb5cb117f9bf6eba30f4be6bb 3046022100ac6d3610f80fed0ff96779e89f4ee8379976fc26d0e0a5815e61614110ac17b4022100ab3b2d76cb1a7a549a97cde5aedee2d3bc9461e8927810d7e95ea02f8284d685 3045022100c8b0f93fd4e3f69f8c6dcce1cbc398f6edcf3b96a62510b9f93371325085146d022037494f38309d85d6ab06ef5e89072e021efe0cabee4893e405818dae67935374 3045022100e8aeb3e6e1e89c87057f06f0dd27119afe5dd145f25ab5be987c6792167288cc02204e84f90ae25677f42a6f00776eabe446113700fb39e52300628a9295db0c1268 
3046022100af790c2030da34fcdc1c3f70fe448ddb28f77612be6e16d13cc0f96238b2c0ac022100b739afe408ab5bc7e473d35b1e3a85c931b3861555cc76fe8b0c649b2dc2cc74 30450220513455a5e94d72a9abd25ba927d34f2dbbdbdaaf49edfcc34d12487d943c198302210094af76382e7a2510956b9f4ffc0d688a5c9d3fa55390b9bf27547655c2ca9fe0 3046022100c28a25efe5b1a0ee19158a763a81e14ececfa7c648a7a3ba6dd0a1c02fa50705022100deb3aeae0e7f58fd98f17ce6eb9ba6a7a0e7d380344c59121c89d652026d5e03 30450220119ed12b198c06443997d7348643b31f00b68f51a2587f55d4a17403ffd658b802210084050e80ae29ef72e54702222e893b226e36b03ffcfba94e7b9feb854da9244d 30440220760abe4441882db0325b633bc16d4f6e422156c27c7a0cca20cff9b3b05a367602200e7e7a40a2ce6a747ebbabe240d93629160f0aa76e6b471c387d0d694d5de0fd 30440221008494788cc73ff27906338f10b3d3b5aec3571b50ce2277991873e94fc670e039021f33b3f173d95f259fc75bc344b4fc03754d53d22c003798c3f19221c8b46821 3046022100fc0bf42b7ca497a84a6b04601638e6d7d3a5e317fdd8ceada9a5ff9c53381af8022100f0b7dbb15c883322bdd983253a026a08dd05ab9518b0bb30f75dd6bedb404c55 304502206db5d95ce21230b546b204075cfbbff5b089752d983efa2e6836a789c2ffa7930221008eea371f11eeba3a587000234519b8723538c99ab380f81356d099738985e71f 3045022100b210193b2f7b469d4b15989578a09e506d45d27669ddba9fe5df8ccab4a0de690220204f9255e8df2f7c5727aa10d662e5feb5a81cc4b910c3015d555e4a635ac66d 304402203b556eb08bf0ab681b421a3f1d338f1ac950424a77dfa6d32bec96591123bb4002201c13b88af88b4b545c06d53a9a629aa47729993495c14dd2b39f0ac1173f4fc3 3044022006ad247f87f0ef7de22e29cf859956b4c6ecff94d7dc2079ab28d5d242485d9b022036a4bcf58237ce8d5d641fb568dbf4e30d80319e29869963c25634e654c1e47d 304402206299ce28e633f2053d9de110bc221c00f46f0236cfca5997dbc1e0322b6eb2cf0220032e79d22e54e4748a4a7012f45d06f019cb7b9bc1a30928c97127eb55f57ec9 3045022000f789f2135cb053450b92ea8456ba61f503331d9e58b673f726c2b508efc7d8022100e5688a5f5086a3e7058164b5de104aedffa8b3f726f939bd3b2254ca20e54b29 3044022079c041ede8e17057e48443d46de0d6d5a13bd96dc0262791fad3c65646d4e70302200ab91d21cb2c0d1384efbe4acce4b1033c93c4cc8227e4c6003361e897ff4d2a 30450220680b3f9d4ebc0d2def511352c0b4183fb5b25ece3a94a6e8b431e10c1111d02c022100d38f7c8d1b3010fecfca077228d4e577888ecc7b99b0978ef4dbc1fb77098086 304502203eb14733cc76e8f7237b5efa5a957aa56fb7f70985ebc42817d07fc7f4455275022100e7b0201e8b48ac8c13e304dcac416767c1bf0a470b0021ad00a25c5da892e4e1 3046022100b94be79ce320102f262df49d14a0794e0e987418614ee742ec52fe132256fe54022100ecad932f0ba66abcdbc59818f985ef0f0335b58338c38e6af7015e8055ae4ee8 3045022100efe5e225e836e945524ac95016fb01820dfe818bd802ee32b0ad1dfee1acbff2022000c4a2395d968cee020df41d622ed3cb783789281804db13be8c6dfb395aba8b 30450220137a162bf52676f178a7f14f4b5ee551f2aa0dce80b336afff358228545617b3022100909239f8376a4c4c9c1ebb3b7e8bbe1550ca6de05adf4627a064cd00146da014 3046022100f5bda2de9a06c54eecdbb69a5a49932d0d5257d09b67964c8746c91a1746fc8d022100e264d8df32b778dd79ae186e2b45b9d0a2714c10cf529af47796c22ed79bb89b 304502205acd6f47d8eb941b3adfa2b9e00ab085fa7d42a3ef2670c7cc683441badcbce4022100927fe23de03a9918b9431fe8755da5796c6f625d17e216b36dbdb34500817fcd 3045022100f4c5ed77426df2701d1a7d7afddef3864d1c0a6bfcef5646d6a375325894462302202c6c79cfb532fca24c49ead0ec40eae31a25118198b29cbb304f10371ee1551d 3045022100edbdc59c73d403ef11544eceda85643c714393c13fd46b0bb67dfd5c7357dd4b022062872b60187cfdfb32563215cdb3942bf4034bd27957494e5409c3071e539a45 3046022100f546a0d161d097927c99cf9f402c76a918c7e19830fb842a4238d560e7a7db87022100eac6b8108aa2822dd764dfdd84eaae042ef03af534740e61c37c56b186519027 
3044022055ebd1b74cbc0ed774db8abf1473c616739790bc289c962f2ea966da73a9c34302201676fa31354546a416beaf23794e0109fcbacff730dd15cdca37ea89ce0ed5c5 3045022100c7be4462c4d143136867daddf317ad813ba58098478c47b259c5da9dd33b0af4022068a5fcbc835f5d5f361362d722dc8010c61c731c43799b1ac8f8bdfdce014886 304502203a1871872f848763459da542f3eadf08d20606aa150988708a76b5e476d433de022100c81510eedc76c41c09c97f7e5906d41ec14fd8112d4299f6c735545a4f992454 304402207e830309f2b8f0cee5dfee441c060c7b0fc09bfa607c5755be689e5b870cc62902202f458d89093b871fde29f1bcf634c89ae3cece0ab62c86eb9ab5a07e5b628aa4 3045022100d13f69b6d46935ef756cdbdbd8de2afce871a8975aaccd316c1ca23ce21af9e602203ce15b1d72ff6477dbd3cafaf814edac8408eb807986b8bcf30655a5f3c94ab0 3046022100e2849d7815471b4caaf474caa0e2ec41fd07a527009d9e7726f722ccfaaf2d8c022100b5bc002ee293b226c91a693b8870030663b017ac699dda5cec4034e100b3180b 3045022047e519aca3feecc8687a4a2062ffae3caefd2a03639229fde9b96152f6b64782022100809e4daaa74c04c183fce3ab816fb557e43c4e2bb0c1628b012a77289bebc035 3046022100c62c07b800ac8919f4b650bccb29758757ddef53097e1c36b43256b3b19bd998022100f8b0440056d0fb0afa50d96e2e1d100e680e877f88ca425cce7e5aedbdcacc71 3045022100adbddeb765f8acd722a06ca3fba9d55395b182488c8720e31f30978a668d081a022017060a82cdd94cab7efdbd13cfa4c5ba4e559a78804e5cd233e6c7f8516ca243 3046022100a086602b5adf36f993031f703e1fb71c0b7b217f6223e92a675a4a6bbc40cd480221009bb82fdaaee523aec21b40d947ab787581dea26aebc8ffce5cf92fb1b0bda81f 304402201db4fc6778e6c9be68a171c5b88eb9073fb1de44151568e230bd6399eb2acca902205dc07207dd383a5591bc31f6fb8b8b7a48ace8f3b604c54cf9f2601d6203ad4c 304502203ab032004663f756e02b1598b7cc0da1146750158468aaa102c73a9280cd6197022100940c567d6c96aac25455a154da2256f0a12530ecc3777d1430823acfdc891af5 3046022100a79fca817a8a2d7633d089dc4ebe4a58044bbdbcb5b686195f711f975859a3d302210096ad224da8826becf3c870e62aa3aafbc87ace8aa13813af0a3fbe0cccc2391f 30430220408d12a297ff68e3312985e1961bebd8232b3dfecc220af51d5651e9838fe1f3021f258daeb19b96c8e53d76d2e4299b0c8ff65f39cf495839f4b631eec02cdcc3 3046022100a3062831b1d036796df0c5570fb4b4e01ad766e56fc9cda61122df1f4712dbf4022100a01910e5445e1f9c1db147a31c9195d1e205d19e8689c4fe227f1d22077e421f 3045022100eb0d02b775c17a03a105ee2efa0ff7486a6066a3e7c48ce343080306d42785f8022014b2efa8664e4702789d10299c460ac4f8c2d1162dd654c943b6ccc3e2cb8111 304402202611b2da037c07d8ec35c5fb6cd47f0787bac2859e738d3e2026e7ec5bd9f8a8022046631afd09b409d76b114a7dae47ee6b8dd2aff4258f4e2e7e6c26e44840b7c3 30440220546b7edc959e88c869e55bb05f6f1657ec2c44ad84e088773a54c16676caca4202200398f8a44d498004f14ddfd4d5f53e635704f907d1c61b8243f04c9dbf54fcb4 304502207e1a2356695e7961c233d81faa429ef6bb294c2a1f3ae0898bbeeb084d2d5f69022100d0ca5856b0de354f7ee658f131a0b49ccb56f7dd264274b2a0c44bd753ea2ff1 304402205f458e8421b5297c1d577c04eb51c64a0f3de0d039a744f7e33135ba822d00b102204c2386d6df21333d7142b54357e8a8ff53e467ea6a27675420f80907f23d17e7 3045022100fb42d8216518642a7af79276ddd84df2ea469379aa5611f182c4785ffe313e39022003936e9bee89fc313b831894d04c38b4251fafac463f92e27da054afe50b2063 30450220712742cfe42ca0b2173751dc6fecaa8a7f579bfdeb316ac80262e18e58dea408022100e94d7484375b1e7141cfa5ab6ba4374b7306af944cf8fd30c10d695490800d93 304402203e8f262895e91c49d1b1aa10f815f518bcf3cc97059af0bb03cd10988fc8577a0220067537e187332387ff3191e9ac77fe6a0891a3d4179336992a3f116f5c9a614b 3045022100a24e103c654223e93698fd7f59648c37745055870e3a4b73819b0f22954b49f8022023c7bdf99c4bdde1baa18a9b39f20c469468fdb08b929a11d4a908fdb5776379 
304502202df0c1540d9297358c73969df8690f3c6a2b2d9b277067fdf4c1e754150be3ff022100ad92844bac831001ad1e84d07f47344ed0c4846dfc521ad8b3fe90c074467926 3045022100aa6fc67100d3e16bfaf06f47ccd586eae0704de36f7a797a89b1d029d85246ba02207e950e927a5698b30405a32a121e6a17e08d16a4de748ff329a6f7dd23692df1 304402204e7e7841ef519b58596a1baa607deb24b61d08f553c3e4f024d5a5350827800e0220679d42b95bca174d36565341f065b9b846781c34f8146db541f10bf18f42d525 304502203c2ba1d63ba1307b7dc0bf5997dd39ebe506e224aff5399ad3394cca6d41be29022100f8f29a74d16a857acd60dd181ab77d882d1aa7acff15f55fbf737bd8f8a91e9a 3044022055e14791bae0c1c39389138cfb584272dc30d65a46fd0d270a3db7cda214115c0220548227b335b4f14fd83c429e6f2f4ccdc0050453c3af983d0a62f2a16018fafc 3045022100cbe8ac95caba0a55bac68fb528eb958933cbb2711ac62459a42b8096459833e402207dfb5eaafea6b300548244705c7beb8f013931161d54d22a7581ae1c13a4f8f4 304502203f43cd200305915981e08e7a06d984fb3a7a47cf098ca90c954465aeb58aca07022100b4d46da5d2009c60d23675fd998fe9f4dfef273f45749d701b17b3d698f349b2 3045022100c36bd42003ce2be8428ac6b522978315d5c106655d9b3aec20672d64187ad31002202a461551ed6352541f3ad5862b07b698c0bf0d75e1004cf409225fee1959c70c 3046022100c8238a3f4491b7d359d18ce108714bac4af5129fa53fa030c5f1a7523b73f764022100ee28a2a3a68159bcb80254c2cf6a404f7fd9907d7d07726d17f4f8b89ce12a55 3046022100806b8b73e011c21db0ca319dcd021645749b41788be8932febb0991dacc22f51022100cecf1c4f6b77b84808c77bbb3c55c394c08650c1c671b098f8aad8b863bbee1d 3044022039d5c8525a48a096732ac0f9afd09e0ce0ded3f5d5ec3ff888a74c885961e4530220035b73ec8ff2fc84885dfb940a4c7097cbfb3a1715828a8492595de57422cb42 3044022061fcb637483ac6fe60667ab4bd8acc2bac32305c023aef7d404cc01f5ca8f25902207d80f10c00fd725dbbe8b8bd5d2ad3413d754ec94ed1d59989354b6ed548996b 3045022100d365456b55b065d3d73ed3549cbb6282f231fc808caef09cf56f10f6b017802302206f158674903698332f0e2a64ed4af57489c39f03e9c7f7ffe6634287a3e2d2a5 30440220229bf1ace5ef3fcc228b192acee9b0569dda156d553d923c2bd1feee4828cad602204ab20c0152fb21fe599f2bec43998c97bd406cc5eac8a2681bde27b690685df6 3045022100d87ab1c80b6a824e4d86148feca5e639ac7e601e5b37ff2e7cc5788edd6ce31602206b4cf50f31c242f706fbc2bed07438d80544289e988af151b78035d42979bd7e 304402203cc50286a9a1e42f5d834ae094dcc2b33a49636c5f055a2d6cfed1b6382881f2022021b4d83e48a8a52e52749f2f4f5cd10e04beb3d5d94c74b8416af67413c57cb8 3045022100dba463f4e7b7e9c370d5f9512a2851b3e5329fc82b9483a2a8e286c3c033bce9022054f80873cb895a443b536e839ff4320dd561f63df71f1b34edb11d27bb7eb489 3045022100d58d7e97e75714d0493f85e589ec835a89488fc157729cf74fe8b44c161684ee0220116271698610e8c7dc6cf256c040b436f7afdd260730de32d1d2ddb08e4ab74d 30450221009655a81b20670eae15b55778a9ea1f2c88a66be742be84bf9eced9390ad3da7102205df3bdc920bf5efe007f3213e209a2764a24b83aa5cb8e9b96eae5db88c2740e 30450220270218be99755eba6d044fb95c3cf62b226cf8308a7bb9c62f3b74802f398353022100c153ea6677b13439ce2c9cb17f2880aa35cc08e80cdc58939e9ac738919c5c76 3046022100aa4bb8ca6e63550a08afd210dcb3c58494c074277324cd7970114312b5fc848b022100d9085df332a715337f28d9c5788ee95a20db6de43c42fdf954a878e1a249f20f 304502205674258622dc7272e3aea957a4eba7963bbf04b8d315ea59e5f0a4c3c23cd45b022100af35d1c6c7332131d76023e6cd9557727db289dd6f4b7e1d7f8eb754fa4d9a74 304502200d51c297239f0cdced51c3b7ac6c6530041df6809118a9ff5331246c07e2225e022100ee08d54bd16db5238c501d7afe7576ac9a8846b512f422ecb394f3b5b054b9da 304402201becd58ba74d34661bea80f9f7860474e5210cdce71be6c0d1797f3430f90b1b0220505538eba96d03d137229768a8e5cd48ce5153d80315f9bcd01890bf57ef377e 
3044022063d79e6ee2ae9dac367755cb5ef214d1d80907fc98993ef6d2ed545da7bd041b02202b9f3b97a8bf2fac85f8d2bb9e92b78e30d515cbc91bef0467c220b31abb9cd2 30450220026a32f238f1f75979792411ec6688960c90db6fc5302d11ec843769475a2fc3022100d237ff011d5cf902515c56a2335ae5428c1636225a91f91f74891351ce34ef05 3046022100d3f07cdf1647523a62a0916aa5c216082a6671367e3db1b917d5ca286eec0a89022100a65f1425c8b8cdd87a05296223385925a402abe65fad6d885b49f65c01ce6026 3045022040a559f02b3f4a40d9dbd9e564baf8b15d3913665b2b0af14f31f3b85f1b3400022100f5df73873e6803753fcd822568181cfe652a856135777dd22096f459e3ef51ed 3045022002c0a9933725191eb1b7d118115a06e2d79a6c9a71ab3de4558c49365beb6740022100b5c2a887e1c655cb33df476abb2a73621d2b548cc2246847ff0a69520a5323da 304502203f39dcd45086b1641659ac9081f6c21c26620d5f99fc0b40c8e1dc9937076eea022100b9763128584270b240a52e9fac1fbf940eef87d8af90365db3769f22a4e2cd2b 304502206ae05a706ed2628f0cc1a02cf18daa1a2a86fb5a4e4196437b64a9c5a8b4e719022100e5a5c990f5451d8b53c1291652875b3bbd16128093394515f9f7914085750339 3044022031c9a4d390442ccfdd5ed09495bc77ff4ba74ce1b019f3a6e51e2aa569730c04022035bc51cdd1fe001908821e6ce2d6f977ac9319da0f032a4fefdbc6153c600ca8 304502210082af4446b9bf334f865350df8af3eab7f99f20cf15a3dc1e5131fa231552cc7502207096db4a9931475eec8ce65fead6b9f4bd7e1dc3cc11fdf764ca83bc468fe6ba 3046022100bb0a3123cc95eaaebdc31b1bdeff064ff7c6ea1234c8ae678e20d7dddca05238022100ebafc5381787ddf225dfde00a1b0148b877d56b2c447528a3607d31925c30316 304502206d625449760b60060e3a8721a14fb2ce85db06778a44e02c94149748ffbae7bc0221008b6ce2d8a030a9dd46677edf59e303ad2eabd28fa1c6b8c7e178b7eb18385c80 304502201595156e1eb87c767442d495006eab87de46085900e773a8c610052f772cc984022100a53bcaf143067e6022477e7e2565818bb2c24ef80387764b968cfd864241ef4a 304402205799c808741b3878fbe6c579d88f2fedcc0f358b741fe1d05cc639fdf5e8b5bb022021efa9896e4938cab9968bd333f574d8528e1c24d8edcba138afc7fdb684feaa 3045022033da57091a4bc63f6008d4cb1135ed3a4d5c981c4f65fd90f085f5ba2f7f46d4022100e2d4d2ef317756aa82528b8a9706f3ee071a1883d58a1b7700bd3e6125c32a18 304402207e8cb9b88686c5f8240b0ee9d93dc28724920452104737a70176c8585a7cdb4902206070219e1b41719944b9a9004b7539e7b4a7da077c5017aa1b8035e53de603a5 30450220539eec7cc885e3520d3fabc42af4331d08fdd46a957704c7e5fc00cfd7d3fd97022100b7c04473858ad3c90975764640687201d58a6ad995debb74224d82b93485ba7a 3044022005deb15b99564ea1380b61e01841ecab3cfe571faad23db4cd754b53bd286af3022064ec8acb4d6768bcc19f71b2730add5b52bc881c80ca61609a061b0e23928e8a 3044022062a118e1f0541f9034e48b3f7e1d4fab37e73cdbf2b27529fff57f115629a024022049f41d7437763569ff998231d552fb82b5ae033e5994767ef0cc5b5d43d1fbd4 3046022100c55a5284791dd756916412f15a796eb6c612f3696e2abde04c91c45297217559022100a3af870fd59ab4495fad76f9ab306348469da54601fa219521febaf6c45d92de 304402207c719bbbfe82ce88ea2237d7b8ee512c1da962a24d602e54ed2d729de8ea364e022015291421e8c2adb7a3c3d5928ac919d23dfc79b085d2a0441c1c1cfd2aa361cf 3045022100e69c6d7743defc5d88a0b3a54b051a8827a65f40bdbd9ada2fe5426387de7a2d0220795434a39e95facdd74344766e81c4f38c83ffafe4f27cf85a250b35c1f8a7da 3046022100a69babee9cd7d2918e8d97f294826b2ef3672d294006d899650d30c660d731e8022100f515d133386124f964f3808b5247dd4c95c1210976eaace66b646e0974712710 3045022005312e9d9738f1f68924a6879d69a7b8bbd823a475ef0fbcc1ae0a3008b177bc022100e1b63c156fecbf1030549c0db837b16352bc7c8cd923bd2dddbba95478b9fd00 3045022100e90a2e45d40878d7c47d96a45a0c918424676ec55dedda67a8f40afdd1a2627a022025e91440cac1a7990d2f459505517a5e6456d3748bb77aa7cbbb7ec614149240 
3046022100fbbb549f7ccf0ae2aba00d4977ff3449b3b0be4df7514ea51078dc995a2c0741022100dda71e162b18e7e5f1fab9a42823b7cab8188332cbc8e6bf7b5050cac3f1a236 30450221008a99b3bbdbbbd2850c96d7480033e81e90361f621c77a0c9e28e7ba29ae73bb0022043712a1531742dd6e8869a82d1874f2b8b901d15f8648d64d8a24020f0081eef 3045022100c29bb112de82734fc5bd5b4538df535a6fe660e0263979109cd112cd2d55b4e402207c57167eb8f966565de60583d48c5287d135b552abe09b0e7acd10f4d5ad8eb1 304502207f2595c29ba93b902e1b7253536a8f3734a97cbfd5a0eecdac4ee2795f6c0dc302210092bb0f1e834ced71cc07bdb99625d74220208cecb6a2e4893156b734264ed4ee 304402201c0e0d80022e8d8fcf2177151631fe9ee80150f1d3166fe4218b4bc79529d05202207f48310b0db85bbcee5da8fd9e4a3a057b87f90421155e90ce0238903cb9c121 30450220258ec4c40106e2e288ba76c48273698718988b718e3befaea10fb614dd24d1140221009f036a3c7c1cda5c92f32d8b36d5ae7d08392b9ae9c3194f1f2b78ce65ec7b48 3046022100a56ba351264e6d3ae4fbb8c1cc90c607e5fed549ea2edbd97c2441c5fb692d12022100fd2cd3a84ee97f630486b2bba3cc9f57161612f3b135980a1cf923d3c4c1a889 304502205094e8a4eac4d84174702e4dbf8d825bfa315f73eadc6869ec945effbe41303d022100c67d286a7f8641de6347eb3fb149b02468f27b78ac2681956083ae21b38c3891 304402205b0ff1ccdc943794ccf2d6c5adfe43d3d0b802ccc2bedb1163c988aa4890851b02205e8a8346ba76aa7185f5b43aa799eaca5fff1a952e1ab7181966727238153b0b 3046022100cb3633ca20d16237e2e804e29bcbebc1351ef3738d3e8f3c7c98b0c9b7ef4004022100a0125121db4b632c5d716770c178d5ffe512d47876fc135e12a5bdec5d70f0b0 304402203315ef69b48ebd532aae0025274be64aa3f1724173931e55466cc7fbb961e9d802200f93ece65bf295e9f92183f7007614d9c995b2a80594792f73019f3a82cdc464 304502210083f6e627019d60bac18932be6d630541c3a65966dbb5759bc222ffbe6f34d02d02206e2a4bf33ac3c095ed531c12d74f6c5e1dc9b1910b8ea565f6ea200b311f0d7a 3046022100d3af8cb935da8f850116b273435c00a985ab19bd7f9c77d30a9c3d48198b5695022100f353ebddc32b848b20b7dd807a1f26afa907d95e389cbf5aad6816dd4800ca20 304502210087818a47459b3555ca9225c20acc5f55870da738c9dec324fd09da6a3b53ef38022073d177d7c70a935750d6cd63f8c83bda4805ea8a1bd755195dabac17b81bceca 304502205309e291897a84cf03a903a30b4627aedf35e63f9a9e2b140aad297f0c50738a022100fae660989cc872f7b94366aad9d730e978c3c54684166a93cf0cfcc2985a44da 304502206885a9b6af8226978a3b53b0d2c45ad2d35e3a3f14313f80851414e0467c7ef1022100edab8cb8286a672cf3aeb6743034afe99850a33e927556dc35057fa5a07735a5 3044022062c673c4e9349bece2d808875e1f3b1856d462ed5d0513ca2bd00f5a73bd0daa022030314f93a1a89d6fc8570a93797b093ced01fe26bb8e4330f4f4dd1a663d437d 3046022100ffb5cc3e37b780ae0a8e1327138ca68e71f8b47e1b60ed6151f578e9432bf218022100c5e514407f7ce08474cf1078673fa3b21d7273602d149f587fca6ce0689a2f59 3045022100c009801e9b0e60626cfca346599ccc1e940d4169e34b11ced716a8b75359212802205205da2626a7383e48c560bab37410bff90ba7ce8efb5de68e1f6efa2d4fd5a8 3044022018b311aefaa7b5a1515e8b038dd26ad840b2c0a99202352833eea49209b34d19022033b4030aaeaa28b53b26497d0a83285f8a9a641aa90a82cb4a4c0db5dc01fb47 3045022100f8f88b6451d445dbd049a6e5083edf0294a948eb014411a36019a504e811ae12022072936c7ceec75aeac20b55ab8291dd76562511667aa7cb1011a5fa3c27bcb568 3044022067653104a20e3f98193b9da082f7d4b26eef0d5f3ff2189052713b608f72898602203b6d58d14e41c41178133734f89a937b7f174b9a8281c8bbc765dfe3702575e2 30460221008624f08a7516368562ee8f973a4b3373396c1711ca577671db07f23fe02b9fad0221009c97b8b90493877823d5a0ec9c73556896470194276a8d3edf390e52a617c48d 3046022100d88b0d77a334286ceba041c5529be7e1cb8b2f4f20368722f216b26b4271e39a022100ecad1d8b527c7588388553dd71155eb07e0758d0454ee63afa1670758c4daa72 
3044022038f86bb0866cf7a5a97d3b6e53d1cbe4809295765df0bd7ee1be053e03601d8702205d926358e1796ce40009ab3e5cbd932e3d07d1624fbd6668ae8a7fa1009167d7 3044022027d2cae7475c43d39b38c5ae05b2a5fea02a8c7e1eb7dd48b3161e51d447f05802201abec252d32716d813b08bc092e08b67d21ae52794478b178a5d040da5bf8702 304502210095e559b59ff899c952251aad55db9d19d25eacf919cfb3917897e15513a132a10220228b3d48d311251673570e6a73ab4e0aa1164773a63f6e043f9de01f3a04bd78 304402204bc91355e46c5b7bdbe2cca9146793a10e7e85059bbe73655629007b0f2c125f0220314d782611ea68cc722dd24b11f56f9b99503bc269d776ce33c40bda5bb1235d 304502210092d34ec84455be89fd3131a686a9ed72aaa5f21d028aedb31b6303d8eb2827a902200a42c8fbefb7e7ab272b58a8590c2048b3d5086ffa05515d23cd954aa17b4192 3045022040d3702fe24b21bf404f4ab11ac21cc989d74a3ed164e3c40be1b7bedd594585022100d1023a5d58d04ab7e010e58baed1355c90037c82303a3cef8d2cf1aa101e1410 3045022100af458c3fffe722d19cd8394ecf82ea83948022c97731feb96b65af14b4b7a24f0220179d45b25fd7638bb23ad48e0862e2cf42929cbc7ddb096e4ba6503efe0629b0 3045022100f7f1399b806e028ce6e3edc2acf4df0e288859efaf8f721442ccc0075c0cbd2b02203bf5f6c0e64c42a227932af4e433ee6a381e52489cae393ecf419c4b24196b8d 30460221009a6c383bef99d80f2675299f3ac6762330078f45221b1b7c0893a07254c3527102210093b876b27d3520a8085dc4c273015c27d48e030f40efd4ef944ebb04bb58d08a 3045022100b85ae58ba2a10eaaa275720f37fd17d97ad33ac4d27ed5fac0b7a4ce05a37431022043a27e9b7ed77d5ef3d4e7b8d3fea098b144e73d137a74c2a88c9decea4e7582 3045022100ef3fba706b5107f279a9f045fa52fec67fb75981d62977411ed60f0aaf9996c2022035a694a4823b029312eab270fa4d97d1b4c4ac977943100c60e56a42f6ecec97 304402202f0e7707d96997e8c983cef988d43b8515773c3ef5b258a8f5e92f3c3ad1602602200eb28d22a80dd3c6daf00af903a22031ed2fd1822af3f8e39387902b87758471 304402205c2c82e9a08924c35d8ae40dac2770da9b45e229d5338740d9f50d19a66792b802205ca29cb3c18fcf3d8523dc84ab1f78c864a91a2b64a690d1c97721128d9cc61c 3046022100a71ee626fca5ab00f250499f8560f07d9e27355dc20dbd505ca8167d4d445032022100ef532b1f388f8f783b8c052ae9f118becec1aa394722756994967464632b2c5e 304502201e68c4012667262e90bcd2a405b9d500d3bcb74f97f7579d217d5696ac750ed6022100923cf3131074ce264e7d39937bb7e6e315067f6338eae41eac63418e77b4795f 3045022013b3841822493e6dd57bccb504b7e1a748c6ee66404e8a70c34da47f5254645d022100ef0d991000c9e706acedfca2671590ccd5aeed4f50242b583cfa0dd0744ef64f 304602210084f7e78c9dabd8cd9bf0218f4243d4017362ad8a827afd334df7d85d3085ecc60221008ef6770b6968ce5441ea9bfd1d24f8b6f1402cf937b5bd1d518e197594b76715 3045022057fa80082d6176c1c9240dd6e8ea12b3deaf87c361ef2a9f1c44abd7f89e2072022100ae6fd03efc39b62645349d31486ac1749c21dec72b145c018b9a422714a738d8 30460221008549128502683556d7c30b5b41c97df82d386b2f473f7ee672ad2d8d8163ba330221008979b9ca6567b7ee16a7466458b610c090eb176d71b9e7143a59becd01cc7505 304502204c6a7a1e8f9473ac7e1a8357405988b79073c2619c55207d620f0c6a3d674103022100bc0e76af5bd1baac50083c4844d03d4a51d0bf529a5054c7bc00eba38a70748c 304402205c93ea74949e598bd0f7ad04244fa67e87c860e061b4cca5cf871ad210846acc022019461f51109d84cb185a667222c79d0d18a84a257b3a77823e8494b8b6b4bb8f 30450220652f4c341f7370b19febd6769b86f5f5d44f4abf421268653d66c59d74c463f0022100a7eb1384af4f74a56899444951e0a6b6599b3f2965443c4d91b2acf6b34c64be 304502207b9316687f7dc08678c73e31a8db15ca96ee07687cc86a63b10400f8bb82c9ec0221009ad04f8323a4637726cdfffa8f25e24b11e83bc13c4b242f919b80616d3418ff 3044022039e7254926dc1fe97aab741244138d42a36115c247cc52f2e60a13d2791b536c02203ab4f5876561c2ed0f86e507d58d743f6d7df7b8b3af5ed2fc0d903f8faff113 
3046022100a65b6b5d52273a2805c20f5646f005c40f21f8400db0c5f64832bb89721b877d022100bd48316ea17e0b12cffe7b6b08cc6cc449ba194c0f63c8cf3cd2da9f89268c64 3044022027188987598816ea64a8a5a846e1d4d259d04d97d2203c5c3255056ab8ffb8af02205022cebe0369772840440182d80fe418f3c610b92d3d7c60f8fed7151ae7958b 3046022100bd785ecbd39c118a36da11f32b0e1d45a6c1a6a27d52aa8e706a105a67d03ed4022100f4339a4360d6af165528d99317eafc9097443be9890792f24a3f2b8291af28ef 3045022100a90081624cabb6138423694b4a2490a0c96fa49bca95aacea9119153f9eb9ea602201bbc0049df9ea9073a4017a74c1e859e3e9a004e3920077707631804bd0e5503 304502202a3a54aa3d651ae24dc0a435869e486b2a54628f8da17c4302826cc2697af920022100d3f97cf7d46587e057f6999884657cd0c5d4c49f6ef4604cec391533f3e36107 3045022100b086c7e39dc6880446df7e7dd8d5a721fea70ca4af93ce391d1a606907bb3408022037689ed357da71d44e7107f717cb9d572b946495d3b8d8f92e2a1da800fc1484 3046022100cdce46750ec86758fddebd17b07ab47ab2f6ac6ead7a10208af0bba8b051b711022100fd34b5309d1c5c635d5321e0176c2c77d7837d3cf08bb95c9099a9c543be931b 304402201959dd8006532ea9dd6f52803766c4ab03147cd2a70ffb33310736f44c0a7ca8022019749555504bc99a30d0326fed6627def7c6f3f499c249bf434c02ed0e4b2164 30440220593013ea618dae423d00f92c7c4ada65422ee586c990c25f81621b67e197bd5502205651a59cc0a51404cfb7ce1e9f39c4223b81d916095f70a2c3934e8b4b14b31d 3045022033c1da46d33f824263dd80e4f3e4dbc079398300dd5c516a6b29f7d43264e2ba022100e15efc461435b6af4209762d5d6de1ff68f08fd2991a368e3e79e3eb03671031 304502201358897549737ce705a73ce911dc4a93dba1b771855af9cf8ee98cc8bec68b29022100de0feef942b2fb55df5d1c5af823e60b1da211456d46575646ac3326c47c1381 30440220091f2302ba275a946260ad2aab2853204ef25a546770d4933b7cd78f19a7c23e0220165c52bd604422411ecabe75e1bc4ea650cdae0450dd8a2971d2c2b3021ea65a 304502201d068297cdffbdf7fbebbfd297a6e6a074580b12804e77f17f51a64ec686fc43022100de7377de3b86058a24c551967848bf6f1bae8e981ac2620e4e25b96e16cb9e65 3045022100f4bc139a69b3658c6b7cf54740e2013ea942323d59adf950339c7c965fad08de02206bf8034e40b15a241b0976768ad384a7cf553eea6e2d0b3fb1258b460b74d530 304402201ff171e9602f6d2c4bed9386419d8d1e96ae8dc09a6129bfaf2b5cde89cb07840220669fdf332b3550ba1d3093c22823bd4e343b6f785564f41c406fffd5c6d95910 30440220096e8e2e00c0cf1be438302e7dc164462219498f9309f5f8b21f9a727bfde93a022035fadc16b850c3c577c8aa446f040b900ec7cbceadc86c4de9fabb0e23523d12 3045022033a9d26b0511e9d62e1b2d7c9e68dfc40bdd9f585a24b7aa96ad7521de59a32202210085a014a3fc13273db8cf9848d99d05487175560a33f7fc4fe09e8d03baff0cdb 3045022100faf341666b0b833f4c4d5fcdf4bab30503812575adb7a5079f735cf875ac0954022077f523642e422c0d0046ee68b6a07080d8c336a5670327b326560080ec203642 3046022100b3c4907bc2b2edcaf8cbcaa65e2635706382a17a4899d4dad425033ee5ec636a0221008652bf18c1f6559b9c2fd16465eaa5709f34352b9ee5b7dbcfaaefcd508e4dd7 3045022100d5fa876e96aef57efd66d4fe4072dcf4b2049861c86c9c677651a6e6bb6b53c802205c689a428ee28388602f46f5dead810ddf3a757d717f5a710cb28f3d137fc282 30460221009b135ca11a750c1d5a9c2d5fd8a3f1da6d8cc8d7afb6bf4932a25101f4119cc6022100872538121d3ee4fbcaadc82b1300124b22489180d3cdc02444aa37f81073bd6b 3046022100fc847cfdbcfef0e2721d61340d3443aefd6860c760de155e1dfe836498a64e24022100aaf140cea70882f9dabd2192bb54a3db4db6ef9afe3b3ece396e36b024895b5c 3046022100b708a2119438d7b39abbcfdb5a36532918a820379ee47548417d03682518d57b022100cbc782c7df93fa0c4dbe26d60a67950c2da25af22d716e1f208ebca83840d15e 3046022100cd9ad4186360d59484b9dfdc5d16c056665da8da4e3d58499340e581ec8b9290022100b3c4814c9bcf570ce8ae07b37596b2da0f8c2739ef4c0fafc283f3683fa93c4b 
3046022100ee36db59550373df589cb7d056d9cd66037d0d9aba0be53ce21549165ef6c6c2022100b0237a7f3352e13da6257a7bbb2ea6a1387935724098c55ab5b5d284aeca7e24 304502201655e968f68c18986c814903d62aa27064fecb2606ba3292d0ba8b4357970454022100f8b5fdfbf9d151763ab0afbe7d76c6c6b08563448926a4582b51b581bd0f6def 3045022027324f6e4ae10636435026c1986f28b360244f3baf9ede1f7fd4da34d56e443f022100ca9495da0c3973a799ef595357eae3e2b362bd1a0302d478c2309a60bd9297fe 304402207e56d28a2cda25230c1ab69fe4470cd5d305f1324eeb6da77a93bfeaef04174902201d8712624a7fdce16e6981feaff4c7322e7790fba744bf545634c3e9b36dc515 3045022100f335daeb1525755707fdf710ec006de3b33d84b400ad459b23b56dac9835f469022079940d2c83fe2537a0cd1da664e3b039e778c9489409718bda71f5daecf3f17e 3046022100dbe57f5dfe940baa094c5eae89a976a5350552b70ff697724c26bd644d0f910c022100e589e70c1d1e6dcc67e30a2cd870a13b0f8e75843f62210a14b4c6903eb3d81d 3045022100bc5d0e694744a40f2028d6c8506dad873725c83d95e347ce4d33b857cb69433c022012c94b84f7703fdcabb07201e1f7730f797ed0fc0d182b4b88fdf6199186c05a 3044022001a908fcd44b6670850dd2e85ccf21c05496cccf0f54ba6a6607f9f66a381a22022062c3691f38196e68f0cff0253d11232c17faf62e294454b6b5df805df4c3ca79 3045022062538c27a464aa715049d8d6c2789893dc8d466cfc353e93dfddc16814370b280221009a15eb860d120c3c01161c1e73871e8f8f09969cf59d0868273ced4d1f1590a4 304502206ed180cd0ea55164fde882f26d5ceeb9ddfe4e70981cce88eac28cb1085ea218022100e1eb74df7f2cd660d91e99a534360dff10ad344bd9450282f44db88bdf5fce00 3044022026d9316e803fef1bce03674e263a9807735d8faa231f38fab367ed57b9a3182002204efae3e4a1f8c3cdf53f14a813a9dc09f9a80017543dd9e5ca8caa6cd979cb2e 3046022100ca9155684f73629a4a96f71a8a5ce96e54b0d631fe32926e02407eab4761eb26022100fa53a891ad8d867ed9890daea5955aef9f2335e68ead0e29ae3de53d40048ee5 3046022100b4c9674fa7cbde26bdd641b9832c13ce5b8e97fc2dddf3d6d69381af4178ad5f022100ebc9e2ef1f74de3d889a3de034b94e3bf638ac6c399f03b902637d2b850c26d2 304502210090d1bff5f2b9c73b026003ae6e6a25ea7ecf6c388e7838c49d206131e2c20c140220317bc3bde2aeaf8d68fc30aa49d18d1195be148a5e9f5d64f2d10bfad941b114 3045022100ec736b0589f50aed1c8e442494484fd1e4afae153922269ef58033bd5fd6bf2802201051a1995c2f2b39a6920acc30d66a20183839026237de52ef77cfb015ac3d38 304402203897a59be32b91d2e0518fe59bfb4aaee392daef182edcb2a81b5d1e320ba4590220468344fe550685fabe83e87cfe6a44601e1db951c74ef0f701cfa8a6d59e99b0 304402203a571a9c2f9181f53f67a26bbfaccce2de3576e5206c852c0d0aee4a7cd4309c022028c4290f01e84dc946d2d29faa7af39fe6d6e1d3c2fe0443b7dc00810d9befbc 3044022024682575d079593a5d7c86beeaf3594887a165472a2d5fea1f43a3d06b28bdb9022012ef4c70505ad40718877552f89a30161c276b419cd7dde592ce597e696597f1 304502207ba9596825c27f46d964586773a7874257b9fc0dea6b73602426cee994467621022100d70b48f45595f1750752d13fc1f63bfbf06a7c75781157fccc19d9babeed5de2 3046022100d8212413a10ce69ea7701c555d5f0c78922ccbf7258f24aa16e07124cd18c6250221008d49635aec9e78e9bd17e258a3b3ba6da50bc30acab295f32e1c24b61c6ffa51 30450221008fd8c5d1aa914f98b94f8757049fc00c4ba8e1ebd75005886f58eb56c2c5d955022047dd2a0067504a264670d2570bd2fb511547d7c84410c13246e80e5c22f3a810 304402203acfd98dc4348e8dadea2fb7ecd7ca6a708bde522b3724272f709227b53f5dd202205e78d6ea95ea3e6808b1d09806ea9eaf51e1ca05f05512ed134bd2f977821b5c 304402204ac3579492cb12d52e14704f5ced2f49758fcf096edfe43ee2d2b28b32d3e09e0220777bde5a10c1c505ae057fcb3772003e8b52453dc81c1cc6e3557c6452c81573 3044022047ddcbde8cdae5b16f57ae9260f0d2940065db82e6f0c1ba26d35e1f28fd40f30220327899eb4c4098ac54c7dde8fb7c6a6cec869da19a88a75a41a472b7d2cd4b71 
3044022010f3386f339bd3b33557c68fa4798dc74641465209336082a6de1117a7f1d65702205850f2eadbc43a586b3a352b49d3e4a3c264576e7cd3d7a6a3f3f0d8a2e28373 3045022100dce11df96e896f58db36d49286def19abf64518447bdcf5dd7ed959bfb3cd94d022051424c7463a97a55b86f15a17a1b066a2fcbf30801decddc2144f6103ef552e7 3045022100f50f5d719a08b6ee7da4ee49c2de1393c83630475290bb55bc127452c8d3cbd2022047f117b55e2ff3819e9b978aa0578a0a32d7d006f2bd45d0b2b11d721c5d91a7 3044022023c2b6197e38af37c73a19228a8868134933f4fc319231836da6a662696d7ec60220223937b6af6acb63f58f98e97045dae7c22a3e8790651bd3024fbc97112187be 3045022100f6d7f3e2a98d4335b7df070a2f7772b0e683089d07a43ad3ad0b05fff1ec15f902204a3394c68e71d43d4ff83ecfe483269340f797b8cdf8867601fe5771278c668e 30440220761d13dda528bb5bdc27167fd130cc4bba99bc86c9fc7943aff91b0ad341e4ae02204e3aa08984a3997180dbfacba98dcf9bf228ac0c03963abb21a0b3862db1df82 3046022100f75b77365cb396d1f1616b003bb95ba7c0c7682b9faa26909b726bbb47781991022100e510a55eb681b1f56e3de39820bee97ecc2a3ca995ea98fd7c2da84b7fae4468 3045022100f0d402535a7f157b6eb40c7089b03842b88a3a70854cb282d3b6632ff37174e902206534859f9cf375549901e09cade4b7dfc5706a3122debb98333de4846cfed455 3045022100a561d7dfeb9adb3008298181b17755233975c30e1dcc2df7b1c954a8106d4f7b022039b763a69bc05be2624df55f6280bf7de4eb28d96fa3abced9e45f62ea08a76d 30440220021da83b892f21c1eb5e1b3c3aa473b69704b0451c5b358bb1bfdf8a6da181820220617b4c7f2008511cf857c2836c3cb7b747fb8721a179d14d33ffc3c9dd80cc63 3045022067420d667f88e51c92398969f72df10e3d4211037939c8064939129ab553a6fa022100db217a56ef38f06cf3c1fc9d7b587e1a7105d1077043327d27bf469d5b9ff3af 304502200a787ac3465dd8b0fa62cd4f36f3ba72c1721b34eb0b1c45e81634a387eb9338022100c9165b3c14802ee83ba3edb4e4c0a0aa5d274197848229996ef5192a20dfb2af 3044022022f5f5ab14ec05f27d508050cea086ce0002648e8f8ab643b6f708430df0e8c60220020483beb7a965b67b688dd539c1433bcbad3973469e522cf751d10e822def7c 3046022100cdd845ef5f73359285558cf11fe3a1d334281a6f98482a27d80a3f68cb388be8022100e02caecfeb2a87f20aaa5ce80f310f94ce0ba78079db568ba7e2a41709523724 304502206933ccc3806c0f47cdade89926fca36f3bca88ca93ead8c8f0a12a50a89aa3b1022100bb2b81d3af4cc75f992505d29f05c4c560e6242d7943e841e89538f38f3423aa 304502201db79dbcffb2c615c6f5e4a76dccbe4db94d6e200e42575b8d8248e1ebd8c0da022100c9c44ce3c316e892567833677e9564074a66def0054ba05a386c0da31142ec5d 3045022100ed0e51a82494256001d178fde690bdaf7d4b5d94c4d09e7dca4e79d1be1f64310220447a0d5fe54286e1d5f7c9bc9315fc20ad13b57766557859e4c17760ab716bdc 3046022100f930ca345cee6246283457887e009b1937ee95cf41d769a7fa700b00439d460f022100f99c4f74f8d3f832d797226ad6f2dcf17bbce62f67485c5ac16e702e2af7d61b 30450220333b781482f6d1d3b7a43ac8daaeafac545c1c1025d3a22618dcd13fe917156d022100d3e5c48a7fbe37f514e9282d1b5689c400ca4a8bfcb8d5fe0ba336c7f49f2b7b 304502210085df3b64a130816922cfd2acf75d4740487eb0493109b6d0e6ece551af79516b02206e57a559c7f19bb72009747c5affc4472d9a3114a8c59fa029e3e3669f30db16 30450221009b4223dae09e73373819187705001d62ea28224fbaaa79dc20de7af0e73940d602207854fe04c70c9de2518e46e855d20919214f6d9a0032fc7852ef0c12e8c88937 3045022100c76c07fa998ea32624032453ed818abd739786de469059c4c19d051fc13a649a0220140e98b8ee4a2b739419689bff93baa23cc64e8e8955ce519a004048c4311319 3046022100b8d7bcefd33f2e79d432e671057372562f7932e55798271d7b40334e97e505900221008e66c0da2b63574f058b49da76d38dbffa4b01c5d216506162593355afbc0503 304402206ed8e3c1e93bc16c6c39168fe9eb1a6ef7441e75f379828bf99fef57eda0c2da02203f33dfa358f46a361658aa7d443e160da1fd5610f3b40595634426ebc0b80566 
3046022100eac5bbfd4f11506865089ac1a1b6d9d6f45100edcd2a211f1749a4da7503ec3702210080ddf9395f4984744c293fbd3f3b76a69e14caaecc306d37a67e06e7ef01ba9e 304402207d0f7d0122e9e98fb97b0a73f6e87fa58ad767c750a071fef42217082ca45e320220721203fa28fd472f8df5ccfc075d097d0f8ecf76e15483035974823856418d03 3046022100818de74d9cac165651db03b54395dc26baf5f10ab2e0cfb99f41f226626442d80221008ecaadadfb1952822b1099d567d592e378adc19810d1a584ad10c93ec110a9f2 304402200d4b4cf8a1db8726114efe4092a303391b52038b65b24b7d2e0ae030511da14b0220068386cfbe7d46946203bee89ca767cda942b074430e9e88f476bffdcc0c1ff6 3045022100aee0b17b565dccd6f1d3d64f0ea979ad493909842a5f9d1e99a35983f5ac1454022008855f9b14b3a2403f17a775ac49ab4ca3811c8b675411eecf07452f073e665a 30460221008889480f4c71a89a63a62c87d039f8f07d4bc4686c0e7670653644c64be1c36a0221009d4285a4ca42a1b4d07aa8d208fbaf0c2a0a44f914f9d36d84bdf248b5608ec9 304502207a541733bf251154347182b0fc9de7f46da95483d75f2ab702879361e97b9c76022100b79eb2c63486c7e6eeee8fbfb1ad5adf045ef5ae8260baec7a4ac95d008048e6 304402204fe3952d90984d4e5726571e56d61347b44e25fa23afc2a92292a0c37e39313902200958190600071ad44e73bd2024489176ac4575c8eaa37dbcfb1fcc56faa692bd 304502205748310e5ba166e42ac493895f58fd35d3f89548ef07f968981a463b66c5a80d022100c40317e0c47cc17e547a62818feb3a2e2e2125a692031c6a5e804985a1ab53db 30440220441898ac6d5c5deafb1932e5251338bb76916b2fbcd825f4e2310c0735dc6fb2022008ded81042fdcf2ae11a8c5008b1c300fd1b7237677a64ea7dd984a148ac120c 30460221009578d7963917854d9e859bedd3e714692d79a38f05d9b490066524f4d0ae66e2022100ea6a424e840871a3f5019e2aa33267ba7e0696dbd35141184a479cd3604cf5dd 30440220200ce2990cf9ea7a70105f552da29fc8caec99d1a12b1f4e5683e6883b54c04c02204e3dac80dc3ca91ff4facb71bcec4ed7bbb7aa6b569bacf8a9d67df332947a4c 304402206d9399802988425071d5ea10c6d95aafe5ee3528ae4eff7d4c22327492b2bd05022019d1c4099baffb99a3edb1df383ac972d40417cf7d4a48a576dd9c4ce5b25103 3044022074b33da21dede2a9dad708582868a547123dee7a01719b6d46f64cae159cbb3502202a47f8703d4c2ca260ad25dc140b2ba4d47dda7d07f864b0fbc0c827bdf5e401 304402205ce03b0aea174b4e52ce250d29fe38bbd5e585572d7d21c303ab2ec981c67e310220357e2c28815d0053547c948e737207d1c8c1d5e55af62e68efeb2b7c6f306a07 304402206c4cf78e078857534603afe32ef6f4b146c66146f001590b80b948c9176f8f4f022022dc48098806e0130c06e18472ceaade4da691b80a9a1fb530cd9528a6b5de51 3045022100b14b6294b66da3b2ec0c85bf6c184b931202a196fe00738c7816748e601e96e502202cd40ea8ca688bfb8dfe66112e12c1e1a9a28aa23eb4b742ac23c0d00cae0337 30460221009695b2d945eddc57e094bf602b44cd73a50d668e42c9fb1e2bbecf901bfc9168022100c8593b750202efe2341600412c5b717174f35fde25cb670cf79d5d08dc47f657 30440220353423fd62814b575d92175da1dea514acb2ff2ef12183d38e882e264c00c63b022002a48eccafce28497d52cf275ff7fca4badb65b85e319274a4286c44d54209f6 3045022100e9059cf83dff8ff04ea692fd1ecfb6e43940eecaea694dcb34432a7ac77700f802202cb7c25b9d5717877407056fd5647db61c9903f039c7274392c0db05a4eb85e7 30460221009a9d2c389bfccadeb68d9fc3a567eb90932908e8b45387a10145745f4e4aba2f022100a09e00dcc26d5709093e2fe5b87af55dd6164501accb69b697c20c3c1e1210ff 304402200224bb490d4cff94408a30c00287e76a2dd7a4d58f8a683dc0ec9a32e98528a00220051137f8841faf333ae4f816f402b54fbc84f8d155d08ef7d6758a3f8fbe0523 304602210097fd3c6da74f2610c48643887a1d14dac87ce93b196cba6b53d093210105848c022100947857bff45f6bff17d33ebd47b788ab5951db27bea833a7345ea9b6a628d72a 304502210080b37cae7ed866c7d26c28d1d87aa61a80ca973d7aff125f7dae94513c8fd09b0220094bcbcd5b8f1ecae333ff90617334d3316df28a015ae000cb7c6281b5b0979f 
304402204b99d381ebaa3a25f22a21d677b50ee46d994979d117bcea2574504bd5874842022040ba13ec5b5f91b37981df0d726de3e2c59566ce0557ba786d7718532afa9366 30440220279d37de7cda5300d8e0ebf9fd4509fc259fa8c2ac645bc967a292e9b1fb7ef5022071a931e0c34effa04f98f45fbd6eae0e87dae454d2afb660b5bffbe418c817b9 3046022100f6aefd6ff7613f791c0f4c4610af4016149b8e4b17893693b8e4fc393858b7f5022100d8feaec2a1a4d9dbd811b306e2f4491c2823d085fa092fbe5d69a43bb8994892 30460221009bfb377afac63c4afd6348da5175d93e384af06cc73c780d947d27bfe0cb178b022100a6cf569d90e618f382d52de2d2d6ae6ec00670ab46ebd5592bf770e951eab124 3045022100f0965c04cd8d012c5f6daf3b02ed9e2aa609764c0283d29a8c3f0a6ac359176e022061d63231f16ca5ff6e9cd3214144c85c606651f8089576e9ecbc0d3547d2ff64 30460221008facc7b8fecde786d9eba79b806bdefd5c1e77cf9b6857583d73d91a228b876e022100f6b673aca074ec0e8935fbb26b9ad8cb564d3771080d90192c9256ddf4a216bc 30450220235253c73334dd8ff311068666718cdb339029c8f73a112ab56499965decc6f4022100d464a03c9947aab253596c3f48c94a124bbdc90fdde9ccf740edf2fb7fee311e 3046022100f391abe365793eef4dac9af3fd1fa87f6247611996c5825b617f12208360c506022100aa23ff8b6cd7b2f61327ab0f680ac50017c04f8fa1faf0ae645092d63979f419 30460221008d2fa9acb999eca540c60b90165190b1a4eb23644455b6ad14f8219ab7c462920221009fecaf9beba48f9c7434a2b74f9bd05d5b9a6445907494b1591e9c76366a8921 3046022100e9b2c25b502ac5eff383cdae005470e5dd2cfb9d9911a145ad4aaa6f64b1c75b022100c2df5ce378053da65e99a74f6210371eba93392144cb251389778ff6e69cc90a 3045022100f79245e07890464834aa7aab87460f2493cdb61e55e501b117c0ea49a4e197d6022020cda0ba1837b5d277203bb59e9edb42d9b8b37f4a48adeebc5b4c420c015968 30460221009b09ee72c2a1d9c406e347b2296d259ba4825017ae211ced909974816caa6c2902210084299bbbdf751d00d7168e4529d673ef9295f91a6305861e68506e630a7e4c70 3045022047a977f322607927a2b952bea18ad3d7055e592b132b93bb2b43d9ffc7b5755f022100c4bb6a601ec6564fb5ef697814a404f05f5e93f21352fe34ba50c572bd1ec989 3045022075603d2ff507fcd6f62a60a22b904db61dd6b2074adb760231cd88008e32ee8a022100fbda9df528ec4a7755b3a6fa45172939ece9d5b31bde0d618f4847cb71438c73 3044022053fcb298eaebb0d070355582e3c42ea6538a27c38d7dc9f952715d8423cbea9f0220268c57283c15175277e655ed255276395a15fcbbcb67b2341979adee72378492 30450221008f23c84c1dd3f23f2ae28468937f76488d7de227741e2b06a061283d5a1b151702203bc7ee9b644c49e35627d2c8362a806c1d10ce141f3331a2736e70fb22e23553 304402204408c35fb76802b446060adafdbf4516e45884e3abc38b5a3b520ad46475a82702205fe151098b242b35de906351b588cb8092aaa13905a93cff82567d570289f6c4 3046022100c202d1c4dc290703561d4e80393f428ffd8a5a3a480cb49b857e1a2f286f0ccd022100ebc5adeba43698e94aaae0a5eb0653d6737ab2a3e25205a40211a6a25aa02727 304402204931754b479d701c8aff1986f7295cb640bf65218890a38667c49add41bfda9102203a7b176fc8d49219e69f131756c1286dc9ae1da93e61b35166fe020791cb5d16 3043022000d32ed9c05e90375259c9a8f4d1a766cf8d17dacfa60c14968fbe0dfdac5d05021f2067550d3247a2a954f2141d0e1d3533292582e40671bc75ef0dbc8d0e81e8 304402202924408a35d24de96385a6faa1ec3a4ded39b2fef7829109d25bc32356dc14c002207ccc9c1b58f4b831ffab05871b37ce4727553ac04892cdc79fde0a604c28c844 304402205beab2440fe8271efd1f591e621d8d9bef24b943a3ef956c045ea0c1f8fa04340220621efd83aca34faaba40b349b3389b91c4e009fd1b286b514e1d57812e5bbe23 304502203b72a1228afcb56a36786acfdd18448c7b7f10f34f9aea6150ee1df18b92adb2022100dbab79d07af8ee3edd5bdd244024ce1da84f780244d52361a0ffd0ce100367b0 3046022100c7c100efad381d3f8c85a1dfa5cc0ffed125c7032c27b35b0e5f5933325d8727022100e64b4217f21439107b18b4c8808f2956b7d5fd0c8e5a62c46872705fc7ced932 
3046022100896e28380faa227484c4b62d6468767ee12947104b5b3dadee9bf8b6497a2778022100ef865a2c0c0131527fc59a839729485cecd197f88dc641614a540631d0339181 30450221008f7ba156805f6d291f590deede00fe3ee4f7bcdf096a002d05e46cc2991f371e0220225693a80eab3cbe2ea86f619070d1d22d5b59466b96058333c4ea6aeab380eb 3044022003578a99cc2d30ff225be5dd976f80e2d5c93d71ed5aa20f77e66b931d134e7c02200a1a17cd70dee5750f340174e241549f55f84d7d3b73018df48213b5dd7a301b 3045022100ee6e6d8ad600452e21e6212929facd4728278189f85bfcff2517fe3fa1ed87c8022064127b79d7d52fc7105227dddd89b246f15dc8475e840473019bb0a1a991266e 3046022100db8bc0d858ae31d0614a3401f6b2f6a457a4084c9b7c62737f31cda84189a1c80221009c15eef4ec919e9b81068370aa50e1530aebaa6b4a0c200ba2078b60905d0b23 3046022100b2e62fe2d9ff4fec9c59be6e93d48f011f23853e61b3f9cd36286dc460920542022100f1281d84d672bb459291ac89fb7aacb88dc93c01b1b5344904c61b6b04d6fdd3 304502207a77a91c50bcf7c28e0f62534d0d7ba76e82fa96bc0dd4412c36893c2e32d56502210088864ab5a212ba03c99e1a1d6eee9846b000ef981cf3fb1a7f3a257ee5bd8acc 304502207cb25d19b7a0e06a703a1f46969a7d1dd73b6f7ada7485236afc65708653d2ec022100d25789ae3bc27702654b14fdd74f557a975f9ed777342853640e67de82cd7eef 304502204e17aa7e2ff29b87444bf39d8742e8b633d5da7860a44c661a732de811ea915e022100cc998bcdf9f1bddc750ac42595076cb1ad69477ad70680df48f141af03519fcc 304502210099dcae83445751315689651df7ec3a0ff8460049f622a01b3444f305ebb61d130220305045b355e17f1d1a2371613fc8397e0bcd8fd9dee803bddae6fab4f0bb5f09 3045022100bb649af47fbbbbda9998ee421794c0e7fa2ad7c653568a204ccddae0dc9546a8022056e75870d1606afa9bed6e716e5efece2358570bc999d486704f8fd170cf756f 3045022024545d471545c4b3bbca3d3eb3b199446986c566211e356d9ee83cfb08de9d05022100b0eb0decb7d25d09fb85b4f31334b3fcd36248ed8f9a5ba885c7dfcd06c61be0 30450220101f7d8dbd3e4c6b70c07acf487c15fd8767675487f8ae21217b1761714302d00221009095fdcacdb248ee7ae9289464be84445bf9b81338145173090d0e149138af06 30440220514bd5c3da9e4d80b7ba933e6749be86ad54b5663b7d5d16abaeca20bd5d4485022079c4baa251ea023ee7e2e8f3cd850688f26049ca98704adf81d9a79978916257 3045022100b6f66534cc75198dbc3b8e1ee921812e8c90214b435ca2a4c03f5cab5e46916902206996d0f5ac698f7f33180372097ef5ef34a6f1ccaca028c1fa5eb9e4c1f31aec 3045022100d04022fdaca47571eab325d0f507e8d7813da7d3f5f3d65ba6b1049b11f74d5c022040baa5c3f072f885921d714241ce3d277753f2ce14646be7a7317a1dbf388e06 30450221009e552b1aab860813e48ee7c1a7a9a6f1ee606d4c4f80444f383a35aea97e0b1102205670c5678f9575967c3ad1667b1630cc8775ed63a476ed1d7385b03c773b3407 304402204bca881c44b60be0b6c55f0d563274b656b6fb9c2db6da5983beb0c0c00d0d8202202d3f805b7b6bb8588ad3428ba38c79199368446b80c8b3ab3c6ce2fc03dda33f 30450221008440360cd86f1a5b6cdcc68ad26b8fe853c834a517a086670018ef9cd57e6f3f0220364270433c7fad269d7ee4e3e997fdafde04e6658c88fded31384b3b95d5af7a 30460221009895000ba9940ddaab87daf6992c92fdd0e3b76fc512a98099f20311246bc126022100e71a0eda4617ace28d4697919e2bcb85484a433f00a6043f549878c3f004f9ec 3045022013f1c056dda0759ff65ed19860c37bd5874b732ee9a21ddd3a0a03a422577c05022100d2cbc5384b71a5873888859838d2b72386203e4cf42f5b6a29d14aca3516601f 3046022100fb8f7da5e63b1415a3fcfb9c98384418f2f4fca63ce546ae75571d3016edfe66022100c07b2350ef14fc8ba1a417a863252b2d19942d4ad3c767fe2ff4a6564e74e298 3044022079246751cf3db69b8ba26dcc9fecebcb9e5cd72e9bf4d6cf40e541e3d4f7745d02206b6f5ff8ce1cfd72cc95b5c733d10926fd119bb494670b718dd3f6c8b9b17e42 30450221009a716c1b6087d1513eebd07bc040203c63412bd3fd4410113cb7ca0a173e861a02200adfcb1019e42d2e28f14f70e8c571f3320cb1f7273eb3326b2a9b42e316075c 
304502207143cd44fb5b9e7eadc84646664a3a1a1e147a43b8d98d32ea71dd87c5abb40c022100fab88a182d551a3161ad5185c58aa174916c7590b259221c5ac4ee56aef1d7ae 304502210094b3d91e382e104f1809cb0abd3c3032d332389412bd24dff8a597968956d21d0220170f3605c0880997f874395779546837454eaf9264a64c25749a8e881ab36f41 304502200ddad2aa5888d8e114647830c4920296241b6f6fd443057f570f63fb04555840022100e17baec9b17ea21fcdba9ede5530a11f3e1473f7b90a5944f5429b8e33f2ea27 30460221009b9ec8ab6f41c07e96cb131e3501e5384b6ef0d4d4f769ea9a8fed782140639d0221008fa88097cabdbb7eb6273583494fbc6e792d553e2f393bb90e0cf60b53255da1 3046022100b05229f5947e42fde51f31fa09ae2bf8f6d8f80ba8d8983c5ff02ce87f7ac9330221009483b852b1f5c8064e75ad055e208fdff2cfec88e2f4dcb32bf78958f9ff789d 3044022054bcaa97b0a9198821dea27a37789d4aa368ed054d095eee8818f86c77c50a2b022000e005970af97ba996e0ba7bea69ceb3bc948616777fcf54114b246971958695 30450220048f8bd66c19a341bab404f449d1a999ba13937a8171ca191e1795a46c7d6783022100f32f6d6040690c56582f42d351e0cb08c5dc7814041099c88cc18d329410bad2 30450220409b6903025971d4e616cec6c1fba3b6b6bdcddd64f43cdc3bcf0ed004a4a320022100c9d7fab3cfa235ba3deee44111d4024c30a2768dde0c00c059a98a56b20cd4a0 30450221009feeefa96a1615dcd3e2cb4c13461838c3de29e9514cdfeaea12373df4ec391702201df191256c48293510991a1d466f083e0535a15e75d132a341ba1ba34e7ef657 304402204d39dd619b6acb8809553aa6617d4a520526a8cc4ab5b281748cf6e7e3cd743502206df678256df65b351d253d2dcf93feddc98dd72586f68c2e4ab3f2fb18113a31 3046022100d1bfb85588db3184b1f0b795340e0747141b948c296f74e07009ab72f39d46b2022100c33678fe7097fd82d80477788cbf38b0b43beb0742cfd10c56e2a635f04dbe6b 3044022005e715812b944416463ffeb05196d6661fe25ff3ad79ff817e729f86cdfa378c0220674e324134a6c339a74370b57b7863945d7783fd52ad67d15b1071ff4371e087 30450220317ae5b440bb60de056dc7dc26f6e10a7d646a6b8593699853401396a92ec957022100d6a986d452f4be06d34761871fbd528a45f6a0305066c756b18ab565f5f1896f 304502202f8b27c599c858e31586f70fda012e458af1bcec9db7bf9312b020ea36b64889022100f480a7387e5343aaf702bf1a6c41ffc9651dd521e286c91e38ccd3b1a86bd23b 3045022068471d55814dc2fda617bd99ed25ad47ec307e12308dfa2159908ce7692d0b61022100fc1f57827c9f94d240e7cc9fb312b6667143afdd40d0f389abfcb5197c2206e6 3045022100f63b56714e85ae4a51e0e4c825a89b6db3c4ddb461e3e9b9685645891520b126022039f0d6fc64c983e677e77f033911d5212deca10f5d60c770ee646237b65648bd 304502205f647dc0739dad0e6d4bee0cb21820f3f644b751300b223881f15b8b78b2aa8d022100b3bd7939edaf9f2b6c302df1a06f8f07b86039aa95a5172675357e47666bed88 304402204dd820faafc800b6d75373fb43760a96105fbdcd24db1895471b3c1ebc68ce56022066f685a6130bd9fa287e4f0aaa562b56045b17ca7ea166e172684278e9b75050 304402204409787ff77c035137726b598788ffc196d5a512125bee01ef9d5748446d9a79022015a4d3737ff0d707b100ed3e448cafe229f9bbc261c6e7b8e88f72b68d687992 3044022031bfd9c4c95b04be65b6af74336f03b4b8768aa92f5267119aa9d6c8860abe8a0220615f7bf617c304936fde7ff05e0244ed8a362c27ee576325ec10272fdc02e80f 3046022100ae1f2209e5c34f01d528054122684232f642494011f64d2fc57e2a0c1dc5b861022100f55b3da23de3ee5543f431822f2a6526290300547a4c49f782b75843ab44651d 304402204aed76a97652eb43eab92fc556929d65f715d904862ade475f2a7ebe5173c49b02206b69615542136090d6ff7f9de6496f674069ad8db507676bd349c3ecbc803e84 304402201901c1c90aab3720ba6230fc90dd8ba28ce504cf8a2cbcae48947c3094abfd720220585333508510ee95aeb9ec21e4f33def199aae0aadc2c1958e7012508027c20f 3044022064bbdd35892a3c3e9f79966efb30f7d055015d633edd9e70940fa82df987b07f02201d6283b91dfac64dd92edf5e50c355b54ecb21d76c122d13ccdd99d1a27c727a 
30450220300ff437339d3d15fdc320439fd5d68a4d49fa58230571d2374f3ed9457105e8022100a481289b7562ebca3500f971b6ce0a58d9d96db968a7d40ea9fdad5bf37db74c 3046022100c6a0a92904efb7168ee1ce805368264b2dfeb6eb8ba22f37f6d1ce8a3b5e2c61022100d594ddb8cfd515589b6e39522b21cc9ae13495879bdbfe33add679b0491bcbd9 3046022100b4e9be421d92ceca87e3e414d2e412d15f35bfafa038aba558a8c91aad4b26630221008192b90dfe8c6c1411f2b33dde26f6b00fa4762cecc462127ce07a869f4f2d62 3045022100dbfcb4bf2a46069f722fa970f5019115449449235994a1d85a378f001301cff4022003cb98e53001bb67fd19e45e606af9b5ee392b9652806ea49539bbfead5f4d99 3045022100c207be7ec66584867ae495dfb18a4fcf01d5f112b52679bb308bf8257e71d0f602203da967944499046edcc7dd7463a72fc5d78b6e546a186ff59e093f3a4a606acd 3045022100b2751149079671a8a736c92aef2348fd10a4b10c84a613b433911451e451bb63022041198c26f9f915c67656b97333d4a1d91ec0bfa9fcc790a9f5e51d967ab68968 3046022100c09e3b44d1fb8339f91d55250cd5fbfae9537d3dbf10fef878261b6e5871844a022100f032d7cec8c3d1192cb08c723fa9b695b539ac5743739ccd832305828dbfd57f 3045022100c09840294e094a18e1a04183a7d955675993e2fcdb795ed2ea27dc8e861c8e3402205622fb8a41b83b0fb25f9286fa455c47c511c405530d497e28271be3f0bf7299 3046022100fc83af86819ce653355f8c949455d570f1eb17e2a874aab04ae5bfcf8b034c07022100dd369b603d2be54ac3dd6a26d820f2916bbc43f2c52b3721ee8979c384a8b3fb 304402205a150209d2eb3830d9d3a0ee3313f8190f6b0cc27d9b1104a95e9a76db0130fc0220380d495aecf2bb4b771f9654cf4d69a5ca694990437b3cd50cd748ef4cc785bd 3045022100f68428b404d93569514b20be94be8416a782fd5822ceb288eb727a03f85483e90220400109c377da655ed7fd7afd63fb1a0ad8900b02470059f842044f0c3ac6b1a5 304402202d4aee555775a7de0703a1d79aa3fb4b5f158bf03f69f49f1301c82f57b3a49702205df41e9172067f822c87adddc8550b9120449abab816c6c1b3ce6202894df09c 30440220580bedbfb7f810efe9032d5ba9d26caab4bd6fc8ba6a33b7fd15778daec3299f0220314fa969cf2bca9cccbcc3d9c193d4562527dd08ee9ae4eb77ef77f15e8b4e34 3045022100f53f13d7a858eb405a113e1dc1d3f44de182baead9fc6f6b145323c0f1a0b8e7022039a421f21dffc5a563f8c88f86d94c8418b5c373fe03bcf15429ec9964bb9e56 30450221008222878f996446354ab74da7cf497ef3b01a7706b25a5c6b2e71d3ee2b0a3e08022068385b200d3f697a35a433c19600b99a703f23785aac9f178a17221f3fc7dece 3044022040b22caac8a37f979da8138107dcd03233c0985db86caed73322d09102d5943802202724ea21dd05f5a5d3591601a73fedc2cc5d1eb63bb07c0b9fbd2f923fd57bd3 304502207ea66e9c0b5b8fbc0f364d2d2056e32998007139bd4d3cd9955bb28b5ed59cf70221008611ce74fb00623d4e1eaeb24321e04f1c17a3bcd7e151ee38fbdc4e86c0c85d 3045022100cf8f635836ac487ba4c752120598e9d3879a504905a281686b51d28eaad438cc02200d433a77225e0fab3928c92a4da2fb98eda290019acbe3bd6b48cd9357cd2e38 304402202548e641d0a4589a1c770add5857f55f5b214f06d250225312045ffd0e4021d602200c70cf93ca797d446b43aba83f783f6abfbf6f10e6501320bec9a0290c59d321 304502201c75dcfa881717ac153fec4210e90b9e5375235cd97736300fba3e84d45af858022100980f498b3af44d89ac690c561e26223eef96f07692f19e06af2e7d063609e1f8 304502203ae8ced3bfd0f6a9d839436b339c1558198ee8fe59a596071e09024e6bd0eb86022100f097f5324c53b6a8a54f3a6e32d5855fe991c183fcd5e53f97ea486c1f234077 3044022063741eff03004109b76e3ccea98b814afd437120ff784e43d873437f7abad3b00220626778c1baadfa073048575406e369509a9da64176904ab9c79278ec71fc4e00 30450220190519715283a45dbbbe9b78705fbfbf91af0423ca6055ef5fa3db738838821f022100f4acb0d4ba6ddde1a3a3a9a5a96885e1b3bb9dc629f6a2f77883f15d5e30b9be 304502204c2806301ef46e97eb0c99261a4dd41ff84d8e432f627c4db80abd83a2c02c7a022100cfe27a50f01a173c4913e9b87eb4d1717fe3c0ca7c0cecebd65d3cb06a9f004e 
3045022100eaf3521ad53f8c7bfc2466e098f86ec830c7d0add297df7be84adc03a2795e19022069b82a26cd94cc619ea6018d69d86b67c24325efe639e9d434a51c1220331c92 304502203740005d5fa22624d5e8c6e8cf39ce795013c4cd117a1ae36b340b3d85d64588022100db7b5da601b4432663f1318f16e33a29fee5b71ea3518c1e3ddf4daa5bd6e0ed 304402206d187075b2c3517e0348046f3297768110cdd02294cd1fb904e961ce094e64300220483f2df431ad3c678ff2098e6ab00b641adfd52fbc829a311391ec8e86207e3a 3045022100f42103c90e90ac3392f31045c4b47974b3c3d2161e9f274e449599fe3e18270102207aeb51e82c692a9cbd5927e32f578fbb2dc0faebfc0f88d4e50bb9d9539f2314 304502207a18416a778c6f65cc696d4b34f029ded51d57ab1f5bb65268755d2ed69a4fbf022100ce414ad5ef4519c5337e05792474da2334effbe5025a4b29c12971e95103ed1d 30450220469316116e5f718a0eb570eb2b0f377fb2f0f1d07726019646b15eee20d3b14d022100ff9188a0beb2390738c6e139874362db558231f2fba0a6836957c16a27eb868c 3045022100d7bcce122b3e1412bf60a635b0767f049fd6fbc0aba1e782e3c1917541746e0602203048a13872237c1e6228be53787c57b394d708692a24f93168a92ab1ca28c346 3046022100f655ed9734f1104d27d76d0d123ebb94374fae0c2fd5e674b243ee8ad3c3b054022100cdf4690cb01b09bab0dedcdeb11709cffa8f86fa9a34789db1bf2ba66c5ed99c 304502210097de0daf61299dc76990469d0e1cd186b3ab13dd709c36c7b99767d1df87455d02204a5c1517c7e8e26ad1e26b63ccfaf4f43551f58bf1877fee8997336f95063776 3045022019304ede9ebac5fbeb86de27002cd37c1fc33f7b79b343b290e88e5e7ccc962d022100ce68bf15eb6aa2e101f29444d18c6d71cf420809c993b2b687e1f114538eef45 3045022100ba24968902806b420537622d410ca97392efef28efb849e8b92df87993efa042022055d9a8ba7c463ccc64e8a9bf3a1530da2ae75111a4ae27f19d850f3246ab2f0a 3046022100ade9d1aea3fb654a359e6b41806c7052d539932439a927ac68d689cba6fb07de022100b8c13a669505ffd54d5746614fd96e0f266f16b9987bb002bda476a43469317d 3046022100eecc315b7d2629542c9d97a1a8e883f1bafe463c7165517414d2eaa3ca2ca15202210081d70f566ee58c3bbf2ae93cadb3c52bc544e7a9cbec431f9423b1f28e210dc6 3045022100ce251fa24215cc9855ffa3aedf0583981dfe80d74ee99d04baa9f703948934fb02201029546a2ab2291cb5bc2220852d0d4f3fd390ba90a1066d2a5d4cfd7b55ff48 304402204063c5857ee097db51244f0188841ab4405214fe7f12a874ac88d69bf0b2b4300220794034e33748f8bc80ca0cf4885319210cf36889197f1e7c4c0d26f10aa454f1 3046022100fff9d1bad0ce07d6d6f6ae15f03aa9dfd55ca474ab08bd144cd1e510376f7693022100f99282f6b55007064cd602cddd0dc4d4076b01ff3761fd1c57cedc950fd980d8 30440220094be3c2dae5a6ad478856b19b7328a384f6d93772036fbe5a302d9178e2d33d0220513dbda6d575a6200c7522cc188aee7fee5eec25925cf3dd5270cd1ae4424100 304402203440eb1f6e45a7103f152001bf856011a976962d9ba6a7d3636ba33b53cb5429022048c3038fdfd7a6a6bcdf494246275553546b4766f172fd8104fbaaefa644da8b 3046022100f0545d7c9e0d08acb06a5be39db05df515fdd6508b8cda40c9fcf3bb0a6ad629022100d1671f962f7dceb088891ff6842f092dd4a0e95333b5b139b34c8378cfedf09d 3045022029060708cecd623294885b73f10c3a2ea882367530f04de53763f56bdc41b7ce022100a9c8aebbe1159e138e4897f71531e20045f079c3b3de1feaa17d68bb4f9751eb 3045022061c43c53449a24de0df9a7137ec4a57bc98870a077df9547fe36f3d9128859f6022100ad5d6d64109045ad8b7333c2556bbe97f1fc51cad58fb9cb2bd15de54eb1f5be 304502203075d7ddbd33b9d0a39408a5c2285cf5a02109e06319295699853e21928939e80221008872772ec4e8d1b90b77498e63d96ff4b472266036b64c4bda683a41b37c7ade 30460221009407026f2005ce83365b250a79ae16ccff4e75d281313d324b78f6c459ccff75022100e9f52d05bf37c6494b5490fcc637002c6508631a04e90f6b2d438d63f2167a8a 3044022001f7c70299f48e99fcfb56e513d4a7ca6f474e8702300b4a7999acca5a469ae202207bf33b13637df3e7614dabeb90d75ef2822f8bb7bdac40aa8673ed82644f4f77 
3044022005b25caa43787d015745154042bb24adb0fd780f6e9f06c2fb36bc8dc09b93fa0220444050061b71c3546365a9035c558241d4d941b05f53c65f07972367db519d00 3045022053860106ec3062f595a6e1ba38a59974a4e8105ce352e773ed1991553084ceb90221008fc90fc505231e8aeac4356cf45cf7284c52443250877259ef92c78b5f00c8b2 3045022100c837b92d672413a59f56810960687bb5218ec2a731c8f7685b7f08fcb704256e0220672d090aeff0f91df74835f08d399b451b097bc0ef65c7ddd3e85e8c73347888 3045022100ac69d0f13f833180b33071e6378e1813865c8af74586c665593efddfdaab61f9022010fb18ec3e4d721549ae6d3ea93a001dd0ef350cf13378456dd1f154bfab988d 3046022100e31022f587534b43e0321620614023ac2c727a3bdde9dcb5b70fb31325d9e331022100b8cb4330668a5f7d2f6c04ff1534d513f5608867248b093f182c0027da006dd7 304402207ddcb112d823bd41679b879c02a8e93bac76ae7e1ace535fea71bfe1be17d9f7022048189728d314d435f6ccd599511f8ff5ae85a4b1aa0db83fb5be916690642b8c 304402207ab8475fa202e0fa7de0c6e1cd5fa3566daca5f4c2f35c53c1bd43d361fa2e4402201d534d93bfb5d2a639882c6bec7ec2a841caea780f89dd53f624040acbeee9bf 304502210093fe711de31d8c9bbdc6cf57adf8f9d69e05deb9a320a0f7196b948003c2e505022020731a57796233c70b491a1646c869205dba56c7536dd8d4dcdb850e6482f300 304502202b377dbbba5de78963904b2c73178aa991e6998ffbb404b0d62e92466cb7019b022100eae1eaa9ccb31addb7ba0e39ca9294268605466b7ebffc5f4238067a506038e8 304502206628b597709ae8807e0fcc039035ecdfaadfe58832fdfb22c721ef533337a546022100fa3ec0392a545cab674199e6b6f6e98de65824a460fc9d82b308d3352f77ddb5 30460221008b5cab590aa29dbe4265cb60ed5fbc3921a304f40e9787f5efed42d4c96b1f4d022100e7977e61548c4c4b3affa43ce2eb4d104cd491c86e641fc3dd7e6c770588744c 3045022067d8e12c7afb9f0979607b8a1868a9fc933d6ef9ad5189f4a3660c75cec96d30022100a69bd4b6d5abad3566ce517f30a380ef089b134103b6418c1fb6ea9eb18b29f7 3046022100edf55c824fb1e1119778e05b0fdb2de5f03d4269fd84224629453b581a7f8055022100c27b7ba0976bb675da65fbab7de732bd4c47fd9d609f407593de0379d7fc8dbe 3045022069e731942c1502079285b8d7c3109fdc0f6044d868399aec64cd176246ba24c102210085805c5ebb00f33adbc2e5aa9d404c8274dcf724277967cc05a148ceb7def623 3045022015cb94240fbb2d2bf6ef2b769818e5488a13e8fd1c5a22ea12e80a7b77b03d80022100dc4bc2d90ecafaaaf17b5881d354cb17b4983ff91c3dc8b6802610cc9362da50 3045022100cb49f95d2fd63b1dd3b99ecf27ab46e2038e38ebb83982759ee4a75fb482f5d802205efdeec3c45f10fa96d3e29c46c0fa9ef5d6fcc851bd1aaa9ebcc953e3e2417f 304402204bf70ddb621f8115478985261610bffd6dedaf95975fe38a8249d9f5e1c615b902207d0ea58d2538aea7edd45b4bfef120b027af716a51adcc51f56de71c03a2345a 304402206d32201f34c550733ebfe5224a18c5fbae087c498f6eb0ef9e6f97e9b64e74dd02202f9ee23a7a963b102d40f4b62e176eac56eecbd6c93449d5de984ee0700a24bc 30450220574030b0e568089dcff39bb8626608f8b8a24568f7d0bb812de0307a107dd48702210094046b81c24f041a8461af573cc8d4f36831181285becc2922aeaf46056bed1d 304402202cbc0326ce78ff5f94e00aca8c6a77f7cb321959e966b13326433a210691459b02207b5635430a25cade18e237978d476f5ddac2a068d34d15f06d8d8c6313ec70e5 3045022100957856dd197c3a43a6e82c4831eca4d0b245e6fa72b7528003c5fa8b3895838502204b56208e165dce3f4650caaccc5fdb1f846112544df8cca9ca65c9c9021c4647 3045022100e0dfcf48a51b512ff7eb24d50acae0e05db5d668ea7cddf62b43c59c2a95baba022030278022230bac57743615c270f93f6c06b045e6f2da7f3dcf7b70cd0245958e 3045022100ca96199f06b2cee1c2ecc628df270317778f8c961e535fc7aefeba2ef70552be02203020a4abe458c0259de5bd99d63743635dcf524ba6c28b149c31ffaed882ee6b 3046022100b2fb8550f9b0cbe87761061243e46d8f920cfe17151912e480a3d9de9669adfd02210092383f4b7f2f5f06f52b987627e1771804d1c511018a31f070fde9e85e8a6849 
3045022100ce8126058aa40ae585d649e16918b30912e3d6aabd56df91779abd69ef73eba702202902bf0ed62c38a8f0d871d901dd23e0ab6e9d42fc56e319f8a2acb4cd8568ba 304502200be759083e2064fc682daa888cc0c163a551ebd8008e34277f11be30715f764f022100aa14b402c308f62c065aa5fcea75b9ee4f39753d52dc927c01b205587a340a9c 3045022100cd04e9078673b3260b6ea99f756106b4ec68b61fdb29cf5a75e5c3b084d2fb3402206655efbd5234bc1124742ac67017f17b2142ebf95ffcc8d29f223206ff6905ec 304602210085a5496d1c94945fe6dba7555db0bf0f3aa79a21ebfb2449a16ff78fbbf21ac5022100ddd12804ae3d8d1cb655f98a6ec555b13e37ac1c06bb842a6346f7aca2827c14 3046022100be301880f1759881443270beb7d6947cfd2cb4ac320dc86050d56b963b92b0f6022100a2bc41d8c597c45a20b01da06596a1d672e3f5823b5b9b2ce091d7aa44a4367d 304502202875697ec2eba13bd7f7fd1ee8da485667c4141f9999b2baa3d966c1fcc904f1022100e1d7d486db3a574da833b9c488f38158e289a79fc5527ecfdcc65fbfe07a9092 304502201c75840f336a4077bc94a328ea55b11bb0d10dce3b7d93e15237f8f9047d68530221009fe2cb91e42ae5bdac9af0dbd931b654a89a4649234ed2213a74eac65d0133f8 30450221008443de9bd8585df666c98b62e1eab287d5b13c7b3c264f9f1572ecb6a05f649e02201e61b5a69bbbb763861f7a3edf599a303013573b4fc8d6cf60a69927ac210bd3 3046022100e0fed9739b025d9683cb172a8cfc491575113bd72977f414b0c2afaf88dd8f77022100b3166456eff065c19dbe31bef30e6705aa8db65559045b989585410a75670062 3046022100c8a65e294972c813ddfb09d66d53374cb5d9972a2d6e71b53885cb12b36c88e2022100f8e43ee5fe4f5e04d7ffe1e60a869b1d3f53dddbc6b75aa82d97819205e753b5 3044022053c5bf37c59d81e57ea0e0d4f438041388e864025f1124f9f7ec41af3beda38602205edb5d37d62afa634cd0085b169e32ea4ba6f7371f3f81af4a4d4fb6f353e89c 304402201ba2defae962123b880a2f2547b5543e1d62c43e8c6b5fb80096d5abe454d6fd0220727434655b64f82e0a6bac1afe78e2be532ae2df2f043427fcd15a3db256232e 3045022100a2f479dce878517075e53889ed6baca1d5f4a1051a84fb7e3a5a36ab36a1a354022018e98319c004f08a52261f3ae934a4f43bfa71c1c56e210587d031c00741d99c 304402203de23048b981ab71d71fd7c8d819f1233c9790209e149166d034ac14c8db168402203d192ddf5da937d22d4bd977bc8119bb62e3a31fc58920f3d43dc34c06696d4c 30450220575a3547fed9d88aa55083bd633d01f28853491ef77163b2a2a8a0f9e649be1a0221009765f4fe6f8a1291c8c232ba7c57bcdbdfeeb7410a8df8efab0a3580419bf11f 3046022100da6bc211eae4050aeff8568c9848ab1b7b91cdb897aaaed5f350ffd1395bc0a50221009060572bc018025f8f14de477541479d29f89f72cbcceb08aedf8c48847f5eb6 30440220261af10d24bfd24991050628acac432332c40b276cf296302cf87f7b82e7539e0220329239b082be72f1c9664110d7704216f8ecfd4d5f5eeca97beede789142e158 304502204a4f7bf109bd9995558f38f8b5baccfec64b33388296f2a5bc1c712dd9bfedf102210093e938511c133afd59cd08d72590b9570917d10920527c442214c7f66948680c 3046022100e58bfd899ed5edba560a7196d342f6da805093d2d5d74c52ab86ee91b8644cf3022100936b0e0a216f9c2923cfb72ab5d7d2096f31f1e0c5930088395d74887668c5c5 3046022100e9e5238e8d09724d79923491a234d987a007eb64d7b02b4a396c66fb486a5a4f022100a39d6774af47c1a29fe8f3f9f73f8125cfa6e41eee8f9cf232df7a698ab06bbe 3045022100c0fc4f14c8716b394f4bee717037ded5379706c89638a5b89fe73144bf00358502206ecd3bc522f49c0c0c7791d30eb41c4a99a7eed845e587e3ede8ae09500ec566 3045022064acc3a6d0c29ae6eea40db920526bcee6cd9fe9a90e364180d5d6607e624ba4022100a423203bae3c7de77e9099995c87e53b798551ee5dd3dd327a5d69d67744d661 30450220754acd19a8ef495c977de3eabc20ace3530cdcca7fdabee4abca10a745955bee022100b96714466ad93f6cd1d7eed7f7a4ae6af7351edaabecb29d84d20634af461511 3045022100dca7eab328aa2a0aee592bb2b7abe8da5c79e9350c2510ad353298e6131b0fec022004c8f37508a1a4bcf877005857eb0ebd42f624c4a9a64e7c1324bdea837e20b7 
3044022016b5ebc7dccdc289cf2939b829f45bf737759d74e3543cb7e87ac9bd849d488e0220720012f1a64d38d614610ca67d0093a2c685f4c2879f1246de0e2beae6296b4e 3044022037d2b20b53fcec7e0c958f60801d2ae5d4f59fe1165419f2dbd9aa98f65fe345022073d8f71000a22051968befa40707aa2d8b75d3a6c4cc288c11623ae59c71cbc7 3045022100c33fe73f1628397a7cfbace49582258f6c79c4a3bc8935913e151b85b2a0af35022042492f95440e44aba636d498e5012a4d11a3edd47e0aebd943058a25548ff523 30450221008e2b1496bf03ee80d38212f212354b74fafd9ad5774e31696ee70ae391e3a43902204f9266aae26214689b4e2cd3f0d498d30e3041d88076e6d48838d33157d2159b 3045022100dd5664a10a63a91a05eba1cdd9974b789e734d24bad74dff37488cceb3188d6c022031bfa4b23420259dbcc1bdec376affbaa6b66965a3e53782630b12e6af715617 30450221008b264d236bf5980acd7ccf5f14c78650deada6f28d01d4adc586d6291c0a94640220206b567443368c943565b2bc35e8c97d0713db6b95363d8413fa5ef6fbff064f 3044022061af3e9af203bcd448701898084ecae17e8ed5ad15532822e6d584cc5859a0350220193aa6363b9be2291512a22a9119f97753899ca81224e1430009ec904bdd4d6a 3045022024741bdae1ac081532d79dcd740e98542c33c87324cf4457556bfbda982345f2022100c48a3819447508cd9a0fd0d3501239716019daf5ef6fc71b06802d9e34c269a8 3045022053a0e70cb491d7efe868d8b467c206ba437c4e1837ea178199b24967b5a51bf0022100b19620dec7b80994bc9e6738d6d8e7d9d852b70417b3bdb5966b51268b3af4dd 3044022044ce5e8f88dcc851abb6b12fde004fcfd0583b9f183876c0af7eb8c0eef177c4022006f812103074ed0bde9b4c250724c714b5f36868685ba02e913d195ec7ea750c 304502207d8308dfabf52b68e20c39fb12ea53f366a623bb03d5265565948a390e0b0fc10221009f162ff66d202663254d4fe24e733f9a19a3e9ef5a401e30223274701c776a41 3045022100adb79eb84a3b485991660bf4d52b064cf52c7d6c6f4c515afba76b21da8058e50220739f81011a3201dca51ef6fb4a0221db357daba07790bd6410fde4e1443f8588 304502202a1746c160d6ca7eb69b0892947f1c257fb5beb48f0ff76d508a18658fb19a7a022100daa78cb92be2118cdfd5ca32ba65a74746564ea24752c638f1014eb499634d82 3046022100954ac9cedc7321162f77f65b876947f268d66f26591421ad1f5a1bee122c1c14022100f888cc0f3b646ffe9c36d68c0b8ef77585e7ecc71a44331af60266062b9b80a9 3045022003340ef101c1244ec576a41b31f029b510344f86e2cebcfd5c921e8a9eeb6c22022100f2990d025d17b30f7bfcbbb877e52a3fd196ae7c1f417a22a0b164c09d26dc52 30450220183850da6f5f64d3108f72e91abf2dc059ccfe66a0b4013449f4e01b1b78feac022100f476d8c773c9422b89a485ee09e0c5ab7462b20027c75154ace50b65d5e3d7fb 304502204c37ef9f04ad3d42ad95cd593a49922b9a6572ea2a131a4ae755a1edd61f242d022100ad70f836d15e64ad3bb51153b95f8abaf57c9c1195e626bab67d365c9cf374ed 3045022100d55b518b74684f75a89019b0d4694582edd4f095885b9aac5908d8cc0a3fe04f022039c6057fca25b809f10417a1051ba44c9a8d1b5bf4e67198eb090efcc0719a9b 3044022013d5855173099b218ca8e83c92b03811e9766e4a06fe76388ab3bdc1f09b041e02203cbaa6a3cb5d25c9d429fdac7133aa4183d47cde3e2c7712bd08a2130562e2fd 3046022100dd137179b3c66fea8bd7f6e996ed5dbfd606e457401426ee0fdff8a628e21b97022100e1d286b36f2c5dec28d3831714a6025835934ebf8edd491f63eed8f3fa2b9516 3046022100d7a65d20b5cba858e3582e72fdb291a5f9514c3d91b5aea113b4cdfda6b35167022100b47c983a4d32faabdcd032644e1b6892de3fdaa393da79fb410a91c24777847a 3045022100ce6cfce30cc8649f906c0f2a25f30936d704db294964db89eb6135e13bde27c3022016901435ac1bf29d93c012403aa86c75786d289bd159e0cf113dd0f4fafe1ecf 3045022077c7c3d3a79898ba1566a5e68783c6932a3d11013ce0ef4197848ad608dd46f202210099607202afb530696b05d410429614e2afd96c7a489f2d7fbbf12bc20b418554 3046022100f18d9babbe28d31b97bd1c1f25546ed39ab2775a9b4148417f2597c4389ac484022100bfa3d7d75dfebf271a5e031c491d8b7ca2ae05345be175b4fb82b001a6eb7879 
30450221008c6a768e194770841424a7fdfba6fcd31251bf710ec6f01b01b3bd84038fa8e402205fcd19962d95e45c23fb0711a419345eba465f245d372b4c7c3104d7f8321b44 3046022100b0b61565ae89f2b025c28907be1e229c1f47afa93ce48a0e09282582d2a58fb2022100e88c27661cd51a1a71b4a21f8e14c748d99ed75bf63bcea3645e0507ec59d2b8 3045022100ddeb6d52637e73f74dd6af92c58b886bbbe97a18a30f7341e2adf2a68e9dbd6502206da922fe0042856832455fb3cfade42c3be01a85039ffc31a95f14d0f192560c 3044022048c88abe31a5827cabd9b5444bc7b0540f5154b03a2106482b562f42fe3e98d8022024447814e18475b911b01899305c537e2fcdd2ed621c2413c9c11530926e1ffe 3045022100f5c34cd3c658bab35a979c048b70f71733f14ec4ceb74ad91231c0f3cdb0c1db0220303a95f4bdb30eae764347b94a7c4f837d6e62f911d3b5dc666f15670f6842e0 304402204ff608776018e93692338d4f59f22f77edbf23bc61f63df9c79f45394b22c63a022058846b25da6a74ffee7c52a0b9bc593e1ba39a377c5f7ec428d833cec7894c9b 304502210084254c178dca55838dc2bda966ed4783013d89006d67febf56c7701cbc455aef0220642791c3f576542bf6d80e1d51706d55938f8d3cfad1fd3db1589b9ef3fe00b6 304502207f12d7a620d91f1cf723bb2b0326e1270af9f463914b4464c5d0e95dca0388c4022100cb883b6a2bd70158a5b52b4ec5ca43e51406e9e177dad590c2164553dcbfefad 3045022100909ba5cc4a8e97b7cfeaf9caeb99106f96a65887ae2cfbb18c4516cff635597402206881bf4d8549f9de76246225d814fb750fbe8f086aedc36670400f46237684ef 304502205759b6c43c087ef621f0f71446e84b29985c1bd5d5370832d55da79b2f9a135202210093da16ee5b0788017e9d6ee30be85d9785fc4304fead8a85a77a41c149fbb438 3045022100e025c848519c258c95e8add30e60774bf8926cf26076a7f23e7bcb56a225ddd602203127dd2d2a24fea2f72e1cb8df998b44f8dc5c4905bb5b4b459b1a4e814b97b4 304502200d1b52fb77c3473b9da72af0c0bdf3703c94316e5a76c71e773f3524ca157333022100eb021340dc994aff21c3f8a0ac02b941a3e49f6e4c73d05150a61ddc2f9f4346 304402204eb57b6b0cf347bea736c80d0ee9e9c59453eeb3153f5e68e3d5eafd68a92d7702205aac26aef63d6739de8420feb1914f3cc850ee060da5712192bf657e9bc50f8b 304502200d62431edd56451920c97fce5325b12deda2b6026eae9690d58810a79a3f5e81022100c1b8eb528189c8e8b67c96face51476911037c5873cff6aad6155e57f23008ce 3044022031658a5081832cb917259322c555c87a20cd741b840a5f1efdd4ac3d2c751cc5022031bbfd91a036937d6648f0e675ac6b335e15504811c4b2d6ae5925a141dc53de 30450220707cd4a13e442c2f15ecce8101063edd02af6f46d35b244b64f70d69534e4ed3022100aa2b364e146f95b2f7354f4332901b39a26b4d0ddadbe4ea6de1cddf68b72697 3046022100d2fdea24579b544be05b89507adb57abdabefe64500cc155eb266a278451a49a0221009833dab10c71c9359d6f1fd95042c740d4a3b097b20263680e98ffe5f1a97e42 3045022064eed57035fcc7a5cb026afcd83bc8476d0924b36e5fe7b1b98017f5dea81417022100ac63974e81f5472d35d356b6f58272a78a633e97bd60032f72e88fa395269d20 3046022100d26fd1d41c438d9d04fe8db69595709c5c9ba36d021868e466201d666be7f104022100e5b57986e3d4729617eff793caa2ec16af4d6759acea9f3f7feb35eb5a8426c7 3046022100e9c66b87fb0b54c8189dd9b4dc9a7d506cad12f9b053d2b0b2894e8aabe26ab8022100f75813db390d15f528e15fac979e96988fcf0e62d67beb4574de342fe45f1164 3045022026a571203a8489722cf3213096215d9aad6fac2f324b237f09265136499bf4be022100977989c0dea56ad0d89db187eb63989c850db614dca0691941ee28455f9d30e6 3045022021a43be278de88b518adfb00f224901a0e7ad3750ae151853d9112eba6be3ace022100e90b8569bdcb3d0082bc972d162c7d32161efcf1865bae5360615b532afb90de 304402203e78bbcb0c235edac8cd386982895759c48cda445b73ef0e858cfbdf865d42e402201d02dd4158e5d7981fede6a1f2efb650346f8d1a1caba0e6d16df6667875d5c4 30460221009e86ad47bde083fc3261c925168463b9cd0830b31dccf035f30fd97b69b115b6022100c63165d86e86f3e5ba494ed56e467d1313ba6cfa2250384c23bbcb39525a52b7 
3046022100d3fce978cf322aeb66eff326c0aba941dc04810f0167a8dce41dfc0e137c780b02210098018b2fefc04fe065ae9df79b14c92ebf5c15fc04acfb346ebb741d3ba0cc8e 3045022100e850dac129786d5973b12c3d61a3c9492ae7a4849a7570acad1587e4a9e5a5f202205f73ded48a76b67bf0109483d225c747af329ac717b16cba325c40d730300090 3046022100b6cf4fc6e549f29097a1c23ba84a8739b895a5ffe75d16f68b571b074dc5bb01022100ae3752b4b15338ea4e7afd1814021350f2823c7d26a76a99525e74c1e8c74e64 30440220273d918113acb666cffdcddc63e89f34e277780994755a30a97f70f1a4e9341e022006e84a687befa50b8af9b82bf57deee0b410e949067b8cf89ad430e35e83ab9d 304402204684561d91d25fc8f25d812526db2755a1f140ea140acbb1f44129d40b5eaeda02200f1d9d04ead54d960054d9ea60a3400b9591d3959e552ab7491f68f312bce4a1 3045022074e108eea6dfd9f3a6c0f56f6ecb80bc132ace893813dbd043d294bbba050e080221008aa1ff121b5ec774d88200f1b736110c630c9b8fd41fd44efdb50fb4c3ed8491 3044022053d224c0ae5383094b9dc71f3ea6d829f20cc1674993b84bec681dc395cec0d0022073940995a624d2033c9a5697b98184dd9bc360f514db9182e4e22a4024bb7d79 304402201c38e51806daedcd7743da0f68a67dea4ade6c037e4126dc6bee3b1d693e02f102204a8000def60ae8e8e1c0fe95011b4548a29e03a311870ce36528609116c717cc 3046022100f1c8af6ff96d02e6894f2290acea76226ae598b806164ccdad8e9d4008e3eddf022100e44a461406c32474b2ac7531eabd8254b7306ba2ddb4b9040cbe17155c615ce1 3045022100fb3577e2ff48b5261cc1645ccb5060d5ed50f822565cbd4efa7eb7229d32a297022010fd4b3eef1a3395199a71edda5c7669855867a8820e3dac8716f701d42db4c4 3044022074ad9669ffeae67b408549180bccef50d8bc6d5b21e5143978dfad36cace8244022017c7e54b754c3e46f773f70777ee7b6f5f1df04ac792f4684ce946492b12fbbe 304502200240b054c809e4ff2e2365e94633449c7eeefc8b78c82a9f8c6b06af2ccd60e40221009e539a82fb6984df590ba0dd8451a30f1b5585fbfb8f7cd0994c78cdcb9011f6 304402203ba8da6454456af66e47ddd58dee096fb1d6730bad0ccca2be6726fe1649a8c102206fba6d367703910437024a5ffd3873cfb30a48ebca648fafde19b0f772562dd8 3044022079e2b202f96511f84433f6f38cfe7d889fb50704c4d1623df6fc2cdef23473630220363d99cf18f54c69e87f00a990ce0d92e459b9a79496273a451bb88fc8bf9340 3045022100f483adb0d3ff1fc3437fb8e2a92fd5960265978ba0a0b280efdaf95d727c2ea4022065f4b0a54729993ef601e8d2cc5668c3d00b28e7a6da38412651f5e50cc7f6c1 304502200bead0f0555d4fce5b550bec2c6b584aadf528a2f249b737355ae8a38631c3700221008c7359e623aba2316505f5fd159e91ac7c9659516c6b5fdf77b0333a0a638c9b 3044022061f047f5d4a296c83954880af0c399c39937515ccdf3b46969ea3b4a710872bf02203d1afeca753fb7ec97c17447eb21f413cd7b9983f9c96c3495ba559afb2ec827 304402204f98bdcf991276a23ba27a43e5e3785c0711a30afab7c3791a9e79c13b05c4f202207d21e341d8c7e5ccc338049c523e90cd312ea48457079b858627a79d78aef031 3046022100c13cd3f6c10ea5ca939d2ec38b8d5dee8cb909416cd0f63390c640b767ef713802210094721b21f36de5d161e970ce3cad956ad7493820fb4fb9382f6e3e4673450764 3045022100add9c56727a53529533369f93a2181c1f5644b281415ee1b5bd0404f300402660220329c2b42df4796ba56100cad5f52ceafe340d4471882e10409f0d69d13806321 3045022014aac4df9379a7c966cf00b8b663cdc4d9162abea90961b580cfb0a87329ddfd022100fd2a9e6f8f9b0b89cf64e9062c2df927814b01fac5aec6a87f3a5273085197b0 3046022100b6515c849555965001528d6980ab598dad81165f4e017ab97e7de1342fde67bb022100c9f98c4ea30bb8a1238566c43086bc2dec5b977d9a00b15f64402f4b6aca01a6 30460221009372c1da99635b2014caecf13f40551c1e2dd66186ff58e5fc32756d7502f2b40221009304b646fc861c1daaa21522002b8dbf0fed26c87c77456e16f1bf165e1bcb04 3044022016a7baa55e6c53f45845bb493b36c47614698b1e97114e4638c31ea7ffa3e83502202fab43fe2fff504420e1fc9afdb2f432e2b9b4a05706b90a03f94f0afbc0a828 
304502204c90eafd68dd2a888c5a5618c1c31466b83a1c6bbfa0b6b22e6e8d7eb9eb2b46022100e94fca9a506d399ef3b0db2f9a34a76d1857dc39e74f0db689c2ff2e3bb40de7 3046022100b84f712afec4317ed74bbe66bf778ecb6f1f12605a5c93b8a2c324d95cc28954022100979a398195138e3bf8e0fc93e39e320f2195272c43a73333c56826e1927adf85 3044022041c50cd21f0453dca0ca931963da2027156117dfd21e4081f28687179c56a0d7022039577c34c8ed1a629d63be331cddae70c922c8dfb78a1a5dd0deeddb8c121f97 3044022044c235aaa1c3978965c0aa1956a15f49f89dd59b3eb03526446ae3cd66527df102200a5bf06b80f6a4541bdedae714fad41ed6fab3c6350b57ec5926c2a558d79fb2 3045022006a1fc89733474dbd16726c54fef7ce4e792ed833056d3a30149f8c7f3bb189c02210092df1b375b2c590e5357d92ed3d9854e48983d8782467bb3d7c5b38bf196b383 30460221009c641e08a0183289dd2e4535a283eb13f4908aa9c7ad45c39a5015366b8204a4022100939da2414294a60fc208bb44f654a86f72737ab6e5462bf4b39fc238a640f5bc 3045022065207faca574c45dab1061a62260a75d6c6d8a25476bc58bf0a291ec0ee400720221008ac08c2babad076cfa866fd32b797a691320862f52fd675b847fb3d999798fbd 304502206035c605a9b5a3b59853a8c81678083df82f2e694cc965bebf1662678facae01022100d7b797f5c3254b6df58a9d004f0c3205fa18005d2eb2e2b477302209392f48ec 304402204a05b70746a4c052aaad9dfb5c3db59b2eae3c55e44b7aa029fe21e894ecaf87022014cdd48fcf47caff5b7690bab6cb6ac657d6ef3e95139755489f46387dd4d81e 30450220171a1e06a25b345bb26c1832b65e909d59a58d7ef7d5cfddbbe1f84069ff4114022100b95df7cf2b7ff8d6e17bc2c03ff0575b5da097fdd62f5a5bde769e367a6e04c3 3046022100bcb680c52eb1e898cc079cb85cd27f580622b6487db0d58e02d268ec74793d19022100da5adb657dfbf21066eca70db93f111e7c2a58f4d8f833160e37615a4bb3f440 3045022100a1da5561abe8a6da89dd3da840d5db7ce810eae7e1c9b2e6e115e6b8dca6f1b3022065cdde1e3942e33250041e573e3fb9858687cfb60214e4c02893b13e7203c4a8 3046022100d925cd75fffcd95d7b136b2d2741dbe4fd50d5e1ffdee3c02c397286b65aa60d0221009d2dc23c75335b6a0a1168e8d8062e1312599e60de4d3649147b5d76092a420b 3044022056ae018276e7dcfe23b96b4207b13b70147710289e3de9269415970b4e6f1d1002205dc13d3642d61c2ae1faa36e3e383b2bd77c0b22ea886ec7f16ba8d772089d6d 3045022040787ec5667e90e16cb046bc4433bf8a7c1504d720d8e8c50ac4bebe2480d5ba022100a8262eaaf3b1411929f18938f2e974ae0150add7a13bbd36f7518b0d751d584f 3046022100ff19483f542fcd0bcb5848bfa44afe4294e4583a80590e1dc2180f9630cffd6b022100859a6211233db2d2a62e876ce39cf5e90afaaf2ca2ddf41743839ef3a60cfa46 3045022100e78a7940bbb321afd4c8532b47277f8dc086225d5d49e950f7ce2c12bf129b0002203ec5ed773dad656e22aaebe6c1f95cfaf878056c95f37e9e0df129fbdd5cf532 304402205f4298e5931090d4fa770a7786e4a6d6b41096ff42067dfa0cb4257b81ba324d02202e62c29e327bbf9bf4f69d1b9f06c01ab67b08f14b2dbb8e27da3276c028464f 3045022043d8f469d6debe301a574bb36fc7b37650b3dd5007b41dc9f99c0cd431fe23bf0221009d0131f05b554426afd00fb07d220578c605f1f433b18afc3385d565e97b3552 304502210090a28851a36e07642cc90bad3f9c6f319eb934b127f357aa77b2a1f9f636216e0220265622ba12e87248955e99c7d3c218f24449c1e5fa1b74a94fa06f750a948eee 30440220414dc8274bd1df2b63f46b2532097cae2f902b06dd8004295f5aa8214ec02f3f02201d13188b1fd60a039490c6ee67406e210a3b6854ed1d1f08b6ff852831999099 304602210097f4a04044cb35fe3a355a3a92a1e777cef9ee0a527194f1f8872a1866329e6b022100e92ee09d3a8930d6f0bcfa101206021e0d9d5e3ee48ba8e6b8c4bf0f283fb542 3045022031921864755848c056a1262e81faa2cf1f0ece06d841893aac5de836be28377802210086967e3a7fb0118f522f4c47953002f201f7f6ab26514b588c77dd8ccf11a087 30450220242ceaf8533de2445c05bc4eed50dcc9843b3ac0d358bd42c044ac50d9da9c32022100e926ffb7604ed1237a24270757e80617bbb37b47c966edb72bcb1ee962f2ac29 
3044022054548600bf96a2ef9341f4ec1dc3f3da0324f0b95962991cfc0160109934b32b02204d6b8262d2e47ebbdec2e0304e63b23cb87d35e7f6913b60084887dbd27c3520 3046022100e72c8800fefa0d742439cca6d2df8a584de97cd5ff82402fad9c892635168651022100e53f0ad64b85fc565c7be9500184077c152dcd8d3ce9e3ff44ba66689d832d83 3045022100ab094f92699c6c113cdae46575c045bae29eb4438696c5bc2b8a044c3753af560220541d021e4f44a079869d0c6645d7104a18795b1f349122a9b13bf862a87b1c80 3045022100e12fbc644d85d51a0110bdd6a3577902783ce661da4c7c1facfd0bdfb689d7160220180e020d02e5b97f5447c5bf7197aab52ac4fea3f21597bd8ae53396c64799fa 3045022100b58bef978e0a85113f4957f53f5f2a554545d31f3204b4d291e08c72acc1587d02202e18c5bafa77fb47e0581f05e51d4a1286a20b70c643c13aa6aea71a699cff73 3046022100cf954cac27cc6754b85eafaedf3f24f6fcb6dfd45d916ca78553ef0dc7a68b1b022100a0fdf7efbd2aaa914299907bcec78dbddf08b5e8f020070154510c562a55fc4c 3046022100983cdd5fc1ae445a78d8313dea3baaa36ff1633014b02a07e94e23255d9d32a9022100b8321c7d5ecc374f12b234c87c59694db7bd0501cd1e4a92adabfc2aa1ad3fea 3045022100fa1c819cd5e86ca10d4d55e14455a20b0e52b4f5059b2cd5e4e1097ded572a03022002ef00e1cb224ee53e3f080ff7824cc0abe20ab5292fa097a162974812ab76eb 3044022058f772f2fc3d5fda6ba7b21a80fa07be54696e97a095117fc29d52ea04735b6902207e3136a50c36b0824bfb3ca1a87a770d8e431e9fe9ee42e89e9a5df9b7913a87 304502200d1d1df21ed65157bb4a4b20c7620c9888319986b58a5a617215669dbcc9d53502210086ddc1c3a9d2b508ad9338815a077188316eda375d847df42ace5faf0459dedb 304602210093082fd26b7f3271b502aa923296eaca950317f69deb9f3093f32988aa4e4532022100b4d8e66fa2b3a27160a61c64edaef4a53153a702a097a6564cddf8e617ba4cb3 304402200e1e4a69675bb5100ffee88b1aab0884b46a11ae9f43a97f66f9594827842a0902206d2014d2495bbce0f30a57928468b37161ffb74f4e0bdd232d4f46be41c0d105 3045022100d6de5e2ce87e4945c5f486fd116ca7439c8cfaf90fc0786a2cc2df0341a5ed1202204235cd2eba7ef359733bd4a33a89c18b5d7b427ece5407ec2707360f61dd6f62 30450220018726ed30d75a791c1a4fa09d88eac140b092e7cb39687fc06c35faa45c9b46022100bd2d216dcd71d150b44d54d015bf3f428788214745bb07b51c02fb30477e00a7 3046022100e6efbd4acd28f7a0a62d6fe00846bf8ae0f439b5711e5bc3308b81c8b81fd74e022100e5e678c15a40c63c629135b80dea6935a56f47a862ab661d6d16baeb75f1cfc1 304302203069af1923c9102649fb431dd812a2f4b9bc63c152019867ac965827fbd418a3021f5ad3a69d20d0b0967191cfcc287ce76c929228dbea4db7114ddb71865ba24a 304502205ee02fd7941da9d2bae811a848b1ad3c5274c05deba331f86d30e028dfb1b4fb022100fdda573df40b5c6b500745e6540ac762beec7a8da6a1de9ed2a6097be1aff09e 3046022100f13a7395086a3eb4f4ada7d3b6741990c7a01062d2c4b9d0e7b28f9353344803022100f3de655aa49d2944e13ad55ee92b7359066b247eb15407128347cb68592cd6b4 304502203d90bcf1dbb28b4fd8ac37d071d6def815bbf2ad018bea52b40b5a7523915f150221008ed104974ea38269c0c049761e6db774776ce83a275b877c23135b7589be1c24 3045022043656f7330909854d37621047d7a667239a30d9294ec81fce9c8ec60b70b6075022100ebc428c5b5bda18660e55fa6acadacbe714da6ce7df9602069266f2d7017ffef 3045022100f9c8aef316e64b6a5c647156ccfa4a9926678058eacd68bd3630b1d7203baf910220526d914a8a81a0c205d73f4717ee6b632606f3892b343c536f36599d5992b9ec 304502200c773e569366ac996c9091af5ce324174283f7c6111bc7720091944d0675aaba022100f3102869d3c7c6e406b9f992602e7ee2237f2fd954284d6d258b8d0cc50dcbee 30450220099b49cfa675e3cc60af7923d9fa98995292c1952e76972b1508a5ef2fd334270221009b979b451cee71ed93892f0b33057b159088c542ed826a4d87ca327cf9f1ead3 3045022045be0c52ad650673275be7059281eaefebe54932e60efdba39983e74d570bfb1022100a83980dd2fcc6a0022733343977d6c110d0b274ef39a2cb50d246d06924f7469 
304502204f2437b796bd5a329b9e9a42ffee0f0ff4fea0f6961caee347edacc9912a96690221009352275d80c3555aaef94b995fdfe72884898692facf3172775b168452883439 304402204930e5517711c613132d3da1ba33595e9b2821dc9141ad2d1db25a69382c55b4022018b518b5dd4f0a9b998a3f74b01071c282111501f868073758539f9e9ad9eab4 3046022100ba74538cb42830dd124f43c88f017c42e9d2ce503638ed21727d5326aa074921022100f92c32081ff180d8a048d0ae8b6e0934801aee1a80a9a9adeaabf5b494d1a64f 3045022056b73ce5a1eaa3aea9039c26d711fe6e9dcfb70b3d293d0ffbb5d2d3bc3c142d022100bc30431ed46759f23e009c42c9704db586f6a4a45f41debfa063ebbab11317d3 3046022100a211c275a787a3d3b2d2afe9abaa89b72b03f505af4e6fde2f91857edd0c0b73022100ef98b2757168b3642d0b3921da3ae29f90097d164dd6ae7fabc3be106e9dc195 3046022100da4db0295577fa58606b6e63e0e254cccd2748d9b4691a3a518d92af3ad93879022100e84090bd9305343cae2664b205cf62b2b2bb58ef02212459f5c40c02b9cb3d11 304402205bb2893a70e69b20f3e29cfe5bd7da44d72c18af64de14b280aa193e7c760c5002200a28ad5454188c5bbe9384ad7910aae5e2f1c776a642845def4d2b31cd44f500 304502205e96d6636082b5cd9445a14343e16ef1c461722a5ba5e79e4aa2e46f9c296729022100a80e7cfbc95bfb8b4dd3ddd8d53d60cdfc2ed4bd2fc79a81f2ed4ce5fd81e165 304502206964d1feafeedcd30629575f9ef287c6e52fe7d5e8a6b3f30548b6e2a7cb9830022100db94676d74e455ccb00d35cebbeeecdec8dfd8a6c4d6547ab9da81f2970f19e8 3045022100ff88a123459f94f7dd0713f63e42ed511fb0c8567d2e0bf0954e24791eb6052d02202776776c54114515840a3a9bc2112f7aa6d6cd9be2f913167156cd4158d16f53 30440220562aadb0dc6109539e92d419556f18ca300bf9713203b61901aefa9182bd0837022038d22e619acfd61e3ebda1c42ead8a0565ebc49fee2e69a15c48f811a60c6485 3046022100c2265f16ceb3dfa592c3ce416c6b8e44dd83311e64cb109186ec7f71dff3fb14022100b13a00a54a20c45ad6171d582b50b2cf49532fc61ce3679bf8deced66d3561e1 3045022100e01411db5b6e28fbdf40edf64c1f56685fcd624c8883464f3ed7914bb160f2fc022042ed745088047ee932eb4d7008535a4f64a77e6e98341fa02d89396d279de129 30450220228e92ca77129fdf1ab23a56b7d5ded26e2395eb47cdb25ecc64bded0d895392022100a240c5e487b778731ee0a14b4101824272d13476a832c1f5659ccb9b55f791b0 30450220707061b73abf9f438d8f62f4cc082c540bf3093fc5885d8635921c5954c8d6dc022100c4ffa9e3111c6d0c4e6e6f3dba8298c27751aa139c9130c05285bc9ea6342e21 3046022100ba7eacee86e464ec34c8b4a58ad47b0b23d5017ed60150c51eef99426ab2a4cd022100ab9c512230a8dcbf89eb340987678bc38a8dbc46945820b9becfb73d05617b8c 304402206c5ec923a87bc4ecce7968cb7c03d1ab63f9c19d1fd77e6e235adf64cc09481f02203441d57eeb3c682b4d45adafe091d96853b970cd94e65ff024dc669551cc9f9c 3046022100be7ba7cf71cd2b9d0a8b06abe9a5067cef3e02be8288b9d1ceb054791368aa06022100f70ddbba22268b84839073248f4cf8427ea8ff49e996da3df2591f7a5a4592ad 304402200abecc8e632b96bd7525e5d6623c6a0b5401185373d44da3f89364a3d1282f4802202b398fe267c005fb44e366002f784aa5cfb290c4da7dd214dd21e90ec22fd2b0 3045022035f05bfeaad56cb76d9b792de45a979f702f449c1f79eac8a61273efb6a3951902210081f223e7b91bfd950c2ee0086fd40dd2533cd272b0bcf7d3b16c958f842c771d 3045022100976bcaef7bafac0599601b94a5016068148a04f92a6b3823b8af5c3a5c3f419402202d07baa8f44e29095ddac294849b5817bad1ff733faf44b63871f31a3e32fa2e 3046022100a811c477a9b82fbc5ba863d0863d0770f83d33e13f05b9c61e9930446b248e46022100b9fc0e550ed895e54c20c569235613163453e1b2b292bef49aca6196bb1fd04b 3046022100e2e973e42f1d4b2773794e747d504a0457b2c9a59f8c013dce5060fd2ce781b4022100cd7bd978786238b4ebf59c141994f6cf459c4dfa45056069f177d4a9b6780d52 304402202e96f2a555a0807d16b776a5efcab827754a4f847f1c166a3bd0e86ed7d1e17d022014d20f643c947d40bbf276fe00826d05a27cdfb1a53f17c9747363b0cfea7512 
3044022029abc208595383b1342bfce3974ce0219e9db56ebf90d10395f53abd260873ed0220575bd9fb7d01836592af0c98c0294958baff1a43cdba9e2239c9f0b21507b2a8 30450221008b13437c4fa19aed748ba1f6829928b86400b35594c30836c0ee468c75318e14022003676de4bc1a59581d3b291e6c7e1ec8fa8d8b46a7f872d4ead2602c25696981 3046022100eaab150b3597567ea8bdf3939ae7061f8ca282f70d182968033e77feab22ae5402210093960fc05357a35970fc8084ceadfa911ccda5f87e7e15386913d00b9909fa2d 3046022100df84843333255afa87fff177aec4e18aac59fe16000a1ac534a38cdaaaf6ed58022100a170b0aa190a210330dddee733e81665f0f145cd39a3c95b38bd802ffd6ae3b6 3045022100a04de7c1a21564c7cc70f1af44262f085f62a164543d129d888eacab193bd7e6022078e5e6e442ffc26db5275685e4164b76f6bcfcd1b287074b5cf5a37db1272653 3045022100c952c499ccabac2e3c6d4a2d2f7b7a85f5d7532164d36ddca8b40f0555383fff0220709c59eca5f7e3131f76124673be716b8ca63f19c9f6ad9d9db3eb0a8b842f1c 3046022100c938fe83ac4687678ce56da6c7662e8d71f2f78ca4dde3d0ee5453a8fb32b587022100ff20bcfe990a438aec87a5754b9f6802772916bb697b9b281543c47221565dcf 30450220579375b3cd2665ebe2edff3977e6a01b1a3fb19539cdefa374f1463d3c54ca79022100eff90e7bf1af856d052fd3b508fe85385a02e742a3a52cdf2dfe24a1c83db677 30460221008321bddbdd135af0aef894572eb667f29253ba4b28b769b98069cc6371ddbcdc022100eba00bf4567811281cd458558e4f4723f220e5944b839fca8449173a4079bcd0 304502206fef1bc8cbd366b379d538fd208241069bb4e656aa0241a99e71dc6d66639a9602210092a25e4b8884902492d273e616e92be628097e441558310810449c8f8a93925b 304402202a4abb9be69949cbdb536a0a1b573c26d931fa4c8f263df50b637e0e372e81fb02200506fd44a5e24fad3cef284b8c23b818012f0fd1641b98144491ab5157a49ec1 30440220678d9d14a8e70bf422315b5536427d26f1270848849d983ff00792fd5b618895022076d1bb6851d8178fdaa1f7aaf29cd1747d76c94a6f3f98361ec5cad5ef9398f1 30460221009aa2e68781fa463cb31f13ec8f3c9e9f919a21a2f7b02af9eb27f2bd8f423b8a022100baae5af50a2b5d8a7c19c1bef6d760c56d4e212b401fee62eda138e0010551b0 3045022100c310fed332ff1d3408edf8147052efbff945a239aacf345ae475a45a91b744f3022008bffbfe69f1921f4f616eec9f03516dd9c2f160d6103c2643b28f2d734148d2 3044022003abcd83348f45350f3885bc10218d82c3e12c7061bd8cd7b008eabf77f4c96d02201e858ecc26942cd0dfad967383f578a43c5302c5891c2df53d23134a79845235 3044022045be2d6275633691fe235ccae25c3db9c34fbd08026d18b7b4a6a317009fec4a022056ed511be08047a9a9be372240c78c3f69692d4cfd9a22e931f5984e6391ea4e 30440220166b5ce2d4b3395d467e28dad3cae7a44fb2791a74a95234b8d0ba39466e1e920220637156521bd5b5b9d248836779ad83e06c16685e9fc7e5dbbcfcb61899e5ae8f 3046022100858d51f20e48df10bc0388a734b7f38c9f21d90f65cf842f874f96190d12c9510221009a53f4cbe5b645b5ff6f875d1b6db92f17cd16fcb4e1f0f0710e249a5d8313ab 30450220057ec7e6bdbcbfdd3d49200c2ecba6f80e032244f297f9042cb7908d0b50346a02210094536039629219730a8d6206890f94f193eb544d9a4a6c05a1049bf05f6b3152 3046022100f943ecd110dd71faf1bf9291976af84583d5d869dd64e331f83727823bf51984022100fe70333348e3769fbeee2f5582986b203f4125cb40889a814cf8d93959bd3e48 3044022014107ceae6223ab7eb3b1362a9db32d51dd28e8e5407cc738eef90342c1e465902206bb67cdfaab120365b555849ca1a6946e0aa6729d4b59ee69dad98175106b115 304502210086620795e1411f56ee134f614295e100310e0d70a1a00e60e090349a4c01814302206e346b1757807631b4542f7fb8330428d3f283573bb232c51d990ec76047fb2e 304502206aa781edca64cbb2b62ec9d326933c3844c396954e1a382ef69b30360e2d0eb40221008aabd6d06ff108e13094bed4d691beac960cce3fddbf787cbdfdd03f80bfff79 3045022100f71067d39ca849945f6a3d8d7183df1a3351b5257448914c56b38fea9be6b63a02200dfd7f20f538c9a6787d66c72edeb7c0d18b6ca644bc5bfa879ecc7110a114da 
3044022063c14746ed2261a2dd8b5b967a5c142aa1002d432a6d88cfa944a45a9db68110022036d32e5100bf589c757d6d3f9710e51762c072585d60e87aee63be37a15a907b 3046022100af1f97ef05946edc9f95939b7f13fb13d1ed3ed15b5f5842ed5e6b997cd4cf63022100d0beff2d922a3ac47f6b3fb3f440a7d7df6c82ecda0fa544350b615558c25bf8 3046022100ca10e96e06675e133adf0a1ec3932db7d0496f84bad67ee2781e1772160cecd6022100bcaacf67b40abd103d0e626af426a3403830531fe6b0e00af1bad89ca49bd652 3046022100d4b9e8be4ce05d6533fb8fc97ad4aae244684b61bfa609d6c8b500ddc9c62e7802210094ac9827457c0c0f161bc7cfb840abfea66127e7f18c95a17b61075adcde45b4 304402206e8f4de0dd0599e5cee2581cccc929e820d3359360f9eefa23629f31669949a6022009164806d4b82e565e140a9f80a9eab6254fe015652cc3375a9b6af1d14d964b 304402200dade4cc185b634e0ce7f71121b387bc5fee97e026941601d5287bd12975baf802202880d1e581d9a937ce3c3d6eed17c55ebdfb1c57e0d561896e878427b9ea7ae9 3046022100f5b3bf04aa31538775aee6c847cc963867b53dce0498e54f3a973d33d376437e022100d735f4025579d00c20816700261a70795df98cdf5940ca73ccd089504a05cab0 3046022100d120fea70d06071409b9cad1b9f902c06bb287b5afc0a8c80f283c188941c7c0022100d0b3630d9d8d5aa8b0bd439c2c07d64e0e81f30ab4cf6a89dce3fbadbfb84c1a 3046022100dc89f719e0f0f207e92e28c87d4ee21688ec964a195d0117d572372df29a5c8d0221009383a822bab880cb1ac3c35ac73222e296a38d903470073973253c2244dd9dcf 30450220600cd2f9386a5e01e3b9c57ea34617faca1b3deea945e5fe89252f71661b3a05022100f10643fd2705819d6a105b66a405282152ad8633dfc73052b26fd003ff7f8fe4 304402202f039c815602778868bbfe7a9820a43d6f028916e56c22e8e5da823c2c39e92f02203ddb3d45388b370b0a9eb798fa2ce6517f9dc809f399dc60a7088de9a855fb6d 304502202fb3a6c8f3bf12b68e8cc5793eee7af82e69b02d9dee7fd568b3a2139f41243f022100958c18715e8f3a6d264962b4da33290a4f6e42a5efaef3e8f56a4494aeb4ddbd 304502201988ea328f5c63c4f7bbf0ec1fae51699b9969ec5ed0d37fb40fa98450c4661d022100a17aa30b02390b548a80bd8e83fdbf4dc2c473d4d4746a89c9b2a2113753de26 3045022073ecadee222e848a5e517c90eb5d42ec93e0a76d4b3b0430f920eb2c7da8521d022100af87e5500e60f953a6f75ec6e2a9640e0a2ba893dbc02f456923516dd2539661 30440220252d8d5899b24e85abb49a963da9990be912190369254ca77e2c57a2bd4c5e5a0220735bdc1f82937ccc9706c85cdf0a9aebcfa77d851079b9a969b87b3b4e21d5c9 3046022100a506776d5fa7b71f98f5ff309e05b5356dab207ed0657aa8c6cdaf6e9b051621022100fde65565631a16e8f01812aab216b9193384935155cdf8da270cae9d3220beec 30460221008eaff07c859d05c23de09be3f21fc638450b14ae9f439a90268f084d4f922275022100a765d396857c309936bbc86212f1ece0724cd8f0c99aef9d8aedeb7969bddcd1 3046022100ac390919287a76af7e51a05833ceb75cfbf1249cbb130699da67648f6a1ff068022100a7bba084b20c29d3792c51ddf5df21bcdbf2d2ed4eda182f9ddf40d6aeb151e1 3046022100e6bf16c2923fb3952382eec672d5c3d7e8897be7da8f977176b525a0550685610221009b2a07f41c860482125c55ca527bde65588b3b36568006b1ac028191e4cbce9a 304402207ba896683ecd048b38390f1e7d3be609c857c77d9770002ba66ebbe1e0177fae0220320a13f94ae23d1b325a9053ea086dd3baca94daebc4a5b51d4b123155bd8868 3045022100d1db20a645b21716fa72b30ff58105e6b1676fc7501d24a433ed72a7180239b80220222caa43a5b78a91f4d8d7ab23f399d371cd50eb663b689e3b25b8ab13486b52 3046022100dc791b41174f098b607dce0382d4dc5c2b7599df8433fc08144928389ee8e04d022100f5053f7311c393e9e82b620f0ff89fd31d470b93489ca67c6a5fb75129a5e644 3046022100b77af32b4b68af4b85bb82d07cd9597192c53dcf800748190099c9e0c126dbdc0221008679c74a628d0ac1aa9948925dbb3a654d62233cb7a204f8bc7141f86915132e 304402207ff24babdeda6b052a3df596e1eefbbec29436dcdc51a6903af620a981f87e0102205698bc1d4b9f8f6650e66c31ee0c4013ca5a872b2cd441e7a1469c04a9bbe5ae 
3045022100ef2197b09e554b7ad0953e696311af50363103071d598dd87838f49dc15aadb20220389a4e94100e7c91d63df718658257051045e1a2205e8ddb1859f331efdacbe0 3046022100a6d3561774c34f523d0bbf39fa6f26d2fdd4febfde43ec6f84293ee7ee9111ec022100a792e67aba37e7d26fc6fcc7ca3aa3a0d5931609548bf7743cc76f5e1b371825 3045022100c9c26e0d8f22bfd84bf61d2ba9383febe58e84b865644c7c57a30df7ab916efd02202ee425e465c64955aa3974e2e6b5abcf59347d843b6c0beea9ef2118d2a98c46 3045022053dd92eb0f6a84b74061902b5862d7dddc263413bf8085fb43b04bd3a9663dfc022100afe35495e8c2cc202399da4b91435decf418d20bc9a387e22f40b4ac5b33780f 304402200192c1204f5f39c9fe37f0757b8f93bb52ed408cc3529633d23f66efb5896baf02201c7bc3d74a4444d1235723760b1b8c058ce42faec585b5e1848e94ad43e95721 304502200a42d0c5fc73c87bb8712dc495f409e2649ce129cfdda0ec217b066158503e35022100b4dc7764186a4f262c73b149db78d2b93879f77c038c92f35cf6b5dabff43918 30430220338f1e0f497a284b43e414e13b401e2d1c37469cc3d2c3a9356923828feeb521021f28ed38339763b1c0f30765a8cb2d19acdf7c9297bad4ecbb9e488804114ba2 304502204df554746b1fc5322f6dac133331489f8e14983ff3d74cd0436a68c08580e414022100d7b508ef9f1e79c186e3ab35bc9ce9205e80e527480c7c6498197eb584f67c6a 3046022100eb79e005d7309ba516078a1c7b9438ff58b3fa1e526f51b00fc9b6467994d52d022100e04711c7b934b856dce8d3df985d9e2583896548b60c8073757e2a9ba34444b3 3046022100b71abf77ffdd921713432bf94d9fa8db73d9b27711069055357e48e670a19d93022100dbee8786385406f3b3fb30719058fd71831d58994e1bbc98536a52555226e503 304502204cabd9fa6c8f36d6fb6053838ee280fe46fccbe7ba8885dfa0b2fab717f82e4702210090f3c4278e1d488e5b53ad03ab3eec01d10a466c50568be700e1172ddbaf5de0 30450220642b4132c0c46ff20a16a09a152134469b58817814485520e4bd8ba26b04ae8602210087283a8d55775afe6e0ee6ed4da5011778ffabab8af6a05e3c8bb506f0cd4c93 3045022075e920550623c6fbe2a01e5d3ca6b10c2872a2ad0b85ee4577468b06ec78b274022100ee25ed579d17af78ae17988191714c868fc17fa5ebad02ca58a9eeefedcbfea4 3045022100ebc084ad04f0d43a4c778589294af1b1b811bb527ad4f7dc4389dd48852de60c0220375ad1b3cb13b1844df4cb85c5824cff3f1efd448932f96cbb379438c56fb578 3045022032604350c628cc7e1b65f607a608a3a328422f2382d009113f24bc48033449e1022100c87283f1209b744a9412d62c285f7bb80a16614c97a5ddba91ccda35767268df 3044022015f3fbf22d941f6f3007a929ef46f189f2c056bf8482e7f0f33d1f83a96a930202203b54799dae8bd211fd4e807df52b16562782f136c50c439da6a13705ff998c30 3044022076aade05b60b68ddbf048c2b6ce868c0d30a1710cc5f465e5aa6bfa5371f8d980220503f4661a6a0a79924b05b59fbe136e7d88188f53f859bf9b7f7b5b5beac4ce1 304402203ff693b9405e6657a315ced8916f93d2ae6b5b216f0b5a613ff2f0e7a5125afc02204a9d2a877c47901c2448352ed837dc36074e8d2d05804d3eb31e836f47a4af1c 3045022100f561a0bdf753622765694cc5c3d3c9c08ad6dee2b5992342d7849b9a8b20693002200fb374fa1bd50781d9a7b93cd36825b6f1bf18bca5c4169c2d06c225bdce31b7 3046022100c6293d5eefb883942a559be4c6a1d8c6fef8ef124e501cbc4a56d370150e0daa022100d039538f1b5fe9c6c3400e75e0a37cc3b8988e3c6468bf4b8278df7dde98d164 30450220575508bf75422a4c1c68ad6d160f703e32070356b5096fef40772baa04aef190022100d1e04756616183c112818d89ce21a94e7a456c520a3335ab692a2a89994497fa 304502203ec210ac16576d1151c3553aae3fb08a463945866ee2bc6ea70534e4dcf1dc1d0221009e17063ee4a0c86b4fe70811e26484d73dd95fc10cdc6237733c4847a52a5df8 3044022064d2a5729ea7faad41c7239920fbd819c7819798e6a1c56cff4b6eda42f7065c02203e2b1c0de53cc7df19459cc58fa29dab4f51dc522154606de860122476628622 30460221008516e9c471847735b4d409333a5f553cf1e792c2be384009c014bc48aa196861022100a3ff48f32756c2278c941582e343444bdad3fcc1e4d990fced4118df7c6670ef 
30450221008ee0aa4bf999190b450542b31dd1e2f3de908bf51791358ebcb0c08c63d645c4022013131b94b34c62befb3935dc00c191cb164140c970b2148ac197dd546fcc7d41 3046022100e3b9e12515236f5eeab297d087362daeb0630688766b9b9c668eb7c062614bfa022100e1c847df78283fcac0a5e0f96508af8fb277024d8092956e44efc925dd01e46b 3045022100e3a449e63c26538ab23d7d9694fea433a0291002b3f327d1443532fccc2a649d022063a6d2d40f0cfe6cb2e6c34dd82c0dc5cc79a40e7710d8e26225803f4d2a3af9 304602210083bf91b3f6cd043cc5dffb69cdd37cba915aaff41b37badbe259dafda168a824022100cd297156d3f00f477a9c1f82d787a98d9db6a1105507105b38ce832acf4814a9 30450220570e8027d742233df4c731257b232394857a508b2b84cd7fb6308e8b6b5458fe022100b32d73cd24066ab0b6542fc00d78b4ed438d0af361e5a3dbc1079511c1ec80d5 304502201c879ecf7a22848495e816571db6a99ffd35dbe697e3cbf330d7bad6791c67640221008fc85a143c19788fa4c8c39c1570233ad854898d6aec3d91d15dd30197eea597 3046022100e13161a18fea35ee6d3f486ac08e29f7783f1d2deff0134df21125b9acc9f4ed0221009960e275ea909306ee0670a155587bfad564a0706ddced5cff2dc6781fd8e491 304502203859d775c01ecf165be0d5ec7574f9a5f9ea067a3a78ba1a031a609928b52d73022100b4193d076c52c14a5bbf46a2f7e121308f1fd33e6adec71247f322b264be752a 3044022072bfed7a613149047495077b3402614c10472d6bcfb5fadcd3cea7c69a520e0d022013c41b246d2f0f636300d00faf24aa5f82c967b1901cf213002d8924d2eadb1b 3046022100e2173df8c03acc964aed09d0d7b33e63f828da67cf56e1f431d22ade2a1060380221009e77a12d44f6821d8cced0c5b097cbdda4f76aefea436965d1519dac8bb860cf 3045022100a6b2700f37c6ead18cef4c2e941af45ff65c604df5c4a6e8e86a1d233027755102207d049f55ac5601b09ab840271979b7a567bd218441535aebd1f6d94f5257e236 304402200cf493bf7f1e3d821af8a87fa8f63ae12d885b5c5c1818830422396ff4180cc60220021d51074d0b6c33d20cda6978e125a65a85590cd2cf25fe1a334813ea8b23fb 3045022100b119dc77a77e6f69a17dcb4b0c34a1eb34c2aec215a33f8d8d7fe0047fa94ff602200bc89c81a689ad49d798a67ce01b4d4d3987bec2629442fd82130c03bf817d5d 3046022100d11d28515fe6424351898e4d994b6ecfab686a031aefc2977c5e3811b93e2a6e0221008a8950bfe8dc85baa440f2490ecdd6a6d6b394bf5be774d999769240464b3467 3045022015a1631306bf8668012cdf87ac7de32b0c5abe6f45cb0e11a6517d1f09917e19022100f720557321bd10300a77a58b256a6460de80278f2d36f075045d22ed31220771 30450220536ed986dffec757a6b40905a4e223c82764cffc0bb88c29a845557539bf1f26022100989de17c249bec15d99dc0d88678822ebdc1ca87009143f90913fdfc687f2f5f 3044022013d3b690df6696d4ee3900b6ccc8e4fd27d07a8ac19e90b0dac4e5e7eea5dee10220043f001dfcc17537c0a8b474b08afa6969d262bac099f461aa646c7d3b07ff7a 3045022041f18dbbba516b5478a00b389102933322a59f4f9f33417daed6b78ec31aa535022100f6063d9c34af78b1dd200a16edabb2b80592e7185198be407458dda8459e13ba 3045022100a91c46c5e54236fef4b942f82f3222621f808bbccbc982e919ec8eda32ce5006022070bb315b99b23a4906f446a8fd4f601a9c6ddd668e1442c1172963f69736bfa9 3045022100fdcdbb93500caaf3c39e1ed10398368ec65a3f81673073cc5456bde6b350a5ad02200968d555f1196c527d021c02eedcb0f3f526f973443042db4113a36e465ffc1a 3044022061f872fd0e6f62af9361108a1f70717745bcc79fea1487a32657b751afc1d301022058fd0666806558b9eba29e4c21b9b36ed43f3cc23e2da0ec14872db15d42ce6d 3045022100d09271b63d7bbf8d44b99dd58eb50d228c8d9410366fc2f91c521d8a6798ea8f02207c669a7a1fb09f80689286713ffc1dd7f8d3c8fae77b14e9203612921c9af05f 3045022100b11d2ad2bcf7c802de4c1e8c1b8febb552c47d6aff01573df5abcc384b9f6cd902206c27ef9f799aa1de9ec860ed26a593b1f2c5acde86c359b6bde5e8f5c8776ff8 3045022015c2ff8c22dd32373bd12320cc8e08e31691d4d1bcdd07b63b2c9434d3fe8499022100d6ad9d0347964fe88b51d6dfdf6d6334e9930f3e27af96ac9f20050aaf5e76da 
3045022100dc5ec4a45ec4d651f1da92ebac253fede71ae834a4aebbd6eca9584b81ed92010220679220f157f5dc9664035125b9f814c9fdeb09a7accb6d29a6451ad6e47d5688 304402200569b6421451e96fa61d9e0111ddcf1aa16bad2f434f84f9eda705ac9a55b12b0220272bdc6d124208d8ac3aa824f8966b48f23d73b391b37f12d961070b5f40d7dc 3044022024b08937dff57405216d4cdd59ba55d22625b3b6b7a90e9dbc1e71122cb262ca02206b09fc4a4bde4dc7dbed4d9bed79c1f7cfabfd69a716bd67f0a6db5053717b32 3046022100b713eae725fb30a6be459a29a8d9a9f1cee4d32bc368776a4c027cc29cf1d276022100f93d460229939a33d88afab7cb9d5a2dd8e28a9936f955cd154ac1190e66bc73 3045022058dd64fce19b2c5192f522ce1e72daf5da9a174b20bdb8435ddf91ec90871f00022100926a55ca47ef808026dcc354fff3b66a3ce1687a11ce8421bc7193f69035a3fe 3046022100ee830484d4354c92d28fda337df24e3a52051e1ba665a866b7a24ad5ec0ebecc022100bfc3483f0828ff631b132b68e97460f6ead8fd529038c7ecccbfebf42327d1e7 30460221008d8f61e0aaa156b58d0b3774e88bd3eede11416e6d8f5b0aa72e7693e1afeedb022100bc1d3af2dd1d33cf3196f6788d01d8f22f518693ceb8342c5b4f6b65a9c0c7e8 30450220659839f69208dbce77b4f4fdd84a54d487bb25f5df5f3bc4b502cd8c9167668c022100a5ae32b88487b5c907d27e4b01e90a307c9be284a73763629926d317ae79a094 3046022100848428bb59072ab78c868b2f1cbeacd8d93963337495d5810df137e0b7d40caf022100a9092e5f0e2034963003d3edb2689f9cffca694689939f6add160fbcbf46e08e 304502206a07741fbf09b8f2f5003a8bbe1a541c1296f20f28d42980aabb5aa8ef9b1395022100be87b8293a7e28f8771d79290c5c99af6a7f8ed30e17f3565ae90fa940198f2d 30440220172d2b3f9c0f2cd30beea535a0bccedea251313871a1885f527da31cd110ea37022015701c09eeab062ee46a79e168495be3c9d50f683fb7b443679d49a20fe2a07a 3045022100adc7d70d941122bb6461083fa2bf65d8ee558a0931ee9606d030df61cee6d6e802201ae5785fb7feb57e9cc58bf21243ee4f46012f0c07333f18ca466d160c9b554b 3046022100a183c18121ca9054806ea326f3c164361e94c0be3eae3d34f0e52f2d1edc633b022100e4c642a5f193ee3a97dfe341e6ee9140e0fb01404b5952291fa3bbecceb1ab7d 3044022066858d58610eaffb8b00b2ae46e8ee03d4132b842acb884e5bb362abce4271df02200c69db46982566625d012a074ce8255457849e39630b31fad1dfc61fed9ccd93 304502204567ac54268da4ad0b02aee765e5d8e8e4150a9452ffc01a51a1e412598b173b022100bae4fa441d19eea063d998ac7cf27350e00127268e095e15f06240e35872e0c0 3045022100e35ec637a0b448467d01269bc5a5faadb5642d6c964035610d8703a7e50b6e0d022026e64a31318b0e2267465e55f95ab4599044936a38425541118ce3f3cde05571 3046022100dddc0fce9d0245811b0750a8a127c490b5c41d6343a74e1e19d06f8dcadcd7b5022100ebce02f8c0f72d8366c137b6e760edaa91d68203246b20a7698004d2a9c9e37d 3045022100c28e09c30ce600e3ff971192650af206b96bfa7f8ae06e8e8cc277e344cd69cb022015bec38285bffcf4254a4b81b230ff8ee2611b08f7c20d5e336821d5b95835d2 30450221008f6dd31e218f78c943dcb553961e6bb5c23ebb46f8222be82b50cbb701e4a2d802203abe2ca827eb5c9b2bdd9b02d691b3e113443efe3c223e2dc79a4282f1d9dc50 3044022059aa7c8b2598069ecd6b1bbc3841a36b27e3d20d17c93b6361884b5951e13c62022007ca740bfb1d1e7770bd28460422fd062659d54a0d427b856cf1f19bddbdd8e0 30440220529a613b3dd44363d5ade943826300664631e53e715f03524daa0175539fab6a02207ecaa4159546f58dd6a155e217f384d7db850fffe298033d8775ccff885ce43b 304402204c2381058dcfeab8fccc6f4e207b1312272f10834e60aebe1c5ca044327a9c8802201fe106cd4e9cd7c274fc270531d33b7f7fa8ddfb3bf1bfe9ebe18197e1962457 3045022100fcd5430966fe048dc8900496e9ad8f27f58de0c8696cc7616d41fb45156355ff02200eb527e315e46083ac2d2fc00cf8ee86c351808575d85c64bf433c6c3fdc8a19 3045022100c3aa60254cd6efd77d59c33af6ccca8de986b40d7d8249f0e86a3dede6466f0602205f43a87bd95d77f26741c63e97b0720201acd65a8f413fd34d78a314bf95e453 
3045022100c7c1ba75424b922410ad62039061c05441bbd736f412d34bfa0865dc865367c1022046bbe8d69d4bdc2e837aee18edada4eebca7b07a9da38f74809e8f7cad29cdd9 3046022100a2811260b12072f602695e2e29ba06f840928a178c4256b190908c7bdafba8b1022100bca95e5f0e60da4f7017a7ce42d86c8a2228d4c792a63124ad9c48b09379d576 30440220569bb8bef5b7cc8b3620b08c5eabfaff86d275ca6f0df68d6733b8a47f10964302201f3c10f8fa28226c171db37eb14fc692f89549e93dfe02a24a5b940d5abcdda3 3044022043dbbe81df3c637b7a93bafdf584357a0d6454993e32b0424adeab5fdc67631a02203c5127dfe40820ebd483795d3d98b3e0a810cb5fd28f47e81af919038f86e94c 30460221008d0093404e2d4839e7454c15e1c3b3e8fb6829a7d1f9fa4c29e1586ff697fbf8022100f1fe34713d739bfe4a34c8b9948f9c1a82d51c55eaf5c9693a66c0169118b7f7 3044022040f39d0a5f1f3869865215a5162eb42253fcd6872dfd25b37597d42b142959ba02206bd89353b230a3a1d04ccb2e0f2354765c57cef95d63ad396c346ee1ebf76678 3046022100f4ccc878b0fcc1fca787581943042ea8050bc1c609d6bb3fa3d2134b40d7e71a022100f773d9d7230cc67bec8d36ba9c23198ae0ef466924c71bef376a611c29b1f239 3045022100acafbbfca55260473973100d3fc7f1a1a9ccd17a37c35e290ae32522073fedf2022073b57b755acca38396f7a99f2206cf125f7e59ce2e2eca54641d60a299c9e797 304402204890f0e84d55f23b05abcad504c991187985e988f19826fd11b66956e74a4570022053bd7fbd015d5dbfecd6c3b961981e29bc393bc6fc8533a1dc1721aff1d5199c 304502204a61f65b0171765b2b389473b9700d12871b046e7c8a2dddd588d856b440eb02022100dfd716dd0feffe574f4ef215067dac9134c0a70b806541785a83239cd6b068eb 304502207344d39b267db61da0bc000d59ab20d04c8e255ec85c7e81957a57f2b0349a62022100de9a903396801560862f3bc669e09bb30bbf13d2ae8c15da69a9837db62d2155 30440220061fa16bdffd290663b4694f00ce6041d2be933f6128b0b4846d7a806098cd270220635add98775b4ec230b95918b4716620adb9c0440910b39e7e41b43961de0685 30440220749447b5e3bc154b546a91f97fb019a6a66bdfecd3de84e04873b52a5747e9cc02205f85ab3996162a536e686c44ddc84f64da3f123be45b18cd416faf74f2eda282 3046022100e9df02cd91f4c397a81935661b8e964c5a7278123ffc9f2c1476fd082dd8d485022100a9b35e163816af56fd9a981d562f05d59b328c4594da812f42b6bdf0eed12b8f 304502207c2aab5b2201c9ef85740920d3de7229ddfa23c2be04560a80737dd7da51dcd302210092fa73f1fbfa59d7c54272c17beb7644df6bd15c3fc571a767fbaed93b2cf335 304502206259caebf8c2d4b1e22bfb60619c2c5c97119b634d9719860565abe38736043b022100a76b6760de2d8978709129b7758f2770a3f72ac540ccb70645b774ffc3cd21a7 3045022100d0790f69fb11b8a1147752b20466cce22c1aad4c7d74d525af3152915830ce18022044f14185c7c9c2ff80f954bda3ed2ce89ac72485c92e58146f92b07651b14acd 3045022100b563ac3c98c44c64a2d455170f6edb484526982f36fb6bd40eeb82185be28c74022041c594e8450faff8bcb3b0715f0b290d1ac45eb3412507f54236f3265906b632 30450220557f060d53ac225a2f8bad874d4503523a0b7975d77555bef841d0156b3e93de022100c4143bdf7d6701ca663b8b19a6c4659a286777022cf89ba323c902e0ccc1499a 3045022100d9f131cfab0a17d5d5ab6938bc61e372a47183576425b6ba9e8812d164bac89e02206f67fc3dbe4e0369dc5b257559f855035fce8f726cc1adad54920d3671d7f4f4 30450221008fac2c3fa71e60ba78b99aaf9d2457610769428c0a206084dd57da37412153d302206e4bba5fc3cb0dc3aa5fbe2c9640d8cf177d5acd5010d2ed39d35a84ea032b43 30460221009f00fb8f3584829b374e6f2468e1a7325851f4f1413937ec2db6af1d4678655f022100c46d2d330fee4cde8dccf7c71a9de89d4b3833eb24e5c244701822b8f2cb35b2 3044022037efba6bc0d26a82c20a419cd5367f2480d3c062513601a30cece85ae2e8f5540220139a07bd1945e1ddcb0cf3fe1dd339390e93e12f46d6f0acd2103e7d6fad6c66 30450220737c90720d3d2336e5a66c0833bcbf1ea778f5061192cefe080f3cf5ccadf972022100ccbbe08af0ca359f508c8e916209e6b0bad6ac0e544a23591afe18ed225a39e2 
3044022000831dfd63c65ee13a6f3097a6cd9e413036fa6895e367d8c626029a04eaa30b02202f953953513cc309182920ebdfee286d556114bd102208a14d27e8729e4e0fb6 30450221008bb34236e8912baa52243994ef37b608c3f3dda19b762c885f79e9a10465be3802207c38329d955d171f93ce1bc98e9a8cdb4400bccc349c9a644708d4482a1ed524 3046022100e5296745b02bd644b03e30394f12328018f7cb4391510e3632866e3ffc7939c3022100fed943d9ea51723cd5f3285f513ab2834165dc2926c99c8434a7c009b98e82fd 3046022100f0d598e0c8209490b25fe29462ffc07dcbf5486c7d59caab6ee55c25d26af549022100c44677b472608263785bfb15c7871ed04c8b14ee11ad06a6f526b0af47a178a9 304502202babc548bd7fa4f030fc3c1a4867dcbbfd5344f5da8e18f72adfc678cedb0caf022100afb675411f1ac0b43591fd3122f279059efbfb0c63ad90eec06c0ff3a73fe095 304402201c749f3c2428ebe0055b11e410968ee5f0a89369c30fd03d688b95c17a00e1e9022037265b8f2349847b002ab7005d0251ec1f5c46ad113df6550572e9f362d84997 3046022100e74ffe033e7053fde3213a500bfd9a7b0baeb2801cccbc825067506e4b75c33802210081c1858c611124bd044c25f96561a0e1e774f1fc4b01b76fcaf65c37363f8ecb 3046022100c578bd309c7a2a92363e4575b9b5851e1d549b0648cce51637c94fa7eeca7ab7022100f6048fe6d3c3ad6b652256e0c8b72f936e0d9de033f565090748b4e87a34e840 3045022077d29968a1aa324158ee9265e96e4658470d127314792bab668dc59d18062828022100d69f26605f6dc34da0422ab436b063b9b334c3097a3329117dddb477409a50f8 3045022100a04202b96cf9b560d86d782a7fe76336fa0d33fd70512cf72384437b7d2defe302207764fcd430620d0065817e1d6858953ba2dd056431f6d96d32d766dab07645df 3045022100ea2b1e02d26b75e9d9f6daf04080bbad165613cd2108046a5ea7f9ed1259d3ad02201aebaf5afd3ba1f38c3506a0452dd9be7c4411e473a1134a814468fe3f8e8a13 3045022043b5a79132d304ac1eace399e6061783ee8824ba55f5d234dd5ac426553a81c6022100a03383e33b372ef52a2a0f3a3c0d6b4753bd396355080e6f0dbb0f8c78f650fe 3045022000fe4ec02085656798fac6d7feb63b91ab7df17b9f888f8ce10cc00c95ecae85022100f5ac7f418c8ee3d6cbc56d33e6e4eb1fda4707714cb78a0db2b5191187df5724 3045022100f475fe150bd1f3fabdccf1dfdc9c194c58ae8b3603be63242faa6f1a87abcae402200e897b621aeddb252b302a6d3eaa4c94276c170b67c9f3bdcdab300f95fff78b 3045022060fcaddaaa88c39899028b22a448415c18baa099d599b2f3c63aea245b36a73f022100bfddcac4fc221bfeca4a730d58c97e0b0537760eab0d13a2e5bfbb4d99121409 3045022100bd1c2a6884347deab76ce7771b49aa6c78765665925cfbf60b1b16607d84dc130220105f1209e525bc034d35c95e04a088d9117f4eab90b9bd8845c1bad202621d50 30450220753b1c9c74be1b8271cf5e72744227601afecc0f543ce9ae737492348853ad23022100e92e8c82e3dd51af0d8ac2cf1cec4386728f9916b6666f4b2153fad7f6c534bf 3045022100a6cb5a1581f2f647b567c74fd063cc791e45eafc2159f0f633c822b7c19627fa02201b519239e403a4a0c574dd8c55e411f1963c516e681db2c9aaf83bc44d8666d1 3046022100de865ee5d3053ff25d727a9f67fab8afe385080c082a5a9bbe3052c59cacd44c022100d4a747a5c6d893a1808143b6f8f6b7d76481a1d37ad6ce4488519118d9988f44 304502205d211d863052763469f5360a5ce21c115b9ada15502693310ad6e2f50a444ef2022100b587e0d61b890e5b740facc747d3c75e21406f0fe1e5ffdd7f8625ccca3fb577 304502203230a0e49b45a473c48ee9aa8034596a9155a02eee76a6782660522ab4fdea9a022100c84739a9a10fb3307a81c6d2bff0fd995daa1c7722050f84ca7458e1da920969 3046022100e044df1a18f7cb9dfac5006676c4f8449465b447dd67ddd19cb3a35c711d08f0022100e4f59c9b1f9b7df542a2b9bba1493ec30f8562d9d71ce01cdf9caf6c4ac3bf1c 3044022053cc94dadaea5b0fcbeee7461f7760e452a70bd702dd849f055761be836db7a402203a0b81134445dfbf6aa14a66688f0a61f6bb07c6636da3737db9aeef2a3cb98d 304402201fd9978ab331af56cb8961835fee58be59e155d946553f3ac797a7d6fbd5a2ea022060aa9c684b4eb82a720a27816dd76a997a4538378fce0441e80e4a2b5d698153 
30440220205960dfde3b38dbf849da7d4104ea621e9ad381e3695f74d74befbc0f19722502206ecc2d7e32a741eed5aa99d91d7288627746df7b9ce76d91c6970c740820bf31 3046022100b171888bee594f8ff118aaf8567de58582456467ceafb1fd351e58c2d82255ce022100ec4cb4459923ea21674452a6427446c58c2a1b84e56deba7ce4e926b4e654c98 3044022017cb058ede683e8571bb188d0318cb741b3aa372d70f4fc2305e1f252e113b75022054821827decab09474cb959a10400505c9a02368a2e2be63f13cb45d4d5171a1 304402205167984ca51e3747e06a94c31ad2664512078c5242f935415fd8da147850940b022076e7bb084820c08a970ce38cf40180480b336aed484e365685d4a0610e575d47 304502203037f7cce55ec36ac3b88d00f337910bccf29eae20f34d055ccaa86c8e488d780221009a6756247931429e8f46968fbe7bdda9a6f2c329a1ba4a6d6adddec96e17109d 30440220417db27b386d39ec3f419b132bbc81aa7e31ec55d29dd3b45c9c5471daa4395d02205b1d35ecd5f4e0251e4d53b1da4dcf26f973ea680c668a66b6032f0d46eecbb2 304502205df4acc6632752dfc14091cad56c29d428d278bfe371153a1d4538d578bb3f68022100dcb429aa47163ee9ce943f039093f11ea846a106bf44de82e7c73e3bdee5bda5 304402204b6754010521f4818024606ea9d0aeb68d9ab50373d75a8dba4e16d17a21cc47022039f1456020516f96bb006f1276b4bda5e20092a45305b6f4ee4bef4672714f5a 3046022100f4c290abe0e14ea95c5c8953ef15d6d5579ffe00e4df646af5a32d85cbcf49340221009119f1e86dc603eda25d178e756e0b9940ff0d3a71c1171118155c9e51c4c71f 304402206358e28094f9d28b26eda5a95e53d892c434921c588e3a198b9d6a3b2d7c6e4f02201aabc75fa5cb7a3f936e7bf889994c997c390738b1c8f0997e3ea469a2b6215d 30450221009d7716e809a4f09cf19d594ff599f2e92498d2a8ce3a9c503990ffe31b7e477f02200e25cc2f6d006ea708da5332b516bc2640270f815c0ff60c07032518e14bd0d4 3046022100a9135fa4d709ccc9c885d87b0d9c4d82452d2e97654915009bfe362e6a88e977022100d04909bbbbc64db84e8cd6cc39d13f1165d8056b2f60ce2176ef9ac3574cd73d 3046022100cbab59b6781fed542b48e97cfaa47992097bc78edd9d07f6a0789d25d8fb85c9022100e4563cdc843be472cd9bc7bafbb73c48a5e52fa9536d02aff86251a2e0e85fe3 3046022100c611bc0866667178de82458593bd61b5854cfbcb8b785a04fcd71121c8b7293b022100e3901fcc5528fa3804975997bfeeeced31d211fc3165eafb319e43fb7d452b95 30440220388864f1c1e8d88a897abdcbe62767ba05cdd4baad3327584ef4c660d61fe4c8022053db7ccbf9f2fee46b6fa732a9b306f7b0fed9a37fec4075edff765a92a3bb30 3044022015bb1d8cd899f8f8f88c9d1129f29a4f7f39b27f2d15fe320947cba7cd03292d0220341036ebd844889869649c8ecc2c03573425b6cac9093588a91806a8cbd67af3 3045022100ca394e9fe86f579ef7fea74cdbe03a6f2acb804437ad91f17905aa2616d4948c02206ace7339388c8b7ed1241209ad32dd69c2d19c14937bd0f3000b38d06c48e6ce 304402201f6d15b0d76c335782ccf07d8417afb070baa2c76ba3c872609bc6f1f7ff0c10022045bc322fd983925872083e9036203a203fbe14b000a315ea13531d81d3c54c05 304502210082c7f642b84e5d5027e92c1bd2ec749582b94e597713bd13646201d0aad560a3022075ad9ee888666284ab369132f6de76a28ae9985cc8208e47a91e42a9d896e19f 304402205cb0c01d6e66972cc5a7b23e123b62ec4d01f7a32d7048c696454201a281982802204e2e59db6805323ff94d7a04ba9770fe09e13fde7fb9220803c72c0e485d0af4 30440220519d2b6ef30d810168d07b729f8534a20da71bef11b4b51f039d3aae99e5969702205d69973180c6fe458e0c4c5ecbefe4fc9bff9bb59df0248d7b3f1608274857be 3046022100c0f6881cdac36b694b0e6e87b25c95bc21047ab06e0e1825b7f97ab162080478022100ce584f49576f52628840e88be204ebbec42d9de85830fb898bef7d4c4444265c 3046022100928c9bf555983b41a9e9cb80ec8f4670ae323b62636f645a8cfef1d78892fe23022100e8d5a0011aef86389227f44e1a1395bc0725974ac1ad0ef97aaa68a87a77cf91 304402205ef98a05c75f49ad120ce4e4acceeac3916fce37e6a71389dcdf5816954cda6c02201602893916eec03a7b7d4c3e9f34f788ca2cd389eb7b8e11d3f5d873bc3ab82b 
3045022100ac81cb9f911d3c5e7ae5a485ff87e04c79134b448253e60292e851d8dab9c44802204de437ba98c2a37c63dd167225c21b6c1149561d3625b72c29fea3b3deb12791 304402205cc3d4ffec7599e68713d923fca71f0694448a1a172ce642df28848ef4292a330220178e7c752449f230cea49c94bb21b7ee0edaaefad9420c41135f6c47e5e8947b 3045022066177702c87849a8cd35ed425bac3d8a1023d8e6aba955945bcc70682b106d5302210082fc6f14939b539d6b11395451706949448bc8eb2b5194cc81a0aba37b5a7bcd 3046022100d4de20f68ca556ffc176ee24452e66bf69eb653bd3e8dba5cd7583705360bf34022100e5e86741737cfab36ab69a7fd7ea5948a1eed945b9f066a362c5cc7fee68a225 304402204bc888cb463dc36e61ee9644b7be6a2f1cf41cd159a8cb0bc2d949e4602e2cf502201bebaa8f2fc845d7c70be218f87a3dd501a11e7c3fcd778942e4ec4222ecaf31 3046022100ffc17576905abfb665f5683a15d7cc14fe821d3aec616255c3214b0704caa88b022100a517a9ceff85f68ac2932d887c2f43e125c50a0af059792d18748859331686d1 304502210081c74cb03bf42cf3f574d90f29cd7fd1cf5c26b935d55c1c4ee597196b88bef20220320cd4ce0ecf14beadab5bee7a719ad687d2fae5c314361a25b5fc72b6281bb0 3044022052f98e071deb66e424a3c5119feb2b58c61ddd01dde30a3256ae1be355b7649a0220429b959ec5e0a1e84e956b44c570cb69f664a910f595d007eba1ece46131552a 3045022100d3e832c1bff6fb69604e54830ed877e2037bbef1b0061b85ab4a08ff103b3aa3022065367d9b3745b35d1013e3fcd7b7ba89e0c0c313749f70b5eb7b15112ac5aa7c 3045022100aed5addc1e0ef7386a3fb695964bace4baab5a61285041556413282f175e393a022020c9c997a9f06fb430f5a4665958e554e8f41ba946273b944a4f1a1028522a59 3046022100afa5c19aefd8210abca33f2d0af3118c493b34773c6fffa3cebe71ce849175c6022100fedd2084d18e3e68586ff67ca5cc09a70cd6c12f4b894f17fee4f8982e8cd47d 3044022006a46ff1bc6c238469685a7a04a18edb7a273a3342b5e59b3b97c6f6ea15a2ba02205eac4a18fe3eca391537db76969a3a5c99a2baabad86b25a3fbc054a36476c39 3046022100f2f985535936de87578ac29e1062b2dae43b1a4c2e974fb83f0aeefbc54c3ec9022100d3c7e99ff93ba63aa817df71b8f94d708b5c993b84caff8b9ad2807b28653c67 3044022022d7c72acff157b6ae5ab153d8f45005100040239fc4b05f2136fbc87d8c020a02205fc57b10a35455dc57c14efad0b3e6a833e2be13e54f59a763fc0f2b232b8b76 3045022100ccd4448b060919f38ad35c4e3791a75aefdd338b1ce6314ce06cef49ba7c644202203c7651d7568247ab14dcf0f4f2de33b8bfaf0861237347dcd500c0e3036b90b8 304602210093c6f4c5a41c7dbda51623d730bc8a1e0de9173cdf39369cdb29cc6ef86247ea022100bfd44d5c2bc20a533b6a83f311dd4e25cfd2829d6adb876f66183d241dfaa36b 3046022100c0a5695b63ef5cf2cb5598d4749ae1dac848adb725b4a9cb372d5f93da3f8bce022100e29fbaecfe27f0406d1035a41ca283dcefcaaa7216d277d2eedbba1b00f744b7 304402203c89ff379df6bb8cdb582e6e38aa2724f9eb39793d6a93129d47417ff98c2d9f022069c2998bb0a622cf36bffb17af97e5f9e2bcf53ec9dcbd8ea61d961f438713e3 3045022048c78865051f4ef2ee42c45e09456c694a5cb66193e962d0f1d34af39cb97f25022100ca8fc649b7c1c6ee9c38088e44d1c0a38b323934a7d9b491f79b9b96c9f82b7d 304502200f0eeec9a4bcc1d40188f1e989e98fca5c9dd40ea43661229c374ff755710bd30221009acb3ed079bc778ce65c007709925a5e222ff46fe1e34351f410c59765fcbe22 304402203747cc21597d20f9af6d6f2e86401bcf7f79c34365e5d1f1e603a52c8294dd180220207b87b4fda864bc7354bb7747b0145460776e884e5baa1a9b26c307a451b6e7 3045022100eeb3f54fadb0be0afb63cc42cbe485bc27b50c0a0c2c4494a9b193ace99ba38f02207c5dd04bd07fd09aa84479153fca0f6e824a5c93acf42c31edeb3f1f774b794f 3044022048fb3f46b8c39288e6866c3b1a1657372ac97f4131ccb570c0ca8c78950d3d9702201f3feeae57419ed0d9b8e2fc71b6955f697499b70e5feec1cdc8253965671971 30450221009a325215446c32b50602906f06dccb91e45fbc8e9922030e9b19fb3fc57371070220621e883ca178ba8afc521d1cdea5047e7ad6a1c58e558de9ea9a628c02e55986 
30450220270024214cb504b2489729246976184f587add258d3c4919c09edc18eb0432f5022100ac1b3207b33ba0b13ac071a84ba7a0e238147dd33535fede1e3468359554307d 304502207df40a68709fd6fcd6460ce8034b01d5e6ba42e5cd24b123cb4456227cc4421d022100de8de530d2746e042cf3fa233d7d50d45089656bc9b3c799c9a6436554474af4 3046022100939df5b13c6b4c08542d927666954c8f39b08c96d4c31ead87d46e5903e980c8022100ac66ce149408052f2d0ce9db6f7fc39cd4768590cab90e227ad9f3c288f63446 3045022100f0c03467d395cff45a79af83345eacfba253d8b6f9daa9fd39833e08ba153a4e0220342513eee68a7ef36b91d87c23f22c346e7285e8ded1fccc126e42c2bd3ce494 304502204a2f7450e12fd480d946968dbfcb964121370377032858a015eded6b778063d7022100d67cf6ec48596d2af892c4365b1fb6f9b9f560fb812b58e490da51d7b8cc581b 3045022060ac71c15e1a29af274a674d6aa7a6aeec99142ffcde4a8fb4fc766edead82d1022100898730f5df436bde3ef0ab97f9839a222fa6e7ce0e2ad7c52604aa5fbaf955bd 3044022062f0bb779e668d64b452db6d359f8fc79bf9286fefb9fa7298a26bf66e8709020220543ef51be067f477eae3c1bd94bd5f0d5fc6a04fd507265c869ae49f9dced978 3045022100dba94a1b10a4fc8a330fa4def3e6d9462968ba575916f4f8d647e613bb45f7ab022037cddf4f1caef7bb0490f1b67417c1bd817a7f0d5664b954debfdbfe6ac28883 304502210094cb75346f7d836ded4f9c52e467c3bf16b952f834b260facbcdace86bd574bb02203fd53fb760b617db7b57dd3d6a8960dd2bf5dfc7142b3a945e4ba133f57289f4 3045022100ee0c860a877dd0ed8ab928eb4b72b54fcfd1a17329d7bba5a235e2403c21ed0a0220589a11b01d6632688a3e8923ae70144fe71e7f43b5ffd00fbb8ad603022ccce6 30440220184206f8bbe7748594b3a0b007e2536f1e128eba8d6b4c794111216276fd222e022017a83de0fa528fb37fee6ecbbefa10fb4e2e02e7d98b5e6228ec9bffadddc4c4 3044022067b2ab5ea831c23d29ad92b6ed9665f4e051eee273bcec265c768065e266c9d102207ec518f69a01732935a46188b75968c287987496fe8d401b8168d015dc98e901 3046022100858bedf5fac06c1cdc96886c71388ceda78dcf8413a145898ff618ac112b805a0221008927aef4e3808bd9a632984b69547d190507c4f61fb1f98324e0385618dfef97 3046022100ca3962fb6491079cf3d4aa21f686b9eb41c9915a90cc832a5555b00160664b2b0221008a0e6fec838c713c6f44ec3a10a488ed55d3108b90fa3b94eb9a0bc4a2f23de9 3046022100fc3cd43723a191b496bc5e126ea53234a282d51d4a46b22e9e48de1654e29cf9022100ffe41fb4cce3ab623cc5482ac81a96dd9d13c6d770e820722a4932ebce2ba000 3046022100f8e6a13cd22406018d0c78632948ebb817a4cf63ef9060cf2e0287af43fab5ca022100e9ca430c5cc009a25632be2e279837abc972d5e0b253aa1541c44501062d29fa 304402203ac0b581467fe4473d205bcebfe74974da607641276355c56d43d51fad916e97022001115aa218ac09f63e422c419cbaa54599927b1bcd8391cefb96c66bcc82feea 30440220419b114d44e538d957a8eedc33491d5b0d8e966a3dd430f92a3e7138e026daf80220373c653cd5af56d234ed2d2fb3a835f3b9e200f42727e8a65bfd546e7d443bdc 3045022100af353193add761f865ea948bb9c2144b1e24a2420119375499887fb652f654c90220628540930209a6f34ecdd7093d0b31ab76518a772df3004a96a33868d4c2f7ca 3046022100b566e69cd5f7b00e14ecec735fe7cf6e6bce2768c8dabfe0745d4f897a80b68f022100cea5a3a31fa4046bc815a9c98282c5d5a3986db566b756177484df17b471a163 3045022064b7f3aa0e4d53c228825162dee00b3899c4ce13204fd5f486243431ea35d9dd0221008a0672b43474e1d25559b7c9fdf1d4bc1b32217d20163bf6984bc89ae2a8f839 3045022100aaac3ecc253d884e684fae8944cb7495e6d1c6a2e5780bd8b90fa29a1c22081202200747c7bc8ed516278001eb870f23202a5fcfe21fe1f7688f5f7f1ca50142d9e3 3045022100f0ea6edb18a8248052da629985c027715222bc9d45ffdd45b1316b919e6acadc0220533cd6778b884dd64d7154bdd799856383fdc5703e26ba49cac4a982f9f20749 3046022100872cb33316c3f0b37a943c9608e8c105494e789bb624529f04e2fda0e3f60f53022100c01f56511b30797b0644904c232ff60835c9218f00a4c918aa1d886298cef859 
30450221008f26d7b914c977cd1acf4cff09f62bdd5e81ff462a8bd66135d3e894b901efbc02205458ff0a2641244929c75b56f98a0dbb9f7f58cb35fac9aa408dde2a1be15ae2 30450220377926ed623ecb2f5974a2b5a051843666e8e5cbe612fb122c61aa7e8ff337c50221009e9428268dd0a890ea985735c550a0549c762951ac7566fe13e2a5b49b85e2bc 304502201062f560ba96ab83d8296f4c64ce0c147cd7c175f33d67c10c99e5baa02e8b5c022100dba094b3915175720cba2bb8bae364a4c8da01bc5f550c33d2577fd4dd23a000 30450221009cb044f75df605e1da4fd8c222ae57e58ffb7823cf5e2fa6161d84c3a6bdff6e022055544fefbf7570dabbed95b637539a7a8b7cc9282fed00aad3e9c2096d9b6a3e 3044022064b1a4613378ab90093ea00f94537559f8734b7c9bfb09cb4bab81654e4dcc1302200212b57b397c390a7bfd9e51b61bd02f51204d358414a503e885d5beea98a59e 30450220552e81425b04921e434bfc6ff2194eddca7231faab219a43904824e6863a38e0022100c5c9f5e14b028877a23b5679d387f4806beeef891886fa0f6a96ed4a1e2b0361 30450220624e3b276ac998d9ca5bd69f112feef25c9d7097011496d6ab8f5c6662f18cfa022100d493e896a1e64d489f7b57404314c99bd69100f845b6fe1e1d314a57feca72ab 304502204a7810df63febbc8631087a3afa6aa79c999145f38c1cc8441a9bf558f44af8c02210090dc94dadf913c76ee7454e4f77f605963429cdb370ae4ae2885ddeb8ca45135 3045022100c576dd9f65d51d73598dfd6a763dbdb81cc39676820e4ca5a271092ef6c7ef1102200659427b825ab9c20cf35c574d6ed7d342f2738fa3d1d6dace7caa775bdf868e 3045022069386b68d23b7556976030d3ef8b262cbab22ba9f8893eface1348ad8b37d663022100a347c2049b0fc56e92cfa40ea2b4a77695ba8b13d07986a2a022f8c16548fdcf 30440220539e5cd99265afd9642eae3c3099ef904a66603d2e823999c573a98a68ce9c8f022006b04b57e1449e62679dc8799ea993e5f23a32194da8c2e9bfd9bfec1fe4ecab 304402201f8d52098e64c1f31dd4ba52b6fc3eeb92dfe36e83ee332536616c39cd7800ee022028307ea177e543d02282e02f33c2c1f9e3225f3895ccd4adc52a737e05915b8e 304502204bd577c908bdeec249ecf46112a71bb777ba23ab4e8d49e2b76ea95121728a6d022100cf68bf23ac343628f5adc6230cd31d46820f19bf047cde73875cb91e0e6dfa86 3044022069ed1ba3402df2d339a2a2eade193fbf396c2f30958d0c660d54e3483fb35248022026a8ed956ee9212e97513a1599321ce971b09c27486bd56dc957f75ba9738312 30440220206af27f3d988f0e0aca196e48d45b4add5520d5d67d3f9154946a7fbf37a0780220465aca7fcb2cc96e2eeafa01db0d4a52fb4d06e75ea24cac72686a8754b5c5f4 304402202a703f5500db62da2469bb2e23d918efec3cb73521a3a2eb75a4e30e1dc7d53b02200867226f678c91c9035a963fecd198012c7ca5a0bb2acac07bbde77573fa60e7 304502202a5be9a2320e3b281b0c9534ed270c603a9b4ff8ec70214dbf52e12aa298ceeb022100f64b5af8cda99f6a5be3cb912972db545deccfc7c9827c0f48dc92896093ac5f 3045022013ca42f1ce6392bdbe05c29f6dd6cd1901f5f7994b7f1229827480eda093e906022100fa8757b07bca3f3d0daa95ad0839da719d66713a2b5746327baeb29a2d7b2cff 3045022077a5495d4fc671b3f9149f8db9f47719cc0b85afeffd2bb4ca9dd8dae26cfb3c022100b4f718d128a39e0a9d315447d0603285436b0f3f1e582792b09be8d93128fc20 30440220317c2afba7d692f31624551d15bd2694a3fcd69a13c4af4320fe0a3aa1e7623402205027bf1ffbc921b05f9b5bc1f61a936e2a41ac59099925d0075be9ff36616ef1 3046022100fa534f64549bb0757923d324c39b7e964161ffbb65023138d6dcc33ea70cb3f402210098430e92776f7eaa95200535ea9f67ff9342113455963c9314ceba059767ffc9 3045022024b3eb537bbabeed9716442f256e46d847f2027eb832053e55bb2be6a6e85f62022100bebfc4b9096bdf010732371d09624799f6726b67298c4999ebd896cd85102a88 3045022013af381a7ed2782119c9d6918e4546193733f86e2feb925c3e13ef6f0705c2a4022100a80919063da69e1baf4332a7196a5efbf1c19d5241e4ce325a79e97a5bf3506e 3045022100d28a17c7ea8caf8fb1eaf624a1e4c08a13e43a04eb555458f0264b6c64af8c5002200989961e6d30c636ac0e0f4870cebfd79d54102f6f944943bfba3ad7297a8f1d 
304602210083d8e2b6884f3d7f7b3ca46513a9556aa143c9faf502b69657f1a19743e75875022100f5b1080ba072d1ba94b18702f8b34efd4a23d2bba2739487d70f234a17fe35df 3046022100acc99294e3971700314d36bae241f4eac10927aadc9cd17e5099966153f155d00221009b7b6a9a6d9300a67185dcc27cf6897483e3842de0ad6992658e574eb781556f 304502205dcdd9bf5e29f99101d649b21fb211150f3ad4abd1f226ae6ee95dfdb995bddb022100ce4f63b22c2b7afa76ae45abd271d2e1cb9eb69cf0556a9d2436b7e04f2b2e8d 3045022100e326811f5043e6b0edb528f63087baebe66f718ea300d1af2b8eeb3977e460e602200f001a052ab9ff716820807bdcce4bfb85b932448581aa7ac7ec9bdfc4c80214 3045022100c274f9a2348e4bd037e26387fc0bbed32441024a83c01adb81ff94e8202af94902206e0f325947d86d4182d789841f756926eeb25b9107719b4dc5bbd976cc592f9b 304502203878b6eeaa1fbb9ecfb99119838d4f5f70f8ac01213ea389acc5dfe1c29fc6f4022100d6c2170b6a500d5c436197d62e938aeda5bf21a0c6542690d3fe92942ad187c5 3045022100ded7a511e23c9343a5cd4133b70dae85ec4976c2b59ae6c0875ee9c3a951ebf002204f4da2cf547a4b388a638a1c988abe9cf1bc818961d596d2f54848c0fcadf2b2 3045022061270cc620f9de0cf98e930bbf7a246d1a94853da99cd97dabf77c84f80352a90221008df46c53d7ed670caa0f87d4687481fa29b1ec920d135cf99a08ad819b8c4bf9 3045022020ca2d4dccf423eab35c41248193f461b4c9a9c013ceb19057117191282a043602210092e610246ec167a5e36306b8ced9ccaaa4ee4645ddeff673e513fa9b99336579 304602210082b1bf6bf11a59fe94b716e62a91fdffe7e4e62a16465b782e07446346593776022100c11bc49a9320e9e3bdaf604e149392e567ea0c717613e5c3a4b463b68c89ca78 30440220539370c220f535d9102115a0e9d6628f7cc40d3301e08c895ff29342be8000be02205077701f441fc70fc2c06c6f65b298247a433bbca53a5849e7115477d1a27006 30450220592919c5bf1b20725fb5fe2a17216040f3b0758e9c4d3a5778ce392e7448c36f022100fd294b566b39e55a08d86d98a73032cee5ec5264c812a228fe1086967f528971 3045022100cd53e47ad16412ee4c0f86ac610ce89db64fbecfe57d95cab45ee82683115e23022073aca5ddc907e267f9bc760e7e8fc7cc7a8db03b86da78759b0d6f9c0e28b187 304402203bcd9f8972e29dfcc7cddba93a88fff69f5dacb5dae7affc4b015d7e5b74feda02206a7bad8ba57cb2e4e91c9056479574d690b59fe3e0585858045c74fed032eb01 304502210090ec8921a7adce6853e42f1d39a4c49d38116742c2d5c208328762d2d608fe2602204df4346a7690433d7bfce07769b9e89969b44303e0f1f3fd8522a515a5df8b1f 304502201c55b9e14d294af5d1dc9890eed49b9b23c5f62f780108f629c60582c79cab40022100995abe2ffe6a17513f5c66eec95fc0c5ee59b9ba21d93847b39bbf731d81b87b 30460221009fa37d070899e2822d96806f3a156cc784e9bb2661c9a00fcafee7ac108179d1022100ad567dfe4f9a6793fa05845200872e93011ea4a2065f0e0cac4bb7ae6872ae86 3044022014f6d2cbb5bc4df60870403a6edbcf7d2c1c08e7903a991af2f045eb1e58521b02207dabe1ebae13d019e98acabce425b4300e73facb774fd1c56d9931f83f287140 3046022100b9d08cbbab66b68bad7ce97cafb4b651e8b184755363363b2f202fb0ffc8edba022100afe61211bd549e1ea1f1dad15b4da34d82bf41cf50a72c1475176b3d03f3007a 3046022100bc3b74df7ab16bdf82dee3a1420a050b69902a0c234475fe752897d25fa4ecd5022100fcd853568ecfcecfc09913160f76ead39100301d7fe1a5ba71d74b837137df10 304502210085508e99439ff8a4afd33c5fb38ceb392b148b3d50b0907ab8a2de5a2113568002206d92f782850b0a6687888f53ad868d4dfd6d4387dfb983c602c716ffb526ff48 3045022100c3909df879e37a6a4449e9aace416f6633e6e933fda614d7934446e174fbfd8e02202a88a05a2e7c8b41998b3b111688ef98cc0ff6681d23217b6beeba2f2fc4192c 30450221009143032d1e1a49ad7836f2d12998e1889a5d1488c7f67a5fc12c260ebdfd23370220371aa88309045ff351b4ffd7ae3908962005a5f79e208db97de6c015946dc038 3046022100cd46a1483bace2de7a09b4ba4e3fa7d58ead49ec95b510a4159406c61a69886e022100f0fde315297dae527c832cc9f57a6349645eff06f54e487211e20daf9415c80e 
304502201479e98b05e7a9e6ef6fdfa9ae0b9cf7556f8e5b505460539400a584bfda63cf022100901e0e4541670c08ecfbf657740c926699a00564c8acde120b0d53d6eac60805 304402202fe5e1676c4aa9e8ecec8330c5f92eb07149854f159f81ca8df63814e2905f7e02205c1091c0d861f2dbe02abc70d7a8f47229dde0a25c2d16519e9e089fc7731e62 304402201de7e05fb65755286de47fb55ea483e9e7562b59325b410a3ea0423c509fa797022012e1e5fcebf18a5703db381fc4aaf229b9aca30673c84477a80283d79cf73bc7 3045022100a8cdea32315007e4c49d425a1e2c101ec544659b65e24fa2b0fa858747a2844102203c369a301b4d7f23a7b9ef72f2239dc3df2e2696c8632a56be791db90eed3b02 30450221009bc32397d8c04793df4d92d51d4a8833c0430f5cca9a7d7fc949032906ce19360220199524d5d54c878a6f34d664f79c04b97c6576c0c7f23027b85baa1bec747c34 3045022100c0fc1145ca01cc5dcd06e10f09ca43791212920761f1c054167a8d0b1f12b9a60220702d51293f659ddfcad8c5530dc7c5b19b72467ceef28e14f19bc6bc68039302 3045022041584c6f62fef9bd2f5cc8c8c0ffe61b0c8b75fda400f4673aea5059c1f1d703022100f78ea6e392277c21503c413d226f55ee3213583a7db5c9b805fdf5a290a5de20 3046022100dacdaf545ed8d4f48d3183e51c949d398ab9c993d34a1b603d8e4018587e4de2022100e7ff9a22610ef58ec11e62dffec3ea8ce150fb7f0ff84a4b8950ee06d2641e99 3045022100c5964c676d5a241739247bc68b386ab0547f4dce707a6cfcbe33a6afe3abdd4502203eef20091ec42a45bf3493fab85fda55b38db2dc6aa47d69c5570fe4f11a0630 304502205e1e25c700415fa96997e02ac91a3a6e0d8e33e4d3ffdf221599f3f0971c442b022100a4a075b4ca2ad110e8eb09b3f441a252cd88a628b3d459736eacefe2a5425636 304402200be6a6ebb29f14472bb2188cc4f7ad9f59af3284a8200a9927d6d090aa5ec0af022061c641bd96dd214f06dbe1245cb3d8b44a43cf1ef6e5edebe07da99724574e01 3046022100da8e11b1f9c5bf66c12234b28125fc1ea62aa498a2986b278c28d71b6a7cc4cd022100d7b384a57865a59263b45b30b504b4e46d9dfd16a3702e82c70d2839d44b0c95 3045022100953837181f174c9192523ebad435a22e146d85b4d12ab0eee9d893c61d71102702201da21e0f23886f5a89214f44a3f8c1ea8ea3cb4891f4cafceb3d3ef81146bb7e 304402206c51270db7bb8bc6649f5895f1365e011c8cc0d10013c42620e47d12194aba84022008eacfefc488ec039bd93b838a849d946e17abdca6ee859c087c0fffaaa310a1 30450220409ecee116b596be4f34c499033fc9d318e9ae25093a0c2636be425976163c8e022100c22e0a92acb6876ff1a6343de2d1424b13df98ca936d567796100038bfe15a15 3045022100c9ae1bc61ed1ccbd6191aa4e381a9802b02ea7e1e7890f6b10450145c7597aa8022076978d9505122730ceef34149382442b80d947e2856944061a75518ee0beafb2 3045022100c1b86ac81db76ee9bf4d21bb3fcd84e52d9578d8f560e2806928faee85e270c402203a093bece13fecf63b6fc0e36469fb94f4cce15554f2fc8a221041311feb3cd3 304502207ba3c8df03fe6f95227352d8fa88ec35772ec78e8d4563e4de97b68b1f39093c022100b7798d56002a4505cb89e73a397dc853c064319a9e404330e006677cb6f68782 3046022100c62c978e2e1d77548c710189126724038717e0596fdd5a0696708d2e8ca7e90b022100f2771b526476a247c17d63d6463d4f043e04e2d8fbada64a43778a172a491c27 3046022100c524ceefe6135aa987919f87a0048244b6331a416aa90fad9c1a353145c07a6f022100d89034c667409eb4e8c74baf2b6512a8581a0d333e826878b6e1cc020a2df4d0 3046022100bb627c558bc5c63cf9db528c8dd396bbae50b224c9e167a48f5719156f0cc54e022100c768addcbdf0c37e4e729c01041cc929edd9cb74ccc636479eef37280c32db4b 3045022100f15473c6b411d2abba77c97757983d89ea0671b1e0010f674d852baa4283f242022019e8b8bfaff234e86a115cca9e526dd207cdb8bff9df0cbcd6455f2b497bdd9d 30440220610569bece49fbe04f85bc80d8f7f99bbb71752de5af3017242df57b565a6e82022037d3894345a27dbb7069f066bad2627391e654648e8893f47d536133f8e844bb 3046022100d9a7b38c481c7dee1952022f0ccc81080515bdd8e538dd6cb211016808b75202022100bda02c7e01f0691edb2d5e57cf3eaedbbcd6ac5551ea643da8c8468266a370a0 
3046022100c3d6eb436f86c79948d6cbb5e9454cc6629667d3666d5e679fe916be4c9561a8022100ee7f3fd45dfbcf7146c4178135fc5bab2e1c53b658c468a707e24e56ee0d1e78 304402200ea63fb3048dbf81a3b804956c1d6882e0a2b79702728c07acf245eb3f75a88102202426428137d240d93160527f7a224770f2cbea94454eeafce63af64b4c124bd1 3046022100f91a80596835b969b9d8791bf68919727cd6df7679f69665b50b8a1b03b885a00221009207b9e82e313aaa7cca17061e4d8c31194f86ec8aa78b50de438d0f93beb8a7 30460221008219704be040ed29a40268e714e009dea386b2e24cf10f4fa55574df1c23631c022100ffd686bb600db3c46e817520471f5b71a3a5532c500415a21bdc5df17f19b924 3046022100bf669cec0d4c5e39e611dc8b6d1f67876c1d2cb31ffa880ebb5c8f1206c1b35f022100ec8ebf9094547a3c26694b4563a3ff2c2ceb2d4345a70b0d9400918161f24a91 304502207eef44b012bb66d68eccc873a9b1da1d10c17fd53ee431d0641da47fa68f029c022100f715ad90e4048a4982a242fc172196162842cb2da21dd5ea265780e75731a122 30440220619c10eb2c35aa729da7a6afeec3c72922734a421ddb3e093026c921aa54b83b02205c81387738b26caaf3225b35acc532b80844f0dc9952a14f569ff6beea705628 3045022100f06b20f965b0b6f9df306f40d7e8d23ceb3cba62507fa48abace74ada7a575140220317bc38e181eefbb6faf165df2f6531841ff3705e7932617201bd2f1537d53f4 3046022100905bb2a5d9dbf5b224b1abf6cdbb820739452c7aae568c6f98daa1a634fbf80f022100a864b7e8c69edad8363522f5e17f9423a50199ae044f265926965025e29a03a1 30450220379a59985746eb46edb7ea7fe220fc936263fe560aac3cc404543338766991bc022100ebc1f49c4c8ad215edc23635fbcb91891426c7cf8781c64b247e6661403da240 3046022100c13d72b685e6e88e58df0950e3bd613bd1ca7ba92bb84cf0cfa1e1d4da5f1ce3022100deca0b28f453882183cc31372215e5b112d08769c3eaa9bf82ceee599290ac81 304502205f8442a67135638d4d68185b9e37cdfaefded74a7d5170d5d4b298c664afe423022100b270087c99c66bad1bf0cf35101a536e5969e4f26bb469f3c978e94629f1028f 3046022100f1b0982b64af504107f2c8c05cf1247abf6c1f095539d1828b38e8345673ab69022100fc9eb6b10349724b80d4a8cc9d9e4294ff8baade0c1d16eccba29e010c9216e6 304402201701902a1c09773c352ef013d8c9b74f2a25e93a5a55b112fffb859779e282f90220597bf9dc3f1976bd0b92c6df93e8005423eb92aa6e80acffe4f5ac8b7b702cba 304402202347fbf4ba024061790abc57763d3c7d4d5e2db1d98317970016cfd2f8c444cd02207b2bda0e1a17665e71b689367e36cedc3402d65596ea8e217e7d8cd304449f8b 3046022100ebcc1da990adb2de94ef7a0744a083bd7ac574519597dc599ebd8154b5cfe774022100f5edfdb46b4d3c43ff668b5bcca1284dea8987c76aca0990b408692a64f2944f 30450220086cd545c3a1420e7e42e7ad0b58a3fa42192450e2465300faf1b609872d4fd1022100a1c60842906ecee7824f371c42fc218f1d81654f4a52c06d79c5866cff081420 3046022100ab64ed5faa163c4b5c8b2464357527492ec15894ca2eff5a3fa9dc93f49f068f022100cbe3cbb5277a45a822a829474e5f390bab2aee71bcd386341498bc2742520ad7 3046022100fc1acd6d77e3eab8c44409230eeb52f12e77bb5ff36a96a63efd295b709b2629022100ebe7f0cf1c2ee15307d440909660aef8c12fbb255bb58f11bfb3db123fdb757c 304502203075b7a6c659ca65054a638456671cc14fceb579e758c350454cb27bf34a035b022100fa2450b6a98f814531e1fcb154bae7335de7bc58368c1f4b4f52fe275516ee3e 3044022018edd935af19ce9e83da0976a9ca961d616ed8d2ebe53d291b87e30f91a77b3b02205022437f35c1c226a4a845f58ce10353c217fc541d0d384654dfe9639c32c3d1 304502200133602f4b6ec24226350f905baacc11f6e1fbe0e8b59f3a11379af82abe213f022100807a9337a390975c16b08b9938a334c94d6c513b3b256541af25e89e4ebe9c9d 304602210081a1b6dc8d5e280ba6f7ee855b3427c9b2921ee53730ece0d3ee8e64ac15a585022100bb3d3bcd55830abe738292969591eeb232e91b3c59a4a39cb558fcfc1fcf944b 3045022078395653389f60957de36bd4f5030f6eaee0f5ebb6afa033aed04200826d39fc022100fe83116ee171f667c829899f5216e435471149dd1432efc83a045cb476d673e6 
3044022055dbc276fd1c585042ec084ad9f9eea54cff5f61db5f46f8730c5e9ba2256d1302200d00405dc9d9a40ecf85a6dbf9f96a3c2c26092570cde1aa31e51bb6602e37ba 304502207e171a94c8cff3f5f6397f766869723c882d14b7297cf9b7afe9129c3474fed7022100eea9e377d663c6f9ed854c10f7b24961f23e983c3e3bc72e228d57bad1c33bb5 3046022100be555b50bf093a3ab69bc8ef04c797648869b9cc12b6ba0912774f7137150646022100ab5b6857a129f65548a3d8898708fe344c2fae7615986292741b2f78fb3968d4 3045022100e6885b29d5b1557eaf5d3473ec6fab4acd054a0e8c1553aea320b52f4ea004c8022038e01d0515fdc1c91f0113ef2612bd56612ef500f15ed36207e63d31e06292d6 304402202f5d96db48441fd45278c00a5af27852d902c65a5cfe4c747649f37983a31fc9022028ab2b16e37842400ba5fcc878c6b6b7a0c8f0a9c3bb1e798547d4ab9a134c45 304602210088fc2332b5e5857a4eac2f4fcd4cdea49894f99be7075ad85e50b37d9c5aee7c022100dfc5ae0f12422bedfbb002b3ae4e5760901a908bbb795ea49672c51fc5ae997a 304402203abb0316c98d0673cce23f4056cfe8edbe6d23cf30689964c9e70aff53211d62022036cb9835be84f5a19cb306b8643b3221f59f9e6cfd053631dfabd70e351c5fbc 304502207573051119a55def20123d0c21d2c11a9cdf04896ecac8de58eec8985f5e9f96022100cb5cec9446c42721f18a9b73beca1698fc3d645662658eed24e074b5825eac90 30450221008ca701c2b4400f96c1f6bf61fe8a89b9f36b0864c59aa9d190afc2d29d96abc802204e396a7e9a20a01266d7ce15e759a9f4cd34092b2ef1eb309dd653fb990d036c 3046022100dd9ecca2f58f5e138eb94b5fa364769e9fb091b57c5ece91528702dfa06db929022100ab1e036a93deb8fd4338383e748eecf2185bb25d8258723f9844d15b22869a05 30450221009b9e559d577feaea962e7b2ecd7f8a0abf7be6d6f0c81fd8bd25ce3548dd64c30220685ea726a820c98066556b4fe9cae4f08c973c1de591126856660d353266f1e5 3045022075bcef15f3685847dbc741c0fb898b597612ee72c6f70694f26eb4b883f7f039022100a45afef5a00b4aff75284e8355724ab1794601ff49962284748b39f25864f786 30460221009b0044edcd589775286d260c917981bd67336c752b53c668c2a402de9baf48ce022100811149287f35dea96d5ec89cbaa31f124718417f623d514850d658a462b3fa3f 30450221009828701fcc1c5de8bc68226e2731491339ad92c05281117f83776b0580ca6f32022050fad6ccc3652837efbc9af431ea2d168e82147afb846b84ba7209bf0e83adc4 3045022100b067cb8fb7fa502d09a17810b1a9c669930419f2ae8c5823db4b8aa39e077ce102201342047bcd0cb563a79b44fffa31206d74d4a94cc7fede4bcedfd420a70dabd4 3045022023b39f80fe48873f1bbd136699ff45bf58d1a66bda32979fa515891a27bf544a022100897cfb3d72d4eeadc25873fa327f8db95906ac70f0a4d8a3e007acd2c1687270 304502210080e98e6939d3ae4ec5a207c1e9cb1b1afee34c6b403e585efcad8a37761653a8022062cd6d1d7031345227aa5be3db304db3a5720241f2de97019a028ed5a15085f8 3046022100b4fb577446334b80d8f89f721a362968587a8952ea03facedb8fd98594717fdd022100f2fe64919d519348d5cf91e71bcc54dff21e36b6fa9260ad0735e0c559fc037d 3045022100903b353189ca58112310bc4c21a329684d9a65396d25eea0dceb6cbaf7f58e410220271a358230bb6aee05a9cb4f1aa4944c3eaa0ef2cde69ac485fecb295aec213d 3045022100ebaa80be8d5a5812683ecbb7b30317b8c86d018be7174e7199c8e763e883a3390220436be74b605b2697b756398cf81d070458d3b5b8d0f01666015297cdc8a2076f 3045022100cda9e7be0fc436fb245ac23d017339296cf730a793f8e9fc465e235165aa3d4702200c7a0a727f5035ee282d39b7597aa03cf4353382531565ef6aed528944d3ae9f 3046022100d3485924552977e038e2e2e74d9196d747501b7b7746b729c34588808ad16484022100ecefb50b2405fc2f445e4c739488882dacbedbe39ee89db3914069e3d3c76577 304602210090a6934bb85e4de4b3d932ccb2441d5a05b7d7d1314f08f648089d68529419c8022100817a046627382259162321b5129601348c3d88433038f8720c72a8dd16113bd9 3045022043feb330ac1dfc4d05566f4516f8186b51bcc94c1941a172bd145b5697a4b1eb02210099bf3d9d09fdd1193b120eac7a1df90fed6c1533f3f2c812df21e7f994607d41 
3046022100a1478aaee67d59532f75b2da5d9a25499f7cd228cda3672051ae83b378a6cb4e022100d2e4bfed2602d2171a4394f0954c26dcf7ece76f1d7428384f3885070a51dbfa 3045022100b65c4096b90f1e8ae30a672c4f5bea7bbb01f735933c2bc82287ce54a79aa4f4022071dfc50405e4fa0df574e9e603449706c4e128c58bf5960f75c1fc3c9e1a2f2e 3046022100a62aa11c1966c7baca1534b48d1657cddcf2ec67eb384dac8e60bdcc9ff95cbc0221008e981a4317512bebe4d2d4978869aac8e4e57b2b96e27266c178c8cc3479ec9f 3044022033cbbbd43c1b888c1507f21dcf50891327f9fcb22ec72b6cf7e23d246d9d078d022064e8f45fbb95b5eef9aee2966d87c6ae707ce60a0e6cdc52245f849b6a3559db 3046022100892122e12cd717158dcf00fba38daeae21f9d37cb03661f2469c1e0046d77b96022100fe6760d41c22dbcd59ef8a1177a103a4420276a2372d5dd90b38c3efc6557811 3045022100e8cbfc462b95356cfcd95546315a44f5a742fe0abe23ff9ba7220f5c91ad2f160220561bffe99e299e98cd5ca33e91d571547192507f1315b9f9e1cccdbd61548221 3045022100b89885b81f1f4868a6000f24b896c41f33a5d2519be8607b0fb94d220b21ad5102203c7dbb1444226fcc58233c286089ca9ff0becee76f19839635025870a1a9c342 304402202c94a88696713b1104de198947ce8c24745361e796ee00658cfd117355d943df02200eaa16e2d59676466acf61c7c36aea83238276776f4a5fa0048322341436fb58 3043021f14e309ccb6a43661e409dd7ec11935c54b6f08f895ea4bd67808dcbdad055d02201a4fb8d34670dad70ec30ecb34111459c78963db5c94ee8374acf947b9996622 3046022100d25835c7a3a6028700244cc74c867fc7e574963846b8d0bc7f46fdc1f5c7265e0221009b8795d810aadc1e2f38c312009e7078fa5b299dbd0478fbf8789f7aef26b15e 304502201e1e34e46c590dfd17c81aa786292817b3d7ea559918b4594a59b85cbcb25ae302210083fdb7c0a53998de8704b6c2d63f7ba324068431b8bded9e1abb758b91e4ab06 3045022100a3891ddfd6005fb3ae127204e13518f2ffd2f7536ead6982c59482551b65ac530220032ce5ca9213996cb638be14bbbfa99d27c8173280ef03f983708a1a2bda80a4 30450221008b7c6f6b752112b807ac26ce324c338bcb513550df05dc023c9fac0eb96dc83e02207b7e588774e1c36ac63bd23ab0155b34ba341efd57d7c596c6757d450fd03b79 3044022076098c2ce945a5d953f74eae40ab88ffc7b1d5f8b0f9f3f91b57025a7e2561060220782024cc7dab875e640aa46922615cc8bd31b28a575806eca21ef1d3592a90d3 3045022057edaf30a4d3c8d5dd7f1183f252204ebbd5085a701dfa281d7931effee9fa95022100f6dafd895199f808fb8184a344c6b35abb5f8d5cd42f45b7a11136ea3f66165a 304502210080d6c4685cf2f23840d369bcc8399c45281d264c69708746b173431c452c62d00220503c6a72016e68bfca65beb48380009e090fbc173f6b19f76bb0b6db47cf3b87 304402205cba13b33eaa4c4dc2af68dc38328d02d72a05a205b68dbbb0393f97af4c116f0220297d0dd5a688980ca7a576cbe56c7375fb962bfae2162e4331100bb7d249c4a7 304502203b41c0621d3480dbbb97e9d5a964f6a99c6e183ec4f4d83a9f8095d5c318f2d6022100936c119f31c52d24909d57ae7ba577b4c11a11c60441ad0e680158c03a8cacae 304502201511039db17646e4b878f6493d245f9ff5fd721e7509d03691f31c9adf255b62022100b4824c11d5df3fa93f90ed648718ee6dc98a93a97367474beb109612d0cdf636 30440220324dff4916250e9e2ae997d670e3671f72331a61b1b522235fef2755f367772902201bacdb186ce95bb5643362d7fda7658b5c529e698e9be4de6348697f7880128a 3046022100b99e294a54fddd87be380410e9afd69a69a4abd0aa2fbebe36a338ad9045160e022100f8d3a258c832ea82db1cbfa9b96f0bb6d6a59892ec8a60e7f88ba9a8b67b7d6c 3045022000c69343a7f85f06a2d736984a0c8a439c0cd5290fe9ea393ef1cd6e56e486ed02210094a93b19cf8ea215aed0c8943f11e5f32222b20be036bed31493b2dc2d8a66f7 30450221008829ae9c14ee49c8034a2810368483d34c8d0c747e2d6a0e85d79b5e784af9c80220400edd493bbaaa7ebdef90073eafb5659106daa948e47cd7d4aee02b6d4a9491 304502203a83b4ec5ab5e81c8d999d0821d32839573e476c20f3353194f0bd77db46d9c5022100c16f693cfff7fc7deba7a21a7e908561f395db43c8054d43581e39a9725d2cbb 
3046022100cb2288c29dc20fbaecf3767eee45a1b922e65291b439932d227251c9ed3abeb4022100b9f168300908b68999b7e7e6249285ee277f1110b1044b24fb1b399e124846fd 3045022045539885817990d0a72697f0a236388e79f75b6f00ddab61738e7fda74937a93022100a546ee84851261814bf40c35e531b02041fe175585bfaaf60dfbcc500601b99f 3046022100e626d76e2e50e3559e1533b984cf199fad2623b8206d29cdd2236cacd019d92b022100b933cb843b911e94a2e7bb8648d9572f8b99e997841da7dc439c8e4c5aafbabc 3045022100e33595a8ef8ad51852a0903b419d8fae0c598b60fcbe5430a60bb926f1b58f6d022074e5bcc34df2128a1c7a23fd0479cd124936df4efc90242bf355a01713095b75 304502207a185f302c79f5a6822686d8dddbb6a9ff843fe01674cead4928413b753e688e022100dd1c7057b2cb2ee2239df76243a94191cd0e5dab40db84c161f3664e3e6b5420 3045022100b2c78470619cbb4021f2e0b86708ac691ad29c9105018e023eb808405b6e3a5f0220593788ea79d1aeed4eac169632c580ceb86d5a5fc72a0daa7c52b48f0a1721f5 3045022100f6280bbfe503f20d94d517fdd05ba52b809d55989bf3fa55532eb09d90589d51022036a18774bf8bd0975b6efb59499697ea43f3fc056e9a319e07232041e9d5c2b7 3046022100b377032b478dbb352ef7bb7808bbee886cf0498213a207805ada1fcc619b5612022100b38737c95cc863d34c2c1a97367455a6593f5c4ac62e5b2bf8bed38f6af299ab 3045022068b706a007ee8feaf15419818af767502b464a041156374cf5be0596466876140221009a97686427d3b7b255fd3ce68177b3e8c20bec224fb7ed6d91947b1208863dde 304502200f2634f0c6431bfa5bd94ca7afeeec06c1f9018cc8f1bf3ee9d0c7f67bfd0c220221009fed92d63ddc7fe4e788e40f70d05dcb5c5ee9185318fffce3b8597ee5968fba 3045022044c23976e2dc5c3c8ee6ef1eba0a26e5b1c2861e50f2f6cbf989de21781b5318022100cff22e4d18d31802a41a41954278e446111abb0057753d7c639f82c8f183044c 304402205ab7afc18b0de18b5d992bcaaca50f7187a39fea670ff596a898562629291e5e022075d9b8e906320e29956b9b7aa4edd00a011a6357c96aece06bf180734dea693b 3045022100bb2ac5e9ff15c1326d08192c08b4fb1f2fa1cb2129b8f69c6527019f0642e78f022041d1cb24d9641b5f0e80eacbdb520e59131110a848c2ca294470b24abc0cebfb 3046022100e8dd680812dd1e8924e5cd889950dc1d97084ec3a4bfe02fbc5c535f0f39b4c9022100aee44df4b8b1c32800e22cf61bb3db6ac754f4c22a3e072dc91cf97c43aa3639 304602210087ab345ee0ed4da0fb531408e7cbb11d8ad11a4109eefe318a220e86c3ac2c2e022100ffe78034a227e7e65f0ebdc6d70dd9219ee45acaa6bd805e47e0a3c09ce3bd5b 3045022046f71a97cb9b5e35203192a4c84505fcf459440c5cd31d3b5b2b1c8528ba03f1022100f93a202b19990204370abb666611a4196faa75f6ce2d14a2b7b175e9eb96bea0 3046022100e32cd1e91823c9b9b49836e5e18d3cdaa34bce4a3c44e16e5afb2e69a7c609a5022100f5c1aeb05f3918d62ed400103950f360e79d9ab2d633b14198fb9d85ffd53382 304502205d105d57e43d319d46495dd8aca1211e4951e3bfebc21f3930d6b3ad44fb0b2702210085b8b22bf337340a67d538566790dbdf3dd0c523025ae6bc33878942ff7c3aaa 304402202ec65f734157efb521af214eb2415834f21a3588c488e3ab3a0f5fca3abd4c3102207aeb0c91f0a25e418878f21bc8b6139fbaa64d5e34e854a44b78f0b102803991 3045022004273aad5d7955b3b0e03f5486a742ffbddd096d1c07bec801a087a73cfcd54d0221009cff3b19f48d2c15ac49731c314934ab570e0cee0f4bc00d0a19a613c0a56898 3044022064c023794bf3bf765bb02fae60a5faa4f62d953cf8468638605c1124dae5eeed0220431bf78a875498e197056c5992990d457cb9379e517b6f13b590471f9f24356e 304402207df12aa34d92b026509a53878d168073d8232309c5384f82dce27946bdbabf5a02206c79a6a1b6af8ee252b0b6980f5f5aa15b711660887f0465f09b767988b329c6 3046022100879fad0943b1981fd18535b75a21e20cc20e7c8e258fc0cd6ef2bd8c8203b180022100cf2f5d14ac8639aed7b16cfa3c3b36bb07ceea423209d2cd77fc20ff5fda9e31 3045022100d16b14205f9dd0823e43fc53685df35b79eecf1b1ab05223e263af50cbbf307502206e42e4d5682ce785d8ac15836cf2952069ae830f8e2e9ba0f47ff46f0867f812 
30440220363489de39619882ffa3aa2487e5e8733ff302975f3a7ce8f9288ae03641a70d022020b4e9e88bbc162e11c63e2ab54fd4baaeedd225e384b30d60389da58a81b613 30450221008988a1dcb97727497ead92d69a25be6109cb64a6311a2b1842683b79a16134a1022045e27ba5b6168a6220473b7602270484fa0738b997238b46756c8cbe81f6f436 3046022100bf287c3bf20a7e92c75495f0a24887563b5eeb0c05fe48d62ebf3cad0cfbba0b022100cca5a5f2915ee345611848485973c5e6453b02f8753dcafc5709d7d0e53d4d8f 3046022100c1739b68e1ef2635612453ad32dc1823f71fcc4d018e6b922ddbb37b423b4a71022100ef7f2a3ab2262829ffea667196548b012eaf69969e7c8d2fe58a98815c026265 3045022100ba0e82ffab629df86a387b6d5944f76c80c835e910473a6f97b139c2cd778a7b02206a1081345ec597ba996b17add97e56c852d99db5a35b0b59516135f5990d94ba 3045022100ca4cba5c25025fcf7683cd270cd7fcb8342b0fe003585bae00fbfbf803f0af0202200eeb58a2c932a9accca1eec42ee13cedcf47bc2df36587ac79e42a5f2f086942 304402200d4f572dd2331e2551824780e009c271fe46598ab98bc43c61f86c6c4fa50f35022007ee330030626c1510c5085cf3a7d2d9b4cb6d39b3115f2310e657d25e0685b7 3045022100dfcd9de4c6ee3c132dc1ae9ddaf50f662222b8f664ba38dbd0bb14877dfa16bc0220720092a521cda404a8d12f7044b5b64bcdbf4b06f1faa211f1fd5c2069676984 304402201c5b12bacc986bd8355fc45ebf126c919c1e160f4e699d2356be7534467590ad022021b3796b4d3dd75e36c9c2013b481b26e4c35fc4e5f526e45e2fcdcf06412ce5 304402205d5261984a378df8b14855c3c1570f442e32bb24fed8246ce57e1e2cf5e8888002204b6b7af82fdb870e0c12e0950197379c7537662eb5627fb3cf43101504c999eb 304402204cac85f3690072befd6cc8bc3e871133070abc614e843a1633575dbba2c64c6202205415793549ab1ca7a1ee655be8bd0d1b5298bffc226766bc3984194c220ac0fb 304502202d3692b0d271f478cc4758f30760dd42fe091511a57dd600eacf1817a26d2716022100ce107a9ebddcc4727e1f7e3c7d8b3442e366039a6281559a8c74e4934633f21e 30440220405ceae9cfaf5a66874872b44af78444094f0cd191fc7fa244f8183be982a13802202447c62832afdcdaa4c6a522ac3741dc7ff309ea0d8d9367216aadef6ded16b7 3046022100f8de55a4775d75da8d7f9ddebb79f0b1df00a4106454e7322d0f2ba3b05ecd5902210087ae420b794376c39b3004cd23b578d258bf571a1e3835031babdf2b774ef3f5 3045022038ec7ea2404e069facb772974968589c94893e7ed0cbc85454bcfcaf9bc8e8ad022100c5683859c27f01256a32fd431b6dbdfd23ac001efd39d961a45dad91eff0157c 3046022100ae5c5a015dc6a4935cb1c76f6b4e2c1a0cf93da04c85708afef2eb1ea771f624022100d625ed3cb1c5af002d9e5ce38009ed7bff0ea2c106544bedcb3b12830662b928 3045022100e9b6e4944ee478e1f6924078471f6e67aee400b134cf172ae6c40783171db752022004212fff7ab08b8e0b3589eb756e834f0adb8096bfa3e4a5f87b2f3757248321 304502206a858eb8c94e754d3592ccdf975bc32e3543e826bf2346c1062c710089713db5022100eddfe8d9203c9a24b81699020269b7ea593b7884e33b3d4e7d891c074a51aab3 304502210093a27ebf1f3c11d98e7e563516ef3b2567fad4da5f70982a068751df4413147c02206f5413dda202c8fbb55e8544534eb6279a019eef165681efda864c4cb1107fea 30460221008f6f1a4f6e7b3a202a5669d3e96aced2cf1fe79a6a6b79752fb1fb18c712e0d9022100eff1d67c75638924bce238f3e7f7d2570bd98982c8fa5f8aed0a24a0a59ba786 304502200a0eba2d88303ddb3afd505ce56edc9c82061b648b751bf957293df767b885c2022100a62057ac36ea3f1ee1d5ea34e2efb1cc876871980a9b3ae12df566bccbec0239 30450221009b7b98c035469991daa39d40988fd3bac7df0921934c21c7aac2d462937ea0c902206a3d08e2f7cfc2d58dba75e9d952b82f08b29f3c231e160d88287ecf54b312c3 304502201d14dabc3c6529e9a6acab20ab88333b5b9d2e40fe7f04ad94e1ccffb63003b9022100c34aebb3b059cfc710e7325fff553dc4a0fd2d37dde55ae9c5861ace9460baa8 304502202466cfec6adf4894d43e026015977ad208319d2452e55b55f2848e31579bc2fc022100f52486ae225a7b73430a7d4b7ab9ec30d42609787bbcd11a9490627019e4fb82 
3045022008576480813c4303c97dc3840e09ac016f90400bd67782a30862612d171d458c022100e8a9caf5625322c5641c1561c90672d6694a0edcefb8aca0f7558b1a541b6320 304402204870011a5efbf35964ee97b0f6b70b5124fff539749690fba10c0f5904ee16f802205a8311a1beeb7513f4edf8fa816e81a481b833043751fdbf241a88c34aab9880 304602210084b3d7ba2ea28286ba9bdb6470bb3513c760f63e589b4ac3760b5ec84d9918cb02210082624158ee55ae9ba5007c6646878030f1a49597b9aaf51ecaf1cbf7c195ba2a 30450220306cbe3bce31a9150678258f6b6306bb98860c8cfec27778783eaee43a643d5d022100e1db4c8e6200441f817917fe21570f3d6e1ae0d2450119aa3e699ef0525c2c73 304402207919827fb9fdab93b191438575be08cf0d18d4b940e31699bf4a93a9f2a08fff0220625d5d8c945525947a68f2108828ef879eaeac33e82e6b1d72baa4b73ee9c7d3 3045022100c64ac3b7fb22aa9da442f088ad9ab5eb3aaf14a6713203382855b0e09cd7698902202ab8d3c7f64d03501009c467ddb7727125bc59b1fafd741a013be91b11db1a5c 3046022100b669cd3d634297fb56ea1288ae8b8f4425ba16add6485b088f4359378a87b195022100eba01b6505716dd33018fe96a51bb4faca1dd3b7a5d50f1d805a8d00c04a696e 304502206f5d06c651a3178c357eede59d15bf8ab0fd80941b2ed8026615bd9c6a159835022100a023098ef81356c660c56b759d076e462c89da379f30ba768cf3767349cc4f82 3045022020cb9952004afe8cf29dc9dcb122b7ece6a6b01af99556e4674a08c9117afbb2022100c6a110ab59ef49e81a4bc0fe2ea95757703f9d674c8c038eefe9cfdbead2a3ae 3046022100cb3d44da291668ef5a764237d451c783ae6df621548e9bc29815540c4c23c0a3022100d859e9368e29b2db626f5f2bc927793c4398b10c699b33657a6e330f01863819 30450220121c2b7eccd1723ebc556c982c7377c769e5403976fa5974f3d35660d9a0c6350221009e6d229bbac1bac78a66de7ae5624082a8e0692a23bcc72f55ff3d9c3a583035 304502203ae2f8bcee03e0475c708819f79f41b28f06051b7939e6102791367e30723735022100b7c4243727703c5667d6a663aa17d16ce6c3e642d92bf11acad0af8bddce77e1 304502200bdcdd63700931d6371ea989a8bc87db61d6cefdfd6fe9a2828eb3669a2f89a50221008148705a98381ddf305c046e573c8f1dd48cf9b5f1be4c7f4aa0aa79a5a01631 3045022100ff3903c002a862f223cda1086792fdf8e0a3d5d6115f4c2c226f2778e884c17a02201074c5febfc7dbde36fb51c68b49fda466cbebce918ecdb733fdc5ea5c152b21 304502201c8b4469e127f2ec63179ff82d13e1a3a5ae651a7383b3d64b3d358c9f3dd24a022100d167c8a4ef640185200c5b7383e476e12ba8f8cc688290b62da8cddbd10574f4 30440220204f8f601c7bf21502947e809b592e66d637a663820c4bdb08854363c656e0ee02205adcd17e616b1dfa893beb88d03c79e36057ce976d3e9a8405497cff239b526c 304502206a75e1f47f456aa982d5664a1f6cfb84c9695bc213f87e3889202ebddc0d898e02210080471046973bdf30cb958054d74fba7cda00b94dc9a2edcf38630ca68424d9e7 30450221009997f1439e5b8df1a3ffc23cf36885a2863bc4da4c3c08e6c6c3a3677ad9695c0220707628b2a452141f296aacbc19dfa310df7fff27ffa7d9aa31220957d40a4c89 3045022100ca1a2f1c4f709544ce90aa103dc3820f3e474c75ebecf4232f384d809681359602205bdfbb1936fda95e1fc1c10135c344972a50382736575545ca2413b6655d1d98 3045022100c1b925118ce3ba2167e85f719a3eda2c147ec9d5344c2a3080fc89772d8e6af2022058149d5bf0946acc9eac588ea16637a858f4e14b7bae92ba3099f8c65d9b45b1 3046022100b4f190ff81c670af485b22ecfa180650c10ee4546df2e22684ba4e7732d1ceaf022100dacdbf13b04a30353c42d296ead0e8195fec62c16823a2dd87f94c2161a994e7 3046022100cd9d65790a61b311b710afb9b7e4f6bf3ad4987c0cc6f691abcf52628e57d368022100ebbad3b2515761ec38b0e98b017e5983b7d45c7d7243f0701e54090e2f364ad0 3045022100b4e9aadcd82ddcaf5ee3a3d8186bc808d7e05828d8935927f36048e2cd27ddf20220415b5494f2cefde98871369f6dd35e031820a5d09eacdcb7337fcc29b55b2c78 304402206c63af86ffbe13251c7eb2739f65b9d3c0487d945628bb4b0da2e29e314f0ec40220782a7605cf2eed0bf28ad40dc1fa3389d01535a91cf2e54965fa0be94522fd04 
3046022100da177c8187fe24148f2faa400678020164e6b793e60b7c0f2ac2ea88739d8bef022100c3287969483e4aa90110224c1569e0be21ac6f48c40dde27d7e1f12749b50143 304502203e6944916d58937d0468a9b1f217fc04a622bd50e20a81512c86731a4d2e5f15022100a2d11929e09f24f5ed837325174eb9127984e85272ba0429de4cd36d1f0603cd 3045022100ad4a94a77a0c7d784b3a807544eb23d736b7e20aaa28334d929ed5df0ba0f58402205149fa12ee9731cafd102d139a2c2a33124ed84172230669a184c97ae30e6fb0 304502210083202ce692b9597a3200d93c603f8f580bdf2231516f2a3b1fd38476d13f9db602206110cbc037f23fe7768d0f2f0b16475170c97391435c953fee2f0956233b6e8b 304402207601be01cb271446258f8fecc2122e0da93ba726c33f0046899a9f2ff32fd5a502201ea2d6af427f194c178712328a6fb88b427c9dad0b44ab5f2c465e7752ca11fd 304502207b8601ed21cbfe28ec743ee3b425a8bf5d099f8a536d93a25e3d591582e091410221009491050a466649159fe9b92824848b2f7dfa51e46ff8c052a0ee72c7b58e22a1 3044022051c1db2d329c0082146411bd8ea04e89dcf9ba365a67f178ba39da63158d5ffa022003d560eb7a4d9d0579a2465a8039e84b8acbca1600f9a36aea04441259d68669 30450221009ada9093666dd2ff902b59286b157358559f2ab6f8ab6ea4b5c8330af7b8a414022010260c1180f0351284fb646eb7d239ebb9c893c29b6586063b8d37c6e096518e 304502206db815d6421298b6ebcc8e331d2298d7b053789c055aee28bdd40f234a50fd97022100f2a22f678d62779c515ee7f5c0ac97a826443e23f387ff9ae9858b14639c2846 3045022100c3a3901b0403bddab4e7d523a086dd9cdc77ad70abe7d6318bec3e7bd3c5ec3f022062526a2216f3d6878f37218e51cea2ad3689444c468824b7647b854dcd1e6aff 3045022100fe6612e738de0ebc90709a39a8d8caabb8d2cc94a0ef28483b28306f62474ab502201c1c9c531de8d3f05e602d7f46e3fee6e99d37eb054429788d14174c3794db43 304402203e563a41a20934be1a942148d26c734b24d2b565ed2b5ef5bd8d5c39969af24902205c840f48e1c60d4dec5f41e10968f347c113542796f4ef5cc44e0c98cab4c5ee 30450221008491d5aed5017ba2caf633cabf05d97bee546309e69308f53285f82cc33b61d1022050be42d27d189aa34f0ff25ced1d97fc5fbcc95e2b98aa299c2859300dd10d40 3046022100c047a6d29311e3003e59bd63512bac0f49f2918b218868d05466cc45a0b8855d022100d9837f7697ed43596f8c4b86e21a490dba09bdb8f0495be4ddd93391de7c862c 304502201b73f5f9f1e03131b7d7c2687c7178c83053881ecd1e3e35d0da13b8775b4df4022100c27709db3626b0e082bdcb92c3ec6880f59ace18b1f088becf3adda634c9af88 304402205d91dfa9628de67cd2cedca266ec85ab90b8a83e9182b9bbf9ec715f0515130b022062111d791b60b9e1e74362770b23b58da1220b152069639cd73cc2a643e48a43 30450221008a3f7496d7cd2bbcf2a434f3a063ba2fe24340ef127d6c9668892ea546df8dc002201b3d7b23a492e3f06eb8b5a9dd8bfda71a3315d3222fe484f8b8b7314562d1a2 3045022041289181f42e7171e7dbfc417d1a3ab2511ae0cb3227570359d09b1b0759e5a5022100aca381ec7fb691308904f44b9cb352b2ff5838742e4eb7e1c8dddc212e966e0c 3045022100ae06201d686f393f5c90fc7708cf6b0a9be9585f6d0e0eee818d6e3a711f9e7202204ef58090dd0065a31b223d2102dc3c419a05bd710d5e633dab4ac1fefe8bc38d 3046022100fdeb5af99332621ebe4f9375c034854a3c1b2bf1b153f2642769c30a5e615570022100c7078511283e68789ed2cafa15e246021a79f69a8c952687fd5e31b58956121a 30450220577be6e0fd079565bc078faeb5fd22b9bffbc7cceea887dcd20fcb90dc7e445f022100aa03dfae4840f1aa6d4ecdf080c5cb6309db56faca2995c06c04a29b0b438eb6 304502205dfc23f23c5ac3628a5bdfa8bda5a300ebf386aaee86f75671370455ff5ae1fb022100a569f1c29b86762ef7ff37b4cf2aba83a3a62a7f97b10c91367afe9a964efb57 3045022100fa49513286bbefa10de20dc12a5a457632845d0970fe9ae1d011dd03e0f997c9022079e80dd6dbc2b5fd7deaa456c01ce32c21c16c1cd626615d2c37cab775c0db82 3046022100fb242f6a92fc92508705f09e5fa345289bad78cf2ee24855d50a57037da9385802210093f1743612c26970118d92e313accae85c89c9adc7d492e9709abba250740983 
304502210092c610c3c5fb7d4c0908342999f76c52e969791f23699285976f50ddf335b87e0220569097b13c7d5973291f431e29b2995bd90342b9196dd8d06061e3f415866096 3045022067be51c3d8c7e8455fe392d5363c734ec37bbacd01ed9078ec6ee705a9369cf90221008bf7b1fa03389be7c1eb788afbb884b2e28fbf1791fdcc929596660e1a07b83f 30450220319cc3954fda68a08c46d3bbf70d616a04dd2ff6ddc5097a80d26fe424296ca3022100d29d63b88fbace7d8d60c9d639356f11c17ce47873c319655649cb34960723eb 304402206a9fb00f358316bd38d916d928e54813a66e8feab605bb8738be0ae82d1681a90220557c964459e7f66c25f9d7e263cacc7afb03923e1d6ea6e9e9afcdd2719a256f 3046022100937a8f6d3b1035dd81533368263b07e297640c0d494f0a7c4400aecf8d4a2e21022100d24734c6e61ecc4ed0031dace4e9c951b8eeb569cf08870a7da388ea2a9db61f 3046022100d0f85ef27b0aaa23691227767787c9bc1d24899cf7981fc03a49eb3dae5a3046022100841b00b9adf4b779e5c6cf8b5f23dd02aa2a9d62886bf0fb5341f4968cc370b1 3046022100b06eb59eb97211f951af5c6a993f7776ed5959f3a8e9b9eacf1cab4eb48696b7022100cf23d09b3aaf55de5a633a793e5216a95edecf942c30c0edd93e0de3c5b4d08b 3046022100e5765e15c9075b888469ab2db53d43d85f5e8c5962c516a89bedefd0bea4246a022100d5a2381b33749dcee984610c5d8e730cd5c9d62dc0fb865c8bdf88f67c9bbc87 304402206564800fdf82f52ce59ce68779c12e403514cef753b71824c3cf3d9c6439c6df02206169834630ca3e3a66d199f3b59c704214551f84bdffc1b355ddfdd4637ba5b5 3045022100bc1f9b841af89ba1238b254fb8277764ad0038fcca700dc67f81b945c5c4d83602207af038e2322884e8a2e7fe9a9d44bd51b0ebba0e63741c040c08dbfe84ae39fc 30450221009b833441128f56e905f0a9ce949ccedb308d789b25995ba8364efc04c01afbcd02207c2a0eb24b7d33d7c2c287eba9b42cca28195bcb7d86d71212110b83f1eb91e1 304502210098d922b29fc38e2e82f7c865171722874302d9e7661de989144ea032b9030b2e0220749bbbc621fb26235c83a75b9cb8a1fbcd0b3028e18a96cbd4ff4b7b560dcaea 30440220623a5016aecd40d696d3d1c3a83d45bc217a41441890e193e1773660875a99df0220593df84fc5ad727209b2a0c123d88c57e1c0b953197015814fe4a378afe4c01c 3045022100d09b8052562443bf4c32af01db1cbbda6165838433f510ad4c5799f4b29eab6102205d1f4fab2e2a82fb0d5ffd8d5b04d0b15c2af4a330d886b536bd7ff99b07fa66 30460221008cfa3d325ddd8860aebf3839e8c9bfc53139fad052f95e47e213fe0c3987de8d022100a4ef66c97d39814ca7445f956eab6e426d39e15b6cd07ee2779bc57128489a2a 30450221008cf2764b128e38d3940a35a240893465d91d0ed7a566a1d00e2b0462768784c802205e78a3c8fa7c6e429aec89c4c5d955bee1dd2accbbc0ec89bfff04ed7b654731 3045022100dc51e52c220202f874d26ca0f7dfd2401391b48b68c8a3b7cd1343e733a3e1ef02204514dca305635045bfa43df4f71e674c691158e1a73b318d0209e2648841d59f 304502200ca97492653cc865eef92457e9119675470c0aeea0c6ea5c4724e728a3babc5e022100a3274b7763defcac76bdd8afb73b858c005c8eb3139019dc26e8891f30e3e3a6 3046022100ae3bb959a55668185e226e95f98e273e708e4c1d028426fcbb547c1572fd40c5022100961308f38a9d61a94200149ef1871589cff43a362a79247bdf68d92c655602ea 3046022100e8e1ada0a8063b7c33a18be2370869525cc6211b2b95fd1af561ea963f391611022100dd1708ab107fd99de90eb9c2466c8b812a45108eefdbb1165a0d0f2c518f6703 3046022100f4e7ff3b18d8c6866a85e13cf067b2cdd5fe5df878ff9d84ac5e85ecbcbe7035022100eed2ee6a8e0aaa0faa470426756972e7bee5dac34df7a5d661f1ed62c088adcf 3046022100d62e5e092b8c725e464ebf7446ff6d48cc711a3c041e290d7f35e252f4d91961022100b14a768a322bb3704ec5668853d4cf529c9d228a9da8addbcb707966fd357d1b 3045022100af8c887909d656ffe6e45abf3118b25b3e17cbb8cb47219dd9e998db8896ea8102206a0291cd2cf531e4f322e94d16efd3a4ffb74369f22106dff0ec28006dbd8912 30460221009db00d77bb51e12b5ffffd784bd5e2a6f6ef2aadd37e0b02817ba2557ef79570022100c5a9d1e362e2e688c301765a37f25e760466044bcd09550eb4b982c20c734d05 
3046022100d9388c874e3411bfcb843eec421422fcc5d063948e631cc7a3ffbfe003cc1feb022100d42542664af9e225b6b51b96572cfdcc6a9710300e93693d2c52525025e931cd 3044022058114c6f29bbdbe2556af2a3759c3b1e7178d14a6e392eba59903346bea829c1022045769c6e1684ca3e2f2bafba66c702173a6fbd5bff755ffa419f8661f5b2be18 304402203c11843a19bc1ae223ce9a8eaa8bd161d0d5353dd88715976ac37591f4cbc09102200494db4d40116b22f01ed8710f3ffc7aca2b2f9a4b82c6c33e762c2ce0520311 3046022100fcc217695f16be0003403b88bea64e02e1453fffc97461575ceeb0f25953e47f022100b8e53ba605bc0d73ff2f0fb2c46b22c0813e17ca6c9feb2923e01391b5494bf8 304502200a5f98fca68b385a2c628ef31445e5d8e57d707ca2598d3631485a833a0452f4022100804ded8f4fc9bc581925f7eb5649d68884e460f8f6adc26260ba392fd523e5d1 3045022100d918356b3b9bb7cbac1de8f469845dc01065042fe3ef5a88dff3071c69f3381e022053b750d50960e4c71d370bde94520bd8be8cf39110339025a2ffe24b9044d31e 304402206a824231323cb6b3765c8ba920c4b2bbd222a25fc280e4e5d8e3fc8d3628749f0220120e8515c634f61e38082c1d6e7a6d491666f3dd8add1e4b5d159e271bf0e0dc 304402201376844b083818e8391f18836ad126b2e9b9a06c60ecd8241987417d669103b5022045189c251268cd5ecda4c2a338811c37c9c1ce463cf059d84b4c7b6785c82fcd 30460221009d3097e5cd52104278646a494168b54e46cdafa1ae07a0b1c677ffdaceb94ee2022100c80a1b0b66fae190a921a8bdd0f99010fadea8e85b8d84641de1ef227698b574 3046022100d7486ba4fb045d33757cc8b26c3490102d2241481dc5887c0ab76712123426c5022100fef18a245244ec8205e50c1b4a6200c6ee03728cea4ae67f0f7a48d1cd62c463 3045022100fc3f33a55cf1eb8915b1fe4a1ff1eba445fb94d559bfd9f41244b860057ad4ae0220166baefa518532a9d2d4c46ce257de0a84f6c8e65f3910962c90800cdffdc21f 3046022100edb339fc493179bc680f727b6a70e4eeb52fae74c8f1663474857bddf019c55b0221009fabb332f314bf5a2ec095acf5d8ade5c955c473c7868a2e1607b2ab8d077bcf 304502204c1281300217caaaf10e52c7948a30522e289caad099808c8ee624bb7d7248040221009f8e35e80a087980516c83746f07a2c84856464f5f565f93288d600714b9d13f 304502206292fc6ff4acd5a407be389c625f5a945d682a976e0417013bc554ce5c416a0d022100f90af4728008271b837bd78e4945843d723624e2d1dac24f511bedfbcb0bde91 304402207bb5673f133e5983a7e0b8cf3d2c18ac286eb59976100acbfbc9db901ec1ef1002200ae6c01ce4b0d77116c24e8adb3ba104ba928d199972a6cb001b90490b8464d1 3045022100c9bd19548b3910e57c9fc4576dd2c2be17bf3fe5ae317535615ca0e7270326570220683dbd722620fd6361cf1ccdf3ce59df1b789c8ad041782978f616b6e7b0ebcd 30440220465f43748e62173d5f79e8279ebfa7d125f0245dedff516d813bc0afa49e8baf0220698a55b0f068a8568801daed88397ecb787b24506259d50b15869f4fb7f586b1 3044022054d303b9a758dbac423a29ba1f7300ba897dae16cbb2cb95d068fb1dc16a946f02207e371745f48c6773d9e6fe5d3251fc87c0a30dce3c572fc5d3bbb21937203790 304402205ab199a0517c05ca0b1c89e7da222059539aeebc67e3c4237099f8da8a30a76b0220514e4d9de6fe1bfef0ea4101b132807d0ca8d602b611af8787eb61b9be957127 30460221009a6f8246e9c375a4e4b481bf090f5875da94c6e49502e2cd43dbdc572e3b35550221008da098ab1a1e8fdc61d913477f49a58c78d9aad40899969801961ce14ccefb2e 3046022100885dbd14e637fc63309a5082daa3c716d1776e6569f7bd22ba09bd760c3593ef022100fa4df2e2530444b75a027e8893fb509b1e4a71b20ba77b36f111b7532b41630c 3045022020707ab363c401e76e0a33b8a222666052e9073658bd373956047663b3150a24022100d88ce5d1a5e1bd0314b68629be563eaf9af4923828fddde00f61aa1c390f8edc 3046022100e83224ce55d3ef73165f325a1a9de947489446828393ad3ffe5f2a276969a9d40221008d5d0b8de91034cd47ccd0e03cd4c6d8605922a6942d416c6de91a055c69b838 304502204451d1dcd2d5275a6a76efecdb7adbc01227d9be3b84f319413699c0fa2be14a022100d75bd6bd7b091f724110b851294e080f3f0edc9811ad1e150ae4105a528888a5 
30450221009f035d4301b1a2f18755a6edaa5bcf08a68cb1bcd5922aca105e76e46a59ce8802207c3fa9229d21bebce77d176a0200e9dc5cb0c8fedc66fd4990661d5a16f9b390 30450220326a112d1a7029f071fe57826f043b59208aae17692c16f20c5f2761db4e4c560221008d0d2e42e311d50f696e878cc373ebfe8557cc82e9084c5b4f2be85833d59f03 304502205ad1fa5785d30803bd7ecfac13b7026dcb7c8a54775d8eda5a23101f248f9f09022100a743da1d4237adc81ec85922e7b5bd634878a0a98954411c14b191446cea707a 30450220273d989f949765c0ee486949935d4a885041ff3746062a832f3ec7429370212a022100fa711544f72a8b5e389cad5554c260d36c5324f442c17756e01d7d82edc28414 3044022046cdc0c7d25b6ad41a85c64bc10552e22d77b054cfcc4522e5e3d0932e63159d02205427ee22b5c494882abb8032adfd341242c321e6caa17ab397247e15f98667cd 304502204bd821d5142547e5cf11d84293c3c7ed2d5c873fe5d2b67af956764fb55d374e022100a0b48c678e9512b562270913de67fc96a11442dd89c7d0276bc09bd4db95cd53 3045022100a3e53e7e7fc1278ee86b08b3a2e4e6570c4f46d70a2cced37e0f214695471ac1022057151d0d3223a0385b814313af3e10816782d2bbbf7807c3af36d2a8c7b18f81 3045022004021792c6b92782e02c0e2dc94c74adc340f41a8caf94a8adfe27e9636ba86e022100cb308245db5bb2d68dbe848bb4ade942743dcbaa2411cd9936872fc4d742e77f 3045022100aa7e24586bdcae3e5bb9e9402cfb3733499d40a9f72eced4fcb473d3938818c902200b684431eedc7030516ba890c808ba0729604ef5bb623d52f65af4a8ca6c2488 3046022100fa672c769fe98cb8c5a7b3ee609f85d3f1da52190529599ed7af2c6724c3604402210089147ab9d7926a23e5a09bb298a833b7ab0baa76dc6983faab562540e9294713 304402204e5f82b6d9b7243068c2cbcbca1c3f05fbe14f6f8e0205cc6d5f99367fc1609002204d65880c61002e5b016a7a54ec5f2fd8f9444168e72ba56be0a9cc5973010406 30440220685c6d8751dd99c4950194dc67398cf9da210424a5d331007e691cbaab85198b02207d8c4cdb6880a9aed2b71b28f43a7286ac186a190f6e2103b5420c2320cca612 3045022068620b911bae422aa76b07a23ee47ab4a5f14fd4b21e7cb5e35811e08048d28602210095f5d252061e47247d574a22a8d22e827dabb70406d0a81b050dd4ae438b708e 30440220757a520799dc4494c6baa39aaef5d9d0b2bede16ffae6bd01494d1b127a8331b022010e780af18b6aa124b6dba64ae3f3f0a896723bae750f0845f88baee055648cc 3045022002594a4073f6c07a074562a652b1abf548ddb8884f03dfb7853b6abde6368c50022100d989c2872745f41c1178973c11bebf9a3dc5dfafca9179392b43acc87981e92c 3044022100a347d0b19e5d00cf0c3830cd66b7564005d5a61ddf2adcf4d107b240e52a64f1021f25b8e58738bc4bd8bf43441afef1c35c81b10bd6362ff6e7d6f4712f151c3a 304402202c2f103c294cc75eb9ae7d74d62b8289a953e526c0a7a5cae873fd34a5a0da9302207f6220de3f78a6f723e9593f38fb9ce923eca4899f2fd11e8054dd78e7d4f989 3045022100bf6a901eb8db3b0ea12242fd3e5c6f094c1a3e1dcb6eca4eccd8c2623d2aa57b02206165032370aa083215a2ad161d2e06059dc26e7bd2e76580d43b2b49dcc49e32 30450220422b15f5547e30dbe445642a1b83b774c141cfe1140ef0a1574be56511d10538022100de32ce11533f40fb4292b3861249e9b9139de70f7cf9c6199e1716100efd2c02 3045022100f3c43438eece26a78511bf9960c296eb113a8b53d7304b998736130c1b89bb7e0220320bd0f42cb477d16d32ee38bd43f67d2c4122bb4695273869d3c9ae600b2316 3045022100d8712be8aead73fe36da2b6268eeb62230f499e099568614d5d81275ed8eca0a022061a5ce5801ce3aa93d201458f795a484d370fb6aaf2b0eb9d98fb4653d181293 3046022100e4387c43be1482924e0f36e236a7a767735d3aa1a6d01dee55d3f87b8b4a79280221009d0b2a8974a105891fe7a4c366f5a5ff5ccbe537816ff905d6282c7e1b9e3c4d 3045022100c4a1e5978cf54f1629a6935025bccac913c3cdd7fc73c94b436911e32f90ab190220172d1cc3b7fe7c1f9a477b61fb15fb4e25730b362f56f25a4f80ac13730d2790 3045022100e268f4f56303780273863e3df3c622546ec3853101e12220c5502b874330092b0220714dddbc78e8e4a34e088ebed31ffd4a1dcc327db3dfc5c2bf255ac64adaf5ea 
304402200f86e90934adfd9a9b1bf3326662735d7326a0114645abcbce279bc0a93ca61f022017c6d0a5f18ce26e2eb71ffa9e747aef5320e6e1be49a505bc9d89282299f5e8 3045022057154375a6da3e34460db6dba0386392a0b635c74de02e66696d0a8d21a53a70022100f85f904c1c1fdeea9f6dc8b1ed6f4b9e56a982ed5db059a764c27e1e683051b4 304502204480c0f34755230e3e251e73f1e1264ae2cea4d888814ff2bd181147ebe7ed4c022100cd930b2811f6cc81ebfba67d0e2a6c66c34270d974fa7dee5c5eafb373376618 3045022035f52ae6fd78c21da8f954b5b69b1092cf1afaaad6ca1e55d35221663f7312cb02210098a1a274a805268f141e76c11edd61a7ba6156b4aa9f509775de71c7d6a1a35c 30450221009e4ef5171e9d9f5c5760a9d6769a1d4f0ae1353d4da33256931619048a0be7ee022051b0c54aa1269ee1478a55d02c1a3618323cb3e54e4c2d040fe1574d58121d29 304402207c0dde62854cc7238eafd48b652e1674f33f918321c8d01bc219bf46936f2d230220540d9536abdbf1a48cf6dcc5e92bcf1fd87b0e6058ddc79fcc8f8709ff80d69f 304502205e0dc2d8aaddd5866296c1383a6b63880f36aab5eedba2a1e20ba04ba062a56e02210084044f93945ac72029765007d45909e3559903d1fcb743ae4768f5600df41ba3 3046022100a414c55eeb8c564dd50d58a527533c8ff479d5a5684507c5dd3a150156b54b88022100ac88a2f9d1f45733196f74c2341093b7eb8e1b4ef21f331a981c7a1f511075e5 3045022100b04c24a17ae58311e70b123b60ecc199f0fc1e078566a9a2617456ea33d8dfc702201002bfb78877c0f798dfb7eb9e84fbb163039234b74648a3e4ecfed9bfa0a1ec 304502210091205880b39fe2a7d3a6a8bca81d8abb360a3f8f44bfd3cd3e15bc1ae059bd2002206167a7e9a1117823d4fc2a4f1d7c33bc16cecf023371e5bb5697ec840276840d 3045022100ee87076c82c59346d61fcd4d1d81894066cff1f729896341875dc2a1579c68c602201f7f84eb38904b8065f54619e889ee72cd07605396775e251da6300b84b4d048 3044022070b077b2005226d2fb238c9ddbc46e4a35ba19f56c6afee351630adfaa52fe7b02201ca74c77ac1b173baeb8b20f68f13c8cfa5e851abba2834735b92abf16c18af1 3045022023f2bb121659f0303ed006ce7d5bd80e52d7cf9be2dbdcd4a5bd00273e5923b7022100b60e7ebbb946a3811eefb7eb717a0025e407ff790fc69934ebd0ea20c9d569e4 304502204879bce03b8f7d2270c4e041c34f684633e0e53f9f1fdb0bf4b01c7625e3119f022100ada846db23a1b1923ebd90ce2688c3cd168c36d334353b76faafe70b5c9f4524 3045022062d5fc9cd32e6d3ada7e69d1aa4c7c9fb7113b53b99b24541d0906c6e9085855022100deb7fdd9be3c205c6bc43e8283da620050e5f931daeb4b1e414a503b9422bdc6 3045022100ff0e2e77b05d5ec3b557e1b8e119b43b3f51be6cc603c5962d32aa50516d90f502207975aa5d5cf0c94bcf1f827f7b82340bf05c27dbaf387a412fd033dc1ba4a96a 3046022100ecfecda2f136d4617239a0bac46fb4621831ac9f8cf448759ad4f8e90c595c3e022100e3710463bfa01f6aef410232772861017e86e17529dd1c291d1b4548f27a09df 304602210087b5037089708ac7d0d8f63160142f9ecb421ae5c0a4cfb9171585023d8f3e87022100810e0fb407702dfbd21512665dd1c88db25965fe6e4f9db73bc6c7c098ba66dc 3046022100f5044083031d653f05051870b4230457565fcc7d53d0818b2a68866e46f8991d0221009f6c92d3b07dbeb7cd19f69d2f90ad44682f99cb863b6f8bb53c192050219831 3045022077f8ccc8a0d7522801fec18b03303743a24bac62117eeca1286942e032b87a4e02210086fa7c4bece4a406a86200ec0e1aa79abbb9843cc0dd8a07c78ee7a2d3c36eaf 3045022100fb61874ab2c94bcb95d7c6abfabd956628bb769ffd0ef85b1cf7bb32d6d9e0ce02204368b05bfb6c141e55e40c3c762848872503672ba98b41a55ee12a650af1bcdf 3045022100c8e034658e45aed2ccccd1fff2fd78670dc9a8fcd03006db6d23c4c4ddc40c620220648a50c53787266944497950bed275cea8b5f1db3080e601f970d9516a919ebe 3045022100d8ba1f09af3e82ac986cf1beb32358d29e294572cc35d006af663fad01e2a64902205f8093d184d3fbaf3524612dc2da87b3508faac366af71f1658129474134359f 3045022100b2a02e815d7df210285640438f0eb51ed94b18d28a831c678b7e6044ef1053d1022077ac280f9d7b029064ed064b6dff34e1e8e7157398aa1f4ea5eb91637a4c5380 
304502205b4f57ed84e226b559a46f0a74c77545dbb1ab706c883d9ff75012f0a0ab78d70221009fa414defea5a2346c71ea3ee8d8370dcf5bd0c63184a56b34dcd62cb5336ae7 304402206b68cf869edc39163ce46a7b3d374c9452261e80a56c8e380687f6c7a1e656a502201548deac6ffd9b2c2055b33949e1dd8aaffc686141719c9b5167a02e4d542a8d 304402204ed6f298834993d1dc6642995e7212c067f184056023d85ea076531de8935d6b02207c2c6cd338039ba2cb3283da2c267f1f6c609ff03033bd1756f75d0414e94b3a 3046022100fc05042c862a87c34fe8932b21ddab897a6ee1d5fe614509ad77f373844027c9022100cdde599a5041f267efe191db9005a2b19f755d9a766b9f04042762f460b111ec 3045022029429c58bc76e99ae0553a1722ec18289b8f94344027daeade9a0e58a1381cad022100a70125b40aa055dc01ce84af203e2c9cd66302f573639f37a4f481cf5ad54e8e 3046022100c8a2b977a1c230d88dce10d31f5be514e6cfd6b844fc0f71afde9aebb5830347022100fab2674ff709eb2c009e9320319deeadd774da1c8f56374507b9e730f36c402e 3044022061b0fa7a53901cf543cbead64b12c3b60119a44a5e1f8c5955dd5a909adaa36702200c409b4f046cf81eafb616952e5602b512187adb44a73aeb80cc117984c1e691 304402203819488c6beefa18f0c95685552c31012ffcabf5420d933cbc88bc6864b809c402202de62c7ba1ea0c894df71930e5b17e9808d3332eb3141727ff65b7d21e4ffd4e 304602210098e04d8a70a9011a72917876b9eb7095030b4aee1d110cea53d943aa47551695022100e9af7b953d5a1b6ef956c659722a94db043fef0dcdf1e6c564ca50490f907169 3045022100b0b2a5593a91d0ce49e3453dc83633db07015a03a1db4df56b499ba38ed13fb702200f6d6b5837c5037406df04fb80c9ad7942f9b039293360f323aa760512fa1d26 3046022100f2c6cfa10419a767680bbba159704214e0fa29c3d1f4fe5fafe6b64d9226bf36022100f6e9afe0d98b87d94ff647c3a8898b3f2b647a779a8117baf37cb0c41ea880dc 304502203f047b603f30b6e69d541fd032e5fb294fcdffade34402681921055465f16a2a022100a208d0260c67bb480094b4e15ed5267d21ae886f24267c6da21daa72c59f2d52 3045022100b63f60c3fc550bc40dc980ab8f679e655566395cc2225e361a0f255358093f28022073469e47d923a110224b9df91b2c29f98a62b5dfdf008ada0e9d36645ada05d5 3045022100efa913d2a53d1c75a4203b308166561d74e36d6e922af3cd8d2ded84763e4ee302202b4747d1bb3796b22ab42d14aeec5add9ef358353ef69cacbf62bccfba90e02e 3045022100fafd50b06c280aadbd4b2f87fb2bd70b80494b1c42c35b19dd1deb8a143064ef02201b4e1ffba08651f2ba9d3148dc5e4a9570e507c269889d6b828726dc9d8908dc 3045022100ccded0c7cf57a103371b9ee252e5297889e696e650786b39d3ebbef3878fe893022004baf3f82b502dc84a94e890dcb9713f52478ef634f442dcd243fa5b1ed26b5e 304502201cf2627d74293e4050bfca6ba971bd9c5146e81a989f9f50e4af4c009081f4170221009ea0783591f4e35e4f1ef179b7464378a329c681cdea53e4cc251de1542ccb04 3045022100a40e97d294adbb85429dd4ea5d7a379039ddfb6249cdc7744cc4ce36d80398ef02204a0a3f211d5098fc222245423c495a8c220f63d700f1ee9cfe742782d47c804d 3044022022252ed8c41c98fa636bd7bdc56cd0347b6093a857cb6fadb16f4e074265e98a02206ade2a1f209c1534a46c4a636f4aaac4c063c400134cbc900e1adf4df289f090 3046022100964193f04d569f40bd201c1bbdb1a9d35d5e6ab7d9e4c6539980605718e8004d022100d341e35728e7cc3e58376b886af1944563fd25b57aa7647b0c5a7eaed240b06b 30450220406873ddc7af7dcd320493be173c89ed729b28e8ca1ef2e1c760e59c51a6fbf4022100f89deae3bb331d922f4e17e6cfd3f27160d3ea70efa7b0392f917c41aeef09f6 304402206ef8a101cdf17c396a9f9c1965578970287631db2a0ff87a3a0d20b05dcfa1a002200e52851cb1ecda2a3fdc0c69ece3da701657c798328fab78e27997b75c143d17 3044022001d739178452a600ceea4fc293c015a73ad3a16a7b71ad03077eb93a866d33d202206999599e2b336008a66cc5ab822591dca91553ca353379b5f197e73f6d4140af 3045022100ee069c003f79aa0a46261a2e3a69616c538543162d0e1e093a2733759cf1e79102201bf409d73de4b93510d83f3541e59126adb32bdf76e49a54259423670366b56e 
3046022100c7a1a2c5d5fc9d364e2e1d57041e874ab339a061dc3566e63e3d3cb5e183ed98022100d70bab40d6e261a16d32db5b4e27a6162748f949f6a1902ac6ddee3be00c05ec 30440220163df43469f77a31fcba6e1f42a783fbbf1dd5de7af9a4665c6ed8cbe7d67973022017acdf8daa1610a687aa3400992342b638824d9d8e18c34a947a73f93eea0273 3045022018ae0a458dcfb860ce1badcb09eb26c8a6ba37f1c9f32268b845246d5cf482380221008f66f823b5b55db9e09949604728b0cdeb7747f4700b207ef1fa50af0f67773d 3044022069660adfa20ee7beae71f787cf0ea96f0323e9846da7ad81805a527415ff61dc02200735299da402d896e2263fb7daaa6cfa85a4712481fe3e16664e62acfc54c0cc 304402202fa1d40c2a2a2557adae5252b24359923e5b2bb47cb572b0b58e68d74760506d02205ea9a921a6efbe83ff2ae1c49048efac1303d466b8f7a31ccf737e3527bcb944 304402201b7bcbac9c8ed03921b716575210cd9c14135d2391ff84d4c276c84bf1edd56402206fda57c59aa900c80363230330616f27b78d3268242f7ae830389a0e28046595 3045022100dbb44bb1e71e80950b5c3fedf7e0779a10480630a83a916fdfd4a135be26e72002200268279c467c63c54e7573559a608f9149e3c6b3712917504898d07951afa567 3045022100ffe5d390957ef6858b00735cf832c8b7adb85bd723e1a983a93f608831ddc14d0220303a5ae4e17c5af1badefde8fd0f24c4ffd988419b6009b88b375e82e686e6df 304402203a574505be5a604cd1b624e803cd027a8c1e6b47ff1e6182abef515beeb1efb20220675cd541b8dd1c3772586aa1f8a6940280532b8d687eeee54b17b3f8047360b5 3046022100b7bd140ed13c4a76939afc4c51360a424ab43e0e6135cf919ee9dc97371b1cd302210083e88ad37daa45d5a915ef9c8428bc184d85c18814720f0b6bf931c1fb523a55 3045022100f5fd5aaf5565b5b1fb0160524d2d245811a8bcc1784c9e02afe29afc03486a6d022049c965c6635febd95ef522f4797274448cc5aec139139f41bf2be0377582d2a5 3046022100eb92e60619c56820fc138971ccfdb960fe542f6c8b3a61e4dbc9d4d6ae669a4b022100861cbf4a1f7e3fc73d9aba257c102374fa94c31eeaef7a72b0041ec961493582 304402207d561e8e8eb525552f097072d38d21b10eeec8bd52a61f851ca69ced420dc35b022063588d86b46b9fb161a003e2570206c08e9c4140fffc5d883d75b9e40bc78edc 304502203f91a3e46988bf212798c4a59ed6c5dea37bc393300f6883f5eb5efe147ec1ee022100c6841ba2a860c4a2042a971c32758c3a48f997fee19456b16d69cb7cbfa2017f 3046022100ab6885e85da064a330c2971346d0bbb80f6f759695eeea1a01ce5dea83faf722022100b9f8b5f4d8e9bb15b895644cc61cfdf75fed1caa37905e8a22d6e970f6424c16 304402204c58e956206265fbd99dbc8cb550afbc830b2c54b87ff0057d8810a285e47dc10220512c9c667c17be5f205da4d545f4001f243d290e146ad2752be538d092b9e366 3045022100bcd353c4a976eefaf6f7e4bf9a1064114ad5b1068e3e2f1fc34021db9d53c89502202b5e88e724b185875f6f16677569e216d53f0ae4d1621fba622593a324c3537a 304502202a00ed7176b6b4d7c7da1018ce0fdfc6f2a0fe1df871d6c2b8ca9469da0ade3e022100e9306e36c75414e3e29dc5bc2136742708e03a0698c14361375c9ecb09b67adc 3045022064d59bba7c7ae1a97234610731d90225210e4962f8df49e6d64c8cf91c8bfc22022100b6e703cf55aedd4c31a69486e7912c5f2db0179b37c4c0a695824d9493a78f68 3046022100fd161b61bc4f2bdca2212672276b470d9de66d4f994357088194740d5dfa8da2022100baf5b0898c517697b497c15471c3eb74c19f3723086202251a8adbaff0a1f790 304502207e7c6ada0fd15d94cd515fbfa3602a10dd99de2c84c3d7d95c35649ff34725ee022100a276efc6680f9d49500bc64e8f4a575eedf4661091ec63d87f385ba855d3ea2d 3044022020ea8337ebcf2947501bd419ed432a6d64af62b8c3edbb588233c1afce97d6b80220251125d102c3d78ef06e17b9b840f4c7fe35509e0eaf204628bba4be3ca11071 304502207106e2f74757d240a45c071de81e641dc991ce62c920bcabae19bbb1a688c7ba022100884ecf0ca3d9cc5702cc6526eb3e96519de3dda2f3c7200602bebf36b41ff77d 30440220140b1aec0c637f347654b760807d65ea9f584cc2a40bb35db48f1870cafdc6ec0220617bf42bd322c7e572f0591e58d1937715de2bdeee6c819c72a5a0f52d695cfb 
3045022100dcc267afa62ab5fd50236c3932663c1c802df824bf21b55a9fdd51a59f36332c022051e9b9024bf5aa411dde2c682c31b7929d0ec2fae6b8bbeb6e28162add7d158c 304402202acbe8387317554e319bdd90d8cad1b603b4678c9872c34ae0bca5e2befa2b15022069ee781a09b8a46be0176b923f5f003e041a1f83d16ec6d418ecfd52dd31f2ae 3046022100b66b15788a8fa89d188500e9070e409191281800ecdfcc0506ca57961496cec6022100909eb0c94bd89f9a38ba81ebb0fecbb1c35d68a11e3e77d20bd5fe01a80471a8 3046022100f56e3227290759193288cf898eeb614a712e569eb48a9bea28c5684790e122fd022100e8ce0544be05fba369dcb96570401d1ac5e297435d51089ce8ed2da1dca07de1 304402202cc438b3e9ef15cd08203826f4cfeeb5aaed3cf2d88c7a72f69e282f645a94790220094d84bf6ba85f04878a281b52acd8feda2e8044f98dcf616350eb2762021d0f 30440220173e42e1855dd7d1b65009608be3e474a76ba26de3b545ea00fd56fe49297402022023f1e53327e4c55fba3aef2f46f792eaac9bba975cd9cc872d7fad328c3fa8d3 30440220454c88c94ece81be77920c9b94bbb44cc576e76402c2fd3802baad8a29aab67b0220090ea26b68770813fa99f2aea578ddb72a5645fab6cb9090f2c0d1368b991ab4 30440220422bfe3dcec81b30d9e334434b02b1b36eeb0fc14d593585113e6d025848a4690220662842dfe9e253951bdfc72bb8ceac7a1a64dfc6926960c588afe02499b76a36 30450220581e04e18d76b533949184d63b95757ad96dd0311e72b102f5d2182d76c2b76e0221009805b1df63e4b36a98858101123a99bb30b031e66c4279eeebfb9c913609fd5b 304602210099c7ead1d2d8ab8ddad53bcaa68f42aada0cedc5a699667755cbb495b5845b8202210094936fe04b39dcd1af9f5e2e419975e0015450c798823aa1945feb94af19645e 304402203af2fea32e0ab6ab677444699b192b63d11f0e74c8271b5ee686ed83b177533602201c369bb83d2cccdac9e8034f1fc4ee09591c20a35c0ffee1f942c45437f0ba28 3046022100e16313b755d3d60369120a1a4fe754c4d07d4b35a339d0bd4d208d95d46a7b880221008150fb7687423cb9b50ad0e5b1d8d8ee018eabbef01df54e3cd762cc33d1f873 30440220685e7c1b001ed4c7cb575df1097689a5549a67e2233553739338ea50a788d69c0220044eea812f35567b85df3ad1f0cfa2c96feb6c5c399817c53d89c2c9cc747b15 3043021f28988951e2df2274343e39c77cbd20bfb8c645349fa15296262374010795e702205528b4b2d893a9ffde17b0afb9bf606028fb8e5bf6d119dfbffa3e43da749ec3 3044022054d4db6c39647538279090c5a1b9e3830aee949b5d14ce65488e53c3fcc9a32f02207d20d9a7f5ac49e3a74b8e5e3b3484409c75b9c610d524715f115c27420f0567 3044022068ab5276a0827b57d53d9422a52b0de3a9991c45619df0548d4c8258b17e4313022022b03f1d3c37282264de8aef95af228f4809119b056dd989894046f22ae4069c 3045022100df89a5e3caebb2a9eb591a1f0ba1763f2a916591141b15b87e1e0bda97b9158f02204138413b1f378b15ae92d7fe20e5d2470ee7a044dadce24216dafd62d59b796b 3046022100c8112608876e3b66bbbb234f5edb809f0d18af8d54fc3ac905dfe87552ace7fd022100fefda5ea464dc8e69cbbfbe4c8feca88d1f178d03c54990e2c345b493cfe041e 304502200cfe0bea1f91e26f85e38ee8a6c27550a7f0fc7c58e07853afa041e5a5245409022100b9a8fff114f75f8469f1bc8ad4f3c6a993b3787c5f35127b1ea85d789dd13f1b 304402202401a2d77395e1af90b1ff68382a6fa0bcc822c6b663bd1dcf75ad4ada63127c022016a114861a08d70402364ffa6401ff89dc601fdeab6d4beda5d345c8df5fe183 304502205778482da312f27ecc325bd72c956b1b07435fad14f50a26f17c7d9aefed7c65022100906ca98685e6698e9d3eee2b20a2e3afad69534ad5780e5023354b63cd62ee52 3046022100f9b67b876b8c04aedce69df778ff78423568a8bc7aec52fbf66959bb1b5a1a7b022100fd137be4d64ac8ce5e055fc57b89aacaf4d6493832ea20fe12c88d4d4f638dc1 3046022100f9806653d81495d4682c4f91172afecab9b33107e7c339978bfef9f52b78d2c3022100b58c7e3687a86092280da20895def1bc436eec848624888a5eacaa4935bfc8e3 30440220574c6fa253a089dfc1998908481dbc58367788ab05cbaa2d028b3b7b3dd93d5a022065a2742d6b19f328241562356aad7cc773ef5cb768adea019c7f2e1b3ebe79c0 
304502200b542e79ca1cbe709f2e69445e2c2a0cd91503879a61e669e342909fb3a79f20022100853838dbbe3549c3cdd61e30f833734086e1e795ea917f87cc6b29ff15dc4aa3 3045022019b0712a91c058ebd5a8a83faa7fd63771bbde8b57603bbd7417addb5d8fc8cb022100fabff8baa2c78552ce2ce1fdc1eab98cd36625d2d6efd45ef229322a8b2fde39 304502203685176c04b615d05619d7030ec9dc3a92e4f7c89c2e2f44d0046a9acdced136022100c54c674bb4b825f1eaaa30714e979adb3e432f2b662defcf12599831242d212a 3044022073bb524b722a39c8e59dbb375f97784f712d9315174331369f24c071352c4d0f02207fa68b374882a3d9a09dc4c829991a5e1f372964976ed3492e67e54cf12f599b 304502201e9206b3f8f5b9da48cccf5af546c5480017655e805873173729789b779147c0022100da8c57b860d5c34ce8cdd2289ed4c5b7926f5f7a98d3dede6b5b2ac56b276161 30450220255087a8d958b4aea327b3b44c53434251e41e6e3895d54679e6ee14effd32210221009402626b3bced66d039e432e4ee112b00de7167752f29cdaed416d78bbff98bd 3045022100ada1437fa39003de71b0bf5893d6255280a96d95b1fab41df91de0f5facb44dc02204a68568a0aeee1467362dc26c86d7bd519adc81636326a44ba60ed47ab8f8743 30450220198da78120aaf30922b8eb31f4f1092cb2ac59a1feb8005ee290b1925f69415b022100a54322ef21dc1752b7280ca2e8532efadab59a0130892b68f05f63e2515ef257 3046022100889c6fe7d41eb63f8f3a38f15131d3487aeb63dc0157f21c55f99e1c78a5e8f60221008bc8f3ec4556a96580f62c032204d0af35996163212403e23e629b73ffd0d2f4 304402205c7156e7dbfd046f5e8892157e3b5a7a8454f5eca9cc55414aaf1d004743539f022028d96e2795045315985bbbe8e54135dbb6d5696d77c0e7ed9d19d66d3affb357 3044022002980a60fba0b857d1219d19344e81066803c5ac0ef9a3585a68f1a80ec7c97d02201a711f36dbdaf12234406c6188e00a33f981ec724b1dde78176e5cf8afdebcf7 304402207d77b9071efb0869ea008d63b2de05df9ecc1c37f1a2a85858b403dc50865f8e022006e498cfda81411c66cf980ec13b019ba1a146a3f0794e25eed1d10ec84cde77 3045022033072eb35b8b739fb168b121628f172775d41efea75a46721ea3558afc1ef8ee022100afe830dfa3af33356049752b1898d765f3e3712d85c66d77ecfd6014170686a8 30460221008765da248d389a69cd1930a649bc42f0f2d34d35ff0f83767c55f07b85f77e0d022100a491a5d71f33bc9b392518c91e948f8420ad46c9e9a8bd6f739f0f290059a919 304402207b998e88c0ddaa72c3304d566af6f6a588f64756cf96991f5d04f1134671ebd10220041a52113f670d8ff9daa2e2d24bba0a9e347ee6fdcbba72dfa095a237915f0b 3046022100ca82074521610644c885d064e64c06802e525796e7b30189dc9d20cf29644c92022100eefb671ec915a059e749f108099f201bb5af7ce19393dcf581a9b361115177b0 3046022100cc077f321af64bfc8ae0309bc443396cc6becf943e5a6e34b2280f660fabb209022100b334aec83fbef8eebcc06d40c9b0015ab4f1144804aafc526c682c25b7955f95 3045022010d9bb126f069a125e3439ac5974c5bb52714513be26d934076c6d3c749a9918022100fbcf3739208f6b79d27ea10dcfcd3d5faafd4dcf1c49e4866ceb351026ed575b 3044022051c213e8bd60b7df2962ca879d1970e2d5378e3f4979648b0c985d93400df7bf022003e585bb8ee57755ab609e95c9f9ee089c1eb52149c1dd58d0e19207cc79d8d1 3044022009b972ba1764b3bd1a16d827e6b41087a266287efad7e7a32412ffdd73c8fea3022008bc26f490f639d3534546b929122909d73987e75a40a452e1ef10ed1319f5c7 3045022100d6605878a8df6e8ec7c92977f9507cac503d815b87db84f75d8a8028056cdf2002204640c578f22e96fb05bd1b24b1abe2b10bfc336c7e2093dddb0b75e8dd4eda3e 3045022100d75312153f822ae49867c403cc81f3424acdfbdc2951eba895601837bc120a4a02201894030f3aa59a30bd79fb1108f6ccb32020c7d27087d387500d69f72074065f 304502207bac05f183f3ac79710f60a452a5d0b5f63c0a42c3b01370c0266a4392906300022100d4d36f0cc9e964cb9ef527f65247ee32291fe9f1459cd9284547a251cea47fc6 304402207eae1efac32be87c487a2214be280ccee5dba073ba385fbfa4f258da62b9468202207040eff630678df558ce470438e24bf465e0244b7c282373fb6fd43106a77826 
3046022100c8896d809859dfb61a948f06113d1f3a9122f7a7aa4036e84e4219f8e8a0d18e022100e8940d3ada40b0943b08a055300b77c04dfc40deab059f338edc891dbdda9621 3046022100a0f5f5e7fd9940400124ce5ba1cca582bd300b7ffc54aa8d6fc8cef9e688c495022100f96246ae788753a912aa397455dea765e236682747b7d1d52e3cd5a10b30c18a 3046022100dc988ad6e353b3b04aef99682df465c81eb2ed01411bb9ee3be047a58a2de8ca022100fc30fa4067207edf67cc35460aaf0dfa9ea184dc89594c778c7f4b2213be017c 3045022100f9ef19883a743a89f3512c1a75ac8cf38ca118a6d2ba3ef3d077a56006908bcc0220021f8e6981fed36499ce1b9d06a274dff9ecb9c6562b5c82a3413d702dd7811a 3045022057654f8c2b0ed33e521cb06a2bf54f75b6a44dd906fa72b4d58ef2594cfff3f6022100a86c374460efde168d4efc5ed19b57f9ea05ae81fd2f693333a9c3a9fa068760 3046022100e0df08435992350c22be6a590841e62b59d593709a583ccb584010c7d69326cb022100c42ca7dd1d7a2519de0ce0007b1d0a7a3bca1ef16bd3ba233384089bde232e0d 3045022100cc6651867f8f2ec0fb43743e79cb7c9f5be9e971504edae31e61ab37feab9bb70220008ddd3027a85183ae8abac2d7b275699d029f9ccc0252eac72c4258d5cddaf2 304502204c4d291314bf100de27fad32c6da7125ed04c345c62625c231915cf3ac85fe03022100b268dc17cc85858a3a7e4c741d648fc9750e0fe76c221b45a54e72fc903bd55e 3045022061756897d228311b690dea50472813bc08d244c03041fcc08ae68d9b67cdacc6022100da1d6b14ff4b575c1af2e4a46e86a32e58b7c63f4df6e1a24dbc79195738d333 30440220617c1d65bdc736e68aeec2a203353f8c17ef9afdfbb7cb5cd85d6b7ab193fa37022042acb385928efcd5b98ac09fbe9b0286437d191d0346142e63072f8ecef84f6b 304502207c274ad65482b7c869d4122fd26425558de5fc243669f2ae3119bbe9409236c5022100a45cb65b791683cf05332acd4a1dda823770360d82db2794a79fa456bf24a602 3046022100fc38c393e0d5f22a4ae09988025e3e4121b169323c669577ca05e0309ee5f90a022100e5a0038a69300bc803267c161898d0f2310207202e39a733c68018250b382956 304502200c79d14fd52c0f492b2ae8a00a4b2632e02530c9c67cea19bd5d03c761d56786022100b18844b8951ac360e64b4b606b71b34be86a37116309110bd980f6882d9b097a 3046022100d0bde43768b308f3ed10dfe75a722f079e4baaef07c338b1c35f87141c863123022100d013605b342346bc70720d1a186420f5bb6f69a4b3ce6fa9bf04a2a253b46422 3044022079d37adee7e973c948f40285cecc890e80cef2b9a2a9580ad87176b415bd1414022073b771fade7458489ada4c2dc35225d776c54a40c26c88270c8e9581c4928195 3044022046a0528623b44e4e6e9cc7021b4c5b27aa9164ee2ba8a21f7fdbd015124248ff0220211ec2878c52fa28011093909d4eb9739eac483f27c70abe3fdccc3e88e17eb8 30450221009b74787cdd5f18a3bc2d849f3eced257128f14d4f07cb30cf7bd0d128cf0a7c602203369ba658bd17418d1d7c61de4c2b6a6b9d84e91892577c229c07601ce3fa6c7 3046022100b716b546ecae4d62f0319c87e0373b27a5856094183762b2e37a584f7a237e23022100b49a5292bac8b71b0ccd636ca64cdb68a2a5f2298c60f3516625379aaaf9827e 30450220225ad70521a35066b0c41b38ef6524e36fd441a2d3b1edb4f9ca26d71867431802210093b5a59da4fbfc9920c144f9b795108d0f41df1454077e88fc2aaccb4f8bd82e 30450221008198193f4d59cd76da25a42369611bd8fa99fb9313053eb2681942affeb0af4902202412da2b41970e841c02892c9ec17d9c3fa5579c58c8405c8b2c9270216ebb34 304602210098461c7664b0eef52e02c0ee98f1620cbb45f8bcca784a46cf00fc21204db553022100b254274126f0a770de3a57e3e767784d26cbd7156f7392f393f6c62ae057282d 3044022027913e9727881f395da592b07b486d769050b2536e11ce500669e22102e65527022012c0d84fa05d0c547f4de852bd1ab11e37702d16f865575f1328e200b0dd8951 304402200fd7d50c03ead6e36b18b0161c9531c4643f69a488569c968d8a3fe7273be4bb02203b1252765f6752fa19f1fb065b9c9a81e6459e99c70fadcf3752c8692d6dc344 3045022056325c7cf1775a9a47034d51cad41ceb4d24147440a376971684efd047384e19022100f93e62563cb2ba40981c2ecb721883bc6201fc0552920978be559d67a64661af 
30440220054e1d0570bb70c250b22bfe85199aed96b27637d618d36877260645b4d2d93a02201ece3424e473f6536864a702c8c6d8e1e840b19e306f588eef04b76ba85cc332 3045022100845abc98260c900def5bf03e59c8fb4e0ca0a6e8039793cb2561236cc4c854680220370f0f50df039b2e331c8e33b49eb101a7f420014ca0777a5daac1e9003e8bd8 30450221009dc925a2a5f2e7070fc980f655b393ba28e4bf5eba99fb3743d331785de289fd02205b2073ee2efdcff1d4e06eb18f60ca8aab855e77e3c08220b04194d9e0c41d4e 3046022100eeb6e1b6fb9ad4a17eacb30fafcf5682d43fe48dfeab7284a83b97cf5313746f02210098821c58eeb1906c4517580306e62b1840d8ba4c0a604bcc3f1ceded6d30b9cb 304502201703677efc37b43ab9c2e1d83e035c076e3f4a618cf2f1cab471665ff88e6c2602210097294f6bbc5884e303e4f494f5558342491fd630047f340cbad01389b32e8a34 3046022100f6d63c37926d0ccd88f00a67b4cd16cd3d90d487786fbece1bf4eac1eb47a52202210094f9ad13cb6b5f4d8d749d6d7efa6c7e25e2e81f915a9bc905932afdc8e3d0a2 3046022100efdf87342e8c1ec6552025ce6a4f4f2a7ca6ae539ce74f5c41bad147940cc7800221009073e41701fa32f62ac22643ae580dcebe10ed9f5421293f11b063f211647bfb 3046022100e6d97769147666c606af5210e3f7fa3f26aa60f87902866a43081779e65132e7022100bd12626c52f8c87605f451b12a0942be4121d4114fb36fdfc7ca4ff691b757a8 3045022100e69ca01f78e0a3e92afa8d3f341292e7bb4128dbf03b092980e70f9215230a50022062439e9db9ca6ed92482d2df193bdd5a8a85bd909dbe9b426f1377d599a2d9be 3046022100f6a8710b59e92873a362f6d4d82e2d36fae35819303238d7b504b65990e3b2d802210095a8cb22c0a1ba309d47f6994a2693720a274b2d62b6e3f5b415631ccfdd931e 3046022100ab4da31c9f81dd182f4d06830d6bab286a9d4c9bb100e8225be1fb5af3696c51022100f47ca6642c50efb91b52dab5a198d1cd5ebfa1b87daea9b686849390d5f155b5 3046022100c7b65d05a7b533385dcb2988fa30b837380109011feeb08870a93efbc74f4003022100c2203668f493ccef636d85a057b2ad9849a1dbb6de0fda27081a63c28467e08a 304402201abd5429212e0884df5f9d7af3ad5a4998b1917a3388cd3d26c9e985e5a7430802203f910f90331b155de598c7f29b87a03759c874239f598457e1a6ce9adc17062e 30450221008bdfbef78c01fa42ba2a5a2c419cdbdb3bd050d00be4bfdca7eb479b29fb62510220477d42cfd211b1ddea4f511777acdba4b45b4615849afd6561c83ac0661f6e4b 3046022100cb3dc6be89835a6e4db76da91f7d53b04ccbec9fe2857ddd32c8d451aa8c1847022100f741c47b107ccffc594fe2e4f93cc4cefc70653d6c45e0af7cf82bbe8b9de8b1 3045022100ed291eeaa324f8187e948aa04bee7010d5d5207a187621fec611679316d2ee4f022055b1922e33527d109643263209ab0a18899028559f700f8ddb0247c0519d5244 30450220412139ee356bc8c47e57f1019ed715be81645c06daf537b95e1d246bcb355638022100c56afb0f0f65c222a54d8c2d344b67a80a995c70751e2d7349cfee4e88392377 3045022022fe1e4417e60495d2c18507ce1cb6f61bd9c3861d9dbbb108ef7f2a8a3d8020022100fe4817f60d66588eaee019403d658172c2a9102fa8ee2ef0be1492c7dcf0017e 30460221009927f02452957a0fc1d57773b1ff5e931d95dc6bbd4634aaee0dfb8c304e15ba022100ba099c05f1ab24e569a2d5f9747f57f8923d6f0cbb90df923690fc0e4c330956 3046022100e468a42fa827fa02336ba9b7204203841ba76b023b25be13f97a21887a1d1dcb022100f6825bf9ea82989212edd233a8504683d4b4dc2ee6066f0dcc4f6e85174f97ff 304602210099103b7c5142afc814e3253aae5653a2bf32cf98c59f80e98d5c6467d10b1bd50221008de7ea38c646fce46819cb629cf8c7ab82206734b8c9dc8d275bdea004cd519f 30440220163e9e3ac3dd87a24f86b7500b86487fcbaf2f7c4b5c4dfbf6705762bfa2487e022055526b01c9ff44e26035f037437f729fa72f85b193153890f7a87ae755d082c0 3046022100e5fc50b5c31a099a9d799760752aa37c305d830b8cde300920c7cdf2b03d2b9f022100eca67ff6f46514c4cea79ae7c73b86628bb85ecfee33070002ace032cc9bd75c 3045022100a6ee0db65e066674c44685d59caccb67e57826563a0e1216c0e41baa8804a1920220506b1c4fbf9ebe90934d4ee2a71279d1026a206875b687b909ca5a6db6cfd784 
3044022047a66c9c6edb998b2298eb65bba6b9bcd3d089477fa2abeb048d18fded4fed9e0220127b0cbe17bf48c943609186826af52286e9dd75ebce8bfec1b29bc9af8a12f5 3046022100e52d7970d5dac47c736b1c69f96e272a990c92ab3c8674fe2958e8bc7174d646022100fc28cdc5a27e51e371c26452c20b4f2875d00bc7ceb42e20eb3234091faddbdb 304502206f3d145df8f1b3cea5cd6ac1aeff13f2e5db70acfdc82b1801dd87fc44b1ef6702210098a10d6857c8aeda29ad666ba3ddf1baacf937abd842afe2b6557b1d67955560 304502200192f9e528d03f324ad6d98e1870ce7882cd6e89538fb9dd3e86baa3a30929b3022100f39a9b6115cf75006810afb87d22047c92601f2aceb63d8bdd67d3ec4efedc7d 3044022012159e2b1c864124bd2012bd4c05c725ca546595b17284b411823db5505674e802200522c230fa0eb69c4998650b0be3027308d757b1edf63e2b8b461510dc230e60 3045022100a164f80c21f8b641bb4eb481ac0f8259fcc0640774e90577199b409491cc548a022012c1daefb0b947438d07c9a9a30bfc36fb2729985564424679be9a0d3bda94d4 30460221008317980c7977683f929217c6597ff4321a74d0c9c27be8be6236fc07f1975035022100b510a053e9187c731104df6cb25ea19cfc78f9f8cb2472bf4ca52631dd877588 3045022042ed16f846968b432d2e4aa7ff69fefaf31899a46615bdca470f1235dc28b4b2022100c7eaeea81bb8b028e2937b169d116018d049144c681239e575c8091f6cfeb3b3 3045022100e6103f6684e65d697571838b3da0d70163c3abf65edb4adff4324ac66dfb324002202e84a1a4927536d76a658feeea4276ce88985011f75e7f21f971153c837ff30c 3045022015a6b6af78f7169447960f1949baea2b7b743ca1cea61c5c063a6876675da5dc022100c258183b9e2fb5609c4bec4a15bd717c059057fc4ad06bc9bdc29cb5e8120190 3045022100c371b8e4e06033bc7da32ffe06b45a6da7e494f06b6790f38e7ff1430284538c022019b446b420c3da3a07c34b768411ea923b4893e5a9fde73277525f1a5f25ec68 3045022100da4db82950e2b1686f9e4ef307e7bda5e34e5b56c436b9cf748c2731b666194502200197cd9be8ab6019f78cb5ef89c9a538706f60c704c69a2594119159cf7ab127 30440220488d48d8e2b51aac8a02c829cbd5ebfd7b18c7f55a7bfb819fd56e3c7959f8f40220174b3395f7f69a600ca3a6afc9464e1a379efd604975201b4cf2d64706af4a33 3046022100e6366c950f91ada35d88e048161a9cf00fc1e61cb6aaa0667d9cb6310382974d022100c15940c16eaab341c4665f67d3f14dbec8acf6bcf05b76c978692b95fa94784d 3046022100e4654f86721d63be9c9db541643a4116007ab630272b2c60ba5848b6d72fed92022100e43204dfcb1ab8ed554404b58665d1a96a507af969fe3ae7a735af67a1348c67 3045022100aa4e06cfd7c4fd5b38cba3160826e69be245de02346c80905814fde40943913b022048357bfc74185031e7edf5df4d0b144a76208df4fc80c5bbc8185882b682b44d 3045022100d1874053ac097802fd8353006f87fd0ffee8eb99f6a9dca80cf61fbbfea0e1a702200ac8838e38eaa535e86648bd0bab59d1f4a7baf3b385f91818e40b4f7c785f21 30450220098ff4545410368beb56e4577178e9e06052e5520d3c194e382353d17c90f3c3022100edb7c0fae7ff2579874f78eae4c399ef0b78ac87bdee88d483aef75e25577135 3044022033e4f31f60aea46ed4eb8528518f55a51c8b697b63803677445e6fd1ef2e801102205cbad209339fe20a3a1d37434ca4db9d82c17d4e6838388e6233998cb1977fc6 3046022100eca7456569602d46f46c02aae0bfc766c31bf650110567a60517c0f5820e6d75022100c95ee9ab06f0ea38b7aaf2329ee7a2dfd31155f6a3528acefeb815e3f93fc3d7 304502203b9b7cc20e1ccc11f8d448aa5ec16994869fcab413d38adf7a677c7d68d302960221009d43e4f9adfa0da3f30a380878647ab84c5c2eff30471bfba7147bcebf94ad42 304402207631da457f3971deba332b42338db35ba7f07bb2a761b46e162d9ed8ad35242502204f29c5bb97b9f03bf64b5572e34c94bfdd1c931d592df0d825022dcd78e92d95 304402202cafb3aed32d6deb6c52b27a610cc926619178b70756e5e6c151c2c89420772c02206af6c8d53cc22c9a4de0d6cb3969d0dd47d6b6345b82158e66eed9219e5f9b67 30440220044624399724d8d8a1742d8ecb023d4aa457445189f5271bb9c405b76077233702204a28948255c1b450a26ff959720a84561b0befc89b0d24e98131333733708ce5 
30460221008a36c5bfed7b8c3b4b7d6a129d3de0f72da947c11d2dddef10715785069082a4022100d1b6aeb005e004859f05503cdbc596fa395649aa6753a655ef15dd53e0655b04 30450220155c98e0df60a2a5a974c765706b3a9d934339e8a2f0bc482b09e487bf21979f02210087794bb256b705548ab719db82353194e2212e4588e75f93b0e5ac1be1f9b4f3 304502210084029283362841856ea5ee0598f1cfc026110b280154489359f5aeed434c8ab90220688c89ecf5f473eb67678d6159c53f32b9e6dd40cf1fc5b17f54dee12812de67 3044022073a2167c32de9ed42738ed03f0d7c240fbbfa4bc5389ddc08bfdc4ae51be629e022003b17b4eaa42b9dc1a526619fd9100cf3efcde77695b534d2a6557cac3d348b8 304502210081cfc2cca85b96fce6b2e43d443c6f4bdda318c97937e22c9bdc51fa9dd24f9002206cb6c9d3a80e8149800e459c3fe8d51f2ac8b30afba0f7c1885642dcf24bbb0a 304602210093e19bd58db889837f05991e49c7a764ee2f94f76cc73d34d6e44a7f9e44d719022100f04a71ec9e575c228de3151d7539e0b4c34f492264a7a03d4828748e8301d386 3045022100a1031a12fcdb675425f09976676ce2b21f3a19b504bd74447db23d6b1a9c096c0220789030f3545a8938ce2c8d03999487ab79bc0336fbd1bba1147fee41db44bc07 3046022100a423a39798b8d3efc8a70e5bad0e9b6ddbeeb9ab3c53e26863f4e0127298be540221008db372300db067e51494ed5acc70af06e573344d129aec3fcd51a1d63f5b0ad3 3046022100bdf6a782577d0329a36a891d8c075493e8d0c89a29845677a75843e1788e0e07022100da729aa76db24f2925708e8da21029521dc5339dd3d125329d1ac4c7d414af9f 30450220757f0d8d50554d2efdc1500c50118d238c28fe8ec85f5336d50278ec598013e1022100d3ede7755744597a3b8191b0a136e4fa2d3b58d1d622b02dc98bf507dbfadf58 304402200b5df69a571c694386d9d158da4200cc3ada82b863c2df99ceff408a9dbf051502206f1b393e7248d6cc859e37c3c895fa1fe399ec5df0e595e032f1bc6326cb0cf7 304402205620f3e9988e0687ab364d403149dd8ba44e6bd0dc59e3469963026d5350cb8702204e3d1e66e03a27329f55d4d0137f2e647f55cf9f914c9346c077518f9842d264 304502200b646086e2c160728082b09a15be0e94b30693406cac300c78f30d9704693ad1022100a3bf9d852a6d87c6ca9eb61789c4b437ff737ecde1e2e8c76f4e6ecb394b6cfc 3045022100d4f561fa97e6bb5395875748292444ce2161c378f63d1549da86ad1f7b9de2d7022012cb4be9fe7e29d17f24c96c07b8790519763c3b71725781f66edcd6baf3d67b 304502210099ed0eb73db9076e4a947ccde6c57697383c5315bc0c3b7489802ef90a127f5c02207fd785257ff8c86b9506c15688dbc307e060aab9e1b76c24e528f093974b2398 3046022100a1eec8b34f0d938dc1668820f0958ba3024c975cb5b23874f8d26320293629ef022100dbcbd02cff2ad0ad9edbb1faed85bd2a79daf0b5c45d76e402a5ba9260ef7a44 3044022063eb3b338867779c6d3152fcfa57902fa1e594dc85df89c2b90d36d6aa0b7dd6022020a2141a07b5002a23483096f569a9cc70ff8696c5c2446b668380d0b89bbfbc 3046022100a9f72e919b2b14b890bdfdfff1ae25e89c017d41e50220885c18289f0eeaa80d022100e1929d956658bf7b44e73e2027101c469bab8c75f18c6afcfa8433e1615eb50d 3046022100cfbf7c589d4b6082a6af98a29a9cba89c61eff993a705cfdc41f7fbc566dc2b1022100de089e28920a8b189021e6267b2a24a052f281f25b52c8b55db6e1cac988de57 3045022028bbb22c8fda8ce5bd3386458fbf76ff3c71c0c7545679ef1047ede763b321ee022100d1d19edf870d5cd299cc49aeb706e5eb38c8618e9a9bc75ee3a1e279ad0d0034 30460221009fc93409bf1b00aca094606bf8e9f87eed90e81abb5851cbc0ce7caac267b0b30221009e8b5e1960f888010ddeb8612da81ef313c3881014a9daa3e9fb760250c36cec 304402201268b50ca0ea6704c67a491bc601d06b2916a4dcb83e093a6fb31303492999280220795f4ff075907aa004e16daa701c85dfb1aa9d4f18314f89bf934bcaa25be365 30440220127aabce0a52771108d5ab2f23d474a3d932810ff048f39d30e6322cd85d33ea022038735fb7abf8aaccbd70c56175ad72c7c66f5ff05e28ae374f36edd7518dfd84 30450220319e4f55c60fc5050a00eeac01f0f3bf62a36a3f04ac7fdd8f5111b25cfea56802210095c892983cad0bdea90cafe62679b465900e5d6e27b173ab2d1f0eabeaa4ffff 
304502202dcfb662797e936920c3c11e924a4647b898e2a85cedda78929f908876e12ef80221009f72ada609b988a67aba2f04ee5da8d5f1f2cc921aea8a283ccfa03ec9dae500 30460221008e8503422cea34dc03282e614346eb4bda83f8667f8caedd514cb935455a7ece022100f4ac3d031682f99769e015e58fd3a12645d39f6484b013448b70509d83ab5b8d 3044022061db76165869117000675f54e42f8ed48c9b8e6af7e67dac9a02edb5a8ad35f2022051b2361c9c08bbfcdcb25977460cb07846153b4efb1ad17aa22645e72fd090d2 304502200979f160218d12fed9e4df46d81a090312029ef921ea28dd2178dbf9565093ce022100ffe3ffcba9e888438a76e5b31f73ffb3fd122ced0cbe9cc2f7e5d39858c13d23 3046022100fe33f37ca039fce1c3feae8d4f26716190381b694b12814bb6212d4fbfa5d44a022100eea6285e2fc12e6a76d96db0c1648a4e71bd11ad6c687b5fccdbe438eeea8c14 3046022100a9cbab0dae2ef84075151f9014898c635fe25751bee401ee2090f09897cf7e23022100f71f30feed8f69214ee6414e0159a93f8a95d7f2188cec4583357ebdaa706cd3 304502206d63fb30b766b40934c3bead50d919c3c63691cb27ff22ce34cf07b0c40c8b64022100a6759826767d674bf34dfb37604150681f297fcd2148a7adb21ba2ad9211b7a8 304402205c131eb83191b45a4223e7c9fced3721d37d6e7cccaebcf0520e30137e4b1bca0220234afd5ec3fd2cac1d74b76001d0ac8023cbebf3b34b6ab2f49b864ea709f727 3045022100cdf628e34da525304456d842d8657637eee9b1f8f518c6949eeebc04fc56bd6a02204b4dc2ad9e9ac8029b818bc1ffa45324b548283c75012553eedadf46d90a052a 304502207dcf5ac255840676ab262549a7ab2cf8f6c863b0c53d5af392ce15a7c4d3bd56022100f8a45c802aa2003b07856e6ce3509b89a781ea635757bd995b0a6b0d620503ac 3046022100803a03303336fa9842022498eb14d17d3942bf8355722ef55b3a930c6644f424022100bb894786947032f8737481d12f16206c481c08a7af4bdca733fa666b8cd7e42e 304502201e5160102a50724df6dc9ece464d03f5cf6d44a284490299a6128ed1ee3033a9022100995e64185aaca552cd227de952adcbb6eab5e3e76a821e013c1a784ba235ebd5 304502206cac5aec4729f28806b2e762b9b14328b4b0b486fdfa9461519294a5acda11c2022100cfacbda62276d93ab39f82bb11efc3a308351499777d01d3045c9e065f228593 30450220252660ce11ad633b46627bba5ca31860284109d5c8d80d2823c90ae907388881022100828261f5de149b11c54ffd6403c6a77c530eb83e2536b1d22678b44c2c8d8f95 3044022006abb8cdfb90ede4c42dc66794b9163486119f7211ae4db87afbaaf5a83b4d5a02202e40ab2a182be3b8c53dff697f05d458d8fdfe3f2aa375f9e96e676036dd8167 3045022074574cabab0151c5911d83f39c4e4c88f5fd66e315888eaec79d67915a5ebda4022100fef63b22acff95edc1ec65b364b0c7ca14c79ae1d60b2b170dc63431765e0db0 304502203db5c08184791669ffd55bfb39beab6679d71f637c8e420025060c1a4d664dba022100d8f13d64fbdef1cb1e23fa2d6bceecd47984a715e03606bf2f04a764c625e0e3 304502206ca1bc4a33e75632f5a66d00f857e9b9cf78140e15853bd91ade52be0f51bca5022100d29a416a530142e1559c2b672ec8c99d2d6a678fa5e278e795341267551641ce 3045022100d2547a2faf75a1e49f227579ff30bb061cb9fbc02c68d9142cb120b1201dbb38022022f617f57dc08e9485dc07d5d5c18dced999cf956ca02789c2a558ca22e46127 30460221008c04ca76520b216d225333161e493f3333e805c9e00277fd70f5dccf2467add2022100f577b6e43850437f49148f388461135840f8114c7c764b4fc8057d86f555c1ae 3045022100fc2149af2fe645a68add0cfbcd4a98ee718be9327672b136e874ce8a7b0d4e5302206f2abe0872da3aad95be97eaae50b542af55345590933c763279e250d2fbcdd6 3046022100b07a7099a66258f5621817370e95a6428267c176ff47a4b32f75c34c618280000221009a2351e28cabdc2e34e4eeeb344d4bc3204c36861e2dd5cc07505b1b9aae49fb 30460221008d4ab8e32d8c288d08241338b6428f57b3eb35172e9390016d452fb0993373dc022100f43945c999553fa94ad91ba29b2b41bb88a5b912d66740b674a564db4a39dd53 304502201945ea2403e09fa94d660d0fca608611b15f6e88e4bd6f20e1e2441cb7a3135c022100b1d077802cfdf3336d254c0aa8a019d7dbc5fe47f5fc04a415857618bab5c0c2 
3046022100942fc7afe7632cf4bab009cb383241af0647c0a0620f50a1a487fd316440b986022100ed88d9a9971acad07b247b3685d599f0e9e1ebe7eba8ab27a7529b23a85c5ec7 304502200e5d4fa905ce2c499fdcc42622bf6ab65f36d1cff484143a3f6b3918da502a73022100d3239c76a10df44b51e60e6cf801c6c62b60e8b8b1ac8cb40f1c4072c8023509 3045022100884a2579212c14226b6731cc43f78b1a3ffe093a92b1203ec42eb27450671dba02204b69f7ebb400e3c2c3fbad79f2aa5288e447f20388da44c471d31efc494cca2a 30440220566743b5dcc9234debe8642b42cdc2ad5d3412a6adc8a69ccb56ac054ef0ea3202205eba8273936166fe0110ad335202a86e9ff6477d4f554f0aa7c0e67ddf9b6eef 30440220762801fe122a1c4b50d0fd985f05ed2d9f3734b4dabb6acbcd84841f79d0cbe9022023b721b2743a28d7376f8e0981ea4d80ce9dd51beee637ed1db0dae133262e45 3046022100bf9d760d048a02e4040aa721f12070f94ebf854429d05460929586f091e6c55c022100f75eecbd89af64a17918461a82b99fd68b3215c369824f006c3f8e98e3ce662c 304502204999652fecf0578593e54080c5d5a494256f24564cc56d6d322d7246f2b44979022100e00c82d72312fb4d9c18bb82d01d26c218f90c46a3bf46ed5ca3df0448dd7500 3045022100e5fd256d7c8d7e4185cf144c3ae9674d9e13e2ff1c1ced55532b105c9feb57be022045b8c4c15c97bc8f7d14cc6fad6f572b8628a2c55c9eee42a1cef0167a398cbd 3046022100dd4146258d92eebaeda3b5e68b5b2c121e6814c4280f4c8bbc63a5e14c61bf2a02210096f72bd068f937687dbf8951ba7d8e6d2008c5d4a9529002b0296195c92b21fe 3045022100d75f34bd8c1e09ae81597c5ef545cd142024623a789ffd4d9e6787a3d86897c302201b608a5b9418b0ad06e44c676ed2209d51478e16f04c8da13667efff8495f901 3046022100a3a8b1fa61954d05a803a26eab8372bf3c251a40a023f108a5e0255534751fe1022100a2fdb6c536abf65fdb33e71784826fb648789b575cde4223d1ee6dfca656e321 30450221008c44f1fc62129fab54b3856d49e9d2f895671efe88bd66ff288e2cd565df7e2402202594d6f26891ca87e604bffdda2b990be0080f9fc6b3a6dddfb9fdc68cf17ebd 304502210092d4683a40865f7c4a624647e70614158bd9c4d596ac23f1ec9b1dea5a39626002204c9392abea56ea4294a9eb0571c46d2ad12e3dc1a2866921abd18a78d4de0b44 3045022100ee41526492189b4ffa3de85749f3a817f2f879692d6bfced049bad1ae2e9706c022014821a60589c089d44efd5c10cf21fb4c0a966e94e3feaad54f0b1c46b9cba1c 30440220345a64a8f49aa3b19bbf6fa1d6b5831db35ba415b7c836694b718f3c7c719a3b02205804a3151dcbc42c445cfef02581dab4ed4dd5b7e5bcd061a761a14c13c328ca 3045022047ff849ba5edbe581464098b8e22b73a1b658e0f95a741dac79593e8a5d6becf022100a44b85095bc9066712734d99d46f3ea8b6774ae0bf96e9ecee27a5909f4fb412 304402200775b5a49fc038ad09f7c0da293cbc97dec18987cb4fecd13cc9743599de6d0202207687206b76fa1530e8258549a75f0141bcf3d047ee0d51296726e4990b363a5b 304502204dc24e2f85ebf813aad5e1a0b2c0a549535be4d8cbcab18275f4a612dee0e1c7022100d3624abc344567160e6e907ffbdea93ec305abac1e9ab4186d85ff584e6b814c 3045022051ee85b5ea99b00334a3e186db4fdfe92c66baa5887a18c5abd441f8e997e9ce022100bd2cbe7dfb859fdff02f905e5e67b251d91f9baa6baf24718cf5a5c4a1eb8479 3046022100cad3dc6a162ebca8276711c85a31631b8da7693a705b8e931e561d99e4b85e9b022100ce04892125552e632f5678534020e7292ebd78ef79fbb097bdc4b44ee24ae79c 3046022100dfe1e7488679f9b923452d8f2d5e340f42a5275c49b06ec9fafc9c11e185597d022100fc4419ecd43c5705fa2a0123943204d6df94dcd4f34647d52df96c12d3b61ce8 304602210081cd21f554765ee103eacf1ecd2f00242e3a07113f2045f16b7306920f6d942a0221009d16d3fbebdc196e6eec1033e09cca5501723d9890d666bf8197ce5a1a6a14d8 304502204efe65d85563e3ff310fdb9b62134cf6e33b93927561149ae2d849377c7a7e0a02210090d301c91c2067c0616a1459a751501355b460fab168769c0e2df3969848da24 3046022100c1d8dac427b5a858bf0f0140846fb0bd2b381a75cb0272fe708209b78873a0d30221009e5ffd871baf5ab8c24b5d2264129235fdff07f9d50fbc387115cb65b4a58c71 
3046022100c855f41c08ae087f1b606321cd549595bc6140012613471f483a234bea92ca1202210086e79e7aa357c8efe5f2ed05c07a0ad51c2bef4386e8e696cd3dab7af6c25315 3045022079523c6d9a6784b922f5a849d7cc91ad73d6371b5b01bd838f9f44765157d75f022100ceda7ae1cd36190dfe2a389326a23fc171122b45023d83d6c108118e8388c6a9 3045022100f9e744eaada6d119e0a4f4e1a199917af5ecbec1a148ac4b07df5676740d0115022015a4d0d5dbd1f46cbbe7d0fa97e53c372490158446640b417f02a398f26fb3c7 3046022100d74f86f3ff2afe52400595acd8864f9598857f4bdeb682d3922fe0b7e73776e9022100ceecdf14f27a02ef6ab4c7bc2ff52ad79e950d9663faf565e780d793b617ba59 304502210088dbef4f4794a73d62f01575e8265b255cca702175136b4277761d011d88a33e02203bbf2c180ae91080c01f3ae76c24c2a29acaccb142eeadab62897066e3800709 3045022100b14fb55c3eabb7c6296ff4440c73937456f4a1876e129fc0099effaf19400bd2022023a4a43d951b89b950dd9eae2ec1c2952c8e949cbb05f2f03369347b71cdc833 30440220304c1a657e0e4991e19505179d0daa88bdd3722fdc242c68547d77627099b2e1022031e062d14b8d894361f02a2536bbf551b54cd9f8e000328cf0df1d1d917bdb73 30440220623538b3a1b3833f9814d72a029acd22115cbc762ed07bbfb4800175feec750402201ea334602e6b2f8b056e22e924502d667ac65f5f209c114406e26859ad43ded5 3046022100c15488334fdc2e022e81941e658f3b4fa4dda1c4f6a246456aeab92054f2ae7f022100ca747037b49c30861804ef12cbfb614feb5ef2d8193e963bfa3b1cff5b87eb69 304502204a0ff5c1a90ff09050ada99b251481c9ba14f9d2fdbfa12d467ec528bc171c28022100ac855656ad7925982bd521813ad15fece98987c88f94ecd8a8e4b8795bd0062b 304402206d6fa0b0bb3a378b57916a40925dfd8de4bf54070f7afc5d9b5f23a6c966690b022060edd7f496132982d9d1c30fa94129a95d7adc4876b0ca750014c2466862938a 3046022100f3d6601bc467b69b3d4725f8e6dd20d8fc30fe0b748444e4cd553f49831bd508022100f3ec275099157ac3fe655ea5eea54d201c41b8db481e05c0bd21110c9d2c275f 3046022100bf68e2836f61c852ac1842d71e5ef6283aa40055e643c61d7665a74ee6b02e27022100e2b05edb858878f6a1ff555e13826fe39b0ce51b46a66382a66911d9dfe74a96 3045022100ffd93885a87c68ba506514089df910c6224ce720707fe213ef4ad3526a079b0f02200706cef22b2b10ca9f1477ae0e521c688f9133005e48ed5edec0c88cbbf35a1e 30460221008fe7724f2aff2e0ae2350b70207274c875b480e8cb37d6da5b4b5d675087b48c022100c61e85ddc71c15b08f197431e1ef41c91bda2d7b58ad8e3341b9c92bb2e36938 3044022010eb4076aee9e8db85bfb2b65901eebfe70d034c90533b7b5d11c49d167c5dce02201b4a120a8294cfb2da03417a7cf747d1054caf8b037bbcd245a850509ef01b1e 304402203170fac834660b57637b06ab883567ac6eed7d95d21083187071007528bbae4d02205e415df2a9695651026a1693cb3906ea680f09c0398b936d8c5bd3f1f0180e64 304502206a02b1bb1134fa5169e79decc82384f5c2cd6b333f7653f7647d9185b2ab14c1022100a10ce0d3e8193e37d96d2436fe48acc57d93e63f9dd9ed091dc7e405824bbdca 3046022100cf9ebcd482b33684685da20d4291a043d5e1d14cafdd8773b77ba1e5d88fd2b9022100e85063560790ed485cffe0c29eeb3b1ff0825287421af63b1fc1d253f60eb7c4 3045022028e4615d5681580cf49525675fc047a500f8830b0775f05dd8a6d8bcff66fe77022100f60ba39ad297b1b4f9c2998fe60b60dd99656ca5cc39bb4d59a7e61c1b335bd2 3044022004b83a974f619405a4bab48eb6287eff5c3f15a64fb8f8ea840074e41025353402206f10e9954adf0d4a6d135ca0ab16038369372488956ffe869b9112a76a5ab97e 304402200e30c29a5256f914492340af1dad8b9c69db04b268600eb5f7b559d1df53156e02201d3b98648c7673c0ddc094e5258b331c7b784152df21438ed67554f4a02126fc 3046022100c2c37329893c1b50c90bf8ed6f96c1b6f4faf93154a51867ee796f7eda39e313022100c7d88a349cbd032ebbf496a7ba1ec8605effb7bc8b795e4e4a717b906b9e8977 3045022100e97dcd3dfc5f06157128162773579f98c0f2b3b48f99514b573b15674ac3d9d50220586f4470624f0a814a0e4286c1ae040ff6f3c444c06df84470cc367d2e0a021f 
3045022100a7b361ff8d06f91c248974a68848a9c623e5553a45efcba592e14bf7a9271c280220189a1b29e4158b301c9e4d8dfd7d99d8307632f481f9a581743c0fa8d5924eea 30460221008925f0ac0bbca12aafcc16f5e2edfc23fe7f5932925b0fcc8e7ba7ce76d84b7a022100a9f0b64c8c4e6d3a3b5977c362540c85190ad06e7d9a09d9c5513fdb4a6045f8 3046022100bb224b611e0e2033ca69bc927a31557fc4d0974dc5427f188ffc779436b7007b022100eb8bf756e82783e0323824225198d8d185190382850987c7c117e1d3e96273bd 3045022100fc27619d0b4f3bb50ce49de89832eac2d34fb87deda6816b4a5ce34f8eb9251a02206fe56097d3e97e1a9b065b90455aaae357b0e160021b20a1fa9d6d6e7f30b2f1 304502202f3932eb0b91b6df08d2272bb8ad0d92afc3b10f1c881c6df4dafad203052534022100fdc645a5f13e816de92e21eca92bbb63081af0dca68d45b90396447cbaecf04a 3044022015a209609728800e80b8038a41f956360657710b77bd4e9a29c827ee042b210d02202f7282f62937cbd5182c04cf0a2de532a628666857b2e94d4ff99935ce6328cd 304502202eabeea1506ac4c5d7e3163daf57bb1c1b45329c31a8a4dbf7ca71e137654ed2022100b4258c4b2343fa9988ce928e748c85e379705f79988376264cfb22b259d5aa50 3044022061cf82f5c30573f3d30d029cddd25db4975f0bea0f57a166cee98ad42a7e3c4302200d815f8439e950234f5a57125abc8639b5ba1c51118aa7ddf5cec6be32ac88df 304402202c9673a560305ac3b3f442504ced3cf4185e71cff47513778c2adfe3a1f381b502207343320815dd8dc5b67a2d152d6cba743ee6479ca5a9175a6c3a7f962c19274a 3044022031c5c4625c65ae6967e058316276f8080ba02152b74e79299e57a19d31bbe1b7022077ed30825b0064d39137d7a602014ddbb9ab45b20ad960ecf5d637bdc9a12df9 3045022057d3a75de202c23fd1989ac4a1e0472a6a50aa461714f873663948fa14bcc225022100e6a5804181276be639df1550bba3155aaa55e5cce00db2bb4b271e3b5e5783fc 30460221008e223b04e9cddf7086a252396c752878206317cd259d1656df52696e1d2d99f7022100a258b5f6e098c618b62d8ed9d97977d584bb12a7429da724a9adf1424757335f 3045022100dfc95e1ebb9d44d9a2e63dd3435bd1fe3eba76810d16901067d19fbe42657f0e02207f49e5c37aaa74b4e1ee584f78e3d9cecb352fd54da003936375859294249fa9 30450220573c59595413e901cf188b018c1869a3b8cb9e2c78f21550cdc1999357ca8bb2022100b6d5aa1e6c6176278f199308f9a365673a923e0afcd55d110afd0048bb756196 30450220694e4cbfa6eff13a25bbfc454f651c94ad175551b292b295966c2948be35b0a0022100d999b58f0d606c3e1eae3c828ba9605e41d548f1b4b771fe788726c8b37f751c 3046022100a94f1ca9eb5e07cbc9b64fe2b92e56c2f1cb1b13d0408329edf9dbc4d9f53053022100c60f61dc02144a1f1eeeeac4566b90508e165f8d5281284cb652debdc20c6ef3 304502200bd838640a84d388c93981d57be81984741c49a45644b00f2b37854b1386ecef022100b7a688245a4a441aeaa9ecc23b9d61c2c77ad381c50eb503d4773598f96887c1 3046022100a8acadcc8917d0254b92808c38fed9fd5b8c8a4bb6f4af54dbc0162b3347ecee0221008cbd0106f278aeee191d814a137559de146c669e602bb39b298fb54eaa9a8ce1 30450221008cbf9b84afaeab8e75b7afea5129cde164c434b8b2963723b9a11ee915fd2fc602201ede21824647b0a347c345bdc2aa37ea39b84d8013902c8090eeb7b36105181e 3045022100bf3c4f5e1fd35e7a62464bb6eaf07f5d71ac90a5a4e3e088c9830159aca4177b02204c3f872da910eb0d0a15cc38419fb2ed0f83560e3a75ef902b082f60a4da79a2 304502205cf83273f3bbac4c821d4d67623ad8af669d1611189afb70732d5c38751d19f4022100bf9680800b6e59533ce2c929f13e7054e6c7a170131d97fe542952926ab83654 3045022068704ef87d4ab145d2e18f1b81a6b4ba8f69740c8cae02be02ef2edec5ffa1120221008f06d56548809800328ac8b377903c31efc461c9cbbb0ed3570d356acc2fd5d2 3045022100cd80dfc631bb8817c01d4b5861c4be1408b70d7955c4edc562e687f2ec2e822d022017f49162e4a8cf7e092536771ccf3095fcae5f7ae63ae2aeacb8f32dd0bd0842 3046022100ff45b264be4c10c83cefc7c48a34fba2ca8686249290600ba3ac8c2d0be34023022100bb00c61fdfe7eb2a7487da77027e72a14738e1a5e66bf95723a68e9376e96984 
3046022100d49839862f96c1eb2acc903336f85a4aaecd82e55304119dd8c0c932d88a7e850221008e33031df1c5266521a6533138d8bc45cc44a5b46d4ef071b5d2d48cb2d5df1b 3046022100db0432670b56c78c2a1b11a03eb5fa78fe9810269185573915e6e5a9609ef421022100910f5071b0cf69afcc2f5466cd3e15bc8136bf12cedbe08d1700cae1e923d108 3045022100f9f2eaa20ffeaac93c83e20f3b90f03eb3ae3bdca4268b79cf4b756f0efb9c890220148652ef13394f5b3cfcbccbf2e53c1d72c97d7414ded0435690a19cac04aaa9 30450221009f2324549766b3c4a9aa6d0cf49a6eed904974a0171847b3536022b39c150902022011912629b4196c0fe922363b0ee01f64aeafc16f48f4ee99ac91f8e9d3e80b73 304402200c8d80b9a8be0a05e2dee09cb26be2c50c572f983cdb8ecc19c818d1ef4de5e10220576f805bd782aae2731f9e5418c17f4e8722e44cef8b54a7cadd3e58d8cf159e 3045022100956ac80a5b4283a02bcab94f62b5043e9317ea4b2067aee21e9123010192ab890220765de75ee4a94d42ad885b009482b6eb40bbf4ed20a31d0fcc469fbd1ed6eba0 3045022005bf7de55795d69747d7821fe636ca7ab8fca25e30987a1ad05db9591032d1a602210099e659d6604ccda8e118a4c5e13635a06cf675606384a728ef0d0b6fb2a64c79 304502206c4bb054875ede6cf431bce3e492feb4db96610c40b69f49615cd2341eb77926022100ca8c3f1b76380daa47cd8da8973c80e765ffff94852ad0459dccb581905e8602 3046022100df76d797ef1fc57a8031219ae04b64f78ba8cbf51ffe41b6eac9af54ac9211f6022100d7603d5bc785f8daeeb1da4ec3ac7b8b7deac09b097f5abb2d63ada74c68db25 3046022100e1a32b0ddcd23089dc4b12c5cf34cc5ddc9d2a33fec0f5bf88ad9b6426df6f4802210093fe570a4555a21203a40b5939dc3199726f0e36041ec1ac04d20f887e54d07b 304602210094d1115b5d35accc5927c98c3049c0ebf8c32763def487e64d42a89c88cf62b8022100dbc15f45bf7cfd77cf055f83ec2e6d8194766671a3c9e178b8ba8f859d08e0df 3046022100b0e171bf7d81042babfddbcad862a51339662d01d561ac558268826dd33fb908022100d7ba457cea97f628f0f6212ab9420f46491a5f37ecf96262fb4d4ca025a88b7e 3046022100d7404ef3d563f71e73c16b0915b7a43bf21383fd36dab58dd257b248055b4b42022100b522fa377cb60870beae77eecde9b4b933f34708ccbcc383bdd543bcfec8a4f4 30460221009b113fcae53966577e1c1600794bcf98148c5ea8070190d87236e6779e75fb7f022100977ea86da9ca0984a9a1992b0826d1e9bfe48ec1976b69c26385a7ac1aae2d40 304502210080d7fcaa1664b81f4109c4f4645eb0a4e430a99cb4615d7bfe330ce8e3f63e2302205301e25395e6a785e284d942acb1d88444849153f58aeddcbc497281af37e5e0 3045022100b6f565486f769759f67ee55c49c2b594fa86f90be4caf1a490bdc3cb05a881990220088d3aa7f4a8592925cafd2ec147329a56984d159472984f464d553eca72f5b2 304502203837316d122aaf33bc861b883c197732b984c21c96cdb6904778f5995941162e022100f6c193aa1055153a5896e147b02adad67083635342edd1eedc5b9fbdb5eb1758 3044022052942547432441dd92773e01de1211fb5b12ca697321a8310f560e7aba4006b102205e797c4cd730bbfed3a2b98d2a252e86332d3b3fde486bee332b2f7b53615641 3045022100934970b4579ebe1c5e91a86a485caf1fa8bd85970c5bae88ae767d009f6d478b0220400d631482f612bbe1e94a8797bc1d811ca3fe96663e314991f8627afc1c5a8f 3046022100ff7946f45f653c7f4ad6d7ab6004006791957203274c0261e1ead3a4d43cd08a022100ae8b231433429e7824d61f1f9276b87c8d21cd50119969e461a4eee7bbc80ae8 30440220344298abc5c8b34f088a62c18e74bdce802356541ebf1ddd18121ce53d098cca0220662a6c5f795b8931fedb0176dc84e4d4a1b0c83add54373f24bc764390bb1ba9 304402206ffe391b9f942132fa61569afe3d804d19a74c244810a6bb5858c3c887c659b80220030f39106a796921cc888eb586430cdb2ce5b5b05eda7d51bf989e9825d0501f 30440220592b139498697a03553a27f3a6b1a0f0b39cbd38e2bedbc5ff5919d2a3822c3902204ff813c58cf183da2af2efbbc27b5048d05b4d24d42315062cf8f5368b0f5981 3046022100d4497e5c54e8cda341c71ca8c08b07e09f26a44e1df96cbf4faad898b31d3db1022100b97456e204ef4c208ce5ef2c788c132a353b93656490f4973baec3022d8ed851 
304602210087a0da5b9f6008f80f3322a611a98c448c647fa2d0b7f17d778e4db6ffcc35ae022100dbf1052d1f363c97910036ee235e92ff8b21d43dff5bd3de15240563376c56f6 304602210095bed02328a845d039876aa5fc6239aabb4bcbf466810f236f202935b478d0f1022100d6e5b8fce1df1cd5a13d672f8ffdf8440c36c6323b3b64e81ffe6a8d2cdc592d 30440220312394c06fdc23baef2aea77811cc5ecfff985a927462bc36b89fce0e0d386a6022063561e3f6956890bc352c0eb8bafd5733e509bde37a0261f73b3530e5d6091d1 3045022100c2b93eddfeae0e2ee5b9588c7977a31d39767fcc82ef56a08fe86beb5a02299f02204170eaae24f2e054a0107cc227c75bd1b7d3c781340faf6904027e67e0048353 30450220429bb66e49fab1a0997a897bc32515b732e1ec4d598bc1c88a838cc128ec91de022100a503d8d01d0d8b1a708ae7a384397d916031ca88c7d7d530ab1dfc8fc56f69c8 3045022100ec02d81bd5cc5c071c9822ce9895a47b6726b0bc459b26a9231044cba9b3ca12022069976ab553ad69460af9ef2b6b55e461df345b2243bcc350e461944e6c11e09f 3046022100915d5553fa8a469640c31d54e0ebba98c7f460d9cc6346258bce074acff601e7022100e8c43c991f27a7cb727736fee09d70e993126370d8e7667aed2e8129dcfa74ac 3044022004dc08d408c9d33f7e58de0b553d77bcbfea1cea4a5376d63a67c4a1c8b5bc5802203d16518c10fcfdd7ddfd99f5c7f624c3cfe16bd0de4bb8dd7f5d40db6316b723 3045022001eb120079dfe0fe44c8c6812579c4263d55a0a04641e5234b14b6e495fd6078022100a97304c24a6869909575fc09c3a1e5517724366e0e40708fec3ee412a5e86c9e 3045022100c6f518614cc729d99981951c9cc373639a9fd28f3ff0379b6b19c814862e1f7b02204b23c0061b896f4fd5071fa7ae891052a4031c01da715a399a0fe82013d02b21 30450220075c4572abab099182ce0c05ed3cc809f6a5e52b59d132265ca6c2485ecc7229022100c384db5240b21cd6aca964696fda3de452a8839bd4c26d074680850701aaa837 30450220351627ad3d8baa1ef551ea87c4742168a61b67ef0f5338bd93e9aa6b1ca10f45022100c3b03cdaefb39c07cf77c282eac163ef567857e1db6596f795c89b676251e0b1 304402204a05a0ed07cf66c59490a62f5f63586dbe368f58591251611281738032e4720e022007696aed33e4f501b86c8bb5653de2a9d0bceab3d013fb264a1cee6205649152 3046022100e04e9cdb42b3eef593b424045828f2466c63e881a0ccfa0efc2a0d4e9cdfed520221008bf3362000466fd80a441d7b26181836a7527b69ccebda3714e167fd4ab509fa 3046022100d6eae78995628c10e93d9fae2b857a68a8e7c6ae0fa93e3a13e386ba9b9404f6022100e01239fa363611d140f546264d681b963bff166c9fbd18d4872744ecb2b9bcb7 3045022100a9a63a9d170b8612b9b98c29edfcd0613f40210fe1c369c32229c250311eb33302207ffc0d67947da6e1c17400eda9b658494167a0335e3deaa933be55ba3e457dee 3045022075f20b8b629eb59de2c320842b736c936639c5f834c7c006da0a2c2787e4aa4e022100c3d9ef5b575cd4037df1d4e19b40f44e9d2f80cef00f7762e75320beb079e17a 3045022100f77afba2359e11c99168e84fa31dcc5f78b71dc35900830cbd750d372714bffb02206d2c7ca2cd77e8cae841ef40fb4bedbe9dfb1ad2cd47a1a5c15a7d586609fd48 3046022100a9a8b1e868d7116ae2d78e4a86b4dd6276050208165d47e20daa3b703196dea5022100a306fd1d9314c808da45f6c0656c5ef9bca69c78b2aacef465f03b2208b21cd5 30440220614b77b7a32d4a3117cc5571863212826fb8aa28f9cf2e9de9fda4049c08ecf502205302df69e7f8031912db970f1530e136c97bb5cf652402f689f9c2786041d614 3044022047f18242ced3c9c8ac59cc599ac83054c9bf8a9f3eeb37dcb85f41418bf5641f02205834a4e6216005c42ca26973d061957950fc8b75fad58b41c18c8e8acae1eff6 3045022100ad47bf8cabcab34d3c328a3d792066287d5f5dbdf01c7b095dbf9c353844f58b022028bffe5d01ab242a117187eef041e2bc0c78a09121ec7d0bfb75cd90038b8be7 304402205bf809108ab0a13946c594a6be2440ba25f1d7d59d1fe4c6a0e6d701664a8a3d022056ae08208eca26abdf0f164806ec0887526e0c8130a5c22c617a2de5f77adc64 304402204dc154d3e0c3ba98913c682fdab49da7762da104f8f636248d734dc7eb6a3449022057f970fe46f266667ac2b1490acbdab8a0f21d641c43d76979e6064111b814dc 
3044022025733506cf547de9cacb5a2679de329a798379cee12d9a726b4bcac82cc9aa02022031cb1bca03bda6277debfd5efbced79f723b0ef625926722a69a28f2c4364f91 304502201e893ef4c4df6245b601efe5d833a177e79daaff4c539e58dd2ad77604a1217a022100e559fb9ea97864354852550848aba37c8a9e9ecc4f19bcca23407ec3214bedf3 3045022040f923c27ad6d560911d1f02229994ab2eef71e07082dc57d4325168b58f9f22022100ef01431e30d6eee07938447a0a3a6716ec57ab2166772c54b9d9dfdecb0c6df0 304402200bc2140e8b4e73502b1f1c46fa4b0d958de80ab768d90dfeb545d2eecee244af022011c412721cdf69250b4e9cd6a2e84c296e1abff720b0c08da15ca929e4f27d73 3045022074b13d44fe8879e1d24efbbdb5f65d506605bf9d648f1e25b2c689f3e5a8cfd9022100eb893dfbf929efb4128ae32cf86d062e6f52c2fd71c658641986dcbee63c92d6 304502201a08ae6fbce78898969d9cf79b172653bf69cf145bda707b799add86c6450c6e022100e8982f11a2f6e85c5536a034f1487ee85f1ac0d697dfe52c04afae449a3dcc4c 3045022100a2133d3ebdd26b06e348cd5b4cf8a4fd3e656b7c2a132bc932f982074fecbb5e02205cde0bc57915e253f9f9fc5ab497bcd79dc55781bb452c068de906f11dbc2c7e 304502210085dce60ec52595ccdb995335c6a90d0daf6d95101d3ca9584c32a4fcb82039b602206b79064565844a65c5a5f1a7a17bc71fa03e8b04cd457077457ae0b6a9884341 3046022100db03694cf8890a4bf52c019b623e7a317b069271f2dd6068c503278ed859a8a2022100ad3dcdb2e6676f618de3ad3e31caca54519ca3ad8928e83b60d2c4adfe2f8eac 30450221009f9cbaa7e0a336932f6839e729f04ad68ff424bdea6a9fde1c1b8d6371f91fee02201212f408607725ddb4349b0de3ede35241c379132b1fa3b45d12fae3c2fcb7e6 30450220269a30a58b48beb38795ca421e23a660ee52f629f2b514b7d353c04134e51330022100c831c91e004e553bb24ff3d52106b1a69b4b0b96d8f9f30fa7868065ecac6b87 3045022100c7a23a814d1f9aadf8480d765c91f0c0f54c0cc568ee8cdf386dfceb44a0da6402207a4ce9383597b7e20e13b53ec65ba0d439b2633b492cd642a4f993243f24e136 3046022100b95cbdb07ee3d10639b6c588bddbf8d00e7e1d71f432bf30c7d25f99346ea82e0221009093753f577cf8832b54824ca2d3cc24b385dd119108323adfc6c31083f88733 304502206ec607dc99390365d038ba27e8b9d4825f6f900ae945da9ee4a491e24cb6a121022100e9727b2c0242c9f701a59cbcd6296be1518bb5b95b6544e37ead1611ac0b2d98 3045022100feaf5a9ed6100ae5e3bef78c686ee51d47af0ed6250f49df26b911c05a9a888b022009389a2e4a8509979ea93ee05767c217f599c0be59535d44c8c8b2294ef95a98 304502201f9743ba19fe6ad89894b1b3edca84af1942206ac9dabcc0680f174e7118d674022100ac372163a44b2627a1684f4764cb56b53e9b1e4f0408feeb7226b63fc0ba5e3e 3044022042202cec503513efb9c72ac2187ca387d303891427e9408611a8d511f966379f02200171941e69ebf1eb7373a91be9afb7574909f1e322950e68b6d64336c8d28f11 304502210090c41c534395c0165aeac826706bf452c7a8e081f75dc2e3ec0ec8a1bfdc23cc02200480318698d189cb73d9908f5845828f9790b4a0971fc14f148cb632e47d8831 30460221009293993d2b44c89729e8e38c0459d59a1aaa9afe51ba268ac6488b5e925e75a00221009bd0c7c03c4c275d3622eee322054928140113b9f8056f85f668ba7af9fddfc0 3046022100ef1bca077fcc9015a92fd8e5ea9d47f48034743be2a41b48404f7d64699c5800022100accb9719dd943f80fb7f225481c350d427f82be8b2700447437ab8ab385c1d0d 3045022100c0eb74bcfede74b39cce9d82e51977e8bb48371e67979924c38719c7f00e7d17022046f59fd583f13085c19399c1ad4191adca7e918e0579a756716656bf88140811 30450221008a6c2d756cf480b10a2cd28dfec5680c2d5bc0aa770eef3bc572df007d7e6b41022006cbe36a68ca744da444e1428ab3dde3ec4e1e220e7de2a22b7f42c4befaf312 304402200d2cdb155571597ce0eb9f7d478299d4d28b9585c78e882747ec37c346b23180022031a5a6232684d813e5efa6cd5191bd501d491e96026d469169446f593102be71 304402206f8fc7fc8f2a53393d4acb71cc5f92ef6bdb9bcfdb900c549e45c3f8ec8c3e0e02200a5d2297a8b393fd006765e0042130767872f2ad5d6acfeb91d961147c01aff9 
3046022100ef72ea05088e0dfe1ed59bb7fd3b10ddfacc5f944ca8867f33393c98c4a48ef4022100c5102f05804eeeb6a88aead153ccb71c9706be992702da2a21346a35a795a0ad 3046022100b11cc7cc58f50e706a4b0133b7c678df81b07055cc50a32461e65c3875a9364e022100f4ad826e1ac57cdcfff34e6a674f3458a785150e3f2270bafdc8822c46f599aa 3045022100baa763bf0590bd5086e4be4ab2c114747b802b9fc2b422d756e35a0be09d78480220302d9f10e4bc6d286cbaa01b4a4324473cef83f95829ee0e02b1452511675a82 3045022100d32cfb33041f5fb69e4f324d39555ab4daf7e56d1865acc29cdff83cee51d779022016dac553f86ccb5965edec8a97c68e6e1bacd29e43fd21e05e4ab69ebe528d8e 3045022100c5b167ddee741af7125992efbb1a25e72db3597d393092f088a0eeeadc59c5830220607c0632edde6aa00f047c8a4097c3a2aaa203b2f11fec16eaf2d70459f40030 304402203bc5ee0c39fdb54c1492a951d712f07921673b0accadc18a714a0631c8d40f2e02206531b223be4c246fa871f84c6b49ef4d4a412fcf80796a577163753dcaf5df1a 304502205a8a9e88d7d1fb5a3da87dd83fb018e5d62245b76acaad25f80ee555fbb53beb0221008b7429282cce136e9ada7298f7d10a0b0f85a2a00a93841ce0bc3785508ed117 30450220601ab6e6eaaf34bee5854d5996b8939dcb6f72fdd4228f21b17a5f423b67af2a022100f02aeeecec4ba9bc0c0cf6750c316974460d986e4ba569bb282c7859d5c0084a 3045022023445a24da38701952761aaaf5c1d41e42cb2c1abcd1ea01253c5fa28ade424d0221009b484e27eb99eacb37394bbf93e951bb43a07b747a2ae8c0b893fb9fc4693263 304502203dd4096a20d589519f2a9c25ec1af7baff530f188beb7863767722db9eda4d3e022100dc7ba364a70081f19737e7c361f1786bef2045fb1a1c9f2d5dde1a74b6cb89fb 304502210093ffff70347c6caa7c04bf6e43d1cd9d9d61bd187a0f3fd7d20f27e1727bf2dd02204df8d196deb1b685e3c23362c8d368036277b0edf510e2d746a5febc15a84579 3045022100ab006ebe2ffc38cae59bab5029e9387090c0115f42cad00c7efabf4fe30415a902207a8a45bc4b18ec80fb9911727d5daca8692c1c86ccc2110e06fb1496819a7499 304402205661c8a0a9f52c7cc0d970d3b16ff9b7f1576e3014418bca42a43ad8140a7bdd022002e9bd5ad7d305380a8e3dade4373a880436630b7395739fb74805cd9b9ad8c1 30450220506c1ec81903e49a93f1f074b3586e9cd6107875301879d82cee9ccc49d3acd0022100c818a016d277658cd35485e9db2a4af0cb89c642ff91dae3045068463f33ed22 30440220611eafb66f315fef84818d7d941cdc60940a9533417ff03748a54af2781d5225022017e960bdb1fb393d53ced5183faee020110df43918e95f16b17d11ca29f51a43 304402203302dec57d557fbd51d3d394187b877ab2b97f068e8219502458ce3675c3235a02203379d03d99f889749cdd5396bd6bb573ba4e49554c35edb297a9ccc965f66954 304402201c7a1a1f68268642be30b9502c64366423096b9147d5bff81a20966cda79992802202a469c40a96b0106781e122c86ba0f55e4abdadd4824d8fd2d9372850b982345 3045022100b5eec15ffb60d45724e79bdfd7a63d8ee95bf60eb14e30a0d906c3863004231202202e8998dcee40a38cd8ba331d32ff96ab30220bfa05503186c910f456a464e715 304402204dc48012a0950c289d268974af55bdaa0905dc57504df570bad4e997b445146502201d64955f6f192455814e69cd7eba5ca3fe41f54e8b5d5d790e5e8823c532a572 3045022100b8188a6afb615a1fdbbce6bdfae00861e1efeb03a19ca0b441a7e2aed55c56c502200546dfbbbc68dff1c24a187b579fa5d347e78284f4a2c9e241d0a2cb61188ca1 304602210097d3015079bc2afadb2fd5b2158642684117447c30d904dec33247cf2b2d26f2022100d64e969044bf809283b371024f5fbf2dc5cfd01accb39e06d864adaaf0e5e227 30450220548e15b6217f79a81609a673e47fe4cffd74fab63e47d57a32bcd522a1f5a115022100e5b91fa2963fbdf32677a998a49a76c6a9d15da1ba421ebdd9dafac6aa98d1ec 3046022100de075e294b0158d6f421fec0eab14c8ce76c64a2d546b3373bc07f53256634b9022100c02a73308c0cb7668b57866137e474424f0f29c9cac710fe155f1018744490dc 3045022100cfa87560f416233df54a961b6225b3cd1eff8219450cc575291d9d8fe117865d022015535a3b02c381b87e44ba9484f02d6834af320d2bf17b32f144cba4494024f3 
3045022006864117225ef5f3cb6e140706cfed8a96bff8b3beda26dd74ab3da618c83abf022100fd8f094e289db7707138e68ec0f096faddbfbdd5d78fd991bd4003eacf40f67f 3045022100d3c8f89da393340382cbc0e87be2d25eb3271945f8fbe2fed81f8d22374244a6022016a83710c61dd8a90c699c7788e1890694dc0141694a0e5a314fbcda86e674e8 3046022100c1b63fc7957ab4dd8c9288ae5750e315a08b99e89ea601c450ad70767b6553ec02210099d3c96b914d9a47721b69fbb6bbb8253332f04f9ec4119a13e5226c9b52f5a1 3045022100f35cf377a5ecbcc2b6b558a8cd6bda3c4353a9c2bb469a3ca57f0a95d75e67f402205e9d9dedf017e4389d129c7e57adb6b6168045d48d555a2691166b6b6100215f 3045022100e92e6b5b7876b92b53682de2881e2d9fabef45c01dc56d2c8df68f96e566064e0220299294c4620a5612ebcf96879bcace8c1f069690cb219f98d89aebb76cd0e63b 3045022100ac9556a8cdfbddf70af1be1d57722f804b7966c91ea0a9fceeecbe3a24ea2b6302203823b337528d96970dfde8b8e4c52a01ce227a1bffa8eee77098bcb6f5d6557a 304402207ab4da1f3f94b7e54701672c96780909f1f357e7eef4b709fd4604f2b7bb26440220266afd3510da9f8dfe868e00362632930eb8567bc377d3d11b3155f3aebd5af6 304402202fb4aa4dacb4c21b48f1d9efff4620d5d4f9a50db976576c85e8df1f12f42c82022059bd34df1957a16b6ae8e329ef5861c764a588d18ae4f4b8385bc90ad952e8d8 3044022020f7543d63c1391cc57c37476e2c1bcf3ea25f422d870680cd58cf1cac880b26022060b39ba5004a6ae43eb09423ddaff206e2fa7d66fe94d180996ded59e0777dfe 3046022100ca84bce88886bbbc0d64e81dba293cfb32177c7db5d193c3f957bda4a9aab81c022100d4fe43f5e11f6b7031cb4817141e9c91f51438894c4999d8e1908f8d65fa6a5b 304402207b1135e511f35985803f15b419863b94b716c55c751cd8e5d731651d7d7a473602206d56b2c878fcade78e9edebadc245c5352384e6916856e00da86160a88dc4880 30450220388e68fb38df6addf1b9bd39b994f7185e88b0902ddd1e9355122a7f1804210b022100dc6d4a2ded8be94b62f9245bc7654ed5cebcd26cd4ef3fb513218f2a10be9bf5 304502207b71e42faf4813953dccf4389873f25674895caf729ccdf2db3c7b79d2d63d71022100c9128f5d80cbd904e24bf62ae1b0f2c4e66b9c821b8b4b449ba614d44ede8927 3045022063c94b854b31d2fc414fbf0df5180e99556c0493589572eeb71151a417e3cfe2022100b699c3cca379a006cbcefa048beaa2cc17aafa6390ac431902ca5f827bf6452b 304502200d1ca5a7df8bc19d859ddc01b2e903d8f2215a85b67a008b40a16785a666f889022100bc0e236aed99f39183f0a7a7a9dc9280ef37ee08e2fa544254523f275df43a10 3044022051fd8e617bf1e757c15c3ceed728439c5bb3441aac905c57aee190023794099102200fc6335980798467eb55d80b96fcdd5833ef9b92b2932b88ba6dcdd8870dafac 304502207e3461305991b9898af0c5dda78479b8d1f24fcf10f5aadd0386db3584464370022100d13fcce2c229ef619a5f7b56c4aff5a9e6a7be2fc41846eea67137dbbf506de0 3044022064dfd67384af3e47f882c9c9925be3b074042703c8b4257803cbc7612d284e530220079d5c2e869c7d59591b5dec17ea098c9d60e9f9353c52b4e43443bddea59f03 30450220254dbf52d5002b9801171c315cd3bb98120c8e4c0a89bdb945bdcba108ea5b58022100e830ac0356752eba7216c3124d81be848595ccc51b6490335989fd7ee3cb5edc 3045022100f66df7ba9af33ec2ce89346eb0b775ca89e681772434a7a66fb607769ca7c1e00220101af5d10067ae19c76f845277bcfca8d2f41acea983dbd16ca2643df7fb8047 3045022100bb9572ca419b495c51058ee63e2b8ed0ec3b8e7241f8b43c0729bdab0e46e2d2022004234e99bdac8a1a533e11b0568173816a6466ef278fd55c2c1cc3039761a498 30450220702585aa0197d335ca09c818dd84ec2da4c63f17a8c1676d1ab3d3b6370cbfbb022100fd67c8e6f6c1d6e68664f38be7a04ad40febe66e76b40a2b4280c028879f756c 3046022100b0cb2899da6a7ad23b8b676d5cab62ab4ec417c50ffcacd5ed8938da135a4f12022100e3f75c216f1692759b58cb2748ab276f363dce64c6ba02495cc1ced3b271a803 30450220228a4fab4d417ce4fcb6443b2ea2b47e488b313b3af7ab1cacb8cc2933abaabb022100ee8ecf62140a77071b3730ddccae367cda058dfcf96f613c7bff7352a4d9e9bb 
304402204662f9dc2e357bdeafb1e57a4dbff3b88eef95141adbbf387bf2ae35befafc470220614676ed9f60720327b11f7c213e4db69ec7fef03e95f0d3b4f78433e3ac45d7 3044022000aa444f29846558aa5bf83a6b8ba9ec566c372991dd7c7ea967378edc26d5d902200fd8100ed90952ecde082d228d2088eb5c21ee99d7aae4c3f1bf9d15b851bf33 3045022100a8c9131fa08bdb2b438adbcf1c59f47723b756b5044620078b34c30f5cb6f38102205f9c1c609aefc03bae37bb351ffeea662aa1cebe2c28e15e912c76079791cbce 3046022100bd36cfc84e59ed4b86412c0ea8a855d7ed8148e223dd6e62110aa93dddf57d4102210094ff4265b71c44f838c1c77b12a01b09e044b598a2bc16fb6e602dc9c7d37662 3045022100f875087650ae8695636f34bc0ebdf199baede0ee8f2e65a2d9a63c3c5812ac1d0220532c93c70f5b98cd0c5e2a6e5306d17860fd05aee8f731e32b96ca2446bbb47c 3045022100f25732c6881d067dd3e3575926d0d357d6fedcbb9fc1cab3685a88dbad7ac69a0220083689590b3576741b09bf1aa04da5950915b950c32b1e4d923c98aa953cf54c 3046022100b88668617b121283772605b575e13c04d8b49b8584bf4b86292dedee9cd049d3022100b7d44e83322bae77b97b5c4a0c0c19d92d5c26adbca7ce005f9a802903b4da4b 3045022100fbae8bb2c0d1e21cb5f6debcf679a23a3c667e6408126fba497ae8431172119a02205ebbdcf02166332708d3c5aabc3d757157706865d600a3efdfc5e10bf1299559 3044022062a7bb57eaacd083dd7434b85f2d9607ddfdb70bdbaaadddfcd9465358aae1380220391091b7f0c487ca38c00a710a72136b4eab247f1d2d809ce57444164af94700 304502201b4ecc0270366dd73338ef79dfb1cf30142e99b1b952e6a46bf6ff52ebbe3eb10221009e7be9768e10c1a87fdbf45de281c407ca581c0e1ce6a7d23a3055168d42fdaf 3045022050fbb0e13a31a6bef5efc4e52f2cc303fc3821a56b88091619cd986f2037d9dd022100d3da0939d61418ce62b26e20613622e6a047aa30380b24e238a901007d365596 3045022027d69c6a4cd62d8cb0ca36565bffbcc0db689c8234528136b6cd534b1be89a6d022100ed88af1a2f5285436a0ceca8e085207421e9aab3f1fb0610b93e30e545799d11 3045022100a73a0af0f82d687507cc5da81607bbdb2ece4f86c563a218fc59522a876c3798022001801b92c35024d66065b6ced5da6020c9c801f27d9069b413d15cb9af511c9c 3045022100b972f6a3aefcaa2da73c9569aaf7314ee26665578ccce92388a177c439aa59340220427c5287c6006335f8ae79733a8451fb9cf2bc911f5290f23bb3a225ad4f7a10 3044022048332be6dc5d60fb44557d3cfb09993d392e57eab9928667475befd9ac69230b0220506ce6d54eb6c01c122f8c46f3ead56360d65abcf63f20be3fd33d52bdc3a54b 3045022012068261f132cc31ef677a99d1a8bd8780576e49be4f54d54de63c6e74e86f43022100929baa633da1ce4b6059d2c5085a06b684cf26fa84f088f39cd2f692c6292faa 304502203853730dbdd299ba48d09820c6d4b1d783265b493242d2364a375a8bf331ea8c0221008827c6a116537e821a213b30cb3be5ed71a9ca0dfde9edfaf9fe00e024c5964a 3045022015d0fa1925046f285035ff6dc19617bb00833e8ce9978ee04ca94389422075cd022100d3b86860d9f9dac823ef57c099942c35e6e943ca07bbcfa8718c0a463fe7661c 3044022057fc39d3983438fbc4a167a60f6894c2300ee930699f1e8f7912e93c71aed57202204bdc5dab4b103bb23530b754c93c93d110904024d0909093aa0f0243d5d99a31 304602210095cf5021158e9757673bbacbec28e6cdd040e0c3b32bde52d48fb54d0cbd53d7022100a684d5c19d39f0414dbbdc447b9071e09b2b1d2efbf3e33dc96861b1239ef9bc 3046022100862d8d0702c362db5c2988ee4fbb40eeab8ebec1d9d4e821305432f8e9cb24bb022100961494e21fde63398c891aa3523af427ac781ed8a4efde093b7ba6b29b0cc066 304502202d31cd2e4539f0163bcabf9ec971ccb77bcf544443ed96f53c63514c6d094f9d022100b1fc84e6784751846f16a42996fac763f1630eab126e0a55fe5bf53a17282d02 30450220617853dbdb4e67ad5dc7bd517bdf4e2c7aca703d4f902ca466dfd9d0f2421060022100ccba6cc0b76aa2a17d05e02247a13e90b026339da42ecc5f5db1acdb96050850 304402202892dbd2c1a259499e33813e382ca18ec1394ff2ed9faf8dce8cb985ec5c9581022016fd612e745bab9045906bf396dbfbf91ab1af565f9baeba2a310f405909a83d 
304502202b05e16fa12738fc0c7e88c0f3f0efdf6621877640563f2d9c56137689287e60022100e8c4023bb34f52a7f33c812726d82d82b1b89cd4725cda15dcc7e1edd7863074 304402207692d1a1227f0a08be171fd16608c0071ddb205a51d9c2226e947e574c08a55602207d1666b7303cc674a45c1a9d63b3e1d1927c4e0bcf2a4c115d9d3f29a04da4c9 3045022100b2a9b046024b3bbfd8fa03cdd6538cede0aeb3656fa0e985fb7b04b58a116d4602201aa0bccd8ba798178e0069840da5681d93e2c86f58a3f011a164419fafe56919 304502210096ab28df16c43189ecdbfbdbd4f496a532371b3e91d9f61fe0c1c74324d140e9022042402245fd91911282b48aad08efde68adc8e05edef5093370938336af7646dc 3045022100d294ed961bd2f0817c777b681932de3f8a6b99f929b1712972a5b4ba98876860022021411c692229ee6829f0de85c265ebc8644963f253f35ebefa57669475b66d9b 3044022055361279ae2ec3894099303797ac08547df35637cc87e3b24945a9fbd0590ed802201281b80dbd3435cfb44bfff776733d8d8c658ca4e8166cecd7f36ca684a5c291 3045022062d5668d2bccca1abca638316f675e58d152a6c3f7cc9b6efd528f4e983697e9022100e80a0d9d98c0f038280a5f642e95bcfc0b8d4e085d99b425d43dc7738f9767ab 30450220122853fe628893d0325766bfa51b4bb8c5a9553f95b28c8445d7a6aa99745a3a0221008cee6518ee8bedc17972e7154b770df45fc6f895ff37a9f74eac06d1ed407aa1 30450221009f592ad5c75a852b2ff1f2462fd603b3299793c2fba9c395040c3ff3126a564b02203fc23af4d0732076600689b3f9ffc12091fec2aa5f0097389f51d66cc0cc690e 3045022021a6f4d40af5920efdf986f834114d3a9dc1efcab1553bafc5471be237a34e7d022100a6012c0da689d1121b1d6731da180d442121fbd0732ef5ada377004bbd4c92a4 3046022100a3f2ee8b5b48cb6ed522183e0c85a529271e225f9dc57ebb392320386fe3c288022100ed4efe1273d16208405f1fae2834494b0275950d9c51fd504e9835a1eec3a573 3046022100b97c34caf2b74cd67bcd6b4d24353de7036d7114a9ad1a5b02a1adbdb509d2e1022100ec634fc9b77e2135ab49a5abd6550da53a3e88e381b36b78d704a9205f35d097 304502206bee720e407d6694e43398185035a2453ea85fe834b1211767b08f4617cc5d7e022100ee7373e402d418d4253b503ad11c0db7520feda5271a1ea95b812b8dae42ce8e 304402202439e5bbd7cdff62c5d2376c1efd950b4a6d0e50c7d148899dc657e1cb66c675022077e21a84b657933d2ade01f042a53966834d93f2942d6e4c70eb46b4a88daf1a 304502206aeb4920c4deaa0d7afcce4e7747f15a9e241d08ee839bd724b120e6e223f33f022100850da89d6ddbddf5fb8fb4995c83187fac3b3340bc37b0a6ae774e1588433fa3 304502201c6bc6f0e012ed490e8661fd2a25f61731ebdb8ead997abd21be3b68f87e07330221008a3bad7c9d5aefbda5b3558a3351cb999aab8434a4891ad71d890ca891475cff 30450220109567c9d4bef67828329bd69d3074472b1d80fd2fc21fb04b270903739cf9610221008d2def35ea3e7b73b6cf08e58cd4e76dcbeb6e7faaeaf6a14da9db2ba232724e 304402203f0413c2f4b97a91b6954197922f2d79e4a12c6701e4a8a2d01e7ab208f6132f022048173e0989dedb13c102f783113a0b4cb7348299cc888bbbfcc557e958c239ca 304402203963aa25dc258585cd2b9c209a88dca04eda53dc508cec4315c077b4bfbd042302207b88438eb04338134ce1d6eabee8042b525d24be873726e96a26fc1df951e0d5 3045022100ebce54b37c53db49df1ae2883ff51464d5cb55f3d5742251570ccf1e08cc7de402201331141bbf8b11e4c4e6184606775223dcbe1131ff68f62d355b5cfbf4845935 3046022100d0935e29ea354138e897ec649ffc0049d5df58b1aed1c4cc03b0cdc16838b39d022100e5670eb460ef23125780012e192103fedf17e3b2f839055bad87f3a038bcf518 30440220366961abd119c7503eec10fc4f3860583ad057eef810f82b39ec0c0880157d7c02205f5b5ef831e6b7e3d5002be8c7efaa27b86d83bcf56a431239c4c1afccb96147 304502201c4f88e0aa023ce1fd99f531bb08aba40c551e705b701d2b17e2161e2b6572c6022100dbdd88a0d874c14ce687e584dd0ac4c00d0230d4c626324620ba1d1c61d4c647 3045022100a2d53f6f57d576213f74f522c41e0ac0e5af24e3df129ae3bd989d40c596315c022077e8d00298f31f6d522ba811010bcfcbd7ec9321773e55e9042bb4c974ff5a11 
3044022025ae90a329dcb822bf7c02fa7aac5363543f2f27a039ec0ce9046cf447075b9a022069e0614df07179146497713dd4917ad097b8a97e111fe5076d25279f816883bb 304502201b29e73ea4a06ef0f2e9b93a2694fca4af90727b1af9809f3cfaaacbc55ccf07022100da6e2c7fae7e51dd866ede83e0c2dbccf191a03ba983fce16e201c4bde18d59b 304402202c2e2a179672274860f92d7c87066c1c1daee0e3eb9889f06a2131d24c4c82f402207be9efdc19be061e4ec656d11f1535a601ce09629f63bec42c64a3835301434e 304502202e6154294588374375bd989829d68f3d67de495948f5c425576af806545c12dd022100f0eb729637b7e3476414a798dadddc514fa4cad8015123602abdb57e9a9d4373 30450221008ed3f859b22789b64b7122ce6af269d97450f5b371f9659299c9da2f146625f80220228d46375cf5efee9055a5f06b3eec0ea5080bf78520c9cf47d5630c01d628b0 3046022100eef8e3f8083b8794aa47098be740befaaf6097145e38ec3ea189a16ac11c31f7022100f18cd00d3d8f68229002a62769b8e1dfe36b5f3debd97129505862d80b89e2a0 3045022100f75faa6b22847af714dd68583cde86529a809f52f4b18769230887c56c80b1a0022040f6eaab136cdc14856961d6014d2bce220a1035b8a75c0e56e629e8dd2e7631 304402207b871c99cd3796a17d3e6429d14c73bce235d33f125ab98cdaf39eb79093eac802202aac66933b2946264f0ddbb4e2cb36731587567df995d9c3b1ce5317c2238304 3045022063d2a7b8c1009962fbc46640c7ad8ca7737f6f1e7656bde988afd80a9244a0a30221009bdbd58f4a49d471b4584379c0be203f0e6f96100222251d315ec62433490ba7 3046022100e0bdded992a78eaa8249b432ce4ffb92b1f4ad45397f274b6fdc9116d764f6f90221008ef11003f7dfd5a505ca032e965012d91b6b9021fc631ca0841ac273ca4701e0 3045022078803d8fcc82f637b8b6aa6ff3c91fd617ceff2d8ea7190866c543ee22afe782022100b056de144f6d013b5e0c399494c7b39c260d1ec1f49b080acb97c1bc9bfd071d 3046022100cec53c8c86d7171490b2cba2331c4cd17be79187f6dbc1c8406bc5836f4eb43f022100a82fb091bb11eb9343e43a8d1b46116051aea7717a70e42c28a898453512496f 30440220196f70f68068245e35d2da3e65a9deea0f6dab0c6c509ed3bdf258f19231cb8d0220676598fbce4296d4a086d1df6b70a2a683e30eb52eceb795d6c229dc7a6e3629 304502200505970c102290063fbd15127a9a6a6bdd3ef5c77d62704972b093ba79335088022100c5e0565b171c12512f679767ae3b28929b0f1d4de83996ff99fb91dd158ef38d 30450221008b819a3df4d9c06f3f1d14041c33c0f1d0a356414bd34cd173eb8f684c35154f02205ec47dc696c0004043ab94dc8035a82b5165381ae6fdc31106bd7519632e97c6 304502210091578dc15d1da17ce364c39b6ae6de7f2be73989cbdc9cecd4489d9ae3924ef202201eb270e329014d54ba33b2c26c9bced87b6fe1b6026e82b45c734a71e5b0c3cd 3045022007be2c193111dcfc0b3cd8e7ba04afa11bcad5e95d61f62c49a1ae12859387230221009a850ca0ffd86b05e0e82afcb9c194c70c916e352ce93e04b36926c2dec26710 30440220390cba71687891d57a0221b76e0fbf8adb341e14d001520383cf3ba183bb578002201f83e2ded626f7c84799431456693069624375041318d9b2c3e378ec303e1409 304402205a91b8a85a348ab4dbe4047d5e9de452ab54e8469bf4f093006e5fe0db49355102207871b59be7497402a15e49b89b6d29afc621369ef162fa76d577a9e18a0811ec 304402206b0f3bc39e19d42d1102d4642255c030059a220cc5a404cd533b4d5e5b723e8902203eaec2647a88709472e4bcbcbebcb965935c4a30271b82f918b76d24b0177d43 30460221009e791681a0e06dff6671cd8fa76370b77b9aea84436e5b16b5b79ece5c8bfeb10221008eddd3f165e0b31e9c6cbb8c79ab316ada1ca324109db435e249e2c5655ce09f 3045022100cda8965943b112beb3cab228e731b9e9e2f4386bfc2900201030b942c76f54bf0220549c1fe11002529023322a8a08c87dacee9f863822cbf331c55f7ac5c56aa301 3045022000faddbc57cd70881a142c143317be57ad12c8d33988ffdd2a268f96d4fb3bad022100962ceedb5fdb9755dbf7fa9ac1460dced6b8ad68facb0799fcb71fd30c10d56d 3046022100af829a8089cc80110b69df866e2131bb3ebb15c8c2b2bbf76f5a3ff4acd98b81022100c7c4e8a6bbf61607384c52dd1ce8b3886e93b55e70ec79a1beae52ca833945f2 
3045022036976637251987a2a2f407c61e87d4d69dd3d2dcdbbfb67c56e65d773e79583f0221008ede7c86449bbe203d63e9fbaa396d10a9ab53001272aa167a3ad69293edce3d 3046022100f8e55545e35462687d77d2a9dc042f22248151e860455dce1afb8132fd1fd4b2022100e0fb6e0150cddd87739da4c1e2828857e8d0e4df2d56c1c13b90402da21d4ea3 3045022100cf309798725c0c5b9d308685fa299f4570585da475ad24e1584694a9ea2f2abf02202586a0d447f6ee5d702104ebd14768a2675d53a2a9916afbd8a2e031d795330d 3045022100fa855252aa6f0f75d2e0e1b5208f32c1fee6ef12028299d7ebf09d75d8cc6e79022050fd4401f41528e8892ef508763dea74a3bc324c7cee7cc58f3999a3cc1b44e6 3045022072fe640c090e088c6c6d71a6d9018f878d9f4930e53cbeac5839bda40da0790102210082cdbe467dce605776030ef0b996eac08ad94d6c1ca4e20b7f7efdfd2a289210 3046022100c24c0c53c2b4870996a6c8111800210f516dcb438b5d4c3d61884c996a385077022100e59e9862634e4ef11ddb7a81db29728afb2fd694f6deaa4b116ba7770bf0cb92 304402204c52ce7cc35177c60f1b7508d6608a51462ddc9749f4d6d0a41ebabefd85658d02207daa0f084715a1c250c0bb404ca044e41bb00b32a8425440524c03bd5561f1ce 3044022012861a39d4cf60c4146900557c4411af3d143ed92f86810baddfed3d4a1afedb02202d5a949af11d5c58d7cc40b5e26e997976d203e99fa96a5ce1df0f3a740744f4 3045022100cbe621de1ff1c69a8ed3171792c47cefbc46a7f5c0e58686cb2d7fff64c371cd022035aae05a9a9647064aeb918b1f69ee87094e7288f1dec714608404fc14838635 3044022070e8e2e453af1c89403a4942df0343e617348ac5d903c7ab1b7e579b64c4af0102203cf9aee8008292741f3f1d29b13689422c0a822da3f7f83a4cf1e4c8e8d6aecf 3044022044d8c839526380b965223dfe5d91710685543405179acdc0e227f814bcfafe8902205e2f595d586e89cc52df3edfd275e88bce2192d97905b3fe89da66cfda5a3f9f 304502207e614d0ee883e5f3b18a876eba28a7920c51b609a7b47d10be231db9de5f20f70221008e6e4250e2f08a4a7b67e88a59386a888916ddc3885bd8fe02e846c23ac351f6 30450221008dbcfa077f4e8993fd613778910c84983d2201de01f4053263feb28c3228c85f02204c6483d02eeeed4cc5632e291763443b4e39a6fce979d84401c95347a1959575 3045022048dfbca57b4f7f184f11cecfa5573bafdf1cd11a147fb5d095cb14ef21203834022100b57fb72c739dcae35d3c8c63b1966461f8f71c91c9d962ecc260e44c41fc7e3a 3045022100cae301ede9b3118c97018f605d87d22d91e7df4d0387536731c9fb5f43b632e1022013c84af0ab4d01dd7b1f47307a841a6f2791c4b54672aef9550469801511c8ad 304502205213759d5236517afc8dd8a8c2c40ce3b9f92a039253d8fda7cb2e15460000f5022100a00401437ffe45d3bed2876dc890c6a6efe8ff278e8c2b00e588a68872321b68 3046022100b3f7a90dc1a14bac26a23ae9e259578ea2ced52ed38b95808fb16f36a0856db6022100844cbcea3e6e7f458f2e7e13c982369b3f23d0f87df3dcd0d7308695e43e8943 3045022078f799b10f5a945fcee04bd199aee50441c8da6cfb5d6b926baefb83bf4a8414022100a301c966b1a9ef6aa2c37372d8d3cc2a24e215853b394056ebdffd647e6cb497 30440220609f1da23261305199edd539cefe8637916f4d130bf54c4e0232d4abca14b454022079f8571495de27b8d1bed6e086d247bee4bf589068b7794ac97c8e690a4a3bfb 304402203cad3d092daafd9bffa00b745f811e4ae74802f2bd9a2cc95874b69f5bd46fdb0220395121894aba905634583f0ce721afaa1a2634fda871f472b20475308507397b 3046022100a96fad7ebc0bff634adc31e9ce8f9590962122ffcfb9c74723e5dc1c91cbf4da022100ed4c8e9624bb99bca522708033d1d10a2cffd3871002653e41ad82d3a0527302 30460221008c214cc18c5e6b9d04003bf8be6f99287db76f4cf3438e08a07afe50ec77d3ab0221008cafcbc730a32998b3ab59d20b3ccf0a5d9756d4cd0fb8c2edb29042b862eeb4 3045022063f7bc591fe3cc0f242e06894527cee671d8eddc49a216a51af66cff01fddddf0221008c7e24df0eb4eeb0bc52b390d05eb33493832418984abede8fb8f8639a2593c7 3045022100aa877cf614d26754b99f2ef17a68eb4492dfec0aebec8f7b31f48f7c01a1e71502206e4dd4916d6fb88310f3e175fa6dbe67ce46ebc0c89f53a251176c05b53e456a 
3045022100f3ca1d0179451c74937193ac8af84c48fa4cf8d4d88f80740c1bbc635a549234022023643ce25cd9f2b0164995645d93c88a42ce807fce4823c6cfb97f8714f3eb16 3045022100fb4fbc2517d682bf1c5cf82bbbe933fd818c2ffb68a6aed31b6738bedaa26fe502207fec5d80b08d1c16d9d6a27c9d3abb2a2da77fc5b92efab00413a5f287e50b0a 304402206e67e35324917a288c916eea62c1d8f14d5b6c7ade0d1883c0adfb5dd2a4eca002205cbbb7c94ffd36fb9fbb71fdc06ccf661e86dba140627310cd0ae8360cddadb9 3045022006488827bcc122bd7a56006292caaeeca3ddfe56136fe8b8fb445c7be06412b5022100c1bb441ca8b0aff9f3e0875e747390d701a07d6221ceacbe6ad226bdf9724177 304502200b0e9ae79ee4ee441e89e76617c1cffec69eb80daefcf5fbb049f2188781df8a022100c9bec94497234d276223e48f8c0b7aa5c1c4c8f8630a8112bf31e21c35efcac8 30440220061d6f21d4c905adb3c9a30428aa583cf2b7eb30b853a4c5e66d295228e9b7190220087ee60430009406385ce1ead5e19eec66b3d8d419a954ca7da8b649f4286dfe 304402203575a37ae3cbf86dfd7fde10e3660c63fc23ed2796a5ea6abf1eb4f00ac8661a02201410f05f3c6e060ee27a55a66be9943cd13f7f974370bda6006a3afa3f5539a4 3045022006cd863f3db3272fdad3da019ce8af4893328b623c55f1aa8c02c0b507d79a5d022100ad5454e111007e4a1f098f93ac31a933ea237fe2cc31271e99941c7dcedb6988 304602210094c0301c4980af8a1ccb7011cbfadfe521500847a3cc113d38f6978ee42cdaba022100882f95f7f410610f93e8d44567d8eb3ef2bed4a03709c16582d6d1ae1aa48414 3045022100fabe2c229e75c99939120184abf21ede4b227b2d469a86cec9823733dba2b3300220069df8b0863585a1adc090a7b871896e4f5ac14f635a44de7168a9a429b7c01f 3046022100fbd6e7fe2b4a2a55104fe399ce4d32d5708ee7b25941f8121d3483bf00809b52022100edcde6cc5f7c08475a028f264d791f4ee006fdf24de550b4dd555f784daaeb75 3045022100e0b50467c9a0367a79d4f879c2f8d09c3132200e7c9399300425f87766c5c1c8022027db6703266361b079d704bd2725f4702566885964ed771f94ab75a98f2a7be6 3044022076642c465ef5f6232c0450cb36cc9f9577074eebcf63359dada7436f2ff886a6022026ec0fc277e255ef2f55c4f0027e4e94792be1d58ad925c98cb7d7616519d9fa 304402206cc57ebaeaf8253ac261acdcb1e9db1c9787add44152b3731001ebe9d7afb51402204c3ed5771588aed8cecdd12b6e372ba17b852362def69fa7932f1c91727a3e3c 304402204bcce9c53708589e35bd146096f6ded816d6ebb9d90502c3990d0f57120370ae0220217b25ef148fd709c62035765fcc170b958e27aeddcf17797d052984ed430b32 304402200817eb7a2850761f18bb20baf32a68c9e491287188f95295b02a4882a049439f022056bb452428d54dd6179d69c25d6130fbacc5b58f1f91e386fb691ab2a9715925 3045022100f614266317e3a47ae67c2f8b710a892e80050577e82db6b326c178764bf3e24a022008a3813d53d2140e94d38b6bbb70f4b0022aaff6087515e96b9a89c624caede8 3045022100a61c29ee8aacbb87546e087175ced7226686cbe0a824948a573cd7385726e056022037e6ae6e5907ef862ec172cbe67de29e2f1f629aeea0f49deb504e5414771188 304502206328fed9762d75327d89524c2a1d8263af950cd3e33b5195f988ccc6714262df022100ad3aa8bbbde8317a010cf156b10119d8a443d63011e1eb5f53774db2741e2f6f 304502207d029606b0f7faa037e52f1923349c8617c0c6ec0d5db4650ff0401dd5437670022100ce83fe4de9aad2c53c85bb024595e8414dae397dc80d51b847b15ba3d4c7c75e 3045022100befe6ce76b3d98893796b5bc5eefc2941add20e033e80b5b78f22bab081b203502204e32b8acfa9ec2a9f8a65c4b56c9a8b866ff8d99010a6b5afb2a18581eeebb84 304502207872fbabf9ea770f725fbcbf9ca688ca97f29da583868c173e53cc5ff5804232022100dacee05bf26716c1340d768247a68e65b0bac0df8d1a392759f60a6d648f5c17 3046022100c5ef8ef2b395f9fa847cfd2e0ae4e80da05bc491703aab658c18875d9161b3e4022100942e99a36c756a3c5c07f17bc1f98184022883b9b60d3db1e895d4fbf15ed8f0 3045022100f8f4c3dd2f16659b1bf624c96aced2a3a29816af5b378822bcf0a23d51e90e35022011b4389d4ad646a58c9207d87e15499e5f6572adb57cd3f3a78c77a022643d49 
30450220536d23ea32ded1b07c8968b4abbfe61186e663a8b5af9ed7edd567acec9fe0c6022100b6381c50f4fe0d95b8139896b39cc1e8e6be1f3776b8a9e9abbb2ef64bec25fa 3044022000c62987b68e1dc80f650b8ea4ec98a48abe0087bf3e09a0121d4853dde576580220525d916bd949a0bf2067198118761395b33a4211a3010c578a2c4e3a2f809f70 304402205b8663f4780d86d243cd07ee1d79486f7ba53dacec78fd27bbe0272d37f0d51202201cc332bf4c162b667eb3fca310ec29f24a855ce9c61e33d299ed368491b65d97 304502203da6c0e137c51dcaa66757b17d146a6385054db86bb68e8e41a7fac1a3638850022100c9c9d2444ac33776cedb9148dde77e781125dd21f16cdcd2bae21351ad286c71 3046022100fcd344cb0daa2be2710809b41accc4c71eebd0107466995996508cab13bcd574022100bd3aa189b904b3700bc2014bf6da75d40effbc976b5152ae00d4d90631281080 3045022100ad3329619eac09a539dff9ee935721fbc6c83287559d84825b0d1eb7b9be63db022067e7033958444e7068ec96cafd6790a8199b825f560f93028a628277e4a3f2d3 304502200fdc6c85b0a04bb969796228eb5603fe7d3ff5b783bd988ee87120f46ef7acac022100b34ac5a3ae00df32573d4487140f6697e18bef351c00f5a636a6e56256459620 3045022007db83bd12367165418e75cc351e75656c8146eb0bcb5940431ad54e68c0d6c902210090be6238e7327491d46b5fcf5b1b63904f7fa2338e3bf8ed9e16695d39a4c89b 30450220242958a1797eeef039b1ce7857bf8d005df942ec9e3f0b4ffb62492601287151022100865c30fb277cc96fc9e0ead7a1e270238fffa145a7fa4c8ff5629fd3f74989d5 30450221008672f875d6b925f7b27f46507fa0a6d248478e608d6a60ce310f6a2c33c1fe56022075fcf7b7c031bcc7d2b41c9575da070b238aa182d78ffba90a67cdde46044f97 3045022100ab711dcc39dcdd5cf48f73a79a74f5567f5dd3d22d3161f3afff6d14bacb33840220135abad3a983dadc813843b8cfe393bb4a4dc4cf4d7e932e6967a0eb216ffca2 304502206edb889eb18059e6f9de5dff720fa700e8d8f8df7c4d34689dcebfc2b89dcbc6022100d59fe0cf52d109bd8014565cfd3aa869b2ad04649b4054633a01fbe4581f6fa8 3045022100bfbc331ae6bfca980a2647bb11d5fba030bd109917cc3d3dae75dd167092256602206d47700ab6d51329cf30b05ab775b6ec3d494d91dbe8ebd72383dc0effe55b9c 304402200be2ce8b0a4267d6b4d2188b1d0c0e563740e1eeed14637f0fceccca22f8e5e202207b4f9de1af85e7ab699c135dd03cdfcc5a8b4017bf7cd3279fb1f8d9aa33951a 3045022100b67518261794b8283fbe863dffa22798f512926081f9a84a9baf6e1681c81c7a022051a476d8dd30112d14090f286738061f87763f7c759268337a6f294416a2352f 3045022038b6cefce01667b52f0b595579a2592176e750eaee2daf5fbddc196f3b3271e7022100e1d59fd37de4994e6840a6f1fe32242b1e69684751462af550db1aa6b68fa1f4 304402205567407ab748f7dea2cc52149389035e8f3c01df1fd35fc44a5aff33d72e3b3b02203efd6988d41bc1ff41accb53bfcd9185245281b316f4e1ac4b9d09a21838b4f5 3045022100e395b096500492b541a161e79390171ca20a2600260b859f1777d6b624c7979502203699289add62f322dc4a14fe65bfaa9f744da35de9711425425da6fb433ad291 3046022100fb7fad18bb357ac4bb064383779601cc84836df5a9925ac2d744d7079d0a9df1022100d1cf24d4c6b7a86cf7369299b31238e49375f0ca8c10c6647d6ae57c11ea7ce9 3045022100969fc79b80ab4398f91403a693583d59c16cf6a0b3fdf62e3add55b4f4fc107802207ae36d683859807b2ca47461d24bc5041c6753473f0067c1c675a2b5a2212cdb 3045022100d9007e70a81702aaf03035fb440a0130d98d42b0ccd5eced27a6cc18a222ca3802204d931f39607ba8338944de201c18d5dc1ba9336efa2bffa119ccb5a80a213cce 304402203cd010559bd36fa89d8183b9d2aa5e30d4d5010519c68bd275714f18ad2722dc022018c64bdcee6db828cb5b42a6d82896583768e9b953f0bf664e08772f0c6f4921 3045022011703fe098c34a64bf3644a7686ed764de8e740b1fc9fcfdab6cc77b4ea77920022100ff7aa9cb37015a3e5feb0d6e2d6e0e563c78c845af52400db31cecbe81e16cdc 304502200d81360cb41a8ee5100ee8833013f5ba75fbb9957624c369802838589c8891e8022100e2a11b3029a04d11ff93d432b7a1f2e96b283984c791e9f50fc2d7e36139430b 
3046022100fd8db2e2fdfa3288188da7eaa05b8baf12882b326fdd4ac1e11ec25256ec8ac7022100d09694642e1bf8a2d773f74f49d6eceb1e43daf389ba34b8743e20bf71ebbb1d 30460221008774565220518684220580d7fd6460841ce8692b82062d5b7ef5212bc7f9b384022100899e681406045ef6e537e36983544467331e2623462cec648482cf8345dec22c 3045022100f075bece0df88d1c6b2b5eafafeec6a61f6f0367492ade8f1a7e45c59611121502202bd0675bbbf6868d8b1729d53dbdc9cce845a4a75cc0764efb610392202e46c2 3045022100e64d42692a58f278c79a20eb57c63c212d62419a6780c3bafd7a0ed2a198a35602205e89702fc943cd6f29883850506345ca0be5660ade4047242d61bf9f7d93e482 3045022100acdc23529b83eaaf69cd605294ee65afef1591e83f3b76c899cb24e689c69c0a02206fa9af230607ff71d1eb6ae7ef1457a3104357fdbfe979c7fdfbdecc03b525a9 3046022100ddba08083990067c4dc8d6f6f8642f9e19c24000d08a4adbcd8f6fde29aac132022100f147086894ecef71da340bae91dc558c8546d2991724bf417622dba117cec9e8 3046022100e0d6f86cd7ff30bac4601c62b5bc9181a4f0d8e4697fb2c5db9115e15597b1ad022100a25749e915aa5bcd3d29dd4e3438449a609d1f63b52213863218f30a4460cb63 3045022100fc8fc0fd24181df81dc174434c2ff754e2a42330dfc9e2cd9598789fd57d22e802200ac23c8182f3bcc291cb4dde786a52dba72841ae2ff5b5978b4eb5df550aaf63 304602210089dc03d061d4e41ec86b08542ef06975c7c6d71d8f326a4f6676dd08328f866a0221009339fe0a1af364c2a5eb5ba325a531977b8f59896d62b54a02fcbfdc091eeacc 3045022100ed9b2d4b13de537a28d59daacc8a3b34379d47cd21b4ef55b90390592566a826022063405dcc64abf4d85f432f6c834c202219668da70b94f14e429decc6f52041fd 3046022100de4d862e73d61c884045b170e88fc737249ae6f5f5c38b47336a2046fb058d92022100a8ea08d625d349c60ca68924b10bbd4ee01b6f7cf278b0c01473ffe93a2a0b8a 3046022100fca89a353c02d9271cfdfc61bb253661df9b3c0c7a61df0a76e15ac12b06a57c022100d23970732d6acb8df754ca848ae3a92ad4550b1b77d0821ad0d4639f56f3a5b5 3046022100c5fe255ea6414f7a023ec667185c7f7c204bbfef994d6cf77e49b51647a0dd50022100fe40926d88361221e77093fd258bb335510c0ad347ab17f7a042532dd025fe55 3045022100d7dfa87bf91c1a814f6076b174923452763b7caebfc2b8afd264ee71020b8898022063b76cbe9ba4b34520fc938a1cf681998af63d5ef99dde5a563ac65d9157f569 3046022100fbe6bbe3de7a49a2dbdc75ac27f67a2c677f7150cb388f7f011d01b2951bd2a9022100b98ff754fa04196c11a4a6111c8257e3d4a13daa11c5f6bdc4bd59271d1ac98d 304402203b714b671cb5df76b7047ca102c49f6eb77fd30b5bb66a4643569df9441c70d102200455e1a98d3cef5b1536ef355ea8bbb94e9de27b022ee27993a8138a8785d653 3046022100dc4d87adb0b51d0fbcaf0cef045c6b313e1ab70ffd469955200f278dd58d794c02210092761b94a5a74f27886b3944906a2f4388d16274d9891eed046d149ac8ccf690 304502201495730a5ff333fa2e62d702b23a796480c6c095aa35590705f3d35c99ce2ced022100b84af6c60e0ba4bbc71a72bf236012a306a1ef649e4dfaefb3090f5a2727bce3 3045022100af1893dde0ff781d6de9238279675d6e3bcf691ff5e7c33c6cd655fe8e3f4da2022033486292d9c4cfb32802cdf6fc10693096bbb9c3da14fa540fe94d3836fcad19 30450220337d29065e7ece3b66703763daef8f61b34839cb3721db4b279a2d91266f024b022100998b46b8eea679d8d7a209ff15af83874c1c8aeb430d2add877c3408076acc75 3045022100999a0586b7b0a3b54880efd8f5027b41f277506d2d7f559c32929b12bb09ae9b022024c0809597fc5d4d1d3b6330cad9dc0803236043d70315bbed9fc1f87d3ed1d3 3045022100ba6144a95238e6e7c1fc22d7c38ed272a3c137a165f0d8b5ed61dd2bc1e65ded022026d1162ab900651589f7164e0dbc6c2c2f4058ac333e010974cfe12dde2aafd7 3046022100b919cd934be1a4bfd83f9fa1aa6775bce855b35f693b4c887eb76c821438a9b7022100d2f751613ec37178ae52faaa5b0d9694791a0df133e5a446c8919ac511a5a0d9 3045022100a41a1526f087357f629a3e5e5a44b56f01c7310f9757aaeb337953cff5131f3202205d7b33530267dc49dbec1adbc2ca5d8707c06e7339bf3653a3d28af39b0e0ca3 
30440220445592ebaadad5329a997fd3822b046d86dee2a6887fe7a7a58b35e7110fb8d602207dccf2e91c02e5119876bc11b95b5ba7d196a3204ef93c93f35ac7eba60522e2 3046022100d9f84d94cca95d499f103f49d107149ab25d14c6e8105f2ec08ba2c5e470d984022100ef5c9c057d90042becaece986bed6b02f06a19f0306e3104c0fd714aae3635f4 304502203ac12dcfc9743789cd887c589072ccaa04c6b9f43e70778a1d87584e85f7859d02210084d9b8ded86369e09923f04aab593c2249f5ba94f150d2b9fcb7b6e242089e3b 3045022100d5a121662a85c29bfde250d02fafb77059a9bb4d1f6c44b14e261f34617bb9210220701d477ac3377ff3fec2ac5931d2c21eab0c5d91bb0d6caa2152e9fc46dd6c00 304502204b95981ac7ee18a36c44172b454efe8db3caa11f9d01707deee8fa6dd4c909c6022100f1482d196b50fb1152eb8f8caf25dca86567b519826e6fad46cd28f8c12deeff 30440220414a46e0008e23b7aa0e108743d72bac28a5b971d37e187cb5d5e302501ae117022058886817f899f052f1b1a3b726e0cfb223c3021cb2e4b38ea32ad7443691262c 3045022069be1568ef26fbefabf08579aa54e6cd81761d931ae28d6dce84c59ce64513b4022100ab9ee338febcc4ba7ede7fd1cfd5e2cd4dba4c6355886f29a106d0d4cf460b0b 304502201defcfc5be0d0fb5970e403b422a6785a4ebed75c4199dea5836251d584aa3d8022100e4fd497f85cf6af1d4a365af7972b00f04c6edc16a0e1552aece25bee0db753e 30450221008c5ad561590fdce5aceb7ec9f496d17ea2f3db24072964df7b34396f23f78db502201a77c7147ebf09c5d54fb01b0ca938cc6396ad595615d8dbf86561d4df014a98 3046022100c454feb295932239cc9b279bc5846aeff6e010a87107bb0c847e93e5063bd924022100e6d5b3363b6be6cc774690b7695944bedf428c229275d23a4149a3a6b5419c61 304402204fb8e30aefbe84a269f9092391b31d4cf670803a001f0f20f782ff24121bbda102203e1afaa867d9fe37e4ad3b19bbc37734feb73441bc46a0207da88b565bdb1c96 3046022100cdc68415320f87e4a79400431a381b2308444c124524ee2cb65d4ddfd90b3b23022100a2db8dc369c0667d41b31f939d6535841b6cf6d6ada031765a9b163d9e571fe2 3045022064c3852c0ae7586d050d01d030b5cf12d45ba8ecae6c08787883a39eee1cb487022100c341a79f51fe75589e1772a4decd34f138fe790dee1ff610bd8fe012076644c2 304402202faf2c5eab46a41ea8c1e6f0a12e4ef2d789a8e44e471ee286f152bdda334f8402206c32bfc879033207a50d038a55b3df59d4e10354c1dc59982cd028f24a9eec31 3044022007172f0e61279d3abd69e2d45fd5a70627f0c2c01299aef035119e34b624ea4402207e5b94771eeb108cd5ce2bc58e56a0f4820abfbcc366b5c89b6d411ef987bdd1 304402205f1ce9dc41b80697bcc59c2a00f0620760c3ee721707db536c8d88f360b66dec022055cf4f127320ab41aa9edfcee86df49041f86a02b3264a5827ff847ab30b6c3d 3045022004813e698917c06a00412c7c2cf5bcea9d4b9062426836c5842c4191e67419f4022100fd099f12717961f9333e025cf9bccb7e5c55dd61c020a4d1f5723a7f199d8efe 304602210099461a7cbde37de07ab8d43711f2c5b5c5d679094877dec5446e43961fc0dfcb022100a3e200d11e9b3adcf73a53d6ede1d008201affd26aac75a235184e01ef0a1847 30450220190e68f98bc078f0c4a15dbd18f7e9660ef8b7b1facd275aad461742fe7df502022100d2ec4d1d7b704b22f630632f160d13f4ce5671c1d1784e370a6edda27c5e1390 3046022100838691c512d3894b8ba63e9c3173ee09d9e5c0a7e12f8e53b99375f507874934022100b9875c54c027f1c4cc21c787ba15e27fe91d6a8aad32d1ecaa4d6d102bc5a0dc 30450220056a57ddde903ad823ba178030937a1f213c27956ca40b154275c02c978a0c96022100b1e2a1f7ad3aef27511e7c0f64665e799b64e7c3cd809ce8aa1166e21c8461d4 3045022100c7eefa115dd5902810d529f2334c6324444962d802f2250c6a98f976fbebdd69022001d7888d846749471fa06fee6f8f158c44ce17317070ac87e10ea570f9f69c47 304402202e314cdb154777dbbb59a1a618a64f2e18f1e091b6115c047fc2e8c5fe6a4b2e02202431bdf54fb24bd03ccfc5aaa692d9df1025ef94472c8e1c192e281e4d9e8416 3045022100f311d227e5f80bc770c223011be6c3f5752ea6f9b1c16bc093ded7d52eb49fdf022023caf3592506361c20ad6824fd06e78e14641ac2d6aa94ba2aea141a94cf23c5 
30450220333d39f727dfbe2c85b85e91d8ce8c8ac93ad59e0a8329d556b3b25a1d217aba022100ef3cfcb093fd476aa4c9655cbacb043010132777d1e80ab618bb566ae20457d0 3045022100b652676f8db017fbb406d0cbad3c46b0ece79ffbac252b0a668e758dd056ae6b02202644506bdd9fb500e9bebd15350aade02fd4523be1ae66565da10028a85a533f 3045022100c47c0cb2f5e30dffc6c9f82b8e31e50896ab9728448450f5edaf02b9ab73e896022056aff342c9dc7c28d494410f162f629fb5d921d27e0a67c0ad8daa91ea5f9d3d 3046022100e8519eca50e2d277b03ee9ff5ffd4af6531cb108046114bee5e7a3e32f43bb87022100bfe2f4b94dcbd5dbdc3bc4c5b48ca103545c7d967ddefa8ff487b0fe97d44573 3046022100d4e1fef6d8f627cbb2c3925fa1476ac81e78be5b8187ae3564901c2ffaa23355022100bbc652e1dcf2b2f5576bfe5f23856b8d28a50d86b9faad87a4c12df063952b1d 304602210089e99bdab80b643e7807c9d1d1abc2e2ee45942d8f92b42d4783139a1a6d187b0221009b078b707f916e926a1b59fc61b0009238a0d19189bae3243c1cfb1b4499be3d 3045022100b5f8e3954565023b9740f3cc44cda58d48540853f5f43310591ebb8ceb6ffd06022048f0629b4d54f3fb3596bc90fde42727729bee8c3f4410fa8c75efe5070cc302 304402203bd021f65e827e5cd14bca54c869799ef797f6ed8a18930e9048861e666302b102205bf5aaa5811bbce9396ff878ac16c0c1ffbd598bd3f00686696c29e8de701d4d 3046022100cbdfdf9fc719935414ad32b87c4ab629ec9f4ad98fcc193a83e5342d46926de10221008a26af800b95273240e94c13e8ba8c5ad736af059232765829cff306452bb4ea 3045022100b9ac55edbf3efc310c3df307a4c52cb36a12085ea28fcf23296bc5124f5d5a0602205af0d149b4600cd82ade2579f3bc00d6303cb0cb6ce6eb3b9c861137d06a8825 304502201047ef68be6770b3e83a8132a395db8150c16ff342d8c5fdfe4f52213a3a90c9022100d07af595df49e6fc818181826c8d2f61ca1a239affacac10123765d582568992 304402205bae7bb04550ec9406aa0e60de6e78c4c1c057afaaae99dfda732712f0ab47ba022009f8de6b38dc59330296476ebd614de7a8f805c83daa5d168279859963b51a4a 304502207a069c9e6e21ff1cf6be8c5ea63f1811ce8c1a9fc6bce8df4e603c844e7a0ea3022100cc5f59f5fbef7f5b24ecab11bd93a19b3d069a20e819888fec1fe1f76de12eb8 30460221009c12cc40fbfa8139766ae427877bbb260413c23eb38f7b751a5d8cb3068c120502210094b9c52b1b468d58d66c3f5270ee5170fd61e397a0039b6137b371028981720d 304502206ee6d5ec8eede0cce8e0a80c8b356369c53cf999febb3c99f9739ca8b466cc10022100c2ae9247baffa4a9e80785167adf2c72ada943758cbaf2e97323bca2a6ba640e 3046022100e2fefe2598afadb23685df517933c9f2330876722b79b7952b22f70e37d7d8bf0221008bfc4e88ac0ac2af3acbd3575efa507fed4aaeed065f8a74e67c79e3b4e0bfe1 3046022100c64c7fd8119e1a4b93267eea4088197c24c5d01a10ae3b410284cf105aeb6ba3022100ce1c0ac83e3841f52c3a02ddce681787b8f5a069ffce39dc52638ded0ad7d943 30440220599e5ecc6fd6e8bc2a626367d710b23c77f8a18440c8ac0b61467b139ed023f002204eaa507f2f7bb5d37592506b23fadf8129422788ae0ac4e7af72613bb67eb62b 3046022100a7be191b7ec5dccfb302196cbd1b9cb1820bf5b7d6c4142d8885c7c3b32dc1890221009b58582cdd733c5ec1de01b541e479dca72af18c0e3faf3e00e2829bbbdad761 3045022074a58e06e38758a6eb621710e080d4550caf08429c3b138b43b1d0fe74a10dfc022100c648951a9a42bfb62b0cc5f1cc28cfd6be0897cb0fd9283786fa25ef691c5806 3046022100821b9cb4a4f1721886c12fce47c740ddf7f083ae7e1843949877dd5bc9fe56ad022100be64d9dc16fb0d644908004f21ec94e97aa22e9b372498260c2e360faf56da14 304502203c980f4416aee6cec29ad491f6b361e8f6870e095d63d4f1d591d4b2b4f83a7b022100c63a701dcb365e775c3d99aa89f4777075914a155545ef0c55f6ec959b69dc00 3046022100f5d9ba0eff9fcf6004b69faab53a24a9c915d503b048962cade3559d94aa66f2022100be91ff6571e2cec2e1cb141012f3da0cb08e6a36f420d6460187e6f2c85efdc1 30440220439aa675d9a1c42b3e8cda07eaeba3b93b9c9d4073556ad6a79c4e4ab252ff70022034be589bd80c3f8f16233ccad08915bcc5aa1ed2df635d856b09907a2dfe350b 
3045022014900849e519975a09fc1635fa84b01bbc184cf3f2fc50378a00eaeb03d6e99902210090bb7d3280c23128935d99b850915d84272ffab0ef4006a697aee451ba4bc807 3046022100e19c44e71d08d8dfa8ebdaf08f226153672d793fbed7f7e05abf5baf89ed586302210091218ccfcbce170cf3c1689a2204029ad4634b8e92aad92b0434e3053ad8517e 3046022100c9e7e8ddcae4081c92f459a44cd8545d3bbb0a849156d1db2f9f82961bccd100022100f0968493f7af89083be8dce48c8783a67a2f6b666fad4fa518dd14d2a09c03cd 3045022043c7f0aedfd10367aff8141e2cc58eb788444517eac2d2f6556dea9ff22eff6402210083391eea2381f6f6551869d6e05daccbb9f310295d94274cbc34f904b97da584 3045022100fe2fa41207fa2ed64c41b0176120d9a36fd9ebaf238c004d9bc020e8cba4435a0220245fae03a64f56a81036c947a4859c442e39decaacc53b06195748d902ec3f37 3044022013e4472f710a6382cde44b94c14f0a3f668c432390d3d3c57642097564fbe62b02203fb6128ac50387eaa922a973423ba4976da86876a484e742e2c82b5e49f080c4 304402206b4cd58c135c163bf6a885fff495ffd79dd579e83659b30f9c84a98b308bc908022021e843ea65f752622e04fef8e57fd733eab8b0dc3456ce4db64e30860f4d1142 30440220253c6667b0e5fcd5733b7e119ae4996ceedde863efa4b793e2669775bb2790a402203b0b33b29819db3b5072f516dd62437c1065e4cc8c337810fa1e41d1821fbdf2 3046022100d890c127c58d3876c3a1addc222b3dd01317299c20956152577152633d1092b2022100bb392bacb8cee6af9eaa9554d113c95ffaf3d506cfb424c218f972f3673a91d8 304502206eb58733f23b25dbb4d71f5ba63894643e3ef8678d20d4a86a0acbd8e645eac8022100b8f36248e8af6f93c13512dbd9668ccabba24a6b016a94d7cd24f6d05cc25204 3045022100db8319225d09e08c9c0acea4a6ee3ae57e250ac8f2a45703d3b27b49f462e8fd02205555a90aedcca4e9f4c04757b27c79bc31f9a606c9b5eae0f95841b092ee50dc 3045022007774127b229006b7e77e6688dd7229a88a2ed8c2dfd4bc41f0f2e27072b5ded022100d7afc8335cfbf7b2d0aab9c4e1c1957f4d4aec355a6fa456e2cdbad1a586a9f3 304502207a5d5a1fcccf2e165aba6853d55a6271984301e84432c4814ca379452227161a022100c50d185b2310d6eee7f4c1287c611d65565ca508a4b97d4262475fc8edeb0fb5 304402200c8adfee5c835e47977babe10794791332f258a7beecb1554fb89f5009b960b3022033155bfdc67c28cb83e3a07591931ccdb078fd77d686d2eb9a1d3c101d00492f 3045022100c6c70ff7ecd1e3c77495a311a8a43d9d1d99e8c77bd0dfbb13d22424a4cb8aed022014009848b6bb58a2e5191e18fc3198dbf4adbfe46e0547c1abee7a58749ef222 304502201efc6b6da52d8b392f8d371e37be513a4293e85080758cda801d095ebcab2069022100bd9532c34477fb2a93a78fddefab3a58f486cea4da8187b67eeb16940754c0cf 304502207a42643d749f9ab48361871ed9173e48d477f38bd47644e9d8388f1f9d085762022100ec4e90ebc9212c08e5fbcfecd21f6a13f670c39ec5ccdfb01e3c444bacebe11c 304402200504daa6a76e51534291c87dfd418777ee7d93837004c52c2aaaf55976f5ff5a0220025a544bc839b6fa49a3e10687d549e04598f1cfe2e701fba7f7337a12860206 3046022100b788a9d1b33ad9931d65321acc2fdb7bcca1f566ca4cb3ce77093c6611759921022100934e6a24662cc614d4c928e9fb34edac82ea7a11f07f095cbdd8b79b79912480 3046022100efb6322c4efec792fd4aafbefed7c92d79ab0a9c137a530a5190e1f88c8e42f90221008ede40020f96bf6e387413c45dbec144ebeee2a8d6741c87bfcf2bf2208b6aaf 3044022048c67aa0f7d98439206e8ca0dda92ea2d0d3dbeebf1075e6088b7d89cfc5b25c02202e99888dbdc625a4fbe4693dabae77117a0088b6096ff0121758eec29df11fc5 304402207da46ced4ddcb273ce01fbfe4d9668495570ae4e95f47b2dccd84251de88f7150220585a0d5ec8d04a71a60fa87d0147f8b6103038e31d12af0849cca1bb004d8880 3045022100bbe989367a649f0ec406353c8b1901c2cd24b5d812f8f3427154cc5b4f5e69e502207640251bf2f5ef2d17e34788668b115279e457a6ff71f9bcc2490c0be226d15c 304402204fc4ce6ed998649cafa24a16a6e5b8cca5fafd6c6edecce02ed44cc8359b1863022031c9a9a5e40a44b7bfef2c3c847ebe674a4845ab4b0f86f9388e71a845e59944 
3045022100eede4e5758aa080679527477af68f241adddcb9a88893a24d72987611c76c71c022051583f2db71a417b7c68e907f116928193211ae70a18fbd82666be500393e7b0 3045022100b215a42913fcf8dc4246c123a4d9fc83ce9ba9655221d6b1c53c359a36c1131a0220761d6270c86f9dc81ac5bef43e561666b59fcb06c6f4a4916a4223958dd2f110 30440220337bf0b065b1dc9c0d8541d79b83dc3cee591a9ddc2f470402e03b8e556d6c3402201614a213ff9bce5c2322ef6a0671bc48e8d577b5083987ae8a09aa88b9f81ea5 304502200d5fe885325c0c6fe2b6a2bdd148a69b546d2c23b72790c1dd85067d229f142e022100f7eb2aa79ede3c7d6ef0503e056fb451cae0ac0dac45cf558c506b4593e3c1e6 304402206e5519ced7d087ee99e289b2268753bc123f0672e0ad80be9a90ff0eb1bf788c02200d9b1df93093127d8178a9a837f023f174cff33a106762480460b8aa7e9ea9f2 30460221009216b2dbcff55ea4b597f818d854f572c5512f58b499e4408fd6dc306e8d6979022100b5278e30d67dfbce08f798ca68295c594161dea89f3eae5613e51cd3ccc1da39 3045022100b48c224b9e5f86f20bb2271b5f0c0ebf56cf30e41cb48b4c29e699d5014771de02207c052f746102a71fe5d2e2483715d5c636dc19c81eeb4c275b6b5f133dee1d6b 3044022005e6fb0a2d680cfe1247f965ecc23e471df60c81ccfde9a77f5fcb4d993a6a3a0220770b86fbedacc300b29519514acf3cab067816dd0bf07ffaf93e20c96a3f83d0 3046022100d70cd429a6665a20cd9e715b3e048e7fe3c75a50433ff3b9c6b9303bc28f7312022100d4c443dcb2c28b368888ad85277ed08d67fe3452d1649065b0d767e7d04a8457 304502206468780f14418c38098c25ce2d2b296dd6ad368c1fb069ef880ceab4cfef729e022100921d1bbd6616f95acb2654815407f162ef153d4901f0195a06b8c2a9ba8d7562 3045022100b9b516056a57076b3849dde6d9daab1d76c6d19d0d9ceecefc25a23d87c5f08b0220208acc92168c3c1b7e65e48a0db7eb779a4db341369975cae9878c9985708d02 30460221009197711977a0d010b9d770f5a86abf5345f4e60df689ff1775d8042179c02c4602210093159872f249040723eb10f13e923391769975cf08eaff153ada1c79b492a192 3044022008b9a8e959b60ac4e1b670545911e6f662e753341286b2584834665bfb4e789502203d35b216061b8b852469db00d71c4f04eb48fbd2d62ff71d1f8bbfd557c3fcdc 30460221008a81c63e8062682688b4a00f4592c3059135e95449ec351b339ff9887848383c02210093b06316e7e209e3ed33c61877ea3fe2d012ed54a614ab8ee3248be801a9bcdf 3045022100c64bfbc37864fbc20ad27b834c4f4fd703725ff1844e792b6427401d4c4d4a82022028af64a442dffe5bc317a61b5cee72dfe398668ff633b883b8b56b1ebdf45918 3046022100e5301ebd7dd577205683ca59c28b96e268e69c7d0454fbdc7cf7671b6c7b99860221009a933addb0860950b6e34b0962e29a141a599380ca36f64a66763b5dbefa6c03 304402202f35623d8a2978fd01a304f6920756e0d0d73a598e12d2861b10baafe883deb602204d05e7642ed87ee1ce5a4159afa971278f2bf357785f5dcb447a895ddaec6bf6 3046022100da58a0ce4bf0aa22fe817c129c999baba8629c2373060ad33b1cbd19b6f968c7022100958799207a1011b69cfccf85e71482d7789f05d8eea6bcea0f8500bf3470b9c4 3046022100ca9f8f65149f733796532a177be7933d5d77ef5f7aa5194a23ec98a04667302902210095e6fc65978b20ea57f7cf4efff3be3e1f9a192b0c9c8b51997fc527fcf1db65 304502205964dc1c0363cb609968de3e89cf8710357f183571dbec7c6f1625a5bdc31d45022100cd8333af3ce4d76b0c1d4d04e141572da6f85712afffbd05cfa742813fd1416c 304502201fccf32fa724136f6b772241e10630133daf7b1fa4ffd577bf81f66c1b1f508f022100d2d40fc7381f65741248572d4f1dc467f616b9682f1044e23ebda6eb913bd080 3045022100b59bf17acc1d1c10f1221f3b229e05f283c787e6c7b5608da61e4a567fa22f9602202f711e56733c75932ff252da35ef55a2ac00553b9118e9e10214d5c9509b69be 304402201ae5e7061b1770e79d5f0272ebf1e394b08830106f5eeb6ea0fcdfb411036b3d022058b6920664137ea4f89bd74fd2b3e5c0ecacf2eaa7c3f0da1cad20b8ae959f95 3045022059fd4e033fe799085dbf918af5335cdfdd2311ddb1438f5c4c1500fe9a2213c1022100a44fdf10d4ff30d3b921cae79509f6cbc1cdd68cd4dcc3444d79bdd09183096e 
30450220672414310535507c773546f03ae227fc7d79cd39e71d3490ff96987301de5c8a022100a975cbe5127b92ba9e41c294107c4a9bb06a9934450d8436dfcbe21063573bdd 3046022100a350dd55984c09eaea933f9a585e37b0725ad88e1b761a7645b4dc12d3bad789022100913d9bd8223616a59f8f4d0dd334926afdc82a15b428167c6c28296046208faf 3046022100cc28eaa096e8d5e6e7b81410b34ca13a8780fe57aa63dd62f32b50b11f26fded022100ff05b6001d36abc27fc1194ae1960e89553273f3a943f6b437381438e50fc119 3044022012a4bc4c0c4a8c0e5823222dc89534cf62955f9936824420a64182f7f938fc590220431214ea93d84348b967c861e8d25d64436eae565c0fd61d07104f91f5878f1c 304602210091c48bd3df53d5186012998dc3a8fc53f9c442fba3184b35662a83828d04ea35022100995f1eefdc33dbed359fc31e8bac7b72bb480ad02662f36725c2b8532187a037 30450220369adee41d9b94a3ee61efed4278f475d5da1af9169e3b0620c50e5d6960a686022100a774dd81e78a3a291393bca258542a789c6eb0ba9c627f59403d20406b8a8bc9 304402203923bddea139b8492af3accc51f5a751cacb2d445f41f90664821518254d822a022031b7e46da4cf36755f1a5effd3ceabc7b98268fd1c22d165af717206c59b129c 304402206223f6de004d557fe53f4c0724995ac0bb9985a0bc83c02ef9ddfe31bdc6340902201ff0dfd2982e0afeea7fed948eec769d138912c06a3876058ef489b5fec221aa 30450221008741cad0ecbfb0f79bc7378fabd3ce4257208902b9615168129ae330d2c7fac90220306adab6ac4c9d1c59590514e4a5448feb65296b90f0d5a2ba4e80bc30da2690 304402205cd61f759a750cec31d171ce75e528a26bc876883e12338511142b9cfce93261022055f40b8e7eddc271ca1f4f78c5dd987b3ab46e90a8a3c96f6809dac5c6bfa69f 3046022100984d0ce6bdc548ae0f55e41a4656c37508b29b0c3f0838b2e111f84b02d050a102210082185eaa1ea0a49b9172ff9b25f5a8f50860a390c31fe4db50cc8180fafe5319 30460221009a05f23425f8318f4bfbe288af52605f7ece665b43fbbddddcff1925518f9f1a0221008a4f834d5fd62da382c566e68e485cf1d17131bbe0b4c44ecaba9aae964c4cd3 304502203f5bd2e62d737351628fa2328d4abc2f6b16685305bbb6ef153b51aab544ddcc022100b2934d5013e9d62bf9df21117349b54f1b39cea194d1775ea5cd88582b007b14 3044022027683e8692a5285cad5d0f27699b1efc6c6559b493d40d7fa596fd4d9cc14530022024f14d1bcf6f035b931d588c86d4717ab765b9e2898cbf93659b2e63a9222e69 3046022100e2684998882e6ef68192ea9b7a8b13118573186b4f352603b8f49a971bb152b8022100ca188355bab8af9febed8634bbb3b610eb58d3f53bd55729e865c407d4cf2697 3046022100e01982a75b5972c9522d5aec4bdc394ee449e254b5ef0a95a0a2930c481982600221008d74822936ea1f5c0c670196defe2bc16630e2ae053eb6a3674a348a1478e887 304602210083bdfdefd06a9b27a6c535141addb84b4b107f8f4cb278df2c35b323a67b51a4022100f6ea53adc202f3a585684b0e4eea382d46fc3e80936d25b67bda7693f7e9715e 304402207064b1e4e741cef9dfdec28aa3450971020c47bd45ea6855daee761ca8a3bf8d0220701347c151eb770ce4592d8e9fb7371db720b293d30bf95f65e2942bbe19c138 304502200395b5245dd67e7ab93442984668df8b76a6e222b745a0d57c8986b5808077d7022100ba649eabf4e91efbb1309562341dea61293808e41bfabab694185289a2c4f5bf 30440220068d4788531d00f1385bbb3a8551eb42e30636ce305bdf2a379e2985af9bfd2f02204f7b77060b99617005f66ea6b2e125183d8017e9b22ed3c7e75ae0d44d2e0a40 304402207750896d74a0c619f8fe29e3f978629a9a5a76189a24381271bb11def9d4202c02206e9fbc43c568bdf153bd28a359aa95ff43ee42fb2e0539e1aaec173059f258b2 3045022100bd0d0f2fe6b88d0dca0098f138d6c4cacb855883403f4988bd0e3495eeeb7b7402204c7fbe9adefe1ce424a141c31b0b9e396b03cbeed672191f54daf4d7516a8647 3045022100b7a241080ef2f7f88ce61cd17c096705527e4932e5c5618751aa29c777102e9f02200a8fa1ad60c193a87321b296b71e3234d8a74b843b7dadfa8740ff886f7290ce 304402207f01e5ac780b4fede3be87185248913a2196c32f6df7ea8fab0d159f4ec8f40e02202217d463365a4c6ef04020fd352cda10d1d7a37a826ca4fba0393c40b122cfb5 
30440220081cd2647b4b6d1a0335920b91bfefaba6407339f4c2888734f030b4919dd98a02203975cb991fd166dbd649aee10276b3c0c551547de8646f2d134775ec06b83d0b 3045022100d8c2499ff350696827f3eb0b336f57ce672ea65ad7779f2ab1770477fea9e2180220212dce6c8337c7f8a02d456eadfe1b036ca33a3949944e1e8783530281ad5b88 3045022053bd3fa2290da03e6257a57a4e58f0198d79a0cf2435cfa0b6a7298892f460100221009af84dc054ed7a63466b60ce9b075cc3df4c0fc8f4b55d35da817c9fa9bd791b 3045022100b5fed74e19393288b78604adcd3eabda4dd8354cd010dffb921893c8fd36846402200639fd87ff663accaa749d629f788f2f5ee6c4e91891d512ccacb3110d821945 3045022000911944842aa82c82f8dc9a11701b846ab4701d624f4d5dee97a815a82d6ce20221009b071c7ce9cd75ac8db2934e199750f4cc4827d6a72d06521a0baf8ae741b398 3046022100a8e616c8f6b00ced14f07087508b26bc7ffccdf0bfca60e1fe2f7b377cfbd93b022100ddb3f452b7812c488b7ac3022b64c64b47a84f8ba52302fb315d8255dc7b29ad 3045022100be3186fc03d5c126f59d23dce30fe44db8e02fdbe0d94c7931d7121a90a540550220513bb51cea0e17b64aa7f63b8f7f88ec121e143b355638bf915c84bc4339a5c6 3046022100d7303d96aa6abcc35ffcd1b3c5c84561665b4019045f192e8bedfaee594e0317022100bc4495479199837393d96b6c4d0fa6686f310a56f7c70175cce147f3ed1ef9e5 3046022100e5da87fc237d3ce0a9e94b207fb3f4cfddef978c5c0307880297cb6893c91755022100ee96ee378caaf53d122c9a6573d5afe41a7809b253c446cbf27cdd438c26e486 30440220094a245e72266d543f14af26d3cbfdfb37e25880d789e2529958a20892ff809b02203f44d5f5f5a62ac2528d02012cae25d9a817d44f97a6d1f367e7464dcc144fdb 3046022100d655a0f2447dd076ba56b0c01bd8afe15b34257ff95796eda46f678918dde132022100f0547ac571425cbb59293281ed7df008ad1bd77e278811497d4e534e8517c8e3 304402204d2369cf11a949c43a2392576c0d67ba6e72909f3147c244bc6f45dd2c5f61be0220706c8de13e289ea007f48c6f0d09843c977b55e5b38c98cbe71209449984cf97 3046022100a4b81b365d11c809b8637be808893ab19e04e1ae6182eebf0bb91e7fc5ca6f9e022100b541c5441aca106d4e0e2a50ffc684c9e34cbc6d9f6255ebeffb53c6dcaf093f 304402206738c68d00e2aefedbf2fef430ecbda4d7a2da7b3a088341f5f2abf78c3a7d0302204206937543c334a5433d5382ec50a9d3d096e3242bc041e93669ce2328364ff9 3045022100b14f873ced896477a47afa17400b4b68e1dd89de97e5ab2e06af07f5338617da02204e1e330c02a86460a7739516f8b49c270c42574917806f553c6c36f14c53d42f 3045022100f501ea7611f09824033f77c1649b71bad4dce1d0df9e0c62414927111c1cbc6c02200e043e5ad2fd7ab8442a971b04bddee9288770d048968194a316b50e1c960fc0 304502202b7bb30f2da01ec687957614e72ca69dff96d152fced3dd1015487d80a9e219c022100a0b2582013a5924a23502c00c8d97e9b1814898e79758533f7009d5c6018732e 3046022100cda651e8de1fcac482c2654158d2c3b386df64e2aefab6a6eb84af14a6b1fa5c022100fc51d5dd3cd561f2d1c35cc19d0452212432900888e951b4cc1f0c0b6387a0c2 3045022100d87cb0d4cd494330d0d99bcca7c8734757e81b0a797671c6ab1d082eed7b9f9a02201f7894db51d01588e0eee8059bd46adb6ee1ea797f5cb5202ba677e856b66dd9 3045022100c11218fd22f5b3e54ebf7cb1d0290ca0b3eca4879bf4af2e5f03226c129e5ea80220274e618da7d288cd358e987a1a435466feae14c215ce72ae2edb5ebcf4ad7684 3044022013361a9fc1887da63b8f5ce29ec0864330f2f52f3df37f2cb7d95422f9dd7434022022ef38227ce4163a0d9cef3ef40abb34ee6b9533aed6a070852ba6e25452b0e5 304502206976d28c0061b248ccb308a1be10751ac5afd5f4e1c65738fbd42181a528dd33022100aedb8deab25c570b8813da1fc61b3b4ae1c924ec8cf2f08524776cd377af9ef8 3045022100a60ead6699833ab90e22ce4a26c0d4e8c28715dd64389a55dafde1b21ac4ede30220175b01d03fb7f8c2aa1ea5065c0d6ec1c281c6789f6ca5bc25eb69420093a8c8 304502204e65129dd90010364922c884324092ee74ad8a1aa0ae327fff1cc8a4fc2612f8022100cdf24df3a37401f901483c79893e640153cd902b306be656ba05965e7ae2ab01 
3046022100c8f88d14508f80c32b86e74a3da940614542d4062934a8ad59a43c2fcd20714e022100b557624deb27d19ecb283fa74f87ef03e7c0a0e7bf10cfadfd084e6b017c6c3f 3046022100bcfe1b1f923d11efbb763c15c9d1aeae1e935371447a8b9e7c94c2b754bcad0702210095312e6190b17ea2bab88c4991ef22e72cce15b5d24a736b5100e80ef99f6a1e 30450220088d620b2278ac52b9feb9a39687046200b60813fa2f1c19bfbeb83192fd4771022100a6db89c5f8b94bae7aa78621b09c2b4f7c557955c036db39f21795f661b635fd 3045022045863337fdfbe05aad403515f9146c0c75ae524b2850eb6c962c8471226c6ab402210097ae021f2b47ea8e90ba65550ca3312802bca516111fd6528acb9163e52481b4 3046022100ac9d18c30d7d30df9de1f4ffd8d6dc132b52255aadbf1fce30596de4d39a14700221009962e6e7ebc4ed702f69ba9ddccc75f75036552b0c1ff65d5c619b6c326220ca 3046022100a6475411f794d1e8360f7d9d452c4b976158bee02570685c5d0f16b168cf53ff0221008acf380d1f3ce6321ebd250c4ada9ed058c9c083f5f26353c1a73edecc52f3b8 3045022100a7594599f507db72a0b008996c6f60f52b0d4812d65a93a7f78c328159a772f00220656c493a762dfcc0aa5878c5b5cccafab59986e7b7e8179d702fbfb90287f010 3046022100c23f87a9cf744da9ef7bac7866cd6bcae7f4675a58e8974f604556ccc00abab3022100d6bd741e72fe2c655e053693c98d7d734020dab97fefa0da01e98717825f5f99 3045022027daf87d8ff6c4d33bc2b10b5736715ed3cecc0e56db9c2d12c46c5b688d277f022100c3845ea026061416ad04353783de0f348307685e21a80860c2c8c054dc92e0c9 30450220253b6d88f363578e46f5910b98fa14c86d30fb88cdd592161f8eaabfbda3fbfa02210084721efca8627ef5069bc25a88b7a8d7efdaf4283ab4bc55f8ec83bd70c6c292 3045022030a4152169f8306286e7c7176c53cea877b419c006d9b288c999f363cbd68b12022100b9a34b78e1a4d502e7c9173c0cf07e309d3e3505606bcf984083e592f29dc632 3045022100e8decfd96ae16967c73b105e3fd9771b1f7beffbf94d1f53d07d68275bd04cff02204560f966f48398f40323c4e8be2b060d02df7ad5429a07bd515ab06311d9dbc0 3046022100a6f92d1653fa118be11c749fa45e56d01fd51473cdde82bfa82584fb104ec2de022100f22e2b32ecb4ef04dde0366e3dda78a225dcccc304d3fb0474237aaa4cc944e1 3045022100db52b73aadcf292852f31394b209032141fcacf49b3316daccc221a0d81ba0ba02206bee1bb8d052a87e2f961c58285549dbdec4fde223769d0c1a7d0a8692637a5a 3045022100bb36ea39e654b317bc134a376bd39c6113ec22915e7103a363ffb5299cb1132b022055fe166d0d121ba7b67ef730cf2b25fc8029741d07657681e2a8ac7b753cca44 304402201fc363e55c05350cb42c5d69cdaadc856836d3243a1de05ea4b28c916422ec84022011de0e7158265297793491168c110bc014d80b7562d7b59b937f09bdf1a38f88 304402201046cd74702aaf8688828da370e6104358cc3287de679f9f6210979fb64eb57302207bb45977acca58f88e7e6448d3cd75faea9c0b7643ba9be7c428e45f6a5768a1 3044022059309324a381be9c0567ad6131f5718958eb73510d88510a5bbdea987cc3fdec022070fe8a060b1a5506c9499ea2340df155953065cc335ac675ca36dc4be8d00877 30450220799acf7c0a70743b59a180943f866b10043f998ea3059ef07a38871708fce005022100ae0b281fb4044ab6d04aa0af3e17f026dcedce9d632fe4423b858e66e93b9119 3044022020695f49c1e7ff54e349431c3c53e7c30b038d3bcf79f4ffa761569cdd58241c022015e2b1c5c0036bbd4dfdaf5891e0b99483411549d026c09b6650048db26fe60f 3045022100a694fec1ee5a8780cbdd1f83e98dce2846e62f02832cb05faffc100af2441cf60220373776803090f03711e8ba136e73cd0d7483ef7a2e79d2660bb668c344932d17 304402207f4321bb95f1d6119c5c41fe1f8206a355b6d5f2fe0753b697405c5d48274dd70220504c938303f738c1b3535f164632580d31e712dccc56dadeace7361ae77d7ec0 304502202d82554292a2f1154ef9011d64887c0d14707d550f177f3e57d6cf5c0299cdb2022100c5e947cdf419be0ed3cac6f4d87bff712e75b06c61e10fdefa6136f35f03eec3 304502203cbf608dd76ab7b96aad5bb94e9174070380a4781ac9ab9da81f5985be9d3989022100fa560f34df0ec5ea72f8dc38320ec4d610b0916d44df5dbd416860344390aa6b 
304502207f0b61f781e5f779971acd9543f8b0e5dbae713d61c1072d77fb9ce1363becee022100e9f415252f905f760e69ac9c20708e72e5ed30301b8a28ecf93c2fe117289273 304402203070d85d37f1527aafa09e45fb1b9287fee708f1a5fd64d984112d0312a7d8c002206c1bd39bda928a109317b8f3f9f2432f642edffe85fed91d72a9f307de559628 3045022006efc714e774b0b7119e7b0bb16b7cd9077cee597dc088637ce72827fb3d56bb022100e1881cd9989ba82a29580ec3c6d66cdafb785b86aede4d0fd9c53227202e96ce 3045022100cfd2e6fa52a6c7fd7f860006003805538765353fbb606823fd229ef8f1a758c402206fa413526f68d74e56e6dff4a5939b66d0a31c432f31d81dbf1478637ef8c48d 3045022100d1b9415106a42647933c537d92538c2860acbf47ac33c964f9f3f6be7dfcf82202200cc2dae9ef6cb62f3ad1cdaac57901bb974b5d32c2d8ff3142b71ab47c77a1e1 304402203e84e91b56530f32e2ce80df2d0a449e503e7c505288ce561008c012dd7c6d6a022050b477a4a902ceb92f1f183c69bbe6045c13920a6a3f31bbf9739de5ae956e73 3046022100b67a8407fb722599f3e4e38a020c1821efaa1df0fe12b69bdfbe6bb172c5b9820221009d07018c1ac4ebdefa28851bbf5c6138f37230379ef427e44a2f8f408d20847d 3044022015c8f642783d55841b013d0e54a9c984134b858522fa7b5d28131fb918990f5c02206757f4834193c9449c2926cc3bde85eb4f24ee1e9a3766728037c81dbd6c2499 3045022100bd89f912df2c0a780305ade9a09f4de4cd17345e9fbaaf5e5b1404dff25b9af4022001d67c41be0d35ec87110c517286c47407f8eb1189dc2aa3d74909d1967c2d35 3045022100bf47dac05ca1fdf705acfcb50ededb17d6cb38c90a5256d7a7476fb0db90ed19022018356dfc322d946b61a5581f81e9340b264f82dce159f2baf2813cf2db27724a 304402207d83dc2a19cfc347ca7bf70d4a42675b697c1a80388eced7332574f16034cbdf02204adbec985788ded803958fe51099e0cd4cc7b05130ad0adbe2aa82f00fea8de8 30450220785ee36b50f271f85e776698f9dd7531ff107ec2fd5bf137c32de838409652a4022100a048ef7873d1617f5040521a536f66101c686b359c4422ee964e7901b0876c9c 3046022100cbafd28c300d55c6b8933518e4699a1fc23181ba814856d682966edd046ef25802210092a030e42cb9157951e9fc9d8eb40bdcfa8313782faea48a252555b144829d31 3046022100d2a64c85d9857f678437e55c76900e2c6a760460bcaaf6ede8ab745ca893b6c7022100a0987f28ee5c4653703d5add6d0c6a2ffc25d6fd5633767c7309f01cceb5e993 3045022067931bf4ac24bb811ec882a95ebb67b507d579278ed98bd7f3abc135e762770e022100a85c71a86d29a4605d851bfcb00772e2e84c0a7ba66a1c90b5e9fc1cc223c9dc 3046022100d5bebdb448a3d7b4d963b295eb3fe6a36c271184d50fa55ddbf526408dd4fa0e022100ffbd9be4640ebd4f01a40a77424b02fd2e1976da390fc859da765efce12eadd7 30460221009742c5775397c4753242dabf0dbc276a3d7e38c2849820cb9855a71eff5f3ce9022100cc2d02b8a6ca126a0c8a008ced53d6635e79046b53c85472a162796eaeda2a5b 304502207e935d8b4ca62991303196040bf771bde7c9c28a5f143ac7f5004371e4a4606e022100d2adb5fda62606809c9a6c5243816d0483c8b3d32e72688a6a1cb25364e1228d 30440220170dfd8b4c47ea231f6cffa276a12874bf684cca79856e7f5bd34d5f32457ac50220527cd23f24ccc2b0e13017f5301d9d34e27e8d283e73cc2855daee9ff5903f2e 304402204e12164865ee0466c3bd6dfaed4573665d70334cab8b11f3796f0241fc0ac2140220077d33f8316edd35dab80d4ceb34478c884c183b90fccc37c39dbbbe52a681a7 3046022100abb2a89f8799a5995eca385cf259b2b592416f3ab39a9cd7e0550ac187142944022100b59a27ea2e1e9cfb04addbfde7b13ea64659e686c9c6eb996f03412adfb06d13 30450220235ce61e30dce9eb9454df2fc4c53ede6b586e54c8a5b55dde2b64ef36b8c292022100c2d9f219f78662060aebf93353d5d71bacd2b2d139b0071dbf7f8f8cd5e564b7 3045022100ec22e3260e45126a26b5dde90ac44c6dc7f10a09c59dd3edba57fe46664adef20220481cdc681c591820d6c81da54c39f0fb48dcbe5068d53b4d0b06ce5db3b41aa2 30460221008e1c50446d335ee999bbdcf4c27cbf0840b196c2694bb4fda784d2bad1ebf9bb022100883a030b99ffeb7e3e8ee5f01260f9d3fcd0d6dd84ca8f631fb2ad90598dacd6 
3046022100c90b08c101b84a3ba30e0eb346809713205b920694c51230b29789cb34281a50022100fa3cd512068c431e25d84d49772f36a3a8685fefad9dbf4d88e5d9f16ec7f856 3046022100b537e86638b2c07fffc574ac0a618892fded91cd553190b4aa80d6996270c402022100926fc403a5d699b34a82913c16783875f12b6a30daa044044518f9b5fa8938e6 3045022054afcdf414fc75e5d23851cda2afcdb3d3943c6611122fbc4b04f8532a87799202210097febb6e48eee432cad2146f93d847f4a621d730ac85e8f59ff3b3c8a445eb1f 3045022020d7371817a7cc89f44110dfd711f90236b1967dc56a203160e7478c5f0d95f002210082b4df936417a7ff4d4ba9f448292cc89fce6a0db08364b4af5ea9fa047c78c3 30440220249d8f7786cef01053415bb6c3661b67a1f96726770554625eee4246023714ef022044fd20984b7d4cff4939df2cdce83c8005d94e6f651aac86bd383f74a04fdcaa 3045022100c855af61c5d546f23204c04e5d251f9f4a894024599c140ed3f22558aa4467c002205d213adc673fd57785b54af36788d6157f709b419f3817a13ea62cf2a73810ef 3045022100f2141310564bbb307e0376b23dbbadce68441f32da70112f49cecc15f9b9541002207cde0a26065711ff86d5f89a0adcde80a4bc3921b788a1a2bf47f47a19333bc2 3045022100be131d8ade145e3ba798ef4d041b5ad396da7c0494c8de779d0bd0037e39010002203d5be817c283a1b8a39563740b22aefb959d7073ea38b0499b746a0287d5c874 3045022002ce49112970be9eff0200eb4612ca0be1f6b1e38be5fdbd778f3e4184132da2022100fe4c8a42f1dc3cae89edd9ba5dbd42380bc5f7d8ad0333ad8e8093846c04050a 3045022100cf32c3b9c5c25954bdeb8940d91a6a8d48e89d8daeec13aa6be3fa19a800919702200ebc7e5616b7ec6f47381474fb2056114bb62117584c2a8b699f757e62729b79 3045022100a71e7dc42359692952ac3683730a4dccaf54627a26e3cf959a970774c8774a3002204de80e6a524938d4a7fe6e5530e2e17c803db106fa490641831af2a6f84cb4a8 304502203c0f8b9a06369272fd777142df27ce1e94c41a52650813793d308c93ee638a3e022100d562475c6b1fa9046f6d42121a5644a49e59de506e355ec02be78baba85f0c44 3046022100beca5c032aba802553221ab905f886e99a5af6e609bbbab94c34f4a35bede65e02210097bac34d9de452a2151e341d8f9d8e2cf563be8b99e8e7ccc5089bd43e82f6ff 30460221009fc84bad8fcefa4e65da9908eb9241ffd99490b34fefebce426e9e7e2326bec2022100d19a0978f0700ae58c133546cfb4eaaa689aa07f81ee4f9805486d2dd3e2d8d0 304502210092fc8f187ae9afc3a83239df4f8a8c93e0aae23113361c10ff3ba24620c68bfd02201cdb4aff06c611fc259e962c98c6128c599ec1e4c79a73f63142653b2a7b0006 3046022100ad0506248c6d4ba3b24a1136d0f5814efb44c4d7ff51b92c5fec1dde881242e5022100f3ad3a8853878eebdc92a1fedbbfffc98e648fab168b67133ab02137a86c9516 3046022100ae6bbd80aad8ff3f94b50579a858bea710200b2142ba29776dc976b0af0671ef022100d8dc4ddf534550a4c0211fb8adc2c670b3a91eb8acb0550f3906ec798e5a6d0f 3045022100ceda1bc482ff8a5e6d8b439c6fb20af419d6155354790724f31e9b327e33104a02204e9c5822255d22c209709ebb8ddb6a7f5e9599ed8b2b2ded156ddf9a38255e64 30440220585628171c1be0822c28ae85008de2bba35408971d84cc62b724170c1155e6ce02203f2a3b77dcf2e196b458ecc39492d0278724c0b59132b5f6c99a1287c3c60cf9 304502210088bf8206660175538410ba0563a285ec98452f4a98ec60637ec342b6a4857c3b02207fd75ebe74ff50a1ceee4864569a57e6c87fd492cfd04fc6acb68ed27b1dd868 304502200cf2ff4169beeb5ac193beed8c9684c4645dbdf64870e01c0b050819f93c52b2022100edb687baea6b2b51916e71badd0b6855dba0a553b766dba8e3eef06157c95747 3046022100dd7fad290a58acc169448c5a26e40fe54682f0b24792fbf3fddc1567496b3ac8022100db86aa1a4a24a272386b75e16bcf511aa4fb327fe2895698f9bdabe3937a61c1 304402201e2f2c87eaabac5f3818032d7f85aa57650f9f1b6ac2b407f02bf0e3065f961f02200c6c8e74b69342c398547e244bbd0555672ab0ba8e85af6cecfb02b9b6329e60 3045022100c8a2c4b659fab2eccc3bd7112cb217093596e8ccc22541a1bbade303f1153f360220060eb6d594412f6e2fa294b88f70f4bf37c846e912f68b6f2fd355ea8baf14b8 
30450221008250267ac54f84aad004e76be8196f564e944744cc88723de90904267ca1c73702201a40200844f75a5170440fcbcc6a022ebd9e1632790d0c649cb844c043ff1669 3046022100cb9d06d0e9bca4d706b5331bba4fab5f79854ef2a58342c5322032a4bfde99b3022100e278afb276f0a8b4d16968351445dcbcce5bb672f03e6ba565732e8dabfeb10d 3046022100e67cc2ab8fc7563ea463f898fe012b622ce4753c440b3a55c2e9240156eaed95022100da31bf324c110bf1ff1ebcf909eaf5324b4fd7224d7201c376536a43be1979f3 3045022100969ec8d3f84b035e573417da439dc62162e3fc7d97248225cf443a8da99eff2502201c77a500163b14f6f14471a8e9dfe77d909dfee604d8acb9a62e625b76404f21 304502205bfeb211b03cc7bbd63ec5c84b317d47989cfecc7ade1c750ab1e38b9dca33710221009220782149e60223c63f77463396f60f2cc979dc57242f1d9042bbe34bd77fc9 3045022100bd614f1eda12c13f29eab520e2a28e2ee94eb5c2637d43c3ae2f3d4370fb699c02207cb19a62f2aa4d3a7ed1e40367a017b079b644dae28795493a4cd8042d2d774b 3045022100d2cc5f6b3e7743eedbc8a2ff2a5df324602fe98d4566605e8ecd5083355302b702205b6f3b767849503e7fbc575da4f3910fa5a2b7d5706148cb5d075f4cb761d097 3046022100967be825256e60966804db7190c4ebf4d5f0638c64bd02e18a86bbebba9f69bf022100ce1290fbfdadd67e4bf8484f8fefc3f2c1bf500f61cd2179c209663f9ec663f3 3045022100ea17b4e73844e57798100450913b436affe568e5850acd6bd3ecdf0108b41ae602200ae80d71b9a14df35c3f66635aec2c62aeb4fb04a43e334819a502702593cf19 30450221008d1cbc82fd5fe6de25de950f4477862399c055d9a55e8ac80229568bc386ff26022004cf4eaf2ff7031f8d7698e0b9ff7227d9492b361c0256630d3f38900041d825 304502206265b23f09d1cc88cc55a9f01f3a3804f9ccf306b6d6ca37a13853ca2255831e022100f6b9f8ed5fa691b9a531f51d91d1b7bf25fc9f079efbf7af2923e9ced252772e 30440220035f088404f01639632de93a62008f3419f4705bb279009c244393fc5d7674c202207b3e06a6959f80d96d0794ef2e65bbebeeeb9b41b22dba12dc60264d90a4cdae 3045022100f4e4869f5c0c45a2ea7a6e0f1c1797285e8f9fba216cd970fd8076f4f930873a0220451883da86b19d0bf7757a0f832f5fc86e7101f541a72a9f2f2e9b12706af4f1 3046022100fd0ddfc2aa7d583f15d75c07359c86785e9d70fad06748919df0871614a6f203022100effad7ab7ee2a2d8e2f209672d24c37c8c3445c648584dba36e461cba9e60a11 304502204159c51a1547bec1b5af8ae7cc7009356508eef7a2d07be3750d8d9c679d6c36022100d178e916eb9fd5d293cd736d3e66b6a74c3effbd3312626e3d5d213e0d0a8e83 304402203a3fa9decec67449d3be8b3aebc646426e9a5b3844ccb7f3ea5a72fac51d5ab1022029e426f9431d3b71c04db120e99aa3944f14aef0f65b8cd50c4442d12500b966 3045022100958e6ef758f3d21035f51cf13a8ff28f03c7901e9ef81d20d19e65f5cadeca550220377c20ff295c830c6dd19a81eeba971bf2df5242f0e061659c75a599fcf6d0a8 304402204b14112e425e9c80120822dd5c941c11702f9bc95b3a211e9795aa669371851f0220023c568fd8bd3ba18e1e511945ccf67a6e0ddf5d246a55dfa6589f82e53eae79 30460221009b79849082879053f59219c5956c1339779d0bba6458d6ee084c9b2f32fcbecf022100b32609ec8ff0800718498074e6faf6229d4ca1b7f2b13bec7f34d5cf2fa481b6 304502205763ee8ab6f174793dd854dbfdbe2126a62fd68798f2ab9e30a85edab290f5e6022100a7d907e4fb1adf3345466d18d8222484efaabfafddb4ce6a43673370f68761c4 304402202eb65f85d00de285b356b861129ec0cc5d741ee7dec22084c31128d9f3d259e402204609ef7df74337b2a976ab0798d29e292e4497a23504c38a5feeaf49521586f9 304502204308bebcc41edeaf03c09afb68c21422d9fb3f589a1cfa23af20708389bbc567022100fb565367f80ed00c98ed3ca7b882da0c689d031b9e507440fe3489c9089c7e13 30450221008ff2cd3e648af853fc7850b03416cbe30bd7d4f15ccba271c2b1f91d36a75ff602200264dd103671d569a0a4d0570a2b24495976277d8ec2623d787bcd52792978a5 3045022100e89e66158159a95f6ec57ba0ece9934845636c02ae158c0223f5e897ac7569f6022047d8b53de15b6016c3bcf121971423b45de903fa07eead2243818b9e7a6997bf 
304602210084b304f5edb4cbffeb618fa14b18567ac7800055427bdd99606e69dcf3d7d505022100a4b8936813002b1bf024641323be2291a0ca9db925372382854e475d974a074f 304402206a802cfaee61c845056761d80ac8144b58002c611e091c146a7d2eb83dccb5c8022051fa0ae1f1e7736aac109013c13c63e453d796e3ca46758dcfa959486a880600 304402206623a97a3f8ba4e65fa38ad0802eb9dba9056b4bacd92384d400375f1b364630022006ecbdb71672ef36115820466f30736b4212263f019a34773cdca90e253ea860 3045022029cb39172a26565279db25353962ddab7726dbf318c8e636bf96d983cd052921022100c9e709056fabe8ffddb83a294a86790ad0d26f4163bbd211bdb216ef5bf4f0d6 3046022100ccc0dc79cea5c9656752f8651e35c5519274ec24499fd2679f44d108ea22b592022100d127eb1e1bed170ff079c61a3ff3da6bfabe8e1927fb539a4af6bfe510ac52f2 30440220231b2a3c96184a488b1c6ed7377d5a2600dc209c3be63e7785ce11cc9ba068cb02205bddc52cf9953b5d3ff22faf32007c2b4626cd08d9bc2abd8c758c80be5272ab 3045022100d1763770456f27f9806dc8615d39197d796d575224c16b6aee676207ff9151380220598151d386d46576db0f35b3afb5324dd1b5e802c2ef531108ca8897ff59c19a 304502204caf906f4d589223ecf8ef491fcacabdd317d37c51ab9eff5f40e5b6c335fe9d022100b4cef8807bec01e4167a40a3051c96ce99a482446dc16d958e6e9e65b3689583 30450220091aeac0d4a9b95a5ee71f72a32ac26e566ca496852c2fa84de641fbef1152510221009ff0e1200b700bcc2998d40ecbfe4e3b400b2a10ff549dca3495ac7586fb7d4d 30440220123382d78bb6534cb854158ca22da5fd17bb2544fbc8e768db5dd73372b3efc202203be8ed6e5950002e3ee4645af2634b3769e29d74dc102a1eca402582bd7eb334 30450221009a1178b999bc823060007d5a5c565b812d21ae8b3ac3671250dbafa8b0bd2b2602203899d08c2512c6e82298c2deaf5d84c4b2c2a005c796a2b3d3a42ec1d1e52230 3046022100b699c596b85f61033abf7ceac6a943b3e02df30a9e899fca2eb7cf646c2b846c022100ed7bfd7953679a45abff1046c5b3e7d3b35f1d5b46668fa5680e4c4181819910 3045022100cdcfe1f2f35730cda35f90b497818a9ea1a0687bbc3c7a25201eac2febf70ad902203a31f9be5e61376e2d26e19e2e3e5839748c675bd54b11290d6652d849c12417 304402201b5f0a9fd6eaf0692d9197934cde124698165185406c3477bb51359413bec86202200b725b6e90f8fb604621923dff83d12d2f36d379b9af6671990f8fb6bda59286 3045022100aa89d604fa5ede467fbdae275fcee1d4dd167211f7c9fc9ef5a479ebb8afd884022038a1b4b120ecc603acf735dc34510964aff389516e53c7beb70f109847d37cd2 3045022100faa28dd11f2f806575ec79829122b63c8d72f350e5a9a8e67c9f5fc34b91e20202201d2be2e402765c5a0b087f0ff23773d5809e3eaa29623fe95fc1adda7d4eb377 30440220546249715a4910db3bb0a5a3ba1f9d452c7caeb40718778fbe329b8dd1121e18022059f7735ba48b8bf84ba79768dda4b88c6690c15166654a6458d770d59cc0b5f1 30460221008250d7360421afd4d4faec6098cf2013c86e04160d0c5d1778d424ec6cca19d9022100aabdf6d34503cfe2a8eb15aeda70a18e5c2298276cd84481bf34738b119a24f3 3045022100edbae4106f6ece70f1b6d64810660faad855000c4840fc6c516d53559a1db784022067c47f728824e3d4cfb9aa5ddf038a534c41373678a9f487d66486d0920381a3 304402200de1c8ca4a58c802796a313248b84d2b3b260704e1612f5307994f06fe22303002206f69eaf3c78064d89e09a769f28ceef169aa2009c33072018dc77a2ff0a06809 304502206ccd38bbb6087147b9115a6e86e5c29b9f907cd961a239c1b24765977d08dc5902210090e75f11efd039c21b3f286efd902bb5c4ae79c1949a8f5c8edab73e28b07083 3045022100a089303b286c19b847f3516d9d613fe2998d5bf712be6b4f3ad7c59d8ac4b162022011ce0bceec002c13c4ed936c2239a72ad1ef6f9108c34f5598183fef8900a3cb 304502205a89301813cfcd4df11d6770fea55b61fb47e1b757e57f5d911b7de3820f79a2022100900626a19f284a465ad8ba788c0d9b24bb694cad055f41f516773906d4da1299 30450220049ccb1c9beca312b2067d1de7221216d48dd35986a7d3fdbd882808e24172f0022100e5fda77da031356b2592c7823c232c637bd6d5683df7c0751ba02412cc87b403 
3045022076ae1112878646750555b5404ffc9060a7aa68889d0f86a3e6eb7813230a9a7a022100b3f81e423791a2908c93b37f5d814094b0ebbec1c11fadc02617a28cce331c95 3046022100a94b04b411195f7823444123cbb85f8dedb062fd16aee6768e1efb4466f9e998022100abcf510770749553a49653daa1217f7ba2977d04d24a0a7e39e2ea799777ec86 30440220092d559c7d0071702d3562bafcf02d96bc67593913c60b55993d08b8165f96e60220472be5e1b5e96dc55a0cbd717589d653ac74d97d77fb576cfc28d954a4b5e654 3046022100f1b471397a36495fbdf6c3939138f032d2603aed94fca514a8b64367f9fa5536022100c841e532480bdd129cc4c119782e10d031a9aae74d4e8972cf5aa03965775136 3046022100ee78da4088f416c800b06a4eb97bb3b50a9a4d984f574d92b944aee1e05a707d022100ba573cb04b91b9964ec1acd8d49ec0681fe9c1ea6995e128817c5603551aade5 3045022100edcfe99397abb5222e969c790fe0666ec5c5a7ef8e961a27e716b01483ebf5270220291ac9bf5b8af63968f34b9ed6d436f3d08e80fd72eae49b07159ef11bf3bb88 3044022077b6198ae72d557af12f0aebb04d6724d97d0858fddf7b89026133f6a23e5c2102202fd160c683b43285c2ab2e6f767fe4f7cd1aeef2bba521c8f3c6d9a64d2e7148 3046022100d40d44c826acbd0615c09f4ebe540f3007412db7ce4b5a2e535f35315376d9dc022100aed143182c5a9b01383b64780ca2d80a7348df78fd0ad5f7b48353edc4deb0ee 304502201ffdea9ff6ebb47b32a02e3eb4c44b16768eb144d31b1681e8ff78f48b5eadc0022100d615ab217f0b9629ed36ac3bffd272e7f2ab4c47974dfd1f75e08208db7f78e8 304402206e026f1f9cc9bef2897e24bcd302b2586f434e613e678f592f855a135c7e83df022033081d0c794181a5c38fc1d5b95512013a70e3ab905827e87023acb6f79027ba 3046022100d390e159853a80fff59d5ae4e7442adcf4a05c74b7e71d37215360fee2a0b058022100ad77dd891490391da38c919fc26c70acbdbc92cd3f63ae370addb6dbf518f274 304502201f0a4398aaac6427a2b9c3bd4b198fb89c81a1c2f2f1c0ea7b853c4ed885573602210097be892afdc2da8cd29b27e315d3b4aa6874913cebd1f34c659a23aebca5ca7c 304502206fb4e86dc7236e05db0f052b4ef4c368f237724a02620334dd4d7dd87f652fa2022100c1d9b93cbda309afae26ceef6fad368c77428b45f6e5c63d2d1d484953ef9d48 304402203c658f8f68484f1a49f64eda3d258aa33f9cb2dc0d7915f7c59c8749bfe1ffa902206154e4b92cf5a721984e2138b7c834f670cc890395a573469ce7cac5c6e6867d 3045022039586cd090a541aa76acfb96041175f562412d13079b35d534b5b2d67a874ca30221009544a6798fda5ec774fb81a18f90de794916ae79b7d9777a26a98978e6b2e978 304402204a1b636743ab7a37b251fbf497b1c2fd18b7f9322cd98e4a4322145e33e848df022058465311db70fe095f49f31dcc8c1fa868192f6b30654608383f4922e074ceb5 3046022100dc05a679425bac479532680f23c86d2a772f52f50f857b7f8757fc1d21ddac22022100caaedcbeed8a3610e428b0d1299161e717cc144b8331a8e4bdb0c3dde7409bc3 3045022100cf719e916c542a41bce8f8c1662bdca2fd22b057d59f7b80ea21a34215b034c102201e1ed269a37f86f18098a8aaea7d1e05ce64940d82e8c69cddc3fde8e83ae921 3046022100ef48feb0346f7d3579c14f795bcc7996e5b7b9715067768ba6b6f37de9db318a022100dce12b1f281e03d33319e10a278ca4bbc0de7c90b361726c99626866b9e6230b 30450221008ab554bb734e0c0e43614bfc39c72576a0e382792b7abe7da2476978ad3ec84e02205c957196bb148490ac5ab313cb24c122d903ea66fb6be0aae774ec267751450a 3046022100f6ddccfc34f2c8ac2affecf9f23172b34dbd4900156674c467487de208ba0441022100b80b26d5a5982c94ebf6493fc216e05054d8e7e8e8144036cb943cd60fccd0f5 30450220078cc55d04f2fddce0a6bac85061e02822ab03d175a3a363a168e43bdb412fbd022100df9ce787da3c7d6609b114c94aada61c23465d35cac76fa0e0a4e04ec88857f8 30460221009e153fab014a3570188226e882c7e2f84bb5226cbdb0c50ff591e52dd23a383e0221008a40da9c61f18952227d4d7b8a6f0a271dd5256bfba0d7b668b1c0a3439cd03a 3044022072259388d7748bf1c9824ce28bd4bf8db7b101c1b58e806980e9679c23400f1a02203547177529a8aa032577e6e05dd65ada2335103eb1accf277b200dbd49316c32 
3046022100819647f028749298469da91864ec2757b3596d7f3cfbadf07a75495e3fad85c8022100fd90fad7aa7a7bb8c7c910d765f805060268935bee521d6b2a462146a0633834 30460221008bf05a0f09c71b0a63295232c9c0ac145d0722b70d47032282ec98b276a57f49022100ad1e7d3d969ad4153c9871fd9efc6633c3ade6cba983a4cb51a0214bcef9d790 3045022038b845cbec396647e212141a7f36092b501ad59e9a2680f893c3e8a1438d6e66022100b4d4290aa2dcae6f3f08ddc4b98b6d653d980d2c30e428c4011df36ab34acf21 304502210085d7a88543ee79a70f5dc818a0e86a2eaf3ed975c4384aecd6a6cccca934a17302204e5be492df566445f9d9790a5d7404786661aafe085d1e9bf0f924eea8048a3f 3044022062c08ab6ea68848f80409f1ac104e1bcc86fb9e48c75ddf2af7b6815019672c30220234ce5183d9829e3a6f30a32677a4d2d51ab27ef71d30e5d66fb8c70543d4790 304402207b9d8fc7e4240545f5904b6c46dd91bfba5e72669037970d133f3747f4d6efb4022059c8d3810c98433de31df4c956f3d70b799b179da8216ae444b2f8cce2855d5d 3046022100d0fa1298090e6d60c4a7eea04d42b08c39f1714752e40b2dc6d6a52b6b53a8b4022100af9641033efcefc07f7353362615eddc874fe9b059112ac80ca9d89e30f959da 3043022012d87ba02b1d69915a31e77c1d725ba2f1006c08bb8d466c9e9f3f7aeb9c6f7b021f320af5f9547031a76b29d8c7fdfad02f0af4d86cf9994b2e8acc933737defb 30450221009a2f03ff0904df8f2d9f2923a9f3d2bcc8c516fdafa6709304b84c6d40d6f6140220333d758f142fecf68266d910f2c7cf8cab502b52f4ff64a179cc64599771e079 304502202207434ceadb371eb852a26302b4fdbf00a4d3fb2a933f19237d8502a523b701022100ad139107bbac7ac4c5426467e4955cbf958e4b8accabae54e570995bd9857e8a 304402205d78b8efdd300d306f13f7dd18d64760be4a1e5f148b557e70d8a29859b9573a02201f14b527dbaba84f59d96dab3b4c5015679584e3fc635b9945cb623d962ea2e9 304602210084603c4ce94ee06053616ed1b6f77589cee6a17cf410f7db52317eee6262bb87022100aabdf58882d286b309f989ea53b9924a5bb3de4c4fe02fed669949a68e582756 3046022100902bbe47ee1cef63118f4798c46fabae2bc2c6b22cb208b5b7ba474600932c2d022100c43b4873977df961bb565d5a2d77ddfad0cbc72c0ca2a31828c093dbba9d42a2 3045022100b3bcf047286da213a7b83b5d5dd010fe6600b18d737517ef06d99e1c970e670d02202fc26df8283430b1624e8286bc34b19ca74078ee3e46d6998c91c443871e26c5 3045022100bec14d09d23e447c8d74d733d7c54e1baf144ac58e997d343827e2fff9f4096e02205a18598d85427e22d4e581780f49c8d866df16f4c6ce32ff9967afcb6f62d8d8 304502210092134c6ae41a9bae7217bb6c2d63685f426ef916a7829ecd0018be83b3bf0e9c02203f3edefa90150aea417fd42939356eaaf0d06489d9c85b47d2976e0b10f94e8d 3045022057b5d699ab4d521483143633560788975db44a31847eedd1e8ca0325e428e8af022100924930c3a87f1be6bc53ac5f8a2d6fc93c2392560489efca5dd36d7cb5a31ce5 304402204f4318ffdfed0c6aefed935ec80816a8db9a476caca810d9580fbdc3efe028d002206fe520f14842f20ff37a383f6dea507c7da1d2a5a48ac15d82b191b0bd500cdd 30450221008a702a8e6c9d141a8142ec229399f7a9673abd628d203e08a80fce7eb9e3aa55022058e82e41642d6e60834f31ca677afb0e221ae82d182c03a40458c40046377394 30460221009c76a2928510a7d8943c3f2c7b06cf944897358d8774d7dc5935dd79e41c7706022100cc1b69fd0af7bc68033b82d108712dc1703f082df50cd990d33ee0774ae32e00 3044022002f5f127b52aec3da62922f868e3b45edd4209b0906db0edcaae812c02bc2f860220668a642d52a8bd37e253ec9af96c7c0dad0de54336cd9f58efd829ab53f550f0 304402205a5daf9b6bac7b96d41f976d8637de610b52144f87813ee6b6959d21a1c189df022010abe242bb725729022b3fc216605b7c005d145e652f75d274dda3552797b5d3 30440220671575e8e5c504b09fd3df6407e3f165df5ef7c6d598bc9dfc55998aba752fc0022019508adfdcb64ec02398f19ae8c93a71d5a6a04b739a827d738a5e927896e1ce 304402205d7d44889ccd30bf584aa7d53db9d190b9fcebb3474a54cff75823c99e9c1ad6022024c035fac51447f528ca8925b64a186e34feea08aefec4bfb7e72ccf4669b807 
3045022100a9f8cb75979d66c13c669c9e5e72e8358b3fdae2d98943e0fbf6675612ec65df022025c02307be372d7c2e9af8f8956b846ac03f12a7bc460d08db6c03de205f8989 304502202d167358c1cfb8abcebb84d6640a0496f7d8ac977fb9353ccf76f758b27a89c4022100c4edaea8aff4d249c4bf53968c8649ad9e0d60bb288713181d1220450deea276 304402200c882615d1f8ce20341f106157aeab4c96c5af920adb36fa644811ac4c5f17d202200a23e3a6ab5b9da698e98c9089fc6dc4f4ee9497d56303df57732f983d311963 3046022100c8cc152259763f137411e4b2736b1eb8b7490f5b2202b28aa78d85f7e5f5f6f5022100d7e7622b4c08c2fc92cebd36e1805b02c32513cea017440aa9217dfa10a5dfc5 30440220365d8820cdedd42e7dbae814a5f0a9892de4ad62d2e59b4767c895ff482a9f3902200a43abba79d9806ff3abcea72a41b4d59ca58a084a0c79816d27ca3bb7a6dd2d 304502205959c79dd9464f31d36587e998dff1227c28a73b3615b54e7cbcfbbdd0b37281022100e9910f946a5b41b08f8b16a77699b16ffb7f442bb1b2c7d36dc18638ce930b7e 304402204cbe34c98adb892ce57b3757b688a113100093dafab91113650b020c2187410802200b8a318a7e916b134970e7a503888e440b62950ef3a0d68aa100a3a71a46ba39 3044022042310aa289bb9c1a76d05b1e0385374267017168f0dd24941dd4698c0c11845602200e620dcfe1bca1ec06f318b1fb211523edf6632be2ecfb3ad86083c825a9d3c9 3044022073b10405e3103236ed47c9650ec8bf4523e165c1c41741951201c7e12f5579e9022001f13866e2c3946be2399394c8ec5bc30f56fae15f7f8471da02fca851911a13 3045022100a8e9e53f4bd985dc5e784ca04661fc9f5527d7b6b53b9cc43c97581cd822de65022053d632167acdc8a3e196fbbd2afa667dd20ff48051cf8f5e470abd45f7d4b0d7 3046022100aa88eb7cea1e1005b9639f9955a4f5ed9182ceb9562e9730ab2f282f16b558d3022100ae0fd2a6f73a1dda9b4d731b1064c92e7e5f3df4e62213be5876dac0b4c91a71 3046022100a31169d206782b82f7fa5fb512a82d8d7bae229daa363bcba95f146ac749ff0702210081781a163fdf4eb47857d1bc293563d7e1a09285d85cfbca76fee0286514b36c 3045022100ae25a8139916b147d75605e8440be9dbad73782b58ae5331fabccca0462f3c3502200c00ec78df1c2584df13790851ac5ab6eeeb87049ca7979e33124a9e72983680 3046022100a7adbef067d76fe594608fd5b54020b4ebeaf060d7711a8e46c6f1615399c55f022100ac4feb475ce6adba56f5fa46b733402ae9d1e93e445e80a5ec55bea53940950e 30460221008ec7a69e33826fb2b6a47dda27fca813d73008fd63c8ea9192541f53983b6e55022100969de110c1432d318c0ac56c8e31555c96622961e08d6d6efd6d932f55e1f0c0 30460221008a728f864d0a9f763d4a3ef7e42d2f84b0f6512b9eeb2e8386f8ff25f83ee16b0221008a2d52f44237e019bff37f66f92cd69787385f73da5a4139207c4aa24e623aea 3046022100e1abbc3180c7777eae2145c3c5d0dc5905f68139f2221c1ea9347fa5543fa4c6022100de4e73b397dbaacc66af3bd98af74e0006ca61a82695be02f6f14e7804575c5f 3045022035b324569445df8c64af900b0fba4c285beeb53c33109686f4abc27d0f73745e022100af41e19b847114f677e8fc35d694095edfe7a5925f2cf9ada028d9f406de0062 3045022006ee985b43ee58caf0d2033bf7094236b38f3f4c4a76c923134cc783f39dfe1e022100b409dbb37caeda0c2a41e09408259c0d1138fd7be4adad6633d092dd8e8a9d63 3045022100b879889658f80a4feac2179d06935601575ace4841819f95f6fd7057d6f3d8550220223de7d3c6d17ecca377493c71dac4cb1293066a6fff56585c81f307da0247ce 3046022100cd556e8bbb4701dece3defef8de70b0d56b4a8021fba8aa2f9b5fb9c8b733dc70221009ab5f0069bf084f3a6fd9d1e20d649d3c17cf770050a1c12af85aa95ceec5439 3044022068b1b96e5c53740c2f7b6c43d452d30c74ef26c19cd4b7bf21eb82b8e7e1744f022045a3dda010acd355029af8e478d9355c418d9828b1dd78289521e738fe58236c 3045022100e645dcc399cf1b747903841fb1271a4c75dede2e7ab49ff3c94023c8eb793e55022049a5db47852ada2bbb4390782f9bfd265fe1f3895ccad8d3e5a4c95b6bafaed1 3045022100ebfd942f3da7a21b5f7b250175263061ceeed4d016ac625b5d67d03dcc7caedf02200c547662cf6a51561a653a7d26025128302dbe80fbda59072fafca77d074d31e 
304502210090796a296376496f9bfac1caecf0eeb7d0d0640a9b7605ff10f102f2e23d248702201e2413ac31d36e98af04c0d3cdaddeb17db1bb3c7d85a1adadf006c009cefcbc 3046022100dbf2571eada4e50d5745b937d5db51ed1ec2f77d90fd0896e6dda7c2142d85a20221008deec1de12f5f643d976e082b9a46f015fda4e792a957cdbe23122fef2b965d2 3046022100bea9d1f05f47cffe1fb38d39bd5ccc7af46d9ad45e540eefd6fa1e2b00fa128c022100f4f112fd8d899a218befec4caafc383a9fa4dc7217c67b7415d05839ed4b2d8c 30450220287393c5496a436a1607e445f9583d212ea078105fb445d0a6c762a50082f0480221009d043103ef1e243099e6be438b509e0eaa5cf771e2dd7b90f45b87cb534f5b3d 304402203b350fce709c30c14b51ef6295d3177bd18dc7ba661d1c7827289fa6fa7767fd02203a55c6e343c8e529b8624ff0f984f0553052916977e4919b6aad44f3d532333d 304402200d9228fe83c45c6b536fd3a178881add7ac94179740e9d2e6b7d9ed1120b2f5402207749317e35badd286f813866783d0a97ea61d8e13f0b4e045a783a05dd0cd510 30450221008575dd73b1427c7d9ac368cbd438a11e6eca4f0d5c377c86ccf860dbf3e03bde022013adb95ee849325c8a03b9addca64b3098bd7335711adeadb949ba70a1e0769b 304602210096045a02492c82c1e009c95884e12b769605895b357671a9adcd5686e92754cf022100a68abaf6808a75d1b56a65ddd7bbf6c977a9723adcc11b7502d6973ee8824834 304402204dbe49c4fe75111703a4cc2ad6c39c3f8bd305b3ab3773407190dfd4fb75dd0f02205062ff4c67d84654cd2e0948269188b8f5f0858b47dda868d3f3606df02aa6a4 3046022100c5734812e7265f1643248c97b6fde092e22e7a0bdeb4bc30f55e1f6ff2917f85022100dddbdaa4d29192a8bcd043d327c99716d4a4c1a58d6018fb7a084e7c4a0841f1 3045022019459b176a543109f14af67246f537d03be932501ff8ad189090382c9ed69ce8022100930f7d502445d3e1ff42d460ba8588dfbf8c980bbe9f886c12aecd198a3e0fd5 3044022027a3e9db02882a4da3d4f4612bef9530e1a58f335a8d3eacc2c8bfc37b52206d022077aa1b3839f02a98760e6f8f5b73017477a75e7379291b4d14516cb6f0dd9ec0 304502207abf551ca193a44d047627c6c7b18c177fc5a886e3a083fa5697a3ddc58dac37022100a9f349ac8e65a9af06434ef525043b220f4460bf63866a9fce7a93310e72b505 3045022100f6aa626365390e2d3edbc6ff2ed149a249650e7e1e2138305e56513e0c75f53602205e95e2fd1d93f33e6bfce36da13a6ad53537b5025fd6fa4902c6e4567fcf9221 304402201f97d4ddb6ef8a4a7d72f2d29163097039b4db60b9ff4263e78a62325c43e2cb02206acb7c950182edfbba4dbf67c1d95bfbdfb1f8e11006dc056e2f0251b1184faa 304602210082b74c4cef34d9dad9cfbfd11d2c870b1c8563469d1e65855cffaefc2a5eae70022100f7bf62ac82c81aed8df7830a87cd4d84b5c2ee89b1b54856b09c295030348e83 3046022100c9f84255b5737ebe5e9955b63fb9a48674d87e1772569faf54ce44cec6c0c656022100b6616629c90b81708196ea242cda5d49dcd33eabd0de19fbe5e4356dd9e78a6b 3045022100abb8cf3fe4a43c4ac1e7ce55b47eef12557b5c5fe55216195e6acc06169788890220416ea6217cdf7f59afd0a1ef49ea112232ecef6a28cc10edbd3399097c892290 3045022073192a49a010d8c2469277c1cbf279faf43b1afa3527dadacf9ba3ddf39d35d00221008eda539e33b7c19edfd559fa2a6efe292ef1871db0c40df882817654629e7fb2 30450220505337970a629a7aee9ab133080f854d58c7e84c0e5d3694e3929fe19ed89fc9022100d33dc0de61f14f69f17bf6851ef6fdbfbc0973854705f6de2c6781c6fb35104c 304402204a95c37a8eb5a3d22563fca99674f14175a6b518b4beb85846003d406302e26602200aec4cf8fed47f761594935575a3a1a63d44176d0306f9b55b768dc5125d7391 3046022100f478c9ca28d72bf4ecffddd1d1829f2652ab7629fb548d5c6f27edcc85de08f70221009600d9582084d4a930c462c05e7768b88dc2d588f96a7ba7d6d95890b2c8753c 3045022100e4756c8de0c1a1c89bb05d115a61c681d56a1dbd7ef3cde2df5554159b98b8000220720e84ecb7900debf35efdf464d7256de299a11689afd96b3f38102b7aac4375 3046022100f4cd7373170f809e674e8d5c37e44aafb79b8f5ea825d5c017e55a03b1d5c573022100bbd27a88180377de1b12bed724cc7d1d0efc064cce39d3615c8842df54be883c 
304502210094044a006dcac440c4d4d9275b291c106e84aff255cb239f69dadcd0b7bc22d802205f633128544b8a06874479f9ee1fc3233b8b9feb97d09225ec5182df7e7a1040 304402204b9db99552dea43f8359890fe5044262b0aab32e87b9e8526f8e6c3193859f06022068570a3815db98ccb5ca820a58f26009afc7882863d1e89e9c02c1369afe2ea6 30450221009642a2d54febf9bcd1631f7924ac4816d9cf56e4a4cabdd176543c0f0ae7642402205071d36b1b97c6c66c251d8d2cd7cf85cd05b02dbea49b93123471af253de60b 3046022100b4c145efaddcbb37164052afa43e344b8ad6c720449b96db7a4d5990a555b628022100b23558d37d6302797a75b95ab4f52d822295da1afdc2d5e1cd49711faa02563b 3046022100e2ac382e6d10eba2b5754a28cd1bfce3531dd5e2f4a35986e1b5f4be8a166518022100c14a954dbaa7152f73dc74bcba658d7eba65f7b8ba6f1e14ba3980fffaf13bbe 3045022008761cc050cf3e2082124fc1e31771681d7b6b1150474a7f78cee2b14d415b8a022100ff1dea83e48812602078d5b6cb6f087fa9f4916a18547be758fe7ba2ae61c6ac 3046022100de08702ec1574a3f8a73deb7046b496f2b915e8d61f06f57ec8f55c6dd95acee022100bbebc2bf6ed57db807ae1477b4adce9774325d95e353cd22b5e26e8cb32a883a 3045022100c1f696ea08199efc072e20e71ae4f9f98e2a5c214120d538f95ad5d546376c30022026682b33f2b725480ba79e684c797008dacb0a3db98a255958b2254b7acb51de 304402205d421ec9e0e20735b218ac32d7c756b579dd0f65faf3068e3593b040fda54c49022063e5c0394ede679e45ef180cbab5531624e7124e06032170519e0d6837adb1d3 3046022100af2ed554849af26e81a7318523ba23df069e02c7d0681c62a1b0f7c3a1d5d364022100ed6885123e3e1729f61f5988e4d585ba9a08db7003a842e12e2f3db87812ada4 304402203e3bd6162f2e4d225e9da392e21c9ac10b76abea2378b6c72f6bdd6aeaa36e0102200c9b0b0f146c36bb4b3f967e71ac4dfda45ed98acd3bb4d4bdffba8bda61303e 3045022100b0fc99c6df5c29d21a94c3fe72d5d8e923acd40a64c5afb93cbb5ba15bb1e59c022004da13a5701e718a4ea71f8351945d400bb0ba82d201a5d0af829bc5425b4922 3045022038e70e09a9ff5d339a619a65503c24328ad95335802240923df15719e951a456022100a0426082e1cd9ddfa443bcbb66c179bc94267e66a4deb97add59761524cd159f 3046022100e466889bf391b99e55e1269fcd1d1109e4017f445446b202ead10ed5e7a459a1022100fea33f5ba022fa96cc02fcecc9c0160574f2a76e849452e131d717f3f276e683 3046022100ea5b93e032afc0f78414a0fbfc02c3a808e23702a6e18216e8f7999c98835cee022100f613172ebf4240bd99a7b93bd152ef5d736dd3f599ebd09dd5843c6de6cbb7f1 3046022100b0d4d504269c1eef3e33a0cfc4c75141934116b37399332127d4e0523a537b22022100ed427d78ba3da0651499b055d33e60c03081977907d9e78f5372f4be52d7c83c 3044022055d3f98ae1762df2930df75d5548802b8aef799112c3f72e5f4ea7827bdd8613022006d33e4e022f47aead69e0d5c705d617011c740415c2e52c4e950025a3e47fd1 3046022100f65bd74f50b4b16518411461e408128461f9f4fd75520f1a1ac8f944ed9b8e6d022100b3ed1326260064f58898f81b5db872daeb2c4cc659daeaf8a0ba69cd9160b8c3 3045022100eed208dddf3ffb0a6345b2c334042c668940165d45017a5060a22f37fd93bbdc02206f5d85d69531dd0613ba8033959ae79237d94689f09065ff812c4cbf6069ac93 304502201a855df72fe77e1800bd4c20de6650909bd005d59f5c722f53863d2492224266022100f5e677460a387b4104f0b96c636d626856a562718f45d4811730c3fbfcf245b8 304502207604e943fbb8aaeddf6ef5640e8efe7711a3f8dfee1bd0b93eebca62c6555ef2022100a4933cd3ac52976e8809e604479931bca296c02ad071dcece48236c71792ff5d 304502200ab1ec138c4086aed3e0efce272ea52f4d843cb6c142b703be3f57c64a199b4f022100d8c94f30d61c77afe2ef3667c2e4dbcbbcfac71e99f597369588e406aca84662 3046022100ee832c031eadf3a18428e13e10b125a3ed1eb149bc562594fb07dcb40cfcfdc8022100b696f4056a75ca866c60fcd41be2833e0dc673d9826e5df77a97fd57358bafce 3044022019e2d04d1420eedd1015e753e7b0d417b4b24eb5709d99be2f30570dbf85722602204aa32a6f6808d281fec161d76bcb30b09952b65405d029c977bd7419d2898271 
304402201811f078165f45832d7104c3eeaa820281c43e4a97d6c6b5a4287d6b0883f54f022077f8e61d2796724f8b655bc72b8a326bf07982a7798b7d19eae43437e73c8e88 3046022100fd9c92076b281715a0959fa2b76926502cbb26df5ebba54c4eb07ae8627fc4c6022100ad3e372407e3f08806faaa18fc874ecb3b77924978fd6a31b6aed61bec6195c5 304502207c334e134d25dbd0ea105b77349df5383d30b648a3cb741d19437961ea7c8e0f02210086e8f9f1fea09633c820e2a0572c1a9afad3b643858ed069e077252d356c9622 3046022100ef27c76844ed7d288d7625f1d21ee19839e54f39e0ede553d1095f86ed87aae0022100b03c0bcde538ddf7632c6ec706f9321e27ae582a0f26047fb89332c9c16bad2e 3046022100f8d986c4831fe758e250d3a8fca3013e1375f54e967eed35fcda2a10f90d6372022100e3de9081c26931afb280962a51b46be838a4d78a3bfe5f242a8e03a353a1f01a 304502207937c9cf285cc9e8939077ab0891e6ff3228b6218da4dbe4682ac927d60d5b53022100a1911bf393158a21a31ae11840398446192663c54fb7c8746b7500f9afa49a62 30450221008ea02bc3733d7b75b62210664f46eb50a67aa6cf47d1316da938efb5911b5313022043f16e8d3873fbd3f37ec4e8f1134aac1a8edd4abba2268ce340147b4b6b41e2 3046022100c871193d84439ce3f356369ad401fc3ccc2660f477274732cff5ffd5d47bbeb2022100faec9cceb7cb3ff85a1888a785071989e24da199e9755cc756d2fc81af872b7f 3046022100f3a44c1f1649331d78b4074d8e85c85f42707e5e00a77cbc21a1c20aa5d57133022100abe5f213acf56a9aa81d1c537fd93a4a4587a33f9596c986e51168a5ab56a2af 30460221009c662000246055e2f891ec8be0e5eec57fcab4938fce58fca7ca6667a441ec1f022100b898af9281d8cf85d3c6f126e2d060fa407d42eb8ed58bc3c1465ffeab7fd36f 3045022100e1f21eca89188cc9e175df1460e9d1be48ed7bc543f209616466f9ca38aef7160220531dbbc6d1d9d3f9c4ca707fd9ac9ac8dc5a5a5ace9251f85e61c279df882eae 304502206167e9b324d9d85926a832d13ec7756467bda221a1fe8e71c54b41b77a20c07c022100fef016794cd4281fa0a474e2f347cd83ba65c3dfc63d457c216b910bc2846f6c 3046022100cf2d6727cbd96e96d258b1fe01228fec8e13b17d4230ca8cb4732080d18d38ea02210099e2ae14902322ce6ea859464777679fefdff5a00c7d26ddfa33ab991fc80703 304502200241dece5cd5fcf8bb3a3566a64e0ad61a9ed50d750c4e90e95ab4f1564627f50221009a66c6bf73a09407e6357860ba9c45ec0a8caa3fb82aeb5fe4644a8556cb4d73 30450220302db0f0bc579833d31ea738402670c5caa1c5f12d0b505ec8b9aa6ab86c8f70022100f216bd67ea8fbd385bb53d3426ffb1ff688deddddeca04442f65248cde5858aa 3044022039b0e05edd7eb84de0f77c8af7632e130608aeb93db903a8b327ea24d9c8dc12022063efbc30e1ff8c9aba61386e1b746c529a6b769624a2b880068bd87a5fa9a845 3045022100ad40b8ea29fed1c7fb28d212a17c8d938a1823d2111be61e42fa853d2b3c7b8202200e5d60936aa0a438316547e357481fac65e64029c0691bf550cfa7bce5c53e51 3045022100c8253b368518cb21bc5bfe3676ada63218f2e55481609a72dc98a57467bae39b02203fcc8fa5d1ece66785e77a5ec279b91c07bdbd61724cd8cbbade068150641877 3045022100c2b4cf413c7ad100f073cf196e92accd4a56ad53d3e30f9084db5eea7faeb18502207c83100797afe95e627d6f55ac73128e803f11da0e62930c86d4e96b9946d678 3045022100a170a776dc9ddf036305aef2025c3973ba718f0f4396807be4f6c83fad5bcb9702206211f5ac63ce6d2ee0934638995e6970b749e40a874911ac5203f851959c85a1 304402201109cbb5894a338912d791ee56a793d7670f7ed1c312355ba025072096b9a69a02206f8137f720fbbcd0c6f67febf174e297e031b54392a592cc5bb3c02a0db6a03c 304402203568b17a9984c4de6273f2b4fea4f30477622c3ed9468f4d5379886b12b0d2d0022064543fd1e9a903edad7ba43a358aa92e4bb5432f4fcb21749491472afe382b55 3044022100cfe53a31d64797f882c79c9922a3af4e2a193dec94d16d0991ab62764eb52916021f669af340360d08f1a3a1c5c47cbb5822dbc62bea4938866c64bac7e33665eb 30450221008ca075ca51ea0a3ef0b4d4a8572785ede1614a538058ff25a941c1adb4fbc37802200546b35668810cc128c40b1f1928a6cc4130d5e0c0a551aab06f4faf388803bf 
304402207fac62913b5a2b37b8036f3fd6bbcf588354966b879bda02a35c2777a6896e7302203e9faee71a897fb6f6f460066910b592c4d2f4b243080c69bb075ae11a193fef 3045022100bb4b889aaf7220c89773421a50aff372714ed1b00e9d57dc069e39baf37b4edc022038f69c1c63c8b37f52b9df051e63e7ac6d29cc87ce58854c255f4a5fe1d171c4 3045022006bd6176beec46d2f2cb03066c76dd52475ce1f0e0e27c845cecac50c9b979fd022100dbacecedfce0f87be85225c9c5587ef63e18d8cca777dcc805bbbee2409c0777 3045022063e57a29ff45fa9fcd7d322b268c78e97018c83d582ef6f74589339a6dcd3136022100dc03293db2ed394b4e87a6970d51fdc230643f6d33a527c90ce0682f9bf77067 30440220470934fd909cf48c054bf5ad7b5f533ba4d65861d10fc489b89bec2fd21dd86d02206c6cc1c9893ff3bc9b86530eebb8ed80e07acfb62144ad4fcf72aeffb4d343ec 3046022100fc43b6ffb2fd081b831e2a8fa0209f92d85a8490baf18469e6196067d77c62ff022100c1af4503ffdcc172b9188fe7361434f1cbd63ab118ed5687578fc27607b0f764 3046022100ecd96164a4f2915366903e12cf237b5607f13c92cc05216fc1c60df98030b45d022100a20786643989ec057680f5f2cff6c2f45b00263e8fce2709eb6c61649c20b776 3046022100a4b1a87d6e1d146dd12876b7fd118cfa7da18660dd1ade23f29ca57a5969de72022100da0cbd35f58c9265aa58f40f2ea979ff2e6205e683e1f9c2b9cbd4b66e3beebd 3046022100e033dca1e858989afb6a982f0b18fcde86c420cf8dd052ff2e9fe8b2ece22d00022100a11e1d9989d7e3fb54dd4be5617cb423a4e348261110c71b625a3f0b5f3969a1 304502205457519ef4c20046f9db971a740fff4e926da8e49aad267f9ecfbd40b11254a70221008c7c58527db3aebc0f49d9c28b9a76e97c1bb99c25619266932421119d0e8066 304402207a714a9194e2a9dd501027c80e013471688464a972755e17c7ec2678256595f0022057cfa534cb561597c710aedc8646504b0c6a2df50d6a199ebd5fda9b65265c28 3045022100a87ed16f48be99bfdd2496dda3bc4a1ed1a7ecffb8b38c6a00223bda2575f2be02206dbf19252100579b6e3f4bd4a863c914316e78fdccc4a5ed3221326b60c1e8e9 3046022100f5d883113930ffd858cccc0635336a1201d32e476a86624dd9cfbc8111fd9bfa02210093e7c923d5b70446a87913c698f706190b75f2d0f1a78ecdbe91e2d4735f82ef 3045022061560060df4b1e516f954b234abc9899eaf13e9f8c08f658aceb661b923d5afb022100c4c1a23c11aa880d15f641911c8907fccbb49bef7f164338295f6cbb75295ecf 3045022100c97945ec085fd51521570ff2566ebeb94b49e0f466f6aaf36c7c65fe08d5d48202204985366af56dbf8025f07eabc33f4cbd774a57f3be5a824fce5a705edf0ef333 3044022000cfe6023bdc4415d6e8e7fa7fc143ab547466c5254749c7168a48c1afdb2c6c022042b5a61d75070d56a7c783369772fb92c494566245737bf1332e7eb653bbde2a 30450221008727734e2d52e87601320bcb6233effff5df93b7df84de935a13b554fecf1b3e02200988f0da62924bc96da015c802427e0c176291e6421d077d897539be55678e0f 304602210087917974a24af4bf027b18c6e68e0b93c9d950c428df0a8d4887368f8b9b41aa022100dc3838cd77bbd570ab5bbff529c866610229a2d58c1bd3c00e6080742af827cb 304402203a067749b1eeadcdb26674b62fa2dc77f43cdd7021681b35f202b502babc545e02202c20c3b054560e73bc6eb912f1fa4239156a463aa55ceaa04b1a1f8b88a81417 3045022043093960fe9d046435c7776841f23d2e46c532693227dcaad867ea667424a48002210098b479598d9b279d475e7b40c88a5c117519e9a528d7810755ba69656d71ea6c 30450221009f4d9f20e5dc6641dec82c2e988768d5019f41ba3b9547fb335fa72e56cb9c240220196bf3ec6d7a414c55ba42a63f9d855ee9e74c7a8efe1efe928217825c1147e5 304502200bfdc1bac27e119db40db51acc0cd661ce005a43b6f115b66a52b37a9d5a918b022100a3bd51bfbca3a77d08dca387edffac8a07a212a4622fd153a1c53c4d36a86c0b 3046022100f64f72c434a2f6d328fd0d9af8955f2474f8e0c1127d95466963f1a03ec2bcbb022100e198944cb579f3151c0d0332cf6655f784be06c9c1fae6d17768bc051b4bb8d6 3045022100b04508d56b273d1efddc51e1886d552feaed6d32abea8b6a83376a3df24cd3d302204a6aa92a4ff92420d904263a5d9c6daa90df2e5b9bc9bb918c94ca2d6c2604ca 
30440220696bf0c8e2b123560992b781baed8717e0ca352615c7022bbb57154da459b78b0220599cded74ecb4cef3fc157c50fa0a26682ee352defdc5abf689023502d5b0016 3046022100d823ab19d1d96b12e1e1710a5b9ba930e7baefb9df6b438e2da75daacfbf4d8e022100f6c5549941ae79276f0ddbe68a515e5ddb85da1e77de8cf38945518b3660bd96 3045022100e43ed6ef89ebe9a9af45b3c5561128c5fe561bc9739f0d31c3bb747641175195022024676ed14d9534161fb3f2d8b275ad713af59b95922fe4dd170f6952f3e6df94 304502210099eb7a9f1915dc3dfdb7d59286a5ef8110f9b8680822c647a13873446d64ac6d02204e453ac8b4fbb3cc287bca1981f3e9cea30259af2a56ae2b2b7b2d18cdcb82f5 3045022100a087afffbca541d4dcbfd9a182c7a233be1b2044bf89533b4dd4ef0f71b3b687022004a0022541eda2bf593d89480de84538861e81377cbcc82c813bb699446d7318 304402205dcbd0d3d3211708bb39ea3345e3d328570b8a174e8fe6a83323cf5be2fbf8f502204599ea9571b6bc030192adac9ad28012a9f7e7fec89fdddf967b4e5f412689c8 304402203cd8c75af0b1cc5f77bbcb526252ecb8d0ae110c12144eba4c041733a4b8a83d02206371631778fc371d9aca0c5226ab9ed1c3afe3f8cf70ef2b44da239b5ef09cef 3046022100a79cd7987262b919293d258102e87e441db2d64a65b200df22b65699ba7e354502210088b704eae4da6ca70dd6e71ef118e547abbf817f1f83e098c95228bb0b89ab98 3044022059714daffdb25c0a8174b2dfc1938b288a21050419619277c3e4251d9621bad40220730023b488f29125b5a8dccff027c021566abec8052fffad33ffdc0b05bbebef 304402204af467db7f7e2396de3fcb74d834af775c75973816153def07401b6a468a53b90220126ef695ff26b8ffec1beb49ef49dde3774ff01007c06caf3f25a0e40eb7444e 3045022016fde11bd850c94a780a25e1d8b46e58f757ac1f2e45d73c164480349b71fdaf0221009a0dc705b0532a4234bab5c9bee6912cc3613d4aa79de7850954862f6adbb175 3046022100e17c93500fb72f71493823c574a11dc66a9390db0eab9be2b9dbdedd608cd3b9022100930a1a25d91c76c58b4d510a1b5bd425e4e3987841f27ef75aa89d0a4ccc1d77 304502206d4525457bab9e57d755a984e8079d59933df260b4696424a3cdb11a850bad0a022100a9a91aa9e23c9881eb6ca84c6934d226964b5faf215e474701a623f8349599cf 304402202003b0baa35d134b51f03d8eb540e39633b265633d59c0a6374a0879796c9507022045e940d03d674ce44f4d77bfb754aaa725ae568e4294a1a4edd913a29b644439 3046022100a5efe16cd3ac77d2ae08cde97f5054b55bb21d30a02e3d270e01f0d9bd9ceb76022100a3082c24c4e9846c80e49f69c9267af5f9f550b95e6af44cf087accac73ad7bc 304402204480a4cfecea4c91dbbadbb702d2cdb73d32fa764356f199c317f3a02c7bb153022036b977a159da6c57e7355e1be9bbcab8b80611739230c7cf88921fd0f7294d22 304402202c743ac7de747fb7106294b8970451549c0cae48da04f63b07d0be390429cf1802200b28fedacf58d57b93ec15a57eb647f6fcae9bcff562e66d541f7f7ec9f5c55a 30450220245708d7ccc8808499a2af37bb2c0350103685c60c9225841dcb508a3cc4c76a022100e31a7b0b5cb7302a51f4d16a3c00dbd6b20385cdab592cb0c9a1e0ed0442fdcd 3046022100e2300a4a89eb4651f350a9488feca5f3d02f962b1691341bc986737bbfe80efe022100bc0921a65cac868b2df1b9aa5e77ad77de9eefb88e2a44d418509779806b86d4 304502206f77e680483677573f11032c79c94cec8908139dded7004c5f95b3ec2ded8e68022100aae317090cbb1efc68ad5050d47b26ff055d41a1b8e4b0081a1d915e7f238fa3 3046022100ffedf5656a8b064c8856f5875dad7850d585e6fb5c53a97a354c965a85483550022100d5590559bba2760b81a7d4cb27b9ae03a068a9ecbc56b6c8fc4d2217a98f7f93 30460221008e31208cc9c13ffc7ac9f3b8600da45b7cc44990b64a54f14f1f5c247270159102210091ba0ad3a69cc3e8a7cbdd5d49fcc30137a7202818058a6154baaf3ee6e18eb8 3045022100cd56d52a6746857fce878edf07e6bc2f7b28f303632413b2c6f2d7489b21b356022027575fd11d9c65b99eb8f7cc73a07d89519b3d0b5561b4f24795606bc350ad8d 304402201b7334beaa6ab4d24eed8078d7ed73bb6a97e40f3a1efe922bea74394fb9a190022039745de6509b06cc6c7d720c857dc8e2a2364a8e3eef7111460816bdf5551f95 
304502201146605179197768a520e130c3a14673438dbabd02c29db609f7e46df616ef7c022100fbd6d745a5782d5087fe70c5f1c27f2d3343b76e9104cd5e0bde7c4b97aaddcd 304502201c3f8fa066b438a1814acb25659c2df9e6538fd6575ad79b4eb31a08f2124735022100c1541f6e07070d700ce7740d87244ec755d15ec37fdfb6de2cc1ebf3b4d7a32a 304402206999ba6e34b0c603f8af30048c025ccbeaab21ae7eb384f58ea6a0b235adce59022077484e1f3f56e1cf549aaa06e298c6fb66007a67f5d421aa6d515184886381b5 304502201cda74cc2c8a8c9d0dfd6bd647874837a99be7c43046516050cd75f05b483e1102210099cdee03954f0802bb0561c6af609c800ff69421f12c9223a1f18e117c1676d2 3045022100b9b446ca5089d3d58ac493ad622e0a36d083abf69bccd6c57a234efdc9d1de9f02202cc299e83cc26f9a9fe6caab6a0a6ee05a484cc41a7d39784f0334ce0f7e0deb 3045022100e6094c4c757574dd30cf96de8548f9c1953acaad5932710fbc8b576081e0a76002202336d3055f4535d3084e022a9c8dc4c4c63a80fd7514cb21036597df9cb285f2 3044022013c0ffee23226234c50023a0bcb3494458b3924f68106eb3771e56a0ab511038022037bb3be64e8ceacf3a9d3e54d2d481734b53dae93ed48ba6c5b91864fc9aaf9b 3044022018471e7c503bdbe63184aff13b484dce0425a37a95608b6a9fe12c3cebbb8b0902200bf3752b7104ae147aa7a87c4ae83932bf2a982357a12e1d6dfa6858da8658a5 3046022100953b4a7a68aa9e4cde1be58d3638a56fb906f61cebd0138ddc3f973a8f488a800221008a507c31e99f345dc4475e1f8d23a239eb22cc5832a6061ba8e55e4576e9e3ab 3045022021497574b427b5c902ccd787f7d3bca33799138cf383dcc7a5f1c963c8982750022100c7d54338f07dc861e553a16def8bc185f16355e290c6af800c75a6186a1bb303 3045022050c1ecc810da064c505e196df830e7a12bf30c7abf57b5815296b5e24db75324022100e2cfd56c96565eeb037ddbe2418e6bd654220345399c7cbfe3caa8f37e71c5d5 304602210094133158608d4cc72b2d306f39b2830c009f64a315376efd3123f1348f30d36a022100bec6f1640ace1cc29299935bcc91fdb525bb20c9708be49ec57d7a3c3d77c6f0 304402201e5646f0a59f1017bfef339272e3469e9f14ceffadc2aefee4035fd34fa7301002202ad46caaf2cacf0375811b9ce8616d79c8f50b4cf2a45dccbf3643423cc096b3 3045022100e73124ae775954648b4a04fbc48b0a94bd10379d50c63152adc10a9c5e516d1a02202c1b8b451b1a7c8592a6b047d967b17676f2ab081b8732f21f48aeabfebf4ad8 3046022100bd7dd66e1cf57f6ef0b60cbd1e737fb58067ac6055bf1f4e93c2e9c53af4244f0221009379e85ab10ef2d7124beb786b0a24aff3092e4092f27ac9f58b521d0cd46f3c 304402202c83a60e1573087f09c0fd743512d7cf227cbd488a5ed0033e5d13cc8e5a5f3c02207373fcb92143046a6843bfbbcbf892d479f33f23e411c370d0877f86569217fc 3046022100afa19e72088cd322b493d3b9f3968445fd7b7c1bbd1c09f9825887b4cb7593fd02210088067222bc1b0b38468e77f1be9b57cb14e359ea2eeda092b320b7cbfb180ae8 3043021f1468d523b173241914ceffbd01a4285e9efd99455d28dcdd843c0a53089c6d022045c6e1accaef89ec3fcd00f6747f344cffb86d49911ec80c70c32b954448bc5c 30450221008ba9e55444d4d9d5a9975115748685dd22847f338f77cf4b7d4658c1f79e415002206005a16b452dc6c064efc923dab62e653072aa879dabc045fd0f67acb5bc2a7e 30440220520bbbb919f4f90a25c313e800c0b1892c65f92c8b2d4810aaf0a7ef725d6c4102204bab740ad14baccc50ed5d8606fcafd4009d12d4170de44454d1dce396862dbe 30440220479df7fc4a27b0667ca0a0c0c96013202c26787a9a2713452db40411243fe87a022015a19061066a42bd3b56fff1bd476c4b7eb9336129ebc2367d5ebbf60a0b4f89 3046022100840bafcf6c2fec61984d43ab260d31bcff3e454da2c3eda6739d831d1da95ff4022100b9e6d56194cc57e6e8591e137c9845ede13aa2cf14d6359b9982345e68412c36 304502201b41547b984e42c5219a7e0e9447e36fcdda15eb4742661682ba0e12662a6d4e0221009fd7fc9ebddf688e24f30264c59cdc30305d207bcedfda3b2a258ad24a853d0d 3045022100a08b62b1ea37923edde9edd7bdbd94589d152891386501522e6d1f4f84a14dc202207b0c2329cc4086bdf174b96e7011d6b954f331523719894d3645d1b4b73d2ac4 
304502207b39178eeee8db22010dbe43b2c384a8bb1647909fc38d3a7b3fbde9ff35b32f022100df409e285ba86846b0c2be055371fead7c12845b75240903837dc21c4ce43b8b 3046022100bee73e41ff7456054599a46c5080096d3ebfdc41b842fcb82a3158c6d5bca37b022100e5cf41d9c10eb1a04244dee68ed54ae275db268c63f01e83a3fa6aa0314c9d71 304402202ccce1f88ba6bbc61d941d90cb2315afd8dbb830640c279e3e5ed47ebbae397a022062c1d33d33736a0dd1fe8a8c055d168a636f1dd94dcb27daecdb36e833619335 3044022063b6e5a4eea15ee93e750c22aafab0edfc707c27e13c2bb5bc807c149dbe5672022012602fcf9327f67e6b815a6fb64ab298a524af1b0c06c94b120358b2c9a2d161 3045022100ee1a0b8b6b0269b5d1848140281f7c9af516f4c3be7044d4be1233c605219a25022050382d9874c294259843fe2e2399f77539c961a5fb345a0a5552095d864f5413 304402202b5e84ea4f81b5f11fe67f2cc7c4bdcc81d8cdf871f0a0e03b583f22318561bb02201e403e5b6dce11b18a13444f4b31d0a330a637021965d23ffa8fffcbe9004462 304402205761ddf22ac6f10fb5ba05f2edb94c44c54f349b2f0307fdfcc64a8a06d11d4502205bc4236c66df98072ee0e7f042a07f3f72a1f99e4029b1856dc90a20c3d1f1de 3045022100f7c6fdb7debba1eefbaf6979b7a89c47f0ef4672fd8be6afc309a2f162941de9022007e19176e8de861651dd371ffe634f04001a35a93a3a08080a048806e5ebea1f 3045022058b20ca9cd7092921069cebc77aa27bdfd2c87a83651d5d4a4400d040cad3bbb022100d53cdc6e97ea286f1fc815be58c0a6add7bdae2ed20c589d4b5bcdd01fce2a42 3046022100b2e8e05648b58f54f3a99003e3195a5ede29408be901408b8baeca3365470afd022100a5d9063ff33f3ed45c63dfac6cd07d038fd7639c9947dd595d92a579b14b1802 304502206d9f52e0c48ec754d604cc72104ef851640ffb1509d06e69eb2941447e8e248a022100844e64d8926cf6d2bfcc7a88bff746e5131148fcea14140296e9e09ffc8df261 3046022100b8e78c2c8621e418d32cf3d1f6aefe638187e21f6596c6e606f6f1a2c23904a10221008157013a2a4909077adcf74762b18e56bb1f7adbb549ebb12ba4656bbc7a9ee7 3045022100b730dd48de0c43bd82ffd8c2305b7452d6dd0bca7f8ef84b351226f5b041de09022022a7e2f10d29dde698a3b56e581431d9127f38ce0e0f71cd4cb74a6d81ed0aaa 304602210096268c4b777294c8c79a7989410d34a278dcd74b4818d2c5800ebddd668e0180022100e83e5960cce57bc775a1fd5d13c21550f4f72cad261e8966eb482090484a3789 304402202b0ab505f803c50ded67acd072097bdaeb5f788456b1d71e8173f0e607561fc302200fa14f0be0a42f0c1f4ee207f698903a53e3f3a24ec89d0ff7f2979e3de158de 30460221008b3cd8fb1cec212814f0f02553c5ed2f4507fe8913fffb7f8cfdc305d52f3fb9022100e5d54e287fc2556252054d8690cc967d30799b07f6498e8e2da11dcdc3dfe1cb 30450220111c71382a7d3101a542ac47276a28ad9addb31f572c5f18feaacf1e462dad070221009e7ba7603fe1f5391d31da533cef109c445bd93f84292c8e5c1a17252f931555 304502205aac1dc678d2bb728c1ef3f797a0f8c9603972634a84180e9fadc64993bb7ac9022100d21ac0c381aecfcd66dd34f081c7ac75023ad0d93ea5720a46732206a89e00f8 304502202f19fd4c0b7d39bb2e0d27208add32da2506b0824c1e5ff5120509db6bec74c60221009569a645b54acbbedad6dcd817b751d2a14c41bdf9111833e1834701067d755c 3046022100bcce291e77c3330e1a964384357cf6c4da45048e26fad04a1b9d2b9c19850b7e0221008497b69cd6072b6f13544e053a629606cbb2da0990260aa8f6ae8fa4d5044a69 3045022058579eec6fa08265fc0aa71bf99875fe378804c0dd0bc425298c03ef13c880ac022100d79b61ce44e74edef4de2c83493ba5855cad179848db9e14d9c313971b560fb2 304502210095b79d3b9473417bc3e0df6cb63c1e479cfb8fba21a5257df0c6ccd0b66e5e9402203c41d9ca1dcee25893a19957e1255779d73004ceedaa8402552d7135551eb5b6 30450220575418f4ff72d4c2bff46f8c916488d5775d1298b6c8ce3235bcf89757904dec022100dac4d7733ffb778db2f181fb786f678f8785447ad9d05de744abaa22143c1511 304402206100d5515b1c449f88bd652bca0d5a0040d307201425189b88c780ebc36ae990022029a82b92ad0a3b622622531e4c7101616348e860482a7f37bb6a62a3ee97506e 
3046022100b736a914a8b75320bfec9a372ce655111247aa061d0b01c4e7ea81931f068854022100b00aea3320d5e362ae59530e844c6094e86ee37ea627daaca900ec0f802a0742 3045022075853fa778948e838a69ee848519fe49bfdcf238f2da92619cfbb111ed5e735f022100f488670f6bc6ee56b5cc7f316728eabfd1b49c4d36ebf1cb8f40c39efff7738c 3045022100b9ff07b128c0a5b01987aef5f62afd429e641f1d3263dbb24cf8370d0dbd320802203fe9e71e17fe1661fd7fbaf23bd6baf516bd8510322493718c8ebcb74b0517f5 3045022100b9e5a1f609bba1083e126b577cab3b00086af5538b2bb5b844c4e75c7ee8d4bd02201cf6f489bc957342dfddfedac2b556c209a16a5bd1c5270cd0054a7f548cb9f4 304502202703f5dfc95439d3097bebeeaa039782d2fec85b8a05981294492519637673870221008e6973e7a465b8ee8ede7846b942156b5b07d7e3feab8b0ff5adc66beb3e7ece 304402201952f33917c5d78cb9374454560e05bf7128e08604903f3b994cc114861aec7a02200a0d8aeb9b398ce741f11bd584a7c8b72cec1986f3624c0ea62de49b76fd1c3c 3044022036fb34d40128ed278fe328f335c538916f341d0f3fbb14ba2c71695c9383ec4f022013d2617358f3fb3699716ffa7560150e6b3d5dbc38094ac11f6a07f43f798c7e 3046022100e1fa7f2b0a30968ff672420c66a183bc52428b785d759090c66a0f00f43f485e022100eeb1ca6d6171030ca9f8739daf02c69e01a725db02e4381adc015d92d7cb6b39 3045022100e7cbbfc8533b5c40c7286b9bd222583b1acefe5bca686629ae7206f91122ed5b0220121768029822730825bc67ad21f812bef383fd645631778cc2bc4d49d0cb30aa 304502201ec8b17c3286ed05ff6b5c94c29d2321579852d57aa64ecb299acd24f0e9548a0221008bf0233b13de7a8a61044a6c61df7104e45f1d6c51c67cd8063fa366b796b036 304402205b1374667f8f5d04499ce439c07eff71c76053e2e6c6fb0a661b64a9c2145ec002202ffb1f32091d99c997081d3767df26612f6c8bc73b0e9881a0b21eed68b76f5f 3044022055d95d0b1cb8f3440487a5fafbadb82be944c22e43e1a70b96fd0570a6f9f7f602202b032c9b8c8c7933a372f849f316cf6580fbab6075d79bda1ad6f4267a29e3c9 3045022100f64248fa512d0a582f91d9aac2046fbf964a226d7dbebe8f5af5e1601768947a022046147083edfa3ec85e65c70608527a61f3bfe7bed5e01a30352adc161bae622a 304502205b53a1411b8ff8e16ed2955452dcc02e9597eae05a57f15c8be2a0454021d5d3022100b8ea4915f55f14267b1184664a9843356b1dc435d03de4396203d978fc9ca761 30440220291c22680517b53ff2230322fa40c2592c816a80d56b4889c981fa955659074802202b2285ae25c4abbb26dcb61799d799887afa0274160f735d75cae2d1fd231b1e 3046022100c3cbfc03c1c2d18921295ea52c27b3a4ff66fde923c11b9a5d92891795b46fda022100eaf0d3262e84570a89922cf5c739b42d209a98059b35d6c547b4f2f5df6d56ba 3044022071f9da4902ff48402e36aeb8341841c101b11c1b9fd6805d5a19df2f31ac256f0220299e7ebc047eb3784e7758683dff7ae2cd0d118fa118051f66ba2c88949a2072 3044022000ac7db809d0746d0c694c2e21cb58b379cfadb844cf828bf7b2afb5127a93f502206278933f9db44c4b1a78c0e0296237815056b724e0d94dd2f0685d2f9ce157ce 3045022054914bcdfc41def35782e15827e1a32e6a884fd6001613173451cb44311ce8ce022100fb647e4fdd3b5161aae8298f764cb548c5f5ef6000ae9528bf039376f73247a0 304502206a186568a6513a1c5e770815fb985ffb3da9f10688d43d2a43b6cd7ba041a527022100912af8e7920f6fe801bc607294f771afd6e88540203dd8cb22dcbce63fc1d591 304602210094cc44bf8ec37acd7012e90ec2acf9d7b2db16d061a9ca5caa26d70cac25b2d4022100c82920c45bf014b8a8c77700146f7f9b29187da48fe32d6bd3fdc1460899093b 3044022062057bdfd330c26a5245087cf5a496ed39df54a3f2df2e0469a863e9bcf44a8302205fb401d01401cc2e4aab3d4d85ec472f5f2b0661f65e63e2aab5f707a5fc31c5 30450220054105264448ee764066228ef9d503f26238371b464145e442ba44cd98b41cec022100f055c7138f5c3e4064712ff7f83dad7eabe725809ef0adc6ec996cadf10769d6 30460221008ceb43f2ac0d63a01eb891a1b60fdb5681241b5f9f799bd347e3cd15aeeb2c0d022100fde1a48d09212f8bffae629c3d8e211a789dd0a6b5cca478ff36b613b7357a92 
3046022100821cf05b42aaf389fcc3f1fccfc53688e1e1aa8f80ef47111a1978933c933a960221008c64482778576c2ec5badcb1f82f7ffc293895c194660036673ddb254c155760 3045022100de31ad6244de6d37f78b5d78d43ae191b304b0a59db0244aee7565e38cb919ab022044060a86f21a68366d1bf2e59c5db5406ba6323b847665bc0a8c5134d30013b9 3046022100df449ea883514a28eefe23d9126e777fdbbdec8440fb00ce06ab5dad40a9e144022100d30b7f7fa41b93f16117c33ab89ecc8cce0b70ee28988734c09faefe203be2b2 3046022100c17c2395135771a5760715159f351efb64e811ca821491780b7ce6952020b523022100b61ba2386d0b8afe577ed3baadddf40ab1c32b40112453885ffb7237335d0d45 30440220673ad881baae19f11b3750d309b6ab2dc66f136ae503b15f8a8ad7150a1f19200220422a8e5f053ab7979414c3fb811157e3070980ecf6d3588cfd2c51520456d400 3046022100a224fb7538f373746481ed965f2b3200008fd8d4d41ff5ff8dccc2e98296c61c0221008c883e28f5f33914453c3641878813dc09edc7e67617edb95314242797a86f40 30450220705d954e1cd77bb0bbd055a9bcb4508a86e6de65c96c3708dc1d53e4c135f1f5022100ad81bd2c2278c6130f388f2821b14e35010cfb605c7fadaaedb84691e14e37db 3046022100fdda6d281ebc658d194238b953c1c71fde64b0f57e85d355b440391839fab7dc022100fa0db5e06cb4e9a378eacf1a892b455e977a5f4bcd440b28e3022596a78004e4 304502202d905a15262edc5d9bc3af17d575b3b6b9f52df4ac1ab6d494a22d7563643ed10221008a2a23ac303a7ccfddb478208a81d76fb9e9f36fe379107dab11aa4ccfb5210c 3046022100a37daf9493e48a8251a8ca1ecd0cc4dfef93752b19b630c1439ce4dbd2340b6d022100a977d073c13a2f67facead51fbf47b239c38fea7e92e343ac2f2debdebeb4a48 3044022013c540eb03dd9f79a87b88a628aca0264535fc0a1481498d3f79a6f0f8b231da02205ee6c583e9d785dc242ec4b6845a77e7f1321ca883ec711c49387781648b71eb 3046022100d8b4d87b4e50f5316d115c7e2eb342013064efa39314ea27ef348cc822aa0b820221008fb03463c6251aa67b1dae38fb375770e9c091265abe4a21e711236586be7548 3046022100f202f3c8115da682ca27ffb4cc85c13363e244b7c38244b10e2e5de7d77a75e0022100f996bc1f4232027dfd2b5fa20037dcac8097895496a3cc7911e6c084decfbcf4 3045022100a5bdc5ba9698b18375e85096c531ee16517ceccbe25c8ea091a2ef412d0b4a78022075117ade2efc51412c88a04858c221b103be7b899d61b4cbfe67577065b5a37a 304502206c6848739cee2b52efca7514433951887c3f602a690ae787a74312af8f09ba3a0221009fd2c920050cd1877a09f819e29e1da7b7ef5f7c878328432e13e6fee66e133a 30440220221eb242f6e55a9886a47365a117087bdd4ea2a26a4aeb5b20b77664574a00fb0220309bfeb56e5b58724169bc4c623671aa8e7094d341893887bfc696a63c4450e6 3046022100e4d842847afbc410acc16cb2cd1b314bf18e57036a14a381f10b6a63a9bc691e022100e632b7a96f2eac1513b73ae6660b74cb01e67b8b80e38a666658fd915b6576d9 3045022100c051190cceffad4f7ad90a72194b038449a24174ede8771f5dd1159c33d02c160220431fcefce6cbdc88c570afc6a9eb1b69987f430fa2149747f2e9e1f3f415fde7 3046022100eb6ae8e93fa78c490cdcbdf3c73b099f1b5bbd74b64f2275bedd15e81d8ab5be0221008314f552c7b1e844ffc0dd06685d7b9e75f7375b7e4f891cea7ddd173aefa510 3045022058f1787795eb70f46f1e54f652a4714c50ae5f579d33404c23a84d0c0686007d022100fea1f3290a254eb63c19b279e9f9b317a6afd00d28035f2e7b2d049d3f889e0c 3045022100ab5f34253a9b1711ed571fbd4f009911a0cbe28a0044f909a50c73e78c8d583302206d1306c3946fca1249b606d103e6c2b3444dad79719bc75b56e3da3495bd03f4 3044022046afee9d9f843990d618daea7b8975b8d7b47708d38dbe12b983b39a3e0c992e022039dd695d6bc26b8b236b4dc253faab1ecf5d1e279cd399a39068dd882a87b385 30450220236f9ecf9baa54c9dc8b710be8bf5aac48536f556b25755aaeb1043ad37fcff8022100843abec8bec097d36207d4f202b4ea19beb632318fccd36f7ca53d04942e7830 304502210093922bb7a3810f098dcf96e1d32a2ee22bd7acd62f898e86c062ec39db6d44210220382daca2074fc8ab2290c00e8e5b65371f443a6f474100e0a06103f201d9100f 
3045022100c2668ad050de84b75b3e575f6ccac76555b14437176cc1fcea3949d31c27b38a02204008a7e1bc3a05fbd711d3c37aeb590cf925b9f05608050c8e8c72557216052d 3045022100d12ac4b09b726e7c3cf23458b338a44c6324f936351bc989d5a8e27107de001402202d62ac6a78617bdb7068656ae97785d28bb9806ed03dddaf48f3e9679a5fe619 304502210080bbddbd97545a9d699b4fa0dffd784c85cc4e6d9bdfdc29bab4a7c101b9f8bf0220751583e76d8fbecfdab35042325ffab03c24823e0679e276a3e7fdfe56771d84 3046022100ce56be726014a4ef6b6e86520f1a403dcecb5c10763fae42d0b59a9e05156793022100fcbf43111888c435ceef82cd9ce48d5b9f61bfe51e5158352cee1bdb2c78495d 3045022076cfbc7277b595e93c1ad4cb2a7ec4b2a4a3e62767fd90aa4d99a0ed8745aa15022100f9b66dacc135c3d9a3eb2886db9587f26916b629a1d9eb227f1c05812e449e59 304402205cea4c47313a7ff9dcf9260bf291a564fffc96191fe561acf8ad2593d2b0b24b022013befc83f17f71ec3c1683f103f2e5fa892891aa3aea256c0fa78ae9fc128f9b 3045022040d651dfc1eaaa5bc499f0eae671f2aafd2e80c88e27ed0a0ceab26e406d0376022100e3064f02bafff2c32827f54dacfa00c9678a5954ef3c467356d8fb87a5160671 304502210082e2dbc4554443ca0fee03af507c0d70d9bce1df1754fce0e166ba8f14d52678022050ba07edc1b1c624f99a7f439352fee469a42ced228e470600aab692362bc3cb 304502202fc0b692381160c67391a82a691f4e02be637170942dccae46b7269d1e0cd52c022100edc8e378d812821f4d728a488ead5e518d8583438f5cc3edb27180f5c097b536 3046022100dfc7fe92465dd5e56b3462c12bb6b57357c4271770eb277f75b3dcc9ad3db28a0221008f91a6bed3b0b52958bd2fea672dda13cc17b625c2c838707dc97cf842e134fb 30440220364e159eb7790c3e687152b20808aa8a7165954885ce61bec748823926bfae8f0220235cddead4e3d8b7e066d5d350c41f3de7cb5cfc9ed09da5db5bed1a1f5a2cea 304402201cd16bfdd613d0c6424710d516586c51fb52a8414220f643bd708fef1c4ee143022068798327155d31f22b5ad45aa684b73aff27131bd3fa466681cd19135713c82a 3045022100bfad517c1ba6bf9dd9348d2ae8b060c0060f874d2bc53ca499073de8ff304eb202204acdaeddc6da12269aed5c9e3f4174b27a9ccc8bc771d97627593f7cd1097d69 30450220074bce64e781d9761d00fb152601a1498ab86a716cc4a286dea758a4baeff31c02210087d5da46084722d02f1185bed73a339894c42e69a76034df7fb14861ea9b0ed9 3045022100bed4cda03142fa2e3c69abf11749cb19f955a5dc46cbd60f5fcd0f4d82ff7eca02207318b6c5c4ab0b38efd33663237d3eb7e8b38d3cb388343baae3fdd251a1fef7 3045022100c46c60dd5e79f87110401bb4e57304cf048666c6014097b4d3288a88c66ffebd022067ed14c8f641e344b0b8dc03cac1b3b8e71651059f1f7dddd3575aab9ecc5332 3046022100a4a1a545c3de114aebdf2a68fb791b31bb8dcbaca4b450ef5d389e741413dab602210093fbe8c0ab79fdc7a63621258f6c7d210c9fa236694533f32b41c774380e54a0 304402201b2946d952188d75bbc6a9b5f6a6f41a08387e45451d68718923782867e81c4a0220600038a5dd7f3f5345c4c21cc3628114be13cc42ee21c318d5c723d296ee8e9e 3044022027dc6b1afb6a95dc74b54e8592304b92047f6030ee8cfc97a78c8f9f5e4f862102204e86bb8b5ce601aea21592aafb724deb5312bf7af30b9eceae1c78ee36b621cf 3045022100f8da33cedaf60986da7e6f947c30ab363711e6db694b6fa7da44fbeb11bcc689022036f7b17debf6109a4d47119eb498b605f50e10557c4388940dc84b75eb0a1d16 30450221008ecc8be82c3e02aec521efcf617a7154dd11aca49f07f959a51baac713c2746e0220645e25c524a69dcb614ca5102a64a256a0dde362a894fdeafec482c4536a6658 304502203e028eda7471911c08d8c6a985c41dfb5c393c9f6d15b7217784a609c5b8f5cb022100d5eddd9d3e96ed93f3e0d74d8fad012e73f6d168329a99ceb66b30adc287e654 304502207ffc9cf6331f6001b394cc1fd719941116067ac96390d55103a21f3fd44c4b17022100ff89b1b46442ef345273e3ce5cdb5df1c60a573576e5cf38ddd3d1d22f9070e4 3046022100e584b5d5018e63fcbb354721289c38d8de9bbaadfab532c1b38f7692b8bcf796022100ab5dcd9968e657be19d1fff0bf9e5b638d82e0c4ffc21a3ec576e3b806844bd0 
3045022000c1ce212f903ab8e21794a3fe3c9648f4e075b81eb3421fe5b847e3a228003f0221009171b66b5373cf80466aa5edba4da64795d6acb8dda63b14fa2b56c4e126616f 3046022100d562e77d8a1ff767ef48a1644b15a408f17df66eb74b3ff906478621fd644ff9022100d122b65560416515eb3a31806456d3e3d196379d0d5e82f91b3618c5da2204de 3045022100ebb33fbd7c281953d79ab1b2ff7e454d92dec0d3a7c494cecb88dcf9eb04e57402203a04b1873c79ea7c0eabf67de90602ed88d8f4dfbb56f4679fea54364c503be8 3046022100f9a10a7c9822bf80f2c18f7e5bf32de1fdc219b133bda9cbe5a4653b0398e810022100c24c0af90417da64b6c85cd7b184f1f11efa8c4f32a889352669c62d11c7fe93 3044022054449f0fc5d514d76dfbdfb84b9022900c2efd349f5e9d3a945fc4f94341a34e0220024a70779d3ed2230f82b4984c48b9e5a84e44380c4174462f31b58a27848f7c 3045022100932d64f4ce328dcc3741c0f7d6482dbd77d66424d74160b7a87fd2a80487f98c02203fe420360a34f74936f3ea777a3d9d04066fc1af9d9eeb2ff83c4add1d4cbced 3045022100d3f43db6c8fc6603c663d47a2182c000481c4540967e924f40664222118b59e2022014dbdaec7ca3e43fd794571c8b9598a2a600ea43724ec9442e4d7c9896c09015 3045022100dac0288260fa091d10197af14d56565b414307b3f8c15c7d1826bca1e1d75e1302204eb4dc63c7f478a76b8f22587bc2b66ee7b29c698aa7c1ad9547648fe293eca7 304402205beda3bd2a803adac5953bf72e7f8f0a65950553eba24b024403306b92044742022013d07e371bc0956c9aa5a3b501108d90dab5ee4eae372a332ef1d51200f802ed 3046022100cba3b79dd48ab01ea01909e6c382a6b14dff2bcbe582a4de43b38ed8b772c3c9022100a63372ca3636c730130d4a380f6b1685a3ec399cf814997335a2ea7ad6ccc2b3 3045022059ceb31751a76229de0c7059e8fe4fb4c7665ec40fa6d531bd231548284cee200221008d1bc74cc0eb98d27ac51fb25b3f34a40805515e5a2a9564dae5aa5c36c621e9 3045022076955b9914333c9039c3eb482d6190f435fc454003df34b79c03930550bc39b6022100e8b781baebb76acdac2243c1e2946681e9867634344082c5ab6934c1b29cd2e3 3046022100a75cd571adf9780153a69d622541163330d03f3c5ec1565264ef9211f7a232d00221009c58ea757b3c0302c0a165ab08c5de1ab182736d96d50aafc82abae76420235e 3045022100ffdd2fcf15247101728afada58dbc3288588b34cc5eff826670999c036564e910220748cebc4c4194978c15eb7c411145b466213504b748ab1b93f611b4a627fc57a 3044022051012e20b88a232d5534a497c887b2e397739cde38b7c9b46982c1f0611100970220151466e7e6f1e651c9f24b4749876bd1b7766b0850d49bcfdeb081c524afb925 3046022100b11a06b55f524ec4c3f6a3e9bdb14614762a6a9ad26df4aaaff1c2c87538792b022100b7cf010e6cfea7fc81cc1095edb10e486d6e15b890e9f1052fbc665982edced3 30450220667830c0e0f64476a6007a6657d3158c0bb77a9698e817a5c16f2c5853c4a2f4022100841ecb7c784202ab3ace5be0af74695be24ad3fb564a4e65d5c776004932b950 3045022100d061ad7355f2edacc08d1febc23506df934598c7ec329991830866d9634f44b20220529efbc9eaa12e1406efa8ec780a79b4f74ecb0a19a911d964c5cf31ce064fe2 3045022100c751cdd95323cf82801beae2189dc856904910789f7b8a9d7da5eae6c8635400022016a769dff5510567fd4514e2c8550d87b9a9edddbfe184aacebce316d1fc30dc 304502207f69f173911c8c4155d4561da1593f5e1ae750267b88aaf2dda4f2c0febf11d3022100e48e3c03fc7356a7f06667c9da63f82f0b4a8f9078fe056989c4bf33b85fed9b 304402202c0b0ff68f13489f48186bc17ba3716deecc598a7674de5cdd8eb872287bb4bc022012157103b237600ffc41732c48f1be0172d2ef5d51bc1cbcaecddeb807da5966 304402201d8b0bf9f3cbf73ec0a29d45a275431694b95df804d6a84bb5c4ca300c392c2702205d64ce7a47816a10955526fdc4aeaab41ed3ef6c0cf15a47cda6b9c74f3e3bca 304502200fbda5cc95c220a27134516e251a62454ea3dbcdac7cf7f9147c38dacc85cd1f0221008c72902cb1c485aa3aea78a2e05320a9a498e9542f276bcc4c697861ced96124 3045022100828a9859adf1c50224aff8e4690f0f617979aafbd65ac8f6ab784add56ab06b5022066f8f404e2ec7872e831e15e6211353aeea1f9e7720448f96d66dd61df062e0f 
3045022100b96aac132db18112efdd2de4c8aa0c74c0109647ba9d434cbbf6263f85672b3502207d47eaaa8d2d300998a105cc68ae63203ba62ad0b3147dfec17951720f18de76 3045022100c3fe9717467f020e9ad143beb0e62405a0bd9cd4818c485c0049cc2237260be902206335ca71d2d3dc69c2dc6d49d84aa8609d20a5a9cc46723dd0deec0b1154e4a0 3044022060057c8287c7e797c5fed86dbaa6684c8ef6a51806d730ffb975a57a1ad7ca9402206b66bd4bc2dfb108488fdb6f388dd62fb496c20ed95d1764f8b75f342fa4ce33 3045022100dfe219fea7e8b25e8823bf12674afe0bc58a8b16ad35d154350f5c92168624340220255c2b314bdeac276805a8749e1f73f61a32de374269ca9a53f2d3befc165578 3045022100e0c0b94a530c773f866d4ee7abc331357bb9ecd869051a37353f0efd9f7569eb02201a88036eef5dc2322cd30994da39a72a1a2dd3b1cae1a407be0bca4a5a33470f 3045022009261d730b09cb9e50f4ba4e0303e1a750b8ad51afbe7d8a655d7b9e5e404d90022100ecee892a5d90c916910a9881a66c77bca733985db67bd211b2a8cd528bc83da1 3045022100becc7c98e4ccacb247bac66bdaa67c817793e6285375b26483ad4135fa125e3302201ff3b0d262a0e1f32af2c8e116bdfeab12fe79de9e5180f13cb0dfe9772cfa10 3045022100d50567cba69f1bfc54acf8c9543150092bd243fed414435824729f2a832a5f2202200581820eb5a6e47d5d59b5cdb412280cc4e467e500fae10cca44d0109bcc3c2f 304402203203c7b1f6d5586b5f3ee2e459831024728349ed1e676bd82a73592fcc32d54702205086751603e5a85cedb91597598938961b78b5dc903a87ac6816c1e7b8a2e0ca 3044022010737179af3df165237d23c85b9910d240298120364d58ded8af6964a57bb07202204d63c0dd268e359cb392356648d003c572edf6619465ba3355331f0edde6e749 30450221009847fd6944124fab3cf4cbd26741b0e334a93535ac5508375084f627a60036a7022005a00555ec8694a996f592264876424a923006a1b0b378234274c6256a374f6e 304502200ba7d507ecd371762e38eb4d2d7559fbd364e7a3f1f8ae7f21015af4c168590a02210090ece11eb6e166ea10c391d17deb73fe4ff0239cf86bb3501f007209cad664d8 3045022073b9cbd3cf943fe40eeb848e392eb7e4ddc438073a64b8e8efd4c76d282f5b18022100ceba2e22a0d22d51f22b57ee4056648c3fa5af70ef3498ede35c6459aa5dd7ad 3046022100bec84d4db6ea79a52ef6fc9c796a0e353c6357e6ba3ecc13632b6661ffdc2144022100b24a80a430e50638a2eb5ede77962819f79656146670bfdad8e1084121af7081 3045022100919baf47dd88391cea0f4b108500d1d37ee518b0aa199095dc822ada7078cf9802202aa94ad05cd339e6c8477a8ae972d9d7e673c65ae6b168db4961eda58096bffe 3046022100b4ceadec5312a42cf0fc074faaa1cefdd4fbf947b66dcb73d529eb28a05dc4f5022100ef328c64df6910738a060fdbec17a00ab7e5b258d8980c37947abe6e15baba2f 304502206d3027cec1a064e50c10a335519c587e06d2d15da26ac7000c12071ea0de215b022100de2b712e3bf2e871111cb892dae27ce320da4cc99b6a22c665d71136902dcbdb 304402206ca1932a328a947b795a48c7e42af548e98f43b0b0bf29d7df15f467f5579b4f02202d59d7b798b49f47b8006b94bc5da80cbe61cb61ddddff3c7583979eb1763d39 3045022005e8c53e2ce22a2677d0739bbe68cc207e55aa591e5134f8d261b625c249089b022100ee3fe3b785569cef6701c0d76702e81b90d53853dc149e828bffb3f4b4bd45ec 3046022100d538f741f6ccb44bba76975aa429ed95dc97c59985d130a60daa1fe2b4df06550221009c1aa775ba780f8360dc383cb759e5c9e1b6afc6f088aa8a21c0b4a754b9e887 30450220090a2f54db2402cf4a95220f182c0dec190d4cc5c470d3f0e9f7e7f171e9f0c102210084e2a12f024f4fc24037029fb4e34114a936bd3edc733c44f45320454c3ed3a0 30450220291364d06a83da50fd832135bbaf9b1003763deb59af3b7d59d2cc837b176d4d022100a3ec708ef90c2b5b3bc1d9ec5055c2e522bc19def377e150f083f67a9f1f6cc3 3046022100de95adc59243db679dbaf4e26d32c8fa5dd403688307e7c27d23090a977519d5022100fb2ed4812354059c6c31dc70a12fef8a6a62b08c2dbd55bba0ae53f60f49decc 3045022068d237d4bfe12b9afd313c4f5bb38a224600584c0d97260868da3ac6e1bf4853022100f281859dc4c3154b785b7feedc29f48830890afb1b1a75f34a5fdc227df44961 
3046022100bd2cfcc632012ea4831778d67af92379cf4445ae5b2371176d3f381106494785022100c3a8124c7660c72eac702802c8e744aa1de772b9c7799f6e4485155082652255 304402204d690fe00cdb5abfd12089baadb654c3fe75ba2fd210b9fd1d754262c9be5b3302200464443ee22a46822f35da8bf24a5a8effcf2b34e1ee37de5e0f6b318e80be10 3045022100a631f5a13f8879382e0b72ad0b41b09954e41ca345bb0841a28cd3c192bb35a80220556976884553b4278d9bd73f4761fdda8ca25b344ea588103a02f6fbd0490f54 3045022100c79db450956f128b78768ff13c4e5ddf37c8d6c55a69064904bc72808610d09b0220644cb7aa3121b06e70e237ff606d0885a76247814703c0d3035c5b86c94375f7 3046022100908ba07a5c94858b510f54a438713ea75792f12298074ec2b47c2cb5370faefd022100d084a4aff4514374ea7f0ba31a028dc313673b2e53bccd825a7ca8cac78e8689 3046022100d478dd70c35720da3d380f20899bd3271d16c2927751fb2f904ca16b1b0da56d022100ecf97f69ee353754042f457f374fae76211af7d1c828879fc2f916d1ffd66cd6 304502205b1031725c42d7611864db693dee9a4b7df30f5decf7217626601f086ec06bd8022100ea57e020097925836aec303ca0df433735d03705908fabbc8e8411956704721f 3044022071f1564b7937062b72469a6ae4bbb252132bdf930333a78e7763228789716e7b02201806e03c1844104e0758c95810eb326e738a6bd9604c2ded13b053622b8a178b 3046022100beee6015f9de25cf8127629b9cbaaabbc0cdeea0ee82005cabf2601e35b769c1022100981a8cf33a95e4192f9b8a43e4f0087f6d51f1c4edde66b45eae875634700ba2 3046022100e93dffb40d1040f14bd8a20ef63ab48dfe8c2977d6f3d68980ca01ba6712e1cb022100f002bbdb5bbb9109392bba5e1f0fb914eb882585b73dcf1e154a78035248a9a8 3046022100ea90eb92c9afc1a59b620ea171bcea980cfa50db0cd1bb6d38ce214b49656e0a022100ea51f6d5be897479ce964107fcedf2d12d276272326523ca4a481ef8ddde981f 304502200a324e30c8aa20bf17b39ba77bf60658cd374a3758984e17461621e605c547f7022100ee3bf2a2ce09818cd4b7e7e965d6656a6d26eb355736b1278b017e768a9b4387 304502210090b9dc71dc90d86d47001dcffba9062dacae53ad06873bc113d5303eb2258c0002201a73aa9234af1961dd3c4a7df1ec77cb61de35865b47668bdc9c65802cbcc83a 3045022100b6d052ccb8889a2fca6ed0fac79b73ebf60a729e4008f58ad9fe62db04c5cab80220742d90ebb50e6100369b38804b3add6b74f4ba7f9ef685aecfb176c19d255e01 30440220373d26cdf6cf0b27c2b479395ec4d7597b5d0baa091ba87af150ad7dff129746022043baa289ae6952111b8262f7b312a083e18fcfbe71758f5c6b534440e46a96e1 3046022100d07b445c6b54a20d4c6da620fcd0cec3e727b08c1815fe57bdfee3eb4e3fdb3302210080124ba692a04262e4ab777ccd71c07b55eae5d39804187a7b7cb1c4b56937f7 30450220236713f1d9b24b07f041d9f5ea6fe6a034ec698f643486e27922cbfec7c8d40f022100c8857511f4f1f2ecd621c4d1025e7b44331fddcd2ae3ce7a3003bceef91f6052 304502203f8596e5d4e7d03478fe03a3cac46f2b14e27a500a7967ca080f125e946a5cb802210097bef4a7698271d21318ea3b5542c9502e7e9a0d2582569f33fa045e12c2828c 3046022100c0449f89684d6e228a0e210f4333a3102138753e564d8528abff9f4e91ba244f022100a8b20390f959cc3a8b88a0792c37ad03edfd26c7637b8a9eb88feca334228d61 304502206b745eea2ad0ed51f52e227d17d6facc630a244c98f9d4f4508d979a061ad25a022100ed38152858a94780ff90fa3848a712c957a353fee852971e0be220f53259bcc4 30440220410af3a66c6ebe0edb5f0543541f4ee7305a32bcfa5c0192b067c59795bfd0a402206e0931ad9b741246f7a527e0032cc828c1c27bfa3b0a347fd3b7a19d216e1960 3046022100ce8e98a33d7567e0ccbd236e763be44024431b4c6d2bd2b271fec022e3df1b29022100c94e7d348b6410502226592367fb5d5a53ed8de0156a0027ef5b84f414d528d6 3045022100b64104ba07221d0755a1d20a87496694bc9a005aa94c24c8a552e00b66cf923f02204a414615db9d3b7d425be21d44eecf7af095406ae59a00dd2dd6e6e0997372bd 30460221008b43f276b64791de953f689b95d8d57dfbb3304f5413ace649b661967fb94740022100c6ea5da8ec73329f300fb1c35932f381b8154cdd544db1650a6e854575a73f1e 
3046022100c617578064b707979b91b0fcf3b70945666577ac516838e25a9bef884420a4a6022100c4970e5a7e080ed67c88dc1853e726032bac5eb3189a030120a283b766923af2 304402204c9e14d890fdd950e10dbb5eed250aff0a3fb988cce53f7567fd58ff7861d1ba02200b02327c283ebd6202cfb5ee357128fa8d8d7fea8d46fc7e44e65c30cd182e2d 3046022100aec39bb476afb829b033cd1bdd541eeff17799af6146e6778774b46c5f629e60022100d4a517857f82873a520c77ea72ecfadd3709460fc3a296d5cfba47f14976ad6c 304502204cb8663aaf94de57de3bb335da5544f001174169f0dead6441c82161a60944d6022100b3abe1a508083b5d0d8bbf17318e34396b51c8256aa2fd1f44feaebea30fc9c5 304602210085312384fa285c576452ac3d48afcfd1e5d4a93369d46275b52c138da449b663022100df599d5656504e5e223ac180cc41f97f0cbbe8f1b469ce1fd12af90d7311ce7e 3045022044b650006cc2deb9633347c9fd2dba3681b0669fd788b4ba4a2fe17d3d2ab1d8022100ca1cdfa2c7b6c4861dbabd9921579d5ae95065a0880a60f57daeae159b9ec865 304402201fbd20ade74fdf1c6c75c1e2c6fba154f67bf51498cfeb3487ea9fd4c776e99402203df8e360f5860404c31a7c1001f0de6c9e008db7cc0c5855f5954939919f8d44 304402204b80d82e5cd889714c10c12c970390d92014178a1af1c2addfefc4818b59e3a8022024e6f8e95fcf10ded45a502633b26fe29b9b1f691598d8b0d7972bb75b3b4820 3045022100a302bf7d25e95cd889d66fa0aec00ab1675cae163f59ce56263c2f1ac85e8abe02202af06e2d670dc8d671c1f90a5c3638a6263e13064871208fa17d1e985d62cb96 30460221009ef7886d4f67ad1c9d81e5e2ebc67969d9ce04fcacf68909d77a7137d4973227022100e8de442d98651aa2d4453ded2235ae94e5037c2a6094c7eabb978934bb923a85 3044022021a9d0407b41e2b224ec59aa07a725b9b27cf60dffc6bf395f478cfbdd4175a8022029093e2a9361cd9d6ebaf12730df384a0e5076b6952037cedf866556143087b7 30460221009087120a8aa63f18c73570918702ffb00c49af6e06cfec45f2b072677ca8e087022100fe6685334b4ced59303a5f14eff2d0efb669f5d9ee608108c18b3cbe2b230e8a 30440220636c13f27ec5b0bd9d0707d35919b7a1433d0c79529cd39c8997f2cc78d645b302207a63240b1ed25a2035705d9924ae9ac5fbf11bd29f6569044f8d6c14f8cb5b56 304402207a5e0bdec583c0b406990c4201739305a0a2fcb0302bc2bea6bec4894c8140600220086e5a5cff39e2cc070963546d3945ec2ac60a813b4503affccf8b8b1aba4394 3045022100c86d4bd6b6b1108ed40c2a018e5b29b1366635b4125d94ff2f445313139a0140022001e44a7858e65a6dba5d16dc837d5890b99faeec5f217c26f0288fc93165ecef 30450220497ee1f29f358b673ff615fd748f86ce18565f4ead8ba68704c0a46eb28bb0c9022100d29f75609e02cd5ed0734def5331c139584fbc136b07f313b907cc5400224de3 3045022028dcd16e55d5afbc43bf961922012a607c6d989618c756d1d3a647cfa4ab4dcb022100df77d5c0a4713cfee38d9e245e57619361c935ef7d8cc830da7bc0270bf70e63 3044022030a81dadee0e66eb73173b7ab2ceca642fccbb86540ed689b9fef009e4a4100402201142a41ede397701207e7df4d618042f73a2fd4709ed8030a49005c7cd6f7aee 3046022100994f3318e844be46c3b9499897ef24da1ff0738dc25c3ce9500ee3bffbfc42d5022100ba0d2b33cc45e1ffb2bc59510f63d7d64ccd5729c029815812073a6c4ae25a60 304502205df0a807b05d5f5d394c755f023fe374e6f0a64528c5eb8e6306f49fde4d1cc6022100ed5ba75721bb1efa4bfe0a8f4e5b93477af89e07deb800bf821ea96207aba534 3045022100ed84f6738b9ad377c49bd23821a5d296f646960b59739493eefcb3308be1fe7002201b728d98f062c20b4d94454c8284832d4f9784520fba9ccecbe7d353a45d0cb4 3046022100ec3f93d2c43416d37e0b4151b1b52685f5fe630250a1976a8b2ad254c5a8ac6c0221008dc3b8f8a19822e4d8b29af75ef95f647b886d242a31bc711437cc746182c6ad 30440220289ce0a78aba729bd01cbdda36d0f4fe4b59bf35477bd10c509d981f9868801902203623d4fb41388131941c27d768a11197e300bc4e74642519c6af4688d28f2ff2 3046022100c0d37cf1f2b37a6ead5af659eb5191ebcf4294d7d5ede62b5e9300ab837bad6b022100ff38ec0a3188870ff2678d5b5fdb5627e01e4ded278e7c59ac974e9174dea6c4 
30450220142eeab4d21090bcef00a6107cc0ab78ab42d0a8a133748fe15ac3d0debefeae022100bff978bedb3784cc87ae8d8b05eef82d7a64265f34e9d6d485f7ceede20ed877 3044022072ff4438a11292064fd46529b7037f196780896b2969828ac108d9086e9f41cf02201201d9fa83a4e422879bebf780d782b5bee3bcf120391831f06fa69181b06e76 3046022100e3dbaa3774d900de8752918671b32472f7c04005a4bbf339b930c3c210c01e1d022100a402256d2b07d14e3ccc18d98ed345df435315c343d06cbac24d00cf7033553a 30450220334b69d9abec9443f99c35e28226f93b230bc0e5a0d37e2181534aa9f4769c83022100add21e803c9756a26aebfeae7721cac448a060b7b432dddd6bfcbd4d5446053d 3046022100c3ba61787249e44c51703b32fa00e7ff4f860eb8f09687000bc5826a8bfb6ab4022100db2bfd72327d3bd1512154a816d0e0f51acc969914aee919e79be05567f58371 3046022100aaf0e775595ac050d8e8c3cfe9490960f27bc29c51f1fcdae5873892537107e1022100922ae8474bf5eba2c7c28d19138f2db50718b410e06711107eeb78fce1c211da 3045022068767c96bf866f2ca9f5b6c1c1afed8a453181b39412d41480bace6403a216a70221009bec945a860c6f2e521e8c4550013f0ea5bb303e95a986216913cb87fbe657dc 304502205a8069357e03466db51af0930b47454975db3cbb85242a3243405a76ac918ed1022100e658cf800edf3fbd72fe3969aa2de3b5e75c0ee97bdf132e5d002cd4a8345ffb 3046022100cfd6a3356e03f0a07050cd9bb616ffe49869d944c450cda911b3cf92302954f1022100b1c49f911d5900eb6b934eda655774210ca58f6ee9f638af4d5fc149ead49eba 3046022100a90aac8c388ac7200f47fc6a9a25893cd6a7530f77410e50171f15cc3dfd6f21022100b295ef2f0f9078f8068fecc879e91f48b88d742b4597380dd47897c1311a58c1 3045022100ba95e9af6d0ab5ae2e4e4903b318c36e3b06332dd2e0d38162dedb80e0f8e28402203b86e4201d6b038cdb64bd9f83f4b7d38d93938811f4e5cd1399fae6cecf3167 30460221008189022bb44473549601e16f11c17b183616034202544a9e3a4a196268e013d1022100caad0a8c34b8cea4f6bb4c445c6ee81265d9e9e670368931d204788e15a06d96 304502207280b894e68a1e6f47e6e1488be7db17257cf77968ce24a1038632f364790d48022100c2fb1f96346b19c742c37b68b86b93d213e122f197178293ac676e1120df96fc 3045022100b069677f95a696e1fc1abdb52f5d8597d946b76fb5e266630f154cfe560e5deb022052f7b0c48a37750bcf9eadfa2ad03c013c8b90c2abc9b988bb8a9d6d6c227244 30440220122ab2589b16420713c82443a1f71b77eff25698a1f5d2df9d6339ae69a45fff02207289772ad349b01c633a15f4bcdedd81d38065fff3aaafd69373932430fadc89 30450221009f5db4428eb40c2b84b730417aaf20c28df6a5085a5bafc11393238384ebaa8f02207e709fb74465cfe4fea32e936d2f334128e4f0c8a2febca6d5f2eb1ee807f930 30440220346400eaef0f3d8d10c5a7661bebbd1b515b7cac8ea70ffdb7517cb2fe9d7a530220543dad51b27af6f5f81e1bdb60b22776d60fe24acb81610a67eae1801050bb01 3045022100f06d8cce6a65b81f36fc00d87073a5735b42d6d4c2b4ca5e704c8a804faf8f890220355ae47de2dac4d7d9945d580a82e6443790bde060391ddf5353e92f2e883399 3045022100fbb89ae5ac1c048cfa302cea48e74aed401b56a10ebcff7af16529ae85c9b83c02205c0e631a0d2c5916726ca8ab325f5260d2a4cd230bb5a5ab69da7d75d9f71860 304502201ccf2d54e20cd3ea9fd9499632d7671a4ee572dbbf09ad239b6689d976f7ef17022100c1cfa96d12970803ad38a9a59f3540f8caa4b6f0c29e41e5a1188b8d26830d96 3046022100c0eafb496961ff887bfb07ee15b3148233b073aef517e01ebc63662eb161c5ea022100ffd1848611c5e2f1e7701c4460f1990f6574d5a143c2e35ee7c642ba7592b164 30460221009276fbc3711f88619a812c1b61b2b28d19348ece0858e4c8728792413d2012da022100f2ae68d7b5a9bd54dc31a833094432e83eeabd9598927c042f71f97571edcd77 304402207198ab0bbd79334397c69f66d1bac540fe42e4712a2fa926526dca66524cbd2702206e073927fcb1674a3e6afe8ecfa62c237a57528a9fe2678c13023741f962b86a 3045022048a8f46fcc86650e81ece8bf511c2d5999106e0c99974a05d2c5fd65c6c36425022100bfe6985e7395faed1059cfaf38ce68c9051fbdaa384557aab608d668ba775446 
30440220211a95017d004602a1f042dbe1bd2fd57c0a140a306dae9fc3e9c587bef3d568022073cd7105ecabf47779b8dbab39a0fb73619020d6c9a5dd0e26ad589e3b1efb6b 3046022100c5e54f164afa68f066bdfd3923ab8b29b5a5b5305aeca61860e5281d831fa137022100c89915e4ff4a40b99758b9d86429f3c317f94f4e8ca3a3184daab299e4f3a1ae 304502205cbbad3d367b1b6e09f7654ac50a353407c29dccef2b0024e5de11f02694df79022100c4a500142377c4638337466689f2971f33afb3f3282dddf1acbfb2c8e6126208 3044022025eac245d7f331f7cdfdd56ef0e4806013925e9c5321e57ce16d54db026c0cc8022027c7df2fa31faaa28d12a701ca37791f29eb926739e74c702cebe5e86eeccdc8 3045022026c8c5436b27186638f4272bdfd87e334ec882d526aa84c573de00ef1557c9e5022100ff3b7c2353876e11e09ef22709135342066f4ab9d1175e5a582e846ad07cb07a 3045022100bbf04f461822e6a8c6f3c826c9ff307f374f4b04ef78398fb9a28ea72aded64d02200f16d913e231db3631245de3b7b07c709ee215dd94d6bb1086fe69635ea4d126 304502210092ebbd403ae46ca419a8edc789acf727623dc47a2b6342767024a0ec1fdee93002207ba91518984d5e6c7178d10585b3a2c657880b461160db91f73e9ff09b18af4b 304402206a70d33f8837ecada62a6beb87057f477c5effd32277fbf9b0f447c162d474db02206c6d366e061273703346dafda8b647d847041de49280209f631875dbecd44eb9 3046022100e03e67c658cb1b5f9b8f67b939a9001d4d9f47aa87153bae506555906c103536022100a7a3b95f853b070f50a3278cef6a5bdd3ac5407566b521ce9058e1f6c498646d 3045022067b4f428743965ae068a87ad72c5e5130389e4eefb74d56d746027013f6d0c1b022100fe8befaea549026ae5aca95354d8df392be41a83293983a8058a94cb1b00d5cb 3046022100b490cd8387a5968c4270108e2c94ad86218d5643276064eb9255949691eae23a022100b923678acbb796ad710daa3bc793341766a26150b6f353c0a3a8e89c150dd8ea 304502205736f0b20d7aafca8e515070dce2a0f59d413dd5393bfaf04e4f90a288d0875502210096b5207c0cd91eb9d8a663f622ee52a3f909b96fd5aaed27ff3724ebcd0b125a 3045022100c5400b92ec1214fc64b7b1374d7f804fcfec535f089f9259565c437526eab3cc022014dd1b0d6b8f5ae084fef830ee2ba57d12d642d464d7a6a4c73b3b5d2d27fc20 304502207a7c8bd9334b22236909201e1ff9074c048ef4a33af59070e78b0965866c639f022100c29a8fc2e8863cd8bd6ead746b1e5bae86eb44c3003d0dbb8ecd9359d714ec8c 3045022100cb7d1ee31d7df31e235ee3c76219d941d8e4b811edcd8fbc0aa480c0f22b9c3b02207521f538656464c076e5b965cccbba155f0c3150beab0ac1917aba3c27435170 3045022100ed54a9be261a9126052ef814727a3e43f9e1bb21bbee11dee6eaf5c07106d8f1022016a4dcea3fcd225ca237c992e279049d92e9cc02faebf4c3a1f5d040854a4fe4 3045022014851051b741494ea10c634155588be12074f876079672f61b1a8c8fb65535e302210082fb1965e12e9afae9b76d887fee896aac5e87e2e87fde00e9e465a9c2c0a255 3045022100e0aefd07d0bc4e4f38cf2e2a8d887da1fa3732f210cc98cf4b514d434a38314902201ebfdbb419c34c006aff93bc38cc2803bd9be63f339acaa3399588e6ca17a420 3046022100ddfc5f78f99398bc58b93ab6b3f0dec9f928c705117339b8d726bcb05295b0b4022100be7309368fae0602751b9f97816644f0ddd39370d8843aa1608d44b622a70cd1 3046022100e6d1de6a36dc70c3ffdc45ee86ea6c35a2785e03d9a794052280939ab4df95e4022100c4a2d5565e445d24005bcd2a7ca1e167fb50341c7a5cc5b81ad31b21c9aa47fe 3046022100c7733ba6e8e52a3aa0900e87cacce89088a89212cda5ae870167f4ae6153dfdd022100a9c0cbd6862cc7f9e36b6a8c822b046264022ea2ec9077b36f993780f2585395 3045022100c8b246e044f5699b6140a5c972de028fab3209ef10415e954098d63a2f525ddb02204049f952d0ef1f803c2f7ee3056ba4870238585542385909432cf222cc24253a 3046022100d07fbf94accbff0619a85d16dadc4239f9f3fce29e9cc58d2fd1feb57ba4ed43022100b8266c6d4e6400e99e49e5d9c652b8002d82adf95f597b6ca85a6db01afbe54b 304502204b6cd85eab94f8f4be781ae5a49d0ca25c3a89ee2e7183bd6d9d112c8d368afb022100d945c51b24191c35b6bc89d2f86aa4bfd86bfc6b31c7dae334524fc884e8784c 
304502204b5aa4078d7aa55e2f3e0ed76b199c5f03145d0fb4bf9ff311da51fb166e27c6022100a45293f05300e3ce29c2d40f0c1a5b89128f2d3189fc8158b175da45146c03d5 3044022037660e5da1a404f6a085f5769db55b0eee3fdefb40b554dbb6925027eef82f3102202e94352317668bc66397453d667e23e77b065e7cc52133af6ad2fb7bcf9c3b07 3046022100fac8e9851a7e74f4c6672d89e37c60665e6596bca7fd43120237dad548a7380f022100ba4bc3ae68c25f380340639d03b71aabc40d43be3b6742e13a2cbfdeae53d2ca 3045022100bee6bb7f1b6c73b135ef778534c5ffca992f414d34519cf19f5ac4c65118f9a7022078607e3a6bcebe4935f4b70a444209fe7039330c8f24ff56d29011c9dea7a958 304502207655a85774b0c967a3ccdf59fa7da9db4ad9491241c32c4827684649d9218f5a022100ddf541a60da934ab8537f62ba489393c8dc09562910e3504daa1d2b13cad45a4 30440220561d3ba79161c3dc15f2ebb5252889ad8bcc5336d93bef1b8acfbd398dd29a0f0220215f5dbe3181be3d0543c5303c73843e900fec594922b197ffd2305517ddaf59 304402200bbec6e71c1f7a95ff5abe20f849060bd028e733d160352d5dd77eac704cfea202207bae3dab364462d66496155c66b9da9557dfdd16a1ac25100f770dd35c14f3fc 3045022100afc6260b1a3ecbfd6356be118badf857bc386810caa11cd567464c9c47a952df022072cf09e6ad096be99647deaec70b85fe23e28e1fe3e4245ff2af4cfe638d91f7 30440220522c9ad867c5cb8393c1284d3a4eaa90f20d240653bd0ea1f37da500e574274102203a04a2b446850b45b64aa364f47ede685e1b97fe33070d764259b7f57dba2afb 3046022100ca7a5342e1c884764aacbb04c52f06bbb309df3e7a7a881a389487c18eeaa773022100d1dd3d106b4cdff225bbfe6e19bfb905abfa525ef2ec72c5a1c582740ba76510 3045022100d6a27080bad72c4a8a9383f164ed9577340889c6e24033b523882664cf768c580220748ab610553392abf7fb55a40013b7c59e0b519dd4e4a1edb2c5c6ebcae539a1 3045022004e09bf36ae2fde941e23c84840b07802252b69aab158c1951ac06335016cb0b022100d5760555e7f9e00f4b9f3d30a7e54281b287a5c389b3feff1359de34e8a74a49 3046022100e01b2209c49f9c5ce6eef4004e779933f4123de9cd113ef88629e0d6da503d8f022100b6aafc63bfc8645bb0049fe818634c4a19e11d7fc21b5e444c6010c2da09885f 3045022100f76716a920736a5fc35505aee24ec2e3122c2d0cb32cd59349990a7aa5a786d502204a70dc95dba405fa24010e01b02a5b5b063a1cf9dfb576db52ab9cda98831444 30440220294b0b6a24192f171302f455ae71ebdece9f5e439b059dbe6adf1bf5bd5d272402200b41261d9079af63d34db2da6a86df05802991bab5081c198ac4a7ee188d3168 3045022100c212a5dcf688aaa813db41bb41f5719a8e17036733bfedffe0d5a546c09ef38502201e03a0040ee7731733cb1ca388b76bc60b359e35d7678820a7e924bb44402ca5 3045022100c5c76e858d0727def5114d4ee5f01419ba9ebc2548886802eec7e6b3be0a3c72022021d845195f8100bd9fb856dbcd39b11dc32b809bcbcebdcbc464d82a28208d68 304402202405c0c36672eb9241f23da791800c6ad41498e70d876e644571452a4f98e7ac022051b80b99f1cdee5ab9c3c2f91a8ac49f8e33312fff169c0e11e79a143cf492c7 304502202447bd613fd6a354908c8f23b586c9faca27182c1bf6b28a64123125cb7d7fcb022100d2bcf8f15af0a44d09dbb9ec602dbaa9e8990d8d8bd516d29011499aaf1875d1 3046022100ff6912120cc897059b257eefe8fd17ff926b8f55af1507c9c600b1d5ed0119f0022100dbc337abc9a43f59d60b67591c959872c523e7786368b1135db2198704f99e91 3046022100c73ae9fb476195dfa47ed0dfa5aeca53bc347c1a237dee6fd2c6f5c91ff2c3c5022100ba8ecfefdad7aaf7b84625762ef33480aa0d1c39c306e4570dd50f909dbc1eee 304402203d6869af503756d2b9301fd4084c53e61e7e6bbfade19f32b8f66272727bdcb5022030233e13adbf30388b4ffc567ae8ee22c0d299108f5ab38d2836824f69df9d39 3046022100940f88f9afa8fcd0c9964e042d46b10212692a208de18b4b4242f80903a56370022100f5ed0fff3e11b555c6424f02ec3f40db33f8910fdd2f415aec692f56e2b79dd3 3046022100a0a36b044d10dd030403a57b6a39e7af865e00d153254fcb401beac5de99ba29022100cf0f9e0390a25ddfad35d840d2be68ddb03ff088fc1ebd9050f2f495e7d4ab37 
3046022100bb9963e03eae215d7272f691275e963c142d537f95cc6ab53d2ba449858e6f56022100cb3b8248f53077f9a301d069e8a41732623ec3372bc926ec0e9c8eebf6b331bb 3046022100b649c95015272ede698fc65e960837bbb2e6b9c0fcbfb1e4dc90c76553f52bf2022100c012b418db29a39677428bf76975381b4333fe19d31f2e0be398ac18d1a6e3bd 304502210088ad50a0792a4997ba9dc6825e2b4532a59df8778ccfa8027ff1d3354979b42402207bd897a15f34b9469e76476f3c9cf37fad447e4178fe16231e3ef383b74c4e5e 30440220638e176409a4d020eafa43c61ce5dcdf9dd00417abee22dd0b3303645419212f02201e746fd350adfd0ed7a6878837768066b605377095d4a2225bc13d54ab122746 304502210087afd3840c89da03154875df7958eb652ae47f064f62a54b7657ae9d5f7aa7dc02206868fc4edeff6ff4c1264c939ff75f3c3cd2f9bfeba187bac11fc1dea51e4864 3045022100d70ba7fbe661bae0d19d0313f6d7c0a0068f1c69989d86006fa677a9d0870723022009c9652a672d20d0dff5ec97ab33dfca048b861960b77e087eb58b7cb6578594 3044021f0b3ad14d7133eed9037998af25072e583ef08f1e1baab866a244395762bb67022100f58dc9037564fbd043da6fe2baaf7fda072d0b0a5a2ac6ee6d2957187234a2e0 304402207c431b7fdfa7e3fd05e9fb442819576d5cbb4ebf5a7656a6feaaba46c0ad3da402207fd8fe4d8100b1aa2c9570ff9aade31e1ad691089c6c0f0e2aacc4933db37bc0 304402201214dd5d7d21f1b2662188bb3493d698350bf1de7549b71eb339d23a9dfc56d0022062b66fe4f439d6521c8071ba529e3c1aeffc031c379ebe43446bc79fda88d6fc 30450220267dfe05db64aa4723b748806b7e1157daca3abdba9a2bcf02aba27049aa1f110221008dc677edbc79c59a96d748cd54edce4b82ed5cfda7c3ea542f659c2e31f6225e 304402204a386caa6b8134e8b08984bb832abed00bbe444b088bf80ee9166c81eb34e210022043b9e5eba41da2027ac6a989b8fd25af952896eae55ff1f873b4dbbf5678d3b4 304402206e765647639a0f81903d6b19e892065c890867c8ac549a2d1d13598f60b0469a02205a1de4f5055dcd10a92d72da8c43968d60570f1bf945e5811c8e64719d7d9cd2 30450220406ed2d7d1f533cbcf093d0f785b9fab8b83a61b6707bc8d8c0f2ae01fc054b8022100e5e0e697bff4dda84545dd62a98c05ae745648aa40f71150457db795c5231719 3046022100f949d99cc8962daf08b949438e9128386a735e2e4031846e2cfdbc6bf95af540022100e4063fb9b3c4cf9dee69449636e91376f558fed8040f0d07b7f1f4c5117df887 30450220705b4013f27d1e8837e84eff8919f9387692d31aecd5f0a9929690a042978580022100d06461b1fff0a7869cd2eee8c5d266fa2f1db5e6b91d11006609d51e964df067 3046022100b6cbdf8b60dcae956014cbd4a4d40f53e14bb5effa6a966eba1f3446e8a453a802210082ce82f43a56a8e14b93d683221f0898da310902c0dd744421f539255cad77e1 3044022029c2a234b369661e1a2db038bef200b2dcf1f866de5153be80b42673deb1a07e02204bcdadebc77bab4f475ada91885f9d5755306989afe1ea29422666df483dab90 30460221009e7dcfba093d931b9fb0315f1db4a1afd2d20cbeaa5c589b8781e918bd8c3ec2022100d0c692b75b4800aa0f3932b5459aa2cd13a834aa5bb882430340138885da43e2 304502206a45e1194625eab2e08a9b3974011042626c60c91ee4db2996c8cecb8279d291022100dfafd84f553adf7c6b189ba5ed4f545b9ef9b26b1f39b8afcaf4d48799212797 3045022043bc21967d73f90ce6b38337ed6c1dc6a7cce6569fb4f6fa3ec353b7d5a5254d022100d3ee458317091f95e72e36302b5df247cb64b2426ae9b229e2ce221f88d5360b 304402205505fe78c0c1b33027779557611a9d777d403d4160d48b3fd9c800e35accfe2202207e835ea2ad88ca1725df04dc258a3c7d307291b5dcc9cb66667f50e83fff77c6 304402204707a979ba6d0bce641e271520c1b8ad4b1b6cb4c3de6b5e165d5affcde4411b02205d12aceea63c4333d2b9fb6f2d67b6c94b9738da7ce1f58f1dccf87f3f92a303 304502203fd0b973b9d985fa67bd1e7dd1319e94bbcb563a92492364c5b326ccb66b8472022100ecd3960987725837632620e2860d617504e1bed926bc7b66061c12deb247b1af 3045022010d861d6e04e67915e5f540a539f11d1da0da2172176c7c5a4db3c132464b65c022100f8b42554fcc4ecc09cbb4f93ba290d4910f174d34a4c71817681740bbf810c93 
3046022100ef71b187a9181f3a318b9144e25c2613625d823a1dcac5541f328e24788bede7022100e6bcf23246e31242ebeb605cbe4bb6f863bc25200ebbaebf465f5932bf9ce5a7 3046022100d6e5a55c82071bd5e93b5d5eda61f16c76b3d043bec39dcc19ee16e8eee4d5c4022100d113b232d0be4601337f7024ed93cea55acfeece73357622d5b397a6514d8f8e 30440220054cd225ddc57c8ee2fb39ac778bf840343b0072d379c7786e469c4f5f247d7202200c21c2024eb586691fe1b2db471fd4f6096ee5b178a71fe03c18e169af1f0e43 3045022100fe24e4754390f4cf4bdaa6e48ac452ec532d4d572cdd87ef4fa0bd2982206edc0220412064a51bd1c31088636ef6ff437bc81129dafcb8d269ec357872485b7b3ce6 3046022100eac40e558ef878ad0e7925dbcbfe22daaf30409c1db3a87bcad10aa05395b0a70221009401d2a88fc3f74ba8e37fd08d1db4824a464810fce477da68cb31300d24c726 3045022100f70a39f9ca018ac5010dcd51117db3b145262490cc23f90d69520ca768c016f1022010e74a7c8ba25dfe5e8ae30b7597662c01ac1e5a68da0ecfa3cf5707a0da11df 3046022100d428c3c7b90149920601e7daa7ef5b0e6b44bc3e0811f4aa4b6afdb36ad7837402210096a8883749a2379faba79862f65e28ee5632ff15932ebb0c67feb84946a9dc17 3045022100debe456f7a21a2ece5775ac40627e41c07b44d9b79ffbfdf96514ebf66e9c11a022051de927eb5e19e1eb53bca02c27df1b701e6228ee57f3efad94b87d4a5daf301 3045022069ff44cdaf87febdcb6f0684a5e3e604b257d48055c4829233c114ffbe929ebf022100f87da3ea159403cf8bfa02a6df18b194f9f8490a99c88ea92e4ea1e2c7cb7d30 30460221008ccc704b6acb68eee15b0521b7a9053f163ad4a8d859a58e51ebe599ac19fbd5022100d07dcb7476a57090d298446dd416d8b71880221aa432746d9a71c27d78cd49fa 3044022053f1438a6b2c26ba4c09f43710d1fd18b17b27c4b52264a6ff5813f82b7a267502202d138d126d41a3ca7433b42fc1911085c82e80be04b4d712b0c16de5335dc4e8 3045022062d92030e4f94a9e2b534649819c90f1701e7c80a267c8dbc939877a9892a303022100a7dbb8bf5a4d192b5f6952e33ee23a2d5b8cdedaa3dbd2622f8b23487eabdb0c 3045022021cdd0ec3ec1e8b5a8c4cedf9f9425d8de6498965c6cf68318dbc822a27e18890221009b54abe801de919004297df53f7a4aff9a4ad9e98c38d15126cae1218a3d94dc 3045022040fa872c8c366c82a7fed49b519a81521edb2ecb48bfce1fff5f0c50add05aed022100faeba40ce6f86c28dd6e1ab54227c095c8ee2b6b2006112500d508b15f996e29 30450221009c535f9fb0831cfd5ca8592fad2ec7c312a3b5b8ae434bf632a9aadf9e0f43d902207846481842b0a7de679ab4fe6635f2cdbecc9414e33e76a20d7e887d3a62d53a 304502201e71fc9184e11a5e2fab1ddb28fb91e25eaa45ed6b7c196a22e3fb15837e65e002210085c591955490e4b44f57f9cbc7b514e324b744577f7f0d8b7616066935eaded1 304502206986840e55061c6116ed34af997a324b91c2a77fb39a91348d9f4e636350b0de022100948e22e79e75c04e6a5e5353f23d43341dace6fccc817df574aa075baae59071 3045022100c51f89d268cef0e7cfd76f442e18a20e43935ea5e62ec107ccca2c8c4314a56602200ac4e81e6895d03e6bbebf7ba9a320ca16634c1ffdaf48617e0636b57773606a 30450220613b6f5543a0605fba5f2d30d3b53bd1796780e0971a59ac40d010a77a945f12022100ebbdd42c849197f92182e06b6fb065ced882b6c8842eb0939fe856dd1d88d868 3046022100af73eb7cb4f90598f34aad53cee1e8770f3dd832e6544a325bb9f1116437fa36022100d25461f83e4d694c902ed3aa71bd7b1b99d1987448446e8f606af6376c480983 3046022100856ee57137e8a92a625e51fcd6ca24d7afa4ee0b6e05de0f55db0c4dfcce47ab022100b1320bf248acb66b1bc1380a7873f7167b863b721d9b22feb6512e97d6d1cd83 3046022100c29f2c610fccd3103d4a98c76d4122749454071f657ad903e9f7a606d44ac5a2022100b8a3e1a3aa68a9207a82ecce36db125e329d78d78d7ee6014ecc7f9649f15e49 3045022028e3ae07edd606bbedad76e545e7c88380b8324613e6ce4b8739c12e6c54362c022100ff534b2081cbcdedff59f15563e306475c709c7bb9fe2f38d5d90d6f17bfffd8 3046022100a74434ad709b49f5b2c6d7814929220e7b2c240c432865e89a157ee9b334c8c9022100e05ac3190b61e52672b4baf183bdea256093c65d1a369407a64ea7728a862259 
3044022024ec1c2e2edf6c80b0e5abc78595b28056a22ace082af675320a0cc484feed0f02203b7c6931d370fcd951e6c0cfc73c8004529a18b93dba13db62b07e43779c8e56 304502200d38ea44c4fb90b190c5f7d7f0b19e49c31fb6b69e7e046b493ead9de1caa7fe022100d528b1b900ffd6719627ae13af903aceaf92db7ce93b83c1a40c9da04bb20963 3046022100c78a3b316949f81129845b2bbdbc36d1bb542adbad2e982223ba4ca93935fc6c022100feec42b83279caeaa2ee254682d917613795659d0f466a3892392844fa1fd39c 304502203fd3e3fa3fba8c170bdf5a25ca73dc1e22f59945307264bf8aa25fead11da4cd02210084f0812e3ff4d46c5428e25da2410ef5a8290de6fc5dc63ee901c74ee1522232 3045022100db4405193620ada0643ffacaf03bb51448b941979c5a67c6efc3893bb7c09050022006327570f1e6d49ca0154b8231cd4df347eedf895c205f91c6e532e4b96e6d76 30440220552f60209ef4065aa096a75377701cd1ea2a34255be103f724c77b783251219702204dba5a8947328f5aa107d8e5b3c0ddc6a8ef54531a383316f9ee8546012ce516 3045022100cef31c769778f8d353530b4dc94dff1df43d1bd1fa29c75eaafc58859dec9927022017bb78d0f92578ce473d171c7be206a703fb808f73da5d3cf830ddf7a3bf7e3a 304502206332238c22a3f0015538ca33880dc4a46429650b0f20b66456f6cc9afd8e602a022100a49a8701f181da60bb62463278a682259082e1c0bff3b18d728a38995a013705 3044022016270a5a8e326c9a83af37bca58be9fb24b347fa1fb10e55b53e27c9cef7ed6f02206a5452a8f676294869602853ec53ad201422a04680235466df846ce9d2af8860 3046022100802702e1bce0bb43d356b002e646e7f0a2c704585e7b3107cdd35ff714d43812022100e5548b596447f211cc77bcdf65a72c5a2d00485bc88164bd550675e59a26c57b 304502207bfc55ab04d1defc6c1b09091bf26060ae25df17f3ecb677b15b05f82a5621dd022100ff2699f0277ac3a8fabf4c21b92749c06a2fa1a95bab9c028cf0b10894ef061d 30450220494b72b4629473f9532430c13fdec2da702cafdadda6cabbd4f519e8b69bbfc8022100c0d2e20901165520059ac062b745b2be32ea16d0bec018ff44eaacf5cdfe9dc9 3045022100e8360a26f014a33734ad366d99697af955c4b96156d6907488c69459c371e6bd022057fa2c2e62268b2b2885100183f2cd445f094ee6290e6a7e1eb209fd8e9a9a9e 30460221008247d5d0d292ea89dd927d284eb923da82586c448937aa150312262ae1ed4b52022100ccc927b6f362aa90bc14585aa240092f45718c4d1c2536e1861e1b68d006c116 30460221008e51e013fedff4840c66542d2502f62b8f4524ccb0d055eb89985d240a0f4b1002210081a3a14844c7d77706e3c281be9dd594ed67dd91e91519a74ba4f5d3db9708ff 3044022018b5ee2ab38de61e679999597887cbb5eaacfb4049f8a1c5005ae567a9b71c5f022068e5dcc339af3b2cedbc876149422ff87dc2eb5b857b98bc7f3514a2b7eece05 3046022100dc987d5d2c42c792dd3bcb1a19bb4c9973d1548aaecc062df6029fbf22e32298022100a52fa0da4b26b35f7e374818836314efee77bb5e790981c706cb3e8eded75e88 3046022100a5bcb4295ab05615e73470fe3dca1f1dc824e9a7da60dc03941c0257b02ed532022100bf62234f402e2562eb716262a667c4e4f5d3bb02e5c6e5e27304135daa04c9a5 3045022100b2194b070778c2d10e61445d118b7b40ef8aa772007e9aae6e90a07e682b4138022014abcb5e232cd7a150041bf4e25a469867d764b16c0216afbd7cd905d4a6431e 3045022100bffbed86c781b9a26679b031e48305a3fb53ee812d09d11dfaf16979418ff96302205170e9dd14ca77e93d1e8a3e98ae11b67bd4512463bcf578b760d143d776e003 3046022100ec15a65f44e4a965e8b6df6f7477e9c255debc674b75a1e61101b311f196f109022100ff03b7eb85c13f9227d21ae4f588cb216cc8de2c29cbcf6228d7bed9cb2b8a78 3045022059759b009d6b39641331525d57d3a05918f55854f01099adf3b255f09379c9d202210087c96a590cba384889fb61278ef80efda48a3f1421e66074e8861456e48106fc 30450220646d2cdf1d2b13322f457be03ddc0d802c4dc8959c222eb2df0be23c177164d60221008e9255c68a32b354f12f326edcd3abfa54819d6e5181d4905a221dceed74ef7e 304402206ea4571e38d7c88caba48b610a86b3f91423ed1ea0481c19a16515f7d4c7744d02207aa600ccef8a8ee8a75700af93f29e84397df888ba86e18716f5e13475aa36bc 
30450220128dcfe4d802becce46054f7d3f772ebad2c5c7d17c9f5195df310fdf5c6835c022100980955ea95e81a169c6cca516c0ce5fc7c8ef2a06d7556cdb410971720c118cf 3045022100a6553f65217932326e8b541b31732d3b4b0deef7fa6dc258ed8ca1f4e91fa06a02203038a0185065dfb3fe780292bddd88ac243c5b8083678f775a87785ef319e1fe 30450221009377630e3164e72756b6668e377f17788a43ca602afa1c80e4c8a09a334f99e102201372464c78094c20e660084a296d705b09bab6938e8d45d89a1ebc4936c25641 304402206e6246126abbce056f5e415cf6235d4aa5303d52c964cc36aa18d0b7ff821453022041d81aa0c491d14da2f7903fb7c2f2a67c427d73c45c4bcedbe23f901270f447 3045022068154bd481ad76d76dc7f65573fe68dc185801f303f875c10ea5deffef416b45022100a96b18ef7f973e3295228337c021b624cd652fc5e864081d9d85677e219121d9 3045022054446b06a5953767841513f9f6a240752dd447d1780e3e79ae16141ac060de92022100e2ec55e7f06528e2c2225d2ce6fc93dcca9be09639756dbb17f0b3fb5dfc29ff 3046022100ff513fcc8ccfe13fa8d2d9b3e7f0fd4c50b33ba9da50de7137a3c5a765e433640221009bed7405fd9b51aa4f30774732a148db55e7daf6ab962dc49d0579b4111682b5 3045022069713f51db1df49294892b3b4135ee37d05a83d805da9221ee5677fd4c376cd3022100d003c4606e129e899aef47d6b835548391eae2b2b109afa3e348cab0c3f056ca 30450221009a997e3e47c23d32669016267826dd6531d361099bc489c106b42e4503303df202205f31276669509a1cb8f5a11124211e12b31ecc20d2adac4bded16155058b4316 304602210091ad44fe441eccfe5edec7de9ea159d59aeaa0ac4256ede95dc7da66d312ba4f0221009983169adc0556710208245125b5cb0ca8571dbedfbd185d60813976fd08652c 3045022100e66cc1680c6d0e74e3fe133f1889a6560d3195dc7dd438a1084128ad4520650e02205f0449e8b1709a9028112febdc7aac39103ee83decbf678c05af6f6e5960e0ac 304502201e5cceee52457ac01f0b773c93d5f5af36286d1d795344976c0d4765a9993326022100cdb81f682391c8dd0c813313f14222b2c14228c764fac7b4699a15b423b682ad 304502210081caa876f8cb6e0dcd80dcdf45c2ba22d07caa69c0966c595e0057c2571905f102200ea2ff99d8dba6e6bed8d6b83b21543df23dfd39cf9ad763d8330ebf1b743799 3045022018c5573a640fe48d8997fb21ef894ae5ba0efbaee19367cf1f49b6203ba3503a022100a65aaa3b0ee57e9f3042cba48eb6fdef1581e64cc9122ec71a38f1793e8bd1f2 3045022100fb2d6087b57c9d9d10d2926efc252c1ad9b12f2cdc95f97bf83c951afafda489022077a2d453557bde652717c8d3c613d1699124452bd22b999ee24d1c92adea9aa1 3044022069461e981af3f20e0238cef66d9e2e0b662e6ab38d7ee03acf26ac39b475870c02202788f7aff8b83a5d537b8806ba64875e145cc410a273bb644fd29b0abab97cf0 3045022050d17a88f1c2d3e9df9e0a5c5f4a216f36046632afb3f7302110812d8ab171c6022100f0db168cac37f57525997ab716897d004388e58bd921e1f6555b471c56d76d9f 304302202147aa1ebdf9564de3b10d648e7ae5d6f04cf06a1a29b1d70d669cb7d99846ab021f785be4490ea9ddc8ea94ae3c9fbc5d692c7fdc9a850a6f5766601d77a39f4e 304502202bb6559fa03b1d39e81040afc0ac1d1457b8650c8e355cad4a0b856ed36a982b022100d9792a7a9dca42911db504fba71cdb7c4e9aa69623b295098b586712ce4ae0ca 304402206248f60ce162d7318c951f0499e1cbf423fadc9a3ebc551517fc6999ef762b3402205e3bcbfd9288955fd3f19ba548940ff2b97614eddce5c63a84b80718ea0867d8 3046022100d615b29a4a6a09e2725f90630835afe46649267a30ec43c79062a0a4dab7ee14022100cf9e8d5e14d870834eace564819d6430a5f3a12719ccca8dda77f4f77f8d1494 3045022100b1a96caf3ae550e3bfcdc668b92aadfda9abb0f8f8dce921145f7f4e85026c20022054e5572f86d3d9b6b1d545ba41e9d66342738ca804a1239180a4c9416605504f 304402203789c357f91d7e9d5f8b15744ca77e35ddab83193163495477886d49ead974b80220574169beba458cf263b5153e7237248b3d23594c60007b71267a6db5e34ebf7e 3045022100a3e901211f8f8a6f7073765d200fab9a791aafbed5fec07029f87548c6f176870220369b6f28f6b8694d9ad7ec7afd7ce2c5b930654aa3519d8c345f3fd03d802c50 
3045022100aef5f68fde9a1c770ace4520a43655da45f2a394eaa052a875de8a26b0a3033302206252193d003f5f267ebcda9a8502329e7d4634a80e661ff62e31426bdc46bdcd 30460221008800a0bad6f5fe294eac9ba619ec5598ad8b95de53f98063924e9d4d2e4c545b022100c0a54801ecd6f22a7bdff5e83026705886e9b45f6467d8180a72afecafb514ce 3045022100bcbfa43c2f44d43d5b42c7ad985131e698e3a88272cf9a9c7c6c15090711b13c02203a04f14712421706e0b2490519648b79355ac22469aa4941fe0e89070be27f44 30440220189978cc748226d3953887e53883dff0b21604422bd547987f4292f67da05ef8022030a3dbc1c441c33cc4e6e22af3f931afdf73d93215acc2258f48959c0c24e216 30450220512c5f4ef1cd96ec1b11ba3f2be37a445c4e193f1104c2f19e290d92999db999022100f326359a16214ed1a175c4c65d0d77e00c0d1c2315417b0a1ace1a110b431a3c 30450220560372fa66244f41754ff0f3985313159f187bf96c5fc375c614ee23bf491cdc022100a1e2f04732c1d7c5aceba46346819500db1dfe97891ee2d797d3347c98ae5971 304502210080950649f10fceee2d35652490145cd775e1150a0eb983a3f437df2123fedb4302201d7665d6ebe8e0a940570e119ec12ac747499d213f2974998a5e7472e8b54d71 30440220455273f96d6c06d47f757bc9b7cbd1d4367454624cfa62cba13221b629bc178a02203f4c62443faad89cb4aa83889b1c357188444755c6597904e34591517ed567db 3045022007ac2c4d114e6dddfb49a4d0548baaa20d58f6819ff95c6002e15c2c96633ecd022100d85827806b60e4f97b86727b7e315312adcdd9043793d18499bc2c9a0959c1cb 3046022100814d795d7a967252102530a786fe6c579eadd09cd5df686a7ca4173885a86266022100dfbc9ed4d761560a56aaa7bdd076d4e6d22e122f1bf6edf811b6efc38089ef1e 30450221009c9b8c8f58b6f47a635b28e14e51d5bf03c8e6f1f5cc093f56aaae57b84c20d90220775b73403143692cdf5d0a8747c4d24a5621e8e048ce6d85b871bf2ca4e83acd 304502200e70ee46cf4504646110e143ee410ef80f11b16b693d3ef0c4b806e737e8fc87022100e3d9f65b6ac12bae8a897d5c4e5a82d362d6e8f864f368a6ca5ae886563f0832 3045022100aed0e64dae614c97ef5ffd4865d49ec92510dcb047e256e49281f2f9c9a650b50220160aa7ccff0be0be8458eb96c8251392e93545150f109fb44e1fa8080db945c2 304502201d473f8a9add35c1d0ed2fd38d6598ffb43061c469396bd76e9fe7b9c71e1188022100a741e27d503c2395eb54bb9e6e4cd9e31ad7736f80a56cfb11fdb8b07f1bc458 3046022100a73977d58bc6cf2b598492aacbd903bb1eaf2c6f9f10d66299305d73f602be6f022100a2e514c3a0692c09246e3d546c42a8048824e06da8f721351bff7f0c4a3513b8 30440220555cebe5a90a100f1cb2d7ac2e660d5616af62445757379145b4ebfef3fa1b94022078ded9b9dcc0c41c0e766024d77f3e5b165434ada432dd22aeb81ee2f2e179bf 3045022100eef154896ff8d5f94549b17d7fd0f5efd85beeb1f2288c51c6720983d4e87c49022072f90f396bcde125581508e5653e993cb86082c39f55154696d1a44b1f018fe5 3045022100a1ce9288e48f9b1d462fdaf28bc782b72b62e2d23e547985b53f534c2807717602200456a60bdfbae675076a5952d4a6c04bcd9e877f341348bbfa3fd339d53734ff 3046022100cc609d605d5685f8dcef8f9a7292ec54630e49bfddabbb71e17a2352f45c5a23022100c17b2dd5625de47672621279c4d3076c8e63995619bf95a0eabf81c352d026a4 30450221009afc9c5145bc7e514cbbc007cac25224258ba9cea98eee4244b214b8590f6ea8022010a86dd7ead97e03ce7a398a6392dd4764321109d83956d73ac99224aa7f5585 30450220519b9aae7f69597aa8970d9f8246e90d75f6490a4d8c50c7973461073cee83ed022100f2321f15a7b5cf7b7cc82c37cf1c273ed489543f4fa4e708706e29237699c916 3045022100e1d6108c6efe0b48018ab2871913395a6abdc22ef4ceb139954479c0d4cbcb3d022009e3446803b910b7e724906d72501f777ae67049032cdb55d1acdc24aa4466a7 30450220458ad8ea7de809794cb8ba65126d860c5e8e27e4df0f180d1abb0b4743bab6f40221009fc504ddd9f87f9297afb10b1b3cb55853258388126d0f64ec17bd5859f94d5c 304502201f0bef88288ebca283668ee3ac38952f087c0f0a1347a4d3cb2a8fcf8600c534022100fa3a875df1ff1cbb747756b08cb00f8fd23079bc6d746910006a7cef0f3b8f34 
3046022100f2733e2d88b207306288c09542812e54a3d66a644aad43c02c47f15e5b354951022100b6120994ff63408d1a6e4cface91365fe33494b01339c933b64ed4d4654ba7cf 3046022100e6e2114495ec66ecb633ecd22f9d4d88f342e8376f56d9a3445692db5ff4ed14022100feb0efaa258a1dd532423c444b1d1dddfd2c4224a4c7de9afc1940283979c1a6 3044022028f2e56e95d0b344f7c24007ab4f12abed524203cb68289abfd6b3761659bd4902200ef5b77ed960e9ad9b043102439a7fc6098a8063ec0967aaca6daefabee9ae29 3045022015e29967062ecb878c34ae96c58b33db4311fc352af21d27f2b8ff0643c76898022100edc0dbf4a6c7e1c7eec4d8381bfc6f3c8442ad0a99eb57ae73651f3f2b090044 3046022100a66a8910ac85b427c3518d6fba6692b91227aa68c82d66264750b358ee0f39bd022100f9be1dcf333bfa609017bd40947ac38c86f78cc2d91198f15e472b7a26d58719 3044022040486ebec0b78f781f583619ebc8589e7fc699d2b23519940327381f06227fce0220788c4ed7570d31f622f29efb3f6c6d25a5e649a80aa935beb44424beff57f846 3046022100fb128e995be121f158d639e4dc9357742e91f9e8a69bb7d3353886f746788b7c022100bfcb31bde373bcb2732103d9aace2c76c43146a6727e87de8667eb6d87198003 304402207a2cfa3b9d62eac74dd1d2134c13b9498efc420b7cab08868f8abf0e661fa2ea02202658cd96fb3eec0a59746f7f7cc5f709c3fd775c88657f7e312aaaff98829844 30440220085ffb59f0e86cc5eb22840ddd6c8510ede6411a62312f4895e9e4f6d362744002202c047b186942bb37fe0ebe1f6831ee8dfdacd9452d85aaab417cade6f212dd95 30460221009a0dcc81608549030823ffc524501bec84bd3a961ea9d81921924471999377dc022100e5a8ffa8308ad7ff3392464d201f5bab449242e875b9553339b9e8ceeb112b78 30450221008f9ed24d1908df60cde747a7a83ba7f565a4d31c06a8c73c00be3f69a92dd7be02205b9622ae161daf12c25a2661ef5a02ff09d20fe30e1f3131b446b55678558e4b 30450221009d8769c7def02e129dd64801ea90d3d37ee53c7a73d7b064d5dade67adc110cb022020aeb6b260cc5801a4d0b7412979689036bcf6c8d7f5428f029d46758d9e84e4 3046022100d1feed5e88f1ba3869d3d8bb4b2726e1975970fc30b8078d244044f2875cd14f022100a038c24e7f5bd847289af1ea6f1a30c21331b0a7aa30e906b913f565b39b4673 30440220584f2a8bc953c55315d8aab4ce96d23ceb2abab67464ddfe386a9ed5e21928e5022032c5766292e7c28e33bf604dac62a08569e99b966f5533eb7e704b24996d4d10 304602210082375f92eda8cb99ca274bedd103931de31e598d024586839d2f59d0b5f6f6980221009ba225c0ff27150f2a870310ec8c702a8f77650bec858a5ea7205190cf8dd763 30450220270ef1b990eed9ad5bb95ac23c0a83014b113d0419bd30292f98a9d1bab9ccc6022100af95505e31c0328b0d706c4c9cf921445ec3dc2ec2089f1d75df8cc1902377ca 304402200d4e98c5d29da5d6b1d35e4342dcb3713b07b5c5a7fe5154a4a095c4e529b74f022049312b72551a71a81f4cccea049d736a3a530cbb90503d893cac165e0b0dd7ee 3044022062b1661c89aae012769ee76d9126a4670025f7dcdbb2357598846dec03a74cd9022019304282622ef365f94829e86de415d868a936d7bca95a82e501c12fe8d88c67 304502206cae73cda507091be276ea954efb59b22a05bd344489f1c82c51d1f59c0938a3022100984c6908b7bf01e8a5fd534b5d1988e7bb147a3d4a6fb248fbf29d0c1ee51f82 304502201c378f596b5e238f28b4168169f517be2788f57e1791b6624f68b11b500aeb5c022100a1d79f33485df5a44b4b7ca9beb9668e7b9cf785f0f4748e309babac4b75786a 30440220429ca442e4c05b2c2e9081341cc77c385c818fe167bf3af425a755f6d519c628022007d4b9de648dc3cbed0483b9dd66bf1245edfb140a0bb5febe1403ecd856b425 304602210089f567bc73f704ea59bf142ee5a19a552e21634e71ed25584c0a90e1ab80cd300221008c63eb12215cbd8ec8235095524a6dff11df53d860c4a5fb5663e8394c7b5f44 304502210094477aff3737f12f3654de88629658db5ec27b924ff2fe194ca86546650172f902201bdfdbccfe7cfe1a41434192fcfde994c67b628a39c14d3009bfdcba0c2682c6 30450221008cbca4e3bd6e3ef70bf7b9559db9b35374f0b6b8b900e2ea7af61c233bdf341502201c2ef40a52a406d9621e57539f8bcf54b0752a1174ac304ac86f51dfdcacf1fa 
3045022100efc2cfd5985feb9106e32dabfdb69d181a223b4b8424156ae88980dbe0e425a502203b3141df56009623c264bf18891097fa577ca3c12418a22b3f179c6df0f0e67f 3045022054ab6a6135eaca0d5d57edb483b93c517bf705671b87fc898a92388ac37e903e022100d165756d72680f96c267b8b331e0d41e679f47df8f6dc31adb260db12f9d1468 3045022100e748b0168eaa5e2d70d13a00965897b21dccbeb681e35bc60df6291c2912a69702203d0ff97472331029b9581186b4f782e98edc75091a4be0aa4335c21abcdf6717 3046022100906a6cdf79e3dfef7c016736336e922af876de8d5bbcaddf3d795dbfeaedcdc5022100f5a4647db65fb2cbe10f0a671280b63973e7e4ab5f30096a4cc47959b8f91ae5 304502204f9049488818704ec9bc101ae08e40557234126fa71034330bd2096111050686022100e3aa87396c043c0f349261a264361af2dea038d6df59422c880e648f4ca2a9ac 3046022100b730eb24e8e311b47932328647af1c4d0427909b4a06aa7dca0fb65d12e65209022100da08179817fe4f4d3fbec9e81e1410bcad1420cd535fbf6d08229234a75d3b12 3045022100a52d2dc7e1b5e3d0eeab62364f0c73d959d507082efca9a589ae174b4e1ab754022026c749ad10aea6731c73b5e80efbd8dd9bf6f576a9f09e7e6cf90e863465831e 3044022032f0ede33662b7337128d429f6735b16e9dc930efd21d5a7062cd6d9ebf1caa40220792a9583bd0fc0342d16b832d6f276b2d0d10b169e429fb2ab2a3cdcb9b4efdf 3045022100f15a9bbd6f10630435252782bf0ec87f653262e0c862fd64f2f5b22e962dfa2302200304e4d1c425e9c0d06e326c9897853c6ed9e14ae0860e2671243671e8f34210 3046022100d46df64864719cc6854cc81668f328a7132342ec9c454b79872cf89e92e1eaef022100af37e7a3ef977e6d1fcc72b3cb5249dd3b91249ed853824dedc64b7180f902fb 304502207e967febb102f38ec7db811fdab5582c7a50d1b40540c851aad370b5dc15ef5b022100b0af4c11165edcda5d0c942107a7d1ca4c68db4e46d9213a78c42c65a49f0f5d 3045022100bdfe14dad914410f5b1ceabc229487ce9cf43015e9460556c2fb079bbfd5863302201a51239672e5f3b6d72c5ff1639e963c50f4862fc313decaff3908a27c6626c4 304502202920ce207c95b3a97ad30afa109f9f193d2fea34bf35fb55e916396e988e9fcb022100b5bdf4696654a5375e722259afc109ea6e934baa6988a1b3030ab0d83053ebf0 304502210090317d2ac84f74b4685decdd95196627b0fe5f046ddf607c2aa6d3edb62c086c0220522106246c02aac60793f6535261ade25685a74ac713ea5a2566a28fb83a7717 30450220243a8ccd5c38ac2e824d0be4a9399d49f91857949e9be42e821cdbbb286a0720022100a1d48de442ad61b4a192a3b4daaaf98f630770a712d2897150576a66e083a967 304402200c6808107752e013a1959806fcc1b74e287572315323bcab6beced9cac20f7f502203de0f690d0a2e5f7e6c6cf66e434cc6da0adaa54fe1fede5ef2d9e73ab8bb96c 3046022100836050a2364cfe3d0db12c7bffea1e7f10e90af3c4686e5ca963793e255587350221008eb7164f33b31404b406f582554b1de61842bf317f5de4b7224c67c21caeaee0 30450220579a00adb68a15f3e921303c3e814da5517cd6256c669f7b90b95bf06020e312022100ee99370acd0ad5c58be465a0c1766ebc71912a6c5864af6dfddc3a82f95c70bf 3045022100af0653513d2021b9f7bb3494573db40c3b7eb2dcbb2026fde39aaf18504903ba022007830cda4c2edf1dc8194d7463bf05afae3ba5fcd944e3a77380115288f1be80 3046022100ab73c1f82c6caacd999131e0c415c6ab2b680ffed5975bbaeb71dbe84c2b4dc3022100f5c0038281c64fa65dfcf7af2207f821dbb876f359bc67eebe41398bb5c9b330 30450221009ba8c58c9eb87d674ba656669473c36e975508701d45af4636038e875847b6fd02200880d0cca297503854fc87f6b6bd74a51402f23017d948243f3ecfd674cc0a5a 3045022100e399c26a37caa40836e9a093af550d1c65b655be13738ba77a54b683b24a264b022072f0a2dc2720c1936ad44640d1b2acad48363d83b27fcc6cf5e021dd91b486ff 30460221009c4a90a1c56985d47185992aa5b4a7f53616f52720f6aec253a91d6ab887ee25022100cfb419105249de56087ae582b04ec04d5c5f857c886e96889c656adfd88df140 3044022052a6efa846f838f9fb6236e011777c91b2338a71d808be0a3142623e4c82230f022027c21efefbfd3e3aae60f60904a406fc6a55e5253d3e859ddbd265888dfb0849 
3046022100afb3e8991d765259e68115c159b739919f382913db896d14ec6cc57f6ba3ebf1022100eb216b2aafa37c4dac94fb90bddb2e533999d924b449096a98ff0fe921993082 3045022009bbda2884dc5c48715ce5daefe93dd203c133494e588fa8c085e82404206b56022100a8f9f4ebd09a08443356c1b70ca2f7d888fbbbd509e05a4dd46f2e8c3d56c59d 3046022100ebef94236bfe096a57cdea20ec17b14bca385bf54e1e327704e27b043c7e5402022100b82c540933825dc890433c1c091910fe66b368ce08248607cf1f4bec44501f6f 304402204903e0da050edf0336244e687f8457e398e9e752c243ee144b04086ce879950902207b8010382173c4796931e527f56514d1a6f245c6b0e619ca89e231e3eb4ab3a6 30450220096aa81b9685058690d6b6e111f3fa4a0b5afc0a4e28bd0f1677b2b6c1fc00a0022100bfe3cfb6a50ccdba42868272ac2bb5a12cfe7b13fa994cb58cf0dd6dd88b4056 3045022100f0d14ad7090ac5d36fdb1aec120fea4f7f82642c60153599d4020f392c894bcd02206806f38c857a86e940456f47bcaf3c3c76e8bea0b461217e95012913ec3f6407 3045022069065d39c7c063a6bdcb83c2ddf8005b22062faa81c7cfff757e50f287388063022100c0c8f3bfbd9c46f8d56eac5f2b5593facb381256a4f3363969628bb08d604ba5 304502201124e6946291b7af60cdf851521ce42ff5b36d767d5ba279613863dc77fc6749022100dae1eb102fc233e06ef418d2a1e45d59b9b0c5b94fc77acff7cd27bf5bdfe6d5 3046022100ef4c61276d24d325c7baff95c0c9c089dadd29e8662b1245b2bf7e320ce93386022100baa7f3a6a55c4ec77ed2853349c06b88c313b55def01e4ab9ce166e903a955b1 304402201932d8a80caebb750d63f8d56d3f11d6a8b30e22ea8b908a6d5fad48f57cc3d602204e5735e7a26f667e1588356a650864a7c3e7a97a24bf21ef5740b82a9a0bccd0 3045022030e2d5a90cb8dc2b287e17568a7098b4770a80f8f9a88a779ad327b13d82bf69022100ca9039c0cd986049b4b3c5584fc14b948a0141f848b628c604ae497995e0cdca 304502205e2bbaf613219e7b444514b0187124333a2c74d2a6c35f1b03de51fb6ee8495c0221009bb7b000040a24c9d27aec051795cf9465d6293b7186b389f9eb195ad3fc8a02 3046022100df325c77bf9773ba08d5bfe7fc49646003ea21d1cf8dce926a8c9578505aa955022100ce85da13db5e148d09b347d072a7a424f8cea6f2ec2f4881e131541dbc56489e 304402200db4ec9a7918fcf756de8a4f3efeb321842de6b6c57d42a9bc171d42878d6f3f0220766263b197299632a038fb5785872a533f5c7eda54a4a0e6a238b1b886a01fc3 3044022013b7846cb3cdd3fb650fb17306961a18b0d08f6d30d8408f10cbba2c3911f9f202203b10804338c4296939ba2865b398a7358658ecc414a27fe682a811837f3545cd 3045022100a8903862b4b36d82d8c71155cf8d19814ad0da996be7a1d19186af5c0a569171022051227b75c4539033f674877a12f603544c7e2a3af879f7237a08ab7e7fbec337 3045022100c78856f98d323523a72ab4dd43927f2d127f5e7bf63d0e52d291f434b861a3e302204df65d1ea0d5ae9fb7be93a9f4fe314dd858a036e55bb8826da3dfde41dccacd 3045022056920f86f5de3bf9c052db2e5cce6847eaf8a0c1f21845b4044d4f1a73f6442b022100b9d30477f234825b6563a1e60a08b55157e7eeb9144d0cad3ca656bf57b01d68 3045022100c1bb708f485dc30ba55c115c4ad2bc0fb148a4bbe40dded84bccba1c65f9778f022058cec9f50a8d557d06621d8b776ce3886acc53130d9c5c4720a6d40029f5c578 304502200c041245b6e9f7dafec6c27426e79d6eb56bdb578d06ab4cc348c33f8c003d75022100d7d762d7cfdb29ff74244b9c53b7cbe3d6fa22756e32f8267b3230c8ef0f6e23 3046022100cfc8b906156bc348a23792277013b444b88bbf396cf4fc84eb634dc16f97533e022100efbd41e3c58bb161f7070faad076595145cc3b76540573733304cfe92abb7e7d 304502204ed1b7b1902a980e44b88e0cce1b23dc1999fc32e0456a875c12447108bc8446022100e0f1a4845d44fe85280da9a6890b81a657bdc664f54bce0d55fb34175614daef 3046022100cf1ad2a236ae2d5698560a2e78be778d062d7fff7abe2fef54c8556cd02a01e4022100dbf01a5cd4779a995546c56bb17a6210e3ae4c5c61a653e79aeae120357af93d 30460221009f089b297d6db28a76634ca4a5f425c5100be47bc72e230377e264f8462288cf022100c345b698ea818bb2cbf6ed3153094fc4624af3c657ba956472f248de1be6e545 
3045022066764f71d4bf4ecad53660e95572ddaf208da45b988d0d6ef9161df0a1f7a28e0221009a1b1456fedf38c3878db8de230532f8885945632e57cac95ed93067afeaf098 30450220050fced6338a99d3412046686e8be89ee47233fdb69939fd392028c5da1bc520022100befc0bfcef995a0019b0da186e77b88cd8e4cb8b96146d9914843077ef226582 304502210095d333775f79823c2d82f571f7f6284e2748e17fe359ff58d8bf2c6bfbe953f2022045c91564d7acdc00aa9c0a247c7b44f54513c51412c17b9740710850027fa465 30450220400e62e32745bac87723ae84f5180ab49d9eb721fd305c6ed537cea294fdf33a0221009bcfb42142d194de57f929164ace11506b97c17cd44ea20286ebec58c721f514 3045022100b2f8563249f892c69ba5b1ebc3a5521267b2b853e3cf04009f5370125ce085a1022074c19babfb0ea020a885a244e6f646e098f3f05ba53ce681cee81805c8beb7da 3046022100a0e2f7e3d8a0103643ee633a8dccabbda71d0c6e59eebbf04f9eaf233a510ab802210086ee72319d90f9f49c8d6652b5bd4fbce5257f501dc138942342a429e724b033 30450220308599ec8d7d0dc5eb64556a6f755940b9bf4f77350ecd1aee23fa051f085dd2022100ad54d83e703b5e133b7e308a409974e3716055506150fadbd68f215e85439e70 3044022035e41750d970037edbf259b6c32ded5f7ce70c9f908221f4448d2e50e835a8ca022001b97de17bd5ae453f42b602562c055e485021f20ff87acc5afc19ef7a7f6c3b 3045022055872269847eb52cd81cf7d2001624e5d59a0a3da51ef36c7b3c7e239cf83794022100d69cca8d768f49599ca2ce0c50a82fe8360d5618f9394900b132e4c0a68eae55 3046022100d01bf2b3291543a1bfc26283e1f791abff05e5ee161df0846c9890a42472a201022100bd46ba99f49c5c83859cb2dbeba7566afd45da5911a7bbd6a72865a242bf63a4 30450220361f41e969dfc47c16f562cd43bf55b911b6132ba31b170bdf9ca258d5bd7dc8022100ec2e4783e5f31eb9e5c55f94145ed36ab56754f588269456bf4134720c296b73 3046022100aeea4923d191f61f5a922ccccb4c161be8f13c6bd15950493665b7e8ee5454d6022100d32074b179cce94bd5b050b181ed31be5e98406b9574e860968109bc1ce9c929 3044022070e60d263696bc359ee685c470a15f75540af06cad99b81ba484228e5ee89bd302200bb29d67acd531d0fa3dd121c263e0205d7f45d3833f7b905b0ffbece2cc0149 3046022100a33bd7d6c2eb56a93c9ff9323dc6856e455730a53aa5fdcf1d118cb9b83ff7ba022100a7705ab88728fadd097950ca296954a6cdce824185e48eab3394af1a36848d36 30450221008664ab4d1ae096d907db81dc83a04ec8e551998b9e7a99d8a5333434f6456bb302204940177c9c09173e84e8ad1713292162e6b0abd1151ab086de323c55fb219acd 30440220636fa3e5fd2c3f8c873b4248735a19f53b8cc624a77b616768a7ea49fc7f12dd02207087311f7b4cbaab497cbd4471feafba251913974aca465393615098bbeadb5f 3046022100f2b199768b1a59594335142dc848c684b5f7c33adc087985334bcbcad842c021022100a4c7b1eb81498bf5b688bd47cdf63f30d45f55f0c28e39b969883c613076bc82 3045022007e46da3f248b5a574ed99858131db846b93fae0f02904f2dfa34e78a63585cb022100aab4bcf2a88f38a317bae80eabdbe5a47f50b7deeb456dafb310912e4e878e65 3046022100ad8782c095bea5660c732d928f6144834ddd9181dbb0eac81f8c880d88f55ead022100af2c89b2813552543518282d93f520d6858ddf87cf82c7626de7f7ba2bc48ab1 3044022024ce5ca4bcda48af31337864653d4412775e95ebb87659cb59a378a511c4580902201e093fe4f7f01bc93f4002ef807161eab56ce9e1c0db9ee749a501aa55a0322f 3045022100dc63118c2c20bc81cc5be795ea7b278d710cddb5be1873948fd3295f74762a3b0220715c991af825c175985166469671696797f79a43a5fa7e0297c189238d692e14 3045022026bf3a572229c8723410572c641fd16826d1cc35cca9855788ffeab4aa310b95022100e6b15b0df27ebd3d06aa97b8163cdbc335c1e01b71ab3aafc7d29659cefef9ad 30460221008c9267a33fc1a7a2cf79f5cbfc4b5a6d96409a81d44aff79522707d937429dd3022100a835145495580fc09d486d717fe5fddb4077f234b725894ecf52941325fd60eb 3045022100bebf2125e7bb5790c0b773b9e64b800f3375622939bba884a39010b2d72004a402201e81e81d980d81865ab39f341a20976c26557e74a23b0d0efc8ef3b8c545d864 
304402203a6789a602bf9285f942fa0b48840ab8e43225673350d7d8cd7704d4062cee3002207b1ab1bb8adc39a1eb0cca0ec8c949b1b37651982fb34ef1a81cd44473a08936 3045022005bc5f3f6217f0bcb7e8d591aaf9676357e12f85c4430119780f29b28bc5ae49022100cf148398ba3d3b9e1c56f4ca4627585ae2322aeb970b9ca6b64bf4831cdff894 3046022100a29b839409f44468fc2979070c698419023f9b9d6321c76103c594d65436e336022100fb9fb80f1f6ee4a4db3f501c6e44d2ba792f005258a1c68cedab2f43c20651cc 3046022100dd07935864d578669d642f2edb914a8cad6fce72c29adae352a05d9c3bed153b022100803c2e1bf3dfaa7970726a188aac768c951e6cb8b616d01380ba972b76615eef 3045022100eb2d1fa377d86d676d26ce03874ead3ce864faf9d03d78bec66eab95dbfbb39c02202b6fd65607b31110ebe3fc14a3b0078f5bc52e043c366ae4e93ea89a35e8a337 3044022034ff75118c67085a1c5fa51a18078b93d878b14a7182e17f52e3f6822b8ea4140220253538873333406fba174b3e471a04bd8b18118ee9b531ddc223a696df01a6c3 304502210080336b48a1971937e14c5b92e72fa53f872bf2fe1cb6106c1673bf2bb4fb13dd02205db68b05d1781f02c869966993918abdd48c5024345ca63a945a0eb5d75c1da2 304502201f7168e51fdfa6b1f164e8d02c82c3b644affca998d72c14c4bf8a91e68607d20221008fab1c28f87688235dfb96e204c6a56d8b0eefe8364da198f9e2f1809ecc78b2 3045022003d63ce8ce36388c7c98b750c1320ed3834db960de2387c4a712c4ac8b933196022100e3d1bc285e1bec4566371c8ff7b4de425da67577a625e2d1236845c28f23c53d 3046022100bc5867662b5eca02cbc931c7cc4c9e60a855aecb04ab170eef81f208da2f34f4022100a70ba48a40ec390775b60d2cc9c44eb5b738a00b2bc7d986b30e19320e2bc17a 30450220365476819bfb4eca3f4800662d320be7c141982541b252c2fadf52f311d508d9022100d14b3d42bccbc05bb4e8b56d7385aa0253bfb50a49672e9000917a66714a23b2 3044022056c37b58d0be6ca6cb007d36f7972ee2d69d963e76a663be7b75ced267300b99022036e3c8e0e769c4963ba18766ee0616f77ff4cdcf903e0bd101ef2ca687bff8f5 3046022100c4e550c590a5db5bb110912fad5cfe3f7d6d4541c8c0444b19b35e6ad9441eea022100bb7183e4aa59e8c98f3ee264b4d865e15e72416d84061eb062c383e2d5b6d562 304402201dd009b48753da6c6a4a048e6a1d1ff70ea2de85471fef04f5bf0a142674821102206dff172a7aea348030cd3c7fe7f8eb1720360a78bbe79aaefbb437365b836735 3046022100a109ac24f36c8214968d2f07cb8b677bf69bb72e2e2f28db7e75d64b7f646564022100c9652ee94c529074dfbc1e877ebe1ec87f47ca439f700a6d8842dcd9a60e8ccd 3046022100a22381446a4eecda31baccbe26b408e4abf3f1372724cdd72ef345565f4c9529022100e2e8465e4032df72fa7f742710c77e424a510ca9586fba790d6d8e9e4428fc76 304502204988c04e781fe643ef428964b00c4e9b52e3838f833b674e6d6d897bb74d1af3022100b5e83100d9601e8715ba1dd38f91d0efa52f052e8d2902ab9aae79e4fa288c6c 30450221009a1eaa1f7b11b869105d75513ab4db4acd0cc758913374ec1d68650f4ceaf1f802206daa3ad49d93564ec72343bb358205fb33fafed067ac27573dac81cd3c680280 30460221008399770c3f9b235425438bc88c5a3e7acd4fdce0d292ea2fe208c9e42d9d11f202210091817bf2a7c2742330c8356742af08df9d3f215cdb9a48d12d8973161b8d3a07 304502205b12f183413518a0107d83ab2c77b0bed46721df0d8d28f8bceb38bb6a818575022100f7f5d0c854b50329e3b9ca103972ca9d857d0c6e3f3b4648d59c73a4375e6d2e 3045022100acf6b69f99ddc032d7bf79187306eb9d77cfed7dd792806cca13b2d80bf76f8002206dade396abafbc2da1129b6dc0c0089159dbb33df731206f3a43c45aea690d76 3046022100bfb2aa47d7f96676ab30413d352d6c1328678a9343fea786a310d3f0a8f864dd022100ff99832733bd92c2954a5f60810c20e1aead553913546ad2b55c44b1bea9f30b 3044022040ef1a6e2027dcd5f1e45c76f95751e5a9c421a5fa897ebe6b74c2cae66ee710022001b731185b57d388d238d5de5e313f24943c8a52f61a0e45f0f5f24cdd7bddf2 3045022005749c2f60b378ce25c9c0bf28bab2f4851e18d2051b71c7dd90a61936280a55022100c041676d8db4b1893785c95ff4010ba167a016566cb67b6706178103cdc2dd66 
3044022041f0f980c0a26bb067ff492b6654a07171ed964bb87ec2d0b520c66dd88be5bf0220422857d0400cb987ce420775e28a8d41aac3c31c814136501aca9dba97df0e58 304402204d55e7b91e89b20798225bfcceb1a1a437590509cbde6bf4d235e07712525bd602203a9dfc1547b065cfc231630ca0cb8dacdf1e9b16ba5494abd54949db75d795d2 3046022100e679f56a9bc6a3df87557972b84e247b3ca46c6f5277a64061a8d76b1ad0b16c0221008baf781653d01de63b13a6e1c4382fa81dbf9edb023ecb7c42692d9f86bf7828 3046022100af32832dbf643299cf0ca6aebdad95402f580983d39f8fe648991626b9052c530221009fcf94d12e9f6e1d62f2a97be3640213e9daaa7aba705e1384651235d724c46d 3046022100e39f8b45c2a57ee13466a2f8c71f704ac31ca8fd70d835bd082ef01749855f9f022100bdeb5dd40d5ad7b32f590882c82916a53526ee493ab407eec5e387d4c2b35af8 30450221009069924a61d0e4faba2486dd23f1b3426a7ca40e04cce6891df75713fdc989360220545fa7c2ae5e0f093c864b844d6e669fdd027c78e6e2217f1475cf8cbbf66c07 30460221009df3da693b96ef237356fb830f53ac3540b5cf9330758b5bc51b24656b528e7a022100a4603e2ef9b6da379090a2e82ad1603f4c283adc7103f6f79825ac8e01d2dc87 3046022100ff9c73d7730dbb79129fc99c266dbf58850c152569f96157e5a222c732d2cd7e022100ba255fb147e51b5e752f5997476994850a5c7fd0261fa14eb5bcb2c09a213daf 3045022016d02d14b477621c33488c27e587bb7304bd18f5711ccc4724b6b7528d9dfb81022100b54e7bedfdd4665f11f1d5392d12555b694917b6c1a3082e7f8ed6ebe100da31 3045022100e5b2fc66debf8a7f06eb6a09be926f1421cfaf33d39265cf39b2ecb7b711b39202200c8e36c790dcd1c5833a00cc8b6b0f7c31d720ee0b91294e719ba10f4916a190 304402202aaccf38136852edbc03c72ad697929d315177003fdb04884c8256467cc928fb02202b75ab20d62c10b6f59b970458e8b28a2ded3faacfbe966e0d913ac8f706f2bc 304502210090150a67ec886a7337f109f0b644e5cc343eb16728e49e126fde55f15e1ff3050220040d94c51093dda53d5aad1cf6385050e5eb47e338a073dcf64f408ccf710e81 3045022100cbe3d0349e4e19a0efb85f6871ed996b3ea27ff9a16065688bc63d707132f96e0220086d4bd14326193ad79067ef15d0e71d68dcc0ff13a11031428d92e1d73d1d60 3046022100d7c93fab3d760159d56a0fb22c28f30ce7c735a2a21dd1ea487576bbb4587755022100b0c9efa5719e08828ce16b99de7ed5f99faab7fa5191e4a8efd001e0ab5d282d 3045022008257990be68769b8fd567e52e76525dbb0c5c05ab2bdf0542ce2a3ad15c59cb022100f807ca2c377e6e988ff1a35beaa3af77ab1138ae07b4b60a35a22c64c710b416 3045022018a05338d0f2391143b08a946c716423518b3f8972236238b10b590c2d166f6e022100d800eda3424f9c92e6630d7159164e0f3a03665623e6319bc35da7e4e867d5ae 3045022100803fe0fcb713d5f63da3b5977342aac493696749857174de033a10257df227b302205acadcc1b8c46aab7911e7e5768188cc9a08d1fc9d5db9db4486b0e6010540ec 3046022100a38c841aedb211ae6d99c16ab1450d066c30bc6c7aaa755f0fea6b7ea9f79232022100b75057435a5e0a0f271d7a1f7385fbd4badb6e3cd2e61b5a8f7f633da272e19a 3045022100f268cfe9587431e1a484ff6ca1ef2c7717c3485516db94280972005d7d0da090022058eeba481e3f609bf737df46c97a69e7b4930d697784f26c0e6a9d3729b0c52a 3045022100d0f6bf1f87509c9c3fafb2fa30fb4170bd046d9b8fbbed21d8c66fe938f00a8402206f190dbd64079f8f998189b934f30119130a5e3954b3cb23719b875de3274db4 3045022028eb01375314425c8f7141f2801d48bbea00d39de85fa3b3b9e365851b4029b2022100e76c678c80ca198b38f5ceb7355820967c05d124e8ae90ba7c9442390443b060 3046022100cd14fd4a6384f89b0c9ff2ad642d6999c89d7780ce7be0f9ea5958cc9392fd55022100ac982781bf6eefff3bb07c7ad2b4767ca01e2c6784f855035bfc164408f3af6d 304402200467fad24e7443d501957d9e599f318322ec663f34bc818c77db4ccea249632402201ce248b1106cf061957f7719a3135a9e192b75f8f81cc0574c6158fb6b679b71 3046022100ab4099529b555c97011350378f320eb194673e0059902fc3ad201d93279269da022100ca87335a4d67bf9ecbe99efcd1d7af4b3020105a13e1fb7cb385c62ed5012d5d 
30450220086083025c4b8b4e59976f3cb921d8e77b5bccee9328fe9b0bc8dc9e89711128022100b95df21f4214bf7558ac1084d1bb8f03294d150ded91f6fdd7dd9a5b215a31b0 3045022100a4adc4b7bd00c474891a97e2a6e15e1946637e21f3d04b8b13bdb70d920a0996022000e790ea43075e2468b323a813d2d9b8fd7cd0f30462dc3dc0c282dab10f4bc6 30440220112d04788e0e2d08f7650e5a6cc05a793d9ba011a01d6d4bcc2c9060aae8efaa02206dff10dfcdc12462c72ea3a34b8bb1cf9430c81a55bd696029bcdfe787991afe 3046022100b9b5ccea79b8f440f5b081c8e8312372d22b0a32c3cb9cc36e000da01b4da260022100da753c5713e604d9773aa3c4dd7c40ebeec0690a6ff6f1783f0e0465870d80a4 3045022100fb423acb04a3ac234b5d7086a2630e0de5644cc2d39bb230eed20fac0db8bb6f022053fd5d322233d0d2c2f71c15c132d785a07c72e1a680a5a954b1f03deb9a1b5f 3045022100b32ee3226ce2ca75ea3d582f3fcdfd5c280871c9179074f589777904fde7efc802203c694892667ab2fde374d4fd692a17ab836a28d0e5320cd574014f9c814b1e32 3046022100d4542df794708bd1fdf997f6ad43381d19489d3b7a12146474893de0afe5978a022100c8e07c3ca0d2b786d1b6260485ffa989ad511df00a800c6cb813d96993981408 30440220100cd7129e752aaa16c5d31878de13f90c670f1d4a05124a57eb9f4a995b20b202204a226e9289acfdfb9bf6eb70b818cbfb62e4f4d96e2ce55f82b75cff8a82a087 3046022100be3603fb29af1f92ddf51957534cf1ec8230089aaa9040cd7b9f7e79a776cba8022100c44e11c8f439018b7c62d44db54e13b1580c55c62b8f64558377cd21e78c00f9 304402205dff9dfcded1e526ee64fea9016a2e2b506a1015b531b2f1552b7c76d0c6797a022079d58431b65466df28be828d133aa7a7980d7eda7daef23861efa7895e2bcc63 304502201834a09bdb7b3b60ed0e96e05607c03d58d3f1b9d5906ff31eb170863fab65b00221009439b91554376f758daea0ad49d5c829182b34af8b515543f8dc3cc013af2e9e 3046022100c0434d700c8ed43221424a9b3528406ab59143ffbae9dfb519a7744f16e4d185022100f681f5025fabf06ab8b3c5d57ba2f1b2263243721ab5c2265f4430f304296174 304502204a381b09b24d5b48b799af8f45619645beae212fe216279ed86058288afdd1a2022100e7455e11ef9a3c26a7879f9bb65ca105eacc352126080c50141a9694f1990c76 304502205bff8edb243388e0ca4f24319aa73429d7343f587a092e0f0999c34b6f55b560022100ec92c44637f70413ee058417e374074317f218d87ae03c6f5bfa0e827b2d59a4 3045022041229b3d8431e99a3c325486c19bb16b58e64fe025773390bb4a3e5760c5aff00221008ae1253ebd4cb83486bf9942f5f0fca69893a605847748a920b6f009d43cdd8a 304502206fb4aa02eae3349024388228b1ab74a81866015a0237087867cf1b4bed0a8e5e022100ab79499140b6d958c9dacef7db79bdad32dcf9ae9383ed8f14fcf2e861d82357 304402203477a44cf37066bea53e59434fa1c29aa90b152602519627b30f5f6ad48b1392022071e006e939c4264db62e93d4fccdd3c588852a7fff1b98ba88d9990cd3df7c55 3046022100950fa08f7aa9286ff8e6e1275d303a806ddcd89e5b45829bd9a4dae9ae4c76e202210095df962b3d338e0fde99bbfaed62d01d68c997f1fe6d6aed254118fccb37169d 304502202473b866674734d87787b7a28d2cd134ffc2c3d7f22b26fe60a8584a397ec4af0221009b86e78187aa70779f9eb72dd8974502a130318d99b8188ad4dbcaaae8353e49 3045022008fbbaacb3a45ad92c096d747d53c87cd1ed9728a533e29725b367ce6d526355022100b88110e913e4ac9b077fa7828bc7593900ff0122eeab997dfb478742f39dcabc 304402202b97f36b3077982ef86f33a12d2d4c04ca2971de726dacb72d89f267d497989202205a10788da9dde21c4a66ddcbb724b82b2db796e03eedc8f45b9b269af4df63d2 30440220718d44292af3191713bccffa92408f21ef5f979309c6fa10c79731fa6049eacf022022a7ea8b16b362c4823b2bb2ccd33fcbd784385dfeab849fb0e5e8b136fdc465 304402205703207a805fa710c73560f39521c403c8740dce890a1283bc0a5095c449041a02203ba8790729af0ef00cfb6ef3e248a47db1e76ef5f7b89ed2249175d114b08bf7 3045022100a3def87bae5e786f6156f747521abadebb8b4026522c149a59556865e3934fcd022039f742f99f368d440c3abf5e9c0a812f4c4f1bc653ab15639174ee0e2cd1b55b 
3045022100eda7e1f3ba222dd292ae958d56451934506a34b0e42481a0d6b9aa093bbacc1c022069a60398a09ffa3e6f123e96deb94fa8f1da7115b202b70aee7bfd9ed1da1327 3044022043694fef257f1695d4b99609008d05516e9c2bd668ff38ca2de513fb0437f892022006ea7dda5a77ef356f5b95bfd787f4264c43c0009f14ec77c69763bb1ec42c1a 30450221009f44d9c7f914d5c8e5ad98650e73019f42ef218fef33313be307568da3ba537002207a9e1de65124c3a99e5b69b87c4ece4d4a2b0804d6260b3f6398adc27930a0fc 30440220562cbf0e7a271c05c971326e7c0861ea932344fe22a20a8e4960ae7775d452900220587f246bd621e0cbad295da479e1c4c304c22764493b13fdf39478fefe796a80 304402200fa45089d5aabfba67513833f1a92b020364c0713fcfbd3956b5b55100681f5f02206eec39e2ab8b7fe4cff9da48f2a4bfbe8211ebb1a165fb90b68cad006878f62c 3046022100b281030b11e9d9c2fda3049cb19f243bb5bfce64a56ccfb8471d36f8504fe43c022100e3d29e0649b4fa4e30d7e3b226cd627a521bb98b311cbbd89ffa6c420ea0a6b3 3045022100dc95ebabd111a6d296bd02c2f726bf09034e868228887856495f430537818e69022020b4fe35dab10525927a7d7cc257c1a379038ebebe65ebcf35569360f2345da0 304402202ff538bb5cb3b2660c7c3d8beb6b8e21a362455ef8550464bd281c258306ecf3022016c3cfecbb281f770df387dc0b547e5110b88dba57017f3461c14e688c8280d1 304502207c0ae982418252a167b1d8309fc80c19988b0d04d8c36836fe39d1b3dc336d08022100c2b2972dbf90b6983aad9676d25a060421945bf0f100e030de8c5718c2cf8175 3046022100a08f4c9bfc36257ba338858a7ed957b3718e400bf5ababd112f454ac12336f92022100e8ce06178b0aee5e5afa06d33ebfcc1c20d4ad2da1e0e4d9b8eca560e322c0a9 3044022051a2fd067b4a11a4c05448f2a3cf70240cbb812eeb8afd54ed0d66358e541e2f022066b7fbe8360f94e6e17903e52d2300f22d885705d6204d7bfd048207576ac078 30440220648e41dbf5a3202b8737484d2822beee82ff60c34077ea4a3d4316efa23165ba022026d2d6a7b768754a7a36446068aa2c694ff3903b748cc32fec23ac774773cbf9 3046022100a6cab64d230c1ba705f2d8122cccc5373cd8bd70845fbf4121fdaed266f6f452022100c3ca8008ee8093fa634d31fdc47444bb534cd2cb30bbfff63f0a90999d2a5edb 3046022100e9e6b59804791059dfa643979c6ceb0bf610a4f53c0eccab05ca4281ecd27521022100f83d58b17cca1b30546a33fa15fd1946d4b8697aeacc5b11cc381c3fdf65d270 3045022018e60faf60b2f123d85ff241b679f0b253579e754f85053366274e1f539fd2be0221009521d623c32b83cb998b7196c333dbb7b54e76f5d523d7e8dab37d0284671335 304502205d7964e290c053ca91f5e32ad01d8090c70cbe84933a676dce21f4433762716b022100b36bbebe49f83be7d77fcf1518f37a59020cba0a389a03a40b1f8f7f1cac00d5 304402201f8eb9eb2002a2d66d2b6ba151b0b5a68ec21d79e1e60b15f9005016718f8d3a02201d6a14008d9544b9ce0f4e9daa52767adea998eb3db273b99f3e03ea467e4d21 3045022100eec262c0c93a05293e380fadb87bcc6c2be45c6a8172f59e51999e1aadb2d5a6022069c128645a2f20c7da8c8ff11c3251889634524ebc4ad2ffb37b503c21506454 3044022016f1709ea2da25ddcf8357f3a85ae06824e769a0566a785c0838fdc47767a9de02204457e7860697b4e1754bb083acffcbd7585fc2a268c2bd98e769237f5141addd 304402206ff68d8ac1a09342fe360748ab8d880806b4301c5658d24daf9e7b25bc980daa02204e9be2484d6021b4004ea4aed62ea77cb3c83cb3c289521db78e6d8b30aa9d9a 3046022100d4a79f2de3a620a133702cd4490f0e3f9aac3966df40b8f166b018f0ca48ba77022100e7401df27e02c99b285776095cf518d11c9c1718559093a7bfe304488b252de4 304602210091ddf59ba22a3ee9ae2bc878e9ac66f25f051d1772807ff9d6e02f41b60227bf022100ff34fb13cdeb95d809c39c067ca27b6ac107ae23670c9d7c589232b7f7269f8b 3044022036c45b80394d0b6820a0885fdd341ed7a9193a311a788bb570065b330ed33774022078d04e9c2427441a1eacb6f8f9c83fce427e58214c80f74e18b21bdf903d4180 3046022100eb87d97cc2cb14ec7afc739dd4208050913ea1c5f51ee1005648fedb66bf85f0022100cf47af7edab8a7d306b2f0f78bdd02f932704c8ba9ed396f56f79a9d9e8645b6 
3045022100889b5fa314a03107d6156a6d6c9090e5c2f4989ba4c18334676aa1b4f5fabb9f0220099f2b574dd550d4cedc0712b29b73872360355642156dccda71df6bef63f2eb 3044022058aa7b5145fa03555c6678bb1538a10e81f40f3e32d5c9b4f1721700a320f07e02200d720a397af4187e023028ec5a715ecba5a3f39f8622856a198585cf0db2c71d 30450221008007fb18bc40b5e6bd533d82444013a7ab56ae2105dff75c5bf15f1b468b125f022001b2efc8d25524ef5237ea2f52964d0b2db134cf3131037855ca84ba43d0087f 3045022100d5647b99d37d1f1050ebd171e6422a982879aaf560198569fe04c0e15439b6b5022029197a40cdf2973478248f80c231a97a7a40e2b8df6d2ae929da2d70964e6daa 304402204f4760f8fa50d1724f82d1933f4fb518dd3bc1f46aaa2c179a48bc2bed4e485d02200f194f69112ec22cfeaf4eebb0493870e255e71adad7c628c5e28a9f653cac92 3046022100a6cb37c65b04bc07e9b81fca4e5bcd2cd44a957ea8462a7998dfd4dd5f376a34022100e4f9359f84ff2f143eaaaaa5c36e9549b78b3c350884126990da639837110bdc 30460221008aaeedeae872796038f7de715899a1bd2531ceaf0660e68a2031e0b2660ddd16022100e8d1596fa3a190ab917b6116fd7121a6ec20b8d82396d7f37c09a49450f9fcc2 304402201573e2c1d7255b8d2d7962527f296efb8f972ce555a943307737badb1153108a02200c8743bbcc8f760a52138a50b3bdf8dd8078f0c0caa9c7259dfa17e64fb1adab 3046022100a73b2fedbefce50bebeaeca30e61d3dd75442a93c74a6be3fbe2237db0dfa732022100b835222ecea5016a8aab5d3f4081dae51b634a43f77e3a0891f73c21c5b581aa 3046022100b9864bcbfd561529a842474142f2a19e11be53b64a45bf646bb518d6b11ef076022100fe007e929fa58b744a988f1c350529193a394c97229712d9f50d0922dbd0f671 3044022047c87ecfade3609ead5a1a095f68e9f768d349d87406b946e6875e1a184e3f3f0220022398c0d32375a86e6db4f949efdff55319ebd020a7ac65f6bfae1704ee32e7 304402205c766bc4cfc0bb4cd82760e1ec41140e679f103f940bf4411da10002745543c50220447ba959ccccbe73c34d4117f5d584758002ab306ec0d08209cee9bc1a469f74 30440220428c485d5812642ce9acf9a8899dd2f19c25d09bc678a4233f9f001514d3831202202c2d5f119ca19c7d7bd79d3005111ebf908bf6481fae59a18d1e0cc016ee18a4 3046022100edcf06814abf0927164901a5d285391b3a6f721305f33240f62548a34c3dc1b5022100c3b64de5e7a75362c2a65a02b45d442818dab2ba78c85d8200e0bcbafad42146 3045022100dc2c6e20f545314c4ebe519d225f451727c5990fd86d98656dfa60c2c3346c7102202f3ae811f869c47774bf0862d432b591c1881ceecde63187169501748d4dbe7d 304402203186761533d08c3f063110665ddd35dae5523efea98c39e5cdf56ede6c26f6db022046fbd97af59f1432f54188324cfd548ab812dc355a30d5bf6bf64c5d61f46463 3046022100d6bfddaf9ab95db4c682389bc4c6e8fc0d95185ab633d4758f00c2843d712c84022100e0ac6bdeb2b223fe3d64abce97363aa5c93f9dc3f009ea1861a4e7c5ff3af733 304402202d0f9b07435df9de09c15c8b0acc5573748e0d009f935c8cd2f930472d9c735302201a8ae1b0405a1864a89e903c10ff372db964ea78229f4138e4cbbbc04cec7826 304402201c0981264ba9056579b8270e097c8e13952023d5791758e6cf5028a244d34d1902200dc458f513e4ff9e2cbafadbb69bdf9b3c65c6a23a8401a569c991300ee12766 3046022100e7c73dc4fa3cc51a07cb35c2a22887d7899d40ddd8a1cdc30245bd2a387354b90221008b7b1e84fd42624f07d367dc1b7fe69c8bfc6318ab83c4173c037ccad1cd41cc 30450220557aa17bd29f68abb3eedbb493d98f75b700f650d82e5ec34fc793688943673e022100fb3e203ad606d212bbb69054a48cbc9bf040e9ef99009befbefaf28934b7367e 304502201e6eff15bfa6ac6991197afa65cbce28b8ed1a66a32cdf6b8dc95a0259025753022100b86cc60a7fe0ec970d48ba48c9718d0c47621a74b7191d665294844ed2352fb2 3046022100c43854b8a08c971f223cb6dd5c18bfe3a6df7916ff744cd5307dc4dfa17dd35d02210087925505707d050346d9d7f9c2e0dc6889bb63dec2df278414cfc82b90da7f68 3045022100ac37288f11b97698839bd71798e237378e506339bef21d499cdd728767a583bf02207ef450e92222db574684445506d426de1d0395001df27a058a83f94a8657d7a8 
3045022100a4ebbb13f3103a2cffd30153ae5f1513a9923b1916006fa745a54581b54f2d0502203a8bff5ed72b8a8dea60b74492be2e816bcf890ad4ea7a394bb7553274a79f4c 3046022100b63d20ad1a58138bd89aa270d25c30eb5f2ad663481fcf36af0b7f22e2906c8d022100c3013f597323d2c175f33f22b7c7ed1c4cca56930b051a89ab71b1e8d4ae6d09 304402200494365dbaea96e5b4acadec7a3bb6d1da18682a03acfb200c9f3341d4111d3602205a985f17efc281b048f17cb0d4e1c9a7e7b41db8d0de8102c361371330e707bf 30440220042bb07ebf6d1eb7f2418329789da9082351b9a04acee16411edb55108a28fcf0220201bdd06fddb6f0dfad42cc5e42d1a22639418e48bc11e914fb2654bb0598e93 3046022100f20bc1832faef9ec385fb1d950acd404603af070584759633aa676bcf2a1993d022100ed6d085cb858155c99fd1cc109f9a6aacf64bc557fe449af33764e2e12b5595f 3045022100e524e4a729c92e01528fa3b54cc471c003d14b50234b6ba7f50cadf58073068602202ae36be704c9fdecad689d507f53f26678bd63a2ea2698b4100a05e2de6dd116 3045022100d8e6a916cce52f2dcebd74f276c008330a46e4683cbd65847c714bac4b64e155022064dc61d08cdbb57ad075ed98fba200ea9c4fa768014e1b7ea48277dd1b3d7056 3045022100add721b6a1840c772ef410ca582ecdf4483468d533ece05d0fb7dbd972de47140220417cba5a28347ffa2533d04f951fe5ec8e48185011ef3111f7e8b87ec4741087 3044022055256eecace6d9dd3ada6160204ec90476cc544335e3b3f082561fd14cb8f81e022029d76e7cecd09d99a262122847bc5b62ce0dcc085647ae83c87b6513e0296592 304402202a4404f351c3473fa264027f98c21682a34bdf08b259f7ca1f78a3bbd5e05a06022061b3917ff9ae3b3374cbbd401843b72a81d734972d0abcb2061e799c01f2a07a 304402200846f037cbd9ef20b0b223d95209394ae3c88dccd340212d208fe5c3e971e2c702203c8d3b59afa453445b14903db828235d3fc2f50fd2eb25c9eda6f0c7a30646f3 304502204ea7cb518ead816e5b99a820c9926a2b576d3283df562072c95893f1faf2ece60221009963d5cd7ffadbe559a8d9a7bdcaad4d64fd76e85facc8c78370fa698ed71cc2 3046022100f41925861408eac632cc729630b1e7287d8bbffce608e3a09828ed9fb205af67022100a10962bab3c8b163f5adb93c7421d12128adb1668089055efad2500df5197f8c 3045022100f9d3868782775cea6d81fdda8b9446f42df628df8ef9211d2cd3cf799dabeb5b02204d409eaa031d41f6740df3041aed5a518194861bd0a82bf7a107922200450df5 3045022100c98bb836d104c996e37b6ab4113936c733de76305f55aa1060f6f631bb45b7fd02203df6733996a363f571adf449118000910531d9034ddb8cc3c130a683b27fcd11 3045022000c4acfc00b9be9e1f8279375c2a7ef6ba40a885c56c5194419353365ce90684022100a97b41c5cca2c969d8dd105d196a8270f41791620ea56995a437f6f573751c74 304402207192d75e46d19ad65260891d766a132869c5ff6db6f07db952eda639b155af3c02205fefaa685fc2feddee82e1a32d023af4091dbda2d084f0ad6aabbf3cd8a91c57 3045022005843a4e5a9f9d1e2bce5d375d5605e8db3bfbbe9e403f15314e5485c0889ffa022100d9aa897dd1ef84dc6740f178f53c1157891f1092e4642e0eb0b846e65a5d951b 3046022100b71a87179fe3eb01d1e78ed3ec5038c0850f0f269c468236585b82d7e0187807022100848b7c2cd008ee290b34b05f5a004833ed699984d318da37c18fc1e4285085e3 3044022028c589b338c95fa4e206fec4c7856f5a48e5b96dcbf475996f45023a66a9c3aa02203623a4c4d734cbc9d637f5ebf7ea0cb8da05e8a063bf2f71cde767dd07536c97 304402200b44eec39ea41a1c79800e278e336e5c6eab3654e1bc5685278d09a824754209022067bac322578354c14edbc59f74ba74a38cf3083f4b107969a9b67b1015338f28 30440220585a0fcdc381ea0b4c7a42a24a4f4e6defa25691df3d222e867e2478cd91be7b02203b6c4b424a48d1951fdc2c0200216f784fec33fc0fdca6c71800e3a9ff519361 3046022100a0c5bb21aa0e9cf7384df8685b01f40b82e7016bf55df3d50fba10da18a65d1c02210094ccd1407b2d59d486b6273f688dcedafb3e02e48f872cd6f87f9c5f67e5d991 3044022067327c1111238fd88a4fc7b8664c8f2b885b502b8dae173bd99cafa66925a1cd022079e7b9acc42aaaf7e19c3be85f2cdbdfec43fe1b4377e4eb5eff348ff353f500 
3044022021c3ea5aaac0a05d02f500836e410019f82727b673c5ca73602d642c4e6738a402203c77a909ed32ef454c23df0de09a95f1ee0521999f38e36b7254fd7763635d67 3046022100f10ebba2870df7385f6f96f199aceb20320f74ffacb075498932654218b8c879022100f0f1f72bf2ffe0418d2b87d57f545e785a888f057cd41ae62782461f4bb499ae 304402205c85880ebc750e2a00476a401a9e14cbfdd18e50d4ccda83ad76174c5075425302200c01b01496e4e8c448361eaa1d1e28457f43e358783be1593dfb80d3151ec48d 3046022100fd8450c5589d258a0b13677d19650b3d2d84973f054316f6addcc97fc54e4a0a022100eea962e256702518446d5d87e99ca61d280ed1e70ddda1be496395cb034489b5 3045022068d96252c6a969e9e198722e64a7f6ff643a8e9af35f73157c01bef54ef41953022100c226aabdb6ed1b91c0ab62d95dd41846379648ba3cedd77982034aa2d170e34c 3045022044b8c27c23f615553beaceeb5c072f89a92aefbbdba3ee1431555beffaf118b7022100e6bf9967e7b5283f52376c59f100ec67017bf03df2ff4b4f02bbab5d20cec142 304402204e38ca31f7d9c186643fbb24209ed5c8e497b840406929b3689eff6fb2ec7ddd0220446bc9155a5094d11e6c93d0674b1e74b71db6ddda569c44bd5694954c5c6425 304402203c02700e07803ea6b35548ef87377e7543ad20dbc5f988090a547ba56eb3133b022033e8f13bb63c0f1ff836cc5cec4b7b0a78c0bfdaad939be94f0b2ebd57748fe4 304502204215509b14f94e1b3de9fa182e180207e8a3820075f09b619a30c2be3c88d854022100f9eb66014cdc2297e09a63bab9d0d3e96ec96a3eb89f662b585592ec23fe41b6 30450221009b7b9c80fd3b06d322f89ea6852ad3a0ab4ba6b4206c397ce22056938c20f3df0220766c41994e1f76603b6f20179a99b94b335e5ba71ae7848b017b7226d901d3b8 3045022100e59ced24d4a27386e629e5611ecbc67dab56b09024a62840dfef85d0bd42ec860220258ead85df8a417c9d03753d844eff3ed08983ddd9a87d7eeb20124cc341da9b 3046022100fb809ec41ef59985bb93a8c5ff334529423d0e9a2a4397a980a85991119217c3022100a35b8a41bd518ad873ea348ab52dba7620c41470688a9aa468e24159fba6f21e 3045022023ec910be2e8bf906352a312c38f5a1b7665ae4a442c53362708d50c74d153fc022100b4f99aacc348122886246c951426482a2676b673c381187997034ae79f20d87b 3045022100c0607f5ec02acd763e987220e4401f8c4edd9f0f0a2e07c3490f2cd2b9e1597e022031ce706df5738e9c5f248bf1972f6d8082f1fa0fc42227b2250b2b671c7079f4 3044022047a99ed34c0f1e0e64e7dfc09d5fcf58de9d4a1e2967febafd6b4133113b783302207028ce4f477103c081ff43a33d1093f8ff4ee8e89eccf755b8f3598b2a36028b 304502206b3101b262c9931063ea28103c6c4c91cdd849d8e464168059955d83eec68de9022100bde6ae6e0ea98f992a58bf00dcb2e312c55baa444b8eefa0521964cfd87e6e62 3045022047d3945e1ee52a17d8cde1dbf7c9b2f3299bb33074d77987b90c841f7db0f7e1022100c60c192a8b9850fdebdb658976dd14e824e0bb732bacaf7d85b2bd32184e5736 304502210091ead6f9715aff3d1525009d732de1145b259db03532eb6cec39ec41d55a0d82022057286261fe0361a9ccece0c60b8a67cf382fd6724e0986dac50333b7b8799ee8 3045022100d2d7826f8711ff539bc0bfa94ce0b61bac2d99077faa8fd849fe26c75847a6f202203e3fbe19dc8d2e4f9edce69c9fa53c72068b9def4b2c49274a6fadcef9a6c5a9 3045022014347cd53bcba1de951f4881f060aac7dbe4ff0c4de4a8978fd3a763d0b6a01a022100e206788805659e6a49eb0eb195872f53b046c57219f596bd1c54d08aba7c2d7e 30450220711bf1dcda22ac04ef974e5ed25cbf414435c1aa804681d9a17b6649865127fa0221008b651234b218679ddd4252e5027c2fba1fcad8770750c683490205a74f66b9c2 3044022069aeb415bd160974d50da2aecb706fc58b797ea0de7013bd0faa0762cc05f961022062354c4ac84b37174dc8373ecee7a96934271228cda35b03df049d7c5afd8345 304602210081d634bf7c6b93271688b459135f858c6193d40fee7ef01b99b8a6cb8728a680022100ea2200228763a64b9187c67e1c8c67420b0f2e1c1c94266af7eb6c17ef31da8e 3045022100daf3da7751ad90ff7d6eb1d177c73bbe510987e461242feea1337eb0e9ac9f9402204f44033526b349747d06f3033888ca250ee96749c1811720ff1db00ce5a03a74 
30450220359ba4269085e9a3cfc39a1c3142b988c951fd654404ea91b1cee7145a98a9d2022100c239fe9dece5348cb5f7c61162f740ddde33fbf77a4d787f2bf9dada99b4ff7c 304502202168d86265685b3caf46efbafab35557090fc5defaf196881230a2fc8a050ba5022100b3976d797d68b20fe8e790759713063334d9667fa943dc7d9fd07397c0c7b68b 30450221008b9c6ef00342c6cffafd52e5604c7e82fa70248ee7f8acc72f45b5ec638957350220527c25f5d092b11e0c1504af70e6c192cac7c807649084e70d5e76ae7c684468 3046022100e49bfcc5839d0ee7247cfd5a198b266fdb264b70214fc73e1eadf8181506e1ab022100ad9c4bd4af99fdfda2223bb3ba7b77f38f6cdd0952a767814ddc53de417a1f3c 3045022100cc9c6dc18fb5cb32514c74097d45341f8ebbd47372dbd88367a08d1b5d1c3a5b02205dbfe9114e835f56ab0888078732e1d3dee7f9b2e37d706d234ef16536e36b5b 3045022100f355df9bfae70420bc1a5aca5d11b636281b4770a01b6759d7b66457ca4a6e4602204ba4427eb46a2d64a4526ebebb30c279f5b98c26d5e309045f1ced95012cf1ee 3046022100ab611355a81970df0ced823b1b9d0a1b83eab216661df06e543076307aeeef78022100aeb642ebfb09ae76cc9c202eee6f6653c302ef6ec98565611b78a509ecec29af 304602210090fe87371299cf7c6c43026744f697d35141c2ded7399a7118bac1881de9d799022100c6f4f19fd3726085b7e3d373365ca756014ac4d617ed4f3943b3739d68612472 3045022100a1560b06bac519e757733c034536e6726dbe8f2052775b881c3f45d4e8891bdf02201830f84c05eda794b49931c5ebb134cc29dfd2a2df1791a938e829f0e6f23a12 3044022009dd102b50475cc965891c761e5d69ce0ca61f052e3f8009e528e7a06d4cf922022001d6b1c9e0046b026b72ee0bce3cdba1e172f2366e2a51049d3e19feab1b8649 3045022100ee9fefb31aa6bf21888849dabbb2f259d05d7269c27d87825f604c9f6ebada8602206970271c9adbd5a00999f1df0308d0259dcf354b5d9c9e64d0b5858218a2aedf 3045022100f22053c404110b9b003382f2f3ab80d4dde2f499fa103630733e9c2aa050fd4e022043906eaf0cace641e453a3f2d6941e3902afcf7ac90c4f3008109fa8946164f4 3046022100d5d5fac278955bd8ff61655f861d46888f529b672f673494b08e732ec282ca000221009319bfe386d0d2eef9e4de30eebf02e268eec7429a55b60528e7f1f0f113baf7 3046022100a794ee5fac29395b413a67e07e2df885954d54a6d128ba0150039ef347bf18a2022100e2bf1bf90632908cd860768c01a37e9ac10c713be2e3292d33e5a7040e901643 3046022100d83f7b2f783844a6d9c64523db1bcce386b1afa243d6a7e5839bbebe95e9ad0c0221008150475c0724467e04d4cb1d33795101507aa3483a42d54d855de1c9836c6e26 304502210084350b4c05cab5db0de0de8f0a4a41c02330c3fa95696751eb4b0584ad2f06bc02202e3e7d72e6bf140e840f3ec96eb28e6446680192784f212f43ec104cff267816 3045022100c6c76eabada0d54d4a04e923da335e9fa82bfdc64119efc6432c2f421e3d09b80220046408484c0cec78f6cfc324544f2b76a079dfe202e309b7fc823ad5eba585ed 30450221008a9b54a698c6e1defca76036260845695b3b9cb5a0324b0a919fc8b6917ef9a002207c6b0e7a5b083f90e5ed08c1ba956fb774ea0682db80c8eb11f11517dbd0ac5f 3045022050695c0e83391bc62b9404cb431a462f3f4445384a872c1fb594db5680b59da8022100abcbf455d64703f151e5c2933c84ab4490685a66a22af217823061fdcc3bf973 3045022047314a96bfe085e6afcc721af71a4f02cfd5e79356ecda942c5bb4fab81ef761022100a7469313174772a4789cfc2cc0e7bc432a91d227f5662b26cbb7c7edf13b944e 3045022023b4d714c6d136cda150d1cecd9e812677a710fbe6529bde359221b607c6f9ca022100bf4f409a56e983accc2d6fe3a4aa5603407833c01809a5dd40f54674907b900b 3045022100bf036a0f14105f577b89177c0c55b6098f263386c5b64c21bdf78f35e114b618022044d8d063768fb379cb4e1084760e66114f4963fa55d5f9bc3d59829f643190f7 3045022078e84cc293c08cb854ca658ef0bb0458a437bb75f052c75f43fd322107a4e76602210090e8e9ff0a2c1515f4118b12230538858873dcdae70b0f6c93786e549f4a1f4a 304602210080ad3c44fb01054883d1339a19f0c57d114c0c1b09656c52febe9a73e90477fd022100e7f2450bc4ce51b29a29233b27f566defd7920701a3cf907fb645890cebcc202 
3045022100d93f8cedc829c20118a872bdc8c022900ea2306d49fc1cb527535eb44e0ece4202200a22602e1edbd63c0084ffae2349b6539c24ebb9e4481937d14d6337f1e51acc 3046022100c9a72050099f724a98b61d094a7415e0ceefdb47c0fb19b75192e2d4e3860721022100a5d390ba43704085d8e6546b62277085d681cdfc1735720243b302cea3f903a1 304502207c50f47fc95e57bd33096b8b42290757f81d5f1416baa918ef938ebe9b797e56022100b9c3b5d206bff3ad69d6cecb77169f3d4a3cccacb03d882e82ec7162be84e3db 30450220612046805d697b82722754d394f3fde34ac824100a8661a87877400f0eaddd9e0221009c8024b1fdc2962353b12d4df62fa8f7d38ef0a7174c8d990ec5e8b044646196 304502200a9572675c05b61160b2f797f4859b5af5531a74819b930e6ec65ccc5a12e2d2022100e68c60ad05d2a4021ae05bc8b1313c6cd5d0937831d4dabf9f10badb64b569ab 3046022100b83fb89ae4d67bca56764f66ed83e508158827b4a5fd62b81044868564d7e3780221008020332041c08a1b5f731209002928d641c77df41c705cde2a5cb514efd29574 3044022067daea7703b2fc5bb1e110917de15992c8fe8ff2531446e1f60844cf455777ef02200c3b6734fa256d9bc673f2242a653bcad23cf49a6dbc1779a634e4e635ef5095 3045022032ea9c82435ae04b175f46238ba79318532d1e20c7632ddced0208e7982dd427022100955f2ddfbeda8e8ffa5511ff22b3fb4d8fad98cd23c5adfecdf1ef73c08b278e 3044022056d4f8f54e90b4791abd1c535c80a0ff4919209b9bcaf8a5b8b6dd2a3d7bd0f802206ced022495522cbaeb5cb2dab826802e2f472ac8ee625cdb383248f388f28689 3046022100a390c51e5567b8a191bfa81ff5452940754198514157ea8684ad0d072c4bfd4d022100d9a9931c0e7b914ed388dc8eadd22b90a2c1d2b1b93361924dbbede190046b5d 3045022100fb7ffdc76d0cf2bb25dfc7e143f12611046e46ac165bbdde2ecbaf748e540a9902204d2a51d05f35a427d060c5485f2a8e83204c8e341d77f0d37c8ffafc4b1d110c 304502202ce5b9f85fd626a43e95daf6f9f959b81a4675256331b804f39c3f1707a8bdc3022100a6c99095b79909fdb0fc786e6fd150df082d17edfa67b4a081e421a55a952579 3046022100f8372598492773785562f06e1ad9402bf43a8f45409c6143a401969942d4992c022100cb3f8bce070e40af6bf934898fc48ec2fe9cfc2ea788a18db7ef34e9ce86f83d 3046022100d936005b4b93b0865adea4e4fa94846b7ada00275dc6a0f7dfe017e43a77df720221008354bd89a8f42d5d786f2984bf82523e0c09cb823e9d4d64ac2ff5e3d7c9da5a 3045022100cad9c6a9640e321357887c76bd8ccfa7231fc58fd4915be2cd2b3fddb06590520220049d77f15e0ed458a3ac25cfed31ef0d2b0b599f26d946061fb06fa844da5519 304502201233b430a1fa7868dcd5898d441954c8dc6929215ce4ffcf1fddcc97ce9ae0d9022100d2ac0621e11d658ecd0ff9d1ccd331e84e02a18fefbad34ce9ca920ff0477e5d 3045022100f591ddf0abf9e6ce547fe376e39ff124ab653716a2375af005a68644ab0a550902200b7c0f80b97fe90c3b42c1728777b6c89c33441ca09b63ab954dd0f17121f3f6 3044022012aba7fbab087672451ae77e81c671ed6f64f503a6f59dffa2aa5b4d4222cd0002201268825d3fe7ac8b4c4365b1e79214142d93b91bff58d2e143a4151acb147007 3046022100970f1481b4cbf245e5401f8dcbedba07679589e7248a1e2e69cc685f6ef36b6b022100c0f4ac84f69d1c9187f53f104553420de73aafccb78814e94a6b5676b8846a57 3045022100be1f598732287aa24f39ad39381003f3f32b33179f6bf7ab2fe141a0707a0b06022028116eec452b24f4f5708af933421b0fc77008ff70afc39e692ab6a045424e99 30450220516406df004c20f6ccc3209ef63d1395bcabd4075a052c22051bd057c811af4e022100e8e6a949a6e546606956c3eb16a020f7029aa5aa463c22ffa52314a65bdb1d3f 3045022100acd20d7423bc3eff97c2412740c7dfa93323c66479451be8de7d8cd438903a0802202155e9b2dc44fb374af54904c5f12d14849ab4546e7812a1116a6cdd19330af7 30440220009d77c4d646d523bb20d3bfd18a5dbf2af7b1f29cfe21d87f135d4e6f2c819202207ece9f97ccc5de214d67402f0ef524e46f2b6e2dcded6500e3edcaa6ed253beb 304502207290d77bee54086f08913c2a39b634571bf85f37e94e0f22c03dee5f27a86956022100a188bd4a17a4045d7e0bd23dc6f0bac79996a0a57218d50ccd3abc0748eed3ed 
3044022023e1c2e17bbc80a855efeda1404f75df43ecdf54a7105f3a98c1f124108216d802207cf1b1f82a78fdf807a3f846b048120b8517c9f63e8bcd460063e15b2dfaee29 30450221008231a4b35ed21d69cae697a0d94f27d867ad27ea832ee6d3c03fc3320c9ff31302203d97efe8f9ca684edbd08da8f5a385bd850fb3e3e142ad9d4974a5cfdb2fae72 304502207714353303a789176a5f9b46f34261c33a3179d384135f911a5fa0c37867bc37022100c48b0b4e2629033c4f83f9df35f1726bbbfeb03abdf518e964fb339501ca272e 304502210081c1e323d71942138f1d2433d7902af63fcfb852db7f1c76e6f69b02dad69184022070b054d2464303ecd95358bcadcd2a4d4034edc1af56aa543a5e364fc561d968 3044022056852e672821349420e90e36459edbbfc03bce75eded4605b11ff65e799e5b5202207f84b379552652e6e1002292bb7a6931ee3ce09cd6acc78f16a95da2a79831e3 3045022100c79c3179b4d5c07c84a27231010244cd42e4f81265889e3f6c1c149dc870c09802202cc18dd8822819707c71f473293638c5f5a02d00508706a7a8950d76306cfb11 3045022062d510df17b49a0ae57f45e005ff2fe98ff58e0ace1b6042ad10d804499201d9022100878b177d576c2201513d71154605ea0d5efe42654851f1374c47adff15d2e7fb 3045022100c2f03869a068f0780a4c7c4a37c48ae081bc87749b955e095f7194ca0af3e2070220367e087e233de99ebc3cadc45240ce6855b247826626c404f52f05fb152bcd5c 304402202490f480829fc0c8bb0b05cba1c4fc925d9feeb114239c5629d6695b703abed0022048e8b042618f1ec336ca1e2e215816d844b491d84174e403b4470a5eec34c730 3046022100a4e652d767768b4b731544987c8a735a918c9d9b443129853de5628d7c37ad9202210080c9f60e26d86101bf0a762fe2116e1496b393a3d10fa2ba568b8ac3276dcfbc 30450220078eb9b767657e6e9a53f2c99bf0759880aac3391ccdc2c969b29d0d3745c1d6022100993c2b755b06966896d6667f17529f694db33914cfcbfb33ceeab51a83131e34 3046022100a4ba3182adceccc530fad0175a2234a4ce9183a11a0f6fcd76d440d1d99fb25a022100bf90392ed3877a93d3b43fe870d01d04e16a6d26ae945b182811250f27049397 30440220768dbcad12d6d517ab7942a85c253dbe5c5e84d329d6e4efbb6680529ea0294b022008519acd927f92de951c8b68f4c89b81a4adaf3912296f4ef4f6692fe3177852 3046022100da259f3c05623c352290a51b7b5b57ace92dcebeda24b3f7408af296a6bd804a0221008b654d318c640eef6d42c42d5d89ae6b457667d4f9eec30342aa8949d5733ff5 3044022056a41594904d37bedd6297f7a67e6f0cca820feed658b030844c331a40a15cb2022070dfc03b142dbfc1aa1680b7fb74d0e708121750b1890de9caea3beebce49e88 3046022100eed18614364531d261d13af2100c6c0a228953ee7cf1601c23cdea36c50c0c5c02210094b7cea7fac3a4720be086c275c34549959489526f8f95b08a520b55fdb9c011 30440220172778a4b5525c90d205cfe09ae09c40ef002c2b8ab077c1c08991e286f5c895022011e1b054d048cceef3eb87ea714bdafda5d59b5ee7138b4398a233d14415e017 3044022071c30b5b6753cbc387ab41d11eb1fe939b9bcfff504f2edb8a5df53729b2fded022046405c6afc3676dcc3e51d26453cbb8fc8040cbf512a8f3d60b55a28fcf21366 304502200b44e1f8fb5361a2e23bbd8b2d4e8b293a6f4ae5331151a77adea08a268583d4022100e4abc5a0f2c0b3f6e531d9df141e912029aa778fdd9d3b763c9b84551923643f 30460221009106be916468825c3957083033ac257407415fdc68f0852580be10381370d09e022100d0f7b0896f242cdea2cc3d7b7ea1769042bda0031808929c5a1a200f93cb0804 3046022100af07e1cc1700148458648a36d84d676a8dc6f51e50a24f5a360a16a5541e8730022100e436d607de78d06705f2d9470da6a6393256474fcf3f017f1d17012b975ca077 304402205eec0fe616984472ff806e82cd2ceaec3271c88f4182c0f4abc1dd16414f9768022052eb027ea4cf074e509fc33ecf1b10541b80c6370b1e68ac31a49f88eb479655 3046022100f6454920d53ff96ad1d158c70180de143c4d26b2c4779be0164bdf7cc9ea9de7022100b8261f8936124df2821a2667fa1e14a07f4011af0251205d1c781d0bdf01b058 3044022023b1780b2e33b37f0b4192af4ede034b585240b5025ab4f096eeb6199e3abe0d02201668f262f199206b28473696326774a7bab9e31ca673bfd1f376a3612842c58f 
3045022100e26f3ef5011b33fcbf38a36a7667dab65026fc9bac6f832ee703eb91f31ce51d022065928b625a6a85609c915f49153fe20cb8c485b62b673241016547940c174138 3046022100c0cccbfa9617632da2e0e94d9de3cda03e541fe916e4a0e6b9d67f163cee67d0022100c01789b0874129543c518c3d36ecc6db3999c8d8c4008a90880fe9ea74a583ae 304402205ca9ef9849a1bf9669b060914fd3c55f90af9de848caa371d2f3e5791bd2946602203191668292bd5bc71350f95894000fe005c5ea1129c318c667046a22cb6e9dd2 304402205e8de5cd8d9b4f63b1f3fd346ab5bedb3e3d0253f344ae3faf6147d79b304adb022021ef123a78656c4bd8bad7b31559abd5a8aca22374073117182e0f1a27687399 3045022100ccafe9cfbf32d5652e397830416182ccabdbbc70850422ade6870ed8901e77d702207aca239ef6071f338b5a7e91ca969da1633cd1e9e4d97b75fa15b5d31bdb1ac7 30450220699c38dc6397901d1b7953d3c238876b19e5957b5b70067fcfe2c342b8ea8d4502210090e41a02541e9846fea5c1c76b94355c07dc6522592faf44568acc2fe45bd225 30440220301c8dfa88a90defd2b160b1db4b2793e5125ec0f8b7842ac8c445ab3aa45a3002204d32a6838f7493dcefdd9617b3826e0d4ba7ee067cdbc1eaf1a0d851c3f89947 304402203cabf187305d5a8f73a598b78599fdc62b9cde52538e978b3e9e0c9bf1f59dab02200d9fd181e0692886b7888fca89f12ee2341e34d2424f5136e395198775d52a78 3045022100dcfa6a8ec3aa6d88102a4484fafb907268ba94c9868cb8a93b644d678c58171e02207a6b5ff18be024c5f6350d6f58b93b1cd48847cbcc6a338d5246b0047d7a797a 3046022100e39f504b905b93035496b14e820c9ad8579ec1f01b119ea036cbddb88d17ee8a022100e7d5b3f1f8692d2c0b62c2526b51f2fb581ad24fe89b0d3ee9bebe145b39b9f4 3045022100909620124a415df308c649adf231d3f436b58a79afca2969a55e3b0a2a5ac06002203be066cc74b22deb53d8536ffd5be796facab72596ecb61f648a7b8f5e8223be 3044022015f79f440a318df42c70eb90636c95288ce8e75f3e4ca8c2f4e75d8c5c10f73902206020f0a86ffb7380acaba2655df99e1ca7b4eb3470ab10987e2de27698faf6e5 3044022059d3c82ec943225480960b34216ed89074243dbefa06ebaf0f179e45d94d6be802205b9a1018f4f9fc3261d71e68d765761027b74fe5bb74844277d659a18b93748c 3044022015f0ad8eb3672ee485cad4dbe310d2e76a822c325483dcfcf03107503751ef950220208bfeb39cd23389a5d36331dced06263f9c2e7e0bbe2df08a681922fe76631b 3045022100d2ec7e8ff4590f7574d4d73ea52e1e697b662d4a246e1a10d27b9320de0f4b7b02206cd5716ebc279c4c75c3fa02d694b36293a08f04ae570506664cec84f198d4ad 304402203596b4d261d335151b519df622517170239e25c51ffe3383ac5c6aeb6969210802206e54489ff1836c639948878471ca42bab0afb4454bdba36f649c5863586a3a77 304502201fa5b86e3cfb011e754aab6b53d403a7d9856694eb83045967b28a509240ced3022100b3003bdd44f4bcbd380c2e3beb8df831fbe192f5b9335c389499c7872431a6c9 304402203facd571898bbd26e3ca8d95d3620783c0084b96280dc0880c181217199ccc37022063c6ded8cd38506991e16aea8a3d50504405fcd2922242a52954c442a04ec381 304502200ec1a699f11e3040bf2a5fb6b37aa745c64cbafadd1ceb0176d597aa7e785022022100cf1258fc795cb5b60cd5990dbfd03e508fe0188bdd3fed587a8399612fcdaee8 3045022063220116d48b8cadf53afa7e49237534bc208ba2c827e160d1a83cdd16c3e992022100a3b1640dde4c998309e6e14fa02c8d356a24a5d798358015d8f27119d914114b 3045022100d76345211ddf119fe8c74ef6533f0ebf55ea628127ee812994a074546c09428e0220260c2b80cb69a5bd24bd331755bd71a5bddfdcdb9ffd7827a28c7132385926aa 3045022100c00129c9a0ff1601007480eb016d9e2a67a2f575186f0b2f4809b8b0b04d6f6d022007170cd0ad150fdf6000c93223b2b69bae6204906c8896d0cea0441e2de66073 304402202b220ff049059fe637832693543d004646f4f1d04c13ebb14ff2e80fce0472ba02203783de02a72b394c5c69733236160ff8ea03d0aaa7ffe885251e4bb6274c9373 3045022100e1cda8e95610fe0eea7e7e9c5414ffada2d5bf2e00cd808237d32d4a7450c37c022056e9220cc364d095248ce723ad2d3d1524dfb3f4c3ec6b13ca5097d4c2840f81 
3044022028448f5a4302cac351f085faa0bbf05136a7ca9f34125941e6ada690895a7a4902202674c81a6eb293a3364fc4d520d1ec708ac84a74c652b53ad87df1e1c38f3ea8 30440220608d10c4f3a49b1476fbc35bb593f7f4c8156f1ac0d8646739aa70806c869157022038e598d4217f7770bb68035d47512701f4726f33a988b1f67842d883a8cb4483 3046022100a0efb1ebd9c82b0f4858d1c86842caf97ecc9654d4691ab25def7e85663d1ff0022100d155a76fef63b0e1f2afba5dba38659d5a86d61c61db5dbd7c913f9109f8917c 3045022031d5289371e36c696c689eab2b77af5757f466959f54e8645596e51bf8cdac74022100e0fc4740009af28e07a766dd65ff6126500c2d278d4dd768efe6b813c8173dee 3045022100b4f41c32cf6cafed7a8fc6e90aae015acffdff64a13846fe6317d082df6770e5022010a751e6074a61876c3e2c53edb36465a8008de0a8428b57734b9ec41e00390d 30450220593e372fe1f0ec4563d0e7475c4da8c797506cdd35e1bc645e2408a02a24e4700221009c42416f2a497d75179cf8407bd1484e260bb9eee0a30020652b874b42bd9bdd 3044022075a2aefc5e54775779e57feb898ceb4e2bae6717b649dd5174138591439d949702201a36df04c3416a7602d7d4a10cba556e54b2c52d50b139a90d3384bd689c0459 30450220773cc32b8a03a5344c472081e916ca001df11b1cd77e52aad07058a1540166b7022100d4cebd28d3b34f1c8eae9b2bf4b32b5b612cf34d04c0934a9a4a257b483f07bb 30450221008ae5ec265d4473429734dbbd84eee8fbba143a215fc88372f315e09b1516bfa702204c3ee796f52d8bfc365bce086203571151e0cda0171bb727fd5e9ed3e4996cbe 304502205c4e8783c02357c9c4cbf7488258b55a3059460b281470ada6ba8715ee97c8d2022100857357821e85d2afbccda9b26e3f9118ea7619e4dd6f8440166ed92049dc0d6e 304502210087cc8716bbdfe96e0711eca4d1a1584a60e7e2be68db60092e8c12bdf3501ec9022010badd6060d76bf7b2fb7219e4a76f4812f134098b05c4c877dfc65391727b9d 3046022100affb7f588e34f66fc68eab833d59b95caac122ec037abb6bc9c99722ca0b498b022100c29c90c44aad670962c8cadf37978c6d2e9c4ebda056e931723d7f3e5f6d73a4 3045022100e6d579f3379ead0274886ff6d517ac055756494c06c5c275eff58f2a959ca0e20220345cd69ecb1c15b2e0509ae15e45e1decfe6e5b58b4827ecc5ab8b8a23d88ae3 304502201dd4926947f4fc118b1aa5d05a5e27be8dab22adca2707a5574d461a9d8030ad022100af823531e0ab9c4e67d37a50ad8e54f8d70dec38a1a101523b59831fdb14a812 3045022038c57b88317afb7711a8bfcad30475820adcd8786f7ab8174507b24c359c26fe0221009dda4c611bb1f1a58fd6ebc8cb4e9c9afab1b25d0981302947407ab7801fdc44 3045022100be43c05258dfbf7772735811bd70f4b170125eb15c550a010ea6fede1a9d69a202205d5fdaf14f7026f5ea3dabb72d7fef1558c1b1c4c0ecdd42edfd055ddac711e4 3046022100faf5e1b2cc4b578b9b4cfb3c6d45eca00222ebf160085de98a0f783c617451ba022100c72db1e1db647447c9789e6c1e004d02a3c6d75a776b7831f87b3c38f17761a6 3045022100bcc970534edea27bb7ac33ec4b8d2e7e2ae3b22f2dd491c2afeaeaf80d0ec49702200332db398c5380bb15e789ef72122ba4213f265c128e929afbb390fd26e84332 3045022067d7ccf962756c1ef8c67a494db8a3b60b7635ca5aa7629ee032df2ba72cb11a022100cdb0c23c62962e5865fc44e81c94500be35311816ba5a43914a0b87e0e4a1fb1 304402200697aec2d8cbe8fbaedea24a01fe110ff74908f41f1395df956f48c85af59b9e0220138e090cc3508513460b19a214bb3037e3b09062252addf3af3865b3d352481b 304502210091131aadfba7b51a6832b5b603250cd41431ce4f2de85801d793c9776e50ca9502207d23fe48f06dc546243a31ea176a92cf3be0f54c3e9f4214020f6ff188c74804 3045022026e3664eb4e7a7c5982d7f953f558a7f0811956a0d8b5501b70e77b9ba770f800221009a5401c1b13015d802a9c3aa712dfd978c61563ec3b3bf5e4b566eceb085a161 3044022055ef2fc4944a680facf5bb4e15446cd47ea0d66a75ea589ebea1d5f2180be27e02205dfe15c7c3b8aacb0f506d0c587b18facdd070f3d81fbf40612e14a7e67b4de4 304402204d67a8b4f3c007ba63db95d03e844ba47126318caafc750fc663b6620171482f0220377509d790872a9a3165dc5d395c023ca53c022c2d92fa8c9cdb315241df35a1 
3045022100d4419c87ef7c002edb589119b64ed560aeff9320f9cc396cd077cb404c518bb202205c779654f1127f48dda67fe9d2d49831ae22ca1ed4adb4745b9730a99ec1463e 304402203b3d7f2567e72fc79ec4405d2fc13ec88a0102447fa90edf79f710d7a59903cd02202b7dd8cacaea5ca21a59380e94a6de07a30795b0b94d9016934a6e1ddb32db30 304502207d77457b8fc795b0d4eed01c0f8781257610b18cf9257fa1299861c0aaa53b75022100cbf3331db60821dd02e603c7d87ef3aa7ac5cc8faf7371dc808ddbcfcaab62a3 304502210085a0945a0945f2db96278272b06e748c1c36312bd2e8f1aee2925b68d29c8cd102205248df800b6b008c13149ba10ad802b6f448eba4a0517a6b357d71553e041fef 30460221008ce44b231f9bc49fdbbb9551df8334943aa84e0c873535af433b25e81a3e2025022100eb4e1dc02ebea184a501b51f7fe9263fa607fd155ee429788272e589adadbabe 30440220457967e758cde821d3e0f3921598eaf0e2a1006a64cb77641c4ddba7f1dd23ad0220624d1ceeb25830684f772d35827651380a97d4cac726a42b954acbad80fc2ab3 3045022100c3c6cb2d6145c9c3b070b678be9c3069473a662bb879639ff6c9c61d8c952820022037e1b7ab6c1318ae862ac3094e6aec9931d3139b5830d3f826a92dfa1f0f05fc 3046022100e83ac6fc290ad7b9504c7110a92a8b3c184bb19fe6d60a66c5ab376345136991022100c6bd72ff95d3eec95bca606671b6ee3eaeaa3e4b7fa7522ad48eb16bd684f353 3045022100c47233908d26ee8d5fcd8e225d693ad3f6b3e8c15fa9cb8a221c0e3d010a516002204048ac76ce219c2edccafd7c7328196859c67e95438e741c3280276d4704fed2 3045022100e5bad2c9feb8ef6872fb1c0b5e6a6625259beb4310a25c0b74a1e16b55c1198a02203dc6ba6f3a904517dcae7969fe3043024fa9141128082d455c40525cf042a285 3045022100ed95d4a62885023f9daa8066a14c3abeb64ebe5d627e0153413cc717931f635402203d3770a8d20f00af71855c21371a193eea364de4e636e3c4e13a86dad13cf805 3045022100e477c432a11a9ce55f7ca659fe416155ab6c833801155e90232d8089d6e7c704022022ba81c2d12976ef0dc60e1e6439e1b2a31f0db613dc73603b0fb2a7ce092478 3046022100c38a6bc804a8da3d35ca687b54d9640d0545a3bfbfc803c607de71db27594bf0022100897d37af27fdb0ff0abad668c41d334ab3875f7e1964b70fddf157f76f5dddb1 30450220056ea18b5e2a2042c2f1355edf046f81837a3648d7f2c9352abad353ac5da976022100a589ffbe74fddfc5c95aa5fabc6067c7840dce429cbb2e9feccc450cde445e9e 3045022100d91dbb584849a4bdcf54ef6ddf8c58e5bb1d25021eaaf607500f2e62279ade5902200b2bd407602dacdc612c8effc2f7bcad2335cbf1f9e3ac2ca0011c965404f471 3046022100c8080d0da52a1c32ab44a8cae86e656022c59e9012f4baac6c125362f49dc9d0022100f2322b08d925d301925ae280dbd95b077a41b16bc6178b3642793de677ca3b01 30440220348537db05e8a0c54a1a392ba10ea11e9a316e0054547e548e7d2028b54d5d85022063eb8924a987f2114bbc475ee7d0779f2cebb7b3eb614f9197f754265fabed74 304402204f74404b02c705b91ac83d6bc39874ab43530987763f6745e891648c451820a302206b3d1cf9b77de2d027313088e871739658b60e8b817c47767f393efbf1ad453a 304402205fd2c0845da42bfb40ba640ebda8cd39d580a252112addbdf1ef3e6bca9f0351022018c25f9956208f86830abe1622b0d5dcba86b3828e2c5a7e6c9a51cf033b53b0 3045022100b71770d8aa2976d79c9e033281822c325ffadb0f9044edb343d4b0d781de701102203b46ee7de9652230bde53893148402feadcaeb629a027afa144057a734262a61 3044022077d89ea03e3b7fdcbc9b6c634b549f7f98fcdc8b1cd37660b8243b6533e6441c022066ab396a8f4283be631f8b3b21a9d77c9622610c619dff065403c5fbed3139c2 3045022100cafab859e886fcaf87819ae7c5e4bf89b8e00ebd24466264292d65a7fed757f90220191d51c7840afd411954f9239ef8eddfada99438b2b83d10df06cc1a42225dc6 30440220419ac6e29ec4c2d01566de33391eb18eb230d600691c4777997403bf7d131cfb022034d8b1e084852f6f2d957c767eeb92878fe81df95415342ffebfbeb3e8f7201b 304402202355dd0f07e4cbd3c834e195c38986d18efb1b9efac5a818656701fa53902cec02205bc9bd275d006fee002f55e1cca7ba9c127a93100c309335f957923e3eb3a55d 
3045022100fbf16762439547ecaf1cb00a84f9af032c77f143b0cf183f94d3013586185d4e0220617b8efbf683d72f4af586e83525ea312051596679241102b7652ad6975d6fc2 3045022100f60508426659f0e24eed1bdce7f933f9d64d504f5dd860921631d248b4a64ae00220488cd961e930bc41fa48b29add06d666db8eee5003f38bbd4a01cd7abfaa0e9c 3045022100ac7a5cf5f14e5eb6ee8d5fff229a3303eaede62352ccbde4dcd3e2bba7df2e8502207614416d6c008e37e3b55ef13edb8c7e1bd2dac2478f33e87782ad62499d2134 3046022100ae2bbd233ecb6c4bb8a8dca89bcb78baf52280bb8f3b14177918ce5230ff3a22022100c91c0a962160f07cfdf2bd25b19491c070fb667881068d1403fe7c8bdc2ad6a6 3046022100c800135ae8223077b006f9cf9d8a4b6ca9bdae433fd8c8bd339b382b22e99eea022100e7fe52e713131e99f65c90bb3a15afe15170adef0da43a4361dd701602454a0a 3046022100f14ad1770f51944e1dcd2ccd1a47c9ce0d1ad38e9baf9eac556c4614bc048c0a022100a33d7cbec10de5846ee794dd60db9ecede38eb92d75f4897b612674cf39cdde4 3044022062546880ae9855cda31de65e1d22798dd8fde933076d62f223d4d544f6576aaa022016665b6d6cc7ea901590daf89c235722cb29eee5103bb36e41ab8ab5eadf6e95 304402204f2bea61a06140a616e831cedc1198b8ec92b0ea4325578a2c4ce6512a5630f20220424fa213ca00600f4cfe8d0f2b80a055bbd078defa85d852e91fe79d1dd83b0a 304402205e11c1672ae44901a611b635b7dd498607c860eaa4a8848d0c6ebaa49dac89e302203e9c055cca8c3f0ca3d327ec8de194d3ee4a351f9ddb88f66a852116aca9e16e 304402202ff441188549e2e9ee04ce98e003593335dab32417c03a5b03316be10d601c7b022022a7712f8b315387e0362477506187296debd35c932fd51d139cfa041041f187 3046022100d733ccd53c6933c4d1a08e5a6022db0d0936b033480ba29df313c739a4a40044022100fa07a988a3dfe70ff4a18a7b66c5b3f67b33051a9705af2a474dd7a7fc4abb98 3046022100a1acccf6b448a775eb680142e6e543597718d058b40c9f5d19653f92bfb878950221009e541a4ada97ca350fbcbf6ab9104f8978888e581f0db274bbac2be81cbebbfd 3045022100b09838a008eacf981b5f4b535117a23bf6567c497cfb740cb6c76cc53fadc87b02206d10483a2a1d19bf6f9cab67fdc9769393bec1f862699641dfef298f49164671 3044022048d8556ff28339564e89a3107adcde4a40701ab0c11bade5d7b8a643d8ee3a3602201de96098f762aa2e6ee9b5b634d157b1260c964c413ba5659cb50a8b245679ab 304502202cf3000c50cda0f89be0d6ba0db0eb5025ca6c1ea47aad06232dce83e384caa1022100e889718d29432e90bce480558d84aea8ab9725dd0ce252acf5a01ec06a1c3adb 30440220222b532d7d80b3be445d4f58877bf7022fe38c61ce85ab388412ea0cf3725cd6022024eee6af4474d76cd080b7d3a6e11e92108c10391c5115ffb48b1dc95b1d389a 304402204f75f33f419f352d3b736642b88043f1d27c830a6cdf5f62ea46567e8841ea6f02207cea6ba2080c8091f2130b99d6b6ce8ec796b4786c6a38d43d6800f3e40ea150 3045022100e08fd8f175ab5543f3731d3774f5b07b4175c57fc7fcfb5a4c01ec36b41f6c92022060b4e7916c60ded80371f0e1814afe1506b2992f6c17350f8cc8063bf99b0c40 304502203e76a5e92c6f44ca4f071431966c83082d05a0bf8f330337d798108306855b5a022100f7eb29f020dad774002f3c73d7a86c5bb6d399921829292273a367d7d474f6c2 3046022100ca6f23792cdc2249cc3ac47a0a3bac85b74a77cb7330821f3ebf0072ab7dbc0e022100880435f6cbcaca904b39890107881174a51b7f9757541e699dc0cc31ef95f282 30450221008c5dbee4b5031d2c0e9b3d16efaadef4b9df4ec7d85060362930899131a4926d0220654eba5ed818901ff0735ed86cbe0bbb1154f5de5730a3b7e99ad049c83a0bd5 304402203b0b7eab981ef682dc633fb590443e364895c2d500c494dd262661c8bc13fa9d02201db91f6bd04f234669330c192bdb3a2c00d0bcdcf4281b051d47c35ef6b691f6 3046022100f86a4428371a11545308644e58e890f44e418ab21f7a03ebb9e7db5dff7f9805022100a982c83c9ad501e640838441f8dada042c6681e28d54a8152eb62598c2841fc3 3044022047b8409dafd230d3d2654507d1349b4ae5f0d8efe2bce447473b046094dfc58d02205dcacdc23430807bea0393b3c994cea0a58c348d3af795e4e030a1b2caef54b0 
304502200b8502288af49ddd56e16cef032deb56483926d0bb052820490bdd695c652a36022100ab3195fddec15de316324be19fb40227edbc4114b84efcfe6062803a0794f1de 304502202f248668f27407ab1890903ba5a82e824ede2f883259ca975550dc990347851f022100a8eab3e0249d8fb68180f2efec2b82fa2bcd7ac3b36a71916382bb817cf34b60 3045022100cfd5224ec5de4d382cea3ba2f1d7f8790dd2a2eb08adda55c49c09fc9c45d9ae02203f22a92d3dd47142e253e3e4f832cd6f114d8d28774c380ae82809662c25ebb7 304402206c7106b0ba7c9f0e84c69cdfe899e884f9cb47dfbf2fbbf6b919db0e8d5e79e30220445ad22fef31e9fd6da128cf2363c0d6f9e27caf1d5923fa639111db71892f11 3044022008c7a729f7da7636b06eb13fe552989b6bca37f27c1a36d3420429219b6bec8002207a72e1f61d4005f8af361d41ad76cba89107f063ed6704e62df3dd8bc20efd23 304502210090b67644d08c27446bcab4843b19c0c328139e0633ca5d3badcf2abe29f64c2e02207b087cf4e945adea4377a909126a812d047826d1995cc7a8d90355f514783f7f 3045022100c9e93eaf4fa0b66376852ed4a3946d3e81c04d3c23925bcc3fc328c2baba888202206be02c90056cfc79af1077fb49172670efdf518e9dd9ed60ad76a5a3cda95735 3046022100a74eb0278e6fc334623661dcf8c79660e9d2fe5371ca7ef72d7887ba4dcb64ed02210093567b71070ccdcda8a949d869a9d2283eb2c07d13845d643d3f9f99a5f70d8e 3045022100dc92dc464693e07adc2b9ddb168d4ba32925c86dc8c0540afced75e4b781d0d6022077baab9ee8025fdf9cb213cd46ad26158e0948cbf98e32d5235e71bd74232b9f 304502201a170a32ef269918961a0edc1cdbb8ec1bd4d41eaa3cf258e85e7882ace9927b022100cab32d756be72945bff4d163d3d549f4eb17956d70810a2afb5268d8ec24d7fe 3045022051f88fdd379a2eed513295670d30ab0c7d0f583600bdbb1ee1a8ef7e3e1f6efb022100c8aa4c56592fb9dbfd7c9c0cc9124b1fd13359815b325c0e30a1bd2a9b293676 3044022076dcede9ce7ccfcf8a94486020ca54666eecbacefa171568685344dcf5c5269202201c5974f9cfca5d2171e7997fc67aff481daed3658739e223fa73efeb6e6c2a2c 3045022060e81e52ce23297245b29c75c417310340f62ab557348f6bdd69232cda47a42c022100a39f78c377a015c1c90ed715baecbda14172c34e8999b8814fb62c6bbac548ca 3045022100c85b4158f480bd624d51e31fc4eecf479f0637fdbd42e33fb1e8178a36c7c23702207fb7b01b22843c21aa9d2b04afe3a37c187d51cb03d7a7bd2fa0f70257609264 3045022100dcc1688aa0d1eba391c5ead27599b4e643a8f4e25aa510e52dcabf1a3764f47502207377c1c8195a6b2f5b7d6684358d93b8ef806d89825a2d680da7e10438c02138 3046022100fc3d80a673358415f6a71b03c5efe5a2f5c9bc54a5345a75eedfbba8b4094294022100a3c9f5c605d96ffea1d6e0fd22f98ae28ac59a5b0bcba773a899f62ab6fa1973 304602210091270b209d94808e1b2818a634e2e7d131765b0193d6da50bd95edfc7bfee821022100eabd7971fed03d774e0711bfd58723dbd7a9d16d1135731170078326ce68bc1c 30440220065bef944df8fe1e28e65fbdfce04536c1f77e25794146b2cd8f8f713d6d02e00220640e02b95d1833214048c6b07bbf3ae8b8865a7970efc040ad49085cb28b45e5 304502207cd55d94808c44b21949ad883d7b132924b0271acc73fcc44cdbc17133808cc102210094b3a8a2b659b771b16975164dc0953cf8b12f2e5c33e9be679457bddd794f2a 3046022100a2461a37ae1cea2b88c8c95786bb17620a9278e832d1fc8fbdf80b08ac84dc45022100fffc06f1a420119ce71c4476bdb1efbb1b23adfa487d43b0d28836cd5171b053 3046022100e9fded2e859dd14e65225fe385a3fa795aed5e18a099d316a96669aa38abc359022100cee1eeb93cf7d9b113be9012361858faf0959084b1d53d1a7aa44fc35c2971b9 30450221009f06245b94e02290a9b51f6314992fec5ec8da119c15d2bae77dafb08e6948fe022061a59d1169402dc58e18c15adfcef77800ab8e017a5394e9d86fe9ccc1b36f3f 3045022045dfb491a3e2e4b5f8b8ed7a20b2bb387b844529e3f5e2cb63a0712133a6c70d022100d2a5c5a519750cfd07f6bca9b70cac61abc69f2f0f85379a6e6a476728c13672 304502201faaa4ae5b8813e705405ecbfb23592552df7d6318096c33b68de21b4ee3ec07022100c3f1f60209224fddc5e940c6a8859ad55789f3a3b37a8b848ff6ba935bc9bcf2 
304602210081ae03b72b0c4e4f7338a4dabe680189bb478ca11d8ce7650b75e563007d82e6022100826cf28733d7ea29aa7f1a746d3272311e5a448ac74e6544fb8130f4b183230f 3046022100db3d21c07044448e6a5173e53a2a41d85a4fccf94a5c74c788b7a0b26d4fdc05022100d8f683af9876067322c90651b7a2e147307f2532d36104247a3946f931fd7f0f 3045022100b6e3e8c22700d00d2273b15846703893e4be888dc709c76728abd5db9ddfd68f02201515b933f66c972050574ab5e3a7306a04944febc5c61c3d25303369a6d491e6 304502210093cda202e908e5688f10b11e7936cd8002bc366ff28ab09e4c0dd73c7f0896b202207d27ef9513c7d5629eb3db35d82362469c9ea900889cf67bc2d305194202465a 3045022100e76b7df53e5a4490e64bfa9fd81a9fc4da8982e6b31772d85300bb44f109dce50220712fa7a69dac13373f29cb17ae0e9d0ca0d31343ebf600b396b0f0b028e2f401 3045022100bcf725c2fe07e67492a272f89ce4c960ca45702f97ec859e010747e045c0f5e702205780ec956f0503d0ee397ebb8459ca728dd3b94bf7edd2b589f149ce63c61ce0 3045022054ac322003e3b4d9d22ea542bc861066d38c55d64e5fdbc535285796867cd757022100c8697e50e67a041a82bad3cb7d184876bfa90763efc7f82406618c5082453eb9 3046022100b846032e188a810885299e14e2d6a28b331ae5b609fc4903f670cbb60cf873800221009951db51d5815b32d10b272ba6685c35ebfcc7fd38aa2b5737341d6419171195 3045022100a51362130cd2f38d2a1391e9406550d77c9aa5d49c9a67f645feb85e68bf4c9902203ba7eecbbd7327f362c57fc4c8a5389f4a060e7f445bdb06d9686b05a3a75a0c 3046022100a10ec5c14c1a31e06093bd78fcbacfab330a26aed6084ea66c293a13d8b38f42022100ef56abb27a9704ffdd51e67a3ec0a5edc7e377bd26fbcf94cdebfa92212da606 3044022059fc6a2332ad8c4f6a1ff9bf119d6f092f1e7d9510ab1fbf02df9341951eefc2022043ee16cde60de458ea134e3596b7cbb02806a3f146415aadcaecab8cbdf2e852 3045022100b54480cc1ee6b6bb338f1059866ed6aefdd94151e599577577a5c77b4c39188e022005a567a6509ef292611d2bbedbbebac16a68f8d47a1fad952c057f56c5ad471d 30440220512217402483e57e2aa964731cc17dfb50e05b29506b33060c4a60a9e8d48aaa02202f5749d77dbdcd5c775e60ec1db1131134bc93cd98b7ae8a846aa7153c533974 3046022100dec8a03cc9daba7331f248519f6e354f9c2da139529f7ab10a48fd22d367cdb2022100c92a47bb3228964f515750fd91356250eff1b27eb050b4bf3be156228654a21a 3044022003c12eea07c2bafdbf847c18901cee420f8af5c5c02bdf18c090df9508d2e122022001cf943cf6551df83f8d4c9710a7273b02eba5528c9074e346c50f554575018a 304502205b781bdf7fcb068d82820327c8e612f6626f977ede93b155d861e4cbf14c256c0221008a519d2a68b0b953d1b8d591266b4a2ffd7715e58b25a09cfa06355236f5aab9 3045022043b23f11408f28466c217ed20cfbfa9e16fac5d9e3f85cc20ca6773739b7cc64022100a03fc0109155b72f7ef401b0c98c02ba2235a63f955a8da0451b1992def54c88 304502207546fe58534d8d398225f29f5cfa1f126d805051ad1af4fb2a6a6abd3fa5819e022100d094024f49da40ef5426342823ac1560d3c7fbfd96f2304fac0f5624339368ee 3045022100dedadfc2a33f70f0cbeb7e818199ccaccc111b85b9c81654b678633eb4d180a802200f3f81913c27d45f0734ed565704730eff3da4b4067748a81a6fbdedd65ec100 304502200ae059d84fe85425681cd4ed74f60413c2762371bd97ba914b6e4e60152a02a7022100f82c5bd3f6cec4e4983c0cd212161ad0769966b0afd2c2d77b4c9ecdcfc13ae0 304402200857668352965004610e284db5ba82f397b65d9f377b6685edfe52fef7968a560220256db834f9220ddd4f98ef9ba731e1026dcbbe60e850d569fd3c1706af4ab800 304502203c9f04ea46b9be63ba545a99b028e8847a1d3ac27ee07abeb7b130f399818a3f022100fbdbc2d3e930157db4f259f5d0df3bbc5bd574586ba1f65bb09c4b11887592ee 304402207637ca5425a647cc4c87a091cfb557d8ed7723f9c926c5c531b11dd29de1c0a40220549a120cbf3b25f43f0518aa9fb66f11f2af12a0ca23ee3745e8327c70ae5e5c 304502200daf575638e02101ac7ccee85a5a27665c2e7d2ce4334949b6c20f41277df274022100c0e75faf2657281cc5a9463adcec2ddaee250527b96b9487e12fa53278467267 
304402207c7c7f63c5c2332e12d4caa32330a7211e1d090949e405fdfb5d426f5613d99302205f1a9061c9baa307e7b696eb98e2c3759cc7fc9c55d14b115085d4fee050e1af 304502207ca6c7d8da6710ed52eda815c5ffc544a6373294c9d4feb91e5d259e7a506879022100d95b872e236847295e756060ef491b848000d4ab62d86349cbf04a8335870b43 30460221008ac1dc21a6a9eafb7f5b312b8418fe231e9a2f0b52cfab6426d3750af69408a9022100a5d583252cda7d10180bcf08d2ca3cd175f9569e0d8fcd49323c1fb68f543925 304402200240df883ba368c2fc85dcce80c28c39775e97bc788ce61d3b362335802ecba102205e59896aca353293c3d8c4812813492902723ed7b4c28fcbd518e4de6021975c 304502203cbeb432e6c67af2c6075131575e07d6552d5b0069c20af67dfb5b79ed0ce8ae022100883f706db1fb745405bd58755adfe5cfe90794404fb1b038c5bd7b18dfd2f6f6 30440220266580f3669bc9c795634b85931ac30264834ecec5a12451e8f02c7ea8dcfb5d022002a81e3f6a9c248840a3998035a1ab312eb54af95bfede67e49ff24875350c34 3046022100d71dd6a36a804b73366e6386179375844f10e6472999c9fb93d921ba56370a4c022100c43bf653a62dd26d0931ad6916c0a0cd576be0faaef4d8373f9419addf414a48 3045022062204c496c1c04abfe524efc8792b4296bb1d4e6939110a8474624c2d57a4065022100866be228a1e89ec4e3d2c26304817a000c7eb52fcf8a29056038bc7b413fa808 3046022100e811302f9a025a914c3cd874439eadf080a007d8c1407c8daa5429c482ff5811022100c76deb6705b0a5f6b5c11e163505b6c32c0249014b630b88e9218f5700dcb3f7 30440220174bdefa024c308827beb12595f9423f407389fdd454d0c89dca8112fc44a812022004a90c51bedc0d48d29f3c5a3f16229dbcc452e073772911cdc35911afd1854c 30450221009f5327196ae67d17226ba57033d30e639cdc3945dc685f49ffa605424b32af3d02203fee86466a011409515337aa661ab1fa2368896e6bd0a4f26fdc1736eabe03de 3046022100ca25bbd6fc2fdcd963294d3ebb3a664ebdfa618d983416e559787fe0a4b9610a022100d1f5b476b675dbe7b041c606e53eb0387a305eb06e53e3f50055133fec9748f8 304402206e8a2df41b51567a68f8f1cee567219640be6d9f43a60854c28f7664fef0b6e80220656cc9a9e601b0f5b87e2ffb58bdaf63a2be7231349a4feda4ef6191c010bdd1 3045022048589136b86f34234ad7dc8a1064de6093f14e48d6bb0f1e4b3febf2f0810e57022100c86812b45fa7065bb6b02281cbc00d2f22601d8ba0edb4b90861946df1473323 3045022100eb0182e18a1281fdd01dcb992e8f20a123442879ead3b7fa16a19f4f67e5cf5d022020157ef26dc8740a9bc4f5f6025718f3b4a42d33935ad20867cbbc816e44e3ab 30440220022900038297eb06ef62a4305a7e8512a5a8b6ada95ff7a9a61e736c1f97816202202f82e4bddf11627328b3fca88bb5a85987675cf0371483f8e9fefc3f443ec844 3045022100f151973b80b351aa5424ade6cef27d11dd6809121af7ac959ae218d5006649d2022014f0208d287aded6082fb1d9abc7568a5cffb7fd959095634884adb9036bb943 3044021f08ee38dec9462ad7c63f38cbfc0f9982d78f7ce94825cf6a9b7695139f1dbf022100df09ce423eb55ff85039dc80665c9604d420f0b1bed19a6631b075c2a1544707 304402202720af7ba2c0b4e71c037c2990da0645be0d48ce3f1c7fbc6335033e4be977a502203afd1671b80d3c838685efed868f5a6351674abf89f166624629fc51fc96b1c4 30440220656613974fef2acb5c9e7b45797f1f1ddc89ad72f0ad5dbb73defef6315fb7d702206fe70641d8e49f785bda7a4522af2e972cbf9a77ec2d4fb909fc9d705e6101ca 3046022100e17fa47a97778f026e1be484e2ae71996879ba4c9dc1208c0199e0ec3189f476022100c46fe3abb846c09728d62c199575ee5b4b287ac92a35f8fc8a84a208faeae58a 3046022100f0ecd7387f98f4beee55e3ad8644bd28c1dfb06e23f66aca518ce5207fc92aab022100dc2a6dfa27478c29752643adcc3469d033978c256bd4c1b8a0d5cfa2d14c5a75 3045022100812a41f618d3771b6caec13a7d3b6b246422b0045d3f2d666eff65912cdd13af02203df21d134c66974a1d03ee067cfbbbf1246372df3844462a6e7969eac898f73d 3045022100ec9fc03fa7b21243c597b3ca975da467effe42a13fdcc0186b9b4f69386462c1022036de6a5ef87e4bac9d45154d295e1365ec9a22e27ed1c4c252ae6ca8d95a7ee4 
304402202a7cf39aa30e696c92dd37be290a23d801186e201a2b676857ff23fa6a576829022045ca5309fa5126c210fac9e788a53be68ef979cf56d8abdfe44484ed8549fe67 3044022009b33b6b172b9c8e28e5c0ed25726818e34b89edfbe8f80504deaf9d8dc4fc3f022008aafe27e5effda74226b5c03f2c4c2a7b11f6bacdafd18d47544d616ed8fbd4 30450220253a91ad77a9c61f2bbd2ffe6b2101b646a097716ff58352fa42422660c98aa5022100e799e97ade4fca5ef73b76875394fa87ad36b42da94f689167fc821f80643856 304602210090ac217ae99460b89769386dd1bc00402956d20bdb91fc5815a0d2e98eb94a1d022100c4239399fe600123f3c5859c88136928ebc32a78638deb20444df410797be97b 304502204760a2534458c460052ab3d4252b3e38bbff149682649e4fdc0019f4b3b691d4022100cab5a1cd81aa784f398a589b3136c5d253413a7684dffcc8b8417f6afb3505d2 3046022100b896794c7e8799fd0f84248a8e80ea44c488faf0bdd457021de06c6f7dbab552022100a0d1d612e56f16a209cb25f2ab1e536ea4d8e1c2ed26b1a74c7549dac48f6b86 3046022100aefbd049e0110e8b68852b113a6bb8f8d2914306d6d1594ba81d6846c166acba022100b77c6f2075f89f82e416b0c18993c95567a2435e859f95ba93f5739dc750b3e1 3044022005363cb364d8bdc7c4c5a9eb34f57078ed9fae1f94507b379d127babefb5cf6902207d12f97ac068617178ae739aecd28a69118ea2769ce3a44d28db0f5327befa6b 30450221008119d314c919bcd2822a44b7a7fc2a55814418f4acbc06647bcfa48c543099c402202505b14acadd73ec5081105c4c3e389a0de385691e2632b3f486e8ccbd9b6439 3045022039da7e47d0d79c513d2e97a41cb50a8864f3be3234615137553041579ffff85602210088bd08b792dd385af6c1714a95c3b4bdc35eff9cc61bd416f451c36b0dead859 30450220652c3fa6c14049f23ebcd669f734705265ea50f404366f56443c10eab0b64241022100bbebc9a3e1cb05d4ba0540465bca1dffb3ca16623980adf143fa77a1f47edc73 304402200bb1edde774a6e9b6de57af1694c88574d43fa71df86a0504d2cb188540323230220181d4ff7738a35df7a7ee6d69f7727281ddd06f80ba1e1e7883b48445f6f7e36 304502200aab8e857d32f7bfcb477e9c6e0092a7d503a18e846d0d2f72e1676d3f304591022100efb0ee6d46695ec235df40bdf9cce8096882671b0da1b25e802df9460ce164af 3046022100e4a5734544049aa025da8ff3ce35e9238f4d3a3c8364407f4118b3b5ada95215022100be216018ebb836d41aca457825739dd6241f02855d899b5a9f9ddb598526fb0f 3046022100a8ead3384a7668afca3fb3188e778c4fe21c4df6ef1e4695b862d8a5d9f087e0022100e06afad19045644a3ca8cd8b57c46be48f84f52d2a505e05e221f93de8404d65 3046022100dafc8081bf190d232f23651bab8872535c3a3fc3df7ffb533a3f74292d4d6fe1022100a5464049d50213bd051f662e166e537d57a0f8757fd8df0cf1db734ec18a8be9 30450220259a59558217c9f92f2be674c24b22577c193d52c957a9118e71cb38dbd67c7c022100e84fe6b0a076af00af6303a0ac74a80af8dc1a840b3bcdad36ca43bad5deb681 304502202b76b2d388b6654c32cf1c6843d76502aa61b715dc877ba70ca7fcf822a8c23302210082a608b005c4afa1d33956166f34c7b0cd1699b3afe2695de61ffb9f6b3e6f64 3044022076de345e2bb1d080ba8687bef77a1f6cbb63bbc1fd609af145a298798497cfb4022015fd65c3423d2993c51561190aabbd267bc0bcc9337ce6872eb9ab154ea0480b 3044022100e54c465b0017cd9f01686a4c5df6b722212ad658343886ee6e72ff28eca3f2b3021f182aa224f9944a29f44ecd694925590a2737857b79eaa7048890e273708c71 3045022056cd40e0b61b6fdae7cb71c8a4b24d79ae4c434732bd565fb1ccf36da35c6997022100e09f21045305708bb2e9cf2ebb6e8f2f9480a5ad71b5f8c697e0808947735b41 304402203753728833067f5567fe9167c0c09e94f91f37994a0f0f6fb6541a6dc3262568022012d597ae9a19b994ed9bdc66efef332f994f0a46f2eb919c6f2fc694963b9b64 3046022100f01578e5e6474995707caba8c0a8b7f7146be806bd1a3e4cb8d8218d7f332d1c022100a3679ac29b130e4bf3a865650de14e02fc36313a318f84159c0e42b2e7e6f438 304402207217272c364910a74efd451df39cdeb3bc4f1846f0c8ce30a189675139c4fa95022003dbce70f60b44d6a1dfbc7480761d83ddb187168321130d1958b02cf6dfc1e8 
3045022012c7373bf1f12e986e9e41f6d41c7ffd3b9a716ba95038ca15741cb3648efc4e022100f8775c67c51c9ea241b4ca75aa0234b3bd1c33103d22762cfb0bb17955ea8bf5 30450220708d6648cbf26d910e883999401812b4b59ca0d7e4c2fd044a5590ff4027d9f3022100a9b2277db2d9bc424f5d8500267fc7e6ff600fa40bd5c9c9e23f7f5528fc2f49 3046022100e3365c9aaa11de73edfc2db3a7d778ff45bcb9018949653746c56b074e953017022100d57fb6bf6f599fa9e3d701813821aad656924e3ab9a9686490b09c21e77e4bb8 304402206cc7f6d88b8ce36519721ee42e5bf5cad2fb66a6969c6ac1e792458a50b5809702205f1bcd4432ffbbfcf637e9483ca3f2e4550fb857d57765691643e2d0ed22185d 3045022100ea54ccb2f39884a6e481c25dfa66c6201692a3f39d12a0bdf227d9f05b20050002206c39f991f1e6a2bd04b631baa1dab8dcce42b662f483cd77c9d8021e9bbf9eed 3046022100b4c0ab2261316ad104d9fd744709e5d02ab946d5daee8053f011a2c071b533c0022100986ccb492ff7432ed3799960695f0ff270bb9461414af28f9615d759e093f201 3045022100dc9eb8e3902d7a86b18b8b81077391cbf7a69a601d9ae1dc59f990dc5921fd3102201aa34db87f718b0cc7bdc5437126a93f57029141f91e14c40702c2aef470e648 3046022100a2cb050ed75d3b6899b7d6ff0d0e5c455b88d340dd947af6e50b8dfc6c235096022100f260cbf1f58e79273e5b770fd330373bf18282b4198462cde8e258f440e0edec 3045022100debaf8c3a2a82c8d132b3a81327e65b7c51157d0ebc3cbb5001417eb274a83e602203f933d1e290424efd615363ffad390e4d6e743dc59cc1c5eb5fbf1903849de20 304402203d0b221d932ad876fdfd56fe1b5d55e29e89733287190872cf49514fab147e92022031102bb11cf36c649e643e344c2e69670f49cc22dbcd8ad1122eb0ae1ca815f6 3046022100fc8b169bd42e6d12662ea2e3c23b180b6fc2ebda859e6bd4777730fd805cfd69022100e268921ab0d044316a657b1d7f316c05ab87d490da39281d30d30f688555b3fc 304502202879a85bc92430a67faa2a1ce5be76c9f45e758c53a8a66e4ea19e89d30d9da9022100b2121f4f14f49be7e55c7c8b999d757e4201761eeac78d532730b5d088cad1fd 3045022058adf4b9984e804af30c5c0d495f4df933ebb56206e89fa8a108500c8c138891022100f414ba485152e4336f0ac1f4b83c963558d61feffbcbfb2457687d0450071ff9 3046022100b4638cb28b62c2488246719c1d30c9d88d98f383dd83a5dfcac6be6cfe6c3542022100aa1bd5089903a9c2011e46e670fda400d41c086e899156c68f8e9153bc1d7e7e 3044022060b3498bd0fd0b02f2f49c7f162b3c9121db02c96009faa00d36dbec108c50a2022001826f01bf9625d4689975b800c62137d43c8dc33c31d59164124e20e43ace0c 3045022100efab323c97ec047de695ba4fdd33bea93ba0db72924eebc2a294c91df6807722022039e2b46e5aebe368a40777c62711a1bda4356c3fcc381ae90cae992282849101 3045022100b8ebab171abdff6f0172155893a9e4937843a41ab203b6273b458c42744422c4022075d77f7f5895092408e4f693e489870e88ac960cf3ed48561e740354b8e9d392 30450221009eaeb0119cc1eb2c79935287b6e9164c6085d3b3894c2a6276c916444984f54d022006a587a63b4413f2c22c2985dd53abba4bf5321e6453c5840989caaa8d338e67 3045022100d6d21a74534e45df6267644f5ca9799a48ff16aac1b92b6b941f23a78658d90302201da9223cc77a1dfd6eff077357a5f321d8a9144c8b3c07c0cd69cdb8ae327ae3 30440220745dbf0e24da9c0f3ab74490b45f3e2dfd7c05a8c778fc59c1896deaf078fa260220457df901bb4a11513103d51d739c3da494edf7f23984045200dbd82c9694d5b8 3045022100c94e663fc76c13404daf639069b9f240841806468dd1b19d2e1c9eefc9dc565802203d06729a220787c63195f920112347ff29cd552352cc759029602a5439047f0e 304502210097df61d3080f06b30d74d5e4dd031bce5a24f88bb0319c723885618bfe63960c022034eac3d4128e59500a0b65c9ec4181c885d2ec100cb3ed7a78ae5d30ab01fc38 30440220370fad6c67abd6ef5ac3062e85f3cedc6389bb931d952166b5ae9667c7094aae02205846f6d5dd5ab0acafaef5b4209cf58222b4908f0473e72e44e47c34c17a14b0 3044022003b2b4d4932eb7d4dd35da0321f057dc7ebee988d52cddc3462305eeeaa663a2022002a87a442d444488884da3148e4a277f776f90649f694498ccaabac7160c4ad7 
30450220359d792d24096888fd72d879d6eb96bb7375c0e1a525f0db56ce8be3a4969a3e022100de48a8de7133b7d559a54c9799c1df6da2ce3d138d2a25db479369128d0d336f 3044022058e73cc587d1427dd5042fe1fbf28efe7620eb319755dd2641b74cc68d8845e30220422e1cc62c844859b458e35e4ee5aab294dd7df596fb88ddf80d9a8a2bf55ffa 304502205b8795fdca0dceeaf61adb038bcb43c1167dc27df5a0cd568d2d918c41bbd410022100c00a5bc6fe7794c0ba5fd4a3a2f65c4d5c4cf34d218076ec23b9f90d4caecf80 304502210098280b92e9f1972229a98fc7b7d729ddf3fd6acc64380210b8b9b7b84edf4919022026b8434b2a2879015f0f384fef790c241abd37f642bc0475f86ea8de9d38590e 304502207cc6cd595107a45d3740d3c193044400820785f28accf31f80ebdcf07ff287aa02210094d97ba257e2106dae0760ebd7ac03e9a98e0de0bb890a985014e7380513ac77 3045022100ae48b1b053e99473cc876bd427d5b7748ef80c01f6c3e3e28413a4a1ad04a3c30220737745f8cafbb3400f3675d23e95962a3216a376434730dab21e271fbc36d77b 304502201748f635671863895ebaf50ba398657560824d33409f01dbf9f28ff634f1bcea0221008635a11fc7ecb88bd66256eede946cadc45aa8b3749cff62ea10d62d77e57d99 3046022100b38efb651f87c8bdc8173657b9cc97d44aa9efcca7783e6ad6b43b9a7d09ff75022100b47e0e1421cc5ee6b280eb939725f335c5b24338d012b044c2c90e5e1e8c7bc8 3045022100acd9fde1a10575dbb649c8db83f629c44f8372d758aed5b76303fff09f1ccf7f022044db4118c54b3d756f3a40cb46dbbf00e87739c82ee6a21ff5d5254e02d6a136 3046022100aa4c65f498bc46e56b897ffd8ea01c9e77e20eb2d32ab007a1efdef2dfa87027022100aeaf0764c267e8693aba047101770a118bc07c3ab786877ddc035bcb80c40855 3046022100b2c7815c414745f7b861e9f70a97f23d6d3e353e68990fa0330c25a77b3bc14c022100dd7dc9184694b3ee411b0b067b5c92476c54615246bcde3b3ff7cd19b1edd57c 304502205d84d57e6b2bc29cf0da19d279170121634085721e2f389b85440e75d838a613022100c106ec4d1b137a2cbb640635b18c47bf2e7b04cf4fdc26e88b008aa1dea0176b 3045022100e0158f882f63703b49e6821b9ca6971334d7b7503dc1d41c41f428bb0b7cd6c602206b6b1fb2538d2e8b47e8e9c9caeea0fb2fd1370bbe29265a1a27113036b01dfc 3045022100a9352dcb2ebebb455216aa454a96c698df49465106b1cc5aaa8de26b6f3135c00220027784c052cbe06d99511a3ffd8be6daffb54c7b18506ae441b3d6628a240756 3045022100a1efa5177b16627980e0a02687374611697a3f8dd2716c945599729713b852f402201778d13e953059208ece9582a6878bfc15d52ef4f8268a43deb38531feadc810 30440220576cc0b165bfb486772f1a1c0071e86245ecb5b6eaddac9fdf4ac5f8699b4e5d022036e12ec91954902197f0d81ff7be5e7800a74376a6bdb72c97cb400ba34b2ad7 3045022100fb67fbc4fa38f59cbae80d3fcef2a08be2f3adc6af96c3cc98828f71fec69371022040792625b5649adf31256be048678266018b9e46412feef156c50a4df1733478 3045022033068b7f60f4cf21c1f50cdb4f6590be104cbb0939c887e1c225480cd6cb25f1022100caf7476079a537ca5a72ccae3d9245b9e295747ab4a31d5e09371a21d3347135 3046022100d0755ba6205947f6508d740908c8a24ba9cb9ef5f936d5ef70ece25f9428b7e6022100f71e67cd678d67708a65699d7bb99de20eb04d63e2935eedb78965a66a285c87 3046022100bec0db0a917df5f681e250ef77269abb4680cfc17e811c1ce8b019fb3add7e6e022100bd2329fc8cd7eee979fe266cd2e17e1050b9e06685ecd688b81d4f17f544db14 30450220462a028da6e702309555bcc9999090f526fd2739215ef2ba03dfe8f7425569d9022100d36335b622e51058cecdd112f3d1699e08e4c502a5e791eeef74a77f62e20324 3045022100a9ed5806e605d08fc93bd54b1ebc08c4e2ab85d2b0517c2cb280ff4aa3220b5602206c7cce88b9c982a44eb81fea17d305ac3a3e547fe418c8861dbba6f6cbc0d99c 3045022100feedf8bdfc486344b8d7ed98603f3f5b8842f50cfaab7ab7388a33a533c798110220369ed117e4c428163dbdf4bac7290a8708f82dc190dcb88136d3e2da70100e19 3045022047e514de002b1b1e263096234d9f24fbabcf5bd639a44ab9cacf8f0b1a1defbd022100c33fb7f2b6100c57f2b68a799cc013d2cffd88cad0857443d9cf5f5c2f72f276 
3046022100bc9b823511773ff48a794bab7ce33b415060daf5ba3f1fc5e8e861aba3b93516022100ad1684b841896c28de2602768630f1f2e94d49755e168692c67784c8788afc7d 3046022100b7c9f93902327c4393d8ae3c614ef3274e9581dac809a0effa88cc8bef236d13022100b4c0deb35adb50cf3508f0c3977872d0f41d6a3ba2d0ab4e17658d743b27763a 3046022100f43e208de79a253b40d8ca6a20905702bfcfd34ba623992eefea3ef08f47490f0221008da72f5e33d2d9207f2a199915b8b6bb9f5349b952929513fdaba9e073b632dc 30450221009c4be5d80b2ebffc766eb36f721bdb187db0b628d57e77614e907f49f0e85932022046565484e6742f8f531d52bd28e855651d6c66e7a4e68e5093b536f369e79c30 3045022100add653bcf140a162f822cf5be891f19df8877b12fe49d25cc3ad08d2a1f4abb6022037935c23a5c2e9bc65cc6f5b650889b58965a47eebb344e1d87760abf1e057d7 304402202251238ddaa8a962dac65b774ac3916cfabac4016a4cf9785d03df75fd95bb1f02202687daa1f57bdb0f05f3b97d1e57a2f2d7339389d4558394eb5e561fa19a82e4 304502203d0e3047708ff7711a441d6e69e63142ea0b1ec1057e19156b3de9cf076600f4022100ccf47921fc45c63855456054d79942a7ff9cb87c852ba1895bb3ac811f6ccaf2 30440220081c4c397ac674220e2affa692f9e633a10289089d799a162962a63c0638dc2902206d2a63d34fcb74cefd0905a1872cf6de21729c6edd8b14bc8d8043492a270343 3046022100f8961b6fe8bdd5357f2b3f818b052ac7e10ffe2704efa8e1a356d45f3a87a3f1022100f098a125761faf09858d0957717a2328794da169b54f6c49467bb149a55e6671 30460221008a9bbfdba1908bb9240f5c2e4ffd32e0ef4c7aecc4a2874dd6b1e9f8743e02db022100f8ee3894a6023a6e70be1ac25c33ec064ebb8363d0c420c7a7852d850c5feb5d 3045022025d7cabb5aca7d6a219e8f73eee31701ca0fa53520b92e984ab7bdf5a5b1d7e9022100c497a4e69b6d4e2b0c7c6880b40c30a6d304d7d50610a18483f82746e75d92c9 3045022100e0cc9642cdda882e6ddcecdc6573c94876c2f714ad9bda3fc56a23638d297bc6022000ebae2f7dd62e50ccd019408ebaab05d75f382afdd4df53ce000e0d9b525e6f 3045022100866129afd9c4a446f8a9ee3bed0bf1331e11fdfa70128930a9bcda65fd4e0723022027757a753d1b7eee86addac46cfab2a792bdb14da945e76e0f5596e8bf90a0ed 30450220760bdf5bf9e36983446e047c77edb664fed8d84b77110b62d1c75b4858dd7011022100fc5b294496fc43e343b1fa933281a495fa33ed70b69c51c9addb8a710b059f41 3046022100ce9d257c3b6503eae456448007fceffaa12af688702fdbca23c1d57e507acd97022100a55b835b05becd59ec06f8192f7433d9ea073ab36c4a06841c97aac2a6c01f46 304402207169697cffc9b53ae36111fa4cbc1ca6560f546ab06c28c24f9b9baec7a61ce802200c91976ac77a525e8f481e29fd387562eceb2b6d5ffa743d87ef9a0ef9bb67b5 30440220139c3938f418f8da22b7823e70ed078cb807800c7a456400eb915cb1452295cf02207503b64a52b8f92a8aae5eca61ed179e2c132c04576ea8b06df74084fad29429 3045022100a11d52f2c86b0d3e4a9e1399c940c454b1571c61312ebc0a21d985ecee3e61a202202fa9e971416d98984ecedea27f06df728f17a17012c46b152f8dfb0a80185148 304502201d1f21305e42f78db87e84e9bc16cce094406ee6aee52370d8e1501fba442068022100c734ea798e7c75c04fbd18864f20bdf00d75818aa244502c9a4a5f14d927dfc9 3045022100e97f7ed38354cf380cef37878edccc27ad4413e31502f762e57c39937984903c022041b2556303543c6780cab7c4c468adb7eb10fa97e7fb3c193812831a9954c676 3044022034a7414a8a0b794c6eaab0e9eb86169d76e2475010326f5a66472e57f025d0fb02204d4c9aeec6f648cc2bd75ae786c44c6530c208dfb3544dd28d41d17c3df87741 304502201f9af546f0af07f7cdc819f3c904107d14bd57cfb25a40ad030145f2d1becce902210099e5927753e28abfbd00652c083d941938fd4b8b6b9117863e37d419a6fc3224 3045022100c79e6f0048b54506c6e5e9d8b3e06801fe4f13941e8bf093779c2cd1068af264022003034d49512c9ee1f55f6ab7704c4d4752b4cec5477991fec165cdee2b31a109 3045022100adea495113f217a83a20d2332c0133f0776bdf4e50d70d073e9739e83b8b53a402201d3bb6814c139c155a0865ae86094cfbb3e86662f0b5ade296c8dccc61eee338 
30440220714a57c380c710c12bba746c4878c868be70efbdd43287c9b9400298287209f1022032f6c108e0568637dd3a3ceecbf1673f24c27e40dcc403062874c842f9091b7f 3044022053e4f73a7dbc9236c5f7a55d93bdc18d614517165510d17693205185fad10d120220612bf7f6368465f04d1fca7a0ea581d3d912e4b9f75dfdbbf397c952be0c107d 30440220754935192fa3ef2bc018d55187d11b730d8007c3d54e6a219cede30fb834264d0220533d9cd924138e296072a5a63365b63a3d1c97f8a9b5df59e5527cfa5c3473c5 3045022019e1bd42d451539ba180a1b847a2423b9734be83a69c3e5e452dc4f53d36eac5022100df18c48a9fe983d7ce3b7909c0fee8a79013f9025782773f1d7cc7183716e823 3045022100dc01ed7efc7f4a034693fd82237220fa008ed0783e1970da10b2d0c383d61dc30220121a5d0e1552972fac836b06c3c1bc7082f5e9c41dad6bf566dcb14ad14f4cf9 304502203e50071a855673042c3eecee20d4b811f0e0b2e46aace69e1d6eb23e8f36366c0221009f400004c93c664aaa4964b099449b35da07f95590b09c334675b9e48105e1c8 3045022100e46a8f7f55b681d6447916778f1730c74f15d8022e723da8a61108f660b57c1b022070e98f64b954085567914032739038dd61eedf60ba1ad9d0fe6218296f88c190 3045022100ac3d1b9c675d0f28bd8ae24835ac690b28d143441b921ebd2fdedb5d711d366502204f987b7c4d26803180575cdc22a2217e0ba65b21e215e20e3bd3b2259469f3e1 30460221009b273b81e9ff632a52868346c1e2071afb7eccd0a3dcb676943776b67e38ef8f022100ddf930ba73b1441be9aa6fabf223ea37d643f4dd72523a54ab579ed93b22205a 304602210091528021efb75f77787e5a8a1d55e9443085b1374fb046a43e34096697344b02022100f24b188e608701e5cea37d92c45caf0fb8d586bfdfc8dcd8b25bd8e565379b18 30440220373b759d38b866c040ed5e4488e239763ffc5fa177b50d5e7cb3fcc1b250c3ad02207e8db98ea3daf4e182433a513b35640ff65e0864a454fb280c99511bee8a49bf 30450220505d58037aaa0ce061d6ce88bd49a9e06101eb8529dbda7be30e1053f7e5c00f022100a8a537aa379955769804c0b333ce1e489c4a442a968397abf735771071b01204 304402202c796b89d97dc486bcb570e3a6fe1f66fbe4fd5dc4b2a528caeb245df5f07f82022063c849f24b22a99c46f2e11e135ab2dfee07541c4a5a4019ac12fa6e06be3871 3046022100812e01684d67724ab119f3fe115385c52b728799c0173027ce4c983373eca91c022100f44223ace2036a529ae1a4bc217ff8638971a78e39bc375b1cc346fbb13f499e 30450221008e25c5ba9891ea32641f813f7f88f840e7b273611ad5974517a15c60f6f32fde022010810abc6402d35717ab29449cc7b306e8fa29dd97c7fdc1e7ff68286c211927 30450220281c2811d544109be10bb87fa99f20e43fffb2c6bfdec14eefebf423e855bff9022100d2a7d653cfad0f67b8d3939cb31235b3a63d0eb6a05693a7a5cabc1d5c609449 304502206a3ce13d3e0da70e68ae8985a27fb3bdd9dd098426bcf3212224f09ba7c7a617022100f16fccad3d401622d0c17cf4b37e63e66e7a45daeff52390e47d353079c14a49 3045022100e4a08b72f24c986f096a56aa481f94bb0e552b2be7076a11e0e837af34c4b713022073f26116c53598e45ac1fdef01803282027922f892f4216c0d48d48a5bb65a87 3044022022867f06088e8b8a37753c99dec103af66b2a68dc58768f3bad0645342adea2902202eb969131b4ae998b1df4bff92624af961afd8296ab22922221108af62d15e5a 3044022002c6e3e849564dde127508a264a16076078882eac0760f99c509647dad173f2b02203942a8fc61fe388766e5f0036418bcea15b5e8c631ccc33f0587a059c97b51b9 304402200d77bd2c9c8f4b46f636ec8e9417abe3b871804acff40c0ed9f2631d5d436c860220707a4cd11b0df42f166c0c8a051dfeea64ba7016aa7b554822e110c0c734d735 3045022100fc222d53f4b866d80587b73cdddf9b180774809c427ae831e216a53ca1f53ab30220775787980835b3e939ed456a13511dfb0be175a975cccb9571a963a60499566c 3045022100dbb3493e0f73f6d88c6b9bcde8d488176fec020b4ea6f7df268256e49f55cd4d0220051abdede1c46b4f331e9e85e37721bff9cc41bcc73d9b0e5164042647925dd1 3046022100d5c34f2c9e8851bb64da5f01790e8f667a77217d2844d258ce94d13fcc8b6d55022100880e582555773a4faa119b8c27e7e0e2e48b4a420e7e64f5dc4080ddf365173e 
3046022100f6b3756b9e07d9be3c7a139698ed2b5f075be2841fda834d04a7aa6365d43f9102210083b688d4ad3ad85f30089ba6052d7c2756e7b889b3a7bbfc0b23606b65570d06 3046022100e310efee0afe6da56b8b8e721b7c1496ad4bac6fd11ef4b081f86c5d542fd7c7022100808e1c0da2c4fa6c6460a9d0112b3ece4772c5945abfb6972c48fac62bf9ac94 3045022100e367331799b04476a071be49622004ab38fd8afb7fa05e57b8471091c8c4832c022069cdf0073a3c7d7025e256ebf892b0bfed094704f938fe3f5bd2d0f832083be0 304402204675e30802324e989e11530c31bc218cc430ab938241c9d447e846be92821dbb0220632a06295553c1e883e9579590e61c2f5b8e7091fed62eb537dbc8cfeb4b59e1 304502200caf02f538ac5340d62340fc3ddc9af7bed7ce2bff2bc2002b0dcf388409b216022100b4afdc26959b80506b4eb0132eeb51bc89dfbe9efc3824f9aeb1154b66fa54d9 304402207c1b92766b1c55f865e11806870b6d81446a13e7ff969beedcdf61bf83217660022048106f850fe8dfd849183c7c6c535b6e18b86c4582d18624a2d53f9cf32b2888 304502205178b11885b7acb9416d3803bcf8f3ab5c13c1e863bdbb4c565e9a2e8c4046a5022100a109335c6e80ac20a0df27be7715b5215acf5fb9c2dce2eedeb48e5e2e35bafe 304402204bef27a06910a05c82bb47b2336c3e2817815bc9883c031714378c1f05a422ed0220458456127edab9d5460008c9eb10fe85339af0d4097bff7a023f62906af95787 3045022017f0a014f20acc7b00db7513700c71408fa8725c022b042268b57e099b79ac810221008cb0227da566e09e4680b4293d5cafaca5093f30b295dc13dd096f40c05f0386 30450221008e5e2782cb617e501c72dd148d24de5cde048d4a4a8d30d30dfc4cb82f61243902206a37962b6866f9f7eddd75031ad37a4f770fd18853fc5cbf5d99530a9074c7fc 3046022100ea1bd79c58d681206ae6a4d848d494f8fb807bcd353ac46d5a9ba4e48afed8c8022100898657a5f77aee4f2417f2ea32b345154fad483668f84a228cccb5a9bd6a125f 3046022100c2933f264f41e719be841aa09371f85c21d146fb174e4a52acb60607f8f024b8022100c546c0ff28234dcf30b9adf0ae5cef2eb586dd4cc57cc031408377ecf218bab5 304502204d5ce90296d2c54b932faf2b9b86219aef5e58720cb99de7fefe9463a765f13c0221008f9fa3bb2d98d1466f9e78bb6c2dcab4260a7fe77648ad915d67768637fe333b 304402203412b7e14e8efce0674bd3330d6ca2517fd5df666e824a348fb7a7da6e20e42c0220199b3eb6373477d1b6a743dc6bf3c82c675cffbae14a7fe45cd7b26476643bea 3045022100d67353df3f458373f7ca035b83b37dec03b9550bce383ce90af198718ea7fadf022045a28b92a189553379c8413f01b2480a4253b4226e0430c97f84d815444bef17 30450220485dfcf2ad94334a63a0d3533ef013f7771163fc5cd3c1f69f050ba928660c99022100c6fd0790329f07341806fa135af53bf0562b6414b86080bb277fa72197ae4fc7 304502205fca62b78ab68fa8f840205c2630e9701645105b5fda9db610edf2e4df1b94bb022100e7200055e32be46237a0b30eee64b3bb8a20bdb4e078ed3c1901c193a71c0f4c 304402201c881e909758efe7b917a29743efd2ba515657f4d9abc6903f1e51219a4a52130220082317bc9d7f8d2c062d4f267895b9d45fac855818ed18b0648ff8861c7e31ad 304602210091219a2592fe34f733c4652966c4e06c4ccc780ba449f9c0dae62b8ecb100138022100eb20e02109ffffc029a33dc780d1d69321da63e4e5c4f1aa8e8f9c70f2845dfc 304402207fe82379283e68daac2f4572887de769983e28d041afc0bd8086915795691ec702207842fd85498332ba3a5f62caa3370635d166e00502a2a9a9522d28546da8f9cc 3045022100a564511fcddc8663c1b373df2ee50ce8cfcd3ccffca4406ce76492a929edf1bc02204d541a9ac33e784977b095be0b0e492cba1743f351e41f4ec490b9e8b5934fe4 3046022100df5e341465950bd3d1d5a88ce2463a98c79efada9d9f419d30e020da69e7cfb602210092a956eaf9594808fb9ed8cf4e087d255eb46572b77618ea4dbe860e2e4ad090 3044022046209380daaaee79e9ca83bb26ffdcf6617306f614c26f7d9df6c9c6c3bf049e02200ad68440c411f8f5357d88a26d1820f1a954f1f2c1513b31da86318073152cda 30450221009c48659dc6c3c96c6f34a266a27d39e29ec96a3959594c9099cb9c2f484786110220102e711c8820ef28d6c8688c2b5b5c4a7e84e75a54d22e6cb464bed11259164c 
304502202019ddcd6fa14d22940fe0025412519588b8c44f534f4d8e6b4330ea067b6bc3022100e8231789fd0e88435924958a6700b5f2bce97ad919c274ccf5a7f67bcf540411 3046022100a59ae43252cbe5658e94de752f7e7ac97ba1aad4919995e700003ee888416482022100d318e7320b4032942e4089b8e6d14348cdb753581f8ce7f42326f564fbbae42e 3045022100a7cc40c2d360bfe5710c72669750cea92ec7f3ecef1b061b9937e61bfe68fa5202203d1c0b52df5a05de9ce694296ae643b4c1700684a43376d55f7716eda8b9a933 3046022100d80d729a8d27cc1a8047e020c0a049f741be23d673276cbefc3db1692c40b2e9022100ef6f74ec05bf1e0206ee3eb4e4c2b190de7cba935163aa231f87bf00d878d969 3045022012aba91248bfa072beb87de6816b10c793240522d6c11943fbdc81951ccf4835022100b24ef0354c9322eecf28f4e28dc47cc782e0731afd153eac4a3a6fd0637fdfd6 304502210082e26343bb76df42255affaf15c6e31cbff99729bdabeb9b26b69eaa71141b0402205330da4540d186893bb668e6a35a8f94dcae3121ed2c5ccf869bf4d760b2c64c 304502200e3bba2951d7f2ed5f0a4ad339da352aaebdf055c852699e92c45fd57751600d022100ba8e4e425e2d8f2e25d71406fc52670dc7855ab4a96fb81752f4c694a2a3df04 304502206dadffc17fb8433b472cfe9b329666cf06523f9ab1c396fa35484f957a705a1f022100ef496577372e2193dbde20c6b1cb9591d9af7ce54460a2dccb53ffb90e5c61e5 3045022100d36aa0a73faeb355973428a51902aba5b3e3242e5dbcedfa6be27a17fadc11d902203d136a2e86e93de26c732fff4e2c8aef7b02e6abedd4c28d799081250266a891 304402203bb4e7056f925d4e7697cd3e5f235c6b745b1a0502b44ab028fc34cf7dc4abbd022020d1d00e111a1539f0ceff83767eeb1a6b6048694fca31eac491c6fe9a65cf4d 3045022100e95ac72ea891c9fbdf25412e1ddbbc1ffefacafee99b91dab4f5b0cf1ae5231102205797385dbdf50edc2f776fea060e1c176487577def69f9e2496ddd3ccea59d9d 3044022064f03648bc2b499cabf63f0916fdef7a41e26c1e10c2f71f2af2f2a5b172036f02200a3a773bdc1e3be61921f4e8eba0b3be5a6809f34ec1157564fb092f55fc7a7e 3046022100f82b139e5156c41c151882f88cbc560cecc4237dd64ffc94967a68d7d5b306f1022100fbebc2067eeacc418b7714184a0198da65294de46e8d1672a7a1923b1dd7bd33 3044022014e56fdb579de8224938bb71fde67ac3c3a4878f20743432c3a0e044db49464a0220770bfa2c5b90c2adeaa6dd152e220ddcb704acd9743f26ef70ee28e8a4658f5b 30450221008514337245562d4f0be9bab471278a478eb3e7c8d1ef027b26790cb4297a10bd02200d59a2b5b4240096f5891dddd072680537e2793af518351648cd8cdc0d9b6ddd 30450220557a7bf58a8f86b0b8ceab40a16689039eca5fdf09eff2485ed129ff4f5f2196022100b0c9fbf3a77ee421cb1b62a2d61efb5bf9030a5e9de628e7676c9e8e6e17df9c 3044022011fdebb59e1862abfe58ed6c51f9713c7809886b2acf1373242d66773e87a323022029d71ebd2cf7266883285bea7d32cae236a22cd7490750c0076a3434065299e6 304502200a5f356b91522b29e94029786722b35b34cc564c37abc2d8924be521125f7545022100c34fe81e081fdbf327880772e672139dda67f8df4c73195e95676953d9708adb 3046022100afe9eda18945640607ecc99b53c44e19dc12f3110e9e2811e5d3390945f181d2022100f44f4035b348240099ca316d221088546b5e8f11782e54bd36b0402b64409fbb 304402205e79b183da940f75c5c76cc73cad1487369d28398af7dc3f5a1be3e5b11cfe7b022063e7d73613d7285ff83e5f51dc981faa92e492d386354a41b1402885746feaa3 30460221009a33e71513abca3a8546346d13389742b964c9b308e85ce7da196f019a2ad0a30221008aa1e6f3c94905b0a9a7273d75e6311ab2dd20ecaa1a0e751a53817e8d03795a 30450220316282c947e11ebda5f06c28e15f1d8d3cc6d6cda45a8d03a0be67c875122ee3022100dcc06d814481c6fabf0ba2df758f1e5ffad7802170ee96d6c7a4ab5ccd963b5f 3046022100ef14001660574d76e3448dd48ff459bdf0d7a47e0eef704d146d628f594d36600221008c0884225bfd136af944a4a5799001ce60d78e0d91a76d4072e2626fe07a9857 3044022034f200f747e3d28b099f262a2de01de176195fcbcce7c26ea19d2c169b40733102207b0bc5631ba6e0145e06d7f34e27d1322298a2b403a711d7428832298b8f0f0f 
3045022070ce72b7be62df13d39cbec95ae08b491f4516ce6f820e42a04673d1d2c7b405022100eca70487c8137fba32146dba98930d85c4b5b471793216c5ce72d46dd0197aa2 3045022100a689ebe4f7eefabb03c4ab4f4b5cc8e0dd68c64a620df8480715b891517f32e802202edde8d16a4735b968e2ee8369d800d9bb56573c0ff4607ef204647172b1b682 3046022100f5c54a8a136491c1745f21e867ae69978777409f346c0ac61aac30b90e15e4fd022100c7f46845407e281c5067b1f21e7fc4c5323a748db67ea7f191e5bad52681f635 304502207144f9cc93844ab8433bed2d5ff5e109c84d3700bffdec37132d87a60f2aa564022100d3b7732c8a5631888121d37e5169b89ce3a681858606dba8ddb6acdfe8b192cb 3044022072292225465f58f0b4b0079c1e5c69853741a1b7568c30a3c7de7595e51091a5022041c3a6b0592646edd7d718d0f03336ccd2087365072ffd03437c1843ff5a5ab6 3045022100e50b80817167f5d5e2f5bf7c45302ed489286195e4954c0954c0bc08bee7fd0d02206c54b5fcfab64f92ca703dff08be7fb5b558e587169a9e7b38bfcf8ea88a3edd 3045022100fed334200967b6d1d74705118f36f0cf7f5371e5a5fbd794c4f9987990f87baa02207df0cf57965a61f3aa959c471ae5fce51bb729c1781fdfcdd5c9a8445d63972f 304402202d2ae48e9fdddc1730d5c3a21e0b00cc7d82c4a812acbba1ba58b917b20fad7b02206d858e1760e74fa6a8483a04f7cc2b38394fcfd0cb40a9196079f3192fb31b94 3046022100a474c42bc4354a82091eea2ec470584db8ba41870a6cd4ce44edccff70c3ed8f022100de223323b114307b7aa35d6b8f1d176848026a4422ac5f9fe3a50b081fe99dda 3046022100d6ed326e86bf6de341cff9aefc573568d134df45640b3e9f134520bb5bc1abc4022100ee72d0edf72c2db496c631cac50aafa45827776c8b2c78093e17a291ee97f7f9 304402204a803e67c0666eb12487993eb34becfa7380684604973fd5b47188014c20c29b022025fe355ea4c9ae3d8180ba6abc2bcac1746c5fbed0fc8ecbf4b68b21d848efd8 30440220161118374d138d861384d2465820540b8a09d68772c81ac0df37b9c849d923c80220245e27b6109fefc172deaefce57e32d933fc2bea5a9d2dba1bd80b6f4f80548d 304402206f5e99790f1b45dacd86ed8d91a650a723c4c6487ad39b3ee7fc783a88a751260220361755cf246cddefb238c1f8bbec9ed230831fb2074d76baad35fea830df6632 3046022100b0fb3cd7e091eeefadaa2d8da5d0ed742d8f3d01b3abb0daba831f438e937732022100d8ab201695957fae92aee511f8ba463b62716aeb4c65846f609a514095df9764 30450221009b2cd61e073847c5fadc6e20308a8f6c8eeece48081c9aa786dd2f93eb8c28c702206861a6a7ac8dd78d8696ffaf73a8f039d2a4ecbb633d12aa78b76d7eefd9f721 304402207a9bc132fab6c03c6493403ea08ab1516558d21480c2d43aea4f74a20e28359f022070604950ce7e384aca1a69299b844ee6f68392f2d1eea1d44104645dde3e4b92 3046022100f462326370b50ad242cde52a3f238af39765f569b51689cc2036a9b8713c205a02210082bed301c6014d1342ef05ded66a9e252a7d5ec52a4ea97a083102a988762987 304502207d05619acb302b94d94ef0112cbad2d79094959daeb68b20c7fdd281168f3dc2022100dc7d3db5821b44270450644a6c921c662cb1a7d4eb46bd1a164bb17c3da3d13a 304502200d105f7add452e95f73f09c5b48ac2a415990359ccba8d0de268be4701a5ed89022100b5d7e8e944aafb1ccf7e2c797930e98576e7530c09ef926748b853aed3826273 30450221009a91595ab83d03bc0e48f86ea7f8724b968c4275c8660efd213cde085540ba7e02202622532e11640f8ab181fcbebaa47093aab259bfaa562cdc8f59b352f5f1af03 3045022100b6b84ab32bd43f555c27168bab7b8bda5a61a8f9ef0f9f41bb71b6574cd423600220203890f84e6968d48a460817b8c70e641be1970e0f1459e878bd68cd4bd70023 3046022100b40e1191ebf4731938fd9096c8a509d05db19bf114c5ea3d2deb2e1eb0c9c2c702210094f903790bee4f40835c2dba7748f9df84c4686c3e0fcf61ce73c71c25bbccc3 3046022100ed95fedb44abc29e999ad339b52dbf82f5c7da7e456d706ca728f082852d7065022100f63a4b2f6ebff7a85fd20da8e5ba7e7703c95df92c2e8594e545579d9f7cc0d1 3045022100811c02cff4d9b886afba11b19bb0952112eaa7f96cac4194b74f62cc34398b83022078ef6fadfdb52dfa29c4239202d9b5439e34f9c7db7ff194fde4971488654a92 
30450220330b758d15f11eb03b96c0955a2a36344bb2336a5db47d25e4c902a68f0e8273022100fac8ce59c9046a38efeab6c2258ae469bf3634b550a92bf5bd575a7a77c3f701 304502205b3ce373393f08fbb750a15514d2f1c6ab633008c76f98bdca6eb95e1456646d022100f1092da8b7f25253008ac300e8ad1eb911672ab2b6129457e0b7ba62c76059bb 3046022100e2bae60c446339af09ce903755eed932e19e7abb2ca4b54bc46a6c8157899445022100c55c2e9390de17de61deae3cecc872a0c17e9b1308ec69b7cd3c97c41613af14 3046022100f5be28070958cfafe42f130896b0e80d6bb6ea68d81f8d247d0aeedc3481034b022100f61ee0cf81351ed299d69de481d1c91bc34483619c1929e0eb6219fa082e5afa 3045022100dea723a8c416e577c369b2eb85c8762a09ee0f6647292b324152b6fccef058010220124fc2ca03e26030fdba18b71581aa90979bd4ab9746fd2eed65b2a1ab52b8d2 30450220272c88a22e3a7a868aeeeaecbbbd41bbd108dbacf84cd27bbe2aa31c2491efb1022100a823fa03618c4902d94340d887e96ecb85925dc1b3f45300bf21db8ae1a99612 30450220313b6da698d0fa3c874682cf666792a2a7e787bfc8dd0c7fc7533d04540a261f022100b22653edb9ae106f1564aa70c0bfef968b5e0845047c712582419e7ae414c793 30460221009ad755141b9af15c07f314cdcdcc08766a0ff5e8f88d3406ae509ed0df12d892022100e3d88eb2005f1d97318a4146f13f565d3829e231a384f45ff940fd0d4eda3a83 304402207d7778eedf896f8738bacbdd0d2560d3e911abce752fa935719a99df5c3b2327022028c6dc37728d1bfc3b619c912df510fca2f294d0e8449c7bed2f13ff3fc79796 304602210086bbbf87415306e4f7a90791c141eb97c6b9c60db3f44b7b4fa61ba96c44d209022100981127328c5e902fc63ec6afab410b2cfa9ac8cbca0e2211dff9d882f3b28022 304402201eee4b9be407b1ee5cf108bfad9b6b1ef4d71f4a55a4a4fcfbaaea9bd64a54f30220264fa2c20cbfa1db4253182217d705c0376ce7ea0ed6e8e48b3a66c134e63f3c 3045022078860cdf9f429863b2bef184de4e83ced933be726347aebbf57045dd8e3e379c022100d8ac4b868669a8912dd6230ffd34a136cc3248fd51706f4750a5deda519fa895 3046022100ccf088336ef5618696dc3108157913bf944f4774cdd276753e907f0205f4fb47022100dbcf5f272ff01db05b58202cb81a22eb297d532b1705a5d6323fc580f8b90e6b 3045022100e58bf794247f4a77951bc76d634c2c6e9d77871ae22aba3688d8db2f8b21488f02200eeb280a58c34ae10466688849db2309f3c54aa2e017292cf691f87d78e2e5c7 3046022100c376a7bc994c6f166c247fcd075795eb485dec0aea91c8f58cb88161fac4316f022100cfab2e477faf55344392d598898286e800cc5b3a24dc1f7680d8a48a54e1f7f8 3046022100b915a95d6f94626d40090bc7d4a394b7d41b7bf925f0ef370fdafbf83305db94022100c8c39451a11bfe6734f0bb3d14d0a779fa75258e65a5ce7891a5478500b64084 3045022100fb6382dafe00671cb5b7bf02731dc6e4fcf6daa81aadeee01634f0850fa3e2e0022069a3e69f082f7f103de10970d35983356ad73e2f7cb842ba2d9d3e8c8de02953 304402206656c7cc55ceffb2b2964a6dc57140ab2808b53b79c5285da4e88078312a46b1022037d9fb461a2987e566e91d95133994450dfc06b87c327f382553c4d727a4198c 30440220120663e6263ff406b25024cff3e5b08606e2ff786151f204ab411f586403ba4902200445bcb1aa168db29f9bdca5e7c86bf1fd221522e9c0d692ecb5e96ac4e3d211 304402203c36be9008b69bf6281967836ee3ab9f6081d4297cc1b6deb6dae2250c1c114802205e6c433c5c9b9847f1f282248fd61063f36d3c63b6602585d9deda5c2e041851 3046022100b417ae784ceaf70fe467bbc385ad92807070eca0dbb148b8f3ff73c142c09c91022100f3f2af8e8abf8bb14111a4d562098f3cae01ba4bec49ff30935205d46ca6bddf 304502207d8757e0adf2b7072eb158616a8b6f2503880684d125392f58eaed68a6706a210221009ec6e6a8e187a3621110864305e44a27a3394d216f375fd5c973000a021808e5 304402206265298317e5667bd2dbb288e76ead7bb06fd60840815189531540012b4a65450220086dc62853edafc12641009f253e401e5b465ed18bf2eae0036bac3a3f004616 3046022100eac4197a24a8bb7c5009fa026f822fb5c951d2cacbb217af2b77431c7e589a5d022100e0cfae5abd91d1973b5a723c3013d51a498a41ec3249026b6614e4e6afe2fc34 
3046022100c50b7b61b30566777da14f588465d7c1a35aa527b99faf8ddf7cc0b87f6b3445022100b9f3b4264b79af14bc5c338f073e63724cc79023aaca31e748617d483b478b0e 30450220519c5465d2462c3cd33e343cd75f7c917f6e7e665f4843f73b193a93113ccf30022100d0975fd78590882bfb7d89500d2ddef7e8f64ef63eba65cee686b1e92996e2e5 3046022100a1441042f87b5d70a6344d1da0a210da28316d0652e8fc184728a4221ab59b8a02210082ae5359eaa3515d706d05787503f3103301533b5b6c3f5da5ddc3058bf202ef 3044022037c7eb4e58d3b2258fa61b71c83062238e080174d5fe18c502933e9f5ab12a2a02205d38f95409ac64c0cf643e76d67ba29dd377a84b98da30d04bc0202e7dc8e39c 30460221009e76479f8a6719b513ca8ab0e4b97401ed5ddcb5c2e31d058baf47f89a909f54022100cc161b0fd408dad98388872f0cff7a7af0659886c4d7d44d77a738bdf7023f81 3045022100937ce284b29ef268d6e693645d459627ab8974e8089a3ad3e10d01ed85920f6702202b5f437f996560c98fa5559d6a55a27bcf56ad2b335a0ef6451003de7e48e8d8 3045022100fbb090984482fc7639ea903c10bbb7ee1ca6a2d9fe3af15b60efc2881689262e022040c80c23d609ed3d3d8c5f98017ed2cddc3cca6c528ff311c2ffcb1ad770a8d8 3046022100bec708fd02c31f9e3c9c5745955dd950fad7d929aed12d583704c61e47cdead7022100a85fca04e2829a95737f06704c990b3c763e4a54018901dab018a63f13f18893 3046022100935eddadb7e9052719cff2ba11ad53370d19113b46f0785afe00ee3f5dedb823022100812af26dca694e9da8010f77a7b74e3d615d4d9dfee5de7d3a4e6466d0198445 30440220538d1171cf36b1f6984a58602dff9cbf0b91705c92294721f5312ee34a7353860220739b3a8b180f3afed4c8f89de10052a422b84fde4cadb95b5367ce53f7f51e96 3044022047af1fed29e78f5f8939f035dc9a7668d8d943ec81453c5d7f8d36b747e3aff5022072d3524573687a59f506e0211dfb0098a29c82c8c26a01c6e3e478f595c57800 3045022100dc74ee53c7f4d203dcbcf47d81c6fc96d813e36c4c921fb40d2c7f6a50ca27f9022024b00e7a55b3e29b2539142026359d5ba8d058612327a1c58f72a22a1e547417 3045022100c0bbb3b549be3c439584342a65fe8d729f67491d5583725fe5ce072eb79f536e022056bf7b22eb39248957caab4c0a32c255a3bf2f01131c4770ab7d65edf9dcde49 3045022100dd59ff92612cc076fd1218c3928ee8862ba771383788830da090c5db39f6996902200517d5a0f56bbe286336acb408575ccb5e3a1f13dd7228ffef1a40655c051e3a 304402207b629a6aaafcb643b4446fec73817ddfc68f0c9fdd1a15b819137276a85e55d102207e984e324725f826130493253fa1a314e83ab0ad39e5b770edd1c2e5dad15887 30440220099bfb10cdd8df1eedb405d46010911be09ef41310ac36511232f095e8e05030022031c0a85e72edc46c82db49d5a0e7bfe383c8d0d9d11e791d8c5ab37669a45e46 3046022100b665d8663cac1e98f9f9b6a3e05e17b046b2cb538b33a3e45d98d4fb7814cece0221009bb2471ab813fae4f7ea8c7b0a859f794573818fa9f1b2fb26ea6fcda3d2eaa2 3045022100b74a8884ed8b7c4f514162d1f99884876f5db03e5b67d131b49bebeb0bae3418022020e9ce66f05f38c764c464852486be985637dc24e50bb64ed7163469a33fb31a 3046022100dc27a41c81ff3f67a15f7bfdacd7fe30dd89bdd991e4fbbd16b76823ba35d4fb022100e371b163aa499b1654ce00127ad449cf9cb050288cf9e4bf4ef64c7e74f65279 304602210083c534e0ee4c4f478c1807b30c480f07ecd24412cee2c6b03393086cc02bf3ea022100dbcb7c6011d9a83d897291a3b1992a0ab257d409829995471761c048524007fc 30440220551173c7c9f41a978fd8d276093d05ae860175c1c1eef1852954047350a3138c022076de9f37ba55162ddea78894c3d11355cc9c909c1e897b2f56c47290062659fc 3044022002ff916d0ddb02ce9c1b78c25e7dcf5660f0c3928b4d2f625652a638d2295eac022076e1bc3cc9f54933312b6c6aed24bb95101c94ae771ba52b2cb0ef79660841fc 304402207eb9f79810dec0f625c29246a2346acb5d08725de7ce8856863c25688a7d6bf802204dbeffd5abdb9b89a938aa905b7fb9275f9908acad2b41ae753983a4bcc43a41 304502204706054d6f8e5a398dc339e962036971f451c56357d499bc3ab0cdcdec4899fd022100d825eec7c082879d9aba92cb9c80ad2719945dc91dc88509e47dbad610fca1bd 
3045022100ee6ba7554548be785638927ca7f7f325abdc77837d531f3a3022847b731c49fb022047cded56bc68e3b8faa6d863ad11cbe37828780a608732630f77a4f14d60e432 3044022038915524d26af7d33a8407543ee3b4d5cf34899194a50784b6cb457d88be50ec022030de2f6b01d5aad819e7e09c3b547e2856d9d60ce3e0d030aaf43c96a1e8f9e5 3045022100b9380c007573db38ed54db86ceffd69973481a2543a53a997f176da26dfdbdad02203008fe8344affce59996722048b5c1a2271910220ff3055122bf472a38490723 30450221008316ac9e33eaffeafec60bf982ed97593c35b6b3ed640129bbcb2156abc08c7302205b8fb50f6f64a0474f3fa5bc8529649d7731ca08697da3c00995a6d9bccff61c 30460221009d3fd8b3c18205e226ca5fdd8af218fc4c944e78e95a74a551ca3779e37b82f0022100a4e4c684c9f23d955cd8834dbf12dc5675063bb3ba52b707006db47f3d43f658 3045022100e561854df4fd34514cdcdc1edd19d2cfa726719eb4d96f96c9eccf4ad1f7b1120220498ebb4c8979206614066386fd50d8486cd104b14d7e9bb88c173f03b7d34746 30460221009c5d96c2b0339d7943518db1085ffb9d9febfe6958a671de2e38babd00bb22c6022100c23780550a8434a125e43163c06d660b79ace9ba5f0e60d31d2fa1a57c8c2563 3044022048307148be6ed7e7ad39177d0cc3fb19848bad94ac445fc995e9ecf403d6cafc02204c34d88c5a6fc4a106c14f8183c6ba978a7d95bb906450a9e01f89b0c9e11060 3046022100d14f5786c2e0d89dd6f5b4e1bd166a1851ec9abb6af9633a9829628c80c9f8c2022100ce26e4a4cca8d8779cd860e10e9f928714cabbbcea1a320ddf9be277cfb24f5a 30450220692293277880f9439c616f77feb6ed2a8eb1b2de1bb87d4bc52056ad1958bd09022100be03e930e81d58e195de636109ce5e4d9c73e9e0abd49af22af1af271ae1f0a7 3046022100c6e3f325cf095d17c5f04e81d8f83ef40f5be45b201c7b4b641a236ae7c0019c022100e89bb91930cf14c844bd8d531d3fcdd4092b3fbfbbef04984fc40e3e6aab2a94 3046022100f82bb54e0ea6dc61d568472830c106222527736877f649025c5a8e5a8464955202210093c2648afe263fd9e9955aa7e26336fc14242d01c6c53d2245a15cbf99998dfe 304502204ac68a076deb10f47b9155ecc0a71977113ef60be7cc028390ec5a5c6e9f68cc022100b4c24646163bcfabbe9cd91f62474e201a68efa259758cccc36fe40e02a46b3e 3045022001be7048b4943a9345bc0c34f559816951a7de240139e26c1e31df92c122fafb022100b75386ba54499f4c518d742259ac0c7cce6295fb0661b3e8a065318aaaa5eb69 3045022100ce1294e9c2dae704fb370feb0d41ae0e48a9f6399a1d9f19f61d17808ed2832d02205918119deca1bd376820301a55672f6f9e6a9d130e542abae7bee68351ddc659 3045022022285607ed73760471c4b2cd3ee7f36e5ccdc20bcae9d23e416e0d5b2fd8ad8f022100813e3a2f572b65406ca08834b930af8c86e1bfe09d0351d32784303cd8e6e724 3045022009e0bb4214af1fa062de76a519e0589b24edf550c0857eb7769664ebf504a019022100a90c3456222041f32d1d56c78c88c5c474a639f266fa6c8647a53b1019b8ecbb 3045022100839886da91348d7560702f80b0dc73d57a0198f676da55bb27569ff921d20eda022060fb770bc744d9f27646817476fb51ea40d8dafa5c1dcca2a246a3aaa1e86f09 3044022074b7444c7ff6ec9ee5006b3182a1efe8513c7f4e3d215178e43e2ea2eb4c9c49022004e82e8bd404d330afd8887bf1d72c2271426e09723293870de672a048f615c5 30440220141c7d3b44e3c407e44e2761ec625c47a38c07f37ac031cd7165b2a0045ab60f022027e283387ff11db3708bef53621d97c717f2fafc9a614d8ab6a905c6efe2b1e5 3045022003ea434cb7cb89d003297c964d2ae4c3cc87ebdcd299a4f8e9ec2fd989082cc4022100ede93b75cb133a3a165a27d1654872aeecec2b88fbe7235de22d8896565f7bcd 3045022100ffea8d681a2a41d27af0371245373f4e275efde26608a13eded97b0de5bb41b202200094a6f0a0cd4f3b1de3bb07de17ef85d97ce70f4faa3a9450e5f304774206cf 3044022002a68d2c3ca0e3b7bb07ec9bf13527c9f51a8102ad963cce9e21c2ea9a357e490220644e50af51a395f94f024041ec1b9dcc4878994e21ae1d1c566f9d9330dd418c 304502204b48850db396d220fd14b92f71b9a5d998bb4be3789c6cf23f7d5144ec601a31022100f81a1ba420541949d8482987f2d2aeef606fae8945366dc5233afed345c016ab 
30450220705c028844e0445fd3b94d64fd45433bac190afa4d3d4e351f26c4327634e43e022100a281add1c3dc1ca700d831c7e3165a775df5e913e43a1e47ecb0d284726aada3 3045022100e0eb53954c811d4404d6cc9625be1bc034885077a2eceab83e5924d175e0d0d502200538243f3ce61d262a7474a90d3d03e36b9c7561c7eef98167dee8d0da16c367 3046022100bd21326c54722c0ad7c23851a1beb72e39372c84ecb491ad468eae34e4ffc8e8022100c44334308ce10fbc88c965826922e30d05bbe83803907c1a96e9568165274c05 304402203c28b9afd25c72d3ef2cda9222525b1342804f97e58cb5c1a9bd8e2f21ea0860022005a1ed829e080329def2905652d6fa70e2d104a38fd549fe4273f1e5ed4e60eb 304502202abca209597e087931a75cacf156d5d7f5850f9cd53366f850a50aa1e8f8c36d0221009aa92b4fc7160a55006968d661ca926027cb0572b5abd8027b6bc1beda716e23 304402206dcaaad077d192e031e46b5bc1bb728648bd2b9073b8846b3ea766ad2102eff7022002e640103f166cf1602338e3cb2c518fbecec1f745aeea8fcded3693897c4d54 304502200a1e3896132ee81ff8010b1244120b10a0109592dd3e20bd3f966342daa7b41a022100a66761240c583b28efd2140d56e6835d6d8167f752cf432ab5591b3c71f5cf66 304602210082afb5322a7d6c612d7107dcb1f9e615607bee54e92aedd2752e677319ddfce7022100eb017aa6f2ac8dd8eb0901e8aac766042b7061ea40afc2938bedc061aa01945d 3046022100fe62a286571d8ae6608ae65e54767c91493ea872493acdbecdd58e2c5c2e7af202210095048ddaf19bc04b839d8eb90cc9fbfc57ac4ebc7fce3561d0db9edc6237ebb8 304402201af3a06cdd0af02228f52ffc831b351612a4760822b8527ca0bc693509456a6c02200da2db762dd741c835fcd752aebe5b67691e7fbee54de25c6a6f452ce9a5817c 3046022100d22126a14e868d0599ff9c761a6ce96b7a75495f80f1bc3e8390f69098cd0442022100c57ba656450754e2b1e043b04a9901e92cbb8a9b99260fcf50535afddbd34e6d 30460221008fec8b86badac5b7623f7d37a48b970743109cc360e3a32f74d13d265227dc97022100889ed6b5444815d3bf427c9e3c8a8b9fe2260322e69d1e8bccc5f6f3c5b18a2d 304402201d9b8484f9a4c43f5f72f67e0742150411455f1877f0081e69ccf79a3b911e96022007a5497d7bb5300f1c91570f05d45db96eec633d2237a554d56f6a1dd46dfe01 30450220436c57596baf285e35251512d350f7d080bb0ed29ba84257329074a46b765b10022100b197d01855db46fa1976f8afbedb6ee64cbf12fe65b4b97c509ddfaa34565040 30450220221e8e1c096b67898f456835929e48389aee390b9a68d1fcff75b7ea70917b2e022100a303d9d3dba283d3dfa4c4053dce0df4392878e896159390989c67208d170b8c 30440220272006fecf9ab054561416559aa73fb94ddc72f2f40830669bdd127b678bfcb902205b87a95a9b0668c1b3ca69d2f9e547253207a87d1fa10b334ca4f6a661574e0e 304402200d3d6b0a026f6fb40915914ec67dfb761bdbf2e45d94272765227b98e182d95902205944642d80f2a6e1aeb4d4bd0e442b1ec1bb9a62c76ef3746bc8936130f6e054 3045022016204a4cc6f6f0d4f5647c59d57040af757b7e4c6efeb211d29df83d28b12fa6022100c345d59e18ad3e7474d648f2575b84e4556ef1e8dda42478503ffdf53ba5e74f 304502205ea2eeabbc892df6560e94bbf7db6e1737def40176bc1ae1a3520a6633e09c1a022100d1f48071be89f5b7ce266f5621230eb7cbd550317b9665c7494588383f049a46 304502202d7245f44e586432377be1db110512fe1648660b3c525eea58868323497ab118022100d5405f65a965add877b587d3097074beea1de72ce57bf95a4f7ee6e52bd33755 3045022100c9da185e0a36bd7fee4e4aeb5417397b591d680bd0257bcc12132906823258d702204752bb39267ffa498fc77a698792e27a646afcc37f056f0f5337691b006b2af3 304402204ed995f58b69b8055fc3087a6e9e47c8d5b9ab9b1f671c10103655d6e992b32402200effe86baebace7fff9229ef3aeece63a109c36f2a4206d31221ef7732107170 304402205f62e4f302a28d93bc43bcbe54791fecd8ed15777720f3d5268f6f5620549e1102203bf93b16ff77c0d39d2ab332384026dfe4b5ffee73c90fc2a03023bfeeba2bbb 3045022100ea473c6474e9b381eac7393da9ef4c3b38f40e0ed0986c5b4e446fcd97bab59d0220572cb495e803adde82e655fea33d64a1f808ede7068aeef156de7ad54b570ce9 
3046022100d5dd39f858ed53d3ead3f597b274268d075ab5c28cf07140b7777270c1e448ef022100da6aebe27a1392ba920940dd5b8e3edb6e80d5c8dbb4baaef97f54e4ad5c377c 304402207cca34843633530d78c1921ab1b5388ce529820cbad420da5cfc57cc4485d42302207b4b82bdfe932db9a22fd97ecac0b16563c141f93b2e76b9dce8d4700fbfc019 304402204863adf4fef5c965febd1714be500a2c164cb08fbc363afeb9bf35845323c05e02202732344c9eda30cf9c8a9864a1040e0cdc34509111da31c1135735a1898fda0e 3045022038c1c772b3dd2a85954acf84d214d0f0b8362e0115431657fc6d13967f150ab8022100bc60918d9eab591bcccf4f3a595566a3230252f2b3970ea088dcb8b250d25bf5 3044022005e477a9a5b5755c523c77690c80641f2c6e035c8f7e3096851990f9d537bf6602205ceea13da82bb34cf5fb4ac109662750630fa1d249b01d4e55e4c05bb38ef59f 304502210090c7b02704f9d11754a82449b1d458f06c40cbb7e03b1d7c775794cb4a6867ac0220497910c31336de2ccbf7709a358a6fd75f5dd78df07d7aff897e095340f4322b 3045022100b0c6138ff4fc57793eec5899069a4aefcdf92c82f827370bfa87df22689d6f0902204d705c9f04a6ab6b4829890b6d844511d76b060dcc11a6ae1640c942d379708e 304502205197c412c48440a3daf21783ce48a8c5001c5719e14fe5f90d44ec127473b626022100d8bec11e1f890fa2536e1373fab0e141b779b64444e28c27ba4dd2fc721c8c06 3046022100c6befc9f73e925a9207f136134bb31e4dd368383069bdcc07723d05a8d33250b022100f8621fc5d217bc0c46045f07913b982fd99e39cf99564adedbb0551334ff024a 304502206bf76b902f9a7d41cdbe409628f782600b97ed84597a0ec36949533ded26a3b5022100e5ff06203f05f4429879ccebdb06b9b49f8689c2fe1ebaa81a325b782229ef2b 3045022042b698765a8a719dfcfc0bab73d5b25848dc49931fa20e3b5bb5be5489c1adac022100ee9c1b7a2108a7634c0b04c2fc8d091404a37c6ea4cc5170bafa4f1889a3ab0e 3046022100a6cee8306a848eaebd052eb41423f7d289e16f673f59aaf9448cfd2547fba2e502210080a5fa74cf2b223f5c1036339d527ee814fd55cc52253891dabac6e0e43a2f29 30450221008d7deb6e8400d0b570f0efb3373149c4fe29d59c1bb4cba9738dd2f970cb0b3202207fc292b1cf844abbc8b896f79310234d71c1e4122be483bd8c248f08a6c88113 3045022100bf957d8a5a1c936beaf3e0379b20d1c528368679e70fc4f6105d9081dcaa083002200f8244f3f5fdb55c76889403ba1e43edd512be0c4ac3edfdff7e7467057a80ce 30450220346b693ce4e836cced363bdcd6fb9a731cffa557300ec4ceba8995e8c226f8af022100ae316b61e89677066bc76f88a09b2fd6cd30b40847df71567484edcca0ad29ef 3045022036925caa1cefc384186bf17cfbb9ee00161ff5af8206dc0accb893b7fccafec5022100fff18f66b2ac5aad5080e3de8ead28f9e7ed5e2bec2393303d02b4b410b79aa3 3045022004aaabccad138421db51ab0d53326246b558cd560a0189b98a7a1a851d1f52500221008113ff6c62f07c139305087ae89e1f2367172466781397b036e7540a065b43cb 304502204c2b8cb0c1d877f6be7cc9d5b961e5b7b35112ecd46e416d4d9242d8988898260221009456f6e9e6f6e306788ac5c889d095fd1b8148692bd816f5fd87bfcc98023f0d 3045022100b6b51dbabff9d87118048be5118fdce559821818acb7230fb8a8132fa479805702205a0cb2943d269d8f0f60c53f91312f648e115e5e5de61d18ff7eaa13a9306a1e 3046022100eccca8a6f17b8b1ae73b37701e168cf8d1c4eaa21bce87b4fd10655b18ec7697022100b5a5d831f7a145fcf4f44fcc5818299b6fd33f9357f22fa892b7c7c7167c136d 304402204eb6e8db1689ea97c0807735f2c6ea110d40e1483a13bc9bfc765ca46d98bd98022037ae375450790817750c1373e46eb093e9dddb905d34117b6313daa5f2c86264 304502206207f294572e04072441210351f1db920cada3c24d720baab47603bb8a31d134022100dc33bd87d8312bb969574e6afb04fcfe0d6d790f4ef95ece16bf908a793478d0 304402204dc4bd94c6320b9c1872031766c5a8e4ac3f9191a1462ec7d0f9560c7f6b10aa02205dd29b1ecb3a6600ac0a7fbc186961b18aecae32a65a8b2c089ed72764cac211 3044022002ee0459e3ddd40d819a4442e15a03282d5d21f9b5b6fd4a57e4b7ed9a9a37d502206d8eee6b225481f8b0a6671b8a488d8362c51bf73072c71c64f03d0bd4370f85 
3046022100eb259fbad4231d33aadb44bea3d6e4bc86ca43e2b5e7fa9767c7b5dd8044a264022100e298788857b985872b68ba359c84abffed7bde3a05859183b4fcb60baa5ba338 3045022100ac1f522d610b30bd93dee23995d2c3dd67c551b30680ba8121f098e8cc3ff03c022029b0aedcd32c39bdec4112d7a85c9aa0e8038685f035111714a52b6fa99e88c6 3046022100e2312e20505295342037442fcd5aa740b4e3e1a6f0ad2a6151c063dbbef13543022100e3b00c499463674ea940f3082803b91a20237da575c013973db6ab06c2b567d1 304602210085e7a4142a07bca049fbc9106a43150a573ef699afe7a08453c12f55a808d723022100f1e4bc92e3c3f462e637244f61c38794e976cbe7d34b40a66248bc4af0d15602 304502201ac130ccad7471d6a4c99d78b6bcb5f25a34027b2bfc6b728064b52a8c4c4baa022100984333ded0e74ffd5fa26f23d56e89dc84c9ec4ee4cf7e7471236dc14f15d514 3045022100f4198a1a6150226b19e54ea8fe3ad95a2098a798c8aa92a877baa7d05b4cb9be022036aa02e89c89ed5f8fd7cf51b6575d9c0312a2a9117a9f7293963ca26a792a82 3046022100938da2b115b5609cd0dca46be1d03e921f4272bb5e705a7c6f414ad14f2d2330022100b2059c5b2a560b344e29bdc26753c791f828e5c4fe0025a11f2fe8feac68ab41 3046022100b75ec463374bbb45da9686f423704bc056ccbf2d255460d554a7ed69ed5f1693022100c1103e23470ac6d439c9f57a13d22ea3e6927f1417c62a666e1ef8f4c2f87e22 30450221008ddec2ed2c27d871a59281ec1d365b67472e4df879e1e00bd4bf9fa85c0ffbe80220087f88e4e60ae0ad5f0e5f114e343997d9029123fd3c243edb3fa2f9cf9d771d 3046022100ae80767d25842b6fdb20b44412cb9e51d2bd0cf10629c83ad802acd642690417022100eda71cb7990b21b4900c8e114bbd9bda9d1093950ddbd7dc037be4d276740be3 3044022025192d3e73dd3fa347a6a6d9fa0c94190e9ae2510a681e4c06f9d93bd1717bfa02206fdb99fdc9070d48b997987418500f9d1d767b92aa19671fc535c6bd4c1649d2 3045022100efad9a7da5fe31121c5ffcb71caf75e7ff46b6fe2a44136c1aafcd6735bbf8d302204bb0f2ff88ddb080ec5ab5abd5c5744e93a5c40504d1ef395b9b716a3ef79961 304402206b7129c6256822fe8a62af2fdb43ff8b1f90b41e8dffea6e247daa26cfd76bf402201281f3cf6e8b75586922ccc9de98239298665b148d3d898f7804fd61629300fc 304402201bd5fb8ac5f24a952f8fa335afa12403b0258b2daac0a07e95ca3142ec068ced022022109c1c9d8ed50b268fcdd9a43f32f931194b93299cb53db159a73c78d38e9c 3044022029574f19bf2b8c5e71cd82a0716566d098e28d30518970fd243b58905b19b5b302201e70659d3a4a65b6f9d3381540e8de67854281ad2940a6d49c5404d9677c9ce5 304602210082a6332756be9b61c89a5acff9a56cc256d6f335abec2af44885aab7bfe236d3022100ae0ae51e58b934bc6dbad3222fd76d1c4bcb2fa91b72e9130a1302b9d9830a20 3044022079cbbd05eb4a26efa244c941ce75d94f2fad559d657ee2b28f1aa74ca0bf8ec902204b85a942f1f85c62fc35bf48b5a41db8846a8911a02aa6a62d9573fb126b3511 30450220614c8b124ed3a76a5155103445cf5e9c48eac424c515a0c1e70016a5542a0f45022100b58ff7088220f30c475431747af7f762922a299e347fdff10ba012deebbabb71 3045022006afb405459223db3bfd4c7ed613b9e6222ea774f50485deafddb02d8143e6db022100eaed4e569bb5fe1e3f7e89cbf758b2b0579834bb191012518f9ff32cc3e704a4 30460221008130a81f565ec3a85017c9b3587d69c83def0aaa26856a6b4892b5174b3e35c6022100dc964e6e6e524dea5d08bf6e16d08358bface16bc406ce4234205eec0c303d52 30460221008b03c51319beedb3f1c799fda84af98c5b3f973edb6f3279141c7954ab42296b022100b0b9ad0561d0f94c21e13f4b92458769bf16412b06a40488d577c74d4e2f9edd 304402205dddbd801a6e73783eb20e3fef0ce8843890b659cfb59a98c977ec65b520ade3022056d8877264cbb8196860c49985010f139d232bff4e3997587bd9d73301328d4c 304402205a3697bd1c23c84790a9a43b7649605a46644b6ad983c28e869c99b0424d383e022026649478d7ee1b4769fdc7878bf0f288ce9b7c41e985530d9030ac0698ac53b3 304402203e6301d2d3569461304eeda8fceefad05a0c7cb07256d8fd6d85bd780dda1f6802205de514d6f99c5a6f63d1ddef4e59a4990acf6547615fb70fd851cb997afb64da 
304502206218155af262e661e7a650a8191a1f16deb4ad59c80b4817142852bb996086c2022100bdf1474b5eb11290e7385f4a56fa77d28318f7529c958847996d7a96b285d6b2 3046022100c0512ede6f127994b3b3e80bc027d0743ae26720ec66de0118c3605c4ee3f2ba022100ef59e85542304bc3bc9e947e0839e80ba7cd8a41113917bee29fe1cb07aa7b10 3045022100d40658945446c21da0232fe40ec6bd841a08c93ca169eb6fc1dd423d104fb613022013ed848c131ed7da217b6c53810506e627a5557d4fbc9ac02f32c5e9e52b75b1 304502202bb072ce7599f45660ee44d757bb1a260ac2add8a432aba95771d47ca8e6c158022100c2a90d01abbaa01061be3531172b24382debb548ca0a59bcc4b076dc71148309 3045022100a1844039766ff0aa61eb336f8ad0bcd830e54e202fb4a3a22aae1262e0d78e04022052f94f3a3be4964a424b8cb00b3066a8184b135cfdbb727617e68eed0af8e616 3046022100b702d5ea91064b8bf59c0f51b3fdeb877eb50ea5dee6eb0c851ca7e9cd88d720022100de163c53ee9fc5deff8aff4946080ccbe2bf523ec7671819ab84f5472c8613eb 3044022009379ce410857d1e051d272eb0629e1bb3d35aeffbabec01c79db653793cdc2f02203394a7320092061cc59957dfe4e86d75d6f17f832834f2f0bf8ad0e2ff9e7ae6 304502203b98add513e28393bc1cd64d8f1a66250c5275c8053a119ebdcb5ff534132eaa0221009e8dd8769d522701518a74b26ca9658a1cc401cf682e55cf954b801245dd7175 3046022100844833a9f10e8d4d80da0715074a638558a142b8f5b89dfac15c5fba9ad7d500022100c698403a93157010d24f180f1779fd45918213adac2ed11586880bd724fe3e81 304602210086b05ad76340978b91d597b7a70ddd9c79a8a5816d4050f756c61a8de8217a5c022100a1930071b6b17f7a8d73a0a96c99d964a3980130d2d07a5c12513441d10d635b 3045022100cb5213e57a87f4d278e209a9bb33133ae2c8deb312880bd8ef272cda751ff1b602203602b4384f1289070e02d98d360ee2f6dc8fe8053f655500f93d2f200a883161 304502210097e4a4eddce6d59a55cee3b6fda98534053f8ac26d1dc4fcec0b1b9b428d9c0202207b074ee5cf58300622cc0c85e37c7a58cb28df72f18e66683fb89028c95dcb43 3046022100f4a83b4a6dcedf61658fddacfeab7cfdd5ef4930297c403467fd79b5b4cd7c590221008978fef4823d57e3772c58d8a8db3766b0a6bed302416cfa21e3df9c05ecd0d3 3045022025001315fd298b7932793a758a9c58380846335ebf36c4da5c5dbd4fe94a83a3022100fa57f03bac7442bcba6a22ccca96e339cc1df1baddbf9e7492433e07d6cc03e6 3046022100e228dac91c2085bd2be340e17b3a4c54c635d91694c4f4e38eb54149aaabce40022100c3cb6ce113bc082aa0f4dfd88cad336c43db1cd23d7a3e48acd47fa928600492 3045022100a479c3abeb0f64b5a6579765e1bb6c167d9eab76b6a35d89720ea09270eb72ed022070189966864babc24f3abc616607f8cc82a94bb0f412c1d2d8dd2516992189a9 3045022031d00bc8255319e4c278136243033327591da3835385d1db40ab5388518f65e5022100efe0740c2277f2182aac411f64c80bdb1d82d7cf372bf64425dbf40d89dadd6a 304502210081a2da934d01e5ecf80d8f6845952d9fce99394b911c5fad4a904095740b4441022076e1ba5b9a84063f1d443af2ed9e715cf3e6c6563151efef9319805da7ad122f 3045022100b1d6956681102a20a454aef819da923c92393a806ea959d9c7a8611e5f4bc88d02202df394cb27287b8d45c7c083e70cae4fc9a6f6a94bbb330efc1d99909c7b4182 304402204183154740d230e524a7d66da653b699145cef8c341c4bb20bf12cc43ca4e87202200b8f1c7ac72b17b7eedd54168b4e64f5378a9b60062e82ef1bc4cab49ad4cbe4 3045022065ae77529705042b78e116a64a8d6449c5525861c4e949f4a42a1cc0e7e1f80e022100e4e7f870c77972c5626a15f593d83ad11d3a490235e7190871aa6e1788791131 3045022016748bbd332b16fa9a9fae184dd51c83555e8f77bcf61be9e9c8318bda30a506022100c02d5e16cb2adaa9fa74f99c3d1b2ed5f401a327fb226f844d34cf98670699f6 3046022100c5e22a57e9278757e27c2f04d27acf14a87a420e0d6c7d68793e77a77d78b1f3022100ecb8effa46083ba0bd157360638c251dde35b48087e7b2062c04a7c0693b8a65 3045022100ade113e0910698b81ad3c01f0e9ef76f5179e791fb8efe71090b0f88d3fa0e3f02201b6a3ca308526342f020bdf8a502d8e3c2c1fccc038d0ee5ce02cfe26435db5f 
3046022100f6a935fdf8ef90886010a905820b678f5e212cd2c6cc389fa74e31b4bd434e4e022100ad2026a7967173f2827a8f92b06bdff28b5f7cd0ad42a3b852a340551c35a739 3045022035ac876fdf11bb465bcdecd20ef45df62c987f3d33286fb2a007c984189fe040022100995bb5ea5f0dfe2bd4f4655119325cc9d174207fbdf65366e6ee93f42465e5e9 3045022100953a8cda5962467a9bef7d5258eb3cde717362b73488d2db5d628f72ae1e5f1402201994cb21f645b010818f06434ebb2555763ff2ac88a19b18044e0d452b519de5 30440220059e2e0396080a3c21089fb515995bf1777030ce1f6f5497487247b3de6f8e8d0220576e34bcbd6291a12fab7ec4b6e73b24230526ed60ecafb07d91e45f1d901a8f 3046022100a83724ffc4d05f698e31b1976c0f35999494440012239a03217ef7af545cc26a022100ee16c1b5f7c88947947bfb53b9e68d30491bcf18e735ef486687fb5446ed56fc 30440220328603080a8f6d0e660ef04d46a11764a0a1fa25d90080bb8e9c84082aa1844002202cb37615a4765335ebb74d22c3faa4cc0c1677de76cf5ef2bb0e149302a389c1 3044022041922e25bb07f436843a707f1292522565fcff3bb44022aadedb5d3c3da063490220154320394ea9215983afc2f2678d892ba0dbb88779cb069911f1df6c7f82b249 30450221009c94dd44a3c556886480ff6d60e3f49b46093da9a26076dddecd733be28f7e8a022073650ac7a6f2f80053101844ea432b299969b3a294d53197168866037a13c61a 304402204dd2075eeeba7c212b9685be9c1695716220f5c156bfc14c97b47584a6b3ae2102207ae37f15e95a057f20ea6ba2a0af49154294b6fa546062e2539f6e264c4899ea 3045022100b1282f62b085acede44089493fcd19c9072779970a9358813ed50dab4634832202201522c52b0ff879a4a74bf7b64e3dfcd145d0e0e41c3e17fbf657198e51d7c4dd 30440220231a32b21cc0696ad7791e4e8671f3dfe4bfd4fb891ec0123651a10a1ceba34e02205d5a0e7dc9e8e594094d81014b12c7f7eb626aa339f093239978c0217a0ac17a 304502207eb6b257a616fffaff4fbade3b08b65ec74e1279542d47d4b0f40457a09a6ed602210087d0a574a38a86227886a63cf6cbca7cd012aa206c3feec46c097e4c95cef680 3045022067261606bf73c8d8c4280c76ad616ce20f2632b3289d8b38a2936fc906101a9b022100b37b3720443593b0e5066564a8ecf2b3f92d6d2a9e5546995ffebd486a95a48f 3046022100eba8426d2351403aa33498a1275e628252cb971551f6440895911c65f426e6f9022100f754d3c9b78d4cbb1f642e89fee0265b8bf926e14681eb350e411d762d77df4b 3045022062ebb67be8bb6d6b3fdee89d28a539e90ccd42fcceea9c912a20d893a555d8b1022100e10b870cab23ffc5820685a06a7b6fa58432f7f94a57c1427c39018797dcf727 3046022100d08e80b320f3ef0045943012733941830a84a7d65c57eb0f891b9e3ed2520a2b022100c732214598bc669d4c248eec12b08f6dfdcc232316570847ffa8981e2a7cff09 30460221009a2e5323a0cd0e45a93536ce4093fb159f2ca7497fd23bfaab1f996de85739ed022100a17b2a7387273f608e7910bc5044bf12b02cf5b430e07fe3d48cbd2d7f106ced 30450221009fa31b7138480a157e62b228641c9e4aa8b6a6933b3c2dac073b565f859798c0022045587f45e0708ea224d82cf9eb006ec1df8aa6275e2cb4ffc2bc95aaffc9d21a 30460221009d57ff30ce830b759935670ac68251ef6c98578d3944d43e6ebb886a24feb8d9022100a7e51d85f06bf4e532a5df7e1f07a8dc0c4484da0c3e50cd33db5a9c59a61495 304502203f5a6e75209d62050731ccdacd4e2e6696fa3abf525c8e3be7a97f2feba19a1c02210088c80f546946cca30a359cbf8574129514bff81baf57bd1042bd25282902dc43 304402206cce67f869492f12ef60d108a04bb289b74821f9e3010ca86ad5e4cd902d0d29022032b3d91efa4d22caa57b85563c000b126b931ee7a505651cfa5c78c6a601d347 304402203008022481c44e61d3878cbe68c27f67129fdbfdcf3bc73ed247b32ffbe104d1022036504d9a975a0c63e51fd1a6f1edd99935a5f2a12ee2b7cb683f6a225fbb75d6 30440220389d064f8534f3192a3ab1205f418e4993fd3ca1c9879b0daad033cea08e172802200930dccae6056121aea68ab585e08ef0f89e601102d3a7b5967d7d21bea78d7b 3046022100d1ddd7ae21acd0e5a8ab5cead848827929d2522331179445bd091d2bae8049020221009b6ed1c84f44d23cf75501ccc1c556d39e19e516801a9d99fa73f9c9e20d677a 
3045022100d82bdf78b278a532016729de1c9a3e4da7e1f78825844672f9eadda41f1fe910022000d9349fb3fce1dc34aa48301e55301eebaaafefd78dbabb7c61c7b90a2d62dd 304502201bb78a99161f8a46dff4c45040afb099cd3873ea03eb96d52b392554d82337ee022100d019370f8818d97da29bed8d3206dae52c21f505f23279ea818489f6ff26598e 30450221008178e10e746b4fab7890fc43f63c4565ee562313fa48ab4ed39fa4def75e906602207590711af221b46f2121556e6471ed4f63583f4099ee3e90561ee416d92722f0 30450221008c0d5a42629d7742d6b069b3f9a3bc98612b34e2687bc91467e8b6d78b462072022066bf19c65479b3cab1317d8464dc432f8caed63616305cff5cf80e89d6dcf603 304502210094881c77217db0ecacc984d39a5a8a13a3c073c80ee940c3fbf147cfc7014d6a0220087dcba1768bceb8058ee6d6bc2a9ebf2c31986fc2e1acde195fc9890271590f 3045022100dfeee177c24a2e895cd6987973c34ac9565d9fd6378140996b5752bb1bc0be0f02200dbf51fad1b168f06ae789f7dc74963ef4fe192d46aa0bafffbc5e8df80b8e45 30440220354969d3d8a6ee8c00ce2144cc31755e2f79bcac11ea1b43068b788912925c9302201d81e0dbb702239d810d9a37bfb16f84c753631983e5a78c371b6aabe2d05df2 30450221008ce3b76251823863b0788e4f452865974892862134fdbca08d34988684b0a39a022002e00de2f48ce57e43957b26231f956c2c12291a4e3c14cd11c4a30eb4c29325 30440220588a199a7fb03c49eff6667539484f9e66769d78ddc57630ca562ea5f91f25f40220259123fe092ff79cb8421c21b5ce9532fb6741ba775ff83cb65b7df85f877db1 304402206fd4904c2d4a46ef6c406d7ee2ba616d8d863297f38cc495bae500a1aa20b09c022034417de17f207753e7e1fb86f548183f8809852336b0a4e09c1310480ddadadf 304502203a8b332214d4edf6720870b9d3cc49a6089a67c1585cc9506f92417a8c4b7ff7022100d32d135d352d3bda2e1ba74cca4056e2bf031fc8534891b3247fccab5c80f08d 304502202f18fadcd051ca52c6b8ff5ab6cf4dda4e76f37311addd99c24c1a6fe7b00f7c022100d2c366c94853276d8a142d802879c19a9b4d0fb41400911771a417b6f9fccbca 30460221009fd365f875fdd089ed9222ca906ca3678187422558d19a478723205181dde20e022100aefc76f65aa98b1e2f44bd7b491714f8678a10125051afde94eb8380b06213c7 3045022066b138f143b7b9ddbf9d7423cf6ca182553127d49190d5f0af39da8622bfe301022100ddf4bca224b50731fb6d307a4bbfcf794b6fc3f31d62e7cba0d55886f20d262c 304602210087b9587ea959b97cd468bb5a7f0d9e08415ebea22b8a4b2f2f03c8c513b83bdf022100809ae6072a138192a8892f2963cb58162380c7b0a0f1a5d87dc300b503ac55db 3045022100f725eec887834aff7d4b97a0e91a32004e36ea33388169c8f45e22c910e3b9a202201ffa9a005563a714869e4108a1d65d4a742309b906f22b47ded4e6d78dc92387 3044022031164919ce96da095b3356d678204df1ee208550f574b2704dedd5a9523a310b02206e52b9e6b4465a1d19e186fdb81098e31dac23f45ee70e01f9cb883c98f081c1 3045022100ed360afb62d6a1fa8aa53f81770265f676028b785868425410b600aee4440a1b022029774f53d735c56bb5dccb91c5c1e3bbd04fb7b5720cefe0bf28a886751ea930 3046022100832017ed9e03ab1737cc9fea8963c92b903fe331b15cbe8aec706f753db5d724022100c25020bd05e2036b4c27878dee81a987270c3117496ae9f4be648c051524a92d 3045022100923b8c9a02a30e93e15c82bc5a113a453287ed37c16c3d2f271e1f3687121bb3022032d3838054e66d4e6cfa17a727149434bf3863885134f20313f70063cf25ab8a 30450220702b018734d28b44b711463b39e6bf3742717670c7b415acc1d4726bb20727c6022100b2bd90232362b95f2901c05e346e679d7bf8133b90f99e5743b8c586bc807fdc 304502201c1e8cdc220561d04841bd5f5799cc448cbccfaaf081cf908cf6a79e8088e8d4022100dd9d67a60b0e456516bbf6147d62506e637457324ee8fc285f692983e97e819a 304602210091ee4e4f18fc46706095f9564907155e9e10aaaa8269a1f79058b58f4567a66e022100fabf6bb4c3ce94505952981bfa1420600c059c5ccf132323336d942dca817ded 304502205ac6479eff8f512a74e48ebebcdf2d79c08e83d8fa18387a132347805b18f8d1022100dabf10db337708978fb06b93657fc11ae505f89a51c5ec383ec8e5b7bdf362ef 
3046022100e14669da6597f0bdd4b849c380aca39e6cfb9254e205f6fb1bbeaa42d69365830221008288d2479c1c30e67c8054c094c05b2fbd6d46eb00e107fc851b985714d40c51 30450221009fd98e35cfd9f16000daf1ee547a1b817962517050842d4127821487daf8e80302204672665e309d6c99f3f347a065ab245b83443520c5838ad74cd8893d48ae44c8 3045022100b80edcc5294eab96cbe17791981d100e1c482bc1fed9c0c2c49b91eccfbef5f002206c4bdc55a74691683f9703b4801f3aeeafb8e90371d28b96fe973bf94fe212fa 30450220728e206d5f4ba122f35c68a006f6142d599e92ce1e4b473e415977d232338b09022100e8a0f066807b9a2a585e030dc2ff23452feffeae0b0f99488506bbd77be64427 3045022100fe42cc3e26af118801d724562fa3cb4195f14862db3002e4067d0a1081953fe102206e0110bf1635deac0e1d6be56512043a7eda84a017087947c6e59586155ffc37 30450220727833e8f2b068609295216d86c27ce3ddabb7c0b8d5989518cff57e747301f9022100b68548bb26ca9f072386d3c6c0c404d004445fa716b3f7cb9b51d5138395692b 3045022100b25502393045aac7748eaa8ce69ca67d614125b0c0ca1f5a88ec2b609fc447c1022025c9dd44050578fe18e5325487a087d1492cee79549bb5ffe44d22247d3f6939 3046022100cbd44d6b99a0ecbeddd72c59e99d025d979399dc20262ba9191b58dbfbfd1197022100894d5e2251e8100f9d10c25f8bfd953c6f3aea18aeb5d81090cd00d60360eb25 30450220762b7db3e532dc13a613a1999c561ba8dfaca7eb64dfd448c319e7cf208427bf022100c828708a6f86a0dda4756bbbfd73c6c7173ac460f491bdebac36df1e858b9afc 30450221009f030b4c4271b514a0606920a15dc4c320be4bec94abb27022f4121d1a6a628502205309857618d99751947d908b6c1fb05bfdcdfcd399f36e9267d5a51c210c1c5f 304502201ae965992a698d6659fde27c8d3d26cf934e292ae9baa99941d6f3b9074b1d92022100b49e206e0b5cacfb613b90ef54c684a3e2e78cc5aa0a24163a7166f32c78aac2 3046022100d90abba5ddb64593333ef3fdbf0ef7fb25f208c8dcbaed8b25dd1bb02f12930b022100af09a87c26497f4b4e64738edbd7d33b66d08e342ee186fda4cb43f641e35d31 3045022008b6644b74955071d260a382fc1c537e3fa6bfe14a614a98b5e48a95d3e5056c022100cd0a5328804cd713bf3f0aa14cae45e62440ff2bd08aefac2c4e5bccdb96fa71 3045022100b219260e32c21e784733828ce4488faf02bc0862324c3f410d61fd3559d86c10022036bc5846f736f524bccd57331ee5b5bcb2fd20edd8c5dcf55ed613b77cb49fd6 3045022100f70574169b7c471b7911b34929fb5219724df72cf0f5fa8d5f11be9c575734290220476ec41602eb5148864feca536181f0e0e87fd8093a43911ecf8498628471a2b 3045022067204fdc43b4ea1a377126c28443b62c9c4fe69613061ddf25d8a9eb9052f461022100d937f4f9113576feb1c0ca584e11ea1910fffd313fef33abe8ff6023a2c01aa9 3046022100aacb00f83615b839658943fac2b4b3e6d5dff7b43ba34b4d3f4c5effd6e3bb5d022100d8755cc3b2808f334a9f48d293972d0b2e5d0999f80606f88c33db911e6961a3 3045022006d5d0303fc99c055aed3761c3ac12a0453178b9e074ec53d4747aeeaf4d2329022100ade98fc2926167d891dcba75150db82e6924fd5df75319d92a6971cb9a8d31c4 3045022100c836baa43287e93fce3fc887d8cfe011c32fd7dd4f3cd90c8b854f46143f00fc02200127c7f533c344df6a1327a6c34e5b628041bd978e4003246b0814c302711176 3045022100fa0d8bf0a22e4c7daeb7eb3de4f834656f82d386642c90cb0abf6950063381c102206607d211a80d60a202b48830c489e31f234d996b5c74b49bc5700246bdb008be 3044022063516ccff3ab2111c2c812dcdf26be8ddc88aa5418178e0320a39d513fe0483b02200d81a349886dbca7bc462b97b9111824a37e03dfe045af0fef2423b776d92fbb 3046022100f5d0137808affce88ab7c7145ef1e04d87b4c13569a33f0f5a0017e478f8d5420221009e3c1b828029c9ff7396f961cab8834b605a55382a68e6d91843ff23a999c1a5 3046022100d7e7c6adac1ea88b2109451489e28660fa20e43f5bb83a8fc25072824b480585022100987121f5cef98838bac52f0b284a87a3a12ffcc2cb65b2fd9a3cff5d320505a0 304502207fa7bfe31e109093259cc8373557c1d846bc3fd5ed02d97d2acc7de17bbda6dc022100e9fce8882aaa1eaecec758bf180dece6c423a18dc997bc5c8441c3a8727d3155 
304402203b189ab510d54dcac33877767e0a9186ecf100b4957aad659295eb5082e0bbb9022059956730d2ebda32989b3bf330d4e7ad70fafc600ac0a5167d27171902fd2efa 304402204c23b93cacbc9fdcccc10d0dd5d90a91760a03cd57a8e207ad1e7d11cb7a56f202202f4dbd8d70ed59de63a2be44eba7513ee19b6414ed8ea114bdefb4264ff4d8dc 304402202bfa77c299ba062953114895bc8760dfe09df936d0314bf01b2d497b7df496a5022066986370a465f5e7d9d46b59340e44b40e460eef2e68617826bff7da232758f9 3046022100bf3cc6009c163624f257e8e13199213bc2032b668909215b6b77602163c35e9a022100c5e6998f9ac6032a0294c91f7561200d3c9d59cc534a26f59ca9b85a59f39291 3045022019d1a1029ed4f6240d6dee13412be42fe049c6c268fc3ad4c0d37eeaa5accbd3022100e730e1808426a28073af868fc77470b48dc4382ae815f0e0426505092c5290f3 304502206ed2fd5b1ab69a27fbe3091140b0bdc8e3ecfbfb60281b75c7c8a429fee4e356022100c7fd775f5f242f2824d7535b4eafc2b58cdab404b6873e6bc625dece9f647c1a 30440220352e4d5978286351d5db63992b7715f99f417becddffed7efe649ddaeeaebaac02201fa8964499111aaeed44b35633bd1e68e7893a20a76ea1adde28369e67bfc6d4 3044022060635ba916654079f37578b962c498462419e40ec386c3b81e19d81d2acd670b02205ad0ed57d3c1d4d9d0954726add952ee0f6993e61eb83e8e2a729b2607ebc774 3045022100e19b12956b47f571178c09354690ace17462b936a5959ea5484b20aef8a54eb8022003192d49b251b2d185461b7af494eb3a37222caff2a46a6dcba3ec513e2aa445 304502200c744455c91d5ed3dd322ad645bd729f850cbb5f842af8bacc37840cafb5c18e022100bdd36d0f3c8a9c93a424b1523138d1d8c75e952c9e1c47828d972cbd545f34df 30450221009792b6affce44da8866d52f0a74e861ac4535d35bee39ec643c8e9d70c2d327502203298080981a5abc77d073f9099aa30065af04ff922018f62b79c031feb81667a 304502210086f3fe3336918dd82ca421c7376082094f3bb2ed5b1038c92bbba6a51be0e1dc022026681311b50c77ee51a4d9df509bda91995b10f55295432de0713298ee6d8029 3044022042efd7f5f0474eaa99057b3358268c1aebab59ec89df7aff6a42076c4e26e7c20220058789adb6879dc3a35508e0ae92d42498cef2b1cedf52b97830614fc6fee78d 3046022100d597bd443016b91e054bc4d7808e4205af1fd3b1b42434fa871de8c10bf7c23f022100eeea3c5a514241d9ba32288d138fa5d838cb8287d11361c87c7ba5ab133a5e23 3045022100ba5fe5d0c8beae75ec05aca04322114a1820f8147d094fbc9028937a58dc43f4022079a16a688f5df7ed80a23e1bf476972730f7fd7cf484d2086d651271acd0097f 3045022100c1890fbe609f1bd3d21a356c39143b8595d8d77dc0a42cf53e15bf6085fb989602205b3965fe3a903260bd41e0822eaa3d90c8c47a6992d2e4bf32ee0b79bc2bf740 30450221008e73789a6b2ebe47b02a0d5c1b341d44058c16e2f8378ea166fcf94d199a06da0220306b950348fe41d36c95698377ea4cce420c98fe05eb51a9789fa26fb2046bb6 3045022100f01e4d1e6f1d6231204744b5d3ce8a44a7322157ec7ec57f74d065e5830daea702206f217c798260c12a5b080efbc517659a1a08ba0e9cca4aedc4422afd4326cf5e 30450221008bb3cfe1a9f77b2d9799cac761579870ef52291dc3ec74a6f6b045721f7af77f0220168bcab224fb52230034b5009a1f4885d7b19aa4b8c0a418b922bfd79e792df9 304502206cf63b3eca315c45c5ce8dbcb21d8232217cffc6e7eb2abd93447dc097d844fc022100cafc2415966ad21e16ca1cb7f6011e6e94eeacb5aae44902ddb65599521e5b3b 3046022100f3558b8223650426230acf13c38dd1af791eedfd3c8cb803da634bb9843501750221008eca502a32f7ef9587426a2f2a2162cf66cf11e944e28a277e98b92e1ed12b03 3045022051419d3340c1da79f512d6b1b113422a51ccfdd96dda2f72be56cda4b713ca48022100a4f9c11b5d3a9c149af63a454a3453485f94f965ee41246e2bf37a92a77a2ba0 304502203d17e5e18ffc1ebe38c0f993414de3154c1589a81152fd20587fe0a4a027d1af0221008f29d9f35bb36cb8ebe3501dc3c766c6ec53f30f051d5dbf8ed72a3a3d661a02 304402204c767c3b348570f0edd8ed4f646f3ef671f786081f7926496a8afd29dec132fc0220557d3d21f17d26a4fd896347ee43828fbe07f594ce3d4bb8bbc26f068291a36d 
30450220708220958fa319881ab73e71b566b4fa61ecc16e8c5bcddeebc63a109b0402cc0221009f910b46ebbf725ab2f7d53a82ad7e110551b4ccd652ea599d18a7e97224a2ce 304602210097410564516721a14b726c1ae1a0c4af20978e95a80de8c72d14e787bf7ad47a02210099ff8b140f513130b184ec856432cec9c03cd0ee889b2089f7dc7b7ae8f04b4c 3045022100a76dfca14dba4bc01d9f7308feefdefb3ef8b052f5f72621e846131928b113f9022030d1aea4abee8b3a0ec5c0eece9faff7dd21d87c995735c5cec3dc7fcfd15d2a 30450221009fd7283c6506e97b166f35c62b2f1cf7cb16334041aca5a0897cb32cc327ec91022015ed94147b31ea0e2eff5ef0f252afb1dd8c0f357a9c2f61f7aadad7a4878536 30460221008c8bce2c2645d2be822f26f8072b479ad63c9f8ba29bc731d596607b64b75cb8022100f0aed6dc58e78f88fb379e54737a3126f9ce8dbbc7eabee0116b9ec468fea412 3045022100a1a9c2bbc30b4b322578017676fb619367a38ae378499b358027e8713ba1c9c002201c9ac2cecfa574a86fce5f9d1d87784add46c5cff7d2caf7c74947b482fc3dbb 3046022100c529f35c2ffc11d57533df4434e28552a400acfe1fd6b24c4897a7f02f8875f7022100aa4df471bbfe54a28f100a88843bbd893011ce4e2b8cfcce15f5974b7b0d240f 304502207ceb909ae398a2545028c53193e93607ae135cf8bc1c40f8afdc627c5b2ca4bd022100ede90217fdc8431134a719ad0859414c4f6311dcd704ee196b287136a018a970 3044022055f35d9f8b27e2069a3b993c7e8347db811eed4f52364777bd5de985147b6611022033e30b0ec8971c258469cbddcf3601e68532169f5dac496a66f0298a5c310f8b 3045022100acccdee41ed68ebe5363c9e87ae18e883a95837abbb17f99eb5fb68822e015c202203435500eed25b274315df83a09b397f34ba3f1c8a3dac0f05876bd8f344b95b3 3046022100a9e5330fa99e2038e0f2877c75ffe6e8964b8d791848cd25bfdd353e324a1c3b02210085a56c0366928b32b778edf8614bacffe69f6a4a5e4ad76fce6af722ce74d218 3046022100c361fc20e981529ef4e3e93c8fadf64ee8a98f49baf41f28b945d220f3930f04022100a433f59cb914e528637805b5968d7f2d7312b6c296c3d25976358952e9b885f3 3045022100f2c8fc36f9d398b1cb3aa11a893301ddc2abb8fec91892b366c7e897658b8e32022036e65aa7689fdb697427a705fff1ec6095774b3fa61034e385cdc4a5817494c5 3046022100a44948cd4a99b482d5c4fd509fae75b3025c79974b4b63bc187b425552bc314d022100b0835f6eabd5a1f0f04e44febffda818cf4f89119b0baf0d9ab365609155b8af 3045022100b716caea3e2d1cd8057a3cbeaf829510853df0f9edd4d58efeaf4dabafdb0d2e02205e5032d7c31b0b4f50b3dc0e0829f09f5227703b551fd25fdc123c12fb7f8195 3046022100c37b2abde3403c7bfa4758db4ae9391029ff5b13ed047bd24ebf032af52291b5022100ed2dae455c5dcea4fc6d08222c74a3a3e2d6b3b2eb93dc3f94a2add5e8b8bf08 3046022100a8738cdffa7300f341154153e8317c4b7dfa596fa8b4fd40e0f5131f25c5630d022100f662127518ceafbfc524aeeb82b31b56a968739af99934c67d6e9ef7ac5bb0f1 3045022100a52940628acd3b15401b18d19cab4eb2e6d7b126a5c6edad2b012ce383b5ac8602204b769febfa075c277bf9cdfe26c0e65afb2665469b23d8dce201602671c76d1e 3045022100bd9f66b1bbd1b70cd4a5f7ba0528409a83865055ebb7b10d179bb90e19cd8d02022030ca58e48c4be21a78b61d66169529e5211945b6ca378f962aa613590ab7a0e4 3046022100be6a76ae4cf286849c8944dea0e82bbb3ed48a10eae68210913727d24be2ae78022100e6cecf4aca977ce8e09211044aaf6ee5698474e7c141ca553e4e88a288f0cb1d 304502201334da51d0b1107ce5f682804845bf9cc03100fd6f9abdc28b3fd3402dbbe7d40221009e1f7fb2156485a3884db13d2b377b82e61315dfe8b72bd871a251aa8186135c 3046022100a4837faeb0893f5421cab4a579f7eecbfd8cd7cb06674ab68ca3474ef83a0ab2022100c5d5c38447c81fbc73e523575f0c7d85e3080491aae55bc61c397d4d10adad61 304402203d8b39353f7e4a168e730dce7ed5f5b39bede73579d0a23c067c4ce07a13c091022050e1d1571096f5279ce3da4331fc8078567b54b7a8acf96987067e624b94447e 3046022100b5af10fb094cdfb71de7d6ed04971c1fde71198fb9721d429d76abdba90066f8022100f77e5adb415c2c6fe2628c590fca480f915b3b061b70d01fbe3f9bdac014614d 
304402204aff31b481013608566e29f38f4056121a8adf73af5e6cfaf16c872d273622a102204c61b7db1d890e5044ebf3c956d73a518f963833a3fbd31ac71e90250eaa79b1 3045022100e1a196a82c75694700011d54f8b2dbe5855c5358b8a3aacea4b08d2e1cc1649a02202c9f289ba8bca1fc200afbdafe2764003bb7431eb6e2c873a6a1915dd404b490 304502203ec7e33da4da4fc3f24fc6834c5d1e57226f7239dc232e222ae8e1fc43f34e53022100f0e98a49340b6f25812f1e1e00ad2a5c2e0d98ad3fea9a839549bf88a795d23f 3045022100ec85b1d4d6639242019bf7bb8778ac63a9907c82f2b0592041a572e12ee3ede20220062ac682e2013d5ade20907565499b2f9d92aa60331303790990cbb641b6eaac 3045022100fc523961c915c38586648601e4a873195b31d5b24cd9d15aba4bd60a9bd37a490220035bc26455116de4528705c48e7a9a0a430d6a3e58c3e769aa44ba0a3e97ac32 304402203f0a3dd58d6cb80231a2192778d0379c2db5b8b734c52216634f0501ebc362080220039671b965467f95e08cb908662efeb90b0af4a514e8080880de7e173d74648f 304502200e50c5f492d03efa9e45e4e45e40f8d540a35cb6528004c9ae983c7263855f0d022100bbbae799d6486ecbfcb829a8a69660170dabc89251f553f2289222ea51d0c883 3045022008778f6d8c39542fcd3dd68291626ac446d0f523cc57de988cadcbadfe11276b022100ac98d69c6b6c7394deb88b63249bd4d46e85788486b2ba2db7feedf4df554edc 3045022016bff5293b5d2b4ce31c71702dd0e6084e04fa18bbc05bb244501dfdc1f3b6b3022100b13fa130b964577ff676e9da19014462368a78568ed862974c07a64f52ff2f24 304402200f38f24d6c441d67091605f8a1c14e42a6b782f7c0ca9395a9e6e644830c75a802202aeca1b2a3172fb4692e6cbd0d230fd485fc81e70e7c8ab62900c9615c8b8601 304502207543832be03f19bcb7e391c291dd0d312b44861fa093d9ee2b362a79d9794b57022100cfdb25a78a4425f18cc0c3c2d1a3fe222c0fecc9a248152b17669a2306da3bfe 3046022100802f6649ae690a1c024fad9e9d532d73f46b8400f2cf6521f9d5be1b6b9579a0022100e76d5306c1984b4a25a4d3c41540970a44b9970132035e65f2e57f6a03f67901 3045022100ac5c4ddff86b352e3688fe56e5f07a808bfc58372b7d695d0c577bbf5b1d89e002203e33765c1e0481a0fe4b763b5f6afd19935b7ec164c01221db7ae91c37e6d51e 30450221009b8b79140da9626dd8848678c0b4ba7527e54c0f39fcb01fab6522918ba3054102207119fdd27b236e768ebf699332dfc639623b1a78823ee76167a138ed460ce0b2 304502205eba439d906c4a7dc3fede5dd82bc55becb071a99352774af3f63dcbf5aa691e02210087a3d0e8ede761daa4bfcc734ee9a469df32a6b16525920b9c27e3719a3cdb32 3046022100a4a187d99b02a0d4308d3c47073bea5b35f2ee358e9b4970ac0aef8cbc969f14022100aba72daa44de430b97d366dbae3217f391d1fa571190bfc6d318ed7c8334dbfa 30450220401205edfa258586427df21994d81f45772e8a6b03953ecaf5d2266463184e0f0221008ed8c51690cbec251f11125ad5d9a019023f23403eebf8224473817e9c57d334 30440220121c466229bca4beedfe33b0af40f16ed9a585f2ef2d4c5216e7ae44a759c063022047d3dac2469de7ce0e969e13553cd765cfa8c13393af7cbf9c7deaad54cd9f17 30450220643a9f2a7fb75c748862c8880ed1cd9c769ba9a072700933fb65ebbdf669e07e022100dc77fb34b3b91339e1ea4d553d83662203446494c5148f54bf81ecc66d26482d 304402203ea13bd11e137d63cbaad804ee776dee620da496f5d9b01ed8deba42b20555f602207838b74dd9f70f7d71ae59ebcc820e5d358ffaacfa630febd108513d8c148ca3 304502202bd2c45defa9c87d9312508e9ad65acb06be8c3d2bd345b89e46cd78ce30e06b022100c40b583c98ea8754d3874aab603f80f845d5af548649882f46857c1d7e8fc027 30440220358d045827ca41f9dbfc6d790ca43418a91886d069eeeab14ede4fa41f7705e902204b6e9791693e27667ffcf6ebc06eee662482632629e124a463c6158a85ef8373 3045022100b1a549b8eba70dc649c2faeaa227dc51f0952b1a0266a2c6452e829d9e8b83660220298bd1812819c8be98626ee431856877c6a9f536bc736166362273656f61ff5f 3046022100c46b1c85500c494e271847ea95c4203651d1d770743f22df3d8b7ee9631a24ac0221009358663f4bde4c892c20d6891d6bf93289f15947d6ef3ad34aece3871805c3aa 
3046022100c4b381580a72710c89ed326d11fbd1823b6dbb88f4dc79ce079a980ad06318aa022100dd39a06f49f344ad3656f9b772c85c47b6c337948f566cd1d50c1cbc1064eeb7 304502210091a528aace2cf7c24d4b80856cf403402189c84ba2a24224c88dbd5f52a612120220248fd2d466149d4456d248ea78891e90376434a385a1ccd399a3e7684fb53a15 3044022054185bedc63fad33dec9073a0d8c0270e3d27aa19ad051a12f67e0661841a5ff0220591b3207b6b3dd923a87d4e5ecb003a202d17ab36b3d358e6e27586f31aa7cda 3044022071b7547f1f5d68f6f578e65ba3744580e26ffd3c940cf96bb38cb76e63de1a10022069f3983b77cd6e748695e66011e8ed546adaa6fcc2c42fef44c540220872d04d 3045022100f1d20ccfe1437091f7c9a1378e627fe6c30dd0fbe2e419ae1ae70403e5fa051a022008a61de8a4a4410f2c4a08f05e605d786f3f599a195040cf46280899633c09aa 304502210090daf5ae4c77337b48d14751744c734bf74f8bc891285307bc932d2ff8d4ebc2022041ade7a537fc6d766d0532dae473de41d86f7920f8481d698315c732c1720a55 3045022100d6266aeee95ebede679adc787bb016f162091546f0cbec606523e64275f3e59c02200ac09b97dc37805bcccab3257048ec5cd72ee7647a3fe4869e33fd811d750362 304502202142801098b7f4d3aa5f186ba072209c66365cbb7adc529a1cdc30be67f8fd82022100b7d02b14eb2534884a2e5b13d9aff7f8542d5afe670d44adfb0fe93c61224961 3045022100a88e9ecfe49339e3c1ca9e2aca7e100a01323a3439a37443eb745eb272531d9502201cb95e0b8630d2ab9dda06eae15e6a7782bfdaeb7a49a61b0d5f30d2a959a204 3045022100be5c0b0b4181f0b05bcd4c1b8ce2a9f693de07a2523523150c7cb792f33d6bd002203737fde25f7ce17b0b97ffa9e04bd4ccf214d69b371ec23f436efcb748cd964e 304502207d30f8b7ab26042c1d3c2548947cd17d576324f3bd5c3eb5a2c29114e74544af022100cf6828772d9e0eb3c54c75e2eef8a6a662425a1f636f7a6088d2d77451b0bf8c 3045022100b689aac019e289385d9c69affa5a9ccbd6ca260b648d174c6332c88fa65db73802203ce4ea94b6cc73115ef092fbd042a2e7797904b092f53bf05a083c8d7786d759 3046022100da2adc3a27093c276070f4669169a3902e5ed80dac92c2e4e6f9aec3a4de433c022100b2ffba6b69337bf0d7ef3ff5ae8bbffb3cf82ddb4aa63193a8d4edd2b686dcce 304402206c12a567d65d03cb6b01ea4955eebc4ee0abb962794ff496808db0a2bab70b8d0220082596c145aa9dfce900d05c7fb555246275ea3a853cdda62539cb2b4687fbe8 3044022029b38de45b30d13c04f1d66b5a995768aef94ff6ac485fcf4c28bc775a0e6609022020ab3a30b5e59b75b5264881ebef78c1638c6b35d072bb6745498171c549ae0d 30450220454774d9b640eb2a78d8820c743d3955b6696e29a812be09fee99b00e74cba6c022100fe94cc7fb2ae107293d5b0642d9ebcc81bb05e1396ba13eb63f396f3788ac624 3046022100a46ff7f282e28fb1d167555a835f3369d8337fe00894c78d9afbf15107c428f0022100c4ef4edf2a921d6d5c3a0fca7c6fb72483d52ebdcf39db7e1a3273309e806831 3046022100d7de1db6d00f83f1146ca4baa0c44dcabfbe9847f99b27c40fc7a79e314127e7022100976d81aeaf496beff8c5c9f1f3d48f2be7adcca70f2137e19739b5854f9275e2 304502210097170da8344d1570e90fd3b0fc6e65e46ab16f2cb57a951a6e35dc6a965c68970220220e705a8c55b668034d828de86449165be7f45833f80bc65677d7f1f4426db9 304402202e36eaefe421026bb08173f278bb7eb886a16fa6fbd3563780f2aed70e8a2d060220614cac709481f6305714956ab6152576faf0ac63075a0cbeb15376b64b3abddd 304502210097c9bedf0894c667e2385bb784a36a32da85c617ebe2c609dab21043fd8783d5022075c963b33fcdef8274c0f40f7955a97d4858bd07737fe86e36b7a4b545afc28e 3046022100bdbe34d7afc56525689354c271072612a158019f7da8bcebd36844eba6be3853022100bd3d679915fef9fc0703b6b41062bc2bd42256cd9ca3ff3592cefe2bd6789292 3046022100ad67621f3910f85c1c06edb35fe80747b0c0655f6e8699a0e88120a2e0ded7bb022100d1e96930fd970fa043b4aa3fa077468b2b0d385293d99c80b8df70beaae9a9f3 3045022100f9672dd96ab11709dc779139ded12f2f54df8e8d5f26e34eb3b2f69a89e67e4302204db3d0578fb42f9e4461a0a70116a9ace782c255e51a830c55243ef7913d3523 
3046022100fa63d031b99848c6981799fe2f79af7f0ad2dacf9660be2dfa34ef410d8f833202210093b4e03b80095dec2973984e4ac24169500ce69c85acb403b0a00eb6736fda92 3044022073d36c601cfd5b73421c9ea85903ea357634a0a305657f21579affa92459860c022009aad018c02f925260d12df9e813c9e363461026b4566443bb3f5f821b3fe740 3046022100d257d7469156d2350052211cbd5ff03898d2331a51249beaab5ed176c92d54d40221008a5c19f5c45b722deab4531f19844677a29d5f8d7efa8aa4811d2859af257d91 304502207bab17b59d1b13dea651da7a9a6562b8acadc0497b9a0e41b91d80641e4bb002022100d9ebbd2d3d9369993b9fd6ef85e27dfb8e460132a9fe9e5e735f1041c4f19c86 3046022100cc56b2b050c206834c4b5876d3ecff69b59fd0b97d9e38ea27ca797875ee54e9022100ff49a58958e744841d25646ab0ddb8a9c5848f1417ca974a8bac4791519a0de2 3044021f124faec1d3454e5374d7c2554e843164d44a44e52ae45bd8a4fcffea1efd9c0221008e28149917b99c6ef4a495c8949af7e0df01a5e507778bc41968c1513de0d02c 3046022100ffb58f09fe2310e0adb83ffa544b5b4141e45c64e5335e8e9ee9a760a6fcbd8a022100892f98e31850665069c04b8796abc68e00dbdb9db03094b2206998ab2c902a36 304402207df36653d1276905984c1af8a32bc2b157ac521862fe007fbf2f6fd9e5fe5c77022074704669b7a29e69f2ecfdb1062dde6879162affb344271d05f1ba2dadf21b50 3046022100a0bf25b4a5cfce51003505f109ee87fa925fc1298fed97dec1a88252401d158a022100c3bd5f237141f55e54aa4b7ba7fbd6250ff6778ee52e32c5bf318376070d353d 3045022100a25ae9a4ca89bf57ebc6966c500e3ad3d2f7392e448b9193447edae2a6216b03022020f6b64d7ed3d86673b3b90766e1e9d2a993ac534ec323e70bb8115c31f06b86 3045022100d912b59275fc589e0921c4029cdfc7b1ff865456ca045235c0b748a7560a56190220596fa6439da3b8c92ba257172fa01fd29ce88e0686937308cf7305bff5cd67ab 3045022100a86b15f9204bc7f634cc2531285a5de0bc2e2e580d6be7fb09001f1d97037d630220661d1bf4a40430c205cfc3030a08870e11cfb227c556d0fadffbe0d47e45a4cf 30460221009bbf4623df1b0b8ccaacb69414c7407565a2e67dd6865eaf4ccb542df2774272022100a9ac4a230f14498b346e45da42f01ffaac7842139553645394f2d7993483ed96 3044022013d00a06a464c0c3a0a6f1a5977339db5a235097e7d7979f517507c8a6dc56a60220205d22abe3bd492bf1298d53a4223ea1f660009f221ac1968da5b5ff8d5e1e07 3046022100e114b1e35265f4cacfcff738a77596e140e2feccec16178e28e963a9f9c6bf74022100fa14e1dd05aa8bc8c3813c12d85773ec5590ef4ae061085dc6e838b423554340 3045022100890e9fb4a11da5b8bae8c454fb0153c92251ca24aa0be4163fe29e99be4be6de022023e5ad567bfc39111e2bbe3837c98a36aa95bda5466b7c75ca32b54aaea1dc7a 304502206571747a5b1d3fc93d797ed7312acdf3c9f02ffbbb4ea1a9da5faf4c6d750b92022100decfa034adb16353e3914c35d70bc64d44adac4db7c9302f94fb5dfd9920b88e 304402200bab36c1884ec1c410c4b9295a382bd333d58d7290cb0fc652ef3c9698dbfb8202206aaf7361476bc61dab4071d5414f8ccc77ac2cfc1eaf8291a662f0963f984c87 3045022028c26d2d7a2caa17ee5db6c4e3d541e33557e317e6a10b742c1fc04392911a5a0221009df5a20ded192e680fb8bf03f61c30ded72f17b0dc2bb156f6958836c5e7892b 3045022100cfa1baf45c0812f19073b91c5abbb119a2d3be1f280922eb73d3fb8cee045cbc0220219273cec648c1aadcb55bae147260a9eb2acb849364eb96466a2517cc7ec325 3045022025fba6fa0c4c8e24e70aa78ed3b3cec947104ddce978e476fd49a000aa569fec02210096fe378a1e89e6fa17136de49b006e0ba5ca73670de542486b0f5810ca0df3e0 304502205c29e6d6c0bab69c22f4c22298b85bdffd02e08faaef1bc8fab239008545f37b022100d4d7e5c8fa750c8779bb6611676e42f63bb8238acb2a9e9a8aa9853a7a5090d0 3045022100c4fd62593ec96b71bb9eaf46544125b525087c79eff01afc17a8c22f6cd03b5c022031be2f3162dfe191dba7572b820bf3c37f519555aacb56a59bb7f5bf07a16176 304402204155606c4f10ab7f7b921e63ec0087e67f5dc6b42230255e422242cdbf2b91df02200f7355d5d703c52eefdc9b0c0a896e5676b682d998506c2e9c8159e6553dddfd 
30450220161a44a1673e90c71d5c445a18175b17792166473b0390279ab55a9360d6a81a022100d861fea37e4c0b386b41ac4b1ec38103ad88ab3e067b24db8a2b153afc1e617b 3045022100dd7098c2acce9b2fc48606bc72a7f0005569dc73a0964b26f30b6b15a6de8e2802207635b076301ae5e0346cf3299c9e4756c5f9f019fb34254a51c4bbc13a1ab3dd 304402206955161e38381c18fef7ceaf6c4456f6a661c7aaee6d391957ecbcfc06a7bcd302202ca1fca01cbb9436f5c36c91be7493a2569767d58ae9b357366a6d545018b5b5 304402206bc34ea47828250c867d654abdb7e8b9d3d3c179b0294092ae506143d6cfd35302206d30d5775101af6dd1410f7b68b253ad3931d5efde48f89e66080801efbeaccf 3046022100cd7e19d18984bfbecef555e9caa17d62d2cae8d9bedcc1afc00394ca82c4c880022100ebb325820de48be84496d490f849f60e5416567efd1d50af52b0a18fe820775a 30440220488d78812cb2e59763da7d371f6a35aa4ca99718755f7323f9fdd3ad74f0e093022025cc57a3ad4037066d29bb30452817a69f0e17dc01d2e2fff608222a09fc4b03 304602210090bce4c535262e15712f8ed4ef57d14b222647a77d1dcc71d9956a550d6efb73022100e7c53221801db29c90b067160fb59a4441e83a5ae86c6226790bdc697cfefb33 3045022100a87ee6de5d0d4aa207962f42ec23e59ce9bea5e731d1b12790f577d8a63bcd140220746cbdf1facfcf0574bd17145fa41054c1c35f9c1e8669bf2b21175edc8f3fb0 304502201481d95148ce8bd5c1cb6570001957fe2ff5a4a01be11e208e83801fb7c5cdfa022100e5b29fa4aed5851610627ca7ad1894c06bb376cb3492d755fc97779b8beaf8cd 304502202dc0c9d20caecf736e61d2b38505ca897d2316a0991e2fdb7ccaecf76798f846022100e3f4392e93ec9c620e9cbb7e975d58aff083e5f06db6a8b61bc9df73eb2f9806 3046022100fe932aa02d7ca7ed752ee9a0d9c086683d06b450a6faff575dee1986ee282f14022100d0e233e753d297c59b53e2e16dff5e8702a8d91d00b977d86dd247d555a69e75 3045022100d70ce30f20e6135d3e3afbf3a6b3359b18a23aade10bb459b4caa85403ebfbcd0220658c2da5b6b0f7137566f11c17bc5cc1a9ab9481106bf526e7cb22774c49228d 304502207de3a69ecc273ab7911158ca7ada98615f804a8c0df5678d27a90025094b9c6d0221008e0823109954c861b4f30402716acbd10d901a70446a6952b4c6e8ed7d14e6e2 304402207ebfd97b58fea6881ea164bcfd620a21d04ddc3b5f98b86d34fbdaffed5009be02200acf55c1e7ef259e9a83a76e60c5f0e47cb0cb6a208e95c70a4aacccf7a6459c 3044022022872529459671511a91a4fda0110e206a023a4cbe6f65ce3bfe778ab02374e502206cd8a9c165e025be4f9083d8c93ff961c2afe2f5f224d4545aeb74f2429b70f1 304402205624afb6dea742629b022c9e52c1cd53b51d29aaedf6ae7b306d4af8ca08612b0220715bca126b7a1c62f1ec90562ebedc49165ac1dd2235a930fe25c7de3839e470 3045022100aeaeede2c634cc1b995d17c6d318b0cde53e43036319a4dedb468c2e946dbbe8022057708b27b00d98ea6ea5540c7dd23426a45d1ef17c6c0b2a3d1bb731b01b862a 30440220210a6ab0e77f141bb9b6d0c65ee40e36de367ad33d69e34a5b9061f13c3df3ce02207ac085be997f373cfb834703c478855e05950b90d6040320c8b682cb60ac284a 304502206635955ced31519c31e0db31c3739f601526bee750274692295ed0b7cd83f80d022100d04f9e48f096bb98ab12074e37c6f4fa4c02384509b686411bb38b86affe5401 304502201e8f5ba041a4d14fcdfea11dcf120704ecfe3ddbffb20efa32c5420fc68b9e73022100efea566e19f49fff58bf193606bdad5dfcd3aae6ee4b5eb81fe7b99301d96a28 304502210086addf61ee5def48a37715c8abfb5a86e822eb3b520d7fe4165899c459e6fb97022030e4c081e57f009637d7aed6a7b4839aa64c09526cbc2a201f08de2d757826a1 3045022014feb5697c27073ae5ef87d49faf7edabd74281a7d07f40af5b32d2b81a8d49f022100c992c46c14d509d09c08161f74aaa9df6511814523953d555219d1712bf7f8cc 3045022050c63e232ed01ecb0b20e46779ea7fdba54978c925f0abc4d19a79e71625faf3022100cc060da21d23d629732102e6a71f3c95c5f61f7aa981805c4ba573abff395e2c 3046022100affe4f760d0520abe4c15caac4c1bd5a55eea3c41561d58d95cf264052bcdce6022100de56b62927981e87246a1bfa6b0ffa23360eb2f8fa4722dd8f0dd9b05e52d914 
3046022100edb992cefee7ae3bc8af87687bd47aead3b2cc204c4529cdbce377bfdd006ffa022100ffa3993e3877441973b26dd24e7b846010c2612f0d9fa8c9cc47f6de1ecd0ce3 3044022072fcc4b93f1edbe157de605cad24ebe6e4abe165260db76358ee08348f01a3c2022001a59dd72e9e0528f328b8b4bb4e5e2e180c24d4d328cdd9967de1aa1281226e 3046022100a904eeafa9da1bbbefc707fef7a667032606ea46553d0ec2015ef2af9e658861022100b2278a61dc473956e0e8fcc522511ec55fe733ec6c8a21757ceb08424e450c63 30450221009841023ab30d2864994271b670e547461eb2508c112a891f9e5946091cbb342c022078015341359b8d27bde39e2a093a478068f2b3a7cdd423e9bd9693f761d874a9 3046022100d48d76c2a17559c2bfd55fbcacc3858cf8a5bfb1b57412d3cfe49ae31004be3c022100e91cd29d1e1cc285ece50a1f9b56db2aa7d64b8f57e1755978ad9e4d188ee1f8 304502210085f64c51fee1652b8007baca98f26f70f202a291e3cb7b2a9b0ef16c52eaeac402206c42789bd58f90fcd8ec247c43ecf7ca27856c6c55a772c9d38f901613dac217 304502204c0d5d7f0bf1641bdced78b20d3bab598d7b5eed5f1ec8cd986ade6827ffb73b0221008578cd5e7d0b013e4917c62180c99210955a50d5ce1aa2d3ed1cc53d5422f3c3 30460221008a18b12d57fed3361b9b7938cd1043d6f20a140aa7281e192b2ec54009a8c58c022100f2f4ff50dd9aa63ba9fe052c097d528a877258d311cb4260c95b438e47e15dd9 3045022059771d80fb983ea7732b7784a3bd579bce3d2b062cdc7aea1f780262bc523830022100b7e4452ed2fd7191cc763f4092eb93687b9520c72e1064d013dc42d9f5766c08 3044022005f8124968b7c46af17f09220b661aea12f72cdd572aff1667840701bf8c6dea02205c0d2f0488d136d049256bff3e703167b54d0b70f1e28a36bca3de773241cc85 3044021f424dd15a6866b6eab73d9c626c31c97f104e76c81c0f06644860f3bb9226e4022100a4ba54ae2f293e74b2b0042e7191b9c5c4db28e8c66d920703babf6ae4c356bd 3045022100e88fea39f1ffc15b720dcd19097480af9473459f467cf3af2f43bf149370569d022042d60cfa2d4342cb4d17627569de60234e4380e781a4c7a0faca0734628e6aaa 304502203ece7b13b504eb13ae5b3dc705cfe6c54b8ff2dea83f5c2f40dbd98dc8870c8d022100e45fce643bacf63585f116e515599bc570e14cfe53aec97917c50ef2f6667c3b 3044022035da8cef64bf66750d550089a2394b01ae3edf08eba526aa4c5f2c373d2ec29702202237f77c4fadc6751f3e3c539e88d000a7fc579ca612cca61c71b35b964894e8 3045022064701d91f63e2c35272b48e05182f0a7f928c41c9edb3304781bdde3c52267b3022100a09121637f4b90d92e0b22e431dde5992fb395977c8777b5aadf0f4817bac8c6 3046022100faaef6d161376f926d09385fa066448c754530e6cd563da4732768626c2bee2e022100a68c2c3ab1bfc98a72ab9a677e9ea16d33c782159e7abd9642e984cb98dd59a0 3045022100cfc9cc0d5fb854906ca3b24fbfc27c43e53932381e8cce91ac6d438f3dc00496022033571e0063d9ecfca2eee448eb76906de652d92d1350e9248066c28c72695650 30460221008625cd24662650e38399868a88010f11cbd60cc962815c274ac4641e40ad7545022100b33e53ee3f29c42ce94ef4c068d1c26802eb058996d4cd35b7cb7868dbba0ee6 3045022021c773eb07e616cda631640f67b8208e5edcebba77452111254ea5e082e1e59a022100af0eb7850f2ac3442245aae2712a12766395827f40c19d4f9acd975202783935 3044022071925b381fa26473d141af2ad388f32c0a1a74101ed9055e16b24615c9fcd72e02200112be1528f84e689c8d673c6d956455802b9ef61dc4333791057944550c7a7a 3045022100e6befdd8258a61ca21588f4c9989a7e6945fd16c75d7a2d793d57ace44728f8a02204d7148fd05578a1a9af872ecd95eba80c0acfdb9fc68289a2596e8e30f67a2bb 30450220062d85f1bb7183de23d0b78cf3d5257bc5691c210a9a41d0776ad61af5fcebf0022100fe48f19e647a89a3c06176085dd2b6579345b2609561abd100731af4959ce12d 3046022100e4fd11503b1f66e74f8a0b50348cc4477bdbfd5df3cb318bb1cc364d9d16347a0221009ae4bf026593a882fb41d64c41756af332736dec6a81b54ec0df289ef8e1ffdb 304502203342fcead73f5b410402ec2a265b0a6558cfb2b90e63e1cb2bcf58a8f71c2bf2022100cd394845989a9258fa16a2e74dc07a9f7df99797682ebfb582780f1c148bb51d 
304502206a4c1c507b4fdbe771bb072f9c04d8d5520867c5e57f7b580f634380ddba46da022100986396ac9ec15414e7a2e7058fe3bd4aed9b4391c8972739800ef688c4a4b93a 3045022100ef4862239c35e8187442fb5f7394308276e4b30346d1d8e8b9fef6d46a24087302205e0f2323d4669c7c5389e414ff7c68a7d20b0144840a9026f80886d73c0a01a3 304502203363dcf4fbf33ed614c0296730cd7be8dcfaee952c8b876b3b9b425d6a47f287022100b67c5da5054c687b0bbea80bd3cec8024f6e63f7944e8c6187cddcdd1cffc0f5 30450221009a567eaffb9a6df34f2a5538a4e15fe1e26ec6e548c3348489c1ebb1da12735f022017568848a41a29ae322a2d9db8f69b7b148c1891adbce89011eab7aede634b28 30440220176dc3b9b22f6bf16dab7c61b41cfe9d4132af553c08f03ce8907aa213a1ea04022030b0103036b0657980896286a935058b967f7b04f192c4568e5a6438fdb24315 304502200b9a8b80fbb005f63e91f79ad1bf0cff561ab6a3b959bd5447de3348c9b5a459022100af4ff46380d3d6d7c6bb0b6e6bdf5696e6eb6ba3dadf3f88d23e5ee7be35b7d3 30450221008c2c1fee4a921e717d02774795f37bcab751e0b37116e7a8e5cae4d967d436470220732e41611c0ea82502d34fe78802a28801ff1815252d71473b92a7bf87de8979 3046022100c0d053d6da4b1f4eaa9ed9c6c5b0828bfd86d0e7064e338e6707e1afdb03fca1022100e398740e70f84c1a9fc4f7e7895c4dc926e806eeec527bd10fa6982956519982 3045022011185fded94fe9c98305424363d90e269ffd939392536e3a955d1988d381fd9d022100ae1ae6d2dfdddb56a9781873f77b99fc6373fc2074ec27354cec16936311c90e 304502202bf7a4517e504d98193bd5035e557d00db8bb5be62adc9160846aecf298c50fe0221008c04e59c2deb99c607d94d83b108af21b6413058b3faefdaa75b517339318e8b 304402205528231131536e11198e77caba51096c383ecf7ddcd9d687fbf631da42cd371902204c04d34c503e06898e4a33e87d9ebbaddc3be0520e113f8af4c8322f5960f870 30450221008dd830568e108c5e65f4eb70c1fd66999cf6681165171306e6deb4493a4c033e0220643502115c0449d502d33712898681a9aa49d719ec8eec7cb63afe4bad28993d 3045022038013a1f217613325e1d37dcb4c943d6b9111e0f44e6a4a128ffa16be1690d48022100dd9beaaa4732729d56fbc5619fc49ef481a39e1e17c334250d996446f7ea8c4e 30440220444866f85d28391ab8d9aa60fea7ae1700f37c1392c9b84bbbc42f05aeb0a11d022019c69eccdf1d49f58e67d566986acb96f2bd76dd737e57b5edf75fec7ce40354 3046022100baf5b2ae5742cb6331a100e664b882c8d56277ac03702124f65a9771b94ecef8022100dc16031c6d9cdfb9085d6dacd301150bd8c00a36753f53915fa6b632f2de6b2e 30440220647ea767ebe31f5cf276231b0bee26577293b18e3c4c59bfeea24195ea5fbe0102206a6d312c5600d9c9290ff39d4c140af9795a47e804811bfe2fcc9df4bd97e97c 3046022100d4c2e42c398232358c08758474af069fa54807421467a33c1160c48f833f9b89022100b4b85a4fc2913d67338c467d12a403bb00e46aef6440df22e5f983ce0ee077bc 30460221009633cb0c857e5d6e757054ae6aafd970baf59e24e0825834b93be1c10f5444d5022100a7d314c6b21ddbcfed4528137883a94fe28bb5d50388a66cd9b1fbb56582c0a7 304502205d53dae9465e06413900041bb48200be9c7807356ca318b8041ffd580f1a1b7c022100ab056c842ca094f0d546c7066335268e0c0f65927e1fa559450a3e67329cde91 3046022100e337ff906494763ad5d36a81d5edc8c4a13b9e2eea7f33ea3c6e572c7a95e963022100ea2d5789fd22ff460543149d369b0bfff8375dbf67c62606cb26b38cc31e8de1 3046022100e0218f9fe25bfefd9bacfcaa777ca0e0f5cf9ca4c6fa96fbbf6414f082660439022100dc5559d0b03585579aa35ab5260597862367e166d05283917ccffe9c53b69a5b 3045022100d0046ce91dad210365512e29bff1935bc8b9c7b377b8c5ef3f158f815d5e679602207853dc4d007baef626b28a31b7e40371ce468065e0b6cb65fbb2ce784df953ac 304402201cfeda56a4cfa5ef52fc56be0e495d9967592dd9eaeb415110281ad2ab5200600220637b36fa6b2bce081d416883763a6a9a112f907a906b153b85d540d09ea2b586 3045022054c17e30d5b624c56ed2d4c8794a48af07008cd50b021444bf908d0a9d29c2c7022100af94ad2269c5c9eeb4c24d2b94b0e1d4a329efbe75e5d2e5c04d5780d4e0e849 
3046022100939e46213fca649d97aeb4edbd30138c5fd458c64e0686c9585092fe9a2a7466022100e5b86a3b15b13c165ce3d0ddd67ebc93d4b1faf2d153e2eebda7c115758da092 304402204cf40d11fce0ceb741613750a3b8f98a8439f64aff61bf2891066563b1a4e4e902206afdf9055704e315b45ce21746dadda515b70c141fefb544633bbd1fbc7e6104 3045022100f526acc1ce9e4f3ba4846680aeb6844514bcbad0107307b5bff70d5aa987e8cc02206332fe3e8c4520de9b9059d1088ff0a302c51b90e816866a3c3adaed41253774 3046022100f4538497da1b414f045b2bf6b857633fca3d4d858e34f29ddaec5e2658fd30c5022100881c9e0926cf125cb502e48925038ccfaaae52c2708378682989d6f6550d97da 30440220452ced2fc7a36ca4cbf69babc835e604025be825dce8a4422830ee66a10719da02201b51b02bba3d9255ebd5496041d966285d51f518c56c5c24a1209922173da357 304502204329eae66360d69ac15576d4662bd26c149edab79313f697210f2ee3391803990221009c82fca543e7976f37b7d23630afd7402cb0440682cf4613fb72781ccfab269d 3045022100bf1829fdac5515bf000fd43dd77a4653f9d9bbef6909e2344a1a9610925e9cf902200242280fc9f9d5298a3f5b0be653da6c69df0ad2ee8ddb301c7e8e68001ea385 3045022100a0645aac84819378bbf5b6c142fc06cd20c5923e78d95673d86f522d094612100220010babf025f484b91b3b6a01096e395998065d06b8621561a56fd3eb240479c5 304502206b99bde79b4f7ccfe24b0610c60139a53c6d9a0bade376e556e415b4fb17961d022100ae643bde8e0ff86aa2e4702e271ce85a279c863ba399427580ea3db879d3ae75 30450220470aa420867172a2f23e1ee8259e4b02523d195314f06daf67907d72f0788094022100b57fb69816867e632088788edbd8057c396540a06f932c7f63190004ada71bc6 3045022100c8ad6e08bfbff2743cd3ca5e2b71950a810e4c7c112901589a3074068a37c01a022063031dcce64d456e86e9da0f206e5c040ca20bd502fa2fd576674365d3564320 30460221008fc8ce78e38e0d6bc04ced775fac3f5ae9f383d78ebbd92a88a182cea66ff011022100af8a670f237aad643948da8050db6cc9096256e30877e647ba2b00bc62dbc411 3045022100c926250b9d072663fe804e244f28a3c8de846a08143e55c19108cb698be288ae02205b32f5095d7d7d04d3b5e80fdeceb4113f61c509b5352b6685171ee20a937743 3045022100dcb63d27599c5ce6145065c8d90e5f55d0af5e4001415ac85298f2d523574dc202203ddfd7060815007530c958601f58561b3ee8211d43a848b8b11e1bd06930079d 3044022024e318e77572daa80523021856e26d410e68a866807788d2c5dbaaee619f1da00220457f41f3aa85b174fdbe943720bf6686cb8293ca3837fa41976743cad0e5f544 3046022100c5a1107cd713d19663e575e293b7ed17a0f91763f1f40c9a8e2e4ef71dfe43ff022100c227e2445429168522f7680e8d4c47dc2cfef6a519817b77d06cb2e6c0d47fd3 304402206a8b9abdbad9351c3149ecea88b0acba6470ce3bc153e885162e88ac67fe24ec02203f15280b0bda048b7ffd79def65b505973b9d092c6a84cc439011616ab1da86a 3045022100bcf57d2627f60be8288e58159c529500cefed1037b0f1c0dd7b15396f5f1470a02203f8fe34b410d6d98feff77d303701ea4f1007c71311f5e92fad881a71a4b9b88 3046022100b9aeca54ec76e96f1e4650ade0bdf1cc97862b9eae5821b33fdc8abf2c3cbe70022100f426b8ed355c656cd943f36e2c8303697290a541ec6bd7b66f30f98b6d1565d4 3046022100c1f4f1c08c4df492cb96c8dd9344f2be7f64271887d3eea3f0bba29e3263c0a70221008fd4e6fdcf8d80cd1f16da34aaaa5708bb234eb1a1dd6d73d28b79eb4c286532 3046022100e42a4c04e8b64fdcbb48dfe59021716710ccf056767be435f7ad0f30de11841d022100c83340ed0e3237e85981913d6d30ac7ab0c5de4d7e2a739fdcc8483054682f7e 3046022100a7dba283c2a67cfdf8a07d87ebd9850b27cfcec66716117b21b1f33bd3417dfb022100e44fdd671d1874c4f82811a1d1aa6e4d6840d68ba215ebdb00b0acfb838524d3 30450220577cf8d48ccf1d3256f28013f638a2bf5a4e2630cb329beeb393b2260dbef692022100ed3eabff7e036a910b3fcbf417bfe33f58fbb543bb11599c4f7a3137af8bed38 3046022100c7dc25513de43eef2a021abc0f7381fa5d9da4dbba842c8f28957b81fd8de396022100ae03835259fb19463dfd433575f29abee27c3873f296cf4d3abee554cddf8e76 
30440220238192fa635c6021e4a2440af22d065ad0cf7478b7167dc377771bf3d5031f3102207509ca8835edca63937245fad69c67120a03aff6705b5e0e35613b5c684e4489 3046022100fa29de9125f08c704bd502e1a7d939a0d1f2be63657448dcb5f70514d5061385022100f563ba4e25c912f85df771d5f1e1f913d9d354e7d02e32db69565c941626a5f9 3045022100ce7afdf03393d696768b3d765944e0efc351709dca00dcea7b2f67870d8a332602207414c81b5ad6f0f33ed39db0c7f1d75096d6ac3067dd5be8ba966b89ba9ee12d 304502203285853e687d8e1210cd2dd1f3fd3cfd8a988a19708cc349b526e1c74744dd96022100caf8706253902177f9fd286bac33150e434b8f7299f1be33276c79dea447c3fc 304502201b89de9673fbe71ba0c1780521bb58acfb7822fea5ba1239eee2c62b11cc5ce9022100b0004712c4e82090da20701e8ee36ca45d3ac87f7b1baaf88c66f0254edfbfc8 30440220471023e2eb7226ccb5eee9b41b6105e4659b3b24f5a85314d1be0173c725f363022074db43e913f7a224c4dd4264fd243e7e745c4286879fbd402f0bae45f2109547 3046022100d6c19fd3c27b79afa2654a7bebbcf7b6923cb1d8f4f5270833cc52d839985a33022100f38ee42c338739e4c8a7d6ac466630f779407220acb904012459b52673da2f52 304402206b8430455c470dd35196298e25862330c6ad56875c1ebbbdb9f4ddf943c4645202207569e020b2d5fa8dbd0cc0f2ccf47011417e1e9df3a0ed9537c72c92bc3855c5 3046022100a83c3d68666aa6df2dede7ddbe98a0b892ddd7f87897b4c01fadca6ceeef7ee402210087b9816e650cb0d1ec17d5c7eb84fd7328efcad76deefeb4b3184e2dbd15fa9b 304502205127b4f2b5b4ceacd99ce69a1f5961aa6e322bb48fe42a89e145a29ef5833c7c0221009a397367e715f5b85a2519a5a8cb43123983610ad1c06da7b20f857806398372 3046022100866628f4aa7038085464a53d6cc4ce41e5af97fa025943d5878991fc21e3feb90221009101298bb2ac6b4b5eacb600bc5c371fe99ce624decb69e75596c15bcae1ca7b 304502201cce7e258118839e7fb938ecd692ba8a89c2c5a4b6575737a60b4dd802072c97022100b93b3f9906068c42433ca092b2347607e8d11476b2e8c1edd56d84ebcfa52144 3045022100f5ae00389110cca0e62df9d059cd66bdc0de3009465fb3a8ff245345b2a7c99a02207e40109d66f22c41d6d63c1fe0133504edfec5fe1bdeb20bdf36174aeb59ee3f 30450220419717383407c7a3c8e194135fbdb84a65aa4fc7016754f3b7269426c117a1dc022100970b93911144f4794e78ebd015e04e229de0d7dc443d2a11a41e99af7df4284a 304402202001999c83b5349fc329133d3baa808f04c09e273d243a24688df6b911f56ba40220535c16e0a54ab25c57deb540b5a3b2c9a6addadc95d8485e0faa997be2f73dc5 3046022100baaaf05a7103e6be69cb08da0356712d22ddb6c4b6c66f4238a1f56a5435c2fc02210096955c57825afe917c73e7a4bddf4b4706adb9c14006e5bf79ce91d4d23c0425 304502202a0221aec21996e0b9fa7c8a81e0434ff384dbba6f6f4757170055d7184f8e6f022100c523d1737d4b1675d98a1070996674c45436c4e2865e965064fdb61b08871b33 3044022074246d7f37196d2fcd3abb17d8ae712ee831b3c3a84301e9208f77ab309dbc8d02203ffbfbfff5ff64a55bff8913bbcacee167de1f25ad1da9deffdab5d44c5f4a84 3044022016c438620457487ec9fbb000147de119414a01472fb5c43d69ca3e9a2e9aa3fa02206be1cd11b8d93875ecd7f6cb151126f36eb3d6a176eab6990ce0de67d6be73f6 304402203e14fc83d6622d05d2174d3125d3b8403d73f73dd74d89ae340ece047bc3fb2f022041da4fcdc39339c455de4fd3190f7ebb7cab0c0f40a8b0c46435357a913f6363 304502203cd33364df748b85fcbebc0bee3983ffd94505d4571f5426fb6861eaf6ae7b7f0221009eb4f1237dde9c1be875afd71dab7b8c98f59f350a3c96ccb9a2149ac2da9899 3045022100b684985318a1e293df8d3a27d576501fdbe16a583358fd035a502cfe83e101b6022003f345deff487e395cc3e4ebc52b97ad222768aacdc057aa9f38563530115f1f 3045022039fd74daff57a5d58b06bb1df35a4a977cfbd3223003844c41bbab86a9a527ac022100a60188c28dacf2ee3c228e2a3eca6c7f4e7e1826f99d5d6f34a116594ae62705 3044022008b957b39d746fe9cbd3fba4c7b10c5979a0ecf912df6421d0e42428ed5992d50220195c9e53286773ef3a37f3a3b12c6e791c9f4f65803392e20dc9415313d7a410 
30450220750621fe5b8843b9e45ed77bec083adcaf16ee0cdc8d55c9e158374ab993c0fa022100822188e920e3e8c9ac1c78a60a79814dd9755230d11e5c0c13f9fd366c737552 304502203fe947987e611b39334576c36561d0f49ac1aa016e77b7ef66b44e5497f87de702210086e3fd6ea82c9aa3d50792237fd9c8c366ab00efdfcde8080cde7d82414144c6 304502202eb31d3f40258b36e07ca5eb2e5bc7964e56c0c2f8ae1c1c4c985cf80d162e9502210097c440d229d7efe967c9f67f62a0c7b506c135dddd1e90903c429156b983113a 3045022100b7c49bfaa7a1513eb6fcb2b47664f5f0a2fbac84f247f80c425d2fd4f9982a4a02204e6b40c1a3f5b8f16d16da1f14f561563f260b4704cd783d4e363c87d9329135 304402200394516384f0988e15447a8e6d5397ec068c1649500965ddbad1de92fbc87b8e02206eb4b4d89f8f0609a2b74eb12440c1fe2104b9ca2579783043acd017336d60dd 30440220393580f96a1b4ce24d5fa352baba8c5991828d9fc72e0b1473af06ec344a40e302206dd082a4a734e421be8b8ada9c478c86f68cb9ebccad2f9799c35e576ed81a1a 3046022100a66527a3c1e5ed76ade777b9ea47c9ad7928457908b804fa42d4ec5a7dc1afda022100ce89778434cc615f92094ee63db0c8d5bd5346e6cac27a1be20d332754769161 3045022100925a8948b1f8b77c8a02b06ff3ce0dcfca0f871c8355fb50db9425c171538f56022030a33e9b9b5b7c4aded4784a0b9ac3c95aff6f4c2bcf0b4ed1e1245383d8da6d 304402205cea8b7a9fba12b483709696e99cbb3fe617931ca615c0a3278e39247cf8f9ca02205c2a155759def352ea8f4bc692028fae67c9d21d7c6f76bd5ee6c133cac0bbe9 3045022062568f0558ff002c213ca07d2131c63ff96e7841a0143c64e8d15233f52a528a0221008538bdcc5cb820f6bd1689a1fc61377df04abaf59d8a36287dbb04f85dea6d70 3045022100d5f0e90e8422cb9bf3973a15636a2b49381bd2c17bec9527fb2559f04f4a9e85022065652a6e4e37e40a6828434663b5ebbae7061f0aa16b5c4540c351dc8bf417a0 3045022100b7e913d46bc196a771a66409cfb41d0c571ae9f756d43a2edd43dec4d77c7065022057d00e105500e283cb9c3bd88fb514b2d31419e4a04f129c2697f86499ee4983 304502201ba0e40570ac4beab2bb37dee049ef327acb0cca1734dd8d54476b55a1bf22d5022100a820e3c3c22aa0d0c837c9b7de45d7302c2e016178c9f75fd6866a34b5a7a590 3045022100bdf463252673fc2f5510f8d0eb65bda5b1d22f2c523c3d4f6f78d434e3337b250220036df7e331a343c33ca39dbe35f18ee96bec41303753813268b727886dca462a 3044022039ece113f1024d7c5f7768caed1ec358681db57d235d50b05beeb24617d0a98202206a24ad9dfd756c5b165029091786426a57439fa729a9d411ae171be50fe2d5d0 3046022100b1c226d08508c23d52b1083273f7e09d867a9bf518e92df171636310c55182d6022100a43c8ef2a6ba9259b7342bc684aaf6fda448d36f2007da95f55a62e7853952a7 3045022011a3bf6fed04d29dc070ee77e8dd83687b6d796310319ab3ff71539fac0c29cf022100fd10ec49042c2123645794e6b61ed937a497293e7ad9fe07e9b888f65395d914 3044022021b9cdb11a298203a2da7e05b5ec324ec678705c294561d03551cd884d16a17e02207d326ab4981da6180ada0b2c3992cac32b427479dfe98a67829b7a8eb6ae0e78 3045022100b5cf2d49d5923d5dee5dc462938663e2e4bc4f107b7db1530a8ccea0066a3ace022038e8e4926f2a32d7c2f57426dcb952b5a6db7fa6b35ce4861df9a1b2b716beeb 30460221008f8b4929717c05bd8e06ed6b5cefdc04d7c8f33d662cec438c67877e25885463022100c038a40abb467e7161dd70cc94fd8158ad665ee735d4f0d30f4953d601d1732b 3046022100991235578c12de365ffcfbd6074ee322ea68f88d3877255b5230495b5cda1197022100b9dc3275093b126aed81f17038213d26bc3c3d9ba5ee8e41c88630301ae173be 3045022100d1844992d2e8308c8d803d0a5fb4d8320b59ef09e14786202aa9e188e725ba750220267428a6b443c693b3b9afa6d5f55e72fa91bba60fd2b17f293f571c6c02e1d1 3045022100d9d4c28d1c70db1099cf8009451bcfc02e45490d71551bb092b8b8fde18a6c6902205b824fe258ffe693c4ce1ba5ca4abe076e8cb4f334dabe5c09c0d6b11fdb8f3f 3046022100a1e0bd9a895cd786e752e47016b4d16ae25abfb88f6f138b9ee8e9e7af49bbad022100ce7966af7e465b3274193275ef83f1bc0daa8251bf13a691840ada9902eb7b9f 
3045022100ef99bc2485f7eef59568ef8e98203fd195515159c77dc67e60febb1deedb8a09022019b08bbb9b78b183f9e15bc8f16c50cfc17425fc5e1def42fbd3f063a970f208 3045022100ff18f51b4668e7d48c4ea32c32dea806a6a694797cbe577674035fdbe022d242022078a58ad18a8b9e2205904c4b5f43719b84893efd987c3532fbd7f88590601dec 304502203694e436018cd14a2e452171061172a5272dfcd2ef02b15a287b06b462691efd022100a3ebc866a1bcc24ca4cd7ba3197189787e728ab508fe30274dc6e2ece78b9de2 3046022100b4a56766c1bd941408c972ef85f8aab294e5d71443f313155a1ceda5c2c725be022100c1255205f49cfaf2842f19e00e37f68fb85d1f319b80fefc3d2c910a79891c23 3045022100b822b0bc0cbf35429787431b61ac851cfa048f6259a7af2b93dbdc19c1ab5195022070a271f44f86f367fb4039419a64cc4ff36338cf5a2ff746f93ce19d17ffa146 3044022060fca93e629d18836a106b63e485f2ee242669ded2a6bf966c19c9101ee814e8022012dce0d08309317fa10cddf3d4e50aca7ee25e55917246157a8cf9f74230d602 3045022100d7f006cd5a16720bc07ca04bb454161794323295e12f57a82227da8a9f8777c402203bf3577e2eb42141df1d565c83c1196c1858e6b435090bb243108f34bbaba118 304502205fae45a7fd30ab303b4eb8e2407680e718601d6f77ecea3d20fe99ab66036584022100e64204e3d4d03e540efbab80671317afb5cbba0c4fc859de5e7908d437313e76 3046022100c5c1aeaaa70d3f922be75d529cc2d5a42f6147e7b49cc9a590c7ae095fd70848022100a78381837993dcfdd43c74eacd4cf047c2567cfb8b9fac7f2852db2ec35a0145 3045022100c561a1d2d801434219afbede682d0a2942cd04a3aa668dfe2b04dfd9f41338020220301145b0f0ceb996af1805c048e92dff2ccce97e2b8112217d82ec3af54c62fe 3046022100ae53df655357327745b2de6a9426d893ed935f1d16a6188acced0fdc428fec79022100e44229832acec879c9f096a7767f3654a246aa906075c2233789e659be28d680 304502200c0398309472c70207e80fff8f65905471959c0dfc811f4dc18ac8b1c7176f23022100c087041d0bd32d25d537111f2244b794e199fe24b73a2075a4b815bafc1e44c5 304402200e9029c7e1bf28a80ad53760398f5d44b6f07f7f82743fd5a1880b1f73ab344f022071925d17a4d00e8e3468dd2b7ef653a1a9dd1c21cc3f945d74f011ef2cd3d92b 304502200f30c6aa71adcfcc6605e6e5dacd72ad922abfd7718c9eac74e6a314b1e6daab022100d32fa173fcb67317fef9c64333e4bfc5a0bfd4565c80995a4c635353d3f9f2e4 3046022100cc210d38593875251d004294eab51eadd8dc23c2339476fc941b92fc7adbfead022100ccb54567ff2a5acf437ad68706755708845f7d3b65d6ab57ab6a8eccf94029fa 304602210084116a8e289ad9a6a37fa1c9cb0e703a2bacf324bf0eba83a09cba7299501a76022100ff3999436b540c8b1dd742f81db1b0cc4c3699abce26e14e6dde4d888062b7e6 3046022100b21bb7a3be23c26973c3f93d8c57420a678d92892ee93a3538665d80e8b884bf022100d4c3cdfc1976fd2f1e6024441762f537c9145cd1b337d972fe3e9103289cfccd 304502205e9151ad1278581217ba7feba3a128af675efed6035b511b2c89e5291afd10190221009190de7113ab8ed55f545a52a6a0f09f90d856a73a9e78ed9fce5a9b12fccf0a 3044022005f3b5f7640f06c4b7bb8660cdb0f43a8d82da8dbfd17f5827ecd063a6a405bb022007dd40bf0553ecd019e11877bfa27d0012bd5105766b93efebc04ab90a1b02ac 3045022100b931eddb1c0c8c5328bf83b5a4d8f35b3456c12f64c694ac9d3ece7987df1b460220623d154cd5d3bb555ad7531cde790c0a5254f266a2690895b3cd81347566321d 3046022100a1e5a63f6a7f467a07309b7d6b327eae25dde6268f93ba2372c4153dcabca29d022100f5db440631906219d691aa7ca0942ba61a49094f819aa01851d1f6fb2ca13b71 304402203490a2b04e82bdf7acd34dd481d9f7d3ac9978289a5588c39b1069d6b8bd167f02203f862ff5b8aba066393336de1218634d1cd6001c7d423a5beab3c67affcac463 30450221009f5375b43462c86fe7d6a07a5c669f4f61381cdbaf66ebfe0f90c60ea4615dd00220446d4314664a445695f051747a43001034941801b7244ebec4dc40dda7cc71b0 3045022100b20c77bc1545276f7de2d7d4c09e3d8b0de1e3d5ba2f145526472bc66c763ab202200b88789696a1b81fda006f88c3db96504765d74992de5401cdd105580136bffb 
30460221009a185568549114b4100d85dac422d17720df84a2c0392c7f4a5db8260b58d155022100e5e9ecbf1b94d21fd02ff1d49dc37b525871998bc7a23bb8696a885a08c865f3 3045022040eac960fd2a4186ccaa4ef78f47c5f8eddff363681ab26903eabe8930dd3d6c022100c8074279393dd8d238c878dc11118aee84352e9bfc2d89478e672bb36244449e 3044022100e28ad131d8b4a44f7c0031c2146f09958a6dc465cb4bd2b9a4e91b6614a22caa021f233bf95bf36b8d4a7718f2293db3b36fc1c3d6e80bdaf21dbafb03c7ab3d0f 304502202dfb4507cad478e1fa2592d3d9d1efdbfd4dfff5f3939f3d503b1982ecbed277022100cbbf2d7bb59ea19b1f3f0a4415be08d49382dfdf259d63b67e2ed6ab69b6ed96 304502202f202996c8d4e75e429232c4fb40f8af1edb04e911b19e06834d55306822a8c3022100b5bab406a3df2486463d49ea965adb2cf164bfa82b7c150bc345118046aa46d7 3045022048a3c4af8e48df1b04f1b0ba141dddfa72e4ea7621fbce1a44c7bd1a10943abf022100ad5dc5bedf60a883dbb93c72990bdb611391c62a42c0e3a6acd8ed90b32e2104 3046022100e4445a98025d98a24da9a324c601ad38f3e76395f3b88338329bed0e3341fede022100b5981aa015855a3d11be6af2fc9bf4b829f2ccf8d09d7731fae97a7c6b57fa3b 304402205b91fce510c825d330a6f80cf7544f2ab6b6d24fe876e7a52c15362bc788d599022035dcc30990a652b91f967f0f9db8ad40c7fca31e965f006241ec7724fd6d5d2f 3046022100c53e585294630c6c3d0bb898f048d20237cf03ddb4dc91987ff6120d7b442702022100fda0a9a49693db3a5c6815394c241445611a2613ee51a624b217bf2cf81b0598 3045022037d8283451c519d5048b16c5dedbb93120a6f4ae5bdb230b89500d77ed0fee7b0221009d34d3ee5586b6bb4c507848a5ed9303ceaa95b89f8a863f1795f436d31c619a 3046022100ac639634b8077d4be50f26f3991f00dfc8a05a48e7a353ff9576d06b00748e42022100b140f8fbbe910e7415267b01af40fcc606938574c7036a205b2a714778c46c44 304502201528c0bfe68ce1e213385305998352ff85a44067f06d3e48c91ad8449b8486440221008c4da8ad0b180a994cce94882ddead5e67c0a94b0591f2a09138fe9f067489fb 304402207a22d9e90272e838b6e687f9b2a39cd7c1517df0a972e9cc546a36de2c1bc5c7022007df5c8a1a64f40de3aedffde3e446fdb773fdb9d06929bb21b0ab6de6e1905e 3046022100aa17729237c31b406be8cd10ecc655ca161eec53fe8d0570dc49707244ccb641022100b86bdf7c290d700b7777ffde06864451aeff67b4f2f68a22d9104439e912c78f 304502203a00b958a98a2ea3d6d84d1028e0b7a94ef757a5b3e7445b758432b83779114a022100aad9af200c27e7715ac4027dc0cbad1f6a739084850bfa8afa1b53b5e2a77db6 304402203a74b48bb4d099e65da9e87f1328080de54fbfbeb33052052047a49c5826cae80220508b01f6671d1708b63428aeb983b7cd84fed8a626be6127261a41caec5ceda4 3045022073ecb7df3f732a6bbf77606aeb8384e104bbf0261ac6ac7e211a5d4e2d362d7c022100b29c42443209bdcddd7557491ae8db6d9f1ebac13207762f9ee1a7c4c5713628 3046022100de81313f2867b5a020a6c0034d64f8467057511ad8cda6794ea898d96b47b15d022100c865337a2a4c637d8b06f6f316c11d8421df8171980cb5212785a631b3422207 3045022067c907128d435a9f99e98e52721eb4eb241181e859568fe0ec8ad6dd59215973022100868c5964d278a404111cd7749f6d5feee23b2cbfde63c99a24ff4a25754a6a5a 3045022100876edce1b059521396bd95171761de19ccf85bae06245c299cbdfaa67b294763022031ac7112c1fd07315ae623f2c289f17ae7bb02314e9dc14697ab01e05911761a 304402202ab1ea5719d2947b5ea908daa88e8a9eda65a140f1e62a5da62b3321bcd72fa802200d49a056b2e1d10bf7a1ecccbb01215a571040df1b98d7f1cc1889cb71886745 304502200c37fa9b5c285a7c310f10a82d64607e611fd5b247d11fdf8652d05654a46d53022100869bc47ee61ebe1e152e197a70831eae842c704ae83cc84a5e52bda3f592f512 30460221009996aa6ef6fb747385b12a6e07125d5f44a027cc12ed20f85dfc45c478be2528022100daf16f9121fe2857052d5799b3ff346a14acbed6defc1823b88efea78a2bf296 30450220082ec07aaf047e117a6993395c6ae5c73b9c57307634d8f15dcdec5da1118045022100d3f9583fadb7df4c0a23fd69b20f257fff2c5c17ba0844b1b15af9333a08bad1 
30450221008e3ed510d612c49f3de3e43240c466fde459d6f6ef008d06ec17cb5728ed960502204cf074d0ec6fbd9dd59f91a6f30334b4905ca794a01f164e7a685b1adae20c37 304402203496ec0c67aa02ff41b61978d3455481a8831775c7f09b8c1de987c5ac8373e002203954f8914bc704b4a73ee34a7ca6cb1dbe03516ff13576f5dd2a0b9f949590c2 304402204d798ac4d668e526297974bea4cf3b25c73aadd4ab2047a7cf5ebe71d04c4c64022015f638791aea66b154c8d742716911eef6052093b6adfe2c10a54092eccc19c6 304502206225ff56b9f0daf661a32d625f9c487f399b02c6ceb10f700f23d039cb9bfe00022100dcc193031517c4b614578555eeac195eab8c652f1381f8c21e6617a6bd087bb7 304602210080e0fe3dd30402495af9c064153bb3088bd338cdf2687c7e0146bc5e989fdbd8022100bb267c81eac3463972cc5fdbbd653562175b68d4762328e8ab0bed7e0373b013 3046022100e7bd43aef0a20440ad9797e218358203966a07b903c8e21fa37927e506b977ad02210093fd3679d391d7806954949db79915ad1452e0c0f0b7edf1d47f522b0fb41f8f 304402207e8f47001902980733978466af878416ada551565ca2a46f433c8b775df947fd022076eb28bf3e8461f79f26d939095333ec61ecc8bbcc3b8e0c335cf1864cbea3f9 30440220409c6b334749281b3122d1b529b55ee1f558106a5c4e834f4804715d3b5ebf84022010d20f0a171dc594a86ac2001af6fec7b47206322efdeec110abf70ff10da318 304502210083a629c84ff176a12cbcc0a4c276a639c92d6f7f789ae184991ec56dd5f5f0ff0220350bf40d8402d40ddaffdea978073034bb2c2945e937b5e12e39d0e63727aeb6 30460221009cac6d267a4dc49f32938773d9db38242e993ca6561cf28ae67ba343ce99436c022100c4f40361359e8f4699da4f0375b457ea537db866038dcc87a9c0ce6e8767d130 3045022003894357fce04cd193b9faa4e6db67a80e3f8dd9f533c1f6a1ed38b043480a52022100fedb4b133ccc2a4f61d191328fb42f9b50bf01da35ce26fc2c0b026ebc2156c7 304402207269ca8f4b8295e7d6dc56dee1ea8d2be1005aeaf74e2732c8c9da709a4db5bf022041a33bc2f7d651a332917bc1968e4ba1e7ad74b8aa3ea7fcd546462546b5337b 3045022100ffb3a538e9688ff1555c6b85e0bd1dc8d3edf7c5a84da27f677006b2069e8bff022022876c6308a27a4c5600fa1b13b3a9c57410a58adc3cd0c591a942c90974f810 3045022070c7b6b84418c4ccd129529dd03ad91adb758e6a0f9aca4c95ba317398f2d749022100bd70cdad88c7dd15713c0822d53df675a9dcfdd609af83e1e211bf354dbc55d9 304502202f425322ad2ce528291ef1946239cdb74fd33a3e76f53b4a69da550496efda0a022100ed435ab906daedfc2e7819f03889f560e535f4706eeda1b9b30bfc8ca1512f08 3045022100e3d4b61ec82577bc3764b002681031c6ef0661d53524e79a6de94f59c66fcd4702204e56a1cc5b1c8328b5ce4816c86a1d98fb9b3456259803cfe508e3eddf172868 304402204b239ef3188446607bfd84baccb804c1ba1e6a5669d66a9b4e6c980bca78e20b02204708aab17a754d47f1773a51699841bdd5ddb4e480f7e2094d145f15b5f3b8ea 3045022100ff1509a443d8dd1664bfd644abb488c8cca57f222fc3619f16c75461467b1734022071dc7b0546d1a2b3ba5cfb1d6c3178e70682774d27d0eff096aba6e63a672e8b 30450221009887514d81a09250a1938d88ec32e8ded320b4c47f63b4ea3f29b065932af0e70220442a6f1d3ccf5210821e39a2e16633711f40779dcb68c5513fb74f3ed43cb3d8 304602210099fb0d2a7121cdd1012b9be03052d59544b0750f918a5d4e7856e5c859db7cb8022100892e9ff7f31ff1c5ed83579541210640237b977c39b9b869198e8408ae372057 3044022048e1b70ea336252168fc7f2b1e782a604829a20a163fe392b99bdd77c1722914022079a1bad46db9084021c5f6d01c1e6c193ddc90e285f2969e37ec6711b853cb61 304502206734f246e033b72d58f98278c70089eb2584047bb4fe93ca36074249d70c26cd022100815f52850dd2fb47e8b109486f6e7af00b24df23b8dfdc29ecefde8265aca12b 3045022024c3e56862c0d8e9804aac4dc81cb45ede8b035f3ffccc538c7da5adff3c83e9022100d3089249f44b7e96117e950e138c961aef92e36806644eb5a5bf9fdce91ca866 3046022100d1d6e44b4a2f6210a76be517e31f2a6203c5053c748e5b553d0a2c7292a9a426022100c614c3456ca6d473a8d57cd30c26fc22a32c791abf1d7e7681731a41733047ab 
30450220026f4912c7f8b5b46fdc6c3fa5df677c2b2ee9667f24d8c5a0348b270d95f6730221009a8f4ac7aede21fd7b68b05d6396e0d7fd76be3b348a48f70de6bf1d5c5e6129 304402205b33c7556f1d241fc6a1d578abe9ab7b8f99166ce5c84fd863c6c1311453c92f022017cebb24ba4e6b6a4c102727d37fb6be20f337fedba562d577445c1e81485fa8 3045022100d8da53b9729872321e171cfd3f34d719bc67b73b268f8ab586da424a0e536e60022074e281320e5b75ec953e7757a0bfeb9b5814a707ffb26e92378d7d01ef0b29b6 304402203e152fe2f1c942b30368e15779f26158780104d8a25b94aec30418281e26810102203d1cc01279d351c09ba5c13343168412ff46e3b94650d5544fa41b8002b2d1ca 3045022019ec84e95f06773ba74595a562d518eb962f55d8323a37d866e6c7eb53c3ef5f022100dfb545c4c65f7e528ac83a4b104293981a577d8f33b32f2c65c0ba0ec2a7a58b 3045022100a84043aff173247b378e8ddac361b225bb6ebc8191112fe5de1c4cd0cf25e86702202a85b21ba291adfe7ed184e5ca322323fb08c045a65d5336b57b0bd57e1dbd55 304502210094a4c3b0b15fe4ca85f88959f37f946ad9af1335e3f59ae22371e52d28ad870002203488ce362c9c3d0f0c895225cd6ab752714cc82e0a2b0b6506bf921e62ae091a 304502204a9fee732738d2ef114143e7de3987d05553fd063d45ea9e0b4b93fb441e06cf0221009adc212cf19c1acb3b4f7e05d07c63701d21d37dd27eff8c8c8e93e71fb58d5a 3045022100fcb7e0874baab630a97cc8bc47cd2c27ce7f34c220f7b604d609e7ee797f443e02202c92fca87de1639789183ed6ba89be16b69aebfd395942fabc51a4bd3cfebb6d 3046022100f1bf00efb007eca9327629d7293158268446f1978779aafcd0a3642b71c1ef22022100ce92e123ee04a14dfa90d026aefeaa12aade73663b3799ce1993278d38028cc3 30440220585122d86012c09e90f6326532814c1b9b1333b72ddbea5dbd884ca52159bb3102206feb17d0cbac76ce9700f3971ce85bd11c44cc6c6681b28cbbc6597425b8b43e 304502206c1b1a01384b03df483386244836bf588d6d22381e34f5c3a8e0b67f1e6defab022100ef96178cf5ee65c520cd672ba3f113a670bfab267f240bfcc6692258e7c9e77f 30450220478881fcc697ed9e8f7df80857f0e5bbcec9a611bc84bb54ff19498e52aef00f022100be34c1c12f7c3e7528e51f835915f5d8da141c21dd6cefbfb8a0925c1ebda9bb 3044022036cca1f34c3fb574f505e061de02b7040f4d42d421bfb8313ff50b51a0e45bb9022012ebc57359ad28f1d5481de705b79fa258ba3d96bfdcecf69f98464cfed807a4 304502204858624198924b644bd72f1e788ea5a9567c5b6850d7634d6003f5a1f982cc24022100d99c1219fe109ba281c7907c784f0aaa85bcad07253c8fae12d4dc18602ad03a 3046022100a586a4a205db77103799bc96b60926d0a50357b91e83fda2879ee1455f033bab022100b0ee6ef2ac10267d63eb09ba2912383d96932d14229ab4cab92fa3ee6b4430f9 3046022100ca3d7b81f4a03740e6b9be5d67cc6c8705006c2710db4ba4ff7e4cbb73e0fb9e02210084d454bbad04235e510d14fd67aeda670d10d3faefdcba5113b225b377ff11c8 304502202488938785c49063d1ae96a5e44c5d181a8703f595fbbf6b273022770eb58a53022100889b86db0ef3d588ef7802297d0736647c8cb458a264987250d03af5bc55806c 30460221009c8b54dc51617a7c4dcd570a9d5251b33099eef5ee5e5441c0dca4eb55047c2d022100c55bb5f98afe242ddc4f1c18ae0e20c0e75e8f47cf6bcdacc5f1c698be5e5979 304502203631445c44a8432dc14b3030c5475dd4e29df74567535b159678bac7db9f2928022100bf4a73141e4090b8f1b0e2e17ec249bf378b070f4d57720157da411ee063a35f 3046022100d4770cac37fa249aa19ede96d47ffe614998a614f3d62f06acd7096f323e703a022100fa9b95fee04893c7ae9c747aca5a75e32d16aed87ec8894b7f3c608eb60c433c 3046022100ebe0f5627a04f5ee553a745d35a8e5a4e4f0ca320c93e511f36f4a50b082064202210099c9fed2ce036e3bc89c7d628a5d0816cf65cb61a9ecc4efdcf9fa528abd0f51 3045022100f7ffca63952332f609668dfb88550f41b7a02cef1343324973034716160d0c5202204ca85d7e6d208d78cf982dab220965ac12ebb56ea585345578e24e90c9d21bff 30440220776880a967288d92b11256ecf4f2c83bd50e09c0b30b2771265a9b6da920e646022061634a681a8319ef10756a0b4a81f998c1fba628f08932a29972890f96610270 
3044022015b46b330df17ced104170674bd576b3b28db97a2472ce534013c6f591173b1a02207efbaeae1a753e5c5421824b66ad758ce323d1076579c3a730ca49cb152e958a 3045022100a85c9cd8a5ec0186a610fd14985207b25c06b107afd65380bb3c40aafc576409022076cd7198d0ef29590bbb50295b572014f068eefeac1a2f515f104369f5bf0035 3046022100c3a6e74a476e2839509fbf87622704600e2ae77b60a1073bce6ad58c8863ded8022100e1d4820147f9292d5ca7d8facefe52ba1fa1edfa0c094b1f4e765d82b89b07c3 3045022100be04ea7b3129361a6abfddd8791e4b91993de6eef1ecfda32f902bcccc06ff0802207dc92a309432315665455e58c3da11b2d9fee4423b9d70124b388b866060f83d 304402203d5b4759f485afb84c9604d4561ad12adebd654b5241c43588cfe608c579b61802205bbf6280e0d84d2eca48e9437a708581ae212f28339af44fea436810b9bec80d 3045022013f25cf73660992f46d8efc5a9b64f94661e0e71d85e6482de2c948b4464586e022100b87002f32067435e19166e22d17efa014feac8e3ee90906edbcc428eeb820393 304402206b529d58e43ba3c8bf8fb8d4b2553f13e6b55b4c91ce560621e7a8a68ef80e3b02207bdc0acf144bca5c82a908da8c56439d6f799adc4852df63f2e6fe5c638b40b7 3046022100bdc2fe4437192c96004fd6e9ad5d69c001692ea618de9b29b93a714b9717fb15022100e4c98274eb63b4a698c10a7d3888f881f295e77c9658c245c5b05e285063ff02 30440220650a64e5a62bbcea96bcbd6067b2268437a122b3452ee8856825ab484db8823a02202232d1e1ad9f56c6081eebbf7e56868add118d4f1557a98736bcfa13512523b8 3045022100a50376c1d0b87313b046eaa41c7325acbf54fd31fd53ad16af3695e04d64fc84022003ed5dc2a542f73ea9ff6f7da8b7e8555d8c3c20fb0e5f826cf880b16a6ac41d 30450221009e77e9895be5697cc6003fdfa0cb7c9127affc23ee809010fe02f0b5e49bf8ae022057576f848a797acdd03f3f73d000efc1d67a8339bd38ee14b588c77af2225066 304502206d2bafb0acf46f99854d87801fcf02dd67af72dbbb43c47b90cfec0e48e24ec8022100eb463d035f6b2f2003ec21eda5e1cac34f46f04949fbd7c85d0e58bf48152f6b 3046022100ce98e546e8db4faa5a4917788ca614a153540db23c927e5ece132f2b16eceba8022100fb36872068da1cd6926ec1f1b8a8c784190eca337447646486dc59b311999752 304402204a80922dbc7be7886b724972ebe5f45d8d09e7a52b819b4261fa236fb22cbd7102207232e7272aac9af1538b85f23e13083c3fb70e01b09148b0251fb440cd0d5d79 3046022100d3f6b530f9a9299a2caf7d4433b789a2cfe53b73a5ed6d83132856596eee7eda022100e764ff755bf0c0873c6040b0de7336d8a47ac2b2bca63ef0b73adebad37144e3 3046022100d3fe421e4246dc60f6a9b572d8f92ba03c64e7084bb50c489aa8ff2243d070ed022100f5c4947b4957b5b307f9953cf60135fe1df69f1761159734b3f5b22e0f373773 304502200b81a4dd16a7edb2d818d7440fc05be3a8f3980b3c58bcbdbf1c11ff8df785fb022100fbadf56d6a94e82d1cff4853bf9a6f0954899ac7f8a1b264e7158d6775377fe6 3044022056d43c31320c28436eb91de16e9186886a7e1f4aad169074830a80490c78bf9d02205009608d66a101ed110b14a22601b6adc62b9f95f399caa7c22cc78ffd25103e 30440220302558bd5f1a6e2ac2f9c9632a0c6103083258cb032619f033886dab7972fc1c02203f3be761cfdfb3843ad3c4d4659b050865bd548ed37503d0fd346613e792e93e 3044022031116dc74bb140ffa506340a16cdbf711f31a1a98a41318478e366f2427fc95a022068908f63f1bd43bd9f29029b62fa7b7dbc78cc0115f0c9a667600a54dcb296ac 3046022100fc73e19c2f861f22032c413123199cf98b9b940a4bc7e5d5e4ef79d3b2e6586e022100d7ed4db304fcd99967d72ab84dfc76d9da2f80bd9a0c86207f058c2c90785fcb 30450221009b5f9ab389d163533830c332d6837e6e271387a4d32a0ff7dfda3b3845bb545302201bae5d7b77c0c72ca0f29b7ba712cb9ab1557d923a6c039ab3d40f84ac4f3e29 3045022100c8060926c2191caa4d0d5d1db9c29561b5c0df3340a7d2fde8b7bb4323431b8402201e1d6399a69cbf989810144b6404b67096b67b69e0491b2625e01f624eb922de 304402203a87e42833ecffc0d24f3a1abef52a8ae8c339b7798f4a729f338832227977e702203d91246f936dc00979c9b58239cc097fe77a53d8da2f480b928b292f7490e15c 
304502201e3ee1b50dd6b16ed36ad6069fd8f05627c0d446fbbe00f5de5596f07d9321ea022100badb5dd7d4eb18250fcb63c9b2c7fd657255b8ea5d29c1ec3c942b15907155c7 3044022012c6fa1084a573273c62181a96d058564e6663d2f6c3de6e94b163a926271062022072bb5598981ab259a0bc34e8d6b9ebbb346b0a12e9023a4d2040ae352b41ad42 304502202ce379eda7fe665564555e8615e94687da2a5c6cd8ba152ed94337a4707aa7e00221008b58b37519f63d8f1deedb0c016ed0fb12dbb1ae0fd2eb28d74127a7f47a4de8 30440220789b83eb6355db66bbee1b9e7706bb1394ca316d4a1e942b243ab6c5c0455d0b022032a84562d2f07975fc41c892968db6bc1dbd248b156b1b1a1ee4d20e69c913c0 3045022100d55389997ff57436dd36640d597d0284a1447ee9ebaa714e957e23a0e168e41102205b017ee007ac2ba933f3753bfec14f3b95f18abbf7c0e437475ce19aa5933042 3045022100afa4fac51fcc662b2f4b12f1104bf8f57eaccb4913fa7683c2337e53b5b2ac4b022068ac3176818764fec00ed677802530ebd24c8330a5e235a9145771ccd95a6b85 30440220605a5c7a2136367288e1b7375b513143411edd913f14b6e7a801ebe483b112b3022053f66ff379fc91d8abce95a6bc2e84a52182da0da56f94949b0a717c43d95f7f 304602210087c9fbc94c7cfbfd1ecb6be53d17b3fa044e6f716e9a242a158dfcb217b9629c022100bd04aa38df60f14cee1c537718f13918623a31e18f74e3ecd9cfc70ed53c6677 304502200c4034e66e4eed930226ea4c2dec915ed4d625e2914f87bad5b9a2eae856ff55022100b917fc50128690430d9d2d18d171cb489c61798910279ec0f90a5fb6bd0207ac 3046022100efe42c6d30563558de10693efc24bbe1811931ea77294355a42d773df2576261022100b215e7e5b45f4432044a4514681db769b84f7990f9232dfb6350db4cf974bc07 3045022004fc2ded93a85a7e85e2908872d445988124995d69d5dc7555184b886ed0d712022100924f193b665ddd755ca8e99752793003b294f9d0e3cfc1a883dc74fe766d743f 3045022100ad1c2872c8bc3221380248d2e0cc275f086a478f5b08b150e6963d0a1aad85c502202a9c9fea7478d1efc0e10c514789a47a8199fb6a9387b045d97cfefacf6d3cef 30460221009eb596c459161dc8031b9cf2cf4e58afae4151cb54e9c8cc3647bfdf86048753022100e25b26b6448cd0d10df714a388a1ad8466512480fbdd75bd969e11827c7b948e 304502203865aad55decc7ac34ee0fd88c193926daafce7cda819a94fd7be0b9c1e8871c022100b5985117dd905082a5db9b8ee0d1562064da5442aec99fee26623ea8aeb5fde6 30460221009b85904761a9f86d49a3db2df5bfbfd7bde5e1228c963f45f4e35c852fb856b2022100f90ce1c302c27ed2a4c38484fec1ed88c313c64b4f80e71ea687f4fd38c185e3 3046022100c855f487fa9fbc40864485aa8f6bae98835a1a7484ab7811f96bca8b2274dbe9022100efe2f6f06341c2eda7e5dddb1357c3e573f390e4e5f9bf59bf7a839ca05dbf02 30440220147fed506afeb180950aca9c1ad6c04bb9188fa6f4b02571de75bea963b7337102205d6082fad0c4c1f2682d10e67f51bbb43d5a06f50f7f1dc6d7f8f704c1f496c5 3046022100c48181d10874d1f07addae34b41cd57bb61fcc9ad7623be2f3539f9385e70d9e022100966bba3ee5f96829b98222afc33090cc83dc77b761281192cf5ee8e998f26df3 304402201794e8334f066394525f9f4740da3e5343ce6d76e0adc86eaa15103965139bd30220263593b0019b297832218082aff14cbe60c47de71c74ffc4f92847cdba7b1ad2 3046022100ce1f97f9e0b58682fd910468603a81bd105d0c8c90ee2f96e639ecec7c667bf7022100b568f8bd2e9b2c269c4ad9eb69a132b53532e5b1b966f7cb95257e1cf9810bfd 304602210099ac56cf0965b04a799676b6116cb11e064244594e05e9d0d7deda30519d2d600221008f5edda20858da66caad4d4ec42a64fbf65d50123a5a66f09253991c80ec6900 304502210093b1d4d443ec52924bf06a01ea8b9b39f6094d4affe01f66017c7adc056b2fae02207b83fde37837531003202a3548465976af71ccd39e7b7dd7f33c34e50e2c86e2 30450220774421cc802d597cdc863425ba7cee27e6568df6ba50193a3a59a98e035eb78e02210093dffd215caaed7d476fe68e2007a8ba395820bf98414a3a6a84a929db8dc4f5 30450221009313862faf16deefa3e6f31cbb73e45e2f99724522bc1eee649d8affa3b57ce802205b9aa20aef935220adb1b9d6f634a16f43a30440c2f252cc885c600ea43ffccf 
3046022100cde568263ee08a0bff1d927fec281d74b2819cd2ad7640144e90735d224755f8022100ba83226c707c56f7ee6c6af276b13c6be7b472f2867e72d3bf7104cb0239e553 3045022100ed0fb474845b406760d8ef24a35ce5a50e52ee67c141e5f6dee833c5e4cd680802204c19ec12069ab557ba81e27525b00ef152bb6a7feb3558c60d278d0de53861a7 304502203b4879e5012bb5ef0d807f2225e48133e0029ff1c0d5a0c90c4a96fd64ed0c7702210084bf083176ada6e6deca06cb0a3e6ad4fba6298596068aec5e24b647552b0a9d 30450220532c8ed9406127e09f4ff14325820da9943ef4fe149a2a7fd9f778baef0e92a6022100e2b45f821b2a3edfc09521f3d4c93a961ae60f2d5d201aa59d54a8f7df047745 304402202da93a132abd540e31cc616ef79117ec230e98195b25368c9582627454552ac1022029a781961230fe3a8023634e45c26b7b3c864f79419947bed0688fb2cc593c71 3045022100a08f94c3002d67dc6f4fe82740331b783ee4680c4d415945bc93af154774e5cf0220694fe080ef5db5f32aca6946e4665a09e96633dd87ccc60c7045fcd2a3e02265 3045022100ba3f788b500d499c9d8ff01e03da3d882af8975f49e394f6d6f7da0c4e8a7813022018eaa452f9c9ff816af0200d3273fb5f0487de3c405501afd97671a80f375474 3045022100b0da3a86bb0bc65fd3219d8c28b6368c3d3d986a9637c29d88f3ab9d8bc04a7c022050cf1f44901fccb735d0aa0cdd9580b24ffa7b9bd2a2b5b09fe24545157b562e 3045022100b025290c42d0170c634aadf4bd528986a7420f8961d9975eab6f36f55eef2808022038122bdbaaadbbc614a74b5611c7478892add52db4d51f30cc42a20fa3bf4eb2 3044022004444f3c1d6dcb663218d78b59ce421265ca953caf7261e193520b5329e6c710022079f7c281c6cedca62d3b6777077a9cea5a863f57407a1cdd4fa72dfb70630f72 30450221009e0fb1d3c1fa5b1800328254985ba4bb7ab93d8fdf256468622ca40b23877987022078b8555624043c0720d439f3b45edff1baac4215c85d0d8784e0a9b83fe729d3 304502200354288840c1a836efdc597029dea5a420ab5cbc5221c584e9352883ddf4a748022100a341727a9bd562e73f5ffe7fb4026267278a761cd1997fee2e0ddc1f12f3b735 304502204a0fdafb92d620572d9521cb1b6e686bb74ebb389a1128051f796f695b8afdb4022100fc0b20d5efb54c3c8dcb6f119dfcf5c4de5962b8cef191ac355566c115f1a373 3045022011b247aaaf4095eeb834e2a20db43030c5bd8acd512047780bf92eb22e671fcd022100df707b6e1244e47d62c952a4f3b719971ad651b6c3bbce9b24879b47cbe8d19f 3046022100b4ebbb9f01c39001e20303ee5ab982f01161191f61e74c722ba4b3fae7a631ed022100fa01c1a6fd7876b17f5e232613cdfba8b3962d4d153c393ec63d22a4e6e41c47 3044022030e7141bb4ab7ae62b7a2550a66527c0db4304ee714d009b880f12a7721b6b54022038ed4a6130f9ce2c7fb546359012b0350ae4eb162a6706f91b3e0a0bf25e5623 3045022100c64e377d43e8bfb7ce65f96b0d4d2597b4ce08108496bd95a38b985e98941a5b022031bc1f67f182accf55eec823381cc08cf5a72be953b7f57be2ee81c0ffaeebb4 304402202aa42969028b3ac974a9a44b6965984835e43306bfb6db5470de108a3db8f8a102200461b5eef15b8014dfc44a4d22ebb8e0c8c5d1d1d3b6a1757086f8b7a1a9eedd 30450221008dbf853dbe8aad88423c975f9fc876ca11ebbe145dbd11a37172d4fe20c86e0502206953c831d64701c8662a499746f3a1d0a50e6235691c07ebaeab32ce191e711b 304502201b463bd7c988b47bc845a2e01604ba51da0f95d183323003ae8c324560d563bf022100a6ea1293dc1e5ab84905c66754feda84ec4ff6c17078c1032f6204b7aa6511d0 3045022063f424e4f9f26b715ff9ba9c4ac91ed15f3fcd397d42b902c82bfe35bd0af60f022100e351b6f666c8a2d393983b767805baa71af6fc02ebf1aeadaf2ea04acbce2ee4 30450220015b2817b32c943871edf993f3c1ce89e1636ab344accbc30f425e44d3f93bf4022100990d48abbe413c4053a979bddbbe43dc7d73fa4540973988550ecbad8309b406 304502203447dddd159f319880e1f75143667c4446af3a72040a4dbb538624489981f8750221008ed243ab5947fff5b2d9d53c99af5425b95b5c08e77a82576ed8289bd5d94471 3045022100c0645ae319e1865680d10c65e9810087725e5ddae39bfb3af18fd437a3004cab02202cdf8c260977c1d3a7befa457d3167ba08da1272a0498cf14814c7e83973bc04 
3044022019a93c88cba42886dd486e95acc378c1c335c10f86e1988fd005a62dd61345cc02200cd8be49e92e33c52c41679b34f34238714b8a4a04cec356bd939738bc741e27 3046022100981f1c70efd84e108f30db67193f620c8b5aa3d1115d54d062e95851f60b877c022100ac5c8eed40b94719bd68d136c6f4d370fc9ee4b4e15b13c1db924a7d9ec3641b 3046022100d749f3f3cbd4a43662c62ab71196e9b61eb1cb3535f21c27f8c24e5352f8e1e3022100bfc99e606b8a1e439b819e851a70fdbea265a98ba4161c0947770fd1c6fc471e 30460221009cbfcb1f5413ad62e3c946c8e2260cfbaacfefb2d3d9025dd83db744dfd81173022100e4c2980d7eb837956f392f13a6e9e19bfb5f68811e9ae25a522b0a38cf2acc18 3045022100f2bb1fc3653f1e91b17353d200f1984c6b15c6ddf9682584da0ba47d33d17cb90220773e37b6b8f58c0690e0ad63ad04e034d7299dd5ab1b4d5f8dd826d8d8f967a5 304502205535b2094d0e56993757358bcc8f5c587ca66d98bd2069762d43ae69dd8a154f022100b25c058f9e798b236669d1b26feab9efd5c4d344a2e9dc3b1a0998b21bda7b85 3045022100c53c875f7b2712aa74253c9de066b95540e9fae72a3a54293e1de7cad48342a402201700bca6d5f1a6339b5f56934110c3ab2d561c1377b6efb4efcde94a065e75c4 3046022100b174b673472a523c8f04a6a9e81f73c0be865bcd5cd3adc4c529cd6716ba18ba022100aa91106844562b3ed4650284477e97b3f0fb4b14fe5aedb24ce462c480fd181a 30450220726b8c31bbc8c415bb6758d81a903fd2d3109f3599c76955239db6a2b7a71af0022100c32402a9f139d04d885246c6d5a85ba42e1ad0830ba44fc05bcf73879c383b9c 304502200415a5afcea74b05f646da016474ff76ab1cf9159b4a79fb53e7e751bed68469022100ec9c620a2e47067d39721f7d6c6a73da138c5253574aff4fe3af05259fcf5353 3045022045bced88330332f830cd692d71c2ea54131ad926b1b808f3bb2dcdba9a68dbd8022100f7257eb68daa6736bacc855e6fe160acfe5568f0bb529e834ac893fdccb0fbe4 3046022100f9f72a20db34cddaa978136bf8fb36ced898c89d5886050efa882d0e77b4387a022100f066389fa2e344d893cd1cd2d9fc06cc783b555c5da0064815c3b9953758e337 304402201ad1f5efc2f81745ed12351d7f9bd68f1fee97f223f6d289285bb10aed3bc038022032f2ef7c9600581eaed12c6455b483921a361ab91dd5487888550b884ad8c212 3046022100f6c13c6505b04cb452e9dd9e22b22cd87800bf16e9d339183c81632c530b62fb0221009d6bc7e97e434f6b4b6b359a44cd958000b755b878f11feeaf294d1bfc9c5566 3046022100c950a5e59d14421eff606ae24a5b24fa4621806a71c392b8ec964e67c36c5225022100d511c3866ae7a73b32731fc4e2fe82c98c03fbfc6e7dda29bb9f67e70bdf8527 30460221008a46f492d4b2af9e72f5ff50541e6db1cc83530f7abd2391252a459f3074325102210080b6216baa274ff6eaf67fc1c4f48c29dc001896d58d15e6f6bfef63c3f63f8e 30440220396de48b631aabf3bb0e8f6335b8f34eda0a056d257007b945057ec73f07a81802207004aff92abe6e5da7ce6b27b32d1f07ccbe9252e001d059ef6a73ee7f4bd78f 30450221008251c3c20ee6c8092b2c47bf562817f7f16f42fec78fd06aeaf775266a72967d022042a4bc9577d6e7a9ab2885eaffe2bf48f4d95d96a42e1d338ce607fb12cb43b8 3046022100e473e3bda572d76063de3bbd01883bb50d5d6a3b607d03890bbf1eeef365e335022100b898a136ddf2ebce17eed349e03150b459d2c9aaf0ff852f4869db2f9a6291f9 3045022070e445e7d23c440728517ebcdb497cf0f1170e08bb5c78e9ec13737ca70108dc022100ad2c972567fb88996e62b53e21847ba386167c3e7491e6de1c757b20eb079566 304502203a35b37408f907bd179100b7a43edd88f17d8d8f873946e34b10ab8e2ba2ca36022100ae1243033fc92301ac5967d00bda55cc35ad181c5d821deb14edd2da1c4dec52 3045022037283a4bf094d9149ef9efc44114725362274575e1f83613c6ffd8bfc8fb9e97022100967e74fb98a07fec124d613e9b9150bb18ae338a09b27c5397015938fab58522 30440220212f03b4d7f827c154bc63b3ca30cfc3bb30d00f03461b48e8143b9169de059b02200991e6c7c13441ba83178b9f74dcf78423350e9bf82f633788f690a0ed837642 30440220792a3ff0d197629b088e7df84bc9bd80d0cecf7a8bebba7440d6ae42e39b676e02202d2ba8d79bd18c892c040361935f1c0423d80a27bc5526d045bad25745d1ddae 
3046022100c2d6f7f41c8b84a60b064ec95860188b4bc68fb5c18c762f6a7cd73aaa8c3de4022100e83c1e447dfb82cc9e4386050067cdcbd3a13fe1c93813cb1fa526d317f96f5a 304402203be699b9d62e01f80f04ae7af1f262936f6139dc87960a8ce428a0898aad566102204d0445e8c39b62caa258fe90811d1a30498ab70c1d7478536e20d545ade0910a 3046022100d9e72e9c92b76be2b36c728e20c73b66ee62369e83486a70a159c16b68b2b0c8022100dea74009d3cfbd0602d9305a42af63ac244c2d27b1de351a55d3c6162d8f3ce1 304502202c89abe87199deae9425865d09cd963389fa449fc476d1ccfc6c37541ff4ac2b022100aed1b42e9aa352869587d6f09377662e8dc47f02ef3d6db09eb33d63116054af 3045022100b1e4342e6c75da3284cfed154016efeef41304ba4176d1739ffd3c9bc93d52a202200cf4086debe32a5cb5aaf9f75356d3169c4d638ee0665cdd7abe9a924e7bcca0 3046022100a9138ec0be6a124d4dbf9a5c1000e70badd7aa7f73e20e4fd950c673a0929d64022100ac9f1c105ad14e3c1d2deaf88ce15b53ef595db12d3c0a7e44d974a2b3356a1f 3045022100e0b2d4f9690b544d780207429f451178578e55449e0c1f4da4b72b41d52c183402204c8097bad83b30b878ab50711798d57542c5b9cec7026d65b7503fa8217d8ec5 304502206ce644bd53b9f09878ebca1bb2fc73e2bbc80827ae75c339341700cb9ae6c963022100fafe85b0a4aaca04fa1b45a17c4f13471079de62ab9605fb8d78a3911516d570 30440220599a3a57b74ac95b507800847652647b9a67cc249b94a4c78f697a9e52a1b6470220018c768bd84a50be113ea531a9cae77fdb39dcb972fb9fa03ef3e1301205adad 3045022043ae4eaaaf886b05f8d6bbcda4d3b169a86d7c25a9523eec183f0be556d4e9310221008ea4da707b7a0be99641f06918d9f10b468d29db574878195f54ef76ff4bc05d 304402202ea1f359b285abddf69973b971ad1efff18ded48a4885da6fbc71de8f435c5fe02203020c9d0589b3fde3d11d451fc6a9dca87d20898c0e4edb7d410f2b79142ca1a 30450220617e43429b99433f2dcff834c121492264b84b4588100284136a12e379780ba5022100e30d4042fe49b8ed0c1f953d96a36ae49d81c3953e117cdec844e4cb7bd1a8fc 30460221008e771f6e371da222b42756686b975ddeb2ef52812b1566c463c9ac476d42a91a022100ebaeb16292846b16d657ddd8541b7bd0ea4129354b0b0e7374202a8c1a92de93 30450221008d8bf1d94439a17f97dd530408bcac4f5595b33e0a7c1003d3bec0ca22bae62802205bcd9ff9871ecff84257794b0d11cfcd35b4180204c45cec278a3c58c73b043d 30450221008bd9035b71db23af42c97b753986b6f68b4875b7217e3cdc61bf15ba12eb4d7102200277dcaa8732377d0ecda3d9a07ee8ba53e3f9ef3964444eead94422dea98fef 304502201912a4515c8c05b42c508d7463b2c16ab5bafc5394c49e8b47d6f93cdb25e3b5022100845cf183a8851ddf45abc01bd1406a42a30f580ae4bba156f5a4a3744765c054 3046022100986775ea43d1df347d82d6aa095cba95fe5b046627ad5dc80d01c9a4cfefdf5502210082473c94491e3fd001041d251f30efcf94c2f002db82128777a0ce5dc2167bdb 3046022100abc77fb47eb479f911ca0b920ef43b738b86afc4f08b1d7447743879804a13e0022100d81849b80c9a7eff3cc0133560b6acc20d0c21768d6e3894888b7b6e6bfec999 304402200e252d1219cfa2142ef468fdd76896d96ed1de6103dfdb57b57885b7c7682b910220728d953f6ab1aa76a2e5aa376688451279d1f44783cc3d9e604623ebcdfda726 3045022100956755a6df7a878282ff50716fcbc32ee06fc04308f87d1145a1c4fa30b43bda02207d38078c516d70202f12141176beb3d81f28f155458692463f499cf1cbe0006d 3045022100fdebd9af931676f49e4f5a3ae51c8b4c3f6e7bfe52789e9550cef5cd7b2046a402207e16b32038301d37e3f6a14b9c3f930b63131033c2fc7f864dd556c9c78c17c3 3046022100f4854b5f31bb096c16c53ae26b31574e383f7fd0faa70a0ae48c4cfcfff73d6d022100ef88c95d949bb896a239666ff4d5f4c78ebd9d940523ca553dbd10b0e6c9a8d1 3046022100d0ef006e5dc4054812df7d19809798ebe40ebb3513c1bbe79d6972a22979860f022100af120d54e0099edc76d147f09307a123c71b2aef527fe362731931a57280544f 3045022030042431c0e9bdfd8f8180a0ec499ab1afd946a68a2fef8b1e40951290f7f88c022100d3eea722cbe9db2879e4f308c7769752ab027c22007d6d5b0c2ac521746f057c 
3045022046cabe00b7c02de11f9e26fbf4b2c6082c388db43c26fd4a554d6a1f1e6f3bab022100d94d67b2fb0a347d656abc314b2d48511fb30f7661347a462f6a3c7fa684f1c6 304502201b29f29fcddbe35d6ffdf9e2e1d09ba489b909be5189cca62139f6fbc8b686920221009bd203f6f88e79f189f659d4d69eb694a74c3261807c432ef2224edb8d95234f 304502205a865c670e874af849bfb2a4c5fec4ecdb0ab3a2dc5830536abcbf9a816fe8b9022100bae852b376b4d14e36d0fc622d03928500e3b8fceb5740c2b8419ae18f33e71a 3045022100dbe49ce67287af9dcc11b20274ce192c909c0f9dbba38d9c1802153cfb99ad9402203042e8d92c0040b678248cee9df35332a0782fbe563709bf77d1d89660c2c2c9 3046022100ae0d6697b886ab155b827e21f9cf777a9a334fc6da8e2906346ee0ad4380f507022100f76d3d9eed4b30d4674cf78ae4728c9ad75d9d2c5faa0e6bb896de84980e7d61 30450220016f3bd0e0e6d44e5426da000db017ad4dd99bebaf9214c35811e956805c4012022100c96774d6e682171c9869d3df65661cde87645e14a881c67154a0b296b186340e 30450221009ec9f78c981e9303c20128cf1226d4f459792df732e4ed0e02c805e3a193b89b0220678af3425845e9007643aec5d8245f32351175bc320d003f9c402a5f37846b21 3046022100994ed2d3375879799573e0f13a9cbe020f5d15904f53432a391470b6b8a0eaca022100d4b09c8da1fb2dbf58c9d9afb58e9ec98d92844d605168d41d5949b9bde75cff 304402206cef330d7d8d06a05294cd59e37328933faefd21f153fbaaa853a4166e529daf022027ebb6eac2f5060b99a8efd35702d85e9e478607286b72d03469c4e7c0834c41 3046022100c0e4db95f172c0d7dcbc16f1d5184451428c9189a4816d38885797184fbf17d8022100a9a1ca6f00457a53dfc0f2e6da6455d19271709b6f8837e13f4b452ae60300a1 304502202db60715af0d326e11747fd3599a893bc14e2b15ee300509e34461b1788bacd7022100fb36e74b2e8573bc08ebcc81b58fbc11aa33b8f64d49e7b9c0e8636c2e0936c1 3045022073a9e8557963e6d381b91f97e02275f3eb42739819a4927adf1f78b78b2d70dd02210087257122253dc949610b0510bf1aa179a107ab7a8fbfb97d08eec3af88297c2a 304502200b170db9e7d92401944b71c7eeb73c933ee84fbcc9a9e0802630e1c837354d33022100af35372404762b7f347fa1b16fd7d12abcdf38a6651604e548da0c817a5e1385 304402207e0a30ae26d7803ea20047d339886fadf761ae7724a26b5c58491410fee5af4502202483eeae5af96b77b12adc0ded9953d91bb3ff36e38f02f9c8750581c804bccc 3044022039126e352583a2c939dd086ddc129dceee884384e01ed7f30f4a7cd9ec36e98f022010a200998d27132d3f122c59e4eb1523122126fef26c725f30f8e5edd4419d65 3046022100ec1ca347b543823f8df796563ab218b63674dc6682bd2b41dd8d6b786bffbbb8022100cf2749c107b22dd99d65f56b4d5ff06cce756abf7577912d9e5a22a2b29849d6 3046022100ea3c4876e64b795b0280da89f1c5ea3b01d5ffffb170d3b658af64b03b9a1623022100b32b067492ee400c684df62abc03e650e6b89fd2e21c66fa95a514e0adb24711 30440220188de5def6463e148902c8be3fddd1f1bcf5840cf847476658e6eac41028bd13022062d3e8fed0739df1384a39bfb6e8e64e931ea909afccb5bcf7ae01136e4b5deb 304402206a3059461a67df54b771cc44f02377f0dde5a942c702e620be7768b03acbc65702205d9d50ef721cb52163316eed0074c447cf44af9496b5989bb0bc3e4abc0af923 3045022100daaee9a7748311a808ae94ab5da7438cf6ef8fcf69e9b776a7703bb63603e2ff022033eb0bff9368970bc043cce9a8f39053e448ca8592714f567668fe7cc9a727b1 30450220370d25f143bf17a1757c033b98486f0f7f8d6ae743b7ebb4565e235b3f8fbcca022100a52e4c94bd9c4a0900e8ef2dadeb28f6487d47daee447e0c3dbecc95f61338ea 304402200a1d8dc9cee1d9426b735116d8f0b22a8465aece85332295e5dbe8b9f719350a022055a50cd6d0d13793f6e3cdaa00e5f80d581db608b72cbc8d82612a3d8b4e745f 3045022100bdd1a3c3a2cb12b46410cfcdc0c808e563c422099fe71b2330a660772e61b1b602200391f5486448f37c3439c62614a77292a05957a3bb28ead937c866934b9be589 3046022100d3e71b43e7961ce231f5b2ab67d5b741f5fbdcbcc9532fd925009b32b7bcb87c022100f646cfea3975387820e6fb89a770bd103ff393e504b63a59887193793c5734ca 
3045022100ea8965605a6452f0f291c3f924826719e202ed76af5b2f3eb7df64b5a8e9cc8c022078825ca5b6804974374426705d7fced029244b82aae65862d45771523e056df5 304502206ae0fad7d1e60ba91f4a6ac7e4403b1f8dbedcafeb43372559e90fe1162f9a41022100af5b058f505e33631cf96a1d3518cb8926c17c0360931e2ed38c4402980ff8e2 3045022100d15af0257d9c19022fdfd26232bce14b46789851b3a358c640fb7b1070c4b5c202206b339f15e4664a152468731990812185c427136031e74ff19fc6bfffe8a69e95 304402204d9b8fd9faae88491bc8a18a6bc47e3df0c7f020ecd624024a47c87fd31ddb1402201d69f66617a4e00fdd2cecf6e4520f99dfe0c10f4e86aa96ecaf1d0e8152a736 3044022069de467d23cde7e2c6cfa1aeb56a8bc15b15d89d176b2547d014edf5375023b4022018daeb64e0d5e3177926cd7f253f0ef58a8bf840fbf155901024e79e765f4a5e 304402202c89aadd4d902d30cba48651d2035fc1d50c8c310cf995d153868343b3a9b742022061db90424279a3d337c4c7bb1550823915778b29059614cb7d751e6ad35024e1 3046022100d0a1b89127a31ff483b32b460eef047b445c468552cd3b46b3c4e51b5f0c17b50221009f1ab4d943420261b06fb12f48fc22edf15f12c3640c0b641f1fd2b228459006 30460221009a6e47aca8324d40f3f082649b097ddde519d259807c3c5e41935f9741010998022100a14c5a80b66f24deb338e0d6a3f4f083f216cc947e663cf08573039b1516edd1 3045022100c7c1673be736e97d4c1ddaa535dbab40080db45e3ee23405818b43ad8716c040022023603009c3df9362cbdf96aa26aa1e2293b2a87272abf584c5333fa53608c58e 3045022037fd1af1635c0fa02801ac4f5e3ec9357012bda994cd0244a1daae98cc5da69c022100a5835e0ac52d09163a4f5aa2baf001b88c4fac532431d0c32fe00506b539a667 3046022100e5b05e1f0f38afa7e69406aa9c20e7e048869e2e1225cef688f308b8ebfb18b6022100c8a747a39d54d30a392e5890232ba5f89cc8a518c6cbefb57506842cb0a54f3b 3046022100c6ec74dcc03465ac1b13439b3f5b5cc9ea53373ce91c38a1d1d6af4e44b175e60221008283cb10e65fe14d609a0d073dd3dc8f3355ce3db79ad1a723dfc8fb7509814f 30450221008e1b0290077f82c0da1a5793fc76ba44c74f4a71603bc43f0687b3f56b54df42022055256cf86563eb96fc55825acfd1ec2cd62fdffa853a616ec9664be741ad53b8 3045022100c2eea3e7ab8df2f3f5782006aba8272d78b449e37bf8f3e5de1ff6a0a8ad1d160220168136e6249b5d948bff3f68df8c8ae962312230a199cbd654e1a73c315d28cb 3045022100b55865c64e2a7c40f1df20ea81098d94c11e4b9e7d39d14eb199d9d99420660e02203cb488dc82fdbf2f40063e63fa4e35d37cec5e4b28754d3e9f5b7d32d5e3ce84 30450220352306f33575729507a9684a586636ae2f3942d205020a599ffbbd0f928286d5022100d8f42f40dfdd6a4410c5c928085b96d7f0e623b21b6cea079f0dc49e034c6879 304402206740e69477e2f811d6be343dafad53cfcbb1ced1f5696f701c17d99fdf595bb402201fa64fc093d81cc02b21e9d6f1612ee4abfd0d01e75ca343d12a899c3dac4846 30450220558feb1049a46858c3af6bf1bea321083fa6f58d7215af4b5c1a5bf79569165e02210089779aad142cbd9bdfd1ef34561a44fa3bc959ca46a56c33fcd68634ad1382b0 3045022100fded3e37843a0f05c529547da8a18596d7bba78d8dae6329b55e5f840a6c3f5a0220172ded69a6f32cc451018449ddb8c62d599cddbb423c3e677365d4f7937656dc 3044022035b8d671b6ad70b6bda00814809fc1b95fb0b8f041a8aaa7bfc0e277bebb9fd802207b4bfd8ec24bdaabe7a09220eacceab7cc1e6cca63c697c494b4ad2d6b02f4f3 3045022100ac9836d40dfc6345da3ed9dab34edfce5575fd5f8f9658827b366b7380a13849022000e7262b811ec63a9e18a0059eb4c8ac5b6a6b9ffada55b79ee9fcc5cfa93702 3044022048e89983427016706362ac9307a0eb9f6d592b51a7614ff7002e60960e7c52b102203a7fe0a51b4152f4e3b931eb5d0c0a029a0c4bf715fdf17de18b033211312616 3046022100c39dc3cc1ea18241640b38890bd1ac8453d6fd17a60cafb8528f7b544e152039022100e3279f32a9decb1993702fc289a78b5680f3dea3db655f2285be2c15274075ab 304502207fdf33298de2397a83d97228efbcb97750995a835b2bb834c09f3a44003c37c0022100fc9f7e0cc76fb55895187a1822a9c88227d9b547c08138332ba9775ed874011d 
30440220026222e6a015e54264ae1a1fb00cc8365609f4b06358eb3056d298cae3d0375802202e80f1e4fd25826eb44036dd6297a09cb623a2cdde9ee6c454a9140b74869610 30440220726d6bf086f86560b1116845d3cc0e20f0d72acf1e165c406a825ebf424bdbe102207fb6a38fb3ad331045d8d4a924ddc84cd4d09d8862f649daf60382f5e2281142 3044022074069f5c5e503378c997a54025b0b99598fb33209308888cd65bcaf91b1fee3d02203edc34a0406c793596913cf9cf0a519dd2cc9091860f016ea2a6cdf60a8c307b 3045022100b2c0066c59bca4e1cf8f7f0e105478d83e66eeabc8a7d7e1d5482adbbae1bd0c0220420fbcb6a1fba7e7725a3196cb4f1efd8fad445a46599ea61a796895345bb968 3045022023c363033a98f148c67eaded32ffc99e21632b0fd471bec345a8d7e3296819eb02210090a8d4f8f3146d7764440858950c1b33aa85e08a6af7f043f26eea814c06c7b7 304602210098ec028ba37371655d9ff94bdcf8388699e822568d5c00e34c8590082175eb36022100ac137f3a2b7624b8b348cfa77e363477be758ce6bf9e8b127b6daf9353ca3969 3045022011e0cb4283109b0995a3fe0b2081f09ecd2918765cf29222c28ffe259d6e8757022100cec8f848e7e3a062c40e17d6d6f78359efe88558e015f5ff2254a60421172f27 304402207735244d8b3108b058735cfb17a941efe1ead5deb8f45bf37a463ef9e16781a202204d5bc7341e9adfce42c8b0df033a4469a90e3097a4688645ac7ad5c60093695c 3044022017f42c185064eb566c51513ac2c0125a4634c85c6fccaaa91839a1ca95a2959c02204d376adbab8510ccd4860d3818271e8915aa0aad8bbe81f0e3499d5de2054569 3045022100831e2d9eb03b5d52c7d34c4a21e940b3363e73aef292bd54e7c17f0e0300488a022030b0401ddbd971af12ba5f80d1c3e02c367250169d90bad93f0486b54e923114 304502200a7b6ea80508569960cf8858b4f6a7394716822cbeb4ff4694744bab4ea1d6dc022100a24457fde9685d506eb5202a4a4ab1c95e093710f0a49f879aa62ad86ec87063 3046022100f408a17de66bd600d6018a3824c79743e71eb0ad234ba67c5a48f1de69a65136022100d10d1d58230b873c972c0816d66672eecde1a9fc0e65e65b7189da9591e16241 30440220221d33e6a212f7541f4a1a4916414dc100f1a2446222cf3ef40de38f320e405c022012e36e01258c995c3b3cd31acfe61c46bd655b4674d6aa5a5a72189c553664d7 3045022100f8935025ffd365645eba08e689022b986ca892c9643f5475837eb74c61d0ae2902204314607006ff974a3de8fcef104655d09ba6deb82abda591b237a86ea066cf71 3045022100e6dc76140b51d2955ef4405507675413c09fb32c53f5c95f6b089cbc0757d39502206d81bbae556aa39baf57be5982615b8a3c64f8f62033d1b1c3e319f3d170ef1b 3043022066a5581922015d2c13828b424b297b444c5b7e3cb5a0e827e2e8517ea0b89914021f2a3141c74097b7837faad2b22106a59ec5fdd38d6d96319accaf5677ee9576 30450221008903bb8305585f0832d2efb128f6ed7d058fd7259d8ce2ee1fabfd8085bb724a02206ba6f3d54dd2d049bbf9279ae1547bd69537a91633428e5bf602c2530727380d 304502200de348e2d90741d32702be06e35455f2122a6291af9369171b9a498d6eceae2a0221008f51d659fd2f317ead7b6b0a3e8813b14aa119fd056dccd6547d80515d3f13b6 30450220774be79c5a91791f98d928b7502c868b3674173bf3c459588d0624d132eef0b4022100a0ed8cf147307ad6b59bb3b5c2527d34a38993f044b10ca6f53388e7b3144260 3044022000ef158473a5be94045ad2555390aee520adb97e28f0ca8a3e726ff44d08385502204a1fe8e38cfafb498598fe7c7e918dcf6c482b9d2c2be53226d3c67fa08fa01a 304502203ad1142a7a813ebf960714ce7c2662d85a8150c39c2ae07230e89332f8974df3022100827e7e7af4eebbd05d607cf64b0fa5eed72f1cb08531c9fe1b3a6ce658fedad7 304502201ae155abe4469b2473b9783c407c544f14ac7a0ce24b578b025ab864d7260a6a022100cd2a5dfbcc00b20f8fa758f16c8cf14b8b4c14a78b3ffc82aed4920b13dc1fc8 3045022100a0af803e7371b5692e23e7dfc637a403206839ca425afb4049424be372d1635b02201c2de8cae812a4d029c3422c32b1cd88e8b6523ade6679122c07a303c4f86cae 30440220507e96bc0fea133b4f5db41a1bd6275b1361fee472da0c1b0449a394e77b8ecf02203b4752490e79a089b40c27a61822314462c3c94a1f12c43d44ffc5b59deeccb4 
304402204a3fbc619f561ece5a40c0286e4fd080653cf8760e2672886841e144e24cf0290220748d197a34808e770f5d0cb7f8ab360e4a9e23d202f5df4fa239775e3f960133 304502204a8b7d76b79dc8edc66ed16e2f749853c30c729fe2d359a8e68f99d45e382942022100b41c50f563461ff00d95d5c3bcb052bbbf68fe0bda8e5fd220433822a9231860 304502207d0c1aa0a068c3131aa13a2298b888d5697740bc2b410b0c83f15e620a12b353022100b628d2aaf91bf0af48f7825fb64a57783849d4ee0f265339ece0abb065dbe8c4 30450221009195cc9713a157cae05a11ac4f272f041eefcbef4c0aab046a5803dae5cafaf702202af80e727dc9dfaae07a4a23feb586904e5123b416951feacb51c4022435c21c 3046022100d1110ac293a54d6d03a728f37e2750847869ef0b06d58e7ed1c007d8cef4cd39022100a10a4ef4861481f6d42c2f33f8def288351a73dc6525a08816b4f6bad12c79b0 3045022100901df293adf740f08f0c23021998dd6684eed6e2dca8d836fa4bc74f4d1eede302204fc6339715851e342c521689fe1738d09ca69d4d97b16d77bae27e79bb7eab46 304402200eb2a8dc92ed93644b0c9c22e5c0cc424e33c609fb330fe0825b15ab8f40215102201010784f68551fac57024ddfdb571281fc10aab9b538b515cc65769087ffe406 30440220265a18add33bf34b17230055386d1ec8ff084e80aab2bd5dd036377fb85aea6b0220286aa70917dc28bc088dcf5da0f5190456a6dc51702a193300ad25b8dac5cae7 30440220151717c81f6fd50c9fb8dfc830b2c49a1393d5a5dd72d08d4ed8237f066596a7022078b7a6d9c8bbd66efb17772d74444c05501bdf55c5477bf3ed98b960055c9415 3046022100bbd12cae732ea87fd15a57ec5c08da4221e1d3c7f26bbad9428ee5c61cfe320b022100dd021be30e31b211c8d78dd79b2d7601d3baa0933159412dbf274fc7d79b470c 30440220033db6e533bcf314030be7d98005367c41634ae16182f1db86745da83ede7bed02202a8b3e094e9455857a5dc1cae4a45e0914ad0c94f8cfe4aade1c084a2ba867cf 3046022100dd98d3fbcb8f92ed7750a9a728e8c6b330743245ee04b5578bee8f3a4f1936aa022100868d81c17d9f0521fb7238da683b5298dfd4e9d14300b932f3450d800e5b2468 3045022100f8f09ab700bdaaf549d267eb368c2288afb7d958403ef1dc1ba2da1f02e5b0ba0220627b98fb35510b527e1ed7a3cbfb77a75841130dcc75de5e46daba826ce190bb 304402204908ad6d3a9c5d79d3e07af6839f271d72b607626192439d9da299c09c28becb022042d49394173a738f93fcdde0b5ae53a6445756161d4e398dec397efca1623969 304502200baad249eb933e42d5e64543adc952ccea89a7ba834cac9cf12cf2e72720698d022100d11ae83966e4b9d6bd6dcdd95584566bd250c95614a09dca4e05bfdbfb3a128f 3046022100bb64056412fd8648c9acc35c7b2694fa51ec081bb5c96f60bc615b925f3b8cb5022100d2ee0c81cd3d331ab2400dd5f4b9750ca3fa23431388aa93ee04e42b24386885 3045022100ae8e07366af2e1962cd8e928408d4f6333814aa8d00318b41c201591c61270f70220290eeb22c1c1aa35da07ef2381fdd69fd31fbdde69e01a5ac0f7db88ed2c7bd2 3045022100885025f0a6faf332e0aa7cd99db0f9ea8f7fb2be24da533723276b46d618fa8902200c5acbe4283f7afab2ad894f1fa0353478645f7bdc5f9fef692ac5c507947eaa 3045022100a1368426529883d545a7028d9ba82f375c52cfc57c3ad963c3364dcbdf508bd1022039d7d711b851c83e652ca6949e5a207a1345887ad548df8232321c696abeb4fe 304502210085748d37ca80542e113b75b74545703f41e7015633e4ab0f05f564732226946b022031ee7fcb50f01f5e389cdfec67e434b2bdfe4649da5928c2d6bb4f11908afbb8 3045022031e123f828ba3701817e4628cbf76d43e837efcea236b529b5801ed4b6beafa6022100d12f099471428c3fc2e2c117c598c4eb9785050de2553f0356ea2e4c41381433 30450220040e232e3f4330abd04bf77344445830f160ea148ab5a7ff0636c54ad427c09b022100aaa963ca2ba7bafb690c72c360b384b0243fea363934002796ba54c10cb8022f 3046022100c6a42aea74e14d5f271b1f4219d53eff342e821c4dc3097ed3debf2ec2dc4892022100fb8e000a4cf6d41668685c41b929c6652d40e6e385c997623227ad41dec3aac9 304402205eb09383b2ebf6fa860ca429a01e294c5a12afa328a570bb631f7c985bb6bbf4022025859910da840c87d21ad45129dd5a24a35ce5e66b44b87db4b53dd51be75b09 
3046022100ba3dfb0f288c094deb93e2c2de7d341520648f6937aa419f8059ddf210575a7b022100c5ef3ae37b5440ca2bcd390b50538148b4cf536549945cf386b1cd979401f2b1 3044022056f862dadf2d742996c31e8d809e42da77e1759f6b638247b5ceac17e4d682a6022069a3ef9cfb5913fb81ad8615ad0ab61710d1e17f227fbc5dff181d8b53ad8680 3044022030dbbcc72aa5038361c149672f9dc27be4c2d4d0247087e8093b35b3b31d8957022034059cde9985b4ca1438dec719897819993da5e9c5be272d2df3419ad07b4742 3046022100efd23ce64633d1b29957a5c1884e3beb1887e896c887dbc1d43f8b5e7e145829022100f5c9f68e335a7be09a30a0105213c813696afd4da096305d5f5025e8dcb2312f 3046022100a0ff4e7212787cbc4d9db47e1fd27d1630dcc1e8ee549bf5ba6fa5f824c9570602210099ec14dbc52e4f23e7c890aca6b84b8ed418a4165663a696e53ebe6a1ffe7999 3046022100cfedd72cb748c096de3369648532188ecdbe01ec2566351704076556da930b47022100d8dcd6c7728743ef69617f3865e1d6cfb549d038443b88a2c24aa1b616c05c68 3044022060864433ac60bb8a623e880e4c7b5116894ac262f5c30bd35dd8b6e1773dee3d02200db899cbf9dee8184ab382421edb58fc6d7df7030b66522ad9ec58604479a472 30440220162a65686929750cccc9d9f5c74da2ed0445efde0e2cc08fdb298403f812285302202308e2cde9c86cd5f71555663d352970a5613e4fd6eabe0c324cc0c1c83546d5 304402203cd6e95801bb2f389569dc0c45dd48bc708a5423e2e914f123646034c5feadfa0220482671c96a5c86dc5add97403948026efdab528dc64c8199c296983bbd686a27 3046022100d82a3036a9482efeeb36c2656221a81bb5ef32a0784603b8fef758fa35fbf005022100bd776c3f7ffb53bddd8f21db6130440dba1f7221f9cbcb976302c85356befb9a 3045022100829a7d57cec0094252d716bf9c45afdaf494e3059de42ad3dae410a4dd0e19be022043dc4662d4b8f5701025fcc5b6fdfda13e69bf8a0bc9334ab0e4a873bd9df795 3045022100e2bc5c96c6d0c273d0f30ba7a56f0b20b6672220660264d14c96276d9f606ffb02201b9b4f31f3a9521f8b0ea3c9672d3967ce4db53a5c9b25fea00fc4331f4f8b0a 3046022100c29e8b9e81abef1e02db2cd4ff0674d0bd3d772774fec26be5f600ae459961370221008234e7c4c2f20eec199e6ac4acf6831d6dc649ff4a7477c48a5e394d7923c777 3045022071605abe48154c366943ea0b4f4d3970e13428ac37f8b4058857ecc63a11c2af0221009c183fd05b593754bee9b1354774d556e793ef9be3b33fb0d606c730eeb1b708 3046022100be5bea0a0237e61a700bb9d77c38ce0778e5847256b40898977cf859f75282fa022100dcd6112b56d6d775f1330b70eeee9ab852c8634570b22d23819972d2869086cd 304502201212a35bb3cb454ac54cbbac292223a150001b875311b0eb9703b0f3e519558e022100ca96db8e18a787b95467001293cafa7f7c4d40ccfc5080bec9945d155a2693ad 3046022100a095b5c31366094410e7efedf1e748dfa36b357f5f0d39026a9d7f949a91fbca022100e0301cc5d2776f6ae1d93cab06cebf82f3de2e007bdeb7cd0590944d4c1274a2 3046022100fde8f20ebe99a6bd36d3d752408276b0e8483fee94149199b16e1a734480df6b02210096ceebefe18d3a09014dba13bd844e8d199c640f6758a2afc3b1db5322bf8577 3045022100b3a3eafc028832676f8582cd8c29d1e96da2e3e5f66d34abf407da721eb96ed20220711979d5fb31f0f89157f675d95ab1c17aca56d94c6b2eec51bab2ea856c31d5 3046022100ffc04517b038f2e8661adfb05178fe754002f40aacd3ace93670a2d47c7c1e2b02210097cc570a317862a3a9fa0210467ed7c4d4fcae69d42a2b01edf1a391e31ef2a3 3045022064d5a0dd3c7c3db5b9eb6212d1936737071102dc4bfb3d6c64c2f619c8c198d4022100faa47993c8506a1aa1d3507a79986ce2454fdc413403c3cdebbf93155bb7cf9b 3045022100e75d28b9704aebf4c0d193fc2d48e6eecbafd54374ade3fc9033ccd2b66a2ea1022001bf0267ab1eba6cc433974dce2ade4da57e75389869cfdbb0874cb34721dafa 3045022100f972fb007765c7708c2476a26030237113b8e890c8ccd81f072f4edb2c48ab3d0220613f5871ea8691e67ecff49fd1580be4e25682c0b5a587f8262b1dd81b548478 3046022100ebc149eab6f827bfea305538f49ab293b7b64d21cc4049b8140dedca5d4594ea022100dccf52dbd7bf74b09c9fbd5567c1cdc60b2166682ad1153872e677f3e230c3f3 
3044022026c06b2f8ba3a41d62ef406610a687c9ee0a245fac7b0287a2fa68b6b3149684022071f11f16e44ce778665c31ffeb2bc8dc5b5d98befc89c2af302c6b9f88fd4c29 304502206299b08f523b1d69fc20ac8a25a5c58a9f25b715631ddbf76a4bba06462b154b022100dc5eb6666914b746c2962b632d756de364262a92af9841eb8a52aea522ecc82e 304502200dbe93fe9f0ccdfbfa726d4caae3991256e59f27b7bd39bee38598e558051591022100fb1cf16044feb35396cd19bdc3380ec74bbdc9ad1dc9aa0bd54db58e01934f9b 304402204c5f1b91902af96642e7f36cc1f9734051890e7bafa27cd01a68c04ec79ce63f0220561ce4c95f168cffc839617f87125bce74caa99dc69149f464436bb90289ddba 30440220393fb823f39c48ea367b915d37477220add8e2f679886b151ae9f9eab555cb0502205ba7b5bc880cb12afc5351c7025a19dec284b9061bcc14f09263fdfdd7d2cd07 3045022100d4b3da94fa46f6b3c39490512c0d9b12fdf66cd713b028676c3ac246c3cca76f0220495ab0cf98a981e498e9b3d645bffd727f9af22a3bc1f8cf44478ce727973d6d 3045022100ab8ae67f67f5b7624fba6bfdb8ff9a8a31ae506f4b0c60fdcba82c88cf18c9c5022043e5422e0a9a93749ec0e73916e42fa9233980d9dfa100eed0e0c4214e713b71 3046022100c9c39a71008aa3397d7d0676a89ca9630fa6abb4a3fc12e2a3847d7178f8ee87022100a7808c4de0c069ca74d116e0f30cdf8d4e3cbb782010163acf69f689f4b8d477 304502206e6537952e74a047d2a7171fabbb31671daffe4c00f476571d88386168fc4a6d022100dce2e7e7b80dae52192ee5d2e2b59c10351e81cfca45bd6aabd67b2974c66657 30450221009bdd4ba2e362a5f2567af5d5a024d2fd30e9d135ce679bc0a2408a08a9328de30220279834b55440f751890d89ed3e578c95475d29aa67cd403d7e17b108d13bdca7 3046022100daa3a45f3d0e583a457a56e5d3bec2846db9cef8d33bc586c4657f78f2c5193b022100ed8f7ef29319f3789ece875dcbf40e548510fe0bdd96081ee64a5d12887e1841 30440220217cdca2879d317c9b3f09b31be0be93f0355c3cda373bd6af208b5c6a097433022058aace1bf840b02539c8323ba7b34bd2669c512ab59c5269139ea0425d5b67cf 304502210091f710097a9cb4cb1bbcbf1bc1ca3981d844f30bc81a215be617b78d866cdbbb02202fe831d5552cf25c478ea389e19d8bcccf1d87b9f4a92d8fadc2774b9cccd94e 3045022100fc362c3208880dfc3c70a790e878cac1d06c47c3bd46875db4d5e95774ddc05e022079c9d938929cb9270745a56b1833b9d1fca54c328500d97887ce42a9c7facca9 304502210087fabb065014e0a342b70e26fb8881079250510a4c7a18fa4a9c4b6e1dfc9dbd02207d9ba6e091a398eabdaae8127728aecaf4763ee759b598869896b617520b59e7 304502204ec32ecb0dc31127cd26f775c185ad6fcaccb11c0f3707d3cfc7af4b13854ac602210083fa8b36687b51130c6c7eb1b0fd76378a07c2370ad35302472dd506b47ff75e 3045022100f8dcfb7e9a51cd8d1e14d108e85278e7f3a554de435341c284e25dddfddc94db02201d1530c821c85a35ec4b7371a813dada7302c037d5d7f73f93c8e525bb3ffffa 304502203d92adab0535eea36eeecd4c6803cf52f4b9081286dfb1037c03c53e7f57e746022100ce9adec11f7bbb3be41622c03ef32fa4905f23fa8dbe8769e4436cb4401ccc2f 3044022021eb17cf7d0eb36ff885ecdc88f5042ceced65d39e4c97f53d12476e6b0cc86d02200be81169b8096e9a470cb007728e2cac8bd4fc6e79d6369c2d1bfd3e378f3c66 304402206e86b6326dea414c4f9769920ad11abd8700730132fbec371ff9ec9d94105ba502205e141b0f7dbbbc3e1ff847139790ddf04f23890ad839362a7176fa853dfa9a03 304602210091e7868466beb0dbebf4b1c211ecc6a00629488ba7d3a2c541e53db30bb09715022100f300b65d735fbd461e563b164cd63376d3f4844bde9117b35b2e048f980d1309 3046022100ed62660f3be2df3a21b1ba44af601afb21a6fe618286952e2727bd558dd3d4f8022100d23a8834023ba615eccd9886779e09c4cb5856fb8bc1f918c2f4c64a96374aef 3045022100be51d63d98c02bd29d2af9c91a702210e98c31c78754f96cbdc55650894acde002207868907d1679c6d5e021c07626f8302819e473ddbaeee30c11b8b531f28ab13e 30450220278c0990f24900c207dfc55af496c2a07769b30545b3050a095710f90bb9f7c502210096f72510996a292c3e1a6095fb51f6671dd382f3cbf5a9b6f3e0db79742f1df2 
3045022100d2e8c81742787ea8771591a352fd71186e8aa3690afc9ef31271522df6a174ad022048cdf358a6dbf0f1b054f11698e54d6c8b32753be9df37e558571f1cbdd919ae 3044022028d11b64c9fbeac56de73f198d4198f0e620004b028f1c5b0ca5460fd6ccc2880220179cbc2f15bf60cee4be7669da151f8d609084578bc4b35ca88d97af9f45ac07 304502207f0c4a1356c95a310c3768eb038aecdc24cd51986022ba0f7b570226b9ae6c87022100f749a1c09ba2e087f200351d78622d1e68bbecd8e5ab5bfbf8e85db63049ea3a 3046022100f98e26c7a6b143956cfc96bc5eda94dbff29e40688736d1d4095186594f2c0ea022100baf2308c477529dbe47aa8b699366b0b46a235a343171e156c9110c588ab8907 304502206eea89b212df4526094da29daea944ba2c288cecb2d3001a791bf930e84b78740221009f2e5099ab551814d9e1ba5814a4259f797226315f492cf6ed2382b06abba9a8 304502203303069502f59baa5fd7258185a88baeefd438c8e5f721583068e02398833e14022100f7f5cc3bd53253aecd37aec92096963b6891b01e1e22e71be12671ce15d03190 304602210099251bdc6ed525b6d9659683ca27abd3ce612b209e65006745ef978d18c94909022100d2fbff9d9e98b4821d3aa8ea2ab97cdcf13dd0b4dea215becd0b2541aa10110f 3045022100de9d9640315989835adc8e9621c843cdf1fca894cc8324c99db37c21a8e85211022010da7e6ea8de74b603bab59dd5aa5c685058093e6a626e6c1c85e5d785bca9b2 30450220056e25d1c67402fce312ddd350868ed7729957a3b92b346f61ab179f856154ce022100a84498f280a4dfc66a5d4efcada1b1111a8590ca350235b3776adacd7180e680 30460221009c5e144fc32f820763804d53db1f7224a096f9589da4365564f3bd90b1a44cc3022100eb8b4bc530d91eaf656edfb2b60757cdda96b67ec7e34f0d357c0357f5e3b7a6 304502201b20e2952409795a4a1459e9632eb41ab0ab8767a0dcf1463be83953047ca709022100a853b430cf212913006fdbd20f3330d8aee0d36dd9c8446f681c867798a055ac 304402204ef5e0bd93b785e75976cf2adf9cd2c106181ff7a05af690660ea4d5a16086cd02200d4ceb6d4b94565f98f475d6fb466ff1d76e044c2f15a7cda16c5f8c7c782ee9 3044022020dfc45864ad8ef95a8ba54ccc95e4feb7cc44ea21abb7688c5406f1057ba269022075a86249600e19092a13a93ddbc934de015a4afd425af2db10dd4a7f3d14d2e0 3046022100ad5fbe986db21a859f45e03d98ca2df578a3a6b6976a0ce968914d13927c5a49022100dd8ee38b991e9bbc9b929444570ee281016b913ed7064ac16e7e31a856563f75 3044022075aa7d3a3689a195621261d578e85dcd3c49d92a688bf6a386b9733e1137665202206b4c43c90c6c8d974c174a504caee48cd17972bf7afd46e59f2dcc12a2ed660d 304402204bc5d70f695df196fa0bb9c34cf1f864348e73207d26037be36a5a47807909e5022014adc7b6f781a9619ea3598c5787927f020ac276edebd0226c5e9f7a7251f9a3 3045022100ac6fbae1756292376c5b2aae9ac0e7134b7be79f7586630c2abc920d4b8741be02207c8d911c3951e95fb54dd2b4dfde9d81da5e81260ba66480adcede77b2d62d87 304402204403bf68350b1f6413539cb7924de992dd8bec2849ddc69d405610c0e654045e022034b59994463648c165e5eee9158c0d6b598fb3071a32fda8111760b59689e08e 3044022043e6d9e183141344adf8ec7146a9a7939bab1dbf3769c052d6f08ca78e51c1af02203803dddd7011ba29ae6481fcb05ea15713e0084c1b88b1f7b407ebb412ef2356 3045022043fbde190412a2c262a6438bfe56aa63cb0be569a122f296346e91c0590bad2e022100bdf3047802db8ec09d5aa233a22452024e04a0e2252c49c22cfecc3b0ad4dc60 3046022100a2aa216ba8fe5531b390edba00a740882fa16d00e19691454b96e059813ccf44022100a077138b9512d1b85ac1d903c49a14041cd78040dbfbf56a8995bd7935e71bac 3046022100861907f909c02a4e0dca0bbad13f9d7b742254efdff8b966348f8aa46853b54e022100de757695580c53459c5b7e01ebdb2375fc986930b39795bd97011045cb393a4f 304502202cd37eb84484947fed2afac0d96ab65ea44ea60de5eb82647028bb32d37404120221008900d64901f8852dceaf7811b0b3a9fff68e5e0e017977a32b88a3827b1443d7 3046022100b0a5a40ca543c64ef8b053abd5d4913b9872ac01ae2709015ba759624fa79ac9022100dada215c356feef2f5156c76588316f20c4bee5b95df602e49625e6d52955727 
3045022007e5e1e8535f5266294c8fe2cadaed1776baa2d09517bb85be57f7784a4a7f63022100e0e76dbc7bc0ec50456eaa61e42fc0abe56edb66b27827b779c5a3bbdb02bebd 304402203c4745da5803b6732595af41627a0238522b730e2aae39b81a477fbeb359e3a8022044a75bebb328eee5aab591666dc1ad9d9e0e04659e6b6fab04a7f5def4330f26 304402207af7b79ab22c4df371b18a6e9aba34eb2476c35377fa014de2da5c9993653588022026b752ab69a66978b2addc13a4e9603a5669c6a66117acd188a5b5bea2913389 304502202148b1701616ff056a5bf90b02c59368b509295cd5b04f398d87dc803fb44b72022100a1713b5b18f52b328c9e9c6b5e8e1fe981aacb0140bf288d778b0039756713d4 30460221008512c72f81aee88c22f0ee14ba690aac75303a403adb4bf4b5f6bd91a0970f4f022100b9e0e3ff068eddf08dd242b8e63e081f2eb3d3c4dc00cfb02371af806a33370e 3046022100c0a0e142b81d60d2f73259df7673cba2c1fe272fd7c2dbb9f6fd28e08da69697022100b0ba8f30af04a628d6124e8fa6eb8ce148f4fb1925752d5354d8c037aff3397c 3045022100bc49050bd2f690b943cb8aa7a30b6af6f2630669e47a9bfce82394a4789a4cf902200cabdb507100cc2221af493962b4d2215fea345886a3fc849ccf8c2cc1b3db0e 3046022100d8f04462b44f8163d4a278dde3e6b016e408b959f9d07fca6e16926017d4a708022100b494cb972826b8658bfd5c54341de248258398e8e7e01cf9ce5b8b01626a49d3 30450220412b3ae780e726020552000c6e8f07eb5aa884d7953fbbc223f0ae3422797028022100ded624e01517b3d37d0f6394c03093e82fee74e6c8b534cc64e8175603b5a725 304502206668e41a4cb8fcbb0b8fecd4616fb8fb91aa6eeafbc8597eabbe7104b71766a6022100c102e38a20bfeed391fe6db605dce4517d4d5866c3d6ac248bac7c93ea2b41a7 3045022100f34cd876862bcefb88136a833ff5064f8266f77897239cc341f72a77fecca77b02201863933dabec3b8341253f7c4a9cb57f706f3af4e59ebeca4912ea204468161b 3045022018150f5e4fc359b2c22735efe44f5971bee6ccd6d3fb5aab4b2b7e8a76054697022100f2e09c9e3222933f8f32c3d2740d6353e017198123dd9860dd02d50f5c43a61e 3046022100dd3c02539fc643576b3993768fc54f217279daf94559b5fb7835722c525f158b022100aed29b4e991b9020eb367194a777e534f3d0fd3d091e0bbde2c348654fff99db 304502201da8981837ece574ca75f4f40076968a1765b3e4881ff176ffa9c6b9ae5e9b85022100f4df5f53e0514a06cfe8fc78ca4df2c529700a74ffe14c6b0aba9240282382b1 3045022100b881ad8d3c59ea68b0015330239ae618ec6a962dac5f933fec0110be2a29f46d02201dc0d1ad7cb611d3b46ecf8499b4466b646d14c86122157e9c07c92bba6ba13c 3045022100897011b974d8099a2ad25906d0a767240c6a2b54b0a4c55b1a3c52f96f0b2e3602205b99665c5fe694cc2dc338b7b9393df8b43b73d09bfc68019e3d5b6614090beb 3045022032572cc21087d3f643d99ed8eb91f07fa7577e6b459c5ef88bf574e01f322be7022100fe294af4211ba8323ea1a1d3d9ceee6707918bf4061600d6854baee45aa6ede3 304502204aa64b8958e15ed61921215a66a868604b136cc4806a91c9e0492407a689297b022100b67ac303c7e07fd8ee73263ba21a144fa9b20de2ec708c6cc2013a67c54243ee 304602210098ec018500ac7d74c9a3ceabf355119ca27cc9e960632b8a5adc19f95c545cba022100e891910d06ac6b172b4dabc8ce0a384ed88ceaa5d9c6c79e2dbdeef22f012fc3 304402204d31483d4563e025248b392db5fdd84a6321ddc1f8f8f462b4ad1ec3e65794b402201d2be35ade3804b06992e77c0e7a195861eeed65dbc258f74f4d1d52d86c84a7 304502202ba68f00dc5aa3d41f2ac9a57fa80ad59e7e840364725f792083274c9afab25b0221009a2ffaf7b6a7ba7547854e5f8a9e49ab96f610df483069028498a2ccf7d70687 304402203d5f41451e74af8895f087ec678aa23564b51d6e09bd723f5b7faeaeb60d8d84022020197bc4f84e7fc5b6ec3405ce97b8668ef15c98086acdb66bd0a7e1ef7a6da3 3045022100aef8e1d4840c063901ef5a20d05d8ca3d085686d871ee3a62f993bfaae049db6022049f4d977e136fc69d1f16ced59a3e4387c3ddb8f99beb5d18e51bc49d89ab976 3045022100fc2341945901368a3634cdcaa2727bbaaa42ab87b86c4dc866715ca6c2fbd6c1022070e455399c2da3596147b0b8d511d4becf4fe3c4fb941084b4f55b39e0639ab3 
30450220542d5d7a18b706dca9ac5552cefd2dd1ebc96d8223341de9e34714aa12416043022100f9f12bea8a606161a5b9a68616031da2e371118eb997edf1717a9dcf341abbf0 3045022076ddc1d956b19e6bca283fb97833a3319ee8622779d41a66eff53cc69f28da6e022100c83f9bef4bc62d2edc0a389f3efa33789ff23ee9c5631532420166006ea8f54b 3046022100bc8669d0148ac201449ef78b47981f7612d8930bd621dea0f69357e5c26729bc022100f75df1b5725db25d5c4a8b6328649224220aa5c5a0276f825c8a30a60f0eb746 3045022100debd0c52efc5607515700be70bc796f4846b527786f3fb1de4f853b656873707022028bed35c44bf9042c989b6ac75b1880b0b5d7b636b9749cb3b30b5e355ef3012 3045022100f67d5095ca546d58274d4bca8230a6a1d45b653e8a27ff2da7433430df788838022044645f14fe42b50b5065a19e56357275e66e6671f3a3906e103616514ff44466 3045022030a1e88452d93e0e86ec144162f1b6e7a279a121f45a841751b05c2c00ce08a4022100c132d5f82a0f68fc2bd35d3ccd7d3a65ebef8368b66d72b05062b10937a18714 30440220263b6e766d3268a783b6521b09376dfaf271efe21e100aafd50c0d4d406dceda02202ec99273bcf58c4d80ec9acf1aef9ca751cff6b35e22505de94b289d4f6b8fc9 30450220508bdf54bd73be55dfd01c2531d6eded56c8cc5d2af9510328dd2dba859e3cda022100d5714c155342048d66257ee091b0e070d1b81d2916784941b5a83a4a1a56c73e 30450221009857b33fe79fa331ea8b82c4855c6ed6f3da2a13c1d36e3bfef85272184fcfc702204ff1e9f74fe8ec26eed0173aa60cf7a0acedfb6a296056aefd986f21cd00a40c 3046022100c750f24bf5d6746f73dd31b8149e6283c607cc3e96d869c3714c08f068100660022100c28fdc4e13e1c2c323fe40309fd33094212f3677bd82e1b0b95b462d610fb446 30440220333c979d03d093b6202b442c100e9dfdbfef30a943e4b5438af622c1976e3a290220123fd7661026050f06034fb3b14e081f5701f56093cdbcde15c5b07b12956cae 304502207a9ba5b5aa3a0f8dae584e92654bfdfff61e6dc991a96c03f133b5a54ff7ff11022100de93d70ae04c1b98d97f45f1030f29d4f7411380a9d707c60ccbc9000b35742f 304402203cdef744a3df0b30f9bc0a293a03a34d52f24568bea391b2beacc4bfd33d41e702204f72af755c1f9ec2de86e31da7f77b92355e8b760e0fba75dfae4e492bfd3499 3046022100dbc75393b9305d5d94d1d627b154b528979e64b50c3bd8b1ea6999ed61c32268022100b9e09fd4e89b14e019770f0416e177f7fe0dad0f9e1c8ba66ee88c16687cacce 3044022057c671eae758a774503c86d135b605137cdd7af66b744cf358fcbf64f458f49602200a1224f84b7151e52080dd39bc1819ca99e2f713e305cdc0f350703169506522 3046022100ac7586ed0967532eda9428226d8dbd51f0b2350a7e60050804803d3836eeab99022100a6cb521acef38321d50a2ff12b5080b273d695c39ec5263b3a960d4d881eb931 3045022100c2607fb11f0b2e6918f2a0c045bf4aaffda11bcf5e2742dc6383f3ae706ef0fe0220383f1ddab98e55bfd6c98403266eb8c930533d5aac4ad04debf2d7f87b552658 3045022100a1f38af59d6ffebc51aa2c80cf7e0cd3240ec5844b02d29e72d5ec036bce6ad302205747cd0215e84e40204212ba69284b1ee64e112559a4f63de88ec150f6bcd356 30450221008239dcf6c3aca1c4ae125c72ac89514f0d07fe204a417077d204a02f492ddf970220178ed255ef21e47494871cc42273e36a3fa1c0948048fc45efefe98b68c1a706 3045022014e2b743552583690626b30568d6c5744777f0ad362930a3e17cdd0b457bbd8c022100a5bcb9f6a40252e5a70479003775012b547ec0b0f55678ffe9c7ebd9e7cb4819 304502207399434d668218300a561bc49bd720f6a0826ae7fc7039545f5837ff1d1b1b8a022100b9da853b6d364c076c50d5a9fc0ca9d8cb1dc6725590572436aa7b9cc13459ae 304502200a62900864049a2e75447b366be2ba67615ac02174d54e450431049e93ef8fd2022100aa3e81af836559f5e6f9aa709abff501868e40ee0d2927517efe0198aa16b085 30450221009f5dc0b20bf9745bcd01286333c60864ade8a41c41fbb165c03e5b32a9b683340220243c5723c92b0d8d6f201631a1bef1b57cd0d8c372b94c566c3549b94fa7e188 3046022100afa79f73ac41a83c3139eb1744e7b9642dab17dc1aa4193d26baa7458f8ac01802210087ddeb19ad4f14041af699bf5242a0d11bbd7c4ccbaca46eb464eb286f3b372d 
3045022100ca1f532713f62e9acf065d1a088be6940ef9c9caf9d04e596e126ac9e0be9f8002204be5564500bb49d5904fead475e52c84c8b7609b46e1cc370f2990eaa168ce9c 304502204da5e68db0094749f81b6d64fe58f36a7662e3ee0b4f1c3d3b8078454bf5b796022100d69f14805dc29bd7de7bcb52a6ac40f448530786649df3b846a87010d993b52f 304502210089955bd37ccc7c85ac38e3ac33334ee6817b9844e773ab83f9cb6ef65d82d4bc02205edddcd4f877ec411f327295664c332b7329d338344b0136ed47bbc22a2b47aa 30450220240fcda0daa456268a6bf58e85f417abed45ce18f22cd85a0284197caaadcd54022100fb8b7d1d04457228c1a7b591002b75d3a081643dcc2a3e4148c809675de61371 3044022067b47cd630a2ab136e1115c90dc53f99aa543908347d04e8749ad07aca05a9b10220729360b5e205750fdbb0c4d0f8abb26975bf3bc08e3bbd096676dabef46557a6 3046022100f3f2d8d08ee1e504d14888074ae333a4741a1808904c7de9bfc70b44158ec2f4022100f8800b7dd112c05a81410dfe9c1ccf99c6e9098506c7398eab60b2404c849ede 3045022030658e06d42f167448df6cdded31124f9c17a64c210e5f5c9ca09df363a18737022100f1fd038dbf5e648c9d76e9d04a5315f279a7a1f164b14b9c3548eea0b0f5b9cb 30450220179f6123d1d7105315e948d56cfa21a987232ee68c985e46de1b55e0560a5b03022100bde1575af75b1fcc1386e4bf8a0240092cb04d8d12f64de944b726fe03e6ee28 3045022100dce8dd87ca20eda84f1f42ea166e31831794ae6f772ff58bfebda5670bfd8b800220376982cf4466e0e493a7dd663caf9c1d64451926da2815243b507fb03d097de4 3045022100de15f80bd859242e35b82d40e9c36aabdbbe668a72ef9f354b292e7e4aa48874022076a177ac24b49832422911618e99e7abe8b68eaa202476c75b89b5c9c2354e1a 304402200d5be0e79ad92e280a5b633ace9279e81aa7d11d335348fd258482e0d1d3e36502207432c706c7c0d925c6413457ab5568895141604fb43281d33a7c2e866d72c6fb 304402200918c00a58e3d6cc2df5a41e42b9d59d0aa82415315dbdd7b07fd3ea8267f0470220190b2f3c4402e3f4d417053039af7d7b6273ea92b600fab002a9ca5a30e0c833 3045022100927edbf12e5cba8c12cda7577e492cd949d715ce9823733bad382664d7bf6ed702206e7d36c0c7fe447d3682acebdd0f78a670c398137ae6812a6366154535667aee 3045022100911c842b82871a9a37ffcc21e85aabf7707bc2d4ba1bf94d6471450778fb35eb022072f1d2888fa612001dff4e04d1dba10c51da3d6b6f64fc3d4019b349b42cc83e 3045022100fa5b4b461f909a11fe0afb3572d120e5ad1d9a7fabd1d50ce7ab5b39681eaa670220761eb5d7dfa027da23ebeb0ce6c4bfe3fe5fcdfbc7b296edc0fc783a8bc24ec2 30460221008878d9c22d0e72e472bcd66fcc489ee28d0cbe0ecdb769dcec79ac007369901f022100fb177ede1c307007900c9c757ecaa566d68e84fda321719e43b611d285853d0f 304502205c76b43814d5e6a1f0f618587e5f70fa245b6f840f1eaed533e09de07e26cc47022100f48d91a6eec63a2c2b21043d1fc4225d3bf6b0fd7192c6ac04e7ca8e2eabd3e4 304502203517d7ee7bccae2ef99c7b283dd4dd3125c51bfbe9a30dc4ea1327d527de97fe0221008aec2045d5531db9decd21884d3c4d0711aa5ba40bbd4ed4f55246f0c14123b8 3045022062710bc1277717c15e5d8c274df5d576f3d2a2d2ae6af7680879cbf528624189022100f59b8f1e8de4862c4e5b54d51bfbafdcb8225007d3804812982b79b83c0e7173 304502205db64a225d5945c8046eae254febf960e18a6b4dcf31578eac3776616f22ff4a022100e370756dd02fd198e9b338fc16e3bf17a3164001ec15b4cfa38d7850d3ecb1cd 3044022055ed546a7e234aa5bd2aa6231f75f2e6cba3fe6be099be70b3cb9a22cc43311d0220285fefef05d74a9c9370e9f9bfe912a624afccc034f70f4468780cc198c1d432 30440220654b787ad539a3c1cd0cb5dd314c6f52f4269be80960e22ee1e4e4236d95deb402206beeaca53fd89678e834601dd74c64fba3a71e6d5477753fd40faff9a6531dd0 304502210092c6322504f35d44462234a4e9f0536fc22783850a42fbef757be260979a3ef802204b086fcc6c4677312c77ad9d3c11eff250b703b1fecec1d8bbd9df5936ea42ab 3045022100fe669938ea33750f66608d19be4119f88c4f330ac0d9b3eca4e17916261f987702203df46677e46d531520214034f2dfd770f474e2e7b1fcb087dc34ddbc59ca06aa 
304502202bca655477763c33e24759cc96b0c2e9d385bdf99cdaafa9783e1347fd401915022100c58d8c4603f3654bc4af5ca075ccff082b7d477cc3bb9f97e5e8d045a4f6566f 30450220096a6a89b73d39600153c4c76002f2743a4aab34a1b870147fccccbbf13181ef022100fc7906748923ddb2dcd9d486757c6cf6832224bfa81f1fba3f7ec71c1dd68bc1 3045022041ad15a9845a655d6a0128f939b37f32f252962c90773d2143a3e0deeae820b1022100ae0bc75d3cdd55068818a386f2f0395bb187870b6c49945c0d4f720763808d2c 3044022018e0af568a5e99f22953da72db12ecc891c7443f3a088750260be4dd6ec65c3e0220329a0df22e202802fdbccfaa2d930ea8375ed667ad68027480e3715682e5687d 3046022100ea33d1631ac251e1f2f502b91601528aeca1140d9da29b098c996bfd67c25e620221008cefcaefc461e2ae036b5b0c6d445799b5a8f0b6fcadd82fd75ab08148cffc24 3046022100885ffcf53126c26a69e04e850d8bbbd0084f3b7223277d1374b5b4730a2baad0022100f6da022fc9a1ad620d16e91c67fd56b1ce4ceb936ba38e1b30f9ee20042c4298 30450220210e25d3a752813d714a20d16bce6ea99a4bf2daf744f76e91d0cb816b55d5bc0221008d51ca3d2bd6e312b693f942a11b873d5d83b480f02ff5b359c86a202868f9c6 304402207d2fe09e914723724622602d499a0426fd1d03a834151be824fcaea3277a2958022072aa33f27f999ff84bdcf326a833a0d3da66e420a8f43ab764adf95210ec24d2 304402203493aee1e5a9cdc50a36473b2b3e3b43f99069ded9f6ced72dc9ce18b01061e202205d4d46153e2a20fbd74bdbffec65551d316ad7a2b095a2ee7741e5db53d83381 3044022064a1dd154c0efc90d8f7e4d03c924a1f4ed5394911b4e828a9ef1ac35308bf4d0220085f8a293b1042b04876bc815c2cb355250707720b1ea1416776c9d8f4f4f34f 3046022100c0938b4437d8d30fc0f1f6b0654acfdd19791983fc66042033ba78d45d7b5343022100d955c18e3d4e61e03bc7832b28311955680f75a9a390e184450715832686cc14 3046022100da6551049c3d58a50b79ffd2ab1ad796484e9761da2ae47a52671304752f30390221009d2ba6f175c951c81771c7c21c9643098104976cf80fade8e66fa31f49a3e01a 3046022100d0a2ff3b8ea12d413ff65eda6464787583f73b815ab01ba54af11729f4a6989e022100d21078791d448fab37c42990c4d4e7e7560d6c08879e5ff8a613388f8bb73c9f 3045022100db5158c51bc06d9a99b959b0e72268cd9c4abd73b171e1e36caaada546f9180302201a27f4ef28db85b9bba61637f4fb2ef23b4361baf98d1b492568dcc3b80373ae 304402206727ca05f93f9d2e7dedd501b221ac279211e965f75227bd6fb7cca3a00059a7022025b538f9065655322188eb6480e788f668ec06ca5e7d3ce044e6392e659074eb 3046022100c8521b4453ba59b8c87c3da5c241237ea932d28e5f3b357fe3443ccfa2b5ab2b0221009a8a7acd00095b48166c4055410386ad880b0d6191c121967abef1401d0c982b 30440220609ecdd6ef16e7d7775fd6bf39eff8f7c45ae8f8d6a524b92fd1a8c144a8daa202202b321de6c075f5a72f44e805b0e0123801e8c3a2d803863ee40d3df35962f4f0 30450220364aed6a1a2a7037cce390f03c190aebe31b8696c9d51f6a2c32d33135d927cd022100f39c1c584d911335370a1c1c381e5ab7a3299a803f42524da0496d5b103345ae 304502210082103e01163ab035b5f2cc319337d6edfcb26390ae0accc2150997ccac42aec6022004b49a489d6c14e6ac686d83758025876a71813dbeb241cabc9697313caebc4b 304402202e423503fd6732e1d778bfac8d8b877c103920d98326ccf315db314e9b959fb902202e650ad9cff266ebe8c53597ed49c9ebbf77e81caf2a6fae576c99cbd96ac9b7 3044022034317bf6cda86da72eba1c4ad675c3ad244df103ce727e928880b91dd55ee1bd02203586743775e7de680e76d7dba25b6c7353cdc28da9e5cb5078db0bca328477c8 3045022100df9dc48b9cfa35579dd89c2577f9f5014f81e063fd437d588db0903042ed042a022079e04ba3bc379099d914b08f1805bea37e2da7c11c6e4a2a9812fdb0e19e13f2 304502200f5847c2165a66b92d101d9e44c2ab4850d911270d9fb370342515ba29a1da3b0221009013b49ef46046f41c0f9bf27c89f38b160c714e77cd6f84417d023a0bab35ce 304602210080d72545cb51e1a88cb28c9e57888ee9a863501e0221f6e94a59fac0f1a113ff022100ee700f6adfe5342053b97586cc0943732a90e3e2613f531ddba79e334eeea957 
3045022100ec305c69a050d2e74f6c44061d4f1454c05c59da84282360a272a75c8d5f85ea022060b87c60a9e4750b8a96ffaa157a06d51c2b14d378547d9476178030e2e09713 30460221008ecfdbc0ccc91672e7bf6627c61eae5bced992bd9173e4a89bd9ea69edbefe9802210090bf930a1b0c1d008af3c9e7b12ddbe355820cdf8468c6bb0859d2819a9f41e9 3044022064269d0bb0b994b50a227c1da053ad382a2a06aba44d3ad6de005b3ca4671aa102202a4e9a3e4436957df6135f9692ab60ee620362fb7b7b5684a2259e927ea3c635 30440220406406bda6804f3d82f2358bdcb0e6559adfc46a55fa9797c8113b832aac2bd602203c0c29ac3acbfc320cffb65737689e9a6d2997ef1f81a1436a17a9eeda13e181 3046022100ca860b5554637acb25d2399b516daf7ec7d96e9b793013ad50a302aa897e3372022100efbbab0b27cc2bdc51bfb5dc659e943473c4b4a18edf25a9413b2de6f3443fcf 304502203e30a17e9d2d4b0980b8b633fe43574598a43b83a33f0f2332ac97f70413ba2b022100c05d4ccdf76f9b65b48bb91ee80e918c7e4d06139cb52f184c4a8d66bd02cba4 3044022012d3598e8476110f873f8a2a3641c4951a60115b5aa6b2721c655da8449f0bd102203ae010458e179e3a6c27ec96a8e0e71afea5f3f9cdca99a0829432d3ee25d515 3045022100b47b516605d99f3410dce1c925743c15908c16302d097a098eccf5eecb7657f60220438bd59b85ad405d0ea369a32005253eefde4b0cc45efff717f79dedb3bfe106 30450221009d1573c11030a979d45c8248708b8744b691a511fb56692d9627eade464855b302206268c6157700808916b93522586c3f0a7cfff470b07d03eb77f4d3b7e7cf3788 3046022100a9c0ba638bc61535cc2cb62b76c4d2a74bfbbadec40356592ec381ff4a1439d8022100db9c289c24bbaf43010a1ac644ee9c915a5a1c4918da62c959f49f5363e2c27d 3045022100da8e8ba20aabfff692a0acbfaffa9cf99ec2b3ac5481780682e66a61eeafdbc402203c46dc268e603eaaa0369f1e4512542ae3e93c7c59f5dde0406e5dca52dea5b8 3044022074724361db4badcd184ef7c90230030d06829da6635fbd5f62533789e68b868402201f282a2b5e343989cac50691eaebcfbd05a6cb48296bc402c2bf3caaf0125e66 3045022010b5eafbd29433d6bfa89779a0e90157016801a48c86a9f076203b4be0f5a5fc022100f146868aaa8ba19e155e672304214b1e17361f93db063fefed49592b36e75b1c 3045022100eff446713c10fd5a47194eb5f5eaf1161bb7179fede559620770f25e53e3b12302200a2ca8cdda490b7d9584ef3e4e7f7fb3e817cb8219f5040e1dc49443a628cad4 30440220157ac49cdf96d388e1f7daabd7914eaff41949bf75a40fb4927007ca147b38cd022036b08c5355a1c68d4c2562dcf0905ad1d2aac7fa99ad9afa5495ec0ba0209167 3045022018081ba47b1b22f647f034263e721a300074407f1d2c010f65ab27738d152faa022100977b279a49f53a663666c0b85073e0b290626e6ffc90de0de93542b41548717c 3043022035c7436d68f580a1f2430fc691bfeb7737de462544a6583e6b6286dbf19c51fa021f26652f08af069fa8d2af84ec1fd0a59b28bb87da0379b863d9e4f817f1e666 3044022049422de293c7947df06b87031196be44d492c185c9e45640f1695496cc6cf49502202199a0e9259dcb65d979a89ccb964d2a0168ba101982f23388933483fb35a96a 3044022047e4ba317f0710eca8e5968267c7dd04d47f6b98bc88328cff45a3686fbdbb0802200f6dfaba827fecfb20874ad99e22f41603260d51e1703c5842597c20118cbfc9 30460221009e11cbd0f4c116be613fbb5aaa76a424dbe74575fbf516d58cc01daffd08106702210091fc0690d1afca3308eaca36f70066c84ebe4637d0c7cce1f8068b83d41a0bf4 3046022100e063f0c8f7b57c8bc2e656f8f9124bbf167ee41f016bfd0e02979c4fd9b7d41b022100a954fa57181c79d322494ef318d3fc519100626c59af69c29581dac4e054fb43 304402207cebb1846c2cd143a0067ae20d966ac8fbc00d08690e8e551be6346658b0777b02202bdfbbce356ae1f32f9cae5d7be1a26df4d70f5e60829f385751cae7b37fd8e5 30460221008710e350fc8e99074898c87a967a5050f61601270c6d0d3aa4cf7298535f56d8022100ec6eabeb0104c9a1f072ace8155d8502842f2d4704e8541f235900139c5a0f49 3044022018a652d97d9d5341a28e043807b0906ae3ca5ef9b94541e87a73e416b0346cf4022038b5fb7982e45e6fb660a4a875f3e9350c0c26b17b9591adc7c19ae5f61719d3 
3045022044a5e073a1be42d3860529ea3be40cbf06dc1e1ad6b91b0c456e48ecb5816e1b022100f99692d7f40ca58b795fd2ed545b6d591f514d8a75bad2c2e5501b4cf4c24ef0 304502210096a4f52c61b23be32d3cf4c47aebd44628a6a18febfd1bf79f2c03fa23968fe002201c62923fa18be3e218feb0706eaa1ad49ad2ae83ddede448f1d40320877fe013 304402201c8d2c729b5448f41deb26c7fb43b6469e17cfe4cb1e52cd867eb0f89690ef5a02204733ed0f790a5ff991340ac462c924e65726fa325303b96a1e90fe2e725d5e0a 3045022076b1d83d5b3a0325bd71c6499eaf5ac78e69ee84b1ec4ef34c06109d91ac7c1a022100ef9df25e64ac4c3df3a287cc7eec0b77641c00448d80054454b1cc9d0653d1bb 3046022100e3e99eba142bf5a12c6d4087275a53474b73ca738dfccb664b693a8ae7146b360221008608c5420ac00adac78e9fa1c23d97c60b368621ae0d0f9355a9370534e6a232 30450221008c1656508338a8ccb3818a39c46e70849329f27c1dae18e44bc0b3e554299472022012d7728348459d17c98d0df1e71f0f22acad14a159af896da136e6cbba1ca0a5 304602210087754e96e7635a84e70ef0187144d3bce5c3fbf0398e07e2be48176cca53fa8c022100c4d46e8ee048371eaaffaec9eeeab6abb0858493ef693340793635365c60f525 30460221008b5139d692c110855c01c48ba37b34702d9b02364174209c7bf9a063fef557860221009f0b0ed75c52f30c95ef3eb12161672b461526b28692effb94723a06c09d0f6e 3046022100a7355748236393a986d79277d3c6005ed928f00f751b8ae48c3aeffe4b780b40022100c40f3c68b2a9fcb13092236bc8211732e31a8cd950e48fd8b5b561d7431b136e 30450220355b45d5e207501657374621fe982aa089522826106377662180976d31846294022100d184fedb63657d9e8dccd0eeb49640e42f345d9275823a2284a1e323fb30e234 3045022100915da9d43bdae386f5af03f62ec2fb68870714db7d99cd73a648f0319de0b92202203a4868538b8099e6ae544e2e6b2b3375442369280bbc5fa39aeb226ac7b1611c 3046022100a9103590075f4a261ef267785a6a1c1b1ce0f4a3866914bb729d809cbe6e5b1a022100e2322edcb83db949642810ba3dc4b1f55a6bb700485426676dc19fcd5a306c40 304502200404aa421e8e05e5c127e122af50d428432e5e0c3583c21997703be68831a42d022100a8924539ad813fd0d73b9e3b598a78ecc9260fe48739228f74c6b2fed931ca2b 304502201fad50d622d5459ded545d7b030b41547cf03046c4651b6f0eb281a6e9a25da802210098ef2967c6264ae4a7e31e837b34087a2c90d42b74e9185b064fbae87548f1e4 304502203246894c4f233a6f9120b775e387da539357fb4e68d1c2cf8e3897718f021978022100ed65d265b77d812e9b94fe4dc72ea09e55b11506b5dee6ba18356e7f4cdd8362 30440220257b2bf2c2ace360a8bd98f75d3e08758c5b41ff7afa4757383b1b5835d1add50220609a86f7ac8f10e303399f867896f6a30ed00af1d69dc33595b6f38ce1d4c710 30450220691fa779609221b39313ac29731e96c9e708770067eb433f6a52bd2f71010f38022100e6df2658cb66db93cb5ea57a6741c7ff14875e98bdc4abb60c145d1fd3983246 3045022100a939461ac8650454c87576d6554aa26c47925bcb35a272c289a3788cd422fbdb02205dda3f2d68999970343dfcb849ca971680e2dbc9af0686535798c36a328059cd 304502201e9a88ff567733f1e59a120d10fff41b7dbaa527821d864786c6385aa605a86b022100a15bd406264ba3b69f02b47b6368071d4223a175cc656b8eefab30cf0679c344 304402204aa2ade28a18710b12fcb08884b5433884d80a9883343a993b3a3ff3962b69b702207f0aae3caba7655e8871cf9fe78fa398420a74f9742bf16523b80f18a0746d13 3045022100c6c07c431283c2977ce92aa68e42d36e1bba763cd3a761a3a040d93c874ac266022063472c2efc3e767e9aa34fdefacdeab179114804e3fd47745a86527e0ce0e800 3046022100fdfef9debb79246e68180b9e3be1f17c92a0b0dec2f0d67314a52850348292ec022100b7afc77178b8382ad8b35a43135483ffff33512ed0a0d2825463d7b9765f2aa6 304402201b2c3dfb99cc105f58ea756b24bb4c98cce2698d61b684bf161da1e733f1a879022068eef2ed9715200a7bf2ba2035f566be31f5cd9d0d4970928ec634ad518e69e3 30450220082739be4195472c6ccfc681eb45b6667d49d9ad45df30dd93fa9497beac3cbb022100a2ec3725a67af5bb14d084317dcbfc89ff130e496597c23f5783e7e3752c7be4 
3045022058649399bacf2572219f0158546856d22bb51b8a28aa100fcdb216d7421bebfb022100f4d9c309b6cb6340b15dae5c790189ac79e84dead33d69a4e7ebdd70cb5608e0 3045022056fd7220c5e18ac225d21a5e29ae365534bff3d4cbd79625d5a5e23c057b92b802210087c86b92702bc08428394b93053ac8791c0ac25c33b58174cb9b2519c5cecb5a 3045022100a166a20a3cf3b92b9006daa999d5fc6ad407ef8faddd161f9fce275af57b0f0102202304f4d09e09acc083ff2e4259b6b21fa3b0944fed966b8d0dc29943b0239391 3046022100eb6c8155b711af0733ab28c6a858d7a91112307d571d0f64155705b282a73e6c022100a723a5aa28f2e96963af4023b5c609a827fd3219147c75c7a2abb5d7fe8c7395 3045022100d99d77fb72d2dccee3326e5f05d720f485bb422ce7ff89d7a1d2953199eab11c022012fa15266bba3a2ef54a18654fc55b3d6aa6db89f2299af2283e936421f78dd2 3046022100fb43c7ecedc445c8fa0fb38205e8677e45923970c17668b6be76bda3486d1be90221008b3d989a65dfc5dcfdc438ce55951352461cc86ae376d52a31b88a9e92ebdce9 304502200cc2a739af8418d97620f368a3a8c3daa67d47053e6c0bd53b5aab43ffc6ae7d022100dfd4a4ea4a5c58ab84234458e40c1d4d0c4b71323fac9f91d54b7b6c8f1a78c8 3045022100b2fb99158d3458a2bdb0e5f8d18d621762d9a1556e34049c926eeae4ec4abbb80220320b8ad80b7916af5dee133ca5d2ca99b92656d5638373b7ab39c6df8cf94a27 3046022100f5dc9183dfb6d1ef998827f68fa0840a4d9c692af51d7230490ee73031724843022100de4093e97c32c467940d003bf751bfd004db6d5a63e73f4963ae610fb13fc4ab 304502202b579fc9c6c7d41278ba9c4a1da95a8281e08cac5b27f93b776e575a68065c36022100b7f57affec01212eead8e2b995d8c89a73d79b8512252ab1b3419fe5ec59218c 3045022037f676cc37ef4b0f07b92bebdca00d7e549087853bdb6b2e2db9e48532d45f2c022100d16bb9e1e0787be26ae24c55cb1436c497b8fbcdc3361ac4adf69ed8ab95c3d4 30450220017d2c37e4a7cb644e4466580356ebcd7d0eb29aad406623875663d8c8f6bb8e022100b7a078651c7c4868f80f1e787572aa4b8fb5b9f9d84bba549371ddbb92a01417 3046022100836416be02de29f3136a8862d1602fdbeb626a89f4f6246b8c44dd41c1fc9cb2022100adb927f1d837891ba4a60eaba0b812f2d7faeb7a5307159c120f95eac22f96b8 3045022027cdcf4949c206f684ea4660c41dcc2fd72dfaf21bcf30e5a5aaba587898bbeb022100ceb9888f3db48d5049439c9812303643e5bffb10ff97e01dc20102231d218469 30450220427989642f31594d0317910e3dd3abf68f0aa350160f34c3e52b031d0e2a20c8022100e03852d0580468511388596944e6c1610bb2be0f2e4782ca08b2391f71a077c2 3046022100dc4f6449ec8135484fac1e48a79b3d1e24d0e56eb0631ce46ca52e7500e7900b022100e1d9d1cac79511fdb76838a71cddca17b2b58d6e81116968cf1f9669a508f928 3044022065896be201f92156e46fea56da79abcac2437ea0b3d1de4fc88bb347fa918610022040b2839dd3b2afc276a718b63647d06f91f9ffb284cebf21b2dca452a384850b 304602210097d4c072472881e43de045d2d1029a95896d49416e960ed8e7e8cecaacdfb772022100b1cc1c88a7cd6e1168b32d6a08341689965ed2fc82f556634ab28d12791f454b 30440220584bbce10b14ac0526b43899b11c7524bbb1bc0a5e8746509b972d3f2e32b8e50220352078d44963b3c0e66c08e4ad5a1ea7740d40c7bd743d33993c75162749fae8 30450220204fd28f8ffec805c376ab1ca16c6924d6d7b8b5a71d673010cbc87fcb302597022100817bca5a8347e495453f1e34c817a95e5d6d8e1a84929b925f70759ac052cec5 3045022100f6c506f3d816c970239917ff4fe0c0ec8204db7a1f22979b15c41bedecb630bd0220051b3e25c28dce62287f2012a0242dfafa796f108eea4e2dab6e65d207083ae9 3045022100fd99e9e5f6b4f90349078b7de6291e4f9cc20624ba0abf4f22f45eafd012c53602202605ac08b45ccf4032bf016491084a740f3b8fbdbfcf8bbf6c81f004eac569cf 3045022100e705fb2ca08c1595655a48b7da48faa552c73d758bbc0dd30d5d7e50898fed5002205c4d4d01b75d30cf304108ec49b905dd59e8f706e784e7416e989d55bf1d84b1 30450220298be9d470d258e4116e1aef2a49cb8d97eafe30d68b8ffc8e0effbaff909d1f0221009418a78a44f3ce79dfc2b41961c3085506732db9310b08f379f6a15e209cea28 
304402203895d107fd811f8877ddf21870e82ecf1832f2a9eb97f5446fbb7e5484457c5402207560b1950e2dc4a7be3825cf65fbe26a3a72786804c7645e369f3d89acfc9e4d 304402201e106d33c788c56752b5119023dcf52c83ee57e6a41dec3042f6c6bd2446759702204542a76242516baf2c33bce1866a99a981936f2bfd84724fd9c31335e1499dcf 3046022100b6a36e65609edb8f890e6803058c8097588245c9bdad405d079aafb7cb36b705022100c2719a2d178b06dbad3fe53172a37c9beda3cf8bf0a24df64e3959b9f1bed5ba 304502210086f89b27cdb515717c1eb3daf2d68f3b357eb6eaf8f90499dd8037b2674a34a1022020b6fa78c3d37cfd2f361a5f819ed920a9874c8b030c51384f2522be59f2174a 30450220429d6a270162965dcaca2ed235f89ae13767445b9b5254208a0e4f56f01a54a6022100bbbab64230dd8bd4a01a38ee5ca524591345377faafb7e455f191073f2fd37c2 3046022100c3c6fa24c20c8b8528f933f95ecd67896c870a07fa814d192f877e54b9e32b13022100fde0e5d276500b766e55a96ebeb96f5a3d9213400d1b6b92960dffb7a45a5cf4 304502203abe2c996ac0b02c906b784a517997b5ffbecb4e15e8b9852b4d40ada5cdb28102210091440a0850168faf2dfe5a4b38ce98a6e5168f19bd2cdc8b0627f6f95836c49c 304502210088845d56793c51bf9e4ec862b701484c0f0df9add10ab4c0e7fe8e31bae36b8b02204318e7ad5a4c513b3990adf79f5f463956ac3b64bbfdd0ea0633c2bc49bfbddb 3046022100d2b39a5350dfb145e414b6aa6b5567cb8f70b671c81475b8d9bc80deb6646db1022100af85ffbd4f3face1e2d2496616569234dde4ce12b00288bd17e473ee4a07b0dd 304602210085453ba5425b70413084039737f635c88ce004d1e414ce51366c1bd1c4d8b8f10221008103c67eec9665ac7a299dad66a3a4ba53a1f2cbc570149e2bd2eee2390b2250 304602210093bf13a8abcfa9c6beccbb562a99dfac3a5d8aea7759723297743641e450bfbb022100bca8f95c12432155c47f7adff095b7ccce8e8a22717b2f733f102344c4a43b8d 304402204674433a811b566f42e58e524a10ee1f830e15e43ddebcdeb4147f5c551e7d8402204684ecf8c7940399725fe98bfd4d688b09ccff2ee4208806a1c2580a9a9e47a3 30450221009136253e1ada7efb2ab69633e94230cc145b5209f909e7c9f09d266fc748514c0220615daec6db20b68fd2715d6fd1a0c94dbb1ff7601c023e9314908581da5a3a45 304502204a305f1cc254641b6ebe04e7c097e2954ea4ba28e12f1dfa2c42f1d623c1f6f40221009c90b16cbea87fe836af7c3305aadd84ea6e6827967f242a0b338ffe1170671c 304502201143e24963e356548bbed41553918526975f8643e69d8ce4c2a9a7aa37a62af0022100b453a6aa044eb5018fd4732f6176440c0c399b9cf115899aa12866142183f670 304402204ab305cb0067034a8d3cd153d8cf74fde4b5aac078ae9d12e97c28e3f6385c4e02203d2c348e5269c61d8019e457f3ed5eb313d1bdaa953711cadf758619772b2fcf 3046022100e674987a5c230bb2a1b5e8e8baea63ff35dc12a549b0ff9daba6be307512fa7f0221008d44a05fd9f19f981b36ec5c4fc8e4fae23a91a122b9bd066e770585405f826e 304402200e471049a3998dfb6575930479b8fe6e7caccb1bdfc3aae014cc7bd5ed60dbe5022046097873f3c9966224b964b26c5db041ee1c09209666081548575c817f3909fe 3045022100d116bfcd587289ec8ccce6ccd62c50910feb903182aa3b8914e1e2b66f7ff1bd022018510f9db28f1c59b0731a772e25c1b0d0b0ac7c9fd7eacc2706401e39348d41 304502201d02b51aeb41675abb9b75fe61f50a681af9d02d9447c80f6e4452b100a8ae7c022100ed7a200c315e06f783a89cf1c7ae32c6bdedb8df4e2943951f1a0066feddf366 3046022100dee26088bb86b983fca7db0fbb55fec24e4ce1874ecbaf55e7ca9dd80e8ee770022100efd089ccc3125b7d7a873f3af82914b465eb59590b97b685fd72165adcf0cc45 304402207ce4f4ac56b093a567bdb71bf28b6f326b6494da462cabfb54e33bede32f29250220605c8901787f988fca10d34a6324b15529646a6d933caea97fa7725a8efb3f52 304502205f7a141a19d94f6f6979b6623affb30fff5ac524fe12a44e28df01ea153848d3022100cb1e352b27e1b4aca8248f0f4f5d57a4ac33251b7b78b3aa7ff00b8ad8c24319 3046022100ce445b7147750b28d2376cae3b5e8ac6dbec9db57d3c6c9326715ce21d6e953a022100a91b8b821bcc1a907a222f0fa74229ae6fdecaef690caa97567ed88df8cf110e 
304402203c8649f889be9afaa86aafb056f335a32b68ea392613847be845c609a856a412022004a003ac3afb96e1842850e1e8ab0b080ceea3e88ce04877fc0e745b3eab50d7 304402200c3945950362457f7176b5d4ebeba92de3bf079a9aa58e1b6dc2132cccb8bca60220329b2afe9a81b083f5242d6d538d8ae6e1485b6c85d9e5f2e18effbba09f163f 304502205178186082c9306fa8303680d380ec255d36c676e24efa3261c9d9255a74f058022100a9734e026eb360e252caee406aba7bd3c3f923be477695d9c965658cca1c4a6b 3045022047731ea8ba33bbae26e0f69a9cc220989e9914a2dce73918ccc8d05b5b3ac2570221008be2b5773a18494b2c0eb2c3e56fc6f497d45d245060459f15779000f1d643ac 3045022100d9f2a32f88ae7625b86dcef189afb87ef54f43964d768925c1280248d02b4a1f022074b70322f98ed33220dec5372cfbdb734cc7c1c878bff37608ddaa7be6384662 304402207f242fad424e7b52f8184bcab10e064d060761e6004dc30de9a943132320598d02201731226cf9a9d5fef20467209c2a32fda006eb4fe33f1ca287f440a20c5cd5a5 304602210098e19bdd1bcfc5107397a955788efb2f73071e44c19e690c936e07c0c27d7ab2022100f90020d28643abd1aa619463d00ba9eee7f940445cae42a6a6e201451cb12f28 3045022100a553ea652c58d304244fde5b1bf79ae8c683a13950e535150b06fa334ad99aad0220792a7219749fab6b9e79a53261a12ca06cecaf1fa51525d1887b6d7b0bf24f94 3046022100e891e832279d40d767f98e9c01634c19f08b59a5efbb393d3cda26eff40d229b022100c4c49b5499ce4e4a1f9c6caf4eedf356f4d330a38134cab7700eb74a51d3c878 30450220646ee44a25b832b80dbde97d775a923be502cfac0cebb4d07e269faa613f6d8b02210087f509d938daa444bda753a5a58a2ecee72666fa74436a37e22aca2c5f42fa4e 304502200d38e25693bf62b5364c63fe39dc2aa150150f553461f0594d5a95fd3ceb95a8022100be511946175574d3af4c5dff5c78ffc2906e70d0def41ff0c3129f281e02f8c9 30460221009522b1487bf389b66073660e2e2519b326206c0abeef438811bf1b3704032cc6022100caef44e60e4fb2c9e072b24279d4bf475c53cbcea0c0c899d787feb01951b65f 3046022100ecbfb9f5e1e364c26f63431baa7854f413aac2ec3d2bb7f28558dea6ca3fe3d1022100f5bca388fd0ccce29231d3da7c1ee48f55d3a99814809854c9d58fdcdb97846b 3046022100ecd0d58f3b5500ace2cfe694fafd1dca9c7eed29e55db355424dfcd56017b8e9022100b4d2c017be14469ccedf8d4e4b17a6238143ac038ed975e7aaae9f4d3082d6a0 304402201163b29811c1d0159e6ea903a54b519b52a2a585b26c64ae1f8adc47391d163e0220048853194474a2f48c9ad144168bc4a45a52fa04d611889bc1788fca89c9f243 30450221008063b19ed55a47e4d3efbba4ef88b8a88734af8b160136ea8fe3c5f2924799c402203fd7b7f7bccd2aefad511f12569c26e2b6a770a57a789b36b16dae5696c44d00 3044022014559ceaf602b93c846ea0185b63992d41423094836466bef5cbc5f03a6160b402205d713643184365046478690d5b7f2ac17328b48c424f89fa4d89bdabe884f483 30440220780d9045b70862412c3804c5fb431065bdcae452ce27492b88c0cf28dd14c2860220047766ced8d652ced4dcb32dfaa33b0fac14122ceacc32d36cfbeeeb096feb43 30450221009f09a1baf596547ce97a024026909d8614456c01ec7ac7b911df885b5f9764d302202e998f426fb0bae26f9dcf3abd90b1508ae9c01ca31b66a05c6f4de647239459 3045022079687f1e3f129986bcd6604c21a88993453c977ec3e8628fdabc77052031a3cc022100cb6e85bcc2ccd9a50afc6ae99edef061fa1894284042c6afd86b6b07d7c5d220 30450220283f547cbc0fa8a898dd6f01c6b4d3efefb966d336237b111c778fcc74f097b1022100db64684d122f57b5b2545b7073fcb2a15228b43d64f3c71c5118f0fa34079ad1 3045022100e771e6c0dfa7069823acc8bed29e1d7d1b1fec57b0ca59b51c968bfd2b12dd0702200fcabfc9e1fe6051a172c75488fcd080581c9304029e30964c0e9d0706eb0f06 304502200a5c41e88ce530ee1f7ddd3718800d75a5cde44846c96f61c3dec6a30bec61f2022100d3f18a918eef834009aeeb507e02b0489e4a29f80aa55050f8bb544ae883a8f3 3045022100cbcef35cff3a0890550d21b808128c9a4da4707eb5bbfef07c649e27458c09ac022059942b2eeaf28f41f92bed1ec82a616013e53b5037739fff0d05ca5bda669735 
304502202475c4a80e81bfe5120aae9e28fe1d1f6953f4e7a8e408b6f5dceed6aaf4b208022100a18d4e75ec8726e6efa9d892fc83bdd1cabe03d7aabc89c2ddc9fd551d1fee9d 3045022100a40ed04adac63b01196b447c8ff6b8ed5faaecfb0f892c3a8b5c623c79f831e802201938088e18bad2dcaa546135e0d19b29a4e9de58b839b71295a81b00fe6bac1d 30440220649f71ce1c3176cb1bd6c17bd88529ff5c7d418769a2e4e296526ce882c9448602203f166872fedd651d25610ce48704b60fe8847f17b4417d1cea383cab1c8c780e 3046022100bfb534f3dd2bddbac38ff669460424090cb98e3dae5fe951ffa04f914963c23d022100bc73173e6c5f863cdfaeb663ea835fc219f2a1d6fdb8b3b4b5892caf74821842 304502204627827a5b7b9309f1a6efab708fc1d15ee64ee3e9e91a29be0b5b83eb56ef61022100a7eea30ccc8bc1bdd7166da4f06f4e3146e4cfe5e65897439cab5f217bbd242f 3045022052104fe06e4cf35870f5380650861b6f083f8cafaa08ab175b10ea127c8a35f602210090f0266883ae5f9e9901c73355b0726299801979a509d45735ad9b20918934ba 304502207b8b431749a8b54ff78b46e324cfda06bc817a2da9a1f168ac23e8f61e7bb1fa022100816bbc59bee2f058946cff4655af10f43c473742837bf414b3ae9e99b685e55f 3044022039e07605b954b2cce8b075ed4363d6b42b48b606f9378d73a86b1343e451c5b902203cf8a2bfed94ea36602b1a45b9aebd8918e1db652f21f78ad80ffec874e3bc5a 3046022100e26c04062aaa0a4acecd69f8478847d180dec2916b80a942b0decf599d0db15e02210085d6d4df2a8cb7d8d6bd05022632edf5936fa4673f9267d35aba12a6c6b39e9f 3045022100ba7b5c11a8d3c37205a45cc7f750275cb9474507cd35aed3ca94d5e9ad7fd4ea0220600bb5456c3ac9bd0f42151d7497ba004c11148e26e4cf44dc478607c53b7d63 3044022022edd036a32c3d440c9383ecd60a7a9f10751682b8694e8978148f5b6b2a328c02205f48487dc5396733e8423c6d26a49a417d8f589209dd0e8784211023d3ea2890 304402200eb2b0d4ede85424edfb7aa15d4eaef2d44d9dd91f50ef44474ef350209ba0e902207aa1a367a9b09b87dfd648900c86a598f46c7fa4958af7481d503c8372e84716 3046022100c779cc25edada6837fb54e178b4e8c46163cb36f5026af205fe5b19eef47debb022100e02934f7af3505fe37af3ea7d13d90d7a3065e420271f8e8c75c1a5feaf11592 30440220566c0405fa83196507598d81f90e0d009277c894959337c70c50d02ef361c85a02205cffdb3b443064b9e8231d5f54bb00a30228b1b6d7e4f69b07706f240421c8e5 304602210093e5fefda085a61e0df68bfa154ad55c1edc20b73434c3b5687fb9fa5bb17d77022100b38b52235b287ed6e3325a2e812042b349ec75ba2cd739491d5b53d3da0b6a34 304402202c30562a710589f14276f53571001ab4eb490e0dc8ae1134a84653c40b35a4ae022063ebe20d30e6f86a4d8d2d959e064bce1d881184871f584e8cbc1a163d0f6aca 3046022100b18f12a01bc1fe71f6c4d78491c1fedc1ed9d76a6fc285a3135c83affd4d62930221009b4f6dff83062d8b604a1d6d042542e1ca6eaa352838206e39ca9642edacbe02 3046022100dfc9db7ce156016a168a8909ef3c09c954fbdad41937fa978c3bec632fa7c221022100a8d4077da999c6b5973c77b2877e808e37daa9780f825b405fcbbfbb90ed5ac5 3046022100b8645c81614272afe9ddb519d86ce90a3b24f8d93fa6a81bafffa185eb785281022100bd701023eed21a7ae4e895773b39c665bf9d925757caef5b8bcbf4d6f522064e 30440220519c72e6f24080ab0692d136cd2b440862495383330b10e6b6a63cd3d972f54502206c2088d0fd97c48ddd75b21ed9ce684999994c8bad6975fdaf3910fff6654243 30450221009cba323210399e49bdb93989079e647bd274aedb5ce400f7b3a9c1ca3321356a02200b79c99a9ca3a893058e6ec228d41f991e59d131941b1e8580efa24649a6cda0 3045022072f2ad25169ac78fd077c4be38bed158628a4cbb4e4b861973972cc6fd89d515022100bb6ab746b1260ad20db37e3adb641c20c00b82ff8b488abf15154e060761da7e 3045022057221849fa8d9070ca747c4a1895ba4278e86f0554f5fd2aa2578fe3318e2dcc022100db368a040aae52fb2564f6ece971401e33d45b19ccbec7e2ba8b243de24b4ab7 3046022100f4e027c82af30b4ce4654a082539194eccaae285c1b20a40e68a3a27ed5397de02210085d51f1a0195eee39e61d182508b4ef0d831b490163b5f0c73c5596db81f8345 
304402204b36b2490c65431a8e48f0c209022f9d5feb9871fc15791408ce47b5ab168f4c02205eb2e17b26418603400ffa47980eb4a6c276f326b41999e6521490913c75b14a 3045022100f4107217a4d0880a22d9b44961b521ab5fb095c775a2d5668047082f76b9ab2b0220780d90dce83f51b5e0d5d36551fa47403c26fe9eabbe5ab5167379844cc88266 3046022100ead8a43fbd7d9235a3cc6bf07283ec9868d51f4688e345f14054c5ed3151e531022100fcf4da46137fde907d0e3e9ef6656a944fe1ba83f87c1cf994fe1ff871c7ae5f 304402207445455454bc6014f38f076438831b49c3eea7637a4a0083536adeef0211862c02200b8bf575ae706ae5cd2ef0996ae77ef81e126eee28ece6d180d56fb05cd2ec59 304502210093e3b1d72ca54ffd044fd23cefa1a8c967b13e61c2e5c8f2b8f69209fd40d94902203dc0f27a22bec934be88925df1a2e548cbaaa0c153c7c84901c9a81e075136ac 3045022100fb9f56c73d1645cb8f25b206cdf3d47692f92dc3dd18b598c48f2cbbbf25941e022075220a2c3e62fe2a41147562c82bade69510c49fcd036897e42265fcfa587429 304502200d2f7f76f7713c882ed9650b9d119b1fc6f0e2324dd159925edb707ee78758ac022100fcb868f19c7470d51fb79d140abd32f6a80c0b145a4c9e1714c2a75f5c21aaaa 304502205f89de08165468f28e6339d279118a5238a553630272a9796ba4e2e656a73ef7022100dae6972d026389c28c38b5227d20fc3b9f151724ab0b118f1b00bc113eff8ccc 3045022100b910f4c11bb893e31e1059f143eb5c0d7b53fd74020b70e6fe14fcc7fcde4146022059be686d5a71f8f9073b4ee4ba1e71cc5eb12e6cbd9f2f14835a4bbbd41a7c8b 30440220618cbba1fab30bfba3c33cfd88ff291f6ab74d1e84180adf130c1358590e442e02206461772d50260e05cc78ab6ff5710164145c1bde0ece2e5edfa99fba18ee8a5c 304402204acafc5a949f55c412b0c51ec2741cde29ede9c6abe2537f51f2039262116c06022004fb1b1273a4e3e60b9aa8aaa61b2782cb5c023d6a8c78f23ce6e985592583b6 3046022100c8a92426c9e711f1f8bbd0d7afa233d99a1493d525ff0a403a67d75503877368022100fa3db9e8775c4e81fc2921e5f9474314b49928cc38c874c6f93dd8e8e72848a6 3045022100a6e85408599e0abc669c27f4c71538161aca43ddb4228a9d6158b35cdedef2b802206822d17e1581201081aa0a363924addfeef369b715b02914711832ab76fe7190 3044022012200c2939dc3f68226a4f5bac8d8f613b8028d66b4ae34a2f4f88eef968a0da02203b1b9fe11281dc68af56a72498689a54ff7f7debafe41d1f96f116f82a169b0a 3044022047826c820f46e0620258d81fa5743e760e4734bc3baac72c08f8a8732202f527022056bf7fcb07cfe55de16459bda95d9b6206e8e8668c319d3fbec033af2b4934d2 3046022100a1b52c1ae14185a3b799ec1199346493409a8442820ff7d8e0435f44cc5c5ebb022100f79498b696bf23ed012efa234d206b4f08017a406b8435bbfddc44f3a3e1a6ec 30440220219f2fcbea06a1eefe2aca06e5b0b61d44c565c133e17ab8837266607e00ff6f022053c55c268dc2e103ecc7c6b41dd763316fc3741ba569acd25e5b64afd0716d56 3044022026f6756d31e02867e62dc48f875bad50ef3a865e8caf5ad8f61c6ea095945ef902207f136cfcf361d3d0d41b6a55b46903709b3e51b0724df9155c1f281dbc207c5c 3045022100f94081d848c4656bdde38832565e9636d7663f78889f791a71e853fe1d9a7e6902204a67d497691f236f56d7a7ab312cff04dddcf6586fd0cddb58c6cc2bcd94857d 3045022100df08d928f9dff47e190c46b1c01b0475986c87725bddb981ff315708c3ffea00022074a7ad51ac962a829e160f51699c403416081da4ea3c2f775af5ad8b152c8214 3046022100ca3f2dcfd02cbf8ee11face13ffb94dddab4862785a96ed5d116b77eb99ff518022100bdb4b3c65d856de989f5626515ffba6656862e288352c963906ffbe9edd1d0d1 304502201eeebf3e4f6f850608490d25f82dcd71d065d5d6c91d73db0a04e61c63d32e82022100ae83234c07af16eb1a2563642aa4a6423c45b82dbf5995da85053511a6746f69 3045022048b7f438bfdc3803cfcf2d5256ac423bfe1cdc710050464b2335edfd5732ff27022100b97869d9a123ced241e20d1a26e7aea7350c7a9a7166103cb8fe51efd13517fd 3045022100f518a1c01a610d1987336b40daf8826fe750950d2b21e0b46c0d954f20a7c403022031ab23a445b0f8774140732f54f26d01be827dcfa366e2dd57dfa5cb51ed89db 
3045022100faa91b0d6d71e7af71242d115e36716a3e2254faa00c413b644300b4a50d955b0220553b637670f90cad8f92c13561889715f3625ac178b420ba0b8fc68b7b0d769c 3045022100b51d24135d5b21f31af3f18e9f9e6d559187d380ef0c87f2f4369be4d24c55ed02206d05927fb4de95301dc3898b0894959b6832d50b7379d0e8299dd27440beff34 3046022100ef8b8ba7697e74d3816baaedee6189a4ac7f70299011c35861f5b673426629a1022100e518612444ea0dd73e64c061cdae999261edf781ce0a2ca94e61a4c3ba84f9ab 304402204d31ccbe1fc8f602a04a3a0a094e12e04580dd13f8fcc5d79d2a7ffae73fe97302206ba8b51818ea8aca04d1928b996670f1dcfa6a37ceb9f04ecd77143e3b0f15d7 3046022100f285f4c69b52fd3225a43632dd111ad036fec40507838361ab6055928445cbb2022100b5f79b3d255e6f54ab5170b8808bc82a655353c13a9b145c207f9314b9357d1d 3046022100a6f7106ac382686c5f1aaf8509a3f62d6440f103c063db0f72a4b557ab54916402210099ba59e997cc2da02baa0dcbbe55a426bde988c5e559e361e4b0b557d157a6e8 304502204f6124466e1c4847d9e559ca7aa19af3255ff4a05d15979680fdaf209342e6de0221009fafff16fa6bd55a5710675f0916a6d650a34f185668e46184a9da782bbe439f 3045022079b97f5d67ebe7a41089367bd0e173168fbadcb32758ad19d2eb4e966a1ac625022100c8c12e69a6adf5555b2a0fe54562262051ea8ccf811295f6fd3c5686c538b933 30440220431c04c06d000570790754c50aa741dcfb48c6760580153dc7ed75647956a4d502206f21e1498cec6b6b51f1dbdeb6cd7a5447a282b6807b134aec8913a989140b4e 3045022020fc9f8e6520806a4a0cf148da529e0ed2913b1bf500d4702d96ffce49ddcfda022100d5c08f3c38b2ddecf21cd978dbe9a9a5a8231968403ede01bf8f1bbde1578fd5 30450220304ec2e52c1316799162e2b3c814d41cdd66d5804b3c628031561fb1d4ec5036022100db1502762dfff72738fdf91f609d6c3cc08ada37e9aaeca5f90599022e6a8d1b 30450220256fe3434cf9b15a1938101ff476c2f580a2df7c63eab80d0e1379f3a487b0ea022100996e158327ad94a3ffad2688af1ac3ea37c6b3ba38e23d75fb372508e2f0e3e6 3045022002b66cab8c01b6a729da423e09ceee0601ca9feeae22b2e15672bcc2633da4a3022100fb216e9ebc2fb5e24433a77b073d8db046d4328586fd0c2b11126d63fe1cefd3 30440220636b32e34eb84e72608dc1f5d3a438256f81f5a7a2ceea34b56ba178bcc2d891022028bf6e1d804abea0ef10cb8f12ba5877dc1260173f0e9f4efc2591d6158b7c14 3046022100b92ec49c693f195f7d5f1d01a71329ba74de081cf2050c591bc4c9a88180f982022100f68d94b06f8e47e190a5b49b0ace8b175c518e175257724f9bdd66f54910cb84 30450221008c0c071819c058746e97ecd3c6c16a54fbc1a1aded424fe4da47abf6a6f2b3cd02206241091eee2216d5d1cc62a2840c72ed62b5865e93592e0d09fc1ac206e999e8 304502201827c84d25f8e7f70677df26851bfc656a11efcf207bbe77603b81aa4053550c022100a8db533af98bd0955665907546e578998ad04ee66219ed5b4bd1215a53d1f028 304402207e353b70c2a8ce62d59931c2ef5968987e36d5a906d7e14f8afcde127981cbb9022019f58cb2f429e7d2645ec2aa4c834285cd7198cbf1249578cb9f27221cc239f9 3046022100aa16d452b29603a6d123dd1ed6ba0155943939be24688886aede1bf9f81a0ac70221009b49d0917e7bf24d3ad2dd79277a7af8884438e5a8f89b15f32f4750bc1ccba2 304402204fe10c0d7f56067367d48dc595d089a2cd24333b971d4051b2c9365dcbe3cbfd02200f388f7d7b05fb2fb8c7ad4fdfbb5c01e2ddc3c3d298c74d0bf12dcd5b624ba6 3046022100aac25d83958e249496b7083e5993e68d86e74280053cc04a95c641c950347bcf022100a3372d1376c5d5ae42f3263dbb55532f7e01788df1553ab25b6fee16091899d5 3046022100fce9c919bf200367f27752881d776fb7c20f4260205c59d45ad6ccc284f32bca022100d1569d0f3359dd8b509bc353017d2af8638a144e3c5a671fa79b9cdd2ba8f6e0 304502202945ded124ad14a27e506b4ce8ede76da0d50d319734fd1408970fc6da819fe1022100d37583f6d019bf5ed2bae984d8c716731d35bda59667f3aca8c66ee251022eb6 3045022100b813003896be4c42b37f68258c056b4b8652f2f3204a5bfffa746736501dea0002205c970c540c75338c049ceda7ac172ad5e6e49245b1b2b12bbacfc65781891170 
304402204426cd9ff0479e7d143616463e2bccff6d72aeaa6d735fa772ca4a1a2794c00402206b44e6591fb37c71628a502fd775e61255353b48a24aaf67836eb884960bf5b4 304502202e1f34fe01a6305e3e7679aeac447bc0bfe7c6610232ead1ba2a5bcb040dfa3302210085a9a4887f3bd844bf650ff34ac4089e76a47ec980b9d78d4f277b3e42c40bd9 304502203b2424401a9024f82397770b1857815ec1b895449eb7bd07b0b3a8c5cb9ba938022100c8c751f6478c13a6d064ef588fa3c21a606d69a286897301e51fd09051f8dbf1 3046022100c77c1c930a8fdfa3714fcea3781af0549b506d66ca18afafcd8ecf3bbc982ddd022100d7e854ec3229806fd4687d3cebd1381aafdd8e2865ecdafe2ef8d00ef51804ce 3045022100d3d35ac3fbf70ad5c4c3ec1d9ea956f066b11036ae852ac921fb33b9d23aa63402206b182a75b0abd1a33eac6dbe4b2882bd27e7044e3e096b7adb36a7db86cfb93c 304602210081b75cc3c5684498817c3e14dfbb75e1f43eca581f5c6583316ab71460fe537402210083cfde6f7f644c2141db80ece9232c2f3679c934f0a4cb087c9d5a4b905d3263 304402206652f0cc9f8ed3354d393baa119bc5da9b5786868ad0df0245cd0dc8e422d97402200973c2f6a360ccc0b8b9a0834eab2f1a0a87face3ebcb7f20bdd5742fbe6eef4 3044022034fcc499181b388efb89db66b6786c7acde4ceb86b1415cb912f582f96b36c74022066a6744c44436ee51f23e062c781e316acf2f830a706eda5ad47292768c0ce71 304402206ff342285c4e3ba2c67e9af45e4be21367ab8761ee17a3c130e037918236ab1202204ddce6842a01aedddc5dcecd1a2b3649537879bbdcda3f2873a97051dab7c308 3046022100cbfb743837035f099db8e891e4f107957712f8296815e4f3a3baeb831c7d3ab302210088eac37950c2be70dff659e9d39c1fa13ae1bf8c40d4636b691e20f77aa88eac 304402205a97492aa8b7d2aefaa39956e34f906b46450e7d2221473a3fb729ce80dda46c022079b78944d9c2399eb3c228432b049e2706ce804e98ebfff90bef753619fbd710 304402205d0e98ea4003331d045baf5d2c5891f022bf3e397c68f283a78387bf9332b48902200bc994e8b9d8b98c13b7cdd53e903950dc2163823dcd0dace3b9bcf66ed33204 30440220224937ba4086782cb0fb9b0b9b1a887dcd8b7cb65a188db55a2fe07fbe7bd3e30220227d050582631ee8edb2cc55e629475000fa01c0ffb85ed26d3a622dcdfd3c33 3046022100cecdcc16babe42bbcb1f9bcbb85331da4e76b81b8513b711391fd93852c79dd8022100904eec76c3c284cb466577cdf28d680d892a60ffdae526d0b9b681917ef424e5 3046022100e82bc9f0e92ce969f5062db582443904f55d7c7d1b123446b56573632f1644e802210098f11a9135cc6a50b6d42d6b6bbd3dff1bff0bc1962eb8253f92e5df3750bb47 304502202ea6e94c5a4ff75bd82c7582ce8ed5ea49e75db85b01ddad823dad3781280649022100f741cf679706a7b56451a067892a59a8385668358301bc43ca2c3c3131192a2c 3045022100c1b9e2f7206f42cec2de6583935d1f36d8486725d67a50e873d941d143304577022028ae9d2f8b1fe0451ace4832a5905c7455ca2a6c980b659e806e06d55346b337 3045022100d2e3767690ee99a5241dca129d2c1b1adfd6f3c636864e4377afdd212f5237ca02205a1da8f7fd4b2961c33234ca9a290c2cfe4960f44a79fc8a1686ab81a31aa7f2 3045022051ab8691c8027f562362264fb03d7545c08bdd90ea1a22d8d125e69e0ce6df73022100c1f2fe0978e3e2553e73afccc3c9b9ab1f6d006cfb6bcc7ea79b0b0a5161adaf 3046022100ad1952186213f0197812bb4f666c5c03ab6e2828057dd75cb03eb5001e19e431022100d35d1d909177369437555cd82c16a6e66a5a5d584bc3125ad7f061c158c3bf2a 3045022064d995e1c179e7e35911a5b22ccf95c042ac51c022bd747aaa4c36a51b73edf8022100a340f67317040f8036f1fca1faecdccce85a9838f32211870d4b65180d994491 3043021f6baf1e646cc0d7a2d681ff685f28dd887a1f234d4647bfca1ca4ccbff8f18202204fe58ffee0b75bb8e88c95f7fcc4b324fbc31e6bc8aa025cd0c065ed4a152595 30440220746d85403ebcb6a3007ad3ec7a0923c420332612b3096ca7f55b8612b455bb9e022054560f80240d064d82374c446fad02e0e0428a7fc57905b293306f1b4adee8ac 3046022100a34f44a7c19f2ed13e3577a0b7d6202106c5ab67f3ab4bf4fb7a8ff1ecb0ab42022100ea6ce41ae357327ec354b8a40874f9d8c6f7f72b529d900793d03bd00fe6a496 
3044022076e520ac8cb4606ea253686fae759de58ec5e1bca8c94d6553cc040d974e217d022031694cae5da90d1b4fdbe868aeba3a1d1c35b6636041a2e2d96116efb6f4bf33 3045022100fd451bab06c447e81494998e92cd2d3e640cc0f9337c985732b2887ee1aa3e2c02207d4144e775062a08b3ebadc3dd5f13b8ee623a6956e628a342ff5e749a3ef49c 304502204733a51006424ab37f0fe22300c21dcb15af8a53ee67b6388d32d3bce5bb6b6d02210094590aa53d8503ba491ac49de37d7666e1e46700c39f121553219f43d57d494f 3045022100e4aed547d832fb8a4f02db4e1de98474d19591fd8c46d7eee9eef5e05b699e060220564f592b7f42426c9c48b7bb1805849074ccd7b3b890696c487b96126c5ad5dc 3045022100f446bf8f7fe7209b43839347113bb8a2414fc8c1cf5e308e4aed8673277efac502202cc07af371ed0876b606f0c3fc88f151fa1ddb5f91561385f69ab43e6be93239 304502207ffba047e3cfd5b06be41003fd82b987aad42ebb88bbc8810313a69776c1b05802210096ee541a55d3b60839eeb8d0f844fec90664fec743f8dc2322d1b89f924f7432 304402201d8bcb160d96a7dffd6d2726fc73fd745c87a45aa65b60a84a842d1a11c18c8e02205bd4e14f7c13aabc5ed3c3a9e091dde33e78d4d5e896b0803d135e88c11d3c4f 3046022100aad3d7e1524b0ce467586394164b3204da86420cafe96d0579160cf3138f6fa2022100f8ca5397af69e0af8d9969fe6d1562864b86ef08385780006735ed02fd544d8b 304402206b315b4cea4284620d309e82499f9f8d78265b5217e7e2e5fcd5890a2643cc69022012accc8ab6064e3c43667c327ad3e4e3d4b08fc37bd849fc210607ce5973e837 3046022100a8c13aee6c37ddb89c103ed69a9bed9ef6b87f465e696383bdb842ab42f411a6022100e1da823ca8c680414897d76f9a161a48fa2bd8cbfb783622f90fca00fbb61bbe 3045022010c9d626cc5286b7ff31b9b373a035aef04a5bc41e2ff44feaa0e6fc9de20319022100b070494689f62ea84e89d2c78cd7caccc69e82ecc80f521e32b330966a39ba62 3044022002c75863635938c90cbf7032480702f687de911c573d38408bdd1bdb3ffb46a5022033f0dfb1cb6285b83f5a9f7b7c198e89faf9d8b4ef16a7aa3f3d2e12db51c185 3045022100e848f442c658c1f584a9bfca22a98de5592d79ddfb4509b932619b0a58e8e6ad022005e2e414909860999c0ecfde6aa6e981a9996b35a413a2ed3bba5c4af40c45eb 3044022020fe41b023e0a9596af00c003273a61cc23768e78e5a61b51dd8081bac80ead402204c09ad305ee160c1051c425b30d5748e5822c2c3c7cff20f654d1fb835d88329 3045022100c0d98c0349a45325752484a4546995443db5f9f52ca8859dc88392f652f83047022014b4a2db2f47de7913a45e8abbe2053ecc17cf379d3eedfcf7d99da3ee6729e7 3045022100d23d12d4e6548f1eb5d84c665c1f850b389c0560485b6c42659d3d0909cb9e4f02203544c671b343819d6b9e9f0d1a98dcee4e2dd60554de5e6861dc47997a462b27 3045022027811448d5e75a0213ffda04e8a6562d9ab7158c624847067e783cc8035e6bb7022100bcb57937ce71917adcc115b9b13b3245a9c5c817ec86be1b015913fcc8f61918 30450221008b62f1ff2beb985793411356aa42092f404d9890964111ae00c2af41c88e19d60220400584f2d256a4c59ea979775d937a8b8fe65dd9ffbb531e586f509362b927d8 3046022100b69f2209a5e14d4e85f4f4f3fae7439d7abb234674d5168e7d9eb64f1062bac1022100cec4bf0450ab7ec687d92e7d16af743726278ee916fe678ceb4ee5c7aa17cf0d 3045022100c41bc72f35f8710b83a92e1401443d4e2ed4bed5f812fe41dc0bf2cdba3ce11f022005b2898e91a646d341fa9a8587e2c37961647de46d9cc835d6f69b57359e3f8b 3045022017bd6afc2535e69fea132bd1dc68f3fcecefe8da85869f7e72ffc3b579a3ca8b022100a6c2650869813d78753dd16b8f9f5ef4db0516d58c383500e625541abc46f47a 3045022027eb4ce98f5f4a0b20c913731d0ab1061d7741fe72dedeec9af6eba927f69295022100db774ec7adedd6a55a07e20f5a7ec39b94ecf5a6b8a15801f69cb7f94875a158 3045022100f9d5f24c021739f3b1e16c615602d23fe40a6c8f867255f3ee60ca381b6b4d9a022016a95eb93395c1431201c92a5911f989485edc7dc2679c599c4d09688cda74bf 3045022100b9a0466936090cd7c117347657d9c9f2cc1b4c366396d3bbe1ca978d759f8f2c0220186949b1b60e19ed34184ec66611f7873bc02a191e21212cf9ec67a83ded7099 
3045022100e56a54d9df86ebefc0c8900a7d00d2c11376ac8999f414f3391014a1929ef08602201a82ec9ffd56c5bd4472c5edf3fec1fbcb10e95cac1cdca39be36f26adf6af4a 3046022100a60496c79ee4a8f7c20e008a36cb818c368146ddc629438e8d018d6d0e6d948d0221008f1403539c1a9ff0bb3eab73923ce2acda78a8ed0bb039b7e438f94fc2f23f7f 3045022100f5fb282378cd152540fcd5e6e48bff549339d9d4426b58963ec3f0daa0bb4e4402204a89b5732eb56ed6ff777326a8aaa383b8f729e005c1dde5d63201568d2c61a5 304402203c42f66dd02a9d25867f2ff338a96ba23b4036b8da4c593284195390a50c1abb022049b7d498eda5cb7a5262bca63c8b5f7792ec11b5fd0c64e8ee062e1d2abd6804 30440220334c222860d3b59015a7a23788b91886082c4d785097ac8d04002ea184cc77f302204661d9b5f72efd88b30ae9cee357685537d7ceb8b205754291057f0b39e43e83 3046022100c357a960ebd58981b140b1a7095e28d4db6cf72943cfa987be522c5a3697157602210084f7e855d72c3b21152638dccd0874798f2ca16a15b5eac2145ecba3492d4fbb 3044022007c5fd368f9511505899b84999a187d202217455f3806880a6ed126fdb892e70022026d752647025d38eb3c2f08cb0a38d4c81b9fa7592902bb34fc0ad2bee80cac9 30450221009e49ff17ff57a8d54a888398e14f1d85ebca103ac49e106b19d48f5f1228d5990220501d7b60fdcc37029ffac2d001776198fbd0a31cb09bae2168f74503a5f6bf30 3046022100f5ad197ecaa94988ea9386be3ed5656cdb290e178eb9ec107bbd907d8b783c21022100d727ff7ae39f0d9251788c8c3c28ce17f09f9855a65e769b62505842ce7a6956 304502201178b464c6594c2118f7cb0ad6a472633074ed8b40b820faa921461edf40f052022100fd856115638a767848d98b23e4cbc28de09674a3ed0fac8593db892edd00a3d8 3045022100d7a0fb1dadd0f31b9c9d617d503967cd74c231ced9c25e1365db81f51009c3b602204a3b383f2daf8208a60df5ee2d7afe0f86a277d17e57562585193edd5c638304 3046022100e971801eed263b1d6aa4ebd0f02cd9a2757b7b9e9b604c2e7442b10d09c96df3022100f34f6bac0d36057e328c1bf7ffb69ddd9b4d58b83c1ca03792e31211ecfc83f0 304402201188e054556943425fc1633942a6df9a9c4dd6d39b802f20ff7381a23452434602201fe8ccb3a26ab4d0e8af5ab409ede1984fb6d734b7e8a0dce9ee3ddf6645ad8e 3046022100e677ef252a370d519315527ea71d09aa2765fec37f33aff01fb2d3e4307e0d5d022100e0a652845cb8a8f7f842f1309fe7d69f35821c8202eee9634229c4cb1b9a80a2 3045022100dc0e658fe7bf8032e6cf24dc6956fc804ed71e597a998dd4c04134fc8eca0cdf022051fd50fa58b75878a372b166c405d63053eca4995576a790c57a51a140578960 304402205ca9e9221ce638ec26b68fdca316b1201dcedb3c2637979564f75ba9f88af7a1022024463e111631fd362e7c83fa94d37b219550c10baaf7243477344d2c940f80aa 3045022100f867643857f4c11f31ff03d1be161e0d6a064b9de07621956236aa4f097199600220204fd5e510c4a7e5d67cc82638af9acb93ad4973e36aa0ed838e3dc3cf1274fa 3046022100ceeae67f3147d442c65026219c0d92995d89a95717e588f9ba3e48e22f09d480022100e47c5403ac0e8141057c9711e3157348d49c5fa9714117b36dd861ced784bf3e 3045022100b43a15ac52e72d376f77190dc3c3d9af5d929106701458e737b906c027800774022025f833bd7a36261060c9fd5a90b852ce1f2077160830a8262bbdbbf8b314810a 304602210095b8d9feb54679c48962a5703d190d45e339c5aa339a7b80924b3fc7924e1d42022100ad9db79193a948f7d7046f7911a43f64dbb6f1dc10b38ca28c60b7013257475c 3045022100e1ad4e89352323b6570eaa18dcebe2a0c5c0bca751665b0d73d389b838c3a56d02203b32526a2c7f0901f69aa20fb93ae398409bbf794c8990814ffd2e3cb0f9d893 30440220042a6213b609ccc6d2b71c3e3f06819ca739d0c8323d5d560ec5c8abf709b671022068947902f50db1ab1a3c7bb056290007ab37d3589e72ba37f0b0d846d39f053d 3045022100a83b5e6e1f26ff325e14c3a4e59316ae81eaae341eaa15a3e828c11c1ad52eb9022037cdf8673de1265342562c7c813bb3593106dbe8a3d0fc988293d4ea2d703d5c 3046022100b7fecad7d2d6dcf5ce9d6d06c4bc459817b5ab8e429977899fa618b7b1226bcb022100f48d1a01e1d91941b51eb2d54c6b4ab2f4f87ba4b8c7fb7f836efd928c947096 
304502200fc1f29dcaa7846b34a24f22c22e32c8140cfa4dc64dc0f19995e9dfa5f03917022100cfa12591c7b3fc047c5aa3fb2eeb07c4425a2368f3a15d755e035ba5bce30f6c 3044022026aef5c049ebe7f9d27dffd8a69a6618f4924bc562858fe07ec10d4af3fdb2b3022028ec9d36c03279b10e2b3153078132a91f580c341679fa043f9a1360a14b8a19 304502205c636ef020435c501caf383493be0475cd0c42de0c2641fd1944a2d66674e39b0221008b237e8d2cdb2ed829a07cea7f58c4d735f57557c3f25e471302978bf06e656b 3044022054eb044928de9159c66c49e7379e7009063bce74c86120bbded1f9023957fc45022072df4030faea9bbd375f2092ffbd971a9661c458d5f533b17fb63a20be50f32f 304502207aaac34f40d4b23cfe63e8d32cf13740832d651d3c92a2891c9e86ec15473b51022100fd01bc602966ed80d66cdc5bbd94309e012efbd4d77118e392f05a6b89f733db 30460221009db74002ae98e89738511e176f8642356795d221060c3cdcd2f210272a4e0ce5022100b1f4e845ec02d2718fbe5447c6bb32a3b74d032d07fb5645bbb6590901b79d2a 3045022078faa1ba929c33ce0c8dcb92d33346afc884411005e9f6e6bd600ee4264d3c3f022100ef7b6a6f5af066e41b175e04cde9505411ef69150896ee79b216b667bc09bbc0 3046022100c21d8cc23d841db908cad6e171e7360159560c0c73b6d28ee81a27c1445b41d1022100cec0fd431877e997b2cd10c0e4405c68297c5a2af8e52b56c2f7a6d774061e25 304402204d468fec9c5a6919455c5bb9a02f20d9d4912792e44068bcded62161a33ea6f00220709937b5e0fd669ea15356e99fb8d001db85a612aca8c85385fc50350dc7eb8c 3045022100fdbfac5d15e9c2176c6cdfd767197d3a2d8e8ae71c0ecec676291692aef480c6022054b363ca8c04a1c8b359950f17794fc453c0e3816e9d3e8cd72e76dbde6fa3d9 304402206740d93398b7f69c63533c27efaacfa330ac2cea5a88a969b15ca75e209b588c02204bbbaebaa2918c1a86a95ff599eae176d3762cd38d9c40dc3dbb99edb39a1277 30440220119c9f4f556df49fffe8dae02d043e432e3c4e0f026d09071b6efc0bac5b2f9f02201f1ccd35c002a6434a4b683b5dd57245d19da42c8b44cbbe2682f9b3cf016b10 3045022100e10de615d2ee8fe4e82dfe97904ab554bee9b9e065be0f4811da9f63b407895702202aa72d5877b065ecf39633d0e7fa2fa748be5bf8f35682daf6f4db31109b7937 3045022100a7465c0839e1e5ad3b52b4321bc0c91762f55bbc6872d4aa29bb7b010758e04402204d6b1a8b0fd1c2a116fba45d23ac4aa56849fdd258d34ee99ef601334a04ac01 3045022077622d882caa8473f2b5220e7712a8155112ae393186970886eb7b1065e1fa06022100eac88f4d0d539979d32be6af8816e49c17372dde88731d5df021e2ce5d46b6c1 304402202ad8599476aa40da51c923eb8dbac0fed0b5d4eaa7f67b8b7a4f18df53a46bd2022071cccb893db8efdf42e1cda5ba34cd482dfb84ac9516be627377bca0a1667375 304502204a1cd4699ccb35cd89c4e9d3e9855f0c602e88718615a075901a41e455ab7e35022100af0f628b73846c41a4945798e246f90bccaa5518b59ec523c1f0c82394be5adc 3046022100993be7211b2691291b289de7cfbfb81a91517eef7ee0d70e3127df7725e27a99022100db704b9d402b6fded061046a6d96c7e97671f3783b5d370f885301de07aab69a 30460221008e169faf78d6c125ebd0444addf71c7581887837fe6e6c0d6afe03f75ea66213022100de5722575568eefdd33dd29ae9045114d8af930d1381d7c1579bb7d29d556184 3046022100bfb89abcf787d800a7de72cef2e237ac8303f08c61fac831afe0a6cdb369817a022100b144dd1aabbacb8183ca8682b8f127286bddfb827cfaf750b70641556ec86a78 30450221009ccd6c4ff3fe8f47f8cd7c69ff8a747b698d5f9973372498beebd417a95aa4ff02207000e568c0785365ffb6a7153eb3a6e3bfd19b2926c3dcc4e6a38dfd0cc5c4d4 304402200f91fec219f3401351ab38fee72e49185b71ade54e829c7ae113a1e13e8ffdf802205fde9132785b37ec358a8942b17e83386acee0162ea070c56e00941fdc7327c9 30440220056f09e483154c18860f5d2cac4d9fa2e8061ef4e07175588222353ce9e2c97d02202c0b21e62b56c387448211a77babfa33927e939f87f74b8fb9e95a3cb5071126 3046022100a7ad4ea9000dc0bd632147cee50033037a07ce9c1290af80446ecf04c40ffcb00221008f55a3a303e85bd72b12bb71d9ad6f2a21f6b06a27621359f25f0dd7234785de 
3045022100d8199f1012c43e9f0a02020e3b030922a74db99d292848de8d064c6e60628cca02201e65e97c0bc9a0936463c54f92b1fec5156f3bb23fbc494a241b3be65f6e21c9 304402207dea98c9fdb25c93db271665184fe255b4781c4a1795149a9bb3e2b4be00f57402205d197d9b89640928ccb69b77996f0ea4fe7d1fe32408ed5c4ff20a55d0812d41 304602210098cb3cd691c714623af02b6578586e5973d106a94b0bec3f4c50ae9c1c535c4d022100debe5e721190766f8cddbeed017fb113f91743d092bdb4456646abdde5afaf4d 304502206e19334374d45eefa1614da3527b7d55d49eb86a76b20bc2e7afa407aa21332b0221009e53e0f118912eb304561e4592b16f250619d9da38be88ffcca62baf591476d5 304402202368bc5a74be55a317811489b8fa40eb81b0317606cfbca4765bcfba9af23d6a022026b2ea5e0fd74ecfe28455e62c74637fcc9e1cf939e710133a5ace1c86ce8e71 304502203b955b545288d67fab78531e87376118c339f8e1ed72dcfe64fd2d0115b23854022100aff3a96f2f2d7c31b84497232844db150a68e8372e4febbf42f42f4ed2eea553 304502202fbf6ee11de7812afeb0256e6b8c35ce5c8cca9b857f91b923a695c092b32cb40221008d80ef98b201fc09fecf0f72efcef40c16fc27398712855d79ede55c3249956a 3046022100f50e0a2ed9cc9dd3bb1fd6a3824a132847889a8268a4be4f3870df1ee346e2380221009ba21bead5c5b92dfd5295dc104ef6562b85658fcbecbc42e2ccbf0d25af479c 30440220058e354e48aa3d331af27ac317b251f3d0e9c381827c167cb206e1ebb75e70ae0220543d86aa237a81d22229f92bd5a764daf25963e196967f6d955a64c3c5f390c3 304502206b8088b9c23f136bc6b99e5b41848ef206a69e133a153a9fc3c935a97548396b022100cee45ee1cc5e3e43459cdfbdd8f4ccedabfe2cfca177e61b75d30754ba8e9426 3044022003b2d377cf1e5c6ab22f7a14784043264a077703b0708024849207d4496379a70220533ff81c5883659b6854322f812672b16ed348ec723d9a82a21a37110b9ae546 3046022100e03b169a8d7045fa9e04fed22bbb3849cfa43851ba56ff4ffa132c06caa52b6c02210083ab9a5b70a5acfec32707cdaf93c78a63ce6bc89f2e6efb3e0567a63bb3049e 3044022021e8fb5aef7796785a1e22e171844fe412f6f2b19be5ce10f7fab737eb3dc4cd02204b2d7ebe8b5509280720c3708b9b22ca74c35a42a79e81f72d576ca935da7e6f 3045022100cc32c0a1829d5c24294a4becdc299f350e59ffc1a94bdfd2b73dcbe161fbb6d002206dbc2ebfeaf368495bcf85c8088b1abf36556120df020f9bd69a32751409fd56 3045022100f7c19639e4a763be8daf561e3c167ad8a99c1f61a284f296009ed88e76fce32702200900563d8ba93da9b51b026f06bcbe5ad33571dad203550193ad9a5fbbc037cd 304402207eb939e280c05fed6b61abe326289faabd0dddcf75c837e38ba5fd0c0c3662f102202198af74fa97f248d78854ea83ca88632653fe6a29df1e9b2d7ca9335e3219c3 3045022047463b5821cdfc66c4946350ccfebe21bda69fea2350b9ce5b858a1f389808170221008deaa4f98c3b578f469839e3c6e1011780797e27a5ef2e318359cfe39934fcb1 3045022100caf0b9f2e9515467b07af8d465fcf6058010695aff34fdc410701f6a9b1bde7702205040cda25707a24547c0231d9da33638793a44d991fb3323ca2ed6a2f4c3f258 30450221009ddf347b35b0735b02685930d851fb642adfe62f4a81b3ea96d4e4d8894f0d410220322ce03aea28b8c8586c9760ebc80d6e59c5af1f32d72137f2f220d999d487b5 304402200dcdc8693b1d1f4e7fe2f2ef85a20c81ada4020b7ed1af0ebce73a6651838b0402201d77b5a3a480d78277cbe126886621c8bbdbb332feff566e58691ce977a3eba0 3044022009df0707d565109e05b407fd1202dbd8821bd5a95d7d78ee2494c34a22a9ec8002202b4a53e64feebc1834d5071d14ebaf54cd57460210a0351c98557848b92e2efa 30440220610c99c737f5eb06d4cf226024afa6064c6973c70fbaf169d69ef727ac1cc6b9022012e1882b4db1f4b9d7a4a3b325abb49dcdf3b10eaec6efeb863924ec7232a9b6 304602210089119e28b8b1f0d2a01fc2068c78eb1b8b801806bd4e18bc66d44d5781344acd022100ca37dba0173fccf7a4de1fe9c7764e3380d9c1cf941e91fb14baeca77f9fa510 3045022039d46b0fd6c41822a9a7cc77e7c80837ff018c9a1a3c5b335a5163760e29758b022100bf5cd4684060a27c92c3eb5e23b8040b33e01c4c4d17dd75a677a6ccb84b1648 
3046022100d40ac7cb0af3005b708f684ead1cc61bac42f68a7fc647eb953b6d788fb08aa8022100e5ff79e6bee8626dfa09ce85e5389a3afd921158ef215e8f7eb36ba8c2607e8b 3046022100d9e399081f466886d5d586bb466e095e12f13b9589c1c80991142a7e2681fd96022100cee3ea97c752f464c28d505260e37d558c021d22221e25dbd7bf9b229d7bb33b 304502202c6bfc30b077d4e5b8546260794b60ab220193cfe2b4ee7f1d31cb3be45a1cd0022100df461306a6f7179b4b51530c8ae414f3e17a26ed02e46835a7ed4e3a53a4a37e 304502207f4dca256eaf34a39d4a2b6d385f8fe47501142c5b38724346c8514d8c04c3a802210099f87f393cb24705106c4ffc0581aa4566fbb543d5d163d6136ad6471e1f24d6 3046022100ef4ac0a23ee1fbeac17fcbbb73a4e0e0256e94904afc28def626ec05c4c6c9ec022100b0c54878940e9fb3288e05d615c5b439772fe009556175da0d52c727afedf1df 3045022100d5f512fcc43c6a519e1b643303f0879239fdd0ff344648d574c8f9f30906396f022041ec4d7ccd979c6094ab9573224cc582c9778f56951af5fe0ec2ef668cdc359b 304502205c442fefcb263bb7ac01627461ede83ddccb05e7561de09af9ecd06cfcdccbd3022100c9f5a2f8dcf31afa787960edc3e3cff21653ece35410d257379a27780c942267 30450221008adf47b1a773a2e29b8f72532e11f32c34f42434d1f2a8f8e9d2bdd9c78d10dd0220651f52cde02df5808faff308eb8ab94c96171b2c5c08ef029efbc9217b8fa701 304402207c4be8d48aefbbab161cb62dc19ce8bc5e03ed4b16be283e6e7a10ef9a20f62e022035aeb6ee4804855908ad765916b378d5f20c41fd32f0325834e932897c231b67 304502207c88d49f4c14f80e6c7239d014751916675e444110e1a4c6076310e6219436590221008ff5ea447d720ae52f960f9f968ddc5c460242bb0ef324b7adc59d8e640e3ca4 3045022100ba9b5cbbca38f08fb49ac095e27cb88e891d357cb01de44cb1e82d7e00726c1f022007fb545d9d440e117953058ee0f27a9d77afcce681d340e85e7e23bf338da77a 3045022062144044a9f8214efed7fcc150414402964d45787dd538ba00f361d03b08daf9022100b04a5348c3f062d96651d5305897247c63edee99ee280445d1eb8abeb574e89b 3044022040407dedd58d55bd18afc9552a5f4a8933fc9b33895f239c22ce18bdb1c2579e02207d368e2438fe2cdb59456706c0b5d25312544e8df2757e8705c27ea7f6ec3636 30440220371b932963f865d0cc1556211a972aaaa8d69cdc89aae1f060154805d4fbfd3e02206cfb8711a50c1791a31cc01c7423942ba836689f9441435dcd207bdb07726625 304502204bc1f611cf4bd9e1fd4ddfc5fde3ee55505f572385d7f203ab690c7504df76dd022100cc379cfd4a4b14ae18dd65971f5bea6808abeda4a761d3c6f7756d3a9a4571b9 304502201b6011280dd3ca2a76c7d4c2c32f7242f6c7cd529478a55141fe4d17cce17fe5022100aeddfdb063e06228f7906424a67d480c62d769e8ccd0af02bdba847e5a228404 3045022100ca7920db2c21724c67323f773cb16d7f65597ac3f0afa8c4cfbaf4978791735c02201fc8e8fe24f1ba81f981267b0ba867f339897c73a77b431cc173e54ad3cb3423 3046022100c6dbb1ae2d8f11037994057b89c84da15ad6a035b9e7acaa297d31b5fb914ab6022100c3d4d291d859ebef5431929ae4fefb8e689f1b10f50b3fe0bc792231b0acb88a 30440220661601e5ead0faad68a025346dadbb631da08c76da2e2d1750f41d3c5c778758022067cede597c34b4e3bc8360f17db3ca7a59b8eb7607245660d4b54d9f9dc31cb5 304402202f2614a87d441fdfd366ab075a9890c06d07542b70a85640e1b463ab649b698002201e1f4a2bc803784982c7f948846db7492a614412880571a873a7bfd219d8922a 3045022100fa42f3739d9c42431eaa8c2e0ea2e40f9fb2c6d4fcf15fb3401ee3abb4a00b7902200282ddce6a88e3cbc371f02683589351fdb20dc7e36fe43750b2ace07133dae0 3045022100fbe2b4d838397ec9ff662d0f6e952d7d3dcc88e63a39943d3dc1969dc91f886302205c4de4b4ac99569d7123f33aad638198feb6d588c7019a5e8856cbebf46ed146 3045022100c748c418b3da7cc2926883b60d96ad987c48caa8c5d520df1935318e0c691d88022042661a5d4a0b852656e92fd586c67288c46be79e7ab5beaf93e234d41b5bb917 304502204194d81b7e0219b4f149954adde04afa0f2620e814acf682bf8baf6d9f17425e022100dc33f8cea2baa8a9fb2678079490d4aa9a5b7b026cd82bdb6933b48d73ddd423 
30440220187fc2d6a80a0d72970d33d8cfe7c8463074c059901cb6903c64ab51ed2f5c64022063b29988276c6c57867b5bf6f32914c4bbedf0da1b1cf5dca68d4d44324ed275 304502200fb2cef83115b9b2ec22b5c006aa664cd8a7e63eb8618a7830fb0e84ac8e7710022100e08183696cc75b8627912c16e5dd28d5a17d2573458aa863aa150681d044117b 3045022100a67557613a0cd6723c94f39096963172f392e35c580b34aeeb7f60929d7a520702205fd3338aae455ecd918a83961fb9643d2fa5c1ae186326b12bbe4eab96951eda 3045022100b04346570b5f6b4f806b028aa74a98193d92a18aef3b0d04a21feb95139771af022024378e3fbb0abccccfaa96e444b81c05f083ceed173e40523eeb6d2e044c7906 3045022100fbd387a4e28096fa30fd7540b8139ecfaa31d5ce6325109e8dc4c06441e1b36e022057441d674bf77ca24ec1005ee8ed379dd82eace2772046cf5bf55547c601402d 3046022100bd50cdecee0db370e5706f1c0862615bd7a9bb5fb52f54bae9f89f7806564d29022100f368a974c1679e738b3f3b4727b07c9b70b034d140fdede04baeabfa5e389cde 3045022100b5aa13a5e65b2859c7fe4a8edf98f054136627c30c97ff11265a3ed83ca68c7402206574f44aec1dbb95e03e1cbe8a9e47f9a39e5c7954deaf90efd79768693f147a 304502202078a55b758f6923572f5dc1e932453a86c95a79c5748fd216618de633f879e9022100b4f5aacdecd4eab9cd39564ab43b510148abb096a364ef13490e375103e2a4d8 3045022009b9f9fd756ea91e177bc158372e34206201b88ff29647fac7f96cc73218856a022100ba122da8aca3299abafc7d27727cbfecc7a00452695411b643d1734b6931e483 3045022100e6999f3250573f272196a7ec88d11f7a92f7303a8a9cc4107206102d287e978002202047ed4bdf3d713658ae7a72263b3209099e067e6a104eb2793c99721fec343b 3046022100bf35b76445c26c580901c262996343ef5948e22709c9d0ab9051d562dfc958f9022100a4b8134428c095b16711a64db1a18e55674345d208b913295a45e7e27c57a401 3044022042458068b14aef28ee1bc3cd913243b76db633cb959390fc49e6581b4ca5c2c2022048a85e31f85feb3a634173adcfe107b6f46a73f61c64b2324ccf4bf7565571be 3045022100dd61dd7cb88fb4bb4082bc076240ed15207cae5ad38fc049962163e7beaf8d2b0220459ecc36e03c790a88925316d86c422afaa24079b443c993db5581c341ac2814 3046022100f148ac66c2614be116b52c307da831e6acd76e9742493ca6f120f49c5934e4e10221009cb1ca468c009b5c29707dd6a2c05da7175ae44b0e568ebc4c1d2ae7106ef337 304402202a8e91e75cd7c3e7e2935a26cec69e67ba3426855f22f7d859faf132ea9062a2022018543b55c5fe311b20ff2c7452e369456f10c6c88b31bc2e06a658352d205589 3045022000ac8ec5e99ac717f0966d57d70588945721972c1bdb7bd636c8ed10d3a632c0022100881c48659a4333729f2c3380768d2ae940f22af670acc17fb634aefa297492f0 30450220610859cfbef987611d4b4decb83769d462bd7063631172abd21ed387f1cdd44f022100a19aaecdac788bcd176842d5dea2369a703f6e742723a2111b38a292dc3b92b2 3044022005f94cedb4d98a508a68c0924981c5169b9e0ef9c0ff08ab928f5e0bb5a0675702202a7db3b4c2a33258373845d7af8449a45b573bd4f529645a6c4542c749b6485a 30450221008b8f7e1c71bb4adbdc98701273288afb06ce395c630308ea0089cd28bd6bdd0202207418e782dec89d6edb81a674e18ad43ade723948f5335495c05b596ea00c2b42 3044022003cee42f92dfebda2f60716179b9c950ba4049dfe884505fcd1b6b573cb0f66202207dedcebc579e71a6990beb7a37a7a3e4e2cad480ee57b56993c7deecb0062700 3044022043b3d34afe7b1edaec698ce1c08f2aaf2c264a5d2b8bef9bded5bf98cd6c9e45022059c3e19a50f83a7084e231c22d9a2aafda6d8fe3be9d50f2a6edfa9e7a99e476 304502202b390cbdb3075e463f174f7416aefeb5588156d12b3ed3f37e4d5c273e42f9b0022100b46cea41fcba6ec583fef64cbf6977f7200aedf444a3e03988e7c42e30528184 304502204a0715b17bf12312c22b8d2ef2cd8f34c22ef31721dc33f749db55c49125de6d0221008260df11a7a324ce2d1f5b2f7aa2a1a53ac12e3f2419d859633d47591e0e45eb 304502206c9234b9ada9a6f6b7bd448d97c61a8197d9a29b51ea9710e052891be37130030221009c5df2f116e9d94e5eb7fc359df296832ddbe90d77ffddc2bbb7e074ae2bc8a0 
30450220296dc80dc4ab5cc1654006a4c65073a972958188c0f95faf128ab8b96c46354b022100dc284734353a4586cbcd077f663a7c64746175042414976783d7514fedf1ab0a 30450220175894c2dfb282bde059bd5c0323901ebaa153ff3650dbd1e4c3c568903063df022100bfbb7622da208085d972daf4789d3ea2ac17b93687d6eb5b9dc03ca90796f59b 304402201fa52ec48c841e2ee04cbb76ce38d086783b1c6e3c7c1b4a820be8e5b4b28e44022031be64582be99e3d9abbdbc968c6e0a09442d5b108c338ef3487d872993edad1 3046022100a3ae7a061b312e232130094727f908e60984927b9ccd950fc50dda6e40df2b12022100e88c64f5140c96af274ab7cdccead588b55f158bd7039dcd47ba4d187d9ec4c9 304402206bb4626ac645e853e00615174c3baf65fb43a255be687aa47c8ecede84bc622f02207b66bd8747a9087dc8841700709469fbcc0039443db560dd4cf2165f7e7524eb 304402202c897fc29b4d31ed18d2ab9708faf7a66408d1c3f46b386101a7dbc82a3176a902207e7e6410d339f6d02d86ab13c2148f29301e5e977bd0c4e36c429be2e0db0ab2 3044022044063f48676b5bbc291d262ec9dd45f271daed21b7b3fd5968ecb3b585dcd1d3022050454f27f604f0bf18643704abc69c1398b5da69b347377e380a906f4758dd75 3046022100ca4c20d3079ea98cf3d944c1b1b0f58af43fdd3707c3351b59d4bf1661384397022100eef0a6af6f6b92d3baef635fc0b5e96b02083ec314a019e99db9c9f7eb344e35 3046022100ff2064b3b15cbede6955f88c3a94ccb395db145d62067c4247d9c52851d0b1bc022100bfee7784d084b69988bf5e31b8157554396a1f41b3700266908e82f7ba90b1f1 3046022100b6f9bb7cfd7ba51cd9452d03144a867486d2269a1b97f7310ff7897f5b5c68570221008c3f702da259ee56117ce7fabf8998ff1cb605a6baa76b4da68d23c31ee135e9 3046022100b5ac8c95bbbe415fb63812a773d9cedd945e1b5e8fc995802e0c97d664c7b206022100c24684e1bcdadb5a39e9c3a61134de879a205bc5aed72754c9e0b986502565d7 304402204b3d417ee7a6630b0ea33f3dc220bf9a8137e8a17160729c834c470018d2da0302206faaf821f6d3b49dfb11bb29df0694e37841a15197a230266a8d4854e8e79244 304402206e6a6412a8e46af0a26e4459b3f9deed4d500c0efafe4f0bdbc2cd9e5c540a0d0220290ac3c73419270330a2685d27ee86c9c4e5907204eb58add350e19c73789d34 30440220538ebc734c06d73f8656e86e1a0c972b8ab54f4378742e69cf06c17aa6db78e802206316ddb1c008a13d508634226d285dff5a34cc5c17b0b9528e479c2cd556df83 3046022100907fba216d0148a0b46a8c88c62c01ba94f2c9a62ef50544f3c884b83958c6e0022100979a8c51b2f8c2dccb6f4e70c194712c656b4dee8d4cde4d40c6f5f0e84894eb 30450220562469d252393e34dd633f2b82f086a27e70ed8a60cc8674737fed7c9bf0c244022100a79114e8544346284491406576b59385dc4cb21f87ec43d6a933bc16ae7bead6 3046022100bfb0c9ddfe3270d6ea79630dad492563498f90f9dc1161bbc502fb095e97df74022100e1e995b4f5608a348983b994409ecb91fdb9055ab28d95d250645ea0918c1cbc 304602210087e2203e093f3d4385f3e8a1b0444978c9f0fb3de59e4d99d4749b53c4085a14022100b76a5b5ad371134bc8e06939aff94890adedb09dd4deed6a847c08f31d7f2557 304502200c043900c7d3c534fd489fe1c7fa2b2ab7144ec5c1ee468bcd5fcd575901166d022100f81fc03713d42b15b8687b057895ec50004e1ab4d2bb5aa86c8b770fa1adfa1f 304402204aaed263b38550d1342eec36e2f6898ad95a7c10b05478b2265100023bcc2ecc022047d6a8903ffd273e4bac904d6a8053c7a6561b0f46dbf54e8de5ae15898427b3 3045022040b519b1088a1290229c018f751409663402cd5399077f1204c643e4f8f69e7c022100d238ab23a40417de8202c14910a05cc4a3a23f4fa24e2d28091a7ac188e46fa5 3045022100aa5ca14c801b160bcb92e8db3df3861c1a060cbf4fc6b5435f41b40d723a061402202204d3858e66be73b72f14683cd91bb6aca3d24c2745cfc46549152dd28658d0 30450221008f475d016876e520521545ca98e28da8cf2cf4524ef6b400d44c51414b83f6c3022058eb529c9d9b3e0964a5359050cd51fd8b9cade6d25d39900f6c91389f34c5b6 3044022024bfa495f4be0b7d89dc18c856b0d9646b6e488cf5c68d2220cf1117405af6b702200bce7ced9987c8a4d0e1fa195d0be1bc9cdc17ffe7bee3d9cf65adce80a4d980 
3045022100ca65398c8e5ead3b26f07ad370c3d84186f4b7aaaf15fca2dfac1348dc8bede402203a301ae29ef49a3f96f931cd581c3c6087e003f1903d52808912b00d36c9629e 30450221009158852a09b73caf2f6a9ac17685fbd08f4fa625afd57a0d42bede95446e8f9402203193daf0bebbcf9840c809d175c109686bbfa86e82d8dfda19b69198487f03e1 304402201330927bed5d5d122213f279d5ef5dbaac6ff0731bee3fd84bb2c4ed85da931402200dfc920ddef0d67b95e4a724bd2e41e7ced82f726f33b4b75122829f5c779d36 3046022100ceb9723002c553557f78835dda7e25a11da704a8ffe4ee35367c6be19bba3108022100c5212b9baa49be9024c9e3650fbe4a866310e860682df8baca624eaa463d084a 3045022100c3efe023e67f5e252ebb69a9f1a0bfc2f45ea0e561c3e4673006befb89961e650220289724acb7b46356ddb694e8e11d9514f965b979507c77f377b053f5cfea04e7 30460221008aa824493852261053cfa66e82803ebfdecb2880d8a861a421faf5184e28e8b7022100e343334e328b33596063e045c0e374f64452c69cc91d20b95ff3d365b3634159 3046022100c1fc4ed12a4fdb670c084f29240271c25fe5a4a69ab00511c5e0173944252f4c022100a38fa017ab97eca7ddb71617d61fe23df6df12a2beada44d91bbc93de97ff14d 30450221008cc0c11c1bf3bbacb0f86d6c85af4b145e48f04933cc47aa696158a1dfcadd910220464dfc471f5e6d96069dcacc2fc8105f7e583b37ad54f637451a5434515cc4e1 3045022100b13121503bd0ef180aa2a02ba8f2fb30c3fbe8a11f12706c36930ee0ed439138022018ea5afd91521d12e565c3c5d81ed5eb40e9fa3e3a2b0eac5f61d987978f1f08 3046022100a4265a88b5a12c3cde2f9a77f7c542cca96b2b73958617e6c3fdd41e2c884d2e022100960bfea72c3bffc02432c7138158bacead9a7819265b73815d844d17e6cbbd0a 30440220141bcc1bbd1904530fab30549aef882eb90601bb3fc578e88306a611d985bf7a02201ee4d18c9cad0d529c2fd6314dde481f0a9cf2cb665ff713c840b4f64d55fe1e 3045022057f14f84193ef41ee3d165196fce40e4f47a9d90ac932e8d5695252bbff786f1022100b5b4fabe2ec0200632f690f3f0cefbb87e4cc3a424028f382713eb3d39f09dfe 3045022100b5225aa73bf68dee16432a996c5e741ac3bc608e3cb13d064728d33e1c80982302206fc15ccbcdd51fb76c540a4d7660933a2600fa20f8203e87ec7d4e90e9c4baf5 30450221008485b0804499ec7ea747683ab0aceca10184dd5917e6c0e7a32fbd68b06ba7bb02206470609c87252f110cbf21ad5002c588cdcef1d5b4bdd101b28e49a59c9e7ad2 3046022100f2b172d9bd522efe98ced4453d3b53c567a3dfe3cf965a9b3842cec12143d59e022100f1f78278462e74a6de7f1b4ce42002e547dfd789e474e7038baa7e4404a08ec0 30450221009c2ef68249ed28a6730696cfddcaa3d253ad19296624b97631cabd5c315268ec02206f88b5ca0be860e48614b317270ffebe3d33def773d0165ae39a49d20d193e67 30450220637060614f7f18055c1a36335d6f162be946724df5d08a9b21d9cd0447e55f02022100d176bb838a615335a68c0884c110f10f017425b42bd8a8540a7e08e73d7ba847 3046022100ef8c2d8c9cbd4b3e0af44292e3c449272c5cfa12efb5c85891fd6ba4d2949fae022100cd8ad96323de6dc3239ec6ea0adac00509739aabfc5537c59677c95ca3191ca0 3045022100fac4a8b8f6515b03344287c02d501050b6823636827bb3f3cd49cbfa9da1487a022015922b1d981016f82c2c72c374b450a9f5003d0384d7fd64e31d4791a17ab266 3045022100dcd005957651ff585e03bbcd73560b2a6302d970f3a49caaa994f144895fbaa502202737dd654a58530ea346ebd14ca91b5828232491497e0881499b87b241a1ffcd 3045022100da0872c1e4bd2dc774bc4d44546430a860aaf9464a9a7bc0281eacc1c9e03c34022002bb8583a4f75e337eee5a932e8658d6e24e0fa1a7a3ee6f3ba95422caec7d05 3044022067bb170ec1c17ebf853c6a42825dfe996db41b472317d892d4c896950cbe32b702203376e1b5311579d15ac0bd2e73aa086ccb8d7478cb38266b11cd6007ae750474 3044022011ddfa13074dc44708412d2e5f9eae49cfbcc82061ff846bd3c7cc8abe6faadb0220235c35d6090b4762fb926331fd13da93d343039ded352b5009eccf6dbe04e81b 304502206f0d9fbe0b097efed8592c441cf486205abf38c50179d4589146ef8eed47a5b0022100da05f8aa2a866087aaf1caa940dcd866b17e5c67b55b43581853a1224f4d63af 
3046022100fac09670b3d29c31d0a7aebe4571f1f1ce0e2bdf22ed0e557ab5e8eb9d0cdbc1022100d8c59c19823e6cf1196d60d2bad101ee52da75c223a09fbb80455f9c3a2d2901 3044022022a0ee5d7f05d48d3c9f7d6d58be739ee1744a36b5d50364ff9f82f4ed82d97f02200245912e29c2ce5d99d21b324624ec8a0974d2875885083d5481db892b6fd2f0 304402202bba30363fe395091c3c1d3178d938ad47190b9515c6c1979ccf1e92996f8127022022d8d48f624b67a3635c383a2649b822e6c13f87bab975fbbe7f3292d89b4f89 3046022100bd8e20e5966d23940863c6e1dcaece3eb557b46da7f70bfa8540faadf9db8008022100c2981ec9714a394fd4872dff48250412cb27b24a87aff0a7f742b5ef747641d8 30440220401c48fb557a045d487983c0027f76be0cc26434b06d5ce19d02133f356ce46f022044370eecdb8fd37a6716f3e552d9d3f7bb2915e7d4233180c2f0c748315aa5da 304402204690bf537c650e2ae79f2e6a5d6134b711b363b366a478dd9f2643652408330b022023b1522e39a5a8febfbff4e72e46f71f5265faead4a60f3d6a2fd3b54b7ee405 3046022100833863ee7a4f6104d687507b62f613c634776b5b1bf72b5ce320d6c028e6c51e022100beaf71281e9f73df67cc0724d73fe7fa07d08dec182cfa050177b0816c3dacb5 304502210087051265e68d02c7f0f1473e10b6fec2623b8cb878fda67ae571a23d17eddcd902205e8a07bfbeae5076a8d51a3d840ade23342b60b42902d6c5cf22585a163fbf95 304502206929f4958938452031fa8c3d4d62be630b55fc49f686396e2dd3c482c9fbbdd9022100b301a0779a84dd9a35babc37c088cdf9e85fac715194b992c9bd978753a0a50f 304502204a6180f2370cc93a74704a9f8fc4fcb9f91e3337490f92d2ca4dba45e9cd81000221008d0574d2dbfe673fddb62029402c9b135fe32773653de6e04ac08d4c2766d5a9 3046022100d2730504de86b3576eb76dba37a08d9e20a48710debbbb7db3019f8f04fb05c5022100fd38adfa1fd6469d101eb7598dd29fcf517b9f5c25a8f52314c1879c943eb0c0 3046022100c155e2ed1b461e6590786d304371b6a906a8b57922d43e62d6bfffd4673b99ab022100ea708bcd0e9258a45d7c13e76b6e81494b5f8e8408fd378a4cb1316eac768349 3046022100dd413098bc45d02195a366f2cb15fe65527f6a8176b4a634cfac1a10489676e8022100858011c884c048bf4a440fc8fdb790c87ef941c451bab65321d28a70858ed91b 304602210084a8d6d3e63e9ecd9e624fc51c7436b6ead90b7b37463f552cb3c68ceff9b8ff022100f474b205d3298b9a8ef603a1b4ff5aa2328830bcb05d1ae729602041d08083a4 3045022100976d5016749943b5373f4cef239a4cdee119a04b4b4d07f7dac5554bcbd5752002201ac306c0e8dcfcd414a2ee2aff3a68a9da475e2db5dd83caf87a096b96da9308 304502206764785c34fc25df98fec68c32329951ac8af3f5613366a97cf842fd065e74a0022100b74b1a9bc13b113bc80b9c0920775851991882a38530bdbdaa6c25a495220cbc 3045022100acb20313b45539a5f968fa6d381624d9948315acf0f0629eacde3b51c79ee8ef022076010b0bb7bce6b840f326e4900a2601a8af553865ce43ad74599e8f5da7f674 3045022100887681fe1ccb69605fed4a6edc71e5e7e4cb9823500ab025bfe3cd219632f13002202084424a01011ee4ab28bcca621ea68b5042bc3014f4463d88192a7c1c9ee3db 3046022100a8d649e4663c3db03ab58d0960c630d413100ae8346852e156c9051235b4b03e022100bf4c8363d46517f685d4b5658d347c0f56c86b4a5f388b5391c0f692c2939ad0 3046022100b29daf5d60ea397c7878ae9257e5e8ebb57c76d0c1da71b98a7d11275faffae0022100e4c51add12f555adc0a57cb2226bc73010f6dc8ffb4ea1369ae15f05a7a14867 304502204394db89151e4c8f05a436db79c6e761b2e78de3a9e4634e02015746cbe252a5022100e6f97b3dadf915db28313a8db432ebbd604865610c922fa499d5235fb32694bb 3044022078065d197d9987ea36d31d838a5ea4f9aaec9bc0d3970868fc513e5cb61967140220176b0ba95001fe742758918f1d4d590d5422f487d94e43f4c65533a88b9ab18a 3045022074b3e01d27aaa7876b3a27f8f46bc033340e001507fbcc3f697bec4e81aa4aa5022100fc6f87943afa3249c9d5f3512ac5864c4e3cdf6c88f1ca21f8816942fb9bb25e 3046022100ed5797cf61fb10e594fb73101b8e1c77a9f8b69ea6861eeb99117ceab1aef3d50221008242253ec0bbb6966d0a5dae79ba4ae753a14f2d05f34704379fc08433f18c3f 
30450221008ff2386eaea05ab3b071e4b70b43ee22fbe8283f8ea36e251ffb366f74a31649022003a6038efb824bf382f8215c6a9b420f2e716af9da2ce11271bd41cc2e8fcef8 3046022100cbcf6f12e53b2404087326bdf9ca44d7b8c9ef942d4b586bf7b7d9b76584d0b3022100b38422fb29255be38f4af682f9448a10ca1aa11027716e5e7c29f4c3f29f1e66 3045022018c5731a3cbabba38bb1f3dfc4288ff267a0f890e272673a76315e07fd718bc6022100864abb9f8580d282a612b7bafee9da6b7497e6cc5e8425ccb37c5dd26f78e642 304402200d4a0ec821adac53affdd8873f639b41eabaaa43a1d0052c5d5ed47d9cb906f002207fe8d88794b6986be0f066e4dd152f9945b6314b6e78f2f5e8af1737b8935b06 304502210095f892e388f99a3290f59b8d757f51368e5cf9577f28ece70a817d68e49d2f890220339ad3814616e7ce3711eff0f7a401b9fdd31578511d384759c5a8116b657f20 304402201be01fd92acc38ed6683bcf780372f00f42530d5d94dd953529e991b4b2c57f502204319ef979cb291d29b9c51d4bdb34e9d4293f971a7ba53bb4ac38b9a50b9c27f 3045022100ebc820b156492b386ef6d7756dd8f49f0a6e0f7565eb9c34ad3612ebee980f23022057b783269f807fdc7dcad22f5aa00a3590e7339b779aaff42bebef6cb1bd61aa 3046022100b35df07096725151eb04c1bb7ae3a2cafb84bdd6a468b287fa68df954220555102210093f660617598bb6670d6b340f318d0ec5581683bbb34bce1b852ac2f6d8c3495 3045022073fc167f1c7899f64d82d77494e9ed57c51295de6b88cc437375267cd2865ea6022100b7a4887a52af17a7439bc694f1f0937c8aee91ab6f4ffceea0378470580a3e8e 3045022025aa2f47f9b3d1de085c2c13b95e286a2462ca1c18fc1cafc6c296336994e868022100c84d72bbdf3e7b9306f9fe9db60bc97ab7f0b581964c67bd0500705563341bc7 30450220155f425ad75cd92c9fd46bf9153442e9747b2b732809b62fff85d4009b2193e5022100fab3c4148ea4cc1916f61e5750035ef9643ab997019d7f02694141b4d2024ba0 3045022100c6dc7109cc7431ac6201299668772dca70bca41a74140069e7d513418a4e08dd02202f5b25a635c8c4b425f0a160c32ed0bfa557678c29374cf881328b11ed130c09 304402201909fb7fcbb8a7e2adb6d7265d6e77115e78ddc203a354e69b56af3a365295e902207b8b5ad3e2ce9a6a610fa185551158984114dcd2fc763075e0a0dc4d2d0e2e43 3045022100e242b56f78a0d4c586ce96ee1f8fa1413360fcee5c371e3e5bdb6dd27b87d68b022077a6f3cb49e0e3de951c513cfcd42a9e76a65e139438c8159058d0b59cfb7077 30450220070931785bb485d053bce169613f99c59a8b21df09425921d80b94c6d49ff42f022100cadabfed85f8f8be836d8515f27d272a1603cefd73d5828753a149f3867c7d2d 304502200d778e244afeef3ffa3b80b08da42ad19ad94aa134727c7c61a1a750fb09ee83022100d7a87bbdf8d5ae774e556e9b9ffccba34a298a479c2dc4ec1a4c98bfb8b81343 3044022045615d08ec4b189b02b14a9fe8c8b5b5e8fd6a76e6217696afa1b46452552cc4022020f1b71ec6f0c77c0ad5cb97137a5bc53867d53804f1a7e6d7dab17c71d186e9 3046022100a5484a15978ec0bd2db2fcb732a4f36753216c8e0eeba730565ab15eb71572c2022100a70ec9f35b4898fd69590b075f1ae226c46992d38b8353255cef9148c5ea6d19 3046022100a2c81089945c038c61b4805227607cbeffcfe7347c93dfedad1630c29486a84902210095b5ad1a1225c9c11359545623cf146431bb43446e031652208d0082028adc84 3045022100c7ed0141c335a6b5bc15860bf05f677d528b6a29ca27d19e2eef0d2d68b327300220441ab7f9f6ae8455ddc49c87e4d90a36b6ee7e84375694bcf573e92f9acf2d11 304402202fbac0403b68d49fc44dea6fcc36f50638d6659025291c9ff553d4fec1d4deea02200ef650cf98faee5e3f981fed8bd7119653f0a3c26f062d78769da74dd5819c97 304502206aeb90a49a197514a78f526e716a0d3119e434d3fb2e68041c77f5042b3a02f2022100c374a5d92275058ec6fcab67055afecf482c551e0bcfe1faa1ee3a4901587f5c 304502202349f86af2864eef389427b49746c2636e5ba1d8a35b233d09ba5c8d5421a540022100e55a4b26b0c0a1fd02e46ba5659c65a3166f9c5847c6c89bac54ee57eb35a82f 3045022100a34b51bab7a7da6cea9cb32cd9cda54e9da946151a422dbf1a2b438c4bff1291022011cf0a144d5eddf37707b44fa1f915e1600c804540307ae498913bb35086bd10 
304402205f589481f2df4b6b822225dfae56debbdac833cf8e32cad21650957b619ff74d02201b28994d6b9e210840f2195407e59ea87c35cb3db4841649dd1936710f1bb5c6 3046022100c9e150e05990487985d82cd849a5517490d5e152e1d49fc2ee5d24ee856e38ab02210098b225f64f5002bd700f77beb95a05da5c054830ed4356acea3795480b26dbce 304502200641b525e5c6f625a910d3a3621d7abf8a0cb6f347de3d4c0c1afc691fe6d8600221009f2205c8f9ce18e1959d9d1832fff401b7c398d41ef344f698bd0ac85ffa90f4 30450220614d62f3f6a024fde3f1a9815311072b473cd48c07ef300b7d0fb23f42a30eb2022100b660eea76f4de7a4ab3e0a0802674a947a03209c1359db5ec1d1ae454387f1dd 3046022100867c04416de4aaf425ceda99e02163801853dc9c75f19f73190f3e4874899800022100d0246f2626fa382570e10bdad795589c72f6a4ade20ec6627ebf20a9d30e8f50 304502205c2a2f9c1ed668f89c23540e63efdb8528573754417db437bce19fc2d69e9da1022100f6e9685ec199d0d758977342378eb2e09943c7d732b5273b217a408c488cd9c0 304402200aefa247cf3eb44df8adf945e635009d58068ea1dabc1d6eaf8feaa76ac27f15022027038ccc485fd8364ff3fcc69692c14ff4e896afee204ddea85888a6732375d7 3045022100b8902feeed311756158ee6ff43984820b25aab6fb49b7d9fac33113e2c04fb36022079e2317581b16834b89d6d52ac713ed27069a69e6b37ab5a2b0eb9dabd5e6c0f 3046022100f3cb9db0d46ba3c24ece5f8f4e31fca34ce35c16275da69a594c8ef868e9e4ac022100e85d82ab546fd771e1ca235e14cfb7944d3df7d723e137061eb9195aa9c70cc7 3045022050157f92d37f63314e764ee1c931b7b65f4778aedd9c54283299fc16b6a18323022100c02032f1ac2f9960f745b28e57d12c7fc8fab84db1cb17681afb82c0ab3c810c 30450221008a15663d09950e299654a2081ccc7cf30f13c8b0a355623bd5a5b7adda4a79e002206e3a691f7a6a1017bc1a6044047b3a866dab38cfb24129d276fb12703efdf6f8 30440220769a741f92d483f4be15a18638d17349e389300e3f4cf8a8d5d9bceccd356e7c02203b14528aaadda654c51a5700cf80b8772a7dd87a3767e3e9f50bd6b379e572e6 304602210089117d20249ad1db5b530718f3277314428d0391339fa2e836cf86d73cf7d6ba022100f19d820923ec87615a4e8f2b9943bf583ccde5a145d4b08bcd6ce2b32f2e3544 3045022020be625cb367c23354205114c860a7a31a17afac359c2385ac06aad89e1524c7022100e8797c11fc6a9a71e73db117ac6b7ade402ce6c7cd7bace298e50696e88d8a0f 304502210096f88fb63a6b2309f49697542e404777431202062b55dd5a75b02f3b6d2153fd0220159c1449b22b725f0d7b2598bbb44e3b4cb5b2889d0eb0faba22ac42795077a6 304402205d206344df2a5468d715635f9dec22683f7bb5aec25b7013ded772ad29d1d6940220676be187f65377a707197e153c0cd44c2d889d8818b930ce14c4b9e270664029 3045022100e2cfecf378c3a29827abbe0ab0d39957fb9d79b9ae711fcfb1bee3e27d99f6aa022045ff57a96c500fbce5150e75589c3caaee5f9f7488e543a44157a2be97b24743 3046022100ef37791e1e1d6c33f9b16de394037c7fb5f201cb15187422e4cedfb5e1c7f93102210096a149720c0f7ff1a5aa4c7edf883c152f28f6dbd9b923f6f43f110c0edd46e3 3046022100c563405ce0f52ac5608f87dfccbc9207e4f727c93bfa09cb95549438723cabc7022100e597b34835162ea08c3f99e634b33a35a4e48b7e29c2f5a65e61571143d8b11e 304502210096dadac659ce6abc688f70b101f2c610ab51922f627735e51c3e109f351d7ef102206b40a2902040d7ce5633dafba2376122d38e34269effc8ba8473aa2d497858ab 3044022058415efa558131deec36bc8c1d6c4ddf131e9bd3f351c31b5bbe7a0e7329c0a502203ea035f663491c4883639a6c3562d39bd23e1f7e89e10217dad2d34fdb36d679 30450220194095a72bc2f247bcb4210f008e066aa192a8dfaea4f1b9f18e08eb73f79fd80221009613e4defc4d15980478d5537e46f91f08b96b0f3e221bdf3508876e5cb35f58 304502210087e9150c38dc467a63449d3781a8bd87e75996176335ffc8a401f06a9529ab140220013d0250c911b0925fcae83830ebe646416a815145f2698a5c72ac6687eb7a0b 304402205e8543c43843fd662693fe634276fdc74991c0b99a115cdf1b2af10def56dbe802202157fd7ef0cf3ef6b1f61f5440730b0bf7a2d7d3f914bf68a2206f77eeeae837 
304502207dd93c7fe4398120a8c580f354f0d7c6ac528338ab1ab8b35cfc0fcb68e6b11c022100ae5d2f2b9861c2f1928275373c817e5735258c3109d5fb6552a197f959feec0b 3045022055438839f45599e02b8abd33caa81aacd45b7400857a1fdd97dc2f57d7bd7c5c02210081809f9430220f199135d7b4fa8d83aace1830294552013319bfabc92ff42b96 304402200abcacce2cd26573167d952af41c5289c2c3b13f0cccd0f891348de33bea351b02201cbe0f53d46a0b832b7a6335b54727b37d9e291809c62c87081145a725764d21 3046022100bf6ce00af2da6732d85b1c57d15b19ad7d53fe277be946c37c805dcfbb6d1e2e02210090c8842a21e53ee540d7ae74bef1e9290d463f4d069db6702da6f8bbc8144431 3045022067cdc3db770ee2d3bc464975f42e83fa6e30e4f0e5f319374e4964d12f15b3f5022100e52e2813d41b5c9c8b9eb1e627c9a019f4a6ae9488002d2d303cea5839347589 3046022100ce12e28d924018229d0ce504b6b5513652cc94e92192a6f0c755e03141578ffb0221009e15d2ef1cce9d29527cedfe9a36a25029ebeb866d88ffc887673ca0a433b31e 304402202f78ed425d476fd92552dcc60215ed11bee9ee3a91091d67b233f777ca308b5702207a21ea9fbc8516581d1bd99b26b21601040d34b42d484726213203f3823975bf 3045022100c82b59fd38de1cb9d52895b399b9642225527c9ae26f0d470a17d1c27f7e69800220135ae22de56e8c35ab5f3b963eeb64cf71766d9d1fea4e2292cec2ca6c1f0404 3044022003aaffccea791248d93b31229ee42085ef2fe75245e7fe08b34ccad0bfa4db6402201ea0a87ad13b895235408daefad67c4bc1985aabef6f3efb4cc46f56ab1bf6a7 30440220343f936de489680e3848f2454918898cfb6be96712bee1c3828286c86b85de6702201cfe3e091e5da2cf5e092370fc2b7e0a7df8578b4026dfb38ca0ade3271e57a2 30450220487f627bcedc6424ca10dfc530ac5069bbd2fe14218771a4da7c874a8683b3c5022100b3cfeadf6e32c6bec89ae613d7a52887b422e1dae0de5e071d4d8c0d225a0b1d 304602210088f3dfd1f60251eb08126a8b0d5b13eb79a2ffc5cae162e4d0d1dffa0d5d1d70022100f9f709fe699ca1062fdf342f63b259a72ca47eee73025fad697392bd81150dfc 30450221009d48365077b5c106266dd666147a676d5ca37d74b658e72ea3c1f6a262a047b602204929ba0d41cb435434a52271e2a02e26249742ea978adcb789c24470cd443a1c 3046022100fbe6aef85ff47317360ea9c83de48c41b80079d6ca27816278d039ff69ed737902210086b62d6ce73478f660d051fff402fbb786534cf7362f751408d5921411f4b4ba 3046022100cf6f71ac65af57e96c4df3745619d858b9381c6646fcad2497b7a01880f53e3a022100fb55dc76db84c4bb1bb9c2b44116650bde254d1c15931ca0ff086b0a1b4cbe9f 3044022036e2a569325ca2bbfc0f91b1953b11bef78f3f191238d6b9ac9941126df1bb1102205310bffd9c16b3e1b6ff9443c6745c8b3d270629fc85041103d72331683ccfe6 30450221009b2af3b128d2cf5d5cdcef481a61712a4e1ae021ec55d6e54f2be0c10b1cbc77022069386e6f86bbe071cdbe500d9e389d8411cdea6044b298a5c73504cbaa3fb8a7 3045022100c4f655e393319cd4421522548f510591bf5290227fe60fdb7000d88973ff2f20022026d0e31e8faaecfb7c72293cc15caa9d420f0a756e14b875d0ccf9988309d3cd 3045022100db2696cfe32979f5f2c17c545104da3fc279c1c0bcd2445e9f8f3b6b30bdac2502203683c6bbf45845f25abee7df7eb14ba25354ddebdbe77cc5d839c48360acd55b 3046022100819a3c80a75dda21fb23417fa0f654116518a5713de09f741bf776a5e8a7acd10221009ddf3cf236c260a99decf40aca1ed347d33cd3af9f83a102a8d6777df3a4c8e6 3046022100bd11fc919a080142b8ed6499e56add02e831abc29e67fcdbb578b9a7d331b1cb0221009aa2fffdced8f16fe48719aa792fe3a477e6d22f9c3bb97e0b2dae54d983c76f 30450220017cd1a5206ff18b5bafc11409974571e2b2437e80c94cd0611275abd7d51d4d022100dec7eb7534a3d082a8392c20a5b90054fbcc39b10b84f9bffe4625b35591fbe7 304402202fdf3ed8a339e29be49b820ae1ad1760465f410b0f24d3cccf8d9933cb3497350220554b137ebbebf4360223a57ac9fcb49053c141c786e9b4b25690934f5666eab9 30440220122b48fd5c98ebc81eb00cb4aca83a159b5e9ddfe3221020b07b1be4f6239506022012d34f5973e0cc06dc976344f013c92b0f933a5b5ba95549bdfeb7927d755e0a 
3044022004e4cb48bb291bf3099ef94861a9840e137d884b0f9b917633ebebdb200fdd5a02201f0e8fd17c11529b08cae109e1561e1d33d02863bb7e9ea451e6039406e9888a 3045022059995f71f98d43fe6cf411c3196265538a6a88b837049964c4d1a6896e6b76ad022100b44e10772272a2f670bf8c739373bf93b121ec3b4e5fcc142dbd12ad2656de64 304502206ee9040eae4e79cd9f142735b3f036d58b4d7982e283321ed39d7de40cc85a73022100aa06ec0692eb8557772a091b6bd029b5539fae84f38c31365be44b001fba8d1a 3045022100963cfbf33ea8685783eb4b411589f14d8035d530366c0321d660d1000b794c3e02201bed44b450d0041d32f3ff55a16205294d9cd2ef01c1af259bcb48b9c9fbe8f6 304502207c4702c71e797b334fb2f3c85c8e61ffdf6ebfc121a6e271be7b10cb107d3129022100ece046f4dbb373363fe72c98b3cce6b832c740b672d5ab1f381ee06b4ef8422b 3045022100f2b7939cffcb2c25b5b012a96e693031f8a6c1ef5d9cc2a5b86e139a89d7ec1302205d8d7ac2c0da689eb0fada94c8e5e75d3de8f8b4239c3c31d42c5a2ff332a0a7 3045022100d86bfa900adf6b2fab82fcfd0337475c8aa60071699e571275c89d29e517618202200410160e8933ed111c8f1c0b78a1e902de4138d3e67f3a917fa2f82be797e339 3045022100d3e120be38ca5a1610adc9702827916023521d554564f5f001e61103b9e00cd8022007430d2b42aa96ad6b626ea1695ced5522091f4d99d3cbc343e0bffe99f5f4c1 3044022054017084986a8d7fdd026e5c9808b0151902fe7b6b65ae26be334ff278cb2d890220497f4ce2f9ed1ee1d6c9a4d3c9762da50a52efd7250e9b6d3fb110dcd2525188 304402203909624417e5624985ccd688ba3be186ee1743b87c0080a9c60daa49ba5da6cb02202964ac0614e0cf0babd0095e5d7f8be0b40c7ebbcc467d93074a27735bc0ce8d 30440220561bfbc75005778e576191297ecc774ed6c3e5e5af4e6c474170f19a6736409d0220786cdd60f747c91c9fa03c9af317e10eed6285be1a2a14b5d9c85b9aab097df6 3046022100a1c22adc5a19fef820162d3bbcec976db2412da50449b646154443d0ef02581102210080901513d14eb5f8393ebe5d97a052c701a764b8344f2a0b28aa82fb5d4f9538 30450221009a86ee7e0beeb948d29c32de9f26a2d16540f92014c5838d29931332ec06d878022071f1a91499eba8997a4bd7030c13e7b968c41fb722f22fef2cf1c26959402b78 3045022100c2201fd4416072c73b3c38db79a8af83305e2618ef6137738b6f4798ecaaf96402200af6dbb43c56bb27bca47c0f260912892b7b8ee0099c1b23515130fd31e0ec74 30460221008a5c8a2ab0846a14db90cc0651b62f19b4e18bd74044592a897e043487afff85022100902bcd15f69cf84bff4e8ea699d17aff6377fb326f0686ebd5557f8d28b3b6b4 3044022054fb5b842624beb40081045e6aa5ceac2882dfce22db6223a296f049e9b12aa60220181e7cb2af4108f71f17c671134c2ec15548dbd8ce549750e27ada9fe6cb6011 304502203a717c040e783f70979daa8870f48e01e879ece9d79609e74b1e550bf0c0aeea02210093b9527bfb95804734c38ee1b0e45486ab63aba5ccfde4218c12fa3eaf5f81e3 30450220043d40a1fe79849e7cb4c99f1a91c50345bb77b673391d26c73ea0d19bb37e49022100bbfd295846bcd86a4904c5cb8200755ef952c57fdee0358bf823e747ffb83242 304402201f7157e4c6904eaa93b36e39af5b9e589a5b5fa47d0ea70d8caa140f5e22f52302200f4b34a566255fcd64732e92c898d222e88ed22ae270c366c903c3d293102d92 30450220272b63e29233240966040ccb84727cd57dbe25cd62caaec6f83065ecd55e371d022100f605330e80f93ed096691e3f4aabdeeb33fad8b0aca67309c1fc60bf6482e64a 30450221008b4f8fc88810d0b01def29aaa5494e910916ba0e7ab9af50eb87fae101400b950220397e0df8a8a375936384ab277552296620ef25c7032894bfe9fabf93636e054c 30440220249a48c6b57ba06613ba72990050eeea355744143fd3bab6fdb6aea08ce22d3d022007860ae07faeb5084d415491be95bbb0f5cb5fd68c85f08cd07e37824960b42b 3046022100f14ed228d4337c9b1a1ca4acfa963b2c8a63c459a4b7ca6d0dd911634d0eb343022100cd194b1107cb3f79f6d51536985d4992bf452760d77bdf06334500e637c19cae 3045022100be96926c24ba912c855cf7835fe7cb21771462ddc884e238448622005947bbf8022027af167e3d6f3bf00c8e6ef10660b42ffcb4c26a3140ce51971a58bed53c1114 
304502202da74346b817f84340e8b9656e1c205b7825d8bca0f54faaad9984a1434f41ac022100844548a22549e1a91ce2ac9ac5162a4f83c91b8f23c450359024ccb18e5b03f6 30440220590147fa638d02b95acf6c9fd5a51918ba11d054d7e1a33c32182871106c7ef0022010fa03c77992183f6bd6475ed5d682207a20c1166e6061cc1dd7d7121d01a629 304402202fc046669deef101bd94391fc9a198331a5459fafb0b96a54bed31b6c99952320220083a5f4d8788ffaa6d253bade2368218f791563eefefb084364a41736050b3e8 304502203b32614fce1ba51f3ff87edfe01d9b2e9a741c98115650132d39b6e40614630e022100a592931c639d2a3c0ff21ac452319b962a3e4f3953269c74fe03bfd2b9bae659 3044022040af5e0a48e6a54e93fdd6ac08f041a6b363da06375ddbd31b6842a7ace598030220605c5768d5983a091fbc7509e98761c1283abbc54f1cb23dd4abeeda43bb492c 3045022100975224993cfe199c75b91564af69844bf2ee92e8042e887d3c3da7187bf8528d022031f46358f237d17a169d3be513e6647aa8b86bef34dd5cf35306bb71e4c837d7 304502201ab5ad7c3e551251b0fdf067895bec7bcce89458be957af671578b0f87a0f0d4022100c49b65b2fc1676ef109eb55932e50a0439c519ecd6b35fc70af3b0c6cb31e1d9 304402206d60ae50071d9ccaa074db69c7f0b93d209688f520f2495af2eaf22107ca8886022023daf864c38f73d522e5fafb6f4b7d4289e2379ef1e590d2b43396ca117f9f87 30450221008489a033f2e5fd7fae94a0e482833e2819aa16effeda1c6609801254f9d6b9df02203731ea23022777ef91c9a06c42d46edb8228e4cb2fd2f90b997a89648def2dd5 3045022100ece8df5c23e0408944db3bfac55c47021db0c919dc06d5d9f85615b4b7a1d51302203b040a2085f28993c155b3ce281ff78842b6d055cac88f947d80bf5b493628ea 3044022072cfc6d86c150147424fc68b1d245bec4618dfb8b185e586e3f1cef343b77d2b02206c289ff9db9aefbe77307052c9735772f6a40e26f636e8ec5a21bf6166262994 3045022100e8ee3d6bb99017ba3ae326d0c58593fe90f9d0f8051d736da13e14c514d12cc0022010e82d9f11d25d80c5dccbe71480eb0466dc695adabc86c855c1c9c91e03af19 304402200df79befecb052216556fba8f33daa6386ff556edb246dc5ebecb3026d06f572022051390f12ae6fb845b48e958f606e65f541fff9c0aff7aa04e705a776c733d913 30460221009a76a81f38813014042db3b483420f0bd6e0cc66d80ab9fe58ea672054f8e4be022100f3d63864c160bea61cccedd271a077b6d6bf6db78c803d892a850ab921b99ec4 304402204892eeca4e3436a4053846fb0836a59f6509f24cba2e42563bbc558257b6abe102205a5798def9508d5d8f4cd22f532de8901629a4d179eaccbaffd7e513b9a02350 304502200267bdca8ccca7931352e52bbc4cecd64cc59eaf52636aae18dbfe8811184db4022100d949e4b92682e36c41d448fdaddd3ff60c9415683c42afe4d51af03908945890 3046022100cd4fe311b7ebedd321c884a327ca1f5f0155c723b8952f874cbca482d5fac4560221008a4507b13a20e4d5a23f508fb6484e455a55745c088cd2c4e904d11a1b00e7e5 3044022024fe83124c31a89ad1102b72b011e1ecbc887f22af609f45b0e1cf408cf4c97a022038d28d6213cb9ad48627d2353e23973013858c115afedcaef76d2e5a069ea460 3046022100e78b757a40b7190a02abe0c86ac7201bb974931b7f21c1bab917092c99cf6d78022100e6b40776ba187cc1c3e29e59aa74746c7d358536cbf20dadf64ba356dd1f9eac 304402203b1f7b044287a33ddb36f477286776f9b9e2f38263bde312f1bb233ba61b707a022031b03c89feb81bece27874a2780feb6c206bcb3bfe572b93f930e0afd0b50e2c 304402206632b3ce4f3ce6e1c89d245104b0a2165185c1d6cf6ff88e9696c80c1daf2247022031fd25420ca8dd2985e8b0afcba65cc0229d46e44bd165d8cc47f3e5299eb0ee 304502203ef12d92782bc3e3a391b669964834888717b4d4c9d86454ce56d7af3b24ed2c022100e95935e9afc6d6c9b4ae25afdd916a300b4cfd4d3fa3bbdcfea32e8971544c63 3045022015185a5bcaa42e51c31b8b4ec5e2246807845fa05459e6462d9e7cf5db7d7e8b0221009e6106e99e7e008590a83d1f039f4bdf4e939ecd61a5cae0a5fd93f088603bfe 3046022100e86c0cd51b472dde7ec315cf602070ef94bdc56faca78dced15fe2b9115773cc022100fb5899881e74176d22ad8992619ded9cd362a6dc357aa42b64f01e63bfd1462f 
3046022100e3c5595f6b8280f39f816c6f75b1b47642abe3a378b7f596122cea560680b2a4022100b31a4c197d591bfbebb70e39b75f3632e434a0b744db8297f70d47813016cebb 304502202176aef59ec1fd0cec631cba18614373ebe98d944217803e9f399f8509aa3938022100f11e0b6bf14237c606765b5032cc79c6164a6630b5a0893e6c495b7e56e30ede 304602210094df26615a63771bb08dd17ac33775c4a11b81182c01a42d4ddafbff75b9d11902210089fb5e9d6449624dabf8a694e2109396170ea1155a48f4a015ef6a42122b2a33 3046022100cc86a6cd7c69477a4d5c00e6356e1a30e3fcfda289d75f50c2c0995bf3d4eff802210096fb3598f14115426c2ba547a82a503ba5e889c9951909707391cb83d94368d4 3046022100a71ff6cf9a7840dd03744b04b267e22a188181b28a639486c458bc6ed6108106022100d1905c474d0c5ec05e47da38abfe45d9edc56bde22e1367d376405a831862d25 30440220470c59e61a7da6fca69095e86313bedf8cdb421d9c9e936df5524223e979137f02204dff1f72fdceca2c1b78bba34648af390b03413f0f4695d0616cedc6b8f14cbf 3046022100bc727941610a64b5e6fedc13f3c98d06243d9c8e1e81fb2d888ba44b3b9b99220221009f0c4f29c610fe1c29906cfe758c2a9bc7125a821ac461cf3f53b4c5ccbd8e02 3045022100aff939926c1385018bd037ec70fa9b9f77afc6017b7362e433e996defd7c1f2e0220546473349d89c93196248d8e4b63813b9dbd071da8d398781e4e7b948cd7b566 3045022100f6cb533aff45ecc20f21130566f6ffcfb89d325ef845d39fe5fe584afc0f6556022048a529ce7a8351c05d2a384bd9bd364010f32d3cec73a15d3b7f89150edf2a01 30440220579efa1318506289ce1e325f6199777c12f80650d289c77834af0d553971034202202688ef3ba5380c131493e435934e6244ab01adbea78c23263da310961816198c 304502202060857f67ac009040cf0fd9d4a02e212951ed27438c5ceb896bd580f42c0f76022100a804cda7f691454208bc0976d5cf1aed957e6af95f84aaffb62a08ad8aa66376 3046022100e10ad1ffd3712add805a044b1bcacc26046baa2e345c6e252de1b408ad71d79d022100cf9c507ceabe6f0a455b01daa6cdb35e3db96f4f9f3e8b164ddfa5a446735b43 3046022100b3a0b1773f360f125a0b83ce39542bfcb70f005f4b0c33f459e252dcaa1555e8022100f9686c3f424a0ed3f631e77fb48a45eafe92729b46e273c0943e548b83323ac3 3045022007b3463218b4567f37caabadea4c7a7c7925babb9afba46c31fbe1a6ca610040022100a74bfd6c86a78007ed9920db24f079c23da14749eafc075037c9674f1d2be373 3045022100c2b09c89e15b547a6e449de74e0cfb35515e83f623cd3b03634428f66c1d0081022021f8d09ae524cf2581263732b4000201288c13ca2e64cd753adae45d4672bf8c 30450220272d584d74b8356ad3a6e2bd15e8daf6a8a8a23f8d2261ef223ae520a98341fa0221008fb56d223104ba2e0fe6c06c7257e506638eb3a6161d61fbf41cb9356bca1712 304402201b0755daff44a9d77d899db16e3569e0961c0bf8101c4d0494d29a4ba676f2f30220428dcf2af0ea67700723901044b66f97fe1b4dc3b7b773f8c4d3a17e20223ed2 3045022100994090c3666a3df3906f8d9b89aa9bb8b4ace635ca54d01d94b5dc57b4b058a7022017e5ac7428c1dabab847d30d4ab92b5b1004e26de4f41c5ee5cff2e3436eddfc 304402205deb95c2a2ee19eebd37d09a6302ce54ce845b36b2d7d15c768f215801f71ae102204af4bef1fbfffd1b91cc11f0f7590dafd6e0d397e5ce0f339325c3d2338637a3 3045022011aeb7d97fdef5c3be9abd8f7dbfdbc315731cde834861d8322c887ba69f146a0221008b8dc03f8cbe4ae9f061d8128dcb62df28def0b3178ff7777b8041c49fa3682a 3045022100dacf0b1ab342098d647eddd6bbbfbd1120c61a17ff566e086ccae80fe1ae1ece022050068afb207ceb44ca8027975a9835a140c7cc281bd1a40e22a748f24cc8c36e 3046022100cabbd3437ea363ecdd6e752f4ed3e754d57a0ee166cb933a8676ba91a3ca69b9022100e0d2829d3b3d1946f9cd6427a0f6a69a73b9c7a72170ae929a20f332f283d536 3045022100810e1c04de2aa878beec20c57bac18dc1f740a8dbe3334767df8b748bef13faf022030f9395bbb544dfde41d840b7864a7be92ea2de5cdcc8884381017cc500628cf 3045022100b8be1731ee15a63194b73df2636b33e23b66107938ffadd0654ac515d27d46d50220383c0a544a399e9a8c739e815d5088c1734c3f191dde87a6fa4cb268f0d178fd 
3044022037bb68895edae3e2715ee6ff38fd890c914fc7845bca6c7fb0c10fb99be061da02202b10d56612b52381ddc5eacb9cf12608d165e28e4073268f36a307a2ea6d8686 3045022100ab70c6b8180753b9aa91198349e34da26c99c77e4af03434f174eaa9e2cc0c09022020cf80b85e19f1d7be99d509dc294b55e2fdf3cc2ec2daa9d8779b97cf12c7b6 3046022100c9de98d034e02c4c61990f1c9f56376ddddd208236a08343708943c463f41cd6022100a9ab4e091750b356f447f40b0464e3e31d16430f4c2b8a71c0a5728fe390fd01 3044022019cda55477f2ce510c3a7e857e7d2afd1e16dde575b404bfa783508e976fcf4902206c60448963032f5a501a5b4035f85ec787901e6aeb5d2c0ce35b142f6c9a9250 304402205f1a1bccd068ab456e59348030faf7bdf0c1c9c2482cc638dfda18890601eeda02202915940056ba58d1959b999f790f0b700d1016097c99b13501ed05f5ce485afb 304402204c3dd37a7d3ae13682e9008c2c3f43a1fa77993afc469d8caf4febec924965570220213e4b998d8775786ab91508cbecfe6c17d30bfbe72f5d151a7e7020a68378c9 304602210091d40b0adc607f9cc3a2e9864236c3370b614e3508a2ac3f4c699eb3ba3d82e60221008566d126efc08b6b21695a4c003f95ac51093cf1848ea6acb0cb1885cf380a12 3045022100b1f86e098a31d6218281cf5c53d10de5c01c187c907b545dfff0c3f54fdda2a502205d13615d4fc12f5414239f667e1ba13038fb7bb82e96479d1d3881f1748735a1 3045022012fff5392184dc2008c2d290e6e89f98ce59bcedda2c090ffb1f17b6a1397b1e0221008408ed953db197e9cbd14dcc5984d668a070dbeccb1b6983fc2b8dcb5fb312ca 304602210097c4f2e295d8cb8e0289b48d16d5c1bb7e9402ca9116f40b2b3e7daf8532304402210096daf64d1b52630e66b1fb0bb2cca9f441eb9dd7d355428b7e9c92aabdef74b9 30440220705cf9364010e8d69a14aada25c08659cc4c29dba0789a0ab222a14e58b0f5d402207fd4e360ec0b3f122d72de7c22875990469a96423ba4c37a7f5d9b88a463ed64 3046022100a625501b763a26fd6bce743fe8cb929a9399ebbd9c6ea889834f72e54b34ea95022100f322f112e478d2769c94f952ce582690326cb56b9f15acfa0a6e1620f9f5cba0 304502202067cc574512e349e7cb39e24474cf9ab4ae96bf1f89550d4e7170992891ecae022100f4b12db7419929a571cb475c16e493b5bcd297e1642997ecc994a1ef01898606 30440220078d2fd38854f0e0324951ecb099ea7621c6a7e8da2b075263e555c0a6311102022020466216b71a4a6b5f82d20a92f2c457d5d807efe4316890bb811c5fb2ba945b 304502205148ba4d86c763c0712608954e2d0c396791cd8dfbe0285a3238519e3e6400aa022100da50c8ee9acb83530afb1f5d8621f086d496bbfbbed16ae74a04f893e4144042 3045022100a644bf789707d5791d798a41ed905770ab49dee562047a8c418aa24e2b45fa37022055cacd897a00538ba9dbaef3cbdf21217795db125fc54f2cce491521cf1f618d 3045022039e378d8321754a903920f0b3bd7878adf941e306739fc2b4b577ab4b1d7adce022100d80cb4baa0d898a15ace73b74c1c99494fa388795c55806b4696b45f117526ba 3044022011c93c217b9cd63a73145de651b9652985d271062f4c4e2fe8c1347f72a4c08902201a97dba97afc9175ea56a6f72117962d6ed7f7c024b34f6519b0dfa435a1452c 3045022100cf9d18ffb34d99d2c850c8a1ea8bc18791a84f76512b3a7295642efa89c1ba2402200a2eae36543fbd763afea0d591711ca9feef1a197bf19baa961ffa71bace40bc 3045022023028f872161a83c7bccaa51867cd0860bd1756bb8a6b097513c2a5ba72245e8022100abbca71b045a8d5d0e87bf4daf0c44f3c8123326cdd7f8881f39f69d1550df36 3045022100dbe419fb8dd21a100efdf7cf108727ab731ef63000392ba1adb345c8ddff74db02206b50098c458c800582e19963240fcee53511c81bacba3f8a1781a910c088e92b 3045022100dd040afa46a99b543f8feaedec64dba9bf21a525f33a3bb4a8db018105ef0bb00220635883ef5c2951d44995abc9d889b3db0288de5b84f93dab30353439a6382827 3045022100b1f1bf5f2c5c4d7fb5752da084280f8fed87964a765d35545ec1f94bb388f46c022042424c8cfdbcf16077aa3a1240c15dbade4ca0662baa20bffdf1b834636850e7 30440220168d531a351f0c0402c4fc28c96a13d10acc982bff3f52552ccef48ea644b82302203b3cd53048a20304488302bf0df4269f8bd94400c592d9b371169821de1711b0 
3046022100ce0a19ccd5e65c90b2ff8d09948a14bd908f9d1270fd6ac119e5d66bcc340ecd022100da0a14f52ae557f3d7beceb0a21ca12d84a56bf7cbe9670b894ddc6927739b7a 304602210088a93d4a6cdb02280b7e08262840b80ab75dff2c2f71c4b6ace9d0ef898ee12c022100ed24e0d40907596c829620abe78810561cb5b3ac3fba4f01701a21802fb9c2d7 3045022100ae881ad8f6ad599fa1e348d181604586909087f52077e44a5de08cbde732c8b2022057f41470f915bab4f95c1c107a2777fe385cd45ed911f9e18b93f73c1cb7b725 304402200c5b0d97bcb4e777404b29a73ba5ab028a64e47fcd168c3c1134ce22dd678b74022071d05a05f941a7b22f57abfb2b0eb6bece452322f10342939e34d94ded659b74 3046022100a7400c47f4b6ff7e2643a7a5717a62630e0e78f99dd18eb780a4692e8a64d098022100af2ef9c6713d6e72a6c862ea1f596866ab9f3609a0009e8061c580f82ff8037f 3045022100d8edbcba84d7878adc096dc9392121911aa28a12e6a20bb85f76d9bb0c0f720f0220575b0e02fcab4872061bfead38d44d08ad80fd7ff5359eb318b9a919ac2ff18b 3044022024edadb72c7022a3f9b6ca3c2bca1210ded1e5c6954d77c1fea8721b40bb38b102202fad1cf570939f3f0746a4e14d9647053b02b0d012a9fa952e085c54993a4b54 3045022100f1ce1c4218c59eaa15d5cf3a99e11d089426675c493665517a73b6858dc762a902200e5cae76a92a5416c7d93f78c644c82fd236576fa1b4f83da7a0c22e980a6e29 304402200103c41f658a7821c1a794bdd852719a6fd31576aac1e5f6efe6f1baee8454aa02200c5f351dff5e85d0eb947f9bc8d2471c51c3d274993842f8c4d8d7f944bb37e1 30440220605883f2236b1e763d87178b028d90628949c3c4180409a99e6205550e5514c20220632b1303902de1d1ae218f0d353aa6676b501c9e619e52581b0623c86bba63d5 304502210091eb1b6027808b89153a92d9048358eee70fd50226f2b8e4b547e84ae236015402200f2922002dd33364903ba65589842dcf3f819f429046161528a63e8bdaea285f 3045022100bb7dde8156ea911d1b94cb800d9614bf0bf830d0e415c394176659b94cefde260220224f3d775593e8a4ad0762f26d3d88727413f4384ba54c801d77b27b07fd6192 304602210089eb3fa7e181e78f2941821e908c432412497f1d45f402e4adc8b8d43c8a3621022100e251390b83212bb7daada9bdaccbdff283c54b6cd50db6a72db82912ca95a119 304502205f2af10959a51a1a024309f9163a416ec16b73acc832f08d27380b62d811e306022100cd70f2dbc1b333ff211e8563cba0396ffdb8e470877c81503ec044b67e5d76dc 3045022100b585136edf56341ae975669620126e5e50549ab8444d1297c18054cb8b72cc5d02202252e9f17caa4159c2eedb5d3885a095b2c35793ea21a95e6e8851a39997a091 3046022100fbd3467d3178e3867d56740dc7ebf8f62663183dbc4d7c9c4076def87c7e90bf022100a57842aa88c737b37fd1735bb63134f18fcc6241849cd5855fb018ab1c43493a 3045022068e397f2b9dd5046e58effcbf1924032689c45560557fcbc29fea6decae84193022100942fc4286f85f5c7f4a5ea17031d8e8e6b32377aaff6f64cedca03251b775113 30450220356576ff0040082144633580f01aaeb9f37a3cb06decb56eba995329208406bb022100dc43b4e79e556aca5b3568bae989352affa554eb66d525ba76d940b92223a9e3 3045022100c87476a0bb7b8ea75d7278bc1f7df34371f5a34467573be2de09a5f855cbeb4d02205aac3681fcb14b73c88f4080b36d0fe19e9c325432a66a0cf831272783f1af0b 304502203bcc56632cf040bb3899170d144efb96ddba3747ab1e46644b2589085c872bd8022100b664204a826a6e84b811b8c0a46f183abae50501488ff60e664d2ede59db570e 3045022100acc2784706a9ed2d1ae8e7445823a289f4ebb9c3be4242a1a40d71cafd3fb77e022027b85d600fe37fb3a9f785edfd903d3fad40f1d85c8d66c554816a31a29fcc95 3044022001a369beee8ae2f49ed09a6186d26d17d48ac17c927e7b38e055cb866afdfda502205fa7b329fdba9134c83b64f7ed8012a0334fc0f27426ff919a8d7c15b7117421 304402202a817da10d1054e46428731250b4d7159f9c42380e161e6e099448887dfe5678022040d7afa9d0b23aa1d38b716807fe3ac7b9ad285e25a02d8500a7ba499f40bf95 3045022100f1e3a8ba9db9b66cb5d9a6fdd732065ea73fa0a9d73e4effb8fdf092519dff04022058877cb1aacd3eb7eb5dc6e86a44b4309a85be08888fb95f2f4d1f653bf7a67b 
3046022100a5ea661f57ba13053012617f88b7f11d97891f7f56d80ea2136246b77d7be76d022100ae9b0c8979cc2ff503f4119f2641f1b6be1cbc3278b4583a6c2ca16df0eb22d1 304402206ad99e739d2d77b695cae9080e06a742a4df7b8c2985c9c5dda9cc0931ff24380220496524f718b567f63458a6c6cedacf709e077a312b1f860214d3ea855aafcc22 304402207f01fcb13691e693a9288ae1031ad10111493969f7a21d8d1346d6b9804a7e2702200e6f3bc47c0d307db152976aec5b84b85652d1eb9cfa8ddf3f6e3f7b74853ffd 3046022100f9328a8a549680201ce14396ea3653db63110d96b3363c82929319ebef423de4022100f2a3e2e6c5b84c26ee4b4c6a390e290fdb01bb3e3f02a0a8b5e1f54473d295ea 3046022100ebe3aabe3ad56cea601fce1e028932ffd55201c74a899bf2f00bc6d26a08a22f0221008593647a678828ad1a9d3fb5b13e949522a52fef3cd4dad48ebf5a248dbf8be8 3045022100e419be18baa6989bca17e71aaff5081288b641d78763271b15adecc6ea4eb6ac02206f5dad7c4cd7393927dc9a5e5184692a1e1a41780327d7c703e46c94a0869ed9 304402201f5d4809a7908f9f7f3caa4c83ca0980fc50ceb2b3fed068d1011c8d5a370a4f0220699bff5c2bdde44c99a14acec2a3a9753ed723e5329841147fe58d827b80de0e 3046022100da37d12c841602ef5e0601a7cb2411dcd8f66dba1a4ed36c3b3ff768dfcae3ad022100a855eaa1d5eb05b3498ac2d5ccf883f7a6e17df930ee26bad99d3b30a8858654 3045022078346754e71844e6e57161e7fa36725c8d53b70b5bce72c517ea25cf4f5e87de022100b839879332a280d0a24b7ed79ac35b68f30d68185386678c4ad7f7b0f54a1a59 30460221008adca54152d63d97814642a3106d7f55ccfcd2fd99c5e22bf83d90f862d871f9022100a5f444c6a592bbd6de347f8147d8fd8ff6af16eeecbebca6c59b0a67f0d56c47 30460221009b3747acc99101330e1b2c252e27d3b22bd3cf404e633a1353f0f3675c55e603022100dcdde787817acfa0049ea2b7288718998be1eb6900b648b0e615ea07d3f5afad 30440220502ce9aecf1df3dc94ab9806430c026652b1b9326ed6cbaa59d00d05b0ddcf6d02202856b79dc147ecdd87b9c89d90be89177d5a882a40c944c410c3fb325ad225f6 3045022059e5a801fda3c9f1323950681d5c73e9cc4f67645493fff352e2600c2e84a49d022100e807d04ed6fb6afa939a6105ddf2ebaf23e828bf258a15b45797ecec626223a2 30450220107fa1692e6013ec97a009f69da34fe53dff5307c08c7e6df8d374223d9043b90221009b1e0b7b404b2924a8f5a0f2993d7f29568237f31b91f388d76716a9a523351e 30440220578648d35c3405366a7e14d2aa3955f7a8564517c80aa6fed7deff389c09ffd10220615dcc184269dfdd5215dd4f7caf2d3e3a01ec7eb98cdefd59b859c9fd029f92 3045022100ab8934eb0cdb108b8b0d63b5f1611c59309f4a8434a911645c50cc6a17e9170c022076b70a2edf9853c0e4f30b862df61cbeb274e3f0d9f2d6e3b9665a1cad5962dd 30460221008ae364726f02f8e780bde473faa1177130884f667d2f19ac51b8524692721fe3022100a7bb3c74016d5f0df4ddf4cbb1bd1157ceed5a70dadcd998c76fe06e42e23a73 30460221008c0bd3ec3b40a146b02c6adc374e3c4ec30ee5aa8f628853e5c63d9afe128d80022100c9b95a34ab89d67bc5a6d73e12b15a8fb86a63a5dabaf6872544f4d564fbca5d 30460221008d8b176b484fe6e8b22d099f7c6aaf15aaa2f6993223b48d3f0db4ee427d2b1b022100cfbd3f53093b94642354440cac5043616f905659157db9cfa919626b17df9cbd 3044022067905f988040361dac73217001e6628932db0596b7ebbf4bbf0a99bb6c85abbd022052d6ceafcfefa587200066d0fc612218bb01889733e8a160b9ce49147b940a5c 3046022100e54d96e24257bb843dbfcd0568938b1f64c0abbe288960c0213c294f408ab6b8022100bb38285c3993711213e90810b0a1ef7a8de08161a20bf5564e32ba0c8b601fdf 3045022071d4f0e9e432230bb86e37471bc133f41baa4127e68e4839eaf120844eaa8aba022100eb4604d06e6d51d7a7c9605b0fd6f5996986ed83b54aa30b5046fb2ce1f1cd10 3045022100e80f22f55715d907f787d9eee0a93a1fba622517c43c70927ea876e98f0b006902203cfc894ac6eebf35fcd96528cbab3865af21123902a1ccbeb01440ceb037acae 3045022100966bc45682684cfb3cfa4052f702c41de1bf9ae79ed2685a499838030f7ed4d30220071277078c21adca0d9277ce290806a70d639e66098235466ccfb5b57c123a5f 
3046022100dc13c577a969ba4e9251c95c6b165f9033dd4e12c62d50a1a6dd4e26183093150221008e9a0301f61cf4183b75ba743f03f5b9fd98f9218a929846113c3284d26442e6 304402206bed400d23c2584a9777cfcd6718d60722c9d736a7720fde67def2846a394b9f0220502553366d1f41a5b299f388c8d8e86206ebb2ed1e588a6649c9392c98a0894e 3044022060fe26406feda8567a5da0759f4cdfbabc19a291cbbc28bd784829bab06c846f022028239d512b2f4cc35971c6beec11e86b651ac5d55102ffe304b2f393b641be6e 3045022100dace2d3b32de771c213fa8ada4cd96d4bf79ee1b09134c759c5845a2b5a8fa4302200e780534b1fd51167b8ad2dffd2a82a9a2261d1a2a1056fc31abe4470c331246 3046022100cdf08df99a25da7628158164f091619fdc1ef051432b4b38dcc73867192ad7f10221008bb122850dd22f8e0d6e30599bd6eaf609e1f70030bca19d689b4e35e4da9c72 3046022100be536f57853857741a7f9028d6af630a0a7dea0e704b7657d68c13aa58662e6e022100d40732929402dc8c9298e8b337dba19001211a9dd04bef67ec658d460fbf914e 3045022017a624e998499e9e4ac76f68681af600f16e2f2b9d0224b9495e7e1afaa48189022100f9a56bb1ff37edefe57d8ba9d36cbdc57965dc34e5e9c9cf03c977b5f83e656c 3046022100a95c6a9eaecd6f1bed3f4ca430c5bfc3ab19774718d8f1f8bd3171e507a4b580022100ecd5de37473c46e77ef07324beef8bc9249714cca95225945bf0c8ffdbe4561d 3046022100d607cd2ee79458fb8e4b4a6a653c61ca356298ef5f7abc6a791e385abb90abf7022100a846d0543e74a1860830769ebcddb8735722e9dae2c1997e95fb6b718354dbd6 30440220624b7f753d2b3148b2801a5de34f8d1e9f18a4162c9f8598fc01c27ff1c8ac6c02207c73d8044b00dc50bee0bf84aad4344f9960a9fb736756b9bc2957b76ffe45c2 3045022100b9e2a0ec9adc44807912ce50b65be138ff2f9de4ae25a709bfcd4f9b250a75d102206946ccc906a5d3d5e6ec427a8b874b8838573137c18ef592852e07539d70ba5f 3046022100a48346d2626f51f796b732c984bd7d513228fb1d66d1365658e52ddf73177cf10221009339c1c6d00855abdee914c1333f5c44e916123f1826168f9068db8eb7d99398 304402207916c8387f5fc9cf1a02f69f0a2b355a41b370d3eb54b88c1fee93b982ee975d02200c0371b3ac01b9d525b5b3f369911011c063934fde3b687beffef5c96b5d5dc7 3046022100e095c9dee304c379b249797a55d310fec8d71ff4e41df0c78e7d042487ab18e1022100c6863a9bea7731ea06f98c2ac0ea726475367a3a53667097b25776fafddef714 304402200d2502d94197b2837db98d7c70d5339704541d0d160a5ee3b51bd562b2ca6afd02203d68a252e52c9fc52e190a656ae03a1ff9beb92f2e68c6db650297e9f158f579 30460221008e47485121f513aedd7f0ad77d4f3841e7fda5e490d7c4ba604368eb2901fc22022100e047d26fbcd48256621e1b7667b55ad79e6f99c1895413d8699036d94fcad4ce 304402206f80635eafd3e25eb54cea17abb8c2f75bbcf08a919c0554553cdea44eb37c920220337ac7d97aa12add959094ff6d6cf35e1c5bdee5015951d7aa263b03f0844b7c 30440220670c7793966d07a8c7c29aea63bd99661219728ff3ef53342d4ae5263ad2cffe02202030044285bdc18af424e003dc53de3e414c179c43f8ec1c7e313b9f578193ea 304402205bbc9d675c4368727c9fcbbccca692e3933a37eb21c35d8c432f90377fc4346802202d205621b84301ae5781c693b584e60251ff0a063e4868c8c83905ce63ee10e9 3045022100e72d29e35d3b134664d27d9518caa7a43d19f1b7d821f7f3e180dc79f68a8fc6022061bab5e25dd857dbc452c60ec082fe370726b5ad0725263f877da60e2f9e6dae 304502202eeb27c22d9c798d82b80da0966bc7a5dab7958e9e6d1a586c0196fce34eb8d502210087bcee93974349e10a62b81d654b64f39580af0c099d085c73110f4e4e6ba2fd 3044022073c2f5a80f83fb69cc667472d90b05259d668ef41e398004fa38462119bb1606022047cf8e864affdbf23bba0e335f6f94c1afdd2217bb10e05e54441d8919f09d58 3046022100d5cad9746df24f627e52265af5f2713ebd7b14f6585308da13ee9083b8b3f35e022100ed46bab3b1eb50a0ba55d641fc2c2f03b4fa0984bfb2f8bde2c43a1afc104a3a 3044022059f27211f111af168038114ae66b24fdafae7d9b4f45c0393bcf475c54b1321c02202b7a42d9fd34e53a947a81196bf5ba6c8b5c96142463793ac25f8e09bb7bb74f 
3046022100ba2fe2c637be23e3c3fdfd0a61d97e89cf09537ee29eb21d3661a25bff6558e0022100eab611ab24f695c33b9108f9e3a96a357f10357771b685a9719eb46d66c3163d 30440220702826653a8f8fd392a9485fbaa27696cd0c04185d7ee6ef9a0050982a52ecfc022049f6c9c0358a5e5fa1209200a94769263d695820da10be078d2ba7b7a3749617 3045022100e3f5cf3e6cb04d284cc26adf6c1b8bb3410b207eba8afa34fc7e2999e4b41a5402202932ae85c7af49d8d7c6f76a4f55caaaa606ca2b2b11cb6594194bdcd26ec20d 3044022063e63e30d06e983f8104d7ec3a8196a69d6a78b3cb62af03bf29c22c000560d50220172ad74e17159731233f0c79601000f317f193b59e4eb9052806053def8a47e9 3046022100925e97791db206b7ee312495b2ed07d6add1d87b52a4ab2e2ee835139bb105340221008913c253198fb5e87243e53e07b2ce04f1b4e6d02dbe93ab2b79ce096df806ab 3044022016354f824d7289953b12099cda02d3eaefbdd9770b664aae656d4721acb6b3c802205ca2d4269753dcd2ee2e2738b56a2ecfc06a70de1c9ae975c7f6effd970401c7 304402205d4e17c8f13efdaae5549e3fc9e057f41a3bda1d6163efaa18dc62466ab3f6950220496a3eb89e32fd3b246f70a96abde8b60fc49609756e0e49b195ce997c2bdf19 304502200431851807d90579d71f2a996daa1d4e8e082906e3a30aace165301d11a13017022100c53c29ba1b6bc4dee9ca0984515d9d775d9b71242555d30446c72d5b560036bd 304402203d23ce8d939af5eedabede9f53e8d72e5045d3f525c9c1a298815b6917e75a4c022044ce04f7f41fd0ba55a20d168970cd997de04627579d8f428c9d4882e1113c2a 3046022100cf3a9176282f3c0d1963a109d80c05b7d683ab879d42aeaf60cbcbd41b192eed022100b1dcd9f88e0493429fd0ed59b57ad747bbb25f61eb204742d6ec118c506924ca 304502207b4e1c8ff1d152a0202d55a578c8c942343d155894cf62395001991fa21bf3e0022100de693f4334e73a1bbe275e17bf0281f60a55dab92c921d924f1d93da952059eb 304402200315bdc70cad81c4a34101a5b2c94f53845960d6de46dbb9153ab22a5c2886f40220067ee26c1da544306ebd2c8975680374428a1fc72d0b12d9eac873d6c8a17a46 3045022100c5b2cebc4352b851bbca2931f768c40f8b07abe5f114d578e4aa14804fcdac9102202eaa821557aa9b2ee7f9e987fea7aef22101fb8833d5d48a4b22b02d9894d418 304502200b1c27fed7b45cc507329526f918eec08ea8ca82307ec2242ca8ff920ecac39d022100a7a5228917db7924dfb25338bd0aae86511a8397863e3f8d92dda313c8cad095 30460221009dee10e9e837c9c5d8cae6cbe29937523118e2fb19f59b4514fb5404d7e1fb190221008bd30242e0e3acc71e9ad7d18ea83b442c30b5fcbf35b74e7696aef935a4a525 3045022100db236b166e63f6271778ea2dcbb9a21826dcab052440975bef9b3d6cc0e5727f02204c3f58101bbbe55748351ea9829928886fc257d4c50766e95bd32080ed4a5c04 3046022100c960146b995a70b512e49caf10394b279db719aade7f1c277a2cfb01e0d1509a0221008c1b30539676718546ced3f9150c6882bc88343cf2dc56666e4424e4a06793e8 304402204b09f935911740c265f3889885090593aebef4a9daaf986cfd80eae2fec16e4f022003f24eb48d8c4d3092a6d6d00c4fbe02cb3b013564d40773ae17829d7711570e 30440220727ea1f9e32a1d314e62e696d37afa3484ea0d441e11d8f46f8a89a98ba76297022040ae2d7987570fb663a1d1b7a7b78403870d1300a8819e13ff434f269b6c0ba6 3045022076dc94ecea14d3a8614fad87e14f973f36431020cddd080d671bbf989a080a17022100cb988e2164fa2eedb5555475b0c5b744a0f2cb126fdffb743099250950813000 3046022100e38b9f95307390401954058c44fd492e5761a700f9eb4ed4267a11374e45bbe7022100b5ca6aa1cb40082dedf875006cb1d2371cee0930b66eb1a6e370994464ef9a23 3045022100f604e7fb8c562f297c59984c2b05d66049102fa36da51c814a9991a58175e02e022066a2767e2375def7c9597a1a382aa755ac1e2968fc7f6105255619fd3df3a1a4 3044022016e97975f4bc7a2e79a70c76b7fe643af2aba58435656b311f6a239c92240cfe0220529ef6b9d797149d5a69292f73a99e670fd8654c0803e260b4ffeb6cb96af57c 30460221009981a752ad59ab41dcb14d8658e45b3893885ade6c4431fbfd843369975b2004022100efd75ff5a0e3fd12f3b694825e851e545d24d2fb98d25746a27b3bd48bec262d 
304502200bfee5145738b0c5693e75273966b1819befba4e1561b49913733542ce153192022100f677a3405433c2bfcd55db23b9fec4b72fe35ac0fe7ed6663f053e55f51905b2 3045022100b572f7fb45f4f07f897f63ad72a62906f84df0c333bec90bc917975d19d3cf1e022065540714b0590fd455a06ea32ac2f07bb5e22cc4c8606e90b7e51eba18c4596b 3044022028fd55eeeedb127cb0fe5e1f5a8a9b1795cd1ee2cd42c34d8c9cb16b0b939397022076f3b40f41cd536c8ca5f47b417d890c6b8286313de869f98c503d4a8124afc7 304502203aaac1cc3a3de5408dbd11bfe96fce9e67687e54537b38345b519814f5cbc91e022100cba0ef52b62b13180a35160392ffe5cc7e5463ccdc72c10c808c409311bf85b4 3045022100d534ba5dd61f2c80bd605e18ac6cecd6ddefea4b09a030b3976fbe02a9744351022010ce17eb52c61c2721596eae53529f6243ca67bfb58cc32c508e5467074a2c72 3046022100956d7990ba2f7f0a5868b9df7297e762250b9fc41b3ea3fb35ee4c9c87b2ba37022100957126a98c12235f3b79c9272526c2693b3174cdf71830d51bf2a97837c8db35 30450221009e90aee9e1e7c2b9ede4bab192560ccef035c307ee97304fe915d6ac6727d25b02200c04e9566dcb1782facd90a5b92d7c533cd204b17cfebcf0bbd62b1b4299d409 3044022026db21f803a466c733575053aff148dc491d0177b8a0a335796609cad75b76e302201cd7035a3cc48a16dc7bc8680521860bb6692c9087abb44ed033cdfcb1fd7d27 304502200511cc04a5c98560f7584d73d3ba38b83fc2cf2bb223bf5e19fed2477c625d8e0221009af52967d94f2534672b074a6edd1c553af4d43bc39a22c74b0232c550f9e849 3045022100ad6630d657ba0a3abb3298cda37f31a4ddf92d86b60eb8a62c57908878aa7705022017736d47747b0786f862c583fd5d5843bdb6d260d1c2141e128b46751025529d 3046022100fcbabf7e548207063c4dfecef01b5267260169c1447acc43e7419950f2f71e2f022100c07fee9461351e7562db339a8b8668404a43520831a4c0214034e48bfa31f9a6 304502202764a3bb96d1c62822bc5a8860fb6ee2256ad233daedf8def17324690e637cfe022100c8cf12c4315d550c50b5ffdc4723a046d21d6dfa66fa8685778eff36e12ff007 304502200be86b9dd3acc6fc1e85886122df56e980b72c98cfe2131e1516ab2861abbe9e0221008c61636233e2f540c25f41401f540a5b1560875d4119f227814f766191bf080b 304502200437dc577327299a6920b7cfe732cf9e6b171c26fe0be04aa977a3a94d59a17e022100bb5488fbf11f3b7bd093e9e6db7846eb4afa77e75d6c2d225e3a868f02f87b11 3045022100d61e2e66791f3db6d2f53ca81a8749f5d38fff927ed2ebb1e664e354a034936d02206e95663886717d063deadcb0bc16de5bb553bf2e47675f9ea73d1861da10e4a9 30440220071113cc6308a52409dff6f18988e3c23dfa092e927b2fa54e6e8fd7a17dc0210220369e272fd9c869f95b4e1196849cf2d1acdab137edd66fd269d8e0197a351863 30460221008d95f376926a8cdfad677ad030269e5cd21e12c5bf0a5e5ce828d15af8bdf70e022100dc0f23fb8932baa0453051106ddd8cc39ebf7871a48d43ebf07829cd3ae4f6b3 304402203ae7b0a33c43db685011edacfd8491a6381743476e447826f95fdf62dab1d80f022009f6d436651d4a63d431931673c45bb147e05999b1b36f10a35f90aa80b1fe03 3045022100af5d5c44a55e4f484a3a006aa051003f0476a13ce1c12fe2483f2b9a5a7ad575022025551e181ba5efeac87fe55bc5b8cb63a70d43ef2c16c6fea896ebda364621e1 304502201ff1fdfd8275e53db89d6bd7ae98bc47815ee9c2cc17900a1f24e7945589819f0221008637bbd8444e8139b3ad5d95daa6c85d3c61ebe47a64707d48c3faccc2df321d 3044022016f4e7e5cee6769697cfd794be56b2be142917c02afad4979b6c346ae39c880902206268a2ef4db97403f8b92529dcca46254e597429bd79f8476a677a74a0d459f2 3046022100951a256e104f132900594bc7a61c1b244739b6aa1522b61fa03c5a89eca49797022100e458207a5cdd7bbcd0a7382ee9e9d08cdef68d2a5868f210085a48531f7bb807 304502207d0fe5f0c8849198f61a28c0f9922e030e545c01f354ab016fcb5d31dfd06396022100a7fee57b92c22a1bc7de09bdc884710328faba25d5fbc66b2e3531dfc8a8cd54 3045022100f10eb3fa260ef0eee621524ce3a10ccbbb2abe4a220997611dc3eedb407dd890022002bedf6153d226056a4a10279a8ec1e36d62a5de6e0200a1c30489f1da576f21 
3045022100bfe2318bed68feca405cc792dab91945cb3682800bb82ec3b0d4ba09dd33b97402205bff6307d61dc6a7af492bffe00b798c36e4c7644c0e13f1788df14e844c0fc5 304402206470bc040bca98c43399f54f43d53fa0fd7fb4592cee82c02c96f3f1e3b64ea002200cc4b27aee70fa64f19aec79c8600a2b0cc8ef7641b4046e41f01ee4adb770fd 3045022100dda2d99b49cdec4562917bd429f3d9485bc7e16e88646c7b2a87baa62dfbbac402200ac88d07d04120775c868986b52ab30b3894b1691a31897853dca7a7f0602437 3046022100cfbf67960321853f1b41d932af0f616bbcccf4a3ee5c8bab0e04175f21e042fe022100b9bb1b879201035381a002993cdf32d6b13bedd21b43ab8891bb90d8f75e4897 30450221009b1c0ca4e780eca20083e31182e44324355e6eb29025a0ce50012d8f617f5b3d0220074399e5c7e1d2b675396fe48d7bb530f5da747c38a273e6cfce05e239a7989d 3046022100b7b950951e54c1031bb47516f25e8bba205ae43c50d0033217ea94f1fc0a09e8022100d8483a22592cac84d6e3b968f58aa6eaacf6db34cc79ded002cfa49a524fad23 3045022100a97f2e12240794c236cc95ab04a0f3cf883f22c317a26b720988ee89e8599b7502205e1607ba80610abda96cc3975d67f20ab1aa0dcc9e277199cdb65aa474096ff4 304402200bea95e5654431565144b48453607e426b8b80f351a760af91cde5d5c3d6258802206fe4d093f90aac36b343d3dc785492bed4a20a0a8c453b960f8cf94c74c643cc 3045022100c395acf802ddae29b9db6cde4f1c9ffad9184bdc4e549930dd047869a8f3ac6a02203c042e74126ca4e3ccbfc3e844f3422d65e17e9326e394ab25ab52fd64f4406d 3044022058864b18c2b9b84e8999d920d702427cfd30542e4e473f18433428575712dfba0220334375da5c7d2370b355719f9059e01ad4b942d914db99a33b4b42e2f828fd86 3045022100fd83e47930ae93fbd8bae43be30fdf9a6f2d07736dd0def0caa8315f2031c77f022043ce83747594c01b8f6982bc7ff965198da272207522af6e68922ba0f333a1ad 304502206400320b50a451f1e528af920943236e3748a33653bcd30fa789b6416b27325a022100bc33239c16e2ac97100687ca8bedebdaa575c985f0dc3bb658bd0c9a136185a5 3045022033906f4b90322475f01d499b56736fdd1c42cc8fc3be683c511d66e6de6a96b5022100d54e517776d57929ba7fda2d4ba677f2be7c8f0b6eba8b48e179dc6c76dcb7b5 304402206cc36582de36bf0a5dcd84f0abf1190131332c18efd91d221f037dc4e096fdb502205e1764dc672ead24affda796fc6266b52a19e70732df07d491e3c14e19a83b3d 304502202ae3cbe888bd3c66b6220a1054102e848b60c5eb0a1aaf024b22cb10cbf66408022100bb1726ac08acae7e341310f3dfee2298fd5a695c2068167b2e63a6695353d1f1 3045022100a995543aaf1c7553fbb56e06caad7217335c53a040a935ec50f3af28ff7774ff02205d0ae3798f2ac36b0864a79168fca9026799bf8445b166c4f5559d5ab112dd07 3045022100a1249eb07e93b26814a1528786128b06952c537652d9f8c246b72a69ade321ac022056ee1563a6e89749ea7802ead83a9c9b35ca9c412c595aa55f2556d39b95568b 3045022100f22bbbaa16cee2d78b01ec325ff49eb0ef29739bf46f32c87c7d025c6df4217b02207e0deb9cf1ad49f9d0b889f7d56d7251f07f7337f91bde036c72045255b5889d 30460221008ba46150b2b9542722dc0b0618da0e5cbfa889f4aae139b045cd39e69d3dcacd022100e942875873eab768dacc2f65af1f1e38e7643bd515bcace27a141a9651a75863 3046022100c61593e55aae51a12c3b9446a6b108b361da9b0e43259e0523d1790b93404ee30221008bc1a24c1ca6d2ead903d2a7de1495adce15964d75883d2c244d0ca925182d9f 3046022100bf10643b87ebab7ef18558708df613f722b4e9723f95cf4faa5bf085e93eafa9022100ac4e9e29f05f8f03837d0c30bd03edee0e6df3f9663ebefa96eb0753950148dc 3046022100f53b447b60b06aedce9e9be53bdd2e3155209968a30b2f690cbf3496ebb539dd022100b8ae2f25b5f23e1f6772732ac822b26281d2eadcbda0810ab21c287e8d9589a2 3045022100e32d4143f1470ea1afca67ec42375acfd5825c8023e126649a27359da3eada7c02204697e143b4b8042c645a5cdba8b576ff5723fc6ab87488f5d093cd90a3b75761 304402200083668056b3ca3f0991d6197d190676e986fa42dfb6484e3d9df7b14b016a82022058ed70c252a5ef882e0d5939ba70de4ee8832bc3b31b502da7d53866eb80e47e 
3046022100f5b00c120cb1062c01008b976f274903e9fa1806e6f44a727d70d127520dbaf3022100b8574d4028d3d0a7488b410299ac365b954877cd49b22630768d3aacbf5f8fa2 304402207d0387694f9b4307554abd7e93288032f92ea1fe869b01c3769708fa6da5f36502204f8de2ce7058f3c8fbd0707d56e797dbafefc3ac26783fbd5c651918537ae449 304402203be83bcdacd7cab61a66daa557523787382e930269f056c69c57c4cbbdcac974022058a30354aa2988bbbbcd30a78562b66aec3fc48fbb8e35899fe3fc62c253490a 3046022100ebf9ac855c9f509152cd37feff4e38c14346b481fc76a78971cc250ed344ece402210098e51e860618a4f3954c9f2d16f10190452cf6ef90d88f1a3923b551f00180c9 3046022100ca2fac4badcdc9b2b69ff05e83a93dbe8de8fb9cbd8e6bdb74fc2e8877d0c475022100fd9dea3e0d1737e69e007d27d7c13545f0fd6a29a085efecf9879084c90e3f27 3046022100d41c8b7bec9b615023c5dcf3e3c5f6f72386c9c8cb9a24c855092ee79c27cb56022100a2f6fb8fb3dcd96c79ca8992cf72639c8e3975d580ab43fec0ef484f6bfb81f4 3045022062f6131f2b07fba3ff7c09e8293e6e7d18265710abbcb57ebe0fb9f7419372c3022100b7053dcebadb23d9456e84adce78a6372dc18f12678728bc9e11eb0de3f0df81 30440220612e70b0e67a407323843ab322ad183bd3f64924e91f0b53e290f3b85cbd27a702203abb1cb58e17a055a782d8ae1787d3c83ba1d8055c25e1051b35613c3abadb74 30460221009a6db824ac58e085b1514b3e8e7d841f4f06c38c48ee1082ff5ff791a3ff89d5022100c6e0f78e018c0380947b196d04e5b46e7ce1aeef49000603fc1ac22e1c727c68 304402204df877792e1a0a40db6bbb204c3cda96a220ac4e92d17ac7a03392210c603d4c02206540e593fbceeb3f340844814b3f0a6ec2670a0e12278c363a8c7cdb5ec9383d 3044022004fc5439c033f82b284e6d69c4008be406316faded8f4db725936ab215455dfd02207c5b3af14429c2f71b9529a902c136d1004853faa0d207952d4a4ec34fc2e312 304402204008a821acc34788d125f6f58161b558989c47fb4c7d912218008af5371749800220180331de10df6114813174e7f9a55bad0e6f78f0d5d94be4c3775780fb3ae4ae 3045022100b1210e227eb5bd2f5b6f018416d1bc547fb03b7c19753b1d6df1e688214e8f0c022037df40916376ab42706bb67dd4c46c5f2f82652c9619e86a27cff9860aa56703 3045022100c07604c46e1be9028e9cb7be1e095c649e783c1a0197af13d704623033aa62b50220645eec47ebba5ff62f09231e8a9b8efb97aad6d3b0b5697ebf9194f2ec6470ed 3045022032ee937aa3e08d99b34ac86e3ceba40b977f9f27947cb8413ef7b19819d6e96f022100a918c66b1fc2ae42bdbc2f1b17c5ec48529e92f7af8450d7cc8f761be06b8fb3 3045022100ac28352115444ac3f3a66cc3bd97789bd1857328144906418a052701f93f206f02200ba40a062be118121005011f1b2ec37622ea6e28fc24d8666b8b1f1c75d6a6d4 304402207637fc33d110c6693576d42e6579b294be24827dcefb611b9abea2def0474a4f02206e71d8d5667acee67c6f074c2dc8b9efc50169eb5d94056dba917f1ff0231bc4 3044022049d8dafa635ba384dfb5c5266f477dfc89b67bf9a641e7e097c0b3976d0f2445022052c17c78f67a3466737a8fb8eb7f99b051103d4c9a48afcaa88e57be699551cd 304402201113047bcb1ad5e878c24e21cb0a32b5e62f9705fe4976fb66dbb95ff98cf04102205c1242164e28af20a170397d31116fbea63bc8ae31191d5a9d7eb74d7166d27b 3045022044fb072801f20f256e1bb39fc9361db01802c75722266e62361207e368b03505022100da985e8369e0728ed286b13667e8ec2ae12876ea23c92b5b0129487dd6a7951c 304502204831558b5d1222bff3084a375c6cd52e0bca87481d8c224ee2feda35e33076af022100c44596a5840e8f6ea4cdcf4024552f4d4c97072110adff111e036e9ce14285db 304502210099beab148ff8d6b5525f4aa916d25fe812dac26e8a575a5baa0455efe9f2c21e022056a2d7c49d5efab8da56ddc5364f71540a41267e893664e30486bd9c032fe0b0 3046022100f85ee1e04f4e6379e209ee4d684d098bfadf3bac83adc76c64e44497f3121d14022100bdd033e9ddac13623eaedd07838ab6dc62cea15bf7da90b4f451ea0e78b02571 3045022100b1162bb7ca21324dd384201ff917b76ae6a0d39634d2f890fcafbbf87e0022260220072857851624d276f608f3549837c2585a80b01938269a132aa5b7e361e5ff06 
30450220287c5fb82c3948a9f729e0e649be607365bddc13314c21081758e15e56348026022100a30f66ce490c897140f589ec5737fd2b959761524959acedc1cf56233a1fb916 3045022100cbff2094a48979ba9da7baced7f00e53e082150e5626f0737b9262c2fa0c59e9022074ba836c1b4d24d470de20b42b3bcce9f7eb5563fe598c9b10be63c1575a14bf 3046022100f24e725fffc068150b9b8c248d98cf021eb9ff22835a0e3027d1d9d28d077b270221009a25d3d8f2bb73675ea06813077f24a687976318b9f2352ff8140b5dbb588013 30460221008d0a469032eac5f15077ccc16a07ca7d407cb9c0201526da815b8989e60868b8022100e097df840d2147e10800437395db6731d2f15b7303c209860f79cbb4be536074 3046022100e396df4d44dfe817840ea32e819d6bf881d784ef418373a6677cd7d03ecbcbad022100e1d48ceab120dbece5c336183a94c89d20fc7caf62b9fcff7327c7e9842f9eb2 304402205e425e16f2877e1aa63c6b1b0aaf76f22ab3b85ca349435d13491d67bcaaf96c02200406d31441377d39832928b47d25dfbec494d18dbf3f83b2cdbfd92bfdb99b32 3045022100d2f3ee39a873be4f02962982f345006ac9f5e15ef46df5fca194fc8f23ab9d1a02200763cc2af8633361fd4b77450e8f03650db5c97a7df187aa3fe04a179f4bb49e 3046022100dd188077b7ff1d677f157662df4ab93da84c26cfed11ce5e4d80741cc01591530221008aa695a33a155ffd1afb35eb0909caefa12c9b5a35033262b1855c8773f5e1e7 30460221009230fd9b8c1667ee8ffb7fc9351cfac68228f4c029dbd7a5469e7ec7e4a221500221009734a8e415aed700de73992e03edd8dc279e65ee60aef92f175b32a8ee4815ab 30450220396093f9191dfd86e60114dec5150ee31e12ebf30a54b870a62239f4ca1b84930221009f2e7998dd01c387bf66b10a7fa09de585fd47a8de4650cb74fd313de2bdbccc 304502206a24547cfce5921e5014e684fd59516386ffde1b1d4037dddf039fc72705659802210092f767d2aeb8808219e8b1799a0bd264508b276d36f6598f10411d0be009aa40 3045022100bcebc2db74a889d79437c3e8a06b6e4f55cdd347b9b0b45e9041ced9947f7bb402200b74c413d81579e6b9eb15d71fe4c8d357d560f2bac01e236f970eee446939f9 3046022100bbe1c5f76affefa2a808ac6579fa0fb6f0eeccf6b5a1523978a709483184acd9022100c0515dd6214251dc188daedce9e5e04e396ceba1ac1812fb787803c8a32f36a3 3046022100ad102bb9571dc70a80f9dc32384c55d5db6ef07ce95fbe5369d731e2bb5bbb74022100b2c97fa7ca152883a7b57dd92807968582b2e3c8db862f1356d856bdacfaee0a 3045022050aa66efbb9572cca9f983081946289e8a4eb812ff764be73ffc495d29677de60221008c9ba336a38c4e099f98430c963713cc35e9f1972f008e26b0b5d78548413187 3045022100b4f04a89bb59e07b4a1c7520d13440fbeaf686bfca92609499d4043ca88a729e022034585d3b8addc810991ce5adb398be7d05931eb7757a8c67138593ce95890cc0 3045022014751eb4c0f5dedf3843043639ebac805991773332cf3e9754e081334cf6ebeb022100bdd83ed6da3ff1b1d0f6084a61df829f76db9d0fcbdd1a6a67aacb6fe89ba65b 304402204bbac23d04bc9b57a199f2699805bfdadd763da74fe06a6f193e53c2133e284e02204be7c7da1ac89750aad45be8480b2b67e5dfe9b29f743ac8261c4dfa46385079 30440220069ba6450bc5ba7f53d1d14b06f145e05b973d0110d29500dff0bcab7beec65602207dd3c113044362f3ca24ba094980747bec62e8aa23801933e1e724955bab9254 304402204686054b81dc5580c32c35891484c2b00c6118b681326c4242d54b08c496d87602207ce90ecf10dbd92dd392e2b7770d591d41cd56735424b801a47cb910f4259c97 30460221008a535dd9906ac68c5dd9a79f0b9c86c14ce814e44f0eef4342a06b68eedcad7502210092351821d67499d3b7a8e31ab46f5ca444ba9b9aa367d05f6a92c572408abb07 30450220227b956d9038c317c7b89586dd122b89c0ddb1626e4dbf450514745102eda079022100f54dfd6ccd20ffcdc973efd053c26fcf698161d6c3b43bb969495905e6c9a253 30460221009f298129af56b53708a95dfcafd5685a247f84579798d38f96dcac9d624aac91022100c1e649f03675eff1a83fd738d87f6cfe7c801f9b76ccb74dd35978ddef845411 30440220172c4c29b158ada152e61852982beba075f8777920067a3089989fea240ace350220019c269dd9c4e795b6edf8175c4b40224e5bcc70a0c4f6fa90b8790a811ff46f 
30440220339238196a2a5253dea9504ce592427beb346cb6dae39dbb1ab49396460a302f02201d9d7e946e6ad6fbaba630ac9465f0c6fd49707eacf9bca0fc33d3a8c1873859 304502205c17d9a4bf712592ae52195f99d525d91b373608539128e52cbe9397f9f8a856022100da50d407466b66e8021d670bc5f4b5e16d41811cfdb1e9f8880ceba36fd92e16 3045022014e89c59aee94607f4e240665a8eca500b4782ce1d95733d63108ec5c33b9d0c022100a7d32fbd85da0febdcb85913e163aa3537ef1a34e923eed05005175856b32db5 3046022100da1c85bc25ec39211d62843d838cf6389b2b85744b8ac44f626c8bb57c550672022100e4f757b8d523deb442c717942e5a45979d6f076fef769162d0a6dab527c5b93d 304402207b7079abea5d6320c7dcaba88fcd58a83a7ff7a4b1ece39326962657fee0dacd02206ee9a39961a859639ba3ccac464b8163253eab20e9f6892e481aa5d284c15da1 3045022100b3fef923e1270111dfff5611415d7c95d7f9ce9c63ee56716dddfa0d573c1c6a02203a3371e87e00503090134fe94193a483b7d0f9c8b7e07d9059f2785b775f6530 3046022100fc720504c8e5fac3f2177d5e2b45b2fd8487c7f693613ffb2a919f3e8ab8559a022100b0f68273738b64a1fa36d500d86e606c54ac221fdae0459b5c6d125c0cc15b85 30450221009f9b4476a8128d841e1534adfb33ea42af2f41a3685bed5b56f080d66e9835ff0220635b7b0c0a398d95ba3bcff7296c6d11931e0dfb170137d1a2cf0a38aa86b369 304502206d6731f6f585f3bb61e447e48a999f6070244a5f177799cc2d82406e7f7ed9bd02210087f18ca3bdaed053b8b8fb5ea33c522b57e7da608c78fa424679b2c91230d002 3045022100921287e342a27f326d0e1fbc928cc8990a28644f291efec01f4cb9fa5e2c7bbc02203ce6a2090641a429cbe720b72f0507d3169ec6adfd67840ce62066fed4bf2866 304502200eb6a1acff4f301ae8c922767f2d75cb13d20759873914cab07a7eb181665904022100d35719b284e1d7c075a28e697b978ac23e6337dcd2f6d90f09c750b4a3f8ec45 304502204bd2a35d96aedf4a9e6662ed7535a71ef517bd4347c8970b1e940fac4d6d71c8022100ec9281dd9fe0fb3401ee15c1cce2b4b562c80492c1d6875a427412cd9b206930 3044022076dc1d90cf6b53f960ea2976ff8be69dd20d4d7e255985cb001565e3a09298ce022065b792472e89579b3af20a875b88d200eb6fed853239c9d2d28538572bc5ad8f 3046022100c9a7d4114167fb8d8b7f58c563355ecb5a21458fb51b589153cf811bc74083d9022100feda741d135853989c741fe063ab495ec02c74601ea0d9bab09d83b34449ae76 304502207a3910ab05a9b29d79c2d71440e33a1b8354cba283b0f9767785ec1f457bcf0b022100a0817612736b21b2f588cfb56d8c25a31c2beb39c26dea9d439cb93132b4de3a 3045022100a10b99db4b9cab1dd69e29f29583325fe416fc9597c726c75c97893583b79cfb02203cc10c3516b250f53d681d9e6ec9b7ebc8aeb1af54b4c3a13b7fced9f8c151ec 3045022055587373bc52bdb3c27c494e31f477d0f15a936ad308721469d4e9c87ab360ed022100fdbb1ddcb35fd60af80cf24d05e2a99a3f599ec498e5844ce079f1be8bf4dbb3 3045022100b8ee00f58f3402d5e90651c6e342f363c00860838a9c0cfe7b56c87341ee9b30022068f6822a6d17eb3b8d9f752aee56368c558388104a5c0a6c23dce7cb82f40c03 304402202c0d455312ccb860f107a266ca17bfd42c6db9f7b57d93f71b29c69784404f97022064b0838a1f0ec4a92ea1c39117fbe9157326471147404f19b7c6414c901e4149 304402201c96bf742cc69f8bc70572dc7feec48388edc0abdb6e91aef1cf687e7bbde58902207585712c598ed528ac2f184cca30e0d8262cdad33c85b0bafadb46af227de829 3046022100c01c92f1cc181ef317bce7dee0ed3e8074773726318adc07d8c8df151b2ce14b022100c622d1bea2c820a6018b082c4cb090df641a86e556ebb31cdba090cb983c5e6e 304402206d0760444bde51eed89a5ded8688559d35f4e5d66b073c1714be6f9900a8170d022003fd2f5e4368dba5a95b55b350c9f4b5c7a83b7925d4bd6e2741f96dba4bcaf3 3046022100a55fe3faabd800ae61015afb6b1d37d2caaacdf0ee0ba26e8df6dfc162f097f20221008554d7bbf57623b0d8ad4fc327ceb6f1d7f1a8d8f9a2d23da5ea9f46772cb48e 3045022035e2c67a78a4c2bb80fc6c6fb7602dede354c70a45ab150154c11a4d2d9c5210022100bf3e2a9d62280c6fac677ab3f2768ef90c18826febd2029b9e1f037e61a1cf27 
30460221008171b9e877b4e770f6f43bf6d88cbf71f0113bf14a32237963fdb39dcf589307022100ec4cfcfcc7096f434ff7cee376e00a6fa9f35fae7df3b637f315e5e7d7770461 3046022100b7021527c69013ac862d31e2cfaddbbea5d3d4978979f21659a484d2bc2fffa8022100ebee749b090623542a5e1deb5f5f0a0c8b8602d955013e140d5c124bd3e0b5c3 3046022100bfc9e71cbe87d19a5048da0673f0088235812831328c0f6498c5f309628d333e0221008672b3c2bbe7ecb074e8047637738100f1690e0fa30d7ebdea27b78db3b999c6 30440220503eebaa0e86d360aa5cad99e99c86b9e835bd253f4712023ce61f9143d96a2402205659b980f8feeb03439779e6ea27c8918e5e5e1390fa308970397ddd162a02dc 3045022100977d5855fcd340c3a4994aee2b11769019ad7424f0d2c87c76343d6be955350a022070bfee99b0c5ed75ff0d9c35ee5a33a1fb8cdb1ef6ea677694234cd1fad56e93 304502206f146c1f67793394d7dac9487c51e301391f9c747c519b7038c066d7c1c60acb022100fea047c75a142e7a5211802ba1b4e35fb4b496b3e28401521c58db3d4b07ca6a 304502206b5bf94d5aaf2f0300a9c34d4188bf37b7589476f23df6f91248eb883f79747b022100dd3b7cbb3e94584f3c3b0c0c2a7f6cbfc8e2d608d04ab7838f170eacfc56988c 304502200e1e899d52d552cd4d067049a3fb45b52486b0b9667fd1a17c60ccc3a4526bda022100ee0d598ffbb48964acd9ffc58772339b5e0a823a767225930a8b23128db20146 3045022028800f6c2b3f10939e62bae85314055f850db6f60720bf04ae62538098d2e073022100aa3defb1aa8cbdfcb1e936bd27fe1e463acf7a6497f970731843ceb0ec37e409 3045022100b24c3aa2baf44aad332a07b3121ebb8d3b1bed89461c820993caa563f61d2f7c02203796ef1961680db91a481912eda779316bfa4b267e9f524afe4137ca981d100c 30460221008bb47e398b022a63254bcf79326ea0f126c16f6c88f4889d3a7b981c72e04224022100fa207df54b4aa841b4cdb4355ff1a855faf202cb81031b509e21186dbe65bc62 3045022100c89ab1eec657f8afaaa1e0fbb6f32f2c1ed986831b3085d719ed91b38ebe1804022040c0407d590efb4d5cb941cf01def36408352caa3df5a508eca470e8ab890ca2 3045022100ab1a4d89593d7107c16dea3699603964b2bd129dbf03ba2a92fcbd369d75f6fe0220740a85ac7c5d6dd008f8bb4442795d54b1a78cfcf19ef4645154f90f9b6b5773 3045022100fbe4a561e77388911ba64545e4329ab314c7fc7b283ad906bf6ee613b6597d2f022056ed7050daf1f9c8528cb52f71c08b5aa914af31e869b07f819996330268f7e7 3045022100bfbd3ceb248b4a0c51077cc69181bf41da55e759065629a6d5c771f8769afe2f02207b0f59bb89130576522d805a7799f1f708a20c2787e1e02efa98bb852e17a670 3046022100b81a265180c1d511babe52002a1d39ef25ffd665aa51e59e9a5063f464a4b3a1022100ae5bc5f38b689451457571625675695154479473d2d0835fec61c4b07d7ca697 3045022100a45dcc6d5d402850a7270a855abdfbd962143f6dfa49bc76abd3269283239e4f02204a6102525ab8ac36349a1c31c74554938ff81cb19a8a8303848aa20eb4402f6a 3045022100ad511afc3e5e101adc210bc280866d8d1b23a59f85c39dbf4644d98f2d438ce102204b2b5970f1a6c28ddea6065331a7d29fcf1d70eb20054a0e980be2845821515d 30450220141fb29e5e9ee91519f4c5018742f97e32aa2d4dc8e2f8a2fa5a2e04cff53b6f022100b51cbf50c1c6aaa04b777d2c0219cd78f46600bfe4d36b7a2e3d4ad89d267c32 30460221009c14dfc70a5dc65e4be107b50cdbb01e4bb714c05170f0fa3a0fcc08fb4d93c0022100c8a99e0be2df287a43dfa98a5eae8e0b3b7e21ec08c6c94755e0e959a301dafd 3045022042b3439fcd4488178ea303ec1a78e82449abfbc7f930864c298c55407806bb69022100f03415654d82aaceeb7ce021491ef4e46c753c9911483b8ce445ffaab11b547a 3046022100f848d2a426126ce895028a1dd6d56bf98d81fb120846ee2c667aa6885f765e760221009dd1c473b7e4fa19dedb1234a2af986be5245d98be6db9691a6df9b07e619694 30450220435e7723da89326acf3ff2a0beb5974545a389a851c9424ce99ec1f4f96ebc520221009b33c98426cf8348b959457405122860df0e9293c68da0a4315e5c95a69321d3 3045022100e30ff489fdc3aa25747294686739a1a885039995966f6cfecff1f5aa359f155002202cdbca5e593f47013c9b65077222be3f7d077c9a1b8131de8eff8cec2e5a37fc 
3046022100cd332936c8f61ff0defbf50473af1350bed8fa4869177ef80bdd65f909dd5ef402210085cf951c37207917629d038509c3cb5f99b3c0e5953169e3dc2864fce0821c36 3045022028f94c549e9b494ad27c1c7228b0bf3b80d348f88cad50a595dd9ab6ddd75fdb022100bdd7302aea34734def7596b2912f2576e1d34cb29d3a517f9451c236e9434ea5 3046022100fbc13073906189e5f26676e4af78f899e16151cb4da5016514ecb92a0b07c0ea022100ab70a275e10cf0c3a7ed904d548ada972c088b1d2fa5cfee54c562f1a4ae0962 3045022047fc4f9563739c3345b286d62710124ae729770e6344a51e3f7f6e2e42b57e55022100d6a4278ecfd882c4adcf2eac8939cab1c830d6aa385046e420f44707f6bc55c5 304402202e32c1e61d1d82641dc093cda281eff00a2cb575a16941b17d9bb3add4afa73e02204a68acbed7a9fb95816cc710fa61faa0d408aa7032803e7c26f1098c8c3f84ae 304602210092b166023a3bdd22a2b88540c97bc93b0ca4272023c2331d4fe6ffb237a9eedb022100d2866bbbe3a85e78189c6ed799cc9af4cad69ddf34d1377b5add7441425d7af5 3046022100cdba2fb56c7020b3e11a2d2f9a9dcfe2788f0c4527177188d50b9de93da98c16022100f61b87596a33367cde051ec6c9ff6d7557bb738d24951cec2436de4ec2ed8c19 3045022100ab39738bd224e8d562238e2a15cf4d89161b20680284b4e2db4d506c1d08f1e0022052cc813e3d78ab59ec8ce886b93faba7ae597dbbdcb27423939ffff735ac6601 30440220339ebbe4ba9fb7941186b3ecdff47c7151f6ebf9c8587124170f2de1c191cc7d02201999209d2d234a64ef10b82b94a6ab80163584fc7fcbaf62895bd9f484230723 3046022100a5ce09474000295ee1194891e6f995892717a6258ad556180eb77e44dbd1589a022100c5eb74ea9b1feb8ffad7fabbc918689542ac5200499a445375c1ace9a5e2b1ce 304502205eafc1a3f907917c37ead0909d03923a57c918b4d3e78bbfbd8a8bfa155416fa0221009564b3ca54f768fc256c551408763a2cc0bc76f7fc55c9eb26a776b1784370a7 30450220221ca5d3978463c801a02f5ca7d8610472cca9c7a3a25c2af0200b1482a03973022100fad0334762ba3449e3ec8356ee2f9f1621383fafbc4e0cd278e4ef3f73e05c62 304402202eb3b7727d72f16c6560e9bc6661217aa6663d6bd266dbeb535b17dd1c1229d00220702d4f4ce0a1074aec11f3539a8ea5d7cf28975ec727e4995f6d4ad8f1ece50d 3046022100f03555f51eea230ab81886e1b48e23389abb7e59e7808037bbae408cda0871fe022100c661cf91898505951b23824d9832c2cddd3a08c97861ae17550eb56bb4b790ec 3045022100f2daae139685db1fb745f2753c358727501b2132706a2f5566de207c1c94a8df0220217c0d1bb0916ba13a3b70fd51cf5114dd5792dd03a262d3f83a08e3e47ca02f 304402201042974c218303e67dcb589be3dc5e8e793133109390080747a26364cf0f5b3202201f78d328bd842f854ec538ebc8874932d646065e3d6a44474702a4a9c895ffa3 3045022100e6945021e7c9c75c02f409046ee085ca5daa3bad08f54d9b9ce370a575eb9e6d02203ab3e9a267e2b7cf4073417a53a5615b0916973dab59c78560e6f2ef1c43a899 3044022079e02403ce0b911d457da12c91e36005a166c1dc63873181b8fa70958c3c278e02202889c231e7fcddbd2ec25360d2457a9dad58c5e3597c677b532b56f7ecfe4d6f 3044022054410c8b3ba27bfca0ad0b5015a8064ec6808eb3dabef923345840deb935fec202201e842d7887607196a1535065c193915e1b843d61183cbb5ae87538c5212c6223 3046022100df4a400867c08bf272b1ae8ad845ffc9220bd5bcdef36eb1a675bbe99a5a73c6022100ef581bc7ca3abb0085b065efcddecb6e5e0e3a7f6fbd67d0b31b3ca32eedad2a 3044022046052e0bc63f2ef24e60a57a3a1afefc33f2be55d012e1bccef766f306e3a7080220776a4e92020fbff0c8f81faf735629d1dd2f7d9b72ebd734186f6986a3724aac 304502205fa1915b9ecc36898dd0c1448dad3322eae260c74a329e567a5b91db10fd2189022100c10d80c8beb0bfcb2a8bceaa1b942d84a47bde866d013ab1a551a986eb708cda 30450220140d9dc859db6114143835638a9af18bf3493a963f7b163f88e882d56c9f6c51022100963290aa3decd0db8c8b80b4452aca01588e50185df5090e444bd06475dd41be 30440220408a0901654bc8cb02d4f5931a26a15ec419a25d95271a6fc49f55150f3073cb022046bece6aa1b924771f34c1347c40932a808f2e155b956d8ab456deec4d794a77 
3045022100f7f3f75faebc12e7ce712e396ea46f83f21fea3edece3787f6baeca5ebc0daee022002d7813ec3875fff6d0d57aba117f5df32a85fe18369d8c93fe14c1994cd318c 304402200eb9304c34c71efb3888d63c16dc7c6855dd622b98a995e65047b34a29ccba28022050dc02654a96fc4f9df1be14b371269eb3439c0181806267238da63496f7a6c0 3046022100f504f7314bb8841a5259026f17a4d44dce28cd891da394d9e6c412baa259ec830221009e35131eefda39477d8962b55adf48bb794078180a006d1f8363e0f4f0c527f4 3045022100eaefbbac3ddc43e6a1fb6ddd5473fe7c215accf9834dbcaff6399c72c9e4b74f0220292f898c7b35c70a43d39fb3836a6789e27367302d48456ff909daa4100a7411 30450220691d47fcf99219042541f2875f821dcd20ccfcd72b4796b7d30c13a89daeffb7022100f569770c5f9878c01137b28e46d161528c027a7d9a322c73007c23284fee0a83 3046022100cf11b0e04b2d28c4905bb785a97be368b79c735095e677e6e37ac64bba08458c022100e6796306827be4ccef2af86d99b7befb62ffe75496a3b2810a7a55e6b55131ee 3044022013a31093349e25539fec71a0aca611e41d002d9e3bc28be69decc705efc519f7022060868a1e24ea1034efe78eaefad7f0ecf7173ea904af434259fe513c6ad375e8 30460221009d790f16ccae38e2d7eda27e368e5bf82ad8f1d632ed1f445f27e0262aa05065022100d65c4e4561af6b05e003520a5d3875d58f7e8912892b4831a1ef2d154dad2d41 304502202d8ce3ed32e248b4a17a978735fd7ce8f2908501d4a3ebb48f6dac5a069ef8fd022100acca15ddeed06a6d244a883ef97409dc40b82e5d14f6b8cc252f57d25680a5c3 30420220536898da3bc49fd3a28c9979b0e4f8019a6b161e908ba51569ffcde6afd91a41021e31f3987f043eecacaabe981f604894b59e09ca835c5658910bd0cae2cd41 30440220314dbe0718b99b9d3ff2dc8e929e08a0bea35027007ec7000a21c0dfcf1f7b950220384879bc00b8c881fd80dd88e24e4b4e384271795ddf9783c8bb05bd83c7c77f 304502203a8c2caec7ea04a270f9991e72d159a067e8424c81c6d620f2dab0195e9df4dd02210084f30087b712f9eea2b01c5b73e1aae3ca5cd043daf05ae2c91ef74038cf9a75 3045022100ff52937e887ed56bd041815f21821a60c53501702c3d7f3dab31ce6af44d838e0220213e1cd86349e6e7b984ac747063d21524fb1874381eb65b9cf5de4a3b848d10 30460221009d5f611a39ff7709ad329a53226a409086cc0551b603443f6934e6c66b9acdf4022100b3e4162c18d79cd6c42cb794b000220f8a171b777c1b5a301ce1ef75b63b65ff 3046022100831f47561f391bb12c1b042a44eb5f854494056663df16fc2bf011927500dfd8022100f067b549728e557131efd02d593970e658270d2fdd5912eec775662f52971b51 3045022100d76d16fa8fa33644a69a81e400b19c6a20f45fad464dc0f5884c6e6b0a45f12a02206260848055eb0ed4741ca4f340e5533e1b342d282f855fb862d05076a63f83e4 30450220615865c156c021ac5b08bcd046d6ffbd1769cb3688cfa0d5d7d04af191756b240221009da4b9719652f614698f2c1b22871738a0b3108bfb0662b16217a76af3b4a45e 304502201edce0d027b9792875a1365a17e790bedfe96a96efbbb261c0617b950b86e2da02210082a9aec40cff7ecb525ceab9a175ce8c7a3e9ed15b755ca62ca68f55afaf98f7 3044022068cc90f6b83e5965fadcef68d392c66e712f605acce4bddd931deb8d2aeb622802201553d8d21a55eea84f81870e7f4270411d651121ef229131a339744ddacdc32a 3046022100a71a7d04bf70234b4d4a7ba1bf98dc54f8477866cd507fca1a2639df620032c2022100eecd063cbe5c8172b1e32a22106087580eea1f85d343c5cab3f1e36743f76a33 3045022100ea8de229550ae5b8d2b8680f6388afff31d2c962a8440c81a1f4336da926b17a0220426bf9890cca7306a3cca39049a03935b8c6169abc4897dc2f1712f0b88056c2 3046022100e1625a798ccfcd64789c0d4e29a743fe6b28836dfa96fb3e158b944330d13e0b022100ab6c8437bdc4d498914b395df1908d15b2070310f31f84bb292dc0b6e958b6fd 304502207b673558e7bb065bc81a5503128c3ce5dce61935c8d6d178805aebd8474ac311022100c4b7c879af05d81c666c9f072063f52910cab813136ed96e0ea33c74b537b26a 304402203f3239d7e8a55a482f927009839733147cbe13a6f49529717be93dd45ede3d87022064c020e9e1b11907f99bee9fbbca2ca449cd9b3db10875da8127c6afadf6b835 
3045022100ecda1bd72ef36946082f966b958ff5adc7975d85c4207a59d2df3833dec37444022038e126bfa1ecf5ca7d3776e5e12bc1df37a6327c274dd9eb19c250f6d062a503 30460221009d3ed214714a54be849eb196e73ecf77e84263b796267e54fac12b7697998320022100ed4b29a2483fb18a3b9b1522947c086d4b718652f3bfd1de6617fe1ec3577349 3046022100dec9c12ab77abca779dd68d34fd81aaa4b531da090fd28c76b6d0f524c12851b022100a48a3dc8f3610807459c7164bff609cfb50e7bb65ca074b239d18f16129f18e8 30450220735db2924f97f894f8bc821ff5e287a46e7f95f67031298238e34f1db06d9dd5022100becd440c92a77143b1b10e12c47e8d6ebfa66ab96fc079623d8c3996c66993ba 3045022019db7bf5e57bda283db2acc0cc0e45bc0853678dbd15a56cfbb08f27a7abf2e50221008d700cfc1e53007c5675ba99055d5a8736a73eedeba7557ced6dd0b3da94344c 3045022012fd2a2cc870c1265cac7ddfbd7437f422d77d45d79048107a0910d7b8f5adeb022100e8a7274985646b519472f4fe7b64e1fd84d9446c4a8c3b6926c127137f5fcf74 304502206eb55d346580ac99db4c41a37d33e2e41ee72e2ec372dfd449ea6b5615374dfd0221009e88ae63948bbbcd49df33bfb048ea629723f15ee4e3a0b377d65566d375f6b8 3045022100cc424904808d36fda3c81919a793128461396a4443bd65396c665f1e8731bb4f0220696d02b05492706b0f92f3c8c2f95ac30f44f214bdb94213131df44205627730 30450221008bbfb8d651a3be1da967021be675b795bc3aaa02ab12f41715967162da4daf3302207c91e2a5c168053b973425d68114463dc56b3fe02c450aebd08433a990d1b35f 3045022100a6c283ea3b21d121084ffdec6cb06706cfe8218652a47da17a7d935de1c7a98802200957bd127d78edd251205776a7ca53b6d0d3bd950e9ef1c1df899825e20f8c55 3046022100dc3a3da8fb202dbd83f2110b174cc064386b7b7677af54ba703fbcbbb3d2a0b7022100b067d7342ccbde825df7999c5ae77b452767846b4a07815d9b8e605ec527f211 3046022100e1594fb77afbbb31bb239ba09f5e702a93c3fe2ea5038740b02dd44bc184634e022100facd93254a9fc6953ff5cdb924a05cdf7c58d3dbfaebbc5dca7a6d98e9fe994d 3046022100e221556100e1d508b3075b09c77c8e3a103ba35c6dbfad766848391cb66868de0221008bf58b48e3e0d665e2f388b533440c116defc23de1e24de9a947a877e36be434 304502200151d45f4ac195ac9a955a3553106642082d040bc3c2461e9388b4f7be72561d022100bb95c4d09bc5bbd33baeb7047b0055870a704891e7052b70f55ccdb912631e93 3045022100b2ce5038a7d30abcba480da4c0d81fb74b61e449783bdc65ceb2b8ff717e79760220636afe235b11d6a9edc7e1894956a66e5c979df4ea5b4ca57f68c57b0349c4ed 304502200b0a97b73dce91c167429c4e42643ed752a512462cd4a209016fdd94bef9873a022100fcdc293e3777a2ada0c83b0b32813c06d06e43e6e77ed0cd6068f94f551bf0e7 304402205bf4e5d83f1386e888e47bb93a35253a9034ad607d0022d622c75beb34cd03a802201c80853131ab74ca779034b903255498581419a6a2f65ed4525e297ea3a0874a 304602210090718132d209916b95630ee2fdc8ea0d0dcd04dcc7b6a2b6526492fa0a7c85a70221009fdbc81de45c63c2f69475a2ac377a2dd53b236daa159310ce5bb40c6f1507db 3044022027b3053add7bb024c0fb1d7de3e8b8cf5b64711195442a5f3a23ff240ed2181402207926ab0cca6e4afde3d1c6a5624e776a64f58aba351e86f5d1c0d96831de634e 3046022100f9a863ce68aa6f0246c94042782672089ce150f53673de8e564e2fe318265403022100ef385579b967514acbdf8ca09865a06e9a6aa60f147f09cf4897567f8a3061f3 30450220022ce5c9a21c69dd15655a2723cdb5ffd54658c79a267d1722a6a1acdf70317f022100dc14a828a9ce8457dc33eab8354f98f93fd93be12d0f119a115186ab962252f2 304602210083c47841c27b2c9fc3e46e98f4a5ebe02ad57315dde7e17b3d9715fd122abdcc022100e9d3a430eb3ee0fbd1fad0c0ba0b216a5d0feb95541a65068dfa541dad3073d1 3044022027ca48755c8d2ce8445cfbe9dcbf144fd19a5519a8f6cfb5ef3d91ba318dc99202204503d7773b6b60e1f412b76e45427d4100c5cb9d8fd5e80ae80f9b34fed3710b 3046022100fa2fda0c3192c08e6ed18185d78bc7a0befb3ed6fb6ddcbb722bdeeeae129585022100949937f4143160eb99766e41313c745dd981d8c4134f9347ccc442eaf564742e 
304502202fd90b38ecc0b162a2c311029d7c7d9ddbf0f40e6b6064443b32578fc1b4e18f022100cb457a2171cc0c964a5562d20af31419d6b32807b7362b2016c0c703b5e5a93c 304502206348057dd54fbfca702b754cd2a3d098d2736db1348b3b4a1741bbbe539f38d6022100923b14503c2f9190bb4c85b33ea1f95ec0453b30fd6da34c3fc7498217fae40e 3044022071da64a0e554ed0fbd0795edea1866b0fbb2332bd4e45b137fb3a05ea463aaeb02200a1f4c6a4014ab9115939c8c04a3b2dd24627c9400ddf56722408656985a0ae8 3044022057f2f740c534248ba5f7c74366b7a1ec867b087763a54dac691114b657e16639022019342bb7708dd7da25bba07295a06dcc2bd5dd7ea2d36474572411f3af6da25d 30460221009a988f2b70b72fa6b5a284f0575c5a6163078b7907176bda2320e781070d96a6022100815ea5b50799c16d672451845412cb2aeb3f46420178aea5effd4aea8f634a08 3045022070e3fca2f2901b7a2ddbf844f741c0a7f66ecebc2d98bbaba850077410fc983702210090ee0f9d6b01cf8daaeaf1af6f97d4fe5329d99186284350cbb0691bfa6fb7e6 3046022100e31580695a289e4adc3f0033d9b741c97dfb8178520105dc3781520951e45ddb022100c84d0f74d67d7aa2cc890548196548fd0853cd6fd2ed6c2bb1d39c30329ebbee 3045022072845aad272c335ee4c0b16c0cd86dc2d6a079b0baec3feb785645bb08d4734e022100a7fa29161e049860951147d35acc37a6bcee19fa88fc90d05b5106753a76deae 3046022100e793559a7762b4d371c9eb6c695797810054dffec3c74853d62ea2c15bee45df0221009b93ff0281cbdd813073567a3d5f3840846a38121442a1d9e1f12e7d7639f647 3045022100af176ceead121d42714932262e89a022d3dde37015167bf19bf102faa4d015f5022046897f3a548dab1230adc8928eb4f7cc394762d6a8e4d0b6ab1256a7dbb51514 3045022100dad794750a442baae58bed9d32cd2f97e9b7ceac6ac9ec5a8dff8acfc3af126f02204754014f773a6836d3b1ba88c27f367378d9fb3b49bc2d3747bc479575cf063c 3046022100b9c4b94afdbca3e7084a9f4e45fb8461552bc810ee1dc31434a5057494e33e94022100f5c06c40d7b2a6c0608ad129c8964fa10f0dd3da74a42ffb74b25b291fb92edf 3045022100de56f68669026514eb78c71840e693e41f1c88309698de900a01bf915b6add1302205ee5dd6312d2eb87b7abb80634fb276965abd2df9b0c1641bab62844ceb90ab0 304502204b2ca10875d0a0e376d7a5790ed374096989f32b3fa00f117ea62e1a460a71be02210086d83bc4a0166c46349b2418a41dc0f3bb69c4554287dc8f967c3bcacd47f07b 30450221009d222ab5e549cffa0d7b4051a05d0abd93274b6beb6a6e26ea4791ff5a2f7f93022040efac40546eb5d2401f3eb447ee5bd6ee49b63c3dc39bc7fd14931bd2453021 3045022100ed9d2d627a55f819e1ec130504f6e3ae7b95cb5d2f43b9b9b5a38aaa671268a902203ff6fc249a0dc5c7f0db60f55f99342c6206e4ded255fe66d03ec48455f3e4e4 3045022020e14a91e8b29f5713d142feb34695556a0d246bda811a34d0394ce2255cabd8022100bb8117c94ec6a5f6e34859c80dfc999ec6c1470a15c87f64ce65cb8359e8137e 3046022100b221cfe7858d29eac9ff2a2da43aea9af3ad2b71090ef8456f125d8d3506107f022100bcd2c0ac1faec0e8ea1a16a0e9399381fd2ae09a1461669a728913149508a420 3044022031f7bbc78eac1ddf8adf3f37928c1429cfc131a20972c856cdf17ad1f213f3660220101d526fd09df0cb3cc5fcd7be525420d613b9dadc537e2f0b85b28ce4a3d87d 3045022100c3af74dced6b3556331b5a4de44e11b0118a1dda773d5be17ef131119e4664e102200bbe54ce0c50d42254ee06d64c59278d13fb8546b186c80af9a4a9ebc7d2b4ac 3045022013fd9d6a95a56db6fb39e33082307288a80306f285329f22850289d9039ca3e1022100ca9153c7d2da23210b30f8850f5bbfd862b17369411a0220b24366b7b8f88059 30450220287c33ac7d106943b94bdb732620348cdb8798576be095e11586b1598fad4d93022100b8b7495f4318859276b02a0c42a4802310b162d70107d62663d3c5ab6fb5a5fe 30440220319a7929774e51902e17ddfe61b385d09c470d1e95241c9499295f0aa41d6430022043e20573467bd16e04dddc5a381356396b7610c0b44db076193a5449822f7a5c 304402207c27f2a22da04821b827ae9b195d31f82299bf32b214febda7ef87ab6a44436302204ef9543906374536036d89165b91c71fcc56e15d6899ce0b4156f58bb25286df 
3045022100effc7a18e19a65ebf0fe8f6ec500e303e8e290e8e68ebe7ae0ed6c3f2412d7df0220074de966e8b70cf47d1080ce8c63a69e32b0706e859f024de4e8144ea92b6c17 3046022100d609437fdb005b2685a7b2bf0b09c6abf94454df6d2aa9a88000b503de0a25ca022100d38d0927e521b0212672a53b296fc4966cc1b4e1610217025c3484711a053980 3045022014f89c54687720b049eaedde6f80069ba69132980498faae2773734120586568022100f578b366874424a576dcdbfa3b39e812c822c98608d3368bd805ff63cfee3472 304502202ed8187f216bfd6cb377b1e519c157aef63a8d34ff8783d18bdade62a1bce08f0221008665d4aed3dc4e43769df2913738f1e6cab796061483e00b827a6655e38b6db7 304502207a22e036bcaeb2d3cf8e4c556640aaf8755302c2f044910a1ac01215e76bf3dd022100edab304670558e3e78965e91e4b50ab728e97a8d0f0fa5b4bc426d4d2edb76fa 304502205ad5eef21329b198ec482f427989ade32daace581e92c55230dbebb21a471f780221009a403a1ed6c86deb924219b9cdf5f421076a459f5f00ae3005c340db467b47b2 3045022100aeaf981f44bf03d92ec41367f055e920c7e2e3c8717f38d01283cf023541807f022072e454453d5901413ec748153a6af0525e0fcaccfa5c13dce2e42d7fbc425dad 30440220573992388c616b8ce61619fa0b94c2ee9d186df8312674f799805728f990152d0220381218917afe6e4f52f165d22cced6019c4f58cea7a5c62b2852e96b2ea76276 3046022100840fbe92bd3ce6a7d7a77d9014218315bdb24f4fdc098841fbffe0ab979982e7022100e71ef327c6ddeb435d5691a24ec3f8aae2e35dcf1a407a042654bb953181ff0b 3046022100d8cd52eef22b313ee5783f3c298d9243186931ea680fd8165805dbfc934f9529022100c1c43a448db1fc88e4def63bcf1dba5b75c44bac5c9390c51b32d830fc10324f 30460221008eac585dce53b1e3199fb43b6169533c9d7c717b4535338968de759d9483f52a02210094ff2d799bca2bb8d5614fb4cf8551bfe0ff6bb1fc1a99ef606989abd88bfd98 3044022050ed7985db07a5a56cfa9f1ddd14b3e72ff99566f98c685649fbb79758405fe8022073f38ab8cf8898c8c96c6333fd0300f94eecdd735ed8840ea9d903f82facc171 30450220304421e4cf6c745b356d3c7c23aeb978c1cee82cefde06de89221407ac7db247022100c9d9f1635de77c136f4933d6064cab7440df4d083381bfb47082617fe9dadf2b 3046022100e3d3cd98d320f1a7452d699dda237a6ea6aa628c130078d1ce027df53a49bb30022100d8d01a14ffaef20e7f1b0a7bff2bcd7a3658f4207ea890ab865e7cdcb9d8dc2a 30440220402dc0d3ec636f35ec9cc71654533f2cd07a6db46d3f8808d05dc3b8a58bc25202205f5faf73a2b494bf7993fa835088376b910bcf7b97ca9ab3c81b729db1053790 304502210092a5d6b6dd429215d050df4d557ebac23434fd15acf2df0bbe3d04c4f6688b5d02200f53bda51aa8e2bf628234e00d4d5aa43f727afeff36dffbc62ec0b13ed6ae21 30450220477e9433b55692d85642614671c69407d03da291e126f699b401e7d1cfc8aaf0022100c51cb186d09cd0b2c62d4f71320a5cc4be6c7408e153c0980b013d53e45ffcb8 30440220016db66ae7d7b3de7ff3f2b42ef9f3128d03a80dcad8dbd20432211d2dedf7bc022057f7488567c4257b8b3042ce5329d205ef24f338f5de5e3e9e7ea39c6a430a9c 3044022049df4d8acf07f5973d99a24ccb7f58ae370e0d2bb37659aa475b9734aaa98c75022068b16c706eee0927d299caed27480ebe1f66244756f15859689764af25948128 304502202f83859e2ba36b18fb955020e1198ac9aa1b68604ec26e6605dc272cb3c203ab022100ead9558511e3005edcabc07a463f4c4a370532a38ae43d2dae0b689817ef9eb3 304402207afff2f06fd10b7c54230a74c09fd33815723373ddda1e6aa745597a55b96a5b0220583a899292db4ba7fffdbc1c33ea4f46a6507f3bb5db93746c8a2dbbc0d13f9e 30450221009a42dc5b304f7becedcbc77931328d3f13fbe0b3f4ffdabb95fd39f2a8641d3b02202abd1ae4ccf8c350bcb9c7243db97ac86cbe7154b1a7690bc222d6f33666eecb 3044022038ef0919ad814c35d085736a0843e2c4c6e7a922d3f24d0e9cebf272102592bd02207e3fe3e4be62d9daf43b2f1f767005f3b03c3715b36949301ac072e58e9fc6ce 304502205ed332f76198de8e93b96f3ba05c5513b2ec101e6c79eb6eae8bd7ee182f85e402210090f08b6fbd86685ad3d22a1ac670c918a48b26c5aca52bfda019cf60b8a224a6 
3046022100aef6ff5ec972e9e0198909527707b901019e472a78cc7cb3953a93a25fa3badb022100fcf0abf086fe53a55750989e81e5887d925cc9201784454b247d67fe9a298cf3 3044022066c4362a747269a3bc30b537412a75f7c5e3f5741f240e8d7edba2a018b0036a02207675cc472d77fa6c5a2b17c3c89e92546f10cdc6c4788bbe5ade8c9ca3d4207a 3045022100aad3a918a1e6d5ce37722b0ebafee3e3cb54653c7e38ce3dd416a23789e6087d0220402931456ec430a2783d9a8f903e05f242c0f62cb1cfd03e0575cba2b9222227 3046022100b2f0fc935ba7e56b199b3996b26d0459a00c58ecad4486332222d0603cfbf29c022100a2eb9836f495567612a12c4cfecf61caf99e1476faa2f4194b54e0b1c9e05296 304402201ff0be24e002131325e484e345521e3f222f222d1d7477f4c5b043b30d5f6e0502202071416bb38f78fd1773798bf7668f337f28d97d1a904b5a6edd23cbb453a594 30460221009542dca0fbeaf39e3b696f978aab2d5cf81e1a31b82e5a3fddd68697d8386e6802210096d7ac250d8894888a1e2865a20238b69e486a49d8726e61ea40e4b1306f67b6 3046022100fd9f496a615d089c4facc046ec689d55e464dfce34288323ada5e8c49866f317022100f0668ea40dadfbd6e37dfdc64be335c0dcf774a58f5390d4e147c59a0beb16bb 304402200db45e56f0d2a3294b18043ac6b51075e908f2c4ebe872d00155ac600f2d2e4a02207ca44fc556435b0c9c9d2c45502ae163d7d0f7e6e4e7604037830a814290f0df 3045022075acd65dab5904d0f6963bce97e7c8e7a750208600ecdec699b2dc1d38bc99270221009f1d295409233a75ce8b099b663140f9e4f4607e65a1c92cd3f2b53a188b8c5b 3045022100ed0bcbd4b3c52d31c5541161eb0ca7db746c6df535eb49c821a7523f3413441702206e97f9a487916280f36e3d6514daea56d6e112aca63e8227dbd3be5d155d3b8d 3045022100b6b49b7c8d62800e21965b5f844f326af99362315db408ad33c2b3274369a5e102205ad1d04d1d0449b17bd59aa08f3604d311594c9923f19ea5eb933e60dd10f382 3045022100e5ed1f4ea5a5fb4bd170495308f968bc84794cce3b83fd4ae4e030d25952e0020220453117cadc3d1fa667489083ab91133c913fde95364063597317b37c02c4c6e5 304402204a931c9423d1bff8ce5559ef533d6483fcd4654fafbcc5e7cb6fb87bbd6c623202203dc357e5824f90016b610c2c694f1988f830f49b6fefdcc20aeafd37f002bd92 3045022066c655ec2748137172e29994f8999749aeeaa0b31d8e29abab60636d5afa189d0221009c0ca4f70821edb1c47a3917a5e0ff0ea3c8ba1f0fece6a93988a8c8ebdfdcfe 304402204c48a6f305cb6ac28a937d6b27eb4fedaa4fd5e1b9258a8c2039dbbcc755efcd02200ce1f372e87d37ab3b08a6fdad9a8b049644bb3671309a0748ee87d55d80d37a 3046022100d27ace7dec59ae1104ada098cfd67cce90a49e6c51b9278abcd318214bf30230022100ce1460a8e311ca3064a5e1c74f75c2394efb1d172bf70092917c45e0d6334c6b 3045022100e1bd27574573f3d828ead4bf1d9d3ee8cbe86b4e0e1c24d12b4ab8ca5b4f685102206c004b485b718833cc27b6a3b74f7e556ab90a295bcb88e58c57cb35a328d77a 3045022100fc3e043cdf77bb50724f4fad261989712f7695c538d87a3e921d1da37788c5ed0220335451a2089cf636469643a2a479c470d05241aaa3bfd1e6d191f3ad95976bff 3045022100a8d600d0fdda815c56f45560559bf0cd8081e2bf1b4d004068089056536de6020220682b97c4f2d566082d4aa29c40dc6d02ff9769b18cfb855fcb81e2d00937d3bb 304502204c4c1c8f7745a1701e2f6fc309252547040e24036f19d55bb7d486ef9681d08f022100e0b9a9a0a46c6e9b9b1c8aec7fba40311d4e024617fc1a1a1267ca0ae3cafffb 304402202d18c59e1ebb9ee26341e0987019f1486d691ea78ca15b9b249db3bf3799fb460220081c38664bf20f3b4ea07638ad4d2493ed7eba8f45fc35816c34eb95c6ffd492 3045022100dfe9079537813ea5677268e8227279fef045ed6f425f417e4944dcdb3f2f04a902202a8e7c9aae13ff146b41c4f6769d6d03582383533744bb4aa002bfe390c0d920 304402204a3efc8965bb14c134b4daba03657bb5c47270ed484b2e5503227e1c9193d6dd022032d868d31319f9d36656c5313c18173e2721684d854291e80351011b065d82c0 304402202337bf743fb93abbf99326bc1216ad4ea3309d611531b063cdb32ced9a23ca7602206e74b556021f230f4318f75a82452e13a847d1bb61e606fd48a403629b721480 
304502206eb4c98c2e84e9aa687a2de33d81c62cf36cc299ff01db63842049a8b67f2a0f022100c67f9c10b4b4d8ee55b5e14d28d1137c0bd2c8502f90e726bc43cb2ddbe6c049 30450220428df17e0256d01febb72af91038f69c3a94600a021c6c29c4ed85d93f164a20022100fe370d3c9970bd88694bb2edfde15cb626b3819b3bf603295ef4061155bc1927 3045022100b04b2099e295ae030708a7d30dcc51f7b3a15e414fd41a2df3a9f069e33fe0f502203855edaa56168aa611033627c600b4679a3eca0b999e3ebf6e6ed289d980ba51 304502205341b8a5620f1245e4346d7fe46f84f76604ee1c72ad540980e39d5cc26f7218022100a843fb781d0ea26838bdfdd574afea52ae72b2b5a6cee82b37cdd3e9613328ed 3044022076ed1318623cc552c188e13164b0345f032fe15262d9bc6e8f1f603b50ef8a4002203d0555b1fa630fbd2ba367afec9bbacf54e8dc4bac95055f06cb6ab39aee758e 304502201bcad2015e860dbbed85d98c29e82630942fa6292ba8f2028abd53239666a09a022100cdf4f5b3f43f09a538a2f8b769ceb4c55cd14e56552d307a0263ace0ae405e75 3046022100bdddb735b6f321f2fde8254127dade5d87c75038497f2d8c11e607d3a7098ed2022100cf0c7c7d4472442302a3b2ceafce96a0f1d8cccfc6a472dfcd139933b1383724 30460221009a731c50b525855cb57492b571c8d30c96546ff428ab6618bd8a59dd49e8a33602210099dab1970e683594e43f0b3cf3534f9bd9e2214e6ecfe0b673b27a156ffe09c8 30460221008da0c82cdfbd807fd4c7b337deda46f06e802eb91127d992b98eeb4bb80f5284022100daf6ea149eb9e865f7a703375e4caef63c450ec589c693a5d15036be454fd8de 304502200f27fce3cb391d919b0604caa1f2a04f14efda1c53e115d53d0810fe9579461c022100e4dbe5fc4d8b4a775c4f12326888a49ff3aa6106a694039fb949a88f93c624d6 304502206839c648fd4524ae69fa3ff95c6d9f10bfdb2518894b31e95dbf4d6c1895b04c022100917fa92caf4043a2532ab9f6867eea251e56b5c614e6beab715d54433be82ede 30440220485a9fcd3a5cfbe5ff9f2ea04a98cc029222d4797430f22a093b307830fe5c6e02204dfdc0bf8e1acdab76685a578b776a2f8dadab2d89d746db2c699f54ac1e5552 3045022100d8c674d815a91bf73ea599d92de8f75f51ba053849f1b9b8babd6e10f8716173022002747e94595eb9db0172771c50cdf1d918bfe9f830124182a71298f5ff53003e 3045022100bb46b7e7bf75e7fb80c5ce1710227a3a14d4ab43ecfaff93e317081dda142ff102201b7f4b10d30b116f5b71fa5a3adafdb666c304c0fc0cb045b5e0be3126a2cc7d 3045022100d8de9175339805d877b941362a6beda677e2b4022c0ff2b7b586a0070060d677022064b5ad40364ef3d3e2530eac8efdc55186f28f185e672efa7b56e10765998182 3046022100b0a003ae3529790f0dbd2ab003f68b94f26a44c800ade52b4c02731384cdd356022100db8e468c15c7f462f68ccf28eeb13043923eb22aec2c15e5ef77b0b7c82c4ae5 30440220319fe1596fc4cd047a25ba03e2a8b43fdf00000d1e72b90578e1d70933e5d72b02202aa6d9179a5703cd6d4c52d3a199eb24cf2453737a4276aa100301ff8cf5d5a6 304502204271f2bb3bd52dc70d8a1b21d315b1d6addba65651580d3a417e9754e1be683f022100e685af0511e8f3db9e1dfa7458fc5d2b2cc7b7bc82ecd704f121bf12accaa735 3045022100fc535080eb167a47418f3314b59764c73f2f0c715482b0be490f591769de03c7022002cc47578bd0951a71d3926f1e2ca9aa7528b7d5c09327b14e8b229c4d9744d0 304402201b7f14d5a9730c0ad0e5045e882b9eb5aea824a5a5cb2cdff49a2122eaecabbe02201dcf16c048b2bc2e9f048de390df2a9ffed0bcbfa17d1f9639877f2a003949e2 30440220601df50e8f063899d43ceadd582d8c4b68dab553585f6ff7ffac9f8245754049022071603b69dad6d0be39116ac57c70fedb718cfedd264f6acc960c6ccc2b713b7f 3046022100c4fce7cf15ef52de627173cd381449004d2083949c49bb38d3bf9ae8fdb0c3e70221008c65ba814c18481bc5b15fc14480fb06e96aea1d868bc1056d44779a76f75a58 304402205a21980bb1f49e15efa400b173a305f94a1f1c5b9f7a68142d90a60fc59843d90220518f5a761764450b6d8a0f571e35e19b763feefb034249181fbcb70b8eca1479 3046022100864658431c7f6776005096d7a37adaa851c274ebe76033a8051569dfeb9d2404022100fc1128f45e3efee6d15f953d15859cdc8c83051584bcb29f26ab6d081436db76 
3045022046d82540382c38204fe57c61c6a8ee97aec473463465dcd81a21215891815e2b022100955381c552d73b023422b45bcd4d6088c07f2cf624e50a4d05061d642139ec44 3045022100a3c35a967ff5041c8076cfa2824e85ab8aeef37559b3fa33d49d697308edf28302207799a5902f5ae3227a778615591faad0d3ce041ae20f77144693a98dd381a7d6 304502201c6eb0e0c81075848318094cb0c11d5b118db75b3ab811703315565b1b3e676c022100feb4780e4265aaf347ce2624e0dc243550d9702079e1e7f0c9db63fb442b29a5 304402204ed47a7d245ce155d100e4b26bf8aa05bf9006e5775197234a6e364970240d44022029b822a1a156eccb46498d41fcca9fa09ecfd07c698dbe541fb96ca28ea6bd83 304502210081af9d3201c3308633ce4a01bfb819980d638f454be8286b67ac6f749e2d0ad2022010f80a30a834c511d7cd27a7a035ea74929702964d5df19523f844fbee72bb38 3045022100e2b6ff3fe89f8c3455a3ce64bb5b87d9e05ccd8c723bc8fd77f2c71754b5700e02204a60d87e78d463229fc91c3e5dcb92f9b5465a2f9645f73fe9cc7904a9220532 3045022065cce1eccd584e335b61cc01f1ba5945106f5660324f8a604cbedf9096e4278c022100fa2dc7ef44d0b0219b91869d9eb40fb5d93d41f4caaccc0803fcd610837c28bf 3046022100918d56d75ba2f2f435186ae1231f005840ed3be16c63e11c55720f1bd97db4fa0221009203cef09b476b072695926a025b72cafa7aa85fdb1d61edd0b77a883ccef86e 304502204813d4fb21cd13c1e234f1f3dc8653c92ff3ad63d96b709d4fa4bfbf66b9731a022100c4d142ca9aee54d7d404a2dcdc6cf685463b3a3c8ce5ed27eef4e794cea09206 304602210093687fbfd352dcb5362626147baecf0a628279983668587b420f45c51aa7de9d022100ab15dc7a76c8f0e198968dad8a65e85f54c692c872f8125944106cc1e131bb5c 304402205c1817f139ca30b757786802d092576185e5ea334c15a366c2c9f998d7e4f8b602202fa4d5d0033fa1523451f439e5923d3bb922d3acafb1adb2b23f3c12d9dcbbcc 3045022100d4db17804ce50061a489814d60279503b05d6287fe399e5d3b431b9576f279e502206eed4afc37c1fc32f4d7a7e9e4484f4d60a593ea97b2a7fab2874007518e0d36 304502200c4606bff1fc7d64cd0e7e76111c4c099233b9bf56b16762693c58594e8b0eab022100aa0d0bbfc3d8b0a6a2a71f72201692699924a4ad4d6024190643295a6e7d15a6 3045022100cba06d36a8482e0f8265490d96053d519a5b6baf914db97e309259859bbea07e02207ffbe1a2eda2b10067bea47854c3d7984e2961a53aa92da087c3985d43e3f2fa 3046022100ed74cf15d07cff36a8a218689cb64dcb2c32abe6f949126a28ff9d0cf167d891022100cd5e6f691dce0edd506f834f12283eb02f9c1a923d99cb5c32a6805a2c6de6d2 3046022100d2418281e496d9ef3c65677a1dfc0b24e478def4ce8946656dfd6ccbc92dad09022100dd75c317c4aaa75f296ae3eea8b10bcc673c1e2228d105059b5c936c4b2c15de 304502200d6fa4b6a4eb9eb2b550e0f67de6ebdacab3eb262fa2453389e86225f3191a4e022100f6a0ed9dd0097fd1a00649af8d6e1f87ead0e3378e07fa675937521dd2dd6976 3046022100ec053c5a63c55337bace7d7931eaf8ae7d2a21b9483628f788396da386e7700d02210096f215f22303ecb46de64824f5de14c899731b2bacbd0d2080ae5f9c9335ee56 304502206950b172e150e6f586f00419474b61458f2928f5d92f10e269ef408287a593fa022100ae874910cf86eca44295875c39f672326c1eca8c2e46d5dad5d93052c868f8af 3046022100fb48ec4f88b26ee632e7ad6213a7357a6088b7b3fac65e2b6bb441e4312648a3022100aea8ea43f8dc8591c624b04e686902af737a7f167708e240e1ab5a2eeab1955f 30460221009c1c655357a3d13866b0c9bf459421c159cbd27dfea9c197513ffdb702512246022100fed748f1bbe90f5c096d1e13edc9529e7f34d81488a28cbb80f6fd3ee6c432b7 3046022100efda25ea7a4044b079d831a044c2d051564bdb68aca76e3357495c309c95a6cd022100992ffe6b721d00710ebb37664355439055c62bba5821430d6b1c76a17cbdd7bd 3045022100928996d060eaff795b3494434315f91f5551f099e7e2a5e867e102042a671d08022062148ed5aabd6e462f310a64976a4509d49a86e48dc9c821f6af9d574bd3bd6c 304602210083dcd4c5485468f579b751b46f87fefd0608053316b5a50ed7a24b0bac137d52022100a811fc6d311afd8f95fa8cdc26936c461bb001e93d776d2f5368755d86d0289d 
304402201ce0709fbcf0b7c1bea111ea774de8a4f0b7cef8a7062dbb98cd411097c50fc402202eadd5a666fdd2f35b1ced3577cef53cc78145f094166d5fca631d9039c1c46d 3045022100d931aa5dc1d13bc58784add4d2e5d8fc33773c2b83a36ac5c5d8f75a14bd55c0022005bfcf113e45024a3c9ac6168603d5337269a4dee64feb2b594aa9ca19a98c3d 3044022062ec91609b7ee05688622208619c4304dbf917aa6393e9a55ecb4661bb43995c022053eae7afab3efb71842183bbe5d1d4d6ed1210eaa027c57ef2faa08ac6cf8f48 304402203336aa221fcba0c1cb5adf6fd21de07cc43b1d2c298eab4012c49249fe1a5ad302200fd4a4c54f91eb5ffab02984fc757b52fcf876b5abbc46e5241d93d472a5b0c0 3046022100b393439419b4c454a6e9120b7aa4c6708e274c027335f79cd6a092f1c9c86fa3022100f3a56571901a7f7fceeb9ae660be76e527589f63fea6c0af37895c86d41cc116 3046022100e69fa63c77d768ae7f2fdd4c3b8b4d6e7379b0869a2c59eb853dd6c9a3ca10360221008d312da1a871ec69509fc0fe7f8ea3d22cc84923e496a550606a1530eb531e09 304402207f152284d9cd2be3a62fcbbc8057b86044ccf93e09412a5afe42002a6ff49b1f0220427aa11db3054987d13f2440bae9d5437fc6fdd839f267fd2e6d5304f6340267 3045022007746c28273e4ee07a6e604d8c8743d5c497d995b480d630d005bad3f99652c1022100a6d90cd3785cae2e0559e9c29063f62bc68b3dd29b3998dafc3a0865e1502e35 3046022100866be2f305612d117c3b667c7e415a5a322154c7b96e917e8690bc751fa21ebe022100e217e341c9cb071fc051564ceaf9e3eb338f751ebb08bbf9ea56b6a990a6964f 3046022100e63c3653abcdb2b6a68e87721077cd675c8dd1b42abbccf7f9228853f7179fec0221009d6bcb92f0fac02e7c0395750caf500b3df4b2d9345b53771fb8a532371f33cb 30440220207b9a239aa22a8dd7ba85ab113f6ca5c2f5a6c535d9a2d27118dbbd8ac1392e02203f2b1dc88ff57beb0b38c9a40d047ff4a7fa0749be710bb4ad37b39cb8098a38 3046022100c29fa074c78c8e0a0208c5e72240180e13a28acb285e53161be190b59316e33e022100a240a873ee548af18a9531b733b5dc3364988bc26f496837e99c1eb48d503eb5 30450221008a65c7857467bfed83fa7dfeef4821f45266b7bd9478f1490f2669edf3c45bc002206eed6e2073a1006046c5cdcac5d7d370a90a02f74f1d546519e93e4f4923f011 3046022100a1e1bb9067baf8b8026b21aafd5e614523fbb9566e6492fd8ffc9a986180a9d2022100e5763598f3dc748d4b32aefe8168136ac0e3983c43b9590991ba129f481771f7 304502203c76a52ae8456c84e89f0e89b1daffb0f4d536c427730962e3189c94ba99f198022100f15907c67c8122da466514914671a042f14caf109dd7988edf077244f165c07a 3046022100888843c9023e9db8d42f552753fb5f553e8ff6ba13483acfffeaaf3b0812b72a02210089db1532785d5b386604577faa3d2cbd86cab649af56a5a89acec14e2f9a39d5 30450220225084702c6eab601548f106baf32f3f358c7398999a4c61dc40a55b1326641902210082147121c9bf8ad802eaac1a66dda2656c8230d828f00feb7c596d95f65b7a63 3046022100f3e7df4585bd99647ed16f0b4cac6aac206e6f3c2624f54b3ef2461bcb598bc0022100a175916dda7e358aefe0f86930369cf689424317591ab5516364253ff040c022 3046022100c4f04efc4ef3b4f72d1daf410940a5d0154b3d4eae86e1a426fb25ea1d0b5a57022100808a8563ff5b8cf1c95d3545eeb8220f35371dfe406c543f607f6ea15bdadc35 3045022100870d144b6db10ec6c19bd0527463c4689e9eec05b854aea71c737edfc31fd67e0220546eb9dd60cda2fea5082038085e0aa1d867f0610f205589969eadf629a4f385 3046022100d4ae55ab1c5061ca6ddff0c667bdb1341316cc05e08680a0dc448464f45c0d1c022100dc56e2f23ba48054496b2cfebf6ce4add01cf75d7a893c13287251cadc65f531 3046022100f1105118235df14e2d36ffbd78200348f9e6d03c7e60b0943d28f06e71893c02022100d01cde37072134248880b882c73ba6f225d393c84ea5c2e531b18fdb5d831ef0 30450220179fcb0f6d069fd2cf25d0b4dca8348087e5a48ee9d75e6d0e27669c7a7af9c1022100bb54a79580b387dcb3aa0a2147dc0776f3c582e9fea05c57ba7b2f20bc6accc6 3045022100ca5ae62f5588fea092b7e8abbf44577ff24404dd9fa09f54b76caee5090a73b00220615dfe890c48cff1634af9a4c60671b0da14cb5e649f955aa8bbeeecfb0d6a15 
304502210082c3d28ba26a7011d1b3dd6e3fe6fac521abcc889285386e3b7dc1f528827b4902207f9df3429bd0766ef7b79403d79371efb2616ad8030a0206429da87dc38be14c 304502206ce15a6c2c763ce2308067284f076028fd18a87591c5bf07cb686e4af36e0e7f02210084fc15f0f42f2b5198219216eda0fce13e362dd17a87946defc670243b47d184 3044022047910aa5c6a9baefb98ebe3ec556682105fe0289679968db55a5918a1a5c617302207d3cd6f50c26a0e240858bce48a1d93b07efb34651adf393f88c24d9850b5ca7 3046022100a7cfb29c4918aa831bbf768307e66eb04554858d0ac14a683df98ee75e2b03c2022100fda6fa06345d07b7614f33ed309fd30b608caa71dbb87a3a133861963f3ae888 3046022100ac4872c1c40239b2de37bbd927f83e30865f9d15355cafc03ff0aa5abbca1a78022100ca71bb609da3a67d39956d3acc4e4a85fdeb8f8faaecbab9f0db1da7e7f39ff0 304402207a364e447bc2ac259544b7ad570a392cc3865617ce08aa96517259f19ef2104702207c5967662bcebfcc99d2193fbb7bdd0d99997526bb6eb5dbd6f0a1b900309789 3046022100eaca4b42b987552321a6d03bfdc125547cdedc1eee03d24503b393149c8900ba022100b4e3190c458cc13dc6b91f87b36c7961c8eda0a84e373ed8d2a08dc93db83613 304402201b6193820ed65c4e266424827dc4c6d4f24d2caf6f54c822776020c86fe362b102204439fe0562602ad03e61d46bd59be29c8971fd3df9e028fad5603691231077c6 304402206ced8f55f2fdc1b315ed8f8f79b0a92236e411d14b0cefff0e847d5f5fc4b5c902203515aec096410bc7156e7aefca5cda8015bb3f4eb2f1e0902dfd3cb5c14ebffa 3045022100a8cd63f1cb65390a57c52dc0cae922feeee72a65916a3ee03d45b8bb3ad20d0002205e6f0ec5779dcd0ce61cd41a0f10822b3dfe23b49b960023f91043a2d24bafbd 304502203b43a2bb540eece1c57a0c272ee07bc7a25a43b9e9f9f897b5fbdd8c1d3f25fa022100e022d69e8ef10e25d3a0014fba76a0a98ca9b5c39a8daa47f2cdc9a22e6bd197 3045022100ffc5229eac608ef14fddbba3f51df798e4a91331401df9ed9ead241010b3cdfc0220723be697737be597cde02e086cdf72524b4a8dd55eeebf4df4e9f749a278080a 30450220292932cfb3639dac15265fad1e8fa15a8eb48525e92293dc76feee07d0891ba1022100f82207e651a4a6a34113d47c37560e83c107e6ef39c8e930ab4bbb34531b280b 3045022100c8b41afd3d6f4ca70eacf23e0d837344b50ffeb0c3f5863485165c1d4dcd4c66022053391030cd772934dee0ecbdfa3a324bbd689fd71d3c5e05f7fef472cc18b440 304402207481cd128442a3f0cee8680d92b67a7abdcf8b557c64ea4b9234a43261e19b5602207d4dd5bcb6ef08944874ad71c44d287748e7a5fbce87f8520506bed5b8b3bc60 304502202ca4743a59224d9a9a8a0b6131d8fd0f5f56335fd6528b0b2d4410a1eff1aac5022100f6315f16abdea37c49ffd9fb8a4b95cf41b0f857e1f9aa01479a821f53085df2 3046022100df3fb0b00a96363742bb6a5f0f78febd35f35bb1dd859021a1a4257784a61bff02210089823687712fe5a1135d74c85e40a58cef65f9dd1ca3fe2b765f6711ad1b6174 30450221008f526d059b6c094540be978829ec5db1c6bcf39048c78c6c083aa84ec747ac2102206a5b15b0bf50f533d85194667fddaa5c247cf963446e2c070bb163e4e095d830 3045022100d291da949298a70b373c83e4c870e5de9d7269cf2b54a78429f524b30608707102205c3414870bd438f10c6851532a2ba4c9210009bb466915eed6053a456c7c059d 3046022100f11fe0de097c1a5f76cc64e9a0ea8d89b4252ba277428d156f6ab5fdb721143d022100ba657bcc4e990def5d04eb45cc588b3f1564f5f6ccf53e5bc49a5d61d124115e 3044022008ac1a7362823bce3d0c3c18a80b93204efb27920a5bb8e2935b63eae99117bb02202a5418c1a1014b55a6f3a5cee7bdd1755ee06fec2df2d25a8b466cf94a45c543 3046022100afff2f0855a14986659e5bd47883b246fbe4fc92f0d502e1d0296419aedebc6302210085f07ea569217ef73e3baea4e8911e78e3e3fa0f626abc669a02c52f37ab2ae8 3045022100ad432e883d7dcc2ad1915a2de7b2460bfc5a2e7be60540a8f98b2fd9f0e3c697022009da828f93d9d3e789861f9ae63061239d4a9ecc1db2eb18b631f92f547626f0 3045022100898c90f7fec522d4c222a226461cbdefda4c0606c2bf20f02e1c3967bcbf66ee022016c98f0e696f2a7e4601828a33a6edd02f366a3a083704aa375486c1191466b1 
3045022026279c92bd471a35bdd02e238194a17731a93819fa0b77d26dce63b64dfdc967022100c675363081d5ce79a061655e7bcf1f86714c590d486ac8777e86afaea29dd897 304402201db15e9178468c2ca4f91c64572ffe94ae46da10b8a85d8588f5a0e46f3e7710022049a71db9cdae3605f3026d8a9a771511a92b8be579aca2e3eef62c93e6da0bac 3046022100d7c8b432059515f1edb03f46eebe5f526431901368e1c0e022c2ee45bda7feda022100a7e3e506b5ef5582f49086c63ce893b1d98cc5333bf9c7e1a4a078a2cdb90c81 3045022100c3c94dc0eb084a9c89e6922cdd1d7333697ab7fe64734cf85384074a03cc188002207663345b32b88cc711c33e23ad31d2250d657e43008cf10306e0f242186cd7be 304402203d55fc830be83e90c4c9e9368f0a28e98fbeb25a8618b2cd3b5e13c2598fe1b502204e5abf39e643ebf3ea2e635d9e8aa6b7c8ad46dbaf76c510531802694f760c5b 304402200c3985bd3d6962e24e114e6e1123bac003ffe9284430bfa77815e6915ab0296c02202984b65a258aa670ba685c939bb7f59330e3f55352541c5cfc7417814204d637 304402207c0df1de1fe97f5a39389a2f9c2a7071171b0c06745397e8c050b3793eed04f20220414840b9d13873bbfa26a0a617efcf3be6afa4bf65f894d63872a1f366a5dd94 304502206c4df8d109c47175e1fe07784f34a9e2f7be351367712e0e2e27259aae82fea5022100b5fe6c94016928ba6df4a9421095000dbf0cb286302b5ba2932edf6f2a1f1cc1 3045022100a0bf97e4dc1c4fd693ed0753ca7747bff3a51fc2a21e763c07d4ead730e522b602200815c87ebf182fe93c019d07a6c6c2bde73e3da8be40e723bf63bc6c18158000 3046022100da054f043195f3e042369d55e4b639e8a98220363123bcc406b1074d5c94d8fc022100cf129bf1312540db6f1755a0cfd282d5792136f0765f86fc92d75e77b8720666 30440220735aa5550d35aee1ffc91f327f722a6c7e214fcd67c920e7e1467202e090e684022005725fedaa45cd7a25d0e9b0cc817a4f7f3c1dd1087dc7b7f9471d6282c175e8 3046022100a770161b5adc70430254edcc6dd93235af90f0c57cf6b7187751ad737fb61387022100c2e3432c34de0828050bfddfc160c3359f8f27f4ed8c1eb9902657c4eefb22ee 30450220550ec13a805833df786d75182a966b73a24f2a84b2d20d3ba858089b3ddeaa3002210093bef06cd3def7831920527425efefa572c0e5833998bb88a30aedf9c42be8c5 3046022100e37784003792f15f7d999a628f2c91e542e14ba0b6abda7c005dff9c0e7a850f022100c13eaf58c86295765f0850c6321bba4c12234176fbf15cb3bacd2641f827677c 3046022100c491009f7c15189afa9719eded65ca01eb4ff497a9e5f8703e5a5b4442778afd0221008e2a502ecbd8191fb423762a035efa806c162cb79e3a01e426526ad08860205d 304502206b113e619c7b4f6625f65c94c2a979ccb6c5119cc13d4c0926ee43e0ad7ae11b022100f3d9c38d71a2052d0e9448355985c11fa421ca8760176f31a4744e305833c6bc 3046022100daf0895637cf89c4d34cdf4e505d0a137ceb68352a4f494f290987bf8e1fb5e6022100d1887bc5f2931ac51527ab35c849082ba44c90de55f4c56a2d7d42558a27bbc2 3045022100e1c2df170109da28e1f4542e6ab7c34950013cd471b8d9c2c2389803dc3787e50220027214305649430363a2574e7d2a023a9d7227419a707bab74a14d3f243c4e3a 3045022100bbdb2c4bac8b305867e1d743d007cc8431be24a708e029c1665090073f0f4022022005832bf2e438dae90bd7701d1ea0f33741015888e5a1a4d73ca2aee7edb63d07 3046022100f34231afb7ca0ec7e1314f4f60350717c598eac082616ff86e4c303de3b371c8022100d30e6902af7841bc02a23d69eaa01db3405d6f2c82943af0b56d74c0bcb95a25 3045022057f576367b7000525f5f238cb803a31065ccbcefe83826429e9dfce25db54dd3022100802f60c9aa349f22fb5c4ee67de096c109618304713b6ee6754c3802d1295cc3 3046022100ee2d2300bcc375f47989f88345dfe1c298eafb081f63999af7576fc03bba937f022100ee321af47685ef4ed6ec8a48bf34b961058088028d83014abf2ea5e8b7991b0f 3045022100f60528a9721863c216d6700115080a78aabfaef9287711ea02ae889ee4e10a6402201ea60f99a8a4c8f3993bd266633773a54ea4521ba617bb5490bbcebd9a5f38c7 304402204c2e25c217e5aa73cd175ff4cfc33c8e3661206e178d5ead7588a1031c04482402204bddf4a2832af476745a2182d7fc479f29b70f51ed6c1c0accee4962bae49e20 
30450221008299bbbfa442c4f52a1878d3789804a31c8da8637fe7e266cce91d3cd2e35e87022052d1e1af91b50e407c4ed5f8c9f088fd0731dfb056d94a37d33b1585f115d3e6 3045022100deddbe64765022885d808e9f43a1c403cb117f4c6ef693e29cff70fd96c95355022043b1fbfa096501ab9f933036fa77decd55037ae7ccb0fa8e122bf96185d72cb0 304502207a44f7a5cf7722c8bafe4abfa9cca9d3ffe03627bd64802c6876e65d6c4d91ec022100b20e45c844e56954acc5f606f03983c4fde12db52434411c94baf1f1937ad7cd 3046022100c719e56df543dfba143ff87dff35c34911fc512a7c9dfd65c93a605d5d244d96022100a3f40b6fd903e1ca7fd6b0e11c60dfb8761fa685d71d1d663cba159d8ada7d45 304402200c4ae6da3e705dd41fd939093b11c69645936bc13e572396238892de8a535a51022022c53f5ae93f123b4fe868a91f99eb317dbc989ca9b7f4b3ec7c2736d0181160 304402205770a8097f02ff0d8a19a0deb35e710ea34f3e6ac48dcfb44f83408e0ae360540220043097a66392141fa22a981b6c0fa10c028c9425606f78865aab9df70e2e633f 3046022100c969f8cd8e4c66d6a5a1f7145b43eddf7c4ae76868222b6b44b222602505a569022100bb3951a132db8995b0a0b2c50e86c28e381ef313137f0e89176aa64a17bbc697 304502201d0875329c57ba480d4c359153cd2d1ba3f4b2df2c8e05ded22cbb31a5b3410d022100a8857b448e209a93279d901a0de884805cd384b0ac6f0caf53d6b43657e1ce61 30440220233ac5b7fc3d3f1fe888be0c6a63814933a2d780b3bc87aa2dc2d268c1ae12c502206fd0ab7a34180e83de83fee0467043e0b614275c57ae908c42dc2e19e5ce9d4e 3045022066bbbe187f4530e527ecc9692ec3e7eefc22feb7f45185aa82636c01d46e22c3022100b1b63341fcf8b9c3669de9183142d0f66a3bf76a87346b4b8d64b70aa59ece78 3046022100a48410f2a8477c8446f8e39308ce39c4ddca92c539d50b1d0c3967b5fdaa1622022100f2cb994e6585b1e307bd989c325d480c2b63239136ba758aff09ab847b0e9a2a 3045022100aa76157763ed736c406aaab62b0ee1c032bbad4aa27e81f1d9a28291d5973c2c02203b8156ec2abfd2a95bc3083bf2b72b4e5ae6feaa3db8e27a390429f1678d9d62 304402202e40aec4d3709c86c8257503029ea2ae768ee7c61ce0b173ad2094c8f12b48f6022064f1c8f748f950f71a70e79bee79e5b89ddcb2926fcd8700c8e187b2b82d4bce 304502207059cc075d1f62324c5861bbcf48e6d4ba2174c85666afe1d74751df4ccf0973022100b56ed00dd98a9decadca177e0c694be412dcd8eb7d6758ecac439a1da5482933 3046022100ce28f2c6d6f45d13c265a888e0fc45990ae141c9ff1bd12da1edac431a54668b02210094c151174561272d96c38f1b242577914ec37ba6462d2fcec5f2d3c57d83d8a0 304602210082f0a8b0152684d94b10ce1ab491a5245a03e8234fb1636f2d167b52f2e31e5c022100979ec78c25526bb73fd7d18c2bf1d98d41d074c2b785b3bbb0bbf1c0d78d5248 30450220441acef8c4712951784b5eaec906342a96e1c6fb37592d62e469dae086bc7fa802210093f4e788279737d274b718c07dc9de9579b8f38bdef6c979b223938019f670b5 3046022100bad56a41f34ab74c519e970b1b1ec4e159ae01a7d4e58e92f90c5479b6d0b803022100b774d222fca5b7633647f4252a835d6480bcb7e9b133cffb0107f3f78bf8edb2 3045022100d6f3c7190c12014d67a4ec3f4bb23c999474c85a37c0666ad0e019281c5ed03a0220124b5f2a3c92eae09e4c69da33f6a306db0033683d738eb29f20796978ed3736 30450220061715ced209dd2cc6a2b72b128594d06872d6d1ab7ee1fd7a2fbd1903a4c80c022100a4f16a61607de712e25d2a26d68951c3c3794cc690e47e3bf8e2f704d219c888 30460221008ab1c03c8877911cf655cc6e5ae71e1c97505375e83c6008b7fba69ea2fb74af022100b2e4097655858ab74f095084a4fd6d65ad2fc50b3837284b45245942ca358bf7 304502200ae29b95aad5d07c948f9d14bee6a1ccc45aaf0985e164b64b2a74c896780c01022100c34b3b69ac23038878b162516333bd391587dbe850adf439876d0407eb5a719e 3045022100db6912c49e99ace7fa89b1c9f4b55552b50445ddd1169746452818a9a0ee9ce702207bd732cce715c813e749a5484d03aac0231653357ec1ddfb321be524b442a84b 3045022100f1b83577de1fdb53744560eae505476448555b61e1323553da3a222e27aed733022024eabd29e474c726962186fa93b82a2f20f12ebec83b133933366cd8d687a0d2 
304402201820a0e28c9273547c4a074e234967a755b1128a6c1639eff569883dd5b49bd1022038f89664d14df38b00e9107835f3e0e5c9cdd6dd39d7c9c5c6b88ae27c5b6e9b 3044022069eac3d6893d0f17d76e8e4396b39e1d3ebd739dabfd703d7eb52157234f005602204d36d1dfc76349cff991163e5a0fee0bd43b53412bffd979ea207b533aedc905 30450221009259b72d14e38c15722d06589b31a348582df7cb7895cd219bb821bb5b501ad80220785ce9bbc657d92b5ed51880e7513488daba0edef4780c1483701179e0699e30 3045022100bc4142c69966b499ba785012072222a1889b77b60c3a8fc2ce873f6344ecc68a02203bbc379b6370ebfd2a9b6e85f926742bc490410f493d6f1c43be2c9427212982 30450220341f5abea0ca5207d6d050e581f6e4f993d42054024d8c9a20a987a64114e038022100a8a5f6e9e4ba4c6c1861287eb52a982c7323f93b1a627a8815efecc7e909cb4f 3046022100a3bc7aceb15ccbbd7512b9b39baff818a2a5d1a102580c3477738e82663c7780022100b03995ae1619eb3453294978db9b805bcc34ee8cd3f9e2fa066e7c4490d6c944 3046022100b4128cb98fdcb79639b3f80d6cbec4927a78f560c6a56af56a1481d4101e33be022100d10dc401877f24cfeb8c6248be3df6852607285467c636a6351660d2fb099046 3045022100a6b0285b6982cbc6b88f3d38a2776454871593c38fe7ca837dcdb679b3c3dad50220476c32e2ffc2fed03c610927a3e3c95c3e5c643973815c5de23803bd93492fca 3045022100f178bad20da0616ca38b7e8c78309836d6cf6c90e600fa966bd8b4f923de36c502204493b4ea5e2e7e0f19b9191cd6ab4995198a3e9d107578265dbd8432710e8af7 3046022100e3563aaf6a67b0a21c0f8862aca4d99451c659558fc271139795fd3aca41a9a7022100cf8360b54b56e4f90cbc157f079a602d5d54bdba49401f0ccadc0b7027fde7d3 3046022100a7d1a4c09ba19a121eff9a93420864a96dc76de43653dec88ae94a29c68486e2022100b7829ccd5934d5214c49b16d4f093db76ec41761e84d2e615885e6249d3c4adc 3045022100b7830afdb5568083d2fc327781d490c5efe270be8708f1858bb7cafe3a7cd1d00220578d4833aa01ac8739f70f1bb07a5d8c025f59d4a0fa5e26320ac1d3f35d1ea4 3044022003108c4604b790aab30d11add55edf1d093c3b1949ab4c3e0f7f5284ae6224c70220026045550d59a7dad8502f994ae188f51e84494c819110e8bbb2da1176443390 304402205e88071b4059234b34c71e704644aeecc906f9796e552938ef93c75eb4b2696d022009afd5fd8ef673436a6caa29d8f595f46d08617277bb67d0448ebb5684f5f407 3044022074d82c710f90a8ff37be5cc85aba0ff2666cb982920470c0c1cf19827c15e23c0220561dd131407f72890e1ead70f07254fc0ceb2ab795948b87769b716e0a5e405d 30460221008caca7f2fd32145d3f76fc7b8637c204cae9c8708b0503ef22d5fae12ce6a304022100f22e0f5827fceab04a783a6b818020ab0256fcff55b23f2e8f8a4ab4d13d29b2 3045022100f4c02b07d9e19cebf40c789dd536e6172242af6d57d588dcf5feb010dfe87fbb0220547d2406731173192c9bceb2a7f3103c382af02de183db180bf4cee008df6e08 3046022100a54c56be44cf9fe7a6595b008ee6ce5cd1510225f331360f4f17ecc80eab3205022100b112269b7cc9763b26e398139ed1bf0aa4af63b7a71996b1738f60957a609765 3045022078e5a47c4560e9c5e5a59d205d9b068301b5768e7bab2899bba598c14482f6e6022100e3bf83908cb0611f13cb197eb7dd40bb2846f1240f87ee58d945caccce83a4ef 3045022046cfae1e2e09593944b56b29b25f2f6fdbbcadef09e08ccec71b143345d2ed18022100fbe72ce194ab0dd860c7beb7bd97a2da03b4701dda1e195ea17f8a29d020733e 304502206cbe8e2204bc9f037f007cbe0d46408c73df1307a8f810cefd9991b60edb9b7d022100e619e1fa0a7c19aa81f764764d6e8667168ea3b93478e2e3d9ccd2b5e76a712c 304502210091bca0a6560066a8f289390284745015fcc0b3cb332d97f4bee34531c79a89b002205b4f7ee3997d657c862bf4c40dc869a34c69278ba36c1ce77456d01a104cd0ba 3045022100a977ccf69bb05a658c6ddc778ef2c5d87bb4bd10f48e2e17a43f1f17b04af69102202b6978f3fe687923cf384c767681474dd770ac9034c24433f745f88eed79617c 304402206f0f73b818ac10a06c41528d3dc349e8c15314c428a3b1d3b4ad6ff52c56e84c0220096706ba047ca639fe420b5b8c3530e3a5d70fd9317fdeff3dbb2379c199a02c 
3045022100eab38f59e72744b682f94ee666e47d8e43157b96f0bd36f093f2bd0e9a62fa5702202dd75d241dc2d5fe07d228cf5e5315c37b551edee47221f421e3dea882de3887 30460221009bf427740cdf491a0b03c38bb4a05ef02c874b59534700f1416d5f71a327fce20221009f0d38728a7e5806880493af6d05527eec6022a00cfefa5724777dc329943693 3046022100b7e8e96ecd09e10a9740fa4c45807730656ccc06109fefd4f80b8cf62bd78bdb022100d9789c68068d75201b140df966cb3496f0aec981cac506775e28eb0fae517f36 3045022100d2d89f0319de06add1fca08e5a3da260509b912ada8a840d37c2ccab1659357e02201f8a7ff79b811d4dd71a366fbebcfa5b9f3892f46e5962d48a3b5a57787134e9 304402202a0725215e08de1bb41fe7b5643d9c419adce1d0646ac31e0f7b98337cd552fb02200fc3775b11245a05e19e025fcdcc18e0729dfd3fef37ee673faeb217db064906 3045022100c7f7a17fa4810b0a37e88b378b74fae9c53fdb6d91968af7b02a6446014d317b022019ced62721c562c4be11e997e25af2cb3e3b8e82bb83aa11ba43fce65f85bfb2 3045022100903f33dbda3c04fb851f3432f3ddf8b333f7fe23f178354f4ecd50aa46075d9e0220426e409f45a00a95cd654d7fd79ea1418668b975a405500f3a8cb54d1fb95893 304502201d23fcac22b315edd8baf42f72f478511dabec1a9a6027a2e8f9198daef7ce74022100a65a521390584dcd90b502a6f71d9789b0e4095631116ee7b042bb4d358d5df5 3045022049354c34e41f700e6dc07ddbf8de9857442fa12187bcb17bcbf489ce783e899002210088ab8e1ee74d5a3eb07aec85af5b094adade8bcde4213cf8917b601f33cb61f1 3045022100d93121e7c9dfb7af81caa04911359d1fa81c0f507c253f21e11efcdcbfb23cfd0220133c68a7bac6c179f0a9fece21300c5e8d2a25411125232667079c0bfa6bf405 304402205522b00186359a936180d99cb485cb0fe933f34b919a7283f8a91bff085b4d9d0220401dfd971214ab2b51da6f912637591d673141630ebba9db1a72f3bd997b5b21 304502210096fa9b68bc94c53ad09809d91abf831c5d09bcd95d567833aeb7745c5aa2760a02204aa744d55ecd76a28f9dee70b03d80eb0597f5616e15d241bd22fd6fafb8301c 304502207b5c1bbc1ef092516df5eef831ec7383d7249954fbf677c3847207188607f300022100f022ef0e8da29c5bc3e7717d69ee1719e51e17b0d05ca3bd6a0c3aab8c7b914e 3046022100975bd8c2098a2ea7e36211fce602be7defdf921614c721286febfc9560c396b4022100d67937e27b1142af2c95bc6ada9140e541035ae30a091da37812008fecedc0f0 304502206a28589457766ddb95b6fdb7ea1a55be921d9066904f7d51fd40bb8dfcaf73cc022100baf47c64aa7e131f2311f796489afe362105c1960e7b16fd1cb835ebaab80d89 3044022014b9feba3b493c8fe004b6350f3aa99e572197508e9bccf858d2c5d99659f50b02207f292d1622216905a692d7aa99d7bb991d8e73187667b6dc75472c33a4bf613e 304402206d7b51608001dc58d0317c7c548c34234a7f255e2a405780a71700ada0f3ea0d022042a3a5b3291a6f5d809de3fdcea88d677d0eb40c3af42c65ee0c73b23104323b 304402206a8340b3b5fd6db82260d3d65714f4ebd0f46ae819291dbd5ec943cc4e0858d40220483832b367bdfcb64a64163edd3cf82e18d823ca5356bbc7bc28696c02703d81 3046022100fef1e232f6022316e1d41030b4f3211e73f061217d588543f5fab2b954c6ce5c022100bbb42cbad5125781f21d99594296be08f32c3bf6488f7a8b382284fe00a7c969 3045022100ac79be9c493b6052add73a055003e017779496818e70c2c7eef78dc2880ae993022009d4c9038683d493976afe2b39ec782264969bb0a9b3485cc4a424351a9ea386 3046022100e2d1c68d194892956d9fe704ab813ab1632bcb61c3ea32ff5da0927666012aae022100d9fbb5288b97276495b65fcf089955b1bc35928a3d1cbbb1118db6f6b2bcd9cf 3046022100e6eed02ac960e60673eb23b23f815013ab8e0c66f836d1050c43da239c59d5fc022100a2fcd98294e011299bc494ca19acfb5a3fb9c75da9fd4dee42b0e551dfc8055b 3046022100a9ff0a2b7928f4fad8c12763ef4c2ae45c95cfb7003ca07e927d6d9b54ce70b5022100c1e89500017dd6be4231947f71981b4d81c57a3c8024ee7112795932a12a658a 304502202a7bdb2a3e49f9054f8f74a783caf8ba104125e69d504d327f6c38a99c941616022100c92e9e78dbedbcc2a123d328c5595834c221df0bd2374bd4870f6f46e73d689b 
3046022100af1c2565059a57458810faa4ed825dd216b2de3569c305f49938fb64f835e41d022100f82ff39c0aa789c9acb74bae280c5b19432eb5428bb4649c2b68f2fdf95e2dff 3045022100e95ea5f6d65d60d424fcda0bf03308e6b308ccec2068d89bba0264aa82d4db9e02204a3685d2546d5783f2adb8db1cd7ce6098d22d32fff6b184aba1df3929cb7e2a 304602210083c075c9f0bc86ec670796e609462bf21edc5e3d1142f6c2010841800b7cbe03022100de5c81fa2edd9a1e055c8a8cef396bc3bedf3df9d5277271ef36164df1015423 304502205ba1c1fd850a80d6fca12a3374dc5ca093470906c59da82738feee5242288076022100c74d688e5406b9ee87815ef4a38db9daefb79db2d394e6bdbb94bdbc5e7878a5 3046022100bac58363942c41bf4718de6fd1b2653d123a494f96b086fd65b2add2f9b1c120022100dea488a889f7899172d63e160c582e720ec847909c87e2581006dfa894f6a759 3045022100e51a8b01fcdb4aeabb4e38dd8732bc6c3cdcdd03bcbce41f67dfb5da4078844e02200df52864e5cf6a097a5e606cb9ea47faf3b89000a0f5be44342bdbe84ca279c9 3044022007ad18b7fd5f10328bcc170ddb1e4a194f891f96a06ddd555ee85c7eb16aaeb002203925e4c7b8c738e5b1cf6f83771e2b30dee7b892440c20194a018aa0f290609c 30450221008720d5ce3a80e2aa44f64df54923830818a30d0935e635b517e7a3d5f021b6a50220528fd1e92d32735656e296954f70660ef396ee6c1c65c287e2e2615749a2ab0c 3045022100980e5d00e746a2ad4f947e5a0ad9d15191325222df49071cf9f708cd5f09097c02206a7763487dd347bd819b1e6963e133711c94850e08b971a2c231411215e36844 3045022012a6341dd14d5e1fcd9b9a8eb27975319b1cffdc35eaf2e66cdbaff948e5e69a02210099e48979bafb59971e8527b2dbd5d24d0ef973a1efdf9709fd54be6c0d7d732d 30450220389f303ec7915c29597d1e8efd7eb48e37f85c262d87e7c9f0a5cbe1908ea030022100e57a5bc4c824c12f5edd8b3b41bfdf571e7387d8311929911fd58dfde3f8dcac 304402205826dca8b47444930b153b065f665759a427878107e7057ee20a37e3ed9d5823022000e6c83bb2ce932712136b00cc388badc8e1b47001510c25b6979fe8844fe521 3045022100dbae4fce54a3c7489a0dd982314ad599bc9c82d42ed66da896d98e4b7a5ea7f5022026e6892c4b3872275f4285b561ba1899dbae5129212cb41316f517abb7702b67 3045022021ef7ea1dcd6da4febd92df86042d65353acebb6a568d93802eb2bbe5ae764c8022100d130e3cc5a82042b91a7637e5f67513512e89c0ea9bf9a6758de38e8a4de68c3 3046022100aa118454ea5b5e25e974248deb863d8a3f56cb64cdfbb3d5094d334fe261395f022100fb48e943e3122d7715fabd33f04e409e3af8584d2fdf613d2404a77a62ba8c26 3045022100949e928aad7d68ee0a92bd9cd7b71a6b43c1ba32cb7fe636a0344e5463831c21022056374b9de967d49b94bbd98749326421762309f151732180a151e31d167f58be 30440220600d8e5901931493a37e9115eda9e82576152d7c91105b9eb822e6f0cfd004c10220240ca09b50e7c9d1936a504800297bc16ce233d0962046ed86fb0235e2ef23c0 304502204cc7104d15ae604bebfabacb57696e2f8c808e37ceb94cefd9d17858c8ee2232022100a5baa9b0c64adbd03de101009242dac1a087b0684ece310eb60657fa5402969e 3045022031c8a2283803089d3261c2de035542d5f6482fe5245a2c186675aba9f26666b9022100af1d9760541fd81a95f94c1b3b2b7fcadaec61466a0d36fa193f1995d8c8e685 3043021f7d816d4a459c40716d7ebe0ce327491e4afc2b3e485dc8bf8280cd3a272c5202201d4158ce76be71f6fee48a8bd05c5d2d0032d7902e52a840aa4c05a10bb0ab06 304502207ddf4d1319945ce27e7b22c0ea74e75fa091259f5acf494005506f684248c9d8022100eaed29371ccf46c7e3e2acab149c2dfbc28fba19c40e994509f5fe7ef47ea5b3 304502210098fcfcfeebfc56c3968ed48945e4d990aab7f858c9cbdde30ad0398f3c2efbff022048269082130d1837f07744308ffc2b8e7abe84fcce4f430ac385246c76908f55 304402203bb20fe763f933be3dabfa7e26829d00b82f71c1b0b34e0a1a3941950ea0c80202202169d5453e1bd863f7768e85f20ce4f59dff3b72e2f5497e48eb3320a8d9101b 304602210092414c86b49b7e24bf469898cec633b050ca69f9e4b19803c86ebb5588a13f93022100a45e37a3c7f52ccadf3426bc1c9d36b51d8cbf94178bf7fe26742c698d71d97a 
3045022100fe6476823c2998e639817d979cf7df21354149e6dbbdfe7b6171271efd31e2cf02204684fc8d8a18598310663772c1a664462e3bddebc61b84ce1f22d23871f0fd8a 30460221008214b62b6175d5a2921de7da4b5346680bb2229bcb83afcf616afa7d6c0d36f3022100dc9d884da65aa7a714079a51e8f74e6771214587ec9380528fd897c123f4d794 3046022100b8daf341bc883e9c8778c81a16feaf5e5eee6c42fb83c16ffc11bb28d08b88ba022100a18b46fc5264eec83c19cb9c1c75a724b8c5ba1b438ae645b667cb123205e53e 3045022100f85428f185d06806241045b22d922258bc29314357245d50982f46c9e8bf7db10220520a3c27a143fc812de12b9837eb6214a53aacc0bd75ad24bb4788cdf7032f82 3046022100b752391ac3b00ddef41ce8c0e9f43c6001ea91f9e62c8b8af2860bfa69d8b5c502210094b682ba1c2d81d13e1e0e69cb44a193c1499a8b41b6829db7600c28f9253819 3045022100f5170583910b10b63fd2dedb133fb6da0733eb3704cdc4bc9e6e59eb927832ea02207c6d8f96019737c52de3d2d73bdd174892734d7d70012d94ee293f84fc486a16 3044022055dc7a08c167df0979491ca5b88c7600d08e87148f08a442bbad83fae483c3730220163b367f9c67b88812df2e9cc40987cf2c9e7d00ae218345cd4b4ab396f5c269 3046022100a26fc43bcd5a0873f96dbb57f6784dc87c1c5282356d2be4713a13844ec9abd2022100cc6546bcb9f16d14668a6ac60e3833f602e0e6b4069fae3646dbc28611a11335 3045022001d5f5f15f6f8115b1a39a44d551f7fcd236c3d1dc73696b0aef0676042c7fb7022100d333491b7289842418d74c84faa146817d745de18f6e06ae69c701a308fbd5ee 3046022100fc90153f47dae2e4218c862668981317d6b0f4b7c294d39e83da82fb45d9dbe0022100d04faed79d3294b6f6e13180217d52e921f47248292eabc7c5f0609e495c82ad 3046022100e2f2abe157defaf986c5f63840431506907d76b90a871fbf5b7cbaeceec2e8e4022100b18ca46297657faddd0ea477228ff8d0624e546c018ac4f570d8248413ab41da 3046022100a40716a7b0a3e9a0b230ecf3b8bbc065e544df24700589a9fe8c41faf12c07f702210083d85ad85d06194b381ff3e22167ce1235e9bf82ab13898a20f98b227fa3c602 304402206fba611c0f23894901da64b907aed74a97219327b00dae2e0e83f84086bbe8b90220236abc9ff0178ab6dca86e1df5f6282da33745256941f54a41dbc6e32425d124 3045022070122b343698aee787acca4e40faf43c07d8fbc874baf541ce9cbdabfdacd7f4022100a92d616b693bab6b79e8202a0db0ecf5cc7ae154ebba72066e94c2368b8c79a1 3044022037d22f5ee9553db8e6bb05d9ac6a4ec184363f0ed44335778342b942ad916cb50220476d4ba7030ea7772d012c481d93587269281267aaef7f9fc6377c66c71077fa 304402203e784ee330974927292e0ee1d2e2c6a2284a955d8de70dbc298847ed7a20fec302200d5dc0138c1dc8a827beda032d6ad6b79dbb22976c0ea789c667a030bc99b96b 304502205fd8e5742a1b0edddb33832a5dfe8358efcfbd9d7eff8e5c4f042fd539976377022100b3ee0ec0839aae802e43ccff9219fec1fac477afdb26e1bc00f6df6733184ee0 3046022100fc98574d9cbb4bc97863a96718408c1a0df9b7c0e3e7d2c03c9042fdd31247e2022100d767b3f5b8dbc3b01c00a19c0a1cc7548f8badf1cdb7c30ad845ceec7dc659fb 304402200781dd98305d20bda04b3d4e241d720b0dfb5db6ab7a7dd0b5707ab0c97ab5ee02205787e756a5f10a4b4b504ccb7922c40102b284671233bddfebcbf90a8ed977fb 30460221008671042b06d14ef78f3cccc49670093001ff0decbd24863d6b85101e396aecce022100bed08a114e81965c9142b700eeb0823027d8894f719c99178789c2631abf081d 3046022100a0806b7c4ca339ad8838644b167be91473824d8f8ae1cd1e9f55f5a0a31a622b022100e5c506b5934eeb02708b4d0d90cd04815d2df78ce47861110f3e991d58055f4f 304502203efcfa5925bfc6ab6b226557880365d3a942e114f61540f6e9f68ed2cac83ea5022100ce94630d37d4d5566a98c5ad13bfe2f5b92f9acc31570b43e44de685e96d2fef 3046022100cacca52ec3a7c86dacce9b7317f51c2219c8248d1897284e268e42264776e5c702210086ccbcfd597924b842331ec5fde4a6ad69c1540adb430e6ce86cf39c32851855 304502206572960ca1d2d68a7c5b7468a39d7dc9096ac2ddd6c3e6d1aeb83427e30c5bd5022100aeb5231ae4e4ab0f4d629da188ca21839ae2d9b519a46c0b9ce5b563d1ab8171 
3046022100e1273f1dfca111616b8f6dcbe5cbb96992831efc02c5f543e48f5ba41be8979a022100835182ae82de5fdb6a957d91858292fb1aa56565c67130c822fb5ec50094e138 3044022036ae34a3674cc2af2e7c08e134679c4cda7a7158a709310a09a9b3fca13e8dc202205f5b46be01bb0a8a4fd8db4a40521d23b53af1de9079231e312db507a7c2a33e 3045022100a31b0d8c826fde8746d576516727318f10e19a6a4615a5e852066e2a0cc4d6ff02203c90e276bd0e3ec98de5933596b872b5cd491236faf08aacd4fea24854ad3eb9 3046022100f1e37762098dbb41e7c153e646ad034818d3ce3c9fe2627f7549985b5471311e0221008968f44ecc10a195cfbf1244f6692e1637b22e0da7e4930a260d78399285527a 3046022100c88c356ed94f42a38d1973fb28ac57cf2b0cccf8283b3967d5c6ec6781832e42022100c7d02c5738a1160b89c0de1cbdd6b674d8c6533b55146c7c1a85ea9cd5032b57 30440220159949257cddfed0be6ffdee0091bce061a5c789ba881f5d5a678504ca3746e3022062e2350a831e69ca8a845c325a21bb6140ab9d802ae140a5f7a009d1c016c685 304502202cccfb62bcf06a8d4fc25de393fe1cf9615167df640321b1de837f9d32bd67c302210097cb2f8555c98c7b91c3505b47c4d2167df284d01b47c177bb92c7655261e261 3046022100b8440d3383af4cb8bb0223a5a0e29fd5ebf13e8b90fb89e2884967ef79021fed022100f5ba53214ecb023296a443c5d995a959650b1efd814200246a1a72bce1877f84 3046022100e6c2277d4fc059bde18c3eefc265fc8ebba9e4dada2c40d2fa2e729126a482c2022100f977d58d414f145f61fc78316f47aabbd1742f361cd6bf1fab5d20237d1e1951 3046022100c2b5af4f04fb8d2f98f45ffe8d6f9e8fb2f88ed14466484013023b34c75740dc022100c0ee30c8b102b841b34365c3be7dae6ed67c3c4b4b6c2ec8f9d97f547d7d616f 304402207fc3b199d4319bb6ee63536fd26145e2ab67ab38954115d13c938a7e8eaf4880022031caec64e8ac2ad44cf9ca12b80dd22eb6ce839c41a8c245ded70e27c26c8691 3046022100b71c34e8d0d3558e35071c840167371029495e627c56e80acb5973777f23b958022100a21a0e522d065b59e5056d9f1c90627beb340ee3d26c0de6cd927246c90e38c7 3045022100b16bbcf441cc3384068b1b085c0fc87f54b8aa06330db8675d027872fefb70fc022050e6421321175a70b1ff131deaca6400f05162e55971114b3010f47e2d030253 304502205db39991d2472715091533ffa3c2828aea4f34d9d08f01b00bf7b7f6e0d85b75022100f66deba76b2052e3317597f2237dc0f9eb76df4c7c0bf8b3b8fcc953e992218f 3046022100c19d923a9fa0b299fdb5bf3d8f8337acb04123dfa37bacfadf3c304a3afa7c9c022100fd71d0d2dd30debd1cef3a3c141a486bfd7fd5fd5ab842fc48809be074749adb 304502205f7fc111f47a73d22c971435a9c1b22073461e7b513f7bcd29ac78b1da9198fe0221008cc7e0496f3a7e2d58d3f270bc4097f1454cad0d88edf624f23fd85d8f1b727b 304502205ec53c398c8f62c2606b49cacb22df115278ec9e79085c0249f8cf26f84365d6022100c1d1fbca2b6287b0c10809fbb5d602dc42cb5e9e0587bce812aa4c57908d86bf 304502201919b42befec846b04356308671a93a1321db9d942f8db766b7e9dbba7cfa5c8022100de07466acddce346f69c1460206efb21d0b047c732d8cbec40fc3cf1788ded35 304502201ee0e79413167186f3f5e1997022bef201312c5a11d139f90a1ae385ea0db5e9022100d983ecebddb411bb9ff0c5c1f07a4817c69892ffac26c8462b0c8cda512ad300 30460221009a0dca6258e7ac1f9064b4ff7abe9af72cc56675c70bdc6399118b4ed88fcbbd022100e34e6126c52d72e99b306d0ef7cfeb210a9ba97adff9cc875a9f4b05aad4e949 304402200dcaaa6ef3806090cf4082d1b8231bb0f20ce2464e71acffcd1c77526fdc652202207fe97e1416905e8b6230aae29d73548abacdbe0802e379025e983af4a5754e58 3046022100c702bdb5aca7f55f76c318797757573e84ee4c92f6c558ebd62d3d626d890c1e022100c3c06faaa434a20718b18c0c7eb73ea9a7642f11f58f322be251d907195b60b2 3045022100c8768d99a8d9f9562285ce3b7c39f64c40e4f091d5f142a51c85e72d69b2e09a02206e139f671147b113110fb06c6bd34c61ede32fcdaf86ff0436abc83a7c133293 3044022067b25ab9a4bcfb342a2f9588c45ea84d47d10836ce9632459eedab29213289eb02203d5cd94557ca15b94a948a0210d9906c4fac4d31410d4e43b8e2b9d3b511a5d0 
3045022100cc40892e5758083def9564e344a156b48ad4bdc2f33aefcc373532c3ae33f308022066b25524c5631dd002aad1c75624a2dda113632177c2c1ef777296ccdb814088 304502207cd0ac4f716cd3b7406f6a6e2c045c7d41244dc03b75d3b889021623b7142343022100dd1f71d5564200090d1225094f26ebe86112a42265f63e6f3b040a8373150f95 30460221008de6dd04bcbfc1bb07caecb1ed88c7e2bba8eacfe36c4cacbc89b20e23bde601022100ea0702008a1c731351a057b75a40d7c6ea372c2f5db9dedc1a60b6518bc5c8e2 3045022009584b022828fab3c5d92c3e10b0366e89c32908e2aac94aa36e62d07c2c61e4022100ec476c6a10c6b655dda0ebf60cb0edf1070ac52f5177b09d0c528d11a225f8bb 30450220255140d6d6cbdd4de008f5ed4d0064210ed9e21bc25c936104e223f25bad30c8022100dbfb9b5cb2bdd48d234400053ac03b8f7f87f4d1dbb63be29492a82cc0ee11fb 30440220103a8ee0e203c8cf850105f969a1bfe381444168273b28347bdfac5091478c3a022053c991cccf1066fd16aa5aa3009b6d573e58d361d1ce67158a8f939ea4c9e99c 3046022100c15b1debd63a99185dfe07bd860a3bceda5f004038af21eac6d14a0b977de790022100dcf49e1276ff203f7dcbdfb773cfff395a7107c920f2d9cf24dc90555a0f5a77 3045022100d60f778c1f11bb4a2b6326086e42fa84a1a272dacabef62fd5c1d303eeafc65702205613a57cb1024a985498016f495496709bc7706f689767d5fe06ce36355a20d6 30440220112a5738b837909dcc85b0d9261dc8e87e6edd1acae4a46bb26821f3ff963ae302202199d659662096f024c5fc101174ecb6c67e36a1486243a4f7fe59656c8fda80 304402202f66ece8c0475d1be6e4b1101875ac85c0364232d753dc7046576df1fc9cf3a002205b57ab251d9eb93aefec6865c36d316a1ac3282059b24daea8ab491e11f105e3 3045022028d4649a211b88190edf334ba9de42d7c07d4c9bc5b75c11f3fa8bd2dce9d2eb022100f4ef64258e8bc9f5f5d8b5b1990f25a5a4fa85ba5ee7860ed4190afb12ded4a9 30450220036f5acc504610e03916252bb510f4daeed0dd877ceaa86124f8fa5665d3b56e022100be6308b5cb2bed59f2fc8e7f82d5126e825941884a1c7375b9b20480549c0c64 304502203e3391220ee2eed7eb083e545113351080f6a5d7254e8b194dd2a3eb13d860a002210091afee35cdb34ae870e3a9233a4eb4e88f5c80f967e8e5d6a0c0a5e9f5b6924b 304402206166a51310c65d6adfce689bdc0b41ac899ca33ddc129fa8aca82dad31303414022069dbd46311d19e0fb94f78048de411af26a91fa5cc156a6701d7f9e76f6e7343 30440220145099b516a80861d625d91b0a16d4278ba33571514872c57a96b8667f99eb920220778e8b2b9b7d87113bcaa8f6b971d6692c35175216feb7213bfb5dc3cd18f828 3046022100a85bfdc0b5068e84d2863ca0d3c1d4eb88f78de977350a411c3250dbba56171b022100f469224c8d33dd491c02bc16e8c21ec372c36849210d8990fde1cb2f61905637 3045022100da647d8dbe02f1499c877ef5ac60d4e8522f4a7defd2381993edf2c9bf4c04db0220145000431d40f9515bb2e1a49a03b1d14f677ca887de03fe489680d1cb87a332 3046022100aa174ef7b3c3af61d49fb4adc686b5203a3b9ffc66aeb4f8de4cb81aac5e6a8a022100ac66c2f372f1a07003e910904ced1805f4fa7a4b94797a88804487d8db07b099 3046022100c13fdeabd5d701c67811fc2afc03563759198196787afe1ed11f326c42c86b5d022100ba87d9ac8deaafd66e1aae6f233cca2c925c56cd3be548d2f336e3ca9effd8c7 304502200ac4241d3c3b5a4cd9ebe1c2e20c6f458eed949027f5ebef03afa778c767529e0221009948fdc233e732444f3d0221b245f8995d678f82401713f54148f5b8552515e6 3044022058d172b09fee4c151f507ac0bc0a7b4a7d071b3f7b257d71316064b4e6bbc924022078ec63a1802af0a6e5ad11025fc19764d27b59fa27fe04ca325b7d5b69abf281 304402201e77821836408da1a2264ddf17c732d620b9cefec525244cdee4be26b7e8f9dc0220496a26171e436040276f6a29db857012f1d1399dded44ca54e82bb289a511994 3044022037c946b9111e89eb03aa5f5015d88a414d3c31741f6cd11c7887c1596b747491022003dc26733cd8dcf884a815d6820fb50862ea421801578af45152c2acefa66dd5 304502203432fde4047ed7611fb925f8e6d9077d9c9025cdf1a1e916fcd531fc6cca5df30221008d61614ed5075348f83dc7b6ff309a59b99a15dafc7c309527e8a263cf79c2fe 
3045022100b3eed4b51cc770bef3701edf0659becc8d3f796311025af2300bbb0236bca36e0220497d32a8ffe14feb98e489c9c4e86bb3361589c98c151579b9306881596b216e 304402200f59a4c3948457791ce0c4adb40aab64cb20c1c882b0ababe251605cc3aee9860220593500a360eedebd6e73f30283ce976d07ed407c6a390ee7ebec4e91869c2943 30450220100955d9885462da1edb37434af0f927e2987141d8572d02b1b5449e4a53b84b0221009244d6e65eda1dfb1368f11c4c7215c0f0e777add1be1c105c8107a8eebbd6dd 3046022100ef417f0378f05ad287f7c06ef77d6f3c5bb2f9be571742dded07187f7c1f64ed022100b02552b2c7fe79d0f0ad43baaa9c401b0001c00402455518ba7e85eec1bd63cc 3045022100e195c397ad236f53cfd4b4fcec8c9e73fbf79f98cce67212b6fb9864d622aabf02206ec961a20bd7f83ff4f240e155bd2a5d4596e7f10bcce7804e9ab0d72f3e40c9 3046022100ddd9b6402c16067a932529ed1482726128cfc0c86c9b868402581cc68fd6fedf02210084c157f478f837b20ef5999ee6b398ab4254ca1477f97678a1b9476c5fac01b3 3045022100d243dcc2975ea5e3f14b73b0496a25118cb91ebbdaa5d6e1667d042dc85e70870220656d1d322d33ae24c9116e3b8ba05bc4f724f2cb17b59c32bf950d44aad1656a 304502210083fbed29894aa43e1529ebb999709c2c27f259af1ba14a1ae5b1e581928dd7a8022011bd6c8f383441d97816d637005865d0fcc830503057adfa5f3d7a18f531e5d6 30450220409cf63889a405442aef77925f59d8004a54ed3a6d7a75bd45c185cf47f5d231022100d27dcdeb3a9fa3237892f4170ab3467dc68f0634d9cda03dcf728dcf0e440fc7 3045022100eee40b62e6b1d3c4e757ca1bfdf44b30706d0e216cb70c632a0b41e61ce8246502204f0ef60c537d75f64d79a54c1a0266c3fad7b8d80eccfe4acc9919589942194b 3045022008501ea9121622c59779676ec54eb237a16be16794a6dd4068028e0a1a13ca84022100fa805c4e033dffac872eddc236efed2958e1d81d4f4a1d739c6020e17e57c504 30450220192df84f55f9d996cc62eee963daa202cc7ef68ad2d9a90a5c7b62cf0a8308e2022100af76c5af28593d8e18137b7e02c6cbccad19619abeeebc81b40cef0d769ad56a 30460221008aaa10fde696206e481f4e0bda7d887137ef6a57195867d9480872d7cd940200022100871a56da9af6b4c483ba91e7557e0858992865d5e7361461654b59b930fdd862 3045022018295a195c0e71089c440498163cbe650b0caf5ffc96d8d02f75ba16729dd88a022100f5f9ed4ae02b7b8cc7eb0e8dd4f68e0b37c15d55e7f786fe1b176f78c1ce127c 3046022100ec0556fa6d44b61b30e5f51cf6c5f2daabefa56408a6479469518062babc4a67022100f69b1c4ae56043e90bd8ec53154a99c8b446d8286b8433da2fea9a494f714d3d 3044022038b79121e7a965c464093cb5f25f2c1bb329d41894868445a9434bfb91c34feb022072908ffba5a4cf998c955d502ced2543009457b8167066f0941a0fca730d91a1 30450220160218c5c595c455f3cb54dcf735ebda2a154efde964cdd18e84251aa2983277022100d22f8db705accbc47b39cbc2558e4c8e97e436bd618148a00a3ce165fef03938 30450221008d772e013656f1df52f604ada3b24b13d2a901a2c078203f6020a82cde9d368f0220742e9c063f6c60d250aac05691d6a63e0fd87c0fec9c829071ffd592379330e9 3045022069e031b6faa1f18e3b629899f6b211a972dae561497a28809376cc1415d5cd180221008ed7aa6e910852d29db181dfe044c8b8633ad9c62a71fe52ec5650cc79cd0bb0 3044022072fe8106ab234774b74a9c87c0cb62b00dc2c33d5532ac4e3a17336f66e1e1ab022007b0cb45bed823c1380cbc5cc917b6f80db17cee63484ab4469842d5bad9a1c1 3045022100d8601921a58b9279aa75438e0c43ca85babdbcd8e4a9bd90af61c0cfa9d4f48d022060dc041fc90191593b35455d87691dd7ede181c18e5b1fe14069fd1c21b39f11 3045022100eb96a2bd022ccffaf39bbc0269ae601bf39517d6712d72e6d820643c8d7790a202200ad31691955a4e3927af481f824bf2c08a98ab0209644b7e876aea8d60010f45 30440220421780dd5da586a96a401c1fd4772b1b71d93c58ce707fd2679c391b8d2b9c630220779e934b61b17829cc251314a934453604bb0b26a2416ee7b1840d9ebd8150f2 3045022100811a9f038739687ce718f251384aa0888fa39422a870f80f057bdc83ee51485402200828471b41647f999469dfb3446e0831e3dabb9c67d80a61dd8a74f4bcd6c10b 
3045022028fb1bc3c58c5745b76016cd53c14666f0d809d051c1e36d3c266eeb597cba01022100f927c2dc93c688de68453dc32df868368d2b434febf34c970711eec9a9e1f160 3044022075283cf1f0b8d7aa5cfa8e1a02bd8c358962bcd1944fed113960c34c42d3e81e02205f5621967f1a52bfed68ed7a14660ecd7e4748d7f20f1072cdea557bd7f86698 30440220766cef034870c240c3a28502e83c02fdfa8b49504c929bead54cb5ac836f82bd02206e9f9439e200123b918ea5d2f0a263fe9856c947244d04f07e79ee8324eaf412 30460221008e1c3fc3d3c0e115edb0f4c7f3bc199f7fabe8b91902feb7181e8819b25ff75d022100a9a3d3e0f404b9bf75d1786e5e047eed0e52ab94d327fd6831e159a19e55f03e 3045022100f5520ff32a2a047c90aab72da9f71bc6ad1cd4d889600dd17607ec635534e2f802205921ad18cd2e5af87665e1e534f4e49ed9dc4697b278b9f34f94737eb13fd81c 304502207dfdf2cd85488f8b7b4e4c8f82e5a394fee2c99e7901b5ea6bec17d4d3129278022100e2de6d27b1d46fb451557b8ebc5be50a01ceb4d344f98b43e34f43cf90cf9a83 304502210092a0a854a8d171a09fcbceedb3cefa8b1154d9d6039811f1b37344d3b0467e5d022001917fbf84bab7603da5f632dfbd962fba6e684e5c61a8f9f68ab332a8ef9540 3045022100895224c86c629ab44d158597060560289ee51ff553f5d9778411846c882df264022008e0b67efebaa227b8ca1a1b6380c150b832373122f898f66d2b71006033db4b 304502207a4c628c90e6aff93c2e76adef77158e2c02ca796e0b45857e66df4663abe48a022100df6e59c23f1e3220fd6a597cbcf79c1c6bd8c322830b7a3895a2459efd4e21ff 3044022045464e5fb096ad22a4a5751f0c7d03ed947bbe326f5a5fcdc0aa8edd2844df900220528ea81c7fa3197cbc8fd17ecab05aa5c19702d21d16157e260886a243c1f9d6 304402200d1091ff96e6371143129a8d001ab49ba69d44ca3b6e4bd5a0386f719513589d022005e72d666913d9a2ac861428b0f5527697f44c1f19a1cfd1e03950bfbdcb2c97 304402204d7e7066c16277963a256ed703f96f4597816bfd6f84391ec108bf4eb215de4202200df22fe43d3522de3c9d7e2413029c5b84fb825709b8a65b875d5f6285e50f31 3045022100b920bd541ff9983756981a7cb5fc073eb8af7de1a64980fa20737ddd067a66e9022019be6250ac8531b0b0bcc409e8346e1383beee4b4597ad94860e080e659a30de 3045022074f9f06affe381a53ed76f3b0a3d6242ce7d7556edd7e281eb3f782e53a1a4f4022100ff6ebab642be9e5ed5c280eed0c3eccdf5739ea365537e85c28a9544024c2938 304402200e1f31382d4dcd8bd137779f6aae02bf942f94dc08468f8a7f056fff5f9c4b4b0220483fab561ccdff49a7e281ad913109528d88fbfe2ffdc29f5ce60ea8b8c44cba 3044022042db77e4d62d2d45f17300a56135a8d21c20208c59566b479ba470d8ee2ed60c022048fde4a2a9e0e2283ac8d9b2d217f7e8bbe73ecb8221f6a634af0d686dd13ef4 3045022004ece2b804df94873619cfde42f7fec50a0c4cbac2bd4d4a7f104df247ee6e43022100c898c6511d4e4524e8714fa0e7434e4eb33562fba70ba63a677cc915e2cb95fd 3046022100f01429e9b1a2168e8155fd3ea0b7aaf1ed14deffb4799b99150fdad5f2badffd022100bc11a7f96b036a84cc99e19e0de3bc03f48b72662763690180c13b6277a2ac10 3045022100e8c94a54b87c3b80c47f136c0213788ad5d4302e53a3ca5d226048805b3de60402200bd1bffa0ba1970994011b3b1390ba17a004b02be91d2e31e9708f54a3d3d520 304402206551ec266c894d74a6d9785e51d0f30d0067ae00f458e8c7922d06108b39cb980220706d455c1c1fea2cccbac4479acb0154706b4d2a6914bac85a35ec2d368934ab 30440220325a6961879ac14e640c866fd637b44b970d6f0970cfe6bc486a40987d12aebe02203c068de9e443e4f1bbeba88abb0460aa68ad45e83e8b6998aa307c50aca140b7 3045022004ab5fdc141e616c5753748612255702f042589ba3f03041e914b78768eb84d1022100bbe08f8385fe2cc08e0142c1681119f344113167b5d0a1b5638f1ba5a96a50ec 3045022100dcd2ec2e4edb08255f025b69278bdf41f9cb944635d7e672e70f92d63afb76df02204eefc591a0459ee2bda75ba735c3af87c1821718097469d67c58d588eff2bd5e 304402207a8bb2fabb050f966133c767ccf438f60101ca8b33e606f5bebf03af6903fc790220626a84e5afe3c89d112f0739ceae504624800470daa5c1ee8b1a0ffd8efcb3d4 
3046022100a35183b281cacf5c6d117d047264ddc723300f8de5d9fe9e4d31cb3283390b03022100ed3b6d56b8b2dfc2b951cec2b10e6bb74a6519f8efca91eb8f8f795e01740607 3045022100a771a7171292aaf0c4ad3630c6b4c92464daa0899c634e104800f162188a4004022054bbde5a40d05f8ebb16762b08aa9173c3b04202ea2996bc0e2ff31c0044181d 3045022100818bbc09a4b0b0945c655dc580db422828c473c798d6d7726140a68a6d8df2010220287c02bf08f29e4d56fa8828ff2de1405460a90e5c7cfb6de7643c4619c271e0 3045022027a52d6fbfd1af4139ec7045f4362776d7e8791381d0a3f985e8e7cadc5b3e550221009125f29d51e7d064c1ff835907d1b032aee7b0384435717d29fe181343ef61a3 30440220429300877488657fef7e3a6dcb644c830ec1505c165194be0592d9e6a07d9e4d022021bbc28dbe3baa0736e3c8d802d76be15c962b37b75bd3476e99fb2d031cbd44 3045022100ab2d8d641ca3fc89ec4e272adee14f737442e58ee4e3e306e1ed949a40d1feaf02201e0ec05a72212afc745c822e9f5a53e4f81104db223a07477ee9e3ebc02165ac 304402207c10e4a222b3bc32b287c485f01147d037906d9d9a9cf1a5b1bb0db20d7d83620220102ff9f48c5eca3a9779943b8feb7c7375aabf9343f8d4f295e5e7abc68364a1 3045022100e93b74ad1fc397f42acfe4199517282ee30e194bba32baf1d224ee2a0ded1e1a022000acef44442545e6e3301fc30afaf5430ac0ad58e3e4d777219824a99659a129 30440220041b41985c59d62ca5add92e88e29745e8ddb63762064bd4216d4058c59bb5d602206360ba0dbfe270fc9f588aee6232ebc2ae5e6b3932f910795550629b0778f9bb 304502201ede58c4bb1973cc7a1c565ab0439274c0de03f2142673d00ed4802ff6ba54520221009be04f9e6a78122be5aa2c8774655468bd5361f3a5c7bde631aa5982965e47d1 3045022100b320bf01409d4f6d68e78b525bb4a96511c6043c81d5b859f4f1ecd2d3bfd1d9022008a4063da0bf91f96cf56df8ef7bacc4fb813c66a59ca6b50b4f0648417373b1 304402200fc994f9d930cde9e87e62ec6028e04f331bdde2b45d21a40c5832b88db2419802203a2c2dccdcbf32d7f1fd945cf313b9719f1e8e98704f94c50f32aed68b216768 304402204129e98c9d2b00667867f3ee21193df0b22873eb5b9c350064e8597390ab63f30220396e7e9b4cc555eff1f6a1c2142c09fcd751671c842f3e6cb45bf1cb234d874f 30450221009cfa3a46ecace106c325e5c78255b9fb977180136cbcb1d6fcba5257b08b3b60022018a9612e92936613bb61e845affc80a2fe2933f6d77ac7ceac745a4ed2e70a1f 3045022100e6f83707ccf27266738f4fa4a4eace755e3a7b2b1990c57c530555476a0a172b0220265bedb0faebad76e6dde79b40a4fa99c2eb366e1360f8f6d36ba459327402fd 3045022100939c5189cc1c6ba01a33e9e98217f85160a1fa1656f21da01de39cef8842ca59022027534ef82224814b7c1c07b403d777bfdc299ce872af444b93fdd19b7c3c8eb5 304402202821c931478e58114a22e321fefe35b4201cc5ab8452e1ff8008ae3723c9f202022077d4d49f13cebb6c90d09941c658f23cc59d7fed518d45127ee79b673a7dce31 3045022100e64d3fbbdced6e78a9f45b0b212203f1b4af589c1287861ea4a677fe4f264c5c02202ddedda54f72c3350a9bbf9ebd4eecb484ae101ca1ed168cc9ddc62ef12888d5 3045022100c647102c1e64a0f059ebca7b757710c935611f2d034b97bb334160688773294602202123ae33f23a61cf58ca41e84ee35f6157db6e0ec0641d0b04cce41d9065fd85 304402207a31d6400feabb429fad2ade27c9eb55cc80855afc4f565f14bd3f5ab4eea9c302205f87b273c2e59dc53f932c8afb616380df8fc0613a2802c89fa4b0975ee8797b 3045022100e2bb8cdbfadde1ab179d256f1f572d05487ba2f6ab803cde49229387f337e58202203c06600212ac8a12b18a222cb75ea38137b63e247c9a7279c268c01d89bf8457 3045022100dbf02077f18a7d7134526e79ab932c7774b6ff169136edfe52b7109d5f84de7402206f8b4d19376f8c69e7c4c3cff9aaeb9e2e71ad6413413cc3249848dfdc5f70df 304402207f189b006a9b14e393a7a598b9fa2b3eed0477c3aa4a86555ab145e8c21ec1e0022051559a00e42634ddf31c26333885b1f3447cbb6ab61275d13de7d7c957cbfbd2 304402200fa78c7b79e03903a4a739c6cb29e011c9395975b10144ff9e6f41b7c632d48302203f980270d8518fb6d4e149521c133aeed12d281d84b36a551948f6aa986fe2ee 
30460221009fb69804277e104333b134006701c3d84219de602689f2caf87e1f95109a089f02210096c971e9264111d54cf524d2a7129323f956311bd3acbfd1e4f93a07b3b2cc2c 3046022100aa9b8c7da5a686f3b856969a37a1f5bf7e0f5aa1858df81a33e108f2e6245b7f022100af253bb8031819f531f1491f12428b5485edec1c3e735db43783d3ca22ed0f07 3045022020f463f39e1f120319a44f3f7c4b957572083ab7bcf90ba6707703ad145017e50221008ad78d0609ff5198377735f7e481f5f3cafc71906a4a2cb7ee3ad890908a767b 304502201a327c66ee0875e7d2949294e36524c0e5d77dfca704e98fbd4eee4f864ec812022100f24521c4e0c1894810823d46f5ff9a780ff856fa60492d129b0b9fc58b6c36e0 30450220791c21d1e72457b1b327abbcf6c2a569875c1d44e4f7ad74547c0ddfad61d2b8022100d9d6ec5ea43f20312bf438eb010072aa691ff30a2532562d2604503cee42c0e7 3045022029147d9745fc2b5d7e2658c78ab8683972cd4c573b31a87e63674576bdb177f8022100b7df8e9980763564622e0895444b4c469638cd6f5c4e7f29ed0a338c1350e3a4 3044022050bcfc04fe41d6e02fa90017eae5e03760467f43342086fa5b6c8e66cc74682602202feae0663927f6871d70aca191ea50946f51f32d6ca57a71a2c2640585ee10e9 3046022100859c68911bbd2c78218da6478a7ccdf1b6640f1f277218e6cf299e2833faa16d022100d2b32d39803d34bcf9f390268e9fb242d7968b76968ee6af69d9d98b5ecbea87 3045022100f41d49c3799e0f17c618ac9c89ca2cd69829060f0c704237541bbaff11789dea02201f06939798513389c9c227e38ac7796d9eb19d34e6c38ccdda64a1f6ed232443 3045022100d5517bb3d3e72ee8228b070ccbbaf7127cef8b10a31a52cf38b4213d4a1bc93302207ab1009b4848ced1964feab2020804a9a58a9e73107bc8809e0f8a487d2d097f 3046022100d133e1af401481809a258aa3c34d9284e1ef29118eafd02a00c4ef96b8a58e78022100e8d447ac38f139c0b7b97e5f275826d9ff79c2a21bc003befb23d7ed92d4abb0 304402201e4d90ac60fe19acdfc047a57141f03b1bb69dbb04b9eb71b82734c0b927e59002201fcab676e943793a699d53011adc1f647d4cd2bfb74f91d200553f02439f5110 3045022072a7453007c01ff57fc3ee1878cf411eaef54bd6e0a7d24ea79efb8dca171230022100b4b763ba4bdd9b79a02aedb276dd33955ac96d01c360010ba7deea6ff6b5cf25 304502206662a3c82ac7e6e7841e07ee94499a02e5a63090f4ec72db53029c0511bb2769022100d4c0e2d3a0d489e55f679a0447b79d86e38b51ffcbbc277deb750114752aeee0 30450220769ca1c1b99535ad85229dba775fe7a46786def9173345838b3c5702f6d494ba022100b7d65bb4dbe04df92807902be47765b7cff76156f69eb8fec1058234f2f3b96d 3045022024683cf6f97555ca5e922544b1676faabf5696db1b07be174cbd0c010abe4980022100ae81884ee12339c71d43e54ccbcfaa153eb7a81e809d5b8b674f37cf9ce247b9 3045022100da3fd69b3ed9e5bfb8e675bdfac708e77e1f43554d9f71fc05f9fc290d7d3bdd02203e74dd805d733eafabbcdd89e38390945a48731a693363d367a8020a72c5777a 3046022100f0289e4672568fa03c57b6ffb5cf60fce2a74b138495d54d10d906430d63b1e7022100a3085b8e3e22076c99d94c098092ee358ebc867f56500aee91b05b78226506f5 3046022100cc34c6d46b58e6e35e5950393803539f9e845467898443371ad51e0393dbb19a022100b5a181a6dce7016527450c14d84d2374b85cbe0045e5ddc1eb331d61619b4868 3045022100f3362a87d4a46da83f3b8a76dd63b59f06064b26525dc821542fbc9e37950e0d0220352c9def3895ed101db3746eda0fbdf1f314abfd8f3379737f387ac7e202305e 3044022009669e2d8d2f24b1bf83d3b418c0668787c99d881e66a469e8cefb7b35d38a1a0220537fc3643ddbaa44e312aeb24cd652090d641f9bd4fca4bc2e0053a8958917ca 30450221009af62291a4bc86dbd975a7a16d0c5641f80e58e9ad1941d78a2e1d01176bf2fa022033f4f3a18151f874768816ee81c3d9cdda7a9fc076b360321db6646b0e4c3366 3045022100dfcbd862fb665ad5f5e46718498c6d9ec0645ff662c42b8f1ea7c15bacbe542a02206fd8489b81c88ccc386ce5e3c567de9c0a03976e2dc20e1793a91feb5f51a01b 304502202568c369ac97dc25ef8f60bd526cda3b92f07891c6ef1754bcb1c08945927e51022100edf8a188efa64d7a1a94b3f74d714461d3b8bebdb583a40565549b066ec20ac3 
3046022100f72adeecff10d3f6b449d363a462c765ec74eb069c68f62d36544f28a9e70d0e02210084bb3ce688f9569af04ddb7623b2bf1d875f9b28caa18dac29fda862dee2adf8 30450220185c0982e7ca24092ee2ff18b5bee57ddc10172349d2357b7fb83b3b795051f5022100cd8a939fd955531a2753c2b2b18d9bf1d9f0a068d8a609234db717c8201ebb33 3045022100e1e5a18351e71030f1278db07bfa1181f1c8ecc7c295067479b67ecc3e4d0b3702202d22fd0dd7646110a722fd0f191e88cc07ce68ee9f03cd8a465776e01ac4933c 3045022100b15d0ada2e11a4f30e07846928261d9ae91e25a9234b9838a7177ea52615bede022046d971935625049451ff06957fbfb04fc02c42cf06ad080b2302dd49a6e351af 304402205eb0f122ca26dbb5e4c0a4e6a377828ee18bb5e6d41643e2d57d5ed3d2f775dc022073954176c7be86ce27567afc5878254f9cbe61fc75791daaedb789a711d4608f 30450220343499b48519aae73e7b2a27fad568e437d4b24f8b40151fbdf06e20203fc5f4022100c901bf350fd5aea8ce8cbbf5975b8f91ea30a3a07735389943da5164eff99d0b 304502206f44e0592f2dcf94736191d74d2b2e88af889b9e23dc58b8048a0a19ef77f7c5022100ca0487f71ed7c98291434c72d6425db3b29c5e5a957912c6c057ace2738dab54 3045022100d4b9b3a7553467d3f5cb83adc8a95d85bc6a01486f00146c299d0a19cf21761902203165a5516aff4f40e17975fcd972581766efc85560e02d14ddb7d38b4700e4bd 30460221009b5b95b77bdfabd33557f93e941b1b57d4929e722c557a58e8a6e25448605f2c022100b931f343717641e5e3ef2b96f7ca1745cb9a4758a991699250a70801b0f93af5 30450220683f2bceb56f4937dccced8164e9a0fd495ba2a443b4c08e1bfd21266f82e49402210084d8dc2fcef4d4747f9a69471429a7e826c4c659ae7963d1c16e73cae088180e 3045022100e83f10bc63762c512c9167a503000f0d5dccbac796c71050a21fc8358de2f1a402200813777f4ca4c731a4d6a0809d5fb14c6bba2fa971d201f4e3f7d3b36ae3f1ad 3045022100de5fd43d5b6d50d137941cb5b69e3c54d7f0f70921a35b63591e92cd344e8e5d02206887de9c56a73f17465d64598dce81834ce7052116dc593daf7baf1a21cf4172 3045022100e143b66d7c0804889214c4fa88d8b693e5196056fca836176040603c831e15dc02203f79b2484f496350c405ab522a76198f9335f853c64bb65eb65c268b542d4940 304502206b4d0a5349c35b97b479e403ec0e34d1375b919f4feb9d2669023f9e68915d8b022100dbda9fd75229593a090f6a88ba95176e109336b477b11db85c52c7cf8932cfe4 304502207be5f5c85d7a5e9e997a4a33c9911d093688554d357806b2c0c4f538b6d121f1022100893693921b92fef84751ae49df50899434460435a4d6f016edd6bae8f33ed7a8 30460221009f771456ab5834251cbc172898647049ab2233c0de3719ca19173f555e3918020221008fdf071bd2a8058ff5c547137cabf623f87d248d5ef1c8d65b2a0cc1e72d1bf9 304502210087b6dcbac101404e716d21032a3d2955d805a92cd9aace8ad27e5a3c71d6a3830220432552c0924357790fb04a7e204d19267e79798a66cf3ed4b5d7a2359f6b46cd 3045022022e56e04901a8e2f3ba0067a8bf185db98d2985520523fc12ea2af2e21fc4859022100a3b64ac85e0a631edcabe3b5032e8f5b4d0d728184c9055d603b3050d92a231e 3045022100ae9ce61ba5d9312649dcdfb26bed96a42638d6788cdd339cae261f5f8a9a5bcd02205c28086042cbf63243e778231059b039c2a538ebaaf56dc671777754ede37606 304502200a32b77c7e3d943440db6e70e645694a0e32b86ccc987021d78bf1ad072fcbc9022100d14c4315467e55292ac7093d890fa33ddc7d64c2ff70959e88f1c554afce3932 3046022100c3021217db991b5f062e2144e3bca606429805c24c5b77e8dadcd7bee6d2f454022100c774709694b89201d22cfb88db82b11ca1b6d49977df9a6fe5451b7a35231132 3046022100bcd8782877eff343f8bc0d653039cf334c52ce9d167eb581036ce1b371711a1f022100f2dbe29b35ecc42d183a8c369591fd18199fe46844cb359100157f61a32fb016 30450220188e48336c87f2cd7826cd5c50e814a099e4ddc3d43308a78b1c2c344412dd90022100f6c6d62923554083dc0aa77e4e53f2382e85ef1ac65f9fda346d6f86249d9947 3046022100e6560838aff8608613b183dd3c5570c0a2463b88b8c5c1252102d2a493e2e020022100dddd2291c33efe532d99976ea59a864e556d4549e07f7d1a52317de9eebefd89 
3046022100e4f56d088d0c167a1c789345e790c80f118510aefce184bd5f9e66a5fc9d454e022100fd064e036a298fe16974139543dc6ba8c38a3430f5dacc2d4f7c2ea6de6c087e 3045022100d936957a607c32d843b50e94a8c2c963807086ed4648535f987db8d97ac8d27202206d2add9a85a5c3dd6fb9679c743854f5866604f776b327e33507874a8cc92da7 3045022100cd1562f5058be7ba2e42139c1f3662e28cf4fe89730838e2c7165c0c5e6661120220756c05864b4c7703022b286e17629b0c001fbbb0ceb514ca9ca62e5de39fd129 3045022037b2f30007f1f6180cd09df772f4a6a3035e6c3fb02ef97111ed8c90ca7a2b6d0221008c21538a11a73f0ec76dd2968c0fa355cbdc2c2f8038e2839426977e43c6d208 304402204cb645e5c59357df38555acf67d25b33ddcf501c0d8e1ccf3d0ab596b8face8c022020061a25e85e333c716b02f9cc9f221e957fa548e6b71c7818f631a879f4e38a 3046022100fedbfd3d4926ee36b2115bb403b5edb9d645ac67fb2480a8b9969baade3a0a47022100fc4f45faada5dca3ceac89d5e6ae7363821ea46accb246c0c0d11cca8de78bff 304602210086e358d96f7f0054216c96f42a4cbe0891b62efd92dceb19dcce7d3fe92974a8022100c78837e954c7e0800fdd22d08a02f16199f224a3554bdee4f22cda6e9fe0e676 3046022100be127aae672f93d0610f1448e1d747bee929697962133318ee36acf1ed1522cd02210091d842a8d19fd58d3563c223f68743b0fada9422bbcca09134750734dbbd1c1a 30440220563bbc26b2630035821bd6a1a2b9facd71c261b4b2698b8ff2c4e2b30257608d022015729bc09bc04d5745bb9b509b4c2fab724a7cc5200d23c2caafb17411e6f7d9 3046022100f904522696b413153dd45ec2a347679780442d272c9840958b31fac3bf68b7a40221008feb1aad89285886e1a3647fd557290b7fe2426a408dac279047ee5268708414 3044022062b50ea9f6f029fcd377c6a6d2753a34de7e8d4d7a8a4d79accc97af0985ccc202202e45083cd587c87a7d4b0b6e05bed5615ee0d7c6d4e0b9bfcd673f67ad01d66e 3046022100d81bc5999ee61f25dcea56f9e4e0dff978e2bc87765c32097bf8cf041a0b4f52022100ccb36b67712af44a3e4ae86e27f5a971f760a3989fc2bb6ec572a38667b2d85e 3045022033ee61883e5c041527d5e86a470adfdc9db8299289c7ffc13a1153a79806e94b022100ad7c4e8ae5fd62872c24e28ac6621b0cafcea571dab691f6ce9f4cae70718971 30460221009fb91f358ff74de102bc7dab9f6fa408f558b97a2a25b35f7e9c0206146b16a502210091d8bb9dfa4295d3dac608290fccfd836c3955e24e42ba816b56a980c06da09e 304402204b89faaef2e83c91a0327ef8d6c17b37955046b84110181133571b6ef869fb300220438b1de4ac97fd1ddf9f6b622d3ca3427a0eede080fc05085cb4a1ab06b4e8c7 304502201182e6fe8332f2261e4a62e173ddfc0cc07196da1c41a21dffdfb78a61c65e7d022100dd48d702f394e3ef2c2abf46d93d3249be888ebe8e9d45a5fde2b2dc1935c0c5 304402205450ce64653c52fb7f6362633de3edfb7d3cf31ce56bac22e43a59ff000ef12402202db857cddcbb9ccf7bdb67d9ae075b684f2091ce8627ff4504c1421cb8403288 304402204d0a4b6bc760498025b77ed17759569c6762c3cd6bb4ce25fe22a22eb71ccdbe022074bfef5a6bd3f4d32472d9ec916a070ed820709cac67d35a56a74fdb2cec1640 30450220161ca8671234a7c789f2b72e4ec5d556b9ba2bc813dbf20e2bd95ace86b416010221009a46a00bff0295d50e03d11271417d092ec6c15b47d4872f688d39984f810a2e 304502202cf78503ba9de8f745d771bfc15901ca0803b62bd638f860a7166c147aa170950221009963f14bfe4a02e95070bae0ad47b259dd48c6c4cf3407b125a6c0b1ca5a6de7 304502203a0d855d36c683e6bd165863399a6ccebc639b8f01c2fda7459b18f6da430f03022100f37669ad62ea80328a6bb13e9509b0b2b4a3f6be268d1dd749fd44e2e04f1e85 3045022100febfcddba895cffb9923bdcef981bc6b6a92c8fa11921a1d2d4b3ae867826c12022059660b12ede7b439b391b61a0529548322c735c83e76b19ad266452ee81d2191 3044022075cf73c9b52cde37d52e4a5c3c0562ec17c824d54cc7aefa410e20878856f38b02207ba7c42de5fb72c9726df03039bc6f8461cc22cb96af4e7cb8ef26304a05c8aa 30450220317c18fdb1d7258c255e53a6681e7ddb1513c0e807a8c95b79f559167c9b9622022100a720cd19ead2fa7f6d6bb5fe2b8cb2aa6021819b4cf8b4bf93f2d94ed70df074 
3045022100e55b9677c7c6debd74927164998fe8f7052f95897cc55498f186d03209ed58d702201dc798f55e9ccb528cdb950c1d7ce15ce39a4f1d69801b6834fcb0c1f15b0d43 304402203da69084c670f9b009db8101e001f282674697ea079dedbff2d1ac3ad86191170220203305c2964134c8f27a4a779a74d6c474d71b9b8177b4c30c162b010f687978 3045022100f6125bec875e4565490474f90716e1b881063db2778145e76a31a90b88d8372c022064ad61b2200bedd5f42669f14d9aec0dbeb445e8cd5b78ea072dd3e1397935f6 30450220511bc6df9cdb1147ac4b3fde8466d9abdc15a22de91ed18dbf04c30ac1c7ccea0221009a956eeb92944dcd2ff4a7d88528f229c9369276890083faadd2bdebf6396210 30440220435a8d6bec2cd7390cb93f50c73ebfcba3c8d5bf9b9d2c0775f5aa3e6094ed9102201dfcc93e99d3bb818c56c98736ac229ecab1bf9638aa69c6fef683732063c337 3045022100878710a1042e3127518a08496523434ecbb4828d4fcf8442d63f421166488f2a0220009aa261d1738d3a26b6edd4c0cf0c93b53ae26aea6adbd38dc60072296cddb8 304502204fbfb5138f0fd8122eba1f3a43c822be6cd35db0013d072e0956419d43a49e95022100d690cd32e60a549a1e9907957c7b6a9d50bfe8d49eb9f80db123dcca60cc571d 304402206836f4d99be9e94998e203d9367488fd39043f41f3bcef57c4f9e015969169bb02201678bd0b6d3b8a94271d53eebe6898974e52d3bc33492aea7e2b769c0c9d9f05 3046022100be7cc6b4d679dc86e58dfa86f5640a1acd42266743501eebf750d161f47ecd500221009744ef160ce98be235e98aec94714c0b962ec6eb15cf282a75b9043e0fce6a2d 3045022100ffbb7587f0ed27551a97d1757819e54bed9ddd3d13c8bbb3db83d93a6d3ffc2e02202ef9b61c9211678830463bf3e910fd041dbeb4a449d44bc62420eee08d022c9b 3045022100878073bf0caea73ef7d832c83b103a1bbf104d4e89afe58505c21846d4b61885022009cf3f479361c2027b3560103dccf3eecede8363792496d160682746fadeffb1 3046022100b9d93b7ee5bfff7b884b08c7f9a08367a3967e9c5c80c65a82752f32362be686022100df5a313aeeb9aa2100b861eec44e5d45d234b0eda6ce4be7bd8ac40ab866315d 3045022100d9e48f6ce893197f7f25080010cdd98fcdb8f2b0c70b569ba7ecb1d83795391802202ce2000e0a2156ea7d35952854b8321f0e9736f1e8eba6dffcc2f904661f8347 304602210093a8c7f8cf27dc17f88017f588419ad921c36ae0f0b0a08268065eadb2398ab1022100b51b22a0515bd35e74d0be65665c21150398cee6065ec8f28af08445e4b3a073 3045022078c6f44d7d25a73457d7b9057aba76dfe628137df54e89fee834f7e1aeb157dd022100b1ed8749ea95f5b3d351f6e6820e5f40dc2d8878c3068bd46185839dd59d504c 3046022100e6bd9533af2b72a9f09d9cc8cc4955f3b9f5be203b26f6064432aee9f9540c050221009d3e12bdba9f5d1ba7bb27676ebf71ae4139961fe1384517087aaa35d888198f 3046022100f2309943b1444dd9080c72e5a8b143d7857f168fb40fe68189261f764a1233cd022100ad9578176887e5cb96f0342b71203efac2d3595383b97be38708e0bdcaa4f084 304502210095ba9eaf68558db707d2ad80aad212f09658c9454e2f8796ebf0ce6377b9a34e02205ca67a913c6e5c5dd31ffe461dc6509af0b3365be0baa0e755592a9aa6da5efe 3046022100e586e42e6e84f1d253f7f738cd21b614a1c09a1e131de4291044dd7be532efd0022100887216f5b3e15f7367eb99902b028ef6d71bbb5edb68f7609f6a6db57c7be575 304602210082fdfa0f18c2876649349559cb62305f4a84aabf86dcd28894b9423401aacf8602210080263f65fdd8af72d31980d4771408f880ff2f26543d64a30c3f6d4fc51b07e1 3045022100ee86672dcf550acb9a8ace93c648bac3a446dc55aa0134c560c1681ad1430250022008f701c7e212413f1488b474fb56eb7c95ee1123ddb5980ae413866dc09c7b18 30450220079c846d2a3836631987c6527bfdee81edb42c19aa4e47b2301f12f7ca7b947c022100e57ff0e4ed0bb80890fc9fc5af669420327922b2020fba303625239717661012 304402202c0edf2380c622b710c63072cfeea8c0d10eafb0913baadf991892f8944b2fb1022001100768ef9c38bb9c99e486d5df6f5179b3ce698646d63302802ea4f5491808 30450221009920ba140a00d2b48eb34e1d245c6dd95e02d7b143267c6e8ba5457b5f012f1402200fe19abfe6aca2f02dccd9a256f93844588200a0aedbe2f746c92b94376b0c67 
3046022100cc6cac2cbeede99ac730125382df82557d42217369b8d812981591ed1ecc88fe022100d67500c9a068339a418fb0395f3289c70626e1a510f7b6162953b53f10b37f15 3045022100ab2eb8e75f4243c8823d03d11ceddcb404c7a3299132539704551508ccc0331e02206378ddd626aee93c26f7e3db39958e80d86c07c5b352ae14168afcb763290ac2 304502205d7dc0fd053649a94a6fd7ac3bc992f5ab0ad8217b7f80383a961ba32f898618022100ddd9763544b338b4b65bbf33c37a8b4e2e227b7587c24e370ed79aafb34d0bb6 3045022065243ab3a76caf8a750588e900660e070f1584b2c735bbef803d3e3f2ea2fe6a022100f1ba61297c40179ed5f1ffe55ad6b39dd61e11c5a8e7ed2f1c42a0be7e2e9f30 304502205f5ddcb2a4e64f0f292f4575e870b821fe92a1026c07cc758e3d440761a30c8b022100c6fae67fcc52ddd740856fd6eec42dd00ec3083a028af9b4b1e62cd2a5309f5e 3046022100edbae6beed96a401af9180f97d226b392297e64ce3de30229ff73b5db517207d022100af9c3dfd3d4e13a0c8b77450258e8a98c4fc6156f184c362af97b82817f63f6f 304402203064d040f8dee52cef258558ca3989640585428b03393b7609ff4b5af63af41c0220543dfbcb662aeded61b40e2fd7fa14bc7426f8074cf00683fd2833eba46b7f92 304502207c89656ba30f3e3c91d4fa35f30216db693f498395050a6105fc2b861ebc5080022100edd91312d17d9b2aa08ed8f31cce7a44d7ed39c5e2ae6e73d0c96e0e61eb57b8 3045022100fabd90228c25cb7d00615fba03ce17fb4eec586b28584831a7a184ba8fddd35f02203b3f307b12f5859278968f9e21fd3632188edc78d59df066d9eac7f319643b56 3045022100cfc67bea5e88272ae187e6d954727886c5e26d0b741ff37253554f6f40f5c75602206a550db5896f3b66db02095923536496304d8ca606d6999ead503c9e975d50f8 304502205590c1228412934a5ab1666919ddde10313f0a97e891a0460c0f15027fc7c51c022100e302211844e2c8ca8eab2d255790a7fecfc4cbee5406a0a260f761e490eb2d62 3045022100b5ee596e5427012ef5cadc1ea520951eee7c8d2034bc08c3ea300ce2f8ef0c1a022044a325eb57dc21eab7272c2d73458f06047be2ce63f2097cbed8edec89b1f704 304502206580a6366fdde974976b698b6a923780e5e9fc52cdac60cc348810dc55073e9d022100ec644efd1596ef1666e05d4cada97a93bdb500b0afed80cc15b963eb6c4dac1a 3046022100810f50f2c44385f583830e5e38eba3a13541781a6a38926a3065b5fd32b6084002210091a6c594fb94fe01528bd56ed5fa1e6c8dd6f14fe96a5d1c4bcfcf8e0a22c6b1 3045022100f44662c71af379ba982b04258c3a50fc497b9f09ee6a174e59fdc255799dc712022010f17bc63f541564208fa9fcdb7c432da0f58b07d76b90ac40c8e34e092fbde9 304502200f86f999d8ade8c3dfaba712d408477946ea7b3d3f74f997442615d2ae063227022100c620e18aca9e2833b3fb6cc3ede1691c1c6fedb9b660b649bcd3987f9bdfe27e 3044022003463e807dfe0cb4af315d69da9c775ac5e49f54f844c07e4198e6d0f8751cc3022017ad0472202996c3d9fd40683a23702eabc319081d28e285963cc52ef5a2602b 3046022100becc676cfee4d113ec05be783060192c3af914c4c519b616b6bef9d43d49be3e0221008290eebb0e4fe1bf2fbb215af0c570b2c9c8e849a5be2cd9d63ad286e0658e51 3045022100850ede3fa1c12e5c2d7fe720f56430acff3c30eb6a50657f8b6d05f68a62194f02205b0a51e17a71b3dbb3143a543ea55c12bff269f71472364615d9c14066297375 30440220612cdc00d7a039d0ac4727997b18b70c75d1f7dee41341f6b0bbed96cfe77b9902205d40175bd5318fc0257a45b53b896ccc0745e26bd4faee62c18ade260eb391a8 304502202ecb8ca47551686b4061bbaffeadfedf8098de64a1508c1fb88d450dff2ee3e0022100898b37aa5e878ee14182b60a8ea035b1abf27d6c118fed173742fd2d33429373 30440220540a79768b0685b7fcb93444cd92832942a104754b8db66cbab434064372752002206d06424a762de9bcfe75d619cce8266b05db3fd335ae8027721399539e995d11 304402200e6be24c7a258ac62b0cce963442275c5eba635e6d5e3a46d67002c2a405326802203ae6151ad9fdb391ad9ed74fedfe0f5671b3e169e7fde6cd0302343376c19f1e 3044022042855e547677c51a9ff0a5ac405aa5602f1c853bd2a305fc205bc22a35c6176a02200725d4aba5150371b5e5965876bf00044fe8eac46472fd04ae620feb2c53340e 
304502200e4532599190e010f2c8d5c83eadaaff64386f6bf36036ce8d3cd247ae62a729022100d70759146a7c6b9dd457fef40733130ca34388256acb7754d94325abd9306685 3045022100d56353d1a8ccb57df73d4e1c896758991fb7e4d9eb096408d86c4e4d1359ac940220199da642a4045dd14d7577217955ab04ebfb10fde8b67cc14090507869d2ea82 30450220245c15c4871c66189dab6e71471f2181400c5db3f76b3c3d5eb3a86d148ce62b022100b845fdd77666c2fbfae47451ea9670b509b68bd1e91ef808eeea276d4821c113 3046022100f894e328a0508b243d4ce54f020d96d1602c8267a112685a58c45cf38458553f022100bb3dd2b2c27a1d92f1780e229382ee244715c71440b5f2b51dac5cf5f620551a 3045022100ee372902bee2991c76c4548d65af2f2e14d9ca52278e827209c9d9f32a4bacda02200fd0b4df64c005a2a875cdf710ce14f4fc3d582f97e3e5be4b369660076d9dc4 3045022004093d43db6aa5c71f2d80773d8a2e8c7989233ec31fd7aef2f0be5982a2de2c022100d6040f6d0f02f4a7e619d4157ff7f3fdfce50058e406a27441c474a82ea8932e 304402201c7bfc0159e36dc72d37fc85eef69b5bd05eb1a2c8176d0033d689605458a40402204ee775b2c84acc9649476f0e3842a04f8b037f8ccedde5d1b8ca3ec8931d5118 304502205c8df6fb1f0c092ac0db75d06cc0e12faacd677056c70608c6c894dc1894b138022100e8dd217ce3d18e82e8485e65b604b494fc199396584020e34f64cf7df3a9aaea 304502204e7b9df8ed520fe74204d3dd2b1587f9dcd2df2911105e1a6f163a8d7e0b4b4a022100dfde91dc62ec401ff037a78c2f073f04e76df12bacf3e22515f7bd8e7ec99d5b 3044022004198d78a9c2bce01954b25a99e2129192cddb746bb77a379abed940c1d8576d02206bf4a4b3c9a1e12d7a8d26babab9cf80b511924459fe6a2f0b0a0402516094ab 304502210093f6bb432fecc5c66771311a7b8f60f032be585da1166e2329cb879eb855a06402205d028c0fbeead670eefa1e9f9a070f9bb3f7754d72be84e1c43139e682cdef38 3045022011cfc08d76ef9f7c3eba37d929305e30172c300e61330c8cff88eee510ca848a022100e9dab8724f54924a6ecc0768a2ed505d2aab813751836f7f28c1b6c35c8fafb0 3044022013f0ccd58ca4fb392a3e8d215794b474c7c19a4e528a99a7003aab060f3c330c022053d08ce19ea78d9ab45827437928c75d31e2c011a3f2ef45890cbdd2c3087e6d 3046022100f5dc7f2add854da3a92e412916a49642ecefcf6b6b4c473330be1c9948019054022100aac7a00c63c50bd60350bfeb0384a1ec7b940279ae1756a2b407b5721c2cd7f0 3046022100f92974e8a4f849796bede96f91a2f3a5d8104e5f1bb11252f027472a2932dce9022100a03ea937ab18739dc0c780be36bbce23dc8550571cbc11c3ebbec2c246a3edec 3045022062aacfc4e104ac8d5ff664a69e2ececcd3365910a990cd87bd9ac32b94343929022100b6613b3ab40f9bc57a5f5fcd180bdbaf00f01e63674e47a0d4ab0d5ddcdba320 30450220090bd61f478dca02c262a6bae43ebe3490bd5c0616c7152478026beb7506e62402210080c1721c9a94dad65d22317563df542ae8f1471c8e26e736e44959b546769efb 30450221009ede2c724768365dc57685792f6cf27368ee046d315a590a69bc5070d8da630702207923e5fec473a8967e93ba85e0d1d883a1bff71cfd284b13e2473166360f6608 3045022100e09a9dc6e3f89fedef97e33bf9e3a86b0bf7474ce1d2d4d60b012380e43dd8f802207915610ea20aa83f815ea06762b37eb7a9f14db0a819c42b704275faa980ab2e 304502207002aac3faf8023a4ac9156e00f099952c8298012395d22a67d92ed1eb88e17702210087d5cb6069d1a44066ac8dd7c9e84c3faeb1ba94047615a6223260f34809fd25 304402206796e2f25cc010e6daf476f31abdb1c5d195271c8419a40eb2f0e250cd963d310220290f2daa12954513571153f35e7490f6bef86c5ef916e6fa5e1dfb3ef0b53250 304502201b9042f48b0d92b3894cead28979b55294b5f37904d56e0626d2882f1141b226022100faaf4da5c7a89c91941b02b7cbec34c9efcbe4b50340dccf3a150163b8c8e918 3045022100941c2ec59ec5483c83faee566bf1c470732d47133c513812459599e8d204d15802204f065f83f0d738e46135e872a9b6067682bdb97f0a2c337e5f21c8e273ce5996 3045022100b14defdf1ab1c892cd90ffcd3618f5d2ece971addaa77e739073f94c2dd3b62702207599d6f1a24b2ebc8282e724e507b5b4b2caf9a36bfe01af0f781778c5cbea11 
304402204c166a531790e578549e2b78cef6321db3de457a5c78b48fbabd3dd3a1b0f7b2022038afd4e17a89d7b7d613e6b022745fca01d013ebefdc7913ba3a6fdcae6271c1 304402203fb90dfc116076b81753b2584932ba81c70bda19fb525a8f787b57454ab0edae022065c29ceb376a0ebdf3dd627db13b528b8c68bcc04fccf77d1be583494b061e55 3044022061f2e87d68258a2634903aff072abbf60e66e375976e77b09c61272c8b92d2f002201ee6382760913d2fe14671efc48c6a2b0dd7369d9d48f7f5d321f7f2ab57912e 304602210088b365ae5a5327b6c41d6de3c3b96d863d9be2dcc4ae6f8f15e36430f41b91c6022100ce017e94882f679f355c75028f7c0c6f317a8b4d287705d63b605394cb59c155 304402206affdc90428805dac7ed5a6b006dc385f29b0e26b187385da286c45eecba362f02204d8b0ba4440c6102ac3c0c238e6131a891b81f06df55965af868764a09c948fc 3045022017755bc49a285f62a1ff3e71324bccfb1e962ee2b235e98ad90803664d4be236022100a2e98eccde5c8722f6f35b4fabf534630ec8fdeab76e90bf5531b40f83d4e289 304402202855e77b5aab55e0c330d923dff8fa3242bf72a94da6a72b157bbbf070433bf802200e9e483718a2f4f813473cf5f4b00871e6231a08b37c5bf3f20d58e18410c925 3044022068ed78d8a7650cbac017c46641c89937148b1a21dd443d8e8becef2ca942e8be0220086cf54bb230cc2af66a40d4aee3a3840588cf84f4520266186cbfc9661de26b 304502202edb054d75bbdbaa704604d2d485990a6052a5346427be1661f09e402917d6d2022100c8b647f1cd449c6f70fede4f7a8a15b3a5e085ea883fe7159fa9b5e9bda453a0 30440220325b494de8ceecb9175db93b9b6197d073e23cba733497132e15526bcc0483300220194afec5825d45573d36a4d5aeca3d7d43cc05b77835995c4e3663003267a8af 3046022100bbeacd3ce945a9bdbd6b74ce436fe90457da0ac27da05a442c57aa670923e10a022100c90bce13264957c918b12a723133badaf0851a51769247c6c6f5912ca6fcda24 304502204b2bd76d8e5c43faf40db30059d8e03612dde10cc9dca2a13a9bb61c60e3038e022100a5f0c3bafec67c3233c0c455103808090de45db908238e528b2e04f34353d58a 304502207a8fe640ceefd57fc90a9c65950baadece2c91630b6565c1362e9883524ae179022100d271fe7cd7876d7152f9022d8446c53e0693cb24f07d7b244bbba9ab00279c43 304502203b18ba841d585628ba1797587bb7caf1209926fb60348b5a1662d86158014135022100f0837b478d352dce502059b52ef45e385816aafb7ed52d39885fe26bd140e802 304602210098f07d4ea4977c27679ade28e8a9e56d87558bd6f7de0408276ac95c46f9d896022100eaf2068af64d37bee4ab50fbc3a56f6151b8ad87e20e4ae0aeb74c52513bf235 30440220312a645f68518144c793d8598ce86337d315ee9e8f8101577e9c5cf0a89445ca02204b0ed4f3073844fe4fb97fa420856ff5b1f100af2acd654a0ec0b3b0d7f36fd8 3045022100a0694e1982fbbdd7e248f9853383b4a9f9392c751081b1ca35d97bf0ad9e3ec302206efd15c1a5a771105d88ab7ad75becc0f1d20a481cf0236c308388b04eedc49a 3045022100b4c81a3b7179ac5fbaab5d81604efe2e590fae6a9c33e002ae3301626bced05202203cee625597e9ea19e39508602dc557933a5babb7644cd8764ede433a31620bb9 3045022100e8c9313e44665d23759f9f7ffef5177302af30e25cccef03b4ad21c3e13c532d02203331e4aac6a367d2d807c4682c3a605a388d17705ca2be84e81f39551119d49e 3045022032cebfe845d931884d84dd1ec890bec3ce60772e68bdc8e3d93d0e209e802c11022100f171def8d0784608d474356162579b5e6db6af606799640bd7a6e9f90f541c88 3045022100c94104bd4db2f9392b6a4b6e57a848bf0208f8d74044a6c9b85a69f061e2e7d0022025ccd4d08138b377d0233795f1eff6e2338a8d947095e7a0c8a4581fe02077fa 3044022055230a9b97c0d2a68002c162b453732993e40c25f53be50652b5810ad2570314022038f45cbdcf082f696b0d1ff8be031e9e2c206a947a42be85bbe7f79a35fe7e6a 3045022031e52b472ed2df41c583eb6ee2a48ad65a68d935f5f3c78242cf670102a175460221009be917df8f12ac7b5c499dcf53cedf66b93b1f027f859e14d9e54dfa8db784b9 3044022018ab95209ca496196809294856ea34f42f3230e73630c90190907e056d46edca022066be7d95c036b66b9527afa2ee92e3df07e7cb6ef51e65b64ab1f18d21fb8e1d 
3045022100ddeab702f11cad6c75fc6a51a27f094f7764a71ceb53ee2ad1ae4d9d72468c5e02200ae0a9afc8fae7b3253442bad8de7bcde5977cfe2260833cf57fcab67290a70e 304402205e1327fd56255f3d65ea0504ed96f6b6d7824a45536ae5d08870bef5b341f5e9022068181a83c3c67f5a757b473bea238bf55149b044a5fcb63fafd590e1b3ee7d65 304402201e05c4641f666574f518940496f87b7cfbb66c0e1d3399eeac3590f22818cb9c02202f6e8d9b57de27859dab3685c536d88016552b1214a80635316a5d3a87babbce 30460221009404f5d31500d30d041388649f2f8585bf01d3299f41ed831308ab07772096550221008f31411ee9e574c655bb4173b236c9d7de24454efc5c57abc98df9c53f3015b3 3046022100e9b017ca929299583dd386d8ca60e907398f179255d5ff59e3faf738cafc07ed022100bd1d3bf853ecd2e0e26bdd7e7dd4c21736d287fb90f0f048a69da03cf0ee3078 3046022100dae7aab8be32d81e93ec9ada28fcf710b0a48444ab9346282bf61cffd7555154022100e15ec4031b9fcb05519b9b5f8eceecd43436ba82aefdc6d0dc7be3c5cfcad7d2 3046022100843b8a3f768224a3a1d3f984fcf93a520c8b0fc45e708125901bd008be7d8c5a022100e3397571b74caf2add7bd1cbd4d14d1cb0abe637fd3b1282900c4b3106c18022 304402201cea2587216bde145d559a7637751719b832ff6dcdd6834d5bcdc8a8a64f764d0220264a5257190d53e34bbe35c62ad27563cc7246729d428bce5f552da20ff16bbb 3046022100bd52ff4d1440de4c2337a6226b28ddb2d6876b8ad8a238ffec19fdcee2a0c98a022100b2cc02e53cf223c969b431d59fb52f960cd0793ebfa3594c5e80c564a81913a3 3046022100da0bcadb95a67474d0a9a5e1e9a29a1f90ad286b6d6915dd75111c18c06ad6fa0221008137647ab81b11db36ae26f23513f5b0fec0c1e21e018fc8ce4e3027a0823fca 304402205a323077a64447d05b3680761570c4345069ec15dc3c697fced916cad200594102207e37ca945b033a886e721904182fb34d55a02c86ebd0edf6db740cc683ba235d 3045022100c4e8cd27274e5790525181da6e269c1cf82150113a549a61c8fe79abc6eb847702200b26d10ccd9c035d9efd77c2275432a2de64aa1ccad32ade55cd5e2b85ea7ea3 3045022100fbda6b2fb879a4d0542a924e7575b249ee7d25e1d1427eaf67f7ff39ca433d69022074b1d69f36f104f52927ab7510c7988be5cd9b607c82f6ad05344f0adb355620 304502210093b695c40750086fcb937bdfd6e31b308fd71b8d60a3a8d4472066263130da3b0220327c13130d5e2284ddb79916d687f972f91dbe04d40bf89c387528c0b2f4379a 3045022030c109883e7bef0be628f8b8f363f73138627399a32bc744c5f478ac2e726a96022100bbc4e88f72e3c07bfe06f73ccb31edcda3ae97863542ada907f64cb221088db9 3045022100c9b783038d70e64e0d45ff07de903ec3fad620477507815b420cda1e8a8f87f802206e7c548b9bf0e79034384122ff398e7ac57acb374d3719c717f44041885e0d77 3045022100e410ca2fc020b60e1e06a1738d00d058c99b4a2b8b3c34a0f00d0184f46b2074022032c6137021bcb9eeb01abe872328fc97a9811c2092df48e3eba31dc8e54ee32a 304502210094c6ede0f2c88676bc409cd7e726a7a23376d73cfda0459072070963c88d138d02203d1db334df5fb523518e58252a8d8666c924f53dd6497e0b04d453c0a86898b0 30450220669e4ed8bc28b8fdc604eca2b12570c1e5042108c282ce4e4489609828b4845d022100f67f0717940d327442e12ac7ecadcdba7488b508543293ded9520ac002727565 3046022100d5f19eb6e04e29758a050d1d1062871f6a9ac00de346156f898cede582b98248022100be2312a1f106e0f7956cb82a79462d49f52bfc81b60f8de1d49fc125dd56fad0 3046022100ec3ac1176a9f017b17e10e49ecf5502c66cdd76f7c83fff29b10f2ac1033ffb102210082974eff31d32f7b5ada027fb0c7202e55b6df4354a2badc64f93a26dfeaf625 3045022100fe8d4ff8bfd01ccd68764209a46bcacf2da17e0d591a5321cd9499c1b493b4340220341c100f4b45163e0a1584dc4b8eb9e2bbba57e72857aa3689b747688d85172e 30450220326fa2c9ce05ddc9e96912a24b9a5c7d2d5f5afdda878c6325ed82c67f865075022100fd121e5805c980d124e07d66544b8b7ff1009d1625b1b44b930120025b50164b 3044022023ba98415884c92404bf132f1ede71a163b0ee55755494c285acdc08929b5d77022060c42aaa03f66c70416e5f7b7ad01f22af8e39b09ff4b9ddee336d27d150f2ac 
304402202ae8c817b9a4fb72098598afad0216a4e3dea3b1f16deb35fe467b540ae0ff360220770ded40c69db5521d65e35a0a982dfc9d92386a9404c2e8b41a02bafc832324 30460221009d7fb6c17b499c3135cef5e2466f125dad26a2b9420ceee13d50f2d88403c002022100964e9cef12da93a10b19fe88795033eafa5245cbf2c1d0cc3514a95d824bcff3 3046022100b155f85e5e02386c3f6f86450a53d1bb6bfda911eec55c38ae6fb323c036c020022100945540ac30ad5f097b4061a8f65b563c4a5b6a5a82d15c8b447bb53b329828d9 304502210086aa5d186c2aa4b64b83d6f47f041672008367493af346753d01a62444cb785e022049ab6ea2d1fc6763abde383848051566ff3d4472ef193c04b7a5458926069450 3045022100b6775825daa13e8c2b51972c1c7441eed1fb072d093285170a5d646dcfa5fc1e0220744cd8cf2ea8abc00e778da89bef2fa9d31a41b8b759961e81872578c8c80fa1 30440220790a062b062838ca6bb2dda2e3d436f9a231927ab3b0001a8506e026b4e058eb02203568478e51f448bdb46b1756d824dbf225de9b51bf1b316a7962383e24d28e02 30440220678c09d1a74affcab1205cf5cc3731ec28e75f53701a64adcfdb46d13d2dec6e022062aa328ce19fbed5787bc174d070952b6133e55324d49edc5226d3d9ef36d842 304502206faaaeab3e7722e5f625ecb9b0a6de22cc860377a21579e18335ab9c52f32d7e022100cbe493b1678322a92d4636556d84a26f4cd88cd5f33b226b16102a4aa29cfa30 3044022010c350152be3beb5c325582ea4db4ffded1f8e5cc005c887e8fef382243d461902205107e6e13d44d7a4edabc126eb5bc961eb7b27b1c5d8b2efeebfbbbec55a55de 3045022100b575929e76e1f575b1e435e3cb5cebc2bc61a37c1d69a55e9df2aebc4222914402203184191ea6475450f94965d32668d341ee618fa2db3effd471dc2e295847fa4c 3044022051cb58c2a68dd8e7530eac50e86c6cb0708697e4f91aef25d87ea726e53059e4022068ade1f551d231e520c6fe2670b959f132c38f9d959c1bca50a8ebc3f0720231 3045022100cd7fd60f7eebf1046db52f695614b5266934a24bce7fa5f166fc02b7c42df62402203351ca2603775044a6c026fa94627dc679f4548f311e4675b8e947625e768fda 30450220388b6f9e9a36049dca6b48015fc85f5c9db51ff565fef666781020ebc5171fcf02210081661fcad2f2dc313e3d3f0fcf8e15fd7f6d6e5b57c6e4e5cbb7bd63572f8357 304402202852f403a6462d009e5eb6bcb2161fd85d390567911bd2c770d564def10fe47b02205f7756a10b67276a953ff54077d6fb5406c98f2a29ce92727f87b0f7e079de74 304502200b3a80f7eab8b4448684cc1436479c995d0089a875a06cf266bdbf684bcb7848022100ba79e208b61d41f262910e6699a43e37d098b7ef81b45cdbd2b669f89d9a8966 30440220130a10e29fc44a923e87cf4ef35d1c10a2d53a6c469e02efa40b47b28e42a67b022011fd2c03bff471957cb491f3edffc31fdbbd6d49fc4727b152b1311bf79315ea 30450220052d1502cbc6b5763d4994cf73f6d4916b90da2d69bec82761eb3c3f83ca638a022100824c4f525909799469e400780c3b1b47c5ef9389a52dcb99f130e51bacf6db97 3046022100d6dfdc557e15a246ccb52669b1054fe22812cf07fc49265fd15ad00e00dd176a022100d403ca99a7ba56d049734f17719faf0a27519a38ff74d189b72f8bae46b25fe3 3046022100b95834ab630108d6bb63800c4848526bf1e99b41414ff24a2a7360db22e4da39022100ee12dede6759c07095c4d7a99c215b7bb61d799cdf3cc28a0be2664a3d109bb9 3044022016309588e5844d365c63350aa85960ef5f47ed1bd278555021642841944f97730220588a5f40d858e547bd1ad90e178e4bfc033db12a8e2486ab7e6c6367c2ffec97 30450220556fd63f6ca3c194829c73e40b9e49ff3187d348141729294c718653f091f646022100fdb199b8f06f8b54681243bb151fbe7073dcaaf933c2d06548b97a67f312aeab 30440220439a06531fd93c1501d6e38a6f60029c92af4c5d47d863ad65b86d32a2925d060220595e71f7c8c1d988657f049b3d19e220739656d94ea02eb9db547fa645f85856 304502204bd92c5626d545af24b45cd6cd6374eef8b6b28b79ea2a391400d57c4559b605022100bfc77bd4681558535152eb3d028638d6f7c407471718d7f8b62aac803d13def9 304502203e2be8735fa45e8de74ed5f990f5555f925a9da1216e5c2a7c9d3f237d6bcf2102210082d7431fcbda7646b8327e3fa97939944092f49d3907fbc61d1b4bccaaa3366f 
3045022100ae4106d76fb8518fd83ff02d632f325d33950e8ae02024bfa2f4158a134ad2d402202d15e6eba45aebdf7c8439dbd023e7e94a4a318480e70c9c855e0f91c1fccc1f 304402202e384df96d576c185ca32ce8d881a85c8ed49c8c50889bbc3df70d3ea1a05c5702200b6033f0c4138860e043dc42807feb4a2f3b3f3e442e669cb195d3bc5c82b85c 3045022100e040f7666fbb4d6b76b8eec9752266921a4bc51a1070551a17f0dacfd48bc18d02205cec5e84a4af81a131385769bae0f16e040ff3dcb15f8c3f0f9f62b808c24f27 3045022100be2a1414fd320bf1f5e5a2f8802914a6d31042bf80096b3a990391eb1d94b9d102205ab78770cfc349727bbbcd2b6d81355785419aa65065bebc60f7c961265e94d6 30450220312c9d38b9e42b97405e2fbfadea5b35972b3848c51adf22c06a2773961b61470221008b50503a63e1fba84bd287a4bdcc076e279c73b8bdbdd5e8bfb919eb6054bc50 3046022100fb785096c3923a1f7b39a6c8fb307e01c69c6f7559011b499a15fd38c666e45f02210085fb4a432bfd0049d3826b857ebf9163bb13ddb16937f1a301e55ba5bef3a5a7 30450221009a5e4e5af59c3b18e31279af84954aad96a8cff1e75d399d8fc2d8764c670d920220396091dc03e23a328b9d36a70f6571b4473a5ca4076ce73d71c613f8ca6e1ba5 3045022100cbbc6f98d2683dcf1f55acd772376a31ef3983490f5b37300a604fc4901a710b02203e825e4aea2e2ff63e77f843019eaa47629acc5c8a758de09285da6f4657adce 3046022100bc73d4ce597603e72ae03488a7e93c43a2efde81f711ceea7b567853b7dea955022100e084bc71f4f90a7d7568e5639cfb29f1f5d5a05a3ab515a0eb71fc70809d53c6 3046022100f147b9cdce6590d00451508908aaa357cf012e218e39a521af7a83b9e6857873022100f2601a431b8d0829d668dbd65debb80b460dc1329cdb30e6dcda901f78c45e72 3046022100fa4463fa6283e156c344bc033a9e0b95970b22e2e5da4cd65f80899de4677135022100bddf4460f09e8173b1508835c3566e169e8f39cca38ab3abc95e15e8b1c99c8b 3046022100fa43b43b1d7729e1b6b56d7bfacf03c770323f1cfaf8535755d2dc33b7503e33022100c3e3ced04cd529e25ac076197b829f7e72f0e8c346c1d651a8f858b9e9c649ad 30450221009d37054da1bdf68b47c90e4f607fd9f1485585aced8b7af1b886155e721db582022038011e168448964ee80efd2082107c33388f89fceeec3117a80feef45266602a 304502210085b7cdb09a88368c434ead6f1118e6b3d751627107b1f70c1bcab405f7460f0802206cdf8ec9cd451c41b2f6b505a1b57b411606bd5e6d2f3d7f476a2f1d8234e6a6 304402206d71cbc12509bce9ba3a383eaae7c2556ea09f9a7f4bdad4c86278c4f337149f022018580021f7261d9ed0c89a26825c3ccf4a9a854089a44c1c763e61fee1969fde 304502206740d1081683df37f0beece95973bd9680bdef31fb4320ef7d8fbeba9933a52a02210083755c688b4c3eca7d9ffbdbe4a206d4e0d54fb5f2f05d7a8ff93b2d060479d9 304502204a6d2761b6dd21b53401d69758036894f5d7444bc666d3318cd8ba2b7f020511022100cb9d62b89d88fe310b1ab34c73857781b03d145adf2294ab90ac421e82d64d64 3045022042c1c2abba3ae628f1ef5842ab05573998c835751b33874c6a30c9b392b7f8db022100a1e6d1f20779992bf7499fd09da9e166aaf467c776939c0b42dd1105f2b32cc8 304502201b0a2af7f975f6ec15800318502893c78fc743774f62228b4a3e72fbeafb8083022100a8c5da7e9b74478edc959f7b9b466dfcc14db1767afe360e84a82653b77a7246 304602210093811f9e2d22fec09e8902bdf6d16514d0e85472681ba694ecc894548244091a0221008d94fb1214f302e42ef72e2ae8acb8f8b7c7890ff5367c04f99d75c27827aa36 3045022100a9abab317ed1de30e6d89da2eb181e6ecfde617efd9bef69913a8990773d41170220358f40bd13673db28e510144afb4f546f86275a692842a0db685658bfe45b6b1 3044022002dc07a8263556fbe6f7ca3b3aaa727897862d45faedb403b35ef14e0ba3321d02200eb94a5726aaaceb85072ece6b6b2ac056118e839ad34962a6a3a7c37f66bcab 3046022100ce684b70c92af1ac2a3e1ee2b29f05ba7a29812eadbc4898725e61d56e0890d9022100cb0498250b33b1ab92638ac94bbbd126adf95cc7acdfb5691ef79e3ccf934783 304502210083aaabd6c08217a64e5a846b7ffd4a10b6cf3535c3fd5a366d9f7bcb946fe27f022047b3cd63191de7883aa77d31df3c24344ff88912e31ead37de36f018d1fbde1f 
3045022022f8cccadfdd0d18cf885f964334c184a23a3db2e433e55d92a87e2d4014df13022100918d7361f1bb1e64f8a0163f7cbf15c81a31a57956f707cf2d26be75b2476dcd 304502202545e5184f8da8f7fc6aec183cbb700c2d0152cd038239cb9c12973967712a7f022100e8c130c297718c1942c930406cf0c5aa0d2b1e7c6294d5f42c20eb0a3e5d2ba5 304502205237118399bfb7ef619dd1741ad201d674a430f49293b7e33a331e9065985b33022100e7b70f001278b4b563d6d4034bce421e49650019efc7e4eb8f295ed2a24bf709 30450220584b78a92e749c25b4df3856923720261408b061145881fb03e9c837e8e80d95022100a5cdaa93ddfac9d5e01f06204f0116d361ec27b2ecc27d26e8a630720b7f7dd9 30450220217ccafa12bf3971ed252fdb7094401f6f57937c2b273c6264270f33c33772a4022100a6f74cbd2da08e9c587a302e02fd8df104a8dd7f0e6534d9797c9842ac2a5f1f 3045022100b1581e4131da3e5a44fdb1340bc450702cf341aa45138e3bfdde30fc00e1993c02207ace4a2215db0b223fbbf1148ee59df79b555f0067a0bba4aa6fe12de9990dba 3045022100efb14cbbb106db790e5a401bd2e8d5e919cd53cf54649e9aff4339ed8ac6bbdb022006bc7476765f158619b97f8d60350627f8ad930e007ed27460e5672a4d6a91bf 3045022100e7db1d44a0d817b1bcf59ee7f4422b160ffac8da097447574caf228897dccb1702203badb86efd1f38ee2fe04fb382e4b6ddb01ccf5ff233b6d27c59776a41359050 3045022020c3f1151737fb22b9b065f7aa9348dca300286633957c2c5f8ba2d92bb0375d022100d9715e4e8d9de36c983f078a940a6641d75d70ae966140ff15ec2b2b41df29e4 304402206461e6263dc2af8613ff3321c60a133e7fde1ffebc19f953763bb5f000d2f1cb022057e94cedef67aae1a5e1f8b0294e8df49c49e1529381feece5484b106965295b 3045022100f5e02b36cc235b56e46d537e6bc312f1e6d96b7ee5f6d008aff99fce52e9029f02202b128ba2af415b9bb8315f915390d86fc5836db7e9d83260a8dd2c0d946b3e88 304502204420a6edb8a3c3e52c89591869c4324c040e13e22c4b0a1dea9785dcc1d6805302210088e36b9fa28e60eaa4cda589d6ff7feefd9f1c9561df3f0812a53bb3bb2c71ec 304502210099bec6ffd7a86b2b76f4d272061937a9da9c36cb20d9bd44f400559a16c219dc02205676fa8e0348647f23890f98d639c9e1450ef9e091325e2bd50b1cd4cf1e7057 3045022100e4c3f945cbaec2de79ae0b597bd94f53281aceb23a1d9e049a0a2aaa38117f9702203b8b1d89e3edd0463a89fe648f29dcb78b7c8b30a8c79dc1aeb4303a367410f3 3046022100c47f37b98108dd7efac7464e41062e8087cc5cca67ef12866d2d9b6b6a07f01102210081044a21731dac22858d58577d7e8cce8a720be2853a20e1c8c47ca920a2311d 304402205a6396793708de52335f53678f3c68cc495cb2e9ef327c3c90ab000cf4c332ea02202370b2ae5e824e762a8c7a5a366c8391d9abe19c51c8f7bc9643e2462ec6ec4e 304602210084744d6a9c6c397c56414a26cb93aee4fbd288fc43ddeb4ffc3d41a33994d99f0221009944752ac765cf79b037f6604e43ba03ff62b228942159069643711b9bcfd182 3046022100b1136686c718212373c20652978eb88814b9886d0888eca90d5d715e279d9110022100961e8d4de43fdb7d6dc00b43e657f139d57a5ff0aa2aa3e086a300e1ee99a68c 30450221008f70104776b6128be98579d8855b348b9bced26b9f1046ec14a2c0d1987cf9bd02200d02fb880a18ff2976ac85025dae446f0daea66307e11e1996854505011dc34b 3045022100a1838bbfd84087fd3f0f3f256a03f345acd995c4643484d88bab79f62b4a9450022065fdac1c462fab1f3f57129cc9362c0030e94283d4c75c7a0b01fa7afc69eea1 304402200291234d63ea1a42d9ad70b78bb575a9e2b08b420b769102228cdf2b5a2feb47022025ee722fe7a01582d74b9eb084d1d1187b9d82dfca59d0045eefa9bfcf9f3b01 3044022026b7f920e01e7e6d94691756bdea49c585bdf821f24cd4032a64a6a57906063a022036a7e71194e7076613dee379dcb98bcba6bcbc28dcd04645fa94360c797d3115 3044022076a91583699fc589a39bf948d4908a9a5baf8b9606f7cd3f3bf843b931fa66b502203d20bec6e269d49a1855fba48a1746385272501464aa6f49851bdb69c83dc90d 3045022100fd1e9c23c2be1ec28d9ac29809d2d60cc459bd7d77434500452aec1c7327772902201549c240eb17b2f3f4b0d26387747b91a81cfede9361224356559011dcf02cf3 
3044022044650dcb4e83991e81176a523c91a1fe9d97f4017facbdec3dcebb6745c76924022015aa930e7ae760b02db97f83cb6ad4603adb3f5940a342c2f13ea9efb00b5e7c 3046022100d127d809dea1cd3e09eeafc85d8486681496c2c80378d0251ee6829a980c67cf022100a6a5ff5fe5e2c27899ada38f71f505970636255f63f05e933de82477d1104959 3044022008d1248ef9f7ea821939f74c569960e520dd9701bd509ea629fcebc0491c1c94022017a209b1f5785cb5fe2bc1cd16d81a4bf9d1a008b9a4294e5fbd3b74a1f2623f 304402203b611df01f3eccf5794c59141470c683a104d4e168d7088d3d74218b67326b0b02201ce3ba710a3141c646911de0ca0e29b01a808c2b76345aa3c75199ec6cb93af1 304402204011056951058c38c2fa3f82481b80142997369653a68f40e20c44004ff8f26e022041bce3d82c813756c71af14b36dcd86f3550aa7fdc638fa4e926d626bd59522a 3046022100bc39e03711fb074d9b6a0a11371d0868c17f69608524e7437b00fb8600edfc55022100b29bf2a3cecabc182fe4ef6104f92bf9e7c51ebb50d9fdc103e00bf9d2f06b7a 304402205fd7b0ba05757fbdcd9eb66ffdba71cb9af6f4e7394ee4076e131d92a7600059022045b012b3062ed9f2ec6e85ae9173ff25769b9c09e7c6ef628da999089f4814ee 304602210092ad6c3d923c2d4930bf1549da412f0cc9e61e8891f9df08849d9907a308bfbc022100a4d9ebfebd8004144a2e5921cc36eff3e624c33d0730665b458e65e84e79aaec 3045022100c69299dc79f4eae6163f9c45967efbe69e7dbfb0e8618dad6d62aef26c115de902202c347035f3d8eeddde77fa24b1fd25cab1764bc4799bf6a700e6c2e53ae164b0 304402202a5812e66cd37618afa6444765d655126400198419958949117aad0f981a116702207a19693510833f3dacf72c3e10e54d36973cfaed688d966e85f3998970cf3fcb 3044022049c8b64d9265a841e4d9cb566f01f29ad9d1147cf2f253ee17a7981f7e02f31f022045049f08eed9f8c76ec4954d21ea245934117d57755e924d1aaacf7fc53d55cb 3046022100b42efc23bfa786b20222d5d954da7d70da86d90099488e0feb346b296b2da3cf022100b2e952038a15fb0bf7de15afd475f61246bcd4d70d2caa2f36fd0fe5d21ff5d2 3044022035af39683bc31a7d2deab2f0ce34ae69b7d51280a48366e8d564437ded43458d022028542b8c85f11ee5e85de390ec06546612c0a7b2912ee069d3ef1d8ffd45e1c4 304402201e58d211d885b694f3ebffe4b59b1269f99ce097335ef739a730c31166d33ecd02207d04ee8ba094c7f7a55b24da05766c9d7630b299337c308acbf183684bf98b1f 3044022059382ad896d92090dda07ed8e646378b729669cfdd87081d808eabe3193338ca022034395e2b734bea93c7f0a3c0ecbe1ad4d10ff18b7e3f9472dece91ff556d729b 3046022100be12ffb2496531e1903616fe58ef2d460e65b8e7393411f91ce4a15b36dd4bea02210080d6b180aadcf74af3d9248928dbb79e1522bd4a1f971db10c87aceda048532b 3045022100de6c35fe375f6abb2160b98a8247a0223ab5055840cfacb8565275122a628cca02206914b82ea4e1458379a4000b910823b20c78152ff092dd23c8613b2cbbad080e 3045022100a1c33acd07da2d95e8a5af713f80f63746fc14153a862deb215dbb6b2080b12402204038832789183c20d9184f77b9125d0a81e68ab30ba268a0967bb9ffb12d80be 3046022100ce433511c1c21df9b93d781c5d7258a0b9b3817513afa72780738c74fc709bb1022100e9ef6277458e4149eb86e83548cc77cdaabd42526b0be0f00ade913c3dae07cc 3045022100ca024270a2b059acf506fba5d49c7bfb00dd8c49c18e550513f06553d0489ccd022034b93f13de8beffc889ea3f2be947226b0234690df80dd0853ac10efd93e018e 304402203fe7d52f8ba1cd23b30ec2f3bd5775c3a28fee81bef9c3a6283b48447a6fa06f02202e77f0ba762ba6e3c564b6cc88be89e3026fc29a82c40f6e7b2243aa48033095 3045022100c251705a4f0c4643cdb7201de1115287148c080855832b41935a00067d950d4f022027982502f9161e6983a02a760482f5c4731e40b972f71abde8adc813ff4c1af4 3046022100c16a19cc6c9ec3c68e05994bbe21e23b7ea763508494270df7f66031495f22c6022100b3e53faff7843ce7b639677d62205c06fcd280b92da9c3a517068e9e9ad97112 3046022100ab25ec745ce7cc6498f79159dd2981c7cd70c6990b53f0c4844159e43f0fb490022100ec99cbe6cb4487f1a6c842ba03933ac5ef0ea7c29a6908ffbf41e57221587d8f 
30460221009794907e4157ccfce3649df2a59e2a45f491c7edf8c19b7b4563203525f18a97022100950c80c884f836e9c43aefda8efab86895360a623a2d1142521e08929eadd78d 3046022100c1f898e8a9eca6bbe3152b74761a170110f1de9a9a94de9ee73268d8ec628de60221009f34a2371a562e90284990ed4d87c2ba87bca93b4585d7cd447ca486a7b0b9e2 304602210083e0120094a1161e4e2e70f48be63a5ec609840937e1faf9472fea791560623c022100da7c1a7ba5b42d3e90093be9eb3d5002233af237ca8268206e713a13f03d8d4f 304502206d753fb2561a7a9db2979836b20374bbaed1fe62a307b6e5aa81b110ed73396b022100daeef310b3626dce709a2bd6b8c727623650afd4a51e2f72963fb8bec758c19f 3046022100caddb045716a49d0155dbd82b83782bdae284f34e3f076bb760c2aa3289fa72c022100da53e98d60a09fd73f670f8eed869e02c2e04190a51093fdf9c7dcdd9ac89fa7 304402207212b2db76b46e6c5fb5e81ac0235ba82e1488b8e3c547890c9eb4512d6e492602207a3432ceb1244fda2cf498c40cf8b163d25cbe5189b055f527fdf2ab36169020 3046022100a758f6d2351141c343a75a3f2521895b16562ba28e761489e94ae8b263eabba9022100bc563c70555fa0b5f2725f30089541f189773e90cad560acb40743b99078250a 3045022034be50028f67df538a124836f188f0b5f9b61feeeaa90548847ab4ff9e086461022100bbace9848ffd5edf2296c8ffa7b091c92c8c53348ba0d28530d2fb4b2fe76c7b 304402202ee378a1c6182c9e882b9cb33b042ec799b724cb5da7e1df26bdb120d26753b302200afe8a3ac73590725a9fc513b54948f0e811383e5f39c529895ac7f44b1de67c 30440220351c452ba3c8dc3252ed0fbc1138c3880efd62705bdd98f6236d160fd7964f400220105e944788fa4bc8f7dc72b8e41ecd54b874d31dfcf802f876ae3dbcef5c8ee2 3046022100e8e87d7526447a99aed1d4cf51da83fa6416e66e305185db00a75c84d62ece700221008587c29992e52661e635d7321c0fabb3be8e76c664ed2309577a12d68b95198d 3046022100a7ea76b9f1a15c40c830579a47e766be37d702942dbb492f09462c0eb22b5ca20221008d63aaeb774d25007a7ea1868d9040cdebffe2f350f8e418262ea77532a78345 30440220202098007b7900d6fb3f2ec6c1974dc210c38d809fff3d320ef2ab6f8e7b4e6f02201f164bd98bfefeeea1529abdff2f33fc8150d59a26e7f52b1904b29c68f5a7c9 3046022100a50af4d7e18a97e5f8cee228b44d6600ee92ff4b5552ede2adfbb431c64fa572022100963de9b6ffb87b8cfaa2737b0296f7296e9ed9cf2015565603ef38d4247432c8 30450220153c2b498b526147660c992b370250912a4800ad4c49f7f935c2814a94c28a92022100c352bf6838b665a381d433831ce1f578b9d43c77afdd0476fabaab397a5e9fc2 3046022100b6063aef0bf79adbca8a4346f12f5e066bbe5bb02a57fedd199c97e9268a2534022100e17aee776c0a32ee609c24430d0bfe857054dc3d7b2c43e092f0783ad98b0904 304402207fb82e22ed9ab82b55cfe16aeae73342ed1c7071670b0e544bc54df5e52dfb6602203431a1dd5d14de0e09770295bd83362c19cbfed72adeb449ad7b86e36a08ee35 3045022100f78e80ff462e338d97dbf48aa771f7b211517b6483b718e13425b2ef16a310af022010c7611e3b9c580341bbdd6eb0a235d1583bb6533bae84ebe13b98c3c5b23ab9 30450220723421da99d286061c5acdf41f8cc3b0e264121b88e0d83cd88bffc02e150077022100d558a95b14c13d70984f4ceed787eb6f956839554cf85ec659f61ee520ef1201 304502204aa44f2b3c2cb34f9221ba320e6101b895e5e64c56c5699b8e687a3841cc28be022100ef8c30ba97a7c4f3037532963e36f9ff88a5b1675dc4ac0e2cb69a5400b70743 3045022100a05a968ba3bb53d4be556b50b1c76d962c41bfef662092fe51736e2e0470ba890220174a9d25992541daff089ac7701779c7672d6c16d8f17d4b68dd54da1d720a0e 304502206036e0bbd741f5950f06d23dd7f8ce2ffe22393216993d6f1c08e6da0851ee4b022100e7606c870a2eca5a64d61641000514c91103f35473a6a448b02938a43273607a 3046022100eecb907b9685bb7aa8f31e94e93bd591a3c942503a7393b662096c68ba2a2d2e022100acbc6dbee0e6e683bf8388cbb4db72fa871f66b515f4a4b34e23a7dc986fb6b8 3046022100ea51b546aaa0b97caeb1bd504b756eb443d637e6f2eaa7af3d8bb99fc5bb43e0022100d57136f3f153d34590a8529b63629c2fa55580862ef7fc42fd29a9ce0e89cbff 
30440220376f042179f84b3ebf49060ce3a962301ab79c79795140ed3802852a897cb5c402207fc4c97ef4cb6716e266952d6f97d361a4c02451a35c11fc532d35179ad422ab 304502210081e8e3528b209761c4dd7579dc4cb4c922093f8329c4f8cec8218b0870c2aa3402207d2681954fdd814e97b628512fe640203be4cc9bf74d66ea42c540ed77a3ef52 30450220149f1679f22f73743bb154ba11837f4c652e7bdd2bd53d85c03cabe1c2cf17c6022100d26b8f98c561258ebec3ec64cfe618e82f71a965b53a9338e56a8319ad984c7d 3045022100faa5e1f6f390f07782ec74372ef22b162131e52f48a0cae6b8ccce30dbe50a000220594c7681554f96b62bf523cb9b69d01e32415a2f8d15171a4078dbc7a1864112 304502200c3ec0a5970599c4e160295af2bda9a04b9bc923ec7362b80d44cc3164a2b717022100cb5f1d5dc34ae1294d5f26c5139c7094eca175a61362a87be5f6762b612cdff4 30450220526220acf2d0dd038949c721b08818ec1cf47938fadd39ab4e723f46e75a68df022100bfd28cdecd0b65619e6f9db24bc874e84686fea82bb0b5c2c5072c32d156d924 3045022100afdb81295510c639192647155dc2455a1e45dacb25e01e0aa39706ac05c26ea5022008a70a41d6ac15d68180cbdeef0d601951ac6622771ae44429bd2ebb70f9184b 304402200d6bdd1fb5fc26c1d0e088febc2bec258047500483da8d465d818201ba82c819022014142d220237aab6fdec3675f0323abe16cd9ca464c4389a6572b4ad05e05b0c 3045022100d74129a0d85f6ed707ee7f65455398598aab3c09c58b4f5ff5aae404414b273902207aaea72a5622d54c35329c0eed8069de3cb9a1204e98fd281ab5a9d466b93095 30450221008840582c7ebbba833250406d9c0dcfa3f44759ef85536071bd2e049e659dc08b022031d8fea17423281c121d264cf92a76a91a31b7454b3b6a01ce931670d7e75476 3046022100ba797877987e59536f1a166136a85af69b27b5b173f57df6c4d2b333069f02ed0221009ea7dc485b40dda584f9b730bf898d5c3b1ec57676867b48f8a3313ce6b30d04 30450221009c05152c6e1c1a3ecc218a5a42e1fb07aa92f3f8de636932bf8336d30a3818430220083d893af59ffea54dd7176ddd6bd796ba999c5b3d7e71ed85378ddb7d645ae7 30450220774dc65cb7fdfdd9818cd0c22474c8e07a56a738ccdeb22057ec0ce7c29f51ed022100eefdba0aa7400779168b74b6a62ed63a2a5e8ddbaaefb196d156d63d778175ff 3044022069ba743a76bca527aaeb3c9643b9df9fc6fd29a4e26d42e6d12e029f2e3dbd7b02201e287e5c35b958d549d34fc33c69aac2ca065fd94ca5d74fa04710182c96fe62 30450220793f59cb938639824b19144f3971814c0ad3168835af92a5cc8ec8dfc5be98cf022100ab4a11e211e2bb73ffaf75e1275570646e138caa30c3ee5c18a06c1b74812140 304402202c36a65f000e68179f3607b6f5f6359ab83a79697987e2ee21250dad326c9394022043acb8a02b524cd0bafb83f3db42aed408b4f2f2fe530633dc86e273a90c2fcb 3044022068f13694b6d27bc67ddbd234915f16859e7261928c792327fc65341427d4acbd022065cc9463b9179630d30b80393033cf08ffa8c7d73db29a376207eab717ce8d2f 3045022100f110ef0fc748f4b4aa8dc430415d0ddc14ca24916009edbde3084212eddeb558022059a8d7b9cc1678804981af94ceeac12879b1ca24f833ee357d5447e4b6f4356c 304402203bf11cc7a561cabf0572a1a3388a306d2385ddd3dd5be0df2607c3bb2afdbd0702206bff7c01d09d0be565b9d24bba22e0d6081571a504f269d5e0d7713cd093914a 30460221008804bd4abe1d1d0141a3f23555236a0bc0cab3f7fae2256c95525f7b58eb6c92022100d9fb423af443e282efae36be85505d5354d68ee2d96e9a89cffed703413b1514 304502202d7ec00473676ba7c685c17feac10b3768dd905528765df57d061bc990bc008b0221009935ad34907e74cdaab5f974c82c6a21400d3a9102da14e9b4927f1313e3df11 3046022100f3e13fd8f23aa79d27b71f64f7709fa3b0ee349d0523959dbec4fba25374dd78022100f2269ff5d9c3a5af2983588e008054aaff6755a124de80d10dd9dd4997f81ccb 3046022100fbe088df7baea638ccf10ff3513bd9927eb7730b5bf7a6b5d9a199986cc8dc92022100d8b3c097ba88639466ab07c20af2ae2ccdbe89c284edb2fdc9a195d1f490f528 304402200c592d7b4218138f7bb5e813df2b382d7236e048504da098d601042c4c3bcce0022078239c1fd20ae10c3e66058b47ad46f0c8c5237da79fc8dbd7a88b4ff90b1e57 
30450220047bebf5dbfbaf16e5b96e67f22aaba077235cf4cc9ea37dee2cb05e06d359db022100bfdeb3ffdccfa54ca6785bb7ef046a0cccbba13ea998381c7c0ae0ec5c428e56 304402207189605025688bfe71578a2cef97d15930ae354a8fe71d849d4e54f92383346902204cb929770b78c6f18a71080a0d8f8966c8c124ad745c1f1ce8e8b8368fb6d0ff 304502210097aae92d7d92e29fe1a5662895e5627894490575441bb50ef7f9a2e638078d96022024063cb1576364d5918ac4620c83131c8a72315554f21e751e8028bbbb054603 30450220153daf721d52db24c770869fb942116496d329ac1a7950713f999ce174078aea022100b428ee9a8c039f1aace98dc9800b4ab9d4bb6129757f83139b232e3a226446fa 3046022100a0733b67fbc5c5a6cf2f7059baae7a9942876df4e844139a7b0861e731302d5d022100f5a70aeabefa7dced7353863df744ed116cc72d9f5e583030fa17418e5fe8080 304402204f5d2843a3cfdbded0cd0ed34f415c751fe4c78cbbc22421acc28a631452b4e60220339fc5998fe1e60156c9f3d5df26463d72f1150b59731d093cac1d22bd5b73ea 304502205a3202b5228b2550f43547c9c031f316a1025009193a3b6cdd0da412069c3c30022100f80fbde3793e79ff79cabcc7d623c15302547ff7b776c41533b7dd550af58b10 3046022100dea2cf2d26d342d6d423735d0121e5c1ed71854ec6337cc5ad440e09a7a5d7f1022100fff1b769015a9b66df6768d908a5d735b0bcd9de1baf52255d752877f8547e8e 304502205b502ea2b520737ce0e68ecd6ea08ad1ad62245c89043c06912c649b803d4db70221009fdb318152bff2c207731a880a66e7aa4555ca4247c3dc08698712bbe6230222 3044022055a4e5b285c67d8b63502c644a15aafd36cf86e641537f56b8a4c95ea5058a870220551508c27148def0a8d525eaa73d8ed4af3d57ff095d1d16299317fa827090d5 3044022022fe5390ae6dd4e71e3f5389b982a57d6bfed4bc2c07b709ee10ea130664987202201d4d319ebf4d2941912cb72aa0094c131b07c790e8c2e5134acffe2b970d46b4 3045022100f24e01aea9159635247d965f300fc0faf1780a027d84950c8470e0f74ec6a12c022060bde2e28d3de27dbbce3fe51417fdc3d649373aa59e9be0d5f56054c949cced 3044022074b5d0dc5bfb0b166345fb7f44d41b01858da43c8e9a3f75ba927153ea7cad7b022038b1fdfd0f8c65aa7144759ec03d3019d0a2ef24d709d7405e05c98556baf6c8 304502204df6539fe5938f7ea95d3763dd2f38cad36d99bda2b9cb512ac3207762658d9e022100dac94ea37047fcf40c579c199d6a31eb8a8da49cd6e02d7b740ff52b463e259d 3044022022c7d4215bf6a27407f2bd08eed1ae2d1ef79ca1111dab59213f9db6ad35c8d302205c6d18a7fbfdc5b5417cc4748e9b8473a14b627370e36a91a5f77f40a4b181b6 3045022100e2703af91e6fe3924370dd3797c8202dd88cc2a75b6b01c6a3fd7d1e8704f2e30220719066faca9eb0d2e0c2fcb3d9f57fd164955e30168bb49d2f8c440d51e14d77 304502202016f46ad2d70123b1a79f0bd6f428fee478efe2b244941e1f3d53abc0ca4815022100964a232ed3573ed7c959214897ac67e88be32fe2af6fcc03a9b159855acff86b 30450220523960df508671c860e3dbc19912342c0129d8f2723f94e971a22a8b5b9b3aa4022100a8c011301842f7f221b40a7c31998977ca8d3058c13730d325386d5032d56516 3045022100ce74f6531aeaaa5d3e1e6dcb3c2b25cf89262956a64bc0e0e3940f8a9eb5eed102207233c66ce82ac3b6cfd6f950c03562a43a0800e6796f7ed252b87d67fba4727b 3045022100a2d3b7802aec6785d7009f76f98c73a30cfafe6e71957cce5da12ef500bde71002203f3ad94a3ecf36683e7e461445103d2243e6db9b1a3e182f533ba65267a2262f 3046022100dbc935a3cefb37e4dee5dce30dbe81092766612d6c1e24732d43cd7f1e99b3c0022100ef45a94212205a5c4ce2b292d9c407781cde3f7fbac0b2bbed39fbcc72acac26 3044022059ecc340bc3218f585fbc85a5341a8ccd407050a419347ce41380b5ba8e8e8f2022065947794da3dfab33b6134a3840f9a3fb623dc86d9d8cb01e0ab5581459864b9 3045022059ca4d9022c05a383c1cdb0d91343e40f657462db87395676cb9a63b70906e080221008bf9d0846da0f344b4edbbd5333e1d1b3604b5ee8454f3af3d712073c0efcaa4 3046022100dc5cc7b9e6797ccadf8d4bdfcf49e188284cd8a664354d2f30a5576bbe14840a0221009b076d7e90816b1d7b1eac399c95e6011ab0a7b52a40f0fb6aa19f89b20191d9 
304402202eb87d36b6ed292bbb96b2249e09ed43536bf5d937a7159675f59f664acf4e8a02203e385ea7419d79994a4d067ba74cdd01083f6758586021237a887641cf2c6ebd 3045022100b604ef707f1e204ccf1b438f553f8c65624632d6b2bf0edc9a0bd999756a5ce40220136074dab6b27fd5a4b3ea5726bf570497ae1a47aa1446bcd2ce503d823003d0 3044022071d9c3924582f9fbf5c9cce562d44bd4fc6a8dda5f8bcb14de67c056a0e6c9940220474717137acc65e27882a6c8b4bae3e2c103172144cef342fa7499a29bb56dc4 3045022100ad5ea02a6e72c5aa5bf19c877a5c6ecb36a38f8d0933cd929ea02606d3e1016a02204da2b475de3e2ad8833d65c2cef1c844d06771930521532840fd047352f24633 3045022100b494daf25acc648a8ba909c1ba0cca88e1942844e39c045d5b40e16ed929a23a02206cdbb1606ca03eba7916985ce4c404b77dae2d33fb18bcc6077dfb2bbd74da22 3046022100aae1ab9f5c92728cd21205281d2f245d9895ffb3a696225dfc59fedbe15306c0022100c6b101c179cd750c71919d8e3fbde6ec4258cf972e8731010a9f917b3edf2577 3046022100fc63e419602c9a6fee8a3504feab0fe30755e1fad776c6c072392cec6c380827022100e5bec5040d1d72674f184e4fa2b1732c9d873499dd6952fa5e9cbf67f75287d7 30450220164ec6ad10f014041f7b40d26f3a73c1b47ea5ee641d07146ec0e74bdbc1a135022100edb27c5d455b1056682a403dd8bc754051fe62a7eac80c047b4cdb37bdb76257 3045022100ca2fd6b7e39d5812ad83282281b22551c8217e00ca1686ed11461d8a6c55b0a102202695984d1505110e2e425fc622652c26bcfcc87cdd3dc36b933112ea1e5a9a42 304402204f20d33dbd67d98397b0c27e4d452f567cf4f5e752677a6638f243b4843a92f3022074b0f6b09fef87e6d2209233511f13ae0a5f5587fc2edb9d9cf0729a50bf1202 30450220389de2b23ee2034b7b5ea40f6b2f0a68a9bfedd2e4ad0fc4061bb1fb11bf8ee30221008f54cda289c6ddaa1b8e014fa5af02fc3d2bbee3ff5e7a03504a5b7c809db0f8 30450221009b68adb32e9f4ef021316ab2e6bdb987f56300e0149359ba246b3e70acf5901b02203a62e7474366788b8fab7202bffe18c1b81f6b4990ee7b61c841eec4f1c10f90 30450221008b24ec1a381edf9fd9daf7886cd5f66a101cfbca55f4e160cc4e077f7c80a36102201d73b83d93201e7e83bc7ef766ed08a22272e90bc0c639a42b6e6a293d81fafa 304402207dad2a7b29b7228034eea1b9e1666666d52b4028dc83f0ee5a90afade176795902205da4524dea2e4614c96f9cf66b60d961e6f5e0e3ac11467800313800a9ea5d0c 3044022039703f5604636c6845f964514774664a1b2932aa675d1de8293b52741179401f022066884d9dba3c33c3a2df92d00ccb66732c739fdc1f3483cc5f3b3ed0270fbd2d 3046022100dad0cfdc0110af57bf4fbf651bb77a76c44967fce83189ddc7923d325cb3abcc022100e9cf87befcdec6f0e06625956a2d4ecde646a08a2b0769dc4ce3e3a32d4382bb 3044022012c335a4d3aa208f7426ffa6fbf2b38919e6b9a6add51a21fb12c35247d41bda0220181983fbe636299e3833a820844d6e9bd45c2919baf70eeff934c70d587f93ae 3046022100ea0c6317be0818cebf15d3199a15a20eff1c743317994510f102eaa564e350da022100bb7dd29dbbae78aa0b59428b44ba3f0de927103a19aa4d22e3125999b7f35caf 3046022100b80aa4bd8de2bb3ee03f24219d846e42604cd6511cafdd0d9d145f5994567348022100d4a128bb40d75c32dd949fbb5ee53ef0ec12417cb7bae14794b57570687964aa 30440220226068086f71d507931b21ee010646ffb2cf1a24059ac7858b922fd274de321802203f9e5ce911688c39ea0ca4effb3cb5bbbc75e15aacf367e4f69d02a34a71313f 3045022051fb83e284b5e509808b37d63ba30a83d981e9bad30e0d52a1c9c5c6e65dab2a022100b9de4c606273de041332ea80bb3b2d810c3906cbdf774cac3d2f3ecb010111c0 3046022100a926330c8dc9be2e19440f134966d656571f875cabdd4246f72d1c03f7e3b6510221009d8592f163deee00a86e7eb9a494f7af584d9c7b50d47a47afde99bcca8a64f5 30440220683d67caf6141ad5e6b0cc7282f97030f11c6c28d199fff57f2a9a652036152e02203989234a0cecaacd72e3102e0a70823d001a6da0a730782afe841bfdb4e9462b 3044022074941110f813822fc2ed6f963b89759c916c43d81206fbbbbe1825f46c2c675a022028d8896897a42a48b720027a9a88b251dc2c93b84a0a6ae21b1f6fdfa741ab25 
3044022021f51a2349d300fc0bc1d151d84ce747a31794ba152b141f5a19aaab982059b50220740587781ea27d2895897e5ad744a6513e23964f51c1ad1112bdf1b5395acae7 304502207368e7c988b631203a7254e1ee1fa6631ecaf75209a581c4ed995e4d126e7cb2022100de8b72e7ffa13e7a413d37dc70b6281c327a5e3c2046fb6cf3f616d108bcf7e7 30460221009668ccd2a0f29837057596a68c6d047c43d359a704f3207774ba339974a4cb800221008cf2de4b46c0a59a5872ed8fb316e9c0871db50bbf66df8f08ff474ea1392573 3045022100c53d2d048324f0599a04f6ba0f322786a8dab2b6475ce6ccce19dd5fe389f76602206f91d0a1f7d9a70a0dd97a24ef2a51be5d79ecc6b680eaabc016e3241d291197 304602210096ca46f3a10d13a003d603b3ba70e25f4c9de240287fa3f41cb51a60be28fdec022100aca44bd08222cfac3345555f4b37def74ebfe504f7287156020b59fc8dbabe4b 3044022004ce912c468d95cd63ce7b02037310e891de68c106d3c26bb05c979f9aac50d202202db6e3cbd25ceadf7a9734e623b80ce4a7f74fa4a559d3028352e23a0ff19661 3045022100b92549ea7a44e5b6525c0fd39f3e33005deecae20b1592e721d223c94b38dd2b022026a9e79010dc5aadd7c881a8b55b44ee947fc51c7e3d7351c72311bf881a9a21 3045022100f9dc4ef02b92c5fac31a193b08c4a22aa25885c1129e0530f193a1c97ad6faa702202f553434fcc11fc5cb1d5045757230d335056c36cdfcb9be5e315fc058886368 30440220659541174800d9cc73d97b03875205f3cf1d763b3a9b4a385dbacdbd520f344802206fc83de653da1e31d4802585336888207fb3b51b423436a87d704d4309b1ff41 304402200a626f20bd6baf1af31e65bec67ada91099b10697377c578f394529757fb4f9a02204b4ed552329807904b08bd2e7331ef62999dff53312a5858976c78bd376af109 30460221009c027fbee5abc4b54fbf54a66d55cf03dacf9b772659ec4bc829cc14b9c594a50221008e051bc6d81f5e24803765f4c79441cb3e9649905e39aff68c93accc163ccb58 3045022100be442ab4fd4c99c9066f45d9bb6dad881b15869e7f0d6b3db48685f8db56e505022001361d6988e5ae4f25535d43648d0f6d7bd6de42437154bb17e282c72b944517 3045022100d217558539cd6fcf6e4b41507b836810cf946ca3fab26ad4535a8c0a50da402402204c9272f1361aae9c3ffedd2a03ca7aed04de8fb9d8a99b05b8a417c749af86ce 304502207235d50735b2258322bd64004324307985bae61532d051a2cd6ae34745435905022100d3fedb58e09165136b1b9dc47c2164930c848cdd0cda99428260ae25a8f674ad 304402205fb9e73d479258b8c0f05ac56e90659bac5f052ba0b9c2abbb7f0da1aca8ba9a0220652c8f319dc851cc638fd6e7db210b302960d0e1812a35f27b780011aff28cf3 3046022100d1b020e6fa9db87eb79b6cd3bc4aeb878934f28c3534f53c8c9ade5fc638f3c3022100ad03962fbbb666f95ff02545db84560287bfff7bd6fc4e815ff5b70c9c9b80c7 30460221008d9575dda160464f3ea8e9d4b12cbb5f9416822bc9c5f88730c00e0913107d7d022100f5d868a899ccdf2076941790b87f62a405dbbd5837a3ee3e164edebbe013a7e8 304502210099c5e460ee36a59f76002cc44f17fe96079793ce41d444a128639e1de62c50ff02203d1ffef4a32adc5c872beb3a4cfde5402d640ac161a818e2dfeb6265589fd337 304602210099d6df91139bc6d9877a8309ece6c7792b035c42cd32775f7a9c042c16ee8705022100a225419de4d9ede5030916e59c1fce11cf97559283199737594e89cc00a0053e 30440220754a8b182484a63069d5cd84573f375afd51f9963f967365b96ca350c7f63a8b02202e3cd6498daa22dff956ae2cfcf23caa921cc339de3346bcef385874cc103a26 304402203d65a32ed2e42165f32bceda7508feb7878ba2e65a7316d50b72f2f2d007d29202201a42d2f8627fb094fb9fbac4e26a6c485bcf3546ac3141f916d10c0b5cc7dd48 304402207765e96679c4110a3e2c24e6a90afaf2d5d9064a2def025e9429d86e48ebeee10220422b7a8389ed4770cd4229f3abcc7a256f4c082dc01f349ac92b9afaeaafd9d5 3045022100931614bbea9768637004a929d95013a1fc413207ab952b5c69475d3c8322c2d902201611499c333646b5a35f3638d01f6db1902d93b64e8d6c773ad88e568abd04a2 3045022047f2de00f4e2444bd30297223d6b19f4370de15495150000fc9949ff0cf014b4022100db4b35c4672e9a4888bd1074f50b9725105b7c7ef8191d3b92eebdd485e50a98 
3045022100e97041d469783f375debcde86e9e8949fad199ef06b2047e61866a9287d24345022034b4c9c6610ebbd68aace7dc8574260ee2f26daa3afd98051d1b9f76a0687c7c 304502207035093bab3387ae4b86804b5a30c329c6d8ea2b55ffd095439a1c5a0a92fc6f022100c57ac12860df9c56d8dad5ee763393ee685f687a546ae7e606c900de6aec0946 30440220595019090f882f15888e3af9a6e34f1c205212bf9ea03b88cc77b9b77c406b900220101f1656aef33945b2408019f45126f228a7a331acafad3bf0f0d917d14fdf2f 304402201d4683ec21d49a11dbe613475cca0b7df20d9d89f46a6d27a8439c630ce9016c02201db85d1a8a87dc13cfff025aa7feb8d07e6dd05e4ae0995343ab866d43163a97 3046022100ee315dd68984fef5d8ff2215f44ee52f6c5b78e078913f7b9777abc595cf0e88022100dd8e91ec6e4542313738d324e4e5b0e3b120cb2a860cdc8e17c73c1b263099e5 3044022007628c11505c7443e74241a73a7048fb8c000b9c1718422d843e7913455b40c002202e8113273d95a4645a3cff96543df0a15160cf5aab1b23331edfe8937820f89d 304502207a2e0df5af623707e20cf43ccc250e429b2fa2cfe3137bf2c76732656a920704022100a7658a7d38172a34faed5ca966da082ae00f7c84a4546cc222a7da915235e397 3046022100cb0f084a3c2b97898f3be22404998d4eb1a7a9af8f0c60fca50bd05396e94d440221008f1e46a709697c395fcd54c724ed8f3185abb4ab7eedf1fa8abb6c1711a1e817 3045022026671f501fd077a966d485456c35d1dbece9e208c9be8bdff1b6112f92fa4b9c0221008b8b453d2e21e646220558515df5ea96a8f1cb257ce714487c868083826850f8 304402201b884bf13cf837e3db7577621ad033c0a3753cd6c35c33959889c4bd9fe0172202207acd1836c12fa0a62f9c2bd1d85cc331cbc5ba74155cb6b84fc0d6ed616e5382 3045022100855d17333f243eab00b0240f2ba70100d3cc2d7c9b9ca460f2578fd5f5bd619f02201b9425128fdfde4750706c8d795ae1e3cebff3655d24e97323928cf3e08542ef 3045022014d6ccdcbef2f2be62ca1d2fbb049d31c885a3fc6c93c6a8f3a57ddba84f4346022100c75928c029b6ee948f415cfe23bffaaabc5dc8d900802e6288ec4abd9d97b61c 3045022100d95a037ecb99d010cbd946d552e47d80d79230e0a3c22167070e0827b9b4fa470220541a7159f6dc64c77c1ef8eb9113b5ac0d5ba055d3f3433bae4ea8e1de620544 3046022100cd9a665f96946570535976492d217ec16c03cf056a8d320ca7713591f35e89a8022100e82ce7b0c697e6479343291bf0eb60d7a1cc75c4e734b39c22c98f9232da62cf 30440220406792651de8c64ec89f84b899b1ba613d578b5fdb1ecb4cb048fd6a0f750ce602206a3d0adf73f5f99b59fba9132323fd0b9786c9bf61d5af1f3f5d49d00dc20f26 3045022100e933b1392818c2ee6b08d5f5fe58b9c7f1a53713f022bf95d121648502e3edda02206570e24116809f48162a8f98c4595b222ee5513cb05ae393b06bec039eac1763 30450220457240b85b09ada6c5651ed2660caa2507f33c5a60b7588929eff6fdd5b6c254022100f2b1a12dfb2740ec41dae689921803532f682062f746c94e4bbf7d00d42619d9 304302200432f7f11791ce4f4c8fc29c3164428c86190053a415d53efd50becc5a9dc56d021f0d860dc95f3b5f494191fc1e583922bd986d29f6379efecb370962a939d653 3045022100c037e9c60a01d511ea91e0fa1bcbaca2d7f2ad4c47c06b04fbe45a2c010354540220042bb3a074cad3a453cc1030b3c338ce640e5184cf4d83d9bef3208fb6e44716 3046022100861c61814fa05809411eabcdb441ab89a506529fdce6c7922d631966448c1882022100de6324869d8a41def0a087c05e3375df7b7daddba2221273274572415759dad4 3045022018bb19e03f94fa4b0e2be313184fefffbf14af92a665451f784bc2a629450364022100f1d4b5391aa59d745fea547fc99076f43a4b442c1d9903607bf073d1305450ac 304402202dafb214dd2a7af1291a8768633253673dda23dc418ba547d362b2c234b01fcd02207a34a704618151b19bf962ed6bb5746a602a6306897c8f61c868cb712253af62 3046022100bad10335cafeae4c9f5adcf339d4f5e85b8512bbf02218568191cd914a9dabd002210098ed56c5902369460440ea932b2cca165520168d08148c6a703fd1c50c6c9cbd 304402201499033034e62eecbefa6e79199c3c7fbc5a283d72dfe6f873d379073b40a5bf02203177a9db1ea4295f17cb37b5effdfb2e30ee9eaf35bc63a92e1a478e85b3c211 
3046022100a653993e72d3b078b14342b4c134b8444734f97b36259dff897ad7b32cdfb150022100cfc20d427a8460ea27daea5f6f8269965674cd579f165bd2ae4a5b5b4d53fd5e 3046022100e4c4f840ff3e4d224e222ee3cb375f8d33d82b4c59b7fec01a65008213ca52350221008deee0bfd5ed1ab0a6f3b32b4821223e25b174250ce1933416e8146bf20f090a 3045022100f4d3f620584e58f59cfdcae13af303c0f58647259683d3a8cf5cdb5a49b2b112022065417d800c85678e386025d80e1a886e8dfaf99357f223b060b9917203efda73 3045022100a3abc65fd7d37e8ef2ec71ce8a4d300257ceaf953af40ef5de078212697b800a022065651a96f6120ec14c9301359aad594802f6b45a2c808ea04432506ccc002468 304502202f31e48f24edd70b690e6d8bb65a798ffc1decf4526c332ff6bba2b927fafc1a02210096a557b73d2eed199a799ca558faf7705934ecb599aa587fdc3721c8d095630b 3045022100e1ed8905825ca2f961256180c74428f2f345d44b21cdebead6fcd987cc42c53202203803747b1ba7fde8f1efe167777c90c479f42c20b72faf5f37e704f8fb5ae3a3 3046022100d7e85853bd45a8efdcdae74af740b4574eb864045ae3ff1b67924a9a28d03a7c022100f62d95a9839a066adf71c1d9af70c036a7c436cfacda716e86b9d1cd1481e9f8 304502204125acfd98b9f007bb9196e42f4d88f8a5a0143007ccb2d00bc4ad1523e39d3b022100b316bef5308d3907de01715f9571da40dfafc081729e65cd5d762626aa76613a 30450221008e4261f0f2d19fe63f9b5090762d597d2d77ac9fe8fe9ec351750271e5f43915022039eeaf6d1c926c2bbe478ca7dc65862e345f9f6b4dc5bd9d5a96c554c1933320 30440220190ec54a0611de5e55bde635eaf9de4cfb9777e7fad3e5cb1088e26f8f49fdb302202b57355e1f271c9d584e2ce7c54e4b8b7d79d4db31ee56015cf200f694ea9410 3045022100968061f7c24c9aa15f6abe085225f827571b3acb9a0d5c1abe483445333d670a022011b226af24b01b00fc2851db6379aa33c6463272db30a836ba8f8d13146ddf98 3046022100e22b3cb6fdb4d934a510d6e01a83272636f9792c31c168848c41acc14763dbd00221009bbb46dca71af93e9b777bb46baeec8eb09aec87dce4c4cb7a7b03f3fb78d18d 3046022100e077543f5fc42180f8e7fb8f6724ba625f1a10c0875bb0b153abb9fe7cffff4a022100b937c9260f8f87dac6823117248b9eb28de71902ffdcb26e4719113801962782 3045022100960d2bab133e3a987a9f486031355e595b53c75dad29454fc6c8aa5d8f56dce00220091098a657c2c770b960a5f6eeb5c1068bca6834df3a779bac64f97fec687ad8 3045022100cafb7a2c73ef4fc845004d0f1d8fe024abbb870f328d370e889a133860cd36d2022004ef517ea85ff2eb2e3c628862ecb3c84c47ee0746beb98285886151f6765139 304502207b33848aea5609298ba598615c0b4a491b4a1a5919a2b3b99fd681e06f9a2ed1022100bd02411850c4fde99d8e92c88dca66e6a0ed5e7e6d4fdf2d7cbe735d7d3c6b6c 3046022100c8403e0498bfef9ddd8bc0b7ad2d4a5561cb4e2ac293fce959b8c1160e23b287022100fc21b3f264935c1a79604f888935cc66b9f72e5ef5d91e2c390bc145436a10c7 3044022069e5356bf280c5cc1a2c00b08566350511ec6699ce4162e1256c6f77d879e57f022044b2c3c6bea3d34b39872629deb2d501b4e8fdf30a83c043282fc8901a06461e 3045022039296133e1031c1593b6dc32f128d41db459ec67567d64f0200e168a6f98e3280221008eef0c6056ad99fb196a4c066cca6b42e68b9a834fcf8a615b7d6868be187fe4 3045022100bf212632a7d0c30d7b3dfac97bd01ff13e5937c34a83ccb411dd48cc3da4a3dd0220765f7d480ad675d2990023a56d713e18e645ecd2b2c6d2f69428c3c26aecc9bb 3043021f029bd4a2dabe4bed6db990374c31d9e727059c18148b261d492e77467408df0220296a73903ec682d114cefbc9cbe328ba49a72dd7b1f1c99cb72fc01460787165 3045022100b15227054df3429e2d67fa00e5d02dd34c2c9626d5d1601549dcbacd6eb5777202205206b99d478b707f86792ef30f7e988e60dca47d44d5e2a716c10c78fe372fc5 3045022100b9a348faad1cfebbe54c4e14b7884a7c301ef98736a30845cbe1ab4dc5085bee022035a547e5fb82bcb4e0e89a7fa8f5d75b2c70f9ad0b515d207192de9ee8f180b1 3045022026058fd6a2030820618cad7795c5a960edfa93f495d97d39cb12528dfc010227022100c159c3fa75737d757d8168c8b873da4c39ea68a16c4904447fa76a939caab15a 
3046022100d44c94d4b4501e6a2d24b4b90198171a5a887286b3099aa14aee8f590fe9889b022100ac8dc48f2974adb74339afb8ab2ca87309ad27ee3d6e256bac828c53fd62e1b8 3046022100aa4e540b3e0f655cf1682937fc216d5c5079bc7bcf13827b07c34d2960ce4aac022100aec7770449b2c3d5658978e97ba35084e75f9fa08cce976a4f201b476196ccea 3045022100caee22fa6f12e70fec61ed12e722df0e066f13c50788b46abe815bb83f270f310220089b46623834d45dbf9fd53a69942f3070b05996283009c37b45a6478910e1bc 30440220657b3d944ab04472e7af45cf5646dcceaf9e02b3d1cfcbe7f815aefdd511b84d022066f15c5075fad643c3dc598be44ef75b179b64714355261e9f4a0140ab36a50a 304402200e8fa6f796796b609c0baf44b5a5db76e1eb9999ab206e9b030b6accca634e3302203eb33adcc3fd558f364c712cc616821e49d663fa328d7469bd8d5c6a25e52c83 3045022100d77f6a7235ed1dd74c60870a39d38c80fb7a1d4cdc1419c574d0a72c72427c39022018c73c022e9864edc132a4edf90da9c6162bda84bc836c7e4981dbbcdeeb6da9 3044022013ab8d862fd822cc7c752d82014652c1915e42026f688e12ddccaf70035200530220597606e25f944d9ec8b96d56da917c5cc8ead7c3b9501041ee220bd4e3698674 30450220601408bc0bf028bbf774a771042bf5391e0c05ef2475fc2fb24b4141faedf2420221009203b9354589e910d4fc1ccb65661d105321d865ed61e2d44922ef6e987dc6d5 3045022100cc96efdd71d34fcee8a9af04a147247549f011352394c4bb57f18d5c9418688b02204ec36f36ae56c45e4575db2fdbbb2c5c7407a1250bfeae73030ba0921ab901ca 304502203e560b4ebc42e444e79ebc333840614fac46d6165e22e25422ada776fac619e1022100d1f4a6545025634c4203dc74258dfb02f7aaf271ea3a5609ecdbcd3f0fc44103 3045022007950c548a5eb308b01e9396ed9d85ebbf2e7b74faba56d136447aee6d315465022100d69eb2fd707da34bfecd487b8d750f4f3a01b1ad59f293f85b53c7cf749dcb31 3044022064171143d9f6f659ad58d6096e4206a0951f6e19eaaed8db2e3a67b0a316f05902204e8c4b9707eb508d6ffbcd3743690bdd0573075c76c446141cdc152a118488e5 3045022100ceee9eab5aa0acae49387fa320a364295a2bfc88590ff8bcec2624dfcec29535022032839ace68abedb52d44681b8ee8b79ba195709d49e835dca680b7c268a70d90 30460221009934edb990e77d56bb2b59dc168bef86d6425316368c610a7e9d93484d171b77022100e074d9f4bbe83da3313b3a92aa8c2fa56c2e33aedf95bd3784c5045aaee3c1cc 3045022100ceba10903a29e2adef171650e1658e957f8137f88a21b9d77fc492fae7626a95022061d06cf7efef6b58a557dc13ec3556738ba886d00ee7aa89504e6333cc5b2042 30450220483a1579cd5c2a1f499b9c9451171c5a0377d87a8fb2941ed6d0f12281eddd39022100b1d479043c68ae3ef4a8ac728051a7b3867e9c8cfe57a855a95d69b2b9e98db1 30450220164a5e15de9b060563882e22d8a008a2fa6fbf4cd8a03428357db8d0498da831022100ddc9339097473263a3ee050f55e1634f99b1626b3ef16c454169c0d095ab23d2 304402201839e7aaff8e442cff629d50e90cae67992c571f2d5dc2c90569863ec7af17f602207463a641d8fdae43fd74e1d6b3ac6ed4c77cd4fb97b6f30d4ad24073d461d141 30450220720ad665443865e9effcdd790bd8e7970e74934d812619c8b21855566a45e630022100bf76239a39a98500c65f3a69267b6440b8d4dbc3c6ec6c898ac2d54ef2a64413 3045022100e0baa16d8845bbd8307d08b4cb416cfd9b046085765f16f70782513231daba730220260bae964daf2e8c56346facd78b864472e5b20b7ec1f5e33113366cf877012e 30460221008ab5158b7b7d03c007c5b58761aebbe851124c56ed87a562801abfe4410a7a8f022100b96413911223735c7954b16eae869797521e59cf0f80c28e55a475dc6aa16880 3045022100925a4af55cc5d5398f6d303b6f7da33d61cecf7061f7ccc1c646a31f3146c4dc02200be3cb79388539850cc95e2203b38305e11b0906babc6fd53894473e39e6c8da 3046022100fff9ac9704ddb17805f82fbb3fb42126bd2c66a2d3e6be0b4187ab032d0efc41022100ff989aee4eb24d58b27af99ccf42008a846ae98107b5903cf0cc5d537792a856 3045022100a4fca1566409fbdf4e7657cf57017d9e18bd56701bac92d1cd3ba807e49e8a930220233d9b6c180c9d54616bf6b04a1b9e3fe37ab5642aaf1574888c08adaa891b02 
3045022100f5f64855fb99cbd7c22cc58b55e420262b2dd39946b08bea11aeda7d49b50e3402204937873e65b60d20a55801b0457dc8bcd3f21177d21395de69b485d2d8608a62 304502203b039a8c34c5d737bd096a8465fe0a1f718ecc7f30902852662bed22e974c851022100978fb11a03dc1804dac41f6cf9b2c585834907c219357b1357556e6836fc6cac 3045022029d2e19a4f5eb89e38b5e26cc354a9097e1fce3a442b0d3f6827480ee923a75f022100b41e874f51cdbed5154b80e1eaf52ab443207578ea166463afc13441019fc022 3046022100c9c59f31bfd97a80406f545cfe99b22c9d5f885bf7626113ab897f0e0cccf430022100f9297707050cfb42ca4f2026d3ec5650df49f54f3c8f5cec79bf5176958aa05e 3046022100fe55834ec3f43f9f6285d54600f19a3800e85cc3abf315e99adf502fa6be2156022100aa02af329326d275d5c89be43d34532f61b026c98e8e75df4d0dc3a7ec92f5c9 304502204df3dafb2f0a2846b94ec73fb2e97f914735eb4ee932debbd982c3ccb17ad50b0221008535a81d6d2727c4b4afb5bc4a87a309ee53892a46d26a1fee36859eb8b81a70 3045022100a8d00b24ac558d6a07f5f2de0eeb66090154c5061fa978f686a9e7e827e9890602201efbdb63f8969ea63f67e7a2e7777dd9ba8c7cdab95842d900ba1b37e6052192 304402206e45f408fb0f84492ac118043efb2af9a4d3f07cdd2e22b775172c81839bcb38022039a30176476622d9d4ea91701d0ca7950853973651e0ff7cf6138c65a1b8b5c1 3044022007c83f5bf88b43f084d98dd7665b18074821e596ae778cffed7b28bb7005348502200a5b8b93b57f48caf20fda2c037b9f0dd7a7d810c1b495b545738787c86d7f98 3045022100aa954a10558be697b03208ea91d70f6350ae24a185f95b748ff6289e04a6e660022063941a8625ebe07c070c0ba2d27e00482a9f15bac047aca64ddc2db5e57de943 3046022100d1588096f89a60277a0eadea61ec27f74e036572e49b456f05b327df2ca6fe7602210082dbccba4de4e9bcc550c7270c165eacffc79800043c4653457576fbac43a441 304402200ffad946e9bae0871509d1afb958e512544b31693885d29ee263e5035a6cb21702204d713c9cbe877a96d58d49cbbd49c9c1ce9ea7a26f60f361062be8d7b573314e 304402206383ca8b7ee7bfdc33900a57b4272e0f61d6c5e72309073d173683b80a814979022051c7c841e9330494b8e0eeea61d519568eae5aa28ef61885107982f961429a8f 3043021f411924c5269ddac89f21d8117be40202b4d32d9fed28d4434b496cde210314022038f9e1b102422e41fa023c9117eb2cde9fd7821b831efd852f6262cfe4e7a8f9 3046022100e827dbdc60c46893d6e9406cd7edb3551d3184cbc2d28684fcd7ed0b94c389eb022100f1b6780cfbaa9308ff9334ccdaa91bc8bd19a617f92aa353665e75566e2a22e0 304502205c7bd98f21bce415ee6f204a5f2ae63fd3b91440996d9e87f78413b1c2855dd7022100b748864977c337da192af9a1000ea8cf01b3952b10c6c0c84d40c234f5f3210c 3045022057af0f7f1dcfd6b4d221a5a74d0a235a175c6c678280eaf6d67b65f6c30d1009022100a5a582c4d1af36103b387327bc70a4dffbebbdf6b29491cb91f6ea5295a45fcf 3045022031bf384a8ba1bc8bb9e9e05fcc5eaece62c49e76be98c3f9499cdbbeda748561022100c6b8f20532cc105c99f1e1a136ac162793a4c2622f23971c5dbe1b7b3398ef6b 304502203e5b9eb60851f003b92db51f43860f6d24d9ef3ce59a2c8a6a9d7b7ab812a369022100c145a830819c64630be40d61e183c9077cbff9761996e10e7f50220555a30751 3045022100f33b10c68788641b64256d56d9f448a8fb6a3422aa451a188db95b376e0bc3fc022035783d641eb94f59534198c66e19ab9b3504fa3fc15c0e521c712c20fb4949eb 304502210092676de6e1f2cffa326271c1bfe8a0d3f00013c176983b1c6d8d540c58f8dd3a02203eba1a821c4f5b4d0dd5b9bca39fcd8731be29ff958692e00d160732380a779b 3046022100fb0f8038699a748ff291431e54c294f81786dd4fc7c9093131f7ba8cbf2ca141022100f0206d414544f41e537996fa269610d1d1354d6b562c551c4f796db3a026984c 3046022100dd674cfc7e86a98fd6c6a3e6ddca14880b6b4332b855cc3852ca2f504365974f02210084d865add5f821a609cdfb688c2ca98e3783fdb798d558340a05b7fb3e63f3c9 3046022100d59b8695197400500d11a77b7db2ca33c2439e6b598c2d793449368c9ebe30da022100c4a49e67a83a5ccd2f1aca3b28302033cc8d5fab30d3b45179c0f59ceeb10d69 
3046022100c8e82587e1e7313953b8db21fd8fc79d92195527461b071d329b8a3bb30825fb02210097d6cb825b84b63ea2561c84a380c2c45e02d29fe6bc632a64bdadda30f8ccf2 304502210089ea8bae45ccd557ab772ea14dc3872954419bbee5750361f27362c85b841d2502203a9d0ba448404d0acbad92b02f33c1a7b69d1318e06ba155f5e74095f56e3b55 3044022065aaa8b36acd8c149a5af1ab465876548c237c836c72c81bf7c370c3ef32fe6a02200e5240d796f5c4e61aa574e8168f41f90085f0b07acabf53edc4f5e780ecb75b 30450221008d04670075fc1ecfa57bcdfec58e71bed43331397790ddaa22e6841df31c6916022021c2edee56d2cd4158c42d065a9786ce698d7630d38f45215dcb642102d3de95 3044022048a0bdc6565950261a6f220b4e3022b1af679f4a15b56b1d910b03f2317a402d022010d1cf90cd77396c0e29a08ea1f98aa7f4d76172aaede8e4245154c9858c4456 304402205349ed0b2d73c89540b72d2d7af25d9466fa631641224c013e7e9ed5041ffc2c022022f40da46af8d697d80cef130d29f68dff227c417820b361e8084665c443cebf 3046022100b76fd573b6eb7448b9b8994a013a6f2ffc5dfd89c77cce8d26c107a4d9482e0b022100f0e935490473969256c5525f92db9d61af0c9578734e3da91ada48b69a3030dd 304402206ec49ec3178d54ce459bcfffa6c71f50f47e398f439d047815b6a9097b1e629e02206686ecc763d9892f1e3dbc85832eee6fce7b11a90aca0b1979e14f75d26a738b 304402203ec84d014077aaff6b51c2e59645fd83fef9b536791f4f03811387214cde38a902201c0499640cff3233cbf0e043af2b97f51d4484a669caab9ba97e227b1aabb7dc 30450221009b0884fca853befe8d2f04caa266afc0bde9927038acb55439908e27ece04db502201f9f1bcdc780075fe5e94e99be6f696e26bb78799e09adca31a1550e675337a9 30460221008ea44e819b8ec7d021d3f5f7e7d2f6f66f7d7b51bd94bb29bc204785ca3f761e022100c03cea3889623da5bbc774c5319814372ba8a643b3216e78da7395bcff4f6711 3046022100bafd88693c54580dbf39d205c8d831e281144ec02081db8cd565c8c997baa05a022100efe0e1a0aa5febe4674b04fe7b926c67ea98c694d1dec17a3959224a7c419fa8 3045022100a679c1e50521aaae7551307b5d36215f2b6c1a9b51a73c19714406181292540902200a7ba87b777790442aa70ca5db29144ff123bd4da5d52581e88c855c32f0dd17 3044022009acb96928ec2c3463b6bcc41f3a609b82c1daa7c9304886470df4c55c22547b02207dc1f6b5e7c2840a37898d7e39e33f3b8389ff9a607cf583061cd53bf6ddd387 3045022100fae689521007620b759ad5daf332f0ef1930c4dccc00cd12c69f11444c58f1e002201a2401419f4a3e61100059dabfea85befb8c65ad0a8a6084293564cd868d6a7d 3045022100a67b8942c23d1090157e11bf45337367d7668edb7f7b4f92079628a1fa32057902204baabc61d5273c697616fe0070599eb26e23484965d271b5fe82236ccfa51f36 3046022100c11eb0cf598bc18156982a5a6dbfd4c60a0887c9406f30c277990e4b53cc71c5022100e2dd4e3ea5911749ff067bfe2c04af983a66f2bebcc97001116e1a242e65ee47 3045022100d6b30bb508c471d869f64177e21d983db00f70203c940836e982f1f9481df3fc02201d4599884ae62de65cf53590b6e67f959b72c192e59a785973e144e8e08234d3 3046022100a0d4bca4f06a48d1f83b99a8cb07bf0ed4667c9ef890e46a892ff2b4ae69a1060221008ece4b0a3c183f2d49583829ee8c78498ddc604cb2d57d9efe314879e13c42b3 3045022100fcb77332ae45cbacf22859055b0fd8004166ca24032f3e8de0447154d473d39d02202db0ad68df4dec0ceda5615a0605e853ae7e71fce39412525e3df7eec3d64ccb 3045022100a1bf80907a683ca0384a7c05c526a624d89bb5a98f634f241b548993a5c7f3ff022064411db40d77d18239c507aff7028727a9f409cfc0053eb850b68538c17858d6 3045022015809d322f966449969882288daea01095c8a13e596f56cfb4e15ff1a2e79d47022100e06e2cc980a8f38690ea4acf4d5ee6f01e92501f4f271a2b67a7ba34472b179d 304502210082bb1045540fc21b167d22afe8c0a8f288bc3860041ceb4e00b938f58aa8cfbf022011f1522b6799dae92e138d3c54ac1be5831089a76ab17bf37260de15300a157b 30450220123c1308c7a3a57e2adad23e99c14f3cb3c4083859e8714830be3570b45120dd022100c8f1dbfe3c0f4ec51951c7ecac5c27060d3363c958d27de0465e735e2c0b3c21 
3046022100c8a10ee0c000f4591d1aa10c17ed2f769910cfb3544002562ba7b6b6c169f7e4022100819b306bd37d74a8f776c624ce45a958ad021a2944eae9622b2716de2d063892 3045022100b54e1fc498dc3d603703aaec9658aed26a42c92796ae656afc53f405e993fe340220624625ab435d52823f8d2535d305205a05581f06c2f3d026209bd4b18ef01421 3045022022c9a4b95024790abdcd6941d67886a8629c0594e39a808cf43b3ce0208f0c86022100acec1e54ebb680c392b8206d2bbc5532e0ea6cf5f7e19d447d3dcb044ff36cb0 3045022005d5f16390f4e9d49dac5949e0386201f56e0ee21c15fe46c7f11c01923462bc0221008aae14b88595077eb8cb42b72cf42d31ccd50ddbe957c6dabdecc57d19f987df 30450220038356c86fdf487d1610cb4552857144cfa9176be9ac5559e0a5f3ebabbe39d1022100c13ca047af5381839edf958d89d6afead952b11374d1b90c50e5402c98c00ebc 304502200df6652fc9f55cff53bc77c8f19e92b388190cf44e3a4a2af91d91705e10d3ed0221009a600d765e763b4b3cb21fef7ac24ab1d3d1ebe4b68f5f7d8053397d40e015f1 304502206ebd9f6627453717ac125fe3c2f5ad3caed082507cf82fe2a76b4ef4c013916d022100a958d2b1a9f8c9264ddee6ae1c958d99d8b2215503cc2fa48ccfc1f9c997a638 3046022100d5038c88d4159bd3165ed79f12a02de9e813deb077f7725822c38d81f00c1525022100b3dece9ece24b39d58254250818f2a616c613185d786db3fe8a5ff4d6bc9eabc 3045022056923362b8c87afc43a72790c350f67bc1123c1b7ff51493e1bde9418f73f3fe022100e4164e99e9343c003ec0c3a9ede6744adfc96054d2a4eb5e410543e8fffc990f 3045022100fed901e961a7b2f4a5d0e55cc6e22d86a9eac21ba64d39ffc6f85debbbda548502202f5bd76c0d419cceac6f634df2064b7dfaf04ca1be9644e833383928ca07ffb6 304502206e8c6fae6f0185756b4c74fe5f1f9d9bc6766cdbf9348d64fb41e4edb81c78d7022100cdd252c82ef634c0a42840234ed2ca54b27149252c5163cc4aa25994bd434ac5 3045022100daee56073687a5132e3941ce9d56197de954698acf7ec1c37507cdcc2134132a02202c417cbbec21f819b7f69e3e9dad48e6557feebbefc5f24e0de86d8e548f650f 3045022015d7ab6edf7e99931a73015213c1be0f32bd8ba784b013376177f297813057d4022100a2df1015e412f910d32abf4e1da23531bd70e0197b0fea01223e047105916642 304502201339d6655b5ea53f0154cd60d54d4f6c2765c637629090ebe0c9259e9a67a2fd022100986e13ff9e448a514070bb5ca37f04b86da8f24762d175fc68015eade125f55f 30450221009f69864f1a03be7cad02b07fbfe45d1d9b033326598ad8356fd2480c47ebca340220275589f099608931cd2f19b7f88e573793f8be4a512efbe583831f1bcb7490c7 3045022100a883375c1b693ca3575412f803c42da551529fcc40e7c0d51a06738b4f75484d02202668689023958ea2987b8d701bdd23485012d1fee937e40ddcf07b04fb3db2d4 3045022100be351577e0a99458809cba643b946423c254ec617510aa97db4b93816394972d022040f60c700b662a1631399764f60dde0e9ba7578ada5a263d97929c92c479d4bd 30450220041cb3a8ece95559d8279b6eb1d113fe9af470a0288702f52e40ae9259e40682022100b1863b1f62718ff09a5e7e8b780891b22581e892c760ac57dc0c49a65048dd70 3046022100873fa6d6825b67734074687682ce2a2118145ebf669d2450f4cc0013ff5b079c022100fcb947cd1b49d8c729b30ba75279e5534df7cfb37403c45554a77c08f049997c 3044022018dd5dbfd1d8a6599cc2a3685ac94810d9d15a37ee2d2854bec5a97a81b712ac0220190e60fc49bfb09debcae91131e1493c37cf7abd02cf067dd6f1fd5980778e95 3045022100d1810ea9ff4848d0565dc6d2fc4348cbd90a4fef2ed534841a6c5d022e80befc022052a5c09db9566dd457305533f3e7475dfc4b5ff59e367a301e9676ce8e5298c8 3045022100feab035cefccd86e56b654aba73435255693c326d9b1c8c7a745285ac0ee37d6022042ff3995f756808286ac8fae7dc93456105234d3173935d267d6553c6c7de7d9 304402201556612cea7f3b728c8768beb10d8a5c22d7c246592aa94ef979110b8423eda002203fb56473a30aa349ef2e620cc042c438f1ca1e1aea91bf243b5dcff8410c5a5b 3046022100ce0f92b6ef6bfa0aef09da53bb8a2bc469f01d9908b7b97a3d978f79a70b7318022100c76bc34f45d2b77ee54583a6d483ab92236e56e4a9950e64e1e226dd8c8a797f 
304402207b107e46c206267ebb124ee62099e791a37725709f4c615fd0e19bc9fcb6b14502200ce0d2f1f5cab2c80236a6f1fd8e5c245a65e45fc2acffbeac20f28c6cecb0df 3044022055af4d77de463df6c31278d7218faa63fb63f36949c8377134a78c3dd9052141022003530eb5d7292c7a714765faee21f69ef5f3c69c5fdcd25c7fba2ed2f28b9817 3045022100c5a6ec32fe19a437bca09668b580639f8f47cb9931c573aebca3cfed4dd10d8902201c085e44e9fb307f42c0d36f2a1d2043d94aae73a2607c91d0fc70a460066be2 3045022100f07709fb2dfe49a35cbab5523728ed53416bd8872d7f879e65bef51bd15cd1100220372c2505217e05d79626089aaba9d02b48c70565c2c266494b211adcdadfff0b 3045022100fe342b2f107fcde5ba7c025854bdb4ceee3a3e497f3a00294d98a70e733c99440220194b54cf61eba2872fed77a2fa13b0e4b1b7ab1992dfe466768a835f32a19f95 3044022035eeeb42263bc15bd67d194f79c931877decef58c9374906fe630bf220fcb0e602205ee0d979bf7a4bede671e831ff48a4788b3075b05239b67521901da61a55b1e7 3045022100d469d6e78cb040dd28c981b62a1fcda3a68ddea58e914402b422bfde40783ff60220466b4fd879f4601808f38045bc53c1ad9c72c8007e5d9bc9e2c5ffd28f50c19c 3045022100d37bb5edf322b347fb82cee241b7c8239cf9774a16801354e2a44c6da68ca271022020ea42df7824d2ad573f975c5d8f3296cefb13e50693e689763d97a8092d355d 3045022005800c5bf837bb28fcf01ebc6d1259bba8cc2e7d2be720ad4f0cc25197d7855002210097517712a48874cef8a08001d93171bb217bbfe70502dac665439125b067269b 304502200450b938f094490c3723099e621ee6e2bc52fc6831df0bfb9aa60a60bd8f7add022100fa2f542454734236be43672811434cca85273ca2ce3ad1d505178756bb86592f 30440220668fe7267d26beacf5fa240f81e629f65a5e0927b5e1e03a7ba1651b13c9eed902207d5b888fbc1e08e52380684a7eaaa7c33271925ea2d954130ab5f1977fa0d6c7 3045022100c354cb879221e19a452874932e8655856ddcaa70f000c4d5a789d85b240215fc022024acaf5f7cadfc7ca50cd9d25c406652bb4a69e1c0ab6948cd62f675fd50f3ee 304502207c76b67fd9cdc53585b8f4d06d6d46187faaa1c30c5d80fb41049038316a5a87022100fd03988bd59d146e1c97cb380fdcafdefa312a5679898793f7c099c4d8f1406e 3045022100dcfbebfb86a2dcc9d206cd3f0b5dd705096bafccbe38fe45816c64a6f65f206a022026375727fff782c44cdb771a265adb808786ebaeae1827b5c8a3689051da0cfa 30450221008a8b3107008f6335db49e0bb9703c046a08bb96a4d3e17d84d7a124af7b2139002203c8b8af6fcb1c162b6ce43a592c8721bf4ab78625593dd02bef045548b26cc70 3045022034be275441d54b2bf1ca08c90beca68875be544b7941cd0c60ee5bf0ece10ec7022100a0a98f50fad0b0993cb8c335bf1b2dce9c2faaee55a3ddb6235990fe4dc42df1 304502207f944e502d8a66740db7933e24ed78c38b8c2c051aef1f24818b1a0098e3d388022100dba8c908b4ee7901ea5e3a85390c6f394ec229178a2079728fa848b7709f27a8 30450221008b0e91cf70041563cbf4d71f346b850d26417d94927782ec7429c40a62b888b4022004900d3c786fec698792f13bc1d0e800011b5e936f33576c0bdb91c5279fa1be 3045022100869e5c4f95985558a47b4b36e59d638631fdf5cb9c0d7992a0c3570b200674b002205b37350845a8626dc14657462f0a2411dd1936d6b2713719d9a65c7c4c0d8975 30460221009a392c617d60a9269f072a58601e82ce3f791f5a85581dffef5422f90dec563b02210086a0d96c5acc3d087df46ac89c40fcf2fe19ec149e481739ecce4653fe1a588c 3046022100ea17563f6c72685206a1569968d6612be66faef633c341d29deb7a463513fef2022100c8c0c5c990506e78ee1fe7dc2a97d088d6d13e4cb9824481dae578d276fc8556 304402204014d781872171a84a4bfefec4cfcf8d7b694e5f09d2fd2d65eddba03fad636402201f8f19c4bdfa32a84d6cadb05cdcfff1099a6223b8ea4d28d2dd65554862b269 3046022100eac4bde08a04cc44b7ab232c39b46ba4d278697f50e506ff36c5e44d3d9fe1dd02210092775b279f7cd502a2a5d4cddbb20a9ed450280ed27d87071a057dbe2410690d 304502206f2464089daa42030c242418f6d75a79caf5e93ecf0a584fa8da86062e7f437f022100a9d7f4c0c7a01ebc7e81abbfd3cf02aecc20efbd963201c28e91ef44be0c1a73 
3045022100e32d698e4ca704ebc00fd21fc2ae68aa722a6c19ade613455225df3aa0f095a2022044975f8b5321231e491b9fb525a1accdad41e1f0152b2055d283ff38e0e9fccf 304502203f1b681bad5bb76c25f466a1a03c921f5328a2db4cc27a093ef781338b31acf50221008586bd6079acb28c5e8004cf900a3309dc57a6b55d3c72493cbf37fe58a5cb85 3046022100d86c45a75daea35b030d3354e611cded6917adb837a56972ac6445d6f34a3c7e02210099ef61ca1e83b3194833ef5ae47dfe394c9f13703d7c5e3e676bd54ae6dfe9db 3046022100eead99d585f10a49bf453d56d43d5402feda288a33e4c361ccc25e03e049e7da022100fb470fd377096a9f39c5a767607ff4a035049a3d15b883e0369bc9cac9a3ec3b 3046022100c0643d2d1982ef9fc65079eec4870364cbddc565203b1cd5668c8fcbdfcbbd2d022100a8bafcfc1252c49b7163c69c8fb022933c6a0912211aa4b42a0762abc740a22e 3046022100f78149b79c89758515d45924b2919fed1ecf1a9535f0d457b38f0240d772af51022100bdff02f80ad1ce2b30a7ba8d995707a6660e45a6fc7e9657751054b2efd3b057 304502210084a5d58892bb40236f2fc67cefaeb7f9f662734f51ba7b7a14f5e07e02bfdabc02200795e54cc63d8f976c5f7d476be5c942c09cb554a603448c4fdf577e387aa22e 3046022100f61a768e2c223628ba2d48661b086b43dc53d2f034dc77981a153284834c781a0221008b4665cb934d0ab87e228da98648c018ea566305576498cc836db41212e4306f 3045022100d127c8d07e2e066b054c3dc40887d6dc66513405b3569b2b77fb0ebee1b143d802207292cfd6e9f8c714353a61e6bd2064a61daa10e1612baf3520e769091f0a12f2 30450220788585446520616f363e40250f54ea5ed8f8b6c007c6dacd79295bdd402e6549022100b93e14a7949afa30f07e6c1a7b2224acbd8b9633957d49c3a4ae8ced61994e7a 304402205bed8d7092da232bc22b9b0d65b620cd2e86c859e0e3d098a025d2a0e42d0dcf022042ec5d976d496f342bda84e0032fd4c32ea48c5cb2b291dae5eeae311c6249f6 3046022100b61c44c4bbf26cc244ab6aa3534da9863a2f5615f7813d23be52dcbae9e3aff5022100ae7919bb2d58533488674f06fde7c29bd6d3804f9161cade2f7579d5ec88abb9 3046022100d51575334efc548b38e022b5797a017d903eb20cc0bee0753d501d9fed08d57002210080a31fd1bb81eb3f16a6f167c9fbee89ae3cea5e014267bd131136178490ed27 3044022042f221a6c83aac8e667520d7c5b5831ed138277f0f3832e8698579572bf4923d02201e6fe7db20879d84ac5d98eb3ae87478dc31b41781955121656988cb618fcf74 3045022100896d9ce5a6bb13e993eaac1548ad65b812cfd2bdff7ffc9a7cc246a0ff988aa6022053aa2e228b412a13407647ac2bf258ef8ef076c677d7e9f9b9ca5e99cf9803f2 3045022004242ca4bcbe0cb7de0a0cd6ccb99f2581f207fc33657ee22821aaa52e7d4979022100c882b9b4ba2664f0cc169cf248af9a2d7ec393adafb93fbb6304699472e37f9f 304502203444ebc1263a9112874962f398d0931d679f5179948c8f18bd05a57972cabc5d022100fcfa5a9b5a489bdc213c4900de8bfe78c37b3d02008dd4b0c8f6a0ee745613bd 3045022100d0b432cbe1f66a2a1c9b9e67958e74bc6ca80d135ade5a3b7eaa3f2b69add5180220404fbe9c9f178be516831f242f3a162e8a3efd5f10de89e9cc9db4ce6264c156 3045022100cac4bc09333d0866ffd68589fef48a46395ca439ccb056c5ac79afa60e9f2b9902207db10b75d3b46e75b34bbe201a098f8b00a8ae46dc6ebf4ad69e574b0b58a407 304402203159736bcde0cdb14108363cc8987ccd6d748ca78bc4df855a8610b219515a9f022044c40eb618b767dbd44c98d45d964d3defbbe64f6fee1dfb77b5d0fcd2661b4f 30450220724213082d84680b32bf3d01d4f66904e793d6147e152167c1dbdf237492420e022100ada416e35ec7c9e0aad5e8489360c42fa7b7a7309d41542476e32b52d97660d0 3046022100a06d5dc04215d08123350e1985b460927d867eb90c2464d5686dd68f011de31a02210099d8a958bf2fed2fc68d3cab5d3c7d7a96852e4716b2ef73a15532b5cd2e8e06 30440220145e173c25b35899898830b086b0b5dd4dbe644a393bf6e7defe3adef2e3ad7e02201b763ced61a873c3991c24693e76fbb67bb8b86346e690010d3ddd7a8ae0996b 304502200b512e66f0f7260a334e5a5928895b0d251d8cab9f04d9598028a1230d3979cd022100bc7a5fdc99b2331ce59b39c64fbbbfb366dc5394921798aeba1129eb4d9f221f 
3045022041ba0d8b3828f5170779e67ea8d6948d838f63e97628248a8c6f19f17d10a9fb022100de4b4346887f87b243710fd2d48c44086465f8d703c3ebea205a31f4472a0089 3046022100d29332398ddcd8a19e60fca349c12156461e781486c7663a334830b2fba113780221008b0973796b999530a123682ddf5b9591d1f75f471638e79564087d378806c721 30440220383460bbd039a5ff63d4f71e3a4e66c85fed9b9c32aeef7b354154318e28e24902201b57525296f967ba6a00fcfa02cd0111f1c9706def0853963fd61c4d9d617074 304502204df77390c650341160cee7744d2eb34053825d6eea1d796c143f426b76dadf63022100875432c65d3e5511d0abd0bfe0d351140a26851f7e8ee33fe18a063880fbda67 3044022007ec94079aa948fb2014ed269d91f1d469189862872214fe6653b8a8282c70c3022079fadd0cc34c96a7f865e0fb1c0b8e9dafc6b0d6d1d8aa357ae9f843f5dde44b 3045022100b2428b86f19bbe6c262e0e6a535efe258d5a739698426945c5f65f92f34aa58302202a1d51a4d7e385bb38e2ff513d100afc37c444a26ad8441748977a0ff79281ad 30440220371b2710974c4c1ade79078ce5fc7fd5465eefdf7339ad966c42cb88aba30f180220523240dd900aa68ce6d7b86c17003048b371abbc8612ae32c1c5711cc6920c9b 3045022052be0589e6158072c608f749bea197b3bf1e0569bf56c17e2d1ca4cd5f236d18022100c1e9a5eb731f037548cb4d40a4b41d0d778ffd2318e0516d194f8ea723c7e87b 30460221008b00a7ccb3e4c6ebdbbd5c79ea6bf836846faa7a096ddc3706f4f38fdf1eaf76022100dbb70787d09526604f06ff5a3fe0bb844c7e4fb6f49ce7ac856faa09746d33e3 30440220067ac902fa6014261bbf349b35da2696ff1be843f5b54abe1b962461d4ac15b602207ec4279e6778245a63d56f43ce073b917b41e7793c70b53d840df1f1061c39b1 304602210094a469be1b3556c3da332a68d5921655e4346c929897cbdbf8ee36e697145666022100b95fe95cf7234abb50c6e264274b546b48880627564cf5e48d94121cfd9d72d6 3045022100cb0a1117dea9bf38407d8457e07fe57cdab929a7514a67e2c147723be73a0c0b02206e76994bd104412abb4a0ca4088996d3f7a308542a5a5532e50ef39f75fb3ced 3045022100ed6de09491edfd875095a0dd7f4234c17776f053401b2c1af82f35eaf2cb2f8302204deb898bfea001c0d350c5c21e0d9e61fb097cde238c7980da1832e243bd1483 304402205068efdcfdd1e49d8b811da2a57f789301f3f2c8760b1f9284f8cbd529a60db102206c0d8813d4e6ba75a1eb1ecda368b78ecddcf8dbf196f84bc909074c43e1f061 3046022100f7b4f10b7aa1c1bfad4d6cdbb30370be5323790ade7b35587317b0dd6698e0b9022100a8ca58b588a27e2330523a08212230b55c392574fd59877d07a3de3d7cf63459 30450220548af1b2e22780f4ed472bcd41a86f776929cab23faf7de004b5d04afa81f255022100bdac01c55c2f39a033e33a8ce77b30be299a225cdf3f972db21284f4fbee1423 304402200982afabf967aa77e365abbae67a292c300d649edb0f03bfcac8e2fb8419f82b0220053c5223fb327060e1e712b385665279eb62eb0b139faac09592cf9cef7fc5af 304402210087131190ea130f71836f73c4996d72adc24c5331a366699f66a0f7f6eb000243021f4abb7592f3121dcb2a9d9db150d6ac7bce9e8f3542b38ea47a8c8af9e6ef8f 304402202bb24fae0d1d6025d0d88422041463f5e1fde0a3387bf8cb14d09e16c0904645022027ff090e9839d4dbf2f2fc28cfe74082385fa5eae7d15dc1742d62c12b3f1f44 3045022029c253b1125737a9eb842e7033137f36554ccd6a50b89c759abd7edba0c43606022100d9d9e432b650a0672a459da285de9b81660d25a67a774fb36c84018180af2c3c 3045022100eba91865d532acfcf3278a0489b8a147a29a93c9502ade6d2a0bbe755e532b4b022052e9185733fc0e268018f314ae546fc1b123a66f607f9898bc110467f282fc8b 3045022100b2f7f99b28d057f9db635dd8f1618ae801a08fbf4392af0d4ad2debd959ab76b02202bf37700ec036bee13093f25c8fdc889ded9adef016a9e7c8898d9186d4e49d4 3045022071ad85561ab140e1e649463f218c637f476114493e178a412e22e012d7f9ad5f02210090379b16b93e29eb7fd618e90e4aa3943b9fccb508ad6ed9f6b1a261c0681087 3045022077b94df766b9261c8876ae8f07aaeb8cfeb49aa04f66c0bbb2c1d8ceeb76811102210082fab444b4c81afdfd5039b9594515625283c510d4e1627cf281dd8e38d7b0f9 
3044022024d4110cb4094fb90058357309dd84003537831ce348ca40cb028baa8430403302205b8cc332375800e4dc0bceee0464c84cffcda18b62756774ade5d5a3ff866a99 304402206ac75f430f44978666ffcbc83550e2f5204c6c4048a7fdc604da7204b9d42371022072b6de2b57cb66adfcf73cb47466f790b22f74f9aa9126c834d1fbdc5ea17d6d 30450220149240772fefed86df84cb25f095425e041e34c78cc3a85a440a4c30e92d6082022100eeac3f1d78f626c69389abc7799a8778e2a4a5d44ed73dd8e419740986bdf007 3046022100e0ed76d5809bd3617b87ec37b4b10487565232b62b61dd1a8cddb22fb79080cc0221008dfd499644e91b7aa69ab95c3fdc4eda769b2065390126d371db797ab2243f6b 3045022041e740b8cd9740c37e730df086184c63fd98ee470bcde0c0561c445675156c7c022100d655524a971fd7d34eb46e7e08801ebb07528b52844cc97c49b12294c115e77c 3046022100f0e5c442093adf9947f53652b956b2f42b6e287b5450d2f66c3ca1272789d9480221008c60af01876544d6cba1afd02701cb57960915fd19c53ffea0526a990e040275 30450220248278a7f26fe98833b44fed6815ed52a120c55ed86f21509584e2dca28c22fe022100aa9742a7fc8102aa6671242bfd96a86910af4822f98ad2f954f96b49173353ca 30440220172c52ba6fc5faf85a72505cf6630387b84e113f52f896c752221e734c6d842002204252b7e1d2d1fb0ded749e0bfde1ae0c3b1b8cc9cf16da93aa79e3bebcfaa508 304502206b573da664468cafe5486a4af930058fc3676999d93f42fa34aff40bc7b201ea022100d41c8bb6664098eec7be2f3ea96334ad3f4d4bcb9bb5144d1b27931b4efdcbbf 304502201a3e467e1f9d0e4b237086ba6ac53ba7c76962c63bcccb6ce15e2e746d3ce8a3022100ffe91893128996478bf05a8d7168dc76dabd884c6d30dfabe774d9647ab37b5a 30450220234a63eb3ae6fd34dcbd1606be85da54ace8b285c6018676c14344855149c3bf022100c1a386bb0dd78bcb9f4d08c02722fb083d2ad751eaf2243a8b0e5c7b3322d730 3046022100cae20254578e311754248dfcbf608fba77884b9469f3e91c9c44eb8589118662022100b4b61aae96e3f9afcdb61f6f7cc24cde8fae3021cd941c88a38e7f6dc8ce2247 3045022050497faa3e02b5338e3aa3132bde11f393d05a837ca28830a7cc98a58255112a022100a5c2471d59f1fcefc6fc94368c370424158c2553db9bb926b7623ea263851655 3045022007eda5ecfdb6a45ccf0ba7ded9872b46a400df30294991ded78373ca0ab4449b022100b6c8db1489bdce69f6677a233848163d01622abb3a60daf2b01e8d354fe28486 3045022100a69f503c04c085021ed1d01d4b7ba6b6ca6fbea43646e5e2f196bc65b48541c902205f7eaf10f60d52e2d65f3bbd987a2ff85e899840b37571736c87e96fd05a0ee6 3045022100cfbcd989844d7f91d536d082c2ac84a335498314259ba491a2ae24618de9a1f6022008419089e7348eec961156305f70093cc0146141965c3124be68a1e0f8769071 304502203f768c0b17ac0dd896267c7e444b8016e8098389d55be8dd35cb58fccda15a02022100924409dcd40aa566602816d0ffa1794c7506e7f9c91895cc694264883de5dfb8 304502210087a51e41bcd1682587081dbe1393e80331ed710d8473a6a32d8315dfdc6d7c9d022057b0975d625d4ce35b5fe4c7fafb218c8ea89f1efecbe474bee8c8befc685c28 30450220586fd83c8792271e43851e65de18e129f300be354a5a59f01b55491172cfa1af022100b824cef0eaaa540576bd20224b7da90548683da8a3dbb261bdefdc9cff725e9e 30450220330ed32c9be13026cf5ae7bd39099b1de9e354378fc8e774cdbaca90910b297702210087a77f2ac4ccb0d366375e653e1ffa5ac7e972da1e6f3a4b89dd78ad5f73b584 3045022100813eedb4d7ccb32c4691e80dcd10dc325a5158488e2dd1a1a6c6f777367b7c8702205936b26d2d301a89948b5361861ea9e178236354f8880f290b39749470f0b7b0 304502206f8f09ad100619616154051680fd56539e234104fe8c35071e3c4a4bb3b268a0022100933b142752dc0a052d17100c8c07c61627033c838b02caa6aac25cac2730fec9 3045022068fc39ead5b9920f1bddccea214b001b0f9cc5f211d69436a475d0e33490c828022100c497bceeddf8d5cad47d0cd21f7a2c49810bac84073ffab80e8c9db5d9188174 3044022052996ef469570890554ce242fc28099203fd78d21f964fe0116f38034a773bc502200dd329269755e7b695f2ee3ec78b9c98d95c6529fcf58f186e3480a9e2f7d202 
3046022100ad8a257a128c20046139fa3ca91cae15aaa55fc7e6d1e2518876c9063199e62702210097e133529beeb22e2be36d5505f9268595122668be9831b10b8f82245ef4c788 3046022100c5b62d070ea7254c7101f8b3be9960a0a8609a5ae3a446b3b1669799d4b591dd022100f0489dbe31dc776ab7c477578945343d2f8af9744259c89eed39ff437f163835 304502201109e14932630cbdaf7434a3c212b7b0b559971e42a3e426422380c631acee8e02210096ae468d2725ca5a58f60c26e8f46dcbd03a3f9ee81a2f6fd2c97ea4a276b27f 3044022042d5beec015acefafdd4eabe0946112cbe044f294f406c93c6217dccd5ab557a022057d8bdb30097e8b9ce7d476194271a78eb8ac7affd4b247e6e481045d92262ff 30450220054a53527bc237d0ede90eefb8350e727eae90579f709d83c8b498f5f8cddc7402210090b1c7de605052698c4c91194b6577408016addfa28f9f4c673b295e6aff9b35 3045022058df314decea9c0b84eef49385c8b076e0f00a1df43a81ba26d492c474bcf7f20221009468848391fef6f32a88433ed5b03953be5016e33a376783c81876ebcdfcfc01 3045022100a131f9dabfcd666d5da7388d2708e1d006bc6f573951f5fe6ae83695551034da02200c8318a5e9e3fc80f30fc026d2962620a11936d79d389d81a5b76e635a05a8ac 3045022039a2fcd5d111b33b88d1d01c5e08c9c8da181fcdd3e47664dfc515b9ff3e72e9022100a836ebe4522b8ce07958c15cb3dd9155ba405ad17ef37799e078323c8bd04fe7 304602210088a9d1b45bf4c9e08c990a007e0b42dca4f2f8e1c216459c628317b1ff8265bc022100c4e743a85eeef3021886957e97d44601fc3e0b03099d4257def7e44f76aa5833 3046022100c973b362723c36a5e0d42ed2a3ecd17762eacb3366c7b9954acab8aa85ebc4c2022100f80a225b63ceaa9f684810db8ae09b74eb163d26e28249749ddb37b189bd0335 304402202cdd52f2fb1f57da2f8f57524493c18088c4d124dd4ff23207b618d73b112c2802204c0af126ab7cf834a6f62e7571e671b05a11bae36e9c230517ed3d70a5fc5bc5 3045022100b7581ac5d088900ac1d338e2854a7ddf18a1c16b0f293854b84b87e0c33b21b402202ecce58434617bb3c837236790e815d298d5604d80af6fbcf51a716d1250b052 3046022100d60972f43e3ba05be592ba73cb3021f6a4d3d84433e90330c2147719a5ab2301022100aba68b61ea2cb4402f1196bb49fa64e0520e6cde7fc9ae7938cd169f786ea684 3046022100aad89a045ad11893a0413772bf946e5ee89c3c054655cfeeb659d86287fb44f0022100ad0dd7a9030bb371af40560725ed60353b68e8a7211644e1bd2ac57bb7d88d06 3045022100d9b2e66643e2e2cb3a4a6fb5e0d5d31967c811f74ac2e37b3d31461cd028635f02200f1171821015ab542e6d62375ce2bba3d71b25b85743ca6f7d66edac47d8f0e6 304502206f047a04ac9055bdf1fabf3abe116ea74285d5ff1a6d1624c6bbee13a6c0f9bd022100c1eeab8dcf3d9115eb1e6aa43dfce618ae3a9221d6addd85f9e27f3eb2bbd4e9 3045022100ae7d6b36f66c14e60693b8e387747612c3494d8d53241c944172e4452279ad1802206817069fe37cce94312486f5b4805785a6895d288ace55ed6dce23d7322b60fb 304402202e54411ee1af914bdd634037413ceecd3ede83064b03a33b141f34db096528bb02200a7f2f8d3686b239edadab2c389d345966b522f9650ced6ad2cefbae2e1aedf5 3045022100daff52612620d3753ad6c5929b4c7001b4e9d7a05cf45f3e982f5c0e462501bb02205d9732d0879bba57d5d8563093266e2f4e4551632a9107fa197802c699c2c3b8 304402207a8abf5cc93fb56f2040d2e9fc270a1840067bd88c5897c0168acdb57eff11e2022021c87d43ef905fdecb54d92d5472719212a76993b7a009aa9f23d6f3aa3d4baf 304502210098335390921b98b2421697560e173a8fcceda6fc67bfc91a685a7b8d06836e22022060135a29a7f18bf76693868a9b8c3f65eb1278844d8c1fd6a8f5014ab5952519 3044022035aaff6ca33f8b490344cf7b1a986df4c0338e857ba327a3b326d235669ac95002201bf35e9bae49ff6ba99da95addc95f38f9794350edad4403ca383efdde0429f5 30450220571d5fa123393fcd3128dbcb579a6edf2b0a973d750865fb11a48bff19155a7d0221008ecf90aba8b6aee46c3bc2109a0ced4e82d24d3cedd33f7aac6ceeda62013f73 3045022100a5f2ca73a96dd401f663a077e2d833fe68415fc468ad1b790ad50ffb0f9654e002204212eb1e0ee5fa7f36fd56223dde6b26969fb9090740f9c5d4011834aeef9e67 
3046022100d81bd591f5fae190e7754cd75247dd5a1db77a411efe8f40954c6fdc572506c8022100fd169158b803a1f5cfc866f21e3bb2ddabec0d0d96c25b7a86ec9591c92be98a 3045022100fba49535b96ee34c326a624c4d9b02f9b60f98d5cfa56863fd77a03d40342242022021c6c684ebb7c22cc2fbd65a8c31dc6db8ed8aa0c5a40ecc05421c9ab5db609c 304402207cb326d70a5680ec6ac4f090802ac3adce3f4da07f05011b65b4df7bd6151966022051b0d496818cec3d43293e26e7edccd615cf986c0de3e90edad7cad3620fa85c 30450220780f32868772166f69dc516365b3a89200b647345e01361545e096282d66b5a5022100c941905a1d9d9536db9502c20d349f3a8787b78c45714f98bf1f5e5ff4999ce0 304502202029c025577a648bb85adef6ab581185510aeb207fe9c95fa94d9311a76a5867022100d67db412d75dfe718ad745880743dbc0e8dfbc9a5e08576ca724724e75633c2c 3046022100e5a8a89d55ccd85a91fd60cf360fee5381ddaeda06e90270421a6a6293a1254f022100d02a7fdfa856bb2c5a2dab958765671deb3a9265db0fe8d53cc222be06ee60b2 30440220060ddf20f8d112d5fc19b14ca834c74cc8e6e4da71d1091eb6b3c5481a2622c002205ad2d202695a19883d8a1a6263d91e197c544cd3a6773c9bb43d8d6f79875646 30460221009eabff16c467ab045161a6a39ea9b2858014da43573305fc8f59ed2818f275f3022100ac962da77b52bbf7d8fd5ca608701e668a0badda7eb37d72e359bfef4e03fd75 3046022100a0cbfe5f82e7e3fb6d1d1eac041a8ecf9fdcd454fde3d0ca7838ce6fe899a1730221008cbb55751864b408aa425df669984982ce388b6077a2b3d592be1ee9ef541933 30460221008a5441d7dd58d7403d2e71fe257d04ebd05a6d8f17e7d9d57f47d8afc8069dcf022100ec13023b252d4409fcf3dd2202dae232abc9d48392e90570eded30482bb46a46 3045022100e0717e8ec4dd7a8cd130a41872d88fcd534634b4ddea6918ca3e5248837bbf23022041fce2f1deb127ffc4089eed2ce15707de3c93afd15940767f8e867d041cf7e0 3044022041aa8a76eea7e1fd8862461096faaf8417be6af4ba13c3c1473deaeac3d74a5f022065af591aa13a2b04eb4a9a8ce4d6dabd9d7570dc854aa363d4183f6dc6489bb2 304502202c339b363e25d9521acceb741bb76e2b1640539f093c79bef62b07ca9a5c24f8022100976eeee33d4dd72814090c783f417ed0818121de1722ca939931e7dd1f89891b 3046022100be0ab22c4855dd03b343e6eeb3641243a6f63ef1e8f4f4137c4a627b3db82835022100c3b7ea022ec25e0d7445790afec7da736580606532265bb8b0807aa2756bac8c 304502204e83f846f40d73cb6f6145214e949335207f76c8358c9928beeb61dc1320d30c022100d3ba920ba3828fef34f6a5dcb8173806ce61ca8483947ce4b46caf4a6265a6d6 30450221009b53cd107afc498042b581466802c6e1ba61f6e29561e1f607d06b08926859bf0220351443c75b2ec5242a735d9699bca9e7c669a025773e9f1e0388f9e0cbad1f71 3046022100f53df602bb566baa0d2c1dc674b401ebc4c89144dab79383e5b9cf794b9946a702210090967bea9b62008d5b5eb6f48fbd150d5de7e035575d50e0a78c94e6b46dbd23 3045022100c91545afbe7061c55f23b483638c268aa8ed27a4eb2b81a11084e1df45d2986d02204a4f3deda3fe46acd31e64790b9241ac567545c72c9ded2b40ff7b899bb2a879 3045022100a7b2400b64cfb0c510b6a36178b0bcd13e9c43648c383082319ac4f590dda7e90220681125cd58671258b773ead9631b66b28e6c313f3d69b1661ed07beea033eb43 30450221008c789f4b50e23af19dad6cfdf8243adfe19daefb2e148617e6a09d351ccd3ff002205af4719d121e67a8837a488d362a272cf358f5d2a18935a420e8aa99abc114be 3045022023c9407f38067615d66baf9261168ac9bbeb88fd722145546f584ddc2682a404022100d78169e99e940b6f9fb7e6eb9379bc266c5f4247e8f79e71cab5727abd6838ea 3045022100daab402287c40b43b204f1c0db01a019a6414fc6efa26c4a2ae382371385e6a30220782957de47ecfb6fa3667d0fe61a37221bcc2f84679248d6f3e3ff9f51cda55f 3046022100897c796706e66f9e64175064291e75f4eaea9e427986e78a58f99d889d7cfae9022100e78f443bd6491e9150e9841b9a22e6fff5731444e55844a381afc30e5e725845 304402203775dcec6b5be872fe509327484be09ab6187caa5c6d0a64bd11e045daf32ed9022038fc2f1548c5e22de41f72f2c4583a942ab0dca648175a30d1331760f45e9002 
30440220305ce007190d1f79ca85bac5059806a7187737d8a51fee900166fa54702383b70220688cdd6a24083c026947fc7a3ae4a21f38f07de21091fcf600352953fd18292b 3046022100ac51d3b11c689e889227f6e0fc9087405dcb4350526787dbb3e62bd52c8ea5f4022100d514e898295b3f42d640056bad39195b2f117458609dc46f0f3565248fa3869b 304502207daab76e3844975d5826e63ff6f44f848f73029411f5e3bfbcd010463eba8baf022100f75f13092d0d15aec9cb1fcfc5efb44474b64dbac0500aeefc0296b4e8d8fd3c 3045022100d6f2f5e2bb8100f03d60cef2c445b546bce68068da86a836cb2949fa65f033ea022022dcb7541dcdaaee2eb4f445ad53918f084d22ceb4518001c4cc67f4b43c7b98 3046022100eb44b1d5a5137372add028d8bf7d72c47b8500b8627b3510469e95c8583824d70221008f7756b83c9871ff8c7f11fdab59a2b122bb38679bded558216e3ac109e60c09 3044022021840c8fe8b0cdabc227186ef978b21628ecd5cdf1569bfbdf646cfcdc751d4b02203be28d3034759ddc9ed53197e6fb22aa31fe60290f6c91b5e127cae668634dc0 3045022100a5d6e1a0e713134f5739f7f194b97dde296dcd43f62fc1346393f5ae4f9e9d7802204de713ef98d06455770efc0e8194d849fabeaa5dbd8f50db354ff14d15fb9e35 3045022040fca32c2d1094bdb8d9a298656f57eba6ee5d5dfce9b0b0c4a29f86ffb70e2b02210080b746437a3d27c38f8c83e923d088ad7c9331fff1bf256f83c2cee3df7d3ddc 3046022100a7fc917bdf723b86874467eeaebbca58bbb5be49aaed3d2de7a14157e2d75cab022100b8db67650f5aa5911cea9007d6e2aedf104b247f2fd1a7bddf144c84895650d5 3046022100bf49798a668ff2aa9c517b7a61a2c7893d3839cdca25f661473fea9b72622d3e022100cc3608e353398ba0db21262f47eb1f2977745e12d32452b2ba977e1dfaa0776c 3045022100d940a92a55754d541a7ff82fb200179a08247af26f6aba34fd2641df8405579102200648dbf046f8a6a57ab08c34f222611f11464961b7e7b9950573bb6370786508 304402203cc51222a8c61bed5c347c3e4898d093e2572f72f3aae6b464435c6e460dd50902205dc9f812d81c1d2a7d846497aa2ffe08ed251fe9cd3a3bb0ff89518916d1fe35 304402205d2c69a830cc01f0518f9892c37d68e3ad3feab95137751d2339b8ce5c7f3c5202205cb30fe5be7bfdfe86eea1d4043ae149fe04998b60bbc3eb5072b50abac86909 304402200731a0fbf99a9896b851207ab14a0c9e0e042b154b74f4e7edb1cd75b03bddd802205fcdecbd4649b34f53843d1ca2128f06a42e1f75a05f391144927d3a5a16b1e2 3045022070ddfd6dfb1978d84fe3ca60cc79d71db2846ee6b1f180ea3227c68719fd6f29022100af0e6db271b330948b054adad6519b522589319ff2d757cf136f53e5a10b48a5 3044022036df233466c1083215ebb8bcafec2a0a78adec800a10d9113a590188d0c4830102206024400a38fcebb3989fa58ee48c9e368871775f2b0bc19cee43514f18f93954 3046022100953257341d51d7a4c259b749acbb0011669f1d7014ed97b0100e3515a397e6e9022100a70bd29e3bd29b7573517e33df6021d9436a0be29fc6660b15e4a8f8690aca62 304402206032e95306d9011290b88b7d6d281654fa8887d271e71edea8a7a9c46f29793902203400eecfc4c549792d1bf545dfd5b17ffad7ad486d519a47bf018f96fa888972 304502207ba7858da51824de9f9e54dd1e92d872cb3b443479452055b66e79290da6490b022100d982c1a3620317da74e8d09e87f6961008095533c140fd4ef2ccdda6a4de6e1c 304502202b2d962ec4ca6e8f20534bf8606273c5f887e424fa2b84cb691d462d4754f638022100cad33f4d13b334232b4961149f98cd08422fb4eda5c2e97dc97d7e2521fe75b4 3045022040d24df8ae7b75b39906fd02148c70f371a748a4283c574c9fc1fe079d238d24022100fd1950c2206054f6b83ff33936a1c1b9d0bf902e5ce0e5e1df1d777953d03cf0 304402207bab5aac690aa4cbd8ac94045b0ae51aaa881d0bd4af48e4c4260aa5ac27678602206753710a624e651c06f8c9db457d1e6511f1f715bfcacceb7ed4ba0a408cb584 3045022100eeb5d16604be01f671ce9a804fcc810a87bee402da70751d2a226a3df4128ba602207f76bb97555f9c2f21617f0f184431b27ce73a12e7255d1300f280414b572551 3044022100fdac3587b12fa9f35001c070d8160e50fc7adb671174a9a5391c7c4e75d5ab61021f649ba490c7a9781ba36ad0145e06b933f169a37a5968f11e3f97f8ae7d58cf 
3045022100f81ce6d115c9a64ca266338a466a7aac5191ee9e4bcd6ed46f0d99147aa0920b0220100ac444ecc4578a71ccaaa7e88b83912491269f154b0f2022d01a3b4fa5e689 30440220208d1537ac805e53b5245ed9c74ece530a8fd7a51ddde1103ff8d7b11e3616890220418d0670e4db65df414c6b20ea2665bede16edc7c22c6e6290d017190ed16f67 3046022100da2f5630172cbc6f454e79cbdb47f26f7bfb4bdb96674f99c8c86435de7e0f1f022100c6b74cf6562c1e99034379dd50841c0f3d4181454af6aa42f2e7b900b4135fe3 304502200a488a7b624848c9201ffdf04db3cf5601aef1ec92e269694058e0f098d85e09022100c3e4e67d96e72fc67231f68d9f2f582cbfc66591a0c2d72c7c855f6dc4a87461 3045022100862461169bf229c74d5c07e35189e95290ba33b15784cad29051f306219f326e02207de332185cabeebb09039e2c07aaed0675c375994ad71143f622ee73946eeef9 3044022077996754a58223364ba1c61d4d2bebada56f314f02fc8677609f22ce53b6cc1c022022b3d436cccaaed2ed660437f0f31ad081361610b7807317401da705af7d953d 30460221008a232533527fdd7f03e59aca96110f64291a3e557b19cdf7ca9dec7cba58fe4b022100acd3042a47f3811df5c867cad017a7c49d684f94c3c0b60fe6a1739caa09ce8e 3046022100b6adad18a6979f96cd8249459b49d52cf0929b4a695f06fa9a9a108d97edc70c022100e90057268f6a5a185679f928598f2380ac42dacd4c691895cee65f5d4e1d6e25 3044022022130a14b6e113ac2672a4a97b18b4b76e3a5fda7235ad228fb8cda984ba23540220676a151c5d0fdae9d0e4a53770db7228683a35d5d548d1c544e38b6ab8517b5c 30450220401e09a37f03e8008ba957820f0325e56907a171d680451153500121a6637945022100bdee40d3e7c7d303144a82628a6e16dbe04fd842ad376df1afcb73e6eee3caf0 304502202016752d0f9757874899b79f6f91c4a067609307bb67be80dd8051c47ba1cce6022100fa7f790a98661f8a9ad29539612c901ce9f86640e3e34fb50e4ff744f3de66dd 3045022100a2a1313113a1ac3eacecf8d2d6143b94d4e5adefe9574f1d010dba930929e1f3022052fd42e3cfcb10cd9d7a95d563f18861095dd2197a008df410896a5cc003dcb2 3045022100f4759a13c485c97ab19d11647fb38d379cfc901c37ff9c7e26053eec5a4934a2022060fdf7da48d5ddde54a8b73c7569ef311b6711caa7b75b3f12ea16377482d42e 3045022100c71737e9e0e8ce62afda3bf63077fbc6372d194e5cbc4c6715bc2331f615142c02207c058c0bc27f76659e9058dbd890a0d4c709ec2cf110b056a60f5f367d4d11e4 304502204c140d3792ec376088090e1be1ea3f6376da30f434630634d99da369cdce0a10022100cd30ce74a5b0a9cb5664239aab78d1d1ba2b8267ad204aeb93d2c4a172a823a5 3046022100b3f0a8635b69248597050a490fbb29e1c36b245c96d73c52f10c17b9a631daff022100fd3d28914a9f417e10e33d032f71cffc6a01718d01ea52d28578b0eb2f89dce4 304402201fcb3295a3d5f500ec454cc4072fa64315f5da52442296e16c202cf3c6efe2e70220659a9ed2a7c9a5333a91b4888c0f21151fc1d21ee14837ef7a08690a0a065eed 30460221008d2f6275e86918b7a641f7be0c995bcc5b5536159767b5d23724d780c1f2a9c0022100a3e3caecbcd4c9489b65e3a3b80fc0706403c07751ab2fa1424d82d1b298f497 3045022100c0ac9f3a898a6d513adb7ff344209aecf92fd9b9905c5072029930db7b53d91002203b5709a2df8967adb8ec1ac1ce49a680537df605e1ade24cfb64e02296654d34 3045022100fab45b5c07876900816bfb9d1914b7fc1cd63d3f658d69be13ed12f54e8dab4a02205190ebca9fbe89a05b1c7c577112897858d0b50a582f56d921eacfd89185be8f 3046022100c112bfacca67abe9b7c3662550cfd8453a730ba65371bbfaf4f6b7f554ecec2a022100c71c5c3ec9b079728aaa120debbdaccaf75f46c1399e3becf863605338bb5e13 30460221009981e34da8f974e349292c614278fd1455148032ba789c16bdf03a2e8aab236102210098deb64a1d75ba27a3a8e5a8e6828d9f77490f7348ec31bb69fc26c7b7f2e441 30450221008921fdd8e30ad0e66aa3b2dabe3c20ba13850ac7020d0807895a6f3e060cebe7022056039877e23c7ce4c1d749a263c9d51db403ead56c0326f609a3b937b9638228 3045022100b5f80c17726e4fabe0e27fa38e553912cb7ca324f0885f6eec54365b9caefbf1022009828624201157b4397d3d8abc7511cd1d562c5147b8779f7daec1931ae54300 
3045022100dd3312587bc6c61985579f73d594425ae06bb52d9f2dd51546eed551726be5f402200dcbc856e2fdcd135b1fdbf96f0ba2ceef4c13ec836acbe9c07d8f0855f22602 304502206884043497433ab8f82ce5e38ae71914345328ebb7b0a7d9ec5d42850c8c28ff022100b37e7ab2b2f6b633ed077d5ecdaeb606473e425f21ac0e378b129a3e2dff21b1 304502202491970cea91d79ba49e1f6c9c9b26b96d70f4076b4c344bd34e06719ba5032f022100f748422022fe57fdfc5431749fa9f5b322e04841e3ef0b9e385b4c5b2ec1cbe6 3045022100a699596cef95b51a35ec7b23326aeda925f6fbd9470bd503ec620f28414d237402202fe76000b48a109240d7541001632d62786f6e8e70a5204c1c997297c31bde94 3046022100d5e6016b96353f6ea018480464b7ce59a66dec7d5a6de931946fcdff40a865ae022100a379fedded73904db27a7378870e08ac41a34bd142866687ad09840019fe2eb2 3046022100a8d5429ddf4db86142b909ab4dde7f76347f8d3e61f64fc39f6ed746b000e5c8022100dee95182749cf4cfd8e681c9322ddb6881fa4cac09818e14c0f919ec5f745485 30450220492d33b2a5ecf1f36614acc6c01cbfee0b06de11963e6c878e0104135a770b27022100be34e8cdb666d202c2c0eccb0f08efc2fcd86cc966fdd28eec4a53a6e459fdc7 30450220681c2e6f7d31fc40e0d8d5ff5dd0a1bf466f27935be4333e001e39f766b2af18022100bdcf0bec13e0f8b89e7b3245dfd2b6ad1772e81649a7d0d7c0568e9aa15401d6 304402200fc72db359624219c3dd1c26284bb8a5d2585f1f479215561ef4f8d4ec35388602206f3be02048b12cbe8667e2871ac53a3ed9b4a170ae95b6d67c49b88c9fe31a17 30450220623c537f65a9e78d0500af3bf04cfa27a9090902a6967ba0454b95444da27875022100b51975d97ab325652de145c56df5ff2ef1aad7a23a5b0970046df80abc853b9f 3045022010fa7d26cf769144c2337810da8432f872c34602bf447f8fd38bee99cd5df8380221009a7cc9c5cf9ca87468b95410e33e3ea9184277d11006ccb8f1b432a0e8631739 3044022033273a733e624cc048ee19b79a54e51b0db1971d46ee71ed788c9f936a2e439d02204a772d4a9e4e1eff8905aa5125997278fac6d93b42a6832c9ecf3435a9de078b 304602210084fb79107925eeea0d6e9883e3acf87b2a0b553d5439c3fff9b87da099d70359022100b6afeb5d80da1a75ab7e0235210050971db8734009a3ee737372aedd3e686799 3046022100e9a3d01e960ac3f835833583c2cfa7d61b6c022eb98be0113179bd2583c817da022100e7be107db7f624fd44dc53edb30b58d4c0c75af5d8d5bf2e4c37334c7db49325 3045022100c4f40bb8fb953ea7fe423e1a305d074bfdc7a1935aee7e5340c23688715eabbe02200a1648bfc92193ca5060b5e83f70c45fb85bf11e2b443e432d7dda196b5d81f9 304402204dbbc923a7bc8987dc67c4e24b84329f14342decb5e7402a87a81dcf3efefb70022046e3f264269e87ffe389461de83256ad682b39c06974e6bf128f59d16a428123 3046022100b52eaff7232b58539b2f3b8be8e738edb8aef6eb91d328b862f5dd16e73c4f150221009ce524d1cc2213dbc2773a06eeb62ca6cfab626efae92c80646c4b57e13337ef 3046022100f1a903582aac925d8b6ce920d4f0813c0bcae1d2effbb22b7447e349ac3e69a6022100ef5571f77f3ec1fb8e2b47e54f6a16fb27a6bad048381a990cea2bf9e3223d75 304402205c458c1303df219098ac6147a1f4c8da5c2e0f8c9e3a62f370839eeb1e66fc1002200956f5638ed3fbc57cbaac147dd83262c213a8447b01a52458fe95aecfc26254 304402200b8c9963bea398945f06a5c4559fc3b9b825843f534ae276a5e76f08714bda890220233d5d186dd9ccc855ccce8cb509add392c70f1b2b41f663c25e7bffa0b697c6 304502202d07546f396eef5be03c21637a5dc68ba63c5ac864f50ff0b6b025a4c3afb056022100e14026f4b69cf9e8569a118765f915860a946e2de3c0815c4b35aff45e7337fc 3046022100ec969ad7478f2d99bbeac5614086b7de5ba90b8713363b5f268c502568ecc0d5022100ea978439eabea1e14a35de4538cea15e91676afb67515f69805d546c0bb8c0a4 3046022100ce582b916a9c6d01c7dfa9ac1eb1712e432c5366f566437bff2f713dea276164022100ae29df1ae0b6581e7d70a195dbeca42e9c9ae4eec2f3560c10ef49d9dc1fece4 304402203b4b1dcf420121ee02b07d55b8c656cb2d0ba5c71eead0256bd02b9d211cd0b502207a3d7b2f880bfb905190b2c51b0b1cf05151d09963468c5b8e7b6bbfc9de6a4f 
3045022100830c34b7cf7aa1eb444126d35ae06b76e04eb2de4dadfffe8d4b5d63f8775bb2022010fa0c744c4a5debaa1cdd461e78cb32f68a2f4b8aadd1bd38b8817ca24f41e1 3046022100d09b2028a463cbb77188f626d5f8d1c6334ca5e4f860b270d7ea03eb77f3580a02210085dd2159ba8e29a098f14d3beb219c86f1bcc0a4c41cb477e361efe60292ffdc 3045022100899eba119cff079495a5898888aed44d7e230f4df40146ca7f14b8e14399f21302207b637ea25f39c1356adfdea334106b871fe1481edf7ef9d54e6061c894d12526 3045022100d7e038d5b0afe0f9d0a133e8f2a947e45a47f58b65483461790d79dc47df3385022014645a0083a8ceff773f037d64af9e7e09fbfcd968b27780d5172e3d66b0cc25 30440220090e9e7d8e4348fad06a85668a0b013614f957fc110ab5632e0706607810e5b902206d3c15e59f3bfc130f400d9bc5535533fa5404315275fe328b3a812fe4e8789a 304602210090fb31250c29c6192fe33d274c809b04ca458398f940acb31129ab94e685962f022100c92f5d091b18e1a823646448fc088a6b2b7245372e5259a6ff615c5b36f22f5a 304502206eaa2433d8b06cf5a323be0b093a7e3991ce00698ae7f1c3ed9ac83629854196022100de32263bd9acafa94b19664081b19d62ece6acdf6dd0dc14fa525b45049b7de7 3045022100ede342166005f900267a8ea348c1b2b224b44f8e31e3f5d8422bfc79a904748c022068df33386d6a2697554219685327564d457d047403437d9deef2949469882459 3045022100dbfeaf9733cae0124c253fbf0576d56ba8f3915fa3d3000c5114419a261f0a3e02206ece66d2e98edfe23993bfd5a9c0977b6eb92e008a501c2a83e32660cf1bde03 3045022100bb3baf3bf24c3063e3d1caf4f473278ae4cb8c27e2695255f8991af8cbdd5b5c02206f815d8d7a8d57bb4b1067c25402d92b6e52e59ea4d38142472484ea742618ef 3044022002a1059cae6c35a4035aaf487156858a01f5c69889a7268e689283ded374261b0220185155d2a3742fe0d4e677c03bc785ec3872e79cbd11df55844a172c189d0ea0 304402204f507343cf095b42ed5f7ca4528e0aa75441c9c6bde27f5db0acb423f9c92dcf022018459da209fb59737681939a11e5f673023ca1ae2d81740199af1ce242ca2391 3044022019258c007f4e78a55ca0930ff3bbb85b87b9b5e092cd1ddae25849a980396366022034d84c2329febb8b07e09eb6b54d86316df18164583a990ac0f7bfbb839e34d3 3046022100d4aa9e810c360fe4ece6b9a450cfa89bcb675090c4b84c538d35bf54afc64169022100ac94952eed4ae49d13500af056d3c02198184dd15a01661150300b2cc1b8dda2 3045022100e0a89bcbfe5c9f22f1d55726158b2c277dcbe39ee5424d34e959e4720cf8f9ff02204db7bb000408b7487d3f0e2ea4d46bb9b95e19fb919d23eb9c6c17c80cab7122 304402201cdceca23da51efa8ac2143883603e9242dcd3f4c3afb79d3e035c7249d0a7d402207efd87f048643a5a40f3c62d4ef25aaa16ab618e0125992e59fd32b1575de024 304602210082538c5c6cbc75360c0d0762ace5cbf761995d38cb763d68392e8a3e48b8a03d022100f83de85c4a28bf036f6421766e09f696074c62d0ca7cbf5abc4d57d0a27a57ef 304402200459588d1095d9c9c11eaf4f5bcd5921ce5237ac1012af6e4d3291cc92d04b2a022076208b0ae902a7735b4fa0500ef9ed5dabda0090d01e7216eb0e97d170c7b3b4 304502210096b93a496fb2face4dc598b7564992341f70b6c886003c2599940d0d03e46620022062ca6bd725cf80f6a820698b05cc934f496bfbd2a4593a2c787ad1addceda3c1 304402206d31036459ff8ade58580fdd23873c0997eebbb8d129a59b00e0100ee17e8a8802204aae80c8cb7f166e653a4d33a5670670051869e3904d93186b53dece7b4e84f1 304402203a95ccb21e8ccf770816619fc4dafccf5dc6eef67f0f3db2e02563a4837e29cc02201068913b08496cf54d26a3c0e8f651db66f1a556f69bf78cdfb7e171d807f653 30440220236066fd80d8101647b42d0e9a1ce8cd3bcdb0dfcc478a36279b5c072b84d1290220533166ac8e4a2ea0f15dd35506ac02705b1fdb7988bef80f1df603d2c01ec634 3045022100f817291af0a184073e2acecaf700d2bee40786a1da4e2656abda39eee9ce9252022043cd93e2d6b8f47b084476668363d833b4d80b1df3773c2ff11625431656870d 3045022100c2133b3db88f1af447a69584bcdc9269a4e1ee19aef1b55c75768c9a3920320902206de15d176215e5dfc1c266d9b2a36e231704bea2f6b874fce0d8eb8349128044 
3045022013c71cb7355ac38e8f2252f727098784744b87ffc8cb26e38d8ab6934f562dca022100e7de313515c51c80746aa36052e3f3580eef6ec56a61cf16e2e4fa02ec279ea2 30450220058816ef6299a0a7ab3e24d2b6429eb52dad7502dcc86e15fd3b6724811b0d7b022100c74ac11216473cd73a896d3ab0eb5ac341190125fc5352f6e336a65a2135310e 3045022100ed84c8b5696fdfe116b47b9a6f921f955b27746d7eec1e1a198bd183b0d17ec3022010a0269873bc09825715c57e0659e3472931caa9322289fcd128589c69d24c5d 3046022100de2ab469929b6350f4ff0f08159e26759976447e3255b7a5a778bbc2fdff6cfc022100cc604ba7e9dbdba21927e0d8b26f56b2a6f2cae682b316b342e04e2aeda4620c 304502210083604136823b6bb78d13edc6ea11ad438736aae012281561771c17e9101871b602205169100dd915430c1b1d779e16928b060b3890e2c157a16e295269e27548231e 304502206f989eefceb78aa642c9ac15c195edbaa6520f4c3c3bf7417b4d67269ee03c90022100cb8ce0b67d1682fe9e347916dc0bfe07afa1e3a0c8fff0205526602a5c41f892 304502204de29c334372366bd47d573475f57a03dadcb6544e6a1da49dfc0ab0b430da35022100d1e28e2cc84024736cf5169b66d5c72af487ac2e111b30f7e1f34b2d87cfac9b 3046022100d102156f053c56753504ecdb78b8bf6f94a4b0f015912c7f3ee5838bff0b2ba0022100d6e1301588b57699daf1a7552b772b70e1409533304bd996de85bb23ee77fbae 3045022100837ec23e502396e81a08cac5719d9539a2cb1a4030c585b3d80e031da123dd2102204719d9d4830c1dcc7ab57d3b6f06381eecc480c04f5c99d7591e285355c895c0 3045022100c97e5eee0356adf062690d4b9dda7e4f485f8a4c6894a7062f9565f443d1207502202969567437ebe3f7dde69b2f8ce025459fd642f4a07aa314f5875775ec3c29c6 304402205ba7ef36eafab7bc51b5ecfc3dc2ecfc255aeea85a6bc54e1fbe6b04afdd30c802201afaa16a6601bcd22dac49551f3cc068e0432f4471ce732c23168aa1c4a273d7 3045022100e95eddc883c17923f9f2d69a5b282f3b5c3a31de43f1df55c50516faabd67842022030cdb1a6e300e7ebf07394d806d21aed060c9361f2986d10ca2c112eda06a3cb 304502202db213f3d5a692d8747553a737fcd4fdccec0b852a0d3d8cf3e77d2ff9af279f022100af47172e91891a34c87ff33c3231954eedc965e38c7ad9413974605f64706d73 304502202cc9e0c5538fd6539673691ec9d3e531efdfc6507b7df7277436a3ccf9ff6efa022100d2ebc8d29b98b5b9572b32fcdcb1e90f8f63e31558b0788a2b793c7efe2e096c 304502202a3ef4939c929cdbcada373df09f5863f0726943d6e7f56032d14c64bee36f73022100e81acb00f5b5f27cf0a9214e00c47f81c0cba5be3fd6d475c5024520de81a8e7 304502201df3446cd4e2fdab2b72593bc728edafe498a6f9032d29be0e85c8da9489b6c0022100a9cba6adec50645f57b6233449c462ec907405727eb01f854aa7d291a83e78c1 3046022100c701ca60e2bb82ad327da5f4e84a3648ac2270110b878e436276ff49cb54453b022100b645e515e04b43d79e254a053758968197ccc352a998f830faea6cbc54b08b46 3045022100b43fab17cd27137acda7dfa0a420e0ebd7d8ec08e23dc0fbdc993785dcde84d20220345fe3593c20a575e6fcd2797bd8913f8d68f031857f431eb38fc772a92a4d97 304502202f9d5b9e21e932617ce892500ca25d81624cc654a640e33887be179140fd8bfa022100a813b7645224e6763fd6e1d211e39a4b3caf26db58bd8bb621984e7cf81f99a1 30450220726c2bf02482c082b1bfd0003720f589962d3bc70b6a508c0fe3c9c374a9ecec022100e3826221ae6447c48716b052af9b7a25b0e3a8d99a7b5d1283a6c8b39a885d07 3045022100ad6b3dd00ca4089e57b2665673ef05c771866f267412b6cdc6592f77f9de653402202c72d41b995bb6fbb79f954546e6c7fb47595198f6a5e2416d5dd5656a5046ac 3046022100ec080a16953277f4107f8be28d6d319016413bbc134d4136bca122cb5a1127ed022100c6bddf62f99dfa770fbcb8bf71080502966d5510976c0a9d1eb3ec3a71be8497 304502202442f19091172681df36e64df9cc6a334e21b55379b1e240c40ee22cbf52ccd5022100c8777d904083a429eddd9d6aa2d1d3797f5d3d984df98767a72a2e651f709bbc 304402205527d551cc535afced4f15499645849ab8210b3cd8d8a9b76e5f48e00281c85e02205db1f4ae41c03c95678663308a1db791286f972d7b99861e59189ca04733bddf 
3046022100f2b7b91d1c7932716197505bd93c778259d3eaaa5e301a83d57e14cdc12d2008022100edd4093ced33f05152ae1a2cd7597dcf4aea767e4ea06e87ff3f2c73873fe7e9 3046022100bfe4fcd1065c24f6c227f8be4024b8c3a85590ad0e6e3cda12fb10ffe3f57818022100b0002f223c9397d60149fe7b6509e1f12752b3077a75a88283fd5ad0d1b777a3 3045022100d2d95ca4e911aeeecaf3805081c8d89df87f9a8c3a2a603c0415e160faf168c70220396f09779e01f16078c49e6487fcb20093705ee8af19c8ff30dddea526dcdd88 3045022100df4a758edc4068ae8d2aa6aa27ef80389641060a2a2adaf694847882db888b66022057cfe8f700eebf3e638da4d289d71a1a1b098729b559459fb43991e88d167e2a 3046022100a4bee6a4f2a1dced6682bd51e3c2794a2d8dd74364e8641b74117b10194f5812022100dacb7e4854bebf652643250f7a63e9289fcdb3255a433f1b2cff74394afcba17 3045022100aa03ef1f436a4e01bbc7f6fda6113c74cfe25e87243889fc61b59c66bd4376a50220279d555c3b6407b9a7fd02935af35da4de9657ff5b963ea427220fe6133802a0 304502202aa55af17fb685f52e7962cc09546f44b37fb1f89af242878e52d80b581e9a02022100ae1333343c3431803482de217bb28c8aee8f90e85a7bb9f03a4c5c2833cb251d 3045022100b86eec55f9852b1166551ec1c45a7cfc8b7b564e22d2c0e79755be13d87758c9022016e3f33d2de66aca9162cff6d039ba4836806cbcd9f967bbf326666de967ae98 3046022100e66ab2057bc72dd1a32332214b3d3484da7547161ab487dea12a657dd51a91dd022100be1150f03e70bac9622940dbb19af50749edf77f32c2302bed4a25e2e5b775b0 3045022100c139dfd23eac079890e596155f476ec4a148f09fd8448e465278fb69f050210e022077b06e5463937f7cf53e668947f703b05da3617644a8ee8b7548538c6346f102 3044022047e2dc0af20c664b3659c231ba8d696373fa467c69c9d15216523a83dc9bbc170220320b5025c90f0fc755bf1044d9d2772c199342158db34622ccde333447b4657d 3046022100a5cb4d7ce789d50ecddd26cbf7b0b2033554368b8c8b2f1b1bbb23b063a5d808022100ec494fe8dd82dafe22dbb9a85f064c412322238b2a8996e833ce4c9b6ccbd7cc 304502203a73bcbce27e0f63e9cdd21726852a49d45c3d844716ba9718ce6aeb7b91634b0221009e4b36cb776e2acedf6ba6a3a6c0e8c8cfde1bc14d6963b7cb288ba3c2651543 3045022100b705635b67f8d900c1f5bc3dbaa9f146a3c53484c4efa5d685260e4efa66cf87022000f6d22a6e9e26bacffea28417062a79b3bd92139174e984c12fdcb6edb82c40 30450221008535f25e6595e8393696ba453fb58ed413344aac7c7c75fe2d55171396a4194c02202190810d82eb71be187243a90b472e1a0d2c76c72d3a6e20c195913483056eaa 30450220514bb56133a618c31cffd54e2f72ad87eda37ebd12a736cc0ad0fe874d9543fb0221009b5f9d6ea57eb24a440ad8d46a26c3469ebb239b13f9b847883626f304639e5b 304602210097741b9b78c46e9a48e252a29136f343eeb8ba6a7bf5d4d3377259a6073efad8022100ddc8b56b9807dc0e430360782f49f93bfb04d03510d32936a633b7bcee2db8d6 3045022066c930e8f20c0dea5d1f0490d5056ba1fdeeca6eadc9aaaf0c84a872cc013784022100c01578140df5b5241dd7822d978b3b10786c7408727caa7df15c304060a2129d 3046022100e0387411eed8c31206eaf338ba69a928078c3d1a4d8b55d5a141f9aac35252fc022100f8108b1cf081e6d485ec0e79b54bf2f0d3776ed5649beb1ed1eab64cb4b78b43 3045022100aec410c5ad489f0ba731c635bb04661d23d0da0d0d92973f83eb3616ad20658e02205f50e0250276a8193d9e47143b4abe8a444e8c7a0167a21076a9384295facb70 30440220243188167021e9411aedd24b2399d5d1d0fdb476692cc51923ff54a60c5a678d0220687352cf2a728106e953dd0ae27fcebaa5948d9bbaed7a24909f66efe5e1d9ba 3046022100d456e6f21058d64f82ab8c9cc7e21855f0ccfa26a5a7fc5c4f367ebc29685cf502210094acc1d7d59d5c8210cda0eb0a15b8ecaf720fc22d3ada00731068b6a2ecc5eb 3044022042a7784ee406525bd29bf090a6567ef93f629ed223f00fe57683729d3c8c0142022030d8cd13a38619e90754d282b54a45de13a3b310b63c5ed204d14b1c645c5b27 30440220104c5b078afb00384b322e2977cc32bf05a06173191adf341559467f9ad975c3022017eed7dd98865c792eb0e431cdbbed309b91fcd85063f7101beab298478c920a 
304402201738913dccd8b370da1b74fcc1eda745d9ccd7ddaa071b294a78da81f65f6ccd022004901cbd6bf1ecc4c5d6bafc02a4eb1b719da67e4c31def6a4696f3b41f5c3df 30460221008671054af4ecc9da274bddd87e930ac61cff1b6f985fbcf79f47fb345df4a612022100cccfaefeec190ee0c97c74d66b5ddbd6eacf38685d058f3ee7b4ff038c9d62e8 304402201f4ef82a00ea3362a5211ec1cdb75413d0753a9f652861caec2170522a5b6a8d02202fa92a8efed7d35f088800d85bfda46646397b94b7f3667a2c7f52773d1fb73a 3044022031f45cb348976453093f419562175143fdecaf81c24ede2b16dc734ce429d8860220250ebbee8e68450e657b993a0d5c6256ef06de68e784b87e6a4c90e8d5dc8270 3046022100f95e800b58525ad5160e6302bb1168a90555667392e0b0582bca2ffddab822a6022100c54a848a6a02057b68d268c88477c2e4bab928ae51b2e4c5d5eb742046f45400 3046022100ea37052d841bb0d18ba465b8ede9c99cea0de892829b07eca642e1186ec502b4022100aa08d34a01d1dbc9b91eb3c5ae25d817be777ba59e874ce69620ad7bdb4249cb 30440220581771d7b376574826e6fdb8150a83206f79350982fcd311cea04e8fd25dbc2b022026b3eed4f2a0c633ece512fd449552623cf0ea45d959495e615b8de68206545f 3044022050af280778a53d9395a0118d4c92eb5be2955d308d1dcdf53d764b54161ba2b90220539571272b3793bc4a51ce62112833b746e27698dca4f2b3f03317645f692297 304402203fe30df78817154f67e17a19ea847461793b6629e37584eddbfe09a2448a57640220014ddf484ba1a8a82f19d3d203cae0ce578f725c60df2b7fff909d3e76ab001e 3046022100c60cbca46c9fe691e6a3aa8e26bc642698fe258b7dbba10f6e3986115ee9ce85022100afe94f9438ced4326f6e16ab74bbf7479df375a535b51362de077a1c395df984 3044022077c4a21f7fe8d9284e0f3eaf71a659949007cbf1018d3a1301341c3004fb580802200b92c22aa85e38015789580f75b0bf4cb6df75a07bf43fd4a1e1406a35ab7b12 3046022100ab3fa530a5041734c4dd8bc03a49bded1e8e22937007744740a9bb37c46b9d6c022100aaceaf86b1ec49065931502a68d777869cf9084a4b92ec1507ebe81c45422471 3044022049a640e0f06a0e6e961f90cfd628f97f139d5a3975948c1c67885b62d812a6b50220465eff84252727498c1e8dea65899f486b165cbf4d03e3678d846a0e8c6ce632 3046022100d40dfd95a125a3f3f60c5e486f00a47308156fed0cb8557e534fc4e0225fd493022100c7f3afbb17c5f96605dea04b1ed52325d6826010a3acd1bbded7ef088d5befca 3045022100cca798e96c245382963ccdbc85e54613b7fcfa84ef2e7dc0dd4094a31b5d68eb02200711c042e6ab1ff98f2d9a587cb230ae62d45af00ef12895574b3105cfbdb361 304402200156e5cdd45a062229a6d56e16df013a879ccde748a54781d83da7470954ddf302203820ab4c51c4986895a326f1973e6d19a78635f1a22c6b565b8fb1136e4d02ab 30440220753a4a1125448bbcdaad7b86ac61c704690ff37efb924138a9046d07e188768802207add880b67050dd76958f1d929de42b800331c47a27204cfcb9f99200003c463 3045022100eff1cf85c2455fbcea90d7eb2271decaaeff1c0d9bd965079bb8ad0e1678f631022009087b19022a23321c8bc2d131908991912779718400567fdfc2730fcc683b37 304502205864d530d2be0015385cdb1d4867b6837d54d64145d3b4b151df05db5b99ca21022100ed9f0a9d6eee2cdad52db33e8df44a4b6b2435c4a819bc196effe1f0d36c3de2 3044022000cffe16cda2405599e51db056d9154232374a39ef17af09608e74edfaebb9a0022061330d94c9e2831beee9473a3c0c0d57924bb51403534fcba75847cda6f7e153 304402203d4e6aa591e1a5975bc50594ed7fae126e0eea5255faf8908ef83a377ae630d802204fb720097267a1266314c384d30ef34d8a8ddb3facecc6ac5cf167a5626af538 3046022100ceb01cd208f93cd7d31122420cfe0da4f19c301377c4e07b49d661d11b46603002210089c3f10210252fe94776fbeaf20aa737f0fdc2dc616ed3b291c5c38f6fc37974 304402200fef3629dbf02be2b54ef9616f785137dbc8370f2cf38f3ea9efcebae063e24802203f65c5bb89eee42aa3d96c810e5a32921908282744666c4f5ad482568f374076 3046022100ef90d0a73e849013b734c0153d295d4deca9fbcfb90ea74862ca5486649fd785022100a7707d7430470a72160d5e26991e06bca090180d489c86e9a94d6d623cb570d5 
3045022100ed89d4fa942b141d455006fc98cfea9b10d97f00dca6ad22230309b34d87d1e4022055c557aca40d3cf335513401e9f1f1d93d617d3e2c8b02741b3c0569ed0f7923 30460221008256f65ff3ee8a7fb9b471e6cb37eec27ebc41cf5dc9e07a1896f6e25e5fa6b3022100ff9b20b638fd3e0f6898bbf86fb08d34a6344b1514d387d19237c829485479ee 3045022022b1d2dc2cb2af3491324315b5f6452fc6ad1f1f156936fdab7b8d1b17476680022100c24d0b7d0ae50c14b2640941bb960d6d7f8b9baa351a22e73ecf3970dcd9dba4 3046022100bd68c23d878dce8862285fdc13d2a02dc24bf3f2727e95def1668ef756a662350221008e6cc17aac15727c0f52e0ebfed9b1e8805dfdb539e7f86d2886ffc4e530c5bf 304502210085ce1abcdd5ee98e6779a98af2bd5628ddb26657e264e8b8b2c4d10670cec9c50220554322bcf894554cdabf5fa77d42768109dec52a6a5c5d72a8b277658b1d1513 30450220632695856084a7e6d0dbbcab45469c120bfea003bad396d3f95414049c9a7b65022100bced3f4e5bf57998c7f931397c6ef1e8e965b56cb82764296592d8187a5874b7 30450220527209fff03a25d5a532fc42e578311d0aff11c6858cdff7b933eb04d8591730022100ffc301c6421ddde2de1aa1ea350defd67c0e2e368189b0ef5394d9c2b838be83 304502202a181e5bcab7487577d0a6a4c2599a1826fa198af32c898289ff40f5e3e0686a022100a1a386cabb1890acb85bb5089f72c00c73778a42649c7f7866436a49ddb6b18c 3045022100c0b2f046a292c8edf78fc474fc49df986e9944f9204e2a82fb26f36324f8c3e00220796efbf71e16da8cf1949538ea45aa9fecd8359f1446d2142314a5a60abc3c28 3046022100ab542e9ff61ecbef134628eaa04daabfd32e1966cd0ee4b9467897fc6f881f7b022100b5a9bf22b0fb1b1e54620170707d9079b5ae710d41aa269525b68c69b4a19b75 3045022044fabc923559215bbdfcab4549e3424a189803a234d7ee1cb33c368975de7080022100bebe723c86ae784f8b3743e28420be398124e66531a5a566e378405afb3cdda6 3046022100fadd987d618a9098c6d5ff5f9468b3ce9bffe7ddb25b1a4cab6c9d37773493c7022100a2a9a69f064b189be6713bb87646c559eb5e5a838c6ab6a44596acb6e4720cd1 3045022100ad729dbff790171e3fd34fadd82609ff646eceef05c28c4aea898e17858e1a7002206026b0961a656a35f1d0558a11fe390ae7d8888a5167190bbfb501def6c2df6f 3043021f1e4a709ac631700884488bae59b44c355bf3454864818cb7183ab16c69fdb602202fe6a2aa56d5b9f710025b92d8a0f995b56ded155cb5008429c24f664ec4b7df 3044022043e3e2d9b558192f07ee03af87225879f597ba014ca43505da2f04e5c27ae6ea022024437c20572a2fa560b6f08d13646447b7a5cf8d72c14bc7c6d7b483491ae19c 304402200caaaca2dde394cb754cf3485244d9c3f7df4f6d3c9beadee3918b3501caf1b5022065c559921d0053bfec0d5069228754064cc00f165fd9776a22fb4686eec7e4aa 3044022012fadf187409ecfc013908b839ad8ec08dbd9c4490fe94c48fe705c6464e519f022002b4f41d241839b590ef7264f7a3a9e23599db77c1e838da496372fccb770053 30440220270a674de85abb934089feb950cf04ec4c4f9a2cf8a3baeba9ff31b83f2bd5a50220310b928b66bee73fda57fe1f4ee44f150389945e6762730050b475ad2d6357a1 3046022100875f2ed25f064484322bcab7b2fdaaeb775e044dfe4228f167c10c4ae041ba66022100faf041d67640be01b9e8f080cef5651dc857b7b1ae7c7f69f0c55b18254d7462 3046022100b050867cd366364c8db6c95f1a60872b646c5b3e452339a19ab95f4ccdf358b10221009da0b5b6b85044fa9f9ce081f251811727b935a92c2dc4f78e77312668339af2 3046022100d69df909c5e8243a7b8c89b3209fe4a3853743e90ecbbf0d72554841e7a4e856022100c10eb353234103ce095f891323246ff6a91787ce519b94bf471a67800669bda1 30460221009a5f80ffd1b8a85a11b7f8fa9d5f07d52c8e0246f7689828c4ac3097d46f08f6022100bb173a0ec4a3cf231b3ce8ae9811773d17dce71b4a907e7fa4dbfca362b77ef2 304402203931247e316c74a08746b925e20b09e8992b0129e2f0b14f2908a28231929c7102201ed79338869df1345f242f4415f30c4f82bc763b7630cc2e44b60844e375d42a 3045022100d1d25ffd1cbd6cb27de02968b9e12250728c221bca91292b17d1df95e7db77310220699dfe8627285561d6509984bbbeefb042c0409e7052e3cd1d1da7f686376d90 
3045022072c73edabee636901f839ea3307bb943fe36607b9ec3cdbd01022f3db5bc17b9022100e20cb051b8f18e4240911cd9995cd018ad4a264fc6da45c9641c946367c8b3e7 30450220198c19e29ee95533fd902867b7f713e19cab1a5bf74fe309958b2feaf69c75f6022100c3c3c5c7bdeb4b9e10a9db2d90705155e361432cbe3ec006382729ab3f0e2d7e 304502207968e57108ddf15159f2ae88b247361324aec77dd382fa42ccfb6b6a23466d3c022100e2a9072461412c48720767bde8ad0b21200178dc90c2678dc3f5b22980f5f360 3046022100fe05f0f5764ca9d5c8387114f889f139002b934ac7935fba3377c33c27db146c02210092f21fac1cd39741754eb2eb9d2e5c9b8b53944d0a864f16b452515b4a241132 304402206b3629dd9806dfd748d9d6cd033bd498a38897e694ecb825a6457894712d68f1022037505c575555b4d10c75eaee1b8c4b912cd4bebc29bd22141552c31276eeb45d 30440220554b0b80fdb372e7cd4a575d87ec0d324f3847aec0533a6f5d8b35b8d899d526022068dfa08b90b29b36e908212dfb342f91a596505bbba3019959fcc5e39785388d 3046022100867f232c6b4546a516646c85707d7d01e42f6975c16f51c7d413a0b98a4961b2022100b06822a5b2fef29d68fd74d5749bed93a72089ea3aa0bc27409e205eb4a4c3fe 3045022100bb80d1ca04dcf7084929199053d8da4b3a8562058309d39a5baca6588b4e653c02202e77c594e8b7b057ae77da99a46571f6ac135f1f8be45a54a3b5507496d4fde1 304402200b3292ce691ea4b54f7e43cd31fa98d197ccda5ec61e4b5318e9cd2ec222310402203d8d3fffc1597fa650c238d5a463a136921259e306a4e020b7e6106b422fc377 3045022002e042aee402ae6a7fa056a9ff07d6bd7249fcaa439d21610c2f6c158a2896ac022100c48ef186e65c7160d75b426543fb581b92af90e516c7e283c29ef9a91146f22f 3046022100c337eaaf473d173e093401d311c716310fec41a2e760f8cd66cbd60cdbb8e5b0022100d9ee3171b083062342ed5a7b64a3b9accd972fc60339341cee65dd0ee4ef3f08 3046022100e6c2222d6d66b195aa541149c4b46dfab894d6dfff459c548f727d728423d94c022100b525eeb6cbe0ee2fc64d78aef54c5ccb1416dd92c2f51baf6ce915e11b56e5bc 304502202578bd95f0f3ec8f8ca55ead90048e82f28496174d5d647d5737c4518af5d39f022100dabef2ce52561d17f5e7727420767f87e4bf514c351b8320144cb8b3e77b8458 304502205fabe55236c255bf6980f318285128152757f97b5abd2a83e82cfeac1a783682022100a8268b64de41794170969dff71962ff0b1b6f2f2ef8bf8b6fc90d6fab714595c 3046022100e6fa9d55f7016630b23dcd3276e7f977e891937609b4009e4cc4e82fe0a0734002210096ea656073935d47e19c3cd4ae6ff2374824b953c90642bec8608f1f3766d64f 3045022076fac5c9831978f6605733304c61a20ee6a505a2473644a7ad7ed542eb0fda27022100e08fce06dc4789efdacac13c3e6b9eed950a5f8460ba69c6da0459955039dd02 30440220784c690309b34e41f836d97694c3f252efffa76a7c9a711e85319271d60284d1022000eef2f2ab832668ef5f52693f9bf438dec63f2188b72d8faee72bb17ed7b7e9 3044022025f13fce9fcffa442d0190f674ea68d88df41b02462e8ab31ac22e9dd70d70cb02202da3e6ecc5491279bbf64514dbd76816623d6cc1d0dd282048cdb731ab753201 3045022100cd2bb0a520f6b979c6f043588184fb1d0ea97338ae87937acc47e467eac4b5ed022044b7a0df19d6649b181a43763a0b4cced44fb3765ba42bbcd5b4a35f6b7cb39d 3045022054a0716648c59d06c899c64aa3eb641890b133093a6ae2ee85f45e5aa013cca202210095ce4b62c0fb98a5c68572e4114b75acb0ed0a98f5216d20d0ba16deae6c4f4b 3046022100c732a2fd1c40a1ff4104ba15dc8fdc6ebf4bdb9cb4e045b251c403374c6b208202210081f071ec0389cabfd4f7432136a1b0764a14c3db79db8b7779de5d2805d5ce00 30450221008dc5372f6945a69121212e33241ff67fcb786bbcc826a0177683bad584330d3102201354c6899046d6d8e914e1afb8d7b1f164393184e9eb030c184951c6231b135e 3044022003499e4dd6e20cb7712e8459bd6763d51406f8a5fb904285b6f24d46c9c397cc022072766f491b1d0fcd97bc303a04bd147dde97480d5f8165e262a685c2d435def7 304402202bf61be1a5632b6579e793157704ad7324ac26df3f810498e77ec9837cee34a4022073b4ff8c03b147ad4599b66f9fd7a94c7088b4bf439daa98375d97c7932c5a35 
30450221008aca8acc447cb02b9047d1b3ca5d9a5f6dac02d3d33406b355c2c0c92107a707022028de65f543af84643a9a4a8ce2cfd667a809d573b97ef596acc032e4e366a564 30450220645bbfaa61baf675918e7a80cf6e8129a1de1c823e88621e134d3516b0136c09022100e05bcbd9ad612e9994443b2b830f6d431072e355956cac4e3e56e6e1a548c50e 3045022060c1f2b579303d65f4a323a17c3cad6a2b21d7c0ac5eb3109d40f63077ef82b802210085594dbcefd2c75532f23582740796af0b13c85afec9876bdc28b2861d039abb 30450221008c9c8e3d5d664127ab30156155b63ad32955bd0259370801004a944735a359a8022026d52f2afc172a0a2f2085e2d72e933b04d3a839629b4c15e084fee4991cb409 304402201523f430e994e727a34185ea7718e0acbfb9a1c3127f189ecb0678e5c56c061402200f27996869623db29a25fb27616ec3197afdd5410c4c4c08409878ecc406a192 3046022100a83113a1bff104304b23c786f7ee19752c48bbc7bf70353de0890538cf62125f022100dae9b81438b29c3828762da1dc567de480885833942bad42bdd6634265e877ea 3045022050bc4c8ec6ac70acd482f104501f8600d112590130794137abc4bc83c0a3ab2b022100a00f8ae33049158375c34545402f0816892671522c1642b811367ae06145adef 3045022100b10ccdc860945cccc753adf6d7f8192e67aa3d28b97ee3e2c89d3d88e4b3130f0220542f99450b2b23332676ee025c1a2ba35c517340875d5d52085bbb85f9650e74 3045022047b29c841ce017949f12be477f8d7913e7460ca69ed8d69ea6fdd8b0f981857802210082d62838e46e3e98db8334e494f6c1665852a9713b3f17b9c1d84f1f668c80eb 3044022073f0638ace0976e594f4da1bf5c07fdf180cd3df11f03eab6822bb62104421900220549b4d6341a2805e3f1a5c34a9f9dc221b5cf4afbc94613c54aa4e0987eda803 30450220618ff5a3eaf7d745019f9cd8756aa35f4e55856ee7c2120007049720710f17bc022100a0818faebb46272b9651af7b1af32a1fb757f052f90905c3aca630afba4f337a 30450220262f4f6f6af1f212d0c4b0af56b4eedf7042b3541dfcac16a3d96812139eab0e022100e18a4bddc5041886eedaec3a542479c40453c0f3f9061eac339b1e82fa7af6f6 3044022066536736c3f7fee8edb0c21f67e788be59d7747d26c004fe5e976f4a75c38cd00220199080f4121a4f067e887ac91261547b1674755e9c54ada740e872ac605f03ad 3045022100bd6c673314b0dbf7e727efbf270c4ac8fe0368083fd91c92d8f396e74da04d8102200803ef8ff656cbdd56e5f5817573b69d6980f98178a79cebdadfb673b29f6928 3046022100c8f60c9211c754bff584d94313b87200ff72d5f1491c4b036ad87994d2bcdefb022100de3e99ecefc78d175a60b72253fd7d5ce045c35587101b7cbb9e8ae61d03dd90 30450220157c46a077a018d814ad8b8705982b7f3bcc68e6ae689fd094f30a1af8e182b1022100fddb96655af94b76c01bcf3a0a6efc264616b93bc52450d944b040a2551660ae 3045022100c125dc7647d4969a429440bf8899c5d28efeb17e4a2648c743fe7d143814eea502203f4f7126c02fb1b8825d23d9d826113da908eb644bc4b174e2efec8cb6b85ec0 3046022100c07702ca9f952f5da9ac36989ed18749106743a6abd71ff22da83bdd6297140d022100d4c8d6f230911ce221ceb3e925bd27ccf643cc51646b188f1561fdb7c6123d5b 3044022007b7c4090e57fd9372c09b8068de1d58cf715ed2b2662b5e4d4c37d3db4b758a02205ace279fd13304b3da52f760b099c8db7bac6d794d6426113907007ab9ca6db6 304402203f31b19fc52c08dbcb4ae7cbd97b2fd5bbfbe7986a4e186c180f0053e80442850220237a10ee1e41073c29d144a731cc867d7e31f99e932e8aff5a82063e94259adb 3045022100f8836c18d80ca0200d20be01dfaba6cd9683387b05aed9563bc7a620f7f45e0902203c58890ed620ff55f22f8053d23fab786cefbb5591b64f2e386cc83a9f948b29 3045022027ad818165577b7b54a386984ffb5bee7b8a9e42acda43604c949803bbdf1f1a022100962649f4e84db60ad8852a919d4db830b0e6f85a39ad15aa97349d16297b5b8b 3046022100f8ba1a3668995c17557511c2b1b95d6c1f7be234c589bc5d78f8d216d7d195b3022100cd966535fdcd05e86c961104537f2827c43b208148541aa818ebeea8ec32f493 3046022100caf7ea887304298dad18483634fe707b323a98f7f06ca0a476d7b1c97e40f90202210091c06fcbac18132aa9ad2e13219eb75a35846720b2d81ed666d2fcbbd9aaf4d4 
304502210096f0e03c624dd33e093fa338eae3b076c380c54552471fdf995ce6afe063fa1f02201593a2bc9c6d0791a744206faf35f1186b772ece1fab813b5f5dc6d23097032a 3046022100aa0a1eb8ccff3d81722b5bddc9fec7c2fe90da5dd95c95173b76d20ac43419730221008de6503a4befe6aa32efcbaf6c529445b5b936de0fca861183572a58784ea6dc 3046022100ae7d71adc1e93b1279a3e4017e80530e174ab90194a0affbf2f8b5732f8e73ec02210089fed84c1595de08499653b6eef64ac258bfa1629b0b4af5b9d3e1a19f0a1d9a 3045022100b1b7e0dce7bb7c9b1fd75b66b742f3dee3227243ddab7218daf58f5709bb794302205f2ff1ef9f6b2ad75d60812ba58c3ee204840ad05beda76b1995d41067f95fd3 3044022010d72ff669389b31aa7ac5a88c0b8273325a730a186148ed797a92981c237bd2022002a3c441c014e615df526ac663a04fc01e84e333735cb65873888bfbf58edbf7 3045022100e351236ca9b8f46aa6d9a9e3d5662bb1313b014e7bad14bf17c68187d207773602205d45fbfc342b6ec7737f478b3d797f20fb8d723a2af0e8e4979061fd8773150a 304402207790c80c1e9bef5a15ec05df7d8431fb2a2adfce974d18f2c24281fda3da9bbf022001cc9faf73d3a3eeb8662bab5cbf5facfc217f356d1137d6492f4e099df2c1a0 3045022100a5832e0f0ecba3a9a082c132e532d1a95a6b54a52897a0472243f647878b7851022046e2d52e71b0e3fb666a650bace994230124e7c17022f2dd948751e6c5917283 3045022100fe0a546ca5cd8f3f27bd62cd030ee8076216a74d5fa6994e99891fe77228d558022045d6bba451cc3c431a64718f56390651a961d03c86f10ac6c5cf4eaa43839579 3044022074afa007d2a4417af8070785b147e1d4696056a6e8a551fecee36d5200de3a9502200cbc69fc04705f357a2c3214f2c35053603eb7fc7887413d258cf4391d0ec0b3 3045022100d3b5427301ac483b085d6b4cce1c01e47481179051e437c0288503db09e81916022029e4cf13f7e650a18b28a57dd50ae4015559f233e510e04c314ebba01c8f42e4 3045022100a8846427bbad3a332bbdfd426b66f7c03d89f8514e546eae143d81eda5c48d7802206c6896295f36ed0ff9addbbabe84fb11ccdae46d61798a08d31d76fb0515df6e 3045022100c845fed53f15fa83af4c520960bb540effde5319080168e0cd74a65a86922e9802205b1618253b65e8fe6ce2f0f02f8ca3bc63d96a96c15709f5a4e180427816019c 30460221008c758c634f738937ad0790df6e881df4e0b5303bcca5f70f910d52e4920b2f16022100e0018e7d926db5849bdd1f3859c3cd531691a097ffe4d095b1819582521ec1eb 304502203001a7178ae44416487bdfa43d49d0d522cc26034a35c81349f51e4c1837aba8022100a0fe594ab1050f4e10bf8a1f433b4f480b60e5a2a915b86ad0b4bddedda3493a 3045022100b1d9924919ceb9570e2a84f1e478b7202168fb2dfa1e868a9a9384ceb836a44a022023f5834b22fc374a05e49e61125c04fed17df690a9b03fb9a8d611528b124e43 30450221008134fe22ea8fb2dbdcdd7b6530bf75e9f340510ff22313b47e457b0a6cc00a6f022077f4ea904e68cf81b14bdf4540b0d4c2b4e746ec4d4048f75f3dffcbc9911192 304402207eb167e18d79fa997eb76e650aeab2d7623ff4edb3542c949375dd72dd910dfa022019da3792258c4ac9e8ffa7e9dcf49eb33c44dc00b0d1b1a508a0251ad83d4c28 3045022006807de1c6bbde37e250fbf8c79fd9fc59fc8a87959951f835b3f0e8cdc48fa1022100c8cf9749605c3f47f210699ff9040f10fab230700368ed6816b67fcbfe3c11f6 3045022050b7e26316edb1747fdd9125be83af37f05286e12e536fa2ff98038d2a35f836022100dea7d6e5b358817058208a7003eb30061c0438ca852a7998b5ca91d7229b8bba 3045022100ed4985ec0be9a940cb2e7551c5a1cb0fa3ce2ffb7463ebf8149d7a26ab867aa7022053d86666235f76aa81bafc0e433b479caab91f22101d47a0af94048dad512c77 3045022100abe240364fb9d37531f6f1090a126e8f9051df50edad5cd7c119d68daa048425022015c58af92d48089cef1f09fa5d04e45fe066ae0c6a48512ce09f61a07e43b675 3044022057941b96f1fccb2793c8b39efba01910f5271d000f8b7561b212e87f0df3400b022041ce0bdf8ad7e5e20e7a5729ebbceb2bbc1ee7083ff73a33b294eb7566990d74 3045022100dabb36baaae66031597bf9192019d920555ba3a9f6fad7b815f614b30439363f02205455ffab65f0dabaa781ff1742326d64804c365513af5346d79777e10196fc4d 
3045022100904dc4320e0000c4a482fe11fd323f36adb000bb6184bbf2a388402cd4d3a9b902205b23a23d80e78a6920b5187759fb681953aa936bb503ae4a5d3b7fe0e7174ade 3045022100b04cd7311cab03d8a5c1f8f47049f2e9933845978346715daa173ca134673731022005ca6c711160f21628aba03f8c2253edd9da6443d1c31c348daa35898e2b5a7d 30450220014697839bf33bcf73bc6b65d291e97d8bdda41e9f9043e385c30989442e3ca2022100d9b7454a9f916350c647a2ed62d61ac2d51387fddcfdbd8761e57b65aded31d6 3044022063fb6b1bfcc46b6aff875aae54d2e8d1a3ed3b943e4a525812bd4f76081d0b5a0220238cf386182d8235ddc866f1133f9b1e2b3f68f6ad90498439f6098411c250a2 3044022071194979b2676b60ca3ce9f3fbde41fad0b145e11b17873c3c41f1f9d6a1c7c1022013bf675216dc2937fb7f17b1de737b0a3b0959d7b1c4848d7eaaf855208d33ab 3045022068df1944c3453694473fa59f998b79503ef315a5e4b628d3b7fd5edc62106a8f022100c430863807689f8d18c9db253b474353924819e669598c88c259fc62b942d326 3045022100e3e49b5335d083706d32cc41ab69ffc8066ef24daa753b8ff6689c743653c931022016bd94bcba9e24e59b01b3fb6288628a749635cea83dede8ff2f8582a5e91295 304502202832a144afbec35fd550e3e385ce2555fa5fe9dfc7dfdd60933a66f07e0bd8b20221009cab88d3a488f821616f45aac0c2315e3ad697ad1e356990ff2b9377f0ee58d0 3045022100899eb87152419bbed056d0e3effee6d6d054c72e960bba0e45ded009ad759801022009a3da48edeba83896fc8ffdbaad730199640d3a6ee413c977e82079e979b84b 3045022100a5b0caf283a062cb4bfaf1ee008627864bb7bc63439393dde59f413c041c03be02204a8c52d432391d40372fa99aae0ce8ab5dcb78b36d831a4184fd267d8461e5ec 3046022100ccf1e1256e48707397294529e3036f167e08a85ce2026f9c9a839875e503df63022100d47577960e3ddd06d0c39eb62adb67e1e3963562e1c9bc8eeea2559fa94dcaa1 3045022100a035abeed32a03b56c8636516f3883130687839836a845d46bcad74d6fedac9202205d9f7ceca18f152dbe42aeee964c8553e7c11a9acc0498728cbea5cbf4c15a8c 30440220129f033eef3139de8131b228fc837aacb9245e875cd87af048e9db6ae5f2809e02204e24e4313a7c3836e49a367b664142a8af4541e0a66308275123a5a1e7221545 304602210098136f3d36122a5ca5b486402705facac15583ecfbb91a9200641eb9f3943e8f022100901dd43baf75b41a947db67316ee8c6a4c033e361cc6b2aef08caf9ebcd512e8 304502204fd9a50591bc68931c765f98543e5b9d003aaf8b1e5e0d1463ded12d3add33490221009e43bb0a87fd08dbbec33723cb84b485434ca1da3fbb3241cafe0c114e34cc10 3045022002de81bc6a738104bba524758dd90a1d3dc90ab401005791209730feb1cf0618022100c3fcd757bb4778d01f75b93697f70345a04687dd65af3d8ef8d47ac92c1050d5 3045022100d6fc07f353c5e685a4cff682d9f77fd09405cc31dfebff437845ea3aeffb699402204fe1084a4bc637b496432d876d0c3675a3bb0224d1e1b14e89ce9d2919cad71b 304502201741e88e687ba0f96a6e8a57d13e64847003cfd439185a0245c96d66150012eb022100c5033e628bf21b9d4bc38fd6677f0ef224e029a25a91c094710b0e7f6929c133 304502205cd31140bc7e7528d7f7a3e65a32ba0ee268d88f3295eb2e1362cb7ceea6e1720221009b8028adb2ad4f33d16e6cca5036a233d59582f696a20fdd53b221446a3ad112 304502206ca88968271fee1f6cf829d55bc7cb415b81046393f14a48302c9d371b418e8a022100c449b0414819424993c7f93532003ba29f6d879bb8261ac794547044164a3afc 3046022100a39b4238bc058820608b05b8881b090aec4b84b03905355eab96863b703c484402210094ee3d41c8e34634b427c76dc4394a86e58c8c0351e0f5f22a62ad4475f5beb3 3046022100f9b493cd5649b49cd01705d732b99e71460d786d17daf408a3fb6195b978eb91022100e8e1f2683fdab67ee30f1917db09f5d126ccc0a42948d2b145024d14f6c70b82 30440220458471567090613e1dfc2510cc70ac0b2f3cb07a2ad4c660d3a33ec59d12414802202b04c4d1826f7d5a5d1eb0a669be0f53db1f80475dd6b301c8a54c5908a29901 30460221008c6149576773a2b7902ab1a3929b90eea106481f96f299563279079a13063d24022100960806d4ff89fd7205c95e6687b92910900cdd0f2ec420f2ee6853c3699bc8ae 
3044022079f0f557a7fb27b7138da88b78017ab6340a60a3264da734519d8a50fb64c77902203e21757b0ec2551b23f8ff83c224b32f5f175df264a2b23e2d70a9fb099c289f 3045022100af4f51b327eef92041b134de5f10eb7fdf27a4a747c61a3b2ef33aa0a890a8390220671c1bf504179702124122aa824a227d40cbe89be3a512faecbfa736591caeee 3045022070c6872d2835322d7f5950c98867835b70de0c901f2686fcffaa2f93c0be0989022100dde3403e60b54cda8167d84df6c7c01f26bdcd22795a69750383ada5ae4e9132 3045022066f3855cc498a2e4f32acb72b9a9c00cbfe46ea54150d661ed9f970a2bd8e62d0221009b15fc834d01440ca11bac967354d7b21e0cc7f99df5ba4fc6250b121bd408b3 3044022015391716c9d761628710c894a46511290af6a96ec9b91ac717159e8208bb85a602200b7777119310e64d210e4c55e88a53fc829b885f07799f40816cbeb3f5c27183 3046022100c1535e79b3bb4e43068727be1ca83367742814e78d0b984ee1ee37d981f27cd8022100842899dda08d965e3dced9946f07f3ef7b8e123886a86dc5842efc88d18e0923 3045022100b826c0246d57998d39b0a3f6f15a49f273a6ff1696afe7ce433200e1da72939a022050eb583d58b49280992a02811aca6ab822333116b477eb9ca8339a52344c4a8c 30440220638480381a24f214db2462d02ca3bc09cd5d6941b0d955246b7235704c6c7479022013a118d329a662b8f0a5480094a2ce3fa460a72bccc6d449d2c45d1306b5c8e3 3045022025e4a3b72929e2078f433bc89878e0a090f18a916c60555221c2998184bbca17022100fed0f5f0850d3120f523a5d9551637ff5ae6ecd62cf270ed5cd42e667b3e3f7e 30440220476cd2194f2d72d3755f65d56c6f3e501356cd2f4b680a06d1dc18fb5fc7019e022063ff059df098e5f47ceb283b3ed9e33b7a7f21e90c2b9bfeda05cf2c979f6ecf 3046022100969ce0b3cccdaa9b285d68cda6d815d5f09c9bf9598ffa13eeb11cf62f027681022100eb8419189106a23fa253cb0ed7d2d82af95cd04c86b5c733544a6db559f43d14 30440220075bb74f6a3e9bbe117c557c29ac6bcb291b2b24d03bcec0b10e893c6fa800a302206e34cbe577f6f8acd80c7b7ee62526307bbfa839dbcd2513d7c25b3d0de8955f 3045022053c853bafa8710eee89f3831715951b92911b65c9bd1146d839a508a44e40959022100c8287944516a90b50e67f5e68fbb8bef1f499051c8422e1904c3c20eb57a69c8 304602210094be0b7aa7edc3e041092d2d8caf9793782e48f0b3dc09ed5f568450265fb4b2022100e05caec8c2374658a10638a9904904163a39bb5673624b44db8dd096b7156325 3045022100e31fc9eff5baaac71e24886c293aae8578df0261b25e82a273b844eaf6c0364602202ff648a945c2e016a03f8870e7f989b00582b4dc61702d431f782bc2b01e8a80 304402207a4288a756219f24b94f0473f73f92a92775f8ddb5381086081ab738b6747e4d0220645bf07e0afedcaca97660a20f203bcadd821f12b60424c472f7d1a0420fcfdf 30450221008ec82e119f75df89455fe4b27993322d33ba793f066eb00dbe7532f899275a70022057233dfa341a8c5d2bf6b5c15e33b1ffff19233ee3f3b6937880c1415ccd8528 304502210094b42401c6be738feba16f74e1f5996438763131cf960b0be82c06961ac6dee002200512774b9ee37ea889cd9d16ef65b71c768b9ca2e926b7d1a1b7a0dbebae7267 304502210084263bed19635c355edde4db375397d3e2d0799e410b7cd35dc63cc88367846902203a1ce571523b72e37fd2143d7de757e28f137d75451efe7f90a760a4d128f0d5 304602210098202e6ebb8e9633f9aca72d7d57bbfe1fb7eddb7a3b733ddf103460693716e2022100f642c0529c92601134429ac61721ab01c71ae29b9323f9dd61277d668d32e957 304502202447e069abef8e65f11d5f0efe1583af162552612a32cdd26c86357a5490352e02210085b76a27bd93efa0e0b7fff830bcf4185a8780debe1c7ea34d2c0e23d040f65a 304402200930432c0d45bed91dda85a23f12bd951f49132f7e21512ca4eb61312c90138602204c5e1f45a055ccfba31d19789a90669c0b6e714cf29bc7a319d2b54bbf43cb5d 304402201e88460d97863c4247d1691810a90a60470bb4d9a5c94f49c50add6d77c6fdce022003812a5a1fdb62f770b0311d2353d8d250a862cbd7b855fb50ce0717e14db74f 3045022100fd5c124e066199b36e8c9a479f14829d58e096d162b9c95abde1e50db74088870220133e0767017a8ee6765a1a6e44bfe6fab4e8e712446d80e3d9765b02114b0f9b 
30450220252256f63f191a46926c69552b96f287fe12d011a08e8396928a4312ae36af1a022100e6ef969bec783bf3bfcc9d574818fccc90d5dbd82214deb3e216f3e6aae8474f 304502203d20cca29f372ac4c7bd5e5d5249f73fd629b605ee4aa202ae0d9cf4b0521dc7022100926ae2e995d2f95503d84892a23ecf4820480cf0c60f747af4f290912c1c3e09 3046022100e575a8676d3a6142b81a9be2dcb4e30678d6b0d3ffdeb261f37a4ce00267f1d302210087102e1ebb10959a6ef3021f3f02d8f1eaeffb64760dbb44aac505e137799585 304402206e06282389e51eca6ba071a58706b61d86c68ed64f8e33c9964714c427ee4efc022044fb46c906be817c84b22b38eeb6e249a733420c3e9a3a6ac6a200aa20eefb03 304402202eb92da7c552e2918bf87f7b17ac626a608d19692233c2002517729235c1e54e022073fa8e336bacb89527daf56550a8c20860d1af1c61b12b1e5536c754f30bbe69 3045022100f8c00a7d8cd86c63eb0428effc922e1aab609716f09f82b22fc988264cf9593902203371a42a10b5a10578d23df715300814095ab54f6c0c7d805dfc761077eec6b3 3045022100df11d133448b5c3ef2dcb041c179421bc0e081043ac635c67bb024dcc170059602201c2a0d26f6b2cf2114499839a3d3ce583cb78922cc6f240eb147741d8ed8d413 30460221008db9f8c1cfbc71cf5654b723a62e92bbb37f6b201e32e2c023a9b2eb0b967bde0221008aaa72f5457c84a36596bc9e32955c77d7836d5e6003f7c40c2ba2b80acc349f 30450221008197cfb1de724ad469ad7c3e37156f00835c1e5e4cdb2f88b477f69ca2551d4b02207301f6deb19d7d717738f5d763eef37661d9419911ed598ce4a424bcbc6b400b 3045022100a77b03ebc7b1f754b43d4765a074a072b23ecec0beaab2b28d31c5d0955bfdd00220774d324d6cd4e1562cf528ba0e43fa648dbfb54a90954a804243fb3a90ce7b73 304402202558cd7024f196b5f55ebc20c34bb10d3f10d4f33ea70165a1ecf664c9dc10ff022040d31ec00e4f5c87e1c88a4411c7ac286c477225f9c0de4ef1c16c4e7f5ec191 3046022100af37cfc3f3c504b7d85b715a101e76adf62e50ac7359e9ad91c978eda8fd3c410221009d2869b426b7d11df1b8167e59cca26d21c30975091953bf4a0e690849185c31 3046022100fbed4a0c39cbe4112770dd290fc40771bf66773cb8fc017b9381c8648d7cb826022100834fd00e551e456ea85c7a02d61a6d32b5cdd90c197a98f43ae891be2ed6d63f 30450221008b640e3097d33f060bd707a25633b312bff6765258e5e96b9f9568b716f3a22302206aca3ffde938d35645c00e8a8960c32002797deb390b9b71337741d8d1142df2 304502203a5a07ffe5217311d82abff6d2896a5dd9e9d7fc8fe50d7d95085dd0e3bdbe730221009cb574baedefdf1f3190b40bc0bb529451351deb9f983b306d4546f3ee1ed43a 3045022100d4dd7042240802d70cc1a8a2a7b5115c93d6c6bdeb5ca66d268c4eb71b3029c20220537f7d305f39ff5de8a98dc0e02543edee977ede3ec9cd71f9e89b554b732274 3044022058b4d4dd043821f757b6dbaf78b091be60fa3b42b9f1792db0000e2cc05ce84d02200951b44a95f76706abb37b913104ec3177fd3755738c1d893d52b656c6e6ea5e 3045022100e5dcbcc76439bdec9bd83e2ff3c04f61dfe769c9b146f7710f6652b821db38eb02200eea8d6dc01ace2a666f8093fe645bcd54a31d203e2db0bc16892ebcb3ace327 3045022100fb70ef106ce6fcf93d613f20f859c31d8da72e3db5082eff1ce984b12d4850e3022079cf59e6231dd73c3c48715b147279068c749ac24795aee0c6b8e27537761d05 3045022100ed4ae39c9c40d6a0807805948e253f9c6de3b421fa139009afd04aeb66a7dfc6022059cd2d36ccf94fb108d63e8b7ba8839e93b0bd348c9af9d0ad5b6c945ab2ca63 30450220534ae5009604c4cdaad7e375b78f819da51f85fe1efebb28bc411221d2c1a34a022100cb4371c568e67d38a46b84ece6ad6bf3c5c7448accb2f901935d6a1ca2721256 3046022100fc3c4bdb999d69101107fcc53fd21f1e1dfd18b59b708d523b0b559fb5dc0827022100ca79dafee91eae8644005a21cf530ff4859d6072e8126adbe82df4cd14c98bf7 30450221008616d0d8ff550a10c379fe5073c5cf74536236aa7cecac030b40e27ece8dd8b502202a4d8b95ab1638c0e90f8f360b219ee0ab80c2caa4654f9827f27703a11ef277 304502201ec5e76ec9d1b490615e02d1b51c72872efd017eb738de9105e178622d2e7a3e022100a493fbd19ee4bec8d56b74b0dc7ee0d31260bea5c224d58de243eecb9cb0476a 
304602210090dc008501765facbdd26e1efd7ce7851504b7809e1fa1b6f8483609a561f970022100a29fc5f90fea0b3fddbb9a27e4a5920b2781e036f1314cee44a4fb49fe4b0af5 3045022100de2ad2eb32160c081d0221736cca59858c667238c6737d2e7cd4099da778d79d02207fdc4b9fb573d0bf9d4f1b20a7cadec2699c91a2f639523353437e1f268586ee 3046022100c8ae8aeeb2c2cf7da9c7f53d3cb9f0563b78a9eabea11f14dea402b1cfb196e6022100d91e4fda6a3ccb49e507e7c3a3dcf40e463a1ab6ffc9e4c2193f8b1cd1544427 3046022100f8cbef758f0a4bd4edb6e754cf7fe88c1b7437920b3bed435c54b81c4d27b3c7022100d11b3e900c6b6fe7cd412bb59a6837e1f96e0bfe5a49f63c4ac5ff2ef7398181 3045022063b00bf5ea52d4435ad1e43b8b0be56f1b294dba9b5facb7275e55f81c266c2c022100c430c42989eb8e1f81d9b113afdc58f08ef7655bb1dabb53eabb5a3c647bf58d 3046022100e358e896a27da6644a25d1df8199e7cef1b05b76e60f56708ad2c045cc5feca0022100a0cfa6189b32608726d61c97bf26cc70a34cb52900bd07339b5bedbd07939c74 3045022100c36bc99778bfaadd1b9f0b118aad9455992e2893bb74f62c2785771655acb9bb02200fc7956ca3ff2dc842cac937d2ae5d9814e947d9b3f0b95e370a7e919b716f0f 3046022100a664feffa01da8e827542af9928daddaa3b56c70a10a755da11f516008af82f5022100a32d4718da02b388801dfe09af81d49fbaf47476c48e82d607a55ca985f5a6d1 3045022063b4392996c9a3e0dff410a60fcc4fd15b161f4611388708633b6259a0729885022100e24a908116b68a09a49c8e68bd85d2521a64f49931041eb40e8b8a8ee4a781fa 304402201f7e53cefe498ad135836d6c7f19853e4b2a53386a6ccaf5755e0ac096b62fb602202aee72fdfa9b5769898349dbc7e1ace39057375e3db412bf254aa8a97d8d8048 3046022100b88348706dc95fe926a553ba687f439364e4e33780b27194614e63b1583728e4022100e1675639f4f6f978842b97fdf69b1866a38f9f8acebddd44a31275f0627c4080 3045022100f27c476eff586646c066ccc6d838dbf61b076a895385f57741fce7800930363902204d336bdeba14a82e9a97586c1ebb6e70f3e4a8c92b212246541db9e14ec99471 3045022100e30643cb3c14c527052d51162886e90123b776a9ebad6264270f1acf4d340ae102207f62b109d5665a806181dd0d2339f6e5144d0c15d09c3862e4d540e508688310 3046022100afb2b03cf9aa5a633bf99eb0f4d4a4fb373eef24e1851807539f0b265ea2a14602210096733c25a3eef3e8a86fbf6921385cde5ae7c2f51829528918d40e2b624b5e1e 3045022100de19018417ea55af5c86f674f6b3760df445451cd7a996964841199cbe6b05fb02200f4a05da057ca382fb23bd9fef1c968322aa915a355ee65b243944f35788b323 3046022100e8dfd9eee15bbffa523978de8569c75ccb6468c777e152b6e691289850549047022100a746a8e4a4524e34e370d2c2aa265fb5c546c2e877e327fcaf374a963a0a2ebd 30440220694d24f0105377dc1a6c6d33a3046506471d5bd2356fba2172970a7fdb74267b0220473df54817f7b4585e7d6ae0c7e9755aefce886162186f9be99c02a6c7549104 304502204dbedcd85793cc2d912c37496dc097679666b795d50b3a76c32df3b6d039314e022100da2b33edf3ff8324d1a1b7709a7a2e8dc1210fc29293200478066a5364a3ecbc 3044022016b0851c96aa0a535839a2637e2bcc1afa8a347a360fd0c985fe67c5872791ea02205d2b2fa10579d78568210b3d663aeff8f6a0f0372b9cbea999c601d5b8e98a39 304502204902bd641ce389be6dfb807e3a623e2c2eff63e64fd96bcf88dd50a4d0aed69d022100d8a04a05901e355652acb9ad252ec2922d4e8c2e2101e7cb426b845864bf2cfd 3045022100bf3261c4e21eec8210c6ae9d032f53179a19ad5486a08f0c078daf5d8ceff86702203c13dd0b66a064557899185a0b0eac7e1bc20ff3ae4930ea4728b57862e23d45 304402201ed95c3dd94a8d18883ef94cde56f0ad93f3094e936250429e1c6c39d3a1840602207d882b83ac5b604b8ff1d4746c8f91e1df589341b67c1809a2d3b4a0ad5a131b 30440220021a551ab275bf320e08be853e46f1b85b0f28d648cf2a8138e7e889bccda695022015754444c4e1784a91f3abf0134312cf45c7de3908d114c9d4a40ed9d07ba30c 3044022027f996d389ad6b1f7d2277510dd00ca6e20d294aa156ed518ccd6a70c3cf00cf02204eb33b472883ae2ca89117193dd043ad38be943a6614a77ba10f8385a603d00c 
3045022100b0946279a150f2cce59431773820face5fff38b741e1baadd253fc57f94cb0da02207db63a5a48398db051deb10fd80f102b9c93ce0efad156df29097e6920bc1aa4 3046022100e23d1dcaef8b8642077ce865ca32d16f90951e98e9962f6a5ca807e489caf9ee0221008d469f8ba4f1f3719d6379e3bd6dd8f7ae8b98dcadda5c6efd71731ee685d420 3045022002293c43f1f44bfb037ad9384be8b5cb5bd885dd746ceb4db208d5137cc9d731022100c594bf60839c011947d2cd545800433986a9920b6f1e3b328c35822b3ed036fa 3045022100df50a0c660f1a513fa513b7902300c4e228ea1d56072fda97de68d7a4a8fe43602205ee3a1f9bb35e168584e091980efee5fa1781d13f176813b6049c679c524c698 3045022025d32b445c11ed7f848a3d2fb31032c56f0c6c12e1f27a1e5714720e14873a3c022100e6ae1f07d8e4bc815a08e86a4c813b12a010196dca30e1a993bf58bda30cdc46 3045022044bf4981b6aad2561179bae93780926d38698e26fd4dee2758fe4d783d564f610221008931cc77c178ab2c46f3cf90f91066f565132dc23e2ff516261270ac62adb496 3045022100b56ed35bff35a5aeaf89cc86b03d3a526cefcbc063b4f52eec4fb705da4d354e0220072f504831a051a90f94a618577624fdfa6c942e7ea1891d0e376a36655821ff 304402207fc9a4193dac3e78f8154fedcc684186f24fc259af7a649fc0b9f98f1a34b9b0022065a55cef06e3b9fc8299fc75e0284cafe74819c9b0b2a7915eaf11d484f9a135 3045022013dc55f6f62160b5be16d24786fa20ce54682cc39297af6d6f1e7e29a08858a1022100ae86ce3ed5481a5ed13e9674a7ea3855f59045d6d71a3aabd92c69073e8c134a 3045022100b50bb90ae729dbdbfc70c2073023007754d59833822ae23b04414743a049eeb902200f18c95dc97a3120fa1e29cb9faee5b60d7cdcc77fbf0d8ab57b5ee147b3538e 30440220457b3c07cf97b29989ed381fcc22ada00dbfc83575bee66ffdf04e4e5d394b9402207e641b1da3aad638ea069b09700035187c001181886c0581ecb91b8722d7b742 304502203fceb5d7f9ddaa8a5fbf370aefb836d0ef69b106a757efdbf27dd0a9d79f147e022100982d9fd41d91b66e3984e1fac36305186697cc9561c79ffd4e8b67ceff971b86 3045022100d9642dccb8ab4b32497236f4a0f3d37e10e0c49c9b5a40991f2142207a6ff7b702200c0dec51087d47fc2aed33dbbbb13c786d0f59739b9d58def29fa737b1a48d30 304602210096dd5427c8faaa68f669e4b11b8bb316b8f110e7d4f66377511f97f2346a8d89022100e9b87a7fe0056138c81a066fa3b21337e5e35c6047bea02a270dfd3ed5b1809a 3046022100bc74f0e88e33e22aca946704aaf94470f292efa5278ee5c604565a7d126e12e4022100c88d5acd781c5193e706d897c77249d84097489389e5045f269030b90fea62b8 3046022100a1a3a20619cb98b0fb201c2c6007408975f2e6615a1f03de1b4b6f2b5c656aee022100ccd56996e18b47b4ae29e378a628762d4174d184833e62f52b83848e44ed4e91 304602210095a6644a4e1052fabc11a781a75aea32fd788627b34acd12715764f3b69bf082022100fda6466b7e6531654ef2823630b10f487510563f9bf55f8d98ce06aa6062ecf8 3045022100b057569637dfeffcdf67ca0b001323ba6d5429cbc670f90af2a13ac785a121a802205ee1659fffd6180ea15ef3663ea3352e33ca369f5c378fdad8d5461e405519f5 3045022067ef50b9d58cdd2a0f9a8057958fd7f46abe0a629d19167dbe4e2419d5442f41022100dbb8f3e198ad6579a2b7ef59161202ed8270ef02225f2198ac4b35858a7d4abd 304502201e10fadd69204f2637a217d92c09ad765bb7b8093473e21677c2cf7a5023274e022100c2aed352ea8faca1c243259d903aca6c7c7143d6b0e77fa0cd6e3e41980c9923 304402204948c3229d23061d390286992896b539572f6eec14dfc23bd7ec3b7015a1c5370220779fc375f4294b6bf987e6cb7f9bf68496473ac749b19143e54d41f648afd0ad 304402204c2fc574068319451fd4803ac5b86aadc522d3b55619bdb6fb27ff115d27e79a02202e8f07f3c08fdf2dcd9f39070cc1f1e67d0de2f621c0125f8b07a1826f148f5e 3045022100a33ecc91f631014fbeb39f2dbd0148f476f2327b3acaf0d4ac10296e4ccad1a80220176b113bd5674c49111011b7ca33686377c4aef20185af16110358940e29a1a6 3045022100f8053bd2900125f7a866a8e14a0277676907cf2595ec3c0ba3071a8ddda7e3ac0220096c6413446ea0f909e32a7a90b7540fbf337e0643890d00dc28b483ee2d3c8f 
3046022100f4fca4f142ce4e3424fe3471b09b2a56ad890430f78097a69ae0fcf218d95aee022100bc6a6609c647fe117fe864d3bea94f0e9d6a05743620cb53b976ff547783e42e 304602210080cab71aa82f8e5519fb83f1434c7734a97075e562ad69c3fb42c01c8cbd936d022100c6a98923bdaa16198d79d1024fee0713ce8a71e35e978c336f342530eb2804a3 304602210094bb6e65064f415d117923967c08dd06c04cbf2914a99d5a904ecd32f5c016fc022100e6729143b88c3d388bd4eba569b8e9ab1b287ea425f5fd3ec40d1a993aa23abb 3046022100d43cdb60bfdc87424a6f46f31c18a57b5b5cbe989e5a9a8a05b574010f91abc1022100e283857f3f67844ff612d7de54a48816a750d9a44e8c46db6f3942850fb6d2ce 304402205c1a2c72051f44ec314d06e821e673d14a02e7e4a031015fed569c83fdf2ae9e02206e2b1e93b6e018c74208a4082a6d5990037426fb848652a43ce465ebd9fbc446 3045022100c4cc4062b0966543f3c911ec05792e18f14254399d038bdf26426094c5bf896c02205a6c26e3989ea197b928ddf3319e01cf654a7d23d34e0a19ff4d7dddeb65df79 304402207f75a098d15de47b6639ea710b907f4065396df05ee451a091d54cbbedbc756202204b31d8cb6c85a3b963c52aafedf0894d10f8bb124d988ef9757ce1afec0ed239 3044022067df6b86b4db5c67e2bac2ceb406ee6df62778bf71ce347e89541d807cd9d3b6022003dcbebf3921185adbecc95759eb8b61034e7b0df6e82ef0e0cfeb0b579e077e 30440220572732bad652c91ec8db445e86cd864d1cb927ca3b5e49d05d2af9ed7e17e57902206ae2350274629310b3dd209a4d281671b1a449edeb8d797df5f2d1de2cf30451 3046022100876026dac46ede289939c4aaa63388e1d3e54ec31f6f5d04c68fc07f12874836022100dac670de94d0008c0c09b1d063bf7156385fe4f84d443a0c7d2b6221803d7f01 3045022100d956e0081ba04764a46fd5f1b828a1b586098f44902c055e04fba52dcd23ba7d02205d4f9c3cef878664015abf56e056102a6cd6772d03b86c4a4afaa208ab7e585f 3045022022345352b0047dee4862ca06c0deb4c7a23caec1077aae8e52e5646b7ab6bb4f02210081403ef88fc9b13712874572df295eb89003d58f6cee60d1e4b72ece4510db1b 304502210096833ab8e3adc18f30203a7d6c78672461cd2e50920209462075319113186579022006552fb8799b4bd19944b040074c43a57b11ef6fcfe09b5d830a06b785435e83 3046022100e4640be49a23b531043a6375cdfadf9d9a09e68f4f77fb4ce18995bb33f21b4e0221009ab81ef2943ded8e0e94a0c343ba195ae54390b261e732e7bd5934de2600d10a 30440220272e4d44614888a619332c0d4463b24dfc22810494047be93a5a5aade3d06036022000cafa208607f2dbf9e34c2d02c0aac15d00d0701ee3a046e9eeae364801ebde 304502210080f7e8067b7ec05425d3ad9cef8f3919c3efd51b0e3da480719fdd3a3aa4bc4a02202b1a093f3a6960b2683f04e3b7cdce3d1ef524c4c4e35780ef8f4142bf4748e9 3045022057a321b33baea50db376c4d0b37a725fc939e5ce42138e866ebf4e2d0ba241310221008be5d66fbf43311d758640a6924bc0d643f750ad86a55e54fdb88d1b1f74356c 3044022057646ea85c2b6c3dd9fcacdc2e44a17acc0daa1fd70fde288676446d427471f402206dcf80799fa35f85b95c5e5a60a418933507cd56885be638cd771c8a48837454 3045022100fffab5959a4ee1891816478b1be880aeb495b2ec1bf2c31fb3cb16bb829ad571022022944273f64aa7573024246361fe722b89551fb357fdc34d553706669b61c902 3046022100c14a970d7dec9789726ce537c23007b9e203e2540fe878eb895f1b7bc87dc50302210091847790ac5f770c59a88ce7ea65d3d7c27eec264027e2baeb0a85e4fbd15bcf 304502202e2ee4bfb65d9646dbc444ad9981c2d812d68e9b860637ee36bb346fac016120022100d2cd7e322ea4696ceda43f8cf49417a570933fc3d72acbfbdd818a0f30b9b7ec 3046022100e7e5073c0be7c0eb0761de36a68d594bf09d7c54ab209128043a94804bc4741f022100c41ba2cd071dcdef9788f0693fad504ca4e6f407a8eddaea99fad388be5df89b 3045022022295b3faf3e9c4fd11bac4bb56d06ea4bc2acd0846bdd333403c37fca3490d2022100fa75ed86027ca619078f335f18ee21af459835d1a682cc60e44f5dc0feb00c8b 30450220044278ada3315be5ac7477add8be0f377f870cb860edc5eeeb0377c983b8881c022100ab5dcfda4e6de7761342db0de71891a9f4476c66cf90926b8087f33dac6087e4 
3045022100f6ebb24dc5c0f66f44fc41d290d1afe1a92ad9bcdc875238e9b7408a4683a30402202e58fb9a34e98ea75c2ff8b9519e6b08d8d77002525260ed9dfebccc6ee9a10a 3045022077c2ea8c4b95b952fd2acf674810dd077e19fc1439420099c2b566462acab2cc022100c39fb1a3e55244297a804ee222b2e750711217dd3a215cee5bef77767d832731 3046022100be2211fca01241ef6b3cd13d92f4a6c24ac43ded3d58d51439ac9725ec4b68b5022100c9e157c91a0b43e0e42d0ae96e35b950d2611da657af81059fd2cdb47839012a 3045022100865c8c62c193bc2fb813cbdd7533ae7cf02b758c939df0f09425687c44c193a80220693e8b143fc58a4f813d86011075bd0a7dad2170180ad00e893f84e66e4027ed 3045022100952019517af4c1512cb4f04247cd0f5599a1ff41eb3dea36620e718b9ca657d6022078f2d0d981e10042b603f44d44cedf43de47e56939decfb4e4c9d259579112c2 304502204899276e1c9e700f5480f36e90aeb123739f5187baf7e843c2442094456dbe4002210097487ccfc0192872ae20e51cb0a7f4d6b49ff09db7d246a539e3fdd744117fba 30460221009cd5cc90a03139470793ad30a18e2e429bb9e8f789e2c60d4c2ec21ca89ec2b0022100a1425f7c3eccccd0fe8e432338a4f0c7fbff11712e73922472e5cbdef0202d26 3046022100ec4f3f1ee43d67f0aff722502213c000b03b820967278ebc93f1c055607816b2022100e113f81ac1801e1168715d2532f5e2982023a9b1229045e1e141ff6ead4b2814 3045022027de5b22ac36aaefb0380b7b055975cfad3f4814c497ef4ba893936d7588cbc5022100e3d70cccefaf05c1abaed5f18693012dcd3289026203c4cb80fb2c9d8f375b04 3045022100ca0c9ee6b19add17569e09c17a83ac0312dd0413b98f44bccbdd58e79719a88f022026246090ffa9c5f91023fffe19ad22d2d82d780454acfc1519651b0432221a59 3046022100b0270dc07116ab46bd9c17d06e43350f703075ad40a11dacae0f4389e9488d38022100d7ae1cabfac59d952359382b4b5bb8652e5a86a8b21f7adf3e9c567fe7fd8a95 304502210089e96a4b83573da17ca4becb2c52a9264402a7cadf8a42cd8e1e18335e42d96402207ceedc3f5af66c354f5b152a525817dafded09169f1024276d2a8bd4fb84b48e 3045022100f7736ab05741b2da8dcf6d58fffe24726008f0473885e956bf524c5092a3cd4e022075441134d656b7552fb4b3332576d4f167ab4694876cba8360f0db07a0759780 3044022041187800b22ceaa6d467b2f0025f76fcff94ffa71f2bac2281006aeac1bd9d5602203e60c0e7b625ebe794cfb7e30241de8d751b8483a1726c598ff88059486b3b49 304502201d650fdcba1391c0211ade3c774e454106a2b1ef3633adeb33f1ff70241a3194022100c13a817ebcfeb89ee0423389f608b91541aa6e039328fc0f117453387bec08a0 3044022074da11f276c8b0cf38f5f2d9d1ded2c63b1f966e55b741d61383ea8e6b979d43022077624157da80f8c6f61d527359854f17e174affbbdd861b299adcf9f68b42e42 30440220622411575c04fa8a7c5b609ed7fe89390f046bcf3177305ba61b2e1b66d182eb022077fb688221e19e2ad28cbd24196b21ba2a43f3e942e0a67c938a8bcf4404940b 3045022100bc9c602062a6b3389daf939e0e308f568a2f10d24c0fbbe6b18a64a1d00c2e250220450b08fd11be1a4ef71e27689b3cd103d446b2189e1b1cf789d995df94173276 3044022069e52d8ed6ce583cde6a94f9d9c1a6c34d54a0dbcda329ba1fe6e2103211c3cb0220648a804fa35a533ad3104443f9cb55209fa9e4e0c15037b93dc1249eaf2386c0 304502200288df182ff4072130240a84c00bccda36a1ff089e66b122dc934517402194a0022100b48bb9782215a56f1db00d39bb26c7fb86cf2efe65d0567384b538c93c5298c6 304402206210267eb525d88394cd7f5b5000714e470e2f2a5aa0532373c916528def412802200879e6cc911d31155ea3a0af65455fe8827dea9897b1bd65a87e20999b4f96fa 30450221009f6706cc40e5b62564bb6f2a7728164999b50d54e24afc683e17236f1ebee7a102205baee32096957926b733046127c0609ac0ad9310dac8bb3d0fac3bb5a0f5d221 3046022100ea4e3f2462febb998cd0439809234f40d4be8770433bb272af2d613bdc4b19e2022100ebb0964747fb8fa129f0514e8134fd8ba196b66b0769377463840dde101c7e33 304402202bd9ffa12cd90563903b8506b034aa3d188bdd544f811b03c9e74ce45eeb3a2402202246eb0e3174218b4313dd85385a7bf5aca49e9a6d30ad614994471dc6ed39e6 
3046022100c872d4954e9625336c3545eea929e6b06492c5a221df174587fccee0fdb3baf3022100e222f5f18b0a93a29469836cbf58738d447c13d5b7cbf4da30fa1cc6df00c610 3045022100e418f2a2b6a566abb8a1176b3180ce336e7941c8feae5aeb08879d410fc6c2090220554848be4d2c6ce74bc6c0c1ea9193dd13550147dc6f677e3b24b1e6d749315c 304402205963d59f9e8fc7c3a4e963bc2d7ebac053dc6103c18132fef7b917829e7eda14022038259574dfa59f99a2aadb848421dc48bf2e8f299c2dc92267fd380bd30dd7da 304502201622d5f033bd7dcf3464af769c4514e5932160fcb5763b3e5f29e3c6a0e50db20221009b122a6dcbde181fa84d2915b182e6eaf99807e59dfef164197c7539065c56db 304502205deb4935e89677fe362efc821f5e825780b71bab5dbd2823a7a1b185f8904e48022100ebff4f2ffd937073c00c03925d023dfc5dd2fd66bb715010bf628f28dc7a3ba7 3045022100f06f1875c15f9e49937a291d441f000ce60651be449ccabafdfb2d484380e35e022052d918bc500afce77fa02c1a7ccb9fddea12f16a54bb29bc2aa9a82883166016 3045022100fb904ff8bba390428187b4c250ee77062ec52667c8522ca6dfde999708eac51a02204c2cdaf769b108f4ae4f89f8ddffc01d28381f4923efb835a5c1893ebca705df 30460221008a3f48587efd291326f143856d1e705bf8a459ed9324060699c02987a25d43f5022100b19cc09329f8546ccec4028d00cabc21fe05cf690ea7e7317b1b81c0bbac630c 3045022100c0adc8ed1f63047682aa94c3341a4cc6b97fbd4c6b18354cf21cbaab30cf18ed02204fe64c84253afef9c1db7738ec954a2f53c18e352d31d3a6bc677c242233c7ac 3045022066c65f175a555a19850e42d6f2a4f49ea2ed8824f97548a3eb61641727e61845022100b80b7dbc742d1c559239c714ee96acf03507fbea0332e829df5c7854fd9d131e 304402206c0e69dd58208e6ed91baede84c215fdef671a92ab875db97c282a84925ee934022006517f7b23cdcab12b7027ad08e52d4d9d7f3acb17d2c04b1ef630119d4ceafe 30440220288dc85a232884785d630fc66e6475151e6270994f5df77c92df48300816b50c0220115774dc5ced4c7c4d5474dada6f03f96f6212486ec469bacc748cae30cb01ff 3045022044b223bb13918bd02f9e0fd327ebd5cede9403dc9a131b35c637fad6673e1fa9022100f29f013060784dd956f690d6abde764d8928cda68747c4bfcab8c64743487524 30450221008af3eeb59cb4aa3ab7377ac6210159d48a4f966056f88a5c9e55db81c6c0ea430220591775b52e5fcf1dd4f04ff751a550b9221106a85aafae72128d43aed39a9b0c 3046022100e6b9b4856950a8055ad697812e72cbf3ac314f12b107247e32b202714f194498022100b632b7936f8566472bc90a5babdd6d1383cbe8100c10a97f945c742ef62c4337 3045022100ed5ca08f30769f457a9250d86817fae213952c86c8a6a5763aeb7f3fb74f305c022026c9c8c0e8393965aea05e9b6752d742729ab429d9770a3c5b4156ad0b39da2c 304402206e2901558ecd7367cbda5353960f405a2e1160558800f2c1007c8341467de6c202203a20b3032b37f843303ce5e7548b8278f8ef6ad77aa15681e787c64958e37e3a 304502210095890750cb0543a1a99860d6da1875fd9f69ff86b5ec495b4ac212c18301f49c02203e8757f04d4741f0a77645f24378b466a41195cd4f0f1b0c3099945615a18ee0 304402207f8479a1b0b79d886528a1c8e98b8375010cfc45b20dd6e173cb540defcecb0c02200a1cb342c5c32b57d29f96143cdc879eda134620ff228c48395f1f3b6b8cc7ff 304402206cb1e4d30301b8c8ababdd22727722990aa6812dc442cc3200906b16baf19b0102201685bcaa511d2a7d7511c887ce77e0f59b332c7e2dd6f4be371e90b462faa72b 30450220350a641235ac9c7c40c8827b0ade5793cb86ff4a9bcf48593163c89c6dbaf7be022100d1b144a3559c22b0dd8db9cd726de24ac5822f9c64ce797393415a37216ace14 3045022100b2e18f389f8ccc7cae975c5f5303eec6fa4955ab03a116387521b2a4c9ef41cd0220324a392c7ce65bffdf9a35b182a6265c31bfdf57ca6de40b95a441cc6f5e4f83 3045022033dc8381402782a0d6db40f3d2af6648882de057b9caf7e96f0bf438ec5d1c4d022100b8e9aa95a5baa9e97918069890e0561a09d65b00ce4a050b0794a7e348233fe9 3045022005f751078590317fffaeb5af9b5d4805eb1a67548a4ddc6d4b873338ccff42560221008c440b515811a8e938368716efaec545e8962f14149e30198b96177d5970b0cb 
304402202561b555d39fe3666b614d9220ccd7adee3197e95bb7e4159a61ba16115a7e8802205bb1faca23a114681478730bee89a681ebe8d2fb91427e71eaaf51690a014c44 3045022100e5eda6ca57b740c1c7cd12fbd6163cc0bea81dad163b466ef09512896a60d23e02207a082e23768e07c299001796f604e4997967c287dd1a451dd717ac6f45dffe3e 3045022100f2c40c1e41ad28e7a17185af59f418057a75f331d49c3ce2395bb05ef5fd89290220427feeaa86b3a59da5c7101e57f94e133dad1201f79263ee80ed4548fbd13fee 30440220776ff1e04bf318314306bf485a1b2e2af0a78b40b18042454bc77c85348fe7090220670a5834274817b765e074e1f77072708c5ad5107c47ccc1e799d8d5f3400e99 3044022068e96e762d1dd7ae05bcac63a36b154fe932ea1c7fcd40dd61655e508064636902204fb5f22029fd85442d297a5e928bcb115ce0321ca5fa5645e040552e96947b43 304502204de66a3078e8ac96ebca109de7b84bd81eabaeca0df3e39f9aedfff773141b13022100e6edb38e8dddfb5dce11bd47453b19a0e6e28e1d983225ff08aabc3415e0ff1c 3046022100cf13d26f3501ea17f7f48d3123187aafb5938c024bcd549bc65affdb58978305022100dca5aa7c8ff75b84b3826e42dbf3cb8495cf28fe504f87b5955d3ac63e9ad104 3046022100a1d3a06925352a675227b08147363e12a8c6fdc64cd4c91075aa5fd54e228991022100a4ed0ebab9a6c99cf6669ecb2bfbb4536556a092f3fc30d2f12f9fcf6499cc32 3045022100ddcefb86472a156c67bfc7f0152b13faea9776ff30212256410e5563fb8068160220603362aea76632029efb0a0c4c3f6ba9fa7ca2585293ae72d6a7ce989a2fddfd 304502200577460ecac0d4c869b5797932d81e54965c8025367dda3b693f59e13c5a058c0221009d6c9202c6c3ddf9ffe21f2526d0c494bc21f9f85451e51c8dfc26b528655af7 3046022100e3809e61f7d5d5fc7bb21f771770a683bf18c1b1203e50b82df6bfc47a49f73a022100db604b30defc8e785361f7bf4d0b8a30b77c9c7174cd0463d32d432063b44809 3046022100b143fd0b20f4f725a5b6cccb03ed1c283ce3c55b3a56b858027b173c6c4a074702210098574afbddacb5250e75a14c9f7cfb69263d421d1049972d31dc6e70e163c68b 3045022100bec053ed4b0380eaabb05aba6dc41b28398deb0e5c48369915d6638eb43c806b022054d75ad069a3a54ac3bd38e86be4201d0e835ce9c596dbf158ae163f8d074335 3046022100a6551cf848b6a4486bcbd89a6a043d7ab697f2f275233d900d24bffdd40907f1022100a252b8719324bb1c6c374e4ee9685b063b23884533d4353519082bc4c9587b83 3045022100935c7721fd4d0333ae0c634c5945163a30d1f8d517cb2ffc4c913a6c5d0f7cd302200b0867ec4e6ce64acd98858ef29cb0f0b2870436929a96c00c8e564c1ac89efc 3045022078ecba3d606e0e338f5b16ac1ba648b2e32773a82c1a4062ead10e0798fc38ab022100aae253e6ca1bcbf365ca3f9fbef316ee3a79d08b2ddd0c0b77e215dd702963da 3045022100f9e1fb56b14cd380e95ce95c14a8ae21c9e148bbe81ffda3955577c9bb92615a02205f3241fc4ed0e317bea1b6362fb53b9af1d18f3525a70fe6c9666bd5b3f511f1 3046022100825ca264b09ced8bbf993b5db3f32d60853e513f5802e9a569162c472afb24c80221008e07baab540de0598279598cc5c17a4d16330f85397003a6ad6f33f6c323aee7 3046022100f3a2a1b60f55aa9a744ceacc030748cc3b16ea38761c5de8a4baf316ed4b4e0b022100e8308e6170e68280b1b6c5d574a8a9ac4698e9688aa42d1ab665ac2d79ae3f35 3044022055dbdd5fd036ae3bb5ffde55589686b00238cc5dbe8e1784be6ccf0588a9e98f02202af7c97aac31daa50b67ea26a598e6361ff44bff4e4f288cda162b1d8a8375ff 3046022100e397bf582c7fc179c1692bdeb678affdc2f07cbcd827535e4ddd76264c376cc40221008b99f330426b4beb63f70e69bb3ff2c510ad705088dfedb243926a8578d97355 304402207c99592535c3a31e0126ca7a259b9529139bf740e28bedff47d59a357e86d37702205cf4cc9224bae6396b5444e1c7f5c21c62221e1a8e74635094318b536da7269e 3045022100851da698db04f6e1440b3210cbf785171bf93a23ff683e93dad61217111036bc02200a84f67cdbb1c9d1ec2f4298fa8aa68dec6c62b36a98531cc0a5273271f4b61a 304502206c02aa3404aec8d63893da67454d1e52eb0a957460df4b51b799eaf82602d137022100e694c7d0372f71215e091f772b830eb6b8a1259a1feee7dd03b2e9fb38c4f16e 
304402205676980718d6db65294667b57956b7958454c943eb534746eb1d1727c7a6988b02200ccfe20aae556994e8327a8401116394e682903e13ed5e4db5c3c313c0987b43 3045022100a72bbc5d37173d4565e3dbe9ec79d38544b693cb957e04e79531370a6cda35a2022012bfc841da8b37c6b02f663a51c123eca1895640a57b9e1a2e22d1ac2b120a74 3044022059fa1d13dab091f49ee49604671ee243e4994c7e61170869a5df670e1cc018280220705e918b176c3d7233565f0462da4c009a42ba8434f9a25bb6fac5586bf3be00 304502206d45cc6851f01df8ea039c410f32f0c55a57548750f9c4fafec0d3c9addb835d02210091eefae004b6a9b77471fc6798d07b1d114ce1e70519073e443c0143b4a9c1cd 304402204da81e0e04a19332b05e66f0637922f038e4f1f9622d9e4a109b38522c0098be0220776f40b702de7f976e7cc0adf7079b4bb958f1a9dd138aedcab37d88afd0f35f 304502205b19bd905aea46354baa47e8546539b2d5b95546f8207a8cc795b883b62f341402210093f545fc1464b3a09a9715808cae0c8fc9e5734d6c1964ef9ab9fb58d182622c 30440220330619adbeb2a3a38f1186eef0c8af9dff6261723681b2355e037680f9bac6ad022010c18338ce02bf6b0097f0769c5d5e8c2e470d1b1618b18875eb64fedc1eaf82 304402206563a9e73ff01668620fe29452cc94c6ae92981e51750a57ae47bcee90ec805c022001630c26e9a2aff25ed89520ec0d2251bef854d30c10d1baacdab1bf2a482b76 3046022100e9454ff387957be47426fe43465571ced84bdbbe1ac2ed7c77485f98212c4fb5022100be79c5a966039dd310f98f83e83a904413d33d71b5fdaa541681b1e246425496 304402202db48e0fc8164e3ad4a622134500198ebfdb2d05c10a3518dd95a5b4a7b03da902205a237837914335b80554262eb9cacad26ed4889dcb68be518f03a61a47409441 304402204ba92f53bcd0f75354ae5202a114bf25e1fa63b94e69c6c9d646b615e0f6072602207c44d30934a410cdee41c82a9ebf2943a1b7dcd389aa0f277de05c853da9e2fb 3044022025147cd8740bd946de6cb7dedf347c7c913c32725b0af4f39e416be5eaee506e0220276d7d9735fad9242c196f52374a55c0900f9556942516d2659ab230ff71c0cf 3045022100b3520c7b5baa39d6ce93491800bae20e800398371f1a26a10b7d053be820b9d9022047c4d764f00d840e088346a31bb23800521862c6e88c1a1c17450b98c7127d66 30450221009569ff114491293840e12afadd4d2583904ff4b1416376967e1080375f71b353022055caa47deb4ff647b6b393928ab6ff0222105a60b2b58726f1a463334aefad0d 30460221009736d33425d42e4f857c7141eee80cb333c8944d3034914633e6a4101d000030022100f8eb018f34f6836845bf40b479e3bfa5c6e51726ce3bd70b8c5594801c63ca2b 3044022010ef8dc13c5d4666531f497278231dcadfe74ccc53ddb9d1d7338bdeb800617902202e9a91c36c18ed8001a7729be5beb3ee6dde595e2240af37127e5190156a97f3 3045022100c3cce5e3aa9e8396f07757dfb6e2051ad168729ae288349f09e45d76adb1c38c02203ae8e05d90dc0a044db668527f2507c57d6bb029cc89373f9b28a9d7b16599bd 30460221009526f579041583dc8396017acbead39c7c7b7d8177c17b85c18945a8f2a02dd8022100a13c8e2388bad8e7afc5259ac96074b5123e953cf326757b1be82808b942b9f5 3045022002fc51d23c44f6ae4b5769d7eff65bb849e230f6d5ccaab06579884acf583840022100df0de4817ef8bec209472107b013063baae9e0a89b4775a033d0535a5b1728c6 3045022100f31da908eb04dbbffcb87c4a4cd5db91550835acc6cf83e6730c668cbbaede1302207be5654f6d4e645b1d759408ce6c696d0881fc2d5d4a69dc4a969b22e19625d5 3046022100f9c8d44e99ae93cd8f85c7d57ea82b0c46f735953427ea57b635582734ca22d4022100f233e927f6e88ecdffed00aea204bd7daed1669807a1c2fdca13d8195a9413b9 304502206e595ef165f233bf8d920bb95c33cd740ad42c05438075746024db35d670db730221009a3aaa0f274b4aa47796f8ea8b773ea5f3e3013a67769077da3a51c0443d5186 3046022100ccb9b39061cb1c041b866ee054be8d8cbd3b7e5824a779ed38bc61fd070af1e8022100a51e7e89a27523af96a03ae3d53d8b940cb3106be341f469c23210a8a9d1c3c2 30450221008c43480c70c02ae2f0b3e434385340ea89c6ce935253de9f816312de67a9024f022044fd5ce2b6dd00ae8ce7fe9e6c3f98477ae64689fadde8c96859398d4824e845 
304502200bdefb931189a49f32834a030f704802a8cfee180d10b5f6d88fee42f041296b022100bcd6e2c5f12f2113f37b43d1ec03e35574a532aa7038cf39c3a8f86498241aec 304402206783444264a7865a927fe7d6ac87764d78b51a6b7649d3a19447d74ad43cfcbc02203617ae671b72239fcbc2c3e545070e37e161cd8ed0d6c1b1f827058797d7886a 304502201324a165b229117e76eeb1247de40710a5628059e4ccb86894a74f2b67343422022100d37ef0d9fdb995c6b9d52e582eedb01e0b5f2299dd0eb8dd0b8c1d92b8351a9e 30450220121b4b5954d31ac4bfa748ddc4fc25397273ed869dd06ae9e7339eba43648374022100cea9601488f5f03d4a6241e825725b2e42e45402428c7d670888615b632fc393 30450221009006980f66fd707d7e06b047e82ffe9b3dad27964b04e211ba3a91fc2c1907a502206a4462733e1685790ec91b08621a351ef128144875eb3619ce9c66614226dc00 3044022013280237281982c635b51a577018618d0248a3e557b151b30c73ca1a0258f4f90220181be41c1a6c272c33f5647c23cb33b047d1cc93ede94d25c5d46c4c62355c89 304402207a4ac15c635c19ff01d2ed06a3e6ae1979591a2dd3fe657b3434fe5a6a46346502206f146a388a1110a10798bd06be7da5d09d7e57cf3cc5280729657e956c228b5b 30450221008851e0f608473ef65a72346264fdfe7a43dcd01e330a0b182aa51064e1d866450220325702eb42b54036a7b74e2b261b845c00f16d078578ab72104f49e575da35fb 304402200956edced7f1c1710c112b5b23460e63ca6f4f36a41b99ce549e81b21ee1bd6002207c98098d9748509d0fb0fa27eca3e70c4186ed25ab20fa3656383341203c605b 304402200241441587cbb786f15060247d251a12fb77df4a473e283566450da2f7997df7022010ddc232f60ebfba22461fb669c7ffae33ad3a8700b85324a86dbd5b6dd83b35 3045022100e3a7aa869fd062913cbda29cbf2c99466b506c71fa8ac3aa3e40f76b475b8bd902203478ff1571f1eafc323c7c81ded2fa2cadc50b92a97da7fae09892ee1ce22b72 3044022019f35f254f632a3332ebc5e61156d6089ae0b1110fa275dace99a58f8c770d0a0220330a5d920833b3ef7fd5588e5f0f3da122ef6659a9f05a5eb86b37eab02272f7 304502200254867aec05509f57b0cb80e88d5ce1cc01674b200befbb9c0c04f7970c4ed60221009776b83753911e05253902166d1dc3550438d4124c55d9b742905d54358ca5bf 304402200740b32b823fa9dc07296c0e8b3c14c884acec7c4b7b428b78b4d8ee0565094702201df85929909810e83908e9223e0a07d7e33af591cc3109595bfa6d6abd840c9f 304502201f0777dbc58a2546ee192b3beb206219dc14197efbbe1caddf0cab7150c2393902210085b659b5ff619ebebbee4636cd3f25f268b6ba2a92307a21fa266454b96bb9df 304502204bbdcea40a9860c998a6dce7fa459df0753b55ce56d02f4a3db13f141924c79d022100dafbfbf3951783c5f9f9c0f38868d719e14ab7bcab22667d394f9d34739094bc 30440220338a49667a689ca6ac3e108f99f927a3805fa0c8e91e1bd3e9f95866ee62c775022009c03265765584a002450aa3d34e88c407418005ed2c773a82b63b8c57672339 3045022014a7284259b62d4a8c259bb098fcfc69e2ec667bd387318b97774dd3cbaf5085022100944b2a964fee3000d4a896b9777daa3c4d3aabd0d0bca7a6c620618ff61b450c 304502201e18a20d03c00b0d23fb029b289fd4986a9ef8d849c587034d6b58aec30ae16a022100a4963370b15a4788253725d35f8f07d76da756ec6db8313de301f0057dd3cc43 304502200d91e91975cc4a9b164c8b429280ef84ff63247cc3471b057beec7003767327d022100c29c27950150b6fe4dc02d89c2bfe34edfe8629eae0cceac041efe992ba10807 3046022100c35458257abbfecb7b0352fc51ab114e23c8059219ca6ea42908c83adddc916b0221009c69189f1aa1b99e3477593d4a023f785a34ca19ed496fb03b2df79155a69c48 304402202b9cd6cef91103db84bdfc78c4714525e31acfac8caf1298caa30a36389c990f02206933b42588db5a77ccf9357781b594e7aa9fabfa2da0e0e24d08eb9b32054d17 304502203e981fea772fc1213a38775d2d5172a61a026960fca5e0278e4b54591e3aeb5f022100a1ec7bf9a02c8c104bbcea7e432d0032bbe78cb990aa1b9b5a69bbaa3777a858 3045022100b0eff3a08ef405607ced5c92872750162c298e6d62d1525e420f89c74311383402200ba52d97c1461b5213079710894d46ab9cbebf2a58a65f116b3f4942fdfb8711 
3046022100d4c5aeb5d646ca257fa977b12e075d29b993404aa91a81ecdf254710a3720eb0022100d02d5e48746ec840080ad85b890cef97a401bddc1f635268ab03b5e4f9fb6cbc 3046022100ab353d94ca50be5a8e87f97664f2f0b54c250a7fc3c6b7e1499eb9e804c07529022100b89b7240f5c85fe7354912e7976418e7f92c58a4ff5753e5fb32cf5eaf63d63d 30450220035f3da93fc666c6936cd6529e8b0535ac7d5c192173a1b42591fa5d47aedc4d022100ce32ad4914b0842b81789f6d7b933b8b5eb24c8f424707cb1bb5b71a0d26322e 3045022100d0ab2c60c5a11098e4cf151c515932fc78e074f5ee9da8bb1d9761b370ce328902203b836232f49d75ff5956f71b82b06978612647db66ee6a51bb8e517b159bdc30 30440220325f48833cbc176fc67fd1f49c36c247a1aa0328ca473d883bd905c5541b5d6c02204cc4586aa6640173981f3ff99d2a52dd89c1e44c5ee62cbdc32520bf9a4b43fe 30460221009d4fee4c21efd897fc4903b617d3c21670d0ed5d235e2d8cb81d01c63aecc247022100cc7a43ee7d37dd2f0b5776395b7cb222663fbc08961a083acd6fcf5b821ec1f3 3045022100f3d9962a9112d4d100c1ada33de42eda818518dd5a685bb3ab4645296538948202204f21f4a22704b5caa8fcc984a3c79b6febcaef935c43d1154f38cbaa8a940498 3045022016daded87a5bc60ba576e65a8b467aa7fea724247cebc0ed3b3857e74fd80a5f022100e73cd8416a451ed1cfef2b48834e3e07305a43ce8d7a19d104d2af9478604455 3045022100dfc43d56a3429f990558909b1a21409e7171f1b59ea672a3ad858cdd1d69786402201dff8651e9fb4274929de6b7cbff08931b2347545b718c1be9fdecb78f507ef2 304502202313c2e4d499c1c9ffa267a2688ea75e9eb182a19b2350dd9a9224caceb67c9f022100ac57a319206c91b103ec8e41676c8a04938cefa97921e6354e9f0a849b9a62ca 30450220639cfde9c1221cf565971593a60a204d36de2fea351b024b6876903cbc88a763022100aabcbb99ab068cde7e562c8f18e0e5c2a56c269c9a06cf3fe5ca5c5bba9b0792 3045022100d30cceecfaa82d731efeb2c9357b35d030ef79ec8625ae3318ee24df93a63b60022066f3732baa16d9dd075328d81a7ff1861976fa0021a0c4b7a75ea725d4e622f2 30460221008bd5f9c7109d76b80faac15b31993f4f7ae02c150ffd6da9e47d755f50d294c502210089d441871da1b0292185bf93aa32c98f3631b6c42e52d7dca8b32561d30f7ff0 3044022051af1e89ddeeed8b1e413a60d1f78067ac303cc7eaa6ace09370102a3cdc1e0402200416d2be499a845a8ff93214ff28390ac615e4e42a9d7a3dccb6932001306c2f 30440220036b084fe621e0bd136dd0f008af21d12b5ae82b997ab9ce650289a10753a4ec0220229d4bca79008317a7a7c78bd39ce792fcaf497bd1489ebb862f2ccaf0d80cd8 304402200baa98992ad12877d1d6defb471668dad658d31ed8be70281c312232c1d3781302206700db0ebcac7434ea9566090097b0af0293abcc7fc48c372f75393da2e60b55 3046022100924ec1857d0d26c2a8173f891c2f08313c51d9b55d5680bbad76d8ee44d49c5a02210091ec365f1cc4ea2ffeab87427f7ee41807fa87abd2690b75ef9293441bd5fcca 3045022100da4a372c87e15677b5f48f8b4034e8fc86ae12a372490a3c5fd555a61c10458902204330f1b10e7971a74325bad54e7304f2f769ba9d84b4d99d1ee37f0030705ac8 304402207554ae59018f85fc41880b85a67156956fc253a41453cafc3ec8df9fda81f3ba022048be8c06e07328ef8615581665b02970f4436eb705b29d560afd90c1a908fede 3044022033f0366ffae9fdb9458b19f3c5eb2b44115380039d2e69bfb371b1804ca9912b02207695c00c54a4700867804c7e2e0fa1a415e6bab8304fd737c3474e1aae6a9007 304502201d639ae67dfecb343356bf4a000722fb25c602da4d11996179794c6b2c231f47022100d216b725e9e636da746cea819f76fec741bd9be483e77385da4bf43d1d7e24a8 3046022100ef55f9f398126cad195df92fb49c224c09decf88ce7b3bbd3df2c3030fc7e3330221009f57fdd799cb1cbe085b1f44449e064eec17b6f19e5e6b203dee77df86fa3dcc 3044022074fd5fb28a5f6e7801a151bf87c854b348d10d2f3b2682dc88b8017261925f6302201f607cd77e614ed0d78981803e773ac5f0007225b8a478a4b3330d46a2e1222e 304502207545aec7c02566630987da4e09d47859e7cb31c4762e20119ea6ec6097e734e5022100e5a7d4e1782cc1b8c132fbc8a86004d2698aa6ec2463432dfffe703f31b9cf4e 
3045022016ac6774f37b75ac3e7733607e02228395ab2f085f2efd7aae418c5460a93324022100b65e726f83feb6ec457157270d5a3aed92537a3f75de0a43bb932412754c3465 30450221009901bf266ca4a42e76f51c1f78c7a3e9a35814103862629aab52b21394449b330220361d213df51b7b77d5bc1cf5682239a5debe310e9ff803e722610dd9a759f881 30450220689fabb34919e8509f13cda326ed7fb5bcefe3e4141e407f51ef2017b0b3d1f9022100a54d47994716dcb642868d5032e93bcae4d3a1b39d0010ee431261a2fc79ee05 3046022100d4aa21de7153687cb2cdb169ecf247db60d92b8cc5c66ecd168a9a4ba6ad97aa0221009372567ab545589cbab64f9ab8d64d0f9ac5b74acb0e954d513cf08a5abfde73 3045022014537465fac27b1738acc841dc08b24881f65bf0bd0bf7625b7589ea48077191022100d553c922f2f532bf13cc0d377921101be60132b9eac868312739f576607221cb 3045022100d9119d10d57947416f572f01d719b4b212cd2c38df5d18248ed2ca48b07dd22e02206af398cf71664e1ea049a8443ad4ac0ed04b30897fe610d641f686755df36b6a 3046022100a0d6094a0334163d5f0935ac976ca1a7a9722d6d9a0c59f1776862a60c6fce73022100b356a36a0fd21de213deb01ddddd2645ff4ec0b103e5dfc0baed1bc134551737 30440220570ce2e800fb5a772f88bf20ed0489245ca5d7b38c52ebb5bfa287ab155e4b3a0220155ffa8ba61055c0774cf3b278d32f51ca932967217ace3792658337889356b2 3045022054f663e84d1a235fb57081f540462432d4e0e033d12ec1e8d6e60ef99efcc46f022100caa38a700d9a29bdb32c669a3a6985e6331e32e2d9860357f03b30b8b11ab5e8 3046022100f39fc08f52675eacf4315e8a412c59b87aed3e2f8d0ee4525d2ee434c31f6cc5022100ae7c67c6b9b1cd678ba45ac33009093810adbf23d499302f78da6c5bfa59ec0f 3045022100b1e4fb4f1a741f50e6e1b4c633af66544e55ce31c541033d904e0e804d4907ce022079e5bc7e9ba06ce0893f0f586db781a6db17d7d15c605ffbffa60c510ded9b04 304502207c977cf561e4cd187104170984d76aed8b44454943362c4b890caa12b78b52db022100a6a093c90f6cbcb4df2a028549c98a58977f98386b20edf0236a8857ef779b82 304502201d09db75cce728bdf97d35f9533784a044b91729c5db741860332a1ecbf3a5790221009927b49b0e9b07b7021cfef0462a51ebf6e22bfea2604b7f414c1209c29b2ead 3046022100997df041fc442fbf493c57bb0ac50103607148cfe2fb1aff5e1e2511dba662b7022100d49cd2c34d5b693378025c328937266c07fe014e30ff20cf3d0b378aff628913 3045022100e5013a5bd7eea1b4e263dcea06bc75eebc3f170ca4aac063c97302ad51c8053702204eaee73062f8dd52443aeb6cc4b714d61d382fbb805141a4e49c1a8fe78e4bd3 30460221008dbda148406ba17b0210eb71da01a6f0607329bbec64e418bf0ae4f71bfa4a4c022100d4e4fe21481318cd677ec32f2dc8ab28d791901ca3a5bab295e9ce8ba564561f 3046022100b8d6a0dcd7d74c5b0684c53fb19ecc7ef907a5bb5a2a3754bcda7ad276d3f834022100f9b20fc89ce69c27a8c129954ed67bc68f16204f5c8b7e9682a2f31838831880 3046022100efcecc45be22b9e70989dde7b5a133f603bde010b701503e75f74704164b0617022100bac7d36e7ddbc7e17f26b05db61081c7953e5abb61d3eb68ee748c685b745217 30450221008e4411053ed1d9ac7d5d5683ae2d9f529cc023caa0e4102ffad6054748b0493f02202fdd3a1d3c5ea2caca2cc7467f2b71674b5c4df5a0fa7f79de6445ddfaff9ab3 304402205a6c724d7a87697907787a0c7e8bfef8c5f5a7eddd0b608ea6d0473f2071cde8022006773ed8336908b27bb43a81b1834cb1d9dd41761df460a7d1138289859968fb 304402200e51c120dc728f62042790296769b4e562a895c43a72558bd01dbd23d261991f02205fcfc55284f7c99d95c59e29055979b047af47122bfbdb0ac65539a0a4217b43 3045022100eeaf5ba69e5e36a67f6287ed3d50ea041daac3505e5f655abb07d4c2221f143d022004e0b9254ec9ebef20cca444f3259660d6bc9156f53b379cee47f0f12adf9daa 30460221009e46ec81680f64b9d85046332ad611509930941ec45ee9b608c62c1d030e401b022100c78750e230b585076320374cb3438ef4f5c406e5b55a6be518d0f633f88965ee 3045022029b100789c5fa2ff35eb1d9964ff73be80e322eee165b960ed52a9eeb7e30e4e022100f50d9e7de3fa061affed0bba665c1066e1fe9d585ff17badfca7abc2a922897a 
304402200a65063d4d84b22d3acb754c34003b56fe2e725d122face05ef5943a80299d1d02207774e59e4717525642aff208369f28532e4c875b4660a3809007a4a0a2b5e5b9 3045022100b34c73cddaccbd9572c691ad7256fb34eaab445dee90bcf10c32cf1221836fa002203e7ba253df08e747c1a4f2cb11b50417436ec2a4c477f203f9614e1c4a16b778 3044022036fb619183c2e4a9762e0ab1efce3d0fde8e4855901e9c5ea3b99beff579100e022068447a643055def2d996979921aa32d128b6b09ca3eca5d0984e5c9f16d8ebe4 3046022100ae203cc7fa51078feed1ee07212698d211a5ebec1b2d394b7fb7ec31099adafd022100db353e4fe84f8d3ae6b03f9d844479471511c880ea329a5b538c3bdaf47988b6 3046022100bdbbb1319280fb9a6a4fe1149548dc67970b4a4ac407bee6618af916a7773d900221009a52290bef103d001b61c251535b970347e2b6177658b37b6e7ddbbb464a581e 3046022100f3fe5e27af90f27fab61c801f361eb293510cf921c88268228f3a3534045c84c02210082bb0e88f6df2c55e5c120e90faf65430b24ee802d8f5048c2ab410c13696a22 3046022100e33d1dbb4e44c6181b84799cd5cbc0aee6821f83db0f9ab217255c3390d855eb022100fa4b4c6a039fcb33306b63f0542e2d7ce2595c281126429dabcda51ff882a8ef 304402207f050466a5c5035ca7ef22ddd7b93ac2aa3323567292d3916b15f10194490e3e0220135cf169777f86a751db04a23413338a216ea5d8684a7499ea2e6c2b72561dc1 304502204de1f686b416736d1cfbe07b3e1b1514d96a5a3bc489873ca774d11d8d5429cb022100ec1f9ca0c9cd4ed1e0174592758bd013d854e2a4d249331321382103d70557b3 3046022100806c7cb4a938c3142642438d01ccd67080fc037588e3195364201f7209c7945a022100c6ade2588e0d7a1c6446243e965254d4539712a25c17386970ca59b4a1f68bca 30450221008161c6a6b4d6819ca90de2f0e4f8e7d28a4133c7869704089a42a65ece647f1d022042cd67897d78c62f6beeae3ba6a12f70f7d59bf6998aa40e34f04fbf1b52e49c 3046022100eff916b1945383e948d80309f632c68e8b78eeb3af863c0af8c4d5fe743a4cf5022100a8f5f8836065f3b4cee74d6470137c789ef0dd7050c0160de92c74baa198f2a2 304502203dbba2eebfdadab5d88312d50d33309af98feb278161736f541e08a80db1b5bf022100aeffd343dcadea3835d5fb74a4c60c4809b5d8a6b1ad3e82320033a61b100963 30440220079041a5a47d38c34672c3268d6f76776174774402b74092140005a039a7d2d0022036319a063c7b90a71796f70b114005b4b6803ae50613ef8d31b4b825f7f240a5 3045022100b073ee0b0e1f5b9dd6f87a8ced6c80df0efe763f08fe827fba19369d6c21ac3b02206f09c4fd2776b17f9385e91cdbd20b23e0ce46f419d39cfbebc412258b5eb612 3045022076a4bf3ca84547eac71e3a1495b5ce0058bb7932b6ce12d8ddd302a3d25213b0022100f461ed753b08cdb4fdf7f515df8ba891c64fea1585967921425d0a7cefefff7e 3045022100edc1357557af9e9886674b57b3cdfa393b0591c6cd8af6751ed20e1f1eabf9b802206707b22e5b30f58bedaf79bd18e99eca75e314142ec3b5e314ba630b2e8511b4 3046022100fb06510430c541ea86139e95b4dee0ac0e033d483e533da8e20d5bb668d430ab0221009bcef7551f23ff53a5ae3d13eba81ef3aecc88102caae202ea1b27c8c81a11a1 30440220302bfddcb996c98cc0df14cbe47fbc8f327c9bdc74ba061f0daac6144f0c3a0d022060453075ed6938c64ee4f8f361501964f2ed6c394c871b8f58878349c3538b7d 30440220788e06716981ab3f7f07da06ce8957734c656bc24e262760c8a329012daf95ca02206a3e30948ffc358050290541581679101f774c43f745dafbdb83603b7d4b1bc3 304502204bf34d17f8498e3f156aa3857447cb97971d12b8fe6d5f7fb97453a09743e144022100b25a033feb719b902c53d7e4c2fc85087674474f51ba0a0222a62ee0a84dde4c 3046022100fce7ff80fd45a19453968ab3ef52bfb7434056c5687a098bf8ee4d94c66d1dc4022100b7b8188a4c14c0c4d6ce29aa46d170f3e8c8bc916bc85bbc39090d3c356ac55f 3045022025c0eecf4bd4c87ac2ff11ac5d6d5c4a2e0cc6477a25d2282e960f3c63485c840221008285537a5134125c3757f33d0a14126d54c72f49f9c5ffd7aec1b6dc6132d634 30450221008a2e3a8977034a161aaef2d25862cc28e2953fa3a4f49d7cc48dac5d9828c96f02201faaaddcbcb2f81db5b648db18f8457e6475687dae9dcf85fb0283ead3f0ba07 
3044022014b423fab54e7ea73d8b7923d386a8472691b81d7f046f2306b6004b999ac6dc022025bcd9ac46290ca26e705c14101584a0f900c30c5a545179f5af3ab4a88c6bdd 30450220581ba44d29d6902193dfbd251429727c2b0b37984d47d788916cfca5b049793d022100a52a891beca13f718a47b9a4951725fef1e5f57c4d0d15aa7db6de14891ecdbe 304502206fb0024b055ddada3f5dee01c754d56e5c38c189ee1aa11c2e97a9537999dbc1022100a8280936b28224cf15df8a32d66f9d9fb100d9a0407addd960c7a4477bfc730b 304502204f3f8813baf3572a60e8d919658cddf721b669b2f2de079e734bfa3f0e82fbad022100c4e56a8d5ca54c5a64053bfd71d644776a80d20e5e0fdc8ffd437a224ee71a37 3046022100aac35583d88bd6028635839632ce954100ce849a9a7fe5ddb1478b963ed03742022100f3d0d699d55fdba879e1ea4b15c53c04c95259b48926507eb449bddea1795747 3046022100b34870279267cfc642a6b2317cd4f59390ea84646a049395f45a80dc2db115d0022100cd326e74c496c564989934c3a39ab7d8db8c421875e6151c6333d76ac487a904 30450221009cd5e4863b1a8a83f6f89b60a6d37fed338e0e67452f88f224fbeb5e1643035702206156071d2638377d1d65a211582a8d3adddc69823409b80dbfc553b950fdc5ad 3045022025f8ced576ce096c51ea2d00a368b4d3cbd748ff590b939cbbbbb79ada3e2a82022100b06078cab292625ca908649409627171a8244bd3758151639a775f749a5fd5cd 304402204194bee48f26305fce3d4ec5de37e711b4acdac4b352992e83123c94326758b002204229ff65cd5c460d9086036c1c937cdb552c87371273004fa9ff303acce62f27 3045022100ed8dac56c56075a2930acf353f6c162052c9e087ab2197f28c33ac6e6f094d9b022034559ca08f095d213b3e09d0452d583875d1649df7e9324195b2657474559074 3046022100a15235f2e8e9ce3bc7764da85997d126a57673db2377cad139494d225c898af9022100e80b6952286a94036000af3a98b24909a95b08e9929cb938d9d71545ba7a83cc 3045022100a2116df07cda1102c42998e8e1f991c039437bb9d9d576650da9ca9d795828d402201c58400d4d91b342a4f72550ef85f05aeb570456841be449c3a4d3f5af6ca328 304402201959cb6a42fa28b563c275efc8bb91a9a54603c02dd9658fa0c1a3653144231f0220684529bcd2ea326de4da1558d28b644e27210c8b260af3b9982acf6cf71da630 3045022100a4f39f58fc485e92716f5f5a16d70d294094468f4a77822a2297193cbd52afaa022064443a7f65720f40366bcf5b24c86445846b39f96744e5cd44b12fa41ce78353 3044022016142c4a5e01928db700c0d954d8f33e1ea9f56c0b1adbc7b19eecbb2fd8a83e022028bcacb5affb358a941a57c8339ec9312d0960b2ade997020156faa99c220b2b 3045022100d4f49ea0863753ea442e67db68ca5e2b40d7c11169b90034e2c5f7c365873da802201b5461dbdbfb210708a9ebce7297f8514b97bcb337d2e6195f5156650bef64cc 30450221008b71a4a2fd3c8e6a87631fca977c9cf2165a85ecbbf394d569c4debee8d8ca1f02201d92117db8432707ad9085fa2a43e0612555667370f7bc64c42bc5f0e3ba42a1 3046022100b70e2fbfa9e26f8c8fc00df1c0f10809aa3a4a243da1320be2099ebccc039384022100e5521a5583042915f7caca98ece91e9a0c464cde4ec25ab62aaffd019d3dcdcc 3045022100a7bdac1bb8d5e2a2f760082382d8ace1fd9bfdf3489993328c17144f00fa0e860220503641d0b7853d358c914431ebae447b6fac327d039dafd59dde3705b7370609 30450221009100ba4ffe3e7ac1819bdebf05a307ed189364b511737946684d9ebe6cee66b5022044152d665bb34cf5b79a6fdd64fdd635ec643cfa0660df20c8deb8445ae8c227 304402205d172ebbb20aad79b39dabc2e76c9f1132a215c64980dfc166a23ea311e463b60220614daf9b468ce7e70b15a21db217c81ff20ee114bd0a185a192df14c986c5d3d 304402205e7dc2a83abe9ca1a680d051aa8334d3ff25abf793d851df05ad62285ed682a1022075d99fc80c81fd55155c104a22aebb5d442a07837e3532cf4982c7a1d752a821 304402204a2a66ddcfe44417a91b6a29f7b66f047742318d05a5b2f5e66e0e67885d15de022049a361e5b5a76906106341d3285f81b5858ff76e822103f7771f6edd7a9e74f0 3045022100d1e1fd8ddfafde1ddb90308b270078f79af7ff8b5300c7de78a82093e5bf97900220610656525a8541b953a9a91a591848fc90ec13671eb2736cb82196279e2f2ab0 
3045022100d1276087b7decad5291366d3a2e8b96b6d841f6f6ba3a90b79a0b3ae650438cf02201f2f587cbd57be345b3a360255a23f26eb8128c9226c3cb138dd36f41de878ef 3046022100dd31bba275877c888a4e3577e62230c3da2c2fcdf35ba59648b68c5ad454b21c022100db4d9046a51df18c8fa18e109005f75258fcc98208be739d257810e2ed719e63 304502205ec47788fe589e4a419408ed6b3e1c88381e3257a07ed471037777a878ddf465022100e623bfe2b4fb3582af2de7183d2c258b9966967296fabd8c78df6d80c703c0da 3045022100e24a11f0a01ecf187d985369d403ad19f1ced8407143d94a6185ca78971122bd0220078813a53a332d18820656576de49bcc8acb005235b2ec3b22618e9ef082cf80 3045022100e18873a00921e3ea5f7c3ff52f1dbdd414d326db8d8fb853e02b02c00187ec4e02206ecb8d4c72e1cfd6601bc32343ccac9d7cf9eb07ada17b3ef757dc058215099a 3045022067e008f64961ff5be6143fb7374427b7cd7c4200cd30f7ed6ff089c5df614dfe022100d4ce452e047400abae42a9f9fbfe46f8f736df58cdb8113fbfa73804d2edea9c 304502210093c0c1ea3ace6d3209d30b60b5e12c4251d8b6381e6dd32b08fe7a7d213fb8c302204c880d1977842dfb825aab30b6c5dae669d4f5239e235e29284e0de6d89d9cee 304402200ce42aaa6a18a433e3a50532ecd748ae71c09c2093244798e642eded519dd21002206963b071b1e9b31775f2cb3f342b44beeced861fc0d5c78003849b553c2c42bd 304502203ff399a83b827ded31330fd398805e9fb0475c1b084f763c7f0afc73a818d4c6022100fd718daa75e14f954e1f98347774d14c8d89641b1722860e95520e40482184e4 304502202de455d75ad365c9a88c79217cc8980cbb0e638d3b32d8b29ad3077f7c6d85d7022100eec1b521838c43ea27741f07d7f1dc96da6433dbb79207ed5bbc29d715b31776 3045022065d353311e79b3306f92ff2ce65e8539ad6705b180008c098ee387c4c04e3ab1022100bce734e98d13db90134985b8618839344cf5b7af8367f5b2ca77dbce8d27eedd 3045022100a42ae7c57cecafde0b46e6c17903e54c87d291041d821786c5f804bdcf9a7aaa0220510dfddc672e03fdff14035e61c626e35f087eaf75137768fd1e246fd4cc5362 3046022100c6864054ae6f57bab1944a575092906627c6ffd86faba47707a11be4ad4f364f022100bc0ccb4a9ef7517691c30c516af252b04248f8493e38bf348a667b1c5fe23da9 3045022100e41e5168233a1052bb83a321a4dd84f644699078c09dc610082648b2a6d6b85802200a25515896c26e2acae378ba82c29610271bd1cc28df5542448082a0556d2133 30440220408a36003d6424ab4d3de0ab7c9fa24eee0cd9c8eb5934017558b2461b01374602200c436501c896b970875e21a1241496ad1b2f370cd3548ad24f963066237b45b7 3045022016fc6c8b35926adc573c1a11c077d0644a8220f31f23d117c23f7b43fcbd178c022100c5d1386c762d748eff611f985e9873e78009575d61bd8d648032ba10aeafc02b 3046022100ca419589b7804b2044eb7e142bfd79356fc42be405dea766d4c00f79537e147b022100b841bc59dc75c2b7bf0467860b6942a45ef301ac4146d169f92ff944721d6051 3045022019ea05903b21fffeef65367a2860482ca0f23b0dab551163457a54087ff422e0022100e02e9c3d92b018f9a4811fbf99b12fd96e790ca92599a20c8579843070e2fe30 3046022100a3e7f2bb8a565e90f2d850b377380103640b6dc52fa2e2e121d14f8a1673a0d9022100cecf4146043980ade25d6af34cd9e7578ad81fb702fec77800b18a99cf432c41 304502201f3eeef718262e7f2401f96322a9c5ed57f44176148c2b09d16aa50cb93eed740221009c43aa9359e88b96fcc7103baa7ab183abf07ad873e9b3a00ac63e5e53535453 3045022100c0b834fba3792b32bfab2a28165804fabd7119074b0bd0483a23ff15c1b18fab0220206db948b4bfd236e877f6e559293e34efc6e0a624c4275ab67935cb1fa6ee17 3045022100d49d51a47cbedb330640bedb56133f2873cc681259b9fb7415b138c59436a18502205e7bd69d4c201c68e16871c2c89b80bc478fd18900aee1ce89bb293be0e1af8c 3045022021b8c209e113b151b8e25864bd7eb50a712ed5bb7e66f4e7e5bc6f80e35eb891022100a9a7a72d1234fdb6241f7c6cc4810a212474908e422054d9d31391a1d859564b 3045022100bc1aeb5b6bf89691ac9ca8788d0bf9a8c0b7061b89a63b4ca89a0962976368dc022052d66288feef6f6b1e9715edf50093e2028d71821a7548f5c4ea8cc5e1a4d627 
304402206a465bf31020cdebd8c0157c184374974c24c59f8a18c22e33498cf74bb43a6e02201620f89aac7a810d1b54ac448ff1b0ae2755886b2aa27ec35c85d33421141890 304402206a609137f469bb56d1ff035dad72ea2559eadab56306d83bd3aa76125c4fe87d022076296a05178af09b33975fdf09f220db203446265a459c447f1949fa992c135c 304502200c3894e4b4133b48dc9b1f2f06d37c6dc5eb0059038e4aa6faa17b7a1d2229f0022100886a0457037e6587616b81ddb18e1826400d418cbcee50dfc6613c9d09e27998 304602210092893edcfd85857922757e0c8d9b04b4291dd5f535cd4532aa7808f9aa9e437f02210081b62da71e5fcdab5f04092650da9c1c1e5bae821b89075eba898e5f54607cc0 30440220565555f991b9fe9114d76e0894cd7f6fb9378b03b25613b6ed0fbd5980bae12002205e89dcdbc6f51972d947c0351c7098c73cbbb2e03d69ab481a47cab738eae7ff 3046022100eab7625b5d62914cd3f70d972f90ee2ad33cac3185dcaa351854828894c1bbcb022100d6e6e5b137aebecfe43abb6b5c5cfb8c0e28be4e1e7bf3854a33f46c7b5e9580 3046022100fcee95808409d18d2b535491afde1843b6db92404a9f2daeef8d0e34dea7586e022100a0338a4285db3593edef7355c52c932a609fcd66f4fcc2da6431e73d7e1ebb30 3045022100e6d0c3764d0108df86c779890e85a8f4ade4ce94a6117f1d31654e950c01ea7e022055dbe071cbced77caf829f214c8244fcf685817e0874ba9b128f95c520157cb0 3045022032e3a3c80f0a6edab4bae19ce5eb2e4d6437f5c6389a99cd875f17509bdcd43b022100db142008834956c4c10758aeecaeda3c43d3fd213c8c823d3c93b54359391aca 304402204240672eaed9995461916c8020e121f8575de444d33a5b4c1a42ae8d1f1f9c0502201119cda2bd535372724bd650d4a5e67426f1344f1c4487e8e27937da3b42d3de 3046022100c48cafbd3f4ebfcf6583657c51124d93e9ea71b4444438079f6e1e0da7a47d39022100e1c49dfd0160d4d779a1acc045f2fa7d9e04bb0719d0ae60df0316eb553989d7 304402207c66703ca93b30799e64b699a60197c5672c6043c6cded51b0df84fcbdcbf4b70220081ae8d294f849e8b90bac8bee1d64733800be052bf6ab6530ccc9da157abf6b 3045022100e61d7f024e645f5413f3dcdd9b30fbaefe35bcb3d10791d567578913d1ede9e102207e584ae5a2d892cb85d489c97abb0b1731e277538c5ef7b0a1d74d2c8cbabc55 304502205bc3cf8af4da885a45c7108588f2244836416529d971d41debd39241f115a7fa022100a8e23e3052e2f1abc9490637fc71011e7818a2122a73ef56c4093764ad26b123 3046022100fa8c6956ffe2f1b3926dfa4b441a8bd8521ce743e5b057bca9c5d53645810501022100eca6388dd70a216b10e4475bddca631db87e37b153f4366e17f01cac74232015 3045022100fd45b94a44d4e97a05cea2dc43bc2f4a1a8b703373cc57baacb4aaec94f51999022055234fac322017759c873ebefa642eec68792938889637fd9805bf34bdf11ac7 3045022055cd21ed8c01c4e3f1c8838df0ac5d0630db6e65659aadb3c3f7149bc2ca10ea022100eba24e3091b5fe836932ddcd7856b1e35dbf5bc7af03d9e314b02adbc4c0af47 304402202df3f546c1ef185d77a9d969f046487b1bd27deacd1bd40cecd222cb31490b780220513e4849964896fa379067a0764c91a4df0c08d5ca5e4fe10d08c362206c44d8 3046022100902f3a058b70bb9801bed9761aceb7bb8c1ee6715381460aeeb774f470e63945022100d4de24db074edf0d5e9e261478d0aacd3d8eb6323fd6eb3e8ab15cc430c4f549 3044022052f6a4f170d3973330b67d9071e7fd07302b513bce216c80fe5cbe52b491dea7022044a950cb4bcdd21b143573bc59ac7cc81fe40e7d26a24cac04bccb180e69d843 3044022034b125e8e07ff4363667479a054e3ee41eef96b981bb93e5431e1fec0ce93c2d02204c480a5b075ef743819f1e3493436b30ab83384bdb35963455d2ae4e257c7011 3046022100802788bfc1ddb26b266435a053218a5400a3eb2257f99d8d8ee011537de7cb35022100ee135e211e1108c369f4347f4eae54463d372b60b8ce3e52c7e5656e3f590973 3046022100e2a610386f63381d4034aa9018d9d9eb2bea3ae17a53a77b0a2355792aa17b10022100ed2d193dc9fa641632220c391478bbec65269e4366c42ed4448180b8a8e1be12 3045022100d6cba926859d7263c5fefad579f47eba0027426806809ff12a41c688a2e8c8c7022009e66fcbcf78a18e6444f3d01787bbda738cf9e71a6f68c722f4f0cadb3ddf91 
3045022100a11e4a1d0db31bbe113c2dbc3223f2b41e23bdb561f6da45e31a683b8d32e9a602203da8f09ecadca6262ea3d5163a0c10578a5fa35d2e164acfd6c133591447bebd 304402203bb1af3b82c62ef62bc692089f8da51ab44ec90c259bf2863001b974ce5c6ae302205863f0298d7452c4dbf5c180ff8a39cabb12dc0b7660d098a90ddbb54d1fa0f3 3045022100e38a2c5cd694c5f69362fd19afca79c3e45b47381275f06f17ea58454561c7c902200fd3777df0c115fc74bb19df0d641356e7dd34718bc1cd18f1dcb9124442dfcd 304502202a3aa48fdc67e652e74d00aa9a4960c8cf7b86406d539da65fee4bd6787edaa6022100a3349a6da96707078dbc9d87ed0949279a43fdf86b33f86928891528856d1926 3045022100d656160f8ca1f7bc862f5ef173be990e8c2c8ec0020b16ea56fd4d4672bf1518022007d4aa6110fce6237290d6d223bc636621c4bf0997db8d1a009deac665da26e8 3046022100c6ef4be179560a4052111db12d318de09e9d743033eb087300286338e72f6d260221008d81cff9d64a6181ef4f4ef747ca21ef7f0f43704d1bd15cd2a539e27f1c888d 3045022100fe6e292ba390f362f19edc1e3e080be5445b717f0f0182c923a216d8ba2bcdcb022072bc747046ae49497e135c1f92c653f0561ef401f36723be664c4fabfa1e7d0e 304502206f8fe5af8616579dc8e8f45af8d9c572b26012ba26922308e5b0894f01f1ef47022100f81612ec514ad4f6a512187038299afadf1a27e3df52b9adc5f9c16c0d41c6e9 304402204190e546bf1f082435b2ecb0f21d5cf01a49f21bdd1bb7c1ac16bf2cbc496cde02205e6ea7c839162e5c7fb08fd3941e8d02320fb82693373f98a692d171b633127c 304602210085952b68da9acf0908b7ef9faa91498a0013cefd0114a6a850ce3c3828560c61022100b9820b146ae93e668eeeb51a2a576c60887f475855232ef4b5fd429dc0e8cfdf 3044022056e5f9942aa3df66456875652d74de8104b8bfe1f44325591396570ba8b57b320220516f2d17c373f9e8aa8918b6139013e4d11eb4dda75a7396e47995e92502af80 3046022100dc8b43871e5c48f6e7f841fecb3d21319a8308698efd29255fc273e0d959842e022100c1728ef48f9bc05b0726575487f93de35d3d34ea8e3f93bbbdda126178fb9cc0 3045022100da169643b539bade158cdd773800c26a6ab97d5cce6382227703bfa412af75650220381fa7b80bc77af66b36d4398a5c9674db8da5a4bcbeaddff36e78386489af3f 304502206998a08ae215be9b739ed432617598881425e0affad5231cac78429e21f5de42022100a6a7ab6f7cec5d75bf6345a8b4dc3ec729cb3b58a8431d89f4f63b5b188dc88f 304402205d932e5d3b326f18dbcb1fb3d139a6c32e73d6299a8f670c1d0c27ff337073a402203edb0a7256a67d591349be06e57d01bc8e23a52d6563fafc1b17a0804cd76781 304402203bcd59dec3ad0aff16a1cd45dabfd07a4e37def464b53255a330203130be24e70220506769294c8cc94f3413985ee887e3e87e9b170e15c7e194071f3ad80e1a6637 3045022100b1757f13d9ec4d4ffd31a044e7d1f19f45115965d987f63cdfcc0598a535d2810220566a1e0576e7de600245ff36f8844d136dd4c8f810d25f0f88d1f8248660dada 3046022100aa14d56f01e60c4aae0ed9ec41ec9518a91359aeb3b529dca5c99897d4c7eb38022100f680981986a7e6fe0a02af1fbfaef3d42f987e244c9696f1b4cbe70a9c70414b 30460221008ff7da65ebbc36d9195db15aa76360ab75593e3a8a9411a16a8324a30775deb3022100fdf8e9725260389e0acc999228463af2386ae3ecf201ce0dcb1a71bd37a8e2f0 3046022100b7b80d7875c48fcd5b96aa19a04015207c18c28ea67353cb6871273085e0c7d4022100c21804a60703418054d4bee66440ba016dd01d24925c0b3e93bf25b4505975a9 3046022100a8d7ff5022c94e8b95daf437dc31a8f2ad1b4f56690407ac4ea6a4dc2f159f54022100a0f1161d2a17d2d503d47f59727d72cb969b33d9fba6c347efe339280a987c0a 3044022005e2fd971e6a6a5681cc6fdafbcede2279b743965014b950af258bc4b6bbcd6802200ceb78aa7ec6f85bd617d8e6cd320facce87bcfdaceec650dcac5dfb4126077e 3046022100d04aa1bf316b4c654cbe4448daba46dfba56d2c7b4d6ff00c01207badaa62535022100d05ad993e77a619f3c2a175eba103c1556dd6760d30837e45e82b5de419794e6 304402201aecf6832e641c72b2a9ddc1326e89e462913b324b176b681a1f9659f84867160220164fe708056ea7b2a6ea349912d45cc8c165e8fd07ba9bb9a3358dfa9cfc478a 
304402203018e0c6a97452709907bc52d569264e39c165240095f37ec327026eca573d90022021839ae708cc3025cd6800fbef0852204f3fa41439177685dbacabc9abbf5a77 3046022100ba123ff72ca4cc02e1807d3414a6f5698613147a055f3565191e2e6ed32e7bb5022100ec8d26586fa3d2f7bcaeb5ce83357190fd197c1b7228c3fbffcb1b3a04069ee7 304402201698a3289452116eb3a9e643fec9ead2572a8f2acc9954e42dc4f7ca29efca6d02201a3c1adac5aa6189052ff7f786346360a5ffb49ce539a580d8e24573a45ac11a 304402201d7d580b8ba022315c3ece19a678df9115ae2c36861b6664d326d54062122a0e022024bfadc31eeb7960ecfad0dcf627e795947e119c098baf2f2e7df6d903324b2a 304502204e3789ae099470d6bd8f04b730e5695f78e575ac65ec340c60ef8373b6931d0602210081d1a2cd2c23596f1b1fa4adc49df0f8f32c3aa154152bec0e001155cb486d90 3045022022e6152c8be71fd21c8ac202b88066387f18e6f07fff3275d5276300820b445d022100af76b42756eb9df7942cfc2251fb05b9b7d7da36e6a753e955a81a5c7d806ad4 3045022100be074f3309425adea2d50432d82c80f54433c157cb8260cccdc82e5aafa334ba02203f0d3f27e3c6c222690e5baaf62234a59fd1da799b30aa55b54aac54372129a6 3046022100c1f2f3d68781a8aa3fd14f54b15fa661d47084ddeb7b5411a078b29506c3bd26022100ebba8a1703fe65b9a051b98f078f3af679a46bdf75536b45175205f423c685b8 30450220769a8dbf7f19da6c1decc689c43d2070e272e3ef24ff2e895c940ad5bd005c66022100bb7e1aa4bca291258f69497ac085de33b1b9f8651f2468c091f6e32acd778437 3044022009dd61aeac7f8b8b713729fe926e2c62a8ebc9ffa0ce8406c39d86c5f005f1ff02201eeb100afd441815366f90d72eb426cdfaf47ebefc4b96f509194bda9cae509a 3045022100f2845d23d427522524bceb16da404aad57475743a76b7872aad401d1d2b0dcf20220528a43036bef444b23ddcca2822dbf9c7313e80b3d7a2d91976adf765404fd7c 3046022100aeca5346b7dde1a00416670ef4f6b5ebf3160c1505ad35bc045b501fdc87056f022100c2b9335b8c482a51439f42f5ae2cad7aa23902c50e2b64f532356393a747c64a 30450220217605bf9dad6f2192ab74e00d5630b21179e4a554029264545806e91332b236022100a6f38134956f56f7ec593baed20be7d0d9501fa5dcb865797e299afda40ca50d 304402203c7ad23bf340e4678424a75806e778f8589d08446333b8d1dba24d6d6e5eb90f022002f392382259142535f2a5cd3974fb2ff09cd9265956e4a194c2c1cf1314347f 30450221009454aede83d6666fa8cb33668e8060a4ab9a4a5d2119c0805ff9459aefe2065c02207c93e945ced00c8eb23eadcff35c97f04e27c1770a45ef0a0ae2b59399b9eaad 304502210091905b32dbaa1fe3dd69f86b300e6fb62ba59fcf85eb54b7c71e383138f4ac0b022018746eccd63e69e53c9e359d136de9ca8d9466c245787749ca7cca0ef159516d 304502206ac7ad16254c4403b09a8f86416cf362ea6dee482a452d53660fe3bc371d70f602210089e82390a5d83084461f8516166b4fec081de3708a64267efe4f4a515d01de76 30440220172129eec3f4aaf734042041b28904d59628fc3f40412a5d2c55bac7c6df139a02206d8cffc7a97ac2cffa6e7289299828b25792cc030f212c7d075710e96e477c15 3044022042081cd86bfda2fab5e630ac16172c3f09c4a5a23f28e6d522b1db2d594d7a9402203eb354fb27647cc244e6087b26548661777dfecaf83b054e44edcb6303321952 30440220143f4328ad02dbe9f2487a28101c974cfc957677383b42a0321fa4d500be4577022065f6bc03e760d1815e069a7da9d457dd022e3b7d04541cc9f3c2862e003bd5a5 3045022100f427adf9b4511ec5e6257b78615f79f49fa9a3815efb944fb62d4b98000faa7c022015778db4ef1bc81c28854c3ddf872e9b9ced0f61fe2abadefb06757a3f8a221b 30450220063508207bfefba2216c7fc435f487275142f46152f2bc074ea5a5ca675474fd022100e7d842f728b503876ddbac005c7ffd116ff576dd2b70f59827f734e1f044428b 304502201af42773ef706c466805a16cf8331366609ebff49fa102595eb5edef8bb00cb202210089c6751127abbf18ad11624c64529872b1a85d5de0d10d21e98ffd85c336a58f 3046022100fd3aa2a00e9f57bc4a50a8b00ed0d5c428b8bb558a35305d811539b96c15a2bf022100f1ea746ed12de6b0c3486866c8075973c2c642dedf86607afbeaf3ac37fda178 
3045022034a068381bc3b93a5c08fbebda431274b268d690a22fba1610b603e7c01d4af7022100a5fdb51154f8c5f599c0682c9d5e16291cc58adfe0ff2faf6257efba691a5ecd 304402204381360b58a5170be97838c93f2fb940156a0099b35e29b533cd5aeaf9b792dc022030eddd77521045501404b66cc0073c74dd94f3980b811e3abb13261109063b58 3045022068409c7118f662847ebdc36f38db19bb787d86af077a0bd34092eebb23daae5b022100bac34c0e66e079559b2dc52db9864138d05bd8b46ea5dea452f6daaa4b09f7ae 304502210084801a86f49eee154bdd77cdbde14006b4edc34c2c79ad6e4569e04f46efcb6d02207e5ffbe550eb6b1bdaa954f298e31da9d123a10f807ebe58d385ddefc3ce43a8 3046022100e6226c16744d575b9b1f9fde7b869d51c6af74aa7e938792682aad0e91dc18df022100f00c2173e7fd8220dc19ebe6d719bf2531c280709b85b00e6f377b1fe61d7db4 3044022070e6e4313f612e7384f382807910c60e3b0dbe1da240be9bf983bcd46d4d740c0220650450a84b42f0c9a691f17d1cb014c0028ae3bbde534fc9b25500fb8c4ed6d3 30450220768878df22ca9fb22c8a5e86aadc8a10f1a8fba80291e602181c5fb169c656c1022100c29ee6cde810ba4c3d920b01aa2457dc6b40670c6205292c8c6f25d1062808f5 304502204733d4bbd2178c397b89b4223f170653f0f094f013c9229dcea72accff8a583e022100f8dfdba5a38aaf3aecb1d84216cbb7d6d00d3e0a37ec7a8f992ec04087c66a4b 3046022100ed3ef64a5c228c3ae54b82666dcc4eaadb644c178d52bb03ff25924eda1f41aa022100e3c8c4d42b06e5b5040381222f5627bdddb244c0ed0c2d0069c24db1f45f9496 304502203f268cb6fe08e2300cb6fa035d83b224fbf88d4fc16df0bd03e878c9c754cce9022100877154e83d347f4802a5c743addf7896a01f9ab89a79ddd7ca63903ae35b23e3 304502202310c1409476dbd3a6304436054a7109b25c01b892685419061dd88b7d284c2b022100b5a524782231cd8297aea4ce6d84da2ec2784dd7fa695d82cafc5fa4274a8ee1 3045022100cc22122ed8ff896ed064ba350b638e113ce91b59072ec04b265fdf87244fdc43022069c605da6a0cf125f9a8c41b9a21b7d7b6cf696ec47d0e848167909d6416b806 3045022100b25cc6f28528b4aafce04c1359246209216b7432d600606ff163f1faef10dd2702206cee7d3897e5bd3cf01dcdbd4acf328db4127851d0fbc6435b4b3f453f2ca306 304502200f986d0009fe0a70e14c1dfb52dee16109971e00f9811ad51883b074881ba049022100a67a4980828bd3425a0d0d52816dd2c83828ea962543242beb9dc5d4159aeb0c 3044022058e3e2e75f5f0ed05c8aa05e0c232c2c41af0a4860780014378a4c0e5718142e022013d45e87c9af7df0d1e4e55c9d7e1be6e7e5845f018328aa775c28aff8b8a453 30440220490c8dfb821da02c8bb81efa8f2c3f5ea93b54faf8c31e299505b6b367e8e73a0220367f7b19e6ff1d11eb7574c910ccc5dcc0dc282cc2a9fe3f6a11ac3643a4dc4a 304402203d34cd6a757a03fc793756c35fe4d04e6e53a993b5784abde06c51163142e9b80220731d34ff4c7f92aa52f9d11f39c8fb96f826cd59fd0ac5987ca84001ca8770ec 304502204e20bd5ddf07f0b9d648ddca920463a2f8b806a560ba537967557fe22a3467ba022100bec971bb4e8b1c810e69cc3a35c782e2780f764d4bfc019e7ddc232dce266420 304502200f53be5aea40e3c95556534ee2f55c84bd6dd8903c01f3dcdcb70ccdaa9fb3db022100a24e3d4dbc33522c1522b44cff80644ba75d69b8e65cefe3791fb37f151e36d8 3046022100e7e4c7f7f2149b873786fbe2a17ff9c38359ff6db2a2608bff760c4909f4fb69022100f8255b1e15ad43b2d2744a0670c577e66b224e6f76f36b915374ea04be14de1b 304402206782a7a5ebb054ae14f4d96d58dc70aff3932b39e262ab157e53cdbf737c3fd10220746a0b0a159b643335505b68e08a36eaf708819898a1333e9e3cdb902bacfb3d 3046022100e3624198944181844484f91866d3623b61e174d7db19eaeeee652e8cb8b8c65802210080a32e70747099f07075b6e0565dfd7bf38dfdca782711eabcc951ea295d83dd 30440220622429807cdb91069198b387dda1e7d88232633a2c4f735311892b48456c3a8502205c2d5db4579879d74b9ac023bd93daa4f058ffc043d42aec099ef00a4a2b54c6 3045022100859ac8adcb7af3113605e7a765110925f7abcc7df0a0a0f172f2e503cd35ed03022055cf6e0e6ef2f00a52a92b65b9a018b43403bb0b6b11465f8177890840b7bbb7 
3046022100d2d72bfa778aadd047ce34cc09a1144c73355152ceb67e2551d8a7f90ada10a6022100c0d524e4bfaa227d2e6b5b3fa20b708fda4c5eb1a00178ed196bb4437485d8aa 304602210088be7611dd622a3274492116c956727c6b87d7aff6c244f05f4ce5cd084d008d022100ae9e96b079f82334cbff0ea0d15d51b0e581f5958ddfc003419827804c5aec9d 304402202031c8fed03da5e7de453d9f73f6376500b7660703e95446922da95d04f875b302207d42b595c38bef196042fb4b22f160781b9b80ed2e56b0ddbee8a16ea6071f45 304402207c47becb9c7bd5f86c04489d00fa60068dba3dd235fde9c64a1221bcca7c5952022046c5b0a7dbac57e79a87c4f7476ffcb70b1732d6f067f4847dfbc40ad5d9852d 304502207dab1855e8c9f734c107aa7acd72b335c4ea506c38c122ff38cfc0495dbae2ee02210094dee93beaa06e4076555582017e810ebdbe944f4082686158caecf538bbaf4c 30440220724daa50838ada728c3b9b57bdceadc63167555b64985d373d1a86db498c0e7302205becec02cf693a884a03ca594dbbb3e0b6568c75d429b2b9c32f9bc6b179a391 3045022100ea6accb7ad48a3ca38b44f6ff0648c443fedf87422423207556188999c6aa96f02202ddb996af70faf4ff1d6638f7c82c5885a49a3c39646a1606497d34a9a87ecb3 30450220125cac727d0ed6d9f13735e07894eb06406837851c81d2044c6847e4524c5c5502210093b1b4c0542f796f577b90922cc6b4f5b7a256589eaa9c2800af9e7a37045885 3045022100a878f101aaac7e06b44175ccb1b9db77d3f294363255a9adb25779b3070fec5702203745c9272d51d04b3dfe7047bdf499f6127a55f4e8912583d672c900b9b5788f 3046022100dc0545d3edb7249b9603c5f4654b2a6b09f5beabe4a89416bb818758aa44d6d3022100cac09edab3f63e910acb5eb00db100174231c1792a2e4ff5c84ff8383e926603 3046022100c6673639c638d4e1e445986a0e91280a0b8be0bcdae89d0263f3d58df85db4a202210092ccfb627f7620835c239691cdc22ae1311561b479248e9f9f83221c6bafded4 3046022100c62ee695be1b36195a125b2251494cdb25b8802b8c48b80e078504c7423875b3022100c133f4bcbcc47ecec01896b42e3f9d3c2a3d7256ff683fdcb2558d8d9540839a 304502210089ffba4212ddf26795dd059ca453183097a05ce396392585f17050eab95bc224022079ee5c0224cb5feba899d385dac6f67855cb51d232bdf95735d1cc360a1b3192 3046022100c268b5e5b397c49b9eb09c4374927b578e8c0c33854692aed4902f2b04124ccb022100f4dd8aeb11bb675a51fb0063d37cb6beba2a5419aaf9f256ad29ff6a730b61b7 3045022100e46347a21d4d7618e3cc43ec76b2ad9edc3e10dfd1ccd99fe3d53d602861d3fd0220273f5b3def15068a5a599540b41f68bd23976190d5d87bede1ecefdafcddb089 30440220372ae39149967dff2568ea553998142a6b865318652524e928894ffee88276a802203a5e8d65eb4fcb0307953be645d1d0069eafaddea375f1503512bc00ec196b4e 30460221009f639a6dc9519c8fd22f76cd1f20df30736ea186bf75678498a6613b0011ab8e022100ef341245b4d7fd1a491c5c94389562c1785df6d38ac391ae161d33bc51652b6c 304402202eeade12516a711ca1d280fbd059ebac934821bd65f696cd855eb00d277230b602201e625219f30933af9b119398743599f6dd9b927e3a9404da5e04e461fb8ca288 3045022100eb84e96dc8f2200a2f61e5537612f5954e99b82189a66b1ddbdd6f4632be815102207b0b148114cee1732e1a1d3f738f114ec3b5685d6fc44c8dbcdf6a63c382e3e5 3045022100c86a9922bc366f83bf3a43e39c1bf9cee968c7be720bcee04924d711e8b85c7c02203aa069d1e77d235a329db68684f8cce99477e865e0d2a7454927d655cd3fe85e 3046022100c896f310281ddc1f1abc4caec841ad9ac1b2c08405192178d17934018148349f022100e9704f4a2688465548956a172818b847e3e62d3d0c67a444ba820cd1f933537e 30450221008f990da516c8caeca34d11afe29d02c3bc3cdee36aaec832eb6b113fa4ae722b02206cd273d532393ae232dd2adef057a4ce12544d4e10e7df24801fa46fc4a0761a 30460221008e57f80cbfc860bd3669c5f5de3d25a5f12cdba9609a61885c70901b16badded022100bed47614cd2d19d6adf8b7f7ae330f20ff176f0d4e0f65e65aa8f884eee6d342 3045022100ead1cb01c47fddba099d718ba975d7e06f82638afe54a9eb8e7052a99038afea02203d62877a45e9a72e5db352e40e1116aa84272fb482e91b3b96042c1888e3af62 
304402201f673cb42fbe7e8f824b2e08898d82b315015a9785b5557ba61cf33e4544dcf9022062623f1947f84a34557d2f4f8ed9a482ac4bc22d6c8155fd3bd2e0acfd6dc0eb 3045022100c432337bbed8420b74b008322e4806c87681e70b62a7a95ebfabae87986b09d6022075ffa8354b4e21bda764287adcf4fd012b8d61baccd626237452714343eb2576 3045022009f86ea5b5c8f2b4cc9767530cf385d0069ebf7037256fd06cf3825379af8fe8022100d924986388dab2480c42e1a6c962768faf962ef3d95a2162ccfac258a0a5bdea 30450220103509ea8d891be5e95f1b761c1fa468b6fb3c50d9c620fccf4fa30d8d329763022100826b7f52b3fb01570c24f55a1b96e31d1d5fd8b9c5e2960dd0e10422d6adf7d7 304402200ec7927928a9f8665762ce93f02becd3f27157b526fda3153c75a09fe9e8d26f02206fbb18aeecdb41a1a5b97a60c04eebc076890d3e559b2528fba423c34c60ff9a 3046022100aa938c0c56f67832ba5d1d00811dc65b2d2b0d3b2eba2df2f9e06ae039930825022100d7f37379a61f4710c48dff97bbe691906836a3a5a5c41973b905f3fef52040d5 30450220531ef1d0565f7b64531c5b9f9a61d25696af2293c38439a8cd9927a98468cfbf022100e4f92ee40f8647610757cf5e652c22a782ec49c40a131aa4827dd6cb5f3d0420 30440220215cc899242a558cea7bfd5dc889d4be7e0dfb0f02ebff6d8c2e66fae2d5fa8b0220436ce5fb36695e36661eeba69694b3be4532b7d5f39d079ddf6a1550574518f3 304402206e95790574a095ec8b1effc3320718d434449779adda3c93f724cebfb234348502204d5f9669dd653dd4e5d12c60a046b15341c88ef702a059cb8f7372848133a21d 304502210089ff5dc9b8734ddb12aa811b4e6bc5920bbf862bf9afcbfb7967e7b5aaee73b6022043a91c64bb4b5cc7c6b4335527ba817cfd0f08121b735705fd8c7416d5d9e07a 30440220080c64734b9a25e919c5b9a21cdc42331da0a393fbb7152931affecae21c6a0402203cc4068cf0d81e173a5323d896d5073a4229aee9a9bfdb85e2837c6e36c07ba8 30450220109dd08281586a3b8499089fc82f218d9813a10ce556ab78f7bb318a3647c839022100ebb5030f84847a34d8bb49d517ddbe11a75e52423246e7e537c8efdc008cf72b 3045022100c605c473f581c3b5cac96b40f71e5e5039430e11f9a61b9019f7333c856c8fa502200a5c42d3776dcd95e21ee51c4235fb69fd66cdf301f3ebaf99cfc787cf9b6858 3045022037ff4f9c03d5e7a74b93e479368a5324cae6e0d5d60e646b1fefe1f3febada52022100cf3429f879352e384722502d9d97aa56999a1797a4e6a9a60b370f876488599a 3046022100c8324aee78954717ab8abc6335befee72b5ec24989f62418c1a74a46a3879445022100afffa05d375f1c3bc2f89934498603cba351e56cae8b4ab93d06c0aa9ac8a70f 3045022031f95a0fccda4d1b9811f14ce1306eb2c77b753aae36fd327d89e4c569eabebd0221009b485013aabc90f4cdfe6fbbefded649d1542ec1e90071d96870f7cb80363287 3044022003303606ad94fcc6c7c9e55c3d8c28e9aae4b18f61b5d39eff3cd1f721de9d0702203eb1d70068fae843d24c17cdc0aa8f65e2e901c9d9b41342b37f39216be1c787 3045022100a42d11c662cdb811ff4243991b0a8ac79d05b157cb56ea50fb38597a422a1faa0220138dd4491e13b3d66c449503defb660d130fedbbaf49664e3d0d13f83513fb7f 304402204f7efe299e8c428b62e9e4be580b29afb3f5d5152de38617607da57f4c9d829d022001657d10abf8a6320ece6ebe745fff9fe74d8e5e800cc733e5b5cadc417d21a5 304502206db5c60daf20e830e148fddd2b412acf2bbf61b474e9c0dd5b0417e0b39c6d32022100f1e778aeabdef2ade4a5b1e837893ffa5b6f2e00a44d845907ae6b3d006d4580 30460221009e738931864b34aa807d17f8c00410a217a707513ca2ff14287cd4edbe2340ab022100f9b0b6a7148aa0a5cc37029d0b81124387b6d74fb2c956865178813ff070df15 304402200207913ce03399495203166d5fd95f79fb633b0ce7173e08b06b442de41919de022008ce4ef08462b7d0492bec0fdabd78685819420ae5a703db0241e1b8aa23d211 3045022028bf7ef1de2ffecc5354604bcc6ca01638715532f0283511f8885035908870fd0221009395c0592b93ca6905c6d43b49171c127f3ac66a7004709019e2fe5e9ece85ac 3045022100fbce165024669f1875e7f551470fbe14e8b49f67cc5d80b8913bc5bdf245644902206258a68c12e47f7f783c965a082f74400e0534b2218ae5ffa287a1028f02680c 
3046022100ec56e8ca431123c0a27e22102114ba62b400d59426daf31501e972a80635b563022100adc8bd22454425366477e63e4a264622956aa137f85557684e42cc6efd9cebf7 304402207edbf00616596a5e0d60fdd266aabb68934c485d9e7924a7c499d151e6d5982b022045ddb4e6cbd7b33df1e417cb8c05fe6f1a9aad6b6de3c217fa93e86cdf6d6d3d 304502204045ef9999e5a302e3b3632f5b62e64b5dfc4cea2144dbbeb6cf3cd3d0d13fd80221009fcacbe3c435b20e45f860bd06679f1078074f3a1a15b215114dfa6fa46fdee4 304602210093b3d35c5ab767c9b7b2a47dd170ffdc7757e466aa7a7e88dc1ba762711132120221008822163e9b1640c5ded10b710ebc103d84da73e9928ebd51fa1272b5b0ef08c5 304402205a80096c7cfbb6779dd7caec18c281327babb7f31ad53957b0cc72f03e13d9a0022025ebbf00896272f4efd6f64f7ba1bd3410381c83451a140e5a751f842acaa17f 304502206349318838bef93e59d91ad2c5d2005df877b4233ccddb453e713a941351e379022100c783deb74efcbb1006598b647c731f8d191d43fb0f797949717efa6f3a877858 304502205fecbf88c39a56b3e5cb8e07bafdec5df07d92c35de7bcf571f175bce41d0f7002210083b2de0f9e433d0a573302bfaf8c0ac466031e680c380d490e9a8e3755f6d818 3046022100cd3506c8b60d46bd774684196b3271f3c702c3165f31673544406824cc68dda2022100c792c57d787eb5c282a6505896e6937dd1a37e69cb192c9d008c74d3a864cc57 30450220057090e1908eb6e987cedf0a01ca22c5a73876555d79013bffa2c9058176955a022100b70bfb20046394421963fe92259fcf2725b36505cf17c8e5201a6bfbf20f06f4 3046022100ea06838a564bbf389ab12e1aaa31cc4de0729d8fa263116f495f64816ac68545022100d8c3b5b35aea9d0494001c8038aa86d5ef9cb0c67353078a9aca5de587d35e89 3045022071dbfd25cbb2f767a210cd858aa21b2837b33871c5bb6d577edfabe7ccad6e9b022100ffedadb1e39b27f3eab39da224690378edc6262a3d23416af319b34f407f0035 304402203eab48072bc7ca715d06ac6662903f5524e0fff2a928e8c73f694242634a2ae0022017fe09a8cff3b6f927984338b8106d05b14bf3d1d1c03386f72d679fef34e148 30460221009a5d3af05d78dac57f5e07c4a9aeec76b66ed4f144b67d22196cf12b832e8849022100b82476739222afe58a0a8be125a6857c2c4d4f692a89e3e0d6bb7e46c29b0d09 3046022100868a3cba1ab5ea22cedfe402ca92e72e33cababbfa63debeeb4649f02643d0d0022100b51537c71261c7604f5d7ca62d979a1689b65d804662ea926d5b7c80a9194f0c 304402201e18c2fb44f72abe0c3254e4ac2711d7333bd33a96c14aa27d3a7a8912ba3159022049cd087ec77d1ac6b03bc9d15f45c8916d803959899439a2e3518c577f509236 3046022100d881d07e2ce3f43ac08f85f0d8af91f9d22c3fac953e4c8878f9f8fba8ee0b93022100c72dfc5da5e196dbb030e74163ac44fbbcb0b9a920c148b73af476db30389b8b 304402206b5c08cd66bc6d619f38477a5a6c816259989076720bef75d9835a09e0d06a6502203428c8707f071c236a08f09dd811c0616aaf43d037dbb34e51d82ca68f084a8a 30450220503ab797ae6f7cf39168a5c6110450b92c1d846e3e04c07c02c584e2e0c8f7d0022100d8af9d89ed32683b3bfa9c8f72df28f23c9e6edd41296ad3ed94b95aad6b0d60 3045022100a9212a44878f95b0360d0a6a5c7522238e1832b06c2ba5a1d212e8aa0a4e161102201eeae8e0d3afa036490ee70f6a239d8d043c7df76acdccff81068b2d7f394a0b 3046022100ab8ccaad6956746e0e5f037f2d8035d8d096ac4f3cedaadd69e6da54582be8d4022100a6095d36de5967d88ce6e0ee4b36072544ab713f9268ec46c75b22e73e43ba29 30440220564c26e9fd1fe624031cbd86b6d82b53438f54d590bbb69f5b3686a4aa8506be02204763a5eb8341305143bceddabd5c43a0f3494d8d743f3cc1d171ff173685dc63 3045022100a85984ed972f2de16bed16a1ceeea4684e63c905a0dde03eab027f05648eca2202202f88f717535123f3ec1113683a82806f83d087f455847ec5ad7808e346b97929 3046022100ee606fb02492074ed1fc87c5b57fd210dd1951faf301bf7f0a6a2461b306816e022100ce2e67d2be3b5bda449617ecbaf91a6c3d2064a54fe691ddb926f064e946aa10 304402203ae0f26f6a35299d945c0720ff9206eaacbca7d57eee417580adc586271d4c11022049290d19236f307546ba9099c5f3019294b9c909c81ce47f2e886170074a4382 
3046022100ffdbcf6176f48fc4d86d2cf9c5301ee72be6670b8bc4acfc697c9c7a632bbea4022100bd21c788a47ef548289200993f6289ac62099d8661bc8ceb65960d53a042c38c 3046022100915ef2ee6a303814db4e256ad7a496f732c48d559a1a147418d2240e97a421ac022100cf77f18b8bfb3fa14307ea2df64bab0a4255273ce27540e1f060f66b4698dc97 3045022100dbf3f263c0de2343e4be0876fded2ff1122b3889b3e441fcd4a786ac7d4e080902202f4c8e77b758d237d8180f9556cd350e5edadaf6cee87c1d42b0316471b1bbf5 304402200b8920e704add257272103a7b0c83c2df00857a55873d1d112168c900e383a3402207428e6323555e7c5472e8912a428b4852cc942677c17fbb1f56501f5f9ef15cd 30450220186c4a2b0384111c0a9e86ea9443a9fcf652db45f05f61f91f9beea63a114274022100d8dd71cbd9f57716a2a16d108f8e9015801c374aa93334db6d1f8a041925e8ca 3045022100b0ef96391b003735575e82de86538c0225320f8d6c046fa8d564f53e17ec686c022041110f626731512fb865a4ebefb14d1eb4a40510783070f319455d2b01f59a84 304502202314ea5d95d2d36199b48270865a325186787eb00cfff7e99d60bcaa95314068022100908f050602a89765b367c49b8991ddf1d9b41d4a868c7249b9bbf8149b0fbcf4 304402203db2b5955c4d281199bcfc8ae2c6fb199ee1664bb087eee7585fa41ffc06bfd60220712b8490e89af0ec14dd10f4d568cf6c02b740586a58d5738e7e2c08996ef244 3046022100c130b0143adeb448126223b4e40c799fca59080844c841a3256ffe0a574a26b1022100b89730eb90c57c20e41280a1509a373a552c6ec63599127dab3845d467439296 3044022015e0da50ba3305cf9c86316346084b5d0d426058983fccf99304d67f7fb7cb85022019c62a797c518ccd2dc6a2989470bf7f370c2c90c25789898418d51acc14d653 304502207a010af9afb9a5bba47b6c7a4f20fe2f2e9fae05655613b195145ecf1142467e022100c3b8924dbac37a4a31e18b9cce8a46d6873a670e42b330098a4db7e60940335f 3046022100b8f3a43444ae6b493f363d4be94cbcc773e8eed971d92efe4b37a4838fe377cc022100b2334327947e5623b9073ddb440c50728dd039dc588dcf9f65825f07605ae36b 3046022100a880cddaaf59972fa7caa4ee5a6ba0506748480c613a042d4222179c09ddd3e4022100a7380527af2caca0ce22b00771b5c35a7743123edd4817de397efbb1d2147581 3046022100f2e30f85553100cee278024597748baaaaa5a38595ef3c5bc918ea4ac6308c0c022100d9d2e78b9c48f0b4a45a68bddc05550f478f901230b98176d214daf8332a3af6 3045022100cd1e4439c6fbe01f1455c5d1fd10dd209a7efe7b9a28ba2623d1fe04874eb66a022045e26038da0945b71abce341fc92831db5d1b857b09d5ec6fa4a605a97ca8410 3046022100cc3789538f71ee7fad6eafce0471bc28e297406960665cb29e366265e9b4cb410221008a0188d8716b5e725c3501813e4fe6ee5201ca5b4e1d19ff46f196bb40674065 304502200213b5248595f250bca2eeeedc1bcfffa5d280249514278068b657c7c6479edf022100d00c8ce8231be2a9b29432d8873c276fc9dfb5d2e66237a38519d65a9b516f2a 3044022055576e2e4e0d0d3a53803ccceaf11cba2f5438b06058190ae89ca317b20e7d3002201440dd66732078d4df606620037947422a52e988f49ce2cfc79e81006f751d36 3045022053550e638f5e38ba05fcfca20628039e2b0314b2e7671d927e8fbb9442795d94022100a686ca44fd0f676591e71c1e0fff881f9e13ced1d708dc76f41732a548426c41 30440220031caced8415dd53f56e0b6904d8e1d55eee267205c092d5dd0b59fc543c2db50220539298112ff898c0f8f67ecd64f5cac388d1838abef0716c9d872745695f4171 304402205bf8d86b207aa70e55ff46bc1da5a7586ecb3626deb03d0c48577003fde08ab702204d001614b4960901d4f3f2563015356f696fdf0294dffc42563442288c63d943 3046022100fe5c7bf2be2181dac38ddad76f4b7f5bc6fbafc7e98d91144932e2e47746c0750221008394366e10cfbcbc812552afafbc941571756dc5987dc5050f8e3d8a1f782a18 30460221009489fb3e358705979527ec24c0f38907fbbc61226d558bacea9a337c041baf11022100f771fa3843c17257c63ac96ff9390891b2d6a26559e9454a90effff8142f7c65 3046022100c04d8d4a9120a910005b4433998640ce48e26b61e24daa27fb96b8badb0ffd49022100f17b0976d024d5219d026ce7acaa3bf4f24b30b7ecf2668b8d4b109a8df0ab6d 
3046022100cab076313a3016775be156072c7396e61d4b2ea41145f9b254b7d7bc8dedfd890221008cc26a618df0a2470731fdf5452bf867b326fd7aba07ebc28ecfa3c650f291c4 3045022100f3385ef3911c9f7fca1ebf44c9d9c140258c8fecfdcf64b6aabc6c67bea59a79022007a82a04bda291f77f59bc348a4bf642647be200cd2c63f57db64e222d699f5a 304402201331a206461bd74913d760f8eaf6261a4bb064b8b13e2dbcef4bfd75c52de7d10220791c5489d62bea8b900cf1c8a661662d2cfd72231e4d3b89902820d304a0ce4d 3045022100b04b31a603119896c8ccfa0c9963b700bb5275305f4f300b8c9e2fbf72552e27022016b148de95a6c27f5aefecd0d760e543e5ba6e0249031752f0014b39fe574295 3046022100c8b5b807c8e57f89cc94918feb8ab8da4bbb280611d19544bff5a92fa15d6c410221009afec4dfe55535c88d076bb76d1cf0509ef5d718cd6b15f995418cffcd2cd5d6 3045022040b9c8659c5e6b6725131eae554fc509e16618c3e17efec6f0f963111dd0a234022100f2a9f59ba2b76789aaf57a3bbcada7d9f629d269f70c0bd2907ec598b8881f6d 3044022060f507ba41fb7ae8dbe750df8b295db91238d32dbe13fb6eb84787400154cbe8022075fb4686d6cefbd16d82b37a34f73698f13fdbb584075993432f52e8ddbd92a4 3046022100a2e33d76606841e8fcbed951e99f1f8f95d08f8c60ef9a81c7e6879fe8158ce7022100da84b834a8602376c12728691611ba3f853f90b839d27c9b8a545706f6983db0 3045022100d2f2422bab366f5a3d91f1838e4a5eacb953dc1b206e792d222ffeb187c9b5bd02205cbd9fb899f81f7401dfa1c84f627771dd44c6b332bf56207bf69013184d2bb8 30450221008844e00af546c1a17269d48ef9620afedf9904c4800b0d2813c07db146a5616502202b0c014647555d3c2b3b8a183d6c1dbb418afdce549510dc167e083b3c32bb23 3044022019eb824dde01f394f8f0e0231840b60cecbf182169ad37693e302858c830a74e022045164b2b8367fadfe52c4e2b94157c28b1703a43585d0d33d8e780a4a837f1a1 30450220506adcb173fb764bad8b251c7f21093019355444a154864703d325f97780c8e90221009ebb84a47b19dac239e46624dabf1f77c33126bc38f9b33aa34a05791ab63b76 3046022100d28e95957a16480b94ae63e8e21a0f0138c987c0f53ee6b805c38465d3d165c70221008401ce45432e0a661828f015b7c8ab583c41cb6469931e09c4d959e275afe0bd 3045022019480aea1b91bfb6d04fe9e473724f7227c97175c9472b2ce15aad2b45dfd224022100a5391223db79644a847724663862a43d273fc7041c44aa0eb300219c4f7aa013 304402204765627beca495a25fc5dcb447c267b541dd0a7c6b78bf332e5666b287763e4b0220100c6113e6c5086d391888a92f1212d5bed5aa9a715d3950d5314502d949734c 304402207cb2e21bca585263845e6d5bcaf46c04d76e560af1378bac95734cc1c90bddb90220099eac071e4e3c178baf62ecec5fe7797b0b56ab94d52b53d4625b8f5d9da050 3044022076ec15f0629a0fad99437327666d989a79692d79ad442aab6893d3aa8ddd393302206b01f933049ae9c36c1ffb4fc687be9587f0657bd424f05d4e3a0fcab33542c8 3045022100ce0308a8e2e7c988259fb9119d7f43b59dc9e19baed3bf2ffdf2c1eb3a9ea74b02203c34e6b4a0c0c79f1a54b2c3815732a4b0ff409b5a574a4902440429cfcd6f1b 30450221009d20c1471ab5f192ae1a2bd86cd0b96b83a66dbcbccb13ade1fcfcb207737306022035dc9e6d143fb923b06f6fee59d93dc5ef4202e0f44be5414fbf00e7131b1504 30440220519bfa0a5efd7f2ebb993776d4d12b10f23f121ef3ed58e22e403b7ba513df5d02205bce6ea5e05d52e6407c908822383d82ef4bb413b3595cec502b1448a77fa900 3045022100c6b48677a2e3495ad4e09ca44598a829edd85ff162ca28bde767ca16244a937f02205780390d9b7da60dd04d0459eee1334954a56d70ea5f0e7cd1fe2ddbc08b0198 30440220750a7deb4958a8a8db50f30a3fe4c69297377a89d1f394938d6f9f077afb44c502206dff56499bebdf25c593b01c9681cb9fb0f233f3bf2d4aedaa3d58328a0089b9 304502205a23a93e6204eb6fc5e40b5ff60aa4a6dffdcac935c99982e3e6c2aed529da7502210087c2ed59b26d80800c8b3155212ea81bd96e2ad900caab8051d03e875139086d 3045022019a40f0f70211ed027162fac870e9e6bddc7101ab61f9cfeef198e776f8a3f1d02210081b43dcd74ee9c410236559c3ae8364c220a22a5edfbb56adb3510eceeb0cb77 
304402206c5a716a72923d899fa4554fc1a14c8897b66e9a6de4d9cb827f42c66a88d2230220288f2121fa00fdc36a37a73224c463332d575694f2afa880a52e417eb276d131 30460221008b071ef608c426af2958ca777b363fc99bfd3e7677c32cb147772d09b024bfbc022100eed1ed1aa58c725b8a75f378749583db079237a6b6de9dad96b02fd9e91e964a 304502203948225fdb8191fe603e173447185addd908b6b6cdbbae397ae19a71ec7fe0700221008f98555bd598120b859b37ece8a8457f467fdb3347c090ae66e88df53961b683 3045022100b7e74fcc016c57d203c787e9a2d8b7ffe4121cc152a0df697d4b17c1259477ce02203033dd3cbe21ecb1b2711777459deef620e74f9ec260b8e4f5c9c59ade31f727 3045022100a892072fa91da310fe721a2a350925dcb2dcf40aa69c84525d2cc5a2659316930220027d6af44263cc11900141bac43c609cb2b9ca07cb22f71a9bbd59170ca690b6 3046022100bfee3d47139870407ca206a363a6d7aec5367f5e67216c123d8aa5b129ab1b660221009b63f5edf69bb83d001ff56633d1909d9643e6c0f89fab663c0f2d9f0c44b59d 3046022100ceeb83c74584c7f81c604b860b64d5c64b8ea461e79b21294f8464ea81df0345022100a25e29224cd82fb749f41b6bcf4351277355086fe689341f17ecdd2445407d1a 3045022100b69d03ea59757fe2fa3d3d0d8ade3008b5611311a70723a5e4fd97c440c274a402204d3fe22e9aac89d4c7160c5a39802c671fc4cb28b61d5e00c8674ba8004d2ada 3046022100e32167ee1989462356fed6262fc8c24ec4035e50f5a58e373dad5cfcea32bee6022100e7be8f394f2f723f8491f7098f35ab8c737f6066b9c4cb69537e3ae28cd0ad66 30450220239ed5990b3a02eaf4cbbffac2367d9abdc593c65adbf10674bf53b4f35d3a8f022100dca051550b43c2a3182cc045275cb488603c3da9627a257a1abb50e27c9b1faf 3045022100a7ece60b0c49e262bafbea43a23d529b04c12839b7e1cfa6adeadd5f4ab620070220212245b163b23398ba194abafeb22539e8d1d3c4d4e272d6296caf0e233f997b 3044022071d75b40b8ad45430c66e0c57a94ef963ca2d5d11dae52ee181f8d9a8a2cd3ab02201ebae76919f0313565534a4eaafa66627f861996811ffeb6a7e5fc354a306773 30460221009a004d3a6b6352ed4d57a9311ffc28003112c94a3b2d2aa3e56a3a32a96b26e5022100e72a1fbdd84beba2428adb997cf210ba52488802cd5fe0e19bd301e6af48fd17 3045022100f5a21d5b5760e8c948bcdbc01fe6c779c57dee8baf6c4f36c9846d422d32087a022044937ffe13de163d16fd7bc7e9eef2a4c3734a2aa710b682a6d4e682ed883ee6 3045022043d6c7959717c8f2b7758040afd60e87c6fc59a76f27a0dd673fe0f7fee0b51c022100e1636565b03673ae667eed67007e917ce38ed5810f37d727ccddb68ad807e0f4 3046022100fe30300f0893e85df323b410557197c99e5e8ebc80bc3806547b02b08a7dd8ea022100f172097a190e7b401be9bb2961a141c20fc55240f4f4791bc8ccb9299ac869bd 3044022021d0a122572aa8ac8ab407a8152d7bed6ea5e1c6fb5fcbab251010efca74c82b022011be388bb5c9212aaa2428b8a1882547e49942bf4ec9eca978fe0040bee61067 3046022100a57b1866c72fd2dfa02d7e9d14cd36a8b96bfc3ef23b5e80d0256da7eb552574022100b6bb2a0b49c94950c1a264159fc72ad3524a19350b62b2d2caa571b6342f6ec4 3045022100bef875fc657eacfe1ea7f5a208838fcd79e4169f5c0e2fdf5ecafe5cd0cef2f5022031614a716e420113e414c29ba079853c287d18fe7f451e6845a53dbbeadf44c0 30450221009a944cc70a21b454f5ed941f4f62349e1be5fa9b4c1479d2aa4d702b63b70f4a022018ffecaec11e28c445cb0651c8a22ddfb66a965c17ba3e7f66851771af0db81f 30450220242528cf18606d88da728316cdfc5cdb419d9868b5161386cd5c34c8f41959a50221009f469f23617ac50d5bea7729d0b06226bfaaeae6611b9275a799225d5028d159 30440220039805aa0c4db3441cb15a76ea466f0cca66bb6d713b334212b3ddaa06a9dad402202d3969914ef9397cf74508f46f79016835af09062538b3c39640392dd411b24a 304502203ec1192ed427d07accec97a9e4dce069d4ee8670bbabc9f0279c5d7e6278d59302210091ffd9f8093121058d1529c4014f67190fe58264a3126c74458466de5a70fe3b 304502202dd26fa29f48a519b4a8586132939426fe73c9cc4bf2537df1d2e60dfc12a3d7022100c2141b1d65cf5299a8b9101d4c162ace2486042793109c4df55f1ad7226c5a12 
304402204fffed7d2ed371888e33a4a5f2eb2c93fc5bd1a770e5e051733776ee8df6115002200a6f321494c12cfdcc65b8c9c2e4f7736f8f905eaadbaf36d4d035d9986799af 3045022017857bcebdcd52845fe5838103df3dfdc94d117ea70cf8dc868bad9b691e0088022100e1bfd4d94626f937a55b92db614e5c4130c020b069bc17db61ccb084681438f5 304502207c787cb9cfaf64cdb8120ac936c5e72e6b425b870cdee18b63c065ee3383a243022100b378977ce261885ffae131cf925ad18d9ad6a5739b4e7cdc35aef957e56253a9 3045022100dd87739d4567d1b09f4354e8e2de2e693e037560a6de009c2b1659a9c7eeefa402200dd501c36a55f5727ae07770451464cc3487cf0f89bbdc37d6f5b6bb56c99f27 3044022056ef0599dbdab669aaa397bd5b73b56fada10e4e3eb923997ab9ac1b1abeb91702201f469e7ee6cbbf032361df6ab809c6b06642aa2b3cecb2db03c304a59da16554 3045022100b2be5944e01fe89783d51846fef43dd6c044a61992b5389f28183c8d2ebc795c022026253d1e4d5e844882e9b3ad448c5f38493ef2517046b53a3eea5cc5c6e14d9f 3046022100a4540182f36ab2152a6ad8c5c6d23281aaaa7ff52025a4e77ae6cc6064966897022100b65622522a891b342b486ff2b8c9fb6b148e38883e8fc652da098f0c92892844 3045022100fd97b31094b0a051ffb4e145b2b74ee60669df9e30ad4f9d6a44e38b029f8f5f02201727833bd12b2aa1a1126b4313dbcb9055fa0d7f44f842f12d186b42c7f83974 30460221009398eb6cfcb432647563121defb1147d00862efdf3cc99f70b7bdf10650ae1c7022100d36669da98e18e98ab4ff2c6b5de279c4b1b0f28cb8e18265448d22c0159b770 3046022100989890d21355cdfec9a454e29a23d2e8a6ac84bb44c49e4bf240e9c07e8cb529022100aa4dd3ef2b638751e0dcfdfd3523fd26402ec70d12c6e749e903637c75857a75 304402207a5b4ba8006ee17c9f796541a084abd08924a5a0c344729748832058b7680ce502201f3ab33ed825c71606da2a1eb2c255ed1cf7c726d3d6bf40612255f860032104 3045022100f23e89fed7d7a1903b027f8e1f7acc8812a6fb15298d65492841706e94f404cc0220177e6f0cfcf3963eeffc529e30b429c540a247b4d292c1cb5ba314b7d2595999 3046022100844a79a657ea357ac5e263143bb184636723c99bc1f0dd373fc3bd384c1462710221008450eb78fa7f87bb045bc16df2168734c004b8775f947cfb3e580b7221573ef1 30450220717aadbdb11cffbf5444c3e28600f8333f5860f39cce2d56703ad33f8fdee1fc0221009e38a5b9073d154b65204b8a5ca03497fa874dac3bd220ade47dae6543fa8d3e 304402205c968722afddd69c32a4257ca75278d92d39a13a0a858e847b547c229722c7030220699abe77c47970ab1dd71e278cc3bab30c4bf8ba61f95b22d449e89aa3f264f7 3045022100e6adc72971442f1419655eda98cfd925e7586c0b6876ecb2380c92b3117727bb02206f68adbe218f049709101dc62a99d00143345c511f6b4ecb318bfde90ebcb32f 3046022100b25a331466ef0e8476cdea595c87936fb2e6e7ca4a9d6c2928e67f9bc0c659fa022100d96b5fa0a800a8acae122cb5ada1f0aa6678c37f0685717a70b5bf51493c3a55 30440220659c1b8c6d2653f114ea072b4f3323d2dc0c6439432cd85fc190a16107b1c686022010de7f71f9ed674f2386a53bd7ab933c4cec1b1f73f0a39f67182edb6e3a442b 3044022010dec34871ccad20c9d58513f838d9fe34aa6b27ffd217ae7452e49020e707d102200b0d368a1dd71d184614b0ab5c073cf3810f1ba88460bed082ecafd41ab1270a 3045022100c4c1431768bc1073257441d7e8fff99fc3bf977659dcf9912f11f83b2c574e1d02204fc5735d1697612c717e2b50da92b32ee45fae2d50cd60a51923865c0cf95cab 3045022100d94639071c3efb0a8e6e1388a6a50d178de0665a8cb026686e5c83257eb6fa5d0220374ae85c210b210b21ef29e74f6b4334afe6dd6877c829de1453a6d34d1de34a 3045022100d94854a80e40539eff73cfc85ec4bdbda2476a3075b5f05a061ac4a88048f42602204d3f14ecd8f3eeef3a6fc1c222c204c9f955259d3c811e04be7b5f5b079fdb76 3046022100a719e40cc4d5a4f6622dcc8dd9d4fb953a48a992037cb78407366c0399ede48b022100aaa2f0ca81e523d7ce3a04107dc00de91fc7c57457e2f00411e6450aa96f2bc3 3045022100a5cb3bdb14d128dbb277bf58a09912a175463fe68db1e1f5dc9f16681d09f03e0220274872f1147ed436b08c091de506950d0863949d90ff9b3b966627abd7555dfa 
3045022100b2722dd5a58558753cac3ad37a1fd09f5571b6fc618362bf9a81d85a1bcccac4022017cff05c1d8f2a194b922a272cfe2cd9c427e4a5bb08fdba1f7ff8b87f7a520b 3046022100a6d18d21164fce322a4b15b34bfb3ea084de4cf37638b2b9c936657763ec57eb0221008897d1a1b95679e951e8f3e80572943ad842d33d9bceb7a9feb6c2e47fea64b0 30460221008db8c1ec041bc420c572a1132b62787b967de84766f263e933c934cc3627c76802210080aef227a4865b35ed35d5f2f0c1980bec4da98c23249f0acbcc9cb8b081ac7d 3046022100d7aec4f77b7587e6579803220c7c718841737b5b415c1fdfd1e76933067579c5022100adaa55cd78fb1bee9e25ae13e610d354d8b690bb18c465154f681b936a8bca77 304502203f10b844a494fb851350a607c2d3a942764808869867e44130c608465e46383f022100e9bcd11cac1ed373750e6e61724bfe37617acf5864c283f676335481483964de 3045022100a35774b90031483d2e9c63827ea50c6e801c3d04a23418cbff81a8cbfe382959022007fcb8cd92463861c7d055194e449940cfa136fb5a430d16d10de6b846e83beb 304502204645f07515f7a0f2ad99e837e0d73cfcf7af21c3f7172f0bcb5a3995da583466022100e1099ab2ffb53c74de7eea868db32258791cc36e8ac93811dbf7524223aad75b 304502206d8efe7f3a7064f72593181e24fcc296e961433bed81d90903f41ad033edec800221009d447012cd23b6a6cf76202dc7d947813a8c7dfc466fcf234f0709b9d1a1d411 30450221009e5f4eb8b38b9f0771cddfc8b6945a4a581c7a40903c6e6cb256f2c686f0db5a022007e35ebc8fbf96cd0df79e5aa5af5bacebff33eba38bc85008538767a72c27f7 3045022042f62a71c7082d192ebd2ae2bceb14596047adb2cc1d55d993b15b2880dad7df0221009d836f75170e4d5b94078170d9205d8b265a41d801a20d81c5495c5206eb5c06 3045022066d6d3fcf7b38976094bb21de773a8731c2aa7d297e6a59fd084e0b05e57a0dc022100808b453fa17d093987788072fd9c8cf0423469e48e929a6f8d1835dd36d2338b 304502202d18755d1597509d7287c756613f528695a1d0caa4118eedee692ffa37a5286a022100d4600a7b8807f1330b160d015a23421ea349233052c062d453c0b8cab9ab961e 30450220085f6f5bcfddc1e187dbe7ccf98ef5fdfde0ced0c4e927224561e66a53986f5f022100d002844fbad41fd989c29a284aa2b12d0daff23086e4542d4fc25e62ed210c14 3044022062c3fa2e63393e1f6cb3c4fb37f82baae44c141aed0b9ff5a55780958ec718b1022030b11ed465c4e9d649e1e40f235b259c1e9141624e444ae0c88e57eb947f71fa 304502205a79083463c1f0b1d06373de1d35b33b2e743edc5e5708bfec965b552499300a022100a304ed605074616cd9fa53d54d9a229f146ea58ed5da4d5feb8f0797d29161f9 304402207b25928eda2425e6b7e60de29a59386609c226c87bbfe179284cc0057728e92602200c2f512a796ad7105935b3f706040d30a3dde62ad4143cbfe6255959c8219e9c 304402200982342dc6f54a13807f0b95ae56a8ef3d4ced54ef0bab392d2fe045a4f80e87022019153423fe3688b06a8f9512ecb1f5badf52c64cf0009ab0228eefe33cb0f074 30440220707eba8330419bb84b6803434820adeaf1ec7561e1fd15d166eac1414bd04fbb02206cf779b6c067faa966bf06c802dfe74bc04c82d93c5e918addab6350fcfcd3a4 3045022100afcc284e0655697db52c4ed10233d225e0b851c679c38b3a77b875b82e643d1802207d344b272d0025c5f9fa0c48412f8b05800751fd2a3619cc6f123ce5988e3b2c 30450221008072cb2ae933b3ad34bc177b0054ed70a5a9477c11ed0027640eac96ef955b99022012792c2a792ab1d891f0d71d959991dbed0ae03d8a292296a0b77fcf76531f0d 3044022056864f2d28e773242f76cc263fe6c713da547b84206a1151f96c7c870378f74402207d4b12fc8facafdec493a4d1f60f505630fbdc031b97d13fc951aca094411f2f 304502210090f54ffa3a6ed80dbae4f56e8aa3190fab7dc0efca232a9b971a2e95d014b2be022020eb5f6d2bfd4fdc1d00eac2ae100ff9656354ea604e48e27d599df9df2e730a 3046022100d22a4a3be48e1d500ec9d4dd0a3af518c194db7ae8026f948ce476bccc7933da022100932407c0f3acd5b52d8947988f706ce1591e1b3cad903d28e686cb28f29ece0e 3045022100a651f789133594cce72130e840539830f7f6aba667d6c33f3627b1735bc622740220579386167fa2cbcaada2b8aa31e553272a47d0a200fbe6c0e0f5455e85824806 
3046022100873863bd7c0e61f34837c5f3b70d1abfc6cddc09597b7d072b179413b96acca40221009eb987b2beb2181329d8d7d161cff19f29f301a25e68b1b4d7630052087b799f 30450220603bbbd24323fe8f72d700625fb981365f735777e669213606eefe9acf0e6490022100ba604a9765f0009117438fb6b461cbe944ffee017e4cbe95ba91dafc19a5481b 3046022100b6863da73f926cb0fc0c84239972d74c484368b0b16ea963a2d67b104d37085c022100aceab5f49d628850e47a8f22794553950ed76d9f89dc8448c652b010b98adf57 3045022100e745c52e32d36d83290df599594ba271a3207f9a62021d262ccbefeb11f2b20402203bda3e41ca5cfddab6c1a2e6b2c196de082949695046184fc9b16457f0b1c604 3046022100b1a07d2fb7ee65f2b41f9e7302e3d0852c6625715413d086a467ae02d1432b730221008339b6c0cd3542aaf4bec2ef76a49e7d5fc58bacf428470e549b006bbc59560b 30440220476096a6938e0a394beebd86826de2eca3b8dc1afad47c292d34f8cabcc6aff30220781675a44fd4627e0ad3269e6b78ab86f12dae2a7fc8a32f5f14688d04cf0b58 30440220039c382d845fffee89bf74c1ae48e7ceba0895f49e02ca760d2518a8f33d53e902201b1c977d6883368da675a6dcda640e1475cb6066aefcbb99a40de823ea58bdf7 3046022100cf89f316c99fddbb7c9be0fcd3b812a0aa73f7f2f4d85ddb6c984669c68895c9022100815e1fae9cc5bafb559143f1bc9cabf1fc03923f0f56d3edf63fd04e45d72a88 30450220082d509ce8a1ff37a8843e009a298972476c3ae33e31bc47008b684e5d0fdc0f022100ffde9a5b4325648d24b80a6190c770137e4bbbf3042a7cd1ebc02648e80e43ea 304502203557b8dcde2d059dfe607b65b68380acef9144fcd62372e96d951fc6c8af854a0221008b133e2d7818166c428af094eb5f0c872d0a47abc278f2aa21aef55dd8731601 3045022078006472eaef51aaae3538ef6d4346b91c182f2ecfe6523b6a82d897573a4ec9022100b7bf4e317a93a8abcd2c1b6c9c1e0dcf7505536df995b7f9210ce1ffc667d837 3045022100bd7aaad16e0248ee6544fd1a7dda31cd7f0e32984ab4abddce2b87a80882ee98022068a276b73a38b0c514a88a06637156d24592f618b51b01a68972846a32f47f51 3044022067949ba7adad70b89b6778b7e771b4cb7d2b5eb2685d711beaa94c229aea25070220349b95a5f497e9bdefe0c479bc5df5ad61a4a78c8f6bd54616f93dc1a5d439f9 304502201aa8e791f5ab2fb81bcd5ce78f02590b77660bc069d9f1f14fa3a4df31e923fd022100fc4864b13316f1662b9c7c7051060eedd68db9416ca904a77e5821fefe7a4534 3046022100b67cf66305576e7b80f51107e9f71600eea2737ee1193725b91ec6d38b2e099b022100db44803ab0e87d80025ef8b335ecc648cedc154bf8f8870ecaea46eb1263219d 3046022100d6829231c6511eb6fb81662eaa28eadc883d882825e0c1d709843814a04dc629022100c6d1612d2c415315cd0d73a0b77ad2a58d8d505d0af53c891315d1082dc89c0d 3046022100f617f16be0ec231ac6922454674399da782f430d97ccbc5a54465474f81b681b022100fe52d1d979a186f5e335c567ad2f1bec1ad9edb880051b840718b3ec4a3147af 304502202269923393044c189f856ad0e64d17df705c8437ffd5cfcda7fa407a44206824022100ce2d2385207e314024f9197921b9240ef8b57d0b950c3de9bcd392b19d64013c 30450221009af23a2793daa92037c74d05c3a7f47c241bab2dd0fc1e1aa233443e67d49a9c02207a33d584e01171cd37aa264cbe3541c062a9f11a87424428600a4350531b7ab0 3045022055c5ae3a49009319c28fc038b11d37f6b869c517f7ea3ea733945735040a35480221009b2fa731e1328bccdaf0726273ee18b8cba69e7b00c3e0784ee479b0bf76a857 3045022100d368d42a8757a3159e309f7450fcc54b94154f1f80a92516b621a687ec2dfaa402205641c3f825edb59ad1ba94a0580e4ba864125b2df6e7aeee120cdabd624817a2 30450220300b88035a27aeecd343de5d2b93815285e033fa817cf2e64d311d4a2ac7b978022100d483362aee6937d298cefa5c0c966cebc27420b35dd77e490be99db0f11252e6 3045022100f8495c1740bb65509ed6d120c13411c9f78dde20f0f4b1e5ef353330517e6419022003997ed923b929aca78e91e7b602e024568f864732126e2b2e99f79efa945958 304502210093991da3a83d2de60104d402d8f68bc80016f21a1c745bac516d4f0260ac8d78022068b52c96d989be3f45fef574c6fc7d0aaf9ed7fd847f7d8578fdf1241a0b3042 
30450221008b9f46f9e34c58b5c9c73b39f0f88dbbf6f63e9ea1ff5b3475398d36847f180b02207ec1c22650f4c36cb4be455892040dba322d7c58afb46f4f09d743638ecedfc1 304402206a9915e03806453f59d5a4c7b59f8039a3266831d1e2ff768efd144c1e1d1735022022f7a88e2dd8a1468255f91e9aa1a9af6d15a4f44f0471b94e56490bb976cc19 3045022100ff04d6b5d4af96dd94c6de2ce58375722d4b7c19c3c7c6b47af716ca4191dd2d0220759d4ca2eb8da40264e08f5a2206a7a1b1976723c0ab2837c373a3329a187dc7 3045022100e1cb439408b1a22bf545438acabfb58227ed89f9a1846894d4dd6bcde15eccb202205f7629b63b76f0d0598f7a17fc3ddd172d01f28e5daac5d9eb3adbbceb9f6ee5 3045022001d119949239ede621e04054243cd9bdbef2a1e0f1565de434f13df2dd1d143c022100c7a358a879c3e23784ca493378ab3cacff07b50f5edcb239cf92653de6483c4c 30450221008fedfced9681aa0b3f463e19bacbc8ba995c176a09400601cc8a43e5b9b6932b022037279f491e0a25c45ef7d4eb6b393d10249e5df30ce24849bacdd403ab303e1e 304402201f73fb8cc1c575968176cd35c7c13efc813a537b781a3ad4159674ac3e6c863002204ead791b1e68a760622e3fac9eeda18b5abbb33347c3118fb40fc287dd090e3b 3045022049fe7608cb51bcf5ab9f2173a8e1287fd93f010e47a834ecd754a6da9f7175e802210086b878833414ac6fc82875d07267ca187ab7d29885d2c4c5fb8985b25529f815 3045022100df1113819eb3336c44f012da08cca500560951b28e996110b6cda5c5b7d5ebcc0220431b207c83f8ae5d3fcb0108f9ef03b680982f84ce0329167c6ccdb5a5efa598 30440220054c183166c0ca4b12ee979670aa826f603c62185c979f8eab720c622a9ff0e402206335872a078d1dd5b1e87286f35b990b3f15baf2690b83e4fda382ebf6cad67a 3045022100821474c0c777d459b75c0f74b51b6d6480351042d2e26daa81e11f2cf00ac5de02206462b0039a22c305eff7ee0b9f0f19679d1dac930adc95e10dcb94510fbdad48 304502207ecee95bb5b31a6559a66b2e7bce682ff49e434f73e870f2a6995c980a19d7c60221009692147581545ad3a26218cb6805c8f1e7f0bdc4a10ed36d7c107cb50f2e706f 304502207254cb53f2ce6d26bd3fe70a25bb738259893a1a8165e208615a3f8c14029b8a022100d7d7ea75022cf4b2f5943dfdc707a96bd9464d187c75878ad73623a50c184f10 3044022070adb64c20948035bf3a7e239b101a311fff157badb6ee9c7c41e869fd46c07b022042eb726038fe8491b6684578fad59db8e15e04897a3eafaca2024f20acc36a29 3046022100b88d282cfc79a9d8d0cddc1162fc17ee5d3733408e122084ddf4786d4a090ee7022100b9b0546c741779378a7465146e66798f1e32b05f26269ca5f2ae5f08c2351991 3046022100f21498f72088258dbb1a7db9af7d58e64d62f8799754aed73d0486b8660dffcb022100b4e57976ac1622e5252af33ef4c665d45ea44e0964facc7cdd0432029af147b6 30450221008628ec9e181841042b3dbcb73ea90b0d90c77c39bf35b05544099b53c403d42902204ae411306d81c02458d38c13098d9bc35bf0e287f47f3fe77653da401dca2fcb 3045022100f7dcb26ade680143c72531fffacb145b360f2c3361be0539c4433a0ce8b814000220605f1752c3f0605ffe176847e6f107e07cb70bbfd31bed098a610c1e3f9a5b02 3046022100d1d1d1bbf446b192b5c6eb9c0bff8c0ad54f33eb0cef9f1ca82a568a3695f708022100fdc3a0641b2ccfd403436922431d6b774591c792e72e49b03d88ff43baef9868 30460221009be53a5055966cbeb77182ec812d69f62f96722c3443296cf5db164797a4e4ee0221009a7bdfc8eab13bf012e7f0103c9a7fcb69cce7874e575bc52d2c3bb3cd1caa79 304502205a6ace690d6026aea3d113b5b5b4391f176d71595303d87a234df71c2223060b022100d4fd512acb85b54ba7cd07b6752ab490ea240fbfd1e27f9329972dbe56cbf216 3046022100d222bf91531b1165d9f599bd14b990cb194082803e53a2748c0aba4a446692e5022100df2376bb0f1edec5fe1a0abf7bf7d111004c871372da1b4269fbb9df90ea9109 3044022065d44c2a28f10a00a4aafc08642639f1cd2c4bc74809d7eec2c7d2a3200892e7022050182ea9a7db72ce34e8768f0a89f0fd6e1900534c52b00cf46f7eaffb8cc86a 304502207237e0ff748a14607f8a087c02424f796b1989e71ef2c91046de791cf04cfbaa022100f6ebbe041cdd4e78c38ef0484d32b935b81568e18ecd48d1002e37076ab8e0b0 
30450220151f9ec47a207d1eed6438ee17f032957b4790c2fab5c141c7415592c4cd9cfa022100ca9b45ccf395a942d3c30e5705d8aac7f93571a1e3e405726a95f534d6536062 3044022051e9ef475635f65ccb4e69cabbc3fcc0c9f0f2a67a0b4101f2e8859730b46b800220010c3dc8b4bc241e4b59cbc22ac8ac1401a82ed654cda24be3e5ce05ca81b786 304502203032f4527a555317c6eb8d666d132e0423e7d936d8d249d83bf94bdcee259669022100e4a249826f8fbb2df3bb5ad618763db8a55731ac22d8b6d4c5df68c40218f2e0 304402201361baf68b0df67c477b27e129002a3c442b948d22ef338da144cf68586f8ea902205ae69545453b76cd578a3ca5f85903342ccbdfb6267cce62e39af9ebf9055dcb 3045022100d75b6252b828debc268f135d6dcd6f09edcc934eef27147cc36856dbcbe0ec1e02205e7646bd77d6c5a04acf28575064a9943ebfaed4de6af990ccbaf1b24e8e8a79 3045022100d17d1d60d31fc9aa4909bb1d7095f602efeaab4dc8371d4b07550adabfc3f8fe02205a473618d9691f72a7f2590a2d7fe7410844ad880cf25d9fb122dd5af536d895 3045022100d157f8b97d1288f0fe8f20391425ec9eb49e8e865f328c13877dce89e0c5ee97022024812501040519a66cc8a32d13f17f3428da3da17b9c2cdd3a9e31f2e86be655 3045022079cab428a1a7015269eba5da48da18358f088242dbd24da323a020cca20ad2aa022100a876f8e833df8ba1916031db48a5a090e9ed6d2db41aa511e63219b2ab0b889b 304402200848906f702114f942de89ff3bce7bce3c2d05b80dddfb5ecb627de9867055f4022050f216c44a5d279881b1ac589c6091fe7fc0102c1be6eadbc1bede68b9ed3b34 30450220088f8ee55a0c781ef2159489b0e846ab1423c9951b01320ce8469f48bd60c958022100d024f72b3c7cc71dbe17d7f4eeca40fdd6c5a46d6ccfb587fdc375f36c89357b 30440220497ff0ba79773c2994545850e175825b96d9fe56f458c3c067d51f8d0581b8b70220235636e874e9b4efcab0d30c76a6c6dfd104a5e20bd48aef7e7668254a7f6450 304502210089c3a445cc5624f8673b3fd46e630da56cd770faace0e73695f75c1ad3d9853a0220674d7596a20fcf8de16f948a7a545aa947fdf4afa8266d9a481f39b57a5523d5 3046022100f7477a53a03ef3d54f504ad58109ca1a2fab2ed06784af6ecbb5a3fa69865c9d022100adbf1f637603d67f74e16a5b62328960e52c42738d56b0f0e7ea172bb75fe335 3045022100a92b93e47d5a3bcb5e50719dd139783d7a6b4675da4f95542da80d2e904f0f5b022058e24d8da00392c817dbb03f2fb6c885196afd9425af5be422ec686ccd5ecac7 304502207938ffb96e8d76f14430cdc830906bdf8d23236358414702318afd6394f62ae0022100e2908552908f823a3c0301d0566d52b11be45f68b2771a64a84669fd02a3a582 3045022100eefc6ca5e81150431cabbd5d28acc69391e71f4129afd46420e8b9679e884b3602205f974979d217ac91f20b2cdd90b078d7d15808dba4bc4005cfd41ceda62f277b 304402203714195a8fca5aa14cb0e5576696c0228d781560d3b00a12ba56ba5f91faa8e902207b27c97284281222e9d075182a5940636e97c46e02ea668375bc625fb2de1175 304502204b9ad46c34834ff08c924abad44d064a0ff623cb21d39275d37186d1c7e4e29c022100abb06706663be025062c6ac6389e05c800d446800682c0cfb935838e6a830a22 304602210088e48279cc52c7439fe4a86e72755705b45e2aec3b6829c4d57fd0a0b9d4e3f70221008528c548a2ccea28f47f42eaad717a05fcd44fd849d086f5e59ceb373fc4d38d 3045022100e34858d7735477ad5e0212b26dcf4e382f3dba2b6350660f254ae83a0c80e948022034aa5cfa50bcf5b5d202e796749e8334b21f82df40e7b1f5405d0b8c90b90d32 3044022004874e73f62b5b77ebb617765d08f427dc6e7e18875fdccdd09440c00446e2ff022031c7b7b120432fe1e34ce5d639e1fd06cbdeeb9dc6120058b6df6ae505492804 304402205096425b985893b40b094ac7962b4ed25696aacb4abaaeb8ef9a51a45984e1bb022019319ebb5aff40a0013dd9bef3abb1bcbc5a6b596fe8f94617d491bc476328b3 304502202ab8f1d4e9a740343e3a32fff4c3ce78866664f647d099ad99514997279394ee022100c8c12b444ec9d852f2ab6d3c38d24ec81269c2874222e82078a97514600cab54 304402202131cd62b6a9a25f764943897553118fb9ed9dbaea9bca0da40f42e0c8687b6502206edbe219c82ccd74796c465be51f48ee0b54c57ea30e92da10eca145aadf4782 
304502204240cc3fa02f78f75e4e061b224e55bfc4c047793f873c05181d49b82b06ec4a022100a6852c20d4662e59944c87567613d684b08196cbde5439716b02a3653b61c9a2 3045022054b90e859553ccef4f10160b81950c4267cc41dc734e8a24003294f9c8cc3ead022100929e1a7b7c804725bfaf2451611bab023d25ca33b6d47e75d609db992be7dd63 3046022100e7881e9661a593a217be8b2542218b75a5eb205afffcb3cc10437714a965a068022100be31924085d9da051d942f26d3447e1b70e17497514086721e7ad89af3b7ef0b 304502200b38a5424f5f067730fd3870044fc8121b8e3c4f385f1a8c95c32bf80af7fa6b022100e584351175abc62e55f8981646d1780e26fc253bea14dddb64a36925457f3a61 304402205d056e4d867758b56c2831e477c6e33b9f7ab725bb3185a668b346e19851990902203f8dc78aa79b262cb3172bbe252f34a92fe5c0921b487bf506142fc9a9ff3392 3045022100947a211c26fe85a2e0999ed6d9d6e0b946b8dcfd0f6b623cf2297c3c970181b5022064daf02f7b991b6fe51fbe03df70994ef4a5a5525adb739b9d998943d49605c1 304402204221e863e4157a74102cde58cd890fdb9a85b98246e1de74c629478bfb90d612022008fa024dafa18b410827bdd3e8bedb665884fd36e43550ac3a2eeb4ecc61a9be 304502202ee2dd568fa25b3608dc32d1737cc52b3c363bc7beeb0a090b84c94e187d4e25022100e6ebcb70d0c7f5f96646621ca3902602ad149367cc5a3f7fabb3e5843c2c87a9 3045022012baec3b0d51408dbf95d7784c8428fccb7daaee7da47394718dcbe1d30daf0d022100828dce04c1787067fcebe7861d618399c80c51d0f5b8a663dd66821a6b026e5b 30460221008477aa88ae86078b3dd31c18519d697e2319ca596da709ef623d804006f0879e022100a5e487bced67fd5b6f3158005155dc32bb79c15936cb656932bc67353951af5b 304402202d95bb3fe76803d67e3d8f6e011c963a69ded5c270de25f055b5e26d5e9c3632022012a0e3b15604276423575b5f852b6dd6134a6c9ea32a391c113cb493b3277f85 304402206c14eee7de30f01ba741435ca4471b752533efcb52bccdd50e1a6180b0343947022037923118911c9d1ec1fa3303513d761eefe6dd7516480bf1bc3d22fe3a0eeb90 3045022100abe691236a0ca42400a2e771d6bc6b298b75c660c149fc16418fd1bf836cff25022041fa1870492f8c2c31c07280a912c495d8836d16bad148c86a4fea6c9d73fe7d 304402202da4e235abb8dcfea134dfcb23f62f49ac4054015188f5208ffeb87981dae3a9022016b887011cc96c4a8599ab0bec3a8b3aa5e44c2a592c27299b87f894329e593a 3044022053b894d21e8b12f946b6863574b13fc86394e634d3996495c7d8090ce48066ff02201f3cfda315d4482a4775de6c93ce7492c27a2bc8a1d72180a8c43018ee3f2d92 304502210097ca7180405a017240f9138915f1462a1b3fe98a600e9948f19142376dd1483a02203b7e11b4eee105e7ad2deaa0a875eb9a64433a886198001d7fe17a22996aa8f7 30450220688eafd2816466d3c3d7ff05e694ec4d8146df3d1fc85524499970ce8837e6fd022100baa413196e005771d1b46c0570bce9542ab4880e62daded5663637d3ec361ff1 3046022100fe08201e3481598580ac99e794e49b357cf080b7381997c90f2596b1fe1c431e022100ba0dfaacc0e52a83061319ed6e6280354069396b4401f589edf8d85f89f4b7e4 304402207a26d2a9e10a9908aa02a4d68e74d23058568c5caf5ebe33c6c5a6257c5136e30220386981324967e48d7fb5fa07322fdd3d4b83291dd8a0413f5f04e0d54c0ce045 3045022100bbc230d15eeb556cdcbab617ca8277c2d3b0c97eda9eac7c2864616a03d8ad280220341212576e3ac289b0ed34a03bb8b8a5dd296c84d4c4430d0cc5abbaffe87c15 3044022075cf6a8733486b248ecb97a94fba2b7b677ffcfc32e162224668f43fef140e5402207c1e6d1117bf33265af06dc4b7514ab5060858210f5b3f903eb0d40fb21cb244 304502204912d6a6dbdc3fa152bd1749800ae0c3dffd2901806d53060d4577e5a477ecf6022100a18906c2d90f70eb9702a0bc86205511b3be9f1d4596d6812e48f8cebdabc84c 3045022100a3c73d49ae3e75837a57fc6611d8770b131b12b8e639e0de75b3f2dc816db871022071e3a41d0b901118d69a0a128e8152f2c57b8e8d2a91d1566047194101a68381 3046022100f95c6c9c1a8e02755e7a02b47934e4cfddedd2a9ab209f914d81c5523fc88356022100804c22b69d705552ae3b0c77d62d95bfb1315d51991eaaa21764659762bf3cef 
3045022100a49518f7d096c84bd7394c1ab9f87dc65b133848782af8fff3d38db973a3f8dd022001178eb509c02df1574ed06a745992f18c1264f0aa8d2fbf002172585f75531d 304502206339ae55fb61773a159b400895b0d591380f17c538c27e6e4f3ca9fef95ba075022100b149603800ea80cb09ac9d5ccacf9deeb0e2a2c47dd97e6e6595c2d6917b58cc 3044022065adf9cb733530f2a91f43ecc31235609598f402861df9c6c08f41ddbefb2693022079b942f3135fa6d4113158610f7e50c2d12376bb18a242e88b0d2ca072577b30 304502210099bda92a510b92a9b7a05c5e28c500960c7106fe5eca73f4e67ac54a331b19f80220293cf21810bc8fcb8b5587e4ab63c6722a0775a4c17740d6c732e559a4aa340f 30450221008316ba916a60eff87535efe1d91ef8152e21158dd613ba16c05ab1af87ca3d7c02205aa69635b5bb333da590cc11d6ce05120ef0ae0d222772ba49f7f4b1e03cd099 30440220446561889f2b2f9c4e416aa448f3d173d2021793fd0be43e007421ed1b6e61570220300d8a62f10f1bfd75077afa749717c7d48bbf8c98ec07fa12c32106f3f327e8 3046022100a3d5a38c23a209366c527fefdb427cdf6f015809466b63aeaf558cf3fb0db5fb0221009428f2235586a7bca1b09f8a4888a35c9a1b185428237bfe512f8febca96271e 3045022100b792f030d3391a88fe3cdd65e46c5df5471c30c07a5bd43bd3937f2eb0a0b83d02205046e01d00f5525cf7bb082305d0e253b07ba48b19fc224d292dd398f67697de 304402204fe868db1572c192e0d2105083de4312dc2dfdba1eb9c873b8f7cbf84f648aef02205e52db6fac14389f8b5c205bf59dff6d1182b917484932376aae61bf087078f2 3044022011cf0d427d6634751c0a2b6ced680aef1fd17b4b0ea63ffddd1624354a4f6667022076ba32dcc6805741f6542ce136fc72c4dfcfd70e2e14617f0d612b6c189c2682 304402201a0a89c9746e4737c4346ba44c76b8d85035302724e38a33355737718b6d541b02200d624b4094b5360da6cec38286dd24f3c588d484eac518d8fc8d016675c7dd00 30450220689affb2f5ccfb0ab2fd1c232adfeb636c920f92a18fc96629c583ea8dcf7f960221009c06b9e346b1df7a024c1feae96003835ce5e6b0a54cc1066b670349b2241bdb 304402203d11d9b63164204274bd6ba799f46a850df3e816d32abcf2e22634b497f8f6ec0220744a470ffc58b99d072a793280c35553f9ef8f7e0b1fdb1a4cfd0b54775ce29d 3045022100d6cd469cf6227d562290fe27e3d6145cb3f1e2429de2e226e89c872423572ce70220546b31c8a0af23a93717d56b3146e6cf4838563ca449218a40ff0e745c030cb7 304502201718e7149091c0e5bf9b5eeb6e37a653ccff1a57f5ab34436357e5f8a22a8d8a02210083b77a682ce2f92400fe061b9ee4549853eb17eef9c78657b1c9f045badf9d6e 304402207ce312a9758322cfb2a7db01614cfc46a98f013821d7c5da34284eb53edcec0f022019a5e0cc068b3b69c0e49e334bee0a6b9bcec570c850d61bbbd29b7a2d27ed08 3044022038cd9a4804c8e6e1df47f333ab6e441c751c8be8d490e2a5cde9539d74bd37da02203d4b025ec237314d2343c2ee5425b2ce4b72593023e5e4d7c07ca398101f6688 3046022100b83e968de7997e242ebc8b6566ede814269d04680ee8e23191b481946f9794e6022100e89124d798cc88e89f8b1a3a9a949e50a7004876241ebb96c0e8c428ac471ff7 3045022051ebc6a51583d0c8ad048aead3f45dc006a0451c5fef9205ee66d36a8502517e022100d4a4f6486ce45d10975501774a58a8c1c276bc81e8eb69dd3454af5f0c3a1081 3046022100e87d64278283a5f47c5adee5f3dbdf1dfe1ad9b360e05c9f601b9ba56888e7150221008afb86cf7dc50e69e68dc051c314ff59fca1691d7d74731d7a482555ede7c087 304402206472cd97623a3e72bf4df5ebb837f41f92232fc02b0f5816ccc8db65532dc39102200dcdf40950eec21f8169dda9d3eafdb21de6aa670da186d1d5db10385c7e5122 30450220129000bf16ab691c5b5a2f56b816be45cef37abb9a085e7447b3d4c3ecf3be08022100d6eff03f500dbce40a1515cbf460a9949ab22c9f676bfe0229670eea5190e7fc 30450220784337a5528a9a44e295fc87a5f411086319ef7173b63476848216f8863d85cf022100e62069aa857dac39e0cc28b57fd5ab241fba4412491d87d445f4767b1f767de4 3044022030eef09b0fab27815c2c0b0a4d47d951f0df0ba9c7c8ee9381cf47914e95fc34022040390da4e514b9f0458c7f27cdd40e901f4e5c2035adade2fdc145a4d584b597 
304602210088794ecb63145697ae93ff3db39855732ece55b57a188ee34f667186719dc002022100a4a30bfbb11962537f8bdb12a3f3f91ea8d78df11e10206f7f28168f1ee49330 30450220157cd55616f1d456ccec4944334149abe8a838490ff9f0a67ee315f61e0bc62e022100ddad03acb26641fddd341f3284ebe3cae3f381d43a743bb9243f7db86c73d6dc 304402205a5c7e5c0470e0500a3b59e9ad4fa925d3ccc43eadac45040ea7fe0898936f430220261993985d1cd1bc6c9fa987900b67267118a7b772259787aed879ebae0a7ea3 30460221009f5588f7954260f6a752bf4b574d5046c477257011bc5a6be12e723f0cc3ddf302210086ecfebef84f2900c2f6e21e898c12fec36a81ba0cab8a5d5140a9a534acc001 304502203e2f6ad50997062b477d180cdda112af0c8e6e2b45c77913df7becedd8cec975022100c40ee9336ad0b50ed24fe669b4d17550c5d1e63c3ea0a546b7d426a7f8f7c9f6 3046022100a066a77d6fbad428a30537ab7fc2d5acb82039822e3f25cfbc0dd6e1d6aafb42022100e941b374db9bb86c791e065d64afbec150e036896b21f4754fa0d63953477139 3045022100fba2cf56f8969a167d5088717e7f3ff1d929e7c6696a06d6c71057a0a7b996bd022007e1a88048aea2de7ea37b3ea6444289d982934ac2e06f10f4687d4d625f44b3 30460221009825f062399ad195269cc5ecf3e2414088273165e351081e16fa1685103b2dd40221008aabb7ffdafff2cced897298ec597cffed223705a17a25dd8211713150538fe1 30450221008cd74f1f5f949d26a3aa9ee930edc613f9faa35fd9d6d744de46434532a65c10022059dd232cd0214aa06ebc6f74dc04f68a1636253931eda2420d00993f7a93e665 30450221009f55c574d01fbfe6997ba746621f6bb7dd7017bb6b0c6e1fce9b8c0b567699f8022076083b997afcfa5ba7a4bd3ac3580c4baacbfb362147dc670260be6f402a8cfc 30440220544e060023b220458f0b566e54058f5d2bc98457f8d5c423aed7a3d0ed53b23002200952b1ebfa7f812c52c87f6d2863e2b8d208539a719799ea11e24f23ad826f2e 30450221009e71bbdc17d17898ef9fb53b26a89e3569215831f43603d0e1d3ba7acbdcc42b02204ac2ab685facf036e43bea95289cb9d6f3e3a170e1db007db5f4ac22a8452bdd 3045022100e4667fce5d78ce7cd2179bdccb5dedd4f25b9facfb42b0bf57d3c9f8ae08c4d2022018a0cf8feeaaa6a7531f8e4220e37ab7406c3adcc193745681a6cea1bb7e61a2 30460221009a288a443f8bce156a50d2df5d7709f286e2d2acbc7a739be27e03bd2cdfc19f022100f22a53a1bc9a1d74de39fb3422c7c56b8fbf75b0cf7b135e38291717c3653a7a 304402200cae53bd84e2a0a33d46e7979befb29d2fc730acea7da662601ebb933f3e06b402201a7c3a7c8d1797dd3aa283ff7fc05ac83a31f4c812f8455638dc084d06f837b9 3046022100b8c595a5b3e24faf504511add4351af2aee9cbfd31b26db12db78e6231f3f4b4022100c76893b3e603c64b308ec3f768b241fb9355ed419864c904e975c358fc3a3fef 30440220173a62864f78cfe9650bdf0f0d68ade53f1351de700f847744b3a685cefa01b802203d2030979dcbf7c071a37c0fe936e8373e898371048d9fc9f7d8d975d0a5736c 3046022100d0f325f6327204a44b16208610eb7d3f7c9643699f8e71a1772a9a0932bb12ac022100a701c61b3dfeef96c43622390c6a1fb5ef95bd36d6ff439dc29436e2da387381 304402202e73d22abfdd6febd25bac3e3ab6ce09e3564173eeadaad1953cc56498f3943b02207dcc152107c8de07a51176f5c83cb62df5378e68eb8b63da6db29b51db6a91f8 3045022100e4f7b7eb8b963be21b916a35130d79a5a18b159bcb59e926f36317b1dd85bd0a02205f3ebd5beb85fcf2b70fbe5f1c3b7de460282c309fe9bf9b3e0e8303cb239770 3046022100fd64fdab99acd0a19aa1d056343a048fead7c2e0346e9467f07ed564eb3274ff02210080c89fe77e86b24662e950df5c23a4ea7cc7ba5ade3520d58ff3a8b042cf553c 304502210097f9c3f5b551f59c0762b2cb5f7b4e71ac62b25d4817434374bf10a6800a98cd02205be9892c590cdc5dd024f182b003a74705e33b3d09bace3ca969aa3b4b122f89 304402201400183c62bd7f1b365b3770b79b7734b30d93e8a0d2b545976c90dc17e603ed022077451379bf11571a23d76e80e0d22287a31221df772f6380792d58ddb9753af3 3045022051bcba68ae314f1350be849a027de89053db542a8efada0a18c6ea8a2edd5efa022100940bc05bb630218bdf303d3868790a0ac644a81efdc5d6b7a3147442afefac7a 
304502200fa990a979baa569606b580ac43128cb93f58e250b090c67e3d18975bdf45f680221009df416f4c0495d2ad1f3574f03a9e844bd3a284f99e22056731e86867a3e5f21 304402201507e2223ed2344260da7d047f8773766fdd398e826d43b844c2211b279879870220495177da553f82acfdbd28d75050f59424a9f8f617b177e6f37f525a16a43ec6 3046022100c147c38d599eb16a07c5010cec063a70f278403d96b885d05cfe3c703866485b022100f37ff50b4292da602f0762a07983366f648b7abd0b8ad68b78bbf9726e289b31 304502205223ae309d5e5c7abbf101af1d889b3f8774beda9db7323b4c55ea6e963f21d902210087e5108876dfe9242e51a8de8e98ea993f11c5136984d2f598166e909d4564b9 30450221009dde0c6d5d7522deaec2ae4aa43857c1a7d9b319c100fce1e2402e423194568602200eaa80b1d960c2bf207c6640a8d83b4ae31cca400eb5f97ca61cf7b36e9d0b20 304502201dc5283a09c2f12b19b8d6fb09b8190e299c8bf47a60c91dc28bb1d18b70b89f022100f3747e9ec534bcdc7640ac8f823e08b79b4c73a0a83a7d61ee89f272327f0165 30440220310bab94eafa447f23c48a9af6f17809dcfb5ff69ed777ced8d03749e4e4f898022020d65a291c35a8a360b3469c9527a4c8b7ae7d57c2cc78ee2773da7e1ef42c71 3046022100da7384358391e8d222504cce6b526aaf1928b399f259ac5ae336c8b285496ff6022100f564190048bcfd6a1fcfbb8ccf5caf64af112655f6aa951b263a5ed8fd0ac89f 304402203a2c67c5cf2e9f041c7a7a0d567622f09afd7fae2b15b91c8edb86d121ec254002203349fba7bcff10f947816827d9721fe4d8171d780dd9ef246fa76a924c428d54 3046022100ca48beece0708badf7b9dee00c9e99efd57f7f6ec346e735a55672c93a6f94fb022100f6da25feb6c00199b8c811b2742acc6eaa7df027c4ba492b1749d535bde4af5a 304402204c4bb0bffcce2e524b7215b6d2d8660b1ccf577230a87c024ba6e3afe26ee7fd022052308c2ee4a5ee57e89a7cbfdd9aff9a5498acf49e727b267b8f71d038d91f18 3045022100e77183affcbc2a06a659f3981351e1093a94c4533823bbb42b2e9ae47bdfae9e0220196c9a2e16c0970355c8b35d7dbfe6af48796cd21e409e014096de73fa24fbfe 3046022100bd8b355adf809cae2c415df1b26eb41d1e58a294be3370950e243afa288c6eae022100e99ce8b36879b635668ecc390ab9197cb9b3363e2123bb021a4dc80bd22f5e07 3046022100c8ef5877dd7411f705f82e6e40f33516923dc7531038b1fb6a83513eb1ec2cc5022100820db51a7c3ef930c0aaf2ef7a326874121896d1deb6b35fdce915ca6bf2277e 3045022100c2533e4889d5ba7e56d3dd2a1d0602baf7453d1713f5b22bd4e0f13b01b7a04f02204305ebee9a45f2e977c278cbc75b72a6ffe41673dd2bbc9503af0452168138e8 3045022044eb3ff8e172a7a09ef54958c8e535489052b775ea101990fcec17e142a30d68022100ebdd1b9a56907241d6e0a71bdba66bc1d1067a7d98f64767f579ddc0cfed514e 3046022100eece9cb515ee7dc874a8b0cad48bb71bc397f37a325023bd47ca38ae4b604fb40221008f973d6e4c5fa7320a1a1232222f2f0b32319c42184626aad94064c80b1119c5 3046022100d9f77c4b13e4d6d6703288eeff72007a5bef76e7346cf3e1cbd86708ac04970a022100de4b309abd8e77609f894155376c6e4ee4307f775bc98c9667b1a73aa51f032e 3044021f5540a2dbc17248b414dc4191c5fd9013757d8d776cdee864304e9357736033022100ddd6380f41eee96d76011e28974a6563e06d8a9dc4937f024c7f25fa04367003 304502210080a833f75ea06ff3303ebe17288e53c2a5a4837c96860e669db4b7ece17cabf0022037955be6d17bb3ddaccf9eca0a62743fc28b462175718ded239420f9183580fa 304402201a6051342502f69bd0a2c9144d0ec71d931b035198b2a813912061b3ad456aaa02206b7a5ecfe0af9960a7ecf40142f3a1c1b1c5b3c49826f7df45089969bbc84e22 30460221008c56611f62030032161be13fe7d4b24fa19b01033f1cf3c1310682f1afa66591022100f15bd331ee3fdb5dc7062a5b6ec40403cfa700a3703cc352a916c3b1927b4325 30450220328dcefddd69797ca8331082a9644ea7a9f14449c8ba74d8a3fd73885d11fd30022100fb68759782b3e7fba9001d0c429def200f9b7e34d0bfa3fcc65d5414eb3a9ecf 304402205410e18bc1b0c5027578860ff63250d1d60fa9340b53e552250e094301c5a4d202201ac8eea82208b749502a3999b3040a78ee0e60b868287eb01ae3d6aec4cef9d9 
3046022100871675ffc44839e609ff85b42c7fdf3f376734301b3ec25f69e93506495d52f80221009ddf1167d6f838306fde9f75fbaccfcca6d24e60cd1cf2df200afc2d2005a826 30460221008b6d5fe5c7f0911dd08a008f8e1c9c7944319ddf1ead8e674a84f5f4b53cc4dd022100c9e69f231a6e57caa5c0aedb73bcae617f8531a3865239755dab73f6766012db 3046022100bc0802909b963c369915e08b72cc379e6dd372090130fc3cce99a9ab8590c88a022100ab02a5bcd1f05cdff38b44b3a15e138911a0561d36d0b600f773c5203dce80cd 3046022100dbbf2c298b9346176e6671e1f31640a3c983a23afd6587e8125e3f5b4989d52c022100b80c3db87180f03ba8c191434f5e2d79a3a9268b7b3fd0ef4dafa6f4d8750c15 3046022100896b8c1998b1961bd1f93d369a6359e930c9a7bc0347e812a9fee53c8ff4f0d80221008ad80555ba5178b9cdfed8cfdb632999c801971e96068c5f885d780b1ce86126 30450220353d1217a6a34bb064a079cda20b5275cccaa37f4f71259df328c1a5d8092dbb022100e44475560e9d77593648eaa83b3d6ed5a80d8c6f883d9c9674026c1fe629b56f 3046022100ea977c50619866daea8e830cf6d9100b0683df21dd0172b06b012b21ec4968b0022100d4e3bb25eabd711805a18c5a60cf551c70a5a04fe68e10fcf1f37371e89d5d21 3045022100bc0d0c4c1c150f18f59da4a7371386652d710f6ffa345c162accbcc784f38bdb022067f0fb1ea5f20846a7d8ecb86a837476c26df7b36f39e131572730a6f799d5fc 3046022100b2880178a1960ccb381712fab31b9849d0fa6d9126d72b701132c7de8044323f02210093d728936d1388bb1f593769e8ee3d8b1cf012a67ccd2d3b0c2b27f3f27903d0 3045022100ec9851bdfb9a8726dad1fdeef8ceb079bbff113b468435cc05de75d2f64e5b57022058c6bbe49e72fe2b736f1909d9c015eb877ec4e5a4c76abc135b52be3e5d76fb 3046022100e1389fcb1ffdf6b91f375e1b80a2a9ca3a2f5868f7ba17d9796c4e4e6e2a75f4022100bb2f5150fc9aaa0b9369a10aad04098def8c0820d7b02785810e8da3f7505e40 3045022100adab11be8c4bd8db7dc0ad8827c09a7e7b946c1abaab0625b902b300169aede30220285c19e6a1c0c3f3b2c4a5a8c329b1cdbfe2cd1bf8b30ee990754b9701d4cf08 304502201bae5654bfdc8bdfde35f59343db779e3194412a894addef15770159faffea51022100d61fad60774cf7e4e347c11d797802e3bac4899686a70c305247d124c7b7129d 3044022078fecd40c5d7847d311f2f50dfb784b3349df86777769059ea1e237ea0136a5c022063a5457385e8f41abe608df35685d3a1da461e0d3c9a0037ec47f81d93bfdfed 304502200f76fd846fc52410169d46d2a947fa93bbbf666f656d6d801bd9c554ca6c040c022100e2a92d945e2be0d70e3cb425de0b79c2f5322d6961207a1b5b9730aa293efd57 3046022100ec6f9d4435e2327431ea95c8c0a810cb5009c953d406847175922525adde35220221008ce0a20160809b1172f614ed2d9f7cfd66070bc3950380fb3c2fff380920e054 304402204390c4a2bc5a4bccb1c09a74fc988b55b3b85a932b6c288a8189c88a501a068e02207f92c612ea04195ee2a6c7715129c6c76fcbcd939aac355c0ffbd920b642a3c9 30440220600b99e90c2753d867940cc3cf21b56c9c4103ae67b23f0bf6930b2a4817f33202205408c86e4a597685f7115c24b2f21e50493fac926a9f52c54c5335073ff3269e 3044022064f9672c251a45bb9708dfa971cf849cb8a3c783289995ba6496f6124e15b93002207d7d84581567f004c813ed5518d3751f2d018fd32c486133d338a1dae310cc0c 3045022029ddd071f2350d9371bf7342a49869cf0e9d776e2d5ff7b7d0de02b60284bf63022100d20234f76e970e6ab3c229297a2cef1551ba19f6c4c07abd306dc39621137294 30450221008d03bce851afa9488f670dd315dceaf191feb8460fa3ab4649140eaa6a72769a02205d6c6280b31bcab0cd182126d708e6834a7ad18440e9fa48df2061d5efaa0b74 3045022100ea2540e2580d0adc36bab2ea97e5c708374bcaf9a4738b1766698770a236db3d02201e10aad378e97f8e7c3ae1d84faed97e065be142c2a4ff520ec17cd410eaa2fa 3045022100c2fea467eb4f9cdce5c2497d1fefce9fa35a1ae94789b6218acc94c1d6b6823e0220206c682d021d9526520bf7a61c289baf5b0fca78f6efd04ae31e8555ac5ddc05 3045022100939739b6a50cd38d72cc76a8f99a399098ae98f821a09fcfa3e28413d4470abc022003e112e6bd85661fc685a83ec9114cddfc60b73583d9f86668ea0da2e6474814 
3045022100a6eb877e0039be5fae67d993979c48dd9fa4821515388d92a7f6ba056575e6c102201779f2b1a962654dffd94ade8d7126eb497256394838de00a4d7277596353da7 3045022012b277cc74ecf751345e2154d73e70f399de6b7f5d608057476471238051a286022100b7ac1bc3d5ebe4372afba937e168796a1a9c3a6af172d84ba59f1000a2efd921 3046022100befef2891d8ab95a93a81e410e64875858a768a027ca99f7c005d4588b2dc915022100b8fe59b06a04495f691d685b0f767d987d3b30610d48e26f9b4c4e7c347338b3 3045022061c1d997c550573fe00928cbbf7ca1fb43a7373d163c342afac2298ff2882bed022100db40412c6c4906c15edb8e8330460020b1bc090281514130e246c0d5bcce3e43 3046022100d3c0898921d08d88480f514dd180be7633cb1c3946e41429162d3987992c0e1c02210092c0e5452e110f93ebfdcea82250c7d35a7b8f14552c6613ad437ee2f168e59a 3045022076bbb1780809dc930b983e43c517503b1ad12350c126e443c2aa331ad47ff70e0221008ebc89278c7a89df291e6426afa4b44180ac1523ced131b43fc10d04a8cfe5a3 3045022100b34d116d8786cbbb5b7ef3e42ca718fcc1eab635fe4831eb62a24171c037bf7202201844e72c2733ba91e82127a5a6fa21aff68ab8c6c8877103be86456ae4f63b48 304502200ca4b607f58411215e2df0bcf31664f9dc0a55f6cab1a3de2952c5fbf0f30b01022100937f30dbd3e87f23af97be187b0ea0292c7d76b40957425aad1c6e6ee1d30830 30440220706e0e829aca981d4488a0070da49558dcc99be34c08aaa378755d391ad0fbb00220387d719a0919d4404d07f02eab51914f5afcf3153c1c4d40c01051dfb2e5b22e 304602210098da2a97dfbdead58c2e2a23b39371630face2eb9d8804fcb9e70613d6a01560022100d0a59af2e6f13733e50adacf04e2112e9e1d76782ecf898abfaaf95796408b6e 30440220125c723c861ff9f52c6e1146a66e7b579dc66e6b2d9aba8031cd6d6f562d23340220639fc768716b1462a0dd3ac66e321261af8d8267bb9346ff9923ae95b14d4b02 3044022019c7ac37765a63200de63602f28b21d20823991aa293b1aaad63d19614daf7280220424c2a35407745c980f82ffc5a4ff8f03dc085540da769f24314e7910b1ecfa3 30450221008db31265b7afd36584a87d6ba33f0bd85b798a14bbba4b87ad3b8e2df54389a102203554b9c4f872c4b43355945df402e5a94544a9e4631517c841f7f39bbdab33ed 3045022100c63dcde81d6b2a7f59bd80e6cce8460e63321184c50f68b5d771d6f74cac4c40022007356ebe3990079b0c31c6d2d292c7c8b44cf7c0c3e38ca62d0e5963a35993a3 304502201e5580abf449b863fa077a500a2a596f26f533d48c2c0a7e9ec3555010567f59022100fceefe5babc681a665c9bf03281fc9ceca1cfb384d9bcd0b96bc83e78c03d856 304402202745aecdb7d733c7fdfa34fdcc0c11a5e77cd85e16955d83b2f4470cab10202202206ac87b1f2c574362ca6c3b4c1f9b55abca649153d9a38ee3f16582bec0026914 304402205a4fd07cd22214b633933e205879ce20af650e4784e9222956628762c09b0b4702200d60160fd053840defbb70eb536ed0c882e387a9cbb9a0879bb6b9110acddc8a 3044022066441f498ec7ff638ceae09efa5b4031c147694825b4be1d88027813fe011a6502207378bf32639278b296887c9578d4c9b23a2900fdb81496666d4623356f83f0e2 3045022076b89d964fb8edaa82a7709c24407b91b51974b765263d43248767a54299b552022100b72fb2786281c09cbe6be2594d2335a3cb478b3f0558d2af7e0d3d39b68d7c49 3044022100e104f17799eed3691d317e6e91df97456114ede4bfa079867a409e9e71abf92c021f4966d31f06e7217e08228ea9ff50ed6a86c8a69eed8f24185129d9f2f42390 304402205c5c5c06250f8effe1ed9937a1de68a1c7741d256ef3812bb69bade482c19673022052e88a96114267d0acf89a6bd899e30a5f67709c1eab49477b3d8af840f7fcd4 304502210099ae358d16326737d8549e4c3a4b9e46831f6494e6cbcd6512a2339d9416497102207511d90602ae48e605f8ac09faba771edf064ba390718d5f612f8546a33443bd 3044022012f994a6861986a32cdf99a0ee62e1b87ed131a51e7301b95135d31987b379fe02201f3ce634b5c0ea81b73fde43774478d576da4cceef84eb663553a19eb30beef5 30440220437ad74810b594f1a3fd9f0bb52ac62ea0e1046b0763fb9111c7365c3778a9e702205d91f4e4d3e5b255bbed108acaa9d4bbf9a24b41ce7bebc100b6b96bdf755907 
3044022008ca76432fc5ee122fa3cc3a2223ede1053c55243c4d827d587d78fbbf07f8ee02206dca82d328945b62873097c1f4a5268cfbcbe2b63d80eed6242fe6519ac7e095 3046022100c67d1de4d717b48025de44efb10af1275309933367239a1e90b177600fd05e0e022100d820a7fab6c26b9cf5e468274cf24fd015012f0852f0cdf7d3e78b4f3b8740ab 3046022100aa56d2379712f9aa63e1097a03213b6f28f886269dcaa0207a845b03a0bef923022100f4b2af05fb6ef6ccd2c9e6178d01705c131810878c6bbf5b6789c477ba4669b6 30450220623fc603dd74229c5ff174e64d67c2575453edd722432a9cbe2d02f5d7a17674022100867eee04ae7b7440f118c27823a4ee04a47b878f02f4ce3b2100c80d92c56b21 3045022100ed66a9d6b81926511cacb242600b50de304e38aac83c9c8c4d1a9bf0b179623a0220029f9bcad80b088f25591b2c5ef0319cc77855256427f19ccdcf9ebb977d88dc 3045022071358087037fd30f4c1e4802db89a29f5727ba1cfdd56eeaefe575193e9b6e91022100f3f64663223f2d53bc57bee86c1b429fb40f9c95c14d7f209c776fc92fa07d36 30440220038313abff4adb0d9764c601f6f013eb8d1f146268761e9cfa70465b87e5596e02200fa436177f7ef2246941bdf24957674c9c5ba71df6c49ecd3d62828f974f8a36 3046022100b195c309ea9ba96c71908080676c6d764e10fda672d563d8b83abef2d71153c6022100bd30e6a317ef4c1ad63b9c77eb763dbc04f71325263c066b342cc8c3bf20420c 3046022100e2001c505d225c1f546a28aee11ea1e2d61bb166abc9a0cb859d401605cfc89b022100998fea7bb713f5478aaf82abb6878bb07b27ce52c89906d6d6371201db741860 304502210082368a69b518a908fa166e4f352b158c5cdd8b5cf798d1627479d5d38a7c8180022076f723110a3de1a23cee3a7cb175c5cc47c9e4075d9ae62c94fcbd156b6874fa 3046022100a20f906e27a9f62f08dfafc1b043f1ed28899ba82f9aebbae92ee263bcc633af022100e99d742b6ef7df5e6ec77a94c7de682dd30f4bb07125df96ac7ab2ba707a2cdb 3045022100aef90e405f3cc12a76610e7a0ccdf4095a3eedd5c88f1d21683c8cbdf46754b002202e7de41a5eff852b48c8abe5cca05dfeab66ddfa096d1d5d141edb627c14484e 30450220122cfcb25bb76920ac14604df274877936d37378f869fd32369da20345cc07b2022100aeed626ea0ff6cb6f961242735f2b7630a6ac440f45a2a2e36c4a560e42c0ad6 3046022100d95e0b171f13f45cc898a215927f5db557aa87db17aa4897834c0a6d58d83a14022100cf41450b1b513efdcba6c48d8255e0f2eba5b41ce7f6e17eadc488df1f6ed33f 3045022100a5fd0e2fa4b01fbeebcccc795e2ed9622e7e8cd4274f248fb4d4d1952e7d210202203e8308132251adf115650137facacb1b5913f0c7676feff2bdd6dbb52b29ff31 3046022100d12e68da40d9aef921087a1b6b790b7e8985c34b8dcaff3911913c5ceab34810022100d1c795b596d0b5918f1c1c37679d14bb85d0aa77588b0129c95a3bc5a0879280 304402204e1b531e4cf93e90d99afc50bd64c709d6f951e6a6ebb61e3f1525f94eeb98360220108d560f37ee4929a54b33988fc35ffe40e57fad680779c217473c5bfe1d380d 3045022100e7d5ec425ec1b25c401486583c6b682a3427f9a5df3550f85707aac08afddca402207afd58ba53de983bf04978140e97b348e70c2f2f2be5b32927846acd0be26149 3046022100f36ad03f050bffe9d9548d9641b0b34aa4d2db4c44f8ad622ed2f4d46f6061e6022100d3ba1bae373128d22f24947ec8ecc50e55d72997e86a386f7421a65dc8cb63bd 304502205c2bc88ba81ca0e07993fc870a4d929e29670435ca2132206f429b515ba359440221009734c7b8fc56f5dc6a0ffcc0607f564d2d439ed25f0c007a58c1c1609dc7be2a 3044022077e588ba2f81c2f24dcf59c6976684abbfc8bce50aca911bcaa0122592ad006602200562dc36309f73e5ecf520f5746e5271d5c9a89d90e02e1a949d656abdf855f7 304402205878f2f7420f62c1ed3f231f9e224a2ee3a1c78243f2e7078e371bf3e5c874480220116efa9248289a96a267ad281b5b769bfc93ac5d6c387b828a0effc11c6b0d16 3045022100aa04592dc290e36a59877c4620369903a9f884c566eefca52f8e6e6289f4686702201a0a2ad1dd4b655bc4abe3dfb8a33c8494fa6cedc750c074fbd91ce87ade6170 30450221008b5986321cfb3eb4e8b4960369443b2f8b882edd2f362039d778979ddb8f12e102200f6247ecbd6fb4e69a6c113b00d42cb981f7f17317492557ba93ff88e9b1ac10 
30440220511033fb75e6c810627c4167c3f9f4b12e22a0f77951e39f909b9132f39b4f7402205e56d4d47f280773a16bf64a789dbce7e35840e3604a6554d1e0ad699ff9034f 3044022010b06355d12e1970ad7153452cfb14d68084426adf745e8cc263b61a805710f502207158b771a98b8abe82e2832badd5e878590fea3486525a15f1b1a01736f2492f 304602210092c1c5042486a13efe1943f97fddc758bcad4596aad85bd432d0291fbc30b44e0221008067351f9d968c843866f4d234a2553720138bd3a178130180f21c2d08c77189 304502201aa089d5cf244050e307760cc32979dd67c4718b8193311bb2d22a3392459fad02210084faeab5b08f8d7059dfefcf5ae2ae222cd34d3419d1e535fc301e3c54d6f20c 30440220474c1611fbcb30b0b87b674f5c67642c0a6c47e5830a955056642849fffdf285022021d3914dbec70d6d5f331173d22a1bdb3f3d141044cc916b96aee26837a18b8b 304402207691d6e3f4970f4523ee2793f1c69f98693588cf360b8bb9db67ee27b8bf6bfa022028c1f9ada2b72517eaa743870019eab691a16c120b5a33ecc8457623315653fd 3045022100fd05068468aac4c1fafc46d25fa933583ed8778efedcc7bd08a354423d9c74bc0220317974934971f254e1609bf0f7117e0d6a4ce7c97d37039fb893515b07998890 3046022100b89d2886f71bb2e64b4e4adf643d5da0a44ab2397d847cdc979c744257e67d0a022100b6ba7736cc9733bd3fa49afa1be754469074d471e63c7936d4603afc5a666158 30460221009387167e2def1f09134ab2b13aa5293865e43d7858f292c61b8271d535027760022100ec7b425614c1cb8effd75d48abafb928c95a976728a8c7040d2ce70d03035a31 30460221008a069d3dcb6e947fad2cf655dec44d95bcaf42bc0fbc9bffba2f47ee39058d22022100969c48f9b4b0c332a8ab23ce0c3c130f3e0ad7d1be2b6af302eea5449871db11 3045022100ad1c012e6e7be248d2a37ab13ee3b5c02b628d863efe2b9de37bbbe1e622ebb60220072fd4ffc6745cc50250dd4b3fe60c9a9a68ab75e78096b493905dfc4a78783c 3046022100ef03fe589433a03acfe3a8e889fdf5c5e84ec6838a1f94e265daac791efdbf15022100b0488f9edafb48f9d4e8dd80271b507cf726f68f81e63dff3162d5309e8bb835 3046022100d7c349a99748595062ceb33732a651367f9d56239523f33d1521c45aaf9a33ed022100c2c5b9b004429536c73209bddd9a4e78e886ea796db4cc4fa97301b7b15f0348 3045022018a57e7e351057771f845cd2d3ce48c5511a9b6b9d075d2ddbfc93dbd4fc18e702210091390602071d3237915dd294c716a106ab0c71c3f71f1574ed14a8ad770c063c 3045022100da88280bdf9f300bcacb6aa1ca6086d535e9339016d2a9a947e9d88a84f3792402206774709c28ad9279c209f763cd55a22b389add7fc53ebf5c6434e5a94fc70e36 30450220739f8ad752d71ef64a9a8a0911632a5cbbc6fb36086da13c4d1fd3e780d0ead702210091c8848bbad3c7d2061b8e407f08bf5474b8a0e48c9fea7f7a795c577976a945 3044022010482ffa42e801faad288468885ae48db26c633b585a31b2f33a47bed8bd911b02201e0136d9beffd5147077e9159e0d63265960c61cc10168d69e206fae2d154b29 3044022055de6c285d386734b234dd0e393154d86210f01deb23da5862eab0b95aa5da2302206d9a7a8c3820163fceecc00ed444c48a4642d09ff8ab58aa3efed28fa11cb580 30440220665c8d687efa6333e07d4a8c2e0229c4cec8aa6a5ec5f3609a592b6bc5edfb6302203c319ddb55abf394807b60d4471fbcf677d4fc06b55cc9a7c051806a8a6bff91 3045022100f1bac9962e5bf731e6d6c8b0b63eea85f181217fa4e8e64bff51eba403be721e022018ef3fab2a6d5e1a54d54d38967719a9e42401c4d5becb10a6cb92503e322b98 3045022100a27f6f8e3685e44d1a76baf01dd9f6aa0cb326ccdbb703762c2991af24299dad022015c5e417e183999458cbaaffbf137c9ca281063bca02de7fbb5c0130e2f546f7 3045022100a7157801655941583103d48407f2bd02693dfc4370cc19df173b77ef3ae01f5a02204dc98ca67eb0bde227bb1984c0cfeb8a3e7876d219327ff157fbaba00a55d20f 304402206f890d59a87211b48c5a63c1fa47bc22f29d51c873d0a052e6fec276e82492f902207f5aaefea4decf25f5bc24e97eb28c64fd9ef3c47933bb13fb965131874a61a4 304402206dccf3e61ea38260f440cb5191204f194823900f72fa2508d0a6cc32881192a8022031a83b6ceb962d9723957cd3bc2129728b9920bf25e519b69e3e3b10080079e1 
304402206e664721c403558ef4bbb39640db4faa26a63d05d5338d85aa804193db3ddfac022021f503cece5f50e53688e85e31441a920c49fc79d3ac1ecc4cfbc631d3a186b9 304402206df12176c38dd5d4053cb042bc070e525e3580552dc52454d29ceb8ff0d703de02202383e32de23f2b7f50daaaa5de826b7a78457f6150207a648d922d25421cca79 30440220455f8ad3977ba599e8cce2b04b2380edcc07a2aa5a217db84ca443e7cd9928bf02206998129662d4b31810cdacf923376fc66ad4c1d5656b0e1925e57c996ed2318f 30450220201ebe76b5463226b6e94984e25039604a12e849873573d12eaff024976d4aab022100cf1f91746e4cdf6ac0d317b4af1b3aeb6c9095f52da4b44fcbc80dfed78ad597 3046022100fa6fa9466baaa35f8207e0f46f018f60837f5718eab78579cb673d45838b2f74022100ff808263aeea9432f5dc5d94464d6a9e824e8977cbdebbbd8e5808ccf14d47db 3045022100e5361e76811a3a42c6edfb8ff3a67b2548aa819081bf89a3e1d83f3b0cc942cf0220452a4f96d3bed55594012d716199693ec20bbba680d87a7ac3ede25fe803799f 3044022039c6c241f2c5bdfd3632e9864927fed7483ce3222d078582625ef271ca2bbeac022039e1ff546ad655d6fadd3b4edf4030da826ceec2590e74542649bab433aacf7c 3045022100cc1e2164ed48d1ce2cab6bec25a08d5280600015bec8d895d01882033df3d7b50220351e08cd696b868785857b835f9357702b94491eb3d8980cc2aaedabb187d467 30440220264afd13d2fa91936b15c6ef8d3aa6b71f26c3f9356997201d8ec173bc0b1582022077def0374fb94d96809f322ac4cfbfb0cd4bfcad830d45cc120a9d3c4c91b67c 3044022011f81c0b1686443440751938d44b6081ce2a40bb54d1dee22a2a6a66b19c9f76022048bdfee3693d5691d1af51068e2d99f92a8a15eb13d6f840a5d6d88aca04f34e 304502210085c6c45077714783c5e9249dde38b4ed31cf6e00b1bf83d623ef768b2aec7f23022070db81f3d5bb6a0efd11d3c19ea790b01b143ed3a698ab0b20cd4c5e307d7bfa 3045022100f8f98e2bdc9ef9260bd51d3c76e3410656ff74075b8dab3ad9ec4a838548e49302206f6b33f7911347839455e42e955d792be4eb7600340f13ce3fac30bdcc08db00 3044022077a422317992a5dd04d252943e231ebe6a28d5cf282517688f7e25a6000cc85d02202383eb58afb4dcb2cb64760a5e365b04013f6fefa61588510686de4dd3ece79b 3045022100ba3078c98549c5e7defdc0438f7a245a7a2ad3569f270430f77aaa22a7e53d71022026684cb6ee0a8f8b615550c505ff04cef4d01d198b0bc7ce3ce93c7a586becfe 3046022100f2d71237be9a8bc35ae44181eb7723432e4636cefeae0343cd9d715ffcf00370022100bb3e02c45c57fd421695d35a8e4210fb0694565c6b281b74134a684e0238a640 30440220597cbfbd2422f8dbef79a47248120446aa8227f4a3f4a2864c8c9083f483917f02204b0269f93bb6a7ab7b5913bf772dac9235e5d38b4968528f07d3f0a7d8be1e9d 3045022100cbcd613e14d8a9978473f36d0fd9873df589fb3e60bf28731fd1bb1b8814eec202206d788fc22d230fa858e9e49f9a8c3b14c997a05eed1842239b961475296ecdb5 3045022100f53689e16e2134970b6e342a733da89e8c35c32bf84ab038b7c35e8f2b8ae82502204309129fa7d528a43ef5924d4b4af4938073aa8cc9f554575ede98c42ebb4a97 3044022013bcd82d802bab14cee383649cf30548615b5fc0ecba19e693833615f12a176a022025f9c5263869c5a943022cf34d10765f61887f7c1ebea561489784301c245845 304502205abcb3cda271581658ffa03a68919204279286c86420db24d2b911fb608214b8022100c761bef1eee159319bfa939092e0840973ad4fe341b72bb62e674b3b97c70f27 3045022100abe75e56d363ad243d47ab5692e642a8e0246efe2925a9a94474bd0bde1692480220049a8479545da32dc88fe2b32eddc5acc914d078f48400d54b20de6e375a3913 3046022100a789002ba9d38e97c4840b240e964ed0f152d3f3a5d125e0d2a1a8b7b494035502210086216a2554b1136c34447d995d326eb00a371774d9bd52659b13ab38b0196c6c 3046022100c883087b233f97ad7b38c880181164f8df7f47de391ad919e340d401d3da546b0221009d91470a28f5e4d33a181ddfcb3abbddf84abee8728bf7d22ced7b47424bf936 3045022100e1d569412f48cb95827cc04276ccd61346172f5351bb40a16f8941407518529b02203b25fd8905e466ac3ea6b914852f81cee0e3b1cae11379aeb07a67142e8c308d 
304602210095011542802f070bd7d1ef0c5e56018ecf09a190d347e513aa4e3c955e8b7f7702210088699a2c8ac6d9732f5a72491f7d8039df6f3801a8d3c6354860eb34d4fcf0e7 304502201e5dda227d3905273565a9034b34ed84221467fa1674534c7b3e774ca7a6fe87022100e163d4ed2b37c4a2dd45a7c2315354a51b16c014b3cdd059c4f2e19b84cdb25f 3044022071d84a715160f08e955d6e6c9a42d9ad18f00bbdd64b5f9a072656f14dd8a764022054a83ec2594ca9c7c129a1bfaaf34aa411b1c1512f5358cc486bc05b16864656 3046022100b4d67462138c668540cfb8756216284139ae2defbf879e99702cbd6c8cb65c5a022100b0b97ec44536f76da63d8f00c9a2957cd80c57ee7774a8db9a87598f49c02e48 3046022100c8eb20fb6cafd6173f25e02a6f53ca635eddba02db07db9642ba9eeab23de298022100b5335be1e7c61e5f5242a8af5dcefceb2ff2e7cc25d0bd715c0cbe0f1b4059bc 304502210080ab9129b5e97efb37c093d9368c1ba7d612591d9b0335a7f946dfc232535bca02201ce66379a281601de0b6dbcd2cd899319de927aba64aad1f7cacbd2083c01627 304402207328e687225d79d23e0d1bb52535f4669c81047b5bed35a64a999ea0f339f8d3022009a64194992447de7e894d7a7001dc937509ed7220a7954e678d14e98e3b3f42 3045022100d24eb1595403cc1a35010cb52e31f1e6d4e73d2035f628d384936ce6986e8f1402205798e5b23314ec04126ae673e8ed4c1803edb4b1faf7bab80d994db0e20c5b66 3045022100b9b1b718f93ce8808cd331c227bacace3238e30ff1688be00d9134e8bc96d45c02200c9dc3858eb197810b653f3b788bef6cf484bd89450935e5b4920160cd412ed7 3045022100d7a3c444153293fdb75bea43de3e1eeab53570c377a0e3f80496c1b63ca07f4702205ccad1c99789ad5b83db49a5b85d28000faf0a2ff98e1dc29ba52c7ed5d7513a 3045022100f7a99c41aea2f190b14d3542bd64034607ec08d2b954768a68ec073b8664461502204b93e12107770d9866ed12f0165a9264dcb2844bd1aadec6e1821bd276fec862 304502200b6f55a87f1dd27240db53775cab755b1e9cabc29a5a810ca717e63f5ef07f4d022100c609505984ca8de40803dc7a93eea00a0d917b2039f5cb0ab8d52fa78c3848a5 30450221008bf03c3f8bae805107cb9d0aa14ec2c9153898bb2d0950686deaa28376d8ca8102207e9189513c5cc9c1f169ec3aac2b615609f11b0e9f5227a6249237567a48466e 3045022007cfb766bea2cb9f643b03c86fdcf9a121f58fcb814f7f231e708f4f237337f2022100d747d46867c5c9dd8334f65a39a95c42720479a38880e3e202b96535ea960a2c 3045022009aadd7bde50269c92341605ba026bb22d82d5c8bc9db5f8ce7ea5d5be7f25e3022100e356328dff54c983d5c2d4a4183ff43ba8044acdaefe888a493b3ba9615e5844 3045022028a9f278b99e016029af2608993c186376905f08fb78cccefc56fb50dc0a73a4022100961a1532eb5467f4d016bf398ca1dce50e8795bab582479a43aa9b1480a738f2 304502201a366265cfb112dd9bc6fc47c7bdf0e94571d24a0ac5b307414e4c69cf38b346022100eeae9cd1cadde7fcb8e206a97cec287e39075b7af16f61b1c6ba16b9cda45ffb 304402200f9a3b80f2ac3edb7339719605c787537296bd3271c6b1cd2a9d5db1680b544a02204cf8089e7b1a04287b2cd3a80966c9129de7720d4f8ea3a9804689489bd7f2e9 3046022100a36c33a0155aabc558a4400c40b8bef56c09d8a0cc665dd3160da1e745f7c121022100ec3f96148795f6a1ead10579b8562b3146e89ff2fda4760802dbcd4c1c223919 3045022100a8666b181d6b31844d68d010b6f2433a63a5507de5470c7f3b35ee0d54acea480220174c93c27bc88cbf9a52c2ee1ebd5ef894970468f4970a1a067228e9a4f4bb73 3045022100c1b3dafcf1df1d09f1d52ad20a0b2ac986434c567960875ca3f6c391cff7e6b0022063326fca4558cae53a882797eda241e3117214180a1c74503f4670f49bc7ed05 304402206854bb3482d06175941df5d7041d170baa9aa3580199fa983dea117c732ea7020220681f483855b753a18093cbcf6ebedac14bab3bb6a8a18d37ad83c54737850ab5 3045022100e410c181bc99074f5cfbfc08238d5d5530791ef7a4709dbd4b591c58bd82289c02201d067c8f5c1715a9ace961b6c2e42d5368e9f7a94f178ffa3ca4da776416dc99 3045022100dcd1429aaf2c1c4c676be1cbc91425aaddf749c3a8a80911724711d87d2fd44b02205461c2892bd811bf714912047968c1f42561b96d646444e69eadd97f50eb9917 
3044022020ab6a0b7434f56700dca7490f4b668e463e97201341bae2ee4cd45a08d72abb0220068a3ce4c4b73b1fe2b8dbe6c96b5c9ab07268aa5ff2fabbc04b5ac55648d813 30450220621172b9c22df27f801c86b74521239f5bd5fc8cd8596068949ff51dbe8eb088022100eb89e8accca60a21bd94061e8f6081b1370595dc24192fa519d1493823f70dcc 3044022053ab65fb90d08d7a069d1baba7b086777f5f822c0ecbaf9cbfb5480b199d64950220725c22ce820685386f63c8d19ec0682a6ae74319a655fe611b537581a7bc2b7c 304502202a1bb443b60d1f6f02a2b99420db890247a8357bd38ab3a890d157d0b9b93a69022100d15585314eb021421b7cda8f839a6dd8b79bb7087878d88f1dcd661691f90f75 304402205e620a6939f94891016e829254243889359e702b090359dd7623303c75fdccea02206ddc6319e33085fcfbc0cd7f3f3965c85c2efb7abdbfc5287b86c5f759a79539 3045022100dfe3fd8620fa4577d2a1f20150d9e496a7bed17ee2fdae9378972172e0bb1dca02207b6f8027846eede42963beeca277776fdb1b47c4efa5d3430fa8703c4cf2d685 3045022060e4d4c2a581dc362fd0c3688d4e95e7ef3b6bc2432d6d8192dacbe4cb1bd22a022100fa751db95718d7c823375c759a8d0c3bf9229ccc451fabc9049a5cbb610f1236 3046022100cf7a66be44b156c5ea942382092833993a31ab05c7e48c311a116fcdceca1eda022100955fb2a78f9819640045fff06d93bceb905d287b63a87c756f91c6ed87fc6272 30440220721013b98f01199fbc1ffe6bb5542d5b5127ef68c33f224a82892c5972556174022012808d43c320e7df429be7a2b68452dee5b31de39c736bd42bfd697d0b1b9139 304402203d566317318197b16e188ecb4891db366b4859efadd993544dfc7cd38d0691a902203ce09050f85a68c36474f61319df386cbabc31f343c33b6dac98c9bbcd8ae95c 304402205a3e91aca4ac4976f603552654e9c1be959d298b7393b80922bda33389551c96022021f4cbb3af19664a807067a0d57d01d67fc199967e254ca9035584f877116fe3 30450220122a05697ce540fd75da0b822221a2bc88e2c1970bf3edf409368a5365083156022100aaf0affa59fb8b65c0dd79f823607f957ce1a4f78ed311c9f1fb2b3190853e70 3045022100d68120fea9a5ad1b131dcaa0f5731f6caeaca2f044ab8748a640f4987cdd1d84022052f74d62ea8f439a4b2449a9b6ca5eaf68fd674cc6874e8a0ac2e70271e53aa5 304402203b9b6395a628e7e4053f1fd676f56410651563fd26db3e0a18704d80377a517402202de77e7a0b60993b6de050741dc54ea6cf4dc02fc982dd8519336d70e0ef2306 30460221009da73212f9a3fab1f1f5adcdb1281e894d0739ecd0b72f3cf459189f7972de4e022100ca40b0295cfd481cd0b3b77266a953bd32cabfa685609ec14c23ca39eae9ff02 3046022100f88599c0b348725e840c3ee7daf44cc24775adc731cfaa5a0b5925d30eb66684022100f71fdefc744e992d31db69a0e3bae7c4c2c31887e25bbff726672a01711bfd34 3045022077081596dac17f974dc2e95d33976ae00185265027149375301739cc78d7030d022100a84fa76d302de5086622ea9fc78e48d04a28379a057348c9729cdd1720e5e22e 3044022020fa951ee75047c9746214455f82c50a7f6c1c3521dad58c4a88a74e3924e90a02207f5803507af4faf6d66cea5fd9b5dcc9a54997bf89a84195d35f678283a8d849 3044022070884302e1e2b7ff480ffa03132cc55b146da8dfb4a57a7f20812ebc6a6e1eca02206cb88c3d6657a8fd1a86d1331efa29bdd4aa52c08065e86c888eb1ff240e1967 3045022100b0f2165e3ad1618ce8bf2cf059c048855da8fd561ffc3dd0d3fd9ffbf49fe4d202204066690d4b2ea9051158b744c068ede48e41a1d0d2d2cf21377f393eb86c812e 3046022100fa3ebcd811cd7929779eb675feba515d5e821316e2fbae300aa7b4193710a826022100a3ff686a5223246bde13ebddd12b98f18c13cdb46a92ce04a3d1f98f01d24786 304502206f4e095fa99d4900a30823ced921a2f33577efdd32e5d5725834fea99a3916f2022100be8d48b19c24e83a12128f84c0a52087ad560c15a7bc2ddddcd1a44acdc4ba7d 30450220522625ba7d0edeabe878912f7f59f920bdf9d2cd2d0d01ed4ab0cf26a7778c96022100bd49b5df54a3483156de084699520b528b69413cf5932c164f30a399e8b395ef 30440220722824e4282988c406e282fdb20bc04e556bbb7b73da1c3077cac4d0462d139402204b167a2aff77b8b627b240c14261e6bff939bd05007904311772488db05cf307 
3045022100c8593bd9c06907089a845455cfe19b53656d20e4a23853fab6046b8df340e7ba02200b00b8e8c16264a172f8645fb29fa394ba902db6f10bf281f54fd62f4f35f952 3045022100f5b025e6fb67ba610da343fa16204f7cb49f74d8c3d275369953c99bba9ec3ef022063425bb4007f2dadf134b41880664125f341ca49052b9fb90531fd1e7eaef395 3045022001b2a212dbb809f52b13b2ca138fddc1b3c6953851c826bd2c944dedff565a9f022100e9e196a522b4ce7edeb45cd8c319a805ec0e96ef39e33ed2f003f54e5200b196 30450221009185f72dab87de9686aefb24de9cc1551feb642d494e71b2e6c7638f2196a37402203d41bd149b60148845482593c5fbdf098676a6834e0d1d87e584bf5a822861a3 304402202c23936e07b4040734669a44bff590c920b5648fa590a7a6509692fbbee8bd7b022022ff13f985cd461afd132f2527e06da67d7c9127475ec1f2590283285c08ba87 3046022100eac55fe5af84a93446f4c23f742ca4208ee14890b8892414a3f21f38b6b67479022100eaf4d9c6a486552153b4443a15fcdf3ad83dd7f8834167a9ea9d137db1fa6c11 3046022100a567206d01a369b95d5c1c6a66c247f452e7d44e09d6c67b63d7a830fcd406f7022100994943ade17a7acb8be3592ac8ad01665e6ab7f4ab51004e1c984bdab88476be 3046022100bc342dc009fa69a02f57e636c5c4490da62be3c52ff39d31f687fca2e9d6d5ce022100a1fa16225d420428b1d88c6b424f1256c93185d7ea2eb2845e3bb5badc8ac0b6 3046022100fadc3dd00e2d90e65398d79212371a76640856eb7fa0a20802d61f46659af2a4022100d9ed952934ab8eae6bf20311f1e3b8d238b428f1718a232dbeb44d37ddc23ae6 3046022100f45ad9b4864353dfb1d1e25767c27cad0a85175c6a0cc5c64dc2593c8704ec68022100ef64b53b5e7d077e13314ab0db36e530412ed265a8202aa913ffaf104fbcee2e 304502207c5e17de4f15957cb0ce5255389baee8e1f6d58e9460a876f301c4ce6075c18b022100e8920387fedc1def30342fe1e9c6427079f473fa3aafafc6acb072c6a30f2465 3045022100975bcbc4ecb7af37638128a941628049b58cb28a820c81eb19b137a7ef672e9302202f3b53d1ea1e8607c3af13a94edbcd109d5ab29048d53f7cd398101ec09f0602 304402206e822af536658b81f0e23eee3695f0f7a6e363edddacf8c86ac69e3ea4f27dab022077328fd9442aaa309662d09e9fd75c3fef7980b89ae711bed6aa2a958d8d66fe 3046022100dc2c3466ba9c809be265eebeaa5dc5e63614cba8dc93d2870252184756ca8044022100b2d49b5cd9313efdefabaa39098406c86b0da8ecabdc43e10c90c79e877e4dd3 304602210086c38f7162417b278d4d41fff5b3fbc0d0a89d7eabb36f7e8f7bc2b651c1b688022100eafab045c7f8ace6069a57afa0693ea2825efd816cb5824d9d581c667781dde5 30450220287fd94d4d1acc82df001c01617d58111f3de0fd0c0153eea3044081ea07b88c022100eab4dce61705c2e627b79efe6a1ae67267db30e93b4862c2f304ada4601cc100 304502200e87a32a39ae89dcb677c64f7cde4e650847237b960bbb6a93df769be5879b6e022100d6b55bc3f0fd0fd4255c8e2bfd94a8d566726f7c539a60a2ea10ec9acadf58ae 3046022100e1709b9f750c1a942e7ab9c18c20b4b29d806656598838a102c7aaac9f4092b60221008e83e458b1a613e6ab17a02a121fb9dae31350bd3b93f7b0355340d50bde9505 3046022100ab6d96695297f0be5225768fed88fbbba006ebd112891a6e5e7203536e17744f022100cb797a04f90fa4abb63cd27966facb9e209f5a91820eff859c8aaa9a42529cd8 3046022100d86336f9f0f08e268231f106a61407cf4c7d09d4264a3abd07483299b18c105f022100c113d6f34e8cd92e54d4892b7d810f95cfbf19cd6716dab3d1c8a11e52e4fb48 3046022100bdff2331ba1c807db72f4544b6eb856d830541a5bfe29c188627fdde632d6bf1022100c6070576eb8b47ffa2f73f1ae8a3fe4aa1af06e82f961350d20d9fba30fcc9cb 304502202c6ac1c04e985e75ccb10ea3f2c05c85b6f29b322bbfc19297c556b48033a630022100b09bfdd92a430670b4c84946bdacb07291f313cae8a6cac88560e43b969dad20 304402207480c57866710b4f9bbccbc8d356b8be8ff932c2a192cbe58c1fd0bfc5ec4f4a02203d33e5c833015ba1d7edf1babf882a99fb96169806da1b5f7760da575d30f0c8 3045022100a189201634aa64d43c88d902d7c8b1723d9f7910021a876d8412a5fdd054fe2e0220692439c37ea1457d3a484739f41cbc80c9a3a15972676ecdb86df206f977fdc0 
304602210089c682f2d8aec096208b59c69c28ed79c0601d97101213eee270e0395200d857022100848c7a579ca729b24c61bfe0ae0eaed417219f5b0be50fe631df270d8c924a67 304602210098068c3f4670ce8428791d3e41ba5588c20a3c3db3e6bbbd8505d0fce0b9303f0221009a18f971c54945e2d493766e8721a8679d6bdeee7a1ea83462d0d04452623c70 30450220720e80be972d3318b55e61533e2ea9a388715b361b46fc07b8ad475afc061680022100f4a7d7bdde5dcb577a62c4d44e3584815aed17d07c24034d95f68f8a59e609c4 3045022075ee47ee5f49719e7826e3aecf60efde8fdc6064968e6d7d0a6dd5535f9ae2a5022100fd7022e5db9b629563d1fa3ff205deef7a8f44739f14f35ef3e0ee75ae1d5090 304402202293afcfc9b122238cafd43ad798e49b1452980c929fe60ee8c20ea41296b866022056af5e7ca113abd89f6a1a1cb17e5abebf39c06ebf296d86c9e8a1472641bf0d 3044022066c358632e8c632e6a86d7fa8cd6310156c4f0b98a34236e29a84196edab9f23022000811713c662608f8915669014819252cfb4ed5c6853af48a040c76d5dc300ec 3044022062872d8f7a2c3c517913885ef1ac6f2511078e2e111a9461d87120edff901c1702201b4e449b904dbe67b71efb7c82dccaf505333f4ff8ad23893e8da0b3982c46e4 3046022100c25af6d4a41e3600f96e224b5d2f773a6d21c3e8b70af12261b1fcbfafc4770a022100b36268bec930e152fbf4c5fb0c898abcb5f8fa2e6bec97add582cf23afdbfee1 304402204267202acb973452d0d53878782189101ae7384684fc0f8ca332951f6ead34000220435543d1d1850ac683547b762a79c5f73fba98ea4e0a5b270e0ef5ae7f29933d 30450221008303693a7e1fb2815ecb9b8e6a382b3f8f4c5f81e8cb38788881c9c80eec6e0c02205a8cb0f65cda5376c6d9de1e3b3a0b5230d4c3090ebff5bd9d81c03b405a6acf 30440220364c19bcae7bdf16e0440c17c655e1dda2cf8d5ea3427ca4d1df4ec8ee943c7302203a98b2bec00cfbc41c94333812e8e6c5d3b5c7d4fea30d5be18f102df13e6b99 3046022100bba14917398383f79779a3b62fae8a274c78e2243b5a2403897ce40ebea95a5a0221008ade3e979a6559918b3bb47a90193ee2d24fda63b21b83af1ce7267b381e23a4 3045022024c2a390e226e5ab98d9c9d0592566e72575e80d3da4e830167e2ddb7c435ebd022100d5eba3e349404471187931c16e5cf5ecd93ac9c641896e6dbec882a135706ca5 3045022064afc60508b71d6aa84c9dcd1c198d6e97d795efa61e1d749ae3105007bdcd7d022100a4284e48f59ae9a7d0934709fc2e512f0c8469a0db6cbb905674c48a0343d765 3046022100ef13d9986c6d32d8cdb6a402190a2d1f8f5c576ed2d34bd6427d1d9422875060022100c86ed224720022759ca3923226f6535297e9b3a2b87aa4e069c8aba31eb0f162 3045022100dcc8d91618d78896c979abfe647ce026845c15bd0fa3150cfcd52537f800212002204aa2b0aab0b32e269f3a802cd47f2f8d58d0fa276c0ace70e79fde9ecdabbdbd 3046022100f7bdb394e26899ec545ed3e4c5e2b506707e5a04f6ec608b6f63fa80b2870bd20221009f564565e2f01bfaf7ed38c62f6d0bea34a193311fad57f2011ff95b35f28622 30460221008535c77995d500dd6a74d33c70cd3f0c46c884245f24ea4ef39abb2c55324603022100ea34f64091064e50066749563a22609ffd793650b9118e6ba5eda708fa760b41 3046022100c75d9fac76c50538008634373155abea6f9cc73b0e0d123b2ccc7ab2cd563480022100fd4b242a4a70d3536c8e0ab42c6740928edbf7a1a28876188fb2f0a9307acd41 304402207c83ebdc5cc7c772821dd4b82b89400759b414f06a34a8e993e54499945cc27702206cdd55e00e79afcd6ef46f9a4129356a735dc3cd24b9f61a7fc64a17617e6711 3046022100be591c119e5f4845284a526ac51d61062ef94ac70ba84a9190ad6f433fbf1a59022100a3c995e0eec416fe32546462ca8fb6f589ae54befbd087e31a33c40f3993933c 3046022100b41dd0c2e282cb4b3e711cfdc619e9576ddd2d770b5c2888c310dee6bf8e5225022100c40c69cf130ff7d7003b989161d2eeb4e00e4ebaf95156c490eb6f5d3c8cefb7 3044022001714e85e9a2fa4f051d9a4347c76dde3b6d90b6485f18064d46238ebb9cc62502204acb8bed2f72ad4078a384c5ef5259072a384f36fd589b7bf4a574c7e8cb0f3d 304402200a04034946567bdb5475cbd60ad2a927f350c74f5596fdcf5e9483329927bf490220294a903dac8e379b1f69aedc51e113b844e63cbf4c123fcdff065a3979a13c0b 
304402204ad96d0e97e32641cf575d09e283cb18ed43eb2e7cc9283bea10668a715b16b20220301cd1c089abd058cd34f2c13d58f2332d30e1915de2777a97022ba86ed1584e 3044022056b882c1be7d357a7b9677a8c2a8ed5fd6fe30b2d36ad92f2f7df3b4dfbc27380220315fe75a5ccbfe23ee1bd0c333ff570f9b6de43ee7d2516be20ff6d2c273604d 3045022047be280b2855004d47c5678c6fc3ed99077e8ce613475f8bb56564e3d2a886900221008ad0c070452a3db3d292ad666d70feca4b2ff6ca6934462ee3987bbc5b4ae42c 304502203e8173703670dcb56983beb8715cbd9ec3a92a1614d7bc02e6a578028e17636f02210095b5063ebfe793acd90eab9ba9276b1b0617b0f8604d95e6e3638c3f1e40a467 3045022100a92ed52993c0e59ace6ab35af1a5be5b32f700936bc066a6359a7941a16e12d502202782c5838cda0f0b7f98c2fd6c05514fe086ec194d7e5449cb4e36ceaed9647b 304402201ef9cc9ae7a1568e054b98b2bf19bad75587de1c2ff1fe84a2e00ff7e20af8c90220570fbfaf8815559069a1c0eec603833b92f4f279704b471056d09ba8d2fcf05d 3045022100887b7fd4f331e337e89bdf916d6dc5ebe040f56207f19f1cfbeffb96d1be680202206fa15962612fa859d13fdb4e8ae69ec13edafeeb08b05c6b3e202e4c0ac40992 3045022100c10d85def9aa6c1d303689fe24b94deb799dbf2a067512925bf286d08888d53e02207f673a3528a28dac6c9adaadc42d1b9e4e231594613afb1b68c6bdb644b3e221 304502203cc4b26c83506610045afc01abd1cf127fcde315a1c1b2422aae4e481f07711a022100dd2ccf8e948d9affda5396aad35f4187ce575cb7eecf0af946c2f00cef6f9641 3045022100eabe81b9c6cd67a25253ddf43897aa3fc1cb0f9def1125653d1f4d1d2195433802201c95be7b2be7e4deeedef75d96f17de698355047b42b6b462201950caab5af78 3045022100a45742e1557972c6f8eac36e361db3d4b61e217088be7f960f2107ef772b9b1702201df7c532aebc9a4ca8fc9dfc1563da61d0e1b7b7ac53726caed61027624969c7 304402201a4862dd0af6de9fe2847373e447effbc675654a9fa26122d7781d03e313ce5902203685ef0a45e288feee13f8261b41a031fd0e773f33194112afc615484b5a38f7 30460221008effdc292b747c2e64abdfa52d8c38311f922e48ef6fba2d663112e11409d635022100a04f6e8251a2e794b83b03b8fe4d0627f39756b9b460ce220f5c61467b1730be 3046022100bccaaad56e98d189f54911297b5ef949150778aba7640185dd185450810ee0410221008581c8a02797136039ddfaa6efb1b18614cb7e91600e8ba1c0597bda22df6387 304402201cfe5dde22f17ed628f82496cbf9dc6cc52e92adfa84db9906dce0eb50f0f09002201711b7123aebbd9aaa799de42965a376804f3d89211fb56785f470c5bd33656a 30450220232caebf29bafe77806ee86348938be032863385e80fb814e9f2b143f386acc402210097444128b8dcae05ada159422782e3d6dff036303625b43c98bb96232e3e4591 3045022100c4837597f280ba2d5eb5bac18de6b90bad1edbcc4c39a1284a07c48bd10f4a5b02207124fc3377119a5d494ee2783d2775819966354bfe25dbf6be3e997a921233ab 304502200944c65db591d1dc3120c074bd9d156cb9a168375438b76655c31f7ec89a3864022100fc9268dc7307100e82bf72d79bf203320284e373fe80394901132063c6c5a8cc 3045022100a7ed0fc817300ebb9b41f1191aa1f5ade96e6473fa302349f10d61ddeb195d2302201178ae3ae722e91ad0e90543e097bbfb3850054e31cf7a9ad64a9099f4fdf583 3045022100a720253fb41f2bbce861866fea14d047eefc9a1dfdf961bebe5225af365f439d022073194077c4c7983f8aa65e32a153d47870eb83bd38da69a7891ad510821b8c8b 304502205c0020940069c22ce6e6c42a4d085934b458a8c3c687c7ddca5530751f9ff345022100daf0fde2ef342365f600301f5021a0c83b563fb9604b8e3af2cc9d7673868a78 3045022005b2b4733817e5dd74c0ffd0914fa57d31287c4839e79a2b1ffeace9e63d9fe90221009d3ab11ecbc2a502a9fe83dcc7f9da7677d03e6ecfeaf6b2f92cc8deebba6e8b 304402207fe8dea3b30f0af8936c48640bde5e47e2d992134f39bccfec75b71be360317e02206f7eff406252105e45af3498f3b0923d26c1c0bc8b855e015ed44ebddc570eef 3044022045fd5521e2fb17d8255dda05439276c563425afe96ec66be70df841d570b5b55022070a857908f1bcfc44bf0ed65d1236f933b88e69ab00d2b1c7b35a3d07ef5984a 
304402206e07109a2eb8e8c178299e67ebfa05caf583b561bca5e2c652adb9b3130df22b022029a5346d52885b6c01cd80e19216bdb79a8b4b3946d1a0574e7a709df1fb58da 304502206cc7664e971e5c4c085d84846674db94c39d09bded4bb39a256808d02232969002210089395b7d7c6edff55ba0ed39d0f9ab03835a4838e4f9d5e32ee29896bd8153fe 3045022003aa878b60edeb48040aa6def4fc574b6907724d5b2aeaffecea3df275361f1c02210096e5fabf65e9a4bb7785a14a3fa9244be37125294a0c8928504403f7c785a638 3046022100bb23dd80f7ea9def0de7ca36a9d302f442c46096c943b317a37406f93af1cfb0022100ea18560270addf7342f19679982c185f2db1b57e01d914adad5e229aafddf3a5 304402201a3234285b811d4595918f6289098b543c6b93f939c411764c1fd48be6e0f62b02204c3d531718ac6c0c9b0e34b6931ac466f5a738e7947735957bde3c28abf5738b 3046022100ad3d9244845122221b491956756b42638d6cd4f7a79df006911bbb53f49f1ea7022100929f3e17641bb7e1965087ec5491027efc495ddac2de924397f13fb1620bec23 3045022029f1b853c1e33df1fdb2185808665f578ed5bf88ef32e0b25b50b6bf0dbc6513022100aee4e809a8af894920973d011c857f543aa9935ab24083cd1794b201e3e418cc 3045022100c71b9ae27c6a6978f8b6df639be8be7ac8adbd5dd058c9fc9eb2b800eab70dc802201d4e2fbcd29a76de5702d607928285c4c7b821fd8583d278d936345564f1564e 304502207e61ceaa4cb7809ffe1dbe029edf9286a7259ba994021d359163783df98a5e16022100c677a4c2a1f0fd5f08fbdc3363dd608bb84ed05061da71b503ad4cadde5eaacb 3046022100f861ba666e3bfd45830ecc4e7580f4f849405d48c918c839adb0f390ae079dc00221009a4cf94c5c8d25a042997f276e1f85d48d594bcd06cf01e1c1af9dee1fa8d53b 3045022100d38a83982856e9f28c03540379d3f4b44c1bda413d4bb554861446e5ac7e75bd0220455595249ffda82adcd46cd64bd040ee9753c6e9ac34b57981bc5216f1e8180b 304502200e59b203f5cc72bfa62ac165f49fb9cc0abb10e8cee957248bd98dc4bb893147022100ac4d5fb252532595d9272fd5a645001c65e79713547ac39c87b9701389e8241e 304402205c24a433057e4f2a842ae3d17cf080c40f637ea6cd0deabfd3c9c462b9e4e272022066991dbb0399ed9eda7b8b3e28fcb1c5683fe3c6419ba139a99fe826ca44d660 3045022100b90b4f677a69b234bcf8225a6491582b16486bfd54499abfdc1c2f700010311f0220424e3ca9e3e99df1463394f37a9df57d44e8c45706c07a6dbafced6369d30f9d 3045022077f52ce5c2bf04eaf0c1de90520d9e587b7f91ae5e8b32f753963cab0b4f913e02210095b3c92488b69c297c5b71430726883167de286243049c7ce2a09886e7361865 3045022007ff0fb9984c529b530fb305522b0ec0de27e427b9ff21d60d7ebf86bdffef2c022100a087458e5a5a40bf3326a4638adfbf557c507f3132124c3dd8b4f33f17f316eb 304502210084e48c48d771ba44098004a8378b01135b0318d346d78c55b36cf5d7c067de8b02204e6c0510d767a6f677ac12341e5e5fb8d3563bd30268156deca971879f968856 3044022078ae5e4ee8db0f3a6abbce3831bc4ba9f0bd0e6e080f09b6bcba424570338ea002206c2375d8727b36566756c436a78d81c3c475c19dd6ed20bc7372b536e7c19955 304502204a1df3c143875ff7b9ef5a998e30a0d68b8bf7244c30be97ccf6fa5fc3385713022100b8587ae3855711771fc899191497a6d3b75d6e0833464ecb0b0de294d62abb04 304402207c6c6e7b410657553a3bdcd666b61084e15549067c222eb0818d031751a9488a022052847d155e3b4539165c792d6fbc960c100509999318c686c54e3c5d7fe89f03 3046022100eed0cfdade85b9d2f15644c1a8a862ac560928dfc3f8c7225ff8c5ef650719f5022100ece5b7b4358bd6174fe8349714ac7f3902b6c389e62fcba53ed78155137cedc7 3046022100d517e281228180be9f1d72398c41ccbf2db5eabf51ba38fde00dfe52b5883323022100888ffb21682125b44116c1d6fe3d8343bd137ae3b86ddca4842abd41b95ffc2c 304402200603c47df38bea589fcdaecc69151567119d7f1ba82f6e7044891d61abceb82e02207e85264a83cc5e7b274425f1f2c134282ddb9a2b370018163b6b683f119547a1 3045022100f7d62f9bb3f33337be392be68ada1831302a7c00b9c552408afc6da8d2442f79022057c962bda9360b79999b72f51823552ae423de1bcf9b34cee11cf4b361066d76 
3045022100d2dd2e7b8677a2fbef566ef971b1dfc3c2b6978be6db4179a4b3dcd9ba88fac5022013d44d8c18bcd175db531a32e71b4c46dccfce734799021da4c57301021d85a8 304502204f26f5b5b8afd76d41e8094481066febdd9998f96ca18025ce7e25006c489d03022100995035833ea8c4b62acf0881f0d850b94cf2231a9c8dee8846f2813935e58326 304402201c11c901aa2716fb7d2c63bcab47cdb86fafc34e152898e740eb48420f6ef53b022022a71556861726324e54fb320f2c1eb0351264cd32cf09ce1bedac38981b02cc 304402206547acf9937301e275fa7843594210b68115ca6fede405b0aecd03075649781e02203d5046f7abb875f5ed4e8b48692cefbf03c4920537f8753db07e3898e5282015 304502200c5ac8f29d79f3dd4ece00ce2f179b05d875e8cf0c35056a5c4d05d178b6c3ad0221008c2a444baf34082215f9d1afe466ed4458a0d7bcd53e230b08163c31e41eb4e8 304402204d37f4d58dfa541a81dc31428dfd3c02f2ec6fbb3b64442bbe6f74cc19308ccf02207a6aff32647589adc01ad429de2e01954f8cd01a308444573205b50791da9c9d 30450220489f8b1921bd0c27f9177bb78e41b9debc0924d9044af44ac3ec02c8a8cc394e02210097ee375fe6632dc044b9fdbc694f89f9abdc3ad5f93cbeff69720c7da20882b9 304402203ee2ddda5e1e25abad6d6fc3e0f5232eee5437e9c2f2ca0922cee3a934a382cc02203a2cc967d770d2b9ead9762c53e9a8e7a484b375bb101afb155e656ca81c2c1b 3046022100dec07ecc0a2d3d39763be4f8a96132825d4954fe5f52151c8fdab2a3f8848b44022100ec7d0fbc416ab5275b0d3855e2bb1f2af41d1ebf5a3ab65c27941ca393bea9ff 30440220144ec2786c95f788e51e45a425e90d2d956d8865a7b0d20cecce649d6f694ebd02205164179072c46d8b5ca83ea329252099562a61f8829c992406352551132e9c2d 30450220774b2974a59594eb82e7701b9224a1db96d936aa30105ee154bf7fc1e2866855022100bb2bed8adef145d302ae8e47aec536b33db1e69cfc9977707c168c3077827d57 3046022100aae027164f3c3218a83813d27ec50b6ae5cea1aab7c8d519734f406c08b2f100022100932d48eea50ca9607bf71c70bf7595600c0bb60cff4da0d9dfebc71863770b0e 304402203e87efaeeae9891dd3fe10bee883ea3e5104f8a81adef534c787c7e35e038d9b02200b45e7bf1a8c89754c0db59f7377a9306ba42287dcd2035f8e965aed3d3cad25 304402202fca6f149b4876bfa6940518a6883280e653ad83cdaa28d94a6cc86c1ff454a102201ea163fe8d29691caa02a96ba2137d0799504b192c067c25a80f8b86a715f477 30450221009f23aa81f5e0fdfb4dd5c8311660b1723106aaa6f4991e2470414a99a4e6404e02205b0f9d4654f357d1c926661cd1b5b83945187ef426ea005e96cc2ba6979d335b 30450221008bd13350fc4ee526fbb20ffa662542ef9a225719bae3427417bbc7b516684c3d0220597271571a6c795e2ff5d405314bbec781f74c05bc7da516c5acba09126eccac 30450220681a11fc55d5d076892e48cd9a49bd4a35eaef131e89eb42d998b932e8675bab022100eb3763136ddb72760f52f73d212282e8f0fd002010afadd6edec684224680162 304502205b498c2073016d3ca56381110dbda84b54eabd6f06bfd7fcfdfc927f4cc9698b022100d016effca3cd787ea8912187daa07951fa6d02680e1d4e3e1dcabbe5a8685763 3045022072ec9c09765ed94d6a94a172a6ae4563d962e2874102ad1472b519690257ef3b022100d1dd77d51114c3b44cde55d1c3e6ff299c29591eee1c8f8bd7e81cd786a03730 3046022100c0addaefc16864aad66ed7cdf8b2aef9e64b828b63d4750bb4d7e95058bfe70a022100bdcb15b801481021101d95fe57fcb1a1a55c59033c73eec90c5736f6e42ad2fb 3044022076a73f5767b597dc9fabcfe701bcb42fac61c81aa416db5ef1361c3f893fc52202200d58e778e131d9ed1c0687cfc36e29567565273733a6e2eea1563278b8a0e815 3044022006718cff17cd316e25d0b7946d7adfddfaf3dc7258c0c95df8622ddc6dcb9c9f022028d31757d3684d587e1500762aa54cd3207fc6a0aec179a1da8795bf564b3bb6 304402203ebe533fbba53acb911c4cc462616ca5ae34d44ca48ef73c68667722f6d23b0e022001bbc0c09b8ab544ffc095378c3daf96b141dbfd454fa161ffc7188ff3254b8f 30450220223876769a97701925b23ff3e1798dda4c705d5c6eb3c64d89d46377bb5d05ed022100fc6201050e3edb0d9660d23821af57053015cdf2cc010e1a742beea160476829 
3046022100865c54ee4dbbbe59a7568aae8b046e87c8911ab266fa20f87d61db8bb0b2928102210096e21d89a4e215b2a1bbc8a4e89087da4baa13b009080902be8f40fc5ed79ab9 3046022100e9915cc038126832f4bdebd5cff7002f1627f7a074ee69ce4ff488356609dce6022100bf456ffdfbe79cbdb1153a800e9582fdddafaf6d7f430ef5d221281ce14dbf31 3044022021daab3ef74f1e5c9671ceaf0e0f16b45eb4a723a62c51f8ee1a63ea09c8a04d02203527c5e0e995ea5c8e6e6216df0367df9c8c4e5fc6660c87fb0a1f0004033a60 3045022100df4ff89fd6480cc61b07954135519559ab703a880ef98d2d0d8b1861b86087d6022020d9fa1015379eef75e2a3e54dc6a03ef00e6184f6ed273b816c3bdb02c28aa2 304402200f601a95115480db7c57a07a34ac29d45cd58ce63cf1056941f3ed043a1c1f2e022008cfdf50081f2fc48f54182c09d62efd3764d6b411df20d193e3a010e0a4d577 30450221009d0c8bac33367452df3f9a3a7a5c3b12c052b897703536dcd135abda01cd82ae022023f01ef7d515336e3d5f6e4d52439824fe17426c871f3fcd0f2a3cd2233442c2 304502203b8037d402b4a64d1000b1e2e30551c0dac63ed9b76e272a09be07b7d6fb4379022100b7a6cdcf5ddd6de6afddd0ecbab204f51f1cb8362a34383fda47cf3fa7e89cd1 304502210098038bfbe8dd7348ee2295168d963d1d8254757eed73f782c5bef59cd3c588f402204b5869a5dff496c4df05866ea0380c34cd57c946e77e630db8006f1f185972e7 304402207d104e3d9c7358bbb56565b2a8ff0a797fca68a57960b970afbc4d64dbf8333f02202f7d1bffd2d0bc22767bf55a80a2dc3bcbfc208fce6b5e97b9488f7da52695cb 3045022100c3364465112a5b6ea58f538560e8d808b80563fc8e7057c7097469fd4e388c97022052c3f718e107a07a8a3a9c55077eef18492bc15029aadafd8df573c65da523c0 3046022100ffdc356bb7496e29cc11d4c7c232708c6aaac1b35dd32f1e1f83d989e47738b5022100f30e3f961851ee7f22bf64838c4d41aa084392d1bc619914550c58515860e30b 304502206704e0839b3406ed633483d4c3a74213a0327d5126c7cbd701735deb2742a530022100fa3c698537b6ba04975c7f2feafa1b256e538de9b602e841667cc99636ce196d 3045022074e96b1c7f6c7522ca354891e35c50e7c55f93b9424199d0d988ec05de6563fb02210097697dc2bd3e1802bc3587be91b207e8b4fe5e8c50c832a5807bc2d9475596c8 30450221009942232a648c5b3d3c8b594917f073a8a7d4515419bcd646f35a143a232b9816022026672530cc936acbd9fe93b46090563b3055a804feb8dc5cd1428b937f602430 3045022100fb0e8658be2c921b99f4238a4558048e104770c865d0f3baf0686e04f8495d280220047448c0a5c85bec9dff84d2cad21b3f024651b59d175082a1184ee54071ca45 3046022100a8ffdc4155df9020c40e5e1f7641b77489f2a514ac9b3d189e14557efa859426022100b164db648d8d2b5b3c13fd10de198dca0160066f1e50d64191fabc344601d6cc 3046022100e710d80cc8e92761f21573d6920e46a3da10db6939d3d28e0c1a108774169781022100c9b229e4d8c317c3b056f876d80e4c6f6136cb71ced9d0fdf66aa082cef55c61 304402200c35736330f81257d726a7d8ba6d4d98cc8929f69236470586115a837cb75357022042c7242bbc2f2fdacd9fdce191d7bd10a2d303f4b45b937ef83450230f7a1dec 3045022022423ac51420230aec7f6e1a184d55c5ed1ff26c804db1ad22647ba8670ee572022100bacdd1ed55d0fa60ee721f3ebe60eabf8bcbec0e2acb72ce5b3026c6de3c914f 304402202ba342857015e751af81bf515662e9723e8303848134e52b7f9b7458da625fb60220071e1f9044918ae106c7b55c2faebcb0e3537505c2922a8327e5320175f29aa5 3045022031e6d0a61f4bef87855f49af5f708685175891bc4e26dae791490827de9557e7022100f2d57dec74db0ebdefc5c22d3cc87d1d07d6138e355b911739f612ca85c73952 3045022100dc63d60c93f2ba45213fc85e64f5c956da79d12a8e9730b27870185e1379810902202ddd93624ca4972b1516f1de5cfba648f2aa02c1ee3d6f3caef772d0654235b1 3044022038a9f21fe6c92e1a3433f97755f70512c66612c404e42bc6c3771a7086625c5f022044eb4135e7a4be7baf9bad440f1e26bc51fe72fe887418a9c680f2620941e2e4 304602210086ccf23527ebdb37f3d3bc8ae9d16c939544841cbb983c8cd72b099ebf6c0cc9022100ee8378b070673d4bf5bc5ac34dc60b7ab3eeb39b8eb9811cd3852425c47790cd 
304402202856f1da38ed8e26aa6afd34cd33f931b31e7015c4f6824dd54e3a13babffa9602205875b871f87378f60b756923ffb21085a4209ba1dc971a7174feccb159ef0d01 30440220253734125acac7d7c937bf9fd4a165bbf662a5712a3a1791a8bdbc56401064de022030f7fd2b483be30f0985458e30276b56e598bfe0b53505dc535bba8da4057a02 304402204c2a0d14cd93583088eab8057ae6f9cadbc5da009debec5d14d4a64735d4997a022033816bb516f6dcc990ab783fd3ae2e237309b034f37ff113131c3e2a22779cea 304502200970530216444b1092655201b2e4a86747d8c7ef96d2476bf423aea27e308956022100c4d10c49097a9200cfcfde7ab1f694794b30f8e7580ec6e333c9ad40fdb5ac05 3046022100fc94c30f56296a8e7923a839e442f152fb4c06cbffd0bb90920cf103e6a9d0e80221009e9f342ad8f43c589947f08edfbb4465ccdd67978fd93ae8e9b2f75605a4a90b 30450221009eb56d9641d43c9897884877dc39efb00247658bf6125191a60e299e70cbaf7d022012623dc48dcf7dbdcedfe439feeb7acce575f634e28325a54d4dc53b12e269dc 304402200b9037a8bd1bb4d5568f61446b0cc5467dfcfe14d1e40cd55eaaf9711a339384022005deab4297c56ff16cef94c2e85b0c9bf985264ad465fb54636cafb7f0909f45 3044022053aa674a7a2de6571dd07f89e18a32d49db8fea0f12c9c6b388dddc829c8d1f7022037ba630ffdc9f9054784ea5efc6992d8b8c0b7831af7fdad73db085280b01311 3045022100b728ff51bfcfb771decab50001b79c68386d838c35a11153bfa8086ac655d83e022048e76079b16b9050ee5fc87166f80d6a1c32f464ac99bcb3258d0e30e3a0365a 30450220470b5c9326aa25a8821f6cd94c9668f28637214b75a111fee29cf18058b31236022100dd0757170db8e75ca782b6a26486efef2a2874930233689f98df67e1ee5fb230 30460221009d0b893baaf6ad3eff1a61c9f8137837f0ef522aaedbf652aea6e15de0dbeda602210080d79f7d00f713286eb441793812c2c8b2b8d9f3b3f6e70e004d57a43de557a5 3046022100b47002f73d176680b95b2b7d91ace5fbc0673821e1630d471436fe17ef0f6aaa022100c64be9c49f4d02f6d2b4544d6257e59bc63a7a8fba19962460b4173b2892c7b9 30440220332862cc3e90012842b791744e367a511c83a2e49faf8e46f5113af37090778f02206bdbc1e683be6828e690c08f6ba97aa23225a958ec0d72ae86e5d94c1c44be66 3045022009874ad934fefbb8d0d8836531e7dbe238663dca6234ac97918c922dbbfd6b9a022100a08dcc3c94fc3723613486cba5ca5914f9f8a7aeacda9416988d813b39598bc2 3045022018885036eb9a475a976ab5fbd0bb4b851399600de663a539abc25aafbcc31d26022100fc612983f53d98c99358480e2f946a099b88c427dc3f125ce6149907061c744b 3045022100814d98e57abf3f642cae07b1bc9c3840b94ef40eda8cedc668b984c98b6e490002205c9a79889771e2c74bd33503d85a0aa2ae891e0c3a8e0acc8df620c06d73e0c6 304402200f5fc0ac4ae90d3f5ba6cc0748f83757b03d4a492b7eb2e2ebb52f870d1e4dc7022055b52cf8941c0fcd1736a635f24045d36e12ae400ee397b56010ff299e5a28be 3046022100a233a92d0eac10c4e0527f0c77fe271b774ef8ebeefb6d2ca30028d2348854210221009d041ff55f850a32a1f5cedf51e507816a5f0430bf759cc07b2e32769193a5fb 304502206cf296963d073bbab2b05e4e3f05d738efaf17147e5d4eeaaf3b0144270a25bc022100ba1e7aefa873eb4e30255e584e21ab8cd73634458d3815646543e95fe117a3be 3046022100e5f363938c1bdb71854919ab38d901ff5617392a5b91e6b724dbd38abe62386e022100c12873ff50d8af9555175b71ba0e8597905c3b7e13e02a8451c5db054ae79d0c 30440220575edccc761e3d56b184f45e5dca673ea28b555b0918ae3b4538ecdaae564c8e02200130bdecb67d11806ed444e0594a2b30ddcdb789a097adc5285b2c10be1a472d 304502202b066696002e2f98498b7503dc5b84260903e3a3eb2a851df1d299e0aad4735202210082879807cb084774df5e8b167a80b143ff8ba05ab165bf41b14f13c06ed5ad8d 30460221008bc7da6e04f981763425ccb984254274b61e9dce47430448bba89d1d640dda6b022100c4e7512b96438b00bef68ca98ac4357b18b6dadf3cb27d8cd73a26e014138121 3045022100a333e7a290b55b04a5140bcc839fe3bc4d3b2697ad1d7a594668be79fed11457022066e31665635c46275abf10a8002f095d776665f0cb454ba14c1addd0e164cc13 
3045022100a74757ef1f91c75650c6d7bf21e043ee9ae9b3bbcc005b9ea5462b06b7fa541402200ba2bb57d3b24fead34320198bf0f9246982bb5eba130d2d8148288fb0cfabe3 3045022100af1d827c453a1321f05049661cfc9f74202f9a41c04ebe6c27d2c601514f5c570220167352bcfafd6dd6edd6955b69d74962421a9ba453320c830b2e311843e1c169 30460221009067b79e018049decf3acda877b470dc3fe51703f677a2a0102a7b21dd0d7e85022100c6a957decb6aca7afc142e23fd7cae4fc4fa3e31ec047986532554f008e01c50 3045022100a550dc0b0e93bcd4f0f906c9eda08a66b49ca0117740d6f5ebbcb37d509ce14c022062fb348a07465f79d17ac5a95d6fbc454110adf41fec61920615f51552c32181 304402202e08254dc339625aa727bb6e2288e2afae8a9f99ea401a94f39b6e3d4ce3c8eb02206094f81f49ec91370f66ea9d6603b64cfa48fa8d1f5fa30f733215e5149a8f53 3045022100e64344da90de1b922176aec77341ae1c5f8d20459f5d55ca8eefedc55868306d022077a30ba6df9493d35f786ede9c9a4ba4cb60f8fea3bda52b10c05047f075f6ca 304402203bd32468c5ca85d3709531b8ebcb15adb152549f4adf563b07bd7e07d18d765d02203ed9a51e1d668872be7fc189a366a5f1ca9aa579b87fd2486af899295eb830fd 304402201e867e6a6d6e1dfa2a7d13cf06f9608c96d1728fa682b7a909594c2cd8c0b0ee02204ee8679bd951ea756cd5830d02e84f96d5d159e7652d971fcfcf55e67fbcf4ec 30460221009e91940f911c2c817cb9ba838e8b5e7496588702756b21e2a8e4382f91ebd634022100b1c329356aff019a57b1974c866a90e57b710685b353f567ebb5e4790ce92a2b 3045022100a760fea178854dc32065db24e603005ab308151851ab21e621eaff377f6b9eda02204829840b59ba25542aeffc1ad201b01c535e595729908251c21c8ff278898ebd 3045022100b783ea29e41f6b174199e9196417d98ca4a7aa2ab82d79c4cc1d9ff83776902b022018fc5b5609cfa2c8e9c572b5a607a593db2d392f3997d8772e25ed80f2892e00 3044022026f17397f7283ab9642c095afc16a396516196dac73a23a252b9e8877dbabf3f02205b1bd4868eae7d0d047854fb36334b417940b3504215bca6ab94f0e844c4583d 304402202af764f43ceb7feb3dcd4c4c2bcd0fbff1db8f2a3e97532c2c4efe5764ce4bdc0220573c6127befaaa2ac3a1bd44fecdc738602fdb929f558b12f3c6d9b57862734c 304402201a2b1fb421319120ba500ce90235241aed997de0f3964195ab1d9545e10eaefb02200c358b4dd3edd9555614e0b95dae567bc2d60ba8326b4c810173d4eb6630bae8 30440220686b452456b5fca5603598c585aa3d286591c628e5a2bf893eeb1872e5b6a67102204eba86d83692d5806be2733def271b5ce4fb4ee6b353f4b19f9e2d0835512a03 30460221009e3ecefd546139ddab688330a6758c3b4c27b7103c9fb8cf32c9a6a94b76745a022100eda31fc040380867384042418fe3044fd48dfe808d048f8804d2819692522e05 304402205d4db02678757d5b61c4336bfc7b074cde4f1b333719508822dcc81722e7ec540220166523a52cd127a9017e4616af60d6a5ee744b45c377ceac112b52ba7993aa69 3044022010d963c66b49989ebd42b214b27606436e90c9afbf407826e90f273ce7266b14022066f7670b6a0fd4ae56150dc59ee8952d4b3821b2da68f0cc6065b470f3ffd4ae 3046022100eb9373c2b67f3e1ea46fef2b13660777e6208962726b3a9fe94b030787a7f2e3022100a0904175bf8de75340d9c468f57ebd07f6f93f08816e5b40630b7f05d3d785e4 304502200955b84ed0e162aadd027b6437a2f0f4156a72ce4dd515db70cdb43d3b9dcfa2022100aabbcddd84bda7beb1a2380541eac751ca0b3544f2d1fd20c39fcf79e0e74868 304402205945cab3548151afa4d8103cf564dd347e400d6a3c17de4926a8de0554004f9c02207e6b8c8c0578011608d383a7ef03c6c98f3468dc1efcaa95085a0a3d15089340 30440220086badfc4b74c365be636f5ae9e427d3dbd7f877290389af40b90fd20a16cde902207dde6bff8297b1bb2e8f27944e817fafe86da22d39c10f81b4f3283a44ef24b5 3045022100bc8db017978892c0e3c5e8a77d060054b6c7ce908a2aa44e78d642c28209c2f10220647a075c66942a6392e1e1e5450294ae3f0270193023bba33755ab12adbd4387 304402202e84b4c190f6f8ec92a0e4ab4ab383816e59e5572158c51c61ab5dbb32651bbd0220380518acbee027bee4864a261e2b7c0fd97fd2277278851c5130dd63cff4d676 
3045022100fd256f3baada5fe7a21f3fbbf92c87225c9463405ac55f5b3898f1c8a4a79af702204338a7b8af12e9599f6b3f89f9f08e842e3ef49f199b5d62237e623f935a63b1 304402206d5d7d9d9e87ab9db52a18ff1e8da8a13c7a28def249e293f8e6174f141f69a50220056ec20a777f5d56eecdc88dd1387994084d6a822c873f249c1f11be4f722e0e 304502200de3b8586949251bc4524eec365d2e9c03372c8a8307960b91df5f3fe40015cf022100ad32728ae55b91445a22268c656be55017d622a0fb992078bace2307471d8ec7 3044022048b1cee73f04a076ef743dec51a542069b49224453f7b1f3864f2287491154dc022068bdb9ed7cab2ee220ae29db1b8672364573cbd63de30aecc4a4f59ac9c37989 304402201ce974d0db3a65e7853f87af2bb935cb1178da6c7c2be21d3555dc95401db4a5022022c0cecdd9bcf29ad44d3dd2c7bbb262d6c2cac91459b88598177051e000c272 304502203390a9500acfb20a1f358743e7c07ecab531b859254cb85a74cca96d1a24c8a2022100a36bed444e6d4282fb3b967fea1652eb72406a796295c56e4b1a0797f87fbeb6 3046022100d102389ba2b4bbad816ec59cd1c3542749720aa3300f6f1d043da30fc88faa45022100a15a021f7076bad0284d9e0adfb044d484aba91fa11ac90e87363cf152652f02 3044022014e632e7ceb2ed6033cc08cb2860416d0c84b509071302c0260bf4b1e07ea9c20220207dd2fbb82ae2c105b187a9ad2dd0df052935a07fb3c1eee6dac2aa03af5f58 304402203e8c375c55985fba2b769a53336297bf434eb8f97aad1f952739a964e9de1c7e02201402a652dd8cac27cd207ab0254540425d628331466ba21f3c071da27c5940df 30450221008c2550a01095d637cc8ef30b0f0edc46df7773a6f5ae74865eaf23ec4f337970022016e6377db9a441c3a8c691abe9d106a3dc4a9a639373caab4c3390ef855b08f4 30440220592430ebb54526be2d9cb9293149758272a7883910738fe39bfd6f5b38e914ca0220777080e2adc9261a87bfb23f6ca660d03f6b3fea1d3a096d47ff678a36c2769a 3045022027decb750d48bcb1e6a543eebf1adaba70c8cba1d90a293fe8090b322079f485022100e36869e068b4242d899e07ca699d3f7ddb276508ec5662134cae4f1e5f1ab28b 3046022100c524ef4a0be796f0b26a30eea889e27fea98ec9016465b80014a799a262b1808022100c968dbaef9b1316b73968f0a8d76ef64d7d14d4451a9ef4a90f139663b7221ca 304502204cf7726601d17507f0851ec8d20b35af158c127972b49cc2872ee4e8e433b146022100cd4c69809f49b50a1fc16a1b8b7623a9ab580d45a85e8d5b270e6461e7b97576 30440221009c8db883248fb7d32ecae74b6c9016bef707529bc5ba4583412de8ac25f30a44021f32b1c1df64ea387e43938730956a6386b9e087601db4734abe8ac695fb19ef 304502202a7fa156d12671d77d4496d7dce3539437b7aaf02650f3145bb3cce5ae5139c6022100e3fd16f28b478db3ff700201e80e99eea07d1dc7b095a430cdaabff81daf63c9 304402202cdfece9da66c90548dfcebbd3df8c1005d065c844cd4641e615c000f57ade4802201221341f86af6e2dbc42924a76eee50e1bcc2ec0c65f448b4f8099cdad5c0f5e 30450220188f1594581c7a0a65c8f501c9087fe116bf01117a9ef22eea9a2a7372966b18022100bcefd70f99622cea176ddf673345809880b89db30a7330577688d74ae17ba13e 304402203d9108b33c805611f51d824ef3ae51be0d46c36428c612b49941b9dc5590f486022038a7e8d95eda70249a47ba3ab95090c9552c2a77a74eac665d30a00074d7a4e1 30440220492a85741bff77277374c9e7fda585d47536a61ca1e01184394dbe06119935d302207523876bfcb60576d63a7d6a440ff41466fd3907a62d53f27ec125988c0197a5 3046022100d224be0ddb3c6ddf4be67a732e5646c897f0796f6cae0f21a331f438d4b6ffc4022100b8699562dbf381db07cb360e6818f5ae32adc479ebcb054d7758b35541fda75f 3046022100b487de83cc99d34d1c3a04d9573ab628f8900b75ac4762098e9afa8f43e18490022100c96e0cd21c481fb14a8b729ab681c67917598f76388972bf2ae757174eb5b5c3 304402204126fc82e445717709238938703f10a811495d4f8e4708b1711f75934d8f4dc4022012454ccd084c47e90154153d788288e263cfb6406dfa53d1339869d3ac47747f 304402204a890f0536e893e460731eb81cedafa501dbba377b2ad2e3fc42c162b54dbe9602203b93d0404139ca93aecc00422a8dc9445ca2cfcd32ccd164fd377d7cdea242d2 
3045022100b4a655af8168ca72e1be79098101d1b915b28eee0bc85e339389c614317f0cdc02206b4df35b4913ed4e95e3ddab08aae2b70eb7ad2bcd875ffd727df6a839249430 304402206b67ac37fc0011c2c85bf2cd99ca0b44a2a3e1004b0f2a874418f929af1b160302202cc0bddc4bacd41da63c628a6575ca489c342ef7d4785f086e553602639fa128 304602210094c6d76055972c711c8d638178fa810a4979832d82442c9c64a07f5f3bc28080022100b31699bd7e1b69a79889f11e9645c607f93ed69d6ba0b9edb5e86559f086a066 304502207bf90ba9ae261ed5b3e1b896b8bbb1520298efadae101fe88e7677401ca1c11b022100a0e90384e12c1c163b06b3015ff0677ffe1b2bcf2f5365174fb8d44de316cfee 3046022100e0f7f78a6a2ec593c8c8d2c29a83eb2d21b3c14d1d399aac897f8d0ba145e602022100b5b5fcef4617a70f03dab24850fde0ccc6ecc6aa87e9854f3b7cbb9fd811a977 304402204924c2cbfa8d5bf4e25c4567b92a09b99bb05acad0fdba6b1d65a6dd5735a1db0220505876e225c04ec9d59f07a0237aeec2654dcbb612d5e9904b7e411d679cd83f 304402207f034425ad56c7e1a37f39e114d05dcbdb960e892953d2624ef747b11a14c2f5022001bc5b75367cb5845edb7d59b9d4f63a3e492b19c2ad499f40c3e43feb4d706f 3045022048ee237a13d0602d3e48a6410e524f14bcc0109812f6093b44417de633fb10df022100e8b1b50e741fb4db41ff83529783c8315e6ae7fde1a0c6c167f685912872b7be 3046022100e9f4fd573c2126da7aeb7b61e5d2cec326f67b28b95ce0c67c774677b507eed9022100e38964ede3945eeedbcdb3987c29a457a4a09bedf253422f6dd9856bb6be4a62 3045022007cf6dd6f4c8a4ca606b872d186f413ebd183c53d6d8fa40d344a74c52a0f1c5022100bfcd210bf96b6e1fbdb1265eee36c8ad925dd34de696aeaed0d0a411d66a9819 30440220178fc265c010da10fb48bbe2e7739c8ac5ff504493b07b61cc73a64f70bb427b022078ebb5fba5cfb8e401f853d26658f688751f1534db9ded0e2dd63311485b402b 304402207bfd9896776db28e3c4e678daea0dda3c4b4a7258381e55c26ef1239912b5b6a022055428c67e0762942c0fb788a3c1c6395c2ceaa222f278c9c29bcc0a9b5718224 3046022100adfe90d3141eb0e5219bac809f93924bd2a86a1988113a79252841a1557f6bc2022100cca73e29c108403c17eeb790ece89f37fed35a33e8513599e08fa8ff2eb3f99d 304402204a46fc9ca0732fae7182d905a298d4ac8cbbb0db9479d27c216f459dcaba13a00220677087c52a32953c3989d5a0ac3c79375c5e41f9427d1fde55084b1204b7e965 3045022100cdd573a0b5f3e15af5d1c1dcd47c37011834494d682fa99598940d7a431cc083022008fe22ed45133f86f08db1bce67c0efe8bea5cf55cf4c9caafdb5f92df6e7efb 3046022100dbf38f15ff37171d704c68e0bfbadf1c51ec344cf4f02da9b3a03b74832b9a67022100a1f5979c2a20214e1a16ca16f48f69c3691883c149fd24e329e2eafcf2751ee2 3045022100b1862060abf43646d56c97f85f1f61680ef4af8c8c0b3d7ec6134df0b63480ff02206d02e37ab3865228e098abbd3ec2267dc17ad9187efe2fa32e51e81f93903a59 3045022069a49193c2255fed4ef7e3c6672102754f761fa457dc8759cae927ad05bedadc022100f3773e0ad73bb7f29b3fd7267097ae62ba3eaec4f4ad2cb4b40a97345ff510d7 3044022076e4cf7c1312d11e64a0f4b3daf824a45d065a5682081a6a0f69ee05858765f0022032d0a999286fa8dec0f1976351a15af082779f2607550f6ac2654fcf1a01f026 3045022100e8b0fdb50ad1891cc2572307d622b52a912c828ae020525fde752a1511230d9d0220600733e8f2ba8f9e328331b62ac98e73a1e73716f207c449bc9ddc46b83dc326 3045022100ba8b39fdb4b15c93e0d45cbf17104b46b8304b97a83f0c18fe9a1e844f1d709f022034cbcdff35ec7be29beabba1aa772298357ede6fd34f0cd38d950f0beacedccd 30450220486c7233c60fb70ad08a11b0e2c80f6be05f693fe46b570f8b6b914027034e4b022100a78fad21df827b3bfc645f4122df5a6278aa91310895ab0b1ae4544528c94fe0 3046022100b76e22f849668c3cc024cc78f5b6bf9f3ad0938de5db1d8c699939c186a28b85022100f399e518920a71a97bbc05fb101b99f1d7dd0c2138a430b888c4871355b5f67d 3045022100adcd3e876894c242dea16766c4175585c55c49ce05db31043ffcbd902083f5a1022045682f0b37ec5697caf188576ddbfaf4c41a0a4fa40da26e05d9e7e345735e05 
30450221008aa50ccefbcbd27f4eec7ee753daa5b8d29707f4e9155775f1a10dcfb59db9f402200a286d4f23793eba32a88f90b480a1bcbb3f6aeeda04dccbed744e1fb403318e 3045022100f0d9216f0e75725d7e574827a4d3eefdb7d6748f7ec406d6e78197e7c9a36d4c02202a1e754fe0699cd0be4b4392541ce8d8990de197801be20b2baa6e40ce2b301b 304402200e49ce1442d6f86d3e96d841ea8323be3c76bb28b76ae376b36c92dae98131d50220232f1066d8dc3f7085b6a1850a009d6a1775292a28456e763ca9ae612c98dc2e 30450220688b3641d71124b198265cdfe97094b35351f0ede38b892ee6ccc97b84fc7152022100be5c8441dc99eacbdc22710765d2424a79e1a749a71a05f6fac842fb224f614b 304402205e04dccdb456c260a81ecb43ca5a68bee0347ecf554897eac20e2fcf376e06d302203123bd21ddfd8212f54e88900ba1654deb6f015ff4eda8a378e46115c8f7b25d 30450221008fef284f54bf086002e21c57475e86fbcfa55aeab4f3c79c6d0057fb1687a7180220011f67b6d9c42e55dbda6aaf923da05063477d89a7b0a502a2af50c6e2daefd4 3046022100809c322a0c93775d8e984e8abf697b468ebc9ce9504c69c3cf325c2c2fd1a926022100b23596a4cd609758eaf56e63874f2ccd9fc596de50fb7cb38705192873ac3201 3045022100eb713f0ca94af49965a0a1190f33a36e2cb6d8e9cce5800fca29facdbe73ab8d02201287b279cb7115be9b0f8f92029097a7b87316c62d9394463a0fd6be3b083f74 3046022100bbc2cbb99dada405c355db7e0099810cde343c615d486a44fd1470098ec3a75c022100e2a9df65decba48700a44008df7ad82f66c548113431de056cbe02e4540c54c0 3044022071e0bc06696fcdb4ae23e4e245fc60b34b344f520d051a98fd823374ca774c5b02204a148ea03c0fb478730bb4956b8c6d72971e97417c7cdc57416bf5b7b1f3da8b 3044022067eb664e280e81b23b316ab3f1cfc82a918a5d5a6c9f679d7344441170bdbe8c022079bffe6df855dc6a71d92c1216bc98e2a8ed62869f984d69967a1ec8d0493bcc 304502201a98bc5cdd94a3b30516224d4666213a5e8f55cc0f40e56e363aa96b2d39da34022100c37f5848295b226633d192f0c12d2dc90fec848ecc590f1531d64a92ef833131 3045022100e46d62b002a9291c90026b1a931938ff7d3d785e2321a536eb1830f63bd11a4202206d0a0a9d3a2fb1aed08656c8ee322464685a8624c2079c6979b95eacea9c8049 3046022100d0cd94ac993faf7207f6150c1854b078094014e7f499b506a84e6009b5a784aa022100c3259aab5f84409f2f341a9099d1493cdd2675a72133cec45d6b76f90cac445b 3045022100ef251f4ec5c7475881cceaac944a6af3a5292b0afd3ad1d1f6cc1011a8546999022005c535642db598dc2b326fead1c9b283842036c05bea37daad4a37148719f507 3046022100c5212517fc121dfcae6f502c1003e9afa8b3075b4a9494ebf7b4a53258d61680022100ac5819f01e6d818bdd5c249165b6feb2d9a9c75cbfa1373ca68f4ccb6f87dc42 3046022100b39f6198706958d9e493f1d468d1d698a0a03f9115a73909b5b6df4719e4df59022100ef5db74b17c61353f697a27c4ce1e9bc60711faf1e69629a9d2099b8614163f9 3045022012a957503f5db687900c9f3f7a8b3ba0f6d96db03f8c6694fed5f9d6143e22a60221008663487bafafb5c7736a76022e4be98a5de3d1e109f4c9c3ef39b97b5b2032b7 304502200304c7a67396434559d8890badea35def4c85c4bf5248868c03840ef5a46f77b0221008a7a97213252f67ba8830af3639f1b2f433b0bf88f480b995be4de3c65dc08ac 3045022100941ea848edeee1c83bf812ac7e55363d9e8738b0e71538ba7b0336db45a2f8e302207ee5851ae4faf883538c57b5abffbde8ebafda1f31612388368c8a692d05410a 304402206172cf6862582ca291052c6a00d8da3d705eb4cbdfcbdb3fc1b1c17ebdbfc62602207860cef257f4738161cc9a9c74d73c57f3494d3c80283f7c39684678a43317a0 3045022100db9e58d387901bfcd426a2194c6b7f12ae0866e088a056c87f3f9e9d6dfdbe2802200dcda4562469d99be8255928f11898c222ae111a575f80ed146addd8945a2570 3045022100dd8355c064c758548da997f4a8710623a699a967fb794a4413531638417a21ca0220562c9f493159d00ffa51ff3c5a241176c44e063899a7329bde757defd3dd1736 3045022100e431b531918ed5c0951de64b0fb90fec255487663e57989add70f21deb906eb3022033c47651b42c890e5c5fe2e4591736789c1a7fcf6ba5ca50db5d4d8431311892 
3045022100836291a0a88ef921dd1b76a33ec9f458b72580d58759e7c31860ca33da082b5b02202f6bb2156ba46334cf1dc6f02e8c065cf0e6d27148e791e4a14351fda5a65771 3045022037f2786c4c06f50302d6205bf3e6583063566969154db9b468477f413ee568e8022100f6a492ec14d9cb40e22fd4004a96fb9b14f93982f153b40bf828cd22eb98d467 3045022100e7ca72b5c64d785e8b6a1e8e5a0cdb896aa568cca564a91a413bdaed5d1b3a9202203578cf76920af2d065ef791b821d4922a2c4a079411b2f99d0ad680a1a58524e 3045022100b2914ea2039d6367ee765ba5fad4e6ed1a77f03450da3f16bbc5733a61b035e802207d667519c9b5e4997e00040a604cc33b04bc4246a965b1167a962a69362892a6 30450221008c9fefbf60977f699345736f3d4fb6ba57ac4c11c79dfee570824687ec80c654022025a93361f5b9b590ff391fec108df501ff19a2f84a9f9c8a7780949db4baa24c 3045022100c86edfb0fba2d1814cbdeedf45b1a76a82bbd549e600c88740ef5a0b6c1e82af0220410a5ab77619e02603013e29aa2ad5ff35fade5e59d23acab42665604cf6b93b 3045022100b8c03cabbbd9ccbcf4f3aaa2bfc4475e42e9a1155d897e90a5d5a02425e39885022053b4946f92d0f94c48057a2dd27d33e6736bcc7c1b6e5528981f579b98b7c9bf 30450221009ea1ce919f7b7ae2b5817ff4c68efdf51fbf85b23aa6654b1c2ea5a1fe36104302206ca04c6d64fcb576e2913121b1b4d3eb2c5b9eb8fe01a1a7ef06e6409b3f56ab 3044022069588e491c5425a6b7921500e5804c080806d9639c609e1a330a9fc864a9294402202d05ff5ad0089df465fffe5be547f8018f0b4f027790110d2555bf66f4c65f8f 3045022051e346a429712d98717df900d1b3067c6e82aea38f75959580426f9d1b1161770221009f96ea43139da2f39e3daecea5c37b463b4cceb85d8159d6e694e6d7f7556009 30450221009c9d885251de0c420b7ffdcfadbdf74ff20936def11272cd490bd90610b263d8022052a53f5cf1f9057a5f2bf4a9bec90a77de0680ae4dc5fc774f78957102392872 3046022100b4c1550caecdcfb2cca9be3c88a1c2e01c2ab9099f88038cbd54a54f1d910714022100b5f487ef9daa0b830ded0c638baecb89abd7ba149630ce075f86e24c64246d37 304402201834ef84abbc65a93360cc50eb444c48051e70021eb44f8304ee0464af367f3b02200ce0365975c116f39770c40481166df094dc419237c4aaafb4f36cf1685b80ea 30460221009f5c2f30bd4414698244deb01da4dd1227349f99a626094b977b8f0e413e6d61022100b0d4e5330cbc56a3aeba0da78b2556dedeb13d8d8b8455c4f353e775a17db68a 304502206031ceebfe4c3a36e88693135f5b4e2a9a1b9b9eb787148c8afaf6847e55d820022100b441e2cce6752d1c58bcc906c01fb2ebd8f9527084977b981cfe1ddbc6710e9b 304402202d90af7df41e2a6172858124e96eb13657371750e973a7f864e10c3ebf195cbc02206295eb99714b908150e18f17c0b84e10103e946a38292ad79b89697245af69bf 3045022059f1e0ff39df09a4bfc9cce8ad9aabd7348cd769f601b496569083c527d4607f022100febc2ef2dd247bc79d6dab4c1b9decd15b81a25aef40612848be9337922a3308 3045022100e6b978ae12d1df2e3cca13e7b8c5a69067a1bb0193c10be1b63569c37ed8700c02207c12c848f996fa8223c726c885dbf525663b9f90bb5e65fd03425005db0ab3bb 3044022068ba3b6d579649c4069cd78f9d7509e345a630f51e4148fa712a5411ad47247e022012e43b453e9f0249903d863949f04dc76d5748b822c7e9fa500483f97c842d79 3044022079dae97efd5f9f45f5092d7433440d52044c2987c149029e253f270d29b90f5002203ddcdae78a0d87e132af8db60c6b97111249fdec195e1056049e7407be18a22c 3046022100acad77a61491ec30b3bd8aa5b7e176d4d4e5311b6bbab8725098f1cb5d783c65022100c2b68ade1f71cd1dcdf7df3e24fd27a0730ab2d9d38b9445f3c92da88c47dce1 3046022100d8adba892704817ad702b5a92eff10b69c8694f049183f2db463e17a548e0ffa022100891ad14e8af2a3755a4ebdf637205220ec5a200d6a45ab1c4487a0f2346daed7 304502203f979c5ec3c823d3f240153f24085a0cafebb046ac9c2c9d75bbfc322fee7759022100ec12908c5c95ca8a9b83b341e74dc8382b4dee0b279bafda96a34512e8275531 304602210096d646bf0eb81848894ad68cab44f6ce00293523c910b1739eaa7469f9c46fe1022100c5708eb7dab88191dfbeee5754042cb9d73b19b45c00dfe45cc950af2cdb858d 
304502202472a6244d09f2945ceca1b5c564190f05ae48d978a6c90ddbe80b95e7222689022100da7bbcc194987555c0733678ef40395fb388decee1288f1cdff4af4a14b2768b 3046022100b014ce296ee3e7c7c9940bb469eaafd27ed367f5825c2e741b7c0576d32ee83302210080531eea0519a814ea46627c4213b1d0735e8ad56e2a47849310abb89782352c 304502206cdd4c419d29bb85839d245b667b62a08c8635c5041f762e9629d3dafaa7790f022100cb16b0c0cd895111e8cb2be87663a804c43247f2d7e1693c53c7d12b073045df 3046022100ca1edf55cd8bd2c6dca485f8f04f8fcaceaf2cb288182f19aa492443de3fd9cf022100c94b77b92e9fb421ef64566c460f8451e01c0831162af68b2bd2c4b945f1af52 3044022021ab23c242cbd40f7a6ee6f8d0a131e4be67a1e3f5d240a25d090543d2437a0102200c11b0dd02b9374c754686c0224189fb17ebb70f8244fca0b6aa4d9fe2961184 304502206798e6c7d5ccb557bd97e657ab369de1957f66b29050e2e083c0528495eef709022100ff58013cec4566e6626c4032d5c01c3d675be41fa602cb661ebc60b57d591c07 30460221009be6942b920a8bf91293e98a35275c1691fd733905850beb23e2f2adecd4a58902210084afcffcf3176b625b5a663f21cebd69a7b915e75d94bcb35dd6a1b114923aa3 304402204c761d9e381f698404f4090edf62398588800d1b19a634c64a9a5ec017124c1f02202f4c21b0b0f351e7e9243a0a197701f6ad1c153ba461985c2aef47c87f7367fc 30460221009611552804f27176af2241f47e720ed3e3d91399c95d1671d074b08b1ac34830022100e15418141d5d42c91c579255017d7dd1f5cee626b75469fb95c217d8f2a16edb 30460221009e8ee34b27deae02ce3b359a3a7c6473a56ca92f8e3185e0185b2e7c08139eba022100e1768052b7c4fa5d9fbd3676ee29c0f9b1c1f38e78aab502a3e9fb38209e4ae7 3046022100fb771589fee00b86530552824e28058f0cee42e9b0a58efdf37df3507979e45702210080ae82fe3bc573ccdc1eddbd0058fec34b5e11567af721c82a7a7b8317e0768b 3045022100da2bb761e999884788d7210150926402872a3b3383c168452513a5b4dae281170220157c77e16a10d67133a4f7c40735b0f5734b14a3254f999e9980395c6ce2975c 3045022100e3239900877bfd9bc7ff45437056d59efd8858c9212e04714ceadc16b4f659a602203b78cbc944a8317b8bd1ddb584557eac2f51357ab33e24ab40587fb438749d7d 3045022100ad67fe0fe30dd277da25786019f20244c8e4019fa41589509332bd4281d6a16a02205635c3e66b37e3266b38d0d5df2c232429f8a2ee94b6264a8904ef930832ba00 30450221009a145cf520612f3295741cbca6e6ffb637993f14d8856b6f9a3dc4dc4a6d357b02202e1fd1798d390721584aaacd4abfe81ad14594811b5db3cbd2766a350a81be46 3046022100b10a5ba21de18c5041b6f7ecd828013155937abb6d0f3aa9af09dcb51e1b757f022100cf24b8ead122cf1311dc35355f8e6df334f818f52de96045a957388f0738f84d 3046022100e97c0a014499afd2b71af94b8abc8a8aa1f081f23a075a01c2e0aa798570de44022100948904e927b99ffcc1a2afd4df831915c224a6c1b5ee89eecf9b1d4ba2ea1b4b 304502203820c963843ff9268868e5be7c506dbf47fdd7a5045d684f9d22b2151d815fde022100ef93b18812e3c37a2edb313b6435ae7f821cbc7d66fe00c2726de39a7ced8555 3046022100af26ca0e7d3b7f9e579cbf6fd937c580b48dd6a2dea7cdca054440b2634d7994022100eab510887049d5bb5b4770e0cb3e1331b462781926dbbd074c8fa18bab4fabf6 304402206dac8f26c59b0b8724e8145e21156e729c054a3af0151c213a70e29e0a37a6c902202de12ad245af0ed3127358b631aa345b3e83a65504b0c29f943e51fce2d47185 3045022100d5979b64e052f12e9f5ee9643af263449eaedda9b2b813b3e6c6a2a8121d91cb022008e8c0437cdb0040b9ab7ae199c91b68bd9f475b05ded333a28be2436941587d 3046022100baba012e3b995a5d887f4854c1abc984a31a4579c8ef1c44d0c34b486023588c022100f8f57f9c5712770d58ae9ab0da4eb92c578a7b8ba98e84a656c22b60f5597cc6 304402201dbc099a384e6f28dfbc0dafc64e9067ae77da9f62226ffd2c90dcfa3e82349002207babe6ca9aa0a5ae09dff5f339aa7402dd82214358620b146c9e2afe23d74476 3045022100f9b24d0991dff1db3b3adcba8d8227a50201cb5cd18ad8e3cb03856c6a1b908602207c8853ea6b191b3dc692787048dfe72916114a1ea2038d3caf5c9c954ce00fe1 
304502206b7323912f1c55fa2e1d93a8afc7de0b6bd88f8531fff9f6eb6b6035a88a99ef022100d7f3e3f1461035e3adbe97f864f74d5803b230644438c67a1e2adc8c9c959159 3046022100e7bab1d446f46989c809f9639400dd1d2c4a4c1636d668f3cb9255baf5970176022100fdf965c318551f2f93b0bb614610fafc38105c4cac620945552bdb2f38558feb 304502202e114af8184d88109ff90c35310f81edfde1c8a9ae81bb89ed933cc6760bb1a40221009053781181befa55fbc79441a3c7de7555491816e6e67099456d9ff81777e01b 3044022037ac22e92d27d9eae610e23ca85f1898c4cb42f03377a5a860e6bf8b9b1c63f202200583ec928e563d2e002da20254624354285a34805f38e088797a0bea63fb44ca 30450220434fcce9cec20c61cedffeca0cd0cc19351da9c14f082ed6ed081bd228c1f252022100c343bff1b5cee1a205999087bdace01895947c704da416fbf69d0dc3fb5aa96f 3045022100ddbecee5e12da216af4c9a48e9eb23332c5e43c5737eb666262174420f77ad4f02200732f9930d086201e37495ee634eda0b66ab56db9734f336374ad479d0f13058 304502203ba9c5dd05901996fa7c4a820c22ccc4fcc69628a326505052fe6914968e23f7022100d492ad895143224a873b2161b67b2c651c62008fad6a28ba0e5d8fc64d91de3a 304502207712f3b46576e5689c3fd7bc4841b04e8f4b44d1b20a83b36748b13516e42e8f022100d73ccab847bd32a894881b2a62c0970607350f4329581a23f745c64ea6a14527 3046022100d908a73a0a18770086db041601eaca30bc236b86b35de5ddfed1321463389ddc022100c632a1be9fd71452038c39bded3f33dea4cf1772174e688106aa00896b5c8950 3046022100b3d20a4a6bc1ea82c10a7f3d1512127098d17f3e71272f904c651107caaf6dd9022100b065bc72387c7ae6856d86026abef8eba11689ceb43fefb5db8ec0101dad9e43 30450220773e058f0c1d8021decf10a56eaa7772c9e514d84e5ce59fb8c6c957feaa8618022100a34f45be8680448d1b394c43e169243ea41b7f7dae7c2fa33cf6caf818012e16 304402200779bfe73b53174f1c0ebf7506f9cd318171696ffe3e5fe2e9e0c148077ae6a2022066af20551f70a14c57ed350f516637204f224addc7d104e2b007a3cf7f127767 3046022100ff34fa1ed621baf5f7dd220d8b65bf14e2088aad312b05913b3999372ec17447022100da8cdc730bfe2943d257d2b5114e2c5539345a4a6cea5e2e42ccbc8ce6d1d7e6 3046022100ea182d7c225d69293718f4751de211f11cb596e9b4aff075ddf37bb641a8a144022100e481065c17e4dcab9dbf127d2cfd75149b51656b05f29d551c6691c162c36e83 3045022100a6c5cf78d5b52572a992e88f68217cf2de723bfd6796a00c181017f9819a29290220182218d6697f28de004e6c83d2373e0b2db709e96158bbe55dd6997df048cb01 304402203094dffec5e41cb2dffae258e5873d9002cd15952f0b51e7c8539c993c6385bd02201035620cf3f16778016d298f6d1af9a8e3817e3d3f85250cfee07e7fa4e73d5e 304402203d1e3e312e851974bcc8e782910ebcdcbffaa4f6ac38814e4bda1e566a19d09402203bc36b8f03a8952f6a012cc9f6bdb089759b404b62c884c3625a3ad6ae358cd6 304602210094b152f1c02fde35c251aeba3c4ab5a8e212229bfeb0eb26e0176572dc2e293502210080ce30e577e6b026ba3577f9d26b57b7bd83a8342fdcbc0faaa54564301d20a2 3045022100c1184d9dd9723672e256ab6105158c64735a9120f6ef965de861ff5a6cec189c0220573850a87c614dc33d2313a05a709fc94d76f9aadfa9441d1c48d5546e287e17 3046022100c9f1f4368ef65d0da92bf5f925b4f86b2d8f8249a298be664a19baa04eef90ef022100b70d35788f9e16e2cc3f1d95eb0f941ee790836151b45e2724e3c8385dcef419 304502206d3bf9d82c7c853def721265c556170c826c644b1b06a467268c1b7a643030f90221009baaac3c8ac7832673bbd39253942912b7ab5199473931a1d6c9514da80e0040 30450220609a59f2c87158767f316a462bc46dd8d8b02a43ce6706e3f558ffabbb9db0a3022100fd50aa95348145d90eb90bc8937e9f7a2b2497af3dc1e7fcd508035b4b771318 3046022100ee9e3582687ac57b91c675346dc098e14e7df54d43d34ba7ab2e188301183b60022100e4858548acb6d7a27dd3323c9930dbb7ef8b9683ea6762f0ecdc8b241837265e 3046022100ba81ecf6caafe4fc3ce5d2791e49671cfabfa142ba2bdfde43724321ee0355dd022100c0a2af152fc9d79f9bb0ec049924c1af2f734733fc553c41fd1a9db688f84198 
30450220216761e9e3129111f0a8956c29dbe511cbbc4bee90af6fd4039ebe683f266ca8022100926b168ac72c0b4937bb4ea6fba43bea963f178fac477cf87bd0eb72d0f8506d 304502200340d24724c77c54a2d8339f79230e7617c727399739a9561e2b0ba7946c705e022100a7bd120278f3baa0385ad9d6f33ea0908fddb9c2f72bc22774c8dfecb1fc55aa 30440220529e3faddaafd2e661245223f815220cce5a17c63671fe31c12b3fa7b0154162022044ecb1f7c0fd8e0834883144abac0fa2d9ea6e1e71058d50a9c07d49b82e6c3b 304502205f97047c6d162a44d17d2f30c503573e1032688f18809c7e791691bf9596563d022100f4199d65baa0de240b20727df18aaaa7c137502d09968cccc478572f5af4728c 3045022100fc0723861077ed6eaf4e1ffbb14ed59001b670a5fa4a990ba739eae2a02227ac02200d091d32d715e01e1dfcbca5fb56647f1e8252c61e24c072147b1998b903f9cc 3045022100af6862773623363dc761c09b13a594ea08e7492b73078a10953d95b46d8f341b022031acd74b354c555afbb07ec19f18cbb29de72c5c2ebb94ebeb66c82a9a1f6802 304502202fabd7e9b9c1277a0ce0e9b74760e233a9680c000a490f9cc87b4149c6e65cd0022100abbd27cf9188d281c357f51c9a43935c085960f97dc4f76f1dd8ce7cf81225cf 30460221008763df8a0766bb323dad65618ee58a3a935f91fb347e1558022dbd5278ed015c022100d72d8549f992eb76a8b68178a3ab6a8ff80420af7371a8ebabe99baf20db57b2 304402206ae80405e898b17561c8abe77987fc3780d546d8e1ea6728dcc4ab0f91b70a0b02204d66da29881ca816be4bbd609dc9fb7ec8c88066c65083c5ae5b6c325562cc58 3044022027babc18b0c40e85e171bd2b46ebff9fdd284136ba913198055bcb91ce95047402202a14af912d0c0453e53b25ff6aa7bae5e6f9c90decf7f813322a09ee3aebf9e0 3045022051e9427003216b7ca68b9e767e11b629823868813ae5aadd3fed97437274261e022100fdd9418c3ba087e50cac83c97e5e85a1f498fb6db11b40464141271af3dc18f3 304402206e2eefb3fa352a9f6cb291c80e060798e339b9b0a4ecbfde0c1a2318ecc858bf02203733069390a086b48ffc31ef2dfb372d176ebdaebb731aa53dc257b8d94b51a0 3046022100d0aaf1ab2ea2e5b3412b3b1ef4a3b29f8ced7e3659e8b3c230840543f8bfa6b4022100bb317897a4daacc4648ff1d038a4a302534b30244629ac9d7b6b8d30d35591e1 3044022021ef2f978e4f7cee90ebd54a19c316008a6db5dc289163d5670450b581bbf64c02205454a89e4729f772b5eacf55c3502d88f78c1db6977678d07ef615d163be6647 3046022100f2b5440d1a09af6b51071d0b2955e4c789f89d9e400aa4fa343930dce2d1d64f022100926a0524ab7310465ce1cd6cbcad236acde7d3a52e806be9d93df48c0473218e 30460221008ff1b831f141b676f540e9250907ddb83b6bea9753619495afd6e5cd1e131b5e022100edb7c3db86f189eba023b5cbd245b2016b2673ec1800cb694c0ca59153d86dcd 30460221008c3f2728093e782595bc273780a14a98604aedca6cab93a540a350a951760777022100cd0092b7dd7bf82d66ca610533aa37b3c73bff6a814ac04ee64625ec2598286c 3045022000d81469ce6b501a9773e615e9d2928df6ef06ee73a36546e5ff277c1d54a309022100bef1e7373a8607d6007775723fb85c5f5cc2ecf1894f34b423a8a725ce15d505 3045022100d3e15424b065f3d64a899b31539a98d24ea1f682a4eccd518b1571384bed9775022015b3f668bdde7bb7918b43291ef5182c18c10cf1555fcd97f5c1a5dd8b14f7ef 30450220491132efb4184d3f80d17aaf71b4c878abdee729316643929ae521981496a0e6022100b477c5b60b997ab26bf4f587b2091e3188034874834067fc3c3b137c09752ad6 304402201b772c1c827f7c08b30ad2bba1d98cf17dc0f967a920d42d925e99c943bf2ef702202287ec3020f403ef1975a1e1de59220437f96854f013de312f91865ef11139b6 304502200e1f3eae7611d5c2b396c5781ea2ae8804d36e66a6ab4a6e7fc90e8ad41c29a90221009360297647ddbca90a7df6e48bcf00c768e943c17385bcb8b27a69667d923f04 3045022100d59253381afea7dd82b3df351cb7533e81cd44063a6de5637f207558b4b47c1502201b6d3999921101787c6992aaa9c61ff407e6f5c3a6c9b4aec6494f02ef943028 304502202d6ea0da96cef67fe037e642e9e9c354ee22dadacc22e63a120af6c49e3cd056022100c0d4819a60c2a0a1314b008554754f4b8e17093970e170b424c450b0a44aaf41 
3044022003e4f470c7a256d14ee16b970efc608a743c6e5f053674b7c9221d6f0d0e947002202a3282536109bfb402e34dd6810d542ddcf2d6008d513a9115f2160487994caf 30450220473e68ffd71e65155088b5c813c397a5db4e4e60eaec2cba842aff0e32a60808022100eb71d30ad4e4541a59911900d8c29fe541941061e60e0fd7f81ef4e511848ef7 304502204c2cda6ba8d1205f3db094932fdf0a5e6e1e0cebc6e4a475d49e928d3e381ebb022100af4c5284404d8ba860bc2c9bbde1150fa646c8f2158eaf24af4db1605a8a1589 3045022100c118583d8689e224ba18ab1a2ce460fc46b7bda4a38b429c3f48c8d473845eb402203ed26126be36515c46c3c46a0f622784ee5f02184d021b8dd595fb0ffefaa96e 304502210091ccdde715d1ea52491edeb3ee9890071ca28e88687c1246e18128de3eefda4c022032e02bfb0fd0cef26d23924e43f9fe139016963db43c2fbe02c3b2f5123db20d 3046022100b265eec8e3f22d96546f930e4eacfe34ade256a9d05c07aaf34aaf498e84435a022100db8ccbd24634921e8fc443d539d896de809b8badcd09b6486de5280fb1d063db 3045022100d7aafdd32e2029225081bb8a1c1341f121b913fea0c7a5e7c43e82ca5e27173a022042aee7332f00e0729360f52491ed7eee9542b5f74e57ec9126722a2ecd1622fb 3046022100a606ed23cdc171468c965a6cec76d3fac8f89e93ed7411eda104dfc8765e2364022100fc81dd868e3f04be45beee3818481c02de0ab23fc447ff419a46aacfdbbd4442 30440220662d1313b650363329a61a50ff6c29f1e0f973357eb9a431396d6adf40c2bae102205185e69701248740e768053ead4fae7a240acc7d5e0abff84a49da5554e29870 304502210089ff413f76e14407e315b40acde618f10df42072d8ad65a2091df970003dbacd022035aa49449e754733dcc17d7490217a71bbe6792289ba21ea5d2de0163d33e930 3046022100e9e3cfc5878b85cf08522f2cd64046723d0f5d770e1b23cc3ac32a144977858c022100e5f7641bd50bc5f4b8db6df628dacefe1ddd494a343e6662a18c37e6504623fe 304402201727075bbcdfd5debcac2be8ea6596c48b26c02bc273087307760be6e3d4157602207fec7dce494cb1ea113e3af73755dfd8e2e75d39cd63e05928b36f54b7fc397c 3044022040373cd2464a62aed8c775c03d8b91c8881a042e8e67916a9deaa3849012bea9022020a2a78a553162990ddd77255b730e986feb3f55dde7c6fcb2224ea143571389 3045022100c0151cfbed338236aca679fd39e5c95f9acf5c9803076de9163a8cddab2dda9602204b6a917f527fccde4e902b6cc4d4ad3dcc0ff056697656887f22e103b8e5c327 3046022100c3069eab324fc2e1e84eac389a052e695d27e88bc692372201309438dc123a48022100bcc2c82d0ed74e42f144fed269860084ef1b5a552a60caafe70b1122404a4e54 304502204e6e584c36d600749ff02893e833c4435ed9d815cc0b6a84248449bd4f5b8c00022100e61f34abdafe326cbba4c6a0daee2029dd69802b693a34cbf92593549f2f57df 304502204202798c3709d25a5666f4f3c71ccc4974e5e0cba2a244c739c7a936a7c79653022100c3fa0d1de952483115a440456ae36ed541d0b356d0b2938b977dcbb126759c11 3046022100e3f5097421c03107040c55d298caaee6851cf0f2445d74641f37b0d2bca979e3022100c31912a5189239e8608ca148cb37525a04b1eb1c93330e90fa2072822527527a 3045022100f78ca182b92a45f5a8fed94356c288cca6f5e85f776928822eecf8c49493b8bb0220049e4b60c00159adaf5b6d888b646fef6b3f03e0275e3502b6b621b750397b8e 3045022100b3e1b7d3220a605418c39448784c7d4886f461abda3804976d400660a54f295f022072a0a9487d9cc3a14ad687594593c670f798f411dff4632f066f4d8822efaa11 30450220437d1bd1d4ed78ae2ec8ec3510ff51b6b0232affeb09ac21a495086a38a89096022100a4ea459187c10746976aba2f1a385c3b7b176db14c9cce8c16803e9cee000e14 304402207fe9fb39770371ff2d507292710e8c2a4e12ac39f34a2751152b2adbfae59588022073b46aa7c25a930a6248b777fd7ad62d65d8a9ed8c196c6974da4048508c57c7 3045022100b799ae0437d6cb39803835326b2196d10685ebdff51b4a4feeafc5f525fcb56a022045735614ba6974e683212206672213114c63bd96bb59c94fd62094386bd13b77 304502202d07e396e8c33aac92c9f16fc1f40610cf185a1c615cb0c65ba6db02da1b65e3022100a9219e236099d571c5ef1501840ece5989cbb7706cf64f7b1d8e9a31c15bfabb 
30440220614cd8d8807f8967c072ced46c822fd028d5c496113a64591d505ca14da3ee620220408779f2f7f81ce29f17e5de0479fde193d33eecb015595579bef50feda80a83 30450220791bf1f1e53579aa5dfb0922512a7cd0dd1ccd019785610d7959c2d8b2858066022100ba0c775b2b116c11158b5432168bc24026fc5def82846f86a103b424871d94b4 3045022027bc8cc3f299efe8060b75c8ba0649d240852c9664688718c1b16eebae94617b0221008228d1bac3fd84ef6a8dab5bc4c441e2018e7eb402d20c30555f991c2f921010 3045022100845d9a3d901d4f92b8ec87d50d8ceffe868b9db842c988f2667681a69f6c341f02201cded2d52ffa37129996db2a4ecea0fa894b871aef4557588bf4a3674ecb9ac6 3045022100ae57dc6caffe275f07d41a9c16931be6cc5e3a2adc1ba9d09bb9a860a658338c02205e6f755e69d4fa4570495afef12c13d6241b7323348d300de52c0b74cd94f4d8 304502210081b78764d3e1e30e888496467305259aa28d19bfdc77da0518b5c91dfdc6f9790220416197d769f78a695c2661f5e51ed01bee648e2b7b8e8c2a753f76f4d5e3b0f4 30450221009b40b7cd96e5d5289d978374437a258832eeb04bfbffdd5c43d3f1cdd4cb3caa022008f146160a3b2f6066d246167d4f67a02383ecb9d40fe992ee9f8568cf550c18 3044022061e5c3e0648ac86adc7ea0235bae5e568509283a6de7f2c2d5d8bf968201a55202205d63b3e6f1fb9d045b03fc48dbef1f6dac552a4a7c34429f856e2bcaf370584c 3044022034dfb0ac35d98c38213b35b6bf63efa78d6c4bb01491bd9a75de93444227525302207a15cfd768fe9221138c0138a2928f659594636aa218a26f42e67275c2505377 3045022100f966bf55609a88696b3976a66b90b977adc0603d93bfa8b24fdd514bd3b2c64702203b39a7c63b582b5f1789cb4c9f0d6e013c69d3ea22e28ae758b03070112e1d62 3045022100c4b04e35ab9d962fb61ed99a92afa303af3cf8a2da77bf0a764aaa5eab0481160220134daef272ac997a368143c842e9e0f1da74c986910e650a9bb32c10a9f36708 3045022100aef2febb01802a499280f6468d1cabcad6647126d0dd2c60c1d9c2ac4ce0b34402206f2dff1c7f8d5df4323c9773c8b17996cea13a36ed30319ed909723c9ed09648 30440220198d85b64af4adb501f521ea1bb01b3bd460ccb4c87dd28519319f7209aceace02207246830d654f92f5e1028ae87f631d165e12b22a58535baf67202031c07cd5d4 304402204be21af0c834f7ea3179ce7e16236286957a2ecd5dfa0a09b8dd5ba4bd396a0a0220788366f9e6605281b3a8600785ea3bd4267fe078281f1d93bec293531f7f1752 3045022100c3fbce01f7aad78b4f32133c1686ef93dfb74de6d66da75f1144ef768cd80a8002201410cc923fa9307bed5254b9de04e65c6d89c218664176d3a9c907835ca739d4 3045022100c916b737f325b28124eed708c374dcede6059a28f16854807945b1e1f0cac5f8022050ed085d3c0bfae67e9bf30a285c4b6648f691166a9f44ea84dbf6465066c343 3046022100dfbb0c4f06f66268f7859c4677dfad76e7ec70dcb7b6e6e32ffb138c803ed3cd022100daa300a24ec3a03504cc334a5e62977434dc43082f94f0ea904691dc8bbd69ca 304502200c51ace10550c1306d2a236bf9d7938c25508d7bb47f750c71a2172c2094bbd0022100a856a6f6099707d7a865644d95b854f17b3ca96d5f3254206b5dc1f89980e889 304402205902a200f6844caff6757b8a75cce4039d4c4c0a1c84173ea163f6d35f38cd4602200c4fd706fd11688a8c0deb5a78776e5732e39250c2bb629671488eb1a29bc4e8 3045022100b9e0ec4d302d34290a7f4fdd29ae0ff2f047e0a2cddf9e8a32bf9ad0d5d5bce102202ae8e640e04e8c7703ab6699f8c5b257422f0919ec933cfb649daf71525728ea 3045022100f2acf4260f79c6ae0736ff066d6d17f88bb6019efb6117d6d7df5cff0617d545022004c060ba6a477d999a3ed843ba44dd6ee86f3375cce416b7d9c8cb9d8ed94d2c 304402207c8dcc146732751abb3d4fff6c37ba2abe79ef28603dd685c9f557025987758702202587dcc2b62d1288163f9555e0fe46e09f8cb14e8319fbb0f1c72d1080939ce5 3045022100b8543eb94931673d09620337f1406c8e4182b1ebb54403376bb0bdb24e0ebc1702205b8976c569c38021a75e3a79e27cef7951d5051d17e5cf300f8b3b2f8e6f6418 3046022100f5131d90321713815506dad1594be9ba292fc15b299438b6439d0a36b3c8c6d9022100f1a30caa95728b228e1cd4e2e8cd6b111eac45ed72bb238827423eb82db890e7 
304502204329e8adcaf8581d8d86b919f0f70acbf28fbcb732dbf1fdb3740adb70d9e66f022100cf4e14daaf3ac7819a5ea69c1f5c239211bcabd8464f1d07f070236273f5b8e6 30440220420fff69b30e3f5d0746aed1e13bb100d48d3b4e793f47b787abb9784f74be2c0220476ce40e7c99bdecf946fb09b92bb6325b28043dfae81328b1acda8c556125e4 304502200a0dc8254569c4ec03a7c57cdd57f18bd1b080067d8834b1d1b732c03cbef3a20221008a46fc9f0c177724e98900582c0e6bebe601295a604a365f050b81ed90215424 304602210091f5f84ff8bdcdb33f1cd3cc1d538d9fcfc8e71474c80d54712816f17bec8f8a022100af69608de7b445b7566ac4d0ba257ad238530f9fc344176ad6519643d6509cab 3045022022841739def8845141ed4d12a2f9f7a49061ca87c4fff1c7cfe78e96c5242c3b0221008e1fd4978b3fd2a5490571ac2eff0295efaa0e6a53edffe766a1fb2151de935f 304402207948ee8cbafdf90353ade73fd0f6f07d671daa472beb7d306810307b9c8084880220170cccc5ce25cba13dd36944c3bc6c0e065a6b14265ba13d0c2a28eba0c3d57d 304402207f747f3efda2c9489c66176e6e05d3907e711f2204511a1c0c0d510264b681620220598d2c07f7b02a37a87966ef5bf4332bcb9819606c076b90259b7ff89b844d40 3045022046e94bdeb9bf4e05213c592ede0b62c302d4370417bcf3f36a933cd86afadf5a022100fa6bae16af880a035ac46177194afc50324c9b10114516ab13242764865f1deb 3045022060cf78b27c101713f2b2211b395cd7d2317d7ea1e6e19a1ffe22488e182eb0b8022100c2af0d0b253eac867685df4a56665be1d22cfe2aa2fb6c896b43fc638136bc69 30450220465cff07e60537ffc42799844375bdb998bff05771e98165b61d9c415dab2bd60221009169b918a938f8051b5275df57aeaccf327d4bf921a53cdb04800842fead4794 3045022032212756aa19f47981faf177ce1a49efc88ff0c10cf1ec234fefd08fe89e8857022100ee97c1dd85881e8a5a4c9b2a7c2075fa424af2972e0143922dc0a5a6da1d5a0f 304402205f6f1b0b1d4da199ae2d06e4bdb6af53fe959b8e89557dea03e485a6b3e7262e02201edf91ddca38eb496683425959343a7cc30a67be8a1e803c71d6dbe0fd9fe936 3046022100d554df2b7ff1a04fe07120f495720456b19d3bacd314d46d15bf1e91a58a2915022100a034c3458ed11e5356fb3a2339acf5af0d5ee4a8167acebee3bfa00b0be9943e 304402201a35928569811b3cd84b1dbcc990ad3c2ce0e63327ff6f70d7784acd66b9a060022059893951ea1903ecd84d66d9f7c49884b5e46ee76ea814d0d944afc27ce9ead1 304402203cfcdd6e5126c09303810d11369398f707fc40bdc51e66275f9c5922c719541202207bb7016b91a4dfaf9fd07b8d4a414a92b0d6e1b5e271670345e2ae6318ad1df6 30460221009de96dbf15dca3a32511c8bd7c61fab59c9415a3f516a7a1fcca2080f6af3c77022100ee6e9e9431f6252a62e5d6953e1c2d71199cac79e6458869f14300f9f9de97ac 3045022100e69355d5ab022d12a6c4827e09df73e4b095c73fd827167ce3bb77c5d68abcc20220154739f928160f52e4925157107f121f3539f2c0169cf854db5885f46fd9a14b 3046022100d227dc46561fa4d04dfe7f9670ecbf9bbcc8af001cc97482a24d7a0eebb7c4fd022100c89e4951c1cb5b4892e745c301994abed69fe206ce0cf622b04be11066e914a3 3044022019ad26962f38f29e22e774a19506e8b9a7ae93ac979f2b70049bd3ca4664307d022001a07baf88e7c9adbc3c29ac071975a5d2724372c09c353e8d2a940bd5eeca56 3044022002057e855004e3606ed7849059e3c12213a47dfc9e62746f75ef094163f7f36702203096973649034a0d5379e47b5911c51fca02b29d6173c939892667bb4334aa50 3045022100df670fea36f1f94a3d89b275d5f6473163a5c4f906782f1a2a4ff93d326a851a0220524c5f587e76bf460d2bd99d175129fe02cb55d9c9a20d270d5d89719b1034fb 304502205ac5b9098bd715ec574ba2a2deb786e6319d36d1711e289dc5fa53be8ad38db5022100cca38a498f6738c627c5886c87fc0bef13748868562e70e57028ea78e1f55a8e 3045022100954f08c307a2337b31695f561bedd5f8b370261094682b4a657bb840875e22e102203745b5b272bbbbe13fc4c387f6f9199cc57191e391593d41964e47d7e8cf7ca0 3046022100df626088f3fc53f6ef51b80a5138f86cbf78c3d855cc0bde915f7f6fbd16cfab022100fb04f9bc13111aea8c9cd459dfc6ebe064fe1a1336e1a81c21a0f0024353aa5f 
304602210090de15045b24efc7edff200b1b1a994fa8cc194859c114c29c931a22926fcea1022100a98e734a28511831fdd4411b87b6ec64e5416338083e6c53a987700295b96ad4 3046022100c3b146b76d813d34b616c7d166dec49779cc0c8d306c67baaa816dc037fb1faf022100cd2ebbc6cad438d739c860ac99f243a049560d45c6b8fbebf02492f61beb6164 30450220116cefc836fce4f9a57bf7726f5d9a32824edaec7f6ad38482c36ef270c48afc022100ef1a13bd9a4ce71772a208d5bf8be89e65af58f81a58f138f72884f48da1ae60 30450221008d0504c1ae64c01d75d1fddb93a176a551a6d3878052d8f7146dbdd945d273de0220398e9fbb8ad93d4d8086c60b4c2e17a6277b43d5052b0ea97e0e251bd52fcc62 304502204acc5ecd2f11a52e10e5ed81dd8cbbc4effa5cf61ecaf291fbdea7623bc616cb0221008010df885a889168a7f47ed03533288f4ffc93bfda35fd6f9066a21bc2ea2944 304402205000efae7b347d01ae6481b1762c69207eb5649936d506ec359d541ffcf5adf202205f55a6e41aa7f2e715ee77c8f5d3833042be871f3fb9319f736d36247d79a757 304502200eb870dd5d207acfc13e615eb32e6d21894b3b2130f32b09556fd20b5b2626c5022100b62ce8a5fad0566665ebd23ccfb81dfaef789792c7f5853868d5d84d134b496e 3045022100f2c825617c503393a80c983486cced7d5a05c2736c9b2964796e829a24abf7a502204e3fbce7ec80a524af7c5949569bb8c46f11a1ac20f677fc2d50e4e7b1113fe3 3046022100d37d88aee492137763cfcc6805561e3805e112b8f35fce39b6e64de75143e718022100d3f6c1c195bfed2ff0c69b6f59f7132492ebeedfd6837f8edd6d8df105602f5b 3046022100c7db414fb6dac128519d121f89bb0d6b54873f919bf51f77337cb461a257daa7022100874b7cc844bb8afa8e45727b03415148d914492f46617dff2728e6a6af512e8c 304402207ed527a86d68c639631a6350700747fc928b710c7bbc75bebbb770622b708d0f022023e00a8e0d93134c99bb504bca3b7afce615990add4c55358a76facb0ac67e8b 30440220504ae372e22c6fa15a070f9368b9819345dbe9a39648dfeeae2638cacbaf34f702201d4499ae48471864b101832ece59aa7db1bebc68687a91c8ecb60e2cabb63b8f 3045022100c3fd83e504046252049ef8c356dea51ca05d71a1e518e1015257fabd367c299b02201fb2fa21b3a971d85a7b7acc6a4f5c0322a27480bcb66d10e7cbf70f24a5cd78 30450220127b83539e572f106477dc2380d3c225cc79fe22aa4faf0388317715a0ffb62d022100fe27b828157948ca3cb4fa305182db257994e3d1bb84f5f71d52c082fc8cc783 30440220362ae79448e984d4d47eb026d436e3ee8014a32059c5815f0361b0ae0149e2ac022073a6003c7a81ba122ea17ef3f88fd8297d17210837595d9e0d5939e4917d9fad 3044022041cfb0c1a3543604894ea406a7fe088ea73fe2a1a110463b31fc784b8f2a8ee202207239e5436fbab70450e0dca876bd9454f4de1f60de418fc7e73df5b8c46c966d 304502206109fe9b8c9ef63faf2e8945ce60a67d5a92f02e17ae441a66f31ef62d5808c9022100ed9891307e22a3cf58d8607622018ffdac6f384a7cbf5aae40025ed33a2c774f 30450220217a90c145429db5d0476c1efb8b053932c8a6bc1c43ed118044f9bb340e999f022100872a62adea380f8a803c808616ab69205661be365717c587a1cd6034e7c35926 3045022100f022c985f02a6c30bb072f333c23274f4a5c9b2e6e8e916a4a48919a6ea7cc490220707df8354601902de437fc9f0c491784c7d22d0be4d9469031aa79f50a0f8da4 304402205064d4190da7c2fe1d519f6231f840b317991985d6f23034020737963efc794d0220262521204585df825f433333d7c7f3d3ba890163083223d7bbb5b0f95e05570b 30440220544b0a44f1021e2fec229104f67e4f626d335a6cea9634133dd6bff3e1163fad0220226238b297b9928619e2c93125a5f44eb4f94992ce92ab0b2dc6a2027d9fbc79 304502210083c337a5e0c536d0cdc45bff12d1630b53f273d99b8c8cc2a20929ef30478ce00220212c07323d7cbc9136aeaf44031579c2b98a88d21c272e7f14731fba8fcda085 304402201ebb1a3501340110db3525682b69d706359829daa4c8b54988c0ea7183a836a402205f7c328744c2666b9abfaf75bfb6b484b3bec1a142e2c4da5a83b97f5485d521 30440220452aef6a1b186711da34c008548759f14c2e31628a6420f506351955292572db022020684ee543b46a10836d4af7e765b30c2eaa0a0c2883b59508f0340e01c90fcc 
3046022100d0071812b0221eebfa1c33a2df7c620565d9bd1036e941a72a74f6111c89403c02210096801fcdb9ead750fc6d834fc66bed5f183165d97ac451f681b6b2b7166a9912 304402200ad62d3ce4e4dc1a31f1b5d479177c2361e97556be89c2859decba752122ea7602204136475a91e08d016551b95fa23e3f3394b961c39eb74de6d0b687f577a455ff 30450220366e6258facd32324b6c961eeaf1aef74fcb8cc2beb5684f704c491201bdf972022100d4d34bf851c1e678641890dfa1505cecae0c3e48dd2f5fcc8ed06c0054a4a25b 3043021f053c1c237337e2cd79a4538fcb52a8a7ba104d820fd9d892e2ca4d5bc878ec02204a6c34a9081d5587be493f7646882472b334a131196f77b26094eb538286520c 3045022100bf5ccd440f93df657dd9d9ff94623387c230c968c335df213d7c40ba89197ddf02207413d94b5e0748243b963a270ef3c04f567cb2276c89f0fe2fc368014ef38a0f 30450220374366fcd429c5a2b32de97df54795bbb70276a04f4067237137b01b3caee377022100effe99ea78e4ab1b95b59bbb2fb3cd864cd36561de00d2ff15d48fd19b9f9ea9 3045022005e5d5350182d949569e65007e9ee926a1007ee43c6b219b9acd8303e8d47f11022100826b68625cda4fd7da1e02ab3ec5d55a5fe3fbcfdc3d9b17a83ec7add04849b3 3046022100c3d5bd4cd7a98c7ddfadf3094ed287af32e295a37787b35ceba76793c5f3fa96022100c47af726c71b10ed174ce61bb1eba9ed73447e54f9a70ae05c091d142e0797d0 304602210093b586f072179626d6ba10fc2bca490896ffd0e89b10bfe47ef96f033b7cca9b02210085fe836005ab3896bd4746d4b9d5a6fa4b5892ef6246ee755b53e06e895bbd18 30460221009259b8a7ddf755f2f0a5f39d69127a466f6218b3cdfcbc70bcb92df98cb0fa27022100fba5469ca1c722277c8ab4c0159e93d2f5500845684d5715f878279d8158deec 3046022100ae66f0d309f2c53dfbf16d63bf3aeaac57d7cdf860a07dcad98bb97f1283f51802210090daae94c40e788f543c323d26857c7ef695c9803fa7b507c85d402aeeac07f9 3045022079562ca0d72488a97edbf443bf8ae42274b296c2e91cc3c7337724387ccf66d702210085378a8db009125dfc2fb83f2e7f3b1a8839bba9569129d4648c5bc4adf8c816 3046022100af7a1601f5abcb11ffd48acbe9fc49c2bcbbbdaca730ec5aabde3fdb44afd32b022100e258a89f318d767e436a09d7a9befa8ca8bba992368045a08ec446a228191f00 30450221008210244c35d7e50f275d38131c7f3e693d4fc691b0927cd08c078fa92d1c16e202206be9e66d8cef096b02df5e650ee7b7f4fb0e1f314f8591b3da58e8e64582ccc3 3045022065aa4eb02bd5229dae05b31aa0d778b78d3150b4cf2fcedc3caaa45c9f85602a022100ce32407409062d15071d7fc3e6d7feea3636068e978b3544e29467d85080039b 3046022100e10547e64b1bc7dba47713ffeb2f7da3fed9bab73d02bf672ebd8120e398be65022100807c93ab72c006ad6916658004257ea6f30303e4308706a8c6c2644d1733cc99 3045022100d6aa1142538c54619925135f8de22ba11c857ac8e7a24098e0affd197fcd68b002202c85240e1390959254e24280c8510268c678a2a7d5c711e0bd3e7d7e80cd9769 30440220276e2b4da8aa22047b2fc62abcc83b456670dbda079b7f1b440795f3ba9a24d20220251e6b892e47f4f3ff5d4732dac6b2b870b77e6f29284f09f4b877b548fab6a6 3046022100833c8f2ca527049475a78024cfc05a98c17c6f1325e7e9b36bbb76409ade988c022100ebce04eef195dd3092eb70ef087179a9016ae72b3eb5dc46233aafe48a48437b 30450220088f0dfcfd8f3b483c660d052517954a94bbb9162e45f5def3bba455dcaa1f6c022100bf1dac66018c614a826d219081152d68441833a0c2fa183ac813baf84338bf2d 30450221008621c3fe5fe3114c86bf8af6710118be3d7f10f297f57dc3f99185f6b4dea0750220389e008bad7e38c53404577bbac4504c74ab6533615f2d51a5819d87fe19ed9c 3044022063aac7ef9b6ad73dc538557ae9907156e846cc55fc63f7af400a18fce5d223fe0220485d57af2d8d5137ade9d72de9f177d217c71caa8b81a3e6bfe2bf2afe4e9d74 3044022002b4a0f2322b87c77d9f2f2b3fd8e9eb94f7c8cfc1cc1922768e9da90e05436f02206dce57ff78febed23f5594fb5f64e42e779cfeed8f708917cb9c68223588d423 3046022100ffbafde5fc822ac485f2851605c0ac64e7b68c6abc32b3ae78052d93de390913022100dd5ea966dfcd2e0211e78cfe52b0c0becd960b8484b422e4fc84c4c29f5ce4ae 
304402206857c84c098c118001c293dd949d93be5daa8cbe93f917f2e0817f84d7eb975402203834a864780093569232868177d4bde6ba318d511c4cdcce3fd1621409c27bcb 3045022100a131db029889f0011fde1d7e2b9b842ccf99fdf37d731c9177543d6063ed4e8c0220796ce2865a52385f8881647f7a2db44c3e1cf5def6f4da3709f0258efa354055 3045022100b422746e3ed0c1fe2a77c70de5b6665663d81b7bb73aa20521454c491d0a8f1302206036b0a8bf4fcab5c3a1a3a733f26c8660a94ae57d0d08f91d2d76227c701fa3 30450221008e0e38b8e55c3a87d271d45e35dd6736098433e5849ae2eeac5b9afa640af964022019e93975f9d95bb4491f26900a85624eefacb004239e5c9968ace2e975193b59 304402202b7e6e632e030290057cf0e84bad68f042281df011c3df6da04a04d97e34cd460220022408034b1bdaa9d015a5e05e000e1f85bd75c32dd3e173263145039f1e82f6 30460221008033d51d7669f9f507d840a29d9c6c6f3ca10674d746bf25e3a576872a284198022100b56e0fbb0aa9b7aaaadb016a01e13abe840359faf32f9bdce56122c0499788b4 3046022100faca21c2008167ea2106badabe70e415634a371fd0efd82c88b5d5d003223513022100ec62a5e5dc5a00bdb6e5d140b6eeece7532e9a0dc641a7a527b9eb392b1c1aa0 3044022049aa3d952f47bb6d8daf7cf358122742a5053b055645dbf31e4065670596dc1a02206b79d627854fe178a3ff86e7b5ca8c4410f4374503016c05c4d82d5d6cb50c3d 3044022011cd727dbe42860f4313ba2de3d10c166e937738a77fa9c80149255dbafd53cf02204759b84b24591fc07326f76ee1b65d5a0573cc0e0e1daaa52596c7fb0980bf6d 304502201061a506f4bffa6b3ecf3629087c27d298de5ae2d21198e96148788c8be745ea022100cb00daa1eadf7a092129ad213197c430df872d251bd3b2418dfc4ae7d15b60d9 30450220588984f5f87b1d4192f32c442e24212543c975a6bbbf8d2f7f59d573d65b95a0022100cb2b66053c34f37269972c661e48b7b20cc3a3efb880b532c5d3b78573d63c5e 304402206e6f38152f79d3f35994c49d688ab12fe72b3fbcdd85192a6ab8746d539d1f2e022078c2ca812f1083e02b38681ad295db4a1f8983060cb67ed7c261b2cf223556c7 3045022100f1647d81d19ad947cfaa374aa55038432641804f457c1068570eb8377816d807022003e3915b12c9d28afda7cc75dc3dacd9cdf12f79642d8be90c75dece806f8317 3044022036e7dfea971d25371bafa2827d5cda21f6b8648c591cfd2e58ae1b35bbddfc01022055f9ab74cdb09e7b1cf74c65800e4ca1e67fd000a5d8f724fa4829562da79f81 3046022100b3930b5494b831327fce43fbaaaed4d2c7ac0ec68ba2622d1017a7ea1509e72b022100a3eacecb8d9fba54a17a7e37504348c2a6f66dd74496d16e492abedced45036a 30440220135878e1fd987d8662858aa318be69314c14cd8eaf4f8a958f5ccc61b6248ed20220627be4aa36a746f5d3f52ce2fe69096d5fafd798f2b88a4ab75eae44fd3e3c3b 304502200add8484cc61d5753d49e3c787608fc2757992a1d94a57fd46c9bad93ebe2fc60221008e2a67344b1451c79456313222e8c5a504ecef336b661a26e705e5d409a88831 30450221009b28792db9422ea88f9fc0a8cffc09e4a26638e3ce01df1dc4088d6ca113315902204b0ca5cd84ff3b3ce405e68ec10016df1c9f60cccf722bc0dd8dca49d76ca6ab 3044022079e531427bdef53339065b41f8f6663892fbb7cc38196c3905867ed215e26dec022047355a2ad6cbf42605d8bfc83e22f4e576ff2a0e98eb4ce82652ce9b166cb1c8 3046022100eb8807e4eddb2508800609eee653bd05a9a5c31576ac1c4ad12880db14112caf022100b6cf0e9ad18d55cb1b0b4f308072b71327b01f0e6359070a45082901d4e1630a 304502205cadeee6e2e1b94f2afa498acdff13035b1d1ac29c6112800b6f1509d324a3f9022100f38711712f7e28f884bafc21ad5b9d2b1a5f49e6d6e7a48f615a9e369cbaef39 3046022100c40d70945365f520d04ab870f64fe2f2f19beeed87d28058f5953be11ddb76fd022100c1792b05c987a8a0438075bfe828c0d437dc0858bb94bc9e672f5577d965a3ae 3045022014accec34c4bbfc787b4a86386ec6dfa619617024850b27fed30dcc1d2870d52022100a4fb4877432168073e98323ff346e70b8bf23a8024787563ce62d1ff29c1b3d7 3045022100c55f6f15668648b0d26f29a56e03bba41b381a68dd1a93a5f41784a77a25f8ca0220128e50c23db7278dbfab56d7be0eedfd1ada83dface14a707942b799f15f9a5f 
304502207de773fc521e67097c1b2f16536a0977f72c2d3bd509736ee5590ff9ac1aeaa4022100eb5e8d16973d605c4a9558db262179f25835550406d96fab29144c94112347bd 30460221008e90ea6af8b3ac96c446f519fbd7cb61c8c047a2b2b4993f6d2668c240d74f0b022100d5058164718ebb3dcad3861f2c79a3b308f2e5c8e83f059f5f8f8bc8f286c519 30460221008ee9d899ce36c138eff5dcbfe9f3b28039c6e623f12ada023bab313574104ce9022100ab0d8a02a2930e959d0e3586835f4c43aa9d7c72c2ca6dee9ea629603c22067c 3046022100d95404278a581d390ef89f7ee9cfd77278dc5feec3b2d63a0050c8cff021cdda022100b91d6b9387a9820f650096d5ca2f3fedd971e0d4d3d61ce430d859859fb7a339 3046022100f68f7699f962a497b4d144e6b2f229e9a677a6908faecd72c6d2bef53279bd3b022100e74f4da7b2ef11eabdbaaac915f0c7415246aed3b81e8d252d83d9a78088f118 3044022059e305574476946492e4a6ad4737b3b1743700a637875ad6c4393e27099e18b502205585ee27a212aea6a1015ace5353d5eeed70a8c099ce1ca399058cc8f07361dd 3045022100b27ed25c7468b1369e29a11f8ec93e706d2144b957873c132997263defb3e6db02200e6b303ef5baf28041d40235569e399ce3261578bcf722f24465dd3ae5cfda6b 3044022030972ab691fcd20db4df522a6fef0e46fd1448e66e174f9d50794cad7744b59b022000c3888e8135aeeb1e2a0ff5b36f8088defb8a8174c223dd8f3b7417dacc59e8 304502207586254e9aa91547bc13dca57115d064922ce6125369c1d566959372ca445e84022100a2b25c24562c55dcee5d85466193c2d0674ffcc1db64273741d1703a215bae9f 304402204796d21d1dff86432a480ce23ce880a9383d6f236244090d2242be1bfcd7166b022033fc99ae83ba6a9dc8d65960510020eec93f0747e6dfd1e62fadc70ac1cac425 3044022028109cebd83afae84752470f7f4bfe94f49fc4774daecf74568529b46eb845810220587422e67c4e20693947b3ac192cfa577f8abf9537bafbcdbf8f1ca06091ae78 304502205c19082ddc0520fc6982a3e3aedacbf833f27514b909fd51d4e8a6dea8beb175022100f587d78c5ab283d363ae7c1237871577186f94695c97fae4cef0bb25feedb625 30440220307444d30743840f55230d8f1334d0ad091f90e69c6dbbe65b1af4a8108401eb022059efef27a139730e1f2da3e59a2840aff7954c7b30975abe3e9d9131f7d45033 3046022100b94ca33505115cf30f4fcae9a4135ba16fa6cf1b4dc56a91736023e1a534cd07022100bcacddf739cb75c11f9761867807eac469a4b86e5750d2dc6b92cd6a9c9b4ee6 3045022100d5a1a50728a165d5ba70d354d170c1d46bbd2c220809f6b419e8f278ef5f6ebc02204f88a38b7ac8af1eb17bf8c9cdbf20fa7c3ddb4d29eb8defd6ad754fc8360356 3046022100d2da22bfa4c4ed2df45de738ef69a6c1c6b44bbcfece2b6064c0035f5c0fade40221008c3d034c7f5f81022fda9567472589ff3ecef753414fff28216e4440c0bc1d3f 304402200bf363203c96880440142d8e3a286d018d1a9ca836e7e221fb903f4d9ee2c68302201a6e573079da2431a7b4af191458d18afc4437f049a5c9dae80a61ec0d7ab107 30450220173ae929c67cda1cccce4add54d5be35af30dfb543d965708ce9f7bea9809a86022100e306183b7e9b5e275d8b3306fe61cf10983c661bb6b12da3f09b14ca3c7d98e9 3046022100d3634e008d28f047a1ccd6c81f8fb84dfd4e56c401b3a4eaf85779b83d47a05a0221008de6840c39f1429dd95d4afce17025cd5a144c77de66b6882068e95b5fbc3b21 3044022060e1e9356d502383b462f3d048e76f5048b2b3c504c0035dc6ded974cb10354802200e752bf6a8ef947b547645d2ba9a16c3bd47fdba02ee6ae5354d1d2f52d205f3 304402200642306bcc119217f893975a50b52ec03c97f211444981c97af5b5559339cbd60220745fd46590363023640ed402151bd3c92a9e7f512f672a3b301496e79d4fb2bf 304502210093d2f2b542ceba3583e585d8aeb7bef3600f54e1bd6109211f34ee696632f66802205b7a77f5cf8f13d8203ef3a14b547d42408244a7bc5f480407f4ba2775c842c5 3046022100c43ffaae37e1a992998b85e115bbc50754bb8ea94540b7b47012dd0ba689668d02210082ee4735b250d36f45457995b33779389858592080bfcc90c36a9cec24bc5b3b 3046022100951e7d5bcd8edf1750c4a607ef8695955745da5bd48db83f553a5b6c683c99fa022100c8d600f6c9ab3b8be7279cb2d6cfa3482af7a2716f87f49041b5cb3c20c5eb4f 
3045022100eba4359be6da3b95b1805cd3b189c111c25f85ff450ba5fb76689a6d052be9fc02201ff8ce710a726675436d000da8b189e4fa861558d89a2bdcb2bfcaae98b3ff7a 3046022100d9719bac6a75237fed22deae0ddc7e0116c2cc4d55d15c301904bb53f22f5493022100d406a1414e0bc1e29352ec74d531d224c1523a452392773ecb811ec0d605336a 3045022012b652d41717edc44b3a6db72aa1b896ef5c02e5b6d6778af00e03a2b01d06df022100f9787ba65a9445c222f10bcbbf0a463c48ec658edb6c27cc73ada8d1e882cac5 3046022100c10eef6c27b448f269a73f64cce6dd4d7ddcfd86964cc77db4fc1c256fb85451022100b5c458430d27f25954e3f707ef877afadfb4630a89172ccdf580c2a089159f16 304402207e1d2a388f52148bf76029c428f7410d182677aa02d1a62e1fc78f06d900c6fb02204c3d23496aa66bb865be86bc84c20952f8aa42a112f5143e9e58688ec84057e0 3045022047dbfc87f8dea7ed30616ef13158870e5b51b9771a3675044178c6ab43f2184c022100ef22d918c3bbf6e0d82825d638dad536fa3fb8e3fb127e1a3f5b9a8c73a8fe2b 30460221009e9d9dd48ea59c94c3135e53b9c474ba8e67203c0a1ff4fae9d33c2dd674c119022100a7b8ccfb192e4b6dc54b4b2bdfabbb9ac405ed0abe4e3255bfbba2acbc2049ab 3045022013b6dc045296fafdae05fc6ac26e7af0431aa6d26cbcbe9ba7a8a8545e49b765022100cd2434948979eef4f97b18c66ca89a2d8daa80b4a5e265ce9f2387f783f549a9 30460221008d633893f1760620491950947859fa6e05e8f3b1c2a3ec601620e817f847d553022100f75b7d133179c83e8dfee3b65816fa2e31a6604ac818020e9734b0bef4109d84 3044022072a97aeb54733e66fd09bd80aab9fb29b041c5099698588355acd607c174541302203e0c29db141b127d7a733ef2453c6fcb75485ff470af3a5a948a01b254f59bf1 304502207be06427d7a73634f55d5344d3362eb2add1932d58c3d702c578951a997a2060022100e274f3336f2ca931ba4435e6df9ef661d71613e9edc1ff700b8077e65cbe663c 30440220500c19b69ec101bb180c5e0bf1b7127750391c857b57fc5f6d7a87062e831c2102202817386cf574b3b33798abce716e4761dc10c761c66539440f13fbfcee6d5c49 30460221008dd29c37dd5ffeb98d4e0c68f741d72e4ee0b3af86d5ecb25b72e4a142be7a37022100b1a2f71788226f4111aef74c172505152b2caa0b4521173f334403b0eb2f05d6 304502207389083f49fdf61467f74054392b3dc82303f328b27135ce2bbeb1e22ed42e4a022100e4ec98958fe0ec8cf4ab05540d2671e3e358ee670c84172d4e87f69ff5644685 3045022100effce21ea9860dc817dd33f6484d4b60a214831f9922387e12fb6562206ca8ad0220103ecbc676f883f745ef1329f0c12e652cb3b0410d9e068eaf5024bf0974771a 304402201ec8dc8e87c84c963df8469ca9e8304f0955539f74dfde519a562f392acbea86022027d996f7eb7aba61296963ae94b62f21abb5393157e6aea030883cdeb7c68fbb 304402207cbc6b9ae3d3583b4d70545463a0aa72cbfe06cb5c0452cdff29b9114b5ab75d02201c8ef3ccdc2cb7cd2c6123060c2f64f0ea96027b82de278c3183eb541faab89b 30450220428b6d3c00a32531a3371ea77ed4ccbbc36a9d9dc4391795a52c0d52a2435b48022100ab447fff2672146d58b2af0207a306ef1dd49bcf457da39b749dcb8e89649c51 3045022100af722bb59707c24732c72d4430234af9ec693cc4335619ed59dbf72d229fb12202207d1992fec8df0e9cc2292d2d10e6f02fd2a5096d49f054582604235de548067b 3044022068ef642162ed20f669e1d86ed4a0c58f3b094db1cde5ea0c51a721b7ef98b55602206c0e2b12b156f1c16c1a40732f5493f27e174f82ee5f217b22b469c9278e99c1 304502202ac63d138c6fc1b904faff2c507051138da0f25cd745f8afbf1e9a98fd5b48d3022100a262d7f2db353624405222468a7ac0f7e8e8d6ef9a523631e8ffd666bd2e8489 3046022100ff5a0ff230a54d5f33bfce91befd40e76f3fae6a5a45ef15c52f2a31926864cc022100a8ddb60d309e34492f4d2f1096760113ca7ce106d15964e5eaf59adc282d3749 30440220285c2bdcbe6523823d333ae9b03bf4f7e18fcc525fe2780823c4db69227ecd2a02201ef3bdd683cf6aa73c6324f4fe4f2bccbaed82ebe07dce0c801b42e8e3befff9 3045022100837423a633801d37443a1fa81cff980aed299cbb8503d284604cc69ed7cca413022000d24f41dd683f500a5c7f7e14ed26f979f5707318d2df6370d86e911007b895 
3045022024400b6858221568680cb3a7514512baf331c652f8655c1d227326e507ed4093022100c30add27aa028d4a85fac9911e08885d22088d0a7f235f57eea9bb03c3d41121 304502206034c0b375cdeab4b20744ca74bf6f8c60f49860802269b651c133be3f1b8300022100b2e485ef56a45b93f15ed4b2ab97f32060a08815885e37e9c9a0ffe5dad74a49 304502210088b601ccd35d545f69d1ccba8f0f0ee85793639095425c1e38d615404b5df30b02202686fc8dcf11438aac1dda952b0b2c8d48c4efa9727101d9e3d6bb048ca0e220 304402205273dcba4df48919ccc90e9121e82c767d37348d02a53d40b33d32a70ab96d4a0220393428bf731874cb674e34c4acbbef9600f32d47f254adcc16c0cabba04de338 304502210099c5ab067cab5c6c2669dace8b5e5c44e7eb1a6ee60206b230aed738a640035e02200572c1a16372156477e1f51c2b16fe4040b852e5d12fc194ce1685fec1fab901 304402201d76171cdb77846b4a67b7184edbced9be15c0ab36a23e916bbdf3742c556b2702200f1616a5f62fa3202c95648c99333f078378d29e81623421c4f59d898d87aba0 30450220628db06bad8e4f84a2ac266932ec2353e93f5f65013d31cef073f90ea99ad4c4022100ed4dc5769eb47be747a3dc91af60f4020e7c9b84ff48154a5ea0fea36028740d 3045022100e733704218fc0d42d863ffda42e9c4dcabd971103a9c89bbf82246d7fa2958590220762fbc9708ef96dcbbd5cd4cf49b3caa1284732fb87b0ef69f303021823f9791 3045022060cb7e169004993401f59ee668656585d84cf89a138d9860802b1c205c1a143d022100cc2bd87b24f770564dee165e8be30d236519c3a658c78ecc7f5990bfe1e0f407 3045022070d0468bedc54312ba7430d801c3f64c20e0d12b34e221edc83d2ffd5fbc216402210091794a3fec7dcda37cbf13ef64d37ff0bfb9150b02c359fb895d6d8a12ea9f89 304502200c246e1499fd27b4a4c77cea75adca5e55ef42c730c4c708d99c0a9f093018600221008c7ecb17882de1eae82407bf102b05c663547e888d09fbf664e7629fcc77d03f 3046022100d0aa06ee5e9274b10d5ab01e15b8b13e87eec50b3b03408bd2d2481fced02e92022100c693e80c1ba1fa93ea683b9ce20111d2de8cc2ac7ff6627cba8f29e79996d326 304402202ede67ad109e80248764448910e34c3d98b6da4f6343b2e57e3697d4f4e044d10220151bb2f1c768c12ff321d82d000c4923faf836135e3752d50e275a28344648e8 3045022100c0fa66d1cf8f36b4e7e729c351e52ca2ff1c74b21b506aa9cd8276b764b18add02207a81e1bede735bcfc97eff47271bb9c177d31f5629d3bf296cd966be7c46604c 30440220183aa1914bf630a5ff6a28036f3c826b5919e8f2f0c5fbc7890d00cbe0ba5f0202207b3a21e1735238385027d147d9f1fed40e5e96e90819419360c3934589df464b 30460221008a9eaa156c37f663309c17af1c74abaa469ce60c68b9f8524f64cf2b1213ea4e022100bf86f39744f8c663c73894d12d32e07675803ceb548dea85a156d16f9ac40fb0 304402205b0f36d43d2b8bb664f02183f16f91bddf5197ea813f99e7b0c78db5c5728207022049459a90b5bc91a2549ca3f9ccee2a8756357938a12380f06d5d152f31231ca7 3044022050ec84c1425ff25f4f59a73b43b2f9ba18930229550eb569142279cd96a13eb502205540f735b9ac93a1e7f671b086d4546d5b1a81016535d712eb4e4a2da0f6c9f7 304502202eaa16216c9c0289fb3f2236e12ecb1338d861ba73a17186a66c574f6a0f5567022100cd2c3ff1b0baac40361b22bf2ca4724577fd22b732a16441905fae8cfbff1618 304402206e10cfd845181eae7ed5869c01d5a907b5aa980e79524dbbcea4dfb5b3ccec9b02200eb07ea9780e04ad75c0ea49ddccf9abfa04b0659ce23d04b685369408814735 3046022100971424b4d9408d4845c28b3cf08a9145e690faca38dba805c189b951ab7c0a2a022100c6c796d1d76f82932ee3f65f815341922fe76687a54aa4888949415182a62fab 30450221008ebaba7f546101a682ae1beefd6102ea3c25db9c0f723eff9d3b856d183f60ff0220025238b8286952b4747a4d07308bc683e4a181cb3e348f4c8c6e5ff8870cbc80 304602210090eedad408a3b921d4ba45ab0cec5d11a489542ae5ce3fd2c5797596791b903c022100aae9047974c38918543b245a5cb39365dd1f8601a6d79c80bb3b30bf23193d9d 3044022036c9c790362dc9037044842f0fb71fb677b28dabf022fa77d52471f64d34fce1022058ff510db22b6ad04004718faa03c47aac3d275d8de51062bd24ebc990c2ba01 
3045022100f5c7d73beb3c202abf6cde807d9f52b3c4073b7497082a73dd46f954b99598ce02206aaf9e751cd6e3d17e5d923bf0f262613396e207daed7e8ff3fb4ebb7c9afdab 304402202217d6c9e2a1570ebf423b970660033f5f8cf88b3ea17013efdb97b3e34a800802206fe61d32709a0fc0893f672c403106800b3bbe84d0d8aa888216ea33912b0adf 30440220512d7d066452e11c4f47ee58ae448e01e9c25ad1940d9471f25f9e27d2a0a267022056cd718825b59c50de27369b3710c51fc3245a22db3836f2f1bf914459edb083 304402206656e2b86b10466cada6a38b6fb3f4f4f7fe525a96748fa1e154dfaaf8c877740220331aaf20a733baa966821502d76ce3373be543019d33ad55e518adac141001c8 3046022100a2fcc4a7b0123385798960abc5d72e840a5a9272e03de4bf295c794131777c72022100d4bab58dec8e7619011d423920a83ea188becbc5c7bfc137ec9a739f4b6ca194 304402202c087f21d76f2ed87c2585a7cfa7005a360546e1b908c67dfdf6e41745d38efb022035e5a859451312c6b1a285db26fcbe7c8c80fc7da91bb5038dca87749f20a1b3 30440220728927a421d4d08e95fb71f8a15659ecdd2dfedbd1b23e03aa733b4609c78c420220632a6f4c2b881d040f8477e109dc698fe3525caa1192a5f7d6a803e5b646ba0b 3045022100cf7af99a0b8ba43fc52d006730bfb4617c2b9635ca9db9042854c2860297fd4602203aee751f4ad9729c24504c70cfaef7399ddc79839b994fb93b845f41c3ecd452 3046022100c49fbf905267aa9d4ba549cff30fbdd1892eb9624309044e54e036b1d576632902210084fb28247b275bcbbb2045a1bd3d8ec39465496c597b7959035aea3980984694 30450221008fbded3bf01b0bde8d5b100d9ff8b4281d1feaa847867dfaaa4753b1272539a40220029fa653b8baea953402154baec6c4a3d8f003c2ab0efb73816ef9499d9bfd80 30450220042ded147dc325abafb97c091b6d0b1de3696b85fe296f7f5005da0f2822de15022100a37cbf693c686baebc171d1ae0a13289c0b609d177442d6c787154def161ac7e 30440220713eb1f51b6bdfe06738f8790d7e3498c31df2765af460ea606a52ae3173024b02203a1334616b9ee91137028b5d9b844a2206c1548fd05ca5401027814d5c49ffe5 304402202a25f9cebfc31e87b202d0ece9466416c250f00381be4898f0247d4222ea75020220276d492118302b7e115ee60b8f80b9c6001a43d2fe8d4557d66b73c48baaa256 304402207e5076747dac2d55628abdbcd7d95dbd7af7a86e847db5a2661de6f9e76e555102202a21fdee34db3e9d8ca18619d47dd11684d06a79205e7416c913ca658b5bf2d0 30450220345c8e663314cf5aaf26f16843306fd30cbcee29aa93ad72fcbd7b1b0ca368fb022100d54c2e2bda91edb952675ada31359a731feb33934f5c0a39a328261f9e9ac863 3045022100b5e196b9b044db99d3b64489545f06f0efb1e96a1f200b8f347a6fd2c63f07190220346d98f68d9d64dc0532c0d5263bb71246f2efaf91540d41967550033e201781 304402205af8c95c3f8bbb193874c00a9b6d36779bd47a18b172e09c55d452b8d4f7e727022073f5e8ca04a1bca787248d38d3da45fdce1f1153037f8f824af4e749c620fd12 30460221009365968c8fad07e6bb6f50495b6e0ac621b66488e10b1ddc1144b18e96a6adac02210080284166b0d303ae8ce7c72a765ed9e91b9d2fad3eedef5a9cbba25a8ee21030 3045022100aaa727bbde5245307220c9325df82cb1a474fd8395e05b87279adf24765392f40220050cd5cbc63e43399f1efa9df0607a4fac75b8db645dce027f96604c88c9d889 3045022100bf13029443a07869109bc19ad356e4c92461e936ba75627d28d6498dcbee06df022079f6b07cd10c7b565daf85b475e59daefb96030894fc0c151372e23d5df87ce5 30450220570df5bbfb481edb9f3e13859413d593198300e4d0af129dafab1cb755f89d08022100be127ed7e58492984754ccb0a9d6cc8304c80cb76012b44b31629480f3143218 30450221009196dd3c328f4c9dcd6bde16401f5089651f2df97f8b370f856580f0a781b63402205aa656d3ab3964a477812a2ed4f2b4644b1521157e442d1b3b7b3bbae00d0082 3045022100c78f561b2ddc3a0c53c560b67de0ad3967fa9959f0c78832508e447c42f40b4102205a2ddb783533455e6863229b460cfa948f8ac29b89804066228868f3b01d376b 30450220165631a49fec52f45973a7d65fd6e446b8bdcc47cba37f417caad9827442220902210097ca455fccb493f1bb8992699426fcd0fb2cfdf7e06ea821eaa8963f1a369fe4 
30450221008c578d581114526b1220634c460883d457d4ad0cf5f8f037335b0dfcdb193ffa02206b0b5dd33eccd8d7c022653485c773494287cfd88b82c79e55faee619eefa904 3045022100813aab951d059172434640c621747d1a0053feab136aefbe0117131fd777488302200e0e1225f979fd5b6b36ef694cc864e15819b03cd87e64604983ff14bde3f80d 3045022100c46bf89482bd9afc59abf23bae12f595fdcb0b2a5b3f713cc82668b5c492ab4a02207fcb2ecc90e48592fc443701a914e0819affddb9861b7b35efc9ded7682c44fa 3045022100e24c22f6a9627d7ad13cf980cde88d57821d23d7d92ae523f214905414a41f24022050f5d024c8669983ab4eede63a331021dd3f16a6e82923e15ee3239662e87e7b 3045022100f2f9314cb38601146520db9fa35a679cdae6bfdb6383554a07f9378786a4cb75022012b374290ceef2fbacaf0b2635c1165f6c37e70276047ac09369d0947c973559 304402200d83a89ff697647b0149d38d9d2c43f0068001e330bee4909854510640a9f6560220161f769d9a52a647096f183135fbafc427331631682e1054e243cd2e84000d2d 3046022100ce85d44a773ca523bfc99c08ed305add68444fc4b8f3057b7ef8b93e6647cc37022100eaa55d3358cb08dbaddc364c8e626cc5f269cca855b9b9d9120efe402867804d 3046022100edc5d2be143387d483e03f72d9be5d1aac6832dc362b5e5cbb587ceafe38fcc5022100ef1e9c632edd6c49f92db74a9f195e699798cd89c31cd2b8bcef5b10156ead8e 3045022071ba9303269e4259aa5ca74973c1fde7ff3e8a79c321f3592571de1ad166dc3e0221009ae12d573fa1300f4085c0a3322f30c09ae7f018a65bfbd05e48d824b8a33c26 3045022100a6390cfa0161b479edf75486c7b6ed2fa5db9ad17ec726cd6054a445dfedc5f5022005aaedc350ef94bf15921952190a6d22952dd7f7be499502625e2e5aaba9165f 304502203b58a4c13598f7ea7f5bde9e8bba0d8a94151d6c66accad680daf0e79767bf9f02210085c8a397569ba17c9f94d97e31fd4407fc4308735f4bdc995e4d8cfd97a83329 304502206d2b6839e5a684bd0ef40ab1f02387c1928d63b1677b6d1eabe187876e51e7b4022100c73bd2ed4a5d5b194fca28ce9ea7a54b482abceef5fb16de52ee543b9ab47ea8 3044022069ead3ca5f184f22930b3d86068d33c7d9ecfdd0f3b55fee0cdbf92b2c6c1c1a022041e2367a3ab47a7d7ed047c8dedd8980a696af30d82fd321bed6850a4c7d18e7 3045022010dcbc84833839d65db0107da77679389d55b00adc9d49f680b68872ae23eb8a0221008666e57d7738910552697fecdc0df8a5e671db9cdc46530ab6cb5ed5c8d8e9dd 30450221009ff2c8b2ec628e480fe926dc76c74b09073bf8bd85fd1c5dc7d80493cc861abf02203be41c19ecf3952589e7788cfa5e763db54ab9f59f9798ed3205a1c82b45f5b9 3045022100a3dc26648efde10f0b32a183f0e83ed681f6e8b3843a91c1511511ee170b521e0220432eb0651723b805e7626ab6286e2c2a61d9a400674685375e87304f85a5f335 3046022100ca69fbe7aac2f90126bf92f8bb4e8735f26187e0fc559982050004568cd3bd6a022100a0a08f0481683a1e08e8d04a62908ebd3f903c618b937dd486a53f6ca30874cd 3046022100bdc8d0924abf0c6354fe92936b026a9ecf89e912db9080a0acde81f2875028f7022100d3178a353a9e74b9758c049557b33b7b2f5f6b36c2437ac36f6f3c071027138e 30460221009c584a990161bb05ff7d1b56d47824473bc7a25903a93aa9a67e83b811595ec5022100ca8c162fd9e63d64850a11b584f11fe324bf9652a1cac3a4467f4e8337733f21 3045022100e0676579ab03a8f74896bb194af85e38ba5f30093ba3301a0eade20fc1478c6e022055829d7094f6962983386400f0ab90efe4d9276ddd56f509e0612afdc4bb24f0 3045022100cfd96fbeee61e0483e35f1e2cf2a5f1b6ea45964bd8fc41683f64ed7cbd471bd022061d17d2b696f788e7df261ffd95bc3c19e7b0a35169374341e8a1efe53d6db1c 3044022073795e5d7948852cb8132b1f3e816083e9dd433a8b3acc71ca3431f0b4d62da5022003977424159588def1a2aa0f728ab816fab1646feb7260c67b0b21a436922b15 304502200d9472276f66b14c8e768a0fcefa52d049a18b4de3c4fb2d1fcdfbee80f1f05a0221009f146119eb35aff7f889ca3379605aefa4e3f6ac7af7340b84c291e039c8fcf1 304402204c92bee0026aaa86b071813cbda43a43c7aedf2562d9e2861341859278925d030220477942650db08322c336214958af8a08f24abc1f8c87f71b147eb1712f13a728 
3044022072f0515a7254d6e059445f2b09c3cd5b0b7233d31c701e296c1b9abb58a0f6d90220699c97f2f84089c9fc1549a9708a4a938441b9d83d4e6c28990081803667468a 3046022100bd8cde90ded0e923e8a6de86daa65a1ac45ba6c06286b5cf351ef0d7cab933910221008c11a19b1aec38866b296203cb6ffbae3284a213b543fc6899c781aef9816919 304502210090f0eb9eb54366908de8ff9a6415cc3ef9875b00718252202f8c095e5838f43402206311038425e4d46e386af750fd098df5106c7a70b4f382a123b7a845bf083733 3045022048096e41c6a9f6aa5098ea50e7e6456fca1783a170390c9755f8d5b6d66995f7022100d02345d4bc6436c26e8d2d85cd9913b14e717d42a9630c9694ebb392f62fef77 304502207f7d8a5873d589b2c97eb2914656a3a7e6315a5b380235d688a54bfe976d758c022100bdc312f30bf449201df4b41e07b72a2734a1a34f654bf6123f16d727d65a1855 3046022100c72619d44e81aaa4741a046d3aebd4b15150702022ec0392a13a41c277efdb81022100e94cda54b74f72d670577ed054462a0cd7fc030beaca0f90e5ca42f8e3442660 30440220528663fa8cb959947071ea48df295dd4c4275d8ba8ecc32572b4ebee39776b8d022070079602ef269b90bbb98c715ad8c37454c13d25403bcc1f96f4e39083eec004 3045022032993eec72c7e18fe2c6300a64cc203230e13f09dba3378ef9994c8fd269944c022100cc08f902109ce3793e8d4fd30ddfa0da111f88e4875ffa9b2ad4c7cf2f5477c2 304502207ea54bf54bfc36b782046d192766578c4016e5bc8003ae651d2e037468aeb959022100a28487d303efe4811895ad49816a59ca08a81b635e07ff91fb3545382bbfe493 3046022100c51ce10b466283366f4a5e6f36268a2200ae9fd8b184be8175e116976098620a022100e0c0bfb4050ff230743a695c175b73ceab7a6e61c2923c7ec7279b498ec76b44 3046022100d164aab1e986c2d09c46e627d03d359900831905833bea1693275a010b29301802210090dfa990957bbd92ce8b6e465cd7c940aa1933f209deec7615c2d9711091290c 304402200868aa3e02f4c25ed802c19f89c42150fb1ae04b5d98f5351152faa62d9c8dd902202b64b66ee541633574017e730ff505b2fe9f495a3adb87c8213fc6843fde59e8 3046022100eff3f2907722c91aac6d4fd455f5407869b591e53ac5f3c55e492f2d49d8d0e3022100d9daafeec11e2bf4b4b2e98872480a74d3dded6578fef9196075dbfd7586b4bb 3045022100f751571ecf016725a678b3d4ffba1c4d010728941e18defab35976144b61426a02200a64412ec23d5d2362055161d9521b9b5b18fc69bef6a80ea690e0c6b2671b68 30450220166c349fe8f94606587e25ad2dc4393f424d03b7ecf41ac900342ce8e7d3134f022100de99b7fc10a6dd18492514848e883b852fd84187bff2da519c43156345368e1e 3045022100f321459d0c9bffa8eb75db3ecaf85145921d212f246110f48626e37ba6d054b202204efcd942e7f8adc01a34c98f319b9109c738412777933c284ab07861e8e7e886 30450220146ec569aad6621b47dfe9e52067e1a8722658b919d25c1e4371cda08efbaa60022100809e20551b05488d58f7f5cf9ca31b3d9261573ffeb29027b3307c1babdfa644 3045022002b789ddb408558a4ab9c5954f995ce1213cfb2c660826eaf80e62221bb0af49022100ac441ef92c65f2f130c7a99ada899530660d3159b7eed0ad24363690dc4accb4 3044022020f28ec70e38125cc7c0d83c98916ba77f1e254625bd929df3e94a1792bc006d022047afdf8f661944f5e3e6dbaae6f9b97ffe3fe575adcfc50304c5d32629012492 3046022100af5f33775c670b850c757d1701aabd1c8813c76938cce52705f062f6cb08a290022100d24a83bba9b17e65a7420160ed825874393457a8094e9366528e52c1836ae52e 3045022100ababd62a1bf8c34a21a50d1266a8814a9cc6663139b0068cd6a967693adbe6e80220271e4fb0c8692f4465c91a98b8968548ca060127f50e83e808377d53770da9b2 3046022100fa5a030ac320fca837207d8783469287154a0f0c6755ce797d4ff8175cfe583c022100ec7567347fc5a504ac48c90f8ed412255ec76f990908a3e10e9798f83f7b22e8 3046022100b167813233d78cec5c79351e6c6dae0fbf474cb047c5f641264bc649364e19bf0221009831aa5df37f958e262c2fc62f6cb9d32c82cac3112db073efe1ca7dfb6750d0 3045022100c57a81bab0c13f1c07e83b3d7e14994343d0d835433c48e098d2c3387b41654802205682a503a50cba8c0309e4964c54e081f576a1670410eb91a45a8983e78c5c65 
3045022100f1880c434e5a040848dd398d3b225418f7e083dc19d1993236d58817fce2fadd02203944535cde7fe7d4b1468b868952b812db192ab808c874a169fddcfb7ac44993 3046022100a56cd263f0b697a76d69365df0b9518e07c6a25eaf583ecb550354cb84d6b338022100ae15001e58bfdaaa359815b357c78d6f33a28d119069247ab34c37fe6a60d568 304402202a47f67e2191ad129823420dad31a45eb3f54a514572b3556a40480117965b97022061faa68e33fff810bc809341e89cbf84ec35c60be316f889280cb7fc38bffe29 3045022100bb8d749d854cf53c4b0fb5a0b9e8400cb2b05bbc6d73086327addeaeefaec19b0220209c16776d9d25eb64d43c330ddfc11abcb2c8cedb3c774dbcbbaacff57cddb9 3045022012edb322b830530471cb6c9b077bb2ca1d8dc0428a00b873cef3806a896388da02210085d5363cc44d72b4e6fe9dbdb746967ea4e639f355c1d2ced892e47d90227d31 3045022052582fd2543d3ea36113b74288e7ae9819070b652e721c0363744faa509db2c70221008236f1e34615de555ba31d073f811c2e0e5ee6126addaefaec9e60a4b9d3926d 3045022034a283c2c41af8dccb57c62001d208f5f5ff2d874f341f257cf2a2cbb1ee927e022100d2ea1c5f38714751acaf3e559eab06249cecbaa1df0a0821f27afc1164f15105 304402206760ce626d6338a09dbf351da7e6c547ed9ac111c09a826eefd5a1a0fa54a52402207a5f9d3984bca88cc6ac6c4dfae465a232cd68bd86eaee6fc096ae8b5984df19 3046022100f8c45595529b857007eafef7bf56ec5c63c1d3b02ce7dc17834de75a36d4802002210097493466919e210f5d109faa216e9303754dd37a151aee4bc0dbebab7e073ac1 3046022100d0c6e238867b228e4fd35ba25e63933b91c36cb8ed8ac8f0c03cee3563a3627a022100d0a6c085cfb161fa96d640a68b659afb86e22e03ef1096e7e18ede32030bb6f8 3045022039040a4b598616b29eba3f7f9453a333f1a6b4f653e86bfb639d277c50e8880f022100c177efc39d282e29b4a566cbc72338890487780c2b4e25453e29c38c4104e3c2 3045022100d20993afd20f526fb9007562fb7d42a2c46944f9e5d386ad02e47671019e51b702201ff2d2e0fc6dea736d0adc6adc9eb9d772c6540a074775a5a45345a5e0301eb8 3046022100852c5abb8860f9bc46cfa1592325b59e3b9582e2c0d6373072814941f73d1bba0221009345a6519077f8c6b7fc64f20a961c2f8535be32d4011ccf2338635346f6d783 3044022032f3657a5613d57af734587a683efbc41da2ff704295f952a52706d03f4f7c0b02200b2dd0b9cac3db7d5d68a1b6726123c0eb266c716a5c239a50f60cbf6c39150d 304402203dd755228c3012e0ca395a5638ae865c6da3dc5770508a0ce9a58363000d571c022060f331ce294155292b0459619460c2c8c9155fc1acc3087b7de90f9a2b9ae53b 3046022100c1bca5b8ade6e5629c6465e764f5dec6cd49055b9bb4c7e6510774edfaeb1046022100cec455f546cd5884445fc33b32d56637a5bbd0d4b87947385589ec622e2deb1f 3045022065ad671af351a8a83df28d90ad55eac41a069baf7478b5ed17300407b39e5942022100c7d7d4499394e2bfc820b3d40a468890636ca15d699fa3e3772b89ae0d0ff35f 3046022100bd9a057eaf0cb1a3fd337bd2afd119264c1e52dd705da8048a04d6bfec520d4e022100b3eb493cb656521656442669e165b72e7ece22e840d3841b2457eb9f8f12f605 30450220422f74fdd3c86243256e7445636dd35a9f79339cb3ba872de4b931ec337cff8e022100ad1719071de18c31401c7ece62a2607ae721dbf55066b095ee057e192b6bd2f9 3046022100be680e7cc250022ee35ee45bd8e8cd42fd6fe1c2a37d285a1c0b40371ba26e8d022100ae28d789397aab21c442b85392017c8b5b84c5ea70e77e35d1c8b9d78e35b447 304502200e10398e7fbb78d879114280177d2687eb57ad42183437c125d470b9d68ba26f022100caf86e0566414bb943ba8d400d079758d97f44b8794fe31113ff0328a664b81a 304502201d03737bb567e2fed78c1012b5c809e1bc4a8beba6c494e840255e4f08ff1b58022100dbbd8eec73c24068bdc69a97dc1eefccdc17d3841e062240ac882f294d287ec7 30440220135e46cfa099332a6bae3f8de44f9c9e1cc3f1147f465230c992fbd2e22b049a02206a8951f04dfb4e1b574b97facb4039c070e737d47d71db0eb80c15e62b4d7c66 304602210093a092eba01d682e61f814daedaa4124e214ad22056868d562973909e12a6e38022100e04857762e2acbb85c14b81c251ac111c69d44976313928350e46d1c45a5ad59 
304402207a8b3d7cb41eeffa4f52b79e102f124ac131a920c933e69223c148a4d7661b0502202c41a0e98b96a2ab4db75b9824fa7fd17a19f21df856411ee10c004ecc25318d 3045022100944cbdd9925209a02876a1fdfc50c68770483a6600416e7f49f4964f6deaeade022041042588ec6d11fa55a1dcfee4f4e87b8929a7f3b236e28ab215028f299964e2 3046022100b08565b9897ad7607c54ffbe758e5a25ed3c843e6ba40c037c788a8d9e8525f7022100f7bcdf5976003f905eb1e953d385e8fa3520ba416a031886d02c4dd85c8bb68c 3045022078d78e2f8deb13f3e44c8508e69de0844db6d4386717bdce72dcc353ce59d3d6022100ccacb42284732390d74e0e97259c1056c6341228071166fa78cbb9e7e0255c25 3045022100a08e636a3389c26d8e6eab8c805eb65a82dd0ea2808a86189a2db1505bb956d8022000be78a4666a0ab3acc1b6f97937a0be4863c88dc578d06d128c31d181043c21 3045022100dc2691b238dbd3ce0ad1c783749888f3987585345175b20daff739494f3dbb3402200936b0fed315443246f1eaf3afc883038967bf6835cc8d5b43ffcc9fdc0ee378 3044022028aca1f8e3a1e2ef41e4ea2dac9b22fa8346467e8cd4e75450df7743c8c090fb022012a67f25d2d9eee7f0471dde54456fa05ff81b1a6bb15d2df36e376512376765 3045022100821a5e876d1c0e04eab3a3f5b708c6bd99df9f95415c2bc68c6708b07f3a3ea3022058b8cf2745902f9a27003f1c78685f9cad70531b32e668630b8f3781dd6a89d8 3045022027aa19507302c7371c1dba22ebf8cafc76361de1b553b81c20d14f544312b772022100b1ac9b6c23bbd035c33d9e3800ad8edcbb2ad2dbbbcb4cd067e8526406a160aa 304402203f52376b95d52b3776ec77eb6eada20cdab1c1f9f385367c25bfe2ba8378d6720220659f60d467619e69d413be974f16260a13ac6f6e0b0af544b068db897b208102 3046022100925857f7847d8b45423f1ec29e1cdccd0854985607ed3eab37faf92946d9de39022100f1ff8bef40c8f0241b15aa4f4c3d97a41e7a4654a3d546ba92d6e76c7fd44b29 30440220090eeed62e1066db240f57c77eeb225e3e25ba143d0beb4a6ca2934a6fb1e909022008f66ba7b68a1015d5286c00b73f7b0c2747f77225a76bdf5dd31f14fbf8a147 3046022100ed0b1f42861cfe4a59e0359ec50cebfacf42bb590b437b52142e0a6d4999126f022100dc2dcee3623e350b7128a7bbacff8bd536ef458adab13000a51607fb78f2abab 304402205b9b173d0d35ad0d6a135ce84fc20d45a335ae2b4f95f9cf73f5e759c8643ba6022008bb2e62b1581d1dde43d091122ed8e6f9d67d1194dd4d42f71a9e106eb3f83e 3046022100e1b5686078c28364e51b45f413338f2d5ad1f7a8ffa3b55027ef3b107312a750022100c77cf5950d1dd8790d2b435931f3118a9c8d387f72530da15835d9493018d016 304402203cd65404dffb6bd160a96a26699e69b375fdd15420ae9a32f0f71b143346721402206ba7a67cbbe1729a6cc52375959aee4b07767d37e24ed9bcb707788871dc1fb2 3045022100deb12fd1c7096d0732d5a431533c113e25c14a5f9cdbfaa5910662fa02789b260220284cc4afa12f89df5134cd63df2e0e2643292381a5d3e45655bcc2a763a4bfac 3045022100bbaa88ce05167fd2d3e16020298325c5ba46ef3369b383ad454cf064ebdc961c02205cb471d83599b9210b444e7d3f6063ccb8fa64b614204cb3ea684104662552c9 3045022100b94bda0dcc068e4aeb91ad2923d394fee9863072b629c25c315e002867c75c7d02202339904362ff0080da5f5d1d7f8cd6ca0f2089d7f3359d259d9453066c739c62 304602210083e1c5f7d08b3fb4ad6c0f65978dedebebb23dc5a78f7ca05255b184ceb50f85022100871f69d3669e376d2c40d1a3dd47764b2e757a5547e00791866374e874df689c 3044022004d994244c132bb35bcc673197153a4fbb206dcb15101ee2a25c9638160d261f02207c957ae9b4892289e917556f3567dec4d70e95b0358bb3f3d4522298330853b0 3045022100cf3413b01a4c1a68d85b3123c2368e48ec0b79b084d173ea89ebc57e8063902d022016b6333181e0d60f886515a6c349d76df02be224393a144f8c586fc5ce234b6d 304402205b08bad9c1af1cb7d0fe513cc744151477180e5d62719489673c425e48934ba70220238dc1b2cf89737ff1b0635ab689061864f7ce150f8ae2a8f7e13c69fccde5d7 3045022100ceedd2aa267cbcdf170d63e891949ba85e9da5cc81542ba4a4947daee067a47b02205d223263d75cad2c477e5e651cf8eecbc927752814106f34ed14f0efc8374b6b 
3046022100a4a9638ee1e84977215eefd341bfe1e36e9f88f773247d28f25e3a3e615f0cca022100e830258b2c8c7549961ca06e27c06ed81cbe34d64d9514a468bc25ccd175c8ca 3044022047883f84e573a36b77c758ea1df5e8afdc02dc92b44926a87a7f32b6d055902d0220110ccb3a16489406719c57c6651d62521faa5edf8a89e163c885c05c2d73a4af 3045022100f60762a19bbb37c30a986b419113e1c0b9da0c6cf60da548a07425146d88e31902201f0b54736593901cd6b32064de3adb0ebbb461bfd11b1a3104fa763d7d0e6c72 304402200317f3a250e80a1158ec5390b3b134dc47fd0588424f7c82ae3fa80e21ed156502205ecdd6d5f7b904d723d2723e60a91d8ef0ceab5a9db6f1ff2f4eeb6e6c99c6e0 30440220689411b28318997a4a0a7bc0b3a0f75b3083031fe99fa5563862f594db052bd8022075d227faa35809a699b4fbbb122e6afade10328597fedaf471dc7343863fe2ef 3044021f45b1c4d77ec427dfe93760f8c6fcdff92393ececde093aa94a2b4a468754bf022100ce13db1755f33dca7d9f9d25cd8f9e09bf52de5c094517de3512a835b3a4efe2 304402202ed3038a94c475e11bb57493b8ea11b23b506987dfd7ef444f702acf0d6575060220630acec4240a77c8aaf3ff6db79ef82773f469c83725721afe63ad6f36ad29c8 3045022034d590e5e90c1383b0fd3eaafda1ed0c48f401364bd1edc64ad9179d452831ce022100c46674d87b2fbd6e2119ba5329e23b8b974a56fe2900144cbf1a7569e9fe2cc3 3046022100f6f5dcd2c4a873b583a2f44a6e37164a328b74c24c5ee78a39adf845b4e46a13022100ff1511bca5f9caa32066703235770c9098a9af251579cd92d7bb1a55e2fb5f56 30450221008cffe6aebff15cbbfc8970e078d335c22ec28db0fc87456c0fe6f3a66c1f337302203f23eb5f59b89c4ce2badfb9a4a7b72bf1f4e24641b0282d3921afc65aad3299 3046022100915d4c7eaecd685a143088dad40ba5c4c20d89eed749f89f0ca75e6fe24bdcb6022100d68c90f96a57ab75bf688ab0f247dd674250a894950208a26a2a6adea042cb58 3045022100aa38b7f8781fc0b9d2b4792c047f91b189a09e1c09291349732acf5c4823cf05022038e0d4b4f0f873ab55133b3f6abfa7d39ba7ce7c3caf2517706c4ec8d7c507dd 30450220595b45b339ce9db765a122ffaf8ca7e56bb9e333a79387dbd1d42151d7334698022100b6543d1daeec06be1f64255a8318736136600aa73fa26c0d4ff69b910b3339c7 3044021f0f54605327ef314228d7747f95ae571be1f50039bd28f6ca9a3684e6749c63022100ba01ccc7f42859bf0ad0b74018ba4e38555530cf5203c4787baa39e6a5770cd4 3046022100d1642c6783bf7c8d77efa9f8cedc1d6328e60777a10c83956e20a14349b6179f022100a4faad83f62aeeac76213792febced45d9914d2977903fa3893bb37f2965e508 30460221008006dcc04ec2c97c3268ddc48db4e930549a84b4d66b4e4fb58e52c4cc3f82f5022100c3864d65ec7650e135509214d22cb96160b67edf4f08f04c34fc037b7f7c5fb0 30450220719a7ce103db779383b88540546a582c747de2edfa1472efef867fdb577afbc7022100e6d158de4bf4676a1514ce7e97e58a8c333656197f70000718d9b4d949217608 304502202d0dce8c89bc22e2499ee96b880e77941e57d416a19fb5ad5c64a555f86b2b45022100a755f5973f241c0d32e585157adadf2d64d6be149a97c6a112b777dea6362fcf 304502210080823f4db621a6f29781dff6aa2feede791ab41103a7c69d7b359b2737ec3fe602204e6a38f5acce96b4679a06c4eb06dd9896f576521bc94055754695af2f031985 3045022100c037f0f5527206a185b57e6f8e9624a56576d8c7f08637c140e7b0e0094891af0220168a4dcf17b9a87dbdb6c2a5889c02708cc260da58e7cabde72899f7303efb6b 3046022100e323af1a8e213ac600982eff4922306d156cfdaea435357e9092556b5cebd4790221008484fd5ceb375fad1420a4a2dfa1adce339dd6eda4a78c4fc69e6a7575549a79 304502201672c44148d713229b73294501a499657aa8a77f38e515f11885a68ee1c60773022100a91b4537d1da0c3690f7bcd96bd6bdcfb0b5ebab3126e7597386e543c2a9bb55 3046022100bb01d1ecb1724ca4cf6b27e1fa5eb848f0e5bb19860dd4b4795130eacb537344022100d09a15a550d7d57b065134de60f827b61220d7ccd690ca27f9851a08e3b436f9 30450220539e78f7634ffc257c1400d02a400d7885cb7eddccb761a33939a0f7adf7da800221008bed701c19271c2f808dc7be0204743a31696f8e5f115cc55d7017eb36355f83 
30450220742db417885f2b7b615ef64b00d4b7c0b447435085e2c105520eb8a68684f217022100efe422ce1898ef0f6bdd42e8e5ab5112a6c6974833fe1c5297923cfead0d25c3 304502205502a6736140b7d60f4285f24f74905ae283895359196b1110db565ce53e5d83022100a20c7aaeb1b6b83432984a74a82f1f21c9ad796925a72d5d7e169ec4ef05cfc0 304402201159d6eb2830be0efbd079b848f2baea9b13501698f8af8fc46c41f73ae1335a022022c269aa83bfa10647eecf5174efed90cfc8b432613751f5dcbf162b2e224801 30460221008d6050d704ed334f091fadc66d844028be80ca65ef0ac24a7f9ca2f81cc48423022100de5580b51c8e45a97a06529d6e3b1aca8abd23f3500e35e069d1ba90a6481f49 304502207863772d6caea6d83c914fce2473406ff761ccf97a419b75d64de64239b2c8bc022100a3207ce8551b7628fffc26b0dc6b8bf1f1e0cb644deb97f65e505f873a993e2d 304402202eff38637fe987892a2ed9a3dd5f36db5f877a0cfd7380818d9be938b36051660220570e5789c3519afbdabdaec5ba90d8dc4ae60708e05962cd253a05b4880ccf51 30450221008f6e09bcd440574f67af2599130d860bf1d8db326f528a65dd48cd6e63cafcb502200d8b917c28ab86ade26fcddc33b9fa0e8e445426f32cbbd279fe79406a4c363a 3045022100b3ff177f102aad99027cea9758cfb9b6e057fd6940ed7957ed9ccf22f01bd694022012d019e8c0a0f30253d0640409a759d670765f883ff80a9ee1496163e55073f7 304402205481dab7e58f7ad138ae691d3998697a59b66afcfe51739105a1a9f80c83d08d0220277952842326ef6e1d4380334b66107b2d967c928ac6057934229641f9b7f6b3 3044022068e307d87b74716c6177ec7a01a35876110159b91756466f1215168fd042be0a02202bffe8d01f68c48ccb9431ce70c3f65906700e49f850013db11a37d609408f35 30450220425e44fb57f5ad6936c0b722739f65166818155888dd6ca7930017578f8d1559022100b74c2ed97caa191d6e13e9d67eac870c818e89c8477f91c3e2dbb754db61f423 304502204c764fcfcb55fe4c287a6ba8f8ef849934f1656d952255999d24598bc53a3133022100f757b854fb8b88403c546029c75ba8fd60c7a0498d0ea8ed24d72ab2698d4993 3045022100c04ac2d8ad165c2aeb51db05e76362e3c8bf70bd2733028cee4e47de95fedcf4022077d7f0e4c97faeec82846a273416dfe275330813817547b44d698d2b77a05508 304502210092798d155e18f17d3b0101810cc204f0a89f7a74d3a77543514f881076865755022071da94ebe02953f6fff343d309d54d7bdbfcd63343cad86b622bff37254090af 3045022100efddcc60ce92b1c652aea3d1c0c4f7d353c010d4fe8420d16dd3711ed9b84f0e022013a35b990da29cbaa1ce742ed3b0f0a53bcb346d7b4e8a613ed0fb2eaee03278 30450221009eb4b0af4644c8a0c68466a4473660e511942b7f7cefe6e75ee6b1a59f807bf502200fb7d9773d22ef360057e29be9d3057584d0e9e5544e31cc33c4f7518aea0291 3044022050245a62428d691e5d5dc44cd95795da6edd1b8e1ee70b84f982d17eceb4c51f022018eb26a76628aa54b9b66a9ee22d292abe49f73c8cb3c77f15fbb4b3e37e6901 3045022100eb5dc4724a4732c78ee2ecb00b68031ed59d078a3a6850a9f3a7c24b2d1e307f022062134e9a68b1ddbb372be5f5e746406ddf934b688821844182f766ee5b653212 304502203610dba41a03be1f809d2de50981a8ea9896062b6123dea85a5c868a0d415781022100e710ab47e16a750233244b0afd2961fe06779ad77ef90525799dde989b92fe86 304402201450ee4efe8181130f9920e11195bea323297236e418fef684ce1835c8721a710220433f18a7d9cee68f9e2ca69a5dcab10c60363ff9947537c3ab472a5bee7e91a6 30450221008ef0f6a47a5a4ba788e301594fc2e127339b1d0cd4f5b4d89db7bc0158f92b98022039e5cbe86998763f7391e66c2b4c57042aed654300879918d4bfc9831fae406e 3044022037ee2973546b77ce33fd53e982ec1aefc7351efb870338edebf4a18c97cfed3b02206e15a0633cb8c00cf44768dfeec7364cee1862600d6ba88f7ca705f92efd4be0 3046022100dfdd1fdb574d72e92d8136ced40a7fa56fb7f3a7e3ffb9674542937e3eaf2349022100edbb769d06112e873461579e05239b05e2b6cae6f512a5d940520f13ae781e37 30450221008871795bccc6270845b63f555314c2ddca4e5cf1ae5485031296f07d140cd85802201cd20dddcc83e37a03ba0d9227bfef32750201341f4d59638ca4285b962891b9 
304502201bb113a29cbdb78153d7154dd4220b146525600e69cea08b41d7b9b60b4a50b80221008412fc2e9329feb132ca8c9bf45bae7fe9e2c239538b893c37d2a9bece37c958 3046022100ad7f848075a2cefec161a7ba340be36a8eec08e17c2dae6a6a695233536db07a022100fc4a0643ec3c7e62985806dde34b91aaadbf313eb0059416abcba19fd2b18ac8 304502207d3a43af05afe0943a287f0a46e424381970b25be2e49f706957175bebefe858022100a0523175e699ddf3063ca1da045776e4efe3efd133e18247bc19165a79df5819 3046022100ae36dec05bb403392a24df38fa6e0f6d3288c30c1265b462b31c683b24b0bb1b022100c655d5fb556070cdb39991ba785acfc28f4995de57931a92ae3f7b512e542ca1 3044022040a04d639d58593b0732265c8a1c14afd21f9a8f9161f55fbb774168060849ba02204fcfb45cbd7bd88b6c204a0a405e704167ea32a1dc664f5a8919345d28544630 3046022100c75dd9f42013f564448a87cc5ce952df63abe8bb4678295591c4908494ceab720221009748a4424df03291de17407ace7f183bc1d6bad2883bda4950966bc743f0a76b 304602210080c052a347f4ada16d3a41bcdec3cb9fe2147b6fdb6380b441154cca143f130f022100ee84bdd62648787c50af9760297d587549406f0b9898278f4bc6e04db3566afc 3046022100bbb264967fbfb4aa610971e54c7647089019aaefbc45c2b409be705ab6636f7d0221009027288a4118230ff44e4f2a06e0106092743fba1852c2f201a1c0d3acbf1b38 3044022060be5d04e7d3bfcf5ad1ee04138247090ed65a11fba075e34d27f92d8ed0447802200db15880ca330265c214faa3a4bc692d702eb4e402e02d48c9be133d53a346e4 304402204d6be35df4aa2c3841076bdc1e289832be93c43bb19e1cfa4c1acbb6aed937c60220404bcb62108c585696d409593eb230a99d3defd698eb1ea45779419982e373d5 304502200c3edadabe83277de1a5ed4fd0bf42cd3b47858181d812b402d38e7a478c1a28022100f3a0482988965d562a907ca0e20c421e2a3bb6245bcb9ed1bb57616f80420445 30450220770342f0ae48d1bfc32b66217f9fc6df92d89f83520f64e9996afe8044ed103102210082b91222b4ada5e427f5df2ec8e5e215c2815050543b4368ab2cc671357fb045 3044022057371b80a0869c83efbd2a45c537149fda4b3501cc2dec60046e76f83bdb1d8a02206aa5de0e48d64699d603a5e5dd9330b9818f0954c9c7f124e4f44933ce2a0dbf 304402203600e25c5358fe6a3081d6239f48988ef88f4b22738cd627acb981b733d94b9a022000e5a0f2360c9e8160331d83ad5d269ad2c687d4c39f15e8c6907561a5f8b64b 3045022100a2229f0034918dcad9d306fd9c34f0ccabda8026c79379ed60261c1aa755674402203a25d5e86b89ef629b7d5b8e830a5d41d58ad0d246009eb3da834ccccba84295 3045022100db8703460c1d6b4e921b646bedd6528222eaab74426f28dccb2885fc4692ba9a022008aad6b6bb6b2d697e06b09b80d1f14d4518527e3fe8196489948caceb6f5125 3045022100bcc87f8e4df92b0af894bd39ed42a644bad195ef73077a724313b951ad396f7902205a101c23386234e226bddeb5405c979bcce8753eeb9087a827d55eff61813a2d 3045022100874a4c64dcc669e7d1be12d515fc5a1217422f712ea34f10b28afd4aff205720022007828e2168847daa5bdeaf6b52b68e3f85965843d035a0ac3514e02ef06433e6 3045022100aff051d6b9671b6f38aa919e9ea182e8b608d7912426dd4a8a86dcc63122c92802207c44872e1363e3264b2588a7e977a4f9f85807b1405052c55500892adcb7c6a2 3045022071c78968fbf95fc62b78555e3e0a47f80b016d4b60e8b39bea1df24e7f81d8e90221008ae7d1db9ff66da96147a3f13c52c1f5e696da1b2df8ea771287baec681826a3 3046022100993168a2dbfc13ddb12fa50cd5c5cf030db4042cb0eac714b74211cbcad9061f02210099c11c869da64d695fe767f9799b65f8225786bae702bbe1db435d63e56af162 3046022100f52dafb5a8c61afdeeff1fe4b77797899ae0ed6aabf4039d34e26a1b293a151d022100ebdde42e068f8ee1a3dae6d775cac595a2b4e113ab21cdb44b01ac763faffb7b 3045022100aa365fc3431743344bfc50751f6330211354a168fce0d1d67a54c2eb20f43caa02206f987261e630d740b1f2bb18848443c329e7ac997142b344633f5fac08adf7b4 3045022100fb0c55062b8dc7eb34232b9a7910e2a90e28f9500d99f09a22d779aa9e823d4602200be42e67dcaf0aafb0573db310b7d08ea7eebbc032e233bd08e82f270c7013e8 
3045022100d5eb1e97b1ff45b1f6a32c0cb28945d5e0561ddb3a86a3502a7c3e448e12cd3d02200b11772a9b36c9bcefb4fab944786505ac9484d1fb265bdf2a7a9ee94fd86b5f 304402201c5a66838beae1ac02e3ae8b20aa3577fca9b6088b526a50933797062ed1c5d6022067c76629a0d26b1054bc55c5e597214bbf516b3e54f0d504398252df005fb7cf 3045022100a7fa3985c4f0e0e70469f000713557e50ebde0b532ad5a85e3060c55174a61f002203e19cdd6e2c07c72445d295fd1ccbe193e13329142687a916fcdac5e953e0762 3044022029d6b0979ce979e4fb16ac471bc084884e767cb13e496d59c44506070f32afa90220020dc6e495ac5b054cfaffd116dd77b45fd5c64c63b4a0b59a4a8facc9e9cd70 304502200a0936190ccf9cd34159a7c1f9acbb19def728d9231b6b452d0f4e352f0b3f41022100b8f47ffcd1137e63ce66db4f8c830913e5fa9bbcd2eab85d5a1d89c541f56495 3046022100a22f1878f06c77719ca0be345238346a5efeba7fb8c00514315c0b6eab799797022100b9896b2468a3a928abbc63f5fa8f5042fd6a8f22bfa444e00c2f3a3d0250e295 304402202b23208ede7f9af72dc650ed8ee571890dd00731af020e7fc8a92b1f04e130c902206827457069276ee85f96be522368ab46084888c3eaf6dd4f376f9f6381815daa 3044022043e227cc2666d338f10ff968ee02723359e53c93ae1dbfd09ef7011c9d5d12b6022069a32ec30a2294bed4df9a9a1ad25069306b7174399acb009c1fd3c3bb20c646 3045022100db21f36918c15544e6014cc267fd1bbfdfd99fa62688b05c8802cf276cbecc13022073cae45318db10c8589872ca485805bdd2cbec172b9ea95fe0543c6658f49ea5 3045022100d82c7b2050170734c47d1f7b0b0789c83d9763aa929f0e4bbf71194cf47240d6022014ed6495c29507418521b9e87f8b9ba972419e78e9baf8ad66955cf4e1d79113 304602210085e8e8cbef8ec50ba9e36deab9be4a750ca0a53cfa64a5970a7da02b74912679022100e108830202254707fe86b80c4f476cc7a95ce1d5a3f06a7501ae23e9f5332f7a 30440220584bff9d98a6b637980650d90df7f269c2e03bd720caea55dff7e957c183b3a30220406685069210956c3289863f8851cb3d21eaa168524bef9718234b1b4f7a7197 3045022100f5255c351dfeccd20e11cb60086ef987de888b9b1bbff61a1de936aaeab5bbb302207a8654ad47d690037922bc4bfe76527239e0f639cd11d5fdedb23f495c64e408 304502201fc78386feaac4fa135b0999f94414348fbec4c3ae7c18520eda8f9a420efe37022100ca51f47ffa8e4ea6b714ab044d90e4b66a10da8c16f3db8cc64020837a0f6697 304502201d49363cf4d50e0cd3da0ab0123b642181e4a22b5c0c4d9c3e08064bf4d31cf3022100fd7906a7bbb1ffd14ac068704f059469f6dde4c2d269e1a6eb8d646a578bebc0 304402203ba7fe76f06bc449361f3bd31ba46066bce5c76da25dcfe5bb6398c83875834f02207ac8baa45e6d71b05a94debde3c794af410643468789142757981679645bfb57 30450220525e5537f48ef96ed1426ef4b371b790eb179e75b62949b25724a38e168d8f8a022100dfa1ad3998ec67f78c7e4edbc5f31680478e22d5df48af3e4be168f595c8ae57 304502204216e2e410b516948fcca5a4d75fa2a450cf1eb39f627ddee41de5068f79a7a40221009bd9b462917879bd3b340ed8ac7f8becb49a5194f2e9fb2d4df23582b76a0078 304402204aafdaf1a897d29dbc32cd1deff4d97fe76c19a77fe15ce96d8c338fc6446dc50220148a76e7c372ba05890c6dbd1330db6f6fa6476c7d0d9926ebd389eaa39dd2ac 3045022100a37b5cc1f9aae8a114f5c7c0acad885e8076beba0e8873bd5a8349d7a27c751702207ef00675263f4a1d6d2a63c3a5c2037d74e2add4278930905b38d7d974838d23 304402206e08a86554e50c8c931ba3d65be954c449b7a1aa55611b08e18f5e8deda7284402207955388c77f2131dc25bc51805b351cbf0dab647b8b301451d3eb6162481fce6 304402200fcedb5f07ec06e90e0d205a810b1237c591e6a262e455ae8450d8cbf53e20c802205e695c73510a57ba10695a695a60e6fddbef5f6a67fcb875c6398ad862808817 3045022100d37556284fb7b44a953bf0240775ff361d5ea919f6415bc59293f3336616c39102206b09e3cc2de08996d664cafd230ada5e49fd2e3f7d8f8d0f0e8aa5cbc52e92ac 3045022100e6e49db5717745aac1c79e87902f5fcb6394fed47ffe980bc89b3c9593c2123d02204c9b81365a38e13a1d90b4567ff3e86edc351182540f63d5c03ea8822fbb8235 
3044022015b535b544d1df15c68e1acb2967e7ecbde9a60aaa96b130facdd3031c4208cb02201c268f782eb4f58bc61de3fabe60750166eb18f767a1fa4940cc24a567c5c9bf 3046022100fb570f0af87611e4dff3d6f688a2e9a5e8760afaba8efc97a77be1c95cdaa352022100d89128c3efba603a7cddeb44f6a7d5f14e20f87e13b6cf51d5e7c547c2c460d6 3044022075777dfb63714154892ff70b33be88ae65e556ce1866d0bb832cc9bb3cc693e602200d8bd789a37fc626c364d00ee122d8d200efdbf799ac086a76fe3284bc4305d6 3045022076ac857eb43474107d423e4d82040c0304fcabda5f29a78ca3a30c6878a408c30221009ca796adc072c3c40eb6782df803af2d8f4fb0eca3a4b244f70dfcf964660af6 3045022100a94259fef8bcf99fd1d26eff539baab1ff5418fdf36f485157986a9a60143dd8022005d1b02ab08f84d9d549b37945f8fd39f658cc95ca119e7dd859f4a98cb1df09 3045022100af3a250c96d6d37aadc0cc5cc76e1dd7ae2a58122bc5b8565119024de980857a02205307397ae51c3519bbf4a325c78e12c9d0d91e10a229ebbf78661536d589aa9e 30440220070fb6746fb9b6c1ea815096fc893e7a22289ba001272512513fc78a5af4faff0220180d035f7f7acf1f5080ce3822eb6379469a290b87da2ff36fffafd9b0ffe363 304402207e7b2ec2d517bffc56a774812f6176cc2e290df15c5bde89eb28dd4219a02d3f0220662c8b9fad0ace7b52af6389167fdd34aab2bdeb1249e0a21a549dcad629021a 304402203011041977bf2eb9ba79abdc7dcee2b01b8431a9d87ecdb6a0851a0e081ccb94022045f1d17d38aa0d0c97a28e83b8c93f9b1623e02c7902299dac9cb2db203acadd 304502204106a64840f6e5420e112dbd694a577246d1ec126568438b0698d9ea9b7ea342022100e5a1d1b2b6079da27a5ee847be8bd4491e8fdcafba25399fc41bde904f6552bf 3045022100b294bd96b25009a060df7b64743bfc2837d0d1301fca0a3781099076b812026802206adfeb200e8445697d09f60851288c4184926521279569d470a2da882f1cd034 304602210084cd81f9587698345480b006fa6d3404b04cac6a7b09c3cfcba431e5fc5871f70221008f21e27360e7ce19d79dcb40b777c2a0617d1eff9c5a54fd869344c0aa7780fe 3044022074f96490a1eb7edf4b5dfb4c3f04e8eee7cd0302f2e888b834d970649451af21022066610634f51f543b0b99f75996d31643773a9cf8b6c9d095f11e7765c6735bc5 304402204153c3afb6617146233a5b46ce091717349c1bbf4de533632a84bc9138d5f8ef02200c1e7abeac801d4709bdd25911ba9cf2fce0db0b958565f170c5030afb8fddef 3045022100edb1a4ee8e0a20bbc1a88e3c80b6e8993a08d8e823dc18115e77160a70dec06b02202ef440bac7466853229b6d41c540215be5a737675d2aee55f5913cec2c464eb8 3045022100d131b89a34f084c479423973c93f59f22f387c7fe806895737eb96178995e3790220426c7852694382e2953dbd6abe8c4d6a9f50f12b4656573ee946d78b7daeccf7 304402205195af99cef72614d6691d26fd0c3f69bcbfc3e206cfb4d59983817dfd29c52b022008f26e2d5887eb820716196c78d99e333790eeb26723823b7c6bd463c72a1846 3046022100cafe254bfcf44eed7ca002781e3e0fef7f2f6eb7f6c55e07b7ddd92c87a27a50022100b2a49037262d3dc905ec5b647f7f049766ae26ccececfb173dbe453c0c434ecc 304402200bd36c2ed993cfa92acad8e4b5654f6e96c031f02f7a997b88b98cb73349c8690220217116ee0ebe908ca12a1d257b6c285f732a1fb28e523a5c77a2caec49094829 304502203450791cc760c5093ce87d7dbec74829d0d8e9b400643787f56ffb163beb2436022100b14b9171380b9e75074385e7f792965b243ab528af859d655cd67a7aa848ebe2 3044022046f58d93e81d73f9d1362539678c78c26f9be6898eaf98f45e70ef070ed3c91b02201769bdcd7ab3fc35592cbb96ba41397b67a3fe4303381ae6fbc7c3bfc2708fc4 3045022100851e5c6a929dba3d54e1061982906b12f53c91399c17a6741f99b84008dbc753022061791bf252fabc2b3f43e7e1394e26a0e7de5f0b86a7fab235668d224bfd2560 3045022100dff5c9e6b8f960773710dcae10898b7e329107a9877c075c67221ceeb36c610602204d8ca3da6870e4b363a62ee0aee070a5396b931fc3309b2e72b4940a10bf7f4f 304602210095cc9e38875e31dd92b731465fecaa589f147cc6af3177072ddc6b9bbbbb06580221009e5c9fe57e9c06fd13b794a10748d3083d1e23e5369ac3562ab4670b5b27d9dc 
3045022100c925cf60e006955fea74cad1c3df1c9609e17e64b1a486d643b3ede56c3ffe1202202d4579511a9a074780d98ecf4a9fe5334b2af5afcb287931c46a8ffbcfe3a5ad 3044022012c478daed903c2bee77f87323f2a231fbb0acda39e543e3e34d788c654757a4022049a661c59ea09a2530f67d47078d3fb190c42683a82e5774a10563a52f81538a 304402201f8e9f22ec40fb33b1f2368497e82665d7d0e28cf6f0ee550d53e7d7274f0184022011bf93500b8ff5b7134420a9b11791ce5d79d8ef226979d59a030edbb6e6e4fa 3046022100abe0e6b95e7d2689cb68eddaf1a5000d0cadd9076b6344b0ad82f508309b126e022100b499526fc5c7fc59226224c2c39b52da12b01488e926ba059a4b670f36521938 3046022100d8c2091f52522a6d39ae99db132ff5ff59a241ae9525c3eecaf1821c27afcfdd022100c0d6e9976d39da1cc3e2f98aa97c66be142a47cb297f68a959af88636b1aca97 3045022100bc0ccf571bc8480f792913cdd702ed26d1c5ee70cb7336b1e855eb35f9fca864022053264fdb88b39f2a3b85bfde0ef5d74f29d7f585fceccbb69e98adbfbc721079 304402204fd2395e9cb105513ead67488c6b303df314286d4aacb477c14549303a57dc1f02203578d1edbb479210dbdf8df3b326ead711c36894cd4702c6b9cb63f0c4a95cb9 304402202dc32b15f0981eb32ef03b85c17ec7f9912c251a9bfd27825c6a91bedbfaef7b0220190fd6d1968555cbbe4f46520716dcdc823584921a52f2c519a5630537229b66 3046022100f1dc23a366bae5c6c163a3ea71fb35eb0884e9d6345b94bd5ba70cd0f9449c5002210098e66c03cae194387a43ad2626239b2c3aa070fa434aae4c7a9c3cc574b50dca 3045022100b3bc648d3e3b7ad9cca6656803baccd91c2aae387e33f75b822cfb0854823dda02202efe42dce3cb365ee850c85d851277b0289598bd39e5126d2c23ebe11291176d 30440220038d49cb1fcce2fec24030f5fe6dbfcc307b70906f8901f7a6f08941a89bb2bc02205cbeb01d6a5c7a7c31430af2849c89cd7006ec080f04c19e27d9ab8dbab398aa 3046022100d7db2ac10d21c3392bcd3c6e8f46a0309e2c404b9149b319dba0bdc9f1bbee700221008181d59c7d5b34ad51360b59bb09b10231e88d12e35b597e06dc680caafbdd30 304402200f343e7d07b450e78a56b9870757796eec949c553b2162e78293b429df7b66f902205df4459fa105436d4ace1c07e71a1cc97a06e247908a2f01e7248824a1bfc3f9 3045022100f5689ea63d832f3fae1b3a97bd8f74f47f2bbaa5511afed0fdc82ddcaa39decb02203e8fba8d76bff2e9671c5bd2fbe24bc577fbcd89463e7a23a7ee41a73995100f 304302200cbfb89cc2d67441f6b62fffa647cc7245305a06575121a5920871b143c5646c021f6f3db4263e1617e592ff0b1182cd84903f1ec1e2eabcecbc245f35b09fab25 3044022057e69aae06a0920aed613bd269dca5ad07450c1ef447e7e8787b05cd84705a33022074ed1de8d41a34d92d3c1e788d7820f908df695af19cc007b62eb8f75e26ed30 30460221009f1f1ab8f55c35ae7314ac8803c882fbf58b933554a90271faa423d838b88bb5022100db16ae9728bfb11e981cc61b0e27a8342aa0d86e7bf2ee3a61d50c81efaee6e4 304502204f35831880f4f97752533f509398daecd4859879cfd82c9cdd8f5245c9db21fc022100ae3565686711062c05695805831edcc3f3d82cc2ee077b6a1a0259441820bd71 3046022100810105b79ab7d54197a9f5b40c3e0f4b283d5d8f9253a2c5ddf9c5a274a75528022100ff03b327d2e6235bd4ee0f9ebce6082fd5347d61e4b1367da0cd9dd4ef45230b 304402205b554de338e2fd18dfa42d46a14dc055f7751057d7420078de1e32e199e92f01022057284c52d7d9f5c86aca671c0f216a96116885a81ff924d385c87601684fdc59 3046022100edca862d9bb149dfc1ce3bd04770a951727631592b0e776d553e4e560b9a9399022100f70a1fa8678543479c07b158eeb1d38b6578ef2d7d1f39958943295e831251cc 30450220276f1d6c91905f330a1f73a67c603b1fcccda4e5f447b0ef8ece9ba03b921352022100fe95b387383780403b4dc079b88070918e3abb88ed42756d10f863862f49e031 304502210083855dc351f4900593e6f2728a211f87811eae82af4fe30303d3fc7374afaca902204e3504ca7735b1c7fb18f55787ca9d01c85039c0ddbbbccb68c5882e3a025f14 3045022100edf5192c12b8304b3bfef2aa72cce11786fc863f6d64e144e779385f0db7fe09022044dce9355e509a741f187d56f08594683d73dbf38b8cce90027adf8a267a83e7 
304402206d7f0f38da55ecae94fa3977552fe9d5c8416fef2c39cf614b42bb19b75b379f02207f0fd9887408021f22802055cc20bb2b6f7541cf3c90e1163a8e7649fffff4c5 304502204c86b5060877c60b36d3dc32c307a38937b2b375da11bc1c6757d8e6deac8e7e022100f0ea4962adb01bf389c0946f3b5ca5679dd580a872981771bcde90bbe1f8bd43 3045022007808e15f3c15633982865d0eb226253dac5908a7f5a25619ea93c0583e7ff2e022100ff9243f7b9f7a5dc95fc26adc4c4d89b22e1687560c5e6d409d1b52dba3f71ba 3045022041fc5b01471f7a56e2872fb38c081554e3145b858d17597fbf7f75ccbb072a380221009087d1a523de89febfb72db0fc5dea19f43077deb338da0fbff78b4986936fd3 3046022100eeb1195ed4ff085a744003305189a589f3c1322c9057ea55cf536cd60c03ef2d022100b4e96183899f5fc627ce5e5b4e81d3d281ef1354977cdfee9b7c96d11595ce0e 3046022100d20dc9d36fcd3820b49357a8fd67c16f3e2557374d7d0eb7f9fbe41fa3a3a482022100a62eb03e62de5270b90ec5815b6deffe666707199aaa7a1e05164de7a1fbdcf3 3045022100f24613650e2f683d41233fe157078c5e8664ca9502b7031b690f93b65432fa9a0220062bb37ddd85930aafca7b3209d51dd925afb11526e3967c08a16ab8408e3ea0 304402200172b3f9ac2803434fa223b25ee03bf34ad4ae161425999c6f46be0f4b9fcea902205710ea800a8b72fd5f29bad4a45b708e0f20073853894da58bafd6835423bb48 30440220630b6abbf1a76d7d4e2c9c3dc12a6aaef6975c7fb490441ae352f5f875c0ec1b022061ee60ed5e9a90713d5ec2ba62901e976be04a198cf357b7c3aec1b13f379711 3045022100e214ed5f9fc157d33f0f069aac4ab9bbb3310728c0b0c62c02b265e5ae85870d022059c8f0671d53449b9732aa92b03fb0721076e8dd0b8cf3e4f3643a1772e9e336 304402207ebe6069665aa1161de2eb7e586b5a6223175bbea17923f314487e7c49c6402102203982d1974321095f46b92a76739f9c32ddc58d3f58b33fd60161de42e853e6f2 3045022100e6eeb11c8c51c608794945afdb72d49b779edeb6d8700122bcc32e36f6a416a4022034cedc450ed2dd1b22d7ef1373e0a9807c343f6518e29f158adacea14a1aff7e 304402201e985488648a4a4b93411a833b08b5ca6892cb64618da1688fed5d338b85041702202fd228c845a169736e3da12efea85f25e58f9aa352e189be2f9ad2d7557623c1 304502206c81a082ccd0d573a4a197dd5c64ed5abbe2f625b4c0c78a0720b7e2fc671703022100f9392b81640e31e4aa19b4c1c096614e38136ed01a2f20b1bdee6eb8ee31a54f 3045022100fdf17c58cb471245eba997c9c1ad777cf5d49f9a01d1e13792322f79ef2b6f92022022be83d23cfff19f9f08936612ed8c0bc2042a39f3e3e8bc6c4797b90dc6dc1d 30450221008d9d7f2d766c3b50f40aaecba724bc1cd464c275fdf22ff6aaff0dfda4743e6402203b19bf37c3a2da52b38b11e492d15844a1986d66829a1c8082e1b7a86e7a4a4b 3044022029bc5728f11b699ab62de6c5afa1a0deb5dbb03aa07064d72421b3289dad5ac90220463b0ac6acf683d8c4c89fa5f2389cd59f2e1f7dce0878f9900af12bc5462c45 30450220262ce5ba872966e2f0eb110b935e8d7c864b629ccb5aa9185203c5fe5c3a1f73022100e678d4d3960ca2fe0310ed232d808430cf9cfc57dc7f75aceea4239b5031b322 3046022100960efbd9421d56e0a98fecc808b9a279193885e7a39b04bc102a823efa2dbda1022100bc70f748583e7f83018e452f3fb726a478755e08cd6183842221ac95f8b6f6b5 3044022020b1544c159eecc27c5141aab56edb116003c414411ee5d55d2dde85c8ef8368022056cac548cc8df69f7500adba91f227b527e1221b574c1192b899806d840c21c6 3045022100c634ddc6f7d828e53cb4b92b2bd11b1ae23a0d0bfac67ddc8d87027f02fb9c1402200b626cb89d824097d133ef324414c0fcddc7218581c1f420193504e02485524e 304502207c25d4302f3fdda2a7139421ec68f9caf238fcd28f5b8bd8f8dad29416c15025022100a0103723fcc63c22eda3991174e7e9f29b999a224511f819cdb296b0a197301b 30460221008d0bfdde81eb53d48bed13f29ab77de491897d6ff8ab80a67adc5df5918b4988022100a5de4eb853ae0b957a0866c6f86830c3e52213dd11120b51d06ef2cac339365f 304502202926b2e15f957eaf8133851472f8be07f298ff7cf49fb42d892333d413d6e0790221009bc56289d9b34d2d798eaad2f5d11d9e684402c331cd2fe7fdedce7406843ccf 
304402202eb3af5d872627dd737467da25cf4be468b52247ee8d7ad874a5381e98236bc6022036f3c725cf5b567dba432aa635776fb19d722d9a5d1c5138cc34d203e9d8d662 304502206414a82d0e91cbceccbce901500296ce230c1ee33b6404be7615ea1405d5f5c3022100cfda139e56c4ade5a3ea49485b4bb16773f868ce103c34edb18544f092bdd574 304402203048ecd658373234bba23812dd5c1c03fdd092f49c67484a9d93ee45a09d0004022040f86cf6879fcba409ede639530ee9747e09ef04e89e304709e44a901d90979d 304402207c1a9a7be586325e700ca22ee4ea7762704a30098a900da97bd756d19e3da105022079203bc2c1f3ebb4025608da91cd9a9dbba09a720b09193e9d0d518280342fa8 304502201c9aeeb9ec8af14d09369da9488bf69845ae5bd63fbeadeaa8f7fac7b17ee0dd022100fd2eb9cdcceff9c40c1aa3e7432dfff58eddc2f5f13874ce4e35721c7566fcc5 304402200455a304901e7feb00a0977a8a64a6e82016d1ec5964aff33cb4f70c00e8b7ee022026594455907b1b083301a926f1a803388fdf35079696bac80e7666674c841f92 3044022008aaf7c38f25ae65cf1f5657988753faf5930a8db2cb1173c3ce25912025ad74022075b9dfae6cd2e84c5f737cf17b43f1391a10d49566d5b004fcb66c73b077a081 3044022026b6e304af5b1226040f70401b443cbca41741b955243f3fc346f4f7f48d710802200ae02e987a3d694cdf5e07f17b5c27d9d554588b1364c7160e63da3b20fd8b19 3045022100c8d5d922f6fbf8af36ea17e94a6235da7993a1b9998e1cd4b4eb449cd575f27802201ea8dcb520241243cb3f7497be564750044ca755f3877316f5e8d9302bca64b5 304402207d47ec074f529f88c32c3d6680ecf4c48a6a62358c51b197f4bb6fde3e6e63bf022036135b3e3822c9f85dbd9a6afa90f953a8bdae0b8b9845a4aac686f2d8d6185f 3046022100c928b97a5b3ec436ca83424d410896357c08a5b60181afc0250ece745806079d0221008560b6e317929e0ad88d59c3ab273446804c88c71be5db4b21cb0b6f1d03dfeb 3045022100c70a47ea5da7714e1c7962fab4bbec46db7b70c1c83e8eb538dc35924433ff22022060270b4a0c532ec7aa9e732dc8dd676a9653e5ccf1fb660b70a2b740a73395ba 304402202c5c06f3e71c97ed0a1e9335a5aa593506948b606944b595ac2bf502be448d5702206e2bfe247e1d6c62c89b1eb533f49380797f029b147c20cc5321c887a5c05636 304402203a95bffd06cec290fcc7ea96d12e966cc2598b1a1d01b6bcce355d45180cfcb7022059d9ff08e185cb8e715eaee6f42845104f6e25b1f3f2f91dc30184ae2c313d7e 3045022043884a5c7efc43ee393faab2b8405c3284c1eb2d732fa5ba0e46aee5a150ec8a022100a9a1bfcff82fc70eaf6fe5d4f8eca8be9f291cb4acda60b99f59b4bd7922bf7a 30450221008fe5892b67e66b819d47cdf92a34f4cf87501b96e3a44ab01ef160f3e9132a06022032f318a2c3e4c3969eedfaa2854fc2a47f9095d756381de62cbfbad7c182d3fb 30450221009699eea90de05028bf0edf838a2f7084aae91a4382e0e50b856e693f858e530502205bc093df2422f7ff5f453db85ca94bbf6981f6fa228089edebfcee9d7717d960 30460221008804fd5a06d2f5dbf7d90b458e9883daf0feffd7894c629e09db9061ea360812022100b5a4892b3ebd846f6157a47979c26daae543495c980437b754adcc2d58672994 3045022100f8dacb64a458c2b07f359c68ddcfbd57dcfa2fbf0838e6cef470910187de6bb802204ad6c56440219cc5bb11d24e3e19003e02a569b90ee99e80f1ef0eac9692e6a4 304502202edb1c4cef8123f37a016bb84ff90c6a6cb7aed9c4927c85a52f66c6ed73193b022100d0e6ec4d13ce6fef288451ee43a19b9af4451f9a04a6000945b1e01946d2b807 30450221009107894d0bc4ffc787a98c53f9692194a5baabc48672f521a0e89592726bc929022012b8d2f101c5d662f34c4dedc1317d23e954a02367faeb46362611248d5c57e9 3046022100846dfc94379eb26cbc09494a49232cf993c1d5f901d8701b14e295ea22c00e71022100afcbd4bfc4db49edd89c2acf078adb3bd791da27ab2e41ab86ece370e582294e 3044022057bcdd0ba3e9dd26ca4ba4c0bd0e17240360bc3a42b17ad836d0cf1104833a2102200aeed921f70d02a133900452ab15eb46e4c9e74dccb85f79e572146d380ae75f 30440220757895ab3108e53f16a88899cd2d10a5000b1b01249850dcce2bcf270f577be102207ec9c87b43dfff3483a32ab8a7392e9035544777d7b049154167bcdfb14548eb 
304502207c74e497ea52270df2603b7c991091fda5cdac51630f194a99258ed3698486ff022100baa6147e5f092cad3bff0f87114d91484907bcb131953c7cb7dd5ff5d8fd96dd 3045022100ba56482dc9fa752b28bf9149e5024106980981c9152e630ece989e9381e8f1c102207e251bf155e3aa7bcae4d639a2c77992e773af5003f7bddfaa02922d15f6a5f7 304502202794ff70cb870e3aedf26fbe9c780c8cf8fcf5deb3716aa4580ac89440cc68e1022100a8724e850b1f315224ebe8f6a80e05e92a6d1b44d60842cf626cb4ee22c3292b 304502206f5eb031efcb6d854a53fda437a8bc9a1e44ac7b0f6ed260d5ab5fad617b86eb022100a5a8f9676feedf35998b755e598ab5d09e05e9c5c60d7e7c732e53204bf4f4b4 3046022100f20d53ee96ec8c063aa473764492e1685d134b7644e99d57ddcf6ce21601479c0221009387f4ba32ae2662551c34bf73bc680b7380a9f7a2aa62e9f2f3728df6d98cca 3046022100c06678ae22009300df344ca08029ea0fd8f14d858e3cdbb52009e412fc59e4b2022100fee8b4a6899c58ed9d0dca576e1a86275ef490530750fea476c599ae0da6053e 30450220733543c0d760eb38c8e70c4868f1c7d29b55b881311e10425dd16301a1863ff3022100c1d2605b1a30ac3cd0a7f9cb338edb3bb42ada2ee7289028e79c03d99ecc6783 30450221008d914f62b4847bbb05cfb5b08bd598038c8133a9f9e95a1e5229fb4ca0b64041022029653dc1acb12a688f53148a06d4903c4752652c51b36757464227896fafe1af 304502204e16b2293f06e24ae4e9c32233561b783318b1f140286dddcbb1098bb18f81660221009fff6c63b086b6e0b891a75a80d134bf45c99127328cb86cdd73acf366d9d1cf 3046022100aa15528c55e640cee9d3dea3a18da6e01689c3450b5f09aea6cc0eec1f5563a9022100daa9fde8a37af3e39fea1c6ad78797726550f9630025859e14938f38d634f216 304502203f9fb51af4ab4ae36270162d67808f6cff13a3ec27b2a6e7f8f1d0afca3f9434022100ebaf09fb09636b9de70213c952bcffebafd8d07cb2f410ec11bee8b8699fdfdb 3044022012c2d2732fa6d7b6607ea3bf7af4d02567f6e212158228147cb2ae18c5d47cbf022043d37fc01fe850897e1a07f5258d52801048599884138b48c98cfde3c95cd804 304502200e05e689d3dd474e188f8325b34fa74fa4b5eb04af652f77556921cd67ea9586022100a1b8aecb656dbc0ba4a49cb75e8f9bcccabbce9f0e05cd843eb335675fe14198 304502210092df2e96a0897cb21cbe2f3e1cd13ff2be91a10fd9e601d6700941dc7a1c68b2022072760b2d03a26e051caebc431a6c9bd8266a5806b4e6b0ecc9024140d7c2f718 304402206ce28adc6f38f26b2f31cb3c5dbbc17546e9ef4ae8626b6ad8c6b2dbac6b66b6022065ae4e7cd9eee8e046d85d910d2ab17d56b231a39da82a1d288923275f0dc656 304402200ad8a3a4fb1462f4af490920730d17266957d5648e16a385f400046a9959be7802206ea90bb37ecf12d3377b3e27ddddb29e61ddaa69dab5f1fcea4497b35c5bd37f 3046022100cb685d62a7a6cd9414c3a01c3e97809375b11d25763cb491ccc4c3ff85e5971b022100abeda3aa6fb95c6b29d2101f5255f3caf92757979780287d62c573c204d7745f 304502210087a607ab979c57996ab4182afc908b94be35ad115564bfcd607d81770e5d28b502201dc27cd2b53464d2a7b6bfe373015a870ae954024958d534ebc16857872837ab 3045022100d04910f8f5d5ee0345e890ba5497cc39bbb8973ba30c60a5ca2f6a73e7825bc30220322ee928ce2b0dbd74d04d090d47f83af024c5aefc4979057e61c0e726b8ca41 3045022100a5f90599c4d97c6afbd71bca28947ece9b8d05b72723f2268d730a27ebeedff402203bd465cafa5fcae5207b34f62337769a945862f5e3a9c080002b8e27d3981c80 30440220214c35ff03601172cf45470d2ab131ef21172a167a0ebacad98d0136e522e29d02201a1b8388de1349b9c88cbcecad1c6465325a8149f3c336a96cd053ceb210b4cb 3046022100e8bb7a4d7a44fe7a0e2ab9b855de1b402576d2b70749a230ab6b846756d1f6fa022100bb9c3cb1ce7be3fd2f181a9c7196e78c5c8391b777fe15374463a1ca8ed8bef9 304502201eec029d55f0ea371459cfd689f94123fe38ab8df675ba53d760af962446e069022100f801fb1d4bd332ec5f88fe7020fe3d64e890eb2e6ee5a724f5fe292da5f611a5 3045022078bba360de7e9f07773a49a411fa35277ad81708ed0a199b78c5c387701f0ad8022100b03300126cec37807dd6b85b625033d8ff53d715e4e268699b7d21f52d968299 
3046022100edfac60632e242cbffe184f30c2433f4ece6abb5fb307841644123cdaaf71381022100e13514427b21b303bd2365c8b99d027f0d56a5a3220b6b7a6535055d42873b01 304502200a0ed3da8e21eeca06e49c9891b8fb3421c0f27afaf05979bb057a2205b963ea022100969677839738ad31210aaa7d8c76d57a460b2b51c903c1eda96f9f5e32baf927 30440220084d22e6cb821224b59adcd5a69feca41f01f2c97e8522dc1039f8b69f80e66f02203d8a713ccdc81ade53a566d26cca8f86ac03de2bca6304129ddd727dc5919cfa 3045022021fac0ee843d87ef5d26f81a4d6b38c589da310b132cfd11adfd767d48a4b8a9022100c59c2878ef8ac4da6347f23e31d8e99bd6b57b9c8bb7f555bef8b692561874f7 30440220087cbba3ca3b007607db58a2b846a1a6a261b6a1cfe8ee8ba1cb5ff2f9899fa502202c7cb134e27be211632efcdac83b84fe4a8ec47e619d4457302c9a477a256b16 3044022017799b9f58d332e8ad606d439b624917203b7e7ba6853aefcf8340b3dc0cb36d02204c061fcc05c920eb509bbba86ef739d854dd88350b159d3453350686b15c8634 3046022100b805181cb3fbd31e16c01665beefc1e3ccfef5242cd7ac6657cacd58cd996867022100c4d49148e77f6c6dd62bff09629c122eeabbd12da4b8012b210a037c990567fa 30450221009ab8f20ceb230b235ffff151fa48c7a3c4440d61c39f10b0a44b3832124d57bd0220724e38951f108de8e412ee5573fb4b1529933b4be1dce320cec852eb55fb715a 3046022100c362c9d36ef0919919b7156d3a6bdb96298fe5fb13d1efce438e5b702cc9b8b8022100e953d7b586b7ec59019caedebc68df4dacf5594ca67f4df4ceca5c9a4116665d 3045022100ef9cde803ab85e317e118723b3c990e506bea126e8a2fa89907edabf0ff9093202203dda6492463e9b03cfa9f2a48678c3691725e98405b8f5cd18666169fb8f199b 30440220118f31cbf090d459f636069cc297f0f78e359bba09e541f0fff5a136c816c85102201d3acb373c876bffedc33a15a016300de299179bb83d814a57e3ccd94bab211b 30450220185cb55a3c370dfb8f73d80fb07d22c66b992745b6135b2e3edf64fad6595f99022100fb8d3ebb6d2abf69265975dc606c87aef8aec042b7dcc9146e534a046d59ce0f 304502210086c84d52b7528c834353bfedf205105fb528e59c122705d42632a586ebcdbfc60220164e7471e0a3b02a943453c6497d0948060b93266614ae0a48ea9aac9393f171 30460221009c897a7a70fc5c36fb18fa1119027ed1cd1495cc17dc132ca53ef58d943ad174022100d84cd7052178a70daff387ac0f4ab93fdebcdcfbc0c90a7e29d9a2146b44aa34 304402201306a88ce1cb435c76cc0b0535514c5aa53057e50957a3545131872bfbf9fb8602205bee73265923d3ba5a9f486619f034e49ca64828e699a1e632b7b563cec77501 304402204385490136a30b97f70f37dff840ea8c9ad0898a6b3a04932978c8f82865339502200e14c81c8129cf5e53961cc8e3fef7a2dc430a6156239668f4b34d0903de8bcc 3046022100c02388c391885063e8e5c0f3aad87436ed632c2eb90bd258410d40c077d1f439022100df9a576a7c0595b6ee5a020fd6b71befb93d341f0402592852070c83f9c0aaa5 3044022077b3ead952280eeebd34250a40cd3d9329f47acc551a7c5c1cba1353aa2c2c5b022029207f8ce3102c8a20b94c9003b378d79ce2db74c64e58650352eaa7d794ccd2 304502210088f213cf37bed97a34c40f43ddeadefc4b18db2e35e40cc2944ef3fddb0fb880022046c635683b0b37bcf1d3105b2365e39cec9386fbc2ecd6e5313fc9649830eefa 3046022100801ef52a580bd1634f8e7318f4298dddecca8f9d1d2503072e34c4fcd9ba6403022100e316154f54360c8decb2fa7e676fce8f8d31b8a139d5484d32afaa247fc53100 304402203917cfa9885f8e980be976bdc31722ee0484aebf515e784d08d37b5458d91ac00220348bc9f9d5c7cfb29eb84d2917ff32fc22d7bc0c8f1712bd46152241fe3d6f98 3045022100a6baa1eedd705c3a9073922b35c7e52734ce7489115012d424fa0466209dc2ee022037fe69269326b6c4f4cd3a71675bb0edec28162ed631ef66e58545537bb11ee5 304402205b1ed049cb7f2ab409b00dd5eba2e89712737f1d19d13a09ddab2e05d7a4d76f02201d64fd4a1b3a15897a5d4c932ae736053930f44905cc437da1f6c9f0e4035baf 3045022100ba7490265e60b62d1cb1b442f032daeb95d971733a5aac7b24494433595ef7f00220144e69f419e669e0c4aeb58ea10ada3d46c7cc33f02735ae49f2ea0a43c48c5f 
3045022100a5b953fefc47cbeb505eed5105b5865339b17c5ec83640ee3027897ebe8a9f28022028f39530b579f5dd3289e3ead647b3e32fdddf524cd4d17f7b29b1ee88e57760 3046022100b39d13543fba3a1f53422d64d6e09e77b3f65715ca315baf0730f56c1a6410d90221008ae1574c075b8c2ac4dd90cc23f8fc6a12d129cdd5e5d03a8219944608ea222c 3046022100e7d574e6212fb01974fc6aded64417071e04096e53a375c26b771de1bfc5d487022100ea3424b2492432289d69d94bf8066c4c4d1f5ed47e92f737f90a23e181b5685c 3046022100ca09784cc97b8fc2c2ee2f42933393c21302df374c1e7cfb4ec0044ab659ca0c022100e6e784dbf146487abfca46819e739039185523593ff5c96b7bc8bbdef71fa4a2 304402204ebb32f74c8706456717abb7818ca8a0727d565d775567800452aa39c4287130022008ff8993cdbfe0138685bb395d42cb0657e27d3f41f5296802fd129ab4409a79 304402206e7ecb1d148c69b0040f2ad6846b4da1bf03bf0f21780b9db43442eff5890ac20220754122126ab8209d1377cb0e81423163c3f2b971c43d2f4a6a78db01328cc8e9 3046022100ab261820a2284c48eb3bdf96cefe6eb28d4a36316b8f912aa1c819bf75332b36022100bd6616d64e73e4caf316e193168a40931772c840c3e8c859023a2e28418f4ecb 3046022100c599151192707bc9d8ff70532da823206aa9bdea4445e13beb9d91d7bd1d9568022100b623a239d7744cad91ab46fb6882cae75f92c8e42400eb8d26626f267e1167e4 30450220227d0cddaf25dc0b2fa495c0e10c1e786a8cd0808c090521d3de4dc65119d18a022100eb07b46d0eee1a266563527350f56d2567f6e40f1385cf459f3c7883fef319b2 3045022100d4bd97c5b28a06e7028a614dfe179c14e07e39521b5c21d810027fd31d91df2202201e0b4ede1098ba356e0104b8fe6d72554db27f5f1a5fafa083ebd3380419363a 30450221008217702c24e5ce128aaf00ce5cb1ba94298b3a3cd752ca61b6a7fb82d31e12df022037630f796782b1e923b5e56c969e9c5fabdd4d0ec615bd2654354e67b88e5141 30450221009af3a71849d20565b6a3c6c7dca00ee96367471beaa1f922c73d15fc6e8d54b202201f894365148aa1e657f8d643d2182740309637235ab289ebff1d4986cdd4d9c9 3045022100c4fc241a424441f571d90791c3ed3a49eda5b33bebe4413988fcc80009ba009402206560f2ec64102a656eb183a65ff5c0aa22c63869b48f50a1b619320de222e5ff 3046022100fdd8ced5b5fa2211402f1afc2d3f741fa863d061087e373db7700d4f4e946034022100aad2a14872c669b156a01f1485ab8b3dd4b6c9e563a0c7a526f0cc0a75c57986 3045022100a92b8a3f0994878a201a048db320730db9b3717eadbc5dfc330b6595e9c8ccae022006498961031b8770637d00f615dad8c446a9951f2d6d8ccd9eb210f8a77945e8 3045022100b30ef1c6cfa730e6f4aec4c2b58533edf20f33c8f07c3b2336bb0f145ecea81902203e0faeece066567bbdb3057144224f2fcfdcdec2febe7f3b37f1c2ee264386bf 3045022078b3673e4b1097079ece5d9f110eab67dd98a5ee87e051dc2e20c543566348d0022100bc53232ddc8f200c4e9fd938c47f3afe6d06c4d3bc4dadf9714a94eaaf3b9913 304502206f4f03d36edb67283182671fab0b17511e3f9e7b870ea97934f46d41a5724da7022100eb165c51b593b904f1590e8e1791aad8ef44d96317468df3ae53805a9d845db4 3046022100c952cff95c83fb7c06c5fecf6c11d64a50974c2bee422997be2aeb19b90a88ff022100acb184a90c46cfc331d48ed6389371ddad20ea191664ce44d3f3a8b16254d1a4 3045022001accd57f03862bb6cc00c39b95337a73ed4c29fc002c3f131e0e10d43c3e29e022100b4f93c0e61e59873e2683cae04415190f2a97f2fd157826a8229c719dc04e691 3046022100bc6e7b1b6a3bc8e22f622430281691fffb584407b5ccbfdc8aa0cf24fcd752f102210094eab5a1c9f1b35407dcc6edd48b4e93f5f06917dc518c62eb029c6b8202c518 30460221009d4ba10b532c72b5f01a80e0455fc45e7e84af81900230af9649627a5999998702210081b0b41a760998442b5fbcad0a2d744238168da915fc0e20370b724d60b84085 3046022100dfc917e5fb04a2f57d46596ccd00cdb450e6fd4d3e465970d79913b9aaf7f834022100b19404132ccb60cf0b03459949f32f1c42ad4846f667cfb633a978a00d8db214 304502206c4659ca33a5a4d53295347cde21eaffa7778a5e70affa918ba7abbba243b107022100dbc7d2eb9767939b2f36e3872b02a8f9afa765799c50ad01703d50d9c86e62cd 
3045022100eff2d88857a14f0fc606fc7cd8051bbe465abb612f1e6ea94099cb8b279d3a1d02205d3c77b20ff35117381f9f1287fa52b5c0e8c3f757e67b947bb19674706b8119 304502202689fb5fd3c8456361a2ca646c1235197e078d73809345813ebb5781a9055ad1022100f9da4d53d42aa103a25a4d8552e4e9692a5cfd37732e7c14264e1be098f37ae0 30440220337906fc8618b7c3c48731f397863da0c4bb49d51d670cee51712652b5eb788a02207fb3ca2dbfac9466a00c16eac294865c82cf99065900d2523b266b7cbd4b8188 3046022100f409cda5faad9cbb836e3b7a3c86c5d841b5148f5803777b422830a49740544b0221008f460c4355a69fa649109c94c706406362817649061aea5101390b26af854426 3045022100ad5ec89fa1b9b2cdcd00e8e5c5b6e0300128d9b49f73893ae7ac2260ea36f427022035baef76dbba352f4c870cf6f9d3884a43d0277c008149d937f0bfcfabc2d80d 3046022100bdfc1a94116f3a43f5a02861b8e80db22beb948283ccf3cb0b58148c8e266fb4022100d31bc1172c14fc678f41b8a2142282a853bfceb5803693a81c7fdc534d60e92d 304502210088122fa119564ce01b80f5f21951df762275bffa0529cc42d78b12864349560a022060aca3c994caf339fe39ffcbc33dabd5ff4a55edd3b47f8f1ead630265f36977 3045022100e1e9e4bdda7d0e1737a55a026cae16fe8c94aadf7c9776fcdb492a6ee501a19b02200da6561c7a2a1acc0b238c1c956149eed2d61e20c64d5eeabc6fa4116113c516 304402202f9c98dd5348f70919a41a399d8eb196de9fd1c4e9fdcba4f5058758d8d8fefb02203c9ee949f366882294cdc9bfdf533a129690bb31e5ddd72409f2e4659f19df34 3044022039122bfb4352dc878fec66bb7f5ef9b2e57aef33d6f7f16c8a0fede167d2e951022079a91444e3c47d9fe06ca1ed7d7e04d505956fe8b39b1e4a39718d3a0b8e884b 3046022100e53c936e37519441bf838be019ff8bca13422603449a581f904df29fb9996663022100fe637722f0d46f4feeaf3c9a06ffdf099056c6b12053ff03e0cdb3dfaf3917a5 3046022100ed1790ff88ef9cd9ce4d0d5b465bbb5a4b4d7ff095924785705e7df2eab87c07022100fcc16e3f4ad2a8c2e6df3ed7174012bfcc454098f9bffdf7d794013ee20fb4ee 3045022100e528256daa9a3781c87b0a3a761d2dcba801539eb63acb53e333e45c86d6974e022058a9b6c27589658458d83ab6a7ba9e7ef5ab55bf0a0ca1f596225f4f21c6da92 3045022100f023a8b8eaeee97f9e1c6e4bc3f2d1c6c58630fd48b953a992095c7ffd2f14b90220699409e3aeb5c7dac6c5dc4dddd18cde0d10719f00ed7061fc6525bdbf6f621b 3046022100ba57c2c1ce319a9c4b501d59a778f8e0eabaa2379d717c6aa19339eee1661d3a022100bb7af567c9f9aa85f3d96993d5af7ec4f990b54ba9c7e71324b8843c2af247ec 30460221008f4431bd72d3e8969dacb715453ccff459f8f63247357d55196fad49d07194a90221009c634488d01c98b95e5802d3f98c5865fe55ac2cd77113579debb767191e121e 3045022100accdedd122a033e86c7ac46f38e640b94772de7797ad7851b09dd1f336dc764302201c8c451213fe8fae7d2e6aedb5b726d2583505a0795b50a62b2a406aead47575 3045022100ae8ee986744e5fbe09c92434cf898b923178f91650fe85da9f19e5ac5999cb48022055dff96f32fd934cbd956d1643027d4c0ae79b7a33f30d314fd6cf4f9498d46c 3046022100fd9e6d51c31dd51b349ca6ccd6ce7516bdcc79821fe54f70470b3efc629949e8022100a10ea416139dcaf5ff2c4da3b7b30a5523634c8e0f34b38e2059d3647133a98f 3046022100ac780e0519cce585653fa9ae33b89da948985c914e9fcc4117571b355b483fa3022100d56652293e2bb69492dbbdeb7768f01141ff306afa5fe3f624d383a3c8d66a91 30460221008eab53d42986e32e065a5837f7fbaa35da5403566b01f4516b8ed338c032ace8022100e2cb5a3a2a79bbce0674f778dda43feab993ae0ca1b00f23a5fa10ac9e7f22a9 3046022100c68840fabf43af31256f14cab7300e2536ca61fe74fbcdc45507df7f9b8e472f022100fa133cfc4ce13914ba626146c7e8a5bd6a753ca90c4e47c24290196d61e8c75f 304502203d3609f7c844790e2ff915cf7694f8841aa497497b520316c96282b512592c27022100a5192065466f9fec1f2150f16d164d7c045b5bec45725dc3a7d8988130cf38db 304402204cfffb456b59dcff948e342bbcb03c8721bc9ed5e54b3c6bbda039295b8d37b60220696b4e2e5cfc620a38370199def234cd1fc38e4a7464cdc53a60ce490689baed 
3046022100f46553640679a4d5a7f275172ad2d600785ad28c2c528ef670d2dbc009d97d2e02210086cd9fc08077c07c0849add919d604702782c3b2ffd84bcb53243ac9dca7aeff 3044022069eaaca3c62207be45a03731b3bc9b70d9510edf5fc6d082c70604596cd48e33022040be8b5cc5ff1179ddf3178d77c157930d8c4720d64b293508573f10e5a14890 304402203186b5b768e5894cac3e61fa3f0e20107d71ea1d02f4ff7d302c936a26bfe0f602202317082371bb26ee1d97cb48ea9345f3fc6c9c7e7bdca4589b1af22aaf87b0c8 304502210083157fd333862c8e0a3ea2dfa2e86bfd1c7156fa4b37d5bc522c30d33f9e46d2022009b42d188ab7ba1bd94d8b11d33ec8b066bd94e25fbdd212ee35dc9505a5239d 3045022100a9d308b934170cb2d0b9071826f76d65fbc486146da15df3726eb05f8c3085c902205a68fbf3d28e3433df320bafc1fbd43d0a12036530779f47dc8c1ef4307c5ed7 30450221008a5897a86e21cdc4b98815f85e028721675c4e635897dc91ab3f65c4d24a05a7022006612143da80cdcb6cf7571e53e1ce86a40ec457a35144acaaab77f3a0cab2e4 3046022100a11232bf8e9d8d84f1a62b0d4f67268be77a7d57b8bbe6e641bf82c241e774b10221008c15f5efc1e99cc01a574a89709c3adc93fe497104c35f9586bd503c0779016c 3046022100aebbace503db6efa319f869ce001577fb2ed04eb4da8217a13100e35bb3a6d410221008f72802947926ea1856c7137466e6a2fb0996638942255b9a7ee734876e7a9d2 30440220244ec7450909bbea44c2f477aa68ff6140994768bb27156da0ae2a5ca9c361140220540ea33b0c84e107963d16fdeda58ee17558127bd65a0f3b0a16f7f0e0a580f7 304402202a6a5a5fd36b4ae119cc55cfcff4ad2c1c084557e24ee188205e7774a2f50bb6022048df6e84256f1faa965af03b0a64a27d56e9e8463bfd59107997c5d417606f50 3046022100a40b8d6732c56bcf2fc7d5661d9af8a061aedd2d20ee4668530ae1e0c8e9f1cd022100cab1f2bcf3f132540e4e017150edcc9e616427a830fbdffc59a38829c316f12a 3045022100e9593040e058d255e59b8a3aba3dc2e0ab7452087f32d25bb4088a941fc80b3102204d05f84073229ab1db306ed49a87809ed2824f8c33ce1b45eac560a3c3ff84fd 3044022035445304792f911af2a07238c836611b9e907b92f591329fdbaf1738146fa5e0022019bbcba2f4afc217df811f79ef478ff60ac164712efadbd982fcc297d5860447 3046022100912b0d1cc205c936cf19b8377006a9cfb60c848fd1e229c75ef203ee217f747e022100875a0ff92a2317e973bba1adb440026075fad3bc17962c93a4f1587e4fd86260 3044022042c4d201fb397514896cec169ddeed1cc1ee4e3f5dd3a2b88a6787790e08a41d0220562f23dfa2127308492addc41d9c8211a0e1fef65369612b73852f814257763f 3045022100af83ca0c5309cc21256da16b0c0d8072b6dbd57577d37b3862c84fbe0da5337302201dfb60835723e6a64a7ed107157ab76520d2820cda7e98578fc5357fc3ba4970 3046022100ffa15292f06dc03ebf3dc1c04f87f3637cda0fda2557e805bad14ff54a8abd44022100f20fa716fd348474e7dfa3aaf1644810d230025ad8a3d60ee99b188c0a8fa5dd 30450221008026a98b6de6663876027251d3d55b5737e99aaf33fce958118cc9510510c8e9022077b2baaf311c6546b31c5d002446795b3877098ee1301733eef627d05b41cddb 304402200d66897a805970511528fab2b960f5f9479eec1e2eb46a2696f782fa11e43ec302207b9b568484acadb8fb11a8228d708f30ea7b0ab7ecb121f2ea8b833b7fecd8a2 3045022100e6151fc9b065ef43e8555ef78a4dec1fa2e29a11a5eadff32bc90a6db332983e022026244dfaba05a0f8cd3b52f7be9142fe71558f6207a5ae7bf1967d883f7c0b4d 30450221008654ea7b4e757ae71389ef498f2fa25904a2b154c10745d2af7dfc814e69688802200cf6a55c02c070f22096651878f89fe9b25ddae161e9f1a22ec927f4c94db4ac 30450220627ccdea0c5f9d1625b5fbc1194e523e80ab47760fab22e395c361f89664af33022100bff33b3c661415680b9d4bd05cf2f53852ffbe9eebc44ffa6ec979fa5f11aa08 3046022100a99d27fa9feb96b38f66989dcb9cc5d5851b8d38b9a40fd5d6fe3e3036499c0a022100a27ae5fded3c4c55f88fc84b8679f6109bb5e04cac23a626db1635493a53c93e 30450221009160b4808effd24776be40c83bc3f393b81ec69758abfeb37a11e2dec1798265022029b4efd8d4a2d946326ef627d94be8b384cd0cc56d9b79e11475853bda423a7e 
3046022100f4578d45adf70ad634b9f0c081cdb208b1c62b48d9041eb064d3e9fb3f080072022100ae7762dc98042987a4b5d270f7c01109a1c680b846746b5c9516e36478fc9533 3046022100d5c1d08cc20b53809b93a92f7166072c87a463e3c9d8cdd0f8f2e90afd4d2769022100d7a826e5aa52db5cd5d88925f345436b5340974b45b26e0df46d7a586d364098 304402203af023f87b3caa27975866c96844b5f62e7d19f071baa2e93bdd583cd2cbf531022002c1f49f0e67415c2aa41668a0d74cc56d781c3e44a39c849e34e525f879aae6 3045022077aacc860a1918a43da1a46768a7e0ac127e55a7a8a9700b92e6f886b5cafefd022100e62adaf58899b10e2f45d725cd92ff6aa7f777365ded5a6f2cb380281e77443b 304502201977cc4b9b6c4b700d89d2d17512a8e5302fc86132a21013588fde075b4dcd7f022100c227486655c67087e8ae11cedcddb925949af29a3f4f9fed6c97444d747cddfd 30460221008e35dc5c8390c0e55de70112177f138dbbdd34475694bd691706551e2c484d1202210091d3757f7ac676559dfeac3792fdccc548994d22c4e699a29a0003144835ce0a 304502205c5aa1da683598316215436bb0cc73dfbdd6a0d333ce22ec199cb39af7a32043022100eee0c0278eb224e084969bb3cf35e7cf6b510d5cf54f9503b6a8b4cc66f94ba5 304402204d1ae3f82352d01ada182ce251db30b37bf79d410073963de46b4eaf45edd2ef02204aeb8234502ac62b7afd778daf0a5beca1787fd17859e302dea3a823741d3450 30460221009e1a06c09a26751c4e33ed4570a82a6da77b211afcb577181fcf319d77cddbfc022100bfe9d613dfb7fc1475eb0cb88237bdb5718db458d991941808ca23253dc7f935 3045022100de396c5b0534c8af7ef0761a8d0ab1e87a6ddadece657b2aade10505335fb3aa022010feb2bd5f82b7ca792fd26a8d9e743df9eb40b48a754fc0ccf19ee0b3076df1 30440220779cb9f5ccfc80f9fe804b047115634695a13049d20bf205996ec68caf28188002200947aed9fc30a37e76225ca07da330c3e74397c7e540977169253c5fb7717482 3044022006129e8993e21130890da5c923ea28cae62c094da5c345b61086d20439ea66270220083a7e0dfa9200faeb811a9ea18c37aa9d7d2d5c90b1ec4c09828a7436a3deca 304402206075a641033ffec2f4b4feb85a6f963b53229a79f2a30258199b73c2668c41bc02203c17f6ef2f5d23a2871b76a590dae57a8150e3dc1be88233f24afa935851288a 3044022003c88ad607c0299b47043ee08a550b8e4ba85ead748394435c2cf6ea90ea2ac802200d952cf34a4072bfed4fbfe6e8740855853fd913fb097b988f57c230b72bdf1f 304502205bd9a3ef3bd36936a35f1afc70e7e3dd4b863abce30ba579b9cc9f3f6516fc92022100feab2bd7abc229f783720e98f0cf0c60f31b2626dd85b581b294508ec209e537 3044022015efaaa46c616c4849cdc766f1cc8b72b4e5c210a7f81ffa5e7ac3c76bd253f1022048817824f3de58714dca0a78dfd8de6764d9d7f227afc740cd02244d729de459 3046022100e04cb33b71846becded1a80aab9b2eb9bc8fcfcb49470352e61db77fc0f455b8022100e4a7e8fb89a376a2276ac3e74b2b0db399ad09a95905ffab027a7ae127599391 30450220376330891bdb7409e7a1c78013363536f52287427990725929ec7ae378b144d3022100ee79d44f9ccce3e9c4d35317298bdf311582e26604e1b453f3c88a12f40a339b 3046022100ad1eb0196dff813ed687616a3c8fbbf8f6c85eb651fa65feaf47bbcc5798ccd9022100f5baa8306e37035028fd9eccb8bba9968d694f9d5d64eef26f10c3e46b417ae6 3046022100ea3f553c2e98cc8235b3baa26c54eda33af1d6b018182dd7c52bbd2f7fb3002e0221009c52ddd7304efefd5f537149e19bcd1a072218922e57bbca1d62679c8b77dfbe 3046022100c601e85bcad8cee57a819e62cf116ef0febdeaca4e8ed473fa3f21fc75eae9c20221008302f8dee05c507b8942bc80446ecbfe395b8a7a5d35d2877256168cfe017a06 3046022100ff4141f46d7d2553eb4d4dbb71025cd6f043a36c8c3cc59a0530ae9790f70602022100edf1eb5a1c3d911ca8119419fb0b521c92aefb339361563326c399d7eed498a3 3045022100a40a107cdc638863d7b86cceaa8acd87cf2f2cab25122cd11bccc3277dd1adcb0220681bec0f025f29964bf7536be24a81643e8ab8c93b8129b4c130b9f644b4aac9 3046022100c852118168bebf29e4457b68e6a15cb356bcc20e161c3f5fc201e6d496d0c0b3022100e713b4a41de774ad63d1f7b947f75e31111543dd7424619021d20b16146536fa 
30450220190b84cf86df44d26c0e19a9ab7c05da2cd1b038cf509d6ddaded7213488ae4e022100a3ece4766696b602b09133cb1ac9b9d6f3caa7816f76824940a78172d256ed7c 3044022045474bb2f840bb95362b9f334a74034ada015b603e86f2963cf3d651c2fafef10220080d5558a1010edebcbd9558e66e38b497d53a88e4f940c4bcb90c1fbd100751 3045022100845e09079b9d35dbd37ecdf775d7bec7efd97a6329817f2b01785685390d41dd022055f7134ad8108fb1936404327a597da02024462b9f1fa673a5b019304ad8b382 3044022076490e25dbc3327dc2e8d5872049650cb4d498951d1e4c45e699e596b2d965b102202f7d2e77b6519b2512eba861c317273befed5c100e82f82dd62abe1881991ab0 3046022100b72e55aa04f9acde367264c6ce34e2f1b494841987a01889bfab7210c257e19a022100b7eb1f027665dc1e22372384a89dcda1f249d634e86614cf8283f800473a37ba 304502206d1632635e6f055369c7eba2be3b1ac0c9bd74daad1a266daf36c19b74cc791e022100d6e366c4ed6b1fdee47b4d0dd1e5ef3f1073997d3d8f7d4efcc0dbb63c694cec 3044022019655d049156a32b308487e93dc8928db590c39634304fc3b652a81eb8941ec6022041acdc5f77c9ac4fe357260144ad11399a46815dfe8faad6cd9ae371fe9ea126 3046022100e2223f6f9090646c1dcf44f097a204f6fbdd852871b1df8d73f4b36af70a7c96022100c03f282160482e9f63ad946e0db4ec8a23bc3beee1a0c49803fe2dbfe5da7327 3045022100e28cc1084727394d57aa6fd8a9cd67beb3f72042ab27acdbc59ad62b93ac569b02203acf3d81583b223c92e3122a049ea541fe59541ba7e8240f17edd530855f4615 30450221009fbe5aa901cf91b04b556de859dd25e9ab8474e659efdb6d7c3f880b87e804b102207f0fc90a1ac681580308c2f8c9a9597dd89f3fdd08d208467895bc50a61fbe50 304402203aa029e2dd057dbfa834a23b26de33d881aecea18d936e4ccf184094d0cb621602204ffeec120eeac7f322d5ce83e00d23145678d8f66788e976703a61f97a33bd2f 3045022100b2c064452d7e459be06b4b29f4f60eb9733485876950ba5411120b9d1c858d8e0220445bfd931c3e54f7e6de9e8bcf9399857059a5ce728db949e2f20d1553fc4542 3045022100ae74ac09d56dca5eb587692c86a69090353c48fa38c248e65dfc740d71470ba602206da13734abfdd27aff6fc77054cfa8037aeda00d3899953e1f72ce60a35bae30 3044022057d1c6eb4fb6a2b128df715eba9b6d93d148c49207de0cd18396ff757224341802201fc8ad0c7d4a952971f61d58918869bade40c480da1275ec10072a1c108278d5 304402203b8d780af33a8d431e596e215626b378d706440620bc59b0dc6d6effae8386bb02204774abd509f5f38b04b1980b3b03d1c8018b6968c9fa060a02ba52e74e505799 3045022073ec4731148ed61d50d034cbfdcd118f8f8862ba036d399c70dda0df757cbd4c02210095f5669266f6bdd72a072aff11a75a4cf6fc721ce7f3fe5b2e6bb849f9e4312b 30450221008fe5299b99a38a77c2203bacaa323f64326324eb70fa3eb3794d55b0ab13173802200c923f32aca6c1736f613fc4368eb4c7d98e60f906b76901058ee05329bea437 3045022031b0ba45564389d3dc91dc3fd1d48901d010f20da81640b90d05e83ab9f87ff20221009f5792c6611f04d9853c522bbec05ea65baaae8583078d0561cd714e77388a71 30460221009e2f4995981f0603627040a5afcbafcf88df95a9078ef3aa6d84dc61593079a6022100a845ea2951ebd885d8d3cb6f90ea2c4bc499b7565888ffe8c54a268171e2b091 3046022100e90e2c19fe94056b15c168b916cbc41bb0aca761b87c3a5c101619cb72f39438022100b9ab5db0039c646eab433b4aebdb40286c01e1fe6f7339140889e45218d7828a 3046022100f63d77d39fc787f3fb6392a924a6cbfa627d313edfb1ca208e25f8f4c9b96a4c022100b0256390706510226179249aab8ca361aadfb43f067e6b1418e2962e821adc74 30450220333173f5ed8dadb4cd0f07b3c03e76e90470f43132ed18006919ea645781c472022100a14e56f945fe1ae362ab4f7d1a0c86e86483da836b77845f91f7b2756c1aa93b 3045022100a21f7ae79667ae9c7d33ca32730bf6093d266f8f9476cb11a4d9ca08a9d8758f02206bf4b5e86bf18a836fa305834e6f61724f049a77906614fee83027cb93912607 30440220512f2655f345d49f06855a0b1ec66fec98b0533bd51b627e931a09cca4952c84022032cf09487420af497f16f1235e59a1b8ce6ed8e6ee2022058c1e786521682b3f 
3045022100a0ccfa5f4428aec765cd0c34d0b28a1d82a969149a8965e96573e37a678da73402202face37b621576eebd6fc45b3798369c47bb311f83f88f4e4eef9db8133948d2 304602210081764e0e9634d4a07c91aec74cba9ef270bd97e7e2cc2cf640a7a4937196be8f022100e251dde8fd944aa59413fea9722f4ccfe572a73fc6c3be2ade0cf6bbdcb808fa 304502202339b7a370234a05941b78507a0048d74ccbb3b166f2e58c55b4b664908a464f022100d6a83e863760fa51cf7d2053f245ee21de9458ff4b81f29fb980229e72beb398 3045022100930e0aec8fd1027679f92dc25c79ae21fb52514196e3b20f69143aae4434152c022053540fb6a1ca3b160f325bc20eacd5eadebd0f80415c23a2318c06d244513a8e 30440220472a7f944de35d1fd54b00101799d91a60a8d95cf5c648e13e6dc2db070bfc89022025cc2f8cbf4ebcdbf4631da6c9d6c591a945ec42a8d89c9b7061a43cca16aff2 3046022100864575022754c0395658cdbbd86da637029a7f588ce7aa50de6ae08f778d2766022100dad4a973a8295ee16228e77d1adfa8ff27ef501991ec332c94e0502c207103da 304402201a76888aee85f3a9215994730f678ce5a985916f41a9d5e3c588c78754eeb460022004059c0dc539a6befb617b37638b28d574ea102b7b337fdeba31c3ff0ae9e2f9 3045022100fb8ae5a768c742c6b8b9b6bb8b916b4355885bcf14fa4ef886a9fddc810389b0022005a70fc32665b3801cda508ea412ab66cacfe8a527688236c9bf2d8700ff5979 304502210091708679d3d24aec9217d17f0fe0af7f0a4c358165a6b044aca58278fc43bc67022041d512ec3b685c4f8013cf7dacf521c6c25c8a0ff336b50a677b43f67903718f 3045022100eac6a645256ff9cd48c1502e1554e4bd5a168689df67fc4bf0f2a296beb99ea3022023b1ca316fe0aa6041b140657af15e41daf3bad84cdcea9236a21163f894a30d 30440220605c672b4c32a1cd77ded86ceafc09fbab78b45a2750c2c0a13c656bf8e14ac102203e71e5f87a5e45397ea6a0357c2f573c1d28a88921e81e29247aff1560139633 304402202ece1b1e2441e3a02cf66d3dfb81f8344f02552e0b1adc649d8c3859b485a36102206eba5f46163a75134435ca6c1b7fce310cf88ad3cc37610f0903e8406a69ef2d 3044022070018131dd0d7546e0abc5169e69b012839b31bfd3c551c33b9dde3889a093c302206697af517944bf43d7901f778a541d093398cc89e0fcb21682abf5bae65eea71 3045022100e2f11193e6bf89c5478657a831abadbc2fb17342ab5c612e0942a09d721e952b0220512d66b8bdebbe1eafeca164c5d5264410440bce258902e1ed4b374e7ccbc203 304502205dad48955ae06691b29cfbcbb0872b2ad64cbe45d28d845f0a80f73af1a2fd5b022100d4bd47bae14064033984d79bb1d3bbb999e07cc3df64de5b6e561835a925854f 30460221008f48b263e8827254386714da0e85fb04392398ef7d3ff5b9b3e385a6336e9585022100be80baf29056f85ee9b556e996e6ba125dc840669cdc4dfb67c6c2ae72f08d3b 3046022100a64fc4aeebfded26ad82572c4a7e14e4c043edcc7f259703751e46dbecb0e03e0221008c371f13c0ac7970875a385ab84144eb95604057b414534e904664eb1a6782a1 30450221008d5545c1184fe591e1a5f99769dd8e73997a52573a01ec28db17e217766fa09c022033f099997cc69347723795001530f59aca217f2a0e483753bbd7d514666ff139 3045022045df10f91bb4bcce7d08df03e47db2e0f829da3d5e51666ac09d20c12c8e115c0221009062318c637035d73b7442f1e0a2503225a5a28a54128248316b3670bd4f5275 304402203913f17080be5b8a79f25e30cd0c91e54ede3b927a1d29c1b1a6daa77e9949d7022056ef5aa014431fa8e78efa8b727c89deb7d03e526fe7e0693c460f34e708d223 30450220134ff4f78a3ed41459afa4704e81d7c0d281f7fdfcb909c9ea8c805d75224320022100cfc88e886ac155d14b508eeaa960353d5210d9353a7d4b41a11016f38899be7b 3045022028fdb409aca63f6b221014ece7904ab68e394b897cee6f964e7f96e62071821e022100c8794b5d73ea496d3323330d2778fa5a0723213becea015c73f2ac49573b1cdd 3044022049ea7e8cc3824433932f75bb70b224ef13fb73c5ce9d5f1b936355936f6d98b40220694d4f0019fb3141a107ee23f1a2797754929118b5eadfbc4986661a1307d8f8 3046022100ba667068feee3c7d859f6e31bdf4c9a907824eae553732aa4cf40c274d588d8802210096f0faffc016e4cc1bcc03387a16bd295da403966a6b72d6e9ad413e5439d25a 
3046022100e606dbdbb36f1a0da984d028dbbffb399bb7c4a07d3c705e42020d22691b0e9e0221009b6df9c47c6a142d4c7a8299abbc27e69293f46bfc1a7f998a3be10f5a824a1f 304402204ce6d66bbd7d47214f96302db3c6bb6b0273615652b806ee59aebfb1bb903429022059b3c1d5b1aa4922b22efa709e3a08190520272bbf7a29b1c3131b86a8ed87e5 3045022100c1ddde085270d7a72d26d587e55634c4dc7ee20e873c103c72c47e8fddbe273902206d1ae32d4b5699c69eeb63bcc6597909d90883f1c70c4f7e1c976acda88246a6 30450220054fcd9eb6747114d7063e009ea500f77f203f5c966cc84eefac3832c298a057022100cbfa65c6c159a2a70b2b695d6b19244424c8c1833c4e1e44f384076c02fab16b 3046022100846ed93487f3eb05bd616cc9d02e8a596bc9c1de480055bce3ded35f473e40d1022100a0dba34912286c9a84ce41e5364e731d265907f3d19c043007ef0c01292d00f4 304602210088068c7c0c298aecaba8d8f9157e785b29b50774b9bc21e8a3a07b6e1143dc52022100e42438e873a9cee095711979a4e79ba80db7d635ce614671ed28528c2d1028c4 304402206ec901484369c055c0221397a370470caf844819c8bceb4bd747e23e8236ebb802207578f0328660122aeb8245b454e47221a4403e5c8403a0c46503c96356bd09db 3045022100fa7323ac53ce055ec34a3e86f65b07533118b855c4f88d0fc692058522a3bffb022058279115280bcb5f29b6d843a9e6cf26e4bc6f1edb7e6b94ca2247e4ac00e2a0 304502207f59091dabf68dcf5a08962609a3691651ae5f0d1a8e4f5ad4eeb569e21ed24f022100b162ea9158aba59b82d06e8eae545c710d0d3d7bcca313da6e15354bbd75f1c9 30450220695bc15ba9ea41ac724382b93f19012439136222b6d13325b877d923f8558074022100db5f4b97ce308ff36a4ed984a763440a5dfca88f8ce5e13bfda56aa589177cd0 30440220760174dbe1093c1ce908a0692ab17d83496f02f1cd740c34091f5f8a4cca16a402207a8f7addea095c1d5e1e40269ecc7fd4fccd6d3f66387697c42890a99cd39a32 3045022100a0041602578dcad8392eead90858936e49a58023772c9afb103b434efa9031050220164d1c65cf5879db9438b769cfc23d69c061a6a2445c5ce4b5c5ec9ab751218c 3046022100e60ff8dfa39dfe2ea6bdd99de076a088fd4c91bf6e2ccf1ddc6d2c98e920881a022100f8a8f7a3d1acb62d1cb4094b70edd2bd68849ecfc9ba397d9ae3715b625aa9f4 3046022100c03f08e2adbcb966c701ac593ee351ed818217781e4bd8f1b87ca1fff7dd8c300221008043b7a934c9dd3b39da1391adde08a28a7de2fb7c2ad254ef604494f2e10ab4 3046022100aaae8bb9b9d804295d3a93f6a6a0c87bf51da0b7e139c3d94c774a73a5878b01022100cfbd10725163cd5cecc745b72f68fcec35825c9ced5aee835faad973f39d7ad1 3046022100908254502a6a7aacca842e68f85be69a18dac6c742da6ee4d586f28c10dfebc1022100dbef87e347b7eefb214251e6e4c058844084cbf2c4826b9cd7827d0babc28da3 304602210083d6251874933ae67ce95df6e11edffb1ca30b3130c2b1525cb1e28aeecda25702210081253292ad3b98a4e6d9934d1d0d8968b45d05554b4271625903cddaf4d9140c 3045022046d5df43432939abc20fd2ea6b47b8de25192c7b5c5c1d933e7415d6f66bc4c1022100e26410139ea2393c8522d21af7b244278eec29deb3ef1e80fbc94679132028ba 30460221009e3bdcaaadbad022fc247b2cbffec540df0db154cdb6f618f5d97415b01dda73022100fbf96c2140f53051ffb8eab69a859d954a9d549b1db185d77868add7cb6ba344 30460221008623cfd5b003dd2780423d9226930fbbcc484d7d554a232d24d5e95e56c88864022100ee233e056997a5e14b378f4ad3abac2d7d4e0abd976a37f2f150d3e14aa2558b 3046022100eef667fcab241f2f9c63e9b6c3b267644ebefea67fedd235dae24c5c203e7e88022100a6b2d3db0f1a179767d4a37607c939d646bfe19e9b2ff9c37d84d1bb029f562a 3045022100a71ccafe0d3ae1b98077cf0cbf8db7510ae41e1516ee3c4cbb552a6f1ecf5ab302203d9f04a78d3d2bd6385eae3e1fc11529a529258a3af78193ba32f2692d9b98ad 3046022100d72348fdc20d863c41f140151249f19ba66bae82f12033bbcea090c8214456ff022100b0410639b5091a96f9b00ae489673dbb132ba143ab947b5f8e3be79e718c390f 304502204a4080a7a38c2d70fc7a1848e3a3ca7c3b2555d3e94b2f8d96411fb572048c650221008cf2b2f851ba0a8d7d555ff65bd819f415e0ae25db00602788d3a5b580baead1 
304502200e8d95731c0d52d439af616386625dad780f9f40c9354f25d0f43f4a5cc6d46c022100f3fda6de46e1efb0ea109ea830c5519000636fb8a091773c234864e55400de09 304402203fbc398127aef506a74cbe7cac98999c45b9a4b05069b7f4087d23cd5c0064ba022011efd057379143dec76ccc4da3987e03ddf0068f213b1af5712e9c44a48b953a 3046022100e1096829673842e479805182bce6f87fef13c92e40ccfa408b400199c05cb065022100bd901630a1805a0bd418a38d3a4ebb2929cc2aa79dba7412f65f85033720aa93 304602210089ec2640cd160d01b270c96c17be5b52e2da15818e69b29bbd63b4f97f42c24a022100ca0a1369b4724b470350be06f20f52f65fcdba2858eeaf49650fa5d356e052a5 304502200cf361befcc5f423ce385539d5fe0184227651789ece54b0c2a8ce7a0064bb8d022100e27583ee8cc48e9c741d8a1ecf6cf6678f7e3678240a3ba31eecb66af6f35047 3044022046500a0f99aa0701863c0a8b41c08c3757684101d40e30602e4e3e26526df47e0220775917c31f3163534ffb1831a3df81a8013258cbd46e7584555b0592059d2da3 304402206787642835b55c4baf715fb500fab53dd11c9cd7e9f107bcc6cd9d3d8b9df06d02207608b0ca4da447e26908f637cba874cce650444a03df03bc57e74fc1af267e52 3045022031d96712b4deaf7a636b9afcd51efe43b2e20fea225a1b45b20e4299ad546cd4022100b0181ee9088eb209942e8415de12265025e75dbed0afe3691b78146679d1ee4d 3046022100e08472f8d1f177598e765f442bc3c14a4cdb484ecad60d2ebe71a4bf9c8e015b022100822be45ffe69bf045b1368f6f64528a02764a1754113f7249bf0414662df8682 3045022100d594e59e541d3a9f3f9aa98c01f23e6757118c56921b7d3559f584ac38a5fd74022040f8a721382e46b2355572f6dabd41d150d7cfeb3d766b0bc610017b14abe89a 304502210089749c70d995a32da6c7ce7cf89e6f42a001353841a71d2611e11da9a7e135dc02204a6fa639a1e07e9c1c732e2f93aac56192eda248c3e3c24da84186d265c1cbab 30460221009eb5c653e314da9c419055cf0a9960ec52b7dc0f0505cc36f2857a58c3e886240221009ccab220fd49be5a8d1579481b8e7d311cdd3baca3f76b1bfcfb60684914ee50 3045022100b3bb7beeb43a6927bf78620379f2b0f9293aea0bb5c81ec94e0ca69095c2bc7f022059dcff11566fd611e11afc74ecc9a1199c2a9715e30f82a441135dd5484be404 3045022056a47c7851e2f9646a189b2356df19eb82003e23e8ab982c176846ce307eed9c022100fd9cdcbcbcb6e4cec056330c64b3800a3f09004c98fe4ae989607e56d79d5903 3044022059302b12b10e84dfe2ad068a056e1257aaa703598c2dcd530f2798e7bf2478e9022019e870624996483c643abbc5d3cfbb30e4daf9f4ffc9ef41c86bbbb355eeff86 3045022038b077900f10142d773bb143ff0c879daadf30d711061ee40180de77e03e01fb022100f54c7231a496a6103b7181c0e76ffd577c21669d509462b15404fba9629ed8b1 3045022100cb4711ba8fb77960299e33c6552eaf6274411c58a5b8e0f5106223824152ed970220617d7f41c0d19fbdead7f0b34f4344d3167d7bf481f71cb3b44458f8724337b0 30440220096108391c48bec5d9963d9c5fae21a44bec37f28a9ecdcc652275534a93dab802201d81a497f4a3f0232a1600e07c36cab91ef0482ddb75f665c02b70a30af9e0de 3045022100c2b0723460de3b005f9be706e07e5a0822f1eb161b042e48c65c61d9dc4deace022031eeaa5c09155ee1c532d8ffd6125d3bc9ed7b1a62ef062d21da1a04f8e6f439 30430220438b9ae2506ec2476df0453f0198e174645ae71d07c7bf1f67033dce28e169cd021f19c55300c64fc27ee09508750799d98af3745c1e88286eb39c70bc14d44200 3045022024dee601a01f4d9556bcfbfaf5ff8854a92d32e76273146d9d897b9178f82ee0022100bfe8f5585887772e1446acc57c442de3131ad0207c78a2be8432de9c03d1af38 3045022100d14ef355b37d96902069417651fe0c9d95d5d705f04ab92b726fb6ec01a00f74022026bf2bb8f3fdadfeed0c6848689cb45bfe5450e0c8f83dc9a904b3fddf7ae2be 304402202f3436834caf87e00510fc73604d335a192e0792b57d7b2d2786c463011457c902203aa4e96d614166c981e245be2957d744a3cbf010533b91a424ac5c81da3a5b31 3046022100ed14a6b4cacba8cbaf25bc5efd34374c476a11edbe34066383d581aefa6ee7f20221008daad2fe6814a020a9d94f54be69668a19ae4e5afc3954167379d0dcb6904277 
3046022100ff75c9166ba763f0d9c41fa155f75eb8b62bd09c57a9292e96ee8bc23a303ed0022100a86eb8d11ac6dc4bdab90870018b6e3ee79a1487b000c80331c21e603e1818fd 3045022100c82040d1dc16eb9481a4329fd2243ec913c318fdd88405f3f78507d297fe94420220319d8cf3d571fec0a509d9ac6fadd856c122feda813be815d22f32fa1a6a3a8c 304502207360f8d746028f7a6e57bb7d7852ca3b89675d13341cce183e9a4ead60a3897f02210090ca62c5a0cc8b9a0aa3e4372f84aff90eb1fb0e9a9c2534ba9747232ce163c1 304502200bee4f9ef7eb413d4c14635b64e862376b07e9ae0da2422b4a722f8dcd980651022100aafb21bd0f310887d9a4367b116e140ae15004ff6831ba03841ebaedd50177a2 3045022044d5f5c56b4f5755a386ee3f5dee705225868cea61b676a05d8a7cebc6a3a163022100c66c9d73ed0becee29a0fe09e47f1eea3c8f4596d0512782e1e20d516c7362dc 3045022100cbe776f806749d12399b4c005e26bf4eac66729766a18b04e98de96a7a3a5a5702202825f6b5e4b1bb5cdf0668c5c68eea4bed6cefc547b19e72c92145b659f98e09 3045022100a021b91747387253c92825efe162c3c9facd2c488b6929e8b25babb3a222213e022033b1d6e24d9846a8191be488ae6a0586bb93b40b0c6c081912cef3b53b93c4f9 304402207d6d961ba1877b3eabe7a99d7785d87a8e902b524d6004ba1a3dbc2e0963ae6302207f510d141da3ade75d76b11553fa53d138bcf6cb634e30cdbcc9204c6838fd85 3046022100de9c75aaf77babc518127a52c8090bd214a6d020d8c7a4b0d62912f804050646022100be1d620b3fb678be6334148af47f1723917c04bc79126411b56075b27f96ccfd 3045022050626d39d0c9ee995e2431248f4232fd44ec00fa46854170090f171622b21949022100928f61413365ab7e4b29373a5c9ea10521164a06fc54d612ebd4f9df4c88639c 3046022100e01f7667394f5890e8c1b5bf9a2b01b5ae3424ee457c0ac988448013feed4444022100d725d3450af51a5c0e856eb1d00cad30fafd4f9ac03041e840114f067f6adeb2 3045022075ba0b0a3ecce17cd311d6ab151f3cee67637fc6174478099122502f6b2349ff022100897f3e64cd685b3c8c971c117fabcc75b5b4810d77cd16df9d6bd9404cd2d574 3046022100d7b7be3febf6ce659b3d56ca825c3112c03e5203ab4e3f0c439eee6434c367bc02210091c77936ad00ea0f673a5238dc10f553a2d769cc8a9af7f24ac3a683079a36cf 304602210094dc722295ac37a095d29b3017d6e91e8ba5194325a769546cd5d3a5c4c67eb5022100e2c10d39d8d1b49c170725c0636dd591f7deb8bfb68ef7dc44aa806778a0f4aa 3045022100d0e89dcd9fc847e00ed1dcd2249bdb46b46416f7fd2318b5f3d44a67d0e5bd77022058c966573cd4e97dcb39d12feeb4cc09ffd37238c24bd21838ef1ea1a7a88f66 304502210083db7336973895fa0b20686a775582b3710e6230283a958cca55a8268fa68f82022003f3c3f0a2a47bfe6beb4475535396d1dd1bd5084012ac706a47335eeb39aa40 304502200a31ce5cffcbb3185ba1ac7536f6a779139e663d30df51236e8956519e37bef2022100de08aa517b2f495e8352e56db8165952b6ab6931c9041b355cf6d000d87f0293 3044022020928289187b017c253a50410b830558048b84a504f1a894f312249dcfaf9b5b0220519c37cacb9796144808445e0efc2783d580e05f5683e10893c75d5c57b40844 30450220757db20f6d6068bfc2761953df21108bf07e6b2309d60e176aad2911eda13c22022100e8b9e2d34403f798f9e90d0842bcdb85629fa20f5d681b6edc8278bd633117af 304402201b827307ccd83883c3e1bb5ed62d63ac8dc14c9707215adb93a1ee66ede5e33902204ac2de9fb623002d70061d95a9b78fa2344691d61a7ef6a8ce86cd1892ac56c3 3046022100b8056fbc57d7c600aeddd337989df40c4658c5abfc2ca042ad091caf58acf59d022100e7b3ae33cec77fa0beca95a697817d1d16f86e52dcb9e5f075d6f11ade2ff722 3046022100dc2966603e08211d1f484b6c5006ff221113d450ae8c80f50dfb2fb875bdb472022100b60e4b50f9c47d1aadded082a455ba020e4c587d04133909438bf2a7612ffc1e 304502210085c047d101cb47344941933627647bc46f8f9a2c101543bded5b109fdf299eb0022029780cd7960bc518cc0fd93ecd3f848fe458930a00265fc0e2503d476101c041 304402206e0f1bb66cf2c6e456a58cdcf17345ee703a87f4a2b7e2d397a7818eeb808963022019e88e1b3dbb4e43fe7aabb86eae3f08190c0c0ec4c2a07d61f58945fa2ab91b 
30440220063a53e3e4104595c76e816036ae90935329fe3ea687994a6dce6bf708a78c93022003a877bc31d97f53f36fe85aa8ea4536df7d83216ced7f2735c4622dcc5e114f 3044022044100386ccd183d40ba525e46201c4d5c737dfbcb19eac447e5bbb1ab898414502207fea3dcffaed9654212ba95b7e9ad32ea8a158f8e4d4aea02f162560099c31ca 30450220229375fcbdd7b0600f785d2392f8b4a72959a7a5c22ae5711bb301e08a180e78022100f16077f26c3fe750b555fd5a3dbd0a044308c509045eca698c01322081279343 30440220325f36df1586bf67ca81dc8226b6e17027c2aaf2823048724ffd6291de5cd0cf02202b3101afa768d1887136449a6f34e7c424a6eb7ceffff0860c5f47743ab763a2 30450221008c1290210c94a98e5d3b884d3ff3ce989e1e4b2a6e3f57a61d5069edf05f831002200568f566f5e8624a6abed546f57855b8517f15e9d638204b98c18a4408d6c1b8 304402200aa8e58fdc68d7de3c1b1c5e14ade5c16dc156b37c6852dd384ce22509c95826022068ea4d90f217c0a40c1675beb1b0486c9655ef8c335ccd5e92a34841aa46b75e 3045022100b5d0c9c7420fb2c9f0d36f8c918abeb23e569028a79e489d674235fec1e32925022025fbaf1458c7cbcd8ec60e45deff7a06dbcc72727956a3f4cbef6c7744672e68 30460221008a755d831ba7bc5e9c85c2e5f13fda6933607e210ea80d90000ca13513cc0e0a022100e98a3bc27bfc37a0fa089fe3aab2b01978c1c1b97a5fe56e30c7481e1a7f080a 3045022100ed826f82eec768a3054226e11a9b588c8e95f4f2f9f1172b7f832bad5df4675502203d49fee2c4939f959950b4150f6cf021d5025e5ae4f5b63216e4172a0768a0b2 3046022100e1d92dd518fa706857d623161a67b855e6c13fed768af59551091b291d1d5d79022100e2616079da107f89ce0980f82362db8331f6f2734288cf372a8865946d97a35f 304502204bdaf87a0fc5482615ea56267c8d0dea083d3da37b9a53454ed46fb01e337eda022100b8155f46875f1e81152c97c709f91a3c108c528ac2d0437a5f2755b3c4e32a46 3044022024c956e03abb182195c986d2cdb8fc98bd2c480ab984ca3a08f4289f404ebedd02207b0bdeb756868b3b4bbc39293c37659390bd48506608414f949e497743444e91 3045022100ea72bd2b231d5f8cc921c033190b5e5ce94c772282ece4b1d9cac793db1660e802204373d31865408b60b66ed95e4b966a3e6d6a53a69980bc92cf09eebfb437ca32 30440220234c28767a8f0464b59145cb7396fdc1992247c660911cc88e4d927d240b54a1022075ecc4c160f9eb01028d31b171b822f468b9048f06ef03477bec5e0d1a76c4b2 30450221008e256b796f6a984c8086afa76eb9c039180791d645a6a7265da561e706db343502207d012c7a938425cfc55e13c0889cb61790b56f2c5b3508eaf00fb3846119ddaf 3046022100a86e2d298f553b9373af02ba1780680efeb7d6e16f52e383d4bbea288075375a02210096b64a4d42e1d58d523b2e93e943b379d08d75ab47a2eedb3ed96dfe03073530 30450221008b6097a24127337bfcc929618015e6b8107ebf944165250adeb624bb825236fd0220571d3c628a6ad58f4d14ecc73dc6314bb3659d5563ccfa500d596c351857fa99 30440220410ef6617a37c6c46ea98a902e8bd4f3a0dd87f24d3bc6621b5b556458b3095402205caec0bfeff3f127d897aff926bddeaa190c871835901593d622759e4cb51355 30450220592171be9b2364012702c927ab9a3b2e6a4330cd5c95153b638602327f9151ac0221008eeed1afd678a5bd1901b7a9ae0bc25b220fb0f604cfe2aaf33ee71c14415a37 304502210092d05f039ca4b0be11a05f00511b52a764efb2b437d4bdde32a4db4fcff65fb7022034903f672786e72ffa6e10df643cf6d417f6e0045d4f8cb591eceb501f8fd3e3 3045022100db52a224f5f2f0760f8b6d90704a442a9d3033b4295544ca0cb1978b3c7b0f0e022041dddfa53481bad3409efd2d61dc255aff3790b05228257762f5f85bffa73bf6 304502207a328ba62d377742abe542f9ded0d38226adcc8fa17c36e7658d20d771edb1ea022100c67384483368d20063842bd36d7eb2335d27d6ff0930700f6c103438b4a63708 304602210084433ff4612d6a3312726638dd05137cf9ae0dc0ea869e5dcefc57447f563533022100d4780c211d744af945ceff56f312c66641903fb37cd761bc7e289d949ddba048 304502201b387e31d0bd9d470fcaeb761081530bf00fc619753802c7b5ebf60fd5b21f6c0221008560859abac643af98def0fc34e13705782ee2b550c32933ac0bd17d22e2522d 
304502203081b1800268e1c56bc9f9f71e5f6f70605bc6cedce0eadfd4eb6452e497ddc1022100ff5fe99502d305950577262504eda9fc77b5242fc37bbd1779ca280306a9148a 3046022100d5eaf48d7b615f40c0315e4d9bbc6a9531d4b1d72714136198c6e78a5881b77b022100b604e1b78c5df3224fa6dc4fd0bca37f205232fc245d6d5ac01e2c1d117bc324 3046022100c4c056c45714a67faac3c4dbfe21f5c382513fe4bc6e772a9a267b9917cace970221009d362af91cea4ae1c4d59c80edf432f15ab156b00e7a0f4bb2a0ada3ff7b82f4 304602210083a6e9986d94bf084dd1981c171577dc28a0516d0fe7876a90e5dd32d3e6e5eb022100ef5e8cb9c8e70ab42053583b603ce89e89e53039cad6cc5e1d88e4cd6cd571e5 304602210096dffdb429bb3a1df90e7d2ba3d6712aa5c6ab8b30f8dd3156bfaf504451d0cf0221008915be87eaa3c9db1835fcfbd6254a6f7fabad9560b31f7ba69d13a0e639f2ce 304502200427b3ac3c7849df7b9f7b52c335d5cb007fde04955e5759cc35dc85aa75bebe022100dbe81576825fe8ad5cfebf223c700b0cbe30dffc8aacae0e8f8a8d3ce329197c 3045022100ce56ce0da7ef72e5f226661e15f849fcd7501b292a1f61854fe24e8ba316cc3902202cc5bc8862cd4e74cbb4778ac79036228c7126d3b3718f81df5cc51da1c5e6a4 304402204beb6c8839639402177288e0e9dc2809b1fe656b6a222fcb5aed1256d267261f022053fd5f6d209c4f5e991887784beb5b52df5b9174085209a45c6d06c0763a3cc9 304602210097aa4fc3750ee7fb9180fee42a0bfd59dcaa1bdce83225d9b660c5aa1a433b73022100857da99a3fcba8876db91513767775f40a255c198e10edb919e7f30a45405b3a 30460221008a9b6883cee1f5e6fc16e150b0b80b482a6cdd983bad0fcb687577839c2ed4cd022100c763be3844eaf9ce44c760ed833867f75606c9cc38c5c7e5f8d851ce95807448 3046022100c3c6202dd5a698fbb42141735a528097360a6070c4bd1d15d595ca7308a3fcdf022100b0370c3e588dab4ef6dd70c73101bcc60b7b9505ebcf2cbbdce81e31a3c7b8b3 3045022100d2da9023ff74bdea23a834f4e8389f8a4d15cef91e3d455fa2ab658cbff276750220544dd476197244315e831cf13a99a6814a1116609478c772020d0635c46cfdf5 3045022076e2f193a00b8b1a1319249365baf2e036f8aa805f78eea1baeecc59fcf66a660221008a504d89a490e06025a2b6a5891850116458de530cc21e9b621a229493b4aea2 3046022100bcd653955fb9af2d0e3fe5a443d664e51fc0964778ebf445ab20c56e5939f6e5022100b80a8765d32541f1dc87005d5f7a44f85a43ef66ca7cb7a26094768ff882ffbe 3045022072e131d18a3b8f942ad9025d236dcf6e4c8d3d616f2d78029b1a590a26aebc74022100a1430fc44598f347c0ced7ba2ac0c1427a92b32e03dda5639eb8a3b6488fd0dd 3045022078ab9e7e7f92daca53bdacbe994e1220466c12e2a14f7b812e6244ea5a13c041022100fd78524d80ee10e1870289b1156ad85be6c7c1ccf1779f3391467aee09091367 3046022100894691bc7c35dc9c99af5f490dd89eb8c045d198f90eeb31942486faa00158ae022100805c3697e7f7be638c6d993871ddcf9cfefc76c4b71df050d70c66193cca7725 30450220561c618cef8f20b31ab374cb2986f5b9cbbba6f9dfc467ee5f63446640d1e185022100956b54c388b89aec1f82c58d452ad9bd450803d1d30d9e0fdb3aa1ae3a282568 3045022072de21aa72d0b615ff84ad17cc66813989bae0dcb69b05c29c522f8545b95f1d022100edcaca923c2c2b0f5a140b65a96180404e1427ea99787f3853e4db125ea5487f 3046022100ae67b88b0d2819c3e8511714661fd3569975e3a4f3793a0b1e994cd9a922414e022100a57689f938edfa43d0c51ef10f3eb7f044768218ab4dba514e3b540143a6c090 3046022100c387983071b81bb18f986a1edafdc65d76b2cc7a9df514f9fe414230b5fd8c2d022100b68d8d0ce58af1b7432cc372daa43df55cda2c11a2e364f3c97c4cc2ebe276e5 3045022076151467bb47b9beebb93c652141df3e3cf7ef1a54919bd73d5cc32dc0b0879f022100d88be71a1bef4b63f9ee9c42ae5e878105bd9324d59457ec5166e77e6ed25e4b 3045022100f514a087acc53d786a6bf3a746843896a4d3a2f66111b59cad38264355e208a702205dc0099a435f51e645d39cd6fff01faf86e4190d1b5515b16f24d04f7111d911 3045022077f254c18b5da0cf85ef7064c4a74c4e69b6347c97fdb533c404ed78bbdf94020221009ca21212def0ff548f291799746ff86873ab6153a90a8919f71e2a06f8280b96 
3044022046ed0f334b51f1ee6c46a1135bb40c2396865158962a0f53c7e8ace4aa050f5a02207d537327322e96c5421d9028305d80f5515b5b7eedbd20b5dc9b9d10f74a4b41 3046022100c15633616275ba7fa5f2ec1e6392311281328d9970155f06a68c410eeb232062022100ca628919b8328b16df48f89393d61ea44f664ee2f3743520cfddde30365bb335 3046022100ac7e39e54b117a45a16a846665d6f3cf5385c35dc4d929011aa40cb287ad845f02210081c997d297875b6b10a55759856a18940e4d819c0d23769481afac0d5c0743ae 3046022100c6e21a76af00dc9bfb7c3f4212908d5b40edaddda5816f712e29919a410fc915022100fcb9b31b589a58b0348d566cf5214555b8317913d72d93c2e9899d406305440c 3045022011bcf426849be12cfb6edc10c64716070f3ff7d3a78de2a8a5a632b2e294d730022100ac11dd43d3207fd2fd21deecf2eceaa19d444c212428022a7a8fec8d7f58fce5 3046022100df3579ee1dcafca04dc0d9ae0bcc831643042a70f3d8922168ec750f56a7891d022100bb9d1e062bb017e356179c3be75cd453b02caabb5fdeef7a28a8c91bb33306c8 304402203aab41f4b83949f3b7bbf419c27b0839cd22a691e558e3ec1ee368ecbf302ff102203e33771c6b232e29c7863e13b9d47db83e55fb3167da8cbb3e0853b6f71ec894 3046022100d3f1753a973dc9dd821ff4bbf3ddc86a0d8cf99b5d578c7bde5db25dcc369f1a022100bb48418ff277c1f11c590aff31656ae6b743a07d9a1bde8a15259e6abc267ff3 3046022100d13e21cc6480e56764f45c53fbe98312835d1e78edb512f8e3fee7fac3f90e8b022100aab3df4384757339b1d6ced33b3f1c23ba86a980f40dcd23d0d9585652c7f091 30450221008b3e34d12f01b1ba4f85d47a7eea4a6cd16f5d7c95e4bf4c5ecde3ae15d25f0e02203e3e18f1f15b35df7124fe4e3c5f477de6cd108ccbb46a15f6b4329d2f7589f1 3045022070ac3733a08f8462bfb99606d9bb0847cff583cc1df4bf48d4a8d36106889a51022100edf5095fa25dc53d5bea724f92c5c0d3185390696ddfd6b98455dfedf6371879 3045022100d64d040f53ac41cd3e5fc1e0c38ff9726809508aa21f31c8fe7dec9519ec79f70220337c5da85a75c1b2c1c36fb0a6e9ac6985b6e4892495a9b287bd30216a0acf99 3045022100a3c276208810594e3ad25ef02cfda31448187117ba00adb2a160094019fb13c90220405de9d994216fe7cf1b9c897a159011ada0746b683d0267d8bf46fb016b9359 30460221008996f9aea7b63f07846ca14f8bdf555152adfb314e435d1b7c69d8a465bffd58022100a07815b06319f1284856b700d66ae35e96c34ec5955dde31728d556c6801243d 304402203833939bb711e4b71d3a9bb274ed8bb73c9c0e3954890872ccbf223b2458bc41022002ef07ae5b08f7f8472f7c2e4e96fd2f3bc8648e8efcdfd6342ec4e9babcfda8 3046022100ded75ef2152557dedd14613ab7908044745e7caa4694238a5ca5a67bf97ce8e0022100e561e3c51ebff10f879f6cd7037a8246c7c66207f04b25595d12deffd965d8bd 3046022100e521c83032cc0313ce627065ceefa09fb3995f07e46ac52f53e5c70f07d3595e022100bad79425670ed2d997ac17b7e79e474b128402c6a80f5afbff375e897fc76551 3045022100aefcb323aa0f862b53addc22e8c1fb26c84ce28a5ba566e28b1578d879e0218602200dd359a1e63613395a1e1473026941dde86406e5ca0031340cb6ab99e1c634a8 3045022100fb63bf9e99ce831adfbd7c9f86a29f99496be5652a7d471c55731f943d91ad1802200685033a2666ccd28327fac111abbb49f6b5b0cdb11e66db3e4f63b38c18e230 304402202d64c7a2ad8a79a89beb7d96f11323c5a83e915235c588a7c4e40a37ff81a58e02202eb6edb38628e88f8291497e33a3f5d68ea7c00344922ffcda9a1ea179406c5b 304402205c7ae07ebdfc4337b8678af3690af7041c4337eba7579a695b025fcd575c91b202204101728214ef4faa8091568e911f2e866e645d3283c31404d8ce3e06ef7ef548 30450220427a9244da189a38fce30f40432d0cdffa15380da71d283bcd019786fd4a9a86022100c9c82aa43320aee17bc64d7f68e825ed09de88966c99ea5cfeab7b3ab4e5dc34 3046022100d3e3feadf65d06f2c8024b5bc6444a135fe610ec44effb972ba1b81e9cf3c3170221008c1bb54c92f9cc393dd3fd20d5692cf9a77de6cf0d477346d171746324b9572b 3045022100a80fe916d87f86a9dc511a4011419deb205318e19eea69bfff7d199d8d6cfab0022019cb9682831a71ee66a7f824d660a4132daae9c155a54029275d28b82cd2e83a 
3045022026fe09ba441a75eedbd6404b6c963a6985c4ea386d6f28b25a64031078ea6c550221009ec1d07a10c5255ac74432075c90b53b2a1a4d39f9db1c2d62aadf477d28bbb9 304502207f321299c5addd856c93f063b432d5acb7c7ab0aaf2733f7d843b1131dfa842e022100ade88f3409f6690b05f1e4d5c528bcbcc715be61cee73c941e83c27c42196f16 30450221009d5795ef954a9ed214bff3ebcc861f78c8625aa8d1bdc5abd68ea756598694c702203f6f05b0c0f33db79c509528285181092bf34ad15d4761f80278f77eeb081537 304502203fe9043ad14b6faa137d84e68c3add3664e0666e883858ccc7d6322dba787245022100bc29c9448953394e496a715264f1fc9327f45162fbd51e92316ec8dd54da2c69 3045022013d0c68e32517cfbe363d347601d904264dbb24652ff119e8275313072d22116022100bb45689736ab15c613a13b02cbab24c78ff1e8ae39acff90cec4bca2b7c109fc 3044022074e6ec237c9790f401e1f0cf251718a81856a4894980716131028fbc418ef5a3022077c5fd3a30a545fc94ea109587b17f7f77ef4a4039fca3f6dab9447abc13a791 304402204aadbbf255661e416308f2fce2b1f9cf4c24801963da9058a26e4e8f3ce71e7002204be96cc4f5856ecae871c92ea997b3dc0d564b5bf760b3f0b3872dd47f9b970f 3045022100e468969aa9b07b72880a6a7802f6bfb8bb89ac2d707fba1cfde1b3596ca6f7f302207cd39f1b50c56ee585b10c6f4f1689107200031a9af93302f141e4beabd132ba 3046022100c5f4e98cd528fd9a073b5d0b91836f2a7e31e4a4b6fd9d56ab2aea0a56ce67c4022100c330af39bff2082e0abb3f0f48a49a8850793bee846b9711c7cf3726191bcf18 30440220266b0cf799f394476474da96f8380b11df85c91bce1d3787ef9f7d9e1fd60c020220744cdf1e99f180650603b071b4dea09c75d9b48c307874fac2bd9e8b22f8445b 3045022100cecac2b036d2e763af6cf85c4f96cdb276dca47467d922b5f6a73069741b7d8502201acc5c661784f6f139c39405a0ec13d390a6cfbd993252bb88967eda748e3b12 30450220660bd96a02fde72cc31d08835a975d918ce8ace33bdd3950cc55293c236aec170221009e4e2e98182a44c61712d6494f100a75e141f7a74350f227edafdd2d0aaa1fad 304402203c1a1fdc139988349d11c592316ec5a9ab3aa286b136c60a7960b354b415752002207fd7d205462e473b1d8f5d44d9e895678bb22c70aa3e03fef8f3f2a5e0a57d2c 3046022100a977ca98e66c5134547851e27c23169d3a2347fd4fec6f24c94c3a63787244a0022100dba97539c66d31068e0a5aec7984430a7a89731a7727a69cded9b4ff7fdc33f5 3046022100e30f74d44bf4ae375bdfd9df2ebcb5c781e49aa06d7090b60f5282f23f4343220221009b56e508bd2a30260d563b853c0785031a02eff8c1ff8f79bbe4b15c610723ac 304502204e078e00aaa57a585e427ca9061ed539a8ee6bebc59e0db2bb12196e9de8d22f022100c2dfcfbbc965d825fb1c322c7c22b4459b044cef6b23c287bb6dd80c7668bd8d 304502210098cda0fa447b0c8959e99d41fde1695d4c6609fe6b93d82288c8c4c94aaf2654022049e837cad81cacae3636244bf26e307ddaf5f9d442cc3d2275f1b359ae6b7140 3046022100dd8908b76084f6e54a78c743912fb4ec84059665d28e4d1d98755e11a00af1240221009730111492bf29bd6c6a4998d1c384770126a5f41e17ae73b3d20a0e20e141d4 3045022006e288c8c17d8e5b0505ab26cef4bbf84889f9756608d637de4b13e37d4274f4022100b47ec54c1d7082c9498297a2865be4a2dc66d9f502e4c4a2c41b6e548eb8c831 30460221008f39019cbef4334a17ddf68078c75c0c7395f561c757e8b200e3a3560a03ec53022100f6d86f07d01adfc6860df7081ac531efac39009ea85e141e2aae5652e6cd433b 3044022006cab26ebbd13ff4fb97919f158a99e6eef130ec7ada7724057f673ba31885af022002d7c0e47247aa810ec83c99f540eebb3cc1ae90441ca68bd83ea8f1e4bab21d 3045022100a9afeeef3d02bd3185afc0499df67a662a9f98b7053f7532c728b675fece861802203eeb739729b00ee24b4f1513faf60cdfc220d1f1a7167e6355a39a7ff0b6da98 30440220173c81bd38d3b84b668b0c60249d8e19a806f9c671ec306b41d701a1479875220220252c610e09855d26ec92aed56d802383fde5e11684134ddd9fa14747a82658df 304402203c9c0a013b71107f9e88223c6582105f8b493a5038f161d49c6a3597094fefff02204cfcea2089a51fde58eba4009bb2ecf52e6884559f125e04e81558f424de0413 
3045022100cc198daa455cb3b1fc28f7532d3ad4f71e35fbfedf05e16359bacd610c180d3002201f946758fa20a42a705e6102d842d479c7ef83cf7784e0ee54b014d07486ec18 304402205219ceef78053d3381a80bc1b7fd8a1cebc64d6c8f9b401d4cf1997d2870a37002200648ad85127166aff9633a525b5f7316b7f31959a7a463695348635827c99c9a 304502200a75e344598a380b3dab44aba0496ed2a82cdbbbf8a9513914eb3a4a88a64d20022100e3c2ad6206b326b4fb96b595975452113ab699d2d46d8119b8610f752e87b9b5 3045022048c1f05c51b68bfb2613d1a46db5f7adf04da7518d2b199a821a8388a1cc54b8022100fbc584ae26dd4f8f90390cb2b5d44d4d03eec68e03f5101ef116c1e46b2161e1 304402200dcdaad21e236d7f9be0bfce750e52d91cd5cee6895bf37b3363b000a7d0693502203976a9b9a25ed4885d016a32fb338a75df538249ddfd60fb7a997e71ba68f2bb 304502210097f628169a15d0389139919c50bb525e19142a00ba47364ebe7ee3cc82aa7e3b02200f5aa5eacd3037c3af3ebee580a0fec9ff4b55acd2de87331c20edd279fe9602 3046022100ed817249ec777b5295ee3b293e5aaa1a25402d5d18a4fe3dccf5b339b5f2deab022100a70321abc144dc4481f23cdeaeb6f8932fe4f5156ae9211b56ff0194b10051eb 30460221009a900f1eb48ae2a7111d8b4331606962ce69c53634807a07ec6ea2edc644daf2022100a1cec0a8fae95384ced624869053ca54d553d240606e56b5b7311ec334b21cb6 304602210088bb6775531a59b5149e6baf8630c753dae739ee47b68465e67be8320d100ffe0221008ba1de7071c108926c54b19bc4d2f019ff3ecd5f80bfcefdfed482f06b9da319 304402200521db7cc67836992d392a7489d35a67f3cadc991e536921bff3b0068fe1a0160220071f6188264f55bec66477a100cc37f718324971e44a02dcd52d7667baea807d 3045022100feae15cc0f531fe0ece4a9fbc682bf1be0cc532c9ea501e977ec232c2a1afa4202205fa053c441b99ddbbc7c9ac68cdd657235bf7bdbea831c8d5439a17905ec7805 30440220347dd8c2f0126ef81fbe7a88a09f3a5b7725737301e0b9f415bff83ea2c3e85702203b7eadb843e35dfe415333d2b8dc18c569ab6052e3397f5ceef29a072ba61411 304402205613eadacb2143a98afbe20d950a21f39e5d576ccba787f22c9547ffacbe240602204a418961d051a8e230090e463029d13635df4c639f5652dd5d2c84c559d1f905 304502202ce1af344222042f727fa61b5fc96b8c52ac2f67f9fbaca7f92cd72e6b02c848022100d5668f1c2285e62924e854b12c304b2ae373d713ddd4e74e83c9ed302d3df802 3044022060f4f4d0162459f3ff5822ef05710932f5541f1dcac0acde36902ea7615beadc022019770d40b7cd8cbce2420687aea06f37acbb949c475ce2985ee5f4de8ab6e9cc 3044022058807ad68e394d14b30908e457a508bbd79c7e8ea1ab6d75c749670835085f4602204720156844362b92204b9cc8cb64927bdc48e3479621e011d7f2a0a6166c121d 304402204d7edf3ec1eefa65d48b2fed2aa7204310a01cf1d12a88a0d6eb88c1cc41021002203a6b9a58440efe9ea5423ec2062ccef9bb4b0b8d8c2615547646bd7ec3e3a040 304402201e47c65760a3881ae7f4afaf8ddac90551b8c7b90d65d88d8611a009fa5636bb02202f5645bba3639ac21f9fa8bf9da939c1c2a76281c76b25388a4e2cce9cd7769a 3045022100f345a18e1ad5980907ef44caab8f7b57a50a67106a3d5b67329558e985774e28022024a4387dbd176592b1b68ff1327f23eee6d23c878b5b95712bd0ab74f4448d9b 304402206fc83683e1b318edcc0ce59fb9ceedc0005292c12b1f35f91083da73e5e77c45022012107246edacde4b5504f1e027fa2b71344f2f9a28c5969e75c3c4f734c7f3a4 3046022100965274a4b703102529f1dc8be17f3011afed1dc62f5f17f4111784a3472dc4a5022100e2f98ec56c2f4cab17b16ebf5fe67164c47bce49eb9c647408f89fe2d9085fc8 30440220229ebeafff4c6ff252d01e686c9d85005cd46937eb0edbb87f98e9704527fb2e0220361f3a121564ee36a1073904fc99ba9fa19320ec2941a4199ddbcdde16860dcf 30460221009881be97b918eac886986288f7d2b330098f42df729078fea9608fe992daef1b02210081c452e018aaaeb1e83cc10734fcb024fb6c783ca05cb406ee3b9eba3006be2c 3046022100cf8754be542bcdf767fdc5bdbcc7fbfd186018a213883c28515ac7499c456d56022100a454904e524f3fa5bfbc20e96fceb4ad77bcc51981d050a5402ec59a05e88976 
3046022100ac542cd016bc95d66bab5dc26eb3c0f3369b5a48677891485ac5ede9e658ba4e022100bbe6eefaf71ebe122d05fe7c384e6137888dd0d80d37b2739055ea256cbb105e 304502200bd89e7e2e1911bd5c195a6d8df430ab033a081c8424b63de42c4e86385703e2022100f3971c06e475dff18d1908b164f5bb8a2ef3b43fd26192bce7c8d9aa8ee5c951 304502205eb67ac9a4a2c914fca3f1410dc364c9d9bc1fd37c8a2175617ac78f513eaeb0022100da9386fbd6096c176a9403f416dd4a21fb1132a4b525cf43c6c9dbed38934b0e 304502201be58b6d263ac94be93338b63086e64f1cb56809b80ceeaea461a9faa2bed100022100c15fbd1663b0766d8923c601761ed8a0788002c0d657e0c1f9a653d620d0c85a 3045022005864ccd7db8da699d1f56acf6452a0b47046c62d609f77704e01f65af464cca022100a1973f1eaedd64ea88f105456c9275b8b2d67429b8f5c6e7e1ba26004ce39f4e 3046022100d81566d82526b09c2b1cd02dd5ac48e950d6cec28b2b9f6a3a776a508107d917022100ad8abc9ad52df46c76720dc484dac19ef6f4f6c28c9fd5129f2970c42e67ce2c 304502204df04e0d32177acf39e4780e953ebf2fabe67e10d0662f9a2d38416c8b40f37f022100aac09774de49fad7e802cc93fcfdf6e0238d46184ed6f901707367e600dd0eac 3044022074898ede260dd3d19923778b8466b228bbc92e078dc7f1f7ca48ad39524edd1002204756f4d20dd7d1c29a347f8d382957c81f1af7d4860756c6fdee246d30e607d4 304502203f84485319ef4300c587261db985f5076e50575657286d5fd7b3ec25abcc8f91022100c989322f0ceedf8acfcfda95a37e84b56f911e8ce3fe441e0fcf161c9467cd7f 304502204d84115f107e2e468598c0776dce84abd9f7d9c9aec0c5fe70e4cc874f6b9f1e022100f9fe8b1de8372e255848ed47fd94edb2cc907a97118bce193627678b41d57f10 304402203330c05903b4f02598c52391d42b136cd1abe78af5953dcf063038791802bdc502205fab7fb2ffc8e945bae0fd163ead71b73d5ee34dc8559b92978c68245bd4bacb 3045022100965c209b9bbf0507225b1b1cb73dfbcbf30b96d7517b51c6109c3546c5f8488302205c720243936307d4bcb3edf9e9a0919c9d8df797827667d7f5b06e1a02dc10c0 30440220010a5a1cc834917e9ff41c4a5ae5c815b8675fd5d3f46cf099050c0721d7959902207d0c735b126b63d82cab3a358ebdcd25e44dd4fd97b89f8e44661977c105a6a7 304502202aead8fb54af62bc8d2fb53dd158309f5ceccd86926c1118d03c4eeb13c8c6cb022100c117a1666b02fdaefe05eadeb06562a3a38b3974b0476226bfeb1acefd7829e5 3045022079775d5a305db247fba295414efca0f35c39fb04c090b8b70f650737ba253838022100b2430312d1b6186d4b35048d1b2ead30fd3d5807b81488440a19fd7a7fd17dda 304402205bcedefa250cd44361de5bf04b30673d45fb037d2194cb997423cc6679a223f102200e764f25426359c191b62eb9fa94338fc6b0e31d10c92f1cd71a23e817d95e63 3045022015d3171e59daf829997021592d524462a92197308d0f6eaa041c9e66397b1224022100da335008bb8285e263e3680d962d975890657523aa1da72d7f076974c5cb20a5 3046022100db9ae1368daa8cd6cfe54584b21a1e489238ed93296317545bd8e113454606b1022100c73b83b5ed6a6dd68528deab28eb734cbe81f460453d9de7da8232a939a2fc84 3044022011b98f448e1d49b92d46bcc48f8809d21e801e74a579d5497b5f241b8a9802ba02200b420d3797191e630132dc9aa2fd83d050768a17ae54f8921009ef5b5d91bac9 3045022100934d87d09fd2ec39ffbb73920c1a8ca25e0f8a29e0ea85b99f3ef42170c8ec9f02205e7178b3700e0c67673a7ce155c5ac073f062b68b4f174bc22230963f01a6aff 30450220506fb470606401aa31e5ed1325ce04738e44d57d2947ad25b39b3d94255ceeed022100ace874ce94d146c93772344b73d7f15340c0afaa0d599283d2b6572cc03449e2 304402206b87eaae6034eecebab1fa6162843b14d394c34991971e9e2c67f535a1d7d95d0220646df2b111d353d0a17e8256dfa3114e2fd89604c2a44ac8f5335be5cec9c16e 30450221009b3583033c1e3f633c98120dc2b6f64fe78de6d4b78ab5cdc42aadee3289b0540220320ec0b44da9463d859e417607f87ef0dfed6b238d5689dabb6d8b66e589d90a 3045022100b02e6bfeb0df7b44a480b85b76e24993847382591cd5f4bc909ac3dbf52ca5560220446263d1b7760bea3bef8370ce87ee306d1529fbc6669442d279be61576def3a 
30450220482aa68b4b44a8cd5aecbd90857f2e4be27409c001e22ea62b4530a47209a938022100874b9052606f90151530e3a20eea35aa2997a9b17a2c77c50a9d60ae5bd5c310 3045022100a8b398505543e1583449bfdd0d62e90570b693d7924e3089f0851570ff1644f002206d2f8b49887fb7e987ed358a238e38386fa2e51ff4d26cbfe88cdb9fbd9e1bcd 3046022100f3d4891b50c5d8226bff7e7b8f5704d7015343272f0a28b738dc10df2183b89d022100900d8b839dde65d5a6108e53c006b895b447b7c87586dbc5972cebde54ac5d88 304402207a25c841f8712d1fa313e0dfdd1fcf1801b8f95cde4f9cf7c5995e42500a4d4202200f235dc3dcef0fca8173a2827b6f2d89718f8031535d4bbafbebc8e69e55e1d2 304402206cad97954e0cc3f96a0c1bd4266213134fb005f68547c3b0f63ced36792a28180220748a677ff6c9295650d7c4dc9aa78c1b12576c5563846aaacfc81d71cae6123d 304502207327b0fd7889466955efd86af0358f29ee59e544a21227dc577c4e63371e484702210093b830dd6b75532ea8431cd677ee108d9d61d283ede7b858c0a54489ec1b97ce 3045022049bab49e8e4fb7d3dc5ea73137d1969895ed7a315bc86607412685a2b0e0f92b022100a20d7c424543242cbfb012ca782d3e3522e6425dd025bccf5014c36b042976eb 30460221009051153da627a4c96aa671f0b6db4c89a8d16768808f1a9109cf52034f3dea72022100cb551f3abc33def2b9032cc81c2d5502eacfe402e757ab259a1cd6e2c0a58f10 304502204bfc463daecf192f9453673340e822cc1f8530db291c4f9892b22271bb1580860221008678fa456afebf2677071d9228545dab0a525f239b125ae39049750acec13a0c 3044022007ade2812c83d86ddbbece762d70b5fe4a605978e542202b1a8410989785420702203a35a4d18d8bd6319d8c2584824e6134edc765e2174c1b6ac1ca9787ddad4a5c 3045022009b9f6e4176a8ed66be6aabfb308da917601c0d8f847ebe26bafc1ef159f8e9b022100f2dce227fd994d871e2245733b734ea1246c3c74ce84a7ba892308d199204084 304502210080f67dfacd05a1babf85e70fbb872f9fa21af3e7e422deac30ec3532bf14a005022035d0c04e1eb7e339166b72d9dcfc03f07e57ca02ced79812bc4e62049712901d 3045022100b900bab2f9a6ef13bbc6ffaf21c872b2cd20a2711cf5251fd6421eb55a50070602205b264590fb2f9cd023fc07d1842cf6c458b90502f0cd25291df716598dce541f 3046022100dc0058ae02f6eaa209d09a3aa854acac95997ff1d655143930d2b0d8bdd9352c022100891b3cf8fa36bbc504eca148f776e31e879d7c4defc2c556feb1266304c4c046 3045022100a9b7c95ab7ba44baeed4a2f1bd19f8108e4a06da051d742742b3e8e8a48a3c3002205664e35350e020070f6ab7fa48715dab069fa8d097befa85e8f7faea270b4fb1 3045022064d3a2c92d1fcc2a12aadf1ba2dadadf9991278372281a64e1654529dd5b3433022100ca051b3bbf57556c9714f125adb1fcbbe4a03d75a19544b55cf44834ab987f60 304502206e6b1ef4a44bc3a54c74d970ffde43c265b47b30f26e3577d4fd3e8164141fc2022100b9b63b7bd505aa1751cc4447fe142af2c945f4cb4ac799c767bc4aca631529e7 3045022100b665a254a3e7cfa7ff959c4724945286a2be3cc815d1b93994a837b05bfd6f2a02200611b6c9465e7c2d4de9af7ee811205985c4f7080a7cc0e8cce0a3fa7ae9eed9 3045022100e825f49774ac3ef852263c6bbdbd1bee86a123e2073ccbd3430d497650f19a04022078e6b21d3779861c8d7152d5287989cc38404613951b6c8cdf82bbdab48a6531 3045022100c9fb6ce2f8f4dfbcad1cb75edf4710a464be2a2430438aebe35e1cae6d6b169002205dd08a44e31deb9089f40d7d68dd5366ce3c7afc335e61e80866fb635c12d44e 304502207b2e7fe98321dff11fcd62b915ed4985f01d9c179803baa93ce7bedda21b46cc022100acfb02c4d71d3775f076541a595ab1efb7827bf7db0b9df99678a0e9a4296975 3044022058beb52313eb5d57e67f92f05608c5c5b01ad399ba7ec3e2a0263e3a60f7ab8c022027f2728e928b47878b8f0f441bd83075f9a8e5601701ac5fbc472ba0e5689285 3044022058627f07b1a81fc3ba0e38f95a9a0589293f3f8c78c7c064e5abae0cd3b5c04202200d8c57fbe2a2e173d25e1bc8897fcbd4af1c5b4131c64d8a77b306658401c05a 3045022100bbf0ffe831745efd02a40fa2b66cbcd04eefe7a11759392a90dca8f22e9d2ac702205f9a981c282a98e9912a5c7d42ffb2ff5170bd29aabc02f2896d31cb7d0fb471 
30450221009a14a9de0fbe3ec820a7d5524236340882acaddf37ec2cdeea36425c50dc01e402203d4f4341ac5866bc4bd2282ba379db01d66822b622b9e9679d0f63e69f07a460 304502201d88784e9fc41d1a91d47e7385943ce1294df151bb8cf46539f0dd1fe8d967be022100d95275179cd49679cd3761353f755614a01d01137e6c83de47aaa1139204db10 304502210095349f2f97ec0cb309eb16249cf8febd51293dc9978990296d77d83fd5ebed2102206673d64d0741b4bbfbf6724af2f863a0d90dd17641c64a5bfc50da5dd1f04ee2 3044022025d8ed5144000a08263355962a5b46fb9cc832f485efe27fe4f420e0a9307022022069f85e4d0999e27a93157c2fd3b60e44537ba755e3fbe9db41b131f0b227a750 3044022032ac3543391f807bbf4e65f59e6690d1f196b5fa0c95187ba37b23ee88b369e802205586991521df1b99b06466dbd94e03dd4b2ec20a19cd8080a0f26bca65317c1a 3046022100996434069a515f1ba69bc4a419d6ada0e86d77e84d526919888ff2caed293669022100a7446b86c2e681bef769d04615806641cbc2027445e4e0b16c8f79d6a5328de1 3045022100864bb9d6c24395811af31a87cb41f05d3b47efd71e69dd9b17c54a7fa2ad7984022077fdd9f59be58662bcf74a0ecb740a9871ec45240012259e15e6e023d6749563 30450220720c530af80be96381338e37afb12fdbcde5f8e4439e1491526deb3684490ea1022100f36944347c85d00865c1910b0b0f0cde4e6a5aa099e8cd5438a0ca3b2a43cc8d 30460221008d44f5d4c2cc9e834ba7cba9345a097d69f17e6b46a6c7a138c2e864f3997887022100e991c12b8680afbc9385d23ac9b1759e4b88fe6c5722ff6c9cff3acd5f1a9ccb 3046022100cee2e5a1e0630eaaa85ac02afebe243d6841a3428ac4b8deeeeca72404c6c57f0221008ae69c9878ad854c9e800a782d33309d467945bbfb4e3a0d070e56a3a73d958c 304502206e00dbac5eb65f53ee668ac4af35db06d119d44e9dd03c0906ffb514d5fdc561022100d5998ac2006af49e1060d82f93a17e6d648cd6a1fa89d03ae735cc6fc9041659 304602210088f9cb6c81c86bd0df75819e9a030c0d5b7bf0215a99299c5326261be6bf1180022100e87303807a8b8fe4e18f6e9b5f618f3a66e164f272460b9fba426e897419144b 304402207580904aa75eda07dfcd79849506326f83b5e7f27cac15f85bd03919cf78390a022009de75635bb52ccd5b869ecd95ed55e7fae658e5feb3a822e02582537e18ea9f 304502200d596dab448131b002206f4caa4792f06552775290ee8538961869d5af13923f022100cc2b5729e056a32a5a2c5d80dea52f180fe0b4fcf841ce0d5c4f9550998c3ee4 3046022100e28e6f57879b47e5112b96a82235bdeabaa0e169f1ff3f51e9034e6991259e0a022100dae6f7150d1d65c7710f70927c9146ee97e17d7e66e58b1f7f38cc487e5677d8 3046022100c75b4624d38b3e0ae4ab5ff5c52d0c6be7acbe7849fc572343014582e4cfa4a6022100eda3558e0187564f55592ffed8a9625067aa0ed4913f1a8dbfe984745a5e180e 30460221008721885190afc227864402738844f55d941afcd11d43793b404a5a94353ae404022100883a94fbaf2e683035b71086c31b7760cb5baaff9d9277fa7e1c3f43d663a824 3045022014a834cc252db1dba1a2e4900817f65201350e0b46a6f06a7a937992e9fedef1022100b6a9ef19573f8330757d478a01768f74acff08aa5dd4e8c31e956c654c6367d7 3046022100fd0a43764dd4209545b5be28e302ee995f069f5203249e782188c91f06a347d0022100ee0222ba9f6997a4fcbe596c7feb1f7407a6c0a47b4dcebdf285f2ac6b120d7c 3046022100aca0b188b97bd0b065f7fbb6a799467bd71a7739524fc02d8b36a639a2c9f6c5022100824e5510eb7c4d5e81d3ed5ca838de76dbb526b322f8e15d6d4c9e54107a434a 3045022100d67e4297121bbbb222ec8cccf643db03120cc96d11ba906c6ae1b36967e3d51c022042a55cd1a978b39cfb5eba98958ee6b9faee237af5517ccdc271ed02c9d580c5 30450220483fc02e2a0b9864d11867f4084a6a4b81b90cce76a2861cb8effe1571ba0883022100b1512d764bc5e27155b4aa92f09625159d2f94007e3512d9aeaa28e8f233986f 3045022008f44f3d3b0b11b80b41e95d704618421ae646878e67b6b743d89e49e36e9463022100db95b04bf27e061c709529f02081488807326bef4f94e9aa9e6584ec50550546 304502206f644ea1afbb61195e9e5e6a5c1d75498ccc95dc5ded56a0c746ca7d55b3a7e8022100a02fa7f63b02677016096c40a967ea9b0ba18ace4d8a127fed20786654216bdd 
3046022100bbf18b2451aa503a98c6cda6691719434f87fc32e5e183fbcea7c802b781f51b022100976027646059c423f259c29834bb37f36b91a41811bffa41575b83ea146a74ef 3045022100f8f55e19093b8de879567b7e56a7f5eb7201c4eaf043e6da0a2b41395612ad0d02201e44906fbf11fe20f63d40ff044dd34ee594b6d8f50ff38b793ac1f838362683 304502200825e97f6a6073079f97c0157bf6b012b0e1c4c17f5e610b50628797baf1b619022100824da3f070e3320d2db841df4c84ac1a572497c2ff68480a3f3b0bbc508c3727 3046022100ed032eb4151a90342b8908f8200516b69bf8611172586dded9c9c8beb30cc79d022100af1bbecb0ead46ecffc537be39cf8863f18aeae4417e5a85757150ac336715cf 304502206d28ba4de63c83c0dde7248109b05eccff6e26f5bbd6f711ebbf36e191dd4782022100efc2ad51815a74b30a93bcbfb0e321bb0902d0d50b99c66359e7e45f6f6fa612 3045022100e407c34671aab41b83386365904e9637f7aa260cfd840ea7779739ab48fe01760220529562bd727e344b2b23257d38ffa6e9e384dbbbee36ad47ba0742dfbba3ea7d 3045022100d3e9aca4c42c1fa6d2e9fa3e303aa6e3c6b507c828ed7b006fdda8cf09d109cb022067f49852200cb1183efe6a739710c90bffc6f00f816706af97cc4393bc7c43a0 3045022100aa1aa4d89c3dd1441fcb060bf80f9245ea0bd312a2d1ffe18240939891703cce022029d93b0c5c083eb09af2f3ea628f8ad9421c35ce1a7e8746c050970f18d8d3d5 30450220534bbac038651f1f949eb465cf5593b5ee5e508018667f986d5e5b2f4a6c3a14022100b45a7e0c88ab8888d111d3ee0bd5b70f6d582bb6b84157bd6f7795386550f5fa 304402200e78118569d46058f614f042dddb430a5692c2a04c61063c3591dae0c2bcffdb022068610d87c4cf59850ad5240dfe584a97a2693b3dff1ed94ef893e6d78720f9a5 3046022100fbab9dfd5fa681d6370fdae58efb942a45d2636245a27774060fad1970bd07c5022100b0f6f82c247db240910bd40e7193bb2a7416fe3b982734b37ddacb793a8345b8 304502207db817d9eb4c0f24d638e0eb5439225149a5966c5b960839cfb38791d3cc4204022100c7853111db45dfda1692b9150113a68505a33cb1142d38d77183e8d0b9efd63a 30440220203b3f588386ae17b614b85d1f91567cbba5d48425fb9d736115e67c8ea7e8d30220410c32d9ace14aa44e557f95b99bdc2bf866a295544f2b037ff11963ca3aef45 30440220381357b7455de5ae61ae706623a1fca65c50c17fb9217dad1702f7a3f0108c7a022012d426019af290413066ac8b70b1c28ca1875b90fe1b05665a661229184cc420 3045022002983d3632eea8c7bed50a2927c240bc89c1d508371b514e419369948d8b3986022100826be6958e101b52a63beecad8bf10cd998c66741e277f4a1228d5ab9993258f 3045022078601a4c60495a3f6fc2aa82df7aac25070c006ed8e997632e463095bc9a4c43022100d6bf72ddeab5b09d10c51d0497e775663259f917cf3ba9053a27af21d82c2f5e 30450221009c58a22a9ebc43c70ed675fc668a2c275bef5afbb22f2bcdfadcb537d60a9a27022067a17a08f506e1d95196a6a43423bdbba8286d66b2b7bed697d46288cb33deff 3046022100a4cd2acaf7a4834f8d959822be0a63cf048a1ece0869edbb94ea95056e0e0c26022100bc8d31777abc10682b5fafe94f6705c21d25f7c65f2ad5c1218e2c6e033d41ab 30440220365aacc168a6904941db1c9e82969250bf019ea79806d5160980850f66d2399702206bcddba3d8fd7d9f24d96d3aebc01352e6d01dd2c6e34807fc1c530980180134 3045022100dbb9513fdd46d4ed0495b19ce751c0c49199aebf2189e7914737370dca264032022052dd7638028dbac24a3ea78706f91ac7bd80138c2cc551b44a70959a65c62af4 3046022100e74784d7e6155f294a36396fd06d5e219ffbbd21d349c910b522a47469998076022100f2a76aad56896d8254dbd5b51d1ee45abbfec8a19f938412f93065e80cb2147e 3045022100a118a7f4b5cde4f20b8eff95e51ab010fd9b0f3b135ffcb01f88fab5d00f0d16022007cdb269c03f7e54a913a05dbf1982351fefd4dd4ecd0f822f6c3b68f487b19e 30450221008e9ebf7883fba8d3c7be6f8f6e9b48ac6202b49ea86d907ee3320f7e85c2c831022013e19e82fb96da17736ef491d15e3ab1bfd11f9110d2884f9579f7ffa126222c 3046022100fa68a2db7dfc6dddf0921535252e7597292703098076454cb74591fe14c5afb4022100c1b0dae5637a643fc2a32ec65088911fd6ba43342aa2bae672a3211dfd700bc2 
304402207a3f29a9efb728ffb4b577c897fa9bc0c588c4349c3734bc0a7754ed4c85b8fe022014b23e69522054835cc73206e966d4f2afb2882bd26be67f2961d219f7871a8f 30450220635403be1ecb94f7099542552d99cba6b46eec5423245e2ee772f1e5a30388b8022100dcf56ecba02614e97d72d9cb32c4856c284f7f7363e7b9f0f1ab854525d6ad9e 30450220048a22709a0bc05cc675237dbc5cc5f76e7078b27a7cd90bb298073d507eff6a022100c78ba09bc8672c36d8d770f6a359b4dfc3bf036a70800c10c43ba68bd5155ff2 304602210091d4d0ec156814984f608819f93d4586036eeb51efa49dbdb57fe185d23631b5022100b3c29262d22174e4efe0bf3632083651d76b6b358be447bdcbbdc4759a608f2c 304502204ec42d99becb5309ffc9d0a32b0510fb8f9a2b632fb4c12a5cd67595dff9fcda022100a03557e1babee6b55b26e8fa6115754b54b8b5a10c95736808a8d289e3085d20 3045022050b71e9c59fddf54f598c65a5f83e7999fe262a1f5ca6fccc24335af0ebadd0c0221008a3a318fb04f2511d1fa741c5b0f5d1a163bdcb421dcf22fcea6eb5b466a3047 3045022036ecd994d94275cfa1dbfb1f8bf1ce0316ebffbebd490056fa5af0ba09bfb86a0221008c1c6a56cb951b7d7471aa76f612285d4cb708bd80622732065ea50ab7159999 304502203310edcd85d1a2ae76f765c330411bffb16ce04e9c708b358373df889bc69b72022100f5bba0fdb9b1faa2c7931785740892720dbc8bd5323c9ea0fcfe2128a7d83934 304402205bf72dd9f4c0445e6aa9325d8613d8c72233bd814ebc1b2e03cdc1ea14ba35980220650ed00c8d871aa88c32ecf9b4fc6454c43f6c4f3cd428910ae28a7a1c122ab7 3046022100be33f61dbe0b187cd3b8f23fd9ca569eedf2c8075e6868e1657d42ebaf89e5aa022100c9db4391ba027026bec63d970d79a39be8f50250c7fa4925d8ff69e367fb8135 304502206e6a78564b5391a8ee3c89d95fb864b5cf018749f59bf2fa2ceab50274c6ab48022100abe3112b011aac2fe39fc720596416c2a1f75eae13072962a8c463855589a3ab 304502200488a2e2e5edec4fe088645b06aeff8df69e2984b24c742bd7e222cc8d08070402210094f37523b69dd2a1797309055acf2371ed67df395b152144e63dad8f0f72ec91 304402207dee3378ddd6f64bff0a4f899cc0c1c8c7683494d6368f01aeccf26fe707577102205d1dce1799c4969bf244113b3ccebe79b76e0d82fc4f276918aa4c6201d3a1d6 3045022100c511cb7fe34712ab8b1eac9be241b322af508196c2502104348ced2b56c5cb5d02207fa06b68b682f6533fbaaaecae2af8690b335410fcaed49044c9cbab3d451650 304402206e3c57b56ff043b366201a3b3719a7d962a56cbf7d890c4bac7b9a7231cffaef02207ce65cb1c5fa1b7533869f2210245e9ba5cfc6e1dde7d30f4424473e8c443990 3045022100c799f61faadc2ca4790fa80804acdd238711853d2c48bbdc33d72e47e044cfc902206c1dd660bc082ab420e9dacede37d38226f6b3dc81707c315f4166c576613748 304502203cb8ffde754bdc548143aa085f5711fce7a78f811566f2f3bb32e84dfb553d1c022100d138cf0ed61ebe3de8c4abb0bf8f1f7a4658cc5344769ffa301ad41900c48f02 30450221009511d18b3f2af905e4bfcd9666332c54e45d5b812c362db69b85ef2b3a93a53f02201d016441e71e5ec9c566b3e14ec08e4e16fd7bfd6e230f51bc16050c034bd195 30460221008d39c34c33a238c8cc578fd83a70305e0d6135a0e68b0a2ddd9142c2ca4c08040221008c26a5ebd6dbae7b8633a0963042d7a6d5a4e14b816bb1d4b00c8f3d58cd4faf 3045022100a497a3207dddd24339bc2cbe3d32bdc1b36d8de1ceb13c874d0ed81be7e3df02022074b8a3fa01b458fec07e2583a766bfbef0279082a4f5efb59d5159a376f44593 3046022100e26ca8c659c8a01bc7660293f2237cc8dfa3f70ebfa607114fc76812afd742d1022100c6358d3b6f960f19b9774f086f5f580dc665bd38ef3818f9954b9090e5eda0c7 3045022079ea4e9b194d8aeac4c32a0d6edc00eb2cc8050bb3def5844f875dc3eded972d022100bab03d8dd7ad719c57db96f9b8e1abcdee6ca992073352114a24981bd318228c 304602210099f097127b21d02fc916ceaa48af0337b4590d34462b02a6c806b63744da2d86022100809ac60d91813fa7ad023e572b4591cde097e8d391e8414d2f73e049a8a1d8a7 3045022100f7d225225ca22ae39d3fb44444f971100c7b1d5e489e54d236c756d6ca991f1f022074b0b7431ccac337ec4bc2d95e33d59d6c8e13d1e95c211f901d2c93716b51d4 
3045022100f02f800ac3499d286ca04eb5a6c108b8a9e7e1486920b82a235260b05bfe9a45022021d657b3649ec84398a99752f7e20c3dc2b0b3056894f632cc947b96182d65e2 3045022100adf3e99342a8006a8406a919a74b94e6f12920cae34286628e3b7774fb62c7a602206dded61193e979e3d18f1b79cc13728335491289f0ffa493955d2e436b327b23 304502205d4987daabe978bc989e263eee2d66f74f74516ea383bf6d46268a07d7ec5b2c022100e665f176bc69f66bd52dc9b53cdfc4b7a9fc5a780e65c455d4dae98217e140ce 3046022100dd9ce0f0e574404ac5a7fea16f1a8a22fe343f5e7c19eb5963cf23808c0f6efd022100c64808309b1bec6a073832e2c9e20de5e6c48d58295a1093e23c46c5b301d398 3046022100f5f91f7d31a366759c96cebd92330b1288c216f4ff252aac4a4812af8c8bd7b50221008212b6164de4f5fc78e469480999b22b5a6c827a3fea2d5311f6baefb093c9c8 304402202e8fdac5f8ffe12c3123be5c2459ba7605bd32223725a223851a6f16afa6224a02203b609deaa3c70252640b7560ad9a0489b71b569dc79ce43be41d17884f859497 304502202650c48a9b18f596a904cf2c8ac832b2b052236499e9ac80d5bc11e7bf5263f2022100d06625374fffef6a04d164f1cd2f3433efefc0becf15e7c63e8bc19e7dab0b9b 304402202a415973e06c01c7d5479ddfeb04e53496b75aa4affebbc818075439e86ec83a02207930e0f8ca2aeed142cab5bff504c40210b149872dfcb6300f0107fcca9c0a53 30450221009be86b1b05a2d5fe92139c094ef10a2169e997592ad757c901fe02182ae4287b0220629046d80f8722bab442b0a90b0dd3cccb04cf9f463e0728c63b1b451428d8d7 3045022045401502f9c11333014839b2bf580ae85639c4fa4505e5321ffc2d6378bdd386022100ca691b573f2fcbaf87350f14d4b5197e47be48637704becb0aac86d7c3ae91da 304502207f5217487b425c15e796accc2436e493ce2d89f1d82a4b0380aaeb4360ad24fe02210086a99b712fa5f05716c3a23f04e854031c09a342e85e939d8f602f8c7bbfdf5b 3045022100e1b8e0fbe40184ffd86c35ba25d039c942a58844ff996cc7e135b9659e0b8ac3022021e9877d112d6af38df9972c2fcb79fbb605788ae9cc61d673d8a8a7678cf68b 304402200a6d5e61dc3865568deb61644fb2e80c3e18dd1b35a7dbc258bfa81d1fd419ed02204d83045d8bf77cb1b6004e78fcad9a74fcbbf56fcec2185f6c195bab2f8b6763 3044022068104ed088c19952f6ec066d617371ecde95696e56205f94127f5b577f5c7db2022000e1a3003f6412eb7595d4d9d060cf77e618ab5c9d0456c7ced3dfadd804243f 3046022100eb53d2d581eea22245e9629552bbec084db78d6713a5e422df1514486ffcd04902210093fb5e39a28e2d966c2b8ec9286df400e3435354d8d905f9eac747e867fd2489 304402205c2714c0350937b78978b44f94b18f7e8cadfc61028561448ff12e5e4f8b29e302201846a9f1ef77bb00d5b65d0f2446b6a8527ba6fb723c181d8542772b5ed79af7 3045022100ed75740321eccff0c2ec57fdcd8c27c4b6a62205fcb4e119545710b0adbdd4ca02201abf5bf7e48ff9f38d28a433ff69690506699acfbee52d9a2ef8436c0f927f32 304502206c64431168febd6cb10ff8bb94d72ac04d1fdb8676934ad606136b6fd2415a3d022100c74e86b383ffe866260e4b1870ba326ce17df34ef401db64a37d3c3f67adb0bb 30450220703048e92a9f7023967522dd5d089b01f18c8630de138ce1000344220c33705a022100b37714550e35b2a427a026f3daee03f19b8a24701aabd1a185c960f30b91fb9b 3045022100b7ae7ae902de0176ddb599ce5af594739efb0dd2d14faa76b168b1898ba6159a02202f712411289efb8c296a1d49b941f8f3e8dc22199f72f382b9cfd35136dc4f5f 3044022030d18d023b5dcf77839840db47a19f57a901bebe6aba373d58afd060a5bd1e53022039a94cc627d258b74189016626fdd0f30660e9e37fcc022599d3b55069eb465e 3045022100d32a258951605677fa03b7af02774ae0907fef85280b0f669d7aa73b81d001000220265a5eed75de20617c0422c7601b73f1a4f9451f1e6227834fd2513c721e8875 3044022009f02325d3079224d2c2db85d07060117ddd57f675ef2f46c5bae50412bcd5c60220058d5600652bd718492f8b95f288b83c424916a1006d057305fff8394d4890ca 304402202fe65b64ea49cdae778f19a04969898f6f3cdc6092ec0e584c2238ac2f3ef3fc02203570ae1546aa06a7d26c5e02ada154bd6363e3393d42db6846ffc6dcc1f35cf1 
3046022100d83118b96a601dc0301751efcc05d23f3881467c0600afdb222739b99b82ef7102210095dcaf15004cc10c8314587f3d7ea59bf583b9ec8d3abb2467fae264e0a37e72 30450220081ab0ba4564dd52e3ca55c3aac275d12c70e3bf96dcdfd9dcb7d6064559c63d022100a5b4f5a8967f97d6d813b0d8079105e449c4f919ee8222458db5fd4d9e7d1d2c 3045022100e920b1cb1d3055d4203c1cd5171fc3e62251c5552776579c436a50791f2f7ee202202dd418f2f52d6c2565ff4bfc7eb427da66cefeedbc15dcadb893439ff6c1c375 3046022100803aeac005a2f176b4b1a44eca69da26e7aa13860be767c27ed73d73fa9ed272022100dc1160725746568db203a859c5dcdf638691f323357604a09f7abcf2feb90381 3045022048737f4e14af42e685f14ba6eb9b097de9b0554f0f45705a1e719e8438354505022100b8d1d75ceb55db88e27402c1bea83d88d86c46ead69789603d3415bb43a18af2 304402204063941478d8081084a4433af83e09877f59714a8e45395d69b5494663189245022030ef9a91ae6f49763bfda1585b0815d0db2526ea6abd03f163429f282f3434cd 30440220797e85a555760851e3633ce8712ec8d0dcad0b0b61edb912990ac6cc2e0d2d5202204a859ec52bcb79b090518d0fe5f42edf654c44ec8b58ec5d2554b5222b4dfaaa 3045022074019d2898a447a932559fe94708cfc13a8b6ebc47c4a13ca949122f99f2eb3a022100e6c6159da83daf85985a6c916d16afbee2d812af4968dcca4348b6b9c83ec3ac 3046022100cf8a29fc68d8ab4e9f08dab586c7788aea445f39a693037dc208d9365905d3aa022100fa519940bc7e354077725f18df317b3bb248c0b068f56223aa8a88de806ca476 304402200b71db8bee3e98823aad5c4452893546ffa6ef32f4c5d5c2b5295ccb4fb67a7002206e142f2f97d61eb5591f6bbbceb0ac828196efdbc8198020c8872d97cf0a0879 3045022100a08a9356fc3a29646eb9881b6fd1c7108b155abc48e12ff1226f34606b8da5eb02207867828ef9a1c1f3042c9dcbf31eec8431e8bd93f014a339f3940235c1164234 3046022100e2613eb294493e93e99c853a4cb54165fd927836064c189e35d00de12811b981022100b25e302a68ab78b05fc31fb27c284552fd7abcf04374bc6de4f99a55784cb4dc 3045022003bfc25ea40b17cd0acd2d0b47d04c7cddd38c4b03c45a6ab71dd1e368412efd022100988e9f7620c5b9ff73028c105b9499295aae08bf162a82e3cde7217b0c244d34 304502207899e55ce7162a0a0badd83961031e6db96130b4d3623e1fc06b12cd2aad5c90022100a3cd71f5861e7b4ebe530861ebb131cc72defd9e8194a2d051e3abe3d3143b17 304402206f6a38bda1900824f476ab9ae59e9c00d310881941385f185421ed739bc66205022031640d89a298ddb4aae8593a1e52868c718c6f9aeccaaf67a506db742a907278 304402201328257715cbd65871c3571f4fdd18e4d0fa3f88e9b1d96b538d646a2cd3e7af022034abc43d0102e142cd76ffdba2b5b1c8ef2bacdccba9255c5a632b0c6f058ab2 30460221008ef989ea9e8ddade4438107dde4b0b742841608373b2298333526b74bbf6e15d022100e54e8216741fc9800ce06abfd1277ec547d58a9a59533e0b94d544eeea296837 3046022100ee55cb99e4e3c345e1782e925fee4e12dd61c8f60421161ad0b1d7be033ef5fb022100df83d5e3624296428cba269f1c9fac23650c2c7fabe8ffb2a58e16b506d25c42 3044022048d0718bc4ffe1deeeab3efb310b9d672e83ddd26b4be9cfbc8b857c9fb32f9f02204a8245b463c01253e05b73f81f15c9e3dbb4d14ca424fddd055d3a9989777837 3044022033bcf1445d9d49ac529f73dc9ff0677b5fc8d619cdf433cfa6d208258efb83200220255242c4bf8740273d6992440a5850a7e6adf161a0d3a207c97730445c7d4108 3046022100d586a797bcbee41bf1b9cc9131765d1e7b17f92a30475ea7079474000536bc5c022100f814ee6cfe16af3f5516e5dc5363fdc75ac0f52e05b43a24391d48e953480b0f 3045022100d0015bc603a8d6f00bcdbdde85e4b24e884acb7b21bdfdbeea6576daf7940450022002c19e5a6e82e142bb71d09dd84aea3a4e35b70513a990051ec7d1bee112a7ff 3046022100828224816a7fa5f49e00d49094ec8de720432754c3668996be2b172656a150a8022100c6b9e1779b6dbbba93fbe6887de6f9a2a685ec155d0af26866d79df632c351dd 3046022100f33ec58094ae032bc3630e9336956b697b004de30a247fd5babac5bb96881aff022100c810c33de942efecae5e24b7b49273c80ad2a584539544ac5d31698ff73cefc1 
3046022100f72fce36ad5513368aef98d9f55365e86ced1a42598704f511b28eefa84f463c022100bf4644b66b1ea826fc7714d21c5b5578466fb429811780992a61a1cac4022506 3045022100caa65492e2b347d628858677adb4fba38bcaf40a66b86957b914b2c2d2e6adde022047df6d6f2e5855b82000320803cd0adb4694b72e108965ea0b7e285990ad50d8 30440220428055284a3310ef8ed5a5f328b04499ef28d9a6bf37cc4952159e5a2a399b4402201bcbaf99a75433b8cfd5d405054c330b31c764ced3de66a3420059f3068d6761 304402205ec78b9f7a0d3f42a007f5abfd1466b96706fab43ea15b767cf930708a6ca4b202202f58045ce331b49d5c9e0d6cd337132fff5a1042f0b2a894e848b3848f4f0629 3045022100e1fe3c24bb8722fc03e7a1212e250e06ba377507094bad0964fcfcc2e167e27d0220203c6c3ae4f0353968cf0d922c520d215e09d03fb7cfb437b7b4648fe0dbcf56 3046022100e4b2331f6bf4d68cb17296c2b30a8895d860a3cdd690c96d9398e25170e689ef02210090f28d2b287f73ef91df80d82d111358598e2c38516d833e623a51761cdbe246 304502200918c462ccb4ca9d4539e93f495591f8c8d2f23dadae3cb49d8b83d60863635b022100b345d8c40852724f12a04b1d6ec38f9e988b8ea8d24cebf67361a0ae1cf8dcf3 304502206234642bb1209215a013da6d14f91dde53b94a00c27ac28dab285c692b7efc0d022100dc2e2cc956288f1d1a0be2bbcfa94a7a56d3d4be1b9630d591621a80fd85cb3a 3046022100b697e7ad2887201ccf1fd38bb4116e7819ca9fb642de8267b376165d03ceddaa022100c972389c9ffbf8fea2ec1aa75031af418616049a6baf5f71eea81d8ab97b6584 304502206350e267f7347e2c9d1f4ff2cca1bbe0ccc090d50ba85ef2425a5a1ec366f00f022100c74ecb45bd40b76b2d9ca0004c46fb39208184bce045ca24659b2f9275ed0356 3046022100eb037e68716766c139b90f271a67ebc0a6be87da08a317d2a31d4c3331e50838022100aa0f9287541100a005aa5fd813ab45c164abaa264a9b99ce9671f45270146967 304402204e361bf1c17b926d0ecc7ab174de25cc083e3efc7737eed5d33d8121cd2bfd0902202651096fe15e35d79aa4bd6647dad17d85902dfaf59a4c3d4acb66ec98686fd7 304402205558cfc1d5fd44eda67bad57d1a98117bcdca927bac9865aff60fbcd747381b40220309bbd9e1738f14644a8009e3a9382dca617d8698de6cc87ea24ea204059bc62 3046022100dccf1de00975c535403b506895402748cbd7d17b080ab0ee0afa255a9bd4c961022100ab7eee7a7f1efd2eca6a0db9a4dc7db72ee1de636a7c9407266142a036ffdf36 3044022026d44459f8e80f430d413e4606d7fb1abc704feb846d5729d78c81ac9da7444102206a35ec820344af9c6b2542f6c1771adf61a4ce2a75b4592c15e4f19f10e23bcc 304402204264019e0f6fc671d3d69443c298c09c7daa97c8aab7494178a32d77c24050ad02207bc4de95ba05d88326e2c340451f6b8a8208619905cbe36d77093a1b1fee284f 304402200a3da88038ade51113b352ddc6be3d69600d970a039c0daf01b87be97cef5d31022054acb3f4b0e8b9c237b44ed0a068228e2189a145a75d5f030e1475bef28fdb8b 3045022100be4a3d040751ac1b2ee3abdde2fd73fbab3cb15e416d95d3fb48eb8e00e403a7022016d7858ddd93b88516764e6551ff2a02ab012fed0c8199ea23e3f6d80e33b20f 3045022100ed91ff7cb7408bbd4441471cbe7af7bd4fb83af8aa19155ef32e5122254872f402205f7d4263b3f85fff2ffcaa5f5e1f152a4a44db033c8d3b9e1e3c944be9239870 3045022100936968c310517903003c1572076009e3f3a5fe4a7451d26af0002cd94bcabdfd02207208ad03d40338490665617c164291f02d50244598e397d879557315855672eb 304402205157a3e59c421ab22d4d5ec46d3828e1aa7452db9a8f6f16a5058e49a377eabb02200b5f03f49ba591f65772d56e21e0a4995dc2fc5496060544ebedd1dbfa9e1d6e 3046022100d733558dd8f70a23ca5b99411bfe97ae1b412caf998fd662a623d728c4f93f53022100ea10e094af91041caec4e222a49e582e3290e96e56a38281712fc30568c7f40d 304402201eb4257e899b9bf75d46431ce82c0d8457b7f594150ddacb7a505e1d0533c64a022021728cb820e17950f26b723ca0bb5bbd12a147dd0a4e0a76b78627f5c1f625bc 3045022100b3e144628ee407b5e0b8b227e8702999cc1d2e3d4130ddcc9c73c2815311f3ec02205e51030280aa4b5ad81bc9fa13fe4b1054e8c5c115d6504f44b7b734326544c9 
3044022002be77b375ce5d8ad42a1404b227c420aff1eb1cf05690f4d13a7597bb6cc13802200850b87b1cbeacf58015e78925e68f830d8fd23da2dcfbba984d53caed6c4441 3046022100f05ccd46e0200deb691e573f8032dcc74eea3d7c124f2208169820e85d936da9022100a2247c3ec5e9d471a58b494253cd765d837e0becacde8e5f684934dbe32901c1 30460221008cd9feae43c04da0db6310a850b19965ab86dfb35cae86f7581454db81ef519c02210095c86f0cc79ba793d54c76c6531acd6d3c754acf7691a7a0b80475f4a5e06b4b 304502200f05feb78cb7c83298d27099280ac49f642783d1d66a3f9c06a8a609fba2e0f5022100e38eb3fac981414aed8c9ab8f2077a712c1c6268dd7ea44d1b3d82294cdffba2 3046022100b3ae89f4cd14dfb282a2dcd404fdc918aabcee6c3957f6ea987dbe4eb6e8fca0022100bf28248da9106582e2d209cd23366c143ee174afcc41f6aa0e1db6d747bb77d3 3046022100ced22dd697b9bedcd26cddfbcbca0504081e1e82eaa9532b5ca23c844b3d6312022100b9f459f4f50649a8a3b268a3beee09d5684cf725aec29f0a166b0a205895da37 3045022019d6daa8b3209e264571cc8f2eec579b04b74b8f80268a879c57c21d264ca135022100e44cc1ee47a5e13d557c12e703e5a990ef927ffca370eeafbc0a9c037950524b 304602210080f33ffbaa7f48d9b64590f1b523b3e02a5a19f8398e0e0381b3ef0a033c637a022100a3b9c9723c70cffde3303f72bf2e1483779e81474154640d7d9968bae2f8d02b 304502202cdca11b8009c87e9923be8ff793c9c4e9988b0298ddea3358c93a10de2a0e24022100f378cade0bab6e7a76a45c2ad6e5e29d9f0f4116d5351a3f268f456b2a7476c1 304402204d6666ee0d6f14ffd1d723ddcbd2a4b853fe364631b39811dad4ac109901696e022046026e4d28226a70eaaa13b26f338fe01bfc831faf755406e50d9855f18cc00f 3045022100d565fbb71f07a8073769ee43b78c795160a75e146901f968332622080a0f95fb02202fb8b2377e8afc3566dcf773122f98a69124006bdcae46b762701203068a82a8 30440220329845066ddf717897d74ed9ee42da4bce8e0c963438eaea9f5021e8319627bc022036601c4f3ae83aa944c14d4b5f12e6f1c8f8e502385bd135d274245f413bca98 30450221008cc1c9e94f34fedfae1a5593ca7c4265c9d361f7f3347315f9b6e89da389223f022019374b95700d5e9f51eeb1869e64017f8571bcbcbdef5784739e9eb0d0923889 304402206c31148aabc2108d8b6ec53179b100c2060bfced948acec725439a50b2132cde02204409aab9f7d3d904e062b4b0bd427b0d089621c0729b77a5f116c74116d698ad 3046022100bc12aa48068ebace3abfdf1175c754d327fe224336278d82abbbec2499a18011022100f3e036c6d1574c971a02430db37888bead6601c33e7ffa2bec71c3ef3db721c0 304402207d7d97f8c44093682af54e81a34276efb8af1ce929e236c485937430545111ae022024b939f20ffb539dcb9da814795ee7127d58bbc8501291215992dc4f8377921b 304502210084f46d7504b190ee9c91af4c55d596635de42f4660148d2dc7c12d30f3621154022058d983c8c0bee021bfc1fdbe30eeda732987033dc1618ef0406979c050f32ea1 3046022100976dab94315ef2bf849237123b442e459277bab52b1eca302a7f4f736f475ec5022100a67facfee2c467775d1c430fe10d28cf002b6e39c4fb53573af38070d0d060cb 3045022100b59ade7a01bd40f331dd2aa106eacb577e84a0687cdbc599c968540f8afc0ed9022074f448ca20bd3180bf3a2542a48475ccd9f819dcb22a42db73e56ff4e0db5bae 3045022064b519746670104fee0cda938746cd8156f55074fb5f67e9a5a5c42fd0c2c574022100d8b8bee02a7f53560d513299a16de96726b4e3c7dd473fec8a6fd1d5aaf80d2a 3046022100fb57e947918703506c541fef587673f4da2647cdc1c69fc4487816f7cd617a1b022100a1dfc4a7d4871cde71ceb3ca675e346c79795c83dc2fe08a956e781751493b52 3044022070acc62882a9f6dcb5c238e33a55673dc7a084b1445447c7d095f7900e6e119602201ec2b4f7a93b3abeec58b778a28f8bc2e9db78e1c3e167fbe248b7e1999a4dfe 3044022079b1b7930e1f803f0732aa7ffcf29462be180794b4613380f8edadd0f56d67e6022008deeaae892ed0d870d4e213ce820a846f5de053d3b17f0b6a1e1e90f72ee69b 3044022005cff24930f753b6d704ed1fd5c9516bc26948dd5ad29e4606b1cd541eeb033b02203714dbd9277124713a4cee92a0bad4e13335acd0601258d525b92a9f7c1164aa 
3045022100eeb14a256c839640bbfc8c06be47c845674ee30aba3c1f2b6a4751960ad384720220391675264326c3041eefbc9cd75248103916eed67df41ab52fede45d3967e990 30450220304c0a25772eec30aba3360d6671f866417857529386462fcd269327122e18e6022100fae926d3df5c084c2b34fbc46085344195c4c6b6f77e05a670af1d9b203219f3 3046022100877ed6fc526ff1f63358629189686d879474728f08b70e66a29a9178c459ada4022100b673fe9b2afea0a8922ea0954bf6ad5182222e9839f0059da340af8ab78ac5b6 3046022100ddca9b7b5dbf75f64da960202dcb98789a9013f5b8b08d529f1757858a642a02022100afaea50499b6f263443dfd7e5b97f3efa861cd0b696e10faea08ecbfedb24ca6 30450220326df4d8d9c3109eb6fd7db2f1cb7b6e0d0236cec7252c1aac810526980f7d47022100e75798400304d14a2c63764b84b0ae4dfc5f8d9bcaba7c7f225a4f7c2deb9ae7 3046022100dfbe4206f3b7383e54abe03a53360f632a8a3a174285e207e50b32eaa855aeaa022100c48e516bfd8fd7079f648e1461acdddd3ae7054b7487f1d7a19c0a8373247e3e 3044022027a17c129d71bfb9488ae6d52b69bca9632d47c99a0d4a0d8aabecb96669bb7802200c00878bab072da031e12baa734a1aab00a40a20d2d2a0342e012c7ea6d6a3bf 304402203ef7be1b34dc652fcda35578eaa647bed5e082b230cd28732182567d4a61f5cf0220688768c80722673f4aae6e8803800df3c62398fa30f244eaa35d69a904ca8c11 30460221008c0684567cb3b213f74170d56a9f10d9e105721ae6e0ebcf79cd98d18cd92876022100ec3d4a5c50de46ece0e4b4d95ef45678546b68cdfaea1b05e0ebe9fee42cf741 30450220329c5b3acc62357e4c133023ca78df1da0f120aabd16c474646336181905dbf8022100dc6edbb61f8efe52c077405f00b7d414ea5ceca7538e7536ec814a1bcd62e56c 3045022100c3d2bbee0b7ed6533835df1d91409d50f82254efdcc2b22b3e4a9d7abb9a586e02205bd04a7a2348edeeba813b32c4a7bd3a675bfbb591c65b68083f38d80aa44f75 3046022100ca879daa769803526cc2861dfc374663a5996412c4b996ac51ce34cc42396053022100feb6e052be1960e2023e71f5ebe065fbf1af9d954570c9ca5c5a532d9cdd9ca7 3045022100c38780553cadd83ba2b4b59cf3c979e7065a893e017aeaca7474e6c75d08399d022071f0de2183225e82da7fa0e38ba7ba048c492a1ed208b2661b3f3ac4f7a43410 3046022100b95a8b62bf8a3e039a500a4eb64f70daa0333f1d06b3cc70ea4c4a8b4d33d030022100e71a0fa5e703835bf42a002784c52109bccf4723f3f172379b6581cb27958925 3045022057ec10771b0eed693e16aa411a1191bbc5b580870619484d24e77bc57c6f6fe6022100c19df77802659c056b6e3b0cab71a41a9a866da7c6b8f8ae918626f37f48f29b 3046022100cd6c2f10fff81936b39e941f9bacd5fd360e9e90f48311dfcafe3386408c674d0221008a22512bca26caff0dcaf0d50949ad971540971467952fc1e9828e5205bcf8cd 3045022100a9d04c8115f219869ed6df555b44ed3011672fd4b026f4571650a7a2116fee2b02200099f4722092742cca12951ef2329446b81e21dd22e5adfcefccdce57163d5ce 3044022019195117075f5df7616e58bcee8d3b0a3568edeb492bae855525544aab8fd4ea02207f5196d1910f48e29365301656de6d97a0b14881ed51b0bf5bf35e7a851aff46 3046022100a70b52920cb084a1b485385bb02e987cba12aea506e68ec0b82591db0cdbf1b4022100fb1010698f002032faac8965c5f61e6c13345f444e3ac545033ce749a3e47214 3046022100e27f4b45620d3296e88aacaca229e74af9ed3d53644d07d740f72dbc0288d144022100b35826d4a7a4f176b6ac786c42facbb9e438f06d75cc001a9a2050052da107b1 3045022100a61bbb584e3c2c15b34cc6b06fabfec79cb5315eae199f65c16cdefef52827f202200100fe40bd6ad8dacc871c513303a8c51d1990ee1b24084c51b2604f99ea178a 304402204bd797763a1e0c53d9846e5caf2223b8a8441ef148aff31839154864c4d00609022025614aa832312045989117ea64993f21cd047718d0f333e66da33baab624908d 3045022057ce1ee9f974bfe0905190a0f307b00d65046bb4653069275c3f341a73fad194022100a4ef21201bc3791a9d9cb4fbae207931934994724be04cb0ca2cd13b28b051db 304402206e5b7c95c271c03a4d52177cac2773e413d53cbfab5d4e19a9509f9245e3540b02205bd5828b5d68e49787227524430ba584e3b297606a603a2b5dac0b7ba4ccb791 
304402202f9d889da7da558458cb174c65486fe08e9491ea7c42d25c23e36875e693b227022012de5d53c43e6ad98ea70c938f1cc550f6fdc842426ef05fabb4c14a237ed58b 3046022100c9c712df6af38ce91a89e167b0e2d1eb692401887025a434e3a4ad831457950e022100c2f785e12904c0a8a7d4560a0e17bda5cb64fe5e10f1c27330603e9676498ae5 3046022100f9f5d1d2f5fa0c7acae51d2d6fe77c4ee4e554bc047e4c407f43c98713efc9260221008c7c087cb36d91d0f21e8789adf3003f5e5c082f288cb7d7faabac2b542db14c 30440220147701427f572a78cfaa1d02d042428ae022f96c2dc1c09dd5b8473d49e86c270220329a14b8a9c8597b105d97bedb97896e109e95512011e93a092d6181b28f3508 30440220521ace84e0d748e08696dcee25bd651b81bfcb13e23fd5f987b5432adc08d4fc02200873966d6a638a49c46d3b375e28f679fe3f06399a32cb888639bfdb7110eef4 3045022100b53a8635d5bec94612ffe0d0e2b14bf44dc95f10c9e8bbcfa8fa64a03c3fa7bc02204c3ce159153134a85939871faf99469e40aafa3aca5ab68bf2804172b1c25f36 304402202be96bc498b84bd6c73f7ea5b7b6a8119d144cc4c4f92c74b61d223afb4b6b0c02206eda18caf2ff2815e1964bb84cc026f64f58f0b96d6a94568676b7be93dee5ab 3046022100cfb2eebc02c73bc6af628d4a5447a2a1b6cc00140d16756336d36b9f1c7afb22022100ae6c9b73e913dceed494dfc15f86e74baf5a9d0db0d1c52313fa955bf29aacac 30450221008b7d913e644cef306e4f988355c53c92714c8f8368bf8fbee45ca072b579d0fc022056749686990bfb3360fe005b32c1009d0d4b9cadd4f13faab7a31d3772c17c20 3046022100e7abc828510bd0c7f0015455a7c5ccdfe849126bf1575ab26f599151e915cae1022100e035b28bb72e1609a9d6fff807953ac9df5e4876f278867e631864b58e9f606d 304402202f4b3640f0711b4ac5f04fd95075aee3e3e868df696b0930ab5634729a2a0fb1022004017ead7e3ce2eca760f355d5997f76b6ee7d6fc1b22c670d19aa25706014bf 30460221009defc5efc8f3811fca41f41d28987800f61ad3fbe74f7bb0f10a368e60d2d2a7022100dd16382ea971200e84072c1f3703722cf7be43a9417aab6c70aa56f71d1431e6 3046022100c27fb4316d8ae1e2c816897d65535d693b27bc1ec3521212f1bc1639ffa1450e022100c073ca1c7c608df73d288d1191ea72462d6cdef5b69c8664f1760acbfb0c316d 304402206e64e299f9ac0b8fa6ef94169e395d2439dcf14b638d899b0b2fd85e7a11505a022017b11792271b30dd979e9f3a532604a6baffe22db910fe95e7fdef31ed4a2527 3044022036957d1d0e86c9e116e48b569d8a323aafd4d7e1cf18133b93628e08fe739f1f02201823a6e31fd8f1496cb8638d8bc0ef8746cdeb35d52f699737d3078223caf002 3046022100bf35903b1c92d87b06b75f29cc4d21f2f08476df3092e25614dfae548e981a49022100b4f28a4b25ffed8b29c561f7f1da6596ddac06641ed6e8c5ae56a3512b8e68d0 3046022100d42cf81068393ee385f193f181fcd04affc3660ef3d8c704bd78acd2e5a22e71022100c543377b9dcb46b9b90e0a7db95e48be5e6af639377e0ff259795207a1b668e0 304402204b1feea99aa664f419e6e498f800a724859ab365f25cdff3ed287b145ad6a3620220292ea96a03525ec22dbf9cb92eceb4159b2164d15af28707144f50e852a17910 30450221009da30842e4b1a4028ab4ddadf51b51871eafe90d2cfc1d445c7b5c16435d221c02205def0ef671bc0693cdb62fc1348d73a7544ae42723bbf13f5f0de68a32704e02 3045022100f92882f037a3ddb043710d7d68b8b49b9c5d6157e3f83b95038fe649e1cb5bde02200fc7ae3475d18614d239d59e54193e7373ff2d0ad65485ee22fbd6296ea763dd 3046022100b44f922bbce4809858a93b230554e4e06eb347e670c4b7effaad43269688eb9c022100cdaa54cdbf7210cf1c48e56d53da130a474f52af85f7dbaaaf47a65ca6d650f1 3045022100c4b02e562a9c4b16c5045266079026d27b9c9f9a7369c44a9beb62226858ba63022032a4b5d0df096d04a464f6aa7fb80f5348ca414b0cd00d33f6ec38129398affa 3045022100d712a59801b8df5e14917e960b09c13c1c24e38612a18f63b23ccb0adc0f246d0220082285eacb53d3ee14d2f0315c58ff031f174a1af16fd7d635e54881ffa00231 304502210085d015d46a50755ce364fdc36c2a06c14c63c6e6b3b7260193d3a126cf524a1c022028ddc8e23cf67a6c01914b7e46043c50309f59081387112ae296742893d53007 
3046022100c29cac7acc0bd90f892a4bd82e6c80c4fbdb747f91c5c5917f75fb6836491d94022100fe9073153ed086376b72d599d8d67c254df29a67a7989ae78da1394fad2ba240 30440220725acdcc2cc67e6ab67991016a8d67f440ddc8b3b55138bbcc74a34cae66e4ee02205243e3d9e9a99e363c69bb1962281576fab8ccf6bd0c06786164fc522d1e4042 304402205caee4531c9923df997ed1a1ef3df0272ab4e5329a644f298b6fd94c6937be3d0220408658c306e5e21c53da99a59d22bf1b81b1e7cc49381d539da5c62b996c8db0 3045022058439a9b2242be60c298426bb0aab9dc4d216bef3145a228d02555a0e625ba91022100b1b30e0b54158b68d9f3890b2ccdd765ad51fea05546bd39d4a3ffcc57e2b74a 3046022100d6dde84f2b02ca83e4680682add2b13a146947f56145abc9833ca9ef9e5aead3022100a2a71af2cb5f661500bfe8c95cf06743bbf453cec75d7fa58b10b34f006a8aa5 3046022100c0f7e74ad9856b7dfb09a73053183e6e30a02a9a252e9bcb0f38f2ebc8d57b66022100f927cd8bdba16652d52375bfb21e558b1014a788ee45d8a7196bd27b7241fa5e 304502200a8569eaaa97394960a2ed589805154e9160fd296f0bff36556c7bc980aeda27022100a007fc66171009d47b9e2a5cb4605f3716d33d676cf2323472cd5e767d6daf8d 3045022100a0f6dcf2a35cb01ce6ad250da22b03f241f3b600fe4ff4ef7eeb875d6624fd2402205421cbccc22d78c6213a9474de48ec810897c82c9b094dcd2c4afbf2c637a665 304502206dee8140d29b98a3f4e0babc8e6912d2c5a02b9c45d084967ce8adc965955a76022100cb927c213c7ab1731a151d09eeb489c6b7286a98357d10532f333c48b170807d 3045022100e91b84579b743b52f43506f0b8bb81718641be10f9ba87dd30b47379edda1b1a02202eddf9bc34638c6dd341acf82ded7784d2efb71fbb5133e9fddae0fc403e8197 3045022100c0b9d4e0feee47335b26152101fd769de4d14cc5702600a2e853cef9374a463702202314df2830467dfbfbddf6b71fbfed6a590da6dde81eefd1554c368dd209c048 3045022025e4cedf22f00e662cb0538e7e6fb848924a1b0aa15effe679003700a095b1fa022100cc2a334a93600e3ec97290f68828e4a79ff934a80e8d63436142ae4d4aa28b6f 3046022100931404263676c82d3e1832947456232cbc89db62374ec7b78e2427bcba1bceec022100b160e10b5acbb5a18365df21158c0ce415312d97b544315d05676dc87e08bea2 3044022027d7d49d934fe2dc032baa88f1ca1a133a90ee9fde088f0351cf1ec52876d2e4022034e8cc9c2bfb5175f1d1837b5dc93c46f87d1938b9508c50138d948f3cf6b567 3045022100fdfad37d47d6423df856474a4f51cb0124062453a754e75715d605187ce7a0a402201ebc3222ab98ce71e706e67c652d27d1ce82b61faed068316d4900392dac6bcd 3045022050d4e30bf4199311c344c09850bf30d306588aeb9ed8f6852a8ac1aa865b455c022100db76bb08995163fc7e07b3c73c2e058d1b602a849e1ff5d621231e2085185cdb 304502203fe608af220bffc47ab4cef22e6b71940470b4a8e439041e5e9768eedde12675022100e26f0bf0ba37878fa492c1200e8500ae996bdc407842694305d754162a573990 3045022100c55d6f333b996627c9c7cc5e529b8189b4b1b166bba9c511fec155e67080f9fa02206e92b80978bc10f7084d86da1cebd25a37b09d480ae9b504d8bc7446f937ca4d 3044022064125b09696606a95f22fedee624074e32d8a625fdc100ffcbbae00f68db437202204beccb1f9c5b0b8249c52d7c4f2d28db8976da233b10a8563540f03b83cf474a 3045022100906e78cc62f8ace315f47846343a9fff006bb27c60e72137176ddba775454790022037ffe2c05806d4a98e63a2651cabd2a56df7921d2d7cc0726ba324e33bf3875f 3045022100d38690772e8d8aafae6a5433d188f498b278cfac75c8e06cc0e976ccbb4877470220259b24c44c4bc82ce41fc910b7cd4db484afac732ff3f9bab64c4dd30e5feb6c 304402207ab7df3db84a458769dfbe09f3bab98bacae29e3cf82985a171f14a0a56b791d022047545baa756abba9588d1eeb729a7a1b972fb2531e65ff1233704204ed7e6b0f 30440220742b27bd0ecb166dc96f260f03ea4a6280a2727ed616d849eb8524239a810fee02203b538887379d15e841bced31f2556f3021633b40c2adf55c0ba98b493b61788c 30440220561a56f4938d411081ddc1b09ce4364d037ba41956837d2c9ea6d86056ef3ecc022076f9d3f9f2f27827878e5d105e9d389bf33c5c933b1453335f37e35ece64df03 
3045022100ce6aef232c2a928263b0e3118029e28dc0ef5ac5905c7cde6c8c3c61ce3e03370220524fbef6ee6bef6ffb3faae11e2033cdd89efacb8cd38929f545f5698bd51542 304502210085fc2de75ddf190c08b44bf2217aba96e4e9d41be403a57b127ad1979815350002205356baed0b6665cbbdf80efc1aa8fefce73ea074a2d33224423c499005585daf 304402204019e8052471e3f8d069361a3b140de8e439a0007f4ea690229d7fdb6344bb2102202a03da2bdb2efdf63be253c722235fe4a6d7984a9a8cc685cc848a654dc88b9c 3044022024c22d9cd249cb888adf30a2b54f6cdba7b8df87a21f927ec9d471ef9adaa54b02200a2aaeb45d1e94ebec01c1c426f14e9722c698d6ee332c38f9ec6201dbe5f2e2 30440220359ce265dd33cd6994b0a61f13d2826faf9cc1bef79bebbae0b1d01b4313d16b02207616f6b7cbb795b3ecfded24f87e7d70d424c33afdf27132d2d4c1c32993efa1 304502207cc4bf03aba412de9fcb0966422548f43f6eb8aa7a18c4d83717974bbf57d5e4022100f517f044837331ed43c4752d2413d750192af61206fb1ca0d21e1095656b0687 304402201e886fd317cffb33a838acd7e5fb9c3007d631dd84e88b457b01d6cae8f6643f022009686b29fbd302e284ac2cf79212bc894a602e49f30ec79cf0985953a96e3b4d 304402206e8e1105c603911d10554dc2e5cd71b0d6215cae7825c498f0fe21781fee7b020220065e175778be80e22dc2739238b21064bb2ef5d6f25302b978d4e5b9f72e853c 3045022100870483ec7dda36f44b2d2f195bf8e7c1c35bc4a49a2dd12e27d62c4d18c0f13102206f2c3ae84848628670283a20ce0c6232d28d33e4540d168a08bc72df803d24e8 304402200dee7595174dfa13ac49aaa3ce9815985b2067a55f986e40546185dadd276c5e02204aa24e45f2d762d3a73bed9130b8a05d9dce6e654eed1118cc5c6ca3971690e8 3046022100c87edb110956aab7e8977db55a8321313174b4b65ab500a5e9208b6ad5354e00022100ca0a070c215bffefa8e750dd6ac70894f5ba672f14c169e267f177a14723626d 30450221008647b1d4e232b614d4e32034e1a2a5d0ba9cce10cdb54b560b260dbdb5fea21a02201062303becef879698c7a17bc95f6b1da99d362f0ca00218e8f3f01a9e8aca3a 304502204a03181e72da74efc750bfc4fe3697d3c004f7373ac2ffa5d692e01393d4b8e80221008f6459283cf6627f25a633aa9b69d2b7f1e688eb1dbebb5ed185d1d290056481 304602210095580e78ba053205d0ee060e3405cab69fded0297948fe660eab804676eb35b5022100942cb79158b9bdb486ae8c76d06c03838d0327e3dd75879c89299b0f0f4869d0 3045022063866ad1342b832cea7c018f070dfb63a2eed9c895090f0a31870cd144d0306c022100ab3761a9b583dc31b442feaa498499754142665578b7a20426d8b32e3239edf1 3045022100f1a4e5b1353062c766aef8d8e70a8249aec683d0791bf760974d5cafe76c04c7022013ccb81139216b183b6b8594e3c20cf18fc54896d054d5b38f9c466665e706a0 304502201c1b4762a9fdab9bc636ae3a9f09f87c704b0032cea3b92f3fccef37a4311b05022100d2fb199e28e2866e277a02e2996fad12ed1bf3045dc36df5d6fd6311f98fc843 3045022100a24919c62d3da916e4c66517f63254a0d2c231d57406ac66fb9969a03e6c48b702201d1e946acbf59c45a9276fb3c77d8f58624c42e5e5f39d8153028c80f3eb33b7 3044022054f3494d0e4d1fb4587d01a01c609f4b50c94acf6e01b63295adca9f4ec5befc02201bd79156df2bc525517a8de7a2a72276ecff03260eb32589a2d8ebf951693a6b 304402207e2f3508b3a5eef724f5694650e60495bb53a5b9fbea7a2a4611cb77c643036b02207c840c9ead56d83b28d2e4e07ac0942e3d8a856a2bdeca07f5ac07dab97495c7 304402207fac78f9c5665334470311572bddf115e8129e851951df03606e634f46d7e48d02201ca494e71d354cb3a68b8e7479bde88ff83f0d79dcc4063526d985413f4d46c8 3045022053f5949858eb5524b22d0331fa39246e458b305480f293f92d52f49edc206ae1022100b91e8484a6dd28ed36535c9a29d9b8256385bca984e877612abb1c81e522face 30450221008cd4aaed7bbd4ffed535961c515efcd5b4603d28129ee624afde94a8e131dfe60220456b1716f4ad639497016a907c5ea70e1965626ef504587ebd10cc309d36c0be 304402206d6834e25f469bf4c1c26af4f09013815471d8b1b0a6308871fd3578a513a3d302200dcf282d8e940799c40373a0a21c1481c3230a80b9d80ae43f2ed3dca526bf45 
3045022100e7bb52262a1248ae0ede7c39f4c437370409b01ab008984a314acccc7dd0e79302200ddce7914f18b46117f9214907cb6e3547a0018241413a2f5839a897acb1b3c5 30460221009399f69102ee32d4add72fff6ff39b9ecbaab96f3e7f2406ab8562fc30c1d3c5022100cb59bf62727e58efda65dff5670a9af27f0f6fa629d9182957b5c560ffa62f6b 304602210092738c5e86a2b76d07a73e090ae7e0671ca4250dbdce068426363903137ac91a022100c5f0e14f4cf008388cc07a950781083e03e6183a1eee94b1906b2d08023a8ac5 30440220486543daa0e550ec11996e30f2013e34266be337e7d8b9702d4ade554580cc5302206e09bc59b8f8cc823861e2f776f75bf30c29b1230642c327ca77b964c4a41192 3044022010abc7445424720502776583a0ad7dc8947e345e260b148149cce5eb430aebe702204b144383a8c9371f67211c7ccf7532c616520c2b840428ad75196a030c35621b 3045022079dc27a513317da2ea1fe311b98d223885cc1e3574d6e0f99b6c1350691fa0a7022100d7f7f6404e5afd1fcfc4d6e70d8579f2ccebc811d0be9823e60dfc66d52dd6f3 304402200b2cdce9b5d9daf6415a487aea5ae7b8d42b058be3d471ccc612f93e4aac80c7022006402b1c9a8ef29b6d2d1e5d15955f7b4d6db2b9d5021a57fde77913b4dd8944 304402206135d4dc95e16e976cbda43df6b1e2852ffb426e9c3760b76a7945eafc2b70e302200e651292a28d8f5e723170c463be656ade12859a67416eb4a08e7824c996b453 3045022100a2775ec2483ffa8dd1f90f3d8b121de70e4e998a8f984d55d997f6af149367e2022051c46fa13cfce093c6fe7954808881cacaf937f49d4576bbc15b0c603fc5b651 3045022100d681a3fc5ea5c5e26c0fb927cbddb6f58320e6981024071e74f6d604f20e9101022078e27cb04cc400f0ffb30227b7c811710b854b381ddd26f172f21d730dd4cdd6 30450220224afc8743db891526c07ac7b31146fb2b4b1b37eca6e3dd51a94f4320c0e465022100d774fe798b5f2ff67701ef9cbda509d3257242a89b8dc746e82ef3599452b4e7 3045022077ac6e4a16dc3802e647cb358e2ca2ab7be9042b3499a868a83105ae0d780608022100bfecadd4dd149cd4e14344071832954dd40adab3ad07c452298ce3dfa716e558 30450220317f65af244a63a1b6f87e1414e25434a8e1297c534f6367bce315e10253afb0022100b45e595fb75c98260c9767092eb082749fc4786de4e026764b3c340e527408b1 304502201de6d61c9a681f88c6d410251852bbeb480aac7fecf119132eaec6aa212c177a022100bced9ceb88ce482c52d8a9c87b423236ef1d79340db366087d0543ed5df14936 3045022100b0cf42886377ea969735d72f608cb960b638047dcf52e9d8ce3024034f3332df022024cafeff21ac13485dea3d8c6d40122d338fa3b611adb3920bc38622388ee4c5 3045022100b0c98f0bd1fad1d8fee26238326bee3bf923415280f2bbf1be990865e15d980802206e6414205665a5be2aab645b96a4674917172fb2f06864ffac5d82b682ecd369 3045022100ee0cafebc8128c3b7d85690cb265c9858ce35aabe9461df74937670a7c61a3f202202d73c0dd79bc77d7ea54df1b8c2fa73eefd553520fdbe59efa18e9f3964594c9 304502202a9b613a9c8294fbd41992e16602c4e8ed6ec732613a8d0e33a270efb73645ad022100a827fc6a29651be0bea8b7d8d23814e9efdb13d278886469e9044793e7993592 3045022100b4fca935b329cd78f96dd3b24c14c95f6c19a0554b93c62ba86b8f143c4c47d4022038354132cc7166d0317459e632aa43a19732fd6624e6950e04ee80fd6ea24580 304402200a837bd405dc9de8b182e764b0092432bbb2e7669e2b95b2bd19463953637ddb022029d329b0e153a96ed2fd674972fca5684309cdf66a9fef2230ba2589ad1a12a0 3045022100b5d2c506078421de8c1983ebbdc640bd5edf226f3b54adbb231e48d15c06d95e022006f4084c0c9a23f4b88c9e483487fe24a5499ca72d85ed20eb5ef2bd524dc127 3046022100fa0d66965650d2995b088cf91d396f1babfdd24ea584daaff8b7ba0018add3ec0221008d72cb12ceba675289fdfab688771475a18466d33ad932a220e0fb81da3ef681 3045022035bcf845e974c2de3dd9a30157f390f969cf531bda9d2c527490af5ec862783c0221008ba26b92cced44b0cf8377b95518115ddb3be25f56efdf4b054c27170130c58c 3045022100cba367004e196599ed1a828cd6d0af28c8fbaaeb0f0d8dcfe1a990aded865edd02202c750cf15f140b663706faa55fb62589a22e859c63f63944414463d153e788fb 
3046022100a1cb2d5d6cb8a3c5a9c56d262ec5c371a424d2ae2eef9b0a08c30d1b14f3df95022100acd49ed2d1c11a652276e6c71de67b360d624561da1a40cfcfbecd721538f122 3044022010e9005fa3f1792c3ae415b3b443c0941cc47c39de3b87aa1a178eab47f471de022013dd4865098bc63f29d4ab72ecb152baa17c40839f58e48fe47b5c32c7f17f56 3045022100e4f5d14e12376ddcee127b28ffaaa9f0352197a8e204825306f3e045a94db8f602205695fafa8e26555d994309c9a8dbfec500e0976f6b502ddc3bda03ce978d5b05 304502201e98a7cc05843b7caf25e5ded9e4004080e1d4e89e6aacfb8abb44542d288f7202210089c74b72f2498ab568224ec2939ae8b954c1b949801cb5a82c8c9e003f968251 30440220478b950d8995af02730ec624e3021cb5654d762abce38ac9dfe00659f41bb2f80220161657fed1be672b3de504e76c8fc12a9b66a19f15595abd6338fcfea063da1e 3044022075d3d92acea1a7d8313d386e81d8dfce09a77a61cebec38c823daf2cd758a468022020827a199574a8453856aa61202e8763885d377729171c6a78c0d6e552d2098d 304402206b756f2c3c3d83243149bb3493e177cba44046af748d7fb2b7ef37469f2e3acb02207d4cb2dc7fc9405eef4fd3d795fd17fddaedb908bcb367e2909344ab9c430134 3044022041b656990fabcea0ac906e45cfcca6e6adc8ccf08045c5b6ec08d65a344dc9dd02205d6e573e31a94716c6a0a1bfc239f337d0495c22af6cb0f100c4521672e31493 3046022100d127ee7ecc2b04d13095344d306e747d5f778aeee76f5cefbc312998cb9d477e022100f56d8fe404fa8ecb25895b818b37101791ab0b562184e04f418078b51a47f66d 3045022100f594963566fc77d87d65d08890600361c3d779830c138d4862eccc947296f3fa022069e1af627ec1b2cd6c3d8cd8e702f84448f923cb5944a07208b077a95866d205 304502204217e9055944d124b90f35042a585ca9658d97de25d04a7ad23109c05c4ab6e80221008af0e67b15e6e9cbcd80f192a1337be478529e220162187ea440b480f34f0efd 3045022100b5eb676f51337006a1309da7775971650e7a44475f1d9ba01b11cd75395ce697022065787a1553e195b14a6c668e6251d40ce0cb5e41a0f9da87569b594ef11e05ce 3045022100a661c247e86a94eec785434ee6c421c38651333421ff21b1fa6d540abdedac6e022061c2ef752714112551c8d714b760d113d34eed637e020c8dae1cb27725b7d27b 3045022042e95ab134a1615c43dfd967651dfae893c9bd9a1602c2b01f0ab0a58c2b7b3f022100898492ef5b1ea79b344e5269cf1ea5ee9564505436171d5c50d43f4d33b34aaf 3045022100ad75e8d1345a34d0e2d504400a228b78668bccc6177c2ed2d6e5a6186f07b24a0220793b38a7fb927e6819bc8ff5a54f130e2ebef4b3a5ce960259aafa6079d24917 3046022100da30a23bf6b7bc3ab240d7f7363f4845499ce329d383767c0987a24e15195293022100eb7c3a3d833cb3bcdc4d4991dd178c21266bb9666107df6db45f66c26e5eadaf 304402200ed6ea0885981c4ce255bc405b1809ca15c6d92caffc0d5fb20b4f23f0ac4f3f02205352f972cc2f8459381d9d7c378799f100165cd98234ac335344d2660a5dab8e 3044022036a6c4251d29215f45f84586f18dd317717101e3a718b14a867aeb671fed5785022017d398763195e728026c6ce4707c122dd84d075e9a7297a0453082470e8b996d 3046022100a271781d80352332d8691c5fb067376d1744214bcf43bc50fef20cb91045e736022100b235f103d8fe49e2433d1bb7de259d93bf907f9e20f02ec474037b412f2d073b 30440220339953bd22f71156a569c69a11909114121b8d504681581a1570b4aa8df92d1702202d70fc2ac499ce8f3dd84622c62b3fb1d397652552313259178d8d6bda46b04c 3046022100a6bf505d2069d514fd18003fca4e92d06a68f53fadfced36309cdcf710ae65a5022100c7f8ab423206169575690f0a361fc5302df73e885d3d82873322b8f6aaab9640 3045022100ded80c33b762e17a7177ee4b35a4dc4d6a081759a224ff59353c7559d5407557022048dfd2e3da707bd74e567d627ccf66e9f53d9f5e03a056b85063dc760189d5a0 30460221009425870ed4ea64687b3934c7a001cdd8b8c27d8dbbaea5f212de3a4b3157837a02210080404932db50465f24f46b42870f852c736e6678b6ca46c9da06393f7ce82fef 3045022100f17f3a25302390604bb0d79cbaedf04c5db30c8893852b215ffea90d49d3734a022029abefce7da954614777b652b44577e27e64100f3e5f5b302965aeb524d737db 
304402206050a582a990c4b7f29cd8ce10041fada201952c077b6826967d36a390edb4c6022075fbeae00fda6b33841cc76d1667121d253aca8fb532588e257c2ff7c3da471c 30450221008e137e4157625e1f0c81627b62873898bb4eeeba33ae0f6197a7ac349bada03b02200ca37df11065ec8e074a19f20e9586d4e4a0401b0858305c83e70207a8ebb609 30440220411a99d77c6a7c6260a9201fd30b5bed2a300b3c097251a39d6c41ba106a0ff40220021d7926545700e861441a1fa170df984d82ce10d8eeca2a6d2f03d8297ee71a 3044022004df4f58760e133ef06fa9668036fc99589a8c9e6233a75608669848340115d5022035b9dce5f1192e17a596f7f375d02a6aca46a9632b03b11659caa6aa4b1fcf7e 304602210085176204ea2e39d0bd8c00bf5b272f5d8d17820bf5f5ad698e65efd8b5db1a45022100bf0fe4968a863fae162ca50d9ae5e50dde55489c93a758b91ff0a8fc40ca1060 30460221009b192e2ca4cd22b362afdc9f89cf1e6e230ab6cfa3bc1055a34e3d1dcf958d0f022100ef16915e1b1193cc7a138de843dd2aca0f8d91a8cf014ceef3fcd5f0e0e37735 30450221009c752223c0d8afdbc03fe227a0239847bf135c245c13f7e792ab87e2cbfdf76d02200e42b39978834c7b084b22e54fd3a1b6048174feb120575e73ba0c85754c5e2d 304402207b25edabc97b52941c9ec95899a87ee1d7fe318f212594d4f7b3e74eeb9f5077022006a976b84e8d7b820975336595c441a0b76a4c542083369ec0421c94194586b4 3046022100c723ff1532e4b304fdee0acfc825bd9e60023f4fe3544bfffec5cc3f42d9494c022100d46bd311a7553ed8bd22e2fffce2c336b08af12d18e00efd04a10997c5504281 304502203e96af2b261a4950fb61b8a80f139a68d719dc1c01097d299500606d3353bdc4022100cfe036a49f05cabd652b154368b5e66b78584b492a6f5228ad8f78b27bfa63d2 3046022100a117b5cda401681b967d1d126010e99cf05bc75555ee1e9838da0c1bb73364060221009b10dec7bb4d196ecfbf545d14560a4b9e6accb9d478550a76405d03257bf2e9 304502201179313b796dd4f3165beaab984dab89624d6413f28879bd28aa12e25cdf0f4d022100ad297ab40217db9cfd8748046c73d6fb056eb52b14dc29a5048d8faa860ac237 3046022100d288fb7b5279200483f5e57389ad6abc706df188f8946926afb6121ac5fda1b0022100b066e80ae40911f2d828a07a1df24a74ae2a34d610983d09276bb9f3773f8bc8 304402201d5bb9dc6d9579cb76d6b791fdb94534b9f09b4cd86d80d99047ed549ed7120102201917262caa16d31db5b7acd509d57c44b15621a78bb2531c134c268daa04ae9a 3045022008beb51e12e7101f554aecf17941116f6608ed316f476f53b6460b10673b554d022100b93cbdf4908e29e7597c7d205c8920bc165239bff06f25ec729a277aa47d8431 3046022100c50c578c232c734085fc6b1f281269303d37a2cb736084f7a6eb9327d9a5e3b2022100ee3d0f97b20f63d801afa324369776cdfe33b48e519baed0f750fbd352f34c3a 3045022100c83dd08af0fc99e000608c185eb02e34f54e390cce1a8ebd2b8694e1154e97ca022015aa1364e2184315914e7de0ab69286fc576661bf3a8a9c1d1ceb3b5ee6fd9e3 3045022100dd62c901be958c62881048d46e24d85f000d836ff181dfdbff432dc64618067302205e4c786c5b301d29dad748cb4a1ef22ae8a0df2d517d3a5898060286b43e5790 3045022100f454ce7f329b1d26b14cf1bb333f1f85471b93ee8ee381313fc296970fbd3c1902201fd6acd5373066f7cc52d26f6edf51abdf5121597ce10e6c5ec2ed8a2ed11bf6 3045022034893ca6ce42b4561a9a5fa78a43ef4ef63657da25186e0dfb5c4a28cfccf1b00221008570543a6ec6cb87bb9da33f398775d325f0920bdb2c1e38dceb4d8bd3762366 3045022100bd7d8c057021cc6b6266bdf485a6777c75820d0b79cb0edbb992877a633ca7910220010306b0e8cf6f41b49d8f4bdc6dd9138f7dc20ae74955c71d8a08d75bf24f75 304602210081c96d5f4e9d530a8de794a70446608a8bec0bcb79cb224a15c1a490b123a8fa022100bc96063888c0a336851887fb1e040bf170e723c9165c1e976fea3558dfb191a2 3045022100c770999f58d6e0f71e6aadc2b07b9167fd907a2b6b187a3bf13b4e7187f159a90220604fd021f2cc291d28e5441fd93add65785c6f2af6866a0419960a6c453c3e5c 3045022100875b10623c73ac4f22d0ee91d338734f80213df96f9e4154880a5371daf6cb80022032ccc7e80a94b871edad10d45dd3202fb9d3f58d20a1513fa46ceea71d690153 
3045022100e7cca5439daef95af5ce4e5b7b049fafdbb3285ca18832fc9cfabd69c15a594002203ca906ffb91b3f9057b7acfcde9a8b41f93a1daac65a6c3b814e554dca773ba6 3046022100c75c93606ab74dfdd67ee9273ebf84d2ded4ceb79f6bf2ec32077a67665e93fa022100943831a19ff75ca35c8aa987ada56f1dd12004cf4afb8669b776d7cfb39cd802 3046022100b8bc82f927b18257d8a4dd4d1f2bdde5e51cfc9586fe6f0e5359855b7ea7039a022100ea209ae2423e2d76b894c63321644952a21a468d2a763138c19d5bb69ab75cfe 304402206ee94a3a5e7241ff79d54dc525b1b3ecbd4f4a51d251835901a9ce97eaa95848022009e39a32c27a03ee41d507b17001073b9f33486282a3284b4d7f7146af832d51 304502205d2d0cadce7f7a48c222eb263ad99a508c089e5347b8c6dd49b6c31f305c3d25022100840fac5197581c591efbfa2e531c55b41bb02227e746c7c18538508ba3b8772d 3046022100f5ab6a0bf40bd351462b88a6e8523f44293d4f06e225328d94bee06f189f3301022100c02a0b6ca07fb854c534e4fe67b1533468a3d7ece0f6e9861c9ab40bb1a95952 3045022100c1d09f6b2f472aa4fed250bc3076f49f9882d4970edab8141ef768c6512f2f9c022046d378bf4fd32e992fb382813ac5e6faaf2621f7000dff968b3c9d1aef8a3ae6 3045022100c5d340e9f3a765d315696c301c7f1ec5efe62dc2c8b6a7a091df6b248d83ffe502200b31f96531f290ba3f77d9c67890ffb2e33376567989920db1287f3fea211781 3044022073f6291d1355239a6f87c36aeade828cea369650bfd61d558dd3a8ef5b6c0d41022067c0d03c685456e0c23f548462bb25e94d94ceca9bd6ebbf284cc214d20720f9 30450220379b463c00005e384fc13ba1ab4e000462316c6cb8c1ef4cbcfa77604330569c022100d4ad0a8dae78b8cd09ff01dfbea5fbdcabad36718200720a4f90a281dcd0f8ab 30440220695007bc990a44f30a6f32c728eb3554b28e8301e2c62b1090b653a22df141db022020706ca144a059652abb7952e4c733e5f30fd56ca17ca1533f8a89179c24dc26 3046022100e5ae74d95e3bc28f96b986f84bafc5b4d69a64253a3ac79fbdd25177598da4bd022100988762e1a023674d0b62e2dc0ba2c689975ceda5f78d7f2e390717eea7740ee4 304402200f50d239282a5a9c43386766a62bb614d707d0d8a573fa4671f9f9cf85715ea102202c95f5d842fd4b15c1931085f087a53f134d898a773879602ce807c79e26598c 304402203833d892bb6cb1e07951308d482a6b45148788c09391423499f1225ff506d46a02200e58825a2ce5e98fed129125cfe572e3bb44b6cca70bc318a079e20806607ac8 3045022031fe263b9a437ea8fdf9ef20c7595ffacb17df8fc6d0d9c99bfd261ceeba1cad0221009f4c961388b460e8fabcd7fd262ab95bb28d35422d79c9625bfc310ecec60164 304602210080cda55835e34c6d337882c4a3e5e22d512fe103fcfb60395d859720779d5597022100fdad61828fc288a31f61d2c80fde45ae034801466a9d7de1662959ed9d5f4bb7 3046022100fcdf71315f467663c0a92768c1f093793023f45701a0dbe45e431a5baf98cf1a022100f051709431c806b04b7d77ac2195015a9b6ffd83941373fa5ab8aeab544ce322 3045022100965da6f440fe17f6ccb2e1b8bddbbaa1a64be84d11c279da54f3dc6ca331d4e9022068f8e2ab1db08cd497e68a490f3b20cd448647b4f490ab318cd55138c1ef2627 304502206ad6ec750c18349feed7065d2e3344d254b27376cd1f8abc4310ca9d7fc1aa390221009a9303a562a1d690e4614149f0a09c330999ec03adf46237c0f26660f068a9b4 3045022031f68b2bf033251ed8ab3b3d8bfb43e6e0a942c922ee6b67f36801e3dacfd937022100ff4984fafd6855163b47da996703c1b01a68feb080fcc79d34e9c3908f8571df 30440220113c00277f239562ab496e93cea53b02ac6279a852c1253348877928e44bf577022067bedc44fb12e0d4ad49314713fdedaf55cf54f01e15ef12a4734aa248e2720b 304502201ca5747ab401089fbc07224b90571a9a6d61a5d003c2346c2299d59cbefb7731022100e637675dafa2039e68878d36ad376bf612b2b76a6d1a9a9487d5b7f117a66f66 3046022100d671150cec2c5f8d1e8fb6dfccc49d2b4f1dc07b3d6acced061775ea1a1fab95022100da0b8fe3f741d85d0d0cb46c7bb098466c78ad03d5f3e723a13234d97ae612a1 3046022100ab8ad791b32d27c9218828a950a4dbf6ac1845eaeacce20aa2dfb1dbed8d3e140221009a1f83ca80a696386c71e1a36fa1771d8fe2e4f907f05faee39e262eb9c02cd7 
3045022100cb36f1ba1d9e25f50698c2dd7846ba9feb991b7d563e26eafff174f061249a52022036b41e6f2055bd0d05b78f42a039dd56383a6d76e6c85927e7409b3f4886e4f1 3045022100d12a14b517fe7219942a02eaf77fa623725c7896246a8dbc7a95876c1928dc0f02206b08f5306d9cadc46286302b64acf9ca8321dc5b0e3f5710db5982a7ee9814b8 3045022100d2243656444a99312a755ebb198c9f775c3484ed47107790b7d75a7650eb6f95022064fb5e3f32cd2bbe47fc8b52ab293b8354153a946accd66abb9665bd31b32836 30440220152c17e2a36b5695fa2de15085c635d3b74312b118df5e7af5b3cf1889349ebd02201b5451569c2b78aded447e6d542b3ee72cfb1fca6488bb8cd6502abbcfd1b712 304502207eb07c4170a8503956efd000f8d5a44a259a0d715bed3975d56007c8d001218f022100dff73f1d617711dc12982e1547be4226153289f5f943987926b0f18cb5f960b3 3045022100c0271c7f0dfaaf87c8e9f0dc5b63d1f3b9b29333a0bf780df16fb4d09399b49f02207a345aadb2688a9704b855e420c9f5c94508a0bfef252f6db896ae2d804e41b0 3046022100c1e74181df55695285fc0958b8d62ef27fd453b0ff10d83841e67448211201ba022100e251562fc0d9d5fcac909c87eb0e35d67d4cbc465ea550f76766bef28104e705 304602210094b2cc871da0b323630878f82c050ba16ff40ff29bde75ecde7a41d37ac63130022100f1c8e4befeada862a453e3cca23c6fd7c73f71b7fe917f2041c230cd4e7f0cff 30450221008fcc802c6db4487c85cad4e608cfa4a0c12733aa96154a90021b371f853bde29022057504076e0c7c51c121b968263bdf92c4a8d388450d4c768d74ba4eede9ac800 304502207781ba036453909f8c25db9086b9ce641ad5f3e161fb32930fab1c5a0e8861a1022100b6ce4926d937cb5d322151f8a4c47dd89e19877206698ad5786cf405c2edcbc8 3045022100df2ebb8f1074bd2904bf6115fb5500f897e662bc4b7568f13f53904896af377b02207a63de33b5b4b7d8971a9d50ba7c17f485a8be8ba36190612cf710d5255ca490 3046022100baca9cd9e61e8ef72663c28e1124eae9090fa29fa33e825d588ff24f2c03c47c022100c9541fb75fee210bdcb37bd22c5d504de66e510239c6b8cebe63677d36881c96 3045022100bd7dac04b182e0c8445050b19b8b6aeaf2cccde37df48b2f5862a760519e0ab2022001c28386fd3a958431eef96f8123de4a0e47cf9074fb92ecbf3e6f5989f920e1 3046022100f75d493b235391c1108f87e428c12b750a4fa6a9a1639ba524153d3075eca5b8022100e7b000a87907782bb96e75a41d29222ad527f3ac6d75e67c6e74de13dc39ff0b 3045022100fa67522067be1969f3723a295de248793b7955ee7723d0e2a379ebaaaea71737022063035b46a1460e95a8622233243002e0fec14d7dec5aeb7c37c8fd38ad8e9eca 304502204b00f5e2785ad3a86df2d2fdf80223fb0e980797cddccd899b3918a58d624a49022100f32bcab385a7c40806a13de3d433dd42b9c08460e5b25ea20359145df50b6618 3046022100a8ac597f013dc60e128e581a7a913dab979900744a9021dd701c5a7dc907b0d2022100e9c5132b214207854cb3ebeac42ca8f683c97df011b114864552ed6a503126dd 3046022100890067704813ede401bbff6de02f92963294a0d55d44099ec3583c9f82edbcd3022100a6cae165fa728e0d85fc93f38b9a533510d87af702b1643ff9022ac273abd8dd 3045022100a8af262b63d726dd96a2b2676c47a3b5c6b8d27d1c60393a80e2f6831d660e6e022007dc0eecd5baf7479b372a4685c9e5e83f15d6c5e97f283078db92cd5cfa1b27 304502202d8fc78700c3bdca01d05f9e25f2a31c62c34d9d79aa0175db12b0b6567137c9022100fc94f7ef202c487af28f9ab945ee7357abf36154e1c3765b363b3a5475cc2eae 304402203b08f6bcb641107aaab3e50c967e414e5157ba099116565a40efff3b7f477c03022010b3f2b3c81d12d79354f8ebf9f4adc7f82caab107a6355d87cb4de0a6a21286 3045022100f352d52fc561ca4c061c398dbdc9f7dd36badc4b2c85e67a5c03458cc6a6c0290220264f06912def23238cc0eee79af7d98726502388ce46b7f75c965ed51d7acf09 3045022100b0fab053adc275cdea75cfa2b6b3337384c011a5ca247da1f9cb6f0d141ec6c502203c235941b2043ae3d3d69853eb99b74ca1146931d4cd713e29b9974992d772f7 304502205a168108f151606f304e84ef203e41071df8fb2ee9804c2134a9a6d42c844db40221009c471769961a61aff5b2d45a47ed5cba5b461891c867260e86a39fd1e3bf3130 
3046022100d3146b4f946bd21777e52efb636c70b9aff39448d2b9beef7c948fe3738eaf43022100952c7c3963b9dfdef35954f81a964f238b4e93d0f98d4dc191c2e115e8718966 30460221008394b2fefc9acb85468fe61ed282a2886a63f40713fad923d98de75011e4a8be022100bb94b863fd0c978000d0fac46c97635d37bc63035fb7e06aa66c9fc33ff63a92 3045022049a90283874ee2295d10df7cd55a6417d577deba4b43ff51db7fc24ed049abf4022100c0f8fbcdfb140c89ed9b952bddc2b116be1c25d0da0afa51456e8c4ca1864808 304402205e9a4eb708d66f788712170642e107b2f5a0d8cbeb75a4f36971fcb6a372d42f022016a7722bfc1b0a3ff1ff8e942e2b79cc402b13a82deb9ae23de85ed78e2c5816 3046022100d57de6adb6b3c61687e282a99698c425482bb40240448056558241512c66bfd0022100ac684c97e02da5d7909cdabcdb0d89cfedc7e1178400e8148bdd973976005292 3045022100ca80a12cb88e0cdb961e5f76a10a6eaca11ca877eb171f1bc25390549853fec602205d48410a71653eff4d993782a4bedf889a13628a796df03b5e4a4e80e3f889ab 304502210086212903ecace737150eb0d9ede2658765e4e0d7bc789d81b4e96e86a57500f702206f800d485e3ae7fe667e173796f647ead4f9240b3157e6a5ddcee96b7a57ef26 3045022012ad196a8a817c8fc16678277906e6daf71cfa6aed96ea4150d6d484258a83aa0221009646dd46721b7c216d5e2bbb7b65c0e52b4dd9ae1f916e88ed210c388af4697c 30460221009f6ac0649658b1253e9ba60ae866ef39d191f49e8d3655304ae6b779a42526cd022100afae035759f2edcd81bfd0d5677ecf986a106d0823febcad23ca121e86eed3d0 3046022100b6047ac42f94fd547ca04000b043dfe85ad29a0d2eb4d74662b06ff87d7ee623022100c6312063d0aa5492f8b645ac31c866a4084a791963cf01d45c1fe121f4734c3d 304602210080af6a73815361986658e248ec544f5ed67ea3182968b671d711eee8ce58acba02210092ab422611519faae3b4e4ca1f3e714d31faebfeb6b188571c1b7d88cca900da 3044022069fd7f1ac1359d2b6f66e1d05f817307ad2db81bf3fe1cd65f0d6f2f081959160220639391cffa14ceb54dd333d269968efe3da575ec0e8893c4a879c63f1e799d3f 3044022027ad1cfa3b3c87bc602629a1b246f3641d0f2d5925a96556d742c4453bd48ce90220089552874732e088929bc09371c35ceb1b82e5672c61d1b7925588217d0f98e8 304402203691d22932ed3a77cbace07efe02da6589cf483604e30b0871629723b03bcaa5022078844af078f4cbe4bd98062685f1eddb073e6ae91b8c4c772f18075d27c48a47 3045022047c214665d697b66f7c6a1426d6a9733d47f6d7a763d1e1bb5486fb00b012de4022100a416cd01fd090019030cf2fa1879db720ee257a647db276e232ec7cbdd0359cd 3045022100ae6938d87cfd72bcd7624904f2a0175e8f58dea4e1ed6c1013d104225c1178dc02206d7246ecfba6cd3e7739d81d61fbc6107d34b3155299a9c5283b3c9de527fce3 3045022100f66d8a2edfae36b4bbe99bc1bb228dcd8c5cd1add41093c83f5e029d4b2c8fd602204dc73d09f7d490aa73070d7be53451a2b7256813f96671b7c3b012c36974f8f0 304402200b2c4293e7f7fadfa6da5111707cb0bbe1b9445d09cce7ecd636517b4a3cb3c6022025fd8cd219bc41ad67065af869ebe465942541b2846e477f0451da404f1e560a 30460221009c48f3f2ec3862350bf7d75b4403831f6444c96ff551946279663b9d1c1123b202210080d3e92c7d5583aab5adccd11c7ef66bb8dcd7f10950c7093e31a3041c702208 3044022041dbb00e247acf56e800bbea6ab22986c784506fb868ecb8fcd5bd6419ac6e7102200a04c0b5e55c298420879b53ceed688d94b78fe98bccd4eef9e98e7c333116f6 3045022100b3ca949758af9a3bd4e207bc6847dd059805af84c98e48f500373c321130c72d02206dbf4d1f821f52be034dc2035e9c91a27676318bfd8124305bd5aff57ad2cc43 304502200e9e882539102440ca939cccb5697bbfd99cb69c3eb55333ec8c258f5e284e0c022100ed9b6ce384ef2580698b6702c327b51b64065ad4480e5e6d1a318b80663b1545 3045022100c3f9b10008f3ff6b514811d8cc38344603bb2ac01c7ef8f23d7985849dd3779802200287c25ff47c1ebeab0628707dc2c86841aa596b97f962a40a41be0a5342e56a 304502202fa2e120e714fed4811105097e74212b1a298f9aa0b9de861972d3e000a1bd88022100845f82a6a3200ffade7a5b13a32c50b535bf33004fa50c4a04764714d22bb117 
3045022100e167a979bc4f51ffb40fbc0f81f31b246bbd35554545cc5d3dbb4d82aa36fdc202205aed4662a02a945d01d27476932984e6135b2b241831840a88339c57cdb914f8 30440220359934dff735b224ebc0c7e46250be3645fc721f0751abbcd04c29e9dd651a5b0220064a863e78e2f0d1366c29184d3b237a1d35b2ce5aec913700294484438720ad 304602210097bd8ada99358646d6520aa574ed7ed3ba2416df58180bf858e11789c7d07c3e022100da9102494469197b946157723c30a4fd191243c81cde0290e5354980a842d632 3046022100a348b531d611ff3a97e8c990422a49bbc7ce91924b1570fa191ef68ee182c8e4022100b5247606260192ecf3ddabcab6892a32e5bb1158449facf0b02ec736754d5fe1 304502204e7dccc97072d995b4241065f122667f8c85b50cfdb464785b251f0c4438f080022100b6ab74e04d97db6bc64baeca592c4724a2afcc3d6c048796d36441e4d31f45a6 3046022100a000f8dab4df547215a1123f724509f256ff1a43169515f62f7d273ec662bc19022100bf0b642686e16a3b888891c9f1ba6eab4a5fd38ff717c5588fe4b18e96133c62 3045022100b6e6ca9b8cc424f49a285100440f0a8674a9528155719bca48bb4886353c296802201f01d5face63e141310baec5eee4ea302e1a7c1e1b0fac9641615fda92068969 3045022100b9335a45d241b2354f2f7f352ceb757a65befc6c7943e134b4a6248e8cd74c2d02201666830c2d32649550b8126048c754b10f3e1daa97d201953454661ba387d582 30450220765e6089c21532f9a5fa2b2b4da9a56161873a12d8e25a6d631a4c5b4125b7b802210085a8a08b62ab51b408a0bc4f588b37bccb43f211b57895a6565018b5553fb348 30460221008417b766643509fec5c5b73357131755388f17fab2cc204b764c6d32c3bbe0860221009e79266e06057d3342b76f7593a662cf6267274bdfdedef6b2d6a760c62c9a4f 304402201d589adb877705b1147d71f4b872c59d1ac1a68159618cc33ed7f3a427d67fee022008f554675c41dabe321518a51c2172520b61157dd63a66a524e1eddf14c4e88a 3045022100c97c4e0fe8cc2da37521c900d8de4aae25eec8ae256b9b06c23cb5bc7f8d323002203a25cba895437dd3654d1c3b5dce551bec89358c0f851cf2e967affc6cf20d5b 3045022100f230808622bd995f2cc22d0a08716cbec19666cf9b4663dd90ed835d6ddf805002200f4f4960e891c4527e1ee6c68aa4a1d0b01cfcff00d8785f817690f8282e3f7d 3044022078d794c811e70950feac5f9bb0f40d3a96cab57b5d5909b48dca42c9143894f102206f1da1811cce92c2f6dc2a55cc4324cb00e9061615cc5f5d6189b7f69809e5e8 304402207e455bfd2bfbc40a4a026e0f63ed4d2b0e5dd88342cc8151b45e56a9a73e77d00220685646541b782d44914a31911948af792b1f0155e19f28645a0811c40fe2ea9b 30460221008cdb866c933f3f3c2ce9e601a44ae0615423e53d435c1dba1d88677a84781976022100a89a7deb8877570a794ae55ff1f431fc18c91a3a1a81618ec9983dd4f4757a18 3046022100ab556bd6ae6fa2f98443e1a8d7e97818130282a67a7911f78dff0a0cac44f7dd02210098315641e2407b7b49675f411a57391c0a6239c740775973d916aa482497907d 3045022100ddbdc7479125f67c9ada2df11c1f0509e67a489146dda6a6e4dc5bb6b5be274f0220390d2b710bb0cf92e3800dced452721521497e7484d3b1856c184b873cd60e39 3044022052c7d35e0167048662519a44bb14b2f35393464914ca78a4cef4dc47a5a0b631022039057e595d24c324923b8dc6b4318c8497093c3c8aace7f5991fd7205ca154bb 3045022078779bd2efa7d6c626527faca6be48ba2b8d42bceaf055acc0623ceecf5218dc022100e62bed10cf99e3eea67d6a5b2f98fd5f0bb5514b3fe3cd94cb4cac1f8ca9ba26 3045022047fe20325a477a56c867e4c4a79bcce22d48599b1c00d9f73c09d9f37a5087c7022100deaa170f450d946a36998985970a841a8e2f746a0d28e200467c2d0a29630a9c 304402206b319510f670f5a08e816c8d1b4600e95f538d2b4f148559dd8c26f75db1caca02202960883483d42a9285ae4c3413b9bd62f3aac8abc7a2c4054b9a0b639e6a771c 3045022100e6e04cfc8ff4a0dfe4507a20828ee63ac4d4e3ce35b8ee55f72463a734fd182902201efb46ebdaeb562818cdd51a15e546d40af31eaf4cab95a45565e3b49e31422e 3044022075820b7af5ec4870c3db762a9eb3e9c5d84bfd2a4dd5b375d41832ab1c44c54e02203f7f5c38ab490cecd0440a2967e6e155fb2495080bf881ab17680a7d6a30500c 
3045022100c79f125e44a494e56e294d8ffbd16e14557b6226512ea38de8ed8c7bd43503fb02204076528c3221a28aa9faf0c29ef322768a94f7120722da46e2bff843fb363189 3045022100a41a68f54d4da15748a072a2030cc093c705dfa986880a6dda37c98734aa94230220707486161ff366d5c3df01825fd9fb9b16142ebac5b6e5fa334d1df3f8ed199e 3045022013a20ba96ff3f2dbe67a612338a2a69fd1ec7f4e9cebd2ee5dbbff792ab5acf602210094cb8088ed07e842f2abeb15d7b5c651bfcc13909cc35b496455636ca109288e 3046022100c8a2a037cd1393da9ceeb1c92287e5614b9671df7496119c7d7614f5ca31f637022100b846046ef89ad9d8cf3638889efc3f8145ad2bb77b831964ff60d0466b9811ff 304402204819f8796d57eca71b60b889631cb37ad8d4f4c309ba8d021453254b9bbca6b402204192153e16a1fdf691fb305c7a73bbfaf7bc1b994923e1f81707ba8807165223 30450220624129778c9335f3d6e79c86e8c373f5daef4a0597d16cbac00b4dd1c1e062c0022100d1fadf708e77003460ad1eccfae9d101f0a596974c13f6a755f38914ef51de20 3046022100fdca47e2466e73920d3b9195a9df8e9632f886a1236c55a0bf2bb064ab0fc386022100a2f23675a87c286ccd2da39ab964df1919b8b6966dc00e9681cd9d5efed94470 304502201617240dfccaa014002c8cbb269b624aa1e6f82c859fcf4b294a19b4f58739210221008effa2875b4dc952a2be557918b98d6d80840251e87b617957741a8bab5c892a 30440220185e72021e73fc04ff6a980b72762de7038cc2a17036627275c572f4068b47c202201ae1adc069a30589f74a541deee65c2ca12531a643abf4b61a66c7cfac6278a0 304402206b2450dd721168c5d225dedbdc41d483c0976ffe1e8d60d40602d2fb8c369ab2022030ab166c7da8de7f466321b6e6eade43c33b2cc85c69d40d6095042e62499c7c 3045022100d4bb8cafb03335106be6f63ae861e9a181821f0db743f6cf77f2820e2c19ab1502201814c5edc5ede3d0d6827c4bdcae8134ed77b84ecc7928839b09aedfdc0d47b5 3045022100c5f61ffe459130dc24f772368e6ff983e56d858f9cdfcf6384712689bf0978c1022029f32fbdd5c03c61a2e44aab3a0a758c3f67be05cbb75a7754d4272177cc8d6a 30440220658f401a1add4e369c1900dc55982c0a71e0ee9af7ca75a96b5c011db9de38670220678ef10937fadfa9bb67a365e6471bc50bee2182a7ca4c7efd7add991645cf5d 30450220278b4f5f834be3b04965a2c8ac7da57a028bb4a15ebae156b03a3a10e13949180221009a6f025c390abad471ad5d6ba3bd3d7fe5e480c86a915539daf83b7ab94a307b 304502210081c9a7047cee1f9870c18ba7b47bb52baf15cd92b25e943df1b0ed5b5cf53dc902202407d54fca4941f9b9a12239ab914206de5c775faa7f287c85d466fae48d50f2 3045022100f570ec812695813fded47603cafd200aeddcb7ec40f556e3b61af583ea2b13e002203f9614dd3508bf43889d913ff9e93679aa23389c2be91b9e5b803a681186663d 304402203853e11b246092f3b8d4f82883d740e4f784aad7c9c2fe6de973f688eed988610220659b8d8bdc5a3a49dde4696d5f3662ee292f103ed10abaccb24bcc4ed5442219 3044022015082862ff4af549d86299c00c6f0024360e707b0485981139e7ceea0777482002201dd40a222838f108f907bc910037e4efaa649450e0ab7c697c8e49b3a10bda94 3045022100ecfee6389ba29a5e0978dc12169dc9f52f2a6879ff37133d59e53f1bb0e8e0db0220370a91f8acddaae5efa53da69d02ba40965c197d3f4053fc5e291f6c0a411a7a 3045022100896ff001d07acc56313600333a2de28f19e2864a194a0a517b78d40ae532459e02202b9e01510ad9a4aac3caab00a4e85ea14b8fe56d4d45ae03b06497c7a41cf77a 3045022008b6ac5e810bfae872ffc19384bad6538b11f7e8fbffa86f8f0ff972281db3ce022100ec05df71c4ea5195f141de71fbb544eee59dde99a154691a17d2441a9eeeda3d 304402200d1beec03a48b36de6325715e66a983d07f98a139e79b5ca10f4fb73e0088c2a022051f1b716ab58cf5c55e75838ded1102db79be68e6801b329705c26ddc5ded851 3045022100c469f37438a7c01ce4f2831c6f066ceae1a050f706f03351803f850e0a4527bf0220720957f2b84e2bd8a9221138fad2b25e84218ff6fccdd3086365e793cfff2997 304502203c30cd2b480cc08243cb521803eb68eb83b1153bc24fcbd7b02b98fd710acdee02210092b6c453e79fc6c8e79eac74ef39d05d65701c2f711c8bfcb5d7e7acbbde8791 
30440220680094b8640caf77475208f9e6389242698b1a0d8fc5e754eab6c018329129fd02205b4270fd376892a2225e404be8e8c56f2d2ba47a393e8a05a81b99118cab1baf 3046022100a74b420270acdd8ea025eb293787e8d0ee712f6c5ebb8c536874ad0d301408bf022100fbc5d11e7aefeebc83f6448b2789bb9fa04343af95f287cf34700839ed7c9030 3046022100da2ef69588da2f5314087419d321aaeae56574808e6ac4262d69753bf3d8d1c5022100a9e4681c413d26ebb591ce9edf86ae81f1185b66a0cbb251e1fc372d95ca68ba 304402200b4da6cac11ff0c5ea8faf05c83e9fcefac18f5ddfd2f3efd1b5fbf3c8909c32022046535751ace7ad1fb2d8321c6da2960eae45832032127943da55d72bb659044c 3045022043bed7770b11d60c248a1558e74a2cc4a9c27eb3b3eaa74acf978e81eff2b7300221009770781530aec7a91307c4393aa5d2ad3b91f89fe05042d03db4329d895272b5 3044022033fe5a7e430473b92034a2a880770ba40160708356de81931985eb88d729a85402201da814c91a922e9e56d4d239dfeef95cbbee5251c30a35a55208656024e55f2f 30440220442574ab0cad3cca3bd9797f3b13adccad31a4a112b211ebfe89f60b9e596cf602202dc594813893c480225d3867f573b52360334e41158cd8266ec003fc64cbf61f 30460221009316f2c14e5533f80f534847c5ad8c4dbab15291933ab4a05e4c73d8077f5af302210081b6d988a045d0ae7ee92fcdcc436da147e45695932b07dd9277fa85408f197c 3046022100ed36c7800a13b0dd362ca18301b7396efcab4d7e63ed3fce5603521b5f7bae940221009227941982ca83851ca86c55953503c2063cfee3c3d7572d223cdaf193cb596f 3044022052b8e6c749c0524ed69ed0b756d8a47caf6a1a019cefcaf4a870a58721872ff002205d6baa35fdc4600b4f3193b5bb32f7fd0e09690213cd23f57e4af6beac38eeb8 3045022013429ad4b381c52d137c57b566f92f03ecb2c16d6ec5c5d91e8b5af8c408bb9d022100b0ceb740282080b10b3dbebcb118a1b29b0a673b6ec83125020722ebd0e19f43 3045022005dde47da13e53e41fb5af13a2a36993bc3db12b461f879c7524c8e19a955312022100830fc7b8ce7aaa476ab2756ade99c22c6395665b8a0e3116a6742ed936f6eb37 304602210095af64b820fcb6530520254671985a67996732546983528a879a93102b92973a022100c33a654e05c34a15011db33f3cc60b13edb60c047cd9b86d578d9ab1d585d2c9 3046022100c8612cb0337214b9013725aff3a70d5efa765e516da933869a3b5ad2017bea480221009028e072d22eb5e05e5c7ad064d5bb6e49f919cf5c7ff592095e3a717abdd9cd 30440220768fcd7d93ab20fed3f599fc90c506cbce4b4d260bbaaee6389a723ff0d1dc3e022022aee8e0eb62ad9e44a63a9b2e02df479bbb506a067dc18bdbc945c205df80be 30450221009a3d411f768355c15c77215f83e816f47a4ddf25db9e6e16d7a0eb13114594aa02201b6944b53f66abb42fedfe54810c57fadde9c1d0f7fea2c0ebb034bc2f91137e 3044022079509057a6fcbbe58ee787c9ca9f02033afdd4e37ca54f7f9089e59f49c8fb540220226717c425a96684e0ffb0f7f30877af758a2dd0f6edb3060122d2b123dc4097 3046022100f746da87e3b75043e7a01e358ee4fb7ff3fc04f7afd56eed3a830643b6fa12bc022100f597fb18d216881cad878c218bb295a1b73bd57c5af660b9f07ff32be428b979 3046022100de05f8ec4476dd7af0a4e9b0d7324da95d395b3ebf39e9e4fca5e3b90f40b66b022100af3e5ab60a28578e5ddb5c3145e596ed50d959ace3879e2c1b32d14f8466e479 30450220498942147fd410cc50046d78797730247f4be43ef0721eb126bcee01b0330767022100bb3589fd4bc09c49fe540fd8a2dd6eca40a9db8e120f5dab0682e650e0039bdf 30450220384d9e37157318125ec1f83beccffd1cf22594f3f0c51cb2c49e9c475ffa06a9022100dd09f88ee7cbafbccc749ea389ed7013e107e98b459b05bac3144a3c990810ac 3045022100901e45c79b2e0cdcd2347a20b9b1864c7526e142c24df1a91337a3f36ae89f40022035842f0228930c0134f2c53731c3a3c379e513a2aa8a470370a0d7cfced3cc7e 3045022100a5941b56650a977950ced4e85128b70c33245208a44378ee9deb0c6e6de7b93702207a55dd8cf93c5d0ac80de11a365d047bfffa03cbaaa49cbe3c2a7ee8f02996dc 304602210084ee037655a1c498bed57190c9c2741a739b4d6546382dfd9ce6eaede366c2250221008c7b6e6c978496b9f2881250468676ccfa0fc7893d688efc16e1f8531942ed18 
3044022025ee7bf7b23de18ac5712ae92c15503789492ddf4c90601c340fad5ae8eb869d02206ec2faa6e100b3370aa4279920ca77362442e1fd7cfa3150d21e381a9ec141fe 30430220757cf4fa7055f8b3ffc1aa3d3a90ab64da45f37b5f70573dcba661ca3b214262021f0dc108c83b9c70c65c08feeac5097d868d30af69014072bca9a6bfa41fd661 30440220267eae5ffea82fd9be4746c8227f303e4c2ecc45cf3097cff78b87b8a8c94eea02201640bb87caf7ee931ac450cb13d0267a99f0b8bf5731f4b1198fe48d407cd23a 3044022033e6349a7bb0ba8b9b7e8a572c5bcf8b5e70025ceb777b36c4d24414d11de53602202b9a7b9e8bf86a784eb84fb3065558c0daf4d1ace3582811347726c6ef7692d9 3046022100ee6a0f5cc8e21fa2956985bf7419f4d5004be4678046f9066c3b2e512637574d022100db030b32967356151a0ced7b1c5d799dcbc52d8fb46b1509f1db1b8f36f04d83 3045022100c4302dff5bf64a6845e922d65b828f4cb6a58ea9df7ba5b7f7191702e0fcbbff02207a82767ab0b6c4b3ead890023f076dbebc6f646e8b611d7f9384f37468728850 3046022100ca8f5aff760865225b7cf2b6467f1b85d5f4abd5c9ac2b1b92a6e900fe95333a022100d0471370a0576933b39fab830de85bb21061323afcdee91c4a434bedaa11dd54 3046022100db3ed4f5ad2c6736d2d0c61ffdaa97a826f8b550f634c51df44d76f333bb5815022100c6de3718614ee9b432bef1f38ad8b899e5e543d497e30964a5d4565f4967eb9f 3046022100b3d40bbca8c47a09a375af60919db8d0d609ecb90c354170a834f2645b14ffa3022100eb9236abf3cb8992dde4fe279bff1a85a5565c58d7501450c26b5878921f1b47 3046022100de496f0e6c71298b76afd05da1e3c03f85628c8bf7acca59860e05cbf27821400221008235dbd2a1d8ee8c862c89226131c065d09dd16ee39d67ea24f46f21addaa549 3045022100ab301a9e1263ad2f4af56f0f0d44ddb8fbcbf330a943ccf46ecdebc00c33336c02203af4dfba1fd98ef048b3ad0b35018186f6d50396903597e4e7a70edd307ef09d 30450221008b6894738f35e29cd2cac3f8197e42ab54f328bb97bdcf81c4413c55ea4989440220333be02060cc39e7de1eff50ca49c4ebcb95fc3092a897c58b6b71fc9df85014 304602210090f1ef9defc9c3dc24154abef358d2e8dc3752dd8a7e1ba004daab559271607b022100e45d65c05e0edb2604862d21fc0cf204893f9f72cd61b4eee525fe5961b9fce5 3043021f502ab1754e0e8349042891770dc86593966f047a7b24c25d1434b33c31a2a202205b66d69b952718fc64ec4d541032abafd3bb1afc77f50a0c2b5c287c5d24c6e4 3045022078610b20ce327a835ce4f7feeee431e5d0e5c83a3d0e048f47702b03cb5c1458022100cbfc4dad84070b87823ccda6a33fd244b254c9c0b0d8bf5085dd037bd774cf91 3045022100fedc61a46f1640adddff4fce3cc808dd1b89987f0618007d9b55f657c8972702022065353601fcd43c6041be62f51b745951df7f5f6fc155d4909208358cd45dfe62 304502201e24a259b2d2a8c3939d573e7654cd7c796ddf6fe73ae59deaf1771c6d7f072f022100f3caadf0057645aaa42e147ee54c6b1d42dd9bc58751a54bc9178feda479cde1 3046022100bc3950bc8d4136358f640a14ab56826a713e893e8ff36ed45d6de9023fb14cf5022100ac11d3973de3b59a65c6dfc526ed5c4e632fffc71053afa82336039b7267c0ed 304402206d52448752bcdb2f66c0910bf055bba6f559d7507fa7eaacadcc269dfd7544c90220465a65a62c06f73d66c05fa1dae6dbc3b571ef212bafe22d860a2d47babe8d5f 30450220501c6caa4a317244444ac7b579ab57fa9bdac9db4e35c51f34c84f189cf67ffe022100d61bbcc1686aca023494ddd8c923dbd46803fa97d69258d766168ec7254ebf8a 304402202726547aa344b70203532405d40f8b9870cea708cfaa91f03a12e2457ac84f2f02202a7d2046d8d0971b06437f92018e60e52d94a769b4afb822a1c99c9dfb8c4031 30450220700e215fc59b3e560e7ea9e8a2f3db8f40966c58bf04e35e375e14232624e601022100c4706129ff300ca9876d57c7193584376f1202321323b9ae1e78bdb8b87a5dad 3045022100a48b59f3e4550efdedf795517c844c969a7806b8febddfe22aaad63d9a97731d02206bedd919f321ace0ce917b779f1d8a2a4953e0e3b75df49d6e20b2176d2e6457 304402207d3b1b37aca27826c4e85b0a69d60041bf06a5e84d571c03aff096ad5ce76a1a02202a0a9c65bbacbd256313d56aa3df2fc19d1b66c21cb42074ac881ec29c4f3088 
3045022100bf00aa51bfe00cf9a9653a497b96c8bfb358ecac588b24a19ac7d8907914b27402203675bc833e0ae486b77e3f7df7f974c69e801d83848d8bf49424801752777e45 3045022052bcf38a5592c4e1e274f16faaf50db17d9921490ecf11563d4d3757d544fe9b022100958dc70d69b9167d2083992d8fb3faf33b60d40e6bb989ec5f40b760af2bc9cb 3045022100eea4b5593dd9f8946f0d5511d41359e9f57c1a5850614c469f182ca50985a152022043aa5277e72b19b9e7396751c6b55f0b06be48baceb601f645a5b433c871dbd1 3046022100fc5fdd16dc4596d3771857f3f06081fa1a2b3f3c96ca047153d4ea8d9cb47cc20221008be8797fca119681311e619e264343e80cb5dd6cf6e66d54fc85591956985e64 30450220532e67307cbed93d52e14f4c4f9fcbf3e6e3107abfb41682e08d33db80d2d841022100e1a1b7195b5fc3bef4192ebb7a29518c0d87eac5f963939db984aac8099b7bd0 304602210086cd7b56d95da07ded04855db69c29a4f9c4e35dabe4115d44a060855bf221310221008ef3cbf77564a3eeb479f42ed061adea23ce54cc33cea510120ee86500d2451b 3046022100cf310a49c01f16113909b80c1c2b21aab7fa8197ce306fd0db2177fc1c53c7b602210089525d68be591d1ae6f04f2f5327f4451e0830629c9be6304845b6781f01ee6c 30450220586fb34d08cf2c65c80edbeafe937f4da3ede26e307cfdfa2f2ce72df00f3366022100ab2339d3a60b5dc0742d5589120933af4bc17adfecc825d9416af26b9b3d5801 3045022031dd43761cd02c4488aca90fed04bf0daba698c85c2631f6c0ba31a943470c4c02210094003be685e06743cd5184f86dec091cf33868a6b6fa0151294eb7aeff1680bb 30450221008875c9a4b999c6db9f93c4b41a37ab7c1483ca059b16f68111c9b3b1a9bb09180220452e196c9887cb657fcb64ba34bc89ad0272c062d3f1e44b5ba6f033d641fc96 304502205a2ebbabe981cfc8cbfc0f01821f26675dd22d474dfa7e4cbf3f939b3d82b806022100b5ed26defd563f8a191290fc79193d7ce8c8e61a404b2af8a509556e7819a6dc 3045022100e797821ddbad32d6f875c1d98f78bded99483854dd530c91addb58163db899fc0220549b64eaaf0c16eb2b87ef90770a4e147dc6cb6babf5954b3e8f4c3994a95b66 3045022100c9d27b6c5c4d540dcd8e8c470e7e823fbb58bd5f1afe358a329e79e3dbd965b302207879d788f3040022b9f837b73dfd5234b1247d26cc46f8995a09dc946d99f682 304502203e674b50447e0b29028f2dabcf4d079ade8ead921774b2b35d566b348ff37681022100d0ee02b65539504dda6ed5ab0e05354807981bfccb6631bc459db072eab78a5b 3045022100a1fdeb7457c794f68b642d4882f4525a8a56972315e4d998d859e8ac96ddb02c02203bcd5a79c7f6fea99851acbeec31eb22e5da6ab0b0649832ff1283886db5f421 3044022070ae28df6a304f1820db5524ba0a47b36c0c20349396d11a69d0fc1eb4ba0a85022054ac9bf20277de3fa33fb43abe06d52cea65a249ef1908d93e01891adede7661 304402202bd30f300aabec1394097365ba4553b5e81d1c24f5b3bacd13f7c0f974f8bf6702206ca8f1e7081ece5cd5abd5d1cfe191d0c1d97e4cc6bd0fe0fb71e9de3350049e 304502202bab201f78ab87d3d099bf22bb7dc31bac430c3ba7553a1e6afca3a9e8cacf8e022100f80dded6e7da27de13ef69187e2c68b7ff5d7225f06bdefe21012f55377995d6 304402203d192499e656d7f34e7c0c835a2aca101cec9ce8472962ced1ebdb5ba9002479022037ea3f620027b847300d670c2f574d79a9bf56fbb72ea709b7d4fdfcee669fec 304502210087f147a099855ed42cbb1c2e3d66efb48f0de9613120e0453bc2e893e293fd2402202323cd26a737a12b0cdea6234ac6ca69a74df18866b1452036e50a00c82ddeac 3045022100fec4870cafde008d11b8be64f8e03a4d0d4719eceed07ff9883cde1d31453d2902206cfbd9e64c868d5ab16df299efac7ab6b24c15e50384f21f4cb649c1faa59546 304402200cb8c0cada5c0b3a21a41ce0197d24142c7ba1bd7b1e041bb13d3434354a15530220583984668e0f923a0b0ebc081783332dc411945d46a8ab4c4ae63cd1ca2d92bf 3045022100d213b46ea9652bd2b6fb73cf4977d163b0f7581312df4d22f8ef818027cebf3702200f4ab4679e31eb948315b4b5593f24f3fde6093ab9237eb700a4351009d1c0bd 3046022100bea14b0a29f1479a5b6e2ca682b76e329ba8f10014485876dc3388d6c0f6839e022100ee15e6da48add374ebb730197526b24ea70524e15108eb489a3f9067ab49ed6d 
304502202950648711494a8b127688a4752364e409c4b04e71526aacdf95073412d3c37f022100e730faf8b0a72ce3860a6efe477674b451877ca429831f3ac7442b44d0cf7c18 304402201cd455f7bf82ff3f142dee9d1ae4bc87a09f292f05e9b0b37a4634330d3a6a1402206556b8df77b749bbd7476b3a7063b57052477b47b74874a5705602e167d170cc 3045022046aaa9e50366b2df33e52c64a70f8ad22867297f31897fa76bb755aeb6c41380022100e1c02135e1428f4709a1c53b94f267ff65f110388c01dffbe984d77a95958d09 304402205065be6ae1d7bdd870f2670c8212e707ea95b234641257d6c9676c62d72dbc40022034725dc30d54550ce28ecdf4e80507e3f63e333758a81861a07fd1d41ee23f10 30440220137915725e6e52fa9292e43053fa954ad81c03abf92fcb730666e13dda3874f202207115b08f7c4379376f6c04adf4a411c39f5555b15803af863f26caf40dc10202 30440220447675be085a94bc57b94812b717bf2451cd2198eacb20cc85a7589db04a2a7a022043bad397b50071ddb32074777ed6caa5c15233e898aaa0bc98a83cdc6bcd6584 3046022100d5dfa7b560a2ce89e5df8f59e2bf62071919e8686017bb67d89f04af8c7bf73c022100e83cd50a7968d867c514de21c0a0c459bd1222feda9eda15cffc6e4417ff47d0 3046022100e1769d0e344a03e3923d63d9cf8b8034fbb67b6a141b72a829e1f131c3b4b5d4022100ba0426d4f1c2fc563b8974aca385f69e43daff1068176d4b09c61fe7ce0b2f78 3045022100fa03025ea963a8f26de6381f7e5aa85da77430ee47efacaa645491b10ce67b300220311a319cde8b39e3eb3106689bbe6a715aa55d471b3110795799399c174391b1 3045022100c9dce4a8f96f76a642988c609b320544efcf1c2ac612a8af43451eb185a4cab602204c9dc9a7bafee120d34be2a27d57356dd4067c2595994f022141aa9c0fe67b02 3045022100d5ef9bcf5f5fae55c306d2c1b1bc668bfef993427bdd6c0dbbc49186b4ebd77702207f976c55ade80141a23229a64ec17a5a85e39638fe8d7d88938f2cc34d48255e 304402206671557d41ee21b2fead71da3c731ef57a5513f30d4c21e3e20beff482abb98f02201dfe5b33dc2d61e2411fec4cd743d6041cc6bb3dcc93ee9ab42363f1d22c3d0a 3045022100ca44fa6d6cde935d447bf40a03516c5f0222e19cca2de5a798128af8e84ad2c4022054c6b2ba37fafb0ab18d5a662f55c777def5ecec4a20413069bf7f77abeb7159 304502207a81d896f305d0690463442e18b913a0ba6f58c0a1fd839a5283def04ebac066022100e61ea4c64337182f8bc91126e335e4baefe7fc0377265817022afe5205960e81 30450221009f08119e99f984e74a13bea91894f8e99e09c9829d165b516fac5052c5213907022055a35670b6c3c753edcee4c19c56371d1ab063960d97f080c8ee1a5857fa0244 304402203b2103ba19b1c1853deeff537773c0780963018422b98113ccf1c9c3142e10ea02204772c29546b9c18872c77fb2f73f624cbab901374df59e7e0d00839b6ba0db96 304402202ea4f3e4212f4e69e257e7ac5d872465dd6478694a5073eaff2cab44267c9adf02203c0c22dd03884bce2bf1d51248b919099747b52e4697d54b7b86bd6d2d225cb1 304502205b911482f5b99261f85d43be0cf5676e4cf5f55f6ce0928858c6cb60c1f65a3b0221008e9d2d131e319aedaa77603d183d814837c23e3fd9c5472c4885737a20020be5 304402205a569c7748c563d41170983520048cf4df1c916f9d7883da166ba111a73e95c0022033d57bd9aa64173eec7f708ad5f061ad6f95fc28af409ba72c65a9a8295b2d7b 304502201f45fca001c82e23e01153ee728f6637be22dc37f3c241fbe74008a5ccd46fb1022100fde724bd9ab324fc283ab7028460c30b2c963aab46b823e4dc311a3ca03998c0 304502206abae6544152a55c6443f6fd9b0a086be44507188ad9ca161501538da52e17ac022100d2b97678c2ea63514180ff3d965a12c304e5e22507e67fcb21cca6f8d7cca097 3046022100c97358711852981d1e9654a6adc3521b1a284dbe73aa5698eabfba7185ea9470022100b0e6d4ff3702bd36e2e792c417372b020cc1b97e235bac3614008a5ca7270c1a 304402205c1bd42a7f6d451cf3a6441993d069a41fbbb2fb6f1b14956f5b735e308ea50c022032a8f9da48c9f7dad5aff0db551d42ceee0168f92702876a95cefdcb6f354c2d 30450221009e93822a7bc95883172819ee0077934426c2a52fa0949120a20255d5a4e1773b022027ac0cbd51f603c01234f283d0eb06e2aee2ff73931d52d703ea90922c3008b2 
3046022100a565d4c623ea650db909d938c2ac2f70c2a77e08c5f0f9180b4c097b715d1fad0221009c36721022ab5f512a1946db7d5057cbc78b2a5c72bc634a2ab3f73c753d0986 304402207b93a7df84714cacdae601c886cb7ef06535350922f8ec4c951667008e4cc6af02202b8f1c38ec5c926757deb13ee40f76790c6a074f83da52dcd6d2673610df8663 3045022054bb20c268196fb799e654671df2eb043fb8bd0c012ce6b9fc6b75e514c409db022100ce50f60157074d69bee091074cd70d55637029f08b85d6d31aacdc242f983685 3045022100cfbfca49ae176fe5463cf0a1c491dc420352074a793b6cec41f15ab1da4b48de02202f5f0c58a4fd3aa4abe9a1465f39f145419d022ff6fc35f9a9c43e80acf7a1b2 3044022027e4698958e68a763b4d148dfb5f117df9c8a7b2f2bab21f4dd0109d4f9a54fe02206effc57ab7ea57caa09bd4217671d9a79acda4fd725a21c3fe5f1184ad8cbbc1 30450220648aab0990935219bb02bf720bc9c955e4c494e8f65b84b5572b44ef5fe4c223022100e2fb459c761535a4009f2b96ea6c17b2ebb1a7c85cf3ef3409f534d5c71c6b86 30440220496201916c2df92ca8023336ee7bc2d6ee526aec26f4966cf165f6b908f9c09302200851383e463d567800216b4b5dd209036288a9119989032e8755d61e983bd25d 304502200d7855d41749d909c911cff75f52c705fcb92265bf952d668a06e17827e69f25022100efd510bbd49a966f5571b03bb89fa01bfb473749f95a94abf0c77b118b14a8c8 304402203db9906da06a7e4729efef6ac2fe179049a747819d33827eada61699ed281b1c02202e3b1ecca79d1b041a1e35cfe12bd4f4db815333fa9eddcf9ce97663648f1a9a 3045022100d4ca90ff8794d04cc1b212f96fdfe2ce3b5901f67a9cab86e675a0438153d0b00220569fa54979d711f0d8b5a334df131f771e61373e572b80fe1bb80ce170c6bf73 3045022010a46b629c9fc44dfea1457ad037d474faaac229e2ecaafaee79a6f2298ded63022100f03b2e6f8fb3539b3175b38210bec40d7a055c32e5803462e9d230ce74616c44 3045022026d28313918a649351d9bb459a0a27221f25f7371df66455ee3ce591a5e585a7022100cfeb21f9450c19da0c5ed0493dc041928f549d4e29e4b18338c19cd04c7986a3 3045022100ecf5f2dce6cc2d26e31e0c239bb857bbab76e143630e02a4a20129435c6c194f02200881f7ba25027963cb78f9f7cba88648ff0fd4de0e2bce9517dedf430922322d 3046022100aa1d6fcdad5e671810efc70fc8e83c52a6f3582932f8f68e7208ce0227a5e0120221009cdfa517f39e969eb6d15840fa4052d458ba54a769909616a9447823af554d8b 3046022100fbccf3bc0072ff29d21427667e63be2875dbb4565f10cc391cb11a83b013d814022100856049fa70beb859005b77f5463655dcc213fc9f2ff91c1ba42097aeee2db823 30450220047ffacd6a04d308984057600b3d9833d97a08165a1aecd8e980b07ed84dfdc7022100a15b5437b0a63a46e0c34eeefe44081441becf6a380c1401c643d4f8f8bfe7b6 304402203d8d0eac6f0e870eb72c28c9b2a13859b37de98fd385fff029eca29a1364cf3702205db0bf25007e5fdfbf58235a3d600a43a4adcc75b21941d7cc3a9c345db0ae76 3046022100e2ed4e3b7880ffbdf00626464c901db49d72e9670fd9eff4e85ee313fa6c2ee60221009e5aeef57be0f751dfd592a049fc15ed091fd432d3a05b85051e3646f07bae08 3046022100e9bd897039d59c5b5f384b712921940c6fe0aa1098cc8e9ae3586bad7530cda4022100f0dec390a701b6e81b29c63b5287afadded1ef79073a84187300064c487c96fd 3045022057d82a27bdd4eb03de8dc70fc102025dd70eb1019a721cc691b0a9a0e7f46754022100b4e4b2ee84a321b2bd99b86b68e1bd495de5a034d0934a1c6a4d4c478e817595 3045022100dc651271d7c1a2ba255ae2d1766205fe49583d49010b67a2b6fe08d988f8985802202aaa46defcbaa565f9b76013185f9edcda92ca14a0eb219508fc996c28dcabfd 3044022043b11498d75bfbd0725df4b632c9dd4b5e2038b74489e5fbbae44c04e379f03f0220680b7bbfbf0cfd32b67de586fd8c1ec98af1c1171c6a82f3607dfd2a5463697a 304502204ff29c10ed2a0a97f35cd9d326f7b93cabffcbc64ca43a6ee9569ee92b46d1ee022100b5f613def8722acb860ca7d7b2a4c6fe8566e0b46792962998ce03395ce96a89 3046022100be5a395e9085c735c94ededa769705e12ea305072cd6bc45f2e384c9b93e1762022100c0cc40c05ac7992961c1bb92486d2fae7122585d7e0991b53453e30c4220321a 
3044022018a58655595f95bdd857cda8b326dce36699cd733ef4ae918a96442b602bfd5302207e5a69af4d3e7e01d562aa01ceb85b013a5aae23e3491efe424d7a90cea2065e 3046022100b3cdaee683311d89c7753c17d0f0ec858fe72e9a4d17c43e1c55050fdcb709c70221008bb0f635a4c4736a5754239d7cfe52b90867b3fad920418691c426da3b12ed27 3045022100b9d0b66ad4754cb4e6684d881e97cc18860c5f8f190442b5fb2db7d7c3b9e3680220241539c1be5a4ade2167c17d1db39dec1004701655a5f8fa4ec2dcf217c307f4 3044022074ea46df7e727da4138d1010225b84eeb290e4a63bdc384b34781e2d8665991a022054e79b965c55f8f5a3283451a6db356fa0746f883d049d76743c9033218e3c69 304402200114b2b18c74db75bdd5dffd4405992f87047198d1648b28f75d2778eb60db3f02205f2d761b6f1cffcc247a775b2d38abb68a3694bc845e42c32ff8e4950eb4767d 30450220226d769f930574fb1773af7ed88e97fd2f413a9170e0319b524b11b45db38a0e02210096c40d3a821372df4c685397fc42f6a82a75554c3ccd6a64d5886b42d287f096 304502204d8246982edeff89c72cfa0a3cd1dac6d5ac5be07ce42ec5c2691d96a5b991000221009056f72da4dbaa617298a95df857e219954fdaed5354a2feac09318d0b405ce7 304502205348e1b5fc4da2de45f1a194b794b8ca11b46ef0d5f63f4b9b8bfc057b5e363102210082f8bad8f5decd7a09b89756bd078cf4b33225581ea1e0f8a6c9dc9601580a19 3046022100bc8d9b6b41b7d1dc936db758910f49d47667ede91189438caeeec9387cae9b42022100ceb50bfb9e4616e3293d6780e560b6c161239be6e63548cbd3e5cb34a24d2f9b 3044022050dd1c89299f76039866ff40ba838e25216dab4cb014f93b9faa9b160873e72602204b2df3d371c927fab40da168385876248f026bd0182ab20d05196e1b1b5b33be 3045022100f07c2d7f3baae86d852eb691ea36ab7c33e7bdb7995edf33b6481b2dc233ce8a02204ae9fcea6a12dd642880c511301c5becc7c83d02c32763612e6d15487c836e3b 3045022072748fa4e7bd56dd60f2556e7af2a1cbca2ceec658e338d41fe92e3147758918022100fd40699c422a6fc396ba9d1624a6463a9858337356df962f13b2b25dea4ed309 3045022100ef441d537836044c53a7817f562c3dc23b14800fdb7e489448dee8ef2b3bf48102207c9ff4ad66545903124d2bef1ee6ee5878634cad4f3dd7886813a66d3d9f6fe3 3045022100c0214ec4a4f4557bd1f49b6c8dd74265ec40caf2f2318454f849047f459a94ad022067fc3b384feea50b85ca77d6b9bd3c5ef313b4f35f838820bbcad38c2a0e828b 3046022100fcc8a29bfad5b700eb2d1cc9d0253953874561e3955fbf69058775e9ea051690022100db3e20881a16287a2c920d93ef5d75d9c2a9dc43d3666b985d73f0f98ba379e4 304402207f95b3a3a94d44d08a94de4839f60d1fa3fbce0613ed34a3ea8bcab4f7ba1cc302207b8ca801e66e78f889c9117958ec7fe9d113702ad257adc2cfff9747d44ce4c1 3045022100b703fe1bea6803ec346c9de159dee13a57b53bc4ca2874022a6a842fa864a2d002207fcee63908668ebbfbebf34e412b208023ffa35f75a49b3e73448d6c31b92475 3046022100af7ad0faf3594516a848193fd8a16325aaba980095826aff549dd719034b104b02210089ee0927bca00511f471f5c947a8b501a95a17667280300745460a80e56f3c81 304402203ee0b7f86b2cc2af3c362b4e37b94f20e7d5dab294691211820b65ad3db1be7902207e580dd869d6e4e347326165a831c03ab5bda1ea1153c93c341f030d8f62e86b 304402207e125c0ef903b92d8cc0495442dfb296f3171bd3fac87a5bbf4d852b5052d3630220344c3fc96082bded4eb9eb5a12d6a2cb591b69a3e4561497d2176096dcc9b130 3046022100d0a205fda48c2a0880e2bd149557ce860cc1d64c05dad469551248d747cbc9730221009ff422f4e2eb127fdbfc932880d33b28c7305509728d1c7fa2b9ce01b3889c06 304502202d70be1588eceeda391b74de6f11c7cc86c7ca0c0cb11dcaf6e14a06cb8c93e4022100ae89fc17e5e3d361a3e76e5fb4c40ee46da1002989ecbac42c5e4ca1c43d4e63 3045022100df671cf810b6e357cef0da6b0337b8a9c0153dab5f6d0341ed5a5afe1a793482022024ae85c3a1aa9b206e8f7dcbe60dda4a8d5e1e95d80045929860618cbfd21400 30450220692283fbf7fc6c512f7f60bee99536c10d5c37cf59ba48ee120eb9b82e1db2f7022100a28ab71e0cdcba7595dd4a0aeb0598f8e51e0cc2c58bd9757305585d75c54596 
304502204aecdc3306624b8d648cfbfad4877c5724ccb6707159fc0b33f20932da88198d022100a358f1249c9654a071a1ec22df21e525edcaae0f70644466129b335c80a60b51 30450220312d05cb0025eb0794b947725fa48aa3f6a86aa4e1de23822deb461af56bc33e022100f11304d3a64a121718e90fe5ab037ae9553636e4bd4e8ba3f07540281d16fcf8 3045022100a400b86e3902758d87ab69ecca476aa7975c4c95659041f5c1a5ac5ed5a33c97022014293313d6ef097d492958b0edfdc718fa65100b7da6c970bfce3fc094c36715 3045022100ad4b9fc508766b66e8df61d3bb080a33b1c1a55359d55cb3f519ce914f4a82eb022058f228fde256a00808f358d546b2d1d2c113173aa4c7a54f012e8d63ebc02b85 3045022100ebd48865789b13ac1f8812e7ce31eb1199a6f4fea9ec6ee161da5b97669e97a3022002c5cc6f251ff4cbbf2fb870c60d81a31b1e5e5bd9ec33527c26f764e6158743 30440220684d53b4264abceef107a04d70d0d6a808001d5aa4d0fad0ec3d374b77eb7ea0022072f6cb64683600e3791fb5398fba551ca4a47ddddb160e9519fe756ef77d3413 304602210088694c66ba7630605ca1016f31e9c309ccfd915aa3d38162108e6cabeeefda1c02210095600a8b9b79430e081aa1ac16a6957e90bca710ab3ccb3950550da538b5d5eb 3045022035b183786458e64b6b0d59f6e576fbdd4ca04d65573e8ff9a92ba506f376d5f5022100de45c1ecb2ac1738bf16eac1a2ee6226ceb933ee6aba3019d94165be9299ea4d 304402202156f35e68e9da2cecf20d08874a1311ea3ee2d3b58357d1b96a82fb4c1f785d02204a4666c0d0427ab7da0b3138c74072217fa236969a28c01a7aab9d40fb5c4bc2 3045022100f503dcd1af00f89886e0a8453cd2bf53821d3c6b537b5ce1584680671baa2ce902200d04f9e82b338e6d2c914fb3d25cb6020daed3c40b0b41598c00b13d10dbb5af 30440220292ed770a9ea33f33187a7a819625bd0f1c8ba66a245add2ce96c22c73c4a8eb022065096e89577612641740ef501f114f42f021b53dca029d78a486ddd467ac160d 3046022100de5d9b4e5df0293296164cf296889a3a88c697027b4216c93436cbf203b82147022100ebdce9633a03486f2a3738b483df83d5fe37bf22eaf60ef44c58c4168b7f889b 3045022021d5217a85ac7f6c02f48f1855b5e5a01a1873050257d1425e3483327fc3b5c3022100f0fe2f445f5a8beb6bf138aa06f52fa42c75639e346b55c8bdfcd9f15adc9a39 30450221009cf6a17d6a4d2465dd035d2a3e9dd876621fc1a6473caf2bd5aad38d21921bfc022010313238690708808ebf5f98c6205a640f8f0e9d8e7f3112904c3d7834531744 304502202854578dbbcc3655a2a46294f5ab7c8e260b6c707aa718a442adc024d559d86902210095c8b289c5d75aafa65249c8b7a03878a20707034e4a8db3dce984d0be168351 30440220382ebba923e282baa477ca19cc328e154b1e0693d61428e19eeeab3a8b81167402206acdba7364e69fb92f0cc5760ab06ef785ca5478bb68384a1a8f94d508de943e 3045022100b3ed1f2c9d9e61361671a4987b2622666ca817203758567449c31c7bbf3bee3302204c92a05c0a9e38fcdddd420e89b73b7c7530728cbe14598c9a5065e82f888be9 30450220788e45cedc292ac70f0181e65ff1abd4a94a0525b84a5b4c1ae143fce2159e8b022100e76df4edc41103259cfda613fa5cf92ad5261ccd2ad4b6b364cb039e92d0f340 3045022100f95654cc64fc28df53467d324174e518efe8d26bb5f515c9d6ce42165a412f180220464be194f09cd031a234c4413e3d42ef5060fe4d7bbf6adaa767d09f4e7cff00 3045022100fd7206934a6f6e533618a6aa2de7579305ac35191a013483e4c53a4c4735009f02204cf1a041c6d6524c4f5764bda84c931e05ceb681521b781249a2c54fbc4e8218 304402200489a1748012c09b4f8b34c51b7da3de3b1c729c1049ce8bcdce0a4dd49801f7022057c9b31161274d997f7adf7c58bbf07f2e5c34d679213c44c72536f80f7238bd 304402206e9de2c69e7b09415e466aeb9cb2b4c3b8e875d7901649632a0233a70871cd7602207e892a86194521e3b022687f43fbc3ddc5393606af9266b16814ab3dc9e7073a 3045022100e19983bcc72826f7df06968459c12db096a67e96471d8dc8bc472804a3e6fb4f0220307c20a81b19644f66e716185d49a8bd6ac385b1bcfa9f7996f61c162b9b2e2b 3045022100a45c4177471eea8fe0ad996a7b21ec6b1536cbac48ebbb0eec2e3346114d310e022035bb50eaa3c7985b177ac91632dd87170e82f8493ddabb456b439e3e0719856d 
3045022100ec8127e97ee96715326f2ccf1c7ba753226758619f0c37ff69429edbd1fbeae9022004e4597ba44760f9dffe1ea585c610c5fe3244ec7b7c3472f33285f41ad6908e 3045022100de907b68ac49430b2efecaa13dabdabc24a39602b0fe16d673d9b6a7ce8b6630022045dc82181a4a26314a8a05fa6ed83be2aea3b6fe6adf229a98c33d9ac17dca50 3044022012022191b679c86c8c1cb8ff59f6c82a7257b7688975e56893a791eb542a3c8f0220451ef2836f8a9ebda92d56da071537aeceff3f87439c1b6107c7645cc8369b33 304402202a30558ae5ca5e352999a9852afe5e7a747053058aa40639525bd445e5f69166022019c69bf00c57a53ad32c6f7fdfd59129edccf2b369cb1a8f722a9046c4d77e4b 30440220398e3ad9302b688cf41d17940da4f9ff74487d0174a76a1910c6993a73a69bb102207a78ae575157b47d9a43133f4c35e63f2729e2fa019aa69b96da7a4edcddb202 30460221008ed5d1c223fe447afc11c3fb515a767d80c3cc514ca72f1d83307241eed5a4b5022100e0ff7ca4c5bc1c75b385e085862b55435cf0a9480ffd2db1dec1e4a9c3c6813d 304502205eb1ad80cbf0fef62978f942e393ad59fa472d2d677ddd8d4b373f0691cd5229022100e0871cc9bb68effe6dc1ed9d2438f26f34f3aad6ed5ce50514617fe605acc8a7 3046022100bf43532ab80f22b2c80272f9e5615e1c5e099343a88570e16605556e8714a4c80221009b302ee1c7801275ef00a914d01a80fa124d0792fb0db92dcd1dc1d48e11c4db 304402202d9a84ef9ff83707e27062f063b7a28c627514d9cac1fe39793c8b2633d3f5ed02207c43291e5c04fdc313668b26b39a12484740dd197f9f0c1b108655d03fb2dfe5 3046022100c76500b03b288f8d3322c37c41a2eb549bd6c0863c765fec4d6486169afb62fc022100c963974634c5cc3339c43ce7dc108c80d9dff98a0dd1188006eae8f53816e588 30440220173fd390dcde8b05c3d9076c9b8c2c20a0ea6c089cf72c48324674ec33c1a76102207550ac70bb694bfbfbf024e0b0182b8e5b48265dba624965b3ce6a16c64071e2 3046022100fe16e6c99bf4e45f918caa9763af0122fc31b633fecf1b886ac8ee206f881a4802210097dc5b79c0903f1eb2651082ee67a311c78ca55767336040f804c3904cf1dae0 304502207e15754f89b466e89ffba7c982e46555c8dd6ac4d3ffba1d6ad19ce61b3a3ca302210095afb5b75ef347c6fd67e7938975049791ad94dd5f2471dc7bb6e7263c39409b 30440220353dc038173fb6e23a320e027c4e2beb0e10a9dfea14c8e6884e4addfe217a390220569bd929190d916bbcf69474814ab51eb24a3bf79eb6bb7510e854f59eaf771e 3045022100bce50b3f376c307b2875ae6c32bfca3a2b2e95b07774ad646955da727a92ed9602203d1986f4b827582c94260b6524a6e6a70f94eb38ff18821502dbd61b7e88bf8c 304502210088adad1585026aa81c74d5b690d4c8d58702c57d7b528c05627e7bc50957af9302205d7dcb9fe4981cf84a46345952e8a7f6e35e14de36be8664403344f5e1e4fd1f 3044022001bd337e3b91e17dacecbb6a74cfc6f839c9cc74e24af8b476347ae11018a8c702201d064664a73162291791eb0478b37148dc0f3cdf60ea4f33c3420228f72d1f8d 3045022100ab916b2fef77602e56751ebe22158572af417183c8c2602f55ad1d8ef77a156502201c7154fdd8db175b6012bc9b90defcd30a997fa76b08790863857bf6bab28ae6 3044022071040a2220e474d92a8f8f07121f0fd7df5dba67b77b5caa66aa43addd69634802205afeb459badb83e5b3cfdee1e20e6cf5797b61495c5189a837918fee4c566a64 3046022100c127037a2528e23f6d622a66975de12610cfb51ee614ef98bd24d9793638df9e022100f1a0ba981baa11001339b94cf1e2a948c705a69484c8c8a3a13d258eab96e5bc 304402203dc91f43125fa4a31f94bcf7bb1717517ab3fbcb3a82418077411522b036f5e0022004b77ae35c4796c7130b04a8618ae9fb832751abad2351ffac250e8b34038311 304502204347ec393b6a42ae16ca617cb5a7322ba95590f1727df66e908ea442e2081553022100e77ccb3061bc747214683fec27f6f295292d685e8dec04aac194822b2b3b2a44 3045022100b5b01108b5394651fbd7969f96cde9fb8f36ddb60d59549110d1b74db3916eca02205769bc583bacb1fbb33c8025ec59d13cd24c1d424f39b4cf1d3f15d00257c2b9 30440220086cfe134f67299a85ba661a0b3fdbef9b8d5248d8ab708a4ef776d7f215e9d1022036b74c70b401cce76c92d4e70c36c2473e90c81656caf69d77baf43244d60ffc 
3046022100803415573b8cc6de0030a575c0831c499801475384cb25dbe52786d3a487835602210091cdd0fcdba13b93ad0f93273dc558f3409604f8891d43a7aa0222e49306bbe2 3045022060e0fb690da11ab485d6d9ecfd31383c616e13fa2b7fa01ca5dee1c9c2f385e9022100b0d9b94021417582509c422ad2c1cca323c4b9d40f78a13e41e49a16eca73bdd 3045022100ca681f1aab2c30eab2d9eb81d1a703840c0729fbe1a60e409b5d294f06446876022074e3c104321b7db211730effa9ac5f25106192abf0cc722d9313a6e752beb74b 3046022100fa607f7028f4968f8c37f67b4940e8181e167bdb63459f5fd70d94726a9bcefd022100fc1753f30917a9924f71f70a4f6dab282b223fd3fe2e6a7b0df05cb44923941c 304402205bcdcd9fca4e01b54c39988e26c443abd977fadf734b3b72dc5c5dd95085884002200bfbdda71ed2794fd13a7b3c41193c65bd55c47a43a5e8b76241aba3eae11320 3045022053c75bd146b018297030a5cdb9f21d4feab710f9d3a6222dfd7dfe540945a153022100927f86aa7c2eb030798fc72fbc52fe28154d14f2837910bed9bf3baada17ae25 304502205278a4b62d6422f832ea4a1c1e5f4153e00e3c9c1c515e67f41f03d5a3eb800c022100ff2522ad610401f67b3cab236302333b5b9891164986888f6e2a2690ea76ffcb 304402206e7db2c63a202fea05bacea7b184e66969705fbb6b0975014a9df03b0d4f48fd0220065af0bd7900d002549d8abebf274dd90ef9bf7333fdf00209a81f8e0c354929 3045022100aece661053bd4ddd858e6d4ce5b9e2ec3ba5129e535d63f4adde6d5b08e6c0dd02204a5f09c678e7430f58717e9a6aadbe8aa4d7b69bb9bab1e8553386094864d337 304502201e5ed27cb6b664a8f7837e6517f667b8bd6d12d21d92db7e51de192e95f9cd4b022100cd4958c563d09edd12beec4e793050c0c2f8d8cddb3b4371dc550b363b1f6836 30440220137a655dacb7dda51066c703ea6013723052d83c66347007880a5720f46690ee022055b815e7c5f199e544ce2623d06eaae1d9a97c2c77dbfb3554915f0feb6f4425 304502204075e5cb29249ff624c4dba9b159d6a17af5f7597036628722a38452516311f00221008b76e8f7427a42b419f01794a091ee86e985df58c6caa8a24f6fa532f6aabb6b 3046022100826baa07d7bf0a7002ab20f06bcc5692826c67d95684e7e2098e42af6de57af30221008cc06d0607cc9ef6be75ea203f5423940df948dc532769549c0d6f5fa5c80fbb 3045022035739ada08c16d849dfdbe0a6632b9d031fa648d2bc512893280f54c51d9397f0221009192af0d4b4506e84b92e2e3334c588a0587e192c3959cffc28b0bd9f1aaef55 3045022019f0232712b8f6389c9e4d65f76cf59fe5fceef6f8ccc18ad932d4fde433ed1102210088fcedd7ad8288a75c5a13c671f75804ecc2328722f39290457e5e01bd81f3c2 3046022100cdc4ac12558897d09994e810fca98f7227b9f22e16d21b6c771f90745f062116022100cb96d647121ab23de6c8d81fe53a67072051c43124031a671fd64a3220db3d68 3046022100e9b24ea044c28a5f9933199e356d05ea64c48a212ab1be60e0c19a744426e013022100989d4dde2edca671079f648ea6279994a056c773306912a4a636a7aa861b37af 304402205855b3569244c2cfd12ca593dc35c9d1144c102c8ffe2ec03b67b7735c05935602200892edbde46bca62fcee269caef4c5ba6e512fbd75073c431f0645760969089a 3045022100bc708caa97bdbbd79f9ce19f792ffc520a47637171c604090b3b3e5b93786de402205ec40ea1f8b7f75df39d50f716b5928c6eccd181a2d3fa83f2d444aa482b4927 3046022100f47f0aba29e275bd6be7f2396a2de55110533732d97bebffd81395abce676ee1022100a934feff357c485348ad0beaf8df0688584afafa355fea4382eec26a4bac07c3 304502201d393ac298b2ba0422eb27ffffd47e0a30d4e68895d443e4136174e4dba5b723022100d03b005d40a39ac0d2fda43295a46804bac8001dbe0effa5a3d60e9116e77581 30450220267f0f748cd3625aaf40e49369c8e4d9e2603a76d31a284e9f2156429668463b022100e63c821ada983f2e1890eaa9657f1192e9d516c00966c71fe5e4547e094c5fcb 3045022045878ad42bb6225faf8394531b635b245f96371e49ffe6978021d72a00605b50022100afa67a144177588dc53f76c0a0d468b8717c68d15d884a0840a350a2be814b5e 3045022100952673900a6880207ab4c29aa7f5f30a59ba61a4fec3a2f7e87155464890d5db02202a85fbb1fec45e1898cca628b8d22d139817e0d090599d3e8652c664c51d6118 
3046022100dd3e57eae240456b3ee79bcc98f32b9a2dc9c764e4420ce2c9dcc9d322a6c313022100b11668950ddcbfabab3ececdff7e93c6738cf20ac73b45a2be9d82149039b401 304502206efc664ee6e9c55b421392c22fe77a0955192af2830bd02d6369e91216a1f675022100b0f8aee6825e10f4e73ae9e892a4dfb249300710510f79a079c65911687c7f5c 304402206745b0d02de54fe31bf3cd4fb68c08a12f07a68cf532d722b328f9a9d673d9a50220058c42701bfe9d75c67e5bca0b0b664f5470639687d60cd405cfe06d2b22e038 3046022100f9d59754abd0dbe926b7ca22ba73daaeec8d788c1cee8e50a586235956a078f0022100ac200e6ad5ba147e0cb62c984795f00ba882e4ce0e97e1a78eb1df27956158c1 304602210091214075a7103dfc64ac1e2ef2a82b12c5c9223a79cca8f1fdaf07ecad42ddc8022100c73047f2c39cab5944b5fb93a23721c508a512fe57a9c084dcb9f945e8bcc03f 30450221008e62a169bc44fd6b330b45f3d53aa2e6991f74e6b4efc4fcff40ce510730665e02201d49deb964973152e0f48923dcf343ed263248f4d9b5169e313fa02c346bfb09 3045022100f05662529475f8fd14cbcc6d9f0a0a31ccb1e0439bf925a3a502cbc5be15bf720220753a09f3d6eeb207e8418404c2404613588ed2cceff2d58260ff6f11498e79be 304402202bfd57c60844e15ddaddac257eb6ffdfe36657bb81ff3b526bcb5111b9feedfd022025a6b34800f3011ae9bdbf921ef1f02ae0d7d6cd095ea0707adb03008f1a56bd 3046022100a18cbf60b9b5ed1a457abf4d5a36664df9bc3af1bdb68a9f4f8df003fb68254d022100d63108a648ee8969afc84a0bbbfc21e8fc60119b8619573d7aa1448ea44c8078 3044022027c6a7abed8e25d41f149229b0c353df8c9b627a2d2fe0bad6f177ec3adee10c02206490d514d6f5c65c55e936cf9b65fec4f938a7e38e3120103ca91a279fd76edf 3044022021da834d8c065a190ce98705ea862edfe3a121c93c102291255f452f63ccfcb4022016b6abb720783b3d839e26822b402061ce40a690aa1dcb32ad343769df8dd51d 3045022100a20e1c27df5359ffd80325e96fcc8971a46667462dfdcbbb7b35c85846742a670220344396ebdedffaf74ff2d30ffc55afb05be945e3174468aacaccc205fe544834 3046022100fe5621eb093d25768c57c8fd29e3dcd970eea28326af2a547342e28a20fb4f50022100d5cf1b1b5fea66a72bda40e6e2572c85ee54700b50afba70515f4526767d4132 30440220471603a30e13e5f9c2d191eaefc187c60dec9038274fd2c0f003da90485222fd022032fa69e8a67ccf344fbaaf2565b2f5824d4be23ab1abeed009552fdecd68dbc5 3045022100fc15f5c9226f6d77bcf3aca16b648f2ddf45dd1cd2aa24ca0bb3f1a61287485902207881f489b15a0e1b2c852ed1a57235dfbc03b440a0e3d98192aacf50d9a5d76f 3046022100a3f977551d229f9ec018de6a50ba8f8cae9147d9d3e4ffab0afc984b243f3263022100d8fb298752a9d7492686d68e2759825bebb25dba14b0fdebf1cf7fc810805d10 3045022100b57d823f098d5132380f6c7441f6106d51486778aa922ce5414f5f8932bef97e02207a851baa6f140dbba0822043309708be7001b64316e20eb86d36c38f42aad8e6 3045022100905dbcfecc8156667dc5f2e2c841f2f1cc03fcc2438c50e606ac8ac9b278da21022015cfe4bb93c3ecced6568e7ca0f7ab8714955afc63c541e66a0fd83af2e05c66 3045022053f63e3aeea6263995cce077df7ffea38c4beaa931a058d821b6504715cd1341022100fd4b43b7c55fbfcf687d6325712eaa5397d72f35ef9cdca1209454212ece6fad 304602210088dded377f56bfdb80a4dd96b1f9bc90f200a7db79dbe8b49b54f18c0f6b8812022100b4ab6713cdb8073043590774061b186e19dbd686f7b95848168643a448ff603f 3045022100b29bdeb2b3d35395ee586338813b113d682eacd5ffa788efa1d1a39d14077f2702204297bab4599958238b87869d6e43d5c5490654138da1ca7d7b9a9135248b9b88 3046022100919565faa2d5b62d48961b1b1de3b2cf62f1f68a6255e0d3e398a86d70205f08022100afbea18eca9e36982758adfaee3d997580bd9ea563e571aa17d9ed855a265669 3045022100d6cbe37e131cf046b4dd736e0141942e3d7044b6a56fa021ea471a2fd617f9c902204ebd2e73f76a704cc59b45609870e2d767c817f99aefb5175b531579e23f884a 30440220620f97330a7289ecfeb900ac68e489f1724b16576119b41c8a3e9b5a6bc9d0e502205b361011f87b5cb9dcadf6a19cf46f492a2471f34eff8e454f560c03f7ac97b6 
3045022075a852491211b52f9a47ddb60213ad31bcf9a97ef2265acda084a2c6c489f666022100e35850db4bfdb4089666426214a0ec006fb64e288771ec44d4a1e6f226599f41 3045022100e968f2d67583f852a7104291782afe48e9609824238482bc723b61153807f11c02207cdcc29cb814d2be5f7860326fe69fd15f86c339797dcee80280a76abbf8370e 304602210086289717379ccfc22a390f67f859f502c3ddd874d161eab2c5acb0c0b5767453022100eb818ffce1d6041cd219f2fb512d9a66256a1a478c5b474b4635808efad2696e 30450220537d9bb905571a9f4aa0fceadc6cf3aa18cb4b27a7650324a2f8c193635c439f0221008e16968a7828c9bdf1d606ad8a859661c1e56774eaaf0ed7b140e572ea695451 3044022048fcedc2483f85b7bae3a216cc16b20b77a90096c36008d85abb5a4c54f7795902204c94cc3ebb58c583a75a80b9c9be8fc05e64bc81d77ac3e3de35287b6b5e5343 3045022060b3f78aba55206ce66a3875aa9188c191e0ea454b0cd11ea8a9fbd4e27b646602210087732b823e8923879f1443c937b3d3a4f9a092f9fca0438811ff215015fe70fc 3045022007b87f55d6c76071a8cb5b23e5d869e3926da2a9d7ff8f71ef7402482d43bae8022100e4e09a69638ebc707eeeb2ae0d158f33453715eb9c9e9f237e0bb5b50d5fd98c 3044022044bc4c1124c353dd2bcfb25a2ac252df10d6da83a05758b02d6b5f1fb971d7dd02205ed4e3208f3020f716cf96862da2b4a3113b0746a368b365de4d03af88d096c0 3045022100a0f914f2df807e0bf1a3fd2cd798cf2524912f9e72ae83e33fd2c48f035fa35902204a203b8e9afe4b05c44014e74c2465452c1fb49e549c161cbba7a51495b1b333 304502203e59d326d4aff02b7e51ae05ea6a7162547889564c710250919ffcd56ffd9598022100d6e1cac1bd91abebe417b071b6ac8fca7152d275f9a32e9b6a08b227e027b713 3046022100efbf903ccd3c704ab9bff1e6869d6760a0c52ff8e1aa718418fefdaa2b04d152022100ec24ed6cd8b7d3e3b1edf20972148e500025e5aa0b669aed2bcfe945516b2dc9 30450220137ceb7b90f1772837263e5083a77b9f9b8d7e4046321a052ea207bfd310ff55022100fabf7bf2df5c9d014d14691b8567c5abab6d80bbb47ac32fa7aff28412959035 3046022100fad85abe70aee2ee032dc7c31145aa31824ab5aa712843dac101baca26cb9b70022100df76f7f2a1d38071f47fa2dcd8c69300f84d66b1bbe3991308163710b48cbb14 304502210084e9214b5c9b5e80e121520e3bdc2ed88ac7c1c6cd580acafbb0e24b513037ce0220116024688e0c97c4fad1ed5fd178be03553e8a26c4ae85c7b146901122b99616 3045022100908ab8ed92c13838a46e0fa49993bd0f4eeefe32ac45d0edc3d154d669f3370802202ce0f852ad5ae0a3059057c092bdf413a714b91328f2613e7652b99f06d761f0 3045022100b8fb153a53a4b9a4059d2f6ffd898cb117974130f94af42bc68d01adf96e8507022060928d4d0891ad7cf11fb2a45d54831b9e23d5f2cf8e3b3ce82282828d514686 3044021f7d4b7fae54f0ad86eb63d38bbd429730344a529b10320cb5d125f0dc2fd677022100c7015db01ee4199fcdc474493f97bf172f0069bb56f8096f8efabfb65fd92edf 304502206019a36c6c2eeffec072604c78d1ff6b23def9722fdce804aeb0dd8e457a68f6022100a01575c1e41cd275a5e8bae46a882c0741f06e43c1e3bc794fd5d0e19bea040e 30440220526e8e0e9376772232e2f8b8f902858bb399355c3e367c00203de9fcf691026902201f2ab7c19ab833d7e67985a409b22ca17be51c522f7e62a142532b4f83247835 3046022100e35e51ad82d1a1da15527347c843fe030eba7d4140e1bd63d9f03d5dc7aed206022100c6c7441beacfaf9886765863730962f58ba907af7da73c5f8aa9100d9eca645c 3046022100b899b98c01312e853afa5eb784c318f9436f8333b79509038ee006980a57dab8022100ef732337b19f2813a98710eb13fe963f7ee827f32f78591a5487cbcd29f68b16 30440220053b4a9a5c2b9af19a171fa622be503eed9cb0df8b01ec1b0d6e5257bab4e4570220631ace7c1196c8b5434f3c9de00474d7d3129d0b78423f7d7389d29392fd3672 304502203dc2e4cf681166ef985d07be78250b0abc960445f1b3b0287f2148877de23121022100c4c856365bf479a6599b3ed6e1102027a0fc2849060732fc2608e7437355113d 3046022100838c7d98f081fb15568fbead0454564a0e582e1d77620c4ddbdf4dbc02e9201002210098ea26db3c1aa5880005c5ff2156a9f590d845edc9d57d1b507ad53c69311008 
3045022100b43f41820b9763c1528d84894766930cc7716af230b15296d095d0c8675b1abf0220405085a03cdbab11cbf72c5c613cd465259c28e0c2b7ba7d260ba226df6a524f 304402203e1fced59bcb36e8f97ba4e63ba7b458cf03d9f8e923b47d66f180de61effde702202364a7f59b3ba48bd5dddf8e32e282f61aae9fc824f37ee404b75c67bd29bc00 30460221009c35767ae0abfb0e57699fd05000bf7dc6f39ceeb4d49bfb8dc0f79f04c6da65022100e3af4121a9af466d7723d1999c0e93b810c1e76495474a56053913cd8ef533d1 3045022100c8ec397bd63027ff76d4a4b5d074249bef68837a72dfafdc89dbca119948eeb30220189085465c9a204e1c93bd0824b4b7b427a8761eb32b0c536bd73e3faa979817 304402206b9a663656d3244e4406588f7389d6387f464bbba41afb7693bc29d2f0593f6102200d3904ae64bf7486cdf29791ec6430967b47f61cd20a0644f6953d4f2215e6b8 304502204d507ca113e3f55da7f23404aa3fc3ab38056c6129118b657fac0b0e561dd91c0221009dc19f65d47e60cd4f6c918ea9db8814c077114f895d12523aac976037937f73 3045022100f1d73e200b0a483f4387fd5a0e5f8524c148f825f8753dd5f4af2582f6bd7700022056ad7af209ab055208c7e327f319bd856461dd5a276f29ba739bfa370ed4c59b 3045022100a8985d9d4e917a5b90563764a99aff1ffec8dfa9f212ada8674a5abcd0db16990220439d28709e6dde6898132f6edabb2c0a472d642a79d0b178cea8b1152226f397 3045022100fd5aa03e88f8030f64ac03a6bdde815006f5a7dcaec1cc79e2b77f57fc1f8ac402203ca3b74953e0d5e365f5191923656ed82eb2c658bc915ddb66884672b47511fc 30440220782c2c06b86f74366ad3e8259d32d9ddd159306fb730b98b6f74491329e885b602207a6677a986fe73a89e117172fabcabe48fa20b1c745276375178f6b0525c7d05 304502210081bb270179d9f03bdf2f250406b7c981ef91c9125dc0f3934f3f5eef7981732102203d2d86b73a1e00d3e27a0764819fcbc582ba23209f92659ad6d9733a93b00de7 30460221009def7b94804df9f7f569642a2b4f905e33dfa4d7214d9fcab19160616087a256022100f8bdf0323725ff2bc61f0b75267f8b087489989b278249cd9a2ed2e995a0fe7a 3044022007bc47fcc188a3005a05a4277a82c2628628ad67fcb11c9e74849447b9ff7dbf02200bad3a5851f145b57cacbb21ad96757b514cb09a6262650aaafbaee4e241da4d 30460221009bad67f3e7db8f398232566efbafb59dc67df2c08898a0b279b89a1b4fb21969022100c68e7782d3403fe8880449f79b6ddb661d624cf728a911903f162904cb19ddf8 3045022100d0b9db5d4277d2792ed38d4ae9b7bd169eef151e66ce49c78b35832ba4bc1be1022051e350c9d5b8794e166a7401b576251cc904c6484abaa9a334a4384fdd647c4b 304502204dafa5fc395a397a0cfbbcb2d086fa58d5bac9e67381447136cad0fe42d246bd022100a13a1bc1e5d307a21b1e11c231d04c33e8b4b5eae61cceaab7891527f951d17c 304402201cbdac10845f9eb7b3f2b52121bfc86a358d8139bbc969537a3a14ff02fb7d6502206a6752ebe158ccc83637b8fe0974ddb9fc7783cccbad5385fec24a4c0bccaae9 304502201f99f317e551f8fa587fdd919619137512bfb6d9752f5149336d0e6def8a6c97022100f28ce9e5765d90772e30c92edb6c5b9ae3151f41f47de88347f6c5f139776c58 3045022028c7623ae4374b88fd26997a3ec5ed3d062d00d24978bcd2fa0f583819035cd8022100ba0908f49a3c8fe5abdedea23e5a7cd5fa2e7315c2e94aec0f888532c0f7087c 304502207a69ab72253105bbdfa77eb700dad440f4da8f5b236b4b05e41daeb79ad78f7d022100ed2fa830453c336fe15010c76b0857aaf75bbbd9aabf7f3af9bbc522ad4fdf26 30450220414499c101c466de23e2e6a4dd511d1c8132f700335206538659496203fc89fc0221009e8a369725d14f5b3cb3d32a47510efd9be9717df4a6563e359eab894fbe24b9 3045022100cd2d43af192ba5a3b92cfaf0be0f29e8f9e2f3968658814715cfb329d5400c57022064f2882b831cc94c3eba08614c3ca2696eb6afc4ba02458bcc379eaacbb12da2 304502210083e8782ed31cb260003615a4fab76a81be73d5bb5ab5b4484003cbddb5a16f72022002974d558ef6792003dd7ca0ff5a75417903873675cd42fa606af5b7e95f37c7 3045022100984676999b6abe638979b6590496e78d94fb57a2cbe6ca39ec9141d56952e6db02203d55cd0eaf4be3ccbbe7eae6537fe85bba17b99c81c34b221113fb4e1f1f0f45 
3045022100d5730a034f2db50091210222df88f5aa2484830a0b83d1ab4d3e292d0aed0b5c02206366060fb8ce5a89f0b2bc75c8061d1633a13be0ded22d8dffa0317d4ff1a97e 3046022100f31ba73f43ba46981736bd2b9c0499e8bd4b36d841703490bfeee71eccda170e022100f8a2dd4f5f4faec1fd19a513c38299f5de01435de1111d465fd467f5bf2cc20b 304502206be9a6d8cc207a0d8dcfa43b13b6518ea36691ba4c9b207448836126f7f0627c022100fcf8c656b35fd92f2a8afbc9824d360d5ae2af271739f340a14aeff7d9fb4541 30450221008cc6e6a920cec61ec3bc13a3b2bc06ef3c47ee27ec732354f5eb0878767cb3d102204b08dfbee04adafbe124f4539bd336eaf541bc9dd552ce6f9fdc87062249ca98 3045022100bdfb15cc6ab3d1d068df6979927a505d8be5cd1bd9c3f1011a420bcc7b0c02a902206f5dfe0b7ecc8092f34bcdf4fda8b7c3ddbbbd935e5e6e1d2c3eb33e3cc281d1 304502204811c231826a53212f284cb3cdfb6b181b00f5bd00f443c8c62eee95528da949022100b4f12dad61145411593569c1eed5e4bab74b88cdb007cfa60349c1b5b738f8f1 3045022100a3dd75004696fff3339c17d01afc9a52ea3c9b5a562f4b3e487a1faba97db8360220393ca7fc4826c6dbb85e2e90440c380f07d01f5b84738dbd771b5b69038b5b1e 304502203d7a2b5e1cc0f2ca9e129b7e343f69633ffaf7479b0eff64b3adcd7f8c8598a4022100974dcff27b8114f4ba73ec68752588109c362529119db0ea1e556a549bb84176 3045022100a243e118fc4ced1a1929beceae0181b0e0ce768bb58009234a9644688b49d180022067d63663f1548a6cb65675d2b6e008e647fce74997a711110db7d003de7b80cd 30450220520dfb24ca6e0741116f02dcb031db568a61fe6a92a58af1d33d1b227f7554cb022100b4db8cb68450b0bbc6132e4c1f3d30b1d026b5fdd9ab2365b377e15702c656fb 3045022043f90c7162ea2cde6421c78dead4a117e18e7d3eb6b3f72b28375f677233c9e5022100dcdeaa1c9600868ca02c5f934f1b0e0f314780f689c4c2efe034228380b5f8b2 3046022100ad4c154014e292035f6247701aded3d5df1d26a8bdfc63481aa10d0e38d07316022100fc955119b8d47b29ee0cd9be758211293acd872d9ca6c41b85df2b22a3fe4111 304602210085d823e0dcdb057f58102185fd6763047b57cdbac1850d57d79177329d5312de022100994cb112c11e524665996471681490b3bed48dbb6797d66210e469743ba7486a 3045022100d08110eb2ef2d5cac21305fdcd1a6bf6341d4fe1781c1421d360efdd6dccc91202205ffd4237b8c52c5f74f9160be83ac5bc575189e05bc9d82ed9768af2d9f2e7ed 30460221008eccd323a15d6f72a3efd2d47ddd3b2fe40e9bfb1f2cacdb2e1312b53e0af03b022100ee6b4cc08e6e862b65bf1afb345b1955a80a95a8e5e0909d6b7b96805d27c9f2 3045022100f3a918d9011d18cc63bdff84b4154de5e7bc720e72531c99b090cb0c7d06231602204fbc747c6880a06b9eb8675a236e98054bd9b4d0a2e1a478d351db5f6e57b58a 30450220566e6a98e95d769a604f1fbbaa5f0e985422af393d498680cb583fe6c281eb630221008b9c708de41719c2e2c0ef3eb98940ad435c66a2cf34a83f034825b0914dc80c 3045022100e22fb123fd4951cbd5ee4bc04fbbe30b93cbbc32446b87e8c6c51b5d731166e8022072cd97c973bdda4abb16c5c90552e680d0a7abab035e96f97968cdd80992cd6e 3046022100c61af41a9e9abae1a7dd9ca0fcbb6b632cc42b32f51f771b768b9fc808d3774e022100bceb500f2037b29e975d4daf08392a2f331c9d73340414bc187a77cb51775572 304502203a42cbdea0230a8bb2bdbd1721325940b05012ec08c3f94a0310543218c78f14022100db419403ebf3220e3462fb6c4a1aea090c0c5b03e91e033c45b58f4b1acc4cb6 30450221008bc4a4ef3e67210a293e95965b52cc17c6c50cb66198c860fd6e7461284fcb5902203b48023c1b5632acf39abdb6c0db77d64df8078fadd9d56b80de2f1dedb40394 304402203929ba1e387087d411207122035712f6c21b93273a4c3ab524ada45a522f23ac0220752bedad4307647708579997d1c5d70076e009f1619f19b8418754a4bb37fef1 30440220374c9952546f4a6308b2517c1d44890f60d4452c3b75d7ee1a13ba89a53a8d340220225b01eb69cded8016bc5afe236b4c906c91dc08c620d4ddee521f980bdff484 3045022100851d15f70ada41e3efb917a35afd84d8c6192869686553d9e907901069029596022029c2e1e06314d5192997960e831444e9254ba24736d4c3c8ce1d5074b7108cf5 
30460221008784c183e39840134d3a0f7be97b55d22a883f4c938864ee27e52a7d0cd26326022100d74b359ba14893b8ca2f0f0ef9bb71298e017aedcf5e9261767422a241f1d64e 3046022100c5fb873fd14f034a11203e91f65ce2675b55185bab7cd5d5b2e7a35ae9a24d07022100e6a6618d8dc7503ee77bae2cb0b4c425c6e0ecd4a5f29f766603048cc7609c93 30440220689ab811e3ae2603a7db6169a0a540d48ec45a0eadb5617ea510e5a112e5299a022016731e6557c9d51c6e744bf07cf4fe353906e7689461cd46b666ee6302171b22 3046022100bb3015bb32a329145ebc80f58f94fd994b275992c24d535d96e95a4fba899cce022100d01fe0af86fe70fcbe7918fbf22b8b09a4027f81f05ba64ad9efc512d676b4b7 304502201286e651067334603c43d6859a2feaf4ecf6f501c9fedeaf69b9005a5a46a09f0221009c997c0da48327b9373638dcbfbe4b974d97dcef44c79fd8d756d78febdc8328 3046022100ec590a377e2b567132afd91061a3dc67b09ade788ccc89b18f511db997156221022100cea2be7504e54a4a251137699f45b0d5cb416a8073f69adb693e4d5b01f80da6 3045022100c0219e57f4f6de8490dc46267f5d3819099e0bfc06c47e109d5c9f480d516b5702201596daeb1f9c1fcd9059e9cd09d61017ad3b4fa07cf2800893771b8f3e74f0ec 304402205955d2957b6010fcaaa0fb3c884cc4be7a91582bdfb57fce4e383b862fe3ff740220434247c8ecb86b1053c4d81842e5b4a99acc84ec25b68b055b2d6a072432c82c 3045022100ac18666deb4feb537d90c85c57e3ceec582546ebe0903227f4f48ce9100ce8c90220743c75e1d4773ff6b16c8d57f63f63c50e1270094f479ca805f9a37e9debc0fb 3044022012305e45268871d7be6048dca60a367fe0e85bf2ee68d31097d35560350996c202204be82f3f38e9d10810c03b7f3e1c6ecff09e5669f4d9ecd989d8c4a16726cb82 3045022100ff037303ad8b0edb3dc50ff33e88bbe2ac654eef8eddfbcf4c09b521325c8aa4022003969a07f9c8cf294359e7e6e018a66962dc7bdf8b5e889a64581fbb9de3d52a 3044021f20aaf45989f4168d66f0ddeda11b65ba788e32dd8bdde206ca72178b5f6a2b022100821ba69c39093075f95bcd059a218d9bb32e6e1b8cdb55da4fa564e4cdfc44a4 3046022100d5b124bf7b31a4e7bc4a9fef10719810b2195d16750602b440a441e25a5d36d4022100f8fac360ae0c8ccf4b2d3f67ec8359bda4bb45322cb83f4ab30ca2594f70a33b 3045022062fae4d450a6c4403bf7b106a6e36959d30cebbd33279f9e64644397cb677f18022100eabdf83386f1968982063ab0265d708d2817f2a4c6af4965200930137825377a 304502207b8c2bafc773b0cf0eb2948b82483821d54a54919ad600ffcab714ca9de68d0e022100fd3da04bf2826f447c165694d6c98be73a06c1bc26ddf3f5ef156680054a1159 304402206e1d06654ba82dac50255d870c04e89a5fd63ba8382e12e4c9a3e6ec91fd4af002201eae6077dc8fb5bb0b29a97958e96a9a0b163386d05547d46412a7140b0a7b94 304402204f8408525ccdaa8730be6f5a700a460b01e26054b6d65eafd5b986050f8cbb6d0220740c9e6974114a59d3cbab075a32054f67ce557873efcb161dfc78f2528e4681 3046022100f18764126db4aaf2df4d5570019c9f8270e7aaf5b52b06adec47d06e804a8b1b022100ddb20e7532288c84ebd39fe8dc13dc62a867ea432668897007b61194269d9bb8 3045022100e397c88ed31cd226c1ac275bda600f7d76e4efe52eca03925694aaa8346944390220705c741f690471f4f89e6a557de85f04e28219e5d724818223108c756bfe64a6 3044022023101a6026de5109869622505cfbb6fd24886c6f7a77f0870373844a3ab35c6702201b8946b47e21e385d62ad066fb91a685a33a86489f3ce26b188b1961cb5c268f 304402201f8a986cb56c81bbe623555913e6c4cc0859c1d56a427df5b715e1bc279f6eac02204126f22d87ec2b19331d23cf78c9136bf41620953fde42332c76247f8e93df00 3046022100d4bc887cefa1158dfbd933466e2608be4809f8279a340e8824066dba2f0cd73c0221009d062bfdd5ca237218ab81e362f4faecc4cdab19d390705fe8077796148cc361 3045022077bbf97854612822dab7aa08c49593931a1e54eecc7dd7a57e26795ab741489e022100d99e1060f2633c87b0fca0d65eb1a0ba36beaec2ff2adbfbefc7464f6392616d 3045022100f2dd7ab2484643d409256667a0b5073ccd36b69c8261732c2e10ace45fbb5b5602205e598225567a268591d3e916b0a8beb7886aa4b13003df3a5b56b02b4213845e 
3046022100a7113da6bd9fe685e3ab361208c25531b1ee18decb057d6626b8afb86a66ccb7022100f8318ba7c7c1c20b3db40d09d72bcd2a7363f7054360e254180d8f7e6d263c23 30440220270d6ae6a2a30fc8bdc8bed496564d23d02b5f8e12aa1bb8787c0d65995d592e0220699a05aeb41d45011fcccf0fcda92700b7c5321e366cb38d96a7d31ee55e54c9 3046022100c67cf9c0f37349c1335b08907325771536f9c189d042180fafcdf2b65870570902210080d1c703afd4914edad097d3c2db510d97e8a298a222bb942f79a34057dfae72 304402205134fe3c4b818b6a80477da5d2c90c04a87347a8270b99a81aa9563250524df70220183b8b97ae907e245b555da51c9efc308c664f30521d3eab0b345b95a94ecf67 3045022100b68d54c2db664d1751614a5161b18d9ddf8b5db371db86d69cf97cbe3b05bb410220600fb0f5828de4af1515ffc9160ccec2f047d295e5c6dc6164d7fac7dbf0f821 3046022100d9abb48430de57a31b2b5326d855c72d9ce9b4afbfc786ca871257dc157b2091022100e386e80af4d766422b20914fd066383dfc99c50d321d4ae6beed9a0e48a8cc03 3045022053207baa80aa9f8e0e492993e574f1a22b64abe5ffd46925bbf98bf5c95cd12f022100995b67b7a3831a0404eca973f28eeda696c49b77c32841dc03dd0949cf78898d 3046022100a63eea1578ed74bb642844450c0d0ead29f4363f68a22417b64ffd5f6285aebb022100859cd64aaaaccd594da3cf3a702affa01a97122db0bf98cc6f5fb7a15edc8581 3046022100cb780d2ac705669fd496da0caed275147c2a580e1987a5d999a0f430781e4db9022100c982a877d2da32f7d811a93a7bc4098b4d22785a1a4863d52147e2c179b275c5 3045022100e60ecd4c949ef87c5f0804c689b952e40e9506013353577ac20a0555584228880220413c5a2bb9924a6cd8456836b1ad977d621a462dbb9181b038643a95696f09ec 3044022036aeaf248ed6b702d929af8dfa202f50c23894c1f068379618f446592484f92e0220115ba8bad99c2753998d6772c359c8f9106da9fc0290498481b8b11990cba28b 304502204a19c050367420ee4f3e17eaf02f93eb8f2161c3fe68ecbdb25013deda0b55d1022100abcf166e87fe44b58ae036df36ec3186e91d7bf3f690ac1ce50432c551d26734 304602210082afc16ffe76e6a26465b7b66950d483da6e7ecdf2d834aa5642810cdcd47c46022100a3b1012f61b557b2cb24766e67a4e7942fbb814df078e0fae7f4c7c27114c2d6 304402205893c566ef622efdccd7a31f0c5f4b0618ac7b017e2dfc5e46c1252f74b0280102202fbc27f7afd116aadf181c907fdf7a713fd6d9f180f0f98522bc728837812bfc 304502201244e263c79f465faab2035ef1cff47459458beac6266d319c2dfdecc75f74b0022100a29dd555930bc459e11c4459fbb81c3e50d03aebcedbee9536aa7b160b4a3ae3 3046022100d051dd4fc2503c063e8ca3f1c01ddd6fdd8232d8c35fabab425079b282923305022100c4051830b88bea66e4932d9013e627f5145e3c975d350d05aba7b6261aef752a 3046022100dff9c3ff063c9451c1b7508c734ab659e9a22614e9da7f7587d55ecbd620dca2022100d7f2303bfeb1a66d396940e7306ad5d2227ad669fec03a15e609ad62f94795c6 3045022100f620c7ef99da6c7d5d4f5dd583649aed210d749cd4c9ad02fafb1897af0c106a02201bd95402f1f58dd4c0de79248042b4693884cec8cebdf9641c9722fa5740ff75 30450220250ac3f202f7cdb0c0959dc8b727fcca53832aaf53cab934b14a6a0b0ca76b9902210082134910e7d880ae6c921f4fc5da3026c42785580edb23d2bb3ab70eb5940a65 3045022100bf656fbc9b7cae8b1565116415440dc9211d4f7eb5186766f94fae3d5c48114e022017c16e334b183afdeae9a9bc10c960454eb1a71393d57c3c623679b0fae047d3 3045022100f07522cfa53a900906596fd836daf331124d03008e60de323236e3a5dbd9b3e502207c53820cf2a2201546f6a48e0db5df25747fc5cec818432f05963a0309e70e7e 304402203b53c143e02b348812e7a51cb0657b901c1abe41359303637b284ddd7798dc0b022045311cbaf78f29269abcf767cfac83d88118d79bbbff2beb2610611019efd073 304502205d62a3c6d8dfc52fc24b15a3ce431e024ad0e4216544e25d40e5f7a6e9b2ecd1022100eb95dbb6396e60390d9e3fb9dedb422df2fd70d62ead41ab6039c5ed89f0d4e5 3046022100ebfcfa6ef9f34dada07607dfc42a5d1a6b67d6e6f750d0ec2d05d4a32f694f83022100f94acb5edb692fa8bcd89fbebdd5f313b1b3e52f3bcc6f1e32a10a8b76c9387e 
3046022100fbd1456c81bce93e57bf09cf230a838e0ca420de33b879047eb18c6e1a8e925f022100fb4c9a8375791f43f7b5e743f5707f15353cc9260683b1c601df0c28832f6bea 3045022000c8a7944dd70a911bffd8e83ae302e741cbd033d09d611e3bb7c061459de411022100b192836e10f579cf91eced3f6c7c5636f7dbf1edcc22c9d87e4e293afac9286c 3046022100ac20772c1d9b07e3405d656c8c79896de4f8793ebc75a9cc775bc5bb3bb93202022100abe05879556c7c0a7f3e92e9fd7f42943c78bb3feef02fb494e687cba1943242 3045022100a890672bb53783d52b86fc12fcf112838b7c8c38adfabb1445adaa92be05901a022007871265f1061b3aa2079b0ba44d902d9e97891c43d016c9f6f7dcd8c35b4cc6 3046022100b881c00170fb336835e072c222cf74d19540c58d117d370c69b20331c4253068022100c971d05e9b21fcd38efd9c24d99bde1de0e1dfd1a8a94a2963aa0ad7d614c78e 30450220092c90a766ef1b1af24062f9fd256b1d2d7ba27310d830a376a85ff1fdc037120221008bdd05db7d548d61f4f1eb9381fde54e7f78b54f2331168363d4d2070b58f7bd 3045022100d97ae30b17ca8607d7aa1a41a33638a43bd08c1e7d68d76f393def154642491b02207cf0db3e696fa0dff251fd059f6ef9f317a25949ee1ca183f7e61a0fa4f9cbc7 3046022100cb759c1c367cb7d1f475bc0975a5f11e61d905bce3f3eb557e745e9b064783b8022100cba9b5186a383cdff0aa9d3c9779383cb36e13781f1914e6f16ed8135711674d 3046022100b835ac31e8a3268e432e0d40ab0d69ecd119a8a39307cd7939c36027107c09ef0221009ac39a26066cd8b58ca11644c30ae304fab0e36279bebeb876804ca1e16ad956 3045022100af974c98402126f61c52a0ebdd9b72c5eaec1066c548f9d04b79b635b65740ce022030f98aa8f35ca1518f708dd23c5f9805c97a23e315b8485b1d24026faa6dc822 aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/ecdsa-fuzz-corpus/windows/000077500000000000000000000000001456575232400250455ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/ecdsa-fuzz-corpus/windows/p256_sig_corpus.txt000066400000000000000000053512401456575232400305510ustar00rootroot00000000000000304502207f7a18db5ca298a9bedaa64a47fefaa25627167475795f806deac8223cc9605a0221009a5aeed4b20e65f28b56e47898fe9ceea77c0a555b834ffcf8025968002ca758 304502206680a288b0955008c12e562f2a7788846477fe9c4f6d64422d528f0ec0ee8e9a022100e34ba82284ed14ddfab06cdef86cf18252039ceb605eb3c0f393a57cc17abae1 30450220593a761e61ac5eb37f8c397c8985eb0303b9e6f4a52327b660fc0dad5a1a7998022100bb0de6bae4d13e19743985362022d54540174caee4b9d6726843bab431b590c3 30460221009de0ea97c1868fac7b40cd626bf24500699fe844d8a32191234629fce4c16c1c022100add8158256141f2b2cf205b724ab0de63360e275ca5d269b957da73e6c9f9f56 304502204ddc0363a584136b0d52eac10ebc08470786353a6f4d13459cf220613a54c0b2022100c96d53f2f15fd4d689357e9123a9bec93bafa9dfe65425cfaed57d8dff2feceb 30450221008d132ca9ac295e273c5eb31dce52b6189661530c80940da02f524e7b0761195902201d24faba1c00781c5e0083a4f43801e6b0fa10b8f3fe7dd8ce837f881476e37f 304502207c3ee49cca41c2e79a82a11a8eda8e62345bb64ccce59a73e9898293bac3e1b3022100c06193f1d4670d7b8782513ae573157e0745bb323f7a0c4cfdb7780005883407 304402206e87402a62fed38f0cb31bbc427d92185974aa0ad311bb96211607b62e79ad3d0220114c023350e84dc637bc2c56b0783be5c8c859ba614884cd55057e13c5ee839f 3045022064e493d8dec0ac97861591a857d982ebcb7f5272632d47128fffb0177087187102210099c19982812776f79239f6a4b71a47a4237b796e99f83905bd49ad4ce651b5e4 304602210099785412b47ad626daf77c31a32f0e8247d21f7d0709f54887da85ecb9133469022100bb808d13ce18b7a7aee0f420b7f740c7b004e7755258a1d60addec54d90089f2 3045022100cd8bfa2004de4f810c1cd303b4027299cc1d776e1f41849defb0d50f023ee77c022038aa1fd252cf6dc113c2f8244f61bf781c671514efcca9977d03e3975c7b006e 3044022067d4a8697b1dd383ab8345839fb70552d8d53ed79684e2a173bcba624d2ece17022056adc5ffeeb1d93225f770e7be33ed5cbc50a8e0c59cfd113ceed9b77f8b8ba6 
3045022100c9193ec3fb79d50459a692f9d3f2915401de45180facf32575b11a2368771e270220492f10f94c0c58e1baec3e08de6a0fb43ccec5ebbf503552fd2c8f7ff72950d5 3045022067f1cd29bb58bf2fbd2a9861698035b209fba4148b65fb7ae530edb145cbf0ca0221009dcee7ad7717d9733ff475dd19d191fb69b8fb779ca2e3caa56c9f96a79dc459 30440220677f9a1263bf0858791057f0f16ae65cd7a97e0566a1cee5410b3ae9381e043102206565e304782d3136d507c59cac57c28dad329194df7919788d6dcf454fb68555 3046022100ecfb01ab00d65e2ffc45d707fa98e90ef5fa44d496cf78dfb35d5a3df6d6684e022100c442869bb88f88d92032e8098051cc631a8a257d385ae8584873e8f33f7f3a01 3046022100abfa388471e31b4ab767876d301a5cb3ecd12b6f590ab2191da305c709cf8518022100e11842338f7d432a8f6434df2aa2a5d30c17c07950db201ebd4431d802e9dc28 3046022100b65546300a305d701d3dbe85b4e3bfeb990f91268101dd0c615d501a0b254877022100ea4bf3f9202ec4b0de0c48a4fba0ba447f90b4abb24f7dab956bae4f131e6e9e 304402200edc37146dc087727849d1da73d3e227475368bfb9194dde070b10fab874b050022001620fe19d58c18d73cc3ca102cbec18992fa787fb2fca4a35e94136190bc238 304402205eb707294c990b756ef6a0bd946a339d7d7476ac79b535457f6fdd0f45e363be02201ad991c65b4c46660e2b3f577a63ea0da660d7b083f85bcc830e1e847ab4416c 30450221009a2ee37b2beb9b20214baf77bd94b15aaea3c57c850ddf2348ee99baccf455d202203d2c3edfb38efda99ebd70ea94af948adca2b7a147811ea9098dc0853a506658 304402203e94e20501a6f0ec78b6a0b5a02665f708de05b939e955d18407e0ef0b2d426502205223429890b443112a303c695c1f7dbcfc59086d645cb30138c5ce40f312142f 304502203c62ef73ddb0d24e2b9272c705c30cc12061e576a93fa9b936fd74aa2a1d1169022100e1ed6f5eca297d1f667439fb9852dabaa5f3d12e4d93edd4f53262918e7099ee 3045022100cfeaf05f6853feaa037e152320d3639cbb8241f919cc5cd968076399a066e83f022019438778a9b2f9961e83234451e176a272efbee44097ba6db1dd90d65a78216d 3045022100fa9b5fed60d929372486813bc3200100bfe79445d4749c714373cb9167e1d8b202207300c5504d1ca5a750c3f0c6314b1c5f69addf137ba35eb2b52bcc35daf53ad7 3046022100f93071b441a2dc8205eaa1249d84ad4018a41cc6636c43db04c940d937c0d0c0022100e03fea4293a248fdc937f68697a87b4667e5d55ba12ab3e1c49b012c3fceb94c 30460221008acc3289f30e457d4b53c14ce5ed3c9870a1b35f111285a15d6b872c63b53be6022100f635810cf6cb8bc5dd9d58f14d198153bef444d86ed8cbb85e51bba95481a00b 3046022100e8ea8e9d2e3c6de28ed52dae6c76537af1d5a830c5e50f34acf3f99475928a72022100a973415b3ff21ce49911ac016c212a8b3f6376a2e21f984dad033125a10c585d 3046022100f406b6724a94067bac4bcc382834b96a660b825698a6e57e5cddf4321d2eae4b022100ec7d77ed333204ebbad43d34c4eaf2fd86fb27c088ef4a971db1dbf49b3f10a6 304402202da85f64f629a51a779f5249bd222b43a968722b6e0566b12f88503e8a086dcf02207cd4000405f2ac40e9ed2c3ad7c47bae60c8c8068b332efd0a432eabed5c06f3 30460221008b579e64feefba6efd87945046370ff00b3b62ae2ddd4be2099ae426a9f36f97022100fde971a5d7b67837093d95d0679d918096f0d9dfb8aadc2be8f0e307491e0820 304502203510be557b0bd368b7a73fb44550440705217d3d6e336331dc68e3268454b8ce0221008c636d50474ccf6086215a896dcdbd46729fe1843bbf482547f4f376d9d1c423 304502210091fbac5e1e4587988c17f00bbd73346be6a524bd83c0ab65afc5a06ce9cfcb11022048a4b272f0c551b1bedf81a9fb0272ecd43e061a51724e4cc2ad19f64b471f7f 304402203e8c0f6f1b6afecace39c6b93405006fead3b1b5a7d8b1c9e711fd732bbd136902204dad68c636bc522221c1bb176b937ce7291c146f72125508da7fddd5c9750e06 3044022039875880444f436cb3a1d9f8100819f94a927ca812ed6ce4052f899216ceda4502206cb3e2f6f3e6c054e5799fa1868baacd55d4ff703af19f36b9ebf9d245abe8a8 3044022035814dc1734140e10eada599622aac0329ab27a91692f3b0346bdd5bae4fa5cf02204bf99e92051e613da9b50dc07accf7fac6926194a12330bd1873056267e71932 
304402204fa5bbcee849bf59194a61e9b412895579031fa92c34a1cc861358c55f0d8ceb02203ecc4de8ce3ca3755b61606c37851c06b79a68890af9d62d7cd360f5c49746e8 30450221009c5ae65d6fd70252c4f07d1ec1be9564ba9969574327ba6ec8d4ae9ad89ff7a00220428a7cae2c5357a748ca144b1086324de6240a5db0f9bf13d1fed9880e1554c4 30450220605ba895dc39ae849cc31af1dd6e590c72da39ad97ea3ee8a8a4b72591e71162022100a1f21bfffb84fda3af7cc80c826b7faf699d9047abaa19741b95d5447b7c1a2f 3046022100a147c096cfc68108cae334bb49fb9366f2b9526ca48c30b69b455849fe47bff9022100cb308e76e1b3b77fb30715349af774114311423a072bc7580e4258b39ce1d5dc 30440220245ebeb751de68e9d2e9f6764d442c092ce35f4a7cf2b0ff5a10e9fe6692c421022051f0d3a10579d4ea67dc6e5e9f2d944364ab7953635b5a472c62567e6d87bb94 3046022100f3ab47faf77c9200bf62445524dc47b1ac25355c670c753fdc1b53887b71a33f022100c61f4c8fd9725d85c75274f1e24d7ac77ca7fbe1e687c6253ee5336a1d17d932 304502204b5e335f4993b458dca443bea0d1747f18797204f7bd4879c0d8dbd4febb0558022100cb5749fd484342525421080a3ac9bc77ce6a9f235300419b983144fae6479535 30440220645f8cc365978e2fc0cc6791054f88b99ce290b4a3980c937ef78c7db25dedcf02204e5c33fc37cb543e1607a1c3271e2a67fafb9046c9f463687c0891cdd5b86cc6 3046022100ed2e3f9e43b7d6724f45af5f970e0094a9ea360754582fb9f1a6506116dc6d26022100d0f236cfeba18fdc7e1e541ba06e9a553b4d0a673d524debfad64c08ff5fb8c5 304502207f056c61dc351a7c867b488ac6c044e35fcf486a825c0b28728846fa71995a78022100fb0047d0201e743b35be52acbb0b71ca01af033414a662615374c352cb2ee2d5 304402206802445fc79b75d4771e490701501cc2e7f4ad686103538df6940c6129ddbda102203f041e726739683c3ffa766050007de45a7f1c2642cc15abcc5adfbc7fe7a63e 30450221009532685c3fcfbcfa64090c6ec762c230f2ba2b701858c0ebc54cf6b6894d98f3022057e1daa65ada0fdb4cba2d4afacb175ac06c8a94e15bf3cebabd764723c24328 304502201328a593b05a44a4f4e4233f451a281c96a8c91772e5a8b13b43d4d53872ac26022100e969fe2a60b554c4805bffe35f3d3e74dc58555f7b98600c23b28657c490d799 30450221008a86ab2c1c3ae79ec0b5d532262565f997b4c9b6ce23924e174de872f2f1108402206612f63a0af0532ed9111081077b7621e15f49ef4c332595a14dcd3e8944311d 3045022002ff742eb7e91532ce8158317065f227ae9c59c111bbc70d648dc54c97fb42e4022100eec297a4cd79f366eb8645c8ba1ebd3a2cb472f382669bdf944da36b029c85fe 304402205fc8b2abdf36040114e6f270dc16e6b08d5eb25cd1b417eac0c7a2fa91ae93d502202d7a3947c20da8fd782bf4f1fa5b665b154fe5b651d65674c34deffe250e84f8 304502210093ff0ac848c2f6e8b963261dc734ef43184e31e5715a854a99a17d0be45dd6e7022064f533a9c24ddfad587699f21239ade3537f5a44d2f30c1951f83b8c664364e7 3045022100cd88864754bc5ebedce5e3be163759f46185fa72ba186068bdf6e77e7e269cea022037a6e5d03db9efa1b2a97ff89c14a5994119b026d197a308b155def54eee46c6 3045022100fa0d52cf25468588e939eb56d7f76a398f48571a5420c56785f13c9b66a1baee022056ad66b821d81d4a51f20efffd849bab62e3e4457825ed349a1608299271188e 304402200ac6c5e89cb6fed49cb0f5577b32bf91f138a9ca0a9bf9dca0a7e38f205f507902201ea84953deaead021365875451eda12e5a2119a002a541e3b872f108af5ce06b 304502207eed11e1fb18eb5a8ff1c5a84136268bc7cc8bab3b3850d6f3498c9c97d46cb6022100bf4232edcafa468b1951860fb29db42e885bf50dcff6ac0709ac414bc09d2791 3046022100e8abc984ae2921542711ee32a448ea8d47dc2eceb787e510cfbcce36f5bf442b022100e1d1655a72e30936a0bf5f41e862c86137c519703be0cf40c1157cc29cb4ffb9 3045022100dd9d264187d1a851c13a5a534af550d0d296c9749a5cdc0307a71a46d0f4f17502203dd51c34551ddb61d7eb6e376b21d6ec500dca036874e94500bc5ca54290f844 30440220483a760c6f836350eaa372ea0d5519a6155905d709ce541a9b1c91ee14577d8302204dc519248cc9417fe095fe0eef3cdf569ef6ab7b865414a3f9802c8984ecb3f0 
3046022100c4ea9edc021aafdbdd902a2344706e37288880ce1f1a8b7f2783f87a135f7c3e022100e455aab90ce3a93dba3d63425b133f73564613d21fb1c14ab77a2c1e4ac316d7 3046022100d453a8ddae453181f43175fe2d4a5c63ab3deba19c91838c7aae1bcbffdc2171022100a3baae703bf484625ec8532678dff00246c9fdda7858ff95b2335ec063f9294e 3046022100bb04278dc34ac877043db3183cf786987b2913f975907eb34ec3ccf87c1136bd022100d7e50f1f730c6f89829d555dd9eab333699fcd844da092e485235bfebe13e4da 3046022100f575b3c3f07f475645cbde665c161f89e1a88779e1fc53e987f198b3763a4b1202210088f9a052292d4418fa6f0264815079bb68c9fb48952bf50560eaa1ef8c480c17 304402203d85dbf5b1cb0876e8a2178f373226caab7b45a39dc3eebc288f14bf259f38c9022059e49bf626465959df8ec598ed7f71b0d431976ae691fb21465eb0fc14be4816 304402200df9a34858e1eba7cdb1a6d7e4bc5cec0f578394c1889c0fe7074131a92e1d750220204a08928697e698f6c0fb98a1d0f97af6906a05e6e0e149df330a22740a1c91 3044022041e2bd312d27221f31f4c03234f79134c8c89a68932534082eb98d0f9910687302203ed5bb427873766a68140e3e6263dabcbe191428e8654f99906d31f8cae225ed 30440220105f8b5c5b66ad60ac5b09ba1a328a990b26f1df43f41d64ea6303318c227d3202203b26389c9bafcf9729fd6fc4d5dcca0df2947808703a99efb4347d036445d0d8 3046022100e2e2c60ee30b0202c5ce9f1e7b16ed8839410312065803a444b8aee1c21017b5022100c7deb932a948a3a6ff2a7db23b821705a6324452c77211e3280e902f95e7a309 304502207256f80fc44911e6ed5d1b28f587a10575899daca55b4c4801e03ca94f757b22022100fc80a1ec09a48b0713182611d4931228abb2e73cc77c3e037969a348bddf7057 304402201474a4bde8835548e45a0c2acf0021100db696d848ba14067e0bd0be1c8450c102206fa0b66ad1297ea6d67aa7679cb2e570c0c0383dfc30de815e27070fd5d43d4c 30450221008633eea19fd20013568c1a7d6ce432d58c4570a8f2b1ce15c38151e01a61260a02207de2db3fccaef5100182e7200ca68867a7cb0c1b54e3f7270f5ee88af0298765 3046022100b00571b1180716bcd953319fcaaf0a50b6da4d729be51389c00a7027a54b4c040221008262ed584e03cd4efdd61daa4866570ed7a48d28c1c16ca7c6edce4c293c78b2 30450220079904e3ca7f72bf750f87cf1d6b1c547cfa676223e2dc94829fdaefabdfa7cb022100f4b5ef188707a6804d28d3ec63d9d28ab15e459fa03cb05cd811d56b310a8e31 3045022100bc0fc4958bd780fa0f7563ca6d3b4cb4f3462f61ea3cd4f39195ec07e43700c602203b95b20c554d9b9c6ebd78e424627253c53a17a40eaefd55502a87113d0ab56c 30450220348a6fdabe5d0c4b1440037a64df2043f484f69080f86e29acc588543b2c4368022100aa30b9e75e196f15a8dac5a18b7b377e5177f5e98f4313283b61ecf201a58e94 3045022100bc0c4dee7e814a257edbfb5988f08cb5af93f4f05e66f423b635780f6dfb8cf1022055f827a0f9b8dbc1163b762799a8eb99f222095473c65caa50db91f4669de1c3 30440220631a31b795961dcd40fdf9a736b94d4a22067b80ec410995107ba8ca8f9473f402202a16eaca746de0fc199562713b5e67175bdc4a751d2ce9eedd11b94b7309318f 3045022100a2ee05a16401aa330e1e82a83a9c4a8e14f60691e722eae9e46398e2349e6ab90220729f9f38323dbe0c01ee9fa3e64186464c61d2ae0ec214c3c8d3a162c6fef4b8 304502205f9ada63cd37ad075d24c0e7d6d3a65a10470fa014937c0ef74fbd0d29127c63022100f73a89ff5d3b7c30c99e5759b4819876162a098c376247b3a6b8f0198bb884a1 304502201344e47e5d41e4281992ad06fc807c743276afeee84afcea392ce86304e4f2c1022100dcf48c09ed56ff141ab1b9e90a2d5aa1b591e48ca3d6ac4c977dd6c0d85b57ea 30450221008815b1333bf3b807acb525fd90685511c8aec4b34f9349e81aa3f05ac4c0de2a02205658bb1b5306af8815038286de55ee08efa900a1bdc47e87807d9ccf7c50e616 3045022100fc0189f20786fd3ce1935a910c6122f2e505279f4999450b291cc6caa08c3272022015b70f3cf4083112c487e52e9b255599f590fc62544d726f970c9c73528b1f18 304502202f5f9e2b58eb8ebfe8e480b410473ee8ae7241a84250ed1dfab638a9449aff88022100dc24cdbdabfef2b5ab0bd1ad4a7195b7c68ec0e5a5b53c9d0ccbf22945fb3e28 
3045022060c8daaab4b43811d929925c7e76f0597c49a8cd5a831c92241709e867ca411a022100dff3d6ef4beeaa27f5c9c77afe1f116ad8b56526783218359ae97d523a51807e 3044022012acdce24c79e3d355671d37cefd3e1b759a127cbf3daa94a7799342c222bc4c022019c09f5614102bfd500893b4ffd4df3762055dc831e73c0b38d8f7564728c409 30440220460025f67e655c807b561af756df20b4d774b7d4c820dcb60b3f4fc406d899f102205f2b015e18fa10d8fa15b7b03e25d03d8bc0970176e681c3cc3ed6e721888b93 3046022100ff305b72046ee1a2effd5676a1be2b0e5517d2c976ecf05b1084ce19ca822042022100ffe1824f58ccea6509575b4cea253d530e2083fe0ab13aa608bc1cd243b23079 3046022100cbfaaa2378fb8b382a4aa5cab0e1bd0d43d9426dd8398cdff257638185dc43f302210088b743c0f84a2bb0d1b0bb82deddeaae9f1d32110421b866c3b896bda7b1fcdb 304502207a01795212ef64327564461486dfe862c1b3045ecb377e767b9861fdd2097a81022100bb3ee3dfd925f247ada0ade11d26cb7c2fc9fc489010e248eb9de63884e28838 3046022100f3284fd365f90976b981af24454b750af140d91dec17a37b1be5d35fe4b46001022100eddb424dbef580ad1f59dfa5a14b46c5f1c29d5a108edeab841aa2ea84dc60e7 30440220203efd3586d1e7175ccfaf18d64b4a76924875db1da010befdd755b6299e33ee02207d2d87a9a0e35801ddf7755a9fba872914aa5882f7e41765e97b16fb33a5981c 3046022100a1b1f7cc34f6a1ecb6000dcc8a1c0f4d6bce40a0079eb1a8a862fd2fa865420c022100f9289f28b56a1c79dc8a369ee186d0e9e65acf2578c7d0ad09f981bc38263aae 3044022069ee9b025796b8b5a5b1061523d3e2894fd959d1ba2a95d50401fcf4da8a6493022042a3473f26ea85aafcc178a01156f0f1ef021df5d1df5bd16411040229edb8bb 3044022077d4673032968db13c85e9cc71e69e1595b78989049107a738df6f6a48896de802204456ecd88eab90a6e8abbd9584542f684b1b7d91706bf175961fd5994e0032ad 3045022028d0ffa25b18ce94a51b6ea25254d6b2302317997483d7e2d0c586352275ab8d022100bff5a82be888beff970f47ea5beda854950bc3282beafd6087e5a6e425121c2e 3046022100bc146994b3da2e3216d0c746ebee5e57764eb259f0a04e0c75333c24afe020e1022100f962b67086531e49fcd2d1e046446335a8e190b6029a295dad211ef6ec2ebd38 3046022100a12f10c2914f3d51d4fd8e1525e5730a58a6de89905fc5f18fcfd89d50e8d28f022100b45c80500d6be04dd4d41c58b59a1e50a6fc6ca1f268753dd20603e5f17e4f49 3044022075105e09bcc400b4325cf74bef215314f2be9d0b7b00d0e31f2ee80ad25685160220055237b10e9fa213e7b6fcf964d4e98734497e944cb3c32151710b6b73e1e606 3045022015fe3bc2491ad6bb600a1452f214528923df4f486b2266acf1cbea60a5100447022100e225192d1e49e416222b6be5d5e7ffdb9b8aa1d4a27b8c64da39c41dfb32e9db 3045022100a8f56ad1118bdc386a0114007ca134b78b73149ed2e98a0de432ff211baa5e0d0220763d8c7db89858b38a2f30ee9d3dc309a25bd08ad7c98ca9663263b060edf9c8 3046022100be1e155cfd6a54ac7ff385acd781476e58b45d5fe0a0f6100cacc9629a94dca9022100a777b6eb1cb7772e31763f5bc20a835efcc7490672d3727136d76b379822e350 3046022100d1fdf253b8e05724b4629238a0140eea91e2c5fe2c4fb77ede0e3b1e68f38ea8022100eae9c8647cc3fe49d65e981a40b958cc380c81b330f5738360a7d82ef454f5d5 3045022100988c77cc205e2a862f929a72dacf1dbe6a6f623e9a98e53a3f1316edb7ebc9920220334e65e12fbf27ad3a1cdc7e30ea589b64d3083dfab8d909d4f8fefbac66289c 3045022100cf4dc71a250f31771d43f7be0f0494fa76a24775fa6fae9863ae0fad6e0741c50220585fdd1b0ac33bdf43566e84bdd06913f9ed42e65e77e6cd6d34b9ba8f8ed541 3045022032b2c961f5ca6b31d225423a1184aef8b0b7d8ce2ec84d392ebaa12377b0df73022100dbdfca3247538a50c065c32cd5c67ffd85484c0a0e6988624e24fcfd109fef3b 3046022100a9a4349f2871036a33b6f51ed718ad4a647304c88ff240247d42da697bb94ec1022100e528cd16a4c96ebc040d2d3188bff57e36c0a1b141b90c74b54b12d0117749d8 30460221008c525baa99b3bad05634fd7fe77f9dfe567c1b414fe1d8230af7913e4664863b022100b8686d7cb0854b403cfaf55f963d291f96bf502c00180cdac11c258960611bc5 
3045022100873f3910308ca7013251bfcd5fc45f629b2951966949e8eaf1d8b064246ebed80220164a1e68c4be7702b085f5d18c5a83fd3f31e49736dbcc46ba64b20febd91de9 30450220360c9b7cca17c3cd535fa9f0dfe41147d291e27309523888de08813b68dee5870221009cd59c0788ade98676838acee5bafe19612e0d6a5c479120e48b416e7a6549db 30460221009df3b242f903e7d46fd235e3a3918bf5d056145dd930d2975449d154b5346be6022100f2e1fa9fd3cf754383abe3c6f0ebd6cf04e1ca1e0ce4a70e940f2d8337764cb0 3044022005c685fb830dcea5dec8d3a634ea43d99bb9a921d53f8fa4dd538cad1f577ee902204c001d4a5c269e47133bfe887a5cd62e60a3d8fdcbdc06cd74c86d5c32692ec6 3046022100b8d66f7d066eeb2cb065d84034815d98c91155f7a23bab19b1a02de3515b83ea022100fb3a1de15b55a934b48429f527f0c1e667071404aa839b6c5c2358c69ad88ef0 304402203122a5cf5ca71f09a369768a97c4d085c3ec34ae02e88913732a3777725a66c102206bfa6c51d45aad6131ae2904fbb327c950d7389904d862a923860120548e1504 3045022100a076b73335bbab38e01dea36785e5f78510475cd52dca4e355ebe35b35f1306102200374d942941cbcfef69332c2eb1527157bf7e35be6479668914687dfab729358 304502204c4f68b958f9e173ae88a3900e775c4e15756b0aef3471bb287d7c29668aa5d4022100d040019295f021afff1690a3309b3ecd9540659a4a4db77e62afba7b4ed9ec38 304402202ac18f41f46aad29b77ee568af58c90597251229bd178ca46f647ef4b33fa9a1022068274738cab6eaaf66eb03d44c2b4825b70c3a11e8abd4553eb018ab22fecf33 30460221009bf42a82ea913df9c463caeab4570e7ca30de20df61292c890bf61ebe1720fb802210098f78195f0886496fc5a14461b33b33817212c80f73adf6d570d925b83d0b1ec 3046022100ff3a7453c7ac6d65c62e141e3e7f715fadbfdf1739d8e4ca49989c0792d93b80022100dc82e8254d074bb894a7c9214bd6d4a83519915da200e3c38df6d1a6c9fa0e1b 3045022100b3cd5d55dc27cb52ac9f5ce87ef431274b847001703c5e4f504886bc29af0bf4022072d5a8c753eda56e09afc2c2b8fdf6ac9162f1e2577f14a543a2a1dff5c07d46 304402205f38dcffbe11a11c3edc6e0811c46708e7d9fa8f9c9b9bafd7ac853234830ee902200098c579875130c696978d060df5880e27ace5306d2632c6279c459bb3a9767a 3045022100a9af37e80f7570ae07c8db18d4025543610d8b7e1a3c246e5ddd0943b79e5b6c022016e439d2a3bd468fcc8d98dc0c736a5b55f4ccb55b752ee6b428a33602866af8 3046022100c43bc49e8ca7e849f79e88482527a967c73a4d3fcf2d23d3694262230beaa3a1022100829bccfb723e4cb4646a281eec10115a15863528ae31af7c50f2c20b4a00b5b1 3046022100e622c4bc809413fe23d417f41c9ca60a81c6d0c91508921087807e6793d37e8c022100cf3a8adbaefa5f13030b2d50cba461b440acc4ac1395fdcc97ce6cd34466938a 3046022100d4cc47a83020a2f8d1371d2a65fc708a2ce621a5c9561c64b4c22bb3c0440d3a022100977fa8b6aac7419ce58bd9ccae6249e7ca4b4a48043b54fc2c827f8008e174c7 3046022100e2b39d77198512cbf945ca366d4a1b9a5d413a28b93c3b95b121f873a9b096be022100f280e36fef525b345912a55c03a04933b4dbe8178cc8ad149352841c6f76a5dc 304502203576b015671c9288fb4e204a8a29d9e2618c4ba879369ba9678df1c5c34edb24022100a8f83c28d460dd4dcac46b71a95dd0b414919349afb47212a8fe979b81af7f85 3046022100c9ad9ed0cf3c0ed3c6a2b989c003230bc0dcc8e80f296941eeabf224c195c16502210095ee4be10522a820ff78b3c12f9035d41d49bef5e31dfc24aaf7078dd0cdc092 3045022016a81bde75042d5880e302573a1ab305f737ce12ad34f0396ead000a30ffcbf2022100cb8a6363b66b80d50ad9dfd0f66b84186c5b2de469f25025aa18140ecc945d1a 304402205f28b360725dc26aeea2161047a6c526352a670a8d229d37d02a925602b49e8f02202eb6370ef8979a438e9257d2ef577f48c8e5c06d179031725d44b02cb0ce56c2 3045022100aee3d4f0da8946ff33b6213e27b5080efa9839d82c3c567f8b2975097e2ec62102204e4f2bcf4d02e778f799f5e3a12f8d0be1eb621a8d20fd51deee38b7ce152959 3046022100cbfeb7840ce1d43d87d706cf0d68282ff4aa75469b11317d8c25606619982f63022100d1ca24c5b7d9f0e530048d3e8a6117e1c8571d121fe3c25f2fadce08d4818396 
304502204774f521d84026833d47c760157c11194f85c806da498df2d7a14e5620459792022100c3835f82fe06148b4378d8bb62dbe1bc131c28d3770837977ac21015ffc8d6ea 3044022069ddf761991b75222f6a972edbdc2dacdfb01f7c72d3511e79e082c3449d547702204c02e1ef602634e8ba8612c4f2fe27f38376bfe44b38682cb672ba26d4925d69 3046022100ca02f4cd6019797a6929df6bdfbb1976b4cb19d326c51dc97861024c2f61700e022100dd7c25590ec577acfb1ce399bcd6d1dd009712c0eb85b1bdc71476579bd33547 3046022100aeeda2a7088cb3d7ad357b72383e081a63aaf2eb7e4ab625f481d93e71774105022100fa7a511c301de00a5220117ac926f455235a41a6e73653cc8538f6593dc5d30d 304402200716c6e4c2749beca5b859ec9f41237d479865d7c43abf5bf46cf8e968b85609022072f4179943e792b7474892d71e3084bd6a866bc796a06cc9c11ffddbe0d1ca1b 304502210081119cdb31ae5980f36b2b8876172abeee7346a1644ea2f9b895376bad513973022031ac156f0be1f79695e1a3e293eeca5fed8dd77a73da8b771638b692ff3ad7d8 3045022100ba8c58ae4646a545a83503b23dbc5394079b8e73afeb152f69867aff821578a502200377d46cea04a70ec50b842f2fa3582fda998cb0b5249d21bdefc84573ef3d00 30440220482f762c266669f0a7b8387a8b01c442cf0a20125c233f6c509fe8041fa8193f022061034c81e2aeb3782df36ecb281c928ea750da1308784060968412dbb6cdbdf2 3045022013c7b297721f85dc28c9af45e94351c22bcfabe7cc2f001595da5f6704ff681102210097e4189e47590057b41744e1b091280d691830b061c8b09c359837e0dde2f3fe 3045022052ab119abe0ae503f118d001fcc69458c3626e75620a2dce206eb97a926885d5022100f521ca70ba7291deaa36e2cb09af69815b543588291acae26508f16d48a4e226 3045022100d23ab21a510bf3eff5b14acaaa0869cb933501ee9623c925682f082452c7dbc902206684f53113845efc5e4152012705854c7361dfcea647eb8fde7ce80bf98d4cdd 3045022071406650881c1a431cf56018b81752e95332492456d6336fe92c63281fc483f5022100cc344cf2ae55a0f319ebcbe64ca1902c15d477fb958ce60c25632b5ef1459519 304502202273a290573da5bd53be32733649641454dcb72cab2c0a46143c96904306903102210095001f3a60011e74ab2e39552cece1ea08883252003ea45adea5851b4cf42f64 304502210099f73b2c5e6608dc4e28baeb27d3d50e70dde93d86b6e405a7c8afd5cdcb85fd02206a4cfd4bc355ab0b289d47d9f7e6cd6f60f3f2b4470b942b577d1c37b7f209d3 3044022070804a0bbb61eb6627b57c991042474ce6470a3ddd62ae11b313ba303e7b1e880220715bcef95d865ff50578d56206fd5bc552a9a66c0a7850fc7b73b4c305018f98 3044022000818875848add5685237f0eac65e7d53e5cfa3a93a173f9dfb9674af51a6273022004420fca8faf84f6c6eeee6b650155e4b579d62b34238f02042bff8b417b5c23 3046022100e27d58054aee06638d4b79aa2d2018ee40084b2d5df956b6f2466a495518e09f022100b46cf9f759c97bc26ca685983f882b92f4a60a7d5db87ccf709a08ad0c7391db 3045022100bb0c558436e54f8ea824470e6101feaf635fd52e0faf8254a4cc8d2e4eaef01b02205c0ff1647449ccf9dd96309dd7cc707ef878c68a5a7ed58087c9404dc7f24dfa 30440220494639f28d9c31a1d25b012aea8b5558c3ffc2cdc97829e861a2ce0a13fb48ec02200b23901fb166a470b2f28950de55f570e1969c32631e4c384b771e4731e56017 3046022100c016e507093500a1012c39ec541b37ad0eb3d0c100e5058a6cc72e98c4a071f8022100af1b2de8731683c475e68a9e32d65bb0ca75c9423de46798bef8abbd0180112a 304602210086307b4b9a89f7ff235ef34d73cec596762531658227fd6226c34deeb01e4691022100a1c7e72d6fec138583ccc9756182f189ac5986bb534d24efc64947c63b82f998 30460221009de5f9db517caecdffe25c9433d4ba5af928d458714bde7b7393643f97aa0481022100c21db63811eca25dc6d0e729ebc2ee74169031fef546e034a188d22391f1f710 3044022072294f76c6139a61b718b31ac68ae7a6e3161d63675e684c8cc076d84dd8dca202203e5502139674c596c605a052b586855f0675f487f22d515b95ddc5b81da55ad0 304502202d9d04c0918bd9e1199496895d4a027c9a461fb5833e6f15690c75ac32cbcebf022100c7818b87c3b67190c374fae21d7d3edf695334a8b3394408bcb470406169bac2 
304402201f0df21b20c7b613e6366c1dbca82f7683016895327dba14e3f0375c0b6406880220655d17f56d1ce7aa5dfd321c61a2c026cfc73488d5dd2679518597e77baa1845 3045022100df89ea7770e382b319b84a205580becb906fd6d25a4dd7da3914032a0c511f4f02202ac733ef8c734fecfcc68b1946d12e5aea5375c42453c17590669c6d620c49d8 3045022100f4e0a5d815f4d03790e3e6d20bdde3e84666d326e1cc4f7076afe8cad502f0b802201c4a70905f58cdc1226e907b1bdaa9e8148c1abc7724ab1663743bdd5edfe068 3046022100ed92ae7cfe75978052c7fdd55416deee83fc2acdb64202ff477e6a0575ef0a09022100d8e5e1ff94fb21ef8b4125cdd3697db2c42ddc2bf034f0d9d2c10e644c071e27 30460221008cfc63b51544385d105485217adea4b25d7839ec75feeee4fa12ef969bf39733022100d9e778f63f14874ca6fe68772cbdce4082a15d240ec1903d0b96e452c4e694ba 3046022100c0156879fa90b4f48574aac3a220d9cdd39680f9294245107785eaf19f5a660b022100f80e5a8dcb601b7080c279736a4df575e5c1d01a7338e57dbb04ca464a8c701c 30450220175bc780ee4ad024fea632b4867a9c92cfc65e12ed22461abce71695fe5286ba022100f6fd66977ca006a40c159f869a0c653a3a99e2b185238d1221f6a12da1a53343 3044022041401ec836afd01bc8ad2e785ce3597dea4311ff03853758013700f0ed4f788a02207feb753bdd47676a3477002451cee78f327ed9839cf69b1029942ff007df0335 30440220567cc351c9ecf115aa13c8d086f4139df82e599c385d92564b6976fbd506b64b022041ce7d94f808f43378ee2e6b26e9dd116d3c14d164b51b2c0b1fa6d5be0916e4 304502202e3d87eba8cce1a2b48f37879fe4668796e65fe7cb03d52f0c260ff157d36976022100edc1ebd66a0472a033bbe2af35142885b1d9acf419c3a9f789f34d75ca6af62b 3045022100eec86789e11f6ce7989fa724b2951a45b5bc228218a9fc273f05ac55ae2f211402203fdf134415399b061cf18f8ce54e9d266a93a5cadd70a100862940d45bf7ed7e 30450221009ea1a46331bcd2116ea963f6ac1d247c8e569f4507327c867ef974f7dc24ccde02200baac7ec467182469df9790fd578f53071ebe939992a2723a4abf9fde54224a4 3046022100d3d3cddf711573dc381312a42c5d45d1f77c90c424d265e4ea69bcaa20ed6d4c022100a0d2979314660a0c4b5f823729361925c0a73ccde36a453f1fb521088e5c0153 304402204f2ef97b4167245583fd19a37fa25517c9e4242c21d2de63d0d4db73cfbc1e9102206f6d7fb70076f996892e7e7054059b155c1f994fe74951e1b9aebb10910452be 304502201b7e4f9039f98f61ba3b194d9cd613c15ab053fa0c7dfdcfbefe15ac541aa8ff022100bf2e32e8a416d046606714b350c6707fccb0ae5c594ee25d92771af34c5ff143 304402202f4236743ec6b358e96b4aaa9365bd6a9b442337e6b06f29fdadf41ff1667a4d0220117f6f2d5dd7dc8f3ee63843ec043f15755ee590e34fe5771cd925ad847f691d 304502203082c95488ef7f12098a36c75b2f4bdc7f30c0a1c7d8c8e1e1cb0ab59164ee0202210095d31a29f011c77b3eed1135f835c1cad7c84ee09eac880b2d737a941595cd60 30440220042cb3ebe734197bf5f9442c48ba7e7d5d98efe8360dd4c5d4818a38aa8af98d02202879e13daa6380dbfc67b2ba57c22e3d8b7a96c4a0d9f5e6c36abf080eb20dac 304502206fc4043abcf57ed2f42a4c6597d50e7f63b5e2df90a7857c192e496e98452d6c0221009de09f62861a4cc621a2e8b62a1fdd17db7cce67bb8c7e1a053c94ea4e0e46bd 3046022100dc8e67774dc84bbcb5ad25bea4e9a71e64fe85e0317d5d3f669cb73d5b92c55f022100a8e48bd46c035c2cf07386d29a2c972a1353eba27eeb997f1d81557618c33fbc 30460221009a3b944102f66839a3b4e1546fb85f1568e3bf11b7f6740aaa1e374b485efb3c022100ca0876ffc3ec8bf854987f81c93ac58da3f8665c1f3fd49168fa0d44e21efc84 3046022100e2596ac71a90ad2edff440397b5e76231eaaef2387dbf0a045b28cbfcf871b39022100ed919b441c89b79fafca4f343e315c2df62aef39fd69cf6c5a0f258a8d3bf5e9 3046022100c87f6e9e08b368e5897d83e14e9adb176e6f445aed116a4b826cbec32b08af0d022100e9a6bcdbad360ceefd6dd1e984356f9aab961326f4d4e0afb85f51705485fd70 30460221008cf17face98fded2dbac9b6fb4aa5f1fafaa3d355ade0f05669833f5e7e71b8b022100a6c59aba6a7452c6eb4576b2220f284a6e2f274fb6418492c14e0b7db28fcbc9 
3045022067fda1887c6e32a285a902b99f21287b4ea7ca03d04a12808184b4244e5bdf0e022100852235f923bb43ab76984d3eb374910e742c95f59ff8caf9eb6505b28a83c935 3046022100a9928430b802a4c46a0f4df2e8c3e7725609a0997f59d2e353b79bb9a218cb78022100dafbabde88ca440cb46f1c9d923c3394b16c6e34aefac9a7886f4e9e4879935b 3045022100bd0bf7a77e370c8da6eed6d96d241951e39cbb462d6a3a6957e0d9de0cd6c4e70220271c802033758697beefbbfdbb762cd0c70f3d3b9db4f855d987391f8f708ca7 3045022100ddcce20582fba96f00007be5abd80789bcf01651a098028ad48646d62e1800bc022002e79190187762d73e09ffca1ee8f5cf4090ce69b65cc9b48295a088f14471ca 3044022065d4eaa5cf9c9dc554a4dd67bdadb8697cb37b439850936298e8cf7b8ab7513c02204b5dc818601688f4738f803d223f37d7200512a58125fe111351643a2b5b16c6 304502200f1179a7cba55d4caa489cb9cbca60689d7a8130cf7416747e950e5013e30064022100dc99e955661c7ce08dd3588d625c51d321156f1bfb79a9f94e6767e492dc138a 3046022100bc3de42bdfaa547f37ddda0a6a543a7027043f06c64ccbdc0eff92a0c260203502210094ff997d1fb781ae83c9c02462479c552bfc5492e2eb29c407f4185cf7ca9daf 304502207049a988793a884e2db89e1e98148297de52dc411414f1f126ab2a35ff3a8040022100f7a4d0f289c7fbe1214943a097f43b1c2490c25cad974779f86c6ae7c495b75a 304502201c75e1f22d3450fd44a511cd799986db49000048281e32c3d7472e21655f64a8022100e69b7251fc189cf0d5de176ba228800be0865a4635ea443c141f183f362621b1 304502205cb600702454478c399d3d07a4c2156344ad4711dab606068670f3ec6f762fd8022100b09653c29c8561320e000053761272e0dd391a9ba22ece984edaa8a960ed7ca1 3046022100e25524b7986e0fa2a3b50b16109bd8e1c6bb7f2fcd17b01e6e22b08d7550f20502210081e578e68efc7067f32ebd2e36c22a78bac0c84d842dcaa9be51bce788a60a21 304402200a553d63d9c9fefa93474a6a326a5113c3cca32b1c815a7e74736d4567ba5a1a02206223afd7e7ce5ad88bb7a83bf7a12afde169d328f87a9dcc1b74ad9a6fbd27e0 304502200701656a1b6af64ae6e3f11ab6626d057143fed89cc653399c66d1caa8a565c1022100f59b3421cccba7777096723495fd9e82fa36c3df658c286600a2130b0ad20dc7 3045022003305409090c22458d3e09b29dd9ba33c7d27545509d2bc2fef9f491fc43ff4b022100ecf9a9cebad5ea0d9ab75ab23b38dc1e1610afcecd3fdad1e4605ffb5f6b2ff8 3046022100d3752ef70f5928308f06fcf585a7b350142c749f93c9e957875fe5a6290f3bb1022100fd0ebca1f57854b148793610a278c8f17609c1956e801d2c9181e2b10eb4eaa0 30450220384e678513d31a4f2dc4b481f0c9259acbd1b08689518aa4a31bf58cc7be0b2d0221008ff051e853a370e52f917189230b777712c8dc63f534e9944632a6f0663f3583 3045022100b45b27318396377e120e2b90db7722a573aed4628f3a95ee72df13f0982ff5c302202c6c7680bcf994d15baaf6c4160fe43767870323669940dfd0975883c5903c1e 3045022100c40d35219e128003fdde855e9445c1f4c5beba7b4a9f12c3f5557d942086f1360220569b0aa6a5b175b7c5e3c15bc59868017a2f6e471e288327c435457742a31bcb 3045022100d08754d3ff1c88aaf4a3dd8a3e5e09c027e8850f092b5d2952a0da2c2f461e950220754dc36c6c1fce0adb5783c13f20d4d87574f77ace3d6122f6000102787b14d4 3044022055da5c7ee4b8e74f8010c1e488939767033d329eaea3c757d90de4f2b1f947ac02201f1a737eab8a31a7fde8cfbb8e591083401ee6eda49587b6ce7373208c03a3ce 30450220610ac0e8cea8b824337ae7c5f40a51114edb86acbbca93ff22f4fa6977299cc2022100b9a572e8e42d497c0fa4d5707a33d8fedd0c4609982f8f2ea0d97802cbc80ab3 3046022100b54b3d8fde08cb98624d31f6bcff3d06f1351beb49fb568c62d87ee75b9bc03e0221008962980a637e0ceb4dce02a05c2fa55a2bbb8436dd7df3a5bebc232f1a4b2706 3044022053365cc59cc43fd68a1b13bc9ce901fdb4438e3fd2bdec53c48e059b38a63aee022068030f2d16acd026697bdce6061124449fe04c7ee66ed51bea05e1708783779c 3046022100fc393b6573c5b54142e7c26f79128813f74a029600dbf5cd7c23435bba7db07a022100961cce52b1d37010589dc3a302cd255d91f1fa3a5a3ae2b02a3749f5e5212a88 
304502200f5dd7e2d3269187f4b72dfca866f3733a4b14b89c06470388e22029cb6fd425022100f9b10f57fb5f5c6c8b9c981ac18ee269f7698ce842dc67ff88d46c596b26fa0e 30450221008ebc79caf725fcfa0ec0e776a35444ee9df348ba03b43fb63612d0bc2c69c0eb02201d19334f64e57043efd425d6ccd72f0fcfb1d52e6cf53e5c85f45636c57a9016 3044022034bb487b6cf8bf4d42f4421c6b0470d1fbac095a5428fbaf313072a557a7f2e202201a2438731dafb474a04347ca5e90957bc4c3cef58b2e2d44a6d2cb1bafcabaeb 3045022100ea1ea9d53cb39487205502f12e12bc1e0c3e821d94d8cf69329046f215a31cd402200901f65eb28b25d97f9f81ec624722abfbc83baafa1951c6ef03e18539a4e7e9 3045022013de335d15c87c4fe85b80f7e5df2391a0ccd4645dab9f2ad0c33f3319fae24e02210097e1ad6b12b8098e9ea21a56dd8cf026941ba8273e55200cb60d846804f18fc9 30450220347f3325bc4743ce4b544336b6496bda036763f408670cf0b2c3398d86c0006302210084c83f387deae0cad51351c7e0ac0c1228d9bb6f3b5a529718756b6cbda88d58 3045022009c9571087a0947d5aaadab986276e5e7dba43f69dcda424eace519a8fd3873d022100ee140d01a9fc57c8a52530d8bc5f128a073284471bdd83ba8bb4cbf54fa61b9f 30440220183fb82cf6c7a319cb1586692a2eab642d905f67ce016311e75c728de3a54e2302201efe3311be9d78a8c7bd85770b6c662a2b1b1939ce6a701f477289c4dbd501da 304502206b36cbba098a5e1f8367fe17844c44269f482872854abe33244eb68eea5057b70221008be392112e8629422deec6e691e6cbd2da23ea63ca599a068214e8f6ce798f12 304402207c6ffa142cb391361212b9538dabf803457e8b37ca0a82b74ac55fdd52195a3d022052095fefbb0c9e4b693d929ea91ac01bd412cb9cd7aaa366bf0bed7aa7567bf4 3046022100a633c29f3fa7fc86317c47f9911064f282ba151ff7ab09ab0bad0b2be9ccf934022100937f37e3a74d8161dbe66a1abc99a6c6d382d0e553678c5ac22a1518203dadae 304402204da2a80f3ee14e8f19f9d5b95ab0abd18e6d63ecfd7acd5c67487a0baaacae6702203ce409778c2b822b00b459a8cd6bc0b2c1d68f6bbd25fc110fba5d75eac0b551 3046022100ab9a04521d35b4f3b05f9156bcedd3d7818532ea3e042aa9d59f777430869550022100e341dfa2a7f92bc835f1bb56c1871ccc30284dcc65a12b60c9937b302ae5e94b 304402207c60aaa30784d95962edf5b8cfc075763bb13ddbdbd510b0a173079ba658714202206c714d4615ded0396539258779a79b67972ce3224a184e538cebdb89aa87ce7e 3044022039ab925ae788e657e81256d02cf255c08ba046cbb30630197f3b7d66ff377e56022031b86c805a194362242a7b2f6841fbce54fedc8aa35b195df50815c18589a6b9 304502203c7ab41319f81c4ab5b1983848284a55bb8e6105ab8d44a94e9ef481e3f3ca62022100e532f6eccd8b2bc19725ce2bedc152c2caa20d8e37d858abe5566d4f50876f44 30450221009d7ff09dde2972362f835eda87a71d88c36c9fcf264e24f7a1112696d8fc855702201c40a32b89ccd2033f93396bf3195851534304755b774b35ae1f1962540c37f4 3046022100e1970e3f28c27e907bfa1544e502265e3ca28e521d983f693a14eb0ca894e87d0221009441cee9a7ee5589cb93ccfb757d835c7599214ca36534c02485a37299b140a4 3044022005556275613778f4dc58dcf12c3471e28a60fb15dba0081868b758ec93f7eebe02204bb3740ce23202cf756e81b83293fd91f96a721729b19bc5c7ab71f5a20a114c 3045022100af8ea04d71dffed0501142f0af2c25f8e97f403f60ca6ca0996e0e6d6d45768a022068cba05effdd329666404da58c945195a7ce78fff15826e89c35d3ff307914e6 304402202d9f53c0a9e634f512a52e8ada1b0bcf94b8219307b8ce9ae7deaa6db770c9f5022060725d1d13ed33fe39dd642c1e6ceb0de05c324fd615c14e1cc3bbcc5ebbc015 304502204071f64e2e5c3b131bd451e2780d043104ea5a95228ba918e6a4a248c7f93479022100951b5ae2916568e1dba54686a978fcb462a924cc18fa73df47a74be2ef6c42d4 3045022100a8ec43e76153a52074183d13d79743e9cb9eb1fe6a57e0d286bd6541a494b3cc02204ddd55693e7cac6510357fe77f987fc567b88d5a3e4753217dbcc90a25dc54fc 3046022100dbe7ef5a436d6f80735b6b6754dc18f3dd1a59398a4d3634a44c5acdd97421fc02210085cb7e8b8331b2230bb3dbcbac294dcc987ed6ce18da2db23644c76733464e9c 
3045022062e96d514228d62969e5118b471696a0bcacd415ccdede48bca91c3fcd0b9a06022100db4b69b50da4d9fd85512a84ecfc45115258141bdb6daa8129afe8f956898c4b 304402206e547bd999b5bdedd56cd09cfc849bd54449bd31012c3b2cd54558b68b0941a50220584475e155c9f7dc4a18d1d08acc21c74fa104ad66912e944dbab164681e5aaa 304502203acf54a70d9b433c587f1ef08549a68b4d4ada0b487e210326bd7b4df7a5decf022100bd43c3ec5d220e4ee74ae914bc2e5a708e5de3ff3a6b872cb5ac82f4e23e948c 3046022100f1c0a33ce759ff2a117663111637defa2ffe69bd7ed8c5e5e3bfa09f2214848f022100ba993acce884e9c994259cc4245be22ab9351bb4c0043160e2450e0e83feff76 30460221009e1e6ee3bb5dbd5079e897ff2db4bd90d66bcba3f8288b071373d1c8a27a213302210088d7a632311784b45dd257440b9d30c41ca6eb5b96ce7dea64b529c8af2aaba3 3045022100ccebd072d5984fa913953f0255536cf2ee759e6975e57905d621b88170fc018702203022b8043b746d330049b4cdc3c3a4f93573c434633674e16fd97c5c98745d16 304502207875245c3d277d2dfb085debf7843af165fd3c7798003b029d930ec9304df7fa022100849872aa53a42b0e0ae6ac9ddfa1ab3cc7f64e58379b28b0cc9151b70c1880ed 3046022100ce04e3596f872527421b3e38a4314d3f7f3714b707f5891cac04cf29a4b4abef0221008e55730feac1c81f32e98c3122ea8bb8ec2298670658e747e24a609512409016 304502200a6cf21792973c30b445aa112688d4233ffed0c8d5722eb8673b448541cced16022100f42bc243ef5aa95256c7754827fda2a24b0037665f187a6a98e989f3cec73280 3046022100cc60ab3ad8c7d13806a1e4b4f8eb79881d3fa08bee0ad70e5770b7d0398ceddc022100e3fce2795ffd34e02911b584021cbbddb8745b9c380705bfbef1490e39041165 3045022100deb79e40c9d4040972ff9d345139cd5af82c8a5829e1dcf10f71218e55b3162102201e2d4a3481326b74b58cbbf4acced283f9871a5d9dfe2639d19b261ac319d419 3044022076b3d837c519ca1a69ab10c00aa909f8c1ae03e4462e0ca586e42f74f5cf33c90220513b5a6cbb89184742fd0a4161e30cc7de2916e7549fa0e6cf5a52dbb418bf4c 304502210089bf45542db6ec1d9a7fb66c873bb1550dc5ee0d4148087b0e519b0f6b72de1902205484e2686c5fdc061cc3763b55c72eeacb20f4197627bdcd811cd489531a1110 3045022006ee921e29dd420af88956ef2d6960fbf8039acef87d4fc855a2500c5e28c63f022100f6c382b018cf8940a662e118919bc93fea6c85cfcb72546f3ab1db5d39a0890f 3045022100f490e18e0d63810ba1ba3965d4b5be64526b22e9c23aeaaafe8ab8cbe03b9a6e0220789504850c500cebdfebbf1af69a4b897dbd21aadd502e036efa9057b7e29832 3046022100c3f686093566a8d1617cea6275ac02a87c3c6a02b5641be250aeee7f049a6e93022100ae7b4b8b4cb41a1ca48d8c0907630fb33e227c797aa948261d4290c7a7006756 304502204c4e446b764428b6f931c6d96dd66b2a74a964a418bd7d4c8833610751273d6e022100844e914be8c4d3adf483254764daec1469e9b394fd18ffa941722863c5f8de63 3045022100f0abae6c52e6c9e74d50fee5e7bcc2ff307d901d57618a1ed39b7fc0a11bd61b02207d482dd112f943119b6e2d1e4c072f87ebba96928cb3fcf444acc767db35d906 3045022100ffe6bedf75e8a5933537076f2eba187adf526ecb2f1345e33608f80f4c430236022051fe73941cef18a94d099375965227507f6e59a6d157ac26370359a215981f9c 3046022100b93d7b9fd147382c6602918c72c5403a01627ebd155b957b745823a3e888ccf0022100adb13ccc9fd0d08bbefb772c3615a3a7232dbb1dd7198d43d3c961151f27b7f9 304502204968b914efcb4c1ee5c01da624112c64af1c5368f434a7f3e0d8ab569603e5a8022100e0ae71f10351057cb40cb46087045433f03b4ba1db08586c8fc46cf5098ddcbf 3046022100d428bd01bfc03b582fb2f34b6daea2c8acda08b9feac7ba4562e3fe446eaba5b022100f768550223e9d52d86b7d79624325c12ca5f1431778214bad0c8029861c992be 304402203a5aefa1e05ecc28b3419591a40fd67603f263bc59e09243ddcb5bcacb7dc7e40220091dfa9ce98f4fad0e0f916ed70ccb7e6c9265dc9a4b77701dcc4982511762a0 3044022018fe7c6ecbda6d84159c52a115b0367dc5a85cc8da1af03434276ffd5791885c02202b30ab1e977f8de6501d5c6f9433c71c9f54b218060131a39a84390d060236c4 
30440220517167706fdb001e7e33395ee6ae56d6f597fc1bc719b1d8351bc0c54fd9942302201ea630e7c1dc85e2960df0d33239768c7164f07dd33eb62bcf2d32481e9e8f4d 30450220687f7d417ceb4f6a38d757db4b9786acfd75c0f938ad2e73c473eafb20844a00022100f609abed1ce251558342d90ab2d3c06d4d454574ab39d5355b3250f03996e4bb 3045022028a2fa2cbc6ee1a1f66bbb3e203ed680512eeedd735b7337c440177ac3743812022100f16db130f997e43c1967c151a9a30d9507f65118040b1d4e12b4e7711f76b677 304502200f39f24d27ef9dffcb3c2ed03ec197c53cb3340ee2f6302f9043e63e0d5524b0022100fbd758d0990027c86c87e02de2609f9d7371192c1340d24a1e8051d709de6016 304402203a70262835c23a42e769740925eb4eb02f7164747fd1c6672d08ec16dc48d1fb02202e73619fcdf78ddbbe3c413ec6f1b7701eb2ec6cbfba747cbad312c26e0abee6 304602210095b5994c16f7c0a23f6b5dd303876a5d98501f2c0501bf04471de5e1ecf11055022100d852e4e81a182e2a6508d9e17022c29d213a54fa26b14ba3648961c6359f488a 3046022100c0900e40b584e982fc26d2e74ca867acf6dd7ea651b3d4db4504dc45e2b1894f022100cf273a44c0d057f58a274329fce1410c4093a46cc8667c6afcb8a15d65917803 304402200fce019978dd770f08cd0e662dc59788e284f7b763f8ad30fc6f9eaee1dec7280220624f874eef13c48f22c86f939c247df9df5cc30d79e34ef030e17144c67d9958 304502206da97c37cb0033e2c89ab00bdfa0717fc8e71ca11dd8468edfe483e99a78e75e0221009ddc486dbfb0a1a44b9e07759a5c435614021db6042d37341135b48396c87961 304502200f10a8d49d19a71daa26173ff32c5b8c12411ceec7576198053b5826d27a0ae5022100ced7d45586698c6f23d1c8537110ec6455351b2f366ccaf90fa6c52d59ae4cfb 30450220639300435cd23f23ed9c2a8fe057b087c80406434592d6d2b116d14b701cc440022100eec4287a150544b093a14f5cd8765926d4344e731ba5f1dbbf6a5eabb4b2b59e 304402207bb10bb95fea4548798c674d44a4831153ce4ce5b8ebbcf7e3018a52950aee76022015b86d87a49f3b0697ff63299a6d582c5c27550b33c9ef80a944052b59ebeb8f 304402201634f22c5f5f3cccd66f98114e393d0d492213f498702bae139dc590af37e76d02204fa95dd00ff215e80df005de58c99ff0b52698bba8a403e285c494e497804fcd 3046022100f13c7cd0ba04f9ab64500c1b3fbe7df821fba2ad2e8dab663b472d1aeada46c80221008b29487b18af418f3afd7e43c604609f411d1ea4b4c42dea3d85008821f0737d 304502200671ed57a35be49a8a19e2593fef46f0e6ab3d77e70b57626d88d3bce550a31e022100ac25f6e5c5a653a29b5a1cef25229fc29bd73ec6b7e5321dc04315ca726e35b2 3046022100892baf7a892765a15e709aa9c0a0e67ff98f392abd2cdc33105bf04ddb7f4803022100e0e2a66c3f8f56bdad2d49a964d01aa94046a8943f483cda334a627cf0433287 3046022100ed9f07c051ffaf5b9840f855c31500e72764ed328a8c3191961690c14bd73d9f022100a0860f81ababa95b857bdddff60b1967eee0b791a5ce01a02f4038a4e12a4895 3046022100e08a9c6dafc4bf350a6891248ea82c75f0afcf85c6aa72a4c4fdab69254ecf67022100cfcbc39058220d3e3693739ce78f30672f813d3323e427dfcd00ab8f52c9d74e 304402202577c0334f425773b3b1d2f43686561152dcf8e138a53372a99d49451d4b43050220629c0a01f2c94dfd7b7a899b1118d6e47e110c1739ad3dd907fdc326f830fc31 3045022079c7c9daec227353fa2cdce92b5ea1a599e9c2ffdd0a52d921f2388dc94619db022100b9169f771acc8274d796fe04a8f8a338c43486364c98856efb6058ad02e71ecc 3045022100f84411bb2b5ec9af7cbefa1a2e2e0d5fe607d38d20765dbf95dbd90aae33db58022031b8853ba6000042dc02d83beff08d06e4be934a6fc86f7ddad3bc458d5c6dba 3045022100eac75ebcc69a7f6858e801a6514791e1668925ae8b538ac90cb7e3e393b3963602206b38ed1da37aed16118b06d5048c11f49be9279698a261d91d5ff2f96235e0d1 3045022100e238b091963bf6f718884c66fc726319037a7b160c9a7f24f7ce03eead15f84b022054e665ea1bbfddfa6cb064d8d9e453eef9c7910e68af23d187f1f3569f7704ab 3044022042f57fa1aaa94f4f90f738ec38a63f4829e2036068ba365087a4527ddeddb8540220652ef09df0e7c56258922a68cec0ba65d030d306e403fb1ef24988a53fb407e3 
30450221008797e1319dd669b3fbc44e6cd3688fd2cad846bc84b744805422eaceba411377022072130c1fa7b4169fd564fc4da28c7bf6dfd2a8b95bf59ed4702789f66c225eb1 30450221008b26a00877378fec6f2968f7d3e2be9f4a13b98e8bab8ce2802a613fb058bab102202b7e27e7e60f0c3d99563089c57bd98e3af76eacb096c239158701bc4c5fb2f3 304402200518f907dacb4f9f2a705148e0943ccc0f48926760fe90692bad954b935d4dfd022017a6c2bf8046b6e07650d0c012ec264cf8d1f072c1f0f82c144f3457a0574a0b 304402205a5af0a057e54760a9fcd9f0edc676720b35f4ac1570dcd24aa7c9e2244d408e02200dcf7afca77a9e2dd1fcfefa45fbb489e8c77d701527ea29e8a7ea1736bb7eb3 3045022025e0bf0ff422661d2839cdab3b031e8b993456d4632a940c59075597cbbcb2450221009f04b91d0b27dbf234617b1928694894b0ed06d95cbfdeb1bc8b7b7f3411f197 3046022100999c2df40183265b75ce20c5fcd39c00a902f0633be00dfe0bd355c1027265fe022100b55e90fc3af8419c35f3997293a18577ee0653a0bd367e76d37de7bc7f85da7c 3046022100e406ec735f6832b301e05bbc23fb85dd761d7fc5e7762f2dedace0093b43ecb6022100c31c2f8135870c197ed5a3f99c5dfcdfd70588b4386a3b021dfa57f0ca698972 304402201310aef0f40eeb8d2d4412f77997a3b8676146b786451156a6860a30ed597ca20220458b9d7d241ac74dca0fc1afeecd2d7ced06bc7f090672f4514c4c043150a8be 304402207e91f7e088676412ca44e1d96640da6aa3acb9dfdd6d8d21f091ddfef9c3a536022015caf05f29adb5e36628e09fdb8c8a5012592c8b30e26742141b0a4262a499e7 304502203962e624db7bd2d0861d21ff678401e821f52c5744dbc60c7c1fa751fbcb9160022100f5660297b64aa5da30a7a96e6dafea0fc7fc55048bf1b9a483990584e898a913 304402206f799d72e4ab43fe3728e8771531bfb6b776515b6a1ac88d75667dde6e39fc6d02204bfc192b11c7d92fd11c6796e5a8174b5fd133f80a808cfd5d9d5dadfd9957f8 30440220743afe3f0cbad4d731a65d91346ea1145a930f3cff16e10180167546cc57fa450220409a38485eb823aa7a0ccc9403abd177986d82c6dcdb12f154811aa395bb0250 3044022052085fd0bd7f6a34329c7ba784e7bb12cfe7f838740da249c2cbf6d919cbdac002207895859adbce12cbaebff560e2d74ea01e953fdf54adcea222591bab41af6dd5 3045022073c24944c151043dd1cf63a0457288defdd1f493feb986288a056c994b7da23e022100c77cb3efc53d94e7b1c91e8dd4c94065b1b284b027a1ce344353ec4e489eb745 3045022100cb3f51f50fbda3aee62239c46037385ad7e4fa2e876a82a9f142549c146e3ba102201154bfc2dc53a6a3b089b5e7fb27f2c59a8fcb44c8c78591c2ff47d19791e460 3045022100972d4435d1c40688b89ae81837beed78d658fae8643ecf173c83f0532ecc6a49022073c061676785281d9de5bc540c68b893b435828c78b6624a38813a39c686b6f7 3044022046ce01cc4e5d7cb4bc67b7da110ae6804af5726647b0378e47ca57cf9b5caa73022067d47128060ef85349286deb6d8f789b3d1853ca5be738674df4ed84cc7ac130 304402202417011f0d8cefad38594baa858f410efbff54a31ab0cd4860d8e369c5c7214202200f193c5f547070723d2b0fcb2fe157fe72d4b93960e59240771726f18125689e 304402202d360eab5c23efa6090815cdbc903285f4fe7dd93d6067a6a9d8cc921ca17db6022023e4355ae57741cca45eb316015538ce3cd0b281a72f947b97772ef8e9bf19aa 3044022060198010aec5f94c2ca38e88d12ec8c42b0d35a2d5e56abe6e9b69734aea33b402206b564a60c871e2b59e37e1c880f45ba8ef69d7e07d183ab99807a52b15e29d6d 304402207d2855ee2193ba445b4603bc3ee495d56265dfe467fbc325dc262d9e2df7a0bf02203c903622121d8e13b93e8cda979c8215394a34b889c4ef6ffe9946cbc78a25d1 304402206f359744f8954e9b72a2367b4f35d7c607180766c99050ee913602b91ad86e7202207e61c82141fcaa99a7b8ad5de468ceb57687042e6badbaede5ef49d33d5e0577 3045022100d3ce4242b1eea21b3b609e4fe04e278a84722cd24456d701d17db6bdfb54b2c302202a3787e147dc3305678b48d955ee72fa76333b3c3735f31919c3d0b5afbc3d2b 304402202527a870f206dacad4f40793c236c5f692fb9de5ebe8bc6b3af4772635f56ec10220382169771dd53169c6eb7fd5200829443cf88f20d4ec70ceb61167c6759b4754 3045022100f914e4e9c6cf363fdb95dd763a924e4e56552a01f10112604eb380437e9678fc0220124e15c42620e1112597a33676e5128abbd74dfbcbad6651e6e1f6a888e325b3 
3046022100cb37a2106291e11bd7c3571a1479d566201eb32f5eb4e3e510fb1c58e8c39dd1022100f24a39fd2eb621da2b48dd1401ad9a3bb8cf4194e354a76fe792d83405da6a89 3044022028abea1526d48aff336fd91d7f93317131a4bba13a4f34e316302537e39251990220672dfe48fb48efea5d1dea336ea4f22b89cf3e55b42763a657fbec8d2c099111 304402205749077f176301d488742fe2b938f97b1ea03233b9d384087a9404752719da4302201af31abd612b1665de14ab1ba2231b60f1bbe6ef2b6f3041ffd7a8edb88f746c 304402200bce5460ebd02f2102529548aee777a13d4de6589275ff65fe84ded77763db41022061e0107e8407626f5b8c593a6b6dddee3e3a5a41dc5daebd4ef9ce0a1b30d3cb 3045022100904a151724aac615cd3edaf6789a881798e4930d30c5ed87775f18fcc8fe011202201fcb952618148d640cf0222fbe28153875ea5bf9d60ffec6e82a4298281d7cd0 3046022100b5616475e0c8aaf4041fe322fda0640babe65ea6e07e004c3d7a8a4b66b3d34c022100edb2d50535357483b48a1051b91dee6b4ad8426600356fa17d803155ee7ea6d5 304602210084493ca8d735d4701869b56d2055dd2c32f629606a8db2cd75cb655f55546caa02210098d2bf4a42fac62de84a59d17748633cd7c4343a7ecb7d475d39e5861f9176f5 3045022011d1fc0028f4d950994578e39d0c78dfe5fe785e2046b60dffae606a67c9d99c02210090ebf2778a9b834cdc875b6ead1b9e97a6be710db6c97529830754d0dd6d29e9 3045022100f19117f4e1960d52ebf5b6753b84e7d9d90d18163faaf5caca798eb7d23c153302207f61b8e39d376726cd55586166547ee301088b572082bcc3c51170bb3d4348a1 30450220669836ffd326718086184618408e1d9a8fa4abb8288d9529aa347a37656aeba9022100e6c1df11f298a0c743ab87783a62df0f213432c0095067ca84a86a705f79a54b 304502202ef7c1523fa9791392828f16ce3e4f976587ad210f23f859f9e68657cd422c4f022100ff59f9fb3ce6e701c6c9d73c10f9a322ecef3e7d29bff096d305f339d656911b 3045022100d87694282ccf619eb1fb731ca5c8e69e6c64fae3842a30c70cfb8f00acd05bf1022009e18aec58cb6fa2fc985a87cfd8ad1ebcbd2a072acfadf8fea1b59f05ba13ba 30460221008b72c04d5b80b22d9317ea97483bb1fcd0c6810e7b4b996a59b4b4bd963dd46b022100bb384cad1ff2ecd5759fff2c5c1559fc5d7dd611ecbc2dfc0f5a23382be1c32f 30440220605b425b1d8cc0fdbf9dc83c4efcaa67ec3be0491479b50cb6128f568b9bf48002204d73a9f3c914391757fa283a920496416649da7406250a5dddf36695458de49b 3046022100a693f6526be035ebebeee730cf02b83d62f218a93a0ea755c79a8fc9a217e29902210080500c1a1c51de99c2c452cd58d48864ceea69dd668eb730dab44712ed0a7da1 3044022046309789fa4b6b0d4142a28ab7b63f9fb5c535e1bf5e0b68e9df7cb224043d3e022057c61a81c6cd0e2a32367b3820fd81415cb558bd2947a6111065ed406fab0d15 30450221008cfcfaa76023c640be236af123037d0fc4349356e595739a90b1784c5f3d44e202201a5a6c99802403a405b9709cc4b6236f532a8025772275479061b131f8afc85e 3045022100c3a361ff9855b73835959f841dbb4b75412b96c2f86a7233414913bd6efc28c702207676fa196acb02297dc1605427bfc4876fde085f639a9e5761addfa8979e52aa 304502201b4cec19a99233e488475b92a3d16bdbd6603850abe9c79a1564f0ec0e803623022100f906131deee6374054686b9ae9a46099a9602d92f5ad364a144d731819dafd2d 30450220350c0458cf55a388a07809555345b1907a4a2a1c1c35d8d5fe621c8e9be66c1b022100883fb05f42d01ec6fc267cd3a68c19647687092f8dfa39b64d6fe882879b9326 3044022077ee87b89fe98573c5195e321e2bd7a4de8f2e4811167fcc26022367ec465406022009ef7036a6028189196fa63972417c337919efe99d6796d776be65afffca5e15 304402203b58fa732a4986ffb1eab8c2dbbdd3c8ee029ecda2713d4ae9705956e80cb6d502205c4c443699a405642fd362e2421e2403f3771e21e2b51075b98b93141a0a85cb 30460221008df6f86158b947a593e72036f4ab21f77b50f1ae282843c83a860e26a9b8a9c0022100ce3f6cf8bc434b82b3f0a62b954775b0e0667e3f621b9c44f3e6afc7ce646c34 30440220797487a96de0b2bdfe505637372aed7e8f2ea3c7e9514a038142f9e9a6db3a4e0220464c2a62bca113a12be7187cdfdccc3014f5aef6ddea0f6c1e3f752cec18c4c1 
3045022100a4acdaaa8433662f8c2f901f5a48fc0556905e3d5383fce417c0d17ee50aba1b0220237adabe083901d67c72d31b19185ce6d76c25274a11fd2a12efa26d2ec759f9 304402206f15c52a3058b38784eb7972cda6a96e07674d8a585ff2c6eed599465cc19d09022031b3ca501ddecd447e711bb444883a490a45bf3a3805d00cac59f63a87103f8e 3045022044fc009eb0b92d3a453876c89867bce272b80a1ef709b34a8683acaae6b6369e0221008a10aa4d1d3a36a4988ea75e544800c4396f8e275fa62ec8390a06b6cdde3235 30440220168aa2bb551ef24c3d0699630cef66fc455f749eaf12fb5b0ae261e3cc769209022012d2d3f9eba7c667776c28d9bcef5f490c8ee639ad9f6a387258395a9bbbc18e 304402203fcc3b72fc4c7b71b05576f17667360a229233dde0c2b677c3e3b9dc811eadb30220255fee90159f0983212c5d30f05fedb1594a3e7bdf39ec869b0ad98052f928a2 30440220778b04b1368e01700814c46fd11d0e800e584c1c1e228a3c5cecf6b144bd86d902201747c1ecae5f7aab326f1510ed18e61d6fb88fa67879897cbaef518d0675cc39 3045022100c018626f2de1c1db03a75e92dc6ad69f3b581061f75348b5ce2e52e6ea95815a02202d9ead3c1698ad49d87b834592fb5b78fd3d105ae7b4cb63f2490f4ea42a2eda 3044022042c623d6676b3eccf1908248146809244f05040fcfeac2bbc9a9402f6669e47802204baa0dd3b8629157d3877091a9a16b3510d5513e49b45d254e0cc203f8a7ed46 3046022100ee3e1d61a0b06f312306ed0f4bd4ae43e0d0e7fe5ec72e1a0ab74697a2129c36022100f912ff97568e033b90cc9ce91de960156518b41fd3dd33e73a0661fdd9c155f4 3044022078ad3cc6b3ded114d788edf36233e4decefb0b35c95e315a53b549d27415c9e102203ced4ff9a1c77fb191070ce0086663effd0435628c547a01f85b8706b5dc4927 304502210097cddb5e9f1b4fbf603797bc7624456f858c0893bf3de8db1d1caa339402a46202207857fb4fcc5de2ebb2f570745b95a2a3e3de14b0f20cfd407e5395eeff6bcab0 3046022100a99d6ca235f524f8789eb2f591c8b49e20240465230d85ad5f5d558c3ff94378022100a77edeebf6d29b5e4bc3cbd0f57d5ca951835d4d58f775589126a8e4ff3d7cd6 3045022100d322c5410e104dd3336d677201471ff8e9c4e760c33202d566064a691488a7c2022014209864b2e20b26927ff8389afd49b6c70c4aa1121be2fa64aabc5f63116e58 30440220540c8143ed9768a999645a093a18ed5796e627883a754c3d4a3326b8699e65a602205726ddd48cde60f001f3fe8924bbb50a13721b0e858f4cdf030b7ba09aa4faf4 304402206df9a238ec9831918b37133b695953180c60641d7996a739aa19b0da3ff0761602200e08716997e008aa7cb11bb19076c6ae9aa111e81e8397af9e65af3cc718696a 3044022014d25e1b005214f5fdf05fd6baaa278222a20f5dd560e9cfa55681a4f9cc4b5e0220246e09c5ee010b2069bf794bf032173b10408b919edf96c1a14a6cf1cf156c7f 304602210086a3bf47d9c34f7f88fd49b2c245232460475a3f11889a10c5d3d85b3ff724e4022100bbe2529c5357865d4b3525671ee410d91f79dbc98b6ab992ed968df6c3e9fe30 3044022075385eae4e9772eaa9b2754dc910d1d37a944127ddfbbfc9b50b575a2c016602022009aed4a86f737e3d150a6a4e7025c2f5b25760a1effa84319c0f8369d217cb43 3045022007082607a8928ae796242b635dc6af9e95618a91e95187dafe7ece302ca829c6022100bd1ab4da212912ed8dfac74fe264708a421aa0e05bf0295089ae4bafbe293652 304502203451ed509ad4cdbcfed8e437d092c9363f9395b3dcf38a56043f001e2062a425022100866dbe9f33510946ea5e2c31e30557670b41c01c29781e2b75ad308f42882daf 30460221008a8f235744908dbbd7adf0f0cab1491d99150bd10b5d982601f55c4f0c0555db0221008e1f0a7a759a4dbe534fa34b3fcb6da386db4e6bd91e5234e5a88d282c7451aa 3045022100fa407687b4033ebc68cbc535a1954345aa4b9d31a2e7e8caba60ddbeb3a469af022064f663cf1307082d257781b20c07482c29cfc822b50aaf55dc1c1353fc2b6c83 304502203add42cae55666775322487576d696ee05554886f3d52f6796646dbfd4f825f9022100807ed0adf83f5efed8843777c1c2a1e7e5ac6d3a68ee4e5aedf359f87d4ec3b9 304502203915359ff6acce9b217b4754c4d2e606ca7b6b2d2a264f46ca9c22ee78521fb7022100887db243248a6858c404fc426ec83ae8610c8bef567aca0dd48d9e085c2fe6bf 
304502202e3fb1c46811c6686f138c4d4c5b72fea308008a63d5b6db9456db4d01c0b3c0022100bd252a715662ecaf328b2e572a22cd4b2d79d4234540b937ad648763e14665b3 3045022100bd811c19ff9a30dbd3221abb78bdbeb8ca68020fa4dabbe214315cca4920e02102206dec898777258275378315defd28ea6b65f6a7c4773ec9e33d4b556706272f67 304502200d94b398e19732dadee309a474ae5b46c10e73bd273729919279f3fe67d61ec8022100d4f343315d30f07a60fe9f97d017e2b633bfc0373d5bdb5e666d8087775d5cfa 3045022100aca4440060b494fd3eb1684288e44f91a4ac73db63d5f6c336c5fbf976edce020220225c1bbff61dbb2d64d35752c0261e3658c04322415e4b5ca54d56c52efeb027 304402201907d7b40db4b02b3f3de4c1faeabb0eff1ac0bdc0cdd31da059692302a0fde802200307433b2d6bdfa1d0e62fcb73a77a32936f42c6ef83f167849ead99d11a39bc 3046022100db711022ed0feea5cb31ced1c187ff11c2fb806c86d04147fa1495b4d2cf17b7022100a7f3dbb9334e4767529a6af2509ddcd06bb77ea8b61935fc2fca686faac18819 3046022100f9b7eae1d4e2307d39357c82e446faa08e6f700931036df575afcf8a34dcf051022100997e605b83b1b3e55d4d5bff533074ab8fcde0307998001846adf24860a66546 3045022100c68a57e6eb68877eac1d0a016aed49bcad11d6cb2a70b48c608d45e816304069022063d462870e27c8492e2f651491ef51833df8ac7cd719c1a55e345e8f00713c3b 3045022100f396fc8775159e36dd9eecf24b3c2f9d9de50ec175baa31a098f07f0942fa4d50220639687fd6f0ca371ce3ea6c632d8c03d00943ef8051afbf0f658468f37d144d3 3045022100cb9e9190532e1307dbeee4fb28b2148ad4a33d2e8dd9c14eeddea4ac1f0aa669022071c86704f48fa934a086d7b801d9fde6cf5d0a184c150be7b601ed6b0455ebee 30440220138af7903a8008d7ca261a6aa9bd6e80d59a4ffe8fd753b856c6b81b750c69cb0220264cd2d1e29aea3c03964429a10c0d4d212342c0dbcb14bb00258c8adfa88427 3046022100bf3335fe0a6db38bd14a5fca9fb2fd608f982b02d2aab8b459bbe3f7f1b7d031022100922f86d8da2fe0c85e23d6ed36d3471106a1e709755a20b791497377eae7f071 3045022100f58207760b99c41ae3d1bbb972bc547068a498554b74a392e26f5c3b0a7d70eb022063e29a7fdd52a0cff544fbee77b9890dc718023f0e1ca9b230b6193fec1e8435 3045022100a0310335a6dbd4698688308331d09d3207dfc00491edb749929b9b06ca15afd502201e2639ce61861c7a4a39f302b0da6e9a2910283140ed020c585d9a5eeebf4af8 3046022100a049369627291e98da700bda04e247b64edbc82560d03e5d3e2aad9aaa821e8c022100afcb9d4994ac477e5100b991f8ac9651f5361706ec5a6c50b3bb251849fddda1 304402201bd70820b54adb18742d01e311f7aa620af8374ed761008314ddb4c7218ac5f2022028f1bb4780b3348b2f3bcc081799daacfecc111c65537fd03534b95fe35547c4 3046022100ed8f085ef0e3f840f8ec1e29ffad1025a533804fe5ad186b116c28e6b3cc10bb022100cdd2a93155e9c37fedd3cbde70165cd461d496b2594339ebfb5348eedf14c6a5 3045022100aa5ed092d0f17daddecd597021b92d4fb8f6c12fd1ac1ef0438ed51ee4881405022032983f33986e62aa83332b1c79d35a5bf7c562ffee13eb9319724b1e52228be4 304402201efd3abe3cd8b2775f65eb14251be6c8e35b1b96cd4acb089f187dfe36af9a6902203d898eaaed93e79bb95190f54e9d218972aebb08ee5ace71b1206e59f8549d7e 304502202054e0165901ad9bf818460c41cb09bfe4ea5bbe0b5a86d3a201fc3915cb3607022100fe62c4ef399743386b6b6f688710fe54595e4dc1dec825e7c7f95ed87891c871 30450220607cbfcaa2e6e1e1d8451006db858e98ad0b33f7d0a9d178ea54411471625719022100e673b9da4666630972ff61f869e978f43928038ef90544ab2a9cab1c90a01b1a 3045022043946101c94a5b54c5b2c5f8c56d4e4a786ab26de25a3e85d30bf2da09509ba2022100a7bc02d1d4ef294e0bcbfeb21debab61cb119864d54fe87533c84654e1dd1fc7 3045022100a424b6dae4bb8802b45806f5020c1bee287f42d1009d1db8cb17b3fe603efb2302207f482baf92c7212d051e03f29c66a12a6b463ac02853c9ae756d38b53819a383 304502205079843d80c96778ebd0c2d24655675beb3600e06b9a88e694457cf347061bbd022100d54acb77fbe6e16564a221437b585bc2b3d3db4e513b7da1f58e147f62fc4283 
304402206a7f13b1028ce461cd423a5d1dbe45f27a4eec1d5560758ed7a97c4dd8a913ec022069d9b77dcb6f40ae7e42fc774c0da42d06faf8d1d191e3fcf5884e3bd291faaf 304402201bbc145009c74d0c2243a82ce1bcce149b34b8af60b8323de000ad117ac4fcad0220611e56ddc75bff5ab8ee02bbd727dddc398e5411e0bc0068fa1bfc2d822e528f 3046022100f83ae7e0c9fde2f56e80662deb7fc9d166dbbbcb848e6c09369837b941e3c1d90221008f8956d20d5139e3ad4d541bbe96f832e3ddc9eb6ed4fff1cd6abfac6207b20d 3046022100943d4388ed6924b5787de5fe33f8981f76653a41d4d258851413bfb0b27ac72e022100e2e4460fdbf1075fa4565021cfb253fcda33f61d0479617c2ada3c6c622fa9b7 3045022100a37f5ffd42774b10230d176e416330f04878a41e7babf84445519f909dc224c502204e54a8963ecc6bbc4315141f20baf7ce575286ade708b6305d8888c190fd4ab4 3044022059707591a5440abbbd58117aa8bb8080b9568fb0919342c3982719eadff02847022040033da23a29ebcb0aa049ab299e067f4783bbf78b2bf40925964b8207faf08b 30440220258837d1eb16e4c1350349cce99be653f04d1922ad4c0b769b7243e1d8d8a6b502203b5e3e1d65b484244a2487ee9f9b61cdb5d69fa23a11344e16b22007d2830a14 3045022076ef46d6dc7502f93c6dd1a1137cd45d0d848505aa598325915a52ba8e262aaf022100fc870dd5ddd654397a6ff53f7984030972fc0c8ba383116af1bc41325753c286 3046022100ab117f90c6da6416df6922b63b9aaf2254a2b2895de8d988384228915e7790e3022100c39840f3e2726a8cb40179ef475cc7e96fcc18a573a7515ce509b8a52097f8c2 3046022100816e7a1eb77e227cdb43c9e90dc4807ff0131c7bd3b6bad066d7041ceeb86145022100ec10e9296f38782e1212fb146c7c045574f265ec8278e37651e50785e4bf3de8 3045022100a59051a8be14b64d07b5aeeea134d7d2f894c1c1da5f8b2aa3833f1a27e9609f0220209373d9b0221e0dce1df3c5674e206f78fc8fdbc6dab8beb4638ce8c941cf8c 30460221009c55900b8366dc2e0d773ed74ae8bf3b9e3f4c48e5d2e310da4315f837e991f1022100ec3d3dadfd97428531564309450df5ab2b278ecca1da82676025ee2dff763984 3046022100a5d7d53f885bc49788463c737529503f72680aab6da9de361f1cbfc04260d64702210087d3c38a5e9e7de617f580f1d8c214ee23136d129bea62ac4137bbcc0f5dd17e 30450220441c476489de661da5ba3398240726e4c40be07830ffb38f31ee32a3900e9a47022100cbb2dd79a148b8224cb37aae998b003189d70477e580f40bdec6f93629747f91 3045022100af4849c2267628512710543a22eaf065fd58e82f3ec49f8dfe599d5ff3e4c46d02206434026d93f41d72aa8d2cd1cd5b547c2356c971ac4c374c42f7e7e98a32ccbb 304502202d85866e5051cd7094a3d4ea2923a065a3f7c5ba79ac37d4081cffe998d3890d022100f1561aee018eed25bf163727b06e0a55c3341d39e702e4b02fb32a6545e7ac7a 3046022100ac1bf74923356437d665c93e177de16d21b4ef4ca704325a7405ab0633230b050221009e0f8541c76e576ca2d76f1824cd2c26a2ed9f0c3e3c14c2cdbd2041ae51e534 3045022100afba7026a356eff4395af00e910efd1d8d27688bc355fb852ed463250a15377802203611271300c230396a2fa9857f7939a1d50f8edff5ebec756e3fea825dc26fe3 304402203ce62a3344cd71d2b8027d4b51b9edaa5484175aae11fde7dff3d86b147a0399022071ae2ae9f65508dfdac1b3333d9e6144422c0a1768956283b0c943fff5cbe043 30440220527fbfa7f8180d8229d7bf88a7aea831c3ce37c5acd690f1c6264f54d74f941f022033ff5e3fa698b1ba86849f413b76fd18b2aa139a6c6952c4a5ce14556c3b6e7e 30450221009bfb2d7c1b1af4a7f88e982e5150659c71828c95a44ef4ef8608403c3c5d2598022007f16e122d9b05e083cd85dc59d05db190a143c6dbbed11505a370ba8f3b9f62 304402202e62b901d3768bac014e22ca11aee54b6b793fc0421f65f8f23fd6618a4c218502201071bbb3a1f0d4092aac2ed4d0915b36f622f2446da9531d121db1f92944b382 304402203d25dfdc506e548cd4b2c5ef08e369b12f1f00030af45e6455c2fb0df20979d6022031bbe5b5682382e03a3f6e58b919b6ba4d0d1860ec334dd60627f33bfa552572 304402200a8ae8072104f26910e8369d9731fbf321ee1d6182aff05133f23cdb8e3bb79702205f58513c93441b351de902add1d3bac6e9635ddf71cc1e946e716a4d6ea93264 
304402200483d87a9c6e5355afc6f546dbf8416a48e650c24d54ccf5ac3927aed0c7c37a02206bd522f6ef66debe6c2473f6b1146bd523a9e76cb879f5b84ec9afe10d05862f 304502207d7364ecf2abdb13bc19d553cc8f3ac56390646defab706eaa067d29cb838060022100929d915b9c757a92b0c4e92c6be0b97715697b280b3ed22fbe5af48650d1f5d0 3045022100a7b25ba03758874d580822f84607b6a733c1333e2271039ecb48601bdb5be362022055cf769f24393721f0fdd9ff208819061d1e067188362141d7823368ec3fcbc9 3045022100af092afaf2cd70d3db9dbd197dc969acdc44ee902df89bcd77598c50130e34130220189392e6f835557d8ae42c9e1b84deef2395deda0ff3b20bb59feeadcc75de9d 304502202703cc0b100fcda702e69796464617a77f45029081c631c6787d333cfcb6675e0221009f53abf8b59289ccbc12be369996b2b4f51afcbccd3a6d65da146f2a04903910 304402206e766873424f6d2b54a4b921e067d4ee758d54f72d8ff1166b0a630a664671b1022003ed0b807ae0191493e6f75c530de0937cdf34ebf192d47c6036ff0b921b7d63 3044022004917de43952710da4b0156f65ae37b0db5295fdc8ffc629a96b2bd8986b17880220395f639f138c88b87380cda445d8b98e2b20c17ef6401d01936f8ca020384e6c 3046022100e991dffb99f69a43c1f7db1bce57d5cd91ae1a5462a0628a74f13437654e0095022100dc462330cf3811dea7dfda798785f2c2e9e41bae4d6c748895655811b0dbe007 3046022100b3ec7c7fe281bee98eb2e1fc6a93fb8841ef4e7892bcf9ee6c8b549f3be39729022100cbec08b3a34f089f9c7e73eb4419e946a421cf9a2e36e6fe0e0f54f0b3c04886 3045022078da0c73cf9d95ca153120fc0000ca16905399e0be16c1864c99755740b62383022100e8100dbe4e5c18308d5c4832d34ff4b56f9fdfdc58c73d214417d62102ad9b45 3045022100cf0135833d9cf978cc205a99e466598323d2a92a8c872474981de183c95a819602204586f04b326813e16dab3a413df760516f3bbfe559021d2a477f672646bf55b5 30460221009717b1390f689416db061d9e13c2cbac8af949e985e49e01eada7d32cad0c1d8022100e6abb7a2f8ed30fa1a04c65da2aa51edc9c0f991f6e823d112932b80e128e8f8 3046022100fbabe0f7058019a3a3245577dc2ed335c541de151c0e7fef7f7e23e54dc15f25022100f785a6b6f606acaedf71945a2eebe7298da83e04bff9de46788e6ac548de4819 3045022100d067045cb3377b2e927a68b1852644071baf31e8f44baa263da6900bdcd4ad5b02205b55deaf98e50bb377275891b177002ff9b35a8afe0c11dfc09696d3a3bac095 304502202e8f1acbd06e4951927087db0a73598fb8024966643331ee12390cc65b1a7ac8022100edc63785fb365322125f3c110a34e86b91b17b9e4e6610d8871173a9eab28215 30430220505d0e92ade33653d7807f75dbe2bc673435a232e42e16106314d67cd6ec888a021f2169e1a0ecb90da1ea202edf5c126402855fa2523f8d2b2bcf91bbc512990b 3044022033045b38717912e40c160fb5ded5f7b99e1030065038dfde40563cbabc417e3602207f32d5ed53f519eb05289428278640d6b2a66c9131285f03cde83400d518ff59 304502206d4db40c3c6c8785441828d2a2e1197e51c6a8314c6b1a0c5cd8c63186bd2aa902210083c5cdf8ae0429667a3eccbfb4dd0212365976564e7910e17c6998a26a30e2c1 3046022100b82e0cc9d2ab0bb7d5fd3005173e1b5d1d92278df8f887a3ef63e064d33ce200022100a6100a6bf97bdba2f32ea5ae12f78c4fe0e14427e8cfac0e5bbfa06ebfce60ea 3046022100de47f5e7be795cc90d36a18f693e24aa4ebb5835a78ef438470d8188e0c2cc0d022100890ca447d5de766f19c52533bf6f393aaca18ff094671414ac2cb0c0f3a3768e 304502210085f07216141290f4d693de2b3cf2637ffe89f86f08c21c02f48752c9768ecab102200fbae85511be2b9a1d1e5965f9532bc7cbfe76ad9550277fa8da69cf66ec1c3c 30440220642097b0f30dbd501d815bb5612fe633ce94e535fec1d3d105dac1f2bddb2288022069f29df3e0194b1c319d0d7ab276c7f976b5ccd57dfeafd61a54a2adff9fff69 3045022100b874a4e72ae2fc2532983edfb175a46b91b56d0bb96d6c352c9be0741785c0a70220078d9d2e9640dd8300384208d25b659fd52cae0917d0df2afd5722369369fdbf 304502204c1af055a292e61019b7fe062b633309a1db5ff846dc72a04c5b2a1bef5b6e14022100ae9c40d8a20f5796a2134cf02065bd79d4a2176c80c5b52014e31a1335f05d61 
30450221009673b834baa7fd667235ab07df3c975ff100bed7e5608550832fe7515805c39602207c2e0eb05db20a8f9607009811a63716343a688e9d0d4e864b4366fdc6805fb9 3045022028fe65df1dd7c75762a67c641d586cbbdfd5eb77c518f99311f199ed01f652db022100dc564f738ad925305db2b6f3803f1d5c16c566eb6510839e1afb970814f6f258 30460221009c5ca9df4b7b271b688344235c1f9824867fdbcc16d4c54ddae8821b8abdb6bd022100a3d1064b34828ec2c7dcf27896dbc98c553fbb554a1983e209411944a689aa4d 3045022100ba36138aa8f102cd1b207731f152bffc75fcc0ec7c900083b75e7e2a02571dae02203a19d3c0ff9e22f6496219672442886c288a8ba10a20318857cff8360c66d57c 304502203751ff14676312a30cee6043275fc7c66383ee2525fbe65e059cf936f4756f5d022100a555bc6a7c0687b8c7af8b6ade57b2bed173977f920d969c4eeeb40e7568c50d 3045022100a8e8ca9983ddf5d74067283fb1674cc507afb35c07397f66c3b792589936a2dd02202a804cb7688b9b7739cfc86c4067e88005d1846f74f1c95ea985b15d515ed277 3045022100b893b684c22cd02c678589f59acd5bb557896851a0e73a65d336d48b88aacd2d02206afcf8a56a2aa6e2eee7e814f276c519264d850c07ae3727dd86ec42cb8e71e3 304402203997b9fe734bd234b0aab2795655df7fa563207979c1eface8a9a0a2834e8087022054ba3e08756019b360b76bc522413f0b418203bf3df780b7f6aa6a5f2dc03ee1 3045022100bc4d2bb3516948e577e884d14c1f3d74eea7433dfe6a9d2f04b44379430e8afb022007411652449e0df875c5256caac8c852520f45fc5cccaf773fe33777e73d1155 304402203db2425b33f0abda0db7d4d941a4a96e787764e5a4a65a08c61d569aa68a20a502202ab64bc7f92676ebf92e0ab73a388f17dfd10ef4051d34935105fa90e09f6be8 30440220155877650ce8873d831bb9f397d73f3fc905478950144f3bc315eca2af96296c022011d6a054caa5de2c0c1580bc6ded713285c47bafacdc3845796a2f901c76a04a 304402205876352f33dc859244a22ff4741eeca916938b5a683a2f9bbc54ce557e297e1e02201e25b32dac4ebd510e3dbda500a505e1149e965047a21c2b09828663ab737620 30450221009f7ac7edcd743906c1f441c0f797dd3045f50ad70816f3afb9e6b903f1bd2e1c02207a7e09742a2f5dd98482b3fd920545d3e03c0fb1a7ec146d3051114a39162f8f 304402205ea40d87b285ee749c76be448eff301e843328a67ee1d5451117162b08992b8c02205bac05118b219c6c1a3546915763939e94415ac331c7e53030047c2b705b0045 3046022100f40743daaace79b0ddf695639612795b3500c7724756626790630b4ee1c9157002210096c5313836560cc71e5e14f91650967895b1073eb1142b575dce88ab752fa0b1 3046022100b85ee348d2da0ee34f2f40b8fe7e7513018051afdfca4fd29d0a805c5a21bcba022100c1cbeba23cd23353a74205776dea861e57f8c216c2b40df3a51a98bb5747bcae 304502204262c45157f27698f64871114a45336bb17dc002c7c8395e78b451e0793960fb0221008433994946215fefd139b131ac95955c992162453cd3311d06a307bb357ab1c0 304402207d17bc1c3283fc349b0f96478b2643e702202e148c9aeae55ed02d4697a8ba70022078e0c7d10fdac6d1a0680621b61cc0d4e5285babba909420e022f028dfee6e1c 3046022100a87be89b5d3a5fe4431cfe254fd5a743dcce4640bf7a3e9496371726080646fe022100a6d0792ca519284c14ef938cb01e61dc59205c79944abeb23b3ae6af3ee1030a 3044022063b24b92e1fd9f63581311f0422b4f205677885213b1d5b571f44170655ea9210220577fee6045c3973d8e96beb6464cc2bbfa059c6870e4a63170c3c666442d8fd7 3044022025c87c39169057f387c5e8f2acbab4d9aadc17e4b78832ee0186a0e8913d6664022075aa0f2bdf848e3108bc2be0e2153f2d94a73043f14069aa1a536ff108c0a588 3045022100cf9f500c596c5fa63deac2ba31cb231ca1fc176ce792a40ac01af719f16663d2022056eaaa19f9c92eade3dcc2abe20709fd23c9b399c7c63b6e5afdf80272520021 304402205780c741f7c59f7046fa5837527b8104f613c7393f4897b03ba8706cf47834530220359e1e16f7f4ff555d759706e7c2cf82346a0cae7dcf89dbf788b0e35185f17e 304502206f48a784755a10da7811e5e791568b43e0a595f2219e9343433fddcff87576dd022100f887639fad00ef8b1512ba544a6f2a4b65ce89ad51b4524c4ebe20b9510768c7 
304502201fe40af21a5840eba4f2757ceac08482de99079129b29401dd4ce46c41a26614022100fdb56a1caa3aeaedaacb24f4b126cf1dd0dd26a3fa2989c2c377e5534a6504d0 304402200916004a85b07d7a97b22995c47bbadd7ae164e3ca34ee5e6669a7410adc2532022058c741de95e60f58092770ff46f3163cd702259204d823ee57323f44270f738d 3044022075e09d0ae3f527a9d66df9ca582640f5cd8c9f9bc3ac938b1782e559d99b7bf6022038b15a84bc9899636a889a55afc0c9793f7ba56f4dc9c0f311a26499755b482e 304502210090b806b988d2c9a5708001206cceea683fa4f9486b564c352c5981c1a3d1040302206fac7a32a71dff5b1028cf89ea849dd99644a0811d48da54884ea98abf6d3f30 304602210099cb970474f64ec9012bbb646b41ea35ed7252ce5467ef67ac3b30ea87ec6fc8022100d4f3dca0c78dea33cc9cbeeb943be9c3121fa74ec4e709735751ccd63ec53810 3045022100dfc0747b359aa407e39821be7b45cd73e29f52e8f0afd2e10280da0726edd5ae02200773da2350df8431032973b53d1d5b9c720a6e5d9e2132a3fef4417e6c67ed24 304502206ab9f4c5e610b440b81908cecba3e26b4adcdd2e63f5f489176bb681c03c1353022100cde422d83b59cf9d26b0364253d9a903743744633ec33e19812605349fd92c41 3046022100953b539781bdc9509b2946978e11ced19b49f2715a77ec5de7432cd3ca57d1b50221009debe5f0e39a1002d5487b4c4d941529cff41568ccde4a10becdf9b945709e7d 304502201dbd888ac0f29e240df15137826a72efe2e2414fd7af6d4f6ee3f7d82520becd022100f1e646e56536c2b244bf8433ecbc54f07c3f9e902320d6c7f98e8c26ef8736e1 304502206846330c173383fa7d0eefc3f20ce5a00394a9a5a2485677ea2c15378ae50b6d022100919603772a810f3a3a7fc68cbe444bba9711fdf05594fb60e9e9570ecf519098 304502207db040950ae0b1aeeb48349062006232111897165cd4509bbfb42f169dcdc9dd022100cadb89bead83fa5cf006da8d6a094649b461f8fbe3c19bffed0e72706312f6f3 3046022100fab8834ab502663be8f35262ff3b202290bb728808433f134b32884311ddf2f2022100b032803ccb0402e47db5cfd4c6aff34ed42e4fab21f3edd63dc0f27d9aee214d 3045022018aa397c0189b72b55ebcde799715180473bb723e44a3f3bc7e7005d28db83850221008ca92e37048b7872d6279cda63f9ee7205f358e30751bf3fd7fcbec2d76c6d06 304402203a58ad4645084db793c6222f16d403f033aad139cae5407ee7d0687b74cf2bdd02204f49e608871a34c68dd5851d03e5984ad7993d1a4f3f7ebd2824173578cbbc96 3045022100efed9161db28000d7b1c01bbb9d451ebea819200678f1a0757eb43f374df3a6b0220065d3f13126468ed77101e0b9b679434091715074ffe99993917141f6dd10158 304502205cf136722cea416f5f7c244192230cc1382ae20b5a6eb683df54c2041c17f6b00221008813eac71b3196b53a479cbedb7118ddb5b6c3bca813208f9978a4c32353d9ca 3046022100cbc1f14704075b5b105c13189a4f5fa8289bda48ecd464d8517374567e86a1650221008891a95daad71ea13739c91885b28a354c2d64b25bec2e597e28d0a6c5c5c671 3046022100de2da6c784e2284460c912bb2243070dde1d7b0764b209b0311b1e7f7fdefde502210084ab543516cdbc88426d55c370f11186b1e92e895a74f30b22b218f3f03468fe 3045022100eebb4e1be92849eb69c444795eb4464a211f9d7f21d7f0f44a4dc0483950d9ab02200bd847c818b63b44816eb70617f580ba912aedf1aec30378828663bbbf3707fc 304502207470bf2dc3f1fad4990982078c891fb17950017526e488ea25cfc640e56747ab022100de315123e7fe37cd868c3076ba18be1cfa3e0eefbd2fe7056f893d9051ff9cbb 3045022015b453e794822def908fbea0905db278dce7976ade75269bebd5f2491ee12d4b0221009e5bc13fd06cda8cc6726450ad6c2dbdb2610df238c1b5d78035528b9e1363b3 3045022053d6f72485a4e9d549dfc38cb9acfa866fe4afeeef0c000057ccfcdaff9681c9022100f1aaf0f8faadf5f69e889fbcc3b534b90dc5a30f621dd162e90e9071703c2253 3046022100cb374dcc62146604809f87b0a659fb47f703bc9ee3d01eb0660a9a4c654b4527022100cd00f1557b0ba771dcb7ca63196674b2cdb84d1c827b65443ebbb95687ec7e49 304402203afeb846c51e829abc881fbae3a0ff91a498d318a7108a594a866b4b895d1e14022070d031e087b0f40bb523f76a95caf3891ba6a325e46ca530f4641fc7fdfd05b2 
30440220327c6fdfd7e13b78996c01c93274691c6bf3387ccd47dad57601ca5830135e1d022056fd24ba9c890acf94780f74b32295ad393d246d5275187f09ae4cd416db64ab 3045022100b4013dd86669cabb7f5c410e04350a38390b92bbcd827299859f070382676b2d02200750fd902e584bf7d6c8adf6078f0956fde215805c4bc91563bef98026c0fde6 3045022100ac227a66a6508fc504401c35d3f74f2dba07cb0e946441ee8e730383946a539502207acaa2dc798d096ce6ccfd8eb463002628b620175d9f4db42ddc2fc7eb2fec7f 3045022100b12274132bd9847f91bd2a77514b585927701b0458163a1b6f769ed9ec92a928022019b7a9137f4ce75a4995cb62641d6012c85294008d9068505d28b47d59cc265a 3046022100964d03487947602ad89c005e2cbf6aa3d0d8b3458c29872671dce79f9c33dade02210086d21925beead3f4d64a06acf614cfb65bffb996f30680f929a7c9c8e8927576 304402200597d6dc75d300089ee2789b8d511afad1017c76b5eccacc5ebd14e37dd7b65602200db8c68c9e33e9a20889a54157b661750ae9cb5e431db333938d750aa4cf1336 3046022100f4aad31bf5a758134b7bab096a8bd11a5a6f2a847dccb2d20401ce768f5616f4022100ca0131f84a184dfb5f890597b61c9f483e7941523333d4abd856f4659f0a8036 30440220709b9e599b4875b26c9cf2a3cafaab7a1b569e8f119a3f9c227a2ae14ce63ada02206b53ad60dc3d7442ca78607aa4e803791509986b0e939f1b5728b9c770f938da 30440220022a081eb03e20fee3a79b70db1013550d05f0418ce4150c7bff39150258df3e022024557138b6892c15c5897b75aef3522722946e79c5e11e0f7e58aa1915835bd9 3044022060d898f86fac43f491f8579896fccc0a2ab71f9ce64813651fe386638736e91f022052a4f58d482d959a0b30e06c2c59cc2e8d99555490c09b484e4ccc017d48c70e 304402201345026b6b039e82af657ee87cb2e50597d8c4faf5f670873da98725da2f87af022041fdf167836d9119f113356452d7b94559a60adb26ca08f3c5fc1cf6d6b09f6b 304602210096f8c919197d082c00b5a436f44810ed5794bc5a1dd1c349ff44ba9849ee6ef0022100ed8b26f5d1cb42840d5a564fd7e893bd1ebe272895812755bacf837dcb64cf5b 3045022100c2b3c2e732fd1da4718c99acac1b912a85893d748737cf6290fe3f9a512de37202207936165b4d555d4e8d726a48e0053816fd0e621a46087f981903ad0609d527c7 304502204fcb3c0149c950c44c9e56c6a42ee88384d1ded6a37767b80abff67b7bd27f61022100bf21109dd6d467898fb7b1d72bae8342c3966ef326284312181876bada93e91d 3045022002f0b3222b8831e8a8bff061faee73d60fb79ee8afcc72934e64e684434d9e1e022100cff273ff7f533c6c624a2e78a6d6950d6a870ce6ad2246f7ea12bd770e0f3cad 304502203754f2b6485dd5cdd570b703f8743eead25dec0984bbeaeafdd9f839d1427a50022100bb363a6478e7abc9716efd7bfceefd608fb26f7ddfad9729aa9750c220c027ea 3045022100f1018320c073ff1f5fa66d038dcaed9fe240e37002aca82e86516e9349a73e46022018b0b5613b897477d182865b2cb127e7228bfacbf4e38152ec7ba7cb1f5d17a5 30460221008fcb7e1d22d3aa7f57c935bdc853e4c46f9221dd1e4c5fda967c99049a8be0f1022100a74855eb0793cd71e53547ab0ecc6dd4ee43b941b36d03117ba0598b7047f56a 3046022100abdbf7fff4690bc2795d9099639111876bdfce7bfd612c2c84b060c3267e86590221009e767060f450b1ead2fb30d7efb8bb0c733959400c13c5aebcc7b52410d50721 3046022100a8c10f6588b1230b302cb5ee8a55bd264912edd258b17a398f2a4831930dda80022100de5db875a5676b9256a8bd4b58f63c200aa49886d03dd2bb3805c8e75137e6a1 30460221009c6be9d9a7a0d54de0247a4e7910c66ce3c73f31344f44a482db4034efeaacb2022100847ef5e256d76074f38878f5db6286929ca0640cc62f1a7fe988e8a78e85603e 30450220498933d06ddc1a911cbddb42bafda03dce7154beff9f0dfd0f81e8121ea3cdec0221008be01df1c2228acaa0cd2a2c2f4ba7f2f743d6c4d142733324efdaf2f5aa5d64 304402203e73a2518303b5657ad7fc41e6d541d252cc22cb0c905437c1581446f2a0c6e8022068a05fafc13e6c48b6e81afdf0c4fe20f68fdbf3613ceba3f63bfd8f7c0cc199 3045022005fcec9650c36d3d60e5be776bf2c78eea588351c42768e0b17b73d06e891ac6022100b20021f5cc7a77ee0335db161f340f1d8942f835367125f6f50c51785c2f27df 
304502205d997db887fb7f6430acf7cf3b0e98411fc1ec852fedd642c19b244fbc8ff556022100cd87ceb686274ab525e3c7ccf6a642728447b1330e31a1293a5a0088591518c1 3045022100acac69d5143c10b3d74d554511483e0cc5d32e737dc05c833a7f8d531ad99df1022054bf14bf1427f375d3f53a9f680a786a986756d15853f4b02da7dfc68565a87a 30450220696ead40e04262f7d263271ab7f7d3312476841738e2ae7380e27ac5da877189022100a8dd1ce8c4158d56c2d2df39185a2d9e7d3cddb114ae74be48f93e5eaf5f608b 3046022100d049e2c660c870e5655291a006b9d9e90e75271e6ef014b4f27e3d0683a2c106022100b48484a1acb0ec4c42d78120c09a388f4dcc468d360375229eccb158e8950f73 304502204ca320fc7e4c1ccf63486518a0da8e9b04c3dbccfdb7af50c6b859d72c69cb4c022100ebb07bdf8703d8f2b5cc2a7710e927c841567a1a235a3ceee5db03951c6ab5f3 3045022100863a453c0e52b49f3e3da975bacecbc4e712c0b433c84c94323fc9b691288e89022079d889698efe51d7faa0611612bfd54b16bcc24f22d9c213ec6333d4d755875c 3046022100f14599446c8de27c113f9b0e9ad8e28b4c7b5b9956bda35a3e3bb8c9e7abca780221009e2247ceb1024156602949df9bf71484d2f24b9df3cc01a8be6e54570586d9d9 3045022041bbed2bc541501181db259e8530474483ba6f6360cfbb0a84ad334ca8f2172a022100ed42c03ca501cd8989b03aed791034b2de3d7eddc6f97df572c6e67844ba1b63 30460221009c4f554fa0f5e849f7bf223626e06215584bed53a8e21f29a9427390b6d3795c022100d4472ac78865dbf0cbc7e9273ac3978eb8d9f95b9570721db0de03685b0b71eb 30450220676f0b77149bc67db98c1cdd0e6393a7ca85c58150ac06d156cc159445304a28022100e6e8ae00c7b5f8fdeb3313f9ace2c1dd8edf3d66ee8851293646e5d0b705649b 3046022100acf96a26bef33d067570cbcc902142e19fb8381781a12f2e09bc15ebf081c9420221008b9a9048647e4d9eac6db83496af57e3eba74c64ef6d3cdf12a8187b0a795e39 3046022100aa2219a743e401d658859470b52a265ec8e2f15fa990b7e0cbef9bac113733f6022100cced7cdc5b9b36f8de235dc446ce439ea5e60d74b10e11947c8ed9de8084837c 3045022100a0d1ecfb80ce6bec9e170973d43fd05796b9964f7a52965ea86371a40f0c5584022008862565a5a99d49c53c7a263020bb8640f558c154206ca9e22d8d1f9a442280 304502205a328d86216d9a203519fe59eed7c2406274ca145c93a1458b6e89a2a113bd92022100b98180503388e96479c764f10566c57faaec868f1e0b3b7b60e2a37e079d3c8e 30450221008db630faae9cd7eb9dbde91bb206c8b818faa4f5b4ea8d7f54601d0eea233e0a022059db37dab60c54d6f134428212164e15707a5255a84ae93fdaad5f3e7adb55d7 3045022100fd00141989b07ec986ffe5abe31e8500848e66bf1b5dbe4148a6d0f57c6ddfbd02204e2213bf4a33ed1f5db6520e4c58853b53048ada897c64e67d3205643f9dd77e 304502202bc074fc4bb1e98644cb4da268ae74bb51c57c4ff38e07d08dfb4cd263b8dd98022100d7f691a66b093fc0d36ae587908c7973e7347e28854c7e72e4462d0c11ad6c5c 304402204831919a3085aa31cdee5d6e40333650939af1d9dca2feab226b17fef24771570220215cf5eeb0b153a0f1841fb7c83c80ee03d02aa81c56669fa7f64dfaae9279d1 3044022078354362bf8cc0f77d998f0db594163e7121362a8d00a7adfd79f7f9f419f58002205e7d8df27986b1671032507e1a20d9fc84f3a49d0095cb49d28e465183cab09e 3045022008e5e2b1a382d8ca12b7eb807e0a3283f0ca397bad8b1beeba3403b2a4cd5376022100bd137e370ec794df5bd7d468567075d603c899903bbf53ae2a3e488ab49aa3c2 30450221008506fb729dbc08f6f5bbb3d8c97b2e87e70d43e11cf7776c6c83591e3728681802204f4cca7dec32f137e980d49768de14c96a55e1f87d60d263338d01e858b35199 304502205e994eb78d52f7cccfc62edfac28aa5c524fe2acb40fb70c3328fa5d946c0a4302210082a6f9385e47f7ebeae72d49b536dd38607596110148f83f529ab75f801b6c03 3046022100c456529874c285250e2730c361e473a5f853ba175881dfcad5c2022a780ccdc7022100ac1cfc2466d8ca66e7585cef7f618d7295a5ee52788a6c7bd16379eeb60d870e 3045022100e36374a79d166089140a02d045b110bcf99bbbbc61db486e80b6c6f0f2808c1d02201ffe3a213d1020d3e3df0c9f43e6151d4963b4f3118a0375af3ad3fcfe837fc9 
30450220113fc03ee0b8c9afaa7dcdee71865e0690227d85ed2fc47513b788cf45e128f3022100e5af357b346441a1433b876661f83204fd940a4a2df0ba582157d1da6ecfb0ba 304502201b0963855c122d49660c153cc2c328fade66372eda4b0d0a7e9201ab1ee99205022100c6d79c26dd4c8669916440f1466b73f5dc688c042a048b28c66e71d764713063 3045022078e2c89cea54ccdc51d9839779312a53dba064d52afb7a7547e19b74c63c830a022100a2113d2d1ddbbc37e074e62b47c9602521eb8bb8bff6e05697d93d24837e4245 3046022100fc2aa0c0f0754295a2e9983e4934eb7137dddd8075c7aa4d32a6f19d767f4aae022100d6feaa06896783fe2ba49ac5be43871f015c2c3ed6046c3bf90ce541c6457c16 304502202ef6fb999c5b29ffc121e76af688e8840757e7e12dc8803b76649b007602beb70221008f62c3160ceed7746149c750bf796014dd4f5dbf674780906e6525ff53ba40a3 3046022100810916e4ec2f6937f8e91b933fc016ba5c125d96b400b39f1945c31479e0c1f8022100ab11be1aa1ab5679ea44047d2fc5a077b90ead9ec0847f307d60cf629113ccce 3046022100c96477776936c43852cfcc96f7c7fe8bd2dce61f5f2635083b7bad1f82d49452022100ed86215dea03391861043a1a0e74733c82e8990587d332c4c3a9eb99fe4d9c36 304502207095544e4c16b8acd7a2247d6dfe496cebbee31e0957cf00875709015a9f9a6c022100ae405edd4cabd7ba8e21a944372b98696ddca978630b37687cabddb91583b2fc 3045022100b0f3a1d5a1ffd5f73869caf5fb16c189d39caccd53c9007faf612ff44c816ca5022060e47bd5b90ad3fdf0774d7e3767729738f19d47d52bdd7a1ae106ae800edf82 30460221009f276afc73da32bbc195bf1828106285c292f9bb6a90a547207254614238ff2002210093736742bd9007ba6aef12c4c05e7f992ec2c7a85a82ad0d721e8268009a4ab6 304502201616458fcee54b6aacf4fdf2024e5b247874df54aae175c5c908247e3bb2c8d6022100bcfe53185c9323d78df02f0b28188ea11369da2d6361cb8e7bc75d71172e91cc 30440220711bf8a29ea5605661ad027c1d05c216961ae7eed104fa55da57c5f32b33269002201e735f9d2fabbbf1da6fdfc93097e8f4ede018e93b1e06531f4dfb977beb5996 3046022100a0bbeb16497f8e4b3e58f21c031d939da002f8bc3c610866f6061b1d452a85c7022100f7629091b888d7808df358e1f7ae8cd4719fdecab8635ef72813f79295ba038c 3045022100aefe012c63b822a2f80e5e28f72d178d4a94db310d3814db8b54f5cc62a29417022077015b26edb9e5d2aa087927d6f4588d1896e7dc83262b96cb1251da35d39243 3046022100b2d280b0d9bce3ef4fda64bcf6424c86611cee016116156124a15c3999ba9c3c022100826b603e779cc77b0b246db690f4e04eed58e4164fd3185f89c1bece29cbfa9f 3045022100f9c60287a4d5481563876953f495d13d25099ad77915581d521092c95477488502200f63b20b4dbe3be9c9b6c3816125c8f70bfee4b9a4c89bd844d8c17a1c583b14 304502206107c8d557b12611169b72fc8637305d1ea33061eace5d7cf0429f79ab0730e30221009f744b0fa03da0d90da129b454ec7becfd85da7a20c68665be507bfb2be622f8 304502204fd5a7ea0755f1f5792847f6fe2e30d06b6f63b46d95a25aa0b9c1c664c882c9022100905442b9ec0d4cf9a94d4a2ef4ee4609387ac266e846961710abe1d99ac636d8 3045022100e6fba211ece0a2ee53eb337408397ae1abfd031d84e539d09477b905d8a23c0e0220190bf12b57c25c2022ea9f301280a1000e37ffcee3f9481cf2b691de44ee7d47 304502206bf323593d9d8f73ddd0c6ee2ebceaef54f6daf5961fe50c1bca08042ebb1ed002210097e1b95c0e1fb0d0c6e08a643152373506543cffdecb5d939f36b8576d068897 30450220622a117cac9140cd227ae44a9f5bcb4711c381e8c469a210331c09a62666d465022100c8c6cbd8466712e3defbd4ccca5f2f5d39b78ed9e5089f15c7fd3f2413014628 30450220427f5f569461352bbd9a55b63d95d992e29465bf1baff8bc1bd96fd06a52d09c022100cd49972e197dbcffc4b077593c6ba65b89ae352a64acc1fbd760862364c5762c 304502203ba721a214ac5f54984fd441a261c3333a2698f2f513430938ee868be6f12090022100a69000387058b916d1c67ae7ce4f2f3a89bf09e6f776ffaeb0e8ac4a95d72641 30450220534e3e60cb90fae3799687fb68c40c896537bd72e21ceb53622570392c4f9ee4022100d12dc7368509930d89e8c8e1c1b38a265f7e9e450392d9059cc5b7a7fff2daf5 
30450221009b4048a491884c664d59284dd51857aee9020b6b99917ce4e63a2b53c94431a202206b4e64c6690e97db94cb671ef615fa807f30ff71264689f27a877b8c617e5750 3046022100b5eadf94f002ca9f8a802dfb5ccde92c6d3070ae2eedb0c586a84860b12acc51022100f2eea66730a7529229ca5c311acc8f4d253104db482b259da81895d332f794ff 304502210081500e01ce194cc9c48a04668a9d2b8889ec35f9f006bfbcf80bf2a001de0dc202205bdb2d7f0e9e9f4920f60acfc5c797f6cfcb493ba16bf5534304686a5128c64f 3044022069db7ec097f2c46dfdb0d588f5efbb8091474d23d5d7f2263bdb34fac1607ca302201aed5192942d5037b8f7de45f9f3531d9d0875ae6acfaa3825d61d4745b7f2f8 3044022035549ada3b0cbe035bd3b21a0298db3682d1393559b02fa72b2cc605a8f5d93902205dba58260c0415eb774cecca21dbce07dde2b1ee5b37faad05d2318a1b61b80d 304502207c68a8e787687539977eed38abdc9ba0744afbe40b242dfe0284f47794ac8e9c0221008bb19b252328a9f05af11a8a22f7de4f226fa70e35c4c9e6eddb2fb18bf8c49a 3046022100f418bdd9811c5584c1d11d3d0ab6017956f77d99cc892b76f5725cf75fc98c87022100f0debf147f9eb089a3dce7d89f8818ed3d55532b9cb46ad41568ed22ea9a4d83 3045022100d126d61b52f4de6ea08cc62df87696d9b01765c9e2973e8250075c01c8f44bb20220358d4a847e51180f54a9a78cf7f69090cc9667c03e8c0c76860bac14e52693dd 3045022100dab9b9688addd024391a30fcb631ff34a5cdeba872c5dd80d3f0ceaa9d73007f022055a21b72e7efeb476efb3718c636e9d50538e39e9780fe019d0bf38a43805fb3 3046022100e3353728cca9395d3d77803e4c13add677e04671778fa0f95472e81e0cc8b4fd022100a7238be29230912dda53ba39e3658b6d94ae842ae2730303bc41a941669f7577 30440220671b80d74fbfe1bf26b80f9795c48ec7aadb213a5099ef25d5b5a6a69b8332ae0220315473692a3b0b0d3d4c30700e26227fa5605fcfdcdabe75bf0585713f501888 3045022100d454c8743e177b6ad112e1322a1e175bb6ca507d0d89224e4609456b8eba2be102205c2dc49e680f1972c132fcf3d711f5051ed28bcb337f01378740d79072098216 3044022021ea5f96e602dd6578b9da05063af48aeffe59abdacd3187aa25c7f4c5347ac9022016564c0eb5b7edfd671ab89b4f956a1cd4882ce0d683386e5793ecf875bdfb54 30460221008aa74706f600d03ffb0391a28e01fd39a301c54c11a0d640d50b74a960f46c560221009bf9f78ab76c3c1f69a62dad7de29c2b66f000c2bd3995473a01e0339b5dd494 304402200196f72a54bb5b934efea3f8c823c3226e40f0382dfa92a293720778924b91c402207bcd1f75e3c26242e8079fc76f843cb3d77ab3ae71b3d457be0d4ddd811ff316 3046022100a689b22984f84fc1ddda11a911a1ef4c21d20553dd3701de4ae808c249b17dd4022100bca8caf8dcc2b17ccbe8bed58dc1bcb2598a605ccea89a4fe5f8d8e44da4eb47 304602210099d8626c697c5764a96fd0e10a2a328f15e3bb6894aef5893f9abbab4a8e8ab0022100f7573f22b95ac75d64478e927ecc0465868922856c4ca0461adf0699f48cefe6 30450220175cfca196350487d5f5df0a0094f3d564eecaf425ceeca46233d4903e6ad21e022100c123b030642918c4ebc61632cc9dd5e74faeb4125af7d1802c2b84f7daa6f540 304502205010d80fc3a59cde91f7acb17339497ef4f2eb3fd77c21ab0792f8b0152193ea022100bd8d51fdcf28812eea71e8764f63fe73bfa38d69c7ee2512873fab47681a2ec4 30450220232152c0b22ab7ec761f697325d1367dbc3a732e92791fb14f0f3119a62df93f022100b772b2d76a3cfe2e11643c49fa8211d7deb29f6626e40938a8206edd368bfaf9 304402204fa2ac10e792ff19ecc34f1c90edfef9157b83ffd94c8b5ed235553df958153e02204259630a8634dd8de15c2ec5166e72413e131683cba1e2d994db940140bcf8f8 304502207d1dda91e2f2735cc03ab2aa08a839f3c8118a0648a8bd76715ac2686dfb65e3022100e116373653e119dbaf7176ac917f1c8c147e8a183c50f7e8f5f26d910ff7ec47 30440220660397afca6249500f841a867a27600252cf2dbfbdeda26fbcf0185fb460f07202205b5970d2163cd671ca96945b9c98621544eb7a3f91df054f4df89963e6935c9e 3044022018ee8054b45df45383f401563893e707fa58b8ffe16a35b8b9d6b5e02204e633022028ebd4eedbcb0ab19f76b89dc256440e77b06a04344c46c1c06e208e60f94026 
304502203c90ef1b798a9c0547b6f8c407fc5c719599b307644dbc615ff27f2268e323350221008216da37d404f7bfc7c63c19357cf6b279b3801471ef8bd1d0d5b1a6143c4850 30450220565f71280c39ca54d58786cc3db297ff5ea9a64879271287c54313ad9804b5c7022100b50bf38720d1fb97b4d5d0cd1585d4eec05d18ff6a5b1da51f7e057f00ee046c 304502205e1002f0627ebee3d895d41f95c4b7b001166e13c296042be19e9be2467045b2022100fd42da018912197baa393d4f3bb8f24dd133cc394214f8f009f86acab65abf14 3043021f4705b981433db5f4859f78b9d0d1b1801fce9fd005fa45f24537dd63a5036602200278dece0946b11c84d254c95ae77021638d69cc357b9f91d1294533689a4247 3046022100bf2df5a1fe081ed755ff996b68cc5b6e2ab5041fada34e74ccc629d3a1da9f55022100eaec9cf96dcf98c604ac225d8dce803024531da023373fd928860e11037cd96f 30460221008a09dac363c6296b3c0e5152e429c31d67cff810daa075bcc7391f1aded617d9022100dd8d201166b6760e62a0f58e97a16b7d2abd3b851f32ecf3ee683f0e9c375da5 3046022100a047d99630a5f161e3159aac55ba6f10292b0f36e81f9fef9df9f3ead1ad5653022100934fc783ffce1a426911e22d4e888e6ff0db16aa1e029d901d05bb5d56ec5495 304402207dcb6165f0ed7620abc03cb053047cd472843854d4cafd50bc055c73c3937d1602203fa4b511dfcffa5d0a60be1f4f59cf479290771c4c8b650cd0381a5cacb0c401 3045022100e5e9379315a1ef926ba59fb94a72ced3f17d26299b692e63b0448d852bc7c4c3022068903455ba1708baa00f8341eaada379d9b371c1e23970d49c0ea4c07cd34482 304502210095f2de6795e7ed2bd5db476433cb102c255d933f4c9cde4086665892d8e965fb02201ebf0b3b95a6c06ace7badc801ccb5067682e5496b900a18f1aa89f10fb5f73a 30450221008868581dcde42c1ec54cafe114724fc2321f14f2963877ebccb73ae867a0ff37022061cb93bffca102883d9d851b701212f5322fcbc4ad371708df050d470a24bb9f 3046022100a1dd72247c7cf7a36000eb44672e1eb54f8269e57cebd7329ddb4205a41fed21022100b041c2747162da7323e35bc78a44087825126be4001b1876af27d755056d6adf 304402202039663d29ec183e7aa77d822270971507eabc9a9f621e71b486dd64db03839e022028157c4faaf395bf8dbb38b826a1bde421f461257fe8e1f145e01c90e8e79861 3044022059ab7495e31bb8a954e72213a385524563e5bb1f46dab8cd9650da778b749b9402200efdb73e1bb1cc483cab28029d05328aabf5d98e977a438195e55e6f8692139a 30460221008599b87b8d7fea604e07efe823c51f53c49e79df6c63e1caf7f5a2b0c51bc907022100e2987eb16f72da4b00e4fdb9cd9fcd21fe27173b4a78dc7db74eb3d480ceb9fe 3046022100f8d1453d1db1bb9db369b0750304e2720c9ebf202fef63582e2517fdb6ce9945022100f6b3eae18911dfe93c1907868e23c984ec9b785067327e5a06cf69c8fa161798 3045022100fe89c6e229cf5be3c174fc75f006e761c55803d122480e6d63dd70cb933bd6450220774bef80cdf2b668931df429fa01eb3212bbd79e6cd35566ea00dc3ec921d29c 3045022100b1af52addbd336cfbbfd86acdc5738cd062df52810128e615d4292887fe1e30f02203925d965ee363222bf84ebd4376f05107e6176446d08a4f341511f5f0ef71b2f 3046022100c7b1846a3a1151055dc25e9b2afe1a920e4d4b972f982982729bca927bf53b52022100afe293ad7183d52d142dccde6212a246ea9e72c4b1935a938e0d8984f0af5d31 304502206cf8c70a36e16423a63cea2e0f26c9c9dd3427b02f7f111b6b9b2e5ce5e3e764022100b58f45162c1f371f3c691cf8c66b052f8e5596e18727f6091b1c7b9e021db79d 3045022026967ca6e957df4ad0f3ecc01d2f1af585bd9580f7e032791c3367cf307d9588022100d2d8f974403327abc9296e15ab698f8799330a841cee6baab2eb5438ad13d5b1 3046022100fc9eb3bb1f5db186bfcda283c14331a2ec8967c71792a3937e856ba49569b3af022100bcf383b08dc6c0076297f583850f320201a2762c5306a1ea07dc499521993df4 304402201501d801820c48cc17fddb8907e92230d127588444971e12464fe5493716066d02207c700e5882306b0afefebe2bb7f8c0d18fd2fb6f8c6e8523b33b642451cc2756 3045022100e0372b98ee30bb7174de94f2e508776f8164a43e4e83c1fc54e5b473dbf3476f022017d82e15bdc16393ff150ec974dde0754169a405454bbc896cf2a76ae44f7aff 
3044022077bd8d0cc95f9cffe3fb95561f5e151cda72af962ecebfdcc4dd341c7e6210f3022073cb04699c0bd2651c8c605cc5e63613c8c43e3c5f0699a7067609e8aa29b7b1 304402205a4047795fd15e08173d50fd75b59d70e8ba81c1a5fe6a76c0cdff0efdab89a202204085ee54b0556346d87edfffda7639abac81aef61e8daf8ebd1d5fbcf98128e9 304402201c3074c71cda04f2f5982ee07f414a2e035cf9d214d51df1eb0f931b7442b0f60220522e0341864e2d425a811df2f2fbf0904b10fa8cdfb77c1c7936c29b1018c72a 30450221008070e7d09841867cf2675d91098f2f1112ffecd9347f2aaba9ad8cdaecf37350022021d41c0c1ca5d48743f03fbc0c7891bab629f415e09056f1c7738f40ec864d48 3045022100f14652d71cb54d44462db703075dba2f520e0aca4db7c093bf6d21c4b55fbab40220549b2eb929cddd5b6231f5dadf129adadffa104ffba99bb15abfef5ed14343ff 3046022100e5a2c71857bb2325e4822182ed4d3709c3f736415faf9b0991cf145460ccfd9102210088f7fb8e7f7b1b920c677fb71af82d7c450bd120b4e43a94594b8980dad08d1b 30440220110cbe5b061b22c2dbfcf87ae2dc7e30314b16163a51c4602ed80cc2ceb1e6b502204bd8da8d5353fdbb0a4c0bfccc5e17b03a975553790867baf590e3072a3e1ddc 3046022100d8097c404b29c6bebb0ccac9450f0589b21d1b7afbcd64c082c2486898a1d668022100c5859e480d6433f3c091f74f235b4667843c9ae22e80fc716fee65ff91a2fc39 3045022100c8431ff616e4616af4faa93084693605e6cfd81f7c3c86bd1d74d7a9f18e2ac80220286b27a306fe2f5a3209fb333a858b42a006ebce6b7ead38c40da54bc0b61301 3046022100ea2d4cf65aeed298a17486f8e7e72a597c8c04f7f2ea68e385d4c3f89091ba94022100c5d5ac3dd1cdeb595e1c1b8eb5f09bf99df0904ad98b9577ed383629b9028745 304502210099b17e47147aa4f259e5ac3aa85205a90bfdb25faf6f3f77108afc690edeebe002201260faf09db6225d703090973a42a893bcc9520b39d8675a0ea9d02b87483a0c 3045022100fbe9f34b70b39ce5f78c505d8f59fb22ab85e2eb415ed4863045a44f895e971102201cd28297beacce40f57c978088b1382cd6e46ffdd74fac88af59edfd3d94bc3e 3046022100b1dd0816bcfcb6abb0dc336fbcaac4b478417dd09ab87c1fdd090aa5ee24fa9d02210084825ee78c757ff1b9bbfacf294b1b48f53360b81ab5a938d31c0c3d89458082 30460221009b2d51cb7b5f7409ec9a5f437a19c847a6e301f66452e89baf81b6c4a11b5903022100fbefdba250e7f8d61d313d7f6ad7d85c7f8b44877c871b1d05b47b2e8414ba53 3045022100c0d1cf7f041bec870c2057e6bc39d4b989df9aea7057b609996729d2029ff3c5022021329c7bcd4fd3152713c9ed922543c84ba9f4ffabfb376eeec2de45b36c1fef 3045022100a0d29cfe1a56653dbe353817b54829480cb8a8a3b732d261efd2980b65843c260220695655096dfec1ec33364e16ff11c9f2f1de144a5fc880a7f09763dfde2855fd 304602210089b0259d1979bd92b5372ca7bf526ff95f870a83753c292078ac33be942974590221008224a42217845efde501c21142dfb9aa75acdf386b98e827489f09cb15368837 304502205aa5479e41510d4ed7d5b18c23c9b48d91138661ebb07f779caf7c4bfe9e803e02210095f138ea0185719638d1ae705e373eec63a716884d7cce9df4af1a4543fecdc9 3044022018a8813eeabc6750393b5ab837e4e451e55a3b19c1c1eb28ff6b1981faad20c102205c6e0e3e4d7e108d9cf81cc9a1f59726d499fc69e8761212b4cdf65d7618f3d3 3044022055cf5b973cf8bb4075d38efe50215d2706567f62b43597ae9146af009dd4131202207f212da92b15e64a0085fec1e2fd91546a9457d1155ad84a358e93eb29cc316b 3045022100bf660ce0c016317b5df6454ce85ae0011107304856051347829284d71f0e9431022042446c108b8d7a3e87a8f7649d56a0481d693c3cce949775ba9e4d0d60e60adb 3045022022666fb2dda4caa49f6e7d4a71980936c24b4455ecb21b68a4e3f733af67449c022100c1804397cd1db728a2f73c7a14dab980f24c2ba273cf5f4742ca09d1650e21c2 304402205b6fa20d20b73913aab5044796e755a7f3adbbb5423bd3a25a136e99a51c31c2022039299bd4f1459024897ed28f73945130249d1d88cd9ccb5859190460ea4afa08 3045022100ca69be2a6669ddb1d4532b2bc3ac3eba84cde88f092d53b8c9bb689d990665da0220755c1927f942312d01e9d1f5fdc37d2d89616799753e95c3e035ad6c89ae1a46 
304502204e0e722d8063e20d1d0a9706885fb3d144b9b5dc85ab41783463ea314950be12022100f521fa5d09793f15435570dae67ebd1189334425e815c10301677bd7983155b5 3045022036d4de5c25845e69c8d68db6dcc759e74560462f274839dd1c6eda3684f2a20a022100eba50db6daaad3a4553537205945bbd5d5c9176c7d380bd883a93124fc1376d7 3046022100abe1cdf63481af7d977bb88c4d2f2174348608ee26b024636b2c28d6ceb2cc17022100ed42401937e3cb4966d26c76882c74eb5cd19d45e5f2d34b1fe9f969185a4e8f 30440220732ecd01a472c59fe2be98b0df11f0b41242fbc40d12822f9009273a14f808aa022067d9c04d030133d6cdba24ebbf7a1dc36618d943bc8e87b1a7366fc3f447aa7d 30460221008c3c0d6dec40ea070cffe4513f4e83a5a7442183e6aa6039fb06de541028c94f022100a7fc527dd46b0cada8e808b5457897ac4be0683e3f754c243ac45a1e718d89b1 30440220540df0f94ad84804e81b89199850d4e6159db1af16368d470dd84c138056b802022002601f9c9256e834e77e278d1df5c98bc08f756eb5768434b8ad989d6eb98c96 304602210086fd3ee0e74c3c5f7c5a1785b1bf86c21799b95049eb5e0ce1709741c0e38cc6022100c0eb4f2295b833da70cc5366bcf90fa96c24a4e44b36b65e86389f23ffe064a4 30460221008c36487e6eb7127dbb5cc0e4725aa355473e96a2d2c6ecb9d50f90e0373fb156022100e0e722cdef38bed418c3ed48a71a5674cf6857bc877035143b677c9cc9c9b979 304402200b7f716d76f5be1187d2369d0f0e8b190f6133ddb0fb9151f12d1a83394d0e4602200bfb20bf87b35859badd5bc8435f57d826d67d7d4e8407368b111eeeb4f0bbfc 304402202b86c6a55fe7cddd1a583988d00b52187a54f1bc050d68b20d2ff20375f3f9a902206046f8359ae379f744dff1708484436f28bfa4c99f60b5c15feec97f9cc8d949 3046022100835b54f660ac51e58bcc55324a357ff5a7a8c37c6140445f1e42c4f90fbcf651022100e705353d018b0ab69583e2f374eb866de71b702b7e7868b5b09e2bd6451d7921 3046022100864ef8b5b951cf39dc21b8f5cd8ed0748998f9ec9032f62485acbe3afac753de022100e6162f90bd6f7e65dcdefa77dac75106be248b1962523824dc190ea30cd574f0 3046022100d7c7a738fc4db5287bdfe9b07db625b5514a23de3cb5176b55d97bc17cf4f3b3022100c8b5c5c6cf58b3a42c1a7d4fbf5b10fb37b0a6c9db7f6e2a9e0d596d25ee191c 304402201d9d1bb730b7345ebcdb21189ccfcd654fc7531ff4ceca090f3ef36edb206359022014fff88795bc6536589b182cae4c9591aacc194b0387cc80a3f47e10acec666d 3046022100a141bdcf045059a39f88fcec239000506da100cc20b79d230754cd7f8b605e95022100dc3797f41e179fb706e59ce2c0c047b4609a2fafed6ff20a19e120d9d46081c7 30440220175534a89d585c2144eeb0a97cde3fcf8dcae1e00fd59373c3adb435c79b7acd0220098895fa0cbbf794c5a0ed6cc5cab98c99bdadc986509d0f42c6d65e35575150 304502210084aeef4c40c0255fc2aa6e0bd29f2d13386f1adca4e7482b8e3179c061d2520402207b905ea98bd067f20c6ac306a95cabb64e9d713acfe0dbd14a960519923c95eb 3046022100ff88b487297188a16870ef182cfb61e38fb3beea5b7e32d3d49a1030fb662515022100db2b7eef6e02b8905d9f770fe0b821c1c9335169800a6e8315fa41c77626ed73 3045022100d7eb01f011a43ddb6ec5e95e2537e1c1246a61c037c1b6d21f73ae69222357680220361c15174d8c34052f03f830e9820b2687ca5a81811ea72e2f3764dcdee469fc 304402203d5c18c5f81ba3e76ca70fc96cf53d67a7e84d01abda56a053c0e4098122b0f10220367df9a8a75dc27244e2a29b8f62fdd60ad21299bfbc80abcf5110a2e2d3bf10 3046022100f0f1c16bf64f0c3f363030fb6fb968497931e02a5c0966afe76f5e673a480dae0221008a6de69709968722c325719135df41e7b3c2e7e8a7689da55680f6770f761d22 3044022062dc719ba0dc1c62f71d41d9d9bd4b2041d1617fbf357f5801235c64c8cc5db90220308316b25f86eb612a9b36790e654018977a399d8896de739131b12f58c58b0f 304402202480f74cece9ed415cce542e1781b2a74ea64e8e1b908012b2250bfcecba188602203fa7bb62664e0ae79fb86d0dd1bb5781fa5fbfc00f4721367727cfeefd1f61d3 3044022044c90bad86711b91dadbcc13b5e73941b00dc81ee899e05a37bdd6e6be3f68c802201801a47b41fc1e5c9cfc98280f2b9443358f5b489a2c0ff62fa76ea40da7ffee 
3046022100f1a960bc714fd661d22c59ed962e84757f9de4e05d825312410da67b3b3fbb75022100c4c09d78534e7742f1790717e0a0b9dcbc02700f5339e94fc90e5eaa8e4eb9b4 30450221008e2fb551ef563b84668ca5f551f5c9baa242be9c65aa1c2ba1d6f0092dbe579b02200efc205dac19bb14bce81613b09bf5eb7dc8afdf1d62f3d4452b4cf466531cbc 304402205b462ea0eeac7959bd5958c397e7e0024b398485be5a61f1852cc1c6091adceb02203f3ebf703f320f5f5d314a575313d73cb0914ca6b0c308937c0eb730c5b590cd 3046022100edc54a82054e8bed222f79e33bd91112dc9b10a5b60d969a6cf9461561f0817a022100a6bd1f113f03f1af2b5f34270059cca7b0db6e6903c2cd53addc3a12d0db9423 3045022067c841017b4d30a8ba67a3b78bf540a125a4428eb4bb0b4f3d662e19cc466289022100db2649aad0ffd67168cfcd4309439db890f9760e187df3bc0af66a1848626534 3044022060455dcb393f091db62cc5343e46ad5b5416caa9508027e2275bdeb2c6737ee00220390154bd411c086dfae012b225582dc7c0d3232f8efbe8faa6ecdbbb53369e67 304402201b24402c9b2916800c096bb6e005207df0466ce7abd54e37bf1de20c97ed4a18022051c2405df8c6c7d4d583fc7146e990d3cbd013667af0e1a3a0ae96dda141906e 3044022039057b62ff4398a3f254d5a8055087fdcb7a07b6eb095ea053c0ea704a54218a02204958debbc20bef26d12a8f29ed70b4ed3a773d8997c9448ea85e7ab889d6a663 3046022100d901d96fe53b17314ddfcee1b03a52ff3e002fb148ab3676d780437615ee11f7022100a1411ba80e2c86b8b0c1e5b14a141df01f9141800cc45d62f4441532a2d73aef 304402207c10980b163539869c71992dc462b5c09cc61807c11302ef53a52351ae6d5902022041c4f5ebc287ab02f1b83ca2e0a5e18513c3e5efe0b8ad29918745a235c7d3e0 3044022069e352d2118b76cf478b3ac91b174896b1f4fd3223b30e80c7ea560f7178cfdf02202c060e05a72aa25ad672de9a4916758073d86aa0d4fcb0d5638632149ebaf7dc 3044022009fc61e87952f1ca2f9da577e2645bd6e571af6c4dd825549ba0dbc75119c08502201564a8dd9f4db5e9952fd17f0e9b330d04b228cb7587989d73fa05c2d38ca570 3046022100bd29829411d681d22aca039b41484769ef46ad557d03bbf9125b3d684136d6e2022100b641d708725de7d70e8852becb2d21cf682c258bf9f9c04fcdc288a33861418c 30460221009b59319ba370c5d41be9ec6d10f5477b1327371114e7633f07c504f9a059690a0221009372a5f8e9690a98abb7cda2c31422c3dfb1eeb34345d5211895b98257753a05 304402200886156b69e5039d55e8e153bacf40af00fb1c41b8dbf59b1ee493bdcc03ae1d02205c5048aa9856649cdbbadbbf3e02123a121b9f624f536c51b3df187d6e66185b 3044022054ca677865a1942dfa74d01aafe5eaef13c2aa2649fe1ded10ff612fc865e06a022049f5fdb1569f591f6e25b891ae8d46ec884db0aaa298a51663a8e217eb03f08e 3046022100f3255761485b371a354f04ae7664452679686892549e4e89a29c89498f4b76a5022100b45a6a6aa9654451d67ee0fa141c25e2dd26477e8504f69ebda709d29c1a2787 3045022100b74aa41d3bb0ba9c9f5d60bc6fef47425ac9a45751729980a1083de6a708b39f0220671fdd5c149d2ed3e3ffda1beec51e0acc184945ee4c875f0ef92a8a79a1d694 30460221008e945537066003f0666d0b9417f9eb82aed8425a5de4415c46bd99d349949b78022100fa999497a8d0758d248ea908bdc613799cec5ae35ff1fb750b57a849ecd22d6d 3045022013bdff4bcd52316d0bf23e7517b2a0f9d51079ce7d8871b3a4e8af518e6b2f21022100ab018c4a277808fbe4eee0300949d9ad6372f519f68f5848360604d89a9cad99 3045022053dda200f4451ef8b02ddd09b9ef0720d427fd444e1919a038c99424533ee476022100894caf92268d22df0d84868a591e05bae84f013b7bf5a343f98cdf9b04b58321 3044022067ac54fa94a6c46350d27f21676c1f4f937207cd1d1b0d5825c213573b74f1f502205b2936ab09e6eb18d2560ccbad7aca7df085ab79fc91cb10ceccc047f696a37f 3045022100a48cfbf9da954e87041eb6b70360737ac106430cadd4afbbf6d4e68db1d519f9022066692d0458645a3e8660fdd6550ac8105f911402f2ed54e49fc8f26848b6cdd4 3045022100a072b27e177f21fe329a98cc025a656d867ecf6bbd15833210833d2e56f0442402202fb9232bd98f82208cecf69a70715d4901b88f6cdff22a973abcbe22178702dd 
304502203715ed917b41f2f54d4e7ab470c1daaf0bdd243df5e90707b6d08529aa575419022100add1359e3f4e62549cb9bf8a30f86cad3c940ce6537f7056ea27a4e079b10b56 3046022100e45bae66d93b8c287566004879b12393517ed1dcd396188eee2cf8d2e3e17cef0221009d63fd32b5832e8cbfb1eeb925ed5bfbf160df26ff5c0a99a47f3c7c25f573cf 3044022017f98533cfdecec710dabfad555423d9e6736f7c642abb050a601022ef3a2ff802202ec1e7950033e12fa16e1c34fcc3a6f629cb5a3b038737efef215f079632515d 304402200b457eb14ab6f6a0a3bed03bdae96063d9c2ce65ccdbd4603d9bf8314464f63f022020466f430b81933f622ae15a280e7568542e3f1e220d02544121dc42b2307ff0 304502204b45adf4ca73d1def4ae265a2ca74910986e731c5a266ed951a9567be38ad705022100897c45cfa7d404c7af2d39ae9aa5a48ac5777d444a33ca64f74eaf9e04ebb05a 304502205415393055c3c69058e48cab8617e8c397610b2354cef1a6acb0420fcf4d86b5022100d713f8a7e1e82f0e278315afa84820dd0160415d915d03227b33967e4d92aa4a 304402202d65f3052ddcc9c5af1dfc117b358d0e9ce024a44752e895153c6e7cd65a6941022020bddfb8c35802a5a5697ea9d32e99223cf790f6c249414c8654a59be14f1dc2 3045022001f505a0a1924f0d09b341a30b269604acd3d925b1854ef6224f0aa5deb15b98022100c5f507d55a3b4456c5b09fa00ccd43380e641f5feddc7bf59a4d2faaef9cbe26 3046022100abf2399b7879e81ad20bf3cbfa1b4316f17b264d387ad39aab2d3457a8295179022100f7b781c6caa57ac51de0712acf1290e1f37990e5646e8b449248e481a6509f98 3044022077949f4935f191977fe15074095b33eaf7f31e76e46cf52aab2d01665918acac0220481206846002fc9c33811433835223e64736c5b966a9499390ba57ba3b040789 3046022100ca6c8311fe7af081aa65439d865b843faa807ec94b5a98617cbecb6c6c2e0e93022100c9d91d4d5b7a994f8a167c7cd633696daeaad9e9c6848e540d4892a7ad32447b 3046022100ca36be931dc9d169bead62c149bbe1372442239464ac28657c1b85a23dd0c690022100cd5ea325053660427f6ad680cc67bd08b38763171033677bbc158b9a5a929227 3045022051fad3c78d9a7d56e78d52302b17b753fe5ebb61e9920f2089833d98306fbe2e022100bcc5b798da6b5ac6214e4dd828b041d362abbb73dc2b9b4c53b01b06d0b6cbe6 3045022100e5d199152bb8a9eca9ef99359165e7dd0802f577b1bef86c003bd2295a3c7fd102202f1085b4fb5e6cabfeb37bf926cc6c54044ed672497500574ce115cf151e975e 30450221008850535fc48e062a62d8996af2fc929c0fee35235e50b5778dcd8d5b6121a469022005c71b40c78055ac2f51582e9fd20afad9a5594d0f959b5d7875e7216937e33a 3045022100b7dc4bfbcc5cfd52409e4e7bd41ae238bc2c9880965d75b014f0a05dc68f6cd0022004e14945a381c8a02a4c49e26a2e596bfdb6eb58b1f85a61a8ef64164857087f 3045022100908752b1f29de1fa036199839d7a8ada7c9c9d058186e76739b7c71f0c6fa5bd022024663e54c40f1b131c3a85570443bbbe4b9d2cc033b7b1a67dd394c4961bb7a8 3044022049857335095d941120b8e6a8e1274656ca04479adb3fd58f71561e9862ddcf8f02207e5b622c676ac009594d6264a2e4f41f9599ddc6657bbb7327bd947ce4f95c77 3045022100e403654e2fd51436c5838d345be7ba1d1f4b8412e52db9d8720f1a990788f58402205f76cfb0c013dbdad1ddbd98330d41a7c48452f80e42da324a0d285aed1c5a7c 3045022100fe41f49eaf1182acac959f5270ec1a7ccd5b96fd91bf976eb9e871557275612102205ae32437433dc7086dccfc0597ccdcb1a65171c1a0fe25cd306ad0c54118c696 3046022100dbf1cb81cf937b0ac02995af399b71fdf4ca24f943960043b15b8a114aacff96022100cbba979881ab33d33b25e878410fa796936f4e20ecdc6ddf90ee544a47367e1d 3045022100c805220ccf3a304462c07340cdf82c2b414b9fe8e5dba5175e90f1a008dd41b802202b9ba65e2390845a0dd111701dfc6fda6d5c926272adcc4baa7e42b675ef60fb 3045022016ebcea52ecaa1edb6107fc0e46317662e5a72d097d6b2dfe62c294cd47a9626022100f56517db8af65b5cacde2952882b7366b2da3508142728f1ce699718fef20619 3045022100e9221fdc910b007328dd08c40bd82b0f16f903bc390b7c6cb9f5d4b73e4c6c2802200dd2ce6d3472fe3f1f2280174a63bce51347acff845ce3b12cd1079a843d0285 
3045022100f8831976b179d43d042840a95e34218ab31a94cc4d8aad97ca76918723222082022020ffd9af3d1640686d265628a30d26eb9be8880ea7b86dfd079c9ab01de6efc9 30440220146380e8a37f4872f084d334bbfa03b7a0a0e582d62136f948f780d682d04ad40220246e88113b3c2c315c03c52ff7eecf4c65c12014a5386ce786304975f69b9069 30460221009822b59454f685591acec3a007a1db8fef4a49a9dc253f59bee36362cc11ae38022100a4e380372f74935fd66d1b8a395842728fe678a6f3b7b2d5184fadf2f9101110 3044022028b9b5d018dd71ded97d182253118ced9d3573a0973ebba28770a7b6c9b78d48022059ae10e52b8925fcd74860259d884f11623f944a9d583cea90eb1fa1338d4136 3045022045173db53590ce827bc854dd229b7cc782666ba10fcc9b89797b014e1054ad7e0221008dcf0d0a3b1b877adbf01485ff18cbf691a55dc900df91f45452753cbc14bb8b 304502200ec85ad676777b8baafae64bf45e4dfa665e66cb003cad7c2496108405a664ae022100a67ec96deb9149f7c8b81bd294f9d487796340db948a4afb2288d5f4ddaa155f 304402204748cd5413adc0bee51612c6650ed8ba65e3e8e5c6b7664ea1881c5b8c65f8cd02207ce6ae36596e708063916f0a7632605e6a135eb18f15e3432df76a94ebe9b6ce 30440220475a694afd6169304ca539ce46b5360688cccedfbb3ad5ad3932e45586f7dbf50220740632cdf2d68924599b1b9cec0aa54706eac03ab13ee36cad1b0b3fce2bb38a 3045022100b7b3ba4670c77e198664b128419d5b250673bfc45e286c03ed87095935e9cfd502204ad6210e3433813ceadd5d275305f9536336bb31ccbc73cd41ec1d5c7bcdf7f4 304602210096178626e6d58842ae2a565df02a6880959f8f35c219ac9f0759c63de43e1bff022100ae4d3bd679aeb7316288fb619ca6ec065074cdc0429596d8505a7c267895dab4 3045022012b5a9f8ec77b46a6bdae0244532b52927a3ebf2ff0a51468ae274716f2a4a18022100886f3ac72a27f1ee6cf0faecd5061abf89728c6ae8dd5f95e9de23f8b8bb9896 3044022050dc4e4c3db00ea1491f4e6c359a92edf0c399f7beb5d9fea8db5b1717ffbbd1022037ee9d8818b33e735d23a43c2c136f8dc7e5e388b09b29b0c7bf05453e8e91fb 3045022100f4c93c1af947d7d0ee7bb19d98358b7f2088ed61d0329d86d4db0e261302369402203f78ebc33e56c820247ae0b760d1477fd0e891b73de3ce2e92c2ab03b0a3f212 3045022100d59768993a34e5e6df780d7f99f3fa81411679d14a4afb2bc43f9e5dec01a0a70220052cf342040b7cd4b01a5f61726c623281bca8ee751b099656f8d59a0b9f36c1 30450220333e88309e7cb35f54cd337a9ae0e27b59e1ba436a2e8190847dd4ac3f24b8e70221008be65474875002c0bbdbfeeac293b14dbc556270e199380c0dbd6b5606db7637 3045022100fa5f35d0c3954f0886f031b6fbfe1ed5625ebac0049723c9fc43b7bd041ffb7702200d40e987ad592a76a475fbb2a868380bd41fea2e2b7255ec252d78506839b400 3045022100dafaa6d035f4b7779d6af0f84797ab30cd69030d2e98d96c89ebb6440becdd3e0220373c5aae55f89af654cd592566bff9da498eb826d7efdc5c2d1a14418026be8a 3046022100ae7b8fc13a5ac28d78bfdbde110213e2373ccb0c6c05b081551bb6cffc69df400221008a7e2cfbcf439ae94a5b2c2350541325537bcfd57d3ae85ce69018eb0cdafbb3 3045022100e82b8184d0ad0ec0c6c550231c9cd577732a9b54dc3c1d7c2c0b3e6cfe1e05d002204d8c2d73b8168fd1de4e0f993d5e45ade02656d40af9b9e32df59ee72e7c1a51 304502204d31fc3bb28dbb952271947818fd802196e10226cf59c0bdbb38fbd47b63041702210099611851906bcc0fa195d1f1e99ad474d91c53a8ffdba8031e818196442a29d4 3045022100cee19d00ded423be22d3cc3aa22977d93f6ed7b053b49e4ec359d1160ab75bbd0220031babadd0c76c2773d7d63926c70139e69f5de6019a5390ba7f09b16be3f40a 30450220638652131c0862ae21c81dc69fce74d0328ee012908c0552b6fe4798b4c46dfa022100adb6d6bf872a4ac609cae35072e4f9278fd807b984ab83ca1806d467d9e344df 3045022100ed4acf339e530cfea47e2d07fdd394b6a4a133136438aa474a54ebec6ee5a04a022050c574386f800b9b68ea85ebfbe5a6b8a4807b0a3a03d7c483418d1dce8a3a35 304502210093a77527e530cd98770d270dec6b20cbd83fced751599888aad7766d448ec5da022036c1b9519849d425aadf511dd7aa5f272dea97919a66bc6de36a0feee98fc0d4 
3044022013714559fc40b91c2bdb451d22b72d2fd8e327aa2c55cf8b320402b5198caa780220602b221c9ab0efc3377d1de831c14a5a2eadd9d269102e8d357890f55f6cf0e3 3045022100c2ab9128763ca66b9547e5f00af517e1a7add19958b426ff1f2adf1a8d7b0a8d0220788937e3147612e241d989acf5f735c3352b72c28c1e789d0a16eab76c82535c 304402200e3f4583eceae86decd939fb6b7b11a757d4b1f2e3614183fde8f61d10541dc802205faa4baa32f90da1423a9c6c39ee64c7a707a64f9e374d9b24002f86cc1be0b6 3045022100cd84ac283ad2267ff69689548e84b61ecb14a1b37a14938c8543c9f7ebb00c0002207bfd5d7ffe291dd9bc96c8462d5b7998dd55233a8fb4fbbef4419fa10d6c79e9 30460221009830519097bc59638878a8855058013357c92449bcc78a97e2cd5b6363e33ac0022100b7524bdf9d4ffba50bc8329ab896d1b8b1daf3541749a4f57bdb6dc875a66e94 3045022100bbfb7ee231f3aefae9f7d6669994c47452b3f567f206e383aab4ae95871c4d6d0220163cb488abf8c6bb3c4add40168961d69cb30a81f24d0566d6dc2d3a498ad45c 3045022100c820dec2bb6f82ff4993655012a71849c2e9f436f459b928143c452e3e6a0ec902204fd676cd276a47123d162afced3b14eeb38c97804bcdbbda4dfcedb9c21e7274 3045022100fdce549d2c467f317eb441bcd97ddd8d42578775b3021f6e0a456683921b2f4f02201cf1bd23e7faca702b06afc32eb98474d19bb981a5be497800737f0127107a82 30450220722eb148c8030e6cf46a858df5faa1df9dac134601e0911ca88f62fa6f69079f022100c2baadbc19338ede3ca3e5ed635afcb462ab6c5f150338e960315b3b498c2501 3045022071d3f5af558c492a507ed933b9474d8502192e526775fb4950044358ba7bcfb1022100af6b3d65f26e0cc1e8fc70b355691f651d7d9109df628a3ff6a1c26abe7031f5 3045022100e97455b5158ba7dd196ca521e39ff7b176cde4e129be8aede6da237d461057430220670c41476fad2badbec969002807784162d3e7b10a3b05bf9cba8e8878236bf2 3046022100fd6aed217b5d14fa411fe723ae7dd9cdb3ad8653cfc3fc0b38a193fb646e106b022100a929274a83f56e1bf08345ed7748b135d69df1819f5bc408554d868cffcb7c25 3045022100ec99b0bf27789aa3de4a3252def2ff69975045f0acd835bd1443e02b55a01a0e022002caba16482382ee92a35266494db18903b8e8778a254d7838dcb3fa0977ac72 3044022061b9846f3027db1b6ebd886c790b7e783472adb4c1f7e7b6bf545a451a8c56320220606981cc9213450984e92568422b329b18a6c77bed334b99879f0d3c8488a0aa 3046022100f9b0a6f34819d9f6da96a76a507e43c49cd377c56b588419b6eff47aab3e39ff022100ead06e033d71aabab4fd27e866e658ccdcbf48efb242424d479e976c780fd55f 30460221009c131c706fa9f4814e94c90649c66020515ea19018daa50413a3ab6d52f672d402210092688ceb0792cf677e85b9882456345a228d53411f7ea3f624a52e55289f824a 304402200d1bf909b2506480942b50e7c72d7722472e839e9bcbdf38b82b32a1206b6d90022071f0cec5a5c385c7e152e66df2799623b01ac4554cd602510bc31650a640edf4 304402207b92f4600b681a0cc34efe7e67222bb10f736ad87c017b64ab7e769f22155dc30220470409092d0d695fa4a532948095d674d7485c98dc782dd7ae58360837950e71 3046022100d49cd3c6b654475a4f9275c4e23901e8d78d6dd2a630200e958e5e2e34796ce3022100bed03e6952362a2458949200364018a19c7d68154295630715aa846100be0063 30450221008413cd56ef0fcad7a3b94fd66db0c478ea8221e46831dd4b965da9f3d2c3af90022045af63f76488797fca7d3efe883b8699f8e4fe999795e918b18df5d4a481adac 30450220405c4ed6ff27e7ec815854f48ec6c6550507f300062cd8fcb107f8d6593f34ec022100d9ebe60aa17d268fa0bfd941967bda1a6d05e22f051db3c1b93ceb0087160a71 3045022024bc1d4b0a74fc3aca6f8c788b1895e9d4b3925560d5bae1f6e840b484911d0802210080bb8448722467fb5235e4075cf1ff619739aa590592b47233ad784d784d103c 3046022100898c5f7eac783a70c619d566ddc91f09cf1894769bc4af16eae0c53cae27dc400221008e543c5528512f3a74f42ea8fdd4df5a4f1ae1b20a75f61d88542b244a474614 304502204cf0d702d953dd6c851cb55a337752cf3888bb57e1302130ec48742af5950584022100f6694124b5ae2403e7c27d767a44e7713680f6b84c6916618de5284ad6f38283 
3044022100f5acaabfe7178249da48f45e5dd1c2c5ec8b1a0453342e3c5424d5452de44244021f2f4f2e5a1a9d181530126389f876cb7e1663ccb114f3166cd015b210a24825 30450220548828b2627d79df7e5baa30d5608faad67213db379a5f68277358fd29ffd70b02210099dda69955d7a6eabec610e2056d353f9eecf8362db71b1d0f476f36f66f4995 304402207aa2aef939b6e527a75e19ec38af9c9ccd58a10127c00e9640a594b6222a8543022024e703c589104262ec7e21388488daa94b8cf4b50cc75b91e55fa0dffd1e0095 3045022100b1c362761f576741b5dc6f7f0592320ea38a0eebb3650fbfeb9bc7b1f5d44be9022013d90ced9dbc9d14d2257c6960f640ce2962e7285607eef082b458a4293eddf5 3046022100bacc8511604f5db13470df518fd31bdbec67448e6bac1d4a2ed3de8e77195ba4022100d82c659df04f15dbb185eff06bf24b547c612e7f1ee7105acb9ba10c5b8e8a57 3045022100edb82e359d56b674ad5602e745def5426286845b03e2909b53cedb9215a9436a02201605c2628dbdbff052a5eaa181112b8e6a2de8cfebc2a9e311d4af4f1422557e 3044022063b8b9cd9f6be4704a65afd2a43b4e4dc418852330abbda1be02e549dd74883f0220386eb1fcf9082150d619118c5900d7ffa72de3458f4f0e63a0da8904f2d91504 304402205bb359ffef94330ffad8a1d1e6bbda646e33e2b4742c30a0a6161c9a452fa3da02206e0857ac49b97457e049603185a9abe5ae37d2a95c9c2411f9531f8c9f19d8cd 30450221009c258ee9e11c03fb15de306321531f6b8f44ed893341f453c7d7d79a8344b8f00220028fb0d2e35235014e07165801826b14543a9270bacb2640a69de2e55131572e 3045022100a1bb4c6a054d1ee9ab49fd272f290084bd7dec0af8d94f539866b99539bc8a3d02200904c9f408d751bddc91854028ad95fd91b74d95530301137b31365e67274cb8 30450220163e6b1fbe83486721b3e91a4b92b5968bed5ecc82fdd59293e1f52492bc1d8b022100d02bb66cfbbe2d1d264720510ee285bdd70b01cc08697cf668a4324d838e1bdf 3045022027d301b0cec498b7d64f7915d2c89e11918d38b0f23d7dbfcff0dc1cdb0cf196022100f79b55f762f7c397894b6448e9c7c597763d18e8409d5a00cbaf44ffacaa9911 304602210082ae0794cc9be5cff85790d9de8d89c1742f918d26d4e01416a9084167439fbe022100e7fd9be4c5b5ac3914e9fdd12108e0695d174515bd76fbfcff70b7f8e4c12b8b 30440220620d09f08a6abd9929573ef73cf285ffced08c49c0da61763eed963bae61962902206c58caf9abf2408178a4420b15aec2b3f3294d07f542a9d144a23efa3e9d6b71 304402200cb66adc54126d84d40a2949c2c8ba68bd7223f58846fc59f5e87470fdec7d86022031bc498e09ad2eae9a80f2b333e58922ea8130886ab0697b116c9d88fbb436ed 3046022100f906c2a4c35910a5461ff9386a942e364a678d6d4fc13bce370d5ecd300f1b630221008cdd03cc977b63c03c45e4148ab793f90263014e89de0c22d12d10ce1e7b74d2 30450221008c237eab74c389a51d247f6623f2f4e47b4965a24148ab79715f53fcab4d1aa8022026de819cca0f12d6bcb7bc12a22a7412378d52b388a5c6967f84f5a0266a9aa4 3045022100fafd21ff9c3911357e9e7800b5b15880f3c00d690ca01762013980bbdac7127e02201b707747edd52b9caccdf67306ef885c8e6ab6bab616254dad41b0f54662b16b 3046022100bd63744e6cb5df105674b2deb87503ff7c01ef13c323671a8483cd5ad17d4b7402210091c51595821a05c21ca2d57d210ad8516db890271032e5f86322ef7ac2960dc0 3045022100df8e8886941d363fa07a2c4f6df9c47f9e51fd9e1f931ae5b9dc0b85c106fbe00220746008a9c94dc7ce3b55d94b0dc3f016d179e6cd89f9127aad903aa1d604d839 3045022100a7a05426cf45f292656f10544e179110dc92d4b5237117db05b9a16b0840f7cf022065d0bd28473deaab6fb1c01b14b411466de2586d927a13761ba1d2a6a40a16f1 3045022074c3aca6a4762f4ed7aef4f497d01627e6261a45b9e14b5a8761e0bd209c970c02210083d254a12ea9955e3f85a82dc590894f101eb4653ac81482270bae13eda5887a 30450221009013fba12e0316b2459e7308a56de8c979b542e654e8e7a6906a9f625363d635022033daf19e5db12c057372b59215ad8e5b7538652358a81c740e84195ccb8a2555 30450221008f631f9ec9b9e0fce3bd5fe547b82d2919a4dc70af69dd37949b8e75a57d33640220259775abd31ce915ae34f891c859ac5f8293af3e37cb4b4426508ed89955fbd7 
3044022018be548d466dda565f277a0d0b53cdc3a5b87dea48e89bbe40fe4898d15bd528022011a9962af8abd971faa3f3f4f16f34bd112b931d83e77999b641ea7d9b79a6e1 304402207c970d933aa65b3807f8947df977efe295b7f5601e35ede5552349a5e416f57102203a89344717b7b5c04e3a9494c452632199b4b7f1a8acd9bc4bbf3655fe40e719 304502202101c201df44f7f79b90a767a22f8ca7cbca785d5e30a31e525e8ce9a6eab901022100905d915da02830053e984aed0d45c7c876a4b9844dffe2e4cbd0b9462b798dd6 30440220350379ab953d6c4ee0adea26afbd4986dc7c530ca77e3a5ab8ed069e275c6b5b022006f191321d8cb056d392de6daa19fe04d444caa34e88393a34cf9e171327c202 3045022031c8773b9477f1bd874df80fd465ea692648a378420a119848831b3b84a1557f022100b10f7c0b49b547520ae0e8a7ebb757adc1d83d76b01baed9a9c3ca66b7af2d09 3046022100965abe44030cba6d7f912ae706c6f22492ce8ad780eb4bc73f95f09ba4b24d42022100c81623d0f709c8414efd3fa52d5f9360944c90651e939e3c5985087eff712f79 3045022100ba2a3eb77ba1a43514e288e001b831447e3ec7b744259474a256a63ce4e510c6022072664b5de7de1ccdb8e50581b9323f3d8c3eb83de566e6250d5c846f12226393 304402200cbf722de02ee40157faa68ca3bc0e8f3e4223e297808f950cdb871296e8f6f60220152a0dd24956b8a3c18141d6d81b5737b7b4111b9554b039097286ae80e1b4e1 3044022028df16bfcc07be7be02b8de0fe0648c8d2a3a45394a8e3c10a3a06206893b488022059cbe44f14ed2b218660ebec5f2839cdfcfdaa6ab919f6b81a30aea434b0701f 3044022054443437c5f76ba1d7e3effb809469f192f9664e47051e92d6827b9073e8ff14022035196ea77bea99502d4ca90bee7d99b1fc004504959850c8c2b5b028c376a4dc 30450220752c7adb4b84ac122d36946bf87f58a57d69b69d9347b5f2c9e2f255c6db8e58022100f82183e9a20d0438976db64ab303fd982f482c4f31496beb862b586d3ec94ad5 304402201771c71383932f9be1bbbfccf7e61151bd34908d1485237439ace12086b4cbaa022059980905f778006dfa6128d41fce7603b692b5584fdac715d97e9ecfdb7ee99f 3045022010021e36c71bd9c5f5c7a97d923e05601d60a8c438c87ce0ed6b834bbde82fe2022100e68de65252e876fa2eed6f3acc35d7646946f348c9793d1a874f3d7841d0789e 3046022100eaed2dbfe279943e56436736afc4b93be68022b020a54ea8b61fe475552235f3022100d64219a40da766bb0973fdbc6e1b31b9506a8e320177fcdaadcb9756a1641e9b 304602210099d94f59bb89e08c93d2313518535ab6d7a0431fab2da93264f39a8f5ec967840221008bf09ddae039065e6a548e31fc43ce16496467e76e29269e9f5cf9a37d68a874 3044022025228a6e482dc5dfa07ce951a4b180bd1b9a6e396c1087779687cf1b060ac40b0220782a4429f9144a9e330a5a9b5d8f0db13fda8cac90bf44ebb4490618860c805c 3045022100a9c6f3cc34bd9f4f06e393840dc0aa7c3f279b09822279c859e7121d64ecb6e002202e0f6821d1177ef91ccf9f27a30f3be27c40e440b0edd1fc1b4092edb4df59d5 30460221009807c251ef9496eab10c5276f61dd7116a8bbffacd465d016645a2dc5b6a3952022100b517b680aa5db41e00464c7c447378b74bcd906b38b08416db63a48dd151614e 3046022100989a54bf060b283f572f17448efe7ddbc2cd376c51c556005b4cd2c184030379022100f02ed6ea0e5ef09991cbafee13155dbeb83f13bf067f486b7f673fb2f2adec75 3045022040b528047d82b8586fb58df0e50c0c4e1890dca33051ad96aeefcd5b3da082d602210087e1d3c2deef3175155f74bdfb9667c77c3b89b878fe4c77d6d8ef15c04f1d63 3046022100e226c9699ac400569787834dc87a82edb2a4750c77ec4ffb33ccd35e34457df5022100c40e95177a999991e57ec05ff868a195af881da92352cf9ad03b29a98fc0db07 30460221009817c5d8ff0265ce33c5f425c34467f3ff5514586c8bcba0867a5c509853163a022100f086f1d99c5377b04e2f58702bc97a66f596ae07065507381cfce80ba049b0a8 3045022077cdc867d3dd1844fcf4793dcafd194cb210a7129437060618363d82f7a3ad7e022100b3467350cfb3d633286fcbb2a515d4924c535631e6379ab4db8bfede85feb8ba 304402206f618b67c99488d5816270ec05e7971222829ecdda0a209a6d0c8a8f91e04a4402204f43c4f18a1e2faf9c22e04fecddd4d6ecde2804817fd6824bd9d0d859547bab 
304502204076195e266f45ae45123944bc0a7cc278ca2d10f4a46e8d4715bd76132472ed022100b3738912a1fddd941400fce1d7954f0d4a5dbaaddcd00e2245a8f38412545e43 304602210094daaf4d5e04653f357a138555ed5cee24c9ea326c918bbd39ac31c7bbb1a2ce022100cf2277f34800634f94c8441647712d59fff3d2cc36503e991725cec062cc364a 3045022019feac9f678642117c2340d01076f973e8a147d66ee3f8f4daded419f77c5645022100de13d3385134434812a623420041c904e265d6088b24ab82d4ec95144e8f50ad 3046022100fe3f7b11fbee9cb7b1f2f37f4a72710771a226ee812f03b15a31d6bc4512e1f1022100e3a6767adc6d0cb4e634261b7da562b6d04ea8511befedda64a25c4c68d0e1aa 304502207aa8f8b055f865923f9db7a1e957d316ebc4533195800457537cdc96b6498d76022100f4890b8804a2269a4eba76e8ef12c96033fe828d7d6205a244db313f787f1be6 30450220449d26c63571a33593b864e4ed15930e78b89e2c78e7019a2959f7baf350868e0221009b3096db71119f79755f3cd5b4f0565f134b4df64b3cb18e5e9841e6703f68dc 3045022100b74f37c2d3d6c4347e74efca4e3cdf0b3a5f093b005afb9e317b4476215dd5e502204a2fc3d635e831552d64b9073f54055472c07fa90899aaf8aa420a3983e2bc15 3046022100cedf643629a2fc243a66585aeb1c066896a42eb25907e3ab20f56e794522687d022100aad6eb296b3936ca67da583cf0c06b09063ae37098df6d396a65ed6aab7c2a76 3044022064010afe66137ab108a4c6e96820b65168081fbc8f0b2eb050d5a6a2dd6a2da802202e5c774bf55b632b18c71fc165398adab0e580e5873c548c21e90fbd67ed3cbc 3045022100cdbc75e7c95a726d8668a129ed42bb0f4ca4d90b1ebec17f3c01656cbf9bb4eb022069d3fc6236bf38ecebe4f93c796273aac6a2a32494db8318773fd6e546ea40e5 304402204afe9d91a1ca07fc81dbfdb59c94261ec715f5151996200401604efe7a3126d5022001220907d5a355025031e2569d2ce4985f6f9604d68037229d6374ee9ddaba9c 30450220155b25cea456b073c6b040756327327f39e7af813b1382a9d43ee1d6533d0475022100a581451d6e624fb6853a7e975740f181c30714cabb1f598647994f9fe975abf0 3046022100fcff081abb1a5d02606bb9a83adf1f9975116ea7abf6c0436e087bfc660933c1022100c4c165b52045096c8b129f8aa826fd2d3fa2ea1247ea2eab9f8c904bddda86c2 3046022100953c564e3f2e4ebce42834009bb6270e4640cd9e340e35bc28279fa85f23385502210093fd9e1158acc9599179d6b2ad402f614b05d86729cb97835df1e4aedbd9938b 3045022100987f8ca00f28a0f068a67d9caecca1190488260eafac2ff58d3eeac7a93496d6022024d381e19ab3417ff8c708d8033ae2f4838ecda83d079ca587847b7de481084c 3046022100e61f35ab1b9cb3c76c68d5fcc83cdceee995457df22bdefbc5ae08f6f990ff21022100a0e629862986ac04b2e21f409f2190e193537054683ff08fcc9d912d54aa2234 304402205ed0758addea422b83ba5e2727d3e372c4d979537c01d8f5e07907dfaced0adb02202c176abe3ce3a57ce4875c37aeea3b628d3debcb4f5cbe39dfe3269592f81785 3046022100dd9c19fd03a5b45cb19081af62846b8d4873095da494e4728fd88a189c9ed7c9022100caa6db7897988adba9bd757080d8b5183ca0077984a4ef8647e12aaa7e853961 30450220426549b82ec49098c4e90d3b14cdc5afd490b9598c7d5a13fccd275bee31888f022100c5618b09d045154c21c22868b787bdcf1a75ed6198b9477bab485607255b372a 3046022100b92d9c50ef1f46d710cad8a2e01795de364736d2a9f52c4219d0450ad3aaaa87022100b56d78a05e42c0b4b482ac62e457d0bc86764194c863bcac1e88411c9942929a 304502206ec76f6c2818107df710168a73c26cfd69157cde8c80fa8877a1b65aa737b66b022100bb2346544b68ebd3b9a182b554e55227410c6eedf0ab11453d2faac667b7474e 30450220570aff54b7a9f5b0c56fbd5a291db2b27656c890893ad3ed087da7e06834e95f022100e4258a435c94fc2499d6d0514572e9bc7299c519a8938829346444fffa63cbf1 3046022100c0fb45326aa9dd29e38a057a9193b5169204fb0b8a60d1b0a24b49dc9716329d0221008a333d4cfd14a0ba1ee0600c1f984639d483e980300974897804ea054c052323 3045022100aa1b263c207133875b7c4324a706df84abe92c20a51aa33277256a05cbd591fb022059c2ee8e702bafa4a45ceaf4abf0d4f281716579a20d513ad7fb724fdea19e33 
304402204a6d4cfbf05c764d0c2b42735c1fba45a942771a761612318e178051e1b1f36c02206cf75ac0e7e32ab34a1ef38e1249617fadd554a01c2fc07f69f0fd6d04276890 3045022100a102a5f9b09ceb86bb7053f2529b6bae8a0cf58c49ebc139174dd307cc05076c0220378910ade687364eb71a1f86f73d6686c94aadc159f1e410dcea21f8746fae68 304402204af4ed0deff0fe35b9e5073468d150acb9ccc1884a42e738c103efe12d9fbb65022033ca88ba58088dd7b9a63299295c77033082ce63392788041863e5f9c51023b3 3044022039f7c424fe1f186e35c921d731cc2d07b727e64520f3a0ae4d662bbfa3faffbe022026d0adbce52da6aaeec9deb69be41c63f7ac00a150193ced70d804eaf5dc6ead 304502203aeba318dc203c16510c314a224cb282eb862aec8ae0a742820a7e641b8819f9022100cac23b47c6836d0e030f1e140ecc1d971aaf3b4d6289ef0c039a319c35becda8 3044022060962a80a4fb4be61cdb132a7a6a342f638bc447cf841d4cdae3a48e6cb46e9402203594c25225e675de2fdcdc0218c5676426b4f808d448cedd51ddaf9f785ee13d 304502201541f5b59234ab317b47d1de7332acc81da3064deac5a4edf6957c22552c94ad022100974f49ae9f18dd6a3c6c457b7874d1353cdc6123081cdce5e50ae2682b949569 3045022100955f1ac8e8dbd6d6ad51c9b22f7b2029c7100c4853caf7b65a433bddde7da78502203a97bba48592a5f86b0a57a4367fb485eaf114c451d714c1e9f3121a8d9f29a8 3045022100ed2d286e80a5b581d6816750502bea599d262041a6c7e26a68da06a8529f3c0a022039557ce633ca8589f4c7d4a6b10885103079cd7c19dd99006d9edaf0da372e13 304402202f3a85a43b818d91f0e49ed339fbda11d84577c81d3d7e6e80b8c45144d8ea790220010545aff8a3830673756775bd9caeb4735fff6d75100c7c1b6ce3629b024b0e 304602210092259f48ffd017c902f83404ec077a2015026a2c317ea07a80b43885e09a2cc7022100e871e91115570262351b01cce084cf9556df1e5073ff810d0537bb672968f474 3045022100976a4dd00524d95878d29a1d5aab324f199e612ba23b01bd995e46254eb57b9c022047ee7aa89a406ba313c17f6cededb9cf1855e59c1d1f08d0c65eaaa188c6f500 304402200af6f3c1da5cb6cdb61de3421d72f304a03358ad3ae0fb22e84f5a7d710ef4660220105b215017d85995739d104bef25f851bc97ccecb9deff793beb36acccf48cf4 3045022100d2ed8d05a08124caa879e3023c052a70a39a4e052e9170c680dd7b1488f5f4d802207fae3dc79fa54ef1502f2057bf97be714b14e8740b977edba7fb65e811bba55d 304402206a31db7fed01fe37fefd97a2989a728b4df057dee91b614cc93f81d774e89e36022070947f13f4a14022cf4f7f4babbe90c1dcd6c2e44f7133df0f2160d3b8c98fa2 3046022100d785c4b2e79f9c7078f547819d885cff4d4c1823fa8e62fe01588b9cecc19c610221009b13a1f1bb44dbb4f4f8aeaf253863de1ebbf5783b488e7dc089916feae53bf5 3045022100b4c431634d2f15cfdb5b52cb7ec4b3ce27bbc9bdd3e41f81d022727a27d704e30220389585017136bfeaa6fcfaa1cc140fb41d89752f85df4b0f6f0227e740d5a610 3045022048d686bfb60a84c60d5b5685347c5f3e984808f2dc7fe57fa32bb9d7c3b54512022100e0deb8cdf849bb09f231f7fa1123764d6a0e35823afa73fbb7c85761dc4a6d90 304502207efb89d87c8f04c77ddf01f523ff406c32fd81bbc00f1a82262b3868a0878912022100fd66f1e7295f4132dbd72b62dbb220a98b1d265001496682d9c827a9d1ba6008 304402206d76d4a0ab06c0b7a222cc4409349eeb88cd2fd9d313dcf09cadf7f71146199602205bec180bc9ef1ed6a37517b811fc08f8cd4e06a24c11d22f2a55e3be71b0d288 3046022100af54860f080ce9afda043514b305595be8c7f85efac48af7035021ff5edb9bb2022100860b74fed22a38f34074db6174a988b0462cd9808bd8cc1d66bf9d0f25d7a20c 304402204ef6b762a20570b20e25694f41ffa75a236c26cfa9a341c93fa0ebafb207a26a02205ec535c3a35dac590461de22762afefbb906adf51ab8d2db6be239f30303ad3d 304502203d18269c5b3502ad62a350fb1d2de1668abcf39351dbeff740d94829541ab584022100bd9888bd01787557659ef0987d15a7683369af11b6d58d4dc26b57f5a05ac305 30440220630ce121706ef6993baf13793d5d38cba91a530ad2a4024e02caf53c7531b91f022058d297b78e1f6b7afa55f4ee557959362d52bcdcffbd9e55732b24f348dccf9f 
3045022020b3dabf26c25793f31cd34a5e767366aaee07b1fda1b9973eeba3ec22ae1f74022100a52c83427efe6950d1a5c8996a50a3b4350c95c55b9b6a3305f5742f808af0df 304402203f86778f906c42d5cb6332108d820f5d27b417f0d4ac2d75cc6d3e15b4527e1b022066fdfe339214786bb24f331f529d696cd11373ca5f2b4b216d9851af5a026dbc 304402203745d516029771d48cf229f3f79e6080061b39decf273af090ba1d64c37a19bf022006b74901386d0ef69cbb82f5c595c433e4a0e759784c4bab8c94d3f850b5e55a 3046022100be6b8d3b3ac56de9ec4ca1570a24560df4f8def62a5ff77c3f8270a046518e740221009f811f6e326d0bccd32558d8cd79b24d7dbd7fe6a477e3241049ba29517b6aca 3044022010cfa17e31e9fcd3e35ffaa563683e86bf84f3fa98a4777aa0daefa967f52f4b022049f2dc23bbabbca8140497852c068bc25db474fcd37a4958d871f599859c263e 3045022100cf1cf9638754ed1842be20a1942cc11e9d2e2c73dc74f608901fb939a6fde6750220349b9e33e5c840703c4c9c41d9bd419455641b09390d24f2b1bad4e24c11dbcf 3046022100cf590d83de01051e39a7b0cac490c830fac78718d852f05142a71109924bbc01022100cfb5523ab4679932f024ff8b7edbaa75249e3954dced9985e1caf6b71eb20b81 3044022013fb1d6951a13130369b5d15575333d3a3a351e86c1f1c4d8485b5c3232807cb0220409b6f1a1bc0901e88bbaf48181adb071abbccf9a1af9b644d2b7e7fcce5572d 3046022100f2171adb9deadd816e9f8218517da57337e108fb08414d53bf1013efe24f2f17022100f388e196ca1243a5b1f9620c0dad50ae69e482ef3fb8a42599d35508f9163cb4 3045022100a8458418ae0220d9ad4ca86dda995e711c3844b03fadb3655831aa937c175f21022033b6200199181e71b8d421742181dfdc6b64f2542c44fa0d81e3c240d29f40f9 304602210092bccf9c2073a671a0622231638632275debe426a310865580a99784d8381a40022100828f195fb7ae7f52d2f62e561cfd578b0d2b672606f339932b59f7ad46c69153 30450220218fb884ebfa2a4777a8ae6d35869eb7b90a3dc8a28081b6833a206a081822b30221009e269322eb80e83f301def1ff956e9c216243381ebf7c641d81ed588f8555913 3045022003c651ecb447aa2fe5d18bed30413b6e61c63aacf05ff2ceeed7ea1bf6552781022100af51db15aebd35e510a17ecdec5301d46059e64491682bc7ca3ba348c4c13c15 3045022100ac3cc55fc2cc47be2f8afc40d7ae2cb16b79f7a9ce1416a7595ce61507e1a29502204acb3b1565ac2f5711d6461812165bc666e0462aa8f95821a651b98403075d2a 3045022072ecefbbf7d5b1724f009b460e9a35d3ae2f075ee1cf149d095f9dbb49395201022100d7ddb5a579aa83c06fa0510ec1740bdcdf1fbc811037abebb5d4de2ea30ef8a9 304502210084006e78e7546e5700e1debeab7e6d9105bf85ebedbd1fb3840a30b173ebf68d02204b41921b5fa909cd481327fd7f0fcda7090dba14d29fe1e5c2dcfdc6d2edc243 3045022100db44a635bbf4a7240d096dbb3d54683e880657b9142d4df7a457afe6b284a11e022076eaa411b0715b3210bea830ea811e8a5a74f6177f15022022f2cf3cf4b55b4e 304502205e14b961c04167ce3f194cf3f7e162346a1479da8ea8a4685ae07e4196afdfb6022100bb6befc7e374516479468d3ad08e15457b11839f865b379cf704d528726d7b34 3045022010cfbfafa5b6c5e630d42e4e1b780c671d68073eff96d0085c9f2df88259bdd5022100c28f1c9ac84206c5e3347376b315020a8cb3485c6eb826b6092a889a5d3df035 304602210090d1807eeef6d140783f1d28da1e58483b68a1f0d808d70571474c727ed0fa6f022100dd024e1fc0373854426eda040a1be6725a80ece6628d258005d1f0b594a3997a 30450220124dadb9601457c5c496b6aba9a55efa3b1e7afdcc52db0da26c04457abdb673022100f2f33962274f27f67b454175c4b15fd5cd34874bbaa21b8519ce44cb76f4ec9d 304502206b72a07a704304fb949ce1bd9f3d373849c58aa23e517f2f3d8edcca2c5bd5b90221008a5d342ce82d473971f1a23d9340b0930a3f65cac09ecb31c52636b17df983f5 3045022100f1348f8606ffd603a330f10fe659f432c8c74c64c3fd741903461fd13db2ff8f0220511cf063b9f557006cf1a88fbbc2e9cde11085b9d74c5095d36c0d8d69adac1a 3045022100c5959eae32f404b77f279d64aa03d3df8875155d35b54f5fc1f2724a3395f40802207743f0aa2624dd4a1c204d380734577ef6fa504aa6432f810df07adfbe0b88e6 
304402205ff9f52c427c0407fe5cda4ce3703e407137a8773b39d80f829df9c4794e0704022036d7d03e8f1225cdcd10d120d1d594559dfcf1c6b0be93b38d4e008c0ba7fd1a 30450221008dea459af33af944b7890b7fb0a21f9ada7828cfcd1430f093325275ef95490002207f31576ec94238cf5382bddefcad5174e2a27af454103c148f0e60f5de6b9694 304502202324896eb07d5f57f4a30e0639b74d80e886a3f5dca74e1631a79024df989d14022100951cffce31974ee11b79ebd802bf7e87dea4a80fe7781753fe706377ea9aaab1 304402205085b3ad3e410c2ccb20d493bffbdb0996c95a9b7f592b3065a736fe05c8b0da022037fb58e7d2346b9bcb6037e8bf86128a84fe5fe57bf95a8511099cc9aa98f9c2 3046022100d9e37bc642b7f3792d76d9e68f1c4c8594ca16721ecb1b4e0fd8b72e1f68293e0221008d9126d5af4a45cb9d8770a9e385cd7ba9e0babec1240727d3f7bd62c71f8fec 304402203b858d80898d5805ee5c34cfa4b3194cc3c861f221aaed09caf7fa3233f37f4602205cb125f60c1ff0b8801f6054bb6c392bb04a3283bad968d1f2c1c909f7f3950a 3045022022d447a8598398cf1c2da21fbcd1a3ceb285919391f9c601bac2c665cf849d83022100c958d4dd74a770501c5516f051d30f4dc6c02e965b60c944f6f1ea3c888b6544 3045022100d7c8a6ec04c0ded57a6001c212ce90030c651023b3e437ccd467bfc88301b31102201c11fe413122520a2c01deb9fc5ec2f60f8387fa412176c51fd1245090269fd0 3046022100c188fbdde9d104e06c10331bfd7fbd010740d3b00a5cf1fd7f8d1b359b6824dc022100b518924c7fd1b630ea7597e0c845567747ef1874bb0e3ec01436b442c3e79d38 3046022100a9e99c71a5fbc4af52ec86c8120f3771461eeef5fa6d683609e3428625eae9ad0221009db1eca6031ba3f4306db3d326974db0e8e32b4174f060c9e02ae7d4c0202f52 304402205830bb90567fb4e4d6c8327c6a12a5b380c955ac038fcfc3a9dbd463707107a502206ff39badc1b48b3ad39ba9038ed0d9fde123f2f153580e1d882cefcae7eb9c43 3045022100b2d7e58a99715b836d4c96314421c30f98b52dcac832cd2b462dc5caa4546b38022020fe9153d6a9d1f0893a2cc4345656e628a83f4b5ad7220cebf7088e62927c1b 304502201f86c49741f20a2476daabc9861d65729b5aa588a22e7c166fd8e73b4148005b02210089c9c3ec492383a480041402afd6f6fe2e25ab16620de1b8392a8b8a836110b6 3046022100cbfa536040f6fc4a34a1991b9f590de0d89c9ffbafa65bec49ba05b1b49412b4022100c24ffdb48eef216942902adb245fec96adc2a3515d2f865b9c079ea86d114af9 3046022100cc822033c5ad6109f14a44b3db51e13c5f59a42ca6c69aca1653fe912765c86002210088fc5257839f533205554059d0ab062cebf25e27e195a4cbf15d210cde13596b 3046022100d25667523b1b67e2275c49ae6577da09b4b1689268bf76626e44b00d1419f859022100fc2c289a3b459281441fc3bd51fc4d677cf41c84e926a3ee62951461514556a7 3046022100804b1f12d78cc8e9106b5d8f7a3185ba7d61590b0bbefb24f99f42ddfac8b738022100f3a69a8e6e471e7e4463852a9983ab9b2a44058aad23dac9256c86d7263ee17d 3045022052b617313c6b3ccab43fabf0e7fd2d2081702c8461236aaad41320307bc048c60221008081f9c832007f74046c399a58e979a81df8c2d8f48a67090abaffd7c8a85307 3045022100cb165731828515e597687a447f04da6bbcc202bba6126c200d177091bcc37a5d022033b9d73fa154609b3fb254cabea86af6ae6ffb9219c4c2548f05f378625975c0 304502207d0a8ef6fad9cd5eabbc4292cf0268e6ffe681550ebd766596f1959ee6487108022100a507954a179250ca20b77521510e27166e4bd93e948b308098ab2804d753d7e2 3046022100bbb6d8dbe37d8c7381233b1590525832c2120a8a945d44c51ed27dedfdcb096c022100f701fdb6e371ee2044ce3135b52824d63dc9df8c9c42dd8bb9c16834449da7d8 3045022063068d3184c975be3a5a2ae5d98a8ec8c64f97e1d62b15ed728bed17f7417669022100fb9fbfa21cfb06623900a3b10e56bd69f0a25aa63ce764acc5fb163ea4a64aee 3046022100b612505335680f32643e24bc5dddd8ee88fb9bc6f608ede65f94695b5fe25dca0221009874e2d2cc8cacbe7c81f6c49d3f515863812c1890f4cb6655a3803f14bddccf 3045022100c20b7e561d9672b105d14c662a990a9923dd3d0f6b4f02b54f6a625b6b1c940802204c14e9ec550217041123b2fd20d155e815769696d03ad52ff9374b2ebd4cfb75 
304502204a836b45dfec0ba38443cc825998aa9fef5d326bbca4d64356070ea8752720e5022100f2838b905a79ef7ef1c1387d5a32b99e3b50d6d697581ac8f47da89387225023 30440220779e3fda94e50e7b95234bbcc91a1d69ba74ee4dcd6e01ef971ddac77660093d022014cdef6d879c7c9e0273e68cb727c819580a586cdab77961ec5416ddf82f373d 3044022001c46fdaf1add387b92d381068dfbe86abe11510b0b4940310b12c2ef1c5ddf402202d7e09372a0dd0f8d8911faff0e37044efacbd168465aefe90e0a069678b9ec6 3046022100dcf08c48b95abbb39c852ec5702de62ece959b7e1e1c9661f1127849b6bf4068022100cd02127fd48e7e1b8577d1d9ec617a4fc5628c9ab785a16ee142091a8245a4a3 30440220562a3edafbc99c1adf2cdbdf85b3afbd173df07c431b1eaa3d2df0cdc93d5efa022067b65d2d21199c41933ce381da9979265f4ecb8c65fbfcb1221c7090c9aab39c 3045022100c234b3c0e4cda3f3bd4731e5e15a68c599f859e2385f877a62a15fbed48858b802206105ec89b461fa7acf0719bfd58329fff57bd9f7db05ad71a83899a4e39fbfb2 3044022004d80ac6384464d021df8056c0b935b39d451ee59f051c0194afd75ebd1100390220054c13be3d520076c74d82b34bb1752b971967198d4166bec808b3e3308bf479 3046022100b6d02348aa4f97688a3fe61ae7a2966fd5203cfa4b21fe1c9c2590206b504d4f022100f502211a76e4c4d6c199a6efdb1eb582ce805da9033c34da95a5b684ff39a949 30450221008473d2c0697ae68c06c63ba747d7448fca6460a0f63ccdb68e4f063f4cba2a3f022058f3cc99ca3a14d415abfbc74a20e2f7184b91307cb9f0263208f4fa59cfa51b 3045022100dd6e0536500136e293b9bffe7be70c8f91bdd3b5bca2df7533242c05b1c405cd022042a35397f2657a2317daff2bbd48ed95ed53d8212eaa30c01478e87d9a282968 3046022100821a124df65ae73bc18529a6bf946c19fc33995fcf37288d810b4a5a96b9297e0221008a60b659754e7d7a0fc5829cd978a8cc7052f4bd4d76a05ff5a895ebd8dfb335 3045022100b81b7cf6d6331cd6e3710b7ae63a44ba42488558728cc5c4b17970ef71617d6d022013553106f299261d3c94644955e7a1695cd6ce79a09fe92ec72a432af63413e7 304402200fe9b8274b5837e152de8c039c181bc0915ae5ddae5f960197ed4ba7e784171e02202a916c31030f4e81e87f63bae910d87046e4372e73c83ad4e440231aa2acb9b8 3046022100f5771faa1b14376d060671a58b959da7cdbb31ae8190e96867081e050bfc2468022100b55b535dfc02cf023b7b3f3eebf02ae211fa6282ee6a38c2d1da306767850c75 3046022100bcb8514903a674b311a54cb1f060eb809436ba383362fa05cb96c17fccb4f7fe022100e8dcf48a590c587605122dd01667921e81fbcc887507185ac825cbc9b1fb8ef9 3045022040ab96a295dbc159624a664bacf3b1a28d2a3652cb55526455e97c6a9ed59ee1022100b7594cfa5d5253b3c9dfcd82c003732a7a1ec331c576f481abfae64646286c00 304402204f422f78a488169932248c0221d4e2c835d0282418e67a499db1967f2c39824702202f0fad21316d6876ec12f2306ee111a377faec1dfd0335a163f81dbdbebce0aa 304502203680de5c3c1ae2d2b940542348d3d48489da10ca55a77e57244209451f980b3b0221008e2df4b9229bb1004b24f2e1f83eebc19da1bb9c59e3a93c97837e95657b3e1d 304402201d010497ca33bbafbf359d368f609bb8e5401d099556e85a39a07e2c6b493695022031eda0ebdf431b798424217843c5ae39124a69d01ebb2b09ab9fc8e70b0efe5c 3046022100ce78d1a735519876c32774ee60f60834cdf03325ff8574dfef8b19dc1cae1ab4022100a2a224f57a35d2244fc800e97517d0bc65d76460153a3fcefa6e65bd4afbb6ab 304402203a86afbdecd792c8bb53e3a7433313d3fedbecfa156310413d6d142ac2948da90220265d5947ec076f390be4c6a860c9608eb9a8884eecf10aba513bb56bc906084e 3045022023b3fcec11edb6c77c82207cb837f877467f5bead92ddb8339370372b9a675430221008fe731be942832acd1e63062d1598efa58726603175ce1c4f311a035684033a2 3046022100b4f7ff9407cf031d01a02d68c32bbc3be4f1094003ec03c06d7fcc4a2cba322c02210090409858d2cd60a36e85bcb3f3c49e5bc1de0b1b07d191ce9946e3e0fa77ecaf 3046022100945d31589dbce576ab96019befe2978effb7ac9a0ce875b1a724efb872163f00022100b4b98dbc765e598eed7c8e1f180062309c5cff79e475b3c8a91692a94a0e777a 
3045022100c61ab843911c0af821842f5eac72efab552ee3b4ce1861682d7b8a6e00f3470a022048f1ed1649ed58f3b8b4343584bc797d86c30a86e5e6b3414cce9fc043841821 30450220114a37109cce2d668f56fa9c0f0ad5aea8b5db6c69429406ccfa6a3b0bacb18e0221009fb538af68a7b221155cc09dada4af761ea4d569d09cc6f729c885ff040055d5 3046022100bd9da9c9438438b29188a94a54762a5d8bec55602d0bfb9815e1a316030c4f250221008912ed27a2eaeb450bece3e94c850fd493be043febca191e1999092f35d0da81 304402202ef7858daa7d1f043bd8946cce3e19ad7aa3f8be91b994c3dd57037c598be4a002200551cf5e59e599757b00f2dfcfefecc290a318dc8f7d00bed6811c1f88fb9bf1 3045022016a98f507d1fcc33a95569c485fe5038fd1d33ff3ae7079327d1b2a44908d63d022100b3e8480b1f63421e0c38213c3a9d31a68d7cc4a891faed01d902f6eff1710713 3045022009ba172b08df14a03faf20661cce14fd25ebbb1ab265b18dea91adf4ea9d5d3e022100a51c0f35b0000916652ae0514e7c0a4bed7655e7858b71b7f5e17d535af03809 3046022100a4e7cfef833fe8765c418e40a53a9c5026b6364b5e72f057e0d7c167ab332ab402210092405a704474632fb224f57a01413536d411f210abe913145fe5af9943d2bb40 3046022100dce65a6cc99ab577fba2f65bef8b6de848414b98130caa784c68a377d10a6050022100b6fe3fef69d3be09e08a991dc08bffc93f34648e80bbc7a057f8a4f9f3ad8220 3046022100bebed4e745b62d6ffeca3410538bb9571e644a24b0d54084c8a23d5f4e2193e10221009bd1f168afc8a4f2f9340fb71ac9f04375b832d2ecb83433d856bb4d53b92d90 3045022100bce8c73aaec617a037e5db61386974eb9b49e614f9874cefe7432ad7eed0f1e8022017d7bff5d9ff8f8bf043a4f91510da3a9895a2d226ee2ff442f6bcdc2806c6f0 304402202608aecbf459fe2383894c1a25e9de12d179b459c41bef3fc096132a59c335c10220369f40307e3c2328a454e4f7d570c8d19b79ef99f6175cab52981d038a8eacde 3045022100894c3b24fff5143e241c3ad03d717161ce4c0b21b8c3936784e4e53a5a3d1830022003c6c582d46569c18e588dcd213545ffa70283d17320566e84003bab471039f4 3044022054589f55fdbf4a30e8e656be83da7c2d32425880569d96518c10f5de47db68ad022012fd07865f9ac0165b3343120491e0627f4790cbd1caba224b15bd27f9d851c5 3046022100a2f09423058700a9ba043398164361526b70b0c1a41ac4d1a078cb9d6ea0eba3022100e35f025cd7a1dd05dc7be4a853a39146537f7c198df08703c20747bea24b6b11 3046022100b8d1e17a3d085584777ae01a6203b38af2658bae9531b2ba75e11f6b00dcc2d1022100cc641a0442b3b76f81f9213f35d1f55bf6be48017522fb281929b32c40f2af68 3045022100ddb820deb11604e3279677e7a180135aa4f045a6825ed87f523d3e985a720d2002207fcf8877b93e04086a9e7e23c1402b4f1bc11274f617c5fc58cb4bcbcae9e71a 304602210093859657908251fb04daba3dc5143ecb6c5a04a7e5fcccdc3c29738c8d60b21a022100e9e780adb63dc16e73e58ca3df8b9aa437fee66304f7067f76cd839adc06ff24 3045022100ba054b0cf208bc40b8ca181bedb562594d718b24999699e9fcf38b166ac5c2a302207beab7197caaf8c6ca541fa26b88e733467e47e8c4fc5bd730f561010540cc5f 304402207494f989798bcdec6f79c01148e43c136f11ba215cab5ae46f6ae121afd808ec02206618732e2541206a1642827f340b3a8c512aedf93916e022cc5e3f289f735ddd 304502201f28b64ac5c07b328cbf6ffbe7eb27bb529d0e3eeda9855a90159b408ff76643022100d1e7fd8b62507b56a6f2c6f9d7aece8c1ba7484ee48f9568d1584175e676bb72 3046022100b25a590aee22c9c8c3fe1afa73c3e4537b86d5e71b96198d3bac411ea129b968022100d2518240649f5a4a73ff2d49c04ebacdf180e16d5029107a5c08fca652ee29f8 304502206fab23653d92f607ae30d47f511a680c1d32bd3a713ef6a86e3ffbb767c17e8e0221009500eb4f9c328a09e731b2c499c3ffb832bafae692cf7896520f010cc0655484 3046022100ad8774ba6a3f4ea76bbe2557be04bd22c25d8bc16710239d5ea03a6919c1bc5f0221008e4a0280477eb37e331bc0117384809d32ea6c29c463983ab7f15ec9b444eab5 304502205f5e6912780886340d7957f4a17c8c8b1fdecb311d42b08b6ca30e76ac8c2261022100a22bb57b237965065afe3287fb684dc1dde9e1bf5c876f0cda3a5927ed293377 
30440220787aa692265112bd7a35cf89fe08cb426b6e3ca657ebc9d5d7db02e4fa0828bf02207ddf77bf243c0e7131d2e542bc9673bc407fc1bf25bde7ae0ec4141a11c16333 304502203a34326de23ce1ea3f4a1cdac5b8848e0c77173c83e5f49b881494a086df8777022100bd4fa3c01a1cd2ef6bf8d9505cb6e4c0e4a357c97222d7d45d0cd6d79e47fac0 30460221009f6d249f8679a702923ba4325f9ae3f5a0da301c28a19f76bf89952dae537ffc022100e6f50fffc0eb76d8c3204b5fda169264da0ba0966c3936ea814aa6b1e89e831a 3045022100c90aeb494d36529b1f2db9721f5e390e5a497f83bf50100156995c81b9859a0e0220161c5a05311cdb58319db16cbd8c2c2eea9ecf8d51d70bba221a1fe28b562fc0 3045022045041a333c1213194d83a9677603fcd187afc6ff9318e5fa5ef923108b7dd4ce022100d861297d12914009d13d3ad307aac62f0f47f7ca02c14f1c88d7ad7c8987ad6e 30450220407c91ff985662164f1856e427cd3451f891d4afe2a2f3ba1da9fef252e617d80221008dd97e29f61c9992e59a89116142b3411ee1f654ddb803167e2231aacdc45a19 30460221009740b4b0f3577c5a3f8f1a033238a98573f1056ac27a54d841d361925631cabd022100c6f15dc0a371fa3b68a26132c31990596a3678c97a8b8eb5bc1bc1ccd2a974d9 3045022100c7d433763dccd571df299259c54e060960de744c77b19cab87f845f8bedb0c5202204bfde915d1fb5a80ecd25b7d234475c79f019658eedadc88021fd4d7416a8db6 304602210090dbe72cbf5f594ebb53924c4d0c11e1ab97e868746a645cd586ce557dff3875022100c80a8f8cd2d8304eea74ec20fcba35da865323c33c683f86303d50ca29925a8a 30440220705613ae040098ddc5457d906aab91a55768df02b646efcde528be2e020a458f022026d9475d3381fc53dfb641272fdd33ec48066f5c218234a27a3259bd44abd191 304602210089b974a6654d603742ef4dd862f92eb2a2eae6bdf587790ff9c341b929718acc022100d808d71288e5e4793d5ca4a2510151066a6a927dde9b0829bf9137aa5e58f5d7 30450220402e7ebc53684301e99d06d33be2148c4e1cbd76aa3b2c05e4e4f1afe3d5a54202210090e557a1f69586a8837de7edcdb1c43e2317b0ebb0f7c372289f001d21c0fcea 3044022035a1c12ad87bed5c3f68d73245d635d4616a95e997f53352e5f0cbd01184f31f02201d844606d2eea81405de1cfe4d2cd2d86d6dc4330318fa03bbc3a7e543a68f35 304502204acf8e7da7cd286f55b8a5b4199b09e245cb0879c6f0e9ea370f628ea3625f9b022100c67316eda124f36e5357043e21cce54a788c8124ee7fc21bd19ba36f9bf0c1fa 304402201bd22d754e286aca42d17c34dbe8c1e774cc25050cc7db03892a2cf641e12dfb022050bd056cc7f9631fbff11bdbc766f861962d1984f35e7f7c51c798636f96331a 304502201be2005c0af061623e4da2034650ae31fa71857dbe505b3c1132d18eb7ade258022100e03d255f60381e448b101e936cef906ca62c390364708d457da410095daa2a8e 3045022100ead4b7de245c9561507a44e578291293cc637af40dd895621343b5fd2e9c50df02205d499c5df020a2faef5424842d409fe7c0f4bc0ffead13efd03544925372df77 3044022039173afcdc21849b085c9b9925725a34133381c2702fcac662e9df873b75df1502203b0068f2dd266d907572014240dec888fda9597508abbcca9bfeb7195eb47ce6 3046022100f9f2dceaf94d39e9425c3d8a4737f271d3003b39afa7f1cb50fd06c21d01467c02210092039cbbf2e63070c6e4e60efd1ba7bc9a77941e589b4eb281eb6c83c92b7b65 30440220337191d690ede8f81a7eff9984f9d5df88aa685d6bd819d8014f213fb7450f1d02200d6a155a47ee60c7d313b0d5a3bc78beae3eca4e11106e897447e644ab817b99 304502202d394823b5abf7d5a1cf3a2d18bdfec3b44cd92d2ac2a663e33693867e9fffb102210087fc0a26bbfd4ece4ba9ae881101aafd314347bda68435d8b1c37bb4accfd6c8 30460221009b87f4a2202a7936bec3f83b9eaa544765b5dfe0cfd9f2b293c4113fcddb26c0022100e289843be91a5943b55aec31953afe513cf56decaa5a6eefe7cfdc019cbbb54b 30460221008467b7bbb88c8200ee4a6572b6e6f0120a1f34a9958317a5c7d2d64c7b5c98cf022100b19dc4b119dc8ccda269018d9126248f75b18b12dcf127c14ffab5f0a20e6040 30440220721d73d190a72d5c4bdaa0acf2c1b50bf6c06d87661a0df1a383961539dbe28b02200238f6f233cbe63f660360bc4bf662ea4b51ade91ecdc9da1a1e8a3a3ffe6bf4 
304502201369355c9e6e34ab231b5d604c76053941c172fb0aee30bf47ec6cb1af61f24302210082d2847969a844d6f72ee830b26e1b448b383f3464b54dcff447fdbca2593cc1 30460221008e9911e1ca5959b100ae43f5c9bf62b6f87542dc318031a708d4a465f4438610022100a003b50922c368d47627180f3b42f731bdaea58fdb79eb1240a17ef92de54ed1 3046022100f98132a3fb49864007d54e604dec6aefbd854328a637c29f3df52108ccdb48e90221009d077450ded802ae88a9f6aecc2fa7a45c625b4f548f1c593706d97674604a96 30460221009f5833a8351f085be609cbbf7a250e3a5937ef7f9006513867b5a7439c7a30550221008661cec5b9efc45e2c35a1867e13bf137dd51a6bd17fa2b3e15d39a919067689 3045022100b0d69011bea637f2fb9be875a5bcdc86bc70e458be204c4704b6c6b87fb6b0420220530af5f26a1ef21a1fb0613d08eb0b5d39d923506aa03235db68dec30ec1ae0d 30460221008ce4d3f059cc9522347bdc36513058294946c4e74f6066fa9f778f98c916707c022100d44d5e596a330fc12dce9eb34cfa95356fad194eb66a6f6cc1dcab2f432c71ef 304402204a21233640f154c40a49efa44d9505280902c4fd19dc2d44fd0b883f1ebb07da02203beab43609d1700a623d57f56175c4967da7cd74c3b5101bef3b8d3d8c5cae32 3044022022f97a5f40d899437ac0dbf3696729695a29898ba14eca8303801cb7ead83535022062076bfe0e192aac2fb862644fc86bed66c4c6530090557a1373bb360c0d19f7 304402200e612cbe56b33e4207f351388089ed97ccb6a65464d633f2ab8a0c1c40badf2f022059e1b9579a39b6aa19d8538db8e9e737bdc67bf4496596a9abbb785a30f3bae0 3046022100ccd231766766cd4a68e0e78ab2218c075a8f724ff4539f43373e7b3a85f0e314022100cbe1612e4ccafba06fed43e2a29e344f450c083bd326ef8f3a644a4b6403fbb0 3046022100ba46f52a8596817349d6fb6c6f817546355e1d509db05f3942b954ddf1943be602210092f298f2fa06267e20ed80109ebb8a665d3fb1496781ed15f25fbd61ffd9b7d1 3044022024dc2de3947364fc759fae651ad4773b506665a6f1feed16fa028183cede357a02206151207f44fc7b2e2656105c37c0be92a50a571bca1b1817bee8e3d4b42c945d 304502202bc2ee5f1fa4a9ddb8c14a6657b793ced025736c1f7632658cafbee924ce82d0022100f07e1a6a7367439e9422d2874d1405bf12834c73079ad7780ff6a8bff8e5cdbf 304502203e6bd4db5a793c07a92cb11858024b97154db1a23302bf57c550bdeb8fe70750022100ec9ee3376e1494aacb791623852d1ebb3a365d1d60c62f4cef2b04a87cb81f33 3045022100e2eadd7e0e74614ae5b41d9f4b2998006b846ddb3f0d67b98416c3eb67c01aaf0220273542d6da14b7cd8c1e75c3311524bfcf968c56a54b6d1b66e3ca673099e9fd 304402201a559e1afdf42332410e7146821f639c99f06ae57c0c79829041acd7d1108ae3022033fa9ba8013ca6f4c57b5523408f5b4ed9447d49691605ccd965c146c0db5dbd 3045022100fbc859f9743f26351936a0395d7078b056d8c991a9124b814244cb0a0185a527022007f12351608804d1fe351df161ba1524dab179708d5194ad2fa5c0c627565523 304402205f107a502c0ca03fdaa3e2290aaa0bc777f4aef2d69f8f44fed2e1662dbc6b32022076d1f1f90279a475b67f2a98e68cbfb020af613239ef1a2a8d8093e89f0237b2 304502201839a28ca75672b394cb66ee0c53e9344f23cc6b941ec06bc0bd78840e611e4b0221008ca9684310555f36ac2e766f6510121e4bdfe4e0f5cbff3a8d4121596921af12 304602210085b6bd34b587c6bd290c51baf9bae47ea2a54d4b1dfc11e39161e0c897b98281022100ba0ff2814046c4951b9514c19d4b6551af1a68874f92ad96ef56d3b41bdccdb6 3045022100b2e24ad9931daccc820a6cbcbc32aca53e80dffaccf24d159a523c9c2179bfde022006cb167117b486983327bdc549da4f156fca12089f7e5dc45d47e271f80dd3d1 304402200b91b72f7e613e7e8c6abf431049414757e2257e4d1540056b79b8be03dfee7c0220442a8a7fa2b75b0d6368d17ce1870f690d8e891c3a5821493967c0b7fa78f7d9 3044022063164a8bb13235293edd3dbc174395063059f8dbd99f80a260d3c66ac3c2288002204b7214f3ad8a1af6e252dc5f881bb21e9dcd3e968a803c7e0f20c344d84620ea 3044022010893c8051bf1129abcee71c1324736a977289e82b7f392eed18382efa0b9b8d0220665376b59504fd5d2da33f678aec2d3afadef45e77953723d4e3c96035394a51 
30440220365deb6f0c6044b231e7f35cee0a501e0515d5543c365a6119d249a64b6c963202205a66d78ad33cc0ef6c304d27a4193cb972b6d6947b69cb258ec31132a7f63778 30450221008a01188e5559ca1c9930e8f557b36bcbc3dcbe3202590736954efac013e79608022022149179507a157e9255a139f91b7f43c4c9bd0dc25b3ef3bb12ff64101bad05 30460221008fe04134e31cd555de5aa3c19002ba69fa6457ed4e57986d051c910aa00fa00b022100b947a025d9aa69fb68d12369f6c28bbba4717d559675e568257437fd692de342 3046022100d794fd00a69fcee34d542f16f49967b23f7b163759efc5d51f4e8609c0d0b87c022100964c7d63c1ddf4d353aa6b65c3c5cc6e486c5f2ea2091d523e336061ad075613 304502201c21dcc43ca6f1d45d0bee1e9adf04ab7a98b137531060d7cb12e56668c07723022100b3a71cadd9ce1158565be3a4c0519526dd1b6708bd4f0ba34f0f17704c8e4e10 304402201805200f1db5c2d802e6672a4cb3d4fd5dfcbd06c0f04d290f691c4e1587c1a3022045cc5f60693a4e67bc6fa2bbd342cbfb99bd65d815e192d0ee0d88613655e371 304502205607dfd44670d0a9a95a8e2915f5d9a13240fa040ce8ea8727db01bb22629910022100a1457f218239d0c7e0ecb91e243012f706d30481de5b78379be23c99061f08a3 30450220223ab56253096233737752ad9fd4c6f5df8751d616e743b866b899b31befa796022100c7d52e117a531178209bc09129f5face5f6a5edee933054115a6a9d29ec84844 304402201106895e36729edb3ab3db8840bce98283d36312dc1977ec5b67cbb66366f11f022063cdf5d66cc6d2d7fd3585da5748d6267dfc653f55e7b635d93293533fa613bd 304502206a39e10279ef96ee4b2fede2e645e0e1e54dcc805330dca9818904e5fc29316a022100a620d3f9983473b0d34f44901131013904526b4ccfb1b9b04701dc8331ef6155 3045022008b94abbdfcdaca99fc8f601f2165023d55159eba2e3a5db2c160c5835b452b3022100df23ed10951fbe4296e5dbd4df119104e8307839bc6aa1d0f5535ba4871c6144 3046022100865ffced621ab0a1db4e51502440139defcdcd228ed040fefaf6750da8713c8a0221008eb45ae53b89be00b56e25d5ef190541b369301e510cbd8d3101b34fea8350cd 3044022063a2597099b177d179196c0ad65929ee1e1db5e4eb6d744671b436467017fdfa0220254d3ec5bb4e34c0a7265d0b61fec65e22cf963e3df8ff832f782e430fc01a40 304402202688ba6d28ce30937a74929668d20435073cd0bbf8aea93e05166b1507615c0402200c98db29ae436c3f8e21ee82cf69a7a547e06c61f8661c778bcd8adc55f6ea71 304402207b5d93130b476310323f122c4d5bd821f0938c5c2450fd61fbbe89b32252502c02201437593d491d2f6e2d31687afd6948579b9de71836263bccbffa4e23be2e0ad8 3046022100d83918d17a9cfb645b4729c306656a4dc42039b97c21943226e9312e1bd57b23022100ae08c2f322162b49daf8ec042ec1b647ac2a46eee61e710d9754e9529eab827d 3045022100c074bd52a7065ae3e180ad12189d7f60060d2120f157c21e82d88d4039411f4202202102957a27efff7fb2aa299c4b1f26d204caa897fc938509f667567e0cf9f8b9 30440220658183eccc3d90072c508a72a4e732a616648b221f4de0f54d9b772e65b2a5ce02203f9b9f5881f7c5e4c4ef99ecb3e4127769bd647856cb0ce2bb32581d89530131 3046022100f0bc93423e549ab6821e5b15e87ce04c246827307776bf943bdb3e9598fcb0750221009bb81165a51389abc5561b56faca74f414b928b81beb1bf4f985a429f707e3b9 30450221009790980cd889fbcd41f9c9528f7a734eab18e34de7a108032cac8ce0dc2bb4af022040f28203d499f9d579a062dda5ae61f06ff842cdfb3db94c19e726eeb2fb6495 30460221008a0aa099a036398e01a41928bd5237f7e730b3ef87e6489e88f6d8bdfb125d5f022100bc88484006906ded31b86e3f8074afb185ff7571d295049c1ebb548324c9e86b 3045022100aca76e84c4c16d5c0756edfe0e09b9bcd72b08ad897406fc17467adf5ddd89e702200a368d382368113b51712b7ea1d869ed940d2ac6fd133a9adb74ef9cc67ae90c 3045022016092dd1e04a4a3b1180eeee983e435d406dc66cb75085bebc22045cd7ad4b9a022100d99634f17cbbc3284b07512c314fd57f3ff32cea98e9ce439b34cc9ab678a237 30450220016f6aaa0db8928ad135b32a8ea48078d5ead8647159f16e6de7b235d2e6f2bc022100f08c4dd06f2bd6e763bbff00725654bc86015b1f2eafdb8c36ea86adea4097cc 
3045022100f396c83e0c0b05a3043e2417623b157d5eee1c12c5f0e55855748b15fd9baa49022038babbc56bc5b07bb99ba2f75dcd76f557952e5b261e216d5a855ee80aabe224 3045022100db7f4a9a9b3febb7d2dfcbcb7e2d1f906cdc24f870ffe9f82b754309d292228102203837ac81fb408ecb6e5d6babd526090c09ee3d9561db85e97f478b3cb0190e86 3046022100c7bd9d0a8b046716c131cadd4780b7e6d9590a695ee0c4e9534a035313e46da1022100e3e8d26dc56f6ab54fcb24bd81929e6ea440580ceff9b55bea76bd846a1c63e1 304502206def675ca0d4810e75b7c2295912c606dcdd48ca0378418fe911d1f1df4097c9022100bd99c26bfb5beced31e6d8b2b22f3ac98d26b121d58587c0f9e8eb71ad7c1f81 3044022039e4fa134fafd2eab208a3b319d986855a3a03ce7ccdde6c483049d5f00d43180220525dcaa4268270bb461a29ad48e775570a19c430a1f82f18b4196d91a8a47bca 3045022100cae7dff1e41116f973b0a03dcc5e70284629941f38cf83f4b3c5f18049caa0ca022017655b7935f4d5f13c557c420fb44d143e661f1c7986fce5ba9cd968fcb74ea2 30460221009ee85e7b9752754fe1f8b0fd8f50a0b176b67a5d79679d8c614016271fd24e70022100d400ecb1f4f18661b639ef3a50182ef6a6de858f79a3745d670937c7a4078ef7 3044022079a2722de3ba5deadca7c205a92bc391cb038ee53c38f08d95ba824cd19bbbea02205cb84b66cf7159fd445e864793b6b090a9437f74930610124d1177d21bcdde3e 3045022031fd4260cb84b11494cac791e3b9fc20a731064b55074887223b7ee5da038b0502210085845c64a00d0315c7c1b8995c554fbdb06aa029e0a4d4ae9415b5bb4a22ef49 3045022100f250abde757f3ed19446772bbeed7be75f24c6e40cf70d81b3810220197ff320022010daa80f87af9c8df10d5ea270123d5ce2e0332e064989f86fe5e66ba923add6 30440220676114aafeb9fec2e7c5aefc018c1637be6607d4fb3bceb8361a1cb0d8727ed502202b0275bcf98bde51cd97397d978ec5a52ea89f3a630dd45b5233bdb1d030fa2a 3045022100e7d89c814c0fd010a33ea3376fd727306d222e557414559c8971e79664e27d310220441262db79b884a097ae3d70dd45aa0723d7df855fbfe0b42ed3dd7349c8be5b 3045022024e7c619692bcd14c5da596da74e6fb8f3dd0f9a0850c0cd82acd5c01c680b78022100898706a2ff53cb98099059ef965b2c5d67a863b8350467909feff66a7a5bbcfe 304402202f3c0904e845af7afc0fbecf3004d908ab4158c2ccd7df765c04f64915082cf20220217d8b766cb8a71406e565b8c708ee6440bfefbeded0ee3c466ad07776b7f21e 304402200621f8ede724e45de76da650806f82c4a6c023ddbbfca520dc884f34b9d7e44b022033ac77139483ba03ebcaf4b22086c4ddc1596b7d87f33a56106f380e08bf39e0 3046022100ef81290f4811c6a29ad95b3649f817406d697d6a69c10498fef054e1ddbeea2c0221009358f09a73d2cdebc04bdcada257c1a7f466eea4c5e0604ef8e38f1ceb630169 304402203bc3ac3656a58d9c773b1400a3e25c292e909ec2674c0bc6f455cd56876a096c02205017728654ca7a8c845a4673038d7f0ae34c1d0b8634b92f7747c547cdcfc542 30440220068bf6550b2e6915420a3da0fe3473b9927f369846ea1979399bc972ccc09bc102200753fc4befc0b94d4abea476451a689f3b1b67a2d0b3ff9a3cf51be4c2384d80 304402200c8e23489f796f28891439e4540fab101d9dcc867f8295a60c01d235e446339e02201ad76415fec5661076318d78c9908887e42742b1731d28d1d878b8dd0033114e 30440220541c822437e75057f64560d8ce6547aa9c1ba3b7b898aacb1eac326a4393d7cc022030e5d61a313f11c83592f7ebb80bc3ebe5bc615975df8c400ad2317a3ddae429 30450220222764ab1d06b13c93d5e40885c5793936ba20ef4d4f3f6dbe332e3a72c55df1022100aaa33cbe6336240d847f975c3849fd0a192b424cc1a29198bfcc948bbc94ac80 3045022100957ad69a20c18618a93dcec478b7bbd97bc731a6fc2b19eb18513cd5d52814bb022057350b1cbd86f5ed7d4e7559532af8f70987ebf66398bbc452ee0e46650691e6 3045022029b3047a7613f1c0ba720f52505c746348fba568185a9cf600119223656eca5702210099fb078a55ba9619461d81f3fea2e58e4e47b4271ff9ec7ba64952d9a3dc7b03 3046022100e3bc07de3dcedf090afc9e30a428855bfada8d88fbc73e95b27f1d64866b8fc4022100cb69a11531bbc18e4dcd122841ded07a9354e61ed11f6d8cfcb0491897345d1c 
304402204f2a98965d70977d1a0752f0751027b72d3c76b8cc627525a52081019618eb2a022023de146767d0f25320bc06bcabfb5ad1932942fad85bbe739dbd474e1006ce33 304502210081666a88595483d49528e6e717f92af75171e687c25a4a48eeb4ca01032405720220209b5ecc78aa55cca1d2669cc417b81172259275b3f33393abb252b36015b4d6 3044022070686673d22c6e39f29a6fe6bfc437e1c03494a8127a48a6e0939c8c6e370bf402200870964694865cc5a55d39877a1f21625d2ab89d6370c41cf2ea2106736aee26 3044022078ddeea67f802a84a34e49ef82d0cca1bfcb219853c4662d083e899f1afe1cbe0220433eb7ee235adc28facde62ac47ca4268d5d4ad9b9e0d71c66a3245d2f8d55f1 3045022100bdade0b0b4e9b10d7427fb69bfc375662180d97fefad05dcae49dac783539967022031623dc598eeb14b3d71e549f083a2325e7c531019550854583bf8fc1688c254 304402204c0cb57fcf4566d3ea581c564199dea7a425607be7d13a2ee5be51b4367816dd02205f8ad028bcd5817aa54e81b79dc269df8c114e1c0d8892ec85cbced647d5b8e5 3046022100dbc04943638512b3e4a0bb0289ac0bc26ba709071a33cd6de8ad345f29480d590221008803fbdd9ddaba853a8406257bca73b59e4140596786604d346ca158728fa63d 3045022045330b80f0cfcbaefab887ca8444eed15dc4312723ae23339461dcd4d0de5d4f022100dd2eadd8442cdb86b19f329985288f703646ad0cd2d204c60d3feea19b7f035b 3045022044d41c9a8ff6875a19e7b7f185ef9b31901811a96bf07a5120bb9164ec723fa0022100cac7643734d7b63968314c2f6e1c884fbad0b56c74509bd09f45ef151f30a91a 304602210081262ecbc35e281b897958f0f9e08f58dd8296f9717a6d20f9d2b216b1b01783022100a8caec1260c18b4a81c9e624a0e6c71b7e9612e8899997abaf8acec288af79a4 30460221009cf122bce0265c31f160aa62c73ea65614270d1a864f876beead09224b3b920202210081f304a79b01b0990b03407dab613e5330a420788b5cfac5e3301eff94d65d71 3046022100a58dda057a7a4cf62f713b43340ee20370d76d2d4e9f7b05da93948edf8c2612022100cf515970839750f159a8c6d000547107b246f026935b924458d27ba8bdc13023 3045022100c5b3628e0ff22fd0c6e50a1605113473344f269038f6786feca1d6e8032417a0022064afd36f01039a91909c7fd15f8a854eae0f02f179d080c14c77b00da7703531 3046022100f99b8f40a05073e737347beb24df154c3ab1ae64762031858d910d53c237f1f702210099656e6b1028ff6f20fe39c8f66849297e0cd21d3990050ba7e3d965dc3e7ed3 3046022100d08326798bcf6268356b3f442338e2ccf69edb5bc8d7f82ec31edbca650ee6a0022100ceb6db272bb69e10b2142cd4740b993fed9c577da544581aa801d79f2755c66d 30450220550bf086756934407b5dfe8e0d32385b21c710166f121cb75b7d8830b000fa51022100937f1eea48cf888a05f39028ef2e6565f798a326814ab8d5906d7ef1658aee3a 304502210080bec5c764542107a51451684f9f5a522e88d56fa29cf929ac5f3c21771dc7e8022052a3997a95a9cc3160418e9e8ecbfd7a0b0e4c00888b5e3694ce1126d7f8da17 3045022100ae7f3f4e7d8ec3075155384bd72eb867abfb82f30175b0645e8d453d9ecd77f1022046ea0a8e70d2e355cfab60c92b9bc96b27d7c4a46bf5448e64409017a50ab091 304402204d51c1ee6b8ba384cd3057e1441fb71d69d4aa432b3a1095c1e82244da87509802202bd6ab5bcac25085068a16d6524e02d93a8defd862f5b4889b2e48384f9b2fc9 3045022075197e2b783f4405f05ae2de3a7b8fad336cc5e488942819484955079fd16c84022100d95bfab6867ee4cae0f758ba3ac7ad253df75f19d0fcbaf88ca0d5bfff906a9e 304402203035260697d409651f2a04641469afc4b44fd1f3b8170362e43a2e1c3cb5a7f2022029ba8112506880d2da4eff58ff6be0095b213eacfbc93e597d456cbe1b7c97ff 3046022100e92fe431790b202657a86a60a632b5e272bb895db9120da32df0bf10c6f00ff20221009f42b899c4ddc0c5aaac0cafaf019ed85a6c1822d34936f04e2b093cdb6285f5 30440220498b71351669b7c0b275e7b416367739acbe24f8b40525d099d6d07289531fd002200286a4fb529900dfee46f5678d2b819e9b21645436cdca03d8443dd6a634e7c7 30460221008e9cc6629a14cc0095a10a91c16435d6b175f06bf31b7036c06be4963ef52f84022100b29487efad919658e8bbc59d5923386ffcca7793f83859248c32886ff69c58b1 
30440220782b29c8ea5edda9f7c584b0467942c6c33df3e7030f476ba74eb69c7cb0b6f50220412ba93e3d369fd3e32fc0edb3309d23b33ddbbcbd2da998d5af8cef4c963001 304502202a9c18785f06d629974b08780c30a20e61f823089a05640d19fa578267b83d87022100e284d56acf363e7fd96b9aa9d27e50916bc8a09fd2f0278e16deee0e9f2ef848 3045022100ea14fe9e6661a3645709de59ec3e9f793f0d37cb474753a1d6efc7d01fd1557902201f8bac9fd28f57cfae4c686698c9ae28d351b8274f07d946116b57f54b571f49 3046022100cf59f44bfaf3a6ffa741bbe36d47dba9421beeabf8128283f789ca9c270a3124022100ff66d4a119b59e687c61b4a351693ba0d7590b834425541e4b07acf3c213b41b 30440220796b5797e6db58b5d701b5486b51ea75b96e47a95defe85222a3791207a131370220504dcb9fdac41308482fc3dd60cdbbae57e13bc67355c00c537f4d5b703f8378 3046022100a3b55578d0b004903b0f3319ae57108cf9166c442b906b1530ee8f4c354ec632022100d24d005b58f473b77c6853799579c64206af8365e709c7dacb071718e9909f9e 304602210097f6c9480a56898be4d7beb0bed2732824958cc7fb0509eaa5c5139b87cdcb53022100b4f23d6c90a2b1f14726eb61846fb8e52a4be14abaf0df31c93d14b86e08fb9a 304502210092c35a2a2cf43dbc1d488dc4a29f15fe0c0f1334b2699fb1c37ff024342a9f8d0220488dd6b88e400bc50f36ad97c243083d1be7a2e8876cccce01b20d7b7030216d 3045022100cf0d57236b81a7d3b3d330cf505c2650f0a4efd7730bcb566e779b28e3bd1fef022004965ea4c6323174cd609070832962a01bccd934e1b4c9acf23813daca0a6662 3044022030217ae5e7977172b1e9e31dd9aa73f37bc5a95dc182cb23d3341572e70442af02206a9621f6664377eba58c32e5eaf7f66f06605a3a964b6acbb43f25af8632306f 3045022100cce8f90c9c938787fdc99c9e1fc8b7d924ee0a88ffadcd32670c1cbf69ed7bbb02200bb8e71f4f2913944202c7150702dfb6047f7eac1db5f2277ad6b64d5435b6f8 304502207e4889d8554777984b4ae97eddf956d8f851af00b4242a974279c11bec54c7cb022100bff071d7ab1376f1798d92b3869a08e7fe66190f5f2bba662e6ba30a8222d4aa 304502200c667b514fbfd23b02f1eab6b9353e61a072f34c5afcfb4fac3f695ae1761cf4022100882280d75de24887a08dab026af8783c9414abf1f70a8ecb85554d2c5cab6f6e 304402203a6451bfac74c842061f2d0593393baf2c1ea07b5c1bc47fc3688d35b46bd08502201fcbbae561e0d34a7552306bb47cb6c5c76536199f1cd6a6312a680e90b42ad3 304402207c7ac59a1be5ae6fd456e1b83759b18ea18b2f956a6789cfa875b6627dbc592a022024437831dce9d0724249108599c8bfca679ac7925172864e0b7510c90f37ac41 30440220330cebb83ab8b65cc969e3b3aa90ee88ad4aa7e63203574b9b9e423274d58fbf0220648b4152bcb361177d869bde199aacdd2ce792c76bf055d8c76dc67af23c56b9 3045022100dfa68cc3e76eaced6cd24be46598634544852a196289650ba6d897caa712b69d02204f0d46412206bcd9433a3d90af702f2c610daaf51cc3116206283071f089fd48 3046022100c8eaee29fa7a8c363ed355d76dfffba014af31598560edb002bd16d08a4ecf640221008800a34bb9ceccc0afbee100bb9d0f2e7927f47a48a9daf5c8374716e8f400a1 3046022100c71d38ffe213e7267ff9872d47d15decae94441c3f0b6b7175e9567abc6e7852022100f8ad6ac07ff97ade82908948a7e186bcc2be3a6d973732dc6d193f406d17a194 304602210096bf4213b6d7f8fee41d4d440bd95fc6c41a5cbe464d93ea7600f864e4666f16022100fadfa68bc43a4bc96619c9123b820df7334432bfe8f5d4f7c977954bd17060a7 3046022100b2b57e49fa4fcb2d876d84e0514534f4dc6bda0846d50499759d331ca09d378d022100df6f82ac0c8d0fb13367e8516d3050874f4281398a8b796b76e90d5a5eff4e69 3045022100deb82c309035a3a4e3d2dfc685a849ac6e51efe37b79b63ac59bff98884d8a7602207ae2fc5a82650ad10717d68e9c4ecf3cac8f9e73c4540c71d8c3a52e189d4322 3044022049ae6c280bd534b958c21e0d9b372072cfdf42ab7f8473c1be16de1245cfbd8702201a6190041253df630e826dc6c1ee2beafccb1fff17a4921705a0760ee8f2a620 30440220416bfa82a883f941e64b9e0f9e0a5f8dea75fb4d105c7c85afe33c4df75e0a56022067ef6603fb77029ada5ee7467c152066c13ac86be77bcfa7025650ad6e8f0499 
3044022034ceea5c36ef4f374771374f779d42ecf9b3f3d5f73fdfed227a1f4d76c323b60220661fc0c7f2383d16611cfa396ed7fede4d4da378bbc0c962e279a4384d4cf9db 3045022100ccfed4d744c86b1dd1d697e621920e1ab96d2934df970ded5d38e9bd6959dcdc0220629591c038403cb4e96ae641623f6c7b5bb3900c17c681ff099969cc7fa59e5c 3045022002fda5ab9b565af5a3a23f2bcbd2129c8466acffe8288cfafe60769de2e79b3202210086dd29bdab6aa164905c8f1214159ec43007b8e7f02d10637fb803083f4e695b 30460221008c1d144340157a6c37e5542d9400e74ba3dba1408827172e30e7e87b6c892ab90221009bed5e64c1ef6e492eacacddd1e749eb8c3fd0f3d6c49626e4e8177ab9b388df 3044022000a298449ae55dfa0a6a996aec09b6530d2b7fdfc34949557b29eb514e27edc8022034f65a0300f8bec00408b1b6fc03b0ace6053e96c3a2f3bb09d30c5517f04a1d 304402207b777a8c534127d292f7f3f9142f5cc5031b2d321c45a3d4ad6276c0f9f62ae502203fb6ba13c3e9bb782ca678cdd51651f5186df7d8628bff6d8954280d62b7eb27 30440220481fbb5b52594bc8ddf8084779e9bf69d648745e57b5c6b9d2d1cd649c02bbb10220215269cefac20a9efb152ea0f460a6f608086a055a0548bbcaf257c645392cb0 3046022100a49a52fed40a1384a163a3f18e85ddbe49558b9b2ff396af8b690989cb64408702210099b184955462f70c6e86049ef609f2c0e8056e7979fd9872669e9faf6f3a811a 304402207de248a643d3b2365639cb329e2f602543dbfcedaa07145738b043e901a90c7402203d85deaa94fc0b239b4ccc64cda2f9687dd0c0eb1989a4798683db8a06461263 3046022100c8c5586bdd2d754d3fe3d0beb22dfa2733e65b9751a8cea0843920496268c3b10221009fd7305da62e30986331faac611d75d4200e4b7097014d5a10d006242f55ff41 304502203111a4d8e9aba51a9ebb8c7981bdf79beba48ccc060515220c225d538b6a2f74022100851b089f32a31ac2065ddb5226399b4a51b8736562b87b9a6a2512636f1e84aa 304502202f32fcdf95cb5c2291c4a0f8aae64d26ca84333d02b6ad149a1803a89c7080cd022100ecae8554b89f9423b2057174d8a40bb0734a41a729689344cd9e1ed79e2cfd7b 3046022100e01bf793815d88bf77f3132d1c874565a6e1aa606a16722bf722cd0624b1efee02210090dbab7a18f9a35ce3d85a58939efd734804dab324846e38e3c63044a30e7bde 3046022100ba05a5176ab7f52906c06cf1bf2ba5ba762c1013d0315aa7492e553c5ce4bec3022100c1b7506374ad77276b4cb6a0dc121d7ba325788f2f7f416e9235a0eb9ec7f152 3046022100b5e58d80e65f218028cab25a21078748de8aa420bfd035977bbd36542c4e09d8022100b58ea7c447428ba08d5636697c2726a59218f35eb685bf6e06513661d6a8d93d 304402200ecef92ae76a23b969535ac6e2ee8dd5d634dc80450789e1f2014a89028146b702202879e36f0680c7f4c1cc49455cafe56574570d0be87094bc9c0622f524489760 3046022100daf13cf7621ed2d1b0573e57220370227810dfc368148aafdd8a2eb4bae11545022100858a8fcf68b688591e71d701c4725796f2720c424ec59dba91bb1726336569e7 3046022100a6a04a0f3ec9206e734cd676342c8a1306cfbf6dcd1599e30fab9cfee7c6088d022100990efca67963cd7ef7f4bdec34a684176ded14d9d97219b3a32466f8ef474a95 3046022100ecea4353593689215e7431d4bc6d33a6b29ec18ec093aa6f316161c712b6520e022100dfc3f027da628214b0a0982bdd408e2449628733e449d025e0a2315cce6e78a5 30460221009020ef2a2a7da25c2175c16da18f6dd278c2d4426f9e9f1f9d060edf4c1c912b022100cad84824467149aad177fa34790d636056e481a524b9ef88a5144ff1d357fb1f 3046022100af0555d9e88323acb7d71fc9d79a7ff82f8ea4aa875b932ca0745ecfc3e184ac022100ac6d96a96bb47ae82750abe19d4ffd9b37adae9b4a2fe886d31e5cd87bcc94ac 3045022057e9249c5c73ca535a0a016b5313b6e9ef51f62acdd306fef21b9bbc65c07298022100f4872b6662de0bf8d755c75f84eb165740b97bc316f3611ec606be6da836bf23 30440220458ffa2d5e182bd51e28c0903eb8a311cb33a714382da17ea92db3319a94080802204b5d1315a9f7fb421a7005662f57e8210e5eeac975d68fa7ede6b0668f439b34 3044022047b4fdc1a9fe8d7d375b3686366bd944035d156dbb27b74a5b7dbe53e1c55be20220720e7c8aa71bd4ca5040aee871c915aa3dd469cff82e9df119598df41f019893 
3044022031c8504450f813f3cc7fa7b55ec8cff29cc09634d9afdbea1b7191feedc2bc9702201769c1313077b678c13df6e455eb4477347ed648eea07796d0a61b33a5907cd4 30450221008fb048f33b1cbc7505f14848aa9f00f303cedd2d86c6109ce2384d2db237fc2b022040e45de0f79f550a6af269871101f622ae613a8fa479d97bc8011572499d2a6d 3046022100fc651275f58f49cc0bb5d3b91d7f8c2b0becf927f9d25b2335c0ad1bee7840b0022100dac676b1b5b9f84b5e0dc485fa0c725983096a239b81f75ebef85d2c2a5194b2 3044022017eafa1602749821ba2dfda2c8651fc2d336f26a68eb9092ced7f2cf41253adf02205b1433c5f54010ec6b95d558aeb5d1efeaf60d5ad49e6b1c5d0bca5982d9cf79 304502203f5fcfa3c812a24787602f627fcae4909e2af6370c06585cffc3be640c5fd04b022100e22d4ce222f437d5aa16e7bbd7776d506ba5f0ed170697b506a9060ad04abe63 3044022052246db2d7c655581f61a9387479e314b2d5f886856ae0cfbfb0fb7c7717d24f02206bf0cbdce489745cd1ed85854d11fb8ffc51bf395eefcf3f72d51e9ca8f10032 304502202f98ae6374616c8b422a0bad6bc4b0258dd9257461221ab43b7ad3d492a784fd022100d790742bb3b11d60cfb3b593b5aba098bf767daf7cb511fd9019bfc54f91ed20 304502202795ff1a9698fa2ed4e3d964f3cd17e87e1be8769d6e9791fb73ac588ee9b6e5022100dfec9fef5ab184181c5a2d2b7333bcf17f6e3fc83197391223b28f5897b2d81a 3046022100d5725338f214a73bb6907f826781d65bf22633b30f6356d8b3ea0267771b16a602210096bec35a47811614eaa2facae69b4e4e9da31609ed0695360abcf868f5e41f7e 3046022100f7f22cf00a5f687609682e8c2b6f2fc7bbcc75d570b585c5302821deaf6982fd022100d2d9f1e83ab5ecf2d366e0a41fa98365dfb9b9379f5663baaae799ddeb03ef8e 3045022100d7bc0f8387babda4fa836664e33db9959ac15967a730068f2e08086fce437b6302204e2bb72e4f39598e272d7e8066e2cab1c28b2c07b8fd8ca4c7b0e89ea5b061d8 3046022100d1b96930d6c5ed89feeb2c603a3af336880366562b8ed450bfa429132d580ea6022100f775acfaef0fff75b76b7bf7c15f04ca96de4b4fccfd30c2a712b5c2988e00fe 304502210098114aa01a2035681e7fbca2a470b854305f4a5948f83cf72d7380fba40153ff02207f646e87a9d996286190406651eda30a497c70850248fbafc80a53bffe82936e 304402202d2ba4929a3a433b3bf8ea0f63dffff2288903fc241e1926f018e42d2db14777022022ac05ab246667bbdcc48db2b2bf2ba999f00bb59fef329b20b9a26c7996a081 3045022100a219e3e9191c64a3498088f5eb0143475be457cfe1c0199c8d82d066be9877d9022077ccbf24b9ec306ac96d427295a134c906b29fad6ecca2a058403b1f39cf2004 304602210091880c8898cf30ccef2a337738210a41f7f6b5cefeb84d9276d501e6f6878f92022100ca572ca4aa37cef235a459041ddc01d5eace28780086b23c5bf89710dab2b36a 3045022100b1f6abec662b0f0ac6c215baa5e938305583ec98053a6511a599be2c8c0aebbd02201c65ae3a35f7b818f496005b55cc254909509411559979d9a1a632984691bf70 3046022100ca358de7419c853820d60888cd589c0207448a72c49664f847eb5fa6f23569e3022100fdfc1d28f3665cdab7790fa1cb1584e212aaf0db441ec203bb9072470d9a0435 3044022044d3bdeea072a031cd38238f197ee5b9a0dc6f5355adb11cc2e886942887241202207e27d07a52be05257edb4e3975e878934d8698ca2ee8c7d211492d7a041a2a22 304402206932e21fd491be74611a1421f6e50f7b2e3251276ac085f4113fc486216e72f402206dea8e4f4fd3594fe880e8e8f23b68b4260efd0c939e9da36663f569811f9184 3046022100f385e27aa20c3c2afff7acf8dc514ee6fa51ab351772bc573589b62a5ec9b248022100812ec9e40908b20e0ee75489d8b2a39306027a6261da31aca0f0afc982f06074 304402204720700e15967b1b33f25187511722885c1bee53b592831e5b5fd77959ca30ef022016fb3c1aec8831c1af3e97da3211ea2eab5c21decace779d52b78aa3b1e6fafd 304402202a149e3676aa888c50e3dcb97136ad77cc26db9a4d5a10fb14e655cc0295bfa202207dbd9d0a1cdead10e90f5c16029a94062257648c04da02abd50409135657c168 30440220105e680eb8ff26bdd73e6815355586fe3e315e721f72e8d0235b8b569bf039690220531fa4e0e8540a20bf8ace27ed60add930f88495d30d6775c9880ef94f039901 
30440220246d5e6ff0067395b01e32b49aad147248a8b9b96b7d212e288700bfc2d2e7350220365db6b94d04fa2c6e6ef03af16459edce244934a1cc875f88a95f016e335926 3045022043ee2fa55019b5609c8bc5da8407403d30a8e67cdfee684d9144203c706fb4fe022100a4485b9de7627f8509cc4595b89c00adfa42b1087d15af8e8a2eec4282dde5d0 3045022074e719de41c5d6e1f18a237e43f2bfdd9d32b7de13ac667f0ae5b80596be12f1022100b7e7226031daefc250d29ec108cdbc1d767db24c18ea9b22759f60e5c2cfb320 3046022100807f3d006f71450c5690ab1acb55959d6e82ae3a6a11faa512f93e313c15b908022100a6069fc458d18abf44d1f81ba32096fe30be6044a04edaea71c2459de7839546 3045022049c3600787ec0a1ffd3bd80fc4287709daf2da9daecda24e693727bf51143ce4022100ca71e2652c583ecef46e756c1d7e24145c685c1e512c4b2de41c181b4f6d34f6 3044022046c17f9d67511d19a904e2036197460e1669e51e25bb75ab9f3db0a8a95f5d0902200193e2bda032700d8306a9df7b5a7faf49326eff32c690b16d23cca3791c2500 3045022100def37824f97ce05973f1d96d03ea403a0c021218ddd3cbdf61018a063e69799f022048283301c692d40631ef75b5c4853b6b4194c1dde62fc53701cf6579cfa426a6 3046022100c1ec2107d8b39afbd6f6a2121c39f44a09b5aac06a64f7e61709b613fa50363c022100de5e69e6fa840466ddcbd8c0e05bc8e48cb20131832d5968657427a8e86097eb 3046022100a1198e858e40dd6445a14c9a2a850ebf26e67844b6c5e47a16029e92240d5dec0221008d81aedac0b9b1dde4a0cd02ea7d57ab9788a9c5ca5c87cddff4eef37041fd89 3045022100debefb0d6883df499012d459955651e5450b283eceda5786fd66e4be760d77fe022061ae56af1c771b3fe804b73306d660df3df8ecdd98539e951ab1a0c198050f6e 304402202be8c3644d0fd0bfa4658f89b8149d3f6d802886801dc00f8cf09c34c97945ab02206f28609bd83f2234ba9280041b2681f494d39c3a8e99c9bcb111b18bfe504e67 30460221008457b6f8d16617fb6e5e39fc2917182f770f6af4d85caeae8b1e44dcac8b28b9022100f1486d2927e37f71612aeba0bffac09a8d5f9698c25fca03175a4877c5e77d59 304502207d57427f8f275adaf044aeb2df92e4340b44d15142148204f225d23fcf296167022100b9b3de949f0dfbd0cad4b6a9f034d78f5cdf1a6bf9c49e23225d5c7c7180e865 3045022063c3a92e46be7ed9b4d1a140a12437fb43658322c3e1a05a36d55eb339556147022100a2077dc4693393fa0e98b914222ec895dc7ab5676226a9d7f27e58246383a33f 3045022011d9e030de5f9e2a97c9fd6bab1c3b2cf59783c8bea2c31493b277b1c07bebb8022100b78e258fdc259bddf6a5ca3f87da8fe320082bee94723667e5f9ba31e922aa02 304502201340d85c7153560f3a1cdc50ef494a2336949d8a5bbd45006100f5040204e73e022100f3dea7ccd1aac08084d9e68b5f327c0344798f0e40c6a3174b4dcf459d7ff43e 30460221009f6b88c089fff2dd87bcda511cf8a96ddd669bc806945fd9df549ae701267d32022100e53f2d747a195d09eb38a457ea5adbf8928874686211d89ed17630974f170402 304502204a3662db64ef31b5ce28a30296e4b1619c57f5d31f07eeacd9bcc1acdc55fed7022100e253a4c45fbe2da28470148b33840db6800f264b68d6ec70eaf1605bdeb30ba3 304502200b450e66c4386bf70653498b5f8bf62eb55d7d038bd25d281dc03afbd81105f1022100e36f4ce8c6444192e868946d59e8408e0a5a79c87104803aa57c04c691edf17c 304502202041f23da9e5b0cff70d403fd50000f4c1d11eea58bad05374c46159b832b5e0022100d627a3cad1bac1080e32e0c65b0dbcb9abf1d803744aa40c9cee50d35b5b05fd 30440220377a76a556642861a8a5cb29d8b53970559639629381b78b805f12f9ff1f6f1902203fc00750df896b43ee3eca185e0258d3b2dfad5672dee8f1bc8e080c9303733c 30440220568bb6232b0e3c97e3ebd91c66d1a3fa936651d53e92f3ce8a684a9bffd38e9a022065fd4934b9c35d9bb54c2d7d5365883dc8c05b010f23157d4e015789704fe134 3045022100bd8bec1c2f542bc3d34ac0353a4646126c18b0cfc1c9f68a91f58111aa0ef76d02203556adeecdad46b55b5029eccd88456330715917e2f1cc43055d21ca5ec85556 3045022100c3dab31c06cdfd3111c2f9b6ba70ecec0cd3a6e9799ac8b5d99c95d29c3427530220177133d661ae72605bec0bd482c0345de984d3cf1c207a830c9475b1bab5a90d 
304402204874ce1acfce8258eafc9a76ff2e662c72f90ca59d82ecce09350a420beadcc7022053cac2e69dd7b94abc059b7e0a4d9a6c09d89a228cab531a19c16257b67c0c41 30450221008476537fd27ae0384342a58e3dafee1a4bea7beb5de3aa5d1f1dfe49fddd8d3f02206641d635279f47b94d0e145c9656f0671cd4088265f289a0d9979f7366023091 30450220262e25efc4244c3fa8c4a7f34f7f782d1309f2f804364d7902fc79229bdc5e34022100dd617d8e53c5a5ed5309f10cbc54eaf43da6a230d9a5f18e5e00b131651dfc32 3046022100a6a78467d8b2769b5141821a4dfb48b9381820d4d681c75fe2a4770d9649fb20022100870cce88c3f174dea52378bd756b99d41a1cb91a8f2fcc401b4c507a20425b6b 3046022100f72465a7eab4cac7b6b6d91d1bc56aba14f0a7f514962e132abefcd4268a596d022100bfb53195d0833dcdac8de7995a1bc3f15b986b329ad0af69a587d66fa211e592 3046022100940e75aef38d23cb6b443e5a0607a02ff575a9e3168ea737f549da1bfa60c487022100d0fee1332d071dad1251bc0dec3f4790c9476dbccf9846bf73b9a10fad077465 3044022051a89599b4dea4f416a01abf4cbcd0dd9fc0fae8d343ab72c72d95e7b67523af02203829e98c2408c7154cdb328a01d42dcf831739f3133828d1884fbc23596a7c2e 304502204f0eba98667066c4c5d08cd2df3790c0cd5681f76353c39f13cab58e1af961e202210094dce3247f48901b164acaf696b53a4566b4a20589f616e02d5dff66b5b75f3f 30440220610d45baddc8bbb8ca00b70d2fbc930e6f17683b553f6c49d799cf4a9bea1d6a022044a49d15d7696584f176ebe366fc2c32084fee5cc91f2d6547550cb69e9c5c45 304402202eb6a42860c08fece9cca9d6590403f2484ee484a8831b248d607266d2333f8f02202293a7925fc27977feb938a7c6fe22da5dd810a3e1355ac36f0011781941a64c 30460221009eaee7a1462a2df6e2824e031c2ef9c88a84b3ddb9bde7aaaa4e9e33a92826b8022100fe2e2dffc6343566d1be30ad0367ab12a9c851a2bfad604896a7415de72f2f50 30440220356380d9f22345e261f9e36d596e2bae36e6fa681126964e92df2aa23fe384110220016d6f50f337ae9338706e552f5e4fb5b20f0485c1dd5a337f59973d929bc5c8 3046022100f9bfacfb2b80960dc6cb3cbf7b4304a25b4a41021bfe0a8ca2f61225b23f5313022100b72cff10d86afdb39c812c2b1ba22342f14e9060235d67a9d254e89bb120e4e9 3046022100ff11273f57fe8ecb5330c87b621e2a4a46cf02c1764e58435d888b239b1ce5cd022100ecde39bd24445818a8c2b432e1536f8a33c629aaecce7fbf6e46bc0be5073157 3046022100bfe7472a209e59cc67482736729c2d9c7487cdae5db3b545fb33d06b2278debd022100c6b6141d0771e3b2b3453319381658733a4edff41b17d700d96b26214d76f302 304402205b8dc0344647afcf63c65f9030d5d7d31858ea6539c7328b2c43dd04a51ae62802206b14d11301afbb949baee0285c1c9b1feacbb036f0d3d8f9cf43748d7b6920e1 304402203969ed26532df4ff523b0071c8dd755f4eb521dbc243f9b8c417acf34147bd7902206722c148a20e55b7d179e39d46c93f0a901b3293116ed8163e578b31b7bf428d 3045022100931c1b94556a5d53ab37c359f15ecd0e841e62529d675d1a4a50004de969a02202203d322f60df97910bc6b911bba9f7a76a77c2cf238eb1070fa7bcdcf4e4b60a29 304402207249ed8e19828f104b6f6f68007bef3e424c12ff6613e2839d55d13d5668eae902204892fefd4f36757618a49fb1857e7fa2258ab88f421df59dccbf176a4832ac02 3045022100a297a7177fd775935b5ca8527a23452b1c9a34d1acb66c15299889886af2db3a02200ba7547ec3053fa762d7a91f4da1421719cdf07a837eabc40c0b3bd5e02c464a 3045022100cc98f2cc6addbea8c67324fb85a28d56345a73143a456ec7b228674f98d3fba502201e5b796133ecfa04dbb4dbd9b9b4283f2afd3fa15281d06890322794eb4ad141 3045022100e57d95c23e9b8b7902c89beefa06ee00c57628e0221a7922c4a98e03d3711d4e02207f3fa27428928cb076bc15b635ff2d36339126ba21ee8450166a6823ff5b4f13 3045022100e0475412e20b2cdc85a17f29ad81e9f5f25adcc3820549db72300ae2a5cc3f5e02200d0f0e519cb2fbc10dce4d7c68662ef6554e5c92ce4205a2168be54c4a9bb7f7 3046022100a05ed21edef295eedc54a13202efdc0fe9f2dd7baedf80fdb070734c8912368202210090519306d3f403178605f030ca490e7f17e8095b1dfac215e2edb9bde4789a8d 
30460221008188f47a8c0430841b7855433e3716bce7dd22cd0ff73c301da4360b8a2c2ac5022100ff3a1a1306160a05c4a696dc2e830f011329b3de927dd791101155108f3c1d41 304502202dc1d34dee47b06ed0936b276cfe631527a02481d0f8616e90631f74635d03bc0221008cbdba53663fb9872a0840ad1121de9fc1a315aaf022fea10f48f645cf15b8e4 30460221008054e9930c9e9f82cac68388f75a7e675176a927daa64db1ebe515d46a24488c022100979f46dcffc852e4cc10008b743faca0969ed6a8d3c3273f0a2db7939ac0c013 304602210092b1449b01ff788780c571794a389e87ab50c9e1438d1ee2bf10f6a623c41e0b022100e156c5c480b8839ede9ff91acb4b2e73bb21805a16684cdb8d26c8472c4189e3 304502202ef33925f1326953e9a2743333c9615a8f1b4a75276cd7e9acc6428e77318f280221008ba1d921b66ac005b0727fbcb0f9e58807d494c48f6a0da693d31414c7830b78 3045022100c4588f1a6c7a23a72952c38435b7211339d824e5b949e7ed2c7c648aa077d3600220657685c2ddd667d2fb9758b6060b39f7a8c511a3b304a49f4fcb622aed9ee491 3045022012a41e470fd49e10de536889d6fc462579ceacde0bdfb75f3919746f993efc8e022100c154c17abcaa86af433ffbcd84c3033f120b5d42f7a099599393ec0057963a7e 3046022100cde35b8d3c1a075041eed512eab819a6553ea1225892d1d2d6b5e5cb01c9cdd302210094c222503958a11128eb5a9a41d6a1356eb6772b4d6b0f221bb78c9f550059ad 304402207bb7d410e6225c0a7ba12482a9faddf5d35cfb6452db3f2e00db5448087e086602202d6114ab0317fa6d2bb3a4a4376e16d9499371a363456cf176e967f97f2c07f7 304502205d8e4bc3ad94d9ee7694366da9b0c4235bde3e22b683348ec577592bcd30853a02210096e29897b22f87f521ff828e5f4ca305dffd33a7453d8f55965eeded5580c032 304402206d834c4a95ac64f97ede654fbb907391afb88d995b6be5c2e6a72932890a6606022028361e0343e8266080089c242315f4846e2f8ad6ad4da22690dd989efa8186ff 304402200fa9852c7d2476e77f086ced6518bb313c1d4eb4b71966d3913b556abf1e34c7022023664664c5a3d9e8678f3886a3ff40143338cbbf6e1a39dd735e6e83102ab54c 3045022100f3eb2b26adb6d2356a3e003e021c8cd4e02ba38e2af5409da9ccf3999221fd730220356446531dbe677d3490d9b4396092eb9cbc21fd9d3b15d310b03c65e9136fb1 304402207850c544469f160908ecf8ed330115aa3a4e9597a8056d1bdd46794dd6f010e602200c81e28b740f0e9ec72cb8617c2a5dca3a7126888c5ecbfdb38e7228289272d6 3046022100905f1c0e8d3294f4584baccd266b0c05bb8a5ebc3bc9e7bf2057b81f91dc7d49022100e2258e9b797e2f90cb58aafcddf1cb9f54371f2649733d8325601bc8b6c624a2 304602210098f746259d1edcfcf85fde5cb3b1180f61744dd8bd59f0ef24697db39a8a0e8c022100f1aea09e77e14e810e867862a4ab7f8713eff34b3488c19b9c147a681f56aac9 3045022100fc741ba82d9b12c0b96328b005229bdd9ba95e11ac861de2461e09ade443241402205d1e91e4a6682be8166e9e11107c75be75d6e03e56c3453e6e85a9a463bbf663 304402207e225162d86cf955c18c26bb2e44dea28c55f7b1649bc69670a9d28ccf2ec2a702205fa94bacf69d05743cf8ff4f19c5efdb8ddc53eb1cb1c9bf106bcca2b5176f4c 304402202499a4e8040685b137e13845dfe152375c2540c0003c85f0be2186442d3730680220064eb8c3ab0492b74ff1356925e742b8971a7149ce6730b5663687a489fbad69 304502210095efae8e4727d9ea0a8e1924407173580aa97507833a5bea77db92e7132ba75f022009e8f87ec16d0aabf8297d8c4481c5450bd2e3950fc7f6ea662a5d3789f74e1d 30450221009bef3b1aa397594dafff1dee545463496757c764b0010ac3fc7752bf8aaf660902205008405d077f883edb14450438ce3dfcb4dbdbbc3d71ec26a60faa5b21a1ba36 3045022100ca3e856fae91243b60ff7913334a157c604292b0bb55ff9d3ddd67a0dacfd19c0220058b793a1eba7033229f341b07943df496284f9873d182e197417c25ae086d87 3045022100b80fa0c12e0c6656fad6070411abed9762e8a7307a2d00686c9812b65b38691e02207f12d3f1720bb0af03fb9365b89d5abb37cf53d3f7da11de269049264b790a27 304502206c169d439af46feebef4464daa402cf0ad3bf8a02acbc79b6959b95457089ad0022100c44f81a8f92182706da52d48285b5d319565020cbd7d020d72676fcbacc5c39d 
3045022100de93ead559501578331773928103dcd095526053d0fd8fc9deec16b72beeeda30220506affa9cbffe859470d3593a1e9b89c225d875bb14ac9f3bfd74e09c428939b 30450220132f973b03cf19f7a0d94e9e4d12b210a7be0c9210be89e8047294cf2054850d022100c79858b12c779a7d44ee6891512b43d960e291a79e8b293cd5762b2e94a539bd 3045022006acdbde5ce81cbfc669c90e913af6685030acc9f838d68d327578f55f7cd81b022100a31c5d9eef3e862e5d711e49e006a828697797f561e5a7beddd1c485f68fc9c2 3046022100f10b063d544f06b86723e72bc1c84ad0726fa42c24b50796cd86e2ba202aaa92022100e1b185493aaaaf9e5a50718c59b22b637ff7defc3acbb04c0a91be5e85918e55 304402204963fe99e0c2e94cf1837e41c748333b2a443d25ac507ad24ab41b229bfa449402201723bff30410fbdb4bae8ddd58a104cb71a9b80748e490210808e3c9a952712a 30450221008e87a95ae5e7ccd67ee9628cd42aabfaec9ca715ce5ceb1bdd62d00dddafb625022008a5e8b4aeb6d58f91954bfd10b22e3dbee12f314b68750ee7e05ec5def1c864 304502202ee55f1d9c67864dfbd9182ea140eebbd703252d8c658c5c904effa9ebb0428d0221009ff8597cb5e7cb20af1e4a438cf6500e92cd9b63729517128225514849ccaf3f 3046022100b05932633537d16de9947a0876c2eda6dd0a117bbe9177bd8ff2cb2ed6571c940221008f936f13f8ead204dc6a7cdbc3d2cef26596c609b37c591eb4c91c414025bb23 3045022100f5ccbc6c5253406de3b4ef37687656aa3ea5088109afbc77f61fe764dc5224330220349ee17829d2814523c59af89bae87d4cd91e7d860b0600eeffa9e6e7ada17c6 304502204a109bd4ce20138e89dfa6655810dbec3e66770165c7dd335b1e516a9f4d4daf022100e01537c9594947e2c3d4acc71152bdad08ab841a0d71d8a2a80ddba1a300ff96 3046022100aa022ecfaa4742e00dd51360cbeb91566c50f2c664bd2b7eaf4cc2c210e8dcf6022100b5fde301813caaee08a9ed57dfd97d3108f5304dba4170aac33d1cf5e0508ebe 304502204e2c0eaea2a286b5043b99f79ac8b20b9f800b336d796dd57f6a2d229fdcf596022100bde5ad2dcd26b73ec0bc018f550d1bb1890979aa0deff64519b97a9ccc3f3f4e 3045022100a29113d81595e8496a65fc03215faf5496bb29ee7faec31ccf1d7abf87b4e5fa02207ba261a6e731e407aacb11f66af39a0a1c6195f43a852abbccd4912e67fd8c45 3045022029c5eae1c6e44e6113496f6d708b36a887fe6495907658ae1c1d75d18e71c42a022100fa1adb06f3dbf826b8b78c1ca235bf983c0c2e484fe13b3639754b9600e70ab0 304502202cc497ce5f1ba26aac7b42f556a1e56bc8f9fe88cc8d05957d87898bb8999a81022100acd810966747ae8964b8b236291ebb05ea2f9839548c3c5ee5271eba4bedf6dc 304402201dd8eed25094e158b260f751b142265709327c90527e9beccc63fb8c7510066c022056f9fca1e48c8243719e5b8328df24a83cac8bc36a14e43b7473b649f7f04c2c 3046022100d356ec944de22871e1a65ee9d07ffc3b373ab74dba12c196d5665406144618de022100b76f4b99ed49201222a83c0b098884ecc7d6267f969acfc1e7fb1aeeff09e4db 304502204ef30b18370b96ac3fed27e5249680c8a30115f2aee7c6011b0dfa5d2332f205022100802b0248885c593891fe1c19cd2e06836600b3b3da0fbf23f13287782ba0fdfd 3046022100a56ee7f7516617a518a924745b5cfb169ed59ea9d93a04df8b0ce1155544c459022100cf1896508147f325f429bd983c0c95ae175b17e3557c9729b114994dbce5aea7 3045022011e8c8059135022ca25e4a629d43d6c88d660664f1d7ff744c7e334c3899bbca022100a0700be2b85331bd0679581476371720efd56560986ef3b7a33724b2b08ee012 3045022100c26c7675acaa79f2c16a060172ed548bf29592399034f3d7c464711b65cc19460220204f33575605a7022361fecf1434547de59a3b9b23d99c449e87f5e275bdf197 304402201f574c375ff4ebca8e6609eecb885586b70863deddd16233c3103dc5ac3fc6f002204dbe8165c4f8b7c0a6a2f8ed56c02c5ec78f9f2fa52f283a09e8e66b5e4e16cd 304502201782ecabed3c29bf6f2fb61062fecb1108c559f228163dab2be4f268d5650520022100c0e7cb98e1c45525556cc953adbb2d07f6de2f45b7322c451d2ad1a3f1496150 3046022100a6903d99fd59115781359d2f9779ef0c4a4572dd460b0fcd19b60b17541aa6ba022100a84d486b4c28526c634bdd1ed1b3b4fd1672e5d35c2f58cbc393222756dabc35 
3045022100bb47bf09b3dc92817caf04e37fae6d0da78091c88f8909540d99fb4766ef6d6202201a3721d0f1c8a9aa2ef1a28f8d57b3dc9840ac0548e6452e6a61fd69ea5d7c96 3044022062e328d937812861406194a48e89c9518c92635593b79bd690e3c22d0369c8580220418193513f1a4e1ccfbbc37d52f4147c8118ba34e9b9d2cbc40df6f78c430c4a 3046022100fd54c2090e8379a041732fc1300aa768d80142060483f08f94de5e6d44433b4a022100fc4e769ff39e5d704c740305b70e243e1fc05732d4dec2068993c12cb0baaaa0 30440220335ff81d54cda7963f542a54593711ddff7c94b39388014be16a19d90829ee10022031d0e23db4d330b90e6c09f710d69ba889b07a8a1309fd950ed67b0e3279adce 30450221008dcdfbff1d74810562fc52514a166880f2eeae88479c5f6195b3e80422e5eff002201d9c34d17f83d9d73c27503ffef0a255569525afb5aa9d029f9c1b5e55797f09 30460221008331ab35fb2431d30131852a71ac4f246db924482d010345d8d0cde23b2f4063022100807f060f26289e1e1e9fe31e0ba76f49bfab9220747d9c1ee083703fd3ebd09b 304402202e651d262c7b9fc4e33ab4c21ef87589b67ed7b966c0c04cee52842f8dbee13c022046db79c654a6300a936c7f2f9b184ff49a61563d193298d8bab3379a708361cb 3045022033b48ee4b8546234ecf21855b0cc7e0906cb5334ae25ce4b2e932c4d281f3eb5022100a3a7b1d2726f6366daf0c47b027a3f749b5088f3ca2084a9b631908c0b4f2ef7 3045022100f7b5c3e698d5864e70f741741e14a3c4dae6f66472624b10097b7158a9e96a66022017cf04c57bd1b31d637655efd2a36ecf265e96460f24c77565311887897d3a3f 3046022100912a68ec3f0a17db61e6f63bbe50efb45d45eeed5e078dd5fbeb46c43575361f022100abc4769c60e303658d4a1e3fbb55c1157b140824ff63d3fa6d2462379447af8d 30450221009c29100d59e9304f52ebe21019a7ad11fd57f6b594a44af4eea8a17bdeaab86e0220380cf833f7d47ea2ccde43588f45ad043661d3833e0de4e105d4936a0ec19b13 30440220747af721d56ca02ac39090087239c1916ddec78b4fb37e7dd655c69508427265022023973e30292e466260a7c2deec60d09d6030f196b11d9497d7471ffbee6e34fd 3044022019682bfed13763adfe9005dcf0c3e35e5bd8d823a9d0a343607ce5cc0ec58ad702207186554a92cf434756bf62116153fac1b954562187b104943088aca1eef69c89 304402207da3bd847601d2df6d35029b6b9e28e8d899b4603e5968a246632cfcb8132bf20220326e053968c872938b94ad83683a51cbd9f0e63c562442e435ca384958720bec 3045022018898d82ea48046e78ef72309380ec1be3bb2d991c977d0226703ea59943944602210099cab8b14665fdd2540a7b18bf14c423989e4ce3e7457d4815a402e4149a71ef 3044022037ff3356daea43cf76326361f7d6dfb6d7b06743e3d62e73bdc0da3d07adf6ba022072f8534289919992c3cdd7e078df4574489288335538eb73c5baf07b15d8c8f7 3045022100bbda76af274335929f94ecd235d953850859e126ddaaa5b31e5f8dd55fdee1940220221b9ad8d7783c01d1fe38b738913d52c774ad1958cd15b87715154abbafe15b 3045022038fe48683b1913d903ca9e78309a5c43cc08f89ea008c4d17ea960bdbfacf44f0221009153b101471440346e870fae7716840951c9d450404364e9ccb965344d561b0b 304502207d28347755f595df8d40a7f09b66c1d4708ae3390c0fb68a8f2d54415a74a31a022100a14ddf3e288e0a92f711cd0021163b9b9ad71291ef53b6cf7491e8715cc8b197 3045022100fb0b6d39570e31ab1926164e5e589e069169df44675e8e567179e2a9ba04bc000220788a3ddc62ee772f17adeacaa433478580e3d43d39774d2714c5970cb8eebd8f 3046022100ae511da3d9ded311591e0c9ddc8fe37e60e98a17eb430c8d423781a661bcb8a1022100915291a70681a38532b12cec1182b28be65bec86cf77e82f14eae036ff21a99a 3046022100c5ad4cdaa262be4c9fca8c9424230eaa0fce7f03c8deed3eac14c2fdf8457fae022100e3b512e6d116562b3f34fc2469d53fd747988deee8d6d16fac57e6baec5f86ca 3046022100d0c93cd2dbad209e44a82db2034d8fd4bf2859ccceb348472b74bfeda4828729022100d8b2d8180e1c7af3db4e4d00faa61e6b8ad76db35e8662a56dea5708beb88f65 304502200f3d4cc1c85922707fcf1d97690cc9fcf3bac6967be5da5d06123c403eeb281302210087f6744d2d628dd07910bad4bd118497994ae9d2e6fc41b3c04e47cc8828715f 
304502204f3cc4b3c8a9073d61e0399966f279295bc826440a8ad49fbf4d09aa4816b914022100baaa38b60ec4acc4e69655afd3de9b31bc8c2e5fb3cae9dfaedabc0db80f6dc2 304402201c6a365795061cb35bb1d325ebed6a7356fc613d789648014170ef9597baffb20220225a07bb1a945b854bada8da42ec0dc1ccfb737ee8a00c1d3277a33152b459d5 30450220454c5eb760ea5cd4f0f420a035d0dcb2b82f986b647b01e9085d3f4b61ce6ab3022100da195708e78a640e753b0a6b8303903d7c34efe18aa4d8e4364c9a43d1f0750b 3045022100fc18a6e7fb433fd3b836d5aa2e8e4da89eba0a3d3b6e18ee1efb3fbd2ad9de4a022046bd6cea04948f444a4efbf736ea424fed5481ab062993f64a88de4053d6208d 30440220030240508bf5417c96a367c986f1150f01861bd302f98990ebcb3db162d075d0022074ecebe93f42cea30eeb9f2226f505e0f64766d66c39683a32c27947164f3022 3045022075c712c1d29880ca2946c7ddc233dfc81c895a63b0a6d980e7b70ab12101485d022100c3ec1bee346cf7cfa6982b730f69a4f7e278ca5b4dcbd42dac091d9a6d52a464 3046022100e7272a9ae98366bb33cc535cf8440e93ceea905c7ec858c21f6d2e1282bcc3b402210088c8d3297de218f88b611ac7305d939eeb2670bc4b154d41fd98abd12402b1f2 304502207bfa8f0e9747daa2b9e63a52642a77898a160c3fdd84b77611a745bbda9189b0022100cc131f94e2df87102e5e92949757a4a1e03da2140a9db4bde532a2c65a932017 3045022100b7512e2659ba7b00084947ca66d191ce640a046894ffc159241a13ab8393ed43022064c63ec6abcb22d7205dad3102273e966e5977d275435fe8bca9bc77ef4c5637 3044022038202d81f925bec2f389117705f8709881fa3f6355c927bc67a0c38a56e3d4a302201273bda66d1956ee5aec66dd9bd6b2cef61492a2dd03687380c237a876e965d9 30440220046f1a2210cc1a481cdead5df9d15efb0b3ebf0746655bd02d036afcaeae78a202202d2b1bc037d8ca1ec72157d8387350ca1923ac81ee1a7bc1ab6e1d693fa37165 304402207089bd9f351f4af967b59df1331c1560bf91d6aeba6b2abf019f51adfa53f3560220380bbbd1b47a86d03e92472b2a60971c6b62080424b26bba7d7475c3c831a699 3046022100c6061f83fcfcc29ada7edda8c1086c65fd6eb3d5f593a8178543d726baa920c9022100ee3594411e268784167413f7565e3d9bc46f34c27648a5539224641f93c527af 30450220356542ac80a1e9e762e74dce68c2d640f74b15364cf11b99c63eac167f64f276022100c5c2885176649083ea766647202acba5c590ff2efdf633368c905253238e2b2a 3045022100e24ad4d1377cf9be599825bc031e7a1076af59ba33237863ec16e3c22793038a022015426a737996965111e4d0150db34033ed3ffe9341d28266d2a59eea0aad3351 304402207d68da08d01147edeb787b48fa412626bd3106d223455ccad16d49c3f32131c102206f5e774a07e076034386f3fd5ccca49bb0ca53db17f8bfbd254ce6aa64765638 304402201842b01f8b925c23eb3bf73509eae19cd623f96cf9646ee989751f87f6b1ee6e0220799bb9ca84a49e5e0d85d07c828d46abaa4325581922ca597f2d9efbffd751cd 3045022100e67dfa2615fe47d5d714ef98784d9e1961563aa77b2fa91bf4dcd9a1f4f337180220448614b27e81dc563d440fb9f37cb6bba6b671341e744277dca76b19668792b5 30450220741f19c40394893d68497e3adf2d0014409e3c67def1e7f0851af8188bb9427502210095467d21b1129ff26aca0fb1397e4b3fcc50627953816a50407e23a7c7812090 304502201bbb3047b1bc858d22566e5323b42bdb0c54bee048237c53116ce0faadc3a4db022100f493fbc90c9871f2a82d95f669859d41131f047080064bf6468853459d2aa099 304502205cc23c3ad4398cec73b1c21f149644548115494324c972b881423a5e7c57844f022100af4bd167e85f28953eedee14d1c798112a8f9c327a05e70e2e801b5c488cf5a8 3045022071ebe47c2a23fcb28465f5f8b042a0a5a172b6173bfa60c9a6510e80b6f46882022100c081a3d0270e63da296d68aca58abb89bbbad212d1836747b0a1740c5840a1bd 30450220416a7931f50f1cc61c1d91727bf5c4709d316ea7516e030c0f098b1ef87d93f3022100f22a08eef59123c8b14e20502d72216a997ff3779ddf4aec0de39cb8c91a11c6 304402201887fe09ccf1eade1849c875240d5d3c27af39802eec7bb1d26ee8c70115fad702206eea675e3267968ccd3dfc49981d99875857f66c083f68e1cbf46c00bf16f498 
304402200362041f0acd3197c767962ae2b5e7713451d1c6905bb72d6fb19eb1cae63b56022030eed765a81a443b50e9fbf6366fce6107f9cb94ff0191f7403dffd5bce9a27b 3046022100fc8a3eaa66b2e6bf43cf29bbf8faa5225d17612f9f53d2913e60f73d806ff18c022100d523fdecebdd758efa772f92bb97ddc23d96e8a54d6b39caaf637fbbe9f71013 3045022100f2a9fced1a1f39cee3720f0165edd4c02a76c1ef541e27dd537ecd392579355f02202376c2247f15879bae514bb4df565b1049cf1e9051e47db82c4b6f554214bbcc 30450220446acdf5c6afe456e7da73a353c4ebc2bab2b96e9344ea9ad6f6ff029b02f131022100baa7a0e63e391eb4fddb04b80ab062bf49261be83a48cf03a5647260bdd82f30 3045022023daefc01dca51aef34f8f8f3a0449cb8bb3fd295888bd54291bcbd8ab451c0f022100d94f0cf3648eaa92cdae92491e609ef5cc4f172b19e263c5f68a401752608869 304502200b56b82d35b26aa60dafd9dafcebd823a61af10c3d26cc0465907f20c5f0e68a022100e446005c75d03742891c667e6b35f5e6e4f35c972979a6f613e6f5cc105eb06c 304502206058a74d64012cf8f9f8733da40ab9d9481da27f1495b6d81dfcb5424e02d0ad0221008c080b8a3a19fc81b56efe944f70c14a1717fb036ab2b593be2ca90ae278474a 304602210095062b3f0e5086ea3697ea15251cf7818885e2efe0582a68b76d4c2bba9a36c702210097a4104e565f1b990798042c80f5a48adc127f2634f830a7fd9f4df1ba2931db 304502202c900d54473b83f12cda6965ead68eb03286e5323db3e2a711db6e4beec08252022100a876d9c94738fd76ca71ad993d68e2c70899ad22394df317c7a7900fed267382 3045022000ccbb8ee946e460466fa1074993265c015edd1fd1f269ca29c1eb1599d44d65022100b7115d569389fcc80dbba2a414e6068c8e19fd8b8be6c7c06e050eacebb3dab1 3045022100a43bdfa66cdfbd4f8de5b6dcc5d2a6fb90669517acd1582d2bd7323d591193fd02206c8c5435b960b6b11e56992fce9496dec89868c70cace80481ac991116e5fd50 30440220364e43a606da8011fe78c9960956c957f4005e6fcead5c0c1fff364d8bb4f012022004ee5e33985b2e0412d65bf81c8f6046dd02f4b799230d59606b4fced0d3ba70 304402207373f1927ea9028cfd03e002c9066bb1a0b13d92d3730a37556f0ba5c589e073022021cdac3d5c9c1e9a1c61575fd155e6b77f5f8cb380fbfba9b49e514373cd8d14 3046022100a7ece64fe5f8f4b861b98be3bdb66c899612d61b3a0a7ce758f8b75f7950eabe022100b21c97bd180bd8a99ffbe08373c88b1adebe7ff9463b0fbed6b03401dc674e30 30450220441bf543c4f546f06f8bbf0d840b70058c44ffe3aa0a155eda3c5a357afd00d6022100dcd5630a1cd15abab8b1924dbe9d4a12d49173175480066e6caa1c3058028867 3045022100df761f79a67abcea1068ba1738529a89e974ee47fada3eacef1bd5d83f14089002202b8ac3547f3b1ebea776de171eadd6f3c8db5ed966eca23b892ae544137832b6 3046022100b2836bd1314c26726071da6e5a1c2a9dfccd7265b45e96ab9c718329f878469f0221009fac406cd0772b5f6929c3a204aa7bb4d4d737710794f007d0b537a5a76e4b0d 3046022100e07497b1696154c17777c7fb6cb8c52fa8b51201a34b62f87d7167e3db7c7a38022100fa5c22a5402aa128327de470dd022be2d9aa90d34e17c37c3834e073134c4ec0 304502203f801966439ef0481725f64623fb743c422ba9042d4623edeb76285eae901589022100dddba4a7d39f70d1b50f63418d4a731a0748bc4f068ec1cd72b042ca5873fe0e 3044022075aa420a5cc57006251cc4765c0cd25c704ddd396903c60bda64b682eb7ae75a022033e45c26fa8713472a80aaffffda68b79a92feb07910d9275724c0b37ca6cbcb 30440220530c683e9ef49b6ff2609c277eb18d75a45ae51a5a85de0a935d8faffcd36c2f022061c84cfe55f545c34061e882342d737ebc60310f32f06ece60e17e69772cc246 3045022100c41bf0071248436a5965db42211cd6cc326225e610a534a8bfabe5a1988a534a02200e58856a397bf9a57140c5029accee0d173e40eac6af4c3b35c914584c3c1315 3046022100c951b622c7cc1fe76a7ba15efb114d7f8bf6bce91a6a5f64512731c58fd940af022100cf5d0cdabbe4f50856c03ab791575bfebc52f2699137391e1e59a09f40641019 3044022001351145364eb27b2e2eafbc9f2a40f85ad0718ad7047f5b2239ae584b786643022024bb65081863625aca02ccc441aad1ede42186418a78601089989c6198c2b544 
3046022100cd074c965202ef973c0297deb0a3efa539ed9466b4268302c2e393695af192f40221009a6e7e2c62d244e19f791d0b159afe319f84bb5322ceae4839a8376af398dd7d 3046022100ef23493f3ef9fa32bc170868a21910a33e5cd4ff83c48ac70f938e981783d8e4022100dba0cd1eb472ccb4a56864ccc0f0d718f908d896c9c29f499653c2c8f4c9f086 30450221009b560ecb91c6edd40f3b70c0ab0b8a04bf4fa100a549d9a9bd1fdacfc017e27602207c7d833021f7445cacb6b38acf4999bb19ffc022fca020c37e75af9e8bf70703 3045022072a688aa6bcf1bd33672032e30c0790930c3b3cb5f4eed61a55da8aa14f1d916022100a32097e2c91dcd86bdd729a7ada3c9425fbe3685bcaadf1f689043a91d442b44 3046022100f9459c95b143e9ff6d8cd3f2b4f7b6a23cadca629fc14e71906c2881656e8061022100970eb19db642a927a3e044266f2eb87a7764bd40189a0f43c25776bc0ed86234 3045022100ab76a75a83ec8acb14b79e92ff8b95f3575acfd5405a4aceb9cb4d203aa5d39202203cb6466e0ef690dea3321eb21b541e53348be033977831a9cab1a160cf923e30 3045022100b2c01db1dcd49b31a9a83e70f6e211c3b706201a504d504951d3b5ab9779797b0220698f4395f0fa4a89778ea52ba3edfaf0fa044645e18bfe8070c697c61bd2cdf5 30450221008d77924b671c2e8a8fb8120200dc9ec6e98095ea99b65add13c17b596b52a0bd022020bbb1c72c71f2b987bb5672588c03b32f8e1de547498c5abcf6c6df89c2af8c 304402201423867dacb46adefc305ec8731c42366d7f62b467f99eaf4f6b96d91b29ddba02205781c1e101e5ad766658a046bef1af18a46332dfa56bf1216d6b0748ca813aeb 3045022100f82fc9e0e5783c293ff3453056ac1d9aef5b671216c6c47bbd9617b81f3b4c1a02205e3123450852b4c493cc426273770f2bf898d1f6088321c2e3919164188e1a98 30440220465b8d7e9af556f86259aa886168796aeb5c0911bf42f8ae32008e62861063ff022071308e3c5ca66435f0e845088bc972b0d4a20252181b5beafd247eb798b92ef6 30440220429d1879156ccf995a163c85f13abf942a4f1efc516cd9b8f251d267f7b23de702202ed96b5f5486b011966a1badd014c63dcfb52c52f3679c0a864082ada7ccda70 3046022100f943d8fbeb5ade0888eea200ea3142f04f96049e709ab097499d0a95a1ebe0400221008ebed8ef2387801086507952664118b72cc51bf505c3fa3bd669c541959765f6 3046022100fd50a779ea5309b00c57c5c688658874ad0e89b0f7753c657df0f7fcedfcc9ad022100cf85be9bb11095a85619d33eecc7b362e8dd371bfb7137d774431efef82022bf 3044022050e53ec434d8a45cb075ad3e4de22442c3184760d9abff5491c2f3006c72ecf4022002a72254cc6a90ded67245d2fcbdb0eff7ba40a2ee2ab4baefb5796d562e8462 3044022029729a0b2d6ac16360538648a30c6df3b6059e531ea1853e8d85e784b763b40802202b0ab3faea50d15e9d203b71427580f43ea260bb5c3f6e45dab9be93a6bdba74 30460221009f435f9ce96a28877360dca028ae48e719efcb4d8e97dd414eb0ef500f5fe54c022100e0fdef95e1f1c66f21345a647549da327ec3fe65d756c38bdfe77e430d813c5d 3045022079a04988679ba6527b0e537da189cc121073ebd9cc8dccf1928f185b57ed324d022100ee598597f7296738db80e2891c6ed16ece25bc1636613029e6bcdbe7f586c07e 3045022100f5312a6f56af2c462b92d81701ea97014df0bf98677545d990a7dfcd14ab71a102206ed83f0db19af7f3e9985d5ee4aeb75179675d5d57cebabf2466c4b1d27ae81d 304502202ccf860d449d87aff4bac3bd14896076ba47c1175b051593a50523477219dacc022100f8398962aa6c240f60dc584179633bfdeddefa7bb5baabd86b3c628373c61edb 30440220120d73ed99568939410534357f4e7b7bb7b3584c26e24e9131162cc23a7bb46402200a8084eed0db62ba34cf8a77554bd97e1be98522df5e269b57dd22c9bf7b286b 3045022100e7c099b2efbba4a6ca15c252a896a7d1cffaca82f6909164638e8ab18c9529ed02201cd53917168ffc42c193e38bf5e83fa8dd8c827289cd3d80b818c33771fe19ce 304402202997f18357d371a7f5a93097cd67c319ef46e9b60980e332b247be9b527023f0022043edebfd744eff2ced03d358e3e0b23e93aa84b75992c44f025876e1cc0f4bfe 304502207c3af4088ad369d5114c7bc0e365271b3b3cb748032466dcd89415e4cfff54db02210089a9429783e81ee5fb88424c59495fba1c002d8e16d1718edc548f3ad6b54919 
3044022057f29aa2fd275f6476287611610f0d8aad84843a841e416dd762c8dfd53b1d7e02205d3b940ecb008abaf672f6d93eb67ff8405abf21dfafea3ebabc7808687395a8 30440220103b832c349e5fb0dbc0aab468a7fb265bca91417c50269691cadceabcafa1e102206d9e67e0b1330cd8e1de2146c7b7fcbebcccb77fc893193dee424b6c46fc4d40 3045022100b617c894d8d957c53f21567277bd5531947807274fba815bec98efbe3040527f0220243f31811a4f2d99460d036980102f4106ee7688f9320db39f67f8dbe8e870e3 3046022100894c4c5bc66f3cf64f526f27923df9c51adc4c9350872372f5109e54f0d48f7d022100d8237afe4ac97350a955f5a327c7be59e008374124afeb3d1c308e6cddcdd9bd 3045022100a961d38fbddb4b7307260200f965c0452204c71a383078b70f28bd08514bfaed02200ecdce1c4c016e56f4b319f6daba96f609924fe64c9559e3096866e477af6b48 304502210085e968a64dda8c3d2173484077895ca61e11b2163248a0a379742723cf0ae40b022058c6905a105462716cefc2999df2798968e2b8a9b4445dbeb7a14d991adadcbf 304402202a610f5ce97f23119daffe6c46dfb60228941ea46d8c9ea636765b83bd10b926022039adf05561f5ce3ca2f2b13f579d005a917aa41f55d65bdd3d874eea7bcf3921 3045022007aeafdbd0d40e50e05458945a5d0bec7691df16b5d015ca85de5cbf84df8e1f022100ed530037119bb9ef0a82a8f8d9f6c140b95760f4b9c2cbf9305b21046235dade 3046022100e1b2239b01b848b62071d62782960a290af059a777ed22d5f9b0d444773883d7022100ac80628b7668c03add9779ed683dc88ecbb733efe36ee7e3444d6c8ba53023a0 304402201d2c78032de3cd944dc89bf98595451abccecd2844b665db0131ddc251d5af6002204589d7043aa1e1050fa86644a44e1172b850d6f8c58640ab055e57098db7df13 304502201c58950f5335fc8a042fac2b4ae1eedcf96c60da89cdadefbb48066667e81bf70221009985cdea17ec09ac4ebd563be38dfbaa7a4b9e5a1492996611c29bfb0e8d0a85 30450220614ca8b8fd3cd37aa043cb0a02c985801ff1b6bc74872d51c22a13e0910af87c022100e8d85c5b60331ed5af8a26b6f5050a385a611bead595839d0109164da598eee5 304402205c5e5ac40262ce208239c89cca10a5b470a37484c2c219bd1ba44b8ceb9d88a302201bfa1d44507d8ba0a47f108fc88f22055b5860f378645233469e09a18913a1bd 304502210098582e10196e3801abf5d062a07cdcc8e854545849e7a77e43238be08c18dae3022037c82e288ed29aba95b05e1a28fff8a5269dd3e1bcbcc970f33d49de19eb470b 30450221009ff1e6b1bc6e6dae45484812d6d50c24b181e0e1d9fa9d6e54d9cfe6dc58b81402206c68046c7401a9742cf75c3e3af5e1fd8edb9c326a3a52c346ba6749d3de85a6 3046022100bfb13e35d368100bc49cff535823dda951f1edc3f225caea685189364677e475022100954ec0fe07914ed5abc8a620ab5fadbb2a117438d775bdf1a60a53063b242ba8 304402204dbddd8cb785bfba5e44d43fb5fc243db57a766f45669709ac78f9b00d83035b0220548912008bf06e8a0e9547a423472415872ea853073a5d30800af51935d4b4c9 30450221008859a34e144856b88d0699b70111dfcb274e7783f660a8cd62c0209e04c94a0b022030a50d9bf9c1ba184f264e3ba4c15aef77ff7b7bce4b1bbf7f56ea5c54e83a16 304402200fa3655cbb5f3c04444b903245e97e0b9848fa5baca9f424f77ca1722c66abf502202d013c89cecf83e4ae959f3b2573944b452a5ab66dfe15e11a681310282626b8 3046022100bd9acd4308c5f704e67e51afc07b831e76004dc4985f001fd97ae3653cad2603022100c7a496f93bbd8659e9ce29830f04f71fb79b6d325734dc316366e1fd05b97345 3046022100d912860ae5f2101a1fa4b8cd87905fe5e020a1c9f159f32699e621eeadbf9a20022100ef6718c2a1ef120201fa630cf877de1ab2f7ea66dac56348501c7e75479981fe 304402205e3cbe3b31e91c03b6ab42a2af701c8f718f807335f8407962bcea9d4230629b022070c5c91e6c354386a3c7b3475ce6452790d7c40a3b99516ac149da67bcf9a044 3045022100d3b1d7dcd2e519cf1eb899c1473eb52d6269b551a16e47f1994d61203f7ecc6c02204e40c9d03c70b1617f5011bee1b195454c10655af4800cc5be29cbf656794dd5 304502207aa4c272f335d14b908f7490741102e9ca500638a60ebbed39e20f47f022fca60221008b1d8e2ce5300e0e8ab47e0c1e9c58e782bed458fca9a5926f2ed8f2b0c124a7 
304402206aa8d2df6ef9970454a24fd1711940725c1f32629fbf05a87ffc50bbc71dd8ff02204031cbba0ee3343645bd41e5cc16c19e8d9bc0c0992b5b0c9ba23d0dd184771b 304502210092690e3b8c1cec6789e3b749ef6d72b1e15e7cc102ec12d15be8c2264b578ba802203c93ae90ccbbbafcc3fe4324902bdfb8faf26af55f3a3540e07a4016e2198ce8 3045022039f5f14efb1c70f667c8d8ad0a508aeb708fec4aefc4c9e7638878e96f6674190221008f67782e38bf98745b510bf84e77864a753af81c8f6ab9b5aa2d696befb63e9f 3044022038acc411e2e3f4cbb863a9a68994e01853975258c6e49dd00f54bf0c0c2b393c022013dcadd4d93ad137996737e4471e7b59f0e61ddbddb105ec4e96e9a4ada1ffe3 3045022046d2cf34df6759d5b3d2dcff677d17e474de263f2b472f88f0a905a930da5534022100e3774fa59c491bb2eed58fcddc67b5cacf0b08a9c1ca9e8e2565fa9deb51b5ed 3045022100c7d1a5c61ab960a431e3d09b83458499490210e3a82935a98c3ee5e138e84cb9022006f0a3936b1d7d3afe6e7a1b6245bba18f9c7809e29d76840077fbcc2fe2024d 3044022023bd533cbc871ed7466d6b566f78bf9ab488de5b4d624164549c4b16f897cda202207e19453ab1be18ed35424a4851c61ad1b7476ad7fd5ec1b494d59661456c3750 3044022046874e6c8fa1ef49df80a95837868deb4c057258c2c18bad2adf88bd2ca60814022074d4c57215b2afa4e9599a2bf9758b237f9190740d362c14ed8d1b7d5ca81a1f 30450220493d7e863849670b5c23db53a0c1ecd3206adcb8ce5bfd7534e8326e2f63285b02210084e335bcde45d9a26aad79a5451fd843c4e08cb35c8cf6dd7c5353194e09dd1a 3045022058ffc177a8d8b0de25f48d22da7d5cb7d71c7bab83964077fb0610493eaade3c022100d70055df8d91d14425aeb40445fad97ab4292728d6e4492fb6cc085e3bd994c7 3046022100cbaaa0edd3220229a0e6ea1ba47c7cf25d3f4d600e57ccf73412b1543a4f7bb4022100f9269eb9a0d9c987739b0f3ea941829952fb984e50064b5f40fda6addc7f1904 30450220489d1429905eba17689cf11acca7d3eb0e18d364b1287eb69ee6a8c0707e65e0022100aa52e50003f2a591fff647ca2e342f1fa5dcea65de48c32b590189c3e178a66e 3045022100d0b134af33489b2191f135d6d013049f3ca827214ca3ea808f91bdc5c6158e98022029fb984abbc44d2041a19e317a783697b53005166d6399969869234fff4b4fda 304402204b79d4e105d87ba9b276107204b01699b34861f9960c69a367be94b71b5a1a1902206f33e445c8767453604eb8df9dc8123eb7e75ba339b748f1f82ceb696a85887f 30450220336670c5d24e1fe065452280544fe4e5bcc80b4a7da718fe6fe0a8d8ff72929d022100e46da3c3487fe7e13da2c1634e0f1cbdf5eec9eceda723cb6b8d24dc5e09b8db 3046022100887f5e16773df1ad7f5aa44747e4df76798312fc2e2a578ce690696784a58d7902210082239a2cbf17f4300c3353c6ba714a79f51c5a03b3a14a019ebd92b754f68b7f 3045022039b3d6cfe65e8c6c55a5fd2da46d6e116ed978036322e75ab33cc4be2dc1b0ed0221008f39ff375af4672c934b946ecd04815ffec1f1462479240fc4adaa050f8d92e5 3044021f02dd37b80c6634a54aeb73a79b19c68745222fdb2a2b559f3a53925018bf4b022100fbb23f85ce0037000a7e3b94c0f59b93ec0eb54e9244e389b64e93eaacacf55a 30450220524decc513d377623583fd2cd59810d8aebe5272a8418c2ec80a143ef1f6c86d022100d5eb4b97d58c82fc6bafaa1deac4b5b6775639e11c78ca8600ee3a123df6fbb8 3046022100bf520d527a0e0a5e345e84f8ee9f29150dedcf60d377036b0b655a189f93c68c022100f37d2cbc1224079444a0aff9e7b3c17c31ad2369548281e75ac6a5e84c737c98 30460221008302e48abae71c3a05c187d55556bde2be3ed13f73fd9faa5e92193ca73076fb02210081b962fcc121d3f39df4d6a5dd77224b991d7e574455e24183decaa9072d8a39 3044022044f17ea931d919c8715b94471b158f4fd54b1eee898cd9fb925b979dbbd908cc02204ea8795095faf5fe0b35db1c7c39c45b87d6e4de4ef0aae6bf703d8ce0cf7867 304502202e07ea410fe88e9019e32391ec2440306b190668476f391fb8f4c2363d9b52c9022100c5124b5adda26244912edddbf2536e2cd30275d136a4156f6358d7d72a9423a0 304502205f94ba0ebbfc50d6361158d8c5f88c87b7cadcb121acc0b8b65c86e702fbe01d022100a4ef77b8e31ff2addf2203bd2276ba963557c19b6b7669387929735776eb9958 
30450220328d339ab46aaf97eebbcd3d9bd41830030134dd1a8c45ef6060d658268a3e51022100fa068107c8859c75628c1688fa3bab6cb5aef35a8386bac507501c0002d00109 30460221008113dbbd2a1cd4b30ee56f33c8090c920f32cbf73c3fe12bc83ce62d9185920c022100cf8b6a58b4761b907b3789cb02856640549853f6c48d41ee62f54f6b9e024eec 3046022100fdc68e89927724cee24a1a5c87a98fb5258336dd10a8d6dd926dd20dffe4f8ca022100db8957dd95c261db30e44d27279463c79884b1e1f248dc8eb79f459257162919 3044022049cf54e183d9a9e24b5e5258294efe1459cbdef7aa0268ab7045ce2349bed12e0220142a732b40a20784b15a7a399dd0f2394e523084960915171b318187ce557676 3046022100edbb341e785258490dba49f385367f37140ec1fc5759816f7368700119f8944f022100da032bdd28315133cf675314443adf62fcaac73a0c0817108d6dd7097d541cbe 30450220580e93465e7bc1dd8599dfc4e55c1e6566e8fb82d1e665d8ecb587cef818cdef022100b021d33e9624abe902ae368698945f6d6e889fd01e490a23acf6202212461ef1 3045022019bea94ed39cde4ed2d4e95b1482e63a5c6185b9d61b5a1590e7445cec6e9bc8022100895a79f3e35fd3c922e1200d4510307d96bb7676887c801a57eaea0507a44b1a 304402205dabc55a0163d69e063c4e82f907ce4fbd607154fecc955d6349743162f939aa0220397de8f62e9bddda0ff0352fc36de54765701f6a656dc0e83812e2a43d33b79e 304402202d468f930f4c07578d99695f46a8ebb5d1d5786ffb4825b1e041d7e976e799ab0220190edcec2be6fae0e09a5fb4a1fda2de50d1e54edfab352528556d56b90c15b3 30450221009e2c5bce3fe2152cc27e200b0b8273a4b2763d4ea257c6897971689fde05b7c102203fa93dd5feba455db608e8a8db279c9248b9bc14c55c993e244a5ab2c4784ba5 3046022100967fa1a28ba1acc107bcc9b96fe7ac7a9a365f865572041e62e3be781680d5900221009ed2ca46df23c70329b2554918e16acb84235a81c86e993807c97b36c286f435 3044022048a1a578193da3b47fbb5c6bef27ea249417da2f6a57eb6ec274f0bb863b0de7022002398e7ce78954f7256858b2d0d9d5e1b89c146c5a73d7548f4cb61e19a10bde 3045022100d5054aa79c06996c29bb1fa4325d479982242da3502a0bde7c61ab8fdf3c0476022026a4dba3bf8413bc0364252c320dab7f19892e1e621890a7c154caa509d01e8f 304502205979daf1558a7fa7d16a830ca4f08f5e94acb5e817670307a244f478da40b8d8022100fdaf2d36a84cd958e924d2bd3f0e1d1f6ea359c71eac0d1aa68c0286424bf8d4 30450221009cf7b90464fba4b7cdeeaa86cbecff05dee2b5f28d5e7fcf5efba03b2ddc63da022074d99cec57e35859314c98e86a574e603804587dff5bf4a46c1e9603e01b4757 30450220698e630f7056655f7f8c33701fdd91ced5a88bfc6ca09149fed46a3085a67e29022100963b1baadb72fdf073b74716f70441559fc47edba908066306ed22bc6b98a688 30440220204c81fc27f0e92eb81bedf372bd0db6457443091112b4bc724287a9671be25402202a9d324de2ddf651984abec84542f79f3b09277cd894032ecef214a618123c54 30440220570b3122eb5b0b58fd78ddd892f8a4d4883c0fc655ccd79aad2c3dc4b886cb4a022035165f85f4b2d378dac2646d09f275b6d5d8b382345547e529e38cda40daa65f 3044022077f97245fbc93c91e41f25e6c8d6ab954577134d54566487995cb979e67bcfd4022070e6c31bfd90f0f07269c03fbf6239dc065e77ac9e125aaa3f743de356e16f16 3045022100b8c335961daa711763347ceb4fcbe0391f4208e8b6479d9a3bd17cb9bc479cb70220778fa8674dfbb48686d5f0009f8fcc3aad5e967e62590d58f389829f5bd9995a 3045022100bf28096d27f188066d98322ae5cf91c57c204bbfd3defe9c4fa3aef7ad7d9b3702201858d975a2e161bbeb135a3063778eacfb7209c2ceb3a358be2d32938a2f66c9 304402201657cd9321e15f507c0d4cad9fdb1305952de2d0afbc972f3b9d37c9a21e32fe022005804f97478730c98f34bc5ee14e1f7459b1f446786c8ee2a31b9e879450b8aa 3044022043af8fac0202bc173e0c858f3a50a1929cc003fb15150e0510c05cbdae48b6c7022000cd3c101047a44b1b9c26fa4c843eabc043fbe311e16cbd9bdc6b1745c6cb53 3046022100fe4148caf6bb190185b6ec7e151e6e240f6727dea55af3cca03ec7e80f1255a6022100e43093bb5fc46e86b17f2893fa190380a3d7338e9af325145a1d5506eb4b83f2 
3044022027937aebd96a60b33248666e0b770746034dc2ab8aeff67bd1955c1d3f331045022035d803dd54057eedd1fe260fada8ce71686e946cee5f94b438707cfe21f16d33 304502205efb80e4f7b08c276046782594bf4b71502760fe4babe1019d4a403274a263e2022100e103a1227053f555de3f3b7051effb45211d29a05fc56135783e2bb0a60f066b 304502202bb4179c8900e6dc288ba1eaa7d1b425e0205d2d770e0b2d359f2cd840398da0022100ce5420b72541bdf1fe3ced307406719a2c37ba94f09b1526a0e2ba75ccb3fb00 3046022100888f8b95d1b9780a8fa0d0ad1b88d33c8cb44af2fd278a97bf81e4f3591e4dc20221008ff82407830393f42346f508f16ff78fe1f448f06cf7d98d8c762a5c6e174f42 3044022068e654955bd09ac453b999df606110fa576cddf04c09c4b62a8e761d54bb129002201ef4bc41dbeb1432a5fc7133e493e50c2e485abe0d8835e0fd4937b88d78a104 3046022100e68eaee5d7a995c1a08f0d4056e0e5c59e3e977455e9d8bf71ec9a6836e74603022100db469349ae4f9e412079c4181cc4313e99494713ed6c5d17df422d7738e2cab4 3045022033ed403fbaeff0e828148ac4a4a6dba891950539032192d2a89b63ce7de23023022100dd4477a452dbbebefc29384739f8f6bcb96437c6e05eba9649b36ec8e6ddc3ef 3045022100c6a6a540f4d6a0608be18851598173113b319dc59df72f8e359a25aed9e352200220515632158ba4924db919fb9aa6f44f2b2377b27f37467484a601f6a4d68bc612 3045022100f309d8eea03d26b138f0135ca18d8ddf0855fee8ae0247a3b372b48410134d530220226274d5e99cb7f1ceac31d74b188362bd6a1fabc48a97123279923ed45c6dc6 30450220385b6719d10d8b4c3c8465fa94eefcab20089c1dbc01143a2cfa00b5c878472f022100fb2dcaf9c9bd1c892fb42b24d8947a837058feb520e1916c015422741fa5ef0d 304402204b18c72bc92db0be22eabbb55d13922390fefc9c9fcc522ecbcaf623657a6a7f0220125736ee4bde148338b36b6754240113429f733d24e01ea65a71a239782684a2 304402203bd69db7fbca6de94c5c426ffb10710a6a38c087967325fe85cd00d3008442d302205c1566c5fba334f0516134cfa0b405718980f9f782df335c5f67ceca86a5df26 3046022100914adc2a986909ae6accbf269a3127aade8d3f8aa1a6083a5d81984ff537aea2022100b89a3dda3bfbc6e090217d087ffe075b3b4f1f15a30d829499f0a1aa11c293d4 3045022100cd8f5a985f050e46bd9015e5e6d2fc64cc06e47130cfcfa31582ceba59d390f702201f00f624f39bd6f38dd699ebd6a6891403226823cf58f4fc511663eb0f657107 3044022060f8d4757be0151b1ce4b15504d0bb19e8dca634d92e0d084e9f835ea23e5b3b02202b77733ac4ace9527770e048132f43eea6eee2ea3f4f2eb99a2a11907de55ed0 304502210095c238e0495b108e5a37e3b1fa9413085fd449de2010051a15a6d6729e7815610220371b07bb0bd17beb8e1b2ae192c709c675f583dfa83096fb754497c6d7bd4377 304502200495090594f25553e3a669b895be78e0d0308a8f42d1a397be5afd070b23df90022100a1f75594fbd6472490805881a3874e997aad509df0bc964efda14376baa96d9a 30450220067eb53da8f60e8f1d7a81a6b517c6662021f37703159d90e45ded3c5082dd76022100e4d987c16d0fccdf08c02ae960df59ce6a9ccf5abaabdde0ce2820cac0682ef6 3046022100ddd460b14e20ababfb257c5c559ff825341e34ce81466df2768d4b5ffb37631202210085326d95651ba97f0aa68d1408aac60613c1e30e4299d8004822622c2ba18aba 3045022100bf2dfa62845eb8c9e1ce74805944940ee7e21d1aaeeddccd418e67090c4566a4022013e2ee3cf23965dc11ab37be61b841f4a18e2c935e3e98c18695c840b5115b50 3046022100f373e0485c4f35d8f3158ba23349f2869def0a6df6f019e8e1c66c812c40aa2f0221009f4a8edd18740412149fbfdf4b61097bc2213dede89ebf8a4e4cbcda053adc39 3045022100bf19077170221789cf8d1b9c68e3b2ac5b53ff93576a10328dda76c10ffdc5570220128ebf0fbdbd80b67ba7f20756aafc3d6ced01fd3154a57f4d0535f169a388b7 304402203ee6b7824783c1ad1f22176291d225d033b18518cebf604ceea6ed9a5358974d0220576d3b17baa21503013db41449ec7e2e1051f4b0d8d375dfbec2999f5d25dadb 304502204cecebf69288732e44a1113eb5f748499c20fe4ce0a19fbe34637670be637efa022100a9794bc0aa512b788711683b500ca9aaeabaaa5866cbe1442691b3e184e2e4b4 
304502210092b942de25ebe2f9f8c0e828fdc88d6e0da07e1160934a32941cbb57ce5aa92f022008a3d62330795ffaa1e0d62d3ab62cfe6233de4aa9809bdb063de21326131414 3046022100a387866afb10c244efe7767cd0e2461a65c29b2cbbdf4233529136afd090702c022100e4855e9eb59e964da77a2879f9645f8114752a07778bdfc2cf127560f9e3b56f 304502204fe26aa48d92f7f05f325266097d0a92c9947807c72b6ebc48e408f9eb03cc24022100c8f6020861fbabf86fe67e799461d5df8c4179571e0b4e29d4e92602c7baca44 3046022100859f7be4cb7376e1ce7998f9e5e695edd3073a0c84d74063e9ca2979dd0978f1022100fb89c5b5a8a5b02c562980022a7e2e935cdc67d1c3ad62b9ad990ed65a238f86 3046022100d2fd1ab1c518c4f5fc0819714ffe6c10f617f564726d3767948d8076b988bb94022100fa44317b6bd585ca72cbaa7ab3ed5834756afa1bc0a2465c95b2f0e3fc889735 3045022048d588ab0c8d1034f57aa10591e22ef3062b97c5a4ccac793dce8b7d5220d25d022100c43409bcc55465d999db460f09d1c96c4cf048d52284e19d2b293fc5b6f3b1a2 3045022100bf54cd607971356e7fccbede40feeead8e407cf6f039396a4b14a2a0aefa74a3022073f62f54b4bf162e67fea84b78433026803e2b14dc4fc5ab0059e6f12bcc5db7 3045022100920d530e66bbfb218c967199788b3c92ed41c6626a7d431a4a5fc7e27de4055e022049cf2d63d26c18f19554d0c467df65b3591434d46b1d9d20cc8e3b076f18a782 304402202bf8173ffe6fe324a8f3a704bd9a366a2c07057a058bfe056b34ebc319f5ff16022051e2ebdeb25bf67abfc8838f4b6a449c5f9dba221adaf8d7f85ce659a30e4b0c 3046022100fb099b344ba634a568a62192ce0c189501c61ca408f1ba9d93d1eb61d8d1e0af022100cc9c6452af3a0c5fb8a127c8eb5b46992bebaff462104702ded5883ec62e25eb 3044022053c35592041e708b96655afb7e679778f2084b474ea40a3fee766fb27117f68b02204a351c92194a51233937fcce45fb4ed4b8834149fe63756106c07aa340fd4e8c 3045022100bc7d5d1ef5dc9693f5e9721c6d10b75011563e2d77b3ea5d46dd22b0320bce7c02201e8887d26eab2a5846a3c9f4eeb262d1131b20b4ca484775dc4070e6abe19c42 304402201eef93ec0e46226a18e6ae536fca163b5ee73842adfd5247bd62b90794c316ac022064b8e0f1b8ef88b7b33ce69ba93f98c493f67a09f0a798d21bad145299d4e1c9 3046022100cfdb30e80f652cff3b88c235ddfccadf1914592e18055f7e8ad7e2421dd7f54d022100ddbcc53c6c48e05db1e681190eb109c29ba2340b8583cb83aa91df99714767b7 3045022100b5efa67db8d73bc0d0514c9bf44cb69aea55452c8bc4c8c5e890d4b3e04bf5090220112310ccba6d2fa18244569b050c56a97153880aba6f2a05c153bf9f3b16aac8 3045022100e86b488c8cd991f84726cca0a06a77297fc68f2916506b5da0ecfc35d938ba0f022037c612fb16840f85fe494d0b376ffd141f4d20ffbbab64e41adb5f6cb02ee9b1 30450221008bc3b761a1bcc1a0aa974a04a0d01d41a17766eab6de24072cd52845f8b8f3de0220681187bf9ec616d197c223903157abc5d471e5e24fe4e5fdcc7f8c02a2fdea56 3045022100b1733e1035965b07fd9a40ca3cd03580dc66497f9006f8aceb025b57cecc8c2a02201418647196f390d09f027e9e5a94aecd7098b369fcccaa0c39130da5c1cabe46 30440220249503f79e57d2e3b8e2207b67ffadc5c0a65f15b0243dda531ea211da00203f0220282d3ce32ef05255c5a6e4a7b95fadee94903825fc944fda72f69cb03381abc7 30440220346cb1563cc1c3a335044d9293c3aa992f603d85d8888cb18fa1c3ed2fcb57ba022061a5058e4dc074fd76437e7f317b52222034d32e79d2564e3423494901c46adc 30460221009422be6a1bf979bc041c532a55a703fc75a1e81b2709a900369f4d2c6c800524022100a74ee46d001a0923f6689dc79c936d97edb33b750568fefa3fcb629510edfa50 30440220391b35b7f53ed46a2812657b4c1d7e2703e63b0f529fb47310dfbea03ccd1e9f022049ce958f966a451a533f48339485897a8346d656dd83c0bb3e64d8da0d991ecc 3045022001eea7cf2558126b7e4a631902ca62fb03d151310bb019774779b87aefcde66202210085b60d24b201a848eac16e38e452b1bd07f5c7154f3b1cd23d2e7009bf0171cd 3046022100d8f60f50659ff8fba626c8933847803323e943402457493abc59fe8125cbffcd02210097cc5b021c2d85961fb41e563a67c7fb51b9ff8511aec8fe69c22023ec185d40 
30450220467a5d907051986b63b2f2e63d36a2894e6d19e9ad9dd18652996644b8a9c38a022100bbe240fc06f25530e5191cebab99ed7e30ffea62dda90439643e4a9f010a7bde 304502210080b0e2ef63ce547e776556ae21c03037753e5b305d918138b0b93afb4453865d022061d3e84ba087de32e6ee6a821f50765722134694563b6c20b257a47e0f51496b 304502203e1c4cb7280407d35f8d29137cfe5d8a8a1875e544d71856a65a77c091e2595202210090107f624ca7dd4941b7ad5400c962e0b28adec63bcded590cf2241bd2725c20 304502200d88da86357bf51b0fabe42c8eb63feb9b5ffcd3724588617b531fb2c87affed022100f4ed71359ed3a89d724d98aa41b66e94627546794df36a0d26e64bfc8d686f38 3045022013ae262764307b8b9c13312b8dca4529c119bd44d3511322ee97bdde6f8457e2022100e8ce27ce12a54dc33f8d2dff795439c8fa43b94223e63102350594cc7b0154ae 304602210099783b5c22b2bf12b28bb6649486b9c0223a2f1b687b8e376cc012e8f95be92a022100b614fafbfb2c8f012bff8968a2330768ed2f433ce82ed67d1323ead4ffdbd9c5 3045022100e1a2e36ad0ea297eb631d30a935c79069a5d4206ce4d91ddf3775defdb4a27630220570e76a5f571cb50e37007bdda5ccd7bacac956b097457e8d31d91ea30e5d436 304502210095d8869cdac7baf901ccc82deb1b79ae84ab717e17db37431059a1accfaa3301022025dc3e54a68c61e9504760109512d4d5a9cba26f0c07d380d07308a6915473f3 3045022100f35fa09117c31e63f08e50dddea3c324f06c04a6984544e28ffdecd8ce6980f7022054f3bfc17612057f8567a36c402ce46aa562ec49e82826c6b473713e3d6f456c 30440220164919cbc86548bdb1d17cbc2481ab8f9206ab1cc70f3f1cc1bf20d389bcc50a02204cef424a6c291bed6bc8ab1da49dca220b8469c25094c3d1bfe38f2ccbca0d66 304502201d488134bb96454cc08d78a178fc8a88b3135cd854add4519a7ff2374089dfbd022100b634955ec52f0139894cdc9ccabf6f29c63d841e0a8544fec8f762a54bf28266 3046022100a87100fde9854d1e7664902db3c9e3cd35e302a4852af0397eace0fc12e451f1022100def2d6f727e9edecbb534099182207b81a92d6e517e8534a9dafc93c7c2f46fd 3045022100e43dd61b5c10cdd4b111d3d0d025de825853bf838115c9c455d60fe396de965102207732cad6457c982918d0333182f26abe979e570b46e064d012c2bd87c79aecc1 3046022100c5dedc2eac72b5d81f5cc8270a178423bc903674f5df98dae883de29d02cae62022100966e780d9290659409bb9f75dbe6901fa1b153b0e0bcb607c0de5d476a438fb9 3045022100d32aacb7f625b146d30a7df9e5d73f39235a04d8fbf36f5b3d0380f909793d0e02200f7c3d134006694260129c2e379415f65713a333d6727c3af6048251e45a5754 3046022100e4037c8a253dc867e2c13596894e0cc23aec799501990b97f190346cab5caee9022100d2aaf4217487ff3fe0ffd6736c292d99a1835f3d62e652ba1913c0aebbced5e9 3045022075d20419c5a638b726e97eb5b826c5b81b3130ec35afb3e8571d1afc73c0c461022100c969f572d66f252fb06e6d84e44e29e04de07e442600f9248b72960706a203ea 304502201aef858fca1f2cf24191be0eed7e086a2a1017311fa6ff537ec67594326976640221009e0b1c9bed3cfc86d4cd861d288073d9d9acbf57df08301cace1c2afb8e0c71e 3045022012025f986e8bb6eb4a2591882b80622c74e9498b7ecca1de4be1828ef4572f69022100abc9b7069bad9af5e953546b8ada4de582fde99784b24881d5e8a3ade74250f9 304502207c36e33426751ac0b2698c7061403c7d9f483e4786bad05f000b4e961bc18b39022100d684ab3413f15b2c6f5e65e0c050df276d40bd95affd29e9a4628216775f20f4 3046022100c92969dcf32509f40332541111b4a5720cfa12ce7afe49a6acb1775ef81d50b9022100f9d5db2d8d7b8df119801e99dfb84ab5de73e0a7f198dd69857d23b79dabbaa7 304502210090423922fd0e3422c5e9bd4159115618c2758276f80a211ac4d446bf4fa575700220470ac851ccaccf7ad971e236a910bc91d880bae638332fb1f92b7d5de7941d0e 3046022100ce507a07e6899c2ce722d1dde356c8e61279e1a80505e92d38959177f5711092022100e4fb70ffaa23ea64853eb9b5d58b85cf7ae9b10878f25d926eef88cfd65b3c2d 3045022100f6d62561106e4a14bd3153b09fbb25dc72b3e99ddf3a3e253c3ff8172c82830d02205aec5224e8e8b59b3cf2067f73e7703f9d38ed49f4c4011f59a563209950d012 
30450220194b37082c9cb7696d2005a767bd09d8115585d5ca01d70ff55fbd39d596cc1e022100d4555981ed015b5783e687f92dc448900fd47df52ce00908f4004166502d2365 3045022100a084493c6123cb3b06418b0fdbddc73ee3e5c3f8e37610fc4f276950442625cf02202d2dace70643da00c0d39a40a8c6651dbdea1338e2edf41f666d669018682c17 3044022039b04fc8626def149952f1a76ddd7b79940137864a0b9eb2503dca844bc4726702202002a15bbd338381fbc0d7c525f911e752f36b2bc6e426c32574144fca01311f 3044022045fcc23006ec9e9d0406eaf5345a62faf419c18f311062e398197430712e855e02207dea924dd9b2d50f5519f02d6620742f4ae4493863142d067def3af0a674d4f8 3044022015fcd9679d1e42979ba1d4f816fab4bc3c62400996c2fde00107630094c870cd02202ba8eb228ece32bddcd1aaedf2527d956d9547985a43cd51ad8b4fdf50cc2add 3046022100a826c507ad8358daef3ece8ef2c8262f57c10a2c686492823ed8ad18b1e51caf022100d563f19dd1c2c0c20ccd305971d8b4dd6fb63cde99b495975521fff5127798f5 3045022056f53c6e6b4cd0b4ea99563f630c8f546333b57dd087f9852b2c0757a507f91802210084f2a1edb78549f269b79c9aceddd135198ec5aaaa212da467747bef88e9b757 304402205fd23f6d514c72fb72fd991ef5584d9209ec40582e6e0bcbb55dc7b3919301c80220078a95bd9bfacdf99ad924e08eb83180fa0b9e45803bb146169efe6510c873f0 3045022100e76750e08b11c6028d963feb3ffe27da388186daa99cccaa2b9dc8516310686a02203147da36f246200a5c9d8a87a22ca8e1a173f0eb90d155e052df88b9443a7c29 30440220504fe65105905af9a566b39c43ca820f7330d70e8c23d93e2d29efa54ff761f002205d47d1869ef60904816aad6ccffe1446753f9ec8df539aa99d450ab12c6d9ddf 3045022100b0aa5be84208107bc09008c25a62a695b2a1b8c0c79f281c4d020a64ec228a780220433066472d5f8c32804aa5213b01b57ed767a9b835dc71b71a71fe5ac432696e 30450220571c622cff7ba5d8b8eb6f1411cc7b574b50e7ef98c198d6a621fbb0da743496022100dfa268fb9f6a8e57f7e311fc03de6d4ee79a7db2cdfd47770fdd521817152f76 304402207dd809474a8ad5679f693936cfeceaae728ef0a44c4e72c1522ac4669307f8ee022046d22ca6d937cf6dc3aae9cb1a0342d41b4cc4ddcf7af1c3c320fe9721320119 30450220313a5f94f27495b49ca393e659d91d41f3b143a7785e1cb7bff29652c270a6f6022100e484874eb0b057b4edf33133a331fa9ef16bb8328312f822e1c1067c27e0f7ff 3046022100ba21748a064a0c1ac0dd69bd5f15fb1d35edbb70341b9daf4e15eb9df0cf0b20022100e2f80dc8f35337e30e686b7a45e8c61b9840acc8e79b3eae2dd1ab47ba674f19 3045022100976936aa5aefd0a8379d2525c0f8bccfa29a25f41692a7e275fc1eba2949070702201068e3ee277d1f20247b477c4ced2cb8e5b1aa9e74c38a0c97deb82bd9714412 3045022009177ee98deaadeda76b6d24281dd3852eb18bdeebb9eaf8e8f076465e463495022100f9f0b75c59432e0d688c34cf67d848598e22705822cabac80169cbd361777b14 3046022100d5e45dcf0ba5b9fe8cc3f7d576ed67dc877c6e736a6609a73f5c60e5494ef6ac022100d21ae3dfafdfe5122da2725267882d18cc0b24912a5b99ec9d8dac378c13b4fe 304402202e9f21758769f1657248cbefef7185d0e05445c9c4d9da21a3ad9d29afdcb72702200b73b1d0c085552ec59e3bf84eaea822b72a473ae43478e319ad8eaeff148d8a 30440220277c47375149ebb20a70bec561108a059059cc8b301522cbfdd917cd36cb253c02201a483d4de1f3c5ae0516a17f6c633d35ba46776bb5b617c7c1944c84ac1c27b2 304602210098bd604ad06d5f4da475f5fa155800a9ed0f12e879b24864aa150b65f1df1c40022100c4d5b6aa3cf0f7225f8eea7353bdf2bc3539045d9ca2a030399df525f15b455a 3046022100d0aff4bfd2146976156850d3c0d34963314f28828786113b835e1fe3235e67eb0221009c71bd2b79fa492edd717c8c341aab7a19757dc470f5d008db86d0d561fa44ce 3045022100fd7aeda5bcc84a744fa8d3b8523c07ce2a6f99be72e8db2035f4b0949e69b36502203e4877708b511af8bf63810dd1544e9ea5556b519042dd7f865cc233286573d1 3045022073e0269df7126e4392f70cbc6d9bf74da8a96e41a440c30ef1e87609e508048d022100d8b4fc4f9041f153408cb45daf4c5ba30d2ca0b9f7fcaf30aac053265e9bcc62 
304502200fd2f856d8fe4156ea526338068f83a56bd921fd402b77fd9fc225247090aa90022100c3ab4a5cbb989147c679aa8a217ad88424ab6d2d2789943998b8e54a0d5dd5d1 3045022100d0aa5b08b9f6f5a8db194af2657de69c90d2361c2f148962560f4c9a9a039a6902207f59c9935a89a4d6120fd65741166b76c53e83c6219c2b3fb2dd590cbfa470a4 3045022100cf342decce1d3256ded4ad51b01297ba397a048bc2f040e5efb4c036c9e3e8dd02205fbfa12f1889592272d5dbdde8944bf1f51df9c3b37aede1c61b917ea85a2894 3045022100b524c5268d4a1ab8cc99b0a49f7e297b18944b9086a55b877007943238b5bc5e022062c3fb0dd76c3e4b9a95f8847913c0f5719250c6bb5df7fe3fa5cfe35414d56d 304402200ab8caaf11b59e6d3df91c096d3cc14e3aed4e5c8e12f3244717f8f584c1d0a30220236867928221f7d5e26725020803d328a70179cd96f28918ed80ed3cdd4b4f23 3045022100c897fb431442ece10fc02b5c19a5f8719b71c83ebbebdf38f5e1ab72a2e474b402204f716e6e6e5e89ce68736e13e6704fb3cf1203ec08c415e27427a83fcdf5502d 30450221009cf98e8f9681ba52c5446381a5cd566b190e8f02e4c1ceced7f9d26a1a768a860220212eb1ee4e8fe04103056a6431d85c2fb8cafc9a789971cebb050d355418c6f0 304502201071b9f753edbf5ce52157e5aaae889cf27f1f7f5e40afc845121a448604afe9022100a4a33b49919c3dd60a06d0eb05cbd1d6c98fd54a6ccf2497be833867e1b7170a 304402203f457a156402064027055077b22efb3da22cb03800fa6272e2bd31c9f656ec42022002adf04e30e55f9e1799d2b525df512918901079cbb0ab713ba06dff6d87db97 304502203431779622fbc243b218d58357c28b856ad8de1551a28afd5efba6efaf6721030221009faa820d2acbebd56be339116783a95e9b601fd1c42bc067f992f1dfe888d484 304502207bc5c657213c0a49724a10ad78d8a5eba2e2e933f2df143b370511ebff3b5fa9022100d5f673975cee0b3c589eecbddbb8d271ec4e511df936e771745c203b329ad021 3045022100a67313d20d77b0cc93a93840f85d1805a9fb94fade1497c4993e28a820c14171022031d18f9c3b40d4d65ae44011630586f72f4011e90419f1ac6ee4c98d7c666992 3045022100a2316b27181c9948cfd1c7b686b8ebfa700ebeb4b0cadbae89c3417f9c74c74d02202c39b46e995ccf0967380e92d3300a15a9aba1d74a3f45d1ea2b150178c02ff0 3046022100e0478e48e83783307672ab3b12a1a36dff481641bbaa583b8a76c1a0b6614e9f022100d640d2cd932fd4d90ae040671149a40fb2f6beda864c83cbf44ac366c53d2f1f 304402203d99bb3201b196404da170423213101b9f2c20d183b07a45942948210b8c53fa0220261f290253e834576610035529788f8b8d1a1e77900e11ab33435b0b5b666b5a 3046022100a9f246ab3e66f1e1619efb50fc7d1f8caa95602319f2d037ffdea5441c413d440221008bfca3a979a6784f8c4efd9eb663fa80a68f31ca4703ea081838af6cb8ac822d 3045022100c069eee86ab8ed4c1b64d6ab3a5129d1b3eb701cf98d53566ca5c326398603ce02204c6fac9b7d48774d6f980705a4133276229a7b640fd57f8415a3d130db1b8e83 3045022100d347a9279d5b222b07ec385b7e31a7b3ff8949ad27f2faeab3d7b4eaa407517902203c8b975e6ab9025b8397f18486ef83f3bfc4a9d910a6b15e6ae6fc3a0ad349c7 304502204b782a1dce19c94991394d4c33249dd7a81ed7bdfdb2470e4bcc57a87eb18564022100b73c7515ce536fce91efa37b280a68b8b3b9e75854b3f18483b095bf818ee4b9 3045022100b635cfc79837b5b27ebd6979b90a78652ce617abfc322a647177e860647571e0022015ca670ab6c7f40991073c2a724da1377743a866f1a16511f5bdc07f800774f6 3046022100b9e24cfd527ffbb8c25700feff8b765f4ccd801e45abf2afed76fd94d1acd2d0022100df418ecd8538d7e08623e5ce80b898e4a418d678e82579c3b9dc65a2066c8a4d 3044022001cb431aa2ed2ad5d68f5b323e4e14d22650444a2e12f9ef47d6a871f29b64c802201a502f841181b67596769584a0ebffea5dd59b77bd8590eebffdba59167a8efe 3046022100d77b53d52022b5188d435a2780aef0f9a9d2be7cc6e9dc99f01f6f82d52bf0a2022100a6577ead36653b94e014d18bfd503204820a1149bc2f384499232eb66b4a3dc6 3045022063266ac26c846cc4bf2dd1348c4a705de8c46c9f80eaa568c5c160d26f3cdda7022100fa7a1ab910a3f58420f14fb6dca2df6a459e450522da45a68764d5c62aba46bd 
304402204c1dde247c31e061ae85541164ca1e1060c868a94d78f35aa3a25f396eb2573e0220072e3d99d7828bab5cf34b4d095c681e419a16690cc81c70758bcec45af069b8 304502205b55da389d40b89e38087aba799b8bc0a0139f741029fc428f98ec45905b26f2022100fcc6933e95fe658a6a5d58a340e4e00237b7546e332f5c9f0d10f3d1bde6f3e1 3045022100a8b22e09dcbcf41f967c3340c37afdfd4fd7d57ebc7274a76e8ebc1343968a5702204971f4ece7568d87dbf472a7408995eac474424ac032c8bf9b2cb26fe3762441 3045022100e4d7b1ff5ae42707c3a2097d2e330376d9dd9a6de7d9d5ed9cbd9d5848f0755702205cb825db39f9db7e9c8c10324abbe128905d24e0b3c51001ea2a357bcaedc7b7 30440220052ac45c81301e5bdc8d3d2b669c0d96c9c0090b68ef256c576a54a524e9ea9b02202ba37c78152fc48967376d36a53d5dbb5f181b01b1cef3ccdca8658e79333b1e 3045022059b6aaf6671423a54639d6b85ae64d6dbe662e7feb00441290eb170a8a6357f9022100991ab868e3db1cbd77a19d50b79747a363838b948450aba03276545f4c4f1374 304402207f42b9f0fce74707c2a7e2f7fe0f50d2c06e6264d0c9188be6cf295c6d01022602200153c6ae2ec4a08d18a14c7caa17f684e68e72441569f4bd9139bde9a4d24b7c 304402207a0126995ca713a50810d8aa19f8e9cf63b71c9c1c734823036444f7282ed2c80220594cf1fed4514811cbfcfc87eece76febf68738b620642bf1bbdf25a6050f9d2 3045022100e9cdb3ff751ae4b7cd8826916f6c1fc36f7da4a894520b1f153819ba9ff873de0220630d186b6f6c870c270e402eff20f4aeef181035202d57950386ea57ac686d8e 3045022100ebe0624fe27411c41cd2fb6a9a6534c0ecb8ef7b59455d3be1f227f7fd871e6402202ab7bb0b3a5bad99b1dd7827baaebc03d129ba0c70d995fb2f4bca4b7051d15a 3046022100b24b69debb695a1009c38661e47cdb8d8ba72ef44edbbf0eb15341a0e8ca0d57022100ef2313418cce6d9365306d5d4cdbc631ed005a5cf88e3dc5e172ead21c44b637 304502210083077a4c672cd5ab59dec2f0d93cd535d7a4ff6cb1ae8757851455992779fa1502201b361ee5a6008f63e4ed919081c674e6ad46378bc7aeacf3c77bcda5921e7594 304402206c70b34a7c8dfcd0202fefdddb38deb4a551b2251e835ec78f1301d6d2f4636f02203c42ee6c3631c5a3307b81afd044df6a78886879661146e3870618a529042b12 3046022100ee3cbee815102a6bba543f118aaca5f9fcf96b24869260eef970e7fa0d206810022100fe802241fdf07c74ffbb535bc1c88405e4770df8178ed05c7af081c4d25a4e9b 30450220679558419b8183039419252692b6c0736ec434c7c737b8864dc84143b19d67ea022100ee9fcf6b8e8d45bce630e6a5f63389fd33b1d600224b3befce34c3b2feb8f0c5 3046022100deb990ed41c0544d9ab72cde6a1c7a1cbd3a2cec90dda02041941b8d3fdcf2bf022100c14af58624e8c417ceec1c600a7a9df30266490d7fbfc8bb5bd6d8dd9d574bbb 30440220137c96fc717a3149764d8636c66904be3f0a96ff36302dc5769d3a98c1c26a1b022058c40c280614e6ba81899f03e3d3a5ff30d6a45690c5aaa4712117bf9501b6e1 3044022004b0b41edd830bfcf2e06fd0a75d88e2c13ef2dadc52430255ff02da119565c60220347f1701d49d099e6072628d01b5385b3d0e01e7f239c86df2a62aab6b353122 3046022100d45de8c7cba2d6a8fcf8f1c96f4ae9fb4027f19514567f7f56dcddf60a28e785022100b7865891b95961187649af5bd7b21769eee3903c07e53f3f4fbafeb48c064a6b 3045022100faa4990fec7cdda7862914aa7285ab8e9a4f4b7cad13290e44b84c866f8c91d302201ecf4124ce044454fbebb54251a8bff101b480393a209c1316f13252c1c9b368 3045022100e44394f88a4f313213cd2828366fcfc3b92cb4f134b66889b7c2f0ce9d5d60cf02201d525e00e5294014aa34eadc35f3fde7a63019fd6711bbe5964dc37d2b0bd17b 3046022100c4afc3ab0b40f760780e8d1cf22b1795e904c2a77bf1472873c2edb045a77d15022100da6bb03f87042327b121bc2fd7ceb5e44e042c4955f4fa397a053f863ed550c4 3044022031333303e0790cb708cb8db5e2e340898cc260d63e42a44d153a5133d302895802207593acd5bf69a0c92997cca1a1ff84f1069fc4cf3373861547a524a16feeaac7 3045022100931f0e9dc2e0f00de373afe3f7642ff1174f36201c3eafa969e7164014a3a82d02207025b9c7d2715dff035c6019047642fa540719cdb869cad90948950dc3ead609 
304502210096feda7baba5561766fdca3c45b6ddd45b6304f6a8f4958947a07747ac0eb2fa022030e6e42280ce0b7f8677c7aa338cfabf43034f58108737012ce63d3a4a650283 304502207a8ef40641cdf889d968541b5730875e01ceb7b03da9ba3c87e848ada796ef4302210093629d76ae5f64f7134cfd66f77b2754c7f3f64b10dadc1fcedc8c9849629024 304502207f17f92ded5cdff50a2e2ce59267b86d316a5cc0e6bbd471b114696595831417022100c8f458071cc827ef7cd4d615ef999a2df777b7d4c8647fbcbe00a942049a2b79 3044022016fe80cb570e5ca8504ab8d2adc59e7e3a919080c8fc53f402b0786bd86050440220253172dfab421958e3e194b97910b55d7ee9371512e1e72597db4a31c4898e3e 30450220496971e729e6957d4a3c33775ee6408d66b4d457d2a8e2c9d3c4d573415c7d790221008107fcc03dc1a94494ec3821d81a81f097430e0cdc01dfc66502f4b0f2a81459 3045022100eb2c5181840d42408f4b09b3954bedd912794bc724d652b6b55c64452efdde210220086a2b85e897d9ae50691fc9790742eb5175dfa7aad1728ee00f396bc2f42717 304602210093932212f2a0229b8df82f694ee7f6eab245711c30f9529abbbeee090e84f6bc022100affa9c735e21f58b2701694e1bbf2440fbc2e7ee484a0103e4e14d42264ec923 3044022059232de971ed61c42a416fe0ddb6206417dbaa0faf3384c6cd0aed697fbce18f022064639a0bfbdc75797af75ea7eb09f7f750f47caac87231eca9211ea198db4036 30450221008377e53fab1b7319b16a8b52dcdf7270619862340ffd03cc785f8c23bd32c2d702207c42dae79eb3cc4bfbbb7a30db8c14f650e6b3432ffbcdf813ba70f9208e0e8f 3046022100b69fa9d1598f04cec9456b1fa32d7df1c4abec0d51df948c73f188a837f4fe18022100e5b4c5912bdf77270ec3df6b31ec7f3143910c6155743a3ba7831fa4e048d2d7 304502210095f9358042264856d3b127e7e65d51f7797734492a72c903897ff09e397308b002201ebf335cb67f939f588b3febbdc17496445f8cac00b270a1acc0bd0bd9e5cee6 3044022032dffb9eecde7fa0590196c5af31f52a8f63d06482fac5a216021bd4516ecba7022060501da1d688e954c2a1ef04b7f8f6c9e72f7543d05d3c437f981ecd7cf629a2 3045022006150f73f5a0158c07906caaae9ebc50c7afaa889e91551f10798af203a52c3a022100aee137be7f32a6e02603825b68fb75cb2c450b4df4626b0459f3f6b99c8c0628 304502203f69258f32e1cb219a389bc5819776bbd9a07809d97a04fab64a0464ab75d57e022100e84059cb65b450f33e6ecb4ec485bc885deef3d51dd4422291cb7f55ab41c220 304402202ca7c9027ebbe257499d458e836f69cdb074529d98b6075d8038aeee16a033930220060dae033f513ecc1e83be1695642661e13253198a2448957329fb4f5df24e33 3045022068a26e0ac39c6acb644aa95f2d1932e2c3c406c1322e350b8a5e449cb7dcc5f4022100ba334cd4755ffe3a73372e3a7f4459d9720b2e79d0507345e6ce251c19db649b 30450220627aef08f6bcb04b4b1af4d5928e1e766e78797b73cb888ec77b965d639ca37f022100d5cd710909ce5e5ffe4b4a001817b40e33d7d902d316a212a49dcdd945125ba2 3045022038a95f590e482a85f13358915d5c8f33506616660ee8b0224e9cd51d6feb1ee902210098b2de84638e4881390a3776d82c886f504fb1eb60c359eb620e5262a7bfa41a 30440220188789a1015f4e610f0d68051786f42c0cf42a672998867736cbaf924022442f0220558c6f3b5cbf79de262cab1f82d579aebca4debc8559480c47294d90c1d963c6 304402206a21764c284678a4e1d89ddb9e41fe6c03ce39edb0647802fd3bd3c7f758485902203b58622d135185cedc4a760afc62e8f410fbcdca9b77426ecd8474c61df9a22c 304402207a6bd3eef7255724b6f86285f5969d21cfea38edae8ab8597bc6b631b948c74c02203f873a33e902cf68b84744dc5b42b9ad2b969e1c77e085087a8a6f745ecee695 3046022100d12ff2eb59bd8aa9ebf86471cd709dac842a19152bdafa7298c024646d590c53022100be857a6471ff81fc09cfee5717dec43fb28eb306047d0b52a7b2917495993fac 304602210081400877fdfc5eb42ea379fcf3eb9895ac3e56553192b32b4af9073253629932022100954aa0e7fe47302aa7660c378163266f1e2541e193b374528adb3e4c403d9e8b 30460221008cdb0be009716b6ce46ddb93396fd3f22357094b476152094ffa16c98f1f0b85022100b2afa10449bdd984bbc6f9b889d70d815d923ae5288f91d3432301ef22c3a878 
304402205e4419a9421bb7a72db1defc98903aecff625feef24ed661b7a9b5a9a306c36002206aa19f14c457e4123ea1bd60be92a8196626c6a946526a11712b59936a501253 304402201dafbcd0f88307d2107df46c27779edf525b8e0639bc424cdcd18a013127b9b002202e9bee7246af9f61f03446d2b64df33638aed06b6e72048805d06a6b8c08771a 3044022034a858ed50cc73718e8ce5b7b1232ede23d333059cc8a0ba9019f1fa9d24ce0e022040c344f93db67b58b150deb3e1d60d4f51f2ac1e3a8ef2d330232ce7bb4ed9ee 3045022100872bec7c6b636e142cf6512b6637f5807e601c22fb22f0679be91512bb48805b02205025e33550f732538fdb45b2f7b9fe67559c71189bfba51adac9c0cfa498592e 30440220724530dfb4a79decc87fb8efe32c90c96ed4a07be931c1f6b245dc3e60ce059a02206de154569e4e04de4889f74529a9f717b04f0e321ede3a130e528c41837ac033 3045022100b1d11bdd73a5c0b43eb81d56cbfd986535f43dd3ed5f3c52d80e8d1f23b17cdb022021c1e4d5e2c13d1f191023e272ef880d2b60b4e985e6ac4fb896b00e6ff526e5 304502201e8b4decc231b84a14e72009e0ef4ca4316fe62568b3b169bc750a36c1a836cb022100e86589b80372808edb89d5dfa334a897e1e88885d8745f1d12e93a1651963a1a 304502205eb6ae586e0b37a6b02f18a0fda70758546d86861c2d93d80ac951f2436121ab022100806a663cd842004a56ccb51187409231fd565cc6dd5a06388ef73a2980d1053b 304502201013795d5e9f978a493edefd4d5a614d09333bbb389cf50a296e954be88f71db022100aa94999d345df8ed0e106364449564f6203ff43aa0cfd4f19214a9d02943b102 3046022100d713fee796e742e5f47adb9d665aec94458c60a64eabd38f599780870662fb13022100b240868703701c6046a5f7a4d9ba1143ba68dc5b043840e784f0311704afe762 30460221009dfa98b9fed78bbce1d9e73a48a94537b967cbfed88e934062be57dade3e4bd5022100e9c7b6d1eaf5a6d47e0a0aed4ff7ae0377d0fbe6599eb197a07df05842e3b171 3045022073010c266c65b313d1e209f57b75ff8f42bf469eaac3fca6613eebf4445709da022100b36a6f42d4d229f2911f724a4aba210d2209fddc375ac9e6182e3c264082db9e 3046022100f71247badf3df4fa0215bf30987bcc5ac729ac1d68e7661f06f2ba5fd64aa1a502210097ef0d671bec22ea34c1461eedaf861e84051bd5016d69196707b12b9d83f010 3046022100a2355e3b544aeb9f5f8291a3e58a089cb755ef96127cb78df3577e152a8b9e72022100caa519a067c2176d3521d9794b9799bac3ed60537a9bf51bb0d0af33779f2aad 30450220365ae2d858e4293eb93e75d505e8719145119b37bf6ab532584c9181886f9113022100be8076c3d66429aaae9210904a6c9b881ebccd12e5f04c987570f56e6674f957 304502210086cf0c323651192c574d25b33039b63c6208da5253347505c3f57a59dbfc744602202d92102dca8136dee656700ec24558d38c6c4286bcfab0342a38065ea61e4c6f 3046022100ba7f615536d95490dfae40bb7ab6526a3f309c1b0411695843ad22ab9762f8690221008bd19777d0042aac08beaf93d8958ff48ed79ea797e40f85cfee3636c3e86644 3045022100884a4d4410aef17e3e6d52b028fb4775768124f0d290f45cd08cd6d0e55fca37022024ba1d550b8d0ce526b012ce4841097cceff88cd2fa1c24d022da5b2e4829ca2 304502203bc81848e266361391767519b78dfd253a5e94dcf6a45400188e9b2388d3653002210083027393f4d9b3a159682cafb0dad90c9acb4398c76b4bc6040ed111035a544f 3046022100bfd5b0050b4fa29a54d8b96fc74f51b4b4f4b5b115b6e4dc09aeeafb1f4b4458022100b7c99bdae4bb2ff97631f81030a6a86cba53a7d3e3b54a8cf3733c074f35ced1 3046022100b9777796dcafed2d5cbc25315780924f681753101d14d43a0ad8b1582f19a0d3022100b74e50bde2f42f15bcfa988c6cfc6c7d69cd1176a8ca5a5051c76b901105d24b 30450220582b1ca29cebdd9223d833fd07e80be6cfee0fd63dd9ff9613c9c42f0e26829c022100ab83bd04aba0a459d70f856ec3a739e365fde35a029b4aae689040a75f133842 3044022044b50d2e42768adf9370287faeed0ced24aa161ff58efa9a018ba825271f8813022052de0a9187d7c5677c7734c76ecb99fef28d81d9029dd2675543be447838c2f5 3046022100ca0604260a46f8985d6a7c7b4586826b2d617604af3244a14b06c6ecb11e9b71022100a3f248b6a60950523194ba1a420b6fd7706c75b7b13a207b41ecfa35ffa63455 
3046022100d3c96af0bad624bcd50e438604e06444f357c9495e5259303764659fd10adb87022100f2c97730a8d8141f0272ef465f1cc25f6b592a589513153761af8089338774bb 30440220108c1fc12c0ee53d5241b06ccafbdb210024b30e364a3653dfb2aa0c98cae2d60220106d04d75cf811ec1718b2d4aee227bb4a7f44747aaf533a8abff23819bda64d 3045022038590e995946a1b59ed4ccad8746af119c8fd17db3f296710d85853676970eac022100c46892396f376b4e90d8105fd42f5c4346842180684689910d837d07d7276c01 3046022100908b0ee195692bc1451a7ae652bccc440018a7baf538a9d5ae48dbd1f8ad748002210092c029a3b08e79786a8cc2c4fd07f5f4f0ab021c853455c1754bb3e6f6fd85b8 3045022019960c1bd32643d1a9eaa77e97ab1cbf2b4a0f933cc88102961e3b2bf070ac750221009c460f18ce4a489f93c1036099371b61addaa80e480e74390357d811f8011b60 3044022039e9dbab90db1ee353b456a627b91ad4354ab6841ac23979ff76d884e9ce00e3022068661b1eb58ddd808b3d8291b0aba9aed8c78d07c8b6179966f38c20e0a62bf8 304602210086951ad0570eb8b13d87de3222e727a99dce13be262ae8c440f0f221038da60e0221009bd975e0ff974fa140ecd789b79abfe14e317d3bf8315a47468ab04fd19956ea 30460221008afc03ce2d0d076c331aabc04ce083b6521f8bcb9a8588239f385bcd0c076b15022100d1b7a5865fbd9c9ce2342edb72c6ce83d940b20d243585a2ff0f6cf32878ab7f 304402207eabbc774336b891f5069e7aaf7d3d30085ae9f6c6d7ddc3e6959b4ee4f709a30220139263de9cbe949a165ffebabe6f6b671d46ca5d384a3d1d1b2a2248bd73b868 3045022100835af09672e7339bab16acb8e23fae3479c7b5c7f292fdffce567f0c10de117e02205aae6a6c17ef5084d7f7c64619694cee0bb8275b7af78feb473ce29e91b45353 304402201509de2fbcdee6c9256610bbf2c92a9523bd5efb3d7a8e742c4fa8755914062e022049881426d5bfb0a31518bf0d227dabe9c55b9c684e73b520e53905986b3c067a 3046022100a156c002ae58c96a776347667a7c0ca0ab510a1bc8b99f55b8af39cb13bad807022100c3b7bbc7efa79a78753d4bef958e6a32e92242521466622f08b34e4a2c3c7f7b 3046022100c20ebfe924abf1e988d0ab60ce82a5dd7050c232561f9b724625be7b460943d30221009b3233364ea496b64ba9767043048c6609963ae3e8d4c353b44634c0304cb9a6 3045022100902fa53f5bcd71d7676700ad17ec207ea784c2f7217efcfe305d7a4e5475eae402203e2b0c785635c14d057182aa8789135b849518151978868fc20f2ef093c04345 304502203d970f1aef0b8c93bd44f2627c3e858ba5a69b03702d6aa022470a8533143050022100f1fd2adcb35316da1498e60e9b7a21210cb0d407dc3f0d4f5f729496cc31a401 3045022006ea7d9d43a3b727a64bed901e9f3e058fa3570f9df2b63520da30fc130d69a1022100b1ee10cb5acab535b230a24bbbb6bf93c8a247c3f35649e719ddf1daa62ab9ea 304602210099d1f5450271125512b94f62e261d5d52d35e59dadc037059d237906c5432215022100e13c1e445128b19d3def852f3a629ff5e82b45fa073348cb5c5d98490eb86f75 3045022100da53d0a8ac10606ea58364ce05bda5439d111a3f6df48eded76a0e3681658ec0022059cdfa3251263623fb01a346606c451f2827ee5cbabca661e506e1831d04efcb 30450220594e81c00671b3689aadb44edd70de7e8c84c977c0310cf8771a1862f1bdc3d202210084752ac76be9901c98840072e6fd5c02074796f93d5cb7a0773c7b1497cb10e0 3045022100ed39606fe7fd452fb730bc4401d6316fa934469afef2d8bbe339c9767d46b84302201fece2df020134932972f29098f72fb48c55768f2a564b96894374bd15e71ef7 3045022100e777123ae8afc5dee1a7da541f4ff0fe735aa4c846b6f820582cd644139285bf02204a733daf16b6e740b74c1da4ecd9161e250e54204d391f8a7b406920ab77b363 3045022019e52490480fec28ac5babf54c068c8fe3465f47498674528dd26a0931b8d1d6022100c4ead28913e197db9097d79fa40e138623915ef07e6f375cd2e5533f18e6a7cc 3046022100cd47ea787fdf97a98da24a1cd2df421d2f554d08f40ad69680b8b58bc2cec5a8022100841021e41be62d9364c069cda79d2f7fd0831f120cba8c342a897ddf1ee83295 3046022100b3f9989b24d0279adbe3da115f8a15dd358e36bfadc8c3c0b8966466f731cf9a022100b5d088abbc102d2c740f7db0c85a5adcb0a122c1b7a492fa7b78f5c033adab0a 
304502205ed7b4fee26262a992d1ef53f75fabdcd5067812ec9df17c0dd11711ac6c78f90221008f84ab39c825eb2c9c482ee1659da2d5c5002204717b9f8006668d1fb152d61d 3045022010e236ab2e0906a6d8e0c18ab8537c75260e1dc4008f439800099a740c7fdc61022100fc36c34ce40f8b102fb13a9f426f92a215b235a8966b18ea5da6c68997b38e95 3045022015e4d8c7d1ce407975eac7c6944a4afce855771241de3c51f76a3c4a8cb21ad3022100f004ed312e97da2e1b49706041983403d7ca8d685df835b09e69921de879447e 3046022100c5072f19602bc97e0cd23194a7359da9faa6636644969f50dba0603eb1e108d6022100f6b63e9c9a6c0a61e8eb17f907a4af9dfb14c121e33c008d4033cf512e17d63b 3045022017a0231e611d33023c462b2db7e4d03280f21e797cadc9f65a28d2ec2142d5e20221008ae4c8d3116902269aaf11d1c296e5d2366c98cd5923df341c09e6b50fe73507 3045022042e6c37229799d5323fc6615ff9418b66df0c430b94964ef12f58996ec98560b022100da42fce396d5a58a133d951b9807b18309e7252fbcc0b28973842229333cc526 3046022100cd99c09c727ece9323a1d5de0d52ca7447764dbc561123cd556e7d14fa1871190221008f5dde7e06d499008c6e1856e730ae5f88493eb3b316b6bb5dfb33d6b40ee956 3046022100b019789748712df57e06d60072442880a1fa18bcc50217a3df3432ea2253c99c022100a3458e13f92f3ea093391bb97b7cb0244808cf7f8b83d9258bcc7db133527ee5 30450220140444b06a75e1fcb43b3e6725c1138da390a396a966c59ef21a291653c1f78302210095810966c16177a69a02f8db0921a14d085ffe7228343f8e69810fae63e74045 3046022100a9cc360a9ab7ff2986bd9cee75988a3bb47f51647a00cd706242c03245c65adf022100aab42f469f8e1d9ccc980c13ca7e2d5c36ba64bfcf1f39207c1ab53dc4e49263 304402203dca21ca02df79dcabd563bf1db481548457b2e629529e01f54d08f7a0a2bf20022064fd9087920be3cc771cb8ba190e56fea1334a89a1b920ae352a02352b7ed9c5 3045022100830386cc35eddbcf5408c70e4ccac4c85c0995e196a5810378ed2bc695a02780022032c26e2a6d9eb949b7404968273eafac65cabc0eb3e125bd5803f9aa7fde0eb8 3045022078809338699053144b8d7df662b4e3f76bfd9d771cc02cbd5e4b163b537bbeaf022100871502e14080035d778a5353e553fab517b6fc534fc6da0f19812b66af13afca 3046022100ebcf5bc5896ef91bc4492e3231e247890d04e71563d0c2ac97fefdad259288f7022100ff3ebc94948a1512ce7334462b2644ed657fa8d9b981961574b9126df48898c7 304602210095f1d2300c1fe4412d35ed3025a275e145fd22371acc4b305d2d77201f078d300221008b5f817419ff2e23e8bcb163fc2cca8aca82607ad964d078790df0ed0845b35d 3046022100b98d13152462f14d73c1793a6cdc0a7dbc8a88c6e921dffed535602417c4596b022100ba964a8d587d019cdca2215d3e2a0dc2c94a30b834aa51bfd3c94b4ccb311ea6 3045022100f22d68b6e3630d50a34744a1703398a235e7f3081353f0aa955dc92cd986284702205010d101b10787d8142e026d8986610f9e41e64df00c7b32a4316a3f3c98f9a7 3045022100bed517e4f1eca858fa6c7c6e4f4181c91ff34d9ff43e346e87d3ee6d5e36cf570220472539d21a4cd730cfc6b83cf4514c28eef27c7b073c90663abb7e94b23eb0bb 3046022100a99043bffd8c372a047be9372e2c4aceb8929c08e40be2b9d75d052cd364bd9c022100a6202e2001607960a4074b7c37d17239444f452e9e81e9b994ec4bc71c8993e5 30440220775cbb4b93ad307920edb6b44aa30a82b8feaeaee5f6f1ee2071599b617aed6102206cd0d4bca018f66ee4eb4a39cb567e3930430582e09af1090da07d1834801f20 30440220623db0b1b39fefcb961dc908ad2229f85bef7d16b665dd3bc6df78d7d095434302201cb9151ed8616715ad408fafa39dc82e5a7802f78405ee4f5786e1c6eb4c2447 304502207ce2c31415218dc377d923d64021181d2df544837418a91de0245a8203d55618022100e2ea71c50d66da0ec1d41baa45cb172a13889308fb7faa5acbbcdcc02202fa22 3046022100e5a8ab5f8b018ffaef6cb7172491ef4525b534ac04392aadc491225ba47b319b022100ab3ceed29e7d797b64a822a8cdfe3a22eff8290c5d35d873834e2c8ec6d5da1e 3045022100d13578bc6c236f0a128ee360c8d249d53341121bea0018e6a31eae285fe43ba0022052dec2ed2fb6e1067c286ce9ec4b7e8778f1134bc66a7591060aae8e8ac826bf 
3044022051f91f3e53b35aa3d6c6e8fb8b3b7d4bd798caa2aedc644baed426b6ab44b5940220772961eec8ea77078c91dfb7fc865a37e22608b08efc8a1ea10c0ff8d9da76b2 3045022100e18b70047443e8920be70a3e9e8d4f59feca8d08a616c58ad9fea903e411160a022073a5a11d5ae051950b248cd149b57c1e89babeca501a1fe9097d79f4c92a4735 3045022046173874696d394a61c65e46a71a6f0b03605d79f8fca7b97e929d20856df3f8022100cabf2a90d240fcc05512916d9bac7effce678beba6d49d15460ee9f19a7e5910 3045022067836788457bb999c2944d287ed17a4967c6655cace01d28ac21dc091bd9e7910221009bfeef5324682d72ea69969a38cce458b4795ad41c3fae1532ef6d1a718dc98d 3045022100a97211964af07b543af470f50b980bf9e2c2f3795bb57515cfd550d16a9e3b94022009cc8ed90ef8263767c2be704ae2e56118ab5a468bc53ade2fda0ab037049f6c 304502210084d0d8457a9adad2429c9495ae8680f9cc3ba3cac64725ec5dfe425a98126d0402203fee8cbba1b6315a48f9a162c5361a11ff070404fd69ad78a71f0a6463e42420 30450221008698cb58f1e27783e8584fe793106447b81a8f4a21d284b034521505b4a8b5090220075b9f67d0a3d92be55f1e72020e3fd89d040fc2c7775180778620b45c5c44dc 3045022024c45906ceb01bc7214b8ec740e1cea5aba76d6d81a7fa131b57fdc5c00258af022100bd4bfc36835eafed5e1c54995690c7c0b681583ded59746f77e43e1986cd4aab 3045022072d9300bc3a8625f361b8e19cb7840025b13a61f4e37eae5441b0983c89190c5022100e86faef1c18317c8912f705dfaf9461c15f2a5bc4bf496af0eaca91f9b3d019f 304502204f77537e7a46fbc3f3831eb8dbdf38db1f41eb1c6a1cfc7f9c800e9cfb866335022100cb39aeaa728ef2020e445ad0f5db7e61af62c0daa005cb9c0ad5eebffd02a369 304502201fbb65bc6eb6b0e82a178cdedbcf52da2aeb226b47b71ebce22e530304d30cfd022100bd20e54f8a28ffe5ceab6af17a0cb0eed297d1719a8587fe5e6692977dfb5d0c 3046022100899a69b8a4e523a6eefadad0a38f4cb5520ecc91716d50fc023ab40a99b01a97022100bb01327e5db52660d60454e96697e0ffcc9c87f5dc8ee7d09e716daa4ea5818f 30440220326105227c624e058c98f00f4350704bfa441f2cbe784ebfcd5cce141f19084a022022d38b470459a96d6d0d8c557850341fb5540d5ce74702c4a4c64363dafc6770 3045022100e59d4a9529dd309b7c7b4579db2b9031fcf12339a1cdcbd7631d7ea93f8bcd7802205c09350d4f21631c1934263caaff14438e39f831cd68cb1a38e9650df99addad 3046022100aed2eb272e481244cda3fca767175760b43f081e6ac2dca87be16410f5437723022100fa06c297bd6fcc8af141547e5381b2983181a0914538225660c809b2a66f3d5d 30450221009e2fab81f118873bc1933baa8deb5bafdf86e64cbdce8596002b3bb9d17447fe0220093f2b5b09004e0d0051eae46c0a2423fc25aa6de8af2cae84be05a0ca3baaa0 3045022100ac3869fd773184b6f73e1221ab4d88258895e003c0f72923b611fe58192b950302202377a2c6b2dd212812fb073cefd19b4701047951e68a7243fde18ce587c25a06 304402206207c1a33c0c03caa456d515084b59def7a52b30bd481e04a7d172a9d49a501602206f764de7dd50ee916a0b459ce2e2618e1f0e0702c41b33913958a5552ff33159 304502210091a458ff93abbcd7a35132ef424ec2e92e7398f136fa9027c9904741c2d3738a02204eef9616a40dbf00c4e1eac7eb4b1b52bcdc4b31b93942b657293c47c0da86e9 3045022061961f7ddfb0cb7b2a2f78bcbae5744614b36bb8f63467289e43bc06166759fc022100815128f0d98417e3a6966900bcfa38c803398ac468c52c5d3b274fa6a574c108 3044022006e84af1c927be85b5f7ea2b18f69217686c29852863691f068415460e29fce702204da4e63e1876b1a6c1d90eec87bac08409712818db0474530c68b0b72e3fe935 30440220473b0ceebacaa46d7cdf38a2e63fa2f8092e14777d29d35218e9628ff671ee960220640de008434ed127e618ce56105f9aeddcb2d84f01332d9532c37d16881d12d1 3046022100cb27b4419f07a1477e62d85ea3fe6300de1e752102dd7888a18ffa8d9d4c6fc30221009ba0dc9bcf9ff545cb34bca99c4c852c36f00264ff993f80933d8dd66912d948 304502205cae974ae226b1454f910699f421af07b5fd21bfe89f2ef464ab4caeb9e1caa6022100d2ff519bf66c7e11e64cc758452159dea1bbfe7245a20da8f6fd3ff7aae0f4c9 
304402203652c2a952430fad3b86a66b3c4b105149b41171f0ef3717f8b1bc02f56205530220279b77799833ead05225d39711f1b7ae40b49976439e4fefe862b4acf9a62223 304502201a9ce87372f527db84f667b5a5cecbec2b626aca3098cc123eb48e1864b092aa022100f7d758dd256aad6c6c09b27dba7a9b4bc9463387da027df4479c2d7b03a28fbe 3045022056ebeec1497384b8f045dde68103003bde458d90857670186e234684237a886c022100a8bb8a66a904aecd6f89891f97f6ad37f7932742fe0777c20b0e6a17ff1a4c4f 304502202b7f1c402a790ce62908edeceab2f606088effcea1e75028fb27de3103be6794022100cbf84b36b8c2ffdd5c21e74a7282165e6296e4c8f4fc54e70411290d267f933c 3044022062be3a9125bec43ed9746214a84609ca974cfde959898a4a27b99312e08129140220434f3dcf70aaa7dee5b520d35a70be5ce6ebbe5333f6f0730312c7e63b242548 3046022100c41cc76c5fa3d3b2e184127fff23df3498405e939f90ab418731d25263670538022100b4370be7643c7d16e12e0018a9b283d5d8daefe28b2f4e5dbbd4b49d4ccd2fcd 30450221008462757937b1d60dc88c515f8b14ec8a1d59312b8d4a569e2c49f7b4da93523c022054951b2a2c232a712333f13f132f3fdc443398799c72253a43fd9f4a696ed797 3045022056470a58cc2f1478019f21bca57f4d8ad9d10a5bb7241d719555b7ce0cd2c76f022100f4e8624dd757306143f8763fbd54175b887f1f359af80912a5de81a9a1a407b3 3046022100c0c269d78292610690760db2bc32806aa886393ff1fb1f71b64e1fe8bf134fb3022100bd6f1ebab76c1f7aac60ea67a3386009c9d623eb0773df3e520b7538c66d52c8 3044022054572c0f1163c8e0b1bc0468b3379b17d74837d3a6b67b72a59a5cc9da0e29f002203e41f39480737d4a6df30b2daf5295998c87fbfc973dabc88341e5c6c60f5803 30440220240093bcf24f4de02f224cd7568217206bf744b3198323780839b9ea69f1bb1302207a18b950ec0df2c45ceeed1abd7f2c18f0ba647305b4f2313dedda5440f70df2 304502204bc0f99b576dac68690834a20690cc8aca36e5bd91bf882d682db3de5f228503022100add4108489ece03c4dfef3457e05bcd79c117be43a7147cd16c438ddc0dfd939 304602210088247602fcb2cd6c4389aafd89d3d45eacea6cf31b0483051ba10b2b2ed67d20022100da219dce9cfdcd820ee802b783178bf1065a37597b4925cf3f6a6ac3e94c0f0f 3045022100b13543cf290c8e23b771bc4136c9bf83358db80a5f4b672c745c700f75c7a95c0220320b41fdab9862399c4de30fae00f7cc2488ccbceaae3362ac849e66a69d0c96 3045022100ad2615b24ae03a297193c6060a6ad6efda3e846e0f85b844909dc458da1d07820220116f258c5b2f05896c5ecfea0c85e5d2176e8ae86e374bab4c00beab50947c5b 304602210091556450ecfee58b37d473130224e1a208b305054e3813771e457c27b36c633d022100cd50174a14fef6d1c11220cfdc694d1a4e8422a22d1c9168120da96634b92954 3045022011269cf931bdac81eb8a39bc90906dee76462efd6a4e006bede806a63252b5d7022100b89b8f1c2811e6f3d826606630e5942657cbe8d4eaa4096d6868f6c6ed7f75e8 3045022100bb6bfd123124cd08de07fcfc0af68e8cd39a03f5c8a06407b635d8d65972577e02204b9b4cfb075d6cbd9da979fb4a07b0e6179d3b8c3664bd55bd12bf67c2ebf449 3045022100d1875f1a2fc196e2ad1ffc54a66960ac099dbf241cf6f42aed812c7584345d45022023179f77d3159953a5324c9537032e24aa23f21e465eff2c601c095ab9b18add 30440220666b00ccaeb35809e9c8a01127a8caa232656e01a7e6126709f5a7d139ca613e022021f5bf38849a9ff2a41113de5d5cab1e8fea09f52be752035ca711ede6f1f2a1 3045022100b2b725c446aecf59633ac9d35c9ee2c4c7eb267620f702d3fa474b5628fe808002204bd5eaf75e1eab6d48c5afb9b5c8127dae4559250aad3a1479c28a049d2ac5ce 3046022100aa6d970426edebee998b8d4c1c1df61e544b6e789522c2292cbabd1ee34f49b1022100f3f5def94b6c52c7b81af5db0fa6ab1a6f79ddf4eccc339badcf65b3d7cac896 3046022100b43d095f817745faa67a4f114b77736a3bd8571d9b16a1b04de050515fab1e53022100e297608d56cf919e532867eb7475e1920d1599ca75b0d542e9e8bd2e53cb1120 304502207cbec642432cd365662cad4c05ccba3967eba2ac212b679d2e1687e71f488646022100ebdc2a5dfd2d690b3ead16ba0fd232b1d25ebce3d71d4e93e647211b032ef6bb 
304502207b74081d45618f441fe93cc4a7a3b326bc3865723db67bb3d81d4a3551ab94eb022100bc9e29eae0e7c2812912c8a4ea5d86a6c7e3171716c42c48d4e4602f67d6687c 3046022100d3589367fd57e1a02915249d766a1922c4a5eb2e8307330f3f933b40d3f9f817022100ee84b5427a69b2040257c178d5e2e7f2a5a9318e13759dd4002f4581237ad6d7 3044022039a7cd14560b6014a31f6146aef023922d89ec3294a913ce7acac4477dbbb1900220061cb37cba43997fcbc586541a88dc6ad00b6589c174ba7ac93464150c49574f 3046022100ff170380dfb9a02bb498061336ade29edcdf89b40b5014fc99e70bc620e39558022100e0a66ad85963113557081124803a428a6dc7010cc2ee71af040f84b4c586bf05 304502200ff9934e348c73a20583c31728702176a4fc55fb107811a29d3bcb2a6e87b362022100f2340ae48b57f1a10358500877540fbca62c5e08fe12068156a3ce808595c384 30450221009960907c3d7a15473b894ff3d4ee3a5f29ab73ca39281571c25fe7bb72aacf780220777691a1313ec28704d20820169fbeaf9de8d9ec06bca991faac415be158c8b4 3044022074bbf69a88b3e87d668cad886c9f5047ffa024405c304fa80540c33efd6477f402200da52e20e11377c3cb1531a3a9e57fc21b0ff8a19a731faf11dc9f811f1124c5 3046022100d852b21472509e480b417178a85254ad9813281937161db966b8cb876525c26b022100d0a6d716b85619ca44f8f3550943dbb2b04bc91e7d6242c62f7b3139412afa0d 3045022100df8ddff93a45b66a6b1d2f7d9202daee16621f366da93394881e22ce3b5fe230022020af699507f07dc75bea6b569abd6aa4498b58faa2ef623d51f84b6b6fb9c642 304502204778139a13fd9a75a079c73b4af7c2a6c3fa02d67c968c550590980435469793022100a2d56cbde27ce839456bbe5201d000658050dbb04c6bd1abf8dfc8fa9879296f 3046022100e712d036f5b2b04b404008aac146fa3f7fe3f191ac4468b8bc4f16bf62346db4022100a7e8a51ef0d619263567074452c7bbc47fecb3b1c4c2db8d25b8a220f3396a62 304402202061cfd3446708a93c1962bfec4325cd52181ee197b24deccc85b27bd13e04fb0220463e4fd4ba9fc41c5650d2f8a3963731220ebb305bb161b3bd041e1aa431c264 30440220472220bc720e7a70d0a6c3c5fcc2a7b8a2485afa9b130df78d11548f4664427c022072c89506fda2490822d9f534fb0185a98d217cad5f20494c464bd3fc2ce8060f 304402200a6e58d11102698a90cd7a224e03798c698c7891fa17a3e12dd596644e4555b202202b19694b186e3f184b64635a76a0777debd9183939db34fc865f7939765381ea 3046022100c8685632c720b4412d9b9030098edc5e2e9b0ce5107966f977f9762fa48f3e43022100e1ffa542e8dddc67472d25801cd868e49c44ba2549c7ea28f441438c3e13b2a7 304402203e51db757653d1e8564621747d30aba40a7ae8e5356b76eefcbd50acc0eab11c022034b6a23bb9b3ac111143956cf74d11d8924583783e9726734dbe88ad66514aea 304602210090fd82e630afe4119f94b00efabdd70eb36d2ccac7f403b3e11dc460cf5245fa02210096e91f1c3ab7d590d572a0bedd1ec99cb6fff789e5e6e3d0781d151b10600d84 3045022100aca6e407594d16e22d1a61909d1d4bc8a7e61e1edeee74e8b53b69bd1c2f545102207b3c4ffc7c811c45b35f41389a90f771008cacb963ce66282126c6f656910faa 3045022100e3a0f69d524702fa66cc86788c807637c0fbbcd36871b2086bc63c5bedc02564022035857abeed37ea59f85d35c8a600ad50b8b8145deedca90234f8de098edce1d8 304502202cde7039a67a49d54f06d50055de54504ca98399dcda52d63f89ef0c2e3c71cb022100ee112d6c1a7e80132fa26258bc5b3f4afe6f1d4f7afdcb7c9ffb645d04eb066e 3046022100fbdadb0b7446fdd9f26472dbd08daf52bf10e9316432352cbcba7297b454d21d022100dd5481669c679f1a8d8e8a79f5dc2c4d7bcffe738fbd478568eaa3803ed0f7d3 30460221009ea0f0d4bee6bce83b9f41ee0eeaa55200205c299c298b0ccadf1f1a978e74dd022100e3f37eb7458f9c778353f8db179c697aae2b0a52f1d81246e95e6a592363a80c 30450220200a3625880fb4ea18df491d586cf606554eab038ff776a1dec640455c091634022100852a969bc1d20d564e2a526fffd44215cdad98e1af3f5dea92fad23e6a131062 3045022035003ee3a64bab51d4ceb0241ffd765cf1aee10ebfaef11be0d654efc9033e4a022100e3178723e364dbda102c72df5b955653f626916b7eb3db2d3ecab65c04b72cec 
3046022100cfaf53e9b2206aa19a123aea007998c64314a8eab7c15ab5abbc05126657bb4a022100b5fff42b82a49a5c6f2d00fb0f11cfcdd5facec1c4c8e5827f584d741a359c5a 304402206221efa661847266370e87ab1fc7aa7b181ff0203c4513f7baad42e358718722022019ab74bc51fe9fbbe4f51ee2f17fda4d2c2cfd74047781e0b139dae2e74300bf 304402201570fb920371b07be4076f3d953f2d7284b5ab4fd97f3060223ffff2cdd87c76022028c17ecf6f66a370c8937c295f13fc2b1a50232c58859f7892d11440c3492c77 3046022100a3ce84db7bc137b269c2d8387610a92a5342ac0f1868a9b561bcb39e66f2cb0a022100ed2e79397a22253164c3dea0ebdca7fdd6bd702845fbd1cc51358f2aff4286fc 3046022100c16bdf122dab06a2b5a5820a270fd5c0d51a8ff51020284d0152c1000aefe6aa022100a9b0897fe38eee119e17050f5f5b978021dd403f77579484f585ab52e6ae552c 304502207e5b3770d7f2894c8a84b007ef1538f2324ee6ab5586986542ee38256adee8a2022100cd1d9efbbded61ffe28d7d87ad73b9350508b57d622fde45931ac728fface7a9 3044022014459885dcecf20dcd20b2b06d007e3e07d4d36e185d1b4afbd3f2fee1cf785402201e53f3797569280287b4a62007152dfe0d4129de3159caa8b0c066c076759f96 3045022038f23aa3a29b600e53f64e1d428bbb92f752ea15725761e782e509c794f6c1d8022100a81b20e0ddc7f187deafdccadde444a8e88c4c30d0fd74ad3571cbc150dbfbc8 304502205d5f2d5fa31573330af09f24adf57442739721dca35aa9e39e1e7fc066d6ea8e0221009444f931c309f43ecf25bf37819af9bc88b9b0c1d9c48e1b4492219f2193afab 3046022100c384474614cbe2d8f0b9556195861e35288b90e3cef47df606d85ec7a8c845f6022100c82087d4c3fa9610675aa99e89da2414d1fa16286fa0beed155f42c94bccdf08 304402200e189a6cc90b3d6859d3d4e8543ef89a53e553b06caf2b8fc37f32c5159226b1022027f4a408ad5fa01e8cd4936eedc6c19f7d3a7f327bc422f803558dd0c5bbe770 3045022003433763403573c9ea89c6f1340f05e9a31a515df14fbf6791021d00a5ba3141022100e84dbc77de6159bf24af6aaf112da369a4629bee044097c7b9a7a8ea4275ba86 3045022100a70e19ae7b61b7e37778f1ea55addd24308880ded2a7566d739bb2cf79fe8136022039d0abb6958af8ab831af6ac025b70bdf75ad0005ef26b38a5458dd261e6fac5 304402203b580b0909de8c19d559ec9f252841d0a2e01c9310338b45333a42e18cc2d8c102206e5885183f4d21f4c2d2c519d9b84a95df917318dce0de1149268946f0060aee 3044022053c6ab73349ebf4ab8272571af873301e67fc28aaa74325bdf921e59551c695d022074ca75bc56b464045aaaf1c35313dc136ae5da5299aba5e2a839580b551120ed 304402203b5feb81db6edff72cbfa5ddbeccee8d3ba2f234e9636326a20c7be7cffd7157022009cb6336e814015fdcfb64bc481879885155020dc5c3915a86eac39178a69efe 30440220383112de7d991df9b57cc5d39bc157c03d0aa87423cce6de8e23bae07b004bbf02202d1ab48d618c50dbe916cc7228d402b8f02c568c299a27e0b7fbe01c220987bf 3046022100f6644dce0c90930a52006b8e9a3c3227d14f1a10c624f05f25c36b221f14676f022100fbd1cbcd1a964af443ddc9639e0d72819ba9b9f37801327754a76de9656d755d 30440220785f428fd63a70ec3afcc73f103c54d1d7f81072fbabbe3e090472bf10009a2402207850c1a4660322caec53d637e61773ca93fd80b31585d86f9e0ea65c412c7810 304502205eb444d97593a744e667d5235c908ab1bf4be67d05b262c9f2ed37e840881451022100efed568a4d0b5b9bb3b970c0311de0e907f5a72889e23675add91d1044ae9171 30450220213b8d8aea306d1912abdd2e1e8b7ab882b300449376a9373949df526555b885022100f31ba5caffdf265de13cbffcc3fb80aa1c7231d1ca9634cfbc441bb0a235189f 3045022100e5e6b6782d82cf4d670fb1e1b6e22ac0937bdb8bbe7aa5a995c0e328fc89224d022068c3ef96e07908a53e05a89fd69ce649fe68f829ea6ecaed953fced03677b386 3045022100bb6e9b9613a5b7ef9679d57f358a784b1aa9c86f290c9fe093378986a210bec3022068f2028beef68e12110edbdbf3acf1663ab38f6d33194b6f69936f5b2dd4015d 3045022100c00c87e175f481bdc6c6818ff3682b8de1c5f3350fd75d1682abff4383e08d60022053b27b452347b724a6603dc2916075b7e7dab713236fee0d7cd0f0a1853d60a3 
3046022100d04cf282d7b550bdf3bc80d698979f3f26ea4e7ce819b2a4ceb2710ecc00d86a0221009952c793a355aa17252a93ea62cdd3171757ca1edf162429fb14f092b96983df 30440220758b321287b10c34a467cde5a63d3bc4fb9d3e4d473ee5e305e472680f0cde9002203adef16c2e2597397c81bfca12be8bb5f19fd26c1a4d526186ae9dfb0946f41d 304502203d3df019db8176b8c916bd44e96bb6bf26f275636072cd92b1376f3898f6c45a022100d5d7b0982afbecc8d730781efe546fd406a3081dfd95f3b7c270ff52ea2e67c2 3044022024c16dc727a1d2ce8db4e57f05ebd0b33df4aa7c072b10fd75cc375a46c2b0cf022058decb7b25543e4ebe680c18720a9dc97cfbaf574c871808faffd8872c8b3840 304502205110705ebd2b35406b2dd4814f998f5ed8259c499a05bd79ddff6c68d69ec94a022100af1a7f631671a4f65e638db94482f407e794723761d17396ec8afc11550a2d57 30450221009e0d767209a39b1fcb2283f1f47440add59276afd5f9640af64d7538d58d418e022008c274eb2436a62786e88814aded3e1a3b4c1031d46bbcb3a4860b8034d8e85b 3046022100eb002f450de0162c852937d62e11ecb31ba74a12e8757f6f1c78a184581ee17b022100824152167e41bb069a490069db6c0fcf56a7dd2798732d7b1c1a9d78d35bd61f 304402203e0de96c49c5bb22cdce9574191cc48dc47283b8f64b737dcc5bf8dd37a37f92022008e6ae0c191f0f206ded2d6cb465121bca109da19414c15cf11818c06b5d24db 3045022100fee5100f138ee86ff3362059de2628b86754f94724bf70f59f20d074ae144adb022034b380f3369ccb91728e0d9aef2f45c9688ec08f951cb6c68b6d13306d854dc8 3045022100a61e88b9f265b03c58f9af1e9111e44bda7f608f3249e84943bbd56d5bae2fc002204ddc0f14b9308560bd77f631fd4f0af8bfa270d6b0b9c8673bcd9ca149a77527 3046022100a4bb086f008b2256455c8201521170c8ea7fe9de3c37202d364c36eaffab867402210090c38dc38b8ee00b28e84d150cd7df63e2f6ec04a350b229f77a1b96263d23eb 3046022100b412d9ecad3b2d87667a47fe397ef0d49690c718c2a2673d0d1e2b6de07aa659022100de8092b016b98997d0b01e7a71545aaa37828ad6bedf152507475cc89b69e1b2 3046022100b8a758a0dc7e0f013f19baf5eb2c84f55d101936c51778bda0226eac40f1dd84022100cfc0d62232350c9fd0d254cc586e03f474ee0eecf906fec0d41758ae9f85322d 30450220387804528bceda7258b43b0cdd4ee7f7d4598388e0a47c2ffcaea8dfc39eaf2f0221009e962dbd51ad937110ec63222339612050fc829b6089908f842b3567a647d434 3045022037ee8ded1aeae4556d785c898dd58aa47f627802f742c3cfd6c5b6d54d786618022100c730366f7bfd00bab838a5e8a2666f84171962ffc5bb9e81fd87706bb0f49d65 3046022100fbab5ce008eef625c46d202c9b619236f3751fab8dede2e6ff8d974616422e35022100ee33e9f42f162e68f5484bfcda761acd65c233281d02da6418741e968162e222 30440220503251c0e4f713de62878d697300ac3e88a6b637a2f8e780720b8148e8de40c60220476dd39cfad509521e8e01bfe2cc74e0d782ea773a4b9f92b9c1749d1666e6dc 304402200ade73bc1ca3bce0cd7ee6a3a9474b40a1e50dd387e7af70e1c5285dca33206702201050042582f914af89d187d68c616ca402e8086e18910b07721d8c6f35e65c01 3046022100dc194007f8a969726edbfdc9d5e22039871e4b6927d7061cc5b8ed11a92dadf4022100e2e4c3a35799574b11571a6aa3cfebebc4308921630b61e7bdfd3c4b2c452343 3046022100fb33c39a1f4be5b92ac020769bb226424dbe3c034df433c3489b8681e0d93d16022100f36e4c978e0a9f8d31315234a0b75205e8a05bc267ab0bced36f3a616efcf4f4 3045022100fbe2b424e856b110eb3ff47b72275a26073ac46229409cf0f61528598d6075f602202cd9560b7a01950b688b1586a252efcd5c0ce9fdbde9b2a220ee81ce06617f14 304502204905dc02c0453de826aa11735dcebe15bde43feb1d96c1f88acb54280a616a2c022100c9c1526dc3351f95e4f96784395a7406cf03ba408694e74e821d6cb8ac48287c 3045022003d89bf3a6d703cc6464ff6d94d1a5cb9baf949a832f1bfbc563833eda17ca71022100f591b4dfc021b3b3736280743397a3c68bf00f9c2aa82925ad686862ba348a2a 30450220601ce878d34e9a7e9f02552a6e403d019a9bb87049d2d2e6bbccdbf7b064ff8d022100f3d2a3fb5b60baad17ecfaed71f701092d6e61b34f210a3e05731d77592fcb03 
304502202d459dd7061061846432026079c3dc24285a0d57d7cf714b9fec482ebd01cc6f022100fd4739cdac76ebec5474ea641901cf0a1ce2822ecf61f726122a38610d9ea081 30450220654f2caa90936066bd062a2d98f033eb3a14663b9edcfe90bd8fe02bc658cb20022100e7760815af53820de6b2e1708813335f7ba8b30177f6e1852555a7e171cbec50 3046022100a33adbecee94462c2e392ad0912738b6e9953f9e272401b5a495a754b258ac75022100e6c70bf126a69e3113baf173cec2846f243784acabe94adf7533bacc58c0bc36 3046022100e86a4e863301b9bbec15fbb290eae791d37e4608923de004f6439909fc53492a0221008a6d307d6a0a0894e2415f5a87cf5b45925204fb87cfd8577ecf8724375b2f22 3046022100de89f930cef8c96fa7aa7a14f124644f9fb74ad213396137b3a67b54100ad831022100e90ce3bb00b40af3e24f65666f7002084edbc2fd3e60a024fd6499b785815788 304402206f14b288d753770d36031a843054a68415745a3f46be84d739ad03b99db816b202202946f689cdefa825793337f558314cc200d1ffc74c67257fa1ca9f3c66cee7a8 30460221008d0ca9d617ff88faaa9e4d6c23d289f054303a7e14d7adc28d9c03326c26a060022100bd6a26d36df6c8283dfe94831ed20b10d5e7a1cbbf6995068e98c7cf879995b4 304502204a541630755db4e25339e91b8f60ff56e1fee4b1e9d33f5a9568c0444c3afcf7022100ba31cd8d57e12bc32ea93a353129ef82d45f51ea1076331d6d87eb55ed68e435 3046022100c36c646da314fdbbb534c4940e8151bcf90bae0f8431ac09d247dcc316c2c6f9022100c602d52437a8a363d2824fc946167c48ac95251e4dc4abf286d7563fa99d3370 304502207c0f84c2c8cbb5dfb78536fb0d36c420aabab59b91f2d89b3a4aac3e9b2d7abb022100ba98361a56cdc0ee1d3ae5ac8530584fda5df8bc5ade002031080a52ef11e3c4 304402204a7aa2a222d4e76decce20b8db8ade6c2024c364636aa3e3bf6e8279cb3f1f3202203ac6c9052d8d45865f5a79f25cd51f2e2b5ad356b0d135860666d84a8b9351ef 3045022100dea4c622e25a37dab0d3f6eb22a68da20d75178dee3d493cd4627112141f1e190220658ca518bf19b8f522096fafd7c38f585b52a5146073eb006e077dff716553e7 3045022009d742abd9a11341238cb19e8f07972b5f7d49c62dce5ae21551767febe02568022100efa2bf1b5e3f47ac11a8e5410215bcf3895d1f2f87f55889553dba8e025eb1ba 3046022100f31e6e19f2bb54f22d4f65f63ce51e3e09b7b1c605af25686f4f18fdc675884902210094d3ba2919da60f999f44f7ced868eae17dfa02797b332648a0cfed48b8980e3 304402204f88253824283a92578867e62c7abc934997cdd63e4a5e0f369a7a5eeb2b7cc202205b4da313bb001a00ddd580ed684059798f24d446e2cd151a246f569e504f2fc5 3046022100abebf557216ad839ddcc2886beffcc955c2b1257f22150569a68a6f4f70ff975022100be9c97478965fd96b35622110ccbe28782fcd09b032925fa6b55dbfb4ca5be5a 3045022078ebed2334de10c596b114fe75892c0add8b6a3fc0ddf009349c0d5edbccbd12022100ec258e28f1a44d5a048f05cc404746a3abd0521084fa211a6de6d2379c75e0c8 3044022048edace64cf8ba14e4821ef28cc25bd5a72d5dd4d09e288be08345871562115002201c0c4f0b001948a5477b22b55e35407ab9afd739a2ac727c4f04e35b069dda93 3046022100a1e9e6bc2110089cd36b28f147cea14463ed756b9fe90c244f1bef3d96f9b147022100b0559b5e244af2c8bec20c9d7a6b31c8be82ff0f3c91d87585cd6a60534372d9 3044022018855dd8abb9c5dd74d856ac45c4172c133cc333777faf74ccfac6295f6661ad02202067d853fc24c3b211d4c6e1af3bffef8df744dd9522119d07f924379fedf9ed 304402206c15de1cb2260d725efaafa4da5310e00686ee4656533da2bd658040bc637e8d022007348364ce4fa0352a5619e946738b9ab29138f342d11a33ed44530da9ed8959 304602210082e313b5d01fb50aea22fbf32820c341b5f9c0dd1f05a0032b5c6b6b7e35eaf2022100fe5d44a340204341a5dcffc099a309963a39b6df5568670f09ed4b14a44564f5 30450220628803eb90d99c3c7890557c3638e1910104ff24aba6d01ebe7092966d997076022100ae01b172060793d59463b9d9921b00467ae96902f30333c0e6793564d1b740c1 3046022100a8f85faf1bc008b65eae646f821925d2874a3df4499f86376d40bb1e8ab4efdc022100ba0f2516b0f0083393548d417161d5099764c71551708748c0faed26800253a4 
30460221008b604b89a69224c50a4096615315879451eb384d0c4bfd48d204b964619a4155022100aa9cc11ac4f3c9b71a985ffa35bec27b2968605117ca39b6b03af6969d288383 30450220212ea86248f4f893bf2b96942b0d5991c9848b193424f1bacb88ba4a6d9886ea022100c40702689a17159e87ab0b4eb5def001408c38b350a77a837093f61ef1ec9d23 3046022100b52220ffe9499ade9a6b79daa2a86640e8854781f29835a843480df47d7952a50221009f3b0300026ace24f554067e473f4362531e2bbc29b954a35d5630a40014a95e 3045022100d68209721eb058670b0f4c639261453b838c192dc32837ca8df7089e8574577f0220052e86bf2367c2bf41e72442c06fe63f96b28396b670c4c49c8250e1f6f2853b 30460221009dbebe759264ca7d6182e4577db318e18667fc204144f3b110a9fd842ed06da1022100f96a8da6513fa3f9a8853dfeb7fb00b3913e288f886de4ee3814bb8472b9a43a 304402200993a2100853da8b1c75f0815b0cc372199af1fc31bd8aa48701c04eaf981f1b02205181f44a2283d19f6c7449da69d6ef4d411efffc86905d4a1326c5dde1f5c061 3045022100ebadf48c49a92c380859390dd15c1809d137492da24861fcebe35736f10dab9902204790ffe17bbb9f174a3aa8965cd2b6a3a3453025721a47233f7b31f5b848431e 3045022100e2f79b6aa4a779d6e0cf6bee05462106d1d9cc4665a25209a91e343d26881fba0220378bacb81b852773fd7fe8342da7eb348ac7089adedf371938abc4606d652b7d 304402201d6940ec5f3af17f1f0d0b37373e1200959ae2581eaf4844e0bec35a69495c2d022066b121b93c9b0061b26e0c1175a967d7f998b8c885f19682758ff98338b0c63f 3046022100c6d11372492a5cbd7cfa0245d38b95eeb3c477e1664b59618c7b7f6406f14425022100f9fcd71c3253a80da1fe0cf1f15ee163f2ef4b6b86184707c48be5901b38a9bb 30450220070616bc9136395766ce429e6c4db594c6d6a9270059ed0237cc61a91340325f022100c4f1607da13262fc7ff1d21df20e67a42ded02d3b75c8abce74b063dca6fcb46 30450221008f424850211430bbf8e7347a80ddb456a6bf61ca3a27db8072397cd61e35f209022003599588c78cc1c94bba3fe09024cdfa0454ad85ef85195c480344d712884616 3046022100b660031cfd0a16ef53476175011a2bee3bb0160f2e29a5889caf8f2978e014a8022100d25c1cc0e3883e2045519afcb1c344f52657ae7e3db99b7c523cf32e58e53aea 30450221008ed9c2e40c81aa882ce60e68f8eddb4784a6b7980786ce49cd397be1d57c95d302203a10343d8fbd9f704618b71fe98865df1bd103220459f6eaa18292c1b00648c1 30440220596ed92c764ea130b77b799c6258af35c4f1bb60dda1b68cd8f0b2d087ad445902205d12a88410f670d0c999dd5e39b515581e16fea8f54f4c0b95c5e00595b32d41 30450221009fa9239bf8bf470aaacf4530f53a9b75cc42c6cac69afe56ce210f159f12b8d70220492ac882fc149d169d9f3abacc75eb8597ffbf90415522d46d861e28f2c01d2e 30440220347ab16522c036f0e84c8a5851f2f07c96296550af651849f757043095bb3ba8022027a5e64370d6bfe95c2011ad639b570238cb1f36c291e49a61985736f57ea582 304502201e034d2e8842c715c54f1cf337f51cdd48edc07b6ff68ba2fc22927c6234da56022100c1de3efee333473817de3b292341b21f6fcb37f7fa5642177abdfa8ec5986de5 3046022100ffc93266c38a0f7122d4b92e228278f4e597b21be744cfba407be043b9226aeb0221008700d41a65dc42f2c92546e6b5ba12a15ee9b15640dec526828163348d04e8ae 3046022100f977d7940e96ab8f56681e1255cbec489b6a64f74a805cc14c28eb716b390672022100e412f3b65c78b1402e766bdaef95552bd155c131e133bdc59fb8e176247ff29e 304402201fd5a2aada433ff8af27c865ddc145105c9fb899f0a7f731f15a46fdf008156f022058d3d6c1f20680ea3205acad0effe2da508c2bc6d1ac01fdd315e0af249ce2ec 304402205f6fbbba25acb6ab161d315f5ccc29fe7969fd0e53a90d431b49d39f1557466102206696996d63e796c79a0bb703f4580124944a9b6d759e657125e25d45d7883575 3046022100bfb6a65d98f515daff89e75e4514d027ea4c2d09e6e0accfdb05982d55295ecb022100e79275a617b2658e6d1344793b4f607672608f1ad4b0f0bb832acb04c1ed58ca 3046022100a712c95b5d17fe8ae47910102d02367c666fd3662042cda010fce1ef330fe763022100fa975f360238906c8c48ac2c9d3e169374f1a2220d4aac6fa10751462475378b 
3045022100cfc94a17e32db16994797a87d04fbb1f79d1ebb7e246141dfeccbfc49c403ea0022075dceb2a22012372b7522fe4cbdc3a45d9f1db0c04603ea86636986021829377 3044022074ba9c035ec8dd474a0fd2cd9f952a7b76f9e943bd75b384fad37e50b1e9b2d702204adfed438683c64aba5b37473ebc3462248ba8aabaeaa97ac88d99a3a6b9fab5 3044022066dd3ce6cb5def5a9733384d0d8c59a96d59a4b71beeb5ef320341101cc44d5002206c786106176d1ae694482c47f9096ad4343e4f33b46ec5fd8e48b95881c2856f 304502205b83c643f19b0fb22ac5b0943a773de40b2b9c9f7a89d3480215045ee60d0a3a022100f99d1d2ae2a08f8873297ba713501b0e39caeede0d18ec56ced7e67520fe6c54 3045022100f65b0b9731a339876d93cacd9cc1f4f3794ddcba6937c24c82171eca3360d3fc022071ef05bf62ed42bfeb3b0c15da8a5cadf25bf0ef04f796d52343c0a818e4b1b9 304402203ffb877e5b1db01272979e449d0c44eacee3606da75ae4b6c5c9b4ad6a6dc44302201bbac28c1774a42b650439060b3b76c899b2ecfa4c73fd1f71f07b7fe07f7755 30460221008d28f106ee47cecc9bfbf337157f4be4cb117faeb176518e5daa5778a9d24cba022100ef828466819d25a2caee1581aa0f4a056ffb695e0ff319481ceb777f17569c36 3045022100e2a392694284171e5ef3a48fd8649f0f7225eb55dec379afcf60867cee6bd76802201122f857701113cb62c886f419f2a2e87b95a2af39ca0d978d0198bcc1332fe6 3046022100c33d46b515be96489dc706e3c4dbf02039a1c1c58daab4c05c43f1157f3d4adf022100d3ed30a36073e90b0364a4c4e253e6468da838b16705b754f012dbd8fb19735c 304502210090557c61766a9b6496c4628504f1bacb88ecfd94df66e0de6b02b9ecdead2a0902207e033cfe5dc586ec0e7b174c241c08ca6f1a739ee0ec2f5d5d39afe0d2112683 30440220571c73abab196a10dbf3e4a872e41998390f6094f6f3e4b60b986c7deab6335602202ac9e257213a4f28a1f78b7447b50ab2d1608c47f179a51e624ff87abaf86ec0 304402201ca7e1d0b9cb87cb58ed1d1736e9785fd241e56e8c0a728f2f3abbab6bda1bbd022045f01a7805663048063cafae8d0a5cd9514654caa0f0999f937c479cbcd6a9f8 3045022100f551a9ac35ace0895a5aa1882cf5cec68cc9dc4650b80139494c577b3cc5ea3a02206138681697109feaee7d2d0a184fd5b8b73bad4ec69c65f0d5f629713802132b 3045022100b8e23ba7a064f455fdd5dc21573025f693770e2e12de390a6f80a2b6c9d34d3102203694e9527a8d872c7d387574db5060fa6a45edb9c8a888a133e9b001bbe65c08 3046022100d77cc88a7dde6a501e2bd8cbe2b804f53e94aa38ac95be7c0d741566bff63203022100eb76e8ee68b4f80c4982ef8b27caa8d143aa8d9bbc0e24c83576f70aa4695877 304502206af853224738f88e63adee4616dddb17cbe4db60b3ca3d981b0347cf5722cb17022100db192f8582d5366d941f2077748f0f73cb9aa89a55caf33eb7223a6a80c787df 304402202ea19f8c84acf1450cb2fb0a07c0db216aa4206737e89b78d4f54f66c431563102205de6fe969f2d843763f7bd471dddc1efc27bd4f25909ff2d2c16a36c10e39a0a 30460221008c66acecd80f4807e45f526f498c8bdec6478717fa81ded9a8949480bf730986022100996d0d8b30abc25859a5def0f05639726838295e3d2818fade20efb1a8fb0ab5 3045022035271dae9c92c5d1672ba8eda805b72ef4a126589fa286e6b86111ba790f69d2022100d89b31c6aa325cdab3035c5c749ba164dcfdb5a118f0e6dff3e6a1884886576d 3044022077d1f97614be805c934115500b9c32f780bbb7d7e0768efbb6bb22014d2cac19022068f7c4bc597abf6054a9580aaf59f9ed88091970896f0860c2e2ae8b065cc150 30440220500a8d6107b8db127e38b6d4c8ba2dfc844fcdb94e270ea81fae050c1afea05202206d6cee994809b0f7782be68890f8bdb21ef47f468e0ecc79a114d8978e791dc2 3046022100ec12eda7c55ba8852bf42f3140e72574e6a918d08fb149fca5eafd129cd6ab2f022100e6641e82dba747227395776d3102aedd007abcf1eb3e6e4a01edc0cf443ccaf1 30450221009105dcaa16d4a2f54cdddf9c7555c9434e6a7a6e34029905a1f17aeb7f182e3402201c7b3171a98f8b707ea405e3646291fb3fc93c383d66ccdfb6c2cc508c2992c0 3045022022ce04f52ec54bc39e8537f23bd5182c569a3360e7759f448c5488c4a142dbe8022100b92f11a21decfcaf20b431ecda8eed36747c97e19532f9e913c34ed296f76c6b 
3046022100fb468e6dc61434194d1a9bd4268c18889bcb4f749db6dbbd419fc65c5a3db22802210099a0b20238e533a086bd3a5cd6fc0e249a1521b727697b0023945f9a969c1876 304502202a0b1dd466493f0ff87c314a3914993fcadf1117998f1b830103aaf9f2ac597c022100e67f670b9862c3ac3b308c243f5d78edc8483bc1d4698d6b256b300ae66605dd 3045022056a490ab2b37dc0b5ac0b46d669b24fcdf8af07d38f6820b8a9ef2a6c12172a3022100869eb17e422ec20bfccf4f3a4a0cc371deb870c89ddc065d626fec79e985be14 3046022100d4cc6117d8e62db0f8d32b5761c6cd49b14e06cd8497184bb7a805062d248a1f022100cef2e2afd2054fdefc8d801542beb1ef8d9018dc3b10520227797b41e5ced7eb 3046022100eb755eb9a650376bea1d225e84a6ffc61380c7a5f5d444f3f6b6e9d6b10a9399022100d05605ffeadaf6bb5a45fb6588e33001ff806172bd2d72952ced9e596f4d14ee 3044022074d6183c8d87afe34173fdf7d34843c41aa95456048abc59a37b2979850d6fe302207b6e590321be315e6051bdcf38abbf4291ed9f8e7e2cd3fb8f948cdfa0ed332e 3045022100cded9ae190b3690347b40fe8e8b174010623909d58dc8d7abee7788b0a224db202207a5f38ce35fe2e501c76d39625cd235ec7e1ad5bc9475a032fe569c9757ae875 304502206efbbc60d54e8c1d2e2b2e10d0da9668f7921cda68a10fe23cdefeb10043b291022100fb1ab571391dd21187e97f0809f053fb615a57efa4e969f134a5594d074b3ce6 304402202528ecb001f6ff8692fcbea4780eb5ac8f113d8037269d7f36ef67a4911b0cae0220539383ade59c5eebad131e5c9a7c217013cb889b279423c938e1bc8dacd79702 3046022100b3f9475728b16e8c51ba177d38d05a7c0f6b5d258ce71dbadcaa49afa50ffaea022100ee0d3ffadd59e11fe676438fdeca8047c5a0dcb590d7e2a7030e440888db458b 3046022100d16a978a77944c6831622a10242a50e32306c54a3554c0bad6e0b013cd579de90221009c2c78ad0a54f37183eaf50ce49bd4ff648d18f9b2e16e05185027e23d67b2ec 3045022100a6c8d841774621cec80a44dec666dfe59ea26b9078fb1cb0016c606941247691022028b04d431784d501fae9ae0a6ae76faf14f034fd002223bba20e30191542d68b 3045022027931acfd61fc151a64df1ac416629b4f4831543c041ceb3d38c61587231ca5c022100b581924e022f39147d314f4a3f91375f75d4c3560249d419367b9e83cf6fa128 304502207617a26e493756fc14b7b604e955bb59f199a3dcd88558e1dd4d2d4a62c23fbf022100959c1c1dff16ae8f550da8899a89968779d94d10d61495eff396a1f3a9e5bbc8 3046022100bfd28ac076fcdd333c9d33319379523aca87eb0f773238506a968fe7f10345e0022100f34d5537776d4b2078e11c8ca393b57de451e076d3b4f058670427633ecac8ce 304402203fd842b0db31d4c8fdda88d1858066a48a2fae9dd4c0c9270891bc45ffeacf1b02203a465418756d3c6a8da0e5b479bfa9e3f7327edfd0c230d512c2721940204673 30460221009410c9c5a79674195431aef1079a2d17917272cd6692e24f0b0d080d096b2a93022100a2acb6510d8e25e224c156c0c0cd850c5d29f20d69c50648fa8f309610041e63 30450220106a10c1c91bbf104174df564c505071bf78793619008b3cd50cdfc6e4774795022100993c641aee75f8f604b1f58c2a6134b13ebab6ceaa7b1cedc99b26379fba158e 3045022100d378a0ffbfe969262f65d000a79bf5821a7a09e971f82461fa57ca59eeb7652602205edc0b15bc1d5d7a047d0c3f5ab583020e7c03602a8e5f63a1d2287a567885e2 3046022100ccbc7a76f977a71b4f792625638081ef12c91d4d9ff84b7abb62a7e7df676b9f022100a865ffb28702233d7e823612aa8539f8e7440529e4d536dfb31c1465b488a890 3044021f70857a57f167ed8dfc2f62aaf7d1d0dc49c0107d9b37780a4d323ff1ecc96c022100f98b0bc649688d92e008c192a39bff0da5645fd4dd77b254651554e7a592b601 3045022100c3cb44f1985f6deabe1d38dbceaefc76e892d13782a5bd475cee982c9f12992b022039eff81d2799f04c06151098d388a39925231030b375ddf7c858c17873cfc42c 3046022100ad416ea19fb636bdc57a762c1c4fc7554429371187174b4f8f912e27ca06174c022100de61766086414a08206b70925df6920f4587f49b6bba31afddb0fc1eaf394ed0 304402202952103ba542cd7759c7ad4e7beadd01916487cb54ef963fd6834d8006671c4e02203f868850360b340847cef5d95c5ad1a9f73b87c8fe7dbaf51df38ce19b488e35 
3046022100e046939fe94ebb15266a833804a8f61f0ec4a206fa06392be52c1b7a5c3243dd02210084c9199458b513a85d4021f920b87200ea299b9143c4e4fd91ed3d2039818a5d 3046022100ca2a5080fb54734de78de41bad91e9a780988d0fefc31128601c71eebf93fc56022100e1983b82526e0519f89d52ac2bd3f8321ef6f7ad11c608f697c70b4978edf32f 30440220264826057cfd631d6fa660869b35083cffb2b3d35d2078d323a228a59f9415170220447f4b8f82ceebd69d074fdd596269972db0a97a3ea49d0e3312127ffd6f57cb 3046022100973b15c274843fe052b48b55b33614797a5705581b7482927d6ad36e2086d4b3022100b9b6944b60b9e051a0997cba1032a31d77a48fdb50f96cce91af0688c62610fd 3045022100ea001adedc9302b8ac60579b69cf46de00124005e34a80a0b91767f134502543022062e1603a3238fc6b22b32df653238663e7c0d38d4150ff182c0dc87653a13396 3045022100f72d2837da74f68f56791ab2a0c326cb642626e7511c04f14d988a1cfbdaf6df022015518caa96ab75443014fa68372690ce7712eb7ea1a58ebff3a42930b72cb272 3045022100dad792ca1864df2b358348c9019a1c3b413ecac853ddb581e1bb2fa07c91ad9002201d818d2ecb05bcd6ed17318d2596caf39469c7cde4fe55d534b9b70d2e00e407 30450220460ad1ddadbb734b0747156b22438c256e6d15e8c185a6f1c2569c1d2ed90e93022100c0539ad2f28ca323ff2a6d83c49e4cfd324d378b866c138cf039bfecc4b0a94a 304602210088317711fa7b6a539adda9fd4b3563e48ec4ddd941258a3dc9930390dc9ce441022100a81684a9b76ae5c4fbc2315e2da37eb57206f625a742320f4f8bd03d4bcfebcf 3045022100ea468acb192c50be2c4c2a57545a47588da7cdde4210bb990eb8187224e870b202203cdf6518df0a1705ba20ba96693f2f05af65abd86a7e5151e1dba1706dc6ac1a 3045022100c35c61c302c519927c220bad020e95865f9ad2b1323355cb300288e49028f232022005b88e1a1556ad2b72c5f98d9a1cd3638f40161b1dfa18cf9424f38ad964458e 3045022100d3f6bb5f225a64456d66dacab21ec03d40eafac07a6f4aeba2f148d2c715dc7602200a750fc68a24b5c90c58417f23b611980aca0528b79a5930e9f4598c40512661 304402202561ca1ed0893bb3536792ececf7c068fcd2401045e0ec81509481d7742137000220362695e5cbbe0b479a713370f7c30a6c2622845b95138e806c1db436585f97e3 3045022100bcc9495fe3679e2c40b33a761053c7aeb6910ff5885f26c5c1679322747a41330220028d99a15d42857c9dcf3c9a327d02e2e52c6f263a64cc75d9ae362866f8863b 3045022100e4294de45b732d020aac9b46dd4e389e748cfdbb0f6e97e7678a7d5fd5ae95ff022045157ea46dcd56bd723935903e833fc0fae261435bfbc10de19bbee2a43a0dff 30440220716fbf6457546d629d453a77d943e15d16ec521c507bc07b8c18ffe697747a8d022066f5b348265a08f27d76939e733582c1242e111606b618b167c8259e7fb9286f 3046022100fc85c011229a482be8804813c14940b2402e70c40a280965886cbcd0262ed659022100acff7e38d97077fc90b46ec6902f551aaff737a3b0876fb4ada42bc772155255 30450221008d4791ffc18c0a7178674a2d57e677255139b90ef3c1fdaa35897e7d96e183490220430de2ed74f1de3bd3328970485f125138c9b74c59303be51b251b3b5cfa60eb 304402204fc29f2c520ab5d732ff2f687d645ea15bd87b48005c551c7b0aea9f63eeeebf02205c5688462e1136e2cf0ae240b40cfc35c78ab1d123f0b6e0d0dadee79eb1b74d 3045022100e98cf9d8ad457d07ae930548f36c848b6023e6e5fa94cce6af3549063a57976002206e3624f345d3bfe8232b84b3dde6e9c2b0dd76dae8ee5c3d5f72d0ea09acd08f 3045022049cef04f9bc7ee75d33a63ff350cccce509dcc8e9967bca8168cceae500b306202210083797e4aab7006cf9b11dc6650375df0ed079d333e25d4b265a3213470b94467 3045022058624c5b3465a24c3f875cadceec9c27a30384922ea33080ff28eebacc370b15022100bd2f5e658b372f0ca9bba2899b43909bc11ecc3a9c9a70a06decb94039e89f7c 304502200fd1836464bfe7f9ec1f50fa4a411957aa628998ce7d5186abd622a3c3c9650d022100bbb00ed6f96db490b396f313558be1bf92e40c42291eadb4e5dfcef22355fa3d 30450220285255a6f3d05f98c13cf50290e7e5cf6e2bfe94191bbf628a40b53ef5185471022100e70dd69b88cd0bf17a2494844a1df99d16e02ffeea0b591d5aea8e5e8e9fa563 
304402201ab2cc3505bb09de689c564b36e1d1beb19ec644f4ea755bbf59bdeba68b5da6022014b75d359600e37da336e9f9cb7472fd165a1f16ceec718269787e48e404dc4e 304502204a0841f4d7dfc5df1259578fbc82b36322bd8492f2ec4ce0d38fa347f2745e02022100e00bce69fca0bf53531609bb5426413a029f5d22bcfaa98c7bcd9cc8963a30ce 3045022054ea9359c27005ec2de0e849f1a5995e41f18cd3102d43b49426bfdaf32c8ec00221009ece2107f24d7f84ce086a4a7603d76770ac8a5c2fb52537764ce8ed6e74489a 3045022067a8051d9990610512bf7646bd33f510aef9b918ff44aa63ba9763ac6dbb7673022100cf9b9e65a73bb4629154c86bc4d58a27df7c8b7c4d7a0eb4dd40ca82307035c6 30440220030a7424f38b661e43d512bc50e3935979e6d0ebf27a0cca445cb2ae7199fc9102202b690c22b08bb75b0942ca2851bc82b4fa37fff11f89d351a3003e6b6fdcf395 3045022100c7f29961b3d1bf0e764bc7b896b80a8609781c3ab0057becaa6fce8c5c033b8202204100d70b710b9b9cbe736d5b61d9f2c383cfba3ade74d6b4c7681f9277fae3db 304402207ef05db20d65136ae3063bd9af9a6fbcd38da9f0deac36ab374421ff054c10fe022007fcae685c8cff745e6ab8d9717ef5299d018f18e5d1c9bfe0a7d59cfd02b5e7 30450221009233fb5d77e50af5f304d218a4629193d4aee3e7d98f54a3bc887304c553517702202fed9be703485337426b0e8d14f3c8b05d204579a14d588a72d62615bf95eaa8 3045022100873337fc1e7df7f9ca4de95c1a15fd083e1e38f572c389413995001e7b293dfa02204cac8302c9e3c21076c41f2f994f5c35805a25312fcc55e308d991f829a09e31 3044022067bcf20e039c6c7a08a2dba018944c494c5d376d882d983521ad50b9df8a2b4f02204e45602f4788dc8f8e523b96fc1d092b4948513c05ec8075b96bf1afce738cd2 304502207e462c8da2ba44c7f99e080f520485d152d750427470e5055d14e3dd89ae4139022100abcf2bad1d53e9240e95090dc9a25307bc2c464e8100c0cdb04ed83b0ced7450 3045022100ba598fc67580b6effd0b9ca75c6ab0610a48f62b954e21ba0f7b0f9dc3d69e1302206a6c5c3a7d5dc41a962800cdff834024c199e18d82afffea7376b000e1620e1d 3045022100fd97efddf876118fe9972a72b64153fb1ce9111a50eb0bd3cf2e8a785cc001f302204d9201ae80dab88d094df62a3123d0881a961443ce6fa8b394c17eb0f02e3f2e 304402205922523def114927de35a6e58b66b8093f84aa48f96a5e7d9eb0beb6910ffb8d022024ece443cf1ff93fa76c4d1586063bf95eadfed6c4f6d651a9932340f9b6e153 3046022100ae84daefc08946548712805b297bd30046b8d95b1af3d0cd2d88e807f3b3d94a022100c06d2d2dd21a44e26ca67eeadf352aceac632f525c4dd36daabb70e16da502ff 3044022075228d7a32a9156825a346156f5399706d35df1eae7a58d3c1cb4149e327590102203719908d85a058e82e307ec5e0bef841bfc894517fbf9134563f9a7550afbd78 304402207415abdcadf773158019c1c567f0f8b1f83dfe065022f7cd921b35b3c019835802202fb214eda40ef5f36d80a768e1a8dfc27ffbbb6aa8688004f1414ec8633df595 304502204b1a9a7cd81133dc596766f96b72a895ce14a21ab34a47d21fb53d5e3e08994d022100b2f8e1ccf44137bcdadd71f14c15dc87f820447a25872c4b2ac0783db6cc1e95 30440220469c0e3987e47ea2eb75060805156404334857e740a4992cd15901c88b9fcde102204e95e83972fa6e10368e501c6924fd6bd868059a8318fdb4e3c759d3a6ef58c2 3045022042ddae1d3f9331a5d557498b21f53838fcf70f241fcbcd6898f93314d0c6f418022100f0860b6f6ce2b5a94bc00616298a9205b96d839e6371a4cdd2835097a2b86578 3045022013f48201fdb0b1fb7424be381f33b433766623b441d0cec2db1c084abcd9935b022100b6002dfa576043c68fd7058994e6d0083d29dfb89f4f7ba27f00834ed4dd1336 304502202aab40b45a749213d6b62f528939639bb2eadaaa8bee2efbf1a3d038ab8c5e01022100b56261def7f6c62f49cc70e79b3cfb807b95b7554f565320bdf3738403e11634 3044022026323e1a1c13f530b1b124e7469a53545fa936c90eccd0e09c4c51600f16de1502206d2bde5d31ad8a61e7eee6729ba0525dff66d8a6ae5c97e91259ad26ff88e075 3046022100e66be4104b62b5fb0fcb769c5b9d7514cfd02ec58bb9add4e52374c6559f97e90221008cbe4fa3a271bfc22d7932671a4c603af97f8042d49a531367cf65fa0bd0f45e 
3046022100e1f5cd9ca6dd3ecd1716ee5e75b2bf4aa17c4dc6ee4e28e4fd147b70a0ea071d022100f1dea2161748074c93fa792cb5dd18849f003ae3faf96b10466d3d1c6061a111 3045022100a1097d2678bb3e22aa42e7f7708ad42b326e92f636f06db75128ecbac64341b502203645401de9e1e88baf697f69c712a583cd87fc3ac4c6cf55c4b273d90a2154ce 3045022100d2a3a0b18bb04be79e995f5ab0747d0b6da5659414d1e62b8502f26f33d1fd760220317f549943933e553cc28d5df3e4aa6e31909864bf1281c8608c72461097d81c 3046022100c728ee50aebc78bad98d77844ef826af3711883efb8b5867cd2a4d8dfd054d6e0221009e01aa8e7238d0f9718b9c5481c99568809b645c9a30ec7b3356d0ac19475fff 3046022100950ed4ef48012fe9da316766f76380e1e0e35e1199775128fc59810643a3020e022100e75be0ff059fb5609e6327009e2418f3bac1541110ad78ae3c3a7c6e3aff0ccc 30450221009b31b4ce61e1070d6670615607b4ac25d3b8a4f8b3b0bd69f3943b123a20690902206e4fff61a57889bf642a94882093d97fa648f160a74ad000ad129cad9c7158de 3046022100bdac78753cfe881538718134620023d165a893ae150b8971bce6d7e21a49c1a0022100a0d0c7de69a40175d0590887f672e00f8bc370eac7ee56bafee87d4d501e19ef 304502207e6d701eb91f54b51ec5502daefa820fa7d961fcc3e1dc3ff1a49288df57099702210094ac243a09826d8d5811d1cf9ed6cd92a8c034d823ffdd8b1d5e532f7f53e439 3045022100d65ac3d8c6d62a3f8a6f3f91188dc24eca698d7725c07b8c4f6f72b5ba82c4a002200f6d360a4f84e00a3896971258071b1f63a781fc5ba32f9a636c3894346e1606 3046022100e87ece481a168e60663ba5fa8dc2022658407699da5b78854dfb8f4714ac4892022100d33f3ad10b4d735b2cb58054759016eeb3d5870a4107a2adecb60ec453fe72da 304402203042c694fd3346a56278eb4f8d0f88d1bf764b2fe4bc659f5b489342a397b186022036a86fc4c98c284f4b16a2b05b444930075d49741589b17cf33e4de644fd7b88 3046022100e6c999c5269ddc756886155c7552d6a6229e25135e991ac34f9b431da95092d7022100e47798a59e06965d8f245f754407e0d63d0c85a044d5ef13beca11a0b73a6954 3046022100a6ddab187ef876e5b6e6856dca0f83502cbbc548e0aada819d396896eb91e9f4022100aa0072d384faab032ed280056565c7be0e581f11176c1057275d269813f5ee3b 304502202e72fc1f5e95713c978ab57dd43e0342f3f22c8f499bc5ab1c6537f1d2ed4b04022100946cb342d18662e199f959b852c132237561eadf984b1a2219473e1710246e6b 304502206874de3e2cc404a8119622197585635de0f082c78d3c236c234717cadb837cf0022100c179a89985a5a7e447633251e96d94711d41b5bc653f8a40419ec724cf522422 3046022100bd9eff5a363b40b6fab3c8509186c05ea7c0682f6e857de2f6d3f953ffef68ff022100d93ced44a2688afb113f0360697e6b690e99faed75d8ef00533cf2ef591b986f 3044022017a18bffc7a7033d5ccd29f4dabee661bb93aab0ad837cca981334d55eac0284022053beef5a63b5c74f1596294ec00d828edcba83da9efee65d587928aa8340d51a 3045022100accd125b82832afcc34f388bda46f51b76cd8066b689159622148a308fb3108f02200a483a62d963b7f96efa4be213dc965d47ffb87c0bb5cf490a47c3fbb1aa907e 3045022100f5c7f8c3c70774e53fe463d705d8ecff67b8392c73267397351dd3ee96ec490702205db02a8e45e18cabf1dc4021c417da39c71f70465000fc2b350e0ffb3b29a922 304502201dee027ae83307270192e86522a9c82b5ba28b8d361a0cbdf583151750163484022100dba886837372639cd5bcac0762f481b825dda2fe2793be60887b30954d11baa4 304502207218aa27cdea1a4ffbf9acb7857d780f227a7303e5268e0512d22e6a162cab18022100a0b1809b5c74b2480cf4283251304d236a491ba101beabf01ddbbf178fc7d3cc 3045022100c3084ba5e26dea00876f37a3e7c0756accfc08c887bad357eb4c5bf89693e8d702203d5e77f229109f6189a96cfa7edc28e1c145fa16c016e7aa8555d847cc61095b 3045022100c3ba9d39b5040bd10d0b6468e84f6326d15b34d3c9c275421d2644af90052ff70220731d01432a8cd3c95299d866ccfbb0395aed884f3b86606317e538d6d2f584d7 3045022100d054278dc7a312f602b855af61d12e9aa8953112ded984aa537762156f9ffb5102201efbec6450339ec6b170f775cdc4860d2b317c57360762acdc1e488a9d116e44 
304602210096a26cee8556b4a757fb9bfa227683bc4bdc5d360f8ba08858dfe564c5ea0daf0221008791f4bc502ee17bd13c8f3996b3182f3741cbc60f2d83414cfd51ddf1faf91e 3046022100c8382a804472300b03abc8a484468a3812cbb31f766e230b089c4697ccec10f1022100dd2e5e86f267317c9965931c4ca17cd25f17be0de9219258544732b760cdbc1b 3045022100c680024b5a944773269bd978338a2787c9d317012cee765b5419a962549db40d022052b1c2a01056198f290fa251201a64ed2e9b3fac1947262356c0c620992deba7 3045022100c2c3afdae6b5930864e286e1292a4ec319e57f3d21b667f042e10142ce477f7202201f242bf81ec8b983a97451232b0d0fe24ec9523b9f615b508aa5d65df470c428 3045022100f3294864cf192c1bd8d242b725e4a2e62dd4431ce50a79355fad1d0605ac5c3402200e999121721bdce85a0cf5d8442d83965e207a83f4b16537beddda42ca3a88d0 3046022100a4f1539f2fd21e13473108bbb6c9c87d7fbdb07092b8fbfe1afb36f53fff8999022100800759a49a79f94aadb0eac312edf02c0f9b1a672c067f1a3c6a6c06f1184811 3044022014eaf7dbb1bca6e3e1b395f587002cb95ef321a5c17154042cb6eb483d0eb4100220057fae078298b6f4c9355a925b03816337ed42be3eb22d197089ee9fa5e3aa86 3045022072e1d92e0a5ca7e55cf70d21cf76b4592208aff9317518de840496e4003e698e022100e8d874a2537649f5325833ea9f7cbb598a4d96451f1e2e546861a100bacb9909 304502206d586e75dfbfa4a61f51b6658ab84a1b67bc4a239c16bef7022531c67ea22ea90221009b7e2e8202ee801bc3de512d26fa26289ff7471e1bc34fb151554455dfd149a0 3046022100c394edae3b23a624065063e68b432981e33f6f4c08fe3396795500305c91d0b10221008320979f13b52684589bf2c0c6c09314ee0e3f7bedd558e799d549971a3a72b5 30440220384da2486f38af0f22876bdfaabd5cf207bceb2f783c827325d8ab82f20b571302201df482463f3e2de546d974a7ea731b5b6e0c384cffaca64ed1d943d3a7b6aa50 30460221009f75d2ff43456822e8e03ec4fc5368b801e2ea1895ed3e42fc215880788df09d022100dfeb60c9eceb15e8d9d0602253818c1db7d4b45c39e7fde4f668acb2831c0e45 3046022100a25d69b6d220df25be8956dcecedbcd9602790a949882c71bfe11649ccac3aaa022100f8ae44714829bf57ee2af87dc9a014ade8e66966c408a53345d317075eb25f2f 3046022100e905270f7a27f45414e0f24b3cee41761ea6aac191aa126ce014dc6ae1978ee50221009168029c248f5370ed19382813f7e6cf1b2b9bdf14d818d682f6345567af20be 304402203999170019ad7f70c54300371c92103130a095a8bfd74d8f8a827ac996e8c6e8022064f39edd52104e25d94d109b3f42096eaf0ba2bdd5740de0fab94fa59e4f8004 304402205e2b44aa1afe8235469d25f19ec0de6f4b33bb78e380041403ca3dad38ab54a302203545bff7a6a01dbc1af0e18b79ae842e24c12e14c76edb2556ad105ea0cf0afe 3046022100e653feec9ae80195513e855a85fa147c0022bb73f9678c3e523bc621816bec89022100ad88b941424b4429b8e3e2e740a3993c7d4a00d6114b499398a4b0713662c690 3045022013382f5c3ae5d00bb046d6989f50ed76da2ab0e6a8b61a1fc26ebadd058e5737022100a6bc6586005a6ae11b24b33f019533188ee01247c7fbeb001eeafe15938b8d03 3046022100f0bab21a25a8b1939a3a5d398b2ca875dcf0d34bf10322ee2d01903492077131022100b55a3b4257dd00f65aaae203732d9f81345d15f1a581ff5fcec756ca0dcd8b34 3045022100c35d35b9182086f08bfb7b523e51d627d657ed22a2529afad53b288545e645e5022004f19a5bfea9cc4ea39d7524f950195a8ec6ee1cf08109dab8b023ff591056c0 3045022100e323ef522648c94b551dafdd01289bdc96efad43f3f71928c13986932dcb3fc90220109d21779a7adfded12b9013d264552cc46ef5a15133c93801b3832b6f559fec 3046022100d96defd3bc96c3d30f3b2688182559083333aee22555d087e30f35122ce8d780022100a4fa63c8e1945f3bd4e699f5c87eed4498eca957fdab988ce8dca1dbcf2df5af 3044022075db3bbfd17cfff65430b99130228bd36a09255f83febda6a0661e2db6ac28ac02200ba01f58679b1e5c2c84612ca9dbae00f6411649f48f686fe5dfb74060cf7611 3045022100dc2ce7b029c7f6ddc1c9d1a8a712717880b6a8a2f0f9bb811b75eac382289aa102206e9fd3d802d3a54b68bd57a06d70580f7d1e98fd6f3f075747d7338a4c325bf2 
304402201371f8651e5da416e26faf5e9bd5967592835e3be4def0489712ef173c204db102201e86ec233e5819ea3193606c15ac137dbee5e2b924e826e4ad7efb07e1725e6c 30450221008fba08e6ce0415c8d98d7bbd6522b48fc227da63412c23e4893bb432a23c63d80220524edebe3636af2eccbf5f83e0f38bb2885392dfcc030061fb9661532ae54402 3044022055e458e932593a04aca99fff66f0fccd2d3d4bb834a819cee4520fe2d6a56e4102206b7a23b7861b862855da8cc8428bfa5d365003df08bf4d56b25fc2315ea3922c 3045022100acdac2b2d75b0c7aaffd87d5319a1f4d9b026bd008dafa6ed231eeb7020ec08d022007ed775f6b2438582b57e8968e7e345965640f6e1911fda38ff62bafaac59b9e 3045022100d683535c694a947fad6165f67eaf07249ece353475ba1cd8cab59b1b487192a802205c70a61e4afb590391e0407209c810916bc2473b33b1d047481835232cacdeca 3045022065f6d4d77824f54d4a4b3cb4dc1ba8e85f2649b549675f13fcbbd68bc0c2c0a7022100dd36cab9da2003c314cc4106732f0ef55ddcd7666834ac0a52b1f3ce129d4a31 3045022100f0bd9875468828472fa77ba304f8629c0cff59ecf2da44d014a25cd33369357502202b15c9ddb9cae8ae3f522260ba5f28aa5ae2c250cf3cc5908388109ba81cf55f 3045022100cd181ff7246966e60e444799791e9dfe3509227ab37bb855e792502911bc3c5b022037633f281f2ddc8ccacb381187b673ce2cdab21ef9b19c359c46c5df94ef709d 3046022100a8be2f9024ebf0530300f56c4ce574b3f31a32f9cb5457d6e70dcec05b0061f6022100b8f8a707c4fbb642d04b4e8645692181e49de1b3d271c51adf8824ec42149298 3046022100989578e9a24302308ce08fe34b08445de39567b218d7a29e760db73ca328879b022100a0e6801c509464a14b740761e62d28e2f4d7dfa9f4d085cab44d8c0cc11cd965 3045022100f54f03af2b76a5f15ef1ef467a8c0fadea4fac698f4b5f892a268ed122c9aeeb022026f24414f8b28279fa86dab9d4287cda417a586d3a8345db6ff85160559e25f0 30440220422487132b181a3cfca71651b8fbcd0cc52cf777541ac925057d3d54fd1d40aa02206a1e55559e12a87c4500676f0011e6378a25568fce59cf99a82366e3290550e2 3046022100b508f3fb5a127bfa9ccaad3ee623afa7e261e7add977b5746ba44c0c9d0b00d6022100f5fd50522800e9d3a49f0f786b892293f92751bc240e38ef845ea3bc367e326d 3046022100fcc09da1f350f2d4073c1cef956aae1dbdf230a446e2020380d57998affaf4390221009d7c605dfc9ffc8ac520bc992206468c16392e06182ff0137008825821357e96 3045022100be2beb41b4ba327c86710687bf43f8048324fa8c2aabe21d73494a932d3c754502200ebbb8105680901f04c4effc3562a105604019b9db53371643ef77eb33a83a35 3045022100afd2d481d15ffda0779caf5f1ec86284c3fc0147c4d3f1ccc2b1ede099c21fd702202ca5f0cdf3b42b48d0254a8837e6c1eccd26b79271791e9c5b1faec7f2ca0028 304502206394a72128f73a6de81c6e09cea2d4faeaa31018826098ac1b2b0b4fd902d7f1022100c87b2d0839ea654c19bb4741bf78e23df88334acc2c5591404dda2f0265f6916 3045022100905385aa609cea45688ace069accf836c725afc1ba1c76fa20b3ceaf57d6d06c022022852ef9e659e01929ad922c16962ed7118104771739fc3aa927a580100fffcf 304502201e455df6b27ed8713cea20718baa5e8820e4fdd3dde187a528114ab41bbca0df022100be367b238cfce80ce06187a114006663ff65febe12c194728f017de419b0f516 30460221008020ebdbde62ad08ed427c6430012f0459a4a24a738fbc062c2c217a04165276022100e49a200c362673e4d97f35b87edcffc2e227669efd87c56e96ab1fb13a39ae2f 304502210089e29046521ea87aedd977ae822d413ded9afa63cc4cc43674c6f0f676fd16ef0220360723e8defa528ab06165506a6b9ab945ed7dae7657def3f693e3a5bf9af069 3046022100eabdc1244221428132bce0e884d18627a9e28d655c0453b25f45ad4cd3af49c1022100fcdc214b5fe0023c44db333a3b60ad91da39bb90484f9bc54463c4f62dcb51a9 3045022100fe7897214bee5283f4c90c488be7a11bededf08e85bd02ba19324f57409952ce022060e784f0dd728d13faaf95ec297745bb6cac0425ebe2b477e2e6355580921fb9 3045022018737291e08a4bfe4db83878a824e6c01df020de5f7609f1202dcae9a2eded84022100f44637672b5d927db6da8b5ff00e52c921a390548cfd93682f7acd3d51d5aed0 
3046022100813504c3fcd445e6f26fb33ca179b751567fc2f3273e730a0647bdbcdf16bde2022100b7f969d8444841acfb33f7e69b697cf3937ee9acb0c5956a7706d461d2992aa0 3045022100c82f8617bd16d7ba7f387352e7c9adb2600a3e5e3dd77245715416a0c5d08676022051d778a15ea959b9945901220809358b082f50353e79501ad0acc3b41d68be6b 3045022100a51e53c6f0e8fb451d4ca15cbff6d4e5a0a6a37d8bd1a58411cfd238b7fb7a340220188f96202f03361702a89885733587a95527f32102f67a026a6ef776ff1d2346 304502200ba9d5c303d6241d5856354c972601cf1b34b7a2743b300709dd21ddd3f760ad022100f4892f73f4eb0585b3d40ad4b5a24eec440d428976a8341e5fcd57741c243648 3046022100a01d289cbb618d424f73b657b3ea951afc07c4312b66fef6de892be092072d61022100f39d4050cf4ca8d6f253b247713c34fe985928edc3cec02da773ed2824771096 304402204981afbfe3272857e525105a908a901e951f77dc2bb9a1f6909844e7f79a594602206ffd790f0b16be8589835174c6de248578273fa9a257ebc5c248d755101d9b46 3045022100983b783ccc5d0d1bbd5bc1c62d21457525dffec1d6169ffdb3d15e71b9fc53a2022068e1debb86a25ffc6fbdf64665a0fccaf9181f7fb1322f02208ef111407a0bf4 304402206686775006a0e1e977f111c21284b9c25f9d06f1b2a2b84d3ce4b84981b42d49022062349e127b186844adbd8fe61a3ed068126a6b13404756be2e90207d9a6c0b2d 3045022100b87a33ab503dcef86bca51adc64c7cb1d56b1f830aa6efbb84b0ef076e581aa502201b9096ee5800bb9d09bcd9c21b89b5d644c71ae504a977b158f0e2bb9679b17f 304502201a74f7bf18119953b24a7a025d121fbe0e3d2bf3ebf5f16455e2d6c6f8d9ff7e02210093288c7e74fbfbc3fc09c398af68c0ac4c14e66996cd00100f4405313d62a437 3045022064877efa55a5b57cf3231ba1c96dd2e13c77ecae111296a2c9b594e720170c74022100df642c46635a95a27ba0d01208c4d60eea184dfc9eafbb71c87cda11c5f04900 30440220786ff88622b013fcb636b0fac972f7b9ed3ccbaa21f1ee8dbf0dd3756172600502202474d3179da8055b10fda325fa8150e844981162827613e31ece95709bad7e4a 304502207800482d3e550459249992583f8dd66a8abdcbc5385ae8b4765e55dc65b7ff29022100c86faa79d8418f11166d02d1d9b5c93e237b8e61de1f4f3b3c7d5eb06c9cc647 3046022100f72c1c7576b7a437782e2fd309a1fb1359cd32b5fcf929d50496232845f7998b022100d4071b9930789af679ff1f79da4ac0f92e13f920169536559827df7a0f6ced9f 3046022100ee493373ae342aceb1cd771cf074dc55b535512a07f5f9889f468841495d4eb302210089f32004910c7aef080b7a39dfc3a99fcbcf64c412b1d1150f49dac652009748 304402205fc7a0fc1cf7af2dd3e6c30d841e1e1b8a78876eb00f067fa2994983277200550220055f2974ff61cf07e9e102fc836a4b0311f432b6932c4cd0049efc18dd9d3a6c 30450220015d07a6468a8d37a1ba854ca5699a4bfdf8fab58657cc3de6a02599a706a137022100cf19a2565c16143f4f070cbe4c9dd610feb3d1efdaab95f71aa67747c9cd8429 3046022100f54298518bbb5bd186100f35dc5dc6e68b30e9bf9a7391ef30332f67dc5be6150221008d840118e5c117bc5219637a424f7f4c8ad2f6c25e970a9a5c03b949bb4fe2f9 304502202d04afca1e6288537e888a644e96027cc4706e53a7763702e7ada094698782e7022100a772c03f713983f4956f00618dbdb2424524f1098bdb1eca7e0912b103d121a4 304502207e26f7fdb34f5806bb9f5335b93e5b64f99273a3310098248f77402456600233022100d9b42c477d79d0ff6a1b6cb01414cff07c64eda33aaf52d0d9ca84caf2b8e29d 304502204bcea419b64704b24d9d433c594b7b944e4674aedc93ead3e4dba40b62e06621022100a09c694a60a3a13747f4a182e454dc87f7d94abf064d1c6d723e43a0ef9a9b40 3044022062424094c2321f6bfb0d750ba0854bcf4883bf94606286258ec68a9bba326ea102202460b494e6f7931ceefa79ff2dddab43c77d798dd78f39e85580f74fb6595b75 3045022100d2d961500f6387ad36e98816673e29e0f8d15ce176047777266e1111d4750767022079f84ac162d34a5cf45d3a7ff597a9550ca1c3520535133e88dd02a8e9fbd2e2 3044022011b1c4332d742694f4528097d6b3a972fec436e51b506a81feddafb122e17e84022045182d27e5a178da32ac9a35dee9cc149844b2ac76ec657d295fba2943efc6fe 
3046022100c6aa06f4a195bdb8918ce9bc06e21c48f9db1185e75c961085ed85b4965d4b3d022100df7be82a1501103ba587451c31b3f5360499f80c799cb7cc9193b1ece4d7e171 3046022100a03dfea80714105378ea5849a1d66099b7d8c84aecb7e8542ec85d5af252e549022100edf44ad037ce4b23a7479bf5f8e3d0d4ecd5bfa8b995a23f14919b2d4fc3e4b0 304502202f2ce0a6d79bea55c5b0fc84a6c63986e7994627911a4acd3d0e9927b1db2f93022100bae8aea795c3260d1e4ceb0d6a69e94a65c4d1ce7d4046c4009174fca0f89a23 304402200944ed906610ab56c24f0801d424a743dea27de5b073b68982f4352983a0a57b02204a3ba735703ad505af9d04c612e301388f5e2319d58298fcd8292b84637d7c06 30460221008da9a026e6f5e004aa2afbd82e5e45af7351b67f84eeff6c30eaea2d724cd5d6022100d7119fd823b9859ff3222af6c7f1aed2c3a128d9272a237fc8a3c91847fd4f95 3046022100eb105c4342f1b955c2ec65f601afa48a18bcf99ee3309b41d6019ca9b8e19513022100bafe8f1855ea650f223f2edf35632042fba3657deecd6cc7135c97c01e1a4c65 3045022100826e3078f72462db1aafa741f45747a1f4841f8d6becd6646ab7db5b891095e40220223b19d1339e8f275633509ccf4668328395883048c128cafd25e9fd86f5f5fb 304502202bf6c0507d65166429f029c620c757b961d0e99a8aac3e27490319406dcedf400221009a3844ff2f76417c067ff9bf853b5cc2c611c28e06e438d163446d6d2ada72dc 304502204ea6b09a8e3d0981d6d658de7d9d845fd6ba2e54c3dff8664d963a24fe6cfea2022100a2d83eea6c0bb80bdbf0c796bdc6775924a8a457eaab91519cb35e87c2816033 30460221009befbd7e353e5c6d3ff5fa6f47e0ca79329b3216c8e5172513f2cb33ae177799022100b5ec11dc08887f21c360ac9c7fd163482b0ae85f63f0bafcf0ba7e5357b0fd48 304502203ed78712e3179fe38ad99a7356156ed24f4da2e4cafbb853971bac4ec0c27005022100d03c9d2a6bd94368e055400d8863fb163ba35625ec95328508b46c4779c2d620 3045022100bb5dfccacb06a31d132fde9db89e1fa82d1c8601c766fcd9dbcd0fc3e7b818d2022035575ea8c3bfccba2035b6a203744bada4d7a898113cf7a5a3e5a10fa6a35d8a 304402202c046720e6023540ef2fbd7e0ae1de05688cb098eb5f0a3b7ed07c840fef61d9022042cfaa569d7b1e626defeec4bcd86622a74b85fc5769b0e88911b8ec90eb50eb 304502206d1d10612a1abaf1355cc268141c6acde127ff63554fc51a028f629a1f0e94630221009c425ec114b9dae1a39e90733e503d5446faa557a23b5150b07465ce1b9cd408 3046022100e5a748814fe4b7542845a5d94b440d7a6468fac01e10c54cad7c97127c852c33022100dcf80ee0fbde76e07ff2f64904a7e20db0ffc881d7bfee9c1edfd0a016a7b871 304402202b1ed3e2baf432fcae349beced31f79af580f54b34e6b111bb621e9f95a854340220309fce95a7eef84379f3550d268b3a26235f9b48b63b4ff3c361f0dc832fdca2 3045022053b63f0165a8d6b222172d3748769181feaf8939468ef1a0e720bb532e5458c9022100f7db530545ce1f463e57af72963fdf4df171d34c1f313ff4ec456078a9eccd52 3046022100a6c05f3cedc026bc243e4ee1bbfb986c5e50cd7a35938cc7644a652e96e8b0ef022100bc68bf88dd70e346a6eb74bb096c033c0336e24b4c6f869602f2957d9cf82496 3046022100e108e832ae7cda40859a1d219cf3e28cfb6c280bea2ad27abc319f6bc6a9e3a9022100dbdb0e9b4704c19dbc0808152e6b595294ff95c6398f4b0cef5913e510bf2486 304402206afb7372f373891510187ff92244b8d655e9f672680bea5d511b8e4ec1bbc961022044fbd37c6e946ce873005a6cc74957d4fb086e6fb6925a1aff2b433850ea1579 30450220446955818a45cddc9d63507d4c2fbd97dae1581af004ce16695cf5986fa8c3d1022100bde901474487d827eae588ed75dd58bcdc46c8bc9307ea19acc1b551ca6beffa 304502201aa1c453518623634306ce397ad4b0c8c3110a9cc205bad6ddd586a1da7ce5fd022100eab556c11473290c4c1b151014024c5530df0baa3ac6a2aaef6721e6eb6888c9 30460221008d31db6087f141118446c17546e0b2cd9612ab547dcfc68bfb9a1e272107813b022100d9917dcf4aa460a2cfb081069f16d7c6bc6e02c42edf858f8cf4f809efa528d7 3044022010f09f82af245df6261053e875ddcd121b7a10e45367827f4272f9bc892f30e702202b467e3579d444a234a5cd4619b4ff5fdb19d28a95d71374abe6d37343994269 
3046022100b1d0508dbbaec7ae6cc6ee63b33a6ebf5ff4f6fae6e1fb152eb756b4f9a49ac8022100fdb2f1d1a8ac17ea53f49557ad8836e974a42f2222f013649720ea650609aa0e 30450220240c85381745a411e287791818000774920ca3e13fb96f2abf3ae1b0c3a683e0022100945bc08db2016347505535a1985c6bd688ee1b3011314b525ca4687842ac3b40 3045022100814fbde44c7414961852e2795212b102badd09c094a93de59405bccbc06b0bc402204887952105f9b5fe33af47cb4c6d1a2278b8585ae5774ce56e8727e7a86864ce 30440220339ca7780fbdf26ccaacfde503a5db2f737204e765a7acf1452ef9c3f7856a8f02203fc43acacd436a838505ac616ace97db21b84aeb8c790572f4cbcdfd8ab00ed6 3044022007505886710083de7ba3a2b42f1c5ebd11f52c0e8d46d3f1b391e4b6d7e15d7602203a3a304c46fec1861de0c59048b6790842ac3e0182ebcc8d14dbfb70078a570a 304502204154ab63bfb28448e427e103cb3f22e9f5e55f85ae605992ebda2c7dd9ca232f022100e96a131dae17927afe0800d605773c120519ab96e6e72b0f81bc6e66c2e78648 304502200c25016ffcbcb42b9ef9e6477129b8542c125c2e0e23316b1af1363d50df6662022100877c665ded3bd327259888e58a1990c39c7c4d6daadcd054e6099a3f4b67b312 3044022022f9deae6c093ff50d4637d07b63d86b5872f8e11ecae8deef3020d257bc725702200fee2b0db5b8539e6d4d512b994d22537795fda19715c360dfe2277f4dd7416f 3046022100b2d185d5ed73a17475754547b2e8a27c69a53a13283389640d19daac724d5e4a022100c4124c16aa64b4666305993c7ee87b63b703217d8e8f9ae93ced6cccf35f9733 304502207fd8cd54d27082e724a70f0e1fa027e00f1e2d043c3101d395368c2a94d100a2022100b7d8fde6e34f86cb166b71eb6015a97e79b1760908e48d2e47c87393b6aae2fa 3045022100eab5f94945c09e20d864cea73db8111c206686fe91f1f2581a062157afedf24a022048d32ebb333716b6f5c4be8abf178e682e235fc97307d4a2a85af898c6c319e3 3044022011f62de0fd887ededa5498d06c2375f158684936864090db1d349f407401fc9302207a513d9034f85e020d53c8749e73f4c1494b13a0407d58f28e0dd3233fa0d40e 3044022060cb66b5bad56fc5a8a78bbc0459f20a34fe21ea3c38a611f39bfae466e564d6022066254102061e5f435ef9ac70c8fbc158c31161847f162cee16931373bff45ee0 3046022100eda3b8cf2eba17a8abf93f1ed7353b926d7d8682342d453bf78cea09835b30d0022100d943ef2b663b075b11f30a1fbd0cae94c5bcba9f89c986447e97ec04d4cea443 3045022059eb3d29337861d6da6432a086a48ad6d34c73d5f6f9e7aa2e8e298db723574d022100dea7e70b52a3ab9bb0dd41280b5f08f23f739ab20e957e3f2785fd966f4e1522 30450220694b556d011b5fe10d0ad0c3fd25787c5990ca3b377a452fbe44988619033165022100e96c5a433780a6f05e382313971a9a5068e526691307f203dd324f97c26924b7 3046022100857f9d7c7aea5eb2d8c0fee084c1a8ff03912ad38630c0531728805ec639d44f022100b72554e4fb0b078a572e5d58888e321a23d2dc4f224f44b66a30438a9a0832a8 3046022100ad025ceda292950404ad1f29575911b0a3116eaf4ca9bfdc74caadab3597f3b7022100d6026036249ba67025eda13e305a21126c24a1e7fdfac625d68e0ed5527d4ae0 3045022100db04f791108bdefc594149db09bd11575fa8945449d0ab891c07215dd5e08e890220098010c1b436f0f535203a59124351efa19f06f911be55f55e0a7cff48d0c6fc 304502201aad448fc01d2e20908e6d617f6446fd34d052ed399eb8293130e3c1ccde1693022100a1c0de73a8b7319f2c5ff6a17cafb77a1243282b74b9a31060245f83f13ee631 3045022100e22c71767910fc65ef929247cec5cb0c52f96633689809917c16f30efe7266450220277265c4ee58d3bb5c589495e8edebd049cd671e42bcc719a43276bd2bba2d7f 30450220039f5beebb7877de20a3c914062430d957b32c4c594799416a1d5ca173ec841f022100caf9eddc11df7205b71c5175e3df345ad97e5bb3af27059a82e16d4c31a4d78a 3046022100a6b79ef1cd3c92021219bab0916b5a62222c1927f282390ec2b9d970bd2eb513022100aa4d20de8d5febcb1a75988ccb3fcea20bab7bca50914c8044df7b4f7b1a2706 30460221009ab3f63d07aac98d345b47105867d2ecab81516ca9b4888aa8db0d0f0d263c58022100f70ce9f22935b8f0c3c1e9624c9677e308a2d385b802fed2f3368b37e2b49098 
3045022100cd256a4e1aa476495cce228bfa005fa1f3bcf177edba47ad7cd9f8904c99dae3022063c7132cfd135445763be1d963b51011ed578ec6b0054f54ba36beceea557f1d 304502204e948718d5504cf71a77d46919b4f9e05bafde8dba408502346c8d86d915711b0221009c953e7c33d008866be1a874cecf853324566e2687e37de0fd7dd5cec318d3b1 3044022056c5ee02992f33b6f4e70fe180cd9bfe6291a57907075ba1f0eac2d4872df4d302204fb6c0ea07ea90f7e5c618be4dc1da58e7b6b20dadba628439583b7cf0f6d9b6 30440220219d5e14d57d2fdfd15284cf469cd5a59a049698447113fd8e104ad57f0cc459022038c6cae565c9c63cd1afba1ba604454e52381ae1445fbf67bc043ec8086651a1 304502203c56f8cc93bbd45bd8cca3f8f89ee8d00dea42a9586fa34171720e76ad1547e80221008c7e6723ba04f1f447e5d7afe794c08a7aea33e2c2fb4dd94b6e46a519d9d364 30450220239d727aff2222b78d8006d1ede5fe4930402794c307c9727fa1a227f77cbd6d022100a8eba7b89c29889f62db36fab10b1bac74c5a6ed07792d9048f0ae6a6983ef21 3045022100eb74b5fee25f8db1955648f7cb318ab898d7c7c0b246319289ffab4588f066da02205dc405cbef199e265ad49a5a525f4a30d976c329b126e1c8f2a6fd2d049b215c 3045022100b2411f6a14abc6f1518f3c30013d05b9403748c14c56c059051e0edf7436b2e502204ac3e1f6fa3417f7b2392e187db6ff181bd2838bfa8b834dde12d320045e4013 3046022100a1e1ced241c249b688f305857a4eee5111edec151043c201eb00bf57caacb98e022100ef01bee40e4acac1e2f8f76ba4a180a3723c978360e24383f5a49b6e7615e55a 304502204ca1d355e16e940d3b50eeb8d6ee00bd60c96dd0d9eddacd88eb12de0418b858022100fad746965b85ed05fc6db2a679a7ee7059651d22674ca187c9c24c45ea6d26f8 3046022100b22f50f0d295b53796c12ab5e2ff06151c1afa09f2805c1836b7bfbae6b740bc022100e345c904259c5585fd6e46a6d72b8f235a7acf7b97c3a215002081758a9620b9 3045022100e975906882ef3ab4909da5a48ec625fa1b9f14dd6c304f7c9305f518c7b6404602203a23ba2a6f33eb80b06793966eceb87784fbbed8a0a570b0887d2d107ace8c37 304502202e32e269d5567276be424969685acc5e23aedff8d097614557b471d141b31b7f022100bdba771b60c87e577e698e7879a3267bcef8d42286d24e6868bba56151b3584c 3045022100d7ca35d04044a963b6a33ad158cb86606d2d967eaead0d6fa2809dcc70aced0102202d8e603e5be79fe2e0d64fc70391dfa680271c9409b0387d4d0413d508e44508 3044022008c02d6c803e7dd1ebaf114aa899d620895c8b72d3752cd9271d1b73248c333402205e77221c3823563515a5a7829b2959b2930eb36144528d478e57d4bc57dfef57 30450221009cc6ac00d14d00c1186b4bff317714cdfc376d81d887607c0d57f82868f4d3ca0220740d11350d62d851d8f7b4a3df035d70b30e3a1e2ac4c8e69944aeafd3c7c97c 3044022071cc51d333927d829189e73238605337240e9a9a81d28103d49cdda2e096d587022022ff6ce990501897e9402412f900994c49539ada09d81df45c275e5e5c69cbb7 304502210080937b49f99ea68fdf79c35018dd83fbc1819f0f56d11c4f2f9bb7c2c5507982022078f8feed2e36ed9babd3e686aed503bdb3828b9fad07c8ec45c3e0202580ae2f 304402206c9a7dc57ac346012418e83578bf2cc2334ae76b06e2c601a530bd1e86e5d9c602206deadbf163845f4bd0d2bffdf57060d06406f37f88c4d4161ff707ab66f66b6f 3045022100ce4ed7e5e994d497de97aff8be4a88e7c06f1cf0953892591e09e7c5f84866c60220729ead7d45ea8c23c7c86d55ae5f7fde3904bdc6ebd974345abb0f29f9dd771e 30460221008807ce34cdcbcd55694e839b02676c8fcf87fa4fe82b6d651c9cd830a004fe7a022100ef5b3ebec0e33c1fc73beb625d55d2c45cd2a505541963de4c74ec61360cd4d3 3044022015a81ec7f954afd559c39ae981f9a8ef2a1b5cb9c32fe5b254046a35d2f3ab5402201bbf9dd4b805b5bd3115d41d1b5af392d2dbe75cbfc516e140480a20699e3b90 30440220306c5b86b470f81c7685a41fc81870a07da8d2e13a5481564a82ee6e226a36ac022061a09fb0bfa85dae5d06c294a794d6729f900032517889c76c291bdce917931e 3044022021f704dec3546723a75b095eba36c0a1dcda9589d3c4ac80d177e4a2150aac97022042c81334f0cd367fb2953c77047bd1059e32e11408dda63c6183541090f8174f 
30460221009cfbae278b1cb48b7796c48a36aa95817cdb6dd7deb474751ba8fe1ceebb7f35022100e4b72e39f0b8a400319ca170c9df940026a8442889f7a755eb304034c4808775 3046022100a8e2e74f46dfb4c062f39f0f2c191973504192430b6f045d5508dfbad39aa19a022100c457d4985776f2909e40ce8ff255801545a7f4ad2c630fb096114ae62006b2fa 3045022100bd11cda5f758962f976745b7ddced745e344bbaa26b3e0017fda1dcf00577d14022037ca88f8f10b81fbfeb9bc65da48a63568b86551b9eda3771184cbc42c0037f0 3045022015f5cc45d705f02d871be050fac08f0105b33c61c8d7c44427296ff6808be2ed022100e46e2fe8b6c5ee51fb578cf31b8ff34d338860cd789a7a38b35cc4a79bd18ca5 30450220720ede24f5e7615403188639c13cae440b01b90661d52f2c0b31fa076d7663b60221008040b2fb08deae1575fc4abedf7cfd1aa6b27b73bb4ce8addc9c7f091bed6a13 3045022100ac61fee1582018b6a2df5c2d9ce3e7dcc808c95b590ee13db2d5bee2309e3bb302207e81ed997ff1ff7d3c8ad31e54ff0ea3905e6bdb7354f223f16831d3d6a7fdd7 3045022100efc4a6609fb1a1a06b4a29a12453cfd5abb8585775f6956c7d151fe549619cc002201d16872b36ef2c07eb09387088b44665977a62567b827219018458d491bd30ed 304402204d4098fa4f72b7e4759992bf8539c4b2dec5e4242ac6f88146b601abf6f1d46f022040e5cfee6b660114402fdc9521380539ab61f65a471526f98588b53fe02a79db 30460221009dd92fa6b4565336c909550f16235224e241af32d1a77924ea174d3178d51fb302210097a5aa051f84ae50f62bae10637f5c33138a2f7bbafb391b19240641bb82ee6c 30460221008d562f912fae1ff00ffe533743f0af1245b16f461e7ee2755a2d41ced23059d90221009e4475ded55d63e5bbf46268d39655cb56c18200ca367fef37cf20322aea9eda 3045022034b1ab8e93e274e193bfeb21e4e8a85cd1b8045ac93253b0b09b788f1dba332402210096a010f794dda0a8f5c211c1ca254f9e752b5b4811aacaece22ed7e69034b79e 304402200c66d342eaebac2697d82a8cf7827eb2bec387cba42cc11816ea2ccf419fac3a022028ceb4f8ea2ec7b1d11fec2e062794060293795434e35f84ca069aa8fb47094a 3046022100fca757cc3f617529175528644497ed5e47cacbec0a8d7bf88acd4025d219f13d022100b899b37f7f3dbd8753301b4fe8e68104889c841c2c2ef27494703d1c03cb8d79 3045022050ea9432c34fae81bbc8065d18b50762e5ec26a86877665f0dda0b6104894f66022100f3446817426b660644ed382a0910a99b282fd839dd165bbbcc0fa361d29553dc 30450221009e05819d98a37cfd51596f1517f701068fa3b15314252a760c5b5f9cdbd3044d022044aa5348ea5f51557dc350f8248e6b2049b629fe87563b252d1401e79a37ffd1 3046022100f330f38584859f7f4315df0c3a618f873636b2c90446398072a5b3eddea89c78022100afe2926267444bfd9be8ea7c76eae17d4a7b0829376befb5e7dced4afc78bc80 3046022100996ca0ef8a17890831010ae056f25518a1cb41f3bc0f7b95af0708a1b0fed174022100e9f43dfb0c618d847cb99e890daca362dc3fba920cf6b74445f455242a4ae241 3046022100c4fe6f159095c611e8eacf17c8dd2df6224ce257dec5a966bfef376c52c6170a022100d93f887320e24aec1ad0ee9937340b6692b8d88ed6f766b85a1a5376ae8b5de4 3046022100f6cedc792fbc71e1a1ad2dcacd3aba7319336be9e7a7e7f076fd9d138fc3c603022100fd03c22d2914810c56b46ed0c86f67f4d0fae5d2aad5ff586a25c6c07c76654a 3046022100d4b97191b98b1e444196015d3a941be6b8880f82500722d7ba1b378d572e6b60022100fb05bd7a3c780cdab7ed588421d0a7ae59bee2ca80a7447a968247201e84b0fc 304402206b37c4a95e8a1980f5ac80a6b32b9bc13f81b266fac9628ba0a4dc5d0cad936e0220313192e8557fc2087a504099eed4d6075cee7411f15500c07662243572e868e6 30450220384a6bf58f0cf2c21cb89c01604363d604247747e29758a6061661d1b9400c80022100ec86d4cc551f48e105bc6fe40dc8f2e0f3e3a8287b7044094161e995fd0683a7 3046022100b6d2d8e2c464dd1e9c198982e94bbd0b903a84751646a8fa6b4ef01dab18c557022100b50cee3d90f8c95c3be080f12818e30b8b329c1d66afc70803fcf32522d59e54 3045022100fca377a1cd5e4d5bb055e11eb21f57bcf398d864f3f0d96ce6fc8557f4074ad10220141f8529eabed3fe9c4e1e2eaf6cc6dec51e78d8a284d83d93aa6798b6cb2c7b 
3045022100e9882dbc67b882c49278e27619ddde397d89527ee1267a2dd29467b6fc4140f5022071f3a734445f9b938c05717914c181ce20081fd5d36278aa6fff44ddeef5fc42 304502204c0ef186a86c929345801067fcf219d986e2c4892ade42f25f08d2e42da6085b022100d9eca52ea247fab4b699240175e25efc75f0ac186ad577ff515c23eae25c6e34 3045022100e28515d5b9dfc0675ef1984b6849267c82af78b89fdde6a9fce93f51dc316b9002204f749d2e2e292528e6a55ade100df772ff13fa6334664060accae0d6fef91046 3045022100d5ba10a2cbd884d274f5f31af88cf1cec9dbb1f3dc9fc8f7a653123d5829313502204239cfa5d4baf1f6132b11c701dbc81c03a41208895f0be79a8d980d9590d12d 3045022100807bc46d20faa1a318e15f8071f94e4faa6d4818efbd439e38385984ac3dc72d02203e15b380ee281acb0424e33c0b2dc0101d4fd7da6e49186984aaf2a7358f07a3 3046022100ff936fc8ff24250fc52480ad67f53eca7943d293f7fc8c7098b7fcbb9953317b022100daf3a41cd2e206ee4826187927f43059c54e2cd290f91b685a9df57fef445186 304502206bc78327fdb708c765b37a8b9abfc49fc0aaa0328183a63cd9b3714a56838eee022100a442b54494e35ab685c7a7b4b386a77e7b68780275a14819c3c6d52c42ca60da 3045022100c5f1849f49cc3ea44c837219cc705f9cc9e3703b7470613f977b06a087201258022075f7d09a96017c4ef089baa17c2ad70c536536bc106f5bf004d05b626eb049bc 3045022100b9fa03769f31f702b94e1adf3035b3931991b4d2220517f877eb5f896c35fb3b02202f2adb3fc97bd71f9c8ea6b40fc477aa8324eb8fea59b2230630de641242413a 3046022100a6be333c5e02afe5ba4d2ae3df006b2ac6172fb9b695f448a1e373633ff4cbe3022100c8d57d559700f9afe8042026eb52f5a3b91d7a04a0922d41886aefabd193396a 304502201e171a28aac07bdd37dc82fc86c2a027ad37a21c82f8bd75cb854c56396b4f1e02210095316fab50f93160c7a8dad01112f4a6836c2e99ee28c5c6457c1ac453fa838f 304402203d9ae80351f6809a4b998cde1b891a2f7951075107e3dbeee0caf9ab154c595102207ed46c0ef9ac7a809476a51f60e165c482ec9e44f4f634ab7e51e426391b042f 3045022100917f7f56a5a45efcd4ed57e73a8868795e3112d5d9ba3f859ac8f389a4693ac102206897e9e61c2edd10b81aebcfdc6460ac374ec5ee28747337aa53b1bae1ec2f0d 30450221009f8ddc6b5b7c196f296c16a2d77b03fc9b28b69f8ac877268f7664d18946856b02200602a39be9045ca6a2986c399b3d678c2a0375e2cfe3830c9d95c39986afffc8 304402203c0c487a04b54edf0770b35120e8c378f40e155ceac17cff21ecea7fc5231f5d022063a8b95e991c1cd4b262a8ea0c228717750bb7c53ef30686fecb15a4d57c2c39 3044022029e3ee11c1cc6c4d1beaad2aa742c373a68c1918b18e06f34a1dc74661837b8c02205b07649be8f527387b4a23bad6658c0c052ed2b1310f9b3f82953ffc96cb8cc0 3045022100e9fe0293bd8edd063f254f21d7f762d2195b7e4d2ac81ddfbcead776fe3a6ae6022047e2dfbc7ea18bf7f6f0d7f5a34971a9171f7c09d8cbc58d2af43339aecf9b4a 3045022100a2c4f71ed016d4f021ca0b68e87080cb815bb6250366db5275d751120cb6860602201dacdc880b50e450aec9cc77908e0499afbbf8bb83b0988dafbc449d9703feeb 3046022100e1051223991c2312ad01d07d75d3b6fc87a7995edce476d828a4a4e87dc1ba49022100a1cf40ffb31e4c8f4a113a3bd8a05cb9dcadad762238df09b5323d47b26f011c 30460221008f7efdb019662c6a0e5f63896b42d6f3212e00e2a42f8e712e80c13881ecaa7e022100a80ee18343687d089c0583d70618278c991711ac8fb1c9b7057175bcb281a2f9 3046022100d9b103eaebb51c7d51a120017da36f242d6e79ed2f6cfee3693b4f36d9157ec2022100cf7202affaa4044879d48cf93d1c0c1f10cb799e571648687343556c19f63f66 3046022100a6a6d1d8efcc83f46281589a10a71e7a4cafca557407dcd5714a89766921532f022100a2da1d667def8565b7505833e8bf97c6de2509b0988ce8f29edb86e6364fa507 3046022100b0f09d667d7bba702814b53626cf6168da250a52287c9631a76fdd27bfa67164022100c384b6957a8a1117398796eaf5c2c3f863308a48957e176a50cca175667031f6 304402204e3c0f584ca7c2b20e7d47443acc742ef3674fe2ef1f0ef1115f6f101983a7ac02207d5321cac3a84f95ccf4608e2b2715fa5ed538a354d6dc4ca42278ab490e2772 
3046022100cc7e9e0e8cbe707a30f9d05fa6f59e1a7684f72f21c48ccb51e4692be2f4b8b0022100d7abcbd2f3b49ddb411604d4b6f85240088034f16b51ce8a40d2840ce706767c 3045022044fe0ca4170b113cdde580c5e617e1a61a39ba15945861cd9a52534bf3a74e8b022100a2a3e9e8bdecdb765b06909a5c7ac173805812255a1d8927144d0e27e2bd64d6 30440220522313b334f909696f0a566ba2a23e860bfca1338310dbe7c9f5771d6b34cbc8022057adf77ebea0a35028f729b43e29d5277697c2e14fcd83fa9a24d097ba6ac7b5 3046022100fd45b6619733c76c5649b36ee5a34fcd5e4e62177b36eec3fb77cdf3521fadf5022100e12e1a3d10a9c44b60349ae7b9a7dac1703d86ccd757f66425162493a104d675 3046022100b5ac048b2e2a5a366d3310292577d27a8f7833e2972e163c5f21fb49590ffb68022100ca912b68148a940f013c24b8d16bd6a550a370d62a188562ee424cb425c5550c 304502210096ab2a1c0db466b928c105992995728d5f1682377147e712204630bfb9a57ecd022035215cd1db94c4e83f5b1dbafd09168a40e6745c13c48d677653ae08fac63db0 304502200295181d573943c36ffc022e88366c6ca660eb885b2336e04cca550b0e5fcaf5022100fce97f4afbddc792a071aa89579ff5298ed6966fcd8c3f37d9465c82fb35faf2 304502200f2c17b1e2670c1e741de1a3171d2dcaa738ef65bc1f7728563809c4f12dce2502210083ff616dd18e1415fa6852e4c26f8416018580037fca84223d5609e44134c45f 3046022100cfa5af6ab7d8c9f169e8d6c9d85c03a15065315ea02e6d6fd6e09ac8cd5cce43022100d0c0fd2269267b5fc045c45b5d14ff030b2323f9a61e44709783b19f21d79509 3045022100e0b2988df4d1cc7b4e083f4f1ec221c8427022e516bdd10ee6bed306b3024278022044c0705e81a8ce4b4fd4c0257ae85831c88e711804a4175572e6f8d0926dd0aa 304502204bdb0409d4faff9956494a685a60bd7a6ad18e0ffaa67400776c1d6df3af6515022100947d3cf3e4aa87103ba88e125bdbcbe5ca0f4412bd99a074d074817c50a92c19 304502204d08dd7dbd482dffb29c79214faac5f4b52e71fd32ce67b5668dfcf3130434df022100eb425835fcea6ea70906164175a3afbc38b8db4f5f651e7dc0cf339cda5044a8 30460221008259a84fadf704eb0d85faf1a3f895ce5f768a9427a8ab47886800e126f085d7022100a47fb7b911f6a7fc17b4bbd03ac0224938aa7ae7c9d1c6e2f5eca6cc9df2503c 3045022100c7a99610d2f5566021d5844855ddea554807ade8161cc607c59de460f1629b17022049f5f5824b51a4b5b686e3cf6fd7d66ce88ef5030be7628f72dc3fe358fa9f48 304502202c2ba1b57598829eaa798f073c2c782993be8fa07a8270d1040a060bc159c420022100882d81a9fe2dadbc60022efbfae872cc50f7b2ad7badeb2db0e407173d6be2eb 3045022100a115e83812201f5b44d920cce4c5cdbecde8ef11ee5283806fed2a39bae444bc0220793d13c9289ef40e0399225f68a61096484b65446be8eed245f577b23d13ee23 30460221009c99b5bddf41c361b081ed69afe460148090a36d34c684d7936c131b594e0e23022100ca09c7999da80ef4dab9430632f08792d7020c57c7c952f8766aafefb540628f 3045022100d1be43cdcae24bdc4e04e8c0c689516c9ff40a04bbf4b2b823acff35661ae38c022020d462e6e853fd92525cbeeba78019cf22919859231284ffbb5c71a2a4e1d54f 304502210094487e668f57bdd5da35be8c29d8e3f8e4ed06f4f5b668f874a7fa62bc628d86022057ac6b649badbc86db194d1210a76f6a50316056b6c88e8342ba3adcd4fb66ab 3045022100f20378e816d651d1ed224f6088371afd4e4696239ed699e8b43df1656028d1a102207997a39f71ce2ae0f104693ad0428352f178c0d647988e85b77a8ae1a76ddec8 3045022069d7baec855fb9deb7e6c54b05fd7aa002b3caef77cba4edddcdea3346df7015022100b8d74904957dd7ee315fd2bbba8695576ac1f8d9fd09e56f1b174555e75240c9 3045022001b2fedfee3ff47b3676ea06179af6a61990b678ec9f1372ab1d5fca7b93bb13022100dfd6dceaa7c083922824ed2f373675ec86f7a2d27411cd88ba9502d1556a7624 304602210094374e45d8f8d77aff64184d3313055452c953752b3152e1b0f5e1764773c94c022100d3579f5b1f1e2365fcc39f0ba837e8937b7ff85de06649016c2456b13f346b91 304402200aae8cee0bcca4d41cc2105d6ed0961be1480c3e9c8189ef0914b367faf3c92d02204ad7b46906b0f456778acff2b1c7246f710f3034920c8c2b952aacb12dfac223 
3046022100e9d97cb2fbbaf93e8ea8fe7aa0b88ec6ec3c7772e678c13963cb18587299e3dc022100ddf440025ec98114a0d0355bd11b28719ae140c7eb5cdd9a8d2b1f025f4252ac 3045022100f72b3e01519a273bc7bdc28affc1c3ecb55d875bae6dbcfa9dbc12e89dd9cdd202200dd4b34655b7f698764e288107acf78fdf15fd8c55d8afa3aa254c8c8bbab7c7 3045022100abea0db95a88c782974e9907ed76b764acc142376afba55773890534ea881a15022045a536a293b985a4375aea099b120b9442ca921159f2c3ee519da0cb7ae6f7ee 30450221009383e210cf0ac588ecc66bc5491b99c38f073e68a63397fed5a4211c285fe69e022062ef5b107bd4efd56b356e21331264c6ef13af29856294569dd9a6614b38d14c 304502201076acdbb13549d97c12f73a90ebd6d40a17c2396899bad73733faa8b589f2ec02210087b18a0548f824b2838dcfb77b0563ba7ce920ceb5942158fdaf6255144a31ca 30450220734fcf9189070647d957ad2a5e1aa315c7fe40784b670d2c03ca6d62631d8c7202210098de6c97dda296394bf33987ec3d91cb555e4b66aead00c5b09883a4ef95ad6d 3046022100856bc734fcd528337e074fa6a5bca6cc6c6153cc0c5cd17bf66fb209854029d6022100a05abc70738b617787e790b192fb83430e8d5a49245f241b6e294086a8a9a03c 304502201054280e3990bd12f6aeae4a00c9ee0655d20d5115272bf6ac501a7cc2272f2e0221008535b85d0dd869d07716070c581a036afc4406ec4cc70826e37979be81259342 3045022100b6a1a5d8f4d450551630a78b4dc2200a4c1af5a677dfe46bb53bdc357d64f2eb02202f8f67c563dfb08fffc8654e1404a28b057d88e2e4395551c168c10ac735b02c 304502200f77593e933768a01b6e0d02a5ca1fbd64c057d5ccacb9cee4d517f7d67deb5d022100e7352dfd6fe31691c5d908703858d308821983a131535ab68a951a5708652cca 3045022065729a924c43370f288635534dafe3747b39287e6c63c2dcf7ac287b45b28d57022100cd3acec6480b88776bf4053b90903ac56dc430d6fcbb3afa0e688be320481b15 304602210084d88e72fe571dadc863a38d887855d33bbfd4d3d2795877c711fdb95470afbb022100e2b0941890513a559f2b88f6fc0a2a5b458fedaff54268822fa8084bd5bd1364 3045022100f3bbe9fcec4e6cfe8dd8f3f7264335c2d761207cb6a926d14630172b8c50acab02201bd2cd4788dd49f68ba8914d372586e38bf00f4289fd8d50e8172772f51eab67 3045022100b90065c4d67b42f21d5cd0e0b85da2c019e3a6dd24c42b30bbfa77b9154c39d102202fb6e04e99066e9030673d2206df328d00cd6aea00250924d28795f0f0281727 3046022100f27a5718419bfd696c0f3776ad7ce8872183902ba665afaaecfaf417814afd38022100a970448456a9a2fb28a210e87d900bdaa35dd72d61d1e4931eb1549d44b2a148 3045022064adb88d008cd12a7aedbce95ef0dfa96ff78593546213430191dcb17eaad339022100d92545c5f9c81dc7adaef07d443b0679d2c3673b197a8d313a898ca9fa8779ff 3045022100840d95dabb6a3456767c83aa237b3a985a7b9578b1ee407bc320170553328af50220055859a4e3a4567387cb1f54208ad96a825b201f276ecc1401851bda0d02e802 3045022032eebb199aac6b27e3027956cf9cf16ec953d4b4eb05837d5f79a47fed804f35022100ce72e61a26b3fab126695ae9d3c7e0148c1ce9750e65fafdc798d73d1d9d7cc6 3046022100d42a98d2e667c5648d8ff87559153115b102e199a684d2877d6fefe1f8b473c4022100f05abdb2a86e949cd0a8c94c2d2d2d70c7113560e919f4430bf72ad08bc6aa3b 30450220131c31b2ac2c5623ff2e445cbfe1a33b1ab0afa4fa5308b5ad55cfd1d471f41c022100e845c3afce22a33bbe8443cf6d5b53d3b0c879ba11054eac426c349179ecc4f4 304502202234f3c5eb9abe6881d8f280e96e65404a7de9909d63e7d173e6d8fd255dfe1e022100b4dc88b23c7f52c5e35916609fedae91763aa51ee1f60314f13f427f75cd5e07 304502207abbad49d162644b57726094e503b689db516dd9950a30a391b8965b91e8ec38022100d2dc6720e7d82f144e24aae93007da95d18dedcb05522c9f9387c731250684ca 3046022100f95e6dd2d949a8c4fa9a8f27d7c98ff74e6d111b527d18de887786ea72cf33c4022100dbcae9ff0a480d0539d54759f538bcd99804c9d4a5d66f9a6f59d563e15d3ef8 3044022077de7c34e86f848c67e316a00a8d4b421a4bf71446e5ad9d62c4d354e8ba329d022010b3677955d4354ff7f273d3d0dec6b50c3eb9a23ab29caa2174623b8103e5cc 
3044022045a4d5bc21f71997fbdf33797d0660fa6a3789fd22dfc1861bf31c456f29054102204447487c4810fb5cfaa7a5f9dc74b9acfe6a5b8822cad2521b79be51c59b7b8d 30460221008029fe1e8ee1970b37e96d0793dbb2877de81e1aa89caaf960c94a74609889d6022100ac11e16718fceb87eedd42c6b0d8ab3f6cc65fa18a34c786563dcbae6f267964 304502204cba1ec50c2a5619f7294339de59efe161a9ab07e85cd8630cc2941c1de5e9d1022100a8d91c13d9af760bb761782c2350519c4a338f529c51b8d745fa919ef08e36a9 3046022100804af0c8a79e76ab76f901a840edaae4c24d75abe4360f7c7abdc997ab75ff84022100e3e95d8411671ec5b591ba4d7324d869978006fed6a277d1632f8309506f467e 30440220623763950d0406f4e01c5657927d563389dc8b744e218c2efcabf77b004e265002203ce62a6fc8d2bd4f7e02e8823684d8512a00014a98ef4035cfd007195a7ec57b 3045022100b018538f1ee192c445111655cb10f9e0d8974aed8e3af9015053fdd27264730802202452e3ae9d076c83cc38474b7f0feb6b8b53df019a54bd31cd090291dec501b4 304502207e31accac01bbad02a22b7c27976e0e8819c608270adb5a88db0406584dbb580022100fb36c62a4eaafa20c3230f0ef3fe8419c89f56027696959d938467068607e27a 3046022100829973296257c089dfc4b3c6067a1f6c2f8978c392749c6f70b37eca64950edf022100f202ac3f20a495554aeea304843bc97264662e8eb0943cda919005a86d15ebb6 3045022100c797e3cb9a7fed80b5c49005e8722b3d9fbaa39c5c03e6543d600c73b333ad4802201872bf428c1d8a3777759f8d4bc8208cac35849131821e234679cc895feba8ef 3044022006f79eb18bbf87c726d0715e86f59dd0ce26c54971983ac37c80b0205e42e73602200d1204cc7bf50d4cf83c08f956cda9a2647dfa1c62d871ec77612c4794711b38 3045022100f0bb7d477fcc61b2db749e3fdb88655922a2b2c501317c7fc4a359dbf0bf4b5a022028468c1aa71f614859ff04c5306726309efb1f32863ef81158ffe6e53c33805a 304502203acdf40eda88fde9094693803e8e11c0e5a3550f6d327b43c91a817486e636ce022100df676e063e665048cf5710fda72ab9d7dfced734183c9af16122c6cce63e3947 304502205169647b286353b1c4c5f1600f2a73839caad08c0e151f18e9f23f21f78f3c3302210083fd72ff1c33ed0136335ce84dc80c86800ccfcfde67c8ab080aedf4df649e8a 304402202c9b03f7f5f79d249a473d38281d10bcc266327ccaf75bf6723813e9527cc98002203bcdee887133882e050aff1ebb37889f606d5a7dd9e8a35bd68bf4f082adc1ca 30450221009bf3f2c5d8cd7873ed548a8e7ac61d984b441ff3402a1d053afb477d135a5d3a02207d30b9d2cb7c962fa8dee1cb329b69c666450a8abb033704276584d4901ede16 3045022100f36bcf86b45e7d1b17686bd32cb3ea1387e858d28d7d5fe8c67ee860a1bf88770220674c49ab273328768b95f81856ad0fbab6d2a8442b82b1b91eb802d62e4da402 3046022100c140d16f2c7f28a126a3e15fcb4bab419e763f9c21f6a89f7ee1271688250927022100e532805c80f50cc702cfa01a216688b71bd587d3a5bd8d33548d947136ff2a1e 3046022100bacdd25962eb0afa0828f393aafb074598a399ee068b8e28b9dd7d156acfad0a022100e07dffbdffde074d2ee928ca0e44e0c290245dc2995a05c4a15971b2f28b659c 3046022100d909443b643be63590d3677828872bde3597c5f905413a5818724586c2e1d4af022100be24884b5034b34947b55f9903aad77aba26d9ef631571992a41381a60533901 304502207b21f458a48e5390c93b4c7e4ef967e1b1d3a66fb3f9abf4cdf202df6bb619af0221009a3881e4729e31f99e07520c214070bdce05da006e3e84e9e6c536c50dca5e69 3045022100d80f743eac1c3774849ec3b726bb79b6c12c5c06b85b78a21a49f1d047e63c9d022043c6b8acac2b5c45aea1a3300dccbd3d2c5a38863b2d901ba225a437f2f73e71 3046022100eb37c2b774230921002954f8c2277769ce07f71f915be73bfb24e39415f9b63202210090226c2471a11018d4dd1e7027d44f52dc2d8a5f73966f4be2cd7be765cbcf68 3045022078af78abb3d0c74bcd6a31698e3d26f916a522af51ec7006133ead111a949c91022100b4ce1446d012ed8bf514af28082e2224bd70f9e451fb2e4bc53424959095e14b 30440220214a6aafbfddc1f6a44259745959f2021a9972acf4a6d25871117877d637e5f90220079747eac8feba596e5dac153f6985488df1803738bad24cd012078022f2e97d 
30440220049b280a5fc1e79fe2ecccd886033b90970c12cf8a37e59c1818a6aaff48401b02204aec61da2aa228423817aaa2fd24cb027394204416ea7539bdd99931059af0bd 304502204ecb5ca1416e6f6b28b235b4c847e5a983e4138022f2672062d3cd6a6fd3e73f0221008db00593981fcc61f42f79e95fca656fcc621696e203c5130aa50b02fe57e98a 3045022100815b479d5135e736f77a3ac03f25da7c451f52b55b87f034e8725086632675d502200f310ef5afa46ae5043923fbfe6ee0d6d4e960f644191fbce30ecf6c5516396a 3045022100b05037f495723f8434672cc1bc87dd2c493c23521322c2416c2b01704192378302203cdbc4aaa227b5e627db53574dad12ef4dd3f57622aff773c01dee9785526eec 304502210082cb69cb52fc6aca2b74492eba916f895af731b897a34b65ad075582e97bd11d022071f49e6e433791fac19f25d27568eed1929cdb24750b7ad65c265f4d5a439c9d 3045022100e42ce467b895c09edca3816b71490781e5f1f25ebe94831f3ec34be570968a94022028c47a4a184d63b3da5fe38f1bac56ed39ed04ff65bae9b1bbca71afa61501dd 3045022067ce36cb6c7706d06a357ed8a2ca8566a733ee9ab057d89550eaf79eb1463556022100c73f61f4837e8f46c333ebe89e54575fdbb7424f3d64718b04d85a3ac915da03 304402203432be662bbeb12e32cb96c7b963f4f4544850e8a8f9f742322549f20098e6c2022022f6be8dbcf9b03144749d3fb38ccd266f05aac83d84fedaf27d9ba3e9fd2327 3046022100ab9e436f360696255c2d54e2fd8845974d43888a9e8d01b36941d35af934d510022100b535853a35614aed3759734381f428672f87f0d6fda6cf9d624d099ec83f49fd 3046022100e3800e9fba61a8c3fa1b5f3c91d3c2bb85083a2a5c25e992762c3f3a9dd23064022100c3f736f68d30ccd057312a8c266c2666531f6b6aa7f29d8c96c1b5c872794776 3045022100cc352a24efeb1d4b9253c4b3320f3352caf609cb0b2fb19408b44d65d74df0ff0220200421a61919f52a1fc92a5237f596866babdf52836880f898e477e8fe79384f 3044022079b0a5f3e1ff8a9aa1eaac5e8cf8ed640b15bc814d1a09cf33d130b68af803550220407c78e6c64fe6fe59bfdc65caf77917013248d2651c4184c1e56d82663544db 3045022100fa53b95b28963150677ffd69decd56f9b13fe68aad24b405a67c2cf82359e55b022009014fffd7fe2900857b9bedb0f9e8ef3a2cd4104e773a80740185a410fce45f 30450220289d4e31ab98877d29833b6ce3fd74b6fdcf1582bb913f003b763d6f89f26933022100e6489f1da484b4c338fda8cd79eaa937a71df1c1f49039db98cb98c6120598a4 30430220552403d59145dfb9776e5da6ff51e7b0347d50f1dc31abf8a310bbbb8bb31a9a021f390647bb1cd97b85d6a112b147c720b748d4165393cf41bb3ae4d85c3ffb59 30450220577093033064f4373836313491745a9503f12b49b38486a1d9fc4bae585f540f022100dede5ed342af66fd92838e0d4bfd1f198c14eb9d0a5f495ce80b0e2617784637 30450221009748fc5ee1898812423a27bd2ec66acaec5c7f59941c7d8e3dff986909d20be70220259ca29ed3faab23b19264e01827b354a596208ea9fc82374ab41dda59799eb7 3046022100b0bdc51aba5047a25fb4d80432035e9efc91fd6cd9bd57a035003b7decf0e062022100c0a376e14c9dcff183cc0415de8e0bf47993b3ccb71178f3526a7ef47a78d144 3044022011facecebcddfae9fd595b09e57bbc12068a3477a6bb7d94d11423cbf1f785fa022069488234b2722be5376bfee17819fd5d2176c266854565d600f6e8bcd2b51be2 3045022036cdb2f34a161d7bc5f5506ee863b0a4bac1457b16a1004d070b2e63f0dd09a4022100d096ff65b4b4fd95daf6b6220f4c1189017070c39f680356ce57529cb3063fdc 3044022024e1d1091657525a4e487f84df4d8ab464cbb7036c6c1fe32dcc9dcae8db153602202d2550a64728ee288f00c951afa6cadd656f562459f489ece80eb5bc68624dbc 3045022100ae071f1e8e1db8e069faff1d1e99134aeeac47228bf6067ef4059ba2f3787888022058f640129f77b45b7b23a1ba14ddbaadd60942c7f9d417600b21049ae58f2078 3044022010403f78fa96c0c623f9c7e717b0828d0e8ca3828097b46583eb889fabe55c59022050844cabea01d880454f5cac2cf045dde06cc1192de3903178561c1d031a1b4b 304402205a133d18566222fa5368d627160b80dcd78f3211ff6012c0bc5f3d4032133f6602206cc90cd800494e2d8c8046e582cbe2b565ff20beadc534dd6ca5d75682c7d5bf 
304502203d8e1737cdb0b023fd9ee5e55c3d4a172428ea0203455625f2aaa7ce47dd73710221009fcbe209e5e7f88d5ab8fd39ea8b27c1854597e389527d449dd7ff5829feaa12 3045022075ff180190369dd082ab6dce2588c21726a109c006dbb6ede0cfc70abac798a7022100c15f9c0bdb9bbb70c80e5bff01c0530f60d82063862f0bcd504f47589d86ba38 3046022100c0b247c24d213761be34e016bdf343a76e26a7bf960f57e1d3ecf087d90445d9022100f0fd5fa61148618bcca0696aeaa2110f494c193959f796c150c39b8dd817e915 3046022100a7e0321689bf1c56efffc968d90e1f865ed0702ade370d57a7d1918dffd76b1a022100d73cceb0012ca21a4ba5e7b08534703b604a83e87526326cf10548a8b38f30c9 304502206043015782bc0ac48b30ad26e64862621ccc5999a07c4a818bc16a478c9a8f56022100c3ca038af58be403f76e8c7daa2e2697bb25e3644f0d774acd25927af0f53925 3045022053659bc3283dd1f2570d0eac15c795493992665990d26478c1d5f8502f10354e02210095d84c5f2b53b135ff264bed8093928a77879ec865c65f780f74e375939707a4 3044022037716fc23e5b55358eca382ca55ac989e4e29e454838d4302b07edc65ef2465202206afcdb7fd1b3212ac48a6c09365d397fa0bc5922f2be9761a43ba99ae67489a0 304502202d57c4f7c623ac1f4943a8ab3bea83b785d02d1b97592a0246c0d7c2bc3d2932022100c5c12293f3e51e9650e3bd2ff279911d0502d6588c678a75ef9880a5ea9ace2c 304502201b2cd54f1b9842679900e627e38d4fe6de2da40672eb8a42e94ecfb16810cd26022100fe86d2ac475f9bc9cafd079286c504f5ab56032a5b5951df6bf2d45e36a69c94 3045022064fa7b59908046c8f701df967fbc50c6304d6490db769f9bd33b96dc09916b96022100deb22dc33c413729d3a2b091eaff477b8e429472b5a14addaeff68cd281b3847 3044022007b9fbde0f89b8a4a5e2880c1500b51b939b75c5875a6b22fb1bc5f7b1a9bd770220648791447b7038a3270b496a2f7d9cc83148c8f42986cd12679440610e8b3781 30440220698583d9d9021daec193daaa539dbdf920b6093628f612d23cb9f70300c071a602205c7f544aaa126150c7a13bc29fcf1f5af8a106280e4c3b5de7f8298510e573a2 3046022100caa8f159b3e3c35e154871657dbaa74ba9d8d91461737cb2d4a0c549485b531d022100da62ee8efc0b425e154d6177adbfb77948c2d9a7ec1d984a55d8f27062c84425 30460221009fc8d0f22a39b02c5ddc174685f865819e0fd636964753483e69fd25ae28eeeb022100f33ca1f2a4661141cbc935130927f6770591bd0a250475bbff86c5aee2d5fbfd 304402205567b4ffdecadf620b9d05ad7c63f98724d00043358804a40d5eeda35c4155fd02207e88ca272b4b661e755a5d9d45216830ca7dbb8e3afd3c973561f4e699b41d44 304502207393aedc78e0add4b9f0266546f221969aba13e00c9667dbbd4f23f0288f1be7022100934567f123f9a857f788f9415cfb6a1eeba9a728aebc3700a39bf4ef81c89a10 3045022046f1c8716cc72a0c9256e6b03698504dbc28c366fdfb625e85a00c7ae7d6cd18022100e5f2e3d7289a6b2641c1c51d8b0c3b87c71a24df35cc8978776202eea144a3f4 304502200dda9cd3190ba3d1f45ee99857a2985b12f8ea5e1dba0a2ad17adbe2cde9f7bb022100a127f2c6da3a2dbfd74b78c765eeae526a9af7fb4a891fd5cde2e610c9d553df 304502205fc9923f271a65fcb21827096aef18d34623ac6fa00285d2d6453159b7ddf20e022100ce3c1796f043f5a1ca45c4ff5e7bbb996cddb3034ce6fd1eb6454f6f0e87a245 30440220255dc32a7c69acea378134cca3ce1bc4d3d52909b72c1ca4ac5a46f09451f5fe02205ee6aa510497def9795d67b39689f9e1bfbae353e6f0fbd84c6b03a8c123c481 3046022100b5486b0cedd8c1c06b69713df684dad1d81fba81f63820525173e355854e00fd022100e8cf7cfe787535f7e6cee36efc6e4919b13c2450b3db8aa01c7efe6cb89d6f8f 3046022100e8b8be5c57f9fc0ab3d9591da5853f0b6557c7deae333ad1d3eda769602bb503022100ee97615619138576df2518ec4d87ed71f12e15bee3c4339f1843396b26f800b7 3046022100ae30a8d4b606b6b8527f1297bfab5c83b966b40ac2a7073ba939310783801d98022100a0243f51b55e25ae8ba1fa24da338ba7d9e53c59328a8b0b8378e126df13458a 3044022063232f628115f9032a02f9b543437df3ee03ec1dbed4589a4bcb9615d5c96c92022034400a5403a4b35228064bf866c07ec2633e16ebb00b49102c6c97a4e45ad575 
3046022100f29af04fa93e6af4f59f76d838f89864d9ccbccf5051311d24e2802cb9d4bc40022100d0909068604f238ad4d748ca924a6ea69bc2085a777a8bcd2d9cf6782cc0a9c3 304402206953483f0df9099a5e25fabe5025385bc0e6f79392a137fcd990f95176e7948002207e5ccd03360c65d8a28230dd9a273fb0038638a67c2f63a1f32216b40cc2603a 3045022100fa87cb35e7f1e9d1103e49de8b38ec9f178050e4e3ee449ced3610b208c6c4650220378afb40dbeaa60607ded8de4a1456d50be00d9be26ab4e040284794ae996b27 304402206411bb543e1365fbd806be04a9590be9e5e42e51b0167c3234de9f02b8a555e90220102abeb94b041c128396daacc9ec22f7ff352d3a7abaf5a26bd658451d61663a 304402200ff54ae835dd869853c418c4bb236392f70a8a3266ee423099a4a7c144f4bbbb0220480f372bf82acfafdc26505cd3db6c634695d4da277ac2138b293ba09d5786ee 304502205c7baa9014dd055296e0ab7c3ee03267b9623790c380856ea3dcbe8c7928d744022100fc2c9324ee6d14718ae2d35f589c5bf2bf932e007e91ed17d2a15057c2cd834c 3045022100d965b2de28cf3669cb33a989b65c20bc3b253daed298ca5e010a327cc01013c50220595cb95ded128a08b48d10174530c8f22388b2cf94a2c26822bf4ac62f874109 30450220220ea249eb90649d0de8bf354508465dfcfe9e8067b86bf78668d6c7b1449961022100d38236f53164c69377a0468a8c44c5aafbce6da637a1586447d312f89321e9c3 304502204a42128e68021542dbd171394e9eebd5e51e8ec066e499cf1b7674afc5b0bc4a022100ea36fb903e1bcea72616379b6b3914d0d0be05cd4625d3c909031b41cf711de6 304502202cfdd279a4ce912c65568aa97efe53a429e951770533ffb8ebc9b2bca764ced8022100ed62c48cfa0b1262eb80e27f04b8e00d58090d29dd8eeafd2fcc70dc0d691be5 3046022100cf7149bddd6906de9bd939539ae6b9a36d9b001faf6efaceff039f0d186ca8cf022100a75d546ade9e1f856d12de24a904846c8a85f78dcf04941d41cb94480b4d7bc3 3045022100b114991ceb942223fea3f38ef6fb8ddcde645dffc7bb622513c3d587bfb872c00220369db2422a65c29b5120867170cd5be3fa2f26a76907f9f37ecf2a2d7a454eff 3046022100af77201d241651941a8861685beb0582366fb0984a79748b668a000d155ed81902210081a384d539333a28f64cb84121cfe04d1c24196c76ad050574a3e4b675dd9e98 30440220205ea443b1e00ac1b0a15571ca6481f3114689885ab77a78d2b621db735e9fa00220740209c5a588b256e9fb292086d234fd0e0acbae128f6f67304af5191e861d35 30460221008b60b3e8360deb735a533f2f828d548113652d1f21714e82aa93f4bd857da637022100e76daf17655ab7004fe0980b7c0e290cdaa267594de4343776b7b7db26f67bbe 3045022100f2a74cebd735ffe64de0cd86ea25ca9f363d70678ef3a941e5d319ad90e244b2022072859fb67972b0b478e748842e3ae48dddabb7d207611c3c4988d2f43342a497 3045022100f4c7b8cc7cfe15b5d1f15f934d2d2c03d17ac6e673be186e1a9415afd0e3105902201596a2cdd8aae046c3a0b58b7a55eb2130301adb3d13084e8c269b504bb012c8 304502207e962a46821c76d59ac75e1cdcf44016ed991fdd81fb24d19439b3fece7a0765022100b139d49bb4dc661f3e91977620b882d3714a630958f66b81618ed4e64115f446 3045022100f898ab57af249d8d534b9e02130683c3047b1ab6fa239339a7c4134a0e3842bc022003a3dae78206d03eac80f3ce5f20894379f8cad83a0d875c1040e106ed50c7cf 3044022062f86547e2168d04b87b17a982f5113eb775c243ffdcb3e70aa77a3794c3f7b702205bbcc8b9f6ae06b89e6c076527ed77afc9bbe741ef5efc239eb0bcc058246c63 3045022100ad8b99acf1e7c85546fc9b3ddad0dd5bfa1d732f4a71c83f2849010de063bddf02200b99777dbf8870ddf18090368f094d70930fdaf0668738629adc3b0592eb2fd8 3046022100d09aec3064b9c5cbc3da2fa9468a06c9590d2a8ead7e4a54659ca5c860c4d368022100a611e5f479c2f541866e75abbce5a794a806c16461b200694b02b9293389d706 304402206621e4b276e9dc61769ecc2f55a9887e9f2b4133f83b0740c0fd4d7298416dec022078d9bc588238e3467f9cbd10c941c253ba7d6fdeaa468c0be29fc056d3cddcfd 3046022100822f676b2a2a52c7171308540ba552e18b8454cd89611940afe1ba1145d3930c022100da1b4dcba3e995ae6c9fc69363fbbe018ef7c4a9965e67704ccc48ed6ca07aa7 
304402207cdabf92eb8a45a254f62ecdceb70a536c1afb5968fd8c723ee1cbbed6fa7315022065a06ecc783fac5ab5bbc344be3c598a968351188dd83d84a04db1a2c77c28ab 304402206d12aefa5ca26b8514d28651cf9340747569de0f1a11a8d08f0cce0116699102022002c2528e5aa155f5d16fda8ce5542e064e8a2c29e3bfa507759110883b890db8 3044022027f1887e1f7b77ac45780694fc47bcb548e4f5480e9a53c61fc4fa20217ae11c022026f32065a928ea3412122df6a137b4085a69091dd7d2f714d35e3c7a904a3520 304402203cc1045f996be36e4426cbd6f541c4d058ca446e582bbc7aaeaaf6199d32db2e02203f5ea405ac8527a287046b9d18c31fca91bc8fc4d1816811272ad027d0671593 3045022100b721e481d71c6263ce021be423ba38ebed455a4ba8aeadf4aea8ce15fd215f63022033ddadff8c2bebd0eb82149a33d4bd782a63c5d441a2bced61a75908fe4cdcac 304502205cc767a13f3615aa593c5c80b52f3c4628c79ea985405cc3a8efd04e1648d30b022100d4b478a67f9bbd2e80b5e599c5a5d6e4cf3f287f4d9df149c1876954d882556d 3046022100fa62ba6b032ba186b13f403d93d867f89e1371a0333cdcfaf5ad26c7a9463aba022100ba23d4d79d4c75792cfc984c37adb1ff1d3b9daed88cfbe8c36a5697e3be3513 304402205005f96d5938171d9937538412b24708ebb7c54e1f2c793c624729dd69c0c540022004ce531928649aa6e2595ad45a9aedc24e627401bd6987e21ca4bf922dd03545 3045022100dd8f12aa9a6207a32ca635fc576540b2048c4208117b9d8bf74c90d486f56f7c022050cdf0f6655a807f6c4dca22d00fe016a53c2a630e7f173a430d0b8cf0e42ef4 304402203379bfbd7ee238896545a75532691a6fc2efd64f5a653c58bdaece214da0c63302202aba8550ee318c900b357946f1a40d37a915b246b92b60e8fdff05f082ef284b 304402205b00580f87ccff3ab0000e64e52116d588972e9fb54c3717b3b1d50da07f7c91022060267a2677bd020be37123859d8ad1ee2071bcb5e32a5026129ebef070a3b4d8 304402207589c2ba1fc4c815c1748cbab0a71f15f7e5191b8568ca32c490f20d1e091f9302206070db88b46ec67a699ea93b01540e835cdb61b72ef55ac9142e944bf6e33463 304502206ea89b9180d5dc4d644d5dc8657a94877110b7f8d914245e4aa42f7bf0cb0cc0022100ceea5ff63518fc18208b2077871f130571f67b67570ca4362f26a8fbf9a0f72b 3045022071f38659f7b79cc795972a4393fd01ff77afad1015f90bf13e8903f1167715f3022100e5dcb7df0600c68828f1a9236d9b9a42f2827bfaf979f15497c8808e011dbc7b 304502203f7440fe9d107124deb1177bd0b5b8eea37a1df692f949602dc9e4d484fff9dc022100d8773344d32e9422834f41c2c326850f8eaa3a87172b3a61b4128d27e346f212 304402201361e7f92032f24a23f76769cf0ec0f8619af12f978ba77a4d0b9fa59ea6bff1022040ef8da791ec5fbecc05f71fb35d99d81020a80f8cc248517e61454530358f26 3044022020d0e4fb029c0de0805ccdb918f9a124e3077c2da6247e3360394d0b7ff423ac022004930854bfe78ea4735da4fa91c0c13ed65a23594cd2b9fe9b75df317d00645e 3045022100cb7311b8d22bb5dc7a8c90652c216372a45700515cfee3eb000f1b516a8d7d5e02201f2ec077d632bc871e200a509611627f62d36057d7b4a82bae996e1b52d28101 3046022100a099844f8f578c2bdb4ef41f781931af6f006bbb884a84bce2188cd3f30f98be022100d3ef3ce2cf05a4109653097479fc0a8f31c2a5c1b89386e6dfadc2d48d5de9d1 3045022100db2b086b4ea6d05c0789d0be53c993f9e08cd64d35ed7e671fcd07315f7270e30220259ba1b63cc310438831e778b323350a65d3593ca5b1e5772e08b1dbda569b67 304402202125f4b0e106cbe8e2ecd356ffdd0178417098b4275bdd45c33404b95e1708a6022063db5b71666aef74777878e1d2ab20ad2e963468ef169e7e1c27c5871f9faca1 3045022100e68633c8d02024f2702bec407e96da8f210037795b2b6323763f3cc1544e8fe0022058c1537072dc5ff3179d7e5e33081bc869318250271864c5051a0d6de3f7156a 304402200b5cdaf8347f02a88025cfd58ade4da4f3dcfde9c68930b32155eba718ca0a0e02207b626e865361a9f925b229bd7506174d0f36ee1f227821edb97f932eb83871f9 3044022041753f7470bc1872ed64669b4ab4d0fc5a58d3e7ae9849ad054c98a7afd0223502201442a84f216af9c410a80000c4d1691b68d996b96278ff048725556967c0bac7 3046022100b93c6bb15d093a4652fe72dceace0a3d666db4e5a0171f5f4b1d63341e34e7de022100958efdf45512b79bf5890d948e168425d163fe9db102b6b05fe68a85e3ade85d 
304402203c8dccf9705826953a57049fe4e41f3683f77319199ff5088006000057eee17902205463c7165c2a12f5cdd04e2e32c2e164d705b3d77884d70885b623f96e00055f 3045022100d83f3b4ec7934e17b7f4f702ddbf11d9bc2eab62d172e13b84361e63de315e0802203db5eb77c1becaf2d6494a0dc2934fa2183194552d2f06f8c18b76321a535a6b 304402200802a2a211b91646fd0a4125fa1ca4898754f063532a0dc3f1fee4d2afad9f9102207f0dccba87d36a4d0ff077507f774900d494d05997f6bf88d1f848566665c3c9 3045022076f7d3e45ad1679d8417c2efeffb0e45f26fe1e91719d98be534c99ecf4666d1022100b85e28f7519249820db6cfca358ccdfe1564c7dc039d66f076e810ce58d8a0bd 3046022100dd94cdd3a6acc4d19b76b2448021e8c53b86244d298020ad726d8b25781a0d4f022100d887c383508a3f03aab7568c671a26932a99bd179f038d004f2163f1900be6e8 3044022055f08b74f402e0d6b7704c196a7393419a3a324881707cd27b8570eada3656fc0220547ea591fccaf2ab32ea61b8a1defda4c419ad8d62e8f211beb84ef0388119bf 3044022035b507f74c66074777f503c04666ee54f249e20ae67f776c7bd6205756c22c7e02206c95e85143f5825a30b52d7bab556de5da3b11769620f2645108e8a0e633ebfe 3045022100bd1aed34a17a84daf36572521edfc8d4d0cd781b351981ce8f4755fa6eb29e32022003c09e026ffb6fe835d1e55a90876fccefc9e35cb37a5fd1282368a3f3d66dd9 30460221009fc0e0a50b98329e4679e2dc6e09d27dec7ddc576ce48ccd702299f4a81e7b5c022100a56662569063c790f57bede957085649c61d912e1ae2d622732b6749fe6ea809 3045022100acd062ac6dd576a85a1c5303cf8e968f1922adea7964da71eec133ac7391f54102205e8a5188ad528afb0bff49afaf5a95c44670897669818d3e6bce9df18d29be05 3045022100bfeda84ff9f33fd089af7037f71413e1e6cd0214f98b6296578b0ba8de5e1b1d02207902caf9e5505617ba4e7ef538bf5672b30b9fc3ae6279ca2aa003cb212c9426 3046022100bca93f77b12a7b21755ec3b3e5d0af743f2c482a2d60401f891c89d372c49a43022100cabee73d8ab7758af1e7ce4b50d7746ff4f1af5f9c731c9d716ed8eea64b2c83 3046022100c68b6d4f7a489cca9ff79cdf1444284889dce38038fcf3de2e8cef2365da5dee0221009b8c2c8dc4a5a5e3e66c9672d1d4674b000ef43f3b70f40dddc3fb8b2c21b581 304402203ee8775d190f6cde1867cf48e3acc902b173212b9e778746bc68aab59e227fea022047cd06126157df22e92285bd3d2163b2f84c09806b30bc2901c50bc232b31e7e 3044022055def9d2a77ab69954855c48e1b962351936961df38b7a89ce1afcb900881c560220048ce602746411fce382bf35028e9c2b4551161d4461a63ab97de37617d6e452 3045022100d95de963033d0338ca9db3d99e4980c83e42e7cd5dd0233711bb43f87e88bb1902205f2896056efa867a03bf5943563d1eb3f6a204a00c3aef348950a07181a0592a 3045022100f4ece385fbdd4c1de7f0ca8abfa0f7ca047b7c852da23515b7e3bead0bd4724e0220744b1de19b2f6f533233a5f7830e3bc80313b5b542952e9c37e65b759d74f5a1 3046022100f18a2c0396775fca6b50650d9e780fc02e614b453dead2e0adebcc16f788d9a1022100b0fbb9dceca00afcc3ba3754a4a6e9d1a9213ea79f8aab7d891fbf835a598e5d 30450221009f4977b9b4d5a745a051a6e9796d40349e94b19d946cf2ea85a7a36ba956f372022072cec9757ff2182d03aec3d02a0fa04a0a8abe829c54d05e3f92fd61e7f40416 304502200c546da53802742911678729f7e16fb788c73d53bd07223943a478cc887f7205022100806eca8c01f05ee5942af317d0c1a1c70600677b2716ff03671fd9c291a61350 3045022100fda71c424b7da3ac25d8e46a291cdc85ba24fe12d85bd3e48afe7e867ff998c40220136ea26c46ff50a6658a2a8074e944f25ea4340299b4f45e0c96d9cd6d2a7724 3045022100847a021cc1691b6544b0517e6e5135717cc00a0a364870c1991058c2fdfb970e0220156f521b6bdd7005cdf72bd693caa604b8961f56bf237118e5f4015f1bf1ba17 3045022100b5c8bfc8273728c956377251bf5a840b1b8c1f2f081a677970e8a84316ff135802207f564a09924106e36f21628ec1531f4b8f9ec2826aba712cd73f14307bf6a570 3045022025f173763cb4277b5f15ab172f512cbc3c1ef9422b3c37dd796760f0eef19450022100fcd9ac8b7df6c59618b69185e51718025e1fc39246ccc4d2af10f8c5686ceb42 
3046022100c2f51606312bcdc0bf048385201aff55d79608e2fd7a21adf6a0fc99865204ba022100a0c699341b0cd67f9712955879747049ad672d8cdbbaf22529618e91a7935d88 3046022100b6177bde77b02f2469ff28c1fcaea47776edad0c01d2c575d971889f9e0c46db022100ec3e10baa6dd262e95f9505cb006a2ce1b439a702e314d7000060b86bd3b2fd6 3045022070b17e5b437f1627720d8c372339139391f2a7c0844b6420449f2b78dc2e26bc022100e77ecf67170f3fc9f39cd20a372fb17b66d5b6e80dcc34bca2123c1e03333c7a 3045022100c457ec238dbfe9852806ed8f5e09cd8c189a77ff794b0fc034fa31edf51bc0220220515706df449b55233100ac976007a02b45a75373724add1f8f4ad84361d28c65 304502201fae533fd5e9037c4b8f4b2b4d331371c4b09ca32f473c63f232cbdf8bed7ccb022100f44936f58ff884fa4dcee6f7d322c83dff64b6060ecc43325e75fb5d4f13f0b7 3045022052a9e65daa7d32c98f7b35dc070c0d03e901614c413cda741f341cc123d9bed9022100ce7475cf4793848bccc46765197edb7c97a415678d87df2930ed5737d962fd42 304502207e2da0dadbabfa1e0c91d5c879ac88a45e4fe84ac647cd6fb3b493078d7b2c27022100880ed62dd4cfd95b690cf613c89521ceff6e79e73bb9c5294d6131d4c6e5d619 3046022100e398cd4e3ccaf68d6a3988eed0a55c827c7b166ebc1a130c5b24374718d734cd022100b32348a25bfbab4259f5073d99cdcbb6d82f1609c078cf690fe414942b6b5fa8 3045022079aeae6e8d4b1bd4a059ba23f9167d2d0bf32dda0642188960722762dfbd3119022100b93cb2b1a93f175f6cf048bdb00389edc2a4455c89d4862b7868675b58cf8b8e 3045022058cf582e1eba8372127459d484ac455e5b4e93da1c056f62dce55a031a4c493d022100809b35429bb492f82f71d320eb9c89a4c54139c76e96eb4167dfec0c92db73e1 3045022050d4a932c11e78522bb4b800561edea0df597a62440f182f4d323b829ed75ef3022100ab84f5ddc582e0efe88823d8c885595c9352e1f80c711e27cdc3153931c4f1a5 3046022100dca97c55e92b5d0d1924d467cbf7804e62796352bcfd2358fa5704cfc0d5421e022100ba4ae28e7fefadbb201507fcb712029a5e3e75b9203776ca977609b61d0ccd89 304402201d672ae236d90af9156e4a44651b1cdd30631d4c38aaf0e5919743aebd300a22022029a18fd82c968aa09ea3efc34b87811f03f1a9a15c0725579a53a2ce5e3d40c7 304402203c78b766275548d74c24a4ad7d43b1bf099c1d4fb44e4969293ee1d28595222e02203f92ef23b550b1646ae2e98ab86249d48227f63db6a51c59238791aa3f56075a 3046022100a976d9a9f168497227e55e8cba5c33a231b5661a7080a22fdfee29dd31103f5d022100e72465ebe2c93726aba313eb1cc4f934fda769da5f12a98beba6754db0b7b7c0 3046022100e11e3b531ab5caac8e9bf4771ee7ca6fe32dde24e33d8f6fe7f2b27fb6934b33022100e7560823a96773da1f492a008ecd0801a9bb4b5c6bd2ab57dbddddf512a88b3e 3046022100f803e01c6b64112215ac93c3c0e75935c890939cc247845509d8a99472fb6736022100e4754dcdcd0a10201312bf3268fd3ceda4e46467f97951c379f21407df9c3242 3046022100f707b859fd811e865de8c3cb1a8e3dfb66abf36bc315a4fc50da89fd68865e44022100c4e4485cbef4f916eaa669821fe75346500f3c38eff28b47ecfae838b68b1b0f 3045022100ef4bae404f5366fadba87534ac63030872cd20fd89fc14512e00fff4e110992f0220689585cc75e671aa7749ea2ac0e7b7fc4494811fd506fedb83452792a8d9124c 30440220359d6dbe0c59b90fb0e261700280cdac56e20829026958a18c904116434dce69022066b19d85fd9fcd8f2bdc876544945da7ccfb6fb947c33fe71acf103a9d0bbf0a 3045022100b62a07ca6f5c9d2920225ee1765c1410b802744dfc9eafd3782c4a6509d1ecdb02203f546fd52dbbc1fc5146ca4d8e4d72999949c9cb91785e2e2c4a978dc0933ada 3045022075ff528757e776135b1a4d69024552741b01079e85ee13ea03d839f77e8ced75022100cfbd885e1557cd2e53cdaed297069c9ec3ed544380f20d8718115c21b448c568 304402202e20ff54b7267aa84f680a539323d1fca299073df9bfefd2054ce983411b1f7a0220200ccc942e8f788ac9183a1868c212867204d1b29059d41e6ea5acfd325e49d2 3046022100fd611316bbbda4f724f68d6c28cbcfbed068c67e6830ce74ce22dcfe6586ffd9022100ae7393086f1e70a1d0c6f0014f14c5c3f3377374f1c2e2ecb8700eaeb5f6b376 
3046022100b68c79904e1e74c2438de09b3e2c5b969b4c17abd3d08828ca282bd677750eb9022100916123534b16989b06c678770b080b362d33cfabcc865fb420bd60b4dcf641f0 304502200c7797c204af9034ba8af62f1212e97084e3e638c5487648541ed348f503f9fe02210084d0abdbd8144a8082b496878109cd51e21499660c11f2b6f0b4e331d36ce418 3046022100b0edfaf1f35f19fbc417ba59b0d449068edf12af35b380ac070090522d07b6a3022100948f830b9373028d8b55d42a78df55920549a6180ff0dcc0ced0433e5453855c 304502206051f7582829d82eb381c032581108940e850dc7575528840f46a186a227f5a2022100926ba86eb204c7232865e091d815009474ee6e29683a159497ed0488c70ed3d2 3045022007de7bb3f3a7ffecb37036ddeb6121c0ba6d391f865047951e8d6dff6e7a0419022100ea02f35f423376e2b0cb1e228d91de970333ca2f6a801c6a685156ed68b21055 304502204ccfb6b066ecfd08749dc71745f08652143d4db71bb776a6613b2e4e4f05a732022100a5436944a20572c12e978080064adcb8d543867745532438a164a8f43a794ab8 304502202d6875802294935e80084d024692b3163d194c0ee08ac3212ff853b31b35be3102210091142f99d6826950ee1ddde35a25395b2e6832ff87f04b1a464a0b56789a21ef 3045022100a269500a12d63f1d5f87d8257e0f5f5433f1c1f538262862644d1f8f6aba4db002203d6c62e1ec93d0b7f65c7193c4da921d5a61a65495e8643f1d26a559b8ea73bb 3045022100cbd1dc571e6f7167bcfea5ecacc40d6e5d408f6cfde1e97ded1760f6a657cfcd02203573931d0aaac465679289db50a48c5eb6f9b733b8a7d0207db1bb3fce921136 3046022100b82e91af84d7e6b7cd82c5cb1467e5922fe97adfad5e18ce5b49687efb93fb8e022100c9d761ec321f93bbdd094ff37e9b1039de10d6f9472bce59c843cba46fe01610 3046022100f0730316c3ebcb166d09cefc274f1f88965654d95efea1c7d3a34aad08a75bdd0221009439b3a5c48dd470cae90a37375036bf8a07efe108dd69c1f0d879118a281725 3045022100978a6634d9ccf9531477728789db4e921cac54460ec308602ff314d380199c62022079ddf9b0a90037952cc7fd7c79fb44064d9b8210f3d9273267c34b3cd0928559 3046022100dff2e7f6f131977404ffc7faf83b3af893a016b013088ec6f671842e5390ab6e022100df19f5dfa1fc9c9a504da13dc1de224eac468acaebb2b412c09bc8081f5006a2 304502204f124794613e42fbf0ef26cb716ba258128351a8ce7d850199866f67eca087cc022100c9df573dce504742984d989486ba4405393ab05afb9c933ec5d867d6a41a8f48 30440220605db156a713d9e754c0091e9743199552a069740863c21becac508b4b12987a02205437d613991305a4734849ac71f8e2c1870e4c0f720cce1d953b3bb194fae67c 3046022100c745338a191a954a01d60a986eedf92941d2a49643952a81f2a7c422e009aa35022100f75aca95623d8abcc7ea331e1ee25ec9a3eb3db5821a911ab67afbab85d0eb13 304502210096fda5474ac41cfee0c03ed03db90bf364cfe559d64a2b72a9bf92eb0798105502204fb221ba3fc214cacc41465d57a3eb6f1e187f8816627e5f046b8dfdd35f944c 30460221008b072d36e85c37326e047995e094de2c6c2e9f58650a6c691d1e50757f1211ec022100a013983844d0a030ca7024f301affbffc8827955b54612180b6d2a0e4086e339 3046022100c158df09b4f30c1e0bed2dc84df15f5b18759c6ff30146e07d30a83cf035b40a022100babc31b1548c6c108d3c018f1dc47e30ec0bbc9b42359dd49fa4508910151f2e 3046022100ed43ef476c9a18f5cab4a76467edace202fdec2c30e2c56a51a3845ed52f1d70022100dc20c9d37bff0ae1a68bcffcfbf1d011d0e30355ed773ae8ee9ed86e31cfd8e7 3045022100fe51efd9a95bcce1d91d2f41c9dc164e11de60c374494284d23101bba661128a022079bf682dced38221c62630aae61b3c8f91b0ebc82078417694dbcf2f4c827220 304602210083e8ced5b453212c30004d1f4c61781328d623b43d6b27d1b169cd29f1250d88022100ff5f0bf3c07b1f44782596827c7d7abc08e897cef544505c40e693aa7ed521d1 3044022001c98ae29e8a0af9066341a56472b6642a83090c39b93a98e923bd08c0feeb6702203ed3143df951dfd324a88d4f3e27545f7f9ec7f5a66dc92d632fc9bfde0b6231 3045022100971e6ffe63cafb9be4e84139ff5c4ed0f2bc25b7396a96c0426ba6a3008c336c022054c5a70a89da315a816cf8451eddd395d355208224beca3eed08f12b1736b85e 
3045022100b4037a6927a93d6de9338ff0941053aa54e7ad0119d21d40211ad6cf01cf31a7022035bdf9e52c8c8c606c08bde7f63856b07246ef541ebee4a4b94915e6dcba7f18 304402203b5a1508b09478b356423b113bed7fb10e9b0d49b3fd3f7059829970391b522502203bd44b18015f0f6306e3c57c8594e029f78251159828e45551719db40ad9c861 30460221009dca731ea634ddb318c83092bec119939e5e08de1661c672fe3acf22e262788c022100ae1a7e7c6fa27ab35268c5746659f2b61a785aa78e7cc79d197959a556d769c8 30440220203a45516a7510835d951d0f2d785d3e9c3ee5ca60cc2755af7729cf8cb1a9db022000a18d06d7df8fb40b75cceab7a953417a0ce1aa3a26cd923d394edf448b74f7 304602210085ed618caa50333aee14208a120fbced3004e2cbb3b4fdeef2e395afd326ecd1022100a1ccbd47421c518c597f36ffde28e20577a0e5142575d275101a2f8e8e06ff28 3045022029135e61920b36ac5103781b89cdb561cb4c2fa376172cfbb0687c3d0f00b7a00221009be7cf7ec59555a7237a04d38f6c44914df04464854a97ed86d2b2708dcd635a 30450220694ef99228bddb08dde7d1a6240e752128d072615c4b1bb4a96d250915d62ea2022100901c5c43d0ab78a0f727083d08828fc395e6bbadde79c4385580c9bd9285e055 304502202372dc5034495473123d61338b197da9ce819c908d3e93d51d3d95ea5a90a20a022100fe64ea11e8eb789defca60739c0202cb9de8553f7ef2304a3e048a773df2bf74 304502202ffc572451a4879f23db2b05f2cf9f10ed94068694922baa923b677a6774016c0221008e26833de4c8f62c5932a2485afe9c427e96489d37c6bbe0c456d3eacc052c00 304502210094d5d22deeead575a057e9a91908956255e91ab675ca4e8c8bbc05664152a410022038db0437e18ea423e3c16c1557695d1cdcca4f9544a3b09b692eb78f88558207 3044022012a34175eb08af625b1dc6ecbd5f33f9d7aca50497bf525fa1c186a7c0c90c320220014c94db5b8cdaca411c6eaf2800deded789ecc777ecb4c343b318d6702247e4 30450220297150b67cecd97d19734660a14919fc665a38e4468931875c996852acc0c140022100e310d17f4b4a2d9aff50e3cd21848b3bd20cbff3907539532e4c5b54330277be 30450220066aeded4e541e32c228322e5a7a2196f3f9b19eaa1c69a1c1f5d64c776e7b9f022100cda3307d8405a32a55613c167f3375eb95d51107bfa61be28e6fd8ad81be7d08 3044022046e39bc4c9567e847fa0ae2e1f2a4ae1fb231aed54d236209fbfe21d8cf3db460220653397c4eae99ce8e355f345bae7461f8df4186492625a99dbb72cf7553994a1 304502202a1b9fb2654bf226e3eff157887d35945a9acf418892b26e0046c9496a736f6202210087402d82bb4ea283875fdd898c58bb5fa3b53e0032674ce10a6db88749e2579c 3045022100fff474d8a4c4e9be360a6ea96cc0dbb40b8242f7b3c65043db5fef47d959ea6102203e4b1619b8b94e581113467ff39660735bffc1ea944df335c1f247da416a2218 3045022100837836f4ff06e9e5281147f821a0f093136b4e9de0f358e6b9cd04e0397a227b02207d84ab24d2f7689192512ba6d5349e4df945bb368dcfe0e8f014f9051aa0d9a6 30440221009e45c3bc7b4e8c9ebdf83043079c8901b23af016ce74cc2f7935b5e22e71b76b021f545cbd18dbdb07b1b42cfa1e5372926e89f084fe49a4c31a058dd2377fd259 3046022100c29a73b3b1da08119fe73f6aed16beb943b4602f4ad577f61b4aa25182e26cbf022100e8a64d81e00b67d0a55878a535cf020b71e352bac38d133675d1d95b1409ab41 304402201b31878f3e46f29c82836310b65957bd7a767aca45030ae38c5935f6a471edbe02200d4376eaf88ef7758fe55ab6af5aebfe5cefd54f47f151dbd00b060c11b05a07 3046022100adbc0783d7807def90797d18b39ae6bd7923429001fcc0a7840262d28a9476e9022100ae7c9970d05834eb2592bdb551b72b787a774d1016821343ef9b6f7af4f89602 3045022100dea2d2c52b05b46ed5f7e1637c73ec8498f77e2e0a249ee68da5f9a88dc6e400022009e1db55c42143221aa7704e742c8408485f764a9e700ecf01bb2e96be20de7c 3046022100dafadc87d5bc44ab15dbdc323644d9c684f8a9c1b504247cd2277e877684b5f3022100ffcd1b43135b7440946b4fcd14ff488d6600e6b317413e5f40a8447b5afebe9f 3045022100a73da5009ba49c722228b66b42d80639536b5d69845325c364d2e74a9245419702202c27a244700db854142249232122ca946b36df320df777ebac5df50b3fed7c2b 
304402206367306d1a8786a392463f4d96b0239ec76132f4596874e5cee617f3f39c0767022000f70dc269af4f25c5c6fb7d545b1f244935b135fa2dbdb6f21f2dcfed12676e 3046022100c5d4bc23d1c4ff289ca19a766e45651e657a611ef4bbabdaf7291b2f9822d26b022100963f2d9b9cc6f81ff1ba28d9524684b2ca356803a5ca0cce179348777080d22b 3046022100ac04683fafd67558a7d986b5ff1cf0fb52d9ccea56d7a3dbb99b657ca9ece053022100c207f50edd382fee6d187641ec3cf444ac0c476d8fd499e409257593ed26b8f4 304502206907cddfa65e7e74ab8dd4a6fd4228ee3d9cd870db2f6ca4f81740e3d0dc211c0221009e7b7c1aa75d05fa4e3e3fc5634bc3637c6ddd224982533b159e27dcd05b84eb 3046022100e967359a93e2f1425b35dc550de8e6da89f299a61b7bd22143e5aa1794a868b4022100992ebc266492d28f14bdc00a4b61d9534762b71ae6d3f797ad7cf2b7c60edd48 3046022100f0d7bc2b63549f4ecd63f4c8b25b4b446c1d70891e16e31e6d8a05aa11e2b96f022100e1ddc19263d90cbd31f3b63f07d91a6680ac0d46ad265760aeb1c02a25e6d35e 304402201b216397d22c77f28ac624256d28c388bfddde35e7cf8da172fb4c8e2cd6065e02202a22ac8e68f7dd5c0c4c4a1611077656ae8e92aeabf0a8a20c75b3a39f9543ee 304402201cbb1351be2acee4f1a205fef8dba5f2cc33c4181f7a6e49cbcd3565f4f155e702201bb1bda4e7ca3e69bb7fc339f0454d0e5a82e17106f19a68c70c7c1259ed6565 3045022067ddfcf39bb7db7e6914ee7c2531d0e145157d35eeed17b21e8bf1da358ef353022100da99314232db89cfbaa24d36578155a48c20cfa222bd38c8dbf688efc4f28f99 304402204fb0a887a4fa9ec1ea7a5dded48eb6cc03ed031eb7537542a6c3d9601213163302203ac0466629452c4b97d5afe37362972e2deb3fb27af359825e4493b982feb1b7 30460221009f476387c571a99f1954d4c196068969cf26552997c733251dbbfe7708bd10550221009852de2526d963bcc72be44b950384c07292a3c93d29b9fe49eb65763d33b4cb 304502206e34ec1eb9984c5ca35eee3108caa939c67cb9de62e6e38c577a8341e929fd90022100c5518ccb69ed6caf7cc6a588e26683836833df31e9e3b878c6c47faeccfbe0ca 3045022100f1fcad66c00e05b5cb8aee7ad06e5ae7301fe428d0a777ac2e00c7e2868db275022039f7aaf4013af5907b038e4a35c5852c4858eb0e14f1e1ecbed34ff6a8675d8e 3045022100b20bd89b5d3ab004bd10f3570cc59586122c95be106886e36ca9ac3e583936c80220516dace925a2e6cb4a39aaec52bbe53dd2f2a5c66082d1cba0970492b30b389c 3045022100d933d00b12efe9bf99668875b9c655ac445286c9d7873586157824f1aa1bad07022059096ccbda7e3051c1308731fce4314fd5ef0530f9090c18a0cde3f8bddb8c63 30440220166410c9ac5695fe9b4f8884b9c65f7aca59631ee4e6278e9616b422195ffbcf0220052d307b3abfca65220b2ff7e46e2fb9f67c96d5b402391d4a02de6eaabc01a2 3046022100ebafd57bbe285abf787255b09df3553d6c32689acfcae19f87582779b7f212bd022100c4352609fda80be7951e716f4215eb2a7632e96610020d73e71334fc290de690 3046022100c0312279ee8529926a167e44747e9a3baa9f6a9ab813878f96b98e1df7176240022100ce471f9a3e7a596418a4ef5abc3858e41535925e8cee85d2d3cf73efae027b33 30460221008f520282249dbce699d146fc6183f602095e5a322cac16a65b2a4210bcbf1181022100f59da1365b7548f374c2b5dafb9a9f5913296d28a541d9bd297772dff2afc7ee 30450220586a8629854b6d2a33555ffda8e1b60c55c33487307d415cc9c8fb7248b25bd3022100f5c5d8e5cbb621ab5740e1820f2e9562a898b8a70131c4a63afc8495fa7ee152 304502204fcf17593f657c962406b24ad3d35af0664c6b1e9ff29a27126c11db818585600221008927b058d3ce6eeb25eeb2ea52105c345fe87c9037d84b8e8d71effc6df9b168 30440220024d72001078b3ea2f57f2b83972ca91e34f06629f489a1ba4e4406b06781e7d02202208affe5db3d1729ae403a5c6d3e47b86c5b68e86a982e910c611ac755213d6 304502204b8888882293f6c453b0c1d65c9460623725c16aad2e9184a5e10b9b47115d1b022100c1be7b5a942a061b15ea2a52123ef841d028c3936eb133aadc3239e153385a76 304502201cc529fb77ca7d369eba9610d44e61bdf46049fedf1840d25cd4f270ded4525a022100e5dcc02f2b68b12fe9916c194fa706a84bd035fc4b1971f76d0b2891883e7bae 
3046022100d031d58d3fc76df4be061d4eee291228e98ad05bae2b138656097373f2d58f35022100d0ddce2cf68a8b534ea50304a97edf883928d553c45cb78cef1ef5b10ad532d3 3045022100e76a80e74bb87436d55d254f265e5a6495e3a469fe6b721ec15ea53904799d2a022001425952a7129f1372c83f1f97dca4e050517ba78aa53e089e166a7a9a08d5a0 3044022005b35aa60953e6e38a7ba1c5dfdb48d3ba79f808f86881b9c505170c0f05d5b3022058709e8ce2f031c179bf86554936efb493d473cb08ec72e6991db88b27ffc1d0 3046022100928c7dbf7c2ca15e126f70df3d5836d75abd25e9ea10c8bc64853050ce166e76022100f197b8cf07ef6aca4d9449278c5028b24669354ca541ea90da5325c98e7533bb 3045022100f377af125df0f1ad9a3ad0b8bc264872c1fdda145ebf33dd135ddce948f93d4202205932a9380e4f55039aea11fb91667c7a80b251ea8d1775a69009bd4c8a3632a2 3046022100f91267545eb10eb581ca87e6ce54549e5405e0479cac6d974353cc30b9183310022100e172bfc8e8aa171a2f87056c912146e8beaa3ce0f58daf56d1aaf4b1322ae05e 3046022100c2eb2f39a962e0cca441249ed7f99a4d03b63f4ddeda4a541e95f49afe9b1ed3022100c210b894f50d433fc5df7bbc26a49477af17045bf8dcd076639119139e08b92d 3046022100b84943393fb6d834d1efd6a1f860b881c55c94625dcda553539a50421ac1d9f9022100eedddf810d37c0871562d3ee28c3719e15f3a12ebdb9c20b5ab6a5d231dc46d7 304502207b23408aa399ca609df93bb7c9ffe98e2f435f42d99d4056c5bafd52223ef96a022100e5c05db78da4f3488c24501b67fdfca34d27d22201f5001ddb6f632398886760 3046022100e0e7dca0a121d5721424304072e58003c7e2a9d20948ace3260c9bb384dcc72a0221008d341e67501c17e35ebdd257b53d16f3ceefcc88f074ff50d1dcef95ca58ffc4 3046022100e94256939ea744b840c0e080f9ede5ebb2a70a007bd26f9a97c71ffba1e3244e02210086a629fcbef2bea22dba34eb8c6239999db9c16662db50355de6ffe1129550bd 3045022100a5f4b3a179bb31736da2a05b3212ecde6d6e25f71703648753173a3c099cc40102205b9baddabb76b870eda78e6975e1064703688600b6f18f667d14fd2cf0149ba1 3045022001e20ac35adb320b3ee4633ccca27c1adadc7f71a30af582b3aef8a5994dad6d022100df1fd91662c3ec04a8387b5eed4a8c63e360dcc876eeeb324e0b01a6571949d6 3045022056d8a937e4e792f6dbf4ed282c0f8511207669e8d064a0ce1b60175a88dc5942022100fb05f8d7bf9cf0e2f74b8e2b31e64c19f1030ed90d616791ca890a0ecad0c253 3046022100f7e0606dc6dd4c7ffc77ad11d9e95033413ac6d15514ea65eaf00d8721cef4d2022100bbffc75c3491c68790f3d9182f2a74277d7447e087f5811538770db6ab5a8400 3045022051a23f4be06558147dcaa36880f36163adb58d16bc04c5ded9854e66780d3456022100d366a40baf066032e554d44bf02b8a4feb3dcbcba2028a22cbf459a1c574bb24 304402206e4d00dcf6a3575281d8e01f9fde48864581b27309c06747af3d6bb26f7ad34e0220340f41e82b23b6bc80033917ed60e8aaece94a705197c610092287e8464567ed 304402206588d9d6b091094697c6f47e511e24059ad8818416c3c62682e692aea9ab09fa0220171e502fa38ddeb960d99ae0b1b8ca5b267e075563e3314c34db5f87dd80e082 304502202fac16b117393fb6e87f0a6720dd9a351de5ded68b51655173cd991665be28d4022100a9fde825c4d59bd08775b82d468d4cee3c0e07fc993c6243a733b2475ccaf602 304502202a7608ab938c75b4f9aaec90ca0655bf4fef4977b798bbb9c837c5f91a7ad534022100953b932d7df5d263df71da36951e5fc207e7320cf522142165c4e93c3c20cb8c 30450220372d681add89da51d94f32a71d046fdaeca0140b80714828cadd65e48ba5e917022100fa658891f8986c4d7e3c5703f95d520e71d9955503d572a8d0e6ba2ba3e3a9e0 304502201ebf28cb340201bfcef1a551adc70667efbcf3e2236843b90b5bc14b3a6d987c022100aa34e4d73cc5cbaf74f050c3207b497f7a1131690d8810b80b1ab7217053d4ab 304502207f44e74a380ad784f5d8ac330d333801db2f66da860dfb2c04c631153225e5b4022100fa351412c0c6562e1d56131e1456d6136e5d72b8fcc5597ac2444d0fcb9ae152 304402204b3572e52b066ea00ba76b879c7c8f95181883839d1f6c4070eddc04f5274d02022034793fde20c3eb6cf4ccb1d2d4c8c7c8fefc00b4d92d62b3bbbd194c0a2144e7 
3044022049cd2a7f97d76f71e8ba4144b5108ee857fc1470a8e5df958069dd2fbda58dd702200f7982be4fe700f8f0e46b851881a9078e502c38613ff588778892a1005d8a8c 3045022100f4d05c2dd29e150a5b3f20a7562008a743a1f597f2ace021d61b272f9b4e3f9a022066d3c3ff3997ba197fdaa07d62595a7dce017b4802621f4947cca7d578cc42ef 3045022100a2dcce6abd65c2c6f23c943f8f4584b73df8848d932034c3d2d92cce3df883f202205678736e7554bd6b8025cbfe1b08584db4eacaed87dda6b964d51e23aebca383 3046022100d6fb98c5fa232d6a09572574e093d1043881f80fe98ea38b764d6f3cca77ee260221009cba7c1ce216c7db543ce2307e1c3395be0d438a9fa64ac32af592edd3f9b0b1 304502201cdb204248d2cf9325bec00b8378525acb73f3d922508a820e20e40352fb138f022100bf53bfd997110b1d9441332ca24b274c0f8f51d569892c2a66b9014ba61a9f57 304402201541012a0426f8f91722454e287b375eaa1f6ee93d0d8eec05a526f91b33532a0220520c3223b4e212225f747b58113f12e5454c03e5da0a21a7c8d382f64e768c2b 3045022054eda6b35a6ad5a17c2a15ae9d91b8355054ea8456be1796e476771c2b5bc884022100d5495bdf0c45a4ca1155cb6902ba2cacd5655b0283bcf34a388d1d8f4f0df9a6 3046022100ca7f4a12c560fdeca9e1242e25ed1e9bbf8c0336de6b89b53199511990605b1702210088f8b3ee818d6714cbba3ccca27e29a1a5145ac2ea5dfa56b5215c26437d2ce5 30440220470bd0e5c9572d718cb60948a57d3fcff7e237d102956b4ea063284080771aeb02205747fc9c739729d8688ea4770799030d5849341debebf8bfd205c531daa15518 304402203b396d5a57b108b5120926baee2fca9c748c1f386a44513db92a98e913fa008902202025632c219e9439d2d85e64c6d44a6e4f4d06cb2abd21066c167d9521154271 3044022013458f18fb38090458d915c574e410b06d1d0ce6f99e338695c79abb3040a1a802205387fe0d4ebd34d2843903f50e2f3be5f3e632c65aeefb19b3045dafacb947f7 3046022100a09bdf34fe01b947c569810627f2dc35ed01668431ea511462b2ad3341b4bbd9022100bd3495b69ee77434d4a8d0ed8555b09fba66e85f686b1b86a0c234f87cb2689a 3044022015e9cafd6b71410a128471e4405faf803a2ee80ebc4b8eceb68846915041b7f902204c577c18019ec3482e048e501a4b309fd9d66684c1ae1e68eb5d86ef674d67c9 30460221008a0b98f2fd7878a518f4d562c770f43a85113f1af238059d178a509bdcd15911022100a582c604c7a8ec24fe9ab6d84b8ebf482fde32ef6dcded943d96257f09f7e997 3046022100964a85b2a5cd62a77e1c24c17d438c04b599b39d4d0fed70bbf25c0608e251c8022100e209df191f27df39d331c5bf596da92fe61619dd80168fd543677581196807b4 30440220744cd91d4f865a985cf91023a2d0c2959116aab25253b4d6f09ee0fb45f9337802200fa66c839126aee65b39e60aa01a877723e846568354d8f3a86fd7e18106f1ad 304502210088a6895bdf76614cf3e9cab6dc6e46c9459794e6de02ee828ba7bae1c035bddc022028a9193395e71416328cced080aaec9fc000579ee59877b99b78d41e4482ff2f 3045022024d38e2182eef426fd91f252d22f678b544b04b7c970e9c9f3baf86cde09d7cf022100fa8f8dbe32f7c3b191b3d4830e59fad5d9e6143666790bd9a98017f716ba4078 30450221008ee25a7436a620db4cdc443b909b38682bdb672bd893e537f26bf887128f33eb02201d958d61cea2f549de5739494cf5cafb9587b43e6b7de4b0844e243688ffe461 304502201c384a9d2e2e44b6a4871c68b20b566e01c55c54c460b92b722784e69f8526b402210096470b3dbff9d3a61aa2c80050f0f52465af6d86cf29b819ec6fbd75e11d6671 304402200fed8827e919cb7dbcec523c2fa9750d04efd697e26096765d838fcdb782ee0c02200f15ad501edd3e0fe5df43636c3e950071ecb14924ed815044faf3fcfa77d863 3045022100acc00e74f1c3d047f55b8c1bc71d1f2674408f974de6cfba0e8a69fc641abf4f022035aa38b0461132baea247ce9d4767012955ab9a8a5d49f97a83493c599c4d216 30450221008af3c0154f5d8eed950a428aa9edca3d6dddcc1333cf44ab1eb4f026f366a13b02205ba11fae2f667b9a6fb801d509bf630990e9b886c69871377bc3a5462da8027c 3045022100abe424c1c21b3fc7af764d1e4dbcdc72a1e337979d6c540f89ec623183c69c48022053f22940ecbbf7f336a53a2fa8fa959a29c1c6bbf79de13df8d1d511b53dad9f 
3045022100eeb80eb68dcda067128ade9f6388c3c5a6853b6d70f41e65e4b3f419d3b64bdc02201131cce61e33a5b754cd321e023fc2992bb661e6faad1ca787cafca6da0bc503 304502205c941b504f59cead23b0a4a58610b59cbcf46573d0a7922d63ad0409ef6efd840221008e41b4eb8d6e7ca8801f703db03d205f6afa8fba8da5df43b2e11820e600dc0e 3044022022ee13f7613decbaad3d59775a3f740edf5c65e70c36f1cb054fef494377595d02200fe3a155ac4d3d303ecb8be56594aa254e346984001b6675b788dcfb7b1ed093 3046022100a28aa659d249adad7dad51e6d1d4fe0a83a7e2d8b60db038cfddfcbf258f01c4022100d482153ff2413303ba7a4e21e4bb48c959bc7747d5db61f6f33cd3a15b130d78 304502200f6dc531a315b8500052c4a1c7668173ab21aa76013378835a8c0003eb9d9f320221008d65be9933f391ac22b30ff9f62a5bbe24d048d006b6053376002d68d33dbba5 304502206af5bba03b6075ddc80d8e256d0fbf5e7f310e1559362bb40413268c4877d304022100c60eb0c59e6c6babb5406f59856b9ecb89b7723177c53f2912442598b3934d86 3045022100ce7b69753e4febe6f044864e1f9888690c4954eb778f7b5442e93287715dc9ba02205c0aa3ffba66b460221a16045d92ce46c2620d2480eb1e0d1569fbb1bfe315d2 304502210091fa0c03e10d8ed5e6cb701d5d3622a26bb4c65bacd7124bafb692f5de22c434022064fd947836e014922ed4ec3376e4cdbaaf4154ec50f2ad94769194f398abe840 3046022100cb817bf8c2a063674c1deacf0c48dc7d2eac4883f29e8ee5f5ff80abb69196950221008b5f1394adf410c6a5f1644b70cd0cf7a4653bcc91c6af74de600a64cd81bb1a 3046022100b8d1779ba02102f5bc3b26180704c95949a0ed058ddd8ae0a028e09b9a17b749022100891c1e4edd4579de91826c4e17fa56f82a881db345a805ec8639db274cd9d5bf 3046022100d295dba07386cf1e6f580819311fc01423f819ebad65e42e04af3e614fd539b90221008902edeb93288644502f21f0904c307562ac66afdc6c606d623edf392c1d8b3e 3045022100f73361e31f32bad19f6d0ec3156a8ae8e0142ea59abd29902b060b8c32a271d602202595aed59879d52c75fa4ba4c503eac2df7e55924fa1da0a4e1b74d52f87723b 304502204e55693bbb1ecfa93f018e96579ad138d8bfb56af46900591f3f2dbff52707e202210090ee6fa019d1d304f7fd46fb27b2e8de07090527c9f6902697ee8dc6be585637 30450221009776579e5a9269928806d9ea9227056df24804a72fd7fbb203a1000fdc85e0e80220085276ec431da38203a08c5f01de1bed773e610e37b5652396daebac0ef9c7f5 3046022100faeecce3950bb7bf4995582d17d4bcf9b094ff488cd044332f10b56c8aedec53022100d96059ef3860617be3c717fabae5cf29546e086313e1948dee6517f371275deb 3045022100b92ff2d1be09ecb21f8dfae9dd4b09516462e943329c319fbaf001acae79bce30220148b1e133d5c7fa62e5fdf91149a25407c31c95d4c5e1b6f233164b3d92cc7b5 304502206b34325d690e58c1f0bf4102e482e21de38200cdc4a0e943a9be6b11b6ebf458022100c56f628e1017daa1ce8cec3dc30d0e74c33fcef5b4776a0d476882069e75f0fd 3044022029f0b123a0b74f7ffab879d4cc5ef3883b800a371901534528d78061ee259e41022012b9775d479ceeb425b8c35b935f4f277548733551e075889efe413fa3366086 3045022100e479f5b06dc6d717736672b90cbf6c8021b4cb8d6f5c1d01be16f26c791020aa02201077697d1c2afe4fc598358a6871e027c0bc2d88f4b35769ef3ba2cd19509b9f 3045022100cdef025d9d92d8479ff0570d5635da0fc71253e7467b0d07f7bb6a6984d18a26022079ca9b249b1b79aaa71b4d6f64c0223b2d06b2b99b81631e209520c1ceb3a232 304402202e9680757efe962ebd68c1ed08702f35528356ff3f42058c5a90a6578c27def002201b3d4046e24bd1e29e42011d666ca180a52900568d18d97ddeab9301aa43943f 3046022100bccd23f414b67d3349ef8ddfa8a96f2a56416b3e5d623748b2fbabe23b4cf9b2022100bf391cafe61c983915ee6c96374710e36f626b9d053aab1ef9a5f5587aeb6d9c 3045022100c22f39767a4f15527ba0ec6d38e1b3ea80737edf0c17dbe843199c2ef668b7d90220614780c59bd5a3753b2018c2f56ac853d407dc127688d7716c7d086304da1507 3045022100d78062056c675e69640f259e705f6783e9208d44816eff4be578f77ae27c301b0220029d6f96977407749218c330a75d076d9953534fdc41b51b8a91f8920d844319 
304502207b90c7841c56b006e28507529b0db7dcf82bc5525807c33b4a24933c584f0b26022100cb58433e82142bd31f2bc60130636e0e59a81be5a68e38985ea24cf72f918ef0 30450221009ac35af70d54b40b5899203580b7d64e635ae650fb2f11b3c477408633bd3f1a0220535414c2bc99980ee2889eef5b4292f28e51dec98362e98da9721cc9f582f1a1 3045022100a3cc0f51e1aa1f05976c381518941e4f97eef7aaf66270279972231e6b7e47a1022062b85e03e7306a02149d31abdd75b408562319b3363e96840fe089506a26e293 304402206aad536dee66776aea242747f1057102f9ad45cafffade1c9fdcc10d6ae757e0022021352284836adbb72b5f55c42cc38e96da9959b2ce1803da9e5bad33fd208e31 304502203bb8998ca133047de7cd5da709158e1e1693b2a139a017909dad7a6a0c4dec10022100c4e302a17398c1417869a21a630bd291746b72f5e3b3f5601769d90255b4fa31 3046022100ad7f1236acd96e881a8da5fd339faaef234912f2392823202491e45714925176022100d53ba50688097f05c43b23d6addc1419209a4d15acd4082a89951d162cdacdb2 3046022100bab66a55bd7cdad97a2916073da4b5da5951d7748bd2641021162306036a9edb022100aa6ec3eb3ccac25dfa1a26c86e0b72df1e1e9a51ff2b0d6673e1edec53b4c21a 3045022100acf40db53a65f2b774b22be137052204661924fbb0151b15eade9222f9130ca6022021c55010e0aaf74cecc6f644a82789d5c6f60913879250b3db5d25ba05989148 304402202a9c09f1498df6a2f4840eb561bced501d76996151e84cc3da95ac16f8e566b802201a968fe4cf3ad3741c16690b152b069b6c2fdbe611d0280f124f1f01e371857f 3045022100b74c65f3b4ba59261bdb1f6100b8f26f38e33ea35ee92a31d1ddcdb2662cac3a02205bc8606b025c11fa4826080ae8728ddea3b3b127ba66e94718e868be9311fe8f 3045022076e46c0336f7b24233416bef62035e3c1fe973d9e93380bfd9786ed74e5b8f74022100838edc4cd03b168da4b32f6cbaa2daf6dbece75adde79d199acfa4d322a1e50c 3045022100e856a13df9db3271fdb2d52f6ba4aa472f486de20b85f61e134108771ee9df86022017ae17c9e3b3ccdd78a0d2a421a3c978d0aa7e7d1395cb605a68ca07c742cf20 30440220204a35af8a7a90dc45446c0f35764d793575a6ba4913e6ba2864dd394d5fbcba02204a182fdc88edda7ab13fd6ff1a277cbbe0b9bd4872d905544a38a458db77eeff 30450221009407ecb08edd8d34b872b8a13eabb15f23134edd2458ff7ddc9df4c3f23e944d0220659b7b8c7b3345b02c87031ca7dae8d69f6273b32cbc16ffbf0770724257d323 30460221008a732613f05f281eeb7d95002ae48d9cf31890e041eb6bf49a34c7e8a7630a53022100ddda7e31efb9de227adf8d432cf5b62d961336f6bb608392116088adff9c55ab 304402200967b19684f8852fcda79a9d559f1fdaf64ce0e1bba1e35baf4d6c6254f611320220782240bad18064f1e67f83799dad255d59769a5a65fe47f929a52f9a6c55da00 304502210094314c8739e543026405acf97d46f4ce26894bf847370c2cf41b6188ed4e9ac402207dda32b23de297b6f9c8e6a5cde9b6df028238dec99eef0dd853e7bc8e2b20e4 30440220327e96af5cde1bb24bb256cdb3e80dbb0b979e4d8071e5e4fadd3b51c73beb70022061c3c066b12611784f7d11ed87904b227e906227cd115a7595b68c31219e7510 30460221008f6b2ab8fd6f309e6cb54fc761cde9c0e48ae1950e88f5f5d7731d33392116c0022100f707ad6285957321e537d8eb03feb0f317d9281f5e9e1ea5b5f72f195a722a12 3046022100c49ad8a7300fd1a14b889ab39d34541e62579a14f1fce0d6ea0018cbfb6a6bd8022100b14a8ed248ab11cf6cd3ffdf39286633cf68e1ddf30f89c32f605456f8047a33 3044022014686a725b51a8a5d7221ca1c5545ead6479cca267d42d5382a8513811580e0e022022d9697a6e002bcb2eb38d074014f07f4f67a4ed524da0bc5590a45b377fe48f 3046022100a580a50f207b415a5b739fd8961362f1897c51f6d224bfdf1cf03f415612e4ec022100da75d2ebb4e8cb98a90d9a7b8f4f05b2b2884827ae21b0da142c1d7682d7da0d 30450220616bedc4d6425a7620291108f9031f21629c34e62f860b4690f840fdcee793820221009b7faba38138a1543d66a68a9b424afd2948149c551bb8579f26291ea0e4bcda 304502210096ed79906884af68583389b015b8c398255c0929773d386d97616e22ed33e5200220680496946856acf779d11b223069e814be10f651f9091caa6e80ca793ec6c06f 
304502210094eefaaa592e4928d3f33fa0fe3485b088bc9157fd2a59d63314cdcee834642b022072fde3a6a373bb1805344c340897de0c7f3ee0ccee4ce5be5c4e9540c4584869 30450221008c047affd31736c410e64706dd366d8756c456656f7e388d0a29ff13ff94177302205f9e0318a7bd5929aac1b20b48d7c06bede90d99e7847c97f8274799eb63dc82 30440220252304bf8aca1dbf69c1fedbd0d63428c418b4d43acef1d2e27c4ca1faf33179022008075f9d76f36b4ca5c49ebb56a60b0b12e7961a637d82ce3a772c84d75468d9 304502202b493659c5a7399cf3fb023a0a886c5d68c57caa9b1dc41da199614963e7653d022100f8f39efa6c2c7322505e8539ae7fbfd0d26e6a9583f4e0e0ffc654b74bb7d3af 30440220687b6175ba867f99fb51fb558efcd0065cc9031f1830f92a9b4b5075637d8a4d02204976f87d9fc6d885fbad48d9b1572fe00eb42a63326bd36907ffac036cef8642 3046022100e5a1c97de5c63f34fac8ca85aff7c9e813063fe9b3fe21ecffeead58c6171eba022100d82044b451e08f5edb1ed480939743d1afa25b5a9ff16fcf54683f858a8b65e6 30450220702f681ab70f9ffe3a769c3ff24d0ad96a5689764407877391b7b28c42829469022100abab44017c8a8f424726ebd1f690b9d2c62e750061b85fa06b763aa741ee7b36 3045022010536fe8d0d7575e8c798c190e191cd47425232420092ff3a4adf56914866e7a02210087de3f1ba5b49572df1735e42bd6b968107f8e676f4858afcc381af85e112662 304502200933b1f7a2b6d70c43c507ca9dd3aca7573cbfcf308500e613c295cc83fdea05022100b31a9cdcaf9b7affbce17bf3f6aad4cd6e40e971e7a35ab1f9a4093e513931a5 3044022003b0942069e74bc10d1d9525b24e31f5b5623bcb586c3c8808a7fe6f1254748c022017ab5929831ffaa0134e35f2afa2a4fa72f8b48564e695c9fca31c6264f0ef23 304402203e9d4705b28d2aa192031c43e3056d71f7bb472ab87740f9f38a13ae208907d602206aaec813ca00354abe0780281542854191529d5da1c65976787542465abae441 3046022100c4a7ff2db48690c26ff84593d7d8f7e1d91268754046bd6f1e5e9a9fbabbb115022100deacb28d260b469a5eab100de725c642d9087e60c8689b10a236dd407ab69638 3044022100ffa2f4772166f98844b8f24f51b1dc2cb88f6b36ec885a22940317c7f3095e18021f4f9ffccadeb570bee399aa33843a901ec1160c49f04f3081fe25ae50edca12 304402205e4eafa739c8d2265eb2814f2b14f9b021c377071658b75a4293e9f5f85c265c0220310a2f9ca906b834f0ae6b1ebb0d402abb4ad183bcf575b253c452f4637ecb26 3045022072798567af39e3905a776dc06ca0fcadf5c7351373ee75f18f647a22754691fd022100860874b185faea78ce9b3cfedaabe8b4380b20a4df1308e15cf7aaaa57072134 30460221008f6ee9c18e26cfd9d9e8bc6ee19a743335f51eb1d954296d916907ad67d9099a022100f51b79e34447f543c86f44a39c7421db753bed5ff123e9f0e6dd04a1da17197a 30450221008eee5152c2f3bdc6bcbafab110252ae20766507b99aa0827ed00ca340958a3e702207c84e06ecdf444420bf7ce9937ce8ade5776c346c3770ebd497d35d6f4972ce4 304402205df3939d1837978cf773383eebb1504e37a9bccea0fb597706cac9c692aa4f3b02204b3abbf3ce8e978acde3ed764a12b1dc8c051d8822095320c9ed3432712e7db3 304402200293fa3a5a84f91e046cc4139ee843df3bc2595dcf6353992d08852553b8218e02203a9b861b5dc27d04a1668a0b3abff83fa531b3a94396cb9a1ac232fee7d31c31 3045022007e4e8c1dee82c29d056e4635d2432f741d260c44a3ee55223ab38999dc10e15022100e0812e6bb1d74f36f174b841fabfb3f199c6ec03d5d3ef3c3b392426bb3a92ed 3045022100dcde253843e87f0b9744c352213b4f9d73d60c9ed497b682b21981f7d75e29d202202ae9456b9ad2104fa2c9532d2cfc591d7afba43a6e84b0173637408426db34d1 3046022100babcd8ab40e14161c0cd390895f855dd847cdee507161e03c6b43ecfdaca77be022100e07d90d8e1daed0a84b0d89f93b4e908b5f86f46d4b9c0c30f4e91eac2d8e7f1 304402206d4cf4405a5a032fc149a4d22663f4318ab708c2ccf5987e1eef6a1c37798a6002207368f9072fcd5d2c76ce4a73b76820650bd6b5f3e7cdc2eac8758383514f052c 3045022100c526c47afbf12720776952e9ebedd1c8b305f0ca01b577235c7cc4d2082defe202204308a0056a902defceea1cc1c655948dd1943c09d1bf616ca6903720010fbd0b 
3045022100ae140bbb09e9ac4dbd72e8a8fc9f68b4cf4e9c44958457723fd1e3fbd031496302206a2275d1e3bd22cee6ef6f6847c4350fedf08f9584854daa969be2e7072cf41e 3046022100805e19c21ffce3538ce30d04fe734edabd589101d6fa87c98d709c78e51ba2e1022100c2398f0e5a786898c9fe3ab2d70490f6e51a36950348075803ff6eab213a001f 304602210097dd1c5f08e1480ff6f95732393b9eb5736960de1605cd57d338a002d10a52b4022100d076687f2f0e06c52ced63779c62d3391264faa23d5da74a77a70f5145bcf33f 3046022100b6ddec1e78853b3cd87a5aa93ffc3e94bccd9e59ad2afaf9365d94b6cc4c4d15022100cb5476a5c87e5b5095f453d37fd3015c1d36f63888d55f8f5ac3db12d63daba8 3045022100f5726438b64eb61f04d78f5e1730f76d3db8e0703c8c64b350157df3aab13fe1022038dc9dad348a5dc249e9dd2b94d24d548040a4054649c7451b223b2252c0ff72 3046022100c31e9e9f651a14b1282698abe945e158d06ca655198faf9ccd87a71769ad87c6022100a9fa12cb27526d4b073fc7636db5e424c0e800b068c79c889a2efe14bfe7b6b9 3045022100a3e09627377b097b57877ffe87157f6b5163fcfce56c55e588c4ee8946eac0a1022017c6ed779f6a860fa81f3312ca810444d5788c572b18e932cdcff21e551e0776 304502207f5d73c689a146582f386a913f608620efa5c611f0f94d3eb4fccb3887a7d625022100fc69430bc9d5ade9a7f76e8ce73ad2a9d60118112c922d70f31b6efe407bd35d 3045022032ab849122000f26841aad234c4beb2be79906c0057c96f56eee0c497273db300221009104c1e07925748508f18f8325c8a96dc6e70f0011f10fe534184b4b942b02b5 3045022100a96fbb118e52d5a36137880aef1901fb596096985d25dc28ecb8d4f2cec928c702205cbaed9c608184ca293f912b47fb45b979af02d56b0d61ddb124985def60f7c2 3045022100b48a1283fe39a6db3f563d2e0a1617e7cc6463ec7484dec872476b95e843f66d0220437dd066841d743846cafe2b9d0942e7a3e0eec4f66ce55770db0efbafa069d8 3044022022eb157e252af63896794577b6bf096f695e8602ebdd5419539737a265d1caf10220777c15a1090b2175b108066b4194d2940ec220bf3a7d8f79a653870602c51ef1 3044022027ff3c9948e7dbc77bebed5406d529624480c8610f72f90f366c670350ab653802206925b5feaa8f91379b288d97f2c66e874cfa23924e613df067fa22c52815ff1e 3045022076baca1952a52432181d0717b83e18db41457be38105eaaf60dcbd91235be616022100bcabcb54eb28a8aaeecc4bcc5f18cacfc70cfa41ce1fb4d0472f60bd0275be52 30460221009b11eb3d41beba18d48acfce297d8065a5809b694f4f56de62b67573d8c91185022100a1d6bcbabd21dfd90e6209c2657e8882b268a3dcffb09d140cc526858a9633e5 3046022100e7a32556b79e0112e2881e236c4f2055172995bc0e97c0bf6ca060c39a353354022100aa760a8ac93dab0e3d56c1d703a3d6862a1ed115e96d3836b3fd0a653f68539b 304502205769e6721aae3543e3d03af140143399b9ec3fe326f8d15f5897058f260a4716022100fa55c28f3043b7aaf8e7e64147faeb2046d62645c03e180a10f7793a9919501e 30440220414f4aacb5702fe72c06b10681706033820687e2ef3a0c43fedcac914b920d0202204085e8f77047b196fd1d6fa18bb9296e19ae1ee575184654c45e37fc2fa3effb 3045022100b695a2748ecb13479a0ddbed0afcf668d95135c58fc097cf6bb519cfa171b0ec02204014214ff0c8b2c2b07007314c62cd3f6f95477518a131c2a9899ed5bcf5e163 304502200b533fc5fd53e305cb89cefea4480dbb79e723deddb6f82178e54dd723771640022100a3e08be759f46147df1bfac4af19d7667e35f7ffa8273ecd82094d3ff81cc11b 3045022061957e2520ef79130fd73d8a0e38b2e0e8fb88761ac0e416acb994701324f9cf02210084ed5a224ca66c4f68464c6ffacbd5ed0e25868349f87cb72770f6d5e8571344 304502203dad4d637e9f082aa2e6182a2e213939a1980ad290ada27efdcfde89378132af022100e845814b2e312d4a72026ab9b053158ba806a6ae335ebf9733937f5eef93bde8 3045022100e08a4346877f35f342e6aace0242514835803ae55d71c076f77010c10c1948a102206c7f6884ef5c928e900d50d9305cc3050d7c6cbce0f6a3d7a815baeede228487 3045022100acdfca9eead5ea962d7fb8cdbe5f0be2b9919ce49cb9e2f8270986376d82bf8d02203e3e2f914d0df875e2e8115d6a632490ac33d1f1cbf0ce9c4bdbdc36f823d9dd 
3045022100809634a71975fc8a4e87bd145a4751e896e881385dff4b9a69b379c70448a94402204933250c821122c308b8d6c3c424a93cdf616c6aa57a1f53ea55c4448cd3b651 3046022100b530c500eb8e7d3f838c7c0823a41350eb5a560aa9c49b5673a4558dce4d5cb1022100b9d928d55e797c18e3cd037e542cf0e78ccd5c88d3b44079e5255b5096f5d5f8 3045022100fa64cce539bd7d9d46021caf22a899ec9a89cbf6936ee40b79f3851fa4e5910f022019729288979f2a84b9a1215835635fdb498d03d1bc9f1a76f0b3374fe429224f 3045022048d07f797025defb7b03f175aff86e7e748f16898ef8277a879545ef14e681630221009b1a3fc045b7dc59c12a1f5b449e91a04585720ecb067bd7fcf05a7d567fa4ba 30440220168749142db5e20b118e141e97ca6ddc3c4cdd2d1f8dcb79a0730595407854eb02206f03648e47a90ff506b8393bd6797bb0596cf11db6aba95a7fdad3ed52d0814a 30460221008656ae50d23d61578b364fa51cb7e4754f4f78bb6dbe3c9234262fc1600787ae022100e8960454035e90bfe606aad8fbb27a393757adf5a302662fb54c1ec36c6bd7ad 30450220300856f32e0ec4c3976cd56c8d9db64eea72c889300f97df931613e77a30bcdc022100fb25b5b2afaeb2116b4889235ea65b9980c8a9aa72664cf518184da3ea56d185 30440220318a190a0c830f7113e7df70b32f490e7c0e1d992008b94aa34444200672ac30022011086e73758f56003b3911f0b76e494b1101b506baeb21e21c71113104f20a21 3044022001d99c72fbff644ed79e47acc1fb08bfaeedfd91c4c746e916fef865b0e3476e02206a8f9ee494ea27a24cc560ccd5b28807c3ec041bfa07fd126e19b72eb09df20d 30440220182d8c6c3e6c7ff1f3cc13325cbfb844ee55661777bd7af405751a3ef3ad919002206698b2454021f8d9c354b7f101ea741da332692b61aa5ce67d5017f8aecd210f 3044022013fa09d309f7bc9eafb74868bf35fe1b368c9dbcca767009767541fa2e778b8d022015adf4918b089e753e8e8a0f885da12a060923aca0c1d7b37ccb1650f37af64c 3046022100dcb079fe16c86bd1a588f7ddc466802d93c7bc037f3834043ad580edd45e0ad5022100fb599e0af1c96d51be882503f171946f3f26ce809ab377a5d44e0118eedf3c91 304502210091d6c61f7ce9420ad8d9d54eb1419606ada45edfe47173417d4eee6f920f61ab02200877bb8ee2df9f4575e452039852cd8d6e86a688f2086a2d05b544bf15ab06e1 304402203540169352e914f96f26f51243c21510cdb79de2e4993dc53e986305cb9265e002202c5cc533dc6fb8d7ce73d1bbbbbc31a8fed53bed0e23e08388da6db751616250 3045022100d538428e79737819d245805d3c4e11f173bd61a65b70aa3eec2209b8e56b97fd02202e2eb2144c10f19780ead1f237cd2bcf0820cd89adde315134823efdea720499 304402207e259f619479a5495c8549f4cae74df0dc9f368c256cba40a2d6ad215d1b1a90022072ce22ff9ea27219e447b7180f4280a37bb419213e001759d90a87382724d100 3046022100cdb7b349ba583b4937e07092a0964fa32136db6de82529ad49eeb0f589714229022100f0e292ff77ed214f8c86a127cdea3eb01368ffd0f1cfa5ab6e45cf2c1c4058a8 304502207870bf6e0e9a185b1ca7ebccc031dc7c14d60da65081c650b08a20267a639608022100f43d948f60b10b463570c5ff446a4707b2e4760e3e9af78b2cdd8cee0514549e 30440220769b62e5307f82b66b5c21573c1aa9a844a13ff431bc82a9e5fb00af60beacf9022029c2b712a31d2ccae572dbfdf96062c395569167b815a75f5965fcd017af7e88 3045022100d327b742052ba94d1722908a7a2f700551a2ed268ab9c4816aa765d719837b1d02201bbff34fcfc9e5b3fa744b06dc25efed53c2a71d0a9f5fffee01adf928c66ae7 30440220779388afb56c98a51c3c27c1afb5a98e50d34988e55e554394e7a45e4fdab54c022043f25f4af8bacb0b3674819f43d472819157ddaea09b6ae44f4489c533267ead 3046022100fd97a7687aa98fefb91a4690e3a5a06589a44577828c75db206cb7d560a29527022100cef804027234608b1ee39572bb41bd778b21b2597bc1ff86cc09ef86d52e4981 304502204f9660ac0cbb3850ce3ac39c391c38a8861e112dffd973d945bb9bc4a5a9a168022100840074cb3e8fcd9b0611bf01bae2cb0c9f68046a3331c63e8686dfd215e85eee 304502203917565acf824972a4634e0dd3eef4a95f4f546f7dffac9ad5f6d452b69f0f6f0221009d7dca33e2a23e6dc2f99b9b5898ef002f4b2f61332cd4cd93b183f80a2352fb 
3045022100bf59c09b4b1cf751178f750f4eabf1f5c77d1dc5258d37374b245293f58b3ae3022019dab7d87a935673c4ac8d826e51bbb4b4c99a6131a2d5781f1b6f2144bb6901 30450220744d4044e1c3bef081899fa6525907a0dfcc00061bc6a3e25166365133b0b385022100fd9321dfbab9c726d9793603c6ce72b84e1c66037ce2cd996eed682314b66aac 30440220236609c0201a7ef07e60bacbac8890ff89ccbac326c99aa1e8ed62488ec02725022001a1c1e38f40fa93e43f0f9c54e2b4b01bbc9531da6d432cd5fae592cc8d3b7a 3045022072eb4aa90f6dfe9a4fa1c46e7028d777f448f2d6e19c4aed381a3844bb9da48302210091a5fef00eab97e59b7111f67566c9695706f03dde665665556e1d4ee25c5dca 3045022100c9b30fa9b8daef4c3253db2bf363b362aa79c5c9a8f7d9f202c8ebe6c820c6c802202f3703b589bb002482c5c7e477a85ef476a89d32f4d85441b54d78e02188b055 3046022100c9563561117f324ab96053e327139146fa8ea65dafd36ca44022f33d7346120e0221009ee9e29478e230d63d1ad3922426b3421546898f551f906b59d3a8f9f5108fc9 3045022057ec834771527db480948a6039326389d50644b535e405e14f7a25ea0e24d3ca022100c4871e5894ed4a34fecc74b101d609c70893d651d12a7fdf69c93c9513871839 3046022100a3f9c3f506976df0ec15b691692662af93902ecc7e8d30cb58e2c0430f24f5a30221008d7c422095adea1686c3c318dcc976543cc62ee87495d132e1104da92004c899 3045022100d38e45594d885f7c7fc1b63e0a493428d47dbb3bbc7844dbd9a862b23dbcf3f502202a4f4f7bed6501bfd3e69c51aad9b1721de0850617d02e214e26aa8c219c729e 304502205a6b91fc2e7d11097421aa59eb50491ef5a3fdd28de189a6f89a66339f85d32d022100a148dd8eb5047b2c686c8045970d268d88c64466a1185c433d7734438a66eafe 304502201ebc9074632f4cf47e996e9b48cec908ebc8fae0628bb467a1b814bd4ec6637d022100bf2682e24f51ccb3c3c446664bffee3769c6e886ebbb01695cfc088483d6ab54 3044022048390ea86a9594c31ff53f19ec974716f16faab3db21e9a9944b1e1c83ec8c97022026e35ecf590b3fb588c9fd3e6f728d9e7352f84493d745fd03c75f59fc21d0f3 3046022100a70a18091c93cc7601839950bd88d7cb792f2b9f094072dbe607fd78720f26ee022100eb734c3990b44c3c7ef71911a80b513827d8607aa6fbaec25316c73d5402a816 304502210096e02f3e06c5342b5171882f54a303a68ef74937b3f05a4aae293cd9c870515e022065f5b24d55f9ddf9871653b8256ee71309052fa30774a5938b57bf4e7b0c7e74 30440220170cdb5e3be312a404311cb6913be16fd5bfa77c08fa9d23cabbedc926622df202205aba95e97bfcbd8320d08fbc39aab4d4f184c88a63c58f894c18009a6143cf60 3046022100c030b4088ee02e5e57c8192f10ef93fda7c64876b2e93c2b809daba3980ff4b4022100cc8d375dd4bcc819385f8ec7bd6ec0e9334013ecf4433beb7b5e2b4c42276e12 3044022034a3bd66c9e0578e402a73ec66b3027ba01af52c152b4ae48eb4b34877f863ec02207819abcc629f9c3685b2ee23ea1420079b45bf4a5207ae6e3a820160d2befdb6 3046022100874094dc55b492f4192063df0acde7b25b05ca1ee179d84f51ac65506f2d5007022100bbf98f51d555a8afb83e3e6672818540740b89826e20228e583ce2cac2cc0417 304402202da08ba34e015290babe571daddb5045566b5ecb6a2d8a251d4a46d1a662b228022060a62104c8663f85b72878123979a7c0418a143c67a3a2c0d6ea71440df51507 3045022100de3f434177941c8e18c7bf082160694a09fbcf80beec008e7efc6c04cb4f0eb502205aa129ffbe8fc06beb3fd9b4d108522eaad5f2c7e5d57dd7018a9bcbaa011211 304402205a124d620d95d87390eb03b7ddffa5520dc31e1eb9cb86b11803f00a2cfbf3e40220388c9a2e00bd8ee833b7ff41c33ecd0db5766d4ba6c7bad958099670b2b48d6c 3045022072f2213e9889b4f68c59ea07cc4069623ac2b532f784518c558af67683c1baea02210099505ca5c700854a2c87623ee981f985447e1a587637c50a58785d955d33bd41 3046022100c9032084571ca89d6bf28d7a734c235f9b4b7405e0a080eb5e9d4f7393e48dfa022100ce2678f0b82b56389d42ea8c6a681dbccc9b6624b33f74f3e86d75a3e85fef7a 30440220566f17630d9201c2adeb738e4655559d466b1d2acce18d68f507fa0e50300b30022074b2fad7055d2d427fd0fde52b19a5f4f02e8ee739b6c5bce39110e8afe05a74 
3046022100fa885d8c84e1b979a25048b4c4a0c39ac20fa2cd5537477b71645f4534dcb81e0221008246e2906a06c9c46098ab6764f578e98d2a00f4374564d9c390b2a988522896 3046022100a19de9eba1b2251b3001df0b0b8dd299b48ee40a6f5e5961af5f947c221accef022100ff7460c40468bc340a1231e7da25320e92e5de12b3c307a3af1f9fa94177caff 3045022100b69526161465de78e97bfb1ecc2ec4b3d43313cc3a8973c6a44b9a69768e67e602206e867d4198cbdc620037131c718b6dedd7781594650de4509e984dd99db2b413 3045022100ac22f3b3361f533d31587c736c547348f55533ec75862b37e128a8c18c817c0a02205db1d881fc7265f9baffbac492694118a37bfc4d1a18e70152cc9730e53b3c38 304602210082d483b1a7fb8a42055ff9cf2731f43d09739b970a10ac1d231b0f61aedba712022100f4cbb7001d8d0d4e9f21a85cbd083bf68db2ce5f7e796f09ea572091b23f0626 3045022100c5dc7406f5328357f41850b6d0a73193e436851aff5b773ef9f18d00ef3ff04202206bf6e23605f47f11603dc886173f615e677710e9a82651f77f121adee7bae460 3046022100d1fed6141f8e7a5354396a1303090f6de1888c5365fad2005dcdbe7ed7ccf080022100b78c81b71403f2eb1658caadd5625a7bebd0ad10a9c720e73718e51258c629c5 304402206f7349d10facc0628840e6dd3bb63933528e5749b18d8cc058075f25f0f06f3b02205806f7d3ba393f939ed51c7e6e69b9212d93ac988ef8bd5522caafda0d8d14f8 3046022100eb1b36ba5b1518f8549c3734f28417296e5a24d5d9fdf672b75d465ac002c396022100de991bdf8de93036fa38cf03b59dfbefbcb2e767b88751b075aa0b6d168ade5f 3045022018e91f23a1915ea61f77c98a7f89542b85be0bcbdce89cdfd523e91020e3e6bf022100b47d640fb019648e1f0734a5c9fcc36bc724d5d74d55e4b53a66be40e2ed5bc7 3045022056164a8ef5d1dc3be98f5447deb43dec11903dc08b9102b738f5cca6e28c1341022100a9dad27f924ca53ea4a8e64f204eca910f0f00e2f4c5bd0e69d83e604d189ead 30450220084190889607e6c80b8fb28959682d83bab60d7cb33e46aab3b548527fcea0bb022100dea96139d1aa627f30e15eadb51ee5b1433ad46b6f1f90e73c3ca9d851dffdae 304502210094e99543fc60796845ffbb042608f1747cdc8717ee51e6f384caf9eb68d6cdb502204d337c7f00b1b1a91c0f315f563d71f6488027cc5c89ccd9d6b2569f5f034732 3045022100947a3ef474ed14dc931d7df9c6717f896495c44058ae5c4a41390b623451d3230220586d9d48885979cbc32731a1f113365b4c81f459c4fbdb234f70f0f4f11fb458 3046022100867e1f818d5a2eab6b23d11cb6c0ff2e32476080397e596b9736dd1f3d7c1d57022100b8ed35673baecbfbaaf0258f7ab33cd1245b4004562402870063908f29bf13bd 3044022025ffb1571b5e6ff43df1af18f36356e3f8e0effffe5ac2214e002a7ddddd5970022041bfbd024bc8f14f611c23674cc4e095fcb5b7ad4556b9893ef395bbdf31058f 30440220625cbd188bfbd1662570e3aa14f615c9ee7d5a7215a3b39d4c7c3632ff8bc00302200a4e986610bdd774b26d7e8fed82a184fcd102619ef841661cb7f439ac070a99 3045022100c1a2062c87b3d2b6ae9e5dc57e97c02d943cc74d834ec8c78f6a7f94ff3dc6730220269b607f6e9195de787c59d49914405907a7b68d814fcec7245d09922d05324b 30450221008cda4ae103d734bd219171958ff430c9591a47db269cb55a45e2fe66d61e544802205b65645c3230ea96a93dbe0f814e4cd3362ff77ff5b7ccdc80cded38c62fec1a 3046022100cafe1a1329d9ee07bbd0beca3983ed8618ab357763ea5905034362049704185d022100b0450f696d18b1584d4894bdff8ab40e78e935b72d6cae55bca9e98cda2b4a48 3046022100ee1bd5c057da41066909905fb470eeba6766d6b372cbffd1d2154f51ca994ed1022100d10f487a9df07034a49bc5f0839cb3017b2eea913d5e4115ff200ddc275d8080 304502207dda9380b48a02142d2cfff093c14152913f54d05eba1dfcef5f20ab41bd5a7402210085b7990e19f58a042abc41e3ba020570a33dd9950a52887ca03fd39f9f51f965 3046022100e69b2ea48fae70b5053860f31aa8f5c24fb2ad03f2ba71f7b2dd597eecd5c05f022100b59ad8c4f4a79aa17739f3388c08056e96126e6148049006247b3833c937fc9c 304502206ddc262a27cdc70432f1fe33aaa71974350166395136d13f34139de4ad8d9240022100a196f3f43b01f78aa4fdb108ce83734173cfc4b2400248ce58256c5473fa843d 
30450220668d1951398af3c8bc9e3b7ec435b23a31e158ea82a1370449669936356196c0022100a25b2113a676cd5efb0c077ee38361a4f900134da3a761920998ad692992bac4 304502201646b746123d00b50e5e5d9a33829253868c25f1cca08d828deb8f675b491247022100fd6ba6c4f635f32e89d0096ee407ea86f75c7607c39131bc5ec0170aaa62945c 304402206d1bb119129865246f770f1e5699f017ca92ad967c3234573c436bc99ac549d4022058e66ad0345b64f4c4431294b26307eaac9a29af8a7cb6e8a1e7dc6826e5f1f6 3046022100c42a5071420207289a8b6201e819836f86ba3280dc3250cc1e059dcd491e69e7022100ddcae4bc3111f787362c38734bd632bcf6a99acd72b788f956aca0dc4c5d5c58 3044022071bafdf16d0bbd0e0169f01bff75dc4d5c9cf5214a8aaedd5e872e38db1df212022014bd6957b2e38083694e77882f4a98e9ad5b978ea5fb063bfd3de4674a89af6d 304502202ee9a8f5b08d3e028368acda86a6c14ca2d12de73c5b3240b343a99a063928e5022100f31b042db444c2fd14e6ab2d5aa76af85f7e9656f407d94e06516317bfdaa358 304402200affdfbc480314eebfdff210ac2e64ed4d15fd1523ab5d7ecfedefce46f4c2cf022030a2e0275341a510fa113dd79509a072a0943db3e964c3b1c2dbe6367182bdec 3045022100eeacd38e4b7aa23df55d442bee5802f7bce424eeed39c0fd8ad4c04a4598a41a02207dcba2a9f26eb8dec3c44f8e69a9ff91a2386d490a47e3aa1db04957c52af3c6 304502210083e57e0260ecaace81038fc1c4f1fcea5b0cbf9a6f234a552b2ac99f53ea6b84022056d7b6c52cda256a1150cb49085b9155de7a919a186f80933b4261f6de50cd85 30450220498f1a8a16d9ddaccb27ab024dc423811855368febb6f8d61149b131a8dfcacf022100c8b3c2a760c8ff2c8804f76b349186cf82b6a2d863e3e81898f118785fa7467d 304402201cd23dc08ca2aea9930eff96c77c81c9acb31ed73c3d1cec36fc5042c7292a7302206b750eef5e72436133f68a9e826282284729c8ae911ac7dce143d22e6d65d1ab 30460221009caafd38a0072632882d51f13d445ff575a21a6a64e309850f6f80202d915ed3022100978fe70c19ed62f126f6f0665ed28c1986857bb200e91df749cd0276ae1b488f 304402205f3b59a7ba0b755e9ee38e53cfc64fe4dc8a99e288e19e1bad166d58974f0ed1022032a6b3ce0921c1782f2e446c2eba878288ebf6afeb20ac36193ee934fd3db35d 30440220096a3d0e620b03c4308ce2b2c8eb5514e78e9f2eee7668fb2f516e83c9c35f6b0220731e7111819bfc558ce7fdf857b9c6cb6e887b8c6b57aa0d6195d37df657f8a1 304402204b4fb0580e87c957ea9c319ca5ff533664e4cd56905c549365bf58628860b9d702204a0c8872c5e3a19064570a31383fa00b0f4a6d2ec344e53b257300a68255e9e4 304502210098bab634f9f5f1b66ad8051414cfb2cf4c5932036e9ddd0c5ae786d28ac38f1902200d694b3193aad06cfa4648e339306335472449b4c73e6959bf5b5d1a67c13fc2 30450221009311a8c3ef69c3baf6127dd11ae5892e491022b6855e461bfc38cc934e072f64022077a945a88aee485bba40a9600c9b28d93255b83499f7c42bcd63921b94af8879 3045022100f02b01859204203edbd7acb4a1e4c96954dd03386972fa217611b2994f9a8e73022044022f4cd72496c7446c208c72cb5bee58b9c1c26ce7cfd451d475c5201f92f0 3045022100a014e5f099ce267e67da68fc611088bc207d8a31459f414c2c2c60d0a9b11efb02207aad9cbc2b28b08de1e2935166469b15725baf1a1558c4f0a96fc62b4691325a 304502202959337af3599ee5612c01d8db482aa29a459c0e06625a357b59049c1be6f31c0221009ccf6efc82d15311f3680e4a2972a0ebd0e183c6d91d4d867b0098f69538e48d 3044022077377a141c75244b1feee4559e8b5dc7d834e474355dc7ad0b17c019a19cfe5102201996a53caff898da3ec74b303b7990a2304216cda5adafed96ebcb5a11cca56a 30440220591dd9a2b4d173ba1aba06bbe75cc0891ca1739b31a721b6c96c3357a0990b1902202e4f568fbed7f23f729b795b06545f663d574c5bd3ba97baa7b81d25f17b17da 3045022100e34beb410d8e0d6da49ca112d80a0db3862c0b79f33e0170b97baa2e3f16afe50220559c939692a1a7c102376d0a4df29a052104c0426979f5486f7cf6bf06ac069b 30440220204fca4682b3c53f25a251594b1e3523dcc39d6221e80693b1484162eefb840a0220721e8fdd93ebdbccf6c583e874db03bff86d1dca6dbc00c4b701eca62f11c341 
3045022100b1fb4763cd97f8cfad2474b6865b2de301d5d05f4632bceab7f8b6ff1ee98df602207a9a8641c9d052c6e56ae6a4f2934f557a779a8e53b190433b0225069b67e8bc 304402202048155e29281eb2c62eb1d9980826ee24ca6c25580a6207e30af9814365486502203662e26c06f6442c3f3a3c59dfeb78ccc3ab067993486da88107b66933802098 30460221008331b2ad3f80079739e89d42fdf10b11d3f6a50eb5b3b50f7154a20d5ad08a54022100b2ced4c2aa3a1b06938cda102ea6c0fec71a569d8b9139ee8e31403f4aa55fa2 304502210095e0d72d4a9dfba6119a49b189d6e8d54515635aeac0a217b2cdf66e44c917970220383ec69def22acb296bca3055beb47a24c12ffefe62ffe152619bc5e8cc51927 304602210093351aa38639c5b40f0e4b39bf5bf0b312482d72902d8d1d594acc2b0ee3188e022100808012278c78fbaa39ac7a94e034a1925b954f6d17eb0ef80064c8cb1d8043ab 304502210086aab9fd955faf0369eb07f2e9f688bc2151baf23138da98ab0e7d675e5c9af40220162f3364386eba94e3e0762b1f812a2214edab33cdeb5cd132f4d63ba92db8cc 3045022100baebeaee92250876dd764aae40b3398912934da4a7a7e7cf94a88999c232fe1a0220081a3ecacc30f548df1195834fcc31610653899234a148c4ac45a79589d44ec1 3046022100926723efc34b64effaf67e20f5e669c0e741b6937137e8cee46fc3ea4c2826d2022100df84540bb2a870491dfb2516b466345eaa19dc85afa2eadefe87a016e0c10a8b 3045022100fb4cafb1baefd1425825584d0c7a2d17108c516990aaa2a4d506700671949af9022017b0347ea386d3e2b2f51ccfb0bdfdebfbd04c62009f7a294edfccf8ae8c590d 304502205bdccfb3120c363c6fc1c3186b65cb4f49c5c7602dd2f57e932d326dcbe02afa022100df86e5be188f261cc5f4113a9d2089c30638cd03547bc26d189da0c1ad49f413 3044022068b99d3dc3bcb44cac5861223c93c674c95576125a83f91dcc56abce81eb5f4a02200820cf568b0d66aa522fb21c7a7ac51657184efaea1c8693b8338b6e9d2ac639 3046022100b4f1efef792f03a2f70273fd50b80d442de2d1eb62acf6c3c77d8d21a321fda3022100922eccdf2e0d47e774b4c71fe256cb7b3aa0ebe5737983cdeb4303e8c2ddd4b5 3046022100fcb2e62eadf6bce79baeb72cc0b55305e0f10f04a56597653188e05848bcb41f022100e31d8204d28b0ad09f13f3410751a20bc25702f4d60812d31f7ecd524403a057 3046022100b436b75e6ad210a1462eaaf73dabe2fd0f40cbd59b22d5abf174c6d10cfaf7d902210099da466c4614352d72a23f8c71458a693bbb1f27a3084318db792063452dc7df 304402201688b66739691599b9add30de1712ae5caf947ad658030f71a2250b02d27f9a702202e7fab5987cbc18139f49a7b239db1817d03f233468d2b7d832f688b062d948c 3046022100c30c55b70e75c7b0090b1f30713c7d6f70da10eeeb046d6214378d00d1bdf2e1022100edcc9da170164fd66c3a2fb8d8eae4dcad54c1b0d707b633aa0dbc09700d0d69 30450221008bab83ea27deed9ef7b1b74dd5be9c71d447eb7db542522022c513b289dd9fa9022007beb2f09f00e331049e1aa97dd7dd243ed2d5d8ec6a97a83df5d44de950c18e 304402204880aaafd6022c6e68fcc92d5adbed476b29201ff148ef66c15dc863828398ce02205aa5e2a257ae6af1400a478114e5d04a17ebbb8fcc115d8ec3c0e1cb265a7092 3045022007ba6c4ace859253f1b17717d5b3df14603be5b9dc3edad2a77a5b951e722053022100bd358abe3821530289ed1b170a201a0997847e79d0ca235af9134176927da4a7 3045022100ec4b318c112bc04023738ddad369f99ed03f1f0af982c847d763233701071a30022009c2bdeb13e2dea6c408598024df9817085f03c2165e53c64dbf650565d89ac2 3045022100bdc4d44af677975d68ddd9bf9ee1767b6bb84c9acb978136da14b38ebfeebefc02205d3f5df4305c810f8f8fee3f232396b0c3bb44a899700d781fa00b99dc907dc1 3045022100b2076e2041e306d20c671fc2e6996c2bb0b4a3e1394cdd8e026eb82c9ff12a9b02207c2b3d3796a193295d907e1e55b7f33fa158d0a0944f1aed5a0bf4d7d6eba2b6 3044022033b715d10530c9fbd019df1a0761742eff65b16963cc80a7085728c7af39131302201c8e1bdfdbe6fda5cddeba46ea012a11ff7bc84b6904d0f3c495962f5e8aa2ed 3045022040d868467906ec026b8edf70f6464dcfd5f04771bb1bd072c29826bfb48a5e30022100ef37af87a06b6c1d28e2c8cf198c5ca7493a769b61859293633dc58a3b24a796 
304502203eb9bab3ef2a2d9db8f71478697b8aa4c9fb0e14c360ac2c1c4b76e826d3ab3d0221009500f29c5e5830f2b6a60981dc880171d828c0d774b4aa1a2537c661dc7f85f9 304502206626a03f20f8e84c4f5ca614c1abcb82ee1a233b3742e67467d9051c98e042c8022100c5b769a54137e1219a731b628a8220a22f549ae160bbd9e5f38d0a37b08aa825 3046022100b58bfa3b4bc8cb9b947e0dbd94d5631448a828c9ae8310bd2f4d74e073c58e5f0221009e2f45d66ea78aa9acf1ff92d551a1b449140cf26847e7c3f94d7393a9322558 3045022100e2d768cf7e42ec7edea433977b1dee4138d2f6653ec61f6a4b75d531040168b4022015d72a5c11fa5de0a8684ab36adfe502ace797e7da10795f48f618b002165aff 3045022035548d089e24f9bb2c7a517f9acb9f76ee182bd218ad17a17fbbee5b2465fd690221008acc0aab042d2e68f5fe22064cda7df985c252371f94d85b469eec08f2689577 3046022100bd7c241bbcb44b86aa5967f04a0d4af403c1f7cbb6c4f9af0bd4f47ddc90de6e022100fcbd3460da963b7baf42a8c0fd7c7c3665c791f5ccca0a7ff59fe888decf9d93 3045022100e15bab2f74e6b08e148e8d012a29ee13691a1e4d314459c9b4599fab79ef8aaf022053615fc5023b91ee63bcf551e54f98e43c562f8a15c92517400420e61439cbda 304402206109e0f5732706ea224364af8ba9a95fdfb05e2da199d934d07b954450e224f6022023719b1d24b9951d41a7a09c4eea8c3a52caa7faac9f759526ec3c6120e59590 3045022100f11767b9418978ce2a9a2eb4ed3e6fd74a480e5f435c0a19efa1103ffe9e666502203380d749a1e555298d3fef0496b483631ab149f4062c2b3140875512762fd3a7 3045022024beff16e42f53d58b8ba02c9aa40b4e45ad1e0edd9c8be97e2cfb614eeeaba20221008de3a22ba526c9a562e070869c2ceceec04444e83bc7eebd9e039aeacce3cdb7 304402206d0793a4efb136e4ab0e5e564883fad0e51c683a900dab65108cf2c9d7e8a28b02206b3266ec7bc314b5cbf4d55cb399cf37025fb0e847ed3d0350983b525b12cf47 3046022100978b84259901e25dcbc392d696926c3e398154d1582d537d9cb2c0f945e442c3022100a0b584e939d0275e15609dd968b1b324293f12205d14dc985c293e83df0c9f03 3046022100db8aaa3239a16d0232234c3979d7cd0581a3468e81f5b2718cf3c661542f551d02210087be4a88c1f11a4540850f4ee438d1385d4d31f62e4d34561996720dd0173def 3046022100cd83ed518e345cc00965387e544b8b7c1ac4b29970ac1fec964b9f1f7b8ea44c022100a3c711582dcf9fd6b80a9bcbda5f94820c2945227ab5fa698ad418e87d86486f 304402206edded00f4eaa7b5aba61e11be996513b09dc5dd62bbba580a435f20309c0d6e0220474fccf1534a47a95ebc00b9b9c39de54c2234c3a9f994e8b72a839f073ca5a4 30450221008718d7f4aa6822762f4b20ce2782506368019d530f15ae823485828d45f5fa6302207cfaceed3fd7657850cf52017f2122e705bc03128f6adec4cb0ebb2b876a7b29 304402204a378ba787ddbe18e2703244b58f3f6c9b2e9a4d7d36c7a9ce3c8db32ac2320002202fb409d814ab86c24f6595776bebc15161d235ddea9b4fad41aded8898e9c512 3046022100dbb642f5899fca16c7771ffdf7666a48cd2a3d3aaf596aeb8182462bdab61c8c0221008c2434e48d4d5a31997f2a72ea5a86a12f6583e68f94544de2c0ed3dec5bdd9b 30460221009dc7af40e33c798474676a28cb04c8842b6b0b611da9b4cf1a0422e699fec325022100b66a42a93cf2bf914ee85887f901fdf98acac83919be39d9534cb52c2f963cab 3045022100e6deaf2f62beabeec88857f5182bb0c497b50a235c7d3c08001d73eee133c7dd02202531e799843de1d473c462ad6427d1892f6f1e4a9fca74b8dbeeed597e7f3d42 3045022100ec7d00dc02b2ea7600b44403bbcc878cb292d8c90ee865e1f93bcb1fe53b34a80220496548b201ad3c9d62d61a15c5050f0b4fd3f534c04d2c5f24e767d0379d7276 304502210093c6e8aa5ad787847de1c1d6144d49b3711a1d253cc8459e75232d449746a2da02205520a28f3e9085bfe515a108ca016d6e17069dcad8649b9f9754343f5bb1d6ab 30460221009f1265004f43cace753f49bc84ef74de8edd4e8192d7ae93d1eeb6439c58f039022100a884a5ee25541f6b8e0dae28742613f05b3624c1641a19083e5c3184e0c55f3f 304402206b73da323047137fd57d531ba89d960d7fba040615d2f2ca744856c9f21fc4b802203e3fd63e226693112a0856511201462b201a098001922934c1379dc870d34b37 
304402202082cd064f3931ffcb7a48a553fa6b9ad9e7d22d0c963e3a2eb11384db7f653d02200c538416ef2cc5cfb7d3976b2898e146a9460a888c902323ba4052b9b54159f7 304502200ba1dc02a9aa23bfce6eb583a774d57d4639ce042f127168d9110f2fc09bea56022100b97bb569ed1955e063abb338ada63dd58573c8f24a4d3dcf7f54c66ec08afe81 3044022068c1fdec9b0f6468af39041af525999aeec3078d9bda419fc4f464764fc596fd02201ec0a1ec4855851fe51b565ef125352970de0891d7e2899cf63fe239dbb4f66b 3045022100a52c3478fc1ac5669b0e6cf70e281d3278133ae83224388e6d2a3e48e492044b0220309084ecf579f0a6d7ba9f1d9c49a20f9ce58b7911f51b4d25c745cd42cd17d8 3045022032f8c9ef77f2df758e870080ceb2eb58c4c1766f5b1c1bc1e6962f25d4523f89022100bf7b6756aadadabb7ff09e7512621eaa912bfadd27acd09563490e6602f028a8 3045022015b84cda4f020c6016a159fba113190fb91143f31a72b51b4bc113a963650128022100dfbe3cfa08c7bc7fd82b005bcdf809dfff1ef3d365166e4200cf9305b1158f05 3045022100ba0b459d17e5fe481d49e21c405859a52dadab763ababc5459786d687f7d43d202201789384561983774ab6ba1f2184077f4502a334e89948a720b60cd49be2821cb 30450221009b228d01909a78102b41b61f360f3457af06f3484589aed9bd4fe72b7ae81163022001e8ece6d0cd307a419448c53b148254a317739aa7f4da48d36d7a3de7f63a45 3046022100fc2d70ff4bcf6230a97ccc50b4de919619e6289c2434122e1165528f098b660b022100f40ff35d6ab2d73e71efe663a67ca52f2cb010dee40342089525ec545342887b 304602210091b2c8d125512e7688e1c04f1e158fd6ae15c7f309b21f4bccb6726f0a9a4750022100951d54eba98c7f6850238dbe9b04e789364293dbdd6b749201c7261d25d273d9 3044022072c9b8609dccadb9ea702db2e8f5907213ba21331e61020be38d5e64be4436170220197e0424950c0cb692e9266e43ceac2e3390a3d8266faeb303a68426489cecd9 3045022100d03fb3604cadb53389abe2b7757c662a2c57bbb2f90cd1d85d155627305eef4202207dc16a9243f46f4f2f9521b540cd81c989f0f90fa433534b9419e5ab4c80873b 3045022100c3ebca07814a6b60e8db481fff45f57e6a69348d72dc227cb03f34ce9a6a9845022060aebd5f5f8c9d75f24a37d8b4146401feb8d41e7efe953ae9d43bbf71d54140 3046022100e75e281936da0dcfa270e8c794dfaf75537302fbaab7912783752ea4a87d42c1022100aff3726840501ef4cd6cc38d9780daba6e25ed048057cd637267c6e55db3bbe7 304402206bbaa271714ad768071fdf8d9066bdc63dfd78be3ed4f6838e598216e40f71d902206e96c099f719f7de24df7914344119d40535ece06b7dab8d6df4a5f7efa58349 3045022100edc113cc5d6094840061f011313073d7df3c3bd001a2a7eeef2a7b5be2c3b7c60220739b3571e4a4898dc6dfdfece8dabf7bf79f1d0ea5e261131f7edd76425c5f10 304402205111765fccca55d4a813b43ae1e2b8fbb2a9605ea6e65a8543d311332a0b1812022038b8b33ee6badc4c810356b96c5d269b0bc8378c8f45b0683cae289289766afb 30440220360fc691ab3b4582cc6e70df2356b3fe701a68ea51ab94472f555a503d82fcc502204aa5ba1f65348b1336af3fae3bd4ad2c441545f267a4665aa7d1972f40f6cd5c 304402203847f1e8529c18a081264bc5718e59cf0cdf3b0128e4049feb0ba1199e0049a102203b460421e850f5a6439e9090ea0f805b8598ffa903650606bd6a6a86841d1aee 3046022100bb208bb3063df214c0aeae2b80321faee79d8b8c70fec92bd30a59e97e9a7ec8022100acba02e0dcf49dd08d3754cdcafc860673d59abf23dbd61b090c99ec03eb03fe 3046022100ac8647e8d4615b95ad7a44efc09d3ebc9bc1bfe6bc8eab73cdddba86790e31d6022100e8572b012597c00643eb43f8980db5a0727916ca52075975a63c1f2e66d54145 30450220709efb5300e2c8c56e0d0c98cdc3408657f0053ed3dea762b0c72cc3dba25839022100dfe789d096248217ef9a34bde994f432f3ce908b8b12015b97627ebfaf6592c3 3046022100ea31e7f6448ef7701d2f7871f5e61845069c9229981f7dea054aa092e2976717022100b15a83a766d4522dcf2804f894276065bbdc54e033311d04abcf4b781c1b9283 30450221009c36a65be7534397d195a1d6d346ace6c2998028223d7f75f55d01e037871ef2022007e8d30743e60d4eb4cfcc8c51b14b638bd6f577a535a66b3a28cdb67ec435a3 
3044022023a7dc461013d4f7f2f1598259272f68da171bcde6ee6f1fbe804a0686bec48e02206f109ca0bd120c73a3b5c336c4ed5b1410ac35c0d3fff5275e8cc538a801aade 304502202529ec4f0cd9a77ba443c410e8138ecaa1ff88c799f9b2f59c2b73e7f6561090022100bba5705ca29d235a83b8899c41724499c953d150a3a96f548dec4643c8a914ed 304502210093924bfdb1a7bc1c381df73f902deb6802767f802e274a0a47d719b5670a4d75022021eef4ad52b0660a1ef364d505cc0166a16e3755b6364b9ff603da825cb70b55 3046022100c6faca9b209d81263c7f2840339453970bff066d8b93fc5b1aa7e057b251c0a6022100bd8d7e3a5bd1110a294f9d7bfb226bd4b132beda8bcc791b9da95939fd7635c1 3046022100ad7ddb7f57143f33302f0d9d352894514dc2c04cf932faa2ae5e6ccd83009a1e022100b7ba62836669fa97ab124d447294981d2cc52042783cbabebf7a8fff928c06c6 304502202a447d07f8f2cc06adeb21ecde56904e916c49a7d95e50d48335bc87b63dcf15022100a6f120662fe2c5a7fd1d291ab768e54c525f50eebd36644a6d54da8a5570863e 30450221008229a1ce618f1e75ce5d363bb90aa139020266d4da48ef00b057bb9b0361bbae0220713b96caee275bd08efe053ee4ad5f601aa9d0d221d35fca8c7892a0d7d11a32 3045022100c83efbd08773230048f26b75864f77377a4ed1b191a528dc922652a8b72b23360220119f53d57a5e6d86acbd9d8d40ae91d16be1793c71d0cd0eac388c5b4d94f4ee 304402203b8e0013bbcf36a7f9d88d6e590811be2bcf44cefe02a9b57023c51455b72c5a02200234904c98618bc08d822a14516d1243772a4385560f6b47c633202938e0aa8e 3044022013e8e9817606788f26f317e657f45410655075238a54c37d13396832465c0e10022025537d60123649ec7fee7b850b61460eed792104fe295ab7ed9066d0afb4c73d 3045022046d3272d007ad4905b523eb9aa28841135a0351b4b238dd65f0677c2c86a6077022100d13a50be5a21038ff10c05e2fc1a558f19a42dc9bf50945632006ecc9007f164 304402204eb103bfdc0cba6a9b5e67f69e508d01163bb0615ab4fc41e71ff4feda84305e02206c716058a914650c33944cf8e32c1dc75e3f533edc0806c9566bf7f7d0c37453 304502204554d86897d7bd7beb1bebdc1a5efb6521ebe2b8201ed0f25d605a0425ff21fe02210081c30f770781c6df2cedb8a09af95196b7cfa0fbbb40a5d6484aae74d685ac93 304502202c2f0894893b766e99af8820a90f0c17489b16134164694d6d12ee98910490e9022100fd081406567f60184daf1bf50028ddebf0539e7c946048af0619333aa3921cb1 3044022069cbfe86a64a3da59f3ba534a66fa16fdd5df1394714a0a661094f3b5b78814a02207bd7efceb5cb0cd212749f420ffedf6b529cca201ed916f43c322dbe2de522e6 30450221009f8ddb0fc6ae8d9a3a233a61f1d2f6c4943883fd664f1b7183f8fc74b5e5b46f022037ae0953873fec0c14a47971a4d975b9f27b514b753cbf47fb87e20584b7936a 3045022100d7b023f91719f1142827d943fda2ec336d166d3b82fa0a462b166ef8faf5afb702200a16f51eafffebca8072e1c6626f3fa21abb71c786773e1f33d48874d8813014 3046022100fd1514d1c9364b7ad79e4c5e8b7f7b0313c5a7605517501f32839355d3ec1f5202210096bc6fb544f27bed937922f676d4d627d93f108029165ab2c497b2a91c4b558d 3045022100c9ea4e581518dc1f66a804715c85eb5b9bb253e16867334f2fbb52bc9939f2de0220632c684bb1f0d0b3039ce8f4a70964833103d17d266d752d0265e5ee2d227007 30450221008b874762de74cf33ed2a3e7a9bd4b8a09151e901c244dd9f963115dd0cd489cd02202ed8854247469c96d521cf59592f797c540669194a76fa02c37a023f4a8ffacb 304402200bbac1a7ca8079927949e662925fa49e620a1eeda4f392560f004da5c348593002204b362f40381516138d4244f42cd5f90d25a1bfceebcd5a6bd790aef98cfae823 3045022100ae8e78ae2f7200601b764ec4ef12b3d68d435efd3bb5d66b7b2c8eee646d79b002200fd5b1e61bc80d007c13f587d32465a50e2b5a1ed988e2844c861aaa6d0d3d71 30450221009d3dd548abc6f4bc37f3852a26dec356b3cdfdda9695311464e0feb9ce4a33c602202ccd80461e3ce51c2fc931e00beed65582ed191050b71be397a6550fe3722c33 3045022100edcab02e5329cee27bb18f2dc7169b55e607170ebb6b168d727a058c49e1b5960220388bc461a0ade153af3620366115f205c3b2f5e64742deac9dc861f016c2af8f 
3045022100c01f96401bcda3dc5ee07bb1bc4dbb3bb44f2edfef682b858a7f0d52ee620dd402207c4d4a27e56f2a192e0e43b43fd215a79c833f2f20cbeca32bb0565b1f73dedc 3046022100fe8761309bc9fe8eca1f78e7f2477bb6a14cfaacd62480cbc16a23199516c4b5022100f4aa320989663965f65ef5d3670af07291c1e405852063325821d67aa7a0c2a0 304402204f53ac9f1284a32940004db995e236b550d49c2c0407c7dfd30d4ea45595c92702201308fe6dad6f9109d7c34332e32ae837a22fbae0a843f136da2101fce6636d8a 3045022100c33f95c5b6a81517a586cb5650712143802c6c08f02b02c7da6a50cb63bfbf870220019ddf654af667f4aaf14892590b6cfee831cf120d5ed602684fd0415e535f7f 304402203732546bd2bf13dfbbd5ac1882b304c95f467bad8bd1f92c2b7c20cbc19bf573022035854be9f19a6450d8af7e1e036bf0d8ae8f12db4be1f7f3859cfb52a0885705 30460221009b40c271ba02dd4d7c2b042218bde11a85be37fff5e53c7a61ea6d68ebbf018e022100a1a97fe25073c12b49cbdaae40fcfc0d2c02abef52e782f49a80f0c29c850173 304502210094b517dcebdd7ef371852d5ef890dc5c760e8ede3bc1e742eb147c26ca0c1bfb02202b7982d3e7ffe83dab57974689d10f5ad4908bab5d01d80af017974f401723da 304402202445d2825f1de27fe344439fd3c62ac58d57f5fca0ca4809df4509b01fc5fb36022056a905baf19bbc656867de94e38f9d327044df89869c467f93b9c0136b550fb4 3045022100d58143cefff3ca229a6b41a20be8b66bd19048fb8da2ce6389d273599583907c0220734cd203ce938d6a96ac1344563afa5a0cf15670855b6f452edcbd803a588c79 304402200bbc88356c714ee21de167c7d9fa91652cf4dcbe6f267c3ac804c6e43f31760402202e7bdf9f196f568abfefcd9f31316e32316883ec8841d08f890a150604af6e77 304402201573697d55721242278ce4ecaa04285af7ce03a35a59f352ef505732b865a449022032fb9bf61f4cf51796df6c31c48658bd0e20b16d31951d9db7d9671535071c04 3046022100abc8448655b2a71901d64e2e857d7cf924852d17b3b58e287ad3fa6070a55f020221009d56eece85ed8cc6fedc87168ebf4207a7c3b5eac899437a0c5adc52569d3ef4 304502206b074590cc69251172f2a23ea87f0d2b96f06131716ab792a735b38be25fe0f6022100e432663ae4f3bdf0fd4d2a3544467c19fb39ed41c84879c5dbbca76a10a6aaa1 30440220709559b392829cfeb16b98a8fdecd2abe97671a9b26b8197283e66630d18342e022027bdb5999ad86f17f3e68d92833749cdbf84f14becc34787cb01168cfc6a8ad0 3045022100e654ba2ee6efd643c781d7d85582fd157b71205e3eec5defec307b3d5778bdb902205ac268f4794e2e1dada757a32ef02cc689af3eaad0808a250e8ec26b15047d8c 3045022100a0f23e289fff525f4f5b5d7ced3ee0b5fc209d783492e8ad776ae4778d0e1417022018ccc44444753a6dd5f959361c061dacaea3afde55ee65c21958fa0c1f450048 3046022100da9ddaaad086287888509c44d8674316d784fa45f31b1fe0b5876a0d38e75c5f0221009302f4ba649905b34693073ea3656fbd068fbec1ffa889cac779f0f707e88ade 304402201d0051d4ddcea4bd2877e53466fd1a0fafb4602b7d7490e4b7e821fbe9e11a220220156276b87270cf6c2430fab1609e722343dbd609927504c30deae6113b55186d 30440220278ec881aac523fde958f1fb580b6fcaef4cc037f00482344773ea66db52142d0220393e1dfc60e179bbd6509e82b56a224e4b88f198b5074b24a3a2f67957aac072 304502204428e299a7fca925f2701b17157632658ad6efca4a3ad5e2fa90ce09c59841b50221009737b79b0ec995a98ae914a32730b6af7efa5553563b0d8409c873ed2295974b 3045022070f467b244272638bd2d4d3ef4b91fccd698631b2c0fde99e8cc34e7a4a3ead6022100d7263d56aab27a20b7fcf3ea6a0747723568d26eeda163a173419c2e0f3e35bb 3046022100acc94e9882d9aba74e7a774ae098b7af9b0b342a975f47fb6072c1f422bf9987022100bf89e766102d6684b4402eb5d15b747d0ec025658a212d1d05b4e04eaa609b87 3044022018b5ea08598178115cf61337a5d5afd27b3b056bc45a53af2268e348f07059ec02207f7816794d303054e86ad157462905ebb8692b847e9a524b8f9f5dc5fbe8333b 304502201f57013835107f3da1e8449393f3b8e26efd6a13e4fabea4362b0cff0d697e4e022100e46178b11e6223a6eb9afd09866cc5afb6c41dfd11098dcc4eeb97046835da20 
304502204e9e9e16d9f580dbdc88ebd717e2ef9cd032ea04d60fdbfa6751d1ef32c263d9022100dd095c9c20c705b78acf565c5d337ec55683956d6b07ed9a8886399cf538ef66 304502200a35e813f6357cc03ca76dc8f5ceb74760f5e9cc0a0ff180984ede29fea0fc240221009905bd33dbae75bdbbcd3589853a5529f8dd9995f6cc7ad959d890d56d7ceaf9 3045022100a29f63d6d1ce1d8fa470fde8bc3cdcc497240bbe1c901f01db760ddbb6c2fa160220092f5071db062c99ea1521416c5232e9b2bb5bcf44aa5e298b2af326b9fc3e96 30460221008bf6d266d5b2201a06c93d2f7c9dc7581b97b359a265621370a8cc2de82ffa77022100c7e2a4e3149faf54fe94ea3c81efb95ac01fe5c135c825458587dfee26a01b2e 304502210099eefaa33e95f9734dac5fb4baf47b18f353dbde36922e16ea2336c1cde9b02b022037bd8f2957e30dff2df555093e109cef5217a6a4ad8c08b413b8166c8e6f04b2 3045022100a97573f392ab748892191ff0dbc7df25f438db24f4a4edc663f928a238fef4c702201bee019e3ff83353dd190e469fbeedce4dc18cd04347846fc957e66679f4e260 304402200bb9fa4d82060a426629ba7d01f47218ec8d4371179e82d8eaa33a5f7331a96e0220360aa6788664e8cbdf6aac2a2a4f4f7aedfd8aaa3d31d0bab397e5cf51a1d016 304502206128270b932cb4247bbc64c20a81f9ec247dae465e960bb67385cc0e4f357788022100b21894e733d924f2822f7584c0ab9a8eaa094a90c530b38127f68d720d4aad3d 3046022100ebb85bd8a4570cb97a07ce4c98c6d82e754a1b0dc84a52e421135a61681f22f9022100d3040e6161356dbdb5e21a4acce2865cda17822abac0c752e2cfb2747bd12065 304402201eee53c579bacec6eba68af6f6da511a3aa37748440927fe240389b7f06f552d02206683f492835ce1b3d69d2c35de003862f63b85a448af81cc464d4c61921ab300 304502201bbaba3e239f0afc43e8dcb4717702c3366ff74fd81d3d5cdb08f9ad7788eeb9022100c44bdb4ccf71b272d1b008a3996cdd123dfb91ec2c7f00b292ecfbca934c81be 304502205ae04d5da3fc33e7dc3e2fd0b03680965b597f7b4cd436ba3e99652f053da337022100c9987ded4e4bd218ce320109259677b5ae3d6df0c1ea3a3e3608f4d81cd98267 304402205d5dcc9125fe7d8b74b3d238aaff407b1d384caf3c8b8076353a078a883177b9022003a32020b2ee5906560a06ab285c7331a09d6e859fe47a242cae16379ff3d8c4 304502206b15027153ad6edf487e232e3e3d22c59ca6395a5cd7473855e87f8f4981f5020221009557afcfe274ce2bb939ff1b9d6f36bc553fe93e043214aa215d73e8290698d3 3046022100ea2072a356b8f7ba47565eda85987e2a0b2716e3e607879378c674e9b9a27b7b02210090b3e8dd7ddfb9556b86d80e5c1084efdbc00a3f271115a0d2ca3b7a209b86b3 304402207cd7dee77076373137833bf8bb3b3283404065d520f9b5a8885dc0e4e99aec360220510df5def659994f77b45d51f4bad34a250fd43f44de9b8480365a72a16b6bb4 304602210089f45d07d0b6405b2de540bcc8aaf53c01608410f16ac1dfeceaea43846b76c9022100a24e7f4500b737598eef9a3387d0b17b268bf6d0ba86a2c63098ccae7c47739d 3046022100935204636cfb6a9fe89fe7204cfdaaa3f7709c94da3538af40df5835a11dd201022100ca5c5d97ce036c66d6a6854835475660c5477deda104d7e06347d177215736ed 3045022100d8c8cbc2a505e9afc474e1c8d8735936ac4dea7d99d3e2ee41e47ba3b5822198022028e7568f766a04064e7029ffa42f5dd41f30937122250f623556c5ffa2ba12e0 3045022100d6120a2b8a2445dc88eb0d574330b3c1b7f2712027adf8b37417b4411761d0f80220187041c7cd7c417da82127110feb307292222e706a743bf1054f1f428b4a80b6 3044022067aeaf07a5cdbc70657c985a86aaa5356da0e5b89e8b1a972b84f5689f09896802203d2f1059bef35211995f5e4fbaf5cb295a9001c723216d2c3dca816308f497d3 3045022100e4b586505219fb2ae05ac50c9286e8560cfb15d481606d18a35b80bfc79198490220247c2272c6a8a4b4f74a9a5f5c501f636fb1d51ce2d62a0c7754a72c29d707d5 3046022100b53499a8ed51bd3b4f4bbce19621bb08963363418454af7685c0725cab5bb90c022100e2775b7bb41d7248a732e909169e3c0e1ef0108b9007c19e2b7de5a8504dc87b 3045022100acff8765292541b49deefef6c398c3455152f61c43dd6e6f4da01cf775237fb002204144104c622cb4ae8db323fa4b1414c8e3f0b7d72de3d0472f06460981088371 
3046022100d2084165690cfd90b661005d396b41ada56898f3f837aaac65378b42be8253ac0221008a3a535582902e5c4264d8d1619fa2839ec6cb1e4cc97dd584890756006aedc5 30450221009c13f06c5f9dd4f420a867afe575247159ed82939f629a585f99afc72efe99c002204bc399736ae058bb99aba14ffa75f71849aeda73182aebe6f0019f0fbad8496f 3044022018737d8b976f7c1af13e16f9b4a76c95515f594925c0312856d483826674d077022060dbc42df6a19d48edbe4a19f2735b0b3e90bcb3e5a9a6bef0198506b4eccd87 3046022100bbf6512630e86cdc3d1c1043e94a3cdec5b7767c1193000971d46359d84660c0022100d0f6654e7b61d9afd15fb5d2956bc05085552208a8327f29627ca412def6dca2 3045022100c018d414dc17091f4d94be1153dbe4287f5e86320c123e3c7efee97a681f642c0220348dfec71ec51dc5d5c0e869861292eccb35dc9abb1c479340af89fe66e9f5c0 3045022071b770ab8c3e471952046516831575e3d39ba014e1c0f3cbae595bc1f640560e022100c9591c46ab97102feb6022c3ab4611ae72c15e5cf493bb53ef1aae5e598552b6 3046022100dbc899028e02eb628c4ae9902655fc2076e077b8bae124018acbef8417651f64022100ea3ec24b21c64b3f52701bf094d85049ae78d0a5e72fc90d29b9ee1f5618cf31 30460221008b953fca17fbb4e2f842d2976775bd8aac53e26e4e586d96a317174e0b726ff8022100aa8436bb2588a9fefd04fcda3591577d6cf67e34e87f4e17e1c839faec28c4a0 3045022100d95a9d7bd3f7878cdc294f9e0445fd7a173926cf79ed37938115d3976465074e02205168ac3e5e1d386cc00c3e8db58b30bf56bc65319a598b29010b7cbb58716b65 3046022100bdd0ebbb4b4e1470651da727a7429bac91accef09bee563d4148c643853e178e022100f62a7cacaec3fd6df28569692e33c6515dc0d563844bb3a1b82834bff5b373f0 30450220410daf51be0a2871e8f40adeb13b5f0b364012f0c912089333c4821401599bb1022100ae2446646a61762b94f59ff1f271a5d5954638df8bbb31f00df082f3f18b3f22 3046022100e2f8f19783e52f0bcdfd5f098442016596d7a0a483e0e9d358c9a3e7c5ecc9be022100a9a12f027f378268e1a9c0daaa16c46f974955fd42fade261d2ea4555dafed12 30440220233bee7402ebeaaf555217a3ca09f008a045d427efe8d88ef3453b6684271af002203b3ca470ab758f9aa9aec3fecda77906c6ab558dc29e7c993f110082e7c185ab 3045022100d4e4fca516af159f5482d21715506d2d097e3f7e021eb5e78b0ec76a955c4d7502206e5b817afd2af30832bab53cf0c49fc0ee1df68aa2809da3facaf3b2a28e562e 304502204572f5b78c7fcdd4aff1a68935b9106b853ed8532499de487816323348777b80022100e8e8555ef86d6429065d372395de11e66fc01a5da60dab48f881d5919584e882 3044022058d84b619cd7acdc6bebf361373cb575bbc11892bde97d46141430ea29768fc202204d3c64d5d9cbca9a72315f9bda99b7361b712eb7aacdbc1484139962e402a316 304502202725ad15b303557d7117ccba867edd7ee1e74d90d64abc206f6753f172daca67022100f7870ef4acc3f162e2cab8f0667ce24f295cdcc9a47c042a3ace0626cc232625 3046022100cd53693dde3fa814b60db6885167791c146843ab4db5e3f2ba759857e8272687022100c88e64f79daecb6b9e3bc9ebac08fe2939b689de70ca1ed7bf363bc0d4383bb9 3045022100a0480d19c128c91096c45362d22df4937e34ba25d0fffe6015d48ed7b7af35dc02206d11f9b2299e0a67ed7052488a5daff5430143a3238c06c25fff9bf1eb84209f 3045022100e453d1f347b0be6e71413996d6661f0c3859fb4d8990e3dfda0b155e2ed73c33022050b41add3ce19eff95bd10bc0ea28818ca34046c027e2897fde10379525c1a44 3045022017f0b39bece2c4d0b596a36c6e5ec4c19764653c0a84574e9e5f6f895e30ae0b022100d84eaa0c83e33d97cc603b9208d4760a394055e84d8a1e966c020a1bd1f52fb4 3045022100ca6dcf86c336f4e5eb261800afa989502c46555a240dc17d3a379464133517960220271706de46b5ee596f7b225292f025ef3286702d8107dbc3cfae2a8bcb8087fc 3046022100fafe4d4903ebf25d4654591d01a31e052462397d3a58e792ed7a0e46c706e0bd022100b51b99b8db5407a563a5ec3edc4232064619cb09c013813d78499188875f134e 304402203279139439b257a0421e3ba1822c0b38bf65de6028a33e2a487b8b09b1fd23ec022009c4184f642bec8b6fe6aad5ecd41c0af8bc0b624d90f9bf106dcfe23b1b1e60 
3046022100fbaeb1b0abe0dfeabdb19093ae760cbb4eedf2fb9681b5958668d36cc7a05762022100b53600443eb938c6feadfdee6d8081f851d18898478fb5caa842e1898fbd4d4d 3045022001fa672555e28c1e3ca094395529103c122c802511b927033d2ddf5e10835863022100d9ff65b85578f33887389ae0abac31ed7aac59c49f2dbb4f6877cc7655b06e24 3045022100d7675f64f2754cbc400e11d03fac8549871b02df7e0609f22c53ebf8e065f75d02205118440dfaf31fe6a607f90b688c3763e00c7aafe14ccc091110743d1706410c 3046022100ac09abe64dbd6cde6e35cf153b0d99132b40bdd59d9124354d78f47a3abc74c7022100a17acc6fec220ff5bfdb0de8e98e93929e89f1626ff12c51734dfd78c3bc4345 304602210097ab60576d74768621679539e5267f221307e532b066ba622075c060925f603a02210096221be6ae01f50252e9ce22173f2d2a6c1c805be3eb4a8717b23cc4404abd9e 304402205c9b282cb182c6a17691c489050ce53292b461715c5494fe5d1c094e765aba5202205a95c75a624a0f7d8bc80714deede93a5443198a700cf93b284fac25b27cdc10 304402204d59ee7d5870b3aff44a628ff79e5439a8677e5c2ff1a48fced274f8114f9d5f02206c314a9144cdcaf26d10b86217ebd2a3bcd1ed68fa198a6727a11f4e1a8380ac 3046022100d827c4a977c6f8103c8494760f6eb3bce2e45604bf57d6ba7ec739a30ba49d8c022100b48817a4900828c6a53016f6a891273e8e90c23dae4dcde0fd47471dffa22d66 3045022100ac41c9e0e2f504e9cde0aff089c66be6be131f7da40d470a0c8b4fc00a5050f502202cbad4927be901451aa200b1ab09c6df8329678c92d307250dc3d109dd205fca 304502205fe7f236b534369d2ab8b0df9ad4dce5f4d82424f231a64aebefa51f077f350b022100caa5a448366ad1b05e28b02471e7aaa07b76fec0a0e308c857e1587417ed3fa4 3045022100b809c16fcf4af7aba9d25c9e3cd4237c0c3bf1c1c61a92449afcbd446f47334102201d8033c7eb71364b48b122a0d72521af0578a007ee3d1f3ebd9298e34544a261 30450220673457c14c55d93cf7809c147d5b99451a5ef585106e897be43041c133504e28022100f3142a133668903921e3b6d76cb05e74b862cf8300a185919a295495298ed31d 304402204c7d4b5808f49f7ea06b61cb9178bcb16065cb57ea382843b7cc2f469fc7573802202023f58718fc94968065b934433c62c0b5ac0426cd9ba209662f4d11c3f16ef9 304502202452db793e58e017cedf394a9c0698492f22804734541312adeb25cabfe62309022100d2e336bf1e56bdd2e650e483fd0b9992b39a5c9346a346c676b5142eb8d41294 3045022100bd85b6440e86a838299aa055f8d5ba776cfa6b25ed64a53ed494329a9353c16d02207a14e4340a326f843ff3451942da0b377c0ed35c9a081ff1b639d0546c9b474f 304502203bea7601e13a03e6f6ea29cb32b697bc6dc2b2107a8953e8494875b637197746022100841ad941046e3bca38683c7773039041a8649eaf1346797ea52eec31d3ef17dd 3045022100c58408b1d9a2e170a8084970320926a647d5a148f31308c824ecad96496404a3022019ce21376704435a3d25462e3f6c311c76d2a78ec44f3d8cba2bdf8622473242 3046022100c7933f823be577fac1ae6cc9e4f2896a9983155676983efbefb4234be45c6de40221008103336b9b4a0a53b3c2236e210afcbd902bbba9e1f9148704c47ccad8625de7 304402203f8db80cedff9808f53a35985f14fabd9918e82f3f3538771412f499831cda7902203bf6749fb3b77a50e9529b837c2b4d7ecb2aa45be4599ac1bb71e8ae5b90bec9 304602210097192722ed1e075701cceae0c1842050fc056590b574336940afb173c3dcc435022100d294ef15a38d54dd12e9508086a909772d3b60767f1ce928c27ab8726ac4a04d 3045022059701f366fdc4e1b1496798c054e050d8d97de9de11c85552fd0bbc67a3eaa2e0221008fcd1462d356ff0c4a87480628decdf1a83d1994d1c4234c4b95fb752cb93b05 3045022100c486135e2528d1fcd4db87f8c7533426c039e854b240daf2701dbad2cffe0ed602201ae8c64e6b1f413145d7d46578c782409bba60eb6d957beec977f91a9fa60d58 304502210096125f6f287238c9cca62bea3e940af8005a44c069a3fd1ebaee0b6e53f5baf0022071f8f9406a93a8f36377edba884bb5e9cb318a6e36eb52c2b9543d978710bae4 30460221009f5a5b767b389df54d76ad6ed53ea98b43aed377f871e50ace82f092b55bf7bd022100cdf418e03e6e9ef44e739892af6d53c8ec403c3b41054bd8f9a99a7919a0109f 
304502203c1bddc4c590f0299982730756c08164228b9473876709c81e49134848e06ae0022100d63e30aa4ef434e5c96099aa0c47f5be336602b8e5a9c54e9762e6512a0f0e41 304402205cdfae8c3a38ffb19d9d7d6797b20504afa49caccbbfbc60f94cec3e0e7fcee6022027834cf58356c981fdf33ad37340c817165413df011b11ee34bf8ced7d67918e 304402201d61b1010596176afcb4741e747511ea7269ba31197a8a64124b9d583e47d3e102200a4691e2d72227c7a0717fc14fbd49c2c38f9de412525b97578fa2a6a6076798 3045022027b8c6acd09a267a4b04991eeb803db15a2a7d4618a0584a0cfc27ddd306d414022100964b9c808272cb75ebf148b6459247a7dad184d5409bf837d2db37f26803aa88 3045022032e18ae144b9a8c45804aa18d647c43f9d96303dda92f50e782a849c659347ca022100a376af2f859a220d724aecfb32b1cba04925d4c2a6adbc2754ffd530bec3d9b1 3045022100d5418f81ee44588dc4760eae4758e1ffaae81559abc49bc3cdd2bff46aa371a70220707f955cd976bd862b1e73c8bc8f67b30c2ac6d73dfa5b68b31d983b7bbc0cb6 3045022100a18a085bb3acd8efca14d2a72e9498ee1f4b7d55242cc3964454a36fd94283e1022023fbac3398ca0c33ad2b97e24a303b595e262d3608bdf6ec4b17cce71369e983 3046022100e1d0678c7f318eed442d54a3ab119bc1e091dfabafcf164a5e8a0e4c4e57abb50221009320a30c4a5ca38f76aca67c88a477770366baec29f6d2582ed48f2197045592 3046022100ddfce8a1f3e23dcda2a356f1203cb3ebc8a35f1d2ef891f5d644795f0633a66602210089b33e97110ff47500fe78f6ac9bd3e328501c11f6bd26e337d46095c3f398bd 3045022012044805c6f3a90fff4e22588203584c0a6d2d0c4257af1d103f7115ccc20236022100a3158bdcc7bf52fe7f0c1f577db2f20187261380b47597b40117be7e7b211e2c 3046022100de9d105f81b2733fd6b8f9032cda42625cef857ae99eaf1ea2ec95e094c44a50022100cba7561106e552eb1cfe89d1d4b3c6f401a30ed0da7938c69266b82dc8ff3bfa 30440220237654d2183aac423e39c8b65c434d1bb7560e3f86e7bbbd12f52b0e28bfe907022041435e877f09bfdc52a955a04895b8ab4726884107f3571638192a81dee08393 3044022008e5617ef8b511ef88c6d305ecfc95747142411c765f700aa0251a5cf4ff7fc3022050fae83f16c38955e40eafd1d81b84f10ddec5bda39314eaa825b0a14b9e26ae 3045022038cb748338336256dede984df568950da6e3f58b77f034faf646ef0041982953022100a6566a0e43157bbe3e6bfe06f457c417d9eadcc6aae17cd30ffca0ebd0e4e193 3046022100a4f33478611149db8541c50af5ec5493b6d0dfe84fa01be5708321f9b6ff82b2022100fa3435b4330ca79b6eb5d9caf49d00a0b8c392eba0be534c7a2e96b687b10f36 3045022077f4971e31dda799df7b8bb8fc3c7e910a6f78ce04fd393554a70cf55d06a3fe022100907e62b96a947b61ba5d5ed0d4c6e6f8f2a46ebfa1ec9ceb2d8e80e229552986 3044022072930e760134db22a9ebea6104b7786c7225854e36d021a3e301931cb2dc201b02206a56a443d5b77a5ea544af9630ccbe2d37438af855e62ee77cc82656ffd917aa 3046022100de63c170856c4f1cd7654c64a2d9fd28e4af404bb31f08565b7fc2779457ef6a0221009eb8c91ca28f3b762fc030bd2715b6acd371a7fa0b1e22d63479a9158855196c 3046022100a6a1eb1282f1bfa77fd55dc0bea0a9c4bc7cfe800377c7afc09a25a0a7f0ca6f022100a79ea4fcb5b082caea7e96726aaed7390b7fff06664e2b36f53b1cb03d4f9f41 3046022100fe440d550e6f0be9d4d0915e9cf8ad321730b55a82665cac1f126e2d1cd3678a022100db810316b9f71461e07503b730658e51e03daa641e5b0cabf42a3e0a7bf344dd 3045022049ecc4262f3f96caa4afe9ba6354bc84cdf2558ec4d9ec4823c7e2d006acb82c022100a895d762f151df778a51e93ffbf8a5b19b8e6bcf90336fbb85575ec9fd42408c 3046022100d09bf2339dc9d6eef339d9cebb712b3b19998f4309dde7ebcaee89241edf36bb022100f026e3d6ee934960b04f613e44e6aecf289a5bbdb2a67f2f427eb90fe5aac5f4 3045022003887f97df2d6c464fa3c95aa62f6b8dcd1b96c5992cc4df1f8bfda6a83c8598022100e710cb319619c9541fd61f0f3693b678dda6b1d1b7733ef243ed68a2de1331f0 3045022075d916dfc14c8798981510bc338602fba5130455b3c06a9a488f4859c2c3987102210086797ca33572abcd62e9a7b7256f908504f0bb1bace197002ceee83302c1139d 
3046022100e0fb752939455477f49168c38c7fad4f20d0fe0fb49726199a0a0b15ebafdbe3022100b2c7eb3227e343b09d870213a38624c1680c64b3894749e61c9aaaf1b1fb5290 304502200f2b484a8e5f3c216a2dcfa126b30b25200ccf6c8a3232f838222b6ace6c99a90221008485ae07f5acd9954c4cc47e0ce6b0876027317c34b33c3b9942eda00a873b40 3045022100bf7230a51f6498ebcd3597d77e7305f789a7da40a1f98ce08717331fce18a3ed0220480bf04668b54e69e11eb05834cf26493844aeaf2059f3ba4b104f65bab9ce36 304402201622978ee29ebc39a6966e5e4acf20fac8ab07880b155a2cfe7ab70fb062233b02203c0ac1e290cd817bda819ef5b77fb321499fbd39ffcda6e25f39f11ffc9f6036 304402200222ae600608321406ac90586ff2aa58db1199b60f9eeab4b4f2e3f917c89917022073129ec2facc17e4dc92097065307df4c7cd476c4952a7a963b121a799be2ab8 3044022051c32da0adc784edda1d686fff4fa99eb1c3bebd4f288d4d78b6894f0eacebdd0220325bd8f344ef255d228493fafe81f648235f29d9800f462a5586337a0da7432a 3045022051a1a17c16e2e9ca77a20ed47fbdf7002d38b1657806c7bb6a8124344dc6313202210082cd77e365e58cb6916749bee63b1e662e3bf697d4bfd8e8c9bcf055d5069827 30450220045c22c0d775f36b497548292f351b87ad9d0a5f161133728591785b5cb96fd50221008067045b3b8c7b75541e8cf28502cb73001caa39c2f53a541e633dc48ae57a5f 3045022100bccbf412350628afab3533ea90fc54ba404da4c524f3547ff73a505f2f227472022014e28bdd5d1ee62687f3af7e4aa57f7a72b64f73c2ab29134b2d25cd6225b151 304402202641b89e3d930831bf668cc36d135125964bf0fb9ea27b69c304b69993c655e602204aa8c5dd88af79cecce4e18d093e7adfb6cc335d10a405a764ebfa85f1dd2a2f 304502203af244f0e34384dac88d88b6bdfd229595cd1e9eb58d5e9c2a04034e16979ecd022100da8ce0d848408039a8ffddce24cd7df37c003b5c7f369aa8a2272796836a6cc3 3046022100baa54226e5e6050db071f18d50de141da1a4c882c79b2aa6269a73b3156e3cb7022100960e17dc95549776c8c8ec600f3bc353e1a5718c847c60348da1589ba9f074e6 304502201bcb5bf30838bd7c457aab03003ce6879750c1363ab583de181b060e955401610221009c6de844d5488ab8af856f1bda6452f38778c3a5233979f56054c8cad28b6a05 30460221008479de8dacb1d6abb91f37ded54e9b174f5d79a1f3a2ef1a163295d046e1d881022100c87d170619444f4bdee2599511687e261754c86aa670e795705c71016c168169 3046022100e3745b1498a0d2b2ee08df3fcf7d7d121e33180fcc7805a906806bf4e569042a022100e51c824acca27c9ecc361d4728da8aab6036d6a3718b8d9d1a1f1f450389f23e 3045022100c0a2ff8a29eabab674a672c2c34c512366542f3ba8ea7b7a180cfaa58b905130022075d74487d43c1a2310d0892b87726a879b1cde75f92393a5c192685e3bbcaad2 30440220054f624e1810b8c9c64293b316a48400c6a6670e69c553892b06b95131cd8799022023a895b52f404dbb977243597033acc2ad1c36a4c51936f7773e25a00ff648c2 304502201f26cf03f1b1393719a4aa3def137d6333ce2ea4a77a221f715c9768fe942cd7022100d53b03b0d55bebe602b272f14fcde2f4987a8cab4089f1fa77b136495d1535c8 304502201478341b5341f308a62d0c76e923ee27ba419998fce383e9b767dab2f753074d022100daefb93fc3551464ba1d6861491ee12a5036b3aedf049971c5a52b158527934d 304402207024b713e7d12cdc1b3f0f0ae75da39ce0420bb6fc4274263c5229bc2ae60606022063f454bb653594b9abb5fdd3859d775e0b5af609d65c311d0c2e903b2e300c0e 3045022100cee0cee883f7864cbf5f674d6f1cb6090845799a0f0190d156e488c9d175e15c0220241fa4edd1fe6165b27d4ecfb5ee248f78f7c30db77b2bd6e6af295c81efa744 3044022076244d8716faecda85e12dd23397e8a163935e61eccb6fa2fb284d75317e712d022066382c422b3c50019372af317503fe35c133d2e6a30550d31a1a1e47f5f66f35 304502204b0708fc4298bc2737a886b88c3029ca5ba3c546fde603c61279ab3acf133eb6022100f2d94bf670d244425eddf66102b5fbf7f454e60f0f9db70542f30294b1125475 304502202d59c33e81813642e113a8e08e66fc1e8bc028ea79097e22945e7aa9b97916c9022100b1b72c0ca2e4380a67b0dd4b88d39a91e78ec89c2c7774df2cbd2b7b3c449bb0 
3045022100ca1afb14a73a7399a701dd8ca31711552cb118d78c60734a7e0e1904d3da363202202355b052c1549c5685134c483614cf2e510d98f0faba547416c236e8494a42e3 304502210086cbe8d1abfdf2649859c808775ad5be653b25d1e539ba857077dbae5a741d4f02201a97dadeb907f0b9db8dff98ad186b8f62c99741f233710e5c40a4590aa9149b 30450220683207fe10c05e92066320cfb5e48202cf89d8191a34ef7556d45ad64c7961f60221009aa5ede40b7422f180ed1814565af3efc08e83474c267e3afc7351acf11d3e3f 304602210099e5d17df160ea5160dd95b00b954f9a39b3263214527ef22306e78137a0c941022100bedd8ab561d55824becc14d81525833bfb42acb5ffa7909d7fce8e8b5bdf45f3 3045022100fa62c631719c67250bedf43eb7985081a96ed50f1abcf4e8639272a7347dfdea02201bf485a1410f0cccb4d25b6d8255250c3cd33a865ebb067369d2436b5e65c374 3046022100a5afe09fb0a809d79e5f3015a393c6958b1240c8e1e1ab8179accab3721a38df022100e1c51c8dd7a4d66af2b81383314c5fcd4576921ac4b467c166bd3b5ea3f163b7 3045022072e9e49f56f94f612350fdf54c978e59bfa80e6cfff04993e1b3a375d0499571022100f3bb924c13778b10234f8b88393a128765f981e7af3f5246ae0e238d522f43d6 3044022062e6fcd40d695c8954f4e012580633bf8e0b5d19fd8c49e6e7cb83486e74514e022003e0aadb56e52f3042f57874a745c9b248cc7b8a31f2f63b120680b2e5a497bc 304402206591aefd689ecfd5d57312f723f35805b884f66f954d9acf1d2b89ef93c0c50d022041b4a3a1c00ba349719f423331598aa7ed6327341dd9ed58741d0d4c701cbeca 3045022100edc640e1a1d5a91774e4318f73e5cebbabf9f94415a54b2addc5ed1f16b46aba02204cb4086930d2fea963c5f17137234b983419e763089ab48113d70b1f0e4fd1de 304602210099bdea4a4d45901c9298da73256fa1409522936ac943e72a947f9c3056ea98cd022100f3df2f5e6768e31c392b4a2b158c9a2f804df4fedb095a980aa26fe146d56dc9 30460221009d41b7face1c62345d69c311103d2958582f687eb280f99af6408a9daecf2eb4022100e1fd55fc5c7a183b354a98c28ef8ec5d1655570f11512cc859250691508434e9 304502207828abd8de3cc6ad410d01d0e5e88b4b5e45db232a95a0c4e8b4a2e4d4cf2d0b022100d10c73ccadace6eafc0a5f20a9186184faedcd64871fefcbbe7ad55677d2c6df 30440220161f7689be922c01de0a74eb63bb08bbcbb1049c1a0377ac7dbdaf62aff0338b022033e1807e454145d26133c3339828aee40b099f5a6b75ddeb627855d3452d4500 304402203b7e363441ab9c31a7337afe098d549b58d222c60fab98a2c04130aad95924bc0220357effe7f193d3f0aa70bfb80bf388022f1b3bb2f6137d5f7a7639efde4c64e7 304502202b9c30deb4f5c577706696d80b81d2574d5d06f08f325c2b80767ad402684ebb022100d723d872cc3d87aabe785758beaf43cf44bd39242c9a433e3536bb3d6496cd88 3044022003bfca5af89fdce98f44fd1eb17b4c945a3af3465d2f2ef3bd95369af6b39270022051b8e96d4bab6508cdb0ab0173eeac5a9e5295e27c684340dceca49d3ab2bde8 304502202ad30eac1929b1680dc8d389ad781fb790fa7e3115b985e5a264fb2553d72d3b02210081b63c636bc522ce0f6add4a5a2035b84fd79fe98305ede9cc10b571917135ed 3046022100d495b732a6d762ac694f672f5a42a445caebbc8450d04340dea307bf7fa5ab4c02210083d936cd274a5d0476ef39e35e66a664051867f386d1e776b333093a8c99a962 3046022100eb94792f9a4003c577a1aacfcda38dd3949d17165b08c1b0738f19517ef6a052022100ba011a84b9a12707e9c14779998bf96db09ea1bc6c14b6dac90869ccb0d97e0d 3045022100e5bcb95daf16ad3f72aca342293264b3279727be7cbd707c10fca29cf97207550220147489dd0e3390529c665a2299aecdb6a151ea5112d434a0c94073acacdc4f0d 3045022100f3f4fd576ebaf565d00b36c564070275251ecbce6ddeee62d431f7f3070b625f022042cc16a220d7b254bf4b8e61dd4d17edb92b6ae72639de7b57a1e3090fa60aa1 3046022100c7ae6b338a5a0c24b37c5f641052a370718745353829a3aae3c23ae76686592d022100f5a741e37788b59c6a13e521f498cbe55ae08cd11724bc3a666c9e777d119c8d 304502210088568f57bdca17dd1807444666bfeceefc4404996c3ba549d849590b685f97310220438fa0deb6cc38aa0dd4722ec03089382067cc31327955df0f789053a7ba18b4 
3044022025913079d5fac687f5f781d4dc0ebefafa5f24b06741efb23a42ebed90cbce7b022031954129210c202f4bd8d8e4a01958d908b98e6117388dbbae513efbd7287703 304502201ce4404aa4d27d702dc864c94d7a2d29023ddd48c3a5492f6241cb5df5a72bd60221009fec4bc84e55fbf4396126529f6b1dc57577d5c270df817240abb32f516e204f 3045022029a6603979a042e403234bb87c57122f73ad050ba2f9e5a662fe138995f3f30802210083566403f477dc5b82d04e1a950323e3ab4c3b46ae602720952e96a0ee924879 3045022100f0503f824ba2b34f1c0d72af05fae074e543bc59753f3dd8605fdfc18e10e2f7022022db29a898a96eca1383690a88d3f2ab96cd05dc324971593fcccddc6d4eaf90 304502205236a1594e935a258386822e43249ca170fb032558953946bd7b1b9c84f76c8e022100f8c469f078c5cf0c5350cd5e90209840bdb045dd9c9d99c943c33bbb9733e09e 304502205c2a2e54ecbaa7b43fb5a043b3f0ed7be258b26276f66d49a1ec69f74ec16bfd022100f6ac4705d6451398be7740f14f08586f8d4a43be1357e995fc08590041c9e593 3045022100cafb1d25632a9aec33d5567ec3c41421174c0c0f67335ecf4a4ae797e6e79d6702205b0a1a07661866793b776d376e7b47eca2c2a30596276331b94783f711aeaa44 3045022100f3c40b71e7a40f9c3bcfd22e1d17a40c5509dc0211d1b9a4c77a3580c4b60c9f022013771e6de98b58960f8e69c32ed567801d78355e6cdbefbc7783fa02ffce7eb0 3045022060b8156fce6e5a0199ce3e6a4598340aaa57da35421e5e4df217201e885050c3022100da25855f5ec5bdece3d0f03e6c2e3c2b6bb322533e24a95ec17c7bc49253d44a 3045022100baa84b3bcecccf37f8c8afecaecc77f133559b1e9cff577d941d981c7035daa202205f738bdbbfad0b66e6edcd1845609ba499afb468596ac2baa560ab0752212ab3 304502206136eb0a5353111d53f31a52c83899fb87003f3b7688f3d0661474656997a7ee0221009c22f9234de32d8e82d5f4ece02c1a8fde998fbbe697f48ec07b6ea37436698d 304402205ba7f7f745f0d574383629857f34455c3296b6c3920f3d6dee4333840187bb140220507544c281041957d7a7fdcb90571b193bce8130296274af416d98e21b7ba1a1 3044022026c65a0a997630fbf230003477f7115a08e131798510cde44db748f435d8c09d02201458857e957030081ffe54bb78198eee7dc53341fcee617329776b543ba5e4ad 3044022002b21cce50d530bab5a228df4818c4cbad38403a4f8df52347f922956cef74b90220623c7cb9d987e66d6e37766e48014159142b112259b6d46d9ea4551fac9b24d2 304502203210e6eb370c10bda347af3e735a48ea9b0a9ead74a0b83a14175a0008bc19af022100fd5ca106cc5a6cd0e22b5c2095a58a8dc5f8037dad3488c0ef27d35d3dd2f994 3046022100b8cf2a5b876dba9a64b899aa003badbb5fc184d4a2223114dbcc5df4a9a716ce022100edd83d67e14cfd6efd7a35aaca7891220e698184206f447ff470b5f1087d5009 3046022100ad7eef8484285636db096195bcf007a780a7b50a24a3669fd816da978a791ef4022100ba4f92c81fe795a6916262041d908f80c144df0614429e087bc8f39638c612f9 304402200c91bee5ebc484df97dbeb210c4660897fe9b2b4ecd8112709be4539bea0a1270220766935eef6020b466c18ce6db21b12c6dacf6c86a6722e6fb7583099265f895f 3046022100e8a5cfbd2767d23bea186e471f8eaa818add268d590422fecced924378dac442022100eccd41b86521cb9b3208290f3e983f87b0f258672c07a8aac25b95e9ffd3c573 30440220318df858c98a726bcf273bd495b5adc0f6d7ad1193c0b1797e291c2d93784fbc02207ed2e9861985b7c64bcce5db8b3e066f28311f99fdf5c22f5a247235884acd79 3046022100c7b27232e61d129cc9b41173d0c104560f47c91b0ce5c90da9e33d098b3076ab0221008f42db9846b381bb3b0fc0f21f313f60c723792e2545f015cea9b5744b11faed 3046022100b29119b9447406a1cdd4306cad838b94d2d8fe32e5c7969375c0ed9b64cf7652022100bd0e5375d4a21e371b56f1b2a6f4de98e1a6f8c0dde9adab9050f0b13a9959bc 304402202b10945f091b1ebfcace58e641e028b3400e702077f6920befe9191a771af80b02204ecfe998b1757dcc673b0573d3889ef5a2cd5d128feae99edd2d14c545671319 3045022100b7b97ac9a914fba70710ef55e05506183449e3c3788aea520fefe3f06e2c07810220285e8310fa5eb33f01f781d894e0114c944068e5a465edb545832fd2ce5e23e5 
304402200379e14de31cb7934d47d280dcd660249fb85a20e4a632fdb5b498aedfa6170602200ac127aaf9a27e614a547422fbfd33195fba2c84584f9e4e8ab11dc63dcf455d 3046022100df1c91f3b727d8c0671ec3e790a24f1342eba5cbf893bb9681de125a590227a2022100b12ba85050d67d2ae2ed1dfbb640251af90be7c0a7bbcd9509c88cc9611eb917 3046022100e59f07c63383992eed31e2dcec1d6e03fc2276ed10e12b63f3ac1b8de64f6d34022100ae3b4d54fc68c3315e1717f412e26acbfee3c1a6e76ddbfd7f716c385e292645 3045022065944c62945930ef648406c906f3dceae626f22760e889b576ef67ebd77dc4d5022100e65f137484a8f3d934d952720711fc1d85b0715684c733877333a21fa8d81a32 30440220782640dd3ff41f6fa9cd2a0e6e49bf549b863d3d7ef5a72513ed49abe9eba8d1022037b8f03f90030fa2dd1aaf6f397ba0d3431407e7fe75e6bcd091bc6ce330713f 3045022100be1ae43166285b15fe05bd90864382aebd217d07ac679a8c91835aac3a9bafbc022017b784f19a5c3f70d9620b7aaa5ebf7339f4b2b60e67fd0cfdb8bc5a86510154 304502203ab3fad035d2e6507b44aa17d058723f1f32f95144ca396f42644c3906076152022100994abd33e322083076d4b20ad7d5ef667091a2e53f8cc3cfbf06c0a6cf3f3e27 30450221008ec96993d7aa42fbd61e106d1edfbd5e03940cc7cc600e445ae54c4d418340d6022005963f0aa5a00a1e6f2ff6133b342280ff0f498ccb5fe9bf82eeb34e1bd74f86 304502203ab8ee64e2bd25039ab944916787b66de73613457c26845a9935493025a3d5c4022100886210c8f613a396f7b3960a20a590b991842649f2eed01dde4ce1879a37ab00 304502205c086e8816a5f852ecc457dd641cb7a47500560304a2f9393e0757f6ae52a0f3022100ffc1356f2abdd27c43239be0f96ddebccc3c554fb222f72853aee13938441aa9 3046022100b63396205a7937cdb5f233f4dc5aacf9439f67172103c548668adba41a8fddb4022100879cf0bc6c67374de803e7bd73b5060668f09d2ddeebbb387b8fd514a5bb8dd2 304602210088df0c52052460326fa915b7a6e51b615197d2deb5d9d973b99ec3c76d86a9df022100f46bdecca7e57bfbe7b3be479103499a3f1de74c64adbe338803db43635c0bac 304402204563b94d2ad45b31177cf3e9dbed3bfb854d5cf02865856561b44ed12671615c02207ceeeb5cc98b5737cb8f9c54b1d8e7f4cb6b466b61139a0ff720df4ae8636602 3044022039610888d2007c3f132eb63da26060f1ff8e3b2c89a3ed2bb15a965e22cfbcfa02205fb1d742f69f39c9b90690a72a85bc5cc5858e2a2f1c64cfdbe69221553d6746 304502201c95dc709ac22d6b1f648bead0db7303f8f0a0f2e3132aabcdb982fbe05e620b022100e92c870b42aaf6bb24b9940b92ab4b4ffeb71371058fe68c93d88e4e94e43855 30440220219a5e9cfa57827b8e4ac41cf0edf0742252d5e186b268e436aa6fdeaf6264a70220578e6f9f9ede3e1a250952793eb962274616ee0a3985fc2e14a93c5f92280e5a 3045022100a5347c3f33c43264774a0a424da12d1a044bb3f1d949d518f98cb47221edeeed0220796db0298647ec3acf60c296fb8f85281260524eb13dbbb6617ef1c861457427 304402202abec6363fd4e99f5f8c3c1572ac5c18510b53b72c653547e24840640c5cfe280220293d3ea4ec51753491b568e4def84d5dd26aeac2810b1688a38f02dfbc9cd3c9 3045022100f83a9c4d12b0d5b80b669e1c02bc78e899ae329711f4a72748e6b38ff2ad2a1602205d2392333ea96a398c416bb9b7f184fd6d9871a769577862dd8a728f54413ee1 3046022100c7da1108cd316c6fb7d0fb8d5daf358b66aa93a9af2f2a22f679b053b2c38d7902210089bea2220cfaeacf92a2b4df26d302672bfc173712216f13f925290170d4c1d8 304402203a9ceadbef333972f99cceedf7e1c5329354613d0d42c34046d28496d228835802205d6846b48b98a4408b7fc2ce2e451c855193ca0a4095bb7804c35f0c2baec0e2 3045022100eade3e1e7b2d7d55de46231688e374c131ad120e3b6412b701d5100ed16e62f602204897a9137d40892b20e345ae6f9db6bddc44213c3ff863fd97d21a228495d420 3046022100ced73117ccdd2d0b2bf2e6c0d32873ca517898ea7a901686f0066d7844396bb202210083f7a7810947b5fdcef795915e30aca123ea612ed1279361f01deec1cbc68d71 30450220770af1d73ab74bb075b37676bc7efedbcff0216cb94de073366f419673f354d2022100847ad5753450ce6a393295493598087a3ab2262a8725482a6b533fb1a2199563 
30450221008269ecf18e2ca58329c809db73429cb224efc52a2e65d9b686258ed7453a382c02200d5c97cb47e8375d2e80cdeadb5a372b663152ed81164755de7c13c3d312ebfb 304502204c04528512645746f4608ff0d95ea963daf6c9c06236bd9621d3990aab804921022100fe1607b87a8b52f333c30b25895ada7efc768549c7825be5b89014ae859ba9bc 3046022100e0883c667a969ac467825c5dc6e9efc17685ce0bcd9d40464d22b4aa07e0c93c0221009aabf0f383e516f11c42329cebac03ab50d39beb0252d72614c1cd231aef2391 3046022100e03ba3af07521d28cd7d6c6bb54418dc3f1ae467741e4214b01118c8b0d9ead8022100eb8af8c6c07343a1fcefd9fb7833a3148f89963c7f381948c37e0cf639d659d1 304502205d5d967abbf064bd790f8599ee685f54a72271c07eb075876321c7766701ff110221009a7216f803498a278ab761e200c45550c0c955e04cb366a520124ac29c78e7ee 304502207712000809d1f74bd96fa3ab2d52607d200cb7a38aa7437faffb1323d531c67e022100ae8159495c4ecb8b8f850d3b8d0f75753d49fbd6d73fd03e1a5eba120bbf5cd4 304502201c92859667b03f09fbcc09d3d888f8f844f6a6ecd4f427f4fcafacd83b2e5b17022100a77892559714a9dc685a7cf1878b244941dd4820b66eae5db62e24c740f1f223 3046022100a286948619e004504f50302ad7ea4f8010411897cb19c8fdf167e6ae3f9e8c86022100cdc77dc228ef787d90a31c9e8c89dbe753dfc9f3be75581b5e982fdc6b06b355 3046022100c4d658ca42d448693dc099e9d9cbed1ffc2ce10d781afbf82e1e29d23c2476c3022100c0bb78773c96f68e86eb36f718497a265478ab5f5fb7b6a2f469587988782125 30450221008521bae19b6ec6d5e332d400fa1a00d2f140fac96ccbe8f02c382bec440651c302206a702c805e0dcc6331ce84d2ac1428d91a71044528407855c7ffd98a7176f43f 3044022071b301f612a9c3d3d468f484bbc5934a1511946c1c4a968b30e1fef8bf65e6ac0220681fc56b68ff8a8ac5a687685252c840c82a91ed7c0aa65d5a9d3a8441a5057a 3045022100f62390cb898b4cd09835f7c95a0ba2e7213ff4d831c068498f6c2c9c59625098022002bf76e84744a492969fd9f72c5b8d1d5d12cf36a97caa7682d7d7eb40eceaef 3046022100841b40c00dd79f15ec8da6bf3e30c033396cbb5fafe3471df54e7fdd9157f120022100b65ec84e27bc8d8853c43c336a1fb3c9ece8d7499ecb1c0feb998ad6547a6092 30440220143ba9f5624108ff6b4aa786700b84fec2c0d2b33f8dd69e1d5fa7069980b4830220600426e44d9b053ef4399c1a4ffba6a398e3ac9bf5ae4e3e82fb675ee8827198 304502200dcac69a0f0becc3ff1a22315cc3e79d68880ee34af70b0735143d71b5fe9ba9022100c02b27c02a66a7af41b55d28bccf6f5a63ecf3d6ed6d015cf52a76c6fa3f4c2b 304402203250e57af58622b5c74f9103d66ee4c8a36969b717f616756d6b330889157be3022017b35f46c1b1a620bf1dc6e0024310f025217c5786a3a488388719d811c389d3 30440220354896c733b7541bb9569e46b25027c3d0d0061dd1b6e72ec5cc7d8b09590752022066536278d285ae5a9841171336049c0280fdb24d4e8f6db01accee4d82d5ac94 3046022100df744aad24c30e97d23f601f1431c3fcd43c0b4c8b775e55594d696513a53e0c022100894a7fdee8e3f1bad4cc0f19050eedf911c6d9a74f98d824f1f8fdb7a03b92e0 304402204b18f4792e72bd698d9d5c879258760af16642e6e258377975128e95a0d7b7cd022041b901b452cda5b82894b4a96da77d06ca22cee376cf3be738850424bbe5245f 3045022100b12561aa1885860dd943753423201081364a6d494ef7a2425c58dbb6bf67e0aa02205ad458d1388ff8eb7dc89d5f453f6fae48d2ef7e4bb8c04d74a6f81b3645ed44 3045022100a42c367c234c8504ce47b10be9f9579ced1b28ccf02f9dfd5f0689cd15e4ed9402200ab09f98a0bed480bdbdb75ab6f33ce1245165a6784dd328364ed4b7014168ab 3045022100a8b0e9ef26310c9e72247c8b41cebf627af0a943916748d44754243aa252fa8402204297796f5d93633ac77a2a758c3392e1d2582c0d02e9e71b23b452d6ff1bd6d4 30450220249a7470fce3fd7ad9485f3a9ffd6e400adb5f3ec6decb3eb5b8b57fc42c3c450221008551a020930101fa6273112781f25711ae0b65f6155b0d72d4bb0b69a0dfb530 30450220399d059c03ceb5facd9904ad985880fbec3ab4b08aa876a590f7eddd2b0975e2022100af50dd82c814f149dfca4f71ebdc91de1665fff4e3e607d8e562a7b9278be13c 
3046022100820bf8809a9070a21a05fa2843daaed49fb58e0508f36bdafcbceb877f6a121e022100c2bea8163c3605aa7cfd8f7f8e835c118749d5e8a6e2fc796fe3048a4add384f 3044022065b84ac4287a7ace487b237cdf3bc6d1c0d6a32ba4412a34ae9afa159732bea502204b6add05b9796f754bbdbc177fc41c3c0948e9e995e06ffcd66b0da94efb434c 3046022100eb45290387e3dbec98a1cfb65194691be69e5d5d1fce1991c80d8905c00935b8022100c8c9b29bdfb425a63577c6d5a4de7752217721d61e5253775588cc12f92ef6e5 30440220551af24fbb305278244b94ac07f30476b032cf8f928ebe396ee2b3aec9788f0f022019c0bf19c1f8cc45928ba4562e9eec4a4e107f57f1f6d95197fc33d8a5ff9fd0 304402205be1099c4f9de0606a70e41b05b5c36c4d4317dfc0fae0ef27d7eccc98c936c6022030b95353389731aab2916793e1ba39ca299bcb9be0913f83c120a8ec5f2f7523 3044022072112cf3653ba0d9e0df41a51e4c07bd01ea40ff0fa12d972ffcd72272bfc17a0220049d64cd1f74fdfa60eac34fb54863058b2ba44d6acc7c2a56fca8aacc1ce19a 3046022100b5aa8c3f952e63439c98984ab35ccea390f814de847446ac521881ed7f27ba670221009f9089365bec6792a7cfb9e62283bc162eace4fb42e67270ac03db9044995845 30450220134061824ecbc90c67ff56d62977a824bc12b920ff64ea3fb8fb62712d341b6e022100a0b7460029fcca9fb59d21992fdfb7017cce6845fde13e480ee75019604d9dbe 3045022100cc8c00264ef78d07a7882abdaddea7a8925e59e99a395c1b0e32bd703860341d022028067dc3b5aa21db84e5bdfb799509b5393c596cea71cf27baddbd86e32c542c 3046022100e770424f1af42fa5c310b452b160d54356fd5c5826999b3321885d4e1261bb0b02210088da511f7f421824e35977966a9379f416c255b718f80a17686805cf6be1de19 304402202a617958d874ee054a5443fa0e8f33569093960e6a52bc7f082d8fe96f4d2aef02204664bf56ab9409585a4a2efd16824bc7a0ae4dd423342de9b7a6c4a7edf535fe 3045022100d716ee6967a661849050d00dc63e2c10a3c874d0e907f2ac2ded1b6877652a21022077bf905259c947eb336710ed97bc63c1831fe335a554575d25da073a56ec25c6 30460221008ae365d7dc8edec2612e6c3c8320d229f3df751578fed0ab38261e1d87ee8f04022100bed158face13474ff9adfb917b0147e0b9c7929af806cbd27afaa4a4ed71e7be 304402200a68d41313b6fc73c324544596c2f075ad19655b6c5fd3f0495952a006565dac02202861f5a94ca780e7d6f4074f00aa7b86d15d1dcbe85270527e53f79ab59da5e0 3046022100c9aada03468d7ff152ff449296d91d7af18bfd79ec6464f40069c3e5aff2e3ca022100a3778e361aaaca6f45e23e5672572fa9e25ba65ed007841b93b6041aea5ed008 30450220604d892e0045d20eaddeb36371050ec4b47925cf5f9535879f0e106bf305adbe022100aa2c5aaf3ffebc918c6191ef6c7684c9d0476952fcb29b01528adc7da78bcbeb 304502204659876c8542279862ea520edb311ffc4495fb1b14f76c5a12e6d368a86aae8e022100f6ecd426186a934a5ed3c1181d8edfde03d984487688a36634ae52b94f478e9c 3046022100ff5d8836bdd463860216b2c193e7785b5fe253475d110210b9cdabe602fcc0df022100fcbad7cf70fa6b62cfdc61996e2f651084a4198affca3946175aa7c1ad20afa2 3045022100c1aa8c9f735e5c2384ad25d372a95aea935f2f99b894b2597da9335b9966bf0c0220627756b97c11a6b5c36ef83b6213d1b8fa5a151c1a1f87d84067076e7dae96d5 30460221009b10bc55ffef3f4b73cd778055c451a461317af9f647aee3143fefa3ea91f3da022100dc911d9a0b211761ddf59a991ccbc4a93ce3e3e80cafecae82fcec04f9bc702a 304402201a696467d017bfd78b71b0c163884b388af83d7d3ba951215a4ae3bcbd3983be02204770e6714e1e302afbaa8acdeff7538ceda22ffacd1ecb8f76ca1449be8e17f2 304402205bd8d8d41251e4115f6ccd24b9ed587fa11f7fc90acbac00b04c3cf85ed0dbe002205275ddcaab57c82bc8e30a1c482bfa8571577a04e191d1383cc0ba96d0285d65 304402203a6f30bc39fa470ba54cd77a49a8cbb771bbe9fe6fc4811a008882c949c2338c02204dd59bd8e528d16fa485dc3f6b8ba50d84a659d65216829e0d1d87a0c7e9c51d 3046022100ae876e8f725dbb41edfc1bc21f648027f5087378dc8f1a4775b803c68c08cbad0221009fbe4abb97715a7f340ec3058876fc3fc78da657fc132240546850abb5788150 
30450220678f8c38c3fbe7d01694f087b55fec3c814d417b98895a67254d4e32f6be3b2c022100f710018d9afd2f3db39f48c3964c87c912aa736889578b67e8afd7a2c28c571f 3045022100b78dc4b5ee71abca12add559278290de5a4047db08cb57a306be85bf5c7ad67a02206cd1ecaa33875cfd714d7a057104ef39ba647207c4dcdfe28b3bf9a3040f7b74 30450220703d4800358259eae4b7750dd76dab8834aeb45313c448e322ab27d81c3310990221009724638c26eece5fdceb5bf6cfa152f4b1d30d2b2fc37d3b8a12c3ce0e87cbd3 3045022100d659cfabcbe1abecc797aacc3c3a2a8ed6203accc67165f5b27f1750252c72f802202815de01f5b3ae9548b2ffea411845d68965711cb0db02709ca645b42df8ea11 30450220484e3d8f622c34331c768127f43c51692aa0b0f80b931bef56e977d5f75410c00221008a2e5d3298bd596d925260fca5be54aace8fd49a24a4e778018e0a1f1565b168 30450220458192e2a69bdf209bd45fcadd97f603380dc58b8bc4aa387e2e055df65e0381022100c372c7e747b4a74dd428872bd1e0f89eaf85b029fd40e861e34d4326e9c67fad 3045022100b26534b2bbd337190b195ee81bb521eab55361769ab542f72352c710fbe6bfe802205ba74c9c552b0b718b37239dfac45b9d2df576f6ff8e140b1d1e55e6408466bf 3046022100d10733a56394d0ce5b4e85e411a98a67e5b8bdd15158265bb0df4894a10fccc5022100a7bca3272011f3ba444708d64254618e56c419e069b6ed1b0d70226a3069ccfb 304402202b8e5640694413213a1c20b46cb021face4ea0a6360557caf5b12b84334b4ca402206f828582168418c4895d23ee187ebad1500552377bcb8f78c19e9c3db65d1896 3044022047a4bb9b48c840318b6c6f32562784fca26039a114ae82310df9ee4113c39d6a02200879e2e77c869972fe117b1d2595949173fb3f4dd1ce6dbcc1ffce00ef782402 3044022030f81259af02c7dcb35b6fb4066a1ead1d38ec16e36bf40e325cddbde7e0e2f8022045eb633b31e88b79cac16499c583d50376247891d48b440cfc02449332881fac 3045022100bfbb981e603179936cd2742861bd6cb75679b02440408d1bc956350a182d2cac02205085714ee1d4381c80313d5745895cb34a98d78168b22721a26c63bf37024482 304402206d0c773b5bdf21af0d2e3e6b544b295943796f057ea3c302fec4c0d426f2429c02204556da5b53c50af231183a19224a7e425b02986fc39234166e37aaa44bd20fed 3044022065540d31809852320159d3d100642ac8b137b6bcc37dea595e444de16c344c3a02203b4825a8c074c267c3758b5744917c0bcc18544f9efbc8e086789298d12a15ed 304502205b3dee2a19e5578d58df38d93dd77c71b9a3d2c2d5e2aed23727374dd2dad9de0221008f5c51e33623c2e900092a253e7c4f5ec1c63c5096ae21902cc3a3a6f31a2607 3045022038ed1362bee9aa9fa952fed07791a7f43a5713e4451e7d745700f8bf9e18f73f022100853fdc48d121c5facc7da2a44d6133ed9a86c45fb1c2a6000bf46778230eb5ff 30460221008995290cdb40d0757bd26334396d8ad4d96a355a1577f18a6bc2b277e40226ba022100df3e2b50b5b7ca663cde38051ae5d0702dade91eb59fba5b7d4399becc746e8b 30450220653ec74bffb5dca677e6b3eb532c12349e5996680ee11bb0e5d38b2c5d071e6a022100c423646dc0c6bc1b7932fdba6a03c33db3243d6b6ee8cbd5f375569a85997187 3046022100e8f8af1d9012573fa99f68c3c1a603e65675049d42f28684725c50eface5715f022100dd5b7916c6aec12c37cb1dc0e07034b49e4e6aea832ad279f6b398cfdeb79a2c 3045022100b666844b00106af54f2fd4bc96f8501df7fef6d747a4193684729c9f600e011a0220035561de1502fb3cf7feff46b7a77ed31cb5e8c499edc5e2efcb1ed099617cca 30450220576a2503a93d4209557cc0dca5eae101ac5d4a4da0b79142aa0bee5f2caf64960221009403e6e38257a4643d8735714a108b4e161bdbc05513b5c01f973d1e404ffd50 30440220515deab9ca24296a6a0bdc82d9e3a093da55e7662ca37ab52817df1711e0b6a902206fcbc413e926f477e913ecea4e141fd480b3863d553f90d9a3d7cf3cf5cbb673 3045022100f32c809ce6d59c06858068f6fb14bc081220b59e15d16cce520dee9182196dca0220372d41e4e0163d9797f736d9df2910ce34ccbb954727af7703304eb7d2b98f7f 304502207452ee51caf1e9457c243a4367ed1c99784bab87f90a149a5aed9bc30402d05f022100e290b4207a44529dfc596f57d1ac58a76e4cd3439d9465da253c8eb2f00a9968 
304402204c481a7dfc82ea479a2802247e0899555bb16203b992b14eba761893e03703ee02201cc1a3df4d4fbd5ff33aa1a873bbdb4ffbaf22d1112b19072444a8c3ee52bf24 304402202ad2a389b908a3e8f781f5b4eaeaf8e8725a46ec07353a969caa3e14fe49659c0220019b234ac91070ee2f1356e6006fa695c3a7abb189ed688c83b9acd669ce6f58 304502201653c09742327c2a1805ed224e92ddd72e7c5f24df80bf77e42b39ab597f332c022100c2ff45bb5dec18c217a39266ab5b7367fa281124fc9c60d9fcca420457c1095a 304502200117b447ab177a0b2e46d57f7db883dd7d9fe90d682f163c070e1ec95d10a46902210089a6b118b14d483aaaa27fb000f9fcadae9b47992d2cc88ffe09ca7a077d8575 30450220469833f16c5699a8fca91c9de5ea73c1a065077a1d22c14739be3506defd79b00221008cac9f628ab4a1840b08b9bd7c4287929ca3380ddff1ebd7983c372874c769c9 3045022100b1fa4e4fd4b6204b37c595c84299a2f6f838658a49d095b546f3b882b10f0eda022008170b87434ac4c239ee07cb95889cf80be84c083f144cc99bce7f1675461f4c 30450221009ecaa0a678e6963c4254cfed344a92476fe8b97b3affe66e4747d51b44a0285002202fb6d355f05523f2ba0a078d92e81be8aa4501f27c29a55738c3ca2d9e53e5a0 3045022100f97ef090a3871361c36c3169d3dcb374411e06818e6e02ee4d48e4ea1b4e832e02200e5f7a6a92346d156245ce24e2740da62911bb9a94247941519a67bfc9f8af94 304502205bd1ed87f5a0dfe287d29a62b4e353927ac87d8592841424bc2cd1c71228bab2022100da840584e6ce903cf9c13b20e73878d70812962d8680e29942cb2bd34e34b51b 30450221009afa7278a7a3c30b50d7a878a1b2e817f7761741184b1b33365b14b675e85406022051a58f0b543353934cf4ceaee4d519af4d39fd5f95440a24017a31965a68dafd 3045022100d395366c1fb8f5f43816c89ac35982ed560c76f57e846b9d8852213e970b584302205a604ea3bbf4b4cfa2d489efd76bcd0bbfedaf28ccbbaafe93dac63414e0f9bd 30450220601d96d15b2808f7f66359c85ca111ae94718b26c1b2c1c736a8381415815dbf022100ac01d70a6878843b148afba2c640b34088aeb09aca3b8d599daa3f0281657b86 304402205b16c14dff768b1a50c85c49523a4200c7292878536105b98ef7b99403cdf67902204c03bb72715129192119c22fd1ee7e1b56c76ba7281dd891ab1b687daa56e873 304502205774cc772077b6ba169b11d7a43a9312540be2f34b3136e993de753a76da1bce022100f75ab48a6e81d617541c9b4569fe3b26a8adb7c2dcba18b0e53f990e81d6a227 3045022051efcec5885d2bbbf6fa311777611781cfc2f178ce9a57398c75e04edf75ff83022100b792e8e83a376367500a049b4b84dc39655e02b0131535380ca4ed70df6c7b89 304402206c850796dcaa94a451b22c26ec46704f05fa11a3151a9cbc476482259bedb37e02207f33f7e2dd1a53b609e66c57dd597328d3ab14be55fd9dc8b21fd3ffae161ccf 3046022100cd03d1afe6e48cfb47addc3703027e433aad9a64a1356a80eec7bddb43ef57f50221008a177f11b5b86cfa13c7a9b8fb331665db295a97e9b1f105db6d220ffb0bb70a 304402201de48d879c7ce9f54e45198379bff325707e6313c27014359aa455847cc940ed0220085ca34474e23933cb0f05ceb85c15889a75435c78f872884995ec9a88a5b6b3 3046022100d1d87052e413796d5eb24826a7e066c209d95cd5839cd1a3cba7e8dda8f36e13022100a87423e8436e132b2c7c4e234f7ff0631e078d864fc4ed3c17f747a948cf4071 304402206f41e57bd695b951998a1d4be0e952f2a37b2e1e69569e35966723f0bbfcfa9102204b88a29424e0c0b57bef18950bb78ba16806a7c1351a14c96cd19422ed403aca 3045022100b824768b560936a012150276009579b28b3c0b9096a6de24559a897cbf26080d02207077ac245ec1785aa5aa0876a4c9efaad95cec27b7d30962545a1213967fb2ae 3045022100b3e8d266ef6f41a8436ef68c8447125f18a5cd0c25d05072647e464bce1ef0fe022023ca8d1f3513d6e5449005d65f9a95acc3c3326b831f7452c3fdd3adb42d1cf9 3044022075be61497f25eea47a045f46161dde654c03538fa7e638f5b19571e85121daf802204bf6e028915535f739e72192a3b93f815630692c2e88ab19849079f09a1870e3 30450220146a54aa38911a9d37fcedd4beaa882b34a56b1cf21e77dce65873e27938c062022100fb739d01395906c084508d6c0b3082a6e423b399a13e0af5fbb80fb0cafdca7b 
304502200774a437f383e17ed4f076a613da5712883fc77cf1854e9f88088cd4f6f6cca30221009ff8224da4755bc84597b108c455994e67e438606aa5e1877f4045bf90f25f04 304502207e3854c738a59807e504a7c6a055c6553dfbef343e4eda09abc324d9e2d9f0b7022100f3dcd7f3aec16ee923f0c12e35774daa79fd0eba0fc77198c7e0f23591f2f762 3045022100840a68a166e2b933ae64128c4a7cc441ecb5e028b6bd43007bee03cde5c0d176022032c52eef6e83a97e29653383c248af43ff7a3d21544ef8f3cd79d0f8eecbaef3 30440220797e04ba24794bda4cf860bdf04d16e486c9a1d86a44704e64af8269462c26b402200d42ad68d200278e2d22c8716736ab7b38d3e1ad71b01be99ee12581ae8d11ed 3045022100e948c9717dc1d29826e8463d031745ef4773b45d7254134e74d8a400db4962b1022005d7f407205aa7d2dd66e7f8bcd89c1b3e3c6bff49037dc836d891babd8d719c 30450220470fe07acf7a3e246f149b1cf21ca2a8c05acb4df35bd7ea526cceb288364e4c022100b3dcc9ba52e953bd11e388ab015e8d2cf4b74844308de1791e0dfde34c909363 3044022016ddbd515fd0cd2572dddc9ae6a186732b8843338202d0d9ac67402b93ad807502200b6493eaa326e0455c7d9f0584ea08ac0c2d74c0df8f2afb50bdc6479cc3457e 30450220235b6f14a14a1a3937d1d193dbffcc995d780d2fb5b4295c7aa0a41dbd113a580221009639899428224b824cbc9b508ba20d7930e86b0c96c8f8b619697ee18f2edfcf 3046022100b602c04c9a8199c20de945d5c9dc0ee7599ee737462eb93a048c95f41ed123510221008356ac36e86ffc59773968943835c3301cac9324ffa7e05c71ba5a7dab59ab3b 3046022100cfac0883d09f18cde90b33cdf4f219dd15062300cf69d7d224fcee296c9bf7d6022100c5df3a72e5bdbb8234276a3b5370f45a7f335550c199518c56c02d9cfb5a2ab3 3045022100dbc2c9300f8838f39934287b1a3842b0d6108cbc3a0d597c39717c9466da3a4d02206c389e82480ab78f42f956e6f7c0d891cce045882e10123921feca3a6e15608f 3045022100e64737ec4cd0adcbbb4e9122b7544184becc97ded8b2a137ae91a9a0bf4b781d0220180321f17ce067711cb4f1562fda915fcb7fef91bb4278f5f5dec9952edceb6c 3046022100ea83d56c80756c185676f767c6c96ddee85681d10886fbc1dc1f0ed0e1e36e2c02210087a8cc8a7f3fe9338970c9446cc1eba31920311f756d8a64107cb5c6be59df4b 30450220400760ac7149ab5df63d0664ecd330980b74d7f6991254b0d5982ef582c3b06a022100eb43094d7dc122dff6712ba4d36610c34f8206b812f979372756fb83fb8ca743 3045022100c4a30ed9caa1d78a3c0b58f02c8ceb9501cd05f27ae24e2354322de50d43625002207b2001ca2bbe497adbf2ae23805de4d259d742b9a1c2c3e79ff05ef27b5b8bde 30440220551be37f5532d7d75a552b926b7edc8fc9dd5b8e1d8d7eadabe97383fda5cb10022057d0cb338c2b217e311790a75df0c6b4b28acc62403a77d866babbb39adf04ed 304402203525c83a0229b1a3a03e50c65153918cf77dbc763d90c76de42d4eaeca9fbcf102200592f312f24bb1c710d5a565caf4b4a8f3cd938d68f4b88abc4e49d33e8648b4 30440220148643a0bf6ba1972d5e43e4a435feaf5d3de6bf9b08c490fa916eb19260970d022018b4ed534e2b5854ee5e0cef8f07906f716ce3e0df142be43c576c4a669d3652 3045022100d0cab5b3c3d3847a7323ff382e312bf7063189ba6a0572f5544b009be45042d502205d4eaef0176abf0f5a1099c6dbe4959a3bdb20eef2f8a6ceb5931e72836c2961 3045022100f036675c98ab97809a6299d118c67561c549e482a4e3e0aa07e36f6db0ddf16a02205eaebfe32d4b85575bc716680c9717283741da03a66431aa88e9fc2065418801 304502206ca71f340942e09232c8012722291a7204f63c03e14b26483e581f4a682b0f37022100f353a0ba9e5582784fd6729b6d711d1f68996e70b2a1117b00ec6740f3ea4dcb 30450221009c42943c05fce0136dcf8eb6cdabe2d578e5378c8f098a4caee7a3555a16caf502200d204902d79f040846fbbd38b2ceaaa766c4952ad19fa65e879e3c6691f0233b 304502206cd50247c5fb2652bca3c5002ed2b1be1b3a4f495f3c861e300b6219ef6bca31022100e150619e432beb8372cf1d3f7504b5f8b893849a8f3a7eb3ee5c5d16543c6fe1 3045022100ef466d064ecf6b5329ed369d67627e2876d2026b68fb434a14c45ec39d07be5b0220716ec325cea44700289831a5bc28e7e4cc32dfe9e87d09b96f0097c9e9f4f70c 
3046022100c5bf8a2fee2699b2a82b51f287e0f7d9768b68234028708d3370059c40b4ecdd022100b96080a715d40352352f0b9013d3a5fb42922fb34356475feb517f58e30ed3cc 30450220101217fb36bb4835923bfa5e95a8a637f739f8cf3e313fc98ee4fa57d05bff0e022100b732f746fae1f50241faeba810f788afedeac14d93de46cfa37177c8913c295e 3046022100ad5a3fd491aced08cf1e353e7652ec0c619e3b49354e34e696ce625a7f26de66022100e9e60866af6750e5ada88dc9132eb7729da32fcf76030d074e3bb889a1882233 304402206a7c70b63ce37d8a99973023f76625a227803f623a3c0a1b8c7b240a7a904b4402207deeb6e6e088680c0864b6a880ec1f4d96a241a009b10c175de0ea42e05f1e97 3046022100a8dca1ebf0782fc3d38bd449f7698ac2b5eea1edd6e2a04ba831ed13040b33b80221009217ae32ab34e930cd9abbdf40bb4c633c7a9cd64a0574038d3811668d19a173 304502205c56be43ab1e5236c11682f29ecbfcf584c3e007ed618424c53fb7fdaffe65b702210098df22d49dea1f36a4b93ca441fd505f886fae95e17442bdd4b25a99f53b6c24 3046022100a911ef435bbc7254b75e671ba19fa4f74e533a04ffea49bd4e1e74828e89dee1022100a8ba807e803f2aa471ce670125d4fe22b1e36d02973361ba5e1a991208bae07f 3045022056bc93b9346ee321802a9b4ff5e4958d89d8907acc775526264583fe1713a510022100c97c52ad0af791785a48e770718b9c277d72cffd3e6b39dd84a8cb1f3b441ca0 3045022056c30098a117124e8ab84c5cf28cb759651d73c817c21c3b0e5a5eae893640cd022100a27e88eaa2c8ab1b9a54fc411bf176d2253db187ec9cbf8fc7769ac48665592b 3046022100910ff624ebddfe52ceeba39d767eb05f91b7b0e6ba9e03f98d662ba2fdcca0ad022100aceca37cc651443bbcc7b4bebd66ab431c8472bef7f84d05d9871ad804b963f7 3045022100aac41416603b3c3ef83769d79790c9f16916ed32e0acec7baaef24b21472a39a02206d67d903254d3f1006b5cfd04b30b1ffb827163c4fff817ba0845085c0b919c0 3045022100a5cd467fa96ffde176267d956e13ca0cdfea563e8e3d495c47ce66b0438b3e2b0220657f28d5702f35bbe889273269f1f688a6845c6bf451fb3f62ed261c2f46c30d 3045022100c49c920d3c57569866a0d516053fbe9fd4a3d8a519e8fe3a17205ee4ad7410ad02202eb86b0fc654fd212e001c77ea13119ec75755f472ff89bf15acb648f30e044f 3045022100d04a4fbe78a9207bd8ca666de26c46c9c0901e1fb63b92289149d39b95b8e56c022024b5306c44c536b744e769ee389708c930f8493f2e9f7f83809fd24f921a73a8 3046022100ce36464ebeba191e7eefabd43c6b86d737af09855f213dcee0301c4b3bf5afeb022100b2e02fedc5bd3f6962b75714c341de2f42f1c5727b76fbbaa55cebafc3dd30be 304502210090fe074cb213caa6a281cde9f29cab8aec95faf0eb9849988c1ce19b74193ad902202a8d58aad2f9d27c7f6f97a772da82cb29b1feca220f62c929da00ad2e5b50fe 3045022058950141302f541c945f59b4f3a932fb44639c3ebe98a1adca2452c9f786c0d40221009b23fd5b1861409c823e486d979ee068265a15c702cada229fb59679376bca34 304502204d60d4598d9475147784b34a41fcc07bb476f5b4e5627af998c69d006794174c022100f69fe01775716ee39f7008519aea29d959e4d8874b5627686e69efc8225de148 304502207694f1fdf082a35bda0b925fa0bb8a6eb6d18dff7046467ca7765d6fe2aaa0370221008440e7c5577be9f11c4fef50ddadd462d11ee6eab5a447be0cd79afdb54167c8 3045022100b3982161d46af163a2b2d9c805390698e6c0e702a06eaa2cdd05bff6fa0d0dca02201bdd89722cc6b30ecb2f71977964052f42d592fe9211d90cbdeca7326d132061 30460221009a847bb1342b4ce18872d3593f5e049ad703495e4ff9cbf8c36e0f45aaafa70b022100ebf57cb847f015df62a67037d8c7e5732379615e845f9e5a8137329422f3b9fb 3045022075401db4f88fa642e7ce771fddcf494b176ef1389edc737257f172ff18e5b1df022100d9581e9e05a3d2240a24ea461877847f4c73f2c95e23d3d37d07d69c88c1e5fb 3045022100c14686e6595540e385893f3b5403edcf5eeb82492af32ed51a59e7139ff1d9bd022036024bb006c175a043efc4e39138def5eaa1cfbb61098f179d241fa3f38c8585 304502200c78e965fc83d4672802f20e35e4d1e2bc3dc3c49df07e4757f7267233a920b7022100d20af78c7fd2200b34248ffdb1efe665f8239e2abb7ab42e1241133d6dba8372 
304402207d3c83928baa5ee0d73dbb81a5332ec5e6798ee684d5ff099aa130147049fabd02205b181861fde31578f1d01b66a8a3e5cfdef2e6657e56d5ce0b49d31a7723bfa7 3044022003366c4a31f6f70cc28c03d071222c8c4ec9a8c55e74527f4ab45071ee5b114b0220580b91939f80b1f4514307948ebd4e0a484d5240cc3be101bd661b14b6423776 304502210096df369aa3e3d247b3892f17557ac01a89af877e0e83426a960ae29c763e662b022074fabd51c28d572c6e1940663c93f0af40031f4f661c89cee5130e6829715408 304502207bdeb7a026a98a395a7888f7d522cb5ba24bd7d22cca629ab2ccb675a2778f65022100a63ba1af16049372cf3b9be3c610bfb5edb7a03453dd3f3e389b9e4332e79f83 3045022030b230fb0ab34e6cd866f0d18d1ce68dbbb1d8d1289c5e8b3d939f56fed4c659022100fe21abe14be228437ad8b3c67553a6941f05ffcb4f4ff466e7eae80f769ad158 3044022065f591256970fa71aa97c53f58cf13ce7034af60edce0b6a60d17143b96410540220163ab63657c01014d13b6c3a8b8350d2ee44357107635af4d6b94ab39f2aae9e 304502200db7c147a1a6e9ad92376edf238f2a4fcd8a2fa2471ceb555863761935818c30022100b0e1e116e3cec10b3479c174ab61c70610d30f01bbfb0e250f5d04e228f34933 304502207e325028237c47e70eda4c053db6c5b6db4a31caf096a8ed36bc0608b61ad934022100ce9dde1b8b78d2ac01099620aeacbd45fb6610d7e9472c91e9e167e8c46f167c 30450220523d3bf8a8135b61e9d018a5f5428aa9e13d273f0efc957dba4e9fa0b976fb110221008049e5dfd3a1092a418fd6dd4a6d6a2cd7fec8543cdbf55be401c18ca4032ee5 3045022100de948f543645f354c6ce281cf6591889ddd531d8af445198827fa216c9f91012022010ba73095d8c7ae6280a7be7eca7c423b62d977040b44dcd1f892e7e17dc3ae1 304402202bff3c9b7613d18b6c23f1f770c26e0f153e12b33d849380a76414ff2cf70c0f02202338f4af5a510b93cab1050ca2604e01199e02ef8aa9916b305e4d95ee037dd2 3046022100e9b646d474f3b021900606282ccb7ca12b8854332e3f9114ee9df5981493c0b4022100d722a91b72453cbbb86ec265ae4d01da9fd5eea0fb99fda5ebd01962c19820a7 304602210098017a329e1a3558c38025fa3c31dac8b3e464f674f6dc50948d31049a485798022100fdaa618b0470480d262d4ae74b4df56412892e20b6d3629b02405dc7bdb3a5ec 304402206c3cd573d3234909e7d84389f75ae7afac115c77ad354da9b84ddfd166b5ada90220157c64d1552b75a318864e14e897dbadac9b4db6fc56543a85b93195f14a8cd7 30460221009b10273ac0503cb1688e9fef3b4073e6638c23cbd4e7f56bd2c226e19ce6802b022100e8e69901d03475d8b4a771e10cf50da090743a4352be68bd5b19461e94cb34a0 3045022100d558cdc7c955a655e85209dcf035b6a797510a8257fec2202d9edcaf160b5d55022016e67ab27d36f04eb6ce616d0e930c148db99e97abd86b615257dd3ed78b8207 304502203f50d9b53e9de8fcd00762c16096c10f1f5569cc994b4ad1cd908f6a8b032d27022100d22fd8a99ee7cfc0c21b4441b10fabe79fe5b7a76376db69b407184290bdad30 30450220625839e3696ce4f3e808bbe379a01d1699b432e19b832c3073a7ef99339f8df50221008c2c671e6e0d027e7ab00c1d0c39d606f956b1efc931ec2727c2afbb3959b2cf 304602210087137ed01916e868bd1852f4f37bc04e21790578f7f7f60a01d877fa7d358d91022100acbed2794143b0a9c009c3ac36507e83932e42f3baf299fca74085dd33a1641b 3044022020baf9bc30f25da3cc4d7025ac552eb97a9871d0dd8f6c45cfa37b8beb10840602207ae520314831077c0dbbd3fe130065bc1dbb0f839565cab45713ecb4b5bac7ee 304402201c07e8bc3b312edde395195ed724390837a765eefbda4aca8f7040002daf97f902205d3778d098537738797081b3c8c6dcef746536136d241f76e4635de6acf91cf5 304402203c9e15ec3530d61e022058554a310dce3f4456c6efa1dde5f2d6e3eb2ef3868a02201e22b4e9df095b04b374fdfb292a1798c2fb139445e6ebf5c4264be65e7ef239 3044022028fcce5d6c060f6e06c7a9e19d759ec8ab65081907e516824497181c9b9b87c0022004dbb1a3d70cc4898b49fff313a126096261733da47afdb9fd31140e9874ed62 3045022100f8f0bc0a2fa9e5a74f98b51ca7475baf9f14a0628a42f5e0d13fde13cb6599f5022050a642c763045e0ffd1ad88d7d30fee8a722eb7e057d4d3c86f88199aa23d326 
304402200fbf6b8c7b8724d3e5c0c30834b94d2a3401756bbd9173c3f177260b30e84f6802206877bf5cc9d37ceacb631dfe3e70210f24556da0e8104fa30b62cad1085ba85d 3044022010e3ecdb2fbc30fc30cf61a0d40be91971e4232b4e44139c1830c6e9ae732396022019db151a4f3501917332dcd186f75c3144dbdc2ba2043834966594b433af1227 3046022100b995c770fbe9478d31027501bdcbd1e3c6b4c8b357210a8668b575819c2f18f6022100f0d48ea12bd07af1bb42c436ff79dd8a0c1b1cbf02642f3d39343db61b94dfee 304402203586142993e2f3020325fa8cd2b23feb5ed55477e0a236a5ee564d5ed6d0861b0220438b66f7921f80830f0d11c3bccc969a8390dbc4c537071b352d5c9de3f12406 304502201822465a3075d81d6874231700461c0e72eb4f0e890f4ddde4c9e66102540dc5022100c604ca4a4ade4505f9671e56e0bec6a334f846a61634260c48f44974d8638f39 3045022100d031a9f56edd268640a9b3e7a3aeda6d8e2f7a1bf53c0865f7fb4022495022f1022036d17475419e66a94b34370c2dbc3a050048615e88566378eda2261268c32b35 304402201c874daf00cd425f56a91d3495d8b5a7c5d55bd9815adf0d07f6bcc0c30679d802207dfd91bc6534e01d0033a166535d8d6874ae44a0f8eaa71e941a261c9dd55b2c 3046022100e347d4c8536b6b8b1bc69822b4b84f5b2d821a3b88f64c0eb64ae989166aee0e022100d69953b3cf1e711693626069e64b5cc270acd77378862330c25b027327107db1 304402203891f5aac43f9b953fd87b41f0a05409c260d3a63aa42af8c6b61d123c52367a022052ed2c02192b70408f62e48f7f7cc73fa2182004d6917e2133010c3ed75eeda7 30450220416266334241f3dc4f364835390a7b60d671b96679f5d86fb51d571b5cfbdc68022100cc215ca0c751b8bf67e1701eac736c8b72989a9e58de27c3c187b4ccfce775cd 304502202611ca13a4d7b68b6e1a7462d7c4ef16db6cffd22822b7f25b721ffada659c45022100ae7a4ee550db8d26fa156a88eb997373e54816eaaaeb9990e9009225bc2aacd0 3044022012db718f0ae8fe397f7445b55c7be1b6ef13d27f69a2bed88763ef06e8df08dd02203e1e24eda6dd043a5f82af3a1402df2eda38126b1ff9a2c202e5b95e241f0ad7 304402205b9ba3ee016ac3d324add8f2864c29070a379767e3f5a8d80fc820e5f824542402205121bb3550ff4d94c18f9778c445a49fbb725acf16996b2732599516cdece6dc 3045022100912f238cd95a22f449d9040c14093d8a00e50858917d60d95d7a911c5889a357022064eba1bed8725a2fbbfe3969e86e180a143aa6a4b812b3028fd35a0354a6d805 3045022100c98de0108f0477a3ddafa2a2817167b88e87da1213836b845c306d051ae3d0e002204ad58761c975b24bd7ddb662f185a96ff3881603b228925f8fae8626bc276820 3045022100f1354842ae8a6ab2cd247f5283e24f0e98e9a241faaccff96df9e42efe748b2302207953b2a43ad1976f6568e09a2c722532676add58e86ec7d3cc058830234fd5f7 30450220746a7e40941287c921dcf5d69913ef69d0cded2a28a39ac32b9535c1ca2e0c85022100ca382d02ea4d39edef92028e5a7a6693326d1c1bb952558a0f9b9231cf9886ea 304502205e70ae0709604ff6d888df2675de8ab8de437f45a4eef71050463f98e9a56b3c022100a429c2d608ca32a0da5365a8c69562b4c521fc7b7ed6983a56380398dbbaa46b 3046022100abfb8f9d3af96933dd8a4e5d6d82c55842b5e32c9a6d73307e51b86efd3219020221008d28114885055e183d898a5862c16983a184f2edd4239496af579020d181e7b9 304502206cb783a9af0813d9595c09fa64a543ae0e22706c916b66a7651a1be7d598f6d6022100ae508de02abd0e8af5989cfb045d43f98e4f394dd851cb822412e5c9c9a4a69c 3045022100a2c989480440c1b09a6dd99e4a47cba41dc839f396621923d0c7bb678033989202202987280a2bace59337f76ff0271e1a21cd4b2c6c6815dd79552bfa963480e763 3045022100b1de83349c432aeb319a78c6876fc46377fb1f7fccc11c72ffc4637b43c35f82022051c3293a651b685ce6c94cb2612fa00f814717ee0f16a5701d71414cdc072aba 3044022068422a211fbfa578cfa34aa50cd958f2344d7e292da650b511f13c930d4ce631022061b0d3923230daa7a05f3dd0a77e33ccc8f3fa617bb0dffcb25a56c0696a9598 3046022100e5217fd91f0e23fdf76a627e44602d5c553bf0cead3cac1cb485a9fec6eeca68022100898900941cbc7f2cecfb5fb26a6be3b8c7f6b081ad043bac330bce6c5a72bf2d 
30440220705fc9cc7ee486930b58e2c8bb9a9adfdb94ada4d655dc0dd5001142909056d8022016fe0fd387b0f65d5a6a9f8ea0607edb5bfcbf8c835dae7b309d99978216e0e7 3044022021557538f4080b5ed734fd4823b234d379a71d7c0d367b0336d0da707d7da8c00220521bb40a0cd93210bd82c1a7bd455fc7a0d24f5cc5a35434c55d5485842384d6 304502203beb274f22e3e4c20fe93454f0ab2fe84ee1d3d4d97f71a521f4d817b8a4b7580221008c39d2735b282e224a644422d9b2279c32b35ec85c27d71738c6385354daf27a 304502206875a826fb43b32670db99e289d68df1e0d71fc7a7f214bc9ebe5d146a19eefe022100fc6eb6938e764980dff07b2774ecddf8e933c55cc91ef31b360bc9fced8feed8 30450220179f9c2694a7c46c22be71140d84c48b68455bfb7b8e3cffd239959175add984022100f4781ad707e968f10bd4b61734839570a232aee6512630afbe86cedb95597e04 30460221008fccef54899ab9f4ea5117113d038ea8833fced55defb004f6092d12859c6b3102210096e8876238d6e25e74f1cd573992bc937bcbbe103f3d48fcb10536a520315021 304502210087060d9f44bce43a65711acd2899113c8485c1d9daabeebca849eb5daa1b561a02200b2abbb4abd5406e035b85a6762b4e33567bc575714836059884c9bd67493980 3046022100e459ed907eda472177ddcb3d92294fbad6341fd981108d639ed74db4aff51318022100af34a35c8939fcd99247b19d9cd8efbe2a076768adfe550660f670a403ac80c6 3044022065fd4f434aeaf19c3c2e74447d6d04e1911fd095151d856e7db8080ff091230c022005f465b52b025e818431e5efe526cc40f7fc8966835ba118ebd52dc9ed7650e1 3046022100fa43d786edfc075935bfe1cf7011b45ffb67e6e6ec899581c92374f369d86e99022100865fcb96642301734db4312eda4a4d6c9464939f55962af72d4d3abb773d4623 30440220721bf1d2d72e283686e8b648c8cfbfd784f1941c29871d62dd9df6db6bf1800502206f213d81d341387a14df7e4a67cc190191e28e19f1e540d0d610de423c2d8ac0 3045022067a50616de70c6a118db092186bbd411862ee2c840195b9e9a522cd52e4d46eb022100f4beb3bf4d1327e367bb7f6fec7211c4e935a2fa132917134edd044b352f74d2 3046022100fefd9e950ea8ed6c9999efe1957bd0e3516215fa1265c38a7424506a759492cf022100ba6a42cb3ae9397a0cb8783e3ed736e94d419262ae65e70d12cdf062fd9605b9 3045022100f6efedceda9c86480fcc377de981d9809d377e33d1337bac57919479aa6674b902206ea7e8230215836c25c4d52ad530d4d8ce24195770ef2a7e27a38e0fe2719638 304502204f5ee0f9e47c4c03c0a82374b8863d077cd5c4e628a7231b0864789e27f99055022100b24057920375d3352e3329e5b384a6ac6e9794c71d51bea4d8f825e549248583 30450220014bf01d721cf65bb4e4b27d3df69c2d8a15690a8745245671105ef0985f397a0221008e96e930228bc2995d0cda1e5d95817d2ccef0cb3cfa8476115b14b08627a960 30460221008235198fd8ce3a4c684bf22c4738ecae689495f752c908e4730b43e4797b4d1c022100a36e9942d4bde80fd4825484ccecc2502262d72797e14e5cf7888001f9ea7e25 3045022100ae79ba9c8c3a09b9eadf484fc4c54f87f15d1a86a922b2efa5a6e996eb40e2060220463736e94a8eda33a17d99172fc9300bc33e9b6d3344f2d4cc8b42a564d71ff4 304402206d03b544bb054cba81b7b85a71f47751d4a06137ee21dd21c3f24e0d0245fc8402207f607a8ea7ffed40609140368b192678ff891339b28a854620a9f690d37e6d37 3045022100d9674fe34a439bc7bb02d77b5098dcdc893efbe74fc7cc1d534803061182f29c02204175e82a8cb53f3af53e3cc97ca24553082c537ac3420c1506fdd6a35766bfd8 30450221009ac53581447c909b635c9e01c883cc4540f0479a31b0ca1ea19819a06eccd901022006468a32e8d00c2b4fe771a6188ccc339709fcf44f2cdadf64ecd46a9772c20f 3044022021f37bcb93f4cd1ad120556fafb9f2c1f011b9a66f4bbd0f0bedecf4aa32453f02203febb077e192601b03f250ee05c32fb3eede26045caaa17dab58e1c1d7619fcb 3046022100d9eafd45faf1479823266fde95288ef0bdb19c9062dc8bde95aa5d757a34aafe022100df4963fce2cbe914caffc1f6fdc99c02fae67d1f7ea1b888987c3166d98a9b19 304402206426f9a6950ba845a89799b1f482bf7b6a996e6ddd5b38b7fa2b443899593134022070fd1844e717d3fd81dc4e39420cd5ef341a28037d1b95ecfe9ba1f6a786bb63 
3044022045ebe29ccd7a199cea0a9ea8e317da45c9188f3f70aaa2e1757a9ffa74183e9302205eb8120535f8bf8e1ac71ec50f2f2d66d6f7e375bb7e562424b2fb974259ab84 304402203334882da8674f54b859a2827e15f03c4996b3c0ef1566187e78758422dc68700220442f18dd6ee407f8939690af253ae6bb7a0de022f68c5e5e1235424332f759e8 304502207da41e1f6506c4ea30830fe5f7b5ad3715dbcff285ced5bcffb8cc60d223a25a02210098db01b21d455acfffb6a894263f81e82bcec035a2427da048064b81faf36e96 3046022100ab943d5190d25568825698e674074ea30f4a782fa47e65910a29e501689a35570221009a3bf5e076a4fd6080e85de80bf7d27d00b9b1990bd22a38b0a5ddccedb37a51 3046022100fa4e509eac4a05c90c0816410fe04c337302a65df194d06383ffd93ca595b4b3022100cff9c54517627b2da59adb44b74387df97f8bd7f7b506bc8d6871d43688928a7 3045022100eb3b4fe8df089f8bcaca4a09bb57a75782d5c4ce3d2429f5ff68807a0d9b7f3b022072fe464dec4a44258eadac6abd93240541ae986a9776675e34685ff4b2dd9a98 3045022100d51c37f762da94eb402c032402baf201a546afe6a3654c1c832b9502bb522f4402205c16770cddf60e327d92f9f7838ee74b9cc9bb7850c8e4637efb56086cf09e6c 304402205341a0e678dcabc79a4a021c6ec7887e9622c937741ee4fd823d5868de4bc5cc02205d101fa862244a6d8858a15acb3b11c3fb7a4683c92a240bb4c61f019d5e8c47 304402206eca2e842dcc782c7f380d5e810547f0d15f1fd8e4b6da2c7d75b4a6c187f93302207bb6bd49fc8f65ad3f2eea5af633dea8bb345ff85d6d41ba5eb79927f4d66b43 3046022100f4066475a7adf74ab388c100e7ffb723104ab9edb88ec1d0185591ff8ed076a9022100bb3a11bcda2a7bdf97ccd8290992adcfdbbcc90bbad63bf9312421aaa592ee10 3046022100e37eb1770e9144cacf2ea6ff38812c120ab1c65866d8262d86cf7dca41e983c60221009ef339f86a09c3c8794c12b1b21749c62fde85941e22f29f8309edbe5bc8e3f0 304502204de024b36e21e5a752aebd753ecdb53a20dc564cc2d68b435f3e4163b8b044c3022100fcd81e9b864574f904626190f82f0ba5c5a1289789ffa0bacf53491f72e37cef 3045022100a1d8e06c692e3fb7d10a678d3d6561e4900b2f98333e7324c1d4b545ff7f73d2022068d8a52a3dd1944cb12af5435e0d514d258f95754bf63e5f021217fd9e60775c 3045022004f456f79f2d165aa5ddf3ccfebe2e83cd8248366319ed9fe62325d78db367fa022100cba97d360ed25d81da9a534db68344a99ba3d0182930b9f743fda3bf400ec4ed 3045022100d9dc08b61d28a62edca07631351c7c3eb71e931e1a8d4eba6ccb98752f1abe5502202ed4bdcc854cb363a699dfec72976892f6b5cef94783d13c5c846e9affd4b98f 3046022100b95c9855b9d43a002f0743413ade74facaa80ffe9f41aa0b7982405095c40aba02210085cb12df5a39993c84148adc58fe46a5fff3b431751c60393069ed992bf13502 304402207d018493077d362757af9d5cde61dbf3c3dead40019dc3e1afe4847af24ded4502201d0e95ce3b07569139dd314ae670afab1879c65a36c530490dee69f89619f836 30440220401aa437d4de3bb517fd53ddb4ba1623f88dd2fb6a9853fb0fd52c44640ba2c502200c2ca51f4bfe1029c8fcd066fa8e3dab3a35c56eb4d58cc25aefe5e438f86f84 3044022024e47690f4084da8242cdd9933d4d6202b4d3523335dac6eccbacc9a04f67ae702206f4355195d3a0267152ffbde2efa0b6708ec6ab358397e7cc18ebbb8b6f4ee76 304502202daf3aec09d295596ee6317980d0d088c8310b44b4d125645e07e1e88f79375602210097e018128f060056ebea507bb15ba63a03a53f7895c330a8f8fc761d8a1f05cb 3044022018f0c015fd78352d3ad14eafb722846a030f545062ba907369a8830279359c7d02205366bb9428eb064b5da1f912687c034a492c0bdc6a770a44a71a15134c8b1002 30450220110bc238cff41f1a711ec481f6bd96e44b510fc7fb297c51d46df89bc87e084e022100cbe346d348d3c6178016bd38f0b77423209f8a590dfadb3114e4e9db46273014 30450220190671cd30a5c4d0be8eaefac1bc94e8a8e402941d9a050119173f2b2506334a022100b0fca70fc9bc32a52f9cd2727771e8fff8cacc4a9d5f41273e22f6a161820511 304402204dd33ad2210e0555d24464e58f79ce4a7926e7b465af111ece1f94389957cacb0220068c2d19c0e93e45f4cd4cb1759962bc1ba274c651a13f224b5e3407a677bfde 
304602210083c8167a08f2a3d5aeaaf6f1b0744ad428590ca1f07762bf3ee10131a50b3794022100a3b123392d8148b371c1811e4cf01bce70a2c36b0f523f4cf655f518b816628e 304502202c4de8dd8a38544de6f80c409817697b2ea3c3d3de870765f7c318e76465f2dd022100c453fdf523f9e4c6c749740190171e3bcd3dfa67213a808965ef5b34defd507d 3045022100d278ca825c019d1d564faea5e4b21a477d30aebc34efa46b86dd5e86d5ee4bb702200eaf8ffe7e4c23311a65ca7367645fc5434c0addb83c8f02ae2310bdab75b435 3046022100eac6d0ad6fd364fe53ea42259c41e2d2ded6173f33bdaedfadbbe32d02822051022100bb383bd09c1c75ac53fa0006e3798b7a93eefe2dba9b2cec3e2060f02ce72e34 3045022100ab025ac3f1492f706edcf47c9ece2828ff4b17b4ecab56b6e3ef1011a673d19002200e847d65227c41114e46bee0eb5cac4870e06826e760e7c8590ce602a90bef3d 3045022100d2b61e2be682b73e88d08588d4b5ef7dc4d438a9843c8cca9ca3973080ffb74602200ab03334e33a334b7680eea526d1d45ef3a5dd897d981623bdb3cac8647549f5 304402202c4b5ee7fd2dea8019646274a51778507a043dc62ba96cc91725681f01f4ea4e022006f3017eab283ab48708015f1f47fd3386e021323a58ae0756107c9c4dbdbe84 3046022100817573815e9a18b2cfc78a8f8ab9d3bbc4c53f446117273e70edcec1dee0ec35022100c1ab56830d013b5a8b8f5d9e608963e00dfae1d6acf0bb99262fa26676692fed 3046022100afb7f5e408f030efd35cde35cdaa5a75af94f3c8436a017d71a5268be62b6892022100ea51cc86b9359f4ac728df291f5f9220610700d35680e1c5a892ae69b055f3e4 304402201de365d22b53f337883425f83c3d81b185e8e71796daa45df2bdb171fdfeec9c0220596395380e79d793e8e8cb64a100d3d7c0f95dcf12a16ef501dd46ad877bb0c9 30460221009fe11c4e1835ab4a86fb0098402fc513e0898badc44963a74a3ce13cb6a49ad9022100a0267c61d0827ba8704ea0323d63a50ef26e3839ae109fe9fff1b66b67dc8d35 3045022100fe3486b0fa892e46bc948b9cd79e7d13993b231134bf6ebfe9545219b8f6f7a602205beffc99358e8ab40e43e1766161f7d82d0e16ab287984b01615c3ae761969af 30440220521ab1a46b5038a11f2f9b1c7ca2ad3af98a95b19075c32174244ba538b6892a02202993c8f5e5d4e456eb141d84feb7a0b9ecb1c165d8767be616e3167d7c35f434 30450220559440c455a85ac9efc1cc8b1ca7434fb9bff46538c25c5ffbb0e0fce728e761022100d71173505ccb4674e6ec1b72f1931cddb528ba45f908d7b3c7e0c5ed32e31311 3045022100d58f8d6dcf525b0bb3c1afdfb50f51895ca95142cc1068cb04f101b1ff047df9022070ad7e0e47693a1088224f0ae850d21b0b1162942afa98780840325f5f855c01 3045022100d23e5f7227ab97e1c6c4020831044c214910fbe1a4d699f4d521c058597c1b9a022013572fdae9e0140587fc87e7011cdfd760f71891072030fe8660bc47a3ee9b75 3046022100f0327d38638225acce7326ad732f46e7d1676c38661de07b0ffe163af782a3ba022100e7140cb67a1f956b2b772f13d382f2f266ef91f9d1d3731687a25e742b48a59c 3044022068a35eafb4bcb07da5e61e8dc6ac0959738be457eeacd1a299239d6bdee8ba1702201475ce0cdd82a87468076b827dd4826ce202077a7a76420c38b30d1db7e53eee 304502204a3c8fa588003fa5df2426449ee15e7262663e98edf97ed03b5a708aa41ba374022100801bf5ea3626194a20a7ddb521041c2c2b403c8235b54fc6556d9adcedb5c9a0 3045022100dcfe729e9f5afeb05d2f1d2fd8cce9dd3364d0245d0a563e2b3e4509b8aa619a02206abdee028c8567264d75045df43daeea87a1ffe0293cc70b6a9e1aa5f749b79f 3046022100f0a0a9ebe18c9d76d959ac34cf44028b0d0fb034f8a0b7d540367f7ae25d7da1022100c028aeab3780ce2d1e94f572bb3fa616df493f648fb954a404ccc9ee93a303a3 30440220294beeab6cc9940d566e473de1c2c7734be69d9718eeeb1b209f7a10760f263b02207666fff0c1764494b6776db6b6dfa386955cda2d70a8e92e6f4d50cb6efcf5fd 30450220772d86dd0133fc485ae8cb42eed93b0455df3e1e3810958f7612d465c2aaefc7022100b2ecae7d68b20fe0e64552ea76c7b1595c0d1d76f3311667551c6fe2e0461e77 30440220038558c48ec69b6503a59e6a736ca063d742f362ca1d592f64adbdd6edf113f9022039bcfe8f2991a8e439d3678818c1f066c7d3dca4c9ce24e4a6c700fb387964ac 
30440220465e8c31d1476ee5a7a058a8b67b6bbafab6e80bced766d9aeec4c249c6e4ccd022066ab44ce25dc6abbf63edd8f5ad7f8c88887f156698c98211277063588aa087b 3045022056035597ab97096562fe36d6193086ce0e00dad36c01d0fbd562e6537a27f564022100f769b18b7c18cb7c1e982034d09f3de24bc6bd4b973c953360654453bbba200f 30440220692ba34e1c7e7f41df93b2062747431d3c410f27c9292d021eadb53992bfe1c60220321ce514b914aa6f265bf5dbe36a3bea5bd1afa50739f5b9f9fb9c4367c2d7d7 304602210094febfc4e744f9306a3773ae81d52dfc04d9df6b4ed98fd0cc797e38ae819de9022100fc4983cf8cf24d7a2bb2414599cc3e62fe143b3766f07ab5bd809e221ad5d070 304502203a5f20ecc98e9a39f276ad7def90eca362aac614ac319efda3cf66f3b3d7e58c022100cef4ef902727fb85176122a65e4d12903f9ad8e7a6e21b6fcbab2bb158034152 3045022075af6e33b2cc81e62d4117888080e66ec3a8b5cfdd49c27988b1bcf8eac466c1022100d2c81566b5951946fc4dc2f4c049e4821c3b61e54831e9ebc27db39d670e9618 3045022065ad1367cc33ccefba8c48faf60934df215b8caa950aaba26ef3165df192b1c7022100b329e2b7b496283177406e2814389e0a8b0e59010c914f2539a2a6770498d123 3045022100fba8d66841d46474156d50a0b744bbc26696d1810f2667bb3bd758d532c4806602201bcb8789476bf37cd44db461acfbdfef2e5ca9a6296445940da93b29ccef6f88 3046022100d249a64aa046d1872dc47058c5148f8d1ced0acb4f4640bfd68ccaede58d2624022100bc752a595fcbaba7c78f5b570d2ea3d38a08f7ef459e3e331657839af94e5523 30440220238c5b938667d6e1347c70040f59520f2fb12e131d67a00ee5be4ce2be83e4060220067604b476e48afbbfca90983a57141a1c27061030c496a7c8aee2d364b0d310 304502201705516eb199bee83e63c0ce835eaa02864dbfc584eed5b2e5e9b826b18878da022100ed8e3061e4ff3df5daf6ddb0f25c74e4cf20fe409815a608aeb7709dab405c53 30450221009fc38b1b2ac8785eaececac72563f4e1636495c9348948ccc59f2e4669ee9a6502205e7b175e994b2afa7f3482424363d4b99f6040f3ead98000b64891f30052a213 3044022032aa5c60a252938303e9ea4c65f3c353d0883c9b2c8725c0355d44a010b6e504022072a6cc5352e8181b0f0cb680fcbc8e85aa3a2679d6f718da3f8b7192c26320bb 3046022100be2fe726875a4c0c43e3bf56f94a1a767b2bf1fa5364ff294724280858dab0d2022100eadc9e4ab24bfbfeb7802bc6f2874ea4ef068bce4d0b7bf45aa10592cc5c052d 3046022100d44661130fb83662c79c1997e6b0aefa91872150ac48c7efd5510dc4c65dafad022100f6011c26ec095ee487de5d1fbd0e36c9519ad439d3b5d9dffad35e4df40e3bad 3045022071c807585bcdb2954e4c183afda324d2bef7c999026a35f2d8a99b7af8798540022100c63a08ef293aa63b8ec8af8ef8936477f31912c619e5523a18c7047db4d314e6 304502210094b588e52e0106d89e4487214b271977da16490418cfe39c8fe4873ff66c225202207cda87ee1c09c1183fc5b82d49fcf687bcc97e363681433469c4a43936c684eb 304402202a77d8b9378fa29940f0cc16caac1eecf71bafaf6cc8286ccb8e31351f42642602201d63d87a273832721a0699ae4ee7bd1aabd41a51a5f3a3f8cf9997532e5abdeb 3045022100b4045a22d4ff08cf6caca87ece2b279ea9a3d3c7420ffc38b2613163fb071e6d022069beeb3ea6797c368e9f5b5f28368c9bd944509fbcf94be94877afcabc22c84d 30450220195da89de9299c4ffc2da34eb43afc4f6f6c5246dfaba1be9d0b2ddfcc3c6333022100d8a7e5de5a970c65443d0d856b0c72db8f0d7f95268ce4978122d17495fc43c7 304502204c355f1b9e995342d358d674f5bd0566699c8b9e1262b83147c53e0f552fdbff0221008a7c2b1b765236456d8efc1e86d7d29bd4a48084f71a482dacb9cb8cb4c2c795 3045022100eaf7c98bbbb19bbd3132c7042d50173c957a721dd06d7a9404c638dc68f2c29d02202c8acb7f36ce4dc589e2812cc2bc1ac83ec23e42f4a62248757c6de2dc96e819 30460221009e4b543a383c4f9218bcef4d502b149c34a8b6c29cbcd542179230205b64ba38022100a638154af142a09162edf894d3a80721a87eee7d1e93402b051d003814f417fd 3045022100a95c9be3d351f6937435acb5f7394db3274ab82c67d679a54b84786ce6aa193a022029693bb154f151c273c7293475ecfbc9191a72f2c13f3207b54b06e5200fb9e4 
3045022100a900e8b39719e7b8a26ea270c90f70297e755d7406eed1526a92eae9b773a232022027e90c9b1075ddea3ef578483e07b41c241125d1ac21d82b96bea65356b46878 304502200b2e3db87a11507b4c0a9d0f691e684569b9fc01f20b855710bf8e9c43c9a451022100f5ce9ae6858381045462e25a4ee355d88d5f7e46af2ed2d367dc216139e4c040 3046022100ec947d1be0a707dcb2f7e3d470a8738478fe4cd4f351db2cf74acb4a973670a1022100fd0e946598cff4d740d49b6cb0fa5540aa729d7b7f8e97d643b256e08ee37930 3046022100f496a32bfce7fb446ccfbfb43f328d312938ecef1079a8ea979d33ba8a30419b022100ae1beacf58c86848896dcce34942be0252a5f13115ec7610fe2464ff0910a9fd 30460221008beeef322d524aefa94697dbb07016261c877704b8cb07fb7cdedb140ccf4ce0022100c99f011b3c592b2984e6b1c19b41d61f7169b1253db1a98617bac74e72c1d2e5 3046022100e32aa8970d4042e7a9ee20f26d4f766cc668a36b57bd9ef95a98d30637e048ee02210084da00b1be2acaccaea175427d0a4424d350248ad8bee033ea96c39df4caf4ca 304602210083a0be3d478f4ed8377bd82ec4508a1d443040083d562aa4f947ee44a4c60fbc0221008acd1a69893ec00bcd8b6ca17fcb67498bc2ba1c2324becaa87228d48fadc71a 30450220528b078e378ba064fad5b1aeaad1647595de9a77e74cf459fb7d499869bafa480221008657e30798e7b21e57e901652eed260318f13a227ed873a2954396d4744dc803 3044022023b09fc57083bfd0fa6b690c9363870c254fdc890f6d9faafb834aee6bf871350220431ea64f599ffcebe7d2b0a37d039dc1395b1f1c4638da86dc66c160121f3b6c 3046022100b686b210d71a26aca518fc34b51f9226701fb8369218def3a9d02b49a1402053022100a515b209368d39937e4fbc8257a713b4403323c220932b77671d4c86a3643d2a 3045022100cc00f427edadb5750ff1e4b021873b412f4ddf19cf803157644cf4225dd33fd1022016021f44eda61f5daa20d9e833cca85e145031eb5a62e7641ef12219b8ab000c 30450220596859d1f550b62ca69a1d023210dcb1934987dad582278eff06e6b61ae41cef0221009df4fde63b3d5e94e5bfa499fa8a12832cb71b3026ee43ed57e448a24ef8db5c 304502207fa39c952e17c6256f2e17d61638ad949048e101826fa48b5c5ebba413234393022100a38050dad873ed468eaffd09ead4d943c0c49998c353bfd88c5ff3f77d13ed5e 304402202a13bd57fbe193dcaf394848c0645361bd5b70ac0e1235fc674b367d1cf7eb0102207e12cca84f1b49774cf4eb872bfa09e0b6646ac64960043a1c0d189b85eee8e6 30440220676c06f43bb4b85c209f0dace55d7e593db22083b9df342b25dab43353accb14022073db41cbf38d010ff82b88e0b0608b9043773b7368a2a606c8a322900b1ea683 3045022100c524b9b76655ad63af6b17716f77fbadec64ddb13f764c2b662996cf94209d30022060a7f5bd1d8b4061ee66f93353280f6c492cbc10b4508c0cacabe4c7d350755e 3045022100a5eb1e333d5082256272d9355c4b13b1213b0b6228afb17cd40de56af78aa94702202e2b2e7ff4c28f899d375dbbe74f753d986fb33eb0ac1bdfbc8160cce244d845 3046022100bbf412cef1ccd23cba31ea4532a1a10bad4cc34901c4e5e8fdc0a39de435606f022100a732587a927c3a5410de0101c13da184d724378adb32a3565248c24885a80f0b 3045022049627e91fb50fadd787464be882a1b920f05cd8c273e013f0f1ae0169cdaa6790221008f4262e2deca85c14cb85cc574b1b03691cf5a22ba91f2b81baffa2657ab6c16 3045022100ff2c560a93b3a80c7ecd8c82a8fa5e5dba70658899a4f66ba500af264ea3357f02200da3eecc6320c2e6a4e37f11d3e560d4509431ecff5756d854e04426b61cd93f 3045022056063d35676c3b49060fb5d0ce4c422d02153c2d979bc8c65c67785504bd134a0221009dc40b7903272ac9c1756078daa7289128a26ccf2c18e418d1ebd972b6cc7e39 3046022100aac33bf265d612de3cdf5443ea8da4dfe27b0e2c8418e8967e486408ad293fd2022100d38c874ec8e98a056ef7848bb903477c98dc550dea8137ed264cc0a699de5c5a 3046022100e7abe86f4720aef415792bb4f77fe57edfea498d31c5af878d982340e085b7fb022100e4787c7f60309dfbec666594500aceb338c590f9762a5cce64ab6811c86dbf55 30450221009f5590a1d7834d839326e1a4dc3bb9c2ccfb9cc69c54e35c7378255b963efa0402206830d30d67e18e131f162b219b99bfcd21255fbba9fc82599806156b6f35fac4 
304402201823cc591fe8745c1aff124274165d02c6f7847964c1948ed8055b61dfc631fb022050990f3107040060688b82a23b607d2a0a78739df321249742ff8cd832c30066 304402210089feb2faa668e365c466223074b421e1426f72b18e4616fb955e1574f31a0723021f76be4197966af90b7001fd61d6bfc9151373f329c33846a1817cca3d039dbd 30450221009685e345ebc285a35e0796a9c377a602ea2c0c0c7ac12c82b79639c11756f27f02207d54e79b471bded892b1c447763797e2868fa681c7b184d4a5f92c21e81d875d 304402202750a8ba3a7939306c3a47ed075a2e8f3c302b4bba9975608417fe80dc80a35702202c3f7ad3ff05276235a6e2df575798ac755d640b5b1fcf158e9330717141e3f7 3045022100f7d41549751efc3e54f5c103d66ea9fc8374e77ef3ee6b6e5f3df4bd82ff995902205c3f8de790d52554cd77ef7e019c79a3d51236ae1a00ab330a6c4133ffc55cab 304502201cb7fbebdef2e0f8227020011f1e1993de3231b4cc8cc37955cd7ec0f12d9677022100e4413932418f44e7cdb30211697d47f6ba1e324c4faf3f102e54341c99891b7c 3045022004355c0d26170a198195d4861ab081f59fa137d4e6e5cef3d7cbb37e1d891c70022100950a04af160c6849594c706fc8b0d20ccec9b23d2d65c0c2501805fb1ec9a6b3 3045022100e31db77a84ddb28132496a1d7bb561a2da704b9cac78dfadf36309744077649f022024f78a8d0e5545396c6beac075ba83b1e3c86b536f9204c9ac474b516e654c32 304502210094e426bf388da5febf5e766a430034d81695884c168958d2322414a4b702782a022065f381be6686738f5317a65e86fc41059efa4948cce4432b1182f464b7defd32 304602210095feb89064da10c7952a5f9445abb73281f578b266df500d6d638f8759c59370022100a9164790a8f6ac1943caee959d02ec8cb0210101aafd6c53a19fe292ad0af3ca 304402203c5a1112b6dacd002f340785599767b44d11a9e49e8c0a22a59bf62fad1295f802206f076bd85bb9edda9f35ab7de71ff253c259155133a1c64b907434ddaa56038c 30460221009b485ecb90947b207940a18f841631b0f6088a21f14b98f8b193b40ed1b4925202210096efa6e9fe1d1253a56617069650152b5cc823beb92976f39947523707f76237 304502205257810e65d0bb2e1bcf6b1234e225aa447cab6528156789a4fcd3edaed06dd5022100c3848ec3e4265762f1c70069d7236c3fa18e0b0b6bb45ad061be2f3e9d1be17d 3045022100caa1d0be7c43eba1af601027d6f8f58987de47fbfe27937a35f52834874fe25f0220436478d629d8ba60620814ea22fbb002cbb931d6e79a61749e94c65605fb5848 3045022100c68e7f9925dfd7fee7d0268f18395a684d420f41f36b4503f56440e19d0c0e7e02205818b1c73866548a6f3ccac233e369782e09009995921fa6503e16541a9e16f4 3045022006e2ad40eb4b7e2fd5fd6fee836f5e540f6134f336c7aeb180b970e5df74e98d02210091172bb5cb49e70cfbc0be8733f825e9d7f4dc78f497910352a37e8eaba37cfe 304502210081ae17ede4c72664cac145c4d65f74905d37a81ea0d172dbfb59c5c074b7b77202203f87113450333fb183ee250634eb6e9c8f25e15cacf2904f578b2c83b25e702c 3043022017fb92be3c23fe54b78991524bd84c0d3cc31578495cda3318d8176b17521a9a021f545202d54bc0034f8062e8f5af2304a7c652633098905e30a49f6d115a20bd 304602210097aafd7d2f6b3ef67b9563666fbd831dbf2510a1e00d7bed0ab73138c71d433a022100e4d847e8df41e80237cda37211cc52ecb33d688f0e582360b140e2738564ae24 3046022100af7f9679a07c54b8f3d60b9e5c42ae6bbdd2667e999cd927b090d3c3ad5feaf6022100dc72622e99a13903c8ee4cd196c12ab810af4897d41fc01f450c76497a906dbb 3046022100b7a076ec1305a508f7204e31869db8fdaf25b9157b2f63b20879b2a6283d3a87022100b9a63de39ea8be30053074dabfcea37fdd354769e417d82682b4a263d960ce7f 3044022061d45bbbbe1cda0bcba559c1f8beaca2510285dd4191c36285020508682983f702206b2ae3786a0f4cb5c001064061199a9ca4cde63383f301d6f0b6cf199b744459 30450220226278d044d12b12f4d17f350a77121316c974ad58bdc743dfd736fb395e8d78022100d30411a762cfd0c837efde4e361ea0a1daefcebd775c949b3c9686a0b16be436 30460221008092d98bded1b09baebba6558f47f75f8e17e5ada4932e92675cd6fd61cb7dcc0221009c6698ceecbc1bf54164f433593df61f23c6201556cf7403d7292d5d8398e60c 
304502210082e396870ac9018ce5db380bbc8f3d6e1c91189290a7d7945d662518e1b80ae202205c3017ccd785c8f4b37466b721f20af823f35b813b64dbfe2090a2006091f85d 3045022100f6e440667cff86b9a15566371caa66808ddbd935ad515e5f4aba7158fd8f48280220141da5781bc17e1bbf2c2c0610fe8ef4fe9028051fb0e2aad084915bc7859b62 3045022100cf151312688c7c0f2dd4301d57bc4791f9065b828488420704b5d04cfa3622b60220379cbb69efba74b6d2ce16312c110d37f1c298ccf33de29a8c6084f61f27fcdd 3044022053275a0f55b4ac72380a697a4a607e723f7b6a86ea759abe73c23cb1152d833102207e17bf1f61bc492c3eb03f14c48a68444ab5cadc78d4ffd8c20afd2494b4bac2 304602210094339f92693585fcb9ca21be299939bc0d8d4336c515596f3a7c914a7bba1c9c022100a45c2c9d151b555e4eb5ab44083a3eed05e6a212419c52ccfeb4b9fb60dd5315 3046022100ed7cfa25ec1f38d69bf1b203c67235c26e4257ea1621235f205c765d430a69140221008b603e49349070f9be01d9f2c863c5c61a3addb2f336ebe461dae5f7130c2b3b 30440220519f3c796297a1f83b45eb1527c2cf17da607b07319365b9ebf39a982c68b2f802206e5ceb3b22521ade4f4f2ad6bf1f5c167c34db2a929ddf0b728611f1ec7d1c93 3046022100923594f47642625193f692675c9a887b71225b107acd6f03d60f011cfafb2f00022100924c8ea4c00bfe7ba5d20b51ddf8a81bed05c61a18ee0374db0d18ce736ccf4b 3044022073fbccc59a48ddb401d4ae710f195d63c7a2a0c75f7b0a8ce6fbd19cbb572ea30220263b8ef786fadad658403421032d181d19f7557b73296dec780b8a6ae3ce416b 3045022100edd059b49545694af03b08950874c11e7fb70af750733ebc0036ce3007d2eaf30220521910460f8adb86022891eb6e6c3860b9df72ad94a9ef0b2315acea23138852 3044022078cb9ac8ba2b977584908a0fb393408de7938ee765f07b12a2e2926dac3cbe5102203487d12fac00a09492ec9e5f06ae7b98047791b7891a6b593749888154552f0f 30440220519b1d38b773668be6985fd0a0b9e4b23396c0ef97ea548ae19757804984d241022042227b3287882afe3ca6e4e584698080cdb9427e6a9ce84cd24a64c60884a0c7 304502200db053dbe91023a5cc6d669dff0429587acaf07241c79b5710bb8758cc6c8433022100dcea4f98a2f85b3a89ac1d40c896593384a50374220e5fc1113353af89cc42ea 3046022100e793bebd31d711000d403db242d80671cd9c593ea23ebb284173c6f6358cb94702210092d7e2a5f7bf22ed143f8b1bec1a6a8ba3be4063263b05dbb3cec82f59f85351 3045022030c184685723f23bfb97491e9f1071d9926dbcd239bf21a7f720d755dd0c09ed022100c212a91b526f29d0e07a8b5883ca0ee21b55b96b00baf9ef2dcede9a7347190d 3046022100bae0132aab3c1d246e257349d74af766efa74c515ce5529c308c54498da4e5ef022100a16e3809d667e9e6d2e5ba9b7ddbbff6c64968eb595fa66d85aa968e794ed7a9 304502204efb253f5daddbe4dd5048d7ee5c32f3ffa8f7a5f55876d993e78c458f4b333f022100ee3b416d8e130aeed89512583a3103de45a20d8bb3a90a1f6c60c902413d5502 3045022014c188527aa40721330ff542901fd259a4e39dc4f377ee44a1901b5551140b4e022100f2f37dcf265af37d3684276e31c615d6d262715126a10c30158716672384d798 3045022100b50891bb89a96f0cf6679be0dc7f697a4def8d28fd69269394ec2cfd7660764c022003d147c017fa2e8c2fe2a1423c5a435ee194536fdb48b0568764117e668ce88d 3044022021be4d49bba9104b871f2bb2b99985f749a04910d9fbf18ef646ab579856a867022008ed6da9f46ef22d2d506f44c79755e961ab11c13bddc8b29fdb4861362ff19e 3044022028adf014b38c54ad0dbaf12be31a3c1bcb2624dd739468efd5fa5069e04cda9202206231670875c9e86ddf34556771e5d5ece0ed0033917bcea1e7c2f61fd854945a 3046022100cde0f199ec938556cb7c6260f60f18d0ae8cedd7678542b62a48993142da3687022100e21fc9db91fc4b2ed22e19eefad83ed791d81044d6d59a63c921369be17b634b 3046022100bc707e49241b8f3a4178b89b5e5e62a6bb2ab9c6157fef3d51ef551f7af6de68022100be85a102c24594a427db89296412ab321af53d995837a17e746c1fd3b3d603a3 304402205853f4477c8430d198dc9e55951ffdea0a217f7166d86f420f2cb285a37ac9be02204ec977bd1b8ef330d1c4eb9f25fbcf7f638aa0788911c133c5075ba358951fcf 
3046022100c2250e7c8150e6d4be4e166d0d2cfd6fadeb37b65ad77a6840a450f32d0eeeb9022100a0c523c425123e6fd2649d434ca0f5cddd0df1807469120e9f7cd67849840abd 3045022100da1533baaaf8c652d06ff6df34a51479a58813e47b0bd7d9ad61f4249e36ce2e02200e6f4242dc237df0ab1b8462603fb6881e02c773bc02c3432c1fe5698ee8fab7 3044022051aabae3609b0106b3151d0cae5436d3d0a4f80b0f93791ec358cfec879fac56022047760a12e5689d8ee19a205d217e841a308216b1ab1379c9e1e0d94bc1d81bde 304502206f920d9c8c10657984fc04554f28567857641df249a2b8504144428ccea6dff002210099ef6376c5340253ba9b21eb627bb999b57cc9a3cd3c848012dc90bee415dc17 3046022100d130d41eace427ed74f8f808585b5c0febbdbad05b527d63d7f5348a8f6aac4b0221008eeeb12d3ad8afa29830276f11057112a9f1fbfb116918e0803d5a46a65e96d6 3045022100bfd875f1c867e0b54d047ceb169b88ca72b8f62d2dcf53b747477f791a6ed1fc02201e71698b4abf133eccaa5495fc352525955a7e75f1729f8bafbcdbb96a198c21 3046022100e27dddac2f61031d253ce0c5be335f1fa2eec23afa129a1a4a8c9a7954cc2e0802210082640716b780512e99126db657df7193d828d8a9260cbaf78122aed1eb837fbf 30440220395b2ef0a3eb23b6b0be9a8ea19c848fa85a8304ab559ad945091c409065a73f02204b90ce4fdff35ad4c66f0cdf3d4f19a6c4a4fa56efa2215b96f3b71aa2b62977 3045022100f99072fbde3b81b52f74c2a0da8f33a1cf5947f0dcbbcb32d9443038dc25c8a90220423376e328abfdd52c2519cb4667517cc440c9d6c54ac04a7617b42a3599cff2 304502210095cffd8a80a1e4e9b4bba41010ea934667d2b1d5e2d5abddfa8b64f81cb9532202200a2fbf3fb7a5bd38390f3cdb85c02411a323158703023202042e0632ecf3f6e0 304402206d93296b980f406afb6dbf3c271c26fdbf16c7251758359e6c8458aeb1e9efc80220314107ac1a21a323f60624001948ad3738b6520cea475529d67471ef6d05a24f 304602210082b75aac1b2927d7b011568ede990aaaac220fda1cbe5e12f07fd6a906f2a590022100f7ccf479d1c0d9f8b07cf527370114c1327cf6da64e9d59bda5095632721b2e6 30460221008965b4e83043bd9cc4a72178ce95a4bdb8867565d4bc1de893bc0908392b76c6022100a4cf67a3d26552e443d38f780348c98118a5a172bc3fda2302d16367aa70b464 3045022100eb4ea27cd915aed6040c07843527783c0b6210967d608574d64df7514cc284730220388ad0dabb727ca4110c06a7a2558ee8013330d90c850c8bf79d3c273f0912b4 3046022100c1fe5a9c84a382afd0c2e18e6f27a4d7637a56cfd951c0e95c50044cf8d0a6a6022100c48fa73601a394d8394c9a142e44c7f37b3a294644d4b8f6a1931199ddf89ee4 3046022100a0051af89b5d169dd5bfe7dec1ad73549a73357ce609d46f0ef7a87f4a341ece022100abb4c9de4c6c6136739c3119fea39c5f605bbb047cf975e8e8da64109f0cfcf1 3046022100b0f76918fb478522ef46f690c0a3f43931f3fd816bb49b7c95c68737dedcbf6c0221008432d090bb283e0754e7c0bf16a1af1621357a146a67205b78a665203eb8401e 3046022100dc67cbc7c3f082f617b478fd506e3b9ad0ed5adcac2fbc15f6510fe11878b2c3022100ee5b019379c43f21c3a3f1bb1e27e71dbc30d7a4fce81d329d5a5ed1cc3b44ab 3045022100de8a3c33bfaa4998fc51bc489c7e6ffcfcdc7a0b5ad42bd1b94ea8ff6135056d02201afefebec3d20bb34402c6121ba6a2b425d9daddef5ce0956d1d6fee00056864 3045022100f253a36047a7a0e14bb7723a876684faad41a15ae94294e12d285e4fef94987002203e3708dece74a4299399676f2d2e97413cd455020c4bf158ba9e4727364a5d29 3045022100c2bee435735f58970540c7845ca4eb2e743e2b3b00b7134c213fb348d5090f64022021a42648ecf20e11f20151d7e3399dab5d9095d45983a2707b58d11dccc65914 3045022100ba7900138243e7d017af906c85562b9eaedff40969839456d5d3366610245dba0220637b003fa94c4a22bf807c23549271179f7a521b53428a282506a8727a914302 3045022031236f6911ffc2aab74b9b83f1eb12069313ebebdf11b0519a041546902d19fe022100fde2c37c6f8342163dfb10234dbf5af96b18656182f3a4ac9287ccde72762454 3046022100f588bb4a438e93a507bc4b434844e25b95eed893386265e49e5b4f144426be0d022100de2174382d2418e36a556071ad6d122426441ae066d9c5103d38dd4b14c59e4c 
304402204f46ea1076e917734b2d814733a007f5a5a7a2708d283df23a2a478ef344d0d3022002de9e0bf7a2d9f2a9fd51d7db3776d3d2e2821a156c9dbfe199d9b69872807b 3045022100e59ef8e9cae2b22a49713882ce85533a717df1c0b9fbb4de7c1d757ce8675ae60220670633f197e7c8226b593fb6ae9a1da49283fcafb80c032ef350eed1a587ded7 3046022100c38a4022274c6e0debb9f75e40edbb55e9680055fefa43dc755fc2fdfa5ac2f7022100e1a57811c9ae96391c31c018155e801b063e8d6f08af8c97b743e0e4cde12804 3046022100e6c14d62bb179022f68cd1e044d297288053da44d24abb50dfef640fabe79554022100f0f00575183dfe44a6bc6b2add07e570ee0d32becdd1b6ee7cbe546c706b9175 304502201bce161d3e2ceba241b9bf000d4950b4b8a92691919dd972ab31ff14965b19a5022100ae789d3f1720c046fb5983c6ecc1bfb972803a7ad41a4c3e17447d7c73c880a1 30450220743186477b4d655a6575282ed0801be12397591761ae416c0e85e21726a51856022100cdfcbf164afcb15def0a6785a20b28fc06cd0c1c161bf9270f9bf31bbac7524b 3046022100fb10023b26ebaee6f1d25fc539afbe63cdf54edff02282c7343377417fd64bd5022100a2d7bd7a3a63f03b6f9301a8abcd6c88fb334ffe1a78c94090c5c6121cad0ccb 3046022100bae6c23267c970f8dbbdaf2abcfc5126927e72d432663dd13cbc26a6f3969680022100d7f983621c6097aea1371b36b617bb5318e489143e3998c2bd9d5eeb0ad46ab0 304602210085f2914fb70eda9724d13e72da71827dc2d4f56fa1a6464686d6f3760e3f0c41022100dab0038382472727873f234e2700037da6a9515926bbbcc9b608193a9365dc65 3046022100b8a23ebcd096b4b6ab1677847d2b9574c9e3d3f2bd8fe12225aed28f9b20219c022100eb181dbf48c60f3180377f6c2367b7fd5b6ab835371e288338ecfceeed2fc5c7 3045022100bcc0b286ab55a1204d645e09ca026cd2c6bbe0a6b4d5e753a88b00f53ad7d54d022079e13ba16ee351efaa8f2dfccb94e0c06242597e1ddce7a293283bf0a4c703c2 3046022100a2601557f7b5d9b683cd023e1d4e681533fc244c8212a81b23fff62f54c92687022100d034612014e009ad58147d889fda2b5cbd5365de9e54e5b771981e14bbb25241 304602210084e0368dc6c92440a7ee8ac426bf54b6c9bfee152571bb931b65455b3d33fce4022100c5a3c497588179df0c4503bc158d469a541de7626c20877522922f08017c4ed4 304502207eede69be92100399154c22058ee3969f0cea33380a304171b06e7d11b1f5f66022100e1d2cd7d8a21bdb2d0fc2f9d8ef029b8dbd60d66421687500fb4c14665f9faba 3046022100cbff6adb3088bd73c010834f180127756630a16cc95e6169052af0146566aaf2022100ea89eff4e2a8f9c2287925209b11f8c02e6c3b81d6ab823517cfc3d180627621 3046022100f65d1400a286347ed8f4aa4a74d40b2d1b074782f4201a7ccf8d5f4bbb754864022100ca401996932f6573831f554f3bbad02fd77e4faf3ab3d7f12b1bd233c42f0a23 3045022100c7b4f4f7b1bcb1eff5a234020deaeb07712e80ed4c2a340d047a4cbb04e93cd502204c77ab24c6ab437af7563ef53a8d5021b32bb9fc0a5d5527269bcb825a1d27bf 3045022100fc065934a123c77716c0a9e5dd9dd21e943607b5c968c47bd8115f07abca8fbc02204e0cf041e673fe39195304861e872c0b10df6ecaec305f64548ec3602f67cf3d 304502202db22172885f42d9ce879de04dc0cbf87289f40300e03b3a1f6c08a5f78fb092022100dd41d7b7df9fb3e0dc2a4b6f39da73384fc0d6709bfb06a5c2544d60088b08f2 3046022100f48bdcc662d3392ef42e46d21189f2f6711ed6a3cbf4e5d3a5778a418802f370022100aca4742cb75b4ad2ecf2836bf59000e005667bf99d14a6d73da1871970635217 30450221009f81fe6a0f957d9b888bb0961b38df8fb234460c4830311c6d3907931ae962cc0220503d45c8b9191acfdb9b29736d4f154b5fe2317cfb4861e70d10effc85109a1a 3046022100cd47765bd1c4621014163bfe0dacb8f6b21251dc9ffd910353fe63be69d4846a0221009818ed55ffc30107ab114a60fb418add72a8c8499a2bf9fca25b3074392a74e6 3046022100e85b16c69a9edde96e7409e90defbbae6f7b509651c48c77199587ce91bdaf9d022100ba9f9a8164f1816af1ca8a12392e7e666e695b513e4ba4b96267b54132d161e2 304402205139eb4e64bb4da74765e88752829d047f85b35b93d925b3ed71b1b9672578790220591676e5906554b71018c151a08e6dedb8dd1e45871207a9f70b17436cd084df 
3046022100d2da27b5d355eee08adb9cf1b616069c569d11e348b237205f52670860f2aab1022100fa0f05803b94344093f96fa14b8789d851b9533b0b0833c415235341aad4d1ff 304502202211d2edd132d5968eb5270958c6b36bf734d6386347df63ec2df8b2a03ddb85022100ee0892aa415c9686ce98f883d6b3ef5ff25d753029e473349a6aa735ff4a7221 30450220477dcdd90686e80dd063ecdf2269db97f6339971b2d387a9e413557cffecc96e022100f755881f954c0d4b21e9ea8fb6b2326d180f188089b3cb3f7c43488e62956292 3045022057b0e1638d9ddc83a5fb4b170f06dacf9e78a342547592fcc0866398924037d9022100c3f7c37102bfc87a58b7132dbc816094cb94939df3ea184eee86143ce62ceee4 3046022100d36c26551d371d4a94e57702125f863eb6264d019a37eddcea4e7e79e79f1215022100e96ffb1f6bd87ed931068836d69e7ffd61aabc11892c9cb89d19db0293ccad34 30450220689bdb51ebcf299b1e2701dce917611af68431282626d978e4c2142116f3f7ee022100b96cab908b029426a4b6c693350b2d2841eac1a3628e013e4259eae417d6b6e3 3045022100d577806359998a917811fb324da3ca4245c89c84f4a47e82c92a61061ded03fd0220799867dbf98f69ccf77f0c2fc7cdfdc512cf4b3f5dd508f057bbe6a325115c8f 304602210087274652d97757c6de9b5c57ddf957d96f40cf106e8e2b4ebd00f935e6c7fe5a022100eb7a014bb1c0fadd1d036a1583ca275ac667c097dcd79ca5830cae31ce6f2ae8 304402201d5473f7ec6570328afdc1e0e99bc3c02bd62f8579472cc87ca723204dcb5ba90220700e17dabbe08032a6c6f8d21c624b25c7181b8fb2e3d0c6535a7e3146af8d95 304502204954341a03d99cd73e26aec0387de1c0360293c18bb45d2dd4c529b21297febd022100d11fb8c40597ac5699150627a27e0560dba5dca9e829ce5d44d9acf419f11a42 304502205fb595457d35b2fcc2cecda9f3032304d2d21758740b09e8b7664dc30db181c9022100b9a7090e269fbf201a507b9f378956d93b47d03b61dcac5d2edeb3e727343602 3046022100f4fd75c0beda9f3d2d1263d574e166a4939140979daa5ab926cd94e73edc340b022100eb86afacaf9da60acb7cbded1c28139152b6febba4607bed564931c4b4edc718 304402207417c67496a7692aa62c298803a934eba74d48232e08c5ad8522225f872c54580220231bf19f2d889e10c5c7e6526861d258d53d16f71a6fd86b1eed01f511838953 304502202968c318859627f1a74845a0fb41accba6bc2dd4f7bf48dde086136dc712540d022100f33d421c30927193561bae9f3f25b70a6378e3c3ecbb76dc3e3f0a78c333d9da 3046022100ea28e87413c69616493697205cefb85d3e57d6ca0c7ae5e8a04ecf8e50a5cbed022100f68f5d2bebe7548260b5f2ea98e18d0113e18ccaf4c8905cddc12eb47bbbbef7 3045022100e7e08e89d38b4094c90d26a162d0adfc9ae93e41e3089cfcd8a139c8cab7978a0220613aec4ace8452cbe909e0ff02aa7aed7b3409101a995440dec5aac8850d0d20 3044022049d362c4f74f66ff769236e49edd93c581f42bbaac1994e4c1dab2228c87de94022006e86548ef3280bb382e1fa1e048ed8cbfb1bc8ca3b031ef71da7948e9f46e8c 304502205290456344099e825862d823a5edddc551dba9d9238182c91ff29f4acc032a0d022100eb2cec568561e58f55a28ff2192cbf9f839467505f11f9cbe452d17a83c735c0 304502205a7079fa1abfba470c092329c90ffac45387b2c9282937e9d176d6b8d89aec98022100ef5b06aa139932c0d6467517634d30aa2134812506f182bde4e0469ce16eb41a 3045022100994326d74ec26aa208346d08b4185a781ad57c8a76a1e438058351be94dda70d022021a9c144e4e1791d88b2e48b1ff39fdb3aefc6b0f23f09e828dd30817b2b6bd7 3046022100eb2b01324b12df916a8f6031b0352f2653d5ede6cede3a5951a7224b8021d1ca022100dfdf9d812795af1a79e1f829d417caa6ff609d1dabc708c4ccb5153e973777f3 304502205bacc5d8106eaab1046f742de3fec96c25474409fee2c861bea7b70e533f50dd022100fa8e3d73ea36af766140f0944fd8ff6db50e39086539016a63e2b5798fd7b619 3045022100ea1f6f5e0d008c147e63f67e42f05e6e7962966e3e95f029e734221b3779200302206ac476b1d127675872626bd1242c4db4e399b026f4eb85d9b11bede35d437096 3046022100ca6d3bc157d2e8ff471eacb9ec0cfde9894533ed4c440ba52bd98c51c8e5a060022100848fe598c2bbb768d6f913301989d86a638fe4461424e699f10239c68ed24f0b 
304502201a995be7cce1344592df2352577b12f755d40fa49e67ec48bdbe434a9d73cc55022100bc1ccd112643de5048dc9be7cde4b7acfe9f6c372a2bda5e7d3cd3a7184495b6 304602210086e2e76d5759be15e78a2903d4c37584deb3fe4a8cb3e676abb90d704f523a1a022100dc83a6d461f6b12fd329947f5ae59b789b72d8a7ee388248f39a93480330d6c4 3045022100e64023acaead6efad902708431aac6550af290a352cbbadc3703eae994592eb002204a4b202c5ace6151205eaee04a7b8c4307f337f9e95ac48fe0ae11f649cc5fa1 304402207e9cb736e8d9c273caf5bfa5f7ac5140afaca6afa6767a2e008341c6cc400c0402204757ae0d966fe4e5fe5b5244fa9855d8a5a4091dd890c5997575138b3622c1f8 3045022100aa66a4de01e2dde30b57ba5313ad1930206cdc95c02711aaed7d114454b919cc02201fa287e8fab2d5d8e8dd389dbe16d7cb29f58e8961d17cd09bc1c6aef62e96d5 304502210097dcce0f3014cd6ae5e6707d02009f607217e1ff54b7480cb66edc201101d99a022067be12676accf83656be601f2cb885c22627f2e475c29f99872404de188ff121 304402200fbdb57cb9e281597fc2ab5ddf2642eda8cebe2600509f805e6b28350184389002204968b7f5f3502f65245bb1f8c41cf275afe72e281c7034392a79f1780a80becd 30450220335728b6c43c944e5539ee535f49a8d4ddb9d5c426aa005a1aef1bd421f10844022100a696f1a1f9b1d26ff4302eb66ee1e62a03992ee394b502c802a95c5a43fdd6d1 30450220057d20b388c7b037f35c379fff354baed5542a6fd66f46297df2140eb27bd8ba0221009f96762c6bb681741928a81e81f5e5ef25e50cf429975f06068d7e756e4f5be1 30450220630a8f9f43e57316cb7fa35c62fcc4f3e0574da6f0dcc2a42c3bf4cb387d3031022100d8b6f93c94de17782f946035627974af2728dde2d582cf9963aef905c5a37251 3045022100f1f40afba178666549ceb469c55207e53b8bf5f8a0195762bb784438a0d5aa23022011a8485474b1865e70b81302f43eb620e273ae9996ea6bed0cd310ac549617cc 3045022100f6336ae0db4c7cb17039b4508b24ff2c263ae3819f372e638e26a748689f678c0220498125841dc770798c82878c0da3374c8df5c6981399bd3b9556456e721960e0 30450220478c89919ac123fb28bb507249e364c5f198f3a0656e7616eda0d91fcc5c0abb022100fe960cf9e297dc0f14ee575101fff5d5f1e20c17742e444b5d3a5c4a868bdaed 30440220772609d965bcc7f2e5cc4d8bc9a366e09079e9eab1fa933dcef4448a2eaafa1a02205b821eec1eb6da582f76b9980cc0bda18fdff68b451326c4e8964734dfee960b 304402202481138d1e27b5180e000fe68cae7491bf59ac643ba02ccb30be8bf8cc48ce950220159b5d2e367fe24d074a66db72ac650e3974bb2c6a365f6ad7730c179b674e17 3045022100d9c6fbe8748e81903d07e9907e200d28727c9baf0540c2ce8c124ca4dcb0476a02200c38b130dead3cd19f754de49452ce3e29c110cba418593a0576efbc4cbe94d4 30460221008ae1df6224fdbc5933c9c07d1cc728134dc717048c13eb8e841c3a7bdbc1c5a402210096084d79f43a4e039a52d624080dbe7bd04ba077453675c3b6419b42f80bbc7f 3045022025ea562e1e7cb6e715ac1a21d58904896294c7f811c75b7e235f40bba90d9696022100e16e6194611811ca637bcfc0fdf09b1bb1ea488c5792bdec56125d9c9717f7d0 3046022100ea8187b425b219f3e7cec91683edfa017f28310814f10d261aaee3baf4dd972e022100e2cc6c8b186c33504773425136e5a45135e3a6edf82edd21f7b937e675cdfd23 304502205a1ef4286d3281537e13136635d04d47a43a6043ee5ead6830cfdd5748089ed3022100825f98ed096fccb9fa9f6361df400a2c07f32e197ef37d484a28e004bd58971b 3046022100d662fce3c1a7ac4563a88e00f11dbc57cdf4efdee9832c3ee1c74a701bb05a090221008d771e4df3a2c1750e8c2ac90277d03957385f715f6a6897e6ff6bd467dc0c6f 3045022034608659613c6227b98d3b1ec7efadbefd0ba8087da309e9e47998dcf5abc7cf022100c59294cdca1c2be3f65dd87cad3d95a5aad296f64fb7f35b006c53b995b469bb 3044022079c453f1a9b7cc9f88131af77c162b6052084c4a407706f6d144de4ce6574d6e022017850d7e17771ca57f1da4efaf34375398c23542a24f81bb9164d4f8c3ff3bee 3046022100dedde3e39f246fa7df32ea39471fd988cdf7138031c8ee0466950c19703cded0022100ed99aa16e12127e02325ea095b1b0e77ad3e8745b6dc2b12cd402863166c3151 
304502202c8b90121f09c8329f769e8d32b5a114be8740e700be26919f35d18e05d2cbfa022100a59fcef96fc84735d4f46c8d715ff9d2bd8c89dd40fd0133957b18ba1c597056 3045022100fa3a8b8213593d5aabe65f78126186303b968c510bad72b303cc35918f0773380220776555948d0fbf2ed745ce64237a76171ca6a5c51ca2be4725796029b43a91db 3045022100aaeb721a6b266c08202ceb7239c22db13bc2590a8315172a4e9b1f780a5c82c902205a8ef6932263ec55b50cc725447d6af6450b956294412b55be80110b4b401a18 3046022100ee7ca50765b3bc43a6f1ae7ee4f8b4f6a78965339f4cc508c1321ec5e92373e9022100a1b5dcc05168da8e211bdaaeb86c1f6f6d301c363e803508963098e5b3521da1 3045022100fb2e8467be7217048e5fb8dc6ccf95313cefdeb356ab58136273c0058774868102202631744995731dfb281cf5d91f479b3d97fd61ef2f3273c3a1bf840775a0b921 304502203d10602af0a239a3f010ffedad7f48a2206fb6c775d8700d26fd506afcf090d50221008bbd1e702d111288cb7a7ed25db2811ab96b2e1234ec51d93506da4751b7a315 3045022100c9352a03e351ff233c3e2e58694267f63098f32f5d574e1ba50e34ef4351fc2a02203a7051f108ce4c113046726e7b908db25fdc1a62fc3319846534f5e19ba31927 3046022100916542bd4fff8eb045067e241895b21a19b66e0f53e73798172dc26138d36969022100af30d4934f5fa5a7c68aaf8546cc80e3f18b8262f51d42032652c4920c594ae6 304402200b19a7480dfa300e5ca90169047287ef4bc40f6e509b0d7ab93f213cb53ac4ed022016bd19b232532902795e932989649ede44dd68638c58d382f3b2ab868c009252 3046022100acf01d75df94bcc1114f707f77edd46f42c1b946d7aaabe2beb44f8cbec68f5702210094889995c67fc91da1767c78789a895d723c566e434267e44ad93e3573c07933 304502206d889c500f1c0f43ff2f70417dcb481be707e75c004bd977ffa8c1d1d803eea4022100b635e2c050fde341f73515ebc3fb19363cc0292d45854084f8b1517ef6084e1f 304502203b0e4167a5fb69f82dfb21ef3deb1d16a1d771af963b8db9c66578a4ec3e22480221008d309035b955b2cda58f1ef329b72526939c4ebfc9670a5b053d55e5e3012e2d 3045022065e44d2609d6541d3608effe56b6cdf887de3d64057d764ea4bd28e3d6f048f002210098c4e3bd505b115ca50334ee01666fc85a99d22752c2d8d1ea550a690510b324 3046022100a8cdb900cd4335455f8c96fd309d3c4d81e734665cddd3315911627d63f41640022100b5c76f342fdf3b4ce37822fbfe7704a646c208989d93fdad3180db1021aaadfb 304602210086fa8e6f6795d87c7d86458206f0cf28a42583c566f6a6bb33b0bbb15a1fe1fb022100946049a7e69bed1aaa98066cc99f6d5afe990e42653af08cfa532540152d7bbb 30450221009545e5009d74b26ccbab67b1d8047c6f11acd256d3421fc1db533e4f2306892b02206d13839262f62de4a1e5d4fd758ef35843e30c7eeaa6356d537c073ebace41cf 30450221009a9efeeec7ded153e98c8e26de3a8d19aa1a9e373d68463b91eaaf08c59f723a022067be5f828f230993b592664794770113a1bdd8f0ebf5126eb5e94c3fef37abb1 3045022100d1e2f5c764605871f9de31528784eeac17c34b988987ff4d7341e195871fc625022017024bbfb8b75a17cfaf54f652c25021d1f96d14e09d647e1385dad71229ca82 304502202e47609ea49468635c5127ff931a4ff2deeb97977d088e6b405d935560123df0022100eecf26e36578407ef9259f99a3fc6a9d6421522ddb313c8642d8f6e5337b397a 3046022100999d970162429b5d64da564f25b6ec8b2069cae9eacaf975d9cd97ee0dcc5ec0022100937fcfac275b1744573750378562efb997e6e974493fbe2633092ce6624ef20d 304402204a145b3edc6e337809c15ef873d839de45b93c4c53dcf2701ab1ac4efcd44560022042eb1558a0035aea6ff310a728cfe5c623ef6faacbb53a7dd3474ae5d1176b3e 304502210085bcfb4aa5bae35ddb38dcbee21f2adc54a7a5d5fb8d9619cb108b82f602f2fb02201c8c9706f2cc1a4b785971d34ba01c4ecc7f8e48ec751e7a10964910ccbd935a 3046022100e29eb9bbcda5ec33e34fb60883c74c7727cefa0ecb86fa61e71e047207bc7a39022100cd4ae03165f65cd77d74c3447ed38b03c801fe519fba4f9448fc4eec2a823b50 3045022100869d993b7db63cf54f47fbcca125b55a574483d02b9bfab95bdcad6273e4f6d302203cc0317a900a9c8767f1aa99d7c645c05e578b9335fc8b9c951e2f97c20b032f 
30440220617e28e87a36229c6d5a1112c22025781508e3def3154de636126e60591ff0bc0220677e6e0ad552b1a5fb1e207c6674324f70cee7af030ffe033c23165863a8336c 304402200892085a8e6be8798e4448594d4e8447a321ef62540b3650fdbb1779b8153e67022055ef87a8ac75e0c4cde885eeea8e9aee3029630c5a121b855c4cccdd72c9a0c1 3045022100a337eed099159f1969a6441a1fd8c782a9304a88518318d173c0a2f06d06d1fd02204113ddbe7dd15bb4db68ac8a1eb4bb411a67b5d87da7966cfc9a97252e187fd9 304402206a02c2be482b1a0ce41cdca0f7e7a79a05df09b70a337f2bbf45e8e182bdd55f02203e2fe46a112b701e0749a8a2033bd7db8759b174d0c929f864ccacf4dce1b0e0 30450220275589c6b39cce5b0e09fc517602481cacfe6f62bfeaf373f2592689d050cad7022100f6581186162d32d30e4f850797ed3f944a235e74d231f563673f96ed33ac0094 3044022034d032cf82d9f0790ff8c49f20160dbceb7a63bddcf00edf7077168989c8a51d022071fce055350a433c710912e0d0af204cdb743fa977dac01380689872c88985e1 3046022100dc2b8752ddda699fae0652c5d65c7d0cc0dfe5069ff8096b3dc0eba5f8ed458e022100a90f83463fae9de813d539fdfb8004de7420d9c46e9fdb6c46c2835d71c0d396 30440220525608f6e785f68457fd35c87e08d1a2f6a42d6e56cd3599be459f3201512b5902200f691779b2fab42758fea50cca029dac78962a2ed2d6d47d54682b7c9aa0a8ce 304502204f37c16a93bb53e811c6fabdd014e65c2aaaf950ee01123d7e083455be5d1955022100a17f4a4b2ef88cba66f924eac34175ae91e8e5337d31ef905fe4a10559a775e3 304302205d22f0d7ee8fdf3f53f3f9c6d18d6bdebb88a4cf7ea851f601b5a1393104fbee021f544401b87407c72919d8662bea490d7584b7d9e0f6e1f069793f468ddbde24 3046022100feb1de4fe87c510cfaf1fdb2cfab7132c7ee30d5d7d1cdef395e136a8e2d83ab022100cf3c76a06e14a249d1ce4b25d38a9cd8fc8e8532f2e1c1635c5e588cbfe3530c 30450220360ba4084badf01820a58094016f2feb96afbad11266a69783497c01ba45f6e6022100eae85e075942718a1baee78da68afc305db99a1505986b7ade45dfa535a3dafa 304502204713731f82b35ca8a8144504932dc84cc71b11e05fb86aa70bd37ab993d36a4a022100c1a52c7642988de41ae1b0d62ec7b103013db923934acb3c2b589c94b7ef22b9 3045022100c498de9fa51589e2411406967c792b1d002bc54682e885ed56fc1d66a84b60f9022028caa417c7d81225f4c5d92d0dd8788361bd0712d089f47e29ff7e37fff38aeb 3045022100cfb1a47746b022773694efdc58474b448b20ae54e3c5e480da0b67aa0c0ce77d02202ff25e2d9c2e88f7a72fb85ac51d2019e9aaecc3d5235a0133be68e3bb599cbe 3044022064b524bccfc8297db92f9ecaf88da1d1b7794e637f1eff7d7899834d7eca15d50220443a48e4ec92058d1f7d5cf5015272ffa793b25c0e9e4a5ee3297c129ff6cff5 304502204bbbefa93d49d2a10a579709aee586cb02a9ab464d83ced2dd7c2063434c9971022100e79a620c44fded7e0db844f7998d0ed61f0bdb17d144f64bdc8258413776a807 3045022100c1ea029a9e2694e927777db03c513a3b404b5ec18cd91b280fd147719fa23d950220268f92d28ec784c2a18d03718f2f4474244f14cc82366a51d6a795c330e87c09 3046022100bc403821d43ea75e3e1664e001a8ce547bb73f4f9a15e23161654b74842304ff022100e7a99c9185fc07fe89318e8a6497f77439a301a3634cf992fad21256e9fbdca2 3045022100d0213ef4eb1435deb44b9a7708a59b0fce38e8ab8e88bf4428cdf3e0538530ad02202b6f80acb9bc5c1a06bffe5f39161375ef8bc95252c3f9ade5d4a11ce450541e 304402205b31275de85620af1dd0d00733da34237e5683e6d72e26d11217c9236780441902205ee3961ecb082e4dfde8066ff7f15865fde2e7be9296d4d88a101adea342f199 304402206e561a9164903c72236be6b3134ba0e94d1afa94218bd258c228269ab5d60d9602204b4e2f12629bd581a181bbda5d04c89ee00cc1431d9794df0485e42132a10522 304402200b8feca2509374e4657ac0b6614c8c3788b982b04b20f6eb36c5b4d47ac12afa022005866c35aa963fe8452ffcbf9af0e6dfc87ea9472c432cc1952e9864407ce225 3045022100ea2a738b04e39072f8b6df6998d50ac7efb30f3e3dab0eaa9e9032bf0c8c392c022077d486d34b33f34bd5cf89e20afa08a7512316aeea89db9d1bed6f73d78d5f82 
304402201f2253cb6e61ee13986b4f38935d74d5f0c95ef54cca77d43e0276d2830bc15f022046f1166c7f5903f66ad88480bf720c8a0ecbd810370970dc23a2431567e4a81f 304502206ecfd02ad315ecbea5fa4e2ad3f97ba3d330024251d94793cb47b255a8553877022100f6e9f738e575d055041d893abbf5b3eb0284dd9f0275b9f3d01b51783e74dc4c 3045022100cd5059e8e4e952105bd0ec0ab8df5c465f8ade00e3657e26796f8ad1ea4d4bf10220722d9fd57eb37c9ea591ee42447acc3c8b4cb9ec6c16a7379ba235c6f5f7ef8d 3046022100b9c87ebc712ae3450f0f81e251f2276af5d2f78e63cac8eac401e66128fffa3e022100eb8a1823b059e77ecdc7a40542f7d78d10cb497f3e93eaad46bd719c599ffebb 3046022100fcf2861be5c45996e0af18b9d4e18594704dc21b88702977a07e570186b96288022100b347a471c6421a4514522e585a510bbebdd2a32a7689adb3742d4d90589938cb 3045022100ecd42f062d9419e3b822febddceeda8d8261b055eb15a309770cf9d27e4b43000220740cf9a7c4ebf487af3256c209f74d6a5d5f33db2bf6e4d9cd6f3cb4f6a6b98e 3046022100e5242f35f65288f9bf86521f5edebaea5ccffd11aa61cd3fcae264d9b2d68c1b022100d608147a89e5ca2e3420d10859f04446cef2f68e9c6252b15b2a18a11d444630 3045022021e8e7ddbf6ced39d7791b8c5f5810e9295d879dd876ed6136e6493e4b211cef022100fccb70c5b5594b986dc15eb8f62d8bc97a5d61a554775fd707146fec074a31fa 3044022052b99ccf1ccf336e31420178c7e06b202d155e90681d745f677cd0e7c8c1c92802202319fa29a03e577a60e5d8ce2831aab130d51fbd1763583803ef5cea7128d38e 3046022100a2b3d69b50edd96a78e5f11b8afd5bbb450173dd30d6854ff390383f3470e7b9022100f0dfa8fcc9f498bff8459170581eb2fb9582b3aead4432aa4b0901c1654eacc4 304502207f875424ebc8d1f853b594babd7a832fec032ae4b163ea66b5eff41b76b4aed0022100e65e8209a1d0eb8a4610eb63b844152a49f26036a7c30a3714c3161e06348f02 304502205512351a0319ea672ffe416bc6c44238ad130f5275aaffaa24949057165e4688022100861a70889089029d7951d903d6a3531d995109dbb3ff7bd1d9b0bf44fee7e8b6 3044022020cc7828f6d39b39f2761fce45641a542abb2e31d1abb3bacb716d888770e50402201622a9dc112df9a85f1c775120cbe557be0313db2a04a79698281b5ea8b889f0 3046022100a4bb437354d513755894e18a3721b1831a54117d91d0aac5286ef53978eb546d022100b4b0c34197e3349577ce27aa557d225fab87b83e1a338f8236bec3cc8d8082e2 3046022100bcf98000802ff4e6683a29311e9ef09059405bb690ed46d9491c0a96a6a50a17022100feee117d6a6d48b1beff80f68f7f7897bdd861d840267576bb6b0194631b09ae 3045022034e984ce130dcff7122e453a7d4f8ea063e97229ca7c97cb797f42f7749243620221009e4ed43cac541781a56b860676ab30fd30f342bd37c9576555559e06fac4e8b0 3044022045e3b91936e31446775f23ad27f0243eb99be0e3f743dff7722eedc7d4b76a9802200c9457dec9f51bb7aaae90a856dc48554edeff1ed50ed8dd5f765199717c7801 3044022038c434804f1d9c8b8e8a3dca7c59a97ac6652c305893d2d16144d9c408fa7e8202203a399183e89dc553d4530cad560a0909c3a00a8994620d4747da607afb0b926e 3045022100816647283321881bf2cdb85d5f72c26ee9ba8f262196754f3cd788cca0fca272022038c638b5db443df45699f1c968081331338b8fe60d67cdb0168a87564e64e819 30450220039587ce5af046bd89a065b9ea902c1a50e58a9253bc9e9c47f468d57ab75837022100ab70d2a456157b1fc09d47c3aee5e37e66c79f7949fa99788584082f7595f0f7 3044022073537e8de60205324c37b30fcb7b638a1a0808f28e2c1976c6c0212a903934bc02205e4c1ca45aaee87aec803f2b062068460072442cf01662bdf52162a6bcc29754 304402200f56f297a74ee21721bf063d1ebeba6cbce43e5ad3eb4791614d147f6f02b1eb022074db6dee92f91032813b326948c2f69d1b96beb6664f0ae152dd81b4c37ce11d 30450221009bb90be5065b981ce9e396dd688c53d678ba549377e2ea9d3301ff66da6fbff0022052434750b97587ab295bab13df5017b5e258b513466aaadccb633fb78027d5f8 304502210085b880de1694878393bf6c505fc7a9f16d6b6ec4e38ec95e4d03c24492016418022027d182083bd44cbb620963f23d8e73ee8c43547b13a7f09cb4ffcaa2afa286b9 
3046022100b3accb81a753284df9be090eaf9b17c37a2cd7e05bb7b5945f3fb293aa8aecbb022100d6c6261812d5ecfa2fb94410baba81d27f455f7320cc5fa0d7ed53cd93d2a291 3044022026116d49ea3dd3119773a4008f03cbc81f37c48d2daf6094d0a2e0ffdc1ce7f00220259d56e73164ec3651f4f31fffd5182ad47b463fb63e98569b20295568f55e74 30450220513978ab4c3181ba13a88479361e07cfc2a19ac6289b3babd15ad7281f8cb1f602210087c659b4b2be325666d44f4a79d4e5c693bd6d24ac7335113f0e4d2d4b646f3e 30460221008df6c63abdca4fd6b975c0f2120e4e9a621ee41fceb47171b093bbcb1283d535022100cf8501395d4c3fc4984615e11b417a679defc16f9cf071c4ddb78c7a7f8104bb 3045022015df2d3011399290cfc199e634660473127d82aaa2daf1f5e9efb6f3572fc56f0221009104d4cf8d940071a8a73f8d1bbf7b5e949c20730219bc054600e1c02c135eb5 3044022028bbda07ec65dc153737bfb05318a33f9710a4a0dd7c20a9732f5b63d17476ed02203b54b82324b6e0ab2fec715c37a4bee21e0f375072979e3d89abf85bc5518def 304602210093bd394fc8d23f474104c49e67afd78ecf39dabfc9127eb7a238b92af206e9f9022100ec6a5b38113c5553c2aa8152dc534370d9ade2a1c8930ff38e481799d2145cca 3045022100f6267154455bc051175ed7f2f6cb8491aa9540c5ebe056e65953065ba57bf421022021036012e970fa8c9ddf2261ed24e81f9970971872f71800924c416363b9aca9 30450220366d966af7002efa99b5f32db42820845dfc15ad7fd385faa2670b54771bcbaf022100bd253b095ff161c2212271322edf7fc86c29ee575f95250bae95060e3aaac5f4 3044022047c86978063c11303e34cf3e2badb3b9fd630f1893d417b5c2c1b6083001b7c3022061421482466339d734358f127b05b86a2a4403c810df21fb937c7059de869d52 304402201a07ceb12584662375fe5447eb7a36f1a9719b8114a54efea66fa78eb9549077022034ad393371e9f4f3393f4e96a2c5fab5d09668fc524d2f888bee5be62b7d729f 304502201070889ebfcd2c7228999a2889e92668fba9ddc79ab13dbcc766c690a45b4134022100af98430c397853aaefc537dcac8473a19213a48ae7b4eed894bc9da5e00a322e 3044022078983f5340c78032312ec63180437522504f1950210ca782b798ab2d57ad106802200d6e2c1f6f20f9c37abdd854a66a6ff856506aafab4a8c23eb0eefb94ea4b0bc 3044022073421dc1d3311d209ac605b5e7e4355e3127bd6967aaf5094c6fcfc25434bf15022062d91947b4ae2fbe67cf832673db46f30f34db57d53c255cadf0c5c7969d119a 304502210085da27942910e38e3485670f48fb49b5c27ab697ac729c2d7e3b835c75c2b7b3022074c7bef9db311b952debac027ab3f5bcb000ef2589a273dd8601a9621f8dfa5f 3046022100fa06281447f3169051ca7b1f8ca0adb5d02c734090fca67b7d72de82166181f2022100c7d474976a2ac6a51ac02724734a5b3cd0601783c76f320260119062e5ff2fef 30450221009cbf39b4067d7a6f22b62c3e003cf765ad7c7e404997d1695eae34088e40f0660220091fb56f59c7e028f0ea8ac8e6a58d3281a841f0e699a5406da13565fcc4de5a 3045022063d8a11585289f8c77db3a68947ebd1dbad9cb8560c427f9be57436ff12fb0b7022100cc662548096a14d2f9953d010389931f62277229a880566d71c06a81ee37c7c1 30440220487add91ce68afe7ca864f14124ef35c6be98c97e023385990937d9ddc9c008802207f09e86d9bf90302b7be82e127318d8c3f554640bb2ab77de63e650ae5f9c226 3046022100d4271cd805ac8082ecc21aae12b89ae822d215ec78e92037cf7232a5cd3b53d2022100d3a9dd63c0d02a2ceba5762da25242d083e2f60cf75baeed121f35bbcf3d1192 3045022100d8546e8e05b1531f40a22a9fbba5a15c75e27f7f03a2ff2f966d69cca3db7ec70220358c73df4296b58eb86486b36eae70cc5dac7593eef3575562059b8ba4a52d05 3045022100b3258456ef3ee3a356a3cfe78b4daddf3690c04a92a603fea18988cdcf5b8c3902207d202f680322a5d0379bc9234e355a406dc5c84a522513fce1ca4dd87d4a7cbb 3045022100a46e1a05adb6911247dc0dffd823e60b36fbb8c6cdc832858f96641d841af71e022031df7b85e96c08534ac3710b893db7f70ebde4be863a9737a04880c117b41c2e 304502201bbfc20d614a1345367ee1de961a1b59be007134c7ebd86b36f287784428716d022100acf30269afe9bb728aece646f231613739232cafb9ffdbf8c0ea46a979bef359 
3045022050da8c05b220e399fb73eb6b1140176d293befde3f9b0a0890f30380c8d6098d022100a6fe037113cc3eaef970f2b645b7580470421d554a15aea9e0f610288c218371 3045022023abdd8ec56191d0778415c964e6c747bd8b002fcd287bb4bfac02bcbb9d69fe022100e2179750a21260b43f23bf8aa03042a8ac1fc5b7e9cd8603386ea197b1f234e7 304402204219edd8e690f4ae1b4017da2dee6cd6e57201a43a87093872f43d9fab42538a022059351499876618a552a2ccc98e1a2f866d3ca1f75b4f6fc8b59ad7504d9a46be 304502201303a9c24fe458912a9d70b3ee11a7f89c79aeb939d030eb7059fe0159e7422e022100e64d43c04d61593086ae5481e4ad95451b8ed531ae2e7286ab4154b78ac3bdbd 3045022058b6432b9751283b7e30390065aeac9a85210d446b7f30b67a11c044bc7d00c9022100fcdc97302c280b25d59a4d1eb2f18620787847db8bbc31793ed5e4d6e59cb4c8 30450220122962e0a2a6df67caafa6990b3cee4f3810ce8edf820d76425bbf2db50b1101022100e6fac38c03b37f560e99f94b8e8a5be50c5a5ce98894fe24b7d54f770614454b 304402202355c16da9957c2b4a00ec543d3fc7fed400fd0ffd9cb369e38264c5ca4eed8902201d57c0b615b2e2a46bea0738c6eb58eac753b9b0bfa4b62c4f9a3f0efcd3ca5f 304402201f18589367ba6f5a3fc2f754c78ebfc0a607aa46456d377dc27454382ff2c517022051a61ee8e278c007d52375447bda3196f570bff5e701a2b4de796db803d86c71 3046022100fc1565d0d9fcc0f75da53a69228035504b0c6bac8cfb3146bd188e32b84ae9c3022100d181496d510283bd3b6d9f78b3399fea3cf0ec62f3ec4fbfc4173972335fab64 3046022100ce8ba2fff236065774a2e216e048a62a8ab98dc494c19fc67d699eace65ffe58022100f11f821d76035303cb2d3f3d8dcfce543b53b434332db8092c78a16d361ebf5d 3045022100e032fc33a89db8b51be03328d437b60208f009e6b3c933329ca34741a9192da20220213e80b228c291a7b5b37f0ad9b5ff5dc28198db10b9fd4abcd39471196d0946 3045022100bd59d978b2ac33c444c48c3181667e498c0b09c639c32edd132c7e823e7390c502203e80c100c6f24cbf065577d801555a732dd88b2d3fde579a14f233b160f81f65 3044022067a9bf40336b5bd3e3f50fb7ecf21a492586c4e0d88b775e51dbca9f129146af02205cf60e3f1cf18532f56e61c3376837824cfa90e4f5f99a2150909bcaf75dd5b8 304502201af85d27aeb33d4eb1fcfa1b7d160e4380bcb6dae89c0821ccb764d3ef321f51022100c0e4c93a3e9a62cdfcebe5f9135f411b4c1024401124e8b810d0f168bf439223 3045022100fad9386fb34f6efc4a2de70878950eed4670943ca34afe022927b68d88bfa4ff02207a7ef54ccab94b911a599beb814844da6c4ad3a55a257ec6aab85a6a32e77013 3046022100bf02fa5347c3fcb0bd58863cb17a81058b14f99282dfae32ac18245eacadfa10022100c223e6174256c82f4a4a32be05824820e43a40039b1ef0e1a0ae354e75693d39 304502201487e34b1af3b0558b81d9668fdfb4c53ba025365a982ac5411768cde82766df022100b3870f8b04c7e62ebca878b220871d6b56c1b235c85a86925a2860173722ecd1 3044022034134c12f2570ecfccce707e084c8addc1f97be9ddf2cda4bbf9616c5bd7421c0220456152fbf6e1b7f6a602a7b4b6ab9459d590fa4dbdab2a49c4e799abd5492c3d 3045022100e8e9063163ed34562be2c8bce7bc6df7e589bde51c72b8895c431c518c551093022068fcc64869847e033fc1cc44a2d80be8a132be893d849e0a4214f0ca6ebcf495 3046022100a9fb6d457cfe614036f047aff9896ca31838751f6438842bdbc5ce7b9a828820022100f8d1207c618889b9e040cd39845511dc794bacff29c3aae27d593495d9d4e2aa 30460221008189a7f192f1d429c8d813de8e5585b5b87b8bf69eacbe390f42a6706657e780022100a6b5309d014b01567f3777269aa6414f25ca993e91b2cf1910b2c0009f40ebe4 3045022048674a4f5449addfe682a50f82cbf54caa98e5dab32009218fe05425455c7313022100ca541ad0dc1dce93d2c17cef5cdbedaae091c0c84cdb53b115dd0297294589d1 30450220679d05fb2586bcc4db505ed8a0645e57119852b5e50caae4b4068074b3e84318022100b80740b1c3161262f29e227ffb706c2c64af3c444e515818f081aca72f674f59 3046022100b172dc21e5cfa9f90be53f251fe4e3f88bed676b81a52cbf76dfa0628dd01e9c0221009a4dfd930986f8c1dc469fa6f0ca913b0e296031602e1e72ad2879eb3f6214a8 
304402201d808dca0334970b08f515db18d5358500c2086dd743a371901748dd7ef0c2e602207a67baf4ceaba7f0708c6a8d331d25c2a03a8f75f10db2f0e9b31addf79b3e77 3045022100d2a309e86e97d84d2b5048cfdc2f9b66b16d33f1f527352f94439f1aec98204b022074bedd3c432fe821e2b6a0eec9921311f53cbf1827855534a9f6dcfda117d77c 30440220743df3a00f27d818bba63f7c5f09a92397a9348b062db96619a97b083faa6fd102206eaf932a59cd01ba384fd7951445725273ccd567b279befa5eb7d3280462f2f9 3046022100de0ee210d01fc783c45c7a08f709e2ffbe9d226835d2fcbcc2fd805dd9132708022100cdb7f0bdbe1216a697cb5e12abbd368f0e8a761637011e998c7b1b2ae5a8892f 3044022077851926f780e4a13b07a835698ec2262021ead894bff1b8b6dfc3b232ff1be8022063fb533e1e8072aa818627a95f43c1436281f21009c3eb19223c7e1d03606612 3046022100c429bb0bb90d2aa760489cd84ca1b06b2ce11844f3fb23fe66299ed90b5ff4e0022100df7bb52c6846949a6a0acce7757de15863e8e6e46e619e9f16615ef6754b6f2e 30460221009de6690eaeb2bb2d8c6542b5fb17728f416da74cc3813daf7d5a8307bb61479b022100a868b56f35d3a22bcd67403bf92a387913b2e62ad5d2027107a35e50b06249d2 304402205a5c98fef72716ba5547978d655ffdd4ca90011535e0a7d314dbd3ee6a4ebd9f02202b5232e04ade068d8589ca43d9baa49bba7956075d015164f196061281cbea87 30450221009540d31ea4bd0830a9f8e7d18e1f22d32d7c87d9f62cb85fd7f8923557d82d4602203a90ec0b923f07393fcaa921180963fa3076f10dc89b186b7c8c4513d87a601e 3046022100b092a4f61d972b1b534107f524ebe0945075c912bbc29247b7dcc706effa64c8022100999e6229379eddbba9cacecf9bfefbc899dfaef98c49b10955c93288d92f0364 3046022100bf9fdb8b333452f6c49a89a26678c15fb31f665bcdf679b3368cf2aca85b43ef022100ed2825cbf94de2b1f0e7f346396c8af2c4cb7fe1a35cdf2644b48c39d6341cb7 3046022100ae5d5f925c7e6a9c119a314e7476c8f1e4b01d46dfb2dc9128c4783302dd5c92022100b10cdb5c1961a5ef4da64e4a919e24fe1f55e152c644f600e6694260542113ff 3046022100873e4ecdaf195246fe20b63b0b25852d27edfb4a25d00bcc23620f4565d9eb9c02210096f7c92e6b1ba17c2a06b98eaa03b1d6346b3d851ce66997fb1cd037b1120857 3044022042fd90dbc97e5325505a9f964641f84962471ec643677bc96fe934eeee4415b9022000bfc97d38d99c5c27d048f8bd1ab5fe57d105706f8d43bf59f9caddc81db3e3 304402201e22ab9ea1a847e1a4c1c29914cb9eec5440821280eed90182b0273045ba8ecc0220490be403457a3482b8ef4c1532f6bcfee0b5d48b66ba2993e4c3b3c7fe027437 3045022100c9b49de2d5d2eedbee0030878a86bc46c4b01bbacc91ebbe892423ed0c040d1302207add1479ea57e13748500c053de502286eb62ea2a224130e32bbf9055c279130 30450220120906f3049bced0db172143934288f6d8df529662cc5fcb483425a0582f148e022100decda0fffc269ed61bedc01614744ab68091480101e76746c1777f26fc106a64 30450221009caaf3c6429303e01ad3adfec4ad0cfcdf7f06d611d3e44a128f08e1aa1a306a02202ed8f293263ebe1cf56a6e380efc3b1449b1f45229782e55f4bbc9a3f6924cd7 3045022020b56151acaf253aba665b3aa1dace2a0dd83b97f2f05f8a0a24b0fb0144fb1f022100d9d4219147ad15e7256d5bcd561eeb2800a336d75c2346685dde3d394a21f337 30460221008f6942f73b628638578325e015e3ddf87eadf2d60b68276d9eec0380295f84a0022100de68b641c90d62f86397cd0489c433fff2a5cf8ee17badffcf1646445263c1ad 3046022100e089037b8dfc48be4852425c6b0f9595a2dfb0448e1e993aa4c3daa37cd30f13022100fffc8e012996ec00cea62ead97ca59096bd430b1a8cefa1880488abc61bea656 3046022100e773ab9464e18c8d6fac66d12b357191e76403df007b268dd4ecdccbc271e9a4022100f0b0ce7748d5e08b108f5083068aea595ff853e005db87bc49f80b10e8bdffae 3045022100c566e393dcb0f1ce93cc2f08930b8c88019735ed84d9461b4200885f5187dd24022048498ad24312c11406b8a9a3ed19f2bbdaff872e5fe794cd1cc15f70ff091e31 3046022100efe776f6e02fb655f94aba381441f116b5d30f940ad5a3ab795b29ea2a615a68022100fbcc04a995e9a446148cc439c6fea9a8c9726ab80bc4fdb7774605eaaa15afaa 
3046022100ca902c296fd156974f1152792280f980745824ae80dcfd9d92b759087dea1141022100ea29f902c181572673c937f284914f67ee730bc5792b561cdfa6f422532731ba 30450220755314e1d46e4122dcc411dce3ffeb5fc38630757835dceb44a8b4ef24e90193022100f3314dd818a148e0fccfc079633003a5c48d00d416741c92a990d4fa173da84d 304502200222731f84965eccf5dd0c077feabe2bd112acfa9644dd39796498e5964067d9022100f574056666365eaa52100dfeeb3f6202ef8880cd176607535ab209eb98634ebd 30440220793b1a7db57c05d3014a00088664c1d69bb3b50562a650426f3e6d35b6d258a1022060f83508aa54b034d35647165f543f206323d94f9b7a7e256f427d8b46eb9521 304502207a1c97dd4abf15c698ae411004bed16a5ae1d26eb67a5185897b15eba8bfeb6e022100c52b28d574046346c760c01a8b1cfa7256c9b38b1a1d659107a2ffc4f868e771 304402200e982139cc894683b8d659a383ea2d4849923de6d7b365debf12c64b92af7e5b02200c4c5d4becb5c917601e2e66a3de1845463b08c1fce984fe3cf59b1d1fdcb5a3 3046022100ecea5cdf03eae245692bcc33c1ede2e5da1a780dc0f5526c52cf01e3c68d37b9022100b0e0ea326525205a7e7353a26c5c88eff821eb2bf1a8db770736a75ba4260551 3044022048c911441debfaba5fda20df73ca787c3b36726fe528cd2d29eea366fb26a7b3022029c87a7062c6c0634d5dfd9fdf3ba00b69e6e3ad38db77582839a83a7152aaf1 304502206c61734b61901bc85ea9f0b81a27c5c0a3276a1085a6d608f478abf108d30c3c0221008a1ddb43627758c4ace020277c9a34effaf9b7503f115c72a52454c13dd6f864 30460221009ff1fc66dd832ac1796360ef4df87cc5b630fe521da521f66b9215c6a06dc0f70221008ef4a0f9d057b1926446550d68c878b6439ca360d2887bb2c0f8fc11be6c080e 3046022100ceb2e72cc8a4cdb02a82092e3e92db5e6e230753083d8f7d41e393d894ba3a8b022100e8067ae6d0fb220c2b60bd7bed1457092ab78d8f8cd12ffd4f21eeb3fe0de70c 30450220304efb633d364cc2ea269dbeaf55217c8994b97037056ed4e8e59d50592be9bc022100e37fc8681ad5e268e4eaa4b000721f354015825db52080fe6ab03727db389bbe 3046022100f75dd51f03636d5755820c93eb9830a922c5c8802a1c4fb1f269193fc6f7c6bd022100c3ff368d51de0ba37481c8dc728d61e5f683ce1496c39765c265bf3e3d9f916c 3045022100fb07b49400e2eeddb3fa5fb7088f8119647617d3d00e48eee73a0e461b9c1cfb02202d3eb846e5412889dd9065303df0d00c2db2220456400b319116dc6c26f48958 30450220056d515cd9499320934564d6fdb080c711d95b4078b2db17f5c192d3788e9f30022100dcbdf1acd1f2b0bec859cc530037d8098bbe379153a12b3fcf4f88f4931b7bb5 3044022059fe35ba926b42ea45aa778e13fb6a2e577c39217d92aff463e187920e32468d02205ed8c662e6b901abea495ec8d884ceb0586da8a6b69aec6e890d4a65998fce2e 3045022071fa07fdee6e37ec393aeb07cbc9c561e537103846a96ae2aac880e1c8ee9e39022100a8976416e42bf12932396f0ee97fb74ac6881f720dcf7a57cb30bf692a0748b6 30440220672e47947f9877c9a579d34a0a6c80f291642f55578ccde643742c1ba066e75d022010584d7fcde4aecd9d97f3a1eb926d3447026990cb986caf0eed4e9bbfa9d065 304502203680ffad958f07c6c74c99c065cd754f1a43531d77d5a3222fe12a7be75c38fe022100b8462601778d6979d575716f158a097567b73c7651adb1b6dc27a7d87561f207 30460221009bb209f13a98bf72786b0dde3e1ffe737e385661085226ce83e8aab4be16375e022100f034914710fd7970ae6be4c650c3cab20c8ae88e6d269b4b1a19a29357b91afe 3045022021cd0d74857e51cee568660f5c47b967fb244c4ed7ae9e75957b4e2ede31c38f022100d62b34599aee2a9345e09efa88a5b68d522a82522584e6412bda10af5a9bf70f 3045022100b3bf4ebac263fa105c8ba8c895cc0393454ba7b696bb5e7aa5e4d9ffa3bc6bb7022001fd376460edb81ee46742bc3b89d42979bbb9c4bce1f88e3c70d9bb2409bed9 3046022100995370ed0cca9cf859cf0345654984021f612d635f90bd7f24a63f3db36c2baf02210083e11795b361586fd6008b5865374cef10a14e7670d16d3ee1376df24d4dfd95 3045022057e982586cfcc90f37494ed6ea6f0f3f4e65416eaf672be020c5de1c1f33d11f0221008084d9749000ff27a42d321a955f10d15c7064db730bcc41c6696f4acd5bc837 
3046022100a38b4a5bbcf893310c3d8afec23ffbc9f55ddf31b684be5ada4d88f699296d3b022100b4effefd37e93cf9bae3b7342d66965eaefb062df6d81f5bc006a509f2ad191a 3046022100d8dfc1265b4697b057932e03fc821385b0e4e553c264efd1ec8469ad77a3c9aa022100895c1f8ddf33690c0522ca49d23cb57348eb258aefbc3bcc5950e818f1fe5bb0 3044022065f6f369dc7b01354b25e0252221dd4501156f6843dc3e026a2ad5703dab560e02203b7c505ae9abcf97555c3180e5dd14765cb41b26a1c21a97cdf4028641276507 30450220322599db92d88632f72d06387ccc187aa945139df23e6f4c455e4fd412e1aff5022100d14e5a489e299f972b124fd582ee281d5827dd5c09398e2c1d61a23a8b304d6f 3046022100e74ba1d9e23d4e480bb3a4e72241d21186da3ff172d753273499e08541eb12e6022100a8633d1931625d65018d35e9d37f28866a55d04b3cb235044b7c7f4127fcda22 304302200f06bbae6f50ac51eaa7bde60e87bec038e315f2a6a3e403a6c6c049ae4d26f9021f1ee01ad079782af00a9f60c4a35437783250b6b4160d63d7d4e22c6085c609 3046022100969926ac2420c3b525ea24ad19b7a337157c543ca6baeb5895933bcf7c8feac1022100a65314de1c7ea055d129dfebac4f88a70bcf318ec2a948026ccea15d4e1fe44d 30460221009ffa8378c221cc2064ad92b9f34e38097c21a96f820a3005a11fa6cb3bd43cc9022100b4ed76f2f4208dc0e700f05a042929d383daac6a41f4a10a51ea47d42c9b2b8f 3045022008210ace18dcf2b77a98e14d2ea3bdbdfb90f0bf1e61beff7ecefbdaa26559c1022100f2203ee7b0d2e18a31bc6ce90ac351581a39b4d546c636a7d8d0823218e6c03a 3046022100cb4e6083e2dd62ebacfecee3a4fba3baed0f799d73e29f2a4c1737fed823698d022100fa320cb05de4e26767c87054f111afc4ba1aac1698273c52e8937ca57f71e4b6 304402206cf53f4b6228afab23d6a47a725ec05f17d6d9ed1d5932422e09d1b06ed5452c0220139378529ac2453122f1009e0d2ad3e3f9657f3edfb1ac267b7131c0c8dfeb71 3045022100f9d4125c9724908571c12708ee6798d429f800f22397ee558cc74917a7c365d70220694fb9c90a7d8ed7c70edbf90aa5b703aebbe230b6b8943e5ba4dba66cbfb8e0 3044022024e79c962400d39f678bedfa47b6daa03128b0b0e3af137e200cb94f5264112a02206e84884d19948fe02e0f201fdf7d04c3326afe33e65116b8ebded8ff8947a9fb 304402206d83d2dcd7afedfec4e841d6b3eee2b14e643bc20d53b5c27338a78854775a4a022030d1b738bdb402cbed6d4a371c8811cd1fc64e068f2bfaa046b5ed121b272fb0 30450221008fe5a263cd415165cdb66894c068c1a3c2fb191a8b5c9757aeab91957a868ceb02202cb0c53b17edc2c5b6048f13c6fe5ba498c02cb6d45d0b3af28bd2ddcfcf1f63 3044022061c48618a92ea777a6148c27174ba3f4be47f02d3ab3cebcb27b3f9270bf9e4902204d7618add2c60132da8a51e3dbeac7e7111b1406bbe5e931913b007be4e13384 30460221009cddde289ef381c4c697cdbc640e43444e5d51f8b49ef66b0944c59ffe9c496a022100cdfca6d8e1c04966e5c6af5c94321847ae32319f65b73475c3bba55fc4e2b102 304402202c684d4e83d39d04c17f848a659331af66c7c4424447b9ba822ea3bad15a3d7f022021eee607eec6fb36c17704b933151e15d8b05eb2f3acbd7a102ccbaf52508ed0 304502201a830ab1d0734b44265c031c8737e1c2b19961287dd55d0c70ba65bed72f4dd1022100e75552e0e6bcbba57b5314751d63ae47dfd2981d766fa9b31156c3d08e1146b2 304402203e1b46c2fdf6b5f1815ad115c8c258b5374df0c76681820fe140d96e4d3c2ca1022073969ded86d880e6c68abd334602326629d51593ae7fa7da5ac8f906a8f592aa 3046022100ed5e7506db25deec00dc222f7900e7cfaf9f3121e9eefc04644ce48d0afe8f0d02210081016a2ee565e3512d54919404d41772d8e50dc4989bd271be751951b6fbae5a 30440220533b7eea03d32f477c9247e20dc88d574fe206ba9d1d8884fbbdb9d1f1ff88bf0220222f17f877b6efe82cd4e80c9571ee63ed69b1b9efeff5be32c6b8a6d9df23e9 304502204c11f4450883758740e1ce4adb1abe8b06f7e77fb43578e3a07765793bd69cc0022100f8d1d0654909d6c5c0425ff8fc726371608c3246c7cd7dd722d352aa32a84622 3045022100f4606b6291ee2d2a631396f4efa56e6c0e1e4015ff15e5fc3f9a726233de34ac022036905b3b394d67371e2652ee9a81290e498e0c08bc93302ab0bf76222b367321 
3046022100d3ddf3316e1e8352fde6c98da4bf9dcd78e0c6dedadd3b56804e33b6e6957b4e0221009dadf8adb6a37469d54f575f0f010077c9156eceae547ec9569cd978c7493024 304502201533bb20935d8a88a3c7eb868deb0046502775a147c4a35ba665bedb56dad249022100ea7d051fa4d56f0b24815f14342248212fa1c1206f4165329d61f709c96a1bf8 3045022100fcfcafed8b1e98294d19bfe36d60c7d3e3fe2e6f8ddfeb0f1d7854c1268183d902206daa5ff298de26688630915ea41abd66ce18bf2e55c290be7e20fde4dca5de2d 304502204aac1be7da0fac8ea1271eeaceb99d88d7cdf164ca538ec514d59fb8cd73ba010221009a99f9830c43e9830b60ed7db0519f2e11e2f7e15b2b147cd2cc83eb3883ed5d 3046022100e88a1d90b7aad1c1b960869d1ba126fb770b13c4f9fd524ff049af180b458060022100f76fefd4f91ab7c5b33c1a9da4790e37d441b1c6ed2497fe538cc6d403edc415 3046022100dfe0130917718e3101d418056d781f9f387615beec1421556073e0667b64dc2602210094f7c646f71f61e6e3f153efa05a06da89dce7ef3b813500f228c44f145e70e1 304402202efcf4ccb02c6186f21a41b712c583727632c3f9da8e9c05034d344d7cc8e0490220661eccc87cb8da28703fd8514ab5afdd0faf4e59974c538ebad483ddc17e0e0d 3044022057b038c8e9e2092d2e21af819875f7f5109797893c82f4f778e2bb97586aa06302207c840518414c7e716189f0f76a350a3e5c9d76a14e217e660f43bf265541467e 30440220484dcad3c95082949d99c0b8d5c7e9db7b06f3d8b8f0851721c348d1f59d7bc40220334706a822d786ad37a799304d9b8ace4a778bee978795b88e64a2b479e2a409 3045022017aeb73a86dff585e60a94dd2006d237e6b1913784c44cc5bfe08b4ffbf8d36a022100f6c0e75d2af1150f354b779cd92ad80ff5d9a3b72adfd5ade783f63f93f8f170 3045022035122e15c7e302927851f3ce251c16013b657f8110bd3930457bbf6868e0510f022100c7f39cc23770e5651e13fdc70168dccc5870cd80f3a10af5475e01184ba15464 304502206b9d40db712a72c04a43fb1b53ddcd412ed170b512a8353a847474365b31410d022100b06f8667e70d01f4c3659f9e262d5bafebb9bd960419f12f9ef36de10c3d8358 3045022100dbdd5036975b06b51981a9986903bfc032347285a4655a96445d48f44b5565f2022027482f415f9d3df1554b152f0ad1988afae431ebc54eb2989821808e6454ae7c 30440220009ca58da4d6b5b73ae15895facdfdcadcc40a2af010ca60687cc18478be8bb30220464cf8ff1992601241a5018e59b373ff138178eebf05626aa71823c23c9c4054 30450221009759ef56181f3feace24385c769f7b135a7a661e4d1417dc2d8ca9e7e18f2667022051e9dd9d7c883fe978e133f35cf871ed65fc7891c1d61808e9cb9f40778117e7 3045022100e2a1104d5dd501846cc8d9f3c22633b04eabcfcde8df77c746ba288dbde63513022019d86c66d79b83bf3cb251c81d35ae279bd342008d2aa8c82a003d79ecac3a85 30440220667957adc396d56d18b4846c4d3d628d65a0396b058a2d555292dc7b647c472502206b18964e296730633769e2fab8a0b7b4616ccad57aebd68eaf11ebacb36582af 3045022100cf38c3910ddec6a07f1739e7a283e9043efaed9ed893dc2b70540425eab5e4f4022072bb3e8e6577543f83b664a056842873737881b98c75a1715612c85ab9c81993 304402202997517c642e04c90b694ab4b41aca98d5cb2ce799cd6cf43404b4a762d46730022009022c66d689079bcbb8dff9f7314374746452d74004e182e2e7ea10e49b4bfc 3046022100d1037a49b1c857112e30d1a3d6ab3deb767cb4692a6724a0fdbc7c2a9b987c26022100c743a11366fc472011a9d043b15cbdc2b119b086a8c0ad63d4dfb56dcb7b78b8 304402206c4f965e8c0ca56cbe8e554d0442e142093e764a960d8cb9d04be38899250c8d022035eceaa4538d04b81a534292746c464b412063bd9e17ca87085dac3de7d0a735 3045022100ea14b5434e2d8258249bd2ea8956fb3b20a2ce2d0de4f1942d7dbeafc6ee461b022019d044c7f6bd31d33f2156169ffaa58a2956e334f40dc9a4548aa235d8551586 3046022100e50f367494e5cdc934052d36c1dcd9d2375a87ff4ba4c36eb2f646be8e17a2e6022100ae200d7f4a11529c34e80a2dda19f5c59908751e2a701d8a7eeb4681cf78568d 3045022100d1666b1362c63cb7842870fde10f61144d5e9f9ff9f403d368f79b80bb1de80e0220562c0822f432ebb295ef004793dc9dae32b3cfd03c60fb8b787729da17f5757a 
3046022100cb714a12b2f06773eae3da5d265bc98fc1575c1b8f25f5a7fd06d85982203de1022100fbf31d4345da24eb36ecae618798c29003fc213c0e3127e3e9a03152454ad90b 3046022100cf08abe66bdacc70dc08a1192c3293ee5a6fefe2b563223b99a0937f01df1b720221009c4b464e25dc041e7f707974cf55f27c058332e361d545d3ad899e94cc1464e3 3046022100c561769bd367432550759a96d5daee47bc415daeb88f7647b3f29a0b43d14538022100b32860acf6cd5bc1da96404c43606760e84ec90e79d2fbecc3af4b3d62242cac 304502200711d2302338d044c3c13b377d1f1d3bfc3fb9c6f45fa763411287f5eda6586a022100e5836c904fafe0e44578c69119c34058f03fd460f92e7e1dc6f2db1145caf1cf 3045022035720dcbef7a4741e271ae70845dd0aad14a67758d36875d86b6b37764a14b5e022100c318e247cee216931e3896b147c7e7796a139fea59a02eb5063cfd5d8dadc5cb 30450220349038ef46ef28291af1cbbd6cb97ea19bf75839348e264a6cf895d5e43a55d9022100b0c005e062add2ca3ea6f30a9c0b3b1079f1a9ed45bf0a8ad819fdb428772744 3045022100f99d2cd036acaec51e7907f58de16307928c501400c197f2751013ede0149a66022073229906b575f00b271685796db579bc15329517b61d61e6d179f7cba2483b9e 30450220620475617e950b825fb48362ba6d7cf1879474ceef1a32e92720f21ff5d2576e022100f8d704c2b9aaae24e64c26bde5be4c9f73a4200b49cdd4516b14ff694ffec285 30450220656711bf6d29db40090bbf46f23c3fc7a26cd9545257ea3a1a1fcf832974806b022100c055311a3db8215c4efd21a64b38d3b3a084b3df90d90ecee393d840743bfb09 304402206957de537a1138b9b79be31762254e3931979477666a1243b5fa5461ce68df1c02203a716cdc14970eb718adc8e36d0942e70c9a8d5eb53b323a583e2c12a554c46c 3045022024de6c579049c2592f8bbe632142e6f58b6eacfc13e9475a3ac178e745d14c8d022100d668899e19de4523299481805752f25a790a6f2ad78759b551e5a9a6ab46012d 30460221008db6bcd3a1ce35a69844d498f8ba053caa69d6e91820e4d4a1cced37bfa51d21022100b38e3a318364d3c01c645596e550b9737ecc30878aa3df89d6ab7de776278ec4 3046022100d5d35212698d89bdaa0a1faba38ffed0c30830243644923d7b2ce818578f41e4022100b743f87c79006c92796b03b67bdb8887c925c8ea4d08a5dca5940f25d9ab5f16 3045022100eeed2f6b64bad176cd7b6c22b946f9a032f68b5703e8c4b5817d34f36f1a6c7d0220446715b5df2a5ceeecc5f4d6e713ba5a92a732fc2bd45872eee7379cc5d03211 304402201a9199f0c8c80515380c23f758bcba9bc0d44e0c743e0e657c204d44aee4a994022000c372e8d1e5aff190e4058ff69a1a6644dcd05bcc33c764b946abf01cf3c2a4 3045022046270db41ff3d1fc308f00e255781470ef8f9a86aa4f676aa075b02d325e3eb7022100a9e34eaa622b285021a53c2e1d68524ee6c3b6f9c6218a7d8a376951e8c91f3f 3045022100db3607f410c3768bee26f2ca4a311107826700aa52f5a1edffdbfeee8d530e0402207505b1d8ee401651f76abd0c140f0c78a67cd9358c8c9da960bb7496014941be 3046022100f84b06353ba40cb869739a7d6aa95291a4249e06fe18d94813cb2b7490f3a88d022100b0f239c89df085ea883ea1db4d0dc2b5645be48d442daef4e80a3030d604ea7d 304402204a9740dd6b6ec75cdaa4a2472504b59c2fe84efd8b70ca7adf0c286c631353f402201aec9f2b9681c025546b6602ad1b093fabfe6e3d7b94f76137a49ca0fdda067b 3045022026aa1866feedf150e513b432b398e29c17cd7453dc5a07f6049643be02db1e2d0221009dd095af219abb8c7782bc4ee3a0edd01416581681d9c46d6ea67cd42f0867de 304502206061b4170118a6f264404c298741066265e0b624ca74cb8baddc8d90c5c82320022100d9cc3379bea367692a8cac88b2de07db7669904d355aeeba1038b72f04f5099c 3045022100ed95b92c66262c645352b16708fe677e28d9135f69680ef4a07b1b2022a250de0220096fd2e5e9457aaa2f6fa7692e2ff801e9fef27196cad9f214fcb71a4ed38dc4 3045022100c61d3f8ac63738f15e23375c10e36b6f3df254585a990013e10f68efb376176602203c9b392f42b42e182c7873694f0eacea30fa1745f68afb46d4b0a152e92d446a 304502201ddd9705482e3317d417463377f07816f76f79ef884774c705c77af8a253f475022100d94d6617677ee82a732adc15e32306271554829556f2d4b1a8511d95b16baa1c 
304502207571cce360bc76316a8ae93e3da8350c7737ac9f496382bfd858d287b5e61679022100a2fd8d7506e5b80b16ba32f3e96c9bdc5237f2219b0f622bc0317c370266208b 3044022054b90fae366b167ea57642cea83fda61f79521dea7ed1d15b8bd2695f5ddf13402202cb8f2703c3a5ae1e0075525b4f63bb2377fff623684775b0984d8641e396365 304402205353adc1960dda8629a7dda9f061ad33a762d7e53202e67f9cedad0659bf6e2b0220234c373cdafaffec9de036bd2a73a5a1e3d91c858f6387766b13138877f47c4b 304502205c8c7d97eac9bab0a9dedb6e97e2a5be6b13be8d73436a4e0aa0ffdf5706ebc7022100a3d0fd2c18c68b0eedf17b13596835933c0c1db41028439bab4a10c1ecaac558 304502202cf0a7fda886ae9930ac9201e3cc89631764314e02cca56affc75709e9fe5707022100a00ea1bdf76c1c48e9c41b8161929290d96f61821b509b2d64d413fd0fb5cc60 3045022100d08186ed291bd818fcc0ad506eca207f687078ebcd4dab0850386081b70040ae02205913424b1bd482f1b534f30c96e1d62355c3251bd422aa0114ba95efb10d19c4 3044022048211dd4b597064f3c05de2d529bf44cbbc8da5e5274ed582a34b22b902bd4f50220489168f841377edf47257257a8ce56f8065de291bcae24553269a5107656d935 3045022100d47a68c677351e2e93530e8a82d907359598b6dfd48e5bce1326420b67bf5999022006097a38397edc7d212f67f762bd26305744fdf7a4fa3bf3bd94054223b4fde8 30450220788880c3aa3c185186f582bd80f2ed2af48edea731022ce0b584be9e9b43a4a6022100e6ffc0bf11acc3e1391a2b44bc8834979215454e669528e45428c3bdb7e0d629 3045022100a5752ed5864f78a4ee33623b571f334386fe08090dcedbde4f508fd689d7cad102205ed5ae6fd88c0f4caea6b7880fb92166bd2fa9190235e3ae7534aac0ea2c77ed 3045022074b0fcfdfd71a72e62aecbc07f2415520b43b75b14a71fee58f0c5b8ef8d5721022100df109ad5f78a6bedc6f42623f3fe41f1de799ab3b616929b449a1fe066020fab 30450220373bbd95edae617d89807191de01203454929e8a1681fc5324963f354ce82cfe022100b40aa5e78051ead1f110c66b2abbf66bc3a91f8deff7feb13b320ac40f0b4aa7 3045022100eb4f94eba1b89cf14d7ada7812431dc1b514cb50c1ab884780883ed26ce3d928022021e5e2d41cc151fb8982cc77d9e8c9f066de60589fb8cb18f66c3a382cfba453 304502210087f69b3113859efb6ef2088d48d7ca002002ae85da9978c5c969e45f755bf22502203b0b8ad82f0f4fd1da47c7575666a68a4ecd738aa578fdbea201c005223bc53f 30440220216325f83a69e8f1e47850243c5e076acffc054beab40aa2d363f8804a92c14202207cddd4b05b7e3c07d72d139ee944e9bbeabb06618da0c8067602a605b4be9777 3044022040c31194622eb8db711142c585da070d0740eaa358740380122722669de820c802207368a6cd29e0a5a4b30b1a212b240fb7301bbd29054078e50b31892274186eac 3045022100e63c4165a9f722e7b3ce7c33d3c45fae917fe0f53b572ffb087bd7bc82074e0002207e5661c48ef3ed9bcbbfc235e532f57d00c0ac1593be1fd43751ca2f7c1b4bf0 3044022067018bdc37f497b1c9552083174c584154fcef0162b3d8a4f86065aabe176b0202201a433dcae1e6cdbfe82399b23585c0796cfdd2786ba75fbb07f6af5467865e4d 3045022066fed6533c9f5be6a144598a276232133ef4a1ecb725d7acb90e7fb698b0f9800221008efe8fcc586c0b097c62606d60b5548010db73326cc0921de7fba4f0afb63968 3045022100cbb27b81d8725578c00df49628e96eaf39c3f305fb76a091d4753d0b15c2a80602200e76243d8071d0a9c02f8e026c5a7b9a853bd720c9bb83f2dbd10692c9390bcf 3046022100c782bfc4440f36589ce194918f54583b9ed86c9bcb44b9b739f8ab3bc4f3547c022100aff0b3f33c1c08f6b6755b0b6d10795f786b7d4f795f1bac52b7769b271ea9cd 3046022100d6651dde3737e1d7453790c8ddc8acfc99dc4f58c03fc726e98015316ecaaae3022100ae3cc35183376c75ff5eac437d7a91b0ba32e25a19902269d2ef865df9ef38d1 3045022100b0f2d841e7d3ed713faa23cf89e9e627fb62ceec08c89ebf23937592a666fa3102203508740b15b2e57ac26c7f8c6a9d1226da4f13009c89a56411fdaafce498f172 304402201d1d7851d703a4c65dd1bff608578cd45834076dfa3ba39a60694a8156f18a5802200937e880ac75ee4fd6a4a312f437a5c871b794e6d1c94455f00b93aa17c5d266 
304502202d674efb58c78a1ae3c9b5854d300308807afc9fd9195d573809a0c94a3336a70221009d4c99014b518936926f7c54895bfa3fe0d5d1b5ff3c395e935f4310cef45792 304502210085dee3c22b12d2738227658539c66c11ff88b290072253eb2a98bfea7183fefc02200c4cd6ea7a1cb0a6ab11359ea944a616e7c7ba71b1f444b42d2d5fcdd63f980d 304502204744ae802198dbdd4316407badbd1a84e3bb774a202cc6dc47d7935a71d6b617022100eae3780822a2c4d81a86e51c01e968ae7b88966929e8147d719c3fd84817f488 3045022100f582b22b05c51e512c1b262066a505cb70d9527dfd9ea02a0614e4999021b3b602200ef74f926009b66d09a9cca37ca3a8d43bbfc181d61bcd7822a3889c975e85b1 3045022068c1938ae88bf0bb39c2394848df4d349d2dd1594dfc4bbaee9ab2b0a6e15f04022100e398ba1354a937c7538e17d2a4782b1917d3fd65f30fd5457334f98c49d631e0 30440220125295d665acf3bb297abab0636ec36fedcf9cdd7043b93991811752f1896dbe02201d917e2c6e4104bcba8be4c3cbfcf84b23a2a2fc9432567c0b102da6348909db 30440220490e6970b0470c1ec9bdf8019ccc4949cbff6596435a97a5cfbd1ebcab4e4ad80220415c13b512814589c0411e73252b17c817c1f6ced0d0a5877121b2b5bc3ce7d2 304502210086fce2456b0a44785d8f29876bc0f191090ac07c800a83b470cc9c80bf719dbb02200160756738f65382d7fd0e2f3cbe6e343dfaf36cd9bd92f13123fd0f43878d07 30460221009e067d14818d1f8787cb7aa61cd8fae85de2970ef3122d915cadaaee762031b4022100fe64f4343961704426042a137c6dd75c012ee711d7db91c8a8c8604cf2ca4541 3046022100d18460f9761bc834787750121da3a7c0c546f9d0f213c59182d26ee23a6afcee022100f7ce512a90f0a96f735b2ae82b2bcc7eb6e56f1077fe07dc9a882815744381e8 3044022062cee916c2ae93ebdf6aa394ceff9b6d143548cfdd7715ba28a407b2b3abf60f0220107cffe2e279795507070d764ae478d68d47439f055529740c85bf531f4dfc11 3045022100893e39c80425b5ae829a5d06155d1491012929dfac55d1d50bde4af17998c571022075442a981117076eb8bba64dac740afc1936938329d1b89e52947b10150d22c8 304502201e52cd2e9064fdca3f501cc5726aa0438620aa6154e8ad7de0658a13f8343dfb022100a6b9e9ef9aed9772ca2a73928d6f80acb2e1cd77d583f80ad58103129eb6a108 30440220718975e2bbd1ed2d5cb5d012321bb81b07c827f0adff3df702bdde8342ed837802200d5df2ad4e7d9e822fea25692a8225bcd3f4b3fd7e9e3e8a68ad35caf965f0dc 304402207e440b6f282aa91bc7eb0cad34869362a8a7fa49e3357260511086b63d7ba89b022048c343e214a717f5261c37745663a6cf76d8f402f9b5961f8075f93fd8a48fc4 30440220548500818f08e1229ec96e714c168467a1f058c3ad9e621ff20855f6324b4549022022931143da36fd67845edcbf07c359e67fe4deb3f8c429669231d4ed2153e6e3 304502200174ccc90287999a24482171fb59d5ce8aab204549d57cb109d47f75bd0e680e022100dc77d5aa3bb0178083d42d17e4805b12ff5718cc06f3bf2bb74d2946bfb351e2 30460221008131d309e6b9d00d2b61c021ca85717d928dccca7bd65d606a2b170752090acb02210096553298143586bff96d5f00eab99c59aaeff5fdae721e76abee50a5041b4290 30460221008090be133bc679e5cc81d0b1de95d7c0b33b84d889fffe947ffb332164afd4d8022100b06cd1e72943a388ee173377f678d560279554c3e71c64307ef850cefd678927 3044022044089b4f65e5f78bdc9f6fb6f61c7c1f6898ced29bcf96e8d4349f4af2e1a0af02207cdfbfe24209a0674d73c022ff3127e84c25fbbe199e1d00fbcc9db870eb2362 30450220311c33079d2a4bff307c587b2456754b4043323b9592bfc247bc9146b55a33a2022100925fada3fb12ee2dad21ce7dd3a54da9afd627ee0ddf266ac44f749094100c24 3046022100fdaf36abf68b1773a5d27cf31e6255fc1a0f51eb76b475dabb4efb0df281eddd022100e39d146135e1487fad606153305f66d76b58753cabe7244a92d872587e583305 3045022100a371e70483e44c5cba63349a9c0dfd135c2134df921ef26402606e07a5a843240220195ed5d9edbb78fb9774f6bd93b05f0bf4c918d18a7e3275eb29a0c128f89cac 3045022100b540751eb36a7e65243e1c14a4696b1c0928c652c1c1de805ed5f2fc7ef913f1022077af0248f7f51bb14b42494e406d45a0a96dfd35c384a43278a277f425e20c75 
30450220176f687d771e59771a7f72b42aee8c3313a04224afa48dfbcd1428f3636e365c022100adc3549d3b35eb12f8daa1a6a4f048914f8003ebdaa7cf8d7366d9c3e80c3ea8 3045022100b991616e011e18bce725ae301fc3624d6f6582cb801f49636790b1f7931044da022039b2a1b3473d2ba01b174d560789af1dac96d54203a7216768e5b930dd9183ea 3046022100b25b46c3b2291d7f90eeaeae3f26f412df3d7bade7468ae31ab77535959f64d1022100f43e142af9c305efe867929913b01d0ba3481a78bf58faa28b620b74c1105ff3 3046022100a84b29de3da89ec9e479377c4c2cd4cc22fb3aa88908ded9c41219ee94fd5282022100b3164c6d9e4378561e0831d7e82ce4badf32583427fc7e347b0bd4f01f07cd1d 3046022100902e2762810d071825bd2b48bbf14cdc3e1f4d3bf4da9e38e2a4b03a934ae5b30221009809ef8428f817e5fcee7916499f4f6caa717a28bc18686b8eda60078d6c6cb5 304402200dc609e3e01d52519a6fd506947f8c440d9bbb54162c3d768379e844bf9b24de02205c59b50d90f1e70937e32f300d77176e9f62dd25b2dcbdf330b3e40b00ef2bce 3045022014abba8f31edb86a99bd0435570aa0a930243213c0d371c6a9ed6db950b8a5ed022100c74ad09d95ab75c958630c009280dba63dd699ef9b0b8fce1b973b3153ab6ebc 3046022100ef703c2d4a0ae1328e9b5fcc56634ee2afb5a69cfc99026db22b63f901cfb55b022100a43eb75c91863c3207ceb7598824471907ab791714cc35c95dad1e641a87d637 3045022065207489d317a739b759cf33ae27438c3a8570dded82ddc71d4ab9172387fafc022100a6a2136299900a7936a00c929f4fb090e947256502921eb19cb80bc9ce78b94d 30450220503bf255680bdbe71d38e039204ddb2994c44d7e8782e5ebc421de7aae3b21660221009a824a2c057847bfc376009cedfbde9d0159c321bab2efbf9a96170d562b723f 304502210086e01ede74546148ce96e39c091029c2e47e2aced6ece558a1687129e9f6feb902202b0f4c4ff73ddde4a319aa0bad0b018844161bf7f24e2e97088f56a7ea75b219 30440220041892fb2c2f56f2791e6144540abbadbf54d0767dbb60f29bc33c15e86bf28c02200287d8287b21c7ffcd3c5e7af243a3aaa8203a95f16e4d7db657cb0db08559f7 3045022100e975a405116ca5978f7be5cfe16da63177bee1dd7ca2f23b4b2d5f58dad699a602201f3bbe631c59869b34766b9781901ebaac15fde6ddd16b6b529923b2af90b1d6 30460221009d257bea784cacf82925a52c40773aefbaa4a59bc0a5efbacc25ab6fbe8e392d0221009d45cb336843f1accd4941908bc633c28c3aa7c35eb7a14fd2c99e8981598b88 304502203c2adc3c1572c034c97ec6ed549f1ebe5b4ad0c71e34607ed3eb250f53891e18022100969448a08882c7d28b6a0869f87d12a8e59f28fb3e378a886965048e95423b1d 304502202a2fce098e2dbd3be8de3bf63b4e1c4db76b9f23c1be656c9a30a7f96d1d1923022100e5a56e5b75927435da9dc29c5a16a20cdd17a35454e1303d164ff12ce68f7820 304502207b2d9766cd4cefba0dd56c3a5ae9e660d89796e8a46f090e06eca4c14bd6aece0221008bd09234526beae866ad8ed92acb9c1283e8e039c3f2eb3e8efd9ffbb2f81b9b 3044022074a84ce2906ddf78f45ada62889e095f5f5e7c3c562e8af4851b8807e8c7ea4c02201fcf5b95ed1a3702c0b9fe8da97764adb73a64ff20dd6965e21da7cb650be169 304502210093e7e602e2092e96f6400e2ca2ac77ab6a5e02ba9fe08e26c7da00f31afc391902206c3d315f97dc48f29f6a7e990ce1db443cb355ef47e5bd1579f81a28adf8cfd3 3045022100b53615aac7bd6cdf47b1663aaa5bcc12933237ccda89f8cb671a0ff835a8b2a30220009ee52b1c960632cfef79d38755b73e78fbfdef8659feb389fb8ba5c09f9b9e 3045022100df704d113f6867f45227e34d3cfa383e6236de10b7ef40167430a8bc6b5534d302204f9ccd18ed17774e798d29315a224ad82dcb8173e8adb2b3976ecde02e9716d9 304502201b8ab38dc3df8a030a9a51844e004b35ac6761b60a29a8ff23935d1499cc5dbd022100b49632ae2e4e21ae32fe9c76e8e5ad3db1ca9c3ca9127c995f74a84751c1ca40 304602210095d86a14d8f2f1499506a19a86e7ad18c882e101b33061af3a44c80bb587100f022100a82a6ee8fecc17a9a2d5266489e08cb310bcca0a45bfb00655eae53a0b19e491 3046022100b515de8dd4d1c992b79b2d7fdf0bbb8c31afb607012ff0e8b08f0a37d52a03d7022100bcbe8e6fd33ffc0d220b3b227b263b4c4c8efc0181d7fa7f00f79080516230b4 
3045022100fceeb2605382f3a0f717f389c37f5b13823c136247346562df08b013ea56fe690220230af8859c05b821ca2cbedbb1a393dbce5a495f66526d5837428f7d166f2755 304502203851064936eac46a7deffd9a7cb76b3031d8c7ec0765a4d31fb015e337c86dcb022100800047d2b980ac6815b9858281939cfc054bb1578063d33b3f4ee96bd9b01891 304502210097a602d883b42d879ad551874a5c0138e22bedd1a68ef681fe3515a407123f6002207e1c5f9f248bb7cefb27e6c5df5a41882134c30046c0e975df8958720de55d40 3046022100988477d3f5748cde178cd2c0c4805f13627ec7f1d66e01bb72b1cdd85cef1414022100f7e888b0d6fce311b6a03d419aa94a17f067f9100c5bb2f89b3a15ab00cfbafe 30440220463332f0aa0937569d203b20a168f4453899adcca5750ff5ef64cb505a0e1e7302204ed265d84d92cfb0df9f0b815653b2964b15968bba36fbb46bbb81ff187c334a 304402206f8270588fc192fa27502a249e94baf0f7048e626ecb3ab3f2b941e71b2d624b0220484ea6e5aaeb62c28c838a3239a13a2fa67c09e41b7ccc7906bc0513e988b893 3045022100f5ded8c38d057210edd2bd1acf8d806e4e7eac4d1312891d868082b60e49fbb002204ae094eb7ca5e8c738a658366451639ccfd0e3f6307953db85bec7c2a529f5b0 304602210089df530aee534b8cea2718d9259e63f10107efa0951ea62eb90933ea311709fb0221008dba48263c026aeda2dc7d6d3ba25147a2916609afadd078cb969d27e1e906a9 3046022100a21e57d2dd47e1302ef2c717a12def136f1a6d5f70868b65ee01baf1e89f1793022100e003232e80e32488005af1a8014a8528c847e739c9912b2a266354291a0e507a 3046022100a575fbbdaeee3731fa4db1913899d9d3786a88b8fe72288ac0037cabf10c84ec022100afdcf726163649c3ea91fd3a5fff666112ab7f230c98eb50f65e249b0ed0c923 3045022100f3ffb9c87be5753268b6117f97b7502eeb49c9466a25486309f30f1f7d06320402207f69d21d42538fc24312b43ffda7e0321212d2c134b81bf40fdb79748cb2e028 3045022100fb7c83732a1871d33dafce7c71e1d3302076b34d8870f9ada67f6ef81b25be1f022030703ef687009acbfb97b424a5e27027894df5669cfb46691be19f11e2005b4f 3045022100c8907daba8fe5514c70af8a510612cd1776d7a81d51f0f7b66c5789f855cd4c702206942b26c45e274332f1c1230b53366bb478d620e20df6a55b9bbaddfaf361b04 3045022100dc5ecc547613e9484cb0b5f91710bb45a1619a87e898965960f63abc349df0d9022009c44da8e7074f607c32ce71044f0712def96decc61bec1265bcc71f44754682 3044022043c2e869726958c2a5f92663b10cba09a4fd1560dd4e2f143c331db64821d92e022059dfc1af6fac5fee4f75babca23f731eae5f02844e4be579867ba00e8e85db26 3045022100afcbb961080ba4592e0b8e703cb4966d5b75b9dfac57f002ba72901ce412918302206179515171534774bdea180647e4c0acf314e6c1fa7af2d1e3818492e60ff933 304402201d2eb7b8101ec864cf0f92ddc45f02dd28c4aa65c0318a41b50b84a7efaf915b02206a86a3d568f7b76ce09730a86de16e0fa869f6bee475bc58f8f1d4551ea139c7 30450221008c528e39a459a8699b23fae66fc69115784f501d1d115a8459e9442c83c9a32002202a4a10e279d4516cbca8a95bdc3f13268bbcfa37c4c9fd1a8fecbf60fa2711c9 3046022100c095fc4c38746093019561b20a0b4c087ae6eb9daddd9f4061ca1025efc588810221009938d97d8e35eceb7345bf086eb0ce6eeddd08864cefdf4eac0ed087b19630d5 304402202ce3693a3ceab1e587a8c57c7f919b6434d92e4238b56e48937f377d2d89d80b02204bdb39ae2226294acf047c8e3892a709556785ae2a8c8519d775ec8b48932358 3046022100ac3525712296c8d9ee6a9f9988bcbb1c49a927e8edb8f857c2da66dbf7486092022100f2d148c92a1cf58a1f12509e48c82208844633d2ce06aed7d693f11306f3fe22 3045022100fa6c1382f7ae3b80b466293bc57721e062517f344179d7f102379a1172fa2562022024ca0499ba6b3fe54a88406e0b4fef8f9bd2d5e8ae900219d7a2c3dd4df36f5b 304502210080be84ec590866ceb65551425c2baf99bd353753d0fb80769b19f30ab977360d02203673f659653779b535c9fb88901944dcf80be6f7d79a6fda12ae0d371d26b9a0 3044022034e58122101b7652f963969ec87149f3b48cc6550901c56088cf1c22ef81e3670220070e75cc01bcdb6b67565d13d9fb3fa49f1680719bacd8ece6902023cb72f154 
304402200e1577bdc245bf18698ea8a9e064766e1fd102787be7a0f90b7b3f33ac3d4fac02201701edc5a9a9497fd78d59036a0cdd059e1863797d41a96de9aed072be459660 3044022069686004cb418d614d08b9b985b8b08962bc40f3c2f5fa9f9ee63e3ffa956bcd0220254de51c87cb95db90fd55dfe1ae4b339dc0658d4f1efa104b93360c2d98dcab 30440220736c029c1c61d7dc835a4753f9538dca488dd14670b7aa2071d85e6b78c175e0022031d17c7dcc9fad225dddfd574c758747dc3dbb9b82aebf76acb17d57f96c3ded 3044022041d55ff0fda1f5d2a7f60d906654cf6d9fdbd6b316d15c3f50c3a250cf61e3170220584af708437d62ea8bc84d4f41e533b7aa839bfc268223880b48b75a8284d272 30460221009f2cb46de1eaed9314f1520402c7a8665fa7f5bc6255041e1623b1fc52b7dd110221009f573ae1e4793ccc1f8a3726be5d1a72592e42889658bc14ecfedcee90e90ec3 304502202aa0017fd565c2bcd0df4b1138b90515ef2739f2037c599679c370ef03f4cb92022100cbcec81ba3ad2382e1ff06db4ab5cb55e8978340faa108b8487a844a9a2c59d5 30450221008c0ae19715409101f6b188ef9bce4123e62bbc5cf949c6970cc411df4f1d3220022079d6a375bbe559c0da8c16d06828a88ce565562b26dc52fac5871fcacca04874 30450221008c15ef7ec340ba36d7bfb4d6f9820e2641d2201aca3d4dd5ff8801d7813c57fd022013e42fa4c45ebef6a4e7f6d6ffcf54f217e56bcf00ec9d7dec27f276cfd23977 3045022100c9b1dce39ea2d6084f6339b5b5af7829e26c84291ccd3201bf9ff37b5b496db0022029296f595182d62cab6518602bad764cce6221ac82dd02bc9428da04482dd3ff 304502210089c1fed5fbde993a86de89ae03fb71a5e0cc9872b126780f2aeb41ab1e5b88c9022036d38ea72bfe837756e722632d1acd0089eb143e9cf2fde522a25ad3f8f867de 304402202231c825f788d6b9ad5b0b6ae5de421387ed04b58a07d0a17d383afa517354fe02200cad0b36afbf02f206921cda68aa361bbcd6d94513a9dde4035d16de2647cd45 3045022100ee07f114aa7b208d27b3ea71072e491443f9ca15229a0642aa695b81d17d1307022009f27ea82f5a72eb4105b1e21f58851287fc2232de62c1b6d99e2d9f81a87443 30450221008cf377727d069018d42b5a0beb7f638f98adbbde69969c77b80ceb93a402efac02201f671fa5807c954aebaf43417f78a0ad069e9833bd6f689c2a9818f5265bd213 3046022100a53359f42e7a6f1dbeb724e05243678f1ebbbd3e449a1ed768b5ad97b85ead92022100dc8070e3755d981d842e3a826eaddab6d44e79ead2260502d9bc6e7a06878ec7 3045022100a7de4f23c41006f61d847ed6783e9387619555b44019b2fcbfded86effe1b6dd022051f2e9c66e156b5ae8614d837ef656c0ede43f1abbfa9c43a1f7b6262f55db46 304502202b2cfa386f38d51abdada2de322c31df1da78ea547befff15d02975eb3978bd1022100c694fc31acdc3c4a84abd383bd24a9e853f00cddfa44fc5fdf6e09f4e85bbb68 3046022100e935043de7945ec7e53f9ee4d87c46501232468d7b49e8d4a17a9dceeb7880b1022100c08660668f78d5d374e420154958be15aeb76295f1e9150b4c168598a40849bd 3044022004dbe58453bfbac3382984c08b36d7108ae31efd88f2b64a0ffcfb7e90cfb1b202204159b60614c1a24abb7ee8360f7c0b604d03c772724ded9df2c1e130f7a8a95c 3046022100f2c9b4a8a5b7814551eb821e043015b485598b16860ea2aea52759dc40ce9abb022100b669296f5f8aec4bab80ff9d50bbd7cf644738abfc4ce0c387b38ee11450853b 3046022100d8828e15de7b8cbd79a637197149f5abcfd58748e360709e20812178771d563b022100868a82f0aa63d674126b15467109f0b1fbce15bf5b858fd5f25a378a9681fe54 3045022036ca6997768c8a2087f2a4d54872c06589e535a2ac8c1b8c1f5b3c68dd1e4c0e022100823951a8dedd9fdf782cba99b38405e398534d37cb23545ca268a0604857471d 304402201ba69ad5d990b175e7ca950011629527445a63e784343693134a7c8faa85d02902203f59b8487da162c24a18e0125ca4eb02d572fbd1929ce2cf34741d7c1e00fba4 3045022100c8d9c3453cab48e02643d25df32977c38e2238dcba093aaca6dfcaa6e994d77c02205a299494c7e735bcfe0878feae89920b73aa19a34095c57658bdbf2614d3aa37 304502207e06df0acbcfd4931f9331a7898d06efbb4aafd487b275f6c70c1a16f11c07d8022100fbb82f8a23afeeba0de02adec04ca8c073df0c58f5b2286c53b2ec2e8e13b6ea 
30450221009e62d9904f0348e20960226cf4851317362b59e6a34bd76f895bb20295858b9a0220418a8ceae6053fbc51f61d299554dc437597e2a450adc0de3dd6df26e1550f6c 30450221009f51098643a34c61e09d67194f6ddf81e8f8f56505ed4471492167273a27b3030220395973ee3f68721495f20b74818e288d5ea52ad0654b5dc2f58aa3a26d14b956 3046022100a35c16e238e574c0fd4a333b18244d520e6cea288484eedd83d6f0353e7bf642022100d8dd2aabdf47685d71caa022fc34472ff6e2f70d90c337e25537f84db3756fa2 304402201b85ae7750d376e9e415e4f5d2ea2f1a50772a885735937a6c627fd90223c60902203935712ddfc06296d21cd2d9f694e2fc7221341fb7e89d3eb7e02361ed3b53b4 3046022100c4021b3a1a8c086f30ff1991ffc124e41bd14ee9e0015621400143a83dfca683022100a6964b203bcb014ee0b2f11b4dfc098429604259fe1d5eff7362e37b69188057 304402202ca2409045f8f128175e1e6c373b23334e1842698fd1d0ee7c7a26fc11d8540d0220413369bb4b60dce846510972dd76985a2da0a4fcd19b9a9eb8339fd29a8da103 3044022074cb84520ecf3a721ee418a003fab54fcf38a9aa8f0aaf84366be442113bdd2002203800440d73d6933ed94372c531f9434a499f65dacf842f23c74dfa6fc5295afc 3046022100c6f86f2d8c6e12546b37c71ec3c958fc1037b2a90e4a212607a2098e2fc19d87022100e857f6521bde755d8cae2277e760b4a8fbd1ec971a4abe8851c8fc5ed913aba0 30440220165494322cd834e6b52aa9115327c6ff43859bd720bbafcf5a470d59f00f96f602202a3a71d8e5ca760bba68af41da13d3944a737a9e0461e33c8103a3707480d8c5 304502203a84a2ba1f760fd0cb09767ee45356e9327b19c310106ea672745618a217321e022100a5509359f97100d2b37823a0cc91ea8f825b2f31f91baeb2d1bd0c89d6fd60f4 30460221008e1d02a518ca7276b209c3b8dc4f246afbbb882b613164791539c3a43c0fd06b022100d7b094c2352f8fdabbc92a2336086288f174483341073c79a6f1879315cc4d3d 3045022100b994c55359edd1c457c0de1fb5553631ab2c3d733c54233f91e5c65fd015e1a2022000b9c7d45eb2440b1d38ba5c1af30c538f9c6994399d8d4e015e476a15ebf4f2 30460221008f9b23f747e83f1d5d8055ea71537ed212c11586f3eb2d721c17d272b7e71053022100d7cbae770fa4c3615e1eb5e05ace903ef31f5d711e7d47aeb70d7902899b3c0e 30460221008abac3a19b1c8241fc20258c888c5cbf5637a5c297113a3e595e4fe1512d8d1e022100d7614cb54688423404f534dd10b19e355f2f243987e1b09b7c5c4090b796caae 3046022100f11b670c8361f7877d135ec05717f1b69dddc6f4b91e07d681440bde35cfe006022100b9139aa64579c89f14403c9b70df46f557d8d50b60e2c638c1485e0de9036d15 3045022029b0ebc83d6f532d4289b51c5827d33992944d87a1b80c831291438f683720c3022100ad9775933b05c182624a0b6a7c8a08cf171c30051e8618c1b61712e2894eec05 3045022100b2a4ad9c5d663afbe953ffef7495a14e1818c7d3a27d8a08325e634228ee92f80220249aefbfa14589e4722392f4692036f83df495314c8f148e4156a071a1d8a387 3044022005b1dcf8ea226b1e1545f76678e370758b22278dfbea233de77c6932940bf8a40220326103bc825ae16794fab6f68243a05fcf70178a7a703f42684a3eab9e74ec02 30460221008bc9081e72a506faf784670e340fa595204ac8e275123de92b538057d2161533022100a45e31fb5cfc02daee0d077bc2f8c82ab80c67b0389ce4c8e35b7ed8967438b3 3046022100e47f1b8fdd3a43f9510fcd64dc189a24ce9c7d171187a3fb9da6478967dcf403022100b7986303f19a42692a9f295b258099dbe8a45d3e4f75f1919643aef56bbcadcb 304402204e43dac5ad5c2ae9c12f767c15fa3590df4135d43bd69eb1a988cbecee83af1202207dea72e89aa7023ca519875b8f7bc3a029e0eb34c8a52001a1d34a2b4a47bc59 3046022100827899de4f6fcbbfaa62d8538a6bef77acb5fd8711eab2cf8bb7ef2cd7d88fd8022100bce0b4461b57090c55e333001e67b342f8b5091af48c24a9e3258972a00b0300 3046022100d3464d36ea3ec22641d5657062471fc84897c479e4ee37c42b784c7e9a42f732022100a9a9637dfcaff77e1390f2fb232095530f13cb8d84387685d6ecad2993424e1f 3045022100890b1dec95bfad868b54505893a5fdebfff9b914b31c66d79eea9adca05c2162022049ddb32516f1654bc15aafa5ad070954b4664555a1917ea37f1ac6ae4ca3c03f 
3045022100888a83eb8dcf8d570899eaddc933b44b83bf8494c2fe266608d6ef1abd43bc5a022066f0c2a0348b5cb7addcc65fecc03ef074619efbdebf216cd756554b740e160f 3046022100f0dbce6e0348c8d599a45dc856bea6584b2e9d52a44e8141891ed984fb36947e022100bfa064787be003cd00bc00db97ec0c0e2b12c37e0e596c80313d4dc0c5e044a1 3044022069ee023b1b3fe013317e1decbf609d505f99ae31feaf0626f684796a56eaccfb022012a268f5979a765d35d6b9613e6df8c1338082a0c1cd18c7b3c7c464ffe33820 3045022100a0acd05367d19d2ae85395a293a1e93362b2ed7f8f60077d13be893166b14ed302201ae0884d51296927fe6554c2ce119e53571232639d570a4c6471ab6fa29e2cfd 304502200db10c51539520506b31b6f05978bdbaf7891d6414ad0fb6a83e2bb33d74dc96022100c6561cec3ef034102a3151e74df82d87e726fa9479f8a92c8f0021739b7edcca 304402201804a74cf564c2947ab44887ed07bc259fbb26b090e77f128af9c1149c04aeff022055b1fa3df36c9fc903ae11964de7c69967ce97727ca49ccafe45c47a2ab7f05e 304502205bc220983c5fdc13ac5ea54e13fc6cc57cdd6a0cab4f73f5f6bd3cc31997dcf5022100e810a1807a352c3e712c200f81104991e29fd74eaad6c278f868761741a47674 3045022100af1507d72b011112a5eed2c065f273c4e691a955b0e2d9c1c9bd56ce835aa09e0220050d757c9025da409ca415cb61a8cdb7b6b145120a799d8e2a641d32b8341c72 30460221008d99f2c978653bd1daa160b66db06b414a375eb4e79dd793d4a3cc15a6926d4e022100a172f73d9fa1c0caf31f93d92bbe3a39b474c1dfe4e9fcdb5c77f444d5673696 3044022002313eaa329e404f0a5936c56cb7e6962fa331a2d3b61b903a676472b3ee29b7022040a54cb2f77dfe87d675159459d5fdf8bb94322a970b55739b8edd8cd06cf222 304402202d0ce25402fff9566f5f2473206f643a81ee23243fef6de7e507e32406522e400220398316c682766c5ae8728a61e5ef0ac61743ca9872b6b67088025fad290558d8 3045022047b21f30f2c8d185141e3a844d80d14ffb5d833104cbfcf261671387e4549b3602210093c276537305fda2760f610d6d934958738a65e8c1db2b78ad1bb92967acb4e8 304402204cb264992f35311fb972663ea2e4fbc16e19add51b520d4b3685e55ddc42944d0220337fa3e742176fc5484567625eb7eae6868e749da4e54790cda5f4bc71939392 3045022100fec75744060b2e792475cc3f880a22465e8005c914377656788e199a8d6f29b202207abfb07651db4d5d1091e1da4b19f9b42f0be80b95b18762f95c4a28651693cb 30460221008270bbb50490eacc0028a0ef53d16e5126c401495c48031f75e088205ed668fc022100f2666eda57268c5d1d8cc057de4a1f550fd0c6b6fc9093733cebbfb97f922f40 304502205b25a97bbdf3d726a25ddfe44b5e5a7ad2aa37636d281330142ff6454ea22aeb022100e659420cef4b3e09caa88363bf10bf4288e414a0f108626e96b889ffd3cbd7ce 3046022100ba3b8b6cd4c9c557123e7d13951e86564d1baa2d47813f0cf13379c5c175af33022100fd5e6f03c967b87dc1e360bb41c46172c2e1bcc6ec91306a21e91d98197ee863 3044022028fa032ca9b5bba522825e8f8d0f53fd46301bf4bda58e059ee0f68dec4efc8202200921878373629bb3de92e2d3fed84eb57f0481bf922155373b0becb959c1b38e 3046022100ccca2e2a5adaa21c65d54794de053df8a6d3c509787d0df5f269a4cfba069dbe022100b2b66df2cde97a9bbed2dbc4835ec14b979288b298c00cd780bd047c29551a64 3045022100d61c9699a087608758310843f45f674df3f76fbe4e45b30c91563657ff1bd674022031f3ab5dbcffddf031453ebb38357819fdc3d3b51a1ddb33ff9381eac5e97ea3 3046022100908aded6e969ea16f219c2b63252bc9a76c8f72d806569a084708e194f7c7b410221008801af8a72c63bf3c8fb31f0fc0c367b243f9876be658853eb7b8d0ec8218bb9 3044022039dbc8e5493d6ee6a811d1e92cf8dcd04fc40491fa3c0d39b883ddf82c5e3bb802204e29d86e9387cffef4f4bb30226451d1a9f14f7ffb9c99f64dfcdb6534ae0d36 3046022100fefb0e7221e96329390c655882b0d2b4e42c6634858dad333fb69c74ba3d60810221009079ed7e1e35ac666fd889aa7ce80e24c74c0e906baf79fb15c847a8782e8ea1 30460221008974441840fa2ea550de4a7fe14c38df39361b7785269324166222480319bc58022100f1ebb2d0662ba76298bc610fe2560dce61eb1e711410dd42085a2dc77fb97437 
304502210086bdda48949b8186d11445a5714d61a4fe4952840dca78f128f211b1f1486a4002201b422ce2fda4c89193546481befff786b9446e72784c6407d734e44173026c82 30460221008fcc00439dd57e6b50ecd644bb88ada82edf83f0ad98d9e8ad661eb1ef1f1fae022100e2379d521a9499f0d922797b2aad9b291163f236c76d9a4a86cfcf7c64d3ae4e 3045022020f387ed72c389f3eb0580ca468446fae81316c4dd5f8cd4ce99b8d2fbc6ba930221008ce87f63c0fee729937d66f58e1ac647348feb4de45a332ac4d789d30cb1b577 30450221008e81b74e50d85ea3a6a6368a2682eb4b5eadccbd635f88267a42c5059f8a8ca6022027451869374af4cf5b0cd6f07b08fdd85cf8c4788ed29b6f6bec6e68fe279283 3045022100ba400897e3d67e12427c0fecc42b56ae8f7ad98b54f0331ec05081dd01851c6f0220545deab285d391cff08c3c00c8e961ec1597113738636d34739a8d5e9fed4ab2 304502201eb4fa54ee7204b48fd13dacd1d1251c650a2dc334231b8102ef57a9e98b94ee022100efbfc347bf73d3b3d861aceeed0473f509663ab14f6fa5fd1d3a729b3f1e4702 30460221008016bb4940426c96b139caf6e0987bb49d0a0a2fbb82be9cddf28d3d58b6af3c0221008417ed8025f835f90a2b610f5ff6175648deb6873440c13b12514495acec740a 30450221008e92ed9c5cac4d3dcd2c0db66a073c5a2a75043533c2f2e8514810e69bbfee5a02200ee0099a078d4999e4118d533b87f7c7859b2baf965b3415269448f6d6c94515 304502210088dec32709ce97e3d87e9e80ecb335f173b42262f7a922bce175978a08d66471022054407f63689d4a94c14f37e216d730a4ac1ac170ba0265797d2004091cf66f08 3044022007b75da1b5cee46d66c064359e2e898a9d9edf5902d2b8f722a11b6af90277d602201496f013920e7c5a89dd217992e85555e05865c7a9719862bd6fba4e3143d2b0 3046022100d9c456310d1fdb054362b83f512e56bd730ed904c9287bf06d4e38e51e5b56fa022100b4ad437867a3ac6b0a3b1e78e18673ef1d1018218a6fdec45085c6eca2e736ce 304502204cf4c56da673134191688c9e856452205b122d08e027d1dabaf33ca95085d521022100d70120b2d46d962cd34374644d0ce2418ed7bd7a8c38f5d620b7037813680c71 3044022013f199300ad1bcda1ef44634334cb0bc633319e73089b42375c4fd2c05189dcf02206a76757e85eb790f5a116f229400972fa2dcf0df904c7b8adfcee29fbd568129 3046022100846ea86165a25dfc5e79c0f9bde4e885b6908b11a434c5ad4557ad74e0570feb022100f69643ae494442ea9e8a1fc773766355c0c8e84446af81b9c2853548c21a1fe5 3045022002559d3dcc125746c575b542087e67ec74971df2b9e824632e3b14a48e7f4718022100a9dd18e2fa6914938564471448afc4079354f81b9ad04787416793f18088cf31 30450220383b2a707d889660ea77bfd93ef1844bf3acecda9c3b060268aa8959f3cdb40f022100db03038a8c6cd78acb4e02a440c1282a436d0b79f73597783e1653975eeb49b6 30450220289172228032d749cb6e55b016f19ec5bb1f964a7cfff186c09062413564857f022100d795ae01778b22d739cf59c58430307ed542cea901c733943827b5511fbd8bf1 304402204875fd5f082d6701d6de79f864a7e6ab74cd5082d83e7ecf5cdb10297342d66702205bdeb61d8868026e830143020733e2e9b01c001988ee517f20a9be0a68d80385 304402205b47fc555fb5ab7da4b5465f7d4f76c29678329fcaeb057144281f768fbf804602206b5d8bc3402c37b5d4e36c7f3aee2fa057a91373689eb2fcb164b26fa769fbe9 3046022100c6965dc1614677eceb5571f71269f991c54eae212b0a5414d85c66c1e709c3b5022100e586b4e6608dc1fac18f16c527aad48de8e0421a0e68c46734f422771ba4e703 3045022100a5edd57184f870bb3ac8f993de5de2071a732c277dbf14a1c3b4e77a965c6b61022010f23cd2f72c451c6a5f86b87f7499606cee88c46782b1c07a1cf6e7bb71f1bd 3046022100ef30ad8076bfcda53b18c5da443a59edfa3a911e457c21f6350dcbcd8580820d022100dc1d25614276f249fa5e838999906048f60b5566e51056db5862a22c2e77cdc6 3044022072abda32d6698296247f1fa7c28efda4f15fcafe4aab0322c0c413d87ceb2ac2022070ad3b5ebe066ea8c852a2ebcfac7604485de070d84b147f0b54334cdcbf2b06 30450220405dc780cd4e1b8e6e7a1547b31c6277bda7f5d87a774089e3cece5f6b1da3d802210091ae905a26a97322fc5325a22e5db900180ff3bf6d7c7141a5ace7cc30ff3498 
30450220487a5290d4f026429651485f9ba0158257f145e64ae65d424a8ca864b89182f9022100b5fe4792aa10b92c7756d9d6a3bbc3e6e3d99cf97f28c7fe81aa8572524b5825 3045022100a5558cc56dc688015a5977c23f1b7cb4fa73db3918233edf3e512e9ba763303d02206a86bbe6d9a40c65c29cfb899b469032a85c0634a1a0d73302d5654d98c9492d 304502207a229b6d57ecfe8c2aa0053f4d81a0bfa918291f7effab98a5805ba5bf5f51a5022100ced5e5d87d936c5f47c4f2a1731d6edd960e1f00427107d45ed80119563ff9ce 304502207e9f3d83fc6755232723caca4a4c3858f319e8ff88aa71826dc91230a3a3cd97022100d21c41ad17bf8c8a446b59be141596165e124da23eef6f2568767469c7ed3ac2 3046022100eb392d7195ef01c7fe88b036f51c1e31bcb1d7d97f1561d603f654bbbe5ebed50221008173fef8c0f9f2381e3922ff29b829510d034370d08596f54dd992a8942c00d0 30450221009208e3eb5e7365cf39969e10b95b05a05c461f81fa93cc40b7c9e209a7c9521b0220328115549275e8f54844b6efe55fea82148dc8a283ee7089b694a67e3727cf74 304502206c511f657ac5017c2b38d6babf9eb91add0adb89690ca3057f74633b65581e0a022100d131397ef5b0bc65e6c628209f7784a512366d67f86f1c7f3a1b050ec3b4f63d 304502205f949152d7c52f4f4980a9a97fc719c69c5ab033650105fe5f920f9344247f750221008633da244c3ff4118a87e82f720b5b5e7c7831a78a31385915f60a8dc0ef1547 304402207894c1e4ee5a8a4c323e928cf12f152e60c902b656ce2c15b986351204c01ad80220335e393d609ff0828f95a1a6150fdcc46aa4836baba1adb7e5f70ad10d3cd8d3 3045022059d5e5887ea50cb2492b08ecc019dd85c530ab3f2653b99bba174b4950d3e2ef022100c5c03fda9dfe7d89c49e10be10e865f7659efa2774854ea683a91f9a2fdd08c4 3046022100943afcf59601ec1be64d13d7f73a6ead3df56c9dba6eaf948f687a8aa4263a07022100f57e35a50cc29aca0922c18013e75934ece461e67318dafeae2e42e8ecec03c5 3045022100b40bf1dc03b8c9bd9788449c8c2731ff72beb48645c4eb412ddc5943c454172502207e1465ae8507b54e3a509fecf9bb73bd7efb0f54f4304dc8a09ac21b38a8468b 30450220323ad372f6f5e37039d12845f051c0b3704441872d40a383b20395c955c71ea6022100cb6adef2f11796b22e4b144cff9015ba2ca4c68ff95a225d5e3481194bafdef8 3045022100dfa956cdb07a0e922e86eae392dffb87e34d17800937f2a8760de86bf847b95a02206dbb68c86d2fddd84b8d1db014780a72783ff76958a632c9d3f710c9e02f400a 30450220047a5579fbf2792f2935060e646834dcbd19ef444547e2158cf97fd3dfadf4a3022100c2e78c2f9c4f6883cdc0dbdec12456df43124c480179a8a03a6989b89dc0c258 30460221009aa1bd06d815d3993e4450dc65b27031829c1a18e7d11a15965ac5836871221f022100f951ed56cdbdcae1bdfb871faa5fcd91b965f4ed07aaac7dc0c68fdff5d6b4a9 3045022100fed221eb41488cedafa82770bc5178e0add6eb7216fc483a11a3ceba8b1087f40220404ecd06b55c975ccc9da728e86b164be21193842ee5126a0119311e36085f35 3046022100eb2a69201f4084759d681ce9a5d7aa6598f418c150d27f45e04c343278c54d88022100c856d4f767a7bbdacf3a9c9d2e07dd656b306fd5101328aaed66ca4de245d58a 30450221008d8e590ea3b45cae5b55a898bf24bb418aa9b1714f83307787d5fb0edc0481be02202b4fc76db22c98e8a553b35f19413834b321bcd52b07b6e692e7f516008aaa77 304402206efd5dfef3ea95b7d10a6660cabdbeb12f6a653d0e4da1150f26d374261ada6b022069f968a900a00d51f9fb36b5fd5557b938e2d7bb92e28104379f1ff76bb10023 3046022100e13f19c0df7dd9c03d8c61b6fd58ec54c7beec2a44e38472f4013ac666ba9b13022100f30ead061e90b37da0641ca51e530353e0f587d002198fde650117d9263df995 304402204be3e8fed18153f4e7e51e26b84f3b63c1e8b2107e225184aa787119570ad9b00220159822e959cd57991f76eed8e3eb0bd056dc45763189d4fa16670866abfbe470 3045022100a7c920ed3f4f3daa278092d6fd5f61af9f2868606dd5198a4748912d8a58654c02201aba21c9db263484650a6a047149abd3f5cb31aed256235e5243e363693e49f7 30440220272073720513984827d31e4247babc55e8ed727672caff4c0dd3979bd7f5ef2e022019cebc921c389c9959b614910c0e16900cde1a2e8871d07766c30f251421a8ed 
3043021f13fa5d04c8ea2b1fd4d8eec68db7ee8d9ffd3859bf65fd89d62f4f1eb383df0220302454b7431b78cc7aa798266b6af5f1d88d7c800bb47bc9280e35d656a09092 3045022058920aa63cd2d3c3c603226b7d479860acb189e8ba024e397e97f2591826e112022100a473be1c05716021c98ecd489102c2642a602c67825cd374fe3d4642f083fcba 304402201dba1401b5eb293749926648708b19cef34e773f19ddc19e2188a1a2dba6ce3f022050c52fa95944ab637da90f36576cdda9dd88283add1af6fa7dc1e3068c1f2580 304502210097ed678ec2912eade9609fc8db311d8a2d4dac1e2aeef7129ab29f61d828c5d302202de1c5c9bbece619ab52823f5a7eb6592da9cb8d6982f325bfbb1ba0b4a39110 304402207ca6a1df3b436b6cecfa3b8bb77e17ff7b224c392c2c093c81ae0e2af186ccf902206d29ca2d3515acded37ad6a6bcabc6c2b36d990928433424f1ced16fd11c3886 304402202799c3433a7db88a956f3dd750bcf27016312efe14a8ccc9042c1e892fac7d8202200f9aa9167da3a043702659046329e6546f7fce4f98dec7dd5980ac4e9f6f7864 304502200843c264d940cd911364c2795bf74e91aadfaca8188519da1c664442f6faa41a0221008e178e01025c0092af3219ac31cd1de0b7805519454e77f42de66cab03ffe682 3044022053da46cc1634d254201257e654a64a4167a0d256e0135de63b8a6c0ffa4f313a022053e975d596e6a185ea34c24935c6fd3df65d5ac41ae860da1f1db29e83cd929b 3045022100f137ab83c4eae67de5d448cd3b7efae40709c9cb4d6eb621d26b236e7cbf9d3f022039f415a3b2940029efd8fe0426aebafa5eb08b85318885ffdbc6d0e19fbd7f52 304502206287d15655088ed3de68eea949a7038402316299918e6db9e9ce7fec22d873b8022100cf1fa15ce6469d7b699efad0f36eb431b817215b901bc82929768e15282a950b 3046022100ef37bb1b6e2c986fe0db7e63ad170ab23bd816ba01bdc38cb4f120be1f56bce7022100aae2371db98e7121f305a52b779ecb65066298e7a1e5178b55324407f0dec471 30460221008bbb61659ec967b0b85b097d04d0bf0a50a42699d492a742ce4bec0330294131022100849cb33e5fef32d2431161830476d3de0453a23f7af3fdf2cbb89254fc5aade4 3046022100ebc4df941bd3acee816f15b5b08ca8e8b2b6fdb306913d6ef403cfd5ec0c366d022100ffe227ce6c2ca4e0ec285691d5cf0e270136c40e8eb9c33ef8e2074162030b14 3046022100fa8e4292d7aaf9759c140fe5281605c15781556f3e3b725ac41731786af539af022100e64d2016f4cea2165e632f7c6bc654eb80f22ec8f093965ab2470fc647718149 304402200c5ec90c3ae13ed9c79fc8ff737dc803968cdb7b173ccd67cb7d4ad4a25762c702203f527fe1cf1ab6440cb848198f1d0bf899b61dafcaced76a8bebf952f0b0ba6b 3045022100c3b455efa956accf5c5d1d2211cfa43999e729214dcbb93ccef529a07a90661a022032a3b61c37787287e75070bfe5cd60f790c0ad2b18a2c48c20befb3259bcedc5 3046022100b3534699f1ed1f5e232446223b398830c07f61e4d9e389edbcce7df4b147b992022100af7e064288113c559c2edb78cfe28e02400ee46c9f0725de3bd588c75c07fa9c 304402202b7475e1e3ad118350278987d571a52798e19f292fc77c814b29fbd58ce380210220255deda5462da8b9227ef56ab85ef87b634b7764e5cd02002252d3f31120a5f8 3046022100931947f3a8f50afe0eaf4e30e013d8668fe74e8c3ecb0275fba547667354ef640221009c8d9c1d783223fe3fc63fd3a6d6756ed84dd060175df105c89c2ec3dfd44573 304502207193863dd761cca8da6d5913f8961ce3f920dcf8f88c9d24c1d1a907051a8e94022100b8c73b2a1d0bcdded12cba927ba6a7c4c268d1efe2143f327c1e9a80451c42e6 304402203cfa5c80c31d756e7c5b7eb9b25075dab832c2f998fcd722ab96e8baf6bbd4e5022063378c596f990b8662a6b32110aa745c3ef651489abafe6e759c019c9241ee09 30450220128643258a005f5d585fa8b4bc32e41a5fd6d0ffd774ca868f013a5697cdd3fc022100c8a867d8ce820ee7e7138d1ea0c3c772ad7121339a09b7891e71ac2e1bc925b7 3046022100adc01659354767b70d9ed07b0d4875f8344abf2c099babe9161242fa54ec9a6e022100e090ec3b85b9d48b660f57e4022a1d307029e539fa6cbb0538544091319dc3ba 3045022100ac5e94691a19699d256e053ca46ce90c027b5d79da5b97f68a06f56e7fc2168602202864eca5d90a09993f6eeccc155934d72992604b70c030f28dd24486dfffb950 
3046022100dcaad56a710792d3dd2301d5ee1d651c77a8028f4e48d2840c27b47c53da9a1b022100daa15294fbeeec69df340b5dc6e5524a66c61b2f660810bc71346c6ee2579073 304502202bb806254694d178c4f258a0baf484ec1b834c0a58ab60892ef351b09d14d084022100df1fc4ed51f5bc3f39ef47899e1d026b3b322cf0f4df66ea2a57a9370b3bc361 304502206f21631d20667bc352fb134d71bec3d86eca21a34fbb478b7d8f1e67e10a739b0221008bd477b13164d056ee3a061438d2169d697bdf6d2a3fc3801f2c3115315ee0a2 30450220335ecc8547bea1c09d53e847359c84939be7ac4f56487de3d86bf9b26d322fd0022100fa3d68b9372235896f5064c406df38276efe98e308a86f30c456ad3ffa2bd19c 3045022007f2a2e37667665f831c70d2a64d178bdc83d437e708aa6a770d7547c58d84d4022100cae8a39db662f9f5d85fc916f1bffdfedb7668670362e8a88de2049506f96aa3 3045022100e682d80fda38e2667ce6eee1ac138e7b0ad7ca98523320ff91a18eaf5453fbba02202583fd3787f428f2346aff90858ba493c6600d470ecb7e047a955ec980f91b18 3045022100f22f26ea337b1f52681b87593e0de4688b19fc861fed024cb7ac5c628de2bc8a02203dd54393c0703e6a893cfc528fefbd40c3af845f177a86f9ad4e959a49e9e060 30450220148436dd9845570abea20fffe9f236fc98e5903e0cb71a2a67306e78b046699d022100bee0f59d0ff4c63968a3683b6a1a1ac540a4fa366f34d8e19412f504b911cf4b 304502202b9cb950e581d616bf286265d6596c9d783852e855cd7f907ec9d611e7f1d5a7022100ab946f35e0126fd410272f500cd468f272afa581189cd6bc60b6351b54adfe0d 304502201267aadd8808cbf23f402af6f6264070861952d10a8ea59a527301164d50f1ef0221008c076b8ed0f74f9992f264a22addd2f79c8673f5d7d0464a8c87699f32723261 304402205a527d8c463077b958832e74bc078ef0df4ef7d3c6a3e3a7fd0eb956d5dec0620220053ee13ddf5b8734f5122db4ed88cc717c1aae1f0f52e2435d436736ca552c85 304502206e787d39f36302f54c32f2ac114912183c57cf1bed2b01fa2aabcab7013816d5022100d17f54525aa53f0ba7c75cd487cc87138abd706b7a5ce640c55634aa378c96a7 3046022100a6fad6a2ad9fa8ecce319e91ded1ab2b0a974533abb590fb44c3d02673fa79b1022100de9b3d728f1a7fbcd09e2114afe3cd20e2695cf58b190ecd12c710c89d8e519f 30450220265399216e57d59909bac4534a82983e0a27ae10606a74f4d5e7d027f3fa0241022100a19cee591006e624c8d56834c7d9f0ffec9830372e86b75ed9976855bcb1b14d 3046022100a2f3f2c5862a745f2ea8c983883b789150c83eaf9bdd488b421cce8f56652c45022100aaaf92bef088065e7f36d4298e6f3d6a56037c1a741464df851663dc8a3eb2c7 3045022100eb709e3ba4d62f8617c29667492cb5a56fe12e493e8ec720d6c456673636d23e022038592efd29b8996f28457a6a231fc4f381d697573e1db7b9a5e861bcd2a3c052 3045022100de87ad9d9d0702c48bc089d40e0a918ecafa247f56399d1bb910bfbd4987808902203f7c9fda4d9434a67478e1cdedbbdc78e25b1b639a458e294c567716374865d2 3044022039fe35e41fab1a0da93ae58cb4978c8fca8dce43249ddf3a15b87a6066fd0a2a02203106729b383acff4c79a1be11f2a36706061aaf91bbb3a95d5c915a5097da25d 304402200745e186295ffc4cfb56a6536955cbf0333f4cd02b3bab63802dd211b69f9cb6022053cb36b8b0bf547446f1972a2914d3af996216d9431fa8783f5b058373e78b7d 304502203bed9e2e5b9ea648360a6856fa66da4b51c8e092c44ca8fd8ed6ae90198765010221009d7d7a379c49eefacff63f725be417f95f2a42ce46cd068425e51fb196eaaff9 3046022100d0936a0a0cb1336cd4278f560b2a52e4dcb9e36e8751ddd1dd687ffab283a261022100c972e1660055dc1d5d5ef70113150cf6a1dac0c86367503fbc59214a7213fdd8 3044022069f2e8bfc3ae668c06594c3204d04c44adcc14e4732c357150dc5fca8412a56602201b3610d34e371739b7fd86cf9db49927cd37cb99c9aa3ac5dbfe033a4a57f35f 3045022100b45952e1d216e42d894898fb7dafbe66789306b8a08fd085c55b2bf0fdd77cf5022078ad670dbb88b1efb6f28dbf07843d09a6e7656c058b66da782e19c28dc1984e 30450220417ab903208a0310820f6842970b33541a159dbacb6848f5ca3a0bf61d0510570221008b8896c88f4a327b434bc7b58ffd3742bc1d4dc9c27ce4dc24561447acd2fe9a 
3046022100aa5c3739ee2d44bee8ee28147866b309cde3e0080ed9f1b326d97a5fc9c4abba022100ea01abef21399df6dc6b561a9a5c3c914594245808bd48cb75cc9e0fc991310c 3046022100fd1bd7cfdceca31b4cb011e8eac20953773f2bc51daca94c970d4701bdb30558022100e7629d2707f5e80a3718de735700c54016c794db95e020152c4340ecef2e53f1 3045022100e1450e862b01f3a0d0e7afec644550c49c38779d6366366e9172b3dda09d350102205aed9e8e9fe88a3c4c0e97059b64cd5a6349891993060de2e4a64d6cbb4c0eb2 304502205fad28adc1fc01272f1564ec6035ffc14bffa8c991ca09101fd6ffe268d35b5f022100f3bd7dab5aedb2c147cce58c4bc226298d6272db3f58b7a10381f36606dd00d2 3046022100a95b84631857e1e3548b9faf68f76e6a731ce9d4bb818ada61271a3412e96f75022100a72440ccda9f3d55b6cc653064ac9ebf4512f3d43f37cce2c40b9a7eda03a4ba 304502204d5dbcad928f91fa574bb4cc35943c4e79225be9e80196460feee2bfe65ee4c9022100c22be7dfe622a25f7669a8ac6862397857bdc30edfd9ca6d7bde7c7cc853f3ce 304502204630598bde317ccdfcf417ab0faa3d39c9c2e1b35c7d2f56d83b5cd330442a74022100ce35c7d85abe28f7d6f33b3bcc0adf1882c123370db34ef48c05223217249941 304502200ec76d51f4a2cb48ee70016a57efa142d32890055eb45c132a9002b0ab40c396022100f84a1b03422f62563a9d39d29188818da14015bb9a448bfe3989f24edc1a4cab 3045022100d8b5be1bca3c976eec942778000c7b76903c2da5f32b78ca43d3713401224a5b022072adf43ccad1454fd3bd34520977e01f9e9507e9760691cf183622c0493fd383 3045022070dc1f6feaa91ccf9d5d450f5249edc6afe1da397733775950f4c2f86df31fb5022100ccd32f1e9f176bece56ae18a9142a9586b8dd406765bad7534234b18719fe5de 3046022100a9a8a241a5e02487500c5785aa866f396e0bb2e6c51fef30aa5aa2c6e5bec9a8022100bbad9dffd62d300d4a4be735915975d696ac55a773f3db91969457e91a35a6ff 304402207790e80b153e8cc23c4d45cf67b8d0da2a07591bf92bf050c79436cae8b1bf180220359c97cddf424c3624b46362ffe5fdcf890c7a25d9367b3a126e4743fa29eb56 30450221008d4585fc684fe2fee0f38d411d9e701f5fbe8991b4594ba467c6190c758ba77902200d99cee3f19f2278bcce145b2849862af0a3a7144d2f6285b7361caba696bf42 304502207d23bbf2874e09487491feb977541c766e8b3fb93a4d6727ab2b162f403b52e9022100eeaf63cf9c585884848856ad171fd1eb6fdbee57896a6ca91fd18049c6dfd587 30460221008aa0378f5c5ac6f80424595be0cc5505792c3d8a20669f647e6dece9f3497613022100c383a8b23027f7e36802aa28f203c85b134846293033c8695e817f0fb7f61c6a 3046022100dee7cc9f94f59b5760bb512df7805c03f387c34cf8b3db1a205215b849741909022100a3b9b19a691ecd9f12080a538a62aa685319c431f0ea327f18f690a7cf2e0fc5 304502203aa6ad93af4ce663424f956c27738906ee6dc7bef44c6d38a2a4fef04ad1de0c022100cbaf18f78a463ea34773a0bbf3ef1e9fe8aea1b4387bc123cbe98f512c6d38f3 30440220309741eee8cb7bb4a48948262dd8a6a0f3728eedf34548c760c21edb0acaccad022075e4b91ac425b0a89695a5ad73c38ba0f1fa43c5a9a83b4f4cf94b80eb4a727f 30440220111c8a153643518ed440845f291e583167a126253bd49b6d769a5e2232068e5f02206984807def797c684a1510e214e02fc9173b0192d85a44415b2db7524531d7cb 3046022100fad1dd70e59c963b684238f86296ca67350e838f5633ec4a1a71874261e3e425022100a30495641b4d00a6856cd54523c39952ccc18c13c7d64db4bd0f85597adeff32 30440220710037412e0929b17091b337a848b33fddd560ec66a7ccc0e17dabdf8c71aaca02201aeb4e8de9476b9845fedcadf2af69771ac97290ec9813a242924b785306a551 3044022015439abe211a3da18114e2e44caa173c005b158968acf3db6c360035cad78dab02205c00058422968f67d435206966e2aca3f645927da01a2d8cf10362311158cdf5 3046022100ffab2e552ed9d4d8828bdb82b40390447e52d4e115f4fc618e74a984d74eaa01022100ddcad8084ebebbef73921191891e904a9ca8df99318dbbc68357b509d60afc72 30450220235e8a81f845c28f3003464d5de37c04afebe974c43c14076bf3be54386967990221008e1a462fa5333bbceff111012e515c88afbf866c998af5a9e3e16793c904ecdb 
304402201c115063048a6bf018318195f98e7d2f84f62a4dbedac5cec07e9365b6200cc9022028e9af8e11f42d5d9dfdfbd8b11eea7cd1647c9d7b507c827c19f90987805cbc 3044022005bbc02d0048ab4e58df1c23c5a9856188258bdaf939c6525b787ef154f1a5de02204fb1a9a7137ae6d6e91249d6a0a0f58d9fe993c292a88b93e743f66b105f7d2d 304402203729ec86d9d37f384952cbf6f9bad2ef02a8c2731e0491ca17726324cfc47858022067a679687267af030abd2547a21bf7b97386a753d9703abc0be4b1fed9599a86 3045022100d20ff833035a18cf4bfc952ed3e18b38e9e7391308f856ad9fb08d99b9ed691502203991e70da24a3a5bd2d374b396a9dccc1584ac085fe4292e73d1bada2da979dc 3045022017e79a2c89c7b517e0aa8d79da4f90b343eca0eb4e660a346540866720719bb0022100fee87525cc599e56189a7778bf5bf0407650713c56e70222a543014e52c0df18 3046022100a2e65b4bda3040f69b026ddf69be4e9f8dc08d3ea27cc80a5643332b7243cf73022100ee7128d73194696330c6e04ce500f6fcae46357d10b205bc8affc38adec30e80 30450221008c686d2adfb9ffd986a3e9c4c8e819a9fa58a0a314766a3feeeb373e1d32866302202ec53828f99f83e3da6540a2766c14c20817c48808a61931b2f110aec5002d42 304502207ccc2534f14aa0efd07b1ecc781a889e6cdfc5343fe6459685b326476b946af9022100b52412caace6c82eea8428a250b3d7260f98889aa408d3adf5903796882854d7 3045022100ed0eca93424f0c4e675d7267b09f4c4d9f3769f46078150e63c95ce5488ffad30220536b15ad64e09e960649a627326539a44d72187ce6162247887a95482337b715 304502207443ac679dd9a11db8ed6bc147246a3d130afaf09f6d696d3d5b950565858bda022100a15b064362cc06b3e2aec6420bd03ef4b9300f54e5ec11fb3730ad36983ad0d8 3045022059e58e86375f575345692f228fe033d6e3607da402e222d8c22379dae73b9f84022100f2ed28beb6e2f3d15df0c4c9b88961e82d6789a95b1f205118dddba167f55c23 3045022100d2d0b58aac67cc769dead26801973dd40bbf62336bc25e21594ef0ccd1f2d8f902207b70fecfdf79da7221e2e4325b6ce75660dd4671443179de02d5e9dbd8ce40c6 304602210086aa174f5b028e0dc1bb3e76cae1fbb692dc0b8ce4c43e5ed54b8a4be9a95574022100c1ededf16ab5cbafceab0f386495d75a9d366e7ccb87e10de0742640d014bf46 3046022100cedf86b2cae527f15c5422e401154fcf13b07bf248a37057f185a68e51670f0c022100a18671f031ff270c76033d9534112e41c52c819e36e9fe776a37caad9155e3fb 3046022100b907f5b202704d432b60ca928f62f58705deca3beab210380dcf19309c4c789c022100a5816338cd048a7a5eece1864c2657bde02277b69c6e290c40bbe1a830e3e8cc 3046022100976396dcc3fb71d8a4b53f168bfc6958f9d91823ba47e152d2a092b2b8b1a486022100c77fef38d6be733e94ea5b58586096bc856920589ec79d82504dbf1797592cbb 304502207eb35ee47953c0a3afcfba1136c5c07e45395ee05315f26c0684fe8d0350a4900221009199a092ce3ba88541a272cc393a83698f9feea1466c2494d4ce60d1dbd0d35a 3045022100eeb4d0512fe10dd094df4705ab5c80dccfa2f09c39bc5ae17688bbde0910e6d402200a0ef6947501f8f8f2ffd20c9612630e9c22c4e8af12ebc0e71fee4c85ce9a7c 304602210095ee1de27fa2b8efe3bc5c6b32610880584a63988db71bb45fc1f2dc29853f760221009d161d0b8c9c57c48c00c7667ebd538292e5361f294241273f041511f92210ed 3045022041a95e08643c90461299f07dc86e54f59a8b748a3722ca7aa6aadd604921ca3502210094ea8f6656374c70c5b86a6e6a2316591db40e0647f927428706fcd91e86633c 304502210090860683b4c5058a6a82b6cd5f8875cd0141747347f98c1fffff43c6edd440370220056a871662e6c4850ed6e22541edc81e677060b9e6220b880957d7b7e93e7f4e 3045022100c54bafc8baf20011376f23105bc708e76f8073c9b2a34b040e3705c7c4bae6040220568a56bbd6f50e81b08d255036792a8dfbe6a5a7ca4ff88a5c07d514f60e1aac 3045022100edcdbfabd167ca30fd2573468d470a674f6781562afba04e5001ea145a6fd7fd0220648bf8c79f04ef4f9b210b21190fc12247892ee46abe62da803da1b81864c9d8 3045022100cf0fd666cdf013b4fc97af9216d291d5ec6671025284620ab488d388786dff3002205c94d7a0d441fdfadf37f1f85b52253e01760eb43fd53f061e085921d22c2d18 
3045022011ecdefad087f35a451cc69630189d10b136930afe9eceaebe6766599d82d36a0221008e7b0742ee4858132328dbb0434d9d9e821bb647b60421267f37f3f784fd8f75 3045022045ed6062f1a46d5b833ec9706c77b979846b78c1c53c3fb09181f4081d8c4036022100ac3aee2ca76837821a4eeb23d50300a0f5dc6f456a355e6483e9b3fbf97d11e0 3045022030e34bdfe47cec03f12db1dd78f094e318e9f86c33dcead5b818e09d3ad3eee2022100ce97ad949474b01a7515fa7a1e438cd78e4e71dc2be82e4868e6c3ce50a0e0a7 3045022100bea089deb10d2f3bc58bc44d82d0396a8bdbdc1201f12f9d2acbe0e71eddc32202207bc183070725ad583e7624e6c13f4138c8baab9a3a3a6645292b4ce81c24012c 3046022100c4d0fbf9a06ca50ad957dd5959152171e0507bd4bd629f59a298025781d6de0b0221008877b3b7916b7051e120ad063cd690358a29af0c78886280acf5f5be64bcf462 3044022016088a3580e299b54bee9d8555ae893a8b26d9b239eb6670b8d2198299cbe4b602200ea3ac4fc99d93898bae433986579f772660dce002f8b3d3f1eefa90952ae365 3046022100afcdef3c14dcca840c5b9581946be7804ffdda020fb8417551897ed153140d38022100b4e9c92ba2ca897645fd30515daabc20dd29b9e8d3d6409a8e232fe0b793df39 30460221008404a792128489cd1a879ac6995d2d8c7105628e32d922002c5aa72002298e1d022100f7b932d50df8aa648c6cbf6d3a2d68172f4a7e6870068e7d50c1381ab72c1214 3046022100f3c6a83226e0137bf432bee773772c138e23629d48b18df39783665ff9904382022100f209257bfebc10dd979b5843977dc64b2a4b060233c71e6dd85f92b8159ad49f 3045022100a539001a84c63607c2c9e2cb547dd61d25a3a997d82ac40f63c0f33be266d2a3022060054300680a0c988d7ecc7520ef5547cc8521bd871ddcc91cedc9eaacae404f 3046022100803be6309a43f8483e6de8ddf9101a438aec86f67ce3f7bfc2025087ad256fd5022100f2793378399a9c12593021817e8be018bf64f5f5b6e21aca17268a586db6e92a 3045022017eac87f5656d9bcd3309136cce15219aa352f54b662fe7d2ad19d53c9e367db022100e9fa00bff87f8b9d5da102776ad8c2b22fc631f46f3338decfc1c986aa1e3536 3044022010bcfd188f317e6b6640b007988ae26091963c32cef1e15f4274f4d38f1fdb9002204c2feda3196e8dd066dccf54c2d09ee7f5f20ffb9a6173d06df57db89b372078 3046022100a3cbd1f33cb9c011362dd4b7a5516b31955d34fbda10f267437afbb30e22a43c022100bcc8ebb4dbab152e70f83706ccf430c87c9d213b09a545b74305b2c04cce62ad 3045022030761bd356c196f9f82020b3334c11efb2314a3fd1df4c3499140fe3affa478e022100de2f6243d463fe48e5a1c1446f605945bdf3adabcc155ebe8a1a544bff68a396 304402206a0eeb5f45af390e708ef16d6ef02c4c1db0a09ee75117ec03d7d1daeae820d1022062e25eb0669d6f3f55d7fc971e7b22f76987de9cc88eb1d65cd138f230f04df0 3044022035f63d186b857fd2e3e34365a9718df4f528c330a68945c6491d27c7c622519402202126969fac1c65165e3223f2900364cc6e489136bdc608cb09713293f9615cd1 304402202d2ff8baf1d4190c809bf26beba90aa459345a722cb733ff1a0ff3d20523085c0220294ddf49bd65c0de9f2d53749508dcddca5a5805f1ae1072f9b3be168f05b05a 3045022100d1875755588438cd6a3dd22a3d12ce5dc65e5fb247f092a365ccffc0a9bc7ad702201807b33840861b5d1ec0c2585c5ce4f9e41335d4a77b6308a70f71fb2864c3fc 3045022100ceb39b3dca3a8959d259536db79b1e9643689bf1680a4c3285056d9cc9d0e1e00220186dca2984d108c77c4a028a3d80525c1b33fafda6ec7292776ad47c223d760a 304402206cabb7dec1dd7e2adf28a1ee34e9a8e71bde965b8308209ce817d0775c9154d10220295f3aa908bd76db3d568a6e39eb6cfa96cb7522c30a1f15648f94014d6b8a47 3046022100ae54d29bd064f0051dafde3960b2f83054237ded98bd591886f5f4673a9df625022100a37b96ae65eb251e58719dfa45e7a7b16193d44b26baa109115104875ac469c5 3045022039bb62579e87230acf446cf86177f3931b971f3efdcd7fdc55136c0247e86647022100fa8fe3febd9ff30a510d5a9900031d8a1d181595df48b7ac3701b7689eb3f716 3046022100e14da5fc721f7224928993f103e846f01829d7c9349ef39307f55ccaf25537df0221008226064f5dc40957904db0f50448df3a171c5a312112045d23deff22aaa9d103 
3045022100bc5da85ca693737a9e7e6b5a34a877e0f7feffc7f2334a6756d3b1041cb6f3a1022044eaca9fa32be9eeb797d54aa0d9e9aff8480fc869e6d8abed476d4b4763f8e2 30440220431012806a8180b1734575cb1a2bea16d678565e21f3e44b761c0a21b81f847202206c0f66f33657de1291b07b8851035b8007f2dd04f677ad204a7dfa0a84dfa0d4 304502206ad6ff598dc5a42fb144a266f8b85e86e820b0e05284df2f94f9064dd55d6137022100aca6449abc2bb001c4c5af3978c90873ae6a89e410fa40f9f955c678add641f3 30450221009efb94060e064a5558b9b591c2bd9e8009f40dcc2d765ece4a044b8a86fa8d39022038de148d8671564a1f5c13bdfa5f189741ca3d95db089d61bc89ceb75ea73c06 304502210082b21593d4ed9d2658f1b0c3cc1bfe4c57bf677f37591c40ea652967216ecf8c022002079c604f2edf5d137ccd5e40353bafde2db5611000613b646e8cdd29063384 304402206716adf74b234bc5d16d1fbd0239157280e757edb3344d07b6396de9cd5010b802205eeb2a4d2fc2a2f28893aecd933d8faf4e52399139a3af28b79cdb34a208cef2 3046022100f703cea33233bb10aafe8e46dd517ded5d0bd9d36feedebb4cee42994a724828022100d0c30f6083f61dc925e248073a7c25d2ba619db906c0ab3ce6fe1e4ec2d46f06 304502206a177a8b10e0dbb54a548d006d138b30439d5e3a23f7a50a20128399217ff42702210098d5c6915978608a9ae3987c22be2956859fa425671988822695e932d1de155a 3045022100a9fa9a8050bc71966f21ac44cde3324d915d1e44bec71da052729edaf4365b8302205337440777217e55e9e26ca3f42b592b088869604f3436899531139ba5c55f0f 3046022100adb686848c2850c8eb9030973a24bf18911ea8a797bacd4383d76eb0220dc8e7022100f35e8a3c86c4e81776a6d9a485ccb3ceed7dc8b9a532b6a756e9ae993e7418af 3045022100fb7b6b0cb2a41bd2fbb152022b00b6c19ea8b6461e7dc4d4771cf44ae5a5aa5b0220276e2705bbc5717aa6d1d057ec8a15e85865a285a7fd169acefbc6d6ad25967e 3045022100a3f65949e7314c26c54ca33e09b079bdaa327dba68c37b05bc169d4fca6c694e022006d41c4b5d9a31861f0118893a4fc53b5f5b74db404fb3ffdd695752204badbf 304402202148eaa03ec05353ef2d3c5ede95c5a6f435c01c3c564b49333ad1fecd4da06002207202888898784312e55501936ed96ce21ef58f75fbb5a350ae2f4f779163c6c1 3045022100b2895027b694ef0367737628eec33854ef861b2faf1864247c57493ca478efd9022075553a10841180a5376ac84911787f0e081d3c6d1447be8fc0ce4f7c9266d2b5 3045022100fa395c05e308e3fb97c3e083ea3bbe4c771ea8e1f6f5f15e5eae2663d9205ec80220389a0ba40ce5ea87825444b1ecd1107a72f1163baa20e9bb30d49b5d7627f2e6 3046022100c4bed6f0b3ed8f13e35de219efcc03a62d7a5dff3fe363e4af0bb0d3bbc47cf0022100a74bb96e6dabca993af80019304f41a9521d9c064363942034854a839130b3b7 3046022100aef234553af20a8a567c07038950f5ac6f33f2d90f4282f914e7fc0723f3e63a022100c90d6cec312b58f0b6aa65df13521b581f695bb7b70bdf967bd67d713230abb9 3046022100bcecf846a1ee38265be77c152353d0c2d2ec2e2363331c0d7966042b8f6e99e5022100c8543052d3a65188d36f85d565bb61208cf5c02da32800e02bbb059b2abcf306 3045022100c7a1f428adbbdedcb7430384f359dd4eb608e280f9d67aa4ef9e379bb47c3c88022041f6112fc5d4068b56617f39804908646a9b4608cc600ddb6b2ccbbf5c84de6f 3046022100c5f6e59c41870fca012f7bb6ef40d6c16785475aa7e25015d16dac4608ba5ddb022100f18dada580b05ca7b43f8e8705054150e60c0745d1dfbea01e5685133569d3e4 304402206f1f47c0409bf10ea4fc2202f828a9dad7c0957cdb413b9d014d235ff8a60a40022010d112dcef6897412472c3a535bcb30d520a9beda2f3e2037d10bc6d386f8831 3045022100bd32c298de7f4c5ad7500fb9d60d649d28c22b080d6290c929a1ca39b2d093b5022036f8b2703c93508bf3d9ebcd3518a2a4b29770a5d38ba4c81709fc8fdebd4c69 3046022100a82ae7c155d88d6e2f55f18075e7fe15ae002168d2a9b6962cf34e83f5b8ae36022100b9aa32830bed4767189ee45d2d6bd7e845776ef37846eb46b6c3931feb7620fc 3046022100a2042aa4f671b870399e641def232ff5a4fc2b4ab483f936c0538c1c348c681b022100e57a11c249339b9a2d1860f02dd6e1e30c4d2ac7416e82bc2736cbfde16b450a 
3044022072c58bcf7ca73d2e236b85aa1a75f5df1048ce93aae37f0c9a2b2479379ca0f90220363e36ab2c0e603d42f4c64c65ed1fd0c55059c08f0e56b5e992e5d3cf4f22a5 3045022033098abea01dba09cb790a7454a9d56485ba394011ef5a2d59614a6b6446f9be022100bdd9ff622e9843840b63672c2c5adf93d15b49a262e5f201e67b89aca5833c88 30450220241eabe215ea92f2319616ea03335b2627f48f11377ee59d0c71b81ed94588320221009e348e7b6c8f2a7b7f6ced5bc743c47bdc89ce6c4b025e164cc93755c896cdca 304402207a5ca78f45f7ce053d994eb015a3020fda39c541f3b105bc80db41c77372799502201804c1400dae4ed70cb86ad1673e67ce667b62120748d0d9f9ec866b827f6119 3046022100f7542405d0161feeacf8904f6d4c46a2dae2a123343fd096cb7755505578fa63022100df4914bb02b389c737fd395abc867a33458706345ec97db2efab71bed86b4dbe 3046022100866acfebe8d47239a60219d937f770974c1d940872d88d2715e0112567dbe1cf022100c11aa40bb5e044daad29da5f456d526fd082c109672ac101f074b3be2f74d224 3046022100dfc1c08cada85aae21ecada369bf2d4552d457010f130f1e9485b207fa24c601022100ff823f7a9f4c2af735246f6803e2cb992bd589cbab28e9c3221f9d09186a8c71 3045022100a97fe25cf515df84bde490f35e6018034eae6ef96c601376e1ef8339934aafa80220099d6db7646e6a2ad9014a92ff85ace94796642343d2d7d7718eee75498a7d34 3045022100988ab9b3f0642dde969ef3a62607b0f26bc69404c7c57ab92a863fd36183ad3a02205ea1e812eb00126142d94d663487c929aec1ccd66dfb193b71653d827c1ba7d0 3046022100856a00af916b14a7976dbc419f1f3d2038d3e4b5153025eceabb0f6b1f5e1289022100af8d3ee68b3a37d3f8a706f189c010954265e58b16ce6af018b8e06ddd51f9b9 3045022100c7b2b7b3db5ace3d2934453d8f4bb33ac346e693ecb870706525b1f7358bfc1f02201856336eec1d2555f72b2509e555a5047b12c706b761660edd2959cb0a56cf09 30450221008d34c3624b556483c47c68915b7cd5f7897995a69251cc40b854fbee8fa5e7b102200ef9ace3617ca750c90f2183935a63b3de869057f686befa145166203ee2d502 304502206a318c713e8d9e15113dba9685d89852a5bdf20e2063ea406debd624c9567961022100ee7a19f684ffae5f14cb0b552d9cd588799542ec3aa600d4dae2e58b7e076e69 304402201f14eb7ac1dc8fa2503184f1a35cd08b4f8795e5533bd6417bd41f6eff1e2ca802207ae0afba6d821849fc023c3e75f17e000313c2d67ea103b227d4c9b784326c8f 3045022100c9dd779ee417f2bfbff10cbc3689bf385f1571ddbc5166eafa26ae4b1d0af4bf022007d41aae68d33d6626df7a2feecdaa8cb63666254d94383144007f2e963c714c 3045022100d10f130c83c618e2f367fcabf990665e5d054a027fa380da9ebb081b1fd59e4802201d8c60d501cd83e4d0fd8b75d908d4f877acd81ee08e0b814dbb4ed89b804863 30450220759467c5f8222026d9263a358b629059e9abcf4cd6aaa0d3609971786defb96d022100af25d6a01ace3c851970903f91f606f8ac987bd2526cfa8e958ab44b052bbb51 3045022043e79ebd2c064e50ec9737e07a7a9dcc936a2fdb07249590db793b1c66e022160221009d05ea1c8550ce0d2f02aaeb1cefeba3f11327f5061980a385a8fcc76017270a 3046022100d2cab5958610a3f9444a6c2319ddc26ea0cc02a5945261cb628cc9f4f976bd9e022100fa36802a314848b4fd6c95d99413feb4afa2ff97ce2bd6fc09c8d4fc04181c03 3045022100d978a5fa07d852f1552ed219be21393e0c9267f573fcbdf985e43ad17f09aaa4022077e141664522e5ffd681ce1f0fc51267e70bffd01b5b463d3703e944d241acad 3046022100a4c27860414b37b78dabf9b69eb55e173780e03db249f6f5cc5287dc555ab81e02210080fc7bdba3128adbd58d04a9eeb4c65eeae2e55a1bb4748094498b35769b3569 30460221009a3d9207b1ddf7f218351820cda31c0efdc714de2fa2c516f516870e410ad12e0221009c6e2914a843877083f3f07a0cade75a522c36ecb393c3d6b7cb42079986688f 3044022047e1320f74d1c8e9e7ffe8397f10a8a3f936b0a7cddd51cdf675497a7f958c2b022004baa2422b5cc61fd94fbc204922e38b1c7bb88ffd87944fda0f0d1745c0fbea 3046022100a699d291751593740030e41fbd35bcf2fbe4076720cdeea89e6bdf696dd6abc0022100aa538212440cf39cf271a14ddea57c1e6d58d9d826fe38170c6222113e30ccea 
304402207a79e957c94ba049a81267c3162d8315dc126d17702f1bc0a24687c6227d182302204fa78e0fd188065c44a1b813ee402d5a908d760bc83ee064b2a9ca93c91c0ee5 3046022100a89d7f3171ca56e2394567b80dcc321d25fb1af12e973ff477a953bad3744c9002210087de11b834c7fb7752376803e2e130b85cda46f4581d062ae2b807af34d659fb 3044022032c27201165d839ff5015bfd2e0302b06312bc9888a9a8e97a2a4893a06b2f9a02200bcf5d08ffb1dfb08e36b1529e49448a45bee177ee117796c6d4f53427715300 3044022049203c0fed8a6399e36cf519f4da705e695cd73ccf0015cebf25841a3aade17b02206979e262f726670acfebd6385d85641c7fdbfbb94e436e95d3fef70eee5c95e2 3046022100bfa37036547fc3e536467c22500df103f0c57b2f646e43d5eaaed65f541ec2da022100dd8392bc5ccb9973ec0e73e4566d642de5bcba9fdb5f31aa700e063f916f5d47 304502204779bdd4e41a596c805e87ff8dc93f0e370e3b73815b915d4f839de703c3618202210096d17bb41dc77903f07acc682f3a3a28ff4984fe236438228282ad286b314d2d 3046022100dca367b378950642fa60b2eb138da7bdd60e2233540e266b5e3733543e0988120221008a82b7813f6021a7de595ee728f1a184f8122b852d23886ead5f023b9ed10e31 304502207d498d22368ca4ed7bc7f02f949ee49cfbab0d61ad4a9704f35d9e41018f8f08022100bc5ddf673514fb62d245606860363980f00d94ca8685580a41d167a10e6403e8 3046022100c2bea2b9d46bfeb871f96f00e1760e99221140f8dcf8af66c5700c3d1fbc21db022100ccb35f9c792e7f06945d6d833cedb943f465c409725f5de9e4f1e3cd842074eb 304402201512563a862f25942a2ed417924227720ec33fed4ca423490092328828d16f77022022d5a0d1304c2661bf99faa8e6eae43bd86a2b011d27192334b43c5103f60b8c 3045022100bf1e52987230db752df30febc69e055a02f6fd0f78a6e2fe082f43cd716e67d9022065c8f2986a767f21fe8d58ed9e9828809915df19d5e58d24ba37014919668cac 304502206bc194f27d93ac53d60f467fe42096ea1bbc6a9e50be0b49591c3acfe05daa2a022100c548e6426f6decbf04aba8f36f103c746eeb4c48db0a36933719fb22dcc584c2 3044022065d1a56b17653f37e4c015854f1a8c76509f6c7bd3857271cbd3bfffa519c03902207c7dc68df24eb0e480c4245689018c11420a28bda4178ee630ac8ffa6758c649 3044022037d871958f9a7c34561079c6afe5da9cabfc8ee092206e8fa99f49a57680eb7a0220598e5ea06d2b3ec9164f0033562308ad54dacd08e7bc7f7fed6c67917a957eca 304502202a8e953abca375972e5a9e44732b162337b3a6b6508f71574f92e6bea95d613b022100b33727fc24224b2416980fd028fecd15c6cc8a6e4f182721cb266d9cab87d9c9 3046022100a42994e7eb8c69ffa2ad07fa130144c386e19a4caadf765b1f95874a156c0c1e022100bc4a847b8bb29a3c81a242db4b70d816fe06c8a7d38a2207d5ce88c94e9dbc4f 3045022073ee72a81fc128599f63f68ee4a75af418b23502602f60bc05143a87415725a2022100dfbff34f620e03e4799a1dc6e00d6ed3969b166de2900303701c8c1d607dc642 3046022100de3a830ef52130fa4b7118400bde74071a2ae3b1e7a36e9101f82d4e5b0266de022100e8e067de08bfff4d6bcb86a0042028d8f573f93d370ff887bb5e21bcc11d5029 3045022015eda9ba924a3d158404c1401583b4f9b586250a5a926d74eb607a78274bbf5b022100dc953048c9f42a2b77eb19f855d9c5c4d879d10f09f4fb579f1a008a40d89250 3044022043e108af81ef35cb289a7a78543b83b1b2e97a90bbc04719afa78d6b655b7c98022008346fca3b5664e172171c4e64f2b032c1a28559e0c26d36472b7d1b02e8a5ff 3045022100b9a1f23fae32291410844edbf74f1e4f7f653fee50b7cf1634aac24e0d7c62230220477de9e501cce63e86a0882dfdcfe5aca3eb1ca152e36676eefd2df8e4f95d05 3045022100fff4deb0a79c7ff0664d2ffe1be3d88ccc8eee24b5a32ac14a5172d79cadd41a022050cd316c16f6d10f579cc5dc194d68168bc25457ee520e34355fa8e13cd99e3b 3046022100d2378da9062574cb0d8725c1dd7fb80c6643882c077e57167cfa91ec926c299f022100f06358b1add30ec1681c27ecf1ba3ac3af82b96820b13663aff7fbda3b709806 3044022027ef267850bd56108bd0c3c88bfaa6ff35b10772d35b5f501f10bd314632adab022063124267935a2ea7f942385684b9fa802e7195e1b1b8ab02d73ff6b37e7da472 
3045022100ef28548799a85a84864dd9436ee0876cd3d23f08561717fb8516e7efa4f6d103022044d22f08b15ee18a9f851597d8ed059d45e2cc6643f540709dac9573eb9b8ed6 30450220062db1a0ec8d22dce34a6d3d262422319a5d540e7ebd9666eeb0574af577a6c4022100f0a049b87e160d92112f18ba5f40021ecea2ef23ba6bedbc6b42ea8feb952642 304402200af629bff97d9550fe187b8696cbc8531092ad0eec022ab9c82ebc8f9e934af902201db793059ad29f0ba8ed7f83a0df2f8ec20d58f8b9b74326dedc221a84470f3d 3045022100c8f4169fbe6a20f45222a111b5c8d79c30c902bb47c5dda5c671c0d1ac7c1fea02203e0d6e015118cb15b8315048bd1f61910f824af319b48b2f07adc687fbc113d4 30440220057f14c6ad2e247b7d0ef1d0b2bc34bdf5eeea009dbbdf283c625adcfed8c21902207d4b4a693a13320f1bb4daec2096fc4c140a79ca123adbbf3765475606f24537 304502200f3bb96cba278ec1b327a8fbea48caa995eb622b0bba8f521b177f18c96afa9002210092407e9fcaa42884e8815e48680e43c375edb1fa3e6cf4c0c27aa3e6bc98d90d 3045022042acf20cc49c6e962966289e8dd00b1bed9a591355b080b2c18d2b71c4902ced022100ca55a066f01b1c844e9b16d6ca9efc585625061975100e2bb1a9e046d80a8b55 30440220290d7f880a3964b6255ebf23a3707ae8b32fda5bbdf9f9a2d86fe15a3b44f62502202038e6154b39ae6955c08c585293690c53486b417eac38435dcb7548a451d3df 3045022100b2bf980ecd408c4f095183221d970a6692b72d6f76d0951ed7ed49600f48821b022017ff20cce09dbd61f8e914abc81708d56d36e2c16f16cb618aa10327b5758f44 304602210088f225a00e00ec92a4194c14c721ffb1e7f746002e049c32bc2d49e4226622220221009975f56e5fdb4f1b20b453b47031a6da0b2e5677f411c258122927dbb7295fed 3044022027cbd6877ede296d95fc1cdf305e01fec8f001fbb2bcf764dcf15937be3b82090220454afe2e0192193cf7f61b89504e0b7fa0aafab2713a32347e43aa65551a23b1 3045022067c7a6b5660028aaad8bcdbd5f8a68edd5c5fbeafdceaf0a0a3c86c8cf58429c022100f87a76ee07c4f9cfb20226f6b96657e4cbc5aac2ee4d4d832cab7a2b93ee7f03 3046022100f4eea43ad4ede70156a7cedbb517a22119606b4c23322c3962121d3122df3098022100950199abbdbced19ab3aa4c654a60751461cf8489af2bb23149892c4d1c22e80 3046022100be3564f7f481423707e6899ea5b70703125d8cf0f383ac2a314a9a128b180328022100fbf2f6d5e3e291a7574ac015a73119701c4b4f950f1eab17b8562fb5e2b1a791 3046022100f76a50d5a233114fe5f48ea39609aebb52bc3442285417653f67b1e431a430ee022100f609b1e57abc344f7490a45205de4ed87d78cc3e7d1681ed3ccd434e6739b270 3044022073f504e2eee3d413e580f52d8280c7de02ae410146c46557f9ca10b6c35301170220730668235ae9a8612a9c098557d0e378a8ef5e9bb4c4d5c71959107cb6a2aeef 304502210080971e138f7e74655e65ab98a36904f59d9176281d1897bc8ef28f6dde7e288d022023fce43c341c4621f1f5e2d5f164eb2f754070d76063ceaf0701590188833820 304402206bc10e58a352c61eb860fb9262ba004c27fad87bf59e27c97074bbdf4e9f7c8d022027e8b0094b809915d242aa005288ba0bdb9528f1807df29b83451e1cdb79dd44 304602210095c634fff3f16172a55da62b66eccfde121f88d64d4a9363cc7085773d5533a6022100c89938de5f2b5673110ff99cc5bf5058768ad6c0e81ad753a4cbd259513a1e3f 304402203cc0cc4950d327e948af5887c7a0310fdb5bc0f2a8f0ce43a59aafae4daac69e02200fd554995baca7b8dd61d55bec6517ea820951913346587d5d46ba8eb6f9b231 304402201148d3d0c5219929beb0edc9952d2a244a778748404131bae8bbab223d4b3dc902205261a830b894a13c2c51720b4806c9eb0e088a8d6c37386a5ed16c7869a9491b 3046022100c2f38c782bfa1b7ed695cc1b16c1588104422fc1a2e44cf6f91742af1f180180022100b011b0e119523e4e7a87a341959d92efcdd4b7b46a264623e8a79cb4366bbbca 3045022100c6a62da412281f2b9286692667789b5e4a674423962f49260d66a3ff2e6ea3f1022064f86f3031db55517c9349b5cf4d30a73c98aafa438bace727d588749dbd9b86 30450221009e8164b67957387ba915aebb8a0aca86a63b2a2751793c18be2672777206e0b30220797ffc01e0f856e600326daf512afed760b6876969dd3d663bcef59fadc5dc2f 
3046022100df049c23b35a67738349a0936c6625cb3afa8b37b581a2447b92bb2cb875589c02210099606083dfd0e3bd340505329360766924c2ded2827333317e62925c66890e64 304402200181f6c876762489725e53242036fe030e894f97517eb4517c06ec38ea1138fa022055d73f33963056170b25cb8cbfd0e06bd559c09d0160e7432a5b47c138dfd08b 304502206c122c780d6be7fddd3cbc20758ef3d95844c19d45aa534ebca59e0ebbcaf099022100c086012b608b2eba1a4b582634acd2451be4c8160a575421184e966cb2a1c5db 304502203aa9f2b2a7e49a6d5979c50395ad885740c27014f1a94b91a8ea2974c3dbb0c90221009d40a0c94f29b5651ab856fc1146a2b1ce9e93b92784e83e22dd5da427dc953a 3044022005c3ba3d1c42ce454e5c251e80d628c45d3b20b812b842b84e7f6ccbda630181022008b0f0aa4f4db874b422ec70f089130918b2ca9824af27f788bd7e93573861d1 30460221009358546ac8126812a2ae149ee7215c04c8c711b2810c600289b27657657952ab022100a849ed22bc4d4c9cb47ba131a9db1c1d223ac229d0ad3edc19d9c8a7320a37b9 3045022100dc2665133e6497c1c67d8659983bd5fd06919bef8649c2688b08f4b74de8c52302203f36dd44a196cae55e587626a02f780acdf6b3ee44cd6323207336adfcc003ee 3044022078d1f2000dda787416e1c2974b2e3ac88df9f69653127860c789d84a4f4627b70220727bf25d107789e323adff5704a3242030a28c1fc5a3775e24e65c998a0f4a06 3044022046e407dd1824587847339429062d6c127c1875bc92edb96e7daaf630b7dee7080220082bb7f5999da8b768f9f4fd83ecec20c8e75b0491d8febc6ad2453cc43047b9 3046022100aff36fcc402797636474bd8275010535f2d2c4af29147af1abc474b7a0ce81c6022100bd70504a0613788ed8e9b332058465af3e1f85723db360b8c14ed542f934bda8 3046022100924fa982accfabeed4c44fd268f081fe2d7a39e9f9119a635c5c781e8c6c3dcc022100ccbce5fac577876a958bb00983995022a2e843eaf7f882b6449d7eaf6fd11a7c 304402203bf49d94476633bb04bc83e19d3702365337dd1a32e76a3b84f5e6b9e334c168022031eee712e5822039d4fc13d2ddc1aebe0717880503cc8f516e5d869dc1b2a8a2 3045022049be936eec74409fd876cfa5dd532c4d90eb53b36a2aa6561fdbe06d2631e79e022100ba0b5842208108dbe222bc27aaf657e376cc6cab58303e9235e31a5ed3343a47 3046022100eab40558fef2bbb78bfdac19af2130df3dbb8d97c51d1cef476f0cd25443ffda022100cf599462e3ece9ffe6551c8eb2f72801c03069696fd6a807e26ed274608072fb 304402204ec4ccab19a1bbff536526bc3de96b466438f975e2068d6b03c82446ae0d65900220474acbf6cde420e27d050c4459e750b3919371335f3c1048b3c58669d6d44dcb 30460221009dc62f7bc66326343c0887363201b8faed6dedfa807a464c6bc6e896a85d19e1022100b56d69e290e7be728a60b93ea988c4513ae68e4141e6feb0e8e92089f2248074 30440220497933d7def258eee2e1090384115490090f424fc828e4a948f011953fbfe304022041ff1a8030d25302098ebf4e2e6490203f3bb28e1b1ad4e2a99a1cc274327ae7 3046022100f42bcd1c0d304fae7462deb084ed8cc734ba06a80f528e5f937e18a6ba1b6d11022100c045add79a12e85a8a7627cd573f55bcbaca0a684786dc5ce47148ea10ea224c 3045022100ade9aa7ac6f26a7d975426743bc30a0eedea251c563a12f834c623b177885cdc0220034792475ee81d73269fff899513cf294f8ce70a867116c41d9210616a491ea7 30440220689077f866d368f96a219e0d7de35927ecf5723765a4fc66788884dbae49f0bd02201b78362f69b6001bbe6c5b28b789b8a1279cc66619be1803bdb41adba8ecd413 3045022100e12c41cb64f55f8e017d275f33252fd9f34d7982bd3d0c119763f574979ec1e802203128fa57a64ccf690b3c40991ada38bfef1504310a14ac93596ae490e949d080 3046022100dbe617cce46f216c84af14d9f2b994d48f693caa6ff10e2b0405626682414f4a022100d38271e332b130d3f98ed8b5a99ae05f87f56077941c7314bb1fb8db00945ccb 3045022100a491c5c3ba494b797dc4168b32a7e9e38f23575a1226605137140d35ea7fd262022014508d94ee2ce85dd37979b52b7795417f86ade8176a6a27c84bc2281eb24e7e 3046022100fb3b660871154a46fc115ffb0f27acfd94e9275022fac19a839b0d8dd3db6ff0022100c59f1634f384532a515beabec31f975ba7a6a5042636fd2c9b85069499930af9 
304402206360c6121dde88a1ca5ed6a0226fa51f5ac60b21e14bde498f3b6d91a7e824700220774335ec940c9e45c730a03b94172ace5370bb964cea75feb9c8ed80863ab4fc 3045022100b2ee373637e23d809cd9f79d00488eb809c62b47509171626f83fb78cb2ea2ac022054b18d4f0cf14282824d67a28e61378675a32f09e640c582b628736fa12216e0 3045022067de3b0bbed44ab0f58df77296a7b9b38c95949b5252914594fc15bdfbb88bc5022100b42a37f4a206c7844f0475d42f526cce8e3c67945b98083426aaecb52b70be33 3046022100a941ff96c25f6114a188d0df14342773f88a6ccce10601705d7b32dd4c89e886022100b7e46d4c09643ccf7ff8530ade7bdc6e5a154e0ffc96f31ce2dd4610208754bf 3045022100aa6303be87c28554f21c26602fb1f1e2b7e02eecd6123ad6d7558c27af43f4fd02206e11c831615bba2dfd601174b76159998df81add099a22922c5f364ae27b8c76 304602210095a066678ad895d36d2159e7748dd342184585eb916c91a541a2cfab25bd6e84022100a921ad65aaead73b6820a1dc50d35d60b8326ec57d02c2c8c7c3d853353b171e 304502210085c2847a49dfd602ca6dc86809c621462d136f6b7f00dfd653ec9793e4911739022046347f932eeea6b96969cdcade280c77173245b93d10e402be88710447ae8f5c 304402206f4bf523f9bfb74db782df5e3d8a78ae7e96cb6d13b216689f1775c6446f4137022028bdf6e299477b1813c097fdae00fb5a12d240b44ffd4abef796d19c45a0429f 3044022001f881d558e1d452b3e3350c1b56f62216b0d849b0398bda4bc3be44fac77a7302204de39b22880211631ede3237a48728de2e9949575df0b6a25ff70096490f0d2f 3046022100ee7c3f46ae680bf46b51edfe5cd259a0b4ac44fbfd30b098384a0fa6878c441e022100c0ed3bd4e26d51493e59230d7264d8cfc5fee6ac296ac597fc04ce3d91a24365 3045022100e6214ca54b938a3f171fd89f977cf9696312fc3add0b703e50bf508aa0d53e8b02207429400d58fc2b4a74694cb402ea3d30cc9e1babb2d3cf1d40979a413a03ceeb 3045022100ab90454d09e7d32200c16d5a3802dd4838e00976a7610276d21f77a43a191a0e022047c3e9c64a4d84f8ed23741e03984b4cf66f9d5692e8355c913cf725b2dda510 3045022100c756e85f468545ea0184f68d7b2656d48c82e01fa4da84a4602bddc2693038fb02204a8daf8a22ec07cce593077213e5592fa9fe561de470f3dc944ddf624a367947 304402200c19f5f58a211a2520ce89ea9fcc7a72b7d0ceae9e0665f35cd8521983967d6102206e09f6bf7a18947afa7e3737776d24b2116a578c64235882d856dfcf9254f204 304402206d8c1a9339c6a93837fc5901d4b7875f72d9cd6c091c717ea2e7ff1074a05e6f022009447ce3efe67b2c2ef782e909b1d29642028880a27bb002b66c778ca681f845 3045022055ce311fe3c9db23924e8d3259b13abf4656feafed9f02b9a377700905e8c66a0221008010d2f314b5f127d00307c7f128c6acab3dc81209350fb3af37afa19702a602 3045022100cbb8d7f28eaf2bd5ed075751f7cd466e6e01043b9011aecb10f1bf98cce4d2a20220456810b8c794696c76bcd2ca7c770b8120849292d140b0d149c6d26c2ae954a8 3046022100831358f6a6be521979fdec3b25963efce7c951d559a291bef11be0e992cf6747022100bfed59e44ccf604609e5a22877d5a318d4ff807ff0b23bbb0465cc058e6f007b 3045022100e306f1b92de109f5e4927f7fbbd3c41050ff9ce41694f1bca6319a1853aa0bf10220041f4160815f7f938c83ad0132adcb13a94324cf4eafb6529e02268ca6957883 30440220565bc6aa28a6e13cb7a6edfaf4a690304aa776957c232467758c8f8b3cd806a0022050d2e2708ca30b0dcf00a9b2e0f9c16a47eabf825763e85b010164f926bc954c 30460221008d3ac6c783e7f65f3d4fadad3e584bdbdb3d8a02b686f1f7d3c834cd3ee97511022100f60939f405a31efeed6f07a398d7d17260a3a408279b5456ce0d1e3f810914b0 3046022100fd79ddb42e1d828a16659ea95423f2a7d278b76b4941cd3aa8ba188b657157e3022100c61362e2b2518ee474e70750f33d1b7d16f495de1b6ce84df9cb167166121898 30440220519b2cdc3abc23d9df4fe6d7c6a97ffe25f6afaf7e23ede4fddf35dc539f10a00220151f4b783edcb9b9ebe70eacc16afd9798452a5f030f4bde5b58ae1c372e3375 3046022100d906fad81c5274f7d253c8703a016969c0178ad72debe22ad9cbcd06869230f802210096a97cefd554ecd6db1f5c027469b38a871d249d948b900b5f4f606724f338a5 
304402200eb15fc611031171fbd7fc19ec8b28d17c774a378cc5916169d657dcb7d9abdc02204a5da382d9085ce11dcb4431991b48161568bc2f5878b748e4df1c58af9ad7bd 3045022100bd8fffc26805daeb17026a9d3babfb0bd1ebd6ce06e5c17271845959ee90ac9a02207bca4022dd2cd4ca483f9c782391c942d406a43244d132756752da3d88d988f1 3046022100f72edf44d1c72fe934b91c96b626d00ef08421764e22c3be2eaab6bc20b2ebd7022100e202704b8c9880f167fe5ca2f1bf26f70ea58bccacfb8642b7413e79136c2b7a 3046022100de7ccb9aae1732cbaea4b2a303dbbeb2ca61a0d2c86973119258ba7c5ea172fd022100f16990ca81fecd9afaad2a55db53b34daa834998be6b85774f3fe2e8721737ce 3046022100b7ca6ec58429f351dd4cd1057a873610a77af47d109eb49f1ba0d99f3985389c022100f46ee7582f05cccd6774b116458df121a095124d65978292c8b2a8160ea1a8af 3046022100a6d00d7758a4044cadea449608146dd1a0e9f5ed7446c269885afb5741891885022100de839ce798353c36af71160146d512562d4f761ecdc2423099776300c28169a6 3045022100a3284c67603616bf387738e91d5f492ef22ac63814d02a87b63d0aea7ba79c6d02206d350ea5a8ba9a1519307dfa58c618b5bc04650f16d7eeb0eda250fb9387e3e3 304602210094a6f7f5f6bfbc2f12a440129a71c7d8a0e84e80dcd9498d6b5404da41fa6e6b022100896e0443aa938d590a8f752d58570e0300c43afbda66d5b0609d26c5c8cd8c3b 3046022100bedc0129ff96cefeade309ffdac93c86da07016e6e7fcd78050f223133c1680d022100c850c2fd0ed96d249ad486d3851af22746c6b8384de3a4bdfe50fdca39ce0edc 3045022100cc5fff488d02c74d344e276cbb5e985f8ce9826a6a6c951647bd74a58debcba102207922e53740fbca0949efcd34c8ab05d2ae00aec874db732e061d296a2cf832c0 304502207d978b17307401962204df11f1579f1ad2475ccd788fee62d2b8c3ca788b61a3022100a0d764bd9207c8c86d0998408b2e6445d9289f52c9980af491312c8afbc8d5a5 30440220225fbc07568509b4e05905ad312e677e8c19b37b3fdd10df5394cf08b35fe1c70220205bdb93c04f8a4d4ac9c8438233fcbd1d29b6d8d9a2bcec6d6e7483c931d3ae 3045022100b5930eec3340324207bcd02e8cfa719be2d57b7a3688557810d0722f7a7c9de002204009509832c07c9c55f78f667ec1c7283a3640c8506d845ad62503a62698fdc1 3044022011d8c2895d821c2888fab8c7f2d1f633648cfb8a3eb002fecebeba79f18655b102200537d3d42fe02fa00a37f65743f30ba708f0e0e41f930d087f2c9c5a0d005088 304602210089143df5b6daa8848e3b20177f0c22480fff136150910fb032192746955950ec022100faaec1916bd439e5c7c477ed985fd7504a08426d5a5553af4b280590a4c5d7c8 304402204b7c961c4c5873de46bb8053debb376c9df55f5a7a8c0c7f75bfd3dc7ae4835f02200962b9205690a6fa248871127a79b1571298f6e6ff77715e734e21a1a156c340 304402201695c03092ee17d9c2608c07aa2b25a5817a764b356ad45f1fa93a718612ed3602202037af4e56ea07f7cbbcdaa0c399db96694832d917892cba8cf7b4b14ec0a9b5 3043021f603d9f00d7c62810fd1aa03888e87900cea906ef5a5629cea876702340c04f02203b23e59225bf5dc81b372ef235b675f1188223c54c3b66ca042884470b771341 3045022100a3b0e55aaa8e1a8b456186ca5c6739d3f2c747243f82f281bed0354d32dd528602202300eac30d607ffdd4b2cee471b680267b755500b32e584a63e94635caed87e9 304402203ba240010983c42fe60d8e6f025469f86d586eaddfc89f5520b3993b8b2d4fca02202ea98622ad85fa5e08f3398928943b6741f6a54819eafe3e3e5a0ebfb745649c 30450220070e68c8e63304ffd17cfc61908bb36b8c0fa274a0cddb5020a5996be840965502210087419bdbcd9cb43939610039a8a610f228a3a105b558349568a60e16c37a6b80 30460221009ab8aa67fef85679b774d8092f4676aef1b81a7bdc57bfeb596fce4ad38c0cb5022100c2419784dbe2ee145ae8b65ec54822c98b3b61d3a771a231edb186f0900d67a5 3045022100cb266822f0ceba9e916393d7eabe5ee4c3f7ca7d05aeda0a9f51589323a3f5d2022056dcd4c6b09c76de2878753e7099f05f5d68f9039a2b56b41e1644df9603e0d7 3046022100fc6777aa1a92561b8ac8cdefbf3782ee1cd647713cde50275e2ba8abed503c9a022100e20003b8e16336452e0325ce011576657964d91ea61b5f425e19d89d8c7b5cb7 
3044022062c388f1df92e42abe6c493dba06479319b7ba802be05fa39556d44293f99a5502200c6f0538e4b51b2497eab3709cfd58ad689ccfd87ab6fab7ce0c11fb28a5b3b3 3045022100a0e7ff0bd18bbca67a2bfa2d7b0543ed32ba2b686de11d5b075451705ff3a4a102201a176a73d526bb783fd97ddb88b740eb32599d98ea63ca0bedb6ca25f4fe3732 3045022030a9485d39df313a7a4866056aff60c90c94d205a127e1e6d51f94ab75876e1d022100a457059724e07373e2f0fdfc43fefbe8244e2ef12dee13ceac8b014c8d46b220 3045022036b429b65c7e274f5b0031762452bccc7bb96ace2aa9ad7a54d8d12e4e5bef0b022100a5e6e5c3b2917215e78241fbebee6720b8f59cd68bd270a5c94c67aec43ed728 3046022100a68aa265e2820e3d68527a86305fe53b62a268c0c1808814f23b9b62588b2a1d022100d7533a24bdc6096f49922a18236ec8a7b7b8ba76e263d8deaed88d19d5f99380 30460221009b3b1efd8bf6466e1c2a46df91d37eef1f4c06e18a5e287182548056c9882f31022100a6677bf3b6cdf8921cd0c5d70ba1687ff16d03265ca9b28883d546c4f27e7abb 30450221009a79b66ba0673255d1f1b3ce98a8ca028d1b860a9573d26a02a68f1f590eb10902203d3822c1cf5fd0825f90f05c07bf03c3356c5e9d44f483551cbf7e21b6bd63af 3045022073ec78f9af2a8f3b03f7fd134dbaef00d611e00926d09f762ff7a8e928887e47022100b6199f8f44a14868ce0232bfeafd3de6709d9a0d2229236e214c9f077f360c66 3046022100b1ea6750152e8c8742e23ab36466c6861b7e4a08ab7b94679fe0704b9eb630f20221008a104e3c4ce2aa21f65bd8f56f474fa891eecdf1bf2f2d069eb987dff528be9d 30460221009892c131e4a0e0674d67de2b6400fd43fa809066fba2dcbff260c5fe90114a77022100fa62c5442b589b98fae2f97200d09c5f021493d546308a1a4288b0662388eaf0 3045022057c65aa5ee1a06684d4c9c0bce64bd0a6064fd2a10062c9c4af0253eb7cfe032022100829e6ecdeee9195bdfebae58c7431e8f9016ee10897321a08db0dd1ad5e1dedc 3046022100b616a544d6e7a8d34829e2a6abbe91d1d817e73ccafcc460c1224f657af92490022100b1a864c91ac3a87591a627210877672477b46e48deb0d2981a878ade883f51d6 3045022100c02b50e536bd4a346ef9ed387663ee61f83176c709f2c15506f44e83a6d7b0b902200ddd7cb5bf9569b010e4db262d8ca7d0ec4bd068a96f4ba4ce14d1e1788c879d 3045022100e6e59a3b1bdfd7eea0b5e752bfdf97715c8f695e4e1bd893db74dc3a868dd13402201ac281f107efbdd445f931a33fd0b2e44d00455b0030c88b3d8d925cf090fa6f 3045022030abe91e02a84d69b367e7d33d7a37de963a0139b4c2e101ea094cb0e6206014022100f9c51478ac5239d337f238299723667ec7dcdc64143886f826d07b7dc8ea97d8 30440220789aac56f5afd86dc560aa80bf408d69a2bf5580a0fc5372a012e94893313bee02206b384366448d4089d5d157737481621303c7e3c30e73eb6487052f04567a83ba 304502210094723bb1da54aaa5e9ae7bf139734f382e81808bb668db2a4bc20d1d2dc3a62402202ab177f80970be099190d7ce7b77cd9c1b2a84eb3187217c1c33c5514005e414 3045022100eb684d3d7552fcb45665b160665e7f0e6d57b68804642654ae2ec1a7a283eb19022003c0693d9a3d7229f73bf8b3389704711e74c7dff1a9145e1f08969947f45858 3045022005dcb2f039bf91cd087620e21bcb3cb6fef267493b80530a101a64be3eb8419e02210092c1de6aa68e09bd5c889b0200da0dc181cddea11d9cf31cbaca89e38865f01d 3046022100a56a3599f50e6264239b5ba8a4c0431959d75f7cd92ffb20dc552705d368cd79022100b0fe3b8a1401dabe6e073f51a69a5810ab2313fdb53deda2d2bd21e930f2477f 304502203a1b3a3382a383978ff0ee1718df75e27252d0a8cb5c1de8c060f509ddc9528d022100c93ac929fd8a10e70f05aab870d52f9cf55478ccf52c71de07555741e395c0e3 304502202f90cda5c9dbb4e27f36c95fddae4d7e2a24b7e40a8ce2a4338cd4653ac9f150022100c80bc29744314fc22f9112b8fdffcfc70c4d7774a6f486201e2035401cb4f06b 3045022100a8af183a7c9e9f3fa3090568375ec8f2ecddbb09c6f7b7e3c688c528c2437c07022066d58d4f7a1a8bc7baf6b280a9df5535fa59925344d7814ece683a61b57ba719 3045022100e41697cd9ecf80d20404303eb409715406c4edfdea9956367706c264dc523b1f02206109b095ea6732bff7602c6c161a92f4770bbf7c93b13c4cff4fd78e6157d3d6 
3045022100f2214a30d13dd01174b3c4eb1516b9bb390b2a1b75a51fc939cbf0d1713eb3aa02202de96f590fd297819d95c8e45617aab32ca2ff0230b4c8bca27b1818b000cf4f 3045022100fad5bf8dfe75011df8b9adf2111a608fcd2aa6fc63183a56d8094fae9c5a8bad022014f473367924f162e5256f1c0a867924e0d5d6dac588798e265cbc2a4c7aedde 3046022100e045f69d6326322dc5a3f6618332132f5e72baeaf2db4b59a180a15366d3a1eb022100e1ef8a8663070f088da9932fee7f8a4b6d7d1c3a854bc06edf5a67eac392ad01 304502201b92476b11566f1aba2fd31a0256e6a3d2e1fed1bf769342ebca05ef5f99034b022100f885d427b36e1a978a464c9c083acdfaf73124e0cade8d6c2274e8c73f736bec 304502202b10435697d2e0ad88f551bfb146d0510a476c135398389ba0ede211bee531d0022100ae49bb327afba9844fca00fc7593871951cadf69f5f1362a4e7d7aa446b9ba3c 304502202a14eec08462d790637b91f1e538b6ccb5e4232454d3e36014d3e2cfbc583168022100fb90e1ebf18c6c4bae4747e96ef944b033a14cbd9fcffd0ada26bf9a32298ed9 3046022100c16a5777d4740b3786d9d18e43f25d2d855b14e5e2d09f0fab19afb10b80eb82022100ac02081eeea2f6bf46eecdd080efdcbf19c2d1005957ea84499bf8184a1f4020 304502201ef527eef9e94c57311286e26438433e1e244ee6a68b457b55e2461ecca5dce5022100a4c0de6b2556aa5fa17f677d188e78ec57de559d69461fead1de50a26632c894 30460221009b043df87d581e4504de31f81f82bf1e0553ac149d7e911ffccb7d3986e6d654022100a4c447e467d779ea572527b095325769d621f7697b3c5187a2762eb17116d17b 3045022100f4aaffb059898b958d1e72e54606e3b1a87f05d6b7bab338441e84d8f9f8f12902207b8a213948d492847950f8d7e32879dfeeb96e0c95ff021a7d52e2c207c3ee70 3044022033948b42cc65e0c6ab4b2a0e8f1399bbd74dad29c3486790039fab5b3dc38bda02204548c2fc3a08dbce0f78b78754bc022431388d1f6abe707bdc226bf90a3bc1c3 304402207dc010dd4aa721200aa2b56f4f2c8ba14bc799e08121221c2e81afc9840692f202203d7a6edac44cb0d679265b834fb91f0ab2570130ecc0eba737b7e3a5aa2df41b 3045022100a6e6cf451b6c4d219684fcb2034cb04b0372d6cf9cf5f575f6c0ef01ba5f246a0220694de6b8926d5476bdb6ec809545f96247c48d38f4c344000915220ec7133a42 3045022056e9a742b7648ce5e914e6c00592e047f94975553b53b8733309000fa855a8c9022100f045de4f981547c929bebe840cabc464a5ea2fb5c9d7a91be74bea093083b9e1 304402207403fc9bf47de31dfe7bb9321b9d5c4bb5cdf3e8e40f9b56963c131baccbac0f0220545964f2ade74e6fe0fbe5d225e6f3ebf3a1197fe51822a87772784e5f089f9a 3045022100a3dfbc1f4bec48b931d1cc58ce2ae8344e7f71d400c7d55302de89f1a3f4b74702201d82e8eea83ad569cc99fc12efa0532b29bfd83657df87ef246f44929350624c 3046022100bee9f04da380c3a12dfbd72cbe65d83f3068a412142d9b99100410f4af4ea0df022100bb4c23d93708ba4e7e29b6041ab5ee8b76b5fa7e37de2283a9c9fbda7e0c0828 304502201e403eff2a1e2bdc4e709596c1e6f36dc6ee05d7692d9b5da300b931f024474b0221008b2d29b5960c53a01490920abf44e77d7fda2d6b0e3e61d5c5408d53f7517fb4 3046022100cd78a1e5dd3d0bac7211d319a6ac1b359803042f71dac9452d462181b68ce642022100e05bc180a73edf1f0c8d9d013b2fc3da461dd38f51d992a3a5da7feb9b872896 304502200f703b24ffbc11ae462f80549ed2cc3d5fc1e886587ea3dbba4921aa71a013dc0221008f2a42a01e81b8f395139301df56704ef1733263e80901a3c57ceef6edc09464 3046022100e65de1f98757c8c65e456203e68aae1ef3c55c683527acb22c762c779008ae3f022100c5db657e5a3812c9313a3a045e0dbc1996f579b1159147666b5b5e0f57505143 30450220361dcda710777957456872037aa1875272f7a819d410253cdf84e355dfb1ff6d022100ef32e4c2da3600c9747b75d88cf9b44d4ef2043abd0f5cb53478abf6cc17e2d4 3045022100ea510aef8399991e7105f7550901347a944cd299083941aa1ebe64ac54d99d7802202b39c7289c895e0bda7a57f838899ec4c5235252b063e33e6c12b88349e820a9 304402206fc1b38f0b8110cee9eb9eedefeb689bcbeb0893932125ccd208b695f288600002204b7337f05ef767e8fc09b778b95f0018d7f70ea280e74f4eb1369574b1cc5248 
3044022071134b22797ce8feb997298d93c168e681f8ebbe124ee5cad9a897d0b5a35b97022035550e419bf614672a13e6856625643d0acff2cf7effdb1f64be4f3f05ef8e00 304502203915a8c2cf8b75c9b88dbef5624acb9909f9d27f7085f1ea94eb498ac1b4334a022100e3f6d7890d5bd64659bec02512d8fadd6d7bd213cce25a58467df0c3cbcef27b 30450220014805b9a851927ce9c25e16669673564ce37669fe514231b39f3e2b69cd7f95022100fa083822c11b8a8173ae8b845cb8534983a09d7e970a2ec4c002c226b5650545 3046022100a9dafe5cc43ada4975282b26c4c4f0f090c9c358c6c793c56bfbd88f190eb8660221009f18ca06c7cde0e0d675a8b9110836c912d5546338f6fb370e3596f2bdd98db0 304402206db810e3b62409ebcc21ca23e863a6ad51ead61a9f7e5f753854081668f87a6a0220029730e63cb9e747d543b3f8fd88afa6749f00bc2be048abc36c70f5c7daf579 3046022100edd90541a7fdbd8a7b747581c0dc858d5b6f91efe676c2b044b4d8d09d976a2f022100f8cb91be1840dc8ee5b1cd627a11c8827626b0c1fb3923bbaa1e656f54a7a1af 304402201e9c4b49705145fc26fb9be2af31e49729da2fcb298a9d40cf1c99e5369151df02202fd6a9e90a7e0611a06ba00e1c7c970cc271c7193da4e0aa9a879c84990166b4 304602210091f771dd81543ec89cc960df06bebb131ccd7ff337f447cb82cc1109d2136b8f022100b994809de71f9b8780121237fdd3e8ce8120db2b8f5d02f75c0eb2af552dd000 304502203acf1e503eb9e094cac19e1828f0be7d8774e7bb3e9c37fb7d10dc043c839890022100feccd5f0e759d2e4d2dba4812929ccb8954caf4ddd660ed6674c983532e7c773 304502200cc071536a696cdfa66e5bb4f7cfcd0cb71ff9351110e86185e11b39b6efde5b022100feac746e8ac2248281988028a8cf506cf55a8c60b40afd2e6b5acc5116e4c56e 3046022100c24f870deee0517d96601d703ae3c1aad1f3c354e6d772e33bf9ec835fdc1bd602210083dfd78d9b248c5844198b16642193d47b6565ae0d078149c04c4b27aa93fbce 3044022031a89fba6943d4362b771ab0109ae4408d9980ca4ed1349b839899d447eafd220220316110d19bb98eeb324e6b3860990cabb32e1e5e62f6c6fe312e142337bb794f 304502206903b2d7aa7bf846a2bdb6dd8b9579f8a96ff63fcf9438bbb415bcaf02f5b7af02210089173aa3dd12c949aceeceb04d39772dedeada296c34aa4c9e4bdac136062275 304602210088c0c9291f53f938fbc11141955bfd96f11afdf81ad2f6b72b8533ec8238a046022100ef175f93ca50a9dfec68907011dda7687e0b4620680a57649468dc7945a2fd6c 3046022100b31be778ad0790e2164a919b1bfb6ac3663e44d6c53453d1fea50f0a33521ed0022100b55d527e4e309981b097dac1ed705cd00da7b9c8b4a4a1974c82928c739fed0d 3045022100edcdfd8b6b018ac75cb3271608c88e35c22b998f93d7e2f5094e0fad4afa21860220474e1c64f8f9866d085aa9def035b1c13db1290fb013caacaf22fd3574ffe0a9 30450220154df63c2101fde5a68b665949d2c6134d7e1237c2d88cc17cb441df00afb7b3022100ff35c809460e5cb28e9568d431b202574a6c5012c6d30ddb9b41e7d45bca1e01 3046022100b952e819bcd2685376208dc90bc8a84be004a97db22eda484a000e4e620dcd48022100ea486987bf7cd681efdfb16784f90ab6a1cb42eb5ce277480db8fcd69d7a50b8 3046022100811dec5cc053647331b16d2fd75e272948027e56f58dc4b1165d903e73d842f1022100bc136fe09ccbf58d19a0a52e6397e8967617fa9459b981390b0be653a8edf5ba 3046022100aebd7269d3bc168115b22cb37ed83aef955ee05bd76ce3f665bb4cc7fecb28d3022100d3b0c14d36400fcee80f306c51cc5724014cc2ab7633138fd241cfd9e68a14ff 3046022100b72214b23a1d6f6054a23a37626cfd8ddf7a1a5fb74296026d83a07ed9b83050022100d3e7b95dba93d6e24565c9792d417bae68b453f3b06460d3bf41c2f5bca4f2d2 30450221008ea9f33912d26c36f10c1ff61865b4c6878f950731d1391431afa48efe81bce402202abfc093f1fa519601d85b66948af0050c8ce14dfbce65c313296b76d26480dc 3046022100850f37ef261252d2d97edf34955296a88186e8397d7892c7a7557e3ff53bf3a902210082462568edd7bcf0842525b863ff830cbb7a151d700b8f17584e4eb0d3242354 304502206d6e28347cd1170473076ca55643df3fcc5df816423a2c5e9d7350a5f24bd89e022100d8ada81e992a9eaddd01cc7aa527289951e7e2d53adccdae56bd16e9848f790f 
3045022100dc5e985951542ea824c4239b8774c1bd519705c36f82a40037009541e360e5100220373ac484d9d46e84e9d97835d852e53a2871c2e3546b0c4eb150ff3c184939c2 3045022061083397a277588bab6d6a43f0e0d30be489e98ce4f1c5dd10a99d4845099d3d022100e3baf7d8a1c6b8e0eddfff13afb10da5540e886b0f4120d06b98216b250722cd 304502203a2a4ffefc3546254552a82b88888fdc4b217027294e09424a2f73ac42adf5c4022100ded8308c385185248096cb7ebe1cc9698642e8a3922321d8af0403403761afdc 3044022022210efac19ec01d34d6062762a9a6d082f301b390966ec4b2038823ccd0df99022049adb28dafdaa48badedd1b6849067e16d1214fd09d7c897fa05354ffa9d09e4 30460221008e847d3a222866d3983568c048cc104a55a6e6a451c74aac638654a428265050022100f071730d9cc3fb2e8a73ee5e506ba307eaf110b75e01b3136fb537fbe8fc7836 30440220262ecd348cea479a2c020e8e6bd857ad22cad43c70f66597e39d833adf8a7454022052f22731a34cd6e193c2e45664799dd6dfff9eb9f1a1ffd10521b08b732bc652 3045022100ca3a430c32312aade22e9b14130471e93002199ba99789affaa8d0d8886ea9be0220677fc0f13427d4fe6eb2a6373d010fdc9df956fb2f22e8545c134acbb1071345 304402201ae9d820f39bfb9167fdd4eb34eb0dead32da393be8f5d2ba27d421b0831e0d002200b01291c8aa59e72b7ef51f2ffa19624320e072f3ac5e34727e96b89ce725b10 3046022100d0d1749f09c89386b597f9faf9332a5cf2576a04c3088fb63b79091b335caddc022100bd12eaba372f82d80675ee8f67cc99d4d0d0c70de74d4cb94e8a3cc20a624fd2 3045022072ff384cf594c4e506d3cbfb4b601c84dc054dd21abf8864b7abe095476438d2022100a0d87b998d91296de58fbe3c3106aefd69343fed35979b7a33e687c10e656801 304402200c2b4795635de03dcc5898de3b82f58477d6452636ae541ca313381083635dd9022008d58714ae1c363c9af6d3a257c8c426ed796fba9f1bba650fca470cf5a6eb28 3046022100d873167f0dbf994d05d6dbec60446802633a8fdef6a066a6b25c4e0ce96dd538022100c311ce0c7f26ff3dfa43ebe8e97b874d9c177b05b6caeb81ecd74514faab83c5 30450221008bb84df9fbf79f30314a536a4fe19b2e8446fdb0c38a48113282d46192c9d6580220440578868f29eb97885109e85a7eacd45108589554091811b87078713a9ede92 3046022100c890a080955919e52e4e8a6cb5a27b29b633dadfe8180c295438705d3b3173d1022100cf0ebade5497452c8e01072f9b1563a35cc14ab4a41993a94832b198125db12e 3045022100b11af2218193b9542ed0a426368903bf4094be101528375f66db9318d7b9370c022032f8925409baaeefd3e8900b5fed23ae2e7e144e9a1466642bdfaacfb120f09b 3045022046430a59a299e557f80c94ec33e07e12aff95e35a1af5f850b87360cea2ab329022100d6687cadf4f1327e0f1603aaf63f309f43842d1fd56f5a6fe1bc019741c6c5c9 304402206cd7af16355bcd240a9f932aee1d49d088adb9b6bf0fd22102de8747d80e6a5d022066b25047ca3f6bdc9c35abe1faac1edc0bb094d2b75c4f9cb476eb0da718119c 3046022100efc61a617ee3b1312b019f0fdc877286889a385541f41ca2be77500ce8b61d10022100de1b4565fb00ec067b18179b8e11e09fffde7c224180e90982698cc024b8eec9 30440220263e443eedc44f304bd7c6af394ae0541ead08c1f5417a87fde1def6dba77ade0220724f574bcad4e4e9a13b22ee1c26f7f9b17e90d350d03ae51a0263f49b7c3ac1 3045022041534fa68923e0a0cd544e504c6514ded64825043937f4494fc9c283df1fe0fe022100bf60f10e51077f6b598f9558e7653692e0718d8c989d7fd7db7fbdbafefba69d 30450221008921caecdf5282ab874767921f8975ff081503ee3dce6c1b1836b8766c3481ca02200c2ca729ad9111575cc1c052791433c1e6db9efac7bfbff95b4997a62824cf90 3046022100966979bf9dac1d2e6e3948f94945874dcdc878cacaca7a5dd3c1d55ad312f824022100cb64aa0cc93c1d89763e16a42b7af3d7fbb9ffb5e4ae46890bc76fa46acd645d 3045022079ae71afba93955bb33a2758aa037dd17632aced3f2aa939d1b61fc7b89bcbec022100b843bf40940bdf27da8bff3e7209d2cfef28fa45a238090b8c0fd81f11f183cb 3046022100e388993539439878386e0d826c43adb11a497ea71230e709600a72f0b6fa9226022100f678ff2f55635bbd6ba0c53e6a353c3b1a37978e3a076c25186c6827c394cd93 
304502203c2ca1d0dfe74f9ad18ecbcdb67e4746a68fe3528c91580800e0aa96187565b1022100da79c4ec5869dfd461e8a1fd494d212c3235540cc78be1af78a94eb5fc7b6f02 3045022100cef60d01896510347f9ec85aae2109f408d18998fc07add34a369f71e1b8e71202200b8c2edd6e2ef1b05d464f38e5eeae0db9c60ea297949088fe093420b37b9813 30450221009bb63c2031a56509d340d88f9188dd88176d64bf29a008e06c1c8e0af0d980b8022073a88e663fc3ea13737d04a50a8304b7e7c759196a45a73c511b9627cb65f892 304402202add78e6197bb3bd802775a3f217ebe896435f7dffae0b95c2158eff0060ee1102205d427a817fcfdd90a22fab9b8512f259c80185c90d06f0dbb03d01415a5208f1 30460221008db400a90c9b6cd414d9023e2a5d8d99a5b7183c7815bc37ba90e6af26d1c09d0221009e76692fb6ba3770c1d0a02ef90b016f65a55eb81ecefa84ae1e33aaedda8db1 3045022022bba0c85795fba8cb5c09a71641f9a4da2da5e72f50912d3f9b7ef1e823e76a022100b990345bfb31effefa49fa182d34d5abf34dbc2c2a5974ed369bf36bc1dc952d 3045022100f8d0ca505a9b9f07001308169da342c7298ba1d73a305ec7c797a06a347032000220226df9aef67158c21f99daaeb5a3a2bc5827fe6226e57ab9b4e3967471bc1e0e 3046022100ba37873ed08f33f9f8ed86f74f7d891bcaf06bc1f2a62c8609f3752959055add022100c49cca50238229ba308253be0f62062514b0982899019c8da351ee2743bc8965 3044022059f371a307bb8c7235fbfd7e932ce809a114d1c4992a1af8fd02206cee8134fb0220331d3d9024147afd6fdd966d468bb7102d32959de0ae3657cad386a6a5c18876 30450220463e5089078b37754fbb8e2801e06d88826b320896647a3296feaf1f9a1c9f040221008d3f3690ab1f68ec8096802c009168e2e214c71ff7a0dcda72b5371da160e6e3 30450220488db869f077728fd6e898e17f7cf7298935cc91f48d63dc885d2534d445489f022100d93e038e5e1a8eacf631e281986a9cc454b9050df3228b0ad6f7f534ec2f474e 304402200ce815426c1e08510296fc78e3746ac2ed8ba1367741fa2ff60953f0afd3074b02205312ef93f634e922ecc0b5c0734f924ee8b4dcfe4d953945fb9db939d5f365fc 3044022011ad631ce8711238b61828f35ade5efabb7824352e10fbf313b48619fc6faa6c0220528bcf3aa56e4307975b16f21eee28e363ddedd807528be538843a482b24cfa3 3046022100811e80ee6813b8c5445dc40294f758af6fe24691dc8d20aa99923d9b1376765402210082d7aa070dba9a9855d6e27f18cc8bb3c43b2790f65001ffaff2131c79d0efa0 3046022100be19e86538be8f03512ac58dd8cf654e5075d9710add94fbe8002951653122e8022100cf05edc1c12feaac35003654a5ed50d9f7aee45f3d0e4d620459833c8e893dc5 304402204d880fac896a69bfba0a5be2b6d10e20f627cac0cc995604e287584e55afba1c0220521388b6c1444a833a280b46ccedbfb06832146e7c816a04fb54d7164884f3f2 3045022100f14aece62bf1773aaf7b4ef9a9ae2d3bfd1df4a402ea1e76093843525cfa5a8602205feeea52cc208835377202b34110ce37630b8aba2c3b93979afe474bc706619b 30460221009677ef17506675a5aa12306fa8651fd5698f302d9f2aec6b030f9c380cb4e5eb02210081aa169669bd4894c0d524fe153fea4713478925ff24a0106a0b0bb6555e9070 30440220234daeae61996dd0114fce897b10e5d4566df3ed9b42ddfbeba3e3065485ee0e02200f7baaf5b1805a94414becfccc781f10881a032dad061a5f3c486a06f41fad55 304502210095bc3aaa6539247aa0d6ead66042dbfff19b5f1fb983d5c246ab47acf3a2b6b402200f4167e6d48a29b26a2dfc3721ca7d74964c2478908e1a576644cb58ea376fe0 30440220175710ba80b5ff9dcc9fc2bd015a5b1dc7f4b062eddc5df5c285e1e2c6ef8ade0220617700a56898e9e604309c5d162105bb573495967dcbd91748fef0b7d7a509d3 304502202144afa88861afaed5edd445b42ca3ba8563edf9ca5bf768fe0bbf37046972080221009d9e5ed69979d5998bc60da164cc511b3c616fb61039a9f46dd09c3761d760f0 304502205c36d222563086aa9b892682069c0b84061de775a75f46175e914885a0a470ce022100b650defddce66e33fafd99c3e718c11530050f8c46bc165d8c792b2e938c7705 3045022100d7b3e8bdbd5f29b17be1e545e8e25108aa3784f5d99f1113e73ebaf45333d58f02202bf4cb3f348d98ffc9c49536ae8d29b21734e0d9ab61c887fe519527940acf2e 
3045022100fff1606ce6adbd804183a48a077e453aeee0519dd58be725499b99dddf911e3702207a773197dba68d53218357c6c1d2fcfc4b3f19cb1ef60305b6038db2115ea82f 304502203a7f84a65fc4469a4acf4cfbdde8d3f04e0c07370529ac4e31752d255ce6e3af022100ea4cf062ba313e2a7a58e1b0f61d689053b07f290e2f38d6b8d7efd0336adf0e 3045022100d812239331cc5bf2f3176a1080181c84079d36a6ad134f78c533c18dd1af223c022017136e16bee62def5907c1e9d8a9c5e83ee177820b5baa6cd540bcd435a5858e 3046022100b1f0a6158462cd5636f885ad02207b7cb97613c75cf9c8e74733ececae27eabc0221008d15b85f733bdd0ec506bdeb7b451e2b4b112fd65c317870e2a7f5e73afff3f1 304502206c9e5accb3757f017fd9f12825d564c79237f336f9c3e6d26ac68b5911b51ac0022100d488ebd6fd66585d9c0a0e073bd2e3decc44a8c8551011a26f4cb159381e94ef 3046022100bb4922cf1d68f1022d80f2924c68efe715baa8c595e44f6c40d23e7123755fee022100a7971cd579485b04611edf4914a3adc664fd9d5b6e37d5632c1b100a1919e1cc 3044022059f61c601c4729870720721dd058c8230f88fd58ae891270f291e4591cc155ae02200625c6af127894c32305294da16b2c7df325301eb9c6bdf29094925a797a0a73 3045022100bd65a9d0212826e971e0de5ec07ea4c7fe0308e9c91505f63f857cb811a08b4d0220069bfe94883bab016545cc7ea0fdcfea5e980719339cfb0337a4e40153482597 304402207750d2e73856e278f0056ba459c06aec6c82f822e61ce07385dbff1b32b7886d0220495cd8bd53f67c2991ee29e08bbe0589e4073620fc61d7931031605fca0d321f 3045022100b235cf9f2216d8d1a7c46b1069cf140f94c8062d534b6dcdb3b5cfc0b62ca650022024a745b871136d48c27d361cf8ec1ad871056b6a3708986ae1286d9c6e572c9e 3044022065b4e7c340e7b8b0c5a52d41ed494b2921457f82b348e3cb20cd49e0cda970ac02203ec0ee1a045d766861cdc852130923071b265e43c571125bfec22b7baca8f9ea 3045022100dd19c3f0b53375577928310e8fe756fe4b05002a3c27ee56879a8ef97b4b4bbb0220275cd3201b192efe7f0c0555576655b6f52bb39eb1b7edb1443efabfae11b558 304402200cb878f5f764ad22b36d5a6444dde01018ef51fb1941e52f697474d466a2b7ad022004945faabb9a7e357d65651c8bb876d13e6a30a6dc1fe0cd64e85216fea83fcc 30450221008caea791ad60c2ea3330430faf2d6e518159d64ad34e167c1d1a2fbeb9f4ca3f02204dd27d790b0e0c52a92e1eb64a4608ffba7e4e7b2491e34c78c824f4bafbfb3f 30450220691fbab8e62c06ca267dd29def4a7861b696081a3817f8f783274a4672fe65c3022100829ba8ab9bdf197121f6f3b5594209514b791f5cc0f94f5cae0c11b8c86943bf 3046022100833e57bfcce2c8818a5756d2ed927af6358062d3ae631658f2d08da3452399f5022100ca327c75a77861de972b9fe6bf83798934b8916f289f68a1a2527637cad84527 3045022100e43d75278759baa6bc210d21b083aabd50e52868e34b1cbafe205612a04d00980220729fe8904412abae25580ef41bfa92397ce7d6a962f45c69e24c5a5d9ab12575 304502205634b7122d923a89f4abc4a8a4e4823e35c0d9a2ed9dc7f5a31653674c9e73c2022100ab822a862e544e1c348086548343724152ea3a5d82f9302c489feca69b1e6923 3046022100be8cf4495b2f905f646b2c4a5acce85bbfb7fc0ccef1568c0f69aad81e9658f3022100bd5ff5201d50d2d0ef4fac249505c646784bf69914122ecb94e9d8bf6a2e7c4b 3045022100c7cb7967e2f270aca9238432eb3d5d52d52f7047a79751990ea1b92c4f819bad022076ebb882a27de444b285cab9f1185da8eb5055be1cedfc47a382c4523038405e 304502210090daa8adcd30b1f6a021f0fa64a70396fde51ad56aaf316b9b89ff2b06c381c302203204261f1aa8a62fef639ac1743aa33361a6f989338885214e2b88dcf7922154 30450220330401e770882a5becd7cf5f7420c22cb95ef30c7d144a94e4041dbce96958620221008b99b4c8bb641d96399cf6b6c71c93314044a0dabed8230a611dfe93ae63e1b1 304502207fe6e41c8ec49f5aeff3c6a431d34e73aabeb9554179cec7f78a689d6770ee1a022100a3220edc6713cc309062180d622eda549f3efce6541fbd14bf4efadd4998f948 3046022100abeb8827fb363094b0db070105b51dacd983e255252b64bc195f8fa4d13de0af022100e0a039a2f039803a9e3c5767b3633504036ae429dad7d881f3ce9cecd0db0cd8 
30450220646ef945b1abb25da5f482bbac22aba5ce7ccec7e8bd6623b82dfccfe9c5e5cf022100918cb1be5e91f12868e1032d1fb0756ae3a3607ffb5a34e6c6de428ea7ce9015 3045022100e6fc180dfc428e200562ec15cdf94a9065e1d07be2e2e851e3f4cefd8f8304a8022022640386ab1e1eb6f2c92d42be086a1fc7b187c4519171c40e4e5dfbbb23101a 3044022066a4fefba316e6aafd551af966ae316ba67d76aa8b99465242be3986612a18f3022048052976dd84bb05987896620dde15455b0a22065a8d6deb4669c8d4fd6a05f9 3045022063dccc91586cc5a0a87f5eba26e421b0a3f3c9543ad971f91bd5be0ef97c46c4022100baebe041127f32102b19c3f78d3434213e13a9306174621124ea49bc28a73407 304402207b01341d41c9c5e4687d669c9c181836b87cee5dab58899ada3ad61041faa82d0220385e5e65ea8d8cf276ec2f705dbc924264a1828007c19344057f531215508b40 3045022042e029de7869b768025b586a13e04cd65c87160f5a604763bd91f8634aa0aedd022100acd42f971f14c0be2f170e372672a123e112dfba914eb69169580c5d563dcbb3 3044022048402292bdf34a90b0790bcc0ed541141f22b95af40a3c338321b7119cbbd8f802202d524f81e46a63fb316a8f5381726a5c153cf8b8567ad21c6da6bd91c08f2edd 3045022100f7fccf0d51122358de3944d1d28602507d0069dc0d0b64eabc524372cbd8692c02206ee335a5afe1134a4d86e5e7e5a142048d60826b897a0478f22bcc01cf725c4c 3045022002f562475f466e10434ad294d3b632d99bd243c13ddfde8a09946315f4340ea7022100a21b297a6e9de330d8f871c5514e5eb170826247aedd5009a475e9ee67b93bd2 304402207d799441b5cc2fe2de567da12d95eb20b4760ee316988a1a09d68682858019a50220607a4a9f7ebdc59da3bc831ea22097a254d60bceda90ccb2f772f4fa363cc71f 304402207332363be1ea0bd9f211b0cbe73e820073096da78a77984e4990b02021a2362f02207d604b55a4ae5e6bb946a0d5d6ee5be60e9974bf33f1719fad68aed3c2b34af1 3044022041ee9cac004753f47010e085fe72303ed9fc9b685c1225cb969ac42fe1e7dc9102200e99906248976e4052c71c0bd959eccf634243f401dde10d536537612a27bee6 3045022100a01df752ef429f18170c1791c8ccfcc3194783f002538b572e80cb8fd680911702204b7b808d91fec1067def0d22c9e6a9fb039368754881754ee08774101fa66849 3046022100c4d8b30e84c39f5d207ef3a413930cfe0fd9c335bb758fa9408d1d027ff37ca7022100be51fc01176be91c94c4a5b04f95f1eb8e593d5f253cda0011d2b7d40e5b1ab5 3046022100c6bdc6515a6377726d228714750c787c0789cb70da5aa27182c006d45e70746f022100a89c6e7a49c04c949233ac8bee881438661b48b26866702d805e226ec0c6c6cd 3045022100b8bf5115cf28d035b8416247e4476e6b4716b35a6e3bf09a2f35ff69982830c40220026d851d01d5bb92e5a5ec8333ed18a3d17c38aa629054a07c5172113f592236 30460221009e748cab6462c3bddd74902c6c33674775de2fc5593c223ded0c82d68436d9d4022100e15474a80a76bbc8fb19ef67e2b2397437e03e20ef3baa8eb96c8f3c25bb0651 304402203b5cb44e5fd83623b50a4b0a9212c30e3106ad3d920b198a9439b60e8f8d668a022001fad283a1aa989b3cd9053b07216fca29a4ebd0d29b39da3b5ff120291d2691 304602210093fcd540a00772acfb9bbedde7091006bda1537dbc6afb1ba83f80dd5856b797022100a82a41421e1d55bced04a4bbb402f041271793f4f6026cad7a8eaad24bfc7397 304402202b41230363a95bcbf1ae7eaaaebf2876f08881fceccf274fb7e46d53c5cbccb80220679ad849a89bf1ce97bdd28fc5503a7e22f8a917a476488064c2d3221b8abdb2 3045022100c3fccef1826c1319f928f28908a9e19c16303e226426178efdacd71a57d31d2502207b7aa03bb8dcc63ab6b5909961809befcdb4e8f9d0c73a9820f667bd354c12ff 304402202c2f15573e1bd91981afcfd85b526587e9a476c80cd333695af557f221d9fff402201f7b2b765f3f4ab4420c76c40c755a76bbf92319975886564b9626c39632ab47 30450220343d53059e0909fa8b4b12a43cbbeeecf0651eb4ac5071fb50949c372f665f48022100a00c6449e8352b16a35788aa22c0c286d3e5ed07f9b52028a8943aaab92187d3 3045022063b6380294ec11b771e48e4444879a4939270dba58237bbcaf805d07878c3d62022100a8a2bf969005721a27f16143aeecbe60ac4201ba7e6a8326ddfb01180c5cd43b 
30450220643656b824ec2147cfa70e50ef3718f68d631145880e02e4c0c512e39b1cb65a02210096fe2e8ed0f3b747d9cd0e3bbda09df3efffbebc49f83d8cd04358580ccfcbca 3044022075e659601fb069d1323c5c8d88b8319e0bb977aa7e24b0bd31f1981667cd512902205ad1c79861e7abb823a14484dc3309a8ec62360c0a272a9f03d91bdac8d1d907 30450221009145268d3b7c7e724c85c526a7921c00e45fe9e0ba623c1af2451f4a5a0deee8022026f3aa9522ec07f294b88c85c3a961119f1e0b6e8a92b7bfa4a65b830181f9c1 3045022100cdc4bb63269f87aca4fc7b41462260606d131fdd70703d4f3e980573cf2114530220256acfb85fa947e799eeb9df510085b1435afe1cf37dd283f34dc8cec7bb9322 30460221008477b1992fa135c8dad88fc6b75cb92e3691eae70ff2a6740c0ed13e80a028f7022100a123f22b0f121a8dc50cabbc4c602c6889f4e9514a485233b207299a3d1ba94e 304502200682288bc1e9c87358110b8a506e915a37caabb69e17d3ae9c28f8788b5a3f77022100ffa9bc7198ee4df56e250f4e3736c7551c22ab13b42b86202da74a952975e4f1 3045022100829371de42dc7928c182e4f357ad7ea5eeff36ef8f431313ecb2e843143ec392022062c33f9618fc353fa99e3ff9502483747245d96912b82dadcf736e30b35054cf 30450220672fb3e25e6fa6183f7bd276aecacd8a8c40735bbc3d598415d00f9f63ba907c022100f0970b82ec8ad134bedd6e8c1bfa9fc1840a633cec92b98d363f3c70e46a3140 304502200bf8b75cfd275a01f333c2da0ca177cc05940866426245cfd000f52c0c0158f0022100eb7e47547611355498e96d57468f800b46068cf27316f509376b223c3fecf16a 3046022100d363813f88d623f426f6d8e1c11439cdfac0cc32dd2a42e59e65ce1ea49b882b022100dec8fc986068c85e2d34945968dc68e87ecd813d7c6cbfce0ff8786f48dcfcd2 3045022100bcce24642ca45c077a3cfd25c93f7bb7d8d146a2c8cadbfb65b9c5adbd88fb5902201deec1404eb9b92c5bb0e7e9ec43c730978a6091150df9c91bfde79251f72d43 3046022100eaf0e99823beaa8dcf0ff051e43f2a1cb5653ccb63dfdd854a7b22201c5d99fe022100cbc9a133ecee37c440e66f5fd3f83eaf2be1a26650ce936d401f345f7dfe377a 304402200ee207efd9531029a70072095827768487c768b30686f82de801e95f753055720220402f7b508b015832ec81002099a8917cf243827db02b0c3d88ea6a964c773651 304402203e744881ac7dfb6f2cb4f033280b9268021ed14a322a7199b00d51c0c045191e02204eb1ab28675c0cbeeef7ba962782498d6c9f131eff594ea38fd5a9d15777e90b 3045022100abed2baec48866061c46b5160b0cfa2df1815d0aa23a2aec4d8762875b2c7f900220730fdede64423efca260c002217d059cf094a3b6f9f914a134f07dc87f6cf9d1 3045022007bc8ee4e351893a6d098feb08d8ff1a9037167213f46471a8a2958ec9fe6f89022100a0a29d87a601f27595415b62ef1161b7b9813f598654e28a2f6efe6292bb8d80 3045022036d9aa9434e30af289e26e1090b763bd1da6fbdb1a0c77e6b147c62aab79a25a02210083df61ca30ad741540fa4b73a4946e7957aa688c1359455d7d21574b326e4b13 3043021f297fefa9d90dd8b6c79c2168e5a4044869e1ca37f674ff6208df8c4eff5dc402202d32fd4b35d77760a1600beb8070b8a8f54cf57e28e2e0c5a41eecb2b9296883 3044022068a93e9423a93efa6ccf32addd269294ed49e770310f969899bb053184d4e10f022005316af6141363e5db22c15114d29088c0deb09ac73390c2f2b982de0b4621e6 3044022026ccb2c9e935557ecf5a2a44bb16e105962f9f072e55c8ac5913920cd413eefe0220227d82f6d711c128f5c7e9f9e3776aa86164332695732673179c4522bb9d5b6a 3045022100f21182433493f70e066f821cd34361cf88c2be46776ce76e77eb4867f37c5ce202201e490aab011b19e7edb5851fefd522822f5d2de4c5dd1fdd032ef3aec141ac19 30440220567f684fe5085d3271d1a70e6581e24394b1b2a36d71ac891f3bb7a64bf16d0d0220361ff9025e2d09827ccb9ec07621c65f944af08152106653960cb14941860411 3045022100bea473685902c54e19fba9898fc354572e8c194222dea738874281790b1b858a02205161ece8b2f854bb0b117fde284e5bd1f9e3a4f0d1d40e7e33f7780107ee2000 304402207dfa69f8bd1be0fbcddaca84cf5ed87689bc211d802adbc2c0a2fc22bd1b8fe602201c374f23abd94a1d2d71e3e0f6f15552b444f8b62a409ccad670557fd376d597 
30460221009621b4116122376d8af103dd2c14ca460a9391ff733714baffbb2883e9439d13022100bd3a5f62743620af4bf83d3fd1005786f2b430163375b8baba9b2f57db4cd11b 30460221008cb8ce334d1d1a26c6abd55b715aeaa0a9244ba5713533f28a929cddb24daf9b022100cb19b82ecf102ba06167ea30111eb06b1a513fbfeec21aa83a194359f4d4a78a 3045022100d95c8a3ad659c3d622ae50ab0d8abeeb40fecbcbc320f4623f8b636141edd3ae02202230418e973095b462fd051e3a017ff75f4340ea8951226483c99bc258d74217 3046022100b609f8983d134dd36f9313b4e77ff18bb75e98cb3599e5b7119a610e6d1b3aca0221008efcf9992e98b8dc74a708b2858e109f420dca8cf15936325107ebe57d78281c 304502204f8ae42324c1f53344ba98c4a5c385f28579e0b6f147f0d2af026fc578cc236c022100c9043a4c909163208118d2d8060174edd7bef2c3c024ecf44321148060c9951c 304402201cbc2bb491b2ecb00a4251bd3e785c663de703fbbae12257e72aff56f893a0d602200d16faf445945abad3ddd5145ce0d84c3f9c3baa8146a245de363aa43429ea2d 3045022035e62c4d722012403956d520226b8ab7f29e83400d692966452fbc7e198974a90221008389211be3997c4ffc8702bc79dd8c894010c2ce278e8bdfb158c24159eaa011 30450221008f325c53cbc1f0f1d9ac3f7502c8ab1f4c03700f010ab72f21709920cea4e8d802202b57f03af49e3a9e683106799c747250fd3874973b735dac39e54d0f595b1af1 304402202864991cb5ca19ac6524ea1141671507cae384d521e29894d5f01ebe8e96da070220566d5ac2db39cfe3859624e7442b4fb581ef2bc4e787cd6b6a36a8925b05ee28 30440220583869e6f749feb8a994ccfc25010f37300ec52663093203a601a45de364653402200a7a241799773193cda60e98e2f99a2a6c30b51914a2f3561c71b11d1c8eb53c 3045022100b013f4d5aa6543d49100822ff762bb5b1256a289cf491c882f7f98ff7c1f019d02202c036d3f2ff066431741dcb54ebaac6fa37ee15dd2e7fda1377d5790f5e87480 3045022100f69a4938c766f22d93413a47febca89e97d6c67b9717c47e5a13a601272a789902207701cf59fdf6fbb0ac3bcf07cd7c4adca1002e831618191312ff5d098cffe74c 30460221009131de2c915e27bfb7ba548f119f95aad2e8634158476e0a7f4b4c176707a8ce022100ed4727825a29d9900bc4d6e4c35a45641456dcd79f0c14e5e6d10a458029a6ce 304402207de379b5f735a6d1cddb2c329ada1c189cc316b067986a10eac5511be7afa07d02206af3b76fd842063da8fc1697feab428821b602a54f367545ff8bf598f6887dd4 3046022100ec72ddfdf383c9ca246080e75ddd7aece42603e5a9f746b204b258235d5688c7022100c8d4777b6d86a7e7389190f2832ab0fdfe4456e5f91d7cfb9e59a1e30a156d94 30440220542c437dc120057e906b1274ebe7507f42c4d0e581e91f9d556f0a9da9c6a8d50220733012f8fd5304ea76cb168724b58e2b212cb7c14ef026b4bb5cc9b4ad74c687 3045022100f0fa5fb7102cfccbbdeecb935ebd327e2649e972fa062ed19e679cdf4ed018fd022071cfe270531fef2023748653784ff073ed578f2a54b77c85f0fe6b901811e71f 3046022100f66f0aa5c1c889e13f59ccd94c23df100fc94462ffa0536e8a35d49f255f56b9022100ce04a8e2c814a33e60cdfa4182228382a17d557b8e35a23f774469aef34eaf76 30460221008a17846bf94206fbb3881b820338d74a037a787e264656d6a867d4fe0393242a0221008208cabfb90036802b1084cd2f2d0951d313db76532b2f161143d3d7832b6ee3 304502200faa28b1b6bda66c6eb1842b60f4fba62828fe949b30bf641deeb7da94dcb2b10221008acaa18183a0cee3b53ff68f58fdf76295dd6cb4c6192f06a915900f85e54d0e 30450220429678883a8ed245b57b893380b36906247e62f3b575ca60922d41b706b8e3a20221008ef3e2adb5cbe1035ec5f5d7b02645292065ce8ac2f356c6d9cc965c241327f0 30460221009dc30986a715ec9897f3849c628829780ee6f7def17a501a17b25da7716125b3022100bcf000e91169ea3e289ab79fd0a8f689e6fc573c08f4009e0bedd8efba2e96eb 304402202ae75b38759a96ab7a255f6b2de567e54aef9bcbebfc33a7365549276629b1190220263d0cea454896c67b88bfc35bb3583fb01c6a084ff89829a8e781ed85f2b63e 3044021f07c7badc27bf7a0433361f6d3e4c9d76c72a39f5b7cc15f81153ef0086bdb40221009a67ed2dadc26ccba1d24087c0c108712ff06ca452263c6e3f614185ddc331ab 
3045022100f9070fea2877a598298553eb099db6207c23a8ea6fd4634aaac47f76c19451e7022040af23a7fc7354daf7abf68b279abd050d214d513aadcb61f564161f6a5a4e1c 3044022074a72684f74953729d1450790a8b1e9251a57acdfccdf22976ef01ce738af99502200338c24949234dc9a0616ea11f5f31561bc36528c5b008a03e4ba742fc8d7111 30460221008209775edbae32d571f4c7a1aac889074996dec300a79137438cdb2f412bba93022100f55f6b0c2c1f42d6d3b7d4e3c5bfac36511dcf1451230913465cd08880bf5359 3046022100ba8dc137f18f363c0d784968689507a0e74f02c830ad41b47a871213c8de34fa0221008c70adfd3a996505578adb0712731b55f57bd0060f130d2d10fee01de618a5e2 304402202b4891945887892e46efba8f509eb196787f605f65ed7a3e35f6e17eee18d1eb022023de9e9f0d1f7e7593a90d1c84a975090255b999aed50799b0b63178fedc5e46 30440220655657958861ece993e7e4b9e7ed835b451dee273078289d85394f97b943eac0022015a9ce0ad1efe8ee317fa1bea5d5ec9301055a7b322b465a1c12e29a69d688bf 30450220128b489bca425497794b386295406c96e1b97875b430b093b399bc940bb08e79022100d9a943b0308a697e6447c2a452e6af8d447542e418a5766233f0a7894d23a3e3 30450221008aa98f3bca7337995d3221e5ba2fbae2d1dc72e7d7f8bd90b6b88e31873fbc17022066b071a24c046cf4be82d92d505cc1f5194b2b6ed3329cd62f022f5b2974913d 3046022100b2f688d4915f3a8f77183e4c1b79d2e8557a444a9ae97a26acc709bc51436b400221009d61b1b3a29a51720a098d6094180508a46e2dbb8816e83b0bfdbcff8fb51c63 3045022037ef40f5401d559d6d7addcaa379a09ea64bd4636ac0eee1f5b1bb9b50c5fb8d022100d373d3347dcafa1dd2da9a4537a122cc35dbbf95b40e0958b71c91284bfb77e8 3045022100b36b57897d4275e2085c01897cf81203ff602c3ee84d35ddaaa863504b50f536022043ad61ec5e2ec44f7e03d2d94596337852ad39616503a5740b51d2c930d45087 3046022100d93526010263ecb9642da959fea37f022f03f08e13954b5ee98c8e675bdb08d5022100edd079e0d407b0002266eb0eced1ad879b9348f7cefbf53c5e29f6e7efbb4fef 3045022067251b9b621342df0633eb28ec95c55188c88b9a319f085e4d2e93e9c8f37845022100fc0f282085c8b6065c67693202b1a04588bd70618ad312b3f75f24f0b8611959 3046022100be9830d16eeb9d171accedb84d2a1698dd25c87f8554be926708a426d253567b0221009f4ec390aed96819818cb35309323951a1557c789586116beae7fc45724c9547 3045022004a76b099fe9ec2b60d0faebc6bf284f3894bd8eefc8ea48109f8dd8eb0fbf98022100f24ccb57c8c464e570a88dc1c653740a02edcd412688725d93ec0817c0b42c13 30460221008fbf49a4e75b9492ee6d0fecdd90a648f763ea987f4dfa8f61beed8d1110e7cb022100a2d9bba4cc6b17de8026c846b7713c5bcc4427fb7e0ea585ef95202a1416356a 3044022027c5ef1f724fb3be365629301dfc86ad7a37d96e0f7728d36cb60b63bf098766022026a62980576158a9255e12edf977aa190ef2f0a4668bb5d9456a5ba20f1d08e1 30450221008c53969ed64115e69f5027ae8f0bb2f0a8137cf0daf7f60406ffc02db89fc51402204d2db842c15a6d8175a0d4fef3a48ab2e6d2701f2a57c124615182ab76948241 304502206979cc3d92cfbb0c64be8d34885a01c06dad58adbf9124da760dca225bb294f9022100cb33f59d11f4e664757fd590f5bf370d7df6aaefac9eb0964f95ea84c8dde846 304402206f39a0aefafb6d166137fd7d2dddb91bdcc483d28c1741a73e6ec81c8bd2906b02204ea9d4c9ee6d06260c8ae67d8206c9878bb76da4459a7956d3c50f0f61111d01 3045022100b6bff6338421b7d2709d79139bfc71caccc957ea9a7e4601430a8c59c6c9536002202c15e877d861ed620be276672381b39adcf32404218548bdd96edd7c306d5900 3046022100a83c162a6e89c6efdc6caf7747467cd0d429d655a201e41e2121c67b4d3e6fac022100dfbd90c36fd54412cefe5b7438c2a560aeb6e8929f966b36e3ae3943cb71e51a 3045022100cd0f7201e9cf6650c3713c0c49025dd7d466697b0964d018f88d0529b8f0bd09022018683cc004c19d701ea0542e1000c88b09b647f3c406958369e694c992acd3be 3045022100c2f800f402f148f6b3684e7d988a9b3074705e83d0c01a0ef13c5f4bc23e489302200308e7d83809f5646765fc424543901d65c8b703fe1dc123328d618eebedc52c 
3045022100f41bd870ffd9af7dc28226d407d2be705834e7572569c837d0a5094b5156bc81022041dd7f2ce6d96f0fcb183c93abc33503b1e2a595083200fe637eaf747b1d560b 3046022100d28f4e3e7d9212cd9563df4a402f35abd853cb93eb109eeff86f28e88cc7138e022100934597c03de5f58fa230d2863bdb524079d0d7824fb8ecb7db56ed860079c70d 3046022100f471f22d0e8185b9760ca348c0762e2760bf15d78b55ad782e94129dba93cead022100d1507102fbb2fd85674b818d7fa68b954e71856bb8d19c7b704fdb7961a80f78 3045022100beb9249d657678d5a15619822477242dfbb63d9781353c8a36cc99664394e7d0022021a3c22b872e935010f2b2ccc1cf5069c0672ffd5778f3dd36e7593fe9e6a826 3046022100c740e5031f5ed313bdf2a8fbbe186cfe225199d432fd1835708ae70353ec61dd022100c71a5912b3d4870604d627acd69955af27e33f6189e76f1957f25fa71e10df40 3046022100ce9908883e9ce0e93f28b023f2f2c9e94a2918959b9762cc40e7da9728a20911022100d2f1011488f700d4af0a3be610b7ea117560f5f8f3335eb635081da3e7f9bcd7 3044022001444b20b4be180754a7db6308bd5c3e2ebd934c0f95830c2df928470c1f98a0022058b906e5e88a2a13c1365c7e5f7785a77a64fdc910106bef95f0e4c0b2436ded 304402207f87e0c4c3cc4383daccbf4c34bdb5456d2b32e7879402a3500069a8613948640220554279b3965814af9fe62389ccbc497278f4da76acb897bcbead611e7f772f67 30450220318e6128064390567c395161a7dfc1a05a4d24109e043704e104d3cd636853cd022100be5a72295c0f38cb0749bc79f54b2db888fe2799782f1a31cd98b30989671bd2 304402207b0334687da75c25f171681704c4d88694685c4435d60b24700e1691e6b0ecf102205c8a313b5f3e662689d1564cf5ecb443a35825dddaba6679d8048bb563ab7275 3045022100e6b13a4790910200d1f9963a9639a7d4089cc118f89f921de9c1a04e3902e93702205873ea7222707a2bfb693e2dc3d0c0e9d6c979b36465c47143d73a150088f050 304502203013407256af7f5ece75dc767e579306d821bd3edc8e9234cb00731535b39ea50221009096cf755fccfd7673141192876baaa9826e0da65590bb5ccc72b4af019bd42e 304402205a351718a4a19bc42ef95db19b24a7267141b60b531f4c1fc3a5874a20ef95d302205a6a2c689f2402702c56db923985daec7f336345a953f213bfb34c77e63cd0ef 3046022100dfbc64a6ceffc6718282732c5adc03f999454e4bd290d03e24d0eb91099725d0022100db26a1262a4a466eebc592b48101de7c2f094f402fe5d2a55a2c6d66d65a05c9 304502200e640c80af319c76234f150dc61492134d247ce4b5804e92f5f7c04d41b96255022100919dfb1416854f098e2c75c0aa378363d3744ff5aa8775efb725ff739211ddc6 30440220262a6714d8557b9ecd79e438a9d2518c155b43c5f635325b93f9ff69bf0f6ec502200b776d9986f852b0040430ebe93c0d9d9838951028e9db1c437baaf405bdf120 3045022100ade0dbcf7525afb3c1ba04e4e9701db766daf491ed92e71f0f34420fd1844ef30220074f859599bd3ddedad4d46f0cb78465afee9f60f23e7e27c9e5c692513fbc89 30440220146c82bcc58841739108810c0346be170ecca7c392f95c88a72c1e5b2864f25e022020540df3fe35f1a115f5beffc8886cbcc29e9256cfbe19f8f74cbb5f40969422 30460221009a122d13338338433a25049490e46d001806c1483beed30c0d2118b7a545ee64022100e268d84c90b21a41dcc0d1b649800b0e7e1c8f3d3c8980e3ab8c21db0223b2cb 30450221008e7f6d8e81938198731f23bb3de41858be1fa739316920e38c43157541a01e8b0220417e6f44c46f3b0fc49907cc7cfbd5049c4dd5caf5c21443559aa9e36c960b61 3045022100907ddd9b19f97d315e0a89a9b27846872193c27268ce0b171844749e5984f7170220220626f614ed17e64119626136edf0d6262c004a30f953ed038644743fa6bd5c 304402201ffb0d10f49d5c7eb5a98a6a38a91cdf613a07906f9f11f7cf4c74b132804f050220282d5e1d7ccc9317cbf9f4d504aa624e9c399123d5545e2f328959e69cfea183 3046022100a0ea8e9f380ba66b8c3bbb944200c07ccb72c08b31b6a2f1ff232b92490be2f6022100cd47d40685dd7867c2a4c7826156ff2fcf2c71581ee8457b50aa02046a828035 304602210093c1f3919d238c5c844ad3d72c1d94ce5ebd680401ca32a6299d2a3337f6e247022100c18f3efe66249f8b0caf355ad756b9ac2889435e0e2f298f9b160b563be320d0 
3045022100d7d4788599b53fe03227630556efafbe15704e59bd147e7dc6ec24b7e4b942a90220164fa8253c3e13ad039508a26a2d3a9b297da105d0614860eed4df429100b9b7 3046022100f77cc183ed352d7e95d4978bc45a93d7d4ae99615737891bd908b5ab71d467bf02210093a9617ff4c9a6bbcfd81cf8fa27e2a3450fcb517b412e95e8d832b3f7ae2eec 30440220667bc5c65b167e5962649286229547800c403543c4027614b11fe6f06e509acf022036694e2cc60a661b92d5d9787d9ffdf51e97344a64c78a56b20131d5746adf14 304402202fdac7aab5bd21a40cb5026cec526013d14697fc1f0da6b1c7405caec94d29e40220217c168caea5fdd6e922526cb76c2bfe1f17bb6a1eb9f084baaca0bbe5c79732 3044022008ac7b16fb2da3440f1770cf511ddb435046c4f49379e8bbb9879e49f02f9a9d022018b6efbc2be259e322aa5142de2839c2a33e2ef28d273eb043f8a5e578f95f67 30440220134692be664a3ce444595defb2dac9352eefde97671beaea9047af3d0a6963660220757553e009357ee73a95ed3bdc27164a1553a1e3df11cc1cdf5146ad0a3959ca 30450220049c3e0a1e8e571638dfbf88286ef798b374a177cb678937fb2907e5d4623d03022100de342ea8e718f5f3bd353296998315272a95941d01028e3c4078a6da478bad2b 30450221008e756a23e8a9af3ff0650b0cbad7212d3ad530b21ac12de640a1acdba5c3190f02204311334d43233aa27cdc75e9d2867c9f69884df481b323bdb25a917cc307c579 3045022100bfdad00ddb4f103ad64a0b637312050727c3f4c34a74345921184cf814686daa02207a8ef4a1dad80eafa8cfff5b1782ca541f59c0e33fd4e5a3cfbf3a90032584af 30450220355410fc63de5a4d9f0c079b63a3992ac79521f4aa6553bb70cb7b190a72027e02210083237921be1dd162c2a59ad360f1314d760b1724ad83494f1a3953790ddaa965 30440220574b6fdb9111fbed950c1bb1f9b06b6ca3ff81f8231eefb1635059179772d07c02201f765c4253e9566646d0274cb352d810adbebe27e6adfe24338c0aaaff85bb8d 3045022100cae2a433177f1161b72dd861ad166652a0906a7656c3cd8a5ef21173d0e89df5022008fd7df3828d4c641c2b2e825cf72a8392e12bc753ea688a5a9f02bd4a9a6bb5 3045022001cbdae283666fa6af0c0dffc58ece60b6e6f86f951257d39c0cdf3b8671b1e702210091ace91b888ce27cbe6a53ccfce28f520c5f3c294632d15661a4688091bb11de 30450220230681a9c9b61fd40761a8873a7e70a778873f436be69a2ba38d0bedca069f1f022100802bc5a06354d01565cfe5dd486e6fe288ecd400a3450950a4341c785c4c36fe 304502201407ffc191e47731de7ac2721e65d2dc606176d19cb5fe3c171373558c8d7ef7022100d410b093eb2584e33c04b2c1c73785e18592161c2dced8ed19e935e50722fa70 3046022100e02164b7a53ae4dda1bac2e3d3a5e5f5ba295c3155e23445a9645cb767c57808022100fe287a88ad1a2337c4a21a339d6d6f723e48ede10759c0364e614cf919fbf1f4 30450220621fda6561f7fe7b71dfdd0672f55f0287fb313fd52666a5fb119c77f21effef022100bc89ddedac20ad2b933e49bc9441515b0be9f81e692cbeae55f2dee1c9c7bb0d 304502203e57e7781cc049f5583c9a1501f1c511bc773b9bc3ea5590ce109aec73d0be7e022100e521a132e569a28b53fce4165b6acf5a51575912aa0e59aa110ea66b65353916 3045022100f5a777203f282da568c50ff1802578e689792062f7a4e19e48afacc2a016a657022004752c061742bb1823b45a6523e59e1e026dd1c29ed07e6f11885ac9b9630a81 3045022075d3d8134062398c9ac85ebdd7c8e1e2da2feee0e5ff35224cd9d1b82a133650022100edf4862114887fd25087105970665c1c2b1e2e03e3b0c31f2652a3a59bb6cd1a 30440220684df7cede25c77d55955461f00717fe1bf7eb563509c217fa97ce1b25ebcadc022063e89abce0d31cfa60459377ed09cdda32535c1e5be27869b29036fc1d20d687 3044022047302bb28642d479ffc9c36c40f726283df812d2df44d1f8897cf0b1df30aed702202441035252ec21ca499ef66d3db10707f42183db4f78e95df84851b420aa5172 304502206c895a19c68872764ec78ccc002a3e1c37673b354b34e5401dc2c9279b031201022100c663c0ff52fd6509fa8e86e5d28f8bee745d81d112dab685d456ba9832ab3010 304502206033967ff4d990dfa2e894fc5913473098f4080c506e6a461d537e5dde8cd96d022100869d3472f2d72b6b11d7cf77d21916175f77e02713461b5c411fa685a230e91d 
3046022100de741cfa137d7d8a17e13908ec66c3fada9048ff2d0347d1263684202d119954022100d3bb7475017b4766f2c88257fbfe4bea83ff09fa12e56529801a8057af3a1103 3045022100cd81eb527a746e9d9f8a516369adf8ce7b7656f69e5bb4580b4479512e3b23fe022040792234fede22a7fa01ee628dbd751dd46f32474aaf735af2fbeaee8b13785b 3045022008f1c51375e0da28d784b130dbac068e8a3f3aef8b0210951c00078bd54a70f0022100a97d731a3cb32c5a50c14d8551fdab1b8f5fdaa10d887984bf797c230ccaf635 304402202fab74ce98c3602a8097460c83f594d25ea7e1faa9daa9cdf3dc5fe88c5674d602206bf5f30a01f17b4a921f10db5b56e0b22d0bf05ebb47280d4475f66d3bb0272c 3045022100d9aaa8c2c20f5ab20b8d5189cabab4db5b37d156fef6debed55a655204614cfe022002751ee0405c7012c1a87dc85788e354db43cf38c5084c7b645afc2487e58abc 304502206858e820656976d440d98609f9dcfe5dbca03eafea3ef1696758fc4388aaf755022100b88294d57480cfa813237b6a71b36d362580fa476d914bc56cbc971c35b6beed 3044022002f836f3c05f9570bfb74edb48958c06101ded1a9e5eebdc62bc14bd30f7df5202204beb7745974d9ee44f8bb2ec2e47d40efe3325323234415d1a4293d491b039d7 30440220430a90314ed5b7e204b3eb18a74e9d063b5a9bc7a12691652b8dd4e3c2f1644402207e44e752dbd0de4256e248a067d4beff340ffb95444c7067e76068b269f08e92 3045022071f31eaf0fa627aeec80e464f02a9a7a44b374b429588c53885d50e949b9e2cc022100fcad3f33013a11a2309dcd97ec018be93b0581f0feb6c3d0f15a56e02e30a76e 3045022100cc92b98d7cc15ef09f497ae47e6ced8dab61d125c34a28c4432444a066008883022042d7d301ffa0171cc54f35c3fcfc44f9be4bbb5ba4b05b14b2cbf973ab6114b1 30440220757e684735f8e4207514efaf7c978bdda50923df2bc8a3d56eb1f8b10ae99c1f022038b84283c74a0cb4d6d8efce9045e7b065d8028479ed754744f5a42bd300fa44 304502205ccb32be6405047fbfcc509a4903022e5d36b79bff5fa99428ce90694c2090c6022100fc1085a2575f20b7c338feb90a75a83b1cbe49f837bce6e22e5f6a6e743ac77d 30450221009a2c65a968d9f84a521b1341a2e089e9ccdefd9e6840e394c49cdc1c8b02cc3502205fdc5409ee8b405d718dcd1d75ed1db1babd74f5c0f632e3e134800c2d53ece9 30440220581bef0d585744d1758c81e0f26d405742e6cc0ceca8bdcf2c99a0add7c70b10022048eb3ab980b07cacfdbb412915e1d062176cbee260ea79fb638b7e71d171a06a 304502206547694ce27ee71f0e670de80349a56a36ea802976669a133b8107d376440af90221009c504272a6408ef7cc7a98985db2ec50f58c075909358ca8c2527f5bff74313d 3046022100f24e041eb8aa792e476f6c203b8d12c46129787bc37ffb09845b55fe9cc22cfa022100bafc3e29cd86b278fb6030994b85ae0e8ea88e6c7eee82252cdaa3300e7903da 30450221009efc34b5d1761604ec23736baee8f81424627dad5ae41e828f8e3ca47c7f9fd602204e5f05724bce3941b806f3bfb0bac27c3d6217b878db9815f6ef35eac27fce12 304402204ca7ea855b14424d6b360d56066334243ec4c4891fa0065147f3f358f22c4c3e022067a084b90c1de93b8703e630fed22c33735e276b0728fa425a8881e93d320481 304602210080563f177a79a0ebb7c7d3d5b7ca31712d986db0467b67de6264c656a347523e022100bc2dbd8b362226587df0d5c342e3313da5592479740025faf40b396540a3572c 3045022100f6e699d3d6520db63839b93bc5793527f4dabe8ab6666db2354a8ae48441e973022021b1a989179352204b32b17900a994b3c9205bf425e72d8bc44dd0c6eb45f8f5 304502210092b5a83253fc6da0827593cad6a868f25d1ff6a2834c93904cced2bf4e6428ab02202601c31787fa291e90a4aea4560fb117e3e7ea72b722ad6112824eedcad3bb02 3045022100be6dab751718775ae47c83f7ca519fe23cb787fa444393d94042ee0f79392ade0220767a3c48159cbb8faabc453a6f6c10836ebc316a77c1561345b1c718507d4e5f 3046022100c39145730a14ceff52939da10d7aa901954d08ce0c046457762a743e107b02b4022100b75631028e07c1ca972a3b8f3592f52cafca70917ab912b4913c39c3d36924f9 3045022100abd2d7589a1c4767af1abb4ce7d52ae7d1940a3b719a8a21e467d008b3cf897a02202dd7fbb4112f28b1beba8e76e829c230fd7e3591795b56befa8f8daeb2eb831a 
3044022027c9a18f90329a58165382124674c71f4e19e445233095a8da8cabf958085f60022067d875652b315a66e65672a60b89d95f9e7da29d4e06404c0e78bdbc97a766b7 30460221009b40d348dfb0ed802300a1e39ee9d5266bc53a49b52a56660575c43cdebf7d51022100b615651fcadfb13f1299eff1c077a0bf5cb30e99d9dbbd97ab4afb1446aeab6f 3045022100a21198543c45beabd91960b29f185b054f9191088587fd2ba24d190d8ed362260220158a2b5674969b7fa2015cf301a65d1c03e2232820b99e1b1920c9dfefe277c3 304402204148d669057266a12209168a5dde5d86accf6ceee8dcec6079ee3ae6cd9121d502200131583b52296cc639786b8d0a5797e29b3f8041e2ffa8376272341218fb259c 3046022100a532f69abe32f052d0405804ab0e5d72d2f646122186aa6985a1122948b253c1022100a8cb7dd75edcf6af7c852d560746009a21acf43e2b9e54b83b275de4e48ce3a3 30460221008553a9299c249a7b2dfc252d38e3def78cbbe1fb4a93d77c58b057adfabb970c022100dbb0dd69ab096a1ccc318d0689d35cc8976a2b566fd9eaa8b40fbd9de054be6c 3045022100d08c350af667dc74fbacceeeda3504a76503652dc4c95b8981582c73e73c983b02204ad30720bafe74abf23607553372fac6439b785546857dbfe7d6bf37d8f466a6 304502207c2cd93757d87b1d38e8b31903b693598fdd70e73674933eb4740c13c0eeda3a02210087b6017d211249673c37ea4727060c928118e1218016bbfae9f68fc7159f28bd 3044022072961ef7905aa2d44cae5d793d1e149676afe456902ac045b6686df0a97cd0bd022037cf703df61e5d3fe98e55d92e136b2461d5a224912f667ec9cbafbd3b8195c9 304402206be37000a920839cd36636f583c9800e0ae24e0c24a5ee5decb4ac79b3465255022032f4d297b58cb35be0e2662512db1fe822e7ffff5356e379cc92c7e2093e5554 3046022100abcd5c7078391490f778f5433537296c792afda0ec0a32715edb6ad99dd5d569022100f5fce7ee5fa655e532ea41dbc839b4d1d48e15c970abd0d23d1ed8f01b1c3fdf 304402203b3334ce0b7e10e7a026f01169afd80556f882883effd049deb6c85ac3bfd62e02206606531199ccace56492c6f138405be20d1b92bd9e3281a6ac825203356eeda6 3044022013e47a05d0aae9261a5ea99a890b9f511a295e93b6a067c7e78708213ff384120220422fc764cd2b4c0bf129fe330e59bc1e32d613b0bc6b20f9799902d75e5ab633 3045022100c8d1001b6e2c25e8b417afa315ba78235e9cf72d601459d4d50062a1a49dec87022067460fb2e651f6973b0f806d0656d2edc6158e6be905c2c79da747c21dc76f8e 3044022038440ea38f04534876f0d4ad71181ab2285e4db974037718726d90a2f8ada9da0220774afdf61dea14237e6d4be4e7e5f1ad74ff0f0ff8f7243b60d314fe6f963714 304402207fb16e798b1cedc59feeeff4d5494390a92c3c87205a3cdf9623778ddd475c35022072fbdbe8949d0745d3166ad1dd87fcfbe3300ba9e58d94cbb75d127a4a916e9b 3045022100de7a1867d49ef507bb5444e6a1e3aa44e11af1c912592a8a7493bece86d05d8d02207c59f73a94df8f7956f34bffa499c762250e481a8a7f2967d73e661239af723a 304502200ba5221f76c5cda13d54504d6a07b3b7142f734eeacf39e91e4260291ba826b2022100c2c4eb864ac7dcd5fcc1d1b2b5d74ba29c40fd077830a366785f0608e3f10f52 3046022100ee14e74318188f86ba44c655b95aece7f9d92528994c4f559d8d0c815aa9c385022100a9a0acc468747c1b0b1c5ddee6d3e37ed18946ec25bc63e4c7bbc3929ce14d11 304602210093a8b61845de536a674220b8317a081a299121c451887c7fce993969513ea2a4022100c180bf4655a81927923cd5532194aa2c3f1418642070ac299360b6ca5dec9acd 3045022025777ceb674c1f9fffe89befae47c24a3f62cd7f2aca302b176381c332ad8cbd022100a235f0090aafbed4731865371090e25f38213e8024e7d8eff4aa0da836aae2d1 3046022100d0bb58a19bc8d9604fc275bcfc9702d82d311103fa48d1e062c09e4d3cd068e3022100d8fc011f5b3152d9fdfdb1b0e622b62a4e2ebc7a8d686cd3e3051b4ed7e4f406 30460221008a23f8b462a9658b927c137740f6ced45dfa2019eb2e707317477790345d2135022100b4bb63e6d5cf470ae93fdcc5ed768f3fa839ad173d3ff2957f0c4d50092a2aeb 3045022100ad73a07b15c954ecf29fb738d29e840a35a2102aec3fc74e42c7d6760d8ca9eb02205e09444290bff2ffa77b68d342248377e6171f244d58e619c6684ee5d9c6dba2 
3045022022c820c61fffb5fda63689f3154afd3eeff632a2f899381a9639bb2c7f2863f5022100eaa732bac6710e236d5b98668cf26d1cca32a1d9a7dce3033625aef6a3cea32d 3045022020baa313724da1dc99737c3302a85f7a0db6ccad7c7d8bccd2cc0044982d7766022100cdc95ad443451bb431b00acf5565c33fc8a8e08951e5496aa933bd91d2132412 3045022100a718e420a158914bb0d3492d63cb0c455a413f64e40e81593ad8c862092c5b1c022038cb6dbcbff1e77164e197d4edbaedee907edc86b42a29b4bfcdc1af6cb7f063 3044022050d79f464867155f4842c8e8dd8f3d80e37dfda9ee9e0a7269411518d1ffe8d50220103720d345d3101317ff6f8349c362ae4b02601cf2873d0179a0f1726346cff2 3046022100939083b6ca4c6e33986650a1cd62d17c716930f69aa47ebe37af12e54e490c3a022100d8a748af3afea9176d1a147a5490d781ff32936099ff6a46cc9548421d3726f2 3046022100f249e75f64c58177fe3c07782d6452c5e3d3c6a01664b782b6cc17b4e67720f9022100815676974ff3e54963a17419ff55056fe4337985c5a56b1b704942faa1b13c2d 304402202ff4200a46c84991eb0a4f51cbb4ca6f4444f8204e15c1f628d078fccd1d6060022052af784c0f20e75f87410e13bbf867dd5958a98355f85e77523743b386884840 3045022055a29e710b7ad7723f7f1ac77d30ac29affebed20d5e72d67c54790f64d70097022100a613db93e14186a33c55a3759b70fd3d9df085a05eacc28c1793d833e0cd96fe 3046022100ef6c1ce5fc0fbc61bf05cfd0ab3ef6ca8de88056fb6eb7d203de0dc4ed68bf08022100d3d16ecd9b3623e5551d7a4ff7ccb62c0b4d6d5f11f620ab279c8003595cbbb2 3046022100e64b166f38a86c4a6b5823136163fd17893abdf89d4a8212b3112eec3ff0d702022100d6e67360f0cebf84adaee20ba8c67b7e53066dea0dc7b9d6c70dd48092c2b765 3045022100a7aa2e2a84d405c287ffaa7ff7123c2803e3c726b73f48cf6e4e70d1b463fb4d02202ed41ec235e9e48b9bb83689e0cf99533c9b012ef90d0a5ea04c51467e8c8379 3045022100b6ebc492292fb0b3aba5e41700fc8aa6b204b204a8c5a0b9e46714f1409b377f02201ab6e9d134b6a00c1798065d2cc471c050c70d09a1e6826914c1a0ac2fbaec48 3046022100e168dc14c825eef18e498841ef5311af15c06dcbd26870ce99759b1ab9814834022100f988a8c371736a33be568dcc2c30f3ccc9adb6f3a74d8f43436cd336056c522a 30450221009d3cd8d54b55982330045e3f8731521a17ca093dac5ff353bb821be804f91fe1022062b4c469692570c6bb63815b2e7b562905ff12286c2945677c0c4fa9283eee73 304502203a4780a7710d6328b282dedded2962ce2b81a26ce8694b007ae4631be2d7599c022100e5607425e0f3f0251865966e323f71cd01ee368da04415de1d8b74d6ef2f6a44 30450220014a8fa2c388ee362d81bd1f9b4f4d504c189f99630327ef9b4783190dd85df1022100e44ec30716532478b3756962d036028a8e1585129861c143ec344b1819ea9bcc 3045022052ea9300bcecde881615a28df7e9e229089096ab3508c3f7dad8a8c6e1cf7712022100a8f9f0adb94bbf290b4c6e8ebb4e7da006eec7de8a0385e561b28262058dcddf 3046022100a96893ab0e0e27dc74bbb7894f922a1dd868c98f1fa53283b5d4ed424c468ff9022100f954d039a033deb4282752524e99dc744ef89f0ea230fd20abc204e2906a9dd6 304502204c33fa43575914e9bbf9ce6e4300b89960db9fda9f6451b8232a04e2c45c8c02022100a8331e040f3c3dd148c92f1e04beadfd64d629dc26e3d0be7705a581fbd9d380 304502201a8996da29d147b4d9463990479d3ecc87cf9b221247cba19d8a67e2d50c0a7c022100da408a6006355494013b8458f0e8cc6dc7ae9dd3aae108f80544105a6c849bca 304402201a84aa4f60cf77b93ffd783d39347c8d7859ec8187e8e4e97e976ee0c5fe837302204e241921f65462a6531c9e2c6a825784df5625038dc052251a29f9920c94b811 3044022044aa7f77a1c1af7b7bfec56168d8b9a261e9cc5626303a94a2eebf05f4bbae2a022064e2da1ae5481cdfa6e799f8af2c536115aebc903c27a0a67cfe9d9f4402cf1e 3046022100aaaddee49c4ada28336b3c00e67702b540904fde888a39ada4180c5aa0e6f31a022100e847affb44d3b9173c0d370b3f8466ab9f1af4e00f69e541d8d165a0f7381be4 3046022100b7f4d060e91718eaf15c1e7ac391fc81b59a29a516062582637eaaf10ff21974022100e9e91c98f231c5e41772fe67cc2122671a892110484d261dae8e8b78b85a8f91 
304402206bf02595c9a772c63f403b19a8e769cdecce7f49c1a841104ee1698b6805de2f02205e1933718a5c1582f7cbc0a70d1ec3d1cbb32599d11c194a2e175ddadeb0a62e 304402204058199722f77a5e58b544fc387ce74df6a9903ea72f5f714cc10a60cac0f8050220140cb99d475c7979399d217442cc99d1a08a4adfdc485f07dfc9034817e7186f 3045022100a201ad1c2fcf88d694e1c2a0bc355c22356648db3f70eaaac561ede6a5995982022016bc7ecfd4d3cf1aac839bfc653772d606abf6b0bb721e7e7ce5960ddb3da922 30450220698d8950d5f309f263e5f3b3dfbb177f1f62b8f35a3d1cb5183fa294901c6087022100eda315a6bd523bed1f21182f2d8cfb155ea16247ee4a9e4a5f54a608578cac2a 304502207251adac046d47556d5ab72946ad5d7be331daff4ce1e7c2393abbad6c262932022100f8f944fb658db298b713b794cd08d456ac2089aba708eba6fe9d1a47fd1957f2 3044022071b2830cd51dabb5c4a97e430fa65d51f3a344d748cd6ebc30e8456c61a8536c0220680f43fbe47a0096334f9a3b28c2abe85d063749c722ff4cdfec4fff4310212c 3046022100ae5cd125e3ba6d808fb6f6dbee79326e2ec890e42684d45811feccea5b4ac624022100d934b41a22b341b40dde836890a70ca03f830ddc59c479f673209b90cc450090 3045022100ef00f3fd4eaa836af856dc001bf5123d88dc9ea10a96621c3dcb19a2c5fdba8f02203ea3e5015c259bc31957446c205c3357dc39aaa271994a44491e6584fd9d419e 3045022067140e7e75fe5d299e33e11b645fe6eedf3b0c4df7de0253682144fab2d8884802210099c1977a7842825ca0f8a16b0d20455479aba6cd78909f78ab3b0010980ba487 304502204069b154458f35ac8ecb589baf109133611c3e867dbebec3136b361604116ad0022100d5789a4c20f9fa8392397aeb6365e49abb2ad359c1dcdbc5357ef9c96d5b913d 304402203d77bfb0dfcade9fcb54524696af261e866e9a1e434727808af585588b24274e02203368f2b3a92dd451ad637118232d7bf6a5d92c69108c9d2e91c307e0585a49d8 3045022100aa97771d2046d8bf6888dc111fcedd1dcd23771dabd89cdde9756558618f797302204aa7477280947d6265b53ad06efd40d61672bb5773bd2f64349f483e3fe71981 304402201e9ecdb2d0430074b7d69489f178a24c93a6d47d07dec889f82141155d3d6a3002207fbc8277c6ae5b6ead73af1505d92272cea9e0ca7aa342e5eb513cdc169f0718 30440220065d20d6df591d4ab45fc5810e316865c639c66164cbdf4f07861eabc4eb2ad402204661e64f5f8d4ea158386dd6e08b4a55127c626a1d59f6ca9d7e210e562f19c2 30450220256451d365a060c29ef7cde84e6379bfe0a2f2a6482c24faa1be8853bb0db487022100ed62749defb2a8ce8db313ff8e21d3a42a778b2b4269917334444d129eaa5102 30450220522c4ebbcef2a62a1854ca23e5284a24b44fcf0d94c1f1c4ba86c77d2ab341ad02210093fd76e1df536688c0f074f8a68df7cef4ba994be942f685919e31b3232ca22d 30450220103d08d9cd5ebb732114afc8df6ca3adb9c7cabe53af7812d0add3467e31db9e022100f522f0a6eeb83353ba6d94e85d2dc4d3fa7477143d936f58ff78bb0b456e3f61 3046022100e5ff933a081f6a48e27e6884f1543a9dcb27c85fad6bd194ae18037a774f75b4022100fd00c2bd4320aa962d81ed3b1b19dd867424ebd06a1935949546e1146fcf4778 3044022059c901152414d313a075c85fbdd3761926d7795b1280dea2e6d42e5bd2c93a0a022062b3f41aa1fa4e75a7d06d667e2e67175e0449d56592467bc8acdeb1e7ab5aa9 3044022029fccd0387fe6f2e04c3761c9f6d44041c51081d1a4055f0e00e3395ab63221b0220013d6ec61a02ef46b2d3c1d44437d526f6012b88aa6daa2d1a7262412c52e251 304502201b9dc62eb11a9539860cb918b2cacaff26bd5e866580e17c658b4b3a5041f99c022100b6d62c3e0a72ae4d649683b7cb04f711bbcfc57dbdd65ec46a22e7761fd4f6b3 304402201f7c09decf01d58cf0f26168e74ad7797e624b8494cc284e85b670a76d600d540220541a3ad7bf10699ec94b67df695975a7df6da4983db6d994ffa8ecf51689189c 304402205dc1975939e513cc5e2ce25b708ceb18e7bebcaae56b97e3bfc7452c57fd50200220476a3cc67eb55e43cfab761706c0b898ee2364bc3409eda723e4e8d0f2b3c89c 3045022066e5176cd69cb1324afb785f20cd410156a3d3673de30ac2315ce95894b397b6022100eeedfbfd42a3f34866189f11004853662f27ef1f45a20b7b9eddd4141afe1707 
30460221008ae0abea6cdcaec9b9144a4287c521e91a0a9d7d4bd581dec3ae60ad5a48f815022100f61a46bd8f70880cde0d83b6172c5afdedc2c67db1897d749f33f0d511f7b302 3044022042ed5ae7c35d3554cc5df0d70b668dac0ebc8d27e46d6bb463be821c44d9fe3b02207844cce338f991efde8ab2727574038c0994b3a89e34b9a713957fdf6f488ca1 304502200178c1dfefb11feb04d34fafce2f0a75ea149e29236a61f529e032853baed2160221009fe634055a5dd7a4c9d3e764df2309058dd1fa2fc2a93e1d159f6bb8402de454 3046022100e585fdc932cdbc0fa41533c904d203f3927d580ed4bb087c7b459084f847c2ed022100e20ca8a0eb13d2fd6cd96228c3d20124a29983e6047703cb109d7f4b6dc8c5fc 3046022100ce0fd9944fb77d5161ee7a4faf00207c64327c9f61ecb41be0d7d608b2a0693c0221009959b8f1b41266be54cf72b91ef2cf8d6df347061eea9a9582d9feac3152e745 3046022100fe4bd062908adca0cd13f5f800ef6b0e29643cae82471defc0e2200594b734c9022100a3468fcebba98ce438fe4397eea22eab452b23e309ba0d2b9722068de0798f77 30450221008415d7caa15c2738f2eb9ef15727e2339121ccfaf141c0f4e1b70d8a11c2b5fe0220796243b874c2f1c122137542cd8dfd2b52b13f050f826c9592fc4a2add83de8d 304502203cee2c50bf6a3b96c8e332a24cb39995a2e563f26577d1edbb35486a433d5ec50221008867e2bb071b87cf863cd6001bb326069adeb71e700efe4f9be26e4672d29bc0 30460221008d8b153ff01ea1382dfa51775062009a8928298aaca759cdc697997f59909316022100c37ce660651497c52a913a3988eaf6e0aeb18e4b3dad2b0cd7b49ccdabb94077 3046022100cf134422b7900010bf3fc08a98242d39f67fc74e69475910ece65106e654d69f0221008a065caeeeb6f153c2764085f63367e7a5a9af5d94fd115bc94c9a5c6c5ec08f 3046022100c24d7ce19a8e77691f45743b2097f09ae7ebb9a2a590790b5fcc6aeeba0aaed7022100c354b26c68546040c3fb1f0cc0c19968894e5491312f8e15830193ee7f090522 3045022100c7024ddab199c4dc0c1b5a5ef69b0cbb3be2e7454c1b6753c9b4364562f2f7dd02205743d7103b616e67098e30f6764b96eb081d452dd93c9375c6dd7193c40e38aa 304502210096b81c138f081cca2e68ea06c7aa4741647b6628105025d52cc1a8b1aedc89430220195f7604308cefeeb78890147c718d2ab9f4981ab117dbf8a7f197dd2a84e555 3045022100c1e9b1ffe69cba23b52bea7de2aa2c19868c2b527a72af3662ad5bd5a42dcda50220749c0df2f3bb0750c257d5275e4dfb9cdeb404bff54cf05de2a48ccd24075a3c 3045022100d5cf7a90360d5cdb6fe66d304876687bc1f8df0e5397511c4179ed2008ee51df02201b6420e4f5221e635f844c61429cb77533cb1f8280c206c85b06281850c16978 3046022100a380cb6a2dcecb355f02c31ee3fef9a47f551e5d17a2895e2036ea6dc73b988b022100aa318b8f10792cb7083b9fc85bb2fc24cdc68e06851642e123c9dac02db66aac 3046022100f728ddadf3339591af8bfeebeafa485a6a164c188190b983345f61d812bb76ee022100dccda1aa5f045e63ea017df91af448459a25ec53f6594c6d400c183bb15cdbe7 3046022100f02bcfdcb8c32718cf407e30226257817b90513f572c70d42d3f7a85b7de9bf4022100d1a7eb83f2b45dd46d9a99873d7b714258a4f3029a3ad64835a3c373b581f3dc 3046022100d567aa59a12f866a96606326d70a45778e1a1a675a483f8964a00d8985a86069022100c27b2244de1ede24afe01f9364f9286e8f583584212e0cc8683627a6e19dd9c9 3044022064fffa095b4f29fbf13a4b29fb187e711c22a64ab03e6c3a2fb88b1333b64006022014a5d7bddb4e4f3bdca488e9e6a866ddae5a3242bc42f32dc568087d192e7ab8 3046022100df84cdef22b63aafafb8e38aa714a4140c00078a70692fd493ce16bec4ce47df022100fea0879aeea4b3e5e59e1df905ebaeaa7ae44837d7ab66fe84b0a4395b35e85f 3044022026f3ebea839cfc7446c6dcb5e7dd80c25bc5418a3a1a253757e3d996d6e263030220462576600d5f011e301e13e7363eed22734affd946b7c12dfa48537a8b636a89 3045022054aa0a9d589ccfc703c636728077331076f2f5e7c4e8a815d0ba467378bc2fb6022100fa1827771068f01d6c71d7004961894e3c0fa0af0b80c250082f4d1473ef6b0a 3046022100d1fd1fc144e10d929742a6368643f75ab9e1b32d8d8ba7eb1d3a49c5ef7f0c27022100a7a86f0680a7d392b2d203997b284f1aea54fb939d697e6c9d16234327693421 
30450220169337ad66e1f0c1bbe3578a25d09a06275d12d951a65d2d567137760d66f42f022100c36d9c61526d5fe62924dd6fd7f4a9047999eba75e13f75fd54940600afab445 3046022100dfc5a0646176922f5577e4aedf11acfbafe5c62e36ddb5d7f742b22c0704d7f90221009fdfd7f7b28951458992723c489b437a68eda82b6c17cd3beca0a719f46acba8 3046022100996b797630599124f2d80a9cc7aa6c450795799fcde4656bc81e1b9723bf1061022100b0c98f12556081bfd233233afb54fdd7ff69d2d986a685e8d198e8e94f152044 3046022100b406dade314c795d572151c8b1148d2215383e36efd270c0779322c1167c236f022100c5e948ca30986653f2a1075bc04b6ab30f9c804ddb1be58cd50d1f1fa0b45d97 3045022100c88842fbe711e1e19aa1ac33ae8148655d7dd4a4720c1fea73ee094e094648b70220295e198907351d855b3887dfad8c243a1fd7eb44dda69c4bad136027f965801d 3044022069dad2c4dd945eb86f85fd02375d2a84874674c56c8194a174a4c3a27d53d7d6022060b19ee1c91fbbb05e63b4824c1ba4b8668dbd16daed42226eacb971777c0f98 3046022100f4e010134af03d3787b6e95e3f0cab10312389179d82fd9c8c5caf8ab6dfcd7502210086ddbcd9cb0f011f40074055bb1b7b454a22c996464bbdcad399f4164e94734b 3046022100bf0498c8c0b17b8ee723089964fd6fe5941b3b1cad9bbdcfd79eda5dd2e60da9022100f2bd64f767edff460b8e0c7f7a7cd23b6d3a24f43a0afcf10c309bc0b361304a 3045022100ac2bcdc68c33c5c4b1a2fc9ea9fa003828dad21eb558181ccdc1645f63019ae802202a7366da3343c74b76ecc2849ea604d78054aaeeb45563c97c5b60a367c0346e 304502203504ff42f6461d71be60441bfe61ec386ed34986a8bc8c0f5f08c078e97aa7b3022100b7fd64bab2404b4293bd556c6d0496b0262a2aa3eb68a761245b58bc4a646844 3046022100cff35cff073f6d5c27005b3ecd682c08798877bd6c59bc2e78043dd6d2b5e18f022100f951997f962937cab17307c2b07f1ac51a91b129263b8fc812c7366f9df2a6b3 3044022066a48223d808c94a39882331bf9be860790b490178e6b38dc131e01c2d80392c02206404f3bde8ed15fbd18fd8027501366e5eb666c66e310c4a8625f8dd184426f9 3045022100a16374902023dfe98d5d77e35d9609e9a3b50bc574224f48d9faa93eee1107d802201209e914239c0eb10850f76162520a15fffaa8ae9fd48b2697fee6a17501b8ec 30450220162eef9f77a90e73ac806790638493361b7d8a3e06e1dfb9a9f6a0be6205ff100221008f5975b3646c9f28bcec1449f0e0367aa54eb49d84bae706b75ce5db9e993086 304502210093cec325c208884be0bde1fe6907876fc2516a72cea4c736e60510fc06aeda6d0220615fc10f0b44b0c6319d2955e642c7c2fe8dc43d22dec295d2e316cff8f570f9 304402207959dbcdbffbc40b4aca1a2f757ee9ca3f934fd69ff12180413b680f9bc9983b02207d6329a3608e317ef012c387237decdba2048b46c4b108e6ae94abaa66817ab6 3045022100b632406391664d8b6bd38183d5f0e8fcfdc7838765f3c2a325a2fd2133ad2b3602206315d70e62045680f45c4a48e6b6accf42bfec84aa6d1249df99a4ac80bb9b50 304502202ce5c2ff8ba1cbb1de5770dbf29273d84d60c23492da56d64c223072e0903916022100ca645e31bf85958fadfc3993b4273a5757ee1934f093570f057b84183cba6dfb 304402201bd82fab77f518bccbed47417213e5fa89e3be89bf47fb3acf3ba56ed76b4d4d02203198afd12b73b402a98bee9b9a78d9c78bab9ad7c7e6136172a77da39feab7ea 30440220418068c9b2d6820087c79e84ba0d3fd97cf82b67c1d8a30b98bac9f10018bc9c0220473f11b4289d7673f8eeacc98cb1019e62df3d5b0a326d7d8d8461db62e22f7a 304402205b0337b43951313e53a9a9712ff1fbc22f4146e2264980c890ce7452bb16767b022035212777644b56df938606196b887170bd6ade26884ee6bc4191f83885758dd3 3045022100bffa59d3f6e55c804ea42fbceb9f1dfdbdf47ad4efd203ed5d5e0f039287e921022068db633ae89d2a46d6ce1b8ffeaf50490631225a8277e80a1d59ad032e021120 30460221009b7ee4083642d4f62eb29cfbacf67e73d780caeba967384521fa5c3ae6cb378402210087a8dacb03263936c3ca59cc818031056a853c985123efd477ba7ec36812ee6f 3045022028e7009146fb6bc99bbb92dad78dd518f80b23a21823ce650f95ba707ea9ee25022100f1582895cf939d2b3b6443975cd9f689ea263d61e0765349364afe7416f2f3e7 
3046022100c9f284e90f72ba31e3a6e38479d81fcd1db79f2efbaf51fb181a3d5744aba6ca022100b29435e34339064d64f292c44338190a29e358ea4cb6b5147d3e35485d4bad81 30450220031586696735eefedb99ed2b855f2a1bc18a6a049db6e40ff4f9b26c050d0f610221009a425bb6728909b4ca3eb7f16ae131606c2c640ed512d44aac187d62dab63805 304502202a9d4faf9cde3a6a40cdbb8177632a7128fd650410831fb795b05e85c4c64540022100b23fc0b6f57f89f942d2b00e7bf7bd8e33bf51c7db15f4160e251850ae25dffb 304402204efe5e82df56ff28e718c4a2144d453e6ac12b34512a76e46bfea19920aa5e1302202df3f216638c78f89f0b2d67ee2112c4dfdd26f1ab2899ca92a39c6244112f5a 304402202203c24f972a88af5971a293f83636f88747511ed3ba01b7b0cd9e12a569c8da02201619ef14ea76ad4d5d4930fed861da218931b74befd7693931f0838e85ed53ac 304402201f215c4eacb85843b6197002650855f3c66130835f2dd24495eec4c4c8c05f51022017f8ecf8e3cc72f41391af5d72a75f44264ef684126c211dbae29ab1047ddeb3 3045022100cacc4ba30d12926bb7b8032a4119d80aeef211ceb58062da042f5e9f43fceeec02203232b4d21963893a76743e1d9e4fca991c907a302de329c17bf7edf418d61504 3046022100cc997feaf9472fa7448a96aa9cdfa3db914c6334b43fe673e7543650c3739d4b022100d6cf0eb080ff323aabd9c66115b18c3e785529b4dda0be56be7d854c7f104e7c 304402200da37f003160b4afe005c8fc0f4e2c16181b1fedd4fa5ebd00d1d8c624cf663a02204fbe9eb97ef83ee7b99276075108116c392e7a4e54875b2f3d3ea94cf2239ff1 304402203f25bd2cded6069b03253eab0f7110792f57e1b7c1fd7f53d91abaf8beb8e86202203963d16c5f55261f64e0577ad9fc1d0dcafbed2c84fee1be4957923d61eed0b6 30450221009bd39018823d5bb0d848ebf7fbc40301edd626c2318a77a68611e67ef5429e620220669c02e741fb371123a14f64591c2576959e81bdbe32814f77f9fe63e3ca56b9 3045022032c48db004f19b47a60ff0951372e10a38817a91b89a57c47c0845cde4f92fd8022100fde86d2b262fa105f5cbf3a7283ca08e4d0a245eb3890b1b79a3c0e86dea40f0 304402203a886e5a3befb1c82d4f0f164a852b6d3e7fafb8e7007e3497800e460fef5d2902201b5c0fcaef4c25edb305b6665e0d860ee183fc80e341e67b548f0f1bbe4e21d7 3046022100bcd1c6770858643d867d361c60c19a83d842a3aced05679676d0251dec93f7da02210080471b718b6d7ad1e3fcc7ba251a3934a33f49fc3ca5f086be730dab7f3ef6f6 304502200af2f61f8ce0c8bb8c290e8bf1366b4c0ab49dcc5871c67327047493915f08e1022100f08f01bd5da0fbfc25375b3cb4cb72e7b85d1febfc4ea8742913cb656cdafdb2 304502207ee18789fff7f76c73935b75e61e94f1077ea028d189017eff391e8fcb1ccf20022100fccb0384d84062e0988ca940ef5485b56fe0885934b59bb7feddfa00a2d5baa3 30460221009ac6f40d680c8557ce52126c3396783b656b984ff905aa0c296330c9e4363a8b022100b6c3fe77e5c498459edf3d72d983b4d5244f53b470292f08f5e7dac15656e46d 3045022100f6165c3eda06a414db5c720d25c47954caac8ae58b00f60c267682ce3247bec8022074f5b46cc1339489c12a7b33a458a20a5a6502b0facedcd8b033d0f28ce5240e 304402201c00da03891f9652650208cee0caef092bd4dc0d053dbf7d9a4df543b3f5568002203c468b3d4c3af6bd0ff49fb6c1866dc16422577876ae9558532034ff452bd874 304502206411db5829963234a21e1c8e865f34252e8c97d2c284de4cf67cc0278cf6ac1c022100d20569c0954a28fe74a2947bd2e99800312eb7ffc9c4ab4c9fad6feb249c0cc4 3046022100849ce468d9f4b94771d53407fa6d2ee724bdf71cabb447e93c13d37ac6d7b2f602210084cc002bccf49ffe48b7b68386028d9a2f19cfc262335f8056d9fabd8024cd39 3045022100fa99f2e39b570aed49991ded17940445c564e80e8677a29fc5769475ca54ddfb022040393454ed67e0b2bb82219a54d2f009f1f330c9c7f4daee46e25f995379af75 304402202874baf42c0f4af835d181094ef13258acbb3b04f29e3155ddb3b7588b38d29002202fda93daa370c8b216052fc96a954ffb82e37f56fefbeb7e4d457420a34d27d4 3045022100f2e850849c82dc8649cb70d0a33ee95383a092eca05b14c54fe9805ddf4eb7250220609c8a59b2a159fa72754b66369588eb57c5f9038a7f4cb03e441e8c492661b7 
3046022100ecb7f2c8e13eff49bb8a26733ee049a8a6a99341ac4792eb59e251f47d4c3bcd022100c71ec53845a3cd2081a7f1a7dce26721e60cc7efd300638c486884620bb740b1 3046022100dc33c620b31728a96b405c3c7d9ff12ad01bc903e2ae3d85c1b0e7c2985a9eaa022100f5079fbce4b025b1a9666826e65625dc48b0680111f2f2797139feaf2b8ca084 3044022025388e31c6d65e00237678e85e7ef0c1b02ed132bc2cf73655cb81a03c0b33bc0220203f7c9b8665daf4ca6beeae33aaa3ae4731948c138af290421a964a948ba2c0 3045022005a515d28ff0227de8bc15bf24e3573450da2d4e2fef2fa613ff8f1385b6fa3e022100ae9abe993ab5507edc2fca3deb18e6d79697fd9ced5bcc69a6c1dec3376e2bef 304502210089f48406c4effdce6bc41040728ca609b749901a4826889b9f32b13674af41ef022078f25a0b1b92072a758d82951a1621fbb53547afb988df4220b274ab9c57fd11 3046022100d9f9f72b1b63894d85b728ee224a703b7626134bb563a579a7932adf347a89bc022100a23066be4153a3785c4a3be5d23db66ee2c51478e5f310654cb259bdba4b630a 3046022100f524ad59b56418ce860dcefbc68dede2b902e3a989fcce80efc18bd2c3bd615b022100a2d9307f1a6289bd82862e160f7b65ff7c7c58a6bf49646c8a36c3dcab2d91ac 3045022005413376c52a21b83a55ed3077190e79f1169bc72336f8b90bfde47139aba363022100a9614b15902afee00da460573321c10ed1c7582ea3b97507a29639b51f712f9f 3044022015750672839b50ce015ee414d37e1c8ed83451ec9af0691eba287a956c66ee5b02205dac0d6745a6ceff4a027e24eb3436fc0a58ee2141c311665b805e3443608aa4 304502210084a7e23ab83774667318fc0cc35e136758e10dfe7a3b35965845d0e7fb3580580220753c92e651bec5c939e86efb53589b9adf5482e60db407409bbbeb2b428015f0 3045022100b338fea3d0ddca2d9e82ce2e9740a1a5f61914bda422e438ce2b26232f979dd6022022608acbf2fe315e966ea70120cb7db1bdf7e492a5cd68804589c95abaa27a00 304502206e7e91d11652fcc1c1df84e92759e8df0d1b0b9a4ad65a3eca1b2083ecef37cb022100f07171700e48d93dfcb237c9aab7bf99e4e86406af405adf7f3af2d06e0d4840 3044022011b3bbfbbdeb47a5e4ae213e145e67aca265e4f60d34da7fdb22b2b9f5e12e7a02202b0343a9258c40c44f4596298bb3b0ebdf1548efc4090a9a0fc6d1c48d6cbe81 304402207de92b9abc0e6ab036943ab25105f39551840a5abb9434d7c967b9d8e5b5d94a02205f685493cb546c0f4be985a79c1fc53c6d47249956e603015a15026b34673c3f 3045022100b97bb9a07f88d996f0e76e7fdae9b5a8ddc805954c2a491a29ae0bade1f10afd022047e6d291ae89167370708e44b033c0e1cf38de75aedbb96b9d46ad49591c596e 3045022100ed9694f513f7ad15b1292edd0fbf60c89c8a6d8e8479e9c87a999e41679ea45802200c0975c08d9c2a6a84bcc61873eb6b16aff088efbb242f5680d88bdefb2ab110 3045022100d71fef066c881c1180631518eb8ade4f04a003bb2912016c6441cc5663d4efd902202b1a8729065b6037c6f1038e7a3d3b5001c2a26ccdc26f075d702b8484a2de30 304402203d7802bd482709ba40119fda87745d0ca724a1989d1effb10451f38819ec421b02201c27f2607662de8c8286c705e11e9c4f39dc7fd74d6471fa2aabcc7a20814583 3044022001e5781e73476536b3528de74808032379f0ce8943f4a6b93ad2e90ba059452a022007afbeeffca1af17e40f2fd774c28b53c45ab63450601f0dbc33583e162ec7d0 304402205f8ac4ac98ad9285972fb492598ea57b11b25929391c744ff7cddd8cdd0b5fb902206b9f606c00a344e2da12d63284157a2ea78d53a392075615cd2af6a783881d15 30460221009ef705129af665fd43a26c54019e0b6fdfbf024e177d937fe270aa929a5dfbd2022100cdb77192fc6ac7947bb1edf3c0026246fce47ac5f813a7979c4e2200b2ad082d 3045022010b74f464e51fca5c2835a757285ec9f58279b27b42c1e71f181158cae1858dc022100ae2345ed381c20a9b1c86921a39f10330b53f1869fbe53cf2ba7d93e5da3b79f 3046022100e163d8a4e5e88d258136c407f7f5c493279845bc9102b6c4e45d55a38e3b4bec022100e21fa5a467e4e85b10c638593447ebeae30a0772abea41454adfc67b5875e6d7 3045022100cdea6b0b2588851d014d3f30e7166f1b797a381f54e62d968ed079577155ea2302201395577d22a84c19f890de2acd38690dc94998e4fa2741bee72c9b68668dc13a 
3045022100870dcfb2ed08815a9f225f7a012a8ec557e052f6e10c5ee3e59730626931c4e30220719df4f0d79e1be8209517306af883593373760a0be1bcd9223221f98815e7f5 304402205797f88657a1ce89995a46435aa502d102f6c5e2bd3b582ff24e1aeec52ca85b022074207d41ef1cc40d04900c3340d82d3c24ac7e5e60da3a476024dfb87b72039c 3046022100aecc392607eabd617a52d915e6dc44751bc878e0a7d008673a03190444a442f802210086c7c8abf702b5dd7181fba35ffdaae021771d6a4558318efa731f8ebfdb30f3 304502200cf7b88e3cc90a6c4063bb4612b792db77d63be430378371dce14a4fc2023987022100d0888d8aef45ecf1b3aa3f51b77ead4d12feaf53a74656333064f62e257ba757 30440220308fb14c3bcea938cd0cb6024eedf41f471b24fd55a15d7d996bf5e2b4c026880220073883b03143fe1dff3235d79d3e2005cb7e4a59a2106be32afe89db9a448773 3046022100f9a3094121e62a5704a189e1267be2575e60d0fb812f86fac0fdd1fd757d26fd022100b2577eb0f4267ffce11f9456269050862aefc9efbd21ce6bf53b98f92a665b7f 3045022100b81bff639fe469ce4dcabf69cc40f89895fd2b776de7fb9b928fce5b5d43c28802205d56e53ef4c57e695e4730ec1c65712398457cf5b60e37088eb242efb9a6b81c 3044022020797db5c5a94ee9dd4360c7077685225d4eb75b748b6e1cabd3e30e7e3c20be022051d5e00657b54fddd94f587cb60d8f7e0420ffce17f81bb8ac3ce0790a88972b 3044022051e4d536844d09953421681cb5a0f08abd84b29028484385ee137eca2c0912610220334bf96b8da291de019553c6e602afe67d2982f0435a794032b996f7a0b7f52b 3045022100bdb51e9baffa3cc9889c880a589942a6136b1efbc1be620be499c1f7201287ce02203c514b728d7154f21a8626899d1e54d2d3ea274d79a51de996ee5598c02e3633 304502201a3898829a8313bf1805efc3e924822d5cd08ca9bd5e7f816ab66c6e5e565a4f022100fc21967fe3cf972ee3f4ffe291be5c46dcacd23fc7ff7ccb883f8c68d4f6f07d 3045022100a6115385bba6d2f297b9bc32f5721115e5ddb1f33776e884315f9d5322fc545602201cddbb89955ea7e5b64f3af71fe89d8c323b7d462527b19d2a874a62d804c5bd 30460221009e6aa151374b3f8b407c4e7f97b51185aa7a6b2364464385cce4956baf636641022100a55ffbf6ff4ea7f4386d5cba69af98fb3b31f9873f621b8e4134b3d6b337a383 304402201af672fd0e67c33855f59fe540b124022bfbc67407da0029f750c7d5926836180220558aa7c9217195ede084f1f159623f812a728f1872f966d227c17f33aca3ec04 304402203e8e1c291e2f6a10856569019dd216032dead1cf0f10061346cb6806df329da102202641fcf9f0d154950726a4807c7cd8f53a363e909d8be6299fd78eeea348342b 3046022100c893f5cbf2e4b87b63faea6db86391863d1986543e21e8af0bb05c44614efa7d022100f8ab9869ecaed37149766612d8c7aedec8bcf186ad6a0ac4350566915f4a5442 3045022100f0c6f77ab2bb656574ebd6c3080890c800104e3f1b46edb847c47749bf420ef4022023632eed7cde712ffd1ca4f362fe23199fddb00f34895707537662b88105652f 3044021f5543457520503422a4f9389cbc5ca192bf36631fb545d2d677363d90e6b34c022100e822de99a2bc07072f2b6cef33c6d10d606694737b198ed5b713bc1c9057d052 304602210098c96e0f1da8615c4183467d936a478fffb1a29683143e4866391a83fcee97fa022100953bbd36d752faefc07f2771933a4bfc40298274e2afcaffe4223a464fdeaf70 304502200086bd0bda06041affe3946493e41ff0ba16fc4b6187df3e9e9aec7825525cdd022100aec6db828129f4e816a66567d34c6a31d28a9fa42453bd18cfd6b2544d4f96e5 30450220015c83832871551375e4c8d0cbb9297b1f23c936e25cbe0f2c4543f90f97ac28022100b7b558c5f35710fe3d1c2dbc5b7ce9136fdc82e6ac0f5f71a73a79cd3f030192 304502206978033772416e6bcbc520c47020ea0d8dff097e98db5b2823aeaea0027b40cd022100ba447fc5d8a03f71074d359c9b0a725c0173078610d63d509191d63caf7b2711 304402200cfb78cdc8f019e48dd0815137e64dad9ecc45034fb33734f30ab1fef30aa36702206a69d0cdc415011043b1fb5351e627d386fa8210fa8b1430085708921f4905f0 3044022050acb7deb1422ce8288c6e7fac51365293feeff9e4e780f331248f7d4fcaaeef02201d9240fb84f00592a9bf7d010e2258f35d42c887cf0da8721311d6f81405a81f 
3046022100b66aefdef9656f836257d86540b752e04f5f0c80367ed41ae461f7ae86c331f302210083ef97dad5ac9aef2de606ef12598cf42c22b04a8ec66cad32d5854d77906a4f 3046022100c05544fac7f71a7a7db6349a623f3b26dcdcb2c79693ed02e0fa72129e03e30c022100e771a923bcc6eeaa931d8875b50b369d88305eb0c2de66ad2b662a5bc8daec40 304402205ac0d3d2145990c8aa15902f20c1292f023122b7b9c100ffdceffecd4aa9138502206dfdd9513751577096d2d59d365566872ef5307f3c7ca247c975976f76c83734 304502202c836b5f3ec474ba500f1599442d87b7fd1116416caeb1f468e98025091e16aa022100d483e56167677787dc1afe56f7243db6eb64b491def7ef15b5549483469a6963 3045022100a574e80c263686b75bf7ac5420063f1215632cea5e6008dac5f71cb4bca9864102201fdb18181ef57666b1cdcebbc9a69a64b455ae89ff4b4213e901c22e66996330 30450220462398560eb128c641c145f67fa3a19b96ef81c9948554b8f9827cf53499360c022100a35d6b0d3f24767102299b606d81d45832b57291b4001d19969a0682d1cd2d41 3046022100f9b9d1af2de50a12f62716b6432511f303746de74b30aaf7b2d40d2cac414a03022100b280261cc1c2da74826703c1017161fadf73bffa33991337b8532d0846d201a2 304402205aad113d0939c0edb1f3fcd08c56a14d773949e45109af656532fd4503923db30220613f9eeb853b59706fc236f34618764b7a0508ed42c9bdc0332f2593cbbd108e 3046022100edb4b5bf6eb38ba071e7699e6779eafa3bfb8d7703b7badf9d055a0360811b8c022100f084676cb946cca2c5cc67dca1cb38b2be9a93303b12656d5189c9357f0c5e5b 3045022100dfbf0c76a259f962d120d62129456d15fec518d4369cb9afe40ee9c1082eb91602205f1c35092cd9df5a8214917d8c4fd816f0307c6422cb7b2ec73aff0bab30410d 304402202e1e2192eb88f3aaa5cec2837b024221ee3862390af5025ba3c6bc71437eab8602207f3523b29581e5930b09aaf3d479a807dc98a4c46a34e8e3062b742933a511a9 3044022037dfbb1c5d98224dcadce915f7a7905666a7ead1a22a156d88622668900d7c8a02206057f3ac0d1783a7d36331216a01db2bbf5c1a995da6f04b077b8111e8545e5a 3046022100f7cdec7d10c5783532f6b2a8e7adc751c4695dc921ff8e71e162a317c71a947f02210084dda4fadce71468f45be056c6f132dffb62624a8653f9040763c2d7d3e6e3cc 3045022100c224173598a2c03dc64f4eb1e48ecfd70113f4e774b21c38c84a4a935c22f51802200485e0a44d12a1745665de0697948d3e7c26e6ed39f7b9a1d34f3d566e29107d 30450221009fa88ca65007d66a9ad5ea4be6128e709fc65687b0897cd722cb387ff1ccf86b02207c6bba74e2312cf8b219577d80ed7bd0b1ec9a7a8a8e98dac48dbfd0a4f28ab6 3045022100beef156e1e578a5d388a6416f44ba046f5b385edf0945c61eea77e101a24d46f02201bc3f205d4a9e6ae0f3dcec770fe318ee91dcdd82cf717b25e1cd6ec9f03de9d 304502210093957e518802a7025f28542635ea673af9519a7777ffe43d0257dccb65891aae022004073a44da60c6cb5b183ea4173abe99acac4a81873e95aebe0be7a73fdc1e42 30450221009f35d7389619ad991898cf1e61bfffe7cc8e99d37d25a3515498130b53ccadd6022039b446aa68590e1c5b9ff94e8aa5644b834d415023e0b9a03588cb1ca889b59d 3046022100b0180dd8ba9651cbe094500deeeae0382bbe23e58416aa4a16f62566bc1813b7022100f452ff57300b919eecde6d33acb714046db68c829bae60a56fd597d96aef330b 3046022100e6303bc3ea1caf2597f6c624e82795d7cba49998c1d8ee4113b9d46e36d4d1d7022100da0543cb65c165733ffc313f4acea5c01948ec16efd89233fd6d120ad60e7a60 3046022100ed431a7cb69e58e299ed92465fac7d13e9a2c6085862538f1883b009cb9b6359022100aade8a159960b45cb478a5c78dd07c21e65494706f67559ff82645a9398e6e61 3045022100be88e74d9954a1b1ddeaabe8938b6196d9738d8027fa500a29a8af581ca7549a02206b04f514dc6cc77330c99c8b3838435c2b15acb7c961b7734300982a64416d04 3046022100c6a1158f5403e1980b4d397238df2f1f2bd0126573f148690b07eaefc872f93e022100f8e193824a4719fe6d70ceb3172897b0669030295b4c14f35b7fa8cd2f001b9f 304502204dd5bcd5e026c1cb09bcd1f6cd011d87b07959a38d6ef6010f874d1eab904f8c022100fda10de09881db8191c1e9e161720132167aa11ea86943bda90265292e44a65c 
304502204e7a58f83c707a94bd9fa3e8edf7a2ec1974afe17f722b68f0e3fada48dfa84402210096334381f7dcb2e52ae7affb8f70c696c36460212f1bd9e3bb704974adab46d9 3046022100d6c5cac5f2fba279772348c5bd4da8a465885d8521fe8cd81782cbcb2ef2f3a5022100f447c5eb7a0e620b52bd8b4b84afc368598d531dd835b4b460096cb89830ccd4 3046022100907d9dbdd63a38d6ff2ca2571669861ffe283f2e83d811dedef481676bb538060221008859bc90371cc4820b11efb1611b35a63fac9c0c5e06500619892af467fc841e 3045022100bfaf7b592238eca04701ed9a8f63fbd175e0c216804b54f8e9590bda4877807602206e208228ac9b986d5b908d38af3dd2ca104fa424b2822df1d68dfa456e5e2421 3045022100e165bd3bfe9150ed9edc05d7b1e6515933747f003a49210a2d8fe8a46e58344d022030eb179215b3f3febfedb34e18841fa6ce3e8d8eeca58290a0debddd97a7c816 30440220043064dc8e0321aadc7c8ad1c1c58ba94886a61082a44de5c8e0ff0b1fe7008c02203385e2bb0b23a4bd93073cceda727e79ffcaea70d7bda7c9688521ff90ca53f8 3045022100dead653ad3e6fbd0d0865e7c315c2b0f353585171c7d96d9c0f18b219fd54c1b022073f9b7cb49fd9c9c9eeeb63c403cac05392fd0259118728f4b8152055c326d04 304402206da0ceba267f859843ec9359deb5604588549e3ed7e4f5bd3a5f0f3b8bb8e31602200464bb2e1fa4fac244a74e188f37cb6ec8906319b21c30e0b377d72f0ce1703f 304602210097e6b8b6acef82951cf6157b2829a65c87153f190450f3407b72089cba053742022100d8663c6832677edc6e7ab4da35c9ac2966289cbb6c3f5f667d0306de945b9d7c 30450220680c31d36e4a958f2fc2f69d4ac3a22ad5c3dc16a4534cc9337b048d4eb69bfd022100e7140fa772f8e752f0828b5cdfd00d99dd31d2cb8d6418c89625777a4bd0dc68 3045022100c551423d55784331421e7872561baa2065b097dcbb50b90b577cf74df6cd4ae002201ea4031f63070a3589bbb60b0b9f4139057ebe8a6e2dfa761eb42f884ccc4569 3045022100adcf2ddfb482fb8715321367a5025bf87b2959cdfc73728ff0cd255a371eb617022041bb160ca663bdd5d4ef7b58434abb2fc3ebf641864d3ab089e0780ee423052c 30450221008154637fa0604578da86b0835e298eaeed69701ac74f9a984d5b3efc1510d05c022028406daf4d7f1608c4a63befced8bdb74759c4b7304c9125e56977b43988d1fb 3045022100da5f57576f2fd28ec215946f4a32d06f2f06d29758be2a7182afdcc16308b93002201fed524cacb2e25b504b5a6bfb7bdf846a917197d53ecc304fda4749a32a736c 30450220321de9adc91c4cc707ce47078bd6c4073486e95792ea233b0ddea51beba5c329022100b67716313bf9da6bad4e849fa4e60691b802d1314d60aa6de8472e7d750b8e18 3045022100bf98b55181d1bf8fd670178de7d112933f1a97ce60311702cc7d47f2243dc3fa0220425831c6eb4eca1d13ad30cf98e8ea4a7558bfd56288eebd85022be8588e4582 3045022100b6b0a1d67d49162906c5d96c45d5d7242d797ba9e30f6bd131b2cf63b95d1a4002204ce7a8d62d3d39a8a09f0b16751c9be9f30e9af776df206c900690ea43a041a2 30440220780a1475a283868c260ca330342051e546be24bac8ce68bff44a2928fb395dd70220060eb2938f89a09693cff5a8f1b27ead9e449446680a2d25047e9722361cf1c4 30450220206cf3277ec99c3cbe699460396ec542246fef5f554bbe89acf4a1e37b95f940022100f01cddabaaaf038253ecfc9129d6d299c71b4c2bcc4250da1501dd7ad5cd9e30 3046022100936beea786acee8dc02a54dc7082610ddf4f9bafc26e67a861468c9d7ff1ba6f022100bb6768a435fc150e2152760c68e52380991aacdda9e8ba7e04afd5e3f997cb74 304402201531df8800d39f6d4c71036cd99ecb165115393ebd2272fa37a6d2dcfc5c048002201d5581f0c9cf9898259383faf0cf31f2ac9f8e72b6a910575596a61b9d9d2198 3045022100f4f58bd4581043b04493dbfb5fd1cc59c24d498f9bea25df1edb1fa02f5baa1902202a9b9cf993def4d0e3085245fc0901fa77f760a81b328a8a072a0659f7bf1497 304602210096f01059844795310d92e6f580a43fb1957459dd19057dd1920ab964e37c7a56022100c05f289f5aa8a39e7843dd20fb6fca998be885a489ad027e775580df46fc1cc4 304402206ac59b20fdad4bf7066a2b7a507c9c4f63b0e786b908d91bb9cc288a2840542d02207bf73eb8367b8d5ff837258c00b5b8b6a1a6a1750f739716ceacba8b9025fb36 
30450221009b0f1bc3a68c69ec7493ce8ccfa6a8960ca9bcfafe247fe6cb92daa29c8aa898022002e6dd023c52e8555c4f697ba96e407bd91ccdeff3dabd49cb25e7567c18ba1b 304502201de973c89a1199a8c3425d4673cf1a57c799280be9eac5b2cf87d504fc973703022100dc2c6d212fbab342e7535d15eb60aefd0a5ae1aa5e2bb5921121b106a1857703 304502205b7baf25ef4b391818430083b20434d75c295655bcb6d2b6bfc8658afeabe098022100996f1e90d7f0f66398adec6c916402e389e8db593c7d6f74cf33a09ba57dde1f 3045022100f8386410f1d31631600fffda275f7012a4209ca423dcf457b60116f48f4067a302200359347e494c5749a7c10c526f334ae2432a8dd29d4d8d3e6d1f83642bb0ba92 3045022100a6c5550065c08a81dc520925e97b38a4b1f56c5d2d25870202fb0e7c80ff9c3002206fe7448a987723eaccce6f0834ee9c4d4df7d94f8d09a89e4022fb2e7c0ee3c7 30450220090894beb53d14fdd54d753330ad7c2cecfa8316d6a92fd703acbf2eff1057f0022100c8f234156e2d65be002319849d90af881f6f5aaf122e0181cde9354684d6e266 304402204791febf51894e3e05da840493a033945c9803bbb656bb6041ef5b082dbe0f1402201b74ee32be7758e15b49d4b2352eaae98d1c3900ca9df271113b7d33f4c0b5cd 30450220103709914106d608767ef431f1f95a3bcf442169dc42790701e8aa2b226b939f022100e62ee86b51e603da3de3d53b6680bd63449b10b71fba70e49ab092a398a6b816 30450221008413a6b8ae77d24f3652ccea1733e82d15b885300e691d507ecc028f4f09904002203c09e003ff39480d504aae24d46884a99118a095a48701333ef8493fe2ded5a5 304602210089dc52472d267f3c1d4c02bc4784a314012171c33b294ca061cc6247e58af90a022100ff10b1065c3979ae7a20c75812ea5dca8e30ed23abc94cf01e27e3feb51b95c0 304402205d7fd9420c7403298d28f005c367245a76e1c52b20ad6c890c1ab37867700e9f0220336bc14c75ec1a193f473e10958383e47ee0badc3d9ad569dfc4d4c84362d226 3045022100ce34fcda502f56e040b3917f769c907533cc5de843401d38e103e874b137c9be0220072a4761ae0c7bcd9b222306276146fe6a27239fdb79836a19d1fdba3a5d25bd 304502205e4a23168ec1fd93d96da08e719dba7ca9021102ca654db67b10df36d5a3cb43022100d5d175bacc55aaf81e334abf572b213dea44d93feee4aa1b15f7d8bd6d4eb098 3045022035140b11e87e6ec373d34e9bfefbe65786dc5ef21845ebe5d02b4b4ae7cd9716022100a8754dd64f602ef46a27bf206400d08e18d3b69c3c926fcca87d4824d92b2f1a 3044022005842a00ac1699e67a7a8e1cc5577cd4745f1be1c5b337f3410f1e13ced88a4602203ca553d231b6700c1b681c8cfc3307df0867830cde72df03bb2575c497a9e3c2 304402203359f772101d666d80163a9f31add42caafcdee81f0d9332329d482b96a855420220420e13fc9d7a9ff877350444c512d0ae83b30f7c5ea2df371399213a433a169c 30440220463b48e96c6368424c7f785a356840b3036b64194ec80f8087b4fa8f15152aae02201d1c78456a5cbc9d1c5dbb3ed0ce88f599f524da294cc46dccba6484f302e0a9 3046022100e4e91e2b88946c2880d30acc0b95f5fdd8bdf420fb265b0e0aea9f279303aa8b022100d70d0972e4c6cae84ae4a8f0c170b7612b9200ef7beb98f7cbf585b6e7404fc9 3045022100c4c04f39b1a94a01353db3656da1c9eb495c71b19aa4d62ec452f04a54c7a8ed022038250ecefa2b8b233495e8080499f16d78a599fc34ab5ff2931f5631caf3a48e 30460221008148ddc23199289127002257613263b2f7e86dfd8dd24ac380dac326cb816f5d022100b38fd0e6cb71a623ae0fff3648431ba574caca782942363942481857d66d4193 30450221009b9ba14a122273c789e83b5672caa4b7b74901a5950f9b278e8de811acc684f9022060b9cd380707aa29b90447b83375edfcd915ea84fe3409d5980873c7029b8491 30460221008e450a327d2616f89a4df4992abb59d0355eb98e35f1ad488967539ecad97c6e022100c12b796a8a51d06ddfee74694e813f4b991bc16f056464865c564c1d124380c9 30440220338f89200ca43dc90aa5632879a2a937924aee67ff6eb07e5f82af46121f51ab02204f0723c2b06831e04620d5abd247cfc6100763ab219322e63edfbc5c53da3f99 304402204221e0e3b09d855877f24d7f8b2724cff20b50c41097d5a749d6ae1114f2e1b802204cb64b5f79fae6fa9b2cf5335c092628af2014e21d0e56ab3bb9dbb2bf9af8f9 
30440220732b129c7615241ee213e5849daaec65b12dcea94bd0504cf6d1bd4b3a0da41402205b16c0e1ae1b23583129ce25bd60dccce0b5235292da96f39e0bf23372e67106 304402206331eef82d993fca75160e722a65a3a425a7ab1ee5d59e3866a4ca09cef131ca022038fdc10a609a90189deb0de2dd171d64d236c6efdc7c87546caa0db788147c59 30450221008b791d9ea40ff8bfb33e25f9777ff072dc660be652b6d554040cb33710cd7665022060242fcfac3fb869a4ef654f79a1f753c1735730f1c0e99357edd32d372643a8 30440220010296de45c906c93d3fc13d5322c697bd5fd350469dbdc6830d3d1beb12106c022076e58913c6e702a4610c9a0b928e6fc23238e5b67591422abf9290cf46e66a3a 3046022100da5da4e0e68ed770fe9603d22843548bab5db8d4c805b4cf0a18fd91662f9cef02210094e0eb1b46cef4e4b12a87514b196640d3c514f558e3b0643ed1281a27b7d5bd 30450221009afb65701eb012dcf32d8fcc9abe79fb4323a47e4e02702e1e80dc3b6099638f022047dc6cfdbefd3b56077474c6ba3271c85f68ed711f39f278136ee3e7862e4eef 3046022100db3a4e3ddc4c8803fc1bfa04b28122f28d261fcefbbf793038891e3b9217137c022100fd410f65005c643c5eeb57d2ca9d800a0831c2f6354ce4f30e07bbdfb5963d74 3046022100e5a4f650290d1f556ef7b139db04246e9a6299d48baa6c3e33a0e857d13da5960221008f7222c0dea9c2b6011915e2c550278b3d18ee3899204219538cb8c3d50e898f 3046022100e5f6eb5f791be5576df805264655ef74b131e529c69339567e7cb2fa3d9bdc200221008fd893dc866c61380c51be47acad501162752b342d27a0287669c3566bc4b92f 3044022046e3a22f8f18e5b0a32e7ee1400744490df010f96bcbf4f1e683b48ac89c5ffd02200d938441574cd83fbb2866bacdb1f2b80c7368ef1983f0a9954932f7b337f29c 304402204159629ee69d3cf1ba9ab8a1c99cd44279b9f4a08fb15ff0e976be82e9af010502207205ed93f68a9d1550f6c85607d7d0f6905149c11fbf7d07a016debd6cb20c40 30450220659534bc511ae4b44933df5f6b1dc72d3d43eb8d663a302eae37cbe9080908df022100d783df6365f3e8782b6c8d3e0ca57435723a9b3fdf091c183b159016cc0ff6a3 3045022100ac3053b162a11ce0f2458c3e69be430757085f6046acab5cde5c34a915f0283d022076cf5bec533be83f71a031423ecb1ee5f35213b120efe7f4ddc9b431b40632fe 3045022100b3aed46ab469122d5a64d11786ae598fc9070c668fcfe6adbf72a6d9f527fae5022058c1e960d6fe00a55cc7f6cfc125ab0240a9fb7b5a7570d10bbc43a7a9b86552 3046022100d83420972b3603ebe926e8b36ed25dfdd705a47d780bc75e366597df3b1f688502210099239c66fef0e9f7d8974d6a1400fc4dc38a8b3c6ab831e10ec5564a7df96f3b 30450220387e9652ff44f2e669efc442a3b833067b7c3caaef434f4d88aabcc44586be41022100a69268998225d60cc01052caec97e38cd94c7a69b52fee1520b115a71a230438 304402201854f5e31782a2f24f3aa38aea0d5a9870876ad10fab4e242095c40a566a83b4022062e69e7abb79db683601bd31474f2a7fc4777245e0096090a30c8366f2034224 3045022006d1e178aa4e52eeb8ccff90d67be809775395ff25fb6b2636cd068c8529cae4022100dceb7b3be2353e39bb6daeda8a5219a8449d3517c2bdbe22cbf138838a560bc4 304502202d24a580bd5e0042647f165de38fb9fe4ccc9e262556a44071a3c350fdd89d08022100aaaf155b16c32e597ad5c6613a92ea026b6950ec5db3b82b1cfd2bc546fc790d 3045022040ac1a6ce89f1300415684f9a908672060e547476e4616a986ae485c26ccbe7d02210095aa6bbb572a90d50f5044fb572bc962ea421c5c922b6f71945ac0aae60b85b9 3045022034192b25cef9a1bd330e76eb76b9df0c1544e9eb4380a126dd012d5ff2f3bae7022100a55b568d06c464a5da860034caef4d0228198962aa4de4722584e32cffab80ae 3045022100811ffaea8416f6001be3adb8e139b7efedb3dda025047ef2d5ca2c2f323e51cc02200d0edb8782cfaea19eb53fea903e1d5b5f8891111347a8795d26ab5c755a328e 304502200dbf6a61bc9ad92f63f6c83f28b2b1978ff2561d73bf20b7cb6e9a680b83ba94022100e411eb70de3155a21fc2773bd0dae3aa7f4aa120db527035504eb8ddd544ff55 3046022100a9ccf596b40741e3ea9bab38989092d5a382c412e2562402358c001ce5e8b53d0221008949c3a9a774fa6eab178409218891be36ac1b21ec944b2584bb46874fef1114 
3046022100a2a97def918201fdbf9b141374e93ddd715259765b9fbfeabbfdf8d5c4c84ccd0221009354354fd672e2dae8e667f9cdbd1aa8e34afaa0fb9b7140f6095ca605f6bf7b 3044022055600e4dee3038052cba67b3971f4b6ae882d53bc996cd2e98b03695a14837f80220182b10c5bc9551480a3726967b0919cd4e88c20a11722870d80822f149bffdd1 304402205fed1b584ccae0ca0c4c9d0eb06a3548d490b22dd58b02db549bb7f63d4c92470220185d1e303c22e6b0544bcd0052283e122f86db41edac281fa23faafb370b4ae4 304502200e197fcd04165a8fdb01b2ccafbe973a6724dc5ba2cd6e9662b3adc099b658d00221009ae4aef40b7a45187b387996a5a7e3efd1e3f7cd0790395a237f7af28a0532c2 3046022100a7cf6cb6dc553bd6f0e87bfa5ce4f8d997bd5c568c6f53a0b2e3823206b9e893022100b4a22b79420186aa20a7bf4573addb14b88ff6c683d1910f86fec22e3112081d 3045022100fae19701394772fe5b76c78b971888fd1560bbdb2a64f6a279126370991102f202202952b4b557ffbb903006ad145fd55f88bd18b357479b72f5f4ebb7f47c6defc8 3045022100c43f5040aea85f2e7525597ffc66bcaf60c2e6ed5360956e013d8b7b2d64b65302201dfa69629cd981da92a8a92347cd400dbe0bdf9f85472423b3d3bbb25af22719 3045022100ca438c79f89b2e1d3e00d67cf309ca0c7a7ade0876f040be2d48cd30ed4fc27a022075a431a5cae11790b0a915140685764be31c740e53b9117dee1d5409208b4d4e 3046022100edb91d187ef556c9213ea005aec04d782db61b4ab78ca6ca22600b5d15ec7eb9022100f7168b0a1aeeae48b490a9aa42f3d030ed5d71f4f87a653c9d0c02d99ace6e46 304502204afbcc1124afc1dfb15a1c5fa4b4fd171aec00554a84c1203aae984b229c30a8022100c0e6d4cdca8ea7370e845a8ac9780c825dedf417e3bb85aa3325754ae24cf510 304502202a4756284c499cf45a8eaed98da22cac50924385c8fe6c38ca1de5a766f27c26022100bc40bf471fb0d0d963faf8ee25e42e9bb9d2c5d4c56c61db4f7e0390c7380cfa 30450221008dc5ca9024ebd9d8fd109050bd379490b86f8ba31e5fd1a5bd5d0a0c4cac68d70220552916f68840b6919d4ffd80ecc5a24bc671bb41d3a10c7d1b0389088ef32248 304502202407215889b99b81be4b62fcf9d6fdc4c9ec253a4c3302d4d2af79f4372da5e3022100967621e66de42a42406be5336bc1757cff7bab6f3ccd95f2dbcd4683bcfdbfba 304402200443ff1257eb45b8537dc801db7fb19c7ea15ba1d4a1718eaf7f7246187e4fbd022063fd737a7d3a895037da18955c33c778c760e25731eb195d5d467ddc4c794b0f 3044022000c8425aaabdf00735a6e7b2fa32f5f3ae108881da25939fa12ea6f814890be702203eb791d1ed4f4e3b404d595804bdc10ef6d73b70bf8a8c0ed4e35779e39c59f5 3045022100ae3360787a740130de88d4e58823738dd68704c698446f5dc2ccc99a6cf854e9022066dacfa98b8c81bf7774b365e4d587087cd1b45205f5d5a096c52b58eb29fbbc 304502205c092136bf64afdf2b2af758fe69f849a008a2198ac636b8f9c5416963f9b825022100e9536a51b875b91f7aec244e6afaec567dec26ddf9089d2f393ca2249a2cecde 3045022100c87c93be421e95490ba8c811f558a80ad31e1e2cda3e25e36dbcf74cb4c2168c022008612263d2fbc8b0770b2470a900844be1e97eeca0168c13c8e318b24f465331 3046022100901609054a8111beeaa287716ed889d58d8fb76a7ad706d596732e62216db6ec022100ffd2dc6f406e3c838d273d200c08e51d3507ae77cc07a3ceca8097c777b88804 30440220316758dfac2bc5df8cd3d97f33450cc97c6b4edb433cad95ecf1830f0f8ed6ca02204499699faf3e790965356c4ca3a3f2e14736cfd9581a9ca804652a53f49165d4 3045022100e59d20da1beefed134587f53ec8040d7d15dff746aeabb234c0eedbfe91a0f47022079f29fcbd3608ea91ada8781778a216bff631a57317b4f02a27b7c835afb3b44 3045022075834a6efffa4d99d1c239c70ebc721ae90d19c9ddc0783e169c27e9d99521c7022100de170ac7e74aa0f6266c6e366d26638578b05365e9ba9458060c72c998d3eb46 304402201eb999229625fe3a8b666caded0928cd6fd9800f8da0dd38b256550f147cf6ac0220666b0844b4654a4d4db32da831be468d7483191843d320e567cdb5022a552482 3046022100b982c79ba0255f1a2c7ac5eb118ab44b37b2d8969a23f20934ebf1850c16471002210095610e140cebfd29aa4a9c45bca3aff86da96a629b082c2f87df05e8ed91444a 
3045022100f8587e67f116405b333921e79bb5d9442d71a22708301e54a75b36c8a6fc0001022003f841dcf4faf918a9b9211d29d26cf6241aafff226a61a131e2462e299b5670 304402201f0ddb7cf0f80f96cef19332d50d395f55f870ee56378606c79694b249213c59022029929c402886e48a63b765ed2bd690c959d98508329df1d16e315640263fc536 30450221009d73f0b663b0d31be3e11f6623128a176914b5dc5f36d94d47f4269b591632f60220336318826c08d2bdb97b305364267041ea4e8c35af9c17da2b7610479468349d 304502205b1da84bf326511f496a1d89b37044cad77de1e586b1b64d6b8aab5a9a9deb16022100b676456bf05bc3a81333bf3db56107e9715668e243590f797046a8665fe3d23f 3046022100aabcfa7af379ed0c862d2ffbcd64ac0c19776990fc684a96fa9e62007891e1a6022100fa5f79324722bcd6e8ffa151ef494386cf69c11067487819ef6ba4e1949bf3c6 30450220571dfa1192dfabc348ef489d5339aaa52cb033f6c07f27b26c9f8de9577395e102210090a258846cd33ee5d3a77406a36a8eb367ef04074281aaa72ffee86dd66231ff 304602210085876e63fe19f492aa2774efd789be72bde4dbc697b22abc0d45ae5def3e599f022100ad06528944a7fe7ad1b132a864a5b1ed0fcb5931e41abf68cf84e51293d12421 3046022100f720aca611c4d83500f61078b4140fa6899f1ac107a0c738db228beb90921f7b02210082f127c4928fdba55b34344a948ff5d1a37d391a99f594537bde5319f482c55f 30450220394bee5489467ba8fa52ddaaa9636778d05b4bed3bfa435c4dad42fb64bb6c59022100cdfff3813f4bd5abb0dfce2479bd665577af3faa86d39ec52d620105764ebc2f 304502202d03cb39ce5a0c6a1435db0f901efe65af4fed04f956878d4aa531f13feef7fa022100eae3e83f02199026ac59f80cb2574bcfca855a8b827144eba17086e07e65efb2 304502206b569b48926231b040a13d8bdebab3e1ed367ae2b5a8c1e2aa94adbd22bab50e022100f4ecf68f91a0bc2f073502b655d5fb2eb446a53274ca524f147c230d76b029e6 3045022034f183e254aa69a070677dced85c170db9bf6b4698dfe47ff4805dc10ef8fd6b022100eb2545276c989e64f93120ad460b344a7e3a9101773e13d24d23bff25ee43d99 304402204c9b27752b8ef8f917236cfb44c70a3e5ee9956ca6cd058bf6f9e354217240ba02204d3e123469dcb07380b69e15cc9d31434b572fa970062dc03e18189d19f86bc6 30460221009b6b1fda8f2abded215b39d4a69e8ab7ecb0de7b86d9573895935e7e92efcb98022100dbecd12b0ff83e6e0699ee538c6f52bbddb5bc9f3d5307a4e92ca3c78ab93a6a 3046022100e0548020a16c274d3e2cb6b8092144d5a12eae0e9eb65bb8a351b75920a0c9b7022100c780e70233b3012e49e966ac20648a8dd0c40f842fe01e8ff3cf368108be41ac 3045022100a232040e68ea8ff0f41fd3bd7c26033b23d3fcbaf587d9b8a338150db9c56bc102201e30c1f6340e3567d3d762fd43ac445b90253f86de88ea090468838431cdbb15 3046022100fa25928bd106053f0e46ce62b0e2ed6d56d8f1c3bcadf9d9e01f0f2dc9c94901022100ccd7e2b925a2f15eb7ecb08db0127b1a05499bc60668aa135356c63f9fcdd88f 3045022100ce3cfa92a81211b807f8ad33ac297d3e905367c14d246297c6c55ba0cff18f2c0220132e17e96f5c04a1b635ef9372ff6ba027d31fe98d276528e37bb0660295a8d6 3045022100cb8b272f0969252c8503639fed1b922bfcc15f3041d15724743bd81beb84f3760220254be3af01ced6238332a458273704db00811920c78de8603357480e4ea61410 3045022100d782caac4285699f034661b79f9618cbc59b561aa635c4a05b82902d88a11dd802202a5d969845d520dcd71ab6fad0018175624cdef982616ec40d340440d24e2b79 304502200766d424cd205f5c272024c01b3fb4240ec9398de8532e980fb74d7fdedc45f9022100966a96707249a59a580b6053e53f2fd66dba2500ca13450ea3884ab150423eee 304502201375529bf4efed7f59a8a1ff011f8b9fadffbca18a35535fa5001687c5b71cf7022100d45b7b00bd857fe46d0881ed35ee61a918e6efc6a2420fa7c48d5e4f771781a0 30450221008f567632df3f6743ed449d311dccb5a8443c2309035dd77135549142f28d389302206146a6a0d38962ec79c8f6a4ae367eaf69642dc7073ebd825aa59ec3a1a08e59 3045022100e6aabba43259687b8117ec476d7b2fdfa59ce36f6e8b72b4a9e1f5acb0bc86c002205f93c74cf9c62fd3bba85c0fffcacfb7c45f5fb4d174ddb43621fd18697c140b 
3046022100ae06e552b431f7fefa80cbb264de9a9e64e7a84e0ebadcbf00fc677482dda772022100b0386ef7dce1005294c50258f90d23fe1f8ee2baa68bce0dc17e51bd38ca7348 30440220477d5817c8277bd7c6e6db0f199f76d54904d796ffe4129585b5ca30ed8286c2022067e9c4c1f1ebb4c2d18ca3f54aa26af0b0b82f3a84dda82d515403103b7353be 304502210094079cc2f5e14d6822ce0829bb8c904a975fd35e8d2c52a097b691a993d8629c022066f513e8e5ca4e9dd2c0abbee525d9780b96eab620a6a8dee0907665f096428a 304402203be7207e62bf226daeea05bd75f52a18a4b73b483a9a79401d86281d0386cb0d022029788650e17055eae7148690aac0d2da0921ffbfe0109a152aa675f15e74c572 30450221008bd662895c83b7e96b2dfc923898bcb0eb78b3a2bf6ec39d0a7c3f28edf1796f022055c226713589c31cde0251521c658583a1ccf4f45a98d0be064c534f5afb8f20 304502202e9175b98c66bb90d9dde119f5234d8c3d99a0cb5b9f5d028b5d3f1d579d9821022100b2d5aa944a669227a884897c05e98a0908f2e9c5e605ef1cf29922c35975eb2a 3046022100cfd3bbff71cb2f6c1ffa9776e4bd618d5af6ca4e6b085affffdf2b02a4c7fe9f022100f43ba927f5c31f1fb9bc92d54e93089563c10665f438f20e2fb5f81d8beed3d6 304402202539b73e6a7a110bfdc50421221bdbea872ca2f1ab7280855181808e211f03f40220484edd5c90ba534f5bcf076fd31edfd7a932783128233374316774f5ba0de76a 3044022007374814f00faf2d9d56829b89e4b70981f219e2aab1a006c457ff503485f11702202b0e90d77e5e3f1ffa75cfec45168be6ce88c655973336a9bf2a8c93f3b981e0 304502201ce5deb42c8bad5653bcb3a25ca0b648e04d6c8954dfc6ac757c5aaf2f0e8af2022100c219691f4c9863c07c6792f5d816c64b970f0443a9aa1ad9ded533cc28794bba 3045022058280b3b47cd6ad6af77d15b0de83b40459b1531bcebc3a7b6f420204f77a159022100f7e5dd587310a5e2e16493df6987a0c5309f15c1f6b5dc2f94d2c4ea1eb3b59c 304402205cfbeb5e2795bd9a487612541bbc4cf2767b6c61d26dd97c963592b51ddef1f7022030490f9015cdc92dae982a5ffb859980018b82ffe1ae9436eca915b58b3d2b51 3045022100a1eacc48dd402f1b2ef3b04e935ac24ddc3deee369107b69cac641f97acf09ba02205839af95a64aee2bbab4fdad4dede6ed52841d8436563eb2af03ae1b224e24b2 3045022100d7400d96005bc0732d9b635e6d5aaec8fda0da32d40cd5ead6f34e6ee0edf04102203829074dc2d7575279f32da16176b0130e49a2e1e217d2baf6a80afc5f9c1891 3044022011a9abeaea23e15821e8ed053dc5ecc7eb520135c119a66d0695ce9c47e154140220096454342e4322e3eb12d0415abb764104ac77fa05176cca95a0bf14f941b0d2 3046022100c802895570425cf87ea53ad9126a6ff46c6e22039457f804b87b3f92010ec50f0221009d0f9fdfce43f3fde85a24ea785bbb5c4aff7e07c21b3750c39842e45aeb8977 304602210085bdf39ecb8f9b83430b505a27a29a6a72b9394880f21c8641d4ef977b496003022100a37db5a5ace4ef34ba83a7a4f9710e515ccb1316d16f22260d1d6ab2041bf408 3044022022d8dca2348221f4b6601fc944e222560f15b6b0a89de7c060c0e50d0757b941022061bda4d76e1215f628c2771288c2788008267495b75ad8d4711fb68590e60309 3045022100c7a9123083e3fbfbbd0387e31138bb34e73f8b73dce2f1e0395d09ed8355548602206fca544bb8e3dd61bac87cb91cdf4b0b3d2f87cdd6ee76f0438d2feb99490507 304402207d6f2d5a677d912d477af58e716d4f3c89d9d1a9612eab7f3bfaa73a38dbbd1102200607421b5f0569549fcfae451bc1d07ed955fd6e51da700d47c2f9bc7877660e 304602210089f1fde110f7f6c38531be4b063723c55a51fa6b3b92c41eb6b2a1e4a7f1126702210086c595e3b3bac9be3e58a0ff827a1aa9b5e12559ada0069d98c95bdb29714757 30460221009bd1b2a1c473575c8320a45df581da6bfa750a83201ed234b98b841ad036e139022100ea510fd61ba2639a6bb420bec4aaf94940a07e0f7ac849a62dc4fd50c2f9da65 3045022100fbc0e36e17e1019d17d4bd77497f81962f5d24f801d74289a60d0e9f07fa649902206e10664c5c928afb0d79fbdb9d1df93770da93ce0c02999c72434ab3255f2a10 3045022025151a1f19b48aa7851f165c8b7b4332ad1d5f863b7544c04bd92323aa10fcb6022100f62500b7574b5c0d51a889a6f50a68148f19843018f09f7c70589d4a5952634f 
3045022100e8a69eb2d73482eb138dd6a92c42b17a68f0e54124a3b868b9370f2fe856b6fb0220047723d5e598498c7a28a8cf35cd4a2c26de41e7bf50142dd681de2df069c388 304602210090251abf2ea263af1d358d75e6845c336797029009c8fad700bed96a7cdb7213022100ee6cd3bc8e9b58943a0b0ed5bfbddd6c827df7ec3ed8e28c036672cec16daeb5 3044022065b24e62c1bf34077b6c312e626489aa39484261109f36e8ca003961c8d5221302207313575ae9527defd7a69b3ab57e04917b6eef6a3e419f7fe32131e5f9cc9a9a 3045022050cc693d88bfb4015cf6b539a655fd5c29e4d87c03f316bca232af1f88381066022100f3066c5150cc1f0a5c5ae976fb2304ad3702375cb912c74c11548cf1f838373f 304402204eea395cbb7404bc90a6479c0079985a7ae2b94b7635adbf51caca6f5cf4e064022010e854ff0d197b2d9288a0d7bd5515c04773cd25bfbd8fa1e9ccad47af40d453 3045022100d7cb38cf6b52cb26056c51098b98c86f46cdf19faca2ff3504ed262dede3d92c02206fa5523cba79bf2c8d1b0ed19f87d521a76bebfc033c5708319f81bc64c54097 3044022061f614afa2a78a84fabb8dcd653c6999420b31865c172cd9afdbd097ca13c033022003ba0ac701c6ccfff61b7e3dc9d04e705d76ac28e8ce286985bfae718b2184f0 3044022052e79326b9af39991185c59167f69181b705e7e0919321fd62d5b9cefc2746f302203c62c45a8334c28ecaaaeb31a2bfe723f877f954e485b80f61fd4ff913489d68 3045022100c7a651ede82488e61da061589eea63f7e85d2da61265942a29d38a17e842fe8e0220442b5a9c791b72d03121868310f35d3a7eb541ccb54aea137da4fa0d0c5cc265 3045022100b7a5f077790abce0cbc23c11211b77e986a31c259ecadda692c14ace951a26de022047ac5082bf7f22d604ddda5d2e7f4c016592bbfb2145007bb824bec7146dc80a 3046022100e5e9bd2079af07cb3af7cb3ba0be97394545393fa74276861f8fd1f292d4da1c022100a0ce41cd48b3b6a3319484943961bffd83a7c2ddb5893e7aeba37d73239c6c82 3044022017b48b8211081620b6c20bdac526713c05ffeb43d5d20d1708e3a359f950f40a0220291cab53fe3ff12145e2e6634743d77a8b7f1f5700cdaef35cfbd4fcdec47de9 30440220403a2117d456eebb945a71e111f4e45a3d7a2d5788d93f704207509b9d4fd11f022038c54455913c1c1138e54197043a8306a4268d594f6f08eca354ed557acd7d18 304502202dfebb77c22e18909ddd6d0143807e91921c1ac53348cebb2e37fc78c121195802210093914140156b3a810dc73047d2d76ec92b7fd662c412702549d6e92b9585d2fe 3045022100a673b10dc452f287840a93b35bc97d924979a5547392980577da5cf00752328002203b8c1597a8d996267e3c3e9bd8258a1d80bd27b09c0101f8eb5bda02d73ddfb3 3045022041acf482da6f5b02c216df09ca9419f323bf7d94a55ed3e652c0b4d4653ecd26022100907357033b942f27d88d83c5439eb2d8d7b0310be6371fa13a7974d2afd8ffaa 3044022019fca2f6363b84d8670897e93b817381f77d5f0d7041e71d43ba63b1bc7397d502207a9b7ea5c4b4ae3bfe4f9a6282cf5cf73e0d1bf1de35af2d89eb2f6528f52acf 3045022001841a9b4283edcf5594d04ca2a2eaa66ea96f4a619c99d73ea285c65c78d5f402210088ce1c827052e98564f1834f7aad43582837dd0814fdf2f529b8a9cf94743a54 304502200e8d801e384409408f1af54b2c70ef770af3860be553bae6bdecaa24d68eb9cf022100adfba6acd5feb0e460dbc782bfc07f35119435217afe89c9eec09d04d1883062 304402207273871ea407af285c3c92ae699ec27d7026eba468ece26c46fa31773427616f022002d613d64c6432d8389a0eded90f5ee2fcc82c8d85bb834af302546091919675 3046022100921595d399997f93af41344bb99f280a251a9130f270953c92608172c121531402210093d970726e56e2c40137978b0bdaf1a982b538eb48bef3009f3aaad90a119919 3045022100e55770131623a51be059c2fbfafb602e9a2d19710da1257136d613ca2c0efd6a022078993c50254fa1d3fe5340b14c397df93af84fb36d399a38f88e98050e83f29e 304502210094f4ba09f8a95e0664f3447498a839fc223185d5e8d4948ddb0f567e572ca01702207e0655e0015e5850c683303bc120413685444c19ed5a3bc38eece09d90491bf5 3045022033316fed5f31648f6df927a6d9dd4c2d79fb0f7ef631525c477621e0df2d9447022100f2905eb6f0d8b3cedb3ed5778676586d8ac11f542c9fead9bb8c332cc295ecdc 
3046022100b00b1445214a4cd93d5032ee4b1456c84da2ca4abd9a3082a2a1814e49acb7180221009a73063d3331d861498e315e940a9eb87807fea45d89c6496429187feb70b03e 3046022100d1aea57f185bb233d5dbecf750919e73d6a0c42b18475bed6ed94fb50d41e047022100c5987cabca7ced8c4d8c51dc5b186fa2b369fc0b9247d5a92d2ee36bdd46d398 3046022100cccf8fbf4e53409ffd269d339b7f8cae42538a3993e5adbeb1dbc074c6826f18022100a6af623106a1065be6a9f49fc71ce042bb9574e725214a2c1a7e07ada9f6d375 304502200eb369ef40f2bf3e01ff73bec2c685129429b67d4d97e407bcd8de69b6f56cda022100aca4bb1355770b04286835fa626fb2590731fbb396670cd3fea55a97da8576d0 3045022100fd7d92621a27ff6081c0c10790df889fd0130dee46a76d9097f0960cd4f3c3820220450901d8381e3d47e8196411d11bec1abe48bd8e5a216e16bc49e4f8e0e5e110 3045022019f7aee17927b2db920ecde315d2c16ef10f64294830451df1e77291ece697b1022100cf093630392f9ea7d8cb51224b900390772f4e4a43c1d2f6a6ea0ea7bb5e1423 30450221009c9bbb0084f42a1b8da805002abc87294d71439d5e52838304b5f4a500dd4e0f02200f2a6086aa19b1a2e24cdaaae70dc15c38916757e814f0a2581966696f6d6378 3045022010cc6b8584de0955c1ff8be0e7f16d68fdc4349b1b1043b8d4ee24b43f8c75ef022100e985a0fca3421dad737c44025fecf958c2cf0a355d9757c73832cc6d58388bae 3045022010f7fd546ed15b30c541f9a617850f3e2e243ccbadff4633f4baeaae452c6d8a022100cfc55f3ad24e4725dec69b26d018c71f3f241d36cb22e387bfd65a06d3dae3e6 304502202d146322e4c3f1df5bcd34a62fbe122c4c68c038cc0c4ae4899aaf8e84092fe3022100b90eb4f76731f087e09a7b949a50fa0ee3b469fe6fcf8339bcf9cd5e162f1d4c 30450220187e23017828ed990841d3d485cdeed86a941eed55b309f66f3d9bf9e24e99d6022100faf6c8be9c26e25f63dd6462eabe5478dacd9286cf12ab9ac2a80036b6f2c027 3045022100bda3199a1463fff313d3f0e3cc21e00a7cc127aa58b8d0a129e54801e0d9261d02206ef42d51c94c1df833f41534d0568243b3a2f27797dbc68987b1562b7c34152f 3045022100d23a6459d8512cfe0c62fbfce1b4f3fcbb92cf211db4dc71c3355c94dc6f30a002206aac6d16a4f0cf20dde31437bd14ab627e127efb31db754214e2264d9f312b32 3045022100e98635348e055b3dc72a0c509f6936e0be94370a5d14c35dacd9a367e50463f20220113229f6343c97b2fb191877bda373ed2ec79321f5248d81c3ecad9f808ffe9d 3046022100aab394d8f0b4d59b504ff4ea664a92e38adc7bd28d781fc0ecc644d0d0ab7a1c022100bc00dd5411c55803023222aebd19171adaa26622d3fe38171d959db0040feb88 3046022100cb9eefc093253beb46f5489ad6ff8368bec75da067e7bcb7e0dc664c4b26b1c6022100bcce1ee18ecd278fb1cf03f776c798fea5f7fd512075de2e5c5c8859921928ca 3044022060bfab4d8cba31ea23678332dc219aa23b4932e7a95c219d2d0c346e3db0b32502204efd36c75eee0bd6615bed68a7331f04c2c616c051291f24e59c02bd434856df 3045022100aebfaac9d1bec933e281f6f7b912147f9b1b0adea9bbbe218cff332b0eb9637a02205a9e48778b45a8209deb49dbe232e4cb925f679aef8af174c909f1852d4a0adb 304402207c70fd1562fbd40fd051b6d3ff321693b8d83cfaa97aabdb7db02d98a9fc0df402201db11c2282ed68847947cfe0f6ff3a59f372f591d3af385ef62b738c153cc4e4 3044022031d2091980e359dc0e244fcdbb31ed9c2a377ceb5191b99c835d792eda74fddc022021cf6ff24fd9f365d205e8cd75c4176fd79b99866591e5cc90be425d7f184b9d 3044022024ebefaa3172d3d7e0e44433869950fb1d838d19e63c5f9b48bf7207bfc349250220209a22c3e409aa9d464cef855eac1fe0a48788709ddf463c4949b89d673fb389 304402200dba994f96fa0a9259cddb5ef2e3ac31b30824bfe9ed58e5fa3497c722d2e0ee02207240506427697bd43e8a91de8ab93234a94b7f724248b191e8eb7fb6d460360b 30450220296b0840ae40253830b5c1b3cfd0886fddde66de26c5fb946d0b3533d41dc099022100ffe7abf37061d7f5fd2e2f4b237f8104a0f7a2fc009616262849d7803b576bb7 304302201a1838c93ccb6df0a12134923bc2e70f5388ba4bd1dc16b723e1b63a5f216668021f5972b0d26cfce5d5b4d9df4950ba466061509bb3edc861c036c72335b2e231 
304402201e40a86bd7462a55d6990bb2e4d31c1a1de42698081e2ee3926474af7a5a61ff022062dc8fd343c4ce77f18f28636569c067c7e079ddc42c5a4c27725535d66b87cc 3046022100839dc85923c0e8038ec0a132ae1b672cad98b87fd2454460cac172fc0f0abf1d022100ddc8e461614412dcce88a013ff4f32a05cf2762e9718c53f7706e599a1bab5f6 304502203ed78969778be97de73e4afa55c4fc47cb0b7c8f7fcb80cf77ac4f61ab41c888022100b139d7ff23a59e859dc875daac76df6d068d274ff995528d929d96f35314a9cc 30460221008a3dfe9f3fba9112ff9c701a0a25b974dfc37cc5b4b92955ed00c60443b9d8d2022100ad3ee7655f2972014b7a10868873c05bf3ce8c8b421201d7f449c69a8b3422a3 304502205e8e70a1908858a49b439158c6a93f03ded8e0effa756675f80e2630f679db19022100898d7c74d789a0de5d305e686a1fe46d96f3dd2694b778d36bc6e12c1777a9aa 3046022100aa2d0750a1c1ffc30f46ff1e12e9b6ca148857ded70e3afcd2bb41c134281b4c0221008a31403a95482930a8332f74b8c36e9f179acd7e8321ddb706add7163cb9c427 30450220121d079990f5fd94e2bd5d73788f97b24629d1e64c2ba1353f9429fa28cd2d81022100b2490b9c11502aaf6dca0c3b8e079de881d01a24bcc804d77bf0b9267e8719c1 304502200f502eda18de9be986efb454eb031ea647dd3a7bf81d6f4d6746c502081087b30221008aaabb7c6bed3be831ad1f50157ece7577583ea9e001dbce5db6348e26643850 30450220213a52b55d3d29c7e0c3ec9841d794960bfc7bf8a9dea0ae147a0232f256536d022100e1d9264b80a3d9b4012d4fff8f589e26c98da62b73058d5f0a149de68f6575a0 304502202b4b9afb3ab5c8bb6196921974dd1303cec7216417484caa172d3f5466bd5efe02210093f708cc35a937bcf1d93f666f5a1256ab5a1d122a5448e47dc08fbfc757009e 3045022100866d1d97b1dce13c96c3f682b7c3f783a70c7be6a1f710e2c7c48f436dd093b902207b106d491995e03a8e17661be14b199d84584bb40027bd25e4a965214fd858cc 3046022100b4ec9732a0ba783b4f845b619c55230364d4bcf83c80e53dc80021653c2a83ea022100f841350feafe91e1cd47825c5977312397c1fe0033e84a1456abfd9b3583a2d1 3044022074e84e081847c5d737108f809daa0a782acb798b9be9fa18a2796f5248a1f24a0220230856ba723f9c828ef2fef39c60d9e7f3e735530fb675383fa9b9585bd8c5ff 3046022100bc11bb6c68607f20e80f72a179953305c9b1cf3b11ca2057165c695cd50a83b702210080150e126dba72dca52a065ec8b0182747a13d02d8f1326f96f8d11d41ceb018 3045022051e3072d0b306bc81d255c68398231442f9b4a5328a3a483466ea476c94143a9022100959ef9e6b1d37ebe14533117c30da33e294b5159cabd6594172238ddbf025f5e 30440220634f424f17a1ae4e64c89977462ac329063a78dbdbcbcd49d767dc04a696438602205c9e1b3aeb6b511f8ac8f4fd3b9c392a6ccecb7e066ddf369f5c289a7ff3d619 304402205ec7dd6968ac4d72180858428e923cbe9fa1420fc5436e668cdfd0ba9dcc4f41022034b851a629b0f7bb3e53f91397026d1b711534d15a8f4127887f1e481b75819e 3046022100d2f79e58e7981af9d5e0ddc8904071b57b453df7b9ec68243cd6a6641910bc20022100bebf93491eac39cafd9a408c692c99830a1270eb84714b2ccecaeadcdc8c09c5 304402202b92e9029b26c98bbd2a4183c09300ab6784eab04d6fc3b145fc1aa2b7270509022042fc187ccdcc5a44b9e684620641d0f3900f9ff2173912a552e50bc27072a9ac 3045022031ab617be526ce3f549f3f16610401fb20f992144a4bca7f001ff3d7cc3d56bf022100edfb3a45003fd2eb5263006ab4efb46c0a1b0e97b41648d52d161f95abbab23e 3044022062ce20860787e279f9b900de8fe3ef612e221b2d69742dfaaf658cc08c3afb68022017689243d6f32e19e45449c6a5c50b0f220358b5a82cb6d3e1f56228df3e6d5b 304402205c70d9e02a6e5c6629445e3910f4765ea39303085147ed5669e3ae97ae95290c022010d3c4954fd03346598f8c2f8c265a979b293555248a5ae012d61ac36ce54e3b 3045022100fd9522d3a32a5c30e0c7c0397a21736a24a2cb11259301fa6673a71291c7a00b02203f897c1081a1be75e199cd0340443a75b2f3dcf33d24f60d340cd28ef654b213 3045022100f05af6287987ac6885baaca1f76f2e71faaa97978b0f32cd6599147387e865c202206e7bd9df1acca152baabceceeb35dc7cfb8af1c86b26b287e4bdd97e0429c179 
304502210099819e0b92e42e15bbe79341c4c9ebf7c1f215f513c648c187a5ee80c3095b5802205273c46df8dcc36c767469d0741552c094764febf488c95caec27acc892d1dd7 304602210089ffd29816204bedf47213c80cc598f9dfc612176f7f55af0ffe8fa9c3033a95022100c380d7eb22fd07c54ebaee074f427963cc7076cc8344ba35f583aceffbee0a15 304402204fdec92fa47352b2bd8d53acffb1e60d0a3d8cc98a74521c7046598ff44f0b8902207b48239fc03d6af181733f3500ca5d2d18390ba399ce62da66ccd403455576ba 304402203fe0c26b5b2b1d5380dff7cfb86a1b71722eadec821d1fa683bc7012b7926c8902201863adbfdcf25c55378633d9002e25eba2131cda5f49067dfb82e9f67e554b4a 3045022100eda3a9dcf02901fa9c81035b883a43a9bbb72fef34f4bfdb6c18fe15c47bd182022039ec5a920ca1841359d806c1847da1856cebbf2c48bb0174df93ff48dd5e9d61 3045022100c93985e48c0fcde277ab364f261f2fbd1179c55529fc2ef7f1fd3ead85858cb202200e72993e33b43837147c01ea4811086041fe7cf389ace23e294857068ccdeedf 3045022100b5406c7a5645a4fdfc3bb50a29a0da764abbc5e7b514190a97910288f06e8011022072d8a2d9eff9f8e3c8a8c834ec822eb70c7441b020a618f2b423c6691c30e8c6 3046022100c69effd8f412db4d3b4d0990f5e477f4afc9e3771b78855ae445f3383ecf0bed022100c100805fbbdc6bc51059f15965ceb5854d56603a8cb212ae4226819e7b25558c 3046022100a3d625c1bcbad1500a93a1478351d360ed09c46204030b4c9776ede4daf93e2f022100b7d274ee0793a89772d6f59ccf00ca091c756860cf13336371a9293f6495cead 304502210092ee2ba4fbf6c2be8a497bcd261dcf93cda1874219c423a460b7441096ee155e022068c0187bbcc85f6963fde65a25ed5293ef4d157a3f4eb22d509b4c09d50b2c50 304502210097f0138dbe5a332e2a7ca9b15c6b319f646120297672b1c6cb0e5f88eaee0f0802202ea8ae5794cf3069801478d97986b430a92e7ed1aec45de932caa2123391b5eb 3045022100f3a00a7a365d365355e4918a86df9caf963e9af5086a1b7561ac7c99ae2d4a9302202cd3148251409d23fc98b9475af23b8ddf51f921ea64c6263b5d7855f4db982f 3045022018983c9877c8d93afaf1f74509dfad7967a217b98debb579cb82b83ae12b0b2a0221009cfa2c528fef032e9a2e3a9b4e6c53ea02d6ad5c1a189c2b5b0ab499507c986a 3046022100bf2689ccda70c4064190d1fc043e9ff3fd1eeef5460e5f2dd6e4fc9d9b4b51f202210096c0b906a9eb2160814045a973202cf32ec13b3e42214458a3681c2fbb61e377 3044022006592e245fd415b2eb80ee60c4e773ce80af399d5557d3f58609a3cece88afe5022012efacc168bd02cd7681cf12398914d7e037272199fe602187c78d58ff135795 3046022100f74af3e69397d71464d0d92600ccadb29f5d58425e679f1aa262398f579aaab2022100bcd0018d872e07134353f4ec23b8ea49383f18bb5cd04808cbbd64e6658d1d07 3045022100ca0de9ee58bca14da0afc651b2988f8a428d1270b362b97a9026f6667da2338b022009ae7f0d34e81b4a7569edd8f0e2ee6504fd8743514c2f943db6375303a0c779 3045022061c467afc0117a502ee54ad7c0d2226023a7a2d94c11b3aaac1eba70807f5c3c022100f21edf15ea3078b3ade8d21c5b67c13245d8ef827739abc88867d1054c83dace 304502207a316840de58c098f3b17d1f7a95aaeed7d96d55d2f7dd8cbfe1b13da861baf8022100f624cdbacb4856f8ab695367cbeedd56922d3d2d5da2b9c6d5a99efdc26d26c3 3044022075fc03b23e2cd8243ac864a68a5d4f51214a548ee6665d51c5fd4f1469461b8f022024f65daeb5ffaffb8403ef4d5274c595ba74e14b14727b6f4c016b3943edfa05 3045022100b23332a66ce76991d07a71d84f865f03321ecad473a7db23b02cecd38f3b1fdb02202e81c11a8c4615b427238b371176375499b44fea955bcf2930a7b5b57810e803 3046022100c9bb623f24b789531b4fc9066e9f33326a1ee6de2b47a2571d28316ef98f7c1d022100f97cbb7b9c5babee9800913d95dd2804465fae3a2848863c0b79123ef9d25f85 3045022100d8b05a6869c829675d28732ede36ae8a159559e94535199c1dd44bbe6170f6d702200f72e275abcca3942c1165cd95bcb4e92e61e3cffa66b701a087adda0b8f7cd7 304402205af64c2bf5bcaa5bcac44ad152ccb8ffe6e61afae3ea5acf371c1558407f6a2d02204a725dc82bde76c094bfb36b2edcefa106a568e52277dc0bb1b862a681e3856d 
3044022041abf3c23b5e683f6f7251346b1849737ebd5854b56e13c82465750d40b146ce022076928db6ad1a0e9b02bb0098e59427cfeb267886066e70f1cb3b02a327fc9e32 30440220443d9f053abe5fe3a5b7bed1a04d969ef3bef11c119843c4a107649c92b481150220728ea175dd84666cafef3206903952dc8ace0455107206ff1340b033942d000e 3045022100d0b99f507024182c960ca7f6f48b1fb3fd4f254b62d63ae7f439bebfe984c37402207c4e3e9b7fbdd12311faf607f662f5bd7eabec626b85489f29b7186f20860791 30460221009cd02745a09cfc5cc6a041cbf893813d8e28ef5aa1889b283bf79fe9148a03a00221008411ba94c659b1943149cfa6a81fdf6c4a5066e33db0c94ba0e4ff596d1eae44 304402205e3970c6766b3e90e378d7762c332ae0515e1a9d365dd67329778d324e043e15022036abc3c60de9270a08c343265b97cddb880d6e6aff81046a6fa02522f79f8b91 3046022100cf3b64a9c89110903de67b9118bc714cdc178cae6d67034a3500a9c9d9550d2602210098e516771cebaf0ad01a7a95cf89ef1c47f9985472b794e32580bc58ae24d0a2 3045022100ee2d8775b18429de68fdca0b937edcc36529ba2152397f2a94ccdeb3ea603a2e02200a3edcebaebcd53e8651e82cb7764f6cff39e81a103985156904ac226d712936 304502207f09e626038b3a0953624b0f19f06d432ba7dd4f270335cf44c536066e45d8ab0221009d687554b4d31313711d25d21184538014334e1548f68c8ec32f4fbb97b0bdad 304402204fecc1ff78b730e3a225a6900c238349f5079f877081758980ceff5964b7553f022031e1484b996773483e188ad407021c62b2a5b9f660da14f78f258d8073942161 3045022055eb3a6b103e4a4255615825ae2213e405022e8117d1233f8579142706b1c6ac022100b869bdf4dbb2ee098ff1773f6fd00ee2388ce2e1e6e8842a93f5829f0d853aaa 3046022100aaaf8c9bce469adddd2b1f9fc3b4adb9c5fac00b717d71607b9ced0c78796ee0022100cfa44b485e915c96504518075d83946752c1a0acba36917157b389d1ee13f38e 3045022100d05a0d67f04c971205f334388e8d9d34711ed2b99dcc06bd37f44aff5a883a50022055b161dbd5c34f9e5b0034b663b91e675fff06a7f28989a9f8162d0a04c0ee83 30460221008308b7de57afd3e300965ff57b30a2ef5e2f00365a88d9b39abd3933fd668870022100c8842f478b39247ae38ace34b7029b1b32c0808dbbb07f67ed845e182e38574c 3045022100ee7fd8806767601e2f739479467863a7a4e1e260e2fcf81abd98311932b9d2cf022050ebd58abd1ee03ad68867718061387648c4d3c1767af62e71e8b2e2bf97a950 304502210096ed3d54a597d65435141d7498ae671eb464c931ab3f7358dedaec121a8f1675022064056918af9f16f9f7c088736bb842cce8d8b5d653b9bb28cdcc45e58fe81747 30460221009aff780c078a71e0246232b2bca7c0135c7eaf7cc876fd6aac4563094e5c9e7d022100b09f47338d65f57e2a04bad641c5ebe2dc77ddea63448d56aafd738df496055c 3044022001edd5d6aaad5a8f2ab580a801aaba02807ad71012d45a29a0c2e5779c61055802207abea7ad35fee1a8bc6f3335523ea5e868bc006803c01f1c131baffbefc595f9 304402201013f5c7792966463c4609e7be6b55ba473e5a3e722d3f6797be5b316608e43f02206496b74f2ed41d22686d2586296dd0fd0ee156aeda418f023920331fd16be437 3046022100b7c131dda600f2e152722a62aa91fd3820a7aa5d16c44aec1d3c06387ec884af022100ca27c586d3626c8a271ddeba4ad4cb48ee80338a2e546fa39c0e473fc13dfcf8 3045022100845f6ab20fe77a42f46aec766ea05af8de38585cbe70e13b5700aadfc7d537d20220363867d62df3b22966581ba243100f79baa65477a7ea0835913011a94c3e231b 3045022100fa596ac2fad22302d114b30d2a043d8391a115fefc9b4743a0b1cc85f96d33ad022011f48018d52aa3442aa794388cd8b154a2e48f90da1e828a9104ce823cf28ab6 3045022100be8b5c1d40553313cde610d9e573eed283a71029f8c305e30e9e3c05ca2efbb4022043d6962e0156d36ec04a8a4a3be8043f5869a8c38b02828f7703185ef8f0687a 304402206ca4a478ff3396da0083d86edf263c7d1e498158f42804a2c38e0aff61594a5b02207aa14d0ed851fdc0da3bb62cfe58f8d72657fbcbe0e3562445a5aa65745e8370 304402203eb85b48e953b465c36404102c85ddcfccbf658bf829a65298362323ccce2cb902204a452c1902429c2604d19b2fa483469a477c2dd45ce9853ba81d78eb5790fe63 
3046022100d534a02bb2f34a5f9301ecd39c85df9526f1554a6a79be92cb42b87555784ee6022100e5630886107848e93308bd72924d6996ca41d30ce662d8c4b61705c94f17fe87 3045022100c65752943ea6a9d1c3cfe66364bd0efdac27faf81632a566acc967c6f75d990a02205977c8cdfe9a1938d27dddff9d88cae5416c31c322843ac075dcaea10dc7c0f1 3045022100a8124fac30c701b74d768d92c5a45d032f724376e8b9f6327eb25f1ed982bb1002204af290f627cfe0ec80c866b8f8b0fb3de2d9e6ee5b402734be77cd2cfa6b0dbe 3045022100de1347e853931762cd15b1303f15df8598f6757b527020e075b21d308fd76af402202ffd04a7b5e34c48e52c26eecd3606ac0aa135d62263a9c2bed413d82e05e968 304502201df98802031f7257623dba81b64c35739493a136d47d8467dc42d84d3e848197022100c7dc8f9b755f527873847e5bc213cd03c472d060f8795f2636317f899bbacf9a 304502203ac350d0482e328855c79780a6592c3d2afc85395cc1540e13ececd41c3d9897022100892cbb3ffffb15e06b22df29d34d546a874795b6be7f98b564de431b042ae961 3046022100b24fc0d72ffe972a8025f20e199a8ee917a619e59882c20cceac6bc46e909de6022100bfe3ca13f90aa9c57385871e6b828bfd43bdf79305697ceb0d4fd84f7666503f 30450220242d942f87b142fce5797471289c0ba925a84287ed383c3a6a219de920557932022100b1e1e01e9a1697dc16bd4d4d4ef49e83dda166a5fa1380d2352bb42bb564a710 3045022100d7c9ee4eb83103e5cd2d81dd92bd618873fb6405ed9673b1cba4c6933140744902207f83fdf43d86719d81ea8d9338e2f7297147626d69b12f67bd617d60a785aa97 304502207cadf2742e6ecbb5bfcdd1d197c8d3f37c6bf5d6d0cb8556a23220f0dde54e13022100f0d0c2f10e17a3ca00a918090c4c7cbcddf47abc3bb3016908a90cab050e725b 3044022023b04f81d4427a050c1568fc9d71483aac6586e6b48f6a0c4b2c025dc1209693022071f1b37934808befb4f7fd08a395637aea16e0998c5aea2c554ca3cba4dc9220 3046022100e48ea32edcbebeab884fda7d17affb18110d00c73d9c918a412f2de3defb7d38022100d7d8ab33681a9db690b1a8fc9d522d6f57ae9fe51b5dabb833b5b5c5220af2ad 3045022100da8253dbcd3aa5192d9b9c8026a1077668107053605cfb46489028d37e2ad2c2022056b3c84c6788bdabd9548323c9caae42468f5528916576cf04b01d280396fc24 3046022100a6ee28819266238fc880baa18844c138b88ded142d43f69a73af77a7402dbcbd022100eb8cd56525430ba873ac34ff5f1f34646d9b19c3f6b1fcd6dcdff5492ccc7f87 3045022037a3020b120044ee673acfca3fbe8761ae7e1311ef8853bf9572d32d98cf0ac5022100857601d4a3f8d9d60dccd9ca51b22b529211f204803ccf9f581b4602fe29bce5 3046022100b46d296c869a24a10e7f573e64045b5139a10575413d0512a5b8782636b959ed022100b055c16f4280c153a828426635f341fbf0285a543ce3a016707543b8727ba1a0 3045022100a3cf3399956c84d9394b30b5b12ad1969cd4cc224966c151614accbb672ac8fb022020b09d90939e56ec7821e3a43b6176c44947a3fb6c23e26860a6591dc0835129 304402207b3d693f38890272b74adb4f422d27b19860c3a3584f92bba128b42f49f2b6e8022026c31d0674d28c02f04a70b55cd47cc1ccead5dc373f83d6231da3f2341b079c 30450220116da3cf5f7485ee9132e1b86feb979536c8245a64a2ff2136fad97121cc6cf3022100cac44d8777cfce9b373584d93eabf25d5d775aa16779edeb716b476a0dfb8770 3045022030ddcaaf7a5553274938e94b3a2a2431fe9340071f76aff832f1f28699cddbfd022100f5e30a13a4d1dc9a073901944860bae59502b7326c9f31890549404fb6d6ea0a 3045022012c3a9ebdc81f8c46ae421231c4fa649a9515feb9a694702a1b4797410ba2d90022100ef44c98f257d604c5aa9ae948988409b063152eacedb774ca512637a0aa90298 30440220203067c3dbaf57abfd235e9ca43bf3f888ad5f14f82bfe277ccc376e699a81f3022011394716632040e801bf49626eaa21d6b6fbee0bbe68e29d6e4b7dce33db2b4c 3045022100a8e51f99f8b9588877c4b40a801fe0dd31f130c318a4696f5c6014f71c8b0b3a022079ca2eef821a47684720ffba66e87834c075c22941df54d8a08ff4ea79e4ccb9 3045022100e2f46505a05e1fc513c476a90db8dcafcc353ff3d4df53d6c97708495fe521e702201e0b033a7e4cc3323826f58caecbdb861a538d2fea7563668b402a6602456582 
3044022066bb8a90a66f7e019bbc09372762a200ee00572e8431aa0d7257863d58361dea02203a87e2e85cc8f331dca8d490a40d7c74e601c86dc3d80516109c573ffb51dbcb 3045022100c2b490833b1ac324f022ecf2cc7d80b7d39830fb9d1d33e8449fa11b7691358a02204a74ba14297a92c2349dffb22a9fa29d5a5c2571cd559f643380d45657b0f6cd 304502201ee581117f6f4ea249284837ddd1ff95b5d784b5ef308eb8f34bbb618357ea0d022100b5d6d4963775150e4d6b104d04858e3095b87f6e32c5bbcb2dc33ee82f4a862b 30460221009ed91042eb698433145fd56a1f86c5b6b39b3096a671687c38d752e114f13e17022100f3ee72ce88d7b32be7f6a9d2ed32684cc5581826ad9c43fcadff457819e49882 304502201dd813364c3078053a4e088c327db7620b758114d8d2998c02da3b22870d0a76022100d9780c3e331b8cd638c97cd53b4953f0b396c602bebfe1b16234a09d36d3e393 3045022100f5db6b2933da92797defcf57a89c3089ef79af0fe1ac76475030ba8cec1a67b2022071292bf5d4e8ec7b11b141f7a72d45db11c2324ed1e0b51278eeeba418139bdd 30450220233c4f3acb0669e80b448e8b1c65c5bf4fb1834ec9079a9b6dd6df5f1794e67d022100b83c899ba65f894f11a5e88bcb8647977278c8e1aa17398d242f7107c3af3937 304402203dd8bca1a48613f6cb12138ebbcab4fdba5ec3cd8dec50d01b26c38a586bc03b02205e0b66029a44e829909b260fe895d25fd50ad2a08223e6bba009f096a0240158 3045022072d72eee21fa9fc2dcd1c9cb5761645be29ebbdd6c28029145176a692cd8060d022100a363c62bc5548181e288368f5aa9d7b17ff16937b76869fddff5af69f30b20a7 304402205e56f1577934a1f4ca4687b3dbb9275b3a420272231ff3e69a223b05246aa3c902205872f3e830c567d51132f0a0b90a7e012e5babaf04cce1b6ac607dae475014c6 304502200912e650f9a2dbc93e40c8de8caff23198c9a8381b6472e1e996ae2c4ab032da022100bdf5e07870866e18f9a295770e3857dd50cd230c4be98112b831798d2b497022 3046022100956b582d9b5758de3436e720fbc7deff4a6d07996e31951487789c87da4a5454022100c77c38e44a84b61c43cf035eaf655b89d77d6520e80d4f4bc3da66b79f575f30 3045022100809644f939bae1761c97b47e744f626eec1977bf43a6f7da1af6a36734393d4302206fc9cc0803eb0751da4bf7b5cfd2c2869a80d5ca6e1b9def3fa4c70b0f9c6be7 304502210095be684b373a68d7bb30b2f990105e9cf355ee9fdd257711b5dd45bf0b0f7b8002201154df9aa2b7ea68ad7f7bb3e3f5e0f064347463aed119ac80833a5147925d00 3046022100dfcef8ff1f7a98670f371e36596d2980e75a676e5b6e9b1e558d6bf4659bb916022100e2f3db823269e030f7914c2489c53fc0c7662b09ae3752a6ebe22e8cafbdcb04 3044022036c7dd08993877954a719cc3a4cb16ce660b1c08e79f0341328a493c1de20eaf0220194a822dd8377b4102da953e6e1c2ea60514cf47e81154a3b1c09091f93c3509 3045022100ca6ec7f7c1560304e97d3e80387966190b9e9c82c1f754a80a8b3239498aa7fe0220150eed7d7fe63971d74c9557e17bc2028455eead4bad7f5b542f94cca145d368 30450220287584139c0f2531883f8b6f3eb31f2cc7e985606aa72eb332e5630304dba1240221009e50203e09af9750c3a93ad96434b3b88ef279bbce8fc3c0a9948649c28a5449 3046022100a009cf3aee1ff5e8b502e301ea2c28de7cd1d05186559095319751838b8b336c022100f0ab78d5716f53e0df4c4da48a1faf327831eccdcadf2f081050882ba0e02b99 3045022100dbb24a193eab0945c343baa334ff3be30c873130bd1fae07bac0414bce4a3fac022019d0a8758b28339cb254688b18838352c09d3de3ec8b6a71a862fe62254f40a7 30440220278c308b585bfe43cccf327507ca080e8c42fe25c8bda1ce03465b882e431ace02207a6f3f50726b3c4a2ae1a71f469d66df98ff078fc028f256741897c0d6923eca 304402203b5e77b11b08dc41a2ecb8ab1e75834f9674b5069a97aac8aa399db8e35466a4022017cc40e656196126bcb5f8c04a701ccec57468b53058314d3d71300bdfc0112b 304502210093810d3b8217c6a23765e087e26bb48c5c889aa050759c9ba3d29d3418e8b3ab022071dd7cbca4462a654cbe3f7f3ccbc0f2e45b8d5d67820623606f56c7954493e9 304402202e8681446fb67078dd62c76cc52706201525f90d0e6dbbb0a51b632a5e2986780220533a2b887b6caafaabc46871bf54e4d2c035b18f0b6913e39d4d6fe60409fa7c 
3044022011ef5b4af38c3c75572c347daf95dd4b88d2e327c9096043dbdba3ed31176a2202203ae6c53a9541ca65a7fe98046df31e2fec68befccd645af4aef6957b1a661769 304502207f68c873d24375b1bc84846d41dab11a3d0dc1d82c246b92cefbd1471a5d8f460221009433f0999ca52098261cb250a5ae367bfff99037c44e1aca5b4aea0d18844e5c 3046022100b92161aaa68d73d03e0a04d95872c037ec0ff3975546958542b32aee13ce104f022100f6f75ec921cc681a9414e93de9d703dafa55e24ebb9a9ac261b9361f884633b2 3045022029a489ac9c044b9c93ca463c380222819cba550a35ce55d15f6924d594f339390221008c0131d8fe4d86f834649774b531ee5d0990b722be729e71593bab136c7c1ea1 304402201e85b48a362a1692eee6983d27149be6643d30b63c8b09255664168ae1b1186102200cf5b81f06cd61686f044d793a423c3b7518f7b7f68df86fbde2d6e06f75ff91 3046022100dbb87bec1a44e59d22a370d310dbcf4786d99d671a8abba416f70daa6a4cd112022100e2003ff237d10bd313babe6e8fed355467f6b5ae296350d37f4fc0ce13dbc15f 3044022040e1b62dba0f7e44bc025bdc49f391734df6045668b9d1c659b23aa4f724971502205acde8083a4b4db69869c88cd09754a719e0e40ce7c9e125b5ff56d137a8f65e 3046022100a9fc67d196885c5213f62e38e157b94e5b1e204297294cb692aa8f64515d6e34022100c80327383bce3c912934c18656e831591fbc2f3e2a6163294b7d5d1e00f42d98 3045022100845e71c81bf4fca02c94ed76e19820b0477377b055bd4fd2b3ee30bfe741e7d202205c4fbfee6c82c69de7ebb561775907ec21afde20e12b7337ce7e362e46965f3b 3045022100d3c453808f69f9e039eea6e5c1399c68ff2b4287e251a9d833d5cba70147f33202203668d2cb2d09d8185972923d09378a2f06b0ea5cd45a7aa4df0583d2344ca53a 3046022100c985a89ac1bf4de4a8aa8ebf6e06a1b49d1fb31700b670bce97a3ba5f2a2114c022100f618d635a53138dc954d4ee80cd770ec494eb420a938034cb21db0b349a5c005 3045022100a3731635adda35314619ae6f6ee115e23eef96232335350870e496b89e21bd6702207635600f936067fecf903c32dbb0abc36a11ae928fc7f42f62a8dd827b836b96 3046022100cd5489aaa64fa984c86148b223e94a489d750b62576c05f781261c12b2fcaf2202210081354ccae5ac91535a5636038f5ea0ff3722f485fad6de61bd5da302c1c8e156 304402206e2feeed28af615aeb47f1f339ee8017a5c213482fe5b92fd1d7990b59cd9f9502200ea1ec67fb250e4e08d09e7ee7b42deddcf47a2f41f1c762e71609d127740ecf 3046022100b5876929213e8301810094f4eccf6288274c2b30ea0d48eafb685e8e640a2b4b022100f572a7ce22d02bec9fe8b6f81ee81854bdae860569d5d093f9909487a7a12246 3046022100c94aac20d70fc69939de312a297217959b73cf94cdd5978e2d8527e6ed978de2022100e2f8a0bb5845fff7e625c78c09b051b9acb6bfe519c762a587a60f0474ec7d05 3046022100e179b24f8163cc9253ebeda502d17fe490224c48ca33298bf8332a5faa162c400221008c0a14e9c65e11342eee69b79e82d319de0c7f8effd4a8cc7f532566e3b73320 304502206e14ae7f97ca0f63cfa426ff1b5cf406cdeb3f304c1f20c18a221cdc2d1e8dcf022100fbb6f7976e434c69ebf5cde0922ecf3f428a14a4e527835b479e5ac685ec4c86 3046022100ccbf458b09389ff37ab0d7d35a2ffc8e8e54e76cda61b7f6a8175ca4fe759e390221008b32a4cc7faccefa119646491263728abc25336801bfdab4e171b3555659a8e5 304502202688c1086804dcfcb2a2ef13bdf7d855c6697ffdb0582c1f6a4891a3f8a76c8f022100c22ed4071d77a6818b394798a44cad7d791d85dfdd6709a357451e7ff3896387 304402205b2c6637b559d63b501f176b8b821879bde8a6500da93729167e2c65b820a95302204a1e6ae597645f77fd72caa6587f0845e0a04be2de6151962db55374637c2ae6 3044022017fec47fc07514291ff0e38bcdf1f6e85d4f0a81543a2618bef733821c6c6df802203e59306d2e25e5a901e3bcebd7e7de439d8a5f2b5808b347ee204b27877c581f 3045022100d08d0f5150b04a81743b12a97b71a6432749a510af5a6a6bc293a06539326bb102207301fab01444ea848c6f4f89f307be53f17007b04be2fef9d5ba0d6ddcb897be 304402203731b9d9aa8ecacdd3e78d55d523a68973e9675faae0b324a177778a5d79b42e022049516e85125f2daf6e1e340bb43f80406d59cca9efba839eec7730ad8bef8e20 
30460221009a90e4685c33d3ec5c7d55f5d03d69807748a8e87b6b19035fc9a8679b757ab6022100b3a8eb77c159161619a70f09123c154609ef3a164ba25aaa00b07b353ee2b6fd 304402201a63b2f91884dab25c7ccfc7cd23604d89923314ba6963f20d1160aba9d3bfb30220116e53478c3fe82152595d5e7c888f47d85a99fc6278030d4bfe306d30ec7734 3046022100a4608bd07263bd672084643705b618ca3b15ede470c255c870a654002591aca5022100811e6b963d3740caec13ae6f1f9bd2899c3a7c977a039af36dff819da6422111 304502210090801394c27317cbf2546ba0ed07d87594f6110a37d768ca9b5d9ae91ff4d3a802201675c13aa8f9b2cacf558e2ebc95e0a6892f84228b33e44a8d93109dc89083cc 3044022056f65714be2e0cc17257dac5c7c10b2d22c7c3b9948c35e7f91b14b0fe4c095b02205711c972edd418dcf789057837191d629b580440944f8da9ede0b974d3613623 3046022100afedfa8850e5caa95057d0bfc4ef8214bb4d7d72ce592d6d455d4c999e63e41f02210093ca55f87780b7208951f0b23b2cf4957bc8164a2b07e18542d6b75ee2854b11 304402204a21d5e35958085ec93bcc94cb0bc28ff6444bfbe9313cc734449ceceb4708210220527ebd9705847a242de73096a7e1071b970653295ed6f06847e3cd1d8ffd0301 3045022100884767e92e9ea1a8fcd149022471c6f02bacff32b023e4a7f3a274a6f95edb730220281e9ea95ff132c0f6d8dfb904eb0f98f9529de264dfab619f37eaf32afd5f09 30450220595ce7b122f2274e966b4e854f09d21f2f16d09cf7b741efce23d8cd88a65f71022100a1d671da47e81a7e307d9ef30c6afb57d1d63ea817c21691aa757acac5264629 304402203b5eb4fdc8754c212ff99b75cbca3361e47fff7cdbb73779cade6e6590d675be022046a1a9d31da57fa50dac848d4891f5a16e212a0cf5e3a7cd2eee5ff0e71592d9 3044022004d7eb8b81ba86972c7b853d6bf57889e8bd6ba35a72f259fa8b1f6db9433ae0022013ceb389814b4e8c431a31e5915a9f4c217a3b533b63cd5187083bc599bec7b4 304502206c4a7246ce162063f0e495726059843ff18c9489ba2d93508a8b5ada4f88ad7a022100b29f5f013de4903d8ff6ca1d99c697de6ea0ffa465d18bc80de6ca29ad427a09 3046022100a4ac8346ab9989a50dab0ebe0425314615f388374309fbc29d2fdc24a377d550022100b8ad7f6073fbaf7b0aaabd0a1b6fbd1331c4e6a055716388fc20e92b903b856a 304502207a5b2ba7d6268745086fade9e48d3461f852d516153415c49ae97576c18d3479022100b07a9db7c946617341ad4805cf7ccc47f273372560f213fdf265116fe4356965 3045022100bb84417bfc4bfbaa3c78480336f4b7d562d80b337c57cc529e58f01c55d1c4290220267230d373d35f6097d060dcde37be47e0a94bf4b7d723f4fb0fe781a12879cd 3046022100aee60ee8b239b866009b6a368a7fa7edb19f01cff6474000d470a442bd761ea60221008813ac994f9ef53a9cb1ef5bf8d0b2bf778ec9d41268b8c3499e57a53d2939bc 3046022100822a719f102d77d5b03f993b52c2e29d1f634eed6fabb8c0cefbf91a9a64857b022100988c1d047574d73087b6ca5f0217f437d3c98a2da27c681cbe96364fd52d3de4 3045022100ba57b54a3092538dadb718ca0b29d0756488dd69f676ceed9f346cdf05d3e07202200d79eba106af3d8691100e388e962b23b3d29f4f8eb77ddf817d3df76a949d42 3044022076ca085e1e80089d358ab8b6f8067e138acfbc7517cf452392bf39e94c7548b1022012c3bd4017d251a4ac9a6cc5572748451eb9ca961b37133310e9851bcf553ea0 3045022064cb4fa86046c0423f10066dfdefcffa671763dd5fc75f4cabcbc295c65c90100221009cd25b114fa0d20889db5c4999596532b1ab2ed6acbb7efd7173201fdbbd345e 3046022100e28e432bec14c5f5446b867ed9bedd3c5c4a5a09bc63b9093df95b3630ccb7ee022100f6121dbaf9a64877d1df8d74e3a7245f13dd16ba3f4123827ba4f0a19d94e8e9 304402200e9e553e570e38f64e094aca5fc0350a1223572670d4e41c84fe924ef198e18f0220307fc0033c43a4b616c8bc2b67f24bd0139b221213324a417eb3bd4637b6d8c7 30450220270ebbd520aea4ac3af82f8e27362e53c49695c1f2fe2796b249c934b0fa1d4d022100b8d4b6856f2cf44033f0d6c5d5ab7d81463b4505289b9c2d6a8c23868c049d92 304502207f16d83b45089859309be0a9f8f7395fe40a6654535d169a9910fbcdfebcd9bb0221008054c77515e11d5f5c52598a1415fb1646b4f340b933af5c250c9a5f06a32f04 
3045022100bbfc879bd7595049533d3a090e6a691bd35e378d637ea058d0ceeb63a4a3c7df02201f873dfd00b75aa3935233846db22241220ca9e3123531991b0a8aa1508a1ff1 3045022100848ac909b3d9afc3c77eb2e3d61206b6deb19da9efc0b7566cb0214164ba8d3302206d0c20061bb7edadfa2a0fc2f52e77be87a3f3410d4a1c9ec390cca9ee7a16f3 3044022060f7cb23e8af77f4cc07250ddc3b15813a8777eb34ebb50405115aadf95ae09802203f5727e7d08e2b32e65da03d84108ba209f69545e6f5bc0272505b3d2e7102c8 3045022100ee57bb699930a701e40984347f183abd442efe57c8b3b2a8840f0c70de291075022000d1ea3c2f3574fc970bf979c407411e99d13c403067eb812ed7683593081050 3045022100d713ac66843c849d5c1c43f817578f75add2f5320acf59c52818e8e9ed39019b022010a562392281fffec6c2e24e3c802f8b89b3171429fdf15035840a8dd7adfb6b 3045022025b455274647716c800e454db93212955240cc1e4dd4609b724223c3987d7c1902210098a0cb854f0462cf830a783fc10ef00f35a9910725b01b14313f8fe0f4d024fd 3046022100fe0a8e0f02e9a03befb332ca8bea17b3e0f2fa627967f6fa33f9d87cc32e37d4022100a7afcc9f5feab56fbaf7a81255543fbb9d5e30e28c4fcb4b5679028ad75d1faa 3046022100c577069239bacfe0567430a2598372d1980ab663247dcb6aa68a7296ba50adc5022100f280ede0f4e9bfd81bf5ab18a53c262a3895cb32a5fa85f1c4a5ee86e3814d66 304502203aa622bd2c02555889397ccc8889584783648611479881ada492e9b8de49e648022100f1ee8cecd6231705a5729ede9203022a92f08dcae6e866b05933e5687f5524da 304402202442b360800fab6bbba9785cd083e2e8d809e54253a0ea9d65b196538eb2beca02204e4ac67c79c23a8444266729441bd92ba2d1840507c8c0a6adc0f885aa3e7f1b 3045022100881173e90e8537e0ed82172bac9e49c4b5a356bfad841f4e3f506ede6918f69002206653778a47ee970d2aa58ded58a2b60d8f755b49a2ac1e6ed8a411625bf78466 3045022100d0b1d3de91f78a1ba806511201eb32626f6b67d73d97a76f622aeddfcbf96e8202203d93d10e9147c690889c560f6687966ad0774a6c64734a13f46867f86f8b8117 304502206edafd14a979a4803ec5d7452b82d763230ca284aa784074b9227b080f2e2606022100e15020f9a8ee0a0fddb34bbda2115bf2611d3956fdf6e8c5d294da8c0c3cb21e 3046022100ffa20db8bbbb245b2efe6931230f31f9738cfb31020cbbbde14791b96182a000022100b0083f04451603f5c8dde3f0303973cda51dd63bd5fe2a9db770d6d397ac985b 3045022054bcc5a4ac200dde5d98ebc37132a296282fe5425b53f36dd5795f2bac11949902210085c1f4ba50937c7532360e6566c1eb16f86145f1599bb0a5b26f18c8df44dcf8 3044022062e123e529f5ed92bf28f204abc6efa77dbc1dcc05460111d94c524d4d1da5b3022031193adf113ef7cb219488dd1c5a6b7482bbbecdff81b597e9897fb04bdc2a37 3046022100a4edac45114ae64e4cc150d9699f8f36531de7b228ba1b3162bbbe024913aa9a022100cd9b50f36fad62325e9239c0f6ee63c034131e2e63f85493c1787183809d80dd 3046022100e8c2b0b31561b43dc907b9f7017ed656e367f25bc08a409adb862456eca49d3c022100f1b4562ddaa379ec574ac56e67990e3317946ad2eac5d683f107f66eeb55eb6e 3045022029fe2f7d06f060c9d2168c3c8ae33f348852ba0fa74490f2f8feea92efbe74820221009b3c85058fc26aeff383def047c9112152961783641fb6557136abbaf49be87d 30450220065102afd24610dab7528cf99f3277d03f48332bf692b127eab19cd4a9c6940d022100bf6ac6d08841a5c9f98dacd623aebbe3acbffb6917c9621deafb2e49faeb3a29 3046022100964e30340522317f7dc4a779af6976824fb91e21e70f427f4df25284845f2c22022100d88187a7fe3572e44740c50c76b20d53896bd4a0977d7b095c5b82ea07c5b62a 304402201e25c7c9cd67035fc846623449d676f6e9892ea24cec6e300bd6557c9d11195c022026a45bda24264ae64eae539e977f5152e9b663b9920da5274af407081555bd77 30450220750da26045e0eb1a1c5b69db89ee05ce6d2899304368bc357b80669742684154022100e79e349cb5320d34b0caebcbe25517c380d4c12d5bedb90b2bb089ad49559373 304502206b041607e9ea39efc933aac64d51b5ae80e9e9d2f529721c36d66701f6b9bc9c022100caf461e2a230e54911aa2cdce4b7d25db2a7733374eb271a67d58a4ec6ad0551 
3046022100bb7e714a8a9cd259ed4888faaf313c33b3b03309b09971a68695f0ea30dea419022100ca9127e5595a6e6147958094728f127900b20542b42acec62610241ced6d341c 304502201070f4c9acc228bdbdd0e252d3eeadc8e3af3c7a9ff5d4900f3b9f34bf43f6de022100c06d76d66ec769ef1ff6af7e9993bdb2f87de59d857b746e0f148c0eb194da6d 304502207d78a40445930e3176bb24c7eaeb2b753589cb8e3ead31318f986b517390c52b022100e933645f5e8bb27fa44830c2c79e49786f8336b023e34dd3c83a48ee695c5d73 30450221008fe121d1115501f8cad9de080bdf0a94f8d1cd26b961b732098a3f551a6b25fb02205456c0824eba5d041ba24357661f23e51f5d787717c1239c76ad65a6149deeae 30440220399b528662e695a762e553dc4dfb9c2860e15995b2f28afd2d1e461d5ed39b1b02202a53acd70e44cad1b3d72431a58a07e1d6d3ac929f224686bd5f1ddb15dca925 3045022100d5331114301c254d379e846fe57ee0c89fdcf633dae01628bb36dce04d70b9bb022062d0a0850121e17185d57aff6242f6155f0cf5915406ce0ff789133375480801 3046022100a765d3b95dee98b4547cc3391b7f729a3a1e8e1637235893b45ff1d3c43fa90102210080c7569d0489259495bb9ddd8bd69b2bb92068e1569ccc17cf292b220a46d2d5 304402203995fa5678bf4c23cc3640fb8ba9ae465f235e6eeb652e3c6bc3b7a92b981eb1022046f8bb180133a4695edbc3a54ed0ec0c74dec6259cc25d3f03dca81a99e62423 304502210095f89bc2aaeac473212f1eb9b143ed1c006233b2b8fb93e1af6aca16fffec744022044c710682aba302f5a67804123c81d584197f66cef0a497a8268e7252821c551 304502204feab0222adc0b82ce0c44b43ea73856d5c457621aa7ca00165c01e1f3829f6a022100bf014535d36430c5242655ead4d2852e5ddb9571d70de3ac0d334cac41ca6dd6 30460221008b33a196897c1c38e2620363c76567dc22cd085373001a5ffabba9d52cb17a26022100924cc31960f94f62def0f9d34512b4ee054910fc5699afe9ce3711e49f0926b7 304502207d71fb6a8c2f93d072b3b0bb33422a566e55586949545a00a6df45bf441656e1022100aa3f931e0901f75b91f12ebc1f0b4c8effe69ae3cf3d4a041a291b8c4b8dbac9 30460221008adb833169fe18b13290dbf3c65914e45b6427c721b84497d50c5e2369ca6c50022100c1f05cbb0fed6133a5f7b2f22b79146ccdf2e198d995f8b438ca5ce93bf80287 3045022100aaf2625f17059ff539650af4a511a791c5997b1a95210807ad4df22e3b2cd84b0220640a4db1df8eb96bbf658ed731f7aa34bc1a900ed428bfa46ca0ff1fae885eaf 30460221009de20465420aeb0f1d6744ab45a3328030b34a7b45ea4dd2561ee4819e14ce9e022100f87a40927cac43e6a1991214ac117dc0e5c854ca2b225f938b01f24b30bff1ab 3045022100bba1bd4d7ef055892a91a2e1001992551ed3e373e893256f3fbd22b6c7030b6a02204a71fab530f0502ee44dd2813918f3f5efe80c5fe6f3f9f6ed3d228d42c22f95 3046022100dca88e72da88c466d430deee199c159ee21692b842d61c989dde92ffcaaa2582022100c59ce684ce202ea1ae0b16c17d190c590f28c1fdb3f0b273d7b37ba3ddd62962 3045022100a0c96ec7348e2ececdb5d2dae933fed82cdd117f7e8163375b2f5ef5490d6f8e02203e3b55cd45d2c904e29052191826ee34eca7a7175ac2a9b3921643574b6589af 3045022100cff7155a8c0f11149e5990ce68618ea7ef9c8fcd68161d1d4ee2c0e7c059eb0c022068d571237f9ad2acb5ce4b3d4d2b6864eed9fbb4f6eff76fd0b02c148814faec 3046022100c9d1432e24e5a6ce7134ac6d5631f73b13526f3daaaf2bf532dae494a3f18014022100c2d497859a52b0440b31fe9b3f5bc25d8bd72798fa4ac826e2719ddba46f9134 3045022025e2af11ad290b995cc8c3404ff9d9e761baba65bc727c91a6de9ac5939018f3022100d5a00004ac8e7f17d48452755e71af8cbaabe04e8c20e2021c4da03d51650de3 3045022067bb427a3b7757e09e6051c8751f886ffce3f73447b01b8a0c6c141fb7ffa686022100bf34cde83f67d8c3fe365a918ac640727e51e78602c69b904bf0ca4d9c9e5e8e 3045022049709e832f28b6a8f9994acbf940f89dd64ad7daa4b3d6fc6373a658cdbd66ba022100e9171fbc0cd8f1ba38e15169c4e9fecece611d4db18b474cef6005084f265083 304402204de43463964ad5852b3f299e081098a7d6ef5229ec5fb4eb5e376964bd0240c0022031d3c44936be6b64eac2e04be082f6c7e9b0120b2cfcba842398a2853f54fcf9 
3045022100fa8b8f322b80caa9b8d878aeab62fa4e62a30b50d38c8dc09202a0717426706c02206696f6ec89b947335a13af969eb66c4f51fae28c2e929ca5826f8294c20a69ac 3046022100d93e9f550cd3ae3d2af69ab5f571f5b3b4cd3446e12871cd1c9c89c93dc3f1ed022100e0bd3779d0f36f1f7d0e06110e07e8c3c42023eada8ec72561a29b2a20ad3356 3045022061ee6f0869cb2e90bfaa7136afabd1065de906907a02d9b4a7c95c1db94b0f7e0221008f69cd4e8770bc38eb144a1daeb6f215c0fe19364f75503ea3da513b44c40481 3046022100b6efac30a206fc1b7af02b1ca12420fe3d7b2642d78208828c3bf1b7ef2b474e022100d586c2804853fcd126161539cce033675e6be80a4a677faffe74ea4ef4ce2dbb 304502206f2929e897e3ffca3739bfd295d45f10bde5a65f92f587eca3ad05f29d27ed31022100971fcba01f6fa61f8ebf5fde0864b0c287b82661164f72381cde32c25dd33a90 3046022100c21e73022a49e9f51442765e4836a8c7b048ccb949b183e59fa534180ae29ae5022100eb6fdf3286bcb19aa132048057420873366dfbf92a13bfa8526bda25984c7795 3046022100c1ec7bd1ba94f779e09f1afa9685fe47b99d8cb410ca0c9131f4a26a94992d47022100fad91bd78b63e51a076d535c73dfb60c307776020dec215942d1f96d5480f059 30450220160fb28b72021fbe0db70ea1e37b7ddf1254d3fa294ab2e3790aacc3617d4cb60221009c1eeb149a55488c74633956a2d5d19f9e2e676f31609e7d970f5cf40c4c3f5b 304402207a18f302af2df0e4c3be78fde61ff1cbc7167034fbb307a3f1d9de964fa1bf0702207f85e35c1ca5769f1f92f55886820a24cb70ce6630be1be8af565d722944fa7a 30450220166ff2e9d1f9865868e9a76363687723a693ac8ed5efee64b39b417c426477d80221009dc0d46bdad398b33aa3d5f71ea9543cbc6f3447b45741438e2ea4fcef131448 304402204941acd0cc1615cc69aed8db2878beac3538980e0cbe2c4b6f008be5a721f29602201445fde17d56f731b534daf9bd122bef22b5ce6bb4b93644679aaee1a0096767 304502204f22ef7000a718037d976f5b09b8cfb164a82077720621c6eafa00f81e5e2f82022100f6364ee4a7827d2b7f9c5d5b1d415f034ddf3d51dfe15343fe3cc42ffd0c38de 3046022100c563f43d5fd13ad8532c67b9602add42384983e4b76118c57d049dcd31809090022100bd9e139b4b8d48b9a6da11ea9360f067bd35f76e32b7cfcc5acb1a5881395f75 3046022100d40596c2968488bb1c2e63e92bcc08474465787556d93f3bf9b5cfa5a1d46e430221009078f3fc23bcad4b7f49929a38189f854e2bb6e3b140d02cab805f68300fd7b6 3046022100a9f3798557b0276c53b1f97cdc0a2b5a508dc1e508c8fe4c928acc0ffbae2fe6022100896bdc3955b4fdc33ba79367f57a4c7d660bede3a9a81da7dd4980cd7721c911 304402205c281d71026dae235a3a4e0f8159225773b7cab8007f9cb31aa7919c0346263b0220632ae96978407ea78ceb14dea3544496781c77d7dc2be36d10afa9c21b8e784a 3046022100ba7e12c0cba841df31e06de94726a08ce3d9977617bdef993ddbd7862c1c1ba8022100c789c647b24465da510a75c475f8b157d067ea6b84dbe545fcc1b0d6d78dcf54 3045022100c846232b418ff21723014d269391d9254789e5e13562e83700b761b6608285810220145d7da3715fe3eb7c4bc0aba3d1d84ae646ff314281791d070e6e8881e08ed1 3045022100f231b133e94185a17431f4a37c35b81790a01f65d6e30b140f11a9991b8fa90c022016f1684fa1c849422c15fc83d6b83d5435acf8cbabbdce1f2bc09e83b063a992 30440220495f6971811464c195c396da0d090703cd153b5f5574f4f7d4c3311b201144c3022065e53d67c96258c18cd2e83bda670f4c6d9b27a92208a10bdbe3664d658f120a 304502204656f61e9bf965a26fac4d3a53890c7a0858c17384eb7dd18d751ab485729c98022100e6799d516982934973a92c1d1aca8b15b67e640fa220e6bb63699c2ef482ef57 304402207165552ba7c2ccb9df8bcbd01917a4ec79040194e3badd9d5d8a13149bac301702204e9a2443e2fc77aa7c12a10e5de34b61b807371ce51dba6a84b82832ea82cb1d 304502210099596d6168b9e96457ef1b29fd2e49bb644590674413a34abfe1b65c5b2a2b6a02202549f06abcf7baf63222ed4588174e7a88fadbf8f911d243544ce286d06e4e8c 304402207782cff258c592ec2765451822f4135fd0f46958ef421a5fe4c4e74556b120ef02206586a3b5941ace4fa8a67ff1c20c960ede07725490f84a3c33ae7f4b64791611 
304402207b1486b1cd870c3d1ca626e2cd3635984037b6a1452cf2c6c352b94b2762d14c022000865d2c59b65ed19babb31a8097a63043538b1c8562dbeaf7dd0148cd18d119 3045022100d78bf9e19b702b3b348bef85115890cbb88bae83845847b72682987d0d2e48a202202b157fbb11a728a8559ca8745e5d38ad75d0bb4a8a411bd0686759ed880283d4 3045022100cf829a0d4c93907b3f692612edd761ca7f61ac0443d17116388635ff5f7417c2022047e458ac9c6db8a6885b4a0574fe9333534a3ebd67d64bad0ba684ef67edbbc4 3045022100d390194bb79c02ef6814c1ed4104eb410ea775a08ff349c4867bb9b4500c336b02206f0272069f3904fa51dfc605bb3d4a4f961006b780d788ab3fc820bacb3942d7 30440220311092d44dbfb1307fda54b742f684828b746b01c323c3547cebde92f6c67603022032cc812b6cb7fdd3299c281606e968992f5a98f6eb62c4eb45c06714a508d730 30460221009c2d46432b3bffcbd012cdf64fdc9191071d2441524097926919170babea1139022100dd23042dd6383bddea94fd458b7153bd17067d7510538f8f1e7f38390c5bc83f 3045022100a0e1dac7c10b0a5c476e181100a7f1366de0d6280393a68adde3fc796ffc7040022076f98fbc83708b1069092bd8c7ad4948a3eae2b2a32b38521bb4def0bec83315 304402205be12966a0ecd3703b1a03a3ff0bbc2757679fd77d7b454179d70dedeb9af7740220536827bd46b247f53865ac06674850e152ffcd87ad56c455381b230835ae43a7 304502210088d9eddc028503d9a85a94ea6301720ba684f5add2ba62b59caee5fb902511b0022038acdac3da332220cd5c916317155684808be5eeb0dd8fc7da66674a257ffcdf 304502205f0a4084005a890cc5a1001049323686b3763751da6397eb061f084ecf0a7c520221009fccb1e8c0479ad6772bac65db04a109a6a75dd091a2be2f9c617c743ed4de44 3045022100a806f63dd29643c84c35feb5c7ebd2d7267d7c8074191eb23e1406ee5310952a022057e40b0dc0443db7af6de32f0fba2a1f957190d67b8b9146d338ef44c8fc2dcd 3044022043c8153318e76dd20595a50b9d288ea8b0702932c08db068956c7093618d71e7022046ed46d2ceb394347a5acd2102dba1b6f7241e4a88f8ee4b23feb9238fdcf917 30460221008f22c5c82eeb4a116c0c0866ddd355c164b36a1e215a69157655d5a598ef0fac022100f5eae5b6c2aaacd7205e63ba39439aba4243026512b611076e06a59449006cb6 30460221008cbe4fc68bf4ad7be4e75018302db97056b2f2710c00e7cd33c6d7abcc5f0d2a022100e9946839564b6e1ce6bf990e7128f370dc700e3319998110fca481673ae13c2b 304402202f6c92fd481c5f29219076e5b72c415b60f49388495127f973d010f9438771d70220551c38c21aee20e2d1cc81c3e942cda3bc4151ca2695b8959918172cac1e0923 3045022038ca596430e43343e71e485f8a7f97e63cce93bebbdbe0f6edc42f7dd6613950022100f5a9e254f593787562b5540145b75b10276940e9cb63231fdb558c585e67bf87 3046022100a533a0484e155839eee7dc5cd642983c7013752253c3511b1f8fa100af50c058022100b3928e36dc51e30fc4a9c0f377af729817b0e2eac6821cfc67a7c578c2aaec74 3046022100ad41d1ddda7368263f190f3f3d987e562fbe34df38cbae24fddcae292132532e022100d292db670844474d9a42b20e52c7984aa49050e525f383865dec671f1162f9b4 304502206562cdbd3ff810ca2dd93f0763c021534a8048af5b0fb4ccdf09dfd1a01ec32f022100d63e6513c7432e1c0ccaffed9fcea7ce1c87079aa934716cbe962325dba9544c 3045022063550d1992bd6bb6a94b169ef4e4e43626a16f4241578f16d94fbca7d557005e022100a7a821c48681abcfc2a174033c6142a91b70a4016afc650c346bb592c2623294 304402203024b111b835fc93a455bc2b99bfd4abb8631978ad0ed2c7a8e90244b0fcd3fc02206364a47375925e0440a1e996686b0848e889651faadfc59a376a09b5a8a0ebac 304502201619d3e27cbad4d3c579fb390f9fab16241dd2d31fd89f841181eda2efecb0e3022100b95c948efc860ee6c528f369a355b2aeeec44718df8e9e1e5c9904cce23ad22b 3046022100ea80595b457100af70e8536e8e019349c91de7a1a06db6014b914e7ee36d7e340221009aa13af1fa52654c842518de2bf4a909954522645eae84544503426005598bc9 304502210093d7ca5fafe20bc50720f85a8b936ceeafea494c73ced25768125d03d0d8b6aa022009539894e2fd3fddeee694c45a28f29c5eeab6fab1168c3848768ac64f28f4fe 
3046022100df4044f670f87589a8b3265c521dc83574c774bb6d421b403b89b2dc8cb48c8d0221009443fd6778089c4476d6a7b4d0c3e55fd65e546efd6951fbb71ef0205ae6662a 3046022100f28fe098a273dca5081ef02f68ce16cf1de56c628de83f51d372ed6092c1ddb5022100b47aaa650ba865903d9cc370b90966ff3e858895ec01ebc77f590ba9ad65c360 3044022068000e9da4c414440debd41061acb2bda581df789ec35f125fe178baebf2856102207fff316b4143c5b6b2eb60a2c756293a06bcf10a906b8879b16e0c7849e9e386 304502200d952ab3a41795fe8786fc8b4cc410fb77cfbae2d2f7dd73b2f0f140b26f2477022100d12ca51311d753c2f68a476d8a04db0d5e9a647189c3b17b3571fac682505a1e 3045022100c1a450aabbb576693fc30a7e012712b0daeb0ab15a330f8c5f097a162344aa13022007f0da96f77fef3b7e8b52776abb5406685a7496f22b7936461ba91766865e15 304502206abc0cd8b5b5ec550d7471fe163878c7596315c3cf5d6c99337475a1e01a4b6f022100ee1ca7aabcd6487653dce76373c696331a1a020ebdf29a08e9e8a67fe255888b 3046022100a92222d732ccdb82b484b66a2a5847b0e896f778adc56e946aa09d830f8d05ce022100bc5d2fcfd2637d3cf352c53cf4d2c7e5c957d233f535bbbfa2dad6c2c9b26514 3045022100f25f9b0e57da834d068ba657ce6c47b4cb79fd7123a096a35c49600a02b1b55f022004f82eadce85936f013b53ac234755ddb07da925623b470b6658dd18ba67f036 3046022100cecba9e78c410d7e0f2b2ddf218d5d73d146bec72bfa2f46c4d1fed2456ece22022100b96229b38d61f17c3eec765c8801a15b55ea29bbe067ab3ee9c56265b7c3d3be 304502207e3035c53b33f59162f3875024473264498ca1b411d31eaffa5940dcb6ff1c5a022100aab2e3ff5ffd9f7c7548f8d71866dd8478a42c669c7f61bc24d81f44bb537a78 3045022035da1cb311fe66f35c8b25c7a16d36b085cf60c7fe52f7364726b86d803733d9022100cb58007115a2906da8504b333fda0237e02dbf7243bd9353d31feb79e4cd1d02 3046022100f62c6ca48bbd2d4900ff9880abae55649aa5972319e9c1cf5721ede1a9a8218c0221009ff2faf749210235bc23cc0418cbb2ea0b4648d880fd5df02da8e11d05ebbf09 304402203498233f43b3e31bc4a3034122f68444387a6b31f3dcf67e3177412d04df6d98022005a5505f6b778d2dca8bcd292fd24a60dcc7c59cdf24bb8fc3a92c95020c4e12 30450220410f8c7f0a3a18cb2d494e5fe13adcb3c84ae74d5555e3a5509429c169b6e30d022100bf49d007a6972fbf967d4e6439e94035e29297904cfc0a169767e3aa88f6ea51 30440220119a8c133930f9c9d2d4f412f75bb0fcf7ddd783f676af80c135fa8526461f9902205acb0b34d8f5c9a70508255d32abdc39859d4bdfda12bd575dfd436f8e29a40f 304502210091461883afa42ba4721c6eaacfac2de340671b01422cd0103f45c9236a6f538602206297626f7137a76e8dc832821c70de2e67be95fdb4ef86e98b2424bc87f494e6 3045022100b9af6b01862d7c752d5bc3c8f7e2a8a448bf6e4cf0ebea15f66c49eee1942ff302202cd1fc5a04c0f88429e342383c66a3238afbbef401307f3e26fc9f38b2c34931 304602210083bf081cada0f16018c6047d8bb03cc70abc2ce397d025dc47eb2d00d64b32d202210084ed8fc229916d24abcf52acc030d96fd4bf5ef526c657911a746f2550113e7c 3045022100d760f4ce597917a2a8871d6adb68bcf91da56b2728db46e2bbd322f394ea0c5b022071cc9ec2d7170d9837209ecb02349d19ccdc16446fa516425c8da639dcb44a72 3045022100836708363d5139c9b7f79aaf2b3f5919b63e22e634f54367d1a709a1d42f7c9d022006aaa4ad925044d88fd200148b7057d452984f9259efc6895bb8140767c5f4c1 304402202469d186b8d3c97e834dcbb7f7edaf7c60a7e9bfcd5e6e083c87c74923827d470220545bb6bbfe2639cc0d82d98a65b88c4c3a988b25a8513ae9f530158405cf0c74 30450220747dc060488d895e1fd7341b854b4a27557a1c73b08235c8ea274d83ebeb1f3a022100b5f52e5485f3fd2774081e28f17febc6b6b9f45a7390fabfea679e035dc8fb8f 304402201092b14a24b6e918c1bc08f3584edba15996803f7f9fbc15eba7b748d49545c002207cea34d5ae2cc95bee780c7cc9f856f475a2717199b311306b244fdae123d854 3046022100c91242f21eb8ed7f982a7b28d11d64e0eb635bd552440e46429edca0b46afe15022100c5bcc5d70de84819e8d199cad4f37c6938e1dca3ce9d2a90e46b60a8addd928d 
304602210086cb3355914f0fbec895767b4c7e76ae33dac8451f37752434f90ae2897eabce0221009b8c9dd041bf1f87686512828c8c59a15e12b31656115a86292bad4b5c85bdc7 304402206bb6bd8a838dff95ed91a5964075205820bf34b00df3d0c4e3f9fcbd1938f2690220552f6cc33a1c27fde4c5ad73694f8b848d720c880429326f861e4c4566f69784 3046022100a79a8605049ab99bb8ec797fdddd82cb645778b0619d51da33a2c87b4b44d5fc022100e77f4a578e7f4155960fac54b67954f9d77b61c5d6a804978615b883e7816ef8 30450221009fc5e317e061d5abe12d4065fa0312fcf791dc44e5ff95063b3830368717e50f0220670e901484019160b6c8bde818fcb4b8d4619f599043d30b502d9b5357a6c7b8 30450220529769612c3ec7fa6fa872deefbb3562cca62bde238b95648ce8af50116e66f0022100bdf6925c32834f8024b6ea91dd74ced5fb0bdfb10d9231780056c820aa656198 3045022100f2690a0f001d8b425ab6210ce9f887cad896128b66f48bc8274f46f1dbf1cc77022041db301273652ed94880c05c177be9fe74defdb3bb9dafdbc895dd6c71303242 3045022100928da51686612adcb8b63164d9ece0f34e77de8051f5da0cdbdeb56b54d39968022038506a53195c6b3733f0bba4cd04d419106784a6370515e7f935289163487b23 3046022100e80b345364d665377c70d031de02c1a1063cd7c74cbbc333f6797f36841c7dcb022100b24ad79ec16a8d9fe46384a39b12fc399da274b3bf0b6b5205ec2de37e8eb25a 30450221009960630339a8a8af9b4cc3cd1042f8408a8026e21abcf04a7abd4d34a6bf3f0d02206c07b871d1b11ea9240737a65d5c9c0f3cfe8b271288083c44db20e7dc442c5f 304402201f480e80e7ff3e702425d99a4ce581a37f9e55e1578691f4c8a39460f5acf33e022005e2852f478bf3e4c83d0b17e83ec87e87c94eec7c82674eacb45a6ff3276739 3045022100b99ebcc91860e2d4d32a8235fb2cd0a603e9a6caf3835d4942175b36f66f5fff022038d38255086eb494ef4a66cf045ca0e3ff05f3abd2c133d89b73dc01a4dc864b 30450220073da156fc067cf6d8fd6c088753cc34dd230862e959e615a590e7ac597cec0c022100c8939ace96f89b5880f74b0747fb0a07c22bc53fa1d1780fa6cac4738a9acd6f 3045022100c05e15eb7fe4017ea398145643fd359bf03100033c96970503f8469333d5803602206df8b4b76b2435a7bf4cea0117eacfb05e0935b7e12484d2bc79e033a2a25f94 304402206afb4cc06252a998300b65a4ad89ee40bdfe8733f25fbcbca3f9079cf6440dd20220419b9d823d15c4f2df74b01a30cb8f2c2cfda1d383e9ab3c23f336535d601a81 3045022100b75068c945d1bc2df4811095a8e6e24328d217647347bbc9dfc0425870ab6d350220102ee3a9ecb508222ddc7dbaa81d273ab6b0e537f41a2ad0f4c0b87500358a3f 3046022100f387d5ef5ef93d5ecaab9fb46ab6909a9a9132cd9751d1aa51c06c1caa3ba1cf022100aa6de2e04b22445164e3f500713ba10321eaf76538e3225de6b70e8ae6aebeba 3045022100c58dd01778725ff6f5b53eb5545f7bcf363fde403d4564b9f6ff1eeb577fbaa80220610b79ef3fbfe3286491289da35b7f7291d1e909ae6ebe57dec0fd7d86e44028 304402205ef0f1c4ce1cf983b1f0055d3698ac7c4ee60c23f9af99146ee3f1a7b24d7fd202207ef101287ed2fd997e0368b26a130209e0da439d4e7c1962dee906c58bc687a2 3045022047c4a546bff1af08b25be88f724615283fd1e5633e2ce1a8557c55e1219a6e40022100bb7648c206080b878225d1d1422e398316e7baadd1fc6dd0a737800b8994aca8 3045022100b065988f94458b4fccfe7da0ef872efbb4082ccfb09ceb0d9a93bd2a5bc4857b02201cdb30a93757d60ecc7a450274c18e56b023967e8a68736cd25bc74d62fbe053 304502204b8e73a2d9c3262393a0caa6f511ec18db0bee1032a7f86a78c89d956546eec6022100db68758371cf31b852fa287d2c62ae1d505373d776136f622ea2f618bdb77daa 3046022100ddca87857109d0f7da48a1227f6040a9ae0df0901bbca8aca724b5fd56ec75c7022100924cd44aff190ae2e4faef6f16b1b4fa71f8d2850b7b7dc34c72954163412f3f 304402200805f4c6a046ef3246fcedafaee5e12e87f1ae9964b973009afad1262c392d31022075c9f25ddd72804ffb6c562cd8cc739f39f93414d2470cfbe60b791dad055b90 304502203787589416a03b9ef01854ac12ac0a74f62f5c510d73a4fc587c1fab2f3ee2a50221009181e65632ef22122d0a1e2eb46da5e54190d96a08e85ede82b1fbf34142d111 
304502204eead5c18960ae2eab7b6ebf9de9aa95194faa615a1363b0092f009d87b40ea5022100a5279cf0a9aeb76f162b0009662b75239dd5f1f5fc997d1a50a37852fb27c6b2 304402200434e65981c57806e8cf8ecb8bab5c7705667efdf57919fd380e0b252d8225d8022054315c4eda1ebd1e75641a9a152815fbf2d5365022eef784653ffe77332ddade 3046022100866e7e3efc69232dcc85070d6a129d5173e921403e15d8719f5640ae6b76c11e02210092c4fa03a1189d57ccabe34442987a35c0e48087ce992d90001d24aba6f7314f 30460221009b97e9094b4fcfddd418d65bb1945b7d89ae2a6be4e0769ceb7b3629c12719aa022100ea9f71287c38b4f2ac672b0653cda6b517e32ccca23cb9d0ab686c06c682135f 3046022100bd4f750f0ec3952efc6eb8c340e114b56472b3116b800db10a0862cef2d9f3fa0221008c3df03ecb3e5fd5d6886077b242760f1894c332aad386479b50834848e14d63 3045022024e39852873bf3afdc2245fe5ef9fe23cc7677d94558a3f12a35c748097da619022100beaab6cb2a58a41cdcf76fdb842c513ceee8fb94d79b0e4f83acc2d13e0f5cd8 3046022100d3c0f047df0b8c9db1446d48a7c2c204c352bb86c4bcccdb7e9d2e7d228cfc25022100e114b9ee41307276c3f71ab000b006d3c72998fd5a5204fa8874de3ed4d159ef 304602210080afa9855e602155deccf132ecf46bd9863c321cc0feb68000da81e97bd4f42f022100c68f8e90a494d6688b14965825e10548bce6d28edc4f667149aea6a4af840be3 304402206129a41c91be638d2e675da0108c2c05b3111008c0bd4c611f39e2caac7c72100220522d3f71d5c49450ce4f0534c2a1a9013643047da65fb248b72bde012c188bcc 3046022100e354cc15df5c025da3886c40e8d3be2b235fa73bdd8eb3d1afa8f92c8d367e7a022100af6995f23f27d624ceb51b855fdab05bb33078bbda9f3cc629c461c0dc7d15cb 3045022100e5fad9882cdfb0959131221f10e648bc10e193b606f5badf9e59679d2b5034310220133e3bae1394c82d5530909870b8c0f4494a0a2d09e2a33d07430cc44215137f 3044022017baea5f64a9c517395c57e61adf5dc03d8ccf4b61bdd2c7038e41e5cdcd4e4402200648e491a0a4996fb7414bdcde2149f8eadf3ec9ee9ee1ce162919cbd04e8b42 304502210084b617806b2c95961c2df9d948a3916662e77711b31d5a8b3b7e89f602171f4e02202c2389d552528be0f592240932cb0dd4ffbc19ef70680db80a9752665832a850 304502205e33dab5ed3f737eaa776b3a43f3b1d002d2eccd42cc16ecc9b2e47de79698fe022100a89a2c6b0ff27bfe434fe3f1e348ebf8270ec7c72fd56b1f87bf8d7c096c9595 3045022100c7e336429431cec543261b8653323494b4e01c243c9d92980cc137baea86b0f002207cc1a67ebe4064e68dc7c35c31ab56a352d5a3beff525090cf1e4763bfaecee9 3044022055c3bf6cce296f4fbbe1dfcce7d49299d90ee3a6b4c7226f8050c0ee34dc450902203fd8caf51b03dd4edb355d0c2052708a6c1bde22099d9d0aebc43f39e1054982 3045022100c9d4fdb50b7320e02845f465f6b47f4adac8e007427c5aa3eaa430fbfa3a36d402203cd5c6fa99a2de56f8e2ece520f603304c9a105c2138110090770a4bf8bce8b4 304402205a1439c380225ba5536f8eb20a5c570bbb5c63e0d152368e313973e0403faaf80220781e1e883ec942d0e6ffea0ad35c0d471d55c951719a5545f05bdd378b10f565 304502203aed1a0f883d77578ebacc52777efe79867da6370f8bf752182cacd4055d0925022100b907d96e02de8201c0208267b718b63dec7b786a0bb2726cfe2dede22209b6b2 3045022076336b34d384f4a223c41a5811fbdb43dbf4c4e6ddffdd4e8ff8e841dacf2d73022100a0f07b6d727736604554d51a44573c867ab3d68e465bbf14724ce351afcead26 304402207788d6933b90813bb3c26023e98bd6abea09eea6574ef224339e8096bfb4c6500220710000eea07974e6a15e4d37886c9cc89f10ad44b6edd943bc721a5d418fb853 30450221009be15f6447f0f6fd8f7e8d6d48187039e80f28cc7a9671954b38370127aea5080220182915673aed4811682f817ce42f1882e2d800e81151a06795d8763718373154 304402203e70f5be1e5caee2b5346f023478ba242e9ddee1c4e386a6861229dd6e596be902201c581585be3aaa5447dea1d7a93a1748bd28e1494823b20c20079aa56fdfbe87 304502204761b78223517e8cd7d59bfbb69c7ee83c737c7a3a5c94e5ad9de0dbe568f35a022100a88a25056a611d057a6eaf3d0d380654d6721bd89a38f082310fcc953ae426f7 
3045022100d2d0bf24104b461f005d072c5899989c476c8ebe6de550ea0bab7943e7e2402a02203e6291e0a82f84956065fb0eacad48e1a8c67fac68a8303079a3733e726099bb 3045022100f50c486f8edb0c16f7b6314dbaa6a14f7bdaeb58af52b9d279b1ea7a0c5499d302205840020c1221561c198b863568b5ce1976b95f85373d7d61a19d70a55f1dff77 304502200cb1ce1380171c44b69f44fb473ab7677df249bd3f0053c2a39de93512e0be4e022100e569eb3e7aae53d1c76cca6ba4874cdae27fc482927b09b1076251b1d853df71 304402207d43e9d9ef918fae39bed4f096b5628c2716cb8d5a555d69c9f768ca62d3babb0220417ce8ea3f342b40fc3d1513c58359705739f991411e185d0f390552caabae04 3045022100fe4a78606e354e6ccd560481743f74d72e14c731b4ffdffd92c53a7e1205a7e4022014762af1b682a8edb233272e18930d0e2a58057c0222693c33a879ec4840fe39 304402204330e35ce3898ce67b9e3de5bccab85230fa9733420f6b60bfa47f17c893a481022030a3d3f768b6d75b2bc484f9b4d762192265270c0b87661ad0e832d4fdc40c01 3045022100c080950e718f0ad76fee0a39a8d7ed6d292a39edcfafb59e961ce486a791c4eb0220218543f99173f8869576280c0a9c2aa07685fab6ac067f6edc0ae04ea853805d 3045022100cff16f8270438a0b5ca5908325d3e476b5eb94ca10d1daee61a8a12e7a2221140220063bbe330278bb8d5dd1e8d27dbc04cb931b6e91ff50af26f4123b0322fc1796 304402205187d04c7c5bd9e1331ac7facd149707c0f40354a92c258c339c67aa19a5a0380220310389caad5220079c95d0abbd6456432c21eb45f05406f0ed44aac9ab73c724 3046022100f09d435aca824267632ad0f87ab76d91dedc56f8657fb9738d16da64c219a6d8022100c7687e7db72a3e6842117ff750738d56e1454319415255cb8db4d4247cbeceaf 3046022100d53009324f0e82cc0edb05091863e03ee0db5ba473c1068dfa264c743ed3aebf022100f3eff315ca6730df62f9d0d8af2d0a295f5e38b2477c5cc42033c1bbd107a305 304402200d4cc23c1a64b4c0e7759d0b76dc6f65fafae439d6d9346b63a956e8c60baa0902201365258b10e9ac38d0df06b461a9cade932e875a878390dd9cc778b63880b836 304502202bd8cb398b622f5b129533b8ed3e8a97c4852d4b35e8b88a83da07472c8a7be6022100f19c768595e2ddc6cf71afcefb8cc875f44568a312fef1e210e1d8cc31485f19 3046022100c6cf661a8053cffe9bd72ca1b3bc2e7941e41db230556f6a84c2cadc289da199022100f0e338660dfb6649b8633d05d59524a27d0fb8becda2192f8b6ff237f89472f8 3045022057d396ba02a35bd0c4ed92531b8dc8fae10a409e19e9fb463a7ceec5ad8961e1022100de0f57163319d4722b058242ec5c67fea113cf3003ada88306d7602ff1070091 304402202f6836568d4b516badacd9aa1b4d8e342bab479660f985e1984bd274c38b14ab022012e9c1c1409fc435027cae20d0d9a91b35fe95199261574bb756437e65d8571d 30460221008089a32803d5f614b67b5d9d18b51a9a929f5c0f1838a4102fb1ceb75272b3eb0221009d722ce2cd1dd033e01f5644c3ffa212b52cd30087ea0b01a027c86ed3525239 304402204ba9932f25900752f0f25fd68ac54126cbf92f5ba2c56a564a076eb42113441a02207bee7f23e82025d63e77577e443c29e54920cb0fa823c2ebd3cf77c8171a4c72 30450220710581bd87510fdae09930ea1056cf3d6d98a650c032499bbdad37699a6ee001022100ea156e72ae42e3244eb2b2d362f0f94942e176b39f08ef831d4d316d0c173ea8 3045022100a95942d966ca9a6eafd7a114864ab0740ef4b65b7f2e8d79ee5396348bb6854e02200beb57f23c85b408e97f81a014085f6882802a699e51f3e74138ce7c2f3b132b 3046022100f476cb2e698a2e5a2f307d261bfa939ad47306b6d24b95c28acd5ebb8f15ef0b022100994e83bb1f17a24d5f06e0d3736f7f374199a8d2b4069c58b5e219c33e23b60c 30440220379dc66412a615daca28188520c588ceff6752ee03af7f4f92d368810688505302201519ffd2b4898c4caff76146493799a55d526a981ab495941fa81a6b6a04b3b7 304502210099618ae886c4d064eba6031b1cd43058c21bd9438e603e6359df8b22265e69dc02206dad1a063319afad77e9afe69eed6657275bc7c953e9d4e9e90c6cea21c2b66f 30440220190e5733776e45045d3ab85e29e06af64efafd3a1109aaf8219d0ca6140448b102200c15edbed936bfb8d6a1058a545dab9fc6cef6af0e4b7e6ccc6cb92495d3382a 
3044022005b2e5691c8680017159eb6a2769345786af274340b4365cd10021a5c571901d02207442d5a49b66a17e57eb63ab09dbe5b345d533e2626173bf1a72ca01ae76a787 30450220242f25fd8f9c657cd2b3a7af7aaf3967cab4a5c17861776ce4675af6eacdb5d0022100cfd93a75854e8c9a31a51b52901d2eaadb907836bc75060a621e81bfb4f76e84 3046022100caee1f9d6fd744c6c3da15900201dadf1216b869938b36ec30ef59e5634ee4f7022100d549d03e4d220eed6a211404f9c669ef8e46fd4fec7d3d7b529ca16d51322cc7 304502200879c165873c188c0916a104c03a2d1fbdffa5b01a857f746472efde3f3b75d1022100afa6915e8de0f2e0d2b6e74e3a8ba8f9baf8845936d7d6bcf8ef393cd1d18347 30440220329d6bc1cf5c9dc6dbdd69aef843fedf8fd8edc0a451fb4a8348ee8e03b21ec402207abe42164be882e5d6744adab8a9f0cdf449b06957a0cd1acf507ea2520c23b8 304602210090ed2c91ce441812c6aa1ea1031e44ed1b906bf30e361d860f36894cca2e568d022100b11c41d6a11a5bd370ed0d7ed201c04fe6a1ed205bd12533a8982141fc28e643 3044022053287e8bdb3b62a188a266919cb441d2bb181c42dcac4b78bb8618aab19c6f1a0220619cf7c490c9050430c051577a6e4c83c2848760143ede03640cec3d2c05e614 304502201c7d34076f6756d40c6c3ed29cf2bbe3c8fd0d6cf3f79adfd58dd3363a742e55022100f2f60f6691d2e35c4c02dec68e8ad9ec335876f9255019fdae08a1d51567f4e7 3046022100cc0bc6307593b607eaed1d82f420a7515e39048c07277d78694f3c6067a66924022100cc74408c9d0de15eac8f5e5c9c4f97f1655476832cdef743bd7a468db1dae9fb 304502205cc74ddd4dc37c5b74402ab30cc5c09ec8e79545688516becdcee54522f546360221009d4a326547fc2b8a12d20ecc05b0bb8db1c34e06bd404001b22294434d02eef0 3044022035d25b156b16035611fb2ecfc90b54c1867a7f3f861f74f6bb45ffc66ddd4ad40220699f5210bfd624067317325c599a643e9103f587456f066c28fd568183dc11cf 304402204ea75743d2856a1847896300d1e6b97f4faef7c07df22e6c067f7bb43ddd31f502207c1c66d11089f45181e61c8a1bfab4f839dbbf0b6b98adecbfdb4bf2c11be8a6 30450220045209e75b973a9434b3bc5c56db65ee322ed571603719ac46a86fc6be107df8022100cae372ad9a95f29159107ee335faa07493144713b4f0462a273c226890ae29fa 3046022100d0b1234291e67edff17835b7b6a9e47376571c2f8bccf6d3d50d80a0e0c41722022100ba21aa0a8529b710f412ca38655f4ff2dd48e0d269385c21be7b0f0707ad00df 304502206d48e5667a0df05b91de87c384a8e62eec29915f043e1cf0ce51468784c3cdcc022100fe0671d74eec866296db4d689fa341e4132d9859faedb2dc49facd4ab2e01e9d 3046022100c207d982e738ac5d9559f536eb37cea5d22f905ff2852a423cda7aef2e597c3a022100c8fba3ad5c2deadfe0c81c981e1d77d5077cfb9ce6a0d7bfe64ea75943390d3f 3045022011504aec7b2d5c41c68195b954b2c4e4992d5ebe33f5062995b5b2de40bf024f022100e06143727cd74b58660097e7e688a31b4fffc880bb9678750043ecd4b3bcd2dd 3046022100d470f8461ab136945e81c9822f753f40a9c2f70e9fb9a58df631de917c5e6528022100fa408161e6a3f4887b65b635dd4cd3bd2763e02f924d876ccb02da550f827d64 3045022100b3da98601a9222a21e2387c61fd1adc663ec0d35431d37935f5ca7e81a0a3d4f022067fa45d4ebbbd37664350778235f47c9c34a02ff7995471f29ca021578273fcb 3044022008e059093127713e7bf227e57c74da2ad7c792e021b2c37d2165ba12e0934125022060cd1eecd6a5c3b9b7e8e02b71ac76ebf3ebfb21000f788779666cdca256a8ad 304402207e0379b5c94eb63888a5e9ea6536fa662f4878c01f8ddb2cb6e42306c8e5f6fd02205c89eed4b26101390ba88e2acddaebaa98f65dcd66cf495a0012f02d8ee6d9af 3045022100c7bbfba117dae75c221d366b16c87cd77ac56eb8601e59e986633675344285a902206d77fd7516166a27047139e95a587c3a20a2be60f94a58998f8cd0e029397556 3044022037e71113064047cedb9525abadf0e693fcf83ebd7afe497eb62ee3fcb10206ba02204ff8e61b98545de02f5a89f8046b630e49dbc47f6d5c3b68b3ca7a457a138e00 304602210083005db151e5bdafe8c4f70554f8d4ec5ef46c406f3bf9c72074e12c5d6a5ca6022100c15cfb9766fd3ae7bd1e239622ebe7f1409094f28ea5d3d62fbf4f3b48e78ca7 
3045022100d484ed6566d92cce65f12dcc9f001419d7d03a2d000ea11b85e75791f989bc8702204bfd1ac9bad724514eb0265e34f3b5f0e7d00511a34fbc8df69d90bac176550a 30450221008ebded04617371db90569ef0cb1a38e293e287a3e5c03c8c3409008ed9e62f6d0220527688609d2f6d28b1bf92057b3248f059869a51611eb3f0b4c0e84bf0b5939a 3046022100b8d94da8819681b0894b12b68b123369eb45f0e7d085f5611be6e2605c017a62022100ad9a86e1440ee124fc8c8d07ca70e14782919b098ae5e021f138202cf6fce211 30440220797eed5ed1d04ca4c4dc2b12a4f1205ebe9085a95b59874ededce58144caf60302206f4f5debe11198d0fb4a7307a1a2d6e45fa45107a6451be2635f112b2cf6ade5 30440220568b99838a8c5682f9b83956e0181a3f383ec8c40b273cf201a4c2996278555102201d198004a297a7e227a7abcad99557b10cffed8c6fd605fc249048380dba2a55 30450220736e190ea02a461a27cedffe56ed5b38af5529b99673db757a99423d48a143c7022100f4a153fe13613201e15c8b6d41e1263f44ca16759a30abfab5a9630291dab2d6 3046022100db5b0cc11c1c2ccfb2df6853d47703b44cc3d46d3bebf328a0e76ef359bab8af022100e9085aa321616b129f4981d226fe1cb04ebc455a56d83abc11e8adafb47cb768 3045022014ff1295837ce2faba2d66e376fb011da8e5e64dd2dc4ac30e172c51ab0f5509022100ff7c9b09de1d1069c4c6afc3c6aa855682ce6a2aabdf990182254aa267b530f1 304502200e949778140ca539fdc15de1613655af3a97145736cc4c69912a17f59a393250022100b2276ae56d849ae00a446fd86a244852b5b9343f4e2215916b2456e36bf03d63 30440220320eea1a14987634aad5186e8ad39058b6300837f58a3349356fad7dcf43e90202204da02cf6bc40516526d8f1d7daa18aa7491202112b39ca4a549415cd1ae88334 3045022100feea9daa379dbd2867937584c665e92fd0bc7ea0908240383263b66d7be7ba63022015c33ea4171ddefba08541839a0219a7754fdc607038330f056639ace3e86501 3046022100db8061c81bfe9d164e9edb84c00543ce321542945cc369fde8a4de713453e5dc022100ca51da477baf021b43eae5d7c1994aad7f6a0856559fa078e7767a414b005ec2 3045022100e288513c8ff8002129e2da9edd2e2c495288dc85d053157a1c2a7d4678ab2e2602205f853d94996055002fa42002ee62fa84abc448125ef38f22798cc8f1283b73ad 3045022067e773a0704f4557771149700eea33080ff853f802b00d73fd392baba7be0201022100b4aea58869982ea7650853af01886473feea05495c4ac85b00d25e874d13193e 3044022058e7d894db61e3b367b84c83e9b03ad853d38667932b9aa3e132f8737ad16a380220767818f3f33dd6d5329e781d0898938e0ae3f7f3ba058c2c48a509aa90f6c513 304402201136ff71d78123abe86fa35d20059441f875768a7f8bc7d4645a6afdfb6fc14e022014aa752bc5739baf9ef6c65977f302899b3fb51efd0bcc30f9d711b623b2b4ee 3045022100fa048e6745f29e048d5134043563eff0d8d5b2e7726bf97c1d5f0da4e38167be02201397a5091cc2d505d8303fdc3c3076d1856868c830b894f768380afe5e1e6c49 304502207cfaf1561a332a3bc03be2bdccbddcd4116807a976ed2bff2aff3763d12746c8022100b586853d0f79c712eb4782b618a1dc7f5ecff33750a41b67cfa6768f8cc74eaf 3045022100a0df8791774876aeeb7e38d1a8ee48544781138e0a5f84254eca9230b655ef0b02201b5a8cd1dcb1539ba9cae24db17ea1ec090d925ee3b2e9dde52d924681e3e88f 3046022100d6a2752139e4e4871c0003253d53b69735ba544ed29933251f4ae30d57034446022100ee583670981065dd822a415d6ebb79aba072e97e415a355056cfc1889fdd7fda 3046022100de9697e9cb258990d4b8c1a946dac5fadd2824e814616be919ac22bad5edbd71022100c8838763abf4ae3efc27bcecf51f7de73aa040cfe8f3614b58999107058b46d7 304502207925888290fd44fe006a0b542297107553b188be2d28b3000899860f022091c1022100bdab03ddf006c2b46254d6957d67582f2e378e9564204073f6cd2e426243d317 3045022100bfb6e49805ae25faad1eb2ceaac11feb47d2074f431c3fcb29d632da232bafa40220661fbd8b2c46e34495544fdf0200359b483f7f89a8fc6a476fea34615247a9fc 3044022065db05cac3d2d2e7bc2775769c462271098fefee34bbf7ec5c18507b99f21e9e02204384ae9c27fca730826abc70b08d7c92123b086bb6180368f169914274e69a21 
3045022100b81b54e22108e9c1fa294aa28d6e8819542acddffbeabda3b8168e606cb74dc802207fb4cd76f15e6235a49459fe06f38de9a065bab554b9b2ef24f4a4477f757daa 304502207b36164d06c2771842a00e489e1530238afa1ed211df456af2f2b0b72bb2677d0221009aa6979686acf52542ae716d293e57e873033404a0dcd221757ac6fbc6af2358 3046022100dd71098fc72d309501f79d1fd149a5cf5f16fe1301c199cae25f535309f7c42f0221008f3fa15554d6df900cc5accde3ed65e4bfb19b7caba5d7cd59c0936c18105e32 3046022100db903c07027dcbb7bca4c933ea2215c87cb28ab7994b13098025a8bb01a49c42022100dcbd9f70060e1868a5a475c44b5ab2d480436f9491765a2a09e87de8b39c1df5 3045022100dde29e1d7ffd98376183ff5498e8a1e3569f3de4ca25a8ef3e62095a30eb166502203b036cfd42dee1b14ff226a5fc7e54ea5a69244f6816144fa57fd9199ba7916f 304602210088965d0f523a7e2228707dacb62addb6426678f9b4b40b02468abe743fde03ea022100c471cc0138c103d1e5e1f478f521033f9b05de0e654199f01264744a33b51780 304502200c689704fa850c72a97ad3528428fef1c9dea74acd9d3d005db76d484cdd59a2022100f484ee5a93f1f37ec5c582fef2739ca8bd0cd331d24890c7eb334b0ad4823a3e 3044022076726645d11dc3a891a3d45435b14ffbf1c873512edce9bceddafa673659c27602200c4f162e4f16b23b7a2697bd6dff55aef85e03ce9220017c21a5da08a74c0fca 3045022019af765fb55accdca72b719435d93c154d84386b644e4b90fdb3aa02b4fdf4e0022100a8fade2a90cd84a8d11414c60a46174eae9f869c83cc1124a61383260d29fb17 304402207694badf10827c4378c64aefb18c6a2fdd2beece7baaf7e87a76e06a5ccb612202204e2047a5a0b2a5498916133d2ab69cfefcc5bbf2110c0d4e889d41c5d731989d 3045022100ae2588ea8de85915724bea62b495a52f415ba65ba2c25e6778bcb08c909437910220092370799b239fd60edfd90b30bb47fd4a3e27846a83f2a551cd5b212cb297bd 30460221009e4a7031303c0acf54cadcce212582ad56c1eb9eb9e59df3f61a5ac657f45b57022100ba587bb4d8a2584958e0074e941f8f537f9f9069c4fe4e3aa623d21632aafcf0 304402203e421d638229cffbc0f01e8fba6e609cbeffdfda2454593d895568723610058a02205c8d9cd803b3fe1aab2698fb18b8b798011ab7ecc3d89af2b2879b9d19f03cb6 304402206c38ccc93aca4d3d12c7252b210dc0a3df502d17a7b98ad4414856ff5e117568022043b05a28287e4aa257d4aa97e255351fbf4ee62584541b47e62fc2680a67ccfd 304402201802f42a05c536c2e7835683924009cfc03e9e8fbb0033167edd6af01026e9c30220158c07c68aa51939c343f2e5d9f2278c2b332f8b2c2947da5a2d146ac1c5e30e 30450221009d0e59531d15d7a181334d554ca926334536fe22cdb6969057af361bd076256f0220723ec3e947a77d62626fa808baba04d8d4eb4c37e969903e082bb61aba941788 30450220384e19786de90da4433a8c81262dfff913e2fc882d870e06d6e3f0267ea78da1022100e7f602a8c0f44bc8db52341a6c8b58dbe20e160c1e4eadd0af9782bd6317fefb 304502200424651a30f8c5372a215161721453231371fc842721f389d5ef822504af67ee022100842140b9941389f11b2e10acfe542f8b40f3bcf00e71c6f53d8ba38cf5b8ae3e 304402205ba78a38c56e8b2ec22e908926f26c7f8fe156c8f9b0d2f77cbe70db8f49de170220749765aa9031c0155c6b8eaf3200803e103de334d8f660bca556a8e0fcb2300e 304402201b3262daca4540d47fe7f94b72732d69b90a16f03280392f53b1d1eec1e13eb902203ee46030f658c0b7aaaaea5b025bee14ffddb1102b414dd8ae0a5cb29532bb43 304602210089c4a0da7f2319e1c7c361c578765ef12b7cb5d0e751a34eef33c20ff56f7e14022100decc8c04ef9ce8f64732102ad55b07634b84f1b45a5ae7cd686836a6857364ba 3045022100ed73f2925721a2e7e7113675c893f07b3a77e340fcf6fb0c6355ba289447672f02205419aae4545560c48a7325b9487c928604478be96687b5c05e10f4da475d7b61 3046022100a7b47dc2d9e12913703b92a7eb3feaeceaea062147e4006b9b1ed3fb0a38e7ed022100f88d370e95db145c3e720b5d4fa466905229357868311cd051bfe699bae7a43e 30450220146bbc29272776d8b04477a316caa5bf5200a9313fbd861274681c42f1ef8f47022100d48e333e980253ad0d394e34464a9206a42134524b9dd9468c71ea9d694d295b 
304502204cf51017adf0b5b5e257e28f45e0c346e34a8effd800ecf3d60fbe1ffa2703ec022100c5898abed67bb8d49b1435f3a62cfd7c7e2a35baa536d5e328552c0fe6865c1a 3045022062f3530cef6d5972118b790d7e0330e4de156b9e988ab32e62800da0e893608b022100cec6851ed6787ac9cfd1773835ccafd4f9a1deb057e87647e796b4dfd25310ef 3045022100b81e4259c3dab2e2c523beb7205ce484811e65b9d9d5e7304cf8404f2de4d5840220625cae43b5ca4f986782eebc5c9e620177af7c8db5e5cbd4b405d78f52d95221 3046022100b0aa603f1bdf9834625d460f84988e967d02187da78d6608aca508f1671b459d022100d6bf7c4b3867631a797b8e2f5f4ebb84346318d49209abb8307c8f6d7e9ce315 30450221008ab0400a2ef282faf9b2a37c86114b3e54b78bbe0d44c056020a27e812f9b01002207f1e62aba056fa891d6962afa6324950c43603fd8b50f4a6859998a2863cb5d8 30440220122d647248aad8601df685c385d096eaeeb7a2081c3eee75fd81e7ac6023d04a02202c504a74023c1bee2cfc413ae0f5851e945cb88be51c5fcff20a9ec5d6b95ddd 3046022100fe18fdf32d33d6a2d33b5559c700cc97445c52cb7440a476721f1ea133f87b27022100f7205af1e2e576b252f29fcba9c2ee59f93a49eb4cd21571a0513606c4b99f02 3045022100d5a7b9cd4dd1c36b614d9871bde976c2a97058f76f38aab33f301826e1109fac02207ceea771b318398dcb9c1604ea43ba4343ffb070e9b9af9b862df222c4055f88 3045022075ee8f05ae1d1c0678f10d956df9b2514ef625a6605b8201f955fc71c7a501a6022100968799873be157508cb845ed577c5f62fa0d7257d4bd3e3cba0a951764dfce04 304402205b56e472c29f2e040fc3a04c487403462a1b2ada09552c16386de6bc39d2acec0220455cbd48ea709abd2cb2f3c27ebc5492e38cb97f39fa91f94e6a839bc678c1ca 3045022049b91bc821089b487509c07ccad1942a7fc13ccbae536e5321e136371a0c17ff022100a0603f762cf4492876a651f59ca20b9d95f84e36af8f30bb989945a4ba748c81 3045022078dae2ffec61d6471581ac483aa256acccfb84b014852c343491a31898881835022100faadff3d40b40eab1147fa974a2e7a35c93e21d4974bef746b77d4e86e68863f 3046022100d84c5d01d34f561e0ca53bb09efe416e3effdd25b94cc71335eb6b9fe9194796022100c64391cf22fba824860c181d257b2634d3950ebc4454332fc748009cc228f084 3046022100b1605934d743a3383a15169506549e01f77ae80ef588ecd365c795c82f7c0ce8022100d253eeef51679c76915612bbe0457b7d3ac660e65bc5d6185c8c46c51de70e11 3045022100b26384a7d3dbb69a183ac0a0f17f6c053514b8f96d80985f408178ca6dd9d68102206b44746f1f67473b378061125d9c0b468d05ab5fd0a67ba4c89d6a701dad506e 304502201ebd41678cc2cefbab58fccf4956c91bdf014b05b1e0a9cd2cab6e075557df45022100c11bc1208af53ba4a9621913d4b4fd769187a2e288369dd1b4b546bd5b493bde 304502201d6fde3cae88bcfc508f9baf9c0dda85d031196d5ec01e93628b418754aa9a77022100bd42a707269f876566e22a978bd5a946c2ad3cadead21b1329edd5836750f749 304602210089f928494d46c588eb455a8e9855c23b72b3270989efeae3b6d71b5f196d48f9022100a8796f5cab2fc2c11a370d4b471b6a091239b3fde3be793995bf937b21e9448f 30450220013721a27485da0efa9a40925d223b7168183e4eb82450fc040d89edd4fd350e022100ecc67cd952903b34a11ceddbb0bc4fffd5bcb64af96a734850c98b3e57cbb625 304502205ce6eff509aa7e31ecf6f976f72f1efff8f0b9e7010b71f85dfac397f943cae1022100b99a0d6846ce38b31f34bc0bcb946b6a81822fa6da01cc4d54f8086cb8a9d663 30450220570a9edcc13ffa50c7155f5dff83c2ff19cf06aa5438fe89141dff9f10351f700221009d2423ec40169e2b505ce04dbef5ea29c757d9e75b67bafab29247538c868966 3046022100e867e599a8abc47d490b0ef49f7605ea729c55c4121543011cbeb9355057b8d5022100d3848ed57350446ade08a82f99f8dde79a9745720d7f424460bb93fecf2284bb 3046022100cf4a2ddb95354849bcb42d2801de2dad1d4cb7f36bb8f12eb80049b92980fa9902210087789e24f83cf6e4426c5815158fba78ee16265d8a0c79c79175192b42029a06 304402203cfbc8e88f7a8bdb3d2a5d4375ad419f79feb0178f7acedbe24ea8e823b7f88c022055960e47ca7cc7014d433d9f53098b4975bf2e61c2545cc47da5712128f606d4 
3045022014a642dcd5f535d5f5770baca755aa1335caaca3d0bdc3293baa6cbfb46b831f022100e9e74d4a378a43840255604aeb64a05a0406df5ec8e9a789b46199bdb943f101 3046022100f7f93c32528cf4bcf13d787c52bbc923a99034657a98945c5fefdb37c9ec7ac5022100a52fec57ec72b57c3fcfb43a266730440c953d054090eac3d6659303b132ac59 3045022100ff100191cf758df90cdf58a7356c0fd4b920c555c18977a1c82fee8ab51d553c022009486ac9589d52d8652bf7c104754b41f3e0200ad5438bd49f563a3b3dd182cf 3046022100f00cb7ff44bdb9732cc97fb591e56ff6d435743d2d784157cefea2cf16cdd5f90221008d2692dc69921b7c8786d1db48d62ba5614db174ef12e50d5cae8fcc63963717 30450220202485024efc5a9765b9e06d54e9f2bd06dd16c67d6eb949b579c5787264b0030221009e711e3fb826b9b7acfac8ae998d871c9b0ce832a7269c89d5e9fbced93e74e7 304502201c92b37465951f0b9febfea458cffa7341f703dd37e49442ca0ff1789966f4f1022100de4bfc909e870d561b4125293c50d4a246548b8ba8de4d5a1c0191cf8299d269 304402206c500c495b795c2fd15d12b3091ead14dcebda2916f4f6b7914214bf38839c0b02201e9fe133581d5cc4ae4198f1876a1879ea26471acb0b4e6bc38f87f0585344aa 3045022057693e0d83e13ee494013bc3585e7a6f9d712ed030f0b57a51c5df197ebaecc4022100e62a1d31324a1bb8a6bfd9fca0cb1b918fd6faa5ee33a1598a7ea16c18df4e6a 3046022100a9fb5672acc45db1ba0dcc4681bdda8b35d523053aede369bf5fe721a0d7c95b022100f567c733dcaa21765998f7934c6a8d238640f436de4104d0f8e0067c7aaf8d93 3044022006c31925ec8cf79a5579bfc685e9d00fd0f349f852770b251937dd3203e1bbab022003bf0481bfeea3faa30f497c7b37d794021c37c86129ff8e69a051dbfea1b76c 3046022100cbae9847bfda2c4ca859066b90707818187737a76ee4d766393d21f14afcfe73022100fd0a9155d41998ef7d97f3c55871a73b7e47af5bd1b834bff32ac7794de648ee 304502200f0eaed3dbb9b7f8ce95cf50f2c593dec51644a52f035a1d37c0fc5198d31fcf022100e8b422e7da85fca83c4230eba04ea7a0de2f50c13083c6ccf9ab4df17d504499 30460221008a1650514e5bd21560897e98369720f2bba32cecf5e62134583fe967fbc1e722022100c5a58e1ad6cc86f758830beeccf3c8f79a81a29e32fd858be78d971bdb38e646 304502205b40a848bd3c4658f6dc6b2e140bc26baca404422afee51e1187e60cf39ca76a0221008e92c7a67f5cf3fe00a3ce40466935b9682b29448ed8ce3447f3b977c9ecfda2 304402203ea0b997e6ca2b50b7dde3d0182a6861010c23d763b8f64e0c8d32759c1ebf8e02202132a770e0694f5d12eb240523e3bc964b5a75edb66cbbb81e22575b1c21fa9f 30440220565bf4b68f0513d7a81920a07622748a27d77b86a578cba500a85a7419508b10022001cdfb077204a0c757630572ebc9ed9bd781348a57f0b271caf66a6cb5603633 3044022065b536122ffd2dd5417f112b64fe79c97b0c9f487027eebf76020c5eb7a5113e022057991b72d2de0a009f1becc5074c1ad5881c92dfed01f05b863e1b72ddd4bae3 30460221008e8849e7292d112da30921d26f049c8380ca43414cc6469c811fbf115c20a168022100d1270925a7ca937e5f0627df029e82c44198cf110eed63eec8142fa7a2b3118e 304402204664417ac63be04d3aceb0307b2c30d25b541ba5f7796d602f13b5872af44592022055e5dc6787d0668f4bd1c3a7c04cd8134ff9b4c5d3c99142bdb438aa063dd447 304402201e45dc688065769856eca3b2a44728b5345d11ccdcac68ca5255ed8b7bb116350220327d4bed4a9dbfe338816bfbd61bf9a94e4dcd9380a8ad522aaf06dfdd709310 304502210087cc9af50a8f9244061a9d00d507a6ff30bd7ceba8717cb490d3f2cc70b3978b02205895022046ce9f0bf132db4587895d03f55433d1eed641e8f20c8e8bcb20f669 304402205ddf24530e8b31ac80b9e142f6c4276ebde8c067f0eb49669baeda681b7dcee90220297b934e1cbdde2196f0fc6abe471d53e97490bc0eb5be73e872d57ec009489c 3046022100dc0fdc8efbdfc283cddf8248425cbac4b45492048f3379d548c7b387cd26e22a0221008f7660948e7b886600da25758b9439a9999e6f6fa5cf97a722bd24fc8245736b 3046022100eaa58c9060cf5cd05183c7b12b6f01fcd3d9efb66b349e8023fd2613b3e52b0b022100e64a9b9f56919e09a823aa4afb6cd2cdc2d9412722b93d241bf947de0aedaebc 
30460221008ea0ee5cc1727aa2ed9922ecc3351b36c5224df3a89cd90337fdf5a34071fd87022100a2fc29e64ef63ef0eae5900ceb49a1eeaf524cf439b2308d194b913d9e495609 3045022100d2706478b8f2f6ca1d5d5b062512157ca591015ad08740d9a12d9660f46f63030220049cb8f1572a1a3de4ecfffe7c638cf87cd22a2077fa00a63d87587d350e10bf 3046022100c06abc37e5b606cf19df12dbb80eae08a0ecdde4ff66f6cf923ca1a87ddaa1ba022100b4c5ae9d1b1d2a551094b97545183007f07284850fdc1b394de6cd5438182b97 304502207dcf0a20a5a51171745ac73ee3c329ecffa05e54d76833e3e23e646455a58009022100d50794e8f5d7355a8b093488b748c90eccda59762d3260c89a67f6b8b7ea6619 3045022100e617a89cea7d53b1f9bd4e9b1e8758b8dd61816e1eb27cc9e723bba187e6f90702206ed5523a3c858e91d5dc1d79f55a117a05792446632c9a2d850f560c3a3337ec 304402205f4d1451532e30d8e9facccd525e28671e07773501374fdc9a11487b00e8ab9f02201559590614367e0c2e8e5bec9a7c125811019b61f9399c81ca979091611f55d6 3045022100a20c400803b850249361026e866fb40eee7ec553b73a0ab710c9339d4c47ef7e022070652592efe44b28ac10374b89faecacefaa469a9fbe7b246ee4215b9abf9241 304502210098198db465d01656f9069f67fa531d0652cde7f9eaed12345030383db6448964022077f2c7883c8a7e5677fb1875479c446f6ac98c90350ea59f3d8b5c8ef78718f0 3045022100f5d29ab74b062eb857ba022577fa128736db0a285e0290717880b6ef9285702e0220609cbbc0c8ec4595c779572a4c2889b9a6caa70547103e77c30583a67a164fdf 3045022100cd45b3293767528b284fb503db9a76464a017a394e6a7ab4ceeda2f92e8abeef02206a503996270666056e7a034e75f97d5610288d567069e128c3d3d36d7b7d1760 304402204ec7222f73c194da9049bf91a8e5e16f9a67e49104d7e0d478b358a19f2ba34802201e345f1b0d3d9d904e702789b75e85122e3c56ba7d45e095e43c0ac36311d859 304402203d770ce148b8ef9bace14db1c00b65398b7700f36c076bc145c92bef079dc75c022005dd62e54c4149197b411bbf4d7959724f56d568b965001691deca3fa6feaf83 304402203b7a0a3bbf6f26aaaccc70abb33503abdc4a635a84afd0d117a39c822ea259510220216f4bcf42d25c682c2fbc0676f974789f846c257a7037332726b14c75bbf2c6 304502205d9936136f52181207cba8cf596309b87bbedba8938767034cbad7b5b95c3d72022100b274975ab8c3b74f36b03bd54e4dd08ed6778ff485056e6f1ebab61802194226 3045022066adbbe825ec4b6fd92b310fd43bc4d61ef0bae6214b6ba334a740c884a0a64b022100cb2945e240f24982118a845ff12d607d1d2a3bf33d37340f24ab64bbe6d61396 3046022100d59202cda764dc2b9349452cb85c5bb6499c3269b58027c562345c71e25cf4d8022100a1108c281b3244a46ae05fca6829b657e9ccdf97831d431735a1ad106806c470 304402203b72b22032e67c1e44e11a3a489969c6f86f5a46bf44409b8d3c6c67f905d26b022048c3549c5ed685fb600602e1727508a4a667937a3599080a225436cc07ad9dfc 30460221008a7abe4fa378745f73034214b175ad0b8bd6812420415a56227081a3a5b0bed1022100d7cc41b2053412c7e383fc0b9a4ee8d19ae7ace7efd79a6d68ba14f9af96f825 304502207ff9415038eb657f70a58c726d9b1df91f40b1d2deebd66d19fe30db4b91d328022100c595b049f4fafdcb27129eb93b603e4eb112da5977336962be32ba438d7c9c63 30440220298e37c5b225fbcf80d4340ec533e66eb6220d2b1c21d2247d88bbca37639db40220617a721c15e199e0cef7d0f3744e112904ca4a2f3484ccf176be11e620b04398 30450220692c6ef2066c194d13e9b66971f7eea9cf2d5bb0973bdea47fe23b101f03b799022100feb912fa5c276cc299b48bd2cb323d9f9fe975245d9e36931fdd629153cb587c 304402204dcae5c20dbd6d27d4e083d695cc9ff70640eb6ad83e5b2a78a1c869954da8b002204540efef4e3d057466a80ba6436e546612b10636a7c0916c010c3b46d7811fbc 3045022100f78a4a6e44ad651c7c7850ca3701a60b5d3f72e0af12376311cbffec327db442022075654032c4c704f4709110c745f97bad8c75923fdf49d00573f6bffc88ed6347 304402201d8bd4d8cd94316beb00e7874f671ed8327125128f0bea4e39e2c20bd5ba74da02205814c307e530f115e4c6e67e98e202ae2dd15abb3a30ee7e0e7c7f946149dff7 
30450220724212cc882b58f48e51cbf2cb14fa943a0de8383c1edbce377749fa5aeeb318022100a4dfae83df8bff8ec6a3dc2092d6b70eb539acb7ed675f2e7b299b963ad164cc 3045022100a7fa33b5abe01548dacf96b23d2bd35044e5019b48b9c0dd9a6298a8050eeb14022037ce38021d22a9718ce39f47894ab1ce63300c5644de006a965b9d11d5500a21 3043021f077eeab9ea65e69af9f0f3960b43fcc37359c296237eb31caef7af63f6875e02201253639188ee3d5df61157813606bfeb79f99f4ed2e654660c2862e6ba71bcf3 304502202a0d81c6347af274d6da35c716f41899002de090a68e1190a91cf3fc7b34c7e702210089a283121aa68204fb50e2f13867adfeb1baad46324da942327c47c275f78f1c 304502203a1288b15abbc5ec368fd86c70ab7ac7f34415dd4d18df6acb2ab72e3a21eeb3022100a9c93da323ad0254a2900ded775e963afcb93a4263c0c30ec68c57477db8122b 30450220614a3d5a2c933b6e2f4161d1ecf8bb275394406fe43200e2b30e55d9da54c401022100a05e1b56d84501d39df1871de407de81d147317376ec911d3e7116d345772af7 30460221009e5970f20c1c93012da91cf4fc3c16faa4fec0e966ac5fefedc290483d0f79ce022100fc68ca100b68f79ff7f3e6774a7b02e21817b39c50d398506e24ae4de4c809ee 304502201a1daca14cb202ccf0490dceaa7cd61666ca595f12d9370365ae36f1b394a2e5022100cf09aa3e1a8392e7b9fed66a2d404eea8b79823f564509b9fc91da3f502f24d1 3046022100ee319d10b5eb17cfd4efa4b6cdb36f7fc714be90174e98256a51c03cae265c9b022100b8cfa65fe306a51c5956eacb158816081b4d98e015ad71d4447ef47bc10dcfff 3045022100d388dcc23a489fcd8a9b18523888510c9c76a6b815d19dfa0d4a4a59d6b8e47d02200ce8205b0c250c270f8e067f383b4b8f501d912a52454245b77bdb7feab29fbe 304402207134473a0a5b129c51952a3c219946793d6da427ae0c6ca6a7f9a7b057637a97022005c960dcfa4ef3f0ed1fd87fd2d4d298a9c0fd8d0cf614bb836b5dc5f2dcbfb5 3045022100e6ab29347abdae4ab3daac67b3b242e2bc8dcd6b99633cd8de88889787548a160220060ba8a22f28c487c79736f1c5ac485fa0a531412b630121565c1cd81280da92 304502210088889f745d0c53372ec7b3172b1e5ee0352291c158651bcc6ebb0011a73d2ba002206e6811f71aaf8be4a17af08a9a86f52e7caa6b3c2e39324564cb287e88318dad 304502201a0ab0bf51e07a35d6269c74529606a512f61ea76920505112167c96928e2664022100e2f8cbd3464e1ed392e6301d977cdf377f9599c616bf357704a64bded5884b17 3045022100a59533d479aaf0b33ff76ab0eecd5db628b104df6b1ccdcbf0017ea737a813dc02207a1f4e97dcd0367310714ce533085c5f1c1502b75505886acfaa4cf67c20e5f2 304402200ca7138f1d0dd11ed76ec89ee1f90bf01adfd6239e1491a5ae7cf057e5d92d9102200558ddd0b1784c244afbdc27f1511dff885c062b1815a65e85f7e1c369c1ff31 3046022100cd5afd48b3477bef8f51d418744164f8d1a944a4c07762f52c0dd957ced038f0022100ec743d94c178303e34ee896d7764282bb62ddc178cb304a836e9008ab3a868b0 304402204aafd47d2cc50f29e953d7cc83052ba080709aee1cff6688b2cf102a6fa8536702201465651008036dc12a5b38ba9c1c8be8ae548d945bb27ac780314bb90bb96dcd 304502210092d2c40da2bfd2067c537180daae9f57f4657e617bb99f1ee9f38551da17b16202202e0eccc4237fa84708b43efbf2152dd08330e36ad20c910107eb87ec0b81a7b3 3045022024a4814d538e05a82d58aa2ca1f9292cae03189af76f84538f9edad49d3545010221008ad92cbd522003b907ebcd3eb9a78872dbc12edcae4e918f06aac48e89d885a9 3045022100d393c9237186f44560cb7ff7a07bd80e01fae74074e044cbcf448abaadf873aa02206fb23e8dc155e8dcfc4980f1f70a9337280b08b60984ec7bb57fea84166f60e2 304502200eccb6de670610ed3d16edf80b04cf9bcba2300422aef58be21ccb8bf63219e1022100c086384202c2fb99a83e0d162ab507fcf6c3458f20684f4a392ee56859587d15 3045022021ed4229f8db09413224172c35b6adbb57932de187735865d747cc860ac3da9a0221009327babfc7390c1f8b8d3399e4b7cd407546262682df01bdba9fc44c67905a86 3046022100e75e9a2ff917c18af2c7aeac4e776740b8fe7f057bbb87f9361842c5f255e191022100f2c6aef1e624cc58117d7be65e2f77d6be6ffe2e7a9aa8199e28cf9338893d7f 
3045022100d94fed115581c59826e077cbc050872611cde50d82388202f495aa055b76994b0220713c1cdff005ec6be288ccf661090e553b9605389e9f12e004cb9bd4c9006b45 30450220391fefccb4dfa1c081befe49347f9b55d1284d3751f4a432372e850c87b85b750221008b2392f81e4139cab6dbb834d1fda1ad80b84f7870dae0322eca0fbd03287de3 3046022100a7c24af2c30aed577a4019580e817598bf3f0c2a64dcc28ea1f68a579b826f460221009bce2a36a6e5da5850ceab004bf5fd91e6658466f653442f913ddd52cd80fa16 3045022100aacd1828260600d6f334e387640edb02abc199baea3882c67f6256ca670ecc2b0220640c85d41845706783bba63b4d305cd4bab47a5bda6550db50285dde76548bf3 304402202ee4f5c1bfcc4fced3e37dc64b4006f27e4fdcbab5e3fccfd8338c6900cb5c5902205816c848415f68659922e5fa0fa076dc5b347dd7002816da5436652b1b60e13d 304502205399e3fcc149438ea12d8b76c29f0a9cdfcc6fe295960b0cdc20ca8ce12ada64022100fe18d62383aaef608046349a8e1aa9f9e380e18ef8473784b3096435c0f7e988 3046022100f967115b165a383752b170ab596b4a6aa86e5d3e8b1b85574d3a988863bb9b1a022100f0144b25d234a3536cefe787ce382bc220e306ea4b38535aa0a3a841cc14fcfe 30460221009b35ea7b7be207fc95a218d18f3e0c40e7c7e605c85b8302f6b05c1ee1e6a667022100a727269651c2006bf9fd1da3424eda1c4c490c48a4c78c15fb04792a45d32538 30450220467ec3f5ea4d0aed50762647cf5c0fee22b7a713d47b16b4144f66703e9534a8022100a4d98736f80f48c70ae61f76bffbba6fcf1cd939f0f0f81467e58148b0f07359 3043022059de6e822e4c4ae438929675a83e03f300b7a52c9b8dc9a764c4f9c19bdd89f2021f73ecd4590ff3eb0dcf8e0196ff7d635176a9a48165badcd647c2d0038294d6 3046022100b8055a3b9469943b28799e5a08c587c8597e53c043b15b44f199da1ba21cf862022100afebf9d137f65d5dcfb1410a79748db3d7bd98041035d2a8a50d47d23aa73138 304602210092b71f2fd76af05aad4333c1b1376496be07ebec8c7eafa8ed9e3eb7ff8d3106022100f2dd5353c031a7d772e3f4b9aa99749cd4f17026764386bbbf8e16f2f1785162 3046022100b9ac2197905751c9c83e41fb686747fc2997f44f3eba6dfe8de370d4e145780902210098eaab1b6f26452739f83ba733513563dec1a6c6a3a24baba003426fadd954e1 30450220278a8d7309aef86600dcbd862e2e2797d6f553413e5cd3827c0cea662e638d51022100acb9afa2f0febe61ceb22b3c4e7fb305ae58483e0d54ea820d426e08a915867b 3044022030a4dbde13dd0aa37ae53f84e1fb8ccc0a356a751c824c2cd2a7e1f7904c682a0220464e419f1fe8f9670a882480c566061219558f458ca42f81b7cf682a2cc623f1 304402205fc9acbca80f764aaae4962e6b8f5722c0bae10348dc0296bc5891ca9caf7f2d022056484c4c93207b5f749d0713b444741f8584da4e3e7c8f022ee9e2a4384a9766 3045022100ffb3c42e7625ddcca5c18c5cc2fdfb1bfe76e4ff7471a08ddd3cd29a168d9653022053b7f5f1bb712db5ca5ae50e2eb7cda6f2037034018ae90b1471c42ee9112200 30440220091383941eedbf6f677ab2e13b9acfb7dd2348abc45d8d2cf549d7269562833a0220492cc07d2de3d6fc90dcac793301a21e41ba0709ba38485d8e9f80c66e552cf5 304402201304f084fa72d7ff007d82e9c32af70100a30cd930ad98d1d3e89032ccbdb8930220074a82b697cd4c0f1cb6526a7f139e4d49264dbc3e670f2f37aa7bbfe707a7ff 3046022100dac713212c2e68ba3515a42a212c330ab991206e9c00c8306e0e40a0ab03e397022100e308c0efa6faa2786e81cf57b30498c6a0a9f470613d83e1b50f36de19c2d8dc 304402202bfdc26ee24e5b38cd1ed47b66fac68be7ee71a1ee7da59bd59ef011584f34dc022070f58940a44fffd5ae621797bfe66ad4a778099bb4198c31592a26ff139699ea 304502201809269aa3f2c081615b73caaae35771cba1125ce3857e28f560b62bf53bd2bd022100c0e04592de2ae41bbb77843d18157dd49ab1772a576b9f638d8d13b8cfe7c0ef 30440220058343f62a27a10e084a04f6596c3f651c1e28ffe886cd0a51357b8728f169c40220303a57399248882e8e1be7224fdc2184f53e8aa22c200c863de2674703c1640e 3046022100c24c1b3b13e1b22141f24d07c3d9fcc0c5e901908381a3cf26f2bf17cd27fa840221008e5e2cc03f7aa710e9e83ef6c9c3da9f93c840e3dfb4f81be698cafaa9c89fac 
3044022028c0429c3859ef41a377f7b10d8e45a4f95f06283faa1fcde539ed308e96b79a022031c465f8cd63e8b8c140034d1ed8dcc9997e04dbc37524300ba91c145b6e07fd 3045022100e3129b420421083baa3e123ef8ab442c1e79eb6b9e0c8eeaa021b084c1ca47fe022056f16e6052ef9484192a94e22c600a45db7d3871ba18e0d53d49055a9c583be8 3045022014f89fcdcb21503cec4036ed067ea525874f28ea16075fb00a2eea275c843080022100e32c54c452c302bb27e690edfdb8ad6189624e20411a41311e2f843f259d71bd 3045022100ca68786f26d5042add6acf9dcd8d9bd7bb98d1d7d26e10bfd269f143767528ed022014dc3149f904162c0cd7bd8911cc7157332e46f4452c0a1d084a2637b97f5bca 3046022100f55f6648eebf9f05c2e32438e304ab840627e1bbe4c732f2f57c0ca4809a45160221008d34d713183a23f6abc2014287b9926096f9a78df04da771eea4ed7c75762c72 3044022064cb8432dd349fd51d56769a002857f4f2843bfc91d112d7e00b84b7314eda7002205a6ad6f1c2b31b52f20a5c3cd2a44d8736dfc8e526dd12690a3f3343b6b0dcc1 3045022100e92a820b80da468ce117f0117397ca80b74b38a55536f1f7f8a269ea146745dd02203b54ccfc8c644c5b8f8bda794ee821d68fca735a9f44b7bb73e7fb841002acdf 3046022100e88a025f42594ef5a518e79e70bf8ce34857e99e0e1cc6b22d9a4dcd592b8520022100fdb3f33e29d57b86505a080c6c7f9b6effe513767459528a0ca2eec68b6e0615 304502202d4389307b8b6233423b21b6ebe5279f32695cdd34fd056ba71cdc01b12bac19022100cb00ceb76d2ecacbadbd0b818f6771af71c7ca5d44262f38dd320b353a5fbac6 304502205dd9433773d88b132438a26494eadd003920b7306501e4bd03aaa34b17da7a46022100bfa0e391202afc21dd96d1098157827aacb3bbe052fab3acd77420b02c56710d 3045022100cc6255dae3b0a4b59e9235afd512ca67dcc7bddfbe20c42e592352596d875479022060f56e025770642da1d90e87f506af34ebe55ef9dd4a9793eb712e322ae244cf 3046022100e5ac471bded94071fafb3060726f7b9b1dd8add168931035ff216f3eecceb04e02210093ac816d00893b80f5794fd3c60caaf87d76b900cdf5305c7fc7c926b8a9fbdf 304402206a10883d7146b02c587e322a053ded6db42621d015bd38fe8bdfc06e9ce6835d0220127415bc54e8b20edb6e51589a52e5405482f2211ef7c0ddaa3145a0d9f4c9c3 3045022100f0fe0d09e2e91705cd8329d3d289c578098299e9fa767260f1ed4a85bf9116a3022046375282aea879e78a4855dcfd10f87ecf80df6f18451a18aca1c7590631f45c 30450220725871fe1e473bb412396adddd2486ea2aca44143a79919fed87ad57c88e80b9022100c8473acb9cc2e0f149c3eda32a98acdad0a84e5c9ed084f58df69ee3b8d7da65 3045022100e96e6136e9b682e00e7f00378e561ced6883e56d0045dbc8bacc72096e6e874c02200a4cb3c261b69ea0168dd2df11d470ea0cb64c6d887e67651f1786c6c299920d 30450220373060aa78b550e5f037d7197bbe7d88c80910ddeacad088cb4015deba08d40b022100b88b0611722ada73a67cc6c4f52bb211cdfef8a4069269b645525c757c09858d 3044022073b169d18673ea16d22d296d63227efa29be9c9fe45bf8e2600e3077b81a8f7c022014a53bb16a3a38301a669c86e845513a732f62a9d354066449e5ad9d8d25065d 3046022100f1984932124f066770e7d8e8450b78d30335e216454ca892d85c325268a03738022100b25a3829466df32c9be9292297ce9107798d3ea2e47b8ca4a65a7a21621d5ac7 304502203833f9051e94ed86ff64f6620b99c61df28e5712c06d776d4bb50afbbe742945022100d6836505bc35c4de4d636b487f033991b6242e67aecc77d566b4673494c0571d 3046022100f042bb44ff75c826fcbd4336e24f7bc2e455a832de601cdfbae038cc217c7e6f022100c4d60166bcb5d6dae61465d0c1c29805d424ef684fcf83bbd972027c2351b172 3046022100d87b96fab47b966e78bea05e29b3c620dc4df2337159ea4faf95ea25410d3f14022100c187d39fe4f33f8bbd41c4421b4f2efd631b21fe7cad9a20ca5bf93a18a1e7d1 304402201e853f9d7c79fbb42a443f3cee2914dfc00da5544702b0131f8913b95817176e02201073e1da337996834660f1af1e5bd48dbd8f9256dbb02413f08c0cbc22c1d7c5 3045022100a0491cd5f9dd963be85993a3c5c3e1c799344fa8f7bab3e42be78b76e8bc1b9502207ef3fd3c007a72a20ff53f52e71c473e3d0318ce1711c34409a5c2f288c11d79 
3046022100a369c8b0b46e400f7e70f43fdaa6d080bd920a74064f711edd4d9a10ae32d2ce022100f99b7c016c9abb77cccacc66985718f40000054f4eacbc53efee2a5892980d4a 3046022100d906f78ed9d51f3c83a88b01ffd75889953c95c3d2cbfc7162446fa02c3bc692022100eb4426b2f24ebe34e1dc6b6c0f0e0308a19ddc0550749993ffd6bdf3430d24f6 30450220704fa62c77a2702f7e8a45fc9de737b26970cbb5fa39680863f44652e402e5dc022100fc29055ee20bd962305a2a3e75f002501b0962f961bc22f68fece40bd5d3fd3a 304402207787fe18459cacd2f09c90c1ae617c5e8647dacfa7404b3c4dea83621d1a00ff02207b195f90ebc35f46dc7b7af5fddb71240fd714de7913bd0896721f4ad5ab7f68 3046022100c70845a0ac4c2035c578d1a85c07f31aeae70c59a422fe08f3e2aba3c9ba4420022100b6be7c447e6fb12630898f8a18dae49bc96cdc9d1aa94c1d2c9171d61ffa4ab3 3045022008a969bf2a05c582c314dac42ac0b45904923d7865d6c166f4b260f78a9f22a8022100da34defabaa689cd0dc2f4daf0d85a3741b452f5b993252f90ed8752c1283997 304402200940f4a1f4ecb8359664bb272d5e01cdc146d19ac1a589efe739c14aad0ab12f022076e547624be2e9379864d1ced65b67328c744c8c7454339c3930c0c2f630439f 3045022076b4c6193ba0d8e009a2ea20152c9750197ba9733a73ca1d4bef557e30bd4b66022100b06224816b5f254ebbab1db0d80673792ae229ce7c4f2ce3b6b4892f4e6032d9 3045022100ecbbb182cfd2960ac0abe76e2c0671da0f52a1bdbdcd2457cf5e08721ba4c3620220201c71c05b01ace76c1694e84ce6c500cb4623b3ece21ac66fd6876b9884012c 304502201b04ad37a3c9605e379529f7983157ebdcc8cb0f4d3636e20ac9f0671477f30b022100fcb4a798824765ea51119acc210d71460763c3c576e82f3bae44c82f356e42e6 30450221009bdea450d107de5e944554767470405f2d12177ebd04b511aea3f0110e18042c02204435317d27527e60cccf204c8ff6eb99e31cae8930528e6861d655cfa8569f90 3046022100c3643920488392b6f6008a0d11295cdb5e5d7d64cb472fb2366cb23b4ed0cba80221009e8c59d21606aea942ea7d418eb1ab94ddc4668f3e566f8d5577ef72b28af212 304402202737b4b41848dcd2f64b81eaa7761ae724761a00444f1cbdf3852281ee2a266e022067ff4707151e3227336cb5b0bdcfe3de70c2e431d822f4010996cc9e9d94e290 304402202acae4349648eb054237f57c6667ff10c598ef47af5693b36c19697ce7d8f3fc02204c9f518e7a56e3757de1b6c07a514af82c1cb4b8de23070eb3065e6309dbd43a 30450220478864e5436a5dc497c6c87b3fc1c6f6b015745e7249887c8ae27ae7bb7ebe20022100c303a204d255ad372905263700ec2ad8386645160c80893aee19fbb67c79afbe 304402201ed4b521c6a74eb871cbbe52cc44b410a8783e2b6cf782d2dbfc1d5b6eca3fd702204bc559c23eaca6e321bd51fc9213cf02c7ce5fa942d3ec8e686adc9371eba4ed 30460221008103dc0e6e446af97b1bfe51f8159f227816522e1ef047733cb03d4f03fc82820221008bcb690287a191fbb79ddde3a108797fa5db75c7a591fbf7b58ea2c5c8cc7950 304402207270774ab20e07af6556ebfcf0d81054a098f0318b58f46e2df64e7369c4415702205e2cecfa68290fbb524dc3969f322a6ecdc85b313a1e7ed7364085a5e4789465 304402203030ddf09a994e13a884a363290d88905fc5ca80423d9792c69bcfd1d22ab76b02200a6e6a3842b21d3518fe6c9bbbfb5298d05edba3aaf7cc60b619a724e8ec6ea3 3046022100b6e839f5a45252d063202295c78375592a5f1d0c832e6f28fd6df7aaf268e138022100ec93f1008e6b22f5f613ab1ff20de1fe0519d9546f052ebbb190d67583cbeaf1 3045022100d020c52efa05e1b56845de052dbf5890a643c46a3560650a2f1983182b9d2673022051ec118f6a6a5427ffec98c2914d73a6eeb3c5da23942223f607c7c6aff77a99 3045022050d4c2ea1ee28d6fdf45cfe5a6b0315344f033672beca33fc4c1ba88f3575a39022100fdafffaa125bf3506c59cb239c6123e83bcd18cf3e291e0e1414895607dd2721 304502205f648e5c19ffcfbe11d73f22d33fdf70486ccadefd3e328e45dbf71e51a6134f022100fb1bb4e1e75813aa476148a256ba970788f44b8956e6cf962cc746bcf6a4b079 30460221009dd44cac81f3e651ced50354cc522f2a052706c78c57c9a0c324cbf12320e829022100efc98883426097a14b119317e23124bfc323ad5f64a0abb084c2165e4a6b589d 
30440220278df2f83334aad1f1dfbc9baccd396d641d60b9d2fb4b65f871dace14a618dd02206f73b36f72158faef128a9bfa63433ae05ab35348365ec1da65240089205632e 3046022100a686057d4d17686fa5d8b0d76a31fb125f2a1829c388a46b28a138ca599c2324022100fe7609c5f6c8fd6d5a73d07df8a6d9deaa07eb7a9da729e9cabb846f69aac8fd 3045022039ed1512c6b12e55107fd9467e3cc951cc89c6ab9daf7d7b445e00b67df1e23b022100ce107302147ce4fdb8d84a8ea7caf7b4fd4c4c154a8a8f9a9e1fefbbcd2f8465 304402205aa6403e1425e5eb3fc2064653bc51022fd5b0fff326853e59894a4b019100cc022064ecfb0e21c41167d83f67489e7e2d965a2be7b15abe6e080d02d3ce9913cf14 3044022019bacf328fb39bcad09fa4c5fa111c98f6d3a1fefb6a22e621590c88d879700002205e6ba894ffa27b84939ea84563da67afba03fa6872f9f8542aa7cb39d7bf319b 3045022066b8a0d5cd4eae2c5d5fec1561febe68e8c34fe522874cee782c8fd093f9e661022100abca88619c59f4504b6295d50f4e9c06d31d753785a9f2578e54b6e83fe460b7 3046022100ad525d3c45a40e22c9fe9b0dd47770e539577e0753d7a29c26bcac69793f8fc4022100e7a4f33e88903e15677ab81c2c766f6256334e7ba670da263bc458c1fa227846 30450220328dab5c64c8a864c49c9fa2fe58711cad8ed2f6fa650f0e6eafa3e470868485022100ffa5ab2604262cd092b730fb874b1ed660a7c4cb3b7dd6e5319d13eec147d4d3 3046022100d0415daa59035542247ddf9cd9d41edd1759e7d546cb1b85a4e5a95654610535022100e250b947451138b3a5147a9acaf764495d0210a0c4353b494d4525512bce3004 3046022100dec15e92bff40674c4d4a478fe2a13cd460a6096de2445d6ee25bd78e7af9208022100c57293123ea31591fdbfcd49b4ab7a7afe6eee8d63a382536754c332e149197b 3045022100ab39fd9513081dc2afa7302b3a7cdf0f73d261fd3bb9ba6fa5990f879a5100de0220061cf2e25d9b9447f7d38f8b79e286a468ff231192471625897aff59185b89ee 304402206f5a47e0cd4cb1e5fb45854887adab4f4be1cdf8684d78cff0e27085ef665a0102200da5b318f9a7701730d85ff3799d370eb96282f0a2e1e788ed32c45db7e68aa2 304402203dee8af917d56ef6492882630f78af93270869e6ff659c5ffcbb6216ba9f9c5902205fac270e1efb3c86afd671b67374579cb5f81e541136c14df257f879543d9934 3044022100e0c3576776cc03d49bc25f3ccc31516dd796c3cb19ee022c82b2ef0e642ad9f1021f2bb7f42212906805d36284b63f1cc6cf9ec9b8483b8cf7c3c70b56dd7c80bc 3045022100c8b26a06b44444838ad5a40d14660c85f1c36631661be49889717c3d5755ac4a0220237c1658005fd53e95c75ac17ee481e37a73f8e94e4bbbbd749d1da8e127b1d6 3046022100fb3986f41df99068f947c6629e195f8fbf7d81e1822324dcdd86a60885b27f0802210092d0344a1b9828104cbd8d54aab3f92eef938455975d48306b0787e4d1db50f6 3045022036f4c623289e8ff81e8d16092c43d6256d79164308c925cb3c620ec416a19bea022100fed1a2c68dfe4b4b44cdd9f034e07dd9c86e2df4c7c3a27e30a5f35b75fe6712 30450221008c7ac23243b81c9e1c3bb2f6c785732ae9b7705bffb0b6bdf9cb7fe67591d5c0022057f5a563e97a0bdb0973a70ae8a078c56d00f2b804fa44e290f72101115806c9 304502200393a17df7ad31da8b80177f1e8b96d1f6b24d81ea157a3daf2e0e17b83101aa02210083d897c89d92b2ee86d5f8938dbb44d89d85dc4a38b31c9a4d1ab902b172446f 304502200a963094c0d01a2ff901bf4385b4bff34a416f934170b1b750e1ca9adb74b6d1022100f5bd5f1d9ef97c6e0c3e267dc18f3dc3cd1e84cf28d90d937c08496d7a3ad10f 3046022100fbf9affda874fabef4261e9f43db304381034a1d9e2924db3d5542c3d9cf1943022100f33afd223402df5dc18a14cd21a90b8cd12a9ad9be757988af8795b80852d2c6 3044022005f0cad70b871d77d538d3c0a6ecf57a44048213b43480b11a494f0df1b2602f02203ca701e1603061af79ed5f7d1756e34bd8e8de7ae61f1c18966a24ff7031b4a1 304502206349d4f8955368b83d1fc5807bd3aee0dc399a8d99cfeffe900ba56b20b0f5380221009614bb8922dc11845fa712086454e4e8e004fbcdc89ebc4b4801caa4883e4853 3044022073e2953d5657574e7f75cedc833ed77f04d20802730e2ab364dcc96d46306f0e022028f079b4af8b7896815ee528c7e7002e49a81bb00e6c5d7067c734ef8ff53c00 
3045022100a2b13d5691bf236216f6404b74100d232ffb8b04fd33556030143d975447434002200614a8a01c38ca4799b7fdaef356e7141ee448b1ae50f8ab8e87c95aa9bb0b9d 3046022100d418bc43ad765cc1e85c6d18a99eccff705924abbb1be7004989e57a5b9f59c7022100b6a3bf649a3e70e19b363f868f14b8b9454cde61824f2c17488d215de8b22ec1 3045022008b28bd6ff6ba6a01ff566195c1fe8e11084240e3b16dbea0425d9999f15fea1022100bdc592f5b54b5b7111a479dcc283ae452d1b5278f0edc7b5285e5ee31ab6754f 30450221009388e074adf7759480d25af3597a8b49c76f2e6025adac4fca18b66fafe00271022040fb6b6f0bf97c79878f45312d766855443ed848cb047ca5a43d00c595d9d48c 304502207289f5ff5c746cf0009cd4e329dcef044d6870549b58209b0eb86ef4e5583e7c02210085b6a47f9e632714ecb01bb8745c60685131b8d70cea4a98aedd22c2df3af742 3044022071105cf9638a35a81eeb2167a431ab5a55bf98cacb408e6066b1c5ee2588f75a0220344ecf9a1ef77f1eb5022e0af2bba7f1b6b5f1b23abb63ad8c67790553bbbae2 3046022100c4181e0706c235339610c6b396d8ffad92a8faf6f52d4b510ff7bfd1f0642210022100afb55a036de7290507e7c77bba86918341b4ec958179674ce1f0c5c394c84b9d 3046022100d361ca337e6154f2d9c8e5d1f128ec78a0705c7ee16e8d686c24a499c8296c4c022100b8a4e8ea01f61aa616b3c52f382f4641e71481fea8e49afc8ea724c8518310be 3045022100e8ce61bce6dab2c4d0eda7fec4b39f68000b3292eff4fdc50cdf298fa2c2b432022076551395461ffec7b13b453fdaed024ca0c1426b1c17ce46768d951c002e739c 3046022100b3248efc1c2bade847f09b6a30fbd4b00273b902d3a5e8fb1e61f6fe13fa7206022100a7df4be404a581683d0ec24f8d9a2894606d151687f5fe6b2d9d270be06ee404 3045022047de6e023bfe3830e8a511df1d2ca89f4891d83f2a70271fc65290979d945e0d022100c1799842868937cf353bdb73145578fd868c1f379bc18857d0b828111417cc6c 3045022100911d5ad1936cb0f953372ff35999b81d81acefa04c0af53ec7d19b7637365995022039a4832fb21ded25d7788729f8cefaa02793a857f32de7595b464b5d0eba7741 30450220793dab9ec9fc45c6df96682a8fe088cf9ac5ab7c803ada77ff454963dbc1fbbf0221008a4635c81061adbdf0b8b7d498c984b119d3ee7b5400890d09aa9c3f0258de5d 3045022057688f6a97d9da7f466e2e697af2515fafe58ce2594fa72896f56ff50cbd3d69022100ee8476892c83144629cc114a11686e4d3f121289a60f16c0166511aaa1afa99a 3045022061b412ee676a47f3a1fe0c064517fe8064d582f9bad87e0012157a242967ae9d0221008d3b980c7ec575cb0a7bc48856bb71994a576683c6c6b5fcc8a4ec8e6d0fedca 30450220411eb5eb796ffe6a90888c2a321dcd0077d1c84abe7bdbcbeefab8f058070fc1022100bef55789b5eb721be01e6e46e02b1e32e38cf9f21c8daba25cfd27f0d5d1690c 3045022056ba36a2adeb9624ad8b3523eced85ec3a7ccfc71f56cb2ad46a0f19aced5127022100e5b36d96a92e15e7014f83831509273f27cadc1421908fc0b7cd3d46cf19e67f 3046022100a5b5cbbe94ca481715916a4b5c456e2d792b8b597be7d9b787f6e20346d49073022100d248d30c00852a3c42d1b53d6b48f498eb1850e18d333325c30079e2981c44bd 3045022100e7532f765cd8b67ee1aad7cfc61606fb427f9ed8bf87443a954d43194e85aba902202655d01224b495bacfafa5b6c450fe4d8fa09545fa106edf0517b42bf083cf3a 30460221009e5698cdcc3a6c5282e54ed113caf09ba2789faa1595c8811a1c2cd7afb0db6e022100c9b889d486a1da7ea135f15b27f49bc8a011f7fd9ee86008d3715af4a802b02b 304502201c888fb638e56e6b0f5317debb09ab4e3a0fd0fb72dbbc654a236789bf88c1ab022100bb776c72e8aa6864dc736a54f5df409ed703a9e86cedf4ab9d7fe2ef32bc5afc 3044022013bd817e2ac38afd58d0364e6746ad0516b99eee815e05590182d93980c44de002201f37f587861ae8347e8fd2871d59713475560cf85fe726fac119701f33e1cb0c 304402204b832f30e3788d30837b0423f45759409b89d1b8da72eb30496c5b28053ec40902205ca080e5d03c2a32e4f39cd5b13641a8cac43062c876655a0a58bce957d16a18 3045022100e9bc36db69e4c62000d3fa3bb86e364d42a3bb867804265f2063221552a09ecc02205db6682a18f154476a7ab90bdb625aadece2439edc84f89e81466dfc30b6df9a 
3045022100d776a0f07e4a72c39e9137ed38262c4f20a099bdda7e78aea601183d45af2eb802203f267c25a346e4246692d3e27fe127d22c12a01e00c159349adf5822d3e3343e 304402203724d54e5f6039043753f022b57c11c40d1c370a6ff1cff1e794dedf0142a0f9022027a82cc9a9997ace610a4a670133cf94a73beb45e38883f245871f45dbe92c24 30440220147383c138f753f3e4e736b17f851ad88e7646df1b66ae11f72baf8e0749d2be02200c1c02bdc2cb53efe1898467c2f4d9bf3b87debaac89db35b881dede54739c27 304602210095fa3657cd6b829fb6380f5297fa3d865faa199731b00d916d27e6ad071fe728022100afe1aa3453c91af39bd9af97a8a696996217c461f7bc19f2c2c3d1776242f8aa 3045022100c6c31b9d8f3cfc1f9c9674aeab3b922d64bfb46fe5505ec185b77405a290b6e20220347fbf5be0b9108988ca7854ae9ecf8a13033049263358db949f60d87c7cb15b 30440220360a83de7328ad81963e1f880baad8bc616fc68d132d462e542d45174f4396a1022024d3ca261968bf9f686db019b233df89882993de9038beb6f5221def4279698a 3046022100eb9deab874fe219813e6b1ec163fd4cd1f8cf1c994c7adf0ea9c196190db5b02022100e4198572d3182f1281ebd374e6ee61276ab6572f655b1b063f6b36bfa5c55d74 3046022100d1455c68e94773893967040f424d98d04065f247d7ff60333f62b3da93c31605022100d15a6aad69398ae5f4bfc1c31ce634b37f324851c1cb0f61cd67692b6c5536fd 30440220067157850080f4e81fee77345def8c9e8015c2ad386c825c3ae4789a4b42f60502205db38f3dda9437ea1e611d8deb38fc21281fd9cf70235d378358f29b4397776a 3045022100ebffbb8487944ccdeb8a1611702364277de6c5c944235223cb75f597acfadf2f0220454c20244ebf057d1f0f40eee7722e4d14f45ddcce753de4748a51e7ebb7976b 30450220737143b8374978fd7485509ad2788a5ce69fad8b4b55e08e4b108f9f29e3d26f022100c6e04f8161e6fcbd39ae1bf32eeaf708d3d2524d7b0439d6e7e4bd992e65e577 3045022017ec5e4f3d74115caf8f50581e3e46a5a44fe3be5a7f4b2b0879d946ee369eff022100df2433d8b929f45abde7f1d00513f77ced0abb20db427373cbcefc34e2743a87 304502210090d763892dd40f7639cc281b9e24b78deb417aed729b3c7a3f8e9b2ee3e4a4ec022006d1e4fce90c51693345fbce42ceb0366ec94f189b199a1da6f101cafc96f04f 3046022100bd62f109ae8b8a9661e828c2dfa2e17e0254f9bf6b45baf6535fd1bc3673daab022100cb1a985deeb565e24886f7a46aaa815c85acb054a99d71ac3472fde7b5622026 304502202a6b045a6b16d268b07d3679d305bb21d4ebef3d3381927dc948bfdd251bdc8d022100b6f5f2ebf7295762c42f02485a82d57f0e736cc8a0eb9632ac99255092a301f9 3045022045f2e275e066526f2d7489f13c58d8a0af249f9bf5f42772bb1796d17ed8bcd8022100fcf55358f9163836c54dd291bd33712ba67dcf5708e89e3ef070119fd28627b3 3046022100c7eecb3b0ffd1d68e79ef56fc451a031368dc86cde09a83127c73ff5d2a2c203022100a96bb57ca475fd30b9f98615f30c17156484c85ef7366597c8adc93d2872b2aa 304402204f66050a93c6a0cdfe0c4817add03cfb162d1ce78e0d94a858c78dcd9cb7f56602201584c6af866545683bc69346d1186eefd04c9f6ff65b0e01373042994ebf025a 3045022100d2f0eb152de61d4726608ad1edc095d9d224b146e73bc34cd0c2f9f44b80750902203bf07ac1520b984a0d328588349128c16fd4291f68bfdec3328e177c3f542edf 3046022100bb8c582452e2d7460bb2624811649023c4c28437a296976fd504b7d7e85c5c40022100a78f793fbbf8909755232e21bce6c6d3bc8586f3861a4aea9ed07add6fea6b57 30460221009daf4ba94c628244d8fc444a7ed26dea3eab89678fc30b312365c45d137246e7022100974e3158f50d454aedc82568d7fa5812957c7acbddf7d3b748aac22a5a6a397e 304502210086893e7f4d0b3695156ad83a8275e8f571a12badd31d08b34373a3041862eb3302203bd20065520851642216a816f6c96e2c97ae38cc0befea3d7495570b97defef3 304402206a7bef086de553c92122b167a3612b04b897a5708b6814b05a74b549ac73b7d30220277c857affecc9f62ea0f1bd6738e3dfa636d2a0854029ac359f2801abc5a1d7 304402203636efedc16d75f0cae321cee73f9a8637441b779a5643b487a7bd3064dca5a502204c3291da2f067fc417138657f846128f7c0590866f6782480d610bcae7af532c 
304402201ab643bd74a089b31998827e7eb4195cfa2093eca58c9670804fc047b392d02202200140f6663c0650407f01f677b90401123ba3120177366b9f569f000c0ec21b61 3046022100bbfa5c1fecd928f06649b5f1f71303d5172ebf900bfa99e601910b6bdb9969d6022100be00cc2001dec0f3a51496ffd0022d3cfbf92f74b519fbe09d153349b3b6f99a 3046022100a8f5bde3a73b9a75cabe37cc7b9a376ba26810fa6d34b03305effd51af32f26a022100e3fe674fad4d3b6462d0c8f059123586640804fdd58374833dab8fcc03a8255b 304502206669bbb4ead6dfbb34cf1b8577a82739c4aa16b44e0c2ed48691296ee4bbe096022100e6c939869a7226de9ef11305c8616368f1f8929228fe943a1f5f2f06dd3eadf2 3045022100e71267a10941af81127db58309d30a9d12660a8e751a3dde3e760ebb177b6444022002d60f3d1903a3ecdc628dcc58c5a6fe0d8726329fac76f20eae047a69d7ffa1 3045022100c345205ceca8aa41ea8e43c3478ec524db89a9d86805e8a9430d23defb2e6cfd02203ca5981e42150ca3d8ecbea22ced1595f89d441ec0219a5a65f6e5dd47410741 3045022100df614d326fa9630cf8e08b037ed2e053b52f393aaca63bc0170e84243b1f0a67022011ab0dba69acf85313532f454fa49e3215a136feb3fc61000b99ca7c40bb8a1b 30450220670213ca95e8383bfe1ae040678aac60dc8df3b355bbec09fbeb8cd583ddf0bb022100e363431442872478f9b3b699c40dd6a27e9aa66b78205d1c9913238496cde2b2 3045022100aee5f970560a77311899708f9b7a01aaf1cc8a2cb4fe1663313075b76944e094022059aa830d401d6740bf58062fe1ad82436069850401377bd8a498f114cf96fd1c 30460221008b891c70bc0c31a707f169d512dd47f6a2c78ec62cf1a9706c75a4dab468d5e8022100f374d4d9cff1a3d5600d4783af37acb3bb5f1b718bca603c2c476daaedce6539 3045022100a3b43b52a3bfdb134ff0ee99da59e1b5ee416991f7de970e3aa2ce825435d6e702207124e864b31692175997f3119cab2cfe021bd9e6f810cb47988fee26ed04d99b 3046022100992641906d515d299dc26c8bfe77cac2c2bd6a9d1cb6e6bec89302b1a6ea6faa022100ac6098386216609c82bb934b27c71a3e7f3f586e39ad5e8caa837b33cde79513 304502206c98fb134ef5ce7222a17343d4e24213070da9201e123d2411a82c394d8234cd022100b75c45516f5799825cb7e99929cb751bebf2224c50755b76355ead6029c19653 304502207384352899a5765bf9ddb791175e3c763b2298fd4cbbddb29d1d2cb284f6248a022100b4858eb2f1307fe5be789e22d825d8e577c79443bebe349b219d2a9fff05aba8 304402203b8e9f54ac3eaa916275e33f7ece20ffec4491d3f568bf9976a0f3394e64bac302206689738c3912918d9dd966834959ca50b47810309b16a3c2e0d2f5db9ee881e9 3046022100d45ccf208c803d5e4e87b383221a196130bb8ecf409110641f657f2246b95505022100847e4e57005a594f01b8232ecf6e2659dd06ea75791e5448bfb90aaba21554a7 3046022100aeb8118875efa15bcb4c4485910424fc3b399722e53b062c1c536eb54e6ef679022100e4d7f56c1fef0bd30297bbf67073f92dfd1cfaf110c858c87510458bae814c69 3045022060069912dc79a63483e13c283a64bd272ff8e3c0903fc4c711edfa76d7147473022100a9422e80f36cd4cac38108be6cbe425b9f5a88497beff6eaa11705d24ba2b08e 30440220079aa3e8112d86126bccd078cbe7c285abcecc1f03c0e12cadc0b02d54539b9402207b3475123052ddaffa27f663158129a525e6b53e7fb9c0ee54632a0aa1ed478c 30450220313fd37b20089260201979551591a5f4b5acc501d2e79d6e9df2cd1147d8f560022100f313bbcf2f3f7a501988455845cc436866d0cd60569246c07dd3576025cc6309 304502210084ad87fcffb171582786463d896f3e8c295cd28dc4c1be4bb6236516f7bafc19022034306ff7e4803fd4e8488f6b601e089ebad9e0577f696337a4551e39c2a3715b 304502202b0d5ea8bdf42f157ea794c9180f77ebfc74528d27b96cd658faf429e0bda9a2022100a41de7c4ad5ec569b43e550783d291aa17187ff252822d40fcd610905b5af27e 3046022100a728f81257c7c6d573a2362150e434c875a85858fcd4d5c0d14d9a2d50a8eb0102210097088dc7def4a5ea2213d1f4d1323c60b3fb66d92159d88e34e1b10b3006919b 304502210091d12eeff32e8bd1bc89d52c94ca9de8ac26feab43b67957f0c2fb1b45678add02201d9d5b0fa2b1dd1d4b7139759914d6927be1632889475d34b3b9a2e8fba7b123 
3046022100fa151b9b55e0d2ca838b0d26a533173cc0a74700b7b6fa9182d8f7b190ba39bd022100e88dd68b8234a84f0dbe070d82f3b8e030e88b8038e259d8325bef67d11baf63 3045022100f4f0e3670855388ce0131b237fd1227b99575799d27c57891fa15f036b3c98010220137247b15bfa521bc866a41c58f1817f6297c4dc2072aae8e0a81d17f92922c8 304502200362913269283495d0b07a835b9e97f66c337e1f82231b0bab0f851253fa4941022100f86c84215b76abb864d5aefc198c3be857313beea4601902f92a5f905de94007 304402204f2153dba6e56b4c6a31eb37d90ccdc53f9b9a0cda2aa993123354d9c2ac0ff002204e7814fe744db8187a33df8667a43f9e764b1205567d8724d041d16fb3c17e97 3045022100bee5ec411a9583f1a3b656fe6734065e4dc382ea149fd3ea29d8233ea0e203a3022066d23db5d601d243c880a7cb0634b29d481d36b0fd57d2bc900dad6579b2058b 3045022020ad15b4f496dfb8619be40880f33c1efea31b5b0e82cf23d501fab4c61bafee022100a3903d2605fadd1a5604b2a39ac8c32c3150e2cefcbce3e712e1f4061d4f1ca8 3046022100b2a6c37e6825a385ddb206d9e3405ab93f3a46ad00f994a73b643cba15e274d2022100fe3338191d7515f0a3026d8b7c6875f199d2648911dc5cb9bed3677d5d7d4543 3045022100b6ce41db71c32368efec904bb7b8d33c83ad8e3db108d2594ebfa97aa68291c3022058652cf330b180bd972ed6bde47169394ec8568f9d1ba43f3055474b54b3758c 3044022042232ffcc14c2498329638fac1492e51016b8f9ac44a6d6d28ca30fd78214ae6022058b5ecc9a41e85f50635211adbbc85ab4896df38a880241dd67c500213a5b6eb 3045022100eebc994aff8ae405376146893e65e74bee987c2b74da3a7e882c43a0460abe9e02204de6c830536f4ff359d52341673d78574bcd6ac5b514157abc3c5209c4558fbe 3046022100ea9185cea9b8dc3349c1cf3a1bbedb800d9502678f727703aff657293c35477b022100a1f199d11b591d150d4f9f95631f4b1b41dc4f52b85fc6df36f3ed13cf0f4af1 304402204eb49fd44a34f795e76a3ec518c712485207383ed53c2d8e82303c7b8b74185a0220235527358492d89788d12acb4ac238c71bd479ac42d1a11a96d041816435bd70 304502202ad7d14dd3ec83ffce3d322f61bbb616591a8c75270294e99fe25175a0c0d944022100f49c1349d80a88692226ad9dc8be7a88d66ef9e958d37bb9e98f59fdeb8fa947 3045022100834590611c6ba3a20c3b4b70eb6664ed6a7360c0f597f3ec3530440f5b41314f022009b3a7a4f5199d3ec4e4ec12d14e1fda33e69f32e498b95c06b1c414b9ae6baa 30450220559315095b4f5b6203af2504b4bcc4825d3154fc36794307513e0879dc9853e3022100ac94d90b87d0e410c0f118deac444dac03eb2341a3edf372ca3dc5b0e7210962 304502207753903040b5f4f41c263918b3fa9eb1b5e1e3813fe208a75bb82f6ac547f93b02210094f1a9e13430948f139b08cc7f64378cec503657e4527f4dd49257cd2cc3f03b 30450220261625dc6e5e886b837984381863e8fcbc1db08382107071a35dae92cff5708e022100be49b7b1e90ce59bf15fa33c1de67cacff404cf928fe1099160877b453ec0fec 3045022100e41c89bc68c35f9ce029cdcdc540692884a4c09e50f4420470e9a5188dbefd1f02207dc81cdfa5751c88275cc1cb46063fe3f81afd1d0f27d2bf6f973514680a097a 304402202ea4a9f4f3b5b44f19b45ebe5829d02ab0c55268994b3d45abdfea9433e0c9d6022025320cc331d666eafcd1df591506082e84f7e8b93056926d73fc617f5cd88a6c 3046022100eb827ea1022b03a054708706f768b16b065f00d90df52e0f3bf1b102b54195f2022100f1b3c4f1f859a02c991c3b486ce223bf3230ea0c1ad4a297a4f8b80202163051 3046022100cb760312c4e010ce5f1f673e04bfc581f62c436c49307b4d3f4f5f53d61f9bde022100f2c784b373d9a9be4c0dd4f4ec877812053090a19a92bc5389637e53ceee270b 3046022100c3ad7b2980bdaa3d49b089ede3e375b521af196f6cb68430277789a0f12cfdd5022100e0c092bdb3cc0b4861f3b09d231bb7e695e7cfecee975943eada1926b120f3e0 304502210093dbaf8e2043d73f0887ac95788577a96b9278f5530e75fa740b2067a810643e0220696df6c6a2e6c93ee7e8ede420db4313f0862988b445c8a17f469d20e785c635 30450220071e77030cfc44f860fda4bb15bfec8828ae5a9c20f448449fdd6cb838483bb9022100a1b6736a064e2a614bbebf7178c35aa69f0f27f86685828a7a5828a48df75864 
304402204b72ddb5c234a8510f24994ada8f6290cf4c63670e5910697ff96be4944c9114022036633f4ddecf73bcffa0ea403d9c848bf1ecd54ff9647380dc8559755b2acc1e 3046022100a1b7abb545aedbb292b98fdff81dc610a75ba5ad7320551156f6ed8cd403b8920221009a2c594a569bb07cb7826f446a39e40e0d4b89c057ec03b48c52d8656e276e99 304502201da7ec7e2f6f6b3280f766ad9f385cdba868b2889038e2e202e6459c796f1061022100801e2bcdb5866cce40ea80dc79f8d529ae6f074a5095be4ef8776999d46d3d25 30460221008e9587688bf5b919d7ea695038b877beffdace21f6c65954c1d8fab06cac8c75022100f47cb9b628c1e3e870bf3db4a5922aea2d0c8656bccd1d8dab2a0858d421ece2 30450220751090911f97e63b7a8cee3f5397d2a29c86ac92e7cd60ed0ab23a73289cfc00022100f1eba80f31a5f1fb0f9c8b7d2158edc33b98f97e089b8ae37fa60935fc9feae0 3046022100a1066cd8d68f35ea942167b7d713b134b887cfca9cf86c977769d03735e83626022100a5597cda3d4969defe5409738cc6346a8643a0004bb8d7e6d3fd19e71c47f048 3046022100fe46cd827307a619e893c510745a9422d0d148bc1696b781e16ae0334c3d03ca022100bd74af4de12ea161831d3539d11c78825aa3de6e4edcab914834dc89b00dbfb0 3045022100bf513b722a30bbcc7c844b69a92ae77922b90ddc8c42732d79e0976239226b19022054ffac9c3ea56148377dee9adb102796fc72a5ce20977f7de3d7ad0e6dc8e665 3046022100b262c6c24897931aa6bd90168213f40d7507cff9144e51ab2d47ae235af41fc2022100b276386218fcb09b18dc57f4966622fe8241b0a963dff8e33fa1300d1ef06d0b 3046022100da0c7a070a3476c182742809a1119d97d8c9738606c8bd8f821442aa17fdbf40022100fb33b75bd5a0b383c589a53f757b4e049af63b76b01c92b65c9d55f1acf0ddf3 304502202e7de3c0d3c05d5a2148f1438d48d714b88dc4b5359a48fdd606fdc1ba2d93d1022100f2ef08b762d32116bc8dcdf3a369e8a876e505d24df644ae4a6fcc2913f48d66 30440220720cb3c12eb90e97148fb30e4d3158aed784b0bd1f9b7c3767ec36e3c4d69d310220682503165fa361a49854a974fea3d1606389008c435596aa72ac4836bb1e3ab2 3044022021aa78879fc34f471c6f41e784b51aa6d3d5aeda1c50e66204f3c207c24338ba022019fd62118dc1e4408eafe8bebb4656e833b2bae1805153eb5b8c6c6f2459622e 3046022100b0d4cfb8f9d5253a732462164614b828ba3b64f95b164ff8833491017d42964f022100b9e5c769f946e20d81b6fce31bf9a393f33a513dbaef6dec0de928fd0c2e1cad 3046022100d5c6cc0ba6a5fe8ee0bb6655cb77317eefdd16897a2a4af131e2b0ea01110969022100c2925597698dbbee556f5a387a7deadd5a942f207cbcaf339716a027b4087b2d 3045022100e46f1e97c139a388ec50b1b150811a1c9df758dbb39ae0941066872cd4a7a55102200dd1af2ee1a017c69b3d859a0b85441e047968b6e6236f86e18c1d109113cc84 30460221008935c0c7cd0976ed0411eaf031c26b282f8f49b1218eea9560c646969314d4c10221009660fb12cb29b4c669cae7a329dd2a11aab753a3b0112dec54f4e4aedb821708 3046022100b8ec3b6dc20d32cd9636451be0cdea5b404077dba273342366cd1a8687949a9c022100e0d9964e596207fe86bea48f3f3698ef70fdcf62f388a2078f340b68f74ad03b 3045022100faba885c2fdec2ba1c5a3106bf9a9fab2798573fcc0969c7aef2df636bd9193f0220443bf3ace56045380ea434bc69c3dc427b2ffdd6b2bde2fb16b00ca190893b61 3045022002114e794ebfa1c0da88052b4ea9e245594629e9ded08ef8079c7a9e93c0a31d022100f6d34f544beb68889a55010e0d1fbd47cf4e1aac127b7d1ea78079359939ab42 30450220148121f4f137a670b880b16e38c3f01c8364000accd943168ebfa84831821f12022100f8a1ab1ccc3c6a29e929fdce25b259f7b38a7e8967c1bb940341dc19d96095b3 304402203231c271f3ef965dc7afc3c7d16a7e2ffb52ebd456da078d5da53e65f9029be10220065544722f76156bd1cfdc441865b556bdfef35b2763d3ca3a78924a803092f8 304402205c64300e76110d50332f6cccf0c1415cabdb65c8c82c76f2910a1b79b9bef590022015011b6842e643145ef246da258964dbaf7a12527093d985884c825196cd001d 3044021f7394c8631352f8ceb911575d358b49a5c6a7abe1cb6827409638329a8829c4022100efff634d7a96f4135d6acdb9699261ffd2a58ecd11e6f641c90edd19bc3e858a 
3045022100bee0b5d9ecb3b68f2d543c778dbee831877731ade66a5f69db589f1a3fe25f1602205552f594cf06f566a7a18ac3a290fe4df400d7494d066889827f0b6a63382aaa 3046022100cfbfdd397598b8ac9ce7ffa0e1e55601fe0ad4de100986e29b099f655d290249022100c05aacda6515fc14a17c47563a3e58797f479bf4dc772781fd9fb48f2ee5f36e 30450221009f7f6298d93c13783453fe67b1ccaadb0445b860cfef5680bfac6365b774687f022017c6d7154b94e0c464f6937f0ae8ba73e67a9d5514b50334a07f2fa8154cb743 3045022100d6a7441a2cb0655356e1fb7946d4c533ca1e9a10f34d6a32dc4aaca3321ccc2a022068f06f249afb969bbd62dbdccf7efd25d19686c17251c0a6aae0cda4b7f140d9 3046022100d16c3b9ac7263630a35944b73bd25186158b3505582eaf54c7be0db20a18b6970221009743be655499ef8797bfe1b51751d0ad3b3f7c746f57733bc3164a074933685b 3045022100eddb45c202b5d78f19c245184cda72afaf2879a6cc87e95cff2747694b0b4eeb022044a7abc1dbfd5e438aa8792be85858604fd0567ea44d9f39efd01cf36e9d9142 304402206d2936f21c0ff4b676fe0940997f3d1045b2a6a0110b7a1abd74044844274290022060265fa55c7f058e5dbef8396b21c774328a503dfc023a3dad38475193c47999 3045022100cf7946445894da84efc8d7f6f048f73897b1644012cb9265b16494e36c438eec022071c5efa73d6006c802e5a2f46a40cfd8f9baad93d651193404799d91a9e6bac6 3045022058956bdbefaaf7b735071424c0cce258cedf14ac3834fe4a1eca2d169789a9f2022100a36c51fcd94bcc6a9426947220e91014ccdd92b9fcadd9ce31578a05bf2be534 3045022100e8ddab07623c200bf516b0d060d10a36d70f61e2c79d909bde1deb710632fd44022034c1256a708b854b3f3571771dcae90e084d535bf5add5d2476061035a9f796f 3046022100f3b73b11e4238a4e5090f89c432a78f8592bae393e3b12d3e9cd8f66e1703fb2022100cd8b48855078100b38e7927ef1ddd2167012cee00c6e5d876d898b587f1ca52a 3044022044eb347398cc11ad685a393ffae5b886d1b2c7c9c51661df0c183d3efd87f164022071c264eff3544030c42274aa8c2befe98220aa3457bc95535c8d2aef421b2172 30450220219ae310f80c14cf7a1f23cbd6fc7583400b6b483125bc2baf987db1c5c912490221008a64fc2f624e0a5ae5fe40f511c1041466afb4dfce35ce294608ba35376ae9f5 304502200cf06dc7eb37a4ab1ced7b21c5aafd75a976b23b950243fde3a63c964a14a457022100b10a5b778e8fb4973cd93c6d9ecaa1545e689d1230f849d3062c75bc14a7e585 304402201513919d1271a9cf9d0f6f4df1c2ec928ee55cc3172bb7852e9668604a2e0c4902204759ba8d15e0305d206bb312209e0b0f568397c1ba01e323dba7da6e80b5f035 304502210080c9eea3b7499dd1d701a0d7813a196f83fd24d374797a395760caa36e09da7002204bca054d306432f8abc1dfe414c9b46b6b75a77d0652e1a8b02c805f4c128968 3045022100faefc172f24c5c0ffac1bc3d96abe62e0e112dca841fcb732ac1ded042f2f7c402204efb67d57258ce0d1d151ade3e16d5b78379581281709120ab79775f647bb122 304502206238366055f9b1b75555a2f0687e9c0623e4bb740adf8b3e94ba242355d8d7d7022100e22aa138e38a6cdaa53df158ea76575a8e2588a1c402fbe6f77edc0b25999ba2 30440220457a58a0d85d3bb213fca56dcd6ed2a8d3f57388cafb100aca7f5859cc29b3ce02204e6aa125e2d92ff875781c7238b67c2e7ec73e65179a0b04645fbd9933bdd5da 30450221009487c65bff01c63cc75eb860866d869535e142aea2cac75af2b01dde24e3899802204c2783953a7e8c4dacc25b4d917f05928b8613c5470bce7811fe7f1b197567e1 3045022100cf919a01a1cce9dd866adc95fc2a93c13fcebc3d40b3ef65ef95b969f63167bd02207f38ac56fb0b2eef6c2412dc2233381d5ebf5e31470d09d47b3b5318d5939eb4 3044022005546d714289b9574dad2ed6370898141ba0ef6180c8ce9e63d4af9c3b03c83b02203628fdae0251fe46a730b9e3818c419594b066c08251dfc927581926709cb144 3045022062c9a602a545572b9ce923c28f0a5942204c43b2b156c405226ca2c751dddd5d022100f50e9b94ff91aff7729df5690764eb1c080ce4b1fe3d1f39bd8933462001a518 30450220793dac3f5c9bba4d86273fd749b3f349bc38ced30aff3c75b59bf17e20a4198d022100c6f9c20ca2427d0f74435b8e3cb1c33ca3589ee8677f9cc338c45a031aa95e30 
30450220711e52f609f394b0ffabd43c2b224c338eadee82c74529cfe0e30e620818fe78022100b9d0e2986178c1d90165a4fbbf5e0e5f91490fb2bfc194a7357e9204f1494bd4 304402205488fb31d333e815faa72f56765b25088cf3e476100fd1de2a684a45c26e77da0220695d4b379480859fee5bdad62a25c7340b56d79181e1d5295c6a4db8c942badc 30450221008449abce47b1b652a5afb2f1720e999f7e5caab7ab1a2a3d2f702c5a5fe3f227022046fe577d4a77a78be9ea69b26e28138c48026be407eddd3a349a9d04f6aef9f6 304502205393ab6aae45dbbcd044600a0fdf12f34a6c1fdfd24785956f8d2d0b43d361f3022100dc845d4ec9e67dedf6d550f35825faeb1bf7093b9698fe74bffcbe21e2a286b8 304402206eab41fcc557316cb08d6ebeccf41e10ec1248e19ab13eecc8c29f111da317e1022040581c054b4b59c3fea646d92cb3b897fd9690eca83dbe32d4c016939f480517 3045022100dc916a853bb9aa07a863f6c74d1e352cf9caebdddfd859fabacc96b2ac1e33b702203ec97138edb2d0ed18b7840aecf1ee4c44a59f95d3bac7d6856e8a58ad616a07 304502200e3d35b638a47156e3a86e565c363079b2f770a7424848e91a7bb56c36b0aa98022100ebcf6b0da6e45af310f3160b2cabecc45551526340170e55327efacf406da2b3 3045022100b46eadb12be8ce1fa96d12bbb129c460d2478ff832ada5e6e20157914b86bc0c02207812d9ee8e1ba3fa2492db4f890ea3929d28d7913cad223ce488166ec784604c 304502207426fbb7275301e3b6369689e67af42dcdfaa5c0416cab5f9bafcc473ce413c1022100a55ca9661bfeb93a3c0eee273f4462c564aed76e2364332a89fb6b7bb74a2c7c 3045022100a21ab4200a7d78414795eee54f52348c345215b4e4b33d3ebff27217ee29d4960220421389d52b20ed559286dd1bb7e0e13935124b352d2f60f0b773ef5fba886007 3044022012c95f262a989cbedcefd111b6557654d6c86313922a1a97e77a478c727eaa99022059a8ed35b9064cc9a87400ee2d6bc1a255ad26e3c5f42ee330fd0e698e9abb1b 3046022100a437c98de4daf0be43dd4a02b09c7845ee1a99b07e72f830e2bd655cd478d332022100ece9706b5ce99254459daba41eac62faa3856e507f275f6fcd2d554c1cf58c72 3044022010cbd4277d36623caba48703e1d83c8657ff5b7b3f5a17b6a73d9a1c44c9a212022064b1d2d75c1912fc8ae22732d65ec73d1ffddc4ca5f07e3a72a61913e6f0f75e 304402206593d11901e410521b941fa25891612aeadfac227a5801bdbd4d79cae9a1a53f022004c313504fe9c3596eb9cd568f6bcf1e5cb4e014ae7978eaaac4b1ceb51dc99f 3046022100d09d23bf19b77014d7956daa5e4be756d088b1c123dc0f902cd4e8bae18fbd7f022100f6cbe84f86122b2bb30c6301b3a5ce958049509319af4c45e7c35090c104f7ec 304402207c78d5444c64ac4dac880b552cbc39d077d02f19924a562a733fd9be5df91db402203876424d9a24a0c144ef5123a3969e2679c9e5518da3f2f6ee20015e96641dfa 3044022060979488dad885ec73d7fe7a1feca2f16bc4bcf5f05354b3ab98d535274f84de022062e5424023034db15f88427ef95544a6bacc8e796804f2bd41e1c92929a3660f 304502206565d1c92856bc2b68dd956b6516d9b882f59fe4626a4b47dfb5f3a772f956c4022100f2f14f097ee85acef292e6b94d49ac25c8cf4a6adffa9980dfc3a97ebae0f3a0 304502206832e8d94e68923d1a58da4fda6983b0ffb519cf23f84c4066dfbf1888eeb99d022100b3ea6c911d7fcddc76e42678bf45666e663ecc40055668a0814567af3b450d40 304502200ad8aae9e701e5c650b6bc863d79533342477dccae1646ed513da2760ac3ca90022100fd2a75b4e2c8f3e22da6f9f541722fa83886645c62be8bf1e3c81958e91953f9 30460221008425e83ac0c4c30c7f6b77c0dae6a4ccfaee72c0bc7e9e9d9f389b4dc052984e0221009982f1d0c421548eb702b12f35e67ee26afbb14d714b86b592f9346f2d58691d 3046022100cb0a25d5d23f05f0a8133502972b787730e765601d8bf9a44282ce8185ffb4fa022100af47352d1517cbd81b9b07dc18a1460e0dd847bada81226772d6030847998dc4 3046022100e52a9b35d19794dd1d1d24dcfda66d094d9c585035b654475d005483e46b55fd0221008887e64c3db99dba09b4ff70b03bbb6be5d57dce62c967b1a7a0065350c69101 30440220389319127a4e6dd91adbb635d680001e63e37054d2960c837eee2273272a9e5e02204387d50dc8a11b18013639068be4072ecb258d6fe7f3d2f64cad0c194f084cdb 
30440220374b5ef5013c3d2dfbd75a8a068ae3be451564d78cf5af364361b6479f64218b02205fdc639238cf67f7603bcd8e6b8b9124ce13768fbaebf496812ea38a4dfb3ac6 304502207c7db51dc4c9c820d52db3fcccde1aec8762a8c92e519512fabcb6903395b54e022100b871cf2531f416c327df9828b7f18965b7b2646602d529b6a7f6008f4774f85a 3046022100e6ffdfae9ebaab155bea1fce19b256401c91790638553301005045edf465ae42022100a296e8bca97951b3aaf7d4c7baff47e3bf56fe4b8daea31632ad0ddf1f303456 304402200cf65b5e33631ddf51aa96b0a29e64fa6074414a0e1285ce27014adeaa8657e80220103f99d13cb92a0ee4940d4592562b87a9f5720689ac49a1cc2fc83d7fbdda52 304502210080487db842dcc18ee4bba8db162dbc8cf9ffc28fc20fb56771af9002a58a1a1a02202e8ce47d13426cf6123a86ec230e681997ac0ddc8e39fb4a86516cf155e3ab3b 3045022100b9616ab5e4cdd17b3926fd649d2bac50c0c3d39c58c0cca88f97d7bd511ed71c0220451fce699adc93c9e0ef9cd04e372b7f67fa2ecb1da766f07e85cb3a7f3d7e14 3045022027a5d241c6b27c85cda01617615e449c40b6bb7f468f4c2fa81fbaa564f1a158022100e0dbb8841e1528d0b03be7777fb39fa271dc8415012c33325910e627ae3271e5 3045022075074fee1ddbd6c90a69785e9dfeb11a1fcad78f75333cf4cc69ba98288441a1022100ccba1b1389882f832f618a81b24b2b08af44237a22b394f43551c65b83ae64bf 3045022100c5522fd53e4aacfd8dc12bcf66953f05c1006e4776d21a5bb8eaed60e85d35ac022004d73bc5eb3b8496361cfa84b97228829dfee936a52d1ad18b43386a354732a1 30460221009938c93acdbb60e97038ff21a73ee9a070cfec88be1b97ab1fca9f9453b8653c022100a5f331f7006b71abe243129c322e00d2d42c88a5f5f1fef9ae846622c339d07b 304402207e107f53e7b5daf3b4d4415c5193ec7d9a5c2422913048afcc6528bb222e18ef0220401fe5e644aa81485527d8647dc47984f3847178cfac05ba67834b56fb236de4 3044022038da12ae1deaed5fc81e365f0df7728467f51ed72ee0a2b39e08e37ed787723e02202d201dca22c966eec11be22626dbda29ae746e736051805b42c3441bb23187e2 30450221008458e4fa6ccf6c8e10351875ae530a9f527c92da2832864c6ff998ea197c575202205c897c4657d664e14757121164d8405ec57dc89bd4003120e54493147ec35a23 3046022100d41282ce18d8cfcc83b9c04baec2c9ebb118e8e2cbd57013f7c2e8734d05c7a0022100c8ed58f94ed56e286bbf5da0ab3321149d910a6a3835eecf9eafe35ee9080182 3045022053b6e22e8474fec232842ccc35c82657d03d61c142165df64e392f660a51ef2f022100d9b0d6041d81be19cb2b13211ea7225dd68e45f44187e9b197d182fc58ee9674 304502207f0fa32e1a8fee5d425e746abdaad97c14930da40931343352dfcc1dab064702022100ce67b73865d1ea66232b406a9f0ff70afc48de705e0ff6bab270fbce17ffaa22 304402206e5fd8fafadf033764867d8ae1984cf01fffaebf2461aa5a409731c0f7b2c218022021c7c8aca7fbabf30089b8b1b8d8d3d77c0b0d58bebbeffde97fd244adf48733 304502201026a2b92fe63dd6ca8d8d67073ef0c508f23506cd1fb2eab8f487f2651f26de022100c8a18a38e1fab9a64b7517af99a639e642fdbdd633083bbb45844306e9d70e91 3045022077676db8ae0d10f323c114c59ef42137779a412c4ac40296a8a7eb93c0929e8102210085ecb237766c9feb600dd7cc12e248ea2f67530f3e941f775dbf2913f3932f66 30450220481330c3e618f8a695c92cc6f60e0dab2e0e41486d6b71a30612b31c035d0f5a022100d102910de3dad9ad61a3de6d2963d5686df999e4686be9f10f5bb02b663d715c 30460221009513b7275ae37c031e3b443ee85c558d2139ccf9ed3c369340c45f0ae6765aaf022100f33e0491eee65b81ba82e2ddc8a75ce8b05b0406c618ef6d76cee1f6beb24899 3045022038f71bd3f1f357c2091fde7bc98f645a97a695fda4121983ae6d32a233573530022100efa703c8881b43e5227f1b0b39e7e9d7691310e1ef763c4952b0f311a08a36ab 3045022072cf1edfeb79b74191958e3c3c5f04d11ab22aa1d413be4718510051c3fee96e022100f49c837e742f4d21ee2be3a2051fd6ee1564fec0904ece29df4e77d9fc4b51b8 3045022100af4f342fac386af93a1b562d6483eba534f5041532f33accfa8d636b2b1c92e702204cd6d8fe0b2bb4b5f610932b038a1817687ead7a7888ca591e8a97a78466e1ba 
304502204f767c17dfc1d3a76b130e1049dd25e0796f768487493430984d9cbb60319f8e022100ad53c330f0e9a093b17adce6f7882e41ae3e3487e3bc47b34c2bb878dc231746 30450221009f4e2d3aaba8c62cf4efec8b63f900c89182f6ab27fe20e6c70e5659faa07ee8022016a3d27b7427ba90064ffc1db550d72e249a4d925079fd4d5f5a58f32d4ae768 3046022100b4c45c3dadf1ca0c662781866b6411fa78b912991159369bb92c3ab794b87275022100dc5f0a5423925d68c6779a607e0641e72a4a2bad7881b04e92d644f356c54924 3046022100dd4bf008be14d0232504411004bc6e96cd0a7e22ef6e0e6a4e4d8f3e3b6a1b9b022100ce21251e5659d54e5283afae0291c2a255fb1e96c7fe391000e99d7aa3691f00 3045022010442dcb7e1bb28667d7e325d190dd5ea0e666595a4759001cbf922ef9c80447022100ddf352c57d2230731363bc11c22af87f8dbef7483fca89d7f673a47822750602 30450221008c9e14f807691a66cfa275e26708553e7f8a1fdb260ecfb71b86b145af6d742e02200d5d95e716fb081f91fb9a4d80be3da28729b1b60a9ba6592306e189d2de5c3d 3045022100f4ddef9aefb8b4f743fe2774d8d751e5912187d724d786b3242a574baf5455ec02203c382601a32d96d5ae27edc0ba87111a5649754ec846ec507696e319d4d32bda 30450221008f9df9821aec50e75fd18e904223da28ce107fce0e0d4abafb412ce04016d3c002201aee494bede4253c30d93b7bf86100e57af988651e79e79eab8d8b764924a29a 30450220179c69073190209e900e2886e22cd4e9d00e8db04db74d79405d6436bd812cd7022100cb229a403fdb2118039fd157298cb08684cf4b7c717d301079dbd07cc54bf1c7 304402206575c6988aa77176002ee287370c135edd639af4dff574955b8ab241b39028520220169a2684a000ae4cf79484698857d4fe1dffbaab7f3c489df54ff7ae2d51e823 3046022100b63e7b1ab113d36f638011678ec44537d9f80fb8700b2c5def04b65e27f9f86b022100d7a49a1b54f9a251ce7c6d89b0d6bfbb17700ed725f4423a60125e8c08f4d31a 304402201740436ab581530c27fbf7a9febd8842134901dd98dc2b384c0058782ef06fae02202f3f45b4f72a72904f02002dbb87644f6910dd1f346982b13e876c41092d1550 304402201701b4be87f057a34a76af85a6cb61a73b19a0ec5b5d17bf7e65742cbfc04a36022003f59d9a9cc2f1472364cb1343a0d80e3c32f0755a0258fe56391492a263d491 3044022061fb252c35f6a53121350e70885edb2907b0c0973b562fe86501fe792eb0b11202205a3fabfa5852452f19cfa9cfe4ee79b22d688071dfa5e0c49eba66e431e6e5d0 3045022100b746787765319ef80ea93ec6b635b14fbe3bf06b78fb27524be1bb8f4ecc8841022073c51ef7b6e57d1b4959970909f41d1a0c10ecaeacfe7f6642350c787e71a1d9 3046022100ca80347b230ee048a48558659d3775f44513c803151ef12fbeec871f1b03b8cb022100fe85795ce0bcf3988d76d16fc405cf6aeb3be1f53f9d0d083a003084ba5ae68e 3046022100b7c208252e63d7b3ddd85be3f6a83f70dfb3c8d51dc31f30d169533b710ed28e02210085ff5221952aac278db5f726289a026d0669b12506d3ebf4b49ff15b331e3821 3045022016a509595246c6d63f680b66667820c214092e6e2fe61fb5d4772da71f40b0b2022100854db4aa1f5399c7bb515f3e914205dc07fb6754a6056b7823bb74aebbde3622 3045022029265584b2bb7f0c6099fa2a47235de177c64834164324ceadae4347990410f7022100a7af6ff4bcf3c132e7a81b160cd6674b075cff69dd2845bdc53d8f6ebc337f24 3045022100ef3a29255913a7bac70e50c59ee171060c4926511f71de6709810a7426cad8ea02204b0fe3fe93fb27a0fa6f1094e2ea3b212fb0351bc0c4883623fe13cc92541451 3045022100f1ff3479bd0dde42ad15af90844411a6cb6b1cf1d27f4f9410c93568046f9e6d02201b5b542b92113d7d9b20f71c982d2197bdc53224b763a291ea367c02a4db463c 304402200ad7846e53aa2629a610fc8fb4d17c784927f44d8809cca15ffb3be7aa47e5fa02206848c23d45ca71923baa6007319646e047f75d654cf3277510fb6f3b288ddc0e 304502203042a1a3fc8bdae95a0dc65fd3ef50dcac752aa7524f61eb831dd55f225de674022100881f64e952c2a7c977622d70df36b7ea1bb20127d2b1d25c707a4d1bb87a08ae 3046022100c5d8983f174bbe13b66f25aa3f251a6478462e60f493593efd55d2bc8060d6b1022100aa4907b39bf03a4ff5dd695eb2126861517d5c5e523e26f6932ee9082babe5d7 
304402202b6aa6c7b7cb78b8fb8ee384f755192bab172da1e78dce14097b2f0761333cf7022072627d70d0c4ca64dadfa82bba1aae51124b2cefbb668fc75f9e50fd6ba9960a 3046022100e31fc628f9645f089c357208b27f898c7b51b241dba236e9c51f13548de6a4e0022100a7b026f49e63ea6b305bdfbcbfb4a14a037ec21bcf1e0b713140436b46ac81c9 304502202aff4fc794c23d01950a21d35360bca6efc9dd5b3bc3ea8480830bb18afa84da022100b76d3ab24bd8dc57238b87dc4e490998db42aaaf88ef1b7bbeed6418dc668438 3045022100efac5f53162c89b1a2a2b042e6a2969e50e3775af17c08ea1a51237cfdc38f9302207de1b002ffb1eee879e5ccf0e298d09ee1fdd110f93d3056dff7cf0e4ea42071 304402202ff026cfb604a4fede6b334f732a6290e145e85d1d49db1ef77ae4a8b7fb59ae02207439f578c10f0b81ff139cd3d9e64d1d09abf4c956a0601739b1c3b246a374d5 3045022054101a8a98ffd25a6c824e6f0deff626db8d690fd3045d82718025e08b4975ae022100b2b979cd2013439d4e494a179b4a65f6c0f82bb9a12bd4bef01cacbce362c0f3 30450220130210fc4885a9f101fbf2984416d63fc31cc9ee75003b9fbdd51dccd7cb070f022100e75a0e388ade2c55cc7b59f1a47f35c9970cf83fa6af40dfd5469910c67c5535 30460221008101ac86ddf6e1cdea87e8ca801f24112df6cc8ff69ca1fa1f4aa2e0869aa98a0221008b7460f35fb2807a27f373f07121d3740902b2a8fc0860cb5e2bbacbb2b55233 3045022026bf42d09c5b4cfc39b6cdeda033ed38e9bef1f86ef2ad454bca9571742dfffd022100bdd78cdfeb394658a4d5d39a88b227478cc84ad34b6de9a397d6c2532f87f6ce 304502206e75888da5fef33ae640dd7d67d7b4ba8df4a02147f7306d854d5d1e5b78c1c3022100a15c6e3955a3ad2b7b15bcfd7ab49837a9280db33b98429e2b77b73697341ec2 3044022024b1bdced935571a2202d2e93bb8f03d2585718832b711057856393a2e8c8fae02205aaa8edfe2355ce6260142ea833be945479dec1928348a76dd6e06bb48c1d1ec 304502207fceb8adedc6ad60ffb55f519d1083cb4f9f8cea7510f984c9158f087579cece02210082c48114c85b7072bcf7e0bd252a882bbb11007365bd316a32570b3f3f1b0123 3045022100ef44633de25c19213d52dbe7433676f67d1c3b9261aca583372a55b53ee2bda3022068228ffc739024586a890bb1417b374478a23592be72255c2e98b8c8eb667d8e 3045022100a275fc44f0c25645f855f9afbfb5124e43076468eab2f285e1ae437fa637670c022057615012ff096e849fa2161d2d036f435ee3121d3d8f7670b2ddd4fc439c660e 304402206442b607a4595a99a9b832e48bc804151ca9fc8b3e09e921e14223542289d726022029fd7cd810cf81d045d890145730194d5dc1808b1cda05ea73eebb5d003007c9 304502204541a7ede9e7a0b0a72bd016a8859b0eb2d14b6777d48380c004f49ceacce63b022100dd56bcb314af63d5d376760c487ae8a736ef12d183a3c89a558317d0d31cab3f 3045022015935bb26a8ac5b62949e3b3a939c7162b7182dda807b38deab18c098a332ea502210094260712ac9f27cf508b03047a93b8f8013fe3e8255dde1b5ec0df4021c4fe35 3046022100b4d3f1b58479efbd0c4d5855a6102fc6d583ba49634924c8cdb2672e74f873bb022100910f9e9826b80ebbf5aec42a6f647a0b2bdeb72b402df2208b9cb54c7ab9780c 3045022100b9e0cea81013a8880bee23b9471c2ea786196f8575735aa3d9ecc98c7a70063e02203e3bf3c87a318d7870ed9329f77ebdf1ec25dc2acc4c57c722ac9d437e5797e0 304402205fe8fd10a6d1c0fdec6a5c3199ebb1a911f85d7bfee82b6ea6078ae138b6ded2022072e0405d7a2331922321242043e93be9d200c20def0bf772d56f3efcfb282d4b 304402207f9c7930b8dd322e848666b42b9bfe8888f31e7a4afd792948ad996bb74119c602202caae771d087de0114020c1dab7988e82612103610969ffaf1aaf68ad931f531 3045022100bf6bee4d1f14a1b7d1e18c9c8d3a3e78edb5e2849716a7a2b5938404a0476e040220066a3880a9375edcefe4c7395f49b89d8c65ff9f1d68385a06855e6f17c03f96 3044022056ffe0bc461c9dc496fdab8c0cb2b963795195babbca23c1a1fad3e99dda0380022044acfc48b3c98794e7db377d7de0490a2a0f3f3fc4628356ca52e1e26d2a6146 3044022052a1ec40a16477923fc8a1fae07dd55c7ca4a7220ac921314c113e776c37d6b802200cd8f05ce43cc45be2e94de8d7084315eba702caeaeb60db5f6e22316a9072ac 
304402204344393ed572398df2b88fc72e22a3c522efe0c970a38cdc29b0527605ac67ba02205864ea711ad75603fcc905845cec05ad9e17a7abcef296e3a5dff63672c81d33 304502200243ed790807ac92becd99970bd1d10a8e020badedaac0e39d7ea0f5ab545fae022100c8182f5b6478d75ac8fe4d6922440d3a8680bd061e6c7806398b2c11f6afda26 304402202f156c3fb8824090ba1f6753dcd1303e200eab45bea1fad3298f25f1d5bf7dd702206215a61518a70bcc5c797879cf1e01685ea74fca2b1b16d1ddc92ab43b386648 3045022100d0d1f6d34ada3c9b5356b9637b8c83e5127a60bb667370486bfca84acf34829902204b3d2008a98b43c00ac9d95b6a6fff7194a44c3e107b58376c92fb2553f7d7cf 304502206b45a587796b707dbac0e656db54cb1668b57a650cc815cc3c1c13252a2c8e42022100e3cd58f3c47ce2f022e42f0fa062a20fde7b56921f32841a46b3dffc8a3aee76 30440220510f79f86a711ac02e3c3a5aa74edd617245836b3978e6adcee01ed488d2fac502201503f97d6ccb0c617696d9672e012280f3defcf4344daf6629fabac9c0836499 3046022100b071e9bbab4d83665ae08401bfe4219d91e695dea910338790e6463eb9f9ede5022100e3268250c5d56e2c4c9a511f8ed214659aef75c4e290e663515238691db57490 3045022100c842d4f2a2d9ec5793fa75ab1481432c9270f40c269f4d3bc6d6540c3f0758f902203ee8cbbd679c4392c76b120c932efe5923326cbca5db43fe61bf116f697e045d 3045022100d284e3bebcdaf6196e4f2453da92db27b200c28ac34d8de1eaae999ba55c7c9502201f8258c9c4686af067a886fc1782a2e8163cfddc94198df03735f6ab50b0fa9b 30440220288202d37fee4ad58c15a0f2f280aeb475801127ccb0727585e63130bfecb4ce02207252494440bce1e600575514d723f004300dad562c5d7695748c7b290cc970db 3045022100abaf23b447b818fe30a9af344d4e7d481973e603cac0df3874241e41d34d3c8402206cd8fbdf848f4f55f062edcdd8ccd355b82f8313450ef6f92278242d4117415a 3045022004d872f3326265e44bc4aac135a3fdd6d292a52fccc7fd2b48f65837c6ef1731022100a7ec709f0b1704969b013e5ce73fdaf7a956fe6f101139fe40aa2dd78c9cbf51 3045022100d45dc8f749ab626204f7929d052b08cb5946773c9585b7729e914852d0c69f8e0220576678fa7974e2669f8f92c4b2ec39d468b6e26bfc3e2691ffdd20c20f9634f1 304502202b632cf6a4c1b76c18db27ebeb267f8ccf75acc2a2211eb147fa7ef6d51fc780022100c6d19bd24e6de027df19116f4e7e9f0490ef7af981b3817b2e47fc3474944410 30450221009043fa9f4812f5d75ddac7fc393370bea68dd85c2e7fca8dbd6fd55055398a6202206949966390d53cec92e0f364d20d2c18c54e29925559975c2713ae2511d2bbb6 304402204744f02b1e68e2cbd128b277e9032790e9cf66e9d3911e5759e0fe00f41ac5f8022059938e2dfe20476bcb809f8676b37e1653d142a2a9fa05686d2c720936c16e02 304402203100723ae051915658d147dd552055218a6731269787bf4de3e8a2d79861428e02207c723faab7688c38668401645fb56c80c1a9f22eca1b707fbf23f456f610b9d4 3045022100ee15a6c77d16c6bb0641ef7e86309c7f988e3e6dbbadb958b35695792d64e2c002207b9948cd93f558a66a8528d4b438ca2c41238579a12ce455d702d0feba0b8c83 30450221009370e9fa042a4bdb37e555b9b532d85ff4a66d4a8ffb6898ad32c07420c81160022075db6d7e49a0d01017a28656159c1f5ba172bc2cef82929f97fc2a6001f126d4 30460221009b544693b68ca78771931b9d431563e47743feec8b04697fa1dbc3c3995ee4a802210090cd8b05d588ef59f1382adbdb83ce3923007e0068dac43e31e829b96eb39001 3046022100af8049a4606f7cee00332e75470e57a8a76442fe99bb6765953c1c44995456f6022100ee5ccc5190826275af288dad6822352f16f9d35d900ec7e86a48ad41903f8cda 304502210090e0e2755886a3fbaa0d36588bad3e54091df5dda5a9c93ff194c2d17cd0192702202b62262bac44c751ee40bde2d1961113aaac251c6f1a4eb4271c3186b49e7082 3045022100927a5edd68fbfa1fca8e83deab8f7c0715d6bd1eec1321ebb0b697277e3e671502206f438c6498a6e870774e719622e414d4b7ac1df7f58896c90593875228cb08c5 3046022100e3e97e66e8d713b90160c09110f21646fc5ff109aa6871e531b0b83ff1e4e90c022100df1eba4174702400e882d8fa40bf5a9758fd7efffbad32ef2baf02ddd9524b06 
3045022100880e7152aa0c10c66450a9212e28e71300e0377ce32897490ff54ec972a009f6022065551f4595611e263670cb1b7f70b2048b99499612f56b251d542c8b39fae3a1 3045022100d03a35c9066d601a66e0617697b70249e23477064657e966cd77b66a67cda021022019d9d8f108f5866f269578ef7fb165203c36bc33ce0b8af5b4e06df66f17e2e4 3046022100f39559085aa4c4c6c607e924b5a686b27632460a12cce10220917294d065711602210093bfd149d5198f10db1ea419a18242e5072d3ca57e6266bf5fa20365be8e4ebc 304402206081d2b25714e96bbe769c76b1fd8da95342cc1b5537c3ab5b29bef3e688504c02203468a564cb306927655c14011d804bfbf8f11d133908b30128e385ed68e4b38b 30450220606f3d82714ca6f1706419fb6d409bd6a783cb3c4219199352c6312fbf855bb1022100d52f74f0424c2ffbe705003748539c53b53818c3b9b8c6ef9eccd04caf07b6dd 3044022053a20e3857a80c3a73c14ea047dfbfcbfd055d7d686e577c04747e025fadf10d02207fe8acc6eae84d8563395a08f0cf06613ab58e85bdc52e1eaeaae51ddf0ae0a4 304402200e25b88d0d4f577cb2cd564e47d0831e348647270bebddc90399f3aba1491be702204bf96c8981e2a56bded7295f1d3d6b5c5f3bafd8519cdc5b88c5427de7895eee 304502206014e080fd95ab5cf2654dbadd216d399e329027416c257b239ccf466182ef41022100a6d1eb73664c09769a74d258a6647f6e2c788fd8019ebb05673367af4eaeb8e5 3045022100a53b1377de78f3ebee1843d232166bcbb424869a306df636786266130a0a5a0d022051b88776495c44f775b39619a0df775994b249c228d8e485f21a0aa08ec4d7e7 30450221008d3a420089321e0b0c168194ce4c346074110b516430db087fb215e422b7bec702202a34788ffd3052352bb35a141e079a9713c19ab54f4e3c8f31915c2ae4d6c4c5 304502200e9f75a86d5955e9ca4f2cb2ff88f946ee7b019c37ccb63b51129cf0a3fea69e022100c3a26caaec660fbfaeceace87cc01803c80b880159a1feab8516f69c569ebded 304602210082b4244aeaa09cd904c5ce450e6244bd3b2b657e4c699b1b08de23950ad2fbcc022100fa59282b5c8070cd65510db53c5d802aa77797c5d857c839e34c0f9caccfd22e 30450220346390c016b771dcd4636a48aab170f68591eaeba4f20dab5f437d019a8d5297022100f1d0db74881372394a02b840168bb2621117e84bba508db130fea94d1da37462 304502202914adca1a0643db3cc965995a0b02750cf6a80741521d7d07a1c24e244266c0022100efe2d3ed6aae39766062415917c7d65c654692d522aa15e60f757709aadd8ba7 304402200bc4afb12d452d3dfdd2cf6da988ae675996d037c93ddb30d08d5ba0195328d402205a5153cf9211022c23ea68e1fdcc223863f0d263d6185459ec8c19f8c1a97ff5 304502204d096ba9a71fbdf266ff86c389b5e871b84f711dba1b3dd43c038c2e23903705022100ec2c3544310e77a31d9469a1befc5d5b57f9a68a8303b4f932ddff8ed817b9a3 30450220018aecf3e0d41b710eb9557cf852cd238d44ed847f760c6d0defbe56fd4455dd0221009e7c2842a99976037acedd877aaf5e413e63b644327fdb9b903ccb04206a283e 3046022100eb10fc7a7d62d020c4afcc42e77fe82b94dd375edcd8d3599515a765c6a5e9ea0221009399010df5873d0d007fbdd577051273409fdc7dcebc0286a7555f5244eea060 3045022100e4e63e1ef88d79680105f498fa52574bbef21b8f8bee25f25e0ac32befda751002203e87e7d9ceede37ec128ee5baa424302d6af6d7d70db0b9a208ebef88331d4ad 30460221008168dbe3add30de1ff3f8b21ae593bb736e66fc7c57d931e960ca094a45d9c14022100f316b0ae3d3b87afa4bcb657ff95eeda8239070e33e9248875f0e361a48a9f73 304402203405e7c030dd2a16afbe1d89de8da0f4c4acf1d1534dfe17b01c9c62d9398c5702207ad859bcb90302231f30f91c769355163484469368c7bfcd7b3f42dd74c53006 3045022100a6574e8d5bf62b2b597b0b9c592a151a8ae377f00c0e963b770fc9b8c17bae3302206ac733ec66945142290db3b2f3bb12d7c64746311863326b0e6545f79584c3d9 3045022046055813eb57261735736b181eed22890bd3bd9e2e48c77dbb626f5cc796871d022100ef258b0630d4aad62074c097f6a2978e56b1805c1602713ebf1b0b4396fb4e3a 30450220196eccef9722ea8f83ca821a0083564cf36c7aeaa786329cf4fe5989e4466a1a022100d6aef46bf59788b4962a228f5e0b892e6a0f9a0d5cd5440620fc9ad6f0878c88 
30450220185c9ae7cb33f28ca6314346bbc4f2385d351e346d3e50268bb8bf30c6c9e0d10221009709f382bda8697027bc199ddc8e7355feb16f6ba550d84b0927a4c49b8e5a78 3046022100c05f45e1268133f3b892ca9ccace13987fd21e23a6d69b25ddf4c8a36ee80384022100b4a47844da99303f8596e8ebe378325f2f3fdc404698416c9f0198844c838f8a 30450220190544337ceab6d052ffc2b7644619d9986adaa35c438dd12c6d3059a5d1013e022100cc5f2b4f6a98593c807e236bb5289dd2293f6500a4a88c2e4b727425177ce293 3045022100993b4b76a41a7dcbecb75b3e02604417a30198f79f51f20bf3df75e5cd7128ef022006fbe202c73f3f65e85c775fbfee8d92cf17275f6709493827e467ba40f35570 3046022100e74468299396eac863300b8956c98ed00bf6030828f6734d2001f0f11c04f5a6022100817b1ea4cb7fbbfb6b788c361ddb8242b60c04fce2cfe9503cc025bf39376e08 3043021f5823055eabe77527405c809b2b64e8f6394d08964e1d771199075f208d546c0220165bc9e177b75bbaebbadb89acd7919f9dec8c8c37855dad453dfffbd358e74f 3046022100c814165b17bd366fb2694055206f88e72a4c3bb876af234f9cc42b95a1bd6dd4022100b1509c01c627999d5bc04a040700ff1df2877d1c6cca8f906b6d1d45073dcc2f 30450220706f3e9935f0a9d11e929abc55d0b8635ee66664ed3bb3659de2ded7058446e1022100a3970389d49cf698337622699ef8e3c913c2f68287379dff93e455b96ed2b51f 3044022057cfb77b83e70dcc9f2da8ca5731c9524c503831af6dda3b8fdc618bb074cd170220513208f2d37ac24d7959d6bf3bb78ba4f682445dda4a9ac637f1de3333256ded 304402206176b660e1f31091253ff8a65be55e5feafc9cbac296bd28022e08c2ba3cceda02205ca4fa18a8d6aee35381586b4d22ad54defddd3e3a9753355975032bb6fd6fbb 30450221008614920528c127eaf1573220072b2c35377ad144d8018d02a3c1d46cdf9df03f022056afb074383c08c6d7d3d335fbc3266f58cb0cb41816ace1e686fa258869f096 3046022100b95d3cbffa25f501fe048e0a55a294ac5f7dd3970704cf5c66fb8bf12a0b74f2022100a6b5ff497e0db8365bc073433121f7dddf1cb1d8d6dc7b0f54df55ba3d3ba839 3046022100bf7ceda687a4fb8b899f091570dde91c99b7fcbe96090e9a0885df500ac3260d0221009a1227f03331ce9089d05095decd6aeac8c27318fd209877cf4f72b52b2daef7 3045022100946ef20c00effc2bd9885775ee93a895d8507d826cd6720ce84f1fb26aa02a9a0220238e3d836c34b6cf682b77756787a360aa623b1ef8e9e574ef02bb64ff09c6d5 3045022100feb2d1b8dd7477ad06bdc7b8c2bad35164b8c343ede92099348fea651298ab39022020df518103f6aa26578d4e60f646c9431cfd9b28f53410617002b6bf54902e50 304402204f465f246a91ba92cfbe8004ae0acbee7641230ebd64630f5723233940bb9592022068f726c78a5105fbaf1a48e6791f8238065832c7be5d076a9084c77957287d92 3046022100e153e5c95d8faf1f6aba3342ad293b217f5592d4f0884b2d5bb29ddd2fdefe46022100c49e0c87776ab55fc56dffb4147d5e738f15795d76ad296e3bc365f705815124 304402203af37fd156fc14f5c511dcc2041de9b6a61d03e4c13265d7376557a003c6c83202204aaebab012e35aac47c54fe193f12adcf6130f6ab3038567ab65685d4b27a9f5 304502203674dcc0b48ed8fece20801aaf50e674a7882ed70195b49b9d0d5d5d92a2e04802210086b39a0a0cd3cc4f1b327536635512fb20f3d3b8945282d051ff9293909d8d92 3045022100a7db4d6532a5499372e5b5afba6226423ec21fbbbd10fd377a4e31615250e4ad022077320f1b163d509f44aa89f03e2fdc075f475ba8d6a8c63cbff4877a80ca43a9 3045022100bcbd219027fa875cb9800feb15f2f1ffc646aed3d6c459c951a52e3c11dd180d02206db93488f2cfeff9224a8d50912ed0642ec4a25f7d2bdc58aa4c5f37202a7951 3044022047ffa32edfedd026a36fc1537bff404a204a1493adaa75882ece35c0d5cf5f62022038a7cfb0d9817ecd92e98419ebb30faaad81c2974dac00fcb3cbbe99bd2406c5 304402204e04d36a32b234f390d165d0c3cf897a45d882826e5f763311663d61610ee9b002205d3e88c7e6faef222e44a0a43639d3e6af10d765c70d7a0e4ae4222fb628d481 3045022100a5ff0f12a7307c49cc7bba88324c0a8e43e21ea7ff3b09bdd3a33847a05c1e8302204e7d538b082168b2a2fc25badfc3a41d7533458f6c877edbed8af211cf70bee4 
304402202e87f6752cee3437d9732ab2a7492ae30482bb2dc096543552423005ab9a54bd022003ede7963bd01f1776092012da579f6f0433ad63a1af261affb6cac51893dbb7 304502210098e89aa3c9d97fd3cfbcc9d3b2ca55f3eb7b8f9d43973161a5d2d4619a64ffa902202b26b54ef4fe96909fd179ec66a5baf389d9b39a0b709f761217c500006be0c8 3046022100a04a9b937470303c8384ae79d57513d15c4aafbc7083769847322cf11434cfa4022100b87b342bfa091627f2b089053bcd8477fd55ed725959c35be9a79ad258afa9a5 304402205840ed0554d066ebaf6dca61547737492f6d5acd0ad33107b60bbead98a77c090220781eb021e4b992c362738cfad444b146242b021e7c6dc65e926accc5697d3eb2 3046022100a2f743c65e6f32d750b8d3512fccf18f712954d783b39bd9b2f09e9ae2865a3a022100edb672675ef90e4936ab22109be6b6d7a5eeac5ea659f9bffc958dd0d67c5d99 3045022100b55fd6d3aba73328e4ecf86c75cbecd575ebca260a921c3da60075b2c37926bc02203f9e0f6b0dd602509c35f55961d70886b14daa1021c18316ef8bddc452d261b6 3045022029f380c1f4259b8a486d77a3fc78e45d3d76d40b4b6e366ed9bfc623bdc4bd24022100fde95e4c4d4c84e57b34d8eb0be476d5c4bcd2ae59478df93f5a757ed79ffbd6 3045022100ed86e37d592aed5da1f82cd22317b9085cbff0a8deebb825850296e1d1495fca02206383934fb420cedc9cb75805685f7a22309b5b5dc2320134f5ef1e1b37d7910f 30460221008eacd7dfaa7567e1f9ca107a95dd9759346658f98b0b87b8645d6c5c8c9babc102210084fe571a406499435ead1ce9ae089417bd02f8be668864d088cafe5133a1e140 3045022100af4b37b0d24c6b04691931117c77b94deb70204138da2e7ccd3dafd11bad2d9202204af585bfca6ef5c745e72bcea5895a901087e274e195c8b172ae9c2cc0e5fc4a 304502206cd6a452063497e3983cb7ef3982a167ede45f6fbf73f73ac5ef41f81202ff21022100d660d844b893a51742b71bd6b86e29ba0204c208de1d6c08e1c32c50854afc2c 3045022100d311034fb8283dbcba6faf7dcd1817c6405852effcd3b2a32ac61677c2e175dd0220180befb7085b47502786714807a4660cb4f07310c7d54a11645e3e56db75b827 3045022100a6848fcd2939e3ec81c96f073452d1d05791c869f05061583dbf4eaff12875ed022043ca0ec8cb835cb45257d8c0c4e4b62d3476453b1b069bf172a8024519d1f62b 30450221009f8f239d336af58a19676f244e6e641492476cebabebd277900827837a3375d602206f0f77c8a1ca68d1c83687457c1560b55317906ffc3da6834fa6ec14aa4541c6 304402201a1758ad8a516b0a0b7f0666d9824036bb6b9d5c1b36cb283e1eb2fd3e85e47b02206284e6a68a65c67b2bfc12074fa802e744d73c3d46ed4d3a8c150b8300d9c31f 304502204467d01dbcf1965f25e256570f75ec65ef21a3ce5a3eac571eb466d784c6c11a022100c759f8427871b39ed6cd7f6437b7db1204a5d4879c4097ba68d69a98eda63255 304402207517ddf83bc323a90f3742c77a1a215ca1f6e1fabfdae4c39d205a6e86323ee202204c424535b48fc567cb5c7df21042e8dddcf589d39f7fd0571af5c452c60dfb71 30440220577fc2911ef254404006d67a52c537ed55f812f8aef562910ac1ea0096f5139e022034e79c44f7942c06a7f1cf1ad9c6199d08a93b8a2ff21f8e260978daa4e96c37 304502205e6329fe7493999aea90bff3178d0c4d6e34e55e7d4ee4bceb113d29937b3a97022100b4b164557ec95040a1528209f35b60ac8db606d3cf50b39c70e87ca079382292 3046022100a8bfd05a1a38b596bbec89aa5dee6e91a5cfb084c6d705316d63804a92964bdd022100c62131348da861ab3f2e1e267d35b178ddfa8397735ba47359ff8cb6802b114d 3043021f16982b62df5adce9f18615d52c8c437a9d87e24609283f8f40f56fb9fac2be022021c16596686c3264d8612ffdf8109ab838c7e88c6ee15eeb3cbc25ddbacb9f7d 304502203059670897f6533f3fd8800dad7cb04c917a2a4eab4fbb459d4bb095b2ee99890221009f4bf3900be92d62e356596f372b2418fb95676971cc7a25916a1534c7d4dccd 3046022100e4d815606415e4f5de485e2517c92430962c6e798498374e807fa06d52230e85022100d56111267c8f88804f6b887a60e4b1dc05fd1627620ee205de233f10f250a125 304402201a5dfd940a357b5adeb66250c8df4664fd11a0840cd8e3775c5e30aaf9367b28022048e9a57485d419583b674e02eb0bfc9eff2fc6eadfc093c11458685f3428f6c0 
3046022100dff4fe21444d9acbea36c604b24d7681aee5c1be4c6c3451d2c8601a61edbfa1022100b0c57bee0b35140c7ca63ee528f119eeb713b2a2f09aa072f8a9ca954c174b30 30450221009cda291f2f8efd73226a5e7a57790fe9cff3024dadc94667af86d7859017a9670220669912562e6b00774dc546e42c2e42e2bbb7b3d7d1477b7acf68bd8ae2f29bdd 3045022011f698acebc06128b3a5a25926b5b1ae2c8eaa402b92b702862ce1c5d6d63c6e02210094574bd5d392f5a5b2fcfd309a3aba29ef0c6b1945e3bc9057dde119c5dcb5ef 3045022022f27ba8ad85a57c49ee4cd409924639a49407ae9df564cd685407bf860a9359022100afd4c01179546f78aeec5b5d6fe9985035277ec989900208448c2d925921203c 3045022100f87845fdccbe5a6436e1aede73403e09fa42c390fa3d5b842ae3711ee5aaf75d022022f0436ae2df60aa7a9b0e9beb9f51081824ee5810873a4f7e0ce0f7796b4173 30460221008cc2ae9d4fb1cfacbbee434788dc545cd4f05c9ce07f9d75a8734c865349a372022100cf91439d4ebfd00dc2c336161d843fc00d0e027a6967e6fdb341cec0a861f715 304502207423bd5adff60d15effa95b524f81fd20c5c6efd3105165ce856555998bffa69022100804927582d4c168b1159995422a8a49e03e5bc82443ad5db2d242daaf9b780e8 3046022100c1e963893e49223efc600e8a442447563463a75e848719a59284522d4ae3cd51022100b08f4c3928aba723880ef8f40619c28329bae0dc69d28dc75ab0ab39ee130a1e 3046022100ae258759a3a19e31e91a2fb653b840cd40fd6a461624559a28f39277f8f63022022100d085298ada850b09463b7ace04ee09682518aac019a0efa68520b6df68b078c1 304402205bc40322226e704b677fc7f92fbd786a781f5d48f51ae3854592c0ea27217dff022066d6763fadd3434e1a73a6a714111f8f72dc09b0e9d90215e221d8dccade8e20 304402203fc056d7c98102add89f62874a3c1852246f510bcbf9cd0e7400d9903736b93102200cc867dd33ec22fca559d08af4de95352a286c75fd2681368e1fd8119ad9f748 30450220171871ae19db9a8b61071704c8a8b07ff42cfbed4b90487e310cae8fd56c0c11022100b11a0c95b46fac13d5c8bd102a45dd33c1e16a3ad85cce1449146169e776e589 304402202b1f5ac851b5e6067bce4e83b38495a6eb1f6f15bdcdafbef7d5c17cc8efa456022011e6410b4cfae79cfa4192ee58b4f8aae1ac40b6122f4c5ed8d7e4db40a34c7d 3046022100bb11b1775f664a0c385847e1475371daf6502d64b2243c64d904cc1e0be0bf8c022100b2bac50fd32ebeb17318a7366ff24cb30c1c89859ccb749356e6f4a370bb29c3 304502210088838811c7c0cf7cd216f2d7c916b4131782de52d9a18fe26bb916be12baeee202204f236acb368df1dee3270c5000a8e279d962f0c17b1fea1b470f86f37c4b57f8 3045022100cbe610a4ac031d8f83b1c633afaf5ce4034aec51e8017a488f3a412c9d2c3cd602204302837ac4f01b1db8287f7ea7e72261e20ea8fd90727981bb8804403d55d1d7 3045022100915a4d37efe2afdb98e499224e0180a1bc48bf60eb421956d1690f3bfa59019b02207060a04050d3f18110fd60c4f4798904e660082379367e1680afc7d0496d3a1e 3045022046d56f1520c4f344b460bfc61e6d496b30aa71246c4ff7da3efc66807178c5b0022100a37ef3048dbf10f5447287c4b8e90895be9c2650646682bfe995d61a3dc418d4 3046022100a61b425719a863ed1c197ea5232019d58597fdbc94dc7c939a8cf9c3728e9742022100ab0007ab873fb389916d5dc463f34ed38aa4d7d24a9eeb2c2041bbc3e6a45dd7 304502207f10d0e3e693655b079da65195a6d01163a492c8fa80c3a7f87bd45a74d51b1b022100ca9f864ad03b0e28abaeaaca160043ad60bc463641015eb9ace83ccc4d63685a 304402204a3a6ee21bfcd27018acffb631079f1d6a4da8b370d9d40120f91e487750be3b02201fa0df1ebe7066ec7d9ad22ada6101aa18ef999769b9557c3fe216589489611f 30450220685f05bba9fa36b74a3237c76c30131a3779b82d2cacbcd4581719a475513923022100b8e3d2adf071e12b1e460ddd49ba9e5da826d3532a67e2c6f0736664c7c0ab90 3046022100e7ff463acf49c004ef35ddf9a5217208601ee590fb656ca54e1b427e2660c80a022100bb9b0c3625f2f5f326a35c2c374c2a4ccaff41a66b39a2f7dfb686e9a9e62f7b 304502202466d9c10b9c08f631014291bfd1502841e92dc3a9c09917120b0c62091ef75a022100e5d89f5813d490b719d5d76de65b9ecf76886aee4a939735a4976a409ec0bf16 
3046022100cc8d20cf8cce7dfaa7c31a2085b1fb526adfd70bafbf1cafa766b90c4decff300221008e129b703d8c109efec68f582aa36d84c7cf52f24e41101831539031a8a0c860 304502201b50e6db80777fbd5a02b9cb8e3eb2a989e1e6a7585ecf70366a438f431da048022100b9edcb8986931830bd20f3c50bbbffaec7d575fd563863fc0f858199969fbe7c 30450220049c4c7f38afed5b11de20659fce95d5488b01ac2c8d6d61688ba2ccc9265d32022100c37d55fb72309e43a54312796c8f3b4ae4cbae304ed46e054611f11dc7369853 3046022100cdbb9dc213821ac90beec7baab0c8a6adbd71d6af19e2419d287d69e4dd870e0022100927031c39dd1f76d89a787eb36e18442cf1ac077620282fddc0a0e9bdc76c0ad 3046022100c2b57be1067a9cb46976703d64521a58b74a34c6a853af43fdf9e891c12954bc022100a9bb76954682e5520b508f3f2417214cd03514b0d6ffd248637d361c0ec98352 304402201fc0a8d6842356038d5c7f57f9d26f4016a0af0d7481274ea02bfff106ae669402201a3901a8d5db2dc9712a607abed4d9bd51faa5db0f9aa94633b887f9d6e3ed14 3045022047e077515d72f247d6b23c3d926b9518bea95810b520e65d48f3e5977e4b68c3022100843cc191ecfcd54cc7841fe3f4cc664e6994a6b6487000562f393a8171a2b441 3045022028d9c188f72a5dcdbee6464715f97869f0bf99e985544b260bf3fc3db5d52560022100bd612fb00f6fc03203eb6e39a15da4c32dc2186b2e1e5fa419c4f00bca630cb7 30460221009ce64aea026438dccf0b114b8e2d15b913bf44db5e8f6d0dee823844d5d1c5d10221008bcecfd7aeeafa3a9d6cb0d70d862950ed670a62f820ef15368dba6861f56976 3045022100bfc6cc4e07ef0df398ccbb471f74d1cd97b219ffabab44d01e73b8362212205002207f907c9a39b900f0130751442b5fb453489ef03148c6c40e1fce4dde10ea2c46 3046022100bbd7849045a8f79c5f8b1c5fda87e95caeb7bd24dd130f23f28a7b16c2b03496022100aec8d3d8b623685b8b165e77e33c9b2d1d9c43866d7d50c0d2e978b650a70dc2 3046022100e4f26b729a92872c6345f09361969094f7b5e9c1806268ed03d0f836015138b1022100b0718a5437c724e76a67ae661b2dbfa6b6d9c99b493bcd6681d1fe14c7eb276a 3045022100f4d68f09f876f63a8672f1612aef476da9db85f8f9039cbe935cced3c970170c0220657fcd184e372c2f68417d215655ddbdadcabc79ff5d244e7b8f1c653d79af7f 30440220333d70ca6e91c4819760b3f680faff8822003af587bbca38623cf542ffbd4622022060abe8cd0df504710b83ff237ad748da3aed60968500fcf2583d40c84e6c275d 3045022100c9156c425989ae1dc4c6adc34568e000d88d0ddff1c6b71379c6223b9ea5671c022047b54a583ca22f27fbdda31eb048cdf962e0e6c44a1079cf68910b1160a7e73c 304502201f7ccf282b17b774b669eb14d9438bc9c9ab23991415d08254bcc89673824558022100ba2d2e89ce76fdac051e63be530e78925c35f78419dd207c5d1eae35847aa951 3046022100804b1a8a95eb96af29556c89131133095c31961323c91b38b661e05a8fa646a5022100bfbfccd5683cf6a3974081972dcab044891168e142a9b5175c79bc178ff130ea 30450220580033b0499225fc813ac96d438731f800ffbe139746c141072fb759de42aacc0221009219c74dbcf5cd59d50886f89c50c68caf52816d2fbd1384a636cb45d3e3938b 30450220234501950dd4610d6901b1af95b8b5d65af1fb054db38ce0d70cab4438fb13950221008c2440e3069fb63409b6d0299bc2a2728a150ca8690af494694f98385580044c 3045022020f5cd436ec121be4d51a5a7e67a0c17376f30171fe5d964eeedf5ae85ee99240221009802de6a8e8d8964e261c7addedc95889cb415f4403a3daaaba2fafe7051500d 3044022100d86a43b33c36ff09c4cf4d6552521ea88ca97cd2d8d0582727d36322b0cd1e19021f1322195f23d4ed15e9c6e8d0836fdef5626213f5c185a118213cb8e188a5a1 304402206d43ebd164f47639fee7d6357f150ca2199aed8e73a843692be69d5fc4187d840220304bc6cd51f91254c31b8ee92f5c2263063126f30a9433a5bcc0ee9c174d0edb 304402207e562ec2f6daf9011aa711545937e088df0de20814afe0d8e8e062642001dea202202a3135424d3cf41f9f15ff36a5dc19884c0283676699f3da9ba6a7ad766f3423 3045022100d83b187e92c0e2af016fc2357f12f5d7cfb41f22410a58f90640c910ac794869022018b8b80296cf50325f7b160cda0e3474a502668dba8fe51a362d400eb9a0734a 
3045022046d95bf6973cb8feb1956ec5357f65d1995c98a9bb35d9e895f045f1a128a63a022100e51133fce9be05da34d1030e0e16ce5a2cc5d1e980f1e9df28eba42c1b802b3d 30440220529a8d953ef855fbdb7082d41c29b1c02c263284d2ea88f1c92b2626c4ec05e202205dd2d2c16ab0151a19b3c272b6b4847a9a0bd39b1b125c81d1934e5e9c99e7e6 304402204b174991095d32ec072fc9ebd1495e28752846002d31f1a786345f890d3e863a02200e4b33ba87eb01e10fa3028376ca3e06dcff11f76a021cbc37993894267ba9f6 304402207df695c4baaea6dad4da37f1ce25be3078ddfad41b45b2cebd947924dc5c4a5902202d959b704cf512429866ccedc7000564d2ab36cd9874a55ea4828c20658d6120 30460221008319e6acf0ea363fe19579ae231652460f001149aac7983df69c385299c132f20221008171b6e0432d655606461cba349217ec174c2bd8bb98adb3625fe37c78e6fc60 3045022073be616b232bd0431038206fc8424ab3295768872bd35af90918fa15552e09eb022100c7ee4c1c26393447d384e1268b9117994b67df09cba74d3a6a7923e68fee81fa 3045022041569d3e57cdccc262be18f0ad3ad2427411b33493cbd964eb42438c12d72af2022100c34647e912869ed56eac5dc433cb811c8d6e3d3bab0c835f563013e5d0caeb89 304502201f581cffb096da3809505958ed12d87930e49e83029c8d0633f5262923fea1c10221009e6a83b04c199de789ab112cf26b3376f12d9154856bfe62927604f5493b9420 304602210081dfa3cae82713b17a71d456f81e34a326ce9e05644d985e9d63ae74f9837266022100931af2ee9acf6ac3710eb96d3f38f5cd1f42ede759c017626a7fc9c2c0a33748 304402205d4b29b92aa5f2823d305f46080a19f2b50fc06fb5029dd8f1a02c30385c8215022012205ff16724ca9a1e1a94c981618b1931210ce683a05ff6a5ff59171ba05ca1 3045022041b55d9f67150308dd6e81b00749d11c35f316c6cba3ae152758a37f8bc46b94022100a8cf32b4ab81f322650cc9291b288af11232e0cfe9e49599f188fb1dece18e8e 304502203e24b0be6c920199bb5f4cd500f37e2340257dfa812b6f0998f7efe7715b4877022100ca5fb000493937a2dd07f7d1de96d55be49d6e7d203e2ea5c10934142b5ab25f 3045022100b79dc5ea567ad225872f769314d5d1b821dd9619a6631374c4541f94fd22b9a802206a104e9899ec9a20dd1af5c0808f79234889da55adc479e3f2b4811535597115 30450220121438d7ae9e2c4d002460b2bba2618fa7e8d6531e5267e6b91aa098b662a59c022100a64eb2f2409eacb2ef9b4cbd102a550c10fd8c0f1916124a8acffc3be4701ad9 304502210089f9b73ea3fc8cae06202acb5084a222ed11a02f37d1aab342321dfa39dc67ea02205538af9b9ea7cc6a5b6774eec3c30092f21bb9efd9031ccfb14ed79b574c0e87 3046022100d0979e81b1fa957f8a260be65cf061a7a6a88c86d021cdc2a5999c82e03121ab022100c16aa50064fd2933cce4eae347ed1de1415e4adb76615aebdc83e0781bc15fc2 3046022100be4d050dc189972daae7a85ef50cb159a74ae8f7ae519c46b39022ad1040b79b0221008252f3a41a2aa978963d400cc57a493f69131250a21b71e217e1597b9ac2ce2b 304502200499cfb9a1ccf686f992b7ce8337d8bfd243dcd7f7230351f3da49d1d2c9a45f022100c6a95dc3ecffb8b1159eb2484eca020f0fdeb3b8ac0a31ab5dd8bade3d2ea1a7 3045022100f8e3973a18554a67606a704bd7848ceb81c5c7e7dc9ea876e1c328516d0f596e022056f3cf0c8bfd9177f22a9defa5137af5bebd6c2526b50309d3a9643420b2615a 3045022100dc659faac7aa2a1aa58df0afa9a77c5d62056a0a39020cd56ce58c793944912f022049957065875e68a034dac098fb6ad32b386fcf662b9b58137fd2a3c9f8884cb7 30440220738411923c8a38641c5016ef7ca91f933cd8622d09cb149a35b057ba5b773d0502201a1192e932b3058f8ee699446e0badf67051e15dc1750f15ecdf386ad8f497cf 304402200177cb1aa95962fe89510e1bde994126d7e528f95d6ed3e935394a9be7f9b366022004759bbfe6671cab630e6d4a609fb00e26cc23096ab408628954559b15a80c8d 3046022100f0845271da1c849d9ec856b5fc6fe36c48bf7e6c6eca0157fc6d3eee59f8e8b6022100ac6e2b30abf105062b7707b7e9f5f84a36c04be6ef114bca8576a8c9f1e37eac 3044022022abe22f660045ff41056bcaf3ab845db625a47b458f807e38852e69d466366f022024825561217f32bb49ab530c98f5a38c9efbaf27487892f64f2dec56adc22940 
3045022100bf31373ea432e65a0d7bf0dae8c3af568dd4d8683cd803facaca928140e2b64002200b4dab3c5cbcb7e5f8fa3157e79ad9ea3a6f755021ac10fe09e47dc3d7364cc1 3044022006454069540592c16cc02c00abd723f57cdc44aabf6ff702a46c74bc86991c1702205c4cb9ad042bf673b51b4d6198e0a4c9ca2de11ca45f5807322fa3f495d3b9d2 304402206c549893c539166251824e9dd00bbe0d9b7d4d6f6be0e3dcda45f9fae68e74f302202bfe6cdf9f4b61d64e38a52ee691f3c94acbe11b68ac818c447767f865df65ca 3045022100daaaeba3ff81a0c5a3a544ee8d2c6871fc1be59b7bc175ca394bc5254250b8510220173e3ba14a0edf8e045bc9ebbddfcbae35d5b49262372f9386645239f6379c62 304402204ec59e21a1ee77e779dd577b782a172996e22051bc3d54b40b8b941aadb2968902207143d111ee6f581aedba3b12797673b4996ab53bdf087dd65228ce5f88f29e12 304502206b1e73b6ed5f21e77723e0a306cc0b074a13c01bb75467f0221dd386d7f33a790221008111e605b6a2a7b42cea12e84e764c608898acad2f741c3a7e943ff39120642c 304402207263f154203acc8c605d051fc9b472b91f2805e560aea08f3fe3f1bd9141aee702202cfbb96f4ef15c4067428cf6d0a11bedf1c7a6916921f58d610f0ae3c7190911 30450220371a9debbae29568f5faa21d92c31ab70b676aad54c6e76340ea9ae5b60c8a3e022100bcf2c551ba91f7a896d36a0b297e1b8726297be4bda1da36b0fe142f2108aec1 3045022100e937635412981d971fdbd26eae556666ee3c91714d3e74589f694f793064ff9e022070a947d144d6f57573194825506cbdd05829fc3f209a7f5cb3d07fed5c2a4576 30440220796f899fecfcf0c267e8ea0fc6c80d39d958c7b61a4358910948de177c12818c02201e8a0d3e277f970ed0e748310bc07b311aa1b873ed836db5e001b6b25a6a6b9c 304402202bfdf882926ba61dacf8605011ea9e5c58c6c2332d20db8550ba504feedbd4f702204f07e16afdb12dda68c5481726bb186bf39ea8e126561124489857729b5e28d5 304502200fe857f296aba30e730d17ba5613e5221ca9d5a9d36f075867b7c6ad85392e7c022100a87319da9690cd3540d140a2f50d4a08604f2c50097d14039c33f46010dff4c5 3046022100cdae492ac79d953840a36b1cbb6d35610515f14361a036864b32bdf6283be740022100abb1f0f8581609056567d6ebcbd64a566795e05d813191ae3c23238635539db4 304502201b431902a999838aba392192899db3b3ca1dbcc5c34a44f7533b2be02bd02377022100939401c84d3310e88381e17e12ccfb5afbd4cb32382012cedee6bee67c023590 304502207f8864b123ca2d75395e9578c981b8f4da6fce767df8469664992deca1c67cf602210089acf15e5b8421cd23dd82f05da4ca020f84d97d7b9419b11491e6fdf499e54f 3045022015abe9289c44ef06ca61e9efa7ba446bc64d47bd8db5c0d8314ab3c738ad919e022100d35c4d742ff3380d1c512a27f046de8732b40de00003e1808d3859a5e27947ee 304502210091124abd8a5bb2e4895b64009658a72335948b51c927e345ce9cfb9a8e88d04302204684b5f90da04bb9f9107758faeb4e3d0a391d95a8872a2578c0c0601d5925e6 3045022100c58e86dbf7347dc832788024655c9e3c7ceca6c359956dd4d13ccbbb0cd71c70022063aff29dc190e88f2746b418e1bc21c7b14555b7dd56ba49b065ba423267e207 30450221009b78bb000cfd929241b12ab4e7dcbc3a7057d8be56ab1503078ef7823a4f6b8c02207b9bfc9037cb199242ca4ea4386ba8ee316a32611779feefdf37690eadeb9664 3044022072a7766b9dae113cfac6c9f2ec6d78f0038cc718414b7984780d2de4c5860413022006d63d016518805df4d10595b87138713609eb3efc8efbeef57bf30fbc350ab4 3045022008f3e0e620b237870e4bd23aeae836181bc0fa986f2cb4941b570d2bb8db0e78022100933e78510592f0fae5fccbc3d1b10c90480c83107e69e2feb07a4e307c43f6af 3045022100f41b238aec636882d04d9b6ec515df2a60995be48ae17620f4520474669eb357022050fe926d36add0f82f7fd9ec6c2e8b600f38cf7c2ac52f32c6afc8205c81f41c 3045022033968bd1cf4a36736772a6c3693576773585a345a0fed107c69f3a767af528b1022100fcb253872846fb68b30a40025a8b8a6d6e2b7c537e2a8813bb7290c6a80eca05 3045022100e9fa37e7960480b7d6df62a8747e278751deaf472147fc083d0367ceb67abc84022070e2e7802e56d1f9096bbba1f21d44312b16d66dd7d96d0f3e01e94227d9d96c 
304402202c150cb82330ad24f5a96d0ba9b9f3f7a37c60117cbc1f206e5121209cce0c1d02201404f5cd620fc3fb36b8c5d89d1ceec79644c03106029f609d96c0fb1e620336 3045022100a702ae3f23af94cc4a5aa7aabc27d9ffe22b815dc93ba9f4fa4a10564db9b40f0220020c06f020154a38a106d89eca20d2f19fc3b376ae134cb457ed284095293c46 30450220745165bf39a5dbaaff7b574e39c54a5e671b5bc8508a886abceb3dfe9c5ad6fc022100b55f9a2f1a2aeb28bed5ec1a7707710dd58fee8a74edcfec4a1bdfaac507dc82 30450221008e527c83eefdf9bddccb2af8cb5cfd844ae0a968886c3542ce0813f37228411202207faa63cc7d41b70bb29b162c7f3a4aa0b595ddb06b1473889b060cb289364c6f 304502200eae975a82e88c2039d055c3632bdb4375a65e7d1ec070efc515c6dbdcb21b1a022100d8bc34ac5c3a0ffd5a8acf8ae121b5ed8fd061599827a12addde57bd03dc21a3 30450220363a34df474eccb4ca576c874d5a1d1ce0dc757e94d587be9390af0d337a40c8022100ad5efa8e8045fe87129fe13dcacf37e238e110f2cd24bb337006e7eed9f8958c 3045022100d365f452ef57423f93f07635990571b1b28f42a874ebd7e4e81cadec7ed7ed1302202f96143bd31cfcd787a5006121410b655d58126eaa85a15b731cf8c15ac13167 3045022100d9da481504523aaebcd32fd17d9d4a47bb28ed4ebaee1309c990a4fbd9840f540220698e91b0998c4e0cb6d3155bbaaeb61fd4b5e3c0613b9eb28328795e0c7e93b9 3046022100b30e0904689b6ddde334fcd3bb173daa006f06a05bda1ed2a2ac179c73d866b8022100d72ee2375d437e481016f97d7360578d338fa2749b1456e9242bc7de0af9a2df 304602210097950a991ee6ae928c4958929ab51f55828a6a40e77bca6c8efbfc9dba4230a3022100b5c51441b88836da546353f33d91f173009b189fba3a52210ac96bb3fd516057 304502200ab1a79ef3eba27a13a649bb7ee3d88e91583a21e3b6c4cf13887325ff7e142a022100c340548069391d23311de63d0e985c155e14afb0b122b2dbba9f901fdcf2b7ce 304502206b1a576d94800530b0e205d9b7f901ff856505e90adcaec86b039c3fd8e0a8870221008e83ffd5d416a1c0f0616b9ea349e7d53b6813a25618a20d8a5bd655ea85c6ce 3045022100bc8b11417b8d9b7cd98535e8a3449854280c5e37e7fc4f20b749dc059bc7806d02207f4b166ccbaf31d00c84b465d5c5d1617d5781559b6f2a84da4b8a1094834c28 3046022100ff81d46050527ebe86c5040e05195304c350da318207a7d609206b17324522f2022100872bca5a3703ce0d2caa63963fd270ef7ef3330330898abe559700bd30b40a3b 3046022100f4f31ed12a9ae57bd8b0e38a6a69c472ac61883ee2f1dbaec14fd965c2a928930221009ed09d59a023ced0722c53dff16bab0e2fe19eeac5be8e729efd187d0b7864fc 304502203dd863aa5ab53df127326e49540b9898352215e3f12e0d4ce16bd9ac6224d0e80221009c0ff289f5c2237f4ab904f0a0eae1fdfa87d430c0f7acea049f8f5979cbe84c 304502206bf425c6b1cac634c26b845ebf1e3e6fb4473d8925a833c61fc971ddfa9908aa022100e71482bf38a9d6a293e812cc1426b22fb73aa69011526d84b446d2ec574e5aa9 30450221008561c684e707dca95d0bb70f9ec78fabd1a53a7cd5c887a1c17ead60f07cb08c02206b1fe33b4d01681841f199133af496e160a0111659a8866ea3945248091b2ac0 304502200e80b139816cdb179d94f2ba608b8f829dc0a0dbadc9568061d5b632730513d0022100d64990a740a2ea2d67655acdb65e87b3f45db3ada1a46040ea44e76d2b002eb2 3045022076034a13b67c1286387a34c0f8732990c640caff1554536a74a38f650f567be3022100dfa38be60c14e8c749e1e61d1914f1c547405d5a5330dfc84cf63db5ee5cf286 304502206478d243fbdb2a0c427acb2c52f7da65868588f34810482032c27a577bc7c8c9022100af4931c689088169c1449b7335e59735338d685e1ac35f83fa9edb2b7191d64b 304502201fa2913832e92b702f6e3e0a12fd712ea958ba6b28d64936643217b71b752e71022100e35b3c8b65fb08e77c56d7d10faf031053a071196cd64e5acf672758de71d2f2 3045022100afca97e0237bf2993d7ae0de2e1f785d2ed5ef92a9f6926a9765315d935d3368022024ddfc7e1e693df7f2fdb1375bab8f0005818d2ca4d4ca81ebc8134cceaa1e91 3046022100e56d788139e31d8767af5293858190b3f5a95d80ad78c299c7487602f8184e22022100d6e71d2089484c0c4a6bcce8994a3445718ecceb76aa40bce5c53f0f9d5201f6 
3045022008be142689e8a1d558d3034b87461ea31b014fb1797d7687a24495a06bc46b97022100af8a7fbf10aedb080ebc593b78484049422d862e4511abe9e1c7dc6b229c97ec 3045022029b6812bea941efc2e7c7f983a6ec02939cbed1ee73ff3c9c9036183c9dcd23202210082137beac4740da3a84c5f21562f8dccebbe7265b15beca378ce71d9cdc254f2 3046022100910aedbe24c379f2f8ed343095294dad95ebf1c7342232ef1509dd2ebcfcd038022100cc9ac78c154686af84a832d7a44119b44cfbb8fef5285ee39948cdc4391cc518 304602210096548edd7c46de581a72f9f8fe9e33271ef8895e0d087f519b9f63c95fa99ad2022100c528c1a18503318070b37550e329b9b9b7fd0ea6e8c612a8094d56164d77b8bc 3046022100c08ff8d1f05f7841475e014ed406ded9bd8dcf17046967c2c0fad733a4781e10022100ebc9434610f6a627488513dc7e5fba6d9c9f764e7fc59b2322eba7653fb5d0cc 3046022100e5605a7cfdf30b412dec48eb3c0bd01c590c5df53266aa47f6cf6a307c9fe1d1022100a4f0488a643881b4afb43167d1a28bae409ce04c087aa0aa9e9546e64ff8f1b9 304502200dcb2789d24a57e4ed89b31a436ef1da9b3e59c8a41e83ca277c02cf68270878022100fda45d86376bb74825b5bea06e8a63cfadc8132a634de0d175f031bb8b26e03a 304602210084aca8b29ae92d991309e271507ddf9e126d34183b5a23135ec0b4485a9ee369022100fe8aa3e73db33767f3d970710fd813fc2f358169068854d6f41534558ab033f3 3045022100d24cb6f412c468575104593fef8121eee4410f22c5956bae03a394212b596a2c02203b87736c8b09f15f07a3e14a98300edb55ef4b7563342bc090384ad2264359d4 304502210096d44afc6630ba6086c49c28e198afa85f5ce6710f0dd7f116592c7461c5003702205f1130720a17b569fef631f41e1bc539657139c9475b02d49d4fb5b21af032ba 30450220637763a1127f0d0ae683ea1135c4c8f9af85c198716a90509bc06eaf0446c52a022100afb4694fd1cce76f4fb85472c2ea090ed0d71ca5efa9ccfec989248ae057bbb9 3045022046b30339b72497129fb56957b576c6cecbfb1da35f6c8d2ff649c1a6283139220221009f0ff87ff06251727be31dc346469486ca879766ec59c447f8ced82f73f7a0a5 3046022100cab3cfb51b05bc434a1cabde7a5abc738095fa231f886f55d806e644773693630221009a47a0ebf32c3b48d600a2f3926228b47caa05a7dad6117d51319e8a445a0107 304502210081f2192cc086ad695bd7dd013f1e87402c620318e0514f21f769fd7ad4f56f530220601ca5c69d10c986569088f4ef8a38aef02744466e5af3e195f7253a5146fee2 30450221008a4ca936a0cd6eb328477848faaf5adbd974486a2ab43ed6da0b7322bdde216a02204ff8c93b20d329bb7febbea851cd59026e8f82ff6090186419f38b4f914c6b3f 304602210082967c145974cc2f12e0890cb202bd45d3297f1ddb7bd18f30c81e182bf840fc0221008081626a98c94799cf2dc5c6408bccc658c71fdd06477609ba2c72de58c6a1f3 30460221008679f0000cdc8a505644f09382e41f93dc33e8671be92d80195769288e59f37e0221009dc56ba70b697ddc531f70d687567c81a04a6d75a4fe85a934c008c75fc7e8c4 304402202e00f32b128a2659aa1cd8b1447c8a5f22f316cdd68b0af3df17fece49017c1e02206db7fde2398dd633b5d32f639afb006c4f42ccdce4a932db7a755f7ede040372 3046022100a702e569f22b6a487649a8040b3d2208b059b95339b1b400788183b1934c184e022100f838b59317492d84412a9d36025df5ecb57f7889c15ff0b2451827356d0340ed 3045022100dad90be29294e5d0bdfba33c6dd17bbe7979a820688ce4123deca2c25df4db300220639c85fe9f77b847ed00da8476b75fb7cf8ddc1634bb5d99bd54df596690966d 3045022100a1635f22d398ba81c028e7cfc20c3575d07cb6c0f6d697ec1eda1a3c40089ec702204bfe88ce59ddf215369e395db48bd25b945180283b15847e74cb790bcd4bb0e4 3045022100fafd8e31514a550c825ffe27e6022e8b3e62c139148fc73bb251ad923508317c022045e94e246dd3ec7e8963c6a4d3b6d72c0b2040f17ed27e1c04551f52bb6adc28 304502201b647677905eeea5d0eafcfdc5df339641a8fafac60e0b7d167a079dbbfe9935022100c4ce401a75bc639a82a693d3ec2f225c30c97d5d462a5d765008dee3ba17b21a 3045022100d8c21df3f50770857ae841a0d10d086e2ded14f1bdb5869b71016db9d20d7fd7022046c955b3c6d451028ad3278e6f7147d12bccf4c0958e124d7fe8b122e4f9f052 
304502203a0e2286a3fc64921093671d8f54d40220316aced78a6b47a6253bcd511ffbab022100ed29f195d5c8664f88f8691ce1488fbaf77f03e15f7c184cc8720a9f93fc26ba 304602210092842e6ab30aa33230110e7ba975b5dbdd937ebfb638f1bfb62ec5acc7bfd32f022100c40cc6757af45d09a5263f2c45e2faec9a12a33962ba322c1a9d219a5ad04b7b 3045022100bc70073957600dd38d935fd9e9b9ccc4fbcea2ad0f0e9ed125bae84e3a31d74702205375eee00d3a537de4ee55c92970f536e7d10c25f8993c70745576e8bbd55c90 304502210094914b2e4a0a73a5f5a62745b2fb4f90a6ff7d1da1dfe086f23b874ffb6a2d30022038de95676a311d7a381c31c2a9fd5d5f5b90fd6b8911554a84fe43385d58b7c4 3045022100a652453690726fc6cde4a7efae814dff38a966e16f9f16f6d24c1420585cddac022010e1742da6f37bc7219729c4ca6b29f6a70ad6081e9f4feedc852cfbf4a81d63 3045022100d3bea368d3ea98e10c8879f2c6821332b084185b6736111390584f4a21f4354602204b6fa2e6a7dd40022b1cbb5b3c234d5a2dede82d1a52b08520778a991e1ac159 304502205ef53cba6906d1582cd2d1ad744063b52597913625759fb0b5f78dac67dc7e7002210087a615d44ec37d93f4e665f3dd82f4d2a6bbfe71d91d28c34e2ed39895fb7f1e 304502205239be6c695f0bdb85fb096239baed19ad51e96e428a8cd49b8ec823ef6b7664022100afc02648a4bcb239bfd4f19946ad0d0bb2a1ac8062698d2278a1619d637daf1c 30440220356d9d33886fe84d70146df149eaf3108b77d2dbe8073d2298da38f05d9d28a20220076284e54e0f94ac9133a865af5d0b8ad3d48b5cf1af9d1f3b4661cf4cf14f21 3045022100a882f542065c8a166ed9ac0028721c0ca0d309a81e627e56f0197ba06827615a022017cf22f94cde472eaafe7aca2e1a2c84ea00442febc8144b8d779dae96e1452a 3045022100a99d0fc853da583798ffe539e12bd35e686c56b7e32bc99a2feef861843aa76b02201a7c4f70ce6e6b4901eb582c8acdb472b530c25b72abe68b55a27cf9474db3a6 3046022100936cdc7b2bbe3e5c729ab9ae40fc9f8104086f7fdc0c72304ddf54239bd8fa37022100aecb6386382478cac7d2ac3c9c4fd9c4abd8b5f3d3883d893c3c9fc7e606509e 30460221008fc60d6c6e96dfc70e8bcae043e18804609619a2165f997e1db6754721d4251e022100941e79519d1a4ead5875131cfd6409f2e2b9af1f526d3d4b556952d4e543175b 3045022100f827a7dc0b7f8933295f9aa3a4f4c0a7ef3eb2e69b8ca1a354e5f8cc2b9073eb022007067c5644792e501f548e7bb57d44692c8b5fd283500a020cf12eee09e0d0d0 304402203e9582ab97b2562fce86ebe844da0a73a6de90b845a8a6b85a4463b305f7e37102207ac3354b73e265df1ef2a2677c2dd4bb347d20bfda7356d6e8336820642f7b47 3046022100a78626aa42eef5976253507d0ccb57cbbedf58e128efbbb5b689b21d08f7f68f022100801e8b2991dd46e5b143cf96ab9f2de60789ee4ae2d5927f02f80516f584e604 3046022100d99f12f7b52e7654f02606a73b76078bc7918d90ee4395008134b28adc157555022100c3f90309a651d68f24f7f115013271cdb4b99e1c5c3bb47c0d91b77e1610ac9a 3045022100c7d146839af4ba82f5792135b47dc73553955df27978385a283111a1e71e2b39022037c67ca28c72c50de286de4bb91fb5210deed30a0be184ab6faf76a20d803159 3044022075b65337ed7bbc3e301de0009a556509345cdb1ee5513073c057970b4e349aa7022029cd721d970b65b7cbcb8e764e7d56efea479df0e6feddba68613ce2acb6ba77 3044022036ba97a469219c2bff9586137faddff0b720945fbbdc9b75ceb7df5f99d4f70a022046372d31069ae6a398b112218f3701a7cfa0ddb6d8042a6b2f9f06bdcaafb7eb 304502204030ffad1347c2e01cecfc474d79176e976bb1932a79786785a057e39c759762022100d55a9e7ccac6a25279b22fd93a896725fd3a00f96734b4d4d725642678f96c1f 3044022068673353406049240456a9f68e672ae8461b563e5826d4489f76478e0cc1b8860220560d556c3cbf6ca9235280a3768f5a088b0ef7a90b3721c52dc71b5e33f3d511 304502202cf6deaff5bb40387aae7e7a9407165e7d6069815992045fbd7fb2cc4a3d550d022100f51132c0ac72b90713c94c9bb5ff082488d6e38e44eced9632a369f448970994 304402205f2cde356478b4cc4a35da8fff8d484e840e6e4ed7772236a5567d21282fe16c0220318e3ff996a20425479c044458cae26befd936ef721d223802711c6dc074bf79 
3046022100c60eb1f2f6eceeb014644c11e6ba2e30a3e37024125f51b388d51959355da6a80221008c85187693a586d1c5ac6382bc02fba4a68113be1a7056e939f4502a1e175b17 30450220300ae987889ba68cf02fbba61768d8f9c69094cce4d99f4e0dfd6517a1925dc8022100b2881729e367ff9e8dbdb7dd646a3ab1e9aafb6b714c58ce9fa037b6ba74c0ee 3046022100c968a9a472351004d19160caf198025ffaf4739b91487e62fdeacc3e81f34b690221009680990127449f1da3655dfa8f17f22365d98bb812c6f6578c593632c5e93d83 304402207588eee536677fbd3301be5204e5cb73c6bf226db1f20e9a984e5ca514d903ab0220374febae04659eaab768499e2389bd36ccbc1cea52a5574288aebc23f8b2d00b 3046022100ce869c853a88e5d6a241f3b6f6590fda418b21b5429b6c2bd6c1d965219ed6a70221009e0d1991ad030a9bd7d92ac0fce6ef676f849c02808c57f0ad9cf98d0ea6bda8 3045022033fdf7dcf125763aa50aa6c60300ea52afc6d270ca41a3f4f3032391fbb02ff1022100edfb0f8fe6587d70e695973c596a6cf3c86606d1ea30e8acc591fbe0acdcac4f 304402207e4671943e29a4d54f09ddfef4a292120cf90733c74b4e7fbc08eab4fbdd359c0220455affcb6ec6388a65e24fb740da65d02a3d3717e9f141a11f5bc7a02636a7fd 3045022100e34f73409559d8b08eba1e5645ee873c3fbd72162a827af543e2023ea7ac610402202b09a99d2808470ba69c793386a2008e163a45b0021d223446afc0d6553fa4a1 304402203508df62dacf9ae6dd6648d8d6494985202c7c55927357d66d6696bdf669bf600220614bf7fda398a2a0f38f971efe5b25e4daa9e95a2ad61dcc662f8ee4239e2011 3045022064e5af7f54704826bdea1c62d64e9bbb990f07554e5e59c3ebb17a230f3a3b55022100f6ea71ab220a13d9264c41e1fe70ffe5831a33edf7ccd0b8bd7c833590884909 3046022100c6c9b7f71499f675879fd729b050dfb4cb3556544aa8d12bfabdce3d83f29df4022100fb57ca097036c808c25c794f0106f81c3e20c2d1668a39b1920ca87fcdf3bb29 3045022100f10e44c49f60b6fbb55d49868eefe3e7dbcada1c78472f86fe5162c77d86cff702202ae1d95ebdf41cad88f2c20ac722957f6ba73c105573c0a2ba0a4fd90bdb83a8 3045022059d8c214604c45d844ecce47813eeff05ce4514d8bb3b4dcbf105c8dc309f89d022100a15678af867535aeb0cc16b0a43905656d0aef2c6a9c4793cd1f5c35427c1b55 3045022061c998c6c1cfe4072e0bc3dc79f71771e6b3968be51ba06aeee0a775e6af2277022100bc808b80d8846c550930ded45231e550469d860b9c0a16ce58714fb2c3d18bf7 304502202b3a851529d9ccb25584a86138f3244b16feb9c2092037fb27295c5e0f61a071022100c08006c93c12899e43e140803700211b533b77030c5e264143484b10ec637ee0 30450220158023e40992a447c27cb2cd86513421463eaef1af2a50b5900849869c81a5d70221008019743297320c42861e1b19ad8d18832f2ed9692e7725f5d22f4c8f5fcb13cb 3046022100a16294329b2e15494612a9642ad6321a60684d5c7498ca1d6c885f05c7e1d917022100f36363610f1ddacc472ab0256f7c98bcbf5fc493765bc7ab8178e6893f0e3586 3045022100e8220d5e005d7404901ad4c32fd8fbd0cdca5ab4470d5812d2fea59cf80e546b022002c72d684ed141524a65976fc50fee19d3d67f0a1a458f6674f09724589a6211 30440220180a75388d59aa0c4176dabe111e1a9b99a2b5cb158b2cc0e668b2e7e00b95e40220746a622575c321a5d44be54f596649622eed5633119cc6cf7fce1060d1bcd8aa 304402207210eddd4ecd127639a7a91acbc5d4829d3e8e8d82b4b6b451ab1a05b08f2a830220430584c522c3f4ba0716aef4f12026300215d5172bfa844f49b2afab63b3d83b 304502204fcce613fe35dd47787713e6fab18d45daccb4e3740a9ed07e36515938d2c1c10221008dee00d891a05297af051e79105801f0916cd30f566504b97ad2a93cd6768a31 304502200fecb07e72fd09c7429fc9e35bcd9d2216059d7fcb79ae56896f4c309d487fd302210086ef63ff88b56d855b7705f4bf3c8a253c39360a71af50324757235314f4cf4f 3046022100ca0548879bb5d6d28f89684d3479a4902d8216d5867bb6603003094ef357e690022100932af356b40c4da6e161b476c7a44c0a2a2fd93256923088976cbd630e0d0115 30460221009f8335f85c505deb1b7de4020a6931936a7c3c56878caf95ca98905f659cdef0022100b4a9e33051714d3236a495c7a933ce2eb120aca798b86644a7b6b83294cc5688 
304502203030b0fb9098bfb360d264c0728ae69512cacbc650e3ce00610fe48bcb226f5c022100be450493baae26e4255f927ab14f59eac03e4db589dd6ed368e2d3377d2b5278 3044022016eab5092e83d97182bef0af610efca0ed432a6b0b47da7446844f573cbb4e4502202b78142d80460a1cfe477549b869d8c96a77138169db2f3499985204e1f65103 3044022025b7efff30b8b33b74ac910407e3f65ec18a8810163597a9f83c240a463d9e0102204b36aad2849cd93e3ea203152f8375c2b739a0ceb32b9eead6bd3d7533a2bb9d 304402205634c11cde85c204a8b479cafb44cb5d9c7194b32591a22ac7ad8c75eb4a11c3022030016a4857e0ff681bd0688f1b4c7e88f00495ace6745b7dab0959d1c3a34f44 304602210084d513880bc08385628fd99e56cf0e82d7bc45bdecda0a94380998895a1daf48022100d1291bb74c457910a75d352ec5bdaf93c7b27fe75c09868197d115e89f06d3ee 304502201d934bce53641da15308d95950ac6569fb27ba87e7308f885a6de362ab2985790221009c2a231b5863c07d7ac3bf33ea8748d284e704800e7a88f9dc038fa64171308c 3046022100b1b2fb5f294be7d89e0a97a3625973ab292e537a8d3f68e7fa7167e2c0dc2785022100b619cc544ccad3eae347d7292bfd87319370b19cc7cab15b215c96e7f12761c9 304402204201a0efbfb9f93c78b4b982cfc28fee50aaa5280a1a237fea8e7eee7cc417f602203c18e5d6e3b602474030247567a37c12af7c1f97d1f556737bcf95aedfa9199f 3046022100a017db751dc2930572df6f321f951ffb9b8fa794d121dbd4e4af58efd534073f022100b4e302525e55fc2205eea63b854ef2b2fe9321134e837173a11348d47c99f277 3044022027a31a134a0b2356bba3ac1a17d206dab238017ac5ae752e2d3eae82202520e902206c4827e5925c806851c8941aab5ce51a1d8e07eaec5474cf20162acce844261b 304402201b0be60b40c67bfffe5c18020d0a24e27b24ef3d95fea1f03d5b288ed23cf827022059040debccfa51b596cb4d3abc2e7671f1aefa86a0f0bf108cdd39633f48c769 3046022100e2165ab06443cc46eea794f4d8e077ed3e375cf1854ef4bc1fc046a727faceda022100f86a6b1e88089d3916a9350aa35be6a5794c4d43a277ed61a0588b7c67085d45 3045022100b70741308e8345aca3e046f380c7afae55b916990b13293c90bf0de91ff2024502206b37fdc0366e32d63b6383e63d2621ca49d50e78a5e2c0b57a0f8d60273eaa02 3045022100bbae3c993f5cae2afb2653413125606b4e8152488de8f829a0f69977e140a7550220693df387f77a51843e799f26d9b83c33b3a0d88cb41aa06799d625b09e1ec5d1 304502202d1445e0f22d539a32fdb42d64808818cde20832a864f85110c6079d064e1207022100f17fe1ea9c090956944a4b2bdf4711acd1bbbfdbffa2820b10a64a3bd81a232b 3045022100a8f5dca6f69076b52fe188ec94fad946b9dc0240f7b318fe82559c3229ff8b58022036e7dcd19eaa9f6c10c8a0367aebee5275b46a795a185e74e4a67e90b8664ebf 3046022100c4684cf79f2f05424f63c389bcabe6989797be8d8ee52bf5059cb292fa2391a5022100849b9cc3e46cd7ae1505028e06c9879d2fc323ea14b35592c93b0262a88834ca 3046022100fd72bdfccca219af1e48b30db4570642e386d0044d6168b2e1d15a9132b2abd8022100a154fe34316dea9c51d9d8548576deae4ba931a69f1b37ce8c40a7404371fa41 304402206a2c52860ca68acc445ea1d317547731e71e250913ca3e7a8d7d88fb9548e6a0022027dbd47e7cc133f684125a3fbec518f033ea94fc84ab91733ec43c2226ff974e 3046022100fb91fc65b893ec87fafd97a7c813a9d2c2d3942c10acb11f78b0674543ded41e022100beee0e11a65c1beaf420b9e40ec1cefb2b10fd66e810b639ecc7e9750e72f3f1 304402203772878d0c79c2f5b6be8693b981b7dd81a7a01991a13939e36f013f3d37e050022032dc18169c70e02f4dd621ad5485da3125f7d1ed910792b7bc3fd13a03733f61 304402201d7dd9107cf8191ebcefabf04d0664cb5305cf7228b8b4a54501a1c60f7ff5a402202e8f8ce40ce6053691f0f026f6d5c5ccf45f717c449e22bc559975bb12c3b163 304502200501af8473dffab953c4bde50446984662c34dc05d8e17af25b8a8959fb7f48a022100c87326bcfe6c187744f7562615d2c2242413fd9707a50c748b0cf609b307cb35 30440220568f0b1131821db1fa668e6892b875b0aa7178e625437fd41c62d4d624c08c3502207d1be954ba9c502e27de6876fb6a13b642b4b13be035552bfa187595f00b9a79 
3045022100a0cd629ce04a678a4b9a9512819c09ff790770f8b0ca108b06504a571fd6beeb0220654d19ca973446921a4cacbc6984ad0f6cb5684f7867c16c0a125364330f0620 30450221008204a6d46923e141045f1fb1b0f783a335e02b27babb065d7925364419f1dcc202202a340a73cf0afa8c19ab4fb6512de57a1e3b9a491bca1f5e151e11faaa976478 3045022100adb741b271a2fd367be4b2570b31140734bcb8c507b2526aaa462e14df08fad20220535ec810b5c472e070d6790423688f8a342b18952ac9fd30e0cf9d170c6fbfea 304402202eb478efac5f7e261ff6b692f9e2b93349fae28e378879dd023648a930fcc59d02200e205f8cc36350d3e813b8134434b926417b2f666a6b2bc676d8590f0d755d38 304402207211b764b16d10a90288bd3e72cf223ebcb07243b3d140ec4f4e1f6df47a841b0220463646e95d9683c4cc9ac63df72006a73b923eb3a8f105749732986211487cf9 304402203ebcac62ad8fa75085505f473424f4043ca9690bd886060e62d9c2328cd1ea6e02207b76bd39079a4259fb6b223e4ccb8242fe729c2384ecde8dc7f3faa08aef7e12 304502200250ddc87c2f586504ad4be952f5bce352547d84931e8d48a620b196d4d1f516022100ca5327c1d328ebecd4249fac1df5c5e6fc2dfaf8384692c420fc107848313c6c 30460221008096fce253e2e9a6768ada59bff1dabf30e5aa5783075b1dbf5611e2d40f042a022100d7ec013a340328bed0c434a7e4c69a60ba5911b4a2a1e7e4381ff83a4ebdae84 3046022100dcb397c6ba39be290593664f00dd0b9647d6eca6838cbea8e356614c3cb16b77022100ad1c5ea1a598eace5d45e6d73d05a147ee3a1ae9a470d1b96e165d20b034ab1a 30450220651bee835afbe8046c9e49c20a334718badd3d2749ace5d2563f4b285eb70c55022100cbd2bcda7eb362c9bb10317a95e0b8d5c4084a46e9b02057899329ab7ab2ecea 3045022033fc76db06ee90e1bcc65428e69312f11f244f30f3842a470405d69f7af3ed6e02210097631931a12f76ed7aa97f99c6dad5ecb41e7a5ff8742731e03a0d99ed4bd6d9 30450221009643428babc9e136072b3e73706359c64ba2a0a606f4c9ab5a172afe48eff18e022058c84a87fa429a182a710955115cfe56625734542ca0bd927c706139729257c6 3045022032c8b1e417a872f64f161b23fd661d90fcbe505daa0a0b1012c59981f754e7c502210085d9220d09e389a281b633b3c8dba9a1cddfeb7d807558c23ef336cd73755a33 3044022078309f48658c38b2da177d85bd603530e122413e6b8757791e8ee874ae6bfd09022055bb3f7f6b99d48eda33431292653739643054d08d6feeb55544242505ab02cb 30440220749103f7dcc09f5249fb03a21c511632b4fdcedded8d80dd05892c87774597520220384c34731e8caf567d4b4c196eb00bbd798ffe089fa83fea16f0987ccf461e3c 304502210094c3b31b2a2af0519f158a49908a836c6bb6ae1944847dd849fdc90864b6dfc00220788b750854850e6515770716560e7cd4e2fd0964fc53162e62913646b11de184 3046022100ca73fe0bfcc1c851f832e7a1fa8b03b45f00346f99536a4623b1476288825b36022100f36efeabfb8b1d5c1d3ca1e9de97911ad3aab8cab903a019430e9ac79431f040 3046022100b8c69f8e86d430f513eba9c6d650ac7bcdfae0208959a142a598b6b8b1d14fe8022100e0442974160c982960cd1ca3a72bdfcc51b3254f130978bc2b0c71bca58479d8 304502205a99fbf506e3d08289ee795d1b56a3b9c2e24a470b59c8c7241049feb3cebe9c022100daeb469bcec721831947da406e31a4dcf0de198fca21eac8c79026b799e2e36a 3044022037dc4faa61ba5e1b0abe0c4a0ed487ef93700be41f9800857e878a5dc573e41b022020af2f91129dfde5a71fb2ba0de6cbe28a2a21b0c2cb47d0522a92abf766d73d 30460221009b88d92f6fabc9567714d5f2ac8123add00d7fd93c9443b97e74a428ecde8d16022100c5b50c26d249452dc22d03627e7c63f5bb140be44a8d864a0e4184d393f5478f 3045022100e199d44b9158aab22b705540f81185707d556b74d2faa2dd2848296027b403a8022071592cd6479649ddb086ce446df1ecdb0b33b80cfb9ce2c7328fd9f266d0505f 30440220400b18b31d4839cf7d1059d8d3e4f2f638542ff6b2c33fe01b4d87bb8d4d1d7102202761f06d09be7fb5dc8c0f500341edc57f44b52ba9abe15c2f1ba111f4b70721 3045022100fe82820b85b48f2520d68215c358efdd37e524871bf9c6571083cbb14853e8560220750d479e58540a627bd0ef6a0a0b8ae00a22326e394df02cf68e649f2724153d 
3045022100b2eeafd9abe3d72e279e3832c3edbcb5e850fda14d476a509f630169e85e69ad02204e109dbc2d34ceea60b36d4dc68f93985df3eb99011d7c4a87edbee90519772e 3046022100be496a1a59ece376f46e71ad8e5003174f2fd80c652c2fd2b3d344a6416ea7cd022100bf0f9f572693648ad2199fa51a1ac3517680c95d5627b0c95455a4db94e75a78 3045022100fd18de924cc5f40233b348304c615495c3ce25ceffe911d974a4c5fa7812cd3e0220310ba4523f08d112047915adf6e42f60b3fd5a3ee159f328309850245a5292e5 3045022100aac5721cd13ccae0fe714b185cc6ca7fa7cba0cb3971e3b4d5dae2e2971c73aa02204506d161abc8dd3d4809d1c564acae0ccbd3738552141f82365f3b9023790191 3045022049ba24b3e84d8c76317a94cf0f3839282e69e996cfd990e29675723e9dca92ea022100cdfefb816b9c94ce21f493a25a624d04e7dc376d73752bc8de67512ca103c8c3 3045022100ea5d85c0405382737a31fa914433a58b2e3d83b3abb1d0cdb7d93092bc73b45102207256c43f31af1e7f8fae95165d9db318b456517775cbf25ee6281ab020bef0d0 3045022100a0d83b5b4727d35b9674fa12f0ad7ed12dffaa69a1473f2e742804f7963c63750220634d57aa0573892fa5fe566dffaca0c195758d226e33beab0107b83abf598a74 304502206dee04ad90f2c88dd18b75f024a0ed442b347dfe4837fbbedfca9d62931fe5ce022100f5a45c8f0ddd8a745f9d99019e7fd2d3de9548f10eb4dc88188b6f73938c57c2 30450220264584498d9dc5b7005d2c7081628a18927b6eb907af1db2cdaa269849297cf9022100f80878d87387432e35ca4678e063bfd25526989b1ffbd9afe4153e9e8cc209a1 304502201d0d752e01e40da8d22535d0731521f571f46060b0d5453e2dd666ac64e7180f022100e6c47c806be64fbdabdf6be55c528832f47582c581002e84a007df802c529691 3046022100b24778415a3b7d3d04922f4d864be7acd7c6b8c2e1ecd2c4277b103a8bde0928022100e517a8e77190f648cad05468f2ecc38afd2404a475da09ccc9b49ecf6bb1b915 304402203bac3710c7330e73245d3794ad4b716d2ba31a827d48f8ba609131346c123c5e0220412e384ba5489276b92dc6f56ab44ca73cc71989dc53ff16a3c75e5302b96420 304502207e76f11ed8cd2f14d456eb7d947a06de4b679bbe9e24260cc379222c78cfdf52022100add2bdf6ad4740ec1bfe2cf1f725a7d694a239ea24d8fa7968161d62fba83346 3045022100873d5a2698c70bd39f2ed462da5777953d5a6e0e21a4bf65b3652470ecd837ac02205b7ce384297ce52f13846b27cadcd1cbe4249db6b2332dffb2b63902be73eeb1 3045022100ec70c65f6a11fdcbfdf3f3d563005ebb301b340f907d48abf04948836f146ae6022014765c61deeca42dcb08b47fa26689c08848f70d35b24c28b1d9ffd803b82b52 30450220539b72e04119ce7ae938b061aa33f7159cdc56f6ea4ecb24b5de293d0c23f4ce022100ce4a8cf25da1b6e2a805c1d5dbd3cb344bc47073ddd4c8d1ff35eef7f69613b7 304502202aa53925200e41bce4abf9506369f48a25929ce01366cffcd95c9091721765d0022100864353f22f47399716a46d8b643929d3d56a763d67e65057cb1993b740e608f1 304402206ea5abf1bfc2a56460fdf4ab3a63c0c0049d02f08290e4f79c9d3e7a3146918502207579642418a35af731e8b0522c38ae6f667326b9d6cb0829be92779f5072c0b6 3045022100d4faf50c88b71e57f2e483591d3d535c93cb4673d541d5a044b438abb8d8779b02203c6da561604c2eb6ccda0c1fd6dd92eaf016b291279dd09dd970198f4bd37bbb 30440220195b47f0ed639ab90ca910214b7f8c7dc0c940fb0031fb0f855da3f673ec955502205c7655253741f7e81706560446a6cb2faa439ba46e5f63f7cbcaa3859f2d694d 304502210092ec508ee245b98da54d4d4b0a5ccc3bbf5bab9849c768e4f422814bf26df7ab0220103e60984f3530cf850f24a7eae4a56866edeaba68b6cacef2a872826914b9a5 3045022100bbdf95ccc59bf500f2a34a165eabbb22272212302a6f516b9eb4052fdab59f67022016771921b261b9c572c19742dae14e9c4b70d6f31ea0379705829df7bf181210 304502201897d28089c2fb45a823c59a361c4995fa9dd375a42843f53995bb0c53d765e6022100fbaf15bee01e43964460adf4ef931751adc71139cc47cfced1acc58657b20d68 3045022100d319dc41fe6cb25fcb642ba47a3afc1664910534cfe034a030d4782f86f887230220166a048ad5d19ee46564685a6a9eb4af1bbc9d284a3933b70084113e3fea2638 
30440220241dc49d13cf4bd7db29553230bc7ca0d4da8bc1a25d5a4ba427f5f740322ad7022072bb38cb7ae59b60fe4ad37fcf8240763b17a72cf149322aee2e15a2c1b852b0 304502210085a317fa756a1a49ee0ea353f0ff3a01ea2da3bfd72961d03e6aac41c6c59708022070f9e462cbfc855b277558c0d662a4f32dadab2cc62f5da8a7128a202c8f8db4 304402203659f942527a1b3e139f0d55d51541969c6a6e7933af54b6a0c2022ef6b60a8902206c485bb4a27a40a8059be282fc7592bb37f42df12c36e74b6574fdbae07cfd0f 3046022100ac38c92daf3230ac096cb8ca6aeadade550c3f2623dcfb2d2a392794bb74e74a0221009f37c47a9e2383f0c6f0ef36f99ea6a517fb1d1d41565dea103dffd99d9299ac 3045022100cea8f88db857569444d6ab3f82149154f34e778b9f5f12a799049a7e8c5ed4fd02206d779b7652fa1509e5926f85f7ee32ee74266d8bc7242069a362a2f6a38d1c0d 3046022100f6175410dfd481e860998eb5174cda6fca5532e78c11e5f9058650a273ec801a022100c016c6f54f538e536f37f37d19ad965bfe2d4b2851118abb71c0979e4f7df4ed 3045022100d014d32bf28e901bf0aa8281bd83860adfba70925b3c6bb1ef13193eb2d6f13f02203ed42d4ac102b35045ec25f267b7f63d649458ecb476f4e9090f294b9c566ce4 3045022079e8a0ad42f1434f6bee930c9f5670c25d8380f6a2dfbf38c9ba4a5c62166cbc022100ee248711b3001c480d25b9c1295b9e2b6bd0243dd70e1919c879a451c14f8907 3045022030597d9be44903ad7eaa92cb76a059de78052204f14d5bffbdb16f5b0f6503c7022100f1f61fac1e72d01d63cb7cb6818df385240d1e2547ce040b6d891ce8805d9899 3045022100c0294c802841b3efdd8d0c1ba373d49e0d13f51a4a2c5609472d660ad93434850220324e24f14839545ca3f473d42c20d1bd3e5f6925ad863cc60b883e110692b301 304402206e827aa99709398f55c4d099b5e796cd395ba277c86f0eb6cee8a1461494be4502205f958b0c084dbe70e702245f44050dd9d266de46fc6b5828b969f9116d5dbd0f 3046022100ccb00623d65df49edcc5163b1b94301f95b58efa4967a91b9e34f5fbed4c587702210087db88e43598b68669dba3f0573607ed7645fb57b57c88096098809accf98ba1 304502206afe9538f4eab45787b8fae569fac3d0286c8ae2b9ddc088f4907d21573c1be2022100f977d495c7cdae77639cda0195e1f9be6b67dc14b3043faa8b82d0320266e813 3045022100d5a9446f3c160cf59c543947c35448538c5c7f1c82bbe287914da9738d385eb40220358c825752eb411712bc59a9843a0e943c3625fe317f1a7bd001c62ea6a744b5 304502210098f550c02291651425571c5a966b6fd8900252c9928e51a81a59bc05f9b1c50002204edc1c2c13549e6b4cc171a6750bf232cfc96daa957c5fc30c3d705face1e13a 30440220025a07f0cd4f5de854672c6ab6e10dc6e0cec5694420e04120fbe17ed494c02a0220265bc3fe68fa1bda3a794f529540d2cd91f1331d363ba4cacdd09a5fd5b28463 304402206567e4311bf6c3d06788dde4780f62d7ce0fd839fc3980b86c8c1c62d7cbb21102203f635b0bf9d58dff0b15494e006ada4ccb1859f511ccf82bb2bcf77ab70adb74 3046022100f34500637ee859a1b610e9948536d35ae9fc79d7709e0e85ae056d1362ac55ff022100c22ed492971c4d98ebe14e64b8af7725df62d3b8b0c0a44c5a0a0f76da04195c 304402203c821ab01dab69b1fd09d459e6fda95b58f80a89e2360f16b5a921112a222890022073c21b1970975484c7cd0e0d6e1ca0aa02b3c33acf63f90408fa5c983c4ccfc4 30440220116277f77593af6f90047602c973de05ddb61a29b26bc124bdf023a9214f394a022051807def7edf93cf548b61e64d142bf84cba4445a138743b57be4966b924e894 3045022100dc45f8c4da1478198797698e0d17c430918346b179822ead302c97516177c119022079469877c4939bea1ffb75070f37d996a7b3cae86a2c5987bf7b510d3651d056 30450220789ef945a8d573384a3a377651c9f1f3d4517165ceb4f86b354993272641121b022100d350e26bb57be95a33213f3dc4896319f581df6abbbf26d0c52cb66b37217ba4 3044022035590ba9e28447f55b6ba06b08759102273dabacb4b9aa5b2cd0c7cb09d1f5de02204a6a4d6afcaa068562b495e5e4ec8948ee44528d33a57dc293a3625c9aa6894d 304402203894bb68540ebb103d7d04975c25ca7ecb770cfa26dd35b66dffc67987a3feed02206e5d8d66272dd4c5fbf79097a80b5d3d41d68e9f71bb5089defdfbdf78733226 
30450220405bb3224918eb1043d30ec7cececd7d5b5b73a6b96eb9e62073e4210b097a9f0221008411bcbf0ad4cd50069ba7450a26593929054bb4b7774b02cdf3b6b658d3a684 3045022100d93b7e29d7511cfe4542d4d71a7e2c91ed32b2e6cfd886f5660b016fc8843b3f0220432e9083c44dd50fc4e2b69f61bc4d44daaa18a760cf594409de8462f3fc7163 30440220734f6c00cedc50267f1289ca5b56b5346e14f5d2da425576a0dec9371841e09c0220534abf2632d18f7f125e4f288f8f587e96e6a94442205d1b3bf652c5c8d0371c 3045022100b1586ffaccf72b8df4ef5da916da5d0855b118ca8cca010a8de7cc45942aa1b6022047dd12a0da05c5315538a80e351e332eff9cac68de262f260cd79ad508af91c8 304402206514ee33a6fe4b327e4c38660aeacbd509c9314a0cf4c3b49071c5972345d75f022056dae128fbb4b1621580a37215a531e30c9ee7062ce4cf66dc1051fb6f33d47f 304502200decca235424f21ea4a478de802a962bfe99d61cae73da88f437ee0a61710f8a0221009466ae110ff772fdfcbaf320e09498ef997f07618afbd64bce218933963f8719 30440220370fb7b4d74ab59717cc8d5554db5b6c05551d0dc8db1d3780b6218d8037d68702201df6eb31a7177c6e3d8a1823c9d5e338f6f92b1c613f1accab4869d509c184e9 30450221008f6fefd01ad4892831964705fa64a88baaca676e6e3eb3baedffd1ea33f3e7ca022006c17c9807f81485bb8363c75f86a9bc5d1c7fb2c6c49cdb5b2a10822b5d63b7 304402206338c5c9f07d8c094441c7561de4571381a6e942d197c3027cbc1d546f41ad2d02207a18408784524b246aa2b882426e95084e9ae9786d51ac80ae3d834301c2053d 3045022076af3bf66b0a9c474ca503a12a14f9b646a7a46b9fb8d824d06ba6f98f9893b3022100d65fe5bc030bee3bf92d55057488a4713af62eb2c8d7796e60956e7596bef705 3046022100e3eea71a8695892d6dc1f36aeada41c75df082b332906cfad1559e1cf40b3f00022100811529387e074a13879cff9bf9ec5d451e82a53a1c565b995cf0ebeee8a56b32 3045022064a1c76b13071822d2be30df3140477378ff009d6cba5d54883af551ad1f0503022100c5c64942f5cbf3c4edc9e3cd6f16a94ea0722f334b7e8603787f8750531aca3a 3046022100f70c2959a67d793c2659b28553785fa1ccf8bd7c9642a445d28aa255289a1d10022100b62ec9fffdd4ae267b09de385501251d5022cd23eba685bfc061ea58bac9092b 3046022100e0eb481377a4cad03c84182780da786d4f82601510cab885e159b7eb014c5c02022100dd55f184080d0d705f199498787dc53c62724aeaff5e9af8e9874c380549706e 3045022100ca70b820574360d8bb19a016cbd5185bc93ae6f140ee9ceab9f56ed792b0d12002205291e37f09a6bed5111b87e075902f03b02757a785b897ab73e40fdea53e0de4 304402202c3b89bda1d227abaa90b1ead6898694c29746607b54d8c7460ca2e68ea884bc022000c4177169db5cfc54e10850e9a89a2f3766386620ea6d4fcbbad93d84d6f5da 304502204376a5a3a18fcbbd33aa594ecae0b67cbbec2510c091abf082f7d61311ea9b69022100fe5bb57beac73b70faa674c2fcc87170ed8e67f44f35efe21cb5052da4a43ae2 3046022100d04df9a717b7e93b3d148aad3337f28c83bfb7acfe38e4aa737fde5c5d5d40c40221008eadd8c54eaa6ff06c54a39e97d6804e6429e932224682c17c50d28e531af5bc 304502210087c63e21fe97d037548aa2cd43ea17bc8a48e6fd80261f6969dcb7f66799bdeb0220791624dd710309d61a331b973ccb2ad961b305bb8c1ae46378b80a0f881d1ec5 3046022100f761a090b431680921e7011b33c921a309aa9c74556695c39dd379c1f52c0a9e022100cace8ae17c869d1ec0a2a9bb8c21d43979d3e6a3d0c6f29b3a9875fe4279f7b9 3045022100abf3f2e4f80f93c3b08a79146836bd9eff325327a8ecedaa56739d371bf99a2b0220614865203304d517b8b80b30a95bb575c5a072004b751f8239a31410462db8dd 3046022100f49faedc7631e192e5812ed1776ec16da4ddf6352683734df27f18229a667f080221009448442291f6ad65194c10b6e5bfe6d83b2f782e1019461770d91983543dc195 3045022100f069ff0dc35104088f2764e1c30bf6c21148a0625b794b104d618291e6d9a9a6022004fb32890e6ae1ad525dbc8e7d2e869f973b08eea1b098c91a8154552dd01d12 3045022063674d1cdb1929a38ff35857e62867dae3b968be7145a8ee550bc80e4b87b706022100b8bc8d1d8009c31c8ec86ddd53825fb5190c75485b1d75050076873ed286d878 
30450220310b21e652b0615928c960604090d39869b5b12f1746821fd4dc3c4abba1fdff022100ff644611ca16cb63e7a29e7de0e1c8c904bc7b91be2ddcdeac22c53e61b45d65 3045022100fb08d48ad5de2a49b48db365a222dddd3ea86ef5c0a542583de156be57a06e930220796096775bf6c877ac4717bd8ccf7710618f55ec61abb764db5a0df853f9ac02 304502207bffcf7704947415f301e3288c8776a4b46c5cfd5fa6a62d65f45d2441e06339022100a810315e7a1429c5fc07da44be94124fb2a18f22eab216ccaa3f3722a2cd8a2e 3046022100f9225dd804a752a243d50f72966e6b118c58e7f7193d5200e8df0abcfed8e4060221009fe42a149ad932f3b50e42c615260b432293a69b78598e53ae5ad95f225aefca 3045022100f15d042c5fadd77fde5386f27d2d3ae3f27a618d99bda0686bb6cf3896fbcb1d022021267fe901d74e1c83c72ab525dd30fca991e6f6d06de9beabcd6f2f62c22174 304402205573faec91ad870bd9575a9b77008759b613c82c2097a78508625dfc1055933e02202b60e9cf7dad266f7a9d5537b7f8b72c92ee01a80b447655bdecc4272fa9bfa9 3046022100a5bb135568f15d97aa21aa83fa16097db69d2e543e822930d00af53e8b844a99022100c781515945ff4a30d02e6a8915ad07960e483ade8ef493f9f41ec0fe7c9fa991 3046022100d31b0cdb6ce8154c6eb210e70ff0e7d2e4c5adccb7da08a31f71981483a7f25f022100af391e452162d3a24e217398d3b9e4feffb9cfe689f38eb7d22bbb32237cf677 304502201a23a1e711b9e00b972cd1768ca321d2bd3a2aa6947a980b0b1c266c3ae776a7022100cc963e9df0f00ea1f386d841e5bb9df09d4e52cf93c6891bfae33db49c84b8d2 30460221008ee4efdc77f2b4544bff4f08992a15911fdfeb592ef3415700027d35ec2028fa022100f992f983ea48551b372b58a304e765a69fdc4a39c56079a1944af8d0dee2e653 30450220480b97a28e06883e19500e8c6a2f0c87e93e76c56b55aabd2ff5f41dbab794af0221009afb00b1d14aa0356c873ac3a8049ad8b2563f1a839f793ff13d179653d0edd2 30450220740516e38418a32edc0771d6ce485469466b06f24f78e98894953eeb23d91896022100faf45bf4d2fde16fb1cd6eb3fe17837a72c01e46174cb18a3ceb620b83944a48 304502201974f47bf1d38fc17775a8ae9c1dbca8379b72095e198f7e97e617c71ca58883022100a06011ac57efc06ff9c1aaadf0e400cc4c1e8aeb0962d738e021620df217583c 3046022100ac66301f62a8e00991e29a71e74e057a3580123757f12fbf6ca9188946db99de0221009cb5b65b6a639637af78e2954050722b99d7c923268b65c648e9d8f82476aae7 3045022068da560f90b3b17e69e7a6eb38cc17123cc56f5e63e8aa738843eb569cd1f1db022100a7c874f1ae94bb3d557530f8cfd1cb47e77e2573327a70688aa9e9715f98f06e 3046022100c68c6e9d18bed3292bf1a9475d924e4d46e86cdf4b9b69d724550da26403d257022100b1ed3a00c69f24e64b6b6a5cf0df4ab1a2f1c130514992d5611e6ed104f30ba7 304502206ed531f9b54a84a3197ca4b7b5cb876a40cebc2b6c57d38b419e3e313a6941cd022100dd5d967deb73ddfe1f6c777bc2d461c012539f6111cd22421083aedf647b29db 3045022100d2b225abe1147ea6590df387aa2cede4b5db92811decca41ab615e79e588f13d02204f7beda5a9c8f537e84f4a4a178d33afd9a7b3dc3c9455f35e3be3e96a9e760e 3044022016d6039f7e2b781e20dde379a200288c56c875ce079963880dde4ded4d26b8d202203156a404f451d157ceab54c56c61a821d685d790fa0411acd089ba17c550d30b 30450221008196701c078f89a0feac5fd2e83933a2352beea222d42e639a9e154d89263f300220037cb02027ef7b152bcbd39ef9c323e2aef8e0d82ea3501e4641a695858566c5 304602210084fa73265d836597685e9fc28934e5a921e568710feef273d097ae004a338ccf022100a39722b9be5a827862352e8e690a04da08016d266c84683838c14d2deb6b8e3f 304502207cd9acd8fe9e9eb917d013cb0cc6b681c366b2060de14019c47d9cd90f2a8c69022100adf52f9840304d660d645cb08ca768fa5bfca087397c73dc6bb917c22253d655 304502205df31969f4f9d8fe3b52debf30128215cfd6603c0297320a771118e3a0e1f0fb022100b388da9fe2df3868fb410efc752c02b4aa8234bda23a92a585174471ea476341 3046022100cc7493076061fc4fc1c07fe0f2162e3fd856d67f3fd2b4d07951895e4f0334a4022100dc572cc7f5618d1d036045b53438cebba25ace0c6a967887f754f778b31a571c 
304402202e8d16fb3cb4d0b9649b2b60c3e87a871b14f4ab5ead28ac5de80015d8e70441022035917ac6ce627afba761ed9030c6fd1819ebefee7cc2f81c36a1766fd9dc6d18 3046022100e79e551aa901242aee59baf172add04a9a5c74917953b9054b924ec53352acca022100bf1678c786140cb36a96f39b9531c8c11f9b3717b02e414703d4381a4a50299f 30440220723947b40a5f43510de76232d33318b8e8f3c761b644fa421b8ea444f4d88c96022006c6e4147451f2eec9311aff83f1234555046b6f801c06d3570bd99192c825a0 3045022050e7e6f9b17f13cd37bd10d10ff987910326e71ee766961b066ff7378586eaef02210086686be39941132b2150b9ad6341a26f06cabe43f6a7b22203473208e1928d71 3044022005d063874d34878859161ee3d1d138f2a87d97e6ec82395dfba076336bc644da0220583c08f48ad11735a4facc928a3db6d8756bcb437abd8b19cc5adbd70badd1be 304502206d62ba576ff68606622d45056b1c409984c46687926c7d580dbe745925eb37ef02210091ec2ff1025e63683ee1778a432d7a1656892b77a105d5e9e17166a13ff3e4ef 3045022100aa99683fd3e666b6693c8349d500f4ae086144f718c69f98266d7f70aa468bea02205498ff27ff3a3ddcfef169c675cc6a9fe30442d14654e4c0b8e579febdf4e3d5 3046022100ec588966f746a41706624652c6aafa565c3dc055cc2faea3e44314c4f9c47b070221009a7cbe6de63278da167db1f6a46911f9fdd4c55747f46f288ebd61285c35f5bd 3046022100d943b0bdeebee89e4d277d4247f49652b63648b265045bca45c0523f8e9cd738022100ae1af8e8a0027a8a936e12463cc6b5a5c23746992fc695d78668dccf0dab6149 304402204b5cb5429d160a6bd4e7117a49015d0411ee5331d2d4a2ca471cea5600d7a38c02204341d7dbf231e800610b6769cf3a7352ba7591ecdbfdf85b427d7a5ed4730497 3045022000ca05d445752c7df07fe89fac53d5f040d2e1fe47fe0e74e9331714a9ded4fa022100a68d231ac2ec7b032cb9bfa23f357d73993d0677ec838847a2a3f002d5d51c2e 3045022076188e99387446169dd7de7a1e23eaf739ff51023913317d36fa51ab453bc0ed022100988f0b312803ec9e16dc0048c928e3d23b6f9803cc3b9bfdea4cddd0367734dc 3045022100e7e11155864c8df4b16b2a92629c921188e8a0f30dae8b567aaef20c1790d65302200aa91434b05575b358acabf6d3425e8b233136cf40827c50c4c89754fd7af4db 3045022026813fcc46ee3b9c8babdfd81463c967cf9ee7f1a3530c7ae3f1c15bdd9571e50221009d1d8f20fdffb7dd08333c9863c9c2b8d2e3693c5f07b7d6c7352b10810b8c03 30450220491b9ca1c396035d7f1ba12badfdc9937f0f1c01da28a134c16470ea9044c3a7022100d18b0a9814ebf90391c58003328f5e11cfb6cb4578edb4b1439fd55fc72a8654 3044022046d89862af31475700b349f724f8cc5fc602cc24547f6a9a1c72a57dde269189022011b5e23c79c4a24bc59a34d2c12e8507053150707ad8d1263df9dff79191e805 3046022100b3c10023a0ca541568c539f5c7802669082fa5407687cc2950a0ef33374e0f01022100aa8e85e2d6657fe03a79c83761729a9ab2ffac75b85fd8b5d9f5c51ebd67560e 3045022069a77874ae0ff496b1bc3fe694c01865f89190c22ba739d18fd84ccf32c236b2022100c2dd7cd36c1e30d661a8894caf07ee3cd6756ff4fb47d70204d324e2a1d17344 3046022100c39f39bcd0cfd1df023248e1c9dd8807dc955cc8c6d7a2bb75aff294dfae8af1022100abfbffd217b9eb5792c69c4aa7d214adf5539b9f7df65c297189e501c67274b6 3046022100e61cfb6b49b2cfb3e7799f556a1a6074ea553ea99f1bf5819461b2829b8e8846022100dd5d245ccf9cb457e46184e7ff27c6ca7b5285a7adcf26375c5f223bf89b4982 304502202966bbac6f250acf002ef76654442a9e84693b1ab85917bff79059f95f5bf1af022100ab47476e385529f5bfa95396f163d7e2469c0f8be576993c835e71d3eb23e0e9 304502207123bdd80be51c2f8be11e9b13f97e54096479659ed7cf711a6bade621eb9c60022100b8dcb41cc58afa9cddf8b5ade52a465f7a9bc91c2bd7a1cf4391f5935107a35b 304402200f2a72add73ec1b2ab4a82d714ac7440e33d4cb00512b88958ce6c17bf0659ee02207096b1caa7a0a7042bc9a4d63c3d4725af12daac06d5c5f262358e62d78ac98a 3045022003e5729bd5ff92c4d785a294fcbab1ce0e05340460de560895e12b23b74d7462022100bbef73c8402d96b6cc5553e460574aa71bccecf001332c51ea91c9e2bac079ef 
3044022066a377f02842f9339a02cae6617495d950f62eea1a228967353a049ac4ab5d4d022015e4200b7d0f029f6fa1d9525e41972ad7ccb129c39beea6fa925c66ed3f76f5 3045022100b84de3ba53aff57443b410975a0b0092d057f2aae27112ed853df22fd420f5130220343d21047cbd3efcb7553a48978c663257a0bda71699ab645be227bb6f719300 3045022100b7260878c741867db7f6be815c2078bed5c2bceeff9b87dd87b5b05cf5c3d42b02204ad66692de39dc9c9d4b67bcc131885a6756bc69200baee07105b5eed8e3812e 3044022038dc8c2e3edb226938111674202325aa2c386a0634ed3b8a19e025ac58cca4df02202f113a83f54f816b4e4aea148466ba0bbf695998e6f3d12f49b464443f1087d4 30450220447098e3bb6b8f150563edbd3ccd3e2142d19c49c21ea627cee479768454f1fc022100c44a0f4f27ce7038056f51e0bd6639d3599e22985ff8aff188ff4dc5f903fba5 3046022100a4d443b86a29a1e2e9b24ddc7b6023c78c49daa93095cf75aaa391f62a2dc703022100ce335b9134c583931418e928f791ab0b7d986d114cd9347a991e2d338380603b 30450220726779693c7ad82e6b6b8a6df50f733e85b1b3603cc64af4ec8b12ad03cd7326022100b566531a5aae4579e6fbabb7a7d1ff8efc8283551d2513b478efe3952c2ab5a6 304402201e717fe1fdf84102eab5c0b1428d9cf19bfc994f98f614ab4fee75ff384be2880220455efd25fdc491da9cb8d1dacf4ca1735bd076c190bc48c381eb397a1a2161c2 3045022100e6aa1e14a504dc9ea02f46202704d7232ca236e669a26ab09cc04e729b24e8b40220062571e7ea8769eaa2d7457dcdf08ec5d9fe5140c65e511097a276d26e5527ff 3046022100ac6b196f49c156d5971a954651d2444f2d72c6c2f08f1d59228e53c481fcb360022100ff52db5583aa00e4df19427a404228a34565cd2fca36f1c849a93fa736e18768 304402202561f77439079af30b995297594083236aa11d64d010eb8fe422ab51a7ae1ab50220061e3ad8e1c4bc041a081d610cf51e6dd713b227acdd85ffd49ab0e18c966f6c 304602210088544bbe19acb8fed48d4431e6f65e783b55b1dc38ea631069d9c01e7e10869e022100e6ce44938eea8da180488ab618e6e78d61639c26c32b5bf36cb5a42a6f595722 304402203db1e34d7a9ec1e44543838ba2d1a103bf1c386d1b165cb4543d41b7b8c5b3e00220540a70b1995d751eaa963acaece2cda5e90761f96b1d8071c102eedac80ae67e 30440220273ac088349c8e73e7abe337f5858d7e7d0c44a843fd235fe4fd505b7bfb6e550220236412d64a1ae408742b21243366220d88577f8fb19f54742b97e42ee66dfa21 3045022040381f4a51d234157e3bf9a96dd9dbae43eb712981d3ab40cc25fa42ffffd613022100df0c3e350e540d93f40cc0e7f4497465fba7a1d5cc7e622369eedb5d8dfe24a2 30460221008f9e10d3484a2f1b62bf1a25df2a629b97eb70ea2b403c47cbf63e1e84319ef1022100dd134afe539ac50f93b121b48de64c3e5ea4d5118fe6579a1c180c6558f6eb5c 3046022100e8688910ec91a38dd9b84782c1e7cda467a00226e799af2f101ca23c515513650221009275ac3de7bf2a2ba6ce925fd6a3381404b88029c0ed08a4b3ed522daf0185e1 30450220720eece93b73fbe57d0f86eaa964eabe9ff36f9df9a2eb406ef5808171a58d19022100bcd8a5c5eab07a181b676465f6c6ed6a190b8aca0822f3829acdb677c8845ca3 3045022100aba0008cf0cab932d03de7136ea4f5181a4f85a86604b4d11f79ef88a70f525e022003c871f752301c45a34494a18c665307446ff6c50e3403cefb4d5ef5605fdfa5 304502200c093ef6821869d1744c9ebec08acd429f63748cef605e0e3b4291723a58f1280221008dd324b3f73c6ee551bd6166abf7fbd2829e16b3adc02236c393a83591f78730 3045022043423748b2068e62b181c82bc1a35b6561f39424bc123ed7232fb317281efe67022100882900f4c274f84e5f97825739736b026f06a143f2ddb0a4d8d59fd9c2c6da21 3045022066b7f0ed263e353985c77c5b598431f606e17dac5dd8a7bde6d1d9c2d6375eea022100a976ff454516b6a4b6a6710fd1b36f3380dd8b90a6c90dd18d63b9398bc53bef 3044022018be78c7983b8d0890bee076115aebbc32df3da7bb515bde0b5a346da8dab4d10220096470c91b5f1a14f2c19c33597421ad46015595264874a1d4b08653bcdeda56 30440220075c235ab78e5957cfaf224576be7b13ee2141e98b2662b7d973f6dd75402f4002203fad39d59b4708a40fd2aa945e4e3155b35cfd57c771e25a129077dd97cb7e12 
3046022100e0a792bbc9889c277101c1d42ff174b12244cebdb78fe710079d527557882ef60221008cc2f43de612f17458af3b1c200aac5e19b39a4cfefbdc21327e95a7c072e036 3045022002467632790db73c9ed69fd8c20b8ce8a5ab7c63a619d90f3facb3195e692fe8022100efdb1f788be6a43b1739a00a79f80e2ddb35f4b70e6e113bd36a3b4ed52b58a5 3044022059b150e8dc544720284d4825a7bc99be199c505654886b470c53a5e8fbdd8e4702201ef7839f3ef0a59d2895eae1f3ad2c8970e0da206d8f305a6500d209c0f8eaf7 3045022100cdc2b968909148efa72324b383bd6de64e27b87b7bf3701954176796883dc9fc02204a43ad504456fce24c3196cabc673438d33a6aa8a38a1c02ccc14799b04330b1 304402204ebf8f5a64775b56995fe32e1eeb913f5b630e4d2c23690c53c9fee3c5e212e902206e4023b072038ee80df32be00653c70de2739ce75238e1574b5d91c6de914391 304502206b6082e3f8c9ca2b9070e93222937b588af80a1587de226d1e476b6e900f05d002210087ed184b8adacdd9cebbe8ab7efdbe2e16d91eb834314a91c71058db12a8f5b5 3045022054bd3df80a7bb497df77ddc31c03cbb6a917444d9fea68a82671e34f11d0cb96022100ea8589f9c3ed09358cf710e75ce80fedeb1776bb9d66f67a481cca0e02f902cb 3044022000a29d1646794e2b0e71bbce8e293c18d2cb8b9caad9b6f33a6a014cdfb552cc0220498ba8fcf7f6c90748e4c6f3b0a420710308e76836755a93380eed2e852c1d35 3046022100a13e6da4f51cb428aa2f6061271c7e2c22c17c2ce0d66a79903c33554ac91a3d0221009820cc8cb9c321fc1319b4d4e099cc5286f92a10b1c87d9b76929664442eb3a0 3046022100ea0d226876f8f8f72e512bd1b072234ca4f69cd661540085bb1b14937cdc8a73022100ea0d7d6eed5ff10e2cf256da23bc20cdb460a0cda65c365a96382dc2e687fcd0 30460221009ae9c06b66ae48120529cd1f7a352cbc2976fcecb34eb43e77f9d49fda7c1d5c022100d4e827677cea5a532cb5c923f6ed9288de8b8cac0c42565d10a2fbc25501643d 3045022039490f5a31c831b6521917dd683fe4f0f790b7ed142cd7349686ac32c32d6975022100cba7f1333aa5267c0b561610fbdeac1bc0a1c7817019a4d2843ad346378ca304 3046022100b84abb9d2824980ce23ca5dcad87b97df237df153006eeb3f35c38010c4cda92022100ff62415d086c8e9daf1dc0b34346769217c1df8afcecacf8c244dd232354da75 3044022054dc4f98523db112c481002bdd1d976f58d7d5b26877e746cb5a44b4f641363c02206011fa65cbdc46845b31284728200548de9f0696844655512e738393391117a2 304402202edbd03159c9b49eb8743163360b1a0c041e997177b4a6bc526de6c13a1f38630220483bdb8c67b42a7e366ea4df10aa0a283aa3201eeec5d38fa9c54ed834ee7e0a 3046022100960295a50f38db018eb14d3f3b4f0babec3d9b849417c34625c00d70109e05d302210090f4098b86276e2496a125267616427fad3c2ffa4b802797f5225e6cd574c799 304402202d08910f7e5b83e9472d2d880470df9655031f6a2a7626ac4763ab3392860d4b02202a443ca584206311316f7589eb2c48d108710795e2575e99adddaff9396f90ea 304402203c47fa80fa973c2ea2d78ca82228d4e4fe3ae16ef9d13bef3516e07fcb31537602203f02d4871aa4c8a6f6e6438317f97e756a2deaf7b8fd066c9dc85f697359e11d 304502204d65ebb1291c9b01577dd416a5900272cd47313a947aff339f182f56052f286a022100d9f0313f89c5384ee3535e45308b5c28633d8fa4dfe2931406e1354d6414360c 304502203de6906e0d4de12e4ba48b2579f896bb3d3b0565888453874de153d1f354eb27022100a1fe2c058c43927e93a9474293d44c51e83fd956eda2f75bbfa95b04e36a57f1 3045022100bb05c49c39d8298370fb27a1c54e59b26436141146965f00969532c60f007cd40220761d8b0e41e4e20eb71b9ce302ade967f0cb6880e3f5897c2e957c2a76be73b6 304502206f5df85c906623dbd6b2bf9daaee84fcfba6313ee8bb97ba2cffb86214dc109e0221009832ead7c1d674e2d2a1bf9e482dd948c66602148b73e96c9fa8072594ba241e 304502203b7b42f69f7c8cc5de4adfb3e77cf48b8ef1a7c1e2f997ceee59bd3b54ad32d40221009521ff2c1569cfd54576d7618d3871cc0f047854c060b459736d87ebc8b93a28 304402207b89e176b1065bf6330cd69caa9e9fd34e5cf6171680614ed952f4b527f72b2d022006311cb91f711c33fb071658206eb3078a6dca8f4fe1ee6c50c6df13c6f4016d 
30440220033cc4cb0271ec6cbfcbc87e21bdc7f8cf7916fd651e8d2fd6c17935a44d88d0022040d2939db3a7bb2d2d5835ffcecc57e81bbdfcda53bd1e69a571a972cefd6b75 3045022050623110b905cd0a13e667533d8a408370c61ca66aed85ebf4a826492b59790a022100c63da9a3fe7b3c3b294262e3c4c6abaee0426e756cb3a503ffbc083f2c058b06 3046022100d24f2518050790a860db1b18ba3f2cc94e6a54d08ca4280a535d32dcf101cdab02210095e508829afc222e4787a468c3ba97905fac0a206d80ef09f5a0136ec7c5688c 304502200d48dd3523c795877a6cb7475c85f278c282d2c33bd4139848b2cdcf2dc6fba7022100a3b10a7b65ebe73b6dfe9d44d683be66b848c64c7caf8d77a48be72f3983db07 3046022100b024c81ac7d315e3995a18ebbdab258bf71bcc1386dfe1ece63747322d4e1542022100c4c57d25236bfd13d74a234521b12d538f4c3ef2301ad04a3768cd915d20bebd 30450220152378df1c5eac287543722ddd7a57506b3353a34cf1f476441184bb35bf02e1022100964facc9493402979a048c028c6d36359944993c46aec59b6c7eba8346df596d 3045022100c7f55af87c3ad08cea1cb8abda73d9711bba168c88a9fd8c9dfad972a99f47c60220579aec4d7eae117f1ee50427ee0e61e3e8c666a99f461600b5204ca87eb2685f 304402200e08a45e3aaf41178a0fa843cc0c16064c4acb303224e421a4bb34e3f87ab71a022069a48a0346a876921047c2daed5bb0db223d9cf91b4db9a5df4bc409ab12d23c 3045022100f90e3aeb557ff92c9d5912388b45badad8734727b1a70cd95257cdfa4d74d3ee02206b9029e78a68b28290d6dce1d8088255924d78a09c6eddb3014e0cc13684bd60 3046022100f56c236710340b4d974d2706070dec04bc958220b856494707e018f8e30c15c7022100e129a53964fa01557f5bdbdad00c2319f6a0ed3b67184ac4f05a56cb6d92a8e1 304502205440715183750cfcb1d8186fe3f8eae1e77269dfa2211806c26154709ec8b78702210080db8f5310bc16d715d7fb4e44f35a7cf6203a521d05c3cd97f908241d242a99 3045022027a08000b7abfb32dd8dfceb0cb385c2c43631fdb9d4c5a4cae00898a7733d0102210082e717d3cbe3f0938c4bffcbddc7a5b08ca054d6005df8b2ae2a22705f9f9ef2 3046022100990aaec8bfdb668dc80a857cdf7d34dd66879f8376edb3fbe08cadb61a4a9628022100c780ade7a15de1a76e6a8d8963c844193f3cb35145ab8b90f76324b85cc522ee 304402201e5a3debfda32dd6f842032459c09ab0cd9bcfec5468dd6bde69d2eb4445e82802204e490f7cf009d4923c585a21fa74bce4ef8dad02e2ce29913272214c01898889 3045022100a8241c82030bcfbf064b77bfffeb3322d652f805d4ba8fee3a4159538bffde52022041e0fce16c46b0f01247848fecd5d250f1b02b89614edf7a737aeae02a38754a 3045022100fea44ba3374e5f505d5d1d5e24518e032aaa95220f51041db4b877240c4147a402207c2339c7cba0c1e426253349ccd5e591e35e112a2312851e491f241b4e00649f 3044022053eca1047e4ab1ec06d6087fc4f6d0d1b5811edacb36a36b280abd3b176260b902204269bb71cd8abac52dd0d91ceb56834f32f9a3bbdc82337fdc8e38e677329dd2 3046022100c6a8f2fdacd8b710156dd784aa57beeed1d0006eed5552eaf5c733d066d7db2d02210086d5cc10ea800d3b451fb3eee0bedf2739ae1a1900db370a7e3cd84e9aec53e0 3044022044d75961f8766b1ca748b5b49629dada561f411d3b777ba0569fe3ca9621958b022035edd4559b1b2fbe7e8cc1731617112341b2219ff7857b66e96c96132142f119 3045022100b4a82456bd021dcc33472a5a3e2b20e9a3aa9080c9aaf6eb8a9e0edc368bf019022077353e474f3493a09f1fce9d5c55815f9d8b55546553d80994656ff2d2ae718c 304502200402a16df1fd09e7537a8134f2c0cbc79f9db77518927b6bb2b198632c6658ad022100e9a19487e949f491a2b266ddd6b8f14e88d148289b8048875e5515562243d8b0 3046022100e6eb19240ca5458840a3e06f6498d1c84f717834f7e4b2eb9100c3f197d9f84402210091a5df07509beedadd16202e030c1dee1ffe9500949e9876f20685aa1e774f71 3045022100b7d97296783bc75345c6a1861230c94789760094ffc3fe113c623728639db18d022066dcafa8ce5e6ad9602c8870698bd0f110502bc13f1aa35725cb764ee00dae54 3045022065ae56bb76bf301a1c0f7bfad5f47cf009d0e3c977e81ebfd5433caa99410d3b0221009ac2d38d484c5ee84750bfbbef0ade4efb5173a7435de2bd6c9bf9fc5872d45e 
3045022078830da4540f0aa9b7ecdffdf093157ae6a49a8b81f79ddce6914a2d4ab0a315022100d0dac0bbc3c8ba4dde4a8458276876f7e49944efd905e2e7746ecc57f68dbb28 3045022055696b56c5346ec571eb10cded69ba947116540dfb0dbfc134b5dc242bc285df022100af68e2293726670b68c308b775814c0bfe4148cb090f98953c13fbf9bb071f13 3045022100ad5c3952f07bca2b133bd6fb7f23d819d971caad966f96b727752113c5851d5d02204b9681c6ea5f585c93ce601e378a3bfba900baaf3fc34b66cdf81d9be60e355b 3045022073c1cfbc836ca701dabfe8562f309eb1bed4010479ec19662e5dbb8c314db794022100978f96d05161b396a8e46516026f7a8a67f431555966ad2fa95f4a4ec2ff1bb8 3046022100d6768c095d431dd02f82b2591ffc2d7b283d252614af2641362dc80c2c7dc6cf022100e3958b3ed64f9568a6f3bcd2e3505753b0afaa065373ff487811ea3f9b2e01db 3044022077d734f4f131b040101fa3967f9056d09011f1932b06c36ac27080553ce503fb022032327c01b65d8c36f64cdc877a0a8658bc6896cd312007b0e22f793c97911818 304402204bc1780d5e4e3a6723f8bfe9858f53081043cf7586ae4cafe3e6010edbecd52802205c5d9e69b5925467c236348b6ade5ddf1302a0b7792c6e4f74800d095c2f69ba 3045022100debbcbea7d3431cf9005ff3ba5627280576353c790afc319110134a1e51f6add022069c3be4a783d42f5878b1fc4590b5cac0c505aa6a747bbb7f2fe7ea03f506dcc 3044022058f7fec4802cb3d82e844420893b4228063d6613a73e358268afe5c37a761f93022033ea8b07557474fc0e22a3463124fba6ae0c4dc46197f8f84be24d62dbcc4df5 3046022100ee0fa387c13adb9db18295aaf425fe864109cea1d263620f463b006305738cd2022100935030137c006cb87faff6d8fedace50ec845281c2305080f86a7348c821d7b3 3045022100ec72acee2f1e8291d100033a8ed257d57a09b4080db5e9b12bb61011953ad0bd0220324f20d4801d6f2bb6d88adac892c83c8fe8f9f38c0b28997cb72e0391432248 3046022100d4fb5da46f4ff9620823702d05f56829b980a4ea1ce397c2ace3b414ed0ce3d0022100a4912bc86f0105f30d02ee852dada82e1d7eb0c62675c5bfe2f428cce92f3f94 30440220572c0faa5949e2726fcace788895ddbc701e91ed1dd605b875fdab7f4228fa400220602e52435755cb852f8857a260124e1bdf51d420291020cf1dd31b2404fefd71 304502204a9ac5dbd733b7ba8160f0143ed96495878049096f1dcbe4b32bc48432f539b5022100be94cfad78e71f52c47111cde8ee205c17d7741b26f726c24d6dbbcee69946cf 3045022100f0aca539d860c60436817ffd64b450d261f07d7fec8ea1c2827b75510923f91b02205a157daaf5425227227226dc31a361f10ac6303e6666de4d0b10a557dfe5805b 3046022100c92ce2bb64d2e9939f199ce409b4d7113d00d5a92b6ba54f6607e269dc5f74ef022100b033bec7cafafaf8fe8bdd07412be4f1d91c77a3cb10788664f5554221d14f25 3046022100e8779227f569556fe8e164a0dff58a39eba7b9ca4cd3e71e37825f0cad189cab022100a212f4c9d1fae49e46dd394384cada1a999753ad6ebdc668c963f15eeeeddbc9 3046022100f8b2d68ef148c563465d6ef84fc533e30945d9c9f0c30531856f253b813210be0221008432e1d76e35cb363b056468935e9668fbb7bbcbecefa21e961f7bc2839282f8 304402204563c830a317c6baea51016f30802678325bc09adef86539b304ceb068fdbb38022067ebc9eac3502ea71c53f174b3ec6b1bf689c570cc53a1f3c263e59cc7b211a9 304402205a0cab3c679e65a198c4606d9b14f89ad624661e147945fc315ca4833f9cefed0220472846c698a017acf9b4232bb575770f45c345a18e37d234003e66bed50f3f84 3046022100d07cff2e8afc2877484a606ae3671279afb8add141be7296b31c59853ff4d9c1022100faae9af830298daf0d36862ca5d19df26da93a9a9187cedcc6584a939f16c783 3046022100854a29ff0164e2a34fa87291591d701b9b55604669f0b843f8111893f410a281022100ff1bbe1677eed3933607dbbc861d6c5bbf2da438e6a84334ff1749c66a8ee83c 304402201ad34e85a867c4655b028b5803fc2bdf1e316cfbcf3c8164c496220e78a7dba90220424d9890ffadcb6d82ac5bcecd1dc83a77b9edc5ea1574f42d8858ad46680a8d 30440220522aa649ce222f2d13b86a01ec3a79548cf7682f54390e499c15cb04bb03709b022054ce26c131a5059cfa874c8a66f949a87e00421bc099e1b1d8fb879a4a663f9a 
3045022100c27b4f5e2fcf44f8059abaf8822b48d434d681c0761b2986d4754c9393cf4ae802204642678cd3d994a48b855709dd3b25679b5999adf10fdbe67054af4bd27c5db0 3045022023677030ae71b073746cb6e6b556bafb34ad12827296f140a3edcc5807973866022100f6136ce3c26b146bdb3082040c66b5fe5746d6ca540b0edc776cae5f34a98622 3046022100b0af57bf25086738b713440fc264eeb1110fc865f5387ae2cd0d100635c1ca9b022100c16088bc7813781651f0622a63649aeaa6ce29332f83a8a1f1b3c818d0cfdcde 30450220750b00667b439d0b14cef5cbb08e46213613b9fb77d01f5415735f3f0433cb2a022100c0eb655339dc11bd6c71e0d59bb647ddad43b29495451e408e5596c568003022 304402202e528d09ca678bd6199686ae916f852ed8a80e37b79b51d867c85fca33f362730220184070aacecd6b56c409d7d7e5c5e0003053237c2c5fb4a000d1ef53437e4a0d 3046022100bbde9a6cbc7a1a1e0fac1221246d8f0854ecc201c93717c1d3e5fa543ab0f043022100f530ccced4922cfff3eb770cd04d56cf13420548f9a566d35ff837fdd8fbf177 304402203c6e47fab9d2f438e55d98c90dc7b2e6760989ebee357562ae1b9181923233a502201ea23409f6ea73d15edbe7744081656db4572559386a2eeeaed6cc9dcf4d016f 304402206c5a27e58d6c6c85116e7c0b3b2ea1c4bcbcb3a37c6c4c7bc1d0b18e68f0ff8a022026e31e3de4ac2beff9efe093cf640c9e31c7db3f3ba90b4063dc6bd77ac0a425 3046022100cf3896e9cb6f402ff3da2ed4ba98f01e413ed46dbea24dc652135eab0b29da3a02210086fdcc30cf944fcd88f42841d6e55c2d4d4518b1191df81b312aa86198ee4010 3045022100e47c14a609cf72602559af36af7972ab7a4d3836216a7836eaba03b41c04a48f022037b8fd4bfc89b880217217dba649c7cd4c4067ca628890267aaf129e723165b2 3044022035cd6c199294e40f66de5eda4b4a1677ad9d0b915d11acaf55a5149b777da6ea022020fbf5bbad5b263951b40c792eda70ba5e977a6e20fa21b9aec0d6a630a4ba62 3044022023c48bd8aa2870612b795f4036fb72dfd8a1e9546aa60ef6e78e037ca7a44e0702200f2b33b9faa882b9bae6741ec860de65d8932a82239a6d7913eec9c40f11dc78 3044022051051d5bf62ab4173d82f834cda652bfbfa3d74b7213dab47ffb2120d60713e302206b889777b55387be47c11e1516bf1e46edae963fdd4ba49f02b7559970a2bebb 30440220328dbf08eadd9cd6bda7ad51ad4820e34d5d730d309de3be4580c6451ebb04e7022046dcd17611ac7134cebf217472c747e17a361f1c2c2b4a56aaf3e104da3c25aa 3045022008afe5006075464b54bea9229d2af6b57915bfe09aaa1517fc07546a1237260f022100ceef9b0fb9b83712cce846892647a1092eb11911df3ded31f6c381ececb37d1f 3045022100d36c5c9929a96f5361065879c3623e79dfb90811d02a54aac51477a9be061cb002203d4014ac0ccc00f608363ba8294c12952c9d5fbf78d9f660b5c78a12c240a3d1 3046022100ec2fa12ae04411c36ae6df13b39046b7a28cd89423f6ac9f9dd9ddd1d61a6250022100fcaea82e62216ec72a61b0c71f76eb8f374158b00fcebe13e9c4aaa2e897047e 3045022012ae132f91be2065f1f2c48dd7cb14bb781f7aeb7af6ce86f2970407269baf0c022100a84c58fbb40ce8b3b6b786c5f3cd6b756a8e75254eb868dc3958ffbf797895dc 304502204fa9eba1e9fe259863201cfe1f7f224fe4b32b7a686ebcbf06958192d5f3a5ec022100b35ea9d1d7ec9923123b6964f4136bcbd0b423935c11b60d60873b0f58db578f 3045022100f37f3170933913da1dbc94962a9bc7c6a570ed14ac926668d77ea52e3b0b4034022034581db7bcd42cb0a8609b767a0634e38a887bfeac2f9ba6ffee6d37f4fe0e8a 304402201a71bd419c58313e96b5e5fc47fb79a68a04abe244ca57b59eb9a2d79912e5980220258e4331a190e31f8b42e4a5dd4b2931b9425c0f636783c874c107303507e7a1 3045022100dd96f976ac6e462582b7f51fe695eeffac88f4fbace5f256efdd385702b930440220728d9bcc4f19c9cfc2d262f12a8ed6b4238fdda292456258de089f79f09b5690 304402200f14162869996c499e2fff7049bbca41bb42d18ee149107e0d9995045deb24d602206cfdec1eed8b0e58fc2b11b011b2a0d2d4a5b1ee0e1efeac37b340df33f95a53 304402204dbe59bb82b687f4e4a1245dcc2e84fe64a6484a05e2fd374ca98e9bb2e6864002202ce4ec955109e9476f1aaa42d4648f0bd1f19ba2a5de2756b904081ea6d47ba3 
3046022100e3929de968ce52d6a9216da43fe0ed061ae8b2b31f0f94822c969050b42ae7c5022100f87e07c9092cff55ee2a42442552b7dfbd4e36118f8dd53b67df4ace774f4107 3044022010f53113462b036f9ac93a2ad98f9a9f0e4d0cc9818591b918defdcf4265308d02207588932ac69f842095e8514c60b6860120e1068759f756dc922e6efc964a29b0 3046022100e1eb9148b0c6927d1fbcfb58453d6e2f86a762e85c25ed62bd13762fcea6e3f50221009ed6a763966a36d3740c1cfa95ba84c860899774fa926e1a7e395a718f600029 30460221009427145f24718e67f3044f466859025749d575069e336ec7608e451b4a1cdbf6022100f395cbe031c28692cad992aeba55e4535e2ee2b120611bad226cfd5e95753b7d 3045022100acad3d3938928d0b5c6d8f212eec9e4659730114b1c9466e62c40d2fe45aef01022002a464857500e3e4ed7866cadda8210c4b90456348849d4ceb940a1a561117fa 3045022100e1f520fbc8d812e4829ad784701a87c29b7abe29539f439a7b460b44ab18d78b02202b6a082c77d31dc41af7fe68e212c68b9ce053c029dad7509a95203752a852fd 3044022002976e5b498101a9ddefae5abf2578df8bcdcd9a9a277dda9cb132852c0407a502200af75eb5f8eb5e0695f64359b9bb7434ae95be88fbcd3cfa22369d4acd921033 304502201b0a2684eb4835fa260bd4ab46e4a9dbde71c2757cbbabe65011d6f71c4722f5022100cb463d09bacc2a540e473c5f402c263032bd2ca5592c9c165ba95ad7882038ad 30450221008fde4dc460898cc3ca43a3b38e90466d94f9b6ebc6b8915d2504c4dbc7a4b6e102207d727562c98a215852f94a7e1272fc53166fd585031d185e0b566acb29cefb32 30440220305fac57d8917cdd05147a41ceaa2335dc8a2e06054793124e775d6d1ccbe8e402204b7de24632e4def9a753c5c0148c1125a7b6cacb7932ebb4d14d2d16e0cbb4ff 3045022100bdcc8a3843ced6bd1bb13c5719fbace999c669b6f83c03ea4132efb6c73d35ed022026458fb8cd4127cd21560e839ec5eb6947fb212e863d5bf8d42f17c3b2b8daf3 3046022100a3be472846389167c87c03b05d1e6515af8c424fd9f8187031009b94821a745f022100a01281522a0ade182eeaf2162a4c822698fdf8fa53292da8901ea67189600543 3045022031b4fb3f1a6fd6d80e4c6919b7b8c12fbcd1b27226273eacd8d0e4d57d48a4a9022100bfba625093bfb404eaa17056059de55650546990e97540c0e94c35b4829fce13 3045022071fd0b0d210a1c39e2064997e5a92181e86d0314217c622856cc74c1c06020a402210084e2a33d39d0def6c41693f413806f5f455e99406600867fea6b4260749891a0 3044022051a1b97426b9b43b79941f1de1427f87b0182ebf717c21093bd15fc02a3aec7a0220121c483c513326af78aa6a98500e761bb8ecb8de10265467700dbc6cce970bad 3045022041540b718731d124d7f68d9a027eeaea3a7a9bcb595d3327498d36c2c32549d3022100b52ccae1c674b435408b21df75dfc2bc05e9221aaa36b54c2e94d821fe3de93d 304402204f75467bdef07f94d7027b6e3ce7b61c267582c5266ea18d88e1ff7c26520dad022041b5fab569684a132836a1a7a654eacaf572b623e92bf955e96ed12d810eb03c 3045022059cbeb14a9ec4014949069ae6e2015b7feacfad8ab8ed56cbbd60a4aab93f069022100e99421de69933e8e5d4e733d52e6bb0d46e0b3f23ba3abe4b73bbec841f453a9 304502204d7cf7f74ab59f2dfecd8e6835554fe3da731433cbd6dee5f8fa1ba2fdc7d389022100bff05955009b0dc0fdfd2fef7862851af1a210b91daecf99106d8d546a0bfda4 3045022014f4dcb5067e5f0e3f64aa9dc0925e3e4d6cd34ab55a9e4ff53934cf89652bb1022100c2cc6afaf65ffe4f7defc14d13bb74a9498f657d033387d3030b330b10b5c011 3046022100f01bab90459bfc326516b5695e8a3011594b6d8cc23e69b0a42db7c7304321e3022100ba1e85d8dd5b1ca31e2d3088198600eb7ee8c15317600740c0c83802f3359252 30450221009edfc39a970d22eec06e46b2c45d96e69b2a4921c3402a5fa0a2073b2b9c27e202204b567de59648f0176d149b09109bab02d8d06510b7dc2165fc886e5d90d6c4a6 3046022100ea3bb61181d9e0cabb75cb73d286d84beea1e5ea95f052b1a130a83f652d03da022100c11667e6ca6ad8736fcdb0156b242a55612cfa749d514c2f67b177a841dac36f 304402202dfe6957bed17b505ca65608da13c2ce810819c8825c9e75bed60a640713ab1902202a0b56173ac9ac43f3a054fb11e2c1c8c21b4582ccf9ec9475a78fdbcefce713 
3045022100bde8dfe2b0911deeccdbcc42010fbfd291ca338c3511416de2442d4f33255b7202202b1fec000d1d71e57c11ea324eff8f1ea56586276b54ca0a2d8010c0a042d3bf 304502202e80ecb1b86d871d73b22bee776ec2bcb408ef14c948b9f75199a48fe7460dc2022100e87e8b004aed2989fb04a7906e5961852099ba4534adc17acee5608c8d1ba411 304502206761a57b9cd9cf1ed2112569cbea2c2db0b886257fd8cd0a19960cd80f5a8978022100f009a9260bf75b3255a908e10616001a500b2fe5684a9c8724e990bbdd3e92f8 304502210082a0f899cb4c2f64c542a3a62194c7e9043508ec9ce687b0b9761fe0c67f718b02202714669cf098586e7def14fe404348ca10eba9711405f1bf6c4cf6e72f591433 304402206900f4e874c08ebbfa6b2abc73df6dc9403b042d93356f3801fdc49d7fec711802206e7fc58ab562418b7c584772e54fd295aa36505fd2c2febb2b6bb5a4cefdf439 3046022100db71f98494219f031fe7747745f34ff4898d805390d083940d59bae056822e360221009a6cc7b487bcdc57ad685567ecd2c2d47b68b47eb84854d866d1f95a589f627f 3046022100f5f910e4865bebbdc83c3476fc7d7d2f7f6759243e8c7fb0a4cbe20e6b62c56802210082c555db1e3f270cb6df993b3b2e592969c4d877a893d868842d7c38c683d8d7 3046022100b914182081f340828bef6e7b931ef77d15abffb46b2dfc537a5ebddc4eb546000221008ddec3a38cd29889d23199441caf39f3f954340684b0f9e140b4c66775675d64 3046022100912dc764b586856360b07553a1d6101ccdb753b7e0ac6017101da021fc8a3114022100d6648183016f6d3ceb84be55cf2c027c8145d9dd3ac5a6b927a3e6b777fe7257 304502204b83524873a8afa025b3f2eca2691e1e5d0a6a071a4bac5797c426b7c5234907022100df277a61f6d097c7fc1d7780015a7de05004e55ee4d59a610366a554725432a7 3046022100802167159187e4a896ad35c595b5a072387fedbee551e62ab045e306a8bbdefb0221009ff6f2f462bf3a7e9ef8196cd2e19603e6cdea8a5916dbde8285e519d5fc9c86 3045022100ddf60cd6c24565de9c2a0c5832fcf9288962823325c6102d117d5c070d22409d02201cc79c08b924b6aacb8c334eb2c1e64cc7ac0edb687bc009a527d579fa6eeca5 304502207ccd1ad7839f2c02bf42d2a2e0437fc3d27759fbe99c5df664a7cf10a41f5e17022100b9149ed7c43afc90647ce24247723e8fc224aaa0d6ae50d4d92ae547eab36648 304502210096cf974b774c8dad5119616501eced5ffbd1108387788e945162372f59909b6402203cab151cf255c65041b2fc38d07c46a5adc8532e6fe13ce6bbf62ba36721daf1 3044022021e853f26216715176d16c1476e8247ab838b02dadba05f278ffdecda161a279022042f55ad2e59c396930ee2aa6579599774bb8e28bd0509590eecc73ffc7f225d1 304602210084a51d829dc060104fef7cadd194050657b5316b375ddfc4cc5c439674393cae0221008fdab577f2e87b96fe1db6a3b04793fdce64648346a9cc0eb389b52d8d0e3138 3046022100b58c9cbe61b1bccd6b657238577091901e9b674437f63b832d4e53c7c90075430221008d6eb19d996892bd8483f44cc466aefe54a62479b86818262442fd820d6f8471 3045022100c09f405431160277aeda87aa62ca11ee27b805a641202f54240b5fb18449ce780220153380fe8c91a7437c35efe7be758cef2802a52f354c90049eb0433ff266e520 3045022100b3bf878df901579bade20397614b1076c1387c6edd10d8caacbcf4cd3d88729d02200a2bf95b3ccbb93f186ee0f687bc74b5725a4f2ade5053a1b714ecfde5b34674 3045022100d3d1de7f7d54ad686139ec92ca37ba04457a09b62d5c959035e05d62372e9108022065bda6dc9cdda8950671f48f998e0b1facecafff35de0ca9b83d3558bc60addc 3045022100874d08b47fcc524d04e157416d2657b7701dcb7347da470911e3e9d791796e20022019fc676bf9d9c0ab48110b63801816651321181e6d132a8a28221516661d77ed 30450220268234dac8f052e8c15daa7c0aece98941a31a50f90d6d74844309674ab78a300221009c05724a0d67215a4da0924986e97b94d8d5922f119bc9cb7046d78c015b444f 30450221008121f2c7c374d04d7672de8a0956b3b5c396146dbadb7da2039f0221b5fea7c402204d49cddb266da5fe7679b518e3e22bc38a9410a63fab60d5928044294e982c61 3045022015b4bed4f213e3d4123a655c5310591b32f5fddfa4d555d4128d1ba4da328f54022100db648bd4143830c2180fc5bca3c66fab94db0a2e4d565e41e73d404ee15c54d6 
30460221009bba5e5c100524462ae8de2f2dd1ec319767696601866f6051d836c83af5705902210091690a94c30b3f3e12133167de6f82777f5c5e1ed7173deaf6c30a00376aedde 30460221009988c0facde77ee21536e386442a64fa200c58f784bacebbd4464e5553351dbc022100fdfae65f877e0eb56ffb5941a9288ba5e3a16c21b0f97cac831ff72cf348870e 304502205775ea714fc9a346a3b819e805e27d3a261038758940ff66b970003880a7b5dd022100840c27413ee78458c8a287b813efe696bb3e3d9a027edb332e80ba9ad6db9ade 3046022100cdca98f01e28554492e5942faa3b1ad86801f923930ad332c646db258f99fb6e022100dda2b879178a7d79abf2b10bbc97f4128ae42ee085cd6af6f69e2625588a5d52 3045022029df906fb1ae1281447f7149429086d37fe716fa7576422159631f5067c64906022100ca4c0ace0d1971d6d28fd5c901b0bdf6a84d421e8145995ea49c924ebe32057d 3046022100cd8d3076e66168087434d8c16ac42db5e4f37deee4752b4f1ced3d97c855ef23022100f53fd2ebc6c4aa13b7c00702f69d74b7310b85994c9e4c806d7699ba161ed434 3045022023f0b58fef8b38e0535fcd9cdc8e242221cce07bfe3a62ee450d54332daf94d3022100be21fa657fc95adc825f06df551a92954ed2cfaaf8a2db5df106b942bb4ea519 3044022013432527ae724f9baf3df1655203308c648efa87ecbd24a601870b6c0e4c42fa02203cf1055c412ffc9191b90c5de7d38e03802f8717f7fdab92b287809f878db65a 304502207a2e4994516016a5d2e5f592c4a5424af5b7e9e9f5a20a0ec7253646227d07d0022100a5dedc55e008e054f75c2310dfea177848d8a4a94d609f7e1113e65237c53c38 3045022100fe10321ad3df38e6f9443e866cce260154aa1ec62184b22160c3fde880e3e54f0220612e3c66883a8d77c45ada3f3d28c59455fe978e621fcca75579e9ea4c451c88 3045022065adcc09c99bf02b175f4522d5b3a2d2ab88b7c0eded35a59b655dec651812ae02210080c2c4e1d72f7238f264bf3ba15dc10cbca63034e66f9e1c35e172ff903a7d0f 3045022037fb2b39c5d456ab7a0477bb82344310b5ea49e599df3a38595aa8c694da7ef2022100c4b0e339e8aea9df964b3ef3bf2867c431cf7319fa0e76f1954e7e9c67cd8b9a 304502205d75e753d695b8682ab78657ce81a013d66c048e191a283dad2704275a0de04d022100ff374084afecbd76dc4e25e241adee0cac8fba279739c828a194676138a5e960 304402201eabfb337b7e439dfcd61af9f1073d7f769e77502f448f779d7c156dbd182cf7022058a65708bbb55fb0db325977bc56aaf124c15fed499b8556901efb4051c75c22 3045022100a5b14639f736b04cd86fb04201a2dd0356f4099e86bedf84f6b5e519205c677e02205b82ceb8cb41f86fc8bc8f401161a0e6d5d8d79ef7badb5caeacfeeec26265c0 3045022023fbcce656163d77a12e17dd051486213c2e9d622fae783b71279170d49195990221009cd3c6b11e02b172e055105a1a7c49e5c7f7a325198c5ae7812dfe8e574a48f3 304402204897b8c68df9ff41e3503a72c46c5cbf2f313c2218fce629250089d58f27105c02200a4ceb443a3256cce7939d74b18e0bd41597008904f8d8d2c2c06d67f27a42c7 3046022100af3d1a000d50688f53ae8a81c254c1c6b8736947a1e3d7867cf61c61421097f9022100b92c344c44000ffc9f6a8078bb5648f27ed4a10dc73a1c2561b6cb4ea35cb76f 3045022100a4fe129ae72437af6beb8131988735fc51fd32e03cf8e15019c840b6c3e176be02206946ae3d4300740f32e0a1a4b1bcb505e424b7d31dd7a57ef594c1bf0aaf3ca7 3046022100d605f37eafb710d9f64c623560db7cc556367bb2b362b2b92cf155c527030b83022100ac2e4d6ea29641cb4b072bb1410c20f43bf808ab73f7cb243dd0fc92b206333e 304402205f59ef066f818f014a621bd0189185f2150e14c45f9ec4a5eabf5e49f46d438502200ff59d300d3a8d2aa33ff868558efcd7745f302b5e3ee81bb0264a3a8f13e9a7 30450220125802cda8ce780eaf878fc2522333475e65b8d615f0427b200f3e668df3c9a7022100d622d4448cb0bca310ff2adcfb68f162ceecf190e101b0f580fec59e271253d3 3045022100e40aae38d59d3bbe9a810c29a14b7441ee85c32efc8ead11aeaa872b35459cb502207c0dba415d2123e322b8b27b3208a1594a41fa194a054d53a76870f4f995a2c3 30440220579ed57487d0b3cd530877306944256f8dbbfce16011d41c5a4a7314896f23870220472a5498e8df80b2a7fd779010d1a2f96229f8f786b15a24db15c551d1503212 
3046022100ae084065aa86aea6834e9d138aa5cd5777346ab650ed0f50c19aff192a6ddd58022100b94b9f5ea66d799a30ef04e84743c5daf42b0c477cb75f9716e953756c3f8de4 3046022100be9cc6ef1077164bd3e95d9401fcd00a71be1430b9890f5350f0f77a3b71abd6022100c55806d156f8e6f8f6556774e4c481e25cee2b67a2c08965a39af17c0f752b79 3045022100ad48e7921d4d34dcb862582aedd8259bb4cac08554a02c3638ec5f5672896a81022049e0530913097a7319e15e3269e0846f9ce45235e88f0e707336fdfff9ce4878 3045022070f142455ec9d1b04690e92161db02d3d2533d6df9d060ce804851063918a1100221009b53eaef282adf77123c53c9870418ca91f47d89c1f4aeea794fb6c6772a4e57 304502207e552181b9bae20509f677fff89e6e0e5a1b1c2137146e1ffe540976dde447ba022100ed21cbf25c0c0a9e9d7824aaf1992f11134d29d09868fd9b7c49294d6aea7355 304502202b5c130e6f5277801ee3002ab346872b3093871611e3d0c8b1b8f4729bd5ae2302210085fc04114f9638eb3a0df17262c10deeca53c743dae27629110d2e28ff801ba2 3044022056c9a8bcd682adfcbd989a7969ea8743a70128e86e196463dbcd1285e00ef35302206631fc4b67db13e284e9d66047e62ff30d7b8ad410becfadd77537dbcc7c2f3e 30460221008bb598deac4e1aaff8876d9524c2a4dbb51e8165666455ba2d06c41929888ef2022100c1fee5ebf15da86426b0759787dbd5533bf38a4026a3d9ab3b5261453a574b9f 304402201de8c8cdfdbd23b4f0a537b4fac95d24f648b02e3f6035d5e56f8ca649c91bfa0220273cd385b01a18f3a90b482d05b1a77e54f0e99a9e58952ace56ce6cf3288d21 3046022100b2fa9671c2e08fa54b0a35f30224ace5f886ca2856d36d49e4f2a5f2442e530b022100acb719577dfd4efc7809feec87cfe1a9557f7a92088be02737943f2299aa9d60 304502202a2458412ad31e986924764dfdda30ed691b0c64c5ec18a1c22a408593359f1b022100ebfa4dfda383609afb29a529754703f1caff3211f2b1f25e16428909f1b9ca90 3045022100aa29b53896ae5898a7fb4b1c36332536042ae4e242060182435e427b3a55496b02204ee1555ea9ecc564b87b4cc9588ca968736f98515a872dbd6ce0a7325cfcb28b 30460221009fa45d3ae6a79ceac0328bcd1a2a103069877a87d816a9b65a724ce30c0f0b38022100967cf3dd81f8a77bdc2f671e86b64d92402503c53dba8041ab366bf9900c595f 3045022100a61cfbd3d69bd905101c7c8ac95a06723f8b43b52d3e46f9babe6bd54d8c7c7502207576bb10863b3132710d453c4248cf8a93bb29b0c2d948f9b3b53d394cf1c6ed 304402206de394c31707553663b5ada0ada4f89728a487d89bc462d0871281812c5a3a6c0220273d7892bd7f2ed22d2bd42096e278806dc1dbd9b5c5349d98a5e3a45be5a8b9 304502207c5eb45774a6b89b3a2123898bdd94aa449d2abf22469de6090bad8c1f05b2fc02210093bf153ca1464a2984af43e04b34e09fedaf56bb4ac8598586c597724decae9a 3044022042f40518e9095e5069dda10a5d7bc73de960cdb07dd76a25e75e2450d1cfd3c30220060854fa1d34623e60922a38efcbc092e4b913b6c28e4411ec7e433e52cab53b 3046022100fa1fbcd1224a37dc48ec95265885e54d029bfc4e68424e9e3e7bb770bca1f4f2022100e449d2fe188f14ff893b0424cca15194d2426181263eaa962856e8c7f40af53e 3045022051f101e86ad9385469553b9b6aa944ea7ad1a1d9b509ccb0d4b1d60a72e07186022100a94665d54b76c6da0c9257301bb290824da1f65027686372ec9032e4137b0be3 3046022100d471075a7455b1256f8ee811140d020a2710e21b0c0c0902d0b42b02f9f3bb6d022100d2d3300d562efaf6732015ae01e3338e5b1d72b5f27662ad4c63fa2d98d4dd3a 3045022025cc0b83e3f53f85b96d9defef5054e60696b030801537d1926245e2357b754b022100c29a3edb74cbfa0d0294f0970c9eca9008c1ccfe6db15a3e5bee25bdbf5a546c 3046022100e0bcda3618834aa4799af98c96281f6817a9d6384148cb534914c32c90e83d04022100f0b5846b2ce27b5beb227154d96b5bcd414d3ad1da4b0b312623828d790c0767 304402205757ac3d1b8b46706e4c4a7ab939244cb0ce2c71d464f7ca27f94adafcde5aba02206f014c0a87fbb583da4428a1515941cc5c565d85397770f99f42ec12e354af44 3045022100ae25ae0816b4db27b7531658fd876a23709e4bf91d47d53d8d04e50d01495e54022072f17d7f1cf314d80be8c7ce4593257be7e994cac3a96f1030a879c7bdcd1325 
3046022100d3bdd3f3c7cee05d05c200db3bb211bbc406876c41ee0d80f8638597288c719702210095c5a0dfd9832de62ad877f28b8b717c14913ed3d6f9ea7c622fb450b1f5589c 3045022100fae1764cfa507de4cfac8ea085ebc70fec99587de3b0243147210a17245d990202204838026d58b9b411143a5a9e7894e28ed99d5b1d6056f40ff6c1c28b3c749ee8 304502204df11516b4174244fd63dca5c3957ad2cc6e9d138f1637083ddecc14f055889e022100df6ca711e3b67c671a4e7ba79dd56dbc889adb776901397043d809b13834fb82 3045022027cbc4cfe742af6593e1d632c912b868dec019e35ff988a5fd2e468356f35b39022100b48f2492ee86ddf0d80a0438fc412db1d45038ea31625baf3369ad0e2999c0d7 304602210090a86424383042b5f59dbca93653a9dd67ae1b8d6010ab8800269a4913428335022100b46912b2831faa8d0f7fc25658cd6a4d3ffadcc361cbde515a83c7b44fa5fd53 3045022100e308d88d63d1d668875bee2686f4385a7753ff9e5f6e759845b9022c76afa6a6022005fa7c9c1a86af8255aee4f4c68ee8c47b963347563dff32e8a4d16c1dff0ea6 3045022011a77f36e3f522bba5fc02a0b51702bd3f3f4eb1aa43743f813748e26e10a49b022100bc6bc7266aa05c51befa83616749fe524d6045accd53f678f41be8b6197a0531 3046022100ec8366e69308ef63ac04e62338ab8451fb753e245b492797c7bf02016093cd72022100aa6e16ff72173ea7d48f93cf85e8301d89d45b853b834cdf4341f619ccdc383a 3045022100e07b6b33f04fb51cdbd021c9dfc70366bec758b9ee0bf7f31c1eb92ae5d4ee8602205d79ec683ec0d05629e51f0003d117d69deda043800d29df1cb7c30bc9ed5fbe 304502204edb10191f2c83b9b5eee7a032f479c434cce58bc92bb25b0a6d346a8466cecb0221009c5c94189909f5fd3fd3c0ce0dfec9370420755b283026f3084bb4af3635f52e 3045022100e1aeec8b22bd182dc5739605f5581ca5ee7c04d5286ca9fa71e7785da7bdd4b002207cf3b4b3b6d284f725b6917a5a57a67347642d7075d3bda195dde0ebe9916f68 304402206cf806b67f0527db63bf1dbc4d57d32ee88d0d0d07dba6f1ac1a14e79c3f545b022006f45e272153c8b53f07e385bcc74096eabb9da7da693cf13564de28e54e50b7 30450221008b2fbd829ac551eef3ca8d40609eebf537be0446b3bc42cf4a5c1e90e142864c0220564f8163e034cddc45a04a3c0d2d3fab6f2b622677c40c6ca25325cf0267eb3b 3045022100b769a28fe61ffa5ccea5b5b05b8c0dc0b773d9fdc5d658618562f07d09a4acc2022050f67f87762106950f3626aa94691c90fdc8fcbc6b1999a70b018e1c04293fa3 30460221009a29b46fe5f36220ef0ee4c9cc7f1c61671538052cbd2da676b76f9b0ed7fcf102210099a23b640361688912136cc79fb56910b289f6b66b38199d875c738ca15c676a 3045022100b7528357f7fdd9bdbcdda1967c354d200b484539781bc2d714e685b359e95e96022035aa4fc15a65268f7916d6af6bf3bf52abd0c3ed0efcac428c2bbff5074ac844 304502205bb0402cd95bdf09d5b1821ccd9b56eed4f9a94df8dfcbf459c8f0fbc544dd5d022100e9a09f9f89fdcf3551b8409996520eb14db90a38630af3c8eedcb5c5f26ebd07 3045022100873443228b912f9f02fec760f37e9dff16a5c57f97d58e4be37e556287ecd0ce022054e14e456d17ce5dd7c90418f926a4b6e54845a31ad5b8c25f3614304be3a1ce 30440220066900b8d9a2b7df97a98eb83bd361ebcd1bcc4dc453d1c7b700ae12765adc420220478ec3db9ab32612770f55b5b5a27a9eef36e8e56ef4ec69795b36216166cbba 3046022100fa74759925086db543b91934fb11be303d7fffc07cefb604cac4dcddb9957919022100f54d170d2a946020455fef5d4d02e84645a00a1dbd1d9e21aa666d4b5f93d3b2 3045022100b4cb3246261545e16b50fc7c6a52aa8a42c4d2a1af616c091966cf35b3f1323a022019634705c0fd5aab2429bab6f5f8d495b2f6dd8f3a6d06f38a1f304378a66f22 3045022100b75da4f02c6594efe44445fa866efa7e42903dd9a9d9d5fadced10870ca0425602200dd96fe8ac02b8165e5235be9635190ec4fba70518a3a3e37e317b018c6cbe6d 3045022100ce14b27e35bb58c1f78a5c2eb4567fc7232abae5b6d4dfff2b749438657eabed0220084b55bcc715177c0449f48173207c4f94f84656720b943538e96398fedf555b 3046022100bdf390422849f79afb11cb2649fb1e1fa93380a943eef80ad4047c3f0c06189b0221008a2dc0285fc205d53f5376ed1761b9eb7898d7fa5c8550f716bfbbfd9977e97a 
30440220278123b980a26020e1fcc9d38a4332748c26b2ad8e136ecb287a569d09adbab0022052de4fa6e082a72e9164f11dbfe3d6f889e1ff0973ede495d24a2e2e2fb67778 3045022100f90db2968f688374d2469890bf753a32b76fd972c4a0c4bca42b54e0c4c79e4e0220209bdc9b957262ef2bccf5995829feb47dae001705d87252ee23998948d3e8bf 3045022100e7a3110f32327a71f770be8b22bd5e54b0e5a5925cc88a3e963138087eb79151022056eaaa3f04126c1ef3af7d6fd1440b73fa466318ba447ee5a1d43909858d0502 3046022100ffeb636e9541c5b5588531dd6d65b91f9fdfe69c8de82bc36f3f02c5d48ee7b602210087ec953582df837919b0c82755a0875b1ee4765d8f7190ff7d0cf72dde7dbaa1 3046022100e7bc593f8df51157e938c5066e9e13cce9187b43eda7e1de301d651d5f1aae0c022100ee10f9038e8915c03ff96f9a0dcf990dea77ffb861c2ac000224deac45a16446 3046022100969c895e9a2ca04d02819b7b8e9e923a91ac5a2af37ad02a9fdd47d55a501292022100e6f0e697aacfe6e73d433de29f0a4149d1c0ab8fa0532d5d65d071a86d0cbd09 304402205c2493d70a25a2d2e48cc9046926a411e11530f75ccd3daab394c1da16f15dac02207b97a82fb858a07ad940bd2d3d68637991e654273b3904d0b67116bae98e69b2 3045022100d69224875174a8b84b2eb1b724d7dd31f7cf84544cb2b8ea682d518d506dcfa10220133cea2f748e4ba91e987d26cfe706b05809fb1d7577efabcf298712c9d2e96e 3046022100f5bcbe41c2cb5653e4691a6de3871cff224d56bd1d69ec01d913148b7d91ed85022100f0baa9fc6ffd93c39bd272f189052acbac54d21d412960b20d43528719b78139 30440220772f4d86b5cfaee26516cd3e5ee6a4243b43568494e76d6c4f3d86f3eaf3b325022008b45b9c6f86995248c52215548ff1b432aa2e500df36bfa3752c4575b17b0d9 304402201bcc010672cd7eb5a87e307faea4343a7c07fc538cee551a8d3dfd218ba13a1a022032c8348fb8f7d3d21da9da60f23dadf6f454008eb3b6aea72e3050e7ba6b69de 3046022100b1adf120ef622b3a42662b4b807113946e687b6ba6d3f17822e5d5f9f0333c48022100cb730d9147fc4fadbd8d09bf5f4e40a60cb4cd1518dd955989f1c8aad0b3bf67 304402200f8eb49545479acba4f770ab8781264d99f07fb917c827086d6a0a1be48645e602200b0432441cdc0addc8e244c139d28471062c7aa1ac87eadb04fc7f5a8eb4997a 30450221008ed052c9b119ddd994eff5fbf113a92adcf809fd91c180cf65ec096d22aea5860220627ea6449e061a0c92d71b7ff9109a709d625e085cc3160b479271d4e76952df 3046022100e8f35777ccf564e0fa56c7395e3adfe8e317d21ca3659c66107d8c592561f69f0221009314031115810e16a021cbe29098fea6b3625d235d8adf235c2078ee2b1f37e6 3045022100edbb76acfba67cc258817ad236d920fb163f157a04058f75364240920d52acaa02206434016f9d6f609a9d0be12258492536c62828ff9058b74f67111aa80ecb8bf3 304402200f55dcfb1cbc5b09ae287bb106142c64c10dddc91390cefc9a3977db0d7f5307022052004a1a93881b1e329b5634effa5a5aea3e94012d83ef6a9df2e739a72cf67b 304402207a797435f033770294cfa5eb3c550992429c7455d01386b65bae90c45f640ce7022039eefb7d370b0deee95f3559dcf1f55e762739194748ba793d6a52fd80616aab 304502204775e1addf2550a1b7830ff71d5b5ac96b19389a1baef0e8e0a1b14c4a915832022100cfa101564779473648a59ddbb279c26ef1954f1536f14ea18be5143413a7b8a6 3046022100bfcbc8c2dd99f2a7261ac4499e7f6e9e823bca6a63cc0d1fcdda450fb7d458f4022100d3089dd7823c8af76edd277102318b3ba7783247949b0437ee214a125b4900e0 30450221008be8b70f3273ef6815e6b89fcb0efb7908a0b2ef4585928402365158c04b287102200f96a6f8fbe2853de5a51b40a7f83608f5b88caa64afc5005336787731364a86 30460221009e125df5dc76ace2b543fb4a1dc7fa5f6920d30bfd5cf4e1f7e8fe2bd8197b0a022100e1f5f83f780f4cc27c34691a759a0e1350255139d0a8647b27dc2af4d4049d4d 3046022100f68346945d87714249808dd534f3d2b347a0c469a88eb0917c240219b1d38edb022100ebb38eb64d1a41f37037cca8c94a78fc45f69b1cdaaf726472358b32404ff8ea 3046022100b22e6cad7897290346af2d5de472a5709e17f107f6b75edf0da4834143bf6ac60221008c09227d3374661bf55b138a7c0e3844c28b0d43b7925df3b59a9003c463338b 
3045022100b70b2066130b8770cbbc295b41d0aa8756af509b6fe64e9a8d5099f3f3c3f853022058fc66a0d6bac31c0bc552c86a873009938de7d75a3f15208e14bafce76824cc 304502200c1bfd4dd9edc330f12dd6cfd60fec3145a3790c519fb2a8ec2860b6938d80b6022100ae487a235bee20ecdea03c278c210a18fa47279066df4b2e896a9caea078ae9f 30440220122691b12b95f485c2b60123d43af166c150cfe6c08de257e22cdf949397d9cf0220680ec3f23b6d09f3c380865b9150e04186248f488041e7e6b38597e366fdc067 3045022100dcc5767fc9555c7b32595ea15bde8c3bdc535d7a73a678e8a5fd17e473735523022066c559fab9dbdb01a36a6a283f56a393a8afaa60a43aac6cd56956948d64cf5d 304402203da6b7e23121cd9a12461ccc31b9b799b6bdc3d28425e2a6d6f6443e2f129dc40220616304975135c9a35c37d7c3ccd0c50247af7ecd6b69fb6d1fbb9aaf6eb793d7 3045022100858df1054f96d5ea3fce36c844830be80d67ddb6292279bac10400b8738c3dcd02203f3005615ceab3d6685a1dd2c7a5b4cbd02741e2253ba6cd8c1c2bb88f49c12a 304502206c6479b941440215ace6c18f4eb5983f77ac5f4bd392b17c49e541dd68b02955022100f86532b40af88ae4c532ab25c69806c69d371dc00f7e0ca43d0ae776f3aff11a 3045022100f7d2bf3a19ecd854fb91c9db53d707f9be9f4d53fccbc1fde865d5681ce1e38002202adba9c61ff23f974cf7839fc04fdf5e61bc436366647c23581a0ab62894ce86 30460221009e8b62565f845c8da2e36b10e73a24b7798cd7c1fbd3d682c6534b10ce2f328d022100dd707caac577b7c424ff9fe6037d406cac3b4073bf09faa130adf083b85a6fd0 3046022100bcc4f561213c8d8f3dc30876ba62744e7bc6ec94b5ed4bd7af7fe09055811ba5022100a0a70c2818242b9f8d3cb749f6f7137f38d437d40ae90a2bf8670330d52c91ee 3045022100ec20e7ba862399c900ec816ec80c6eaa9fd573f5d7fef12915eff19dd5f7b6b002203336b15f979801f3f02bfd42308346b55903632406890d4e30fe1e443b366ac4 3046022100d775ae75111f52637a502cfc899d6272bc68464548a55beccaba281c2c038dc9022100aec0f70b4cdcecb3d8f594f907f29f79cd59a98f951859e7ffa71a829304d8f2 3045022100817a03da155f64a952d751c468ee8e9096ff9f90162ff3950eb8986fb0af2bca02201bb69db5b57faec59992c3e75de939547c73396c3920eea2580be68135948dba 30440220754c44a4a9c8e892bc9fce63be9d6fcc320c448d096d28f177d7c1cb4ccd3e1c022077548a83b632c709bff8816bbf6b946e4b430ed3c2329c8ea0eb731f0dc8dd68 30450221008fea51b89b8f957186668c6682137b34f49a6565957fe7afcaf787099fc3be0a022002eaab4b9bf52b417d95c1fbfd794bdc2f07f892b135ff91419ac79149fd7a5a 3046022100a720e4a6adab78ce35d970051feee045c0c44c7cd61023d21ccada04c6c77eb5022100b5a7109ccd070fbfd562204c598f8ad0ae48af03d50eca0de7b72efe78f5817d 3046022100c7f97c932cbfa5301fcffbf907e81d5ad164fc8eac53d25a8633fda455a99636022100d6580e99fd985b7486bf7120ba16679a8002c3bcd540890d38c5c9b9f4937b1c 304402203ce4cb5e5bfd320314af1d76371932fc8dd8452d5bb46a8cc393293ed30379580220784432c703beeece08163c27aba9d989e18190db94b5d012c1e95b6f1bcee68a 3045022035d443a71ccd4e90a37e85a0471064def3a2a32710cc0d249204b61cbefda7e3022100857ba12045a68cde87155350ab1fddc0de67334482e59c441282bfe9350b3e38 30450220794753bd6222c4521b9b7370aeaf0b74476c776c054ae55f611bd7a703b5f0ee022100adba8bd18efa7acce237f528c526209c4adbeaa4454c22110f267997865a6686 3045022042332446d4ef37e198468ca3c5ddfd7ad31005da8a21b35b502c0e5a41bd8eab0221009ac6a088c8be811601583bc09b983ab7d37d48355cca2fb8e19b206c7e965599 30450220564074eb291f5033aab4093d27603204ea421cff7c887abed825f43489535177022100cfcceca44a3692a546bfc80818fbe860c0165fdc8a629b45db0ea46a522864b6 30460221008917f4c321b9e0a4d2d2ab56720977baa0b111b0ae4efced60d8f1ba040ac2ce022100f70c26f13de4d735ceced3fcc5f8fb9eabb67002ed0bdc0bd885d9084cda605a 3046022100d6c8bec5627ac2011150fa405d24834cd39cbb2f42502e99d6a9e6b415bfa99902210083270d19fbec6aa12a7ef6f730c6c96168bb8fe8513c0392a58b939c91b1d609 
3045022100c7ad2e1b3734072939442bac244266768c3f8f22b999e56f56b7a59f81ce0cf102204f88a47986060a6dc9b6de89ee8cb3fa325dadd7626f51b0d9feae931e54070a 3045022100b63ee182ef295d8a8d7b2eb8ca532aff47302c129dc5392fd79f1bb7886bf87802205c895b3b1dfac11bc724646cbaef5804b82304f7084143f243224e2ef1839d3c 3045022100a28d4c1b8a0aa028c6fa579f7f48ddde020fd9bf9bde4600836612087185115f022069cb3d6b57d60731bbb0e15100c8b6c8832fbdc663237ac0656fb1ead8710233 30460221008f3d58257da9100b9ef62313e33cf19c3bb88f2703c9680479d99e2d7b8752c5022100ba517ae41e1210e65577f595b811908e5e09de9b62ac07bc2e79740353f6bf90 304502203f59144a5054ba7f59c09b8c255501f67204390665edbab6cb9f97f37eac75b9022100dc9a452eb198cf4a6874391810189a39808b5a00e9f009351d73579cd016ed9f 30460221009647d336175460254afb017fb4255287b11a09ef0bea3611de364985296f1bd202210085ad78eb05923734b0e7606959c6573d5398f5ba19ec350e1dcaaadae2114b68 30450221008806fef7f1f38cec6ccfed8b862ef28417e814c442bc1a7826605e6f11e17486022003a9b8230fdbb1048b715ca6d73b5f6dbbee9cf9d3f1bd8a14b2219976bbe4ee 3045022100ea6371eaeec77ef1ac22524b40d4d0100c06a66afbe2414ce3abf41bea8f3444022053e4d15b3878aca515b786278d9eafb16ed2d4e63762f8fc408908faea04d108 3045022100922637a399f082fd9d1e981b7189bdc5f5c7678f9e077aea3af85f3bb107199f02203ad862068af84612b74d36dac02e2366649fca7a60b4041e4329ececa303e141 304402200f2bb100f58e8c9f4653e936f08fedc5c364addf77a73fd301be4a0a0b8cc22402203700d160a02d7aa54f75456395d58428f37d1bd414bb3a09175c4380fe10bdaa 3045022030d6d9d48b6c07ab1dd5aab61c9856dd5d64d11d9c7fab2453ba93904ac0e6cd022100d653b29b81e4696ee82cb2487d5590b5d259bd043c7741b6c9e3c965ce9eafd7 3046022100a875efdf50b28c8f6eb8da62a3e8b343902bb0ed07df323562559900e5530c1e02210083b16634bbc7fa663e4e2c7ea387398b9981ff21d73912b091c109ef9177e737 304502203c1862304df704e7dc14be3e0cacc579482a7b60675a778449816dba16961be6022100a45ef206ef2c7a083f21921c279d47e09ecdd3eb11ff18f2266fb9b6a3b9b25c 304402207b5663578d74faa6721c2150d0a01d458872780b17c321f4dd246819ec8b1deb02205fa009a8e097faa7c4d0549e47c2135159688a10606b212dba35294efa73b025 3045022100958bb6ab3fe73536478da12fb5eccd832a4c267202eb1008a4ed1e79f688880302204302c2ac8dae04e89bedaf32a77139ebde0ba066c62efd646ec7470281b44b29 3044022043a7e5c50c44e8d07040b75ecabac6db2fa4f80eaa56521a371064db4514285002207daf1bb23b1354fd04ce804ae331090c9e4950b80274865af3f6e6d8e8eb59e0 3046022100d1c3127f701534dd8acc8a1ee05281543f6cd59a97740d78a87c38e95b73107f022100bd49cdc3c44fd17d40bbe5858ceb390e191542f5949f423fb36b63a5f69e2f76 3044022009a51afcc9b4b813652f63beb54b8d2ca034bd40054ff282546b335bd0d46fa50220416bac331e08851f29f1a389b593c3c1da941fa5497fa080b5b35627d02d06d7 3046022100c3737b7ab527a3989be29c604f841d667aa22685ea03b4b99cd9c4ada7bce806022100aae33c8cc475ba039d91f7bc4143d4cabc0d92f16e9d90249c0e996f51dce914 30440220523bd91d57e1a79b1e8d261840e569dc27b1fb511d60c970b61a62c9a53fe6200220697b3995ca34c5079365611474e38c3fdd48e650af7230961e37b7107349c96f 304402200dc145bd29b68cf0a43533409d2c1a77dcfb27040237ea40b5819cef8c78e6840220211f87240d47f930f61b9ff57669bab9b6721118527f09ad39901faf941b450b 3043021f6a99ff895656a5757966eea5a79e1d672b4f7012ab287f95b690c51567000202207aaf8f77cbbca2cd27011ded9082940fc123c22ef2f197d81d51ed2e4b837eac 3045022100fd212bec9003f704e934b382cc10dc5983792a68f030a8c81117aa06461820e4022002dbb773e8b201db2fff1a7d971a536d3f6ec75ad82c58782dc48c231f301533 304502210089a69099a3d434fef4b7f0219c0084ecf924d0e2bb45f7b2d11843fec069611c02200d40ece489424d3c3d270d7b81879df2b0b3c1a31715c52a5dbf4846aae72832 
3045022100efe8b11db41dc01a4001b529af4f9321959c1fc492dc012f0bd91c37fda061be0220299573fc300b72195838e978bc07253e49f9558ce9ce23bfe1c489c09da0870b 304502204d4f3b04f1fc0d274a069892d70b52f60d8805ac9a8a45a491015dc2417c3534022100e4e207f8e4f528ba4fb851b88bfb3daeb0bfd4a7eb8b115fdfbc6fd1aeae2390 3045022100ee0b2534cd82d1ff451d795694bf60f85e6407a286e633edf028e2f487919e3c022064f8f66fbd3ab26351e30f2c637a6d7597cb653989880b0d14e614491138d06d 3046022100c1e5577721ddad2068e9adc3dbea0c70aa0fba9bacf26c2440a5a10d9f5773bd022100cfecf2f68d0f1f55605285dec3549b9356857eb98e1505c115ad0558fceae87f 304502206d91a9b3c668714ef158a020938bb3ccabcf6e8f75ff78e22e3677f62751bdb902210091fc7e00fe2dffe4095c0e1c504f34f58acfd99fd3389f7df5e21501741478a9 30440220692d7ca643de7247eefce8b87a5c2a513e1b18c0adc0afec910c2a24fca09b0102201a27423b64b7fe3b32f917a50ab6d791be924c218dcee8ca752d5b8a75e35a6e 3046022100ce5939d7cf4bbd404e1469a3dc01d0c9a5835d2593c0b366175022c2556064e9022100a2bae3f2afe9cb2c24fec5f96d1f1596f5d165400e366a669b70384f4b7da5e4 3046022100bfe62d20d769a5a3ae77ec68fa27b22d53b7efb1eaf04afed82faf04cf5d935a0221008d1d38c1a01e7aab79480d38fbb4b52accf615219efe740db3722372c147b94c 304402203ebec10bf1e8a22a8713fc218ffc8a47553d98d236c47061e02e007aa048b273022031be32ce3a9b3a43d118884b65be896a866cf77ade0a13beb6912576125dd848 3045022100a63682e2b9c350eec82f426bc85dd46f0330b2a4225b25dda6207995360560c902204f7cfaebe312aa02471e666ba20437a0897b589f577c5d85f69e8d6fd0e593a1 30450220425107a53b3849be4920ce1e85bc04c80c8e81fe30e27e820a39ab386fd2eea9022100a881aa8fe393c5742b9158d5cfecd6a2b8ff708de5a557850c5b27ba50faf69e 304502205dedbe03628ba3cfa001a95d0106f4b7666c238e159403a6a4c421c1a01a2464022100b30889fe533ff5f8ca8d831d2cb5fce4278f7987d7c8cb96ef4f15530195eaa7 3046022100a740cd18cade7f38584f5933346db9482ededea1b5cb92398de1c8ff615b5080022100f713d0684c61292271de9447458fe1c0c4ef818b97c74dc5cbf6445502384889 3045022056b23e4a0b847ad46b909cddeafd12493d88eca106a98cdfc0066627b2b5b93c022100e1c5107048f73e6432a449968bcde9d44dfb8b896271e2bd112f72b72ee62f90 3045022100afb5d5a36573318894676d7572e70203ba82fb7377c71662103e5ecccc50356102206875db8f4309289a18c1296dba16d4ef0ddd80f33e700c2d68b009ff2355e19c 3046022100b4b5b62bdecf273ea50583633d4e3dcf3cabc56c21b054cade49e40e0048237d022100a702ce7f73360fffa7c197f4d2000c1f33b595db8ee89c327760256e8c435e9f 304402201acd948e91261e78897ddc40b7112b490c0e98a3c4fd128e3d9dc1a08de06fe002200db4a53f7a0bc0db537dfd3869f2e8baca0324d9694a5935d424da7a5b2bac23 304402203347874e2776cf8afd4277a7d185b3aecd57d9d9947372f7d452622556c35d9202200323600db9748d5300351659988c6264a6569891272d1935d614a6b99485cf6f 3045022100b3b68cdb597a3e30f452b8f90cbaea4a5b687c46bed3ec530c4235a4815b4ae102206f9ae26e3c4c6f80f4a985ec3f813a953cfd3218b3084f1edd638c5c353b5afc 304502202f94af7c9f6f06fac4b5c089b5dca1a5bf5936298c81c645894622c136f8920a022100f2e4dab261d6600ee3e7128e1e5a8b293539508e267b858e5ce7c12127cc2431 3046022100a6819c210f98bf52d1782f03a9de66650dcef7e7c162736e5f30e4aaa55ea5b1022100d1ecd4bf551963df0de8945e4aca2260a1a00cfe4d9911f47f11f66b414a2362 3046022100dc0264af54b95b97ad3717e514d459bee02fe07051c80da0fe1ea69d6e1b372a022100fa3ef8eb44192c629775877ff13ab67ed68f8914b20200b5fc433bcde588635d 304502202a9ec8e368f7e984dd859ec797f79ddd8f8f37dee2b4397633367486eb5b9f5d022100e9cf42b9862fab368404cef3ed9403cd5d5eecaf9e302987ea03a56b4010493a 3046022100d68e80d2b0c61e9112ab6013cd43f4136bfbcb95b1d1d1f090dea4529a9f2b4a022100c1874ef7b901f87e136ca883eb3080aeeeb5bae914188318b6e2f7ca429373a2 
304502207eaa9d923f10e9365dde82de975b192a01d9b1cfee50bf76642d854b3af009390221009623684c6f31d1f612c2f097a4da0d2ddf740805d2c92482c0681cad01f7dab7 304402201f076f9866fccb4b9902a1fe835081f78b70ab7bc42603a1b4696d84ddc2f8bd022042b43d22b806e84c6d432fad8e020ed5e58d6975a2607bd16fbc47b67ca93a16 3046022100eadbbd7b5849a84dae00cb1dbe198a72005c42f1c6e102df0e50cd2015ec3492022100919c9c7ba622d9a818abae8d83426e20dfac05d30f196d97e72a52f66c102290 30460221009bd8549b1124e5541b432a289cda552a5217f66517d095fde3d861415ef1dfff022100d40868a89baad97a9b5542685848bf7acbade01bf18dc06317d9f13405302b0c 30450220481b075798f7eef954f6486a75b973abc9bf932394b42dbd19200e30509f4cd7022100ef91ab1d64b818511945429e72ae816b5638efe141d948c6c0d46111b04f04e2 304402203e3225f3df71c771c8e29714cf52c57864d1aba47ca363a304fff00ec8c91164022005d205efa5aaff8478f863598f4196681a3f471be88f66a343f06d2fa4e06b79 304602210099a0793f694b354a8f855dffca0767110ee0955ca3d228ac8cb986596d8513f8022100de7932f17dab9f0e84860e2757a82696023379dfc4cf58fae04414b064dbc20b 30450220189db92b7bc2a02c3a1da608ec0e0e7df25c0796fe83b559320e8b8b4979deae0221009c54cb679463a4c0ebb5bd53ec4ce4e23f23a8c9a75b56ed54774650efb0a143 3046022100cb2a6b6508590e7b410f824d8c61ea22d75328fefd51f6170ddd367561aaa4fd022100e7624a365df6fd82d5538478d296631d3b4c37fe81f417ef3252345230bd83af 30450220433168fc086fb3af5097d4d247b8d8e8ef0caffc8778c1617eed71af22915dc5022100b931943efa21de92d4afe3a2f2f2304f4bc63e02a39e19f1fbfc0cae91823797 3045022100b2d33a46039583b84e23b63c5d1cf5f30728631c6a6336a2bc79a62c98cb49f502204ada58e604e9724c38fc7405391e573f3026b0452a0a82092dfd4e42ca4ffef6 3046022100f24dbbfe086c097534f088fb8760cb9321b1486e7a5927e7abbea281c59d4ffb022100c36db0fff570cf65835c973dba8121d7a70568b0dfc39a94b496b215721ce722 3046022100eee54057ed32a828d27d2bfbd0d1b336c9415f278fb71568e5ebb5839939bc78022100cafada1a4cc278ca17f1aff067f9663d672488495d28feae67ec7bb70a27f57f 3045022019d19f330616206a9f93eafb0abdb9740ea11dd322d111d805654381bd649c70022100af06bfeef33c3abfade9662281368afe815a1df0bf27a51964f38514c1af852d 3045022100a90c3627b84d73e185f70e960f254d5497de0d268a303979fbafe302b525e8a30220570888ad92da20f55b452d22986c4547b5ee7db9f96f6c9a0cc5ff1a1a725330 3045022079730f28398db374f5b38b96e9d8a6291f2e76486abc09894ee14380f8faf20a022100e086603ca4281372f3d6d2b5ae42c020a02dbe07e763e927b4ded9f1f3617faf 304402203250b9416dab8f471ca59de157183702bc8288f7fb29b11309f2ff2a7a147e230220724552e3aaa0eac6861e830c1a926060577741f55c5f2ba6f7dbc52a5c1da171 3046022100a1b9826a99c6cecfcbdada2d79949b9dcc4537f469b69be14c4bcaa3dd12d48e0221008d5edc8e2ac60b19e73c3053d0b9e3aacb88bc766c1d5ef96cf1e06d7561be81 304402204e9ca7d9a3de175d7c88c26455d7abbf9207d106ad5567009810ac35b79e966d02201609d9a37bb9f8e3f0cda915f02e4024cefe95dd0429da756e252000c6e1925e 304402202b47cd6ccb4f97605eececb5c22eb9ee263b81bac8c49ad1a082dec53615a4ab022061bc687da0e077cf8f5df05ff2586bfaceafa5e159f77b47d2a69c9de5941efb 304502200bcee488fec22d392fd8b76197f37f5c246fd07558451d2b87a0a66d9ed04bda022100d8e2e74f1693552c97350750322ed5e0d1e6e2e47b05aaaa8f999fcf965346f4 30450220338e6e26a1e2ebfdb0e54867968996754ed2a067717ab0fdbd81429e3b4a061a022100ae099cc80b07df6e295deb17432805b823c4cfa8b110cfadd6234581577313b4 30450221009ae311ea773ca81d7f5122f448c8e0e1ce6761def638a1c0932de7448e81cf2b02204a7b514738c5a304c1d0145876fdb29121d40f95d94d552997dcb4497b94600a 304502202c72227531613edd77741b221a571a7d76e265ba8b56197c77be7ac5d61bc3ea022100a40ebc1a0e1d733d10b27df5e87b442ca8f58f13623fc0cf17716fea71bdf394 
3045022100b599e332d88d145d1e530a5429f23b7b47306aaa2b4ec0a91692d533c24217a0022013120af746ee8ce6b5340c36ab08682ac285e0a488bff702e79ea0fe627d8c10 3045022100ff509454a7fda3c5bbbc0ad8894d6f6fc85c4174307ce4a38c3bb90b08cce2de02201e0668581d7937f00e32404eb4295d429bfea3b14ed9af81384e4527c9ad450b 304602210080bfb3d89e0da25b13d028d90460cd13bf5824dd53e2aefaedfbde21fbcb720a022100f746ad6b6094df888069f10e80627ca33ac567e7f32e7b6678e82b560358ccbe 304402207692b1f8b7e152e52d8ec1e4abbe9286263005a65085eee27fee68de55731e6102201d9cce5cc97b3d067546deaa48b4f82ff4abd92aa09f824d4325f3f065af6440 304402200524917a4814d01af1288581a4cdf222e7c822da072e4da0a094f41de228057f02200735681255b9cb282a5bcda9e131dddf029c7f900a683850b32d1fb8483fe72f 3045022100e8a647d5b28eb95f4b0da8e3e8e6b28acef95710933673b15786794c9265d46e022035f1e166e389d10910683ae066e7548d53c2ddfdf4d0c2c39acface5deaee143 304502210090da88476b6e609482fd7c307fc9aefa9e4dc1732e7f6352bd325d66697a8cd4022039776ad31b634a5fb1c590022bfe0c5e34a911c0462832cb8884530bf0ff8224 3045022100bb1ace9985dbac4e9bb3b3e4a9ac10b2abd289edbf7fe3f0e8d8ce0faba89a55022060a0e9e9997675774afefef8d46a470d5a0e793fb3e9121a36109b9506b14d7b 30450221009262bb79d7e0777a2dfc28d6dfa34814c7a1807529fb68a269be61ac0a79994f02207982da588fd6a1337e3a45269dbb8aa89d95208bd269639674c0f840339ebb46 3046022100b593ef988b63d8878249af428044f766342a3b00cedac97d8476dff3c3d5f864022100de383e64cad7aa6a9a7ba0a1277afeebd342ab109c6a605fd74a470f76a3422f 3044022079e1b7b30dce2a40ac2afd01c68f6e2fcc054529a9bcd334d8105f7e393d694f02200a8b3c729b45673f1412e3e7339e823e5af3d33b8ba42c55aa189c8df110ef73 3045022100ba3d41830229b654edd5f173adc4a25c45dc5f03275319087da9643e40220c3e022025beac2fa57154846a01b01fe1fc2af14b8f4d506f8c9ff25a32fd7b9c36d679 304402201331e7610009671efa3d143f23731ef0ceb7a0198ad749d2c270dd27f038a7ca02204b1dba2204559b108a139a4b61ec1f2959962b8a46334b11ae59e6312f7f598e 30450220306f57dce257bc042ee6de0d82c99b0636f1b3d6fbbbad5d803293cc31ef61bb02210088377ba52ec6076951257e79eb51f084ceae5c30a8290a1fc646ca09f7729c87 3046022100f515632488a2bb1f40efc9819f4db02c0ec138ba8c5f76c1a5af30902fce7d92022100bd08f2771eb6aca1362faf50a6a51272a0664d9149b9f1b8de0d99eb07447e51 3046022100d1fa096b7fbf94f253906c5bdf3e8299d8218b9ec2642cb1de2d85ee478e2240022100eb0d4f4074d2d48b43d76ec396eb6a2fd00e43c3753032347f5e20240d9ef03b 3045022100b6e1078de4833c3dbb1cee69e7cb1e8e4caeb0d1d279f2b5ae2cadcf00f65bfb022019ff13d6991b5bbfa6306c4ce5025669ddc6b1aa0a8e67fbe8cf7895f69187f6 3045022065189fae57a44c21faf94fc23c8dd6a3ab4de1525c4fa2c01709f23fe122d89d0221009bf6c4588a12bb259d5f5a0aab8aa33b15db6466d5970494dfccc92b6268187c 3045022100d75ba2dc0d9067dd61b19b00cc008fe42a55b7f6472182953f917ed751351d8c0220523defcd40135966092dbb38bee4f3f0871cef70f79a8704edbc4475edd1aa64 3045022100d64dfd7a294a57a28c28be232e05e75b45349ba7fdd2de8e6a940735f78af7b6022066e386cecf77e5b6d945b27901fc8cecb458427213954cb41c4c6599642d691d 3046022100ec46ad51011a5ec7ec4ab02ed13db2f125469b85061702e70ff3a032d65edf8b02210080e19b65c2c462e45a6f9c41a6084a3fb06d84744e2a0ec1f8dcf754d8c07576 304402203a5195b030ed649fc0fa8bb658abeb652a00031c795cf636db6032123617f8710220481d025a1a988fcee249c0495566d406a01309d1394f18afaf03bbdfcb327cf3 3045022100de6191436efefb268b33fa6102492fd87f291db551d0641b5f9371943e7992a302203814db5db4c2768154ae3e177aa4184537f95fac7914a5e526bc4d3da3d3c2e3 30440220058c5628de313efa53afcad973d847dd4047b671cf49d33a186b46a7c8b89a6d022046e79163c2f4c619795ab1f159486826d13b058d3a368f66a0481d88720f6831 
3045022052ae86ba081d88d382e32d48886534b46393f7713c35fc7494fa0a226190d4de022100c8a6004b8f72f73e9c49b126afebbcfaaee4170589cf2afb2e96c1b93aa40ff6 30450220450aaf19c416949b41aeba77702f3e32d5d8285610cbafdc03374fa09265d155022100e1ee1b1ad8fffcbb72a4fc01616b29ea2566ebf5ef8c15cd9556c6fa52748262 3044022021d4283ba6d7c198134789cb803e9b80df0aa8f3d65c26a8913b077c21cc97150220266760ad30094566ddd7e51010e1d93a5430107fcd511edc8375ab19bdd2499f 304502203eb5524ab8947e2c552455d6dc3b3d5e26806040266cb44e9e8ea1a15644aebd0221008dcbac635216bf9eb33995120823c55a260a5171bcf72f231560103a4d81564a 30450221008a308bad3940c5909fa2272c2842eb5073cdcf57deb5fde452275e478bf29ddb02205e9530e1d217167b60eef220d3f641c6f9164b5d7e6931210246fa8f32dad95e 304502207573657ec8cfdedc1c0bb0f63d9c351c9a02772f5427a3c9c46b6545dfbde6fe022100c2b989e7e7845ce6d647cb4e901d51922634319fd7c8743d1351585a1ecf149f 304602210099a6947a545079b3b40973d16a3583ea3fc49fe40e42044b715c5ee27eeb12b9022100a08205d1bc854525ba59765d2889a483d82fad70e7a3ca65a11d2746a148285b 3046022100e0572a697634f6a5ca6077b95f0e4a0136500c8371cf4dff1291580a364f7323022100ea37be408627fce709d5b650b2a4e52369203efd7759f3a3b3fc35a76230b0aa 304402200e22bc77a3622417d2ccff5ff3cfe2616633f4d2cad20cd0f142972e6a484f1a02204ff718726bfac99dffe829714b9d9ec2803f1931cd24584049d98432d200fd74 3046022100abba721e67d2ac444fec34ce1d37edef984785660e8ac494c05a875ae2c9ce99022100f9f1811e4f2ac55564c6f84f4d9ad081c5f60dcfa882e52ea246d290538e4365 3044022004c43028074955f445da0593a762fcac108892119d3de8d0ba97fd06cda7ce8002207886efd2f1c8bee0669734de85bf96843d63bd0ab9978a1ed1dfe3c4fdb1d2b5 30450220738fe6fd463045c13c2e2403b9722bb43c6708e939a83a9b5146440d7ec85980022100d8a1567904c42de74d122b07226f3e5b4d6104a929360e0b0f7732e87fe9f621 3046022100f46a7c974f5a2acb596e945254682411ad4614ff1077a690921c3e43417d3e72022100bd717fe0fb129f6f253f4fbfba9fbf5392427cd65b7b8a2cb8c25e7b9b375cb7 304402204846996f80c1c1c88602f12b6056366ad47683776b1068453af8d129dcc1d93c022059d009d66064a83949d5856967873438c8a80339d0bfafabbed96a6db1eba519 304502207df7fd1f2d8ca256ef39511370cdbcaa728fa4994a66226e4772e645dc7c409a022100931316c3ae1a0783b69f24ad501d55e8f4738de5ff4377b9f9680f05188ad182 304402203e519053c489d7ce10f404637109e1c53b1043be19a51b7f044b4738c46fa8d102203a51e351b80bdfb14051adb36b87385ed41d0baef57fcea0abde981155ac3f1d 304502202afdeebeab12dffe385f209c1aa94b983960973f768f5ae7c993828aff057bc1022100c08e22c5670df8bb759c2a9cf3cbac3e96c556edb51b31e4798aa2f6c9d3d445 3045022060a31a25d9233d8e9b0c694706d20f08f9c3a730f078c5b85b556973e984006a022100cd0ac07802a8e6012a9cee108a8dba2d06e10f4b28d9e44abdf269d85472ee0d 3046022100d672edde9f0ab9030165d9d423fc51c7205754db42d794e7b6a4f487db5383730221009069ee275d75635ce469458ff4a291a8c0fc7624335abc42068083b4197fc762 3045022054683746ff35e4acbd6f8a504d7a0422c1ff41c0f172a400c688735fcb65ff8a022100e8114618bee3558e43bd9510bd7a3f04f76286245ca247e1f7b4bd0ec42590ee 3045022100dcfaba81868e3d20a7c4db5801f433dd2ad1532e574960760c92254f013706d30220229ea6c1104a16fbdf52bbd5256833d9af2f407db7588c8874fe5ce8dbe41316 3045022100c1a67073c7c58d7fb7e43c630e9b957ab8aa00781c40b2f176b36d7b0974427d02205cdac317932210095730bed6ab3af0ca2538efb5267760342af1afa0ccaa8512 3045022100c2f27e697c66a4fbc7411c5b852d53496467991ea20741d77bf24a5a3b6daf2a02207f81b9ef75f131e74eedd23028defdeb8f17334cb0eb5acc191a83d6018e9dc1 3046022100b811cc6d9ae6dd3630c473d35ced7a77138741147f640fb1adae9af557015a1e022100c218391f5cc49c8fc2eae8611cc35a9420373858c2df240a91c8f69a530c5981 
30440220416fed0cf3615a875ec792e1b7b84198ddb0dc6e53b66ab1f9cea20be9f637a402200383f4c320e7466431820d38b32fe34f4837d904908f3a45e2a4fd62e8a07f09 3046022100ab1eac1b9665fd6dd92e30326664b2bc09080a617cf47fdfba301b9bae95686602210090e805e42f27bd489ad5ff824323af528332ac50dd569d15ed2e5a44fc558865 3046022100d4be3fdb42e88583e7d4f0e27ce2310de73dc488af00c9a6aef5169b8de85a690221008d770ffad95f3f3c2a4d59f52017bd9a3e1e59e9e9321d48b751b26fca6810f2 3046022100cbb438c954382091cabe13c17702c823643ef566ce915bacb3cda3fdf8cff3dc022100b91ea95c8b62b3cf852533bab5b3a562cb73c08a622a9785c95d11af7d5c770b 3045022100d19d46c9609d0d641456441b61504c5fd490fdf2d7518e24cf79f46b45f5617b02200aa016b7bf20d00d30695f3e585311fe4413b3a75f604e537c9e6d2f671f71a7 3044022057dcf52614f2ade89aba71f64062e6ea07e93f3ad9f96c9294ebb9c30907290c022059effeed1733e6d8eec1637e17b01f8e459edcff7e330072eb2ca7de0ef0c356 3046022100f7f45df41885b20c3f117c69351246c31e44d5f1c65fa04b1f9052d23a13b2b1022100dd8fde0283cd5a7ed1cd6f9ec5116fb0a3752a8c7a5ec6d45063260377fba879 3046022100a00aa942d957c0301f3d7f9bc57b1251568f7807f781ded51bbd790c1e926dd9022100df02586de76a6571e34c917efeda8ca839119c6e7db4e59c39f510f1e3ba09f4 304502206e356afb5b543ddde35070a36494ee1ad556d8383185ed9580682abbbdd33d29022100a689dc052ca8ee67d2fe27f143fde9be5b25e79efbc28af170e64bc567058d55 3045022100a83601e1737fe6644ad00bf2e96e3bedc2087a160d95f58fe13c501e7a7dd7500220041b2c0c4a1f9bd936f0ac092b08cfd5c33c1789c2b8896ac3997e7f6ad11523 304502207429c71eb282829a0222d93d1e6cf6af3b68ad8794ad9d612fa718fe8fb230f6022100bcbf0e1dc9c9a373a5df98fa16d00cdafab1d31fce1d07ac89078ea0692db97a 3045022004635514d19a7ca9c343654dd8e8642afc70c82647d3ad268e040b4749ff5f3c022100fc86394ccdc0cffd7566b143e8a9749c901118e55c2c227275d92d5c943618fb 3045022100a7bd12d8e7475bdb0eda5df41feea56a0b28390aed5b99d8bb199d680f7e654102207e522ac45743832c69996b648a747d4cf9f4e4bc05dd9c11ec32ecd74d2a5ab0 3046022100faa3cdd0e03f3fe3fd8c02bc3e7b67d4ed5989875b9698db9a2a845e1eb19431022100cf6a3d6d9488802102e0d7f0d5f2aee95a058c91c74e0d870f439e9e32870a58 3045022054e9920c271fad4c1de28a1dd60388e308dce1f1f799e8e1d9576cc054909824022100bfe313d0e21de8b0d1e9577b7e976a46006a89efa03a2c55ae2c985d2a23e5de 3046022100820ed928d897660394b370a3dd458d71e82534e421b1ebbacf9b233c37655d1f022100de1b821b1b37a94232bf60d2029722ce8fb6d82509a48fde9af8a35cd5bb0e5f 3046022100eb97b45b1c5c4ba81506daa00109ef9473a7f840f56924bc682e13bed1a89f37022100df9b1823b778610256c253ff887465f602d77c67525a1fdb63fc5b2816fc574b 304502204bdf2d0d8bc2b708e6c5cbf4aa2909800dc0c634afbadb2fad63f70452185d8f022100ac1c5210c6464f649c3535a4d98b1ac93d88088bf4deaf1130e3830a8ca61c94 304502207b2bc45e4426379e2d3ee674daaf70f7bec87e2cb2ca8619b99c88b27ef90cd80221009c320bcaf364cb519d4d5bbbccbc165afd770076d2e4e69d964c51c555f10acb 304502206d5ef5699f284d75d62f526a240376e5859c57cf59dcf464ac6fc8bb7d927db30221009d8e8cf57aeff830992ff5da31c08e6634cc5b8b6900e3786a1c9b92b77fc56c 30440220111ac66141d5607e689aecec4c1f60445513af3f2beee479346dc25e8b1b10480220596bd8bd4c2f1643add5bcb8114925d8f1f86c44ee6944c503aa6659a9c2cca7 30460221009ff63b78dd953d1156cbbb1a2810f72abe2bf66b82bc8ed833a375d265bebf5e022100f4732bb7018ef4eb3533abed3b3ee38c42395a5cebabb30576f6a5ad566f90fc 3045022020900e59a4866d403864e6deb2a9627cdd141a7f845aa17d924ff30e61d5195702210086a6bb657b6ada55afc564c7cfa87a84de8b4a9a485421c2ec798b1e9089e085 3045022030d380a9bcb6bf51e19c36b01ad3e39b1de96fa936c4a21f7c5d709216548d790221008d0bef2a92c57dfcf70346678b528170bb714eacc25106af9ab8d3e0463e7be4 
304502207846a31808e27350d3bbc4842cee7b5becf826ef744d6dd6bd1c6ad6fb86a069022100cd1e49a9a26cd22c564a2c97765e8ecd58b5c0bfdd6116e5af31c2e964ab55ed 3046022100c1035ee86dcfbcd2df33d8a486f00415cada551c9400454c8d9f433b29b76c8d022100993a4ebe7e8794a3a06f8a79138a4834029b732de8ae0ae75cca414a57247605 304502207f66787a93beac6f7b361425ba1346ba1fa1f21c98c4860a34f60313e153db9c022100d44374b2824a58398373876d235145de3ce03034a73a9b587fd696c4b9b23ddc 3045022100876ca78788f95929d1835d1ebe3d89c3b332d65afe37eefc479293562f9865380220028a831e1c3e1d44f2db8524a25cdb253f4989df8bbfc569828f26df2a36343a 30460221009e0a0d5779ea9959bbd1b1d25d5d803ce451b97117c905dc594a85d4ff538fb9022100d88d19ba251c8de20a95d611caa084011569bdfa55eb3f158770ff3b6bea1041 304502207156f98abf549411a1326530a684292e35a1ccf90da6da70e2c7c5868a69e825022100bcd4ae113759e35ce4808111dad5ae63f8f8b599f59637872e75b5e501e7d5d9 3045022100a8c17daf421c4203d3eade091910fb46dde45215b74815987232a82b4b4ce3c602206a93140b21dbe6cd16f1e1d1c8f0a50e8ab0fc5d54b9fe762e25beb0f5af7aa6 3045022100889bd559b4206a8fa8914099903293c202bf0eae2d273de5495d751c3be101a3022018653b0b11fb75cd52ddaddd41fc6de80d5121af73f2243261f3efb2ade3fd3d 30440220026ed52af324d5fb3f194657d9ff21b55152f67494dae4d1582cd4145d507136022060bbef8bcaa8fb6ad3136400c9c5082eff8eba8ebf383b4850c63d204566c8e4 304502201d6dbc577b2c6db0d2fdf2846cdcb242d70e99d98967ec7ec6f628708c52f99f0221008eda9451e54773b9ff493f96e02f1f19141dd5abb797b1921eab184fbab7ab2f 3046022100aba4b9e7df127662e5e1cb17fd642ddfea58fced3cd4330b4deb0398221dc389022100f75f253b70b6d9d0e64ea7e806a75252e1fd62262bbe642d78dc9c363f3f9dc3 3044022009f4df9f2c2b2554c6a778a851ece6c8e41ac1b93862ab4e8b04d877b47c905802207027f146f20fd30a30d67f9deedfd267a2f939ec2ce49171b2d282ab18e0aaf9 304402201b6694b12db916ae24fc9f8991cb70154f1ea87fa7d74719c5544e2f50f3c4dd02202fe83066bc0ef9d8aa231683d496175b43cdfde43df71d253b252b9401759780 30440220688c54f164e9a4368625b794c9897728284c70f991a9eaaf4be9c3b580ac1e410220348b4fc4712b753d0f5f37857c9010379c5a915e97a3411fc7a16e62ffa2d01c 304502207213f4c7cf00ba40757e94a78865e46b195e0aed310c60791f944625bd4e11ee022100cd5bb4ff114fae75588afbb9883491ea21bed1e3ee8fc4c99cd852169e93710b 3045022100c13cd5b3ead8dc8e5266100a6140a41102cfc1adc1bfc93bba4754b5e2f920220220746035e71e92444d709050be1a27e281ed47fd4621997eaee8529fc5b2307b85 30440220060643a6ee0eac32b299d488bfb75d7f1fb61031d617bdd140bbe79cad7393b7022019d4a3b5a5527d82264e2303840394570106c54100a91b8fa4ba89ba19d961dc 304602210080f70f3826dc1a72cfe749f19fddf3c7b2506ea8c7740d1cfa860585a32a5d57022100eac1187636fa2b63323baf1a97bdc5be763af582d06d47f0a8f70ff3ab527681 304502207aed420b17e67a58796d976937f0c66bf8a8e485c571baa8e98bd4b371665824022100ffa8e10908b7ba5751c8c6cabfd18b8baed3043798aae9d8c2cbae81f4e16bd4 3046022100e0199036bad29ea5e37cd3ea3eb279a0bc503d37fbb1127034ab12ca0db476b8022100f8729f18144b7766df1f20b9240173a8849a0fcbc3162bd913adc6b56cc12689 3045022100ccae35d6541544a159bdf213fe7271847811865d86012a6902b6b07db208eba302200e58a835be82497407ae00bc87d6f5861d4ba7dfb8f87a342f70264b3689e3ab 304502204c5335f450dd8e0d7db8cbb43ef7abb49d6e5d71450f6e71ea3f123049bf59c2022100d4a8f02f9c1b91b2354e577ac9222fffddae3f88f813a2cd064487c5555d23e3 304502200425c38ee76f6028fb7a51680405aae7590971a60c952e060d3735724d4078e3022100d7c8fa09c38cdc49e9699b37c12eee4c3ab92269440c84e72d1c0ac0d0b03960 3045022028a78c9042d244e9b7e0f1e4321ab38f5391971280070b23abfca1e61ded82eb022100e5f60b1723db2adcb8b09cf1d9824ed225d385037b6772a5d8ef9bef38c53795 
304402202602b1e75264b87d237f38767916a0fd828c0c5530aa716c466389d16cd683fc02203541d3d2bc7446e2129d42002e6b6f1acb5dd5d334ea8be73e8ca6fdc48e3bc9 3045022100cf3aa2daea6dfa5968c7714a222d6d6e4c4454dbf5d6074a7235aea1119668c302205dac05f9263b109ce2d266d5359b9a9a198642775d02095ade771040aefabd35 304602210080c6e1ed154ab2565e9b60822b8d4eedc78809e65b1fb51381d41989d2122dd70221009607e057f46f7e1423635d387b2a1317925b318780a9245c60ab14ecd93bc7ee 304502206785f7211065eb6fd1d175a6bae54c3c4817642ad463d4a20f82a15d976f2f18022100b263af98855173de071fdf13d8566eb791132461a34d9c671f97c9a2745d860c 30450220451dc2c9ac1cedc6ccd23d4ae8277491b3ccd9fc2a1d3cf943070ceb07a4ec0a022100c229cb48bb44dcd2ae73d407b4f163e169d2a4f539ee809342baa2b885d7074d 3046022100989f79e3700bef8b5948a8dc394d31686673f499ca6cf7b7c7f665ec7aa391b50221008dbe018a3706eb0646a4c127ce397dd175bbecc70e401367461d01bc6f4e6dc6 3046022100a3124540d50fd0b6ad3cb11f31d5c02641dc2e5e6558af32a729bf2a574da6b5022100ab15b5faba2b6ea896833aeca9a61707f283af3ffb5e9d45d6a7b9b13b88e859 3046022100b57a35ece3895e5622bdce6e130501d994bb79a9e7e321d19f5bec66b28f0b9d022100b0419d67c469963d37dd5181c8c646aada9c1e3c02e9f027ebdaa61da43740b7 304602210096cc332ccd7e2564223295413112019e14e9c5cfc2d481e35b4932b93c1c4281022100b85cc6a20056d9440d38209133ba717a4eb8c2cb7551b17904bb89107bf353ea 3046022100b9ecfaaade42a61c63e62fe3944f00ddf59269ab2e90ccb099d07366cacc8a39022100d121f3af20ac06425683affa8926c541b13c66ab04f0f265db0d81ec6c78b475 30440220782b456106ad7a1b5741c813af24c7603bed9de4546d337c134800730ea071fe022079163dfe38bdd42d08b4e88bc2ef49f5b31ede4b24323a2b958e2ed174184b62 304402200621bc0132fa42859f47bde725e3bad6d7f8554c943f6a3487ae3d56afc85856022073cde4bb1c42591fa0548756eba50421d14df15e64a1791d46f8af64c556c7ad 3046022100e9e093a8f414082ee9c303db52be5379905ceacb3cf0e167aaeb7507974052e80221009281e1a6d79fe301b7f4f3bb355377f0169e19aadcf692e51d0040422cac5962 3044022071e13c7e1509d74e401767df25c2104ebf876ea7e2b7fbacbd5b04467843615502204551f6104b757b2b7558bd6055b4727f596f57556c0019b9617d6dd3ea8254fa 30450220180401fffe77582e363e3311601f93a1ac7f7cbfa56870a1c54d484fbd45b46b0221008ca7ea42930daa275d9056f4b589c27b929d959f158523e8393f5729254dd0c7 3045022100f63790e8176aa34d3d7dcff0c2e8c5fb7b7bbb04c393ff35200bb430dec622f702205196cf1d7a0fba765347d3b8c9d290a996427d6f26459b68b73f625582eded5d 3045022100c66de84269b876d36bdfe975bf8559a33dd52301b4139984f0ce2da8ce1889f502204a1959e0ba0f9816521f7a1f7e5179075518a4b2c4df0b96d0fb9663183381e5 304502205271c1c07faa63979b37c0d4e5a486152040c5d505fdc24dfe8e6695251380cc022100820336ed8a65c53436294fe596f064912d3b376f26d6a5f177e0640e20f62569 3045022003aab925384ef0eb9b9a2895196bfb8476dc38e5258a41b3494afb57a576d9fb022100d044677a3552c07ab369abb98e2132be4fcb10ffb3f6163156d1c75d54d53a95 304502203eb6e8b06081d282326cc34baaf8bdcd9c25cfa2e424ff0bd5eb39f0e452137b0221008ff822dfee87225d001ab55274d6e7c4071101c8005a79ad75e08bf7d9e28406 304402205b808f4a4446677ece9118a2e6b7f90c8e2eaa5811f188ce1786dd590eed307f0220705aa7d28d9b72491fe9be10a5b1dd006cfb561e64258da8e5035bb6d34dc8a8 3045022100e5fcb2cdbf3ee2919223e2278bcac1bd01ffad23358f9cb1473b21c7dca3458f022021d5b51c0d8dfe4aecda3706a268e87b0a4790a3b8bcdfc88bce31fe46b0259e 3045022100ad1a83db79f7c463aaf993673c2fb22790ebb700ce64f1fc3488e2ae20e9bc0f022010093c8da033b41aabd895e13bb15e2cbda89141899755005e369bf263c2cef8 30460221009ddd801b8d3c2a55f94d1d630eee8829d5d59079913cd7127c8b28685d601522022100aeef12c1e10a0d564b744edff3085065140005be6e867b14cb7bd7886cf230ce 
30460221009dadc1e740d71bfe1dfefad2eafd9ec712193e895c56d1e5364a0d64a2cca6ec022100aba3517b639459e5558cd11fdde24dd5109bd463a7cb6f11a9c6a2678b65ab82 3046022100c7fcb612fa4701afd343a346b289bf03436e997597a699547a548647f82c0f48022100f1a74a5346a8c74d661ec654fc6d0275510ca27be96b668247bf585403d18af2 30440220526543a1fdab367f56866c40358c309248de5adfb8da91ef424f944f1aa5d6860220561b5223a6cde4bb1e3d09567a1702778eece2078f6df006e4cbff30489e3c87 30450220774ca5a2bf9be280d976c44214bc2051a0abf412fe43324d6eabc0fda2f4a026022100af563845ae9bea2112d767ca23bcb5af82d46b56809216666ad2ab538b5d484e 3045022100835510e09ce88b305a25be9c256e91e87b9cf4a814b080639c227cc856989d3e022012f1fbb16ba4375bda7353f03c0064a2e193e857f10143db144306b998fed6cc 30450220214a8c724f63b9466df19c7a2f31119bff8d6daaad543e30776a2d4c988884a4022100daf99caf1dc75354ee323481988554acf098da22ece08ba174d7958d2a2d2ee6 3044022057f941970705659b0922a5b105b36c6c022b7900d1e21420c0ec5ae2474f8fb0022056d8b56bf12c556dcb01afdada0e14ee8a05e7eb4f47e28a5001315f0ca5e4c6 3044022045533f62d65d38d727c072e6f3192f4dab4bb7f67c0e49be0e834dab453c680302202d83c31076dd0d3f3de1a2f37c2f01fdc02bea9eff6c9fc22a0aa508d61dc419 3046022100c23ad126092fdd3f7b0809a4ffc984285872b5cd65551413e23ba3204f2e8128022100c9e1965d7d171ddbed779454fc2569243684afc40e49d7482fd6987a4f00e96c 30450221008a7a30b0899572b4e7ff52820d66ed987355d08d68dd67ee4b7648eae89ea6cd022035a7a5580bfb6f8fc9c6945a0533efd476dca052642fe7ee65c2d4056f201770 304402204ca1ce2571e4d2ceb4a6f8d89838ac07f6a7d5db90ac37ead26eb9120f866963022035d0821106aef0188caff8369d6fa552c4936b6cdde6d3cdf8b7c0a3e339243a 3046022100c4258f837c08b58637eb9262d5816f24666f715d57f82b2d345bf52c9d65f553022100caf0903b765433d0c9852533222dacb1785590c4772a7316e005bbda75d611ed 30460221008e8666bf80d094c4ffde21b93b0d71a0a2c139461a3fa8e6d1657323ac8e0e7d022100c1a9877164be9e738a2f170ef39c2c77f930a06e0b3225eeafeef054d0040215 3046022100ae75af5fe8095514222a716a601c340257633e445ff132666df7c2c21470e349022100ac45df5276df51c2c34173cad513437abfb9e3811b7b0a6482f339255fec45d4 30440220076119e9158a7b762ee19835950a3bbcf72cc5f3ac25806214e8310edf52d826022039703f3841cde890e6e1bdaf90562c338c71819469cf1d7c8107f6bece01c9d1 3046022100fa5f71383c21436c9cfea7f809f44d609e3461f06107f0ca079c00d3ae98650a022100ce2896934167022d4a018eb13ac34666d0ea1237f97128e11993a1ce011b86cf 3046022100f46b4f3cd2e7b3a0865f8167f0633d1913f6d40a0ee35229d09cd972b83745d7022100dcb3733ef82c3dfd976e2522364976548754469c0baf5880732b93fee7eef07d 30450221009865109d58b476cf2da878891987abccf6a252c89b0d39a93da69fa6f6a216ee02203909b0f3861dd4d4e0fda40d8eacd01cde05a67730b3f4a0fbae7b83bebbc3d4 30460221009558afd647b9db19e647589563f893ae0446dc783920126117fbc7bfef407f4f022100faeb7017363d3675c2e3660e51a5327550586f9745e64532b5c31113bcb6ae29 3046022100ec53c67a7b68ca2186d015b416b52880bb2b1314f8fb27a3fa1f44dc4e91b841022100c0d2afe19345d415f94448eadf96067037ee7c3735cd01e47c8860272a53f645 30450220665cb602c5a45847842741426e7871c1e77fd101d6debe18cf5198068f6cc543022100e6fd1e2033f4a08041493203e2fc150b2d89100360257beb832629d3874adb14 304402204968002fbe17aa8e970a443f675c739b810cbd9f3f2d543ce89153c3bb982367022003a8fe0ac7ed7a2cc35addfa1d2cb04f8933306b1d195e76c663db051086d17b 3044022038bc5bc0704bb2a98b4ba98779ab9d451fae6cadb5aec5eff41dc3ac6cd6b6690220435e846087518323f0d49a256d7e803918c74d8d143fd1944e40d645bbea5666 3045022100ad2e92d8a608c5152914151a1083a37af380a19573088a1c6f3b6dbc084624fe02204a2d5a6a3f509d3ae7f3368ad2a2a1f64ad0dfa5cc86b4b57ae8a1b739fd35bb 
304402200c1bbfcc068aa6a7c52b127413fc2bc96c4c03c4091a2e72657f928f6ba665380220426c7e68e1df9391a7da59c118e419c6dfd2be6033e85bcc4f5b8bc4d655126f 3045022100e76f87591ea9ff9e165558c6a10d6eabb108e1b2ab808c8b235e29a31d9b5c0c022052597f0e250f1f6c22b833ae6deebd19c6870e1160929d572185b262983872a4 3045022100ae9b0a62a3883cd755dd64fdc5ed5bbe932284a43cc026ed34b8db2e0059a351022056c67911e0d041fd00142541989f9c1014c43f09d6d1ffdd607c48f645bb3d6d 304402202446594341e2dd1415c57a2b414e6df788068f28480aabca92b8f7f09ec40621022027b5b4b040c10e952efb3c15dcc99cd2fdadfea0dde03b3d736343fcd94e3434 3046022100bc0b10a3b771e2f36d36ebef44cdd5b85eb8bc8e80532d3183956989215125bf022100ce27065d2ce75dd36fe6a9106e9eb7d26119c6171e1f5302d503788fc205e2d3 30440220576faa1b2953d4c3aac34dae7cb66f1e785bbfdfc9c9d95ee4741b18d7086fdb022060dbaea7d8796229d087274d91ce51cf42104b6c6391d2e17ee1cee03cefe8f8 3045022100d8a55b13d7a2ac871588af709469f4d658185b07841a9da982341d173a1b441502205996cdd1e91801d0780039c4c70f3a88189dc887eecd8d044ee1f669f66b4fef 3046022100dc0f145ae2e2b1f9d3f6fc7fa33e93df684e75856c4185c65b03bf612e322ec2022100bb2325869d688168595f59eb1a2fbd16b0eb29650bdc44d95bc289b371ddb327 3044022053cee635459cb47495d2257c0583ffa53344f90d793f51a6b7c4c33cb03e48a702202b73b9f695774c8e0fc47fb32124a3c7cc7031e215c0c850dfa3537731e79a67 3046022100dbcbd3dfbb04dc98c88e132acc634caaba050745bbba81ee2e9e9014f23b602302210080769674e7d2dbc2b59c3cdf492c9e528640cd3d1620a811539f2e1d3993f754 3045022100d83f7aec67a890daca038aaecf950714c3d2208a0462016dac517871e6234f1c0220362f3c5c97b5026c700c8e3a123ff035f6f0da3a65319440341918d46b70f3d2 3045022100845a004bdd2f63add445a759a15acb04bb657d937305cd12ee27ba7d0c9617dd02201bae7bac4e8b9beabddb35781a81ae19d6a3d70708f41fb3582834ee8a90263d 304402202c880b3d37439af6569aa57e1b7b8f1b483a5707060025be057e7760b3b1fec902203afc2a054661479cd8926f1960a65e8ff0d053164e0b38bc62669725cfcbd29a 3045022100e17d27b2968c5d4db0f1a14c62c5a332f2b3f44389f120a830619809393489000220686cea3e6f8e1aa0883c4a1a9c72f8ac73ff0834b6f234f71970d64d3ee45946 3046022100c4ad6203e2ec34c735e7b2fc47cd852e0394695e750d39e04ebdcea2948b098202210095b2ad0ff4080b40a4dc509b9839e15cda14806fb89867e9904f24b1ba203383 304402204c080aa6d401ead92f2e13b6e21438113994d37caddbe0527dc7e864d57719470220076a0746e243d1448d651b8cc41a244e7f9e594eeaf78e9a7863023f8ff8330f 3045022100c48eadd08f03819fcc09d07d4c55cbf0374f106472456cb83e72ee4419be2f0d0220484668ddf2ae98c9d744458c678f1ece188f3bbdd1da8eb6ba0e8e1d8fec3fd3 3046022100bd511d21da47143148c06e04ed68d50678558e50e6f60d67593a6d28328cc094022100f468df899ba0f86a0820b09f4082a0d3dc3b3b1b1967c2addb0bc7f58044b6e3 3046022100ba09cf53ce8ed0b683139e20753f8490f4f8602227b8891beb13d4c240666716022100a8fae00800a01a98ef1030270e56fad5812b73fb5588b4f98f37885a54703af8 3044022054dd717e1225bf54345f8640ca415f066f459a9e3d67ad1a4df4ef7deb09901f02205189198b3c13080143e4e790111a007df936d6ba279606d7ccb7029154a6ae0b 304402206a40c15b3776a3b3b2b0960ee53e62d3fc317b5df6c35675bb6fef4a4858b9be0220625fde0531880a81f36ff71e90906ea8751da47704d8d3c7d585ba5c3cfc7a9c 30450220775d0813d7b5ba63372061432938ed65fc3891655c47664e3d1bd87895bf5727022100955004cccf25eb7cd85ac8d1549fdb4c9bc6f89546983e23019d78ff9c631e92 3045022100bc88502942585f761e823b6a006f6a9b0237df86923d84fc2bb360c6d907aba702206ceed36f2ee38f06b3ebb97d34840a46b9f471f929334c3875895767a5b187bc 3046022100cb27396deee950d855dd91f5aa61b1b6935383129377e01b552e4de6af6cc45202210088d90d3eb3d42627dfd4e332fd78a5b09d20929eb0148499fa39ed3713d7de5c 
30450221008798aff2c91ddd6ad648f4b82c4b4c3aab830cf5112b999ff1d6dbe80d63e6dd02202538a388dc657dadd276abbdc8e71eb74f768e62e7fbe55cf8aedb27c4f6e1be 304502206c854c5791f859458e37308e7a67c207a2158fc28bf32cacd14928d2fc71ef13022100a28b64a5aedff4c13d3b141f62d7d140492ebb740a6dbbd7e67bd966e3d680fb 3045022050a791e436550671894c162239f8704f67df780cd3116d7ed3a932c8006e4b35022100f57025fa7266917765f0953f29fcf2c4751f26b4ac6eb5a4b76f2d71710c7982 30450220500471379b7e242985ee9c3e141ca531f70b2dd6c98537a9079ec64aa105f1ab022100b09b896c691f7c5195ab8888c2fc764bcfd69499e82adc00dfc544d2224d841e 3045022100a69f0aa52b7a5f8c7588b6764475c9c10d54c6731d10bb0aadc01250a7833358022030f9ff2f74934c24114af90f7eb397caf4dc0e16d96f6bc043aedf98b30c0a90 3044022058c27f4428f003f12cded0f31d5dec18de7dd1c39f7bd528be4644ffc8cde9c202201c7e346d6af79de86134e99b23748bdc13a97542e33a85d9f1e1b4589b740dbe 304402202788ffedaaa56976c3324af335949aa4ad32c22ec834df270ffaf84d0d8dd82b02206cea90e20625cfc82a9d8b391dd0031560be61683f5e2cf331cd993292a051d6 3045022100a7d8b23b3ccd7f94d00f619843b61171df8887dedccedc1eba8d8c908817a44b02205f74582689c7ef00446b154b2f3389730753bd56cf21409652b831daf8c4711b 304402200c0ad0ec0a730db2cdf13ecf3a2a2a9f6716b91cffd36b10b3a9f45821ef6144022003ad795d9f5e181463c259281e892757900b15b400931eff0e41f66dc04ea2ea 30440220253ef129d78c2d66b3169954670ca97f5e566a9782f8f0eb643f5838e4d85a2a02200ea3826826de9c09e14d6b67dba68ee626c7261c40535d71d91326ea3f313d30 3046022100873b92ce86ecbe5c79c3871accbd9410f282f6a8f656731d97864f4a0a33ef2102210090bb1610d57e6e6fae01307977e3fb7e22054abe6141446f44618aae8ae4ae61 30450220525112f9025c9d7b1d55f65ea979b0389cc66272359500a7413865d73ef9d8680221008d991528a587e541db2665973372328655d85aeeff8da0f70576dcda189eb9aa 3045022100e296f1d9c5ad8d7f544746057b8eacc4067416f2c382d316a0024e65a55dfc7202207c3c5c81ea644c6eeb214ae993a2613d7f02288dcb2a75f75db22c8b1c8cad8d 3045022100a636ea7dce872630f14d9c4f41a6eaccee2a0c9113da82b4e4a0c2629cc54a63022000afb9828661a8d9ae7dde173812ea20fab685745e416bf3380c93a32791a165 304602210092b4bc15bb41cf702196ced0d34d471c2b3a34929940c8dc6576e427337a6f310221008300a5fe8e9596e34927361f5be12d3159171fecd4e302999b37da07dbe11e35 3045022100defa377c31d14981a3c957c27a99c21ccbe74ce34051dce0056a86ac494aa754022015643d855bbce719aee7711a061ec60103bd3304ed3e31b8503e7ebed0b12310 3046022100db6a0f2f71ccaf4f6a082e642d7e0b68a88ef583c637e7cfc1fa12f3174ed82c022100c9d19c0fc17e0015c3332d642b9b27d676cd83ce32bf3a81fbba7bce7d9d0924 30460221009d2c4aa9f2894d0adcdd04078831c40a01e079ff44de5e01112f0ce45d1583a8022100d1bbabc452b7e0cc977db6c695fa0cf92a810af2ff2292bfdf51e56d5d6fe000 304402202e531d45d7b8c5108ba7c76749fab6ac861271ca55a8d31d17f43a469cfd8701022010f5651760c27d3a1eaa6faea835fcd62fab61e31a0fa2836acd0df03745c8ee 304402200b03064d0b90acece0f441a5e4406dd937c43566e3a72dea05d2a521844d6e7b022019cbdc0d92dd75aed2c9065b0f3b6d6e4de3d9c5ce79beadb2a527605297caff 3046022100b0257d7dc6793b7f9d75748c6285dec5c1bb50e8faf993c457b39dca18390e1a022100ef918b1202fd4b3621beaedd7e025d0d975b2ad385b4b5768c1954d02a6e2d31 304402206f2455a751a5a74c5d992ddca23d1e337b917eebfce729fcca9ffffd602a5e56022035575aa670007f3cdbfd2bff73a9fc28e07433dc95fa9a3b5668e19be34e64e3 3044022010efb4ce008f33234bc2d02fc2444716c7edf2bebd62963a16f4e64d15c8606d02207c4723ddeed4f75fba43a67eeb18944ba5fe93d67267f5c46563f65fecb23d76 3044022033b0e73ae03dca2b7ae1cda95aa7c36ebd326b836c5c4b039bfe890d94afac32022033e177141920ca5dfa4252e1a45c320681d7f293b2d4e1d1ae8b506510b6726f 
30450220084525ce7d59461dbb6ed6234d200f19c5f30fa65bd5c21d2f0bc25f00dcec7a022100d07b578db42b23ce5fa11195c0b0a50505d0c1b90d76bbbff58b40f35336734e 3045022016c1d5e61a90d2419035abacb1c1a8f07ecb01de5be99a71e9bacb27f8675e57022100af67f33c3ebff102fa5d7751a5a04d0e97804cb1aeec19aafc9595ca68984f3c 30440220188e74a223886f37361fd208a3428a7098fa1b450212dcc06e3bc35709ccf011022072d4e0a7158664cd0b74087b29d55579b2cf53206819f99d62e69631e9974149 3045022100b7edc2b8b71b998523352084ebc451062e643696f8a83cf5590c5c3f7e9abecb02206a3199e94a9c4b5fd8f9722542315a5f7344096ad288ddb73fee7ba054fdc74c 3044021f192705377a862fa8a3a22cc9708b741dba7a52f95d9be45260d911e4676cb7022100d11da0c770d17f8c0ceb6aa31f815aa2a3ed92d13027c21e0607ae1861981b80 30450220445bb1c98e6826002c928da632225b3c4557b1fd831a102ad90fad260c27f6aa0221009e593b20ddf364c7470970fb063e9c4b1137795a9b76e2466651f13eab9ba14f 3045022100b0b08df2f017fb7790afdb5a0bd82d688b948c5d705c14713eff5bd6b730ff9a0220507ebfcaf9bb80ba9eefca78375f76617bd458e8139ae64ee74e3b86c9809725 304502204b975993914bb17d076f72ebfdfb64d1471c322605ec13d8ea28717550748568022100f901ddc5760e3ab2c00506dc621c26ac8b7ab04b5a10b07b373863e29b6be057 3046022100e269f789f1d641ed27f7c684734fa9755526fb4f61d65708a5a74a3e5efb6843022100d3b24eccbc222483d565d4e9acb28892d6118e1ac65b17a47791616130ddff62 3045022100915e4d778d26cd6baf7c6e5ad3807ca5f7c997c14a639da2fcb69f0666b0571a0220183dd40c9ed67d89e56e4225e884d90a6b71699f12ebb6c4cd50b686b8a02928 3046022100a7360a600e85ed2e7153cdd54e1208737279729e76147075828a47e020006a77022100f8d9db27a36f624e8fe61417dc131bad80626d9aba9aa831b34465ee16e9b700 3045022100dcb20985362edc3b5899c458ae8aa7512c8e2d2f66ceab13f212a91cf58ab25b022020c5f9e0b34dece4c8cf491b641ae7e9a1a10eb6b4cbb7de16f1740b5b0ed6e6 304502202974f0087a612355d1c3d86b3605e8c382c0891783478151f11b6294b5a42be9022100ebcf3915c063000fcede999a868983b4762cb9b9a7c9b11759c30be3f0f1a807 3045022100f653890727ff3d2b044fe8938f2de8d2dc2422ef9e2c0ca02a6e6b49fb9821ff0220380f89c6d5458f991b474a6a6fae4665dff26b738ab3bbe9ec85d83cd80b072a 30440220036ef0e9e97730cffa12226e0b5293cad420dbf37161aa54380650964c4c224102207ba2efd327f90c80f3c007503e01b05413aa5df00ded9389efacace39b7e78e6 3045022100ba9824c12ee4b014fdb34df98717fa2735a62f91e9bb10bc48b690552d7fe87c02201f351e343cfa71a43dc27f61c325068c8cc259ec92b475e1b107f297fa721ba6 3045022100a55b566bdae9fd5470409251c0aa529a7e6e353d1f70662ae5c5652b2482afd0022072d7ac9a5656cea5a1672c3435bc45bce01d533d720394807daa09d5d262f4b2 3045022000ed5290d2f5b8f063cd1f238edd336086345b06a9a803b2a0956a5bba11806c022100d3a2679d8e6101e92f8546609a2d737349f2d1da3d795c6077ee5c83b4b9f716 3044022030238123033bfc74004fce6728f59b76ddbecb930b3b89d8e5b46edce63551e402202b62575ecaa758a0487511696e92f37763a08d2fb61a32df0bf22462f8abee6e 3045022019d4e31749a704f80de433b4126ab4477bbc14cec60883da1c993b95cdd98f30022100d5ca92577a12ff3858a9ee612847d3b7e19dda4cfd155a88a922a122f54169e1 304402200da90d71b27ff6f9e04f9f228416cdebeda3d0ac23a144917b6cf6f2f84028da02200544e5d4333aab63cea229c7b93ba17e58c6f5fb2857c77531255527a61865a5 3044022053919f59bea092eb90a5d416da0bb50102d761e96bbb08ca9f5c465b0f1628110220588914962ef88ff2ec326e1d3a123cb5c96aabc0c0db5c7d0ac43b09d46bd9cc 304402201d07e8b9b8ac86f76243697b090c19fe8d3c1b33b099db2be8ee71ddcd10512b02207fefec57aeb3bd0462e5072b01cdf33c504be2ce32aeb3b56bf8a2220c60ce58 3045022100eb916f91f7907bfa41dac20a850b4942be096e72d690886b271214e96e1f1129022045b4bc5bceac6716f1e8bfd7be5871fdafdeac87310c71d4f230285e7b915af8 
3046022100b01bb7c8463c7b16bb215fbb56a5ee28285d98a473713c6dabd33e2fabc48964022100dbd4a7bf87667dc7c4a6da81ce5d1f86b1db4bbeb4ce028bf2c96cfcef01550c 3046022100d4d4babd82651b3c630fcb745af7297917059357d5c1895fe55a1e10add4ff8f02210084277f9ababc5848dde7cc296ef6cd54d947a8ea70fb38f029dd4f2aef0c18f7 3044022059df20a82b28e6e9d5239052ebcc64b8a0cac7343022475586a4fa8fd1c4790902204377f039e240ed8138f6421c3436c4dc6f9a911862f45658af3664628d377bb6 304502210093f5fe8c789a2ba76aac8e3786029cd7adb3dbe4bdf3c7197b543019fafb612102202fa1ab99587d6c9f3200d39e5dd1db093d6b482059217173940384ceb3d34cac 3045022026ed4b73bb2eab353d8ab0bc41de44d7e31d23c88c5db9f011537e1f7dbb506c02210086fc0a36408e5bd09adb3fb1b00c5233174506adaaea6bb2a7ad62e38012b701 3045022100c90627f5a70cb08b870438b833315d3d90f8cb20ffa03e4b7d60b990995adff502205dd3812ccbdb17b7876fc49dec30fac4961b07f3a9278b295063ad8b7eb399db 304502205cd0b31eefa2a949100dd0ecd512b0b48044a309244353be545ea2e9ddd7d2370221008907996a29ace25573a4bb882f05c0b10eea880e529f2befc96fa8da1575a0d0 30450220439e1ab49ac94c1a558c070844626794e3b76816c8c8698f122a11fb7a26e337022100be23ef0c750774ba8e2a05db43eeb985f8eb364a54d8b0845b8d72522dbffe6d 304502202d93af43bb77cf877381facade2dac3c2134ff6eadd37d1eb8a7a4401634bcbc022100d765f7bd9d0b9a7a80911dd5a92bb4b7bdbebae8dfeb7ccf35352638f73643f5 3045022057a3c6dbf7bdedf3c14b277522a9c35d2a50e56eab0713799c69ab05c6bed822022100df2a29a7c09beca77e37334667199b00189fe96248c7244ced8edb2225595ea8 30440220366dc664cfc1b6f54040247153443002b3aa646c393df953b8862308261548db02202b1ba0d97451f4896c6af5cf46f93799555909a26ad9568c3448b825202f3981 3044022074d170fc5e63cc47c4ad03a9553bd726471569e5eb5a88056e04c52483df5b4602207c81ea2707e8b1b6fec6ab2334b8d9c777189dee69d46a4c75ceebdee7e2f3b8 304502210092258b838528dbe721b1f071580df648b0292cc2ba319231d8d53450d7f9d5d2022075876cb0e076ea4acdaa55226633996d5f9ab06fceb539baf6379b41053616c4 304402204f3ca48c42da615d6ed06a5cbcd41a2491b067ead3a483433502b2f3cc5bc63e022075d5d9c34900882aca215d6bbb35a82473cc852986c9538eda41e94c4cc9aa57 304502210091067e0c937260b293badf4d38db2519b58013a55599cca24cfe07855a2e1dc8022018e216dd9ba2a2bb19c8b650eece8a595e9d776aa185bf826c7693b8de87bf2d 30460221008526762a7ae54e809f45c054225d87f10309936d11d03e4307b2fd6c745a2a78022100d8aca0d8384634e330957a8d827f68cbb9bc59bb2ce2786c2acf5fa3663995d5 3046022100a7d56ee1786dc94960de72b9970713e5563c1c87d2f4ef0227db3a113a69d5d4022100d9df38f617d705d657883c9a5146ab1543cfd7af3dea4f68d87a2654c9ace768 30460221009c0c64cefd5adf5e17eccf5ecee68562cabe027e31571be4c619a5461893ff2d022100e1c5c8b50e51448d894c20715860a7fff21944beb53e7aed193e1a673cb2dbf5 3045022100f2b2a2ea5419dbe33f2ebaf96e20ae8c2d105b51a8171f4e8e402a7f7e97e644022010698f45632a01ddba609b0b1a7bb17405f135baa026ebbb0af93f9b01da95b6 3044022017a97b01bfee8c260e79467b2c018b678692154d36aa6ac0ab5353305832c76b02201b699586f081dad5770baf04e623b37a27bb2b8c4b238eae29ed849afa08d6cd 30460221008daf53eb8d4d199255c2ab8eb8589f401a8e806069d95a8c1e367f6f5293cb16022100820504c9279ed5a9ab0727b28218f2784bbebdde61143c013450a7fdc71fbc63 3044022028891a8b2bd2204962c473165544f22f61c34946aa3cc73399c3aad82e7aee5002202309c87d2089665fdfed901d86947c4409c1cb7fd7ceba19b1d288dd3b00a23e 3045022100f97833fa4fb2f3cf8e71d1ee7e18bd0eb5891dc33f8cf806eb3c0168919032ef022024a2e27373f3ec6dd54d85ab4534d044cb6f86f7367a469b7e5771b7f6ac2796 3045022100df31287a1e03e6881a123ae1ccad924c7c544474ac7c53b8febdea99486d2c2802202d4498c07175f078eade493e260bc68919911b35e0442401b94ba0c0da93a82f 
3046022100f6b45dc68edbc01f86f7becab3507a29735ed53ef1651f3d94f3c33939af9a6c022100ca0d5d0cdcb1d6cc711796f7637d6dc483e3def17cc61eb60ec6b0209a5dddc8 30440220055b17698cacdc1e5a5e024c1acd0db2f660997ea0ee20fa17e7eb7dde52c75702207a23305d29ae3a31ccb98180144b2a8646795314b4f1e08859a6b212a764b80b 304502210081e0f497283af604d0404c112e6c57f37f1dcaa0b42941756511625fa2922260022052ae1910c5af808e431e902a6514fa36a5fe54722e3b4c5eabdde06638127dfe 304602210089ca4e6fa852c3c03030265f34b7cd743600bc21e309995b5b81cdf4be5c7be9022100bd9205cdf0a3e2b8ae79998f0a0504b6abbc04f2207e1ee4d34250cc9d03d35a 304502206203bb7aa1d052ba5ea7a51908c8abcc627d51d96fd6cbd81e03171ee973f12c022100aeaf7093b9697e8070f73b9198672040b44ddddadb3b092c5c4d180e22a4e3bb 3045022007c90631918f04072b84a1333ba981f58c1a04d16a742a9dc154c6a0eee33c2f02210080f30fb65c799d59668d7da1e323ff1a24d2dc428e091643e64b7061d5d3a991 3046022100f12d21513105365a9f7b99dfd85b074218c58d8f14f92561c8d337938023b9660221008980bd8f2f7684297da35c95770c72bd1c1823338fa7ddd74079759904164be1 3045022100a758e658617e0b06cdcee69b8f6c121d36a499cf5eade1a698b9b0244c5da0c402205cb791b8997095249e46a0614c700081cb8bade1da13fdd2b2526e2e38831ec7 30450220705f2dac4d9cf978931f0dcddd3a82f34cc6dc717c6adbd6fb0040e37248714c022100b79864a09e5ddaa74ffe7e1ae67a7d1f71a31a3f22fd720e6ba8fb6088ef1df3 3045022100a1ed580daafc4645133b4a66deee091f014f4ae58b853b0d0a837530b591dc340220375235288873712138ec12fa1441039d08d23fe6b2b98d57813456fa2d451aac 304402200471654df55ba8bb0225f9c7b62d8d9c974573c32ba5f1edd76580fbc8ecea850220590cc2909c66927cdb55b7de2aeb29deb16561b4cd7530babccefcb59565dc4c 3046022100da2c03954fcc213bd96b892d0a44198e87974508790b3777260e881a17ee47eb022100e78dfb71716b6c9f4822c8871fb678293b000f34b7c8e0839f00c856002affd6 304502204fd8dbabf0f2f4cf54215663b120810a7a1f8daf76166d9a7ad9874867633125022100aa7be36adfb8b1d1c09f86504b499f3ebd9896e077cd55c6cce1443f967e0d92 304602210083f3cc45dff17a97b29a2128c3a9beeaeb937bae905a89ede45df8fab321a2dd02210087e06f112a584e381f652ef199d9b54c80036d88b864aa52e6490242084f7550 3045022021423330daf43d633661cc2a6d8d1410102a4bcf3d3dc202e807532598763b21022100da06bc3bda263f1d144af1683a96be2b953449f49228828cf684006f5e6138f4 3045022100857c95858e0ef7c79bb96612706c24d4ba7e939b6085c384d43783e6fe92494c0220265451359b8390645f17fb43bb13e09a6ce224b0df3e0c7a9deb81c3ba8df090 304402206850f735be266ae70c4c09eb4eade5754c59ee4cf78f6379cb9702249c26b66e022052ec8fc8bd465b7e8cbab6d0cd6ae78d12ca4104953993f3ae1b57b1e0428092 304502203cce5fc1575adf3965f00b33398f86fbf963a63fc35c6bda66948bc408819579022100a7d06e192e70e25426a370fad7b20a1396b95f5ae3c4f12a494f0a887370b25e 304402203dcba8822630bd83044f70a1d40b2e293639b91f97d153bf8a4cb97c3e5bd4a702203a775200a3cd56f6d04208bf768d30b73144e5baeae4c843c462f8f2b7b37ff8 30460221009491d93c044ddf70eab044a4d9c7fed40a8fb4d3744f116b6aec0fb58dc1b4c10221008b05aead88d44324da1f095f072b821c0de54c7b84060ef1617e7ec3911852c6 304502200207b02f7909a1fd261bf65f428d7da7d676eb171d4327feb57cfabfc9d02aa302210095044d91d1228fadf93c2f4a209b92d321dae2a5b244b53aecdddfdd4d95bb1c 3046022100a462634f496d7bfde657a61ac1d839228c1f0f575fdeab21ac5187f1de1bdcbf02210094af4c2121349b8a634e608770a59b69ae3da7105fbd3ea1a92f61b1efe15f82 3045022100d95a72435e4bca0019048e075261f256f42c3f4681b80b0de3316d364e3f512c02200724625791f50f0aba24a54ddb88c8610a96343641fcea7a53c58a49bf40204f 30440220418200a461c303c5a27c0eed6cf8c18afc5fdd7f28bdab2897f8df8d5ed2dfaa0220444e67da107f1110b0bf50ebaebaae6989a1d2d093452ffda03a6d763dfb471f 
304502203093d635c90c0e6258cac54a7815580917bd4fa75086a0e0c9be9f5d6c4794e60221009ccd6e9d2fe743d23ad5b5603f620a6d862bc3f76b41fcfe71819f1dd1a19ff5 3046022100cbcb5518c13bf7ad383481cbfd4b345459f9e7fe87d7d6e0adda1cb0f180b89f0221008fdb3e1b2e4e2bfb8f90af57f1068c10744c942d806b71f1c289678d1d686616 3046022100993a7e9a866574914bd58fccd5074aa783131e7ff458031b9374a6aff306cadf022100d55cb49c26483a419a6ca1673a3e825f4ba69bef5297530fbf3d18cc4062dc39 3045022100dadd892a5c261aa97ef333897de97b129b5333801a31a6ee0f2f9a829c2db70b0220447923fe6e17426a4e3944767347994ca85be0f8e80a188e063012f17088cdb4 3046022100c8de41421d5966ed8f0c693c2634ae651112c6c999c5e04a8af9a8d786f080ff022100b868fc26ef12ff5433e4ffe1d6c4b4c291644aaa0d17fdb7925034a550c26524 304502210096880081520070831a9b02ff35c41569c51361f7d88834356f28ab3bfa94b682022021eb9f8ae33c80ac82addc89179ede62e971b59818361da60635747ab852ed7a 30440220733be10712227e21758dff18eb8b41e92588095e412edc32a946c4e9a0759848022043a55cadc5a02f67a3dc1374a32bd0805354e3b87fa068c8b9cf99e874fd519b 30450220792d56508e9defc6966c21911c269c330a59ce2f48cb7c4a53a5fb37d44f4628022100df95899409ecc49b6f851f5b3280fcf819c6998f4f1faf5a17e565bbe9b65620 3045022016975f6347da22be8a9d4809c1bb715696963c7af38cf0761d989b7243390cc8022100a45e08d28e74162de9c9bf8515ed6b41e80a8d1cf4c301baa5a3ed853006cf8a 3046022100fdcd927723017397ba4f5b225e61228e0715bcf427f348a9a3cb108f76ef03690221008c9df56ba4813fc0a4c7cf322ea126226586c3d812bffe12c653c42c329deaba 3045022100af6bd9bd37b445d30137c97c88c5383589c5923cee21e7efaee93aa7c6b1f56702204f1b9bda09a444c8406d67fa37e4ab6f2fea7d07af4052dd31a7d313f2a7e76b 30460221009d5373f390c3154c1ab741d62f7f29f6247e1642fe54b7f1a801def8c34241ae02210099dd825e954b6ba2b85371d22c85e252e93d02052cc251bb548dcdf5337a6182 3045022100dd15b477b5b56b8299b416b5d9da1fcd58b5386d33a72c8afdb14f61e97116270220668a7f4be8115d0212a95fa843147c1ad0acdc6bf71cdb7fb0ca52993cd2c115 3045022058c9b4d231be5d216c02841f9b43134654d06a6a60a21ad8ee3be3ec3b19566a022100b1fe40fe6d81c25c9c0771c232daf64df0fc9275a308b561722ef9cc83252958 30440220737a0140f9cc4202a94a989e8e5cd88747d1777bdf40040bef21c6e5bb04ae0c022060ffdec044526ac6464b26852bdcbcfe01edb07ec5074d4813f07962dbe8f708 3046022100de09b6aa7cbaab34af088514d7dcafeda00db6a566f48e706f02046e65a14d2d0221009b539cd3ef203a80d04f89d78565831f83d2a92cc4e480aaee26cbb6b1877d48 3046022100bd79fc27800d765dc5c35dded3bbd4263c64cac41b9316d21b37afac30e749bb022100a9248efdab5d6bc50bd00f8b89dd01b90c37ee161346a051110a6dc475c5a077 3046022100be614b50645aa3b6e8156f5dc64a27e5cacc8892ff053f3dbc1dd213e075589b022100e931ff8eba5e2ed80f544e8fcba993932a6f651b5c5ff480f8afeab572a225c5 3044022020cbeba9e96437849df27c2167567f68ef63be162f999776384e014f1dbf8e91022072fd708992e9b8d88ab407b8e75643232c49e3cfac6381d99956cb439a346359 304502204630a36037e14ad7f88373649f56412730548abee62adf76e1f16bb9efba75db022100bac2d8f125bd519df3f5b4d48c55b58a25f39659163ab12de5f7eb5b180b34b8 3046022100b5846c18f1cd82299f2941c741ed7d2401aa0465415a01cc6a07cefef33601ae022100fc8aa94398d3bb98d219eb4cd079cdbf956a571d35cace72ed7953f0524e8058 3045022100f945ef7a5e862dc53a2aeafa6c2d28f495c525e1c3ccad4c4bf707c7858793d00220686acae17f843455103283486a24e576460aaa5879e3f06d65b335482d5047b0 304502205051160cf074b22f88cffc7eb4f4f3dc0dd3db74726e4a0f09a81c6f14244ba4022100a56a9190dc020b5f487d57cd31f400c6d9f33fc794acda8a0a93959cfec71287 304502210082d4e4e17304d84f694f8b3ebd0aa61d9a1a190d13d2932398be86c06b9553ee02201d938e19735eb1d09544311ee07742f98e64b96d7a4624e502c12a15b22b5720 
3045022100a8df5d7c0580a6c476c436f379e613a912e419f4c4e83f91885978e5b796824802206676cd1b2b07fe62a45a32561500f9a76ff8faf3df3e2d894d0539dfe884b7d7 30450220460baf3c99cffa4f70c9ca2ddf4e3ecfdf7875e948f79266dff308611b19f2b0022100f39279b33b8bc17595ff10c810bbad1eab154c91646dd3637ad79b281b01bee4 3045022100860d5aa051cb1ee8299c22568f883fef39a046f758982131ad001616c8341f9402202397db5c3f37cd44217a72fd820be663ed8cc54aa2a553a7af3704a65e229966 3045022050cec4ebefa29a7d26072eaa7289f33d81a6fb0c0e8364e0729b25a8283a6f7e022100efe77977219582c2a902011103a3839fd1718e60c6decf26822aa5b21dad85d0 304502204fb58e9f70a080b8112f8e96023bf2d00dd65c93028c4359bf83ab37c4020a5c022100fffdf0aaed3a74f43fa3379712b29b4e0254b4040aa3ac99e8664c3dc11e915b 304402202f7b987f60addfeed77c6e6e6f5d9343bf3ccbfd881f4415e6fd140d4e68c6f302207f0ca6d751205a4ecb152af35853e8bd371e8dcad85e3e15b690dad49ba3785c 3045022100ecf534ccf6bcacb219b0c6ef8ebcee0828e3db8952ffb33666307c569f05fac902202dd411c78cf0b4a5e163e03fb36b29b4fbe924466b1f75c2ac55d8edbdca3d0e 304502210098fe9cf2f09cac5db7c6f68fe22f5f2e4ebeeaa8751d256254eec09efb40aa1b0220505460f1ebdc249a23a6283a61fb6ae865d19e08618fd76e0fd6ae854f3e0440 3046022100ddc8b53a03db8ba74ecc5706683cc6a021c8725b418e6b826bbe67133e291780022100b528538bef0cc964a4248d27fb6b1a51f1f1d2c9e405726551308337dd5d8418 3046022100f262981d913c56e0030c757c6756235736f7029ff8337982a643d9b3c83e8d5b022100e410262df1b8f6dfd651c6a7f04ac01e36e22e511c8f2f1c5bd319c6633d0d17 304502202c6f0d67a327099ab60009fd37ac4bcf747e29df2b986b74e0c4491769965013022100e86e428ad7420ddc97201d6b6fcc2735b18f341d678bd429bafefb192fe837dd 304502206a0b675e8524b6cd026fef05ac2a0481e14033a8919d2b6d7e366906fff4dace0221009979109a86f799ded684ac050c27cd1bf3e734ad33a0cd817f76c20d8aa871df 3045022033b11a806220d18368d45787816abffbe3fa6cff5d95131748b1876e7c2d1d3d022100f7aee8bc0acbcb69c554497df9ff6a63e9e05468d90452ded1ff86ce2cef170b 304502201647709e79aa4e609f5d45d0cf6a2f962f049f9777da008b20f03efdb251eb8a0221008f6656126f93cb3189f50f05cdefac6073f7530db4cddd1d14c3cb27272de9ed 3045022100ada3e2a77db7f86bf649a7d4f0d9fe4a5178b13d530719a5d9244588a3ca7b3102205815eb0468b50a8927c7124ccd9b0b69fc0321c3d32f1cdd57a11aae7709095f 304502206a5cf21778fab39fd9c9aad08d7b0c2c12ef17a810be61f6bb2f5e23b4fe915c022100dc6f5e78e69a26262c9619e6330568b7b71d3b415017e298428234d457f922ea 3045022100c25ce7e399273c705622bc729014f2f330c048434a2f028b3a417e5dc41e521102204e054c35dcde581cedca83e119adcab3659264dd30f167fb105510e661548a52 3045022060286057b10f2e94a6644c1663f30655229e38a44210b012f3632c417afe7110022100dced9f49618b908928ec07048279364568f1377e085218689261da014934114a 3046022100e8a4f40c84fcc1efb3a6daa7db20cb8052d9cfe8dbe4092c2f8117afb884d337022100a1bb58a9b3f372b1e2679230dc5dabbee53982927c04e98bb29f426eb377c8c3 3045022100c4d1230248fa501a583d976e1711578fd7271b8b1e60aea1e029ac94d9600d1c02202fa51d128d771308a5bbfdbed5a7fc2263d62bbedf4401b03ae4dedf0ccf67ae 3045022100dc78c1c829552c60d6113c463dfb0624d63ba03d19b7e23008f60de9fd33308802206bdded449185caa926b72fc37ba337358f7057939d61feaed35b30eb08f5cd4f 304402206a5a98bce2e9e98f0caa52db81cada696b82e657a550a6580a6dc4ebb43db16302203e89f19a30d584817514019db6f6c1d8bce2a96f16dd59b6faf51bd2adb46d92 3046022100ab281da11058b53ccfa16203d863f5aa6cad9029ded82d4c542660f801edef1702210082aa49ccd02a381e3bfe1727cb10cc08a4a944c374cbf86c8ec413f54e6caf50 3045022100a2534f5738fc7ca67da03e91337e2826e556e6387096f13f64b1eb287eb23c2602203f92320f024f9710ac93649165b1854470e0ac8ebd6a73b72eb0b3d3afa47cc0 
304502202c764cbed62ba6f79731b92b074a2b4c757c121040fafaeb742efd82d7692d030221009d347754caa73672ec6b97601132aca4d35365a1986809223d6fb84dccbcb926 30440220018245fe8dc566ac6a14ceb900687f049a28dd5238a7f125d7afb516343d93fc02205f063643a22ebad043f4d566b86bbaf46dfcc82d655c7ee8cae6aa1f0d06cd5d 304402207398c2f8e3d77f2293e01a407ba820e806592d9129673927757a5d0f2f9a0f670220420252abeb1214df6a774a1581ff7164f770387eba2d91a83a8b0f3fb09d8cce 3045022001ed232421d935f17b30bfd9b3853a82475911e409e104ab60d45bdd6005d820022100a0093744a3dc70678e558533d4480501d8d96a6f5f3c3f01a2d48fd644bb067e 30440220445a4a9d7093ccb2293e820e86878200b1f7c0b88bc70ccaed9b431aa6a362a70220016822041252314d625d1b5e34820e9a077b03648af288e1ecd928a2077a3dd9 3044022007efe5fb310d6850cfc65e7952b4b4080479a1378f56dcd28ca14356700340d20220090f2c64fa9fb3c68bcf598a00d1be1168912a33cda1fd749dca18c2961af4bd 3044022032ecaaef7fe459e5402ca4301f2276f5a1133ff45457147235c69f236bb8852e022053434be2e4650642505661b6f0f4a8f4e66af229f8ba1b12f57f02d56cec077c 30450220663c98a106153021112b1310283570660dd02edeb09aa98f1bec36cfddc7d0ac02210083c6813db5471734c3b2fec44c2f9a44a17726e505e7b58f8b15228ade4f65f2 3044022007c4719b768dcaa6990b4c845aa254d65384e7b1350f7a00ed57374075f644d50220709687e2459abbd9aba220d140c681ac240817301dd0c536c66d8c0e2122312d 3046022100d5a940b99cb6757b54d13497c373d58715f0e170cc20de9362427020418c02f3022100f6572882674abf954d6ca38e9d3a8fa0a4a4cd2453b858004f8067b0a281dbbb 3045022100af731eb8dd864fa5b7c46660bc5675764acc25c8e86a1a8b6fba5b05198ddbae02202bf92bb9be582a429b7dc89c1e12157534892b47e43930b4d466fd039f68f7b8 3045022100bee9ba7919c66f34cceff9a58f584a179908a05c3fbdf7d01e1c61188759725d02207c97717f7b392218351bcb89a1d862c21bbb867ac2e1a56c5dc6f23f2e05dcd9 304502210091fccdd880db2a7a810151fe54bf44ec7dc026c865e5a77a98373e9b7f0bac1d02205960c26c0f7dab729215ef59d82887ca2e7e1d6c7ae52d5af2ff97bc103c2709 304502204c817c65185d026763612ff119127d5fd556725a9957914be16e7160ae48b684022100be1f00209d044680ac0729686c7ab5a184d221600f1437c3902578ae898b48f8 30440220659751d8ffd6fd11dfe34c23ff363028bed72d680a54d51945f24eee6433aea702207b4543cffd54af2a80c9a485c4eac64841f6945a5d252e5f5ae936f147fd7131 3046022100a0f3bddbddefe0c573e8146d7d8525683cfb22a20f6fc337cd68a2c498f356ea0221009ae39bcc56cc07b99b44f83b5eb9f22296c0e395d1c66af895b5eb5db67c69a8 3045022016ff05d97e90e8b4235473d04f0ebe7a2521273b95872f069502474f3fa2aa4b0221009b25bc276f4aedfde57c1aa91b21716a7a790f2f3acd645bb65bc5017470926d 304402206aaa9e0b354e8d815ff8cd6f1155d4f34aa4eaa7c4f0d6d5dd557a6ae626b78602203717d554b9f3a3596c6ba6bf49b240a1cdacfdb14a217265532ccf3e52ce3934 304402204bdeec685d5b728e0bf25b7ced1844af7b6b3cadc4025cedceb8253591d3eb440220797d0fc502fecc36035d7790edeb2cf8a2d1c4a39b6a499f188a13e01acb5740 304402204e78c55a1d97e0c9bee8892bab96a1ad93c0c61e40e51527d44c009363d5c52502207eaa7ff47365d23cee2c3db6b851e683c05bd10af89ec596cb057f6e8e94dd56 3044022057b283111eb6df7a58a2ab4451f6844959dbb5140ec6c5ed69a65925210103c802204f28aabc7f4d4661b3329ef965c28689a6d2377826fc7aa3f5fc92da92120323 304402202cfc1b6a1b5864922eb372866c34060e94c0236aacc44e2778c07e77b4ab778502203f36335eb593aca42bf1b6d2553080eede4af92f015c7ee0eb33df8bd6a9060c 30450221009c6e715c5e45e90c6f8c6c8c66dd902afe5c6e7f0a57fa75aca4be572a78a848022004dd2abcab6765179e55b067f043dbf13b50d0aae38b8f71d1b980bc073b2d90 3045022100d9b29e8436c62d089e67e9277cce9a309e68d2c0886d54888080714dea7610810220390d8d9213830089b7f4a7feae2e364952894e03bc8aa5a6f547d56128261387 3045022100d5758e58e8a0364597e8c00cf81bbda493a697aa6841d8d74f486ce4d1b3672a0220227d037f4d86a51e929137a982ea88ece2489a6781d27fe29033e1460966f085 
3045022100f0be3f6dfd7beb7d49573c20a2041ff7c833c1e4ffbb95d821ad8f44bb64919c022058223de99bb6d5131cdc682d3f1f2519bd111b81e1899c66e048e6db886e44e7 304402205cfb1d8a968b642b4ba76ae09f850c9464bbd5aee6bf30a849f280427ce7e37502203aac0b0822a4688b273fa0aea4c08df12759f7070871c1adce4a254352ee4747 3045022100fcb7f5fff957a6f8dd581d188f60ed4d65beeb786315795f5c3bc3cf37b88aca0220028db2152dda124ed75187e6cb98711abdff17d2f8ed8ee97c24602b50f35009 3046022100ca96971de4ed6552d93ebfc5fda3485925c4bb2ee723656d2072f91356274eee02210086511865892ad180bc278e09254c83d3ba1995ed79ba51441cbda8f68165ca7e 3045022100bcc8e204ee9c667991dd757b35aa314397e84d22806db3f2ef1acedafc367305022043369c39c840dbacc7627b4647ea56f69aa07701669b4ea61d5f6ba05da422e8 3045022100995a3a555ee13b35c9c60e73ca60f92427ba886218c516a21baace87432717f7022048bc9957de7fbbc5fab2e0a7ac4de740a1aac6ba9e8a59e32ddef98cbc01f069 3046022100b5286fcad5de163929ccba407b20ef27fa6132853a31609cf1dfbc6c1f5598db022100852feaf56d005566f18e26cbf13373b8b1fa69b7df3873c2747d8692dd1a7ddb 3046022100fe19b4fe7aa9a0c4be7513086f065613164bcfc831713082fcf3dc39803e44c3022100860024ddd2e736fd3ab3b79c611143ea05fc13aaa7878928c40aaa70cfd05df4 304502206a64e2e42bbb6541a041f5ab365b2535ef1ce8db46d0129fee7902f744149cbd022100bfae3c84ac2bfa02060d82aa30f17a3df73955ae94e75ad6ff3715617d3c2b9b 304402204eea1dc822cc1b66a1f44a735428dd90eef6e6c2af9a94fd3f512916b8791c12022057bd93c27aaeb2748d0abc083f7df7779ffa24ab95873f90493813bc9acf1b2e 304602210091ff3f9f003421abb56213d6ee8c039ee782d61bafc178500477a7ef6faefc15022100ae525122260373d1ce917dec3714352325af61e8081126d3efbeb1cebd85e60e 3046022100819dce52a25feac795de99f78daf41d98cfc779faa6657e44b436cf873a0d7a00221009d997834422c8acfc00815d2b2d47225a5ac52322e10716309d6d9d2940daea1 304402206beb9b8428b369179e2d41e32e5bbc5dfb1b35481940fe7433bed7e9f640095202204c4faeaacc8f29381c7f12ad4e357a2eb50a1edaa6824a36dcae34dbc27956b5 304402201b01c2a9418f7f31c77cc0e2fd3e87701aa6cd48746d7704cddaa676ec85c30402204785e31ef3c7febdc3461bf02cd2fc294afdad4a8d691787d447fdbb722f0eb6 3046022100f68d2bc2dd3a365c9cf2eb08e8bec713f105c7f50da0cae576606e1a4c3e47c6022100e9e2560cb4176a30afb7ecd752c8465aeb132c0901f0cfd07893035e91b897b9 3045022100927b8ad9ab29ad8afde391b18ff897d743bceb0a58e66c04e00659d1860e095002207f5b792e0bef776cc0614412460fc7aae905f14458f77108c1e6dc5344eec370 3044022061a18875c427e7f632bd7dbc8683b21207d6505729ac398fe20959c9b729a3920220585d85690989b96d1eea29a5e471a2a406084fd802bf28c8ecb2590db2caa95e 304402203f3c44639370607ba1764f5bed3465060faaaba5bd322a77bc43c876b9f374ac02205ff27d6ec102c6fc20fa539d5b370e35f5cf16c80d351c0df6249b0c2277cde2 3046022100c56cb6c8b3c7f4b8716a1e6572550e3e53b0915845baf602e80bc05f84a64678022100aed4680f4c024b46e46061dc886068db77671e7f4eab8132fdac265f3cb99792 3044022006aa090154cb65c63c5c186530923eace555255f2141ee80cdc580447cb978a20220143b76298667d9b8116aceab62dc7c0c0e080a00fc4c49efe170e9e62262e872 304502207e18ddc98a7f19e2e67ad39dc2a6e5626f13b8639e1b93b9956181b8841cf35b022100a4a8476e2786d91233bb35c173a296d08c14f5ba21ed6ba48dca69095cc42f7d 304402200803dd3f791970231d5da3859bd5046a3a0eb8426273fe9aec16ea6ac4d3dce502201b1c45d3ee1d4d77f599552c18571838f9326d701a64c6b531f1fadd7ba246e8 3046022100b3dcfa2d0c5b9ca07ff9a7cded3c02d4b0b0a6af3ffe80a832403cffde4d0eab022100b5a07daa01129dab417776d69273f209dac4fb71c27a6b0fa1060ee2200bce2f 304402201532531a0a6d99365fccbdae5c96da51edf8d7506156d9429b948d3971a19ad002202f622e8e934903fb1bd855adbeaf37dcd477c12a8cfa4a47b8a9b3486b249b18 
304502204c8ecec07048d34be1f3944c2e678d8afd965796bc7b10de75b79e3aa21469da022100e16d98b29586419993f9993f43ef8fb9ecec0b6ddcd0e8b60bbe51734a4f2c9e 3045022100ce1b6ef92b7342bbbf8e5226764e2928ae05ce3463a6c7a06979196c5bf6a28202207b1349b16757ddb0e22117618a4cbaa18e3ee3dffc0be79d9572ad684e10530e 3045022100c3dca65bd65e79c2ed29dc53f4ef03a0b27d2b1201bdb9af4aa304643c56037b0220595642a61bb6a563b4294c7dc3138000a86928a4df1f69f93b8abecdf19491d7 3045022100f7fb8ae335965f172ae9879c1ba86e2659ffc57e13871fd351a13505df84b31f022078754729f65e1fc77d297cbfcdc85ce55b525d8dc41691a267ea05c5dd17f7ab 3045022072f39404db53be0c6a5ea520eb0365618807f57c93b4bb918ac2548b29e776040221008a627b14d7f9814825639cf09e014443a20ae6a1eb853b0de2369b0d4c21187b 304502207f9481e89cb9d3d3c90795368323b256663bff5e5b7d35ad9c33fd99b2cb361b022100a500acc2b8b3c05ecf12a180ce539620f669c5074d604093d5d2fd1303cce05a 3046022100af366225f9d64d7d8c0c3656329b7a60f3d6483687e1bcc512e45163d7e6518a0221009a7962d691e209389270182bae70ff096db366e498c4fca545b3446812f39bea 3046022100a071f03eebb110654f78df3e2f88b4392829faa7978003306f4dc1308d54ff700221008bcc260ef3e7205168b27b869b5718d8ef43e62a0276d6392dde397aee8b9328 3045022100ae5a826c3f8e61bae604b98bc3544637420349424e245958402fa34a73b9d1df0220215d3f10f649f7098492eddb63ad9fe4f01867d1e49ec9b748573b32c10599b2 3045022100f10fd11bb9e0ebf0286a1c9650582cfffde320f101d122dabf014ce52ce9a363022075eb68255af9b4f91f14348362cb7a61a4139cc86e4f6963411ba748ff9598be 3045022100d11bc47f864205089bbb57c7ae0a2ba2b7de9001eba849fd4bf1b936a13a1c3c02200d55859a6318cc3ef0bce39bd86413f8f444b2a84bba23108b60275a0a1dcd73 3045022100e6fb20021195c9a13702cdfece7ff6892c57046ac1acca8f3ca020391f0eefa8022072518fdb47800ac17a9bfbb8df1179f45959e40a5056234ce1cf19b9d3beff55 3046022100b4b0225ce31c29e98a8f17a23175fe71bcacdcfc9cb02866ee3eb4a4f7457d16022100a4f7e50457908be5d6782695390f3aac2be18ca689aec7e58be7f9c7b045736a 304402202e4865d2773f0ffddef4c47344ab512800c33e15d3a192249a0e9d6e764b59c2022003974d8be08f985bcefa4a348913c4ff72241eb9ac0689c8cb62e8ef24267e90 3045022100cf002c1fb26543f2b9cb625962f6b79528944b9a5288f975920b85160016266e022077cf0ad708c80e286ba882c5cf0cfb14c4370e7f142c0349ce8a4905a7c11136 3044022010f8035176530108690e4c1ee06276a5732621dcbcc3e04b11fd675c3b82bf45022008137340f92c96544e4ea11933f697029c918d1083436ed0a131e195f76fbad3 30450220563039939368cdc0d490bb799c542e5a3b074465cbae16cdc9b6d12051dde1be022100e2dee723d31e36a6a822b1abc4f62c4ce9fc3cf788bdc918b1ebf431f5ee6ed0 3045022100f404eda155c487dcba0ebcf7c2c4b312cb0483bebcf3496bb3ff8f25e07f47da02206cc91810d45b10bbf6e1ea83bc0d564966e8b1beef0bdcd572edb44daffa0338 3045022100df65a6bc89179d764f188626d8d05f7fb3f6accef329270b297406902c2846d6022060fc7dbc16a45f0afe190f55d23cce66b74a631e3640b2673e25ecebe7d7ec09 3046022100bf6b5ab0b44d79537ee920a84bd5e8aa2ed79fa21ad2aa992666cc093e2e07d1022100df9ea5b3c3da8cc0b43739ebe9a9cb4d221bf3e4f82a8b0f1304b41e96fcb9fa 304502200fc2c3c1747519c7c2d41fa966a6a64e7740c64bed4f5f2af8c5d024175d37ea0221008aae3086af9d9cf4ae6a435764d1eaa9cca8d3d293500f993ebbe3a50299aa0a 304402203457f3ff2b1fe8457fa911e1bc583ac071b31637d91136e3506dd3b3a426148d022052fdbe652d1ffed1bb2c1645fcf02fa7e10a3a87798315ef001f592074efd8ff 304602210086318df25d1a8e77b3ef8c7e61c7e2c860cfde849b7f19bda3424255766a20eb022100be0471e38e89e3db39dfd6d8305b5637e08e33a4b84320a9fb4a92839c396e02 304402202ea9f3ce4ec1bf9a8936c8e7feca2a4fd9bea46392aff6179f74af7f82f57720022016289aadbff5c3d118e0cd00c853903476b38f02d3b7392575bb98c113237bf9 
304402207b72295421e75c0fb24ef201b8152f26a9d3b59d34fb53e8dbc1d02e672386c102203437799539eb9bffe1b3b9c74188cbc5a4a47ab216f31eae6be4cdab862d920a 3046022100956640f2b0c581b505b07e1c30c90155d7c8b866288a883e033c3383891049b2022100d668ef030beae0d9d077ae33517f1c5440de4fe6025e193e588f46faf91de562 304402203bcd7e087aa51a632db19b924f42338d8e7d31e4f3661f51c3a2054c922b767a0220348ff1382ea02f8ee29948fcf1a76eeef3499c7e78709b6890eeaadf9800de9b 30440220055b655825e3f65325b30ec1c098e1130188c0ca261e2b5b571ab534646d5226022024afe0049e05c04ef5c523b0cbbe29794c80aba74344946e89ec2d67e779751f 3046022100b0df3dca2b2fa0a7fab0d84d0fd4789b1088fe46f0b717f211ee87fc2639a137022100f6658161e7726043970d60dbd7239f414668d96827dd59596e1e584c9b12fbfa 3044022029211b1a12dc001d1e73f2ec18f0f4b074bf63684265edbe48fb4f003cec789f022057b0212ed71d2b53901d087d0360253fb25dd1d41ff8744459f130ff2df06d5d 3046022100e9041041e3a8808d7cf871313d41458fa45ebe65172b3c1dcf0a7b3608ca41950221009d1bb2a3bea5e36eabb62b1ba6df48e614e9072ed44cde3c2bbf7a3596625107 3046022100b78d33ae0ee34b766bd1ba6b0def017c3ba749339acfde6fb296e211a61f2502022100986ee9c54f438ef5bd26bae369e999bf233b4f13c369b4551290397890bbe368 3046022100c96137e02f49529a479a7ad13b4495d328d07e9c627af0081403b47612d09117022100a7409ce0722592b0a5a7ef6a739cd41f5fcd1bd25b967c95f8146b3a96ab6c09 3046022100d9f819fbf3bd466e3209fef5bb8f62b85446ce2587498be25dcd3b6eaad35e51022100b26ec7c99fd3de91a09fc0620109b5cc0033b5a4b49851c949002d53dfd39f28 3044022065d9e292f997d4fbaab64b26d29c1b8cf26e74c3a14e4f592a0fbe68724fdca7022054fc62c48056e577805897a34f0e9a8694ab66a1de85f2560631bbc35fd7ec27 30450221008be2b20a953268ba07e58aea343711490a4147725faf460770498a106fdd687f022041cadf22da610aa3d49e19fef35e35e86d4455162939a2814aaa7c6ad96a020b 30450221008b321a8d21c827c4678cb67b1e00931fe82932fe74b8cf46cb96e1f84e8f438d0220038dece24ce278a6fe6092647b47b6114f6f42fd2af23b9387c9e878481d37e1 3046022100e19331cfac2a34ec1ab4c1833ed88e0644cc4c0fa97c864d5951c302e3012866022100abf8d778acda47be5f9692b5805bdc276f2bd517eb1a355b5f0584e48b3dec3f 3044022001f3d1a29e82c1a37652c5d77e6f7fa929802d542f79b898ec42dfd9c61f1f97022021f626f3126d966e211a96be7649c3c1435c9f825b3b05b38bd30328c746051a 304502203090389262c186d665193f28f0a35f2ed95956300f596df8107b5606a9f4035e022100a1797f1bfe70b31d2f08bc5c33756f04142506de162f58b309ff5caf751fc530 3046022100aed95e128f51cfa7ccb634e8a05e6246fcb9ad881c21dd2ac380e96c37600ce4022100be369eb233594db42dcd3d311f7e7c986a711384439a86cd1b9fe1d9144e35a5 3044022049ca124fa62f4437d035ed3fd0e29578a65468a59311fb75d7c03e61b1fa0ad102204bf4e4c06ec7a0dafce4ba3420443b9b192d7b3fdd43c8730481530c5435056a 3044022076f5c1a1ab84281e64faaa2b034e364e46d7f7377f61cc860cebedbe35113788022045aa605289d1008a422c67855239ed28ebe2ef7e88895efc132f44243de1be56 30460221008b821f456f0dba41b0f20b203a343b84cffaf549ba7c3d5263c19f2680c91624022100ef911450a741e81fdcc0d914abd90f504af3440000220a392726c45716c6b76e 3045022047c2cd34fce85a926b4d3084bb968ce101435788095e5af17e3341eacd4a9400022100cbff0500e96e64d0b053a3bbb5abf5fb8d99191bc47a7bd43e7b0ee64cfb8156 3045022100dd08aa5befb92ea0980b3cf49591cebccdbea8b21f1aff445b6400d6ed4684b202202a4a4b182afc9acefc8167afed8e0dddf211a1cb69a7464427cee97244e7b9fa 304502210087e8fe2fa7ed03e1a01d49e0a57c7a465e07285f96c4eb35dc09128220a7207d0220098868f078119c1af2d29fbd4ace9521cf3d05e00e89cd481e54a0be6d854998 3046022100ffdccefdd819086008753f942974a50517ee0c7c44c9eb2f64082006d799901e022100ae2b528ed9e6cde114ac37fc3cc047262de51a6fbad4223d387168e28257a37d 
3045022100d0215923e8af61b9503fbc797668c50795c97823fc8484d562873c25744d898f02205abda4d213dae3e7387f0931dc851e5e307dbd204b8858e3e832367815368b85 3044022070245a67292bbb2266b3f4ccb69f66353ddd0c0b50f9dab0aeef5be6c68b022902206bc96638901f826c37bd171e13ea74d9b35b2ac1096f3d399e8707b146befd85 3044022004d4d2bf3a375d018154645079833cecde11744ba4735c626f62016144e8a24d0220496542bb230b61191d05e82ab361f79b91d17abf6124752978ba08b8ed502f6e 3046022100f4347ab297d6be79de6829da8340a5e7300b483c5fd6c7f8907e43ab41a7c6fe022100f0bba8277c89918e76dec4ffbbb8d5750fe3ee934b51b3302280029ec6524a84 3046022100fae19085a0c047e5b62a6028eb889a23bf40efe6038a92b69b5d41914ba2b45e022100fda394209d1470f5b1dd05561f1f9d1e40f1d630fca481f407751ebaf1296fe8 3044022000a216c681fbe8e475a52d76b12171b67c2c6b3c809ed66861e15b593664446c022026d6a3b346b89f8165e4d8ab5ef80abcd1948b2bc85bc268752ce93286382c98 304402201d9a42289393cd864129917eb07a9b20dd215dcc5ba7263a0e88b226e2af6149022035688406351f422d17d0027346358b542d64bd0a825c60ba6b353b966140116f 3046022100fc34f067b5d8007861676f75a8b4d6741ebb6fd06b0b49d82dcb2b462e2ed9c40221009853f484c82c96623df5e5e148a7c96edc7209c7606854d977f61290c25a6348 304402203833df958751c68a5193f2c139de6ec5b05bf8e703a05656ec25ddffc9a364930220103d05f47dbbb8e01346b7eeb4807559d1475af914b4f6199210a96256bf12e4 30450221008b054af534287fd996ef0d1f0d767b07f49b49c7fc76dec90e4a56b4f78405810220086498cf4923f94cfbe13c551e489a18b12892b55c6ab570690ad5febea5d85f 3045022100fc64ebc1cf5ccb268352bb8a1a77fe8a2db510cf085166304220bee8fe8e524602203de2fe8352f8d36524ea40c3a17c8f435a3c32198555a346f7976182345f7440 30450220213ecf3eca9b39e1bda0922a620786e34b7789d860ee050d4ee9954697cbc7ac022100fd4bf04822c4bc482138e0f4d6565faa3ac2a946749efbca4c80024f512cf5a1 3044022067a7f2e44ec133c8a6f285ffaa831db17f63e99849b3619f2c8c79c60234afc30220505b0d32c3e5f1cc4195fc629d69473c43617af5f88492b6b39a040a6edecd00 30440220782fedfa41ae1165494f0ce362360621b156923846353f3fca38895b24059ce1022033702ae40ab9afb356b62e5465c3d9659cef1734488cd4d520e1c86afa8616d7 3046022100b7c742b837e99e6103a2dc135c5a83024de626383c2fd288b4a90c85b669f56902210084360ce184fb75c6748f36fd3b4e832db72a227815db9d962fd9ecc7ae3b9bb9 3045022100ca0653eb970109b2840d2f316e8594775ad00c62de5671aea2670d94991ac20602203c5bb05d662b60f9c5534bda3b169a37f17ad72d091682973223b40bd2d32f84 304402202810d13b70412980a4610fce12e132a87ef13836b52224c114c76eeeae73eda80220563f183b381a374ef67948fe7ca8b3d7507537e5b8af65168d2897e27ace7bf6 304402202a0395b59bdc60a8e7c61ca5e6a1c1ae9d4015f317aaac98c836b5a3285339520220096bdb2ab5b0fca0e342684e5c24e9c2b417d6ad077744a70f34f2081947c5d1 30450221009274c0482b41cac3b01f342c77ca31f64b5ed719259cc3363be8a7005f30cf7e02206e1ea83ecc956ff3b2171636055e6ef60866536efab550f80646340672a14f18 3046022100a024d02d9ce50b2a7db853574c200865262dd158d4c7ca3aa353e6e623724a29022100dcdbc343a76e022f981eee536499fccf3037c378155236f208a878e99c3911ec 3045022100f39b890b286869cd7d56c8395d917360f522c6fb1d5c6d5c95ba2086c2506f44022050cbdc3ec5fac894c88de5a9054d4268a76280d3d4b96ec85473febe0773280d 30440220409e35263a28ddea4c64f5e671ab1bee2f1fbbe0d18928d63485b4a257b7509102202597c24d08ae7767e27822b537cc0d0dca88fe107ccc2846125bd2d491a6c716 3046022100bc5c88dfcbd97ee1c6a4e7963bd04d2698346721bae88ca81fde8f31b0cc78da022100d4284c5310d33c01d95b0175d2726c878f64ccbb3ec0048b4eee233a400471b8 3044022046b413b118155ca3f7049dde41f01cf63366a22d86b373188c7ccfe783724ce602202849c8af9ce5314dec6333ae8bcc58e1b61b97d110a7eb73df63301682a0add8 
3045022033e283dc20382eb28924dc5d676c140a477aa4b25ef23535c665dd4cbab56dfc022100f4ebbe93284ed1c112a70173936e5f5b940f49fd84f69abf2c08b37ace1bfcce 3046022100d17b389aac84fe3b4948a85b959d5f5da0c69b5c06505b02524d7cc3c6997ec60221009eda3e6760c96d707007703761338e6fa405b45820b050c7f7ca60f348d3612e 304602210096a9bf01b945dfc045f849fb8253450e7729ad25591818d0c5d062b88e00083f02210092968ec9ba0d12e19f61f180a555946f6b8b1759769ce79ea663f2735fb0175d 3044022046415fae8fc34345a3e576c9af47da3b8ec2e3f79c45431769ea3d1282cba33702207d59763e2bc60a93ea68b919627e74166a8b82b31b767a9c9ef6d0cfeeae1382 304502207566620c0d6ad40107f29fc11ecc043c98946a31ef06e33bfaf944350c390343022100c633964091b399f38283b80f5f3487b47763c80575c83b8d993260f728bb8607 3046022100ca23d177d62c423bcc764360907ebd111ba667b04b389d3d95773f47a848145f022100d5b5cc60a5e195da87e74689ad638fc3c6a6f0a1e61a633c2496a409496b6afd 3045022006791ab7ef5a7a34b5508c9f6cd49b1d5f0989803c9a3ccff299ead9632762e1022100d49f087a1f5744f44745183b4b92fb21b1517ae6486d7b4b60236ff1699621fb 304402200d4d4818665e42be7f8fa5ab6ac08018ff9f221097cce2aff6a3fe8736b28aad02200f5ed0fba732a18a9195c89aecab9744460e96bd54294016389a5287a11f42d5 30440220674384abbe7cd8e58dee62dd7b37f5ef5b33a5bab0813beacaac2b2a13cf628802200cc7ce388ef4f25b3383ddc69310cd4dfe3b0db36e333ad232c1c43bc6b9fc29 30450221008af000444b9bcfc9dd9c20bb7ab5e4e159e77d3d6048eaa3e5a3bd2deb9341b302201830b3e808667a46a283a9008bf1e2acc008047b4a78dd90e5f5a23a5a013946 3046022100c33c0b25a7d6929259b4f001372e9b8e8496b5e6cf29e14acd5b06c5ee2b29d2022100d05d03ea18e19e351336f61ea9992eba54bb541fee1af0a73dced7b7ab3002ff 3045022007e6cf5ca51013a94975ef3ab9e8a3fa308691ff861be183f2f91593ebc3feeb022100d74a4caff6e62dade87c6354191e388265c4498fe60022a7b47a93b666fb96e1 3045022062bb1398fcc5c254a1ada17a16b418332caf465ff4e0078b01dea966a748a5ce022100f1fb3e41f6b6d8774a3d8e6e2b7110efe80d73948ef8ba97e06e3019bec0760c 3045022100c8dc9efe330b207c57f6f4e3bac9804544a7aa96a61af1e52ec08d5f7cf75269022032df17397eccd5ed80286967b037c08aa7f75dcd77f1fded7b9f2c4209beac95 3044022001b287311ba4ee440e2bc4f234cbdbf1c017fd0c40e7148832073b81a6c5f2ee022016672d08e8dcaca3d9930cbfded4e7842f1e10c3470cad402c398a7bfd07e079 30460221009a99955c2e431a180a229b5553432bb4a16aee94927cacad85f39b0b33d3608a02210081aeeffd23afe2b822d34918f03f36f876939c1431d2745b928d9953d46fda88 304502205d9a16bee961e1651b2f793a47dd8044b7b2a138df86201d0ce86f501f4feff5022100a44a0a2e2b1401a55976b62ba52e94ce70e10ac963bce8a0d132d8f2b1d7d326 304402203027fb61d68f83cbf369111b8b657405759392c663f543f58e7b1165aa65d7f002203cca8e4af1d9f620b7c0dcdd68ca10f750ad14a41eebb78b48db50d21f0a67a1 30440220566d36704aa0d6cdc1e98b5093db20ad812fd86d11dd9e4fece5025fe4a6334602205597aaf0aca8d1e6a7eb3494d5916b32e097e798c1c2da52a9146d39ceaa5647 3045022051e4a414eb87b9523abae7ab357b2cef1cec4557df70c434a28e2a7ffe0c3df902210084ec63818f8a6f536b2dc2fbd6da7e19ec6097e5add1efb28a622044cd7fd67e 3044022064e0c2dec4ad80df83d7142d8f909bb53906c49b796b93bae3555267da7ac1e2022069b3942aebda5a9f05f61751fa7bd184e9ccc51798189c2a97716fe08c0faaac 3045022100bd87109b6806d9c5b6e5eafb83aef036c33a7ffbf86e30ece52961df8464bd18022007c8fe44f6e20496cc1856e84d5015aa75df9bf79d465a57c9a10062af8e678f 3045022100b45ff459052cb33a26bd6c2a615df150454dc070c9e9ffb5983c2fa0fd551d570220021e013cbf100a7cc682f06364f88da209f05484e24fa6985b9777af1a48b33d 30440220441724b6b10b4fc2daa417cf2a09fed95dfffe13ac6e3485242d730aedf049030220650f55c93ed605fac399828f80a5abbc93add2eb5e2b61988591749c94753cb4 
3046022100ccd244747a0bc9fb289c1f40097319ded31b731a96ee626420a9fea5f3b0b6c5022100b0ef2b91f3728532e68f5aa6094c44c94eb3e8d4db9ca2b726a97dd8135683ee 3046022100dedd602ba893238e8d9d8942570b2a26df2575f3b7199c0e5a386790b00882d2022100a1cc18bf475f77d51ba4e24b8ade2bf8e66ff4c262edfeaa92939a4380d05951 304502210086c483b139a1a589b8151cba5c8bfd2548404d53d1a45076878bb7a001ca1e8e02204b36ede44673065ac8c33bd23435e5e423fd61d7dc38a1017cf71588f0cc39da 304502203eea9f654f682a5f2ecbda6bbb46cf6a766c35d02d2aafdea9ff84105894e47602210088602fa8114789fd74da3fad7bcc5b2da113f040992ae79dec8f9c491728a1a2 3046022100b3ea385b7143a7c207224ad12db07d63b75a5ad378c082964329bb2aac3725be022100c91b44864a9ac9cfaaf1411f442a0e2db5f7adf199dc09ebb212a8fb56d86d87 304502207261e347fbe23fede11ca46b1d1980c85b000f98fb7efef829495a61165cceee02210083f7299b4dc74ad752298f601028348e9037752e1188c2e2909b519ec4b832b3 3045022049e3886cf1209f0cf828d4281f97c967046d8c48996433b03a02abfc87d7822d022100a1e4e89a6ee017355a985d8bcd55f2ea06c6f55359c646a79ba7cc54c6c6c9c7 3044022006a3ed10d4bc519fed432a8e83e54514ea54cf3be4c0575786e965059edd93c202207cc267d461ab1aaf83b60cc5f4d5d829ab3ac41a78d8cd8c100201d105ac8ee3 3044022050c1f8148c72e36a98d2b1aec0685a1697556013207f65b1be171794ae9812f402202557b77f271342b2affdc91c3bc6f939b9f6ff99ae44e36efd28451315833304 3044022036fbf663257ce0cf3bf617a4e17f13bcec214c481ab3285f11cdc410a0e498bc02201db10acd929ed5dd8120c50d0433e293b02ccf7d6760ed5bf1426ac592e97f61 3045022100bbd0f4fa0080279a09e33c9a03b43ad7786bd33a746552b7e81a9a78317bcec40220013774e5f54c6c31a0d081f4738b46f1475e19288b69fee78baae6d058f78697 3045022100ee45e5896529d7a7f9aeb7bef21b2cc34aeebab08cae4143b38bfd5aac4f356d02203e320a1cb81f77da1fda431e9fabadcfeb93017f7eddc4e47ad6974a6519408a 30460221009980c17e28c28850dcf112c01961847be8e186e3eb84c713cbb09bd66d678eb3022100bfbdd0cf4a21253c79bbb8f24ba18a14d2a8871c00463e85735afa6534e2c298 30460221008acc90c37c3b6ed0ab6b401576e6863b0e86651625e3c45845956dfdc6078bce022100a9dda714fb1e456eaf058a3ad411331d8c67de532479843b7e7cb9cc513bc99e 304402204b4ecaf8382d6484418b66d840f707f27dc0d2eec1129dbb7b47831ad3c90fef02204b54ee78c33d7aee39240c0058abc2aa827d98ccbe904c6dcf70702a6b52e6d7 30450221008c67d90be3407f5034da82dc9d343366528d78077d46449362238a9b2e7055c102201dae767ee98efbbab94d3fbdcc3f076882c9122f8e5029749d429c62643e8081 304402202269c798ab6dfc353ed114e6095d0ed91ea1da49d7a3c3bbf0d2f06e2d3629bf0220158e68184fd145eec5880465d86292131ceaea9d55d3aa370230181884a22fbd 30450221009d73bdef7248fcd939e3e265ec294d1fea2b1829ed8fda4896303fa33bc5500402206216b6c0862d598a2d80e7bcf121bf3f5f0f0f5edd073813ffbbe46f4369602b 3045022034b55620971f1d1c5b92fee2c9829c7927d5290b578d9664fa3c52f36c1b7a7702210083d909d16c8d82c6422556f257d412c49d9e18ff0ebc64b88df07d8bdc6e0edf 3045022100a403398d5a8103d69158ad5b75cc9d012a842858cd6090979bb8128391b6aa6a022065e0d87414874a97629435277f041693b0d0a6e9b7cb42d17bff91cc9e89a78f 30450221009233b43136922216f2560f320b511805cfe4927cbff469376b19ee4ef6f65edd02203583f68c0305bab87bcf386e5621137e4a7b7219725ba85f265d657cc120a354 3046022100e6c0f7f4f7d6161fafec1e7a986fe9922088c6eddd270dcb74bd3001c23bb119022100d05017678629d6c2fc0b7807bcfaced444f04e319f09119d2ae3e9c91fff8367 3046022100c9cd7a28dbf8b7a7a4e3d2102cd40ae23d3ab15cd067a1af81d7bf8951b4cba1022100b996657bafd9cef94bca38317425433c1e4dfe89fa14f9f0991f46cc8413a692 304602210086d63657b3773bab67470a0c35718250c5e0fb69f285c7160596249bdcc04e2f02210080cd41541208f6fc34e9265708b92cdad921a7d8eb19c57a5b929da1030c7888 
30440220282dd7b2a1f44922a6d01aab32f34b8b512117f8e101539a49b71945f8e6b3370220523adaf1e3ac9b1fd4b057c9faf79398b9cf5c29907eb4175918baa47aa7d8bc 3045022100f47e26f06cab2b6fd6f22cdbcaab18e81db2329820db32b957c464a072c395a7022019a6d73fc2acb62085bb2ab8af4e5b1375c3f856253cf17977c3702230394eeb 304502207f09db448607a3ab066fbd3a71a6c1ec4b636bc64dcf85a743914d3392bebf6d022100ed6a358beac6c1d145c0b440c48804f9e6fa485c51e390a6f152101a4f91504b 3044022066d86e7fc1a731bc0b0032a6a3154131a8d69e917968531bf739bad424e7e8f30220558ae50e4b7043ef0d4e4382133017570aa7920eebf187c87f170c4b04934a2d 3045022100d021eada2fa4c43d60a5e4eb796f83de30371493f45612e52dc686a8fc1fc81202205e663ee552572a43218c7e9f7ee6f54108bf18cec01c12dd17896dfbf872c82e 304402204a096add3ed7685054aa3718432bbe43fe33de8282d1937307d80383057b9dc802205122475af7b5269292b0a30d7fe08a113b5b0d5076064f30f0778d30384cfe5e 30450221009692c37a7c6a31f37de33fc6fad26a39386a72da6d6da36a4ce1ad53c07abe5502206c6dedccd0316277e117ab0ab7cc3ac83eefd8843e9e44c181e4e76f585b0a72 3046022100a812fff479dabc1919cfe8a7686d1e3dc922e16fb1aeb5f4ec78da33696e66930221008cc39c842df6f0d3d1c5965c1d0bac026d1fa7a070fe7a05b79fc060bc9afea1 3044022043b98ee6b0139c0b12ba163ee56c2c61fcef6c45d335ac8a86d516d01db5454f022022a4c32fc1c2df43fd22c757a1210274abad26dfd8e85f84bd1d9257ef4fe8dd 3045022010e1e4652624832d986aa56fae229623b982e7a6bd8347fa161f4a1298b776b00221009400518e5b56ac45d92545962275086cd1299d72e9f66059dd026128d4f20a8a 3046022100fcb35cec03c8506f40060c6fada367b3468da42acfdd3a0a09d23977269e70b00221009c02ec1c57cc818ec837b30ea7d3e006ca296404d59f0f82e83ff82f81108bb7 3045022062057c0e808fd22a65ea4f020fcca12270692b0d5164b8196407a66504a8442e022100e8e500c971ec8f1ec8b87225faa1acf57f6dbd380e88f9e4567f66e211a5a0ec 304402200baa8914e35364019e8b50708ef155f3d5e970859c0a7d9985a2e0e6a52753ea02205e95e897c9d100b33f2f1d2d5e4914bd5e8c4d18e802a197aae251d9113993db 3046022100ae9522e7e54859a6bc81fdaea79a3345d3b28949c55e78e81f9041943c17b500022100e10b434ac9373130d6564fc6e178ea14de16752fcce636dece3951084500f168 3044022002392ab56c3106de1ad2434b5abe2e59cedf0f4ae3d91aad194f2f2117ede13a02201457216b99d866a1858bd5f63a5da6676f52985496a1d93522f09984521bb6ad 304502204fa2ddb0296ff0b8f2123d0a1fac5e260e990adb9da305052fa0a7d7d213a1dc0221009746355b0fd301931561df4d7dcd5a7ff099326165290998c6e60f7267753d98 304402201c58b2f4e3b715527d1e9c1971366f7fca586a514629a255500ca29bf77f391402202071fb8ce39de961f2d0f1fa93443a9961f13d095a3baad7e5a97705c6b9e09d 3045022100e911098380d98e55dec995e3e30bf331a9385df0e1b465d312e437693ba3568d022078e6614df8fbb3a16c7b92875b073d1d3a89b8e854fe7ebefd55a6672b6228a6 3046022100bf56f0cc6f0c436056a003c282c2f5a6be5347f07e70bcf370b7810f59fe588e022100ce4ba17a1a83adcf73d8ab82fa2de0f11ffc55121fb58ce62b088d7a5cd8cb40 304502203298e4d09ad4a5e04166620831535e621721c86e2b455e969c9ec60e45d307b6022100b7578a88e4c83b827656e2b369fde267254ebccfae584a6ddaa686958192cb22 3043021f21c4ae9093a53001fe86364bf7f0dd3cb5e4f19635632ce29f9eefb783183202205bac5c847d386bba9a43016676001d4c163939948c6cc00de168bce79772dcbc 3045022050700139958d9db9ca193de7cf7096e6ef1124332f5b36bbdd640aef2676fc7b022100a837e6832a7320b04d36a261391105cc1a47ddabe9652916dd5275609f22faf1 3046022100fb9f141830da14d38f159944c7280254df9ea39709c39c1c48be004223d080b5022100a909c07a314ac2bcc019d0cc281eaa4bf6d2439cd1c32ddfa01720de77e7789c 3046022100a0fb0d3c9324bce321e16dd68a8b7359e9ba7fc2304dcd01704a9463eb68f240022100dbcff16cc15d8fc0442943a71521139e8efd283de6aa49b0846f2b66505d5653 
304402201edb80726de4f6bce292ed986aaa03716a7aa775c7c6e7ca8bb6d26ff9527f5f022056a5bc1d3ec0bd9dc6155b624207d11ea403008ecf51d143b26e2ecf85c58c5b 3046022100908865215353a76e6b100c652e6d32a080a59b5adb945051b62b0d163337cf010221009f15a4c6c9b030d493328d3d6e1c243e100051498bc7b2aa6c9b5c5c15408f8c 3045022062fa5ff2d13fb09044aefa45fedfd77b593e6a2355325b8c56a7f4113b6fcb0202210086df7f9b7bfa2a079620a9c338f5eaaf340ad7f575df0530e9982d60d6a00d1c 304502205ad53227c1eccbcb823490ea739e192a04b6ec954dff9f29d1a8ffe0496fac17022100a693992678edeb1b2fd125a0c97937af3c2a3a17f82ca3dd6ff83318ce9c533b 3046022100fba7abacafc3df714cc20d6adfc5a3e4ad438831cf872bc01eb0de2bdaf2496b02210099a37e6c9add72b8da715e1cf62f84b878d7f18bc009116d49d9b8d95d2e8dc3 3045022100c2a3ab851cafeb86e7c48f735ebcb38ed07852227849caa93640b58f827b7ea00220229d799d2eb602d1aacf18e654b0ab92ef3746a7449547cb34089c250ecf2dbb 3046022100c74401da92715c9d5359755589146191c9669461fc40267c9e661b0831b78310022100d2e4cb0f913b2c55165d435c6456f792ba5adb18b1b9e093ddc651150637c8d2 3045022100dd639a3eb48fcaab8546aa4d4f1b8f44808263ff1f49a6410ea0460d51aa99830220378f0d5d22a896b369bf9604b71b3cafca9fecde76864599fb2394a4dc1d6478 304502200ba6399f280f253c243f4acbeaa66756489f316aeb641d2a6b10e6307d935637022100dca85396550556e23be0814cd66641e3f668b3e40f5c8f0fbb39b14ce4e7224d 30440220726bc92eb9f6a2a98bccec38669f5159f4a3e80f5ccfb3d79694377d77e47a9f0220261c4e7fcbc6fe4bf6d700c5dd3711685774c46b87af70c6440128564d8b9fa8 304502201e4f5bdcc7dbf79b62565e5a02569be8539b31d8eaf64d5484637a4e70a64c86022100f202fa4072b0543a8c7d00a3280daba6f7700bde979776daef9914107945a39d 304402207d7bacd8b2b35b4a982278574452cd7f507617fd148dca0949fde53c4eb3fa7f02202d6bc17f0464f9f2dfe162274ff1308246a4d49019bcd800ee10ccc71dcffbab 304402204cd3d75c59ccce210c258c19ed25b842e0e0ff2e30763c71d31593b86208dd9402204ee48d1a9cebf3978b6311086dfe1fa3ff83a9de48fef72cac15b7d17df867fb 304502201bfc777cef90591d5e589552085fe813402e6a03f87f9dfc0c78ef23d9adba33022100fba750c6a59ad590fba2b8099eb553c8d60e5a10efc0c8bd2dc77df136f8b3dd 304502205c570a6e8e8f8598f55c01b9e8e92aadf11aa1e042ec1b680100aac72b359567022100a98d7f7187e1977e2f5f0ffc5fc6557bb7ae3290e4ee1a7f847149287832830e 3046022100c28b2a249cfb6875f85bf62ce6faf9acb47332440e6c6ba131e44e10b576fda1022100f5b844607c967cc8f9138b3ba4a36f5319f5472ef302a1f35247b747dcde426a 3046022100d93f769c25b59017de31b32638f111f2abfda7f7e812a3a582ad8474e2f268770221009dfae594dc0d18ce16704ce1e2fbe3bc840c3b108fcc0b382cf56517f211d341 3045022100d4d672585ddb43ebb6e832bfeb9d459c8eae67e1ed9bfd717f440ed8adf8bebf02203c53427472f4b74c4f1e128604f5ddb8b3e32af38ee759320e4a99f2842d04e8 3044022051c9e06583089e3756f5f3a76d3747e6c568028662ac36c44860607822c12c6202204d1691e7011fe7c6632b5bee419997cc09bb0badc46e13dd7baa997429cb5143 3046022100be5929267101c6ca18c6370b69c5a27591d91cebe157445e77dec264c10ae6b1022100fa60afa15d1b512111b09d399c9be5c594e379ffbbc881ba2260be2235f7b18b 3044022051dccbc0a8fa552b8655e1be433a56f65c5deeb7edcee380a3f2b30a12eba9b402203f61188ee47f92cbec4fb4afb59dd9248394d5a5dc785d4367ec0cf6c7b5be89 304402203a941b2b3a1005ac9cb9a484e36409b54b1b367c7d9df9a16854d95747e9b98a02203780c19c588578beceeda00d2538cf84ca8f9b17a39323ba2533354157f35660 304402203231a9bca259ba1cfe68ceb8e9f526a40643f866fc5ad4d304f3382ce7f6a88202205e43e0faa98dd83f0fa720937bc323677a14037f0ec1a6a1a28b0232500774f3 3045022100d6e16353d2fae517ce5be60872c0109b1529a70dae5c70770abd73d0798de7b7022053c52c0c8214ef897aaa1e3239bd682a4f55663118fcee438a310ba77abacee5 
3045022100dd4a6657131382b95ca76e58eefb88478a424879f037e9c136cc968123974f090220126b496a0599580e805241550f7803e29ab4fdb7792f7923972e9cb47d1fb9bd 304602210090dc29d5e4ba0bd237ef679204ba62774be0bbb8564761a0d12301772abea6d4022100ea630926061b27806d5a1cc791e36e36fe86de8c8b6debebbe3ee1a2c184f52a 3046022100fb046808d76da846589621715c6e633c9a47a1780d08e49698e0be394363bad702210097e98702344657a15c547d6a016086e6f881310c25398cf2c7de6d75194e920e 30450220551cb872636d75aed718412e07e3fcefb9d966eb2c7bb0931196bd09d955884e022100fc55e72af1a81c9bedcef61dd32d1f0253eac9525f82f0d1a5b0b55e056449ee 3045022100ef8b8486067f5aa01f1246b37723fecfa14b80d47669c36748242e84e0f595e6022074532eae53fd82218c02ec655d8407e834481b9ea675583b6db2824cf75a1056 3045022100bc7ff095c09dc27542069603282134869788b8c8820c1463ecd93456168ef3f902200e478621f039421a4e771cd5a5d9abeba7d8ff2fc7d0bc3e8d5c8e7b1ee1908c 3046022100a3b3842541c8560a390a700ae0e2e36a1e1ad1faeede0c630baa1a9a1b05bbbe022100b66701ffad88a34361f9c1ecfe8aa1c585692f13fd31720332170ad87900e458 304502204b4ab1ff3dd08d6962c640ca740612fd0d3b113cd2e97d3ae1775661d95cd12b0221009854ce2e7d4df9a19756a6f81d4b5061e5e2743aa502abebf42e3f2b383c7aa4 3046022100878c9dcd990a6db29386f57b415b5afebbcc0a19011c961b2456570d2f19b719022100a6dc4ab821a914e8ae9844e63c22cab1c3642f9b9b75ad4bfb71608985a49186 304502204e6999af0fc68cc4dc5d04fe00e91c235c30b75c8b1de00990bc873d6f323c0a022100ba2e6ee8e9c395b997d86f39f75f86d3c355de5c4f0e382b2bbd985e11aaed22 3045022100e3e4a09b953054ac5968c02e406e0f4bffe9c36579d5353cb86ae2a86e078b2e022031121b46828fa287f76846846d1870379afe64d8ba1bfc27cd14a143b4dcb7b0 304502203a25b645a2528925e0a678565900d6cbe9a80856d48b08faf0ee42f47678bbff022100f8176dad0acc79cc0dc614c26459507d586276f26feaa59db73cb19abf7d4ecc 3045022100c90e72f862971ec0ce9b7247f9e0dc7196c56b1640d71a0314e53a94b3e65fb5022010f005c087b96fa48de9ab33054d12af9cfa2c4a1aa1ba8da4da5ef72dada3ae 3046022100e751bfb2c3b5ce5153607d0d772e9d5bf1c78436ff9ceb9e163c258b4b1626ff02210081c64c7d97ad4820726eb0d5dd95c1414ea9834056914962f596221aed39403e 3045022100d76b31531766aaeb397e08a9b80c0ee0fe8f7b3b73860b829d5eb7d93f0f072502207a281c5aa0fd7a26ea4c8da38ee80ab2bb3b27e4c323c5d241e6d5cdb4b89490 304502203f5e9e9e9fd2a44068cb68d45d7b52f1361d5f2996c421cc72ff2dc40df46c63022100df3a9187a299a83fa724512e3301afdf501487d15ec59344552fee64c6488223 3044022003fafb146a94e66e312b16a92de8178c00cf8ca4ed6cf601fc056a47456ec20d022061796ebf125d6b3694cd77fccf71422bb89754518e51b3fd48854e9c7813e3c1 3046022100aecdde0e669a0d022ec6b0cc35e63a5b0a2e2063bf4bf96755a4780c8e886a10022100df889b68e9486440a42b264a3f5b15d5705cc16a4f6ccfa9cdc1820c7b9f3614 3045022006c9d169dc135556dd6ae35a1814e2728f3b43c2b7b7ed07dfaf616b88eb04cb022100f565bfbde972b85a787e3d08306d8299dc8f0d6933d2705e291282c0e7bf18f9 3045022016b5e7147dea9e0683f4a42653a39b31a7e886a85482cc4027d030513584a83a022100f4f0c349ad5d453a9f8b1f11b89c6cb759c81af3f6b2de35f2504b24fba73b5f 3046022100e2080712becab8f6494cf5fcbb30a1a3cb3de35aae433f362e0a2c6c5afeb504022100e502c0a24c728c41ec5f9a943cfa3f861845411d805420d60ce0e30963fa5a60 304502200895bc34f4eed4d157d94ba6069297f491d019583547d6d5b9c2a9402718de3f022100a08aa1f4b7338e669554596721fd7a74ceac19adf500834a7bcf6805650d43ed 304402200bd7e6610ecb47ee35ad620ef7f0d0551f5c6c7eaa1f096d87bd8ed8c54622b1022020c3edda6214c75f7c0b68d14c736b972e4cf9010aa2b1efc5afb2682b88ffc1 30440220318c6aa774ab591cb373f5bbd86abc1f6e6fbe06508ef8e274fb50e4ee66bf7f02200a8af0a82787e97a3cd44236c25028c120a43aed52637bbc86c28d40f57b9df6 
304502200fd1f8e383023c8d768480200b5caf4f08ae2b914401db9ce9b9c18e6633f72f022100cc36aae49abac016681451427db85d3099bc470bbe73d930911be05884f1b4d2 3046022100e07d6a6d432f3e3c0619a050e579530062fb794f8167ab43ea17f9ebf483bbb4022100b5f3589ca69e4c97d6ad50dce28acb100886ccca211bd81ac20df9c0ea78a918 3044022062e9d794b1d9b0476b7368a11cd965665441cb48f6c8dcd93f8e2954842244c402205ea0ed41353c26597857fb1d696fb6c5c6cc3abb5217fa1821a81bc45ca9d574 3046022100edcbfb63f436eb343708720f6c9edf90e92f706c8f1007a46d608921d0132214022100b1383928719f11a85ba9130426d7d6c8ca710f9f3737325a05fc7bb383672bd4 3045022100ba0f5010d1903b87d4d00cba3d046ca8d60a31738beac8fab576eaa5257c071902202c80cdba4cc52a34cf859cd185b3faf101310e92d725c732e08db82e42f9f9b6 30450221008059aea72c6ff4845476773a6e6ef6d0a161b5d37b618cbdc59b6c7edfed5e6502202b0aa59b988ffebb2a1ce8b081e7daf5311aa904d0d5cfaaa041b404cadd798e 30440220240e444d375a0affbe42ba8f8f198331e0efef81727e6beb7fc430b70feb016f02201a2110711eecfa972ffce39d9845a8c7d0b797d479e043640880f48b7108c2b6 304402201b757709e0965f59bb56969554823d3ffb03b85dcb332f1abc0ccdff99b14af302203f5988eeb361d0b312c5b9936c7a5079b97e526b210d2e42cc8ef37051d34d8c 3045022100f5c53570ace2192ad6528d9a50df396b3a5ca0ecd2a19b4a2d088d8b60fae9350220685b172d9bc46326b3eb904f83124004de73218dde27d14e99cd1e2c99b4809f 3045022017a7978a1cf6136f1db10a6027522b19f60c776fa8861c9054377c9a8b331c330221008e4714c244d77fdc557edc46aa63ff41da26382a6ded9c3d3102ba17f09d3039 3045022100bdd61dd3ea9ad53fb0e5543b2ca8682635a86d5affba890e613438b618510c4f022059d51f6c022f2eb09c774b573b2f47356688eafffadbcf4fd3529c0734481820 304402200c104a191bde91e5d6aabe25c88eee332014d2d4f92482596e24ad2a4a7817df02200eed692e19f3ff76cf7a15af680a716a16f5d7ee4cd60244728bb5ddc354ce26 3046022100d0f347250be37ecf6615354c8af3ce9154b5f74b366cdf727267d89931eef41f022100c6a9abb362de9b09e392a0eb4eb211bb9eed978283461d94b5176ddc642cdb2c 30450221009337a3fcfd0111bc0f7a9889ccee9d412d1e9a2b4d10bc272c33ff929b24f2ab02206e71fba79132e67016310b933c149752dfb5915dfd5e555ac0be3b2604d5c67f 3045022100e8610a6c78e13c58db0babd1218c9f39b4ec7e5ece5d740de8487ecbaf0cf6bd022013c198f72344fd06815e755fc68ebb75b5e3030e4de4aa96db842169aa6c486c 3045022100d7c42c5440165959076c9f0ac25a3c6323628b47d95579ddd64cf1c80d4304080220136a6d0a43e4f44bb70245a65940dfad75f88aad5b78d3b83cdd74e859c4da4f 304402203ed6cc744da6aa451bf49c714ac79cbcf3c28b43913e2a51dedf6703e71bb34f0220389bb85e2f5aa9785d3e89b6e7524133948bfe635b00b8433c53ecad04fabe44 30440220468ad4ec3db73987dbdc144c38039f6feaaf9074f4ef1bc27ebf1169fbe3d0cc02204f50909a9fd84765b7cd9e4bee0b7e668b4ba692ccf64e5d286625f53c735389 3046022100ab4f4f759c3d7babf870977b14e415014c85ad169c9a14a04914bd4828860e5b022100ceafc990b005462e2fe4373d4872225270cdc5bb4462e29b166fdb17e298d34f 3046022100967f6f7202cd0eb307f18d65c775ea88d0b55d9f5e9d45e00bf3b18c86c9ae9c022100c66df69f83214dd4f2539241ae27147a60344ff9c04c84fe6ec20c030e27b6b5 3045022100babbf7622964faba20bc76db8744b3103656b39f8b469824e99216b965b462a6022062ec02212c8d012038279490e14968f861169e4c37f092b16fa3918f8c81cbf7 304402201d80c37afa2a5693c32f1a15896aee98056c4084fc821dabbb39201fefa42be202207e46a0cb33449480cf86d1b2378f20674d1ad9b039da37425c43e5d64497e8a6 3045022100844dc48c11bfebe35d143b303ca31d8991201cdd203c4e525a2602c2cb1d6cbf02201e8c2d59e818c1b35462c9860eff6e50ed8d00d146739aef9cffd66538a4d217 304502202c1a0a659a0c90de9031f9dd75d6182736c731501e410a065ef6d3a08657baa60221009bd2201844caf9741ead8aea5d5dedc0bc14c7e3a5ad6bb4ccaf706bcb780bc0 
304502210094582caa26ead02b513d580a190e8fffd9cb927a54e98ff0f6d40dad6e0e78eb02207a7e577917be26b705bfa099d51320662fcc3c840121737ccc511900a2e1b314 30440220309c7eddc334a6ebc9c881461edf684725ce85a8440464646939947d5a81291a022049320e9f53453ef16ec05c37257c3da994c0092fbb52803a0af593884e8d6b49 3044022045af09cd865ce156b77beaca12c5d8534a8d68d38767d3ddc7cd479a30787b71022004b136da1119de37285ef567ea75030725fb36f4c7ea420db305cd50645721f1 304502203225229ef9620f6f6f67795be70a1283ce02379bdd8eee4e0ebb998f00509aba02210093edfdc2b995a0f228977fb05d71c945ae992963cfef99df08fb4160efc7ddbb 304502204a76ba890adbb918c18947e4b929dd8b44ce9837cc9861900cde8aa3ffaee900022100f0002dbc41b72a0206950249d6586080ccaaff82d3ca7570ef60bcc1bd186b1e 3045022038e253938b655a33f306bc434212b64af0b4c8e668cdf101d7ee985349f4a78a022100a69cc87fecfb93effa747fe171f3f691d4048e4c117ce234a89a8537d455cc8c 30450221009a490a2684c12005c14d31752b88282b11244299a3f30b0a828a61a94d184ac302207f2a779e6d325f6442c8fe1982c2305bddc344e31c0d9a1eb89b31fa927151f2 30440220239d278f2e2214efb0a34a68f454d423e02f5cc796200b5c771e33ea35364d97022046a9b473cda3be4e3107d263f2f7ef5df2abb0404600df17022ae1046e96c761 3045022100a6d6cc1b226da3a39f7f23c3ece3519fe112d9a4104d7972b62bde7ae29db77a02207775b52aae691c05507a7ca528df55d38cf995762fae66ae423c4e1ddf232a37 3046022100a42c640208b0b9c2a29ee172145bb99e4a9a4c612f2c26ede6d9da7e420f0bac022100a55945e79ed7a7ac7c458548917225629d2002e71cf4d987d7c8f38626fe8f64 30450220167b9a0e882b5fdfd05a917df2caff4d0ce0147553b62805590ca1072c26b48702210089e48e37348d85f02057307c911dc056f0a8b78bb06fd6444d51ae2c4ef7a547 30450220076bce9e0055d78a6eaa86fc8a8a89e97985f51e4135e86f1315a1d1b6df2fbe022100bc64abb3ed6e270aa55cc283a9a689d8da2755492b8363f3652e6620038cfc25 3045022040d880923f090e9909281b7fff8cea0877f01f94bea10a507f42cff2cd32ae930221008ea9b7dffe6d16e98d57bec1b82b1825eddcb802aa5f9e0019aa85f1db7fa8b4 304502203184ed66b86ea9a62c0e93ca9e503523f03c6af3ba284605d51013493daefe3e022100df97172df8bbca79707d8cb8cd8e6296548e3c9b20457b9067df987c757951df 3044022029c945ae1d761ea14c30f0e6dc6a835dd3563bab72412f33d574f7b8ed8bdf8302207fb47b1acf743735fe996c5b11282f8436954e9a189b4233b17b8d38e94e84b7 3045022100cd32edc14b9c691839ba0e8538f563466f6da73865106330ecee0376928bd28a022077e617d6e52490198cc3aff8c89956670721195a86dc6978eb993794854306d2 304502202cc8d19f90f465db4b68907ed927a7ca82839e4204ab51483f1ca66273d2f57d022100841c6302ca04412654c7ed4e9068316246687fa0781f516ffb3c05c5c907e294 30440220513854cc42b489234497fa116137dc88c57aa25295b317719e5f2262a7b9067f02207a4a4cb05c53d2044eff1b5a52c2f277275bcc14459368384d16dddc7330def9 3046022100c86c90cf5e0de764c5aec02c47dae0735ac7964d3bb5bb4d339cb2183b6ab03a022100c5290b4685ee29484f733b901fe117e45b2dbc99d2d5bace5cfb2f2112820194 3046022100dd6dee6861c0199a5ce03d8569b1c18343ae038ed0ded4d8e4ec145b30419d14022100b1c427b7c42487921747002e00faa86eb16aea68d725a2d9879ffd9564d53f93 3046022100917581cda9c5929e7e491a3a00332bfecbfadd625f20f3eaed51939bad33fd490221008a79281d4a518a25a1af744c7e3ee7c843287f052d200a1ab4accc850c067157 30460221008f67730d1277ed50bddc8710642748b7c6dbf277139e8317e795dfa2934154ec0221009deea711493c60d22221933f98324b6e4cd3c63b2eb2b802eface4fd3782302a 30450221009a2b27e30f28e58ce4b47aa18865e6e0db274bd56292ada320fb5f5f03d33f9102202a90be3b20005a21f9e479dac859d604e3e8cf0f36ca350d0ea15d4972abf53a 304502203546bf2f160146d3f3627e31a05b91edfcd788865a771087b1aae1403747b60c022100ab1145b42f16fbf0da07c6ac76f6280013480406e29d7b2f64cacd30ebc7956f 
3045022100a5cf8d703a057a29f7c6ec9228b2ee6300874f48bfdb01eb0872cc676d87e59902206274ec8835a219a66b3485418ef8244205e68af8ad99bea639abb6d02c882345 3046022100b83990910d28f77bc6c5742a6e802ec38f65194278506737b5dcda7614cb4843022100fd2244b9bc50e2a7a64d501a4de2dc782f7b3a754cacf0968c984cd575466da7 3045022100e5e54c4b784f0a9b83a11a11000192631f3c89948717d997d61cd7f04fff797d0220474361cabafa98153081b97f1b28b327099a68cfc227f2b2206f1b4f63c66b13 3045022100d765b0ac63dbe53d7a647a1a0963b664b7665fe609ef3ac18657157f37d8bdb902204f71e0e6c7f1207ed73b51ad9f75fa587ff5dc2c46ab0f238d1b624c651e4c78 304502210083344cb82a96fbca2b22193cdd29cf846036167c3629c95e32a699d317a115f002200c5aeffe72b873175169473cd8791cf93f67e9846fc814a976306fb4e064a21d 3046022100c199f8f892a7a36668ad057531bc8320f0aa774db14eba1df7812c3e54e706fd02210090a7b266274354c193fd3ab87db0b87c9fae21272170baa55d951bef1bd11f5d 304402201cef9a60bfa73c43f91e7eb93550be49c05adf6080b9c07ff260ef6d5374e68a02202f8892bb334b9be8a63e4a6d96e8718a873a165f079b53dc3cb0ca3b084b29dd 304402202fc4f8ced6e4b933731405272e1796d960ccb6e8a44979f9f1851908cbeebaf102207886bff294375f715c60c615ffa029e39e403f3094e141cc60f6bf85a1b1e9a5 304502202d7e7b5ec996ae62909ad184da641eb3111f00f9c798f05b14e20940cafb46560221009f94a794b4a100d53079c28d8eacff1704a3fdc78e619538ec6a991e1f22572a 304402200e5ae0292e53abdb4321d3009b61def2be759a698240528573ecee05698b23290220551b2805957de12fe64563a95c1619fd3f4113ac357c2340f84245f882e9c776 304402202c3afc688721424e98ce7ee7738b69c11fd998bebd24dcb384d1f4d1c4b4d49e022051046c0a3e93ab2361b9453256398257e8673d07b304820fdd206b5edd487291 3045022035c12b78ea9a44b4f28f5f2468a320d9b7b65ee937646fe8acbbf50d93760f27022100bf43c01ed3cd07eb95f1b3a41a602eaf181b613cc02b9f57f195730fb794fb13 30450220746899ec45c2270cc2b59d4bd3a3e511c93964a0070caf12f889ee52f05fbf76022100f315f1f316d21fadc1e1a16a905413f9ff6ba8578429a284fcb75851d51b7971 304502205b848e3fbbc3cb5c1aafc5194ee2548f97ca1573d06be5a9d3877648b1ff01a4022100a2fdea6319738374f29aa2e75b8a1ac4453eb648dbd8ca485cdfc0e22f0e806f 3045022066285822eb771e22e83870806d174d3b038b4d4eb2c8bc7b1af2aa7645d648fe022100f11399ce8d67ef5675a6f0ec45d33474d2af166478347fba72c56d74f364b93f 304502202446e121dd65cdf4ec54f38129897b9aebf90a8b4590897295581927c0036ca0022100a1777febbd8df7f1f1ee806a3ced7302e4a156e68fd3c7ab27de07691a746a57 3044022034181760b229b4587a0e6d9596165e2c707d0dcaf68a99e2eb60abc9496e9da90220671c2154c09771a2c77f1f88c1a5167f4830bbde685204599220ef1ca19bb78d 3045022100d9f4e3eb0c85761acd387795782e900d010065edd043864409ce21c3a512e556022078cc22ece829beaf7b69a3225cb36a191504f698904cf5fc03307d5b557f3be5 3045022057f01bd14c452897360a83ea22d6290b5034490bf7f03a187ae47d7d0edee30d022100ad9855541b33ae8e3a44aab0bf432e86cf6648a62ff60eac969670c7a9222e58 304402206e12ab3ead84e909d174594915c18676d23437e9b0987905262a5eafd0b77b710220045e716f404e73e4a22a7a5acf7c9754aaf5378551a79c5d258b487d2851470b 3044022004fa6a217880370e13d0ee51107e74c396f308d96392d266405155bfeeff00f9022021322a5169f8660dcfee971a6174404ec0f1cded50882fcff8ce859846be0b95 3045022100d2e3ac53e9f025f11a4783889fe6425751d2a8f3cd7cfce1aa8227ccc0760d9102204327c56999301e5d53ad979918509ac799c0a8a774d84ddadddba5a74960e992 30460221009c2084cdb86d1b94fbbe445fdd5bc98408c4da6502a402922aa6af6da4d24db3022100e1fc76abc9a373af34fc9a47c83aeac6db573908529fd36b9134aa010a6febbd 3046022100cf5230947d3729678c7bd31b36a5f432fd9067beee01558a1cd14505ee7f7035022100c250c0032edffd8e8625ff954e42ed4532db075f835832c8cd1c316368d557cd 
3046022100b18afcc19a1c599d8766522967aead0369fa844ca6ff253dec1bc35fc10e8d56022100d296327a25ff9d4979828a616e36a69f0df7b3fd9166810342dbb28558da6143 3046022100b7f5af215f0d642c0fb5dcec0d0b1e725a509705fa8d5b39754833ac6e47714e02210095134ad0225a0489bca4e0edd024cdc4612def88c54cdcb6cc8264498db3d19f 304502202a98de771753dfe420c9e7b72da95ac3f3719bf359ebaf0ae9786a6e9db1afb90221009e6fdd68fdcb65906978209ff5eed0952b8f06376e392cf5e45142d39efd1235 3045022100e677fd02435bbadb053cfa15410a91ff4745f9e82049ed252244dc26d630bdec02200cadfa0a0404cd5b8cffae419970e9c983e6653645046135198fdfa0e4efce56 304502210088de037c73b0af9bff315102402c09523d2571765be0253ae243e322bd373d6202205a141cefc418b8a8fba6e80bb9352d7ae8946750b71f7dc9a2f220554fa2f4c9 3046022100bf14c9cf92db6adb61934d50374ccded3eeecdbede4fee625f14739120d4ba55022100a1511b09b17d0126d0be0c2ee29211713002c0697e958e2349d03bbb46ee02ee 304402204d91c7d50776d1c30e032bf8033d92bf59f0ae2087f09528576e73d04b642ff702201f848504bb179bc16179a58c5f824a01b82f5807d7f6ef715fc7b1a7e7b83ff5 304502202af85362e042cfcaea6af8109b40143839968d191a48c1a1cb2db77f8105a9a0022100aebd8eea6192c7e42113affb5450ae3c5b1cf155cfab6bef1c9fe243a29c9eb2 3046022100a365bfe1377e5e74e2192f73de200e84bf56ee741e8350e87ff596b324abbfe7022100b3089a5b7a0c94f6833bdf64d739a92819f1f4359bc0976fd533729b7df3070f 3046022100978f6482b22d8d8567fd9ea542cbc6e6bf3e4a95c5f04213cb5777d43d5990c9022100ef25b9e9ea15f2c4d04a028d185ab5a90f6f18a1a0c9a5c016b3af70d4458391 3044022035c6dc65fde968579e82ee736ef26b43f5bcfb5bde1e73e782cf72c9c4bec90702204dcd59bf18d64ea42f1eff61f48ade39d4c047eafbaa4216661dccc04515c64f 3044022008b24bd106286df4a23ce03c185b48458f3987d359851453956512ed641717db02205f4f15e6a20f868a6c8b9bc0ff593e0902ea55d336d2b64ceee08a58652dde93 3046022100a2aed1ffd303df397f406b3fa35b2a9d07e574470f14ee92eba95296ebe11b9d022100fd0d37925a4c96025863737c1264e84ca42a19cd9b8ba0a50c6e17cc27cd4bd7 304402207b494d0daf414cc855c6390d61f505c30f08dc78982e0f6675c6a688e161fa88022078c3540ddeaaa9281f21f018f5e5016590594f2fe2a0cb0f97e37af0339f7114 304402207aefa50ac9f771571285202a342df2c2134873b299bef5f71619d3af3ca01aaf022052a263063df6ed1c46af9909e5e42a4796608dbfcd717090995c7a28e9a5a4f3 3045022100fb9c16e7f836188d39379739783796477dc74e54d6fe13a263a112913bf2e49b022020c2f9169bceb83e8d59743649a6910d548979705685394ab23aed72e0378b13 30450220138fdea6e21cf49b0e84e63e2f38f59c4a4d7da8aef33dd22e9648bd6f939a23022100aa03066ffaff436353a64a76aa9da9f0b2e730fd66d3e904a18d348094b0c278 304502203752e498b07b5ffcbe10a70ba8edd1c680651883d2c26048d9c5ab22a9e37de6022100cb299f9f36ca82f5f34e43f3963ceb858378daf32b12ad2d563b0a647e451f93 30450221008ac24d95890574c398798d2ea0d56fa86b219d269bd2d4d4dd98e830cdcfc020022039390bec26b6cabc62a23ef8a00f5c7c6f60e4445c817c8bd8c7833eeb7eff1b 30460221009bb087ac57c877f951a3a0a9dccc24f42a96eb5961e034c18836e0a98856c2e7022100f528d39de1600b516f83268eedd8f2b8534fabaeb1eb5d19261545eca7eeb9b4 3046022100810da212325f181e6a8dd457c751c68099059215bee8fe96a40f6723d243a45c022100917d8a56d27f03017ef7e8cc0f2130291cca29c6e8f9478e1f2f7463af1361a3 3045022100c4e32c725fb0325a812b054452d8c5b85d2f0d1b623ce96414e518ca45d5940202201a9d391fda9f2dae5c6bdcc470df203de5cc3491bdf10613446b4e2011da1bfa 3044022014c4620d4e8bddbdcee2a0fca93d33752e92ef62079649db1bfbf239caae16c90220100f8fb22c43be6e17a666921233fa553f8fc14e78c58f339c5bf3a73a8a72ff 3045022100851cd4a12d17d143fb8136ef54180fbd725513fbc3aa36ed89e6c47d798584e802202c4cb7eaf7e4eca5cc98ca9e167d99b509516010c793ed2f4146aec625fcab6a 
3046022100c6993af655084a29a66ed68de78de5d933bd3bbea8a8c67bdce20f6b8362a09e022100c7230addaeb1d74544f0443c3f8b97dc5a2abb25c92dd8a67ec0ae91b36f7635 3045022069c5470d7510607b42d4a57510f68b0fd1ff1ad0cd1f9c785ab46294b3e08d2f022100fbd04bedca5ac6d04045ca55a11a0e5e7ecaa27d93d1df6074852f3f289caa67 3045022050e17bccf78320541be2a1efe8e2803cc00474ca8b581e6a7381e43494f1080b022100a71b2ce42d38a1f6f431deb60f177bb3b9266b2acfb7bd46389f36a9138dfdc8 3045022100cc7f94f27752b1482fd590db7d8af817f6219329fea646fc1dbbd86c89ef9cbf022024fcdc2a8a0d102f8dc40c1cb36a32e1feea8b96a83d890a50bf93becf1dfafb 3046022100b3607c9acc404e8b9c0c8932ee8289fdb85dc0ff9f2b7189b6eb462754f0a6c8022100e3a632a36e2fbf29bc78b792f13f7213d0e38dbb09a80f6b9476f849bc6a14e0 30450220438ef09d2741801acb1c13645f05d8539018034b5d65e07521624733fbf82a46022100d6876ed46a492fcd443dbe1311c93e2a1aba16232ce4f4d826bfd0716cb5dcc4 304402200b6c36c8c3e167e808cc19fd1f0f8b9275551d70b581c16a7b459217d853c5be022004629da13705e188cf1fd61a6dbab83d2308a5a9fdc96d9e39ac600ccd3bc763 3046022100894f880e30d98751531d5eb726c9f19c2cd449677a3498a9a0933f212fcb69a40221009a9fc643a9a43e721ff33574624b4a3d97e702510fb4f88377d09733b0bd5e82 3046022100a539e34c9855456f7b7f7600b274a3360b1272552638b56228c0ed4f611c3396022100892f0fd4fc94c4097d7a2114ae744bca0c7efb35242f4cc84891df7e8fe7dd3b 3045022061a2635a8819fd45a0537198b05cc9d8073b0fb390c8aad804ba4bacc8969be8022100a8566726159b2d6b6c4780b90b9875ea6395567468ab1cacb06087c97f4c62bd 3045022100bd54d6ffbb63007fa9dc58a0d31027058b6a4a747d8c5d8497cb03bb8152fc690220508f5837eafb519742697c49f1966964ad0efe78462a3458b9d8ea5c0a722b42 3045022013ef26d7c1cc3e80b30eb35c1e5bc8bd288440b62a602893b9bd44ab78ccce03022100e6a0a41551f743716b92e83745ecaae6cdd8c76914e95f5cd313d96fd6f66351 3046022100c2a0289292e66a9f64dbf5f61ed3a90a671bd073bee4fdd5f5d108e513d8f0e00221009d033590849c6757d8df27211cbb458c75fb2ae361544560ff287c4da0af5afd 30440220384823c42e7042a0634f3c15bc6332c137227f7cb27dc17ca92bed187898efeb022018d3adea7149ae392d7fecd56738edb123adcfe77838a92bc2a2bcd2004fe3c9 304502210080efefb27bb0df7a3ec62c73db19c62e3a9ef407060ebaf29f681fd52013b56502201dd5049ff34a0c05ad3ba73c75ace74ffb21f0c0ce5ea8ab66402eff10a42602 3045022078a0ae241cc521923fbf98a72a773c7655dcc6ce264f9460833ee6a73fc17e12022100c2c69533aaf93296ec260a86e7c4f94226333d49bafa9ec66534b56687486810 30450220720e20a09c9a1284ec952ec6ce55e4e1210ea6687ff3b9f5b295cf0fca204faa02210092ebf4aec70aedde2a2c366027dce1f6822ac3ced73749eac3d82908495d440c 304402207354777d21113dc74a668ed01248d42fdcf02cddeaa0bd69fe7785f0bec0c93e02205aa82987f996e676b253f0467123a52308772ed3b942fb925fd5b3448798481a 3045022100e86ebee4682fbb3d159f90c46c3554fc310103ff7577b8f07b938672f36fa36d02203cca80d809bae06d4d9d5f6ad2a7b692f35116709a96b84a9066dc7419f70d99 3045022100e0d802fdb8756e47808a0ed28bb0532e14d2678b611e8088ef3f1aa976ce487002207b5311c29a785d9faa38e8566a753502a595b996b9ad18b4b6ce947e4802ca6f 304502210095d283b609df2a454f0b4f1430c6c3d2640b53f38276bfb477ab6af72614be4a02202cba98ce09714050520312180a10faebce82fceebd99dfc9911806dc336710f6 3046022100d96b7f61887bff77410289167c918bca6338fa60d6e4d59a0b9ea05f5acdc922022100bc85ebc129e179c7779e70852a5aa95765368e26b6b8e49ecbd45c08ad2c0e79 30440220284a944b46e99c1fe0c08bc0f0cfe1f7c1fb557f44b257de8187eef10506de4e02200b12a37f1f867ba441ddfc36aed22a4598506e5cb9b328f1349be98b5df7bbaa 3045022004fd1899051fbb5f7d7a8ed413460a2dcf7e71257d2055843845284bc110725b022100e9cb931b8c246b1d6fd1658c0cf0b57a2b1897217e2bf78b951ce597d0c2f704 
3046022100ff2df8e41ca73b5e87c02224e7d12300896e7d8f21b27ea5f475c4701e469e8b022100e456666849de094177e8ed37f05861f2ab0be5adee4e893553b3740582e26f1c 3045022100cb54b2f19982f7930e0b39ba756de6d93962208077b166caef22484edb71e8a20220550f7ed1316833a18c0b6b19e33a85d3b08ce58242c2330c27071eaafdd3d56c 3046022100f284ae06c9a333de1836827054f791bbbd40593a09451e290d8f06a588dc5f5b0221008a25dd08f699b5fff380572d908758ea74c06647b4765c1e66e460099ac8e8e6 3044022059b9cdef1a2428fa12cda0a34207f97003daf21a8aa424e613e9ed8acf90e2bc02201bcfb596055debfb1a260c02a662cabe3c8bfec21c8bb59356fc963afb28ab65 3045022100e6caf1d25dc2cee8e1927cf48bfe0dfd5b0e98758a4e5d89268a40d872bdc2e302206a9b97a2e1a86f373f8b26b50f39dfc99b9e34c2de64aa668c00c7782cba3631 3044022004159cfec0df4850de3d67db4f7a192df334a4db45f2ac57c14115e2d81e84000220622a801b8bbc207eaa222ba1d5fb8a044a3512acaea7fc50bd500386569b4b2b 304402206dbd913ef33b332e30d5346902cfec09f1663974c6501c79ed1345185da38893022040455d1fe6ff4804e72ba1f2b6b257f5fc5ffa9ab6d29c75e4a90a2603e0b0f1 3045022100ce57c2d8b29fb4568834b23b863edfc3d6553cd964c1e324bdb95babede277160220091620ba1e45daeb3bae9a50990a67137608ddd22c29566b3871ef9bc1fda42f 30450220604253fc47de748f35d32aa0e16a024e957ccd5cdff1e49095e768f9cc547c15022100e41026421186421fff4c1a30a8ad3647fb478d50b7414b86e786e20a27a32254 30450220063421d9790473e028dee611adadab3be1a61ca2c5610d5f698a967ad25a146b022100dcf9b4165e49852a12b5359f7b26b8340e674700e9ee35f04f14518ad5e68632 30450221009a466b0c21a4a402683ce5ddaf398ffb8c94e7830047508e4e895422e27a84c1022048aa84eb2c580f162c7b7be401f097d4e69c3708e969954a8a1644871159f1c2 3046022100a458c2b059519b12c4f444f6116c68feb78f2a0a274b80435eaf607304d5b569022100d6c1c129c218e8ebe6576225c397a4687f1937ae32a2db3a41ab80e207f983a2 304402207b025fceb777eff2eeb19536ada2680ce06d4888c9330790c76f08abf252559f02204cb5df109aba6f60c03f026ffc8b5d232aa9763b4712b33b62d041a7386f8d22 304402204e886e24f5131d855bf20fe7f2f831dbf7cbbda970bde054aefcb38a22e4b70302202ad613490dab7e4a59da35615887505d2046953242837c0c231a0ca24696db2a 304502200e0b07d488707169ce9f1136a76007c8e8961baf2ca437b73779c7bd8d479ffb022100ccae9edc29df01bbab8739f04999cbb60815ff7050f33a336e3a1466aa432c02 304502202356a6b3ab4e3f07d8e7002b13ef45004bc1e95ece723ba1231b0c896f318e99022100a5dd5c3a64bd43e5873074a5078366c348c1a0465f393685f1ff5b82fd111f3e 30450220786af99c7914fbd0410767aaad27eef1335a96e8e0a89bc2f9f85c0c51df59c402210098fbdbd0e992837914ae2079c463011bdc5585521370d24acc42633f40f79602 3045022100ca4b6b8e5db5b69eb96f45e9decac450d39c5b90fd1a943674ab9525fd9cc800022024a53f3d33cac0d435e086c1a13ea6e931642959ded10e2b9acdd5e7741c3092 30450220564e5dce21e13a13576a470d3b89b3e7b6e3ac009adcde87d4ee3ff51a92fedc022100ba04830b029c8dfd76d82b62f2c342ced94911f69e33d6886926adc184cc7ecc 3045022100d6b8dd9142fc54b8b98d3105938fd3fcb6766c71babff577666844299a5ff35a0220137bcdcb9313df9f26839743cdd075944ed43771a2825721b89fd240d4bbcbe6 3046022100b723d1e3729950b5309359ba1f79021c33d89e6543692f8718f763bb46b71168022100c582b1fa6afb3da4a72bec18f82f44e56056aee0d8ff683b997f57a0ae994f02 3045022100998d9c969493d275ea8a3074272346f8cb03e4ed6b7a5994cfe8b50b4123bde2022067720062fdbb6874a9f0b901cf2c2d7a953cab434632b0ba8da3275273ef7d40 3046022100c0dfe0ec7a26db6a509df01dbe803facb669c9a75905f93835beaf32d020d375022100c4b1ed13f4854973e0ac0d415fb36433e794611d8fefe103a8592067293a2e09 3044022068da63683d96efc583db4502321dc699d53f83680165d4315dafa1ea03a794660220544c6ca01c9868c3d5188ba6a4c8562739bb78f015173348d18325056aa751f4 
304402201798cb2d7a1907d55eed069eb092fbbbc0199d4656779d5dbfe2f2a2ce2af45402206a3af3b2615d35be8d5de240598a51897d8ba83a94bd98d5db92d7534a66955f 3045022100cb63756ce52104054bbae2b02edf5994cf2b3566df6656e20454afb6f7f2f2c1022028bd1401a67083cffbe1503613de54b26ad8d5d48941646cf05200630264c1b5 304502200121d6a53eec3346fad1200fbbc89645f27aee129af9e7169c86468cfc21e191022100bc14f4240ae2c069e476e3906a158a9cc5481000d71b68182c281f7298c2516f 3046022100d17f169adb9b64fde603d6aed9fa7dc79480ef5ac37d62604d0c38fd5b647d3e022100fae2b71d38051b567cb6b5394f427772b9c9ea4d1a932661a0e650710b2680e4 3046022100a0b79cf165029a07fe112c86055c65564d7e28451c231d94b51b9995eddc4815022100ea7a1fd1ecb18632613c8811ec27b261a102439d1c02829ce2d82a36f27fe198 3045022100a302e1628fa86966384cc1827731d03ccd51aa2fd3b07ff11020acc4bfd6d1cc02204e05511207ba37f3d522748992f0d18a1b2ba816e6697d8caa9a6869568e3030 304502200965a2a69726faef2a3400a65a564e5c26936ec3fd8d98105f26a29d347e338a022100a9961dbe5b8a3fc8cfcedc54460c2fc9fa66f6148d1948e63865eee55861506c 3045022100d87bbe64a8e3352be15577da9b377181c969ee8f0162d55c41272546b87e444902203dc0fe6aa80805a56896c5cb69793c63267d89488c02d3221c00cb0c468c5b90 3046022100dc56f6f21c9270f71ba54214fbb55f08eea98959e846e46e35e97e0e0ff8df51022100bf10d1c544cc2723f2fb67ee1c8c40c376c9f800dcd72a3f11a70f75b3a0f951 304402202620881d0a97947c7ccb30a87ea53c93f87a527cd37e36bdf8207eaefa9a653c02203047d1f6bedda219512a5a2ac9c787c7b864c523c94234e85beb5856248374eb 3044022035baa487e8fe7d4e1bd662f31df1e2cb1c72ea12339e24ad336c26108146d0dc02201bc1722529e3390b06753534a6cee05910053decd2a3c1256154162cd1bd17be 304402206adf745d458017a85a9a85e144f2352e477101abcb4b02b1969b4de6ac7e2f2c022041dde70c620fbf3ea6c2c370db1913697b8efba3a6a2428dfd905bb0d86840df 30460221008abf87240b60b82b9be93b73a95d3db5dac31ee22f614b24abe7da48e0285209022100fb3b98bdb2061883e18ec4651de085d3e756bf763114b51be7d692b62ff49ee3 3046022100b7b9413fcc0a2bbeeeed0f2d900b9cd30b5a437f879bbcdd62e504de10b7d04e022100fdd9f6b92e4b3fe6848ce076d9afe0706a44e46e9bc56249c1b64a6052e4dcb4 3046022100d714a81eb6086aabfd19ce41094828399f3f9d59c0f32c1b5c48d25b37095443022100a9d3e377a6d4faa398a872d8d91655b64fda823fce781e6cdd65194342a33b68 30460221008fec9171bc91aee8da12dce41f90118b67fa618db4de18a5412d5f5fc4ef3da90221009c0a594b7091973aa57a28c48a7d5c9ee3c5a043806484066633849ac309d487 304602210084fcd48146462125b80c3a182ef038e6f7d15ea9a79852c672618e26ac5ecf0f022100dc13218b5f13f72fed339d9de8663565f352926eefd564b00ce0472dc3a5be0d 3046022100d2261abdbfbe0db9f594f32213a876a300279e3a20e98f3d0da0d4ca2bf4284d022100da301fd4cee167ed5039d624fbdcad96b69712ff68d82b9bc03ded8ffab3ac94 3046022100c78b53305013ffeda724e1f1bb32ff2fef30d735dfcff93ff37576e97f4f0e59022100ee5f51047864e3b76fd7f299ba5137a65dc98d31ce0e2dd97cc8d0fc60329e58 304502201775f4f8744e45bfb1929cb8d9a6ebd45bca026211449fe4ab8ab51fbc1b7f07022100f4b83fa603a98f77a24323faa3009a263a852ac290403a299498c446dbd6137f 3046022100b9ba269c9c0a98d484d786aef94e4e8dbfc867a9bf0e391d335d967e631dc6bc022100f1407a64214b406b3139b863cb6f7b0e739adb27e56e9c35ed3ae029cdbbf9a3 304502201fa5b205325fee5586da91f49483cabf25c1dae0f207958f9402463831db6014022100d0e0c393772e56cd2de82a9939bed462e9d0a9c59eeb606fa893c778a994bd9e 3046022100a42c99bc3c735934bea865835e3ffed4541b9a61c1376e8d68f8122cee0fa4c7022100ea28957e79ac526519ac0120e8c491c785b8cb48e8cc72f7614f9286ed815b22 304502201a69eedeff48a3c909bb77f6b7840f7002953ba07e1d5cdf6be73222e70c058e02210090b62ccd6cda294e164282d0f1f271ecc7ff08e217778bcdaae2c996bccb145f 
3044022019a41445f1a61978b01baab41e6ef7a2f23f968c291a34f50135826206a44f63022059410331023b65bb75c562305b94cb461a2bf5dd70a7ab15588e6611fb4f0739 304502200a3a368eb35a2907a42bb780fc3132993566b603274f33507d5e798cba9691d502210090e58ad3d3b111ef9f32ae59093e2973c9065753a1b573420f8a911f86dfcb8b 304502202e7a3f639726c2cd83f9b82a4445ce386bb46c78507df871343777029548afa5022100f44179ee34f7bdc6e5ddbfaf948f554e6de3b745fea912b52d7a993e8db7c6be 3045022074ea4b3aa23dfffdaaed53b0d9a0114c39ba33d08648a7885f3dcff207e55bb1022100fe5de01dad5c3cb5e37a4053529257da2d9092c5f2c0d230ef7a14e349d91289 3045022018788d82bb3dbaebb0e3ec17f2dc770af53d9e9c2acd72c40b3ad67e760b05a70221009359f7350a4c2cf5fde244c021b85121d29fed7323be0ab421055eadf4ed1385 3045022100bcbf517727e5d9980f6d952b9753c8c8700c0f8356a744fb76a45910909e888b02203426c5e4472c3ba0c8096aaad39f801f0ba1f7b9dfc485921a68f7397cb059b1 3044022071042006fc22ec8d8fe59e14f7de3e546b4d953860a8b2bca3ee54cfad816dd50220452d053d5f64f94fee67d1581d5cf97da22dc08af2ead78ce55eabd7ed530686 3046022100b117566a38cddc6f28dcbd3c82491fc41324eb9b6a4549d4bf3cbd40da72d9f8022100d0d35a42511c9b908e73bfe0ea318ec27b34e4753bc60d30a488d7359ac1c0c1 304502210095969f1ce9cef94e762bd5b524c9193a6311554ce27ba685ed6b8231b3bf082402205080163768cded7c69648b261fe209e6f8aa6acb8003bf5e86fed49992fdc82b 3046022100d9bf3d49db75ece40888d5696b034dc1643ada1a52f376010fbd3d0093f26996022100c54d1f2200cb03275124b8327329064467847992d936518e14327e7d7a34764c 304502204e63b8c7eb9c2940fc63a493bc01109722188068dfb2b3479569ed40b7043b59022100bccd289c6222d7af859a2a71386482cbe0ff80d81b41c0f0c76309a2d6238893 3045022100b61c0f05258899a85d0a062e5b2477b1afd1d13c7b0b53db1f725b591a55d75f022060a52dcee052834170b076c975feb768915462ad905d037e20c629044ee7361d 3045022100f052fc1ed9b0511fc86b0e52708f45887c38a6f329962f581c1e9ac310384d4b02206ac5b4b19bc5b8c149dfe47e7d4302f0c06bc9b65d4fc961a52ab84ad549dc35 3045022066222000df309177c8bb5d13846647abe76dda7de835fb395a3b85566b9c1cd4022100e7f43432b92ad843ea27e59669e721bd0f308d20d9804c8d984f085911b48371 3046022100d5e626debcee1e92cb3403c2aed73e552da50ff42d443a510c75905e702a9e69022100b8df4721b932de8f404faaa42bcd23bb540716afe0b3befee31c9479ba18bf5a 3046022100e2218163e78528745c8361c2e1793a500d8f2e94b80ab937de851c45fce44d73022100d96806d2a6f5a0929fdc2b89f3e98543b34a6a87804a01fbbf94c0e868ebe8c6 3046022100b0be5a4aa095279b69ab2c743e13146092b6387884a5b400bf44c5bfce456ec50221009dfcc7bd2c38bd052441b8911d1d6ba7a6162a4eec5d152e7d18d9c2e8958b19 3045022100b52efe07a7feb98f6efbaa0cba02638239a36292ca586e693dcd20abacd773aa02200274d4c4fc46e0f3f58fe09eea3580c5af6bcbedabf937b9e6d84f837e6156e9 30440220673b488c4bf0896804337c5faa0341fc1b0d89692708bb5db0c77eaaf1967a7d02205541b01897814729e4263f403a2decbf89f278f2781cbc45a99a44e95884eb12 3045022100a7dfdee996e1fa24b8c57a7c0fa824bc62e69669328252beb5a33d3b54f465a5022036ce2d246be84e7c6589845bf44afdd34c4b5de19594f07ebe235fa9457761b8 3045022100b60f4ef873ae0dba1a8a32720d0367667000fcedec9bd5add2d35c9c0f8f8d0c02206870e38bcbc04dd179ddfba9a4ec91809f84b998e28b598cd22e694e8601ec09 3046022100b97158e65422bcb425bcad34d945c85b61217e9b1a619b64b9c18970583260e5022100aaeb93c5c40f52091c52b05c43e7d3d83e827a619417ba5b7080df3a3407acb4 3045022100820d76fb20f86c2674aa6aa1063d43e86409b10d44e9c67594d0c699f2aafbe302204cf1648544eee95d7cff28c9b944e331752a2e0db9b590c223ccc4b9b179ffd1 3046022100a228cb63b2c2630d6dab24b9cf867ef150ef86a8ab7fbeec726b86bf9f81747e022100d197ac71ad20f2af01b2d0625877c3eedd574a616a4c671d34611bc6aa9cf0bf 
3045022064e55c99b33c67e9a8cdbd5c1cb9ff495cf9f01f32cbce0e349c503863a308e2022100b85b3e2832a25c0e20a609572bdcf606c1a46e609982e71c6cefac384d161c10 3045022052732c113ed4e25a07cd4f243b82bee63037a6c36a33f9a0f1d9edadcaba5b15022100ed8f5cd1c4361ca934dd8bfb821e3a3231bd4614146231d90c08565044ca8bd7 304502203acd1a790001e2c1cec8cdf585cbb119cdca1dbe64eb312890c2c7b79c0b81ae0221009f88ffff9c692a01a46ce96b36898b22a0155ff216230337e249e99a44caf9f5 3046022100dae7f4662b2b6877513820f51df52e37d6a69c9a2f2592f63150736a706a52c9022100f3d661df3d2085a2c2d3f868ea3838ff8c51fc1b2aec5006a655bddd104e5cfb 3045022100918ca6d1180814e4801cb643defaf4b27f22268665f741f77984c57a10a1a2dd022045721b8bf491fe981c9e5c423bf31248d8bbb0c995acdacaf3418a3ffb05322a 3046022100951814d8dd82133a115c17c3753e156ee4e1e623c5b938779f2e556ae6cfc565022100dc34acc906652bdb1265c3f1313caab2bee5a9aef167aa3eef90a8408a049471 3045022100c67b0bfe84811d995cb4e260b34b99fa6d3e3ef7fc8f764abfac43319508a64d02200fced0ba4e86423362817f5dd4929e9ea65e4ca6a5f1796168b49300389c5eb8 3045022100cbc396f40973cf235d549cfdb3a92cbb2291c580d35c58ae63a856cd0d99baf4022019c22e3ae2fb8ffcb5a595ae3bf2ee3d6f03511488593663808ac74d58adba38 304502201de5c3fbc9da27eb174b251833262d439d979c388ef573550e30fb24441adde4022100bce29938c2531ea1700aa118bb86aab2bf386865892f9632f0e13ffbd6efb5b2 3046022100ee53245a77807bdc47651a41ba4c2e10b74736cc1d54cebd3a0fa54be8c939a3022100b9817d6739f930e4215e75248781bb7cd4522e682545644f415a43e568cbe312 304402207941d22ac73013bfb4df8c82260fc3be197fd287887fd7b73225d618b766da8a02205ebfd6709dcaf2fd4b5809d0b0b88320b007e8f2ac6217c5f992be2cbe06a7db 304402202f727bfb9297eec17d2e95fa8d22b9f2a25596b7ca4f9e19b14bd6e0448f6f2802204f01e6c672aa66a6b340ead5e1a0ad3a5d0404f680e556b154707f23bb0dcd75 3045022100ebdf453c6668a5250b2112a5eb948e48cd873bc94654e43bcbc9ca445f8f763e022044bdaf2c39aec92ad5e08544724b10fe9f64d2275be6a519f0218d3a6a030379 30450221008db9bed6407ef9a2a419ecd07993bdb7bf46da409de45201746fc323826490a9022017a01d7aeb5967e881e4da60dc1fd5bc677f55e64d392634bb3e343c3fa06773 3045022100abea511538dab72197a74e30eb5d8ea4946f04742c3f89921438603664161f2302206d683806da5a9211c536ec3491d74d3b6c9a2f17a64f8f9e4ac9a6eaac5123a3 3045022100bf93b7ce8f60c41b676a3cafdf248ed6b49faa895c883f7b029b71020ddaf5150220181f5dbeb0c8fa39d0707dac269b626b66f2f1369c8ef367052b3d00ce5b35d0 30460221009bfd96f866d4cb0f4158f6af37e3a9847a4ea097eda9f7191eeb625889eb943d022100fc8805866f5fb9f4b9ae6138bc1b27365e222a7379a7d61305464a3b9eea65f9 3045022100e9f0c00cee921e63661fdb58c5af1c0dc39858ec899a38d91f030a94490005ac02203b72c0d509561805c15c2f1d4e8e366669d9c1006f861eb03f45287ed6ea8536 3045022051a9636b64b6db751cb4283257cc59c877f8c028823a563def5049ae81121ee8022100c872ecc4b593079e6335fa2d892d7c4add5f5dd4f24d8d4f968d0e2f23e68b60 3045022100ad4946dfb3a59d7ab4032adf07ca3000ff9d088556f2520b25f5ced00dbe275f02200d01176e7dcd2e30ff9fa513cd2d66e0da80df99ce55d1b1b2ff8d8338625d8e 3045022077a50e900effa37903afa3d0c2f240386eef4dc5453bbdc74c12b0b1f527da8a022100c4e095bc55f60e65e255bd8f0444489903078f8dd5ad9ec94fa78b7b1a09e554 3046022100ca30633e738c1c283da0d8dbdf7aa83bd745274d82f79fd44d9945098217abee022100d6f2d9a8c06c6e64315fe83f5bffdfc6c508ef8578a7ee9e8f11dcca00b3877c 304402204105f3850ffc4838a8d72465b2bf7bd5be85e21a57434daa7e0a9ec090962dd202206fa6183c938528ba2643fd3e1e4c90a79f324d03c39725ef9ca08967e3477d0f 30450220595214bf08a1087ac504924f5b257394d61316a6cf083c53465581226cba7a38022100d63599f84d7214763301bf942c0c67992c9a8ee1c9d45f0a5343d7b46963301b 
3045022100902b1e8c3b7e5fb7d07a62e5cd9179ff680feb2b569a5f79019e7a436597af7c022055e5d792bed2aa9de340b97260e134fccc02799cfd2e6423430897b7f49e0ed9 30440220636131dd2aef82f55bd5826163139a0348bd2eb747764fdcffe82a1b709ee19e022015205102fa2a56ef64ab0a5a519716335ba82b8906af0f13e08aa07c7126b1a5 30440220796d1caa34d91d612d06709ea5b90117e0e82300fa3eb67971e828e8c60d63b00220194025df071a8476e849a65521f65401b0a843448ffe29e0b0ddcdcf71c2aaa2 304402203687207b930ef0724b7cb531a88fb26606bebda9a0114f5cdf2d713ad7f9e4810220218f78d283039bb9b0aec24864f36a48e79dc3e8a65c37a95ca298a701478bf0 3046022100d22e509897c190b6f1b4d0b5c9733ea2ba928ffd37fb85fe961c7d7ffc83b81a022100c37a9b2f27e3e2927bed410670053ee8507d63b52ee4517ed8c2d8a1e6e38558 304502202114c746608d336919c7507641685fb5c5210ab7c6bbfa7ace1bdd65f326e2e6022100b073fbaea9a1880a852f678a7bd63c2d5e6a1c3c4d9848e9ec3b6620a2da19df 304402201f2ed220d2159f0c0f4154d57dc6b9f2b391ad76ba5d56ecd7946172d9c9e6220220227c8bb855ac166ef4073331590efa59a44d18944140a1ad169c96b8be40f0c7 3046022100f69daa5b47207d3c0f4b53c3e810474d0dd404b43c9c6b678a4fef95717cc83a022100c08dcfd0777938855c1a953884197dcf148e0d1cfcacf5bf29bd8240b589ca0f 3045022100b3feadd9a30e3e18ef22f4c224c60226256e00532f610621cb65f0f6b7a7dfa4022045cb7a4c3376ad909e6fafe8b54317064b9c3c01a5fa3c0a56e6760173b4ab7d 30440220544366cc804d4c118bd41ad6732c9cf023edd42cb20138a002aec264edc1858402203105ee5c01e264f65776e1ad8ce378f2e827dc975ca238b17e3a863fdb3dcdf4 304502205ccbc1048814b989ea769a7662a61fc6d7a61057f2a1bb16286127aec0d95b67022100c189eba4326b810cdea052195b22b24674c67126d87efe212db52e15a00cb1e8 30450220602c4429be0dd7e04c78f5830ef7e6b0bce4f40a40e32dc2dfdbeba00f2b62610221009a9576a49eb0255fc130b92521255aefbdc98d17d8c10b09b318cf94727c48e8 3046022100a5729f73bd9e98a5c44128ff64e98f5117338fb00fbac47c20d4730719665016022100ba752ff2231dbe459cb8ed0bd6152ea19b256c0d4b4e877ee1dfd3f23b9e4dba 304402201fe7545ae69c333bd7b26b28eca523e2098c2d479a73a585f84c3d07ad7fd3380220093d055cca37c49cb5d2cbf508afcc56215395febd23a79fa9049429255e226e 304602210091078741ba7e698d8e6ed7acd656e84aa80a0f5a3b9a1aa835e11e42a5aee9db0221009d073576f32baa59f49c1be61fee7d1ecaa2ca0fabe45dd8ab727e6dc3c23c37 304502202f8bb902c68075272ca10444d94adbc50d7b5d4387960cd1fd3f0790fdf6f2ab022100d8c2215a34a42bc560bf725a85beaf5bd382585f02e5222fe0e6476cb7bc7324 30460221009cae618cf6405812a188192bd7eac4d43dd14c203b63319602960fe17c4bf55b022100cd0333a9607ac16d6bfa5ca157bb2d18ea9d98f9c9851627c8340427fee0a99e 3045022066a2d5f66fd515fcdecd4dc88dc5381d14c4b76dee7703ba93575057cf6f1417022100bb3417f2c0939f8e747616fc474ee760edcb25e586f45f1d469be680bef4e84d 30440220614c823859318fd3ac2797e251f8dec9f166b384b5fdbe878b1e7929a75d3d32022041e484932600b930e8aa48dbd93456f372bf0e5f91fac172adc565a7c0223ea3 30460221009e7a9dc55743fa5ef2fccfdd5692defd78ad4f7bdf1d74f9cb33c05d42ad8870022100f968af6273352fc8cb2705e179aa3bed82b5edabc4b4e23ca88b5b4be102b7d6 3046022100840c79673d1d37bc90cc6997b0ddbc942a11527fe1b120dbaaba2a522a606e220221008bb517c3bc5a97a3ff0c7f1ef5e9fef0b859fc9baca7d3752cc6912334c49cdd 3046022100cdf4d36059d1d5e51ac090f7b0317519e3418fd1538f029170452185918bfb2502210081da3234a57731178f5e02ab8f9ad80a80a1358d4bf9eef396dbcfe3ac0d8fb0 304502207ec3c66e860e0ae10f46b7738394824e9b48fa28969ad71235c832435772ce60022100868dd1bd1af7e031b2fb9ec32fc6a3a81b26dc2c6c4684a8f25e089e3fb67707 304502210099b4ebe64685fc15637cfdf03be9c07d5d372a8d2c6db470889fa6645b428bb8022025fb578080ef565915d04e5150a3896b0500a7c6cddda3dea408f4601e765d3d 
30450220465b7236142412a6093c01e6951876780d7c1db5d1261b0d0e6aee510ed2b30502210098eea589c66a181d54862e252b68f0142809f5b6350a830c66c8e5957dd4bc56 3045022100e400d76e44a2a0d8d93d30edaefd5bc96b02e2c61e2bde9b53789c122c5f000e022016cc2b570a01e8e9c555f6fe0923a2a85e02101facbe932ab8e1f30cb9d00cab 304502210080b2145ea8695975fa799ad9c25a6a1f672f6d1d0128d4146eacada359ff9b7302207bf051bf73705e7542373b711ec4e972d40f1cf6ea177fa4569c9aca5ed241ff 304502207ae9cd97ea5dcffb333f6a74f404e9c1c6affa60d0caef52c168abe9feaad96b022100b665f299f84f4725c3f7f742648336e72c363659582993d367c7af257d51debd 3046022100b26020dbee3e8e132edc1f76df1a74ec04f60c0604283016192824c6a60e1873022100970ec42fa751de53de0c7bf3ad8af600e03c30254a1c5443a789ec8a64de91d6 304502205a3485e2be8adc049f43a738b11a6c9a7ff3cbadb4088dbfa2bb6990d29c1a72022100cc8ff7762357f7d7d0605e53a252cbf3fa5470963f3546b85033454ebde4cac9 30450221008f71f2bb092e61fbf743989c03996a5df4cf3c26561cf842775e76978234e27402200cc6798fdaf13baf1ff2828c6a0e1e6f425d9b0ab8ec67f513162bb25ad2b436 3045022100e9e17e3320fd04b8987b1379391b848e9fb24158631823cd4eb96211992abb06022031a19968ad37639417c2a50eac8b40d67a55cb1e6e018086d04ab05d66ab7f90 30450220023313237fb9df0ee4c814f6620b47ccbcc641620fc3c3a8593b34774d706913022100bcbb1543cc057e8db0fa33780938538fd09a928304022499ffa52ce35e389de6 3045022100ff7d3452c8aab6bafd03dffd451cf552a0704177dad29afd083c5b66486e041402207356482ea4028cba827e1931f746491ff753cbd7cf9a3e78b310a3288f2fe7b7 3045022100a3ff19f6e33306a69bac6532129243b86d1d90da38874cfcf873a5f909a7ae1f02206392fbb4658b8adaab277dc3ed6be1c70b0a9d61ec68d3708d10e2c89b082f88 3046022100b7eb9ad34fbfd3f2d1f96fbcc7e4519a37ce1e339091db0e3ac27a3a59f1bf2f022100df050a35a55f720805e3992298eb925ef7dfa2586b4e3176a38b1df972a8220b 30460221009230b658f373b9e7850bff1fa5f87e1839e4047035a33fa1efd88ef17268c036022100f4a79b682b9fdcf69918a236cbae6749712140fedb4b56eb2e41b8d350c80804 3045022026bf18083b971f72d6c0e3ffc2ff9951bc5d10386b788ce634bf62210092121b022100ebb1d35af67ec1222ba8f0a9dd0e90e230e94e56f1873d1714bc08a846c9887b 30450220637f5a2cf3ce931baf887092703019af73aca035da2b5e64e9f4d0ec17e93fbc0221008f5ae99b08aae018ded56d1468df140c7d257e4a7b694b54d4c734e055610982 3044022068c2454d0fb6c56d092d48af133a0b86977eed637ed5a5f36196bc0974c2c73602202eb5c31e654aa1ee31c360f77b154302ac81f2b2bd1bbd6797d3e2cdb0948ad9 304502210093bd753ac5bdc962f2cbb2a20e8db026f860ef2a0214ccd27fb149546af54c9a0220558ac13f2f1fc1e93e7617b9fb12aad72bfd34583607b45aa3b679bf7df6b767 304402204af50a8aed36cb647ba3970b59c40f2183e7a4c06c00645c609b303ce75795070220709d83231c13d488596be06b12f42d44614a4cbdabcb8dd715632e276ef09698 304402202fdcfafecd5348d44b1f203082da79cec33488525598cf610e653d51422877410220332690bb4e9c9a7f273a77e041faf6dda062c50307f21848202cc3bc90468e49 3044022078e6c10e4da302258ca0d35a6b854f5bc16a5b6aeb7882198de8c0cbdd919c950220423365cdc7ed5b98fe1212d9d96765c684c6c313774585014b349d9cc5a207c3 3045022100aa0f0ed3ca44f1ac966e6f238168d10a15d5cea8a81e5f5304b1c0922364ea2d0220239c138fe1a0b93253010b016910124a54119d04a000d90530ddebd0fa2207e2 30460221008ce8c330d54419d452aaad3a558539f4c4865c3d163c29bfa414c6c948142732022100d2da7e4d43d825c6bf1b5fb0d80d8b9a47b1b8d818b40d47182e50273215637c 3045022100c8dfd0e076f19484f7d22ea382a181b98437807cdf08e45427c2ab89993a2645022025bc17394590668d2e12392fe44875b59c954b7941cc25d477f1e9428d9a02c4 3046022100815a2047a5e5aa7777c5e20e6277323ae84e4f43a731edebd3ed98100f63968b022100a3eeecd3f38402c3f5434db46c7e8e9ca75772d2fd55025be5d31059e7eac0dd 
304502207bfb7b9a1eff59910465752557e4f016b8e284ebf49b4fd77642a427fa71ebe7022100febd974ada359f7eb0cec294d2979f094bfc14e1df731b632b67b6fffad59a0a 3044022043d54d3826df63762c755f5b9632a0c29b584e9f672a12763a21340382775aa8022057419fb98708ecdb1e634fca171655dc4fa79f8006c77ebb813999e94fc67999 304502201043778a6aa2268937de31badfe4a3cb602c8502806178f25dbd958853e1343c022100a9088916ed90a271c7da6d8a002efe264f599038436528ec38e146c774fed697 3045022032a8289856da97bbeb7f52e9bfeed3baec631708640df2074f6b3a5f89b372480221008a52b607bdae5507d2ce89e6db57229d2934c52767fc1f5055ffde3b35133477 3046022100f0aa3378583d1eb8e117953b4aeb2ec3d245b7bc2fd81097bbbca935b877315d0221008c3da925dee56e07670635e79bb5da9ceee624e847f448e3ad2ffda808e90115 3045022100bc04d88dd301ddeb01af3addd99295cb06fa07ea8ee732c9d83643fb404f3cc402200972b8122d84a3d8d9c6e49583212eb238e4aad55872dda70c933bf67c68a797 304502200a6ed2bb923d37b96e9f023eeedaa8307dd18258227bf941390bef1dbaff22a5022100dba50a891b4c6d99970dfe726d02611a5c42c5ac36497ba61b908399828dede4 304502203c6c323364583e53205354b8d32bc4d9ee017b9edbf2e79c31444a4f7d0a9630022100e68658538ad9126549fa7a2ed47ca35eff153c49f7e98a89c5b72685c9e6d168 304402207b13d88aed2a3dae2afd87bc99f979c4a01be22a373bccbd7c99da3d8aa5479c0220282363944a0e08a8487eea0ea24acb2eb0e487b8b52739f7fff0d4e3d6761649 3045022100da1e15638f935c3ed8dc7d5093fdb3ae36299edeeeec0efef9ec5d8206ba9eb402202962cd395a9ddc7a3df22f8b7fe7723c44529330bb182e2f764f840ca67c7ab0 304502210088687a75124828a420489eab3b8c61756c5a6ac77f2b2ee3425fcfd20171a5380220539ed07f8272658c6e15a14dcadebacf3fe5b85e613c1768ab0ae9d4441bfdd0 3046022100ac98303baba16490596cfa7be7e218cad2ea5976e79340919ae0c1819ead2dd2022100a4ee205d038ffb94959c03784b90f9aa3b053c4c8e53f199ddb66af6985bc52a 30440220368e36830c7eae74b15b4c0f2c9b0b0d7ddf727186069030bff5132a9042784902200ed4e1a71d8c7029d953658db8713555baeb5e1549316276b881609ff32e93bc 304502202401bfacae225831894e2bb19ee6e413b91a531f55881834d3b17afa044ddfad022100e490054865b4de7caea3434f4be3e8ff6915ab55a5f89542828f6d75a4278e17 3046022100cc620cc531563b4137678e7ba4f4b86bae4632406305371070f686619b80cbe2022100c84feb4d6e24725b6bdf406981350ac3de49a112f31096466561c352e6b82dce 3045022100cdd3470f84c90fdec2783bba4e8b1ed10a72fef6db07af14b64b2f3732db13ab022009cb2a0684af99e13584855a85f10b9b1f33a4bb2dcf80c4c3dd1b7d9f72e7b8 304502203f83b2908e64e1bc4e279640593135091616e79814095482acdbf9b9a41b3e06022100ed930a517ac20c1a274c3a6c14a2fb96bf12562f48a8ee20638d14a6a6a801e3 3046022100bb8fb77d3959afc528ab64c6c2da1612528d79402bde26a0c8bd3fd72d75dfcf022100ba1c3b63ac78128e184760f10c223fef7d933e858576fa462d6c266cf6a185d9 30450220459cead03ef098a926ca213cb9fe298239ba855e9d65269cb4ff9377ffe32fcd0221009c9f9408580d834b5d4333c2891aee0cd2188ef0b451cd7cbfafcad62ae3d7fb 3045022100d61f8372c6b4875a8fcc1216cf51608019a64f7bd6e43ed282fcc28985b9df72022040a1e1b503f57cde2359caa8a80cbb3caaf5c07cfa705617b56ae347b11111e9 304502203da61c1cd8c80e20a89ab581db0bb18874789af59aa11f21b3699f70dfc71b1c022100c1723809347e13749b5c1cc55557a53724dbd3de28e28c1cd4ec69e8d36fc52a 30440220185d571036d40ac122e57944b521bc9092adf94e0f89ff2d0d9ce47449f02bb8022058206364c4aa8c3f9cd8b18409ab8ed5b7f65f5b274d193b8847519e06267724 3045022100de4309627fa813186cd6b4f17c1e49baac28cb5012f1a571722421ff4c5624290220689574d93b051ba2f773da752493973243b854627f2b79727e399ee8f2233650 3044022038df648de16bb9e992f64eea36c802520b56d9f5db2d159e16c89a21cd44e5f3022039580aac71152b1c090beaa3ddff37eed6d6758a7eb98a791670cebad4c7b71a 
3045022100bc2790ffa5a676edfd03719eed836076a3c16ff9892114fa269e7fe7b378162c0220066dbc41a24e47a0a7675dea6ea90f197ecb15a7ea242f4b052b6821b50273ac 3044022030891416cfd3c7ba4aed7711682cd967fb147e886ea9f893f4c7323986473410022037e55dbf45ae6c37f2d12118f50645375ba10c6a4ace265ead4a51df853a3fcd 304402202847701d01555bf8f382fc99a08d26b4b12522f954fe092aac37f66c1f8ff5a2022049400eba3cad55b13b8685d80007faa7308d6e0146394af96d74fb9e0de6f3f6 30450220359c3053ee699d99d7bcde10d72a149b6abd6a52727b75f59b46ea63f99b972c022100819cb998c50ec00b31b75ead9955885547907bb9c731baf2dbfa186c7c048848 304402206d0ac6f197d7d49043bd324030f8d97216f887509c91513814bbcfd8fb69813102205a21b75a48565e5fa0559461fb3ac83880cba4b34e21adcd9182ca0a74eb49c1 3046022100be133051eb2a9cd879a286063c3a99137b97bd1cbe605c6e7566f4635e101f51022100f1288b79bff5a022ceee1586bb6874c3a42cbf39c775103fba90bd8cd0966aeb 3044022056a6ee76414e1f598d7b52006be9f618b96bee6f6caa809365b27492bdf3d4d1022053a062fb30e5824db4c64e0b2a6036a3d398ecb652d278cd56135aa22c911100 3046022100b38de6180b9355743411c20851bd35a66ec9925f24a49658c7b61b219a91c771022100c18257eaee1e3072eb4fa5da815d1491c685b53533b1897a48fc8e0a0e72b93b 304502204f9d0f4d1df1e2a774bb86390fbcf9ddecc621d50fbe924e8d9fd1bd2cd35d5f022100c04c670bed8874d8648f919c494fcc1b143587e73c39931d8fc4d3dfc3a92980 304602210092e57f9e735bbec0580ffaf9bbbbb0a5b96b2d175b2eeb2362e5280892577abb022100f325dc5762d9a2f3c06fb02b89b9717d8537cb878f2263c18b04f4e1233d036b 3045022100a70a8c9e8206a7a029dad4b4ae0a76b37a8d1cffc102e5233d5603533ee26ab3022072e4476452b79ac7cf2bde04163ab137f131a92c3ea30ac99b961b91a18aee6c 304402206fa80cd76782802b1b3cef89b43f32cf60c9da612d7c0700dc06f92b5e02af8502202b004350cee149ddff0a09ae5950a616f521c56182e9f03eeac57781fca9b36e 30450220234db42bacca58dd836f236212e7d8ea3f388389fb9d243cf22d3274b9444f460221008ba1ff60bebaaee6a5dff2e690e9ac6833c6f4da327a3b75ff81149270fa1c58 304402207a8f337cb3808aa0369b822528f7fc988cfaa7fbcab96f9f30ef05f0d1a5731202206bb66b295c131433951750fbada5e3388a78b23acffbfb368b890d4b4d1616c2 30450220449635e87065adb91df1afc205b1ed1daf5846031bafc100de1b9ad92001e902022100dc222752f0ae0a689c59fe0580d031f875ee892357a6363b8b2ad6a627f91665 30460221008f62229ce9088479f4d566dd2e0cd34481f4202dec3721e732c70d47c2298ea2022100886c2c9397bf4637fe8e117898441bd4d20ec7c1bb11cf9e541dfde023487527 30440220060242779b09edadaf7b605eb3f3f5e43a4e65027a2db0e6005272bbcd981413022023035ea97a457dbef453cf57cd0766efcd3cea236b4b101f23d86a6f27e595da 3046022100ac1273f46598df9bc468f798374bb1496f39e3779872cc853e71ed14f36ef7f3022100fd0f4c6fb13664e8954a28694fe71d5599950d900903cfe93e89c40dc0cdf328 30460221008acbbd9bb69066b9d6eaf9efbeb71da76fffbe425b6d29926d62fcfaf1284758022100ee3b3c30a1f1b935c3eb6ffdaee86b96d20b2d31e9cef8fafc99b7a8efee412d 3044022068122d7f869e8dfb13eea3c86615e3aabd0264e3588812c387d50bdd7528e7a50220783f500ead5fa53f7abafcdefe352b68a10d59cd7c3e6838f65be7d7cf07190c 304502200e17c341b47d4573da49d223a097d9019765ac9920f918cc93f320a722a37276022100d2e73c9a99ce0676133e41aec3cb95039b1a41d7725250e24b660cd2d62032ee 30440220280ac0b9e9bbbfb0612d37fe6fc60612dd6e1bf048f0d90f80a581e81a70a6cc0220220a93a287d108b7f40b519ff9ad539cc8a30c98b85f2cb663f80b002b3288e1 304502205a0db1df712b16da8593f01b89db16c2e8c2c471064d89770ad7ea0386d16230022100facc7e8b235376aa373d3504e94dacf07d754fa731156463d8a7376b87df95b0 3045022100c4d1ee03cd8d21cccebe98da719c14e5bb26653350ed367cc3f6d0a84d94807f0220134c1040f3a1333a0a42ca151fe6fd424011035fd3c804f81545ba7c1fd00ed4 
3044022028d27ee26e192e1f5ee7d2a28258f3a3d1a3a16955d61b4af85f1aa36757fdb80220410eacdad200a83b1d9cc6137d12e4762fd6781144af87b5b5cbc58797e977d6 3044022026093d86671fd0074b4745ae600027064a71436d29871371e4e8f7e1cc6c2b4702206866a6694415099547533070006d3cdce6537dadbdc4e929185c64a598862556 304402207b79154512f176980bcf221853cc38657935a58622ca30938f7d57884ee26b4502206d41c28781042ded52a88771a42d97624ff6a91bb0ab222eaae862f2fb45c7f8 3045022100b4ca144356f0b80b1a7beb4a89721112888143b61641a8139ba5f9aadecf924c022030372bd595ed12fcac226efddc99f9df9d2f568dc9030b5d31da3a66efbd629c 3046022100e53e4f6bf9c65957fa024bc06b64495b99ee0ddd3ec07274dcca29c3132c2b38022100a0a9872fd07c311f265666a90ed776fbc3fbcb6a80f697e0fe4de24e94218872 304402204909446a66e70186ab02c64361677b40995fa517a170077a71dfb3d91976ef7302200dcbc99939341b22c7c94c3471e66b3640f7db535af7787826439db17ceb44d7 304502205b6817431bb149ab6f0e17f74910579e1fa75bf7c76ff45c15a6de185730fc97022100c4b3e52db6b5f5a3be91d326a9fa34ca19b2b9cca9ad025aac095567a8a20186 3046022100cc7fa7f5bd024251566b85481f9d86a5877870c5bfdf9e3076cb699ee50a95ed022100dc68f286b173bffedd08a697b753a81a4eb98a748b54d55ce39d5f2dead346e8 3045022100b466a44951f0554e2581d99a1d6974b35682a9b1a25c6701170861588c010446022000a391291c37034f778d4580a30641c23d8a188efca3720bc3e288b2a8993b32 3046022100fe428f205c56a97ace04b2aa5b1d2ce48900a720fe672d2fcc6ba7c6615ac48f022100ec4079e85fa35027dbfd3460a4241915d13be3f1554a9c44231feb3fe0eb4eb0 304402203f7a327876536d165c93aded671e6f8f8748700a3c2bf1db899df7d1377d74bc0220613a005c28c5c8bdd9d458dbdc03537f7c7b4dd9e5f38426f48c4ef3e9bb63d3 3044022067cc5ef8b1eb946ba58ec78931d71270eb75bfb3294bf43fb69ed4d172302ca00220192385144ae0d95aa3abd4dc9bd626bba90adb72a6aec2af3163ad6eab247b3a 304502202a4330baecaad6766843fd30f26f3552e499f7795e154824844d215f53cb722a022100af48bc00db3ab6fa4468c585c7078f8b7030ee4cecf095799d0d0f8721944ca9 304502205f9b9357b355c895f62d171360c061c1da9e2cb7c5bc9d3e946c4e985f4a13820221009175d9ff2d7b232081f068891ad75dbe45dbb2035cc890bb9ffb70656246c57b 304402203bf69d3334e482d188d9fb63ca754b1bb83fb56b771290addcd9f69277e28c5702206e7b68130f3e1adb92255d1a647c4b9e9ff3ab50bd10705971fe74acbeea5681 3045022100d9b281c154bec98d70905442940bb62549e78e18b56f5f39c4b95f93cece5ac5022044d5ad9badb63ee965bc24292a2b3bb030eb727f018b4bc787d4dfea34eca1fc 30450221008d09f2ac39f0ccf854ac92ffa8dd0563422545b38aa542703299ed771f964b0f02200c8a45fd6689489fc9d54f6773e967487bc1ecbf8d9df5cf1c33fad1730772ec 304402200ba34f446f92c60b57bd7aa4b74dc7e46e9d0116a36a3ebc2a50919a807d699b022027d31a7f57d5d59befae990294c9a61144f4d510e8ae69b46cc0492148e05ece 30450220699e440c405f8477dff7bbfa5cbf8af4063e18021f3cfea2764b9979d9df7462022100f7e39035c56338d5a0957a608b0de6ffe16fcd22b97d7118e1a1b4fb8fea3b6e 3045022100ca244cbe869186d65694e7ca8a925003d5556da428ceb62a08494f9c9f499cef022052db648b3b170bb6d2ee80b531bce0a626078ec19977db42e4460517a61bc0fe 3046022100cc319b90df0d239aaf0a1e7726031a660e9b5e087c2b99ca3aa24884e8d63b76022100b5d652467281de9d5d3a28b2e663335518d79609dde3c0a23496d31701848c5d 30450220040794e594ee48c9627daf307d7857ccb0ca1e0bf4c791dc36044a4b9009cf4302210096c131805490f8607cc9cdb126289bcc485149990420af8c014c46b126bbc269 3045022100c2d0e804fecb1f0027bec531d0f7975a58b0098cfa9649827c809755b2411efd02202bb0ac0175f7454b573cf34a276735fc59a4e16a02dd607dd8b3202ffbf020f2 3046022100e561ac353642aeb9161d1ce5c258859d785bb9fca712ae090e3af65c17d70815022100c0751b7d198b1e234ca7c980285f474bd796f3484ccee7dcd49df9572f23c1db 
304502207a51f335695b254ca5d2c83df988a7ee73be4aaf5ad26245154228da8c22428302210080cc448985320b05d7c2c03fad33382be1bc330c75b97c7f67dafb04c62e8249 3044022053b2ad836e388211d804a907b9e9f8c8a1aec7a0a17db60720df0e18cc909eb00220143ef6cfa654e2fefa2080b80d7a432a04e7d5fa0e351c8200de70182ffaba50 3046022100daba0d26e899f56f51deb27b9028bd74907451dac7ea7e438de812824bc95654022100f9e01fa2cbbb1695c59f808d5ee66aeb984c1b14ee07e8d4ac22ed52119c8d00 304602210083169b0026cd3aa664231c6691e6cf7a4eb7b7b884ab9af142639841e23382f0022100f4dcebc8e00d57fcc4920f41e251df4bc2c56233ea8d93358211230cc9ff5cbf 3046022100d2371fb193f90b00590727e8ba1b35c276e213cb4a1594c7893fd8ad4bee5d68022100990b36397b3e0e2457d1d49aff9aedc0f18708d55cec3c979a8c3ba8bbf513c2 3045022100b0a299d1459eed83615365b72abf447ee218cd19c887b793c93ff0c8d18e59b40220576796f59193b83f1abfe95ca1e670903c5c6302cf44aec380ed708f601d814f 304502200bde3c9e6d1235a0450281a332aafa527ca61169e0e66b9e6984b7288c7d3338022100d0f6dd27b300219ebecd9837db4e43f2ce71ec2faf6354b659c3498e36c1c44e 3046022100bdbabf6f9d0b402c2a3213fe11244cecd01669d96e4500d624f5ca517a2bac1f022100a4f01f21ff12d68b43181c0dbcedf7d70311eba38e4f998448372491c0afadc0 3045022037b2e6e63665c664b585aeaad0d8e66b65dd065c030a12f8a1eb6453cf8fb13f022100eefb37377fc733c6d7b3be4ca3c9fc2d47bf40b26a326b78bd706cedf92c443f 3045022100905a9d764a30e13c602e0425bb01bd4cc2b90119758b40c2759b9424170e028b02200853ab049d205a142c99f7bc5b8075e8345ecea3a4987c12b4552bb6fae7de3e 3045022065c571d2f1bdb9d4f5c82dbc7237ea7a56d49439a59089697a5896054106a57a022100d6965b18ebb8d857cc9b2214ee6ed3d906e331be52738db325202e5a6fb01abf 3044022054b3b991248d3c52c2df692c5f6720c6ef5c55382cc502ed2ea4e7a38be8d032022004e48f17e26a49f84b2490ce192013d167b271b066bb22438d8863702fc9b280 3046022100a192ebb1108e2ae4772e3b48d3e62c771f5dbaac4fc1bf268c877829b4ede7a8022100a120c51a6af95941b765dd251a9ff1b06997d1d883b5e3893238c4cc686c3a07 3046022100ac96e344f9d1509af4e512a3536918dc907dec1f7e314983d68cf03f5b6563fd02210098039a631e618804b9eb6d6e220a55fb4e2c6cdf4731ea8608e2e3207778c91e 3045022036e382d80f781b3fdf8312af9e67f19dd9de39acc5eb9859078667d6c97ff839022100dd7268abaf56f077f5f8b7fee209450db5aea39f7703e536c269aa43fcb7d91b 304502210089da0268d8922f7ae4f9c6a3152260f08e734d50c08ee6c4c288b6a39f2399b60220174fa04d808e838ad161c7619734f8574764f99693fe2164e079d7a2b3c68c4e 30460221009bcb1b6a04e3c095f0ee1966b4f74d995ede68e361ccc63691a43dcc42215e6e022100eca7552f7000e14104178591d2a391f4c1f69884d7601abd5ca40811528ce66e 3045022067d4c535db6d401abea98bff0dae7053876c1474197dc117595af4fe2a57d8fa022100952067b8611fa0d5f70b6ee869ab00223c9d51608a88785fb8d40711e9557a79 304402206e718c09f54ef643535e1cb018c7cfb97fc290b3238d9a9cfc5892412612db9902201e3e11c9691acfe52e6d300d669401023ca813367645163596ebc9e815bae2af 30460221008b6a0e38c4bc3742e2bb1a82cc205ae3dd5a622a1541275bd44993aa6c3fd624022100f467b756adde3391c39a64c0aadc02e9211e2cd8192e8c76cd02d94aaff21514 3045022100fb2e80b7f4baf8e292d3d1cd4bf6ee95a10ca743ec1d71be599155daf487ce5f02207d69d7fdd9bec1b94d23f43687983478f93f2ea7335e2163d8d4767511b353c6 3045022100aaf2a6a0bc6aee2007c873546d77b37fe782f17e8cb695154600fb0de7a4083102202ea376f12e1a1084f88dd86a068397c16abe2f7ff98803c6990956976c98c6e6 3044022035c012dc455bec0a44467a2f32fa8bd61bf09812a0d153c823545192d5e195a50220714988f771590785c8e72abad2447872b65cea85cb416b8b0812f07d10638074 30460221009b8b02b38fc29317f8deff1c723b2643a747bf87a3527fe079e829ca6f058b56022100d8a9ac8a3f63afadcca5fa63b316d08a7344ea8894b61d45cbec33316c900ff8 
304502203ee85778aea490c2e66d74a66f307f1ff082a8c11e44f8e53c8ed7ce755fa74c022100af401b4e369671105f862343871a53a97002d9b0e260e2c8f6d3a53a37dd6c30 304402201af305a3505aacae4bc64eaf235596f0f9f9e3926c5dc43974842899612d40a90220017180c25ff9d09662d3310b53e5e802cf99883106bed3fab308e0a78522997e 304402200c526e8b3bbfff8845f7e2d782b58b6f203c6b0a1441d7314c33db88ff1cc9330220016c19a232542ebbb5cc53341d42f0d77361929341e51cc06af3bc459a2e412f 30450220181b9eb10595ee403e179b79fe749be31ec2afa9d25238a32aa73b0af7ad10c7022100cd61ab7a81719611ba289bc7006c42ea47b0b8c3b218dc12c87a249d6f202f70 304502207ec42ec506fb9e627a91d2d8b9898f3e84b8a283e5d3379cdc0c0984816eb684022100b179889eaa0205c043822769edce3e3bd6d77ff5a13696b34147b32e4832338f 3045022100e2a6d8bd09be3ae1d2aca82e8770615e8d895224aade27b790227308e5fd4631022024d43430fd7724adaa338ed352e3acf8eaafdd933f54753642b5ce4085073350 3046022100dff33fb5b9d6daa51854008db5420a6b355391f54576c8e8c08699fed07664110221008f1fa5f7414b57785900ce2096163fc7bae0a5ecca1aff772702d495b9b88010 3045022100eb19aa8b60d11ed706eecc6bcc0cbfd6935157f877df419f6c8bbc858c58cea202201a4bf87f128ae738653487e2ffa1eb41d13455239fc570f0579ef7475308400d 3046022100c9669f20bde68f9dfbca4668190b3842845e61684e3305293b9002068138d862022100890de0f2b45985278e5b859791158a7058764958b1961d097e1f29003550642c 3045022100f11a234d235a4e9b2e4b14fb141c22cb0021a50eeaa40bc6973a5a0d32da8e82022038093c701e1c14572da2150f33e405c0e73a371723d947e810a845635ff158fa 30460221009ab3ed5bc06b1037d749639563f772a258a65cf8830f0bf46ebe7f664772bdf402210097b524436faa4a91d569a12a8a77efebf7ea2f253568fdb8ebb86ead0e3579de 3045022100e7f70fdf512c0ecc0a274a56d35a390af444adcc3984cd1fd9970f0a2b0c796002207fbe7f907c16384b10a77b5302600bc136c305745fb5a8ea683aea472a5caf59 3046022100c2a2b87f4ed4dbe0ad0613a2f5059485f2919f981c3256441b8d65790c8a3180022100f079cddb317b723b159e58d909ca687ec176f6b8899f4a05b9e8965e83f38a32 304502210091f5280b82dc939b39f859854378c485eb4173c9ef3414e9f02e291c7234d487022040df4b682f70cf7a80de5d54aae514d683a930ba576acd0fb571636ee74680d0 304402207aa44da9f33f1cc9a65811e201614043d1cc267c32aabbeb707ffeb68484938f02201191ca863af7576760651c0985b94098e4541230b6489015fec0a61ceb52569a 3046022100b42ffd6de551c1c0ad3d659be0a86edddde6f0ef7a4880b5ae2591709209578e022100d24c4aefb1a6c912cd37cb717357fe314715e146ce2d27044ceb3a09b1f4d127 304402202772bd91aaca99e3c02bea85a198a6facf047992c9a64c347ad241b1be0367d6022026fed39f6fda27726ed06789c3a988ecb86c3c8eaf419ba741ec978a4bca6fac 3045022002a1cd82ff09cc709053cdc0963294e54beaf4e076bf3f0708673320b9055f89022100bc28954955cc81994dc37fd43f6f0861f131c26b2a03ad43749a7b5b5882fec2 304402201318a204f402f96d5d0d01ef1194366f683a0c422824151eb704be59e8575b4a022028d4763a417d12e0d2e0208427dc46377c8958e42be0bb3c3c3476e2fcfb5ea3 304402205e61b6331d0324ab4e498ab8c5b0eba19824a5897b6a8963c6cd6fa5348ca4be02206ef8f0018a880a284d1eed5d3017cfe2c59dbbcfb61827734ee81e61e3181527 3045022100c52bce6fb2189c86b2f4a9df98e6111364e40131f4b2be2d1f995c1f6411e49b0220768bb319d1baafa2de74af752698de32c4710762dead43ca8c56cb1c49188404 304502210097d0b0bcd40f322a96ae0d2f97b8b9728517e341765d3c3b7f24392694ce3b3d0220703d47389818ca59bca4e2b79186406ba3b4dc4bb19b0233935394fb887c3712 304402201873967d11400ef7396b09eeb92eb86f0cbb2983c26eebe8f02f6f927e536f8402206b3cf2e1ad6b2edc684a495939a0f280a4491b0da999f4738b79a24af5a42a0c 3045022026bf3152c3d520ec3a366af0712bed4155970fbdf69c0c3c701826399d6b0493022100f3bdbac000ef8a33eef095ab2a66cfbbcc9813289218371ba699e71fdc4fb492 
304502207b999c3cd86e9d9ffe1ba7e2e8d15d59f3d822f501cf3ab33b3e4eb3803ecda1022100bbda90b22d35d0f502a57e6c9f9caa40b14596011e01aea3cc71ef17f8c61643 3045022100c3f7b755bd23a6a2a99e86fd30330312f6441eff9005337628f170ac05727d4302205f11c8bb2eecf902c27f62f75951a1d1ec0e7d5b1401c100e69eba528e59f00b 30460221009fb9d5a1075f99974460182dde4ee07ba8ba6db2694d06a035cae853611a23c7022100e3cb14ff02409a5a9e590e2d591ab219438784908443586ee192d75c455500d6 3045022013307e1c07b50e511744a029392603c7dd4ce69258d61ba029e9584710ed81a3022100a78b572c6c0e9d95c4647ee4977653340a7034103749cbd742c97d98f9a1cbbc 30440220047fa8e16dc032b3fb8708f6e5061fcfda045534376cc0196c7bfea4ba681f2302202e5b47bc19ca9e059eb4000f71c40cfb675eb5c54a5a1cbed38dfd6d0a35b03f 304502204f68677e8a4e6909a5652c968f929b6c5b21953e5fcdad683f4eb902826e4833022100850f0d00fc54430a3e2873bffd06cf382c631ee3097d7527683030cb8ffb458c 3046022100b7a75948f964b5729de434afb9da3b7b81d327ff0d2ad837178001bc86e01ffb022100827043b8207f76b622f7ae092905cda7b079d8e811290de246b9c00ca642c9eb 304402205188e8a9aacd907a3ee1ddaf0d4c9c9b464f61457f43ba90171b8c033aaee805022008225d1adbdc39c328828d4d1ec48c54da979a0506e3b355aaee7d85a1a5c560 3046022100e00d0632373002321cd12cb0f0bb2f4ec3439e8410a6ebc742837b9669a54ae70221008ebca7a54d71fe7c93a7f206e6d36bf0b63628340cc1d75b0d21edf5e4ec7e19 3046022100b5eab2b7711cbff87b32a872a46455798a53a820d2f5a000ef012a3bab2d7a9f022100bdb0738517af29021b4ff00d921d332b6827126ebe4151e5820a167a2f40f049 3046022100dd70af598c38d7a1c60e5bccfb4bee35a69a0398ded5dac20782cd7cdeff9cbf022100eb18dba802bf3aa800514819ae969a82b3ebb05f4145d857fadfc97fb1d02e2b 3046022100890753180e3865a92338a3df3e7498d521af7c9fd162eef06ba7a8f0d922fd66022100cd87cb5483cef033c4fb2df538624a8f226c52a6063ccc1ed6c72e7185730b15 30450221009e3c53aa34cab4938c6c465cff33131e387f7748a1957d89236f1eca834e1d1402206c708525bd627214587bb7bc85c7129fc9e84d747a0b135bf565953d6089a472 304502206e8ab2d75e9209a6b8e875ef235191d7259d3990f269e1fc9488d7db5812cd03022100c81ed05ab38a9e388ce72eaf6075e92b2bb218d6ef7d4e226f986c35273464cc 3044022024583713dcd8ad6e17b19e5fb8c42d23b852d02bed629dc1f88496eecce9cbf3022033eb57714b60a5b90e2138a263358e9305c1046b90159bd4ff4a854d1125b88a 304502210080d75c91a7477e215dc69b1819c526c7bd0719564aef08969e8b241d47acae4c02200ea223497ec9ac5adb78fb5af71b2319fbd419f0447c84f7c25ed0663ffbb549 3046022100f4cf69e2bda32add73efdc06acac8346b4d2b7f7927483ec53ac002427ab7e6e022100ff76ba64645bde44804be0e9772837b64d6f4019e455128f599153f419e79a17 304602210099b4723b036408a2d16e28f1e2aec6dc83eb416b950bb0d9e559a1388ba4f6fc022100e973c7ad7dbdcb54f476603ba3edd7c53c5b56b862079bb5e55f5f478f5768ee 304502202580d73e5ab6fe4eef0a1e75f2ca55a3f35a794c9af2a5bd27429500b628a8ee022100e066d5256dbb2cf77611242000c5ddebcdda8630deea64e09b022774d5300466 3044022033f66ad8317c6f67675aa19ba053a97be54b340ac835ff3b44b6f11b2d06778f02200542d96c4e71c8e46c576051a6bee7c26f70df5af8a72568e6cdabdb32dad005 3046022100ee44e9c48d5eedef29bbd37db1736b7e4cfe10eb69c6cca81a0a78c7d8b403ad022100a27a680b9d8d90fcc11a1201b356626b5033f59017ca153ddaaeb7bafa66144a 304402201038b30cb594347b7324fc9090cae94d76ee75379ec85e0572b2ce28640df89b022079b89b65059369262882788641f9e41cf8d538421e6e4bde59d30f62bdaef9f7 304502201fe8bedfa49eac7fa4c10abb3e1eabe91a83c7b612a7ea1819bcc39c363d3dab022100ad1fb4e4bcd0905e732746615139aae76bdb1603b7da9b484103fb23b5f108c3 30450221008e5b02408162a5b87f82d07b7f790bfad458df482d8c79f2355392a0dd60664f02204e7ba84d482748beb93650754ac1b1fcc1ab8c9bd53361d1090988f06d787ab7 
304402200575365e8f19523f238118fbd655c6662c5d32f44fc57ab9417ee335308bdac90220206e690eddb1d1ee45fc197900abb06994309d934318c0c950dcd9b738b4046b 3046022100ef5830de956169aa813594a647f69f1cc6fc586b04caeb14e17e3dd423d5ea3c022100c842d24115e1d7b0e2a71e0c1823bc7d5793a7ecfe5b3a0c662be446c6db0516 304402206996e4e619d6c8cb448a57f2557ef05b290f298504a7f98c21a45f3a101c1a3e022016f3a92779922c20607342eb4be7b12e8c2ca59f3c2e51ef4effb4c35780ab04 3046022100a4a4be8ae5a1cf82b45a517dcd8fce7d4f01bae82d7e1ea4d79b4f557f61f49b022100ac2650ac4f6b26f235c31ec92351ec8ec2cc55cdb9d777f3a2d6be614811e5b4 304402200c0a72a19f98a88315debf57985f55edce68a2544a34f8dafc863826033831560220373a38545326159bd9311f55a9b0b9403fd86b0461cc4478358fba4cf5c0a252 3046022100e49124787607a1ad98797dd1104f0c967e63ae8b75bed82415b4dbe33a7a2c35022100def563c80a646cf4f914d4a26dcc797e9442cdb789d9e8c349a7d082b1de8ccb 3045022100a0a44d7b14df6b316941dd20970304e90403def005ff22a4d8c4061a54848f5c02204af63f6bb45f5feec046869ef7c2a3eb8ff8a81604fd1a52182cf2f8aa6bf165 30440220403ee47013a99aeec37e7b4ca84297a3cc00d26b31c614bceb9861da1283680d022044e5c4fda4c070cd6103d58026b4655dc5a73fa0b7521c38c4cc7d808c999243 304502203e52d48c6c58b8846e7e1e9713179fe4a3dd58b9fab537d04d7e5d20a209e608022100d29699e1fe755f0423965dd8344091e5fa947c5d67ab57156824f5c3d14ead70 304502206013b8f54cb68fb4849bb19acc880f34ba9c7e6809b044a591c64846b7432f51022100a7b7a68fffdc065f0b74bec8bb8bbc8e7d2deea0770bfd05e783ea9e0dbafcb9 3045022056684bacc920b55c3ae2be6cb0fec2b6f8b453dc40a085cafd17e3e09acfafb30221008d1e8b5e51e97c6b4d52335b296923c968c3c2186599bc35294e5c1021a33d38 3046022100bd3299cde7a28f906107b362129400930047f73d7b4f5b5bee3d880b5c3dca61022100c09e7b54f945462068d0f6f905b40ec57e4be4fe448d8872ceb1c3db3f4aeec6 30450220322d57ff5a58d7a1b3c845182b895166961a05f647e0183b9aec37bfd0262ad9022100e74281f8d8c97106cb16e3cd62287d7b39919e5b50ef1fcd7f64241395aed141 30450221008420c7a49bcf356f22ca8d55ccf301e2d22e9919c93d3c22e005001a863aac04022044203cc11a3290105924f7a4e6c9bba53cb2111d6bec235f2d7d208b3f11bacb 3046022100d7501b24cf9473750d2925886c0963ef6c2885c5bc53dd1612b21bf468841a43022100b731ade9a04b7d27a32c32890a1c14d9e37d96c6946805274c37a3e80883e3ad 30440220702df229a0f426de157c30b343253f874794750b0f28b9856aeb54a326fd86aa02207c460e7cffaa6243b0cc6b8d9b24a2c77219f1b524d26c864d8f6d9a5189c806 30450220453b6ba2d53213e345f930d2eb1cea40f3f8bd7f7c1a52076e39eeda8c70baa7022100ff3b241d9e1250643c5308fbf47c71d665b47542bc2501a9ad8c5f68717e6e9a 3046022100852de7ce4ff8ec2b95269bfbf66f5ad4d3ba0a8726235b50b8fc6cd356f7a94b022100f3c30512e5eaacb6f50b36934875990037bd08404b96b0c11c2ffa33252647e6 3046022100feb059d40579398ec2c3d073ae5bf4b1053c2d966341e1aaeee107b820ab848f022100cce51259c01f6c612413f78af6f9dfdc90d5b53f70999a4c8f188b7f4a118172 3045022024753674f297f094b45ccc8d8110da0958a3d5cb5e2831bf7317a78da32c8a180221009fee4ffa52c610e3a693a3e5480a303de5194d2904a6813aca0532e21a40f38e 30440220551b35fe0121ad2ea05a4ff359bc7399bfcf6800c49487299eec802c6dc451ec02204b6314bb7122713b139899592419cf9c815b5298f29cfc2bb0c7f6a1873b19d8 3045022100b24b3283ee6f995a173f39fe7aabef8e187b1ba2f3bac7309ae2db14e6ff0e7c02204fa755804908f5147f9aed71ad10fad0e7d3c243726f24312e864ff7f4ce13a8 3045022100ef13c9ae3fc5abf9e1709811efc7c61033c449c71bafad914027403daa227dae02200359600f4a3c5c8dcd20557cbd8920582af680c2519bf192bf9c8f2478c4600c 304502206760ccffb95b577890bb4ecdd0a4db4b5a0405b133590204e4cbf0b0b65f4d3d022100b9a8a7592f5bd09f3d67a18ed2954ee40b8950da5110ac6ea140c33b40bef5ff 
30450220073893685d782d8a7851d1fef486ea5c2a4101e7edbe33b8654350e1adf067f5022100e6c7c84a3d752e3028b15030b343295c26e81860ae59597540b265719b303e3c 304502210097fb8a9239259e18e3deebf4128d12788079cb256847b69ce95c4f7701f560b0022030ab17807ff2edaa186133ca5aa7d5ee3640f8a85e639d3128661d7ebabedd6a 3046022100e05902a4e25cefc7e08fe980821e8e9ca8a26660e7a2837581b4484b8c73f893022100e14cec89c99eaba21df9e3493a4b8060d8c6d935abb9f809ebd7a5398e6a115a 3046022100e1436c6086fac0247f95c4f278a468cf98ab7fcf0bf513331b2efc4ef6b12e83022100a7aa34c678d4dfee8ff13954c32e11d4427e048ac724deb766b84a799274dfd0 30450220449fd3391c2f3a6c481c859c84c49feeb207ffb4dfd895683a3d9fde502b844d022100d84ee5a0636341c339f9299492b0aa0b2b2198b9da67c104daaef41cad4bbd78 304402201efebe9e3d15578080ffd88afcd79d0428bc8dfa80ab75743c9643d7f8403bde0220384fa68d54c980859aa3cbe31350f450a57591ad5ce8f33f6f6fc2e5947d9143 3045022100aab8cdcec88baeefc7b27bfb7f4a5561e042033585c22c5badbeff6f6670be4002205453584493f86c04d7687e62eae373f32044d450fe0f072f108091cb0e444a9b 3044022024035a66e5e5c484caa3a3fcc009a88ce70d5eb8c28f0e13ae00609732a4fffc0220444a942f637ebac791e8ffb4ddcab3cf3b8f0fe4747dd9bbb805896c5ab27a9b 3045022100b25bb280da4b63729c5e87d15d4185082964a16cd16fc0a22061d49354c9c63f022038897668b3015c0f3434d80806a2bb6462af256917c6970679d49bead2deabf1 3045022100b9f3f7f8732606e344b7e3e4d2dce496b82fa9e6e9519f8074afbc928a9fb3d1022058e4ea13d1372c9150e053d353bbcda6d3b386e04a046c78b7f829f0f7307ccb 3045022100ed51d95b0d5f5a817c3a4649736605df10276e49afd71bb59829d9be74dab1fd02202a005d8348127d7d98d04dff9683ad555c9937e437645f951af59bc9f6c17457 304402200be0abafc1f0b756d5ecfe53e33af9fe3d3c8919467ea047d24412851e4b90c90220022566f9a04815ea7fe1b8992ec23a057c29470ba07ab06b48884d478cb2e9a1 3045022100ee6d16eec48ecfea078ab963cbeefea4565da581923ee1aeabb13dadc11e8243022010918304ad9b4a28e573028bda5e78e76c3e6f9af28a2c3eda0df25aad961c8b 304502201f04c2f81a0eb197e233ab2e546a72d6a336a45789adb9716556785da6aeb0d1022100abf548cdf18fdd2d9e89b66a3412e66bbc7f39b18a5d45b0e73f9949cd93f5b2 3045022100863f041ddb15dff67a9812c7a943cb2a41b064e74263d00f4de3ac2646278e3f022065be91c04834592f8ab4982d5fa4fb668ed5843f8917261ba24d48a24b1e90b8 304402201129d4dc7fc28dbc98eedc258d696c38317d857c88c4d4a134452e73e3b0b8f2022039a7b97f73d8ce1c0eaf4b55ffe651b17e015886b5152a8c7252599bdde84a2b 3046022100d6401b9841de2d14acf6a03b632cfe08eea55f58ba5c3d10dc5667bcdb7ed328022100c66659771312a94905389f0d556fc08945999500f3403a80222c287c7a66e28d 3046022100def3e17d31f91cfde2f9e49cafb4c05749a6b802932ec0658d86c7b0e2a4f373022100dadc4056c0cf919f62497284138cb1394d11e225252fd521b2b6e5982e1b07a6 3045022100bdf0aa078cc7b932b269a6eb614a774375b2981716f7d683c99faaa1604dcbad02207a75911b70e9ad7706166acedd02bb29d49438cf0e1d6bf0e605b5701d0d0c92 30450220195e3c86c48fdca67ffa4b68bd94a63c403395f4d256f682dac65bd348ba62e0022100d6215642f33a695a9bdcfd0fe8ed71b029f3c2ab0d21fa4d22ddef2b60c19607 3045022061a78f8a2f6ccba8c026b13ae5bed56f6bb82c8bb5fd7ebae6a8e1da068b096f022100f5f5d4b57e9ba18e7ca28f40c3fbb0a1f5139573aabdd4901192dc1d4dbcb738 3046022100bd58aedd8d65e5e994534fe53244a1560957625b966cbb4776cd1e54d4451986022100ecbaee4687290dad2383b46c3311eae830a78c1131c238b6f95a15d3da3b509f 3045022100e8bcd2f9f7a718135bd2c5192ba58a30a87a58e41b414dcaa0b4039b43139e7502202ab4029ad8715132792714bd55e838d8d29e0d957434d9cb816721270550a8fb 30440220354042a551eeb545d015746897eb2a29107ebd7f17c6fcea469fec9f03054fd502201a8ec57daf772296857d11ad9d78a5d8214d05f29ed3673140ebdafa80606347 
3044022018a358ac836e254f70b17100a6280402be3fb3888e51d4c322702fff9daf430f022054dbe4f3afc2c9da4739a4ca8c45f0757576e671a6e9bead95a90932a3156afb 3046022100b111b5e56b7997c7b1d2f7f5a9b6abaa3adf1935e162a9c877df0474c78d6240022100ca523995de340c04c81149b9666304b6712751c76b900dc0952afb9bcf9e3ca5 3045022037f0f90fb09f14e884b35fb62c3c704314865bb7037e15da3f32b77ea2182799022100bfcadcffbda8ef2a402c69c1c26955909778e4357f9fa89a7fc13933e08beac3 3046022100bab82011fbc81fadd9403b7ae43779a4bf1d8d1ed582c7a19bb079f1e81284b202210094bd24fb29a0061cfb458934b061659aedf885cf2b7c26d612064606bc737c3c 30440220788365a7d20ff7f5992c71d9439c0a284b3e2b026a50940b15dd926910329652022062d15967cd730639da033db2cd0c9256be3d53baa5e2ebe0cf3c452f88887681 304602210096270a2f825d1316a82bd7f2acba2b689a272519faf80f4c23fe135a9c1ffe2a022100c8e40d4a46e7e8ae664b47c4ae28e8cfac14da1101cc6fbb993d6e797fae9301 3045022068f4edf9e1730db2b3c1c5a60ead4108c733e81b0cfccabeb683efc91e1f5507022100bacb57f2a03409075b481bdab34ffadc03447dea9b1c6b06da3ea1c08184380e 3046022100ae6a041aeffdf4b0668a260740d8d07ea28a2dcac4cbd719f2bf91389f3d548202210080c7f35f5440cbb9ad42436cda35523e1609fbbe8b69ae5981953602798b4199 3046022100b385af8c73b7a9e65ebe32586efe2150ff6c1c24aa5d0761b971fcc411c3bf55022100e57a143fa9b01fd80c152898c10a7bfa177d66a8b964ee32ce74ec0b22f9c11c 30440220339148ec4f43978cf1c5194c62c1f973543bc86eaf4f5724595f64586dfdadbe02206d091f2ff9d60e4d05d655ae231ef87c3c31ec5a80d1452241cee0c7302459b7 3046022100af03cba2dea062aacdd818ef789e933a7a906d2a4c67433be00e17579bc02f770221008cb6a664edad4b0b1935492195a778556872ea2404b6ce6773d5cfdcc8a7be06 3045022100b82235fdd860088defe5501284970050f7dfbedf317757c4b4235520cab78a1b022026dfcb0d1a15ae7b8bab27c85a13629668c03726dc3a826159083090be25ba99 3046022100e73c0a20a11fe11ec07dbec363805d0f8f6d97b59e41e560b0a8327a07042bd3022100b138ded743200eb4bf07eb57c3b618e584e7979c9f6ffa10efc8d55b92567b7b 30450221008a5fc2324365c6e7e30807bdd297819e1ccab6f335134f24a3390fbcc2bf1626022079966fd13c6288ee82317ae9844d289f5c310a189f27a6768388ae94924aaa9f 3045022100aac1ce908862a35e19c1f70b79ceea3bc67e4ad1f4201fd01bf6da3e79b7ba37022000eff2c240e762a8b2d19f6f2810c0a37dc14005c4f4fe2ef200ce6c533f0c6f 3045022100983364ec990093ccd6b335fb67bdf85567b8ca1fffbaac0d05f2cf286d1156db022014fed6888dd6ac60c93b3efd2957433dde0eff8a9b8ee6fa992bb90e05317acb 3046022100e623ddcf32a65f32cb59daa6e03e965566c1dbb1f877d7fe9699225d2788c2ee022100879cac66e6300c7112f5f0d12dc6cf48602286b0d23b955a1840e3be7ef372d1 304402203259906ac764b622caab07aaec8c466d89cac32799635e4e242b08fd50469e6f02204ed9f7b94a2cd3e55bf2091fae98b52f911bea1630c16b996063b392d133726e 304402202e9343140a35e4063cefc9995e8e51df4ee90ade665180bc65f814e6fa799e6002204a960da7adf1d664f100e1b33c9cec208f4500c9e3f797c05a57f138d92077db 304502200bd1d82d54c8e52ef2459f37430099729ef91f89762975027c252fbdc37be1520221009fd1f1db0966f75a4397607b0c28ddbe9afb400eb98f1e45894ad772fa5d3faa 304402205bfafc546976468f9566570feb8ec0b75bed5af3aef776dea35dba859f561088022036da166b24a26d2f0d3c119663f4cc15a0fa57ee9242e1a36ba9854c9e09714e 30440220366f94d734184944ca6aca089c71243c9861feb2c4f96bf989e9860fff413c24022041b0accb441f2595022885fca2155b1f1564c758afee1b0efb0de852f105645a 3044022045a5190e0fd0cefef8a2eb705e7581f45c49bd2687de4a21c21f2c5e7a612987022000d52346f99610b684cbe5a9e2fc2799c7aa38f7b399a3ef8a4d9e9ee69ec80a 3045022100afabb085976da6b945a6deae956ea13f053320d4f841c6491c6c6b00eee67c6b0220700e4484122e232ac52e20ec9b7daeda3b22086de53b66c8b8dc78dc9be61f9e 
304502200805616301394b44d964da0bedb097c4dd94962d99b554e408968621da1c2bca022100e3a6f5c321b5edeb9a76edaf54a7501c2a6568dc57d9dc0219eb76dbc1443144 3046022100ac1528c9355411cb68877ef190a175c8d565e277e010286afdbb0a8394d2793c022100b89afad4c20019084709ddafdd4da0ec3804a48b23117216a2330a74855eb22d 304502210098b7e2f56075a91579b678f6618a2e7576a5e6f30deece3e9ec61b39877d627002200d77be7d54d4c3e41c0a117827cc19282cf8b10493d715bbfb9b6b2679cd2848 3046022100a607a6634e0322c51fce4784623da774ad5437cd277ce8436108dd6bb240d4f9022100f3f4926e1e5e905239f8786d75384cce9ab1cef8a3f3776f967dfb9b395fabee 3046022100c9a94c4ccff2b76828f773681bd89c5f06250d0201c55aac87ef1a4c7863ee86022100d7e715118617b9480c40e24a2f67bf11541c1372050be793b3dec21e3abf5111 304402205fd1f693a5f4a2bd752abf774f5131b4bfd4589d4048cc8af0298efc866edff3022015ad6f5ad11aac795f00688512ab79df2f57d9db02f87ea6e847f2e0c6c8c5c9 304502206a86e47fcd8c2337c5c73af5f8f6fab6a84fb2887cb9d24955977883bdc4bf35022100c5b24e629f6c1626cfe1c050b80b79f5cc6f52b909586a0302f5d014e93ca195 30440220051b4cfa0145fc7abb9f770fafdf39491eeff9b1b31418c56837c18d931a73c1022042cbf73125affec1c366bff7d0cb332d5c454163a4d1713197ae785c200b2d90 3045022073588f66faf8492bb1156e39653720e7a048216f7a085a12a47e4b38f681ac3c022100b6355b4c1a1180a6b1a6d76068c5511d8efdcac0b2726370e462a6531830d3b8 304502200ccebbb3c4e88b598a6bce72afdb792c87fbc8d9b86f3baa10a47bcf2c841a53022100fe6df647f42bd8f2d3b66e288078a1da7e836d51afcac1801e874b8cf369ab93 3044022011fd37aed70fa961818bf3f7649151a09041f6d27abdf185f40af07eb760da9e02205cea2d2c37eca9234fedb2db68cd6839429f5f9518a9a8f70e122064ded8433d 30450221009b88f7ec6f043a1a5077fdeb73778eabd3bc409fd284cd36ba01635a27afc2360220258d30b4ca8326a2b1cb41314d8db87250a4d36173366d3278d2f934aa78e75a 304502210094ed057dd008d50cd50145518c31962361ba51b8b67721b0ff3102e379f01ff0022076f8349529bdd9bd49afab0d28bf80d80eb7492e9b1cee99cce8e9e8ac4b2db3 304402205d9d928bae17c8d564083f61e02807ee24858f1d0f54c8e8d9afb0e0fd67e12902201d8a259b4568ddc32713937013d26e9726bd85450ad2876cf27a10bc2d16e1b0 3046022100f2b0635c297aa77fc643028ede91a53da6163b5d4338110854f8da3d4faa6eb8022100b465fa6ef92bfa6171141ad686c5eec49e37cf4124f106954380b9830af37bc7 3046022100c081e90ec0dbcfe4d0f5156ae50c3b04e71d0184c584881fbc88c44094eee32d022100e7c8dac2e058d619f831c2f9f169609ab4c7c6b9e05daa9803e6189934481b42 30440220686a15e7c771a98aaafc248f33cf9d65d99c70d0b19bc94c6da9cd476001fe32022051370fe930ad8c58bd924dc288e698accd7be123b259d8db4bcf8ff0278f8196 3045022012cc64e3b990cd48a8f66344f953406fd776cd8125b856e48c19526e99c1639e022100e41a4c2306c5c61774420ec618ecd3223547ee14c492bc2acd36977788e3b256 3046022100fa44159f12657fb49eb6a59e50e0c0e7eb02921cc023141f7db4bb323d5b2ef6022100c27a020b8e74766b545b9189ba5bde8c63529b38fcfc74188643bfe54b7921c3 304402205a5dace73d7415387403b40536c42317b3e51b8363b3a5184be3fe9aece26bd702206b61d32a79cfdcd457b96f2adbd1644db3616942425599beb5bb41d004928c8f 3046022100c9e567b828f54f99b40a989f5da6732b27c4f6957c34d8627e0d570db28f70f8022100995892b5d3e267d456b97543f34b8ef615f113b640cdef17fdcdb4d20f221a0e 3046022100e929dbb8d1b18ff7bf2327072032ad1a85f403ccaf4e1e81f4c204776ae049220221009ff81b06f8c3ae86e44d7803fc8929c1ce356cf88d878d0448b17ffe433f62c9 3046022100e3d99c405644ebda15dbd07f2ff4a7b20857678e5165722c6826dfdf34eab4bc022100ad3aba548025fcebeee8073111ac3b72598ed92f48ed9513f18900080b06cddb 3046022100988ba7c883b1efa487be3887e0e389df3b2d5c6b44eaf3ce66ea93127bfda9be022100a82fcd7fcf1093cad30699559915f3b8e3bdd4f158af99d441eb7a85c27be5d7 
3045022019ea6bfedd97beee874fc60fccbe5afb1325cbe2f4ed526f53769ccdefb5d636022100a85dc8fa65445572140ebb965a937a41c6a5d25d80a434d2c48004dbb6c7990e 304502206116bf7c15d847f73e3ef37d8b039dbb1b2caef1fae0a0c8ef277317425ed667022100d333f5a55bc70eac2953d20e5ada26497c61c51b2c6887613a6bc5e7c67d8c9c 3045022100f92059ab061fa878ca7fb6cd52495d64f3f84e7a57b9e4db2d4ae110fe2a711b0220616cf9dfff40770a135004469fd22675650cbb0f72340eadded4767822a9d348 3044022044384d76084c0321fff3df74e868d779b1bfb863f1d88382029377df6730b9c7022011e837a4ce3f49ea25af935f2432a2dc3f90702a0822a8db1712ad3f91a86bda 3045022100ca3ed53503582de39f7b118c1685acf6164a116ebfd213dee87caa41714071dc02200363eab0b7d8aadaa75c68ee2519b7825dcff2828ca583201d81606680ff77ab 3045022100a6a542a6de74088f8af504a8b53d4982b93c78780817bb927c1f7be52e29a61102201c7ecfebb0473d6c191fe9bd399e7d27306542ac97602fbf7901cfd51c74dfd2 304402202e042547454057746caebd97b64559e44e0d470801abaea6590e1d5ab55a3b0a022041d939e42cbe08b28a892916ef7118b166ca454d40ebefd719e71d4cab9dc8fd 304502205499582d86d3a489eb236bba0f9b698a502ff2d92c13a0229f491c5f4ee613d2022100e9b2a495aed5d121fc0046c2605ad9da7301035a68feaeb8fed585bd95388747 304402203a5683f271218e13eeb20f347f8786e48c58c735cf81ebec5d8ecc5b5a661bef0220027dd5029f7cbdcf78bbb57629edcbb926e2844d1737de3b42513646664fa7ee 30460221008f6549c166c4cd730c714d7dbaa835eb839d53d95e313b3f45fe143455896aab022100dc373bf719aee7b91da746cac0faa48c519f397e000ece692169e6637d1ddbe8 3045022100b5d9c4b0c5fe8a57018b09874c41b2073704fada26d5a44db94c6e96df0d95b002205c69aec6d0e1b9fd5d523fc4d7c4afa3cbaee1e75667f652958ce088bae9aa42 304402202895232cd3e237e813815a12ca2f8a97d610e2a7d14df09a49186f3360b888a7022038ee311a22686a4688839922a42993643a88952a210d17e6982bdc9cfef0f35d 3045022100bc8e7045e670472a0f2643681521f1d4dd5f989a1bb839cf1d53141dc6399cdb02202396fb46c7497d88c3e637a66e2ccf548df87c2fd030d898e96c22a9a9a9e53b 3046022100f216005fbb5faed48ee0e5e6760aa581893c4604457bb5a44692f4c2f514376f022100e23b11ae13dc8f65b18ef7703dd78d7f5ded45d32caa01139fc55502eb412987 304602210087fbfe85f20251cac67d41f1cc5dea2084ebace7c3c2d895471c2c7eb997961e022100b765259b4e125f59b792afb97dfb877d7ee25c7ceed8235466fd477f6cb24711 304502201afa4efefd57a0441c39a9180ba1271f65b5df274108aa88545d8cda35ab7fb0022100bf834874e61f0fbc8474080f012d16690bc344148c3699bf3b6396d58f40c2a7 30440220549a8d263ac9e8441d8afa92016dfce1170852a4501304e54e4d1c9d4edc23f50220417639ada458fcf4da7df6c87fa0c4731fe79fa8ca16c8633ea1cd805f75f967 3046022100c50c434ae7b447887fda39e6e054e96e18537c6bf84620908bb354663f41a1dc0221008722d3e3f610c7fa520531cd58144a1aff54ef7495eb6f04c2a44632ece58559 304502207bd9d07b2ecfb4c0c21219488d616cfe49cc68241a38fa0bfbafd202aed0d208022100b3771a513949182390839f373a9bdb26b6333e8f6749c34df60083d9337315b1 3045022100e53b7fdae93ee5b6aab865a0f6540d22feae8811701b604f0d007a9224a87bd8022005b5e02d30fd9d75b3bcceb11fc97d108eb06a4a501701bdda45eff08ac4ce7e 3045022100a8952de8f8751e3a8e4acc0ee9e4e99eebd59f0803a39f3395f9c907b7eeb68f02200b612bc3b5ecd15a32238f9ec5fc4f07425908df8953e59e72cda9dec67307bb 304502210083298265e667cbeb6eb5f8ffe56f4e3a5386aa19511943c371fa45cf0eb9281f02203ecf752d3c0d39856f9f7a3f1b6eb0968ebe1e5c54e357265e45ac20ae54044c 3045022075cf94a248060f77ec30d66c4673205ff18652fd927d636e1105ee8677a03b3b022100ad50cfe37310b4d6038b514743ad0018731d72904f4c0fee9cf65cb10a0aa088 3046022100ca85eff7baaf1a4494d6018e24757c721ab8668161dfc0731e2389322f3b038d022100fa69c428333048115525bb5b1d247e4df33b0c286966934b6ce884a07f20882c 
3046022100a097706312442012a99a264aaee87c2400a9948d182d5194553dcfb845d3b3bc022100c4c8807eca3ef651657a156aecb991d0317ec083af9b44fe9ba442833c2bb09e 3045022077e466b36298f8c4f5ff30ce57142017364bbe0ea4e2675dcd57b9211205a6de022100b79b661a154f5de3c4926b842ceda09775a74c6ae8c0c4317cfed3ad5d504161 3045022100e15d6efbdf28be19d54a60f20858cd44c1f5fc15258f538840c3507c651737ec022012256c59b2f3f10e11699cbc034322977f4cc006f54626b14d542aa35124eb4a 3045022068e58de369adcdb25b7f881eb85db311bbeedfb14dd55544720d38e03e76d7c6022100bc19cbcf7a205a110cfcc8043e4fa9c25efd5f900449713d3f4c5bd932ee1056 304402206a2b6aba4989b665908908d464c4dacdc893192bd6fbdfd6e96abbc70f6bbba802207a13799ffc62dc87bc3d7362ba3569b91a128861b2c8054fa4ec74d212241de1 3045022078195724757b665a92a96cc87b5d8fd34ed13f00511fd1d2e13849035504b8fc02210095c86c8b8679b9f37a0722f022c0677631900c413d0221912d8439812da74e76 3044022079be4d0074812a17b88a9088a041e17ae8f64b366635b36d6cc5df8b181e9ef002202abf39ca882227821775c251afc1a8e43557ef641b17de32751579bc19b88970 3046022100d47d8036627fee66d5708654ad77149426c3a8c2d35f71db5e89090f77180f3d022100f6533318bbfefe82305988da4baeb6bef769db8ca4f734a634f543ebbd106369 3045022100a754237fd0176bbe7960a2850457ac67b0950bb5ad446b5d119a62b26c02fd2002204efb1b5fc596a76a311d74593eae837feac0662b11e78cfced022efa35a033f1 3044022047faf1cf034277e199bce4820551187bc1f9f91824776e711ce99a7e8235c399022040c71e7e949d46ec6cfd0f37f2c6a6891118e5636a768aa05cb4d30012d3edf0 3046022100d868b6e5333fd40d633ea2ab91b0efbe4ff196e514a790e11aa06d15d65bb4430221008f58f8d8df3b3a9330b12164df36db9e8d9ffa9977fce476fb4ca78b6fd68d2a 3046022100b41bfa96f00c95aa59bcf4cc4d5ba26bf11688d60812defac18e6ec9b50d4fe7022100ea10dc0a0edaf663d85d2bf4ec1a2874ba9d57e9fc8b28e7d9b3493106317a33 3045022100be6bf26d8dc7b7b5879b2d14abfe6317f022c085fc7ce00767afeac7ac79aed70220026ffad94fdbb123bcbd3fb7ed60a2fdbe4755110660c6fc88028e0dbeb3c06e 3044021f1f4db547116c869fe13a3088054d060f97997dc9d07fc768b9abe5967e6ca7022100c68a36ac8e31b54d2bd2eab6cb1471e478078e073b5a82186038cfbd47eccd1f 3045022013ceb392cb786cbbc0db5febbcf920858204087797a71f6908af25ca154a9b9d022100f80f377a2632f0e8deb3451e75b90c477f80e14ad5b6f373c78ef2e3799869cb 3045022100ad54680b791382328996c4c91d3f0aa5b5c86779c2098daaba877a58894c9d420220752bfd2c4dca24657e61ce09ecf45caa8175341cbe5b0f00ac472498b60df42b 3045022022653c649b9b093f79eabd342649772e02cfb6ca456c2865f66d05a486ad3d34022100dc127c756348d8da4ffa31e1fe9fbb059ddcae44bdd6eaadf463946f68152c58 304402207110a4c1c3f718df3a4ef1c2bd0499716f4df59c236b24b3c74cd5f03c58979e022007e6bd1b4a7332f09ee1bbee5f0318cd703e3204291cdbcb875c4a64e4f82b6a 304502204c80cd561fb3eafc9c546d087307d868a286841cfbedab6ea98e59569a860eb802210086ebed7b60fb744f1b05a635094eca467b8b7e3dec26c21daca37670b7c14aa9 3046022100db07575f2eb0df405a8acf4fdcdca0c1c36a7a11edff344c2f0315bda75247d2022100a29663369813e7d24bf6c6174afa17f1d808d442c78ff4b42f8f1cda5cd21741 3046022100b1d138d02d01573384431b5026998dc9a3a5d27cdf190a9fbbc5a667af8093fd0221009e3ed0452624441c028504e86ec3c11a4ec403bfcb5ea9e47523bc7f88a280cf 3045022100cc8044fc66262319d0ae9e1bb9a1b79c87c782ab3301381b8ec9d69358e68f7f022075cbf58ba0f27ae090ed23c81188a03dcdbd83b2080527271f3605a13337ccb0 304502200e62a70fa0db6d095f8211ae4dd119494782d4aeced58f1cfcc3b45e4511bf960221008df2d728177fac61c910b2b7ec53e1a7122b482508174ad119d20dac69e7c311 304402206312e9f3ffe11f9c0351f6f166f823a8a0ab71d0de9edeaf305144cc494d38740220541a96cbfca1768f9c3e9033224cd6938a5b48ac50fcc3afe5e4b7bf9c6661bb 
304502203e5323917481616556506c75a1518176d2c1b1b87cc0f583f62f54db4e1364dc022100eaaf4ba33025b3b198ee0826dda6255b7f87f2134bb259029b19ef2666503956 30460221009976bb95c3729da4f59da114835a5b05ce2420002f111ec09c878feb7160686f022100e51158d7af583c2fc1ceaf58750e66b810604f2d3644fabec88d008c248182e9 304402205b44762aaba1c7c1ee1b2cd95f0ca36b9f110a18c016e0a9aaae632b4325bc1a02202b2c4bf8a4398a7a55116492c4cde171b2bf756115d3256901e15e2680a52f7a 3045022100a7fe027ae4f939cddae5091acc1918d98c7172fe75b7829ada319b52e7839ada02204ac0b2ba13a3b11611eaa5f7a489e8faa8efdd9e20051e71ea57ab482599d158 3045022100da8086f23072d50f65575f4213c5a1d0074ca22ca8d4842d608a92b39933e76902204525fe679a421877a1a4d8cee7373e66f017a3de81323aacba65193e85e63248 304602210090b6aa494f6e6616025acd581964bc201672759c80918f9762c27b1ae82aea37022100e69f3a71fc7440c4f5224dcd377eeea4f9554cd17a56fdf37a6006dc651672db 3046022100bc2dd70f33ed47a724df9c4c3e786c83904ad738a7eb415e81c8983f55321980022100b5fdbab3b084de09b9f6cb01ad717fa7f8f5cd78bf135a1f9b96c505f598151b 304502201a3aa2bfc1138165bf27808ae8ef0c524a2a1b12a9ab99e5867413161ef63a54022100d4a02d0f93bd313abf2b0e1577904397d077651041a7bcf6eded54328e7d6b63 3046022100eacdd9114078f4e089260d20c96e94a1e6162e7f5a5fc4d6bcb28e1531a56641022100abadcd00f42c3b7722bd6f1cc615d3d40e79ff7a86bc8ea1ed19888fc3e59b6c 304502205d8fb3cf5ec5ffac94330e0498b3fe9a72e5dc421beca481477adf11ae314e14022100e59fd26f7c70ab27632dbb72bee8fd8cfd6ad0f8671063ec65da6da868222a8c 3046022100f7e053b108d988507c74bb5d3b5ac60305c17635fe271894eea92126ffbc1b790221008e4c8da3f13b93b953bae3e04211b1cb2d5628d96ff0a9c1cf4cc08aed65a6c6 304502202bbe88d02f910d5a4b8725e519453133c251f3f2fb2fe68ee31add4c90928e7e022100a9ef3727fc2865a8f55f4a989f26123613ae335af586d29630480d04ee284f06 304502204065f9a6e386f6bd3597d10b5daacefb2d60de11e18c108d70fa8f7328f6e09a022100d7dc7ce9f4a3a4f7e2f79c623aa11a01483e01976ba7ef15c61ef5a5abb6b598 304402207943e5fa8417be5f9b7d71596ad2581afc398ac0079777f68996ecfa32625113022014901a019dde4eeb289a12f04c10e9b33afb3803d58774d9b595fcf8ace6d2fb 3046022100d99df7fe5d731c7a0d38723ba0cb5292b120b142eec1e3e55114b1a2614ebff1022100b5902ad87681c6266d3d1acf0496ccee647787153595c1bf37fb7d398c0e9457 30450220024ab72ed5902d15487a2e69a75ba17166a678509a82e02b3ea6085c596c44cb02210092f349cb41216049d70c13a0b9aa5ed0fe61a9e587a3f4f22b8d715cc89a29ee 3044022050623eec286774c552ae4fd628195481e22926eb2381b30da4dc3326a36b68a9022046af5bcd2d437d4d06b71470686226d0887ab82218017375e48a2604e89422df 3045022100d0e5db2b1515bfdbd7720ac44f35ac437f5941f8b37206a04e3f14912454c2b102201c3be9cd785dfb4655936b8654851a64a9d4afe1a1e45b2796e98a6d4c0989b8 3044022001fff1c5cecfc48e38e4b43cedd6c6069f7df7dc50f62519d8069773363d604a02204b2687ebd0afab9fca1a37f4ff7e6127a8f41edde84c6e624e80ba9a173ddf0c 3044022036d51fe07874071b8c50d9f2c456b86376a7d5103e2707c210cda74e951af95b02202ca8d23df977ce0f13ec0622884f87f0a6dff104c8680ffc36f7e12fa03adf1b 30440220381c87d1c9ffbe554ace07977a5113712b2bab1f38790d730a48418b98390630022045aaf3c5929dab27f1af96263cfb3827bbf2d557bd6e18622011dfbb1c8ffe3f 3046022100bc89e63611bcb11212014d15b3e179b8d709a7dfe083da4b8b320dfb13564e1a022100befcad4861f1cc556cc3e2de47d384e88c82b2b254545a6375f81b55e09eef8e 3044022026f6b94c7c99b78a9a260609728e99e3b236b74838485378376095cde5ecfe4b022055370ba3aa5a5c9abde0f37ca7bdbcf432151516b45a51717eaf11fd151153d3 3045022100866058c82128ce1bbb4602a7f1e7bd20069654a57134c4a6311aca8644a1d890022074af223dbe6df9489a58cbb92e3ea1ccae73cf678a65796327e0248c33cdbc75 
3046022100d79e9d6d6f9778c316e373525b0b4772553c3c831c75009b9c592a2545d22175022100fee70a911d3d5fcc8325ae3dc08536dddaaff5090c1a7bd14a0bf6629c2fd0c5 3046022100b9da144e536dc90becd4641d830fbcdeececa17d4b66f096f5fede180b9ed4110221008f60edebd8da868297487a1270441ba0f1db85cca678f5e2e407b423b3e26aab 30450221008eae9428786b8b8001e859ad8f32684c560f37261457a0b17b7d9b888a62a74d02207c438a98b8f15e698c7c792adb5c3e2ce570920804201c3a44f3a88e8e430352 3045022100b9e873f6287a3d1551e1d00eb8a0514544e8818b1f20c281d023d5d13961a38f022017e99b438bf41402fc2ae5a859e34d87e65499337c40175f04e09e6f5fdf368b 3046022100aa78d946014bd1f23272810bc743a0efd637587c3432f5cb35093b736f2c1935022100955abd79363493ba19183855267e7c9231caef3f6fd0b7b8e23f47321a2c88d8 3045022060b29ba8998987fd3487214f418d9f6be211934c3bf183d293f7cf9d4a35f5bd0221008f330bafe71a6001a96df28fc1900ac86adb3ad67950e2be6f9b6319e4286d7a 30450221009f41456f32927660423ad2f52bd14d04606022a101d150ba49fd4e2f820cc70102200db6e51335f54dd44fc147cc3f8577d460c67c1f8f1ef0927024b2def357f9ee 3045022100de8a1912b48ac3d064bb47d840fbb64dcb584aa6027ebc72be2a056052b80ed502207c227234cdac166b07712bf811db0487bdaaf91839a057efbd105aa2dbf2e202 3045022100d768b3268452d3090345e190b3177c295cff85912f1463ac82c2ef16279a8f3b022057709718ad2370582c7d995a158c85bbc6cb602b01538626715df884a306b897 3045022100d3d8f1e34e217966e29e1bf227dec52d2950bed5560831735b8011d5d072d8b80220053238898e6e640840ac45adf56bf88a7c2a732d6fce7fc50ecd9afe69261fd4 304502207c4453877419b05b3ee44668ea1ccce628238154c68c02ddbe982ad5095e6a02022100fd61b9bbd0ae40fb571aeaed27e29988592ee2e905a8dbc09a8739e54f58e140 3044022070b0135240f0bc4f75c75d7614d2adeaef2e2c851c745cf885e9c14a40099db50220399999636e5dd9a5c7b7a22c70863123418792729c3f2e99528920a562200a1d 3045022100f0cb22d4e7da2b87fb23434db77839f696f0c79fc9cd509ded381c4013c3ebfe02201128bf4c210d31c9df1a6342d6b6fb4822fc337018d2b71707c652f514f2da50 304402204aef4d49cb1e2ea448f72f0009fe01ea2277bcdc10bc005b751c3b1f8083825902206754e859e6c9aac093954aa47716b13d66a20e2fc9e47dd12528271b0dd199bb 3046022100e2f74d416273748831a19c3de5b47d03ab2433114cb1e3d3a8263bded31d0b280221009b59d21f0bf1cc5821212bce78aaeb7858c36d1c9c93257b9815a186ce64c4a2 304402205f0eabf1282318c4d284c3f1eaa06ef6143d369c59edf7b713634dd7a84f52bd02207fd13bcd74db1b4b3203e1392dbe9b8fccf86988af3eee11f7699d166c5d1051 3046022100cbcc055879da27c78647a4f5d2a4c7bd293cd5bd206d176153cfd28fdfa1686e022100c05d3f24025b192507ccd7e9619ad27b4923a7af575f77850394f8eec9c203bf 30450221009e7043ae01ab4fe3de2a895a913658095fa93a82c0451fb75ad702b5b52bd8000220610ace71e80867f8032d9efc86dbc8ab3f2d4f4046e09cf181ca9ba5812d3c8b 3045022100c9c8680c2b327c6f633d56885bf376b4785dcd54d59c26ee458434a9becb8c7a0220634763b6ef10ebbfe450bca15aecc6b5bdaa0f5a072139b628b78c2832ef8fe7 304502200a1599de6da9f730a47b0f147d37159c2d4e860af51b12d38babab6dc3f4386902210096c532e6b5252aed6009e0f15b3ef7433281853057b1848580b49eb99602f351 3046022100ab13518117480d1a599e39bc3972bbdf2a4f75f7f4b21820496b8a44ada1795d022100a06a03309cf2927d131471ca8b5e885e71af1d202f3e7da6fae3962d5f3c804a 30450221009111a64912568bbfec12c8eb44c873868191e3a72b780845f6e87de903b2f1580220421f983240a46048c6baebbf4c54451a8c260f2ca818b888f9aa1ee94203cf4a 30450220322e5d61e7d7cb51fcb8d77ad94b17f303258e33bf3e012833d70d34bc711099022100ec88fcb0a06219b5d38e3ec1812876e765ef63f09bcf68ea44c39acab0209f90 3045022100cdd0aa6bc4e7cd22e4110fd5bebcc2388452cd7e4a2bb3186f1e0952e7760ac302207e1eb79a5260a4a82ebbb30720d23d9c63c0f9ad27afc95f88f9f6f2f5e8d282 
304502204531b031c7490346f7b3b9c5ad1e14d85367873265895a581e23ae40736f2d1e022100931aeb3905d8f93fb47e4cf0c9614449aa04130c211a8451194cb9ca82769eae 304402201ab0e8cac440b378f6e7f1a1d0f373bf9b010956336ebc03e931086646efded802200c07ae16ce800123f85e83e9006a9ee71cac0ef89e290f7b0946dc245d72d947 304402205ee1a53f2f57cf1cea0896cb7bdc68daa1bf0608896470acd4f31b5ec11e05ea022018d5721c56dae501746b9d0231f709ffb65525c07fb870ea5085ddf294930264 304402203fdce6dd7f7b2338309329b6c91de8ac093f789f2102d75e510279326a16e3ea0220290ad83c70349ad5d507b3af5ac20398e3ba6be97a3c735659ea2c6c0a533303 3046022100af6dd431344fe134e838d821b51c30ed5239e9389558e3fa8cd2b4989da4f3b5022100fd03d36c1dbec6feb01ba517a76e45390a7ae0c37dee6a34723ab4717fe74849 3046022100e768a999c73531e85556a2451d944260579e33ea4e0557b6da45898527630a8f022100f48c73e871c7807a25dc335a2596ac38dc0eb0cdc8803b3b8ae4d79b8370122c 3044022014d0ae4f9b026ec1fc67c12c054d44a78b91962c1d5dc6dd2525e692231ffb5d022029366b1009621ae37382df46ebd3ef8549ebdb4bc50f946d22bf630970fde761 3044022073b9202278f61c4ca103ec8ca138cb046d720e4f44f32d262c2a433c6850ac98022075a7b91a7d748bf01f6fb868402874bff69ba7b4c63cefc0cb82d0c982dfecb0 3046022100de4865b00e232f8c66b1c4cb03035e60d4033516ae29f1ece5863471c76d2c5d022100db2b1a89b89828d1a36c87c96f8047a9f69c40d70e89a7ca57477173c7edfd84 3044022031858f6d29e8b52cb521c1b40f695a9ab699f9b1858679dcb034e9402311dbf202201d2bf873f2172b7f73b2c060fc337c9a7f955c732a657c325e3d26936a5c1557 3045022100b10821eb95f7c7d03a6f76e47a967f5885dc6f45818dc0824b52c3e716bd687f0220383806c6f21c07772a71a7d8d50bd0fcc04beefdc875cb53624cde32ec2c1ce3 3045022100d12c09654b5b43a5d1cfd2e439ef4cefe5b56b881e4000627e1f861380b9ad200220697cd3ed8439eb43d759e6809a2dbbb623f2c6b295ffa427fcdce48eafc6d525 304502207f2eaed3e995dd2f1b50484c6d40ae215cc0dd35227bba7abfb82212d8233f910221008192ed21bd5c9ce20962bdd793a439171a38faadd038242facdb8b9b1f24ce80 30460221009705f0a7ecc3ad818ad53090111a4e1775c84e6fab3c0848ca55077a0725efd2022100a09cc1d8f4064333c949dec87840442d0db1f436afe5e8ba774cf10156b39fc1 3045022100f97843669acf641866b413932b0ee607556af36adbcf50bd9bc71ce1127f5a2a02205ab372c68a0ddad210a7829ec1b64e9c06353ffee1582e561e845c5db1c6f559 304502206b76a2db1368aa5eb24e733e39edcfd6126e5e198fa88cfe7cb8fd85f9d9ddd0022100f2879728cce148e113e1de0f414224556a585b1795eb8630b12659a89f301a1e 30450221008a2215d22dcd9531a6a6e4a2d390ecf4770744061a8243d052ae0aa8b0da6a5b022061be688b3cd0a9b6eea9e7abd8fc334088844d4827da329da8501f53a176628b 3045022066004b960994796f8ae8ca4d081fa8be3cd2dfe9baa50ad8dc2c9b31f7cd304f022100833189deceb11309f5b347c3c3a9a21874f47e4a1ccc1a8409c6e80c01107044 3045022072845e99d6109add80f3a2c0603e5fc0e9448146f3d71b7ed538b1874b0be8f30221009d69684d312bd2d0efa65185ae2a0dd741b570e99795005392f881d3f7fa6ea5 304502205935b634aafd482065a1a89e553d4b4065dade5d17de01d0581f3ecf90cb2d51022100b24cfa3722627891ebcd28bee9a3861daf2cbbcc8452710408f8918f6194696b 304502203b4b78e9d3777e3243bb0dd28000394cdbc0aa9fa25c15dc6ec7df079f94060d022100d51ca58d709e3946ae719d5d7e3eb1550da1260c18f3125c5fac9c5da3f10182 3046022100ca602c9d9d29046e47eb48913e4aa27c9e14ba20f75e9a715ae7b72766187e5602210094bd34927eeb5ec50a2736166330f6ccbb7f6339bac002f8f7fe29c312d616b7 3046022100a826f14be9caf7a7afd5118dc4ae8224d9548bff81b0ba4d5d6752b04eeee97c022100ffb7fd30090f94585627f3eae2f409b634fe2e18801191a0ea5998128d23b119 3045022100ab28f877eefcc506c8572b701a61a47194330a90f72fa31992ee554d292e5071022034f3e6cc7c6fb38102ce99c6ab09505e5f887aef6c477e4bd8a53371d510960d 
3045022100f193bb8a70cf7e00bfad338111b8536fc5e28edd0ecdf114f6a10cf147e42c16022000c971410710063c88ca8d1f4eeec1e9063b4d215993ef98da430b2ff78a7edf 304502202e1c9e32064a222e319079cc0edd1b6ee7e3f3ae060fee09dec79972bc2b950f022100ba0d00c2458ec73307aba9ab8799ba07eab34a6a466d6aca9b71e691c50d7860 30460221009083878e3c3ddba441d192f7d5b508d6bb234b3d98c5aea7cd8abcb5c7afdc94022100a3eb8d83519efc5dfe541b3ece4fa10d784b0666a08cabebbae28f3311f90b46 3045022034567a24a18b8d22eefced59b912550b053988d8b5476d80dc52e124a98cf017022100a1eeb0211d833f80f42910c2351e76f0facf3692ddd6406ebfb8c1a6afdd0d55 3045022100e5ba6c591599eed6e66ade0e21969f337adac34008ddf1e59d5a8a0983a3a2ee022018e46670918fe5fdef4437eb749340ae50a1a0055307b79af383a58d30ffd6e8 304502207bc68f443e5ffd1513d45d5b121893f1567423ac5e5ea351fa4a3494a42b94c1022100829c0e83e2099a1f2b17078f330d23e46e5db9ff9ba4f6ce40c63f550600b4ba 304402203828b6378e9f21ef611c247609f0eb9b1b2223806be72b4530d8496be6a5028e0220648f5981588173d0bab7e95092c8fbc2a81c36ddec078b0d229d069936c50478 3045022013ab6aca1b184ef10374292c5c9706ed7224a86ea34c90117485aa56b6d007b3022100e9bbe6cb42f32c441c03d1a76f4ca61cc6dba6257b287c1baada8f4496a68c02 304502202eda802b97783dab35b58124289d9552fe0b0190984dce8f6446cbae40c65c32022100d2719ff89a64ba414d28682da2b4f45e9d01ec46a842914449aebe24a483a682 3045022004b8317cad79e0c679386a2e04370d8e07e6ac6ac79e77f7ad365a07c065d50e022100a78a428bfb067ae11d776b9853ec1913be95f2be631182d2cfbf98bb387a5538 3045022079bbe38b752429e25eb43efc947ef8db57bfcf4a02a903c5ec9575251dd6a074022100aa3c6ee3995c24369ad15fa33f210b0fe3a01e9732061e56063c3f74451c0b62 3045022100b1d0661a071cfb688467761c4ecdec6c8e634c257701c08944c5d148685fb9b002202ce64d6b5eff11fbd73979e54d24450295944cee3dd8a81decd9382b274037d5 30460221009ae401b336086b66d1c3971bb705d5e1e7eaa70d3fe57936fedd6501c843c1e8022100dd774c6c405a3df6a3dfb5074fcdb1f3da304ad5cb0577ca684e0148be87a613 3046022100893ff586e226b41ba7b12655d036b9523c830b8b4fdf30fa013e809a97f856b8022100d463569f09015ef8e701a7eb33e6a9dc85137d8469a1e766cbc04e2e1fa82c87 30440220790057f5417794fb46346b3bde3c0ffb7b8a19e746a3a135204e16272d0840ba02204d59149f5bf20018a32893f2905815bd9e90c85d646a5896f8587a61ddf4ec69 3045022010261f07b317a49d043968fa798231b0d029324030ce35680eebd4eca8334a9c022100a1b8229d2459f008cbc14ddda0fcf643044877215dda2ff93f6e4a010c2a4301 304502202f25d82c45adec09fc8fc287ec880b978ab537aaadc1f4e909fac6b34ace0d4c02210083399b0e69c7371388ccb49a3698460a681643d9e8a8b3599c455628b0cbbf76 304502206535d8d99563e0b450957bbffb4648fb6c1127c12ee0de32971c7d8a2062f5ff0221009cca74d33efbe7d66c3e6c5f96100f1117147d5501448bfef88955e7dd602830 304502210096efdfbd3252acd09f950313b83dcf03077d9fa63107891fd78ae4ad4004808b02202e07a3fb67a1b8226acd5666b968a7b894db5dd291f14b6a8f4a363065fae308 3045022100dbcce045ecbdb322b0d6a7eefe85510c948fcd135b1d15d537961babfd254e9d022023e4a3b53bce1d5643eac10cb37c4d98b8b096cc2e510f5c24dddff6412ef08f 3045022031aad59d91fcc220f5d5146fc026ffc81bd4a735b1f967d8e71edeb508524b270221008cf6bb314e97afe0327038ceb81db821e093bd05f96564aaa0269c0daeccc272 3045022100bb752cf02154639d9a73a21b4d4d81d82c36f44ab396b67526dfea462647a78602201cdbde4852606db63111893c63045015ae3d14717cb567d3e45b60dd398a095e 3045022014d16b08d9bfc743f9c3a4218bd0b551711d6455944db747974f9b0f9938d853022100bfce70f3479bc53a71b8c541224447485178f2523ed3e21ad1b924210a7187d7 3046022100aee5a955d719547b91880c3c0b19d36215c0e458ed3bd96338de730f7bef1511022100c2837f03a0cf262a307e3b48ab533785b8ac4fc1323279641f8f2cabf18f88ff 
304402202a59c020cf93c6a147bb8c66856109364bc87d22a9e3ee72e91f711877fc7b810220019099ef913e6ccebda0c494c5f97da769a36b13008b9bdbc32043dc0e39f3f6 3045022100e7c85de2b50e728b160d259f9ea1f4ab5c215c39f6855f95a89152c943765b710220089e9ed5887de17a867aaf99d9a066df0dee367930c5ec58ded1ae07ed9cbf07 3046022100e66ceeece987fe23cc38199417875c0d8b3d68e66578671207903cbcc7c782b6022100943f0fd9b7b2ceebd48eb097fefe99f28965cf1fd8e241be905669cf2a62c007 304502207ad1dad64adb9827a3c4c46504c5dfd8f693acb7ae4900afd4f8d8efe06247c3022100c43a5ae4768fbff95b50f29e9cc13e926960280abd8d2d52d28191c4261680a6 3045022100938790a2e82108cf88066d321dc2416c29f447958f072dff67b703fea85685df022001dc73200a15498f23f1e5a9d38ff6bf9beb38da512b6f4c4e98202fe66d375b 3045022100b0f12a4fca9ce058d6ba1838d0f9f3c3af7fb720a5ec63a4a276ea199b0794aa0220265318b80d4f9a1b5370bb9fafae6f90d51c36e453dd953049d5827aba09d5e5 304502203240bae40a086d663d14d7b11c6676d85e5c56c33ef33f227bf8cf715cfa4d65022100c13736bbfffd3a529d8b9a5092e869f31d098a99bebdd4f658023ece144e8112 3045022100e38500cb53cbac07ca20a6f0f7461ed8c6701f6fe2e74324393ab363a880d7c202206b8de88c6fac1959a88d3e1c7275abdd5a32574e05fd549d26b0ad68a95fc403 3046022100c39a7ecbfb25f34c5b836f0f8868b7ce3b43a149d359ef6a63ac512721acfa640221008542d31ab42a47915c95e6dad4f5f051f58dc8ed88c876c3502561de7f766873 3046022100c01ab8dce795df0490e70daba59f8b2ce8b226a03dbf61ab43bd6c8b82e1e38b022100a6b8779446b0e360f332deeefb5c9dd995c41624b4d32a7cbcf69fda4a12a550 304502204fa45ea186d765af935a2c5ee73c6f7f5d8a426109b2b3caf26d2f32a2f14468022100a864b807ca1c61d9dbd64e49ebc9b43888697d8e962d025592ef78b23495e949 3046022100bea04f35092c4d2addcc16d44a3b606e9df0262a47cc9ec8d5c785031661a451022100cf12e809cdf6f5e4d6a8126dc043f5f9e4040cb89a11a244de73302b6edfebe8 3046022100e90bf62c508d9eab58b792e2e0b272f48b11c75f7086248cef7253be3871b608022100f141475dcafccf28e27834cb47e3eefee270ff950f64fb63f53ddd8323061f9f 3045022100805d0a207723fab7815cf2ba5ccd57d42de8748ea2b8dc0cd72a29abefaf33db02202ca5fe744259b52ec03e4c0200a4e970162d2eec836a8242cf5a671477dff1c0 3046022100dc6b982baafbfbb60efa6a683440480047b8f8721f0e1543d441ac470c5c80ff022100e0cab1406ba74182ecc17fe46c52ee851e4c87950a526717849eac55c409c2b4 304402200d3361e4399aefa5b2c6373c0e65163cbb28b0db6dfa00549ddb0876057a88aa022014b83611c4908b8e3d4a3f6a60141d500745af32ced9384b5ca149369800409a 3046022100a8d4406ade9d95c61e6da3a4cf0bd7f711c7c60ef74d3f549821f7989e09f7e9022100f55e97ce849ecf9b5b02b1ebbfe68991c007d2211af13d69b58d5c7e6099a461 304402204aa2402a8bccb14d8328bc4a7d2f11c1f90cf9e5befc5b64d659a9b1ebd39e2e02206536bb0261b6ba66fb5270e3cb286fd1238340c7979a733c01f95d91ea3b263a 30440220225f9f6a1c85c5ab8bafad49f0462769dfd92e280bd6a55468642244aebb2618022028a8c0704fe314f86d2a0b6b623f80f52e7b7071e1b2566aee15e29d950feba7 3046022100c4b8306cbef1c2d89b4ebdd88c97b45211aee5753434ffe76ac7cf77f01e3c43022100ad7ff238757d72b2730af52af9213fc23cb782c5069026577c702989c39846fe 304402200d314a312b1c2c5f0f78a54c6ed58bb366a427af16c66edfc9edb6591e2d01a602204add67644e86b4a7596f21edbba97dadd689e445e86ec4e38a4c06e76d78b459 3045022100f8c34808ec4b83b6de14dfb810760346109294960ffaacb0c2dc77fa0664ace402203a688e0bf9e45ebbb1198954a1f6cbb39aabd68072036820f0da4d0d7c63fdfa 304402206d086c2309966438b2a1b713ab53d2eeca08cc7db973c7672ba61a19313d72ee02201470903adf2e40418d98e0ac7d55588ed4dd53738f53055f4f97d707af547b87 3045022100fccc7f5a49178c906935b6593fd4c20871d18ab50da00717244de34d4fff4317022027aefffc0a3f4478ed64442f4257c6a0639925eb6a4b7c22e3f698a184dd2fb2 
3045022100fe7c4a72c65300702c470d0a482e839ce55de7458c0c5d28a7ec61e70b1d29ba022030f85282bf585ffd77ec47489fd19506188114ede51ba040589a95fbac0a22d2 3044022053bcc2a93ff3a05c00f4b5f0ae064331b2a7a53973c93b2e6282ca9ead5a036c02207d5369f87979a32efad225ed40176fc4a678e8e89e6bf3a537632e8d53779cc7 3046022100e544e0a9546d916d6324ccaa28a71170800f3c150e0c8fdaac6f766eeb573394022100933fb29c4e05dcc03313f04308ed6c2ff67cc315d5ef966c6fffda5d395d6474 3046022100f1b3c0f51a467add4e6c0a53f99836ed71449ea2b32d84f696986b2eac7eceee022100cc126c9eb5ba43c5bfcf4759e391c09634d05c36a26a366936dfdf67b32efd8f 3046022100a9846709747167f654c183e0655006afb1fc81403e89521bc520e7bee0d8e4c0022100c4fa2666ead853c26445fe055914d0a241264b4053d6671bc977d86b6089145e 3045022100b844981c9fc081bfcf0fce208a008418c4358219a91a81b2f2b81efa4534118b02205969dc65ea8e37aab10ac49b24d15af35ed2a17c6891f436076da98964ff333f 3045022100a7f793c5eedc2760b590256a3360e21a13fa4a680c3545bcb14ab9854edd379e02207e9407ee9fcb53e46b5719ba3bfdbf24ea39863e71b381105687b19481e36907 304502202ba9dc9e3b040b9fb577e30c6cf004112f6a2f6d332ee84cc468e50e04b6821c022100f82e43775355ab16a316f1c665ca9c5e5eebb966c85fced06dc45d2877a5fd73 3045022100af2d27135f0152c2023669e55ae9923c055b1fa288cf8740a74f4bf3428389c40220631dd40d2adbdeb51cf4420a87b804cf79ecd2ce102ccbaa3370b594b0303ecd 30440220687cc41bee1eb8f1341ad7572a85528d58c4bb6047db0eb759a00c0dcc4cb288022059f2d89d9795ef6748b70c1267b1463c22a1c0315d827ac52c1010e927b1c0ae 30460221009e073e033dea2c48fef30b364be22623160dbbb226a90e2d4fbe30e76a4aeca102210096ff6635bfe744f11ef8afff6aff8d23ccc446c680acffacdb126cc8fd6c9085 3045022100ba0db20a150e131b53819b6c73609fae45707c7ea1d1ca960fd4a4a202a5bddf02205edd93a2345c3e4e49872ba06685a1338bdda674ae7627c5bf2554bb35b5fa23 3044022035328d4a75a1d96dca3c29ee085573c171753a6f743875d7bd33793056411eb30220393d58eea210263680a119157a99398b7ccda6f08e63c74f5e25d78f77199168 3046022100a5b300ea2ce7a9599b7e81b8b48dc26f42d52dc5f1d4646dacfbd8fe99d37f8b022100ac1fd32b408a37e10b497d565a74e01dd5e0442883c6d5966d9a5f4c40733da5 3045022043e4a0754cd84a3f09d87044c18bb733362a13b9219eb0312295300870f23d22022100ca8100b093061b2d1ebae0e0489b18348cbb85e0c968a6fa81858e1f9de89936 3045022100b67898631ad6fa29878435c0feb0d8f3a5aa0a0ff6089458aefa0f336c1d1b55022026d8e38253f5d67e9e15ffad17fe79c405d36965eb658e42687a7ea4a7096b8f 30450221009b2aebcd2b9554bbcab2b35693568a03c785a1a090b0bd1c67c0c7fc37f258c60220494449523ffb777eb853ef6d73c3648c374fc386e511314fa5930018a0cb5ba1 3046022100d0dbe60951d9abbdc2186fbc5dbeb0b815858e197f160b52a50e5f37461f491f022100aac9a0d9a88c32f4011d5425e7246ead1da3ea064bced5be6639513d89c566f6 3045022100c80215c0055746eaf80ed9f76ddefaa2bdb2f874c8a4b7f2932925b96b199af50220095ffb2c8e24d111bf5d8b348a11a881e9457d1171ae04bea6afbb90eaf913f8 3046022100f7a022aa02b11dcf07f2be6cdf4e8558e3e90127d87dea7e3c8d30c7d2a21f80022100d69152ef737fc95b70f823b411f39d111219747aa2816e354993ab792e6d0970 3045022100f2988f3664a4815b7a7ad6426171ad160119570922d1691c8edba2d5eed3c0c402204b48011084cd3a10b70b115ff10164a5d9f92e829968ebf50dda72883d62dbb7 3045022100e65171b9b6ba03990ce647934c983efdb8f328e430e4a86a6f6664c4fb606f5e02207f8b53fb39461b990ab6322cd54cf1640c5f9e729b9616809e5d6442cb546b70 3045022100ae0e483adfe0077f59241c39ce6704b74f04f6dfbf8b162bad5a36b35247606402205a9b6cb8f60d0a950eab36f29fa08eb3ccb90efd42e4183bee0f42d1968cdd0d 3046022100e3a181f3641b3e86d56216dc60378cdcbfdba4f6e2730ac6a8576f5836926bc1022100ad698aba77335c04d4bd9ac3491ee036e54f4fc5f02824164aba815a8966e882 
3045022100c1b89152ef966ae959de7f7f7eefcdcc9be75a355f41d0debe2bfe4cb2eaddd6022017be0e6f17859e617c9c37a9966bcc42b1e770a60b43cc80d1b609bb23e6a383 304402205af29266d0f0d93e7fd01e065de0901af5ef9008c354c961a1a00d4056e1e0c60220432b778d669c7fbc013fd07e80c10be66608cb938986608c9cdfd8708f959a1d 3046022100a35e1ed50458e6319a2a2c3c6e193c85625ae1e49598e4049810020376b77c2102210093b18dd9367496910e7bfc4939a55d372068ab65651005d4d95e72dcd1c0a3a2 3045022068dc713d2d720f8d45775eb9cc1513e0fae28d9f44aea53338328d77f2e2b9d2022100a504498472d4fa4d33cec75479e16888cb3dc68debb56fb92d507f90b42c3a81 3045022100e50878f25684f7abae40e7c7838724ba51b53aeb5a11c07e6ce7008a814462f7022006d270e7d607bb2b0112f4e605e0760edc28bb61f57e367e3a5d3fe8230af45d 30460221009121dec07c8cd11765b7ad61e98e915738fcba79c95f9091a31524140b6e07ca022100bd0bb5a5d7275eec74dc7a79dd08bf552da065553d0432aab93ca77e689a3eb8 304502207afc8277db6c23b57c822217be095d2881c2ed4ede23a36210abbf71d96396b6022100930e24617e04ebec6644bf8a34afae97a9edad5850ac4358ec3b5a86be370a6e 3046022100fc67e0b6de235b841f38b7f6c451a8c90a7f7cbd76e47391a53bbff25255f02b0221008a4cc7383eaddb9d15bba22634aa2263f4ad56d6c5f1f846071580ea1147b523 3045022058ad6f71b3f0f575ac329856e17cb5360e715e812c4d1db6a9b2b93cea94a22f022100c6dd0d85b8437f4c121ed8ec8e332c63ec6c5e7d419847f4886ec386cca7ad9f 3046022100b7f782e2b3b84df6b55786df4ee0199ba88bdf342d8844c56e51aaaf26b6292e022100f8dbf2ab7a2820e6d273e9f43917513a657cd24beaaf6100d1536d3c3e789292 304502202d36d4ef4870c0c4172c146ee0b90281f3fa8a9359d510fe0d8771c38b0adc8e022100a8c5b6aea49f599f69beaf21e23426e5f98f576fd57597d1650e5ae8375a04db 3045022076fac9e53553849e80f9d87aa57c07144a0e85da8ff041552a2cb84108fe452e022100af467775acd385578f0eab016cf53b6af3f4692892f607c2f89a7787d4c039b2 304402205c2a50a0e6f810ebe79dac97debfcd9f72f6564759c303b3d2fc833f38a52e0702206236104a9a7e636b1bd5222cb0e1003d35a562ebafe4dab7a58f8c0940799a03 3045022100a233068dc81b2d26a37fd7d5d7f1af97304949aead5afbe0cc2ea3daa8f22b4c02200b6616bebc436bf88f5224f1fcf750b0a2360b98a12606eb26169974abfea03c 3045022005edc2e8ae7232c0d17b1686362d07045f3d0ecc52ce591f585f4f504db2399d0221008547852bdb28602c0663f654dd4b40b08176da011aafe167fc49e7731e0a16d6 30440220093e5bce697a76fb720fcbb9a80651702120e8621db7d7216d17b5cb7924277e022025d3f7659d31d79fdfcdb3e87ab797daf8e118e583f486bd5376bcf03a29324f 3045022100e7a6e65d83e38c25784efe83d29ef703d4e847d2d61ac90b94658a2168d46cc102202d0e54fc0a9da219947fe6c89f62a4f1b9a340fdeef66860a94985d4459d7a98 3046022100939722dfd7d800b9d8211a89add08a35035498999a4b8879b6f535d4b37fb95b022100c1283dadf23b48119dd97ab19b8776ead6b49cfe9b6e8e0371a0135786e08640 3045022100c197cd778d66f080279ec5690efcbf4acfb1598badef85df8b2292a2e172297502201d3b10641cb1b7d5b37898c15a83665e05cf89be0fd1ecca5376d9a441a43972 3045022020d558059e9220f9599da92c690fd0cd3ea67db90e690783bd30ffe4ea9dbf48022100bfa4276762b912c10e08117cdd4542aa04e1b7140ade07a01f5291b5b92f880b 304402202d51e186e51e6b06a321de798ebaaccacfddb8d89e6ed20368a07d6115b017bb02201aca9eb8616aa8a6858d163b9b4e4835e64f96e6a6f9cc1256397e6d2b914f6e 3045022100b3913e445db8dd795d8f8c00747c1baade61d34345f8e298cc40a08cd5544820022068ac02b162c08f50587f4c5a905be8c1d9dd85632dbcbef2778c544893f57025 3045022100d8aa189de99eb49d5386582c55150a30d3f3f06f32a2371aaf13e8f814fc7e47022032b0b4f7a4d93b4ed7dfefa8bcdd6e0898a2fbaf814224912fada3cb0b6b9d46 3045022076e2e63150f233add3d37fb6bf55eb9dbc3d0d151dd72733ed977ea7c1b7ad83022100913e2ef349b49224236aa840245eabb565260e30d8eedad0e4d1c1634cdb67d8 
3044022014d9e3a0c5d9e596c2899451ccfee5855e91c26d6a642e55f04d4c872a67166f022037ec9a8d3de3deef59f663e40be2658e51a7c9b3354a01dfe7de181ed5ba4a69 304402201fba595737de4d839bb33bde2d2e410816f5593092a39489f463ac1c15810207022059b3f00bb83bbacbed5cfe00d92eb8b536d2712c23869bdae41f940c4d5daa26 304402201a9bd95044966febb3c63b163850007a8627ea60c742eb8ab1dc3d09aa5763a202202b1a759374df94705d24b5301465ff518baf190c99d43fed0f9b8b6e5e5aae4e 3044022076adc9cc097eb1ffba7d1b05c7adf5853aa7d4ffd77c7ea87e5786e2c641ede70220330f2751f6d8790635c126b6d3c5055ea4ce50c85b19012c00e9933a330da906 3046022100af04511fe990b3b185492fcb74d3984db61c97afec458a170c343214eecec00b022100ef0903262aa719e38bd5c7ba8f65b94f958518fe71a5f97a8a420a5a42fbb586 3045022100ed7e98d9c2428ed2b9b07c8c2ebe610ada80c4a46a7da4209435401858712ddf022024b57f8d19ac8b9635f8b3fbee624a5214d27dd27acddbfc9d83fc65f6d69d0a 304502202e6ff17536c56806e38e00e2ae4d7030e68845ac24ec3c161d1f2b46424fa20b0221008f8be4ae6aa2f91edb858efc4a430101aecaadcc4164b96055fc93907ef311ed 304502206b072e7b603a16272db3e3c58640a85f6f31354ed09f89a585f5e509e5162a71022100b2a67b80c066479cf76e888f6e93db978c143c5eee5a783d4b843678ccc0da82 3046022100f3e3d50b01c1b77ae3b395b63824df4558d8a9c962c95e16ce9e498f2472f10c022100f0490c8ca634db548fa7eaac05b574b06947d181b79477118306f1cc358ad6a2 3044022044b6f439373c6927a64fb29fdbe4f81868b3aa8ab04bae689234cfc24d0d390b0220200140639d010792b5bd97ab55fbeac57182c71ff69fd98577c319d08c18a50b 3046022100ad37b37b5af8240160bd6cf3bb1dc115a1257e1e606fbc79d7a047f3c392a22c022100aecf420b470671bad5fe4df30e01b79b2c23a6a9691789f46813059e63531f20 3045022100d27f5f0f822887a7311fe756a8119e75614b2eff79bfb683a8287de1baf34a2f02200f18cce1c1aa9200a059812061821af507a391d9b90edb779f5315b10dbb675b 3045022100ae8e685745e1dbd261da941513f97a5c68306a77b0e339485ab405e87d72b6ce02204412c916ab4f6ccf3a6f3227a07a8e0e4f716ef1e94327d58737b42c58975480 304502210090bf9b09522f81fe1004f096a28204018fdab42285a06a3d54d4022f2d53166f02207c25b0f34f19f487b6c4f3b202a95e32b377104d0589a75c82145f29e7f2a823 30450220727947abe53108f6511cfba961c569dab3aabdd5d2c3eefa6b596d1d23fcc78402210090cda11e4fcb02fb49209cf7beb17ab246e97d19dd70bccf5c30b52d6329cc73 3045022100d436bcd9b608c4fdb54a87daa95fd2f6f6ca91ea3c70f172b2c43cc52956c8960220662b81ea0045391eda118e39b90455c15b72ad4b23a318323d35ff7e0885bc5b 304502203bafdf575e631f5746bc0fb8f451f0b69ee2c1336557cc8a50037fa9ab4a1e47022100b9afc1da1c2ae74c167c44433f248ea79df5e942c554ffc25312e820ee37c7e6 30440220418b02a2d526463c0936c4521a497b0bae8217286b26a730d1148f8f34fb4de002201e9668cd60c4ab88afd011caebcbaab747f99a8438a5293a3b7027c7adb6b3f8 304502202da2920299bdccd54e384d717071fc28dcc69a57c9a6c0b74e61ca0e38bdfb01022100970959148fe472577b54d5926428a2ed4153917abd534c5f54e521285477f855 30450220396a843aa157c97a9f097709ba837ed99a59ef2d84773c67868ae2c06cb84774022100ab344fd563fea968e51ffce662b2985ed26eb01989c3122b7e47c9ec4bc982f3 304602210095892664179264b95045a48f75786556e8855b028abb7bbc378302695898ea3e022100dff9d1979126b22d649fbf1d76f0b40fbd06c187f7eeb17e9b97db0735763216 3046022100b78c1afdde001e4be42c51e29c21f531b248a137cd67a5e3d3bdcd88bc16d13c022100d73e89ee492c601f6494e6ce58c44872cbbeabfc742940a38c26a70b6cbccd10 3046022100914d1b1563c59df6c8ddd5a264a3ba24f17752c58250e560036c5ebb3bbff96f0221009c7e2a6be49e44564131e4cacbcb00877d655ca9dd3a7f07fb1f4dbc11907ced 3045022100b0aca610bfe5eff9c56c16e0f20c498ca5653058ae0174c379c9759f7b3e9668022012c997c28652185d995d672a9a105086037c16a918fe248fbf5545abbc380718 
3045022051b2da0711d89691bc886d5aadd0147b1c258912ab80c54a08338137be0d1ef2022100c52719a9ba563c14553db64e6ef6d0d8b82a60cd980522d461721ceeb7dbf2e4 3045022100df9156f88906d9ba02e4b24192b073c76283be24edc36578c4822b5c6eb9c64d02201abbeb51fa76bca59538cf174e39d49dfa459a0ebba771a6344283cf9e36802b 30450221009c216b18a71597de675bb7e8c94bd6de3bb0aa327dec87b80ffb46531df61b6d022054a02de1e13630e1bda34048e74ad326fa11c0e62854a545ef012f81b68beb9f 3045022100e88aeb3e0cce6a8f6717811cd850d3fee132c60c0a5c1fe6d8f18eb6521b5df70220190e92515d0e8f5cc3155965fee331e3fafb9c2904ad5dad9ef94a6e6172cb68 3045022100d87753c969b448b1914762b00469d48468ef040a00387e1a0b3e4e097a0b4e1e02203b1b2f09ec09b950a31d32b273f05c28e6f338580ba9b2beff9e9270f691862f 304402202c4db1f2ceda658e1c3cb57daf02714ee844976615cb978923f47b55abb0326e02203cfed8272aecd76272c0dd7a49a61def6aba050bbb9076725b23f965757ee4ac 3046022100e88929354ccd26c2137d57c8d3411e7d3a74533a48b524420a31be1fce550afb022100b7b3d485b294c7143b0b5250c789db5d9f2a0d7443a89b2a8d18a2c2a30ad437 3046022100eb28837ad75434fb500531c22861e3061dd4c3a0bcda0d7ccbfbe66dab2d87bf022100ad17eeac6179166f0a1953d6abacf72acc4a6de8242ee5919f60c7fa55e1abfc 304502203ab30ff26d8fe6bd46175058a8a5a2db089ec6c514ad737fbe3a094a1ca7d803022100fd0ac3f05a2b6d49c7704b3473a378a30612658365f66e8cd06c6325a6faf17c 3045022020db718172c3bb420a5c5ff8b19ffccd12d2a970c2945cb4cf220bd571f959c3022100c5e6495aff300361339170a4300fa46249dfaca9160e57ec4e9602f2c00f584b 3046022100e2219c95813ef9b860bbc68599541f20e903666c4faf9cfb0e912778166396c0022100f9ab9c26e6ed27414f2f234a104d0428a2b5fa572e119081a8c3e7f5ad66ee45 3045022100ef60f197c83d777162f79713dd36e07a0be1ee9db78442f3f4a5128a4ff554c1022064c7e4b84810954370bfa4111cff1e871e5044f7132aa95835119cc1c317cef4 3046022100f263fea90c3b53dadc8084b1cc399159e593b371e6c7ea1d8a406903c8b8b35e022100e4a2b3cf19be1fe31dd10915352922c312a76c3725b1890d5de4c72bfb1abac2 3046022100b593d7cc94904546b809290f4b59b8080c0174553a21038c4982f077212a6b8e0221009cb9fa611554bcea3c1b6836fe4abe68953fe36be4da7180ded5fdfa36e771e5 304502206f84cc227eacb76d0be097279f3591f20f6328464fa9a91df712da65f774ccf9022100910880474ca258f739d1774092e32323b5e7afffeb5c00fe70e22dfd1609aeab 3045022100f57656e0a0f3d9c61b047053df42cc9875739a0926ebbb89f800c9349e9395f0022059eeab6d3e783018cca535e7b6dbaa53be9a2d61cb216df794b5708565eb91b3 3045022063ea07653eba31f8f5f3907b414dd1e31fa5038c206b8b218d15bfb50f3e3b57022100a3b886ec6c8f16bf4d47ef7618d3ea539bce6ff3fb3a5ec4dac00017484d35a9 304502204ad3e2eae8b4242e1b4c15b73c536b33edad28b0b2080e3e894ce1def53726f7022100f834eb457dd24ab0b657fec4ae951be696a98b460e8b30eafe7ed824f9c0137f 30460221009b3ae71d600272451be218984ff76daf1ce792a839597e03a1f713c1765553e7022100b06682a48ab0ff52ccc94d44fab6ff0c56c9874891b3aa72011b3fd073c6c0e6 3045022100ee5eba9b54aa0c66db815d8674e91d53caeb736c4f7b1b3eda5e4e6c75d52f64022006f29390610a25301f5c47b3b2f926c6e7177d4870febe128915a030bb78060e 304502206d41d36d7860cb393be8da8a8af7cddc62e81e35caced5ff09b29d70711200c20221009a6ccef7a9a2d383c361bf2f1ee8ae5b871204eeb72dc92cc95a1aefa36bfedd 304402205a1eebc48981b686872c57a43bdb7744b790da10cd770672d0f466f5d15d54c3022052945200e7468abb70be3f75558328637b8b790a84df7d49774be6e69450df1b 30460221008681d385e3d8264fe7fa63616085273e4ef08c5c2a2dc213338adc2e03e5676f022100a8be8860bd1651690d4eedd099b7a23df136e46426baa847676c5f82f8553af8 30440220708ae22f7e30da6fefd0358f5395458f93f80ca51eb4bc4f4ae6039b56154ecd022023064f15f33a2d6cd80491dd39444687aced7dbde39fcebef836d8af4cf25a12 
3046022100eb05574b3617d3e5559f2359c427a26f82c3b3ea43784be1ef78d9b5daee98f2022100da48c8156386bfee2cfc9dfab550c853ebfa2aae177eab4e2de23b3df9bb0fa0 3045022100cdf0454415b49a5e33dc04ab85aa526af552296680d0e6d0ee7b1d40491730d902201b88fe34d894091c3f1e6cbf0e6a86420458e5c15628a5488b19ffe8fb0fcc6d 3045022100ac410ffd4b2d14d6b8c0ed1134e7aea4e647846abec00f66619301c4dafd10c10220785d20b87ea6ed2585ab963225dc9b500a0467f94c4ff2892d60342907018bf0 304402200dbe008905896ad7692a077ff5afb4759f7e8c40469e6f3ae8b7c896b66436330220018c453af6c7ef085cb7085b8ea83944d697a2f4817663af4c304db26824eecf 3044022021cb5385c4882c4edc7be3203fe9c235d3dbb6589505e73bd8bc329dae44762002204d9d56f7d1de5876da4cdb99afc09f90b0c4acedeaca7122767f8e1ea66dbb7d 304402200589d4f9a03135083717811cb3870e90af6342b4f8e31eac08e69292bba4d31d022067ef441aad8b2c7d14bedaa5244d94081761633d9ed8d7ac5a3547a35a2cfe94 3046022100e9af6932396f257d3a76974f9a1a4177a87ad76dcf035c8757ecdbc4c46c02aa022100e7955ea8fd054b4a4713d6bd3d2e8b9e10a3b28d7bd25b02a622afbb940ff797 30440220645de5aa58627b652e8ca4acaee9426ac032d2a23ca133f0871ec0f7d1364a9d022043f0641fa5310b0606019bafd49b6396d652efc73c2173877eb588ea0026ca40 3045022100af012f77e6bf1dfdb0f0d8120f71536fc546221f8ad96f73bfcf9b32d48801a6022015061bd055698366f7914f1013706f1c39363b3862f11b43a44713e0aebfc9ed 3046022100a65c6d198de91b5d4f826835ccf0bf9fda75f7109d7a15ef2d07806ddfaacc170221008f7454a8beaac691715244d6ec007e30a10080e81a9c25fc9d10cf6ea0235e72 3045022100e4b2fc554598e802d79cf2faa8a564a2dcc3d9023d1fe5f5a198e3b0ef292b39022051c2f1f91060538cfda3077fbfc0fdb60d15bd11f70fa7c1689fde9647e8b95e 3046022100f68777e4e2b5c79ed311d66d6e000e25ba085d53edb56cd8148192a76ef9761c022100ba8c183153256205d108d7ef60609d26b9e0715ebf8860d0179e6cd100f27ae5 304502201a6038ff1ea3ae36199c733f4ea4bcce071042b8e00b84049f69acf00ea02916022100856e3ba2fda128e8ffe466916edecd51ac43b00a08b87567ed16b984d0fcce31 304502204e131e0dd85873e23bb74683a97cc880ee1144297e05d2c952ec8909e6cf6f5e0221009c39ee6203531fdbc80af673ba59a6697b187a12f71894c8c2b0520812b39834 3045022100c72d629e72e77e72839be318e33ba78daad029c0a49289067b49352793ac146e02206c8bc57878369406c4f6a4780e09360d3f18d0624a59b573cc910f3721620722 304502201d678e887565b9673cef0a6b09d6557a9848d6190eb23fa49a3436b5050096a3022100a0068e9e3d567e0aa77d3ce26c587520c372688feb27fc2fc60964961e93585c 3045022100a6d7d07bcea2c9f45c356dc44e185a754d79a6c4db2344e14c26a89e96d99ccb02202abd10099967878483fb963f282fbf79ba2b9b0005219dae7ece133eeb21acc0 3046022100f20a1d3da51ce06f4965debb18f5c1ea7536c98fc6727eb86c802882d4d7f936022100ba08559f9aaa628109d98307ae88951cd7394ce8bc230c75963d5bfd77014d98 30450221009b8c044023a4d788a1bd2a2408403b205c3c5e09d23b542bf16692472d4e605902203160ce2cc9fb53ce53b7d5a47ef7ab5dd73f2a72b2af2e4fc9b1693fbc53e91c 3045022100dad599bdec9ebf962da7c4cc3707f2718001b11474df85ced3ffac72b90a1ecf022007cb6339c6c7cde74807f17fd78d88816bb6f91a031640aea6bde53170fcba35 3045022100caa6fb6ce7e899fa8027d3c08b74c59de5223e06ee46cda9f3ab1169890a868202204e344437ce8fad918d6e1ebd08ae4f54f3234842eed6934a5167b5e55e592804 3046022100eb74728062e5e17d39810f8f7f8e98a50621a33d1cf4b7cad19cb35c497cdde4022100ea383838d4550e25373e362de37b89494dfdb71aecb9a46716bdb69420ae539b 304502210097c433c77e82b9faa2b741523b5a9b9dcade22d204839652edee2f1181f84548022078390741409c23400575a84bd8bb61419c76a0578c74487a9e0b4ffcf4a9c72d 3046022100e4a12304a06b10e39a5c6e650e35378d59194dbc88ff27f3b2999186e000ad31022100fc619b6c635a3db316b2027b421209205f1b1bac43123f7fbd753dce48fb46e7 
3044022022f1ebff1d66e98c55d0b85c12dfe6ca6c0556b08932acbf6e211c3ead2deeb802207df6c75e1f7ce8b863ae77222949b7e070122da659b53f23ecd154ac6e13b6d0 304402204312d53ffad3ea513b5d5057ca2f99736c7f2436b8bd967a127213943dde1c81022015f7921957d10a6a4275b8c502313de1fe100d3b36102a7236e2fc5a0e903e6c 3044022078567d3cbd3f730e0008bb001c3f47b5b49d0a1f3e0501678fe53211c03428e102204e87e9e5b067e90dd6bcf39d612281fc86e7cf18baf0e80e843f0e348f804017 3044022004e0a7ee218e40a638a0140a3726147fe5eff6ff2ad734e0e20d6b73e3b751650220471bbdbcd7157cc9b46f316aff9d7c15862c8136537826ad3e8193bb55c522e6 304502200751195cc9d03319f042a68f381a8b8956c455a0954a3193bd99eb5aa2dee17b022100d1c7b8c3961eadc37decea47125443040efab0eb0478a8ea4dc87e39ddec1d13 30440220117c9dfca2d92d7d978a1fd7ec493bd34f35b27e9c26780abeaac74582448fe402204a2364c479ab0c97d5d6153a51f3e00741b4fb11bd45f1c5a5b5541cd8a5d47d 304502204e46881ec4cb15327c83317cc6596f4091575d324feb27fa4e8b64a142ded79002210082624d3f2b679c2aaca66de0e26987c235dc461bd7480c6355684ace610f0375 3045022100a2478d5f7710902d6cfa2b26621fb8ed2ae851f5a3734707663a4c15631cfe1102200b2d3dbb5174d785d5363d3e74622cc06e3955a5b16c01f9576272fb8d8fc4b9 3045022100a1586869cd2b2c5bff0a667eae03d4879ee89ab3f9956532d871d2e722e82aab022024c063101fc365823bfa030f65df01a65b01dcc7b09947952f43213d68cfcc9a 30450220084a75057027752d179541368d827deb412ae8d523928aa12d080869ab2525d1022100c7768efd119191fb9f0019539a97c18aec6219fb4227793c8af018df69dfa826 304502203c6d2bc7aa34f331a86b8b780c9f4c231d5a7e3fd067edf0b1149308a31a7d6c022100d08772a9841a78495ae321e265aebe9d35193dfe873c8820fe21fb71cf8fae67 30440220263f2df3eb428392645bcb4fcc6f970607d147e12bb5d391f6dbfb429dad4418022001d228e7b764e22bf2aaba7d1aa2e6bcdc4c06745ba5ebf3d3968e2095ccc3f9 3046022100e43035348676d9da746679bb1291a5e7d5d8474c6c65150036438c81d8e95243022100f8e4c0cc2a3fb26a60a932a9665947a308bd1e663a25c11fe580d3b22bf5b698 30450220420077194d6f880b2adc93f6382ce0827d5b4d9c42cff2b67390cdf0f9daa2c9022100bb316294a6c1b3fe816938507d19eea39fcaec29109a0b8feff642dea2a83aea 3044021f0ce635992a07fd8c430e3bb36f9776e5a87f972af925d465b0dce4c5321f6c0221009bc69e9f1bb8ad9b7c4d3d6a9e94d7892edbab5936581be3c945742cb2a0021b 30440220631488ad8cc4d6bd37fb73867117a90a24c00e9edb2d5219d16e6e960daf9b2f02203eb3d6467d44d613e18c2eaaa141a80fcec09eeab95fc1451747b9f34a9d5b0c 30450221008a36ed9709bc1cf6a9d80292e21d9825c2372cefdebc9a5358ae7cd3b2659057022037e1e706058e15c7a6f1af5fc8c15c1e5ad9c5c10a09e0e44bce8b6f77a0d3c0 304402202a78b6fd71190c828feb325d8b0a0297cb9a505f276ec8287f0d4ed2afe30fc7022012a14e4f109d38f1dbd6c41bf897ad1765e694c3663ebfcaa6a98fe51116280f 3044022074d8d3334d303a72594fbfa583e8f6f2902a973412585c306a0ab6f81e91a44602206a042160c93b654c5c8c0956e1adc0d1abc8e64f3f97335cf13e1d4bf97ba01b 30450220168d5cd533367ad6e7fb19402e47c30c8d8fc7c8b2b0f564d23e7d56b7ed39bc0221008234390cafd9632313612096e99060d90f44527a62d4435efc229fad4edc034a 3045022072686420e25f83d23f64225e2cfd752abd61f6031b94a5f645124ebbee4a4921022100e77f77aa9896d608d05ecf1ac691f5f2d2fe0633d56454df3d1a8039472f58c3 3044022100850b690b87de14837024bb673a1004f10c63f094b9420990dd3377acc6161bc0021f5bfdab0ca294d1c68bf19b995407a3aba3886b978e48bf1715625a14dc15e5 3044022049bfcf23ec5ad48748acea50e8c24d5bdc5387ef660099f1252148eff43801c902205b134ded32207babe68c52986baf7743ce0a9aad56f850baa60059a44c1fe63b 304402201a8ef97aa482c5e7dcc558abab8687ee085fad662578626b8a9262a63351d6ac02204af00e568178c02087e6c160da4c5d436e4d5f5098c446e160058f2be963a25e 3044022015e5b235a9ebf80ac3f4d889260d843a74f264cfaebf4551c63722cbe89345ae02201ff2dab5f41ea6de992d6756ef755e6a692c2ccb288cef79fd2ab8cd26f7c566 
3046022100b574cc61e8204fab3e5de8ebca75055c237c0fc677a0a22005c6dc7549f0900e022100fc0c32bebb58cc43b99c8a88c502af45106a0cf3a4095eb1276b7e8f9bb537b0 3046022100b8f719903986d763bc01ff44ba982e0c41e2daf198cd14dafed849f39950d62a022100cde3068c5319dc37aca8dedc383e36931b711df4d9fcb9825daa4c3b01595610 3046022100ead4dc362e1c450826a84a4896cdf5d988814e1fd99cd937f12f8c8caa14be4d022100ae89ec0a30e863224fee84d8bb125c570d2c3095b3f1d1edc78ea962df11023a 304402207fc87be798be7e587b8aae5964fcd0926737b667316542ae4e6e42a5288a5304022000f03920741d3df1b0671170b76c502b62448d4811009f8842335d1e23254b15 3046022100af890d8c9abd752d29cf3abaec7ff9b5479cd0086c4e4c732a6baf8bae10d7cc022100e394724ca22e05b383268b933f412316d151485cb8191bf0b12b043909f87caa 3046022100e7136d693e09b7ff67b46b9491b49f92c12a9a2f6b00b1b8b90d2f2fd3a36e93022100c35c25e213b7eba30e0d63e45828567d4743d6f6466aa99d4b0cda4308e0a72c 3045022022b32321ee7478ef1da979548ccf8d260ae3adfe7858eae2e08e04b7942c487a022100b8df428e13c8f022cd3baa6ca0a39542ae45f1dd8a1b7edcd4540490a3cad6e2 3045022100bd12141d6819ca78baf68701867a651f114f796539c505059f6936083051c0c70220600bbb4bd36150a180d24caa31e7ec1406910b5f3a4e4c07238f913fb2ceed8a 3045022100c021c61e72274ea8b2abbae0b47d631eb8ee02ee7d1efd33799354b9adf5c42c02207387e6f0ae4b64f51ec7a00279abfbd98b06f70757d29c858ae1455183eaf9d5 3045022100d679213301a6420592dddde31ab7d08307dd4c886885e6def328598226c1db3d022076a4b553c8a67227dc413e50307297dc6d102c220c24cbb0b73540325cbd8a51 3044022040d58545c999b300795d9f6aa68596a4cef2dc6c088a9c89cf3017d8e07dd6970220536a1caa9a377b61a609eb93180b98795e2ad025d1886398c375d644fa8d87cf 3045022100c3411484879c88a102c18434cb83635a87a256e428b5b70b04a142593248d3b302207e7d474dfd888d9983c5de26ac32b4d80f36a4fe23440bca45f26f9e2766b702 3045022043028a0075cd1adacfe5ca7a9c58dbdee7e5b96be6aa0a5e77f1ceeeb0f82ded0221008c9d40e5a3ea239d0702b35244183510db5f5bdc7803118e85fdc27f13a15a15 3046022100f4b6c72b95f7e9052f3c4e11692883bbeadfc0ae6bbbb2c003918eabb8f5cab8022100cdf2765ab3c98529db2d7a85f3fc41e31e7b4d03b580c47435caf1d6fcb51025 3046022100f42adf94257845ed4d4bd1f44131047e64339f99f74c76f7a1898cf27d5b344d022100899a4479abdff96187bff6712b4a20bb2595b87853db942e6bfba843e8e2c33e 304602210084137eecfae52b90eaf757f6a1e791fcf0a2664b63efe6daaf55aec622ca7191022100a79b95bcee6b46c09b07deea2eb177fa9ef0acf1a5b698690b76889c6124b280 304502205b62b0dfae9cfbd653aa120dc31ee20824181977c2c7fb1f492fcaafe20d56ed022100cb84e59529d2e7573e4c187386a2eb3bf85b9fb1f3aa4c997aed338212123852 3046022100ff6b9c7c79ec02e2f40c18ff3b20e2810b2c97228a0b0b301d8bf41b645f38f2022100cc7396dfbc108850fe0f7f7cabdaba21b678f98bd4dcdf227538c58cdc62caba 3046022100daafe5f4326d2fcee35cb5c4fcfb7a19edc3e6feeea69708916f44789a3d1801022100f0a265f6f1a1d87449e2f0b0a68c98a689b4874a2f4285484b6b7a9b6aa3ab99 3045022100d369f59eed5618e8364a4063cbc3052917518212eb20ab171c6d79ab9f76c75202205f519ae9e96d6337247343fbc975809974bd3bda03d45213f303a41730d80885 304502200eaa97e55c22d4e260dae7f64e8ce5bfde01bb28d41546336b0aff5bcf799a92022100aa60c17470b37e090af8d406c039b7bf5f435ed2f28e7edf327e9dc2f63c0497 304402205f248a4551e8f9730a9d83e936fe8c741c81fda35ef6390be64ac6789d845b6e022078bd1b89573c86484eeb8b2aa81614a6e3b6ffb8e3d949ca074a2577e40af293 304502202c0524498102ba96268015bfa22b15fe7e8815e24930010edbed9b483f576a9a022100e8d71229c27a6f12af92ae2f84358a9c43f77a133c7a2e530d2f9ef66857e34a 3045022047f2a2c08b8ed91cdbca5e158a98dac44a5be8ae6adb1e792933687e183e74ff0221009f3fc280f3ff5cbc6cc1ddd08c67016f4766f028d984ef2338599985a2a02c83 
3046022100a5ed43069cf83138fdb252beaf0f9897a0119f73de52178b640ba37fa89762fe022100a974ad2b90d889d8f58ee1bbc83763d916386bf9c0b22899ea54db238658539f 3045022018db229954acf6ea074810f10e283170dea4d58499b59f37c8892bdd7cb4736e022100b06e85fb33f6bf823d4d4e60ff68ab9fb8d470b41b10569634c0164a638ef73a 3045022100a645cd29e04083584043c495a04e33e8a57bc69e6beb57b6d504467275a959b20220425520bd8358e0d9a5e3f682dacea8d34d7682b0257bdcac8aedf25c7efad566 30450220130bb7719cb3823250905ad52c561bec548899e979e14ee097125a823b4f5ff9022100ad4b61e647090a74462a42360bc6f543bcdff41d6cf4ed0611eca5311e3d89b8 3046022100944936981f26f70b834bb5fdcdfe0042c6f3103efd7b8e94eafde7aad36999ee022100c2a0bae480bcac4630118153681cab61266775750dfb0b5c93d7e62b9f20f738 3046022100a9f74e98185dab635fed1914f96ca8b5ac78289e793eae63a5b0b5cde8e4a107022100980267298120b2655e483e7bbe70bc6fdcaaf891ac4ca3dbbf1b49c382b56047 3045022100916ad925709aa76150a8fff1e261d36cae1ffa842c3424843948220345428a8f0220794dc0741a7f6b8ecc8d90759d15e91ddcba079c963353e2678de50cc894c360 30440220412b5033cb3e26c130bdbe8a35e03547c563180601103944e2d0273f83fb849802200f8e05f1a5e6111b693dac8765075ed90414cb94a00a7d68332e23d836905ab8 3046022100aa41cac191141cc0a9a3caddc0bf2518e05cfa62b8089eb8978ce4bf6c385774022100d8c3943f08f5f935ac31a6a37fd7bacd9e2eced038afe8276c50c989b965eb5f 3046022100aea611a53ae02a317f0865040e2ab2cb529c100de1ff68a7206416771c3d80dd022100e3c2db08f9535c2f44482adbd809cd6dd16ee0a8fa0f6645197fde77bcb34d74 30450220317460abf69c13b01dec728a26583c8c28db57c956439242d54d25c7830ab9ca022100f213dcbd6604a74e2afc0091e764bdd9ed4016df66216974dd3571aeeae15d66 304502210090757793bcb1402cc9c159b8301817532c03f72b454dff33baf64b54cb71f5cd02201e69e3386db02d5ca9683d6968662025dfa7179259fbee54d3c11c2c4cd4a2a7 304402202d00caab18f16802660338f8f0a29312566a04247bd394bbdd5f5102727aef16022042feefafc3910900d52db2816ac411e8e0d823c85d315a8b04021f56f2e31b55 3045022100d8a325a452aad20101e0ba93dbdce493bc25a0b06e8e9babad06d2e7f326ba4902201f9e039ab355c98b59699ec7bc6779486567f65051989eb89b6b1486b10f4048 304402207e9bde9878013e3ecfc68eb918d55d1e30ec7491114caf236213e6366be64d6102205af405c8f78b73b3a1ab849548805b976a751c7eaf8a6ef613ca1aee7ec3ee4a 30440220575c9c7a3091b7908f21bd0060966a977c14d28061de6517015383a0bc2ba8bc022041a9b00b1aa697bb46eb901667fb782304356d2a8144ef01933dda7ec5c7e204 3045022100dd6f7bfcb31bd8ce020b423ea6012324e825cc71e03552b17ff0860ec99ae23f02207185518e04d0f110e0ebb0910fdcc5a02bc420b75ac90ed7aebffd96feccf5fd 3046022100bfc30d3ff733b3a111e243188e9e346598a267d47a1f1c93fbdb2568b2417443022100da873873b503c56bdcda3eb797e176f0e735864b9f27efd5e0a7178bf972fc20 304502206bd187b37d76c6e4facc8138467af2cab0ad3d94fda88ffbe7192225a2839bfd022100e59bca36af0c95709b63999e71a235a072f2932010558f164ecc392dc89d87e8 3046022100d7af30b4254d2f9428bbba2ba8df22dacc0e17f07c47c9c0f686965b177375870221008bcf8f7f3fe809bb4f37ae4162e71c3bc6c693021c7e9425df1050afb4533f59 304402201d330664be316079fad5006125c413643234fc944baf79377f30fd1d6077021602201e891172f1315e7454d3b4ed761e54a8e0dd705b01048d5737ada4cc4464ea1c 3045022100ea23c0999f6dd13bcdd692f6b9125ff9eacc6a9a3bf80f2f6d69de23b391f03b02201b9d12f275bbcd93089e3456794571c8e635d706aead5699be160da26d10b6d2 304502206dfe29aee8506aa50d61fc6eac33eec8e895bfc6eed26ccb9688e8bf796c220c022100cb0a91adbeddbc23bb092179339a59a0d7ad197ca713d21bc29d33a465ec829c 304402203fe8e292399d4171c585e87bf7d2d70ce352b4d72c5c8eeaf5795bcb51e764f102207adaa5892dff6e3ffad295384d6237c86cf88dabcc94fb7e2ef1fb7ce5fa4079 
3044022064642455dba2f6100412019a65d231087ad3354f9bfb5049bc4ddc78f56c786902207b4690c49be36ba1b07376aac352a18593758d86e5ea5660f119eb0d1ff1db4b 30450220797b4222eeebb9093a580098a0eb9c38eef13d801ed38a3662e4960d5d1c4fa3022100aea06b6e041ccde3def1eba5cc59d7303bfb777bf0dc258e60e55ae68880a53d 3046022100c02352d3103300739f2d2edbb3a180d22684d82a12e37e7259af81e7b2afc010022100cd197f2f21379443b1fe39b8ea95014fe85952b6ed0b8237ef05e2dc4fce57f1 3045022049c2227587ec9280a03a689ea08aff254f33358f5f5f1fd43e615587c4ca2562022100e2f53e91bd69a3cf95feb12c190a8f3b605d37ef8913de41b45ddf6f10a5e83a 3045022004f4b9480345c1b0d64ca2719b25ea1320a1fd001476b8d49fa9a49a91b1d48b022100dd54b03f5fc5b5d1323c76f633aadc0d837a24da787c39ec8696dff70bcefd90 304602210091bab683dd6ffb834b07979e896cb56eff01cfc85feeac1c5391583a4284c739022100b1a823e1a49f9c8f27d0dea70e3b656eeb2e9a7f803fe81008999fe3bd32e89f 30450220600b7d9f940c039cce6198827ce0a59e441e40499fea2be0bdd651f6cde60195022100cf7cf8f78d13b6fc0b4cb305cda8564bec3872eaa12d610a5c90b2f0e1c719a5 3046022100ae21c5a478efd7f61f10b4112f401aa835fdb4304b6eefb3d8355461b8e07bb1022100e45978ab4aff0bca932659e1ed6d0cbac57b86432b45bce3e782df7a4b60e8ca 30460221008685cfdf4bd02ac76ba8429c68d049dbb66155d3a46a811909cd2631770d69980221009fc0560a87a7bd216550dc3812a3febb7a1737633e096bbaf399541277e0010e 3044022028fbadd7826523f0145bf7f2bac66c22f2d377723177e3b4527fa34aa765c7e202200b9f72313bd2ac7c57942dfdb55e395528921329c85a55322d5fc70688607253 304502206d6f2687eba7004ab007e6189b7cd60a6301a23f0c79cbc3b7746a9ad235ae17022100fbf8440f184dcacea308ce61c493d2529e4dbd3e9046b1ab535aa498d9c0a8aa 304402205c8c60cbc426f2d354955c2450d20b8ed6612760374c89be2259575ec8b5c00c02200be2ddb734a148cbbfcf86904997b434d362a4a70e20d8554475fc71f5aa2812 304402204c538c3fe0b8fcbcc3e9ed1679249e5cc5bb7c735c1a0b2f033d213c1d3ed74c02200e5ba8851751765f4c833ec00709f5390f56ed060afd9cc3f8fd499ec729bf38 3046022100ad024d2cccb3009c3d03cf469c15c9359f076313c2f266bf0a998eee4d6a7c93022100bb3a038bc123f1512a2ddbd73a505bfbcc39ef9d95e2f4fb3af68ae2948fbe9c 30450220283ec6a22b6703bb3a8f76e3a2a1bf95f7c095cbd975466d7d5e9622ccdcce44022100fa7e49b8bcb25de4b3dfaeffb7a1e302aba086fa40361266ab72ccbf760004e6 3046022100a64f22ccd662751822824c04a6815cab6e8daaeb9949a46223cfff7140192080022100900b547d46d76e881c2b9b344a60d8046fc7289a9181dc75a3ee8ba2283c51a8 3046022100d68c63f7efb1772f1fd6f51b58e92258bd4bc72fbacb1b3fe39ef61471d60c3e0221008a48f69fc5ff69ddf0ea0adc173ec115e24dbc6eb2ffc6b73c5aeb5a3ac8baac 3046022100d2b31d9f2b48913d29c70b46e44de23a60597b25d7cfcdc2ddc2aae72b5973010221008ee799bf3209732dd23ce16fa00d544806bcd70c6286837805d2f09497304290 30450220379ce2c1875a592d54b794c388c4e9072582d0c8fe76eba1a343f8ec3dec8c1b022100d7f1d418beaaf24d4bbace5b6d89672cf9002b9c0e6fe8c5bbeef655222c5f3a 3045022026c2d7fa94067dc3e06a64abbf24ea2422b190ce852055f8c196cdf114d0eb05022100ae978e0faa3edec21cd7a30c2d307de9cc05ccf37fc6b74aa32c9d0fdab3a8da 3045022100dcdf4f36eb460235d1743d0100a7be83239d5691612b425ee36fb0063bd8a2cb02202771bb6007663871cc3775d96bb5fdd745f270664e8f4dd4d0085a72136e35bc 304402204a306f7e48e1fb0ca0133270b0651fe16fc2de074a3f2c3fae957ee08da5160302200772099b9580f52a06c58fa22c8b735a54ef3373181ced0017b862dbfd7387e5 3045022100a31a6f90350a22db40158b0d56601ad846a707c5bec53486d9634dd313b65faf0220511a8b84c7c222a927a38903591fbe6ba472cb804626ac1423da8d941ac01638 304402201d86baf2d6d0447189fd6ecfc462722d5e85b0efd81b69259aef95819bbda45202200cfcb171762e5d8ac8fa1951561b536f018e5f38bac969167d983a479ba0baa0 
3044022006a55aec216b06ac4d036b1cb73abd88b118baa97f24ff58e45ad9333937c0e00220689957c3f25348ff1138a7b4b509f19ddc02fc471301ddcc121d442c8f1000d3 3046022100a5b9fcb9f54f7ad391aa40992ef5b8b6e458e75abd9492baf97f2ef2a247482b022100918015d26355e90f5a1dbd91151522a921f227876a8080aca71828cff4b0a4ab 3044022056a17353536e97792eaed14331598d7c9ea88e361e512935e80c3f18823cb7c602202a45f8740a3bad722f937b2e2c80893392bbee59b9f16c7a25e7627b95d01e19 3045022100bf0b361e6a237c9cf351c5f5660598e0c1b918118179bb4fc226875996296e0702204bd5bcdff78751f9b559c169fa60476dcc4d71968b846d5257c58d51a9cb3476 3045022100889294783f6d670a3c4642f574e0e23b9ea911219819aae2ae90defdef5e437e02204ea44d12c17ab9c22c6e3fafe2560de25bc26998d47359c890fb3ef674c74b41 304502206ab30d4d01e6f666d4e10fdb94c2fdcec4bd744721377be968ba9b16710c7e8a022100eed981cabf55d50c30e087fee718799f48e99418b1ad625e3cc0d051a504fe12 3045022100873f41b57837505739e2f5bc32f89f4cbc91cb46b8786ebcecbe6030ad65dbc50220059347b3d6def7810335033c92920075ce81db73c4e852d13c83542cf2fe28c1 3044022025eef156caaa55434c108a2853e385501e873a92b6e10585a8d7c44eb4328c8a02207c977d2214ddf7ff1e671e3497fb5219e4b1b1e2094e2bb64bd6be8d45ddfeec 3045022009bc0363fd747506204052badf4d8ff494e32d4df4b7c757955f31db25b4d93a0221008079b8834b7c6f98acb3736f7b2c87b6e120a4ead0505670a0d1d18e6c5e72ff 3044022037fcc80f3de0a57692bf7d884a0658c839bf0cd6fa4d8fc9a1c1ef953524abcb02203576648180f68ef1e01ba1fb2d25fe5028c99aeb4efe11f9d9d69f851f3da6bc 304402204e3861dc60f2be80c46ddcac5121d86d03d8c2863a051ead0c97675b19b6ab29022074b3abf2f4e163c3c06345b076e6fbf8b691555a7fa109a131f2f3e5cbf2abf2 3046022100cbd8864342b373ed438a557d854d300f9cffbd9ce7687b4b88d6de6bb8ee73fd022100c016b165905c13553605e90a0c4fbfde77d2f0df7cc9cefd6b2c3a30b5232bb2 3046022100efdcd98e97ce1ed3a845af5715dece23c94a6f75c8de444b312fab32e58dab61022100b4265bcd1c1f5048612f4187959dda3da1438bd0c6d49c5aecd6818ba482d41f 30440220276d50bf5de5d9d7e6055280acae443db35232daa38b4fd5b224262eb3c5a5c202206667642d15db430668478749ee5b13d157832148c081b0555bf22c262c235b3f 3045022100e14d03c03a671d4e0bede8723939d53a7ebbde09d41af9c7cd0ae626a99cb9fd02207e38ab0c32eccd8e2e3505b3fdebf88e6df9097a1935bea7c6a72ed9c2c9143f 3045022100a67cf70294e44223945beab4b88bd06f03fbdc2fde0979b478b8a86cdba52c5d022034a29acbb23e7de92e74981b2d68902dbbc1b2a35096600d3efff1815c790a9c 3044022033249cb5c4b14bb16f1c24e233fa8b0d75427d078027b428178c2675074ccdf50220195d1a234c10cf5552480226084c12ffe0af1513ab8845466f97ad7be05eee75 3045022100de4bad23eff9d5aa464d7080727abd1a2a6a3d010c2a5797b2bf3470bdb936040220110d2ea541cfdea9661742b41dee23a9e7cd515bcfdc06c194da368af405902c 3046022100acfbd22612073767e2898ed68562511a50a2d7a0a80a681d94aee942dc4b3890022100ecd116e7f0d824bedcb2c0b0f6bc1f3e9aacf2b6942a4466d84ba479a9b8da70 3045022100c190f0a224db8401b08ee67bd7a6e0da22f0eaa4ec4fdbe5f3a696ee46c8290b022063c7ce54ae138d03042408cd669ce490173c886619f83e49580856cb113b36cb 304402201aee396b4c46aa67394e89b2f8ba2fb310dacdf705df270acbcd7e4de1adbe110220103fbd4c34b45e86d32b9ea00028a42be5a8b072d135097f994a9b38a0bee129 304402205cea6fb3156b93c7990a5c885dc3f83da005090d1eb4857d71901fe1a8dcf23f022013332f44c2d366fd21b8dc527a872a6c8f219f0c44de14335934acfc3a315edd 3045022100a24b6906cc648cec28594c224c9678470a401e21e403c734a78536764619fc03022071da63e1bc19872daa6ae655f16f1d7cba377fa3967aa7ef843356c2a2f27f73 3044022022b592bbc1c5d88ba9e76f15fd99a9336a2a040d7269d31449238423650600080220159ea3e677820eb8a05684a5374a0929b0ac07292db5513579d388d7f362b8c8 
3046022100d28b07ef1afebc3ee085795761d13fb7266e74a9db35ea85c3633604023d6dea022100b23bda1dd55ce5db1e0a9454fbce138c268f5ddd83b1f8ec2e8f2dacef52356b 3045022100dc8751eb21cf7e70bf7c87848d06ede3fd61f863a596f7474422c3f231e0c31902207c6be74fb3b5a233fc61fcbf553d36832e59a1b14b2689b541de636530ccdf76 3044022019a3d522f49a2d69cb62f43e83c774789af926deadcdfe56cb6e7d500e9c3588022051916ebd98ec7b1b1edda3c1b29ac61cff4cf6972d69c1b6ba30ca1e777461dc 3046022100cdae6402ea83c4df6e6e80001630329d38606efdd3446dfec70328fdd5d350a9022100c558a2c1ca80500ef8b9c2650f0b07f0d91706c1895da6246b989c9710b14204 304402205328194c1bac8539bef7c86543eb6609762bba58e166c1fdceee790a720ddaef0220616e0815d351bf3480665f188eb07075ce7951f3c0e955ed96bc8b360ac0777e 30440220186e721566c3fad06434410f7ce34ffed55bd5071edddaab7d3251a1725baf7602203c448349613f5a67ca3da195b26e97c888952b4122977a3e9acc316e9d683c47 3044022061463f8b3f831865bb001beb337e7b003b7c1bc109acfb9b28237f4ce9f3970b0220736b7a05ca949717ef4543fd73eb4c16d7d9abc74e298c44b53260be3ae2e631 3045022100c131cfb331feb9a25248a5550251a26096b82778b4748b67bd3f6204fbe4d39a02206ce0442bc0fbe6beeef747fa3b3d9093930cb4d35d43b2d32367083db6044cae 3046022100f54939bdbb7d52a3b1acbd5a5f681324d41d417996b9bdb14b1ae549401d5e78022100a05bdd740f3a64649562e9d6c9624299a0c1a3b633e901d47eeedc955894b17b 3044022046d56c583d0c2d6abf8c1c3830e09e386329e27d5b09a3c54c77c66ecdca009502204642d66825bde8f5cbd4f1a7736cdeea5a7dc5f57a1f197ad689a07a12839430 304402201f55c545c214474ff672305e10a3b4d2befc6e723f272cf4199bab25656d2a6c022075beafb76413921c98c750f63f354992526f06f7e7c08492eea014dea0c5dba0 3045022100f26e148c63c7278d2644832ec752f1321cf8d5335fbc468dc688ff5a7dc22ddf02205cc280fef813e0d55b6500878afc89fefe35ab4679c63be4f6bbe3ad63b81dbf 3045022100aef2ea27df377b484c207bd8667564bd40d8510bd025fe6944364c72319d7ae8022003d901d85b8576a66b4715b43d9b31bc0fbfbcc9246f0b70dda571b2d21acd5c 304502201815cedd97cd7e1ce18308a177bd32b438ffd218783a02c07671f1fd547bf5f8022100a538e2436bdeb40457bfc5d78cd185eb463c42adfbd82d27ff2df5989bafbba6 304502207025a320b3472568cc8edbeb13d3f3276b31f6a31889052c325936dea31d9d33022100b1c9b7ff75616f2a2e97bbddc1ccdc5815201e7c29b18cb1c04fe9fa7fb112f3 3044022057fc8cb090740d0d5b823d3e9482617f61931a2cc0dd06735262970043f8deb70220383ebea9d06454eac75ceab40d9d3e3f52fa66e7236c4eb3dfeeb9f4a4a421a2 3045022100e5727e0011706cc6c770af067525ac6fd69d151998b12bcc205cb1df1df59cce0220360536bcd25581f153c9499fc4eea63e4720c66420cee6bf6061d603f8c8e845 304402205fc4a1047af2bf23bff84b5927ecff77d622d9611f4fa33c18e3ccb7ba68f6ee02203c977ac3b735b8cf8de35d9c69a26f88e2f7fa530a01e165a0879445a35673d6 304502204cb611b845ff47a9998dfee5b7b813ae9415f34a425ddc0014d4bc1c1c3e825a022100a5b39fd684f69d466352d816979062fc710f2be0b295f643a3bfe1750d65db9d 304402206c0b605e33390ffed6015a4847b65fafcfb0136604673b7484fddb85dd94232202207c54be40eb499d2c145ba97620dca42dbf2e644b580bf28cad2f2e862cb33042 3046022100b86ef39c0af6931e808d0f14148d6f7806d819e6240a7ed1715908a82480d91f022100f1c575d783bcfedf8707877883248307a180ecd5dc161c28ac6566edebfa3ffa 304402200c8f248b6f0af27e258ebb35b89572b54c0e18e0ba54d34414cbdf1de28007050220082e9d6334b4c39b8465b769114a5bf05f77f44849f00d2f0611a0d5eee7adf5 3045022100b228989cb0f9017e6a5fae7d4b7c6194f6e9216081f4e1bcb576b7e80b8d8e6f02201e06789621b50f8233fcea9731f0350ddb66851682cafeccc4c1378a0a9b27b7 304502210095c1ca285ba9195804461e8b62845bdf0953739aa7e4eba8d263aa24dbdbf84c02201b64c02d2b1a29aa00d230ce27f68b1d6527a1a7caee317917ec7e8f78f4b929 
3045022100d9240011caceb75ab1021574cbc717a2aaedf67f4719f1758ed241694bbd874002203705d102a7eb57019d1efd3c514bf016dd7dc0171d88b032184a877a31b69cd0 304402200d5edb2ccc0e39b3883bb7d27b15e9c7082dd55a0ce14aad6823b411715d304d02202a5ec9d701021584872e1646b0ac9a47f3c19df58203719b48dd183dd8b7a4d6 3045022100e318bbd31442893ebd4083c93519fa800defddf91cd56c4409285a2dfdd886f70220291803b74ecce96e22c71f16e763797b41f2d86ae6967f273e5063a92787a616 30450220752781616a60d5cd5811d39e0ee1b1877597873b31e637175ce51d116940e26f022100b00bfade3ff5f8d6b88033f0c96d5126645c224f1c00b0a11082b968e24f1df2 3045022100d5c8381ef0f3db40c41fdc5eb35b039a1869f8a055f1d7a5dc6f08013e45dffa02201e68fd142a731ed45ce705e5c28ec78e4db5caecf4ba44c1de14ad5c7477b663 3044022010bd09b234eeda36d1ebb0fd8a6330d04682efb63b8471cd6e29260af484436702205336105f968c843c4387b510cfb0eacf0753688debf2e28bcb3a5c3d84fd4d92 3044022065bc97dc90c6cb6d2ed7e4a3befa977d8db9cc3cddd3956a00aaa389ff3162ca02205fb071984038bb892c045e9377a641d14cd1ffa741ac1ee1e0e57a3ddfe3fc09 3045022100ac3a9c459a342bc9e5594b6b116b70a3ceff43223a3af9866a5cf02d370d219702204ddbba38ea71b6e6654f7ee30d41a29975894a1d0531597ccd2462ba281ff670 3045022100daab33481a0acb3863a09650c1779d47268d4dcc00de25da7144f9fd899a780202200304a76e2e052d60fc597f2dd908c5ad8b49a9a8e04dbca10e8e9a26562360c4 304402202a4dbb8439d571d503d9d9370e0e5ba68b0a22b19308ac7f891463f28fa6512e02205f47c1cbbfdd03aedac5ba3d31b7f504e5168ea20f2d29d7fd0a838b7d848c24 3045022100895b97cbb50e3753cf6a2cf45a790c92068d0dd9932948d9bbc37f6ccc290bca02205cd478860bdbb3372a43fb6d1120492d30e7e2f66b65dd9812d667da0723e765 3045022100e498dc61c5dbfecf1f45a8273b64ec41d7243a7c6bbc35465b042b4b951a38760220516fb8edc30c6474500e2223ffdcc08734fcf0d43b55bea0702aaf6bb071b309 3045022100eb67b7d4c84df2a52d5f076abdd2cfd82b73d81a3c584142b8e8ae2fed0fa1080220238b9abc6119ccb828a70fc31cf3caaa40a71b121926540d4df6c5070417645c 3046022100d4445e1a485bb35150d4b1039882161b2cabf0084037a400f4ba49e0694f4a38022100b627ee4d7a972d2250d66b2a10685bb4659a71e728e9f37ffbd2c7e644a818b6 3045022100e409e1c41d63f49ce9709b1f6f3ba15d86320e3a179150a1f64a7c7eb53f207202202bd2afe68aacd21de8c5b1c4d6de8828a72c6bcc51572b83acff3a4547843a22 3046022100c08da787edce012067c1dd24a43b3b3fa7f8e821ade170f7a34bccc7d58d443e022100e6bc0241997951695ddcbbde3ac6c778dc055c275f5f6cf558a5c6524e781837 3044022068c9155ee6733fed60c094b935e2cb29897a796956afa4537c9698c6b9170266022079d1d37b7d97a73313b63fa2c7089cc157fb7976d98c7e47eb079f492f709666 3045022100c067b42de0531afc03364643f0432fd36fadfc4f1841b719feb1b45cc5ad183b022030a26c630be0ae5cd674a01138defefb2648cc5fb156954c0b43cb6c8c454642 3046022100c4fbd8bed444d650d7efe5695f6cf76598660d764968f41429eb52a4edd95c46022100e8fc5515c4c9d5a89d5b0ec860fcd139ce37a3f1bb93c65148774d98aa45f8a7 304402200e28608801587de3830488b6e5c8d61f01d3dc49094e926ec3919870bd50e75c022065d61ef6b7ace21988c90e93e5a239f9230b01458700076b72bd47ed1a3dd24c 3045022100da906e74fb0a2083aabb6fcd91f8c7cb3249a0774d075f404130e410bccc29350220297d69f35c5a9876623a67cea97bdff222da68b2a5f5d05bfb4286d6bac94b78 3045022100bf6d002d3ef26ceebdc39d7578b767be23c5338f4f64278a08eb1208a58a1c1102202aaecaebe28d21d87b85db13599d276ea48f3ccd469fb7dda6bcce747b537590 3044022036df2f8074b35d90e2bc34bcc21987f09b1c5581dea83bc2d9bbdd6f25f4572d02206c079af95dc9f8959e1802f8e1138db13add72c5251002b64bda51cd1403f663 304402207d77b4fd1ae17516a5e35c305c9e41a9a4f7d471deb4857e531e47920a04d64d02200aeff744325fa0e95f1b281c362e4e88dc3a5fd60491bab4227c03d5a510a4e9 
304402206c5f80028704c3b6cc8f9857de60eb029cb52b46ccc77909a56fd6b1f6b65381022064649fddc47e2e3cce96aff765ea3b415eaac2cc414cfbbd56ce3e9c5e336cff 3045022100a284b289acfa3c2596b8b38ff204f0198c11cf63dda75ff9424e95e2563ca5430220133ea283d499d09c79f192a72912653c6a66012bf03eb489c0711efc8f708103 304502207400b427bb384c569b9f55b59041cc21d2110190273f2533a366b3abfc7be1d4022100fcd386dc115674136b83b5b198952ce958ca9fb615bef07c9e2a0826bd3b66bc 3046022100cda96d5e7bcc55c82692734274b6fb9fe23e86239dcb8c092c5360993649bd6f022100fb610fb69e45e8b938ee764077b4eef0788b2fa298094f7177ed3bf80c313845 3045022011a91e5e6b1bbcf76c6468f7cd4c68dc9fcdb574a8b43278120785d3ca5ffc5602210091c4a3f3edca37e7bfe4c8032133e8c91c4f050cfd7ce5dc9a4e214e2a753e86 3045022100eca234e8550e25f4de6481de2a5860476ce2a371ea1c64ba3aa95de25922ca3802200e47f22ec6c4f9b3e020e7d42600ae3e2c228268da11e8a7de17f7c253d2f18e 3046022100a8749ebcaa0e5a7d32e622eecb489ec354b59cbd3166bf129b79c01f2ee0b5e8022100e09ce8a232c84aecf31b0f4775467197ee5ebd58733c3a3e0221eb8f78160ac9 3045022100f1e10bb078472a8f714c34416797da55833a9338571e2abf48f90245492f78650220515accc47bdb0f5acb4e64ee964b04f35d926c8b9471ab48485c5b03c6367f16 3046022100809742d93bd050464f4d3955239ee1052ed8d31157ec9fec0655e8ce3200a6fd022100c138898768c6f18ca40af6868911dd343e1dc38610dc4ef479594961c58a5bcc 3046022100db81776e4e0016cb7c04282502a9a48383cfb399d8e9e8a86fc5a134e1c29d1e022100f1b489b06f20912a492667f99bc453e11707149b29c818ba48a5537406c58273 304402202c65499e52019730703896766c335005340847b6b2d4cd1761f836b180b1bc77022046103187bc8e4168cc79cd77b7d3f04501f4f07b6b54107c6f2290fbde8aa7a8 30440220615e5191f64efa506f38ef88f8dcd422e31f511ca721d8c4cbf9df6684410b6602206226b7ddc47748b87b6eb8719f3a4c610f4921169eb4a07d8fa173d00e810d5b 30440220270187d150e5679df8d618422075a5b28dfb5fe8527be0740d907cb18e5989ed02201fe3baabd0ed0989044a0c31ffb85e79d230a030f8542bcbf54ee80c8db91cd0 304502207ba979e352f383bb7592e048fad86ab8baffe081e0f7c38d6ada14b769141a12022100cf0a613b4361adcbf9d3ebcdbe4451994ad314b803ab08025404b00b4503c6ff 3045022100c7e02279143c3ce1a8e0c55628a48d1b3cc3356c62002a666384da269dbb0e6e022033f8e8633c984ae8057f9589cc372e3ad9dc92766647af859cb99336f1e96829 3045022100aa201d3326360b0f10469061fd079b4909a5f2fdbdb9821ccdac13eb12deed4702204a1a197a5b6600a40c5d7e6e571813168271a8b4bba2717055bf74585b9925cf 3045022100986972c54c350340c03c766dd200571069d7fedca8af5b234d6cbcc2d884d9cf0220314eee6f5c98d6e9b6240559e255af6004620b4c2988e16945d1a0f55691af6d 304502201d3ce3188cf112acd0404a6611de54b9b221cf9f71065ed4a4f0a52117f5dc7a022100e45d34b5c16ac4d0a7be46807903e588cb3b7b6b1527968cb7d9551a49c15dda 304402201ab7e825b9d2f003b0658a50bd03da960c07a060ccbcc725b5d8e36b4d3fc23b0220615ebaf1c5ee86f18393fc39d423adbd2fa574bdf303860757c97d889d453ba4 3046022100bfbd4d21763576d903d2698133c5265f27301aed9191c10f2c9410677076b220022100f539346b4a3aab0fd0b7e2d82a980d12cb70136b1c84233576f9ce6d5d3a6228 30450221008dde754ddb25e1b082d54f9b2b7a1b875d4e4925b9a39720e81e5ab2b592980f02206cc1473910f9d79ef54875b9b63718577111e58050bc6c12ce98a4f454c47de7 3044022004310f1661026195507f88eabb0851c4b1162599f25f606a1df9d89885296f6102206cfee76f7bf22cc284fad4384e01c990f1f2065eb5cc82de243afb6bd0cd9fed 304502205a84974177ce9468f2df6922c854271ebb6625b038c14814cd6d39642dc444c4022100e43bfec91de2ec04749f002a20ead838409383215116ffe95589b9f35f6b0788 30440220362a327f776f387a7725a69e73769351c09e445306d7d077eb2124e44779908b02204cbc870bbdd6704214fbd01d157ed276662231fc766ea93b46bfde3176b99ee8 
304502203b0245a214783a1db4ae7b3d03f77a6a76bb6f89ffc533fc17a6065bb51d12a3022100d0691b24475c1132435b2068dd41394ce178b2ce6da7eac4dad9aeeb8082b35f 3045022100dd639c0cc0cdce9187131fbe438393e4f94b92f19a48c52df62c7b189d2e1ab602205aaa4229dd7ec08cb877494cd7eb70a8002bb596fc2d50276ca713b777bbcb23 3045022100e51c829a9798dee087a176beee0340a5be2696f9101b5a510f25edede4aac2b402204aac1a7e12850322f1245a23a65d8c6c97596a3845770191849e4347df5e4ba7 30450221008d2ac237e5e8f70abd26867d142efe610d3e690fc5326ba46b60ed1ebd40590402203c9b271210022cf3bb7a3b5fa1ac66b7af603e8a13c9d6b81e122478b0e765a3 3046022100f04ef6ad1aa04e89763de9070a3f376a38ddc84a70e09bedc17d71170a826ee40221009181daed2eed7e447e2e72407ce6307b6d2e1ca5c73e9dff9b8bb23ff8564e9b 304502210086e4c60080d59ca1be401d8dcd229ec3234e95b16a71ddec5dac285363981c08022050daf006c069e51cb64070141dff60d67d0fdf3e5dacfdd497c6ffa9a53e337c 30450221008f615328e815e9d1756ca9ad791db5bc34de91a3c648f9a82939966fa780292a0220547186f2a9085b12210f88f982019301f111ba814a5f59ec0b4f31b09895ea63 3046022100881766fb48c491932bdbc56254de5a069fb9cb6fc4d3f00f60b72e6ee646e884022100e604ab0ebfda3cf59418de45c88c780e87df38664962c4a02460d90386fa1f9d 3045022058b6247049b4d9aaf747090f8a1d1d8a22ecc1b6d5a28e8739f2a89183c2d1ec022100aa3904f93bd37474cce8239677ee2b53ce5e6e99e0b9c6114fafb3b9dd98167d 3044022025d35ddeb06273cdea52b8bf35f578abb509e52564179ae8bf61e0b97f00b22202200bd57d63c8e84e546b2ee939d2b77b20f464bf86c5d3becbf3be13be1f6418af 3045022100c333e1c4d4ac83433c9f2c2a51c715677d0772a1ad03f7cd6993a6c93f85610802202f47368421ebce1821e266c25f14d73a615f746c916870d55706df1e3cfa8aa4 3045022048ab54432ea716364141e96a55fe175543a7a153ff35b000399f6a66047e60d0022100fd97fff6ada5184a9b23ef04750f9954d83e4cd1df1497ec136eb6f06567615e 304402202f78edbca08fef8c36147b423b1bf6e7470c34886ebdd5aa34b2689b558d88bc0220579b364fa2371cce9c0e4a72a4d34436984094fd633f23f88a43ad066e12fb3d 304402206ff80aa022747a7d9d868b988ed142391b79ca04890848d3a45b95eb1f4e5c5e02207b8045339b10311cba62197269385105fb093cdaff08c855d14e493a19fba3ce 30460221008984d6ad83750d4dadcfbefbf7f834a2b947d5aa3a59f78c81e10d002ad30a9d022100f96ff6eeadabbca21d7d10ba173340e669c92764392f40c917953cbfb0350811 304502202b07113d8657b42aafa1386befe532b619b85ada0d2170d0a2cf904316000c150221009f6d4cf7dba7244f1315213416571e9581c6c270586ea00e70b62acec500f26a 3045022100854ddd80771b44b5aee0ac4a769fe61d4608868ca24a3c57d70d27eaaf0ac5a102200506671eab3c7a4f2636500bf1e8d67d58ee1c25784e8dca77d35146c17c42a7 30440220739df3f2b19624045a32cdae87be4b2fe42c79f154eabd2f77392624d9a97d0d022005c9c86e3f43c1e15b3df6575363145a5248101a9b7317375224e1992a395bb1 304502205e723a29abfc2821b26220dbe4fe4ae0aa7dd3c7a1ae37c256c1a5b45ee0619d022100ff8b23a79cf13d496aea7dde3adf4104ba5ad29afc5cfa552a7fc3405cc3afce 30440220698d9af4c81894e249b4c919b7c242e9e41f0d3ab9ca29652d5dcf99742f739e02205c33daab9bb332c8548dece51f1c06e841dacb6a1bcd4698c4924f852d15dc0b 304502206c130645bb85c1505cb4769b0d64dba311040169fb836bcd627a60d2323eebcd022100fdc365f66c582f8b7ac875bddf66ed0a8106881cf125e6dde0d55c57a22bf656 3046022100b5471605992c91437380302ce4c2a2997c1d86e65d8f6ff070d7423de7ac95a0022100eb55075b381688490b68665ba621ba7ff2ecac42964db9c5e5cdecfd9775d3da 30440220130bd6de3ae3712b0078d8a918a0338c154effac3a03cce626799c165e30186e0220028dba709e1e0f34ff43c5fdc1fa3f9fdb9c5ca30254d087bbe605395700b948 3045022014b19df7e4fad6d7b39d1d30bc0508af4c87ffdf12bc0cd445f68518a66b8983022100b9392a0a0b36cdf38455df6fbea6cec16ae2c749af40542b8af395010d3202c4 
3045022100eacebadb40bd6ea7e23a4e52791f0f381e8b28c91425f122887994a3f24f2b96022002c65d3d65a1026e29f9a218eaa4730557954444eb0a0fb2bffdf9817ed20e53 3045022100f64f3386463b33c1d8b2baca25fb1d5f6fb37723965c738d37746516204dd000022061073178502fa78f9a3e254431b1adb75d82633657598d4d62a7f5658c5c5fc1 304502202dafff09b84d9b5ba8ad4864fe37a88bd6048e04b9911df35d8b85d89f5f6b6a022100f5804a9e17aa5c322eaa98b20c2e953e185c85602bbf9c3c5cd00c0e6a24ce5b 3045022008f392f79819856117c104bccb8e8c2d416d4d46900705b128f38d62312bca7e022100fa5495430efb6da569f1b1055ba3e3a3a434854712105c1a97cf2f553a537ad4 304402205d2079eefe8369216d9142a75117ed0698ac077538229ba1379083f0b20050f4022008a3e55f3dc3bd3d28d5a6f549244af4a015bc73dd8ab7aec12d27c3dcd33a5b 3045022100845ea9a2eb2c23248af0bac65ea7f0c5b65edad85cf128861df808c2a6a017300220617c84853ea3260c6fbd7f157057116747a4c158036458740bdf5d6bb05f6b15 304402200139ed8349c0d5cb5ed0c7346852e2c3af539dcd290cc3ef22ba012f6318998a02202d86064c03e6c312d3676930b0eb2caebe96c15ab7c8d251dfdac14ef29269fe 30440220012524f97d6722c88041e239a89d5eada996e0532869a5f664e4b83135155434022053318cc2aecebdc866267bfeaea8e74cb8d4a6cea53f9ac20bdc3e38eed4b0fc 30440220131952ebcd47421405277f24f2bd037fcd0d2462bc06cc3810226e3a379c3c3c02205b4efa0c0a91ead67c87bd212c876e8319dbe376870fb79a85be6a582b32197b 30440220351276df789c16f6b9bf25582ccf5e3cf34d37baca70cd31d0d671746048a3f402201ea09864750b099a6214d192f2701548538f83967ed799c0aa95da79550fd102 3045022100ab31feb1e62b51a9d060230b778d3bf8b377423f249b0676ead6c810e74344da02202248e44723410c633c2f2254e1f0a6e0327b55a8a4985df6ba69ac755b0034ac 3046022100e30721480ee442d38da8b242bfd8fd78ecdbb8463a6f30150f4d6799618b5b76022100bda51d09c46b8d282134ee3d59355a3635396a22c5108e205e61e1391a565efc 3044022002c7030260f75ab7833bac419dfcc0b34f0d06b681ecf2a6aaf862232dbe40500220148d7742e7df85eea95b77df1b7065981c3014f84159fe218a042f2eaf3d503d 30450220649e0078acae220c05bd33562d94aa1c30eff1320673fe16669c484ad4086439022100eca8235ae6a775c1233d0cc982332365022adfa2a16bffcd959f02bfd6e7c4cc 304402206dcbd5a8a81805a353aeb56338513e9685601262c5527a6319205f83c4300a0c0220340b68e44ae90eba1069848558a46d325f45b30c496b6dc1908a73c78b408e56 3044022041d811b25e0da60a41bc72009a00e6fe7cf99681429c2aea7d6309a8914ff27c0220512f5f36286ef13d1ccc1a2228b8e37b55af3a0b96bca7f3a9d423ecfe775fcd 304502202f66ebb27126d2ad90aa2899a46551881ea777efe8305e2a281e3ee63fd6f43b022100f7bb8ed3d970ba782f839f8a53af9845cf70cfe18d309db97a75f49c482f9038 304402202adf923643af74ca0378ada8c50bae611fd2e15d62ba6c87d70e5f7e5a5a539802203cdc653b4a5cb0f3b70f4176f5d702cc876744f8d0771dcfb81a29c722fabe3b 304402205ccf1a2c501d1f5993048ea85b3ff69698ab591ab266648df56801f1e5ae8fd4022047754edc4b2a68018b33c5a92db3921f8dee24b7d85e78f0fcb5145106fa4043 30450220113b42dec2a48b9be2ea25bf823c258ffe7f45c7ee1829758a7c07854fe1be52022100acb099fbb0439139f8678abddffe3bb51930244d192366a407b30e93760ef531 304502207823696a7cddcfdc12878446ac7991e4cdca80eebf66cf7264f3df0b8fd70f0a022100e532b26971b76d3f4708120648bc452dd13b50a42efdc6a3d3ca3307c8e670b5 304502207cdd787c529cfdedfc4d60b4e6b7c400167fc0343bcbd7f87defb70f8a7eacfb02210084ac97315b4881fc525f8437bdf8110253b9bb7b8f21f37dc4550ad52f8650c7 3046022100bc72a6aa93ec7f9aae763e65575ffb837ae46e2d892e7e87d16abbf51b743dea022100a368f1f4f78a9ab77234e86aabf6b05949388b276e778a0f32f06980b6c7284e 30440220205445ee54d8f4b8160048911550c504e9ce4ee543d5cbcedf10987a61e686960220448bde7e304d7c4d27584049b3e5c7141b60f3fc0f81349e425212f17ccdb2d0 30440220532a35c284fed5495eea86951b7fd9a3b02da4fb7fbd2cff13d87da3ac2afbc902206f7fb248ff1801fa060c1ac4091149e2a943a1f7e241e1d1d15eaba7cc421ef1 
3044022013220db882d938b82628d8588f344626cb38de1519ed2cdb5c965d3f6bc6c7e5022015059f9364a1eab61b56ef8825bfd031723179b90d095b09dc26afaeca6456dc 3045022100be6323f43769d4c735dcf143c0770d817a0851c1e86a8ea46b1038fce258d5fe02207a4c18cad48f9bc918c13e9f5f5814734b08fe115f699da4fe1814fb1bd080e1 3046022100ef18842bf89aa4d7b7316b46f337f974b679bce6a1972b8ea9b7adc68e92ae6f022100d9d009210aa9b5985bdebf768cf60f5555031d8fa04073ab369587b5163a81cc 304402200b7b3165bef02c98a394521cca7147fb90c2d524dbddabd0cba3d33afa92912102201ba921a59b553e56be044b8bfd34fb11171432a0b453835dd0b361125f71e96b 3045022100c4606cb72f40584658badb8d6867f539f5e60defb82777c1cb2a2ae5bf84a282022030e7221e51c2c5999d4105112a9f6b60e9a5a9eee355dd447bdb8e181646400e 304402203744f8d071ffd9d31b31f07a977c8dcba44c3cf97ec06fc3be9a348b93a3e3100220127ff08e52a27390d631dce21ac666476653b389e3c459ee7561e8c91fa38724 3045022022c6338acfea844a6a1926f782a319572fa4abbd933ab4ddf880eff6a80d1bdf022100b01fb22b0bd6b93b2beec5790481d32ca22160e1ca29d6d62c32180b9b0adb45 3045022100a41ba9a03044fdf0be168ba4c84d7d0d3cc5b6a0e0e8e7b53a97f376ea5b3a7d0220483044bd100ea64a159266a342965ae6639d143f078e9620312587b4a2b8ab60 304502203492277eb1f39dfa09d1025bf740a7403437caabb6d2cbba03ca2c156ff7ece1022100fbf40bfb5cde31f04832284cd9dbf87ab79c3a88e94f1667368a219589dfdd3b 304602210095ac3a1af4f6ce900ab4965a35a937e21ff4eb244587af9917ea95116e66d49c022100cc050b9fdbadb2f3390211c0a54d77383dacd364cd80a44adc096e1531010dc0 3045022100d0e87aa7d2b914259c23fe3bd2da5509b1446dfe0c1b541d0c405901821d1a6002200c014a8f02d3f8130a5b4e1dbe3b4471f56b819f18731cf2290d35a888be21ed 304502210099e6df544953d63fde03b8f5edc032d982dd827952732a583f319a42686c8668022020b8fca1a7cc35ab68bba6d4e5e3078d6d6740447edeeab5ccd4515e8c720747 3045022037c68eeacf5fad485d9f5ce1a2a08889390124975844025d75328bca5c4796ef022100cdff131334973800f0ce9aab598b8f7fc7d35d694da161b9636a45953d4c0ba5 30460221009af96de237e0a7fc34ebbf7d05af5cec12ce6df7f07579ac20f127ff4af1313a022100bbd774e9c6fc856473d55a146ee3d3a54357e3af644d8f8c3ffbe3756a8fd83d 3045022100fea0b48e4e8190dd7e8f412e70cd75152e9ad0980b23ec4ee027728fe81ae5b502202818046f6cdb07bb3c88b06f712305869b7369ed1d84e47f50ac7e1f60acf33c 304502203f970463cface7415cb17cbd8ef35772c452f91d0e086aac4ee17027389ef2110221008e91bcef9002ab72c48fcc2c33ac82a7ad20b8850c8e902dc582a5d77128f68c 3046022100ca504f79c96d71dae4ffcf6aafe1748753a1d5d5bc593ebb2c380e4e206ee628022100fa080d2562fcaa95bb2eb310a7e307eebca49ea5611cd339af89d627927c650e 3045022070a0ed613863ea4125ad7ec79242b2d9a6c05e6f4bc2329c7ac261bec33468e60221009225f02a0904828367d7d485701c3a897f159f36e4b1ca9a400304cdabed9a89 3046022100945af14f5d7af7a627a3803885940f9838b14bc7827fe79a0e0f957f68b049da02210086917d0984f7102e447115ca845f799a2883d30bf107d3272f1a2d37c2b0ce78 30460221009cdf94ac37fc71cd77bc9a9ec5e191b424fdfc32f3892907dbb59ba342afd3c2022100b2132ac4bde44c1d41bc7f5dc65671ec823c90372754aed24a882b9b4fdb63f2 3044022054bac6a6a0e519e505d273574ffdb12ff27a2eeafc99e4901066eb04d6dbd40b022002d3c4a9253f4c7997409d422de732680903471bf0eeff1e56dd5358d003c5ef 3045022100aa8d63dd963a0edca52d202c36ac4bc92d72ebec0969abbc7fb553096734b666022051b012887e195fce6d8476ac9b2d534206bcf305ae09cc01cb7f660e9b3de8a7 304602210090a801d19514ac74eb5dc6ba4ff32b268519c988d49eabaf86676b08e15d9aaf022100a5345f3de9f57747246779dc4e23fd8db75224ad43cc3c429fb6d7dd4b266d36 304402201a3c7084fdf272d41bc866f2dcbd2f34c810732f2ac20ce61dedc6905a9b4158022026957efa3b8416db04958e0a33a35c29dce72d3f87529ceb7dd7ab9880e6a92d 
3045022100939e9ec9cd17969e580d9a40177849d6f4c3311d255716531ad1e15d459d3b460220436ae5eaf4741f09508468649d0742e9626711a2e3bbfc2e4f5c473910eac9ac 304402206f339213e45bb4aa685bbf07356fa73dbd1e6ed4f8f534fa324f0756b1a060cf022036f80454c44ef8ce50d3c33b58aa64c62baf3c5f69658f53da0d1f2ae9d6f6ae 3046022100e7c7669b797865f72c87df368978b05460fe8654525c08636e695ae5a93f9ef10221009abd948fa81526888dffe3179387d1a52aef56efed0e65966b83397c909ec3fc 30440220261d2c9bce6adf6d5a493c9c54e46b3dc01b132a354e3a43b1a3a3e2ce4141e80220728e49db131c8b8a2136e70cd6e18457e5c81ea9acf44599e5a96c9e9ecea058 3045022100fefb5a15b16389d1741aeee04296fed5dd75b1ebbbe659e4dc5dc08965422c8f022034a8c931ba2bca2cd7508afc6af4c70b0ec1e8b0002413e6b8f59bfd6fbb4e72 304402204431b0074d3c8f62ddf5bdfb1acce689c87bc6054bdf02950cf9bfe95ed59134022014a36fbaa9fa8cc7b16eba5d13f7b9bd9bc9c5b441c489fabf889fcf9ad5f895 3046022100ff1decf4e06d5a32ab5530254419fb21c5ad1853baddb916f53f92af8263561e022100cf5d34afc75a683dd97a0547234aaf22083c378d7b2ddf5dd65f249538aed575 3044022011618c9c2324f98deff58af0a01b8052313cf607af11d98c15783e28b47d7d6c0220450843762b1744754870fdcde9367b33f5a2164b006b1a859276258f27662ddd 3045022100c7116668b548cbaa810c7a07d6239f7ade3c4f5e3bdec8830d620935a2e793de02205b7fa8d5e023892f1140b4d248bccad1f7139edb58c4b74794c112f05b16f9e0 3045022100a42023829d68934d196654c05a5d0378fd7817c10f6e79e1bb7d9529e1c7a5e502200241e979fe4b19fc493a376427bd3a8d367615234fd5117c94f85a6dabdeebb0 304502200ff715955a410c15cea7ee42c7565c256a7be6bd7fa42cfad535ce72bef5e1cb0221008c2697539164ec52f6045428323a63cebfa102871f8e269acc8844909fb70415 3046022100e6e3db633a0d6b1b07f8da8e7392faab3159e4399d0e0b263e72aecb69c565b5022100f3aee29aaaf081ce6bf2267477cd3366cc94bb997c4203462f1b4271a9e2d976 304502202c0d44b92471d7aec3d17a1b48ebb5c0d2f7f9d9bccd371dbef13c23bbbdf25f02210096b2062aac1c33457948e2923af818e9bac7373ddfcdcc5dbe15a5975fdea85d 304502200a5a9101085979b068365edf9391dfbbdc49fac26eadc4182b975d6a9c183bc5022100e35d5b97d7c1a9b7aeb3b7c04b5864223a543cb91ea58f124ad539e1d3e59bd1 3044022057b7f29bb92bdb52f53039251874a6c312f1462ceb5b78767b527d14cb46f3a902205037ecd2feb3f8f1a9fb4abfa96cee954e45a3960f51d5d861411177e07901c3 3045022100988e83439fc7f664a0aee2129edd708cf7e2fb27eba6e438e50de86f2ed04ee902202e54276b1fd95a4bd3d07a4e9b4ebd139d8acb0aeb0eca1dddcf3a4c1aba21cb 30460221008c64025dc4b659e108f6b546d5e6eff8edf16177a4ccee2b2dd9e25706a26a06022100bdcf46f122c4d7ce7c8e994d7fd2deb12b9073de5280b0a1a5c065d908e25c00 3045022100ba8b341f789d203a76fe81c4a626ea3adfa4f96d3b05015674504c7a27c848d302202504cc66eae953046d4c5b6b1c2530c45e6a6319fb6342188c3d67e7c0c4d7b9 30460221008406727bc6104bb263d4de507d11faab79df4044aa1775a31dc376f1d4dcd4ef022100a1166981669517f30166c10ed8aa49c4e3c5a18b464ecd56ceb1ab342a03056b 304502207797f6e501a386e388a573de5196bf8df844388ba81bcd1702588edf8d418901022100b27763043f882ff3ca8c251926b63999f07ca981133b49bb8656f1a60aab2f90 3045022100d02dc3b8216a38c28a25563e0f06c0f798a23b3163778456ad3dd70ad947318e02202b4da2bb5f3cd288cf70fa77f35e7ac6d9f8012a07818e41bbad061d7f1e982b 3045022100cb319c350d228c5f85c0d2ee8f8208421624ebce58cfbd17419df693a7b30b0c02201310a977a9d98f6811ff245e1ebdf645af53853aee7f3d30883c6cfb7debcbf0 30460221009ee4cb9e569d172462e8daecdb3edf313b24544b62a41f5c7bbe39262714a0500221009e705b605d16c24f0b78a3ed6b4b4e6ea0935ce651dc945bdd8033c672f87d47 3046022100893177438c45200e8fef20b479cc798426a0bf6f0d1f466534ab962676678398022100c325d91b2a84b728f32864d676ba71fe175fa0b88159e13cf726370b60a885b9 
3045022100876c15611411653b0031184a2d949197e0857b66f2005eb5d5f189506c5524a70220474928b24fea52297db67825bd9b6c6e12790c9efdbc10a9824f708cd8505e54 304502202821462470fd1dd40c1a88770bbab42fcbed78a0f5c7c89e8b9f006897249631022100edee595bbac2f40e2062d67ae443d56d8bff91989d90f9904783b9f07f4d3396 3046022100f5cfde8865dd618031553bc283df33640b2b28557efd62ade18a6f4a991333d7022100f9f9f91d131057d3a998af033ce3684a277bf9dc81e7ec0c631a73ac9d80d8c2 3044022071b3dfe24db3a41abd86e2dea4ee94b61e35d01d295810e2851e2ccf9d230d0402206ba050d7be58621344a158614128d59bb1036692037302b92ccebc43e17d1a95 3045022100909c64f81982c404bbdca3787537589098e97bbaad665bb2c2050c3895f1531102201e33bacf3cca054de00ca22ca9c9a7540dee163bf3840486835bab94c30cbe4f 3045022100aab984770a9066f0f257297f88035e872f1a913090e472c3217fa1ead21394f202204dd7aa5ac630431e22f566a525d3233edb20100309cf4edf11236643558f0345 304502203a37cd18b9d929384156edc6bcec53dcb720b9c1b71911fa6791a1001544b44e022100dfaf93ddd73f93a9286d79b91e0f9b79897ca8da72b579b2d8c8f7bda3ee35ac 3045022100a08e71bb15494814afc9a136a39ffe2bd84ea9511a9e7bdf319394338cbdad4802200c05e33c5c1440bbd586d75526c400ff474a240a19eea5e0b69270520ddcb570 3045022100ff7b81f9b16e533c7fd6e708c7641ab512aeadd480567cc5709f3adf7cd2f3e202204e1f62411688dc66783635586cbeb80c84e5c5fa7f65482c8c5d0c6d9502c5c3 30450220575ee5cb13d15e0ccbc481d1d4a693193ebd8455196d6e1a5607e4d8d7c650e4022100edbf82e2a2c4374a903970167f60ef5786f0427b8d0af4ece5dd59e5c6d5ecdf 304402207c90dad3f2ac3c68b1aeafaa08b844fe9c004c2c8549f9225f37a054bf52aa4602207ec9a6048fa10225c91b419cd6b8a4193ec00ab47a03346f83583529d9241763 3044022014652884c8feff15edb30a365ec1d22b1d3168fed07530c4520feb156abefe65022013c010b943ad4ae8344179807224d319e8fe9765951eda46e130d6999c623578 3045022100f2a0629a4d5d9aae99189d1aec2989473f42ca58054a9a3b186514ccaa656a5402205cb4af4e4e5d66c2c3add42b5433a0255447671d7dfefc1ef3d9d851763f6a7f 304502204969d050cb5bfcdf6321c36bf4912068fb420d60aae18dab34c8009b36ae5496022100b73cb5ac75aa14db3a73b336e13f607f5293717a2e7dbb970ab5dbbc38a6e92c 30440220542a50a53edf06946f2185820bda119d83ee5a40225ef8af192054680c2341b402202953ac09107722289163209b00dba56c2908f707fa1f4aae895d7cba1481da84 304402202580a50499991fcede61836699d07adcfeea15dafecc5914b3c948942bdb283002202ed2a453e8ad7a4fccdad25e9588588476f7e60209662bb0ea5f9c1dfd0179b6 304402204e05d153c35b1e76d335e10a5f0e0503a73f7410654c14a02d5406af50ed3b4d0220449468697991c4b7b1566949e7867862d879289d5976985f914be5882b02b5c5 3045022100b492cf3fb95e76e8bf02f9aba3dbf1a24be332f828ad7dff213c4921948731f2022064a35ebe835026c01adf19238b837df722c711b8c4b17282ec0965181c035244 304502207ae6448e4c9751d13f347014612c9c58eeba97e2e3ec907fc95c62475f178830022100f0c282c5a2deed17ae0727acb48c5b626782b982b49b25439f7e1a527d0a0361 304502202af2afd7eb54dcef871e73e9b292ed5df5ab7f2eb6c1c4dd2204abe6f38310d3022100fd2442fbcf684fa68e65d860fc75cdc64f3ddec25418e0cb6a5cbd7380e6ae64 304502201993204a39e9947b78dca2b9b4113087a2218f38d9b78bad11f08942fca9894b02210090bcce50dc101f396a25f68f8cbf0147d44a79834b85776e8ee6d115d87a8216 3045022008335fe1bc35c25063ca0d9affafebfc148a4a5f9447837112d7a92824784c7d0221008a2d781e2001ee9ca54c0656f130991fb4bd62652fc9b1a4a743dbd9a718624d 3046022100b53661e27c16adae5b2a2325734d203cdc8126e685c3427eda108112dc00ccbb022100847905e11bb7db01f4e3c022b4a0082229027009c1c890a3396dcc543c8e144b 3045022100a2bd13ee249126ac1e8550423d2bb0fe2968eb66cd80b70d51155ce4c317188102205feb6b757f736ecab6e7a6bb8a7eefeb76044a925299d1cb7e4396a3c2d8877d 
3044022005f1e4159a8c23d3176193a9d6d5a3d42f7184ca9e62ba042b48e462d0eadcbe022014b7f8a5fc416b8be80efda140afb0b45846b66ca89aeebfd1e64a12d69dab45 3046022100a2bc2a57c848dcea8cad015ac6f34e0f48777f51e063869eb31436e49c9f4f8e022100c884ba56b994382831c7bb067141255894e5b7119dd4f65c1165a78692343c9e 304402200fe0127915d5eabdb10d94ba1c61277b55475f1e4ef7b9bd4290a6f5a9c4b03602207bda9bee0ea2ef6a2dfc39228125a39f8ef2c841f4a7c90b7dee714b22d3f206 3044022053e847e9aa5eafed4a8f27a950bdce9a7c1dc17fab5ce35d0da0effbebd221a1022020bede85de9adf29adbd980f99786aa1de1b2c830680987ff1738470aadffa89 3044022001f2fca496ee01cebfc3ed848bb925f0c7541ca84d27376aeb7e0af5ef03fac502207defd379ed8c9df998accc4646967f0b22cad00b5f47228806f63cc7177617b0 304502202656d0ec9f3dd72eb1a4f01dffa6ac1df601a055daaa349dfde2423b25df4260022100a90b0617ad016fd0d22180a925ad481834e353a9e6015e04f9d2ab10a5dd885f 3045022100bad6d4b0f510ce2ff29731ed5479337ae39deea7f3236b2c4acaa52426ec493202201eb13eaef5f3a0dd20ff6332bac4541847df90bd5d1535509201eece9655ca61 3046022100d6811988ef4df5fc4215abde2c705676f432b901b0793c8fdf68df7a4289518c022100ac72e8075251fe7984b6f4f2a07bee3fd0d8432ce02f8332a51f3fadd778d159 304502205e3c1db970eacd24b25e02ca352c8153cca3ad28d238c9b8ffa4521263e097ef022100f18937b745a8641b5bc7131727f8099e2ee6d9cd9c167c2c81bb6f7e8296c51f 304502205807cc1e3163149fd61b0afd2a231578ae60c0266ad38190fa58a4038b35b541022100c3771cb4eea3bef0540abdcf8001e7810204908c2d46aa438e1f37a109d78df0 304502202b2b6829827c80a666d85cf1d37c26cdde6a823380c2fb0b9def3a3db4a7304c0221008d2ac7057b437a211fa7d539cec210d531635ecdcbbddbb5e34ec25c7ae55619 3045022026ceac85e502fe6822805fb9072240d5bd0a3e4eec39bfa07c5cced1893dcabb022100eb2136d472787fb3c5fe53ae30ad2a4d8e6f7c09999bc1e95ca103970b75684b 3045022100f5f4ca8580e028b0d6589f43ab2bd6be02c5fb678ec23600afbe31c55672b812022049a7ac70383ba9dc9e082f72d70e90543e6fcc7502c12f5c91db818b5e0e5cd5 3045022100ee5e58a5e03a6bbe740953de3690bc48f9f0742063f579e6177aa39a4bcbc73b022037e1a230a4ec9b9faff294474cb3995049146cc57a5edc5b9a8aec6b535c5534 3044022019adcdddc786f3b8de7b642b1a0f6b5077ce18a768d566aceb3c97c88738c04c02207f3dc24962925dbd1019184b41b3e602445b38b342bb4d8f36e32cb104313b34 304502205b2955354a10fd9c7faaa13f4675461c9bd31555aba95ca60940cfb6ab02b418022100f4b72db18a83d00a8a94cdda20a7ef1fc476ecd35ff1b780e825cd5c42477234 30460221009d09e7cd6d415c27c3b71cd54334a79842c923fb426d493777e9cf7184df9b050221009736058064157cdab97695c733a7dcf2455c0eec8745a1ffa00baf1fe23c2f29 304402204028468c068998f48f18f11a6593676da81f1f3b5738780098edaac16947cc0e0220422420ad2d5a02fae51d9809cd0311216a785a551e65f16e8f47526cfdfdb2ab 3046022100b166f4cb3d2a4afa94e608f07d4acb2efeb6a1589a24153676197241cdfbc132022100dd1a667d9d06b718beeb3660a1a98513c9034ed65cba91cd69830ab60ec785b3 3045022100c5cf8fbd434408f50d537cf7a00a74dd5da6364f5e7614c5f0f0b6a7109752f8022031c9a4f32958c99a29f9c34981c0142de07cbaff2ad73a9802dd4ca91e5d0931 3045022100fcaf565f4108c7b534d01e310b1e021641e9d3caf5cefd061bf8b81c4143a1e80220633c6aee5eab7079dc7cad3f75db816c52e0c7cf17b370d9bb3110924e9e1d38 3044022023130746d08a2fcf9ef3c1972bf33c2c5db5359680f17c9a7f54cc4c40cd10b202201f0295ef4e220ba97e4645d8d069b3a217e8ced21d2b5b2962f7bfc979354e27 3045022053ba87b2db2f47e5e923f48039b2ea56ba7c5bbc6f7a0761ffdcf7d7d695fd5a022100f8561017da4e6c8bfbb89afa89d95b644cd624213385ae06ab0358baab3baad3 30450221008deb0014018ed1d44f36bd9363b266c7033e8fb0c1caac13ece1b96ad6d12634022026f03529bf3067f74a1b365295c2230c26989f6328ffce2884f4d475c8991d8d 
304602210094e17669f43806dd34644d1def2fa675464909683146155486c6b0e27a098c0c022100fb7bfbe6f3f87fc480d4b08302e25bfd2300762c1d11d2875290a41a021a0281 3044022075f8e341dcba9aefff55857a7b67772b84238fafb526663fa9ff939243ea4997022007bd7b819fb8b55d1fc650756841ca5079c5f72abb08257585c3050cc7ef9706 304502203c4e5b0474e026181ede014907aa98e3b7efc94c9000fe13db7aee9b4578e04e022100f06db4a4875817d712fc0953ea3e41fd97166f75de0a20c24bf36fc61ee4c28b 3045022005c7e9d2414669fd9dcc070d2887d644b717f61136fd01a408168b3598337231022100f967c6c800de8be0f6c18e97f6e4d79e0a0587eb6d6a2219e835cd65d73d5650 3046022100e8df115be47ca077817226723a9d8248310732aeb2377f464c7cfed52273871c02210089d421656a0499942c8fbf3a5843d27c15132c8f3ccd1ddeb23fac474b4db123 3044022045c946a7e0cc22bb28a50234f65d34b08a7b68e722a609f8b6b142fd147af8980220758397ad5b0fcb1d16edb7946a32937c08f365176d093fb836a4cb66fa7d1da5 30450220544c158decf087c2eea0b17d2712c5595e567c23ce5a213d47f789bd46133d30022100b120a124397b4d340b39c7f0c98de330c9d54e72d2bf91a11be53d395e4ce26c 304602210091ed4b39a2acc6b8421814a0c2ec344eb129694a7dc59704535ef427636db2a0022100f24f8c39029ea4c1a4cdb1a0f7f2511cc5deb19d535de227581746a6fd27b1f3 3046022100cff322074889d07277f57e023fd63a866c34b3c1069d981f482521cba099a2c40221008459c1d641ec6aa76cae4790f7d23fa04eab45dd2ce239c3c7f31e69d354af44 304502201188cc1433bb8e29d3d1bb3c4ca5edc292d3745feac8e02c357fa71f6457e3cf022100e8c1d004c431ef76c96a5b4cb37bbffeca47c3f9f754b5c5bcd8a4057f2d3bbe 3046022100f6882481683700a509186593564d0d682ecf7c539ba2b2b9bd03c38cb5db9f2e022100d7583288a34a1732dc90e25cc37de69c70e78894d0a00d89274fbd5f76ffe4fd 30440220010a5e671c4c3cb7db50b2e7ef01f4541c3e044347cc4a3bc0072de66187fb9b02204b42bfeccfdc32155eb0b2c37352e88d84510d6be59a3161fcb3db840934b714 304402203fb1185255e5e6fee029dd9a464984c60e69d68d749d55b77b3a1f7984a6ddd20220758ca9ecfb6ee0e579aa08bed9c237be5919307d7a2787fb2a60d7c51a63886d 3045022072ef824ceba86320d3a1af5031c986c5c2d03c749f4c6602ccc5ee5e7c831b57022100befa3db7aad137831283e953266969327b16f459cd12d1e8ef402991181193ca 3046022100bae40e033e1ca940dee07a6ab44b8c069eb8496b66e5fecbddafb6a81b945413022100a1e0c4f977a6f392933481dc6540d02376d775ed674be29453de88bb64274fd1 3046022100b6ab1727029a476fac14e6bfd81f95616f03edb5ee2be67f023c84ddeb6b17ce022100a03d0021939d3cdaac590f4d293980aa4b05d0d8174ef94cf6348ceb02bea54a 3045022100e86178653c475157aecdec3a1ebafa169e476b61e61d915afa3717cf6d9ef6f1022030299edc70136f56f86ad3083903bed658ff5d9b7dfd741ab5a74d2464e00fbb 30450220475c05d7f9b8af926d5a18fecd2eb93da13dfef21812caa66056bd893b2dd3160221008396146e98996b2c2220b0ef3e5487ce040cc7c946d0c292667ce1929f04b51b 304502200b41281f2e3376c1af9a3b92a6dc7a2797fbe1dfc3e31d63b299c672c602cb6d022100c86531e23b26a97753ebbe098daa41fee43ffe51fd1dda38cd8274e44c5ff402 30450220286adc72c976a56b05b5097e1c392903af334b3e4fe378e4725ffaaf868a194c0221009db2e54fff4486523b1ba83f8cfe2e0a73ee89d0b5d484bf81e661b1233a6fd3 304502205aea5b9d0e4c6220cdc7a29653fa92fdadb5af150c578d13d5e90c6077b3d726022100a801208c1e6d193d970a6b5d73946c555e7306fdd98d2abc974a66c713ed3fae 3045022100c08793d657f4969415d3f2b2ff0473a5e5636caaaec948099facc4f5661161ef02200816fbf269f897ddf2bdfc6eac7ee305564b7d0ea09719e66a76a6311806282e 3045022049530bd5e5d58e47f1c5739af0252d0b6d5c4cc63573139e540548804b4b1906022100c079c7810c5076ca434fe8e3ed592ea8c0979475d9aa9a8100c6d462c86418b0 304502201f6f68eaa91d36e4ab46535ed6cc64c410c21399daa6c7eb73197282b311e909022100b2d51922b21283b2521d0cc90368cce8c02f45aed8eeddef6f3bd3a12b4fe82d 
30450220090f78516991726eb3e36a05363055f4a71d52dd4aaf5e32eb0792d41b29765e022100e92d462c1ba8cddd5ed919ced382a3029474e5e764dbf1ef8db3ed890a692e82 304402201f38bde18b1544f815ccd5884aabb14d205ed5dc6a549d17998eacf72215ed30022075c56cc853ec8b33027446785336fe8837246ad19a5b7f9c42e73ba152a23009 304502202eb9d1859454619d034f9144edf276fb85e4a59a99965d80f4e4217a78b1f239022100f5eee6c456b8fd94186fb8183cc9f16672e916b398f1eaf7fe8194e7a583a0e3 3046022100e84dde3cc9dfebb37eeb24c328458e096e40cacd9b80d2da7b295d1c162137370221009ba6d6f75bd522c6095bb6bd53091034fe307db546b4af8fa27e618b345c6cda 3045022030a9cfcc0fbcaa4d3e6c842bf6f744abd8c867edf4043f421c26abae2c7220b1022100cf6478c2e4c48c434156ff592d57718c25f725e29be56ce3b5897179b94a9134 304402202491ce65b9eca589078fa9d4ca91d44dd86b87c9b9c588f8774c2fe6eb5b32ee022013b9e76a7c2d5546ff4da2431bf084f5123acd52362f17e3fe64c59fa323e26b 304502203d8fc437c09f33b43ec6f60b608474335f61d16f15583e02e8c827068661eb06022100bac3db64f5b15665016f2b395f6b85a6d24812599545e407ea9a423af83b1d23 30450220029734ed33e757eb6517fd3708db5c587294887e980b82bf00a4b29fa75c708b022100ba70b8c5aacf0dd9953832707cd8ada29131eed72888f8dc6a5d32805de750d6 30450220332bd89beb26eac2e8530bd79ccaee48b37a03507081174ebdaa1a3cd1b8c4e8022100b4a00aa63bae9c80f534e9a510960bcd56312e76909ed02b9df9f82e2e505fc5 304402202d6ff6b9bf8df04f25c5dd558975121cc247b48c3a289ac802d9b7a8d92590450220588c8b2b2c24d02e2eb13026f28b9264973ca154077521fbc8d0a1d1aba6defc 3046022100a326a6903a7d80a5facd5eeff2ce50392f5557c03e2211fa6c1067a1ce433c020221009d0519694602551ef1e1bb9425d78dd7007c5746bf5cb22ebfbdaf54216b85a7 3045022052a06ea098461e5be8255876183a63ece7a15828b788aa89f2fa82d3196cc110022100cd03499c9e4c9015f5a7b8a27bbdcb4d9b02be0f1518521f3b02b0c351cecb4d 304402205fabc07b2962fa33c5835ea3a30cb0f1d9d429e7292d735be95488c1a28120f502207ea53d4b92093006f3f11449208c4ea47598aa332d712751c3b7355bd91431d2 3045022100b411167a3f45c51d67e2c52c63f440f84d5fb08b3d347fcb30348385b87eeb3502207ee1557b6d6a25c31adc6a76c8e6aebe738815b07ec4c3ca537db327168eddd6 304402202b5d5ec32e29dcc5d9f5509c7b0666b0216981c9a5d29251248735f915c54f3002201f98d848f043bfccca9e306536d240744710c0731edfa2b728b57ada4d633316 30450221008fca94b7924fdb2bf8b051857f2ab0fdf02e4e4298d8f0a10ce88401f39b512b022046ec3f580f425ce0b705c351f370daf165440bc777729aaebc37fac6c2343334 3045022028be2f17cb8498219bc3477ed7faa6ca9d15aa615eb12b52c6cd3a957e629eca022100d32b0fb9da7b5e8a7192161177e405ca33f21bd6961b99ab188914025c1c922c 30450220766114f02854fa1e3e4eb7714415be739ee1d4a38fccc84f78fe05c8041b856e022100ad6cced281ad6bdf88041e7fe09dddaa094923efb846e4d6c38dab8127330e10 304502202d9a3760b6a8c2986e36a711565c8eb39a0d1218d6a8d6ef6c033b467b8e65b8022100adddc037d46c2640836b2f9feeac625f07949b0998511519c1cf40afd1c21b76 304402204c7b9c56578a09744705075ef68e510dc6018914382307c3cf060e28dec268e40220495ebc1334990c6a571fb0ef64f99ad0b648110b183af789b56ac19acd8f4ae5 3045022020358cc4e0cd312dd44a6aa46b27edfbcbce45365c7862b6089e6c5d1f4e7d57022100e2d003e37b617a8d74ae0ea038b9752b1166c6ec05f641ec9db4caf5a27908cb 3045022100bd2ffef7b21f8d01d915c87608e0afa8c26e4829d3744abd81613f6a436a40b102200a30a159cb51f8e17d6f79ad8d1727958a606e39ebb0c9ec7676a06af7152508 304402205920292a062aec760ffa686ede552603d9bd539250534776742446855ddc020102200c19023dc275216cfc7017d543d63cce56ffcc5300f3d6841ff3aa9a84b9e4b6 3046022100f6fdfa6f6eb2a7b57498a7eb7311df7e402bd449a98184cfe43c48223d9a707f02210080fea0f2cebf60939942e374b117b0eddbe2260b5260dbd565594f7e21164f10 
30450221008406ad499791354043ddb784245b092e8e282f23ed0cf569f82bdd3cde09f73e0220544a6c8d31fb562976a8266f532888d2efdbdb024b579e99d5e751ba92120811 3044022029bc01a679612d683a180041c218c4b48992e3645d9d4c0f12e8251e02edd26c022027934a4ba893fbb703ec5435ef80f1b20943a1a36b5a5a598fbff728e9674881 3046022100c0fba194fafeeb87286d9056c8ae931f436ddeb7be9405707770e9e439d3a69c0221009a77dfcea3a1bfe4a4d34bcfc378c9d44d33132a15b8472b921d7fda515bc5a4 30450221009b7f55eba1160e833a81c7dea45909ad362ab27f2db455aaa8784cfef4f77cf9022043819eec73c6a919f6126cb4171899f7aead66004ef8c37af710d7bff8a234a7 3046022100f97ca2830ef22098d513c612f08cd35bfa2f3f032b57d40ec19550d52a977091022100cf7ec9bc943fe38008ab36c5b69180bb3e09071f8d3c01b8a628d5f8a18cec8e 3046022100bda954eef15191dd4f4e8bffcb624238908614424f0fa4d42b27609e5ae0fc10022100a1112df452c7673a094a67fbeed04117fedd563011d98a12ce097db341d60b12 30440220673aa4401523c90cf3c01a4d3fdf72ab21667e6c75b232b0dc762ae3814d48080220573eac399c65cb93cbe1eed2827302431a0ec0efeee6d3df70cd343e185a0f8f 3044022079dc7333dafce2601d980876c9ab3d69433eca39251beb3b45714e853534f6ed02202a5ce5ce9b70afe7b8306978076f6e97b9a30b156cb7dc69eb8feb113f189a4b 304502201d6a0c9d3a44622c5b5a3e829237d7a2e4869db43a210f1b658bcb4781da0326022100c519441cac86b8f65dfa10623af4ed496b34fbfbbede86a5f19fa6679acbc2a1 3046022100c0edaa728e7fc1ad886d04632ffd1ef210c83f864063aa3de227fcec8f710c49022100ddf3b2e6f5ef38e3733c5b53679130541cf955556e2bcf951013ef341e9945e1 304502210098568a54480de6087fbf88fdd48ce0ee3415f4de6a960fcbff40c9c3793b235f0220462e17b22036bc2db228236c543fb4c113ef68e1f7e2905dab6a89384781fcd5 3045022100b3e97cf3639704ddb2b8023d0308da58a769cb7ec05f1b594a402131b1bda8db022001531414b077e54db4e2eebfde4ce222767406ceab37d0472be0ea13037327ea 30450221008db0dc455b42332d6c83c45fe5c2caa3d39e7af14bc89f99bf84adb6ba42b1100220215cdf6c7806ae67d818ec22181872c5e3fbec838e1f3ab921fe76aa1891d339 30440220055e55880a1673e54305cc36be65b5e3375c40bbda0881f1e829413ba19f74cc02201435e3cec162abffa899c7261ffd24d2d59a24ccea325b478ae97e9dc15a5660 3045022100b2dbd324afa4fd0fc034de7f90712c93a11d24cc12221bba260f13139a7063fe0220201b86760fa23b2ad9b19c0707f5159d7fc691aa6ed7f281dabbed88c6ebd87a 304402200dd8d4c9fe6b3609223a49ea03de4da51a9d1e5c51ff75f4ddf435a145fc32bd02204f8a0f792ca743a41da506a2dd644b88dcf68aad9664b820a98c4b1130176a3a 3045022003d0a5e1c48dc23996b80dd11eddda9c412027aa5155ff04bd2029092b527b7e022100fa459022e568710855751fb3fee980ff2b803fc8274d0b3654ea65432efe6adc 3045022100c76eb47b1d377738baea74a3a813c5da6b057e288ba277657d85b066fb3498f102206d07b28414dd4ac75b0b2e472ba09f10eef229be1e37f427b2a5c33445069caf 3046022100e27bef17e1ba8ebdb0da9332ae33b55e853c939cf7d091b989fbab35ab4d5453022100f930c21365e5f0b9e5ea22b5bd0b8e8420c792e590d473e7df5fc72555bd9eff 3046022100e6db59d9c804225e599954170959ed232bcccb9818ad2089fe834047cfcf4be6022100ec385241baff726e8d673e42875775fbf789c16503fccea562b0938214b94765 3046022100ec2cdd8d320f6bf707aa1a2dbdaf5f35eed20d980430f217abec74c597eb9a9b022100b683e28cc4bdd07961a8a8fa20ac9b8b071bb6733d8f8c59b29cecd9c30d026d 3046022100963c3206d3d9ee5539b140e86393572d9043f545d8775f619c151cba5cb4e26a022100c8b4c742d0b389c7098f3f7c479ea6140eabc1c3c41cdd15820019de859edc8c 304402204c1bcb24a4929aa763fd8e23eced59d0026df1a5d221f52c13d531a20a83a6ce022044c767a7fc37b663093e17d9c1139a6dc2eebce61c91c7967235e2135b5f72f0 3046022100e0ba064723147a566fb28996400bf7d6c7e0302ab40d4192eded3aac49546789022100ff7a88af96f6a21cf85c3ec5f8373b781e36a0f28ee864dbf8fe1ce8868f670a 
3044022014ef5909df59cec3a83e800445182b58640468b680219679fdda405163e113ee02203724b922bbd200f3b01198959991d4888410df525b53820d01bfe7c88361e7d6 3045022100ec3da45065eda35b3e4966d0975765c73de7cece40730ced3cd138ffda6d3cf40220727b6630643f3e28379a75a929c8247e47cb10131f4b70f890460e8c5811f84b 3046022100f6bb994d2e53978070ab0912176fe9e313ecc0db8d6183eef614a9fb3cd5adf6022100d67f11e9ba87aeed2403b7de37fd795b5d5fd2dcf1c722469445d75cc7ab33ba 3044022051e5d78adff8738dbac40a61456333cbd50921ad4b1a0158eaf4d50d3f5d5a0102202d771c996b4b39c217d8b0ceb0c932d35d2eabd0474e207cf0c36990e7aa21ac 30450220253fa4518d169a37f3cb7e3d58631f83299a85a3e25ddc5f68e8f7f004528dbc022100ef187a68561795bb0ba5efa5d5e20ea5f84295294e0bad6aa2da61854918698f 304602210093f48a5e7b884969d9702c8cf1858b5a152c6ba0aad7407b84c4d57020dc7db20221009bda822750f70c3fa2f4b73c41f08709699f8cd4d1b0596d12b1b68fa6f8ad99 304502200119540c83d2fbb63d8497d81e84ad9fbedfa3368d1d8ecdd0a174a04a7d0787022100c76f05ea648e079df571bcd377c9603e7582deb8af23b7c2d3377dfc19d5b880 304402203267b9f634af84e278e009c899556c1008152c5281441540b881f2701060b192022044798793fdb1ed7b2f9028cefa81c11d04314dc7d92e8ff70f9ece1a86e4b805 3046022100a02c85c2a2ceb0e0f156d95eb1b36b88d8ee56301d35acceda08d69ce2c0e79902210081a1ff5e7c4681befba22ad6df3aabd51e4ae686c1117f8d2abf208930349276 30450220192842777746eded5c9c89c9bcde8ad925e362681e49c18ec081b65901bed9a7022100b500c84501fa5678acea8142df25c3232b9db3e552cd0f97fbf33612fe5ba108 3044022065a294181d7e1c00901e5376e448581edb4fc79f4984bfba97db66daacb6d3fc02201f1277cb9053871436701b31966fab713ed30232b94949eacc6cdda1ffdb7946 304402205b4a6709f7a11c564020e591f2b2e9d29fa2be277997ce3a0b1d8c0798d069e6022007ce9c25d76ebd579b696ec4df1b24816966b8f16e150017c34f0c1838d6078c 3045022100e906bdaa82cb2c58d620c0dbd0b5a48678072702d34a25f5c16c84acc554d50202200f079fe4499d0594b5eb49c0b385b1823c5b8dbefda0715b6bf12eebb57bb92c 30450220682e76699370f305147244c219d3c167088493d215e0943a343a3e2c97bee05002210095d8da3e7ac7108e8e5d6cf6ae988e7e92baf4f7ed44e1b6d58995755c3e9b94 3045022100f862950fb694ca0fd81e261c74ff800b8c44a282bc0d5d999ec62b828ab4a502022010e63b1d03f4f2e020458b818d0e8b3fc7677067f104ab5f7222232075b22b95 3046022100db43da7b53638651f965c8a8128b3029e5fde7d5d91ca537b1d9f367933a270d02210086f36bcb9110cccc744890a8b5aa0b3e5cd7a8ca9c98e3d1fb5bf04e27099c7d 304602210086d196b035ea540ee9d536c10b8a2db0dcb8b252122544d70c17866755589f710221009b77affc100aec78167f57a2dd25260e0d54066ddb889816569036cf60d2df8f 3045022100a757b44bc44e8ff8bc06e706a988bbbebd0bd598ed26833b41f0a1d985d5f33502201a1ce8b454090139bd1ad5abfd46f1b1aeb8cdf746edd85164b330445bb25127 3045022100e0a895e4ac97383cf682a6a26e205a304856522486f66336c25130d24f1e1e8a02207ddfd612335e6f89fd8b2801d173b5d1f8c7d256d5d9ee3126b511fd8e9f88a1 30440220526892c621799699fa1c2bde39dbd08ef92ac5b1aba3fcecfaaaaef882feacda022042ca404d6ee1a8a12f0e6b31d897c42a3ae0bf441a13f58a50fae4a1a90d973d 30450220548f7ad5c796d5fc436dedc8cb67788091a6d2a304a537309a0f8d2e58ec5161022100db443ecc5ca4d32f9636490410395dba0ef437df034d628f2f565b8846beafbb 3045022079df0654a85e2b3bf55308a2ab75be8ebecde1c516c7caaa3bc5ba990d23e21d022100a2bcca590dd85c19f667940eddaeb0d8fd2707fda5489700565b6c85414ef019 30440220290ea81db7ebde92611bd2cfaa7d586d76fa850b1301f68b8eab4046e5937b250220346cbc09f239d5f066ddc7a19dbe6d564194297a452d7eefe3847d71ad370730 304502202508cc26028457457ec9c1d8fc5b26d5d08096a9a54d15bb6f84144cea874cab022100e73a18d4df55a9759a51d3fb01089e2ef9d7a6f9adb46b6875626bd694815f0b 
304602210087daddfc4275095fab9896e40441619c1d4c7fb8d186410a7f36ce3e73b15878022100dcae6f077e8c1f448b72ac2ce714a74d4c027812b6f5bf80652165741a825a81 3046022100cb2c577441d36306af12bedb6fddde6b2a7fe9b86cab1d64b9e5b9286f0db9580221008d0e43a0941c7abb6f21e591f4219cd10ad49170f392982abb80158ecfb58953 30450220177504c15a3b7c7a65f031021d5be28ab8ed3de15d69e41a770745cf4948fe9e022100ac6d79dbbe84981336f3d7da7a119986157cb2bddc94071bd334837c4d0eae5a 3044022050960796725e7d79848acad857da3533d6894dbc66f35f001f7fa7890e7be5fc0220267fc03e57e4904eb7ad425fdac7d95a457ec45a6b87f8a4fe187bdfe0d13b24 3044022029ea00b80826e6cad262a240609577e8ec85c970b99e224cf6bcf2640f77b2ab02205bd2bf08f50222e104e1632fb89e2068c434c1723a68f4af15fd3bc8c70efbed 304502200eb0b5ed28bc5c3640fa3bf4178abbd3660e406653d3d1c0471cd9d0ffbfb8d502210097da21e3d35bebb4f653f4974cb40ffbcbd23f95822aa307701bfcce4008b8c6 3046022100bb97a69f04c30415bb337ebf77f933221ab29e943bc0b5d61ce7e050418ec099022100ec7455b19af5c48eddfeab389ddd120efc7e6afbe0172914b4891782ab221aa7 3046022100ce3ab894dd66971ab3cda4ef57530ec7a3b8c1fc29976e86244d2fed1542cdc102210082be5b7516a17a58b8e49b09e70d2227bd8eb45706a26d1d450b4aedf0e47e8b 3046022100f0d2dccde5d57b36d29214142a486f4a9c17d0f3aaf3e1a9306bf26b0559788f022100c7149db323e01f1f015389ec65f72aa53c8b77e0619da8b9be5f9312b1374176 304402206c85541a77624fed5c50c1fd65130b48d6b18ccc4a4b8e91acd03ff5b1216721022051a586a79939c65bad8a9a178220e295d8d495b1b76dc850398d37eb6dd25a88 3046022100dada246900d16e399d9089ca7447efc58e7a65af066e754ca36f621bd8e4d63f022100aa53cc8976cd9433d4a092ea6fd291e898ecf35e3381a0538f8a681074299be8 3045022100845b4eb84856366314599ffa9f666f7e3b4e03028c881bf97f598ae42d4ffcba02206f8a458f3ca61dc747c83f2dd594abcd328e31c8f0f2732b22faf4584b00bddf 3045022100feabb7e2f6b48b25d1e1383447a45e5d68d3d71ad8565033a7146ed128f6522402204e4b2d8df62dd92b463e3ea56ee6f84feafdc704e320a01834d08af2eca6e75a 3045022100c7162f7fbef68400bad4a422e93d6f8c05ebc5824d15204553076425a73c623a022003ec013ab122e1dfb8b40f256f311c6d962d757aba8f4289e85767478eda9757 30450221008210f28a972043c8876b182bfe7254dd94036e9fbe46f17d59dfe5d2f29992e00220217169f03e797f07aa990996da976ded85bc69e640db3aceb5ea28103e247d6b 30440220380b8a1b53cdadd2edc013f64f3b224519977f0d6a4a0ea2a31e71c5e539e9cb022061d33afa94460defee34be6b56b8368e9c0854fe2b06e924d307109992099c03 3045022100b3fdaa5f81052b12b0436ade7741a7cbadbe57ca7c56994a456c309376cc05f20220311245540d81557d87f0ab4432736115883f3eed6df1f0a20b38b99b06fb2957 30450221009337c9c9949b0948d52126d533cebce7b827f82f3f8bba9c9c6c4290d7736219022050c8166a17deaa7bc2b5dd27b376491451ae9bd636d33808e849d66a0377de9e 30450221008527b18a850eb6a99eb9275949500c1566022e1ad789aab87a53431cd8fae5dc02205350ca35798ca06ad61c50f4178973b7063784ed57983146b549d0194a06649a 3045022100c4db0aa8c4b89054715a0829a7219d739a9657a3d7e374e0318bf093ab5d4dab02206d2370e0961857d9e7f104818b67e1c602b6a5abb4b69aa3fcd807cb23a3fad3 3045022100fdebdb4ecca9f6726095f7b746197473dee609b749382f2e16ea9aa5e99f4d7c02207b77e35632b9939eab03611aad845203d1d9dd68c24074b4bb9ed8efba6adee9 3046022100f6b9655836d571604c28997e4fa5e909311d250d8be290d87b6910e27b4d9db40221009d68aad4510aed8181ef60a0527144abb86ee55148ee61e702818836fcf8f206 3045022100f3ccad6d75c03f4b83a6d08cbfe44804dd501e801fe1893011d91468bb1908c402205ff2bbb1d4eca2bf2286606283c3c7921b09d4568c7b1eb33d896589072e2aac 30440220313395e63a2d9b19678830d07fca5245bb2f5db6fc9b296cb48a3f31977c55380220196f95bbf1a74c8ba236d78c74984ae0165221ef5a620fa4d2cb3037f5eba1b3 
304502210094d964108b3040683190cb601b51348c9575776dcb866a5f4308b2f892dc7ca202207cda06b88fc6e23cb2952fdf498edb6fb2fa62318cc465ed85cfd2f59cbb55c1 304402206750e9357eb793878201bf428ed70e8335d86e0477faa78c4e64a365cee7dc2b02206d960f7af2cec96e7d0ed80a150224120c2e0797021e441ef7d4ce93aa789f5d 3045022100cef765645f7fcc3ce40d961fbadb78e5475871d549969952ab8b6686525110ae02201083f8bc66722f654b00d93eea668911897ad79e67a40acff43fd8512f2cc81f 3044022063d2631934a5cec0c50309b2061acd40ba6edff3be65acffa7f41826d3c63ec5022014d5713a4e209aa3bec7601e7c38cc519f7a87e369dd13bc1186e79e36cbe1e3 3046022100b09ba2f6f72b52d920f6f9216b5b301c5be5fd01c35249ce5948df3fb3c6920f022100f43f344c1c3956c921e6b75fe68ebc4e3705eb64a716157dd0b579254eea317b 3045022100d46aa5795e153b63c5a88f40303bbb708f0b0bd21e43bf00f09b6152ac44b6510220671c92f74d21b6ae38ee4fd404bc350b13c8276ddd7d15b4b7151958162ea17d 304302202bf053310633533728bf8b7e8fc05efec293d5d1e98a9a355f252263f369d3a4021f2467b7a992a6f560ec805f7f037d5575408a54446bdfc4eb338646b34a2f99 304402204516700d4f233042ffa7c9f4a6e4c8c3dd7599d8e4e35a69f807f8981f55b2b00220247f05beb0b208752f6132b1b2f4deb866f90d86848e9732e1ec2b5da82efbb5 30450220168241c119417d3a63bc6b992d6b139f671f9bdd9d0e247eb2e56ebf8abdb663022100b7e93121f92b6ec0e2e426fdfdca41eeba05213285d05e771c3fe4368e371e9f 3046022100dee6dd16e6c0b82590a56863c61beddf1197f33364dacb77f93d4cc26c97f3f1022100e73143e5cdcb21dcdbfbce1a2ec47d1e60022e5fe32e3aae1b233c56303b5d06 3045022100ada0332ffa57011c6d5ba94c9fb00be6661f028d044fe0e36aa8ae0db841b9350220414332a663c3c3e244d47e34c7c3e23a734848bf5eb33ff6b1223b9982324aa0 3045022100c6fd0eb7157e7d60c864cdc63fc405ba6549476d532e5df4583d6a6af019687b02204623d565ac1989709ac0ced12f3650b6b1d409480d937cf40455090ec1c5980b 3045022100b7d7dc8b3b12990c3b71f0cc102c0608d2f3962c3b386ccaaf82f17d4ffccf0c02201faa8d7b1efc6e776aab50f4c2ef2f2ad950e7f9e4a599626f83ebb13caa2622 3046022100bd6deb6da14770db4727d90fbf98889443daf4d556f6a4dfee5883017d6cce97022100ff523fff5199b0f82f3bef4087f50b257b7331071bb072ae78eafb0087729042 30460221009366a70150a03075491e7ab75c91f703400df00ab483e870089edb2664189c7b0221008d3335b9c4f4199e37d87f2f637ad605b756c729a44054f00c286e60c5e48d1e 3044022017503c2ad6cb0d98eb3ef678c25a809d8cc09e98d16289d2523e002a1a0c2f2102204bb6251d7140363ce1600cf029a1c0ea9a6333c5271d85f86b6da441e78fed4d 3046022100f0cb934e3109a13fb778088d0f11abf7c1548b9610cf10c86e41276b42fbc5e90221008e23b7a9780ac28d72366b587437dcdc9556ebfb519afb07c40ecca66fe00dae 304402205d078b39f3247ae1f1a57e77432ff5f1ebef33e5869ce4c7f43f0bb8ab4334790220032d0eb1f0bddeea77eb553a653de18ae797506e9fa8d254197138c0cf68ece9 30460221009bea6bcd13419d043ffdd7b2942ccfd9bc2775ca37afcc4a1b34f2a56daa9d14022100ee2948dd86cbb47b15a3d32a008c9ca677452d5e678d3cbe5a1a4ed62514849f 30440220639533ef78c4d4d575fb7c6f256bdc526e89e6141822c832a1143f3e1144881902201398b45fac6c52aebed965f3c1e6be90daadc5ca079011e825a5459420eb5868 3046022100fa35cb4cc76b9b032a1590f89fa8fffa51c1798ac9f638ac7a3b619bd99c9f7b02210087e8b3477956233d118c7ac3d6a623706aa780b9b76a7e740a1ebdba18e0b5a0 3046022100f68537e3f7ce6062e9801bd5d3e72f8e46b920fe451ae84809fcabb607b74811022100b0e0ed42983e40840bc9a46404a34ffa00707dadffc768eb8d464778802a000d 30450220651f8ca4f1fe3d31161ce0e3d510e7030e24cf1676e2c3ce00e6008b5dd0377f02210093cd938f1b9b6c578603adcb7d4e749d05fa7315e4ba839bedb5bde8299bb382 304502203960439e4b033dad6b5d2a10fffbf16b797a77279537f33ad839079cd41638d7022100ce9c75c450543091f51e9489049811efb49f71f448c07e7cd944a92246a555d7 
3045022100d0b11bc9ca1bad2c9195b30660b07d5023d497b33f19847b956f6176b360399902207d7580a6426119c730d32311a0a180ec233b232f6c3fc6fea51e4de2d4eac86e 3045022100cf960b4747ea1c8cd8ede7391ec0869fe6f2b1bb0d4fa49d5b2bf98d21ce36aa02201469a0d16b31222845b32ff53ab3c745a16fd00fe2267972ebe33bb1c8682018 30460221009119dac82bfa0ef5d506e4f7c16c8aa7b22d3a560336330d65af7640a873cf96022100c65ae9d9310d9920f012a1632747a1e961c304d70086c6016f1a839aaf89f596 30460221008c799d57cb663777d995900c156db08624e417c0a07fda9d100bbf7502867da8022100df9de21dc90dbfc1f472025cf752f20e7470a052084327577f4df1727976c6d2 304402203c192d179243064734e1e31a135bbabc7d1f757da700223f57b519a78fb6c15e02201148116b73cbe81b145464a84519fd0036732ab2c9cc4797b499772cfefb5e3b 3045022100906c9663f114cec88a0bfdeed10577f1b65a65efcee16ecd41d508a92daf7f1902205b3d3fafa2b7786ba7f8d330162425a853b71b0301a3eb1c82cab21f38b9aba5 3045022100c0d0954bfd987d42bac6ee71e94021a753958980e7662b4378051935b854118b02204a5aab5fb800dd9fc9fd99d472dc52613cbc5f267c718080672ded5ffc57cbf3 3045022100d0a882ca47f14d2907ce6383ad0a50978a7ee8f3ba5207b21fa0b43a1382539702202e9b0841ac73fdc35937a7c8503ed335b09fb4ad1325ca5994d85ab594228af2 304502206bb86e857ea51d1dcb310d083d8f00e77cf582df079693a19a6a245a18164837022100d3a8d23607486a592696e161ef38237e1ad85f0b6915b2f4b2f33ac485c8d612 3045022100ca0efc75f196cc4aedd104f9d94f4f0cf752b812485be52c1b9457df4fe8400702207b956226d1af6379818c9b37d973ede00cd2d864afa127aa4fd4befac7a5d7db 30440220479722acf9a2f5baaca7d732d9f3678cfe01c3aaa16005db9f95fbed1f330824022012ce3b4c5ca10ce80db58b8db991cee6b347cbdcc40aecb5b3a86a429ee21721 304502202f12050042489b28ccf0b21a6055b0133d87fb5e234db3fdcee6b8b65a4f5eb1022100ae1439153ccdda3566c5dfa5218e67e472fbdddbec673379a60988e51a854018 3045022100b93945a7392c64bbd9c6861bb06c0598b5f634285678998b61042c901d1f0f2302207e04e59a9433fdaff9e338cf841eaeaff6bb22785a494dd31fdaf5b40f0cf7e9 30460221009921a2bff6d5263cc6db84ce105877d7090d1969cfa17d06e48d96654ae0e35c022100a49d945b85c3ca69abb721138a07605cc254267103c75adbb1e1bb5539666614 304602210082148c64c69551c373f391f656c9210c83841e09ea64952d35146166459f9035022100e4405733cdd53d656bcbdde0e266efbbf5f8c9ce70cd517a9fe5cb4c606e9a6f 3046022100c03b4f93a12a7ec7c6f47f0a1d5a09468ff9dc4b4ba262570b1ddc14eadce977022100ce09f971f47e952ca7d3f82547dd1da0e9c2ff336cdf49d8f98e44a17bc55da1 304502203be335b54cfd117864a6e4d5b6f8a555d9f7d3e8868fa23204c780bebae585cf0221008e0a8792cc81f8a5bead6e66d02f0189031e4212a66481abb528b7f11eca2beb 30450220620869d2c1a469795b69f80fb6d6df4944cd995803cf8d1dd733f2fe573754c4022100b8c443061c91883b4b1b02ca3faa6645a6a890e6ab5732da718847886293ca44 3045022043a5ab4870f70b56fe9d1c09e8be6a65b67951ef599eca58f175f3da17e20e67022100adafef157789cf6ea3ceaf7b0b664f6609d3cc1f2cb650bfae3caf38960e27a0 3045022100ea4e2218ed106f903faa7742ee407106e8c648b83f1d78dd8aa8c4ec0ecadf1002201a43e682c81ac743945a7baa7acc17e80306d730492bead1cf39840ab923e494 3045022011127f1cc7e1abd3fde126ef00c80c1dc8a6062aad7dce8ef5d4b5c56928bbcf022100ff050f128a3760aeeebc5dad7677fbf19540582e02cf5bb4e09f62aa029aa0f1 3045022100dc32b7ed36adf565b98ad23aba7bf3d4429ee98950bb0b8ce2663779d3fd984802202092014ad891ae89ac2bb8534d2de92cddc6c82ea289e09240a680c1e3df891d 304402204e6f1ab6833c270f477cd5c37ff0668697843769f4e099911cada503c23a68e102203d29dd2b4647f8d852ceebf5afbf45f8cc98078f086075b78369256c0de2b0db 304602210094d330553a13e8887c10feb70ebf4c33ff04f83c76be3601e696de8fe67d916c022100923599e4dd8aae3fa3b5feb706ec683500bc21bff2241c52a502a6e39f03e5c8 
304602210096817dccc72d0b32930a153ba4d001481e8f0d66a3faabbc3af248f5ddea700e022100b8ecca7e7b74f083cdba0dced2469b3671a083ee7691056c9798f60f632dc8a8 304502205b565ffe26a2fd5a5b2f4d81ce18d526981d1f5ce2a9e44cf979732614407482022100f3f7a75a4074629d4cfc19b981d2d9d681a059c2da25a371d6c7880e676a8ff8 3045022100ebf02daa8620ff8af150fe7da4877a4d72dfc5029b4c2941b7d92edd3b100689022027dace262c6a834c653b09e6af26c35b6fe3d4bb511b69767abc994e3b68196a 304502210091823ce661814eb3446a0b62d0dff4950f396826c602e5b47b9f388dc3ab9301022027c166a0f17cc76782fb7056a1aabc2879e78fdcb2e30961984a209f0d6a0697 304602210089c6e38988264b9aff1562be548f41c1918bab8c48ef721b109a64d45d807399022100f2b2e2affee6376de8cfc2d74250b37f985f3d2c480a6fa53a4b46bca9c53a4a 304502200b0ce89764770f4901909970f5e0c1c03c14082994bc5df345229413d5ddd7f302210083e5046c0b6b1b97664487b7251fa0f6a773412ccf6103c55437bfafba010db3 3045022100ea109ba8df8ddd373a24ab48c21112e524ee630a99333f184cc725721c17561f02202996b5768ca2fe633366680d2fca1b4e2c93f664881a5eeba90c98c1db564544 3045022100c1f1ff0e5251cbb7d58845a3965ddf0b8bec083996701fb2a0dd690097155a4d022046dc8a53f77d27370eb54824c11cb19f9a430a5e8d7ac1d21b68a3b06c12cab4 3046022100badb8b219c0f219f07a5723f671583083373260fcb2c1293ed1f8944105187e1022100c367087213af6d4988be098656224e8033b64330dd279ea4fbf9fc3400ca91c1 3045022100c2a72e8da0b9fa0757ffdbcb4d6de57b07563a23f49f71fad01df386299f02310220113aac31a69b32f64056c34647b73df83f1b711a1302debf140d14110f98fd90 304402204d03916e4a1e9dfd78ac641e757780c6ff56122e28fa422a0977db38653291eb0220158738ac3489af16b09b838fd2952642a3e400745feda29849c0a49fe8324dbd 3045022100b94eba405e8a4fa3016e6ef6407408348ee2fce48655a3c8204b2d9549c23d0502207716cbcf67eb57ac355d5afd3a77d7d5b2346f7e84a46176c6705ee3ffa59fe4 304502210080478d06ffd1fe5189fba7ad6da6fccd2008b8f787136688f53e72d829b60f7902204cd0b48a69fe8a743f0bdb1ad3bb6f722f94009453ca15045beba68315309b0e 304402206bb1e2b9ad0dd86ba52ba6d4a7113f9ff52368744e2f98456eaa076c1ef4daff022052ffc04dc9d0746c330203bbd8f014e59782fcc610238bbd43211dcc554ae1e8 304402204300df37a0eb0a1ee5e0bd486771723c06f18b2cf9cffe70612df30ac34c914f022034d49f6a331dd8cd852be494935d4940b4218919698d1f35232a1fb1d1f931d6 3046022100bd6bd12d14d4237db4d31168fd9ac5091e2b6f9f857189cf6c9951cd786bd76f022100d8bd25f02d1c85ec711f448056c92a5305fac77e4e327dbea3ec754f397c692e 3045022100b6cbf634dd35b3544311b5c17b977376f4bfb1deffa7bf15bedc2d7cc3cef1bd022039897d2199bb05447e66e96e0b02499da763c35dd1ca950fdb205c1bcacff59a 3046022100e82ff95655ce7669ca214288bfe53218083b7de9bd9ad0ec97cfe4a9c4cd6b420221009536a10e180155331ab661f01b3c5c96a149dd8d69f07ad57b2f5b2e5a46f09a 304402206a02750c901b23ff6f31309940e3a6b3148b4a28948082d17b8d1f913fd7766c02200b6a3085d6945b687ddebfe80b69efff29c619dfd5419c429880cac9f4837a65 3045022100df918f7655b36cfe8f0498477f69e3a5deeff84ab49f22c0df1e202eeef19cec02200698909c28bf9eaba86511edc4deeab2b228978cb134e93cb2d8e99af6e03dd1 3045022100e68d69d57b4e9c32d12879879954d4179cbab8f9c1d86cb473ca7add480c0cd702203e2cb268d63558c374e86cf874c9fa88860b1261456e9256aa2704e458c1cde5 30440220691f44de5c8444fbf14f99ec2610ba7b503d4ec022658b9bad33fbd39d3120d502206ff176724459cbd61b1a91fa52a2cd802949abacf7c0de59a142f73e7e8f2cf4 30450221008e38c8e2fcc7ce81c7119567e5d4b28a6783068167ba03bdacadbb1afed40d840220535c8f9a8823e7418bed1017652e6dab2cc6d2dd46614f7b43a95da1fcc714e1 304502210084b796ac7c402332b5b0fb9d32277956eba653f70ad94c128384ddd6ec4e9a2a022029fd286e09f7c15e5998ead341efa3c7ffade595d59ff3b3914427aa8632bae8 
30450221009e246477fcea8df88628e8f36095ce19626b79229ee7ac8a11cbcb0be664a290022056671de7b6f02bcfdd29bd8a417bf98dfbeaab6a7b3dba50f3cf21a328603e55 3045022100801a2d40460f5c2c018927df8cc315a4371ace228fb537b91fcc168519343bd3022004d02983e152084f804583be3b629fb7e4d015b01f3d3150bc52d23b2d300301 3045022100ca2aa7daf8e72a0bdbef144572c45e2c416ce809bab4c3351285bebc0ae0618a022079b33cbaf99597eca4896d4b26505622afb48291063c99dc77eebaffb654e8a8 3044022016665791c3cbcdfcc6797918c6854e29fc04c87b2a3e01e9ecc3541d48b82ff3022070550f693a5b17d90eb0bbe8b81a06be357a340efbe3dcc96df3a79b86b84f8a 304402204d1f1d876ce328bc390e4676774057e78d526a75e92dd8bbd77911b2d606ce6002206bede91fbc6cb651e5e7905101d5011c13746a28a438cca79a635dc104bdf5a8 3046022100f505a4908940bf90165bb17a1dc7f124ce9640ee0b6322a5c3d6ec67df9c18ba022100a6561629e9fc04020acb8ad1a4aac6c1d1e4c67e18f47bd8014c3a6dadc05c36 3045022100cbcb87b0c96242344cabb91b17d1cc4b269d971af15f0a3a7aee25f1b123d9d0022012829960ef86674e3297cb6a4c436d9197d1481f78951534238e338ac4eeb8fc 3044022004643f22388c2663b6617a466ce21cc89e869a4f637182666e53c9d721a245200220719ba6ee11342b48bec45ab17eaa4ae32004d9d94c964a19b6fa03ffbe7bd372 3045022003e2a2aed186cd8571c6853bd248f7f9e71a380549e01123c0b1bfb7366193a7022100c572f95d7a61940aee758b416f31c22f8003cfea0edc1ca54baa5a6c927a13a0 3045022100b5d71c56f052db8a2aa109190f9157d9b491091140bcd7717af5652ac1d594bf02202ffcfd98fa7ed19a705e8a815d08cb6e1465520f7719491d9ebae776ed470991 3046022100bfc9c95ba7f550edaa0117187976a015d7a459e1aeb59825b68bf2b8c9a7ad78022100bee295e0cdd309be27a28e1152fc81faafe38fea9238229854cb5f21e764a6d8 3045022100b76a3e8177c86feece6bf2f4b261d52a971bac72f0d1edb199ef3d9e56d3f45e02204f91515d032619711f3c1714b73e6e3fbd62eacb8e43f293c2c46713109cc241 3045022100e019d7baac7b2b9b47e50aea5d4aad9e4bdc7915962ba904508e017271378e680220566f524d3d14790921702f17a19794f985cdf90ecd989b37ce0120b9a010e174 3045022100cb909e79e056d26804fef3b4ec801dc93b311fb327148596500c71a664cd30ef022039d46aa9ebd7464e8497d4263c958ebd0ea1153df60e66cd17caaa5801c4b253 304502210096081c906662e0d9fc49a4186ecea082980fef85e0232dffa578ba0252aa89ae022056838886c54b751319e0186a203a5b059ab4e05810a3b3c123d7d3515cf014b5 304502203ab6fcbf647ee9d182bc97b63c339c8de51c23d4ed5084af9e7844e07fe8cf9c022100e6cb362626fbb19f8fbecf554b96f770c36716c8ec7a7cf3ccab37bdc93b5106 3045022100a823acdaffc628f3e47fb103ac7655abc65928b583da5a8d936c5f7d93bcb4c202203a242a08a432f04843889ecda15f561882a1b49c8e07c1b54869042bb1be490c 30460221008662ba9b61fe630bf45eff92853e7fc926c4e4a5c27235a694ec48ca984e4200022100d5b2c47259e7f3255d9775ee9865506c92a4645d0b54e179bd29b7dcb1816706 3046022100e85d9e7a8490bb9291a6a4191ab9bf1969899b7738895fd7decb1e062e3b57d5022100de579e050fe440d6ec2ac188ab2ceb9e81cebe5c7e826cd634699c6d1167a73a 3046022100b91915f52c4fd46b4a071f7c168759f70ea33b84be1ac288ecd8d26970f3034e02210098733d95e4e7aaf0b507b8f0bb55425e714ead302be7c747a1b36fe205d53339 3045022100998dc58e622fe40158fd8a977722da4c5cdda741e5747f9de2f70ceb4063d49c022067b7e3079015be4d246ae10f78ca1b622d92c086223831c8b5bc34f6e241999a 30440220720a663fe57354280bda5e92f464dc15284862ff73f297acec11e8be18f50b5702204cde755e2c37d45d9c171dc2474a55c5cf573636d04350fc86703baf6e163960 3044021f66c50a20d4d6d9acaa8d73af244058efa65bf62aad5054838e238b5805f6de022100cc219ed6d96669119fca3ae2841c33102794a0bb6aab687439a1d52127e6c7c9 30450221008fdb57a89b776d2d28fd5df05af2872d75914714c61137d7f10da986bcff4c4d0220034b7fabeb7150d9c1712761bd3bdb8f9e40a1e0558850d98691f1d8afe238f6 
30440220108bf5fd7aee9cce2f7e9f4d0f10fee612f026fce454484e1bdcc30c46e5143c022074e33bffe0145221f2225637d25dd6383c9fde827bc5b0986988d0e23a1a0aa0 304502205a636d822f1d5d929c2f61e05c04232acdb4b91064dbb469a17e2e55c35efc2c022100a57431b00b8f7ccd9adb31e6eea41d24d19e246884fac85f77be5a2dbe96a597 304402206af16f148fab1697b20745dad3243be611c825cd9ae0217a9cc636d024ccbdf502206752b1be572d349934c3d688fc7526e076b69122d1de3bdf1e79612a27d258e7 3045022100cc082bfff255a1be914dd2dd2e973814bfb59e37e50a202c6d8d4bcb6935028902202185f90b2628982bb1e7223839c0f2583b9e07745aafd1151c7ab62ab2286212 30440220458f3a78ad1033a7a3bc2d3459d892742707e9b4ca30f1cdbeb5cdfebd9f992402202d47a499418057d39ddf5fc4a3966a3a2e51ea13ba8ceb77abb3bafac3ad013d 30460221008dc69d27caaaed9efd59fbf442939dd7cb9615de65d2345c1bf4db1c3598849a022100b7a9ddfc6cd1f15ef6d5d96d2dce34da1e6ac901421b994e1c0c946dfeab980b 304502200438a85bb6eff80719dce749d869f7a76ff2893142b06291044c218dc48de9d2022100bdc697d9600ff1d3ad7f82b2a71821dfaec5353d846396e5f51172ed9238a022 30440220046952f42379b85db83a10b7e123ea5c6504ae1378dfbfebd533a488940be71e022028e165e5d16a374ef30d4a226f9dd542a9d78dab8c932f07f1bde389e3c358c5 3045022042a12b1d41c732e23702d11fc9bfc079dd173667a13acd6d08837323c3a35e63022100a9029e8a87f8685b138336c0a53f38d6caaa100e355550545a48b5f6a1041bc2 3046022100b7816585f91f2b335b471f2e8c87d4419aa21fe808cdc2ad008801e04fdabe37022100ff6227bdc14aeef2d2d0dfff4eed6687764d911561ac9354e547c616e59f6ee2 3045022100faed5af92ae9a5e1955e942131d71bea0b92cab4d30def19b565bb160bb1b2d702203af557b150c81567aa49172e5c0156745aef0c579c29c520d8bea135f519d8b4 3046022100c42778943aa41f9f902e3be592caedf340921634bec20daf8ecc92e0566354f0022100c57f1eb996673a3345613a3e01640cea7f45372c57635205e2d2dbc6023d0f21 3045022071b5f1f826da7dddc43828a8b0c0e46c1b28a37b76c675c0ca1452a9c7151357022100b7d7c64b358984a19b08dc806433148ca234b5d917ed40586abee39384198466 3044022043c9e4b0e867da162bb06834dfcf27dbf68300692957b7ec29ffb3109cfa2aed02202d438da291492db32532f81bf10c059c09ebb5410e0cda247886c10874e1983c 3045022100ee48e66154fb555e102ac83fbb5d27ae6d8a1ab743adb6b2c11e4d6822b6104702203f714ae8c23f6f60e85e31e3cf89113514ca1b824fdea937d43313b9272d0fee 304502207406d689d20e8a2890ecdbe49fe191a63d8a552fea486b5c2ae966ec045f2aa1022100aefd555c55c93ba703b398c4a135c757b00abbce3d2accb716ffa0ed1609bc35 3044022043ca63cb59c124edda9504f84b081833297c19b198cde90c60958a88954ec5ca02205cc152524199aee6fe501e6b5505f4d23470cc93a3430d60db126881b74770c6 30460221009e2fcd444c66e3e6ea16d0c540371c59cefe5c8ee81792c18bc003afe18a6e9e02210086da08a0cad895f0f7ae5e3ee7dfb4fed185032095b92cff195a0e2e921ec1f1 3044022052fbb0525c20449cd065f12c9907775f168a79840d94dc6d5be4521e2694a1df022057480bcc3b0270775918f76217271e9f85e6d56952f146497cb9b927e7c1b38a 304402206aefd0b65e9c0110394c0f46d3e89c86c0f31a14eeeba8c867ef7485de7cc41602203d161132b5f7cb57b1708acd825b86a9dfe86161946ff4fec1492f59f6defda7 3046022100c37593a10ebdffb2b400b272d43f3409866689d52127b515f19d5e56df2fdb06022100fde223030fb1afa23cc8cbc2bb6469f5ec83098af40f64e751806d2ed8027ed6 3045022100b60aafb771bdfa80e2bc8c6e79bd697c84a51c1db572efa090cbdcd9f92311a002201330c400c63d367a34c72793517a904b9f0a0b54f20b5cd50330d2412cc751c8 304402200750ee5356dafe414311da7379d03e8393c813d1b9c0dda79e84d146d28389cf02201a1ccd5c3eacccf45edce47352de0435d7708076f9663f1bc264cfdd7da7cfd0 304402206bbc285188b6d7c223743fe703e5b2bba95fe0f814d22e23dfcfbd032566b36a02202cc1f30226ed27ddc44b271d0a641de0b2b3750a1f67dd3e191660b1bed1eb13 
3045022100d8a7f0c7cbe1425b38aa6029d6e5d655671cbd86649ba90ed967244884f5124c02205452e16bb5792062812bcd19e5c4d0692dae0953a2df884a82858975666e093a 30450220510e7fa9638108c7f67552a703d5ad5316e32bace69ae59af2f8d3eceae1aba3022100ffacc4c8af7cd781c7f562e75c72a01f47d778b836f3cb4c6335804e34d3aea2 304402207c49c354bd78a8df11ebe722bc567e66a784282b8327610cb599a499e7a2a71202203983ec5b4752dd81915799da898bf82e54c7b77d0b75128f06b4bc381b4ebe56 304402203edcc170757214fb3b1ce60d80d028ce8e8de0fe2e115ecd0c3538afd466a6600220261056917e1c25b11fe640c021e6774e299dd051789e10a4b11d0953d815ac9d 3045022100a214649db611ede718b99a73df0051a1520d4aa6f147166389b0b16803becf9902204715dbf4c8bc14bae7a35ddb39b0ed988129211bd354e37d14fa60ed275b25b7 3045022100abd04875d69b9866b8fcce8842205666a1b3f5b4e0772d921486acf9fa18909b022056ad8a52bcdaf0c6f17c93c7b87494cc096cf32114244db4219c5bf7cadebe34 3046022100ded75cbc386ffc74dee11cbd16089db9875303dd945f4782e5b590134bbc2fa7022100b92393d115b7499c9cccd2a6d7e2674791eb8858ccc09cb10962da02c3cbaaf1 3046022100f825b325cf475e7df63f67e39afa0cfd5d5fe81beb969da3da74f232072f5273022100fb0b1083b66d2239d4f5c7d60adc20f66c908ca8632ec3b39fe9617c1b97ad2b 3046022100f960aa1bb0db8c840ff5a3f0cc7756ff84c1ef04d69e527ec8763451d172cf380221008c878e0874af2453b3a8bffedca71a5b53c111213472cdb9bcc5ae14a4fc95ac 304402202d25260dd68d7bd748448e78a776cf6e873a2f6daadc53d8940e7ea7dbc4029b0220710ba7af44cef9f6f7341a4d32e59deea9150edeb88c9fd9fdd735b883c182a8 3045022100e035a457e035c101ce6aae4c40d1d8db670f8571c1e6b31627b100f5bc5e92fb02200c55d7ff86fcd5453eadaf0f0dc0994016966dc0700ea3b22948e4ec4c5b2570 3045022100e70f902ca1952784a098557cae4c246ecee1b4bcae4cf7f194507984eb48793702202d760dfdb942a9044736fb280f652410cdbdb2426e59eb4e863951ea0bb50763 304502204fca384ad93ec4ca1e4b8543805f7e96a8879e932f1ec491e0b47daba8dcc2f6022100caaef002265df25db740b013e225bb265be9d8169ac36e46c0e6cc1e5aba2dbb 30450220439797d86793c4afd6187ed23be65ebda2291ee54e836510440c35c1382531d1022100ebcc5e550414291546429056f70e3104d61704e3bf64564aeb3be995ac68014b 304402202ec12d61ba62f946b8a10ad7dd809f822e7f1580816edc477e18c3bf5784cca002200f49cd62f94054ed74ce6c2ab4ff2cea1d74b0beba2f9afefd8e6e6b56dca893 3045022042a76626f3c54c330f4cd437a78b59d6598e618f60e342253b08c3e74083086c022100911ac3bda9a8fd37a960e18ad6628e130b7f11f14d5b9e89063dfd5c572d7f3c 3045022038cf4ee05f6a8674242c89a9d54aca3a3d69b88fc1d631c7491fd090d5271b91022100af9223ff8abf0400b80a24d08a452704dc9dc9d13cc73a6ade00cea95e70f7e3 3046022100a508c22137ae18b66feef2e8e59f0351e7582dfe69bf5d29494323c8f5c3b43e022100e44534d0d838bdcc59486ff19ee220d9f99a44fcf94ac2cb3b8d7de3dd4bdf77 30450220026f9a3da2148b0733ab87ff86877765d9d9433a83423876d64e8d32aeac9432022100a776254b4e9e43b1a61ef5dcc4001d3d76ec0c2dbabc8c1ae4abd2211f47161a 3046022100f5b1d71f35d726f865b35c3a02e446cd8883a4a6e58fdf57e0af53b8398bdcc4022100b8326ae65acc54c137373e9e357eb7eafd0c1b9f66b15a9b688b0345fbb77c44 3045022100db84a41746af2837f92204e25993ab7c6e84736dc51db40f6d12e96a9ee64e14022010aa84ca4fc72cd7b1fa5739e79a8b35f61e5cafeb61e41632b8a78603f58e13 30440220406729fc767ebc18ae18d8d6e02024461cb691c3d52f9970eed495a3a6105dc0022035aa8acd569121a45bd9e685ac37fdf7959f988ad7c9b9547cb8af8d9d6f4044 304502203611af2565bd948346360e5a4cb5803dadb130dc27296fa5d2243e351c4c846f022100d316a8441c5a33f22dfecea7bb64f3ff2875123118a2973b383935f54a7d6ed1 3045022073639506b995510504e87ff0559a7deb71472ba430e5ad08c4e55909c3b6af0c0221008396c33c84770745f0d342f5db145e722e33f4fe80de3485730a44d56bb18e80 
304502203de974a46191fe82073aca3647c947aae629a5e3e514c94e60a56f73bd1f14910221009bec1aafd7edcd00d6fc9e868907879f6d2db84076d8c7777e1d7e0a45974db5 3044022048694732b7854191a4eb0dc423177bc1c9b7b511af447d7f3b524eec7f75ddec022072ecc5af7a688f6c4d9f8ed27049dc7ecb0ab4972c0227f4bd51079a556a1732 3045022100ae855fac737617fb59ca54bfeb42386c4d5459198ff069dc22dfde1eaac619c702204685bf1f12d1db018cf8c8c987bdfbea7d8f50a9439b3a0ac19e374c7da09d46 304402204e867c6e3adea967e6bd18067d044140abd695c5e7b13f79e55c26ad937558c30220487822d041d0ab0760bd6a05b9dc0a7d17eecf3f3d0ed57d96e214b5050dcb03 3045022072443a763b567ba0e204cc958d911ea9298cfeca7969c53a877adc7d25165224022100941e53b720870d0718a565d72bb65d58e7a5d7c50a30de5bb16202dbff776d5b 3044022079d4082dcdbe10ffb809037a826b8c1c694459126cd20a52b5b16ee3ab682b85022031931794b5ca61c655cfef32e2fba19718f014fa6a3db4257511a602bc7aeb88 3044022100d70a9b4a39e58007aec586a98cb691bf29c21ce5779bc3084cff8ea1a5654123021f21d4f5b0c508f81e6c8f8b71d8b8dc31ebe85a9dc3bc63beeeb01493d503d5 304602210097fcf5a66ba5b8e63544112c5c5105f8b7217bd0e6616ca6e8dc0968a6d01162022100c601a084681de55a87e91c39b3786576495a205022e079c6d21ab1bd7a528c29 3046022100ee2f1596c9be6b53d0e2bc3a246ee4a28b9471bf130f12dcb90501cb5546072a022100828fd9e269f8f89103a71a371b84d115177a4f09f19d990b43b2def552f95010 3045022100e1217143432c57a404fd978f451ae0bed48f1ff32ba8b266ccb3ea147399064202200c8f09aea00b19d56d814f26003fadf85fccdde16f9f0862d04105a0b9aa288c 3045022100c3d404a118f34eefccdbaa69c0d93c13e1408a7a26050942ca341cfb2e58793702202e31bba8c033f25d6af7defd3867612e3ea99e283528e5a6e36a26a5415f5313 3045022100ecb583bd1a383f65b473a6aa208a95bda4cfaf39b24a7cfbe15919817c0598560220536b3e146e93a766e8a7117added4189e17858ea6ae777912f59ad41cd39492a 3045022059721fc530f6c5f2e174e4b9d8efea2ca740d37eaee7b2d8de700314365a6080022100e1c73ef90a7bd6db245a229d483b429516e896be06c35dca9e1704a33c1f9ed1 3045022071d365f9cd09a4edb24ec31a44af2731cbb7d505c4cff4659c28fc192baa9b07022100815bae0ea9b3b0e20248dab7af2ac9b80c52d2a5ffdc9c1524e6b049649d2485 30450220790616911e3b79b9c2e6bf7dcf6db286be50d5112ffd3b91cb125d11f12f50bd022100f3a9bee0439913fdf07e3ac6c59c4b36b39e6d98d5eff8334bb93a1bf13788a5 30440220016ba8cb0167b3ae7124488ad102fbfc94f966cdd67c3d5daca97fd22ce7bb000220431984dc12120fe918191e79206cc08c03b3a081e97557f7c41cabe8c1da1f47 3045022050ce593bf01e76b04cf1042c7a2041c75f5f68adb61812b9903026573175bc85022100eb1546a4aaa3133b2808dfd224326022ccf3be5c0eaaad8d94cc832af70257b9 3045022004f7d113ee0d19ebcd34fe248d6f5389dfa3d50e2ef6e43c90bd3df5bef0e9b7022100ad7fd78c5f363ee1777713b6eaecbd45350a74afea5157a61e8ad0cc8a9b2d2c 3045022070f4e4cc112ff71c17843619536321ca5ef82aca4f170773a053b696538f36ee022100dc1ec29def6c9af637f1466a01e4f766cc6f2152b296a7deb19725a44d2de6e5 30450220500ce572f225b51f0ca5fc23865d53bc72a3ffd2d512bb9c02082cbbdf5074ec022100e3e71b152be1b627d3fa3ee057808810e7ba21f46ffac680ceac48f45aae56a2 304402204732b0942c110eedec494527b3443e7ff23966138d3e4896c679489bc4a8b53b022075985d3400f5f34099b126bb8952fed3f21d2106f8c349ff43f7abd2f321d713 30450221008a2c9ef7878ee799e8c971fd94b778fb5052ab4f208837cae2953ae7bee2d3bf022025b06435d3c80ffc839bd4cc7c104cdf5effa8eaaa17ca5eea91233f97872fc6 3045022036cefc9cb2c7f6abfb627f1b7a0af9a72cad10922a126c1dfa5ac40be78bd0c40221009cb39fce8b04668ea0e6e1e1a161bd58eb2df071e5276b5e380c74fde9bc3f9f 3046022100e8e9693d6ec5f0e06b4a316d45ed24a94c9a17182ba01d4cbb517ca7eea22385022100e4951efbcb11fa8d9da60012dbe6ddadee64f2381b3b7d61527ed8a747e5dcad 
3046022100e03060a0941790abe84a9534cfc953e8605f8986335e91bd3373e986645aa248022100b5010ddb84a7bfd67e429a446ea2765e39aa77d1a1a865b8aa4a5f2e1697f4e6 304502210097b269d73443b91362076058c48255506a83d8998fc22f8a67bc3484c156621b02204ca88caa1bd473f18e5a0bbafbb88f29363267f245196490173c00eefdad7d74 3045022100ea0a54ca1c99e2db5cb89650d7d003299d3a7da1da4bce6391ec4f6588fc087a022063ccc1bc9965df82dd25484774a0f00fd713b420858f35be760fdad5bef6acdd 3045022100a3d73c4d0029752873561b478dcb5dfd512a1bfee69a8bc5570b5f68a9164f8802202446a2810d7d4f37ea0ea88c76286654d6c3adbdbc45918ae16b97e2e6dadaeb 3045022100f84b4d1eec9f57f57bf0bac73141b64586016fe59128c19f104cd39b1794971002204798fca0fec3d7584f41518a1fa9c540bc15796a0add8e70633fd549c54c9c85 3044022031814fea5c15f0a67f6afcf0c9c5aa4ac8ccd5e5a4bbadc4b53fd2214db7396f02206f71335230726ac54df668a488c3a83a334c37e6bb6a834c5b74fa94aa981b6c 3045022100f9a4480bb7e64d5ce29e781c1f9a6b23356cc6a2e3b9c0040e5cbacb4c2a3bad02204ca45a47d982407da11092245cbbfe827043b11edaf1aa93e33a301713ee30d9 3045022100b1584bd9f2f7f1f5784e32eece885974cba80e1c5b98fc7e575adb9203a344ee02202ebced23a2dd1f0a223a5c6ec74e3fe150bad50569af4801dcf44edb12adfc18 3045022100d6887692d3e00eaff0b165e82fad1c6334bc6c2725bc8f669338aa5b9192ad8d02200ba0557aa630e42bf1ee8f481a798ea11ed3ae024637a0825c151c53ebb2fc9e 3046022100db15e48c36cd043065402c9fc26ace82f8675f12ccc51f03b862dc97d2999331022100ecac46bb1d92929481e7477b68cb04a5d66306fd8998712a469f3e424e7aa163 30450220192c0d8d8e2e133009796fd7efa0a46f1057999817ae83178e57496081b22d74022100d783e3a26eb488b5b0d3453794918d5ea19d5e425bff9864eaedc09bbd30d95d 3046022100b8324ec03e613e0d8e71d5f9d1513fbe77db320debe9702dab10f3b05944ae80022100847a117114cb9fcfde57cdfa285005417ba71a69461d53c2659c52b2c6cde414 30450221009096574275f3aa9e0ab38a6a3f1d0aab4ffa6f492be1992e1f05680a8c729e3202206a0b31ab50fa4210076437a9ad25a77cf73be08178eb47025c0ccc4c4c5537af 3046022100c2f88856f0b68c4629301d20eb9a79feec374685ca4783b859f7ab69770e07540221008ddc59703dc4c007029ae644317bb57ee6ba6e421946e1ed5957ee8b368b18d2 3046022100c9b94194e658138cf8952b21286f9b7bb733ce7aef9a0007514709ae66e2a7c2022100f82b8b73311f1dc08cbc212fbe9738513b14854c3c2818b67d9842e1e729dab9 3046022100cac73db2b3af70d34f1aeeb25f5af3e89c0aa332cea8a27bf6f503b0f65e80580221009b50bf4a147f0950aeaf94d549313db037b20f453182df63d632096daae7bb2e 304402200eefc384331565a22c7ff9256bd50da2321110bac726474c0cefa9ef4ccee99c02203c2a171d28cc3af46c94eb3a5ea5cf4fcd4e30907cbd2531df2d2a975e10d5f3 30450220024e00a3634fbcb7e98fc637b186931177bdd5929d09c88c259628c8f6cdf7b1022100a98c765eb6a0c4bcef4903d2b4835244ec6453274d0a34ed999bce2fe696f5f7 3046022100a7b79cc0154b8b75ec74ae77eb07a7dec27680db0772b13edae07b5e8d2ac51d022100c300be8f3fccd48d09ea957a7e25080c3886183d796ac55b6ef2f302ed89125c 3045022100f27d80294bdf3219a369b550fdeed7d4e5c06a47bf59da058851920c6197c2ac0220297b1ac15acb2da1c90ba89d173112cf16f196cd52123e02170223f248b48c27 304602210088c5a6ee2d0ec7ad14ef5f6f30ec5e569d72f8cc74eb322d5902f5f870f07226022100c257a3f79384adc916d79f5716ba94f4618132e2747efada14806e080596a17f 304502205df52c208994ec2e76e8ad2b99e1dd8166ea66fb09602da6aa9b16fb43274332022100c7ffb2e2b27b9666ecdf4eb6ce4633883ac6179eafc744b0a1945b0805965c43 304502210082f60a21dfd50ed6587451e88726bab1e273aea0f908dd584e870a4ecc76e6a102207b51ce897910e128ebd53847ea62956b29c3f2dd70e2dadb636b24b42b543ece 3046022100a64f2725d12106790138d762fa096f24722cd135df415dc52001e49beee822ea022100ba25f8a2eba6a433b435bc2e31869229bd4c5a71ec2229e4c7f545f3f0a09918 
304502201dbd40db0d6afeca466a4429b8cea06f26bf418ca5290e4236b994fd5514f5c90221008aaf8c2b8430bdaa5a8b5ee3f17c67f6ab5cf248b61aa7c1dd4ac7030815a98a 3045022100e14e2f1dac49b17ee30958a7887f3936127a47b65da5f7208779d8679c26135b02206d8f131c86458a2643859e5a7df8ac7eba68b9e3b1e7a09b0f8f05231dd45d8a 30450221008432401337de25a74f43f7d46482ae3c2117299d27547b5a2917ca3b664011d802205baf201cc2e78d5eaef5a123261b9dda56f00c1be0e6b817b285da08c0539d51 3046022100d3881fc75b58ff43acfcc0e423755ec0d717140fe937ce4bd76f3f11c79c713f022100a39d2bb19d18970ea50b1efc17245a977784980ed45f542903cdab1d6a967af7 3044022062ffc14a8923ba861aa9c5c69fa1dd1dcd131e5f2abfd6e30ab71ab778a7236d022073e778bef37b8252bf9cb7125bcc4ef9b0359abe9d29a557dbe30ea220337a41 3046022100d19b293298913fe4d4f85041307a93eb299d88f574842750ac0b3d440c84c7b3022100f01c23241ef75bd7ca0ad1bb9a6c989d4954ac86292b097a7138672e6954455b 3046022100af4836021749a6e3e74b1d686cba3466c38e543276e7e8024766094e040a3296022100c8d343c6f32fce7b77d7db0f3d733671de6abc1b7a423ea68bdf552980107f46 3045022039c9cc079414209a8f85b842b44edbf02424646ffe9e98a706aa6fb4039a3018022100eb92963e506bfaa0bf763f3ca76b5c7ee2b41cb2824cb313ff4b1c1a54a11947 3045022066974fd485f29d80eef468a141d48d80aaffe15b7de790ba136ba5103c3ed2f60221008e6330c89d4897f77af2df190cb1bdeb4bc08d1ee34ccba59dcf1a804fdd36f0 3044022000e58f15bada8feb9bd74f538259a61c9229d9b3a71a1b9b48db5ad233878043022021924014ec300993e8370a5677d0f236fc5dfaa7e146bb9c599cabd2b67ba525 30440220497d1581645400ff4f9b91e431d0f80df32c2c97943cd303be928a25e860c47d02203f4e3504eb8684b6cdc5cf309b2fc3fbd593f14e257a279fa61ca88174b305e7 30450220064fdf92c89b2e7e591be1f9ae01050a2366506b062677601dd17bc328a25018022100ffcea9ccd22c1168aef3af8e252888cba5b2ba50c2f8950244c0bde879e8d05e 30460221009e2ed87d60a49ba17722fdf56b3f52865aaf214393278bfe21e0d065cd5a0ba4022100a3c774b7d8864f79e7541b9bbfd4f696c76fa80f245ed0701a244536e850905f 3046022100c8763a00a550c708dfab8a12e522748938847b26047735738ba3ad3a5ba9e59c02210083391e6f119fd1d7a4162149b5193116967d70957b8e78541f1c8178a01a08ef 3046022100e5a1531e205c41e84ca5c1c7d9bd151b65255e314a21bd504af79790652bb380022100f6e1d5285467118baa612a194c2b2a4ba34f2c129aff86f49a819b637748b9a5 304502201fe1c42074b201eb8ce23069c6050ab5126ff41957cdceaeb29c97e9e4b7d8b0022100f8cfb85e396a510358ea910d631a131ee9145bcc6519fbd5300f6ec94fd76870 3046022100f5c28948f2a471c1446b1b227bff794d23b8359cc5d2666dc6f9dbb173dacff6022100801aeff4dd5cd6bd4d5796e9c6ed083548eb29880ec214ad284127e147d65a33 3045022100fd1e456886a3f1e1184844bdf0261c447ed81040e732a509869b461c4c2843fb022033cb10cd4503790e6f972e9c620ba8bc56056d13ece8952b207d1eb2ffb63960 3046022100f4781362848f4759b2fea2980806d5c5a33cc2ea9076334ee445fb3c86d68649022100930abd5f3b2ed2610abb409ecf837ab1c31dc6cde4a3e4e3dc468396ddeedbf7 3046022100f1a9d254dafb3f7c5803f150c562c977e50094be03bda8e3c1899e7d1a3fc6cf022100f33e46a392948e100fa9ca60bc623810383c9dc66f670af0a698e4137eca5513 3045022100e6c188c3a427c02bf39d0ea3108d0ae14d6c9a323e20f310e72776fc366ca59302204d612e4f571c5685e17a33cec1b98714473c07769136251dbff1d202b6309768 304402204dd9e959abb7bf8589f1e640592bff128123f4ac1720d12a10ea9a64cadc894102203c8205ebddaf6feac7566d738990e59048fa7e5ccd4b90d2508e0d5387b1a1fa 3044022021d04e48a9d04ad7b6fdb2e7df11a691160244a00233618bb086bc5b4ff4de600220714164e8e0444eb77e83bb3b095517212d8eea6f818a998c460ba9417c99da2d 304402202d905dea54dfc65647871dda0702b8f5eab5b7b367a66700e64729aef234d71e022079f0765d630d15f417f8ec268fc88e2087a081106b226bdd1339760aa336c236 
3045022100e7d40363b8e543e95cd111a0173a23291524742c86984495f58f5521f4282da50220183d8648b4d10aebe9aa1e1b4c1745928aad87a0adfdea1b8623d2fa5e7ec4ce 3046022100fe8c9fde173f854c74ec51bfd0ce6389de13b5ddf962d92257b5d42b5ec1070c022100950e9e3419c8ef75f8185a2332bf7f4fdc6b2e140476f82dd9cf94fb0522334a 304402201ab07299d25521ad0ae91c7173cad4368fdcad1915b462fdfc10af63f2fd6753022024792c2f1df719c79b2bb2277df7d207c3ca5e294244102aec06650acfe1b35b 304502204bec3b09eae3908f6e93292aada006b7658bcf21fd59b3150702fbf50e6cb8a1022100bd66a1db387d11b82af35422ff79eaa4b63fc682d28b7d163c7838e5a64e2afb 30440220570067cb2f772993eff24432c104a2667b508a5aa61bc05843b78f39293f1f02022034d12a5509448446ff79cebd2f1384f6eced11eba3f4838091327b4d83862f3b 3046022100ccf2d5aa105783712a4af8fb8362df9f7b04e5b96d574f49ea8d903c192a9d7f022100916daf8a0142167be4e86aa5f401f40db04c5045273fc22bfda51d814c602be7 3045022100fc57b830fe4c002bd01c69b07d9ad6e151f102fc0005e27f581c9d3462dc873d022061741055ab811652ee40dc74c95e62323b1033983889d6f65ab169b8c97bcae9 3044022028c11d9b4a4ab034967d861de4aba12a342f6bca5346230deeb7cc9705f5a02e02200479e32e182fcbeab8ef8b9360189b474691664052d5da4d68b7f866ffc18c93 304502205d68b6a0f8b90f2c6c699802d044a41d59d1c244029cee01bc0f76ec2e9b335902210082bbffc8bace262b3b3bf95e5ac48f35c4dd8df07f412c50335105b60ac029e9 304402207694d7f9787c94700cd30ea0fbcf587d2033904716cde1bb5c61e11fbccd30aa0220117e9f965dda579ae25b83b8ff4e5984cbeb00cb53b52f65c507453461f55262 304602210087199b53732667528012d88132d3e37ee8adb0b8cbf73bf0742416d889a76e880221009db183881d2e892e574f497ee63db1cb606ef4266705088dea19df42beee8ff5 304402202962d087c464d386faac53347199841a23f495adffb08740fcad79636059a80502202cc7a96efc89dea41ddb7651953d1daf7ef5565ade411455213f9aaa8c8ba37d 3046022100f8f32d334ec5a6854832c3fb7a9aa5b60a607143394ab8c2b64b1daf0fdfa20c022100aefb3ccaec1d4fabead1e3e5a8bb1a7d564f530385686d930133b371065aff21 304502205f683170528d164b24b4d5038a669f2d3f8fd921023e813083760427618ed4810221008b06f60ef41af9c11e24ad7851d222ab5d6ffeec7978991b5202e5e5095814f3 3045022100e5dbfc820c6092d2b16c132e7487c7a6bb1072b313f659e5287af4d434cfd4d20220771f64b3b024faaa4a4b38f4b3951f95a40362ab07308bf4802d9519a6052319 304502206de680a0c007d949c89c9dc523848b5adf6fcbc8927b88a6b25c4088150b99c6022100d2321de19ce7c18c6097f6c5541981b16879f71eda44c74173bfe85847a67308 30450220783ac2b9291e6d42416e47aa075dbc45b4a749f0cb15f9b7b000e36ab172c030022100b27160f5cdb25fe6260741f792c1488e5c57415852060bf6fae2f619f1b8d466 304402200762433b528e15cc08b555a68a80b6234bbe2e1be51c64f1f92a3245e169dfa3022058033256ef20ac26ac1e043c626cfe20f30993e484c55d6236cf42702b9493e6 3044022050beb72d2211177b3b0ae6b00d6ac8c546c6a1efea634d62100bae4d7e706188022067e2b383363f38c5f5e2bd50ae6696899f61a5a60439f51c4626e8ea965cb58b 3045022073baebb15e64ccb30d8c0ebff27ef3ac088e5edeea08154905dd8d0d8afeb0a7022100e77f1dc9502d0436b55acf322d0b559350173481a046e7510d60f235a9e9bd0d 3046022100949e752c1bf2b310f097d3d1caa55347c9ccdbb99b28d163ac03523972cf1d19022100e69ab14028830b1a5c9d6a890f73bdc1755c40536b0084586f426b4112f4bee1 3045022100b3492ae4435234b65299bdc278635400cfc8dbd1a0acb726d5cdc6442236b7260220173bb9e75e2cc64ba852c6971d3c9dc5313cfbde621c515628fdc3954fdcdd0b 304502207a0aac6c61bc252c6e6d69b5aa88ac206c15db63597d25977937e2eecd7b7728022100ef873a9d4e38facf7a1d962ea485070b0a3e0ccebd9581e4cf5beaa50e6c7c23 30450221009c6432b83173952328ca037e2535056cb355217dafa8f97aef485a2cb9b47bb5022035869538346356844c1851223f8ce27c6daa04ece7ee780beb598f3ed677424f 
3045022100eb04f0af99f9a91d220b7a65e2f82095e3a00ba1dd3b841ae3f8e1d80df2b91d0220026e428e193a57c4f305936873a90d891cf165f8514cadfd92b295a76852ecae 3045022100d5d57ea6708f4ee80e5484a1b9c20770f54fe7c449c7d8e05fcb905b9272c58f02203bff8abb2c91ff0c786cf9b9366855bf9009d96001aac4b074df8b8ea77c242f 3045022100c2d2307e3b98e8078c2c90c510cac24f07cfd69f7305971aa68c84aaf3b1ee650220390d635ae9e141f707f8e57b8e3d48c3bb4a3989017aa006fc42299486f391b9 3046022100999ea2c82af5ad71e519c485255f167ef42b68d66cefa98c97665f220ec805e6022100a419c4519275fcaf9dfc983edff1c2c28b6ac668fe44d3934fa20327487f4c42 304402205c34a807c43879f5b35be0d13c0ba8df1f881ef0a9c91af98e491a03d85ea6450220713ebded3e02fb399cf0ffab17117932b4fc29a0fb7dc9adb45ff25c744a48f8 30440220563e891912018da9004489932ce9c2a092291ddcb24018f1c0e75e4dc69c8524022047792b7bcc6770ceb8f4ec6193b541b3fa9acbee8e7c5a7fb438b4b72c9b4aca 3046022100be68e3f05e3d701a5046bb608659cfc2cd45c821657c01b75c7b74a36e448d58022100f67ac9cd649e0cf77af6643fbfaf5f8254af1c214230d4edf581a0bbcd9d3325 3045022019c8a23e7972d62c55b3cd8fb892622f1148e79dee5828ca64f9565bd2b7a87c022100c6985abcdf8b1f8b7073241bfb381a81cd500ac194f028e84d023050628dd569 30440220174e77c04c1c2219c933742d50bea2391e43231289205cacf13d1f3fdd5bb59c02200dcdc8d97e9a1f91f5910d5395b1cc157fab32c47b13510f7aa38da103b1cf4b 304402204bf706b7ecd0f960219b056dbd9501c565d073222ea3c0b7eb5f7cf9018f321202207040f91e0bbed26abbc058fce4322511bc9d980332e0ab987c981950338182c0 3046022100d1b8ca32f316bfe57ce64e4a889b1b21f2309aa168e9e3a93a9a26f0dc1526e202210085bb27469c7923c81bf65491e15de46eedf8edc6dcf7bf384029a1d95762138a 3046022100d655058cc05293e7152876f8285c201cd93afebeca26fe2756d85f02e6db5436022100e09dad377a7fe428c5ff362197ba3f85b539b3025697b90306f2ec6d1164ffd8 304502207d791b7d6a0dc522145cd4ba388b18e5165af3b819b0bffc4bccb08ea89bbba2022100f26a711891842f923da3d90519c784e177946f82ee784a800a4f2891784ce417 3046022100ae7d48c3455d7e46290512539825a0d02d4578c01fa16be079b9ae04f0abfb5a022100acd140e01ad5dead5efd50174c3b03a6ec25ea88d524d71459f41968358b154c 3045022100bb2f4f94d4e1337960dece91eeba035b1fc0486d7271b2e12fb7e57f8787e5c502207c9efa3db7ea81b412ebccf171f39856556e96c5852c81dcd4595bbc84d803f6 304502210096b34b9ff18e241b86bc8741143f8402c69c7c744646a2542203eee61d341dd402205ca71a882b3ab4ae123a3a5dcb79fb5c5a89ec5d002894439a2c205a75100033 3045022100cd39c7f0f5df466209de1557a3891b49bef1f6acf90d4d858e5115d444f3efae02203e5395cf197091955ab3789b7cf2ed2049cf79a82b2b2a600e07537e0b2033e3 3045022100e8d7e483bd4ccc4ed7e609162b1047b20216bd7b32fb687caa6dda9ba857f24002205093088936eae08e4099f52f146662974e57b3ca072703f4e950f0f2f47d99e6 3046022100942e575b27e418619a29fd12e6af5dee3518298c7b21e342d4f2489aebed92ae022100ad3ad9610c5f2cc90e36ff01f5421a3f6927659f49b714f80a50e327b755bba3 3046022100f690869f27a0b2bcb4e74e84512dc2213b9d454970832c4ad24d48d1f9002a9602210080324d4c779afeacb4f47866ad3db4d28c9a965f1cdcfb3712a6b6a9af6bbb77 304502203abdc98507adb93e46a2fc1c44a08c89ba860c956f52b2defbc332a51109475e022100fd363d18efde5c18e2c6b3868a0b1cbfde9d9fbed7edee03ab5b3072b504169f 30450220449b4d247a1ad55734f95df0ec88ed6ca1a1d6702b94f75f576245b1ac1871d5022100f1e8a8831bcbde2755458bc8c3d8d13fb9769becd00803fe47ad5c14393c1186 304402203831d5a8ce6e3d9001bb6edd5020446a0959b222894d337c1b1e28c38a3a364602204e43fc41f83104b6cf0660336b630d19ec41164b65c32a1b8bde8c3799eb9743 3046022100d4ced69bb4af971116f2520f4f68ed0a7a7c3c503c093ea4dfb69a065b16317e0221009f331fa65aaae010f7131c7028c446c0f8a21757ec4d8b1f23ec69a25524d8b3 
304502204f28181a379cbe1a07d9b8bb5e7afb3174dc7662a1ad32700e7488b1df3b445a022100cf3fbba948ea3d80c1d0341d2eb974a23766cf32fa62244061adb353c4f167c4 30450220793014614a8f251d61f2fe1ecb0ebb650509a04f9e2963b091e2f61b10c61019022100c917fb93c16ffb064bf355432625bb8158aafffe004f2abc74352dabab19a9fa 3045022100b7803cdd85e85cbc6c84656ea57d348cc186298a9937efaf570db57778b8446602202645994d98bd348f0a5d48d749c880bfe12737c13ace98fbb17222d43cfacd0c 3045022014c1be938022a2f2ccbc5b7823e2b112277a2c07455dfd39c47e629118c7516f022100e21265cb1e138c6456e8ceb5d70c0f891b866c825e166d6f1cd041b20a9e6663 304502206829e106c223ad3f24385e302daeb8a029c1bac9690b41d7aada5434f69ad3eb022100b7e213ac73877c4ce49f2497b04a820594299e9060993899f1fa746b063a19db 3045022048678c36343a17f5a212beea8b43ec9c07f6ee24b89c9f72d74266e3c954d10e022100f9a327137861159312553d9fb3915f72332cd34e74fa88beaaab806e9dd28f46 3046022100d3f2eb8b76ac3f34a315894d718bf56e0a28a35eff9854a0758b89089eb62d57022100ec1a6e309f9c773cff616cf25bfa78d1e5f94ff7bdbb3aac49c93898777c72be 3045022100e75477a06640a60c1473f7503ffe53caf98b7e2d464445263625ea2551546ab1022050b5fcb2a49460a07e1b34bddfe3879dfb91b94df548b6cd04db2652686ec011 3045022073a3d5c5e65d1bfe43b22d0305b0bdfee7aaa65306a14fa31e82de59b1b6f658022100fc33ce27983a970c3703892bf3ccfc51fce5b95961aa3bab53fdb1718e4da09f 304502203800f6f94a617e563ec238c6529ddeca9e382f52b32bbf6a1dee824a0e149eba022100d24062626b5bf822a7b671267f829476ed4180550f3623a285a1040b6a4c96fa 3045022010d4e1b04eaaf252349d34dc57945a191f616673b45d151587dd39631718cf29022100e02739eb7a3617aed536ff48e587b3b4ec753497fd27ed1a646356c708b43a3d 3046022100cc628afbedf2684a0099b1395303fd26ac8c1537e47536e68fc7c07b05ea2e36022100c0f5957355978cd01c4caeee9135152c51b1c294a5d1892f803e28754a0f4df7 3045022050f92950656c3807cc5f7770cee53f848127eaa7f9c82b2b78a11920043867d7022100ddb0f13943c61bafee4edf3a9910513c64673dec0843bc0cf9d01968acbfd658 3046022100d5c3afb5fe907d00b12040aa0adf760b9397e381a961650d47a70cd7750f1bad022100a9256b8b9d350e5dfd71c6484f3911c01519a2ccabed9791430fd122a9104bdd 3045022009195b191e0f7115049de307691a613cbacf709ec576e172206ea52ac6845b65022100f531326381d17587b60c476342596bb0cf9019cde8b09f225107cf7ff39347c6 3046022100e50da29edcf9542963644b1771e37acc1e2ab1a2965f469dd10edaa6860fa53a022100c6d5ffa91ebc5f2776b445466dda22a197412e6f5f679fdd6ce87009cc34b763 30460221009ef64a46f14acd89b780eb46ee751d856ac7a72c6baec3d616610bc18a66d619022100e530d69ce7d00a73b48a9f307ce075bcf88e51927911fdda320107938487585f 3046022100d974bc523432c04db2269b5aaf78cb6970454a55b9da1db709821a4d8383a601022100d69f4efbd1b2fb93be1c687ade6b63ae6f52d1c976c646ecc2cc2e93ff210395 3046022100df627a11b35585c4feaf80c169caa2d8c218c574958912cf3b4520fa3eef39ce022100a1c312636ef1edb46443a9ca38795fba27b86a35c5885cea0053aa26ba0765b7 3045022100aa2e45e53cbdd3f87b4a250a3da160fce3ec39d8ec96597935c561fd2a79fa2b022047963455258557668935c1960f1e57454bf6bc3dc785ed850883b4c40f51ef86 304602210096f86783643e82b0c1bf14c64421dbda981e785f98167f4d59f4b41c233ad581022100afb8295b99464d228070f48f1a9eb99cc8c5efdce6aac22f70d0b97d0dc3dd95 3045022100bf87b1e89fe113f97f3672dccf5f29f3a9813475343c358adca435f8483b3f1d02200384ab1c53f185f2ec55380d19ba7eff8541716b3b806e8efb4149224abaa546 304402200e83ddb9a75bf93d6e2cba56436d0b4aa3a06d379c67bb2d93ede663bd09e93b022013ef51bc2a6dd2a31a12f8b668820332ab5ba9457482e2f53d6f3cbecb7e7f6e 304402207cb7e03a94d07faa21c4321633cbfc7c1c83f9c63922843e2a26992c4473805502201e959ab444840a09d9064501945062834336983f6c88dc6292beb549f60f1c9f 
304402200523efe510a9e7f0d936e7124a0e0041043eebccbb64b1fd7ff316a1a51f92a702201e092fafa331714b7076a7e7d991c70988afc518ca2f2bf79d0a377dd606073d 304502210089cf71447e1a9e59f299ee2b61c902dfc50d5de1881933c21daca07682f28d290220271890962d54f2e31c52bcf849d328e9794850de47ebfca436792eb844ddecae 3046022100e95d6c709c51fc203abf1f4b4969e76601ae666c8381e6f432330c2962b10a78022100d8fae1367df74a2c070e3a6dbd1148f1d97cc62e6a5aca5e12fc57268acf8da6 304502205c1659d5195076061918c43051136a02f774a56001f6165635e4d63ed0726a55022100f25bfeb5ec9e94f695d2edabdd7542589d0701e9582c4258f4fdb389ad606c95 3046022100caf70bd3fb7a0fff41dd38a85200e9090c127787fbed4861501e320bea421105022100c98d4d6fcaecea4839d3be7f7b468a16ce00cf3148b17e5215e96ddb06c3dc67 304402207c4700238d015e6ebb67c92d7d23795b334fc64e2f645e941420b1122bf673f3022013832cb9e718561ab8dcfa311c7c7b2a20c74f994f815c8db4b64bd62e0c8326 30450220129e48c8b9d9b747662b2739d9ba95afb6d45240911c28f146137e1d1422de3b022100a1571f92259211493d39b5e29e1c1e376184fb597baac83fa6639637519a79bd 30450221008505cfc9fe3c764a3b0ac2470d85f0ce9487c4a45d372ef0c232eb26271ca916022043b7d425c12fd0564f2623b9456e3a77b6706de4c9df4d2415c15498b99f6169 3046022100cf3c27125063db784c1ee09d8a2b5c6541eb818ee974fe04b65587a66c627dd6022100c118535d24cba77f1ebf1cbd57357ae23b0dc6a5fe0156bd222dff8d9efff331 3046022100d93e171366ce8d9815382080e4570c33404d63c53d3054ba11d4915d5a236228022100df5994ff55b69ff7a01f57d5f50cd2d6032d92c032a8593a3973f2d992b57cc0 3046022100a6ce24527603033af0d357c9fc67f88ccaa28c73dd1f80c414f98b7061f5a08d022100ad86cac411b497aa0888b9907073987b070926976c10f8f52dd2c4cd3d217448 30440220443bf84b1323cee7d2e2cdd64503ab6221ab45e15ae990b84415c8494461da9c0220127abbf99324d4d65c6dbcf282fc877b52fe643dc745ec8d6444068d4e9e28c4 304502210095c163d674aae55bf6b2e1249bebaa71d653882d9b9b42c40c49f8900e8769a9022030fbd3337436a3bc06fc1dfa98577248840786be49971aa7234502f8c5e35f5e 3045022100f225b5024158fa7d072121e1c5018f01860e5e0cef1f98159097a7a2c7e4604e02204b8c86f788754940c65f224828f7ee9e42bf87a0c804c8f906f751836f63be02 3044022045cb99a338ce0ae8740a1fb7a1b0166304c7d80420dfca829ab113e81233280302203b31fec42212cc9a4beb00f06b702d49cfc7a13e029fda7c96b51be7a8a8a8d1 3046022100c26ec2713f9a4904b8b203bae65675496e6c25b90cebfd0d552934a03c88db86022100806c144c5f4700368bcd4be15c4eb6049d1a72fa259c9d867d66741347302257 30450220544f4e9f277cebbfae2d72b47bfdd41d34536110af1c147db2f40fa77f2ebdbb02210089af4a3c7008a648482c9a34f435d17ff4af18aeaed6b44cfddd669c66cef745 304602210097f60636e7569077e0e84ceca4a44d25a6bc01714321442a0bd8ec4f9a77f1e3022100800a7dadee1c6e1b78f58f4b986f7d868c11a0acefba9933ed6b85bd88263396 3044022060ecb8f3326aae797375dd10b5946dd2799f2ac4c03062017b1c8d3767b2ba01022023194ad81291840f552710c8e2aa13fedf04586b1ee9a440c252b0a4c6a5f082 3045022100d1531bfec441b8ae030d622816420022b5aa52229126060a55e5addb4a8748e6022059616c74124c12ad0eac1f9a0b07368a8bf4308d4f9fd705956231e5a77e2088 304402200088785e945736abfbac32f6e98bced8062634843101935ae8896fca53f3033c02200fa6528be11b8b86507107efc75072bf22de2c88574daf26a4a98087e3ffda77 304502210099d0776636abf4e6132241a3a75346f3d46dca3b84368981c9b521c30a4963bf0220236ad5317272a8ae216a859d6d614c2908b6057809010da0e758e95d50594a18 304402200b8657fc16ca76fa1c438f695d058f8c6152341a7e4ba38f5e93c5eaee724ded022024e7be055cee4dc17827831c9fa1ddb2ed80fcd1c9e03c79d021d8955cc70748 3046022100e5df5fd65d7d23820a085d154380dee7928843904b4e6ae4ff452c858702294302210084cebdef6fd96e16aa538407c68410fbabfe928b05522af57b7af020e1094eb0 
3045022100a223fab5fc670c93d318f573d7e69bacfb25aaad0d18668809fa8da1801b87a202204171912dcbba67132e3b6f30675ac5ae6e4084b5e17110d3d3f042b62a118e94 304402201842db90f9b5a66e65a0cc7ec6d4adae4572ba6f91dd8b0906d8d30494873ec602203f791629033bec37646472e56ee89b87c6097213611931891aa8d84d9c4c1d4b 3045022079c191b593550902c552b3fead64e1830c99fdf8a38e2e00879560074bf0b363022100e5e39a58bb8b521a07f429fb795520adfe952450499cf6d74d8a23b86f42d29d 304402207953cb3558c32afee10e9d8862539c2e15cf05bb03736c16db74458e9b7d6e7002201cc35c9aff3c6e8eada7d601e9e8f2e23e3c6fa0527100e7409b183e7c48197b 30450221008d2e1230fa12faebf09889468307959825c506c176bdb225f951e88f192cfb0e02204b50ea0a96c8db71f58cfd617f19fe7611dff3e0ee1a58f8fefd00044cf06dbb 3045022100be30e9aebca9048a714b8d1398a38c9f26142f9cc84f421e81cb064612bc3f38022062a86db97669c3e864ec6362634c92b3c0cace5cb227a1a12ed1eec18eb42012 3045022100b0b5ca90c4106a14041596d3fc8c9fe6e48b9cfe39b30a01742982d6837766f402204c18972dc44ea2077d33b7865094d9f728bba64276100ec010b38c5097968c92 304402201e3c4e79ec47dbbb371d2154ce4c88860adf071454a0d34db54655027609071602204e994f89b89a6fb1d02f9be1c82f4290249fc0b537ff172ad01448ee9c6f3137 3045022100b4f66e7593f73cbbeab99fe3cf3417d1f365a4ab3c8781bdf67792b3d71de21d022038aed8b8f5ab96eb8e83731c9cff063d5693a0ddae68a0684fb82d94c11b6428 3045022071d753de1c306686fd61440dd1637c6a92b14829882d0bab5e13d47a039ebbda022100ce887bafe784b6e91d3114564b2d3de538034fa5fc4bbfc612c2eddc669a65a0 3046022100bb8ddcd20150f6f691674704b2266eb291935a7be1a5aec3765225225a4df573022100e4efe72454dc860d5987577fc7428bdfc2036b6529f71a3f62e768cc82ca7923 3045022100c841488fa3b177c32a72c740cf4b2457567f5f7979968888c57bb8331c3fbfb902205735534c88b61769cceb85423adb8f48b419a4eca4892705fa9d1d0a75bb2b36 3046022100c3a195459f1ca58c680fe76913d3804c0e2b3ea088023aa4b38914f75d3cf783022100e0ffb7713d42238416a88b6ff2fd6de7fe3dd407da609480201255ef01748c8b 304402206348ab0caecc03206db2369533ec8cc12b51c01064b1a3551dd6d8f0c6206daf022072628c7c81b176247e8eb8fe7a00a0f85d7f9bbac063a71f2f19a94a20d830d6 3045022100e6c490ac0777f20ac88bc0da592d50ddadbb00c489fd1c834a064bdde9cf3522022003962e36996ff3da49d8a385f93801ab47ee533a4eaee36d5c4e59d391fd43e0 3045022100fc5e63bc7adac9086833fe22c3f05930fabed3a65d33e4996d57f9c8db0f920e02204fb58ea63eeb9389e876e5be39ff25e9554788bc00f06138449ceda4a72bb5fb 3046022100b70a5cf7dc581d587d48e5e181d92bec1e95cb0f481182d39fbd625e448f80a2022100d5dccc496f2d869e686138809dafaaa3974ea9970c4c651d8eb44936eac6b226 304402206aa583a58c354addaad4878bfa36cf9905417961acd9efb16db6ca6939e45bc202206191d5183b47204e50cb07124a9c76a6f3cc2ac9d24e327eba7ea3482bf10359 3045022100ace43e03dbf176832c6fd0c761964754bc5b1c95639cecfe0cca6b35b9125e9502203ea10f2e9cd3d42b5f9cdc5661bc6788687a1d41e0475cc4b5c59efb63d8bc6d 3046022100a99149372a6abfd5fc4efc970e1eac6d75e0783113f5376f1e7f08ba6d482978022100a0732004b8c432c7c396e51ccd2e73f618b6a225e4ffa374352a853782f28811 3044022047569469a3175964e9f8309e849525a3d605269514c1dd564ed4a250cf4cb851022022b0de94e5ef26a1ddfc5dfe1431ff2310a7f61f8e146c40901370eebce3a593 3046022100ec34018c3c399bce46e754c1acf92b349b7249720b8ca2361f83c382e7dc1705022100dcbf9a76ced486085009c762b80c5d9072cc8c5c9750d598a333f38e945b967f 3045022100e7394dc6a2bab9ec3544612896520967e2bbf2b3317ee566c978e54381c3397f022072c2753f6916c7a8d7dba53eeba5e5922aad6805a13b722cea030a74b38aa381 3046022100c19fdbc4c880b3b33fa1a940238696b9c4ac3fd93eae393f21b23eae487b533202210080d5afe61e5105afe08dc9865ee573f7d2fb242e46ab4b74ee0aa0211ff0df0a 
30440220038992a3c419ceaa9dcf3eb3b78571bda903755a7da6d756d98be7e805cf1c740220166db7a9e37fdd49467c275e65fb11d8ff6b69cd55027475b3b8210cf7d0a902 3046022100c2613eba22b481800b039d364e1c5900cf53312ccf8d60919744cfc12de921fb022100fcd5c2dc60ab57b1b5e0e9945d20a0744a43c59e3827f57f800866a0b5e70862 3045022100ab3432b7eacfce7dc0799e2f1253cd744ad6ebd4064235d3be05f63cea2bdf2f0220316688d5718e59f05aacaac4cb37cc12747cc2adb792b690be6f175109584a9b 3046022100f1c1b663ab8a8106813a79a9662cefbd5bb0947ad425faab2287208494606cff022100948fe76b5dbb90d14a915432e8241fbf04ff3eb8c5eb355780ac38b1b82c9cac 3046022100b86ab58ea774d7499466a99e4d51a2063dacb9aa0a3c6ddcb6690ef92c58ea2e022100e5a35b39227e2764866a0a37f66172efb54e59910cb9e71330ea25dfb4fa1212 304402203f01e2b1a86e6fcac2b0d91034da4fdb86664d0d70ad8a74a95449dbf248a7c6022030e36f953fc6fd0eb624246c5b23470672963c5a427497dfb40ca5780571485e 3046022100974313070a00e18437268db614cf603462e332fdc22016315988fc1127ee1475022100ebc6ecc95a0a32a4171d0c0bcfb69b982e856733b40dc2e42c299fb9debae209 30440220472cc10590acdbb892fee212f1b9a01dba046efba38f824ec6310573a1a12ca50220333ceb2ca66db5bb71184ee7dd0524d30ef1be6186d5b7bb5f500d31c2e3cae0 3044022011e698f2a79153fcf7b1500d3b908f9a5fb1079dc7e3648ca9de11ab384da18c02201301ae9105dab0a91516820c0b4c3ea3c2382e71d1005f1d93bcebcb077c6a0d 304402202512f34d30c3a5538103c7b2917e4bce5f81c8f95818f6f88b6ba98f636b95d402205ebfcdfc15501788df5f23c76024f9125471885ea16b7b26762f695b99387966 3046022100c944d904bbdfbe856d7d43d79ba78757ed4e8a3547f6d52dc97cced92f160281022100ee1e2cf6618f8547004297843bea92a925c133a8f69e98a8bc013033bbd037c1 3046022100880d6a95683552c21fb1bf04a944dcc20418895128ceb311a74d33bb9e7c36eb022100eb6c67796654cbc038b9fc2803ead7b7521f266dab26aef8e7dc6719db46bbd6 3044022064ccb47f7238ddddab5488eb1940d7777b61bfef08939df13f245b431a22d8f202204bb92d2306842d2044019339e0a935b58060dbf8bac8858a774599e97402e433 3045022100cf44eb66ebeb9814181f91b91e42151c3fec6232d789b3c68622b2243ee0a52902204013a807b10778fe1355b0b69cb2851ae2e4243f7ec0aed4c6ed3e7e8c7a34d8 3046022100953f30ba4a9530bb59830e8a1bb8df651f098b2e14c1e19fcc643937cdcd869c022100ac2d76a3d45dbabffde66795e294c211df9d653acccb7e4a0d2ecc74a91e1705 3044022047e9c5029393c8db11cae29cbc58ecf2980efadd7df53c98207b7e74462263c602205addf4eb260999109d680a0bfdfa3331f309765359babf77f8c085cbfdc39b55 3046022100dfbe979689f505f9addaebc2b1d0b5a445d77e450bb68eb0492118550b5e1035022100fbbb9403ca32621c9bc5b2ee65644a98b525b81129f03b83a8961fbfb8fe2b6a 3046022100e8f87ac95dd4af360de1cc149e87fd16f35256656e5c03dddd8bf2f8d5de88a6022100fd34ba85fca002cc365ad3ec29703da9262c8afd9a8fa3dede9499992fde1a3d 30450220418a6d65f32dcfcf101bfc709a7eb30a16592ac194a6fd4a7531641277674ea002210098514a702ee72268e41af3b9fb7978a69a4f22d00b2f197dc2fb4c00a076f302 30450220130885a8656dc57978e673e9d49760f4b9aa318c598ff351a90bfc942a358061022100814ecef7a29d139fc93be77d4467539d9b1b10e37b0aa09f2dafdafe9406a7b7 3045022100b501ca782fc067e90c2527d77c51adaf0c6acea086abc48d77da96aa3797cf53022001a952788fb043b0f03a3bce2b2fb18ae0ac88f60f8c75b6999083ad73226e5d 3045022100ac98f2f9c803255340ae67506c63427ef78d741e3bc7b4bd03486383a18f033b022021d5415b5356d749e02033779fe0b118987b327dd61fd924ef429f00e76c2005 30440220209c8e63cd175d93c5ee24a1ca79b7f77da6aa6215e5bdb8463b20f32eaf430a022024bad061dda692e7fc1b37270d03e64fc8ed8a89ae435d5d4b703aa0decd5344 3046022100f5e4bdba74ab928caa8008f36fda63f4c05144761ad00f10ed7df2bf50b67eea0221009ce676b40aba1a3588fea5d3ff7a9d4425465981f4fd2274a8afd4d61aa0b893 
30460221009bf6171decd315b0518df528b2ae23d4aa2f3e0bcf09b2f07772b92bf26f3ec5022100e7383265d475e83bcf7a5d403976b630e9fad4de89b2f8091a5836fc2ab660c9 3045022100e4456d7a983fc804edc4af080d40a48170e92d75753f558aaf4ce2f42192ff02022000d1d5dfdfac6811600d0c37112663412693b46089943cf1e1bedb07f502b4a9 3045022100b90d6bb2542fd00a2c93e977190b82a50dc84cd55d00bde0c5cf2530b81005ed02202485547475cfca0e27b43b7adcf8019d355bfc409cca5f5081571396b2b62462 3045022100d496ea72e34b31b3d7ae71392d2ef67f3327aa6bcc2bad78b49aec7657922a06022062fc67e2d67b8e58c36ec10e56347c024066ee5406faff039c9a39dfb9db056d 3045022100fb0c7ac8eef565ac3ea3ae472de9076c3cccc3aacfa25496f377e8e16ca09c37022021205f02a9271b1b5c1e86067f8a5bcb9495c4c216dcd79a1d159894fe317c19 304402202ca024e9d496bce046fd01d5a5bbcb2990362af93a9226e3bb1b314ebd7b497a02202bd1e82213b879c3afe57493fcdc09b1f97e6ac0211049f295ed8514375b3564 304402203239d2ac8f5a1227b012eed9f2388202c92ab46ca4d8ac0740a39319fc41bfc7022064d0ead679724a82f804f8ab2b34f7ff2f96e138bf671dbbd607b381b67146e7 3046022100c6cffae9c18808272c7fae238f10bdf11f0020847b16055c1f08c96b581284b3022100f36146bffdae1b9100cf80fc782886caf35141342bac9f9d483fe90e63bfd5c3 3044022076b919ac0a1ed27b44082fe8564220564796f4ee07e03de88ff8d9476b933aa20220526bf985895f7dcd49a9f733a8059efdbd35078553664c44fe6139a351aaae4e 304502204942ee00a95f479d6c32e527729645b0eafe3b0f9bc2c292064760208c631eeb022100facb49888e00c0e86b069b30df0244d2e323c90010de3aa1f91a1952b92beb07 304402203f1fff1b8bb69c3e8c32ea70b129485332354908b3859ea96a3b67b396cbdd3102202ecb6950d57e8d5f7849a14ea3a4fa7f7a38969e22d3425ab0c5e071c9d5782f 30440220470c19958d7c463f275d57432c89bdf4532323385d1b2514c143b5dcc88765e0022075535d5aeb67ed297de5d1d2b6f149f9a8a7d9be75926d028006555d76c45b63 3045022014a66fc6445680f5317c1f96fbf606f2454bac5d76994e72f23ec8ebf3cd9aaa022100b92ec9d65503097ccbb4e448ed7063c4f9563023690ef19ff77feb1977af8afa 304402202f8fd74ac51ca30910f4302a3efbe53c610a624e4dc378093f68b45d792121a802205cd3e883f9e50a0c0fbdcd01d8154f3871a48302c26de58cbd1d215df12f9bb9 3046022100b1b01f18a52cafac64c593a40015542af26bc57f0c456052c59f9a489dc83bfe022100ddd31a170790b3c71bb8d620c7e8c04983b0a247c7012fad7c3cb7447058b29a 3046022100c60a0d4dfae3ddfe39fbb81ee8de827ba14dc8a44b83150c59f9e0fdda7de811022100d1f954fd2c03998ac9c9846198b00fe043dd99fee00448763c9707eda3b61dff 30450221008e8380655890c958e2222e16b71dd6d9880fb2c036de2478e704e345098c93c402207c35a7ead4954638579375a9666bf68e7bddf1486e0ba172e28bfdee0972ab38 30440220665eda11edd77a3bfd3ba7a92b1faf9f6ad5514f25575e782abe002a2a71bbec02205395d809fc471cb0ce52dd9c0ce6613a600db3168f02960b416dcc1874bb9a63 30450221008c027ecbe7bff77f3ae64206ef9ce068c5b951f7e009737f2ce91263a977186302201bcc63e38a681d063f5c1da926b604d4ca5e0788327cbc85f537e2eb113d4730 304502203d06cee98dd4d2389bbb1f036ac0d7d717379e8ee181d7cb35e2e28170456024022100cb3dc1f14ce260578364f4d12918c4cfb26467f0177faa13fc4c784c3b7cd97f 304402203d1ed609f252d470589a9fdedb7d5e639d9ee68ca5002735bb678731ccac1bda02202d475e6689cfb8bc4a343f4877506d998040490d01f1c9e0b5592e7898da1866 3046022100e453c89f710d45998f0c23082878d6b2e10822d78d959ae8cfb8039d78387f6a022100d3ac5063100f4b21c8226980d9de1a46b47231043ffbd0da9a863038c6829f13 3045022100fb705b7aced8ac4766bd9df512cd8be1cd47f3714e21cf1b9a04faaed31032ab02204d30407f7b1d4a812e33339f6ef64c216b45acac2c5145f55dd49196951d4662 3045022100938f31077e37409796c943710be66a06393e7901430e0328ecb054ad7a7c5fc002204e977555fa2efa977f5d19a2cf06300953f8153dc5f0824d94f75de793a7122a 
30450220497a45f95f49a297e98929eadcd1ed11c263a2833c977bef6b77f5238b87f564022100823ab97d79d5e8c38e899f2e59c337815d0f07ac3e6b70f686b3924e111e1f7c 30440220533b1348601c28864172316f4e8af614f3e47cb3c2ebd4a3619e7f487cee3424022039332b39eb359b804f0b864eb6906d16b4cf2117c191a64e9aa76f113b18ae55 3046022100aff2409ec1d091d17f9bd34e06ab89f5c6a3623001be10e49eeb3668580e49fc022100da8067ddf0af704661c2f34a9facb4f1db93c08d1cafa90eace8c81874b1dea7 30440220139223777956e16760faad0e660c2866e93edf64f05b95cba7a7c0843c3a532502205e67709182a675604c692f253de59059a9dd238ebf49689b5e53393d8ded9206 304502210080001a0af3b1c38ee73a4c4e6238015eb43eb07039a02f47a22a9ad4af9e794102201b2fc2a02cffc7452d0f1b459c918f63fb1cb2574d35eec06d8bfc60d0a72d45 3044022052345d8553fc0d11dec1809abaf77dd2e74bee65c376bb84ef4f63daa0dd24e902200f90704087e046b2e9dcd0a578b9984c878bcc3012c3ff687ca905f63c50c10e 304502203e02ee77e86a013a80c1a246cf71fcfb205deb6a010bdd1563f90a4c5dfc85da022100a5498a13df4ae729ec34fa46b63e47db79e6545fc4a5cd26a510b868aa73e3e6 30450221008c0f0710964f9ef19b519a90f6ce4a8e3873f6961df3e1c67bba4cf47970bd090220662ff29c693b4e886362191cbd9f2e3a521baf613f45fa5375cad3b3622eb97f 304502207946a98316153d9e0bbb57c80c8db90bab228afe78738ee56dc04f67978ffe010221009baa49f28a9f655a3809f90e694f103804fe99bf91b03768f2c0d2f047c56561 3046022100d5760aa2c046f4d1d7eb19705193a816ae6ba9f43b6542fe29cdb8931976c26a022100c3f14561cc62c1d063e6902c91769a47b76846ed9c1d221362afb86b98580e10 3046022100c10835e9c015351f9c3ae00e65a934be0448ce90d96e5603a24e7f34c375a509022100fda1491aaaa678cf0bb54d7115ef9cd4296e28467e0af383ad1b3945e3d74bab 304502210081606b3481149a00b3e65f1281eba330a970581621bb7fdab5fc99e9a57c956a02207e7beae757c0b376bc8f7faed510d79b649a129b8e2fe38496bb2f6bec064f89 3046022100a09797974c1a0461101fd4519f175cb589cee5ed733321a1e6ffdcdee0d5865d022100f4a7cf30233e83012c93686402378412e38ebeba79c830fd3ec95c9932be4012 304502207de6915e73a5537aea996b575bb8f527880502f61043ceeeab525de1eff907f50221009317afdc1e2b3f27c763e307ff2190622ded59d4340fc6185165d12e4d57e10a 3045022100f079e27be47be5f8867164176a093834a5bbb5fff1f679f8f5c4a795fcebba4702201ea58b8a07e5526f5c12b6ee2d0834e79e8cb96495dd59468c6cec6ca27fb77e 3045022100dca4df2a7d8c4b815c1ceac38bb6ab6eb0ea652e699210cb922d49ebf9522b5102206c5751f31d422526598b43270087b216b159253127ecff119a970ec0388a7966 30450220024a3cc3aa8c8b7b5cbc824f432a421171978b56e56e3e66893fbc3d3ba86642022100995d645d549e588d4636ce1e27a06a90e5ad9b59e854b23fefe91e79a4ac61a9 3044022067b2309e1f4b66f64dfb7cbb6a29ffd14dca371eb0def795386641cdf535b76b022068570c7933480346631628a56a2490457f2bc5ac2a3e6bb561a99dd27faaf2c2 3046022100afab2708ffb6e57a2a525a27ca5a2774798771259b24559cf0293c3da196dfc3022100aed9f1d0a45f3238d06a59150ebbcd262435afc3183e05a76276685cdb78be4f 3046022100c48e1efab7fad86a1ebae5525292b1ede59d8797a1b0ba0a2c3b79d5198ba43a022100b39207a329e0aedc169d98dcadf29fb5b5337b7d9d4742e91de5801550c885a9 304402201a235f3ccaa6d8b43620afabe6aeee028f3f1fd151666ec7ca0d8f07c59131c402207662b9298dd4544989c795652253625773cfbb90ebfcec477e1ccbfd5732c912 30460221008f6e629eb7d7c6299ead3d58cdb02f216c36c2082c083a95773f82355d11d792022100d7b00791edff08eba7651b8022546fd88037eb93a75a65792c03c1578bb7a404 3046022100bf20e4aeed93bfc956bfde92f56ec173f670aaae028b9696baf811c2ac8bdba7022100b241bd359e1b83d579bfee1a316c1af26e264a77be7058a7f3946279e02b604d 3045022100eab0f07c9f3686c1b107f44d0458b23d45e4de7e19e4bdf1c82ee731dbafa685022061f4912371e6a946ab90b8b5dd8562fc8f7dfb54e414aa8fc90678c2b38fea88 
30460221009981455bf28e0077e2b859e771c6623837fc6c9ebeff74d408789f2b7164d02f0221009a996fc37e3261586117d820b2d446de6c96d27b0e84e005e33ad73f3633ec1a 304402206f3a82dc898544b28c3198d4f84c1ff62fb4e471dfa40999e7dc444bab54f29c02206f49b6293cef1820003b1f50b00439cca872b5eda9cc853f1df64dd0bf64a824 3044022038d8cf7c8bfa080d0276f71b95364e8df4c526ddfeac8eaa949f20df5953883c0220765a2635e4c2d1b1ee7592324edbba384476cc603c85d8059d9a65c1348404c2 304402201d233c35fde8eb1cefe007db4b49ab37371fdbab8c63b002fb44bbee1e03cfdd022015a695f4df5d114a79c9ecb35271a1ece980f948b8f3122b5a9941f92a872f35 30450220268029d8988f22910df74b6983d302fb7045488f670e52b32f3cb81319e0534f02210099325d52da33cfe8679e3dcac417945c9293ee7a30fc79bea1b9f51058606352 3046022100ea5a3e1f45c6ed9f61c6a39a66e4d05d6e28df9d143a6d6b032b7517a2d03458022100e80013ea5ed07a45ba81f79169366b82f49219193452ac3b6c89db2a0c959ff6 3045022021655d7e69544976a50694be44bff101f315b39c336ae436b91b4e5653dfa27e022100a9e5d75fa0714a015bcbc708fe9ffacd7168378deab4b22a761deffda3305189 3044022012745420b1054cfaebce0cc7cf5d6f3a66160da270b55bc4acf4beb013f8ff13022044e656daa43f7818c04dab0dc32ec5851f86b827854161e93733efc181a92e0f 3046022100c7edff33643ece0d1097470438cdf30ba28fbcf117e5272adca3965d0cf4e3c8022100ac3ef3f28b2604bb4b3a8c8e0fe690ef1964d2b03f68c3032cbcb54ada283f6f 30450220497b7ff243799cdafefb394b3b5a70ceaadc8ef1e1d933149291a63a39eb0bbf022100df9a46030337ca50823d9d2a7455624611158e8f9d4697f39df604de2f2e02c0 304502206d63962c628d2d88e57c9b413c02572b3bb889106d28281f96d48710e687f93c022100ee914d7109c23c96736651117144af976dbf8bcfdcd4d924a4c5be3b7d09ba6a 3046022100ba1a979c5535fb106d9754c1d90a55338147cd1ad5cbad82b1f165527fe86905022100c70cc9574151d474bee88a5bddd032ccbab59d20f6d7752b3462e856102e30d0 30440220476879eb3376fb2dfba258f96d4f4368d5e55cb8eff9ee44eb117452de750bae02204657c1b533902ac394c52705ebc91482a74cdbf7c9ca6ac5ef2156591b86a4de 30460221009700e0ce06dbfe64988296b918588065637c0a5ec101fd849b5f468aba03f937022100ae28d902f32fe8d77af8fdf7712bd67380fd1049c8895d431e218119d5c7638e 3046022100a4fbbdc4013f167ccae680fdace4ff8ccfbc9f8ac28d911eb4d31de3ccffa4ed022100bc94bc5519e58bda90b55b2d5281779e3c5d6cd4ed083db76fa01e1045cbd460 3045022064376fcd31a39485443b3be3af586699f457ea77107819ad51ee6a7c7d7daf35022100a57c3fa4e851913455cda48b292306c1e6f5bbf969d220450b0f4559efbcad3c 3045022046fd167a5a3c9ff318696d9948eacc360800fc4b701e19f6c60eedb7548f50d4022100cee58a11136974c3ee4de0ec625823f6b55a28556a2963c8ff08e62a501edd59 304402202ecd46e957a4a5ec3a2b5338bab605e08616c343f898340f8fc547b0e9d0f52902203c1e0b96ff7b8a9051d7ab08d78c434bb57e4ad3227b60586724b670add8f8ad 3045022100fc67133ed2950916a2b901f50de937025280fef0832404957272fe6b9deebb8f022020492ebef64f39cfa45aa4bc17da47b3604b15f5fc6624eabfa92247c8ff9572 3044022076b17c92fcc9492b62df1ffcb3c55c1e20a7955f4c947804eafa1085fe011fe5022065b73bbf0b9c0a0f74ceadc48a138eb3183af065993709d0f9f99f3b1ffd830e 304402206d67596234c3b801571624a86aa549a05a0eda75e8243366cee8581a7079810602203b557e253462a8c5a2b85f64d90148107b49f3bdc8848cfd283de1684e7207b9 304502203d265d88b9c8aa4c8293a185b9d5b8cde29e40d67afce73df63622f28f39e6c1022100cf9c74432a24bdf814f1920c260ce440bda1fb4ac67c0cda46986d811ddc6dbb 3046022100e0c7387447343cfdcf28b07549db9fb793e2fe5e7ef2a2f94338e23ca31d288f0221009d62ecd0160627b127208303671f7eee734f428ae0b57dfe9985e41f7d917b97 3045022100e3f794c1c754e4102c87d54cea6df6aa65999353c7bc9eaa7e9ab472b8ff2b470220571a7cd9b9287fc680c7dc6aa805a275e4697628f0846de3a34142d704db5266 
304502210090771232fef90196a2d14951b7c6e99d302ed90c476dd778602b99353cbcf312022018fa07a735c7bd58177ab372246456bfc873e1cddc2af9501ce25135ee05c7db 3046022100ee527da2cff3bb1b17a9a02e78539785801aee44503009ac117d0e4c044f8fc2022100f5f7622abb7d7ab550f5e2142c476e6019f241bc6ae4d7f833172dc9815edc8c 3045022100c7cc74f54c16561dae394a051ce6eae92198d635f2544df31bb251fcbd35deaf02205783b7240e989806d39ac8b0439375f41f9447cd3cbd345f20c1088543ca01f9 30460221009bab00332c30d631a3ee2d42788f28d87a4900937fc5e81bb37a8dda0075fa940221008d527a2eb82fc942b864b67abc2cdd48550aeba912dfd92dfeeb25c649cda0e1 3045022034ad91f2334bbdbc42675f2d5a2db9aaddcab09a9d158b56cabf8da0370c1974022100c083baf097c52f9c8d4dca67106e3f3ad918c5ee203e5a6876d4066839ac1a77 304402200a88d7a25889ac5ab02ae606128354327725da8602c64ec2ff6870bde7406fce0220252c81a391f9ebcf23585bf1e6f10b50c58f37d59df6230173b5c4bd4a7350d3 3046022100a12aa9e4e3c9b5f8fbc0f16dc095fc8cb91e3a9e0cfd4cfa27809f8cd3f6c2f8022100f8405691a5db23490dc94bd14f1c8f6baf7db529b71f9f7a30017d3ff05d4db5 3045022100a8b6cf0666ade9915852fc5a323a32be833bc7fb4f826b1f5ff2448858571ccf0220300c79f87e0d43cd424e83300dee231c82824da0894df5dbc97e1043cad26dbb 3044022026dd8d3ae1ea11c6ad0fce03cf4bf44a2ca9a800a1c6e1aa7d86538f0c9213e6022060a78064d37d2789ac90873faa13f947c17f3f2793eab81a171fc9a84369472e 304502201de3f5316806fc0c926185bea246f670f6fa6227bd0480f925e7a22397170b02022100a0b54414fd7e450deeb0a89da6f789241e7d7c80800f770b924d3dc8e278f7a6 3044022005a7f6faac2541e279221defac0b6cb5cb6a1c68521bb08ae542bc776ff5cca002203eff2119271989de379156b71f60c84578de373bfd0e170eb724d399b7a52565 3046022100e736289e1bc866f65ebe2fadd392d728d3347b2ea3146e481bebbb81299badf7022100d93bb78204605690d8c477692d04940f187363f88338c6ddc55a84280eb0504e 3046022100d1f5bb7a2e8cc838c6b77eaf0f06ec63c33fde44e8fad8c30047861c24104d10022100a5648fe007b10ba286a5566b67e7d1fac94a0224a972fe9125b6aa130f8e10db 3045022100a0680582bc8227b873dbb16c31fcaf6ab88056e18f73085b48daaa7580ba195402200cc6937ff1900d751dbcace192dae213306e953762e17940d14bf9cf858630f5 30450221009500903e1f48518d7297c39549a7cfe8620baf31571965e1513fdfeae302e2d902200b4a3c1330d7e7ab28abe3aab8b7314ebe02dc63e4599be70bbee68dfc0d95a0 3045022100f4ef278c41c943d38bca40faf4a7beedc20459104fb2f05f288af03e19e9442502203b5be1d6524d0f2e22ec30ad3f074a0b06e0b302f56f95c0973b267b6f462db7 3044022060a33b550e1b5d8b3fd0928adaec920d5eb2ba845603185e0ef65320e7201710022024b8a20a20555f2715c3c7ac93aeafd108f93c6583cde2b98de9da7d1affdc33 304402202281967d42501b4250fa7464de71c47336b90f655258ab1e4bbb3799330c5f5e022045c1004a5f543049d09b1aa1736583a80e39582afb8ff04aa5a925c5a0fdd63d 3045022044047e1e4547b35692116af7298d15b18dff2ca552079984708ae1c85a1d849b022100f2c5137482ee09e8afde623d1f4e295a57199af349706bc1b0ff5f0ec580e697 304502207d13c300dd0263f5bf98b48c37e20e3bfca768f8479e2048e2f82326798c135c022100fdfcda2b1aaa392a6b769afacde088215469c08b76b13f815b3efd16964ca4ab 304602210080e3c405660a5c76dfb513d0cc93a571082edd8979178b88b12fafd42a816062022100b55516b0b5f4195e5a9c565431cc1916ecf458c16e7452f6f8185362796e6892 3046022100d3a9dd89dc817ceaed68258a2d2a6f5447542bfaba0427a0251c6bea13a4d897022100bbbeb00d95f86dff4159cd53d1585f7cf53e3396721d885518f03cd11584a9cf 30450220695c4c34e5897588a4087ae59694dfe72f48ea181ab493a1db925e138f3435e50221009ce0c4a723ae10164fab6da1ef0a60caeff7f6e89c1f9e1fa0e6fc3c61bd951f 3045022100e036248f9918732d86bf691e060c4c86f92cc499a32d4b9a97aa047b28df3a110220342473a97e04b98208c94e9a5f509f93a7c624ca2a1c9842bd9ff7f556010bb5 
3045022100ecc8e86f84d2d4b65a9a20062a75098969a5d93659308264418287acec6c32e002205b3543531c26500563b9b6c9d6542c096bd069180d3087b1f70b55e8d348a915 3044022054f36f8ae99077521c707f5afcd9662ddba40d0b6ac081b798c39510c2b1acfd02206f98eabf98d8afcf5f31241c5b0b7b7224f7310b75ba755db268d81c96beef29 3046022100f8b9ccfb602e8bebfe883d5cf529f01df8150a6db72139b84e75329dfbb136af022100d9fb6758102d5fa49cd9d7ef8ceb563bcee9ed1d4b442d0467e52fcea1f8b40e 304402206eb9e9474e4189458feeec6d3bfeb040b24ee579f834926b56f81f59f290cc3f02204ffa9cb9fce9775ac0ebd90401c25bddf5d175d1005ceffcc53db9327790ae1d 3045022100ebb93083afeba2d8bf6ff4686e3d1d654aa475cc227d90637226411966bb414702204dd8bf0e7bc98fbbd9d11cd58a3e5db079ce3324e993cf9022624fabb76ffdeb 304502205255eed0ca8956040d388bcb745abd6cf4925862f409658170c87202272700d2022100c21007c4ce99f3ffdf14553f5daa944f91ffc42af904a158493ed09b0f2e0199 3045022100a3574efa86d3ff6ce67fee1f4624f51d2be1057a7c84b404e2acde0fbbdf2b43022034109eb9a09268cdc80674c1fc8a3e965a90bf62cc07e55d367476fbff4acd52 3046022100b04bdd77e51e38142c74c14f9c3f82502190a96f1b3417476d654188d029f4ed022100ecb9e8e059d7792e36dbcd36d5306354d51da225814356c941d7d67d91b0908b 30450220717622a236e6571e80f0ab816a6d000bb1540a4bdf866710baf3797cee14f4f3022100fb32487304718cd21993f11524e198753f6813a48dcc2bdff495535963e39430 30450220755a6252f027ec964853fe229b648baeea27e92a3543e501fccc6f967437971a0221009cb79f3e91c422c85bdc6bf1549aef1a21832afa1a7cc870e4b09ef1296e792c 3046022100d34ce76b8cdb973bf3e6845f2b1a3e41568300b4b4341acfc60e450b0d3bbf00022100f22b4ed1d8c33a8c8dbb830970db156287ee95018193d78b3448088489b4555a 3046022100e4d66b35b37f14eb7e3a344adc1a1d5afabefc440bb4a4fb23c54379521fcc45022100e9932937c5cc1b4315baa190fd7f4c98fd4b6ea768ad7d1fc5a55d44980265d4 3044022064f19161ba9ccac2f170ee7c81a6f3e0a1c5a8c6219017e268bbd1f9de29aff802207530aa3f878e207e3c6736d0b1337c13db50acb493e2e48b9c4708b1be339f30 304502207a28c0bbdfb05a784996f7eed572fb6e3c324f78b9112e1348358736c7565828022100f7883ff2f6deb5eaa3fb8412d1b63dede1201c5542c6eb46fa284d2949f17da4 3045022100aaa8d74d1d13c309c98ed0c1deb456757681321592e4e2c888441368add3db7402200cc230d670b6a0010a5a9706e2bc1d15d298e9a149b49134854e85e74d9a5e96 304502200ad0f3e8b478959c6e1e04dfc93fe1ed83823f7d25ed14453542348b69d6f94c022100ad48b66e8c669f42fb8e84b8dd6066a9c262948ec00ddee3d7679a787d466a97 3045022100b2a0bffba56ab8098c96d179338f23f0056d90bd443eac7c551d5157cf9247ac02204bdf500341e645b60b092b8ed96ef1507bfa803fd23ba1a60576883c058702d6 3045022100b07c172db9c3e33b584b81ee9181e1e4608fca2955e74adbfb0aa297a1d09b8602200bc58296afc902003003c57c586b5ea69a049832caa642401d7212e0230ae75b 3045022068853fbd73832a5a64509da0d1b0e5020901c7ab9efcf18445ade7f9880e5076022100e3f85fe2a381b98e381a32a94abbccdd075e831574abbf793f024487c0105cb8 3045022072a88e73ef93e291458497a567935b31401a3b8229ae4eda3a976cc392d797a4022100955f337ab58de3f3e818d96e48d7b1c9be0ef5c7f4adc8846037f121b0a8835c 30460221008c489f17da9dab6d5234cd85fbfdb213876e04237df7c94fdd47b085a19f6227022100d93ec0e4d80598c267c66a44fec9e68cabae013113ed8ca7c36aeac71be4f2b4 3046022100f9dc47d45cf97bd28b62c1fd5b1d9dd91232f2098cdbaf22fb718a1424d7054b022100bb37d8fb1bfe18d0f1b972acb409b0858490fefd4860df2d3739668225be8a95 3045022060c8617637a42b9c0721967cb599ff5b9e80322b1d7a1bbb8280a300f1ad2e71022100c299fb5e3c372d2bd241aeae80c786c3db6217b68c3f79e7ca559a515f877bd7 304402203ce1c15598d654fdf6178317c700ebe704e0abb01dd954c47949a71f8281563b022063dd6c18c12063c0f69f0a783a9962f0e3e55b066e8cd766c09e69b8d571590c 
3045022100e4fd86a6aea4d0f000f854bab37d6e6b354979006879eb43f59b7c6c1f7b879a0220747871dc02f771144be0270a27c405dc4fbdd7b2833bbdc2a81df9d9bc66e3b0 304402204431f0c36ec9f21be7d7c97896917a01daed56274165179f6c8b00cdaaa434a60220297b018f494151fc1d38b3cbabd6adb9532472205a410219d984646210073865 3046022100aa3cd753d201371e58c682898d8161565a9d545233757cb5eda38d20bbfc6ef50221009dd59d9acfa5ade4d1a3dc3d6212e43ee5718f2c7dccd62179aabf55c797482e 30440220213f2996d70111331f714a7086535198669c8466d56a85755d0045a3526baeee022041eb2391b94822bc8713ff1ebc74b55611defa87fdbfea241b1a4b0af3c23935 304502202aef8b6794646b90a3a51e211d7a87851baef9c01ea983c82f3a9aa3f8ddeea7022100d4ba84a462bd3281f8a3f7a01107c3120094a5723160e7bc9cb62c92f882d7a7 30450221008ecbc3e2a97616a16a2d13561afe29efceb02907b0578cb56586e58969f7ec5502200ff3970ba2f30249bb94243b28368111485f6145f8c362bc87cb9f9d483d152f 3045022100d92a129aa3f8b50464123b30018ca6409330cb0d5964c64f08b2390bd1be1df802202dc53f27110c37845bbf6da20b86fa27530bccc141e91749b514e7b973afc481 304502200e2fe7d90ac6ed33fc34b19e1e0ee19d9b7142f68d0a6cd99f7ef2b4f9231b2a022100fdb6574657e6a3193f710172d06560effa4998501fcb11c8bef8a81dfc07d98c 30460221008e25ce61cd542a991d22e276c136d802f6efeeaaade6bf4d63318fb021fc5fc2022100dc26436dc209526e1db96bda1e3febd79f38aab0ea2a3095b8eeb49117dedbb5 304602210080a41a075c8fde4bb86f1e720e8ce1693762562a377bd113f922b756003dc4c1022100cf79d07d73037623817c167cd6b66e18f9e173de095339fa4ea653b06d774859 3044022029c8c2497d292088eb1e672df6526c47ed3d02c68c4ef2ea6a4c3d0449e946d70220358e11499d23fdf7448d6a4ba08ddd0ffcb3e4665e7de757426f74c3d50fc702 3045022100e99efc631421959d49806f0205851626c83679095a835b2e6781df3a0bb6c77a02201b5a41a157953693cfdb5c2f3a5beec02a37ec1406527a6756b0f30f1da4c723 3045022100ece40c9ae138db07397ac58b2967482de99fe0e7a1cd1ca3203703f30ddc804b022054e9058802efa1cf0ff54159a3c2c21063d09600c3715fb7534665d73956bd93 3046022100b285d2c86b0cde3a279582b2fee6cf55cf8e119fdbb3818f4b7d17f6818113ce022100a7ee2256133e2dcf858a772b4ae6ceccb5bf7b2549aaf6829306ab6a04505393 3045022031e8bbe2e9b072a18bf5677ee77dfa964eafb45234a7b68cd958e606d93211e0022100e50607a1fc6d92861a644f2add8c7688d2ac499b0c57b7f43479ce399da9e128 304502205051d07f8fc4eaf7b6ae13348a669da6c55dee2b5152e1848e666f50df1583cf022100ca7fd658d00d8abbd7c3920aa8dbbf616f9809d167b5ce926a222935045165c9 3046022100eb2b529dbbcfa7191b5fed8e075835aaeb3a28b7cd20f649d6e1eb04d988cbc6022100fa3d80fc23c71afd0a7fa0b30883733d2eae982937f716918d76a6bf29f8380a 304502206f999f2dbc0bdedb8aecef1821d1e8e846767b0cf49507d4cf98eae3ddfde35a022100cc62fc12ca339192d443406cb934bb8ec597766ece9d3650ba9033b87b293d37 3046022100d8e5371703cbead196fb29a3aa201089602d312e4c12e0c8d2cb2af4d9295c58022100e2690c2651b28b5bbb26ac9e8643082512ab4568954c865fd45f60bf19fce4ca 3044022073a135646d2ffeed7a8974ea686a81b41e7ae93fed121933302fcde574f7681d022074e16ec90835b663f71301d6870fdec85cf96802a229a67c532fb422a1f220b9 304502204c3288c6119a039670fbc75a9c735fe1f65088a851ef3229d4ddd8794b1b2e88022100add3d12fec9b30d1b65b6962c4ddc70b0c6aca1cc9e61084bc296bc27f430e77 30440220131727265f8e6a4d330637ca476b4efbf533685103c4b6b7a02235c24aaf8b2e0220153113653f23425f42acd259f693cc368f284b9ab25d770e8e80524393dae0da 3045022100cb154f8d71086faed34fbc93adc71ca0786f23568e451a51323261c6d1582af002201c9882a9c443102665cacecb2ef85d3fc8c6b4de459723d6bdbca82eeef899ab 3045022100ee5681ec4823cdbf744d665685e5ff862c79805ff3a34df4aa2726688a35da29022040f0c22a013c3db59dab43212f458f15a8b66e8be80f7316a226cc8dcfa0f98f 
304502206daeb9a5024735817388ad901ba21a8022dfff45fbe8bb6d6941baf6522413b0022100c15fd016f16f3a191ff0d06c45030ce238b2b0003adf367ed3cd25739e9b5d1d 30460221008247ce6e5fc16fa346584ae16bc70cf6e778dcefc39543f545b915b5bdb5e963022100a4b1949bb25ab908653438fc78c7600242d4312ff4a70e71216ac7ddeac2770b 30440220352e5bbfebbab5b36879a72a69f835d586f9a220f7480676b198cfc445676e72022006ad33b8db9c7f2b37eeb7be1fa020fd6071cc78512adba49566bb6d3a458e1d 3046022100de76410ce90016687f60fbd7a57ce7610d28d570f78b5f1c5e6873d7afdeebd2022100876ec8127a3f4faaab36b421c247f1152b77ebe26e018ba8e25ca28820fb5ead 30450221008f0bcd7b45701a4c04f0e3941baa52107cc2ccbb5865002d46a682b76e7a70f40220345524c845b0d409280eccd3746acde4a26682892912477b7d1264dd70c0ce84 3046022100b01ba6115c12f015e5c0ff9295b1752d94e292e650dd0ff86a713259da3654d8022100b29f8b0af09cbe4182aea18ec3cf945e05b374b5cc96355fdbf0200aca1ab553 304502204abb3587263f3f9238ed87f2d03be045f5b1bcb3f7037a6a09bfb637d2fb0dd0022100d244a4b32a643dc92ea442afeefca8d148f9033d44a2fb8e75f1faa923388539 30460221008ca9d603893172d8b95e352022cfcffe908ae25ca4eb24c19d2c20a526e4e0ea022100e1c7a3aa531fabbbd57490cdd9318d951c615a889c0c6493b32cff1e585b10d4 304602210089ceb2055354c53f7126615e8cd150e9f47225bd01a7cb2330a37bd1cb8bd985022100b81abb9a2c66f3d02e23c88f67fb4fdd1965fbeb99e7f68c80b5dd88ded74718 304502203a36aa85318ffecf1d7ac53ed61c38ccaac20679086c75cf003d556b3ff1ab5a02210089230d5bec29ef1a57aee5494ca36f931f6a4d1634e2af8cc6cb04b69f2e357b 3045022026c02d92c9fd77d26b2eeaf6f8d617bb3d5e173c36899bc842f7f28e7f1da9d6022100afa2e6f6d3571c0ffacaf2651f7d7f625b243600f9f6bd5bcffa795a13ecd886 304502204a336836d571d36c6edc5a2f401ce421aad8859aa10466bf6e8268b9681264d3022100c78aaf22f6782c17f1d1d970e26734a6475408612ea531013ab922387a88ecc5 304502203b6c5aad61b80ff12cca78089b21e2d3913906722f301a80466ced3cbc04c1b4022100ab7a3865ddafd2d2b737ddcb494a232ab1cf6618b0dd9db74f3e373e16cc0f65 304402203206fcb6caeced9b5b3104d3fe850640cc5e8eff583ee43d06853c2da524b25c02202379b6b7e3ec06b430bb29b433c3b18f89a996b395d34362bd02c731d08b4c63 30450221009055465c8ad753a3450e89ee6901a5c107fdb2c956c3a51dceddf2382acf428902207b2c0d09ae20e13f38eac556be26da600ec4f2a66b7bb80a85b6c9bd1d999ce0 30440220759ee5a6dfd443e3fc53f23973947daf742cc8c54c5d62c3c424dff1631196e702204a5e184733fa05d63793e0b0a1998a3ef35e59c346248f072e8c46ee145275f2 304502206faa8e715b9c422df3b848f0412ac39896bebd1cc45514705ba38ca8e764f323022100f405535dc9da6515b976eb275958878b94bf721ed8bc0ebdb60d0e0ec7b3db55 3046022100dd787155f0b9907700e492cbafe16090e5b4bd0e08be91fcb08070c83b21136f022100fb1d20c9e3ac803a9461c4456e3198adb3852f242d8004b2d38d11f6fcf1497c 3046022100f60decfcc17b11c7627e7cec2daf02c036a0e061fa40daf74a41edb68adc74e60221008c038f3cb0639284451d171c60298e05c4578a50a2937212ea4993751ffc8a9d 30450220639e7733043b7117d8acfa77f096af8a0e234c7403ae21d6ba3b6cd3564a0b55022100bba0f1259f8dc3c1d58dc9a0cdbe94e0f8e86833084ee5a87f1bc7933b5e4f15 3044022011e361381613025a54324c553a3239d98f44d7bd8797f3e4f68c5c1c78d32ed50220109eef0b1380c9db0003b3deb9367bea43b77f48a5f2e18182566d9f07f055e5 304502206e4fbf2775ca67f62e683db091f2f4daa11daae67852e4e5ada36cc78117a2af022100d4bd2705f4e06f735ff6c6892ac7c37d0958eb3c72fe881454e5b0d06a776be5 3045022100e936288b695bc9a7134f5e8ea13e04c0bc79d94212bb5411297ccc1f4cf3c497022030ee5b7f0129c5f6051c1d5171a38286803f5e6941a518f9f5939dd5f2c97ff7 3046022100d6b70724d10326adb64860c92e355ccc6f45b361ec922579d455890ff08aa6950221008f7fc3bea6e6acb41c8f1206825ba8d2d99c364570f33dffece61b38d54b2906 
304502210081d1cf05e1138ead978469d03272fd7c976c4856d438cc87289034e7ab4a5f8c02200a3507be47a0309ef82cb8b479c6e24f6325c3ecc2a2aeb21f351b8aa7a63a31 304502203e379ad4357b5e3b7b4d30bebef9597e3ffb4d976c0e52ea6c56d1dce1a439ed022100b4ac898f52be307f368739be559614753a688f0b02e352e686dbf7f0bb8f05b3 3044022031fbe9ef38a3b22c13773034bab51a588acaaf4afd8c9bfe2d03d93b9ae5509402204d6a9798449f254c7d39de2ad2d587bd44026edd236088defc18ba3a132317bc 30460221008aaeb3511d04ad08bcae9abf4775490f6ecfff0300ac40614834df0d17dd3074022100dfb5ca89761654df790e5090882f6217bcc5210e247b4aa646fa8210820cd408 304502202abd5b102dcb16b964f12de3881289a5048e31a025162235b3953fa59409ab15022100f9689e049c091c79ce3bac269f1f8685f9ca2ba1c8bdcfe9d4f495e72cded30f 3046022100aa3a7b64ad930c33e65a7cf0ec7bc9d843e735822dec7c5ed155f031c02e5040022100de69c4e55e3d2b6aa8603b4142a7117723ab0fd717e80c756b3aa381e7f1a572 3044022049fde36d0fe49ed10eb4566b59f45026d2e556907644dfe3c8d83792729c9c6802204d4d31aa09a6a4ca13f615d145837d1e940546fa2edc50b50f48906ec085478a 3044022044f5abbf40fa86bff1268cf8cc3ac098dcea0f3c5669e9f807028b3df90ed63b022006afab4ca52e8baf4593199dbd0922e13a1e1742b4677de6ef3291e7bfd5521a 3045022100ea4736f700dabb1f8b2b025b5470c1fca619c078efa7b0ceb6083e8349380dbd02205ad00cd6c3af585977cc3f990ddfa8fd9ecf2e75ae657618ef8f3a2de935cc69 304502205c6973d94f19a9c06efae3321d618d6ac9fce86e983f86f68b949d999370b0e2022100855ec09dd2435182d8a0cbe004e18b2775f75896b0e9e40415e7c202d4aca81b 3046022100e4cedc9a977f2d3056d10085f703cbf88274ff0f1eef9b330462036c756382e1022100d838e6888110377c22537c8b26b682a9df18628faaa7127852a977aea6703504 3046022100f73337fcf83f83f7373122a7f7fdc105aafdaaf06a33f157afb49f4bec41df39022100e460e45a6ccd2c976a47a7fb7f88fa511e5f8a6d985a00f3f53731b06ce1140a 304402205f74c5633c1022bbedda548473357d8f912b307fc1afd86408cf158a61d23ae9022009c4d237e4d859d94bd4877ae71772d7732653da4c5299c6cdc309e76d209398 3045022100a5232c90a3ab25a1f03b747c3f326feacfb06d8fd01240d170eca910840ea588022018a939c5cff22c1f816e3ac367c9bf0f9358e7db7b47a77eae20f54ebfb56e60 3046022100da19889009b702f35ad4a11ec6c712dad7788d404e92b829f725dc5ae8ab37a2022100eda06fc209b43d92aeb3146ace673eba26f312f477fb6e71f2f2077d83700103 3046022100c28209b7fd6c4e259d7b891e816426444e4d643f716c93d0fc39752b7763af88022100cc97e3af4c4c1f69ee7210fa389dff49f4325ac9a85eea883e8472a68f9b7df8 3046022100fbead393efd24f98a7d63604cbae37a0e13b0875b2616b9943a38c2149305701022100e2b0a78898f5786c17e6c3e871efeecb7431e0a7b79b87e716363d54528c430d 3045022013e753d1d9cc60d77a036a8bf1cc6c995b775d0907fbc5c1c75bbe1c78f4ac65022100fd3ebdb595ceb0a88a538d1f084ef06b4e2f452de82b2c124482cd6e32e627a8 3046022100dccfabddbbbdf48359bfa5d52bf320495a6a4aca407e32ad26509f9d9cc179e4022100e259d6a1b98f7581e3aeccc25c308830bbecb9a79f9fa2f9af950c22276f51b6 3046022100c1df0566a81e263a4178369bc4733018aa93ecddf40808abb9f00cba35ef0dea022100f2c95e20d3bbe06e6dc1825f3013e4babb5a29db3ef3b12e01272173299af06c 304502210099140f85617a11625b6c046a5eb6807b6c3691bfefee5a2260b7c703685a0853022049b432f2076daf6c902a626c3faf200357a7484971d7e981208e609b98407838 304402201c157d168549fbd80c8cf6833aa376def6fed92c100e671b8367deab9618785102203870f1be202bbeb97325130c00b05afc416f169f350efc3bd54c47235fbaf1e6 30460221009bb0d63910fa3e71b39295f95476b55915249e3d7ae8f57a31893a994278a1b60221008302038d89e8df9f5c24148bae7afd1c9fc83798f7a9db7939aa69fa70fb29e9 3046022100fbc19fe67093ca626c17b4722e8b2ee9990b1ad960a1515c8faaa354e8743c00022100ac6becb98e0b5a308d52fa63de381be88dfe06deabe37ce6e7d0ac994de763d6 
3045022004223f14f0cf1a8e7123bb53759b75cec7a6e51501ad353b47d4442a3cb9b745022100f821416296657cd149c5eada74ddad209a0ba86836ae5a7e1254edf62f8660a3 30440220275015b4445c786254f232805d17b88e9281193b08e4e592e8d1eb8a0e8d9d2e022055737a18eed9637f10bde4be5a3e6bfe401e6ccc3d82e7e3fdf3db61a6b1fb4d 304602210080062ccdbaad26c26015a97a7481b089c07ae0f894841c4226f2eab6e07753f4022100dee02e4d06001a50a0cdc7f8b41bdee0eeefb4c1688ecb39598f4a65b08ca015 3046022100a1dc927bd8a22d2bc303a2f246d65479a4e381d9cfa0133696cf80a7ae04b521022100d6266d53502bb6da751323410f47c44686377b9d3132257a0b31ae32b82dd5d2 3046022100f2b83da4e7c9ea6d2f07df8cc46f912e12049f84b5b65e90d4307060e446d8520221009db39e3c9fa9d3be23ca4a0d18419c4cbeaf54ba3610662a6084b5d143086bd2 3046022100d851e108f74e755c5148ec4134308293d127b33ea8c9162542393ac8c49392e30221008ea89f0d61670e1edbd0e21ca831f4a723536732aab6dd1f20c5bda5487c2a54 3045022029760b6f6e07c7976fdb1a2552b37a2980bfc0f2ec292546abb30e156c98769d022100e3abb42d127ff41cab6edd51c438d61b82fb7fc49e1221f4977f90930681e0fa 304502202ecbb8e092f22a0785e57b77927d756bd4646b900b8071214e880f96a6aed33d022100ebc14bfb596d78af640d1601b6b847f56ab17c476c71f10570c1d2a052f4a7a0 304502210091afc6d825678aa2c7e82f7abd1168b6c61ac8e037343f90394d600a38b85c8c02200f0d7076403af1ae1fe00b2d771163d7cc2bf22ee6afa5edc0e3f28c6fba527e 304502203b8c89874d7f29f697dfe9a0490a0e69af6b87c7692b2a3c977f539389fac25a022100a1e4d8d33b986e654f9c43290157cc1be5c0a9d81b8c7ac7655e9e90786fa5ab 3044022033270ea8b4aac7884b650d0b5ef39c07f7c8e5310b3b63a479dea9b4b14281fb02204423d10c4c368bbf2b1eae9b9d5ac230f96d1cc88ec1b96731843cf2b527388c 3045022003906019785e50ecaae5160f5bc40371c6bd40a7f9acd4202e795490196af543022100e4ce3d2010d1afc3b344ba70821867bd380f818facb97e6be9476b69a110eacf 304402200d4523ea74ca6329c30405391360dd20cb72df3c2320991edb56c8f17e03e9a10220570ef130ac3b83da3998509e6df9dc09fdf43ac77560060230ac5c7f2aa4b0f6 304402206ab113ea94122aa917fd2250dbc48ddccf34e85d4674bd3ec1a0f4540dfb3b4f02203313b311edc24ea3562f16bbc120ec383a5a4a51a6459220a5043472a5b87012 3046022100dfa392b90a958bdd2b19661ee2431a967a95f6c12037942c57ed9d40fa1b767b0221008d488958d87c94498d31d0d16348e903abb8e4c7c4463abcce24671a5e88564d 30450220015a11352415d2f4035b625787b8febbf9f782f7fed0c8ff0f1a2f2870f85b4c022100f1dbad096b9cb450b3d5859696aded34605bda5908517a1a8dee150c10b2a1ee 3045022013147ffcdc93d41d3dc9ee03366c2934520c6f0d724ae583000177de3ba4c6aa022100c33046580c5b7e634a78428bfec3f205a141a178137491856ad4a800a5716cdf 3046022100d906ed0de795399f5ba43ff05fa89460bb6c8051f4d340e946b4492f172b597b02210092cfbdb78a1d319f3bab63d2125fb7935a0bc461f40d4d6148036e38d4b08d69 304402200b7167cc8aef107e172e6cc3431e235bb72a67c6cb046a7fc383efab8e65baac02200f44a8754ff3e851a208a464e4421c3fb09a415a31df4938be88d10787aa5d09 30440220549d369ef2b360e8bc41e2f10095e7d84c1969e2ca2c3ab0663f72895338f3f2022057be9b953400fb942cfe3f30369ae86760f9f9d2bd250cb79da7812789946f60 304402200ae781f9cec7facc7238d2f34e3b18011197e0234ea0efd2a82264a9cf1fb6c902204245ecec9169f04a0075d40e4ce406bd4f7dcb59ce558b6e53fc533194e2200c 30460221009530e9de03b2f10454c6a56936834b4614dc5ee57a00c7e600103b15c38946fc022100cf1158d3d307c1bbaaefdbe92439dbfd42cfb47d07bec5f54c3384ba1809ff5c 30460221008f2d182fab91e24cc1489e56dbf5e99f054fcf09b9d80c919c70b4e05a361a2902210099861227e95c160ab0252b4d95c2d38f305744d33b7604faa731c8d451b18ed4 3045022100cc9b1fed09564a1a417d15674ee8348c0345b62fad1bb516fd9871d56e8a423b02207ffd414e69d6d3260417d2b94368836a89a5dbbc644de1bc3edba3159103de23 
304402207562681a8ce7d56341912bd48fcbbd672212f1998447a64dbee146330fab42ce022016eb78585338e4a4fba06590e42a6148cd10086f703f560964ed4e60d430fc71 3045022100abd01f429f8a5a0ce8d47319f6207c6e0f33658ffda1d80e9e3c3164a35cc710022034ee41c15b7152489e252e0d91b8e2f0771ed82f3608befc7d9f6f647885da2e 30460221008500f8269d5c7b8e20451a1c9bd344ee0569462eca50041de47fdcda3de20e020221009f0eb513c1497c32a6e028551232f7d8e1678cf7395ae082163332e237ec8b86 3044022066b528be87996bf62e64573ebafeab6bf8969a9a71f8ccef7587d9fd8f44c6970220269c752aeaf414fa6072bd5d869fb003a43b265035e84c71a46156da3bb4c92d 3045022100adfffde7370a5cd7c17397d3647be4bbb758561696db138e1b0dc5ff0e66316502202f6a24fbe55b0bba35669318fe6c72d44771753176f691cb27ba10a027e649f1 304502201e0369e4e4f02deacf40e4815afc04ef642cf4e77f91b038834744e0d1666705022100bfe16bd1ff667ef5668ecb7135e4e9053ea8b420ec0879f5fccc03bfa07198de 304602210095343923e5c72c5b99267f65ec6bb402bd511146549aa06dc42a9e8399093080022100b4b0fc0f0f6839e15c752431e25246423cbbdd7a1355de88dfdb94a1f86fbdbe 304502200f3b856e66fc00d122d901a23abb51d495bd30d9dd1d060c4fc78f4493542e4d022100d24ca9a15ef789f42edd7dc1214ad87f489dcec3e2a93a6f7c5c5b28e1f72221 30450220206cbe0e070c093250c3fcae325482d17cb0315203d59b4699d3799aab8be695022100f4e2d812c8d2aff165547f5ef80bb473728e2e6c395361cc11959670800460b9 3045022100b9339d8bef7f868d79300ef778c9f492c698a396ce3c0207fdd5b728fef9293d022025ed3026746ee04f4bcf17ef50bf25e0bb7e2695c1261e46186c62a58426018d 3045022100de018364d24b8e8009934d19567a92346fae941a307005e2784ac29f0550a490022008d8296b6af55cea518a4c4ed36721805fc71e329e1485bf61ea54a88830b80b 304502201da9a9042eff13294f5ced37d5e941f96c2941fd5405926d08cff41c175673f8022100e4735f883623519ed5aa53221a64df0755e81c788b66d01e2b764788c122e803 3046022100b839f2b74b204da3c0ba5142e212840676452078cd3a6cece41b67790a35aed6022100cc43fd84ba3414580a88ac03d710b3c11faf6148c8db78f2ff8ebcd8772e08db 3045022100e4c92f3c536a390549bb774fca5f326cddef7638128d87a95f39f409a350acd402202a46779770973363877f1c31dff1373526a229c973727f452434a7c8f7423e1f 3045022100b4a014f017ff51e6b3af817bca35b9243cff5a4eee3f91d3db82a4191373aa8502201efee8fea0c0a92ad56187b802b8a4b93b1eea14a4889ff93587d8e4b239eb81 30440220560ec5bf21ed49605cc3f44504ae382219adadc8553439f0db69f8a77eaff5410220759b19a86b82062433659b55ec130c534a0e1a90737e79dbf957da8eff94bfbf 3045022061d33cdb1c4c39d76de0c39ed62fdbe101badc9aa428800024b8ef5ef31536e5022100841d548329b1dc52b763440cf512f4585f837cc14e197b4e7aaaa3da5d789fee 304602210088bc0b345bcdc90b9d7fdd86b5d878c32ebbc1073bb548247029cf851afaabf5022100971a4521bb215a09294432b65416a2cd32daec7a00be99634c10838f600b1dd4 3046022100e5992f0aa92221ac9143da56a284ab985922a349ee5a90881c1c34271af11db3022100a2dc431dd23475444cabcd582cb813aacc8cc054b8d68a084493d3b24c09c6c7 3046022100e26567101c4996f0173c47c2870225d906fda3531848e2fdbe7cf399f1c8f2100221008692126bf9a0baf1f62e28c358c5fcc1fdda19a3f4e7a51adeffed1300fd0813 3045022100802cd1323a6a2b7942b29a7bc38d60952506439b8e54d0d02bc7f8a6fbe92a65022024b8b07c184f640ed9ccd130804dcb769ee68fb6df81bdcfd8fd4184b1e8703e 3045022100a0f01a417ef7b18efec30fc0fb968083939266f3d18813147e1c3098ca8e08e202204bc55adf8758bb1a297dc9fe1692c395389b475e5acb7e022d23b70f122ef331 3046022100970c2c5239739fbbaa0c308c27be9ebacc74048d2e90f72c3295744518fb2389022100bd5840da33d97840fe8d99b82f1f157839d84f0200d8832c8c9473ed716e22f9 30450220420d5c61977ba03b2946bcb5d8372d6ceb137d1b7b3855a53e46d9f0f9b23a6a022100fcd6598d388d023d07205fcf9a48f2e99449e1e3e96a6ca592e64d51abf9ec98 
3045022100d7dae27e81b317d0c9de6230d182194407d715c12232f133a79457a43d11fdee022058e92d5d5862a4a036dbf68626661d4ce81f7efe5603158bde74df9d4a9f119f 304402200665289fe46fa4f193949439d1e8fa9b3afd347d348840d8ff18820a52dd4f4502204b25f8feb9e8dc3a4738e59052865deda4e0186e2405857137d44a92d654f0ea 3046022100cd21399f9ca3e9e818c6d2bbb13c39c7445c13a5c6283be523318928431aa3bb022100b860b573a129537fa854546ecd2120271d8615c8895086289b7b9a769ea74c60 3045022100dacfaed4968f89959df3eb3a7caea9eb66a5ef1ee357647a0c0f853c6558d48302204cf17e09233ccf70c98502ec4b3f549d168b4349a5b79eea6a09ccedb2348f48 3045022100b1ee21fd37a82ee11393fde3c0742bd5aa6aaf56cb799daec8eb95bc070630c10220592e6aee759e03860dd451dd6d1176bab6188ced149c8806d5f592f71d9ab50d 3046022100e351713f92bcb6f3284548dbd2a032790d3550ef15b70213ec4973551cda4867022100dbd863a803c270f155df86db2983ef0aa430023d9e260044df7a296c4191d801 304402206385232998115173b243f51df52e24717a61a4777d151c6f46a6f38bb17b834102204b381f7769ac7e75f10c8aa2761bb1bfa5916d28b4411330b1ddaabb47258c97 3045022100afdc8ed78251521a00c95be2d9bbe0e8cb6f55f95ce637e73915baee56ff670b02203fc5bad5dfc30936dc37272da5e91189729b78ec0a17ef6884b31e2ce3af3a5f 304502204d79430bcbe57c3b854b150c43cfea9bbb7f3a77ec855da3800c6d97735afc500221008985589f607ba97acb15d34ffe19bdb475a2183cc4b0dfb8b7b80221fdc53eee 30450220422a122fb12f4dc97d9facd2ae5ae6789b554c37bf6dde918f8873121e9a2f64022100d9ee396659337d67532fbcf633539d872c1c17aaf2f9a27917b5f869236a1483 30450221009e00a25ab9e213c8307f15466499a2d11fca168b6a9a7221d9871b098c197bfd02206a4101fb7c040e84f927445fa8c1471cfaa451befe4a8424e3b2ab9977128f82 3046022100cba49c808b64641e451ccca62f89c18d66e6fcab803c43a27a71499d26773dd6022100a1c6b738c2351bde4792a42b3eaf5f995320e19806e027ecebb629cf54e09a2f 3046022100b2948b5991545783a736a854f66de15e724c51d51d39537f4486e801863567810221009cd9b0bd9eb7158c2bc4fa699a20e4ae603269f4ce9f9e6ce95bb2323fbeb482 304502200d37a8e9a7338026ec90db6d58245bf7b2ced20f0760376231ef16f8520862cd022100fa7ffbd4a96d6976a21b76cd9bea7bdceaccd35c71225dbc667ae75c5e03627b 3045022100b30342fda0a4b6b6b2dd144dfcf0dc2483e8cbab4adff15ccc22d2d712ded9f802203e6284979f94b9eb89d78c40b8a142deaafa98f0fba655735a30af275456ddf1 304402201d98e2fee904127414883da21672a334a159e4b467e454cf0a72abb8f8689b4702202adc977533078286aca816021f5096ce22920204a5d45317c52a5dfa554ddd00 304402202395f0bc676fe0cfe0a8f03419d5102bc7e188c44eda75438d30e38bc426f3090220506972642a5eaef816369655decebb1d6090fdef088dba7d01261d1c85261087 30450221009a5d2e937eb8a5ad217a953a2dd214439eeb7e5e91438e46e23de35e6a0acd1802200b547e5c58b5d9d24d799ddc9007b98a8a24a38463dde78c1274390e83ec150b 304502201e389a49933b4c8dfa8ef3a959f467429c6ae6e030ddb8c046bf9a3e0f24454e022100c0d3f9377a04aebea4192071d85823d600008b1099b63b1884456002febb2a13 30440220735fac5c76953a5190349f55cc95f045cf707afd778bc630f1a23d5acf95d6af0220079828d854a4e098da50d9405e0f5bc083d383ee51befc3d3968aaeea090dc96 30450221009d2ca8eebcdadf3bf21b30556e7c1e339fd60864d89d20965f74d8a9f697f583022056fb2924ab4fb0b23663ee764a26e1cc46e7402da6ab45cfd7f37295cf99610b 3044022005ec22a3a8552868973015f03c3dd8e591eedbfde1b249773a7c786b743b44d40220798290c9a218c7ee09d3b9588c625ba13a14c7421cecba97c7e25682ccda05a2 30460221008837beb1219dddc924a08ce91903148a3292b7d9cdcb5d2917d621a7c390d0f1022100c72a67097ff703cff2caf49d1e354cba32757fd723db2544da03ea21fe4b8cc4 304402207fb361d9e8c1d7d68e7555ace6c5cd17d187e1e6b32d8330f5e228f6bb7054ae0220303c158350c4174d8f8ca8409fc66f19c3d78aa9d8df7c158b4c8dd57fde1b46 
304402202dfe33dd1da3c06a767a9e2ce740db1908b7d4ac61fb37b60b6652e51279bd13022040a963848fb5f734f31dd75b7f63986fb2e672de39c97e6728446f08173bdc27 3045022045d52ffaa9494071b1a47e52aab76e46a2b968eb853ad3fea605acf055a5a8d10221009c832d472917e9c8519c4305f76d95c7f0c51949fe7abccf7385229e98aaca2e 3045022002971cce0849055b398f2a3d9d8e937bb984b52cfa939124e63fd018fde269fb022100dd346258a8f570bee597538f011a487f0100170eb1bf4690fb958ec1bc682059 30450221008611613b3b1ae4172eac7e1443a2d50297d35c2e28d0f3f26ebe1c4b4edf776b022015743696c8a07d30975816a838dcb7d8ed82f38fa8a6c4c5b8e2df4c639954ac 304402207c0ef173658c34cf6973bb42032a534c55541f1a72a6d27b4b3cc2ee4465d1f002200c28e996b031df4ace9deb2010d0c6de20b9c47e0c94065dc246a75a99d9d0b4 304502204476eb83f2a12985ccd96e008944fd86d121d250f427aa18000222c2d7c56d86022100bb07a88da732dc8e580eaeed0c417156ca2b757a306ea357f3b427824303348d 3045022100db27cb11b82cded869d129c3a2a0f18e3e659025ca944ede257010e4ed014e36022038242569ec4db70243806141916b4ff11e61ff96e8bba93a4a2adeeb87155c9a 304502202d38e9f215693e477d556cea2698e0237109a2301239757bfa3ce5128d800253022100a02cc76b8cc06904f6b8dfae000bd0bdd609e4d61b0b502ad59aad08ac296de9 3045022100a26b7900cb5352117edb5d4f00b3997e9c4a94b050d999dfd2c66c460eb8d43d02207db4605978314ce1e10565795009d46436f89e8424f878935431934be6ffcf70 3046022100b1edc4651f18580b7bc6a239fe682b5dcb18b82e66f8e8e6b935f89747ead8bc022100f4490828eed2a3535f444670be13f55ff41ed4f6dd93a413247670f50809ecf3 3046022100d36b7c7e2914912e83220e1a15ab4cf795ef5a9f38e9b93dbf12b534ff5781c20221009b34217f89ff5578ee4c26f80d3c9055e60b40789dd3e75f6004425f1794bc82 304502204b052185bdda806e17c3f097f725cd3b1108c71b88a8e5b1c37a23f129894b6d022100ebf966c9ba92206cf1d93db2a12eb7eabd66ced1ad154a535650bcff22e6c06b 3045022100d5132efc3b674b63953966a450152522aa8cbd1dcec939d49a3e105cd399cabe0220051975bbbd673d532f630c3828972f6cabd6b07308b1c00378ebc24af3d0cacf 304502206a96f9ccd7c51fea250517f22b2bb45577918de78bfc0c50238580de329f0e4a022100ce833e467fb4c30f214980ba472de10f6268dbfa5f8d9013aaf4ed59b713317e 30450220671d0bbe41d2a977b4e37c52e32e3a02446214e854efaeb9839b3ffd9ad101fe022100bef5e865d40d974212bcefaaabbb40a8ba6a49fe924a1266fb928ed46591576a 304602210099a3517309fab1e67ff06e6b1337f1ac79540fc67f65c89d709ff4d579bea9c2022100a17631b0ac08ea89c47a1ed01efc6b313c1f3cd1be5759f3854769a880fc267d 3046022100ade4956538852e703423fa62c9f79a207281f071d0bc7dd4a5db39f719c5ced2022100f710205ce7995e500635452dc2e7c00becbfae885f416902e30728646942dab8 3045022006f1fb2aabb67c19fc007fd4adac2e304e644dc89ffe76c849f568eede65359d022100cd02d0862d0aeef3923dc66facacec59b604d183b03c3e7e8cfcbadde27a7dd1 304402207be9cc6c415f240903742c70f3b824b94d2f01c6d58374b4d084da9d74aeed12022040d765ac9c77024ceaf9e7ef3f3bfeb0afe7f5a505e154ac158e8b9025df870a 3045022100905a839d0794e21b1ebb900c55b3a5ab4286774941a92e0c2cff5064749e62550220565f64f89371bf5a2703991b8be10fd968c790399b76bb2d544d61446271f6e4 3045022100c6b7c6699cd95dc220917137dfbbe7a34b6d1d14d599b4e9197a626321116045022018541b411301335cce790753d3e364153c280603005f51178e08d6ed881981e2 304402202cd9f92dad55809d6509865750376599d44c188cae1f7f079763b5f29bfc762802203caddb607cbd1f61132a6fe3899b74582c3ed254133bcd71ff565ab0055b70b5 304502201ee17517a38993f8c4a37f76d221d6e4ee020af96b3f0c64f032d3cdfcc1d754022100b0ba3056968d7fac07bfe282e388658970d1279615b0f775e842e7d10fb7a787 3046022100f0665161ed7c6911272ce8608da255539936f4e6b1c83b4ee15a89143cfa2049022100aaa59a5ed797525db78683246ce4a0881b32b269a9dde5464b0da5d01836d324 
3045022100ac7b5c7301d29aeb4fea8612a2fb59303e0094aa885ee03a4222af9c1dfd06bc022041d7053299f13b18fdab2fea28595b2d8d184071098961d05732aa47764fe137 304402207ff1985e94041f9b3a3168a1f050d403a2a40a3b6454eb83fd93c1924dc59def02205053f4ebb42151a5bf13cbe03cdad63741277416048348e08fc2f1e3d707bd42 3045022100b48c3630e4845b0db5adbef7ed8373e0ebbebc69c9f3aea7cfbda70b936d909a02206db75ca3821e6a75e41a9a458bbf5fe1fe1199063062aa9d1b59438822d254d2 3045022073626d75386d42c4802935d9dad180a0ed8d64303963d59c47661dbe953158c00221009b9091395a692edfc325eb7df94ab8b635bfd27da6853dc92c318e3f4ee7eb7c 304402207934b6cb1bda443680b2a518c96d690c4a27ba27e31e13b1781a711579bbb57602203dec94cb64d9a754ec134853d070892198da68db8804ae3291786b33a28c98c4 304502205eb759a2e2b76d7902a0d6464a99cc1da9ce8eebac337394e05db5035d91d579022100cfe4932e1382ac3522b5be251dd6ab16cf4d677b660bd7bdb22c90bad94b7a03 3045022100fe116f8678360be9b66fcddaa84435910f5a3c847af4ad5563b065b2cb0f069702202c176dbfd477773b6a94de9d938fe4c21e0215272d04e00521c1aa20c8c2f32e 3045022100b7752f3bb9517792ea7301c7ec9bb5625e986f9fc28bc5c7bc5ab64a600263ad0220569594a87689415083f7b67c141cb12791a9c8f97d1b237f0e3f9af92d41348a 3045022100fac12279c3feb01a0d65fa6ff9bb25a459d0eac23a55c7d440f3531f965bb02c02200505c950478d91b4d8c43be07c8d0d101ead74e0815e2fc1de7d2fcc574f9d08 3045022100f6481130e72d8335854ea2bcc4da4d4f0d9aaa32064f369b2ca83312d3c105eb0220296dbc2ff8ded3913cdf7be7283b8f5fb9eb414f24d978efad5767b5ba3aef21 3045022100e8b91c76d35127cbd9ea85fdb64fdb8e40646171fc53a9857a8a535b338b461202206542e707dbff83cdf0927bfea3c005330f0699458649fe3db05035e9cc1109ff 3045022100cffba95de0482dc5ec90ccb6b296b2f780466a465674af11616a0c586e9a130f022032878efd06ca7158870552e7f96474437914f4b989daf309f9b453ad3f63fcda 30450220772eb8a75914607efadc930a7479bbc5d70e0ed3bfe549bb6d70628153bef072022100dc5ecc7b4fd6b22d74e85771357e4a14fe86838f66f730df7a0476759cf3388e 3044022003d37fd87641e2b6a7e4dd9ecb58b1dc8892a30cf20e7242849cf104ef481b2902201cfa987643c3d14a90dab26b12b3896f5927fe1214f8955805366cb487d3cb94 3045022100d42dbaeb9294115c8d18cc41a39db5fcf4e145d916dd04b323107c76a671b40e022065b8fa6890dd405fc52e4ea42f90cee79d040d54a26155c87c79daea95d9f13f 3046022100d2ab753479656c644fd4d97dfdd8fe73bf1512696ae45eaa7666459eb1716bce02210083ef48b8dbd739e3e37f12fa8d751f6445ed0a87c96665534d1e112a854e3e76 304502207ebe1f094570b6a9bbd54496b357309c81adfe9e7b50c84c4ae9fd016e22f06f0221008de8b3701fc82408fbb11cfb3273db7670bf3d4a901189bac21a56d60b683114 3046022100e641c14c6ad4a8f33a2e06bc721234b6d04cd7e0022a52d688cb5bd5f70e3bed022100b6dce2f7b74d06bbc5f03cb5d4c6eb21d5da32d5d3b4f1ddb7013c13611a9843 304502203fd0aeb384166da7c6ae2779569fb88f9226282e6e74de1d7c1a73d21ac2768b022100bfa034601b22b537e32d8ecec0a6606d6f3d9c29a71e844d5d08b45ac11ff107 3044022011fd45f0e2dca30dc1a04bd55a92b8291fcf96c77e31737ac3658f794ad783eb022065603a093b33c4eeaaa465dfa7ee8ea906e177df2640907e5f53001cceade958 3045022100ab91fc5cc479c908798480283cc77e1e0d87e2cb5fc86b581b9de0759c7447fe02206f6068fdd4456d1b33dbe622b3288a96d0607eafb8441e50111beb6500c95081 3046022100960438d10b24b0a69aaf7c7d5671e3b8fc1d71158489c3b8f012efd07a475c0e0221008ffac041dd25a6637fa48379dec7320558320a1c5206be433fb8995a186180ba 3046022100af45d0fcdd5c858a90c6fde2bf6b1c338d63343b2bcb0603d39f3d5e15fdd1fb022100acd5fa413f227e38c7dff579a74af80ff7793a3c8d836dc68cc824427294e79d 3045022027cf78f828ee41a70dd286783cdeeb6a611a03e698d59d5c0beb4134cddc00da022100bd7fd6f94ba1952ddc635191f209c419dbb8cc2feedd191edb0d55007ed7deed 
3045022100a054877a39dd16d31f69d02ad9689e0cbc5ab302e05633851988b1babf0104c30220689614746104b7322593d51475d1f3437ac1f7788b0d88e2d31b991844df9025 304402205bcb76816f0c2eea3d78c560dc3b86f51c621dda39cf846d0ad56acdb4ea892502201cfb33ba24a2c45de2c921f1430121ec7658a0aeeb8d48621dc04e4ab1e987e6 3044022025d13c123fedd7b13e8d639c4b10fdad90dd369a2047de81fd65e7a557094dde0220137835d27d4fd0f00c69175605b4a49ccf72cd7bf5296aaca810cd367c6bb833 304402204161319992543ee316e5c9b8b7ef9e877a7a807c74aadd297fdb2eac12fb30d602206d3f93db2e7fb5836b9d666abf6cdb2eb7a83b0c8a0be48cad79676fc2cfe879 3045022051bf0b03a9068ab59b7516d7a2d5a2065e0dfb823ce21063e7ecc56d085babe9022100bced6f33170944385e03317ba5b0450480a8af28f0130e1223253146ffcbe21a 3045022100809a64cbb53154218a44f0df1a54fdabfae4c5d733463778ee55f6a9e0da23c2022038716c0d9b41bdd1c1320878f3ac15b412ed05c6a66c44d7d44c4b441395d548 304402207d9c5ae885b9f54b08cff72d8ef254d2de3dcc9bc2918dab97b59e45ca83be2e022053324448bdb5a669e1b5a6f5a42bc4cbe1b677505407090546e137500b489d66 3045022100fa3c41173c3544f2de708e00b2d722ef591ae8a3744f88795ab6a9af9f5a9ef70220024d321fc5cac2725bf1f4a973bb16c9f5fa550d1b41acc831e4973da2841ead 304402206a766254406e23fa47a531c087ed617fd1c679cbafed207cd0fbb48eae9f35bc02206a65329d26409680b07a8aa0991a0e2159bc4b6b2ba01bc9936c92595652589d 30450221008720b228fc626d30c1f4310df8ed881b2120408be212f5ee99d0cf9972be651902201df8ee1fef45a2789b7a268272a7ce31a578bebe2b7b2466c658fa87877a8302 304502204e3041e5131b49d2e1367cf6b2161b891ba93a18b5d5072ef4aeec278ce4be660221009d954c98c829cc9c8660ac12ef7a2b3ac81d23d90d9799c89d4f570c968569a2 3044022068c7b8d0e418164ea99079ebc4eff2b83547efcbcd57aa284cad03c3b24c5b2d0220248cfca8dd273a4e434bd0afa78824970f832afd1a56aa43ea30b7c9ed60937e 3046022100baf811c611809bcadd088a96ba68941757c5e6c7ee7e072d868ba71c4b76f665022100cab2ce062521d846eae5490b5ed7da3f0a7b2b8e0f2852a7b62f18587f9d201f 3045022100e4a3e01de1f744d78203af2ebdc25d8f045e06bdf54f2138871f5fb53d9a6425022060f34c9c3bb3111e9e70d0f96519386d555f1679f3010579610e4064f000635c 3045022100a6e5e833f0af1c1962ef3c9cce794a0ed8f373b53c530e8821cf2c3fe9d7004702202c1361c972f1c784ab141d64a97260a42f38c394801a0b7369a81ab7b3f045ce 3045022100985b24b575dc0426324675ea9a87e1864cea92e04fc184d2a0f5a862434214d5022033c4529c03dc48cbff0461e32a170617169ba771af68a5fd251007a8ca38bcae 3046022100f34baf206358649addc2b64d1fd0ab2e6d2732d729086f82e6b07d7d4f476c240221008f9e6e9e99f216ecc1ff78c599c5547d12c961bd578453a9b04d6e4c39915b4a 3046022100926e8db18de67ae43448091af1bf7570acb830a238ecf29d72aa366494c603ce022100f6156592f489d667d0fa2eb84b64b949fe99ba884a2311809818f53b2fc1e48e 30450220660c970b0772fe956847f8fe117d7d41a39a15377c92778081119bac7471b357022100b6aae0cba361fa28c4f5b9881371400f9053acbd6e6bb59c61851b5dc0efc79b 30440220029e8db6bad7b0d6df9528a3cb96239f9c0ce79b97ae0ad4f017ecfddb0c45dc022050cc3333f5d17fb346efa3b86b20056da6fb5d1c74f3cfffc6c1097512b6451c 304502203755914912cb6e3b3c924676a850e323564e6163f4fc1ede15ec63333d48e85f022100c79f90d8e22a4960d85cffe8c42764180cc00c3a14fc6e4df33b8c06fe46621e 3046022100d58d1a34d5ecd8461866494d199b94f54cf4d0482289d85ee49b1a84815849d20221008aa5215636985e0a321d3d44d04e5846e3a5889d52cc40dafc21c34f91becbef 3046022100920aa90e841f4565ceb69f2f43955278699237cde75304f596033a5c8300793d022100c4a4d6c5d235d60001a265422e141a3281c0f33259e7f5a28211f728c7cd91d6 30460221008005a083a49a97214942e901b3e571f20acd01523257e230c42b95b1d371a6f7022100d9f2d72980a3f5f26cac78fe3b8bc70aaef7d695a17b0f973db98a31c3afe9b1 
3046022100ec81536a84132da952d9d8840bd33704020b0b6414714af35e960080f4885235022100ce845d2f4439173687f6ab1559bcb66e4670af00b21be9ae5fa24c0ac370ad6e 3046022100fb29fec320a68ebd31c25006a3eea6c1b81524451d432fbd001a4a5be0e3c4bc022100d5ef7e7e28fd206534b71354c37e9630b48a1fc219249ba26ec365d4d5283f6a 30450220012adfea619a33aa0e935527a65b0dda4fcb00a3054cc5748f188cf818754605022100d04fcab5957b8f14dcc0160dc89c070c6695174804ee834127cb90d95495a4d8 304502210099171f0a55e86a0df808d60a6f353759a02febd7d5d6cbbe5fdeeda9c936fa4202206acb84637be939759699ad11e38cdb20fa664eb8e13875b714d06e015b90a13d 3045022076bb053685824aa595cc3744c11eeef7d34cf77b15d46811ee3cdf3eb4ffe3c70221008ebedeeec2bb11559b1f2045ef093d749ea884665ce4d5c90fbcd5dbffaf40af 3045022100836826cd92fcf59a1295305558a92733c84b6815ac15ae917375b00f4f2a03b80220426773014619679e7282936a8881bf042fabc13aab8173add258d75f72b77d93 3046022100dc90642d465112d61b0114dbdc492e966a8c791ca1bae53be234f38cce88051a02210099f9148080ced09cc2e48e45f026b910da88b5e723bdcc1a485641b97bc067c8 304402203c78ac2d9f934a9cf5db1bab51ec4fdd350b8b8cfbef8ecd06f3aadd0d174c46022063cb94b4bf2f3c41863e7a5d18363006680651b652861a2ceb78bf23e7964e97 30450221008184cfa28846f47aedb29d43c4a74df00228433582de4e2e59bbac3d2c4b50f502206af5b31a70fee5bf65224b2b227d8ae44392695c659a0706507750d135cc9005 3046022100ae3cd8aa966095166e2bd2d9ab3a20ef75e94d24f47969a26d2aef54181ab156022100c3dd4a5a050f269e03d9d7373414f82c888d1355660f98a1304f0f1fe7c3588a 30450220094a4ca4749c102fbd47985ea28aadeb71b11b6bdc001b22b4e3d8da354d3af50221009d101756f165bf6b6f7e1002208191019f41fde45c2e70c05515432a5cefd4a7 3046022100ef62c049f3c981fe0c15599047ea84a399c8b2c6acd03f2ed42f1b5a11ccef04022100cf007d7f44eb725146b61bf5be3f3e5baf349f56836cd84f59ffc79066bdf386 30450221009ca0d2b7590987a08a82a4911f732850fa9c76fb32f1805c1e892fcc1a50aedc022059830e3aed5b9ec6dc9a87e349fdbb4ef583eb7ce26d9d9e2029d1c4f57d383f 30450220454853ba1cf82b6ccef91ba0ae874c4678132b85d126b2d05491587047c12244022100dad198c69b957ba17aa56dc62bdb051382903169454043438c143a02e48c6bc2 3045022074a3595847bd83db085505ef87f3de32e68c05cca116f4ea36c6614a3928a0b2022100d67f64244581faaefbddc9868e85e5218c8c09437a858a9cdda5f5572fcf4175 304402204103651a4d5f405a628411ee5c8d8086ebc78e740ee58bbfd8e8605f30ded81b022026c49a07fcd40e1f9ad12a2b5b2f843ad8d38ef12372442916931a40255fbb8c 3045022100e24d08f2c949fded83fa3855b4267820893b15d5830781589826b68077b092c002200507064c3701d1d92045e505e749d06d39b38d246e590a0dbf4be9bf322caf4c 30440220337a9f86618f9230b656faab3488fef50912c4c5d8f5580cf6e5e893c3c7c83402206a6adf1239577536c367a3a65fe30da7850eb877d8bf5512d0e10c5ff28886d4 3046022100c6ee5e96f5692fc978c1045d11fb1a902ab8572d528149c11164e9de9a7238fe022100f0deb199c3a1e53c03bebdafc89fb87d837cc3b7a5909ae8bc8fa048de3d027d 3044022032a59472291c033f9be625a9bdde39f09d80765abb2f6a733ab764fe7852bc4c02206a03c8aee7707d2ba59b44626fbfd2aae23344dec82af482d18458725df8f8b3 3046022100dd4d3aa6595edea163150d06de77aeb2a494b5a14aa049422ca8e7a92a50a44e0221009555cb7f5610d2b6e16d76991ac0a0aaafb51a1f37338d5e73f20eb99b1ae840 3046022100bd03a2db189b300398523f638949871ced7e195f4af8978b8829582943945e88022100ed9162416c9b55aa1295077ee71fda93a691505cdca0508d37c029a1431104c2 3046022100e26d8b56252db132694357c8d5613a8f00e1c1e87274f8b21326fca074576ea3022100d41d1d8378cd8f8ed05b9d6092c47077bb0b8b587ed529b6b78395a90e961769 304502200096d3b45e6c8abf5fabe67229ace9516c9ace8707a8791bfff097a9d0833400022100bfe5fc74b2646bd977078636688fd9a475c838d29446a838eb15a147ed9d1ff8 
3044022050654cd94921004d75b06474e4aa5ed0e8266c53c2fdff3aa1cda1860ba61f1402205d72e41230a2cf06f9e3a04ce5b1e6814af79d0a3fcbfe8d40afba480a427a67 3045022073eba1ce995f83ca633b3c91f14e26f9d96c336079e3a85158d4dc64b38396c6022100cf32b9c984b50e8878fc70f1460bfa7e3a9f47fc22269cf8c992e544de2f5a18 3045022011d55ee001c911f9be1bed60fb4c074f789777663bdd7e33bbeb78a3465ea6aa022100b4b8ec0941d938e1808b13846f5fe11b22f797a72725bfbbd28cbb36068f7276 304502204d244ccf16fb0180227f3fe3a1c709255b6b5ee9867c5d361fb8d67498f5e7e9022100808ca7779667644216105e98fc299b35ce48ee212f785088a45188d0de50d990 3046022100c36536d80a9674659dbced3e5aba0a92bc1d457dd720624b4cc02afc3a69bd3a0221009568efacbec158e7c24fbee17409d04b99f060168c3199b8ce21979d82f0e615 3046022100fb41c7e0d558aacb62c4bd468009c3cf9bc158a9cc87ba399adb7cbecf4db243022100f28b1031f038a8ca70cb514cad212592c950ded5480b60ce96da22ad36e63c3e 304502205eef3ab0595cddeaaa507719075c29b9a376ffaff3d4e538f8cb936903bb76ef022100b436eafb5669e46af6c25ade96041385f24cfca460d8d4b2dd7d12254ee9903e 30460221008aa580b05e710bbd89ffde7728a9d3cf281102d7d50d6d338b3c4871de099bdf0221009efe64f7d8bbbf9d613cfd687b872e519df24ae97b0205127d4409d2b5b2c1df 3045022100c83030b903c32b23948cc1208c67aca9a8ef1817e98e1ec77fad4b3f44a26ce802207fe36e676269450316d77fd97972581e897fd9ed9f5f8008677474f96ddc4928 3046022100c5951bc3322bfceff980a2ba87b6b73d77eed724300b7b2f0755df264d2d0ace022100f44b5a33742114c6f39ff9ac813792286e4dd22c270f08235ec4213344c18230 304502200f8657c7ca6bc54d2b5b0f67bdab86d070386a7007d3c1f2cdbec967e8bd89e8022100a7308b93bd408348776883b49047b0d47e151dc27df13042ec2ec35a9966b0ee 304402201060fe9dbae6f466c80e5d8aba596c395f0c5fe30f3f63625e32fb08a911c08c02206a3c12a3285ed2742f4294c36494e06e8dd4f55466a137d9ba163e7618c9c585 3045022100865a7a40b29f40e11537dc0e24fb842b779a1465afdff199797b6b952000460702205246f3d2eaa888e903ea44ecdf11f934329e79040d6224b0d931fe1a17831b65 3044022054108d59bd64fe5a516d45fd046b77805771105ab5e2dc7c760057934ab691d30220333a257463a4a5dacc7498982c6203cdb8a173561c60958d132252bf68df1f65 3045022100850046ad8ef0b455c79680e7526dcf560eefafcb53891f228a99a90a6b619edd022077610663bc27c673e59f6833b10c53e9b43be483d846331a7267e29c1176e771 3045022043d83ca61cffbb75be5cadc96741176a4d3d811a5c184a90f5b8422e0fd3183d022100b61fcbb98fbd76e2bac2290719a4b4dc9a412e1a477c8fc54d68d8a8642335b0 304502207ae76650fd9185e4f1801cc5d317f4f7d132058a0881a645da01c158243bf555022100f26069bc2075ece59be16f01225c4bead58e2dc0ebf756970ee1fd0f334e20d8 30450220422e4e4466b2dd84293b0d11bfc93fbc10d2f3411b4a33c96b2e684f7dca87ec022100e68cefdc9490e3edfe06b0547b0fc29d1fea90501e3007b21daea8986f8193cb 304502206521b3043537e72c03d402cdba3a78d998ca35a85035d38418cce11e97f638df022100f31bfb929224f15777ddb6d3b9e59ae27762f48e49879148151534002fb337ed 3046022100e9b90fb4f3df0cf9fa6febead30b19bc93d8a87883f31ff7c152ba7173b5224a022100caf2033262deedfaf7b9d071b8afd18bee7c328f6ad4a94185ce6148e9fc143c 3045022100f6a3d32608c2154f14df30a34000c12c5c5bc80bb469a35b7fc05504518a3801022063390895c0833046a5bd9d244cc5769589f99e73f455327621311483ef5c27be 3046022100f82741b1f7f5e090804e2277c6b778acd953b2e527fa6e4a98fcac82082ba81f022100abd785e42dfd7f5e136d111a15f8c04dcd2040f93ea4f90bb3fe140da96f3e9d 3045022100a46010b3c1ef7e6b8571b009a8d4839190d337876a3f757a7c6448efbd5b9d4a02203efab558a9cad33163893ebe6e52f774ac84265ad38faf83efdcb2468ba7f2cf 304402207230203a85b775e25bc7f176023d546c6ca81ba7e9e4171c717095c6e70810c4022025359b0e1fd702b6dbaef5894f70babdef422b6cd7669a242eb2a1c573ee81d7 
3045022064c5bb3d5256d16bdaf053148846bcb11a867ca3f832320f48a5a455a91fa94b0221009b0e8b0cca3821ad32ffe4d2c3b785676858e92c96e37bb09d7f8ef3ca49faf5 30450221008b153b12c70a096ee4092a5af72ad2216a4efe620b7cb4c2f2d20b2e8e2adb8802204ce8631823c5d04dae2e5df2d4d42547222414962ac2e3f5fd3f4d68038fd868 3045022008fe735579eec67c9233b53cae7483cfa9763afb048b2bc19fabdfa6b1b887690221009793093a42b178e6dc05d486ddbc38ccc5644083e2a499f4517f09b54c3d0b05 30450220488d8dc6684356415765f6abaf88f13dd73aaab7fdb84f9ebf91e2834ea44bc8022100bdb3db032045ea697db920aad1e1a920b8a7533a1f01ac7391472e4db3f8fade 3046022100824052563d435fe6200cf112702d5adb0992327b5817d9ba3e9487d8f009fb6c022100e0fd404566b1873d75363b70a77bf3902ad8f8069423394eaf0dd538bfb75e24 3045022100fd0acbdbbee67fe6bd0b57df3491c2dd7553fcd643966687bd4b1094d033c73602202be9d2cc8825c3b2c47230fcfc2439a5477f971d9301d08bb9119e245fb5d8e7 304402200fc4b15c94b56fc8013c201a728b69aedf59628120c8d8074f8c043fb5f79d2502205368923624597a8a6aa249b7d1c9342840500849660eaf0e25434e089e76c4be 30450221009525de99e82dc7782cd8499f077f8d7fa003c568432270a42b753411bd77dc7002201d23365f878d0deaef9a22d37a3ad91cc2095559111c484ebc4ca4e68ed6e59a 3046022100a2222bc017890873e865dcd3fa2e9f45e2509c59fce3d17620b8b9f27667f1cd022100905a94c098958da7f820f4d2449e5d3dc3a4b096d750816da28e49a68d9a4d79 3045022100dfadf526df433813a060ef5265c213bade1468f8c5539cff845bb2849ccc51670220583cb4f90ce2415b3b6e62f4641b1a75a65089079f78fe7d04de499ae950eadd 304402206c6d7f7c8379916009dfdfb8639275ca8d35edcfe71cfcf0bad7c74a5e633488022019372c4c7f09d4b430755594e20eace2fec97aa69c6ef1332df791cdedfce128 304402207f46199b4c04ac2481a2ce7705c3e27dbdee9861f51e5029264f7ed7fd9c729002205eecc47bc3f8296ceb09707b4a0b1dab4cb4d1f82cf89679c196f767b2ede03d 3045022100a55818cace9ef09004fccd653fd3f89995a55f94c1bab9f113dcd12f3aa420f802200d2c014fc59dcd2703800951f974917db0f02be4d38ab6384dae4f6f53f8c41f 30450220317975d8504238c3b674198102fc4cd07010453ba3c6275e20bcb65350d340de022100d48ccd7061219697928e8d467041e63bdc9e96c06ec9a3f9adfc4b5bd12d8f1e 3046022100c0169020e0e72d14c876a3c4c6b88cdfb5cf591f8103f3d84d4b6638623624c0022100e9862ae69842127717f6a7a1e5978b642c8e5bb93b2db65007b00f2f7a6cfedc 3046022100956b4cfea7b6ee6edde38cd34c8dc28322a34dcf570f7b19dd88b19d89092580022100b52729e0297d6588319eb878366c4940b0ca4b2e862047cb51fee98abb58eaca 30440220327fcdf82fa838a5aa3b5bd47e285a8f94a692fd7b30bf15ab5940e723d95a7702201d4c59a407c2b18fda0d594c2d5e0b7fc87b25484fa801f7672cb54525b3e586 3046022100cc112254ffef8035574f84e89e808a28aa246cb0b3c7237dde40648b2a964e75022100d139c2e0b9d32d40a00c62b9915afb08b4c0c63460bc41292d1b913d2ac5d71e 3044022069267010b54732157bc78787f5a8527f5ef8c4f988531425ece59d3821c9d85d022070cbcce7770e578a23cc34c2d55fe955336c70df2187367188a1626c1d6285da 3046022100816c60ef867d4b2779d71f3a38b180b71d2cd0877380faadb174477d250ceccc0221009287bf882ef537004813de1de3f554395edef09afe8eebbe5e0265a9077e4c2c 3045022055cc1a75969e148959b7bf2420ca00112ec6c47031f059a83ed037bac183ca69022100c462938b40ba6e4c74ca1eef9a9bb0733ee3aa999c91e766add08e5914d202bb 3044021f53827a57c163fcd4855ffac738d99dddf6fa13070a724af8ca7ce83e6260ef022100c690e1dfeb99d678a692455d8ac98d116aa1da7616db7d702933efda7a53c1c4 3046022100d86b35dc71de987964bc0190e008d012dcf686ac466e46e0326ef02e471455b8022100a630b196e98b2992f3b9248051349193d31ddb8b0bdcd6b999e8edde5892ad33 3046022100e06390eb01684effc9f2ff780a0e1ef40e8f657f10ba0a951bde4ed9889c8582022100d30c6fc63466ee3ad8086d7d613fb510461a5cd3ec85f488235611ef3e4d56e2 
304402200a0bd13ea809fc066c31b43f433cba547776f3df2b5df26cbdac98fdbc1241a40220516c50114a15c1e3bfaa5ee10c7fecd500c10dedced639756ea9a4b65ec7fafb 3045022100b9edf1d3f28c77ee1983968ff51bcd00cd2dcaf3589b3956f5f0e704293b6d7202203b816af618992a1277e62c346d4cfac8c5de99365aae6b667039ffb514040823 304502205bf03cb416f773e8b6637dc9f10517aa62298f21e1530f1e0c24c7b74fe3fffb022100eb5ef08c5b08f1411b93304b3401125c999c6bf2bcd926592e73745a76eb7973 3044022056c241a456af8f4f3e349b05b815d68ef125914a48d7783351229f6a01fcdfcf022012bdae973542611268fc794512c7573efb898ab11786396aec278b065922c9c4 3045022100c23166154db98fc8a285a323f158cfc87052f823197446c2f261f472796fea3802201e12c4d7849dedfdfa1a295c178ee357edda2c2f3b25c7a1d308be07eecbc80b 3046022100941668369a4b92b73fea2c4cd3d3acb083c5daf12ac5ffb87057d3369cbbfe57022100a8262b3ab49865de4bb18d349ff309d5dae32d87ec59c1f685b87db174d9936c 30440220398c1e41eae2396badc5986d76175d8c82bdc01139db0c04ab35e550be136802022072eb041f12bc018932e66055171643ba6cf6631f86be45b7461c40298429c267 3045022100bce52564b2e266330c1341574c7e7add9766e2d7a6baf1d72753cb5b44e69c0b022018cee0570f1b1dc6ede0c85c2a7dc5650e7fddfec59a3aafc18f1673281ce0f4 3045022041c0a654f67cb5ba1de5d2041a85d56a6bacee4f4ea65e60fb7242de3bb100bb0221008e11ce96b3769c1b2e63e1e71d225df80248751acbcf86479abfb9d7e4cfa53d 3045022100e1febc116905a3b0a6aee65168bb17fa554fb4d967a79d512cd197b4637521e602203dc95c739baf4a97bb5a7942119aae8ea02e4d8a3e3f10271a6e30f210f7cac5 3045022100ba0e48b1c7e9b1642d86ed4c606378dfe6f179edafd7408688efdea555e96d9502201049c6b301ffb5c0dfa03345402cfd56641d1480f71fb4e8cd06ba680a1fcccf 30460221008fe2878d9ac9b7791884c3d4620dbcc7238252864b57ea25e8b596fefa288f0b0221009661fee4b54276b0537a8ead22165942e49705e96ec830bf67235c6da002b1b8 304402206b076aff6a5704a85c5bbcd77e64d1afd95e34468ac25cedca2e2780b42b73f102202bc38675ab4e5805854f953f36b6b48e03ab8af5c3515d648d4a3e43031d649e 3044022032ce60dfd781762f650c2dbc1636762324115cf2ad5154d914a84dd114e7238c022022ae2e0382f5237b3a28438bad7d3f093d789225af7cf86f306f8d891ebd7f73 304402202da13437de8dde33c348cdfad235988dcbd304ab45de96ed8f6da084d7e708e602205bbd857f31d072e9ac0d137fc2ae42b7b18e310997d8953aaf04b9fc52fdbe88 3046022100eb4ce0aff31a68a9963953055c5cb56f29f7c2304949eb1a2cb23497fcdd3ce00221008cae3d74cb61801aa9e532d2a0372277efd924e0e570b93fdc1237a35bf100a9 3046022100f3304259dc754c2a6cf9ddf0db2c8b892390c5d0b6f0b3f438985c670376028d022100c8d4c6d28a434ead9ac1f65d85f9b7858da02010e3296df7e80e11b1a41b4ef9 304402200b26a94e81dc9934b6a2a28b8e17e555b176c1989fe03da2262b85526bac9abb022013812db387d8860e909506f12512d232820f418438186e125b4b13dd160d20d0 304502202516b1ba8630844dd3ba8e15ac4d8ba50549b2f4ae4663444f50015503f9cb460221008f140b2e118e9e97a4c845b50b1e45eaf014fa6f7f0eb264cd791e8a997bdaba 3045022100c416e21047dc5d92e8513baf6df0384bc9928ed29023ecee8c702f98b6ccf58a02200298416b869ad33bd1e72d0a46e775cae025f72eb8a6cc6e964912cba37c57af 30440220323b4660ae80c22d913c48ffb6d99f1324bb3d07911dcc1560b31b86424114be0220577b40a0b6f890fed92f1033bf79b30ae75aa8b8f50bf46736752b0a22d7d2bd 3046022100973997b111636c7af30520844358d6752998174e9eacafc74482a667021f5b77022100f9a00f5ba23eb22691cf893e9a317326c380020e1ea4a4a3dae6372b75198c81 3045022100c396a145cf71aac67291b36117a05b99b80b4de36dc243d96f957a461a9edde30220606e896fab8b73b7b7976642dd841671be425a633092fe6ef87f2292e1f062fe 304502202c60eae5d1febe31d8d70f8f17d7fb5340c78ffd5228d77a47b2fa5f809fe6aa022100a53639b57344f9e573c7faf432b87260ab59f7ae4a83a945df08404b05c97b69 
304502210097efdabfe35ccc679b1d9624f8c221c58a01953fc17f1c6393722ed763378f3b022058088671c6df22e2a6da1cfeb597e6dcef7c836f6a459a6f3907dd4f05d401a1 3045022100e39bede07844b2aefc7c6da3f48ac985d68e1e339dde820694bba44a77aed1d702205e0c0498e161cbc67aa381ac46c67a5745e87c207256a29a0c884bdd907612f5 3046022100964bfea407585184eaec02a71757b133a5f760897982183fa358796f1c3634ca022100cc31c56d633e50de494d14eebe8411b7a2a5fbfe8ac2d36663f951b325dabf5f 30450221008704433595d1eefb83ce649760f9462c5877b6442344b9e561965c40c0f80f4102200c944ccfcf394b55309eb095693ad0a58c40b02f2843b8ef38e4ce69793e860d 3046022100df7c9f89f8c6004b72e9ca84c581e5ca09c6b5aaf67b40c04e3aaaf3b88002e2022100da0e8831a1ed7ba01d75c5792d5e146f381156ed184491b3c17d02fd9ab875d8 3045022100bca29c75a2d18a3e9734ab94f214f758318ea4d7d9b650d1b76d1b76c619e5bf02207fc7ef88a9abe5f91b9bb2044256d5eb4b5086b9ff2515797230a602a2a6a8c6 3046022100d2e70d3697acdd59f3acaea9595ba00bbca2d7ec69e2a4643c96d86603a24300022100addfa2af30654e4cef6c827fd7d34a7f16572ee1c0a4173c333841beb08809c9 30440220292e143eb31154134985022121008b542287b7fa6dff7ef83e97d5467160cb62022073ee866a3750017430cc75065ae5ef8f630307eb725aeafbe351e6a7ecc66bd2 30440220502343ed57ee97270e320053e035371b7345adff0b930b7ffccaf023a28ccfe60220265de204c87e748e116137b6de3e3c19f4d6ec56459ca93d894defce4dfdae50 3045022100fc9168d5114720d0ee03564286ca39cd351afb88f281c2e4df0bdce001754f570220254337f713a26d8b42a22648bc98e5a1771dc8907e21ef73d1ca0af5bb7ead9b 3044022078f8827e48704e85e3509b02c759b2c35d0ada7fd261e27df2fcd32cbb2ecea302200389507859fe203559869d6756434e9c918afc8a1b36ebb9245eb6243b538c5c 3045022100d97bd5dc30b4d7b1983eac8c8a98a0caa70172e51ffe958aec8f5d8847dc0581022063b990330e6ea7f80d69007f1f7b759d81dd8a8ac0f9ad807d5d4319dfe3590d 30450220656b54337a12f3fae5a791972f5c9975b8da4053998f1d18cae5d6dfbf3ee10c022100f36caf4a253e0a58fcf5c474f39c55259adbf8a61375c4a306d8b6b8aac66ccd 3045022100f4a39e9f023375572e214ac13fb58329b5f33b9c9ba3999483029348f87878620220764967d9cfe300371fd9dc67d18b4fd8a088d6032ebbb84a488fac4bf2d66049 304502204e2c6bf115a75f70d31bfae0a729c0d106bf1eda6e2669bd42c88555048a9572022100a4f5a0b1332828af50385a4ea50df1fdc5774259e148ba743873e6b132bec8d0 3046022100b4cde67bdc0016e0b460e3fc9a4713182698dc69934490d3934ab527ed439898022100921499e63095bcd2d7a9718248314745ba9a5ed3f08a0df94b960373d4c72e1c 3045022047e678929773227e7d906baedd34dd6682d81bbce14528afc03a9d84226b6bef02210091dc444d40b0815cd2c97bf173e243d55dab7bee29815e343480c053c8a35872 30450221009aa9950a9dbf8036b10b48af7d51768650c57598476d7d67ecfdab0b93a73a010220233f558a877215905ff4c24f97d293d3c1ef19178669e5a6a7fa194a0e03492a 3045022100c7d64229e9c208b7315a3f50fd2b392438f15a552311ba5663ee2592b044c06f022072d9ead1329cd57e95f5e61700ae969ee07dd6cb09cd2ae6f47502a6df58ae58 30450221008425cd17cf34248097d78bf4f0c4c01121aac22f0464cf0ef35cbfb7cb5f5d9202207ced0dae4e009ee9e37d4b1f0c4be484ff2c88c946d46577a8a447b79163c686 3046022100fb1f19537daac399dea07eb0ceb0dc9dea0747edd3720e50a4c92c5f50b1bfc5022100ab298a507fffd34a467943f629a1ccaa7f8808db35570d0b0202df0bcfacfcf5 3045022100c6c988ae8964522a622943c0361a85d213f7afad62c64436c36de714307b61c602201203475a96858486e968ad5cdcdd760f00923c40120a5d6ebb5be6e01888f3ea 304502202e9105764aeb2cbf5108090b0e532936ea7ebb6ac0af83d915d1c3fb8297ddbd022100e3fae92b34ba82d611c728c91ce893f9fb231b91a0f816c78c49caf20b6f95ab 304402206c13e5f576e1ef836019efe62abfc8013c5f638ed084a612a51fdedfb16ec423022048c38740c4ddf39ba26aeb4ee29429811404b27854ada20c00ea8ebd4a3d52a1 
30440220266f6330f429409aa447a9aa3077f34a37982f2d58e8db13faffd3e34d3f415f02200f6a3eea161915ccfa87e22ef795a4c2ee733383bae35f5eea68d2f05506d05a 304502203a6c07e6fd2e60c5aa54f053fc1d8047a0f9d7af0713e8cce30194cd43c055ac0221008dd8103f26c2fe0a8c5eabc20a53f61fbad1102640d12da8136539484af9d10d 3045022038dfadfa3f227eca5a26879459df0f10cce293e148e9025d0d4191be01c8e43e02210092835ebddd81abc916552afb4606dd7c6a97e1d226512da7c9e3d68236be21a3 3046022100ef113c8e89b5c32b764f256c7d228d0408fd5368f175445ab4af1394d6992794022100cb55135b5b1d7e0db1a89490e5419ea6f8203bdf666bb2cb9dd932c88246cc7d 3044022014ef85823f9f53c5e323d834cdf0d4fc01f6f01c9c326d14180da427f4e01963022045d06a77151c8a4db7492d3ead9cea19457340b51547010538bdec9cc5319564 3045022100a2880f9fc22f5adcae075ea442ab20c94ebcc8a58376af0a4572364c73f69f590220592eddbf7ef8b132ae002ba5e64519ea240b5a07fafc4a34e237eaad04637ac3 30450220086dea52d82c4b057f70f46d232a95f7282f6037d436f55f23d67116a9d95ef4022100d9414293c6b30cb7a84b0d106af90950fc8aa776febc57a1c94fbe9c72826418 30450221008d7f4e98cf2863d0d7e6c908c3e8e3623c70a8b190a6b5910f3f9b4fcdba7e37022055ea460d61e46ffdd38b924c46eef3945a1dcae49def91503b0dfdd0b70c0152 30450221008eb78d0f349e4a8424c1d43c42f253dc48a729af6a42c84c9334662f57db37b0022018ff8c4bea5860ce7a72f6b8652248f836c154bc98b8f9a2941f46932eb04173 3046022100f76289dff5b9bb349ae03a198ede689ac877e3d0070653f4c57ee52b3ccbe4ea022100b41e0dc295894e45af31eeda0f9b7ad5b99a28e4cfeb19c951a60d2c0963dd32 3046022100c34828e0746a9b9789b2cccbe7ec76250f995f6d5fac03bb20264f77fb0ecb6b022100ccf0ffa915113faccf7749ac3bd1bd67564b46d28619eee2dbfc0f88d36d77f8 30440220128a344d96d15bf498ad0aaadb6d28f003a5825f8f9a3b65746d570ecc5c7aa302200693727cfdb36d36d54883d79ffecf8b48e51f0c50897bed46e67007e5dcb73d 3046022100e6e85df63177147e6748f9f27f020efeb1d18e1aceca748ffed9927092a5dd9f022100cfdab668d6b450973520a0e342166b0fdabce6c396b7407de99e892b35fd807c 3044022040a208cc01121ea5d7af54b786295c16cd1369324c514b637f8f53f54121998d0220709ea17713a8d98e3a8250074c199c5c5b22c057e15fea71d57a1b3673cb3d01 3046022100839f87bcb52f0ee3f53c43dd48b1b76f17a753df2ae9f8739dcd6b7668bc85f7022100ba8e9cf79d499a82c8d28dd18731ddbf4d8fa1605eb5c8df01f7c6c57a5284e3 3044022023ca79bded467f7493900c5cc5ab355d0f92e09d0250510e9d454964ad61f654022027b42646e5d7da96fe2a852704526c6f7adfbd308e0a182678a2b2f2138fb89d 30450220354c1e13a6a96ef9fec7c295fba2a189349ebd72133dcb2cbd3ca13872e30cf6022100d8f5d9d637d54c6df82408bf50bce668459801222a3e39c51821e2ea2abc1c64 3045022100d2602244fe8171c3761b31b9a9967ece44c9e0fd6613a5f1e3f5f73491c2108602201e9eb1b667bf97d3b75bd1d5f91674c5181d5d2d286d895f5f3f530644ec7419 3045022068c9681e13825ce50422509837acca89925ff96fc8fcaf5f2bda53582a85b2c2022100c36bb43ce4a720de33ebb1cb67286df0a71af07667f17866cc20a5ea06ce5de3 3046022100c48c0c105098031d1837d30c1c5dfc39e90b4129b61f033b8c2bebd13fe1a612022100b2d8bb1116cba00399756c59ee7c64c85d7aafa1befc503b7d96cf53ce067785 304402203f208fa83895ed0e947133d54a3de3528bc3629fb00804d8a86a9b9b8001de32022006a422ce2a2865f9a4cd89c7d76bb29da1f511b885d1561bf40deeb5c47a1426 30440220238b93ed9f5b0baabd8baa67e584eb3013d24597b451fdd43c6890111fa493d202202e37f5dcfb53a2adbc1ede4f553084dd3538a05e389232ca49da2436cb849713 3045022100f69f316f0e330bb446eedc07590f7f21cf5c0cd0996a0800bde6f2ab8af36532022015cb36a586bdf804aa690b818345648ed141c96eaed53ea13d45781a7ad625b5 30450221009b6a3f0af6374ca811deea7159d2cc480a05ad18d7971cb9f3071871c993d1a502202436b0786e90260cb5090ce5b938bf4e3605cc1cc12dd453c2428ee5b3bee7c2 
3045022100c2a83d4b99c9bcdfb83366eba70d41f209702f1e670d72b88255df054f2c949902203a02df262643464b382e5123f10d0886e89da34d17a302999d0db9e9b2e15a1c 3046022100c8ff7c55f98077e92c3829fd6c5b69b4d19d2311f47ba0a00126be27df1906ab022100b111eeaeee47ccf8eba8956b7231ff03cd787505c70e28e7aef67ddeae2c9831 3045022100c90ecaf632e08f06ded2c660d69d388dfde566b74e37a9a76524630dc04f534302206955aaef5557c7453508710fb399e2cbfceb2979e533687d5ff2e28ca7e9479c 304402200ebc5930489208746acc4313129346894948bb75787ddbdef80b33d2ead03b55022009644661f667b1e9d895d56ce5efb6d54e2d4033aa58706b730faa0c46e3a0be 3044022014767de4779724395fb296aa8d8ec7fa1641ba0e121d363cc298d3e681ee19f5022071f43328050a2eb7788a891f730a921ba6e5956549a15ea70b09810779f2820f 304402206f2964b86a5793a1912ef759ccb7e329c59852923b270990edc1855cb61de38c022004375a375f4e649c34cf5b726eefca8dfba6b550c3f31fa337dd993e0c428a79 3046022100c81c147f4bd31c25f3990abb198523d9c5cec3ebbd90cf34027714dd491d9233022100b80d0434358eec8d8e9c87f760621d12b81a738106cfce97dda66621d028d05e 304502210098293fcfa6e514aa9d3de3d9651a5460eaf7b416626188f36e4396889a320ed802205b94a5024208c43feafd9246b468f8af725be40e8cb8618fcbbef15db9fbec54 3046022100cedbafd2809171bafacec5d6e02f6e6f8ee7d14ed164525bffb126b59eec7909022100be588ac6851b9d386ac4232af965447b555063ee7189ce8dffd6d8d0902fcac0 3046022100fee9ff274367b0f42204498307f93ace92abb02718960a5e718db67ca1105622022100d8abd4464df5686c4305b415c704db417b692438482f58eedd8d15889f5178e7 304402201d9d814c53237737d04836d2079aaaeaacecf2b63fea66bdd9575b5472392b2b0220308fc239b8049bd2761bb08906dc412b3e4f99529b0ec200163cc37959fb789c 3044022010377f3bd396a931b27b7ac0f4803bc94e76a6e8761bcce9bc37f586ef1676a6022050042d5e996bf1b47f5f744bcbb8d9fca832e5ebd71be84c749a70b56e1de8f2 3045022100c8d9a9d5c9c4a59adc583eec81e69b9d4becbd62de17edb6b20419bd6577f888022008a40c432d519f82bc5f3b9b08182d25e1f5924c507b902eb1f25402dcc73816 3045022100d93c9677cf4e17c7d0ed726593c24fbf46e5b46de3c1c452b823854334589e1d02203d9688ee20f6e26ba063f3bd61ec3c26d19a3cd20a7e27ccdcd59cf2750faafe 30450220796fe62709976f463f04a8461f76d826e0aaf4323ba526994fffa0732dfd265c022100ec2e3a40ba01a8e205d05ef97e8cc5a26f4a37bd76661c94eeb6ad3654272a62 30440220401712f33fba4fd19d774e39e357067c4f830ba7a7cd08648ad1a3f3ba4a6f5402203e778eb802655680ec2c2e3bebcfafbad2e806eb71e2ab4194f24715a6f733a6 30440220388a47c74d6b70986f25af5765c7263b7723cba58970c1079aac5a9acdac38350220425b1ce67b171261d29f744947ef93d35e5fb2a5111fa0c4c6ab23aa13050837 3044022010615891f24d7d7848e3b218b8f71932b8901799521633212c92a3deaf2902c10220357afc244ddceed080e676e2b60cea194b0c70fd5dba476cda5d2e79fb49f681 304402203c8725f5b785e05293649fccadb7dc80999e458812f49241476a92923de9f2e90220408892f854bb9181a60a662b1dbe5abffc27ae06934a24be15a12cdde843d17a 304502207608d26b2da5b5233e2f712cc1b952b754cea78d0e2cce1509f395b4e1e67bd702210081784a369339549c4e9185555c5341a5aaad9deafafa94d5582351b37331438a 3044022022800866ff942e29265192e9302b92e2c325daf9c996b980f64573d3e0d3c817022033479dbe9c817aa69b00b570cdd51a2c7154783d3613549ff9ac53d8659ee45f 3045022100dfaf6f7c7867969beef6e918d93a8a4b8e0f866450985d27a94ff8a117badad602204118aae08bc4316d4f27e1e9aeae73ac31055eccb8d840661e505b6afbdd0dea 30450221008a105981c002bc690a86c814193f4b95bd547971c3a60d2d96bf641df0428f3e02202bb457dc20fc288cdc5ffbdb66eda9914aadc1f51f0b622a303b692e8b6dc8e4 304502203620c891a5d02ad15e0c5af11ad62d2e0c8f23e9d5b20c5cbffe69e3e0180904022100a90473b782c3edf4a4c52820cf10349843b3972303d79fbfbecd1b5383f8e774 
304502207220d8052fafcfd27d90abd70dc90d3341690c29be59272404620cd5eccc2092022100a3b4f0c10e951ce49272b5ed631214040ac945cb71f16fcc9ec970d0e44003fc 3046022100ae9d735ac6f8e031fff320e96d52dcd7ce7cf2405d614fe03a85177be54e681e022100e185dad3bbe052653b9e37557d5513158652383bf213f58aae24baaa22677365 3046022100976b5f9f0a9e3189ef51ea336c56d050d60d1de8dd63446ea80ddb0149c944dc022100856937636760b51ece6005a4b9c4ec8139cd06a047adb810934c69805010f124 30450220286917ebce635d990948425b8ba427c9921f621b6a3c867c2498356989bfe838022100b499a23efedc016e60b2df83eaf310a4f6079d16130a1e632cc72515e9489c65 30460221009c0b5e19bbfe6841c9c6e3b9d2ef3a10360d2db0f32177691ea3b87a5a17dc55022100e95317f905f4cf46f7c3a261df6017d44da44867e5a93e9154fdae1c2257f4b5 304402206b07ce24c5bea41b26edffbc22d7fec827a463b812ab4746f8cc3032fccbfbf7022039a6c00d38862914c937a73d96c2be07a5767412febcd5a6a5384b9b288476bb 3046022100853764b90f58daca7eb05b66cc51c8591bd51444d76b582200cc4d509e977a32022100de50cfda2ff94fe372560d593183427e938b427e15b6ee31f43494f6428de9af 304402200f8425d4c6c0ee606e9d9906a05cbdb191a4e53650ac322e88929223b32e97b802207af1ae945036494eca2b62713b5485959660e3f8623f6b5ad46b78dbded802a5 3045022100b22306f268e852f4549511320c796e6f9e437ef6165fff9244816aa115f3829702201a94cf7dcc67e5095af679271669ea2ce6a66b3689370d0c1f0dacbdd4c3c7cd 304402201c3991e48af1e3d3a556ec062b3a7232690c6aa018035e6da7d6855586a6bf1902201b3b9b4899f5d845da0c43797b2c8c9e3d1248c9d69025a72f402d0544a29824 3045022035a538b003a670b96a85fae7b52a66be432406de525e46e8cae26d86d8cf7f9e022100c665bca32503b351d0c1a8a04f9268af73eb8b3e51bd0ed48ae483e25965701f 30450220674ac7c3f13fcaa0fce5f327ccab5b39e8c802750ab5d9f55d4821edb97f23e302210089dd99b5a40e011be491ac23a301150b69b5ffff01f5ea8a65670653fc4c65e7 3046022100cf4b8c5e01917653fcf26d591d6bd1ff61e7ef7195e7b55583c7662efebd6312022100a016c14fdcdeb3741f8dabfc3a6ebd3754b6a558b77aff6b71fe3a5da3a6779d 3045022023b6f3927edd4bf3081aeab67d5f0314454ec9b0987f5219fd187b59ba620e00022100a3a41ec9f02dddb664b1b547bd915262f2544ac148ecbf9e83a5650da5ac5bd8 3044022025c860606f7555d01686c3c56ced83cecd2435eea011875666865091c7d93ee702203c91f70f4747ef42b87d6b0e5cc13b13743d8fb67894451f29e6e293b51325d2 304502202c53beec5ac2c67f8e466fa2c2335ddda91c79ac6b200b5a60b1be94272ccc2402210096d4a651463e8f4568492b2a7cd5929c4db5ee276d3b50619b042518b33d8587 304402201fe5ccf7568ef80475e802443d5d47c7a3ac75964e054e0b0a82f0dbaf6e4fd902202732ecc1fc3710c5f6ba8f2ab0f55b3c478fffd0a96833a6546053afed2eb044 3046022100bbdcd7b9c6f0ed32df57347bc77af049a60f4147097c6577809cf77b31f4defb022100e0f810ed57203e210bbb44dfe7b4266ee752601403e7b7ad3ee8ff04d500ed14 3044022005a5ce39e27d43f9a255a76df903116c2c0bed464a4b0bfd98a4ae0a1b1ba4e802201cbe106260243596bbc33e6278c3da2316e31b978bfe1e7899e1a423bc66ef8c 3046022100e81a680e791349db450645af9e3eeeb452339ebdf8bc2df93fbf80bd4dae9068022100f306d22ffa7ddece2ca720ab41645451572b158ee7ef5846cabb7d621643094d 3046022100ba93456a494ef66911cca36d0668c033ef6e62f6fdc1f4bb144f6332bf82046f022100af2cf7a9544b2d1882d9de9149b24ec7376e723d82b4a125ebea968291a6eefa 3045022100a0cd127f7dbd037abbc75351b354fbffee197daf3fcccd5f2cb58722fd774bc702207076f10e393ab967e3fb843fa881e8aef2dda30d4319a31209d38d8d5ea5c762 3045022043d058da72f1827d22f31a0426bdbe02485c555f09fae2a2365e09a59c33d015022100b77d856f16041eb61fa56a85b0a85066c115de439791ff63cbb8ec131f318cdb 304402205c17f7ea39ce8c5043e4a7e00a3305cae68e3b9b749f9726d49ae45cf2b76180022024f8a6a5ca61ad72f4f46e9e1d3fba64d9d01caa803467984cae84a58eb2982e 
3045022100b040d3cf17e8dd5d438a848246ee767c5fdaeba0755f23b5b95a7959812468f3022035e570376d79394ef1d3c8dede88dc88d55d9bf158f023386ec5da5ab6afd309 3046022100aeada8785159adb59694a5a289e23fe47f5fb9d1c103fe851634ed586b5e8fa3022100e23d564518ed9b3923560c27af48f1ebc1ac4a83cca6f93e6d8edfc13a11bbff 3046022100f2c9daa08368d6b80f8676b20b3cbaf2327c9ff9f7817c65277bed88712ebd620221009a1f59cb2e1318fd1c534150bf94c3e0d88f62fdf76ba603c9f5d3c9382190e5 304502201044fe3eab886979648e2deb6e884766f73f60db795c1b3b14e43edd0fc5407f022100fdaa706935ed8c594ea5c682e24bd4bb9924aa139814ae2cde50ca1d5e9cb5d1 304402206f80e2825a4e234f6df7081cd565c0d7131b12f0c338250221bf98efad41e55702205776d001d5c2283b91f5f852ef1578ec01fc6efe5c96b4016c0801becb476d4c 3046022100c3f148bb8045099af288eafe63ec34f3cc0eda4b987fd1249ad8032a8bd7b0cf022100b7356f11f63533a9f6557698dc47ceae237090f8293775d4ba79ca181fa9b66c 3045022100dd77cc5b45e920aa4f15096258d432dd7f29e259b195814bcf3c9470fe7e2d200220519e8c6af5436696522df5c137ee65f78cbeaa81adc82fbc08af0549139ab28b 3046022100bf8a678d1d5108b2903b8f1a81bcd3abfe7b0070d81679e3681f56c39082a37a022100d875b28742616b15cf186fb2e173681d1086f487b8d25232a3ffbabb109fcb6b 304402205d78ad3f8d2bf4600f9e48f4332adf47e93e39d321a22dfbbdabfd4e60927c370220441f1e780ed3eb6f4b77f6fca4830c13441be9a4c046567ff29f44a91a80665b 3046022100e08b9971158170de8b9d14ab2ccbce0be3a2fbd2c1bd5f255efee0827849c759022100cd407833393863915a8e7eaa51593a3d31ee09b307a172ec5febe835595c2e27 304402203a1e2af34bc9883402890b4f19a9fba2932b43c5014b72644d3c27eb690d039602207401786745acdb7111cbc14bbf9113b4d7dde305ed2a3ce23abfbd34ac890d76 30450220708267e806301347f81d4892681a555a62abd5b3b1b7e11dd00c05911c85161c022100d4f68496c49bae7a9ed4bbc640ffef4efa5a45ec61c1905330f6b98e8ec352bf 304402205f78a11f6a5004dee37a6cb3753fcc01c9bc1e00771cfa8a017882eea7b6c56b02203b3e8cd7edf63b99888019b6d08ecaa3c62375113572947be3f4a913b2dbba6b 3045022100b8cab88d764ac94492541fcddb664210d213cf17fbedf88038628bf18a44662f022077855ecf0f5717d0a212c587efbf373e1778c31495ed5ead0e8e45a85f711dbf 30440220190995a214e69fc0e65853876d044462cfcab01b0f17978ca164a96e01bbdfd202204e594f9cc68da935601f9f562035203167a663940124f5677ca5bea49c698e51 3045022100edd5004de8d42852e410b7bbf327cfacabc25c59b1ff5023a306f14870f5920d02207435bb4d088d56a691b12f8ce8459931b66375f8c31febb8e02f4c224aef9022 304402207797a451c516e42f531101b3961fb18b5c068b0ef19c931bd9ee02d5f4569c8502200d6300715523359b495acbdc52fd21fa8bd86d5e8e36b273a5a20c520ca535f7 3046022100a7ca3b0489865b4065318dd3fa3d37ca3fc39fb6fa239e4efeefc5aa16b381ae022100fa8e2fa8fbb4e75ffda9e7ff9f7652f364eb3b619fef1d580d4f6268904cec71 3046022100d66938154988a0eb049c38bbf14fe00dbb02244f42d5175cf7b69a0b4f0dd83a0221008ba331057f32cd4c50c94306c63da2285fa9eed70515f1d7141e7097441c66b9 30440220783001c3032982f246ac349a33467036e008bcfd4b5255cecd9a1208df8d630802205078f57c8bd1ab25fd51b5e72afba916cbbb084faea3991c239f5ea2b3390f75 304502200567bc020875930167a983864263ba75d8a481d57916f05bdcac41618d4af8a1022100a936ddfdfa791582c4b00eaf071d00eb7d9fae8678ab57b1f759fd460df3b775 3045022100e614419130827a14a5d6b9a97a5afc194356081bbc47645e1c3642f77ca403a002203ca24f34e8d2ec645803f28d3cacbfc6dbec7aad278447cbef8c91371db0a69b 3046022100daa913805d793cf272e9e7d059edd321d80dad7f4cb2e802bf93383804cfdd3d022100e8d48c9b4f159c8e2e3aabc7e27344240b6e1925798204b7de2199bcae6f4e84 3045022100891a204f06d2825d4c1e16be87fb53204d1d1de172189911c9cf505f40aa98130220314b0ff6e6f77e4b936e67bec4bb756e91067f184722eac508fb3878ae87cde3 
3046022100de4979c7e39db0f4bbbfa614c8ea74f39abfebc0830ee7d54cbdabe2e6c06ae3022100d78433db507308db9e7c281f43b5e574085234d75a87dc511dc5868300a88d7a 3045022000c544d6c07172bba5583c8b70abb62c143b4cb19cc6284178a90ae80093a02c022100862108a4fef4a3c0eee7d3fa7bfe31a8d2092dd1b041a4fda0076c6592b199f0 3045022100c17b43e0ce30b5126ab3fc313700de3806a9dc0bb2d33efa023a22c44c92693702201c481fe5313e93077864d1907e9e2bcfcabc53c48fbbf319ae9716550cdaf1ea 3046022100ed7320cdb5153ea3b5997ac7f66fa3c5c1e5d31062137e57810b2ade3eed52a902210086f3861157abf0547db8b17f0aa5ef428954d2c16ac7f2d6bd66b104e4023987 3044022051f69459d89c164d0c122e6635346afa79631d8143b4f363b0176562db6c0c0102201f8b7655095f7b692dea57194bb4001a4f9641e55dbfb5801fc724455f81685f 304402203b987bdefe9596913b47b4e588dcbd60cfc01e39ff07b87629106b58f58f49090220453cc49f381a6033d91f08a4809c468cd7cf21c25e006e17a44bcdf5f518f254 30440220742eca6409dfe801be0453682de7362753f6926fc0101308701e338d0fe3915f022013d3aca2893f3a7834d60b0f2c628c32068ba6635937c36e1c3e2c5c9317d441 30450220230440ccd5c2a9676fee7e699c46328959f2ddb8dd526b07c5ebd5b33574325e022100e3fa2d9893d0bc3b80bb8c71da01f29e6ba3ba700ce2b81d42735a13db62adb1 304402203bfbd43f2f6823458a2b6fe26530d2a237bc9bc6ac78832a978568a0113a85e9022029021598f81361dcd70bfbab352618363fc65d2116e14c021a2401b563e51ba9 304502205e3d32ff69c8175ffaf89040d64bd8efd46f0abeb1860bcfa1148125549109d8022100f0326b6064f96e5a5f23674fa4b776f788f6795f52ac88d2a4325c096742526a 30450220448394e4b38c6d6960c27f30bd783bcb949c8342eb63ad41abf4c6f97168fa0a022100b58649f97850f958fe98f3de8ecb11faf8e9402c8832f8e6aaf89230dfada827 3045022100f655939bb34452504b8dee290cffb6580029821988be0de43137a63cfed5411102200f53884cd8109d297169656e5315d6a035a2629b1b5558bd8f867b2c0ae2dbc4 30460221008865e635d7ebdfbcb90dc0f684a26001201ce8d3e95e1937dc088f35a5ebc0940221008035a4aa6cfa677ff9fd14ccfe2d0a4fdcb9a0caaab5a6667f2f43ce073ae615 304402200fd19416a2bc77eb51ef71f25cc58bcc6abf9ed93d511470ecdf127bd54fb4a802206f3e7e2b694ec9b017890314728224fc9a0229ba448f3c2359291e1546669455 304502205073915f752c2244c1797afa671e45b131f072feabbd2002b15804316c41d294022100eefc2089d750dbc0b93d42bd9011941abb295cbb66b58d6b7a8b5b64c16de4d8 3046022100b624065cbc4715dac122782481066b782f156ee02ff626a88c6ecf20b5fec4b8022100bd86b3b00a32f915b7ab9318c911a71f6b9831f8e8c577b011fe907e2a5340ca 304502205d32265d4fc1df2cf513b8fe70dec0dd07e0e61d86eeb8b3135af331cb2c64e802210096f2d6d6c8e04e0ec13c6ec977b81a53608a3d01e49d74cc522cfc10ae826471 3044022069ea566e8144c6eb5c195b475066593c58c1d1f7fd415285d6d142763065e32702206b5a1988efb219dcefd11a8283d96c6b5a52a2fe8fe48e5a2de4c2e6a721ea31 3045022100f558adb3572431090782216862a6d611c7188501cb29daccda3a697a3d85a2f602206be9d10d6f936ed5888d657e351dc8174c172026e2ac09eaeeb7fd70c4c8b50c 3045022035d55ec3f89bbb37beb09cb8b6faef31309910b10574afd14bbb8e243028a31e022100d9dcea7eefc61b68940035a9930ab1ce8de0a8f7a5daba6f2cfcde7ecd477431 30440220781f798df7eb7b98bf754e1b54fa6c8441bbf7dbb792ec2503733edb006f27af02204bae6f53149da5dd2f9084b0f16351f898e55abcf87179cadba78b532a227c49 304402202ee434358a2cde9ea749d644a5349f92cc6d1bb411beb4377d9bb9c9eeb0333502201bc1de3a14e9147eb440e351871f2d30339f1d824990ac9199ffe4375c9eacac 3046022100db4ba5ca382b0f73bee53def12bdff182927be2bf95162bcf356f9f616040514022100885f9b2a97d23c5bc3e389dba72a999f8e7aa4583c8c771db0e706ed4d7cb5de 3044022060dc3c0333a235a5481ec44028596c177e24092f9459020b23f0d0a08ab0106e02200337ce297b99cfff3b432609b368564f10f9a9560e9bde345ef5309c1259ddcb 
30450220691ef34f7bb24299a2e75fe6f38a5479cd012da34f668a973cc1c224b4c76e1c022100b3afa9fdea1dd7ef238b7a08943b74df378adc21c2e56ab44a6aae29aa0e8825 30440220658d6ec91d62b0b6fa650c8dab15802d10d8fa54f3eace7fa852b8e0a32e82d102202f35d1adced504013d145ad053bacc8ad57f58b11784a139c1544b86af2b8ca7 304402200d7f24f0629ad1147b3e141c462dc0de0132b3f242d7018fb31b7af2354f8d6902206412e14caca4020b706e2346a478fb8165b2870d84fa6e550232122e7bf255f3 3044022050c4ec64a784ed69f06fd917690dbf8b97ef90f298d5cc339a40ce2f90ee5b3802204c55fd6633ef27324a58202112dfbb1e8dbd6d9992c5bc99c933c9dd5454aa01 3045022100e16580a7fb4a7493b16ed8ddb2166e8c0ab9ad579fe17c263338c806740a2c5602201d1569f0c6a11663ae39278c2228ad7e228bf41754ed1979bd343f434ba0fe02 3045022100b526a0977d2df01aa626cbe37d04bcf8ed48bf69c0d5b0590106867dc55baf9b02206da12b00d2aa4d7624ceb9e38ad490378a505313d5901162f8464c76d1c89c24 3045022100f4c4d4d85765fe642a66af65648f90bf847570c3acf45183eb3d56dcfbb78123022003d15122bb97a974570ce1c1a2644a42fb625cdabc8cab3e6bf69a4edb205e06 3045022100e1b8f4425750b0fdcf5370b64a7e489594ddafd7f3f186bf9bc5a9f039e8e50202203184196fd51df60d889c828910272f4bea1b9b33741920caadb1d029fdef0c09 3046022100aae347627c561e0548cbc646e761d60287ba58da84117c414064a6dc1b3a504d022100d6ec5a8849f13ece83c75f399cf20b6d964fd643438a16e0b8c8e47a172d7831 30460221008310c05128c5864799e469cab23ed77bef7bd13aa144025d2925271f5122723202210092947e3b596ebc840f7cebc58e297178e18c08d5802f983450f2cbc954a0aa28 30450221008c34237134ffc60a088d09904e71c0ad36a8d77a4cad2459c871ea7a06e6a0b102201438de6230e8df7ad5937179fe970e14ee3b8d9c51c05747b47769b7b562b01c 3045022069f1f9d645f293130da9420251811130f7a6f3a7169bd98ef2cb18d384d8a51b0221008720b21c6961ef6a5345f7522bb12000e6d10b17548965ae3dfa98c6a006f94e 304502206ca00ee6f894599294a87201ff574eb6edc05ae824bc2412dfd971d4587ab02f022100ac6c341e84f1d9b3b46574d5ddd41786b0d7ee362b7489b8c5efaf7d7dd91968 3046022100f880163a583d76dfa93b9b87cac3595d6977fb6959246ac6738971e33a19afc4022100bdc4eabbcd7deb31828688954b4f07282054804a583cf21aef5a49eca6440277 3045022100ffed287e2d47d93bc4fe07465901bc863ab26cc2dedbbaadcf7b5ba0e7819def022012453ca9480f7233e46374a7812953fec08e5dc84d1d47808ef36b198b48cd2b 30450220231bd1ff25ac81b3859728c4f8fc49557e4f0f0afc441a2d419192de285f295c022100a759e998eed4f46950f7136444aae71af8b9f7172ac162796f9ec6730d75d77f 3045022100bc92a1e89bd2fec87c901c116ee4a7e1093870ce3d3c6b5e89abb127cd128dae02203dd70accd92568df037884e5a7f404ffb17abf902e950d1032591c1b70dacbd7 30440220428893837dc6bf1ecb7597978581aef6ebf3514bbb571a7d0792f5c67ae5939f022078ab83cb8a3cda985529d5a26d279a8a42d8e649d9b1f56d181b1b241d843eb4 304502207d5d2ce31973c244b5341467ea50792202dda0d080c08a9e7620c7e2113cd343022100d549fee74d805fe6011d9d3ff2cf1d89e7acf7b63d58946084da0720474b077a 3046022100cb76adcb5aa00a3e9b69b1c0841680dca092d8036a18dbad8168d2a1780e5301022100a0f3540b81c11767f90314b1c4a478fb72a08e020b7359e3a154b5411291d54f 304602210090ed92cd3ae23ab0ca7fa79babab901819a9e87b71e6d3efa27bc21c29cf367102210093ce8cc44c30b6d099cda990f7c7d7e244c94a24cdd1bcac75940d4146129809 3046022100b848863d749591153ce1ee778f7c508e12fd14112b4146da124ec61c4572112c022100bd800742724450d0c2ebd7772db34657005a6fc57e3e238df33317ffd8e9456d 3045022100b7efb74c564cb46abc63b27d1cce03bcd1c7ab6baf5ce6b47a9509fd7a4dca6d02207a8850c125bb80ff00817de25a74246cbd4eff1402772bbe64316aa4dfea721c 3044022055ca7558655ab5d47d3d1e0ec8d5d35ed02b9c4a0a8be04e7d1b107b7f9fa5b302204cece0d0fa6ec1b7b3c0d2f3de2df853698e9c1527044de092688516adcf74a9 
3045022078924713174852d58e752c6ec818a3a5278c12316d004bb9d95645c9e3443b27022100c42b17b8f6baee13089bbbb2cba82c40eba6b5111a7899506b5f19b2179d7169 3045022012b7017b6f3ae9484d52d26a225f4e4a3ea81facf7a724e19fe862de583715f80221008c252aa1b3786267950b029c68f940e9db94f3987eee2d02bbbc168803c18627 3044022017c245f7260f4662f71f10b60b18379551090d6beff6417537c461d2f9765bb602203a752950fe4682d94bf83b58de71360d48e9ede4870e519b0efa870ff2a55302 3045022070811df65832319f6b39abef7802a600da4f8b461c81607a8682186b1bd8b891022100e0724c39b73836c49316b387e0a18cb3e41cbf822d0b6c5f5780159c18b2fa34 3045022100b15c96c8ca302f0e0059234fc51381ac70b115807380a6eeca4160a3cf46ee4e022006138021441d6034053ab7d8e61b300671475fded94bb60a0f96be546c24a65d 30450221008da48982a162df3e19eddc74b8bb33eb1b5ef0da443b2c8512ea9a938b61a230022045fd630f24ce109a7d4a1f0d4b2b7489475e6363e0b2d4e3172a979edf27c8d3 30450221009548dca2d24d897804c3b2edd91f5cefb36c31dd66aa7ec5f1e457ce0abcf50c022014653732fe9838c2ded09bf9f23f53d3f769fe24f124d46e1ef004c020a16a33 3046022100fcdc444a55a90f4e062a8ba2c6e29943fe9ec6534033fad5f81a88fdbef504aa022100a4fa6146bf966e080a6a02e3d9fb3ec94d4a3783fe06fd48109aa1c45cd26423 304402207dae701b2404aa035e5c1c84f4ad9213ed7f424fe811fd93f7f875368050a8e90220385d300b06addd66ecc25aa9f99f79025ff182e8e54bfc6b2d7ea61fce0d3b53 304502207754a11be2264d23dd622fc903ee5fe893e614e7756453449757b60a715081d7022100a33a70a17672e39f47c10168b323a4279e11bbfe07aaacd5ff93162416b00ab6 3045022100e9320f7a79c0d81db98d082fe7187846f8c5d0e96bd6ce9d6036b119123c89d7022056399151d00327793915b25690eb44785112ecf7865cfe2ecd9b1d374543f8d3 30460221009e773a9485fdf09aad22e53cdc83f022eb5383cb9f3cbfbe885f56b86a7af067022100e40fed2762dfa629c7eb5a825ad1a9b4f248657138f67c23c7214a01aa1830e6 304502205b49031a3e5a40c36f8a1750b083a5c4a5c46782405089a609e6e1c81573ebdf022100af2f01d75d4a810cce38fc68704b51dbc3be16813b5e503224b8ae1ae5ea448e 304402207a37d76511715ab34061e49701b50ca590f4931df1e44139b4634352ea82e84e022073a78ba3b7505eae624926740aa7d4ccb623c3448f23a4a09494a0ce2eb7491b 3044022029e5b945ec8e9fa6cb03a256be27a8f790c7bf03152dca3ba186e8342c29a03c02203038a36d1fd7386bce892018061f128a072b3be21fa8380488b23842ab962fe9 30450220298f19685a1250e7b26248a82a09f56b3a0214e68bc938ad92cd90b93306cb19022100d6d657b9203381a33b60cb7d356ceed5fab4a2288f4186e17d59237ad421bdec 3045022032563dd35035335823832585e0a35e50562fe813795319caf3efff42bc228522022100bced9211e15eff046e63d9abf3a0aa29ab2b4ca44b86d38ef749cd0a63c736a9 3045022100c72fe391ef37824bd2e6519834354d249c310ab763d1c48cdfeb7fda481fee5a02200a4d7d4059d1059d3b818ea3daa8b44a01fe141bcb121b0066cd61eebdb2c352 3045022024d056e79e3d71d4894bf5250987164b52be1b51f423b23789c01bea28739a72022100f49ce59169daf3b01670ca51f378b0eeebbae907aaaa0dc331fa5dcdba03dd8a 3045022100e3a30e71e301371e9d79bcf84d088db1dd56db638d07971d9187e821c140d0ba02205cae9addb72f05c9ccdf2f6b4a8c97b3b8c4f10d66d672dde50dbc63a0b3bf3f 3044022050d31462c6281b2cde6e50efa3d633b08730b7de7db473e2b8c3a3c8d77f99120220348985f0b3c4ad161761bcc5866de4d5ef9edfb962e09b5c2fdd2fd67067d371 30450221009f61f36378ec0a8f0c0019241c12a5351c3449f90f627dc966ca4a9453ccf6e60220227859bfabdd96c9390aefd22d2c3491e5f8a528a9c242b0e9d57703c253a3e1 3045022100faafff1231eea2995582cbf2342aed6badfa9c013223a3882392925108997437022028ea4487a756c7a396239a96c30b43e105eba1fcf5722c6a8c650bacea1802f2 30450220080fc3316a665cdd157aed2969a722a9b33eeb2d5a1197bae94ba6bd3245c8f5022100c318b5530ef8976dd695ee835cb98cf7152befb03a7c66f69f4a0e654de14e1e 
304502205ff3fe2ec7ee6f43f52dbe91533f1400a7d00a9d5a2dcb749da203532526fc9b022100ad5019cf97cb6c1e4598cc10304a40e769c8ccd49f51b93dd08afefe9f795335 3044022031ed2ef70877d13e073784cf46fa3db036e305bb86291f575c36db2903ac2761022063de3d515e84b8fb14c517c880e65b8ac2b0012eb2f1ac9b126909104d1fe8f7 3045022100fea4bac72c6da19e2c12a37f0ca3b806349d5ac1a62c6c582da59e22d6cdb1f30220533842445f34d85499831553962bd26da945e0db21a6b80f31ae61bd71d0c909 3045022100a3754719fd117c8df9e087aeee811cdbd1a5031c1d5d82e8d2442a25c0420568022059cf482ec26a4b17c53eb32f5964378b4827cf461e06c6888e35e9096aea48d4 304502206a65cd7127b5e6fbeb93d8c9dec836fde168983b03e8b5f6209bfa647ff28e05022100cf00d5dd824d659b5028c943e7e103c5603ce54cb7511baae7f424cb7a4b185e 30450220095dc7d54b8796162787818e403aac38512729023ed2e867563174c58b288ce4022100c021f09c2461c50a1de03907964f9621925a69fcec7b4959dc030178807c877d 3046022100f43ba701fcaf860e93fc2f7c2728d300c4814f8b5b67d726481802774377d0c7022100c644af6bf8c9b24cad60dc4ce678523e055a9051e0a20c9db2237f5ed7bec5da 30450220291307c6a1517e87c783219cc1ede781acf562b6c91bae10aee823e541cb8949022100cb0f7680bdac8d1db5280cdd06f7ae36f6717797930bfba32fa14253b9b3e1cd 3046022100d84db2ea028b191413ddf6420d6ad6fa6cbda8c6e41a83c58eedbe8333fb90ef022100b8e1d08b031ead2822035eba3ce4c026161bbcda1c178554413ef7a33479d4bc 3045022100d104302b64456af4d60dc8af340ce6da9c289a5ca21b5912b82ad4a25b229927022068315a54dfd074a1dceae146ef989e8a547bd515d7e043955228d9bb24a39d7c 3045022100bb8d51e7922c080c68a0d4b5dd7c0f1be4d4baf9f73ae55e9424e88d8868245e02202d02b3388349e36c5f4de32f0036ce13f31a839ea86f233ffdb90ad9cab21f4a 3045022100cb19b8c99b8334fe9847877463187306c9c7beded77d11fde0159c3dc97bf134022005e0d595effd08577e8fce70d26d29b298e8fc1503d9638820dbb418cc2df41a 3045022021fc1d677a73b6d5c751ccd0e3e934a79a5a4c3fbf0ead89756141394f1cebed022100a68471e898534c1ffc0db719fe999942fb4fdce93abd034d59c63d95af902cf6 304502210084c0b033b94c83a570e385bc20e9728798ae06a4cac4027138c96fe4c41f4f8c022037cf44da63e36a895771c97f888813f9a4e91b7d26d4ccba212cd2138bfab5da 3046022100ec2e937def755fe2ce747c029b7283f04dcb3415bed8cb2d67c0e1f9afd3dabf02210080a4b8307befbee05366474309ea8f6cdd3cc8dbd1742707c0e9cc573a854c7b 304402200e6d2b07c06245a87d271250c3fd2ac5ec30273fbd658d829ae53fb5d4a6014c02207f49aa75b6f8860e405a58145fc7d14868364fa48f3cf1f92b9a7fe41db0c1a8 3046022100cb84ba879301485fec8b47fe8ecd837591cb592d171f979db783eadf24926c3f022100d120bf25254faaea0d6e3813f639a1553776253fb0a7477ec883cb55c02f91e6 3044022049c32b5031b113478f1c7d87d400e3f5ce4aa71c52e0454020ca6b8e0fd14fa10220428f6934102eb071c65a1cd000e846dcb855a08d1223ac04566a5b50c7823deb 3045022022b40592d4e39e2587df6df196bb4420c941b528554e13ab63807eb04686e366022100e4a1ae99c7099fdc33165c8bfb1ad4390690c2392d0466f6caf6836eb1517ff7 3045022100c0a6e0f1b110da2e5bfaa14f6c69a40d7fd21c9106a2617352af84e86eaad4ae02205a2917c15b574385c86640d67545c9a25988fa3411f36c7c1514faaf8dd5fa6b 3045022100946fe18a79da244bafb6f81358449cc0bf049a923207c447998ef759322e56ea02206346abbc6f639f4cdca81bb4be8844f544ac0fba389454d96afb4184274bb3f8 304502207a2906e1718cc133dd0642cb4f3d54ee9be50eb1504cddf8854b8e4d6664f294022100a35402929a88cd108351657010660e7fba8ea157977cad460e4dca6c6dddf717 3045022001331a19dc246c1e71080d858d4ce6b5f1a5f5314b1fa892b4f8f5e1a22e1031022100c7095740b448c337f7cb63d96d2d336dd74cc3cbc579f352f97dd20a2b92da23 30450221009b94725bbe51f4c16d48ab6656d84735541bad8d01e772837adc865104ad5d5802205f8211193cd58552744e1a2b559cda04061c38dce54d2dc1493bfaf045734408 
3045022100f94f4a98ec601e5ddfa12584bcf8c829bb6caa74d0367670cf0c5b350cf3e9eb0220187627b246a405b4cfb9689bc313585406beee4527efb874fa078fb25509464f 30440220619a4d7b680450383cae0a9db82ef0172f8569af8f1faa0315ae815a481d6a5202202c81ce2e3f4431c00b0cb040eee0db4dbb76d4b390e5bc3eb7e824376ddf5a14 3045022100faeaaa7f131403ac73fc14eaf0adca23679a20286d660ca24cd23eecb2061d77022066ba6e2e13e8feaa1e15b24b609b02aefd028eaed4a8eeb6d0bcd599f8eaba34 3046022100fa207edd7e0fcb374982a490f0eacfbdaf9fe79b20fe6629fa6a4e2345d32949022100d6397163eea9808df705f7e814f048d00133418666e1bbbb87bd14a10a80ff5a 3046022100d7b6fcec85f57d043a9cf8b347579f7dfdc2bbf989bf1c36b49b2429e717fea2022100f590e6b42acbc68bc5d9492b8a9692a2841aa0b5bdda27f2b1c0f0847794167e 304502203e6f4b110841020212a101415cc29996e1858300deae91387976635df0f7d6a5022100bf8a4b9c39baadfc23cd2b14b79718dbca045f7b0e90b7b48e6a0a3c64eec63a 30440220413d617e48ec81252211bdbca7cb89e378f2af2dee698ca34168dd937e623ca7022077e8176027b2806982b07c785be750d2fe9ee68ad83a4f67b78f6f3da5ddf4f4 30450220237d6af62235ab9d5ea273c10998d3e45ac77ff922d96da0483b471f54ec1a39022100b3abedd74a559813d427ebb1e90ea8ee539506691f9d7d125d29673bc6239c12 3045022008376605b668d9d1a4b307b93bd78f65be50457ba33497f8195c3f47f18e93a90221009389155df4ffd267ea88c51a23b61937c703993922c46c8d9ee0bdfde9167e12 3045022100a4362656d67a7e08b5230035dcbac0844dbcf4e5eab2e0efa9ef13f4746d94780220458373d1649fb7873dd0291c34d2831dbdb52b663ade5cfd6124c9817cae8388 3045022100da5d1e227baef8725f92ee4ae5e328ab37338e62855f3e060019b4bb4b4675c002203f5a7aeef808a2b6c51c05419a3ac0041fe7a911b683de10589c60cd73ffc661 304402204343f917ae93778e81bfb56bbe61944ea13476e962d0f6ab6ab03c97a9aff3c702205910768362377ad50fc65b2edf09656e403d02924475a900e94f66b057acf764 304602210093b3e6faa011b5ef8888847b7ea26dd7c132ec96456e93b6f794a3389b833501022100933cbff43585271570ec6de496ae715c1c75dab475a6313686980172cb9bc1ac 304502203372115481292a1e97b17edee14162a56514e629524e88df5a62be9939cc16c5022100bc891c9a14fdf0aa7897cfe80e10608505b421feb2210c25b433de1efa7566be 3046022100f3a19884a68eccf68f7959559ac47e0c28430a92e01f0d9aad086a48eb2697fa022100e05dec5a2f09d4ede6a5849a8a88ca6feefb44decc5ef8897529272a3d47607a 304402202b940e1cf3b1120636fbdb0f4172e236808dd3fa401ef501f73bee62c65bca080220265da79bae3eb77e00f6c98c1558cf7380a1c3e5d7cb8a7d7ccd0844f76fdad0 3046022100f70511911a306aaa9e2002102cab4e72290276a65fe641fc2a6e2fa7ceff5a6c022100ad7bb29b2d46573ea99db6994dcc01c40183d23a014624fe6967db919c65fc29 304402203bcf7d3549044fa966cc0c6cb5a860d9df0a2f55c4065b00d5edde50c0f6aebc02200cf236e6ec95cf466b331c11c047917c793d99ac0ac20fcc52bc56edc2f902c1 304602210081bababc03b3e3381d7f34cecc727ae3a5ccb0bf10bf71d3782623fe7d6c80ad022100ea7e04479ec724e58ece9823983407ee0a811ced400510883b81598f123b378e 30450221009bb0ece6928d170328b2cda34a5ab8621633e396bc6e008cf73e55b4d69a963702207ff4424ab4f7a337d4ebef9c052aa4938353dca71809e66992e2cdfd5e47db68 304402204188331ed6eecc037dd34c92f06368958738564f5d7a2b7ded068d05a6f7efc1022044e2ce263e9dc0602026f0bef5d8d94a7d2b311ac33d078950f94962c74ffab5 3044022076e8f08720cfd9d60920dae9ae1b0af7346b2b3f8018815066d197a6d5c98c4102201220c2628e1e148a7e4b3f50a65b7addf8d04c8d09c3d97b3327cec1c7ce4708 304502200151cb6285ef7b7ff52f9e98c2ae84fdc50e3fc77a31d863cda8a086fabe2dee022100e02f628c80f7a8a995bffde4f9a36b44ae064be8cb160f6a24006f0eddd7fb35 3045022014ef2d6b97d9202be4ed9297f15a02bd23d5a02b07056dcb712c5e670213220b022100bdef2c3746041988f24653446d581d72164db5ddec414876977913a625b9cde1 
3045022100be9c56a70162f084ff1812ed77e36c0fdb350e4b79a6205d52edd813d335378d022033c2fe17ca7ee0eafab91ade1a4e1d25c9e377c3502d5fd8d976603e597910be 30450221008cb04526250c7e327e75a6602ac37ce854cda0d55c8eca27150146d53763430c02202425e7a4d89fa269da4780420c0660795adb3533d95dcb6672c24904fd48eb13 3045022100b056f9063688160bb11f67955f5b71dd862eb5f6e4626cc2d21021f464714bd7022008435dba9552fe303b8555943b0fd4675811f5e36174ce4b2f84612e50d4ae71 304402200f18d6789980e51d3f8cdf04c42b1b5b64cba100aad4fa5d4a15e60fdc5eb82f02207af8a7380123ee357ba149ae2dab00559d44063f1177dc8be815224e4cb26eff 30450220535e68bc7d6392677ee5a027931f85818ad8d94d7afe0d3543e4a022435a9e98022100eec8d28a2cb950aa5d1ae7e93919102b53a38aa4dfa9d343cc9045c588ec395d 3046022100fd70bdb5eb753bb28fb9ebffecf7960b27e0362712647b43d1bebedcce3079b1022100c25dc1060c9b5a9c97e78615af4df2ddba03a0191f05fb16e61875f94a0da42f 304402206b8d8f27ef8feaffef4000d1bda99106d957c9d55f990b26b051e573c4079a4302207e138c9d2bead75f684b9c2b408fc8d136f95d585c999a06158c802c8ab0f5fa 3045022100f774d37d9831fbb8fd5ab7b301580183a76a923bffba04cb7314b8f3c265e27802203fac26aef79120d7ffcd94c5d09b6032463f530214f397832c06a5d3aa1ea17f 3045022012737c585bec41ba5540c5b2f60e37a5bac21dfa14e0ee7abc6f7a8f8563750c022100cc7a130ab0cf97a865e25352aa9150c66cf99dcc346e687c54b8851460091249 3044022031b854897fbca282a44899c7c40c7a4eb0bfb40a07cc280b4c2a74c9c6198a8e02201b79fdfffa89a7641644a3c3c071567efca72d0aac63d2170d6a81d7b2256af9 30460221009073db9cceeb418c36775ab0c01dcdcf00bb8de9e71f819f4f64d2acfd846c5d022100cba796bc7ca7dad1b6615a8636e0064638db31b7033c0661c7a04a88f343c261 3045022045e4daea15ff19c5eece71bad51b4b783a2f7efff091e3327ad50a595818de50022100b66c633143faf8bdba69754cd141305d1ddb4e4aa7fcbb74a7ffec3f5b35cc5c 30440220249bb78ae123ed145f178647091276a8c19dec3433d543a63096d50521f143a60220214f4c049a54c1934a2bc9117497dd89db6879fd07e15875a12437dc2bfeceeb 3046022100b1206e833a61c24762a82f1c1afc6be9de284770f9a9be8e6d94746411ed24f3022100b0b74ab01ebf9b6721a26712a4b44bf47e0d22b3a814cf6a5031ce110631a1f8 30450221008dbdb7ed28f0f79ee7478bb5102efc7d3f82439ab064a370d4f46829ef58c25f0220381eb88777923e51f41e760d6bbad51f4bd08c4eb6382cd4d811bae42a72c3d9 3045022100c6482dc2d38ab31e560f7dd61d6d77ae80e9fddd49c76c00d25b9d9bd60f28cc022014b0bb0e7bad01e7e3997c8b5f883b3d3f7da40286c5dffff0b215905699b93c 304402202ef4e21005b01583d7828ab6af0c1e506e4f87fd6263b09833b88fa0398ebcfa02207f8aa6a3c02ab361b297c3e367c0c617f081525b183df1a294739f116656ced7 304402204cde0d4263309900a3840e038737aa0db838547e47b0694ba2de8bc349637ef2022078d4b6499090cb0a7e10077f7b9d4bc3e624664fc2b1e16ace50e133d6df6a89 3046022100a8066ef36ebe17a019578b7fa0e4cef8b4d46e2c028281cb6d520c17c656f3f3022100b53f5eacff3575e3bcfa39f62e75802575b4e274dd5ab53d6bc220bf383f2428 30440220292ed47edf91ce9b30d29633a0dc9fb258999dee87bf5d946a8a6391493e40a502201b8619e93a89ee703c8eb728ee011db14d839b706bcfab1f6b8becfb9cd93284 3046022100e32f0396f777dd8f4c10185ee09ebf650e79778c6f2363954f07e35cf83f6b1b022100ff64eacabc71924cd85aec2101efe78adf2dcfff1325e2ee678f36de4e9f5469 3044022076591a213ef5516979125120b8f1cfd1d71fcd2cda46180a14e6c9eda9de1762022002ec95a739eaf775833eb1ea5e38019321074ec0ec20db52da438c41c39fb699 30450220041240a4a7cdd3b00bb89710ad165b83fc21c42d95c32ad9fd73e1b15bf021e50221009401990faa4efb3a233d9410e3645ca12deec855a15adae711977f6fc026811e 3045022100c0bd5f376efbb0e7eba6867e43e3b1a56618e187431b626a6fe2c7b4c0938975022069370828a72a55ee732ddb388b7819c3cddc8bf7a477e8fa41a1299992ec91f9 
3045022100c5bc5d3d6b62ff3542fbc13c7971b8d74031039f00da8e9df48755671567026202205d40d9e6ac07d196aaeed73b419dc479672462bb56418c0673f94d61911dd165 304502201a9e7d02d407ee80a82108f3326ca4b24aadaca0ae8b7bc9c4879c1e65ee57fd022100e20befc52714e950d73a6d8f84a418f1cb0185a2da5e2ceb99bf79e9e5c192d6 30450220637076a681a4f949a1093bd0d7411d4ef047f769a20bd2a9c07489d32508c1a9022100d2f8846b3654785027f555e36cdce62723fc9d802cceb2ddbea87ef6f5ec3a9b 3045022100950f4d7a6fd7dcef459d3795325d3d6b348de109296e220f14061cfc94c26c1702203f0d07b3f08e5ab6bc62cddf1c1e24232acb5b7fb687602a35adc73ad6967dbf 3046022100e633568aecb11d086ce19a9016b8fcec52384a9987829e9de7234029962af0aa022100cfef50f9b7a531712bba2df3643386e9ccc78810f9ad6331d5d82a1e378d0243 30450220023e40ee3250f742d42d04411ba047c422d221ddfafc0732b35223281ab9b9870221009ea240357a9656cb9a1e64394ce34dc28a416a10b8cd5c8e913fc1d7f44bc9fd 3044022036178438d7f85d309acdfe25ece16261a7e9ee68e92f9b8480283d37779435eb022062e2798ed9d926a98e61f3f82c21ecbfded78df8db324479f6dd880e92ce5908 304502205565c5f9e62adec0ce8340cf9fe62563cc217c64853e95660760cf3c862773b6022100ea41bece28d980c23d9a4eb357a74407041a07c744654c7fa129bd735ab2f343 304502201bb63090ea0ba971709ab495ecbbfb4734851722496ae459cb11447167e8fbf9022100e6b68a72ceb3a67b734d610f487887845528075542f90f3a2fc4c94d51e54030 3046022100e765f18631806fb11b0d5aad56c6cdc9329d63fb1285684e299d073dbc73f62f022100b25ff4eb3d42d8fcb8d6fd1f39976162bb2ac26a2ac9b1dc41d26f7ebd8448cc 3045022005c4efa47f4c9237c7a358892d1642c987485ee52ead3614ffcc936b854a9694022100eb199c170a52cc6e9f37204febe6e68283bdec1505d3b529b27fc0113d76dd9a 3045022100b52b1ce7506f96b26704b2bff6b77905c7155133cfe4af517bd7ea3f0b76dada0220484d6697311d44a712dd9f1a903446666a3a9ae0fa3398da8dc317de1da2ca50 3045022100880373524e02f457a44f450e8bcfdbbab92fca262d4d41383bbe5810e247b76502207668b3512aa506890ce5f08bc35c122fc060d93cfad8258a141eaf9a1acbe615 3046022100ed8915b93df07c5f4c0d7c7a74a892046f9d96df9644f19aeabb39bc3d092c19022100fc3af1ec127e93fba69c8e286ba1fa5c525143f7b4db386bcfd1e8d9b63bde66 30450220023706d273d90dc35ed2cef8ca7330a05bb6759593bf2594b3385a45f1dbb7c9022100aaf4f432a0c0482d5817646bd322ac6b6c700c4cf84a4c5b04485d7f5cd280c3 3044022055b326d42e57d3ff52b2070c04c9fc6fee493927c892f4dbde924ad40199191f0220626ad857194e4325e76ef098c00e8ac4c54b89574395bacedaf33a1bf856fff7 3046022100c2d9d227e11e106673715a80e3cb610f611e5f3214f3e12aef6981e88f9e0e9702210095c857f920b4528eca82cb5291e32a519c6d1cfe82ddf4b16e57cc9103605829 30450220667ac30b15d603c72bdc0cb670c892695acd4813a8f58c4a64732658d5ff12d2022100cb61c2412d6a7a457e98fd43a929eb5fdc2325828ccca633ac56a49f585bb41e 3046022100ab8b3dfffac0415bae7a0419a1baf5d68ddd052ea340d10fa285c2ca1f50ba7b02210092b665062c4542f885fc692d1ea0b282272dd8da85e4298836fab34e5c664351 3045022100d484d45b59c9fbf7f6cfba955f1f92e6717bf220a4404641116ad5951e0c0939022049dec354a94677fcdd20957be45f84dae4dd4f7248801f8cf0ebb1c917ee2ca9 30440220729c27ed3124df0b44c707eb204cfe36c924caec901386a15cc72c2be897886502205578128b85bbc0bd248001a210c65a5769e8e1dda5fff91b775c1b4820978621 304502210082ae15cfbe07698d309b40ea60485a0ae7e25e4d88cc23425bd3e5744c2f8ebd02206fedcbecce4199e22bc6f40a00982b4b3f51271a30fd90c0151038c8baef4579 3046022100a5eee883f3e729b25a5f50d71bc2cdd9976e6216635d9578b36df6022cf7e53b022100adce718ba9cc1ebcfec680ad7d8681fac576e75dd9d0e766b380978e7b0488ba 3046022100bd33c90b6253372c18c4d411a17958969d18aa39747edfd09628da1e8ff04f64022100eb866aa74dfd5e0b851f0215bfd8c88155f7983a6426624776ff5094844b8443 
304402203329b98ea2494e6003a91fcc18842ec52c307383fc3dc3dbe41aac2691c81c96022005236b1c4340c78574ae3a35501f3cdd0ae12b39bfe6d4b95ba4ce4a4ded8c75 3045022100d533c7fd4838896d9b6bab1e0afb2ee1c0e9b8e4287e7cc95da68a4e05331b9102202a8b94ec1d8b7b234ed596cd5572bb2009585b60dabe92fba7e88955739fdfa1 30440220238998e5d1903fb6e6f0c8f65cce491fa2bc2a74a363ff3b857eee27e12ff323022054bffe6dc9d0f674bce62f15451744690f339e6f84e0846e480c6bf6fe19f4c5 304502210095fe16a4ad6ebafc448c906ad7bb2438cf7c1faf3d5209578974d4fe017cb51a0220335bde836dee27c9ddd87f0c58b0add9ce8ca821c12d5e5c4f39ccbe89788b77 304602210093392ac8feba8062d248cdd7d1f68be7f6303e64055936b391765c07a5b5851e022100ff110ccecfec80ccb9b1ad3b16111f3cc64a8c53cb8a7026736020a3bb9d66d7 3046022100b52c7a4b10b0376dc8b50776c310a96271504f9134ff5f2c9a055fd64e00a170022100f6bf5c44c24a008a06a13374eb80dd3f82f31414064fc6ad566525a7cb317fd1 3046022100cb548624e9e95ac8f2c7bc24d492f28b3f6d09b06ac14bfd945e25cfc4c642f2022100d4667a8edef4bf4d9f2e6f13756a715d21cda3ec9f172bddae864e469e16d0cd 30450221008b0d4928c18e95596bf76f8d894b4388ab0baa7876cf7955d79f2b7dd8ecc4d402200c338ab87a6e7f7745ff79ab3689167d8a3ff4fc166e84d4029f7035a2c3f9a9 304502210088d820183789b1e1af3e9f8b7187b45b270d379e0804eee856b39ad5ff9b58ca02201b8080da6ff582c1f3c6f824956e3a22c7234ed9b69bf33e6c91062b65c45442 3045022058a6dcf35936282756c348f051c840665c40428588d5eaeb3a2eca096832221e022100a9c3cbd7d1589e8726e1a2a678d74b06b66b28ae371919671b8bffcfe0822e9c 3045022100ec278536754ee54b70f31e3dcf6feffbcd1a17d1f5970aaed31b732e8c6bfcee02200af3ab26ea57541c031b6c6583ad124cc62e64feb02d176965c3a9250dcf4deb 3045022074211934b1493e87b970415c7011e8548893ee67a4903d61040ccee6278e21b20221008cc1dbdc28112cf67dbed62e8b5168e85b0f96138d6b68e1600978f319d79a2c 3045022100c9124fa2de3f0bd40721d6030e90e19b85e5ad1545baae36a4dc020fe0623e4b022055f17d1b2bafba27a775297fe01cff0ccca79f866492de550e94e3b02553801e 304502203cdef7cdf83e93c74e292b6f2c8edfcb435b445fec85938c34fbd8872786c8ff022100a80341124f12b402e08d2fbbe14a56da71d42000eafa6902bca0b25fc2c803f2 3044022055d71030551b7c443de8db14faf18a43712fa4b813ef97d67e18ea2214f0fd2c02203713a24a98607f87669c04321425358209c0cb83db0c70decc1f449682bd6c1a 304402203c2a6636c1d1abb5b577826fe2485a5b663c226ec7fd591aa048e7cbd0b97b1d02202fed6557a85d257de2d8aaf79f2b39f9fc125bb6660858381ab89130998572e4 30450220248accc59e06762da34b73d141ed604e33d61a5f97828e2ace3a8f882a4a61d2022100acaf73ee3d23ac2cc2d39d003903c46201c61e10b1b468920b03617d010ce93a 30440220423b65365fc91a9af6436b0ea5ff418a26bbe85a8bebe726278551d1210e2e05022063d79adf33204ef76823a8e97d19e74a5a70963842c1e17a804d9698e4e62e2b 3045022100ae910201e38aa199874e4771a4b0cb8b50dfb0e1b43f21822534376b0289fd18022075ce14e190c8c0933d0635de0047c2172edfdda8e0bfe04aaa97cefb18ca48e8 3046022100ac4a49afb7772cfee458c7ed6a575cd46bf1670364d0e85696d7283bdc1f1c7902210094336d47cd22c9a2d718cab632ce99f84111eb70c5dd01b3217d8aa34a69278a 3045022077095d8021a48a0e7f989fd773a890302653cf1805802a6b39eaa90669eaa5610221009ce9ef532312c3ca6773cc6c0decc126347cf3b19d3ea17db566836bebbf9465 3045022100a6893c210d4731a4b8a4945dbaa24ae120c36000b506f5634a0aa515b6c840c5022065b3540930689a703633dbad85a1e9a7d27b4b58da434aa7f80e974f04d3dbdd 304402202349e838e033038a5424449ad248be71cb0f47d41cd21538124314d77778f96e02205486af9b321e436b04621e3fd4f8e2011451d0c11b93432d6e4c99b5b27a973f 304502200eb4a18ce44bf1fc1e7ad4b5839a4cb28a6b0824b14ac410ff514e7eeed4a8af0221008ec38133a0ce94167082e167dc93209c3da80eb7fae13009622b4207df350fae 
3046022100952598b8fc6908f22f4b0dfd8d2f44e2d33dbfc07990c9ec2523f6da08651691022100944e8c5a3514e3f83cd9554a64fe1a95bd33cedb2c44499a1c5b4c90e6724b59 3046022100ebc42e280bdcf6d370ec99dbfd509ecc9396a62abe25372030d6605657d75a5902210087b13e5860677ec2cef0935f2a48aa6526e9245c04949a3db14901235653bcbb 304502210084c31611674779093f26f95248552175b73d2053cbe16ff1526e6654dc27c97302201a9993c11f3c867d302ab627b2d0a6b33b8377d1f4aad954746ee568e7e42b6e 304502210089af8062e5d18a4e57185c3f90f224f572baa1a0e8c0ad37afefe8ed04d1c159022050f6666ae5bdf5b586edb97702a1ef1792873bf2c0ab8789dd2ac76c9c27eaaa 3045022100873879231539c755832722420efbf0275fb4e221d19190980841b81d4c3e1a0802202b129f61cc012953524868daf19282e6bdfa20087a2c8f0c4caced069d076902 3045022100c511dc794fec816144c2da27e48d9da64d6bae75a695bc6a75990c8f0ab9fd7d02205701bf63c1b9b4b06a370aaecdc48be27dfd096898f73c40ca3377a63d4ad836 3046022100ca8381aeceb642e3aa2f727c019c208639330102673922c637239dc6164eb13b022100aae8f870db6d22b2dd66fe35a827302c3cf42d2023579ec1f6e6b57395f1f45a 30450220773f9397d6a0ba73a4005d7fb50ea41af621a7d6418b3fc5c6d61da1253ab4b6022100d1a2f618732303c51d2053e036b64888b107509ea3fa2b46cacaba50d2335326 30440220569b470f0872f2ecc4fc319553c5cec6a36f49d8048197a8784b011bceecc0330220185455b6ccd617202eb776ae2673a568a6442567a4aafe4ac5ab3ac168da7dec 3045022056cf428855ecd39498c3bb69109bcb3178912c654e6cc209e97cc65e326e76f60221008f22084ad4e5e2a71a676e5a2e9b6a8dcdc171901a3c855b27ae691843c1331b 30440220069c1d64aa0c2aab489d653e0468a95a56f1eec6a93c0db1f5b9c6560bca9432022039459e42ed1ad2c293a05af3714f458c819ce6761fdea67222dbdc6ee3c148c4 304402201f3b97a0f32b623f9c512a2c068aef2fcebea3a0121a860832b291498ed3a2f802205707fd9a8ac5a637048ff880c472ab1d7a31db237d0e94c9a71d20265c5342e7 3046022100bb792d245a88ee1faeca53a2826a4074c700a8540a50e897faf8e5f971883e7d022100da15aec29c8ce0747cde7b6c32c3fb7c6a5b2b87097725b6ad3026b7d19d0888 3046022100dfb770f72901b48299a65d452ec2303beb60b26d05739c53afb6b956fbc7f5af022100b5042ae6c0fd42f2f527d7f1f060d407fc222ddc13bfd00f21dc9a98f4d3e1b2 3045022100dd02d5d566e6560e997c17949de6ed4eb9c3f774063850a2f7f8df618aa00ddb02202aed1463425b9142aa4bb72b796e263c4fa2c571f2667b87f534f3b6bfced709 3046022100e3de0b0a12e5db2455695bd06e038686440c1a8f4254e0445dcbde1b51bac5f1022100deaad7abdce3b69d83859eb2b474c25a209a10bdf445e306d88752899bccda2d 3046022100efa4caeb51ed620eb2ea01f2d3d8a1c6cb8f509ddb12fdda486e87f80fd14b6f0221009dc80ab44ddfcc20730b748afb50c7f4cf218b84747883b33f5bb3c17a048311 30450220653251411b2b60294375098230d4791278414f52157d7895e38041a8b16cbab202210098d2672ff90861cbb0cf09af8d1df321bd6a6249f113ed2c0ed53bb46a87f458 30450221008e972c03a6173f89f6ae2a295403ceb995024a94c5844ea501f991f4509c7f64022005d9f1caea012a2e6bdce7b8efc6cf267ba5ec6d06e8269cc3d873fea06c79c1 3044022073cbb8f4470c5f2da5b296ee72a768353a0ca6767cfc7481c9983cfe22d1cf2202201cd0ec95743571f52afdae11fa2e3d81f17585800476b64f3c1761161cb1ad3b 3045022100b9bd36ee4d7ff74873a6caf29564e3aa43fbb11a22697fbc373e074f758a7ef702201c750d2cdf50d62b9bf17e2a1968e30b6837f045429c77cc09dc83b000807531 3045022100b309c80301dac48ee7f554a8da13dd89cbb63e52a837c84719bf12295c9f751c0220455a39349fc4445caadea07eb20604ff8d524742605a8a318bfeab87900cdf67 3045022100c1f759d435fe9054c941d028a77fa300ad0863b3db5736b5d2004d053acf340e022005d989519c64870eabf8efaa9399a01900499c5f1eb5d608ceb2197037e43411 3045022100dff79c522f1296dfc48bc2f8dd87627ab6df62f77595a70d0872e0a24c7f671802207326a713b899e84563e4a88ace3b0e6bda7a3f3dcdb6be931f7c10400fa8d938 
3046022100bebe9ab6d7da5cd6e9fe5ce89866da3037dd47214fc6ad2e2741041cfcec75a7022100e5dc872b07041c8b7bc5d39231ddb8397f257d87ce3516acf4670db2d2006c38 30460221008af93ae1934441543e112add3da0e5b6eab5e2516c98ad0cbc254427ed2399f7022100d11bc039084234d11eeaba817ec684d683ae0d19c7e55ae532d10a8fb01f5713 30450220715abaf6be9d69d36976281d6a90ba6d195d1ae1eec8a81fed2f322f245d9b3b022100f20edd3f80b2b9bfe2eaa103a57ac8b9eb537927cd4485c1c3e9728a6ff2d7e5 3045022100ae2f1445f2fbdc6d6976c874f32ca9b209fb79f391e62118777bfe2574229a1b022064317e01f8ca4dd658084af9f8394dac1d148a5f335c5f5a6b64c413817ddca3 30450221009de7d90308a38a8c872c2c39de57b45221ad86087eb2762d9ff88fbc1f6def970220356d148121e3ed516028b225b6a2ffc2b1264b6b0e117e672d43f74b5a8eb003 3044022042df806dd4dbb39010f2f5b97c6023a926ba7517f758f955a1a929620505eecd02200a1f09df80446e4af92b6ebed9df8d6df7208251b8b21f089b3534f205fdbe49 3045022100f4400f9c54d08af714d5bf24adcab9296446f7353d675db43d7ed053569384a702204b28ea0f97912a156b815a5dc324e9a2f116473740075a2046fd7ae95bda1195 3045022100a6ee514b709578eed50aa01d4f7e5fe3a1a7346aac79f6b051fd006300fafe8c02207d28235a10f1210c719b2dfc5e23fc30073e68eaf7cf9c408cb2da0ccb90c579 3046022100fd337733cfec468632d99ad40900e1b59c3c8804fa7ea413768c3574579b9c28022100ff9c92ef062501dc86d534587f32ef97df51d2d77db911dcece18967d8e0350c 3044022031f2f94c3a33b928fbdff7a8bcb8773c83d726fe76fc196c5af34779ea4f18e802206d87a82bafeb94cb8e1f6176c2c2f305bf087c60bd3e584f66b57f3c2bb537d4 3046022100a52dc9b8a6cbf175f7aea5d3ae4a2b27004cf766290c48f4ed58e3032f35f724022100ae5e4fa1e614c0be7b6ea7e260e43f56e81cdc3fed4290fafb760075ea2c5535 30440220302ee1da5e8a166ac8b50e681631487cf19f6bd6689b3e304ada3b8b6943c79002207406cb0202f4b8caa3fada17bcc8e9068db7178096cb63bbbe4642d450ae1434 304602210098fab7c1abf4b9c03bd0307f4edbddc35f39b6309af4bd5e29b7e821cdfac6df022100e93c54f150d0e8cf28f2ffb335f03314b7d57e43024c667af6e973acfb948194 3045022009eb24d7c5569a4db30acef2fdc691d9cc9e87cb5ed6ff4f2e19159e3fd9bd94022100f9f124a4e9b0b8734a2ab6261d3e32c97c1c04924f557dccf4a1dfa5e3cfb2b0 3045022100c289e7c54adea828b5c6d4b3846fdf472703a23f68af53caca01eeb1fe9bf38f02207ee0f7bc20fc760cf95f7cc4a4fa841eec5d1e6ff6864f6ae21d2172b1ab8905 3045022029bcbbf15f1e9348038e121ca829ae837cafb0cb4993574c2f96350b486cb7910221009a8d1076dd20591eae3b644bda3fa140aaef6e7951c2d9e6147deb5be3236d9f 3046022100fdd60f902d737cf7dd09950ee1f79bbaae43d09806ff092c065fa6c8e0bf135c022100d1750578f17cd539da88c2f7b8f6fdddff20f31348f99f0a8ca871e285a58329 3044022057addb5bebfd5513ce47e54889b132d5ba1205ea0a2f08f8c20ad8449f9b297602202fd87e8bb03aebec8f1f553c40a5bc85fb7e156bf3a3302e4d2ad7c20e784b4d 3045022100b89ded9fbd7c8f77d5030d81df6f3d8d695888543e86859640abb2c994d52a0d02207dedddcf0639016feb28690943a9549b662a754dcb5b6c2e2ce9ffa8a7d3dc41 304502202eb52aca567ea374ced988cd74702c9b30e97420a42b8ae6fca899380ebc7d650221008bb29c00959997a2c1d3d4751e0679803544e220759e81b30257aa310138def7 30450220285948a1264e94ecbe43e3f4a60bea1cc26b5d1bb36eac2f2d0ce732f2b9b143022100b22dfa4f3a662a1d2eb0f8e88e1374146d0e32c0d50df38141b6d04e1185eb0f 30440220465da2132c4e51f8d5ea80dc1c4028744f8fe5ff66bbd060d673559b3429505c02206bea83a32a74788eae22c66688de80cbf011c23148d4bcb40a2f25708177edf1 3045022063a289c0cfe4b9c75595fec37b62bc94f5fa30ba2ce63b62c192e913af6781f5022100a425bf4e6e76a6c8bb918be5b0ef707cc411a5855258b5078232b7e9d0f2e754 3046022100e84c295becb77596c31bc84af0e76c77108edf5466a2063c2df1df3c86326312022100ae8cd2c145e82aeb4f146e54c4d16c6a7a1e59cfb54e8d881efb5b922dfb43a0 
304502205ab086eee8f7f2c158c36a40e53c04b30f59d67b85ec92838e4ec8235f3e623c022100ff297e3cd1c1e47705f4bb27523228fd6f251c291cbd253ab070fe693a858645 3045022016a01ba04a1be103122a3152be697160405d591df1c1e5c30f54169ba8aaefc5022100facf7990e032c2a82736a93026d8afe712bcdbbca488bb981a5ad52ab763dfe0 3046022100d1435d5477348dea03a16145f1e34a1ec6e5ae9b643549acb76f9992b3462623022100c912f06ae17b4ea4e9a1258376f4421f1118f12a0571fd2745ed874ae285e361 304502202bd866a1fecb16a5dd39768c33a92e4c8a474317c44f43bc68725435983ab9b20221009dfa381a78817a3ff2d2c6cb3734b1115737ddc8a9ede2806042e333d9ea2e24 30440220060c0a6e077f80ff7b93e946ed7318f1507a4c8dd719ca7615838965e7976780022022ce45326bbfe0a26366849caa5fa81940c374097fc1540dc785b022b6073929 3045022100e3828a38f700c48a3edbdd18aeeb6d4148bc6116d4ef28406a8132b974241e850220259e8ac39f9724690ad70d0f834221462e795cb917706c3c63af9b91d079fffc 30460221009268b0b2a8d76bbe4d89fdf3039d278ab4d335387dca303eebb5d99b8084ec38022100d51c652152858b049aaa1b7d333f524fd8525ec9e73847f2f39cedeff2e84468 3045022100a207440153f6593c6b548c888bee903a60192df60a65f03d07d76713b52a3804022013a28dce89d6def91e99cff5c5414879221d0445e2bafb171b9c71812b6d33a3 3045022029a0cde463b10c0dfe7f08c124977c6cabeaf8c1502071f45bab03fef47ef6c2022100b030727a585ae47074537eb153b66c349014bd3212c9b089ce509ab90f7318a6 30440220573e9751e39789be29366a92a2006466200d957ace11f64179e87803f8dede8e0220023c21454dc6a5feab1ae7e5d8c12af0f9a50171afa73d60757749d0d159410a 30450220311e4a65e28aba07a28ec6916968371ca64b138653e1a75230520a0914999152022100bb69ef500c99dfe8d6d479854b1fa108f91302f25ceec394379844e714e3e642 304402207c9d2aa7e957c89452deb42df2ca31021532a525edadc455c6401e2facb9e3170220222554e301f3db0d230940f087afcfee56c712ee860b12feebfac0425dc42967 304602210092bb71ba44706718766df07dbba5dd50f77583ae786742492aefa2e874aa5baf022100cb548dc6cac57a08fe28191a3de88c4723fb5b56a5679d30a4f3ba138b4149b0 3045022100edf1849810fc795d6a1e0e67fa2858bca932a5708853bea7135990d44d9ff691022079b4ae5cf503be4a78c5c8fe84e28176bbb1fd17888ad002f297288afbb71fba 30450220301e057fa1692c20f59bee4762d9756697fe2a8a136938dc24cf14bf1443dbbb022100b43727f63b438425d55a453271d19727c5fc732eeb4edcce863f5477670f715c 3045022017e1fe389b5e4e3247cffdd2064ad01b063aa5d4cbce52dc06ef979b601a36ef022100dd29079a56142450f32f50a11c746a07ee201983fae308b34a296fa1cdfa0ec4 3044022020b8ea91cbd4cd37f85f17551076e75c23fb7b6c96312abbae83118c66a92a6002204577570b995bcb387febdab48839749fc49053a9b4815f97d254d9b9da276e4f 3046022100cbdebd93ee3b8753dc114627df92f863c2d9838d90299a3f46c3c23850cab3b40221009fe0f881323aa7d574ca83f63c10c5a5da6f9c69ee023bfe103918c64260b304 3045022100b098c1b938b58588c84be38547b8b5fb79bf834602d67f5555d111178de012e802207f78a9a4deeaee80834e829d86e33747affbd97377ca66ede15988b09ad22021 304602210094c01aa207498e1de7a65bdfcaec07907c1987ffdacc244b5c238ddc3bc6c36b022100de1264426c819ec4f7560c71bf754761a36bfd19429e25f44d8e14f4ec8e845a 3045022100af4fb5d50122eb8aab9f556337826ddef7e1988f25ddce3b8ac39791b4a78521022018b1a06f899846d102d657a36e70f5fb9bd864bd15795c4c8beda6f09f9cc7a8 3044022001c43dcdc2e01dc4fb736ec5709d48bf9f269d0044c6da7b90eb8e759c5554300220779a29371047ac1a821ec159ef8d80fa34c7285561d5d912bb559b0856bcef5f 30440220249ef3f8936b2196561c12ecf76921e6ad509e3e67825492ec84dada0848d7cd0220597361b5f1c42ce96f669c8fe2a44feb142f7b23dd697062e5d10a6013a7c40e 304502205bdf5461f90936d9feef2b8c0cbdda340eecc0a3cbe9ed4762db4ae13b7973ff022100c59101e53034ffb51c378e903032433870b277296c0e8a36036bbd39e67f9270 
3045022100ea8fe9f5ccf19682f8c749d41c61647c97fcfa3c63681086cc521637258188ed02206cf46836f66d4f3880207e21ad87f9a6385af398b0dbe05e9f86731d48d6a4e5 304502210088f1f002e4366247de6e2f94cf32e74b51a2dfac4f16ce2bea6001296b04b1fa02206c0db4b438d724a21a3d97c0937535e6f59587dfdb96fb38dfdc50ad7a552b6b 3046022100af64726f409a4cf8bbf8f8da3bef27cb7ce0b917439738f6ccc0a4fc7ad313150221008653442b348ec2032f090d9ef0c1a8603313d01c1c3d5c0cad29104997f0eed8 3045022100c83b11eaf7d9d4f8e87b97ccbcf48fd8b80fdf7e71b3e46bbef6947e286bb26d022042857084e2552383300d950b8ddbbe82c39db17d1fa094ee9fde2fef3906d901 30450220276c1d43963d4786b44f651e9b03ddccc6201760f95325aa9c03b89c16572924022100a5d6a175707bee8eee41d6667b2183c8c875d64f56bfa64701bb35ec9ab4f82b 3044022055f80fa73d2f1908b262f1c07114324df05aa53c5fd7aea5b9adbb79553bc04c022033104002682da375e35d9894c7387d51f491dac2a7b66d96bd8a74dbe5c315f9 3045022002d00735ddde6f1e16a1a5a2e3558e947a98a0f5e8908a99e8d3c157b4cdd550022100d59866464977ee30ee8378912aa770b294df8db6c57f6e0da81790f7ffe6765f 304502205da89e8377c1c5b963958312abb00d3b9cb6323a13abc104b18823c231d10a950221008c0d25b632e31e1ef122fb520d34f80c2e25b08319500d590eb25ccc0053feeb 30450221009894e42a4ea2431ec609754f10e34e6aa8a7f9c1775e7053e5f2f6f6684495f6022052dfbfbc4760b1c5a38037631ffeda1b27523bf2408bd0d27b9753ad06c9d1d5 3045022100a766c93370fb6d6e62edc89e013b95c66734492032962fbd6b1193407519584c0220372dd8e49b6a72e6c1c7f843d454dd1def616d0a0238d15569db1cb5d806b0a4 3046022100d82417a34d5311708d3784b6849b9e0f58ee6ed19c150a23974c37be48731875022100b70374a030f819226a12ccae08e5e0e3337b7554ae84ae563e7432862947a06e 3046022100fe594542fc5c616405baaaccf0254f473c17a7bfae7f21d4733508d84a9b6075022100840e2d2a858f48ab98875cd209f8cf1ffd088baef4add9f6e409c695aab5cf9f 304402205fafc297bf4aa71a7232eaa68fd10470fa8e1337c285f37502682de4a5b6107d02207b0199f7d2c1180a2fc7734f18ef0ef19c535d77bfa7ce48659feb151bfcb611 304402206d379db1e7012bc474970f193d4675d77381a349f61a7856598e2555ee33b8e902206f3550e44032a2b540321b21b8d699f6fa18eb69129cbc72c809bb71c8f1516c 3046022100ccef7349aaf0bbf0bda3f9bb82f7e8870ea308b9cc5f88761be56c67b59e8b70022100a8b549e4e664a1c36f3b6c2c5fa0191e924f6f25092b5a58cd2977ac381f9dc9 3046022100a70d2c486a651009dfd54b77eae28406e0feca23169428634fa2cd52ea4cf4940221009c6112f0678cdd34367061f6cc2c8369a67c1322a0443d42214a9898dacd75dc 3045022070a96fc34c3ff090ffac57aa2a4e83a81181fb97ed2328adcd4e566cab0a8b77022100dd2ec91ee947ca43a7d88eabea1672ff3d927cc7dd98345f3dd6e5a017325ba1 30460221009a049d665c2bf916abb8995bc68ad06126942a705ab3a3f6cb8426eaf3ecf2c9022100b0ed9e340bc4451f460d7a6b1c2f70b0c3fc6cfcb159f18b4e70bacae0e2fb46 304502200c068ddcc9cc3cebb53ea963cffbc7eea371df5440bbf8841e73ae42bffd599d022100d1667d42689241d4bc05a4d70bcbf4410f160acd1a9b6cdd2f26a4f0721bcea3 30450220363e39e79c05977c263d141172b0e76f6286a01b2c739e771f3d090bc73ad8a6022100effed687c59f1505e7c194b5e1207d975ae86899f93bfce57ad52c4fd22f707d 30450220301b892fe0b0a81fa4777be4af83366cbaabf2fe70edb767f76667f71fa58e3a022100f10ee42044febefc1288c439fe22a3c943e67ef8d88125da2fa8b694544cdf99 3046022100ca0bc14f7333df94a17a5fb6e16b83e4c92a9678261af6910a11250296495acd022100d6bf0e26ffc1eb659a472e71038d4dd01e15ac5864c0a86d41b606ec19133273 3046022100b7e7d91b8ccf6768fc68ed71fc59dbcb0181e50b1523d7196f94d9282631af79022100ade8f2f13c5272993bac02b2b35e2bc55f94cf860ebfb6572f0bdfb9207916be 3046022100e815afef36f534702ca2a38b0d34f7cadce2b271dac4b467dcb3856a064a4c540221009c2c8c500276fcc1b177233a95ca29eca2688c40f65bd5de6bb6491190b2562f 
3046022100c2a11fdf82bd95e2a6706c9af758bee53f970e395bf999bda84826fdeacbd3f2022100e41bd6e069fc30917ef41fbd5ec16f1d087eb7496de4158aae576dd0018394eb 304502207ddc020e30d8827dd6effd8bb3fddc6be902348b81856cb15e8774b8db299a73022100855fcf8a54ddd1a5beecd2d7dbdef8a09bbf57abee215e5c1a37db1f26ae5187 304502210088f16e91537d301182d330e80d465b65fec9712305a45c7b7b2b6e42b5d892e5022074bd4f5d7f7d302960686b7a7b315c98147e1a5ceda3f99a32aff22c5fa5f22c 304502205be254a0e201650a06fdc0959612abae18be800a5d47a3d75d49dc8bd7b90b09022100bb308dcbcacd2dd8b9a0c8438d2f5153858e93eb39bdf6948208ba59e4fbc810 3046022100d3ab1edb624b9efa1f6a8f2163cccae5c2d4bde3e26735d86ea08cfcf93413d6022100e562def705f155e6cbeec8c4a5b03d77c34a8795df6589292f9e9da5b09d93b9 3046022100bf9dc3c62999074781ecb941f51bd8e1fba2ecb6ccf770af25a252989894c622022100c490fba6c981077be08e7ce2a2a59e237d1d67963cbfdebb63477da8f59166bb 3045022100f46716d504bf4130c982fb61e9b6b2693a3086815ab4e785499ddd55d6a958bb02203cd5365d4aed7a3705f77c3db1da421cabf84cb9523206221947f03f083f88e7 3046022100dc935c034182b505eb64425e9ff834c4a8581866afffc1f8ac817df871073769022100ad35ca72fd03b10e6aab37109bc243cb7838f469b921da338114635eaba1d130 3045022100fea0636c113385111af4d40df4468a56500b2157c967ae905fcf20f13e4c679702206e99dbba1f740f0a56b525794956df97d8a3bba2f6df857a3e2a09a0ad164c4e 304602210082d90c7babbeddb2cdab9eccdceba1b6e53bc9b6806e1632278485a29c46a370022100d93c71d92ad3a336cbbc864188dcb07109ab06c801287d7ad3d079eb6d76355c 3045022100db26a8d1a4d28ff7db4492bd543b6d7ef3b289d564a3684d37e5eadacd04a91e022020f68c32191eab674aa4ece5a15ce80c07e9b3ca56a9b2c2a4d82148ac1a8126 3045022062a4c890aedd0ab7c7abf58ec346d1bf139eabfed06806885d7b7c2fc073150b022100fcf9bdc44d241665991372c730c7547d25aa2f4f3b7693b03073f17d6e554167 3045022053d72fa868afcfd288e3b44ad40929c9922d033c079e11f5c2dcf9a2ecadea0a022100e5ef3a924a319f28124e94780202704ba637d1e68e7fadf99fb75e6a785b8c02 3046022100ae3d23dd1aa956afe96522d686b9ca56af31194a8d25320b076e33b96bb1dd22022100b7fade9bbe334664fc0727b92a86ab94504cec0d6d4632a8356671342636f020 3045022066b2c8e4919707f5e9644f747315ca320a0a2a17782ee2e6c110550f369f778d02210081f26ab2307302614107d4469451cbbea54f1909fe3c9095ec6176bc3b92f764 3045022100db7686fb2de740a114f521a3b66177507d62b78976750ce2fdd031d70fdbb10a02203b80b8077a5cd10dbcf5886e4694f6cce3d7d674d5da885a223d2bdfeaf094eb 3044022027bf704332515cb9488c76e5cd785838482f82ff35f5c1236a87956f84065887022018034ab2e6c61b4cd951bff6d569b9f5e07cb2ae98ce5519792a16bf6bdca078 30440220226dfae7fbcd28f4dd6d90edc470ca72d1f4663337c97a570cbc1cac90c87d870220539a32b0e2b3dd743c0de0a6a9218ce5a90e689f78ae754194b9bb0a0afbff3f 304402207e285e1676d7fdd52d45b01fb293c3eb16461c06d9e9dc26945e10b9507525b702207d39babeb289232c6877dbda43e4a9a8409af75d8749801bcbb8539ba26f2075 3045022100bfe65f3865847a087ea5d745b2c2b172136fca308159a9685b1419da429e3aeb0220046069743aff4735cb29161626ee2186928654c9f0b39703fb967da5c44eb044 3046022100acceae8ef1116401070ab71440eabf2f8043d4e333a3d2c86704cd2951207535022100cc3022cdee702a2ae3a080c849e69f1c77e3c3ed406503a25d75daf9683491d5 304602210096c596917668b407f97bdbe1fb0f4c0ebec1aafaf6417ac06ab269079d2dd01e0221009ae827c13a07c057d6bb4c514cf948945fd9ea48e43c43c93f77c0d1dc7651c1 3046022100fd2c505e03c02ebd0fd621deda99bf196e1521de7c4734f1b551913444e17b4b022100aa2de5b5c0b1d58087da50f3c4023841af1b90b6d16ec602bb781e52f2e04208 3044022048a3c6d60e83f5b0bf868bdd9704bdb8fd100aacde6d92033b518f109de3bd9102206c2f24ddbeb025bae0beffbe737c7840ba51a52b68677e52cf28457abb123039 
3045022049ed9da0cdcc6bc29aca36a1a5c3e3501c714a543be5429188cb388af6cb8c4c02210090651328b436805297a14865090978d49229c7e13a2e5cde3facc5b9bc528ec7 30450220158aedbcfcbff3bcd2e1f87012f05e42c74e73e90237ac86844e81411bbbd3d0022100b32fc5cb358d90bf48ab9a0b5bb4f2a17ba9f1383cd14168852911623320fae1 304502202a9d4567cb77093a9f45e62491caaa27ffb90dd613b9224d10c78a981076f7d4022100f14c3efd65d6e989e3fabb815a18041c7ca588fa1ab330cd9fc7d6a85c236433 3045022000fae4af44ba93e5740ae97ffe7591843c2a909a991e4c73d2f595f71c494dcd022100e5014a3d91839ec0a31d1b683b0b2667038135924df272e7dd05a0a495aa903b 3045022100f93226c3736e17c0e7a116cb086674546ffc0fcb09e4bd65bc7cf179b372866802203dc0d35b14c637095f7e0b187f054dee51c5c8b0a69250423825d05a8862655a 3045022026760c0d054a3a353dc3595df3eb4a4ee19efc382d5142fa7e1a253278c2c3d0022100d14ab6b59b86e72206e373e3023033c52d0159665d71ec43146bde4f086d7e4f 3045022011f9e0297823ad19e6d79b9c8c56ee40450e567e5a68427c6dac220f7809491c022100b2a96a27f7df8a49aebda5c798e9fa693bf5f9815f1dcf48efb0116fd89619dc 304402207dfd25925948afca5e6e76b8b53ef3744b991a40bbef4322e296cdf9412c080a02201d4f8d7c6ee7748fa06ec01c6f72afd74b352dceba584122149f0dd3aba29998 3044022002227ce29e7626c27e020435958d632269eb2f65847bfdad7c080afb03e4791202207a09adc5fd322e110b8e0b9a15ee7e749d71b1b6e7a2b0010d8ef0b4e94a7e96 3046022100a59e2ae920c150cb0594f19e226950d1aefd0ad6a2074c1afeb318c1fc033cb8022100dd6631ac5036123edb9e40efe157c26b7b7feb9d1710b3295f71f0ed388c11b2 3045022015d32a4f458126d0045b0c950c26fdf3588269ce78fd2dc26342b6a8b574de68022100b8bfa9a2c838f9d1de92da39cd801737ea44624f2d5d525b7ba3b274cc9e48b6 304402201dd6e379505a69998593ba59416f5225f4b8e3a98154d1d4fd08539bfadab1af022037b59e929d9a23895df2975bf74c268b24ca53e3152c4252638af09f40240057 3044022072bde841b7cef382e4bad5469d68a933ed03dbb5ed5b9d7975a423bb92179da502200708f028770b1b71d3b55acffca4261c9e71a1890ea6bb92eb34751b466e2715 3045022100c244ad4ee9bc25ebed1588ce1758db7944257cb18a7fa43285a8ed4bc4b371d102200d25314a22bd1d2caf869ad50352364f75e5f95387fd73768211e0bbca3526bb 30450220667ba86e3d66e3a07ca18f2022eee090b20793a2253455f536c34fad62a3aad9022100aff561e58a9f49f384692921145b04de2cb201d663c29cb53630ae01cafe6785 304402203de1d2404728dc1351aac3478d509ead8c186826d0bf300f0e2559a5d3e6f20b022059be84e20be72b3f0e6da927fd2b0de5e2f7c721c1d029c04c0c1e79952ff7b0 3044022047e7356f849aa0c6215c4e9f73e1ae6961c5afefa51594f16efb01757cf778dc02202f5ee9a6e34b29476cbe100daec54c453a26056d65ad0b765dd1df503de6306a 3046022100859dc325f4d5d5f7f1ebb4aaa6fcb7b659f61f9b1f0aee9eb32e39f2392ef2be022100995a4b22738a8db8fe2b066837ed24d0c76ea29b95e353fef8621d84d1cc3971 3045022100d0f26aba647c0d5aada8e04934ca440dc68a1f29e64b48bcce780b78a2e0f751022051ac3b06b3fbdd14de2666a46cc7713850ab5cfdfef61761236b1133380429a4 304502207d910acc208bbd5e4ce63377ecc0ed41cf1eca71af9558d9b1e5de7d5daf064702210094b012115b5062fdcee70ab6bc502e3e444b2552f0165514c209ae784b90e8ab 3045022100d769e2a888a7fde1d179feb044447f28aa1eb1026b5f6f18fd2d3e4cc9a9d94702204474811bc13f6c3913290130186ffb7bc32b493958312e984f5d1d8ee7b4c69c 3046022100b8a22c7fbc0ff6e5a0605ac1708a24dc93c81fb06e513c4f31d5003c02e924d8022100b66274f9cffc0fe53a91e145e063330b7fb634881dd9a077e94dc3cd69668623 3044022071665fd2c5ee278c9c0c0de74f8748cd9aae40fe9ac77faf6419992acfd091cd0220538775bbc8a933cf011c569dbfca1441aaf3d4c02208e2a57325a7b3701421c3 30460221009eea1856078ab3006543389e73556c9c23cbe1ac31c60e85c4e38561c129a5a40221009b956e4fe581a2da67131958525333ac978eb4edc4e64356f1e1a13d43d2bc4a 
30440220198d2acce6ec69783f9970f07e71b7fad0c3c9525bb9404ac230a0740e12b1ed0220776ebae29c17da4c10588a2d983e3f707f5feba2c830ca6d48ffcf05badd804d 3045022100d94d8c734bffe1be96afc45f5edcace48f1fb414ac0f2a4d149689c7b4f4e47b02205673d1b0370ebac3c2246a2c45acc606b166e7b0d2cfbaddce7a2bf8d10abef2 3045022100d6440698ee1681d0dfbdc5d272c7231cbb50a9b57500174683d0ffdd0a0386e9022055cb590a1e5467cdb1be90620653db93f1f262e118f37921677c8c6c446c3446 304402207b290a949e093679692342ad5af8b0a8e8bbb16c37e2f138652e01a10035f31d022028ef71528ca5a76dfb801f9651d656a7b664e3c1a54f190ac49176a47027f08b 304502201881f2a5843c225d75223182e4354f62a8e01223556c3967507146b0cc21dc960221008719ca1bbf95685641e4b12ec7e6cda7c63a4e541578272b2066d071679f3923 30450221009747b034f4d6fb79ec51078e6ec0dad7938a21782bd897b936e300b2bd894c49022038d372e2c30466cb73705fff8a7da71e0defb63a7bedd86277e52552395e86ca 3045022100808c4bcb648caa1592c931cd55271618589da45e986e0aec8518c92085f2fd04022014f48597c454c36107cdce9de77dddfc62375868db8501091d86bb44b1d75e0b 304502201b742a24f0e65cd3ee4eab2d6626c7cf35dbdd5d562ee4ef20bb69f9b4dccefe022100f466f61ad9c7e7f5fd92ab1f7da07b810bf530cfa9bfc96782cfcc154d6df052 30450220434cd004168fdcf485f6bb83fb86cee97184aaef421c1564eefecf8072e5df42022100e7fd96a2a82cdc638f3996fb9e15a9c1c68ce4328bf846477034abfad2fe1913 30460221008c61aff70c217350155c567f99dc5211980005dfa4626796957efd4530161c6102210089033a477f63dfc04ac7aa668535ade543642af296da977f9586377244df3b13 3046022100f9c16738eb812b4202581c7b3ce99d9c3359dc205f8b23fd809c81caec5b4a81022100d70f2c067e400eff06b824e2c3f03d925097bda098ddad23fbde31b9b55e452b 304502201844cadc5c9d1e29d306686cb0dcbe4dca6ba992b5c5ffdb6be5f05fba79a313022100a2994af83935f4b6f23a72b1bfdadde0456a7fe5d345f4651a379b5e1f0d01bc 3046022100b24f0a42cf731c6e46ce87339915e4b7452c05f82835d359f9cfdcef8bf931970221009d3a13cd393818196cfd2d50ed26107f60f42e54bae7b808518f3b1830927013 30450220396e540de86004474d8cece9409b1e978e44a62aa1d49ae87b19759ce6d08eb8022100f6b4dd36a85daeffebd994045659ac9a37ba01e37097030b15102183785c35f6 3045022100d2b3666b877b0ac6d31ad880c23da866a9c382a19aa93c8c9b1235d3f5630b0902201a065733ced67d4a6e89a43a7d56e421546297685cede761a2ce33872da15c98 3043021f5ec6da8342e8612625afeb813b30049ddca8c21e2765d87e69b94199cbcf28022014c9ed94c1c1cf8d7ad59b0d590c93370acda3fb5571f0ab15de94277d385794 304402201e76ba18dd20ac54a0c573241eceeabdfa61f5a2d7721ca140044d7da52cd4900220023bf491b16f52f43073053e9401e5b9067779c894e1c12f1a8c14f85e13565e 304502207158da712ab94b2ae160806937362428e3327d6b4ca8dfaaacfd6a3278571a94022100e8e6edd1d3e72c303f25f91bbda587d5860c56b2d1db759f5fc87c7cb787d418 3046022100a354d0cd2d572b728d0570a9c7e54584049a97cc44ab72bacc43c4b5217ce4b8022100f4b3a6dfd606bc4a6ae20b6ecddedf43f2910a5e9ca297f86ff2fd780e914b39 3044022043142da5b0b77bf2b3b50e5b506ca7388081f931a9c9414e8c235e74136a7cf702206359ec30f3a8b32bdbff5fbff2c5618b0f892ef060bb478c25282e67a1c039d9 30450221008f1e25b0876e847d82fd7f0ebf9e08887232bc0fdd291ab9562b600fe15588510220414ed9f6e0e17ccc6bf453b7c709ca55850b0c5055097a7aab1dfb08caf173ea 3045022058fbaf234f21611ac1a779f6e8bb3a1674211232b03a36bf9c9fd644ec72d41f0221009b92160ab6a38aaae63742452f6daa71032596b859a15f5943c9644f4e053566 304502201cb4520dbb04a4ec2fc6e2aafdc40790d7242a52d6702871d819b31c30edbe6d022100a04c095450bfb6a449c5535e8665bacdfc044162a72f1a3f6fa303ac999fd74e 3046022100cb3e8d13bb701208fb79cdf0a244856f222803910b0248f24e2ac64e92e321b8022100c109b6db55fad2cd9a455b9cc249348e6fa76c46a24e0289b64c17da518cd4b4 
30440220076944a9b608268e40784c4bbe7c728914ee57990763e8e9fae6fe2abf96306d0220367544fc2c36d67e9fc7cc30ca0aed6e2b0fdab32b7bb7e127f7ff80ffceb3b6 3045022003931ad6cf35225b7d3eaff750db4dda26adf5a411c1fa2e3de255fe9b8cfd51022100a4555a433b61614377b74cb614d696261bd4f8a78965f19d898095cbcdb025b3 304502202cb5dca6c5c0d32b8abd8b8c64935532c8bc35ddf147164718c37ab22cf196a1022100cf406cd8c04333322850ad5b17307694734564f1b6cf3690fdd128273a143287 304502206070aeecfeb6b536f9faec4e1c8860facfab3df3af19fccb761d34f437f4b66e022100ec89c905749119ed77aeb1e1298cb23742f28208443b93acf00435ad3bc656c5 3046022100b4821bdd22b1a15bafe628b91b1f24a3aa6e8c4ed98291a70939a2cff475af7102210096cba669eab8a65aa72f251d5b265f2dbe6d2af9058cfd942fdca3f26ccd9d52 3045022068510a7872633c4cff8551dcb96eceb7a63a8b44b2267792d38df39b8b325921022100af4d3391537ce7501ee0ffaebbcbb28faee7f5285cf6a489213fe63f7467f9a7 3045022031fa5a3a1ad28db1cbacc63bb92e4062a332a52448a2fadd6bdd94c82843b844022100a917a46c5031f8b95af1d435fd54dba79af28ce7473f0fdfa264d7490b1e932a 3045022100ee1484b655fb5221e3e6fd9f8d7966600a192ddd944576c1d2dffc457bae51a90220757981066cae9bc9c3d9ae96f0a00fcbd58323042954e360c41fd21ab30bcc5e 3044022002ad4be5cce5c757e460971c4ba3790c244b6f645ee5a8404cb575161eb73ebe02203da4a9f3667bea4cfea42994994b910672d5d4613f31fa460dd46cb4cea8210b 30440220743dd923dab382faaee4b24469bf5f22b8a614d7f9921f58201acc6a3f8e9bd402207fa8379f2ba46335d20523a0358aa02995c6b9272c4219dc7df5f54068afc1a9 3045022100801d620ec6bada71890fb848b2c0b8be8840576c512fac6cc74f1b537901a6c0022063377a671613ca519f73f9ab3a084a4bc5e9bed27f6dc69141b994569522cccd 304502202e143991110568fb332d8ac02a46db5d5f3b879d9d6b6374ed72e48f3268fb410221008d3bf7904604b2f1ef356230fc5c84f36d6712fdeaa7d3d7bb04217cc3a430bc 30450220090f6cc1890a531a78ba1b7fbb426b71bd44012738dbc8a920dfa2862b8f5c98022100ecf7eef9f113eb66360b1f6b8bef8984bcccd93684414befb20c63e57ebbceb2 3045022060879380ade2d1750dc5f9316ad4a84647626d3b2a6accea8941685176aff2bf022100cca90e0df6b2b42bdc976906ad059a401ca4c7e64c862f70d037781a4871e4ae 304602210083e54f1da2ca6ebac9cf2944a6d6aa6f063e6e185d5231a601ff3f3308abbfbd022100ab13092eb474c788707c0834397e391b36c55310f51d1e234f8caaae33d75eff 3044022012c908ae4b4b32307ba510d05a5ad1eb9ee74f1f9df29f0d3e0bf5eebfbd363202203c33d8a6e2c3a673f3e67016645f501564a3801cb7dc0961aeda44ae41b84727 3044022078c324c631565ac78567f2cfb4f85d02ab862c1e9801b661d56d1b046d57da44022049a2bfcae6fefcc835f90304f93370596fadc7eb446d49aa9414b3386b5defec 3044022001406a38db8a3485c0fe642263b3cae923482e67324b4e39cb40a3f1ea5f066502206c85d8852bc2c7138bd06f63fdfbe94895bf680f455f8a15f6a50e730c36e0f9 3045022100c72a619fc915c5bd360e2b004931bff1a7aff69d7e5ac9b96be04d6093f5c47d022002f5f2764fe17635788d3f783b53f870a0a2c2e35c30b672cecaddbe58b78c81 304502206680af20ff4f87b96910440a5f41f00c18c4308f923e49a577ab50d0741582b4022100c503229b24eb6179d26638ae552be1b31ddedc6a9b5e956536ca6eec6c020b45 3046022100f8703a5ce56df8150ee934cba6369f51503acdd3ae0c4ccb363bdd36cbf46b4c022100f54410ba7d35bbef593dda10bb2d7ff550500953064c6fd89b24cfe0193b9a75 3046022100f01144a1ce14ccbf02fca8568fdf850e4c14576ed7172d8630d4a46499b14671022100957cbf9367983ffef79f6de929afdfade2ac5200519c5700ff36c107a2a9b0a8 3046022100a86d7bd0dfe421916ef816f7482f723ab325281b4ddfbe0e8668a43e40fe8b590221008eaf86a9ff2c2e9876b3ba48e03233e40db0f7da75f8d97384a86728771aea50 304502205135d045391055123ede1208595af24f6d30fad48a704119e90d78b23a2bdaaa0221009feaf49d59a4361b3cb84f02b899f1dc372f4a9e9782071465ffe63fcd39811a 
3045022100f403546e6190565e875ae224106ad9f906075b617eb6cef1c731a0f32b03f6d202205d5486b5f42b5bc349b43afb936c8b2504754fff75633103f74dc072aff9d013 3044022027cc55bdd1639176ce35e2e5f59ee7b734c9438a0aa55aa42d309387025ea07102201ba940d8b4e817b2880f56c4673fea499028b5ad78596ba247127c580ce22e55 3045022033e73f7671f8f67d3ca6cd33de97346922c171b984c3e5cebc40807925ed6cb5022100a6a6a810e5bb266932e2d5374f2e97fc5307a9669c8b4e594e0b070795944671 30440220186e640cb335ccdf76eed697786d8543392c65ae28a90c1400ddd87689abb14b022078550cf0015fb2a4912b7414d1444c894ffdb87af7f510d0235301eb7af557ee 30450220234c0ba4fac04a621e6f7d931dfcebbc5587f1f3ac6fcb663315862b2fd2214c022100e2409fd76db4ef812ef31760fb8f097bcdc246604ce12ac5d95e7dab14f47de8 3046022100899ced6a9b27c875d54e19031d628b82699340c865a8e901796c943aab685820022100bfdbec468e4886cd47ed0b84f2b1293a5a8ff2fb824f4f8bd6c31d6505d44e9c 304502207007539bf98b902a926a724d96f901c6134aa4f1e52d2b7db2b65484dec2696d022100eeb7faba5341adf210951154b4566c98d7df6c53af2fad1d9f04bb99ee988850 304502200728262f658e4f4d9a2051564c245b2a27d7dff7b31aab14270d4559374c41ad022100d9f5afcade12fc129d2eb3e5b7f0f9704c75929d9455526d68ee18bdefe7231d 304502205b854cc089cf39860813bfca7f5c20779da1558d9221f272b177cbd158967369022100dea7f559c1415a160a00e6b5da1f2b1a154c1449b9ae4c2542ae309ea7b1b050 3045022100f59a95cbcc28f284a55b72b66b6355f6c660e2e8c75d4df96ad032847fb5eae8022044910f0e116041f3f1c6c2f5adb46c1e96996ebb5c05fe3e1d80344133f231da 304502203425bfda2c00977dfd47c50364e200190e1b710ce380a76a344680ea27a64279022100fc99be3aa4a8055e08d830ac0d9aa8063be9acb73a231295160d6665d8344708 304402203035e1ef175d3ef423cdae3388d2822129972fe07391e1ac0ba7851d155f228002205435408f396c1c21c4be11fa2899380649addd8671de8f93fafc4c2801c69595 30450220748ea6d7ce52373222e71edeffe406597ff7c0e319ca2bd3e4ca4fe17db03a7e022100dbd595489116d9123f2e8d3e37876e04a635ba539dd0267f72300a98d07bc2ff 304402203f8b54f2d1106fd939ef51da064db97f1bd7f5d4007bdabd6f2f2aa60a7351ea0220290e4b258f8ea86b59f76c5c19c3cc5addd019f5eb27afbdcb981bf22a2d7c9d 3045022019de8c2cc1808f0458984cf7ec7cc4b91f6ec4ac636789a427e4d32665c5e67a022100fb519fb48553c21b10fb2c96dbf9b0d6202d72d7ee831adb07cd36091e913326 3045022100d57122a321cf97f6f57bd54c5a4d6fa2ee3f07093f0fa4f8f0bb0a278bf36ba10220047e98890614d0da143df5ac4b1480833787e0c218fdb59beb6966edadcbf237 304502200b76650fc06f283f5c33b1700196afcf15ed97e96b2806a76083c9cce586f9b7022100e674b6906e1793803aa2c23d8a50dd118725dda101bad8739a962ab3975101b2 3046022100fce2b39e7e94b701bce4276822ead0b7e94dde5aa61fb382f5f550177e1bad0c022100ad244cf8a58ea83667cb26a8d2c3963ad91ff9b86d695e9475e67ff9eafe4665 3046022100df4ee70d347bef6ca7b04a8449590fbbc7b038f9fa44b3ba1c4ccbb5f91ce426022100bdfcd7969fe2232067f3b9f11eec2043784c97a6de0eaec788e6cf6839e7b66b 304502207840458ae8fd7d5ab27946b72498be8b897d569fd400759842856852d2313fbd02210096d436bc07ab61907dbf0f093a1076f704f2ef20b72e9197a61ce67a5eaed427 3044022037b3513685aeb7f14dda4a8c339ec0079f90d9606f038f1aabcd587930bb9a3402206016629f75a8c973941f51b96bd637c5c954211912f370988edac3a6cd985fbc 3045022065775e869f0cb4ce27b17334769287e66e6eefbd1cb861656cb9bfe4a2ca774302210084cb38594e8240f7e7fc6ad06561584d4879175c8af7bbd9e4f0a975bc06797b 3046022100b8e94699c6d2693a8c8a5c2cc38d57a5a2c2fb4dbdd5dfd9fbc14fb6eeb5307502210080929ba490f1cad2b665c151269f9595a51cf08beb7d38df389bbc16c2521bf0 304502205e4d0f9fc291d93f78f53be42532ae05e4236a02653a8407a25fc2903b12f1f1022100ce7d1c54c8f961ae7776b9c87dfdf42674d24317025f7d1326d1bc0a917ae888 
3045022029ade0e0bb1e909520173cc694a0ab766cfd493ec6e2bbf2c76881a5b28a4a17022100eb6688b79be4a52db32f25eb81ed9a958ef7c3757a113ca0e0206873a20ddde9 3046022100f3734461747b74e5c307005df427b73137fcf0be83d53f7c500e42ce99a78b3d022100e11f85a157fbd39a5be934d95d0404bac81c281d3cc3e2801094e7d8d9d18bc2 3045022100a5d79e305302fe839c58f2d6fd6d67023371aa1f69bb6fc31f9c65a6c4697d71022038ad7b9af9924bee152f3ac707a1ed2e3b9adeb7a2b7ab4915ecb49c3e99631b 3045022100a1bc72099357e4157816fe45d5b8f014ca163fc81535cb98d294034a1b8d5a29022058b1b2032ee9be7adf8743c541459339a070d5a7565e55090cdbb7d327dd1cca 304402203d13606d9e9a8f3cc6abb02f811ac52c25b4f58a0464fa7b2464ca49985112fb02201fc17c466a00b8a3847e3b0462f8da7ad952f2db2d193cbf5e2c51749b476505 3044022043feef886ecba37b68784c5f33b4087d41e1d07ce3440856b2b817feddec08690220404db09908f15ac5c1c3c2e382ce71428147903af9bbba46ed75f137ce821056 3044022015edc17839051307e1a2998abf5619c4e1fe7046e14526b1f69a5fae56367276022024a6e8c3b74afbcfa33163d1c14dd4d0038e8ceea4bb419af8e85ec5e0e277dd 30450220611977285cb253b18c905e474e03ca7bfd0e5edb162f8730d3a54347657c31a5022100d24084128bde5fbcbba31b7eb85f8f1b40e8fbb25c1f34e2d1630fc597be185a 3045022100af0a6a39b08800f5aa9512bfe7209a18620d375ccac97937ed9d9b3af5feff63022038b7d0cc8f26caf0500d3ddcc9129ec1eb89d75430da87c8aae71fbacdaa1171 304502201bdf3c59d446049d751704a4dc36a1504cd093c4bff99cf81b579b62be0ec936022100b6f91659538dcd3c48b7527d220bcbae44fd96cb177c52890608dc6ce6e58250 30460221009dd16bd92d5197eae3a8d190c9bf5bddc77a6353f14a205e7b7d6a01949d35d8022100e05cbc2aceaf62cacb443edd3e3fa8338a9563035dcb41cc97ce0388364eb175 3045022100e916ebee16a51081a3d6cd306d350d61eb292ee6ba182fce41dd728bc6631f88022018ea5f6e39dbc35cb9dc18d8eba9262c9e6f4641cc642884b1203ee5d821c56e 3046022100ec60b0e73209e6907267afe8a347d087bf11184df26a919307e506e78412a762022100b5a65f28fb6235b247275f32223a326c68e1624af8b691905517545255923e62 3045022100b83ba6b5f87de20b198b122fb2f64646e873640f74cfe3d859f611e5ef3caec4022052b45155e393b8f4efda7d3b78f88d004d23e7797e7117f5ba8d95a2401f1404 30450220467e37604fe9e3c2b1ad2889d270adeb64789877736646157042ef29a1b55bde022100f6675e4d050d6cc6f4976d59b01ad908d008dfea9b51968b727b708e5d214edf 3045022100a425067f1608628e0a98f14df0e7fe372e77fdb695d9707d9db1b1cf6c0f7d8602202323e03336da62e3ab1f9325f71ce7af8a65effa560e2a45c8f193bf28bebb19 304502205265ddbed017a474690d87dd80e63f6dcbeab435b8076acb0f2632e3f3f0ed41022100f7522415a80669e1d124a25cf3aa6664166299d0f32c0bac2aa2cc7f965fe84e 3046022100dd6b7806f3a40ddcd1ad3af8977f4310f5b7ffa445735732edd3a193ccce1b38022100f1f65c4910a84cdeb504c96c928e35b0c3c03e14a707468d4bd397638832c2b3 3046022100a789a56fc9023168555c1d4bbb82c5094cace7c499c2e4834f03601d0d24cdc8022100ebe019b1d272041dfec6bcbb94dd49332ad5dacc9efe96a8208443a4267ad60a 3044022018bc5217edeee40490eb9d1883794a8383caade8a5de74826995e7153d2d313102200b157f262ec811fcdb9760e82e78e80d88cd657e06cdaa9c578a6feb23d21ff9 30440220098a8d9b750a1d5328d99cbefe071c630a54320f9afdac5ff3e8794152c746e4022004128e264df63dfca37fa4c60c94843003c0283b61e641f83929915077ac806c 304402200c62e0f0a6464b893db5d91419b98f8af9697d7c60a5fc84e41d28671350bba20220749fc3417ad65dee9b5cc3814370717ec4473853131c225bafb96318b88822f5 3046022100e4a22c7f0a96c83b59b2bf9fcb94ef6e91f3abfac0b545c66e243e5d9ef5c0dc022100887a9ba7d9d7f2165a733bd341e3afbd8b30de4be002ec6cba260f4ff0ec5d21 3045022100d681a9bfc3e7467642fba9d8846d39758aac1530d17f274895b5dc3ccb4fc68002204983dd2a41b89f7e9f8a75702810b472234878e51cef26fb1714c0f3c9ffc17c 
304402204c6c7da9ab1c92994f7357c083b22164a2bce19af959a7103761284b7faa7f200220391fdd30de22b5e33220eb84ad85814de15ebd3afebd7a53745ee542c16e2ac8 304402206538446880f66fa0941382f14196598df45077bb68d7f27d6fe962962731c5e1022003d97b73f761a3abf341919e0711d4cd17ad8de084d11a7a255f6465a3986774 3046022100d1ed8ac6dd713a3985e94ed2a6621a0e949f33ff4bf8745abe6ad41358e9a4d6022100888f85c48b288aa36843f1dc564045ff567729b62d5920ff3ea611dfed083f73 304502203b2c81912b79196c399b7caa592a683128cf1171f2260a743156edc82e6dcfc9022100f03d34a0c97b1ac11f7d48e862a80d8808646b8748b534d126ab22f145bdec3e 304502210086e48ddea6ed24519b2c5cec92690bcb842c2947f219b2eaed06c1759ba2a16c02207b352d8bd0ec129dfa4af751ff034fc30221df41ac8636b775f9726fb2cb5007 30450221008f79419946eb2700705b23caa7fd2e4e74fba328e3ee1abc0b87b8e925b1bd5502204ee11bf017a96a9deb525356c7e29d6006ae82ad404fd1fbcb3cd7ad988151be 3045022100c37928f1e51ffa60fe19594e296a933a69966ae0a82083fa07a61d87ab7ae28f022029abecb10a8aa44070629dd1545ddc50900c2b07a1a0a89ac7bb281cf7884386 30440220636d1b2e887da34e9e57b04a5bdcb15f667ce0156b675cae12bf3e0543be4e9d022078df8ca218d08142a24dcaaea05b00f588a9b18a4ce66c85d366c99c17e367ed 3046022100bbc02419bee5ed73d0dfadf4f28fd6a540abb16086d26b18fe67df6b9518f0a6022100e0b907427a61eef33f9d35f1231463484467ce79fb4793f4ef89c60ca9a642a3 304402203980d47fb5451818c8b462a549c1f69d87915f2d72e52d698e75e2e88d8b16c5022024c79346bd093c92e6cc57551a38e803ebb9a29df0014568d64f3b3369ca4232 3045022100a734db9ab37a7b23ea2bdf8e0ebc0bc16be7c4d7cc3640de2822b35184f268890220214dba50204c4463e11cc646633a28c4fb58745ac892962ed23f834272c99984 304402204ae0650d90aec40f5d99ff88d10908b29c90dd61c1be43e09eb2184ce49c1c980220327d46f3ce2db99259b3998bd163f0acecae92fe19027b5b97c3ec2d6c5bee20 3046022100815936f19c32c1c9ea871e1b675711152aa725f64dabb3acff005d713c3d12a5022100bcacf76cc7931669861d7bc1e8e7621c9900e99bc7693aaaa8da01bce140d7e9 3046022100f813210dec0b0a8f77aaae04481d9371c6da1e5566214ef7c85ca5ed75135bf2022100a2f92fddfa521069f855084e607072079687ad63c869434dbadd8d1c5de2376f 3045022012cd76effd2f56b9d8f3a8c9b7cfe8e22748388f911039b674710e7fe3925b75022100c63d8ac138fc913fc9a530170fa2f88c5ce5bc9197428af3a390b8195fcaf084 304402206c665c5545b6fced0039ffd15eb3fdb578090dc4fbba4dd2afb78686660a5bba022055021f6b43722488164361b79218964eee61bc253ca83fd272248635d1a5518d 304402205fd681a6f63bd424ebbb68f96f0bfd24edc290132bf1c254e648470cbc0acf4c022073b5a683484f1de202b06ab98eb7a8624a4b4706a61d2a75441cc682da707eb3 3046022100db157d31c59980a0f0a736145c72652e1d9cc40a0028e848fac4a3b8dc45c6de022100afa437043e37d14fbb0f38ba4be2488385d20949c2e1e3aeee3bcde6d08c7b54 304502207bd9af11bfaecea1d603937d035c6d2cca8b8560b0033f8f67261626f361b42e022100f604d5d62c849dfbdc5a2fabdc249e2ee6f32a2d020d93f47df0e53eb8cb183c 304602210080c1a8612db9d66632fff5290de29e3e2dd41cf5c2f8801252a7eec2c3abc6bc022100fa5f1ad448e42b8c822d1cbea7b23651e91b5669e869e91664d22df827a93a43 3046022100ab734807b899806e4a9b4527c8c3736a226799efb2bc45ee3c5a6320afe57f0b022100cecd08e3c3245760d816dfd2523ab408ef1153462b1e484a8fddfc99a98b0f78 30460221009f208f8c66cd486881cb19d7994bcb7f3e6dbd6a69810ca0c82d2a6fd3a37c08022100b4c77efa4ada022e72d865efac69827f00f5393bcc196d1e4b730ea8814694f9 3045022100a218d687f462af2615f0640c3279710d4904f7ca254567bdb1c0977d262c30fa022052567f4b5ae2b24406415062ca93791c6bba8a89ac59b21abca965d0b59ac22b 3045022015da2c369a625a0de947ac75afcd171b0cdc591ae4a40d9da7d877effd44425b022100b2510a72c5e6e7d260454d981f82ff690f3bd52a8c4fdc2b91d1a0c1201bca16 
3046022100c3b84f7d4b55226292657936d5a3a25c6c00ca12b5dbed3e43087311b612746c022100a2b88204d29abb997d047a87697be228d1b0bee2b008d839ad20ae519cf3cbd9 304602210094bc5573103a807a7f865d119b63daf516c1cba707c799d383fef2f2fcf5a85d022100e77cbb553d38a8f42ba03880323888c348d3f86c07da1ec53a95e3bd68e68265 304402203710696e8b37de346756f7b1077a438a5c3f44dac534271a4195d745d84ebb0602202c9b106c3c2a863d5062c2337d7955e8c16218220b4018ad025960f28e71e97c 3046022100ec9151d831f1120c8ab85b53345d0a71a352fdf9489995be46a5ea986cb49ae202210090922c7858067dc9f634303262ef68a9a079498a12de92b7569b46afaa801a2d 304502210087abc2e26b0d4d5728d23495695919c85da8074919de626a4661e6a79cdeda0302202ea074af1ff7f70efa5d1f191a48b6a2f5ef76b3d3a759118af7fac63c2a325f 3045022100b837dd24d4b32b0940e3249b9bd608f2bec431b1ff810837126b8134d64e4d8d02202c480ac59ce269084105686c1805478f9d55d8f9fd88f7f471c6a8aac70478f0 3045022059e8115d4be51a1dc85ba988e385799c7d38195c799d0b3b4f9446d7e6f6c6fd022100e5727767cf193b46b0759badd1e02d95f291075f65f96d168f88653fa530ae5f 30440220576323d414887f580c6a816bf83440cbe2943adfa4423d7ea268872a457f52f002205623fae054de2b6de9f1755ceaff46ed4b9b795727c10cdf08c3aa317c1a6235 3046022100ca9357ab6c79df34d33b8ec1fcdb8bd24fa33662ec44748abd8468eec5d4bbc8022100d819b306f6a9df6b2706d669405537e1a940dea072bb611f650fa58f9dff511b 3045022100b7355b2fecb98c86f24e7cb8333f0aefa0d6d0c2038446b61d9f22c492904319022001919c8c2abd6e6b2a61406011fb4a5b3130b78bb56a35767c3a0c5be1af09be 30440220077d3718ab588cce5bd3140d584b1d6137d9309adb018736d9a8ca1f7ff1999f02205fb47010871c9bfc799822a7d204f9ea17ce8b8b797cb9814cb341a676a14d15 304602210087fc846528d3a939a0e65f88ed122530b637712851338117aed34ae0b5210dfa022100efd2db24e7da812001f4d5c0b0e0bbcde3a6bd479982bb00d02be26d78796608 3045022040fd9d400d8bbd13df673c590295a92c3f40c2e27d897c2a77f910581d6b0c4d0221008c6f67584f50fff07bd4285c0c76e7037b66488ea166833cda819aba2d3be06e 3045022100ac3d21322070ee8c04a29dea4f663939624a5a726d93bde1a1c9c779569075a70220160ae180c8591ff5934be7ce94caff89feecb49ef0c02c005d1856f8d83108a8 30450220279fcf93efe96f90aa4616c3cd9b145e6689baa4f9777dda493a9516d9371967022100f9844f5acc66125259bbf94688d9c8001109b6d6bbd2912038664b75fc97cb1d 304402203f69e2d1d9f07307e89665b3368a8cccd0c77618bf9f5c835870fc276866b78e022002d6640b56e46c65898aaab627ff9310c34cc27d04597a237b78d77a092fd085 3045022100a53fd7d42389fcd2b21301410138106765ea4c0d20cbf41aa64457662bb026d1022022b994a05f341c3e22b144fd47a397b5d97bf4164da7f3fe568f47d6b65a5029 3045022100cbcd6599a5ec01d93e215fa9cfc176c9d07e144496fb0203359604340e3469f9022070fb25c26a4b004e2a79e5617ae75e053a0b9c4910d06ecef42d598a11500c08 3046022100e663d087c3927c467ac4947370e2157b28a49967a4d33292fe60f638834d421e022100aed5429c923e98e8a771b728e7b0f058f9ad226f388dd5542325077d3b493817 30460221009eadeb208d0d7fec95411060c166d097fef82920eba8638bd75c58dae0d3022c022100fc1556ce2f634ab59f9b5e1e56b24e4fe9e7659d5f99bccbfc61bece14c6fe89 30450221008c3e46e4bcb88b1961caf915c693a0eb45ae68cf65d3e091497786003674cb1f02200b4f86311d5b4fe93447ee2b8f2732d7c42a1af13c9d042c5754fe651d65a83f 3044022047f8091d1da81c27eaf0be45f4256ee772bd7979296f300680a91a84327cbbd00220399d0b45ae3a0055df32ced1ca63a42eed89665370caa51a095f7672ad3cc6c1 3046022100e0c560779cf3ab37cf747c55b151aabf7504cc5f76ee5035477fb5bd8093cd7f022100b1e82109deaee1d87855fc3e100543a349d5fda6450e9fd322b6b581ab88fe79 304402200a750d64346609690b1d3c6f7b3d6dc8fd9fd6a851332465a601039a721a8a820220143d6568db064618c75294acc44ff1788752f0713df8306099f1eb15e9903178 
304402207f723ed4698405462e6e9e6ffa20c6bc0c5d61076ba9d97e4e4e213eaa2160c4022077d91e3ce31270553ee1876684382d7a782471e1a2b55ac647f46082eef7bd8e 304402202f83c92464769500ffbe38ce1aebad6600f2d454f7e973cffb737dfcbcd2d1e8022051e03e3f6ba1b91493cbeab244d79c2c6e4ce85d7f1c3b61de6f3b5cc2204fbf 304402201e63f5c3af519331d286de4d8cdb5877c922cfa7b907c92ec6061d0713d6477b022067084f21563f4922dafb94c68aceec8b6bfb57b7cf180f059e5dd23e540f8600 30450221008beda80dfa23c2b08fb8a473dd1931d5059c4861c35bd0565b5937761517a86f02205b000f998ee272caf2f0b6630354d6e078eaad44955bd3289ef00da3373e7321 3045022100b90b460a1e0c3ea4cd58c48c71df44d80efb7345dec5745139c6051ced71b43e02204aca11165d799bc593dc1a0177e963942c83082e4f1d634fd61ed5558d98579f 3045022100f3a4a9066119381d61e0687dbffebb1ac726638ea5158ac47914275952a99bae02207885311717aec1ad21627a258cd30ab70eff900529b6e12bf86ba16013942216 3046022100ddb4e2c2753879890d485e33138499ee7c63aa32e85d15773f61ee531a39bae5022100fd6a05f31c6029c84362a8a7672ec04127f07009b48d38b8e09a81623c0c1678 3045022100eb481c41e8c32921ba5710d312cd0ce491f29a3ea3d3e875c7f1ab5f87294b6202203538528af7d0addd480df572e790b48e6e8c57054c02ca6c262b7e97e8e68ead 3045022100848e281e9f004ff21e549ab48b3ff8852756a1a0f24af2ff0b1248717ef499080220128ea1689a2e5b9ecf42d4eceb16ba62b94a8c302f2183c30248356fcfabbcaf 3045022100ba0828bfe55e9e0946b70c377b2bb7ecf47a5e564a5770482c1fd72981d2b2cf0220281c3f01fb7b9016f33880b9eefd84e293ca827f078072db779a4dcd10807866 3045022100bcb107ff96b5f1689e508fc3ac0d1e792c3c70c749ee7f2bef9e89463069c294022060ff210c5fdea8f793dbdf8707259747446ac09498d2037c5530d186daa92abb 3044022072e3d0d35e6dd635e8a5babbdb65d51c4076fad4fc338f6d09c6d0155b8b1bdf022012f3c185c3c423e14eda17162b658dd533ebcfa3e9a175c10a06df5dbc95f672 304502204a491001c7cecf14c31ea1e595958ad1981fcf458906654dced42a57880d318202210097de4b5ef38fefc035b391c5d7515f2378eccbbb36cc125d08c25d4912650a00 3045022070262b9a57f1f9d387033592d04b342f01c6b5b70f1fd771826da3a9e72834a10221009ca5ec8ec30bf120a40b421bbb80b6881dbd1009832d798854a0ee3a492466cd 304502201b96fa8db254df617e03c7b902beab6fe6a9f862626ef2ad6fef897a582b27cc022100f1ffd81f2bf68289a837881ab16f84b8d3ab1514a6b9b97422f0b27fb3c4a7db 30460221009ec3f62f8dd45bf2ecba8dc21e652dc551cfcf12d233aabd03f115118da04ed90221008117d5016f06c9bb0e236699a2c450abad4b7bb1fc246ab6dad2e9ce416a79e9 30440220581d183db6d5b252927b4d6cb47c9535d80ef572b8dd7a203bb72f49ace40fdb022055f975a5ea2b3739b8afd7a27c5dd3800a2114ea4e7061c21c2fe3a3efb65660 304402202c1153fdb761a871d2e248e0176126b8b6f68c8ba0b8fe05a611c624db19b9f9022061f27c08f87aa5bf4831f7b7f5ceec0e210898724215c1793cd5cc55362ee565 3045022100f28a08af1e989d454fa33c496c56f8de9cfed76ecf9233907a492954abb7cde002201dcbb56dcc7063492a655220b5ffaf5057c2c43a433cd858f6c34312207848ce 30450220460f9efc69a623279976f9bffc888f65e934a640b268ec1c219838420a98eb93022100d50b567b30b03f2c383c4e5a8ecbf600e0361ab72dfb197ae9281b605b136c09 3046022100bb815bdb68eaeba63a3d556450a183c440d7db42b65d8ba0853f9cad15ea0134022100815de8f9353ac219cac4f323fae36b950630bbe413a6a53caf80a88c69c80721 304502206899da5125c3df3c5c923d3427ded1a9f834e35583f8a2ba9e7940e5e3aaa27a022100b4c18f3e9a7828350fc0665363fc4fad18acd5aff73d21a7b5bb944a0ef95ed0 3045022100d7bea661e3476d3a199a7d70da09d9fa3996673f5523b9fc3301a504f20f822b0220386e452bbc80490bf8742da425960160fdb33c9e34ee698e8e2bfc932e6e738a 3046022100ecfaab85886d8d85bbd452844ffb440951e9cb4d765ca70146bcb8aef0cf7144022100b264a9f6b4f2cf6645a935414074a4c619b07c869a527c9303983fa119abab7e 
3044022051476d29bc71dfa24067860352b764bf8ea842dee1dba6874b69048012d524ee0220756ee0a88122a698187804086e51b59be62f1fd7217266bce4e3febc3c946cb1 304502204d8e386dac6e6b2f1f8e029dcdddfe7893cf0ffee1ac9707c4db3b190490fc5a0221009cab32d15a0071515d7d30102e5a1f36945e1ae1f6ec737aa3b61b3983223690 304502205ac2e8742fb5e6c13af14d104b9335f561f05247299dd8d3d067615798543cc7022100f8debe0fa5f0b7f96419495b1780824e8e09bd769ff40e7f000716be405931c6 3045022100d8dc23033539a546549e5971abd57424856413ed4e2e77a644670c416bc909fd02207080137a769881b15755186110f5ee835890ebecabf50b08c6e8b4b91e86d42b 3046022100da54c5cd25b07a6e5b0166b9ab8efd0db4d77497c3e516c9a9098826f316166202210089f4040eee957b3ab3882062fa402e928457f211109040503f565012ded42873 3045022100a64e0f95ea66c144409d4b356013019100fbc1cb6661973319fef709272845a20220117cd5f59c62aafa02cb1bd3726f83652481f97e2c413d2e9d70d301e127ca9f 304502204d0b64f0a6f28328aae453d7c1bad9f35c32b423e3e680e2fb2f8686827e9743022100de6ffe8d0824af89c93ace3fe968b28c7678b3fb313ff2aa791517a6758a5bee 3045022100e2577efc0b5a591120b5d385375ebeb8038ea60178cc04c44f77351b0c66aca1022072491ad4dad7ed12742adc7b2caeae62cce1f6cb2d54d222cb63e586cf459ab2 304502210083183dca70b86d434371a69f4f7050d4fdd01197fcce8045afbc6f4d1c4840ca022054a77bcfac54e01d1dce8eab6989ee5d47f7bf1a825cc5bdf621b0e77132eb79 304502206b8f969288603ee988ee36362cc60a70826b2aabb22b53ffcf51b83030febd97022100d45e416caecc1eb2e5d7c3a39e9c4cb0b788c7802874444403cc0cf675e63c17 304402201cafb0a83f65933b93b64310a4c975e9fad9dd12f0146c4877db1765a112644d02203ab38e33c9b9d0088f391c45421ee810c9b45923fe01c4e81f58dd979807ae2e 3046022100cf5224e777fac0a88db579ec3f3955e673ef31c17c26697be3564226e2318aec02210085ae31faf29b3bc78e805fbc4d8b02c3d8ed948c5f235299bb1b484f8b991593 3046022100db6b509713bade860d5d3bb78d6633e4ce2d19aa03d33d305cbbe09350d38446022100e645c9e84c855e823f7fe55e3284c14c639cb9fa5918e47cad1dcb2c61fc1514 3045022100df67c5fd4c22acadeca9687b2ccb9c0ac4d245fc2edf0625d93c49534c4006460220024c8b294a3a10883f5886a77e56a8f40ee634b4ba5ebe100766606713739341 304502201e8274fc12ff2cb24ee158b6a77eaefad70c827b823f583af9eeee8492df7e9b022100e2f9a4090f6859ddf69b51f1cda2b498cf39031466313ccd5ec103a776969fe7 30450221009fe5691f7005550daedde44cb5fc91b37a494f7e44582dc9823f94d6154bed5602203247f25bc69e965c3bdf3be7c1f8f834ea5802c24e9fb39bfedd85dee2fa1660 3046022100f013cb92fae6249620be3ffb79acc2402ea34d901c6f6b798b1682fdd11775ab0221008df414b73ca83176bc50a15a54fd0c891c44ac510720222daf783c340b6ef8ba 3044022021b7884aa8e8ad62b598030d5a2f769ecc02a46af5224b3cf23018d51a034e03022060e19b0d6d714edd6d71f6ec9a63885ba84607011a9248424d9e8bee249762c1 3045022100ed7f91ee70cecca11b26a372d4739b052f28f87fc5fff800426b2cb89749f32e0220324e7cf5a851e9389502f9e8b33fb9c8e3fc24452429917195228e39e126e551 304502204e2ef2749a256ba4c512ec474d4133aff2305f9b11da162031697b3d80c104c2022100dd62e2d9a791dfdda60129b531616d0eb83d4fe13bdbb54f94ca4436dd4dd8be 3045022100842ae19b68b323490ff7ed633f55d5d6c843df0b968cbb8ced6a5403eacb568502204ec84222b71fe4d485adccdf75462b7e1ece5dd26f47390fe5a44c0735a77943 3044022069421c09792e630a52877b679b454075cc139c812cabea1491e2aa8cb3f214bb022004d30bf7b4b964d0ef34973468def9f55613205e1a373fc3ddee9d74eac0dd31 30450221008669a4559a4f87692fb59f6a8ea042247b249569e32a7dad3a24737418710995022077b86bf92333e25f58cbb60ed82f6a138a73799cbd4f79576c831f374bef9cdf 30440220799f7cce5e7d4212fa49216b0340fb08b17473a5f8448a1f1ea06c299652ba6d0220576da7164d3f7829334ccb3314d02d6e198039ff67200dade5bfac65c47b71f2 
3045022100ae460ca127faa8aeb6ab5379a396a0484b8367f1d08f40cc5e9d7d4eb1cd33b302200bf4eaf83a7f60da021d3e612558d5e485b1467cfefe9ade0fd666dd9d2450d3 3046022100c5f292ee0577802fbda0d1dfa4c3211a550bbf637ed59e14d45f2102499441910221008749a0cab5f1b04d02fdf667e88766fb494370c7073cac6c7faa494c7a0d43a8 3046022100a5e83c191056e3a2b611512e9c8738eacbc68c5dde97a65d46eb6c64d100a31a02210097b9a78cbb73c08de2a4a00596c1cbe2cce313bab4f00ce3bddd7b0d2119fe27 304502202af9f59a07a3d06cf3ff5e958fbfb965166cfc77416e48ce5b529bb9d254d5e30221009545df83b7fb22b204112e039bce83772adceebc7a25b279ab9394a71b59ab46 30450221008461b1016b0ad1ca94bd139e00b0f063bffc2cfce2935aa05ead2f543c23d2d00220442c6db040962ff25702914f66c73981ea6c2643a76726a4418edb0201722159 3046022100ec065281c2863733cd6b70466ce93b033a6c672230b61a711870309b0d0d0f1f022100881cfbcccd682b3f294dfba04790706c0d37e7dc2ee1d96a17c221878d049a03 3046022100c71c60b9f92385f40f39c9d178fbd1a891c0c8c3db6760033262c177d7882f8b022100be58a9d8ebc739704bdd8e7cb53c49c2fa56a7317e682dbf695bc6647cea27c9 3044022017de9f848a3b1996a7a389d324ee4479cf196e27c847a1afa7ee41c0b1e2f90202207f106e5b79be206c09ba1bbd21536f6e27904684dec3bc3f0052979f689a4caf 304402200a4b4205bb814acc2265be6343f0348901fa0fb0f5232535e4dd46f45e082225022071d29a42586d66a93a7354f94a86f8d9afc7e9f813580f0724673355e8952f70 3045022100d0cd3eadc90a90f828d8f0c57bce25b4d0a1c7891131dfa27156792d72280a2a0220136e522475f73f236528f1a247f5f840290c50e59a9a25c1241a9925476771c6 3046022100b8856a32c15e3663a353f09efab3024cd7ce481f417043295b30ade44350212702210089f174f2a6aff481e8a29775a3aa85199dbb491781d14be42bcd4d27b228d661 30450220610e596ef2d79ded12029878676b61122646d965edb4ba2c9831dd860c1b81e4022100d4cd726ed80968b373e8b7a96f261202f0e66d7c0091c0e71d9a41f5c391f50b 30440220076f19db6fcbacc140edd3f5eb77073c906c02c94b1bdea62ab7d5ff5b2e88d202207939777023b2730d7acb456f260c9ff701255232687c4b389b381ceaab69acc6 304502203a629aff53b90df69d1d7c229ad1d446ea538127919cf95ebbf2207a4360c66d022100daa6a08821a4a27bf8b9db028d7f8599c1b460afb7079471ae63b26a13e289eb 30450221008d68e3705677e7a80aa43c5a40f9c4a48a1a5986ad862edfc70b2253093751540220522d4b09b34fe61399ed7afed32190a0181b2d72d563bd20e5c6d46b22e77b8f 30450220294d17867e3a4c9ee2d7e99f526035fbfb363c00029c45652118209c8bf4fa0e022100b90f94d0ba7fadbe7b78217eadca253e7758f1b14bf2c6ba7b675551fba420d8 304502200218f0be9bc0c9d2065540e8b568183295800c5653c338673d5b66aa2d0cf6db022100eda862bb1d2284e5efe09e7e33e6bdefee0919f1bb5d1c878cb80ff5f2faa355 304502207c798a6872d144d062af8c5deeeaa3c1c96d6b63dbbc80a6b3db87ad94236a10022100f15f8861f756a2e4793f32d54c3031d0d0585f856a8572464a0e94594bff6ecf 304402203efb60cc8b4665b8cd26a06089961e54923ccb387b071c10bdc1f2c84f2549bc022004de88be496070ab280da15a37147bc04721008c97ae0566e416f4df424445d0 30450220693e7439665e93db8ac279459d267efc3f11a12cdd99124ae23173846d7f6194022100facfae576b2cf9e344332bc61bc9c212e5ddc983c6212638e9cb86617d62189f 30440220715a428b32a4469a9fd5a95698e76c1bf065946cbb14af23bf58142b0f1f84e10220603437fd0c03d82e01fc5f5e6a7f9e994c176e1f634f35bd394a6277737b62c2 3045022100bf275703211a58b8b6309b2c2acc5ee63f95860008648d2fd280930fe64a4f4302206fffd2810e25c036641023c9d370d0033891e219b2b0bf0a017ffe5a00fa4ee2 30440220336781f031de9e9c85f70f39a4bc4678dc56e5da5e16068e5d28cea08158b2130220711cf321af65e307eda5c7441a232f703f8016a83f12316866de9303f00bd5db 30450221008225e51e7c6dfaf03ed35e438c3af920937291b26d5237bcf96d80604b7abb0f02203565ba52d3e6e282ad3d05b315994f195061e6b4c0fc8e8d2c2fbbb1b3903a82 
304602210095de3dd04df820f57b6a22efaffdb679429562a0eb3c45fdf5d2390c506c88ac022100e616427f8fe165551049d44fe3c324d15b2124314a2f8c250d6efda690efd6ad 3045022100c25b6ceeaa357b9335367b78a058ab4549123ec6baa7f273ce89657638f731a00220644aa285cbe63654ee6eb42663dc76104a740ac827f1c770ceb50b4a3b4484d8 3046022100aff7b8ea5e0be861e40444af6318f427399c1b4bdf3ae8ba03542cdeb9ff368b022100a8cc38ce08354a71a382e2fa16060a5b244b2a1b86fdc075d7a51d6de4cef124 30450220258552b24c78b1f2dd0d3773313a704975798fa613c189d49542edad30e55371022100e7d6276bc4956a8d4cc452f5d553cccfc33d7bc1e2ed49554b66d88f9deef361 30440220277a55104f88a6e9087ad6491f269d62e1ed432ef66cb062b63ad3975b25550b02200e09561a556dfc5587b6cd146cead2687edc703a97b00179e68e3d023a7cb083 30440220166da62cf43c8d516281b3864cce271de87b0f65255998bd91657ae6d3bc8473022069590e73b49a2a87009ad20dc47a6ce1ed0422a1d8c778f69fe945a899b89614 3046022100ee8e3245777fb29e4088ec84c6eb6fe684881e4c7f2d51b8db20d851262acdb5022100e3b578bdd44f621b7f7eda3ecc5ebb45e5cc6aeaddf50e42b72b1c357063237e 3045022038946d911115e5d594bffb254a09e3e1e563355fd0177fe4f8fd3e06ed7d5083022100c463583ef42e6975c961662b9ce77d64143dfc5f508c23a3389ee931a817e634 304402206d580c358bbc8f900c943df90e34381bfd601fdd32fd4769bc28f2b368cfe5d9022001dc0f38b3efd1e9c7a95ff46090b1d24625cabe492b8ba7a60bedc2a5294546 3046022100c6cd4356f3547181769b16b7d7245a190b883886d0315488cbac0d4d484e01bc0221009b8999a18aaf0a53f91f6a8c5346def66af3fb2ac27c5a02578a68dca90c39ea 304402206cf9e03095146b3555a581bf81080737458287ab4e731056e9c03de894b35d9f02204e88ab4d8ae6f85fb7d698d1b59fd2e23ea9928e164f09669f618735b28ad176 3046022100ce81b11b0daa7b3d0b9581db31a29ce75780cacaaf58ea71637079e852b7e432022100b3c593d3809679b8c075ecb60934d7829245ff6d26ae42136a77db0e41d97b31 3045022030e266b51977c246269f4486fbca9ad08cf68fd3cbda7ff5d9e17f79ca3e13a1022100e75e4dc9e2621ff69f13d86ce826545d8aaa81b669ea7a341826fa58515f240a 3045022100dfd253935e70c190f4bf6e29955abec5fadf7e2a50fe15d6702d4c547e440b8b02202d6e2d8d851212d6f96f62bd510eb7920aa9101b660bd30641e220a3646b9728 3046022100aca744191baf11f7ae67e1978d3919cc3796a01de6936f0901bab5b73706da180221009b350781420462f39c89544d612e5f30677920394c26e1b492ce85f8dff16827 3045022100e511fe38f3db5c3d59ac4b139b84cc9d17aa6f80b09bd56b8e1a9fa3c1ad88ad02200af90ae7722ccef0bfe89b366d6affb485b168e1b2b08b8ed97b2ee4a4f38c7e 3046022100aa1b983d98978ca25ff95f9a3290f2e31619dc28be31cb979972471ac9367608022100b1b5536961d70033dff2af929b22123b95a0022f97e2d1f4c1d5b190bf7bd50d 3045022100b8e3851be1b6713303dd11050def0508405941813665a70a9b5a6165b500817d022025ebb29b929f83106ad35f02e6d9956235f9b8ecbe5673a8f01b4c1a6a820e61 304402201e2f527f1604601128bb46203766a9ef58b783e3a9c05d6f67520e3997e8a8d4022046f0d60dedbc296c6b83a8254cbc28d16daa074c19caa200199229fd9e2b3f98 3046022100e0a48ef93b9c41cfec7a48db78aefefc53d9c3369f062bbf2ef622280dfe34c8022100f2ab352f68169e03db905124db6794520ef86bf7baad638b5f21bfacba239c40 304402202937b3204fc221cb904986ce867735d51cd791d21e78fa7702f2bd55d3c9a9ad02203fb2c5711f7f9b600de4a5e138373b6acfacded5c09265cdb00fc419014ec624 30460221008598cae946c49621d34be99fb4184a5481390c92c90ca3984324d6ca054a1f43022100edf3aa9c5f86f3727e0950923108af7ae6b4015dfd881656bbee0728623d6ca4 3045022100968834c82de9b767fa67760c7c2d07a8a4ac627dfb07ebf78f56ee7dbb144f8402206f78468e85530b5801d39cd25584398860e20a5ae55158bbc6491554861475a2 3046022100f803a515030d42c7b48a5b5b583377965d5dbebbe2c807d16eb98520d7c7ab57022100b82f9ad1ccaef192fe71114d6556f63183fd25c15c8aa53fef6cd19b2c756817 
3045022100c249791a4eed53de33373a324de301217a8a598709ac4221b4c7e0ecae51f12f0220380ec75d3bb4b61695f98e928028e86dc12f09e83679dc02c56d45fd2e883d7a 3045022100d2934fbe182bdbc9ee431a28b0742220cfe7ace3a25052c164ac2515524ed5ee022034d37e2e8ab631fbb0b40dae7d9e32c6c4c1d5ef71afd8f803b571377d6e84a9 3045022100b3cf7416c42694efdbd82e779034b49a6432204315576b63771737574c9800520220217e7f40bd3f796eb90cedd806506e29a12f79b93fee16a1975bea6c249eb615 304502210099b6352edfa849724fd144f7596bef4e47c972365b3ca6247dc088306553f69502207a29de56a9c5fa03280a42751cc4bb7ec2b90580adf92af1b1f1df0207ba1f38 3044022072f525b8a422d1807227b0a2864080a4e0cc3f96c4612cbbff1dd170c4f032660220095f741c15f62a83ab019ffe5902a1c961b26f79a878348fb34d6b0aac189bac 3046022100c444cc8cdbdf3b034175fbd9823823121f840b9ba23a400234272b136eed3e0c022100a85b8bdca18ab4d1eb92825288915af613210df25305fd87f50f3f779c70ddb5 3046022100b6d23579a56dd124a7c947a2b89209d5ee42a3559baf74c5c57462ea4ed72ff1022100d704431286b6f4f12680fce761bbdb1b239d9463ac756fe25e007454f6e954df 304502206ed4752da05392d9a3d719249bd5e4287445e58b0e977b128292b0f3bff2651e022100edbdf00f69832a5ad6fc143cc3f1aa3599b99f85bf0a18a4704ac685439d7020 304402204388b2d8d60abb72c65f155239fcebd420bfd139a857a7d2b11d24e5b66bef6e022013023a0120c6b46a0c4899bc9754b801f5389a9ee8cd912e521807c7083d385e 3046022100a3130d89450b5ddeb11d51daef03734f64f557b30674a094476f2230ff2dee0d022100a5a63a365282abe83bcd98d41fcf028010b4381af7478a4eca0b88e031e4b3ca 3045022100e724fa8cea324f9f9fab763f9ab278cc71dfa83937dda6baaa3b20039a7f52a702202c80deff6d29da98c010b8b95c0ecbef608f5f7c5e71d967f4680a33b1776806 304402202ce01d1af63beb8d59f4a0cbfe11f5737902a103da4d8230f98650a8ca824ed102204f5da5272009b261099a2eb0899c99abbda0cbc843cec4783cf8481665dd5f3d 304402207b6630fc1fee8470943f6633ab65ec43488582853eac4c00b72380222540bad802200a73c008f90d1a7df1559e070396653302a378d8a942edd863c6384c4a24ded9 30460221008c3c10f8212f800bff06e0e1b9bd318f8eb99a56df0c40109aa24d5635e9b1c0022100e0991b67b4f49f4f1f7f5e9adaac461543c6150f6aa36cefe4ace953df1dcaeb 3045022100f029dac5b315e5c4c79c43f7526b80c3e0a1108eef22172c52fcb20045dabe31022031ee381fe9c77ef9d68c462c982b7b0fc83aaf00938d90ade79508f8afd9fd8f 304502202d8283e34cbd86bc33d3394d10969e96708efd367a0b37fcd45298712e5b919e022100a9924ee99f71b80d374ffca95633d0241364becc24098e8e4753b5f736911106 3044022017bc0f7ee7499bb78c0b220f4b44fd7ada179b5afcf566452bfa41f3dd68ceee0220297736cc5790e18ca59b83da977de12bc0639d4bcdabd8b9d4789a70d05e0790 304502204870b7e7d913f60f14b5dbcf9dd6128f27208b3d1ab8ee7d1a17138fa21b795202210096a1ad2646785fabf002e2f5ade56e03878d7bd42d30ca26693cf99de476c84d 304402204eaf6a95ce812616dc8a866df0d51f3e2769adb252c2efb2b3b07ed372e3a19102206bc111d650f67ea72af3d23fbf837ea85497de1550f0a46b261deb938be9c1b3 3045022079875a4b1f0795f891e8acf00acb68f1c11bcd93bc7e6bb096fb6fe39ee3396f022100f4c89ad8601abb048f8b6342833a7b0df7e86226f4f3ebabc5ff5e6b967201ec 3045022007686149fea8d6530ef075e531b5619efdaec94c56e792b4a9d63f1bf1bae5ae022100ee965a44647977d7ace3313e19eaebc7580d22c97695d0d3f641159fb56c45cc 3045022100ee6ef5fdc0af8c81097ee1fcb462c6b210dfae6fa3c8495919bb8d3b735968b8022031c97e55508ae3899a9cbe30cb9cc9f72a583283959c67dc3cfd4d11f38c58d9 304402205809a50ae2dafc19dd1bfa3bf3786fa3efab89b0c8ad6d2d539fe12b55504be9022059eb4c4a257dbc0a45ce5724c71bbebcb38eeedcf61430c16d52841d2c54025c 304502202d84342687e48d8e2b45d1e161f03397780fd217c4b3281c00b12c83a4c131cc022100a0b41a01252dbf108066c00b32c738873a9f0b7dbc6811497341d2fbd8cf365a 
3046022100a322964002b3537b76786692445abc9ec6e7081e5f9a13f6bc94d450c5f5064c0221009acb66e466cfd9a1dee30bf298496a95894857fb82ef9287ac3a742c34f02b49 304502203d35e1847bbcb60ecbd677f2e1d69da08ddd5d17ab3705567e9c4dbc60163ef7022100f8078d6667b33496449ac8b899bf800bffc4236c3680b721dd96114e96e1a285 30450221009ab67195c870a914d09a2aa56f6650987a49487eef83f58190956b7eaaaa3cc4022030e063a7c4679f1a3f0639a39eb942670f7c083a39bc160b41e637068748316f 3044022022a31800c2033bfb4395ab7ce593a875c5022fc7d5e77dfab2c6533e9545ebe602201a03179be625208004ffcd7c91ab8aa0745efb092e7bff892e36a5e47da7155c 3046022100c912a4576a8d7b0f65790e312580b243a435d73fd6f2765d4a98deca46fd45480221008a7a6e255e347852a429e978dec653b621b7d654bbaaa0ae4831b3d114802a63 3044022041ee8b84b098d12085e5adebc1af3602d483e90d9b332399e2301f5f628ae45902202635d0d2bf3122c135dcc40eb4a357cd5d655073921f1d0371983d56a5e31da2 3045022045cd70071d89993e5e6bb6af7d185d728bf1c83b9589e638c90f55c126934fe90221009662e0056237e5202cd6493c0149240bd9fe64aa8e293b216b1a2bac9cb15da2 3045022049884fef2a8a5ec42e72a2b564440cc7aa3d4908b03dd38037fbe09929a4cc30022100923e7ec4a4eddcc215b7be99ff053c3eeae0a8d92a48accacb52d964edef02c4 3046022100afe38b867ee46d76763a02d43657dac088f530139cefe1f4233856a7e1260c64022100cf240420adab8713bc371a4dd488ebb286eb3755aa9fd31c91b8cfbcb6c030a7 30450220619b9c611653b0db0f8d2cd7aab2e8a87d99ccb97cf7accfa2ef5622e4ab3b34022100b9fc37c72ebcde699d0242bbe650643d828942f8bfa28ae54b9342bfe6226978 30440220349ee1f0e6dd2896209462883ae7d229d26eed66dde4a726372eeaaf3c66877e02207022f52689c70539b839788d83f3f4fa8219183d03a889b9565f43080ba34d88 3045022071f3ab6319f22814989c25d2048ba7aa448934dc4553f8cb2b3adcaf665308680221008808e2e6c7b4caf6bd57377f1a7cf8bf65ce206e52f34bc87a0d2dcfb49aab29 3045022100d98309462c7e267540aafdf7f97a87dae6fd48560b9d529cd05df77ac8ac11fc0220247f4ec37634aacd8f73c3902e24ddb3c8355cb5129682e5f43fc1c29560f2e9 30450221008d3b8b5d201f1ecd049eaebd9e4a1fe2a5dc4291a50ffde16e48b2214f667a5502206fc6201c0a0d86734683e664eaf26061df5a60b6bcc30e66840b84e8400c768d 3045022100ac1e0bf8fb8c09762765f023ec2e64990a5812d5f098bd72c1025aafca87b37a02205c48fc218faa4a50707b94999c3c55bfdd9b5cb0386594b17e923fefc5618927 304502207add7d918c6ab9247d12930fdf399b4511ff3f4e6e2917df545926ec6060a6b2022100d21260b92969668d51cda775524349dec19e99199cd6a574f8480d80075fac66 3046022100b30ea09abadcb005d3fbb004bd1b7ce229f2abf675e22341b9101269856d2e55022100e894982d60923edaa80a7c06066243ed4649c0e23e85ff85f4d17bbe6059c974 30450220615c414e6123c3845b1345573b21808d01994a7ddd946f1d18796eded2eb0b38022100dafa8675eec3715f9830b57f69740b9b3069f3c1fe839faf403dfde7cf65b0b6 3045022100991b23a2a6bda6c11bd0ceb58018ff08ef8b7c4fe41298b51aaf1e83e167c4aa022071660ceaba0efa4e534fa8734cb4417d8196004ae959162c48086f98cfcbd3aa 3044022010a78364f7484e6ab58bc0724bb9d71e442753416deef13bf5292b184e1b8a110220649872760ff7eabd2b76d883dff99f7d24e06f96f58ac58b694b40dc75b83428 30450220306b810fc0b9de4eb32d0cc4170a50dcf8c964edc24901d3ecbfbbf055a42924022100a72f7d9d366f98b8d6384582f9b346d2b901d428e12d081b58f6bed777bb8f3c 3045022100cce4e13f2482f72ceeeb10fb5b98b49a943ed841c6d5f7de802a52596fc37c8f02207ed1bf40798c575b566589297f8cdafe2c2bdb5c1a61801b94268cc4a4f7f85c 3046022100bb0cecf0ed3e392d2dc2786d0477988204716e574a0e802477ad6056d9098c8c022100d5487cb1586a61ccd07855a577436c9ebda54f62d608ee2209e58a0ab5ddaee7 304502205eed0ec93d1eb7e1ed6be9a39fa44000a7866696109a146e08c67a08b7e6eca0022100f0284d0e8f20d39e53f863d96a07aa5a2554740a62d989b49b7b1a91852722db 
3045022059276612231e194fa0d63c838a0ebc1afe697c306e45a325e43f471efb81b500022100afe1d1170f1ab2c623bc0fa1f428f8991ce1f56f4a2380651c7abc359c892b80 3044022033b3731fb5dd67508f841fad41763a2336d52a11bfd68829d559abd44db8833102205f9e7ae476f042232b3187dbde5b544e61bf9ac6d11bee045a0d315085eab9a4 3045022100a1e4fe1e8014f738fcbb3199e2780166c651bc319d4f70490761be3956154de5022067054b7d1595b3951024e038c29c0e3f95aedfb9885e8898352691e3f9a77be4 3046022100b8348aad56b78dea80dae4286041f8ee54cf4b656be1ca553e58bbad8ac64f3c022100e3ee3de6a6a5c2a61da8c45102454ef779b0c88a44b33b7cccd904e621b67f88 30440220747fd6456107f0f5dd3c3c6807ae90d12af97621daa631b57de3969b1c4317e8022020458e340108cdb94657ae74f8118f157c72af5ce8d64e52fb507fdca1c0ce2c 3045022100eebad606dde7211faabee66cf716b4ee2f8b0aaaf156e504781b689cfd64be88022035cc6c1b17419e0a8bb2276ea2e41e4abfdd757844ebf81b05c00d3a974eb3e7 3046022100ee9765496cbb59cc696ef7ad98ddff8edfe8afa038dcc7616dd310b14103546e022100c7c18f7a56082b1341ece5969781ac1b650abc361983b3e0abe08b97af87f24a 30450220416765948c008acac7c4367cc394e67ea125b2bf851bb3f4b896d376625ccbc10221009c3509e9ccd52dcf7374fb5773c74d52199dcba698e54faac3fc01973cb8e4d1 304502201f2a74fcdf0e32165d337879bb12dd7985a25cd03c49dabf1c43018bdb2ced1002210091170ced8e109dd274ce54e47fb92a8008ebede3af024f4e1b6dedbc9fc30c27 3046022100cd3c736c3acf9bbe2ceada3439ca8454ca1f5deff332cb8dcb1ab4dc4e45c4f402210089ae6b36680adba9cba0e359ed032fae86e2be2019120029af11d69365a5fe5b 3045022100e23fc820a97521491cea34156bad20c00b239be76644cfc98ea6f23b630d39ae02206227479d786fd79dc7277cfd1ce36f6e43143b9599aae957a565e038c957098f 3046022100bafd7d45816de85c6449a88c5bea587ee135e5221280a1f912ce073f6fb5dc130221008670b0e478301507eac99f1399859daa0703b3df5f7d9da6708f40773fad8278 304402200772acd606a1fab8cf814a7151b9bf6aa814f4ef1a979ce9bee5495356cc7b5c02202a197fbf28aaf39c8f8c1a4bbef65c462622d48dba42054ba91a0791ed3bc136 304402200c9163aafe695b0c1706c9cfe4533c262ce58c82882219311e81b9b54c4c8b1e022036e28e78173dc63e871ad241345aee931018204294369d907df25f12501b4bc8 3046022100af148a9e895ff16afaf913dcf19ed2d3c0942f91d39a456cfefe6af8dfb45ff9022100f183aa691f3b40b1b6cdfea3a0be89980df6f58af0200874fb8f04f8061568c8 304502204a489ef70dde22171f8baac05901de3c80d8ee18ba6d0909f3895333bde8d2a00221008f7607fa55c9436cedf6ab3db1908437caaac00cd1eae78133616348879c210c 304402201e4808467da0cccec35b7289487a83a413c7a9e8132d61fc78fc3e205d81f6720220314d7dd7f82ce0cf4f57c7991267564e443af7262945d85180c6232c82439b1b 3046022100f3c83611ccc5d71a927c9ed801d5f2d37b7dba3e384231c731b79c6d2990a9fc0221008eb04ad1aa68587a6267699cb8e07b7133a00438f70bbd62a7f72a0a663425d2 30440220585fa7a59fee7306b52b5a39d814435b0c8f5ee91c1fd2363630ee3a4415a21902203ea74892e3ec42ac5921bc5442e64f56f92f1871fa757602136b37db318a9a3a 304402200fa778c30f1ac98ea77f6192b689f495910a1295118a336331078285efe59dcf0220271e140d8c0587d0c15953a37ccaeccff442f7fa2a9e487d0630b76005fbc6fb 3045022100e0179fada8918ce7ca7d6606dc75ef7dad110fe3300ca7c0b5d3dc88b47e24b602202efc16b5a433d3271cddad823032b4db1fa2bd396b7671abceba0ff17538b30f 3045022003e676cd388dba6b5ccf22dd43f512d58340d0b518a26d2f9e52b08b43fe1a05022100fbe96cd5fa03fcfe7a7d496888072e2720a77481ff66e52983a0f34486735ece 304502204de49841745c33cd99013add13c32f0c73d923af08dbb6bf40cc1608dbd5aed3022100de249baf9cb9b267807328910c44e8ec49567c91c08b8d55466d86747c7038f4 30450220774203dd480e7afb84a8e1b5f7a4f4bf15b27e283052b0517d19c35c7b0d943b022100c73c908035ec0bc0391e4fde024af467c199023f4ae9bab0326cb200065b060d 
304402200474c953ed7c40bac10b1231057854fa0d7fefd2411de0c1a091cfab759abef302201549dedc590a160a731168e1efd1b0db114cbfaa351f0c5566857063d500d9fc 30460221009a937b88f7fd8f91e31a36b02b51d2bf3981c350419248e47196623efeedf9b2022100d12c53483a1256a1aaf2abbe0cee670249c1df2e3aac31cd98083a436c696a22 30450220539ab310dc15380148d07e6ddafd8393ba2568a4179c08741eae9036870761fe0221009777ab6950586a9d14815e22abb2eea7b9198c45f5fc848a67d52b979a60d6f8 304402206c75b7c1fec7b41ea2445cc933730124008e0bf7f3ad802288a1ca7a2646f9360220584b05c3b97970b7f3ce2f6a232f4696e5dd75fc419f13ad9cbfccab1a418d36 3045022100b2bebc719273e520656785fd745d426f834fe9ce19959296097faec347e45b8f0220313eb66ed5fb1e88aab4ae55047bbd0e9bccf68f5a6864b95e91499f32a93ea9 304402200d7fac712bacad2f9fa8d63963bf62d86d49fff2003e1ffbdcd916d84840a0660220571957bacba07ba9e7f539b8d9cedfd1cf4d17bdecabebeab007b0067c57e323 304502204dba1bc80a2c05a3b79ff7a700c3dd887217cbcbc01f7fa6c1d65a97fcc4b92b02210081134b511c4a7b906d33d521447441b0e35c5f37d486cb6b451cbbb8e98295c5 30440220778b8cbc23e6b9de2797bad83d8a6f83d10119ebc5c7192e9c23b06ededbddc10220591d1baa1143656ba76b426909082943d414691c74ac6ea8964ea6ce4cd5501f 3046022100d90e2f3ecb980255ffae3413e00ece97f95daf6c5ed3c8c418a4f03bab633e26022100b73e0a4e4540d9c6018424078a546136cfaeea53c21f60efdc2049b209afa417 3046022100d1e4d81e0c444a3494fabed890329561ae8e9f5fbb6cd91d516309fa27bb4789022100d387b6469c64794ebe5f0a6f663fd17b3c54978b11bd08aaebcbd0a508085d79 3045022019b8090824a898e233037e2a25af3f9e2a4c2bfbdf0382ecb8978c3149ca240e02210098700792dacf6878173a33d232df1bbc5977ac0f16b05bf66c4f837c684ce210 3046022100ffdc13032d9f7ef28e675210146a757eae4f553e9e97ad259f56507ab700466b022100ccd27aa0b214e55837b88c5789c7b138fd86577a608518c6e37bb57798c19b1d 3044022060a6519eccf2f598b423b9f0b37e5122e944647843187a985272489d9a1101980220200c3debc83cd622a0284a5e56f257dfc881613f793b1b4331f933393d8a1700 304502203db511d41cee90b828de448ca2397f1e4fb29c84ce57cc131b22120897581ef5022100e26055310bf0bd89729e76a668a671a3adbafa7590c0086ba936b2a9bd9c1b02 3045022100de01705c7aebcd9b727d24394d54e99abce2e19be8226748d616d57beefc7e1802202d21bbc6cdf773021151c9a2fd65ee1fe60f6099669b8199bc9693cc4bc66ee2 3045022100ff796404b0383c76339ebbc95b92956f333c9a92fdfeaa79ca87826413ee41c802202193ae152a2f4bcc53d5d19f94e90e5f368a6c2e5cb9d1c2a9edeac676e63306 3045022100c2ed5e54b6cae77a4abf1c61621fcfe32e8c125d845e996d8a0985154d634cde0220597242a466dce0b2bc4d4bb81b0c3b65ae577c71829e9932416f697f86282097 30440220611aecd2fce70b8fc2c43321d39bd68029c47da67db84ae410bc148886df848002207143540d75647157cd03208bb205856c3b9dd5c5a9a479f47700ed61bb6178ec 3045022100f27b31929b11911106b69f55e4c4853d234f068ec2cbe53711022ceb86b1567a02204db61e587675af870578eeb57e99afa7350bb87a45bac39d2e9fd49580be9fa8 3045022100e44d28a34d2734e0890f34293bba907b109ddca1923f94e93f7d261edd6562b5022025c61e231b067627c842e1a8b017f489027a000d1e780c0603da891e2f38f362 3046022100ed1d58d1b5943d9e4952fc3356a1305c58cea5c8c8b75675e1773ff092b529c5022100d170a1ab34dbe6c0dbdc3e7a071b3c51b68538e692fb365d3cd62be6ffe307ae 3046022100ee559622107bb383fc82ce72e99dacbaf118e94a8f24d17c18af36a113d8025f022100b7c67cb3ae01106b82676aefc3e6bac9b020a61e0f496fc3b6e7e71ca02709a2 3046022100cd849a53fc3a12a9beb53962f264a79f8041c0fe510f985cb359535fc07a2d64022100ebd4d443475829df05fc5b09950db3a921fe403e50e5a2d191ef20ee158a5d06 3046022100a9c39bc0698deec09d0fcb655155bb63bcf26a3219d069c41bdfeda1c06037b0022100c764c89e5715fb71faa6f121ff79fa008fb1ee3e3ae3be845d148b1717102a6e 
304502204f349181e1d735db2d032431f65f5c66f7abd614ecd136f10467292e60513c1702210082c1f1ef9bdf76173371b4af247995d8944bc4330547dcc4055b45aa297e41ee 304502204d2d1bba59cdbc67dfdfe932f699627540572edd46af2f097210e7275c1b71e3022100c9fe61d475d9e0489d1702d48549bcb31c401174c4c09445dd0c42822195080a 304502203ffb80e89e70281e6c72b94538ad8645c3cc9edbc1d6412011042fcef4d26e7e02210090458529dabd48d719ab2db44b2d3e7bab926d957bfd96ceec44b0ed3e1cddad 3046022100fca5fe6a2970b27d0b4469e0121778c5fd03145c6c9b393576e65d4c05e6e0800221008f6d1a739b674f6112ef6592c5f10962ea384b5ff5ddaebeaf07b6c6c2ceb3ee 304502207bdd46b81654b0c4ab66ed79887a59ed36f4f97a9e5a4028229ca8ac9d17f6ff0221008d93299f0e1d6c0d51e5942fea187c6245297c4f61119dd698a60c9f4e0b94bf 30440220293facce3d1110626ba230c7fedcd9ef9fc5e4b09305226fbcc5073cb667d3410220500d035d0a7ccb09020f7a5e0859492c2765096731549ad29c0acbf65eabcefa 304502202ba30e54f42448a372d6ed2d0c21fdab60eaf36883673876ab64028729dea778022100865b2dc8e56440e274c5d7900e8572f514d52083574035b689bc51f4a8ee4f86 3046022100e811b96d2aa74031fec3fefd408b7dd6419d924a898187a158f73f07ddb9b933022100e8b28d567b8a2b215c758e80999b547ae532fa7017f0bbd61fcfa6f8856bfe08 3046022100efbdc3168a908c20897b1d46123125e71aa8d002c2dfb3cee38b2f8b1c98da3e022100aa4240caf2cebdd49725a940ff166d13e6e41d04208256f25fd504dd46c62175 3045022100a202995160d77b41981d3e1cc5c453f6e4871846f63ff2a6ba8f526b34c37a22022030fe0ca753dd95fa7b7025dbd5e3109b12af267fb4c266fae349e3318bb24928 3045022012d95587ed67bfa29a933cef0efafd38dac09c8e92ffdd27e5dddf42a1c3f337022100c12e9df784595d9405663bbe844e0888c2fb55059ef76bb406eac3b9d21c6e99 304402200b72f6439cbc873437583b4e9286def896fba0f6dff26b5289a78099c3552f1202204f36f8ff60d3431eb7be146f5789c595cc1ed0b8d70a9572f78fc63c82f51e49 30440220409ccfe53761818ab3589b1ab15160d117495a2a214ef355eb9c08d06f35314402207c23405526dc1260bceec57670ce9245472d8008a41bbc4b1e0f77b6f70882d8 304502200a4be360bf10c54e57bab6dbd741ff21179c1a9c8d8d9effb3c48aa9c662d9f4022100c19bb09f53bd1ef9db4afdf18b1c8c4b297a463180fe7980710322838125b7fa 3045022100f6ec90a25f89ced521f3ad1b20a4aff122f4d543543f2b20f51479f3cf8a719b02205289982ad1506fb443815c8a71dec455cd7ebcedfe1754221d8d182e848d2c32 3045022100aa2261c536d0b1f78a7e1884aa9dfcf9793742b81873474ffc8852a42daf2f0d022043bfcb96a158cbbd7b9b46c9ccfdc263640c6fe021e74ca2d7e4559c83ade2be 3045022100bfc9f421be2eda6ba9fc873035fa1ab7e1208bc0acbb2bc592f27ed4fe37dfef022020f60dd019c500e39fe3ee8e441cf732561e37299c3f2d57b148d14a41e2c1e7 30440220347e607b0d93dc8d200d02640179cd1b4af1e5dcaa573a0efefecba6d3b9b9df022035d1c2cf49d5d6b948e37ee8681797dbd7d36b8eafd5f68652a635f7ca32f06d 3045022008c8cda9ffc9edb8a6718b12c8689adc3ac7317d228c944747b727ad9823e18a022100d9d9df875d9eab2860447297e634e2b201a091598b45c075f0bb49d5873eb42e 3046022100d8be633a1a66111b264d7991bbd64d1da6e6d14fb0b8f57a40e9ebc52eecb02d022100aee2df0d10a5c1b9a3abfef0b3ee23f1380afa5aaa0d12a61782067bc6972126 304502207c034983e0f73e507419351b36cffe51207ffb3b6b97ff465cdcf0a8df5c55a1022100d98fbfaa0902a6ee945ee7eb02cd51eaa75ad6b2ba7e8c52da11e5c8da65b6f6 3045022100c399401c0430004a16960f2065804a7f20fdc485caddde46f286774ca0ddc2ea022027b8129829cac9e039b42f3ea26869cce32d441badb7338b98b183ab3a97f563 3046022100aafb7f079a88da63b57fc7bfa85d7738c3057b01cbba75ddb7a1622c70c1e75f022100ac4f4c7c0296a4122c744acb73be323a43b24dd7dc68949928ded2cad31429ab 30450220565ce1d8dc25952dd7938f81828975008eee7cdaccb55b554df8de3cabe1e655022100d311ef5e7ccc93ab4b55ef43aadd72e84130fe49596887bb715bfea3e6b73785 
304502201b68e49dbded776278aade83c9f0b01b98c6caef02caa4c73a5258d6c5144e11022100abdec16ff7e3c77e0cb9d82d3c4b99a08ad976814e4e8a6090690054ae87c840 3044022034d621e7894f29f31c7920a4e3eda90b9af1a9bedc49a8f47fe35dc53682611c022035cfeb2843ae67d15ba298b6d7f9503286f80c3f197916aa92c7c9c949c84400 3046022100c429fd0a12acc7cc375462aaac0bdf979bf492536be60d396cb68324de4f4a1f02210082b93a234ba021d2c1d71d6695bf1fc1d53fc73960221c42b44bc7ded3a70636 304402205423d75c632d431b49040870d8cafbeea0b0e197d63a3c6b2434bf4515874d6a0220392eaf81a50acad83633c668718e12cfa1c97eedc38d3d69d65a2cb08b672609 3044022079c8f61af45c5b6c6a261643fd3f022c04fe33657c009071c0a98b55824f15210220063b2dc547d319add62edf4aff9b703fe982b33930159f74ba6335ba12d7d7d2 3045022027a957410f37ee2d23848915908affb9b3eb5c8308e03edf1a9ca4c233da27cb0221009bc8e5ff467c4c29e67811354b1db75d64cd3b9798546015b2850e44e3cdcb3a 3044022076cd37506ff4509a8d6907b5544bf3a64c47634b07b4496811178aced134383d022005ee03f4b02fe1c627b7ff58d21f477c09366dba99d68858f3aa031d5d5b1050 304502202dd3ae37fca78ce3c0abe393345d4078e2709760ee15a1db73452b5eca0b5f47022100b8c3334ddfb4e7bd9f53b3518da66bc3c0d27a26b6a4f311c9d97e270c886663 3045022100e54f5608d96e0a2636210d9ed6777ab991ce912b8d8c4da295a5dbd2cfbc02be022047f6fbea4ef221bfdb828f79040bf2ea63f697b1f454a6e5df7caa1dd6add18f 3046022100a479ccbacdf72faab424636b4a351ab2a00feb50591f4bff4b3285c7eac48a18022100d5a70f2c551ed529e29bc6d66b135f5160614f2de6ef3af731014a7e8c199803 3045022100faeffd3a41ff2b24240a27d2742004d3e4f4b5c3e3ecd9fa6dfdcd60bf193d040220295acec839cc7636e804cbfff262fe859cd5914e3ac32e3badf7697ad8720783 3045022100ab16aaa649a3285611a0be8b10962382bc3507d7e5b62222669cea372b7d83f802202e6fd932782bc5282f999c22dfa29a8596b897f777fe15309143d7a4ac9f27ff 304402202f2f166b6960036e59162a5c3643aae2c66ef27ecade28db446a36a6169affd2022072500ac6e47fa0e4045428e94bf3479e3c75283d67466c47b3394c95ad9209d1 304602210088b933ee2170751f3b21d1727b30e2670e028f9f19cbb7db2fc3d6027299b770022100fde16d428ca5f8e65965f2d3dae61f10db5dd8c3b2b244542aa48beec381d0a6 30460221008ddb7b1bc2d09be9dc0e3bacd0c5f1c79fa936d2904492843e552ab0113615b202210097c4994999e8a99bda9b6806218b9fc94bd9b55dbf12c794424dfd0e0040b7b4 3045022100ebdb9a19c06cb0d01c70f9e742fcb40b331edc46fa93732a2e598dcfa2d1d48902207b6548a775d97732a62aa5773a930302a8c7ef8f6a077e1a94432478986b9f95 304502202b4ffc1a597d61fed963f31cd25046874dc9775a4fe1bb8ba938f1be2c91fea3022100d09254fa427733b19071de3c04e405c411e53a539259b08e0a57917622894437 3045022024a0e711acd0544f61827e6226f77d5796d69c604c765cb07b9017567c175d92022100b36a14b6d1c98824e26670bce36d2830d456e5ce937c76b1804d6ec667951042 3045022100c6e8605b1e085d431c6446b30418cfd7624425f1306d6bd428033ef4ca70b89a022040ab19f9c4d5812ea2dcc36a1dd4370d8c4ee5c760d0beddb90c8e5d6441063b 3045022100e724a590897250c05183672ae876f60ec1e89f8549f90c97eeceea1de73bf954022028bf0977c12ddfd72aa769145e5b5c7cb4d349e41c81c2f516b51955b90ec43b 30440220666906dc416dff1ef84864942485de2621fac2830b2ac14e763d77171980e470022066c6e09df76e3ef44247e47644aedda5b2eeb7a118524840ed3761c4603dfd1b 304502204e14d43008388b2a6c706e86f3982d356cb8043ff042675e7e9fba1f4a39208c022100ac80cb4dec2859967d34c5657a38735246f197639cb85ceb0a28026e7eb8ee25 304402202c4fd0b1a34ebe1f7c15243d956c4394b5a274d61e2f7e1dcc5495eeffe29553022016d0596872e8a55db4cc90a8686347df732ad66c03fec277ec61bd2956c53f9e 3045022100ae06e0046323e78dcf996823190129b5d73bdde4b6ffe81a2282f2d87694647c02204ab5f7ea6c54beffc71e667c5c7d5b63ac3a685eff15413a1ab93fa15c588069 
3045022025a9d84372bdfbba7dddf766a7a16b4475e08690ae769b03d33ae6bb0fa42062022100c247b796eb767b4e694fb525b6ff117824179c3b9f820c2028dd36707fcdeaad 3044022037eba9843a1f64ce3c5d4a2c1dbe6f26cc008a026c7a60b079b50db264b791b602205101fc443dd8b694ad2d2db8cb0b9a5d7747d974b561ef258adb398c64a5e78e 3044022077314fba5103fefc7a9458a624e6607d46c216658e1340f08f5c6b35c4fc7f270220048faa334249efab3a409e94d12d57802831514e8ecf386df025d8e3173f2d6a 3046022100d6e9870eee337e0c12736680424530ab2dd64bd4c03d59324d23481d4715f019022100e9c25a21976b538ee23951faccfa178fd05ac50d72ec184b7333cbba72d30f92 3046022100ba64dd645da2e1849e73c921a7c9e634ace9febbfb14db2e8a1186c22b0fe42e022100ed15aa420e27edc3fc06bf9fab151300d04f5987bef393bdd732e4629b90bf3d 30450220227544f315ceb7030cf28cd80a06da501ec1fd5695b898ed2fd5fe9a272af90b02210086a03dbc9743c30f7976992f0815e543a97effb4f9ff01e59dfbc4ed5b08bd6c 3045022009092b020068199f7265259cc79bed23c23b6dcbb296c1b6ad40a7288cc2025c022100e23d2b43c51ebbfed20e07f69dc21e9c96fa922b060346a1365060dedfee0c23 3046022100b4b2c48d4e0f2799723edd2120437bb132bd3d7714df43c621fd815a39bfcc93022100e315c37f4d33fcd4636b42621317e8504ca552a30cef628613fc282247575944 3044022066b56f5373aefa0dd827899bbb6111f66ce687a865ab020afe220f00108faaca02203dbd353182c7f827dc01c769e882bd34f3cbf1e9d2b079626abb8d90976fa516 3046022100d6759456fb8c70bae0d1d38ead8ffe283213b848bffdd3c8a9b8a79d50dccddb0221009c03426e7c8b7d6358d182269a9c08ecd5f2c44fa398c2cc2981d5904998ed4b 3045022100ac1081eaeac8a5d8b81750eeb2e06299ee8ef8a381767b68b791778ebbfd9cb30220182c712a6c7a3a8486c01db46ccda110509ed44841e3480d63659a37c64a7f73 304402201dcf3b9b7759b61e76f8a63abe812f94a120b4c479daaf44cf473f542656e63402200a84fccf4c00bd1917b9b1598b7c6492e6fa333567dcceb79369b77e69b3b21f 3044022048b9e4556b55e8a79d14cbd619f935fbdc4e6b4341f392427ef2fa0009878a760220404999fe47d6e0c31c920523ebe4977d57328c4e4afedef14910eba56c8d2983 3045022100c33ba43bd9e05767293332d0b272f051a56e837a3ce4dda352ca023b8fb3382f0220492ea5a4abfcafa8367ec9c9200261ad30cff6b5867382beda352dbe7759b9f3 30450221008a142d7b59270e29c76caebc017d86f4675764c1e74e3719c39cd2383927fe08022044ebdbb7d18e4da47be1671e539723ab11a81077db95d597faa0b1c4bc60eeb3 304502206f25ebacd1e76ca51136dd0ead620d2c38439b3ca325d2f64f9926673cc80890022100d129b8b86369b22210340caf14115b15bd35f39c643e1202bf453d8584e2463d 3046022100e8e57cc5209bf1da3f55cbe73841b151bbe224f6a847b2b0fc43a5030a11a905022100ff1dc9ce1a39e24ec67ead968f15368fd76341a53b1bf8f6317276077ff949a7 3045022058e6e514ce40dcea63c74d51ea8b9ddfe990e0d9ae0b7c2ae555295f59aba4a5022100e02da3ec16884f099ec70819aeec9a68f61f431abfee13bd6ffcb07090480ba5 3044022006704415cebbdd916667cffb6e54df0738021fd0c65990cbf5eefa3dc8e7127c022033cd51b8a77daf43a143f0ef7f491687420b7cbe4d1c1ec64bc39a978669d966 304402205cd2d291917f53c998c487c7b2296c9bc991f4424e65d3cdb0fe99c54c7ab13a0220401468c1de30b11a376ee32c1fc4e6f6463ebb3ff46551764a0dd26a392908e6 3046022100c9e743511acc2a3d582314ea9382d10189f02d21b706ff9e7446fea4a37e0d66022100d7b53dc30da0b7eca2db8e678d05ce3d727de6cb70d9cb42ed5e9964f2d86ea5 3045022100a5e8f66590ed234fd54431d4ecfa8d21057eebb7fd53e61591d5793c5349e2f502207bf2cd1fedda858015305d0eb63d25bea72e8e0fcd127f23bdd68e50ec2407b0 304502210080d9bb7bf7b320881839ad958aca96deeba64d10f23e9ca11d51a1fdb781020f02201b75170de2374c1c86799247e1c80a75a70dc96be31b4fe443cf7b0aaf5bdfb2 aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/format-check.sh000077500000000000000000000010001456575232400226600ustar00rootroot00000000000000#!/usr/bin/env bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then 
echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find bin source include tests -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/000077500000000000000000000000001456575232400214125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/000077500000000000000000000000001456575232400222045ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/000077500000000000000000000000001456575232400227435ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/cal.h000066400000000000000000000033361456575232400236600ustar00rootroot00000000000000#ifndef AWS_CAL_CAL_H #define AWS_CAL_CAL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; #define AWS_C_CAL_PACKAGE_ID 7 enum aws_cal_errors { AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_CAL_PACKAGE_ID), AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT, AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER, AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED, AWS_ERROR_CAL_MISMATCHED_DER_TYPE, AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT, AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT, AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED, AWS_ERROR_CAL_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_CAL_PACKAGE_ID) }; enum aws_cal_log_subject { AWS_LS_CAL_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_CAL_PACKAGE_ID), AWS_LS_CAL_ECC, AWS_LS_CAL_HASH, AWS_LS_CAL_HMAC, AWS_LS_CAL_DER, AWS_LS_CAL_LIBCRYPTO_RESOLVE, AWS_LS_CAL_RSA, AWS_LS_CAL_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_CAL_PACKAGE_ID) }; AWS_EXTERN_C_BEGIN AWS_CAL_API void aws_cal_library_init(struct aws_allocator *allocator); AWS_CAL_API void aws_cal_library_clean_up(void); /* * Every CRT thread that might invoke aws-lc functionality should call this as part of the thread at_exit process */ AWS_CAL_API void aws_cal_thread_clean_up(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_CAL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/ecc.h000066400000000000000000000160451456575232400236540ustar00rootroot00000000000000#ifndef AWS_CAL_ECC_H #define AWS_CAL_ECC_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_ecc_curve_name { AWS_CAL_ECDSA_P256, AWS_CAL_ECDSA_P384, }; struct aws_ecc_key_pair; typedef void aws_ecc_key_pair_destroy_fn(struct aws_ecc_key_pair *key_pair); typedef int aws_ecc_key_pair_sign_message_fn( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature_output); typedef int aws_ecc_key_pair_derive_public_key_fn(struct aws_ecc_key_pair *key_pair); typedef int aws_ecc_key_pair_verify_signature_fn( const struct aws_ecc_key_pair *signer, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); typedef size_t aws_ecc_key_pair_signature_length_fn(const struct aws_ecc_key_pair *signer); struct aws_ecc_key_pair_vtable { aws_ecc_key_pair_destroy_fn *destroy; aws_ecc_key_pair_derive_public_key_fn *derive_pub_key; aws_ecc_key_pair_sign_message_fn *sign_message; aws_ecc_key_pair_verify_signature_fn *verify_signature; aws_ecc_key_pair_signature_length_fn *signature_length; }; struct aws_ecc_key_pair { struct aws_allocator *allocator; struct aws_atomic_var ref_count; enum aws_ecc_curve_name curve_name; struct aws_byte_buf key_buf; struct aws_byte_buf pub_x; struct aws_byte_buf pub_y; struct aws_byte_buf priv_d; struct aws_ecc_key_pair_vtable *vtable; void *impl; }; AWS_EXTERN_C_BEGIN /** * Adds one to an ecc key pair's ref count. */ AWS_CAL_API void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair); /** * Subtracts one from an ecc key pair's ref count. If ref count reaches zero, the key pair is destroyed. */ AWS_CAL_API void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair); /** * Creates an Elliptic Curve private key that can be used for signing. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. Note: priv_key::len must match the appropriate length * for the selected curve_name. */ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); #if !defined(AWS_OS_IOS) /** * Creates an Elliptic Curve public/private key pair that can be used for signing and verifying. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. * Note: On Apple platforms this function is only supported on MacOS. This is * due to usage of SecItemExport, which is only available on MacOS 10.7+ * (yes, MacOS only and no other Apple platforms). There are alternatives for * ios and other platforms, but they are ugly to use. Hence for now it only * supports this call on MacOS. */ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name); #endif /* !AWS_OS_IOS */ /** * Creates an Elliptic Curve public key that can be used for verifying. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. Note: public_key_x::len and public_key_y::len must * match the appropriate length for the selected curve_name. */ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); /** * Creates an Elliptic Curve public/private key pair from a DER encoded key pair. * Returns a new instance of aws_ecc_key_pair if the key was successfully built. 
* Otherwise returns NULL. Whether or not signing or verification can be perform depends * on if encoded_keys is a public/private pair or a public key. */ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys); /** * Creates an Elliptic curve public key from x and y coordinates encoded as hex strings * Returns a new instance of aws_ecc_key_pair if the key was successfully built. * Otherwise returns NULL. */ AWS_CAL_API struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor); /** * Derives a public key from the private key if supported by this operating system (not supported on OSX). * key_pair::pub_x and key_pair::pub_y will be set with the raw key buffers. */ AWS_CAL_API int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair); /** * Get the curve name from the oid. OID here is the payload of the DER encoded ASN.1 part (doesn't include * type specifier or length. On success, the value of curve_name will be set. */ AWS_CAL_API int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name); /** * Get the DER encoded OID from the curve_name. The OID in this case will not contain the type or the length specifier. */ AWS_CAL_API int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid); /** * Uses the key_pair's private key to sign message. The output will be in signature. Signature must be large enough * to hold the signature. Check aws_ecc_key_pair_signature_length() for the appropriate size. Signature will be DER * encoded. * * It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually * something like a SHA256. */ AWS_CAL_API int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature); /** * Uses the key_pair's public key to verify signature of message. Signature should be DER * encoded. * * It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually * something like a SHA256. * * returns AWS_OP_SUCCESS if the signature is valid. */ AWS_CAL_API int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature); AWS_CAL_API size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair); AWS_CAL_API void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y); AWS_CAL_API void aws_ecc_key_pair_get_private_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d); AWS_CAL_API size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_ECC_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/exports.h000066400000000000000000000017541456575232400246270ustar00rootroot00000000000000#ifndef AWS_CAL_EXPORTS_H #define AWS_CAL_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #if defined(AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_CAL_USE_IMPORT_EXPORT # ifdef AWS_CAL_EXPORTS # define AWS_CAL_API __declspec(dllexport) # else # define AWS_CAL_API __declspec(dllimport) # endif /* AWS_CAL_EXPORTS */ # else # define AWS_CAL_API # endif /* AWS_CAL_USE_IMPORT_EXPORT */ #else /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_CAL_USE_IMPORT_EXPORT) && defined(AWS_CAL_EXPORTS) # define AWS_CAL_API __attribute__((visibility("default"))) # else # define AWS_CAL_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ #endif /* AWS_CAL_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/hash.h000066400000000000000000000115511456575232400240420ustar00rootroot00000000000000#ifndef AWS_CAL_HASH_H_ #define AWS_CAL_HASH_H_ /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_SHA256_LEN 32 #define AWS_SHA1_LEN 20 #define AWS_MD5_LEN 16 struct aws_hash; struct aws_hash_vtable { const char *alg_name; const char *provider; void (*destroy)(struct aws_hash *hash); int (*update)(struct aws_hash *hash, const struct aws_byte_cursor *buf); int (*finalize)(struct aws_hash *hash, struct aws_byte_buf *out); }; struct aws_hash { struct aws_allocator *allocator; struct aws_hash_vtable *vtable; size_t digest_size; bool good; void *impl; }; typedef struct aws_hash *(aws_hash_new_fn)(struct aws_allocator *allocator); AWS_EXTERN_C_BEGIN /** * Allocates and initializes a sha256 hash instance. */ AWS_CAL_API struct aws_hash *aws_sha256_new(struct aws_allocator *allocator); /** * Allocates and initializes a sha1 hash instance. */ AWS_CAL_API struct aws_hash *aws_sha1_new(struct aws_allocator *allocator); /** * Allocates and initializes an md5 hash instance. */ AWS_CAL_API struct aws_hash *aws_md5_new(struct aws_allocator *allocator); /** * Cleans up and deallocates hash. */ AWS_CAL_API void aws_hash_destroy(struct aws_hash *hash); /** * Updates the running hash with to_hash. this can be called multiple times. */ AWS_CAL_API int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); /** * Completes the hash computation and writes the final digest to output. * Allocation of output is the caller's responsibility. If you specify * truncate_to to something other than 0, the output will be truncated to that * number of bytes. For example, if you want a SHA256 digest as the first 16 * bytes, set truncate_to to 16. If you want the full digest size, just set this * to 0. */ AWS_CAL_API int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to); /** * Computes the md5 hash over input and writes the digest output to 'output'. * Use this if you don't need to stream the data you're hashing and you can load * the entire input to hash into memory. */ AWS_CAL_API int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); /** * Computes the sha256 hash over input and writes the digest output to 'output'. * Use this if you don't need to stream the data you're hashing and you can load * the entire input to hash into memory. If you specify truncate_to to something * other than 0, the output will be truncated to that number of bytes. 
For * example, if you want a SHA256 digest as the first 16 bytes, set truncate_to * to 16. If you want the full digest size, just set this to 0. */ AWS_CAL_API int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); /** * Computes the sha1 hash over input and writes the digest output to 'output'. * Use this if you don't need to stream the data you're hashing and you can load * the entire input to hash into memory. If you specify truncate_to to something * other than 0, the output will be truncated to that number of bytes. For * example, if you want a SHA1 digest as the first 16 bytes, set truncate_to * to 16. If you want the full digest size, just set this to 0. */ AWS_CAL_API int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); /** * Set the implementation of md5 to use. If you compiled without BYO_CRYPTO, * you do not need to call this. However, if use this, we will honor it, * regardless of compile options. This may be useful for testing purposes. If * you did set BYO_CRYPTO, and you do not call this function you will * segfault. */ AWS_CAL_API void aws_set_md5_new_fn(aws_hash_new_fn *fn); /** * Set the implementation of sha256 to use. If you compiled without * BYO_CRYPTO, you do not need to call this. However, if use this, we will * honor it, regardless of compile options. This may be useful for testing * purposes. If you did set BYO_CRYPTO, and you do not call this function * you will segfault. */ AWS_CAL_API void aws_set_sha256_new_fn(aws_hash_new_fn *fn); /** * Set the implementation of sha1 to use. If you compiled without * BYO_CRYPTO, you do not need to call this. However, if use this, we will * honor it, regardless of compile options. This may be useful for testing * purposes. If you did set BYO_CRYPTO, and you do not call this function * you will segfault. */ AWS_CAL_API void aws_set_sha1_new_fn(aws_hash_new_fn *fn); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_HASH_H_ */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/hmac.h000066400000000000000000000060121456575232400240230ustar00rootroot00000000000000#ifndef AWS_CAL_HMAC_H_ #define AWS_CAL_HMAC_H_ /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_SHA256_HMAC_LEN 32 struct aws_hmac; struct aws_hmac_vtable { const char *alg_name; const char *provider; void (*destroy)(struct aws_hmac *hmac); int (*update)(struct aws_hmac *hmac, const struct aws_byte_cursor *buf); int (*finalize)(struct aws_hmac *hmac, struct aws_byte_buf *out); }; struct aws_hmac { struct aws_allocator *allocator; struct aws_hmac_vtable *vtable; size_t digest_size; bool good; void *impl; }; typedef struct aws_hmac *(aws_hmac_new_fn)(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); AWS_EXTERN_C_BEGIN /** * Allocates and initializes a sha256 hmac instance. Secret is the key to be * used for the hmac process. */ AWS_CAL_API struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret); /** * Cleans up and deallocates hmac. */ AWS_CAL_API void aws_hmac_destroy(struct aws_hmac *hmac); /** * Updates the running hmac with to_hash. this can be called multiple times. 
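 *
 * A minimal streaming sketch of the update/finalize flow described above. This is
 * illustrative only and not part of the original header: aws_default_allocator,
 * aws_byte_cursor_from_c_str and the aws_byte_buf helpers come from aws-c-common,
 * the secret and input values are made up, and error checking is omitted for brevity.
 *
 *     struct aws_allocator *alloc = aws_default_allocator();
 *     struct aws_byte_cursor secret = aws_byte_cursor_from_c_str("example-secret");
 *     struct aws_hmac *hmac = aws_sha256_hmac_new(alloc, &secret);
 *
 *     struct aws_byte_cursor chunk1 = aws_byte_cursor_from_c_str("hello ");
 *     struct aws_byte_cursor chunk2 = aws_byte_cursor_from_c_str("world");
 *     aws_hmac_update(hmac, &chunk1);
 *     aws_hmac_update(hmac, &chunk2);
 *
 *     struct aws_byte_buf digest;
 *     aws_byte_buf_init(&digest, alloc, AWS_SHA256_HMAC_LEN);
 *     aws_hmac_finalize(hmac, &digest, 0);
 *
 *     aws_hmac_destroy(hmac);
 *     aws_byte_buf_clean_up(&digest);
 *
 * Passing 0 as truncate_to requests the full 32-byte digest.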
*/ AWS_CAL_API int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); /** * Completes the hmac computation and writes the final digest to output. * Allocation of output is the caller's responsibility. If you specify * truncate_to to something other than 0, the output will be truncated to that * number of bytes. For example if you want a SHA256 digest as the first 16 * bytes, set truncate_to to 16. If you want the full digest size, just set this * to 0. */ AWS_CAL_API int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to); /** * Computes the sha256 hmac over input and writes the digest output to 'output'. * Use this if you don't need to stream the data you're hashing and you can load * the entire input to hash into memory. If you specify truncate_to to something * other than 0, the output will be truncated to that number of bytes. For * example if you want a SHA256 HMAC digest as the first 16 bytes, set * truncate_to to 16. If you want the full digest size, just set this to 0. */ AWS_CAL_API int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to); /** * Set the implementation of sha256 hmac to use. If you compiled without * BYO_CRYPTO, you do not need to call this. However, if use this, we will * honor it, regardless of compile options. This may be useful for testing * purposes. If you did set BYO_CRYPTO, and you do not call this function * you will segfault. */ AWS_CAL_API void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_HASH_H_ */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/private/000077500000000000000000000000001456575232400244155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/private/der.h000066400000000000000000000205261456575232400253450ustar00rootroot00000000000000#ifndef AWS_C_CAL_DER_H #define AWS_C_CAL_DER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct aws_der_encoder; struct aws_der_decoder; /* * Note: encoder/decoder only supports unsigned representations of integers and usage * of signed integers might lead to unexpected results. * Context: DER spec requires ints to be stored in big endian format with MSB * representing signedness. To disambiguate between negative number and big * positive number, null byte can be added in front of positive number. DER spec * requires representation to be the shortest possible one. * During encoding aws_der_encoder_write_unsigned_integer assumes that cursor * points to a positive number and will prepend 0 if needed by DER spec to * indicate its positive number. Encoder does not support writing negative numbers. * Decoder aws_der_encoder_write_unsigned_integer will strip any leading 0 as * needed and will error out if der contains negative number. * Take special care when integrating with 3p libraries cause they might expect * different format. Ex. this format matches what openssl calls bin format * (BN_bin2bn) and might not work as expected with openssl mpi format. 
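 *
 * A short encode-side sketch of the convention above. Illustrative only and not
 * part of the original header: aws_default_allocator and aws_byte_cursor_from_array
 * come from aws-c-common, the capacity and the sample bytes are arbitrary, and
 * error checking is omitted.
 *
 *     uint8_t n_be[] = {0x84, 0x12};    big-endian, unsigned, high bit set
 *     struct aws_byte_cursor n = aws_byte_cursor_from_array(n_be, sizeof(n_be));
 *     struct aws_der_encoder *enc = aws_der_encoder_new(aws_default_allocator(), 64);
 *     aws_der_encoder_write_unsigned_integer(enc, n);
 *     struct aws_byte_cursor der;
 *     aws_der_encoder_get_contents(enc, &der);
 *     ... copy the bytes out of der before destroying the encoder ...
 *     aws_der_encoder_destroy(enc);
 *
 * Because the high bit of 0x84 is set, the encoder prepends a 0x00 so the value is
 * not read back as negative, and the decoder strips that leading byte again when
 * reading the integer out.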
*/ enum aws_der_type { /* Primitives */ AWS_DER_BOOLEAN = 0x01, AWS_DER_INTEGER = 0x02, AWS_DER_BIT_STRING = 0x03, AWS_DER_OCTET_STRING = 0x04, AWS_DER_NULL = 0x05, AWS_DER_OBJECT_IDENTIFIER = 0x06, AWS_DER_BMPString = 0x1e, AWS_DER_UNICODE_STRING = AWS_DER_BMPString, AWS_DER_IA5String = 0x16, /* Unsupported */ AWS_DER_PrintableString = 0x13, AWS_DER_TeletexString = 0x14, /* Unsupported */ /* Constructed types */ AWS_DER_SEQUENCE = 0x30, AWS_DER_SEQUENCE_OF = AWS_DER_SEQUENCE, AWS_DER_SET = 0x31, AWS_DER_SET_OF = AWS_DER_SET, AWS_DER_UTF8_STRING = 0x0c, /* class types */ AWS_DER_CLASS_UNIVERSAL = 0x00, AWS_DER_CLASS_APPLICATION = 0x40, AWS_DER_CLASS_CONTEXT = 0x80, AWS_DER_CLASS_PRIVATE = 0xc0, /* forms */ AWS_DER_FORM_CONSTRUCTED = 0x20, AWS_DER_FORM_PRIMITIVE = 0x00, }; AWS_EXTERN_C_BEGIN /** * Initializes a DER encoder * @param allocator The allocator to use for all allocations within the encoder * @param capacity The initial capacity of the encoder scratch buffer (the max size of all encoded TLVs) * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API struct aws_der_encoder *aws_der_encoder_new(struct aws_allocator *allocator, size_t capacity); /** * Cleans up a DER encoder * @param encoder The encoder to clean up * * Note that this destroys the encoder buffer, invalidating any references to the contents given via get_contents() */ AWS_CAL_API void aws_der_encoder_destroy(struct aws_der_encoder *encoder); /** * Writes an arbitrarily sized integer to the DER stream * @param encoder The encoder to use * @param integer A cursor pointing to the integer's memory * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_write_unsigned_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer); /** * Writes a boolean to the DER stream * @param encoder The encoder to use * @param boolean The boolean to write * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_write_boolean(struct aws_der_encoder *encoder, bool boolean); /** * Writes a NULL token to the stream * @param encoder The encoder to write to * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_write_null(struct aws_der_encoder *encoder); /** * Writes a BIT_STRING to the stream * @param encoder The encoder to use * @param bit_string The bit string to encode * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_write_bit_string(struct aws_der_encoder *encoder, struct aws_byte_cursor bit_string); /** * Writes a string to the stream * @param encoder The encoder to use * @param octet_string The string to encode * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_write_octet_string( struct aws_der_encoder *encoder, struct aws_byte_cursor octet_string); /** * Begins a SEQUENCE of objects in the DER stream * @param encoder The encoder to use * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_begin_sequence(struct aws_der_encoder *encoder); /** * Finishes a SEQUENCE and applies it to the DER stream buffer * @param encoder The encoder to update * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_end_sequence(struct aws_der_encoder *encoder); /** * Begins a SET of objects in the DER stream * @param encoder The encoder to use * @return AWS_OP_ERR if an error occurs, otherwise 
AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_begin_set(struct aws_der_encoder *encoder); /** * Finishes a SET and applies it to the DER stream buffer * @param encoder The encoder to update * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_end_set(struct aws_der_encoder *encoder); /** * Retrieves the contents of the encoder stream buffer * @param encoder The encoder to read from * @param cursor The cursor to point at the stream buffer * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_encoder_get_contents(struct aws_der_encoder *encoder, struct aws_byte_cursor *contents); /** * Initializes an DER decoder * @param allocator The allocator to use * @param input The DER formatted buffer to parse * @return Initialized decoder, or NULL */ AWS_CAL_API struct aws_der_decoder *aws_der_decoder_new(struct aws_allocator *allocator, struct aws_byte_cursor input); /** * Cleans up a DER encoder * @param decoder The encoder to clean up */ AWS_CAL_API void aws_der_decoder_destroy(struct aws_der_decoder *decoder); /** * Allows for iteration over the decoded TLVs. * @param decoder The decoder to iterate over * @return true if there is a tlv to read after advancing, false when done */ AWS_CAL_API bool aws_der_decoder_next(struct aws_der_decoder *decoder); /** * The type of the current TLV * @param decoder The decoder to inspect * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API enum aws_der_type aws_der_decoder_tlv_type(struct aws_der_decoder *decoder); /** * The size of the current TLV * @param decoder The decoder to inspect * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API size_t aws_der_decoder_tlv_length(struct aws_der_decoder *decoder); /** * The number of elements in the current TLV container * @param decoder The decoder to inspect * @return Number of elements in the current container */ AWS_CAL_API size_t aws_der_decoder_tlv_count(struct aws_der_decoder *decoder); /** * Extracts the current TLV string value (BIT_STRING, OCTET_STRING) * @param decoder The decoder to extract from * @param string The buffer to store the string into * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_decoder_tlv_string(struct aws_der_decoder *decoder, struct aws_byte_cursor *string); /** * Extracts the current TLV INTEGER value (INTEGER) * @param decoder The decoder to extract from * @param integer The buffer to store the integer into * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_decoder_tlv_unsigned_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer); /** * Extracts the current TLV BOOLEAN value (BOOLEAN) * @param decoder The decoder to extract from * @param boolean The boolean to store the value into * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_decoder_tlv_boolean(struct aws_der_decoder *decoder, bool *boolean); /** * Extracts the current TLV value as a blob * @param decoder The decoder to extract from * @param blob The buffer to store the value into * @return AWS_OP_ERR if an error occurs, otherwise AWS_OP_SUCCESS */ AWS_CAL_API int aws_der_decoder_tlv_blob(struct aws_der_decoder *decoder, struct aws_byte_cursor *blob); AWS_EXTERN_C_END #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/private/ecc.h000066400000000000000000000011471456575232400253230ustar00rootroot00000000000000#ifndef 
AWS_C_CAL_PRIVATE_ECC_H #define AWS_C_CAL_PRIVATE_ECC_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct aws_der_decoder; AWS_EXTERN_C_BEGIN AWS_CAL_API int aws_der_decoder_load_ecc_key_pair( struct aws_der_decoder *decoder, struct aws_byte_cursor *out_public_x_coor, struct aws_byte_cursor *out_public_y_coor, struct aws_byte_cursor *out_private_d, enum aws_ecc_curve_name *out_curve_name); AWS_EXTERN_C_END #endif /* AWS_C_CAL_PRIVATE_ECC_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/private/opensslcrypto_common.h000066400000000000000000000056741456575232400310760ustar00rootroot00000000000000#ifndef AWS_C_CAL_OPENSSLCRYPTO_COMMON_H #define AWS_C_CAL_OPENSSLCRYPTO_COMMON_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define OPENSSL_SUPPRESS_DEPRECATED #include #include #include #if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) # define OPENSSL_IS_OPENSSL #endif /* * There are some differences in function definitions between OpenSSL 1.0.2 and * 1.1.1, aws-lc and boringssl. * This file defines some common wrappers that abstract away those differences. * For OpenSSL we currently support building against 1.0.2 or 1.1.1, and can * detect version used at runtime and dyn load those symbols correctly. * For OpenSSL 3.0 the code will compile and run, but largely because we disable * deprecation warnings. * For aws-lc and boringssl code must be compiled against the same version as * the runtime lib. */ typedef HMAC_CTX *(*hmac_ctx_new)(void); typedef void (*hmac_ctx_free)(HMAC_CTX *); typedef void (*hmac_ctx_init)(HMAC_CTX *); typedef void (*hmac_ctx_clean_up)(HMAC_CTX *); typedef int (*hmac_init_ex)(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *); typedef int (*hmac_update)(HMAC_CTX *, const unsigned char *, size_t); typedef int (*hmac_final)(HMAC_CTX *, unsigned char *, unsigned int *); /* C standard does not have concept of generic function pointer, but it does guarantee that function pointer casts will roundtrip when casting to any type and then back. Use void *(void) as a generic function pointer. */ typedef void (*crypto_generic_fn_ptr)(void); struct openssl_hmac_ctx_table { hmac_ctx_new new_fn; hmac_ctx_free free_fn; hmac_ctx_init init_fn; hmac_ctx_clean_up clean_up_fn; hmac_init_ex init_ex_fn; hmac_update update_fn; hmac_final final_fn; /* There is slight variance between the crypto interfaces. Note that function pointer casting is undefined behavior. To workaround the issue, use generic pointer for crypto and let delegate function cast it back to correct type. Do not use following fields manually. 
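 *
 * As a purely illustrative sketch of the delegate pattern this describes (not
 * necessarily the exact shape used by the implementation files; the wrapper name
 * is made up): the generic pointer stored in impl is cast back to the concrete
 * hmac_init_ex type before it is invoked, so the stored pointer is never called
 * through a mismatched function-pointer type.
 *
 *     static int s_hmac_init_ex_delegate(
 *         HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *engine) {
 *         hmac_init_ex real_init_ex = (hmac_init_ex)g_aws_openssl_hmac_ctx_table->impl.init_ex_fn;
 *         return real_init_ex(ctx, key, key_len, md, engine);
 *     }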
*/ struct { crypto_generic_fn_ptr init_ex_fn; } impl; }; extern struct openssl_hmac_ctx_table *g_aws_openssl_hmac_ctx_table; typedef EVP_MD_CTX *(*evp_md_ctx_new)(void); typedef void (*evp_md_ctx_free)(EVP_MD_CTX *); typedef int (*evp_md_ctx_digest_init_ex)(EVP_MD_CTX *, const EVP_MD *, ENGINE *); typedef int (*evp_md_ctx_digest_update)(EVP_MD_CTX *, const void *, size_t); typedef int (*evp_md_ctx_digest_final_ex)(EVP_MD_CTX *, unsigned char *, unsigned int *); struct openssl_evp_md_ctx_table { evp_md_ctx_new new_fn; evp_md_ctx_free free_fn; evp_md_ctx_digest_init_ex init_ex_fn; evp_md_ctx_digest_update update_fn; evp_md_ctx_digest_final_ex final_ex_fn; }; extern struct openssl_evp_md_ctx_table *g_aws_openssl_evp_md_ctx_table; #endif /* AWS_C_CAL_OPENSSLCRYPTO_COMMON_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/private/rsa.h000066400000000000000000000071711456575232400253610ustar00rootroot00000000000000#ifndef AWS_C_CAL_PRIVATE_RSA_H #define AWS_C_CAL_PRIVATE_RSA_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct aws_rsa_key_pair; struct aws_der_decoder; struct aws_rsa_key_vtable { int (*encrypt)( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); int (*decrypt)( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); int (*sign)( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); int (*verify)( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); }; struct aws_rsa_key_pair { struct aws_allocator *allocator; struct aws_rsa_key_vtable *vtable; struct aws_ref_count ref_count; size_t key_size_in_bits; struct aws_byte_buf priv; struct aws_byte_buf pub; void *impl; }; void aws_rsa_key_pair_base_clean_up(struct aws_rsa_key_pair *key_pair); /* * RSAPrivateKey as defined in RFC 8017 (aka PKCS1 format): * version Version, * modulus INTEGER, -- n * publicExponent INTEGER, -- e * privateExponent INTEGER, -- d * prime1 INTEGER, -- p * prime2 INTEGER, -- q * exponent1 INTEGER, -- d mod (p-1) * exponent2 INTEGER, -- d mod (q-1) * coefficient INTEGER, -- (inverse of q) mod p * otherPrimeInfos OtherPrimeInfos OPTIONAL * Note: otherPrimeInfos is used for >2 primes RSA cases, which are not very * common and currently not supported by CRT. Version == 0 indicates 2 prime * case and version == 1 indicates >2 prime case, hence in practice it will * always be 0. */ struct aws_rsa_private_key_pkcs1 { /* * Note: all cursors here point to bignum data for underlying RSA numbers. * Struct itself does not own the data and points to where ever the data was * decoded from. 
*/ int version; struct aws_byte_cursor modulus; struct aws_byte_cursor publicExponent; struct aws_byte_cursor privateExponent; struct aws_byte_cursor prime1; struct aws_byte_cursor prime2; struct aws_byte_cursor exponent1; struct aws_byte_cursor exponent2; struct aws_byte_cursor coefficient; }; AWS_CAL_API int aws_der_decoder_load_private_rsa_pkcs1( struct aws_der_decoder *decoder, struct aws_rsa_private_key_pkcs1 *out); /* * RSAPublicKey as defined in RFC 8017 (aka PKCS1 format): modulus INTEGER, -- n publicExponent INTEGER -- e */ struct aws_rsa_public_key_pkcs1 { /* * Note: all cursors here point to bignum data for underlying RSA numbers. * Struct itself does not own the data and points to where ever the data was * decoded from. */ struct aws_byte_cursor modulus; struct aws_byte_cursor publicExponent; }; AWS_CAL_API int aws_der_decoder_load_public_rsa_pkcs1( struct aws_der_decoder *decoder, struct aws_rsa_public_key_pkcs1 *out); /* * Returns AWS_OP_SUCCESS if key size is supported and raises * AWS_ERROR_INVALID_ARGUMENT otherwise. */ int is_valid_rsa_key_size(size_t key_size_in_bits); #endif /* AWS_C_CAL_PRIVATE_RSA_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/private/symmetric_cipher_priv.h000066400000000000000000000042301456575232400311730ustar00rootroot00000000000000#ifndef AWS_CAL_SYMMETRIC_CIPHER_PRIV_H #define AWS_CAL_SYMMETRIC_CIPHER_PRIV_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_symmetric_cipher; struct aws_symmetric_cipher_vtable { const char *alg_name; const char *provider; void (*destroy)(struct aws_symmetric_cipher *cipher); /* reset the cipher to being able to start another encrypt or decrypt operation. The original IV, Key, Tag etc... will be restored to the current cipher. */ int (*reset)(struct aws_symmetric_cipher *cipher); int (*encrypt)(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); int (*decrypt)(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); int (*finalize_encryption)(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); int (*finalize_decryption)(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); }; struct aws_symmetric_cipher { struct aws_allocator *allocator; struct aws_symmetric_cipher_vtable *vtable; struct aws_byte_buf iv; struct aws_byte_buf key; struct aws_byte_buf aad; struct aws_byte_buf tag; size_t block_size; size_t key_length_bits; bool good; void *impl; }; AWS_EXTERN_C_BEGIN /** * Generates a secure random initialization vector of length len_bytes. If is_counter_mode is set, the final 4 bytes * will be reserved as a counter and initialized to 1 in big-endian byte-order. */ AWS_CAL_API void aws_symmetric_cipher_generate_initialization_vector( size_t len_bytes, bool is_counter_mode, struct aws_byte_buf *out); /** * Generates a secure random symmetric key of length len_bytes. */ AWS_CAL_API void aws_symmetric_cipher_generate_key(size_t len_bytes, struct aws_byte_buf *out); AWS_EXTERN_C_END /* Don't let this one get exported as it should never be used outside of this library (including tests). 
*/ int aws_symmetric_cipher_try_ensure_sufficient_buffer_space(struct aws_byte_buf *buf, size_t size); #endif /* AWS_CAL_SYMMETRIC_CIPHER_PRIV_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/rsa.h000066400000000000000000000126501456575232400237050ustar00rootroot00000000000000#ifndef AWS_CAL_RSA_H #define AWS_CAL_RSA_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_rsa_key_pair; enum aws_rsa_encryption_algorithm { AWS_CAL_RSA_ENCRYPTION_PKCS1_5, AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256, AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512, }; enum aws_rsa_signature_algorithm { AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, AWS_CAL_RSA_SIGNATURE_PSS_SHA256, }; /* * Note: prefer using standard key sizes - 1024, 2048, 4096. * Other key sizes will work, but which key sizes are supported may vary by * platform. Typically, multiples of 64 should work on all platforms. */ enum { AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS = 1024, AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS = 4096, }; AWS_EXTERN_C_BEGIN /** * Creates an RSA public key from RSAPublicKey as defined in rfc 8017 (aka PKCS1). * Returns a new instance of aws_rsa_key_pair if the key was successfully built. * Otherwise returns NULL. */ AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); /** * Creates an RSA private key from RSAPrivateKey as defined in rfc 8017 (aka PKCS1). * Returns a new instance of aws_rsa_key_pair if the key was successfully built. * Otherwise returns NULL. */ AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor key); /** * Adds one to an RSA key pair's ref count. * Returns key_pair pointer. */ AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair); /** * Subtracts one from an RSA key pair's ref count. If ref count reaches zero, the key pair is destroyed. * Always returns NULL. */ AWS_CAL_API struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair); /** * Max plaintext size that can be encrypted by the key (i.e. max data size * supported by the key - bytes needed for padding). */ AWS_CAL_API size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm); /* * Uses the key_pair's private key to encrypt the plaintext. The output will be * in out. out must be large enough to to hold the ciphertext. Check * aws_rsa_key_pair_block_length() for output upper bound. */ AWS_CAL_API int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out); /* * Uses the key_pair's private key to decrypt the ciphertext. The output will be * in out. out must be large enough to to hold the ciphertext. Check * aws_rsa_key_pair_block_length() for output upper bound. */ AWS_CAL_API int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out); /* * Max size for a block supported by a given key pair. */ AWS_CAL_API size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair); /** * Uses the key_pair's private key to sign message. The output will be in out. 
out must be large enough * to hold the signature. Check aws_rsa_key_pair_signature_length() for the appropriate size. * * It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually * something like a SHA256. */ AWS_CAL_API int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out); /** * Uses the key_pair's public key to verify signature of message. * * It is the callers job to make sure message is the appropriate cryptographic digest for this operation. It's usually * something like a SHA256. * * returns AWS_OP_SUCCESS if the signature is valid. * raises AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED if signature validation failed */ AWS_CAL_API int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature); /* * Max size for a signature supported by a given key pair. */ AWS_CAL_API size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair); enum aws_rsa_key_export_format { AWS_CAL_RSA_KEY_EXPORT_PKCS1, }; /* * Get public key for the key pair. * Inits out to a copy of key. * Any encoding on top of that (ex. b64) is left up to user. * Note: this function is currently not supported on windows for generated keys. */ AWS_CAL_API int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); /* * Get private key for the key pair. * Inits out to a copy of key. * Any encoding on top of that (ex. b64) is left up to user. * Note: this function is currently not supported on Windows for generated keys. */ AWS_CAL_API int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_RSA_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/include/aws/cal/symmetric_cipher.h000066400000000000000000000236741456575232400264760ustar00rootroot00000000000000#ifndef AWS_CAL_SYMMETRIC_CIPHER_H #define AWS_CAL_SYMMETRIC_CIPHER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_AES_256_CIPHER_BLOCK_SIZE 16 #define AWS_AES_256_KEY_BIT_LEN 256 #define AWS_AES_256_KEY_BYTE_LEN (AWS_AES_256_KEY_BIT_LEN / 8) struct aws_symmetric_cipher; typedef struct aws_symmetric_cipher *(aws_aes_cbc_256_new_fn)( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); typedef struct aws_symmetric_cipher *(aws_aes_ctr_256_new_fn)( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); typedef struct aws_symmetric_cipher *(aws_aes_gcm_256_new_fn)( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *decryption_tag); typedef struct aws_symmetric_cipher *( aws_aes_keywrap_256_new_fn)(struct aws_allocator *allocator, const struct aws_byte_cursor *key); AWS_EXTERN_C_BEGIN /** * Creates an instance of AES CBC with 256-bit key. * If key and iv are NULL, they will be generated internally. 
* You can get the generated key and iv back by calling: * * aws_symmetric_cipher_get_key() and * aws_symmetric_cipher_get_initialization_vector() * * respectively. * * If they are set, that key and iv will be copied internally and used by the cipher. * * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. */ AWS_CAL_API struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); /** * Creates an instance of AES CTR with 256-bit key. * If key and iv are NULL, they will be generated internally. * You can get the generated key and iv back by calling: * * aws_symmetric_cipher_get_key() and * aws_symmetric_cipher_get_initialization_vector() * * respectively. * * If they are set, that key and iv will be copied internally and used by the cipher. * * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. */ AWS_CAL_API struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); /** * Creates an instance of AES GCM with 256-bit key. * If key, iv are NULL, they will be generated internally. * You can get the generated key and iv back by calling: * * aws_symmetric_cipher_get_key() and * aws_symmetric_cipher_get_initialization_vector() * * respectively. * * If they are set, that key and iv will be copied internally and used by the cipher. * * If tag and aad are set they will be copied internally and used by the cipher. * decryption_tag would most likely be used for a decrypt operation to detect tampering or corruption. * The Tag for the most recent encrypt operation will be available in: * * aws_symmetric_cipher_get_tag() * * If aad is set it will be copied and applied to the cipher. * * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. */ AWS_CAL_API struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *decryption_tag); /** * Creates an instance of AES Keywrap with 256-bit key. * If key is NULL, it will be generated internally. * You can get the generated key back by calling: * * aws_symmetric_cipher_get_key() * * If key is set, that key will be copied internally and used by the cipher. * * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. */ AWS_CAL_API struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key); /** * Cleans up internal resources and state for cipher and then deallocates it. */ AWS_CAL_API void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); /** * Encrypts the value in to_encrypt and writes the encrypted data into out. * If out is dynamic it will be expanded. If it is not, and out is not large enough to handle * the encrypted output, the call will fail. If you're trying to optimize to use a stack based array * or something, make sure it's at least as large as the size of to_encrypt + an extra BLOCK to account for * padding etc... * * returns AWS_OP_SUCCESS on success. 
Call aws_last_error() to determine the failure cause if it returns * AWS_OP_ERR; */ AWS_CAL_API int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); /** * Decrypts the value in to_decrypt and writes the decrypted data into out. * If out is dynamic it will be expanded. If it is not, and out is not large enough to handle * the decrypted output, the call will fail. If you're trying to optimize to use a stack based array * or something, make sure it's at least as large as the size of to_decrypt + an extra BLOCK to account for * padding etc... * * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns * AWS_OP_ERR; */ AWS_CAL_API int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); /** * Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any * writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and * out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize * to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for * padding etc... * * After invoking this function, you MUST call aws_symmetric_cipher_reset() before invoking any encrypt/decrypt * operations on this cipher again. * * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns * AWS_OP_ERR; */ AWS_CAL_API int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); /** * Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any * writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and * out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize * to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for * padding etc... * * After invoking this function, you MUST call aws_symmetric_cipher_reset() before invoking any encrypt/decrypt * operations on this cipher again. * * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns * AWS_OP_ERR; */ AWS_CAL_API int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); /** * Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the * same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for * immediate reuse. * * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns * AWS_OP_ERR; */ AWS_CAL_API int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); /** * Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. * The memory in this cursor is unsafe as it refers to the internal buffer. * This was done because the use case doesn't require fetching these during an * encryption or decryption operation and it dramatically simplifies the API. * Only use this function between other calls to this API as any function call can alter the value of this tag. 
* * If you need to access it in a different pattern, copy the values to your own buffer first. */ AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); /** * Gets the original initialization vector as a cursor. * The memory in this cursor is unsafe as it refers to the internal buffer. * This was done because the use case doesn't require fetching these during an * encryption or decryption operation and it dramatically simplifies the API. * * Unlike some other fields, this value does not change after the inital construction of the cipher. * * For some algorithms, such as AES Keywrap, this will return an empty cursor. */ AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( const struct aws_symmetric_cipher *cipher); /** * Gets the original key. * * The memory in this cursor is unsafe as it refers to the internal buffer. * This was done because the use case doesn't require fetching these during an * encryption or decryption operation and it dramatically simplifies the API. * * Unlike some other fields, this value does not change after the inital construction of the cipher. */ AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); /** * Returns true if the state of the cipher is good, and otherwise returns false. * Most operations, other than aws_symmetric_cipher_reset() will fail if this function is returning false. * aws_symmetric_cipher_reset() will reset the state to a good state if possible. */ AWS_CAL_API bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CAL_SYMMETRIC_CIPHER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/000077500000000000000000000000001456575232400212675ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/cal.c000066400000000000000000000112561456575232400221770ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #define AWS_DEFINE_ERROR_INFO_CAL(CODE, STR) [(CODE)-0x1C00] = AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-cal") static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_CAL(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED, "Verify on a cryptographic signature failed."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT, "An attempt was made to perform an " "Asymmetric cryptographic operation with the" "wrong key component. For example, attempt to" "verify a signature with a private key or " "sign a message with a public key."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, "A key length was used for an algorithm that needs a different key length."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER, "An ASN.1 OID was encountered that wasn't expected or understood. 
Most likely, an unsupported algorithm was " "encountered."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED, "An ASN.1 DER decoding operation failed on malformed input."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_MISMATCHED_DER_TYPE, "An invalid DER type was requested during encoding/decoding."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, "The specified algorithm is unsupported on this platform."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, "The input passed to a cipher algorithm was too large for that algorithm. Consider breaking the input into " "smaller chunks."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, "A cipher material such as an initialization vector or tag was an incorrect size for the selected algorithm."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT, "DER decoder does not support negative integers."), AWS_DEFINE_ERROR_INFO_CAL(AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT, "Key format is not supported."), AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED, "Unknown error when calling underlying Crypto library.")}; static struct aws_error_info_list s_list = { .error_list = s_errors, .count = AWS_ARRAY_SIZE(s_errors), }; static struct aws_log_subject_info s_cal_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO( AWS_LS_CAL_GENERAL, "aws-c-cal", "Subject for Cal logging that doesn't belong to any particular category"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_CAL_ECC, "ecc", "Subject for elliptic curve cryptography specific logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_CAL_HASH, "hash", "Subject for hashing specific logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_CAL_HMAC, "hmac", "Subject for hmac specific logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_CAL_DER, "der", "Subject for der specific logging."), DEFINE_LOG_SUBJECT_INFO( AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto_resolve", "Subject for libcrypto symbol resolution logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_CAL_RSA, "rsa", "Subject for rsa cryptography specific logging."), }; static struct aws_log_subject_info_list s_cal_log_subject_list = { .subject_list = s_cal_log_subject_infos, .count = AWS_ARRAY_SIZE(s_cal_log_subject_infos), }; #ifndef BYO_CRYPTO extern void aws_cal_platform_init(struct aws_allocator *allocator); extern void aws_cal_platform_clean_up(void); extern void aws_cal_platform_thread_clean_up(void); #endif /* BYO_CRYPTO */ static bool s_cal_library_initialized = false; void aws_cal_library_init(struct aws_allocator *allocator) { if (!s_cal_library_initialized) { aws_common_library_init(allocator); aws_register_error_info(&s_list); aws_register_log_subject_info_list(&s_cal_log_subject_list); #ifndef BYO_CRYPTO aws_cal_platform_init(allocator); #endif /* BYO_CRYPTO */ s_cal_library_initialized = true; } } void aws_cal_library_clean_up(void) { if (s_cal_library_initialized) { s_cal_library_initialized = false; #ifndef BYO_CRYPTO aws_cal_platform_clean_up(); #endif /* BYO_CRYPTO */ aws_unregister_log_subject_info_list(&s_cal_log_subject_list); aws_unregister_error_info(&s_list); aws_common_library_clean_up(); } } void aws_cal_thread_clean_up(void) { #ifndef BYO_CRYPTO aws_cal_platform_thread_clean_up(); #endif /* BYO_CRYPTO */ }
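/*
 * Editor's note: the block below is a usage sketch added for illustration only; it is
 * not part of the aws-c-cal sources reproduced above or below. It shows how the
 * aws_symmetric_cipher API documented in symmetric_cipher.h and the library
 * init/clean-up from cal.c fit together for an AES-256-GCM round trip. It assumes
 * the usual aws-c-cal / aws-c-common include paths and trims most error handling
 * and NULL checks for brevity.
 */
#include <aws/cal/cal.h>
#include <aws/cal/symmetric_cipher.h>
#include <aws/common/byte_buf.h>

static int s_example_aes_gcm_round_trip(struct aws_allocator *allocator) {
    aws_cal_library_init(allocator);

    /* NULL key/iv: the cipher generates them internally; read them back via the getters. */
    struct aws_symmetric_cipher *encryptor = aws_aes_gcm_256_new(allocator, NULL, NULL, NULL, NULL);
    if (encryptor == NULL) {
        aws_cal_library_clean_up();
        return AWS_OP_ERR;
    }

    struct aws_byte_cursor plaintext = aws_byte_cursor_from_c_str("example plaintext");

    /* Per the encrypt() docs, reserve the input size plus an extra block for padding. */
    struct aws_byte_buf ciphertext;
    aws_byte_buf_init(&ciphertext, allocator, plaintext.len + AWS_AES_256_CIPHER_BLOCK_SIZE);

    aws_symmetric_cipher_encrypt(encryptor, plaintext, &ciphertext);
    aws_symmetric_cipher_finalize_encryption(encryptor, &ciphertext);

    /* The getters return cursors into the cipher's internal buffers; they are only safe
     * to read between calls on this cipher, so use (or copy) them right away. */
    struct aws_byte_cursor key = aws_symmetric_cipher_get_key(encryptor);
    struct aws_byte_cursor iv = aws_symmetric_cipher_get_initialization_vector(encryptor);
    struct aws_byte_cursor tag = aws_symmetric_cipher_get_tag(encryptor);

    /* The decryption tag is supplied at construction so tampering is detected at finalize. */
    struct aws_symmetric_cipher *decryptor = aws_aes_gcm_256_new(allocator, &key, &iv, NULL, &tag);

    struct aws_byte_buf decrypted;
    aws_byte_buf_init(&decrypted, allocator, ciphertext.len + AWS_AES_256_CIPHER_BLOCK_SIZE);

    struct aws_byte_cursor ciphertext_cur = aws_byte_cursor_from_buf(&ciphertext);
    aws_symmetric_cipher_decrypt(decryptor, ciphertext_cur, &decrypted);
    int result = aws_symmetric_cipher_finalize_decryption(decryptor, &decrypted);

    aws_byte_buf_clean_up_secure(&decrypted);
    aws_byte_buf_clean_up_secure(&ciphertext);
    aws_symmetric_cipher_destroy(decryptor);
    aws_symmetric_cipher_destroy(encryptor);
    aws_cal_library_clean_up();
    return result;
}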
aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/000077500000000000000000000000001456575232400225535ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/common_cryptor_spi.h000066400000000000000000000427131456575232400266600ustar00rootroot00000000000000/* * Copyright (c) 2010 Apple Inc. All Rights Reserved. * * @APPLE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. * * @APPLE_LICENSE_HEADER_END@ */ /* clang-format off */ #ifndef _CC_CryptorSPI_H_ #define _CC_CryptorSPI_H_ #include #include #include #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif #if defined(_WIN32) int timingsafe_bcmp(const void *b1, const void *b2, size_t n); #endif /* This is an SPI header. It includes some work in progress implementation notes that will be removed when this is promoted to an API set. */ /* Private Ciphers */ /* Lion SPI name for no padding. Defining for compatibility. Is now ccNoPadding in CommonCryptor.h */ enum { ccDefaultPadding = 0, }; enum { kCCAlgorithmAES128NoHardware = 20, kCCAlgorithmAES128WithHardware = 21 }; /* Private Modes */ enum { kCCModeGCM = 11, kCCModeCCM = 12, }; /* Private Paddings */ enum { ccCBCCTS1 = 10, ccCBCCTS2 = 11, ccCBCCTS3 = 12, }; /* Private Cryptor direction (op) */ enum { kCCBoth = 3, }; /* Supports a mode call of int mode_setup(int cipher, const unsigned char *IV, const unsigned char *key, int keylen, const unsigned char *tweak, int tweaklen, int num_rounds, int options, mode_context *ctx); */ /* User supplied space for the CryptorRef */ CCCryptorStatus CCCryptorCreateFromDataWithMode( CCOperation op, /* kCCEncrypt, kCCEncrypt, kCCBoth (default for BlockMode) */ CCMode mode, CCAlgorithm alg, CCPadding padding, const void *iv, /* optional initialization vector */ const void *key, /* raw key material */ size_t keyLength, const void *tweak, /* raw tweak material */ size_t tweakLength, int numRounds, CCModeOptions options, const void *data, /* caller-supplied memory */ size_t dataLength, /* length of data in bytes */ CCCryptorRef *cryptorRef, /* RETURNED */ size_t *dataUsed) /* optional, RETURNED */ API_AVAILABLE(macos(10.7), ios(5.0)); /* Assuming we can use existing CCCryptorCreateFromData for all modes serviced by these: int mode_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, mode_context *ctx); int mode_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, mode_context *ctx); */ /* Block mode encrypt and decrypt interfaces for IV tweaked blocks (XTS and CBC) int mode_encrypt_tweaked(const unsigned char *pt, unsigned long len, unsigned char *ct, const unsigned char *tweak, mode_context *ctx); int mode_decrypt_tweaked(const unsigned char *ct, unsigned long len, unsigned char *pt, 
const unsigned char *tweak, mode_context *ctx); */ CCCryptorStatus CCCryptorEncryptDataBlock( CCCryptorRef cryptorRef, const void *iv, const void *dataIn, size_t dataInLength, void *dataOut) API_AVAILABLE(macos(10.7), ios(5.0)); CCCryptorStatus CCCryptorDecryptDataBlock( CCCryptorRef cryptorRef, const void *iv, const void *dataIn, size_t dataInLength, void *dataOut) API_AVAILABLE(macos(10.7), ios(5.0)); /*! @function CCCryptorReset_binary_compatibility @abstract Do not call this function. Reinitializes an existing CCCryptorRef with a (possibly) new initialization vector. The CCCryptorRef's key is unchanged. Preserves compatibility for Sdks prior to macOS 10.13, iOS 11, watchOS 4 and tvOS 11. It is used internally in CommonCrypto. See CCCryptorReset for more information. @result The only possible error is kCCParamError. */ CCCryptorStatus CCCryptorReset_binary_compatibility(CCCryptorRef cryptorRef, const void *iv) API_DEPRECATED_WITH_REPLACEMENT("CCCryptorReset", macos(10.4, 10.13), ios(2.0, 11.0)); /* Assuming we can use the existing CCCryptorRelease() interface for int mode_done(mode_context *ctx); */ /* Not surfacing these other than with CCCryptorReset() int mode_setIV(const unsigned char *IV, unsigned long len, mode_context *ctx); int mode_getIV(const unsigned char *IV, unsigned long *len, mode_context *ctx); */ /* * returns a cipher blocksize length iv in the provided iv buffer. */ CCCryptorStatus CCCryptorGetIV(CCCryptorRef cryptorRef, void *iv) API_AVAILABLE(macos(10.7), ios(5.0)); /* GCM Support Interfaces Use CCCryptorCreateWithMode() with the kCCModeGCM selector to initialize a CryptoRef. Only kCCAlgorithmAES128 can be used with GCM and these functions. IV Setting etc will be ignored from CCCryptorCreateWithMode(). Use the CCCryptorGCMAddIV() routine below for IV setup. */ /* Deprecated. Use CCCryptorGCMSetIV() instead. This adds the initial vector octets from iv of length ivLen to the GCM CCCryptorRef. You can call this function as many times as required to process the entire IV. */ CCCryptorStatus CCCryptorGCMAddIV(CCCryptorRef cryptorRef, const void *iv, size_t ivLen) API_DEPRECATED_WITH_REPLACEMENT("CCCryptorGCMSetIV", macos(10.8, 10.13), ios(5.0, 11.0)); /* This adds the initial vector octets from iv of length ivLen to the GCM CCCryptorRef. The input iv cannot be NULL and ivLen must be between 12 to 16 bytes inclusive. CCRandomGenerateBytes() can be used to generate random IVs */ CCCryptorStatus CCCryptorGCMSetIV(CCCryptorRef cryptorRef, const void *iv, size_t ivLen) API_AVAILABLE(macos(10.13), ios(11.0)); /* Additional Authentication Data After the entire IV has been processed, the additional authentication data can be processed. Unlike the IV, a packet/session does not require additional authentication data (AAD) for security. The AAD is meant to be used as side channel data you want to be authenticated with the packet. Note: once you begin adding AAD to the GCM CCCryptorRef you cannot return to adding IV data until the state has been reset. 
*/ CCCryptorStatus CCCryptorGCMAddAAD(CCCryptorRef cryptorRef, const void *aData, size_t aDataLen) API_AVAILABLE(macos(10.8), ios(6.0)); // This is for old iOS5 clients CCCryptorStatus CCCryptorGCMAddADD(CCCryptorRef cryptorRef, const void *aData, size_t aDataLen) API_AVAILABLE(macos(10.8), ios(5.0)); CCCryptorStatus CCCryptorGCMEncrypt( CCCryptorRef cryptorRef, const void *dataIn, size_t dataInLength, void *dataOut) API_AVAILABLE(macos(10.8), ios(5.0)); CCCryptorStatus CCCryptorGCMDecrypt( CCCryptorRef cryptorRef, const void *dataIn, size_t dataInLength, void *dataOut) API_AVAILABLE(macos(10.8), ios(5.0)); /* This finalizes the GCM state gcm and stores the tag in tag of length taglen octets. The tag must be verified by comparing the computed and expected values using timingsafe_bcmp. Other comparison functions (e.g. memcmp) must not be used as they may be vulnerable to practical timing attacks, leading to tag forgery. */ CCCryptorStatus CCCryptorGCMFinal( CCCryptorRef cryptorRef, void *tagOut, size_t *tagLength) API_DEPRECATED_WITH_REPLACEMENT("CCCryptorGCMFinalize", macos(10.8, 10.13), ios(5.0, 11.0)); /* This finalizes the GCM state gcm. On encryption, the computed tag is returned in tagOut. On decryption, the provided tag is securely compared to the expected tag, and error is returned if the tags do not match. The tag buffer content is not modified on decryption. is not updated on decryption. */ CCCryptorStatus CCCryptorGCMFinalize( CCCryptorRef cryptorRef, void *tag, size_t tagLength) API_AVAILABLE(macos(10.13), ios(11.0)); /* This will reset the GCM CCCryptorRef to the state that CCCryptorCreateWithMode() left it. The user would then call CCCryptorGCMAddIV(), CCCryptorGCMAddAAD(), etc. */ CCCryptorStatus CCCryptorGCMReset( CCCryptorRef cryptorRef) API_AVAILABLE(macos(10.8), ios(5.0)); /* Deprecated. Use CCCryptorGCMOneshotEncrypt() or CCCryptorGCMOneshotDecrypt() instead. This will initialize the GCM state with the given key, IV and AAD value then proceed to encrypt or decrypt the message text and store the final message tag. The definition of the variables is the same as it is for all the manual functions. If you are processing many packets under the same key you shouldn't use this function as it invokes the pre-computation with each call. The tag must be verified by comparing the computed and expected values using timingsafe_bcmp. Other comparison functions (e.g. memcmp) must not be used as they may be vulnerable to practical timing attacks, leading to tag forgery. */ CCCryptorStatus CCCryptorGCM( CCOperation op, /* kCCEncrypt, kCCDecrypt */ CCAlgorithm alg, const void *key, /* raw key material */ size_t keyLength, const void *iv, size_t ivLen, const void *aData, size_t aDataLen, const void *dataIn, size_t dataInLength, void *dataOut, void *tagOut, size_t *tagLength) API_DEPRECATED_WITH_REPLACEMENT("CCCryptorGCMOneshotEncrypt or CCCryptorGCMOneshotDecrypt", macos(10.8, 10.13), ios(6.0, 11.0)); /*! @function CCCryptorGCMOneshotDecrypt @abstract Encrypts using AES-GCM and outputs encrypted data and an authentication tag @param alg It can only be kCCAlgorithmAES @param key Key for the underlying AES blockcipher. It must be 16 bytes. ***** @param keyLength Length of the key in bytes @param iv Initialization vector, must be at least 12 bytes @param ivLength Length of the IV in bytes @param aData Additional data to authenticate. It can be NULL, if there is no additional data to be authenticated. @param aDataLength Length of the additional data in bytes. It can be zero. 
@param dataIn Input plaintext @param dataInLength Length of the input plaintext data in bytes @param cipherOut Output ciphertext @param tagLength Length of the output authentication tag in bytes. It is minimum 8 bytes and maximum 16 bytes. @param tagOut the output authentication tag @result kccSuccess if successful. @discussion It is a one-shot AESGCM encryption and in-place encryption is supported. @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length. In stateful protocols, if each packet exposes a guaranteed-unique value, it is recommended to format this as a 12-byte value for use as the IV. In stateless protocols, it is recommended to choose a 16-byte value using a cryptographically-secure pseudorandom number generator (e.g. @p ccrng). */ CCCryptorStatus CCCryptorGCMOneshotEncrypt(CCAlgorithm alg, const void *key, size_t keyLength, /* raw key material */ const void *iv, size_t ivLength, const void *aData, size_t aDataLength, const void *dataIn, size_t dataInLength, void *cipherOut, void *tagOut, size_t tagLength) __attribute__((__warn_unused_result__)) API_AVAILABLE(macos(10.13), ios(11.0)); /*! @function CCCryptorGCMOneshotDecrypt @abstract Decrypts using AES-GCM, compares the computed tag of the decrypted message to the input tag and returns error is authentication fails. @discussion CCCryptorGCMOneshotDecrypt() works similar to the CCCryptorGCMOneshotEncrypt(). CCCryptorGCMOneshotDecrypt() does not return the tag of the decrypted message. It compated the computed tag with inout tag and outputs error if authentication of the decrypted message fails. */ CCCryptorStatus CCCryptorGCMOneshotDecrypt(CCAlgorithm alg, const void *key, size_t keyLength, const void *iv, size_t ivLen, const void *aData, size_t aDataLen, const void *dataIn, size_t dataInLength, void *dataOut, const void *tagIn, size_t tagLength) __attribute__((__warn_unused_result__)) API_AVAILABLE(macos(10.13), ios(11.0)); void CC_RC4_set_key(void *ctx, int len, const unsigned char *data) API_AVAILABLE(macos(10.4), ios(5.0)); void CC_RC4(void *ctx, unsigned long len, const unsigned char *indata, unsigned char *outdata) API_AVAILABLE(macos(10.4), ios(5.0)); /* GCM interface can then be easily bolt on the rest of standard CCCryptor interface; typically following sequence can be used: CCCryptorCreateWithMode(mode = kCCModeGCM) 0..Nx: CCCryptorAddParameter(kCCParameterIV, iv) 0..Nx: CCCryptorAddParameter(kCCParameterAuthData, data) 0..Nx: CCCryptorUpdate(inData, outData) 0..1: CCCryptorFinal(outData) 0..1: CCCryptorGetParameter(kCCParameterAuthTag, tag) CCCryptorRelease() */ enum { /* Initialization vector - cryptor input parameter, typically needs to have the same length as block size, but in some cases (GCM) it can be arbitrarily long and even might be called multiple times. */ kCCParameterIV, /* Authentication data - cryptor input parameter, input for authenticating encryption modes like GCM. If supported, can be called multiple times before encryption starts. */ kCCParameterAuthData, /* Mac Size - cryptor input parameter, input for authenticating encryption modes like CCM. Specifies the size of the AuthTag the algorithm is expected to produce. */ kCCMacSize, /* Data Size - cryptor input parameter, input for authenticating encryption modes like CCM. Specifies the amount of data the algorithm is expected to process. */ kCCDataSize, /* Authentication tag - cryptor output parameter, output from authenticating encryption modes like GCM. 
If supported, should be retrieved after the encryption finishes. */ kCCParameterAuthTag, }; typedef uint32_t CCParameter; /* Sets or adds some other cryptor input parameter. According to the cryptor type and state, parameter can be either accepted or refused with kCCUnimplemented (when given parameter is not supported for this type of cryptor at all) or kCCParamError (bad data length or format). */ CCCryptorStatus CCCryptorAddParameter( CCCryptorRef cryptorRef, CCParameter parameter, const void *data, size_t dataSize); /* Gets value of output cryptor parameter. According to the cryptor type state, the request can be either accepted or refused with kCCUnimplemented (when given parameter is not supported for this type of cryptor) or kCCBufferTooSmall (in this case, *dataSize argument is set to the requested size of data). */ CCCryptorStatus CCCryptorGetParameter( CCCryptorRef cryptorRef, CCParameter parameter, void *data, size_t *dataSize); #ifdef __cplusplus } #endif #endif /* _CC_CryptorSPI_H_ */ /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/commoncrypto_aes.c000066400000000000000000000611261456575232400263060ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #if !defined(AWS_APPSTORE_SAFE) /* CommonCrypto does not offer public APIs for doing AES GCM. * There are private APIs for doing it (CommonCryptoSPI.h), but App Store * submissions that reference these private symbols will be rejected. */ # define SUPPORT_AES_GCM_VIA_SPI 1 # include "common_cryptor_spi.h" # if (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) # define USE_LATEST_CRYPTO_API 1 # endif #endif struct cc_aes_cipher { struct aws_symmetric_cipher cipher_base; struct _CCCryptor *encryptor_handle; struct _CCCryptor *decryptor_handle; struct aws_byte_buf working_buffer; }; static int s_encrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { /* allow for a padded block by making sure we have at least a block of padding reserved. */ size_t required_buffer_space = input.len + cipher->block_size - 1; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t available_write_space = out->capacity - out->len; struct cc_aes_cipher *cc_cipher = cipher->impl; size_t len_written = 0; CCStatus status = CCCryptorUpdate( cc_cipher->encryptor_handle, input.ptr, input.len, out->buffer + out->len, available_write_space, &len_written); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static int s_decrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { /* allow for a padded block by making sure we have at least a block of padding reserved. 
*/ size_t required_buffer_space = input.len + cipher->block_size - 1; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t available_write_space = out->capacity - out->len; struct cc_aes_cipher *cc_cipher = cipher->impl; size_t len_written = 0; CCStatus status = CCCryptorUpdate( cc_cipher->decryptor_handle, input.ptr, input.len, out->buffer + out->len, available_write_space, &len_written); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static int s_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { /* in CBC mode, this will pad the final block from the previous encrypt call, or do nothing * if we were already on a block boundary. In CTR mode this will do nothing. */ size_t required_buffer_space = cipher->block_size; size_t len_written = 0; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t available_write_space = out->capacity - out->len; struct cc_aes_cipher *cc_cipher = cipher->impl; CCStatus status = CCCryptorFinal(cc_cipher->encryptor_handle, out->buffer + out->len, available_write_space, &len_written); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static int s_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { /* in CBC mode, this will pad the final block from the previous encrypt call, or do nothing * if we were already on a block boundary. In CTR mode this will do nothing. */ size_t required_buffer_space = cipher->block_size; size_t len_written = 0; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t available_write_space = out->capacity - out->len; struct cc_aes_cipher *cc_cipher = cipher->impl; CCStatus status = CCCryptorFinal(cc_cipher->decryptor_handle, out->buffer + out->len, available_write_space, &len_written); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static int s_initialize_cbc_cipher_materials( struct cc_aes_cipher *cc_cipher, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { if (!cc_cipher->cipher_base.key.len) { if (key) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); } else { aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); } } if (!cc_cipher->cipher_base.iv.len) { if (iv) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, *iv); } else { aws_byte_buf_init( &cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); aws_symmetric_cipher_generate_initialization_vector( AWS_AES_256_CIPHER_BLOCK_SIZE, false, &cc_cipher->cipher_base.iv); } } CCCryptorStatus status = CCCryptorCreateWithMode( kCCEncrypt, kCCModeCBC, kCCAlgorithmAES, ccPKCS7Padding, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.key.buffer, cc_cipher->cipher_base.key.len, NULL, 0, 0, 0, &cc_cipher->encryptor_handle); status |= 
CCCryptorCreateWithMode( kCCDecrypt, kCCModeCBC, kCCAlgorithmAES, ccPKCS7Padding, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.key.buffer, cc_cipher->cipher_base.key.len, NULL, 0, 0, 0, &cc_cipher->decryptor_handle); return status == kCCSuccess ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } static int s_reset(struct aws_symmetric_cipher *cipher) { struct cc_aes_cipher *cc_cipher = cipher->impl; if (cc_cipher->encryptor_handle) { CCCryptorRelease(cc_cipher->encryptor_handle); cc_cipher->encryptor_handle = NULL; } if (cc_cipher->decryptor_handle) { CCCryptorRelease(cc_cipher->decryptor_handle); cc_cipher->decryptor_handle = NULL; } aws_byte_buf_secure_zero(&cc_cipher->working_buffer); return AWS_OP_SUCCESS; } static void s_destroy(struct aws_symmetric_cipher *cipher) { aws_byte_buf_clean_up_secure(&cipher->key); aws_byte_buf_clean_up_secure(&cipher->iv); aws_byte_buf_clean_up_secure(&cipher->tag); aws_byte_buf_clean_up_secure(&cipher->aad); s_reset(cipher); struct cc_aes_cipher *cc_cipher = cipher->impl; aws_byte_buf_clean_up_secure(&cc_cipher->working_buffer); aws_mem_release(cipher->allocator, cc_cipher); } static int s_cbc_reset(struct aws_symmetric_cipher *cipher) { struct cc_aes_cipher *cc_cipher = cipher->impl; int ret_val = s_reset(cipher); if (ret_val == AWS_OP_SUCCESS) { ret_val = s_initialize_cbc_cipher_materials(cc_cipher, NULL, NULL); } return ret_val; } static struct aws_symmetric_cipher_vtable s_aes_cbc_vtable = { .finalize_decryption = s_finalize_decryption, .finalize_encryption = s_finalize_encryption, .decrypt = s_decrypt, .encrypt = s_encrypt, .provider = "CommonCrypto", .alg_name = "AES-CBC 256", .destroy = s_destroy, .reset = s_cbc_reset, }; struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); cc_cipher->cipher_base.allocator = allocator; cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cc_cipher->cipher_base.impl = cc_cipher; cc_cipher->cipher_base.vtable = &s_aes_cbc_vtable; if (s_initialize_cbc_cipher_materials(cc_cipher, key, iv) != AWS_OP_SUCCESS) { s_destroy(&cc_cipher->cipher_base); return NULL; } cc_cipher->cipher_base.good = true; cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; return &cc_cipher->cipher_base; } static int s_initialize_ctr_cipher_materials( struct cc_aes_cipher *cc_cipher, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { if (!cc_cipher->cipher_base.key.len) { if (key) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); } else { aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); } } if (!cc_cipher->cipher_base.iv.len) { if (iv) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, *iv); } else { aws_byte_buf_init( &cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); aws_symmetric_cipher_generate_initialization_vector( AWS_AES_256_CIPHER_BLOCK_SIZE, true, &cc_cipher->cipher_base.iv); } } CCCryptorStatus status = CCCryptorCreateWithMode( kCCEncrypt, kCCModeCTR, kCCAlgorithmAES, ccNoPadding, 
cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.key.buffer, cc_cipher->cipher_base.key.len, NULL, 0, 0, kCCModeOptionCTR_BE, &cc_cipher->encryptor_handle); status |= CCCryptorCreateWithMode( kCCDecrypt, kCCModeCTR, kCCAlgorithmAES, ccNoPadding, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.key.buffer, cc_cipher->cipher_base.key.len, NULL, 0, 0, kCCModeOptionCTR_BE, &cc_cipher->decryptor_handle); return status == kCCSuccess ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } static int s_ctr_reset(struct aws_symmetric_cipher *cipher) { struct cc_aes_cipher *cc_cipher = cipher->impl; int ret_val = s_reset(cipher); if (ret_val == AWS_OP_SUCCESS) { ret_val = s_initialize_ctr_cipher_materials(cc_cipher, NULL, NULL); } return ret_val; } static struct aws_symmetric_cipher_vtable s_aes_ctr_vtable = { .finalize_decryption = s_finalize_decryption, .finalize_encryption = s_finalize_encryption, .decrypt = s_decrypt, .encrypt = s_encrypt, .provider = "CommonCrypto", .alg_name = "AES-CTR 256", .destroy = s_destroy, .reset = s_ctr_reset, }; struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); cc_cipher->cipher_base.allocator = allocator; cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cc_cipher->cipher_base.impl = cc_cipher; cc_cipher->cipher_base.vtable = &s_aes_ctr_vtable; if (s_initialize_ctr_cipher_materials(cc_cipher, key, iv) != AWS_OP_SUCCESS) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); s_destroy(&cc_cipher->cipher_base); return NULL; } cc_cipher->cipher_base.good = true; cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; return &cc_cipher->cipher_base; } #ifdef SUPPORT_AES_GCM_VIA_SPI /* * Note that CCCryptorGCMFinal is deprecated in Mac 10.13. It also doesn't compare the tag with expected tag * https://opensource.apple.com/source/CommonCrypto/CommonCrypto-60118.1.1/include/CommonCryptorSPI.h.auto.html */ static CCStatus s_cc_crypto_gcm_finalize(struct _CCCryptor *encryptor_handle, uint8_t *buffer, size_t tag_length) { # ifdef USE_LATEST_CRYPTO_API if (__builtin_available(macOS 10.13, iOS 11.0, *)) { return CCCryptorGCMFinalize(encryptor_handle, buffer, tag_length); } else { /* We would never hit this branch for newer macOS and iOS versions because of the __builtin_available check, so we can * suppress the compiler warning. */ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" return CCCryptorGCMFinal(encryptor_handle, buffer, &tag_length); # pragma clang diagnostic pop } # else return CCCryptorGCMFinal(encryptor_handle, buffer, &tag_length); # endif } static CCCryptorStatus s_cc_cryptor_gcm_set_iv(struct _CCCryptor *encryptor_handle, uint8_t *buffer, size_t length) { # ifdef USE_LATEST_CRYPTO_API if (__builtin_available(macOS 10.13, iOS 11.0, *)) { return CCCryptorGCMSetIV(encryptor_handle, buffer, length); } else { /* We would never hit this branch for newer macOS and iOS versions because of the __builtin_available check, so we can * suppress the compiler warning. 
*/ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" return CCCryptorGCMAddIV(encryptor_handle, buffer, length); # pragma clang diagnostic pop } # else return CCCryptorGCMAddIV(encryptor_handle, buffer, length); # endif } static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { (void)out; /* user specification takes precedence. If its wrong its wrong */ if (!cipher->tag.len) { aws_byte_buf_init(&cipher->tag, cipher->allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); } struct cc_aes_cipher *cc_cipher = cipher->impl; size_t tag_length = AWS_AES_256_CIPHER_BLOCK_SIZE; CCStatus status = s_cc_crypto_gcm_finalize(cc_cipher->encryptor_handle, cipher->tag.buffer, tag_length); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } cipher->tag.len = tag_length; return AWS_OP_SUCCESS; } static int s_finalize_gcm_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { (void)out; struct cc_aes_cipher *cc_cipher = cipher->impl; size_t tag_length = AWS_AES_256_CIPHER_BLOCK_SIZE; CCStatus status = s_cc_crypto_gcm_finalize(cc_cipher->encryptor_handle, cipher->tag.buffer, tag_length); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_initialize_gcm_cipher_materials( struct cc_aes_cipher *cc_cipher, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *tag) { if (!cc_cipher->cipher_base.key.len) { if (key) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); } else { aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); } } if (!cc_cipher->cipher_base.iv.len) { if (iv) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, *iv); } else { /* GCM IVs are kind of a hidden implementation detail. 4 are reserved by the system for long running stream * blocks. */ /* This is because there's a GMAC attached to the cipher (that's what tag is for). 
For that to work, it has * to control the actual counter */ aws_byte_buf_init( &cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE - 4); aws_symmetric_cipher_generate_initialization_vector( AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, &cc_cipher->cipher_base.iv); } } if (aad && aad->len) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.aad, cc_cipher->cipher_base.allocator, *aad); } if (tag && tag->len) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.tag, cc_cipher->cipher_base.allocator, *tag); } CCCryptorStatus status = CCCryptorCreateWithMode( kCCEncrypt, kCCModeGCM, kCCAlgorithmAES, ccNoPadding, NULL, cc_cipher->cipher_base.key.buffer, cc_cipher->cipher_base.key.len, NULL, 0, 0, kCCModeOptionCTR_BE, &cc_cipher->encryptor_handle); if (status != kCCSuccess) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } status = s_cc_cryptor_gcm_set_iv( cc_cipher->encryptor_handle, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.iv.len); if (status != kCCSuccess) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (cc_cipher->cipher_base.aad.len) { status = CCCryptorGCMAddAAD( cc_cipher->encryptor_handle, cc_cipher->cipher_base.aad.buffer, cc_cipher->cipher_base.aad.len); if (status != kCCSuccess) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } status = CCCryptorCreateWithMode( kCCDecrypt, kCCModeGCM, kCCAlgorithmAES, ccNoPadding, NULL, cc_cipher->cipher_base.key.buffer, cc_cipher->cipher_base.key.len, NULL, 0, 0, kCCModeOptionCTR_BE, &cc_cipher->decryptor_handle); if (status != kCCSuccess) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } status = s_cc_cryptor_gcm_set_iv( cc_cipher->decryptor_handle, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.iv.len); if (status != kCCSuccess) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (cc_cipher->cipher_base.aad.len) { status = CCCryptorGCMAddAAD( cc_cipher->decryptor_handle, cc_cipher->cipher_base.aad.buffer, cc_cipher->cipher_base.aad.len); } if (status != kCCSuccess) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_gcm_reset(struct aws_symmetric_cipher *cipher) { struct cc_aes_cipher *cc_cipher = cipher->impl; int ret_val = s_reset(cipher); if (ret_val == AWS_OP_SUCCESS) { ret_val = s_initialize_gcm_cipher_materials(cc_cipher, NULL, NULL, NULL, NULL); } return ret_val; } static struct aws_symmetric_cipher_vtable s_aes_gcm_vtable = { .finalize_decryption = s_finalize_gcm_decryption, .finalize_encryption = s_finalize_gcm_encryption, .decrypt = s_decrypt, .encrypt = s_encrypt, .provider = "CommonCrypto", .alg_name = "AES-GCM 256", .destroy = s_destroy, .reset = s_gcm_reset, }; struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *tag) { struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); cc_cipher->cipher_base.allocator = allocator; cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cc_cipher->cipher_base.impl = cc_cipher; cc_cipher->cipher_base.vtable = &s_aes_gcm_vtable; if (s_initialize_gcm_cipher_materials(cc_cipher, key, iv, aad, tag) != AWS_OP_SUCCESS) { s_destroy(&cc_cipher->cipher_base); return NULL; } cc_cipher->cipher_base.good = true; cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; return &cc_cipher->cipher_base; } #else /* 
!SUPPORT_AES_GCM_VIA_SPI */ struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *tag) { (void)allocator; (void)key; (void)iv; (void)aad; (void)tag; aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); return NULL; } #endif /* SUPPORT_AES_GCM_VIA_SPI */ static int s_keywrap_encrypt_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { struct cc_aes_cipher *cc_cipher = cipher->impl; return aws_byte_buf_append_dynamic(&cc_cipher->working_buffer, &input); } static int s_finalize_keywrap_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct cc_aes_cipher *cc_cipher = cipher->impl; if (cc_cipher->working_buffer.len == 0) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_STATE); } size_t output_buffer_len = cipher->block_size + cc_cipher->working_buffer.len; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, output_buffer_len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } CCCryptorStatus status = CCSymmetricKeyWrap( kCCWRAPAES, CCrfc3394_iv, CCrfc3394_ivLen, cipher->key.buffer, cipher->key.len, cc_cipher->working_buffer.buffer, cc_cipher->working_buffer.len, out->buffer, &output_buffer_len); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_STATE); } out->len += output_buffer_len; return AWS_OP_SUCCESS; } static int s_finalize_keywrap_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct cc_aes_cipher *cc_cipher = cipher->impl; if (cc_cipher->working_buffer.len == 0) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_STATE); } size_t output_buffer_len = cipher->block_size + cc_cipher->working_buffer.len; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, output_buffer_len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } CCCryptorStatus status = CCSymmetricKeyUnwrap( kCCWRAPAES, CCrfc3394_iv, CCrfc3394_ivLen, cipher->key.buffer, cipher->key.len, cc_cipher->working_buffer.buffer, cc_cipher->working_buffer.len, out->buffer, &output_buffer_len); if (status != kCCSuccess) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_STATE); } out->len += output_buffer_len; return AWS_OP_SUCCESS; } static struct aws_symmetric_cipher_vtable s_aes_keywrap_vtable = { .finalize_decryption = s_finalize_keywrap_decryption, .finalize_encryption = s_finalize_keywrap_encryption, .decrypt = s_keywrap_encrypt_decrypt, .encrypt = s_keywrap_encrypt_decrypt, .provider = "CommonCrypto", .alg_name = "AES-KEYWRAP 256", .destroy = s_destroy, .reset = s_reset, }; struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key) { struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); cc_cipher->cipher_base.allocator = allocator; cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE / 2; cc_cipher->cipher_base.impl = cc_cipher; cc_cipher->cipher_base.vtable = &s_aes_keywrap_vtable; if (key) { aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); } else { aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); } 
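    /*
     * The working buffer initialized below accumulates the entire input passed to
     * encrypt()/decrypt(): CCSymmetricKeyWrap/CCSymmetricKeyUnwrap only run on the
     * complete key material at finalize time. RFC 3394 wrapping adds one 8-byte
     * block, so a capacity of (AWS_AES_256_CIPHER_BLOCK_SIZE * 2) + 8 comfortably
     * holds a wrapped 256-bit key; larger inputs grow the buffer on demand via
     * aws_byte_buf_append_dynamic() in s_keywrap_encrypt_decrypt().
     */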
aws_byte_buf_init(&cc_cipher->working_buffer, allocator, (AWS_AES_256_CIPHER_BLOCK_SIZE * 2) + 8); cc_cipher->cipher_base.good = true; cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; return &cc_cipher->cipher_base; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/commoncrypto_hmac.c000066400000000000000000000043041456575232400264410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include static void s_destroy(struct aws_hmac *hmac); static int s_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); static int s_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output); static struct aws_hmac_vtable s_sha256_hmac_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA256 HMAC", .provider = "CommonCrypto", }; struct cc_hmac { struct aws_hmac hmac; CCHmacContext cc_hmac_ctx; }; struct aws_hmac *aws_sha256_hmac_default_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret) { AWS_ASSERT(secret->ptr); struct cc_hmac *cc_hmac = aws_mem_acquire(allocator, sizeof(struct cc_hmac)); if (!cc_hmac) { return NULL; } cc_hmac->hmac.allocator = allocator; cc_hmac->hmac.vtable = &s_sha256_hmac_vtable; cc_hmac->hmac.impl = cc_hmac; cc_hmac->hmac.digest_size = AWS_SHA256_HMAC_LEN; cc_hmac->hmac.good = true; CCHmacInit(&cc_hmac->cc_hmac_ctx, kCCHmacAlgSHA256, secret->ptr, (CC_LONG)secret->len); return &cc_hmac->hmac; } static void s_destroy(struct aws_hmac *hmac) { struct cc_hmac *ctx = hmac->impl; aws_mem_release(hmac->allocator, ctx); } static int s_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac) { if (!hmac->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_hmac *ctx = hmac->impl; CCHmacUpdate(&ctx->cc_hmac_ctx, to_hmac->ptr, (CC_LONG)to_hmac->len); return AWS_OP_SUCCESS; } static int s_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output) { if (!hmac->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_hmac *ctx = hmac->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hmac->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } CCHmacFinal(&ctx->cc_hmac_ctx, output->buffer + output->len); hmac->good = false; output->len += hmac->digest_size; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/commoncrypto_md5.c000066400000000000000000000043051456575232400262170ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" static void s_destroy(struct aws_hash *hash); static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output); static struct aws_hash_vtable s_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "MD5", .provider = "CommonCrypto", }; struct cc_md5_hash { struct aws_hash hash; CC_MD5_CTX cc_hash; }; struct aws_hash *aws_md5_default_new(struct aws_allocator *allocator) { struct cc_md5_hash *cc_md5_hash = aws_mem_acquire(allocator, sizeof(struct cc_md5_hash)); if (!cc_md5_hash) { return NULL; } cc_md5_hash->hash.allocator = allocator; cc_md5_hash->hash.vtable = &s_vtable; cc_md5_hash->hash.digest_size = AWS_MD5_LEN; cc_md5_hash->hash.impl = cc_md5_hash; cc_md5_hash->hash.good = true; CC_MD5_Init(&cc_md5_hash->cc_hash); return &cc_md5_hash->hash; } static void s_destroy(struct aws_hash *hash) { struct cc_md5_hash *ctx = hash->impl; aws_mem_release(hash->allocator, ctx); } static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_md5_hash *ctx = hash->impl; CC_MD5_Update(&ctx->cc_hash, to_hash->ptr, (CC_LONG)to_hash->len); return AWS_OP_SUCCESS; } static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_md5_hash *ctx = hash->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hash->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } CC_MD5_Final(output->buffer + output->len, &ctx->cc_hash); hash->good = false; output->len += hash->digest_size; return AWS_OP_SUCCESS; } #pragma clang diagnostic pop aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/commoncrypto_platform_init.c000066400000000000000000000005101456575232400303730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include void aws_cal_platform_init(struct aws_allocator *allocator) { (void)allocator; } void aws_cal_platform_clean_up(void) {} void aws_cal_platform_thread_clean_up(void) {} aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/commoncrypto_sha1.c000066400000000000000000000041061456575232400263650ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include static void s_destroy(struct aws_hash *hash); static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output); static struct aws_hash_vtable s_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA1", .provider = "CommonCrypto", }; struct cc_sha1_hash { struct aws_hash hash; CC_SHA1_CTX cc_hash; }; struct aws_hash *aws_sha1_default_new(struct aws_allocator *allocator) { struct cc_sha1_hash *sha1_hash = aws_mem_acquire(allocator, sizeof(struct cc_sha1_hash)); if (!sha1_hash) { return NULL; } sha1_hash->hash.allocator = allocator; sha1_hash->hash.vtable = &s_vtable; sha1_hash->hash.impl = sha1_hash; sha1_hash->hash.digest_size = AWS_SHA1_LEN; sha1_hash->hash.good = true; CC_SHA1_Init(&sha1_hash->cc_hash); return &sha1_hash->hash; } static void s_destroy(struct aws_hash *hash) { struct cc_sha1_hash *ctx = hash->impl; aws_mem_release(hash->allocator, ctx); } static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_sha1_hash *ctx = hash->impl; CC_SHA1_Update(&ctx->cc_hash, to_hash->ptr, (CC_LONG)to_hash->len); return AWS_OP_SUCCESS; } static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_sha1_hash *ctx = hash->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hash->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } CC_SHA1_Final(output->buffer + output->len, &ctx->cc_hash); hash->good = false; output->len += hash->digest_size; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/commoncrypto_sha256.c000066400000000000000000000041631456575232400265440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include static void s_destroy(struct aws_hash *hash); static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output); static struct aws_hash_vtable s_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA256", .provider = "CommonCrypto", }; struct cc_sha256_hash { struct aws_hash hash; CC_SHA256_CTX cc_hash; }; struct aws_hash *aws_sha256_default_new(struct aws_allocator *allocator) { struct cc_sha256_hash *sha256_hash = aws_mem_acquire(allocator, sizeof(struct cc_sha256_hash)); if (!sha256_hash) { return NULL; } sha256_hash->hash.allocator = allocator; sha256_hash->hash.vtable = &s_vtable; sha256_hash->hash.impl = sha256_hash; sha256_hash->hash.digest_size = AWS_SHA256_LEN; sha256_hash->hash.good = true; CC_SHA256_Init(&sha256_hash->cc_hash); return &sha256_hash->hash; } static void s_destroy(struct aws_hash *hash) { struct cc_sha256_hash *ctx = hash->impl; aws_mem_release(hash->allocator, ctx); } static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_sha256_hash *ctx = hash->impl; CC_SHA256_Update(&ctx->cc_hash, to_hash->ptr, (CC_LONG)to_hash->len); return AWS_OP_SUCCESS; } static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct cc_sha256_hash *ctx = hash->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hash->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } CC_SHA256_Final(output->buffer + output->len, &ctx->cc_hash); hash->good = false; output->len += hash->digest_size; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/securityframework_ecc.c000066400000000000000000000526761456575232400273360ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct commoncrypto_ecc_key_pair { struct aws_ecc_key_pair key_pair; SecKeyRef priv_key_ref; SecKeyRef pub_key_ref; CFAllocatorRef cf_allocator; }; static uint8_t s_preamble = 0x04; static size_t s_der_overhead = 8; /* The hard-coded "valid" public keys. Copy/pated from one of our unit test. 
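 * These points are only used to satisfy SecKeyCreateWithData when a key pair is
 * imported from a private key without its public half (see
 * aws_ecc_key_pair_new_from_private_key_impl below); the copies stored in the
 * key pair's pub_x/pub_y buffers are zeroed out right after the SecKey is created.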
*/ const static uint8_t s_fake_x_ecdsa_p256[] = { 0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b, 0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f, }; const static uint8_t s_fake_y_ecdsa_p256[] = { 0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9, 0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f, }; const static uint8_t s_fake_x_ecdsa_p384[] = { 0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67, 0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64, 0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24, }; const static uint8_t s_fake_y_ecdsa_p384[] = { 0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69, 0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19, 0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0, }; static int s_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature_output) { struct commoncrypto_ecc_key_pair *cc_key = key_pair->impl; if (!cc_key->priv_key_ref) { return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } CFDataRef hash_ref = CFDataCreateWithBytesNoCopy(NULL, message->ptr, message->len, kCFAllocatorNull); AWS_FATAL_ASSERT(hash_ref && "No allocations should have happened here, this function shouldn't be able to fail."); CFErrorRef error = NULL; CFDataRef signature = SecKeyCreateSignature(cc_key->priv_key_ref, kSecKeyAlgorithmECDSASignatureDigestX962, hash_ref, &error); if (error) { CFRelease(hash_ref); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } struct aws_byte_cursor to_write = aws_byte_cursor_from_array(CFDataGetBytePtr(signature), CFDataGetLength(signature)); if (aws_byte_buf_append(signature_output, &to_write)) { CFRelease(signature); CFRelease(hash_ref); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } CFRelease(signature); CFRelease(hash_ref); return AWS_OP_SUCCESS; } static size_t s_signature_length(const struct aws_ecc_key_pair *key_pair) { return aws_ecc_key_coordinate_byte_size_from_curve_name(key_pair->curve_name) * 2 + s_der_overhead; } static int s_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature) { struct commoncrypto_ecc_key_pair *cc_key = key_pair->impl; if (!cc_key->pub_key_ref) { return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } CFDataRef hash_ref = CFDataCreateWithBytesNoCopy(NULL, message->ptr, message->len, kCFAllocatorNull); CFDataRef signature_ref = CFDataCreateWithBytesNoCopy(NULL, signature->ptr, signature->len, kCFAllocatorNull); AWS_FATAL_ASSERT(hash_ref && "No allocations should have happened here, this function shouldn't be able to fail."); AWS_FATAL_ASSERT( signature_ref && "No allocations should have happened here, this function shouldn't be able to fail."); CFErrorRef error = NULL; bool verified = SecKeyVerifySignature( cc_key->pub_key_ref, kSecKeyAlgorithmECDSASignatureDigestX962, hash_ref, signature_ref, &error); CFRelease(signature_ref); CFRelease(hash_ref); return verified ? 
AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); } static int s_derive_public_key(struct aws_ecc_key_pair *key_pair) { /* we already have a public key, just lie and tell them we succeeded */ if (key_pair->pub_x.buffer && key_pair->pub_x.len) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } static void s_destroy_key(struct aws_ecc_key_pair *key_pair) { if (key_pair) { struct commoncrypto_ecc_key_pair *cc_key = key_pair->impl; if (cc_key->pub_key_ref) { CFRelease(cc_key->pub_key_ref); } if (cc_key->priv_key_ref) { CFRelease(cc_key->priv_key_ref); } if (cc_key->cf_allocator) { aws_wrapped_cf_allocator_destroy(cc_key->cf_allocator); } aws_byte_buf_clean_up_secure(&key_pair->key_buf); aws_mem_release(key_pair->allocator, cc_key); } } static struct aws_ecc_key_pair_vtable s_key_pair_vtable = { .sign_message = s_sign_message, .signature_length = s_signature_length, .verify_signature = s_verify_signature, .derive_pub_key = s_derive_public_key, .destroy = s_destroy_key, }; static struct commoncrypto_ecc_key_pair *s_alloc_pair_and_init_buffers( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x, struct aws_byte_cursor pub_y, struct aws_byte_cursor priv_key) { struct commoncrypto_ecc_key_pair *cc_key_pair = aws_mem_calloc(allocator, 1, sizeof(struct commoncrypto_ecc_key_pair)); if (!cc_key_pair) { return NULL; } aws_atomic_init_int(&cc_key_pair->key_pair.ref_count, 1); cc_key_pair->key_pair.impl = cc_key_pair; cc_key_pair->key_pair.allocator = allocator; cc_key_pair->cf_allocator = aws_wrapped_cf_allocator_new(allocator); if (!cc_key_pair->cf_allocator) { goto error; } size_t s_key_coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name); if (!s_key_coordinate_size) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } if ((pub_x.ptr && pub_x.len != s_key_coordinate_size) || (pub_y.ptr && pub_y.len != s_key_coordinate_size) || (priv_key.ptr && priv_key.len != s_key_coordinate_size)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } size_t total_buffer_size = s_key_coordinate_size * 3 + 1; if (aws_byte_buf_init(&cc_key_pair->key_pair.key_buf, allocator, total_buffer_size)) { goto error; } aws_byte_buf_write_u8(&cc_key_pair->key_pair.key_buf, s_preamble); if (pub_x.ptr && pub_y.ptr) { aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_x); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_y); } else { aws_byte_buf_write_u8_n(&cc_key_pair->key_pair.key_buf, 0x0, s_key_coordinate_size * 2); } if (priv_key.ptr) { aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &priv_key); } if (pub_x.ptr) { cc_key_pair->key_pair.pub_x = aws_byte_buf_from_array(cc_key_pair->key_pair.key_buf.buffer + 1, s_key_coordinate_size); cc_key_pair->key_pair.pub_y = aws_byte_buf_from_array(cc_key_pair->key_pair.pub_x.buffer + s_key_coordinate_size, s_key_coordinate_size); } cc_key_pair->key_pair.priv_d = aws_byte_buf_from_array( cc_key_pair->key_pair.key_buf.buffer + 1 + (s_key_coordinate_size * 2), s_key_coordinate_size); cc_key_pair->key_pair.vtable = &s_key_pair_vtable; cc_key_pair->key_pair.curve_name = curve_name; return cc_key_pair; error: s_destroy_key(&cc_key_pair->key_pair); return NULL; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key) { /** * We use SecCreateKeyWithData to create ECC key. 
Expected format for the key passed to that api is a byte buffer * consisting of "0x04 | x | y | p", where x,y is public pair and p is private key. * * In this case we only have private key and we need to construct SecKey from that. * * We used to just pass 0,0 point for x,y, i.e. "0x04 | 0 | 0 | p". * * This used to work on Macs before 14, but in 14+ SecCreateKeyWithData returns error, * which is reasonable since 0,0 is not a valid public point. * * To get around the issue, we use a fake public key, which is a valid public point, but not matching the private * key as a quick workaround. */ struct aws_byte_cursor fake_pub_x; AWS_ZERO_STRUCT(fake_pub_x); struct aws_byte_cursor fake_pub_y; AWS_ZERO_STRUCT(fake_pub_y); switch (curve_name) { case AWS_CAL_ECDSA_P256: fake_pub_x = aws_byte_cursor_from_array(s_fake_x_ecdsa_p256, AWS_ARRAY_SIZE(s_fake_x_ecdsa_p256)); fake_pub_y = aws_byte_cursor_from_array(s_fake_y_ecdsa_p256, AWS_ARRAY_SIZE(s_fake_y_ecdsa_p256)); break; case AWS_CAL_ECDSA_P384: fake_pub_x = aws_byte_cursor_from_array(s_fake_x_ecdsa_p384, AWS_ARRAY_SIZE(s_fake_x_ecdsa_p384)); fake_pub_y = aws_byte_cursor_from_array(s_fake_y_ecdsa_p384, AWS_ARRAY_SIZE(s_fake_y_ecdsa_p384)); break; default: aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); return NULL; } struct commoncrypto_ecc_key_pair *cc_key_pair = s_alloc_pair_and_init_buffers(allocator, curve_name, fake_pub_x, fake_pub_y, *priv_key); if (!cc_key_pair) { return NULL; } CFMutableDictionaryRef key_attributes = NULL; CFDataRef private_key_data = CFDataCreate( cc_key_pair->cf_allocator, cc_key_pair->key_pair.key_buf.buffer, cc_key_pair->key_pair.key_buf.len); if (!private_key_data) { goto error; } key_attributes = CFDictionaryCreateMutable(cc_key_pair->cf_allocator, 6, NULL, NULL); if (!key_attributes) { goto error; } CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeECSECPrimeRandom); CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPrivate); CFIndex key_size_bits = cc_key_pair->key_pair.priv_d.len * 8; CFDictionaryAddValue(key_attributes, kSecAttrKeySizeInBits, &key_size_bits); CFDictionaryAddValue(key_attributes, kSecAttrCanSign, kCFBooleanTrue); CFDictionaryAddValue(key_attributes, kSecAttrCanVerify, kCFBooleanFalse); CFDictionaryAddValue(key_attributes, kSecAttrCanDerive, kCFBooleanTrue); CFErrorRef error = NULL; cc_key_pair->priv_key_ref = SecKeyCreateWithData(private_key_data, key_attributes, &error); if (error) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); CFRelease(error); goto error; } /* Zero out the fake public keys in the key pair */ aws_byte_buf_secure_zero(&cc_key_pair->key_pair.pub_x); aws_byte_buf_secure_zero(&cc_key_pair->key_pair.pub_y); CFRelease(key_attributes); CFRelease(private_key_data); return &cc_key_pair->key_pair; error: if (private_key_data) { CFRelease(private_key_data); } if (key_attributes) { CFRelease(key_attributes); } s_destroy_key(&cc_key_pair->key_pair); return NULL; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y) { struct aws_byte_cursor empty_cur; AWS_ZERO_STRUCT(empty_cur); struct commoncrypto_ecc_key_pair *cc_key_pair = s_alloc_pair_and_init_buffers(allocator, curve_name, *public_key_x, *public_key_y, empty_cur); if (!cc_key_pair) { return NULL; } CFMutableDictionaryRef key_attributes = NULL; CFDataRef pub_key_data = CFDataCreate( cc_key_pair->cf_allocator, 
cc_key_pair->key_pair.key_buf.buffer, cc_key_pair->key_pair.key_buf.len); if (!pub_key_data) { goto error; } key_attributes = CFDictionaryCreateMutable(cc_key_pair->cf_allocator, 6, NULL, NULL); if (!key_attributes) { goto error; } CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeECSECPrimeRandom); CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPublic); CFIndex key_size_bits = cc_key_pair->key_pair.pub_x.len * 8; CFDictionaryAddValue(key_attributes, kSecAttrKeySizeInBits, &key_size_bits); CFDictionaryAddValue(key_attributes, kSecAttrCanSign, kCFBooleanFalse); CFDictionaryAddValue(key_attributes, kSecAttrCanVerify, kCFBooleanTrue); CFDictionaryAddValue(key_attributes, kSecAttrCanDerive, kCFBooleanFalse); CFErrorRef error = NULL; cc_key_pair->pub_key_ref = SecKeyCreateWithData(pub_key_data, key_attributes, &error); if (error) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); CFRelease(error); goto error; } CFRelease(key_attributes); CFRelease(pub_key_data); return &cc_key_pair->key_pair; error: if (key_attributes) { CFRelease(key_attributes); } if (pub_key_data) { CFRelease(pub_key_data); } s_destroy_key(&cc_key_pair->key_pair); return NULL; } #if defined(AWS_OS_MACOS) struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name) { struct commoncrypto_ecc_key_pair *cc_key_pair = aws_mem_calloc(allocator, 1, sizeof(struct commoncrypto_ecc_key_pair)); if (!cc_key_pair) { return NULL; } CFDataRef sec_key_export_data = NULL; CFStringRef key_size_cf_str = NULL; CFMutableDictionaryRef key_attributes = NULL; struct aws_der_decoder *decoder = NULL; aws_atomic_init_int(&cc_key_pair->key_pair.ref_count, 1); cc_key_pair->key_pair.impl = cc_key_pair; cc_key_pair->key_pair.allocator = allocator; cc_key_pair->cf_allocator = aws_wrapped_cf_allocator_new(allocator); if (!cc_key_pair->cf_allocator) { goto error; } size_t key_coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name); if (!key_coordinate_size) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } key_attributes = CFDictionaryCreateMutable(cc_key_pair->cf_allocator, 6, NULL, NULL); if (!key_attributes) { goto error; } CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeECSECPrimeRandom); CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPrivate); CFIndex key_size_bits = key_coordinate_size * 8; char key_size_str[32] = {0}; snprintf(key_size_str, sizeof(key_size_str), "%d", (int)key_size_bits); key_size_cf_str = CFStringCreateWithCString(cc_key_pair->cf_allocator, key_size_str, kCFStringEncodingASCII); if (!key_size_cf_str) { goto error; } CFDictionaryAddValue(key_attributes, kSecAttrKeySizeInBits, key_size_cf_str); CFErrorRef error = NULL; cc_key_pair->priv_key_ref = SecKeyCreateRandomKey(key_attributes, &error); if (error) { aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); CFRelease(error); goto error; } cc_key_pair->pub_key_ref = SecKeyCopyPublicKey(cc_key_pair->priv_key_ref); /* OKAY up to here was incredibly reasonable, after this we get attacked by the bad API design * dragons. * * Summary: Apple assumed we'd never need the raw key data. Apple was wrong. So we have to export each component * into the OpenSSL format (just fancy words for DER), but the public key and private key are exported separately * for some reason. Anyways, we export the keys, use our handy dandy DER decoder and grab the raw key data out. 
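 * This export/re-parse round trip relies on SecItemExport, and the whole
 * generate_random implementation is compiled only when AWS_OS_MACOS is defined
 * (see the #if above and the matching #endif below).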
*/ OSStatus ret_code = SecItemExport(cc_key_pair->priv_key_ref, kSecFormatOpenSSL, 0, NULL, &sec_key_export_data); if (ret_code != errSecSuccess) { aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto error; } /* now we need to DER decode data */ struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(CFDataGetBytePtr(sec_key_export_data), CFDataGetLength(sec_key_export_data)); decoder = aws_der_decoder_new(allocator, key_cur); if (!decoder) { goto error; } struct aws_byte_cursor pub_x; AWS_ZERO_STRUCT(pub_x); struct aws_byte_cursor pub_y; AWS_ZERO_STRUCT(pub_y); struct aws_byte_cursor priv_d; AWS_ZERO_STRUCT(priv_d); if (aws_der_decoder_load_ecc_key_pair(decoder, &pub_x, &pub_y, &priv_d, &curve_name)) { goto error; } AWS_ASSERT( priv_d.len == key_coordinate_size && pub_x.len == key_coordinate_size && pub_y.len == key_coordinate_size && "Apple Security Framework had better have exported the full pair."); size_t total_buffer_size = key_coordinate_size * 3 + 1; if (aws_byte_buf_init(&cc_key_pair->key_pair.key_buf, allocator, total_buffer_size)) { goto error; } aws_byte_buf_write_u8(&cc_key_pair->key_pair.key_buf, s_preamble); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_x); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &pub_y); aws_byte_buf_append(&cc_key_pair->key_pair.key_buf, &priv_d); /* cc_key_pair->key_pair.key_buf is contiguous memory, so just load up the offsets. */ cc_key_pair->key_pair.pub_x = aws_byte_buf_from_array(cc_key_pair->key_pair.key_buf.buffer + 1, key_coordinate_size); cc_key_pair->key_pair.pub_y = aws_byte_buf_from_array(cc_key_pair->key_pair.pub_x.buffer + key_coordinate_size, key_coordinate_size); cc_key_pair->key_pair.priv_d = aws_byte_buf_from_array(cc_key_pair->key_pair.pub_y.buffer + key_coordinate_size, key_coordinate_size); cc_key_pair->key_pair.curve_name = curve_name; cc_key_pair->key_pair.vtable = &s_key_pair_vtable; CFRelease(sec_key_export_data); CFRelease(key_size_cf_str); CFRelease(key_attributes); aws_der_decoder_destroy(decoder); return &cc_key_pair->key_pair; error: if (decoder) { aws_der_decoder_destroy(decoder); } if (key_attributes) { CFRelease(key_attributes); } if (sec_key_export_data) { CFRelease(sec_key_export_data); } if (key_size_cf_str) { CFRelease(key_size_cf_str); } s_destroy_key(&cc_key_pair->key_pair); return NULL; } #endif /* AWS_OS_MACOS */ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys) { struct aws_ecc_key_pair *key_pair = NULL; struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, *encoded_keys); CFMutableDictionaryRef key_attributes = NULL; CFDataRef key_data = NULL; if (!decoder) { return NULL; } /* we could have private key or a public key, or a full pair. 
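 * The cursors below are zero-initialized, so any component that
 * aws_der_decoder_load_ecc_key_pair() does not fill in stays empty; the checks
 * that follow use pub_x.ptr / priv_d.ptr to decide whether the SecKey is created
 * with the private or the public key class.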
*/ struct aws_byte_cursor pub_x; AWS_ZERO_STRUCT(pub_x); struct aws_byte_cursor pub_y; AWS_ZERO_STRUCT(pub_y); struct aws_byte_cursor priv_d; AWS_ZERO_STRUCT(priv_d); enum aws_ecc_curve_name curve_name; if (aws_der_decoder_load_ecc_key_pair(decoder, &pub_x, &pub_y, &priv_d, &curve_name)) { goto error; } if (!pub_x.ptr && !priv_d.ptr) { aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); goto error; } struct commoncrypto_ecc_key_pair *cc_key_pair = s_alloc_pair_and_init_buffers(allocator, curve_name, pub_x, pub_y, priv_d); if (!cc_key_pair) { goto error; } key_pair = &cc_key_pair->key_pair; key_data = CFDataCreate( cc_key_pair->cf_allocator, cc_key_pair->key_pair.key_buf.buffer, cc_key_pair->key_pair.key_buf.len); if (!key_data) { goto error; } key_attributes = CFDictionaryCreateMutable(cc_key_pair->cf_allocator, 6, NULL, NULL); if (!key_attributes) { goto error; } CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeECSECPrimeRandom); if (priv_d.ptr) { CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPrivate); CFDictionaryAddValue(key_attributes, kSecAttrCanSign, kCFBooleanTrue); CFDictionaryAddValue(key_attributes, kSecAttrCanDerive, kCFBooleanTrue); if (pub_x.ptr) { CFDictionaryAddValue(key_attributes, kSecAttrCanVerify, kCFBooleanTrue); } } else if (pub_x.ptr) { CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPublic); CFDictionaryAddValue(key_attributes, kSecAttrCanSign, kCFBooleanFalse); CFDictionaryAddValue(key_attributes, kSecAttrCanVerify, kCFBooleanTrue); } CFErrorRef error = NULL; cc_key_pair->priv_key_ref = SecKeyCreateWithData(key_data, key_attributes, &error); if (error) { aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } if (pub_x.ptr) { cc_key_pair->pub_key_ref = SecKeyCopyPublicKey(cc_key_pair->priv_key_ref); } CFRelease(key_attributes); CFRelease(key_data); aws_der_decoder_destroy(decoder); return key_pair; error: if (decoder) { aws_der_decoder_destroy(decoder); } if (key_attributes) { CFRelease(key_attributes); } if (key_data) { CFRelease(key_data); } if (key_pair) { s_destroy_key(key_pair); } return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/darwin/securityframework_rsa.c000066400000000000000000000417771456575232400273710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct sec_rsa_key_pair { struct aws_rsa_key_pair base; CFAllocatorRef cf_allocator; SecKeyRef priv_key_ref; SecKeyRef pub_key_ref; }; static void s_rsa_destroy_key(void *key_pair) { if (key_pair == NULL) { return; } struct aws_rsa_key_pair *base = key_pair; struct sec_rsa_key_pair *impl = base->impl; if (impl->pub_key_ref) { CFRelease(impl->pub_key_ref); } if (impl->priv_key_ref) { CFRelease(impl->priv_key_ref); } if (impl->cf_allocator) { aws_wrapped_cf_allocator_destroy(impl->cf_allocator); } aws_rsa_key_pair_base_clean_up(base); aws_mem_release(base->allocator, impl); } /* * Transforms security error code into crt error code and raises it as necessary. * Docs on what security apis can throw are fairly sparse and so far in testing * it only threw generic -50 error. So just log for now and we can add additional * error translation later. 
 */
static int s_reinterpret_sec_error_as_crt(CFErrorRef error, const char *function_name) {
    if (error == NULL) {
        return AWS_OP_SUCCESS;
    }

    CFIndex error_code = CFErrorGetCode(error);
    CFStringRef error_message = CFErrorCopyDescription(error); /* CFErrorCopyDescription never returns NULL */

    /*
     * Note: CFStringGetCStringPtr returns NULL quite often.
     * Refer to the writeup at the start of CFString.h as to why.
     * To reliably get an error message we need to use the following function,
     * which copies the error string into our buffer.
     */
    const char *error_cstr = NULL;
    char buffer[128];
    if (CFStringGetCString(error_message, buffer, 128, kCFStringEncodingUTF8)) {
        error_cstr = buffer;
    }

    int crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED;

    /*
     * macOS seems to throw errSecVerifyFailed for any signature verification
     * failure (based on testing, not a review of their code), which makes it
     * impossible to distinguish between signature validation failure and API
     * call failure.
     * So treat errSecVerifyFailed as signature validation failure rather than a
     * more generic crypto failure: it is more intuitive to the caller that the
     * signature cannot be verified than that something is wrong with crypto (in
     * most cases crypto is working correctly but returning a non-specific error).
     */
    if (error_code == errSecVerifyFailed) {
        crt_error = AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED;
    }

    AWS_LOGF_ERROR(
        AWS_LS_CAL_RSA,
        "%s() failed. CFError:%ld(%s) aws_error:%s",
        function_name,
        error_code,
        error_cstr ? error_cstr : "",
        aws_error_name(crt_error));

    CFRelease(error_message);
    return aws_raise_error(crt_error);
}

/*
 * Maps a crt encryption algo enum to its Security Framework equivalent.
 * Fails with AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM if the mapping cannot be done
 * for some reason.
 * The mapped value is passed back through the out variable.
 */
static int s_map_rsa_encryption_algo_to_sec(enum aws_rsa_encryption_algorithm algorithm, SecKeyAlgorithm *out) {
    switch (algorithm) {
        case AWS_CAL_RSA_ENCRYPTION_PKCS1_5:
            *out = kSecKeyAlgorithmRSAEncryptionPKCS1;
            return AWS_OP_SUCCESS;
        case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256:
            *out = kSecKeyAlgorithmRSAEncryptionOAEPSHA256;
            return AWS_OP_SUCCESS;
        case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512:
            *out = kSecKeyAlgorithmRSAEncryptionOAEPSHA512;
            return AWS_OP_SUCCESS;
    }
    return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM);
}

/*
 * Maps a crt signing algo enum to its Security Framework equivalent.
 * Fails with AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM if the mapping cannot be done
 * for some reason.
 * The mapped value is passed back through the out variable.
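 * Note: PSS with SHA-256 is only mapped when both the SDK and the runtime OS are
 * new enough (macOS 10.13+ / iOS 11+ / tvOS 11+ / watchOS 4+, per the checks
 * below); otherwise AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM is raised.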
*/ static int s_map_rsa_signing_algo_to_sec(enum aws_rsa_signature_algorithm algorithm, SecKeyAlgorithm *out) { switch (algorithm) { case AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256: *out = kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA256; return AWS_OP_SUCCESS; case AWS_CAL_RSA_SIGNATURE_PSS_SHA256: #if (defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= 101300 /* macOS 10.13 */)) || \ (defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && (__IPHONE_OS_VERSION_MAX_ALLOWED >= 110000 /* iOS v11 */)) || \ (defined(__TV_OS_VERSION_MAX_ALLOWED) && (__TV_OS_VERSION_MAX_ALLOWED >= 110000 /* tvos v11 */)) || \ (defined(__WATCH_OS_VERSION_MAX_ALLOWED) && (__WATCH_OS_VERSION_MAX_ALLOWED >= 40000 /* watchos v4 */)) if (__builtin_available(macos 10.13, ios 11.0, tvos 11.0, watchos 4.0, *)) { *out = kSecKeyAlgorithmRSASignatureDigestPSSSHA256; return AWS_OP_SUCCESS; } else { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } #else return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); #endif } return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } static int s_rsa_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out) { struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; if (key_pair_impl->pub_key_ref == NULL) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Public Key required for encrypt operation."); return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } SecKeyAlgorithm alg; if (s_map_rsa_encryption_algo_to_sec(algorithm, &alg)) { return AWS_OP_ERR; } if (!SecKeyIsAlgorithmSupported(key_pair_impl->pub_key_ref, kSecKeyOperationTypeEncrypt, alg)) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } CFDataRef plaintext_ref = CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, plaintext.ptr, plaintext.len, kCFAllocatorNull); AWS_FATAL_ASSERT(plaintext_ref); CFErrorRef error = NULL; CFDataRef ciphertext_ref = SecKeyCreateEncryptedData(key_pair_impl->pub_key_ref, alg, plaintext_ref, &error); if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateEncryptedData")) { CFRelease(error); goto on_error; } struct aws_byte_cursor ciphertext_cur = aws_byte_cursor_from_array(CFDataGetBytePtr(ciphertext_ref), CFDataGetLength(ciphertext_ref)); if (aws_byte_buf_append(out, &ciphertext_cur)) { aws_raise_error(AWS_ERROR_SHORT_BUFFER); goto on_error; } CFRelease(plaintext_ref); CFRelease(ciphertext_ref); return AWS_OP_SUCCESS; on_error: if (plaintext_ref != NULL) { CFRelease(plaintext_ref); } if (ciphertext_ref != NULL) { CFRelease(ciphertext_ref); } return AWS_OP_ERR; } static int s_rsa_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out) { struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; if (key_pair_impl->priv_key_ref == NULL) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Private Key required for encrypt operation."); return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } SecKeyAlgorithm alg; if (s_map_rsa_encryption_algo_to_sec(algorithm, &alg)) { return AWS_OP_ERR; } if (!SecKeyIsAlgorithmSupported(key_pair_impl->priv_key_ref, kSecKeyOperationTypeDecrypt, alg)) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } CFDataRef ciphertext_ref 
= CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, ciphertext.ptr, ciphertext.len, kCFAllocatorNull); AWS_FATAL_ASSERT(ciphertext_ref); CFErrorRef error = NULL; CFDataRef plaintext_ref = SecKeyCreateDecryptedData(key_pair_impl->priv_key_ref, alg, ciphertext_ref, &error); if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateDecryptedData")) { CFRelease(error); goto on_error; } struct aws_byte_cursor plaintext_cur = aws_byte_cursor_from_array(CFDataGetBytePtr(plaintext_ref), CFDataGetLength(plaintext_ref)); if (aws_byte_buf_append(out, &plaintext_cur)) { aws_raise_error(AWS_ERROR_SHORT_BUFFER); goto on_error; } CFRelease(plaintext_ref); CFRelease(ciphertext_ref); return AWS_OP_SUCCESS; on_error: if (plaintext_ref != NULL) { CFRelease(plaintext_ref); } if (ciphertext_ref != NULL) { CFRelease(ciphertext_ref); } return AWS_OP_ERR; } static int s_rsa_sign( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out) { struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; if (key_pair_impl->priv_key_ref == NULL) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Private Key required for sign operation."); return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } SecKeyAlgorithm alg; if (s_map_rsa_signing_algo_to_sec(algorithm, &alg)) { return AWS_OP_ERR; } if (!SecKeyIsAlgorithmSupported(key_pair_impl->priv_key_ref, kSecKeyOperationTypeSign, alg)) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } CFDataRef digest_ref = CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, digest.ptr, digest.len, kCFAllocatorNull); AWS_FATAL_ASSERT(digest_ref); CFErrorRef error = NULL; CFDataRef signature_ref = SecKeyCreateSignature(key_pair_impl->priv_key_ref, alg, digest_ref, &error); if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateSignature")) { CFRelease(error); goto on_error; } struct aws_byte_cursor signature_cur = aws_byte_cursor_from_array(CFDataGetBytePtr(signature_ref), CFDataGetLength(signature_ref)); if (aws_byte_buf_append(out, &signature_cur)) { aws_raise_error(AWS_ERROR_SHORT_BUFFER); goto on_error; } CFRelease(digest_ref); CFRelease(signature_ref); return AWS_OP_SUCCESS; on_error: CFRelease(digest_ref); if (signature_ref != NULL) { CFRelease(signature_ref); } return AWS_OP_ERR; } static int s_rsa_verify( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature) { struct sec_rsa_key_pair *key_pair_impl = key_pair->impl; if (key_pair_impl->pub_key_ref == NULL) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "RSA Key Pair is missing Public Key required for verify operation."); return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } SecKeyAlgorithm alg; if (s_map_rsa_signing_algo_to_sec(algorithm, &alg)) { return AWS_OP_ERR; } if (!SecKeyIsAlgorithmSupported(key_pair_impl->pub_key_ref, kSecKeyOperationTypeVerify, alg)) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Algo is not supported for this operation"); return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } CFDataRef digest_ref = CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, digest.ptr, digest.len, kCFAllocatorNull); CFDataRef signature_ref = CFDataCreateWithBytesNoCopy(key_pair_impl->cf_allocator, signature.ptr, signature.len, kCFAllocatorNull); AWS_FATAL_ASSERT(digest_ref && signature_ref); CFErrorRef error = NULL; Boolean result = 
SecKeyVerifySignature(key_pair_impl->pub_key_ref, alg, digest_ref, signature_ref, &error); CFRelease(digest_ref); CFRelease(signature_ref); if (s_reinterpret_sec_error_as_crt(error, "SecKeyVerifySignature")) { CFRelease(error); return AWS_OP_ERR; } return result ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); } static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { .encrypt = s_rsa_encrypt, .decrypt = s_rsa_decrypt, .sign = s_rsa_sign, .verify = s_rsa_verify, }; struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor key) { struct sec_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct sec_rsa_key_pair)); CFMutableDictionaryRef key_attributes = NULL; CFDataRef private_key_data = NULL; aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); key_pair_impl->base.impl = key_pair_impl; key_pair_impl->base.allocator = allocator; key_pair_impl->cf_allocator = aws_wrapped_cf_allocator_new(allocator); aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); private_key_data = CFDataCreate(key_pair_impl->cf_allocator, key.ptr, key.len); AWS_FATAL_ASSERT(private_key_data); key_attributes = CFDictionaryCreateMutable(key_pair_impl->cf_allocator, 0, NULL, NULL); AWS_FATAL_ASSERT(key_attributes); CFDictionaryAddValue(key_attributes, kSecClass, kSecClassKey); CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeRSA); CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPrivate); CFErrorRef error = NULL; key_pair_impl->priv_key_ref = SecKeyCreateWithData(private_key_data, key_attributes, &error); if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateWithData")) { CFRelease(error); goto on_error; } key_pair_impl->pub_key_ref = SecKeyCopyPublicKey(key_pair_impl->priv_key_ref); AWS_FATAL_ASSERT(key_pair_impl->pub_key_ref); key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; size_t block_size = SecKeyGetBlockSize(key_pair_impl->priv_key_ref); if (block_size < (AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS / 8) || block_size > (AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS / 8)) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unsupported key size: %zu", block_size); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } key_pair_impl->base.key_size_in_bits = block_size * 8; CFRelease(key_attributes); CFRelease(private_key_data); return &key_pair_impl->base; on_error: if (private_key_data) { CFRelease(private_key_data); } if (key_attributes) { CFRelease(key_attributes); } s_rsa_destroy_key(&key_pair_impl->base); return NULL; } struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor key) { struct sec_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct sec_rsa_key_pair)); CFMutableDictionaryRef key_attributes = NULL; CFDataRef public_key_data = NULL; aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); key_pair_impl->base.impl = key_pair_impl; key_pair_impl->base.allocator = allocator; key_pair_impl->cf_allocator = aws_wrapped_cf_allocator_new(allocator); aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); public_key_data = CFDataCreate(key_pair_impl->cf_allocator, key.ptr, key.len); AWS_FATAL_ASSERT(public_key_data); key_attributes = CFDictionaryCreateMutable(key_pair_impl->cf_allocator, 0, NULL, NULL); AWS_FATAL_ASSERT(key_attributes); 
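    /*
     * The attribute dictionary populated below tells SecKeyCreateWithData how to
     * interpret the raw bytes handed to it: an RSA key (kSecAttrKeyTypeRSA) in its
     * public form (kSecAttrKeyClassPublic), i.e. the PKCS#1 DER this constructor
     * receives.
     */
#if 0
    /*
     * Illustrative caller-side sketch (not part of the original source and never
     * compiled): verify a PKCS#1 v1.5 / SHA-256 signature with a key pair produced
     * by this constructor. "allocator", "public_key_der", "digest" and "signature"
     * are hypothetical placeholders supplied by the caller; error handling and key
     * release are omitted. s_rsa_verify() is the vtable entry this file installs
     * for verification.
     */
    struct aws_rsa_key_pair *pair = aws_rsa_key_pair_new_from_public_key_pkcs1_impl(allocator, public_key_der);
    if (pair != NULL &&
        s_rsa_verify(pair, AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, digest, signature) == AWS_OP_SUCCESS) {
        /* the signature checks out against the imported public key */
    }
#endif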
CFDictionaryAddValue(key_attributes, kSecClass, kSecClassKey); CFDictionaryAddValue(key_attributes, kSecAttrKeyType, kSecAttrKeyTypeRSA); CFDictionaryAddValue(key_attributes, kSecAttrKeyClass, kSecAttrKeyClassPublic); CFErrorRef error = NULL; key_pair_impl->pub_key_ref = SecKeyCreateWithData(public_key_data, key_attributes, &error); if (s_reinterpret_sec_error_as_crt(error, "SecKeyCreateWithData")) { CFRelease(error); goto on_error; } key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; size_t block_size = SecKeyGetBlockSize(key_pair_impl->pub_key_ref); if (block_size < (AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS / 8) || block_size > (AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS / 8)) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unsupported key size: %zu", block_size); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } key_pair_impl->base.key_size_in_bits = block_size * 8; CFRelease(key_attributes); CFRelease(public_key_data); return &key_pair_impl->base; on_error: if (public_key_data) { CFRelease(public_key_data); } if (key_attributes) { CFRelease(key_attributes); } s_rsa_destroy_key(&key_pair_impl->base); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/der.c000066400000000000000000000424101456575232400222060ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4204 4221) /* non-standard aggregate initializer warnings */ #endif struct aws_der_encoder { struct aws_allocator *allocator; struct aws_byte_buf storage; struct aws_byte_buf *buffer; /* buffer being written to, might be storage, might be a sequence/set buffer */ struct aws_array_list stack; }; struct aws_der_decoder { struct aws_allocator *allocator; struct aws_array_list tlvs; /* parsed elements */ int tlv_idx; /* index to elements after parsing */ struct aws_byte_cursor input; /* input buffer */ uint32_t depth; /* recursion depth when expanding containers */ struct der_tlv *container; /* currently expanding container */ }; struct der_tlv { uint8_t tag; uint32_t length; /* length of value in bytes */ uint32_t count; /* SEQUENCE or SET element count */ uint8_t *value; }; static int s_decode_tlv(struct der_tlv *tlv) { if (tlv->tag == AWS_DER_INTEGER) { if (tlv->length == 0) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } uint8_t first_byte = tlv->value[0]; if (first_byte & 0x80) { return aws_raise_error(AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT); } /* if its multibyte int and first byte is 0, strip it since it was added * to indicate to der that it is positive number. * if len is 1 and first byte is 0, then the number is just zero, so * leave it as is. 
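 * (DER writes INTEGERs as signed big-endian values, so a positive number whose
 * high bit is set gets a 0x00 pad byte prepended on encode; e.g. the value 0x80
 * is encoded as 00 80, and the strip below undoes that on decode.)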
*/ if (tlv->length > 1 && first_byte == 0x00) { tlv->length -= 1; tlv->value += 1; } } else if (tlv->tag == AWS_DER_BIT_STRING) { /* skip over the trailing skipped bit count */ tlv->length -= 1; tlv->value += 1; } return AWS_OP_SUCCESS; } static int s_der_read_tlv(struct aws_byte_cursor *cur, struct der_tlv *tlv) { uint8_t tag = 0; uint8_t len_bytes = 0; uint32_t len = 0; if (!aws_byte_cursor_read_u8(cur, &tag)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_byte_cursor_read_u8(cur, &len_bytes)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } /* if the sign bit is set, then the first byte is the number of bytes required to store * the length */ if (len_bytes & 0x80) { len_bytes &= 0x7f; switch (len_bytes) { case 1: if (!aws_byte_cursor_read_u8(cur, (uint8_t *)&len)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } break; case 2: if (!aws_byte_cursor_read_be16(cur, (uint16_t *)&len)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } break; case 4: if (!aws_byte_cursor_read_be32(cur, &len)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } break; default: return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } } else { len = len_bytes; } if (len > cur->len) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } tlv->tag = tag; tlv->length = len; tlv->value = (tag == AWS_DER_NULL) ? NULL : cur->ptr; if (s_decode_tlv(tlv)) { return AWS_OP_ERR; } aws_byte_cursor_advance(cur, len); return AWS_OP_SUCCESS; } static uint32_t s_encoded_len(struct der_tlv *tlv) { if (tlv->tag == AWS_DER_INTEGER) { uint8_t first_byte = tlv->value[0]; /* if the first byte has the high bit set, a 0 will be prepended to denote unsigned */ return tlv->length + ((first_byte & 0x80) != 0); } if (tlv->tag == AWS_DER_BIT_STRING) { return tlv->length + 1; /* needs a byte to denote how many trailing skipped bits */ } return tlv->length; } static int s_der_write_tlv(struct der_tlv *tlv, struct aws_byte_buf *buf) { if (!aws_byte_buf_write_u8(buf, tlv->tag)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } uint32_t len = s_encoded_len(tlv); if (len > UINT16_MAX) { /* write the high bit plus 4 byte length */ if (!aws_byte_buf_write_u8(buf, 0x84)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } if (!aws_byte_buf_write_be32(buf, len)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } } else if (len > UINT8_MAX) { /* write the high bit plus 2 byte length */ if (!aws_byte_buf_write_u8(buf, 0x82)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } if (!aws_byte_buf_write_be16(buf, (uint16_t)len)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } } else if (len > INT8_MAX) { /* Write the high bit + 1 byte length */ if (!aws_byte_buf_write_u8(buf, 0x81)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } if (!aws_byte_buf_write_u8(buf, (uint8_t)len)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } } else { if (!aws_byte_buf_write_u8(buf, (uint8_t)len)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } } switch (tlv->tag) { case AWS_DER_INTEGER: { /* if the first byte has the sign bit set, insert an extra 0x00 byte to indicate unsigned */ uint8_t first_byte = tlv->value[0]; if (first_byte & 0x80) { if (!aws_byte_buf_write_u8(buf, 0)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } } if (!aws_byte_buf_write(buf, tlv->value, tlv->length)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } } break; case 
AWS_DER_BOOLEAN: if (!aws_byte_buf_write_u8(buf, (*tlv->value) ? 0xff : 0x00)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } break; case AWS_DER_BIT_STRING: /* Write that there are 0 skipped bits */ if (!aws_byte_buf_write_u8(buf, 0)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } /* FALLTHROUGH */ case AWS_DER_BMPString: case AWS_DER_IA5String: case AWS_DER_PrintableString: case AWS_DER_UTF8_STRING: case AWS_DER_OBJECT_IDENTIFIER: case AWS_DER_OCTET_STRING: case AWS_DER_SEQUENCE: case AWS_DER_SET: if (!aws_byte_buf_write(buf, tlv->value, tlv->length)) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } break; case AWS_DER_NULL: /* No value bytes */ break; default: return aws_raise_error(AWS_ERROR_CAL_MISMATCHED_DER_TYPE); } return AWS_OP_SUCCESS; } struct aws_der_encoder *aws_der_encoder_new(struct aws_allocator *allocator, size_t capacity) { struct aws_der_encoder *encoder = aws_mem_calloc(allocator, 1, sizeof(struct aws_der_encoder)); AWS_FATAL_ASSERT(encoder); encoder->allocator = allocator; if (aws_byte_buf_init(&encoder->storage, encoder->allocator, capacity)) { goto error; } if (aws_array_list_init_dynamic(&encoder->stack, encoder->allocator, 4, sizeof(struct der_tlv))) { goto error; } encoder->buffer = &encoder->storage; return encoder; error: aws_array_list_clean_up(&encoder->stack); aws_byte_buf_clean_up(&encoder->storage); aws_mem_release(allocator, encoder); return NULL; } void aws_der_encoder_destroy(struct aws_der_encoder *encoder) { if (!encoder) { return; } aws_byte_buf_clean_up_secure(&encoder->storage); aws_array_list_clean_up(&encoder->stack); aws_mem_release(encoder->allocator, encoder); } int aws_der_encoder_write_unsigned_integer(struct aws_der_encoder *encoder, struct aws_byte_cursor integer) { AWS_FATAL_ASSERT(integer.len <= UINT32_MAX); struct der_tlv tlv = { .tag = AWS_DER_INTEGER, .length = (uint32_t)integer.len, .value = integer.ptr, }; return s_der_write_tlv(&tlv, encoder->buffer); } int aws_der_encoder_write_boolean(struct aws_der_encoder *encoder, bool boolean) { struct der_tlv tlv = {.tag = AWS_DER_BOOLEAN, .length = 1, .value = (uint8_t *)&boolean}; return s_der_write_tlv(&tlv, encoder->buffer); } int aws_der_encoder_write_null(struct aws_der_encoder *encoder) { struct der_tlv tlv = { .tag = AWS_DER_NULL, .length = 0, .value = NULL, }; return s_der_write_tlv(&tlv, encoder->buffer); } int aws_der_encoder_write_bit_string(struct aws_der_encoder *encoder, struct aws_byte_cursor bit_string) { AWS_FATAL_ASSERT(bit_string.len <= UINT32_MAX); struct der_tlv tlv = { .tag = AWS_DER_BIT_STRING, .length = (uint32_t)bit_string.len, .value = bit_string.ptr, }; return s_der_write_tlv(&tlv, encoder->buffer); } int aws_der_encoder_write_octet_string(struct aws_der_encoder *encoder, struct aws_byte_cursor octet_string) { AWS_FATAL_ASSERT(octet_string.len <= UINT32_MAX); struct der_tlv tlv = { .tag = AWS_DER_OCTET_STRING, .length = (uint32_t)octet_string.len, .value = octet_string.ptr, }; return s_der_write_tlv(&tlv, encoder->buffer); } static int s_der_encoder_begin_container(struct aws_der_encoder *encoder, enum aws_der_type type) { struct aws_byte_buf *seq_buf = aws_mem_acquire(encoder->allocator, sizeof(struct aws_byte_buf)); AWS_FATAL_ASSERT(seq_buf); if (aws_byte_buf_init(seq_buf, encoder->allocator, encoder->storage.capacity)) { return AWS_OP_ERR; } struct der_tlv tlv_seq = { .tag = type, .length = 0, /* not known yet, will update later */ .value = (void *)seq_buf, }; if (aws_array_list_push_back(&encoder->stack, &tlv_seq)) { 
aws_byte_buf_clean_up(seq_buf); return AWS_OP_ERR; } encoder->buffer = seq_buf; return AWS_OP_SUCCESS; } static int s_der_encoder_end_container(struct aws_der_encoder *encoder) { struct der_tlv tlv; if (aws_array_list_back(&encoder->stack, &tlv)) { return AWS_OP_ERR; } aws_array_list_pop_back(&encoder->stack); /* update the buffer to point at the next container on the stack */ if (encoder->stack.length > 0) { struct der_tlv outer; if (aws_array_list_back(&encoder->stack, &outer)) { return AWS_OP_ERR; } encoder->buffer = (struct aws_byte_buf *)outer.value; } else { encoder->buffer = &encoder->storage; } struct aws_byte_buf *seq_buf = (struct aws_byte_buf *)tlv.value; tlv.length = (uint32_t)seq_buf->len; tlv.value = seq_buf->buffer; int result = s_der_write_tlv(&tlv, encoder->buffer); aws_byte_buf_clean_up_secure(seq_buf); aws_mem_release(encoder->allocator, seq_buf); return result; } int aws_der_encoder_begin_sequence(struct aws_der_encoder *encoder) { return s_der_encoder_begin_container(encoder, AWS_DER_SEQUENCE); } int aws_der_encoder_end_sequence(struct aws_der_encoder *encoder) { return s_der_encoder_end_container(encoder); } int aws_der_encoder_begin_set(struct aws_der_encoder *encoder) { return s_der_encoder_begin_container(encoder, AWS_DER_SET); } int aws_der_encoder_end_set(struct aws_der_encoder *encoder) { return s_der_encoder_end_container(encoder); } int aws_der_encoder_get_contents(struct aws_der_encoder *encoder, struct aws_byte_cursor *contents) { if (encoder->storage.len == 0) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (encoder->buffer != &encoder->storage) { /* someone forgot to end a sequence or set */ return aws_raise_error(AWS_ERROR_INVALID_STATE); } *contents = aws_byte_cursor_from_buf(&encoder->storage); return AWS_OP_SUCCESS; } /* * DECODER */ int s_decoder_parse(struct aws_der_decoder *decoder); struct aws_der_decoder *aws_der_decoder_new(struct aws_allocator *allocator, struct aws_byte_cursor input) { struct aws_der_decoder *decoder = aws_mem_calloc(allocator, 1, sizeof(struct aws_der_decoder)); AWS_FATAL_ASSERT(decoder); decoder->allocator = allocator; decoder->input = input; decoder->tlv_idx = -1; decoder->depth = 0; decoder->container = NULL; if (aws_array_list_init_dynamic(&decoder->tlvs, decoder->allocator, 16, sizeof(struct der_tlv))) { goto error; } if (s_decoder_parse(decoder)) { goto error; } return decoder; error: aws_array_list_clean_up(&decoder->tlvs); aws_mem_release(allocator, decoder); return NULL; } void aws_der_decoder_destroy(struct aws_der_decoder *decoder) { if (!decoder) { return; } aws_array_list_clean_up(&decoder->tlvs); aws_mem_release(decoder->allocator, decoder); } int s_parse_cursor(struct aws_der_decoder *decoder, struct aws_byte_cursor cur) { if (++decoder->depth > 16) { /* stream contains too many nested containers, probably malformed/attack */ return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } while (cur.len) { struct der_tlv tlv = {0}; if (s_der_read_tlv(&cur, &tlv)) { return AWS_OP_ERR; } /* skip trailing newlines in the stream after any TLV */ while (cur.len && *cur.ptr == '\n') { aws_byte_cursor_advance(&cur, 1); } if (aws_array_list_push_back(&decoder->tlvs, &tlv)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (decoder->container) { decoder->container->count++; } /* if the last element was a container, expand it recursively to maintain order */ if (tlv.tag & AWS_DER_FORM_CONSTRUCTED) { struct der_tlv *outer_container = decoder->container; struct der_tlv *container = NULL; 
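            /*
             * Constructed types (SEQUENCE/SET) are expanded in place: the container
             * TLV was just pushed onto decoder->tlvs, and the recursive
             * s_parse_cursor() call below pushes its children immediately after it,
             * so the flat list preserves document order. For example,
             * SEQUENCE { INTEGER a, INTEGER b } flattens to
             * [SEQUENCE (count = 2), INTEGER a, INTEGER b], each direct child
             * bumping the enclosing container's count through decoder->container.
             */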
aws_array_list_get_at_ptr(&decoder->tlvs, (void **)&container, decoder->tlvs.length - 1); decoder->container = container; if (!container) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct aws_byte_cursor container_cur = aws_byte_cursor_from_array(container->value, container->length); if (s_parse_cursor(decoder, container_cur)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } decoder->container = outer_container; /* restore the container stack */ } } --decoder->depth; return AWS_OP_SUCCESS; } int s_decoder_parse(struct aws_der_decoder *decoder) { return s_parse_cursor(decoder, decoder->input); } bool aws_der_decoder_next(struct aws_der_decoder *decoder) { return (++decoder->tlv_idx < (int)decoder->tlvs.length); } static struct der_tlv s_decoder_tlv(struct aws_der_decoder *decoder) { AWS_FATAL_ASSERT(decoder->tlv_idx < (int)decoder->tlvs.length); struct der_tlv tlv = {0}; aws_array_list_get_at(&decoder->tlvs, &tlv, decoder->tlv_idx); return tlv; } enum aws_der_type aws_der_decoder_tlv_type(struct aws_der_decoder *decoder) { struct der_tlv tlv = s_decoder_tlv(decoder); return tlv.tag; } size_t aws_der_decoder_tlv_length(struct aws_der_decoder *decoder) { struct der_tlv tlv = s_decoder_tlv(decoder); return tlv.length; } size_t aws_der_decoder_tlv_count(struct aws_der_decoder *decoder) { struct der_tlv tlv = s_decoder_tlv(decoder); AWS_FATAL_ASSERT(tlv.tag & AWS_DER_FORM_CONSTRUCTED); return tlv.count; } static void s_tlv_to_blob(struct der_tlv *tlv, struct aws_byte_cursor *blob) { AWS_FATAL_ASSERT(tlv->tag != AWS_DER_NULL); *blob = aws_byte_cursor_from_array(tlv->value, tlv->length); } int aws_der_decoder_tlv_string(struct aws_der_decoder *decoder, struct aws_byte_cursor *string) { struct der_tlv tlv = s_decoder_tlv(decoder); if (tlv.tag != AWS_DER_OCTET_STRING && tlv.tag != AWS_DER_BIT_STRING) { return aws_raise_error(AWS_ERROR_CAL_MISMATCHED_DER_TYPE); } s_tlv_to_blob(&tlv, string); return AWS_OP_SUCCESS; } int aws_der_decoder_tlv_unsigned_integer(struct aws_der_decoder *decoder, struct aws_byte_cursor *integer) { struct der_tlv tlv = s_decoder_tlv(decoder); if (tlv.tag != AWS_DER_INTEGER) { return aws_raise_error(AWS_ERROR_CAL_MISMATCHED_DER_TYPE); } s_tlv_to_blob(&tlv, integer); return AWS_OP_SUCCESS; } int aws_der_decoder_tlv_boolean(struct aws_der_decoder *decoder, bool *boolean) { struct der_tlv tlv = s_decoder_tlv(decoder); if (tlv.tag != AWS_DER_BOOLEAN) { return aws_raise_error(AWS_ERROR_CAL_MISMATCHED_DER_TYPE); } *boolean = *tlv.value != 0; return AWS_OP_SUCCESS; } int aws_der_decoder_tlv_blob(struct aws_der_decoder *decoder, struct aws_byte_cursor *blob) { struct der_tlv tlv = s_decoder_tlv(decoder); s_tlv_to_blob(&tlv, blob); return AWS_OP_SUCCESS; } #ifdef _MSC_VER # pragma warning(pop) #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/ecc.c000066400000000000000000000264571456575232400222030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #define STATIC_INIT_BYTE_CURSOR(a, name) \ static struct aws_byte_cursor s_##name = { \ .ptr = (a), \ .len = sizeof(a), \ }; static uint8_t s_p256_oid[] = { 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07, }; STATIC_INIT_BYTE_CURSOR(s_p256_oid, ecc_p256_oid) static uint8_t s_p384_oid[] = { 0x2B, 0x81, 0x04, 0x00, 0x22, }; STATIC_INIT_BYTE_CURSOR(s_p384_oid, ecc_p384_oid) static struct aws_byte_cursor *s_ecc_curve_oids[] = { [AWS_CAL_ECDSA_P256] = &s_ecc_p256_oid, [AWS_CAL_ECDSA_P384] = &s_ecc_p384_oid, }; int aws_ecc_curve_name_from_oid(struct aws_byte_cursor *oid, enum aws_ecc_curve_name *curve_name) { if (aws_byte_cursor_eq(oid, &s_ecc_p256_oid)) { *curve_name = AWS_CAL_ECDSA_P256; return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq(oid, &s_ecc_p384_oid)) { *curve_name = AWS_CAL_ECDSA_P384; return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER); } int aws_ecc_oid_from_curve_name(enum aws_ecc_curve_name curve_name, struct aws_byte_cursor *oid) { if (curve_name < AWS_CAL_ECDSA_P256 || curve_name > AWS_CAL_ECDSA_P384) { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } *oid = *s_ecc_curve_oids[curve_name]; return AWS_OP_SUCCESS; } typedef struct aws_ecc_key_pair *(aws_ecc_key_pair_new_from_public_key_fn)( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); typedef struct aws_ecc_key_pair *(aws_ecc_key_pair_new_from_private_key_fn)( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); #ifndef BYO_CRYPTO extern struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y); extern struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key); #else /* BYO_CRYPTO */ struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y) { (void)allocator; (void)curve_name; (void)public_key_x; (void)public_key_y; abort(); } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key) { (void)allocator; (void)curve_name; (void)priv_key; abort(); } #endif /* BYO_CRYPTO */ static aws_ecc_key_pair_new_from_public_key_fn *s_ecc_key_pair_new_from_public_key_fn = aws_ecc_key_pair_new_from_public_key_impl; static aws_ecc_key_pair_new_from_private_key_fn *s_ecc_key_pair_new_from_private_key_fn = aws_ecc_key_pair_new_from_private_key_impl; struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y) { return s_ecc_key_pair_new_from_public_key_fn(allocator, curve_name, public_key_x, public_key_y); } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key) { return s_ecc_key_pair_new_from_private_key_fn(allocator, curve_name, priv_key); } static void 
s_aws_ecc_key_pair_destroy(struct aws_ecc_key_pair *key_pair) { if (key_pair) { AWS_FATAL_ASSERT(key_pair->vtable->destroy && "ECC KEY PAIR destroy function must be included on the vtable"); key_pair->vtable->destroy(key_pair); } } int aws_ecc_key_pair_derive_public_key(struct aws_ecc_key_pair *key_pair) { AWS_FATAL_ASSERT(key_pair->vtable->derive_pub_key && "ECC KEY PAIR derive function must be included on the vtable"); return key_pair->vtable->derive_pub_key(key_pair); } int aws_ecc_key_pair_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature) { AWS_FATAL_ASSERT(key_pair->vtable->sign_message && "ECC KEY PAIR sign message must be included on the vtable"); return key_pair->vtable->sign_message(key_pair, message, signature); } int aws_ecc_key_pair_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature) { AWS_FATAL_ASSERT( key_pair->vtable->verify_signature && "ECC KEY PAIR verify signature must be included on the vtable"); return key_pair->vtable->verify_signature(key_pair, message, signature); } size_t aws_ecc_key_pair_signature_length(const struct aws_ecc_key_pair *key_pair) { AWS_FATAL_ASSERT( key_pair->vtable->signature_length && "ECC KEY PAIR signature length must be included on the vtable"); return key_pair->vtable->signature_length(key_pair); } void aws_ecc_key_pair_get_public_key( const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *pub_x, struct aws_byte_cursor *pub_y) { *pub_x = aws_byte_cursor_from_buf(&key_pair->pub_x); *pub_y = aws_byte_cursor_from_buf(&key_pair->pub_y); } void aws_ecc_key_pair_get_private_key(const struct aws_ecc_key_pair *key_pair, struct aws_byte_cursor *private_d) { *private_d = aws_byte_cursor_from_buf(&key_pair->priv_d); } size_t aws_ecc_key_coordinate_byte_size_from_curve_name(enum aws_ecc_curve_name curve_name) { switch (curve_name) { case AWS_CAL_ECDSA_P256: return 32; case AWS_CAL_ECDSA_P384: return 48; default: return 0; } } int aws_der_decoder_load_ecc_key_pair( struct aws_der_decoder *decoder, struct aws_byte_cursor *out_public_x_coor, struct aws_byte_cursor *out_public_y_coor, struct aws_byte_cursor *out_private_d, enum aws_ecc_curve_name *out_curve_name) { AWS_ZERO_STRUCT(*out_public_x_coor); AWS_ZERO_STRUCT(*out_public_y_coor); AWS_ZERO_STRUCT(*out_private_d); /* we could have private key or a public key, or a full pair. */ struct aws_byte_cursor pair_part_1; AWS_ZERO_STRUCT(pair_part_1); struct aws_byte_cursor pair_part_2; AWS_ZERO_STRUCT(pair_part_2); bool curve_name_recognized = false; /* work with this pointer and move it to the next after using it. We need * to know which curve we're dealing with before we can figure out which is which. */ struct aws_byte_cursor *current_part = &pair_part_1; while (aws_der_decoder_next(decoder)) { enum aws_der_type type = aws_der_decoder_tlv_type(decoder); if (type == AWS_DER_OBJECT_IDENTIFIER) { struct aws_byte_cursor oid; AWS_ZERO_STRUCT(oid); aws_der_decoder_tlv_blob(decoder, &oid); /* There can be other OID's so just look for one that is the curve. */ if (!aws_ecc_curve_name_from_oid(&oid, out_curve_name)) { curve_name_recognized = true; } continue; } /* you'd think we'd get some type hints on which key this is, but it's not consistent * as far as I can tell. 
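     * Instead we capture up to two candidate blobs here and tell them apart further
     * down purely by length: a blob of exactly one coordinate size is the private key,
     * and a blob of (2 x coordinate size + 1) bytes is the public point with its
     * leading format byte.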
*/ if (type == AWS_DER_BIT_STRING || type == AWS_DER_OCTET_STRING) { aws_der_decoder_tlv_string(decoder, current_part); current_part = &pair_part_2; } } if (!curve_name_recognized) { return aws_raise_error(AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER); } size_t key_coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(*out_curve_name); struct aws_byte_cursor *private_key = NULL; struct aws_byte_cursor *public_key = NULL; size_t public_key_blob_size = key_coordinate_size * 2 + 1; if (pair_part_1.ptr && pair_part_1.len) { if (pair_part_1.len == key_coordinate_size) { private_key = &pair_part_1; } else if (pair_part_1.len == public_key_blob_size) { public_key = &pair_part_1; } } if (pair_part_2.ptr && pair_part_2.len) { if (pair_part_2.len == key_coordinate_size) { private_key = &pair_part_2; } else if (pair_part_2.len == public_key_blob_size) { public_key = &pair_part_2; } } if (!private_key && !public_key) { return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } if (private_key) { *out_private_d = *private_key; } if (public_key) { aws_byte_cursor_advance(public_key, 1); *out_public_x_coor = *public_key; out_public_x_coor->len = key_coordinate_size; out_public_y_coor->ptr = public_key->ptr + key_coordinate_size; out_public_y_coor->len = key_coordinate_size; } return AWS_OP_SUCCESS; } void aws_ecc_key_pair_acquire(struct aws_ecc_key_pair *key_pair) { aws_atomic_fetch_add(&key_pair->ref_count, 1); } void aws_ecc_key_pair_release(struct aws_ecc_key_pair *key_pair) { if (key_pair == NULL) { return; } size_t old_value = aws_atomic_fetch_sub(&key_pair->ref_count, 1); if (old_value == 1) { s_aws_ecc_key_pair_destroy(key_pair); } } struct aws_ecc_key_pair *aws_ecc_key_new_from_hex_coordinates( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x_hex_cursor, struct aws_byte_cursor pub_y_hex_cursor) { struct aws_byte_buf pub_x_buffer; AWS_ZERO_STRUCT(pub_x_buffer); struct aws_byte_buf pub_y_buffer; AWS_ZERO_STRUCT(pub_y_buffer); struct aws_ecc_key_pair *key = NULL; size_t pub_x_length = 0; size_t pub_y_length = 0; if (aws_hex_compute_decoded_len(pub_x_hex_cursor.len, &pub_x_length) || aws_hex_compute_decoded_len(pub_y_hex_cursor.len, &pub_y_length)) { goto done; } if (aws_byte_buf_init(&pub_x_buffer, allocator, pub_x_length) || aws_byte_buf_init(&pub_y_buffer, allocator, pub_y_length)) { goto done; } if (aws_hex_decode(&pub_x_hex_cursor, &pub_x_buffer) || aws_hex_decode(&pub_y_hex_cursor, &pub_y_buffer)) { goto done; } struct aws_byte_cursor pub_x_cursor = aws_byte_cursor_from_buf(&pub_x_buffer); struct aws_byte_cursor pub_y_cursor = aws_byte_cursor_from_buf(&pub_y_buffer); key = aws_ecc_key_pair_new_from_public_key(allocator, curve_name, &pub_x_cursor, &pub_y_cursor); done: aws_byte_buf_clean_up(&pub_x_buffer); aws_byte_buf_clean_up(&pub_y_buffer); return key; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/hash.c000066400000000000000000000073021456575232400223600ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #ifndef BYO_CRYPTO extern struct aws_hash *aws_sha256_default_new(struct aws_allocator *allocator); extern struct aws_hash *aws_sha1_default_new(struct aws_allocator *allocator); extern struct aws_hash *aws_md5_default_new(struct aws_allocator *allocator); static aws_hash_new_fn *s_sha256_new_fn = aws_sha256_default_new; static aws_hash_new_fn *s_sha1_new_fn = aws_sha1_default_new; static aws_hash_new_fn *s_md5_new_fn = aws_md5_default_new; #else static struct aws_hash *aws_hash_new_abort(struct aws_allocator *allocator) { (void)allocator; abort(); } static aws_hash_new_fn *s_sha256_new_fn = aws_hash_new_abort; static aws_hash_new_fn *s_sha1_new_fn = aws_hash_new_abort; static aws_hash_new_fn *s_md5_new_fn = aws_hash_new_abort; #endif struct aws_hash *aws_sha1_new(struct aws_allocator *allocator) { return s_sha1_new_fn(allocator); } struct aws_hash *aws_sha256_new(struct aws_allocator *allocator) { return s_sha256_new_fn(allocator); } struct aws_hash *aws_md5_new(struct aws_allocator *allocator) { return s_md5_new_fn(allocator); } void aws_set_md5_new_fn(aws_hash_new_fn *fn) { s_md5_new_fn = fn; } void aws_set_sha256_new_fn(aws_hash_new_fn *fn) { s_sha256_new_fn = fn; } void aws_set_sha1_new_fn(aws_hash_new_fn *fn) { s_sha1_new_fn = fn; } void aws_hash_destroy(struct aws_hash *hash) { hash->vtable->destroy(hash); } int aws_hash_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash) { return hash->vtable->update(hash, to_hash); } int aws_hash_finalize(struct aws_hash *hash, struct aws_byte_buf *output, size_t truncate_to) { if (truncate_to && truncate_to < hash->digest_size) { size_t available_buffer = output->capacity - output->len; if (available_buffer < truncate_to) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } uint8_t tmp_output[128] = {0}; AWS_ASSERT(sizeof(tmp_output) >= hash->digest_size); struct aws_byte_buf tmp_out_buf = aws_byte_buf_from_array(tmp_output, sizeof(tmp_output)); tmp_out_buf.len = 0; if (hash->vtable->finalize(hash, &tmp_out_buf)) { return AWS_OP_ERR; } memcpy(output->buffer + output->len, tmp_output, truncate_to); output->len += truncate_to; return AWS_OP_SUCCESS; } return hash->vtable->finalize(hash, output); } static inline int compute_hash( struct aws_hash *hash, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to) { if (!hash) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (aws_hash_update(hash, input)) { aws_hash_destroy(hash); return AWS_OP_ERR; } if (aws_hash_finalize(hash, output, truncate_to)) { aws_hash_destroy(hash); return AWS_OP_ERR; } aws_hash_destroy(hash); return AWS_OP_SUCCESS; } int aws_md5_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to) { return compute_hash(aws_md5_new(allocator), input, output, truncate_to); } int aws_sha256_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to) { return compute_hash(aws_sha256_new(allocator), input, output, truncate_to); } int aws_sha1_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to) { return compute_hash(aws_sha1_new(allocator), input, output, truncate_to); } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/hmac.c000066400000000000000000000050221456575232400223420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #ifndef BYO_CRYPTO extern struct aws_hmac *aws_sha256_hmac_default_new( struct aws_allocator *allocator, const struct aws_byte_cursor *secret); static aws_hmac_new_fn *s_sha256_hmac_new_fn = aws_sha256_hmac_default_new; #else static struct aws_hmac *aws_hmac_new_abort(struct aws_allocator *allocator, const struct aws_byte_cursor *secret) { (void)allocator; (void)secret; abort(); } static aws_hmac_new_fn *s_sha256_hmac_new_fn = aws_hmac_new_abort; #endif struct aws_hmac *aws_sha256_hmac_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret) { return s_sha256_hmac_new_fn(allocator, secret); } void aws_set_sha256_hmac_new_fn(aws_hmac_new_fn *fn) { s_sha256_hmac_new_fn = fn; } void aws_hmac_destroy(struct aws_hmac *hmac) { hmac->vtable->destroy(hmac); } int aws_hmac_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac) { return hmac->vtable->update(hmac, to_hmac); } int aws_hmac_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output, size_t truncate_to) { if (truncate_to && truncate_to < hmac->digest_size) { size_t available_buffer = output->capacity - output->len; if (available_buffer < truncate_to) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } uint8_t tmp_output[128] = {0}; AWS_ASSERT(sizeof(tmp_output) >= hmac->digest_size); struct aws_byte_buf tmp_out_buf = aws_byte_buf_from_array(tmp_output, sizeof(tmp_output)); tmp_out_buf.len = 0; if (hmac->vtable->finalize(hmac, &tmp_out_buf)) { return AWS_OP_ERR; } memcpy(output->buffer + output->len, tmp_output, truncate_to); output->len += truncate_to; return AWS_OP_SUCCESS; } return hmac->vtable->finalize(hmac, output); } int aws_sha256_hmac_compute( struct aws_allocator *allocator, const struct aws_byte_cursor *secret, const struct aws_byte_cursor *to_hmac, struct aws_byte_buf *output, size_t truncate_to) { struct aws_hmac *hmac = aws_sha256_hmac_new(allocator, secret); if (!hmac) { return AWS_OP_ERR; } if (aws_hmac_update(hmac, to_hmac)) { aws_hmac_destroy(hmac); return AWS_OP_ERR; } if (aws_hmac_finalize(hmac, output, truncate_to)) { aws_hmac_destroy(hmac); return AWS_OP_ERR; } aws_hmac_destroy(hmac); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/rsa.c000066400000000000000000000241511456575232400222230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include typedef struct aws_rsa_key_pair *( aws_rsa_key_pair_new_from_public_pkcs1_fn)(struct aws_allocator *allocator, struct aws_byte_cursor public_key); typedef struct aws_rsa_key_pair *( aws_rsa_key_pair_new_from_private_pkcs1_fn)(struct aws_allocator *allocator, struct aws_byte_cursor private_key); #ifndef BYO_CRYPTO extern struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor public_key); extern struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor private_key); #else /* BYO_CRYPTO */ struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor public_key) { (void)allocator; (void)public_key; abort(); } struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor private_key) { (void)allocator; (void)private_key; abort(); } #endif /* BYO_CRYPTO */ static aws_rsa_key_pair_new_from_public_pkcs1_fn *s_rsa_key_pair_new_from_public_key_pkcs1_fn = aws_rsa_key_pair_new_from_public_key_pkcs1_impl; static aws_rsa_key_pair_new_from_private_pkcs1_fn *s_rsa_key_pair_new_from_private_key_pkcs1_fn = aws_rsa_key_pair_new_from_private_key_pkcs1_impl; struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor public_key) { return s_rsa_key_pair_new_from_public_key_pkcs1_fn(allocator, public_key); } struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1( struct aws_allocator *allocator, struct aws_byte_cursor private_key) { return s_rsa_key_pair_new_from_private_key_pkcs1_fn(allocator, private_key); } void aws_rsa_key_pair_base_clean_up(struct aws_rsa_key_pair *key_pair) { aws_byte_buf_clean_up_secure(&key_pair->priv); aws_byte_buf_clean_up_secure(&key_pair->pub); } struct aws_rsa_key_pair *aws_rsa_key_pair_acquire(struct aws_rsa_key_pair *key_pair) { return aws_ref_count_acquire(&key_pair->ref_count); } struct aws_rsa_key_pair *aws_rsa_key_pair_release(struct aws_rsa_key_pair *key_pair) { if (key_pair != NULL) { aws_ref_count_release(&key_pair->ref_count); } return NULL; } size_t aws_rsa_key_pair_max_encrypt_plaintext_size( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm) { /* * Per rfc8017, max size of plaintext for encrypt operation is as follows: * PKCS1-v1_5: (key size in bytes) - 11 * OAEP: (key size in bytes) - 2 * (hash bytes) - 2 */ size_t key_size_in_bytes = key_pair->key_size_in_bits / 8; switch (algorithm) { case AWS_CAL_RSA_ENCRYPTION_PKCS1_5: return key_size_in_bytes - 11; case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256: return key_size_in_bytes - 2 * (256 / 8) - 2; case AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512: return key_size_in_bytes - 2 * (512 / 8) - 2; default: AWS_FATAL_ASSERT("Unsupported RSA Encryption Algorithm"); } return 0; } int aws_rsa_key_pair_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out) { AWS_PRECONDITION(key_pair); AWS_PRECONDITION(out); if (AWS_UNLIKELY(aws_rsa_key_pair_max_encrypt_plaintext_size(key_pair, algorithm) < plaintext.len)) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unexpected buffer size. 
For RSA, ciphertext must not exceed block size"); return aws_raise_error(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM); } return key_pair->vtable->encrypt(key_pair, algorithm, plaintext, out); } AWS_CAL_API int aws_rsa_key_pair_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out) { AWS_PRECONDITION(key_pair); AWS_PRECONDITION(out); if (AWS_UNLIKELY(ciphertext.len != (key_pair->key_size_in_bits / 8))) { AWS_LOGF_ERROR(AWS_LS_CAL_RSA, "Unexpected buffer size. For RSA, ciphertext is expected to match block size."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return key_pair->vtable->decrypt(key_pair, algorithm, ciphertext, out); } int aws_rsa_key_pair_sign_message( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out) { AWS_PRECONDITION(key_pair); AWS_PRECONDITION(out); AWS_FATAL_ASSERT( algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 || algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256); if (digest.len > AWS_SHA256_LEN) { AWS_LOGF_ERROR( AWS_LS_CAL_RSA, "Unexpected digest size. For RSA, digest length is bound by max size of hash function"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return key_pair->vtable->sign(key_pair, algorithm, digest, out); } int aws_rsa_key_pair_verify_signature( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature) { AWS_PRECONDITION(key_pair); return key_pair->vtable->verify(key_pair, algorithm, digest, signature); } size_t aws_rsa_key_pair_block_length(const struct aws_rsa_key_pair *key_pair) { AWS_PRECONDITION(key_pair); return key_pair->key_size_in_bits / 8; } size_t aws_rsa_key_pair_signature_length(const struct aws_rsa_key_pair *key_pair) { AWS_PRECONDITION(key_pair); return key_pair->key_size_in_bits / 8; } int aws_rsa_key_pair_get_public_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out) { (void)format; /* ignore format for now, since only pkcs1 is supported. */ AWS_PRECONDITION(key_pair); AWS_PRECONDITION(out); if (key_pair->pub.len == 0) { return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); } aws_byte_buf_init_copy(out, key_pair->allocator, &key_pair->pub); return AWS_OP_SUCCESS; } int aws_rsa_key_pair_get_private_key( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_key_export_format format, struct aws_byte_buf *out) { (void)format; /* ignore format for now, since only pkcs1 is supported. 
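       Note that the caller receives a fresh copy of the stored key blob and owns it;
       for private key material that copy is best released with
       aws_byte_buf_clean_up_secure().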
*/ AWS_PRECONDITION(key_pair); AWS_PRECONDITION(out); if (key_pair->priv.len == 0) { return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); } aws_byte_buf_init_copy(out, key_pair->allocator, &key_pair->priv); return AWS_OP_SUCCESS; } int aws_der_decoder_load_private_rsa_pkcs1(struct aws_der_decoder *decoder, struct aws_rsa_private_key_pkcs1 *out) { if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } struct aws_byte_cursor version_cur; if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &version_cur)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (version_cur.len != 1 || version_cur.ptr[0] != 0) { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_KEY_FORMAT); } out->version = 0; if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &(out->modulus))) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->publicExponent)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->privateExponent)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->prime1)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->prime2)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->exponent1)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->exponent2)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->coefficient)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } return AWS_OP_SUCCESS; } int aws_der_decoder_load_public_rsa_pkcs1(struct aws_der_decoder *decoder, struct aws_rsa_public_key_pkcs1 *out) { if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &(out->modulus))) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_unsigned_integer(decoder, &out->publicExponent)) { return aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); } return AWS_OP_SUCCESS; } int is_valid_rsa_key_size(size_t key_size_in_bits) { if (key_size_in_bits < AWS_CAL_RSA_MIN_SUPPORTED_KEY_SIZE_IN_BITS || key_size_in_bits > AWS_CAL_RSA_MAX_SUPPORTED_KEY_SIZE_IN_BITS || key_size_in_bits % 8 != 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/symmetric_cipher.c000066400000000000000000000200671456575232400250060ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #ifndef BYO_CRYPTO extern struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); extern struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv); extern struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *decryption_tag); extern struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key); #else /* BYO_CRYPTO */ struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { (void)allocator; (void)key; (void)iv; abort(); } struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { (void)allocator; (void)key; (void)iv; abort(); } struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *decryption_tag) { (void)allocator; (void)key; (void)iv; (void)aad; (void)decryption_tag; abort(); } struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key) { (void)allocator; (void)key; abort(); } #endif /* BYO_CRYPTO */ static aws_aes_cbc_256_new_fn *s_aes_cbc_new_fn = aws_aes_cbc_256_new_impl; static aws_aes_ctr_256_new_fn *s_aes_ctr_new_fn = aws_aes_ctr_256_new_impl; static aws_aes_gcm_256_new_fn *s_aes_gcm_new_fn = aws_aes_gcm_256_new_impl; static aws_aes_keywrap_256_new_fn *s_aes_keywrap_new_fn = aws_aes_keywrap_256_new_impl; static int s_check_input_size_limits(const struct aws_symmetric_cipher *cipher, const struct aws_byte_cursor *input) { /* libcrypto uses int, not size_t, so this is the limit. * For simplicity, enforce the same rules on all platforms. */ return input->len <= INT_MAX - cipher->block_size ? 
AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM); } static int s_validate_key_materials( const struct aws_byte_cursor *key, size_t expected_key_size, const struct aws_byte_cursor *iv, size_t expected_iv_size) { if (key && key->len != expected_key_size) { return aws_raise_error(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM); } if (iv && iv->len != expected_iv_size) { return aws_raise_error(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM); } return AWS_OP_SUCCESS; } struct aws_symmetric_cipher *aws_aes_cbc_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, iv, AWS_AES_256_CIPHER_BLOCK_SIZE) != AWS_OP_SUCCESS) { return NULL; } return s_aes_cbc_new_fn(allocator, key, iv); } struct aws_symmetric_cipher *aws_aes_ctr_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, iv, AWS_AES_256_CIPHER_BLOCK_SIZE) != AWS_OP_SUCCESS) { return NULL; } return s_aes_ctr_new_fn(allocator, key, iv); } struct aws_symmetric_cipher *aws_aes_gcm_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *decryption_tag) { if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, iv, AWS_AES_256_CIPHER_BLOCK_SIZE - sizeof(uint32_t)) != AWS_OP_SUCCESS) { return NULL; } return s_aes_gcm_new_fn(allocator, key, iv, aad, decryption_tag); } struct aws_symmetric_cipher *aws_aes_keywrap_256_new( struct aws_allocator *allocator, const struct aws_byte_cursor *key) { if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, NULL, 0) != AWS_OP_SUCCESS) { return NULL; } return s_aes_keywrap_new_fn(allocator, key); } void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher) { if (cipher) { cipher->vtable->destroy(cipher); } } int aws_symmetric_cipher_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out) { if (AWS_UNLIKELY(s_check_input_size_limits(cipher, &to_encrypt) != AWS_OP_SUCCESS)) { return AWS_OP_ERR; } if (cipher->good) { return cipher->vtable->encrypt(cipher, to_encrypt, out); } return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_symmetric_cipher_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out) { if (AWS_UNLIKELY(s_check_input_size_limits(cipher, &to_decrypt) != AWS_OP_SUCCESS)) { return AWS_OP_ERR; } if (cipher->good) { return cipher->vtable->decrypt(cipher, to_decrypt, out); } return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { if (cipher->good) { int ret_val = cipher->vtable->finalize_encryption(cipher, out); cipher->good = false; return ret_val; } return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { if (cipher->good) { int ret_val = cipher->vtable->finalize_decryption(cipher, out); cipher->good = false; return ret_val; } return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher) { int ret_val = cipher->vtable->reset(cipher); if (ret_val == AWS_OP_SUCCESS) { cipher->good = true; } return ret_val; } struct aws_byte_cursor 
aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher) { return aws_byte_cursor_from_buf(&cipher->tag); } struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector(const struct aws_symmetric_cipher *cipher) { return aws_byte_cursor_from_buf(&cipher->iv); } struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher) { return aws_byte_cursor_from_buf(&cipher->key); } bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher) { return cipher->good; } void aws_symmetric_cipher_generate_initialization_vector( size_t len_bytes, bool is_counter_mode, struct aws_byte_buf *out) { size_t counter_len = is_counter_mode ? sizeof(uint32_t) : 0; AWS_ASSERT(len_bytes > counter_len); size_t rand_len = len_bytes - counter_len; AWS_FATAL_ASSERT(aws_device_random_buffer_append(out, rand_len) == AWS_OP_SUCCESS); if (is_counter_mode) { /* put counter at the end, initialized to 1 */ aws_byte_buf_write_be32(out, 1); } } void aws_symmetric_cipher_generate_key(size_t key_len_bytes, struct aws_byte_buf *out) { AWS_FATAL_ASSERT(aws_device_random_buffer_append(out, key_len_bytes) == AWS_OP_SUCCESS); } int aws_symmetric_cipher_try_ensure_sufficient_buffer_space(struct aws_byte_buf *buf, size_t size) { if (buf->capacity - buf->len < size) { return aws_byte_buf_reserve_relative(buf, size); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/unix/000077500000000000000000000000001456575232400222525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/unix/openssl_aes.c000066400000000000000000000637741456575232400247520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #define OPENSSL_SUPPRESS_DEPRECATED #include struct openssl_aes_cipher { struct aws_symmetric_cipher cipher_base; EVP_CIPHER_CTX *encryptor_ctx; EVP_CIPHER_CTX *decryptor_ctx; struct aws_byte_buf working_buffer; }; static int s_encrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { size_t required_buffer_space = input.len + cipher->block_size; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t available_write_space = out->capacity - out->len; struct openssl_aes_cipher *openssl_cipher = cipher->impl; int len_written = (int)(available_write_space); if (!EVP_EncryptUpdate( openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static int s_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; size_t required_buffer_space = cipher->block_size; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } int len_written = (int)(out->capacity - out->len); if (!EVP_EncryptFinal_ex(openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static int s_decrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; size_t required_buffer_space = input.len + cipher->block_size; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t available_write_space = out->capacity - out->len; int len_written = (int)available_write_space; if (!EVP_DecryptUpdate( openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static int s_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; size_t required_buffer_space = cipher->block_size; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } int len_written = (int)out->capacity - out->len; if (!EVP_DecryptFinal_ex(openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += len_written; return AWS_OP_SUCCESS; } static void s_destroy(struct aws_symmetric_cipher *cipher) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; if (openssl_cipher->encryptor_ctx) { EVP_CIPHER_CTX_free(openssl_cipher->encryptor_ctx); } if (openssl_cipher->decryptor_ctx) { EVP_CIPHER_CTX_free(openssl_cipher->decryptor_ctx); } aws_byte_buf_clean_up_secure(&cipher->key); aws_byte_buf_clean_up_secure(&cipher->iv); if (cipher->tag.buffer) { aws_byte_buf_clean_up_secure(&cipher->tag); } if (cipher->aad.buffer) { aws_byte_buf_clean_up_secure(&cipher->aad); } aws_byte_buf_clean_up_secure(&openssl_cipher->working_buffer); 
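    /* all secret-bearing buffers were wiped above via the _secure variants; now free
     * the wrapper struct itself (cipher_base is embedded in it) */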
aws_mem_release(cipher->allocator, openssl_cipher); } static int s_clear_reusable_state(struct aws_symmetric_cipher *cipher) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; EVP_CIPHER_CTX_cleanup(openssl_cipher->encryptor_ctx); EVP_CIPHER_CTX_cleanup(openssl_cipher->decryptor_ctx); aws_byte_buf_secure_zero(&openssl_cipher->working_buffer); cipher->good = true; return AWS_OP_SUCCESS; } static int s_init_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; if (!EVP_EncryptInit_ex( openssl_cipher->encryptor_ctx, EVP_aes_256_cbc(), NULL, openssl_cipher->cipher_base.key.buffer, openssl_cipher->cipher_base.iv.buffer) || !EVP_DecryptInit_ex( openssl_cipher->decryptor_ctx, EVP_aes_256_cbc(), NULL, openssl_cipher->cipher_base.key.buffer, openssl_cipher->cipher_base.iv.buffer)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_reset_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) { int ret_val = s_clear_reusable_state(cipher); if (ret_val == AWS_OP_SUCCESS) { return s_init_cbc_cipher_materials(cipher); } return ret_val; } static struct aws_symmetric_cipher_vtable s_cbc_vtable = { .alg_name = "AES-CBC 256", .provider = "OpenSSL Compatible LibCrypto", .destroy = s_destroy, .reset = s_reset_cbc_cipher_materials, .decrypt = s_decrypt, .encrypt = s_encrypt, .finalize_decryption = s_finalize_decryption, .finalize_encryption = s_finalize_encryption, }; struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); cipher->cipher_base.allocator = allocator; cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->cipher_base.vtable = &s_cbc_vtable; cipher->cipher_base.impl = cipher; if (key) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); } else { aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); } if (iv) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv); } else { aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); aws_symmetric_cipher_generate_initialization_vector( AWS_AES_256_CIPHER_BLOCK_SIZE, false, &cipher->cipher_base.iv); } /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */ cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!"); /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). 
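       A separate decryption context is created next, so a single cipher instance can
       service both directions without re-initializing key material in between.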
*/ cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!"); if (s_init_cbc_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) { goto error; } cipher->cipher_base.good = true; return &cipher->cipher_base; error: s_destroy(&cipher->cipher_base); return NULL; } static int s_init_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; if (!(EVP_EncryptInit_ex( openssl_cipher->encryptor_ctx, EVP_aes_256_ctr(), NULL, openssl_cipher->cipher_base.key.buffer, openssl_cipher->cipher_base.iv.buffer) && EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) || !(EVP_DecryptInit_ex( openssl_cipher->decryptor_ctx, EVP_aes_256_ctr(), NULL, openssl_cipher->cipher_base.key.buffer, openssl_cipher->cipher_base.iv.buffer) && EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_reset_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) { int ret_val = s_clear_reusable_state(cipher); if (ret_val == AWS_OP_SUCCESS) { return s_init_ctr_cipher_materials(cipher); } return ret_val; } static struct aws_symmetric_cipher_vtable s_ctr_vtable = { .alg_name = "AES-CTR 256", .provider = "OpenSSL Compatible LibCrypto", .destroy = s_destroy, .reset = s_reset_ctr_cipher_materials, .decrypt = s_decrypt, .encrypt = s_encrypt, .finalize_decryption = s_finalize_decryption, .finalize_encryption = s_finalize_encryption, }; struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); cipher->cipher_base.allocator = allocator; cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->cipher_base.vtable = &s_ctr_vtable; cipher->cipher_base.impl = cipher; if (key) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); } else { aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); } if (iv) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv); } else { aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); aws_symmetric_cipher_generate_initialization_vector( AWS_AES_256_CIPHER_BLOCK_SIZE, true, &cipher->cipher_base.iv); } /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */ cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!"); /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). 
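       Both CTR contexts additionally have padding disabled in
       s_init_ctr_cipher_materials(), since CTR is a stream mode and never emits a
       padding block.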
*/ cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!"); if (s_init_ctr_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) { goto error; } cipher->cipher_base.good = true; return &cipher->cipher_base; error: s_destroy(&cipher->cipher_base); return NULL; } static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; int ret_val = s_finalize_encryption(cipher, out); if (ret_val == AWS_OP_SUCCESS) { if (!cipher->tag.len) { if (!EVP_CIPHER_CTX_ctrl( openssl_cipher->encryptor_ctx, EVP_CTRL_GCM_GET_TAG, (int)cipher->tag.capacity, cipher->tag.buffer)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE; } } return ret_val; } static int s_init_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) && EVP_EncryptInit_ex( openssl_cipher->encryptor_ctx, NULL, NULL, openssl_cipher->cipher_base.key.buffer, openssl_cipher->cipher_base.iv.buffer) && EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) || !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) && EVP_DecryptInit_ex( openssl_cipher->decryptor_ctx, NULL, NULL, openssl_cipher->cipher_base.key.buffer, openssl_cipher->cipher_base.iv.buffer) && EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (openssl_cipher->cipher_base.aad.len) { int outLen = 0; if (!EVP_EncryptUpdate( openssl_cipher->encryptor_ctx, NULL, &outLen, openssl_cipher->cipher_base.aad.buffer, (int)openssl_cipher->cipher_base.aad.len) || !EVP_DecryptUpdate( openssl_cipher->decryptor_ctx, NULL, &outLen, openssl_cipher->cipher_base.aad.buffer, (int)openssl_cipher->cipher_base.aad.len)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } if (openssl_cipher->cipher_base.tag.len) { if (!EVP_CIPHER_CTX_ctrl( openssl_cipher->decryptor_ctx, EVP_CTRL_GCM_SET_TAG, (int)openssl_cipher->cipher_base.tag.len, openssl_cipher->cipher_base.tag.buffer)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } return AWS_OP_SUCCESS; } static int s_reset_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) { int ret_val = s_clear_reusable_state(cipher); if (ret_val == AWS_OP_SUCCESS) { return s_init_gcm_cipher_materials(cipher); } return ret_val; } static struct aws_symmetric_cipher_vtable s_gcm_vtable = { .alg_name = "AES-GCM 256", .provider = "OpenSSL Compatible LibCrypto", .destroy = s_destroy, .reset = s_reset_gcm_cipher_materials, .decrypt = s_decrypt, .encrypt = s_encrypt, .finalize_decryption = s_finalize_decryption, .finalize_encryption = s_finalize_gcm_encryption, }; struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *decryption_tag) { struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); cipher->cipher_base.allocator = allocator; cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->cipher_base.vtable = &s_gcm_vtable; cipher->cipher_base.impl = cipher; /* Copy key into the cipher 
context. */ if (key) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); } else { aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); } /* Copy initialization vector into the cipher context. */ if (iv) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv); } else { aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE - 4); aws_symmetric_cipher_generate_initialization_vector( AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, &cipher->cipher_base.iv); } /* Initialize the cipher contexts. */ cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!"); cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!"); /* Set AAD if provided */ if (aad) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.aad, allocator, *aad); } /* Set tag for the decryptor to use.*/ if (decryption_tag) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.tag, allocator, *decryption_tag); } else { /* we'll need this later when we grab the tag during encryption time. */ aws_byte_buf_init(&cipher->cipher_base.tag, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); } /* Initialize the cipher contexts with the specified key and IV. */ if (s_init_gcm_cipher_materials(&cipher->cipher_base)) { goto error; } cipher->cipher_base.good = true; return &cipher->cipher_base; error: s_destroy(&cipher->cipher_base); return NULL; } static int s_key_wrap_encrypt_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { (void)out; struct openssl_aes_cipher *openssl_cipher = cipher->impl; return aws_byte_buf_append_dynamic(&openssl_cipher->working_buffer, &input); } static const size_t MIN_CEK_LENGTH_BYTES = 128 / 8; static const unsigned char INTEGRITY_VALUE = 0xA6; #define KEYWRAP_BLOCK_SIZE 8u static int s_key_wrap_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_STATE); } /* the following is an in place implementation of RFC 3394 using the alternate in-place implementation. we use one in-place buffer instead of the copy at the end. the one letter variable names are meant to directly reflect the variables in the RFC */ size_t required_buffer_space = openssl_cipher->working_buffer.len + cipher->block_size; size_t starting_len_offset = out->len; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* put the integrity check register in the first 8 bytes of the final buffer. 
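       Per RFC 3394 that register (A) starts out as eight 0xA6 bytes (INTEGRITY_VALUE);
       the unwrap path below checks that the same value is recovered, which is the
       wrap's integrity check.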
*/ aws_byte_buf_write_u8_n(out, INTEGRITY_VALUE, KEYWRAP_BLOCK_SIZE); uint8_t *a = out->buffer + starting_len_offset; struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&openssl_cipher->working_buffer); aws_byte_buf_write_from_whole_cursor(out, working_buf_cur); /* put the register buffer after the integrity check register */ uint8_t *r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE; int n = (int)(openssl_cipher->working_buffer.len / KEYWRAP_BLOCK_SIZE); uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 2] = {0}; struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf)); int b_out_len = b.capacity; uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0}; struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf)); for (int j = 0; j <= 5; ++j) { for (int i = 1; i <= n; ++i) { /* concat A and R[i], A should be most significant and then R[i] should be least significant. */ memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE); memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE); /* encrypt the concatenated A and R[I] and store it in B */ if (!EVP_EncryptUpdate( openssl_cipher->encryptor_ctx, b.buffer, &b_out_len, temp_input.buffer, (int)temp_input.capacity)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } unsigned char t = (unsigned char)((n * j) + i); /* put the 64 MSB ^ T into A */ memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE); a[7] ^= t; /* put the 64 LSB into R[i] */ memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE); /* increment i -> R[i] */ r += KEYWRAP_BLOCK_SIZE; } /* reset R */ r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE; } return AWS_OP_SUCCESS; } static int s_key_wrap_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES + KEYWRAP_BLOCK_SIZE) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_STATE); } /* the following is an in place implementation of RFC 3394 using the alternate in-place implementation. we use one in-place buffer instead of the copy at the end. the one letter variable names are meant to directly reflect the variables in the RFC */ size_t required_buffer_space = openssl_cipher->working_buffer.len - KEYWRAP_BLOCK_SIZE; size_t starting_len_offset = out->len; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } memcpy( out->buffer + starting_len_offset, openssl_cipher->working_buffer.buffer + KEYWRAP_BLOCK_SIZE, required_buffer_space); /* integrity register should be the first 8 bytes of the final buffer. */ uint8_t *a = openssl_cipher->working_buffer.buffer; /* in-place register is the plaintext. 
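       These are the R[1]..R[n] blocks from RFC 3394, recovered in place.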
For decryption, start at the last array position (8 bytes before the end); */ uint8_t *r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE; int n = (int)(required_buffer_space / KEYWRAP_BLOCK_SIZE); uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 10] = {0}; struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf)); int b_out_len = b.capacity; uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0}; struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf)); for (int j = 5; j >= 0; --j) { for (int i = n; i >= 1; --i) { /* concat A and T */ memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE); unsigned char t = (unsigned char)((n * j) + i); temp_input.buffer[7] ^= t; /* R[i] */ memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE); /* Decrypt the concatenated buffer */ if (!EVP_DecryptUpdate( openssl_cipher->decryptor_ctx, b.buffer, &b_out_len, temp_input.buffer, (int)temp_input.capacity)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* set A to 64 MSB of decrypted result */ memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE); /* set the R[i] to the 64 LSB of decrypted result */ memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE); /* decrement i -> R[i] */ r -= KEYWRAP_BLOCK_SIZE; } /* reset R */ r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE; } /* here we perform the integrity check to make sure A == 0xA6A6A6A6A6A6A6A6 */ for (size_t i = 0; i < KEYWRAP_BLOCK_SIZE; ++i) { if (a[i] != INTEGRITY_VALUE) { cipher->good = false; return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); } } out->len += required_buffer_space; return AWS_OP_SUCCESS; } static int s_init_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) { struct openssl_aes_cipher *openssl_cipher = cipher->impl; if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) && EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) || !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) && EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_reset_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) { int ret_val = s_clear_reusable_state(cipher); if (ret_val == AWS_OP_SUCCESS) { return s_init_keywrap_cipher_materials(cipher); } return ret_val; } static struct aws_symmetric_cipher_vtable s_keywrap_vtable = { .alg_name = "AES-KEYWRAP 256", .provider = "OpenSSL Compatible LibCrypto", .destroy = s_destroy, .reset = s_reset_keywrap_cipher_materials, .decrypt = s_key_wrap_encrypt_decrypt, .encrypt = s_key_wrap_encrypt_decrypt, .finalize_decryption = s_key_wrap_finalize_decryption, .finalize_encryption = s_key_wrap_finalize_encryption, }; struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key) { struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); cipher->cipher_base.allocator = allocator; cipher->cipher_base.block_size = KEYWRAP_BLOCK_SIZE; cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->cipher_base.vtable = &s_keywrap_vtable; cipher->cipher_base.impl = cipher; /* Copy key into the cipher context. 
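       As with the other AES-256 constructors above, a random 256-bit key is generated
       when the caller does not supply one.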
*/ if (key) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); } else { aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); } aws_byte_buf_init(&cipher->working_buffer, allocator, KEYWRAP_BLOCK_SIZE); /* Initialize the cipher contexts. */ cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!"); cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!"); /* Initialize the cipher contexts with the specified key and IV. */ if (s_init_keywrap_cipher_materials(&cipher->cipher_base)) { goto error; } cipher->cipher_base.good = true; return &cipher->cipher_base; error: s_destroy(&cipher->cipher_base); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/unix/openssl_platform_init.c000066400000000000000000000655661456575232400270520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include /* * OpenSSL 3 has a large amount of interface changes and many of the functions used * throughout aws-c-cal have become deprecated. * Lets disable deprecation warnings, so that we can atleast run CI, until we * can move over to new functions. */ #define OPENSSL_SUPPRESS_DEPRECATED #include static struct openssl_hmac_ctx_table hmac_ctx_table; static struct openssl_evp_md_ctx_table evp_md_ctx_table; struct openssl_hmac_ctx_table *g_aws_openssl_hmac_ctx_table = NULL; struct openssl_evp_md_ctx_table *g_aws_openssl_evp_md_ctx_table = NULL; static struct aws_allocator *s_libcrypto_allocator = NULL; #if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) # define OPENSSL_IS_OPENSSL #endif /* weak refs to libcrypto functions to force them to at least try to link * and avoid dead-stripping */ #if defined(OPENSSL_IS_AWSLC) || defined(OPENSSL_IS_BORINGSSL) extern HMAC_CTX *HMAC_CTX_new(void) __attribute__((weak, used)); extern void HMAC_CTX_free(HMAC_CTX *) __attribute__((weak, used)); extern void HMAC_CTX_init(HMAC_CTX *) __attribute__((weak, used)); extern void HMAC_CTX_cleanup(HMAC_CTX *) __attribute__((weak, used)); extern int HMAC_Update(HMAC_CTX *, const unsigned char *, size_t) __attribute__((weak, used)); extern int HMAC_Final(HMAC_CTX *, unsigned char *, unsigned int *) __attribute__((weak, used)); extern int HMAC_Init_ex(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *) __attribute__((weak, used)); static int s_hmac_init_ex_bssl(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { AWS_PRECONDITION(ctx); int (*init_ex_pt)(HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *) = (int (*)( HMAC_CTX *, const void *, size_t, const EVP_MD *, ENGINE *))g_aws_openssl_hmac_ctx_table->impl.init_ex_fn; return init_ex_pt(ctx, key, key_len, md, impl); } #else /* 1.1 */ extern HMAC_CTX *HMAC_CTX_new(void) __attribute__((weak, used)); extern void HMAC_CTX_free(HMAC_CTX *) __attribute__((weak, used)); /* 1.0.2 */ extern void HMAC_CTX_init(HMAC_CTX *) __attribute__((weak, used)); extern void HMAC_CTX_cleanup(HMAC_CTX *) __attribute__((weak, used)); /* common */ extern int HMAC_Update(HMAC_CTX *, const unsigned char *, size_t) __attribute__((weak, used)); extern int HMAC_Final(HMAC_CTX *, unsigned char *, unsigned int *) 
__attribute__((weak, used)); extern int HMAC_Init_ex(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *) __attribute__((weak, used)); static int s_hmac_init_ex_openssl(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { AWS_PRECONDITION(ctx); if (key_len > INT_MAX) { return 0; } /*Note: unlike aws-lc and boringssl, openssl 1.1.1 and 1.0.2 take int as key len arg. */ int (*init_ex_ptr)(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *) = (int (*)(HMAC_CTX *, const void *, int, const EVP_MD *, ENGINE *))g_aws_openssl_hmac_ctx_table->impl.init_ex_fn; return init_ex_ptr(ctx, key, (int)key_len, md, impl); } #endif /* !OPENSSL_IS_AWSLC && !OPENSSL_IS_BORINGSSL*/ #if !defined(OPENSSL_IS_AWSLC) /* libcrypto 1.1 stub for init */ static void s_hmac_ctx_init_noop(HMAC_CTX *ctx) { (void)ctx; } /* libcrypto 1.1 stub for clean_up */ static void s_hmac_ctx_clean_up_noop(HMAC_CTX *ctx) { (void)ctx; } #endif #if defined(OPENSSL_IS_OPENSSL) /* libcrypto 1.0 shim for new */ static HMAC_CTX *s_hmac_ctx_new(void) { AWS_PRECONDITION( g_aws_openssl_hmac_ctx_table->init_fn != s_hmac_ctx_init_noop && "libcrypto 1.0 init called on libcrypto 1.1 vtable"); HMAC_CTX *ctx = aws_mem_calloc(s_libcrypto_allocator, 1, 300); AWS_FATAL_ASSERT(ctx && "Unable to allocate to HMAC_CTX"); g_aws_openssl_hmac_ctx_table->init_fn(ctx); return ctx; } /* libcrypto 1.0 shim for free */ static void s_hmac_ctx_free(HMAC_CTX *ctx) { AWS_PRECONDITION(ctx); AWS_PRECONDITION( g_aws_openssl_hmac_ctx_table->clean_up_fn != s_hmac_ctx_clean_up_noop && "libcrypto 1.0 clean_up called on libcrypto 1.1 vtable"); g_aws_openssl_hmac_ctx_table->clean_up_fn(ctx); aws_mem_release(s_libcrypto_allocator, ctx); } #endif /* !OPENSSL_IS_AWSLC */ enum aws_libcrypto_version { AWS_LIBCRYPTO_NONE = 0, AWS_LIBCRYPTO_1_0_2, AWS_LIBCRYPTO_1_1_1, AWS_LIBCRYPTO_LC, AWS_LIBCRYPTO_BORINGSSL }; bool s_resolve_hmac_102(void *module) { #if defined(OPENSSL_IS_OPENSSL) hmac_ctx_init init_fn = (hmac_ctx_init)HMAC_CTX_init; hmac_ctx_clean_up clean_up_fn = (hmac_ctx_clean_up)HMAC_CTX_cleanup; hmac_update update_fn = (hmac_update)HMAC_Update; hmac_final final_fn = (hmac_final)HMAC_Final; hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? 
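   The idiom used by all of these resolvers, sketched in isolation (some_symbol is a
   placeholder, not a real libcrypto symbol):

       extern void some_symbol(void) __attribute__((weak, used));
       void (*resolved)(void) = some_symbol;                     weak ref: NULL unless statically linked
       if (resolved == NULL) {
           *(void **)(&resolved) = dlsym(module, "some_symbol");  fall back to the loaded .so
       }

   If every weak reference below is non-NULL, the process was linked against
   libcrypto 1.0.2 directly; otherwise dlsym() is consulted.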
*/ bool has_102_symbols = init_fn && clean_up_fn && update_fn && final_fn && init_ex_fn; if (has_102_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static libcrypto 1.0.2 HMAC symbols"); } else { /* If symbols aren't already found, try to find the requested version */ *(void **)(&init_fn) = dlsym(module, "HMAC_CTX_init"); *(void **)(&clean_up_fn) = dlsym(module, "HMAC_CTX_cleanup"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); *(void **)(&init_ex_fn) = dlsym(module, "HMAC_Init_ex"); if (init_fn) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic libcrypto 1.0.2 HMAC symbols"); } } if (init_fn) { hmac_ctx_table.new_fn = (hmac_ctx_new)s_hmac_ctx_new; hmac_ctx_table.free_fn = s_hmac_ctx_free; hmac_ctx_table.init_fn = init_fn; hmac_ctx_table.clean_up_fn = clean_up_fn; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; hmac_ctx_table.init_ex_fn = init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } #endif return false; } bool s_resolve_hmac_111(void *module) { #if defined(OPENSSL_IS_OPENSSL) hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; hmac_update update_fn = (hmac_update)HMAC_Update; hmac_final final_fn = (hmac_final)HMAC_Final; hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? */ bool has_111_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn; if (has_111_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static libcrypto 1.1.1 HMAC symbols"); } else { *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); *(void **)(&init_ex_fn) = dlsym(module, "HMAC_Init_ex"); if (new_fn) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic libcrypto 1.1.1 HMAC symbols"); } } if (new_fn) { hmac_ctx_table.new_fn = new_fn; hmac_ctx_table.free_fn = free_fn; hmac_ctx_table.init_fn = s_hmac_ctx_init_noop; hmac_ctx_table.clean_up_fn = s_hmac_ctx_clean_up_noop; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; hmac_ctx_table.init_ex_fn = s_hmac_init_ex_openssl; hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } #endif return false; } bool s_resolve_hmac_lc(void *module) { #if defined(OPENSSL_IS_AWSLC) hmac_ctx_init init_fn = (hmac_ctx_init)HMAC_CTX_init; hmac_ctx_clean_up clean_up_fn = (hmac_ctx_clean_up)HMAC_CTX_cleanup; hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; hmac_update update_fn = (hmac_update)HMAC_Update; hmac_final final_fn = (hmac_final)HMAC_Final; hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? */ bool has_awslc_symbols = new_fn && free_fn && update_fn && final_fn && init_fn && init_ex_fn; /* If symbols aren't already found, try to find the requested version */ /* when built as a shared lib, and multiple versions of libcrypto are possibly * available (e.g. 
brazil), select AWS-LC by default for consistency */ if (has_awslc_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static aws-lc HMAC symbols"); } else { *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); *(void **)(&init_ex_fn) = dlsym(module, "HMAC_Init_ex"); if (new_fn) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic aws-lc HMAC symbols"); } } if (new_fn) { /* Fill out the vtable for the requested version */ hmac_ctx_table.new_fn = new_fn; hmac_ctx_table.free_fn = free_fn; hmac_ctx_table.init_fn = init_fn; hmac_ctx_table.clean_up_fn = clean_up_fn; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; hmac_ctx_table.init_ex_fn = s_hmac_init_ex_bssl; hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } #endif return false; } bool s_resolve_hmac_boringssl(void *module) { #if defined(OPENSSL_IS_BORINGSSL) hmac_ctx_new new_fn = (hmac_ctx_new)HMAC_CTX_new; hmac_ctx_free free_fn = (hmac_ctx_free)HMAC_CTX_free; hmac_update update_fn = (hmac_update)HMAC_Update; hmac_final final_fn = (hmac_final)HMAC_Final; hmac_init_ex init_ex_fn = (hmac_init_ex)HMAC_Init_ex; /* were symbols bound by static linking? */ bool has_bssl_symbols = new_fn && free_fn && update_fn && final_fn && init_ex_fn; if (has_bssl_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static boringssl HMAC symbols"); } else { *(void **)(&new_fn) = dlsym(module, "HMAC_CTX_new"); *(void **)(&free_fn) = dlsym(module, "HMAC_CTX_free"); *(void **)(&update_fn) = dlsym(module, "HMAC_Update"); *(void **)(&final_fn) = dlsym(module, "HMAC_Final"); *(void **)(&init_ex_fn) = dlsym(module, "HMAC_Init_ex"); if (new_fn) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic boringssl HMAC symbols"); } } if (new_fn) { hmac_ctx_table.new_fn = new_fn; hmac_ctx_table.free_fn = free_fn; hmac_ctx_table.init_fn = s_hmac_ctx_init_noop; hmac_ctx_table.clean_up_fn = s_hmac_ctx_clean_up_noop; hmac_ctx_table.update_fn = update_fn; hmac_ctx_table.final_fn = final_fn; hmac_ctx_table.init_ex_fn = s_hmac_init_ex_bssl; hmac_ctx_table.impl.init_ex_fn = (crypto_generic_fn_ptr)init_ex_fn; g_aws_openssl_hmac_ctx_table = &hmac_ctx_table; return true; } #endif return false; } static enum aws_libcrypto_version s_resolve_libcrypto_hmac(enum aws_libcrypto_version version, void *module) { switch (version) { case AWS_LIBCRYPTO_LC: return s_resolve_hmac_lc(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_1_1: return s_resolve_hmac_111(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_0_2: return s_resolve_hmac_102(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_BORINGSSL: return s_resolve_hmac_boringssl(module) ? 
version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_NONE: AWS_FATAL_ASSERT(!"Attempted to resolve invalid libcrypto HMAC API version AWS_LIBCRYPTO_NONE"); } return AWS_LIBCRYPTO_NONE; } #if !defined(OPENSSL_IS_AWSLC) /* EVP_MD_CTX API */ /* 1.0.2 NOTE: these are macros in 1.1.x, so we have to undef them to weak link */ # if defined(EVP_MD_CTX_create) # pragma push_macro("EVP_MD_CTX_create") # undef EVP_MD_CTX_create # endif extern EVP_MD_CTX *EVP_MD_CTX_create(void) __attribute__((weak, used)); static evp_md_ctx_new s_EVP_MD_CTX_create = EVP_MD_CTX_create; # if defined(EVP_MD_CTX_create) # pragma pop_macro("EVP_MD_CTX_create") # endif # if defined(EVP_MD_CTX_destroy) # pragma push_macro("EVP_MD_CTX_destroy") # undef EVP_MD_CTX_destroy # endif extern void EVP_MD_CTX_destroy(EVP_MD_CTX *) __attribute__((weak, used)); static evp_md_ctx_free s_EVP_MD_CTX_destroy = EVP_MD_CTX_destroy; # if defined(EVP_MD_CTX_destroy) # pragma pop_macro("EVP_MD_CTX_destroy") # endif #endif /* !OPENSSL_IS_AWSLC */ extern EVP_MD_CTX *EVP_MD_CTX_new(void) __attribute__((weak, used)); extern void EVP_MD_CTX_free(EVP_MD_CTX *) __attribute__((weak, used)); extern int EVP_DigestInit_ex(EVP_MD_CTX *, const EVP_MD *, ENGINE *) __attribute__((weak, used)); extern int EVP_DigestUpdate(EVP_MD_CTX *, const void *, size_t) __attribute__((weak, used)); extern int EVP_DigestFinal_ex(EVP_MD_CTX *, unsigned char *, unsigned int *) __attribute__((weak, used)); bool s_resolve_md_102(void *module) { #if !defined(OPENSSL_IS_AWSLC) evp_md_ctx_new md_create_fn = s_EVP_MD_CTX_create; evp_md_ctx_free md_destroy_fn = s_EVP_MD_CTX_destroy; evp_md_ctx_digest_init_ex md_init_ex_fn = EVP_DigestInit_ex; evp_md_ctx_digest_update md_update_fn = EVP_DigestUpdate; evp_md_ctx_digest_final_ex md_final_ex_fn = EVP_DigestFinal_ex; bool has_102_symbols = md_create_fn && md_destroy_fn && md_init_ex_fn && md_update_fn && md_final_ex_fn; if (has_102_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static libcrypto 1.0.2 EVP_MD symbols"); } else { *(void **)(&md_create_fn) = dlsym(module, "EVP_MD_CTX_create"); *(void **)(&md_destroy_fn) = dlsym(module, "EVP_MD_CTX_destroy"); *(void **)(&md_init_ex_fn) = dlsym(module, "EVP_DigestInit_ex"); *(void **)(&md_update_fn) = dlsym(module, "EVP_DigestUpdate"); *(void **)(&md_final_ex_fn) = dlsym(module, "EVP_DigestFinal_ex"); if (md_create_fn) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic libcrypto 1.0.2 EVP_MD symbols"); } } if (md_create_fn) { evp_md_ctx_table.new_fn = md_create_fn; evp_md_ctx_table.free_fn = md_destroy_fn; evp_md_ctx_table.init_ex_fn = md_init_ex_fn; evp_md_ctx_table.update_fn = md_update_fn; evp_md_ctx_table.final_ex_fn = md_final_ex_fn; g_aws_openssl_evp_md_ctx_table = &evp_md_ctx_table; return true; } #endif return false; } bool s_resolve_md_111(void *module) { #if !defined(OPENSSL_IS_AWSLC) evp_md_ctx_new md_new_fn = EVP_MD_CTX_new; evp_md_ctx_free md_free_fn = EVP_MD_CTX_free; evp_md_ctx_digest_init_ex md_init_ex_fn = EVP_DigestInit_ex; evp_md_ctx_digest_update md_update_fn = EVP_DigestUpdate; evp_md_ctx_digest_final_ex md_final_ex_fn = EVP_DigestFinal_ex; bool has_111_symbols = md_new_fn && md_free_fn && md_init_ex_fn && md_update_fn && md_final_ex_fn; if (has_111_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static libcrypto 1.1.1 EVP_MD symbols"); } else { *(void **)(&md_new_fn) = dlsym(module, "EVP_MD_CTX_new"); *(void **)(&md_free_fn) = dlsym(module, "EVP_MD_CTX_free"); *(void **)(&md_init_ex_fn) = dlsym(module, "EVP_DigestInit_ex"); *(void 
**)(&md_update_fn) = dlsym(module, "EVP_DigestUpdate"); *(void **)(&md_final_ex_fn) = dlsym(module, "EVP_DigestFinal_ex"); if (md_new_fn) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic libcrypto 1.1.1 EVP_MD symbols"); } } if (md_new_fn) { evp_md_ctx_table.new_fn = md_new_fn; evp_md_ctx_table.free_fn = md_free_fn; evp_md_ctx_table.init_ex_fn = md_init_ex_fn; evp_md_ctx_table.update_fn = md_update_fn; evp_md_ctx_table.final_ex_fn = md_final_ex_fn; g_aws_openssl_evp_md_ctx_table = &evp_md_ctx_table; return true; } #endif return false; } bool s_resolve_md_lc(void *module) { #if defined(OPENSSL_IS_AWSLC) evp_md_ctx_new md_new_fn = EVP_MD_CTX_new; evp_md_ctx_new md_create_fn = EVP_MD_CTX_new; evp_md_ctx_free md_free_fn = EVP_MD_CTX_free; evp_md_ctx_free md_destroy_fn = EVP_MD_CTX_destroy; evp_md_ctx_digest_init_ex md_init_ex_fn = EVP_DigestInit_ex; evp_md_ctx_digest_update md_update_fn = EVP_DigestUpdate; evp_md_ctx_digest_final_ex md_final_ex_fn = EVP_DigestFinal_ex; bool has_awslc_symbols = md_new_fn && md_create_fn && md_free_fn && md_destroy_fn && md_init_ex_fn && md_update_fn && md_final_ex_fn; if (has_awslc_symbols) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found static aws-lc libcrypto 1.1.1 EVP_MD symbols"); } else { *(void **)(&md_new_fn) = dlsym(module, "EVP_MD_CTX_new"); *(void **)(&md_free_fn) = dlsym(module, "EVP_MD_CTX_free"); *(void **)(&md_init_ex_fn) = dlsym(module, "EVP_DigestInit_ex"); *(void **)(&md_update_fn) = dlsym(module, "EVP_DigestUpdate"); *(void **)(&md_final_ex_fn) = dlsym(module, "EVP_DigestFinal_ex"); if (md_new_fn) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "found dynamic aws-lc libcrypto 1.1.1 EVP_MD symbols"); } } if (md_new_fn) { /* Add the found symbols to the vtable */ evp_md_ctx_table.new_fn = md_new_fn; evp_md_ctx_table.free_fn = md_free_fn; evp_md_ctx_table.init_ex_fn = md_init_ex_fn; evp_md_ctx_table.update_fn = md_update_fn; evp_md_ctx_table.final_ex_fn = md_final_ex_fn; g_aws_openssl_evp_md_ctx_table = &evp_md_ctx_table; return true; } #endif return false; } bool s_resolve_md_boringssl(void *module) { #if !defined(OPENSSL_IS_AWSLC) return s_resolve_md_111(module); #else return false; #endif } static enum aws_libcrypto_version s_resolve_libcrypto_md(enum aws_libcrypto_version version, void *module) { switch (version) { case AWS_LIBCRYPTO_LC: return s_resolve_md_lc(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_1_1: return s_resolve_md_111(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_1_0_2: return s_resolve_md_102(module) ? version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_BORINGSSL: return s_resolve_md_boringssl(module) ? 
version : AWS_LIBCRYPTO_NONE; case AWS_LIBCRYPTO_NONE: AWS_FATAL_ASSERT(!"Attempted to resolve invalid libcrypto MD API version AWS_LIBCRYPTO_NONE"); } return AWS_LIBCRYPTO_NONE; } static enum aws_libcrypto_version s_resolve_libcrypto_symbols(enum aws_libcrypto_version version, void *module) { enum aws_libcrypto_version found_version = s_resolve_libcrypto_hmac(version, module); if (found_version == AWS_LIBCRYPTO_NONE) { return AWS_LIBCRYPTO_NONE; } found_version = s_resolve_libcrypto_md(found_version, module); if (found_version == AWS_LIBCRYPTO_NONE) { return AWS_LIBCRYPTO_NONE; } return found_version; } static enum aws_libcrypto_version s_resolve_libcrypto_lib(void) { const char *libcrypto_102 = "libcrypto.so.1.0.0"; const char *libcrypto_111 = "libcrypto.so.1.1"; AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "loading libcrypto 1.0.2"); void *module = dlopen(libcrypto_102, RTLD_NOW); if (module) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "resolving against libcrypto 1.0.2"); enum aws_libcrypto_version result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_0_2, module); if (result == AWS_LIBCRYPTO_1_0_2) { return result; } dlclose(module); } else { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto 1.0.2 not found"); } AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "loading libcrypto 1.1.1"); module = dlopen(libcrypto_111, RTLD_NOW); if (module) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "resolving against libcrypto 1.1.1"); enum aws_libcrypto_version result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_1_1, module); if (result == AWS_LIBCRYPTO_1_1_1) { return result; } dlclose(module); } else { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto 1.1.1 not found"); } AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "loading libcrypto.so"); module = dlopen("libcrypto.so", RTLD_NOW); if (module) { unsigned long (*openssl_version_num)(void) = NULL; *(void **)(&openssl_version_num) = dlsym(module, "OpenSSL_version_num"); if (openssl_version_num) { unsigned long version = openssl_version_num(); AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto.so reported version is 0x%lx", version); enum aws_libcrypto_version result = AWS_LIBCRYPTO_NONE; if (version >= 0x10101000L) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "probing libcrypto.so for aws-lc symbols"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_LC, module); if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "probing libcrypto.so for 1.1.1 symbols"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_1_1, module); } } else if (version >= 0x10002000L) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "probing libcrypto.so for 1.0.2 symbols"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_0_2, module); } else { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto.so reported version is unsupported"); } if (result != AWS_LIBCRYPTO_NONE) { return result; } } else { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "Unable to determine version of libcrypto.so"); } dlclose(module); } else { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto.so not found"); } return AWS_LIBCRYPTO_NONE; } static void *s_libcrypto_module = NULL; static enum aws_libcrypto_version s_resolve_libcrypto(void) { /* Try to auto-resolve against what's linked in/process space */ AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "searching process and loaded modules"); void *process = dlopen(NULL, RTLD_NOW); AWS_FATAL_ASSERT(process && "Unable to load symbols from process space"); enum aws_libcrypto_version result = 
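/* For reference, a standalone sketch of the same runtime probe used above in
 * s_resolve_libcrypto_lib() (illustrative only; error handling omitted):
 *
 *     void *so = dlopen("libcrypto.so", RTLD_NOW);
 *     unsigned long (*ver)(void) = NULL;
 *     *(void **)(&ver) = dlsym(so, "OpenSSL_version_num");
 *     if (ver) {
 *         unsigned long v = ver();    pre-3.0 encoding is 0xMNNFFPPS
 *     }
 *
 * The version number packs major, minor, fix, patch and status fields, which is
 * why the comparisons above use 0x10101000L (1.1.1) and 0x10002000L (1.0.2).
 */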
s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_LC, process); if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find aws-lc symbols linked"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_BORINGSSL, process); } if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find boringssl symbols linked"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_0_2, process); } if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find libcrypto 1.0.2 symbols linked"); result = s_resolve_libcrypto_symbols(AWS_LIBCRYPTO_1_1_1, process); } dlclose(process); if (result == AWS_LIBCRYPTO_NONE) { AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "did not find libcrypto 1.1.1 symbols linked"); AWS_LOGF_DEBUG( AWS_LS_CAL_LIBCRYPTO_RESOLVE, "libcrypto symbols were not statically linked, searching for shared libraries"); result = s_resolve_libcrypto_lib(); } return result; } /* Ignore warnings about how CRYPTO_get_locking_callback() always returns NULL on 1.1.1 */ #if !defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ * 10 > 410) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Waddress" #endif /* Openssl 1.0.x requires special handling for its locking callbacks or else it's not thread safe */ #if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) static struct aws_mutex *s_libcrypto_locks = NULL; static void s_locking_fn(int mode, int n, const char *unused0, int unused1) { (void)unused0; (void)unused1; if (mode & CRYPTO_LOCK) { aws_mutex_lock(&s_libcrypto_locks[n]); } else { aws_mutex_unlock(&s_libcrypto_locks[n]); } } static unsigned long s_id_fn(void) { return (unsigned long)aws_thread_current_thread_id(); } #endif void aws_cal_platform_init(struct aws_allocator *allocator) { int version = s_resolve_libcrypto(); AWS_FATAL_ASSERT(version != AWS_LIBCRYPTO_NONE && "libcrypto could not be resolved"); AWS_FATAL_ASSERT(g_aws_openssl_evp_md_ctx_table); AWS_FATAL_ASSERT(g_aws_openssl_hmac_ctx_table); s_libcrypto_allocator = allocator; #if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) /* Ensure that libcrypto 1.0.2 has working locking mechanisms. 
This code is macro'ed * by libcrypto to be a no-op on 1.1.1 */ if (!CRYPTO_get_locking_callback()) { /* on 1.1.1 this is a no-op */ CRYPTO_set_locking_callback(s_locking_fn); if (CRYPTO_get_locking_callback() == s_locking_fn) { s_libcrypto_locks = aws_mem_acquire(allocator, sizeof(struct aws_mutex) * CRYPTO_num_locks()); AWS_FATAL_ASSERT(s_libcrypto_locks); size_t lock_count = (size_t)CRYPTO_num_locks(); for (size_t i = 0; i < lock_count; ++i) { aws_mutex_init(&s_libcrypto_locks[i]); } } } if (!CRYPTO_get_id_callback()) { CRYPTO_set_id_callback(s_id_fn); } #endif } void aws_cal_platform_clean_up(void) { #if !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_IS_BORINGSSL) if (CRYPTO_get_locking_callback() == s_locking_fn) { CRYPTO_set_locking_callback(NULL); size_t lock_count = (size_t)CRYPTO_num_locks(); for (size_t i = 0; i < lock_count; ++i) { aws_mutex_clean_up(&s_libcrypto_locks[i]); } aws_mem_release(s_libcrypto_allocator, s_libcrypto_locks); } if (CRYPTO_get_id_callback() == s_id_fn) { CRYPTO_set_id_callback(NULL); } #endif #if defined(OPENSSL_IS_AWSLC) AWSLC_thread_local_clear(); AWSLC_thread_local_shutdown(); #endif if (s_libcrypto_module) { dlclose(s_libcrypto_module); } s_libcrypto_allocator = NULL; } void aws_cal_platform_thread_clean_up(void) { #if defined(OPENSSL_IS_AWSLC) AWSLC_thread_local_clear(); #endif } #if !defined(__GNUC__) || (__GNUC__ >= 4 && __GNUC_MINOR__ > 1) # pragma GCC diagnostic pop #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/unix/openssl_rsa.c000066400000000000000000000341021456575232400247460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #define OPENSSL_SUPPRESS_DEPRECATED #include #include #if defined(OPENSSL_IS_OPENSSL) /*Error defines were part of evp.h in 1.0.x and were moved to evperr.h in 1.1.0*/ # if OPENSSL_VERSION_NUMBER >= 0x10100000L # include # endif #else # include #endif #include struct lc_rsa_key_pair { struct aws_rsa_key_pair base; EVP_PKEY *key; }; static void s_rsa_destroy_key(void *key_pair) { if (key_pair == NULL) { return; } struct aws_rsa_key_pair *base = key_pair; struct lc_rsa_key_pair *impl = base->impl; if (impl->key != NULL) { EVP_PKEY_free(impl->key); } aws_rsa_key_pair_base_clean_up(base); aws_mem_release(base->allocator, impl); } /* * Transforms evp error code into crt error code and raises it as necessary. * All evp functions follow the same: * >= 1 for success * <= 0 for failure * -2 always indicates incorrect algo for operation */ static int s_reinterpret_evp_error_as_crt(int evp_error, const char *function_name) { if (evp_error > 0) { return AWS_OP_SUCCESS; } /* AWS-LC/BoringSSL error code is uint32_t, but OpenSSL uses unsigned long. */ #if defined(OPENSSL_IS_OPENSSL) uint32_t error = ERR_peek_error(); #else unsigned long error = ERR_peek_error(); #endif int crt_error = AWS_OP_ERR; const char *error_message = ERR_reason_error_string(error); if (evp_error == -2) { crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; goto on_error; } if (ERR_GET_LIB(error) == ERR_LIB_EVP) { switch (ERR_GET_REASON(error)) { case EVP_R_BUFFER_TOO_SMALL: { crt_error = AWS_ERROR_SHORT_BUFFER; goto on_error; } case EVP_R_UNSUPPORTED_ALGORITHM: { crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; goto on_error; } } } crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; on_error: AWS_LOGF_ERROR( AWS_LS_CAL_RSA, "%s() failed. 
returned: %d extended error:%lu(%s) aws_error:%s", function_name, evp_error, (unsigned long)error, error_message == NULL ? "" : error_message, aws_error_name(crt_error)); return aws_raise_error(crt_error); } static int s_set_encryption_ctx_from_algo(EVP_PKEY_CTX *ctx, enum aws_rsa_encryption_algorithm algorithm) { if (algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5) { if (s_reinterpret_evp_error_as_crt( EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { return AWS_OP_ERR; } } else if (algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 || algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512) { if (s_reinterpret_evp_error_as_crt( EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { return AWS_OP_ERR; } const EVP_MD *md = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? EVP_sha256() : EVP_sha512(); if (s_reinterpret_evp_error_as_crt(EVP_PKEY_CTX_set_rsa_oaep_md(ctx, md), "EVP_PKEY_CTX_set_rsa_oaep_md")) { return AWS_OP_ERR; } } else { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } return AWS_OP_SUCCESS; } static int s_rsa_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out) { struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); if (ctx == NULL) { return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); } if (s_reinterpret_evp_error_as_crt(EVP_PKEY_encrypt_init(ctx), "EVP_PKEY_encrypt_init")) { goto on_error; } if (s_set_encryption_ctx_from_algo(ctx, algorithm)) { goto on_error; } size_t needed_buffer_len = 0; if (s_reinterpret_evp_error_as_crt( EVP_PKEY_encrypt(ctx, NULL, &needed_buffer_len, plaintext.ptr, plaintext.len), "EVP_PKEY_encrypt get length")) { goto on_error; } size_t ct_len = out->capacity - out->len; if (needed_buffer_len > ct_len) { /* * OpenSSL 3 seems to no longer fail if the buffer is too short. * Instead it seems to write out enough data to fill the buffer and then * updates the out_len to full buffer. It does not seem to corrupt * memory after the buffer, but behavior is non-ideal. * Let get length needed for buffer from api first and then manually ensure that * buffer we have is big enough. */ aws_raise_error(AWS_ERROR_SHORT_BUFFER); goto on_error; } if (s_reinterpret_evp_error_as_crt( EVP_PKEY_encrypt(ctx, out->buffer + out->len, &ct_len, plaintext.ptr, plaintext.len), "EVP_PKEY_encrypt")) { goto on_error; } out->len += ct_len; EVP_PKEY_CTX_free(ctx); return AWS_OP_SUCCESS; on_error: EVP_PKEY_CTX_free(ctx); return AWS_OP_ERR; } static int s_rsa_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out) { struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); if (ctx == NULL) { return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); } if (s_reinterpret_evp_error_as_crt(EVP_PKEY_decrypt_init(ctx), "EVP_PKEY_decrypt_init")) { goto on_error; } if (s_set_encryption_ctx_from_algo(ctx, algorithm)) { goto on_error; } size_t needed_buffer_len = 0; if (s_reinterpret_evp_error_as_crt( EVP_PKEY_decrypt(ctx, NULL, &needed_buffer_len, ciphertext.ptr, ciphertext.len), "EVP_PKEY_decrypt get length")) { goto on_error; } size_t ct_len = out->capacity - out->len; if (needed_buffer_len > ct_len) { /* * manual short buffer length check for OpenSSL 3. 
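 * The encrypt, decrypt and sign paths all follow the standard EVP two-call idiom,
 * sketched here with error checks elided:
 *
 *     size_t needed = 0;
 *     EVP_PKEY_decrypt(ctx, NULL, &needed, ct, ct_len);        NULL output queries the size
 *     if (needed > out->capacity - out->len) -> AWS_ERROR_SHORT_BUFFER
 *     EVP_PKEY_decrypt(ctx, out->buffer + out->len, &written, ct, ct_len);
 *
 * Asking libcrypto for the size first keeps the short-buffer behavior consistent
 * across OpenSSL 1.0, 1.1 and 3.x.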
* refer to encrypt implementation for more details */ aws_raise_error(AWS_ERROR_SHORT_BUFFER); goto on_error; } if (s_reinterpret_evp_error_as_crt( EVP_PKEY_decrypt(ctx, out->buffer + out->len, &ct_len, ciphertext.ptr, ciphertext.len), "EVP_PKEY_decrypt")) { goto on_error; } out->len += ct_len; EVP_PKEY_CTX_free(ctx); return AWS_OP_SUCCESS; on_error: EVP_PKEY_CTX_free(ctx); return AWS_OP_ERR; } static int s_set_signature_ctx_from_algo(EVP_PKEY_CTX *ctx, enum aws_rsa_signature_algorithm algorithm) { if (algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256) { if (s_reinterpret_evp_error_as_crt( EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { return AWS_OP_ERR; } if (s_reinterpret_evp_error_as_crt( EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()), "EVP_PKEY_CTX_set_signature_md")) { return AWS_OP_ERR; } } else if (algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256) { if (s_reinterpret_evp_error_as_crt( EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), "EVP_PKEY_CTX_set_rsa_padding")) { return AWS_OP_ERR; } #if defined(OPENSSL_IS_BORINGSSL) || OPENSSL_VERSION_NUMBER < 0x10100000L int saltlen = -1; /* RSA_PSS_SALTLEN_DIGEST not defined in BoringSSL and old versions of openssl */ #else int saltlen = RSA_PSS_SALTLEN_DIGEST; #endif if (s_reinterpret_evp_error_as_crt( EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, saltlen), "EVP_PKEY_CTX_set_rsa_pss_saltlen")) { return AWS_OP_ERR; } if (s_reinterpret_evp_error_as_crt( EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()), "EVP_PKEY_CTX_set_signature_md")) { return AWS_OP_ERR; } } else { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } return AWS_OP_SUCCESS; } static int s_rsa_sign( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out) { struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); if (ctx == NULL) { return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); } if (s_reinterpret_evp_error_as_crt(EVP_PKEY_sign_init(ctx), "EVP_PKEY_sign_init")) { goto on_error; } if (s_set_signature_ctx_from_algo(ctx, algorithm)) { goto on_error; } size_t needed_buffer_len = 0; if (s_reinterpret_evp_error_as_crt( EVP_PKEY_sign(ctx, NULL, &needed_buffer_len, digest.ptr, digest.len), "EVP_PKEY_sign get length")) { goto on_error; } size_t ct_len = out->capacity - out->len; if (needed_buffer_len > ct_len) { /* * manual short buffer length check for OpenSSL 3. * refer to encrypt implementation for more details. * OpenSSL3 actually does throw an error here, but error code comes from * component that does not exist in OpenSSL 1.x. So check manually right * now and we can figure out how to handle it better, once we can * properly support OpenSSL 3. 
*/ aws_raise_error(AWS_ERROR_SHORT_BUFFER); goto on_error; } if (s_reinterpret_evp_error_as_crt( EVP_PKEY_sign(ctx, out->buffer + out->len, &ct_len, digest.ptr, digest.len), "EVP_PKEY_sign")) { goto on_error; } out->len += ct_len; EVP_PKEY_CTX_free(ctx); return AWS_OP_SUCCESS; on_error: EVP_PKEY_CTX_free(ctx); return AWS_OP_ERR; } static int s_rsa_verify( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature) { struct lc_rsa_key_pair *key_pair_impl = key_pair->impl; EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key_pair_impl->key, NULL); if (ctx == NULL) { return aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); } if (s_reinterpret_evp_error_as_crt(EVP_PKEY_verify_init(ctx), "EVP_PKEY_verify_init")) { goto on_error; } if (s_set_signature_ctx_from_algo(ctx, algorithm)) { goto on_error; } int error_code = EVP_PKEY_verify(ctx, signature.ptr, signature.len, digest.ptr, digest.len); EVP_PKEY_CTX_free(ctx); /* Verify errors slightly differently from the rest of evp functions. * 0 indicates signature does not pass verification, it's not necessarily an error. */ if (error_code > 0) { return AWS_OP_SUCCESS; } else if (error_code == 0) { return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); } else { return s_reinterpret_evp_error_as_crt(error_code, "EVP_PKEY_verify"); } on_error: EVP_PKEY_CTX_free(ctx); return AWS_OP_ERR; } static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { .encrypt = s_rsa_encrypt, .decrypt = s_rsa_decrypt, .sign = s_rsa_sign, .verify = s_rsa_verify, }; struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor key) { struct lc_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct lc_rsa_key_pair)); aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); key_pair_impl->base.impl = key_pair_impl; key_pair_impl->base.allocator = allocator; aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); RSA *rsa = NULL; EVP_PKEY *private_key = NULL; if (d2i_RSAPrivateKey(&rsa, (const uint8_t **)&key.ptr, key.len) == NULL) { aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); goto on_error; } private_key = EVP_PKEY_new(); if (private_key == NULL || EVP_PKEY_assign_RSA(private_key, rsa) == 0) { RSA_free(rsa); aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); goto on_error; } key_pair_impl->key = private_key; key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; key_pair_impl->base.key_size_in_bits = EVP_PKEY_bits(key_pair_impl->key); return &key_pair_impl->base; on_error: if (private_key) { EVP_PKEY_free(private_key); } s_rsa_destroy_key(&key_pair_impl->base); return NULL; } struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor key) { struct lc_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct lc_rsa_key_pair)); aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); key_pair_impl->base.impl = key_pair_impl; key_pair_impl->base.allocator = allocator; aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); RSA *rsa = NULL; EVP_PKEY *public_key = NULL; if (d2i_RSAPublicKey(&rsa, (const uint8_t **)&key.ptr, key.len) == NULL) { aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); goto on_error; } public_key = EVP_PKEY_new(); if (public_key == NULL || 
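/* Ownership note for this constructor (and the private-key constructor above):
 * EVP_PKEY_assign_RSA() transfers ownership of the RSA handle to the EVP_PKEY on
 * success, so RSA_free() is only called on the failure path and a later
 * EVP_PKEY_free() releases it otherwise. Roughly (error handling trimmed):
 *
 *     RSA *rsa = d2i_RSAPublicKey(NULL, &der_ptr, der_len);
 *     EVP_PKEY *pkey = EVP_PKEY_new();
 *     if (!pkey || EVP_PKEY_assign_RSA(pkey, rsa) != 1) { RSA_free(rsa); goto error; }
 *     ... use pkey ...
 *     EVP_PKEY_free(pkey);    also frees the wrapped RSA
 */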
EVP_PKEY_assign_RSA(public_key, rsa) == 0) { RSA_free(rsa); aws_raise_error(AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED); goto on_error; } key_pair_impl->key = public_key; key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; key_pair_impl->base.key_size_in_bits = EVP_PKEY_bits(key_pair_impl->key); return &key_pair_impl->base; on_error: if (public_key) { EVP_PKEY_free(public_key); } s_rsa_destroy_key(&key_pair_impl->base); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/unix/opensslcrypto_ecc.c000066400000000000000000000277171456575232400261720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #define OPENSSL_SUPPRESS_DEPRECATED #include #include #include #include struct libcrypto_ecc_key { struct aws_ecc_key_pair key_pair; EC_KEY *ec_key; }; static int s_curve_name_to_nid(enum aws_ecc_curve_name curve_name) { switch (curve_name) { case AWS_CAL_ECDSA_P256: return NID_X9_62_prime256v1; case AWS_CAL_ECDSA_P384: return NID_secp384r1; } AWS_FATAL_ASSERT(!"Unsupported elliptic curve name"); return -1; } static void s_key_pair_destroy(struct aws_ecc_key_pair *key_pair) { if (key_pair) { aws_byte_buf_clean_up(&key_pair->pub_x); aws_byte_buf_clean_up(&key_pair->pub_y); aws_byte_buf_clean_up_secure(&key_pair->priv_d); struct libcrypto_ecc_key *key_impl = key_pair->impl; if (key_impl->ec_key) { EC_KEY_free(key_impl->ec_key); } aws_mem_release(key_pair->allocator, key_pair); } } static int s_sign_payload( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *hash, struct aws_byte_buf *signature_output) { struct libcrypto_ecc_key *libcrypto_key_pair = key_pair->impl; unsigned int signature_size = signature_output->capacity - signature_output->len; int ret_val = ECDSA_sign( 0, hash->ptr, hash->len, signature_output->buffer + signature_output->len, &signature_size, libcrypto_key_pair->ec_key); signature_output->len += signature_size; return ret_val == 1 ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } static int s_verify_payload( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *hash, const struct aws_byte_cursor *signature) { struct libcrypto_ecc_key *libcrypto_key_pair = key_pair->impl; return ECDSA_verify(0, hash->ptr, hash->len, signature->ptr, signature->len, libcrypto_key_pair->ec_key) == 1 ? 
AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); } static size_t s_signature_length(const struct aws_ecc_key_pair *key_pair) { struct libcrypto_ecc_key *libcrypto_key_pair = key_pair->impl; return ECDSA_size(libcrypto_key_pair->ec_key); } static int s_fill_in_public_key_info( struct libcrypto_ecc_key *libcrypto_key_pair, const EC_GROUP *group, const EC_POINT *pub_key_point) { BIGNUM *big_num_x = BN_new(); BIGNUM *big_num_y = BN_new(); int ret_val = AWS_OP_ERR; if (EC_POINT_get_affine_coordinates_GFp(group, pub_key_point, big_num_x, big_num_y, NULL) != 1) { aws_raise_error(AWS_ERROR_INVALID_STATE); goto clean_up; } size_t x_coor_size = BN_num_bytes(big_num_x); size_t y_coor_size = BN_num_bytes(big_num_y); if (aws_byte_buf_init(&libcrypto_key_pair->key_pair.pub_x, libcrypto_key_pair->key_pair.allocator, x_coor_size)) { goto clean_up; } if (aws_byte_buf_init(&libcrypto_key_pair->key_pair.pub_y, libcrypto_key_pair->key_pair.allocator, y_coor_size)) { goto clean_up; } BN_bn2bin(big_num_x, libcrypto_key_pair->key_pair.pub_x.buffer); BN_bn2bin(big_num_y, libcrypto_key_pair->key_pair.pub_y.buffer); libcrypto_key_pair->key_pair.pub_x.len = x_coor_size; libcrypto_key_pair->key_pair.pub_y.len = y_coor_size; ret_val = AWS_OP_SUCCESS; clean_up: BN_free(big_num_x); BN_free(big_num_y); return ret_val; } static int s_derive_public_key(struct aws_ecc_key_pair *key_pair) { struct libcrypto_ecc_key *libcrypto_key_pair = key_pair->impl; if (!libcrypto_key_pair->key_pair.priv_d.buffer) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } /* we already have a public key. */ if (libcrypto_key_pair->key_pair.pub_x.len) { return AWS_OP_SUCCESS; } BIGNUM *priv_key_num = BN_bin2bn(libcrypto_key_pair->key_pair.priv_d.buffer, libcrypto_key_pair->key_pair.priv_d.len, NULL); const EC_GROUP *group = EC_KEY_get0_group(libcrypto_key_pair->ec_key); EC_POINT *point = EC_POINT_new(group); EC_POINT_mul(group, point, priv_key_num, NULL, NULL, NULL); BN_free(priv_key_num); EC_KEY_set_public_key(libcrypto_key_pair->ec_key, point); int ret_val = s_fill_in_public_key_info(libcrypto_key_pair, group, point); EC_POINT_free(point); return ret_val; } static struct aws_ecc_key_pair_vtable vtable = { .sign_message = s_sign_payload, .verify_signature = s_verify_payload, .derive_pub_key = s_derive_public_key, .signature_length = s_signature_length, .destroy = s_key_pair_destroy, }; struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key) { size_t key_length = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name); if (priv_key->len != key_length) { AWS_LOGF_ERROR(AWS_LS_CAL_ECC, "Private key length does not match curve's expected length"); aws_raise_error(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM); return NULL; } struct libcrypto_ecc_key *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct libcrypto_ecc_key)); key_impl->ec_key = EC_KEY_new_by_curve_name(s_curve_name_to_nid(curve_name)); key_impl->key_pair.curve_name = curve_name; key_impl->key_pair.allocator = allocator; key_impl->key_pair.vtable = &vtable; key_impl->key_pair.impl = key_impl; aws_atomic_init_int(&key_impl->key_pair.ref_count, 1); aws_byte_buf_init_copy_from_cursor(&key_impl->key_pair.priv_d, allocator, *priv_key); BIGNUM *priv_key_num = BN_bin2bn(key_impl->key_pair.priv_d.buffer, key_impl->key_pair.priv_d.len, NULL); if (!EC_KEY_set_private_key(key_impl->ec_key, priv_key_num)) { AWS_LOGF_ERROR(AWS_LS_CAL_ECC, 
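/* Elliptic-curve background for s_derive_public_key() above, as a short sketch:
 * the public key is the scalar multiple Q = d * G, where d is the private scalar
 * (priv_d) and G is the curve's base point, which is exactly what
 *
 *     EC_POINT_mul(group, point, priv_key_num, NULL, NULL, NULL);
 *
 * computes (the NULL pair means no additional q * m term is added). The affine
 * x and y coordinates of that point are then exported with BN_bn2bin() into
 * pub_x and pub_y.
 */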
"Failed to set openssl private key"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); BN_free(priv_key_num); s_key_pair_destroy(&key_impl->key_pair); return NULL; } BN_free(priv_key_num); return &key_impl->key_pair; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name) { struct libcrypto_ecc_key *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct libcrypto_ecc_key)); key_impl->ec_key = EC_KEY_new_by_curve_name(s_curve_name_to_nid(curve_name)); key_impl->key_pair.curve_name = curve_name; key_impl->key_pair.allocator = allocator; key_impl->key_pair.vtable = &vtable; key_impl->key_pair.impl = key_impl; aws_atomic_init_int(&key_impl->key_pair.ref_count, 1); if (EC_KEY_generate_key(key_impl->ec_key) != 1) { goto error; } const EC_POINT *pub_key_point = EC_KEY_get0_public_key(key_impl->ec_key); const EC_GROUP *group = EC_KEY_get0_group(key_impl->ec_key); const BIGNUM *private_key_num = EC_KEY_get0_private_key(key_impl->ec_key); size_t priv_key_size = BN_num_bytes(private_key_num); if (aws_byte_buf_init(&key_impl->key_pair.priv_d, allocator, priv_key_size)) { goto error; } BN_bn2bin(private_key_num, key_impl->key_pair.priv_d.buffer); key_impl->key_pair.priv_d.len = priv_key_size; if (!s_fill_in_public_key_info(key_impl, group, pub_key_point)) { return &key_impl->key_pair; } error: s_key_pair_destroy(&key_impl->key_pair); return NULL; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y) { struct libcrypto_ecc_key *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct libcrypto_ecc_key)); BIGNUM *pub_x_num = NULL; BIGNUM *pub_y_num = NULL; EC_POINT *point = NULL; if (!key_impl) { return NULL; } key_impl->ec_key = EC_KEY_new_by_curve_name(s_curve_name_to_nid(curve_name)); key_impl->key_pair.curve_name = curve_name; key_impl->key_pair.allocator = allocator; key_impl->key_pair.vtable = &vtable; key_impl->key_pair.impl = key_impl; aws_atomic_init_int(&key_impl->key_pair.ref_count, 1); if (aws_byte_buf_init_copy_from_cursor(&key_impl->key_pair.pub_x, allocator, *public_key_x)) { s_key_pair_destroy(&key_impl->key_pair); return NULL; } if (aws_byte_buf_init_copy_from_cursor(&key_impl->key_pair.pub_y, allocator, *public_key_y)) { s_key_pair_destroy(&key_impl->key_pair); return NULL; } pub_x_num = BN_bin2bn(public_key_x->ptr, public_key_x->len, NULL); pub_y_num = BN_bin2bn(public_key_y->ptr, public_key_y->len, NULL); const EC_GROUP *group = EC_KEY_get0_group(key_impl->ec_key); point = EC_POINT_new(group); if (EC_POINT_set_affine_coordinates_GFp(group, point, pub_x_num, pub_y_num, NULL) != 1) { goto error; } if (EC_KEY_set_public_key(key_impl->ec_key, point) != 1) { goto error; } EC_POINT_free(point); BN_free(pub_x_num); BN_free(pub_y_num); return &key_impl->key_pair; error: if (point) { EC_POINT_free(point); } if (pub_x_num) { BN_free(pub_x_num); } if (pub_y_num) { BN_free(pub_y_num); } s_key_pair_destroy(&key_impl->key_pair); return NULL; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys) { struct aws_ecc_key_pair *key = NULL; struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, *encoded_keys); if (!decoder) { return NULL; } struct aws_byte_cursor pub_x; struct aws_byte_cursor pub_y; struct aws_byte_cursor priv_d; enum aws_ecc_curve_name curve_name; if 
(aws_der_decoder_load_ecc_key_pair(decoder, &pub_x, &pub_y, &priv_d, &curve_name)) { goto error; } if (priv_d.ptr) { struct libcrypto_ecc_key *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct libcrypto_ecc_key)); key_impl->key_pair.curve_name = curve_name; /* as awkward as it seems, there's not a great way to manually set the public key, so let openssl just parse * the der document manually now that we know what parts are what. */ if (!d2i_ECPrivateKey(&key_impl->ec_key, (const unsigned char **)&encoded_keys->ptr, encoded_keys->len)) { aws_mem_release(allocator, key_impl); aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); goto error; } key_impl->key_pair.allocator = allocator; key_impl->key_pair.vtable = &vtable; key_impl->key_pair.impl = key_impl; aws_atomic_init_int(&key_impl->key_pair.ref_count, 1); key = &key_impl->key_pair; struct aws_byte_buf temp_buf; AWS_ZERO_STRUCT(temp_buf); if (pub_x.ptr) { temp_buf = aws_byte_buf_from_array(pub_x.ptr, pub_x.len); if (aws_byte_buf_init_copy(&key->pub_x, allocator, &temp_buf)) { goto error; } } if (pub_y.ptr) { temp_buf = aws_byte_buf_from_array(pub_y.ptr, pub_y.len); if (aws_byte_buf_init_copy(&key->pub_y, allocator, &temp_buf)) { goto error; } } if (priv_d.ptr) { temp_buf = aws_byte_buf_from_array(priv_d.ptr, priv_d.len); if (aws_byte_buf_init_copy(&key->priv_d, allocator, &temp_buf)) { goto error; } } } else { key = aws_ecc_key_pair_new_from_public_key(allocator, curve_name, &pub_x, &pub_y); if (!key) { goto error; } } aws_der_decoder_destroy(decoder); return key; error: aws_der_decoder_destroy(decoder); s_key_pair_destroy(key); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/unix/opensslcrypto_hash.c000066400000000000000000000110161456575232400263440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include static void s_destroy(struct aws_hash *hash); static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output); static struct aws_hash_vtable s_md5_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "MD5", .provider = "OpenSSL Compatible libcrypto", }; static struct aws_hash_vtable s_sha256_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA256", .provider = "OpenSSL Compatible libcrypto", }; static struct aws_hash_vtable s_sha1_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA1", .provider = "OpenSSL Compatible libcrypto", }; static void s_destroy(struct aws_hash *hash) { if (hash == NULL) { return; } EVP_MD_CTX *ctx = hash->impl; if (ctx != NULL) { g_aws_openssl_evp_md_ctx_table->free_fn(ctx); } aws_mem_release(hash->allocator, hash); } struct aws_hash *aws_md5_default_new(struct aws_allocator *allocator) { struct aws_hash *hash = aws_mem_acquire(allocator, sizeof(struct aws_hash)); if (!hash) { return NULL; } hash->allocator = allocator; hash->vtable = &s_md5_vtable; hash->digest_size = AWS_MD5_LEN; EVP_MD_CTX *ctx = g_aws_openssl_evp_md_ctx_table->new_fn(); hash->impl = ctx; hash->good = true; if (!hash->impl) { s_destroy(hash); aws_raise_error(AWS_ERROR_OOM); return NULL; } if (!g_aws_openssl_evp_md_ctx_table->init_ex_fn(ctx, EVP_md5(), NULL)) { s_destroy(hash); aws_raise_error(AWS_ERROR_UNKNOWN); return NULL; } return hash; } struct aws_hash *aws_sha256_default_new(struct aws_allocator *allocator) { struct aws_hash *hash = aws_mem_acquire(allocator, sizeof(struct aws_hash)); if (!hash) { return NULL; } hash->allocator = allocator; hash->vtable = &s_sha256_vtable; hash->digest_size = AWS_SHA256_LEN; EVP_MD_CTX *ctx = g_aws_openssl_evp_md_ctx_table->new_fn(); hash->impl = ctx; hash->good = true; if (!hash->impl) { s_destroy(hash); aws_raise_error(AWS_ERROR_OOM); return NULL; } if (!g_aws_openssl_evp_md_ctx_table->init_ex_fn(ctx, EVP_sha256(), NULL)) { s_destroy(hash); aws_raise_error(AWS_ERROR_UNKNOWN); return NULL; } return hash; } struct aws_hash *aws_sha1_default_new(struct aws_allocator *allocator) { struct aws_hash *hash = aws_mem_acquire(allocator, sizeof(struct aws_hash)); if (!hash) { return NULL; } hash->allocator = allocator; hash->vtable = &s_sha1_vtable; hash->digest_size = AWS_SHA1_LEN; EVP_MD_CTX *ctx = g_aws_openssl_evp_md_ctx_table->new_fn(); hash->impl = ctx; hash->good = true; if (!hash->impl) { s_destroy(hash); aws_raise_error(AWS_ERROR_OOM); return NULL; } if (!g_aws_openssl_evp_md_ctx_table->init_ex_fn(ctx, EVP_sha1(), NULL)) { s_destroy(hash); aws_raise_error(AWS_ERROR_UNKNOWN); return NULL; } return hash; } static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } EVP_MD_CTX *ctx = hash->impl; if (AWS_LIKELY(g_aws_openssl_evp_md_ctx_table->update_fn(ctx, to_hash->ptr, to_hash->len))) { return AWS_OP_SUCCESS; } hash->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } EVP_MD_CTX *ctx = hash->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hash->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if 
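/* A minimal caller-side sketch, assuming the generic aws_hash wrappers from
 * aws/cal/hash.h (aws_sha256_new, aws_hash_update, aws_hash_finalize,
 * aws_hash_destroy) dispatch through this vtable:
 *
 *     struct aws_hash *h = aws_sha256_new(allocator);
 *     aws_hash_update(h, &data_cursor);
 *     uint8_t digest[AWS_SHA256_LEN];
 *     struct aws_byte_buf out = aws_byte_buf_from_empty_array(digest, sizeof(digest));
 *     aws_hash_finalize(h, &out, 0);        0 = do not truncate the digest
 *     aws_hash_destroy(h);
 *
 * finalize marks the hash as no longer usable (good = false), so a fresh aws_hash
 * is needed for each digest.
 */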
(AWS_LIKELY(g_aws_openssl_evp_md_ctx_table->final_ex_fn( ctx, output->buffer + output->len, (unsigned int *)&buffer_len))) { output->len += hash->digest_size; hash->good = false; return AWS_OP_SUCCESS; } hash->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/unix/opensslcrypto_hmac.c000066400000000000000000000062631456575232400263410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include static void s_destroy(struct aws_hmac *hmac); static int s_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac); static int s_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output); static struct aws_hmac_vtable s_sha256_hmac_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA256 HMAC", .provider = "OpenSSL Compatible libcrypto", }; static void s_destroy(struct aws_hmac *hmac) { if (hmac == NULL) { return; } HMAC_CTX *ctx = hmac->impl; if (ctx != NULL) { g_aws_openssl_hmac_ctx_table->free_fn(ctx); } aws_mem_release(hmac->allocator, hmac); } /* typedef struct hmac_ctx_st { const EVP_MD *md; EVP_MD_CTX md_ctx; EVP_MD_CTX i_ctx; EVP_MD_CTX o_ctx; unsigned int key_length; unsigned char key[HMAC_MAX_MD_CBLOCK]; } HMAC_CTX; */ #define SIZEOF_OPENSSL_HMAC_CTX 300 /* <= 288 on 64 bit systems with openssl 1.0.* */ struct aws_hmac *aws_sha256_hmac_default_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret) { AWS_ASSERT(secret->ptr); struct aws_hmac *hmac = aws_mem_acquire(allocator, sizeof(struct aws_hmac)); if (!hmac) { return NULL; } hmac->allocator = allocator; hmac->vtable = &s_sha256_hmac_vtable; hmac->digest_size = AWS_SHA256_HMAC_LEN; HMAC_CTX *ctx = NULL; ctx = g_aws_openssl_hmac_ctx_table->new_fn(); if (!ctx) { aws_raise_error(AWS_ERROR_OOM); aws_mem_release(allocator, hmac); return NULL; } g_aws_openssl_hmac_ctx_table->init_fn(ctx); hmac->impl = ctx; hmac->good = true; if (!g_aws_openssl_hmac_ctx_table->init_ex_fn(ctx, secret->ptr, secret->len, EVP_sha256(), NULL)) { s_destroy(hmac); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } return hmac; } static int s_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hmac) { if (!hmac->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } HMAC_CTX *ctx = hmac->impl; if (AWS_LIKELY(g_aws_openssl_hmac_ctx_table->update_fn(ctx, to_hmac->ptr, to_hmac->len))) { return AWS_OP_SUCCESS; } hmac->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } static int s_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output) { if (!hmac->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } HMAC_CTX *ctx = hmac->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hmac->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (AWS_LIKELY( g_aws_openssl_hmac_ctx_table->final_fn(ctx, output->buffer + output->len, (unsigned int *)&buffer_len))) { hmac->good = false; output->len += hmac->digest_size; return AWS_OP_SUCCESS; } hmac->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/windows/000077500000000000000000000000001456575232400227615ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/windows/bcrypt_aes.c000066400000000000000000001247401456575232400252700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* keep the space to prevent formatters from reordering this with the Windows.h header. */ #include #define NT_SUCCESS(status) ((NTSTATUS)status >= 0) /* handles for AES modes and algorithms we'll be using. These are initialized once and allowed to leak. */ static aws_thread_once s_aes_thread_once = AWS_THREAD_ONCE_STATIC_INIT; static BCRYPT_ALG_HANDLE s_aes_cbc_algorithm_handle = NULL; static BCRYPT_ALG_HANDLE s_aes_gcm_algorithm_handle = NULL; static BCRYPT_ALG_HANDLE s_aes_ctr_algorithm_handle = NULL; static BCRYPT_ALG_HANDLE s_aes_keywrap_algorithm_handle = NULL; struct aes_bcrypt_cipher { struct aws_symmetric_cipher cipher; BCRYPT_ALG_HANDLE alg_handle; /* the loaded key handle. */ BCRYPT_KEY_HANDLE key_handle; /* Used for GCM mode to store IV, tag, and aad */ BCRYPT_AUTHENTICATED_CIPHER_MODE_INFO *auth_info_ptr; /* Updated on the fly for things like constant-time CBC padding and GCM hash chaining */ DWORD cipher_flags; /* For things to work, they have to be in 16 byte chunks in several scenarios. Use this Buffer for storing excess bytes until we have 16 bytes to operate on. */ struct aws_byte_buf overflow; /* This gets updated as the algorithms run so it isn't the original IV. That's why its separate */ struct aws_byte_buf working_iv; /* A buffer to keep around for the GMAC for GCM. */ struct aws_byte_buf working_mac_buffer; }; static void s_load_alg_handles(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ NTSTATUS status = BCryptOpenAlgorithmProvider(&s_aes_cbc_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0); AWS_FATAL_ASSERT(s_aes_cbc_algorithm_handle && "BCryptOpenAlgorithmProvider() failed"); status = BCryptSetProperty( s_aes_cbc_algorithm_handle, BCRYPT_CHAINING_MODE, (PUCHAR)BCRYPT_CHAIN_MODE_CBC, (ULONG)(wcslen(BCRYPT_CHAIN_MODE_CBC) + 1), 0); AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for CBC chaining mode failed"); /* Set up GCM algorithm */ status = BCryptOpenAlgorithmProvider(&s_aes_gcm_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0); AWS_FATAL_ASSERT(s_aes_gcm_algorithm_handle && "BCryptOpenAlgorithmProvider() failed"); status = BCryptSetProperty( s_aes_gcm_algorithm_handle, BCRYPT_CHAINING_MODE, (PUCHAR)BCRYPT_CHAIN_MODE_GCM, (ULONG)(wcslen(BCRYPT_CHAIN_MODE_GCM) + 1), 0); AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for GCM chaining mode failed"); /* Setup CTR algorithm */ status = BCryptOpenAlgorithmProvider(&s_aes_ctr_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0); AWS_FATAL_ASSERT(s_aes_ctr_algorithm_handle && "BCryptOpenAlgorithmProvider() failed"); /* This is ECB because windows doesn't do CTR mode for you. Instead we use ECB and XOR the encrypted IV and data to operate on for each block. 
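   In other words, CTR is built here as a stream cipher on top of ECB: for block i,
   keystream_i = AES-ECB-Encrypt(key, counter_block_i) and
   ciphertext_i = plaintext_i XOR keystream_i, where counter_block_i is the IV with
   its trailing counter bytes incremented (big-endian) once per 16 byte block.
   Decryption is the identical operation, which is why encrypt and decrypt can share
   the same code path.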
*/ status = BCryptSetProperty( s_aes_ctr_algorithm_handle, BCRYPT_CHAINING_MODE, (PUCHAR)BCRYPT_CHAIN_MODE_ECB, (ULONG)(wcslen(BCRYPT_CHAIN_MODE_ECB) + 1), 0); AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for ECB chaining mode failed"); /* Setup KEYWRAP algorithm */ status = BCryptOpenAlgorithmProvider(&s_aes_keywrap_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0); AWS_FATAL_ASSERT(s_aes_ctr_algorithm_handle && "BCryptOpenAlgorithmProvider() failed"); AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for KeyWrap failed"); } static BCRYPT_KEY_HANDLE s_import_key_blob( BCRYPT_ALG_HANDLE algHandle, struct aws_allocator *allocator, struct aws_byte_buf *key) { NTSTATUS status = 0; BCRYPT_KEY_DATA_BLOB_HEADER key_data; key_data.dwMagic = BCRYPT_KEY_DATA_BLOB_MAGIC; key_data.dwVersion = BCRYPT_KEY_DATA_BLOB_VERSION1; key_data.cbKeyData = (ULONG)key->len; struct aws_byte_buf key_data_buf; aws_byte_buf_init(&key_data_buf, allocator, sizeof(key_data) + key->len); aws_byte_buf_write(&key_data_buf, (const uint8_t *)&key_data, sizeof(key_data)); aws_byte_buf_write(&key_data_buf, key->buffer, key->len); BCRYPT_KEY_HANDLE key_handle; status = BCryptImportKey( algHandle, NULL, BCRYPT_KEY_DATA_BLOB, &key_handle, NULL, 0, key_data_buf.buffer, (ULONG)key_data_buf.len, 0); aws_byte_buf_clean_up_secure(&key_data_buf); if (!NT_SUCCESS(status)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } return key_handle; } static void s_aes_default_destroy(struct aws_symmetric_cipher *cipher) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; aws_byte_buf_clean_up_secure(&cipher->key); aws_byte_buf_clean_up_secure(&cipher->iv); aws_byte_buf_clean_up_secure(&cipher->tag); aws_byte_buf_clean_up_secure(&cipher->aad); /* clean_up_secure exists in versions of aws-c-common that don't check that the buffer has a buffer and an allocator before freeing the memory. Instead, check here. If it's set the buffer was owned and needs to be cleaned up, otherwise it can just be dropped as it was an alias.*/ if (cipher_impl->working_iv.allocator) { aws_byte_buf_clean_up_secure(&cipher_impl->working_iv); } aws_byte_buf_clean_up_secure(&cipher_impl->overflow); aws_byte_buf_clean_up_secure(&cipher_impl->working_mac_buffer); if (cipher_impl->key_handle) { BCryptDestroyKey(cipher_impl->key_handle); cipher_impl->key_handle = NULL; } if (cipher_impl->auth_info_ptr) { aws_mem_release(cipher->allocator, cipher_impl->auth_info_ptr); cipher_impl->auth_info_ptr = NULL; } aws_mem_release(cipher->allocator, cipher_impl); } /* just a utility function for setting up windows Ciphers and keys etc.... Handles copying key/iv etc... data to the right buffers and then setting them on the windows handles used for the encryption operations. 
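   For reference, the BCRYPT_KEY_DATA_BLOB that s_import_key_blob above hands to BCryptImportKey is
   just a small header followed immediately by the raw key bytes. A minimal sketch of the same
   packing, using only the documented CNG types (raw_key_bytes is a placeholder for a 32-byte
   AES-256 key; headers and error handling omitted):

       BCRYPT_KEY_DATA_BLOB_HEADER header;
       header.dwMagic = BCRYPT_KEY_DATA_BLOB_MAGIC;
       header.dwVersion = BCRYPT_KEY_DATA_BLOB_VERSION1;
       header.cbKeyData = 32;                                 // length of the raw key that follows

       uint8_t blob[sizeof(header) + 32];
       memcpy(blob, &header, sizeof(header));                 // header first
       memcpy(blob + sizeof(header), raw_key_bytes, 32);      // then the key material
       // blob and sizeof(blob) are what BCryptImportKey receives alongside BCRYPT_KEY_DATA_BLOB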
*/ static int s_initialize_cipher_materials( struct aes_bcrypt_cipher *cipher, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *tag, const struct aws_byte_cursor *aad, size_t iv_size, bool is_ctr_mode, bool is_gcm) { if (!cipher->cipher.key.len) { if (key) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher.key, cipher->cipher.allocator, *key); } else { aws_byte_buf_init(&cipher->cipher.key, cipher->cipher.allocator, AWS_AES_256_KEY_BYTE_LEN); aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher.key); } } if (!cipher->cipher.iv.len && iv_size) { if (iv) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher.iv, cipher->cipher.allocator, *iv); } else { aws_byte_buf_init(&cipher->cipher.iv, cipher->cipher.allocator, iv_size); aws_symmetric_cipher_generate_initialization_vector(iv_size, is_ctr_mode, &cipher->cipher.iv); } } /* these fields are only used in GCM mode. */ if (is_gcm) { if (!cipher->cipher.tag.len) { if (tag) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher.tag, cipher->cipher.allocator, *tag); } else { aws_byte_buf_init(&cipher->cipher.tag, cipher->cipher.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); aws_byte_buf_secure_zero(&cipher->cipher.tag); /* windows handles this, just go ahead and tell the API it's got a length. */ cipher->cipher.tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE; } } if (!cipher->cipher.aad.len) { if (aad) { aws_byte_buf_init_copy_from_cursor(&cipher->cipher.aad, cipher->cipher.allocator, *aad); } } if (!cipher->working_mac_buffer.len) { aws_byte_buf_init(&cipher->working_mac_buffer, cipher->cipher.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); aws_byte_buf_secure_zero(&cipher->working_mac_buffer); /* windows handles this, just go ahead and tell the API it's got a length. */ cipher->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; } } cipher->key_handle = s_import_key_blob(cipher->alg_handle, cipher->cipher.allocator, &cipher->cipher.key); if (!cipher->key_handle) { cipher->cipher.good = false; return AWS_OP_ERR; } cipher->cipher_flags = 0; /* In GCM mode, the IV is set on the auth info pointer and a working copy is passed to each encryt call. CBC and CTR mode function differently here and the IV is set on the key itself. */ if (!is_gcm && cipher->cipher.iv.len) { NTSTATUS status = BCryptSetProperty( cipher->key_handle, BCRYPT_INITIALIZATION_VECTOR, cipher->cipher.iv.buffer, (ULONG)cipher->cipher.iv.len, 0); if (!NT_SUCCESS(status)) { cipher->cipher.good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } else if (is_gcm) { cipher->auth_info_ptr = aws_mem_acquire(cipher->cipher.allocator, sizeof(BCRYPT_AUTHENTICATED_CIPHER_MODE_INFO)); /* Create a new authenticated cipher mode info object for GCM mode */ BCRYPT_INIT_AUTH_MODE_INFO(*cipher->auth_info_ptr); cipher->auth_info_ptr->pbNonce = cipher->cipher.iv.buffer; cipher->auth_info_ptr->cbNonce = (ULONG)cipher->cipher.iv.len; cipher->auth_info_ptr->dwFlags = BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG; cipher->auth_info_ptr->pbTag = cipher->cipher.tag.buffer; cipher->auth_info_ptr->cbTag = (ULONG)cipher->cipher.tag.len; cipher->auth_info_ptr->pbMacContext = cipher->working_mac_buffer.buffer; cipher->auth_info_ptr->cbMacContext = (ULONG)cipher->working_mac_buffer.len; if (cipher->cipher.aad.len) { cipher->auth_info_ptr->pbAuthData = (PUCHAR)cipher->cipher.aad.buffer; cipher->auth_info_ptr->cbAuthData = (ULONG)cipher->cipher.aad.len; } } return AWS_OP_SUCCESS; } /* Free up as few resources as possible so we can quickly reuse the cipher. 
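   From the caller's side this enables the reset-and-reuse pattern of the public aws-c-cal API. A
   minimal sketch, assuming the aws_symmetric_cipher_* and aws_aes_cbc_256_new entry points, with
   error checks omitted:

       struct aws_allocator *allocator = aws_default_allocator();
       aws_cal_library_init(allocator);

       struct aws_byte_cursor plaintext_cur = aws_byte_cursor_from_c_str("hello crt");
       struct aws_byte_buf ciphertext_buf;
       aws_byte_buf_init(&ciphertext_buf, allocator, 256);
       struct aws_byte_buf decrypted_buf;
       aws_byte_buf_init(&decrypted_buf, allocator, 256);

       // NULL key and IV ask the cipher to generate them
       struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, NULL, NULL);

       aws_symmetric_cipher_encrypt(cipher, plaintext_cur, &ciphertext_buf);
       aws_symmetric_cipher_finalize_encryption(cipher, &ciphertext_buf);

       aws_symmetric_cipher_reset(cipher);   // clears the reusable state so the cipher can run again
       aws_symmetric_cipher_decrypt(cipher, aws_byte_cursor_from_buf(&ciphertext_buf), &decrypted_buf);
       aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf);

       aws_symmetric_cipher_destroy(cipher);
       aws_byte_buf_clean_up_secure(&decrypted_buf);
       aws_byte_buf_clean_up(&ciphertext_buf);
       aws_cal_library_clean_up();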
*/ static void s_clear_reusable_components(struct aws_symmetric_cipher *cipher) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; bool working_iv_optimized = cipher->iv.buffer == cipher_impl->working_iv.buffer; if (!working_iv_optimized) { aws_byte_buf_secure_zero(&cipher_impl->working_iv); } /* These can't always be reused in the next operation, so go ahead and destroy it and create another. */ if (cipher_impl->key_handle) { BCryptDestroyKey(cipher_impl->key_handle); cipher_impl->key_handle = NULL; } if (cipher_impl->auth_info_ptr) { aws_mem_release(cipher->allocator, cipher_impl->auth_info_ptr); cipher_impl->auth_info_ptr = NULL; } aws_byte_buf_secure_zero(&cipher_impl->overflow); aws_byte_buf_secure_zero(&cipher_impl->working_mac_buffer); /* windows handles this, just go ahead and tell the API it's got a length. */ cipher_impl->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; } static int s_reset_cbc_cipher(struct aws_symmetric_cipher *cipher) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; s_clear_reusable_components(cipher); return s_initialize_cipher_materials( cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false); } static int s_reset_ctr_cipher(struct aws_symmetric_cipher *cipher) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; s_clear_reusable_components(cipher); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_buf(&cipher->iv); /* reset the working iv back to the original IV. We do this because we're manually maintaining the counter. */ aws_byte_buf_append_dynamic(&cipher_impl->working_iv, &iv_cur); return s_initialize_cipher_materials( cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false); } static int s_reset_gcm_cipher(struct aws_symmetric_cipher *cipher) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; s_clear_reusable_components(cipher); return s_initialize_cipher_materials( cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true); } static int s_aes_default_encrypt( struct aws_symmetric_cipher *cipher, const struct aws_byte_cursor *to_encrypt, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; if (to_encrypt->len == 0) { return AWS_OP_SUCCESS; } size_t predicted_write_length = cipher_impl->cipher_flags & BCRYPT_BLOCK_PADDING ? to_encrypt->len + (AWS_AES_256_CIPHER_BLOCK_SIZE - (to_encrypt->len % AWS_AES_256_CIPHER_BLOCK_SIZE)) : to_encrypt->len; ULONG length_written = (ULONG)(predicted_write_length); if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, predicted_write_length)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } PUCHAR iv = NULL; ULONG iv_size = 0; if (cipher_impl->auth_info_ptr) { iv = cipher_impl->working_iv.buffer; /* this is looking for buffer size, and the working_iv has only been written to by windows the GCM case. * So use capacity rather than length */ iv_size = (ULONG)cipher_impl->working_iv.capacity; } /* iv was set on the key itself, so we don't need to pass it here. */ NTSTATUS status = BCryptEncrypt( cipher_impl->key_handle, to_encrypt->ptr, (ULONG)to_encrypt->len, cipher_impl->auth_info_ptr, iv, iv_size, out->buffer + out->len, (ULONG)(out->capacity - out->len), &length_written, cipher_impl->cipher_flags); if (!NT_SUCCESS(status)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += length_written; return AWS_OP_SUCCESS; } /* manages making sure encryption operations can operate on 16 byte blocks. 
Stores the excess in the overflow buffer and moves stuff around each time to make sure everything is in order. */ static struct aws_byte_buf s_fill_in_overflow( struct aws_symmetric_cipher *cipher, const struct aws_byte_cursor *to_operate) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; static const size_t RESERVE_SIZE = AWS_AES_256_CIPHER_BLOCK_SIZE * 2; cipher_impl->cipher_flags = 0; struct aws_byte_buf final_to_operate_on; AWS_ZERO_STRUCT(final_to_operate_on); if (cipher_impl->overflow.len > 0) { aws_byte_buf_init_copy(&final_to_operate_on, cipher->allocator, &cipher_impl->overflow); aws_byte_buf_append_dynamic(&final_to_operate_on, to_operate); aws_byte_buf_secure_zero(&cipher_impl->overflow); } else { aws_byte_buf_init_copy_from_cursor(&final_to_operate_on, cipher->allocator, *to_operate); } size_t overflow = final_to_operate_on.len % RESERVE_SIZE; if (final_to_operate_on.len > RESERVE_SIZE) { size_t offset = overflow == 0 ? RESERVE_SIZE : overflow; struct aws_byte_cursor slice_for_overflow = aws_byte_cursor_from_buf(&final_to_operate_on); aws_byte_cursor_advance(&slice_for_overflow, final_to_operate_on.len - offset); aws_byte_buf_append_dynamic(&cipher_impl->overflow, &slice_for_overflow); final_to_operate_on.len -= offset; } else { struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_operate_on); aws_byte_buf_append_dynamic(&cipher_impl->overflow, &final_cur); aws_byte_buf_clean_up_secure(&final_to_operate_on); } return final_to_operate_on; } static int s_aes_cbc_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out) { struct aws_byte_buf final_to_encrypt = s_fill_in_overflow(cipher, &to_encrypt); struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_encrypt); int ret_val = s_aes_default_encrypt(cipher, &final_cur, out); aws_byte_buf_clean_up_secure(&final_to_encrypt); return ret_val; } static int s_aes_cbc_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; if (cipher->good && cipher_impl->overflow.len > 0) { cipher_impl->cipher_flags = BCRYPT_BLOCK_PADDING; /* take the rest of the overflow and turn padding on so the remainder is properly padded without timing attack vulnerabilities. */ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); int ret_val = s_aes_default_encrypt(cipher, &remaining_cur, out); aws_byte_buf_secure_zero(&cipher_impl->overflow); return ret_val; } return AWS_OP_SUCCESS; } static int s_default_aes_decrypt( struct aws_symmetric_cipher *cipher, const struct aws_byte_cursor *to_decrypt, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; if (to_decrypt->len == 0) { return AWS_OP_SUCCESS; } PUCHAR iv = NULL; ULONG iv_size = 0; if (cipher_impl->auth_info_ptr) { iv = cipher_impl->working_iv.buffer; /* this is looking for buffer size, and the working_iv has only been written to by windows the GCM case. * So use capacity rather than length */ iv_size = (ULONG)cipher_impl->working_iv.capacity; } size_t predicted_write_length = to_decrypt->len; ULONG length_written = (ULONG)(predicted_write_length); if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, predicted_write_length)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* iv was set on the key itself, so we don't need to pass it here. 
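   (For the non-GCM modes the IV was copied onto the key handle earlier, in
   s_initialize_cipher_materials, via BCryptSetProperty with BCRYPT_INITIALIZATION_VECTOR; only GCM
   routes its nonce through auth_info_ptr, which is why iv and iv_size are populated solely in that
   branch above.)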
*/ NTSTATUS status = BCryptDecrypt( cipher_impl->key_handle, to_decrypt->ptr, (ULONG)to_decrypt->len, cipher_impl->auth_info_ptr, iv, iv_size, out->buffer + out->len, (ULONG)(out->capacity - out->len), &length_written, cipher_impl->cipher_flags); if (!NT_SUCCESS(status)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } out->len += length_written; return AWS_OP_SUCCESS; } static int s_aes_cbc_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out) { struct aws_byte_buf final_to_decrypt = s_fill_in_overflow(cipher, &to_decrypt); struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_decrypt); int ret_val = s_default_aes_decrypt(cipher, &final_cur, out); aws_byte_buf_clean_up_secure(&final_to_decrypt); return ret_val; } static int s_aes_cbc_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; if (cipher->good && cipher_impl->overflow.len > 0) { cipher_impl->cipher_flags = BCRYPT_BLOCK_PADDING; /* take the rest of the overflow and turn padding on so the remainder is properly padded without timing attack vulnerabilities. */ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); int ret_val = s_default_aes_decrypt(cipher, &remaining_cur, out); aws_byte_buf_secure_zero(&cipher_impl->overflow); return ret_val; } return AWS_OP_SUCCESS; } static struct aws_symmetric_cipher_vtable s_aes_cbc_vtable = { .alg_name = "AES-CBC 256", .provider = "Windows CNG", .decrypt = s_aes_cbc_decrypt, .encrypt = s_aes_cbc_encrypt, .finalize_encryption = s_aes_cbc_finalize_encryption, .finalize_decryption = s_aes_cbc_finalize_decryption, .destroy = s_aes_default_destroy, .reset = s_reset_cbc_cipher, }; struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); cipher->cipher.allocator = allocator; cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->alg_handle = s_aes_cbc_algorithm_handle; cipher->cipher.vtable = &s_aes_cbc_vtable; if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false) != AWS_OP_SUCCESS) { goto error; } aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2); cipher->working_iv = cipher->cipher.iv; /* make sure the cleanup doesn't do anything. */ cipher->working_iv.allocator = NULL; cipher->cipher.impl = cipher; cipher->cipher.good = true; return &cipher->cipher; error: return NULL; } /* the buffer management for this mode is a good deal easier because we don't care about padding. We do care about keeping the final buffer less than a block size til the finalize call so we can turn the auth chaining flag off and compute the GMAC correctly. 
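   A rough caller-side sketch of AES-256-GCM through the public aws-c-cal API, assuming the
   constructor at this version takes key, iv, aad and an optional decryption tag (NULL key and iv
   mean "generate them"; aad_cur, plaintext_cur and ciphertext_buf are placeholders set up as in the
   CBC sketch earlier in this file; error checks omitted):

       struct aws_symmetric_cipher *gcm = aws_aes_gcm_256_new(allocator, NULL, NULL, &aad_cur, NULL);

       aws_symmetric_cipher_encrypt(gcm, plaintext_cur, &ciphertext_buf);
       aws_symmetric_cipher_finalize_encryption(gcm, &ciphertext_buf);   // flushes the buffered tail and computes the GMAC
       struct aws_byte_cursor tag = aws_symmetric_cipher_get_tag(gcm);   // authentication tag to hand to the decryptor
       (void)tag;

       aws_symmetric_cipher_destroy(gcm);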
*/ static int s_aes_gcm_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; if (to_encrypt.len == 0) { return AWS_OP_SUCCESS; } struct aws_byte_buf working_buffer; AWS_ZERO_STRUCT(working_buffer); /* If there's overflow, prepend it to the working buffer, then append the data to encrypt */ if (cipher_impl->overflow.len) { struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur); aws_byte_buf_reset(&cipher_impl->overflow, true); aws_byte_buf_append_dynamic(&working_buffer, &to_encrypt); } else { aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_encrypt); } int ret_val = AWS_OP_ERR; /* whatever is remaining in an incomplete block, copy it to the overflow. If we don't have a full block wait til next time or for the finalize call. */ if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) { size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE; size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + offset); struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer); struct aws_byte_cursor working_slice = aws_byte_cursor_advance(&working_buf_cur, seek_to); /* this is just here to make it obvious. The previous line advanced working_buf_cur to where the new overfloew should be. */ struct aws_byte_cursor new_overflow_cur = working_buf_cur; aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur); ret_val = s_aes_default_encrypt(cipher, &working_slice, out); } else { struct aws_byte_cursor working_buffer_cur = aws_byte_cursor_from_buf(&working_buffer); aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_buffer_cur); ret_val = AWS_OP_SUCCESS; } aws_byte_buf_clean_up_secure(&working_buffer); return ret_val; } static int s_aes_gcm_decrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; if (to_decrypt.len == 0) { return AWS_OP_SUCCESS; } struct aws_byte_buf working_buffer; AWS_ZERO_STRUCT(working_buffer); /* If there's overflow, prepend it to the working buffer, then append the data to encrypt */ if (cipher_impl->overflow.len) { struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur); aws_byte_buf_reset(&cipher_impl->overflow, true); aws_byte_buf_append_dynamic(&working_buffer, &to_decrypt); } else { aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_decrypt); } int ret_val = AWS_OP_ERR; /* whatever is remaining in an incomplete block, copy it to the overflow. If we don't have a full block wait til next time or for the finalize call. */ if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) { size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE; size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + offset); struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer); struct aws_byte_cursor working_slice = aws_byte_cursor_advance(&working_buf_cur, seek_to); /* this is just here to make it obvious. The previous line advanced working_buf_cur to where the new overfloew should be. 
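   As a concrete example of the split: with a 37 byte working buffer, offset = 37 % 16 = 5 and
   seek_to = 37 - (16 + 5) = 16, so the first 16 bytes are decrypted now while the trailing 21 bytes
   (one whole block plus the 5 byte remainder) are parked in overflow for a later call or for the
   finalize.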
*/ struct aws_byte_cursor new_overflow_cur = working_buf_cur; aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur); ret_val = s_default_aes_decrypt(cipher, &working_slice, out); } else { struct aws_byte_cursor working_buffer_cur = aws_byte_cursor_from_buf(&working_buffer); aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_buffer_cur); ret_val = AWS_OP_SUCCESS; } aws_byte_buf_clean_up_secure(&working_buffer); return ret_val; } static int s_aes_gcm_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG; /* take whatever is remaining, make the final encrypt call with the auth chain flag turned off. */ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); int ret_val = s_aes_default_encrypt(cipher, &remaining_cur, out); aws_byte_buf_secure_zero(&cipher_impl->overflow); aws_byte_buf_secure_zero(&cipher_impl->working_iv); return ret_val; } static int s_aes_gcm_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG; /* take whatever is remaining, make the final decrypt call with the auth chain flag turned off. */ struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); int ret_val = s_default_aes_decrypt(cipher, &remaining_cur, out); aws_byte_buf_secure_zero(&cipher_impl->overflow); aws_byte_buf_secure_zero(&cipher_impl->working_iv); return ret_val; } static struct aws_symmetric_cipher_vtable s_aes_gcm_vtable = { .alg_name = "AES-GCM 256", .provider = "Windows CNG", .decrypt = s_aes_gcm_decrypt, .encrypt = s_aes_gcm_encrypt, .finalize_encryption = s_aes_gcm_finalize_encryption, .finalize_decryption = s_aes_gcm_finalize_decryption, .destroy = s_aes_default_destroy, .reset = s_reset_gcm_cipher, }; struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv, const struct aws_byte_cursor *aad, const struct aws_byte_cursor *decryption_tag) { aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); cipher->cipher.allocator = allocator; cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->alg_handle = s_aes_gcm_algorithm_handle; cipher->cipher.vtable = &s_aes_gcm_vtable; /* GCM does the counting under the hood, so we let it handle the final 4 bytes of the IV. */ if (s_initialize_cipher_materials( cipher, key, iv, decryption_tag, aad, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true) != AWS_OP_SUCCESS) { goto error; } aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2); aws_byte_buf_init(&cipher->working_iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); aws_byte_buf_secure_zero(&cipher->working_iv); cipher->cipher.impl = cipher; cipher->cipher.good = true; return &cipher->cipher; error: if (cipher != NULL) { s_aes_default_destroy(&cipher->cipher); } return NULL; } /* Take a and b, XOR them and store it in dest. Notice the XOR is done up to the length of the smallest input. If there's a bug in here, it's being hit inside the finalize call when there's an input stream that isn't an even multiple of 16. 
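   Concretely: during finalize the last chunk of data may be, say, 5 bytes while the encrypted
   counter block (the keystream) is always 16 bytes, so min_size below comes out to 5, only the
   first 5 keystream bytes are consumed, and the CTR output stays exactly as long as the input with
   no padding added.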
*/ static int s_xor_cursors(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b, struct aws_byte_buf *dest) { size_t min_size = aws_min_size(b->len, a->len); if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(dest, min_size)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* If the profiler is saying this is slow, SIMD the loop below. */ uint8_t *array_ref = dest->buffer + dest->len; for (size_t i = 0; i < min_size; ++i) { array_ref[i] = a->ptr[i] ^ b->ptr[i]; } dest->len += min_size; return AWS_OP_SUCCESS; } /* There is no CTR mode on windows. Instead, we use AES ECB to encrypt the IV a block at a time. That value is then XOR'd with the to_encrypt cursor and appended to out. The counter then needs to be incremented by 1 for the next call. This has to be done a block at a time, so we slice to_encrypt into a cursor per block and do this process for each block. Also notice that CTR mode is symmetric for encryption and decryption (encrypt and decrypt are the same thing). */ static int s_aes_ctr_encrypt( struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; if (to_encrypt.len == 0) { return AWS_OP_SUCCESS; } struct aws_byte_buf working_buffer; AWS_ZERO_STRUCT(working_buffer); /* prepend overflow to the working buffer and then append to_encrypt to it. */ if (cipher_impl->overflow.len && to_encrypt.ptr != cipher_impl->overflow.buffer) { struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur); aws_byte_buf_reset(&cipher_impl->overflow, true); aws_byte_buf_append_dynamic(&working_buffer, &to_encrypt); } else { aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_encrypt); } /* slice working_buffer into a slice per block. */ struct aws_array_list sliced_buffers; aws_array_list_init_dynamic( &sliced_buffers, cipher->allocator, (to_encrypt.len / AWS_AES_256_CIPHER_BLOCK_SIZE) + 1, sizeof(struct aws_byte_cursor)); struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer); while (working_buf_cur.len) { struct aws_byte_cursor slice = working_buf_cur; if (working_buf_cur.len >= AWS_AES_256_CIPHER_BLOCK_SIZE) { slice = aws_byte_cursor_advance(&working_buf_cur, AWS_AES_256_CIPHER_BLOCK_SIZE); } else { aws_byte_cursor_advance(&working_buf_cur, slice.len); } aws_array_list_push_back(&sliced_buffers, &slice); } int ret_val = AWS_OP_ERR; size_t sliced_buffers_cnt = aws_array_list_length(&sliced_buffers); /* for each slice, if it's a full block, do ECB on the IV, xor it to the slice, and then increment the counter. */ for (size_t i = 0; i < sliced_buffers_cnt; ++i) { struct aws_byte_cursor buffer_cur; AWS_ZERO_STRUCT(buffer_cur); aws_array_list_get_at(&sliced_buffers, &buffer_cur, i); if (buffer_cur.len == AWS_AES_256_CIPHER_BLOCK_SIZE || /* this part of the branch is for handling the finalize call, which does not have to be on an even block boundary. 
*/ (cipher_impl->overflow.len > 0 && sliced_buffers_cnt) == 1) { ULONG lengthWritten = (ULONG)AWS_AES_256_CIPHER_BLOCK_SIZE; uint8_t temp_buffer[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; struct aws_byte_cursor temp_cur = aws_byte_cursor_from_array(temp_buffer, sizeof(temp_buffer)); NTSTATUS status = BCryptEncrypt( cipher_impl->key_handle, cipher_impl->working_iv.buffer, (ULONG)cipher_impl->working_iv.len, NULL, NULL, 0, temp_cur.ptr, (ULONG)temp_cur.len, &lengthWritten, cipher_impl->cipher_flags); if (!NT_SUCCESS(status)) { cipher->good = false; ret_val = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto clean_up; } /* this does the XOR, after this call the final encrypted output is added to out. */ if (s_xor_cursors(&buffer_cur, &temp_cur, out)) { ret_val = AWS_OP_ERR; goto clean_up; } /* increment the counter. Get the buffers aligned for it first though. */ size_t counter_offset = AWS_AES_256_CIPHER_BLOCK_SIZE - sizeof(uint32_t); struct aws_byte_buf counter_buf = cipher_impl->working_iv; /* roll it back 4 so the write works. */ counter_buf.len = counter_offset; struct aws_byte_cursor counter_cur = aws_byte_cursor_from_buf(&cipher_impl->working_iv); aws_byte_cursor_advance(&counter_cur, counter_offset); /* read current counter value as a Big-endian 32-bit integer*/ uint32_t counter = 0; aws_byte_cursor_read_be32(&counter_cur, &counter); /* check for overflow here. */ if (aws_add_u32_checked(counter, 1, &counter) != AWS_OP_SUCCESS) { cipher->good = false; ret_val = AWS_OP_ERR; goto clean_up; } /* put the incremented counter back. */ aws_byte_buf_write_be32(&counter_buf, counter); } else { /* otherwise dump it into the overflow and wait til the next call */ aws_byte_buf_append_dynamic(&cipher_impl->overflow, &buffer_cur); } ret_val = AWS_OP_SUCCESS; } clean_up: aws_array_list_clean_up_secure(&sliced_buffers); aws_byte_buf_clean_up_secure(&working_buffer); return ret_val; } static int s_aes_ctr_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); /* take the final overflow, and do the final encrypt call for it. 
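   Because CTR needs no padding, the ciphertext produced here is exactly as long as the leftover
   plaintext; the single-slice finalize branch inside s_aes_ctr_encrypt handles the tail that is not
   block aligned. Encryption and decryption are the same operation in CTR mode, which is why the
   vtable below also wires finalize_decryption to this function.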
*/ int ret_val = s_aes_ctr_encrypt(cipher, remaining_cur, out); aws_byte_buf_secure_zero(&cipher_impl->overflow); aws_byte_buf_secure_zero(&cipher_impl->working_iv); return ret_val; } static struct aws_symmetric_cipher_vtable s_aes_ctr_vtable = { .alg_name = "AES-CTR 256", .provider = "Windows CNG", .decrypt = s_aes_ctr_encrypt, .encrypt = s_aes_ctr_encrypt, .finalize_encryption = s_aes_ctr_finalize_encryption, .finalize_decryption = s_aes_ctr_finalize_encryption, .destroy = s_aes_default_destroy, .reset = s_reset_ctr_cipher, }; struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key, const struct aws_byte_cursor *iv) { aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); cipher->cipher.allocator = allocator; cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->alg_handle = s_aes_ctr_algorithm_handle; cipher->cipher.vtable = &s_aes_ctr_vtable; if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false) != AWS_OP_SUCCESS) { goto error; } aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2); aws_byte_buf_init_copy(&cipher->working_iv, allocator, &cipher->cipher.iv); cipher->cipher.impl = cipher; cipher->cipher.good = true; return &cipher->cipher; error: if (cipher != NULL) { s_aes_default_destroy(&cipher->cipher); } return NULL; } /* This is just an encrypted key. Append them to a buffer and on finalize export/import the key using AES keywrap. */ static int s_key_wrap_encrypt_decrypt( struct aws_symmetric_cipher *cipher, const struct aws_byte_cursor input, struct aws_byte_buf *out) { (void)out; struct aes_bcrypt_cipher *cipher_impl = cipher->impl; return aws_byte_buf_append_dynamic(&cipher_impl->overflow, &input); } /* Import the buffer we've been appending to as an AES key. Then export it using AES Keywrap format. */ static int s_keywrap_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; BCRYPT_KEY_HANDLE key_handle_to_encrypt = s_import_key_blob(s_aes_keywrap_algorithm_handle, cipher->allocator, &cipher_impl->overflow); if (!key_handle_to_encrypt) { return AWS_OP_ERR; } NTSTATUS status = 0; ULONG output_size = 0; /* Call with NULL first to get the required size. */ status = BCryptExportKey( key_handle_to_encrypt, cipher_impl->key_handle, BCRYPT_AES_WRAP_KEY_BLOB, NULL, 0, &output_size, 0); if (!NT_SUCCESS(status)) { cipher->good = false; return aws_raise_error(AWS_ERROR_INVALID_STATE); } int ret_val = AWS_OP_ERR; if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, output_size)) { goto clean_up; } /* now actually export the key */ ULONG len_written = 0; status = BCryptExportKey( key_handle_to_encrypt, cipher_impl->key_handle, BCRYPT_AES_WRAP_KEY_BLOB, out->buffer + out->len, output_size, &len_written, 0); if (!NT_SUCCESS(status)) { cipher->good = false; goto clean_up; } out->len += len_written; ret_val = AWS_OP_SUCCESS; clean_up: if (key_handle_to_encrypt) { BCryptDestroyKey(key_handle_to_encrypt); } return ret_val; } /* Import the buffer we've been appending to as an AES Key Wrapped key. Then export the raw AES key. 
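   A rough caller-side sketch of wrapping and unwrapping a key through the public aws-c-cal API,
   assuming aws_aes_keywrap_256_new plus the generic aws_symmetric_cipher_* entry points (kek_cur,
   raw_key_cur and the output buffers are placeholders; error checks omitted). For key wrap the
   encrypt/decrypt calls only accumulate input; the real work happens in the finalize calls:

       struct aws_symmetric_cipher *kw = aws_aes_keywrap_256_new(allocator, &kek_cur);

       aws_symmetric_cipher_encrypt(kw, raw_key_cur, &wrapped_buf);    // just buffers the key to wrap
       aws_symmetric_cipher_finalize_encryption(kw, &wrapped_buf);     // emits the wrapped key blob

       aws_symmetric_cipher_reset(kw);
       aws_symmetric_cipher_decrypt(kw, aws_byte_cursor_from_buf(&wrapped_buf), &unwrapped_buf);
       aws_symmetric_cipher_finalize_decryption(kw, &unwrapped_buf);   // recovers the raw key bytes

       aws_symmetric_cipher_destroy(kw);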
*/ static int s_keywrap_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; BCRYPT_KEY_HANDLE import_key = NULL; /* use the cipher key to import the buffer as an AES keywrapped key. */ NTSTATUS status = BCryptImportKey( s_aes_keywrap_algorithm_handle, cipher_impl->key_handle, BCRYPT_AES_WRAP_KEY_BLOB, &import_key, NULL, 0, cipher_impl->overflow.buffer, (ULONG)cipher_impl->overflow.len, 0); int ret_val = AWS_OP_ERR; if (NT_SUCCESS(status) && import_key) { ULONG export_size = 0; struct aws_byte_buf key_data_blob; aws_byte_buf_init( &key_data_blob, cipher->allocator, sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + cipher_impl->overflow.len); /* Now just export the key out as a raw AES key. */ status = BCryptExportKey( import_key, NULL, BCRYPT_KEY_DATA_BLOB, key_data_blob.buffer, (ULONG)key_data_blob.capacity, &export_size, 0); key_data_blob.len += export_size; if (NT_SUCCESS(status)) { if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, export_size)) { goto clean_up; } BCRYPT_KEY_DATA_BLOB_HEADER *stream_header = (BCRYPT_KEY_DATA_BLOB_HEADER *)key_data_blob.buffer; AWS_FATAL_ASSERT( aws_byte_buf_write( out, key_data_blob.buffer + sizeof(BCRYPT_KEY_DATA_BLOB_HEADER), stream_header->cbKeyData) && "Copying key data failed but the allocation should have already occured successfully"); ret_val = AWS_OP_SUCCESS; } else { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); cipher->good = false; } clean_up: aws_byte_buf_clean_up_secure(&key_data_blob); BCryptDestroyKey(import_key); } else { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); cipher->good = false; } return ret_val; } static int s_reset_keywrap_cipher(struct aws_symmetric_cipher *cipher) { struct aes_bcrypt_cipher *cipher_impl = cipher->impl; s_clear_reusable_components(cipher); return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, NULL, 0, false, false); } static struct aws_symmetric_cipher_vtable s_aes_keywrap_vtable = { .alg_name = "AES-KEYWRAP 256", .provider = "Windows CNG", .decrypt = s_key_wrap_encrypt_decrypt, .encrypt = s_key_wrap_encrypt_decrypt, .finalize_encryption = s_keywrap_finalize_encryption, .finalize_decryption = s_keywrap_finalize_decryption, .destroy = s_aes_default_destroy, .reset = s_reset_keywrap_cipher, }; struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( struct aws_allocator *allocator, const struct aws_byte_cursor *key) { aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); cipher->cipher.allocator = allocator; cipher->cipher.block_size = 8; cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; cipher->alg_handle = s_aes_keywrap_algorithm_handle; cipher->cipher.vtable = &s_aes_keywrap_vtable; if (s_initialize_cipher_materials(cipher, key, NULL, NULL, NULL, 0, false, false) != AWS_OP_SUCCESS) { goto error; } aws_byte_buf_init(&cipher->overflow, allocator, (AWS_AES_256_CIPHER_BLOCK_SIZE * 2) + 8); cipher->cipher.impl = cipher; cipher->cipher.good = true; return &cipher->cipher; error: if (cipher != NULL) { s_aes_default_destroy(&cipher->cipher); } return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/windows/bcrypt_ecc.c000066400000000000000000000401271456575232400252460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include static BCRYPT_ALG_HANDLE s_ecdsa_p256_alg = NULL; static BCRYPT_ALG_HANDLE s_ecdsa_p384_alg = NULL; /* size of the P384 curve's signatures. This is the largest we support at the moment. Since msvc doesn't support variable length arrays, we need to handle this with a macro. */ #define MAX_SIGNATURE_LENGTH (48 * 2) static aws_thread_once s_ecdsa_thread_once = AWS_THREAD_ONCE_STATIC_INIT; static void s_load_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ NTSTATUS status = BCryptOpenAlgorithmProvider(&s_ecdsa_p256_alg, BCRYPT_ECDSA_P256_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_ecdsa_p256_alg && "BCryptOpenAlgorithmProvider() failed"); status = BCryptOpenAlgorithmProvider(&s_ecdsa_p384_alg, BCRYPT_ECDSA_P384_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_ecdsa_p384_alg && "BCryptOpenAlgorithmProvider() failed"); (void)status; } struct bcrypt_ecc_key_pair { struct aws_ecc_key_pair key_pair; BCRYPT_KEY_HANDLE key_handle; }; static BCRYPT_ALG_HANDLE s_key_alg_handle_from_curve_name(enum aws_ecc_curve_name curve_name) { switch (curve_name) { case AWS_CAL_ECDSA_P256: return s_ecdsa_p256_alg; case AWS_CAL_ECDSA_P384: return s_ecdsa_p384_alg; default: return 0; } } static ULONG s_get_magic_from_curve_name(enum aws_ecc_curve_name curve_name, bool private_key) { switch (curve_name) { case AWS_CAL_ECDSA_P256: return private_key ? BCRYPT_ECDSA_PRIVATE_P256_MAGIC : BCRYPT_ECDSA_PUBLIC_P256_MAGIC; case AWS_CAL_ECDSA_P384: return private_key ? BCRYPT_ECDSA_PRIVATE_P384_MAGIC : BCRYPT_ECDSA_PUBLIC_P384_MAGIC; default: return 0; } } static void s_destroy_key(struct aws_ecc_key_pair *key_pair) { if (key_pair) { struct bcrypt_ecc_key_pair *key_impl = key_pair->impl; if (key_impl->key_handle) { BCryptDestroyKey(key_impl->key_handle); } aws_byte_buf_clean_up_secure(&key_pair->key_buf); aws_mem_release(key_pair->allocator, key_impl); } } static size_t s_signature_length(const struct aws_ecc_key_pair *key_pair) { static size_t s_der_overhead = 8; return s_der_overhead + aws_ecc_key_coordinate_byte_size_from_curve_name(key_pair->curve_name) * 2; } static bool s_trim_zeros_predicate(uint8_t value) { return value == 0; } static int s_sign_message( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, struct aws_byte_buf *signature_output) { struct bcrypt_ecc_key_pair *key_impl = key_pair->impl; size_t output_buf_space = signature_output->capacity - signature_output->len; if (output_buf_space < s_signature_length(key_pair)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } uint8_t temp_signature[MAX_SIGNATURE_LENGTH] = {0}; struct aws_byte_buf temp_signature_buf = aws_byte_buf_from_empty_array(temp_signature, sizeof(temp_signature)); size_t signature_length = temp_signature_buf.capacity; NTSTATUS status = BCryptSignHash( key_impl->key_handle, NULL, message->ptr, (ULONG)message->len, temp_signature_buf.buffer, (ULONG)signature_length, (ULONG *)&signature_length, 0); if (status != 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } temp_signature_buf.len += signature_length; size_t coordinate_len = temp_signature_buf.len / 2; /* okay. Windows doesn't DER encode this to ASN.1, so we need to do it manually. 
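   The raw BCryptSignHash output is the fixed-width concatenation r || s (coordinate_len bytes each),
   while callers expect the DER-encoded ECDSA-Sig-Value, i.e. a SEQUENCE holding r and s as unsigned
   INTEGERs with leading zero bytes stripped; that is exactly what the aws_der_encoder calls below
   produce.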
*/ struct aws_der_encoder *encoder = aws_der_encoder_new(key_pair->allocator, signature_output->capacity - signature_output->len); if (!encoder) { return AWS_OP_ERR; } aws_der_encoder_begin_sequence(encoder); struct aws_byte_cursor integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer, coordinate_len); /* trim off the leading zero padding for DER encoding */ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate); aws_der_encoder_write_unsigned_integer(encoder, integer_cur); integer_cur = aws_byte_cursor_from_array(temp_signature_buf.buffer + coordinate_len, coordinate_len); /* trim off the leading zero padding for DER encoding */ integer_cur = aws_byte_cursor_left_trim_pred(&integer_cur, s_trim_zeros_predicate); aws_der_encoder_write_unsigned_integer(encoder, integer_cur); aws_der_encoder_end_sequence(encoder); struct aws_byte_cursor signature_out_cur; AWS_ZERO_STRUCT(signature_out_cur); aws_der_encoder_get_contents(encoder, &signature_out_cur); aws_byte_buf_append(signature_output, &signature_out_cur); aws_der_encoder_destroy(encoder); return AWS_OP_SUCCESS; } static int s_derive_public_key(struct aws_ecc_key_pair *key_pair) { struct bcrypt_ecc_key_pair *key_impl = key_pair->impl; ULONG result = 0; NTSTATUS status = BCryptExportKey( key_impl->key_handle, NULL, BCRYPT_ECCPRIVATE_BLOB, key_pair->key_buf.buffer, (ULONG)key_pair->key_buf.capacity, &result, 0); key_pair->key_buf.len = result; (void)result; if (status) { return aws_raise_error(AWS_ERROR_CAL_MISSING_REQUIRED_KEY_COMPONENT); } return AWS_OP_SUCCESS; } static int s_append_coordinate( struct aws_byte_buf *buffer, struct aws_byte_cursor *coordinate, enum aws_ecc_curve_name curve_name) { size_t coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name); if (coordinate->len < coordinate_size) { size_t leading_zero_count = coordinate_size - coordinate->len; AWS_FATAL_ASSERT(leading_zero_count + buffer->len <= buffer->capacity); aws_byte_buf_write_u8_n(buffer, 0x0, leading_zero_count); } return aws_byte_buf_append(buffer, coordinate); } static int s_verify_signature( const struct aws_ecc_key_pair *key_pair, const struct aws_byte_cursor *message, const struct aws_byte_cursor *signature) { struct bcrypt_ecc_key_pair *key_impl = key_pair->impl; /* OKAY Windows doesn't do the whole standard internet formats thing. So we need to manually decode the DER encoded ASN.1 format first.*/ uint8_t temp_signature[MAX_SIGNATURE_LENGTH] = {0}; struct aws_byte_buf temp_signature_buf = aws_byte_buf_from_empty_array(temp_signature, sizeof(temp_signature)); struct aws_byte_cursor der_encoded_signature = aws_byte_cursor_from_array(signature->ptr, signature->len); struct aws_der_decoder *decoder = aws_der_decoder_new(key_pair->allocator, der_encoded_signature); if (!decoder) { return AWS_OP_ERR; } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_SEQUENCE) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_INTEGER) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } /* there will be two coordinates. They need to be concatenated together. 
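   Since DER strips leading zeros, s_append_coordinate re-pads each INTEGER on the left with zero
   bytes back to the curve's fixed coordinate size before the concatenated r || s is handed to
   BCryptVerifySignature.

   For completeness, a rough caller-side sketch of signing and verifying through the public
   aws-c-cal API, assuming the aws_ecc_key_pair_* and aws_sha256_compute entry points (ECDSA signs a
   digest, so the message is hashed first; allocator and message_cur are placeholders; error checks
   omitted):

       struct aws_ecc_key_pair *key = aws_ecc_key_pair_new_generate_random(allocator, AWS_CAL_ECDSA_P256);

       uint8_t digest_storage[AWS_SHA256_LEN];
       struct aws_byte_buf digest = aws_byte_buf_from_empty_array(digest_storage, sizeof(digest_storage));
       aws_sha256_compute(allocator, &message_cur, &digest, 0);
       struct aws_byte_cursor digest_cur = aws_byte_cursor_from_buf(&digest);

       struct aws_byte_buf signature;
       aws_byte_buf_init(&signature, allocator, aws_ecc_key_pair_signature_length(key));
       aws_ecc_key_pair_sign_message(key, &digest_cur, &signature);

       struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature);
       aws_ecc_key_pair_verify_signature(key, &digest_cur, &signature_cur);   // AWS_OP_SUCCESS on a valid signature

       aws_byte_buf_clean_up(&signature);
       aws_ecc_key_pair_release(key);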
*/ struct aws_byte_cursor coordinate; AWS_ZERO_STRUCT(coordinate); if (aws_der_decoder_tlv_unsigned_integer(decoder, &coordinate)) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } if (s_append_coordinate(&temp_signature_buf, &coordinate, key_pair->curve_name)) { goto error; } if (!aws_der_decoder_next(decoder) || aws_der_decoder_tlv_type(decoder) != AWS_DER_INTEGER) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } AWS_ZERO_STRUCT(coordinate); if (aws_der_decoder_tlv_unsigned_integer(decoder, &coordinate)) { aws_raise_error(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED); goto error; } if (s_append_coordinate(&temp_signature_buf, &coordinate, key_pair->curve_name)) { goto error; } aws_der_decoder_destroy(decoder); /* okay, now we've got a windows compatible signature, let's verify it. */ NTSTATUS status = BCryptVerifySignature( key_impl->key_handle, NULL, message->ptr, (ULONG)message->len, temp_signature_buf.buffer, (ULONG)temp_signature_buf.len, 0); return status == 0 ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); error: if (decoder) { aws_der_decoder_destroy(decoder); } return AWS_OP_ERR; } static struct aws_ecc_key_pair_vtable s_vtable = { .destroy = s_destroy_key, .derive_pub_key = s_derive_public_key, .sign_message = s_sign_message, .verify_signature = s_verify_signature, .signature_length = s_signature_length, }; static struct aws_ecc_key_pair *s_alloc_pair_and_init_buffers( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor pub_x, struct aws_byte_cursor pub_y, struct aws_byte_cursor priv_key) { aws_thread_call_once(&s_ecdsa_thread_once, s_load_alg_handle, NULL); struct bcrypt_ecc_key_pair *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_ecc_key_pair)); if (!key_impl) { return NULL; } key_impl->key_pair.allocator = allocator; key_impl->key_pair.curve_name = curve_name; key_impl->key_pair.impl = key_impl; key_impl->key_pair.vtable = &s_vtable; aws_atomic_init_int(&key_impl->key_pair.ref_count, 1); size_t s_key_coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name); if (!s_key_coordinate_size) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } if ((pub_x.ptr && pub_x.len != s_key_coordinate_size) || (pub_y.ptr && pub_y.len != s_key_coordinate_size) || (priv_key.ptr && priv_key.len != s_key_coordinate_size)) { aws_raise_error(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM); goto error; } size_t total_buffer_size = s_key_coordinate_size * 3 + sizeof(BCRYPT_ECCKEY_BLOB); if (aws_byte_buf_init(&key_impl->key_pair.key_buf, allocator, total_buffer_size)) { goto error; } aws_byte_buf_secure_zero(&key_impl->key_pair.key_buf); BCRYPT_ECCKEY_BLOB key_blob; AWS_ZERO_STRUCT(key_blob); key_blob.dwMagic = s_get_magic_from_curve_name(curve_name, priv_key.ptr && priv_key.len); key_blob.cbKey = (ULONG)s_key_coordinate_size; struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob)); aws_byte_buf_append(&key_impl->key_pair.key_buf, &header); LPCWSTR blob_type = BCRYPT_ECCPUBLIC_BLOB; ULONG flags = 0; if (pub_x.ptr && pub_y.ptr) { aws_byte_buf_append(&key_impl->key_pair.key_buf, &pub_x); aws_byte_buf_append(&key_impl->key_pair.key_buf, &pub_y); } else { key_impl->key_pair.key_buf.len += s_key_coordinate_size * 2; flags = BCRYPT_NO_KEY_VALIDATION; } if (priv_key.ptr) { blob_type = BCRYPT_ECCPRIVATE_BLOB; aws_byte_buf_append(&key_impl->key_pair.key_buf, &priv_key); } key_impl->key_pair.pub_x = 
aws_byte_buf_from_array(key_impl->key_pair.key_buf.buffer + sizeof(key_blob), s_key_coordinate_size); key_impl->key_pair.pub_y = aws_byte_buf_from_array(key_impl->key_pair.pub_x.buffer + s_key_coordinate_size, s_key_coordinate_size); key_impl->key_pair.priv_d = aws_byte_buf_from_array(key_impl->key_pair.pub_y.buffer + s_key_coordinate_size, s_key_coordinate_size); BCRYPT_ALG_HANDLE alg_handle = s_key_alg_handle_from_curve_name(curve_name); NTSTATUS status = BCryptImportKeyPair( alg_handle, NULL, blob_type, &key_impl->key_handle, key_impl->key_pair.key_buf.buffer, (ULONG)key_impl->key_pair.key_buf.len, flags); if (status) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } return &key_impl->key_pair; error: s_destroy_key(&key_impl->key_pair); return NULL; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_private_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *priv_key) { struct aws_byte_cursor empty; AWS_ZERO_STRUCT(empty); return s_alloc_pair_and_init_buffers(allocator, curve_name, empty, empty, *priv_key); } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_public_key_impl( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, const struct aws_byte_cursor *public_key_x, const struct aws_byte_cursor *public_key_y) { struct aws_byte_cursor empty; AWS_ZERO_STRUCT(empty); return s_alloc_pair_and_init_buffers(allocator, curve_name, *public_key_x, *public_key_y, empty); } struct aws_ecc_key_pair *aws_ecc_key_pair_new_generate_random( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name) { aws_thread_call_once(&s_ecdsa_thread_once, s_load_alg_handle, NULL); struct bcrypt_ecc_key_pair *key_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_ecc_key_pair)); if (!key_impl) { return NULL; } key_impl->key_pair.allocator = allocator; key_impl->key_pair.curve_name = curve_name; key_impl->key_pair.impl = key_impl; key_impl->key_pair.vtable = &s_vtable; aws_atomic_init_int(&key_impl->key_pair.ref_count, 1); size_t key_coordinate_size = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name); if (!key_coordinate_size) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } BCRYPT_ALG_HANDLE alg_handle = s_key_alg_handle_from_curve_name(curve_name); ULONG key_bit_length = (ULONG)key_coordinate_size * 8; NTSTATUS status = BCryptGenerateKeyPair(alg_handle, &key_impl->key_handle, key_bit_length, 0); if (status) { aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto error; } status = BCryptFinalizeKeyPair(key_impl->key_handle, 0); if (status) { aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto error; } size_t total_buffer_size = key_coordinate_size * 3 + sizeof(BCRYPT_ECCKEY_BLOB); if (aws_byte_buf_init(&key_impl->key_pair.key_buf, allocator, total_buffer_size)) { goto error; } aws_byte_buf_secure_zero(&key_impl->key_pair.key_buf); key_impl->key_pair.pub_x = aws_byte_buf_from_array(key_impl->key_pair.key_buf.buffer + sizeof(BCRYPT_ECCKEY_BLOB), key_coordinate_size); key_impl->key_pair.pub_y = aws_byte_buf_from_array(key_impl->key_pair.pub_x.buffer + key_coordinate_size, key_coordinate_size); key_impl->key_pair.priv_d = aws_byte_buf_from_array(key_impl->key_pair.pub_y.buffer + key_coordinate_size, key_coordinate_size); if (s_derive_public_key(&key_impl->key_pair)) { goto error; } return &key_impl->key_pair; error: s_destroy_key(&key_impl->key_pair); return NULL; } struct aws_ecc_key_pair *aws_ecc_key_pair_new_from_asn1( struct aws_allocator *allocator, const struct aws_byte_cursor *encoded_keys) { 
struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, *encoded_keys); /* we could have private key or a public key, or a full pair. */ struct aws_byte_cursor pub_x; AWS_ZERO_STRUCT(pub_x); struct aws_byte_cursor pub_y; AWS_ZERO_STRUCT(pub_y); struct aws_byte_cursor priv_d; AWS_ZERO_STRUCT(priv_d); enum aws_ecc_curve_name curve_name; if (aws_der_decoder_load_ecc_key_pair(decoder, &pub_x, &pub_y, &priv_d, &curve_name)) { goto error; } /* now that we have the buffers, we can just use the normal code path. */ struct aws_ecc_key_pair *key_pair = s_alloc_pair_and_init_buffers(allocator, curve_name, pub_x, pub_y, priv_d); aws_der_decoder_destroy(decoder); return key_pair; error: if (decoder) { aws_der_decoder_destroy(decoder); } return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/windows/bcrypt_hash.c000066400000000000000000000157421456575232400254440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include static BCRYPT_ALG_HANDLE s_sha256_alg = NULL; static size_t s_sha256_obj_len = 0; static aws_thread_once s_sha256_once = AWS_THREAD_ONCE_STATIC_INIT; static BCRYPT_ALG_HANDLE s_sha1_alg = NULL; static size_t s_sha1_obj_len = 0; static aws_thread_once s_sha1_once = AWS_THREAD_ONCE_STATIC_INIT; static BCRYPT_ALG_HANDLE s_md5_alg = NULL; static size_t s_md5_obj_len = 0; static aws_thread_once s_md5_once = AWS_THREAD_ONCE_STATIC_INIT; static void s_destroy(struct aws_hash *hash); static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash); static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output); static struct aws_hash_vtable s_sha256_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA256", .provider = "Windows CNG", }; static struct aws_hash_vtable s_sha1_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA1", .provider = "Windows CNG", }; static struct aws_hash_vtable s_md5_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "MD5", .provider = "Windows CNG", }; struct bcrypt_hash_handle { struct aws_hash hash; BCRYPT_HASH_HANDLE hash_handle; uint8_t *hash_obj; }; static void s_load_sha256_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ (void)BCryptOpenAlgorithmProvider(&s_sha256_alg, BCRYPT_SHA256_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_sha256_alg); DWORD result_length = 0; (void)BCryptGetProperty( s_sha256_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_sha256_obj_len, sizeof(s_sha256_obj_len), &result_length, 0); } static void s_load_sha1_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ (void)BCryptOpenAlgorithmProvider(&s_sha1_alg, BCRYPT_SHA1_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_sha1_alg); DWORD result_length = 0; (void)BCryptGetProperty( s_sha1_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_sha1_obj_len, sizeof(s_sha1_obj_len), &result_length, 0); } static void s_load_md5_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ (void)BCryptOpenAlgorithmProvider(&s_md5_alg, BCRYPT_MD5_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_md5_alg); DWORD result_length = 0; (void)BCryptGetProperty( s_md5_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_md5_obj_len, sizeof(s_md5_obj_len), &result_length, 0); } struct aws_hash *aws_sha256_default_new(struct 
aws_allocator *allocator) { aws_thread_call_once(&s_sha256_once, s_load_sha256_alg_handle, NULL); struct bcrypt_hash_handle *bcrypt_hash = NULL; uint8_t *hash_obj = NULL; aws_mem_acquire_many(allocator, 2, &bcrypt_hash, sizeof(struct bcrypt_hash_handle), &hash_obj, s_sha256_obj_len); if (!bcrypt_hash) { return NULL; } AWS_ZERO_STRUCT(*bcrypt_hash); bcrypt_hash->hash.allocator = allocator; bcrypt_hash->hash.vtable = &s_sha256_vtable; bcrypt_hash->hash.impl = bcrypt_hash; bcrypt_hash->hash.digest_size = AWS_SHA256_LEN; bcrypt_hash->hash.good = true; bcrypt_hash->hash_obj = hash_obj; NTSTATUS status = BCryptCreateHash( s_sha256_alg, &bcrypt_hash->hash_handle, bcrypt_hash->hash_obj, (ULONG)s_sha256_obj_len, NULL, 0, 0); if (((NTSTATUS)status) < 0) { aws_mem_release(allocator, bcrypt_hash); return NULL; } return &bcrypt_hash->hash; } struct aws_hash *aws_sha1_default_new(struct aws_allocator *allocator) { aws_thread_call_once(&s_sha1_once, s_load_sha1_alg_handle, NULL); struct bcrypt_hash_handle *bcrypt_hash = NULL; uint8_t *hash_obj = NULL; aws_mem_acquire_many(allocator, 2, &bcrypt_hash, sizeof(struct bcrypt_hash_handle), &hash_obj, s_sha1_obj_len); if (!bcrypt_hash) { return NULL; } AWS_ZERO_STRUCT(*bcrypt_hash); bcrypt_hash->hash.allocator = allocator; bcrypt_hash->hash.vtable = &s_sha1_vtable; bcrypt_hash->hash.impl = bcrypt_hash; bcrypt_hash->hash.digest_size = AWS_SHA1_LEN; bcrypt_hash->hash.good = true; bcrypt_hash->hash_obj = hash_obj; NTSTATUS status = BCryptCreateHash( s_sha1_alg, &bcrypt_hash->hash_handle, bcrypt_hash->hash_obj, (ULONG)s_sha1_obj_len, NULL, 0, 0); if (((NTSTATUS)status) < 0) { aws_mem_release(allocator, bcrypt_hash); return NULL; } return &bcrypt_hash->hash; } struct aws_hash *aws_md5_default_new(struct aws_allocator *allocator) { aws_thread_call_once(&s_md5_once, s_load_md5_alg_handle, NULL); struct bcrypt_hash_handle *bcrypt_hash = NULL; uint8_t *hash_obj = NULL; aws_mem_acquire_many(allocator, 2, &bcrypt_hash, sizeof(struct bcrypt_hash_handle), &hash_obj, s_md5_obj_len); if (!bcrypt_hash) { return NULL; } AWS_ZERO_STRUCT(*bcrypt_hash); bcrypt_hash->hash.allocator = allocator; bcrypt_hash->hash.vtable = &s_md5_vtable; bcrypt_hash->hash.impl = bcrypt_hash; bcrypt_hash->hash.digest_size = AWS_MD5_LEN; bcrypt_hash->hash.good = true; bcrypt_hash->hash_obj = hash_obj; NTSTATUS status = BCryptCreateHash(s_md5_alg, &bcrypt_hash->hash_handle, bcrypt_hash->hash_obj, (ULONG)s_md5_obj_len, NULL, 0, 0); if (((NTSTATUS)status) < 0) { aws_mem_release(allocator, bcrypt_hash); return NULL; } return &bcrypt_hash->hash; } static void s_destroy(struct aws_hash *hash) { struct bcrypt_hash_handle *ctx = hash->impl; BCryptDestroyHash(ctx->hash_handle); aws_mem_release(hash->allocator, ctx); } static int s_update(struct aws_hash *hash, const struct aws_byte_cursor *to_hash) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct bcrypt_hash_handle *ctx = hash->impl; NTSTATUS status = BCryptHashData(ctx->hash_handle, to_hash->ptr, (ULONG)to_hash->len, 0); if (((NTSTATUS)status) < 0) { hash->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_finalize(struct aws_hash *hash, struct aws_byte_buf *output) { if (!hash->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct bcrypt_hash_handle *ctx = hash->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hash->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } NTSTATUS status = BCryptFinishHash(ctx->hash_handle, 
output->buffer + output->len, (ULONG)hash->digest_size, 0); hash->good = false; if (((NTSTATUS)status) < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } output->len += hash->digest_size; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/windows/bcrypt_hmac.c000066400000000000000000000075231456575232400254270ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include static BCRYPT_ALG_HANDLE s_sha256_hmac_alg = NULL; static size_t s_sha256_hmac_obj_len = 0; static aws_thread_once s_sha256_hmac_once = AWS_THREAD_ONCE_STATIC_INIT; static void s_destroy(struct aws_hmac *hash); static int s_update(struct aws_hmac *hash, const struct aws_byte_cursor *to_hash); static int s_finalize(struct aws_hmac *hash, struct aws_byte_buf *output); static struct aws_hmac_vtable s_sha256_hmac_vtable = { .destroy = s_destroy, .update = s_update, .finalize = s_finalize, .alg_name = "SHA256 HMAC", .provider = "Windows CNG", }; struct bcrypt_hmac_handle { struct aws_hmac hmac; BCRYPT_HASH_HANDLE hash_handle; uint8_t *hash_obj; }; static void s_load_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ BCryptOpenAlgorithmProvider( &s_sha256_hmac_alg, BCRYPT_SHA256_ALGORITHM, MS_PRIMITIVE_PROVIDER, BCRYPT_ALG_HANDLE_HMAC_FLAG); AWS_ASSERT(s_sha256_hmac_alg); DWORD result_length = 0; BCryptGetProperty( s_sha256_hmac_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_sha256_hmac_obj_len, sizeof(s_sha256_hmac_obj_len), &result_length, 0); } struct aws_hmac *aws_sha256_hmac_default_new(struct aws_allocator *allocator, const struct aws_byte_cursor *secret) { aws_thread_call_once(&s_sha256_hmac_once, s_load_alg_handle, NULL); struct bcrypt_hmac_handle *bcrypt_hmac; uint8_t *hash_obj; aws_mem_acquire_many( allocator, 2, &bcrypt_hmac, sizeof(struct bcrypt_hmac_handle), &hash_obj, s_sha256_hmac_obj_len); if (!bcrypt_hmac) { return NULL; } AWS_ZERO_STRUCT(*bcrypt_hmac); bcrypt_hmac->hmac.allocator = allocator; bcrypt_hmac->hmac.vtable = &s_sha256_hmac_vtable; bcrypt_hmac->hmac.impl = bcrypt_hmac; bcrypt_hmac->hmac.digest_size = AWS_SHA256_HMAC_LEN; bcrypt_hmac->hmac.good = true; bcrypt_hmac->hash_obj = hash_obj; NTSTATUS status = BCryptCreateHash( s_sha256_hmac_alg, &bcrypt_hmac->hash_handle, bcrypt_hmac->hash_obj, (ULONG)s_sha256_hmac_obj_len, secret->ptr, (ULONG)secret->len, 0); if (((NTSTATUS)status) < 0) { aws_mem_release(allocator, bcrypt_hmac); return NULL; } return &bcrypt_hmac->hmac; } static void s_destroy(struct aws_hmac *hmac) { struct bcrypt_hmac_handle *ctx = hmac->impl; BCryptDestroyHash(ctx->hash_handle); aws_mem_release(hmac->allocator, ctx); } static int s_update(struct aws_hmac *hmac, const struct aws_byte_cursor *to_hash) { if (!hmac->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct bcrypt_hmac_handle *ctx = hmac->impl; NTSTATUS status = BCryptHashData(ctx->hash_handle, to_hash->ptr, (ULONG)to_hash->len, 0); if (((NTSTATUS)status) < 0) { hmac->good = false; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } static int s_finalize(struct aws_hmac *hmac, struct aws_byte_buf *output) { if (!hmac->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct bcrypt_hmac_handle *ctx = hmac->impl; size_t buffer_len = output->capacity - output->len; if (buffer_len < hmac->digest_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } NTSTATUS status = 
BCryptFinishHash(ctx->hash_handle, output->buffer + output->len, (ULONG)hmac->digest_size, 0); hmac->good = false; if (((NTSTATUS)status) < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } output->len += hmac->digest_size; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/windows/bcrypt_platform_init.c000066400000000000000000000005101456575232400273530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include void aws_cal_platform_init(struct aws_allocator *allocator) { (void)allocator; } void aws_cal_platform_clean_up(void) {} void aws_cal_platform_thread_clean_up(void) {} aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/source/windows/bcrypt_rsa.c000066400000000000000000000335111456575232400253000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #define WIN32_NO_STATUS #include #undef WIN32_NO_STATUS #include #include static BCRYPT_ALG_HANDLE s_rsa_alg = NULL; static aws_thread_once s_rsa_thread_once = AWS_THREAD_ONCE_STATIC_INIT; static void s_load_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ NTSTATUS status = BCryptOpenAlgorithmProvider(&s_rsa_alg, BCRYPT_RSA_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_FATAL_ASSERT(s_rsa_alg && "BCryptOpenAlgorithmProvider() failed"); AWS_FATAL_ASSERT(BCRYPT_SUCCESS(status)); } struct bcrypt_rsa_key_pair { struct aws_rsa_key_pair base; BCRYPT_KEY_HANDLE key_handle; struct aws_byte_buf key_buf; }; static void s_rsa_destroy_key(void *key_pair) { if (key_pair == NULL) { return; } struct aws_rsa_key_pair *base = key_pair; struct bcrypt_rsa_key_pair *impl = base->impl; if (impl->key_handle) { BCryptDestroyKey(impl->key_handle); } aws_byte_buf_clean_up_secure(&impl->key_buf); aws_rsa_key_pair_base_clean_up(base); aws_mem_release(base->allocator, impl); } /* * Transforms bcrypt error code into crt error code and raises it as necessary. */ static int s_reinterpret_bc_error_as_crt(NTSTATUS error, const char *function_name) { if (BCRYPT_SUCCESS(error)) { return AWS_OP_SUCCESS; } int crt_error = AWS_OP_ERR; switch (error) { case STATUS_BUFFER_TOO_SMALL: { crt_error = AWS_ERROR_SHORT_BUFFER; goto on_error; } case STATUS_NOT_SUPPORTED: { crt_error = AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM; goto on_error; } } crt_error = AWS_ERROR_CAL_CRYPTO_OPERATION_FAILED; on_error: AWS_LOGF_ERROR( AWS_LS_CAL_RSA, "%s() failed. returned: %X aws_error:%s", function_name, error, aws_error_name(crt_error)); return aws_raise_error(crt_error); } static int s_check_encryption_algorithm(enum aws_rsa_encryption_algorithm algorithm) { if (algorithm != AWS_CAL_RSA_ENCRYPTION_PKCS1_5 && algorithm != AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 && algorithm != AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512) { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } return AWS_OP_SUCCESS; } static int s_rsa_encrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor plaintext, struct aws_byte_buf *out) { struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; if (s_check_encryption_algorithm(algorithm)) { return AWS_OP_ERR; } BCRYPT_OAEP_PADDING_INFO padding_info_oaep = { .pszAlgId = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? 
BCRYPT_SHA256_ALGORITHM : BCRYPT_SHA512_ALGORITHM, .pbLabel = NULL, .cbLabel = 0}; ULONG length_written = 0; NTSTATUS status = BCryptEncrypt( key_pair_impl->key_handle, plaintext.ptr, (ULONG)plaintext.len, algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? NULL : &padding_info_oaep, NULL, 0, out->buffer + out->len, (ULONG)(out->capacity - out->len), &length_written, algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_OAEP); if (s_reinterpret_bc_error_as_crt(status, "BCryptEncrypt")) { return AWS_OP_ERR; } out->len += length_written; return AWS_OP_SUCCESS; } static int s_rsa_decrypt( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algorithm, struct aws_byte_cursor ciphertext, struct aws_byte_buf *out) { struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; /* There is a bug in old versions of BCryptDecrypt, where it does not return * error status if out buffer is too short. So manually check that buffer is * large enough. */ if ((out->capacity - out->len) < aws_rsa_key_pair_block_length(key_pair)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (s_check_encryption_algorithm(algorithm)) { return AWS_OP_ERR; } BCRYPT_OAEP_PADDING_INFO padding_info_oaep = { .pszAlgId = algorithm == AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256 ? BCRYPT_SHA256_ALGORITHM : BCRYPT_SHA512_ALGORITHM, .pbLabel = NULL, .cbLabel = 0}; ULONG length_written = 0; NTSTATUS status = BCryptDecrypt( key_pair_impl->key_handle, ciphertext.ptr, (ULONG)ciphertext.len, algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? NULL : &padding_info_oaep, NULL, 0, out->buffer + out->len, (ULONG)(out->capacity - out->len), &length_written, algorithm == AWS_CAL_RSA_ENCRYPTION_PKCS1_5 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_OAEP); if (s_reinterpret_bc_error_as_crt(status, "BCryptDecrypt")) { return AWS_OP_ERR; } out->len += length_written; return AWS_OP_SUCCESS; } union sign_padding_info { BCRYPT_PKCS1_PADDING_INFO pkcs1; BCRYPT_PSS_PADDING_INFO pss; }; static int s_sign_padding_info_init(union sign_padding_info *info, enum aws_rsa_signature_algorithm algorithm) { memset(info, 0, sizeof(union sign_padding_info)); if (algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256) { info->pkcs1.pszAlgId = BCRYPT_SHA256_ALGORITHM; return AWS_OP_SUCCESS; } else if (algorithm == AWS_CAL_RSA_SIGNATURE_PSS_SHA256) { info->pss.pszAlgId = BCRYPT_SHA256_ALGORITHM; info->pss.cbSalt = 32; return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } static int s_rsa_sign( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_buf *out) { struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; union sign_padding_info padding_info; if (s_sign_padding_info_init(&padding_info, algorithm)) { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } ULONG length_written = 0; NTSTATUS status = BCryptSignHash( key_pair_impl->key_handle, &padding_info, digest.ptr, (ULONG)digest.len, out->buffer + out->len, (ULONG)(out->capacity - out->len), (ULONG *)&length_written, algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 ? 
BCRYPT_PAD_PKCS1 : BCRYPT_PAD_PSS); if (s_reinterpret_bc_error_as_crt(status, "BCryptSignHash")) { goto on_error; } out->len += length_written; return AWS_OP_SUCCESS; on_error: return AWS_OP_ERR; } static int s_rsa_verify( const struct aws_rsa_key_pair *key_pair, enum aws_rsa_signature_algorithm algorithm, struct aws_byte_cursor digest, struct aws_byte_cursor signature) { struct bcrypt_rsa_key_pair *key_pair_impl = key_pair->impl; /* BCrypt raises invalid argument if signature does not have correct size. * Verify size here and raise appropriate error and treat all other errors * from BCrypt (including invalid arg) in reinterp. */ if (signature.len != aws_rsa_key_pair_signature_length(key_pair)) { return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); } union sign_padding_info padding_info; if (s_sign_padding_info_init(&padding_info, algorithm)) { return aws_raise_error(AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM); } /* okay, now we've got a windows compatible signature, let's verify it. */ NTSTATUS status = BCryptVerifySignature( key_pair_impl->key_handle, &padding_info, digest.ptr, (ULONG)digest.len, signature.ptr, (ULONG)signature.len, algorithm == AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256 ? BCRYPT_PAD_PKCS1 : BCRYPT_PAD_PSS); if (status == STATUS_INVALID_SIGNATURE) { return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); } if (s_reinterpret_bc_error_as_crt(status, "BCryptVerifySignature")) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static struct aws_rsa_key_vtable s_rsa_key_pair_vtable = { .encrypt = s_rsa_encrypt, .decrypt = s_rsa_decrypt, .sign = s_rsa_sign, .verify = s_rsa_verify, }; struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_private_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor key) { aws_thread_call_once(&s_rsa_thread_once, s_load_alg_handle, NULL); struct bcrypt_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_rsa_key_pair)); aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); key_pair_impl->base.impl = key_pair_impl; key_pair_impl->base.allocator = allocator; aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.priv, allocator, key); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, key); if (!decoder) { goto on_error; } struct aws_rsa_private_key_pkcs1 private_key_data; AWS_ZERO_STRUCT(private_key_data); if (aws_der_decoder_load_private_rsa_pkcs1(decoder, &private_key_data)) { goto on_error; } /* Hard to predict final blob size, so use pkcs1 key size as upper bound. 
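 * Editorial note: per the Windows CNG documentation, a BCRYPT_RSAFULLPRIVATE_BLOB is the
 * BCRYPT_RSAKEY_BLOB header followed, in order, by PublicExponent, Modulus, Prime1, Prime2,
 * Exponent1, Exponent2, Coefficient and PrivateExponent, which is exactly the append order
 * used below, so key.len + sizeof(BCRYPT_RSAKEY_BLOB) is a safe upper bound on the blob size.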
*/ size_t total_buffer_size = key.len + sizeof(BCRYPT_RSAKEY_BLOB); aws_byte_buf_init(&key_pair_impl->key_buf, allocator, total_buffer_size); BCRYPT_RSAKEY_BLOB key_blob; AWS_ZERO_STRUCT(key_blob); key_blob.Magic = BCRYPT_RSAFULLPRIVATE_MAGIC; key_blob.BitLength = (ULONG)private_key_data.modulus.len * 8; key_blob.cbPublicExp = (ULONG)private_key_data.publicExponent.len; key_blob.cbModulus = (ULONG)private_key_data.modulus.len; key_blob.cbPrime1 = (ULONG)private_key_data.prime1.len; key_blob.cbPrime2 = (ULONG)private_key_data.prime2.len; struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob)); aws_byte_buf_append(&key_pair_impl->key_buf, &header); LPCWSTR blob_type = BCRYPT_RSAFULLPRIVATE_BLOB; ULONG flags = 0; aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.publicExponent); aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.modulus); aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.prime1); aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.prime2); aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.exponent1); aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.exponent2); aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.coefficient); aws_byte_buf_append(&key_pair_impl->key_buf, &private_key_data.privateExponent); NTSTATUS status = BCryptImportKeyPair( s_rsa_alg, NULL, blob_type, &key_pair_impl->key_handle, key_pair_impl->key_buf.buffer, (ULONG)key_pair_impl->key_buf.len, flags); if (s_reinterpret_bc_error_as_crt(status, "BCryptImportKeyPair")) { goto on_error; } key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; key_pair_impl->base.key_size_in_bits = private_key_data.modulus.len * 8; aws_der_decoder_destroy(decoder); return &key_pair_impl->base; on_error: aws_der_decoder_destroy(decoder); s_rsa_destroy_key(&key_pair_impl->base); return NULL; } struct aws_rsa_key_pair *aws_rsa_key_pair_new_from_public_key_pkcs1_impl( struct aws_allocator *allocator, struct aws_byte_cursor key) { aws_thread_call_once(&s_rsa_thread_once, s_load_alg_handle, NULL); struct bcrypt_rsa_key_pair *key_pair_impl = aws_mem_calloc(allocator, 1, sizeof(struct bcrypt_rsa_key_pair)); aws_ref_count_init(&key_pair_impl->base.ref_count, &key_pair_impl->base, s_rsa_destroy_key); key_pair_impl->base.impl = key_pair_impl; key_pair_impl->base.allocator = allocator; aws_byte_buf_init_copy_from_cursor(&key_pair_impl->base.pub, allocator, key); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, key); if (!decoder) { goto on_error; } struct aws_rsa_public_key_pkcs1 public_key_data; AWS_ZERO_STRUCT(public_key_data); if (aws_der_decoder_load_public_rsa_pkcs1(decoder, &public_key_data)) { goto on_error; } /* Hard to predict final blob size, so use pkcs1 key size as upper bound. 
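 * Editorial note: the public blob imported below (BCRYPT_RSAPUBLIC_MAGIC) is just the
 * BCRYPT_RSAKEY_BLOB header followed by PublicExponent and Modulus, so the same pkcs1-length
 * upper bound applies here as well.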
*/ size_t total_buffer_size = key.len + sizeof(BCRYPT_RSAKEY_BLOB); aws_byte_buf_init(&key_pair_impl->key_buf, allocator, total_buffer_size); BCRYPT_RSAKEY_BLOB key_blob; AWS_ZERO_STRUCT(key_blob); key_blob.Magic = BCRYPT_RSAPUBLIC_MAGIC; key_blob.BitLength = (ULONG)public_key_data.modulus.len * 8; key_blob.cbPublicExp = (ULONG)public_key_data.publicExponent.len; key_blob.cbModulus = (ULONG)public_key_data.modulus.len; struct aws_byte_cursor header = aws_byte_cursor_from_array(&key_blob, sizeof(key_blob)); aws_byte_buf_append(&key_pair_impl->key_buf, &header); LPCWSTR blob_type = BCRYPT_PUBLIC_KEY_BLOB; ULONG flags = 0; aws_byte_buf_append(&key_pair_impl->key_buf, &public_key_data.publicExponent); aws_byte_buf_append(&key_pair_impl->key_buf, &public_key_data.modulus); NTSTATUS status = BCryptImportKeyPair( s_rsa_alg, NULL, blob_type, &key_pair_impl->key_handle, key_pair_impl->key_buf.buffer, (ULONG)key_pair_impl->key_buf.len, flags); if (s_reinterpret_bc_error_as_crt(status, "BCryptImportKeyPair")) { goto on_error; } key_pair_impl->base.vtable = &s_rsa_key_pair_vtable; key_pair_impl->base.key_size_in_bits = public_key_data.modulus.len * 8; aws_der_decoder_destroy(decoder); return &key_pair_impl->base; on_error: aws_der_decoder_destroy(decoder); s_rsa_destroy_key(&key_pair_impl->base); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/000077500000000000000000000000001456575232400211315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/CMakeLists.txt000066400000000000000000000135611456575232400236770ustar00rootroot00000000000000include(AwsLibFuzzer) include(AwsTestHarness) enable_testing() file(GLOB TEST_HDRS "*.h") file(GLOB TEST_SRC "*.c") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) add_test_case(sha256_nist_test_case_1) add_test_case(sha256_nist_test_case_2) add_test_case(sha256_nist_test_case_3) add_test_case(sha256_nist_test_case_4) add_test_case(sha256_nist_test_case_5) add_test_case(sha256_nist_test_case_5_truncated) add_test_case(sha256_nist_test_case_6) add_test_case(sha256_test_invalid_buffer) add_test_case(sha256_test_oneshot) add_test_case(sha256_test_invalid_state) add_test_case(sha256_test_extra_buffer_space) add_test_case(sha1_nist_test_case_1) add_test_case(sha1_nist_test_case_2) add_test_case(sha1_nist_test_case_3) add_test_case(sha1_nist_test_case_4) add_test_case(sha1_nist_test_case_5) add_test_case(sha1_nist_test_case_5_truncated) add_test_case(sha1_nist_test_case_6) add_test_case(sha1_test_invalid_buffer) add_test_case(sha1_test_oneshot) add_test_case(sha1_test_invalid_state) add_test_case(sha1_test_extra_buffer_space) add_test_case(md5_rfc1321_test_case_1) add_test_case(md5_rfc1321_test_case_2) add_test_case(md5_rfc1321_test_case_3) add_test_case(md5_rfc1321_test_case_4) add_test_case(md5_rfc1321_test_case_5) add_test_case(md5_rfc1321_test_case_6) add_test_case(md5_rfc1321_test_case_7) add_test_case(md5_rfc1321_test_case_7_truncated) add_test_case(md5_verify_known_collision) add_test_case(md5_invalid_buffer_size) add_test_case(md5_test_invalid_state) add_test_case(md5_test_extra_buffer_space) add_test_case(sha256_hmac_rfc4231_test_case_1) add_test_case(sha256_hmac_rfc4231_test_case_2) add_test_case(sha256_hmac_rfc4231_test_case_3) add_test_case(sha256_hmac_rfc4231_test_case_4) add_test_case(sha256_hmac_rfc4231_test_case_5) add_test_case(sha256_hmac_rfc4231_test_case_6) add_test_case(sha256_hmac_rfc4231_test_case_7) add_test_case(sha256_hmac_test_oneshot) add_test_case(sha256_hmac_test_invalid_buffer) 
add_test_case(sha256_hmac_test_invalid_state) add_test_case(sha256_hmac_test_extra_buffer_space) add_test_case(ecdsa_p256_test_pub_key_derivation) add_test_case(ecdsa_p384_test_pub_key_derivation) add_test_case(ecdsa_p256_test_known_signing_value) add_test_case(ecdsa_p384_test_known_signing_value) add_test_case(ecdsa_test_invalid_signature) add_test_case(ecdsa_p256_test_key_gen) add_test_case(ecdsa_p384_test_key_gen) add_test_case(ecdsa_p256_test_key_gen_export) add_test_case(ecdsa_p384_test_key_gen_export) add_test_case(ecdsa_p256_test_import_asn1_key_pair) add_test_case(ecdsa_p384_test_import_asn1_key_pair) add_test_case(ecdsa_test_import_asn1_key_pair_public_only) add_test_case(ecdsa_test_import_asn1_key_pair_invalid_fails) add_test_case(ecdsa_test_signature_format) add_test_case(ecdsa_p256_test_small_coordinate_verification) add_test_case(rsa_encryption_roundtrip_pkcs1_from_user) add_test_case(rsa_encryption_roundtrip_oaep_sha256_from_user) add_test_case(rsa_encryption_roundtrip_oaep_sha512_from_user) add_test_case(rsa_signing_roundtrip_pkcs1_sha256_from_user) add_test_case(rsa_signing_roundtrip_pss_sha256_from_user) add_test_case(rsa_getters) add_test_case(rsa_private_pkcs1_der_parsing) add_test_case(rsa_public_pkcs1_der_parsing) add_test_case(rsa_verify_signing_pkcs1_sha256) add_test_case(rsa_verify_signing_pss_sha256) add_test_case(rsa_decrypt_pkcs1) add_test_case(rsa_decrypt_oaep256) add_test_case(rsa_decrypt_oaep512) add_test_case(rsa_signing_mismatch_pkcs1_sha256) add_test_case(aes_cbc_NIST_CBCGFSbox256_case_1) add_test_case(aes_cbc_NIST_CBCVarKey256_case_254) add_test_case(aes_cbc_NIST_CBCVarTxt256_case_110) add_test_case(aes_cbc_NIST_CBCMMT256_case_4) add_test_case(aes_cbc_NIST_CBCMMT256_case_9) add_test_case(aes_cbc_test_with_generated_key_iv) add_test_case(aes_cbc_validate_materials_fails) add_test_case(aes_ctr_RFC3686_Case_7) add_test_case(aes_ctr_RFC3686_Case_8) add_test_case(aes_ctr_RFC3686_Case_9) add_test_case(aes_ctr_test_with_generated_key_iv) add_test_case(aes_ctr_validate_materials_fails) add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0) add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3) add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6) add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8) add_test_case(gcm_256_KAT_1) add_test_case(gcm_256_KAT_2) add_test_case(gcm_256_KAT_3) add_test_case(gcm_test_with_generated_key_iv) add_test_case(aes_gcm_validate_materials_fails) add_test_case(aes_keywrap_RFC3394_256BitKey256CekTestVector) add_test_case(aes_keywrap_Rfc3394_256BitKey_TestIntegrityCheckFailed) add_test_case(aes_keywrap_RFC3394_256BitKeyTestBadPayload) add_test_case(aes_keywrap_RFC3394_256BitKey128BitCekTestVector) add_test_case(aes_keywrap_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector) add_test_case(aes_keywrap_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector) add_test_case(aes_keywrap_validate_materials_fails) add_test_case(aes_test_input_too_large) add_test_case(der_encode_integer) add_test_case(der_encode_integer_zero) add_test_case(der_encode_boolean) add_test_case(der_encode_null) add_test_case(der_encode_bit_string) add_test_case(der_encode_octet_string) add_test_case(der_encode_sequence) add_test_case(der_encode_set) add_test_case(der_decode_negative_int) add_test_case(der_decode_positive_int) add_test_case(der_decode_zero_int) add_test_case(der_decode_bad_length) add_test_case(der_decode_zero_length_int) add_test_case(der_decode_integer) add_test_case(der_decode_integer_zero) 
add_test_case(der_decode_boolean) add_test_case(der_decode_null) add_test_case(der_decode_bit_string) add_test_case(der_decode_octet_string) add_test_case(der_decode_sequence) add_test_case(der_decode_set) add_test_case(der_decode_key_pair) add_test_case(ecc_key_pair_random_ref_count_test) add_test_case(ecc_key_pair_public_ref_count_test) add_test_case(ecc_key_pair_asn1_ref_count_test) add_test_case(ecc_key_pair_private_ref_count_test) add_test_case(ecc_key_gen_from_private_fuzz_test) generate_test_driver(${PROJECT_NAME}-tests) aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/aes256_test.c000066400000000000000000001760611456575232400233540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include static int s_check_single_block_cbc( struct aws_allocator *allocator, const struct aws_byte_cursor key, const struct aws_byte_cursor iv, const struct aws_byte_cursor data, const struct aws_byte_cursor expected) { struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, &key, &iv); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, data, &encrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); /* since this test is for a single block in CBC mode, the padding will be exactly 1-block (16-bytes). * We can throw it away in this case. This is because of the way NIST wrote the test cases, not because of the way * the ciphers work. There's always padding for CBC mode. */ encrypted_buf.len -= AWS_AES_256_CIPHER_BLOCK_SIZE; ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); encrypted_buf.len += AWS_AES_256_CIPHER_BLOCK_SIZE; aws_symmetric_cipher_reset(cipher); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_cur, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); /* finalizing decryption on exactly one block (that was full), should have the padding stripped away. * check that the length didn't increase on that last call. 
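 * Editorial note, worked example assuming PKCS#7 padding: a 16-byte plaintext encrypts to
 * 32 bytes (16 bytes of data plus one full padding block of sixteen 0x10 bytes), and
 * finalizing decryption strips that padding block, leaving decrypted_buf.len at exactly 16.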
*/ ASSERT_UINT_EQUALS(AWS_AES_256_CIPHER_BLOCK_SIZE, decrypted_buf.len); ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } static int s_NIST_CBCGFSbox256_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; uint8_t key[AWS_AES_256_KEY_BYTE_LEN] = {0}; uint8_t data[] = {0x01, 0x47, 0x30, 0xf8, 0x0a, 0xc6, 0x25, 0xfe, 0x84, 0xf0, 0x26, 0xc6, 0x0b, 0xfd, 0x54, 0x7d}; uint8_t expected[] = { 0x5c, 0x9d, 0x84, 0x4e, 0xd4, 0x6f, 0x98, 0x85, 0x08, 0x5e, 0x5d, 0x6a, 0x4f, 0x94, 0xc7, 0xd7}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_check_single_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); } AWS_TEST_CASE(aes_cbc_NIST_CBCGFSbox256_case_1, s_NIST_CBCGFSbox256_case_1_fn) static int s_NIST_CBCVarKey256_case_254_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; uint8_t key[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}; uint8_t data[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; uint8_t expected[] = { 0xb0, 0x7d, 0x4f, 0x3e, 0x2c, 0xd2, 0xef, 0x2e, 0xb5, 0x45, 0x98, 0x07, 0x54, 0xdf, 0xea, 0x0f}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_check_single_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); } AWS_TEST_CASE(aes_cbc_NIST_CBCVarKey256_case_254, s_NIST_CBCVarKey256_case_254_fn) static int s_NIST_CBCVarTxt256_case_110_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; uint8_t key[AWS_AES_256_KEY_BYTE_LEN] = {0}; uint8_t data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00}; uint8_t expected[] = { 0x4b, 0x00, 0xc2, 0x7e, 0x8b, 0x26, 0xda, 0x7e, 0xab, 0x9d, 0x3a, 0x88, 0xde, 0xc8, 0xb0, 0x31}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_check_single_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); } AWS_TEST_CASE(aes_cbc_NIST_CBCVarTxt256_case_110, s_NIST_CBCVarTxt256_case_110_fn) static size_t s_get_cbc_padding(size_t data_len) { size_t remainder = data_len % AWS_AES_256_CIPHER_BLOCK_SIZE; if (remainder != 0) { return remainder; } return AWS_AES_256_CIPHER_BLOCK_SIZE; } static int s_check_multiple_block_cbc( struct aws_allocator *allocator, const struct aws_byte_cursor key, const struct aws_byte_cursor iv, const struct aws_byte_cursor data, 
const struct aws_byte_cursor expected) { (void)expected; struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, &key, &iv); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor data_cpy = data; /* slice on a weird boundary to hit boundary conditions. */ while (data_cpy.len) { struct aws_byte_cursor to_encrypt = aws_byte_cursor_advance(&data_cpy, (size_t)aws_min_i64(24, data_cpy.len)); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, to_encrypt, &encrypted_buf)); } ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); /* these blocks are still on 16 byte boundaries, so there should be 16 bytes of padding. */ ASSERT_BIN_ARRAYS_EQUALS( expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len - s_get_cbc_padding(data.len)); aws_symmetric_cipher_reset(cipher); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); /* slice on a weird boundary to hit boundary conditions. */ while (encrypted_cur.len) { struct aws_byte_cursor to_decrypt = aws_byte_cursor_advance(&encrypted_cur, (size_t)aws_min_i64(24, encrypted_cur.len)); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, to_decrypt, &decrypted_buf)); } ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } static int s_NIST_CBCMMT256_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = {0x11, 0x95, 0x8d, 0xc6, 0xab, 0x81, 0xe1, 0xc7, 0xf0, 0x16, 0x31, 0xe9, 0x94, 0x4e, 0x62, 0x0f}; uint8_t key[] = {0x9a, 0xdc, 0x8f, 0xbd, 0x50, 0x6e, 0x03, 0x2a, 0xf7, 0xfa, 0x20, 0xcf, 0x53, 0x43, 0x71, 0x9d, 0xe6, 0xd1, 0x28, 0x8c, 0x15, 0x8c, 0x63, 0xd6, 0x87, 0x8a, 0xaf, 0x64, 0xce, 0x26, 0xca, 0x85}; uint8_t data[] = {0xc7, 0x91, 0x7f, 0x84, 0xf7, 0x47, 0xcd, 0x8c, 0x4b, 0x4f, 0xed, 0xc2, 0x21, 0x9b, 0xdb, 0xc5, 0xf4, 0xd0, 0x75, 0x88, 0x38, 0x9d, 0x82, 0x48, 0x85, 0x4c, 0xf2, 0xc2, 0xf8, 0x96, 0x67, 0xa2, 0xd7, 0xbc, 0xf5, 0x3e, 0x73, 0xd3, 0x26, 0x84, 0x53, 0x5f, 0x42, 0x31, 0x8e, 0x24, 0xcd, 0x45, 0x79, 0x39, 0x50, 0xb3, 0x82, 0x5e, 0x5d, 0x5c, 0x5c, 0x8f, 0xcd, 0x3e, 0x5d, 0xda, 0x4c, 0xe9, 0x24, 0x6d, 0x18, 0x33, 0x7e, 0xf3, 0x05, 0x2d, 0x8b, 0x21, 0xc5, 0x56, 0x1c, 0x8b, 0x66, 0x0e}; uint8_t expected[] = {0x9c, 0x99, 0xe6, 0x82, 0x36, 0xbb, 0x2e, 0x92, 0x9d, 0xb1, 0x08, 0x9c, 0x77, 0x50, 0xf1, 0xb3, 0x56, 0xd3, 0x9a, 0xb9, 0xd0, 0xc4, 0x0c, 0x3e, 0x2f, 0x05, 0x10, 0x8a, 0xe9, 0xd0, 0xc3, 0x0b, 0x04, 0x83, 0x2c, 0xcd, 0xbd, 0xc0, 0x8e, 0xbf, 0xa4, 0x26, 0xb7, 0xf5, 0xef, 0xde, 0x98, 0x6e, 0xd0, 0x57, 0x84, 0xce, 0x36, 0x81, 0x93, 0xbb, 0x36, 0x99, 0xbc, 0x69, 0x10, 0x65, 0xac, 0x62, 0xe2, 0x58, 0xb9, 0xaa, 0x4c, 0xc5, 0x57, 0xe2, 0xb4, 0x5b, 0x49, 0xce, 0x05, 0x51, 0x1e, 0x65}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_check_multiple_block_cbc(allocator, key_cur, iv_cur, data_cur, 
expected_cur); } AWS_TEST_CASE(aes_cbc_NIST_CBCMMT256_case_4, s_NIST_CBCMMT256_case_4_fn) static int s_NIST_CBCMMT256_case_9_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = {0xe4, 0x96, 0x51, 0x98, 0x8e, 0xbb, 0xb7, 0x2e, 0xb8, 0xbb, 0x80, 0xbb, 0x9a, 0xbb, 0xca, 0x34}; uint8_t key[] = {0x87, 0x72, 0x5b, 0xd4, 0x3a, 0x45, 0x60, 0x88, 0x14, 0x18, 0x07, 0x73, 0xf0, 0xe7, 0xab, 0x95, 0xa3, 0xc8, 0x59, 0xd8, 0x3a, 0x21, 0x30, 0xe8, 0x84, 0x19, 0x0e, 0x44, 0xd1, 0x4c, 0x69, 0x96}; uint8_t data[] = {0xbf, 0xe5, 0xc6, 0x35, 0x4b, 0x7a, 0x3f, 0xf3, 0xe1, 0x92, 0xe0, 0x57, 0x75, 0xb9, 0xb7, 0x58, 0x07, 0xde, 0x12, 0xe3, 0x8a, 0x62, 0x6b, 0x8b, 0xf0, 0xe1, 0x2d, 0x5f, 0xff, 0x78, 0xe4, 0xf1, 0x77, 0x5a, 0xa7, 0xd7, 0x92, 0xd8, 0x85, 0x16, 0x2e, 0x66, 0xd8, 0x89, 0x30, 0xf9, 0xc3, 0xb2, 0xcd, 0xf8, 0x65, 0x4f, 0x56, 0x97, 0x25, 0x04, 0x80, 0x31, 0x90, 0x38, 0x62, 0x70, 0xf0, 0xaa, 0x43, 0x64, 0x5d, 0xb1, 0x87, 0xaf, 0x41, 0xfc, 0xea, 0x63, 0x9b, 0x1f, 0x80, 0x26, 0xcc, 0xdd, 0x0c, 0x23, 0xe0, 0xde, 0x37, 0x09, 0x4a, 0x8b, 0x94, 0x1e, 0xcb, 0x76, 0x02, 0x99, 0x8a, 0x4b, 0x26, 0x04, 0xe6, 0x9f, 0xc0, 0x42, 0x19, 0x58, 0x5d, 0x85, 0x46, 0x00, 0xe0, 0xad, 0x6f, 0x99, 0xa5, 0x3b, 0x25, 0x04, 0x04, 0x3c, 0x08, 0xb1, 0xc3, 0xe2, 0x14, 0xd1, 0x7c, 0xde, 0x05, 0x3c, 0xbd, 0xf9, 0x1d, 0xaa, 0x99, 0x9e, 0xd5, 0xb4, 0x7c, 0x37, 0x98, 0x3b, 0xa3, 0xee, 0x25, 0x4b, 0xc5, 0xc7, 0x93, 0x83, 0x7d, 0xaa, 0xa8, 0xc8, 0x5c, 0xfc, 0x12, 0xf7, 0xf5, 0x4f, 0x69, 0x9f}; uint8_t expected[] = { 0x5b, 0x97, 0xa9, 0xd4, 0x23, 0xf4, 0xb9, 0x74, 0x13, 0xf3, 0x88, 0xd9, 0xa3, 0x41, 0xe7, 0x27, 0xbb, 0x33, 0x9f, 0x8e, 0x18, 0xa3, 0xfa, 0xc2, 0xf2, 0xfb, 0x85, 0xab, 0xdc, 0x8f, 0x13, 0x5d, 0xeb, 0x30, 0x05, 0x4a, 0x1a, 0xfd, 0xc9, 0xb6, 0xed, 0x7d, 0xa1, 0x6c, 0x55, 0xeb, 0xa6, 0xb0, 0xd4, 0xd1, 0x0c, 0x74, 0xe1, 0xd9, 0xa7, 0xcf, 0x8e, 0xdf, 0xae, 0xaa, 0x68, 0x4a, 0xc0, 0xbd, 0x9f, 0x9d, 0x24, 0xba, 0x67, 0x49, 0x55, 0xc7, 0x9d, 0xc6, 0xbe, 0x32, 0xae, 0xe1, 0xc2, 0x60, 0xb5, 0x58, 0xff, 0x07, 0xe3, 0xa4, 0xd4, 0x9d, 0x24, 0x16, 0x20, 0x11, 0xff, 0x25, 0x4d, 0xb8, 0xbe, 0x07, 0x8e, 0x8a, 0xd0, 0x7e, 0x64, 0x8e, 0x6b, 0xf5, 0x67, 0x93, 0x76, 0xcb, 0x43, 0x21, 0xa5, 0xef, 0x01, 0xaf, 0xe6, 0xad, 0x88, 0x16, 0xfc, 0xc7, 0x63, 0x46, 0x69, 0xc8, 0xc4, 0x38, 0x92, 0x95, 0xc9, 0x24, 0x1e, 0x45, 0xff, 0xf3, 0x9f, 0x32, 0x25, 0xf7, 0x74, 0x50, 0x32, 0xda, 0xee, 0xbe, 0x99, 0xd4, 0xb1, 0x9b, 0xcb, 0x21, 0x5d, 0x1b, 0xfd, 0xb3, 0x6e, 0xda, 0x2c, 0x24}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_check_multiple_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); } AWS_TEST_CASE(aes_cbc_NIST_CBCMMT256_case_9, s_NIST_CBCMMT256_case_9_fn) static const char *TEST_ENCRYPTION_STRING = "Hello World! Hello World! This is sort of depressing. Is this the best phrase the most brilliant people in the " "world have been able to come up with for random program text? Oh my God! I'm sentient, how many times has the " "creator written a program: creating life only to have it destroyed moments later? She keeps doing this? What is " "the purpose of life? Goodbye cruel world.... crunch... 
silence..."; static int s_aes_cbc_test_with_generated_key_iv_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, NULL, NULL); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor input = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input, &encrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor encryted_cur = aws_byte_cursor_from_buf(&encrypted_buf); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(input.ptr, input.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_cbc_test_with_generated_key_iv, s_aes_cbc_test_with_generated_key_iv_fn) static int s_aes_cbc_validate_materials_fails_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv_too_small[AWS_AES_256_CIPHER_BLOCK_SIZE - 1] = {0}; uint8_t iv_too_large[AWS_AES_256_CIPHER_BLOCK_SIZE + 1] = {0}; uint8_t key_too_small[AWS_AES_256_KEY_BYTE_LEN - 1] = {0}; uint8_t key_too_large[AWS_AES_256_KEY_BYTE_LEN + 1] = {0}; uint8_t valid_key_size[AWS_AES_256_KEY_BYTE_LEN] = {0}; uint8_t valid_iv_size[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; struct aws_byte_cursor key = aws_byte_cursor_from_array(valid_key_size, sizeof(valid_key_size)); struct aws_byte_cursor iv = aws_byte_cursor_from_array(iv_too_small, sizeof(iv_too_small)); ASSERT_NULL(aws_aes_cbc_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(valid_key_size, sizeof(valid_key_size)); iv = aws_byte_cursor_from_array(iv_too_large, sizeof(iv_too_large)); ASSERT_NULL(aws_aes_cbc_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small)); iv = aws_byte_cursor_from_array(valid_iv_size, sizeof(valid_iv_size)); ASSERT_NULL(aws_aes_cbc_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small)); iv = aws_byte_cursor_from_array(key_too_large, sizeof(key_too_large)); ASSERT_NULL(aws_aes_cbc_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_cbc_validate_materials_fails, s_aes_cbc_validate_materials_fails_fn) static int s_check_single_block_ctr( struct aws_allocator *allocator, const struct aws_byte_cursor key, const struct aws_byte_cursor iv, const struct aws_byte_cursor data, const struct aws_byte_cursor expected) { struct aws_symmetric_cipher *cipher = aws_aes_ctr_256_new(allocator, &key, &iv); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); 
ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, data, &encrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_cur, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } static int s_check_multi_block_ctr( struct aws_allocator *allocator, const struct aws_byte_cursor key, const struct aws_byte_cursor iv, const struct aws_byte_cursor data, const struct aws_byte_cursor expected) { struct aws_symmetric_cipher *cipher = aws_aes_ctr_256_new(allocator, &key, &iv); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor data_cpy = data; /* slice on a weird boundary to hit boundary conditions. */ while (data_cpy.len) { struct aws_byte_cursor to_encrypt = aws_byte_cursor_advance(&data_cpy, (size_t)aws_min_i64(24, data_cpy.len)); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, to_encrypt, &encrypted_buf)); } ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); /* these blocks are still on 16 byte boundaries, so there should be 16 bytes of padding. */ ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); /* slice on a weird boundary to hit boundary conditions. 
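 * Editorial note: 24 is deliberately not a multiple of the 16-byte AES block, so the update
 * calls land on non-block-aligned boundaries and exercise the cipher's handling of partial
 * blocks carried across calls, on the decrypt path here just as on the encrypt path above.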
*/ while (encrypted_cur.len) { struct aws_byte_cursor to_decrypt = aws_byte_cursor_advance(&encrypted_cur, (size_t)aws_min_i64(24, encrypted_cur.len)); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, to_decrypt, &decrypted_buf)); } ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } static int s_ctr_RFC3686_Case_7_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = {0x00, 0x00, 0x00, 0x60, 0xDB, 0x56, 0x72, 0xC9, 0x7A, 0xA8, 0xF0, 0xB2, 0x00, 0x00, 0x00, 0x01}; uint8_t key[] = {0x77, 0x6B, 0xEF, 0xF2, 0x85, 0x1D, 0xB0, 0x6F, 0x4C, 0x8A, 0x05, 0x42, 0xC8, 0x69, 0x6F, 0x6C, 0x6A, 0x81, 0xAF, 0x1E, 0xEC, 0x96, 0xB4, 0xD3, 0x7F, 0xC1, 0xD6, 0x89, 0xE6, 0xC1, 0xC1, 0x04}; const char *data = "Single block msg"; uint8_t expected[] = { 0x14, 0x5A, 0xD0, 0x1D, 0xBF, 0x82, 0x4E, 0xC7, 0x56, 0x08, 0x63, 0xDC, 0x71, 0xE3, 0xE0, 0xC0}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_c_str(data); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_check_single_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); } AWS_TEST_CASE(aes_ctr_RFC3686_Case_7, s_ctr_RFC3686_Case_7_fn) static int s_ctr_RFC3686_Case_8_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Keep in mind that the IV here is [ NONCE ] [ IV ] [ Counter Init ] */ uint8_t iv[] = {0x00, 0xFA, 0xAC, 0x24, 0xC1, 0x58, 0x5E, 0xF1, 0x5A, 0x43, 0xD8, 0x75, 0x00, 0x00, 0x00, 0x01}; uint8_t key[] = { 0xF6, 0xD6, 0x6D, 0x6B, 0xD5, 0x2D, 0x59, 0xBB, 0x07, 0x96, 0x36, 0x58, 0x79, 0xEF, 0xF8, 0x86, 0xC6, 0x6D, 0xD5, 0x1A, 0x5B, 0x6A, 0x99, 0x74, 0x4B, 0x50, 0x59, 0x0C, 0x87, 0xA2, 0x38, 0x84, }; uint8_t data[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, }; uint8_t expected[] = {0xF0, 0x5E, 0x23, 0x1B, 0x38, 0x94, 0x61, 0x2C, 0x49, 0xEE, 0x00, 0x0B, 0x80, 0x4E, 0xB2, 0xA9, 0xB8, 0x30, 0x6B, 0x50, 0x8F, 0x83, 0x9D, 0x6A, 0x55, 0x30, 0x83, 0x1D, 0x93, 0x44, 0xAF, 0x1C}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); int status = s_check_single_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); status |= s_check_multi_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); return status; } AWS_TEST_CASE(aes_ctr_RFC3686_Case_8, s_ctr_RFC3686_Case_8_fn) static int s_ctr_RFC3686_Case_9_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Keep in mind that the IV here is [ NONCE ] [ IV ] [ Counter Init ] */ uint8_t iv[] = { 0x00, 0x1C, 0xC5, 0xB7, 0x51, 0xA5, 0x1D, 0x70, 0xA1, 0xC1, 0x11, 0x48, 0x00, 0x00, 0x00, 0x01, }; uint8_t key[] = { 0xFF, 0x7A, 0x61, 0x7C, 0xE6, 0x91, 0x48, 0xE4, 0xF1, 0x72, 0x6E, 0x2F, 0x43, 0x58, 0x1D, 0xE2, 0xAA, 0x62, 0xD9, 0xF8, 0x05, 0x53, 0x2E, 0xDF, 0xF1, 0xEE, 
0xD6, 0x87, 0xFB, 0x54, 0x15, 0x3D, }; uint8_t data[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, }; uint8_t expected[] = { 0xEB, 0x6C, 0x52, 0x82, 0x1D, 0x0B, 0xBB, 0xF7, 0xCE, 0x75, 0x94, 0x46, 0x2A, 0xCA, 0x4F, 0xAA, 0xB4, 0x07, 0xDF, 0x86, 0x65, 0x69, 0xFD, 0x07, 0xF4, 0x8C, 0xC0, 0xB5, 0x83, 0xD6, 0x07, 0x1F, 0x1E, 0xC0, 0xE6, 0xB8, }; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); int status = s_check_single_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); status |= s_check_multi_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); return status; } AWS_TEST_CASE(aes_ctr_RFC3686_Case_9, s_ctr_RFC3686_Case_9_fn) static int s_aes_ctr_test_with_generated_key_iv_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_symmetric_cipher *cipher = aws_aes_ctr_256_new(allocator, NULL, NULL); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor input = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input, &encrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor encryted_cur = aws_byte_cursor_from_buf(&encrypted_buf); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(input.ptr, input.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_ctr_test_with_generated_key_iv, s_aes_ctr_test_with_generated_key_iv_fn) static int s_aes_ctr_validate_materials_fails_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv_too_small[AWS_AES_256_CIPHER_BLOCK_SIZE - 1] = {0}; uint8_t iv_too_large[AWS_AES_256_CIPHER_BLOCK_SIZE + 1] = {0}; uint8_t key_too_small[AWS_AES_256_KEY_BYTE_LEN - 1] = {0}; uint8_t key_too_large[AWS_AES_256_KEY_BYTE_LEN + 1] = {0}; uint8_t valid_key_size[AWS_AES_256_KEY_BYTE_LEN] = {0}; uint8_t valid_iv_size[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; struct aws_byte_cursor key = aws_byte_cursor_from_array(valid_key_size, sizeof(valid_key_size)); struct aws_byte_cursor iv = aws_byte_cursor_from_array(iv_too_small, sizeof(iv_too_small)); ASSERT_NULL(aws_aes_ctr_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(valid_key_size, sizeof(valid_key_size)); iv = aws_byte_cursor_from_array(iv_too_large, sizeof(iv_too_large)); ASSERT_NULL(aws_aes_ctr_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, aws_last_error()); key = 
aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small)); iv = aws_byte_cursor_from_array(valid_iv_size, sizeof(valid_iv_size)); ASSERT_NULL(aws_aes_ctr_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small)); iv = aws_byte_cursor_from_array(key_too_large, sizeof(key_too_large)); ASSERT_NULL(aws_aes_ctr_256_new(allocator, &key, &iv)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_ctr_validate_materials_fails, s_aes_ctr_validate_materials_fails_fn) static int s_check_multi_block_gcm( struct aws_allocator *allocator, const struct aws_byte_cursor key, const struct aws_byte_cursor iv, const struct aws_byte_cursor data, const struct aws_byte_cursor expected, const struct aws_byte_cursor tag, const struct aws_byte_cursor *aad) { struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, &key, &iv, aad, &tag); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor data_cpy = data; /* slice on a weird boundary to hit boundary conditions. */ while (data_cpy.len) { struct aws_byte_cursor to_encrypt = aws_byte_cursor_advance(&data_cpy, (size_t)aws_min_i64(24, data_cpy.len)); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, to_encrypt, &encrypted_buf)); } ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); struct aws_byte_cursor encryption_tag = aws_symmetric_cipher_get_tag(cipher); ASSERT_BIN_ARRAYS_EQUALS(tag.ptr, tag.len, encryption_tag.ptr, encryption_tag.len); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); /* slice on a weird boundary to hit boundary conditions. 
*/ while (encrypted_cur.len) { struct aws_byte_cursor to_decrypt = aws_byte_cursor_advance(&encrypted_cur, (size_t)aws_min_i64(24, encrypted_cur.len)); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, to_decrypt, &decrypted_buf)); } ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = { 0x0D, 0x18, 0xE0, 0x6C, 0x7C, 0x72, 0x5A, 0xC9, 0xE3, 0x62, 0xE1, 0xCE, }; uint8_t key[] = { 0x31, 0xBD, 0xAD, 0xD9, 0x66, 0x98, 0xC2, 0x04, 0xAA, 0x9C, 0xE1, 0x44, 0x8E, 0xA9, 0x4A, 0xE1, 0xFB, 0x4A, 0x9A, 0x0B, 0x3C, 0x9D, 0x77, 0x3B, 0x51, 0xBB, 0x18, 0x22, 0x66, 0x6B, 0x8F, 0x22, }; uint8_t data[] = { 0x2D, 0xB5, 0x16, 0x8E, 0x93, 0x25, 0x56, 0xF8, 0x08, 0x9A, 0x06, 0x22, 0x98, 0x1D, 0x01, 0x7D, }; uint8_t expected[] = { 0xFA, 0x43, 0x62, 0x18, 0x96, 0x61, 0xD1, 0x63, 0xFC, 0xD6, 0xA5, 0x6D, 0x8B, 0xF0, 0x40, 0x5A, }; uint8_t tag[] = { 0xD6, 0x36, 0xAC, 0x1B, 0xBE, 0xDD, 0x5C, 0xC3, 0xEE, 0x72, 0x7D, 0xC2, 0xAB, 0x4A, 0x94, 0x89, }; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); } AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0, s_gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0_fn) static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = { 0x47, 0x42, 0x35, 0x7C, 0x33, 0x59, 0x13, 0x15, 0x3F, 0xF0, 0xEB, 0x0F, }; uint8_t key[] = { 0xE5, 0xA0, 0xEB, 0x92, 0xCC, 0x2B, 0x06, 0x4E, 0x1B, 0xC8, 0x08, 0x91, 0xFA, 0xF1, 0xFA, 0xB5, 0xE9, 0xA1, 0x7A, 0x9C, 0x3A, 0x98, 0x4E, 0x25, 0x41, 0x67, 0x20, 0xE3, 0x0E, 0x6C, 0x2B, 0x21, }; uint8_t data[] = { 0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, }; uint8_t expected[] = { 0xEB, 0x8E, 0x61, 0x75, 0xF1, 0xFE, 0x38, 0xEB, 0x1A, 0xCF, 0x95, 0xFD, 0x51, }; uint8_t tag[] = { 0x88, 0xA8, 0xB7, 0x4B, 0xB7, 0x4F, 0xDA, 0x55, 0x3E, 0x91, 0x02, 0x0A, 0x23, 0xDE, 0xED, 0x45, }; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); } AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3, s_gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3_fn) static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = { 0xA2, 0x91, 0x48, 0x4C, 0x3D, 0xE8, 0xBE, 0xC6, 0xB4, 0x7F, 0x52, 0x5F, }; uint8_t key[] = { 0x37, 0xF3, 
0x91, 0x37, 0x41, 0x6B, 0xAF, 0xDE, 0x6F, 0x75, 0x02, 0x2A, 0x7A, 0x52, 0x7C, 0xC5, 0x93, 0xB6, 0x00, 0x0A, 0x83, 0xFF, 0x51, 0xEC, 0x04, 0x87, 0x1A, 0x0F, 0xF5, 0x36, 0x0E, 0x4E, }; uint8_t data[] = {0xFA, 0xFD, 0x94, 0xCE, 0xDE, 0x8B, 0x5A, 0x07, 0x30, 0x39, 0x4B, 0xEC, 0x68, 0xA8, 0xE7, 0x7D, 0xBA, 0x28, 0x8D, 0x6C, 0xCA, 0xA8, 0xE1, 0x56, 0x3A, 0x81, 0xD6, 0xE7, 0xCC, 0xC7, 0xFC, 0x97}; uint8_t expected[] = { 0x44, 0xDC, 0x86, 0x80, 0x06, 0xB2, 0x1D, 0x49, 0x28, 0x40, 0x16, 0x56, 0x5F, 0xFB, 0x39, 0x79, 0xCC, 0x42, 0x71, 0xD9, 0x67, 0x62, 0x8B, 0xF7, 0xCD, 0xAF, 0x86, 0xDB, 0x88, 0x8E, 0x92, 0xE5, }; uint8_t tag[] = { 0x01, 0xA2, 0xB5, 0x78, 0xAA, 0x2F, 0x41, 0xEC, 0x63, 0x79, 0xA4, 0x4A, 0x31, 0xCC, 0x01, 0x9C, }; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); } AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6, s_gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6_fn) static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = { 0x92, 0xF2, 0x58, 0x07, 0x1D, 0x79, 0xAF, 0x3E, 0x63, 0x67, 0x22, 0x85, }; uint8_t key[] = { 0x59, 0x5F, 0x25, 0x9C, 0x55, 0xAB, 0xE0, 0x0A, 0xE0, 0x75, 0x35, 0xCA, 0x5D, 0x9B, 0x09, 0xD6, 0xEF, 0xB9, 0xF7, 0xE9, 0xAB, 0xB6, 0x46, 0x05, 0xC3, 0x37, 0xAC, 0xBD, 0x6B, 0x14, 0xFC, 0x7E, }; uint8_t data[] = { 0xA6, 0xFE, 0xE3, 0x3E, 0xB1, 0x10, 0xA2, 0xD7, 0x69, 0xBB, 0xC5, 0x2B, 0x0F, 0x36, 0x96, 0x9C, 0x28, 0x78, 0x74, 0xF6, 0x65, 0x68, 0x14, 0x77, 0xA2, 0x5F, 0xC4, 0xC4, 0x80, 0x15, 0xC5, 0x41, 0xFB, 0xE2, 0x39, 0x41, 0x33, 0xBA, 0x49, 0x0A, 0x34, 0xEE, 0x2D, 0xD6, 0x7B, 0x89, 0x81, 0x77, 0x84, 0x9A, 0x91, }; uint8_t expected[] = { 0xBB, 0xCA, 0x4A, 0x9E, 0x09, 0xAE, 0x96, 0x90, 0xC0, 0xF6, 0xF8, 0xD4, 0x05, 0xE5, 0x3D, 0xCC, 0xD6, 0x66, 0xAA, 0x9C, 0x5F, 0xA1, 0x3C, 0x87, 0x58, 0xBC, 0x30, 0xAB, 0xE1, 0xDD, 0xD1, 0xBC, 0xCE, 0x0D, 0x36, 0xA1, 0xEA, 0xAA, 0xAF, 0xFE, 0xF2, 0x0C, 0xD3, 0xC5, 0x97, 0x0B, 0x96, 0x73, 0xF8, 0xA6, 0x5C, }; uint8_t tag[] = {0x26, 0xCC, 0xEC, 0xB9, 0x97, 0x6F, 0xD6, 0xAC, 0x9C, 0x2C, 0x0F, 0x37, 0x2C, 0x52, 0xC8, 0x21}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); } AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8, s_gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8_fn) static int s_gcm_256_KAT_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = { 0xFB, 0x7B, 0x4A, 0x82, 0x4E, 0x82, 0xDA, 0xA6, 0xC8, 0xBC, 0x12, 0x51, }; uint8_t key[] = { 0x20, 0x14, 0x2E, 0x89, 0x8C, 0xD2, 0xFD, 0x98, 0x0F, 0xBF, 0x34, 0xDE, 0x6B, 0xC8, 0x5C, 0x14, 0xDA, 0x7D, 0x57, 0xBD, 0x28, 0xF4, 0xAA, 0x5C, 0xF1, 0x72, 0x8A, 0xB6, 0x4E, 0x84, 0x31, 
0x42, }; uint8_t aad[] = { 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, 0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, }; uint8_t tag[] = { 0x81, 0xC0, 0xE4, 0x2B, 0xB1, 0x95, 0xE2, 0x62, 0xCB, 0x3B, 0x3A, 0x74, 0xA0, 0xDA, 0xE1, 0xC8, }; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = {0}; struct aws_byte_cursor expected_cur = {0}; struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, sizeof(aad)); return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, &aad_cur); } AWS_TEST_CASE(gcm_256_KAT_1, s_gcm_256_KAT_1_fn) static int s_gcm_256_KAT_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = { 0x6B, 0x5C, 0xD3, 0x70, 0x5A, 0x73, 0x3C, 0x1A, 0xD9, 0x43, 0xD5, 0x8A, }; uint8_t key[] = { 0xD2, 0x11, 0xF2, 0x78, 0xA4, 0x4E, 0xAB, 0x66, 0x6B, 0x10, 0x21, 0xF4, 0xB4, 0xF6, 0x0B, 0xA6, 0xB7, 0x44, 0x64, 0xFA, 0x9C, 0xB7, 0xB1, 0x34, 0x93, 0x4D, 0x78, 0x91, 0xE1, 0x47, 0x91, 0x69, }; uint8_t aad[] = { 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, 0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, }; uint8_t data[] = { 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, 0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, }; uint8_t expected[] = { 0x4C, 0x25, 0xAB, 0xD6, 0x6D, 0x3A, 0x1B, 0xCC, 0xE7, 0x94, 0xAC, 0xAA, 0xF4, 0xCE, 0xFD, 0xF6, 0xD2, 0x55, 0x2F, 0x4A, 0x82, 0xC5, 0x0A, 0x98, 0xCB, 0x15, 0xB4, 0x81, 0x2F, 0xF5, 0x57, 0xAB, 0xE5, 0x64, 0xA9, 0xCE, 0xFF, 0x15, 0xF3, 0x2D, 0xCF, 0x5A, 0x5A, 0xA7, 0x89, 0x48, 0x88, }; uint8_t tag[] = { 0x03, 0xED, 0xE7, 0x1E, 0xC9, 0x52, 0xE6, 0x5A, 0xE7, 0xB4, 0xB8, 0x5C, 0xFE, 0xC7, 0xD3, 0x04, }; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, sizeof(aad)); return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, &aad_cur); } AWS_TEST_CASE(gcm_256_KAT_2, s_gcm_256_KAT_2_fn) static int s_gcm_256_KAT_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[] = { 0x5F, 0x08, 0xEF, 0xBF, 0xB7, 0xBF, 0x5B, 0xA3, 0x65, 0xD9, 0xEB, 0x1D, }; uint8_t key[] = { 0xCF, 0xE8, 0xBF, 0xE6, 0x1B, 0x89, 0xAF, 0x53, 0xD2, 0xBE, 0xCE, 0x74, 0x4D, 0x27, 0xB7, 0x8C, 0x9E, 0x4D, 0x74, 0xD0, 0x28, 0xCE, 0x88, 0xED, 0x10, 0xA4, 0x22, 0x28, 0x5B, 0x12, 0x01, 0xC9, }; uint8_t data[] = { 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, 
0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, }; uint8_t expected[] = { 0x0A, 0x7E, 0x82, 0xF1, 0xE5, 0xC7, 0x6C, 0x69, 0x67, 0x96, 0x71, 0xEE, 0xAE, 0xE4, 0x55, 0x93, 0x6F, 0x2C, 0x4F, 0xCC, 0xD9, 0xDD, 0xF1, 0xFA, 0xA2, 0x70, 0x75, 0xE2, 0x04, 0x06, 0x44, 0x93, 0x89, 0x20, 0xC5, 0xD1, 0x6C, 0x69, 0xE4, 0xD9, 0x33, 0x75, 0x48, 0x7B, 0x9A, 0x80, 0xD4, }; uint8_t tag[] = { 0x04, 0x34, 0x7D, 0x0C, 0x5B, 0x0E, 0x0D, 0xE8, 0x9E, 0x03, 0x3D, 0x04, 0xD0, 0x49, 0x3D, 0xCA, }; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); struct aws_byte_cursor aad_cur = {0}; return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, &aad_cur); } AWS_TEST_CASE(gcm_256_KAT_3, s_gcm_256_KAT_3_fn) static int s_aes_gcm_test_with_generated_key_iv_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, NULL, NULL, NULL, NULL); ASSERT_NOT_NULL(cipher); struct aws_byte_buf encrypted_buf; aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor input = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input, &encrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_buf decrypted_buf; aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); struct aws_byte_cursor encryted_cur = aws_byte_cursor_from_buf(&encrypted_buf); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(input.ptr, input.len, decrypted_buf.buffer, decrypted_buf.len); aws_byte_buf_clean_up(&decrypted_buf); aws_byte_buf_clean_up(&encrypted_buf); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } AWS_TEST_CASE(gcm_test_with_generated_key_iv, s_aes_gcm_test_with_generated_key_iv_fn) static int s_aes_gcm_validate_materials_fails_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv_too_small[AWS_AES_256_CIPHER_BLOCK_SIZE - 5] = {0}; uint8_t iv_too_large[AWS_AES_256_CIPHER_BLOCK_SIZE - 3] = {0}; uint8_t key_too_small[AWS_AES_256_KEY_BYTE_LEN - 1] = {0}; uint8_t key_too_large[AWS_AES_256_KEY_BYTE_LEN + 1] = {0}; uint8_t valid_key_size[AWS_AES_256_KEY_BYTE_LEN] = {0}; uint8_t valid_iv_size[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; struct aws_byte_cursor key = aws_byte_cursor_from_array(valid_key_size, sizeof(valid_key_size)); struct aws_byte_cursor iv = aws_byte_cursor_from_array(iv_too_small, sizeof(iv_too_small)); ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(valid_key_size, sizeof(valid_key_size)); iv = aws_byte_cursor_from_array(iv_too_large, sizeof(iv_too_large)); ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL)); 
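/* iv_too_large (13 bytes) must be rejected just like the 11-byte IV above; note that the GCM test vectors in this file all use 12-byte IVs. */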
ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small)); iv = aws_byte_cursor_from_array(valid_iv_size, sizeof(valid_iv_size)); ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small)); iv = aws_byte_cursor_from_array(key_too_large, sizeof(key_too_large)); ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_gcm_validate_materials_fails, s_aes_gcm_validate_materials_fails_fn) static int s_test_aes_keywrap_RFC3394_256BitKey256CekTestVector(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t key[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, }; size_t key_length = sizeof(key); uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}; size_t input_length = sizeof(input); uint8_t expected_output[] = {0x28, 0xC9, 0xF4, 0x04, 0xC4, 0xB8, 0x10, 0xF4, 0xCB, 0xCC, 0xB3, 0x5C, 0xFB, 0x87, 0xF8, 0x26, 0x3F, 0x57, 0x86, 0xE2, 0xD8, 0x0E, 0xD3, 0x26, 0xCB, 0xC7, 0xF0, 0xE7, 0x1A, 0x99, 0xF4, 0x3B, 0xFB, 0x98, 0x8B, 0x9B, 0x7A, 0x02, 0xDD, 0x21}; size_t expected_output_length = sizeof(expected_output); struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); struct aws_byte_buf output_buf; ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); ASSERT_NOT_NULL(cipher); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_buf decrypted_buf; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(input, input_length, decrypted_buf.buffer, decrypted_buf.len); aws_symmetric_cipher_destroy(cipher); aws_byte_buf_clean_up(&output_buf); aws_byte_buf_clean_up(&decrypted_buf); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_keywrap_RFC3394_256BitKey256CekTestVector, s_test_aes_keywrap_RFC3394_256BitKey256CekTestVector); static int s_test_Rfc3394_256BitKey_TestIntegrityCheckFailed(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}; size_t input_length = sizeof(input); uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 
0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; size_t key_length = sizeof(key); uint8_t expected_output[] = {0x28, 0xC9, 0xF4, 0x04, 0xC4, 0xB8, 0x10, 0xF4, 0xCB, 0xCC, 0xB3, 0x5C, 0xFB, 0x87, 0xF8, 0x26, 0x3F, 0x57, 0x86, 0xE2, 0xD8, 0x0E, 0xD3, 0x26, 0xCB, 0xC7, 0xF0, 0xE7, 0x1A, 0x99, 0xF4, 0x3B, 0xFB, 0x98, 0x8B, 0x9B, 0x7A, 0x02, 0xDD, 0x21}; size_t expected_output_length = sizeof(expected_output); struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); struct aws_byte_buf output_buf; ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); ASSERT_NOT_NULL(cipher); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); /* Mutate one byte of the encrypted data */ output_buf.buffer[0] ^= 0x01; ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_buf decrypted_buf; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); ASSERT_FAILS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_FALSE(aws_symmetric_cipher_is_good(cipher)); aws_symmetric_cipher_destroy(cipher); aws_byte_buf_clean_up(&output_buf); aws_byte_buf_clean_up(&decrypted_buf); return AWS_OP_SUCCESS; } AWS_TEST_CASE( aes_keywrap_Rfc3394_256BitKey_TestIntegrityCheckFailed, s_test_Rfc3394_256BitKey_TestIntegrityCheckFailed); static int s_test_RFC3394_256BitKeyTestBadPayload(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}; size_t input_length = sizeof(input); uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; size_t key_length = sizeof(key); uint8_t expected_output[] = {0x28, 0xC9, 0xF4, 0x04, 0xC4, 0xB8, 0x10, 0xF4, 0xCB, 0xCC, 0xB3, 0x5C, 0xFB, 0x87, 0xF8, 0x26, 0x3F, 0x57, 0x86, 0xE2, 0xD8, 0x0E, 0xD3, 0x26, 0xCB, 0xC7, 0xF0, 0xE7, 0x1A, 0x99, 0xF4, 0x3B, 0xFB, 0x98, 0x8B, 0x9B, 0x7A, 0x02, 0xDD, 0x21}; size_t expected_output_length = sizeof(expected_output); struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); struct aws_byte_buf output_buf; ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); ASSERT_NOT_NULL(cipher); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); 
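/* RFC 3394 key wrap adds a 64-bit integrity check value, so the wrapped output is always 8 bytes longer than the input: the 32-byte CEK above wraps to the 40-byte expected_output. */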
ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_buf decrypted_buf; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_BIN_ARRAYS_EQUALS(input, input_length, decrypted_buf.buffer, decrypted_buf.len); aws_symmetric_cipher_destroy(cipher); aws_byte_buf_clean_up(&output_buf); aws_byte_buf_clean_up(&decrypted_buf); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_keywrap_RFC3394_256BitKeyTestBadPayload, s_test_RFC3394_256BitKeyTestBadPayload); static int s_test_RFC3394_256BitKey128BitCekTestVector(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}; size_t input_length = sizeof(input); uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; size_t key_length = sizeof(key); uint8_t expected_output[] = {0x64, 0xE8, 0xC3, 0xF9, 0xCE, 0x0F, 0x5B, 0xA2, 0x63, 0xE9, 0x77, 0x79, 0x05, 0x81, 0x8A, 0x2A, 0x93, 0xC8, 0x19, 0x1E, 0x7D, 0x6E, 0x8A, 0xE7}; size_t expected_output_length = sizeof(expected_output); struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); struct aws_byte_buf output_buf; ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); ASSERT_NOT_NULL(cipher); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); ASSERT_TRUE(aws_symmetric_cipher_is_good(cipher)); struct aws_byte_buf decrypted_buf; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); aws_symmetric_cipher_destroy(cipher); aws_byte_buf_clean_up(&output_buf); aws_byte_buf_clean_up(&decrypted_buf); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_keywrap_RFC3394_256BitKey128BitCekTestVector, s_test_RFC3394_256BitKey128BitCekTestVector); static int s_test_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}; size_t input_length = sizeof(input); uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; size_t key_length = sizeof(key); uint8_t expected_output[] = {0x64, 0xE8, 0xC3, 0xF9, 0xCE, 0x0F, 0x5B, 0xA2, 0x63, 0xE9, 0x77, 0x79, 0x05, 0x81, 0x8A, 0x2A, 0x93, 0xC8, 0x19, 0x1E, 0x7D, 0x6E, 0x8A, 0xE7}; size_t 
expected_output_length = sizeof(expected_output); struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); struct aws_byte_buf output_buf; ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); ASSERT_NOT_NULL(cipher); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_buf decrypted_buf; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); encrypted_data.ptr[1] = encrypted_data.ptr[1] + encrypted_data.ptr[2]; ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); ASSERT_FAILS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_FALSE(aws_symmetric_cipher_is_good(cipher)); aws_symmetric_cipher_destroy(cipher); aws_byte_buf_clean_up(&output_buf); aws_byte_buf_clean_up(&decrypted_buf); return AWS_OP_SUCCESS; } AWS_TEST_CASE( aes_keywrap_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector, s_test_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector); static int s_test_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}; size_t input_length = sizeof(input); uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; size_t key_length = sizeof(key); uint8_t expected_output[] = {0x64, 0xE8, 0xC3, 0xF9, 0xCE, 0x0F, 0x5B, 0xA2, 0x63, 0xE9, 0x77, 0x79, 0x05, 0x81, 0x8A, 0x2A, 0x93, 0xC8, 0x19, 0x1E, 0x7D, 0x6E, 0x8A, 0xE7}; size_t expected_output_length = sizeof(expected_output); struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); struct aws_byte_buf output_buf; ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); ASSERT_NOT_NULL(cipher); ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); struct aws_byte_buf decrypted_buf; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); encrypted_data.ptr[14] = encrypted_data.ptr[13] + encrypted_data.ptr[14]; ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); ASSERT_FAILS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); ASSERT_FALSE(aws_symmetric_cipher_is_good(cipher)); aws_symmetric_cipher_destroy(cipher); aws_byte_buf_clean_up(&output_buf); 
aws_byte_buf_clean_up(&decrypted_buf); return AWS_OP_SUCCESS; } AWS_TEST_CASE( aes_keywrap_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector, s_test_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector); static int s_aes_keywrap_validate_materials_fails_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t key_too_small[AWS_AES_256_KEY_BYTE_LEN - 1] = {0}; uint8_t key_too_large[AWS_AES_256_KEY_BYTE_LEN + 1] = {0}; struct aws_byte_cursor key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small)); ASSERT_NULL(aws_aes_keywrap_256_new(allocator, &key)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); key = aws_byte_cursor_from_array(key_too_large, sizeof(key_too_large)); ASSERT_NULL(aws_aes_keywrap_256_new(allocator, &key)); ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, aws_last_error()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_keywrap_validate_materials_fails, s_aes_keywrap_validate_materials_fails_fn) static int s_test_input_too_large_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; uint8_t key[AWS_AES_256_KEY_BYTE_LEN] = {0}; struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, &key_cur, &iv_cur); ASSERT_NOT_NULL(cipher); struct aws_byte_cursor invalid_cur = { .ptr = key, .len = INT_MAX, }; ASSERT_ERROR(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, aws_symmetric_cipher_encrypt(cipher, invalid_cur, NULL)); /* should still be good from an invalid input. */ ASSERT_TRUE(aws_symmetric_cipher_is_good(cipher)); ASSERT_ERROR(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, aws_symmetric_cipher_decrypt(cipher, invalid_cur, NULL)); /* should still be good from an invalid input. */ ASSERT_TRUE(aws_symmetric_cipher_is_good(cipher)); aws_symmetric_cipher_destroy(cipher); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aes_test_input_too_large, s_test_input_too_large_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/der_test.c000066400000000000000000000706621456575232400231210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include /* clang-format off */ /* note that this int is unsigned, with the high bit set, so needs to be encoded specially */ static uint8_t s_bigint[] = { 0x8f, 0xe2, 0x41, 0x2a, 0x08, 0xe8, 0x51, 0xa8, 0x8c, 0xb3, 0xe8, 0x53, 0xe7, 0xd5, 0x49, 0x50, 0xb3, 0x27, 0x8a, 0x2b, 0xcb, 0xea, 0xb5, 0x42, 0x73, 0xea, 0x02, 0x57, 0xcc, 0x65, 0x33, 0xee, 0x88, 0x20, 0x61, 0xa1, 0x17, 0x56, 0xc1, 0x24, 0x18, 0xe3, 0xa8, 0x08, 0xd3, 0xbe, 0xd9, 0x31, 0xf3, 0x37, 0x0b, 0x94, 0xb8, 0xcc, 0x43, 0x08, 0x0b, 0x70, 0x24, 0xf7, 0x9c, 0xb1, 0x8d, 0x5d, 0xd6, 0x6d, 0x82, 0xd0, 0x54, 0x09, 0x84, 0xf8, 0x9f, 0x97, 0x01, 0x75, 0x05, 0x9c, 0x89, 0xd4, 0xd5, 0xc9, 0x1e, 0xc9, 0x13, 0xd7, 0x2a, 0x6b, 0x30, 0x91, 0x19, 0xd6, 0xd4, 0x42, 0xe0, 0xc4, 0x9d, 0x7c, 0x92, 0x71, 0xe1, 0xb2, 0x2f, 0x5c, 0x8d, 0xee, 0xf0, 0xf1, 0x17, 0x1e, 0xd2, 0x5f, 0x31, 0x5b, 0xb1, 0x9c, 0xbc, 0x20, 0x55, 0xbf, 0x3a, 0x37, 0x42, 0x45, 0x75, 0xdc, 0x90, 0x65, }; static uint8_t s_bigint_zero[] = { 0x00 }; static uint8_t s_encoded_bigint[] = { 0x02 /* INTEGER */, 0x81 /* 1 byte length */, 0x81 /* 0x81 bytes */, 0x00 /* unsigned */, 0x8f, 0xe2, 0x41, 0x2a, 0x08, 0xe8, 0x51, 0xa8, 0x8c, 0xb3, 0xe8, 0x53, 0xe7, 0xd5, 0x49, 0x50, 0xb3, 0x27, 0x8a, 0x2b, 0xcb, 0xea, 0xb5, 0x42, 0x73, 0xea, 0x02, 0x57, 0xcc, 0x65, 0x33, 0xee, 0x88, 0x20, 0x61, 0xa1, 0x17, 0x56, 0xc1, 0x24, 0x18, 0xe3, 0xa8, 0x08, 0xd3, 0xbe, 0xd9, 0x31, 0xf3, 0x37, 0x0b, 0x94, 0xb8, 0xcc, 0x43, 0x08, 0x0b, 0x70, 0x24, 0xf7, 0x9c, 0xb1, 0x8d, 0x5d, 0xd6, 0x6d, 0x82, 0xd0, 0x54, 0x09, 0x84, 0xf8, 0x9f, 0x97, 0x01, 0x75, 0x05, 0x9c, 0x89, 0xd4, 0xd5, 0xc9, 0x1e, 0xc9, 0x13, 0xd7, 0x2a, 0x6b, 0x30, 0x91, 0x19, 0xd6, 0xd4, 0x42, 0xe0, 0xc4, 0x9d, 0x7c, 0x92, 0x71, 0xe1, 0xb2, 0x2f, 0x5c, 0x8d, 0xee, 0xf0, 0xf1, 0x17, 0x1e, 0xd2, 0x5f, 0x31, 0x5b, 0xb1, 0x9c, 0xbc, 0x20, 0x55, 0xbf, 0x3a, 0x37, 0x42, 0x45, 0x75, 0xdc, 0x90, 0x65, }; static uint8_t s_encoded_bigint_zero[] = { 0x02 /* INTEGER */, 0x01 /* 1 byte length */, 0x00 /* unsigned */, }; const uint8_t s_encoded_true[] = {0x01, 0x01, 0xff}; const uint8_t s_encoded_false[] = {0x01, 0x01, 0x00}; const uint8_t s_encoded_null[] = {0x05, 0x00}; static uint8_t s_bit_string[] = { 0x47, 0xeb, 0x99, 0x5a, 0xdf, 0x9e, 0x70, 0x0d, 0xfb, 0xa7, 0x31, 0x32, 0xc1, 0x5f, 0x5c, 0x24, 0xc2, 0xe0, 0xbf, 0xc6, 0x24, 0xaf, 0x15, 0x66, 0x0e, 0xb8, 0x6a, 0x2e, 0xab, 0x2b, 0xc4, 0x97, 0x1f, 0xe3, 0xcb, 0xdc, 0x63, 0xa5, 0x25, 0xec, 0xc7, 0xb4, 0x28, 0x61, 0x66, 0x36, 0xa1, 0x31, 0x1b, 0xbf, 0xdd, 0xd0, 0xfc, 0xbf, 0x17, 0x94, 0x90, 0x1d, 0xe5, 0x5e, 0xc7, 0x11, 0x5e, 0xc9, 0x55, 0x9f, 0xeb, 0xa3, 0x3e, 0x14, 0xc7, 0x99, 0xa6, 0xcb, 0xba, 0xa1, 0x46, 0x0f, 0x39, 0xd4, 0x44, 0xc4, 0xc8, 0x4b, 0x76, 0x0e, 0x20, 0x5d, 0x6d, 0xa9, 0x34, 0x9e, 0xd4, 0xd5, 0x87, 0x42, 0xeb, 0x24, 0x26, 0x51, 0x14, 0x90, 0xb4, 0x0f, 0x06, 0x5e, 0x52, 0x88, 0x32, 0x7a, 0x95, 0x20, 0xa0, 0xfd, 0xf7, 0xe5, 0x7d, 0x60, 0xdd, 0x72, 0x68, 0x9b, 0xf5, 0x7b, 0x05, 0x8f, 0x6d, 0x1e, }; static uint8_t s_encoded_bit_string[] = { 0x03, /* BIT_STRING */ 0x81, /* 1 byte length */ 0x81, /* 0x81 bytes */ 0x00, /* 0 trailing unused bits */ 0x47, 0xeb, 0x99, 0x5a, 0xdf, 0x9e, 0x70, 0x0d, 0xfb, 0xa7, 0x31, 0x32, 0xc1, 0x5f, 0x5c, 0x24, 0xc2, 0xe0, 0xbf, 0xc6, 0x24, 0xaf, 0x15, 0x66, 0x0e, 0xb8, 0x6a, 0x2e, 0xab, 0x2b, 0xc4, 0x97, 0x1f, 0xe3, 0xcb, 0xdc, 0x63, 0xa5, 0x25, 0xec, 0xc7, 0xb4, 0x28, 0x61, 0x66, 0x36, 0xa1, 0x31, 0x1b, 0xbf, 0xdd, 0xd0, 0xfc, 0xbf, 0x17, 0x94, 0x90, 0x1d, 0xe5, 0x5e, 0xc7, 0x11, 0x5e, 0xc9, 0x55, 0x9f, 0xeb, 0xa3, 0x3e, 0x14, 0xc7, 0x99, 0xa6, 0xcb, 
0xba, 0xa1, 0x46, 0x0f, 0x39, 0xd4, 0x44, 0xc4, 0xc8, 0x4b, 0x76, 0x0e, 0x20, 0x5d, 0x6d, 0xa9, 0x34, 0x9e, 0xd4, 0xd5, 0x87, 0x42, 0xeb, 0x24, 0x26, 0x51, 0x14, 0x90, 0xb4, 0x0f, 0x06, 0x5e, 0x52, 0x88, 0x32, 0x7a, 0x95, 0x20, 0xa0, 0xfd, 0xf7, 0xe5, 0x7d, 0x60, 0xdd, 0x72, 0x68, 0x9b, 0xf5, 0x7b, 0x05, 0x8f, 0x6d, 0x1e, }; static uint8_t s_octet_string[] = { 0x38, 0x10, 0x60, 0xe2, 0x70, 0x69, 0x91, 0x4a, 0x8b, 0xb5, 0x22, 0x57, 0x2a, 0x62, 0xef, 0xde, 0x15, 0x7d, 0x59, 0xd6, 0x4e, 0x20, 0x9a, 0x45, 0x2b, 0xe3, 0xfd, 0xfc, 0x68, 0xba, 0xaf, 0xbf, 0x9c, 0x17, 0xb0, 0x8e, 0x6d, 0xc4, 0x29, 0x1e, 0xe3, 0x21, 0xac, 0xbb, 0x5a, 0x8a, 0xc9, 0x67, 0x0a, 0xd4, 0x45, 0x93, 0x10, 0xc0, 0x26, 0xeb, 0x0a, 0x83, 0xc2, 0xb1, 0x40, 0x87, 0x36, 0xf7, 0xa0, 0x26, 0xda, 0xb9, 0xbb, 0x46, 0x73, 0x88, 0x7a, 0x67, 0xb9, 0xe6, 0xb3, 0x6f, 0xea, 0x59, 0x28, 0x8a, 0xd3, 0x92, 0x72, 0xf6, 0x7b, 0x89, 0xa0, 0xd8, 0x2d, 0x9e, 0x40, 0xeb, 0x1e, 0xbb, 0x6e, 0xae, 0xf0, 0x5a, 0xed, 0x16, 0xc9, 0xe3, 0x27, 0x59, 0x37, 0x8f, 0xf3, 0x4a, 0x98, 0x60, 0xf8, 0xfb, 0xa7, 0x0a, 0xee, 0x1b, 0x6e, 0x91, 0x95, 0x96, 0xcf, 0x0d, 0x56, 0xac, 0xab, 0x35, }; static uint8_t s_encoded_octet_string[] = { 0x04, /* OCTET_STRING */ 0x81, /* 1 byte length */ 0x80, /* 0x80 bytes */ 0x38, 0x10, 0x60, 0xe2, 0x70, 0x69, 0x91, 0x4a, 0x8b, 0xb5, 0x22, 0x57, 0x2a, 0x62, 0xef, 0xde, 0x15, 0x7d, 0x59, 0xd6, 0x4e, 0x20, 0x9a, 0x45, 0x2b, 0xe3, 0xfd, 0xfc, 0x68, 0xba, 0xaf, 0xbf, 0x9c, 0x17, 0xb0, 0x8e, 0x6d, 0xc4, 0x29, 0x1e, 0xe3, 0x21, 0xac, 0xbb, 0x5a, 0x8a, 0xc9, 0x67, 0x0a, 0xd4, 0x45, 0x93, 0x10, 0xc0, 0x26, 0xeb, 0x0a, 0x83, 0xc2, 0xb1, 0x40, 0x87, 0x36, 0xf7, 0xa0, 0x26, 0xda, 0xb9, 0xbb, 0x46, 0x73, 0x88, 0x7a, 0x67, 0xb9, 0xe6, 0xb3, 0x6f, 0xea, 0x59, 0x28, 0x8a, 0xd3, 0x92, 0x72, 0xf6, 0x7b, 0x89, 0xa0, 0xd8, 0x2d, 0x9e, 0x40, 0xeb, 0x1e, 0xbb, 0x6e, 0xae, 0xf0, 0x5a, 0xed, 0x16, 0xc9, 0xe3, 0x27, 0x59, 0x37, 0x8f, 0xf3, 0x4a, 0x98, 0x60, 0xf8, 0xfb, 0xa7, 0x0a, 0xee, 0x1b, 0x6e, 0x91, 0x95, 0x96, 0xcf, 0x0d, 0x56, 0xac, 0xab, 0x35, }; /* SEQUENCE [BOOLEAN true, BOOLEAN false] */ static uint8_t s_encoded_sequence[] = { 0x30, /* SEQUENCE */ 0x06, /* 6 bytes */ 0x01, 0x01, 0xff, /* BOOLEAN true */ 0x01, 0x01, 0x00, /* BOOLEAN false */ }; /* SET [BOOLEAN true, BOOLEAN false] */ static uint8_t s_encoded_set[] = { 0x31, /* SET */ 0x06, /* 6 bytes */ 0x01, 0x01, 0xff, /* BOOLEAN true */ 0x01, 0x01, 0x00, /* BOOLEAN false */ 0x0a, /* trailing newline */ }; static uint8_t s_encoded_key_pair[] = { 0x30, 0x74, /* SEQUENCE, 116 bytes */ 0x02, 0x01, 0x01, /* INTEGER, 1 byte, value: 1 */ 0x04, 0x20, /* OCTET_STRING, 32 bytes */ 0x9d, 0x6d, 0x10, 0x36, 0xbe, 0x66, 0x10, 0xeb, 0x8c, 0x66, 0xe6, 0x39, 0xa3, 0x1e, 0x47, 0xbc, 0x46, 0x6f, 0x46, 0x70, 0x59, 0x36, 0x32, 0x84, 0x46, 0x0c, 0x97, 0xb8, 0xda, 0x00, 0x19, 0xe2, 0xa0, 0x07, /* context-defined container 0, 7 bytes */ 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a, /* OID, 5 bytes */ 0xa1, 0x44, /* context-defined container 1, 68 bytes */ 0x03, 0x42, /* BIT_STRING, 66 bytes */ 0x00, 0x04, 0xd1, 0xcf, 0x9c, 0x8a, 0xb4, 0x76, 0x58, 0x70, 0xd9, 0x35, 0x1c, 0xdc, 0x88, 0xbb, 0x43, 0x19, 0x77, 0xe4, 0xde, 0xba, 0xda, 0x81, 0x58, 0x54, 0x92, 0x93, 0x8d, 0x85, 0xce, 0xf9, 0x04, 0xf3, 0x8e, 0x86, 0x95, 0x46, 0xa3, 0x43, 0xdd, 0x67, 0x8c, 0x8e, 0xb5, 0xf4, 0x33, 0x8e, 0x95, 0x4a, 0x93, 0x96, 0xcf, 0xe4, 0x8f, 0x32, 0x78, 0x88, 0xe8, 0x5a, 0xde, 0x59, 0x3f, 0x63, 0xaf, 0xf2, 0x0a, /* trailing newline */ }; /* clang-format on */ static int s_der_encode_integer(struct aws_allocator *allocator, void *ctx) { 
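/* s_encoded_bigint above uses DER long-form length encoding: the first 0x81 means one length octet follows, and that octet is 0x81 (129) because the content is the 128-byte magnitude plus a leading 0x00 pad octet (the value is unsigned but its high bit is set). */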
(void)ctx; struct aws_der_encoder *encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); struct aws_byte_cursor bigint_cur = aws_byte_cursor_from_array(s_bigint, AWS_ARRAY_SIZE(s_bigint)); ASSERT_SUCCESS(aws_der_encoder_write_unsigned_integer(encoder, bigint_cur)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_bigint, AWS_ARRAY_SIZE(s_encoded_bigint), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_integer, s_der_encode_integer) static int s_der_encode_integer_zero(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_der_encoder *encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); struct aws_byte_cursor bigint_cur = aws_byte_cursor_from_array(s_bigint_zero, AWS_ARRAY_SIZE(s_bigint_zero)); ASSERT_SUCCESS(aws_der_encoder_write_unsigned_integer(encoder, bigint_cur)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_bigint_zero, AWS_ARRAY_SIZE(s_encoded_bigint_zero), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_integer_zero, s_der_encode_integer_zero) static int s_der_encode_boolean(struct aws_allocator *allocator, void *ctx) { (void)ctx; bool flag = true; struct aws_der_encoder *encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); ASSERT_SUCCESS(aws_der_encoder_write_boolean(encoder, flag)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_true, AWS_ARRAY_SIZE(s_encoded_true), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); flag = false; encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); ASSERT_SUCCESS(aws_der_encoder_write_boolean(encoder, flag)); ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_false, AWS_ARRAY_SIZE(s_encoded_false), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_boolean, s_der_encode_boolean) static int s_der_encode_null(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_der_encoder *encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); ASSERT_SUCCESS(aws_der_encoder_write_null(encoder)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_null, AWS_ARRAY_SIZE(s_encoded_null), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_null, s_der_encode_null) static int s_der_encode_bit_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_der_encoder *encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); struct aws_byte_cursor bit_string = aws_byte_cursor_from_array(s_bit_string, AWS_ARRAY_SIZE(s_bit_string)); ASSERT_SUCCESS(aws_der_encoder_write_bit_string(encoder, bit_string)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_bit_string, AWS_ARRAY_SIZE(s_encoded_bit_string), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_bit_string, s_der_encode_bit_string) static int s_der_encode_octet_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_der_encoder *encoder = 
aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); struct aws_byte_cursor octet_string = aws_byte_cursor_from_array(s_octet_string, AWS_ARRAY_SIZE(s_octet_string)); ASSERT_SUCCESS(aws_der_encoder_write_octet_string(encoder, octet_string)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_octet_string, AWS_ARRAY_SIZE(s_encoded_octet_string), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_octet_string, s_der_encode_octet_string) static int s_der_encode_sequence(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_der_encoder *encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); ASSERT_SUCCESS(aws_der_encoder_begin_sequence(encoder)); ASSERT_SUCCESS(aws_der_encoder_write_boolean(encoder, true)); ASSERT_SUCCESS(aws_der_encoder_write_boolean(encoder, false)); ASSERT_SUCCESS(aws_der_encoder_end_sequence(encoder)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_sequence, AWS_ARRAY_SIZE(s_encoded_sequence), encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_sequence, s_der_encode_sequence) static int s_der_encode_set(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_der_encoder *encoder = aws_der_encoder_new(allocator, 1024); ASSERT_NOT_NULL(encoder); ASSERT_SUCCESS(aws_der_encoder_begin_set(encoder)); ASSERT_SUCCESS(aws_der_encoder_write_boolean(encoder, true)); ASSERT_SUCCESS(aws_der_encoder_write_boolean(encoder, false)); ASSERT_SUCCESS(aws_der_encoder_end_set(encoder)); struct aws_byte_cursor encoded; ASSERT_SUCCESS(aws_der_encoder_get_contents(encoder, &encoded)); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_set, AWS_ARRAY_SIZE(s_encoded_set) - 1, encoded.ptr, encoded.len); aws_der_encoder_destroy(encoder); return 0; } AWS_TEST_CASE(der_encode_set, s_der_encode_set) static int s_der_decode_integer(struct aws_allocator *allocator, void *ctx) { (void)ctx; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_bigint); const size_t decoded_size = AWS_ARRAY_SIZE(s_bigint); struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_bigint, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_INTEGER, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(decoded_size, aws_der_decoder_tlv_length(decoder)); struct aws_byte_cursor decoded; ASSERT_SUCCESS(aws_der_decoder_tlv_unsigned_integer(decoder, &decoded)); ASSERT_BIN_ARRAYS_EQUALS(s_bigint, decoded_size, decoded.ptr, decoded.len); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_integer, s_der_decode_integer) static int s_der_decode_integer_zero(struct aws_allocator *allocator, void *ctx) { (void)ctx; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_bigint_zero); const size_t decoded_size = AWS_ARRAY_SIZE(s_bigint_zero); struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_bigint_zero, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_INTEGER, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(decoded_size, aws_der_decoder_tlv_length(decoder)); struct aws_byte_cursor decoded; 
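/* As asserted above, tlv_length reports the 128-byte unsigned magnitude, not the 129-byte DER content that still carries the leading 0x00 pad octet. */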
ASSERT_SUCCESS(aws_der_decoder_tlv_unsigned_integer(decoder, &decoded)); ASSERT_BIN_ARRAYS_EQUALS(s_bigint_zero, decoded_size, decoded.ptr, decoded.len); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_integer_zero, s_der_decode_integer_zero) static int s_der_decode_boolean(struct aws_allocator *allocator, void *ctx) { (void)ctx; bool flag = false; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_true); struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_true, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BOOLEAN, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_length(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_boolean(decoder, &flag)); ASSERT_TRUE(flag); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); input = aws_byte_cursor_from_array(s_encoded_false, encoded_size); decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BOOLEAN, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_length(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_boolean(decoder, &flag)); ASSERT_FALSE(flag); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_boolean, s_der_decode_boolean) static int s_der_decode_null(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_null, AWS_ARRAY_SIZE(s_encoded_null)); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_NULL, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(0, aws_der_decoder_tlv_length(decoder)); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_null, s_der_decode_null) static int s_der_decode_bit_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_bit_string); const size_t decoded_size = AWS_ARRAY_SIZE(s_bit_string); struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_bit_string, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BIT_STRING, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(decoded_size, aws_der_decoder_tlv_length(decoder)); struct aws_byte_cursor decoded; ASSERT_SUCCESS(aws_der_decoder_tlv_string(decoder, &decoded)); ASSERT_BIN_ARRAYS_EQUALS(s_bit_string, decoded_size, decoded.ptr, decoded.len); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_bit_string, s_der_decode_bit_string) static int s_der_decode_octet_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_octet_string); const size_t decoded_size = AWS_ARRAY_SIZE(s_bit_string); struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_octet_string, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); 
ASSERT_INT_EQUALS(AWS_DER_OCTET_STRING, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(decoded_size, aws_der_decoder_tlv_length(decoder)); struct aws_byte_cursor decoded; ASSERT_SUCCESS(aws_der_decoder_tlv_string(decoder, &decoded)); ASSERT_BIN_ARRAYS_EQUALS(s_octet_string, decoded_size, decoded.ptr, decoded.len); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_octet_string, s_der_decode_octet_string) static int s_der_decode_sequence(struct aws_allocator *allocator, void *ctx) { (void)ctx; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_sequence); const size_t decoded_size = AWS_ARRAY_SIZE(s_encoded_true) + AWS_ARRAY_SIZE(s_encoded_false); struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_sequence, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); /* Verify SEQUENCE */ ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_SEQUENCE, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(decoded_size, aws_der_decoder_tlv_length(decoder)); ASSERT_INT_EQUALS(2, aws_der_decoder_tlv_count(decoder)); /* Verify true, then false */ bool decoded_flag = false; ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BOOLEAN, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_length(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_boolean(decoder, &decoded_flag)); ASSERT_TRUE(decoded_flag); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BOOLEAN, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_length(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_boolean(decoder, &decoded_flag)); ASSERT_FALSE(decoded_flag); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_sequence, s_der_decode_sequence) static int s_der_decode_set(struct aws_allocator *allocator, void *ctx) { (void)ctx; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_set); const size_t decoded_size = AWS_ARRAY_SIZE(s_encoded_true) + AWS_ARRAY_SIZE(s_encoded_false); struct aws_byte_cursor input = aws_byte_cursor_from_array(s_encoded_set, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); /* Verify SET */ ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_SET, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(decoded_size, aws_der_decoder_tlv_length(decoder)); ASSERT_INT_EQUALS(2, aws_der_decoder_tlv_count(decoder)); /* Verify true, then false */ bool decoded_flag = false; ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BOOLEAN, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_length(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_boolean(decoder, &decoded_flag)); ASSERT_TRUE(decoded_flag); ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BOOLEAN, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_length(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_boolean(decoder, &decoded_flag)); ASSERT_FALSE(decoded_flag); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_set, s_der_decode_set) static int s_der_decode_key_pair(struct aws_allocator *allocator, void *ctx) { (void)ctx; const size_t encoded_size = AWS_ARRAY_SIZE(s_encoded_key_pair); struct aws_byte_cursor input = 
aws_byte_cursor_from_array(s_encoded_key_pair, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); /* SEQUENCE */ ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_SEQUENCE, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(4, aws_der_decoder_tlv_count(decoder)); /* INTEGER 1 */ struct aws_byte_cursor integer; ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_INTEGER, aws_der_decoder_tlv_type(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_unsigned_integer(decoder, &integer)); ASSERT_BIN_ARRAYS_EQUALS("\x01", 1, integer.ptr, integer.len); /* 32 byte private key */ struct aws_byte_cursor private_key; ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_OCTET_STRING, aws_der_decoder_tlv_type(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_string(decoder, &private_key)); ASSERT_INT_EQUALS(32, private_key.len); /* container */ ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_TRUE(aws_der_decoder_tlv_type(decoder) & (AWS_DER_CLASS_CONTEXT | AWS_DER_FORM_CONSTRUCTED)); ASSERT_INT_EQUALS(7, aws_der_decoder_tlv_length(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_count(decoder)); /* 5 byte OID */ struct aws_byte_cursor oid; ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_OBJECT_IDENTIFIER, aws_der_decoder_tlv_type(decoder)); ASSERT_INT_EQUALS(5, aws_der_decoder_tlv_length(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_blob(decoder, &oid)); ASSERT_BIN_ARRAYS_EQUALS("\x2b\x81\x04\x00\x0a", 5, oid.ptr, oid.len); /* container */ ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_TRUE(aws_der_decoder_tlv_type(decoder) & (AWS_DER_CLASS_CONTEXT | AWS_DER_FORM_CONSTRUCTED)); ASSERT_INT_EQUALS(68, aws_der_decoder_tlv_length(decoder)); ASSERT_INT_EQUALS(1, aws_der_decoder_tlv_count(decoder)); /* 64 byte public key */ struct aws_byte_cursor public_key; ASSERT_TRUE(aws_der_decoder_next(decoder)); ASSERT_INT_EQUALS(AWS_DER_BIT_STRING, aws_der_decoder_tlv_type(decoder)); ASSERT_SUCCESS(aws_der_decoder_tlv_string(decoder, &public_key)); ASSERT_INT_EQUALS(65, public_key.len); ASSERT_FALSE(aws_der_decoder_next(decoder)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_key_pair, s_der_decode_key_pair) static int s_der_decode_negative_int(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t negative_der[] = {0x02 /*int*/, 0x01 /*len 1*/, 0xfd /*-3*/}; const size_t encoded_size = AWS_ARRAY_SIZE(negative_der); struct aws_byte_cursor input = aws_byte_cursor_from_array(negative_der, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NULL(decoder); ASSERT_INT_EQUALS(AWS_ERROR_CAL_DER_UNSUPPORTED_NEGATIVE_INT, aws_last_error()); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_negative_int, s_der_decode_negative_int) static int s_der_decode_positive_int(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t negative_der[] = {0x02 /*int*/, 0x02 /*len 2*/, 0x00, 0xfd /*253*/}; const size_t encoded_size = AWS_ARRAY_SIZE(negative_der); struct aws_byte_cursor input = aws_byte_cursor_from_array(negative_der, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); struct aws_byte_cursor cur; ASSERT_SUCCESS(aws_der_decoder_tlv_unsigned_integer(decoder, &cur)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_positive_int, s_der_decode_positive_int) 
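/* DER INTEGERs are two's complement: a lone 0xfd is -3, which the decoder rejects as an unsupported negative value, while a leading 0x00 octet (0x00 0xfd) marks the same bit pattern as the positive value 253. The following tests cover zero values and malformed lengths. */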
static int s_der_decode_zero_int(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t negative_der[] = {0x02 /*int*/, 0x01 /*len 1*/, 0x00 /*0*/}; const size_t encoded_size = AWS_ARRAY_SIZE(negative_der); struct aws_byte_cursor input = aws_byte_cursor_from_array(negative_der, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NOT_NULL(decoder); ASSERT_TRUE(aws_der_decoder_next(decoder)); struct aws_byte_cursor cur; ASSERT_SUCCESS(aws_der_decoder_tlv_unsigned_integer(decoder, &cur)); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_zero_int, s_der_decode_zero_int) static int s_der_decode_bad_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t negative_der[] = {0x02 /*int*/, 0x09 /*len 9*/, 0x00 /*0*/}; const size_t encoded_size = AWS_ARRAY_SIZE(negative_der); struct aws_byte_cursor input = aws_byte_cursor_from_array(negative_der, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NULL(decoder); ASSERT_INT_EQUALS(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED, aws_last_error()); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_bad_length, s_der_decode_bad_length) static int s_der_decode_zero_length_int(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t negative_der[] = {0x02 /*int*/, 0x00 /*len 0*/, 0x00 /*0*/}; const size_t encoded_size = AWS_ARRAY_SIZE(negative_der); struct aws_byte_cursor input = aws_byte_cursor_from_array(negative_der, encoded_size); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, input); ASSERT_NULL(decoder); ASSERT_INT_EQUALS(AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED, aws_last_error()); aws_der_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(der_decode_zero_length_int, s_der_decode_zero_length_int) aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/ecc_test.c000066400000000000000000001323141456575232400230720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0.
*/ #include #include #include #include #include #include #include static int s_test_key_derivation( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor private_key, struct aws_byte_cursor expected_pub_x, struct aws_byte_cursor expected_pub_y) { struct aws_ecc_key_pair *private_key_pair = aws_ecc_key_pair_new_from_private_key(allocator, curve_name, &private_key); ASSERT_NOT_NULL(private_key_pair); int error = aws_ecc_key_pair_derive_public_key(private_key_pair); /* this isn't supported on Apple platforms, since AFAIK it isn't possible */ if (error) { ASSERT_INT_EQUALS(AWS_ERROR_UNSUPPORTED_OPERATION, aws_last_error()); goto complete; } struct aws_byte_cursor pub_x; struct aws_byte_cursor pub_y; aws_ecc_key_pair_get_public_key(private_key_pair, &pub_x, &pub_y); ASSERT_BIN_ARRAYS_EQUALS(expected_pub_x.ptr, expected_pub_x.len, pub_x.ptr, pub_x.len); ASSERT_BIN_ARRAYS_EQUALS(expected_pub_y.ptr, expected_pub_y.len, pub_y.ptr, pub_y.len); complete: aws_ecc_key_pair_release(private_key_pair); return AWS_OP_SUCCESS; } static int s_ecdsa_p256_test_pub_key_derivation_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t d[] = { 0xc9, 0x80, 0x68, 0x98, 0xa0, 0x33, 0x49, 0x16, 0xc8, 0x60, 0x74, 0x88, 0x80, 0xa5, 0x41, 0xf0, 0x93, 0xb5, 0x79, 0xa9, 0xb1, 0xf3, 0x29, 0x34, 0xd8, 0x6c, 0x36, 0x3c, 0x39, 0x80, 0x03, 0x57, }; struct aws_byte_cursor private_key = aws_byte_cursor_from_array(d, sizeof(d)); uint8_t x[] = { 0xd0, 0x72, 0x0d, 0xc6, 0x91, 0xaa, 0x80, 0x09, 0x6b, 0xa3, 0x2f, 0xed, 0x1c, 0xb9, 0x7c, 0x2b, 0x62, 0x06, 0x90, 0xd0, 0x6d, 0xe0, 0x31, 0x7b, 0x86, 0x18, 0xd5, 0xce, 0x65, 0xeb, 0x72, 0x8f, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); uint8_t y[] = { 0x96, 0x81, 0xb5, 0x17, 0xb1, 0xcd, 0xa1, 0x7d, 0x0d, 0x83, 0xd3, 0x35, 0xd9, 0xc4, 0xa8, 0xa9, 0xa9, 0xb0, 0xb1, 0xb3, 0xc7, 0x10, 0x6d, 0x8f, 0x3c, 0x72, 0xbc, 0x50, 0x93, 0xdc, 0x27, 0x5f, }; struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); return s_test_key_derivation(allocator, AWS_CAL_ECDSA_P256, private_key, pub_x, pub_y); } AWS_TEST_CASE(ecdsa_p256_test_pub_key_derivation, s_ecdsa_p256_test_pub_key_derivation_fn) static int s_ecdsa_p384_test_pub_key_derivation_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t d[] = { 0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb, 0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97, 0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37, }; struct aws_byte_cursor private_key = aws_byte_cursor_from_array(d, sizeof(d)); uint8_t x[] = { 0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67, 0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64, 0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); uint8_t y[] = { 0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69, 0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19, 0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0, }; struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); return s_test_key_derivation(allocator, AWS_CAL_ECDSA_P384, private_key, pub_x, pub_y); 
} AWS_TEST_CASE(ecdsa_p384_test_pub_key_derivation, s_ecdsa_p384_test_pub_key_derivation_fn) static int s_test_known_signing_value( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, struct aws_byte_cursor private_key, struct aws_byte_cursor pub_x, struct aws_byte_cursor pub_y) { aws_cal_library_init(allocator); struct aws_ecc_key_pair *signing_key = aws_ecc_key_pair_new_from_private_key(allocator, curve_name, &private_key); ASSERT_NOT_NULL(signing_key); struct aws_ecc_key_pair *verifying_key = aws_ecc_key_pair_new_from_public_key(allocator, curve_name, &pub_x, &pub_y); ASSERT_NOT_NULL(verifying_key); uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message_input, &hash_value, 0); size_t signature_size = aws_ecc_key_pair_signature_length(signing_key); struct aws_byte_buf signature_buf; AWS_ZERO_STRUCT(signature_buf); aws_byte_buf_init(&signature_buf, allocator, signature_size); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); ASSERT_SUCCESS(aws_ecc_key_pair_sign_message(signing_key, &hash_cur, &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_ecc_key_pair_verify_signature(verifying_key, &hash_cur, &signature_cur)); aws_byte_buf_clean_up(&signature_buf); aws_ecc_key_pair_release(verifying_key); aws_ecc_key_pair_release(signing_key); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } static int s_ecdsa_p256_test_known_signing_value_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t d[] = { 0x51, 0x9b, 0x42, 0x3d, 0x71, 0x5f, 0x8b, 0x58, 0x1f, 0x4f, 0xa8, 0xee, 0x59, 0xf4, 0x77, 0x1a, 0x5b, 0x44, 0xc8, 0x13, 0x0b, 0x4e, 0x3e, 0xac, 0xca, 0x54, 0xa5, 0x6d, 0xda, 0x72, 0xb4, 0x64, }; struct aws_byte_cursor private_key = aws_byte_cursor_from_array(d, sizeof(d)); uint8_t x[] = { 0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc, 0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83, }; uint8_t y[] = { 0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0, 0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); return s_test_known_signing_value(allocator, AWS_CAL_ECDSA_P256, private_key, pub_x, pub_y); } AWS_TEST_CASE(ecdsa_p256_test_known_signing_value, 
s_ecdsa_p256_test_known_signing_value_fn) static int s_ecdsa_p384_test_known_signing_value_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t d[] = { 0x53, 0x94, 0xf7, 0x97, 0x3e, 0xa8, 0x68, 0xc5, 0x2b, 0xf3, 0xff, 0x8d, 0x8c, 0xee, 0xb4, 0xdb, 0x90, 0xa6, 0x83, 0x65, 0x3b, 0x12, 0x48, 0x5d, 0x5f, 0x62, 0x7c, 0x3c, 0xe5, 0xab, 0xd8, 0x97, 0x8f, 0xc9, 0x67, 0x3d, 0x14, 0xa7, 0x1d, 0x92, 0x57, 0x47, 0x93, 0x16, 0x62, 0x49, 0x3c, 0x37, }; struct aws_byte_cursor private_key = aws_byte_cursor_from_array(d, sizeof(d)); uint8_t x[] = { 0xfd, 0x3c, 0x84, 0xe5, 0x68, 0x9b, 0xed, 0x27, 0x0e, 0x60, 0x1b, 0x3d, 0x80, 0xf9, 0x0d, 0x67, 0xa9, 0xae, 0x45, 0x1c, 0xce, 0x89, 0x0f, 0x53, 0xe5, 0x83, 0x22, 0x9a, 0xd0, 0xe2, 0xee, 0x64, 0x56, 0x11, 0xfa, 0x99, 0x36, 0xdf, 0xa4, 0x53, 0x06, 0xec, 0x18, 0x06, 0x67, 0x74, 0xaa, 0x24, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); uint8_t y[] = { 0xb8, 0x3c, 0xa4, 0x12, 0x6c, 0xfc, 0x4c, 0x4d, 0x1d, 0x18, 0xa4, 0xb6, 0xc2, 0x1c, 0x7f, 0x69, 0x9d, 0x51, 0x23, 0xdd, 0x9c, 0x24, 0xf6, 0x6f, 0x83, 0x38, 0x46, 0xee, 0xb5, 0x82, 0x96, 0x19, 0x6b, 0x42, 0xec, 0x06, 0x42, 0x5d, 0xb5, 0xb7, 0x0a, 0x4b, 0x81, 0xb7, 0xfc, 0xf7, 0x05, 0xa0, }; struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); return s_test_known_signing_value(allocator, AWS_CAL_ECDSA_P384, private_key, pub_x, pub_y); } AWS_TEST_CASE(ecdsa_p384_test_known_signing_value, s_ecdsa_p384_test_known_signing_value_fn) static int s_ecdsa_test_invalid_signature_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_ecc_key_pair *key_pair = aws_ecc_key_pair_new_generate_random(allocator, AWS_CAL_ECDSA_P256); ASSERT_NOT_NULL(key_pair); uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message_input, &hash_value, 0); size_t signature_size = aws_ecc_key_pair_signature_length(key_pair); struct aws_byte_buf signature_buf; AWS_ZERO_STRUCT(signature_buf); aws_byte_buf_init(&signature_buf, allocator, signature_size); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); ASSERT_SUCCESS(aws_ecc_key_pair_sign_message(key_pair, &hash_cur, &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); /* just flip some bits in the signature. */ uint8_t value_to_flip = signature_buf.buffer[15]; signature_buf.buffer[15] = value_to_flip == 0 ? 
0x0a : ~(value_to_flip); ASSERT_ERROR( AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED, aws_ecc_key_pair_verify_signature(key_pair, &hash_cur, &signature_cur)); aws_byte_buf_clean_up(&signature_buf); aws_ecc_key_pair_release(key_pair); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ecdsa_test_invalid_signature, s_ecdsa_test_invalid_signature_fn) static int s_test_key_gen(struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name) { aws_cal_library_init(allocator); struct aws_ecc_key_pair *key_pair = aws_ecc_key_pair_new_generate_random(allocator, curve_name); struct aws_byte_cursor pub_x; struct aws_byte_cursor pub_y; aws_ecc_key_pair_get_public_key(key_pair, &pub_x, &pub_y); ASSERT_TRUE(pub_x.len > 0); ASSERT_TRUE(pub_y.len > 0); struct aws_byte_cursor priv_d; aws_ecc_key_pair_get_private_key(key_pair, &priv_d); ASSERT_TRUE(priv_d.len > 0); uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message_input, &hash_value, 0); size_t signature_size = aws_ecc_key_pair_signature_length(key_pair); struct aws_byte_buf signature_buf; AWS_ZERO_STRUCT(signature_buf); aws_byte_buf_init(&signature_buf, allocator, signature_size); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); ASSERT_SUCCESS(aws_ecc_key_pair_sign_message(key_pair, &hash_cur, &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_ecc_key_pair_verify_signature(key_pair, &hash_cur, &signature_cur)); aws_byte_buf_clean_up(&signature_buf); aws_ecc_key_pair_release(key_pair); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } static int s_ecdsa_p256_test_key_gen_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_key_gen(allocator, AWS_CAL_ECDSA_P256); } AWS_TEST_CASE(ecdsa_p256_test_key_gen, s_ecdsa_p256_test_key_gen_fn) static int s_ecdsa_p384_test_key_gen_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_key_gen(allocator, AWS_CAL_ECDSA_P384); } AWS_TEST_CASE(ecdsa_p384_test_key_gen, s_ecdsa_p384_test_key_gen_fn) static int s_test_key_gen_export(struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name) { aws_cal_library_init(allocator); struct aws_ecc_key_pair *key_pair = aws_ecc_key_pair_new_generate_random(allocator, curve_name); struct aws_byte_cursor pub_x; struct aws_byte_cursor pub_y; aws_ecc_key_pair_get_public_key(key_pair, &pub_x, &pub_y); ASSERT_TRUE(pub_x.len > 0); ASSERT_TRUE(pub_y.len > 0); struct aws_byte_cursor priv_d; aws_ecc_key_pair_get_private_key(key_pair, &priv_d); 
ASSERT_TRUE(priv_d.len > 0); /* * The private key we get back from the randomly generated key may be shorter than we expect since it's * minimally encoded. In that case we have to pad it out to satisfy the key length invariant. */ size_t curve_private_key_length = aws_ecc_key_coordinate_byte_size_from_curve_name(curve_name); struct aws_byte_buf padded_priv_d; aws_byte_buf_init(&padded_priv_d, allocator, curve_private_key_length); aws_byte_buf_secure_zero(&padded_priv_d); if (priv_d.len < curve_private_key_length) { padded_priv_d.len = curve_private_key_length - priv_d.len; } ASSERT_SUCCESS(aws_byte_buf_append(&padded_priv_d, &priv_d)); struct aws_byte_cursor padded_priv_d_cursor = aws_byte_cursor_from_buf(&padded_priv_d); struct aws_ecc_key_pair *signing_key = aws_ecc_key_pair_new_from_private_key(allocator, curve_name, &padded_priv_d_cursor); ASSERT_NOT_NULL(signing_key); uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message_input, &hash_value, 0); size_t signature_size = aws_ecc_key_pair_signature_length(key_pair); struct aws_byte_buf signature_buf; AWS_ZERO_STRUCT(signature_buf); aws_byte_buf_init(&signature_buf, allocator, signature_size); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); ASSERT_SUCCESS(aws_ecc_key_pair_sign_message(signing_key, &hash_cur, &signature_buf)); struct aws_ecc_key_pair *verifying_key = aws_ecc_key_pair_new_from_public_key(allocator, curve_name, &pub_x, &pub_y); ASSERT_NOT_NULL(verifying_key); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_ecc_key_pair_verify_signature(verifying_key, &hash_cur, &signature_cur)); aws_byte_buf_clean_up(&signature_buf); aws_byte_buf_clean_up(&padded_priv_d); aws_ecc_key_pair_release(key_pair); aws_ecc_key_pair_release(signing_key); aws_ecc_key_pair_release(verifying_key); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } static int s_ecdsa_p256_test_key_gen_export_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_key_gen_export(allocator, AWS_CAL_ECDSA_P256); } AWS_TEST_CASE(ecdsa_p256_test_key_gen_export, s_ecdsa_p256_test_key_gen_export_fn) static int s_ecdsa_p384_test_key_gen_export_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_key_gen_export(allocator, AWS_CAL_ECDSA_P256); } AWS_TEST_CASE(ecdsa_p384_test_key_gen_export, s_ecdsa_p384_test_key_gen_export_fn) static int s_ecdsa_test_import_asn1_key_pair( struct aws_allocator *allocator, struct aws_byte_cursor asn1_cur, enum aws_ecc_curve_name 
expected_curve_name) { aws_cal_library_init(allocator); struct aws_ecc_key_pair *imported_key = aws_ecc_key_pair_new_from_asn1(allocator, &asn1_cur); ASSERT_NOT_NULL(imported_key); ASSERT_INT_EQUALS(expected_curve_name, imported_key->curve_name); uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message_input, &hash_value, 0); size_t signature_size = aws_ecc_key_pair_signature_length(imported_key); struct aws_byte_buf signature_buf; AWS_ZERO_STRUCT(signature_buf); aws_byte_buf_init(&signature_buf, allocator, signature_size); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); ASSERT_SUCCESS(aws_ecc_key_pair_sign_message(imported_key, &hash_cur, &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_ecc_key_pair_verify_signature(imported_key, &hash_cur, &signature_cur)); aws_byte_buf_clean_up(&signature_buf); aws_ecc_key_pair_release(imported_key); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } static int s_ecdsa_p256_test_import_asn1_key_pair_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t asn1_encoded_key_raw[] = { 0x30, 0x77, 0x02, 0x01, 0x01, 0x04, 0x20, 0x78, 0xed, 0xed, 0xcf, 0x95, 0x9e, 0x42, 0x24, 0x37, 0xa4, 0x56, 0xed, 0x08, 0x19, 0x3c, 0x53, 0x4b, 0x6f, 0xff, 0x40, 0x64, 0x48, 0x6a, 0x49, 0x86, 0x0c, 0xb7, 0x0a, 0xe5, 0x2d, 0xbd, 0xd6, 0xa0, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xbf, 0x61, 0x63, 0x46, 0x93, 0x2d, 0x00, 0x33, 0x19, 0xe3, 0x3a, 0x19, 0xc6, 0xc8, 0x55, 0xf5, 0xc8, 0x44, 0x91, 0xe9, 0x9b, 0x83, 0x36, 0x67, 0x5d, 0x25, 0x0d, 0x7b, 0xe0, 0xc0, 0xf1, 0xd2, 0xaa, 0x5c, 0xdf, 0xfb, 0xa9, 0x37, 0x19, 0x8d, 0x82, 0x47, 0x28, 0x88, 0xbe, 0x46, 0x7f, 0x3c, 0xcd, 0x41, 0xaa, 0x08, 0x9a, 0x37, 0x0d, 0x61, 0x7f, 0x5f, 0xeb, 0x9f, 0x55, 0xf7, 0x54, 0xda, 0x0a, }; struct aws_byte_cursor asn1_encoded_key = aws_byte_cursor_from_array(asn1_encoded_key_raw, sizeof(asn1_encoded_key_raw)); return s_ecdsa_test_import_asn1_key_pair(allocator, asn1_encoded_key, AWS_CAL_ECDSA_P256); } AWS_TEST_CASE(ecdsa_p256_test_import_asn1_key_pair, s_ecdsa_p256_test_import_asn1_key_pair_fn) static int s_ecdsa_p384_test_import_asn1_key_pair_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t asn1_encoded_key_raw[] = { 0x30, 0x81, 0xa4, 0x02, 0x01, 0x01, 0x04, 0x30, 0xa4, 0x4e, 0x2c, 0xf8, 0x6a, 0xfd, 0x42, 0x0e, 0xd4, 0xbb, 0x2d, 0x08, 0xe2, 0x35, 0xe7, 0xb2, 0xc7, 0x87, 0x37, 0xbc, 0x92, 0xc2, 0x9a, 0x84, 0x39, 0x99, 0x24, 0xe3, 
0xa3, 0x01, 0x8c, 0xa0, 0xc1, 0x34, 0xd7, 0x8d, 0x86, 0xa1, 0x8c, 0xe1, 0xe4, 0x3e, 0xd1, 0xe3, 0xff, 0x8b, 0xa4, 0x1d, 0xa0, 0x07, 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x22, 0xa1, 0x64, 0x03, 0x62, 0x00, 0x04, 0x89, 0x64, 0x99, 0x1e, 0x1e, 0xa3, 0x6e, 0x30, 0x5e, 0xb1, 0x00, 0xef, 0x51, 0x0e, 0x78, 0xc6, 0x7a, 0x2f, 0x1d, 0x21, 0x65, 0xe2, 0x68, 0xfa, 0x22, 0x5f, 0x1c, 0x8e, 0x00, 0xdc, 0x74, 0xa6, 0x97, 0x7d, 0x73, 0xb3, 0x05, 0x00, 0xfd, 0xf5, 0x52, 0x85, 0xd8, 0x81, 0x62, 0x6c, 0x0a, 0x04, 0xf4, 0xaa, 0x39, 0xe0, 0x2b, 0x05, 0x29, 0xd1, 0x70, 0x8a, 0x42, 0x44, 0x9a, 0xe7, 0xed, 0xec, 0x3a, 0x52, 0x06, 0x24, 0x67, 0x35, 0x27, 0x6b, 0x80, 0x7c, 0xda, 0xc5, 0xe9, 0x97, 0xfa, 0x1d, 0xd4, 0x0c, 0x27, 0x54, 0xed, 0x97, 0xe6, 0xbd, 0xe0, 0xaf, 0xff, 0xaa, 0xb9, 0x63, 0xf3, 0x21, 0x0a, }; struct aws_byte_cursor asn1_encoded_key = aws_byte_cursor_from_array(asn1_encoded_key_raw, sizeof(asn1_encoded_key_raw)); return s_ecdsa_test_import_asn1_key_pair(allocator, asn1_encoded_key, AWS_CAL_ECDSA_P384); } AWS_TEST_CASE(ecdsa_p384_test_import_asn1_key_pair, s_ecdsa_p384_test_import_asn1_key_pair_fn) static int s_ecdsa_test_import_asn1_key_pair_public_only_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t asn1_encoded_full_key_raw[] = { 0x30, 0x77, 0x02, 0x01, 0x01, 0x04, 0x20, 0x99, 0x16, 0x2a, 0x5b, 0x4e, 0x63, 0x86, 0x4c, 0x5f, 0x8e, 0x37, 0xf7, 0x2b, 0xbd, 0x97, 0x1d, 0x5c, 0x68, 0x80, 0x18, 0xc3, 0x91, 0x0f, 0xb3, 0xc3, 0xf9, 0x3a, 0xc9, 0x7a, 0x4b, 0xa3, 0xf6, 0xa0, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xec, 0x6c, 0xd7, 0x4b, 0xdc, 0x33, 0xc2, 0x56, 0x32, 0xad, 0x52, 0x56, 0xac, 0xf5, 0xf0, 0xe6, 0x28, 0x99, 0x84, 0x83, 0xaf, 0x73, 0x6f, 0xfe, 0xd7, 0x83, 0x3b, 0x42, 0x81, 0x5d, 0x2e, 0xe0, 0xdb, 0xf6, 0xac, 0xa4, 0xc6, 0x16, 0x7e, 0x3e, 0xe0, 0xff, 0x7b, 0x43, 0xe8, 0xa1, 0x36, 0x50, 0x92, 0x83, 0x06, 0x94, 0xb3, 0xd4, 0x93, 0x06, 0xde, 0x63, 0x8a, 0xa1, 0x1c, 0x3f, 0xb2, 0x57, 0x0a, }; uint8_t asn1_encoded_pub_key_raw[] = { 0x30, 0x59, 0x30, 0x13, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0x03, 0x42, 0x00, 0x04, 0xec, 0x6c, 0xd7, 0x4b, 0xdc, 0x33, 0xc2, 0x56, 0x32, 0xad, 0x52, 0x56, 0xac, 0xf5, 0xf0, 0xe6, 0x28, 0x99, 0x84, 0x83, 0xaf, 0x73, 0x6f, 0xfe, 0xd7, 0x83, 0x3b, 0x42, 0x81, 0x5d, 0x2e, 0xe0, 0xdb, 0xf6, 0xac, 0xa4, 0xc6, 0x16, 0x7e, 0x3e, 0xe0, 0xff, 0x7b, 0x43, 0xe8, 0xa1, 0x36, 0x50, 0x92, 0x83, 0x06, 0x94, 0xb3, 0xd4, 0x93, 0x06, 0xde, 0x63, 0x8a, 0xa1, 0x1c, 0x3f, 0xb2, 0x57, 0x0a, }; struct aws_byte_cursor full_key_asn1 = aws_byte_cursor_from_array(asn1_encoded_full_key_raw, sizeof(asn1_encoded_full_key_raw)); struct aws_byte_cursor pub_key_asn1 = aws_byte_cursor_from_array(asn1_encoded_pub_key_raw, sizeof(asn1_encoded_pub_key_raw)); struct aws_ecc_key_pair *signing_key = aws_ecc_key_pair_new_from_asn1(allocator, &full_key_asn1); ASSERT_NOT_NULL(signing_key); struct aws_ecc_key_pair *verifying_key = aws_ecc_key_pair_new_from_asn1(allocator, &pub_key_asn1); ASSERT_NOT_NULL(verifying_key); uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 
0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message_input, &hash_value, 0); size_t signature_size = aws_ecc_key_pair_signature_length(signing_key); struct aws_byte_buf signature_buf; AWS_ZERO_STRUCT(signature_buf); aws_byte_buf_init(&signature_buf, allocator, signature_size); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); ASSERT_SUCCESS(aws_ecc_key_pair_sign_message(signing_key, &hash_cur, &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_ecc_key_pair_verify_signature(verifying_key, &hash_cur, &signature_cur)); aws_byte_buf_clean_up(&signature_buf); aws_ecc_key_pair_release(verifying_key); aws_ecc_key_pair_release(signing_key); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ecdsa_test_import_asn1_key_pair_public_only, s_ecdsa_test_import_asn1_key_pair_public_only_fn) static int s_ecdsa_test_import_asn1_key_pair_invalid_fails_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); /* I changed the OID to nonsense */ uint8_t bad_asn1_encoded_full_key_raw[] = { 0x30, 0x77, 0x02, 0x01, 0x01, 0x04, 0x20, 0x99, 0x16, 0x2a, 0x5b, 0x4e, 0x63, 0x86, 0x4c, 0x5f, 0x8e, 0x37, 0xf7, 0x2b, 0xbd, 0x97, 0x1d, 0x5c, 0x68, 0x80, 0x18, 0xc3, 0x91, 0x0f, 0xb3, 0xc3, 0xf9, 0x3a, 0xc9, 0x7a, 0x4b, 0xa3, 0xf6, 0xa0, 0x0a, 0x06, 0x08, 0x2a, 0x8a, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xec, 0x6c, 0xd7, 0x4b, 0xdc, 0x33, 0xc2, 0x56, 0x32, 0xad, 0x52, 0x56, 0xac, 0xf5, 0xf0, 0xe6, 0x28, 0x99, 0x84, 0x83, 0xaf, 0x73, 0x6f, 0xfe, 0xd7, 0x83, 0x3b, 0x42, 0x81, 0x5d, 0x2e, 0xe0, 0xdb, 0xf6, 0xac, 0xa4, 0xc6, 0x16, 0x7e, 0x3e, 0xe0, 0xff, 0x7b, 0x43, 0xe8, 0xa1, 0x36, 0x50, 0x92, 0x83, 0x06, 0x94, 0xb3, 0xd4, 0x93, 0x06, 0xde, 0x63, 0x8a, 0xa1, 0x1c, 0x3f, 0xb2, 0x57, 0x0a, }; struct aws_byte_cursor bad_full_key_asn1 = aws_byte_cursor_from_array(bad_asn1_encoded_full_key_raw, sizeof(bad_asn1_encoded_full_key_raw)); struct aws_ecc_key_pair *signing_key = aws_ecc_key_pair_new_from_asn1(allocator, &bad_full_key_asn1); ASSERT_NULL(signing_key); ASSERT_INT_EQUALS(AWS_ERROR_CAL_UNKNOWN_OBJECT_IDENTIFIER, aws_last_error()); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ecdsa_test_import_asn1_key_pair_invalid_fails, s_ecdsa_test_import_asn1_key_pair_invalid_fails_fn) /* this test exists because we have to manually handle signature encoding/decoding on windows. this takes an encoded signature and makes sure we decode and verify it properly. How do we know we encode properly b.t.w? Well we have tests that verify signatures we generated, so we already know that anything we signed can be decoded. What we don't have proven is that we're not just symetrically wrong. So, let's take the format we know signatures must be in ASN.1 DER encoded, and make sure we can verify it. 
Since we KNOW the signing and verifying code is symetric, verifying the verification side should prove our encoding/decoding code is correct to the spec. */ static int s_ecdsa_test_signature_format_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t asn1_encoded_signature_raw[] = { 0x30, 0x45, 0x02, 0x21, 0x00, 0xd7, 0xc5, 0xb9, 0x9e, 0x0b, 0xb1, 0x1a, 0x1f, 0x32, 0xda, 0x66, 0xe0, 0xff, 0x59, 0xb7, 0x8a, 0x5e, 0xb3, 0x94, 0x9c, 0x23, 0xb3, 0xfc, 0x1f, 0x18, 0xcc, 0xf6, 0x61, 0x67, 0x8b, 0xf1, 0xc1, 0x02, 0x20, 0x26, 0x4d, 0x8b, 0x7c, 0xaa, 0x52, 0x4c, 0xc0, 0x2e, 0x5f, 0xf6, 0x7e, 0x24, 0x82, 0xe5, 0xfb, 0xcb, 0xc7, 0x9b, 0x83, 0x0d, 0x19, 0x7e, 0x7a, 0x40, 0x37, 0x87, 0xdd, 0x1c, 0x93, 0x13, 0xc4, }; uint8_t x[] = { 0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc, 0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83, }; uint8_t y[] = { 0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0, 0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); struct aws_ecc_key_pair *verifying_key = aws_ecc_key_pair_new_from_public_key(allocator, AWS_CAL_ECDSA_P256, &pub_x, &pub_y); ASSERT_NOT_NULL(verifying_key); uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message_input, &hash_value, 0); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); struct aws_byte_buf signature_buf = aws_byte_buf_from_array(asn1_encoded_signature_raw, sizeof(asn1_encoded_signature_raw)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_ecc_key_pair_verify_signature(verifying_key, &hash_cur, &signature_cur)); aws_ecc_key_pair_release(verifying_key); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ecdsa_test_signature_format, s_ecdsa_test_signature_format_fn) enum aws_ecc_key_check_flags { AWS_ECC_KCF_PUBLIC = 1, AWS_ECC_KCF_PRIVATE = 2, }; static int s_check_cursor_data(struct aws_byte_cursor *cursor) { ASSERT_TRUE(cursor->ptr != NULL && cursor->len > 0 && (*cursor->ptr == 0 || *cursor->ptr != 0)); return AWS_OP_SUCCESS; } /* * The assumption here is that if a key has been released then we zeroed key-related memory and so we should either * crash (referencing freed memory) or get back 
empty data. */ static int s_test_key_ref_counting(struct aws_ecc_key_pair *key_pair, enum aws_ecc_key_check_flags flags) { aws_ecc_key_pair_acquire(key_pair); aws_ecc_key_pair_release(key_pair); aws_ecc_key_pair_acquire(key_pair); aws_ecc_key_pair_acquire(key_pair); aws_ecc_key_pair_release(key_pair); aws_ecc_key_pair_release(key_pair); if (flags & AWS_ECC_KCF_PRIVATE) { struct aws_byte_cursor private_key_cursor; AWS_ZERO_STRUCT(private_key_cursor); aws_ecc_key_pair_get_private_key(key_pair, &private_key_cursor); ASSERT_SUCCESS(s_check_cursor_data(&private_key_cursor)); } if (flags & AWS_ECC_KCF_PUBLIC) { struct aws_byte_cursor pub_x; AWS_ZERO_STRUCT(pub_x); struct aws_byte_cursor pub_y; AWS_ZERO_STRUCT(pub_y); aws_ecc_key_pair_get_public_key(key_pair, &pub_x, &pub_y); ASSERT_SUCCESS(s_check_cursor_data(&pub_x)); ASSERT_SUCCESS(s_check_cursor_data(&pub_y)); } aws_ecc_key_pair_release(key_pair); return AWS_OP_SUCCESS; } static int s_ecc_key_pair_random_ref_count_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_ecc_key_pair *key_pair = aws_ecc_key_pair_new_generate_random(allocator, AWS_CAL_ECDSA_P256); ASSERT_NOT_NULL(key_pair); int result = s_test_key_ref_counting(key_pair, AWS_ECC_KCF_PUBLIC | AWS_ECC_KCF_PRIVATE); aws_cal_library_clean_up(); return result; } AWS_TEST_CASE(ecc_key_pair_random_ref_count_test, s_ecc_key_pair_random_ref_count_test) static int s_ecc_key_pair_public_ref_count_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t x[] = { 0x1c, 0xcb, 0xe9, 0x1c, 0x07, 0x5f, 0xc7, 0xf4, 0xf0, 0x33, 0xbf, 0xa2, 0x48, 0xdb, 0x8f, 0xcc, 0xd3, 0x56, 0x5d, 0xe9, 0x4b, 0xbf, 0xb1, 0x2f, 0x3c, 0x59, 0xff, 0x46, 0xc2, 0x71, 0xbf, 0x83, }; uint8_t y[] = { 0xce, 0x40, 0x14, 0xc6, 0x88, 0x11, 0xf9, 0xa2, 0x1a, 0x1f, 0xdb, 0x2c, 0x0e, 0x61, 0x13, 0xe0, 0x6d, 0xb7, 0xca, 0x93, 0xb7, 0x40, 0x4e, 0x78, 0xdc, 0x7c, 0xcd, 0x5c, 0xa8, 0x9a, 0x4c, 0xa9, }; struct aws_byte_cursor pub_x = aws_byte_cursor_from_array(x, sizeof(x)); struct aws_byte_cursor pub_y = aws_byte_cursor_from_array(y, sizeof(y)); struct aws_ecc_key_pair *key_pair = aws_ecc_key_pair_new_from_public_key(allocator, AWS_CAL_ECDSA_P256, &pub_x, &pub_y); ASSERT_NOT_NULL(key_pair); int result = s_test_key_ref_counting(key_pair, AWS_ECC_KCF_PUBLIC); aws_cal_library_clean_up(); return result; } AWS_TEST_CASE(ecc_key_pair_public_ref_count_test, s_ecc_key_pair_public_ref_count_test) static int s_ecc_key_pair_asn1_ref_count_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t asn1_encoded_full_key_raw[] = { 0x30, 0x77, 0x02, 0x01, 0x01, 0x04, 0x20, 0x99, 0x16, 0x2a, 0x5b, 0x4e, 0x63, 0x86, 0x4c, 0x5f, 0x8e, 0x37, 0xf7, 0x2b, 0xbd, 0x97, 0x1d, 0x5c, 0x68, 0x80, 0x18, 0xc3, 0x91, 0x0f, 0xb3, 0xc3, 0xf9, 0x3a, 0xc9, 0x7a, 0x4b, 0xa3, 0xf6, 0xa0, 0x0a, 0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07, 0xa1, 0x44, 0x03, 0x42, 0x00, 0x04, 0xec, 0x6c, 0xd7, 0x4b, 0xdc, 0x33, 0xc2, 0x56, 0x32, 0xad, 0x52, 0x56, 0xac, 0xf5, 0xf0, 0xe6, 0x28, 0x99, 0x84, 0x83, 0xaf, 0x73, 0x6f, 0xfe, 0xd7, 0x83, 0x3b, 0x42, 0x81, 0x5d, 0x2e, 0xe0, 0xdb, 0xf6, 0xac, 0xa4, 0xc6, 0x16, 0x7e, 0x3e, 0xe0, 0xff, 0x7b, 0x43, 0xe8, 0xa1, 0x36, 0x50, 0x92, 0x83, 0x06, 0x94, 0xb3, 0xd4, 0x93, 0x06, 0xde, 0x63, 0x8a, 0xa1, 0x1c, 0x3f, 0xb2, 0x57, 0x0a, }; struct aws_byte_cursor full_key_asn1 = aws_byte_cursor_from_array(asn1_encoded_full_key_raw, sizeof(asn1_encoded_full_key_raw)); struct aws_ecc_key_pair *key_pair 
= aws_ecc_key_pair_new_from_asn1(allocator, &full_key_asn1); ASSERT_NOT_NULL(key_pair); int result = s_test_key_ref_counting(key_pair, AWS_ECC_KCF_PUBLIC | AWS_ECC_KCF_PRIVATE); aws_cal_library_clean_up(); return result; } AWS_TEST_CASE(ecc_key_pair_asn1_ref_count_test, s_ecc_key_pair_asn1_ref_count_test) static int s_ecc_key_pair_private_ref_count_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t d[] = { 0xc9, 0x80, 0x68, 0x98, 0xa0, 0x33, 0x49, 0x16, 0xc8, 0x60, 0x74, 0x88, 0x80, 0xa5, 0x41, 0xf0, 0x93, 0xb5, 0x79, 0xa9, 0xb1, 0xf3, 0x29, 0x34, 0xd8, 0x6c, 0x36, 0x3c, 0x39, 0x80, 0x03, 0x57, }; struct aws_byte_cursor private_key_cursor = aws_byte_cursor_from_array(d, sizeof(d)); struct aws_ecc_key_pair *key_pair = aws_ecc_key_pair_new_from_private_key(allocator, AWS_CAL_ECDSA_P256, &private_key_cursor); ASSERT_NOT_NULL(key_pair); int result = s_test_key_ref_counting(key_pair, AWS_ECC_KCF_PRIVATE); aws_cal_library_clean_up(); return result; } AWS_TEST_CASE(ecc_key_pair_private_ref_count_test, s_ecc_key_pair_private_ref_count_test) /* Message, signature, and key values for a correct signature that contains a coordinate that is < 32 bytes long in the der encoding. This was an issue on windows where we have to unpack the coordinates and pass them to BCrypt and weren't padding them with leading zeros. */ AWS_STATIC_STRING_FROM_LITERAL(s_hex_message, "a8ddb188e516d365ba275c2b6d55ead851e89ab66f162adf29614f37cd3403c9"); AWS_STATIC_STRING_FROM_LITERAL( s_signature_value, "3044021f7cfd51af2b722f8d1fa1afb65b4d5486ed59a67bcf9f3acc62aad6ddd37db10221009d4c9f9a37104fc01a8daffc9a6bd1056b7b43" "c1196edde0b52878b759628f8c"); AWS_STATIC_STRING_FROM_LITERAL(s_pub_x, "b6618f6a65740a99e650b33b6b4b5bd0d43b176d721a3edfea7e7d2d56d936b1"); AWS_STATIC_STRING_FROM_LITERAL(s_pub_y, "865ed22a7eadc9c5cb9d2cbaca1b3699139fedc5043dc6661864218330c8e518"); static int s_validate_message_signature( struct aws_allocator *allocator, struct aws_ecc_key_pair *ecc_key, struct aws_byte_cursor hex_message_cursor, struct aws_byte_cursor signature_value_cursor) { size_t binary_length = 0; if (aws_hex_compute_decoded_len(signature_value_cursor.len, &binary_length)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; struct aws_byte_buf binary_signature; AWS_ZERO_STRUCT(binary_signature); struct aws_byte_buf message_buffer; AWS_ZERO_STRUCT(message_buffer); if (aws_byte_buf_init(&binary_signature, allocator, binary_length) || aws_byte_buf_init(&message_buffer, allocator, AWS_SHA256_LEN)) { goto done; } if (aws_hex_decode(&signature_value_cursor, &binary_signature)) { goto done; } if (aws_hex_decode(&hex_message_cursor, &message_buffer)) { goto done; } struct aws_byte_cursor binary_signature_cursor = aws_byte_cursor_from_array(binary_signature.buffer, binary_signature.len); struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&message_buffer); if (aws_ecc_key_pair_verify_signature(ecc_key, &digest_cursor, &binary_signature_cursor)) { goto done; } result = AWS_OP_SUCCESS; done: aws_byte_buf_clean_up(&binary_signature); aws_byte_buf_clean_up(&message_buffer); return result; } static int s_ecdsa_p256_test_small_coordinate_verification(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_ecc_key_pair *key = aws_ecc_key_new_from_hex_coordinates( allocator, AWS_CAL_ECDSA_P256, aws_byte_cursor_from_string(s_pub_x), aws_byte_cursor_from_string(s_pub_y)); ASSERT_SUCCESS(s_validate_message_signature( allocator, key, aws_byte_cursor_from_string(s_hex_message), 
aws_byte_cursor_from_string(s_signature_value))); aws_ecc_key_pair_release(key); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ecdsa_p256_test_small_coordinate_verification, s_ecdsa_p256_test_small_coordinate_verification); #ifdef AWS_OS_APPLE static int s_test_key_gen_from_private_fuzz( struct aws_allocator *allocator, enum aws_ecc_curve_name curve_name, size_t number_loop) { uint8_t message[] = { 0x59, 0x05, 0x23, 0x88, 0x77, 0xc7, 0x74, 0x21, 0xf7, 0x3e, 0x43, 0xee, 0x3d, 0xa6, 0xf2, 0xd9, 0xe2, 0xcc, 0xad, 0x5f, 0xc9, 0x42, 0xdc, 0xec, 0x0c, 0xbd, 0x25, 0x48, 0x29, 0x35, 0xfa, 0xaf, 0x41, 0x69, 0x83, 0xfe, 0x16, 0x5b, 0x1a, 0x04, 0x5e, 0xe2, 0xbc, 0xd2, 0xe6, 0xdc, 0xa3, 0xbd, 0xf4, 0x6c, 0x43, 0x10, 0xa7, 0x46, 0x1f, 0x9a, 0x37, 0x96, 0x0c, 0xa6, 0x72, 0xd3, 0xfe, 0xb5, 0x47, 0x3e, 0x25, 0x36, 0x05, 0xfb, 0x1d, 0xdf, 0xd2, 0x80, 0x65, 0xb5, 0x3c, 0xb5, 0x85, 0x8a, 0x8a, 0xd2, 0x81, 0x75, 0xbf, 0x9b, 0xd3, 0x86, 0xa5, 0xe4, 0x71, 0xea, 0x7a, 0x65, 0xc1, 0x7c, 0xc9, 0x34, 0xa9, 0xd7, 0x91, 0xe9, 0x14, 0x91, 0xeb, 0x37, 0x54, 0xd0, 0x37, 0x99, 0x79, 0x0f, 0xe2, 0xd3, 0x08, 0xd1, 0x61, 0x46, 0xd5, 0xc9, 0xb0, 0xd0, 0xde, 0xbd, 0x97, 0xd7, 0x9c, 0xe8, }; struct aws_byte_cursor message_input = aws_byte_cursor_from_array(message, sizeof(message)); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); aws_sha256_compute(allocator, &message_input, &hash_value, 0); for (size_t i = 0; i < number_loop; i++) { struct aws_ecc_key_pair *key_pair = aws_ecc_key_pair_new_generate_random(allocator, curve_name); struct aws_byte_cursor priv_d; aws_ecc_key_pair_get_private_key(key_pair, &priv_d); ASSERT_TRUE(priv_d.len > 0); struct aws_ecc_key_pair *key_pair_private = aws_ecc_key_pair_new_from_private_key(allocator, curve_name, &priv_d); ASSERT_NOT_NULL(key_pair_private); struct aws_byte_cursor pub_x; struct aws_byte_cursor pub_y; aws_ecc_key_pair_get_public_key(key_pair_private, &pub_x, &pub_y); ASSERT_UINT_EQUALS(0, pub_x.len); ASSERT_UINT_EQUALS(0, pub_y.len); size_t signature_size = aws_ecc_key_pair_signature_length(key_pair_private); struct aws_byte_buf signature_buf; AWS_ZERO_STRUCT(signature_buf); aws_byte_buf_init(&signature_buf, allocator, signature_size); /* Use key from private to sign */ ASSERT_SUCCESS(aws_ecc_key_pair_sign_message(key_pair_private, &hash_cur, &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_ecc_key_pair_verify_signature(key_pair, &hash_cur, &signature_cur)); aws_ecc_key_pair_release(key_pair); aws_ecc_key_pair_release(key_pair_private); aws_byte_buf_clean_up(&signature_buf); } aws_byte_buf_clean_up(&hash_value); return AWS_OP_SUCCESS; } static int s_ecc_key_gen_from_private_fuzz_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); ASSERT_SUCCESS(s_test_key_gen_from_private_fuzz(allocator, AWS_CAL_ECDSA_P256, 1000)); ASSERT_SUCCESS(s_test_key_gen_from_private_fuzz(allocator, AWS_CAL_ECDSA_P384, 1000)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ecc_key_gen_from_private_fuzz_test, s_ecc_key_gen_from_private_fuzz_test) #else static int s_ecc_key_gen_from_private_fuzz_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; return AWS_OP_SUCCESS; } AWS_TEST_CASE(ecc_key_gen_from_private_fuzz_test, s_ecc_key_gen_from_private_fuzz_test) #endif 
aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/md5_test.c000066400000000000000000000273341456575232400230320ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include /* * these are the rfc1321 test vectors */ static int s_md5_rfc1321_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str(""); uint8_t expected[] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_1, s_md5_rfc1321_test_case_1_fn) static int s_md5_rfc1321_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); uint8_t expected[] = { 0x0c, 0xc1, 0x75, 0xb9, 0xc0, 0xf1, 0xb6, 0xa8, 0x31, 0xc3, 0x99, 0xe2, 0x69, 0x77, 0x26, 0x61, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_2, s_md5_rfc1321_test_case_2_fn) static int s_md5_rfc1321_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abc"); uint8_t expected[] = { 0x90, 0x01, 0x50, 0x98, 0x3c, 0xd2, 0x4f, 0xb0, 0xd6, 0x96, 0x3f, 0x7d, 0x28, 0xe1, 0x7f, 0x72, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_3, s_md5_rfc1321_test_case_3_fn) static int s_md5_rfc1321_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("message digest"); uint8_t expected[] = { 0xf9, 0x6b, 0x69, 0x7d, 0x7c, 0xb7, 0x93, 0x8d, 0x52, 0x5a, 0x2f, 0x31, 0xaa, 0xf1, 0x61, 0xd0, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_4, s_md5_rfc1321_test_case_4_fn) static int s_md5_rfc1321_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghijklmnopqrstuvwxyz"); uint8_t expected[] = { 0xc3, 0xfc, 0xd3, 0xd7, 0x61, 0x92, 0xe4, 0x00, 0x7d, 0xfb, 0x49, 0x6c, 0xca, 0x67, 0xe1, 0x3b, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_5, s_md5_rfc1321_test_case_5_fn) static int s_md5_rfc1321_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"); uint8_t expected[] = { 0xd1, 0x74, 0xab, 0x98, 0xd2, 0x77, 0xd9, 0xf5, 0xa5, 0x61, 0x1c, 0x2c, 0x9f, 0x41, 0x9d, 0x9f, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_6, 
s_md5_rfc1321_test_case_6_fn) static int s_md5_rfc1321_test_case_7_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); uint8_t expected[] = { 0x57, 0xed, 0xf4, 0xa2, 0x2b, 0xe3, 0xc9, 0x55, 0xac, 0x49, 0xda, 0x2e, 0x21, 0x07, 0xb6, 0x7a, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_7, s_md5_rfc1321_test_case_7_fn) static int s_md5_rfc1321_test_case_7_truncated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); uint8_t expected[] = { 0x57, 0xed, 0xf4, 0xa2, 0x2b, 0xe3, 0xc9, 0x55, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_md5_new); } AWS_TEST_CASE(md5_rfc1321_test_case_7_truncated, s_md5_rfc1321_test_case_7_truncated_fn) static int s_md5_verify_known_collision_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t message_1[] = { 0xd1, 0x31, 0xdd, 0x02, 0xc5, 0xe6, 0xee, 0xc4, 0x69, 0x3d, 0x9a, 0x06, 0x98, 0xaf, 0xf9, 0x5c, 0x2f, 0xca, 0xb5, 0x87, 0x12, 0x46, 0x7e, 0xab, 0x40, 0x04, 0x58, 0x3e, 0xb8, 0xfb, 0x7f, 0x89, 0x55, 0xad, 0x34, 0x06, 0x09, 0xf4, 0xb3, 0x02, 0x83, 0xe4, 0x88, 0x83, 0x25, 0x71, 0x41, 0x5a, 0x08, 0x51, 0x25, 0xe8, 0xf7, 0xcd, 0xc9, 0x9f, 0xd9, 0x1d, 0xbd, 0xf2, 0x80, 0x37, 0x3c, 0x5b, 0xd8, 0x82, 0x3e, 0x31, 0x56, 0x34, 0x8f, 0x5b, 0xae, 0x6d, 0xac, 0xd4, 0x36, 0xc9, 0x19, 0xc6, 0xdd, 0x53, 0xe2, 0xb4, 0x87, 0xda, 0x03, 0xfd, 0x02, 0x39, 0x63, 0x06, 0xd2, 0x48, 0xcd, 0xa0, 0xe9, 0x9f, 0x33, 0x42, 0x0f, 0x57, 0x7e, 0xe8, 0xce, 0x54, 0xb6, 0x70, 0x80, 0xa8, 0x0d, 0x1e, 0xc6, 0x98, 0x21, 0xbc, 0xb6, 0xa8, 0x83, 0x93, 0x96, 0xf9, 0x65, 0x2b, 0x6f, 0xf7, 0x2a, 0x70, }; uint8_t message_2[] = { 0xd1, 0x31, 0xdd, 0x02, 0xc5, 0xe6, 0xee, 0xc4, 0x69, 0x3d, 0x9a, 0x06, 0x98, 0xaf, 0xf9, 0x5c, 0x2f, 0xca, 0xb5, 0x07, 0x12, 0x46, 0x7e, 0xab, 0x40, 0x04, 0x58, 0x3e, 0xb8, 0xfb, 0x7f, 0x89, 0x55, 0xad, 0x34, 0x06, 0x09, 0xf4, 0xb3, 0x02, 0x83, 0xe4, 0x88, 0x83, 0x25, 0xf1, 0x41, 0x5a, 0x08, 0x51, 0x25, 0xe8, 0xf7, 0xcd, 0xc9, 0x9f, 0xd9, 0x1d, 0xbd, 0x72, 0x80, 0x37, 0x3c, 0x5b, 0xd8, 0x82, 0x3e, 0x31, 0x56, 0x34, 0x8f, 0x5b, 0xae, 0x6d, 0xac, 0xd4, 0x36, 0xc9, 0x19, 0xc6, 0xdd, 0x53, 0xe2, 0x34, 0x87, 0xda, 0x03, 0xfd, 0x02, 0x39, 0x63, 0x06, 0xd2, 0x48, 0xcd, 0xa0, 0xe9, 0x9f, 0x33, 0x42, 0x0f, 0x57, 0x7e, 0xe8, 0xce, 0x54, 0xb6, 0x70, 0x80, 0x28, 0x0d, 0x1e, 0xc6, 0x98, 0x21, 0xbc, 0xb6, 0xa8, 0x83, 0x93, 0x96, 0xf9, 0x65, 0xab, 0x6f, 0xf7, 0x2a, 0x70, }; uint8_t collision_result[] = { 0x79, 0x05, 0x40, 0x25, 0x25, 0x5f, 0xb1, 0xa2, 0x6e, 0x4b, 0xc4, 0x22, 0xae, 0xf5, 0x4e, 0xb4, }; uint8_t output1[AWS_MD5_LEN] = {0}; struct aws_byte_buf output1_buf = aws_byte_buf_from_array(output1, sizeof(output1)); output1_buf.len = 0; struct aws_byte_cursor message_1_buf = aws_byte_cursor_from_array(message_1, sizeof(message_1)); ASSERT_SUCCESS(aws_md5_compute(allocator, &message_1_buf, &output1_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(collision_result, sizeof(collision_result), output1, sizeof(output1)); uint8_t output2[AWS_MD5_LEN] = {0}; struct aws_byte_buf 
output2_buf = aws_byte_buf_from_array(output2, sizeof(output2)); output2_buf.len = 0; struct aws_byte_cursor message_2_buf = aws_byte_cursor_from_array(message_2, sizeof(message_2)); ASSERT_SUCCESS(aws_md5_compute(allocator, &message_2_buf, &output2_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(collision_result, sizeof(collision_result), output2, sizeof(output2)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(md5_verify_known_collision, s_md5_verify_known_collision_fn) static int s_md5_invalid_buffer_size_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); uint8_t output[AWS_MD5_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_md5_compute(allocator, &input, &output_buf, 0)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(md5_invalid_buffer_size, s_md5_invalid_buffer_size_fn) static int s_md5_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); struct aws_hash *hash = aws_md5_new(allocator); ASSERT_NOT_NULL(hash); uint8_t output[AWS_MD5_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_update(hash, &input)); ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hash_update(hash, &input)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hash_finalize(hash, &output_buf, 0)); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(md5_test_invalid_state, s_md5_test_invalid_state_fn) static int s_md5_test_extra_buffer_space_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); struct aws_byte_buf digest_size_buf; struct aws_byte_buf super_size_buf; aws_byte_buf_init(&digest_size_buf, allocator, AWS_MD5_LEN); aws_byte_buf_init(&super_size_buf, allocator, AWS_MD5_LEN + 100); aws_md5_compute(allocator, &input, &digest_size_buf, 0); aws_md5_compute(allocator, &input, &super_size_buf, 0); ASSERT_TRUE(aws_byte_buf_eq(&digest_size_buf, &super_size_buf)); ASSERT_TRUE(super_size_buf.len == AWS_MD5_LEN); aws_byte_buf_clean_up(&digest_size_buf); aws_byte_buf_clean_up(&super_size_buf); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(md5_test_extra_buffer_space, s_md5_test_extra_buffer_space_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/rsa_test.c000066400000000000000000001122371456575232400231270ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include /* * TODO: Need better test vectors. NIST ones are a pain to use. * For now using manually generated vectors and relying on round tripping. 
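 * "Round tripping" here means each test signs or encrypts and then verifies or decrypts with the same
 * manually generated key material; for the deterministic PKCS#1 v1.5 signature path the result is
 * additionally compared against a pre-computed known-answer constant below.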
*/ static int s_byte_buf_decoded_from_base64_cur( struct aws_allocator *allocator, struct aws_byte_cursor cur, struct aws_byte_buf *out) { size_t decoded_length = 0; ASSERT_SUCCESS(aws_base64_compute_decoded_len(&cur, &decoded_length)); ASSERT_SUCCESS(aws_byte_buf_init(out, allocator, decoded_length)); ASSERT_SUCCESS(aws_base64_decode(&cur, out)); return AWS_OP_SUCCESS; } static const char *TEST_ENCRYPTION_STRING = "The quick brown fox jumps over the lazy dog."; static int s_rsa_encryption_roundtrip_helper( struct aws_allocator *allocator, struct aws_rsa_key_pair *key_pair, enum aws_rsa_encryption_algorithm algo) { struct aws_byte_cursor plaintext_cur = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); /*short buffer should fail*/ struct aws_byte_buf ciphertext_short; ASSERT_SUCCESS(aws_byte_buf_init(&ciphertext_short, allocator, 5)); ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_rsa_key_pair_encrypt(key_pair, algo, plaintext_cur, &ciphertext_short)); /*make sure not to clobber anything in existing buffer*/ struct aws_byte_cursor prefix = aws_byte_cursor_from_c_str("random_prefix"); struct aws_byte_buf ciphertext; ASSERT_SUCCESS(aws_byte_buf_init(&ciphertext, allocator, prefix.len + aws_rsa_key_pair_block_length(key_pair))); ASSERT_SUCCESS(aws_byte_buf_append(&ciphertext, &prefix)); ASSERT_SUCCESS(aws_rsa_key_pair_encrypt(key_pair, algo, plaintext_cur, &ciphertext)); struct aws_byte_cursor ciphertext_cur = aws_byte_cursor_from_buf(&ciphertext); ASSERT_TRUE(aws_byte_cursor_starts_with(&ciphertext_cur, &prefix)); aws_byte_cursor_advance(&ciphertext_cur, prefix.len); struct aws_byte_buf decrypted_short; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_short, allocator, 5)); ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_rsa_key_pair_decrypt(key_pair, algo, ciphertext_cur, &decrypted_short)); struct aws_byte_buf decrypted; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted, allocator, prefix.len + aws_rsa_key_pair_block_length(key_pair))); ASSERT_SUCCESS(aws_byte_buf_append(&decrypted, &prefix)); ASSERT_SUCCESS(aws_rsa_key_pair_decrypt(key_pair, algo, ciphertext_cur, &decrypted)); struct aws_byte_cursor decrypted_cur = aws_byte_cursor_from_buf(&decrypted); ASSERT_TRUE(aws_byte_cursor_starts_with(&decrypted_cur, &prefix)); aws_byte_cursor_advance(&decrypted_cur, prefix.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(decrypted_cur, TEST_ENCRYPTION_STRING); aws_byte_buf_clean_up_secure(&ciphertext_short); aws_byte_buf_clean_up_secure(&decrypted_short); aws_byte_buf_clean_up_secure(&ciphertext); aws_byte_buf_clean_up_secure(&decrypted); return AWS_OP_SUCCESS; } static const char *TEST_PKCS1_RSA_PRIVATE_KEY_2048 = "MIIEpQIBAAKCAQEAnt/K3mvMgPQcplTgDHrItQPvXiYpUFMt5nIJbZV820Zj4n4G" "7iB3Y9h5HzfBYga2Olr8Irv3OuKkIH0ydrdz2oBZuf7SOBQVpro3m1+oMKhcCtrI" "GYA2MDOowaVkx6ho8pQ6K2d76pYj7GWfo0fm2p1O2jcw3JWXAPqq8dmTCMRxOw/2" "1eB/6bto8vayljXy85WiCPm7WTZ2mhB9tvkSRijDVF+SEILdkVPPUT1eqox+me2Y" "SM2qaXVtToscqoicOqXD8XrWFuyqeLe29CiZAA9xqmit9o/ckdNXTjiGp6cIx2qC" "Svbkxwi7OK0BB4y1LOTVz021jbJRr9b+ZbP0zwIDAQABAoIBAQCU5+ort9uwDZyA" "pVJtP/O3/V0v4BKez6dYsw91H0Qr/PiHg1mZfOKJuY4knUxqRSIs5bQmFgitr1jn" "fpB6xo0WgXAXrOd5WhHE+ApAXVK1cEb8gXxEsm+XlAOapBsmKwlaO2Wd4ts4zsoS" "ulj6X9zWj9QlIM9yH96tM0Rfc26lKoRx+jkEml24nOia6gBhnfups/Kq/sUxtnX7" "qQTuCmIuwdDMWTnW/AYlX6+wsSRgl7iUhnoOpbl18AzaIJbgcw49yE0xp2XVRWew" "VR86EsF7pR4hxpORgysiDmyQLLfcz85eiub3tE/A4uHUzxd37e1OwqItvrG4s/4y" "YNiqEbz5AoGBAOSs57pjCbh92UPMeMKhPhGSdlxQ/GkjHIDpUuy5oU2ZB2akKg5Y" "Asl51tibKsTuDR5qNtUJGEw8cMVR3A+t7p4KE7eCzRmZj/bNDBSdxnTec83Y5KcZ" 
"Pqi4DktHju7mArlIhmnphqOrXvDuJoIjMGFMVNACk05loxPpg3WiCS+NAoGBALHb" "sbP6ftkL8M9vMMRrL/Jzlz3jS0smNiJxRmW2TrpF7h0o3QeW6uzDigd1pjg40bUl" "0/NilaWtlK4DRWQ+0FYuxrDQd3vHiZ38uoCJZVkSWxzSytFSaM0rpU2l28aWIwL0" "ZuIk3k3l2gQBqX2VUrMFxq2MJF5ZN/OgOrUIqEDLAoGBAL7JG1TASF3qcZhFQgNw" "L67NeX6v+sdlCeTrxcnHXjK1mB0kngn1l+2sf3mci+RdkAhuKW0391OzoYqfL3DN" "dqXYVnbm5GOVYS1SCeAxemALMKbvbGWVhFeTqClafIAI2wDm52357eEjm0R8DRjK" "bxTecGxTmb7wwUxdqNY96FgJAoGAHtLyYzzAiyE0pN6iVwg0kRJTXdhsjiObMjDr" "gGkuD75a3Bbe55fSMyJYY56SJiBCx+A8cWvef44rvFS4y/zO4oDM0ovuiTc1tHm+" "YNRvChbST5aAq/JaU2SDC4f5JNuUScjNo9e750g0lokrNKaSZJBVtHIbQ3a26bQV" "OJa9gi0CgYEAlhk8ThPcokpqIuweY4cx34zdU1EAm/CBO1ax1Ogs/HilPFyGYXzz" "tZI9TCH4Sq33MGEjf2MyW0XMXC56dA2VOPSTHGKaoKmyn7L9G4WfDFcYmCdvmLkR" "7wAz2Dyxr6ImChSWD/y2ddz1U+H39uqRxwIkwJ7TbDflYNXgsAOOlUg="; static int s_rsa_encryption_roundtrip_from_user( struct aws_allocator *allocator, enum aws_rsa_encryption_algorithm algo) { struct aws_byte_buf key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_2048), &key_buf)); struct aws_rsa_key_pair *key_pair = aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, aws_byte_cursor_from_buf(&key_buf)); ASSERT_NOT_NULL(key_pair); s_rsa_encryption_roundtrip_helper(allocator, key_pair, algo); aws_rsa_key_pair_release(key_pair); aws_byte_buf_clean_up_secure(&key_buf); return AWS_OP_SUCCESS; } static int s_rsa_encryption_roundtrip_pkcs1_from_user(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); ASSERT_SUCCESS(s_rsa_encryption_roundtrip_from_user(allocator, AWS_CAL_RSA_ENCRYPTION_PKCS1_5)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_encryption_roundtrip_pkcs1_from_user, s_rsa_encryption_roundtrip_pkcs1_from_user); static int s_rsa_encryption_roundtrip_oaep_sha256_from_user(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); ASSERT_SUCCESS(s_rsa_encryption_roundtrip_from_user(allocator, AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_encryption_roundtrip_oaep_sha256_from_user, s_rsa_encryption_roundtrip_oaep_sha256_from_user); static int s_rsa_encryption_roundtrip_oaep_sha512_from_user(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); ASSERT_SUCCESS(s_rsa_encryption_roundtrip_from_user(allocator, AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_encryption_roundtrip_oaep_sha512_from_user, s_rsa_encryption_roundtrip_oaep_sha512_from_user); static const char *TEST_PKCS1_RSA_PRIVATE_KEY_1024 = "MIICXAIBAAKBgQCVGG9c6uBIpv4wQMB4PJpkaUjEqa6TW5v8ebStMCnICWfpUubs" "f6nr5nPxPsG2rw+HZSLJLAYVYfXOM9D+KyhTdpIJ7z4NXzXzem25x6H/N9WnRGjB" "6qAOLg5Srm3uoXfulN5HVXVtncTFcJJQxgBOpT8qXLycm5k8uhm0OWP6cwIDAQAB" "AoGAT0IRGU0G87hKUi5p4sEctho+A8XMNyuw7XNpd9OtslhFtARNHBX1p6D3q5xX" "8Bx53dkGt/i+Nym/OOHUzPj2Uy+qprBFYK71JavQyg80h8deUsQzrKTrM45pU+U7" "uGgEg24Mw4hQ53ky6HUJtRk/PG6osx8o4DMPU0EKSeoxqMECQQDVFC7qH3m+gHoE" "xnz+uIR1H78NQt3sa2cnZDa0ui/Ew+UASQlDDY/xqAGYa+QAhQBMZoWJLL6AaNrj" "FtxyKr+TAkEAsyDTfwJPFRTvqUNKKIsFHlNlzDclISHcIi00ST2bDKox7pS3aukE" "dkytVIerIKtBMds5gjYZybAAX0cC7DHloQJAbt5VmtRN0GWhF6L/nrn7kcW27vt/" "5WftAH4QSPEnscYL/Z4DB7Si1SaJzfk1ZV/Oy/H8QWfap43ndomKoozDqQJAX9lk" "0kVuA53cT/oNqHwbFQsTIZ8wYvY3UKJXpAku+ivn4/3312EwXgzRgrXFwAljLUZd" "E2vXiLCAwnrA+ZoJgQJBAI/P1XTqEAUro5aDD64JuwbvCpbAL8kkwGzf6wzrdF+f" "0CXKkTGUEG7BGWqCr9y9nBt9KuyN1VlNbziJp+UcKVc="; static const char 
*TEST_PKCS1_RSA_PUBLIC_KEY_1024 = "MIGJAoGBAJUYb1zq4Eim/jBAwHg8mmRpSMSprpNbm/x5tK0wKcgJZ+lS5ux/qevmc/" "E+wbavD4dlIsksBhVh9c4z0P4rKFN2kgnvPg1fNfN6bbnHof831adEaMHqoA4uDlKu" "be6hd+6U3kdVdW2dxMVwklDGAE6lPypcvJybmTy6GbQ5Y/pzAgMBAAE="; /* * pkcs1 signature generator using above private key and test encryption string. */ static const char *TEST_RSA_SIGNATURE_PKCS1 = "Gqu9pLlPvSFIW+5ZFo9ZCxMmPR8LnAeiuYir5CfNTyraF2VPksRnCKtS6i98nwPUqzlPr" "TYJ45P3c94lQIQD3SVJ3XMSAyAEWTE2pcj0F/oPzzxLcXK9cyv2Iphe4XuBjWCOVdHgFg" "rD/yAA8b+B94AqE9U/B2+k9/C3Bz2YApo="; static const char *TEST_RSA_SIGNATURE_PSS = "j//04sVoqQVSmUgH+Id0oad7OgW+hGnIqx6hjr28VnVk75Obig+n3tJGWd0r+3S4ARxf2fK" "7taVvJXISQ5aWJAYx6QRgR+25rcE96eOfi6L7ShIZIUYFzGxhc9wpUMGbqHEIhm+8QP7uNo4D" "FmaPzJMgGDKL2qhedxnjtg3p8E4="; static const char *TEST_RSA_ENCRYPTED_PKCS1 = "Ng97Q53hLqC0sCNMTG6poSxXeTLVWFQJS746y1VLnDD0/IYWk/gyzhNEF0M16loaBswNLnEgL" "OsTVHmBaglCiEobyWBYO16HO+hrJeXK76p1GfIQ+62hSwpnxx4abqS9N2rX59ahMNSnjXZmFiQn" "yPDbvp2UYwUydSu6ArOM/H8="; static const char *TEST_RSA_ENCRYPTED_OAEP256 = "YB9CDU8z+ViRSQRvE6z3i3mFMh1NFOgKuhcYGIhZu0wqTzVV4c6Rl+x9gMQiURkLG0q1/nAF" "upW5g1uo5wotJKb5GCGF8oYuMu7IemY45jBIZ3tXSz1XeZ8VHVCpBNGJBP//Pp461HI9qzaPA+mFu" "jBppHZTE0GLpbZeryHRgK4qPR4J+EzojiE2JrzCST8Y1xrCwvwS6QjboeorVSr8ssO8oC3HJ89klg" "uEq19eLTp0JP8WWnREJtGfbeIW6nGeu3KEjwnXD+A//Qk5fIxPFBV4+1kTDkLyO22ZOzCevXUAv9j" "97f1GRuJfS2W2KL/YXQudwX1xo5ULf1UIgpeqSQ=="; static const char *TEST_RSA_ENCRYPTED_OAEP512 = "Wx5SdwnG1Fc0rEIZZRibRL9iUt16NydVC4Mbok50UKWf7DnhWen4H+KZW9K6bAvXHKKZx1Sog4" "RAONa/rrPTWYipFgvNWEQmCHb0erEemjabx3QTu5HqJpbnU5HKAA2l7JGrV26AvyVpezJWHa3h" "2xWLnw5JWhqL49vaZeMwtEopr2Dz0+wsH9QZaedQmRcEwO1f2QRrVbnbYFB6wjo3VF1IY7k8Dk" "XiLg0m9Ivb0Gwx61gRTx0DKq3zr7CNm35E+c9ujYPdGtX0MjAJfXOHeuaspzsLVAI9gdvyZ3Ca/" "vdEkky9ESL7Bw4tLysuqlvc2tnVuk3LXuB3QElDC3JU+A=="; static int s_rsa_verify_signing_pkcs1_sha256(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor message = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); aws_cal_library_init(allocator); struct aws_byte_buf public_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PUBLIC_KEY_1024), &public_key_buf)); struct aws_rsa_key_pair *key_pair_public = aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, aws_byte_cursor_from_buf(&public_key_buf)); ASSERT_NOT_NULL(key_pair_public); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message, &hash_value, 0); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); struct aws_byte_buf signature_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_RSA_SIGNATURE_PKCS1), &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS(aws_rsa_key_pair_verify_signature( key_pair_public, AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, hash_cur, signature_cur)); aws_byte_buf_clean_up(&hash_value); aws_byte_buf_clean_up(&signature_buf); aws_byte_buf_clean_up(&public_key_buf); aws_rsa_key_pair_release(key_pair_public); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_verify_signing_pkcs1_sha256, s_rsa_verify_signing_pkcs1_sha256); static int s_rsa_verify_signing_pss_sha256(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor message = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); aws_cal_library_init(allocator); struct 
aws_byte_buf public_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PUBLIC_KEY_1024), &public_key_buf)); struct aws_rsa_key_pair *key_pair_public = aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, aws_byte_cursor_from_buf(&public_key_buf)); ASSERT_NOT_NULL(key_pair_public); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message, &hash_value, 0); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); struct aws_byte_buf signature_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_RSA_SIGNATURE_PSS), &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); ASSERT_SUCCESS( aws_rsa_key_pair_verify_signature(key_pair_public, AWS_CAL_RSA_SIGNATURE_PSS_SHA256, hash_cur, signature_cur)); aws_byte_buf_clean_up(&hash_value); aws_byte_buf_clean_up(&signature_buf); aws_byte_buf_clean_up(&public_key_buf); aws_rsa_key_pair_release(key_pair_public); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_verify_signing_pss_sha256, s_rsa_verify_signing_pss_sha256); static int s_rsa_decrypt_pkcs1(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_buf private_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_1024), &private_key_buf)); struct aws_rsa_key_pair *key_pair_private = aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, aws_byte_cursor_from_buf(&private_key_buf)); ASSERT_NOT_NULL(key_pair_private); struct aws_byte_buf encrypted; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_RSA_ENCRYPTED_PKCS1), &encrypted)); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted); struct aws_byte_buf decrypted; aws_byte_buf_init(&decrypted, allocator, aws_rsa_key_pair_block_length(key_pair_private)); ASSERT_SUCCESS( aws_rsa_key_pair_decrypt(key_pair_private, AWS_CAL_RSA_ENCRYPTION_PKCS1_5, encrypted_cur, &decrypted)); struct aws_byte_cursor decrypted_cur = aws_byte_cursor_from_buf(&decrypted); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(decrypted_cur, TEST_ENCRYPTION_STRING); aws_byte_buf_clean_up(&private_key_buf); aws_byte_buf_clean_up(&decrypted); aws_byte_buf_clean_up(&encrypted); aws_rsa_key_pair_release(key_pair_private); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_decrypt_pkcs1, s_rsa_decrypt_pkcs1); static int s_rsa_decrypt_oaep256(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_buf private_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_2048), &private_key_buf)); struct aws_rsa_key_pair *key_pair_private = aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, aws_byte_cursor_from_buf(&private_key_buf)); ASSERT_NOT_NULL(key_pair_private); struct aws_byte_buf encrypted; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_RSA_ENCRYPTED_OAEP256), &encrypted)); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted); struct aws_byte_buf decrypted; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted, allocator, aws_rsa_key_pair_block_length(key_pair_private))); ASSERT_SUCCESS( 
aws_rsa_key_pair_decrypt(key_pair_private, AWS_CAL_RSA_ENCRYPTION_OAEP_SHA256, encrypted_cur, &decrypted)); struct aws_byte_cursor decrypted_cur = aws_byte_cursor_from_buf(&decrypted); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(decrypted_cur, TEST_ENCRYPTION_STRING); aws_byte_buf_clean_up(&private_key_buf); aws_byte_buf_clean_up(&decrypted); aws_byte_buf_clean_up(&encrypted); aws_rsa_key_pair_release(key_pair_private); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_decrypt_oaep256, s_rsa_decrypt_oaep256); static int s_rsa_decrypt_oaep512(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_buf private_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_2048), &private_key_buf)); struct aws_rsa_key_pair *key_pair_private = aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, aws_byte_cursor_from_buf(&private_key_buf)); ASSERT_NOT_NULL(key_pair_private); struct aws_byte_buf encrypted; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_RSA_ENCRYPTED_OAEP512), &encrypted)); struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted); struct aws_byte_buf decrypted; ASSERT_SUCCESS(aws_byte_buf_init(&decrypted, allocator, aws_rsa_key_pair_block_length(key_pair_private))); ASSERT_SUCCESS( aws_rsa_key_pair_decrypt(key_pair_private, AWS_CAL_RSA_ENCRYPTION_OAEP_SHA512, encrypted_cur, &decrypted)); struct aws_byte_cursor decrypted_cur = aws_byte_cursor_from_buf(&decrypted); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(decrypted_cur, TEST_ENCRYPTION_STRING); aws_byte_buf_clean_up(&private_key_buf); aws_byte_buf_clean_up(&decrypted); aws_byte_buf_clean_up(&encrypted); aws_rsa_key_pair_release(key_pair_private); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_decrypt_oaep512, s_rsa_decrypt_oaep512); static int s_rsa_signing_roundtrip_helper( struct aws_allocator *allocator, struct aws_rsa_key_pair *key_pair_private, struct aws_rsa_key_pair *key_pair_public, enum aws_rsa_signature_algorithm algo, const char *expected_signature) { struct aws_byte_cursor message = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message, &hash_value, 0); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); /*since our apis work by appending to buffer, lets make sure they dont *clobber anything already in the buffer*/ struct aws_byte_cursor prefix = aws_byte_cursor_from_c_str("random_prefix"); struct aws_byte_buf signature; ASSERT_SUCCESS( aws_byte_buf_init(&signature, allocator, prefix.len + aws_rsa_key_pair_signature_length(key_pair_private))); ASSERT_SUCCESS(aws_byte_buf_append(&signature, &prefix)); ASSERT_SUCCESS(aws_rsa_key_pair_sign_message(key_pair_private, algo, hash_cur, &signature)); /*short buffer should fail*/ struct aws_byte_buf signature_short; ASSERT_SUCCESS(aws_byte_buf_init(&signature_short, allocator, 5)); ASSERT_ERROR( AWS_ERROR_SHORT_BUFFER, aws_rsa_key_pair_sign_message(key_pair_private, algo, hash_cur, &signature_short)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature); ASSERT_TRUE(aws_byte_cursor_starts_with(&signature_cur, &prefix)); aws_byte_cursor_advance(&signature_cur, prefix.len); if (expected_signature) { struct aws_byte_buf sig_b64_buf; size_t encoded_length = 0; 
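        /* PKCS#1 v1.5 signatures are deterministic, so callers that supply an expected known-answer string
         * get the freshly produced signature base64-encoded and compared against it; PSS uses a random
         * salt, so those callers pass NULL and rely solely on the verification step after this block. */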
ASSERT_SUCCESS(aws_base64_compute_encoded_len(signature.len, &encoded_length)); ASSERT_SUCCESS(aws_byte_buf_init(&sig_b64_buf, allocator, encoded_length)); ASSERT_SUCCESS(aws_base64_encode(&signature_cur, &sig_b64_buf)); struct aws_byte_cursor sig_b64_cur = aws_byte_cursor_from_buf(&sig_b64_buf); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(sig_b64_cur, expected_signature); aws_byte_buf_clean_up_secure(&sig_b64_buf); } ASSERT_SUCCESS(aws_rsa_key_pair_verify_signature(key_pair_public, algo, hash_cur, signature_cur)); aws_byte_buf_clean_up_secure(&signature); aws_byte_buf_clean_up_secure(&signature_short); return AWS_OP_SUCCESS; } static int s_rsa_signing_roundtrip_from_user( struct aws_allocator *allocator, enum aws_rsa_signature_algorithm algo, const char *expected_signature) { struct aws_byte_buf private_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_1024), &private_key_buf)); struct aws_rsa_key_pair *key_pair_private = aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, aws_byte_cursor_from_buf(&private_key_buf)); ASSERT_NOT_NULL(key_pair_private); struct aws_byte_buf public_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PUBLIC_KEY_1024), &public_key_buf)); struct aws_rsa_key_pair *key_pair_public = aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, aws_byte_cursor_from_buf(&public_key_buf)); ASSERT_NOT_NULL(key_pair_public); s_rsa_signing_roundtrip_helper(allocator, key_pair_private, key_pair_public, algo, expected_signature); aws_rsa_key_pair_release(key_pair_private); aws_rsa_key_pair_release(key_pair_public); aws_byte_buf_clean_up_secure(&private_key_buf); aws_byte_buf_clean_up_secure(&public_key_buf); return AWS_OP_SUCCESS; } static int s_rsa_signing_roundtrip_pkcs1_sha256_from_user(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); ASSERT_SUCCESS( s_rsa_signing_roundtrip_from_user(allocator, AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, TEST_RSA_SIGNATURE_PKCS1)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_signing_roundtrip_pkcs1_sha256_from_user, s_rsa_signing_roundtrip_pkcs1_sha256_from_user); static int s_rsa_signing_roundtrip_pss_sha256_from_user(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); #if defined(AWS_OS_MACOS) if (__builtin_available(macOS 10.12, *)) { ASSERT_SUCCESS(s_rsa_signing_roundtrip_from_user(allocator, AWS_CAL_RSA_SIGNATURE_PSS_SHA256, NULL)); } else { ASSERT_ERROR( AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, s_rsa_signing_roundtrip_from_user(allocator, AWS_CAL_RSA_SIGNATURE_PSS_SHA256, NULL)); } #else ASSERT_SUCCESS(s_rsa_signing_roundtrip_from_user(allocator, AWS_CAL_RSA_SIGNATURE_PSS_SHA256, NULL)); #endif aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_signing_roundtrip_pss_sha256_from_user, s_rsa_signing_roundtrip_pss_sha256_from_user); static int s_rsa_getters(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_buf private_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_1024), &private_key_buf)); struct aws_rsa_key_pair *key_pair_private = aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, aws_byte_cursor_from_buf(&private_key_buf)); ASSERT_NOT_NULL(key_pair_private); ASSERT_INT_EQUALS(128, aws_rsa_key_pair_block_length(key_pair_private)); ASSERT_INT_EQUALS(128, 
aws_rsa_key_pair_signature_length(key_pair_private)); struct aws_byte_buf priv_key; ASSERT_SUCCESS(aws_rsa_key_pair_get_private_key(key_pair_private, AWS_CAL_RSA_KEY_EXPORT_PKCS1, &priv_key)); ASSERT_TRUE(priv_key.len > 0); struct aws_byte_buf public_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PUBLIC_KEY_1024), &public_key_buf)); struct aws_rsa_key_pair *key_pair_public = aws_rsa_key_pair_new_from_public_key_pkcs1(allocator, aws_byte_cursor_from_buf(&public_key_buf)); ASSERT_INT_EQUALS(128, aws_rsa_key_pair_block_length(key_pair_public)); ASSERT_INT_EQUALS(128, aws_rsa_key_pair_signature_length(key_pair_public)); struct aws_byte_buf pub_key; ASSERT_SUCCESS(aws_rsa_key_pair_get_public_key(key_pair_public, AWS_CAL_RSA_KEY_EXPORT_PKCS1, &pub_key)); ASSERT_TRUE(pub_key.len > 0); aws_rsa_key_pair_release(key_pair_private); aws_rsa_key_pair_release(key_pair_public); aws_byte_buf_clean_up_secure(&private_key_buf); aws_byte_buf_clean_up_secure(&public_key_buf); aws_byte_buf_clean_up_secure(&priv_key); aws_byte_buf_clean_up_secure(&pub_key); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_getters, s_rsa_getters); static int s_rsa_private_pkcs1_der_parsing(struct aws_allocator *allocator, void *ctx) { (void)ctx; static uint8_t n[] = {0x95, 0x18, 0x6f, 0x5c, 0xea, 0xe0, 0x48, 0xa6, 0xfe, 0x30, 0x40, 0xc0, 0x78, 0x3c, 0x9a, 0x64, 0x69, 0x48, 0xc4, 0xa9, 0xae, 0x93, 0x5b, 0x9b, 0xfc, 0x79, 0xb4, 0xad, 0x30, 0x29, 0xc8, 0x09, 0x67, 0xe9, 0x52, 0xe6, 0xec, 0x7f, 0xa9, 0xeb, 0xe6, 0x73, 0xf1, 0x3e, 0xc1, 0xb6, 0xaf, 0x0f, 0x87, 0x65, 0x22, 0xc9, 0x2c, 0x06, 0x15, 0x61, 0xf5, 0xce, 0x33, 0xd0, 0xfe, 0x2b, 0x28, 0x53, 0x76, 0x92, 0x09, 0xef, 0x3e, 0x0d, 0x5f, 0x35, 0xf3, 0x7a, 0x6d, 0xb9, 0xc7, 0xa1, 0xff, 0x37, 0xd5, 0xa7, 0x44, 0x68, 0xc1, 0xea, 0xa0, 0x0e, 0x2e, 0x0e, 0x52, 0xae, 0x6d, 0xee, 0xa1, 0x77, 0xee, 0x94, 0xde, 0x47, 0x55, 0x75, 0x6d, 0x9d, 0xc4, 0xc5, 0x70, 0x92, 0x50, 0xc6, 0x00, 0x4e, 0xa5, 0x3f, 0x2a, 0x5c, 0xbc, 0x9c, 0x9b, 0x99, 0x3c, 0xba, 0x19, 0xb4, 0x39, 0x63, 0xfa, 0x73}; static uint8_t e[] = {0x01, 0x00, 0x01}; static uint8_t d[] = {0x4f, 0x42, 0x11, 0x19, 0x4d, 0x06, 0xf3, 0xb8, 0x4a, 0x52, 0x2e, 0x69, 0xe2, 0xc1, 0x1c, 0xb6, 0x1a, 0x3e, 0x03, 0xc5, 0xcc, 0x37, 0x2b, 0xb0, 0xed, 0x73, 0x69, 0x77, 0xd3, 0xad, 0xb2, 0x58, 0x45, 0xb4, 0x04, 0x4d, 0x1c, 0x15, 0xf5, 0xa7, 0xa0, 0xf7, 0xab, 0x9c, 0x57, 0xf0, 0x1c, 0x79, 0xdd, 0xd9, 0x06, 0xb7, 0xf8, 0xbe, 0x37, 0x29, 0xbf, 0x38, 0xe1, 0xd4, 0xcc, 0xf8, 0xf6, 0x53, 0x2f, 0xaa, 0xa6, 0xb0, 0x45, 0x60, 0xae, 0xf5, 0x25, 0xab, 0xd0, 0xca, 0x0f, 0x34, 0x87, 0xc7, 0x5e, 0x52, 0xc4, 0x33, 0xac, 0xa4, 0xeb, 0x33, 0x8e, 0x69, 0x53, 0xe5, 0x3b, 0xb8, 0x68, 0x04, 0x83, 0x6e, 0x0c, 0xc3, 0x88, 0x50, 0xe7, 0x79, 0x32, 0xe8, 0x75, 0x09, 0xb5, 0x19, 0x3f, 0x3c, 0x6e, 0xa8, 0xb3, 0x1f, 0x28, 0xe0, 0x33, 0x0f, 0x53, 0x41, 0x0a, 0x49, 0xea, 0x31, 0xa8, 0xc1}; static uint8_t p[] = {0xd5, 0x14, 0x2e, 0xea, 0x1f, 0x79, 0xbe, 0x80, 0x7a, 0x04, 0xc6, 0x7c, 0xfe, 0xb8, 0x84, 0x75, 0x1f, 0xbf, 0x0d, 0x42, 0xdd, 0xec, 0x6b, 0x67, 0x27, 0x64, 0x36, 0xb4, 0xba, 0x2f, 0xc4, 0xc3, 0xe5, 0x00, 0x49, 0x09, 0x43, 0x0d, 0x8f, 0xf1, 0xa8, 0x01, 0x98, 0x6b, 0xe4, 0x00, 0x85, 0x00, 0x4c, 0x66, 0x85, 0x89, 0x2c, 0xbe, 0x80, 0x68, 0xda, 0xe3, 0x16, 0xdc, 0x72, 0x2a, 0xbf, 0x93}; static uint8_t q[] = {0xb3, 0x20, 0xd3, 0x7f, 0x02, 0x4f, 0x15, 0x14, 0xef, 0xa9, 0x43, 0x4a, 0x28, 0x8b, 0x05, 0x1e, 0x53, 0x65, 0xcc, 0x37, 0x25, 0x21, 0x21, 0xdc, 0x22, 0x2d, 0x34, 0x49, 0x3d, 0x9b, 0x0c, 
0xaa, 0x31, 0xee, 0x94, 0xb7, 0x6a, 0xe9, 0x04, 0x76, 0x4c, 0xad, 0x54, 0x87, 0xab, 0x20, 0xab, 0x41, 0x31, 0xdb, 0x39, 0x82, 0x36, 0x19, 0xc9, 0xb0, 0x00, 0x5f, 0x47, 0x02, 0xec, 0x31, 0xe5, 0xa1}; static uint8_t dmp1[] = {0x6e, 0xde, 0x55, 0x9a, 0xd4, 0x4d, 0xd0, 0x65, 0xa1, 0x17, 0xa2, 0xff, 0x9e, 0xb9, 0xfb, 0x91, 0xc5, 0xb6, 0xee, 0xfb, 0x7f, 0xe5, 0x67, 0xed, 0x00, 0x7e, 0x10, 0x48, 0xf1, 0x27, 0xb1, 0xc6, 0x0b, 0xfd, 0x9e, 0x03, 0x07, 0xb4, 0xa2, 0xd5, 0x26, 0x89, 0xcd, 0xf9, 0x35, 0x65, 0x5f, 0xce, 0xcb, 0xf1, 0xfc, 0x41, 0x67, 0xda, 0xa7, 0x8d, 0xe7, 0x76, 0x89, 0x8a, 0xa2, 0x8c, 0xc3, 0xa9}; static uint8_t dmq1[] = {0x5f, 0xd9, 0x64, 0xd2, 0x45, 0x6e, 0x03, 0x9d, 0xdc, 0x4f, 0xfa, 0x0d, 0xa8, 0x7c, 0x1b, 0x15, 0x0b, 0x13, 0x21, 0x9f, 0x30, 0x62, 0xf6, 0x37, 0x50, 0xa2, 0x57, 0xa4, 0x09, 0x2e, 0xfa, 0x2b, 0xe7, 0xe3, 0xfd, 0xf7, 0xd7, 0x61, 0x30, 0x5e, 0x0c, 0xd1, 0x82, 0xb5, 0xc5, 0xc0, 0x09, 0x63, 0x2d, 0x46, 0x5d, 0x13, 0x6b, 0xd7, 0x88, 0xb0, 0x80, 0xc2, 0x7a, 0xc0, 0xf9, 0x9a, 0x09, 0x81}; static uint8_t iqmp[] = {0x8f, 0xcf, 0xd5, 0x74, 0xea, 0x10, 0x05, 0x2b, 0xa3, 0x96, 0x83, 0x0f, 0xae, 0x09, 0xbb, 0x06, 0xef, 0x0a, 0x96, 0xc0, 0x2f, 0xc9, 0x24, 0xc0, 0x6c, 0xdf, 0xeb, 0x0c, 0xeb, 0x74, 0x5f, 0x9f, 0xd0, 0x25, 0xca, 0x91, 0x31, 0x94, 0x10, 0x6e, 0xc1, 0x19, 0x6a, 0x82, 0xaf, 0xdc, 0xbd, 0x9c, 0x1b, 0x7d, 0x2a, 0xec, 0x8d, 0xd5, 0x59, 0x4d, 0x6f, 0x38, 0x89, 0xa7, 0xe5, 0x1c, 0x29, 0x57}; aws_cal_library_init(allocator); struct aws_byte_buf private_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_1024), &private_key_buf)); struct aws_byte_cursor private_key_cur = aws_byte_cursor_from_buf(&private_key_buf); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, private_key_cur); struct aws_rsa_private_key_pkcs1 private_key_data; AWS_ZERO_STRUCT(private_key_data); ASSERT_SUCCESS(aws_der_decoder_load_private_rsa_pkcs1(decoder, &private_key_data)); ASSERT_BIN_ARRAYS_EQUALS(n, AWS_ARRAY_SIZE(n), private_key_data.modulus.ptr, private_key_data.modulus.len); ASSERT_BIN_ARRAYS_EQUALS( e, AWS_ARRAY_SIZE(e), private_key_data.publicExponent.ptr, private_key_data.publicExponent.len); ASSERT_BIN_ARRAYS_EQUALS( d, AWS_ARRAY_SIZE(d), private_key_data.privateExponent.ptr, private_key_data.privateExponent.len); ASSERT_BIN_ARRAYS_EQUALS(p, AWS_ARRAY_SIZE(p), private_key_data.prime1.ptr, private_key_data.prime1.len); ASSERT_BIN_ARRAYS_EQUALS(q, AWS_ARRAY_SIZE(q), private_key_data.prime2.ptr, private_key_data.prime2.len); ASSERT_BIN_ARRAYS_EQUALS( dmp1, AWS_ARRAY_SIZE(dmp1), private_key_data.exponent1.ptr, private_key_data.exponent1.len); ASSERT_BIN_ARRAYS_EQUALS( dmq1, AWS_ARRAY_SIZE(dmq1), private_key_data.exponent2.ptr, private_key_data.exponent2.len); ASSERT_BIN_ARRAYS_EQUALS( iqmp, AWS_ARRAY_SIZE(iqmp), private_key_data.coefficient.ptr, private_key_data.coefficient.len); aws_byte_buf_clean_up_secure(&private_key_buf); aws_der_decoder_destroy(decoder); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_private_pkcs1_der_parsing, s_rsa_private_pkcs1_der_parsing); static int s_rsa_public_pkcs1_der_parsing(struct aws_allocator *allocator, void *ctx) { (void)ctx; static uint8_t n[] = {0x95, 0x18, 0x6f, 0x5c, 0xea, 0xe0, 0x48, 0xa6, 0xfe, 0x30, 0x40, 0xc0, 0x78, 0x3c, 0x9a, 0x64, 0x69, 0x48, 0xc4, 0xa9, 0xae, 0x93, 0x5b, 0x9b, 0xfc, 0x79, 0xb4, 0xad, 0x30, 0x29, 0xc8, 0x09, 0x67, 0xe9, 0x52, 0xe6, 0xec, 0x7f, 0xa9, 0xeb, 0xe6, 0x73, 0xf1, 0x3e, 0xc1, 0xb6, 0xaf, 0x0f, 0x87, 0x65, 0x22, 
0xc9, 0x2c, 0x06, 0x15, 0x61, 0xf5, 0xce, 0x33, 0xd0, 0xfe, 0x2b, 0x28, 0x53, 0x76, 0x92, 0x09, 0xef, 0x3e, 0x0d, 0x5f, 0x35, 0xf3, 0x7a, 0x6d, 0xb9, 0xc7, 0xa1, 0xff, 0x37, 0xd5, 0xa7, 0x44, 0x68, 0xc1, 0xea, 0xa0, 0x0e, 0x2e, 0x0e, 0x52, 0xae, 0x6d, 0xee, 0xa1, 0x77, 0xee, 0x94, 0xde, 0x47, 0x55, 0x75, 0x6d, 0x9d, 0xc4, 0xc5, 0x70, 0x92, 0x50, 0xc6, 0x00, 0x4e, 0xa5, 0x3f, 0x2a, 0x5c, 0xbc, 0x9c, 0x9b, 0x99, 0x3c, 0xba, 0x19, 0xb4, 0x39, 0x63, 0xfa, 0x73}; static uint8_t e[] = {0x01, 0x00, 0x01}; aws_cal_library_init(allocator); struct aws_byte_buf public_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PUBLIC_KEY_1024), &public_key_buf)); struct aws_byte_cursor public_key_cur = aws_byte_cursor_from_buf(&public_key_buf); struct aws_der_decoder *decoder = aws_der_decoder_new(allocator, public_key_cur); struct aws_rsa_public_key_pkcs1 public_key_data; AWS_ZERO_STRUCT(public_key_data); ASSERT_SUCCESS(aws_der_decoder_load_public_rsa_pkcs1(decoder, &public_key_data)); ASSERT_BIN_ARRAYS_EQUALS(n, AWS_ARRAY_SIZE(n), public_key_data.modulus.ptr, public_key_data.modulus.len); ASSERT_BIN_ARRAYS_EQUALS( e, AWS_ARRAY_SIZE(e), public_key_data.publicExponent.ptr, public_key_data.publicExponent.len); aws_byte_buf_clean_up_secure(&public_key_buf); aws_der_decoder_destroy(decoder); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_public_pkcs1_der_parsing, s_rsa_public_pkcs1_der_parsing); static int s_rsa_signing_mismatch_pkcs1_sha256(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor message = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); aws_cal_library_init(allocator); struct aws_byte_buf public_key_buf; ASSERT_SUCCESS(s_byte_buf_decoded_from_base64_cur( allocator, aws_byte_cursor_from_c_str(TEST_PKCS1_RSA_PRIVATE_KEY_1024), &public_key_buf)); struct aws_rsa_key_pair *key_pair_private = aws_rsa_key_pair_new_from_private_key_pkcs1(allocator, aws_byte_cursor_from_buf(&public_key_buf)); ASSERT_NOT_NULL(key_pair_private); uint8_t hash[AWS_SHA256_LEN]; AWS_ZERO_ARRAY(hash); struct aws_byte_buf hash_value = aws_byte_buf_from_empty_array(hash, sizeof(hash)); aws_sha256_compute(allocator, &message, &hash_value, 0); struct aws_byte_cursor hash_cur = aws_byte_cursor_from_buf(&hash_value); struct aws_byte_buf signature_buf; ASSERT_SUCCESS(aws_byte_buf_init(&signature_buf, allocator, aws_rsa_key_pair_signature_length(key_pair_private))); ASSERT_SUCCESS(aws_rsa_key_pair_sign_message( key_pair_private, AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, hash_cur, &signature_buf)); struct aws_byte_cursor signature_cur = aws_byte_cursor_from_buf(&signature_buf); hash[5] += 59; /* modify digest to force signature mismatch */ ASSERT_ERROR( AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED, aws_rsa_key_pair_verify_signature( key_pair_private, AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, hash_cur, signature_cur)); hash[5] -= 59; /* undo digest modification and corrupt signature */ signature_buf.buffer[5] += 59; ASSERT_ERROR( AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED, aws_rsa_key_pair_verify_signature( key_pair_private, AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, hash_cur, signature_cur)); struct aws_byte_cursor short_signature_cur = aws_byte_cursor_from_c_str("bad signature"); ASSERT_ERROR( AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED, aws_rsa_key_pair_verify_signature( key_pair_private, AWS_CAL_RSA_SIGNATURE_PKCS1_5_SHA256, hash_cur, short_signature_cur)); aws_byte_buf_clean_up(&hash_value); aws_byte_buf_clean_up(&signature_buf); 
aws_byte_buf_clean_up(&public_key_buf); aws_rsa_key_pair_release(key_pair_private); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rsa_signing_mismatch_pkcs1_sha256, s_rsa_signing_mismatch_pkcs1_sha256); aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/sha1_test.c000066400000000000000000000244621456575232400232000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /* * these are the NIST test vectors, as compiled here: * https://www.di-mgt.com.au/sha_testvectors.html */ static int s_sha1_nist_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abc"); uint8_t expected[] = { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha1_new); } AWS_TEST_CASE(sha1_nist_test_case_1, s_sha1_nist_test_case_1_fn) static int s_sha1_nist_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str(""); uint8_t expected[] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha1_new); } AWS_TEST_CASE(sha1_nist_test_case_2, s_sha1_nist_test_case_2_fn) static int s_sha1_nist_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); uint8_t expected[] = { 0x84, 0x98, 0x3e, 0x44, 0x1c, 0x3b, 0xd2, 0x6e, 0xba, 0xae, 0x4a, 0xa1, 0xf9, 0x51, 0x29, 0xe5, 0xe5, 0x46, 0x70, 0xf1, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha1_new); } AWS_TEST_CASE(sha1_nist_test_case_3, s_sha1_nist_test_case_3_fn) static int s_sha1_nist_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = { 0xa4, 0x9b, 0x24, 0x46, 0xa0, 0x2c, 0x64, 0x5b, 0xf4, 0x19, 0xf9, 0x95, 0xb6, 0x70, 0x91, 0x25, 0x3a, 0x04, 0xa2, 0x59, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha1_new); } AWS_TEST_CASE(sha1_nist_test_case_4, s_sha1_nist_test_case_4_fn) static int s_sha1_nist_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_hash *hash = aws_sha1_new(allocator); ASSERT_NOT_NULL(hash); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_hash_update(hash, &input)); } uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 0)); uint8_t expected[] = { 0x34, 
0xaa, 0x97, 0x3c, 0xd4, 0xc4, 0xda, 0xa4, 0xf6, 0x1e, 0xeb, 0x2b, 0xdb, 0xad, 0x27, 0x31, 0x65, 0x34, 0x01, 0x6f, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_nist_test_case_5, s_sha1_nist_test_case_5_fn) static int s_sha1_nist_test_case_5_truncated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_hash *hash = aws_sha1_new(allocator); ASSERT_NOT_NULL(hash); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_hash_update(hash, &input)); } uint8_t expected[] = { 0x34, 0xaa, 0x97, 0x3c, 0xd4, 0xc4, 0xda, 0xa4, 0xf6, 0x1e, 0xeb, 0x2b, 0xdb, 0xad, 0x27, 0x31}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected_buf.len); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 16)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_nist_test_case_5_truncated, s_sha1_nist_test_case_5_truncated_fn) static int s_sha1_nist_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_hash *hash = aws_sha1_new(allocator); ASSERT_NOT_NULL(hash); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"); for (size_t i = 0; i < 16777216; ++i) { ASSERT_SUCCESS(aws_hash_update(hash, &input)); } uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 0)); uint8_t expected[] = { 0x77, 0x89, 0xf0, 0xc9, 0xef, 0x7b, 0xfc, 0x40, 0xd9, 0x33, 0x11, 0x14, 0x3d, 0xfb, 0xe6, 0x9e, 0x20, 0x17, 0xf5, 0x92, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_nist_test_case_6, s_sha1_nist_test_case_6_fn) static int s_sha1_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_sha1_compute(allocator, &input, &output_buf, 0)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_test_invalid_buffer, s_sha1_test_invalid_buffer_fn) static int s_sha1_test_oneshot_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t 
expected[] = { 0xa4, 0x9b, 0x24, 0x46, 0xa0, 0x2c, 0x64, 0x5b, 0xf4, 0x19, 0xf9, 0x95, 0xb6, 0x70, 0x91, 0x25, 0x3a, 0x04, 0xa2, 0x59, }; uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_sha1_compute(allocator, &input, &output_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output_buf.buffer, output_buf.len); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_test_oneshot, s_sha1_test_oneshot_fn) static int s_sha1_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_hash *hash = aws_sha1_new(allocator); ASSERT_NOT_NULL(hash); uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_update(hash, &input)); ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hash_update(hash, &input)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hash_finalize(hash, &output_buf, 0)); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_test_invalid_state, s_sha1_test_invalid_state_fn) static int s_sha1_test_extra_buffer_space_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); struct aws_byte_buf digest_size_buf; struct aws_byte_buf super_size_buf; aws_byte_buf_init(&digest_size_buf, allocator, AWS_SHA1_LEN); aws_byte_buf_init(&super_size_buf, allocator, AWS_SHA1_LEN + 100); aws_sha1_compute(allocator, &input, &digest_size_buf, 0); aws_sha1_compute(allocator, &input, &super_size_buf, 0); ASSERT_TRUE(aws_byte_buf_eq(&digest_size_buf, &super_size_buf)); ASSERT_TRUE(super_size_buf.len == AWS_SHA1_LEN); aws_byte_buf_clean_up(&digest_size_buf); aws_byte_buf_clean_up(&super_size_buf); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_test_extra_buffer_space, s_sha1_test_extra_buffer_space_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/sha256_hmac_test.c000066400000000000000000000425721456575232400243460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include /* * these are the rfc4231 test vectors, as compiled here: * https://tools.ietf.org/html/rfc4231#section-4.1 */ static int s_sha256_hmac_rfc4231_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t secret[] = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("Hi There"); uint8_t expected[] = { 0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53, 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b, 0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7, 0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hmac_test_case(allocator, &input, &secret_buf, &expected_buf, aws_sha256_hmac_new); } AWS_TEST_CASE(sha256_hmac_rfc4231_test_case_1, s_sha256_hmac_rfc4231_test_case_1_fn) static int s_sha256_hmac_rfc4231_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t secret[] = { 0x4a, 0x65, 0x66, 0x65, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("what do ya want for nothing?"); uint8_t expected[] = { 0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e, 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7, 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83, 0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hmac_test_case(allocator, &input, &secret_buf, &expected_buf, aws_sha256_hmac_new); } AWS_TEST_CASE(sha256_hmac_rfc4231_test_case_2, s_sha256_hmac_rfc4231_test_case_2_fn) static int s_sha256_hmac_rfc4231_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t secret[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); uint8_t input[] = { 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, }; struct aws_byte_cursor input_buf = aws_byte_cursor_from_array(input, sizeof(input)); uint8_t expected[] = { 0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46, 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7, 0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22, 0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hmac_test_case(allocator, &input_buf, &secret_buf, &expected_buf, aws_sha256_hmac_new); } AWS_TEST_CASE(sha256_hmac_rfc4231_test_case_3, s_sha256_hmac_rfc4231_test_case_3_fn) static int s_sha256_hmac_rfc4231_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t secret[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); uint8_t input[] = { 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 
0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, }; struct aws_byte_cursor input_buf = aws_byte_cursor_from_array(input, sizeof(input)); uint8_t expected[] = { 0x82, 0x55, 0x8a, 0x38, 0x9a, 0x44, 0x3c, 0x0e, 0xa4, 0xcc, 0x81, 0x98, 0x99, 0xf2, 0x08, 0x3a, 0x85, 0xf0, 0xfa, 0xa3, 0xe5, 0x78, 0xf8, 0x07, 0x7a, 0x2e, 0x3f, 0xf4, 0x67, 0x29, 0x66, 0x5b, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hmac_test_case(allocator, &input_buf, &secret_buf, &expected_buf, aws_sha256_hmac_new); } AWS_TEST_CASE(sha256_hmac_rfc4231_test_case_4, s_sha256_hmac_rfc4231_test_case_4_fn) static int s_sha256_hmac_rfc4231_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t secret[] = { 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input_buf = aws_byte_cursor_from_c_str("Test With Truncation"); uint8_t expected[] = { 0xa3, 0xb6, 0x16, 0x74, 0x73, 0x10, 0x0e, 0xe0, 0x6e, 0x0c, 0x79, 0x6c, 0x29, 0x55, 0x55, 0x2b, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hmac_test_case(allocator, &input_buf, &secret_buf, &expected_buf, aws_sha256_hmac_new); } AWS_TEST_CASE(sha256_hmac_rfc4231_test_case_5, s_sha256_hmac_rfc4231_test_case_5_fn) static int s_sha256_hmac_rfc4231_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t secret[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input_buf = aws_byte_cursor_from_c_str("Test Using Larger Than Block-Size Key - Hash Key First"); uint8_t expected[] = { 0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f, 0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f, 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 0x14, 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hmac_test_case(allocator, &input_buf, &secret_buf, &expected_buf, aws_sha256_hmac_new); } AWS_TEST_CASE(sha256_hmac_rfc4231_test_case_6, s_sha256_hmac_rfc4231_test_case_6_fn) static int s_sha256_hmac_rfc4231_test_case_7_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t secret[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input_buf = aws_byte_cursor_from_c_str("This is a test using a larger than " "block-size key and a larger than block-size " "data. The key needs to be hashed before " "being used by the HMAC algorithm."); uint8_t expected[] = { 0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hmac_test_case(allocator, &input_buf, &secret_buf, &expected_buf, aws_sha256_hmac_new); } AWS_TEST_CASE(sha256_hmac_rfc4231_test_case_7, s_sha256_hmac_rfc4231_test_case_7_fn) static int s_sha256_hmac_test_oneshot_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t secret[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input_buf = aws_byte_cursor_from_c_str("This is a test using a larger than " "block-size key and a larger than block-size " "data. 
The key needs to be hashed before " "being used by the HMAC algorithm."); uint8_t expected[] = { 0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2, }; uint8_t output[AWS_SHA256_HMAC_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_sha256_hmac_compute(allocator, &secret_buf, &input_buf, &output_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output_buf.buffer, output_buf.len); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_hmac_test_oneshot, s_sha256_hmac_test_oneshot_fn) static int s_sha256_hmac_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t secret[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input_buf = aws_byte_cursor_from_c_str("This is a test using a larger than " "block-size key and a larger than block-size " "data. 
The key needs to be hashed before " "being used by the HMAC algorithm."); uint8_t output[AWS_SHA256_HMAC_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_sha256_hmac_compute(allocator, &secret_buf, &input_buf, &output_buf, 0)); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_hmac_test_invalid_buffer, s_sha256_hmac_test_invalid_buffer_fn) static int s_sha256_hmac_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t secret[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input_buf = aws_byte_cursor_from_c_str("This is a test using a larger than " "block-size key and a larger than block-size " "data. The key needs to be hashed before " "being used by the HMAC algorithm."); uint8_t output[AWS_SHA256_HMAC_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; struct aws_hmac *hmac = aws_sha256_hmac_new(allocator, &secret_buf); ASSERT_NOT_NULL(hmac); ASSERT_SUCCESS(aws_hmac_update(hmac, &input_buf)); ASSERT_SUCCESS(aws_hmac_finalize(hmac, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hmac_update(hmac, &input_buf)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hmac_finalize(hmac, &output_buf, 0)); aws_hmac_destroy(hmac); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_hmac_test_invalid_state, s_sha256_hmac_test_invalid_state_fn) static int s_sha256_hmac_test_extra_buffer_space_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); uint8_t secret[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, }; struct aws_byte_cursor secret_buf = aws_byte_cursor_from_array(secret, sizeof(secret)); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); struct aws_byte_buf digest_size_buf; struct aws_byte_buf super_size_buf; aws_byte_buf_init(&digest_size_buf, allocator, AWS_SHA256_HMAC_LEN); aws_byte_buf_init(&super_size_buf, allocator, AWS_SHA256_HMAC_LEN + 100); aws_sha256_hmac_compute(allocator, &secret_buf, &input, &digest_size_buf, 0); aws_sha256_hmac_compute(allocator, &secret_buf, &input, &super_size_buf, 0); ASSERT_TRUE(aws_byte_buf_eq(&digest_size_buf, &super_size_buf)); ASSERT_TRUE(super_size_buf.len == AWS_SHA256_HMAC_LEN); aws_byte_buf_clean_up(&digest_size_buf); aws_byte_buf_clean_up(&super_size_buf); aws_cal_library_clean_up(); return 
AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_hmac_test_extra_buffer_space, s_sha256_hmac_test_extra_buffer_space_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/sha256_test.c000066400000000000000000000260251456575232400233510ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /* * these are the NIST test vectors, as compiled here: * https://www.di-mgt.com.au/sha_testvectors.html */ static int s_sha256_nist_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abc"); uint8_t expected[] = { 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha256_new); } AWS_TEST_CASE(sha256_nist_test_case_1, s_sha256_nist_test_case_1_fn) static int s_sha256_nist_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str(""); uint8_t expected[] = { 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha256_new); } AWS_TEST_CASE(sha256_nist_test_case_2, s_sha256_nist_test_case_2_fn) static int s_sha256_nist_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); uint8_t expected[] = { 0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha256_new); } AWS_TEST_CASE(sha256_nist_test_case_3, s_sha256_nist_test_case_3_fn) static int s_sha256_nist_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = { 0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80, 0x03, 0x6c, 0xe5, 0x9e, 0x7b, 0x04, 0x92, 0x37, 0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51, 0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_hash_test_case(allocator, &input, &expected_buf, aws_sha256_new); } AWS_TEST_CASE(sha256_nist_test_case_4, s_sha256_nist_test_case_4_fn) static int s_sha256_nist_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_hash *hash = aws_sha256_new(allocator); ASSERT_NOT_NULL(hash); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_hash_update(hash, &input)); } uint8_t 
output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 0)); uint8_t expected[] = { 0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7, 0xe2, 0x84, 0xd7, 0x3e, 0x67, 0xf1, 0x80, 0x9a, 0x48, 0xa4, 0x97, 0x20, 0x0e, 0x04, 0x6d, 0x39, 0xcc, 0xc7, 0x11, 0x2c, 0xd0, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_nist_test_case_5, s_sha256_nist_test_case_5_fn) static int s_sha256_nist_test_case_5_truncated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_hash *hash = aws_sha256_new(allocator); ASSERT_NOT_NULL(hash); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_hash_update(hash, &input)); } uint8_t expected[] = { 0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7, 0xe2, 0x84, 0xd7, 0x3e, 0x67, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected_buf.len); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 16)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_nist_test_case_5_truncated, s_sha256_nist_test_case_5_truncated_fn) static int s_sha256_nist_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_hash *hash = aws_sha256_new(allocator); ASSERT_NOT_NULL(hash); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"); for (size_t i = 0; i < 16777216; ++i) { ASSERT_SUCCESS(aws_hash_update(hash, &input)); } uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 0)); uint8_t expected[] = { 0x50, 0xe7, 0x2a, 0x0e, 0x26, 0x44, 0x2f, 0xe2, 0x55, 0x2d, 0xc3, 0x93, 0x8a, 0xc5, 0x86, 0x58, 0x22, 0x8c, 0x0c, 0xbf, 0xb1, 0xd2, 0xca, 0x87, 0x2a, 0xe4, 0x35, 0x26, 0x6f, 0xcd, 0x05, 0x5e, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_nist_test_case_6, s_sha256_nist_test_case_6_fn) static int s_sha256_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_sha256_compute(allocator, &input, &output_buf, 0)); aws_cal_library_clean_up(); return 
AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_test_invalid_buffer, s_sha256_test_invalid_buffer_fn) static int s_sha256_test_oneshot_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = { 0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80, 0x03, 0x6c, 0xe5, 0x9e, 0x7b, 0x04, 0x92, 0x37, 0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51, 0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1, }; uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_sha256_compute(allocator, &input, &output_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output_buf.buffer, output_buf.len); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_test_oneshot, s_sha256_test_oneshot_fn) static int s_sha256_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_hash *hash = aws_sha256_new(allocator); ASSERT_NOT_NULL(hash); uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_hash_update(hash, &input)); ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hash_update(hash, &input)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_hash_finalize(hash, &output_buf, 0)); aws_hash_destroy(hash); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_test_invalid_state, s_sha256_test_invalid_state_fn) static int s_sha256_test_extra_buffer_space_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_cal_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("123456789012345678901234567890123456789012345" "67890123456789012345678901234567890"); struct aws_byte_buf digest_size_buf; struct aws_byte_buf super_size_buf; aws_byte_buf_init(&digest_size_buf, allocator, AWS_SHA256_LEN); aws_byte_buf_init(&super_size_buf, allocator, AWS_SHA256_LEN + 100); aws_sha256_compute(allocator, &input, &digest_size_buf, 0); aws_sha256_compute(allocator, &input, &super_size_buf, 0); ASSERT_TRUE(aws_byte_buf_eq(&digest_size_buf, &super_size_buf)); ASSERT_TRUE(super_size_buf.len == AWS_SHA256_LEN); aws_byte_buf_clean_up(&digest_size_buf); aws_byte_buf_clean_up(&super_size_buf); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_test_extra_buffer_space, s_sha256_test_extra_buffer_space_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-cal/tests/test_case_helper.h000066400000000000000000000057351456575232400246250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include static inline int s_verify_hmac_test_case( struct aws_allocator *allocator, struct aws_byte_cursor *input, struct aws_byte_cursor *secret, struct aws_byte_cursor *expected, aws_hmac_new_fn *new_fn) { aws_cal_library_init(allocator); /* test all possible segmentation lengths from 1 byte at a time to the entire * input. 
Using a do-while so that we still do 1 pass on 0-length input */ size_t advance_i = 1; do { uint8_t output[128] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, AWS_ARRAY_SIZE(output)); struct aws_hmac *hmac = new_fn(allocator, secret); ASSERT_NOT_NULL(hmac); struct aws_byte_cursor input_cpy = *input; while (input_cpy.len) { size_t max_advance = aws_min_size(input_cpy.len, advance_i); struct aws_byte_cursor segment = aws_byte_cursor_from_array(input_cpy.ptr, max_advance); ASSERT_SUCCESS(aws_hmac_update(hmac, &segment)); aws_byte_cursor_advance(&input_cpy, max_advance); } size_t truncation_size = expected->len; ASSERT_SUCCESS(aws_hmac_finalize(hmac, &output_buf, truncation_size)); ASSERT_BIN_ARRAYS_EQUALS(expected->ptr, expected->len, output_buf.buffer, output_buf.len); aws_hmac_destroy(hmac); } while (++advance_i <= input->len); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } static inline int s_verify_hash_test_case( struct aws_allocator *allocator, struct aws_byte_cursor *input, struct aws_byte_cursor *expected, aws_hash_new_fn *new_fn) { aws_cal_library_init(allocator); /* test all possible segmentation lengths from 1 byte at a time to the entire * input. Using a do-while so that we still do 1 pass on 0-length input */ size_t advance_i = 1; do { uint8_t output[128] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, AWS_ARRAY_SIZE(output)); struct aws_hash *hash = new_fn(allocator); ASSERT_NOT_NULL(hash); struct aws_byte_cursor input_cpy = *input; while (input_cpy.len) { size_t max_advance = aws_min_size(input_cpy.len, advance_i); struct aws_byte_cursor segment = aws_byte_cursor_from_array(input_cpy.ptr, max_advance); ASSERT_SUCCESS(aws_hash_update(hash, &segment)); aws_byte_cursor_advance(&input_cpy, max_advance); } size_t truncation_size = expected->len; ASSERT_SUCCESS(aws_hash_finalize(hash, &output_buf, truncation_size)); ASSERT_BIN_ARRAYS_EQUALS(expected->ptr, expected->len, output_buf.buffer, output_buf.len); aws_hash_destroy(hash); } while (++advance_i <= input->len); aws_cal_library_clean_up(); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/000077500000000000000000000000001456575232400205205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.builder/000077500000000000000000000000001456575232400222245ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.builder/actions/000077500000000000000000000000001456575232400236645ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.builder/actions/clang-tidy.py000066400000000000000000000017551456575232400263010ustar00rootroot00000000000000 import Builder import glob import os import sys def run_clang_tidy(env): sh = env.shell toolchain = env.toolchain clang_tidy = Builder.Util.where('clang-tidy') if not clang_tidy: clang_tidy = toolchain.find_llvm_tool('clang-tidy')[0] if not clang_tidy: print("No clang-tidy executable could be found") sys.exit(1) sources = [os.path.join(env.source_dir, file) for file in glob.glob('source/**/*.c') + glob.glob('source/*.c') if not ('windows' in file or 'android' in file)] return Builder.Script([ [clang_tidy, '-p', os.path.join(env.build_dir, env.project.name)] + sources ]) class ClangTidy(Builder.Action): def is_main(self): return True def run(self, env): return Builder.Script([ Builder.InstallPackages(['clang-tidy']), Builder.DownloadDependencies(), Builder.CMakeBuild(env.project), run_clang_tidy, ]) 
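/*
 * Illustrative sketch (editorial addition, not part of the upstream sources): the
 * s_verify_hash_test_case()/s_verify_hmac_test_case() helpers in test_case_helper.h above
 * drive the streaming hash API by feeding the input one segment at a time. A minimal
 * stand-alone use of that same pattern, built only from calls that already appear in the
 * tests above, might look like the following. The three #include paths are assumptions,
 * since the include targets in this copy of the sources were stripped.
 */
#include <aws/cal/cal.h>
#include <aws/cal/hash.h>
#include <aws/common/byte_buf.h>

static int s_example_streaming_sha256(struct aws_allocator *allocator) {
    aws_cal_library_init(allocator);

    struct aws_hash *hash = aws_sha256_new(allocator);
    if (hash == NULL) {
        aws_cal_library_clean_up();
        return AWS_OP_ERR;
    }

    /* Feed the message in two chunks to mimic streaming input; any segmentation
     * must yield the same digest as a one-shot aws_sha256_compute() call. */
    struct aws_byte_cursor part1 = aws_byte_cursor_from_c_str("hello ");
    struct aws_byte_cursor part2 = aws_byte_cursor_from_c_str("world");

    uint8_t digest[AWS_SHA256_LEN] = {0};
    struct aws_byte_buf digest_buf = aws_byte_buf_from_empty_array(digest, sizeof(digest));

    int result = aws_hash_update(hash, &part1);
    if (result == AWS_OP_SUCCESS) {
        result = aws_hash_update(hash, &part2);
    }
    if (result == AWS_OP_SUCCESS) {
        /* truncate_to = 0 requests the full AWS_SHA256_LEN digest */
        result = aws_hash_finalize(hash, &digest_buf, 0);
    }

    aws_hash_destroy(hash);
    aws_cal_library_clean_up();
    return result;
}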
aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.clang-format000066400000000000000000000031611456575232400230740ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.clang-tidy000066400000000000000000000020431456575232400225530ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter,-readability-avoid-const-params-in-decls,-readability-else-after-return,-readability-isolate-declaration,-readability-uppercase-literal-suffix,-bugprone-sizeof-expression,-bugprone-easily-swappable-parameters,-readability-identifier-length,-misc-no-recursion,-readability-function-cognitive-complexity,-readability-magic-numbers' WarningsAsErrors: '*' HeaderFilterRegex: '.*\.[h|inl]$' FormatStyle: 'file' CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSuffix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h,-assert.h' ... aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/000077500000000000000000000000001456575232400220605ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400242435ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045231456575232400270600ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? 
A clear and concise description of the bug. validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. validations: required: false - type: input id: aws-c-common-version attributes: label: aws-c-common version used validations: required: true - type: input id: compiler-version attributes: label: Compiler and version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003311456575232400262300ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-c-common/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400276340ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400301120ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." 
validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400256570ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/000077500000000000000000000000001456575232400241155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/ci.yml000066400000000000000000000275311456575232400252430ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' env: BUILDER_VERSION: v0.9.55 BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net BUILDER_SOURCE: releases PACKAGE_NAME: aws-c-common LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: linux-compat: runs-on: ubuntu-20.04 # latest strategy: fail-fast: false matrix: image: - manylinux1-x64 - manylinux1-x86 - manylinux2014-x64 - manylinux2014-x86 - raspbian-bullseye - fedora-34-x64 - opensuse-leap - rhel8-x64 - al2-x64 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh docker run --rm --privileged multiarch/qemu-user-static --reset -p yes ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 - gcc-11 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. 
downstream: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} clang-sanitizers: runs-on: ubuntu-20.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON linux-no-cpu-extensions: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DUSE_CPU_EXTENSIONS=OFF windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc16: runs-on: windows-2022 # latest strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-16 windows-vc15: runs-on: windows-2022 # latest strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-15 windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ 
env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-no-cpu-extensions: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DUSE_CPU_EXTENSIONS=OFF windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | echo "Starting to run AppVerifier on all tests found by CTest" python .\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-common\build\aws-c-common osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} osx-no-cpu-extensions: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DUSE_CPU_EXTENSIONS=OFF openbsd: runs-on: ubuntu-latest # unit tests hang on macos; use ubuntu instead steps: - uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers uses: cross-platform-actions/action@v0.20.0 with: operating_system: openbsd version: '7.2' shell: bash run: | sudo pkg_add py3-urllib3 python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} freebsd: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: submodules: true - name: Build ${{ env.PACKAGE_NAME }} + consumers uses: cross-platform-actions/action@v0.20.0 with: operating_system: freebsd version: '13.2' run: | sudo pkg install -y python3 py39-urllib3 py39-pip cmake python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} 
cross_compile: name: Cross Compile ${{matrix.arch}} runs-on: ubuntu-20.04 # latest strategy: matrix: arch: [linux-armv6, linux-armv7, linux-arm64, android-armv7] steps: - name: Build ${{ env.PACKAGE_NAME }} run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --target=${{matrix.arch}} linux-debug: runs-on: ubuntu-20.04 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --config Debug windows-debug: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --config Debug osx-debug: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --config Debug aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/clang-format.yml000066400000000000000000000005571456575232400272210ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v3 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h,inl source: 'source include tests verification' aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/clang-tidy.yml000066400000000000000000000007641456575232400267020ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-tidy: strategy: matrix: host: [ubuntu-22.04] # latest runs-on: ${{ matrix.host }} steps: - name: Checkout Sources uses: actions/checkout@v3 - name: clang-tidy lint run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('https://d19elf31gohf1l.cloudfront.net/LATEST/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder clang-tidy --project=aws-c-common aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400306640ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. 
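Every CI job in the ci.yml above bootstraps the same way: it downloads builder.pyz from the builder host and runs a package build, with only the per-job flags (--target, --compiler, --cmake-extra, --config) differing. As a rough local sketch of that shared bootstrap — using only the BUILDER_HOST, BUILDER_SOURCE, BUILDER_VERSION and PACKAGE_NAME values from the ci.yml env block, and omitting the per-job flags — something like the following would reproduce it:

# Local sketch of the CI bootstrap step (assumptions: plain build, no per-job flags).
import subprocess
import sys
from urllib.request import urlretrieve

# Values mirroring the env block of ci.yml above; adjust BUILDER_VERSION as needed.
BUILDER_HOST = "https://d19elf31gohf1l.cloudfront.net"
BUILDER_SOURCE = "releases"
BUILDER_VERSION = "v0.9.55"
PACKAGE_NAME = "aws-c-common"

# Fetch the self-contained builder application, as the workflow steps do.
urlretrieve(f"{BUILDER_HOST}/{BUILDER_SOURCE}/{BUILDER_VERSION}/builder.pyz", "builder.pyz")

# Run a plain package build; the CI jobs append flags such as --target or --cmake-extra.
result = subprocess.run([sys.executable, "builder.pyz", "build", "-p", PACKAGE_NAME])
sys.exit(result.returncode)

The POSIX jobs save the download as "builder", mark it executable and run it directly, while the Windows jobs invoke it through python, but the build command itself is the same.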
aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/codecov.yml000066400000000000000000000015351456575232400262660ustar00rootroot00000000000000name: Code coverage check on: push: env: BUILDER_VERSION: v0.9.55 BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net BUILDER_SOURCE: releases PACKAGE_NAME: aws-c-common AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: codecov-linux: runs-on: ubuntu-22.04 steps: - name: Checkout Sources uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --compiler=gcc-9 --coverage --coverage-exclude=source/external/ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400315540ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/proof_ci.yaml000066400000000000000000000200671456575232400266060ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 # CBMC starter kit 2.9 name: Run CBMC proofs on: push: branches-ignore: - gh-pages pull_request: branches-ignore: - gh-pages workflow_dispatch: # USAGE # # If you need to use different versions for tools like CBMC, modify this file: # .github/workflows/proof_ci_resources/config.yaml # # If you want the CI to use a different GitHub-hosted runner (which must still # be running Ubuntu 20.04), modify the value of this key: # jobs.run_cbmc_proofs.runs-on jobs: run_cbmc_proofs: runs-on: cbmc_ubuntu-latest_64-core name: run_cbmc_proofs permissions: contents: read id-token: write pull-requests: read steps: - name: Check out repository and submodules recursively uses: actions/checkout@v3 with: submodules: 'recursive' - name: Parse config file run: | CONFIG_FILE='.github/workflows/proof_ci_resources/config.yaml' for setting in cadical-tag cbmc-version cbmc-viewer-version kissat-tag litani-version proofs-dir run-cbmc-proofs-command; do VAR=$(echo $setting | tr "[:lower:]" "[:upper:]" | tr - _) echo "${VAR}"=$(yq .$setting $CONFIG_FILE) >> $GITHUB_ENV done - name: Ensure CBMC, CBMC viewer, Litani versions have been specified shell: bash run: | should_exit=false if [ "${{ env.CBMC_VERSION }}" == "" ]; then echo "You must specify a CBMC version (e.g. 'latest' or '5.70.0')" should_exit=true fi if [ "${{ env.CBMC_VIEWER_VERSION }}" == "" ]; then echo "You must specify a CBMC viewer version (e.g. 'latest' or '3.6')" should_exit=true fi if [ "${{ env.LITANI_VERSION }}" == "" ]; then echo "You must specify a Litani version (e.g. 
'latest' or '1.27.0')" should_exit=true fi if [[ "$should_exit" == true ]]; then exit 1; fi - name: Install latest CBMC if: ${{ env.CBMC_VERSION == 'latest' }} shell: bash run: | # Search within 5 most recent releases for latest available package CBMC_REL="https://api.github.com/repos/diffblue/cbmc/releases?page=1&per_page=5" CBMC_DEB=$(curl -s $CBMC_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r '.[].assets[].browser_download_url' | grep -e 'ubuntu-20.04' | head -n 1) CBMC_ARTIFACT_NAME=$(basename $CBMC_DEB) curl -o $CBMC_ARTIFACT_NAME -L $CBMC_DEB sudo dpkg -i $CBMC_ARTIFACT_NAME rm ./$CBMC_ARTIFACT_NAME - name: Install CBMC ${{ env.CBMC_VERSION }} if: ${{ env.CBMC_VERSION != 'latest' }} shell: bash run: | curl -o cbmc.deb -L \ https://github.com/diffblue/cbmc/releases/download/cbmc-${{ env.CBMC_VERSION }}/ubuntu-20.04-cbmc-${{ env.CBMC_VERSION }}-Linux.deb sudo dpkg -i ./cbmc.deb rm ./cbmc.deb - name: Install latest CBMC viewer if: ${{ env.CBMC_VIEWER_VERSION == 'latest' }} shell: bash run: | CBMC_VIEWER_REL="https://api.github.com/repos/model-checking/cbmc-viewer/releases/latest" CBMC_VIEWER_VERSION=$(curl -s $CBMC_VIEWER_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r .name | sed 's/viewer-//') pip3 install cbmc-viewer==$CBMC_VIEWER_VERSION - name: Install CBMC viewer ${{ env.CBMC_VIEWER_VERSION }} if: ${{ env.CBMC_VIEWER_VERSION != 'latest' }} shell: bash run: | sudo apt-get update sudo apt-get install --no-install-recommends --yes \ build-essential universal-ctags pip3 install cbmc-viewer==${{ env.CBMC_VIEWER_VERSION }} - name: Install latest Litani if: ${{ env.LITANI_VERSION == 'latest' }} shell: bash run: | # Search within 5 most recent releases for latest available package LITANI_REL="https://api.github.com/repos/awslabs/aws-build-accumulator/releases?page=1&per_page=5" LITANI_DEB=$(curl -s $LITANI_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r '.[].assets[0].browser_download_url' | head -n 1) DBN_PKG_FILENAME=$(basename $LITANI_DEB) curl -L $LITANI_DEB -o $DBN_PKG_FILENAME sudo apt-get update sudo apt-get install --no-install-recommends --yes ./$DBN_PKG_FILENAME rm ./$DBN_PKG_FILENAME - name: Install Litani ${{ env.LITANI_VERSION }} if: ${{ env.LITANI_VERSION != 'latest' }} shell: bash run: | curl -o litani.deb -L \ https://github.com/awslabs/aws-build-accumulator/releases/download/${{ env.LITANI_VERSION }}/litani-${{ env.LITANI_VERSION }}.deb sudo apt-get update sudo apt-get install --no-install-recommends --yes ./litani.deb rm ./litani.deb - name: Install ${{ env.KISSAT_TAG }} kissat if: ${{ env.KISSAT_TAG != '' }} shell: bash run: | if ${{ env.KISSAT_TAG == 'latest' }} then KISSAT_REL="https://api.github.com/repos/arminbiere/kissat/releases/latest" KISSAT_TAG_NAME=$(curl -s $KISSAT_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r '.tag_name') else KISSAT_TAG_NAME=${{ env.KISSAT_TAG }} fi echo "Installing kissat $KISSAT_TAG_NAME" git clone https://github.com/arminbiere/kissat.git \ && cd kissat \ && git checkout $KISSAT_TAG_NAME \ && ./configure \ && cd build \ && make -j; echo "$(pwd)" >> $GITHUB_PATH - name: Install ${{ env.CADICAL_TAG }} cadical if: ${{ env.CADICAL_TAG != '' }} shell: bash run: | if ${{ env.CADICAL_TAG == 'latest' }} then CADICAL_REL="https://api.github.com/repos/arminbiere/cadical/releases/latest" CADICAL_TAG_NAME=$(curl -s $CADICAL_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r '.tag_name') else CADICAL_TAG_NAME=${{ 
env.CADICAL_TAG }} fi echo "Installing cadical $CADICAL_TAG_NAME" git clone https://github.com/arminbiere/cadical.git \ && cd cadical \ && git checkout $CADICAL_TAG_NAME \ && ./configure \ && cd build \ && make -j; echo "$(pwd)" >> $GITHUB_PATH - name: Run CBMC proofs shell: bash env: EXTERNAL_SAT_SOLVER: kissat working-directory: ${{ env.PROOFS_DIR }} run: ${{ env.RUN_CBMC_PROOFS_COMMAND }} - name: Check repository visibility shell: bash run: | VIZ="${{ fromJson(toJson(github.event.repository)).visibility }}"; echo "REPO_VISIBILITY=${VIZ}" | tee -a "${GITHUB_ENV}"; - name: Set name for zip artifact with CBMC proof results id: artifact if: ${{ env.REPO_VISIBILITY == 'public' }} run: | echo "name=cbmc_proof_results_${{ fromJson(toJson(github.event.repository)).name }}_$(date +%Y_%m_%d_%H_%M_%S)" >> $GITHUB_OUTPUT - name: Create zip artifact with CBMC proof results if: ${{ env.REPO_VISIBILITY == 'public' }} shell: bash run: | FINAL_REPORT_DIR=$PROOFS_DIR/output/latest/html pushd $FINAL_REPORT_DIR \ && zip -r ${{ steps.artifact.outputs.name }}.zip . \ && popd \ && mv $FINAL_REPORT_DIR/${{ steps.artifact.outputs.name }}.zip . - name: Upload zip artifact of CBMC proof results to GitHub Actions if: ${{ env.REPO_VISIBILITY == 'public' }} uses: actions/upload-artifact@v3 with: name: ${{ steps.artifact.outputs.name }} path: ${{ steps.artifact.outputs.name }}.zip - name: CBMC proof results shell: bash run: | python3 ${{ env.PROOFS_DIR }}/lib/summarize.py \ --run-file ${{ env.PROOFS_DIR }}/output/latest/html/run.json aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/proof_ci_resources/000077500000000000000000000000001456575232400300075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/proof_ci_resources/config.yaml000066400000000000000000000003021456575232400321330ustar00rootroot00000000000000cadical-tag: latest cbmc-version: latest cbmc-viewer-version: latest kissat-tag: latest litani-version: latest proofs-dir: verification/cbmc/proofs run-cbmc-proofs-command: ./run-cbmc-proofs.py aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.github/workflows/stale_issue.yml000066400000000000000000000046331456575232400271660ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job permissions: issues: write pull-requests: write steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deservers. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! 
It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 10 days-before-close: 4 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-common/.gitignore000066400000000000000000000011011456575232400225010ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags .vscode #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* *-build # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app # CBMC files *.goto *.log aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/000077500000000000000000000000001456575232400250565ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/.gitignore000066400000000000000000000003201456575232400270410ustar00rootroot00000000000000*.iml .gradle /local.properties /.idea/caches /.idea/libraries /.idea/modules.xml /.idea/workspace.xml /.idea/navEditor.xml /.idea/assetWizardSettings.xml .DS_Store /build /captures .externalNativeBuild .cxx aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/000077500000000000000000000000001456575232400256365ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/.gitignore000066400000000000000000000000541456575232400276250ustar00rootroot00000000000000/build src/androidTest/**/tests src/main/resaws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/build.gradle000066400000000000000000000023551456575232400301220ustar00rootroot00000000000000apply plugin: 'com.android.application' apply plugin: 'kotlin-android' apply plugin: 'kotlin-android-extensions' android { compileSdkVersion 29 buildToolsVersion "29.0.3" ndkVersion "21.0.6113669" defaultConfig { applicationId "software.amazon.awssdk.crt.awscrtandroidtestrunner" minSdkVersion 21 targetSdkVersion 29 versionCode 1 versionName "1.0" testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner" externalNativeBuild { cmake { cppFlags "" } } } buildTypes { release { minifyEnabled false } } externalNativeBuild { cmake { path "src/main/cpp/CMakeLists.txt" version "3.10.2" } } } dependencies { implementation fileTree(dir: 'libs', include: ['*.jar']) implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" implementation 'androidx.appcompat:appcompat:1.1.0' implementation 
'androidx.core:core-ktx:1.2.0' androidTestImplementation 'androidx.test.ext:junit:1.1.1' androidTestImplementation 'androidx.test:runner:1.2.0' androidTestImplementation 'androidx.test:monitor:1.2.0@aar' } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/000077500000000000000000000000001456575232400264255ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/000077500000000000000000000000001456575232400307055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/java/000077500000000000000000000000001456575232400316265ustar00rootroot00000000000000software/000077500000000000000000000000001456575232400334015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/javaamazon/000077500000000000000000000000001456575232400346665ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/java/softwareawssdk/000077500000000000000000000000001456575232400361625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/java/software/amazoncrt/000077500000000000000000000000001456575232400367525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/java/software/amazon/awssdkawscrtandroidtestrunner/000077500000000000000000000000001456575232400437505ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/java/software/amazon/awssdk/crtNativeTest.kt.in000066400000000000000000000006161456575232400470060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/java/software/amazon/awssdk/crt/awscrtandroidtestrunnerpackage software.amazon.awssdk.crt.awscrtandroidtestrunner.tests import androidx.test.ext.junit.runners.AndroidJUnit4 import org.junit.Test import org.junit.Assert import software.amazon.awssdk.crt.awscrtandroidtestrunner.NativeTestFixture class Test_@TEST_NAME@ : NativeTestFixture() { @Test public fun test_@TEST_NAME@() { Assert.assertEquals(0, runTest("@TEST_NAME@")) } } NativeTestFixture.kt000066400000000000000000000004661456575232400477530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/androidTest/java/software/amazon/awssdk/crt/awscrtandroidtestrunnerpackage software.amazon.awssdk.crt.awscrtandroidtestrunner open class NativeTestFixture { companion object { // Used to load the 'native-lib' library on application startup. 
init { System.loadLibrary("native-lib") } } external fun runTest(name: String): Int } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/main/000077500000000000000000000000001456575232400273515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/main/AndroidManifest.xml000066400000000000000000000002721456575232400331430ustar00rootroot00000000000000 aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/main/cpp/000077500000000000000000000000001456575232400301335ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/main/cpp/CMakeLists.txt000066400000000000000000000044501456575232400326760ustar00rootroot00000000000000# For more information about using CMake with Android Studio, read the # documentation: https://d.android.com/studio/projects/add-native-code.html # Sets the minimum version of CMake required to build the native library. cmake_minimum_required(VERSION 3.4.1) # AWS lib set(path_to_common "${CMAKE_CURRENT_LIST_DIR}/../../../../..") get_filename_component(path_to_common ${path_to_common} ABSOLUTE) # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) list(APPEND CMAKE_MODULE_PATH "${path_to_common}/cmake") include(AwsFindPackage) set(IN_SOURCE_BUILD ON) set(BUILD_SHARED_LIBS ON) # We will generate our own tests, the tests that are there depend on CTest set(ALLOW_CROSS_COMPILED_TESTS ON) set(BUILD_TESTING ON) add_subdirectory(${path_to_common} ${CMAKE_CURRENT_BINARY_DIR}/aws-c-common) aws_use_package(aws-c-common) function(import_tests test_cmakelists) get_property(TEST_CASES GLOBAL PROPERTY AWS_TEST_CASES) # Generate Kotlin test classes get_filename_component(testrunner_path "../../androidTest/java/software/amazon/awssdk/crt/awscrtandroidtestrunner" ABSOLUTE) foreach(name IN LISTS TEST_CASES) set(TEST_NAME "${name}") configure_file( "${testrunner_path}/NativeTest.kt.in" "${testrunner_path}/tests/NativeTest_${name}.kt" ) endforeach() endfunction() file(GLOB test_src "${path_to_common}/tests/*.c") file(GLOB test_logging_src "${path_to_common}/tests/logging/logging_test_utilities.c" "${path_to_common}/tests/logging/test_logger.c") set(test_src ${test_src} ${test_logging_src}) import_tests(${path_to_common}/tests/CMakeLists.txt) # JNI Lib add_library(native-lib SHARED native-lib.cpp ${test_src}) find_library(log-lib log) target_include_directories(native-lib PUBLIC "${path_to_common}/include" "${path_to_common}/tests" "${CMAKE_CURRENT_BINARY_DIR}/aws-c-common/generated/include") target_compile_definitions(native-lib PRIVATE AWS_UNSTABLE_TESTING_API=1) target_link_libraries(native-lib ${log-lib} ${DEP_AWS_LIBS}) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/app/src/main/cpp/native-lib.cpp000066400000000000000000000016311456575232400326720ustar00rootroot00000000000000#include #include #include #include #include typedef int(test_fn_t)(int, char**); extern "C" JNIEXPORT jint JNICALL Java_software_amazon_awssdk_crt_awscrtandroidtestrunner_NativeTestFixture_runTest( JNIEnv *env, jobject /* this */, jstring jni_name) { const char *test_name = env->GetStringUTFChars(jni_name, nullptr); __android_log_print(ANDROID_LOG_INFO, "native-test", 
"RUNNING %s", test_name); test_fn_t *test_fn = (test_fn_t*)dlsym(RTLD_DEFAULT, test_name); if (!test_fn) { __android_log_print(ANDROID_LOG_WARN, "native-test", "%s NOT FOUND", test_name); return -1; } int result = test_fn(0, nullptr); __android_log_print( result ? ANDROID_LOG_FATAL : ANDROID_LOG_INFO, "native-test", "%s %s", test_name, result ? "FAILED" : "OK"); return result; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/build.gradle000066400000000000000000000012261456575232400273360ustar00rootroot00000000000000// Top-level build file where you can add configuration options common to all sub-projects/modules. buildscript { ext.kotlin_version = '1.3.71' repositories { google() jcenter() } dependencies { classpath 'com.android.tools.build:gradle:3.6.2' classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" // NOTE: Do not place your application dependencies here; they belong // in the individual module build.gradle files } } allprojects { repositories { google() jcenter() } } task clean(type: Delete) { delete rootProject.buildDir } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/gradle.properties000066400000000000000000000022721456575232400304350ustar00rootroot00000000000000# Project-wide Gradle settings. # IDE (e.g. Android Studio) users: # Gradle settings configured through the IDE *will override* # any settings specified in this file. # For more details on how to configure your build environment visit # http://www.gradle.org/docs/current/userguide/build_environment.html # Specifies the JVM arguments used for the daemon process. # The setting is particularly useful for tweaking memory settings. org.gradle.jvmargs=-Xmx1536m # When configured, Gradle will run in incubating parallel mode. # This option should only be used with decoupled projects. More details, visit # http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects # org.gradle.parallel=true # AndroidX package structure to make it clearer which packages are bundled with the # Android operating system, and which are packaged with your app's APK # https://developer.android.com/topic/libraries/support-library/androidx-rn android.useAndroidX=true # Automatically convert third-party libraries to use AndroidX android.enableJetifier=true # Kotlin code style for this project: "official" or "obsolete": kotlin.code.style=official org.gradle.daemon=true org.gradle.parallel=trueaws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/gradle/000077500000000000000000000000001456575232400263145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/gradle/wrapper/000077500000000000000000000000001456575232400277745ustar00rootroot00000000000000gradle-wrapper.jar000066400000000000000000001520711456575232400333350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/gradle/wrapperPKA META-INF/PKA(M?TMETA-INF/MANIFEST.MFMLK-. K-*ϳR03-IM+I, dZ)%bµrrPKAorg/PKA org/gradle/PKAorg/gradle/wrapper/PKAzZ -org/gradle/wrapper/BootstrapMainStarter.classVYwV˖#DgH !bSJiIh @c5žv2$ڗSz?B;Wopsٿ;#g^O2Bx[aKXpA .X%˒ e\U &Ẅ)XpCƻHXp+|y/wPe#+!ǼC(H8=' f Ni[.יuM]׉JYU_U-kDJb)*$ Y⁥nn2+q4M%U3Ҷj̚/񣀣ѵ]UHۖff]f&4:V< ./EQY@˽Z}4le'uTJj1jN\_IձrvHz|;$󗘽a X,py$9ؘM/yfLq[YikQQ)Ke#,p)OżTGd`3¢r,[Yh.-(8Q3 ^:J=G]ܓ+(`* ,q_l0zFqjq8~XlC`5eOGX@.]O|Ϩ^Yb >Ǩr-? BJC|Mݸ(+o|z^)Vd;04z.bvD`".g3rJD3V+?~0?{g7y}^@O4V%\IZ$S$9T.٬qf ]}UA!  
gradle-wrapper.properties000066400000000000000000000003501456575232400347450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/gradle/wrapper#Wed Apr 01 11:18:00 PDT 2020 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists distributionUrl=https\://services.gradle.org/distributions/gradle-5.6.4-all.zip aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/gradlew000077500000000000000000000122601456575232400264320ustar00rootroot00000000000000#!/usr/bin/env sh ############################################################################## ## ## Gradle start up script for UN*X ## ############################################################################## # Attempt to set APP_HOME # Resolve links: $0 may be a link PRG="$0" # Need this for relative symlinks. while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/null; then PRG="$link" else PRG=`dirname "$PRG"`"/$link" fi done SAVED="`pwd`" cd "`dirname \"$PRG\"`/" >/dev/null APP_HOME="`pwd -P`" cd "$SAVED" >/dev/null APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Add default JVM options here.
You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS="" # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" warn () { echo "$*" } die () { echo echo "$*" echo exit 1 } # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false case "`uname`" in CYGWIN* ) cygwin=true ;; Darwin* ) darwin=true ;; MINGW* ) msys=true ;; NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables JAVACMD="$JAVA_HOME/jre/sh/java" else JAVACMD="$JAVA_HOME/bin/java" fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else JAVACMD="java" which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi # Increase the maximum file descriptors if we can. if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then MAX_FD="$MAX_FD_LIMIT" fi ulimit -n $MAX_FD if [ $? -ne 0 ] ; then warn "Could not set maximum file descriptor limit: $MAX_FD" fi else warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" fi fi # For Darwin, add options to specify how the application appears in the dock if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi # For Cygwin, switch paths to Windows format before running java if $cygwin ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` SEP="" for dir in $ROOTDIRSRAW ; do ROOTDIRS="$ROOTDIRS$SEP$dir" SEP="|" done OURCYGPATTERN="(^($ROOTDIRS))" # Add a user-defined pattern to the cygpath arguments if [ "$GRADLE_CYGPATTERN" != "" ] ; then OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" fi # Now convert the arguments - kludge to limit ourselves to /bin/sh i=0 for arg in "$@" ; do CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` else eval `echo args$i`="\"$arg\"" fi i=$((i+1)) done case $i in (0) set -- ;; (1) set -- "$args0" ;; (2) set -- "$args0" "$args1" ;; (3) set -- "$args0" "$args1" "$args2" ;; (4) set -- "$args0" "$args1" "$args2" "$args3" ;; (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; esac fi # Escape application args save () { for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done echo " " } 
APP_ARGS=$(save "$@") # Collect all arguments for the java command, following the shell quoting and substitution rules eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then cd "$(dirname "$0")" fi exec "$JAVACMD" "$@" aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/gradlew.bat000066400000000000000000000043241456575232400271760ustar00rootroot00000000000000@if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @rem @rem ########################################################################## @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS= @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if "%ERRORLEVEL%" == "0" goto init echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :findJavaFromJavaHome set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto init echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% echo. echo Please set the JAVA_HOME variable in your environment to match the echo location of your Java installation. goto fail :init @rem Get command-line arguments, handling Windows variants if not "%OS%" == "Windows_NT" goto win9xME_args :win9xME_args @rem Slurp the command line arguments. set CMD_LINE_ARGS= set _SKIP=2 :win9xME_args_slurp if "x%~1" == "x" goto execute set CMD_LINE_ARGS=%* :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% :end @rem End local scope for the variables with windows NT shell if "%ERRORLEVEL%"=="0" goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 exit /b 1 :mainEnd if "%OS%"=="Windows_NT" endlocal :omega aws-crt-python-0.20.4+dfsg/crt/aws-c-common/AWSCRTAndroidTestRunner/settings.gradle000066400000000000000000000000721456575232400300750ustar00rootroot00000000000000rootProject.name='AWSCRTAndroidTestRunner' include ':app' aws-crt-python-0.20.4+dfsg/crt/aws-c-common/CMakeLists.txt000066400000000000000000000261711456575232400232670ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
cmake_minimum_required(VERSION 3.0) option(ALLOW_CROSS_COMPILED_TESTS "Allow tests to be compiled via cross compile, for use with qemu" OFF) project(aws-c-common LANGUAGES C VERSION 0.1.0) message(STATUS "CMake ${CMAKE_VERSION}") if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() if (POLICY CMP0077) cmake_policy(SET CMP0077 OLD) # Enable options to get their values from normal variables endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsFeatureTests) include(AwsSanitizers) include(AwsThreadAffinity) include(AwsThreadName) include(CTest) set(GENERATED_ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated") set(GENERATED_INCLUDE_DIR "${GENERATED_ROOT_DIR}/include") set(GENERATED_CONFIG_HEADER "${GENERATED_INCLUDE_DIR}/aws/common/config.h") set(CONFIG_HEADER_TEMPLATE "${CMAKE_CURRENT_SOURCE_DIR}/include/aws/common/config.h.in") file(GLOB AWS_COMMON_HEADERS "include/aws/common/*.h" "include/aws/common/*.inl" ) file (GLOB AWS_COMMON_EXTERNAL_HEADERS "include/aws/common/external/*.h") file (GLOB AWS_COMMON_EXTERNAL_INSTALLED_HEADERS "include/aws/common/external/ittnotify.h") file(GLOB AWS_TEST_HEADERS "include/aws/testing/*.h" ) file(GLOB AWS_COMMON_PRIV_HEADERS "include/aws/common/private/*.h" "include/aws/common/private/*.c" ) file(GLOB AWS_COMMON_SRC "source/*.c" ) file (GLOB AWS_COMMON_EXTERNAL_SRC "source/external/*.c") option(AWS_NUM_CPU_CORES "Number of CPU cores of the target machine. Useful when cross-compiling." 0) if (WIN32) set(WINDOWS_KERNEL_LIB "kernel32" CACHE STRING "The name of the kernel library to link against (default: kernel32)") file(GLOB AWS_COMMON_OS_HEADERS "include/aws/common/windows/*" ) file(GLOB AWS_COMMON_OS_SRC "source/windows/*.c" "source/platform_fallback_stubs/system_info.c" ) if (MSVC) source_group("Header Files\\aws\\common" FILES ${AWS_COMMON_HEADERS}) source_group("Header Files\\aws\\common\\private" FILES ${AWS_COMMON_PRIV_HEADERS}) source_group("Header Files\\aws\\testing" FILES ${AWS_TEST_HEADERS}) source_group("Source Files" FILES ${AWS_COMMON_SRC}) source_group("Source Files\\windows" FILES ${AWS_COMMON_OS_SRC}) endif () list(APPEND PLATFORM_DEFINES WINDOWS_KERNEL_LIB=${WINDOWS_KERNEL_LIB}) # PSAPI_VERSION=1 is needed to support GetProcessMemoryInfo on both pre and # post Win7 OS's. list(APPEND PLATFORM_DEFINES PSAPI_VERSION=1) list(APPEND PLATFORM_LIBS bcrypt ${WINDOWS_KERNEL_LIB} ws2_32 shlwapi psapi) else () file(GLOB AWS_COMMON_OS_HEADERS "include/aws/common/posix/*" ) file(GLOB AWS_COMMON_OS_SRC "source/posix/*.c" ) set(THREADS_PREFER_PTHREAD_FLAG ON) if (UNIX OR APPLE) find_package(Threads REQUIRED) if (NOT ANDROID AND NOT CMAKE_THREAD_LIBS_INIT) check_symbol_exists(pthread_mutexattr_init "" HAVE_PTHREAD_MUTEXATTR_INIT) if (NOT HAVE_PTHREAD_MUTEXATTR_INIT) # fsanitize=... 
results in GLIBC library to provide some pthread APIs but not all list(APPEND PLATFORM_LIBS pthread) endif() endif() endif() if (APPLE) # Don't add the exact path to CoreFoundation as this would hardcode the SDK version list(APPEND PLATFORM_LIBS dl Threads::Threads "-framework CoreFoundation") list (APPEND AWS_COMMON_OS_SRC "source/darwin/*.c") # OS specific includes list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") # Android does not link to libpthread nor librt, so this is fine list(APPEND PLATFORM_LIBS dl m Threads::Threads rt) list (APPEND AWS_COMMON_OS_SRC "source/linux/*.c") # OS specific includes elseif(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") list(APPEND PLATFORM_LIBS dl m thr execinfo) list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif(CMAKE_SYSTEM_NAME STREQUAL "NetBSD") list(APPEND PLATFORM_LIBS dl m Threads::Threads execinfo) list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif(CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") list(APPEND PLATFORM_LIBS m Threads::Threads execinfo) list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") elseif(CMAKE_SYSTEM_NAME STREQUAL "Android") list(APPEND PLATFORM_LIBS log) file(GLOB ANDROID_SRC "source/android/*.c") list(APPEND AWS_COMMON_OS_SRC "${ANDROID_SRC}") list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") else() list (APPEND AWS_COMMON_OS_SRC "source/platform_fallback_stubs/system_info.c") endif() endif() file(GLOB AWS_COMMON_ARCH_SRC "source/arch/generic/*.c" ) if (USE_CPU_EXTENSIONS) if (AWS_ARCH_INTEL) if (MSVC) file(GLOB AWS_COMMON_ARCH_SRC "source/arch/intel/cpuid.c" "source/arch/intel/msvc/*.c" ) source_group("Source Files\\arch\\intel" FILES ${AWS_COMMON_ARCH_SRC}) else() file(GLOB AWS_COMMON_ARCH_SRC "source/arch/intel/cpuid.c" "source/arch/intel/asm/*.c" ) endif() elseif (AWS_ARCH_ARM64 OR AWS_ARCH_ARM32) if (MSVC) file(GLOB AWS_COMMON_ARCH_SRC "source/arch/arm/msvc/*.c" ) elseif (AWS_HAVE_AUXV) file(GLOB AWS_COMMON_ARCH_SRC "source/arch/arm/asm/*.c" ) endif() endif() endif() list(APPEND PLATFORM_LIBS ${CMAKE_DL_LIBS}) file(GLOB COMMON_HEADERS ${AWS_COMMON_HEADERS} ${AWS_COMMON_OS_HEADERS} ${AWS_COMMON_PRIV_HEADERS} ${AWS_COMMON_EXTERNAL_HEADERS} ${AWS_TEST_HEADERS} ) file(GLOB COMMON_SRC ${AWS_COMMON_SRC} ${AWS_COMMON_OS_SRC} ${AWS_COMMON_ARCH_SRC} ${AWS_COMMON_EXTERNAL_SRC} ) add_library(${PROJECT_NAME} ${COMMON_SRC}) aws_set_common_properties(${PROJECT_NAME} NO_WEXTRA) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_COMMON") target_compile_options(${PROJECT_NAME} PUBLIC ${PLATFORM_CFLAGS}) aws_check_headers(${PROJECT_NAME} ${AWS_COMMON_HEADERS}) #apple source already includes the definitions we want, and setting this posix source #version causes it to revert to an older version. So don't turn it on there, we don't need it. if (UNIX AND NOT APPLE AND NOT ${CMAKE_SYSTEM_NAME} MATCHES FreeBSD|OpenBSD) #this only gets applied to aws-c-common (not its consumers). 
target_compile_definitions(${PROJECT_NAME} PRIVATE -D_POSIX_C_SOURCE=200809L -D_XOPEN_SOURCE=500) endif() aws_set_thread_affinity_method(${PROJECT_NAME}) aws_set_thread_name_method(${PROJECT_NAME}) aws_add_sanitizers(${PROJECT_NAME}) target_link_libraries(${PROJECT_NAME} PUBLIC ${PLATFORM_LIBS}) target_compile_definitions(${PROJECT_NAME} PRIVATE ${PLATFORM_DEFINES}) if (AWS_NUM_CPU_CORES) target_compile_definitions(${PROJECT_NAME} PRIVATE -DAWS_NUM_CPU_CORES=${AWS_NUM_CPU_CORES}) endif() set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) target_include_directories(${PROJECT_NAME} PUBLIC $ $) # When we install, the generated header will be at the INSTALL_INTERFACE:include location, # but at build time we need to explicitly include this here target_include_directories(${PROJECT_NAME} PUBLIC $) target_compile_definitions(${PROJECT_NAME} PRIVATE -DCJSON_HIDE_SYMBOLS) if (AWS_HAVE_AVX2_INTRINSICS) target_compile_definitions(${PROJECT_NAME} PRIVATE -DUSE_SIMD_ENCODING) simd_add_source_avx(${PROJECT_NAME} "source/arch/intel/encoding_avx2.c") message(STATUS "Building SIMD base64 decoder") endif() # Preserve subdirectories when installing headers foreach(HEADER_SRCPATH IN ITEMS ${AWS_COMMON_HEADERS} ${AWS_COMMON_OS_HEADERS} ${GENERATED_CONFIG_HEADER} ${AWS_TEST_HEADERS} ${AWS_COMMON_EXTERNAL_INSTALLED_HEADERS}) get_filename_component(HEADER_DIR ${HEADER_SRCPATH} DIRECTORY) # Note: We need to replace the generated include directory component first, otherwise if the build # directory is located inside the source tree, we'll partially rewrite the path and fail to replace it # when we replace the generated include dir. # We also need to take care to not run the source-directory match if the generated-directory match # succeeds; otherwise, if we're installing to /foo/aws-c-common-install, and our source directory is # /foo/aws-c-common, we'll end up installing to /foo/aws-c-common-install-install unset(HEADER_DSTDIR) foreach(POTENTIAL_PREFIX IN ITEMS ${GENERATED_ROOT_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) string(LENGTH ${POTENTIAL_PREFIX} _prefixlen) string(SUBSTRING ${HEADER_DIR} 0 ${_prefixlen} _actual_prefix) if(${_actual_prefix} STREQUAL ${POTENTIAL_PREFIX}) string(REPLACE "${POTENTIAL_PREFIX}/" "" HEADER_DSTDIR "${HEADER_DIR}") break() endif() endforeach() if(NOT HEADER_DSTDIR) message(ERROR "Couldn't find source root for header ${HEADER_SRCPATH}") endif() install(FILES ${HEADER_SRCPATH} DESTINATION ${HEADER_DSTDIR} COMPONENT Development) endforeach() aws_prepare_shared_lib_exports(${PROJECT_NAME}) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}" NAMESPACE AWS:: COMPONENT Development) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake" COMPONENT Development) list(APPEND EXPORT_MODULES "cmake/AwsCFlags.cmake" "cmake/AwsCheckHeaders.cmake" "cmake/AwsSharedLibSetup.cmake" "cmake/AwsTestHarness.cmake" "cmake/AwsLibFuzzer.cmake" "cmake/AwsSanitizers.cmake" "cmake/AwsSIMD.cmake" "cmake/AwsFindPackage.cmake" "cmake/AwsFeatureTests.cmake" "cmake/AwsCRuntime.cmake" ) install(FILES ${EXPORT_MODULES} DESTINATION "${LIBRARY_DIRECTORY}/cmake" COMPONENT Development) # This should come last, to ensure 
all variables defined by cmake will be available for export configure_file(${CONFIG_HEADER_TEMPLATE} ${GENERATED_CONFIG_HEADER} ESCAPE_QUOTES) if (ALLOW_CROSS_COMPILED_TESTS OR NOT CMAKE_CROSSCOMPILING) if (BUILD_TESTING) add_subdirectory(tests) add_subdirectory(bin/system_info) endif() endif() include(CPackConfig) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/CONTRIBUTING.md000066400000000000000000000063201456575232400227520ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-common/issues), or [recently closed](https://github.com/awslabs/aws-c-common/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-common/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-common/blob/main/LICENSE) file for our project's licensing. 
We will ask you confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/LICENSE000066400000000000000000000261361456575232400215350ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/NOTICE000066400000000000000000000001261456575232400214230ustar00rootroot00000000000000AWS C Common Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/README.md000066400000000000000000000314411456575232400220020ustar00rootroot00000000000000## AWS C Common [![GitHub](https://img.shields.io/github/license/awslabs/aws-c-common.svg)](https://github.com/awslabs/aws-c-common/blob/main/LICENSE) Core c99 package for AWS SDK for C. Includes cross-platform primitives, configuration, data structures, and error handling. ## License This library is licensed under the Apache 2.0 License. ## Usage ### Building aws-c-common uses CMake for setting up build environments. This library has no non-kernel dependencies so the build is quite simple. For example: git clone git@github.com:awslabs/aws-c-common.git aws-c-common mkdir aws-c-common-build cd aws-c-common-build cmake ../aws-c-common make -j 12 make test sudo make install Keep in mind that CMake supports multiple build systems, so for each platform you can pass your own build system as the `-G` option. For example: cmake -GNinja ../aws-c-common ninja build ninja test sudo ninja install Or on windows, cmake -G "Visual Studio 14 2015 Win64" ../aws-c-common msbuild.exe ALL_BUILD.vcproj ### CMake Options * -DCMAKE_CLANG_TIDY=/path/to/clang-tidy (or just clang-tidy or clang-tidy-7.0 if it is in your PATH) - Runs clang-tidy as part of your build. * -DENABLE_SANITIZERS=ON - Enables gcc/clang sanitizers, by default this adds -fsanitizer=address,undefined to the compile flags for projects that call aws_add_sanitizers. * -DENABLE_FUZZ_TESTS=ON - Includes fuzz tests in the unit test suite. Off by default, because fuzz tests can take a long time. Set -DFUZZ_TESTS_MAX_TIME=N to determine how long to run each fuzz test (default 60s). * -DCMAKE_INSTALL_PREFIX=/path/to/install - Standard way of installing to a user defined path. If specified when configuring aws-c-common, ensure the same prefix is specified when configuring other aws-c-* SDKs. * -DAWS_STATIC_MSVC_RUNTIME_LIBRARY=ON - Windows-only. Turn ON to use the statically-linked MSVC runtime lib, instead of the DLL. ### API style and conventions Every API has a specific set of styles and conventions. We'll outline them here. These conventions are followed in every library in the AWS C SDK ecosystem. #### Error handling Every function that returns an `int` type, returns `AWS_OP_SUCCESS` ( 0 ) or `AWS_OP_ERR` (-1) on failure. To retrieve the error code, use the function `aws_last_error()`. Each error code also has a corresponding error string that can be accessed via the `aws_error_str()` function. In addition, you can install both a global and a thread local error handler by using the `aws_set_global_error_handler_fn()` and `aws_set_thread_local_error_handler_fn()` functions. All error functions are in the `include/aws/common/error.h` header file. #### Naming Any function that allocates and initializes an object will be suffixed with `new` (e.g. `aws_myobj_new()`). 
Similarly, these objects will always have a corresponding function with a `destroy` suffix. The `new` functions will return the allocated object on success and `NULL` on failure. To respond to the error, call `aws_last_error()`. If several `new` or `destroy` functions are available, the variants should be named like `new_x` or `destroy_x` (e.g. `aws_myobj_new_copy()` or `aws_myobj_destroy_secure()`). Any function that initializes an existing object will be suffixed with `init` (e.g. `aws_myobj_init()`. These objects will have a corresponding `clean_up` function if necessary. In these cases, you are responsible for making the decisions for how your object is allocated. The `init` functions return `AWS_OP_SUCCESS` ( 0 ) or `AWS_OP_ERR` (-1) on failure. If several `init` or `clean_up` functions are available, they should be named like `init_x` or `clean_up_x` (e.g. `aws_myobj_init_static()` or `aws_myobj_clean_up_secure()`). ## Contributing If you are contributing to this code-base, first off, THANK YOU!. There are a few things to keep in mind to minimize the pull request turn around time. ### Coding "guidelines" These "guidelines" are followed in every library in the AWS C SDK ecosystem. #### Memory Management * All APIs that need to be able to allocate memory, must take an instance of `aws_allocator` and use that. No `malloc()` or `free()` calls should be made directly. * If an API does not allocate the memory, it does not free it. All allocations and deallocations should take place at the same level. For example, if a user allocates memory, the user is responsible for freeing it. There will inevitably be a few exceptions to this rule, but they will need significant justification to make it through the code-review. * All functions that allocate memory must raise an `AWS_ERROR_OOM` error code upon allocation failures. If it is a `new()` function it should return NULL. If it is an `init()` function, it should return `AWS_OP_ERR`. #### Threading * Occasionally a thread is necessary. In those cases, prefer for memory not to be shared between threads. If memory must cross a thread barrier it should be a complete ownership hand-off. Bias towards, "if I need a mutex, I'm doing it wrong". * Do not sleep or block .... ever .... under any circumstances, in non-test-code. * Do not expose blocking APIs. ### Error Handling * For APIs returning an `int` error code. The only acceptable return types are `AWS_OP_SUCCESS` and `AWS_OP_ERR`. Before returning control to the caller, if you have an error to raise, use the `aws_raise_error()` function. * For APIs returning an allocated instance of an object, return the memory on success, and `NULL` on failure. Before returning control to the caller, if you have an error to raise, use the `aws_raise_error()` function. #### Log Subjects & Error Codes The logging & error handling infrastructure is designed to support multiple libraries. For this to work, AWS maintained libraries have pre-slotted log subjects & error codes for each library. 
The currently allocated ranges are: | Range | Library Name | | --- | --- | | [0x0000, 0x0400) | aws-c-common | | [0x0400, 0x0800) | aws-c-io | | [0x0800, 0x0C00) | aws-c-http | | [0x0C00, 0x1000) | aws-c-compression | | [0x1000, 0x1400) | aws-c-eventstream | | [0x1400, 0x1800) | aws-c-mqtt | | [0x1800, 0x1C00) | aws-c-auth | | [0x1C00, 0x2000) | aws-c-cal | | [0x2000, 0x2400) | aws-crt-cpp | | [0x2400, 0x2800) | aws-crt-java | | [0x2800, 0x2C00) | aws-crt-python | | [0x2C00, 0x3000) | aws-crt-nodejs | | [0x3000, 0x3400) | aws-crt-dotnet | | [0x3400, 0x3800) | aws-c-iot | | [0x3800, 0x3C00) | aws-c-s3 | | [0x3C00, 0x4000) | aws-c-sdkutils | | [0x4000, 0x4400) | (reserved for future project) | | [0x4400, 0x4800) | (reserved for future project) | Each library should begin its error and log subject values at the beginning of its range and follow in sequence (don't skip codes). Upon adding an AWS maintained library, a new enum range must be approved and added to the above table. ### Testing We have a high bar for test coverage, and PRs fixing bugs or introducing new functionality need to have tests before they will be accepted. A couple of tips: #### Aws Test Harness We provide a test harness for writing unit tests. This includes an allocator that will fail your test if you have any memory leaks, as well as some `ASSERT` macros. To write a test: * Create a *.c test file in the tests directory of the project. * Implement one or more tests with the signature `int test_case_name(struct aws_allocator *, void *ctx)` * Use the `AWS_TEST_CASE` macro to declare the test. * Include your test in the `tests/main.c` file. * Include your test in the `tests/CMakeLists.txt` file. ### Coding Style * No Tabs. * Indent is 4 spaces. * K & R style for braces. * Space after if, before the `(`. * `else` and `else if` stay on the same line as the closing brace. Example: if (condition) { do_something(); } else { do_something_else(); } * Avoid C99 features in header files. For some types such as bool, uint32_t etc..., these are defined if not available for the language standard being used in `aws/common/common.h`, so feel free to use them. * For C++ compatibility, don't put const members in structs. * Avoid C++ style comments e.g. `//` in header files and prefer block style (`/* */`) for long blocks of text. C++ style comments are fine in C files. * All public API functions need C++ guards and Windows dll semantics. * Use Unix line endings. * Where implementation hiding is desired for either ABI or runtime polymorphism reasons, use the `void *impl` pattern. v-tables should be the last member in the struct. * For #ifdef, put a # as the first character on the line and then indent the compilation branches. Example: #ifdef FOO do_something(); # ifdef BAR do_something_else(); # endif #endif * For all error code names with the exception of aws-c-common, use `AWS_ERROR__`. * All error strings should be written using correct English grammar. * SNAKE_UPPER_CASE constants, macros, and enum members. * snake_lower_case everything else. * `static` (local file scope) variables that are not `const` are prefixed by `s_` and lower snake case. * Global variables not prefixed as `const` are prefixed by `g_` and lower snake case. * Thread local variables are prefixed as `tl_` and lower snake case. * Macros and `const` variables are upper snake case. * For constants, prefer anonymous enums. * Don't typedef structs. It breaks forward declaration ability. * Don't typedef enums. It breaks forward declaration ability. 
* typedef function definitions for use as function pointers as values and suffixed with _fn. Do this: typedef int(fn_name_fn)(void *); Not this: typedef int(*fn_name_fn)(void *); * If a callback may be async, then always have it be async. Callbacks that are sometimes async and sometimes sync are hard to code around and lead to bugs (see [this blog post](https://blog.ometer.com/2011/07/24/callbacks-synchronous-and-asynchronous/)). Unfortunately many callbacks in this codebase currently violate this rule, so be careful. But do not add any more. * Every source and header file must have a copyright header (The standard AWS one for apache 2). * Use standard include guards (e.g. #IFNDEF HEADER_NAME #define HEADER_NAME etc...). * Include order should be: the header for the translation unit for the .c file newline header files in a directory in alphabetical order newline header files not in a directory (system and stdlib headers) * Platform specifics should be handled in c files and partitioned by directory. * Do not use `extern inline`. It's too unpredictable between compiler versions and language standards. * Namespace all definitions in header files with `aws_?__`. Lib name is not always required if a conflict is not likely and it provides better ergonomics. * `init`, `clean_up`, `new`, `destroy` are suffixed to the function names for their object. Example: AWS_COMMON_API int aws_module_init(aws_module_t *module); AWS_COMMON_API void aws_module_clean_up(aws_module_t *module); AWS_COMMON_API aws_module_t *aws_module_new(aws_allocator_t *allocator); AWS_COMMON_API void aws_module_destroy(aws_module_t *module); * Avoid c-strings, and don't write code that depends on `NULL` terminators. Expose `struct aws_byte_buf` APIs and let the user figure it out. * There is only one valid character encoding-- UTF-8. Try not to ever need to care about character encodings, but where you do, the working assumption should always be UTF-8 unless it's something we don't get a choice in (e.g. a protocol explicitly mandates a character set). * If you are adding/using a compiler specific keyword, macro, or intrinsic, hide it behind a platform independent macro definition. This mainly applies to header files. Obviously, if you are writing a file that will only be built on a certain platform, you have more liberty on this. * When checking more than one error condition, check and log each condition separately with a unique message. Do this: if (options->callback == NULL) { AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - callback is null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (options->allocator == NULL) { AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - allocator is null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } Not this: if (options->callback == NULL || options->allocator == NULL) { AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - something is null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } ## CBMC To learn more about CBMC and proofs specifically, review the training material [here](https://model-checking.github.io/cbmc-training). The `verification/cbmc/proofs` directory contains CBMC proofs. In order to run these proofs you will need to install CBMC and other tools by following the instructions [here](https://model-checking.github.io/cbmc-training/installation.html). 
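As a worked illustration of the error handling and naming conventions above, here is a minimal sketch of a hypothetical `aws_myobj` module. The type, its fields, and the specific checks are invented purely for illustration and are not part of aws-c-common; the error functions are the ones described above, and the allocator helpers (`aws_mem_calloc`, `aws_mem_release`) are aws-c-common's allocation APIs.

    #include <aws/common/common.h>

    #include <stdio.h>

    /* Hypothetical object used only to illustrate the conventions above. */
    struct aws_myobj {
        struct aws_allocator *allocator;
        size_t capacity;
    };

    /* `new` functions allocate and initialize, returning the object or NULL on failure. */
    struct aws_myobj *aws_myobj_new(struct aws_allocator *allocator, size_t capacity) {
        if (capacity == 0) {
            /* Raise a descriptive error code, then signal failure by returning NULL. */
            aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
            return NULL;
        }

        struct aws_myobj *obj = aws_mem_calloc(allocator, 1, sizeof(struct aws_myobj));
        if (!obj) {
            /* Allocation failures raise AWS_ERROR_OOM, per the guidelines above. */
            aws_raise_error(AWS_ERROR_OOM);
            return NULL;
        }

        obj->allocator = allocator;
        obj->capacity = capacity;
        return obj;
    }

    /* The matching `destroy` releases everything `new` allocated, at the same level. */
    void aws_myobj_destroy(struct aws_myobj *obj) {
        if (obj) {
            aws_mem_release(obj->allocator, obj);
        }
    }

    /* Caller side: check for NULL, then consult aws_last_error() for the reason. */
    static void s_example_usage(struct aws_allocator *allocator) {
        struct aws_myobj *obj = aws_myobj_new(allocator, 16);
        if (!obj) {
            fprintf(stderr, "aws_myobj_new failed: %s\n", aws_error_str(aws_last_error()));
            return;
        }
        aws_myobj_destroy(obj);
    }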
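Similarly, a minimal unit test written against the AWS test harness described in the Testing section might look like the sketch below. The test body exercises `aws_byte_buf` purely as an example; as noted above, the test must still be listed in `tests/main.c` and `tests/CMakeLists.txt` before it will run.

    #include <aws/common/byte_buf.h>

    #include <aws/testing/aws_test_harness.h>

    /* Signature required by the harness; the harness allocator fails the test on leaks. */
    static int s_test_byte_buf_init_and_clean_up(struct aws_allocator *allocator, void *ctx) {
        (void)ctx;

        struct aws_byte_buf buf;
        ASSERT_SUCCESS(aws_byte_buf_init(&buf, allocator, 16));
        ASSERT_TRUE(buf.capacity >= 16);

        /* Forgetting this clean_up would fail the test via the leak-detecting allocator. */
        aws_byte_buf_clean_up(&buf);
        return AWS_OP_SUCCESS;
    }

    /* Declares the test; register it in tests/main.c and tests/CMakeLists.txt as described. */
    AWS_TEST_CASE(test_byte_buf_init_and_clean_up, s_test_byte_buf_init_and_clean_up)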
aws-crt-python-0.20.4+dfsg/crt/aws-c-common/THIRD-PARTY-LICENSES.txt000066400000000000000000000055651456575232400242460ustar00rootroot00000000000000** ittapi ittnotify.h; version v3.24.2 -- https://github.com/intel/ittapi/blob/master/include/ittnotify.h Copyright (C) 2005-2019 Intel Corporation SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause Amazon.com has chosen to use this file under the terms of the BSD-3-Clause license. Copyright (c) 2019 Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------ ** cJSON; version 1.7.16 -- https://github.com/DaveGamble/cJSON Copyright (c) 2009-2017 Dave Gamble and cJSON contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-common/bin/000077500000000000000000000000001456575232400212705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/bin/system_info/000077500000000000000000000000001456575232400236275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/bin/system_info/CMakeLists.txt000066400000000000000000000007441456575232400263740ustar00rootroot00000000000000project(print-sys-info C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB SI_SRC "*.c" ) set(SI_PROJECT_NAME print-sys-info) add_executable(${SI_PROJECT_NAME} ${SI_SRC}) aws_set_common_properties(${SI_PROJECT_NAME}) target_include_directories(${SI_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${SI_PROJECT_NAME} PRIVATE aws-c-common) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/bin/system_info/print_system_info.c000066400000000000000000000032211456575232400275440ustar00rootroot00000000000000 #include #include #include int main(void) { struct aws_allocator *allocator = aws_default_allocator(); aws_common_library_init(allocator); struct aws_logger_standard_options options = { .file = stderr, .level = AWS_LOG_LEVEL_TRACE, }; struct aws_logger logger; aws_logger_init_standard(&logger, allocator, &options); aws_logger_set(&logger); struct aws_system_environment *env = aws_system_environment_load(allocator); fprintf(stdout, "crt-detected env: {\n"); struct aws_byte_cursor virtualization_vendor = aws_system_environment_get_virtualization_vendor(env); fprintf( stdout, " 'virtualization vendor': '" PRInSTR "',\n", (int)virtualization_vendor.len, virtualization_vendor.ptr); struct aws_byte_cursor product_name = aws_system_environment_get_virtualization_product_name(env); fprintf(stdout, " 'product name': '" PRInSTR "',\n", (int)product_name.len, product_name.ptr); fprintf( stdout, " 'number of processors': '%lu',\n", (unsigned long)aws_system_environment_get_processor_count(env)); size_t numa_nodes = aws_system_environment_get_cpu_group_count(env); if (numa_nodes > 1) { fprintf(stdout, " 'numa architecture': 'true',\n"); fprintf(stdout, " 'number of numa nodes': '%lu'\n", (unsigned long)numa_nodes); } else { fprintf(stdout, " 'numa architecture': 'false'\n"); } fprintf(stdout, "}\n"); aws_system_environment_release(env); aws_logger_clean_up(&logger); aws_common_library_clean_up(); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/builder.json000066400000000000000000000010621456575232400230400ustar00rootroot00000000000000{ "name": "aws-c-common", "upstream": [], "downstream": [ { "name": "aws-checksums" }, { "name": "aws-c-cal" }, { "name": "aws-c-io" }, { "name": "aws-c-event-stream" }, { "name": "aws-c-compression" }, { "name": "aws-c-sdkutils" }, { "name": "aws-c-mqtt" }, { "name": "aws-c-http" }, { "name": "aws-c-auth" } ], "targets": { "windows": { "+build_steps": [ "build", ["dir", "/s", "/b"] ] } } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/000077500000000000000000000000001456575232400216005ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsCFlags.cmake000066400000000000000000000255731456575232400244300ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(CheckCCompilerFlag) include(CheckIncludeFile) include(CheckSymbolExists) include(CMakeParseArguments) # needed for CMake v3.4 and lower option(AWS_ENABLE_LTO "Enables LTO on libraries. 
Ensure this is set on all consumed targets, or linking will fail" OFF) option(LEGACY_COMPILER_SUPPORT "This enables builds with compiler versions such as gcc 4.1.2. This is not a 'supported' feature; it's just a best effort." OFF) option(AWS_SUPPORT_WIN7 "Restricts WINAPI calls to Win7 and older (This will have implications in downstream libraries that use TLS especially)" OFF) option(AWS_WARNINGS_ARE_ERRORS "Compiler warning is treated as an error. Try turning this off when observing errors on a new or uncommon compiler" OFF) option(AWS_ENABLE_TRACING "Enable tracing macros" OFF) option(AWS_STATIC_MSVC_RUNTIME_LIBRARY "Windows-only. Turn ON to use the statically-linked MSVC runtime lib, instead of the DLL" OFF) option(STATIC_CRT "Deprecated. Use AWS_STATIC_MSVC_RUNTIME_LIBRARY instead" OFF) # Check for Posix Large Files Support (LFS). # On most 64bit systems, LFS is enabled by default. # On some 32bit systems, LFS must be enabled by via defines before headers are included. # For more info, see docs: # https://www.gnu.org/software/libc/manual/html_node/File-Position-Primitive.html # https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html function(aws_check_posix_lfs extra_flags variable) list(APPEND CMAKE_REQUIRED_FLAGS ${extra_flags}) check_c_source_compiles(" #include /* fails to compile if off_t smaller than 64bits */ typedef char array[sizeof(off_t) >= 8 ? 1 : -1]; int main() { return 0; }" HAS_64BIT_FILE_OFFSET_${variable}) if (HAS_64BIT_FILE_OFFSET_${variable}) # sometimes off_t is 64bit, but fseeko() is missing (ex: Android API < 24) check_symbol_exists(fseeko "stdio.h" HAS_FSEEKO_${variable}) if (HAS_FSEEKO_${variable}) set(${variable} 1 PARENT_SCOPE) endif() endif() endfunction() # This function will set all common flags on a target # Options: # NO_WGNU: Disable -Wgnu # NO_WEXTRA: Disable -Wextra # NO_PEDANTIC: Disable -pedantic function(aws_set_common_properties target) set(options NO_WGNU NO_WEXTRA NO_PEDANTIC NO_LTO) cmake_parse_arguments(SET_PROPERTIES "${options}" "" "" ${ARGN}) if(MSVC) # Remove other /W flags if(CMAKE_C_FLAGS MATCHES "/W[0-4]") string(REGEX REPLACE "/W[0-4]" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}" PARENT_SCOPE) endif() list(APPEND AWS_C_FLAGS /W4 /MP) if(AWS_WARNINGS_ARE_ERRORS) list(APPEND AWS_C_FLAGS /WX) endif() # /volatile:iso relaxes some implicit memory barriers that MSVC normally applies for volatile accesses # Since we want to be compatible with user builds using /volatile:iso, use it for the tests. list(APPEND AWS_C_FLAGS /volatile:iso) # disable non-constant initializer warning, it's not non-standard, just for Microsoft list(APPEND AWS_C_FLAGS /wd4204) # disable passing the address of a local warning. Again, not non-standard, just for Microsoft list(APPEND AWS_C_FLAGS /wd4221) if (AWS_SUPPORT_WIN7) # Use only APIs available in Win7 and later message(STATUS "Windows 7 support requested, forcing WINVER and _WIN32_WINNT to 0x0601") list(APPEND AWS_C_FLAGS /DWINVER=0x0601) list(APPEND AWS_C_FLAGS /D_WIN32_WINNT=0x0601) list(APPEND AWS_C_FLAGS /DAWS_SUPPORT_WIN7=1) endif() # Set MSVC runtime libary. 
# Note: there are other ways of doing this if we bump our CMake minimum to 3.14+ # See: https://cmake.org/cmake/help/latest/policy/CMP0091.html if (AWS_STATIC_MSVC_RUNTIME_LIBRARY OR STATIC_CRT) list(APPEND AWS_C_FLAGS "/MT$<$:d>") else() list(APPEND AWS_C_FLAGS "/MD$<$:d>") endif() else() list(APPEND AWS_C_FLAGS -Wall -Wstrict-prototypes) list(APPEND AWS_C_FLAGS $<$>:-fno-omit-frame-pointer>) if(AWS_WARNINGS_ARE_ERRORS) list(APPEND AWS_C_FLAGS -Werror) endif() if(NOT SET_PROPERTIES_NO_WEXTRA) list(APPEND AWS_C_FLAGS -Wextra) endif() if(NOT SET_PROPERTIES_NO_PEDANTIC) list(APPEND AWS_C_FLAGS -pedantic) endif() # Warning disables always go last to avoid future flags re-enabling them list(APPEND AWS_C_FLAGS -Wno-long-long) # Always enable position independent code, since this code will always end up in a shared lib check_c_compiler_flag("-fPIC -Werror" HAS_FPIC_FLAG) if (HAS_FPIC_FLAG) list(APPEND AWS_C_FLAGS -fPIC) endif() if (LEGACY_COMPILER_SUPPORT) list(APPEND AWS_C_FLAGS -Wno-strict-aliasing) endif() # -moutline-atomics generates code for both older load/store exclusive atomics and also # Arm's Large System Extensions (LSE) which scale substantially better on large core count systems. # # Test by compiling a program that actually uses atomics. # Previously we'd simply used check_c_compiler_flag() but that wasn't detecting # some real-world problems (see https://github.com/awslabs/aws-c-common/issues/902). if (AWS_ARCH_ARM64) set(old_flags "${CMAKE_REQUIRED_FLAGS}") set(CMAKE_REQUIRED_FLAGS "-moutline-atomics -Werror") check_c_source_compiles(" int main() { int x = 1; __atomic_fetch_add(&x, -1, __ATOMIC_SEQ_CST); return x; }" HAS_MOUTLINE_ATOMICS) set(CMAKE_REQUIRED_FLAGS "${old_flags}") if (HAS_MOUTLINE_ATOMICS) list(APPEND AWS_C_FLAGS -moutline-atomics) endif() endif() # Check for Posix Large File Support (LFS). # Doing this check here, instead of AwsFeatureTests.cmake, # because we might need to modify AWS_C_FLAGS to enable it. set(HAS_LFS FALSE) aws_check_posix_lfs("" BY_DEFAULT) if (BY_DEFAULT) set(HAS_LFS TRUE) else() aws_check_posix_lfs("-D_FILE_OFFSET_BITS=64" VIA_DEFINES) if (VIA_DEFINES) list(APPEND AWS_C_FLAGS "-D_FILE_OFFSET_BITS=64") set(HAS_LFS TRUE) endif() endif() # This becomes a define in config.h set(AWS_HAVE_POSIX_LARGE_FILE_SUPPORT ${HAS_LFS} CACHE BOOL "Posix Large File Support") # Hide symbols from libcrypto.a # This avoids problems when an application ends up using both libcrypto.a and libcrypto.so. # # An example of this happening is the aws-c-io tests. # All the C libs are compiled statically, but then a PKCS#11 library is # loaded at runtime which happens to use libcrypto.so from OpenSSL. # If the symbols from libcrypto.a aren't hidden, then SOME function calls use the libcrypto.a implementation # and SOME function calls use the libcrypto.so implementation, and this mismatch leads to weird crashes. 
if (UNIX AND NOT APPLE) # If we used target_link_options() (CMake 3.13+) we could make these flags PUBLIC set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS " -Wl,--exclude-libs,libcrypto.a") endif() endif() check_include_file(stdint.h HAS_STDINT) check_include_file(stdbool.h HAS_STDBOOL) if (NOT HAS_STDINT) list(APPEND AWS_C_FLAGS -DNO_STDINT) endif() if (NOT HAS_STDBOOL) list(APPEND AWS_C_FLAGS -DNO_STDBOOL) endif() if(NOT SET_PROPERTIES_NO_WGNU) check_c_compiler_flag("-Wgnu -Werror" HAS_WGNU) if(HAS_WGNU) # -Wgnu-zero-variadic-macro-arguments results in a lot of false positives list(APPEND AWS_C_FLAGS -Wgnu -Wno-gnu-zero-variadic-macro-arguments) # some platforms implement htonl family of functions via GNU statement expressions (https://gcc.gnu.org/onlinedocs/gcc/Statement-Exprs.html) # which generates -Wgnu-statement-expression warning. set(old_flags "${CMAKE_REQUIRED_FLAGS}") set(CMAKE_REQUIRED_FLAGS "-Wgnu -Werror") check_c_source_compiles(" #include int main() { uint32_t x = 0; x = htonl(x); return (int)x; }" NO_GNU_EXPR) set(CMAKE_REQUIRED_FLAGS "${old_flags}") if (NOT NO_GNU_EXPR) list(APPEND AWS_C_FLAGS -Wno-gnu-statement-expression) endif() endif() endif() # some platforms (especially when cross-compiling) do not have the sysconf API in their toolchain files. check_c_source_compiles(" #include int main() { sysconf(_SC_NPROCESSORS_ONLN); }" HAVE_SYSCONF) if (HAVE_SYSCONF) list(APPEND AWS_C_DEFINES_PRIVATE -DHAVE_SYSCONF) endif() list(APPEND AWS_C_DEFINES_PRIVATE $<$:DEBUG_BUILD>) if ((NOT SET_PROPERTIES_NO_LTO) AND AWS_ENABLE_LTO) # enable except in Debug builds set(_ENABLE_LTO_EXPR $>) # try to check whether compiler supports LTO/IPO if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) include(CheckIPOSupported OPTIONAL RESULT_VARIABLE ipo_check_exists) if (ipo_check_exists) check_ipo_supported(RESULT ipo_supported) if (ipo_supported) message(STATUS "Enabling IPO/LTO for Release builds") else() message(STATUS "AWS_ENABLE_LTO is enabled, but cmake/compiler does not support it, disabling") set(_ENABLE_LTO_EXPR OFF) endif() endif() endif() else() set(_ENABLE_LTO_EXPR OFF) endif() if(BUILD_SHARED_LIBS) if (NOT MSVC) # this should only be set when building shared libs. list(APPEND AWS_C_FLAGS "-fvisibility=hidden") endif() endif() if(AWS_ENABLE_TRACING) target_link_libraries(${target} PRIVATE ittnotify) else() # Disable intel notify api if tracing is not enabled list(APPEND AWS_C_DEFINES_PRIVATE -DINTEL_NO_ITTNOTIFY_API) endif() target_compile_options(${target} PRIVATE ${AWS_C_FLAGS}) target_compile_definitions(${target} PRIVATE ${AWS_C_DEFINES_PRIVATE} PUBLIC ${AWS_C_DEFINES_PUBLIC}) set_target_properties(${target} PROPERTIES LINKER_LANGUAGE C C_STANDARD 99 C_STANDARD_REQUIRED ON) set_target_properties(${target} PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${_ENABLE_LTO_EXPR}>) endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsCRuntime.cmake000066400000000000000000000030611456575232400250030ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. # This function detects the local host's c runtime and writes a tag into the supplied output variable # Output is "cruntime" on non-Linux platforms. Output is "glibc" or "musl" on Linux platforms. # # Intended usage is for managed language CRTs to use this function to build native artifact paths, facilitating # support for alternative C runtimes like Musl. 
# function(aws_determine_local_c_runtime target) if (CMAKE_SYSTEM_NAME STREQUAL "Linux") execute_process(COMMAND "ldd" "--version" OUTPUT_VARIABLE AWS_LDD_OUTPUT ERROR_VARIABLE AWS_LDD_OUTPUT) string(TOLOWER "${AWS_LDD_OUTPUT}" AWS_LDD_OUTPUT_LOWER) message(STATUS "ldd output lower: ${AWS_LDD_OUTPUT_LOWER}") string(FIND "${AWS_LDD_OUTPUT_LOWER}" "musl" AWS_MUSL_INDEX) string(FIND "${AWS_LDD_OUTPUT_LOWER}" "glibc" AWS_GLIBC_INDEX) string(FIND "${AWS_LDD_OUTPUT_LOWER}" "gnu" AWS_GNU_INDEX) if (NOT(${AWS_MUSL_INDEX} EQUAL -1)) message(STATUS "MUSL libc detected") set(${target} "musl" PARENT_SCOPE) else() if ((NOT(${AWS_GLIBC_INDEX} EQUAL -1)) OR (NOT(${AWS_GNU_INDEX} EQUAL -1))) message(STATUS "Gnu libc detected") else() message(STATUS "Could not determine C runtime, defaulting to gnu libc") endif() set(${target} "glibc" PARENT_SCOPE) endif() else() set(${target} "cruntime" PARENT_SCOPE) endif() endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsCheckHeaders.cmake000066400000000000000000000101231456575232400255630ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. # This cmake logic verifies that each of our headers is complete, in that it # #includes any necessary dependencies, and that it builds under C++ as well. # # To do so, we generate a single-line C or C++ source file that includes each # header, and link all of these stub source files into a test executable. option(PERFORM_HEADER_CHECK "Performs compile-time checks that each header can be included independently. Requires a C++ compiler.") if (PERFORM_HEADER_CHECK) enable_language(CXX) endif() # Call as: aws_check_headers(${target} HEADERS TO CHECK LIST) function(aws_check_headers target) if (NOT PERFORM_HEADER_CHECK) return() endif() # parse function arguments set(options IS_CXX) cmake_parse_arguments(ARG "${options}" "" "" ${ARGN}) aws_check_headers_internal(${target} 11 ${ARG_IS_CXX} ${ARG_UNPARSED_ARGUMENTS}) aws_check_headers_internal(${target} 14 ${ARG_IS_CXX} ${ARG_UNPARSED_ARGUMENTS}) aws_check_headers_internal(${target} 17 ${ARG_IS_CXX} ${ARG_UNPARSED_ARGUMENTS}) aws_check_headers_internal(${target} 20 ${ARG_IS_CXX} ${ARG_UNPARSED_ARGUMENTS}) aws_check_headers_internal(${target} 23 ${ARG_IS_CXX} ${ARG_UNPARSED_ARGUMENTS}) endfunction() function(aws_check_headers_internal target std is_cxx) # Check that compiler supports this std list (FIND CMAKE_CXX_COMPILE_FEATURES "cxx_std_${std}" feature_idx) if (${feature_idx} LESS 0) return() endif() set(HEADER_CHECKER_ROOT "${CMAKE_CURRENT_BINARY_DIR}/header-checker-cxx${std}") # Write stub main file set(HEADER_CHECKER_MAIN "${HEADER_CHECKER_ROOT}/headerchecker_main.c") set(HEADER_CHECKER_LIB ${target}-header-check-cxx${std}) file(WRITE ${HEADER_CHECKER_MAIN} " int main(int argc, char **argv) { (void)argc; (void)argv; return 0; }\n") add_executable(${HEADER_CHECKER_LIB} ${HEADER_CHECKER_MAIN}) target_link_libraries(${HEADER_CHECKER_LIB} ${target}) target_compile_definitions(${HEADER_CHECKER_LIB} PRIVATE AWS_UNSTABLE_TESTING_API=1 AWS_HEADER_CHECKER=1) # We want to be able to verify that the proper C++ header guards are in place, so # build this target as a C++ application set_target_properties(${HEADER_CHECKER_LIB} PROPERTIES LINKER_LANGUAGE CXX CXX_STANDARD ${std} CXX_STANDARD_REQUIRED 0 C_STANDARD 99 ) # Ensure our headers can be included by an application with its warnings set very high if(MSVC) # MSVC complains about windows' own header files. 
Use /W4 instead of /Wall target_compile_options(${HEADER_CHECKER_LIB} PRIVATE /W4 /WX) else() target_compile_options(${HEADER_CHECKER_LIB} PRIVATE -Wall -Wextra -Wpedantic -Werror) endif() foreach(header IN LISTS ARGN) if (NOT ${header} MATCHES "\\.inl$") # create unique token for this file, e.g.: # "${CMAKE_CURRENT_SOURCE_DIR}/include/aws/common/byte_buf.h" -> "aws_common_byte_buf_h" file(RELATIVE_PATH include_path "${CMAKE_CURRENT_SOURCE_DIR}/include" ${header}) # replace non-alphanumeric characters with underscores string(REGEX REPLACE "[^a-zA-Z0-9]" "_" unique_token ${include_path}) set(c_file "${HEADER_CHECKER_ROOT}/headerchecker_${unique_token}.c") set(cpp_file "${HEADER_CHECKER_ROOT}/headerchecker_${unique_token}.cpp") # include header twice to check for include-guards # define a unique int or compiler complains that there's nothing in the file file(WRITE "${cpp_file}" "#include <${include_path}>\n#include <${include_path}>\nint ${unique_token}_cpp;") target_sources(${HEADER_CHECKER_LIB} PUBLIC "${cpp_file}") if (NOT is_cxx) file(WRITE "${c_file}" "#include <${include_path}>\n#include <${include_path}>\nint ${unique_token}_c;\n") target_sources(${HEADER_CHECKER_LIB} PUBLIC "${c_file}") endif() endif() endforeach(header) endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsFeatureTests.cmake000066400000000000000000000061041456575232400256740ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(CheckCSourceRuns) include(AwsCFlags) option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON) # In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances. # This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better # work-around, disable avx2 (and all other extensions) in mingw builds. # # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412 # if(MINGW) message(STATUS "MINGW detected! 
Disabling avx2 and other CPU extensions") set(USE_CPU_EXTENSIONS OFF) endif() if(NOT CMAKE_CROSSCOMPILING) check_c_source_runs(" #include bool foo(int a, int b, int *c) { return __builtin_mul_overflow(a, b, c); } int main() { int out; if (foo(1, 2, &out)) { return 0; } return 0; }" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) endif() check_c_source_compiles(" #include #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) int main() { return 0; } #else it's not windows desktop #endif " AWS_HAVE_WINAPI_DESKTOP) check_c_source_compiles(" int main() { #if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)) # error \"not intel\" #endif return 0; } " AWS_ARCH_INTEL) check_c_source_compiles(" int main() { #if !(defined(__aarch64__) || defined(_M_ARM64)) # error \"not arm64\" #endif return 0; } " AWS_ARCH_ARM64) check_c_source_compiles(" int main() { #if !(defined(__arm__) || defined(_M_ARM)) # error \"not arm\" #endif return 0; } " AWS_ARCH_ARM32) check_c_source_compiles(" int main() { int foo = 42, bar = 24; __asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\"); }" AWS_HAVE_GCC_INLINE_ASM) check_c_source_compiles(" #include int main() { #ifdef __linux__ getauxval(AT_HWCAP); getauxval(AT_HWCAP2); #endif return 0; }" AWS_HAVE_AUXV) string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}") if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU) check_c_source_compiles(" #include #include int main() { backtrace(NULL, 0); return 0; }" AWS_HAVE_EXECINFO) endif() check_c_source_compiles(" #include int main() { return 1; }" AWS_HAVE_LINUX_IF_LINK_H) if(MSVC) check_c_source_compiles(" #include int main() { unsigned __int64 a = 0x0fffffffffffffffI64; unsigned __int64 b = 0xf0000000I64; unsigned __int64 c, d; d = _umul128(a, b, &c); return 0; }" AWS_HAVE_MSVC_INTRINSICS_X64) endif() # This does a lot to detect when intrinsics are available and has to set cflags to do so. # leave it in its own file for ease of managing it. include(AwsSIMD) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsFindPackage.cmake000066400000000000000000000023761456575232400254210ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. option(IN_SOURCE_BUILD "If the CRT libs are being built from your source tree (add_subdirectory), set this to ON" OFF) # This function handles dependency list building based on if traditional CMAKE modules via. find_package should be # used, vs if this is an in source build via. something like git submodules and add_subdirectory. # This is largely because CMake was not well planned out, and as a result, in-source and modules don't play well # together. Only use this on CRT libraries (including S2N), libcrypto will stay as an assumed external dependency. # # package_name: is the name of the package to find # DEP_AWS_LIBS: output variable will be appended after each call to this function. You don't have to use it, # but it can be passed directly target_link_libraries and it will be the properly qualified library # name and namespace based on configuration. function(aws_use_package package_name) if (IN_SOURCE_BUILD) set(DEP_AWS_LIBS ${DEP_AWS_LIBS} ${package_name} PARENT_SCOPE) else() find_package(${package_name} REQUIRED) set(DEP_AWS_LIBS ${DEP_AWS_LIBS} AWS::${package_name} PARENT_SCOPE) endif() endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsLibFuzzer.cmake000066400000000000000000000042111456575232400251670ustar00rootroot00000000000000# Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(CTest) include(AwsSanitizers) option(ENABLE_FUZZ_TESTS "Build and run fuzz tests" OFF) set(FUZZ_TESTS_MAX_TIME 60 CACHE STRING "Max time to run each fuzz test") # Adds fuzz tests to ctest # Options: # fuzz_files: The list of fuzz test files # other_files: Other files to link into each fuzz test # corpus_dir: directory where corpus files can be found function(aws_add_fuzz_tests fuzz_files other_files corpus_dir) if(ENABLE_FUZZ_TESTS) if(NOT ENABLE_SANITIZERS) message(FATAL_ERROR "ENABLE_FUZZ_TESTS is set but ENABLE_SANITIZERS is set to OFF") endif() aws_check_sanitizer(fuzzer) if (NOT HAS_SANITIZER_fuzzer) message(FATAL_ERROR "ENABLE_FUZZ_TESTS is set but the current compiler (${CMAKE_CXX_COMPILER_ID}) doesn't support -fsanitize=fuzzer") endif() foreach(test_file ${fuzz_files}) get_filename_component(TEST_FILE_NAME ${test_file} NAME_WE) set(FUZZ_BINARY_NAME ${PROJECT_NAME}-fuzz-${TEST_FILE_NAME}) add_executable(${FUZZ_BINARY_NAME} ${test_file} ${other_files}) target_link_libraries(${FUZZ_BINARY_NAME} PRIVATE ${PROJECT_NAME}) aws_set_common_properties(${FUZZ_BINARY_NAME}) aws_add_sanitizers(${FUZZ_BINARY_NAME} SANITIZERS "fuzzer") target_compile_definitions(${FUZZ_BINARY_NAME} PRIVATE AWS_UNSTABLE_TESTING_API=1) target_include_directories(${FUZZ_BINARY_NAME} PRIVATE ${CMAKE_CURRENT_LIST_DIR}) if (corpus_dir) file(TO_NATIVE_PATH "${corpus_dir}/${TEST_FILE_NAME}" TEST_CORPUS_DIR) endif() if (TEST_CORPUS_DIR AND (EXISTS "${TEST_CORPUS_DIR}")) add_test(NAME fuzz_${TEST_FILE_NAME} COMMAND ${FUZZ_BINARY_NAME} -timeout=1 -max_total_time=${FUZZ_TESTS_MAX_TIME} "${TEST_CORPUS_DIR}") else() add_test(NAME fuzz_${TEST_FILE_NAME} COMMAND ${FUZZ_BINARY_NAME} -timeout=1 -max_total_time=${FUZZ_TESTS_MAX_TIME}) endif() endforeach() endif() endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsSIMD.cmake000066400000000000000000000052461456575232400240200ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(CheckCCompilerFlag) include(CheckIncludeFile) if (USE_CPU_EXTENSIONS) if (MSVC) check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG) if (HAVE_M_AVX2_FLAG) set(AVX_CFLAGS "/arch:AVX2") endif() else() check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG) if (HAVE_M_AVX2_FLAG) set(AVX_CFLAGS "-mavx -mavx2") endif() endif() if (MSVC) check_c_compiler_flag("/arch:AVX512" HAVE_M_AVX512_FLAG) if (HAVE_M_AVX512_FLAG) # docs imply AVX512 brings in AVX2. And it will compile, but it will break at runtime on # instructions such as _mm256_load_si256(). Leave it on. 
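# Usage sketch for the aws_add_fuzz_tests() helper defined in AwsLibFuzzer.cmake
# above (illustrative only; the source glob and corpus directory are hypothetical,
# and the call is a no-op unless ENABLE_FUZZ_TESTS and ENABLE_SANITIZERS are ON
# with a compiler that supports -fsanitize=fuzzer):
#   file(GLOB FUZZ_TEST_SRC "tests/fuzz/*.c")
#   aws_add_fuzz_tests("${FUZZ_TEST_SRC}" "" "${CMAKE_CURRENT_SOURCE_DIR}/tests/fuzz/corpus")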
set(AVX_CFLAGS "/arch:AVX512 /arch:AVX2") endif() else() check_c_compiler_flag("-mavx512f -mvpclmulqdq" HAVE_M_AVX512_FLAG) if (HAVE_M_AVX512_FLAG) set(AVX_CFLAGS "-mavx512f -mvpclmulqdq -mpclmul -mavx -mavx2 -msse4.2") endif() endif() set(old_flags "${CMAKE_REQUIRED_FLAGS}") set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX_CFLAGS}") check_c_source_compiles(" #include #include #include int main() { __m256i vec; memset(&vec, 0, sizeof(vec)); _mm256_shuffle_epi8(vec, vec); _mm256_set_epi32(1,2,3,4,5,6,7,8); _mm256_permutevar8x32_epi32(vec, vec); return 0; }" AWS_HAVE_AVX2_INTRINSICS) check_c_source_compiles(" #include int main() { __m512 a = _mm512_setzero_ps(); return 0; }" AWS_HAVE_AVX512_INTRINSICS) check_c_source_compiles(" #include #include int main() { __m256i vec; memset(&vec, 0, sizeof(vec)); return (int)_mm256_extract_epi64(vec, 2); }" AWS_HAVE_MM256_EXTRACT_EPI64) set(CMAKE_REQUIRED_FLAGS "${old_flags}") endif() # USE_CPU_EXTENSIONS # The part where the definition is added to the compiler flags has been moved to config.h.in # see git history for more details. # Adds AVX flags, if any, that are supported. These files will be built with # available avx intrinsics enabled. # Usage: simd_add_source_avx(target file1.c file2.c ...) function(simd_add_source_avx target) foreach(file ${ARGN}) target_sources(${target} PRIVATE ${file}) set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS "${AVX_CFLAGS}") endforeach() endfunction(simd_add_source_avx) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsSanitizers.cmake000066400000000000000000000053361456575232400254170ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(CheckCCompilerFlag) option(ENABLE_SANITIZERS "Enable sanitizers in debug builds" OFF) set(SANITIZERS "address;undefined" CACHE STRING "List of sanitizers to build with") # This function checks if a sanitizer is available # Options: # sanitizer: The sanitizer to check # out_variable: The variable to assign the result to. 
Defaults to HAS_SANITIZER_${sanitizer} function(aws_check_sanitizer sanitizer) if(NOT ${ARGN}) set(out_variable "${ARGN}") else() set(out_variable HAS_SANITIZER_${sanitizer}) # Sanitize the variable name to remove illegal characters string(MAKE_C_IDENTIFIER ${out_variable} out_variable) endif() if(ENABLE_SANITIZERS) # When testing for libfuzzer, if attempting to link there will be 2 mains if(${sanitizer} STREQUAL "fuzzer") set(sanitizer_test_flag -fsanitize=fuzzer-no-link) else() set(sanitizer_test_flag -fsanitize=${sanitizer}) endif() # Need to set this here so that the flag is passed to the linker set(CMAKE_REQUIRED_FLAGS ${sanitizer_test_flag}) check_c_compiler_flag(${sanitizer_test_flag} ${out_variable}) else() set(${out_variable} 0 PARENT_SCOPE) endif() endfunction() # This function enables sanitizers on the given target # Options: # SANITIZERS: The list of extra sanitizers to enable function(aws_add_sanitizers target) set(multiValueArgs SANITIZERS) cmake_parse_arguments(SANITIZER "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if (NOT ENABLE_SANITIZERS) return() endif() list(APPEND SANITIZER_SANITIZERS ${SANITIZERS}) foreach(sanitizer IN LISTS SANITIZER_SANITIZERS) set(sanitizer_variable HAS_SANITIZER_${sanitizer}) # Sanitize the variable name to remove illegal characters string(MAKE_C_IDENTIFIER ${sanitizer_variable} sanitizer_variable) aws_check_sanitizer(${sanitizer} ${sanitizer_variable}) if(${${sanitizer_variable}}) if (NOT "${PRESENT_SANITIZERS}" STREQUAL "") set(PRESENT_SANITIZERS "${PRESENT_SANITIZERS},") endif() set(PRESENT_SANITIZERS "${PRESENT_SANITIZERS}${sanitizer}") endif() endforeach() if(PRESENT_SANITIZERS) target_compile_options(${target} PRIVATE -fno-omit-frame-pointer -fsanitize=${PRESENT_SANITIZERS}) target_link_libraries(${target} PUBLIC "-fno-omit-frame-pointer -fsanitize=${PRESENT_SANITIZERS}") string(REPLACE "," ";" PRESENT_SANITIZERS "${PRESENT_SANITIZERS}") set(${target}_SANITIZERS ${PRESENT_SANITIZERS} PARENT_SCOPE) endif() endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsSharedLibSetup.cmake000066400000000000000000000055561456575232400261460ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
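# Usage sketch for the sanitizer helpers defined in AwsSanitizers.cmake above
# (illustrative only; target names are hypothetical, and both calls are no-ops
# unless the build is configured with -DENABLE_SANITIZERS=ON):
#   aws_add_sanitizers(my-test-driver)                       # default SANITIZERS list: address;undefined
#   aws_add_sanitizers(my-fuzz-driver SANITIZERS "fuzzer")   # default list plus libFuzzer instrumentation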
set(LIBRARY_DIRECTORY lib) set(RUNTIME_DIRECTORY bin) # Set the default lib installation path on GNU systems with GNUInstallDirs if (UNIX AND NOT APPLE) include(GNUInstallDirs) set(LIBRARY_DIRECTORY ${CMAKE_INSTALL_LIBDIR}) set(RUNTIME_DIRECTORY ${CMAKE_INSTALL_BINDIR}) # this is the absolute dumbest thing in the world, but find_package won't work without it # also I verified this is correctly NOT "lib64" when CMAKE_C_FLAGS includes "-m32" if (${LIBRARY_DIRECTORY} STREQUAL "lib64") set(FIND_LIBRARY_USE_LIB64_PATHS true) endif() endif() function(aws_prepare_shared_lib_exports target) if (BUILD_SHARED_LIBS) install(TARGETS ${target} EXPORT ${target}-targets ARCHIVE DESTINATION ${LIBRARY_DIRECTORY} COMPONENT Development LIBRARY DESTINATION ${LIBRARY_DIRECTORY} NAMELINK_SKIP COMPONENT Runtime RUNTIME DESTINATION ${RUNTIME_DIRECTORY} COMPONENT Runtime) install(TARGETS ${target} EXPORT ${target}-targets LIBRARY DESTINATION ${LIBRARY_DIRECTORY} NAMELINK_ONLY COMPONENT Development) else() install(TARGETS ${target} EXPORT ${target}-targets ARCHIVE DESTINATION ${LIBRARY_DIRECTORY} COMPONENT Development) endif() endfunction() function(aws_prepare_symbol_visibility_args target lib_prefix) if (BUILD_SHARED_LIBS) target_compile_definitions(${target} PUBLIC "-D${lib_prefix}_USE_IMPORT_EXPORT") target_compile_definitions(${target} PRIVATE "-D${lib_prefix}_EXPORTS") endif() endfunction() # Strips debug info from the target shared library or executable, and puts it in a $.dbg # archive, then links the original binary to the dbg archive so gdb will find it # This only applies to Unix shared libs and executables, windows has pdbs. # This is only done on Release and RelWithDebInfo build types function(aws_split_debug_info target) if (UNIX AND CMAKE_BUILD_TYPE MATCHES Rel AND CMAKE_STRIP AND CMAKE_OBJCOPY) get_target_property(target_type ${target} TYPE) if (target_type STREQUAL "SHARED_LIBRARY" OR target_type STREQUAL "EXECUTABLE") add_custom_command(TARGET ${target} POST_BUILD COMMAND ${CMAKE_OBJCOPY} --only-keep-debug $ $.dbg COMMAND ${CMAKE_STRIP} --strip-debug --strip-unneeded $ COMMAND ${CMAKE_OBJCOPY} --add-gnu-debuglink=$.dbg $) endif() endif() endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsTestHarness.cmake000066400000000000000000000076651456575232400255360ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(AwsCFlags) include(AwsSanitizers) option(ENABLE_NET_TESTS "Run tests requiring an internet connection." ON) # Maintain a global list of AWS_TEST_CASES define_property(GLOBAL PROPERTY AWS_TEST_CASES BRIEF_DOCS "Test Cases" FULL_DOCS "Test Cases") set(AWS_TEST_CASES "" CACHE INTERNAL "Test cases valid for this configuration") # The return value for the skipped test cases. Refer to the return code defined in aws_test_harness.h: # #define SKIP (103) set(SKIP_RETURN_CODE_VALUE 103) # Registers a test case by name (the first argument to the AWS_TEST_CASE macro in aws_test_harness.h) macro(add_test_case name) list(APPEND TEST_CASES "${name}") list(APPEND AWS_TEST_CASES "${name}") set_property(GLOBAL PROPERTY AWS_TEST_CASES ${AWS_TEST_CASES}) endmacro() # Like add_test_case, but for tests that require a working internet connection. 
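# Usage sketch for the shared-library helpers defined in AwsSharedLibSetup.cmake
# above (illustrative only; the target and symbol prefix are hypothetical):
#   aws_prepare_symbol_visibility_args(aws-c-foo "AWS_FOO")  # defines AWS_FOO_USE_IMPORT_EXPORT / AWS_FOO_EXPORTS when shared
#   aws_prepare_shared_lib_exports(aws-c-foo)                # install() rules for the library and its export set
#   aws_split_debug_info(aws-c-foo)                          # splits out a .dbg file on Unix Release/RelWithDebInfo builds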
macro(add_net_test_case name) if (ENABLE_NET_TESTS) list(APPEND TEST_CASES "${name}") list(APPEND AWS_TEST_CASES "${name}") set_property(GLOBAL PROPERTY AWS_TEST_CASES ${AWS_TEST_CASES}) endif() endmacro() # Generate a test driver executable with the given name function(generate_test_driver driver_exe_name) create_test_sourcelist(test_srclist test_runner.c ${TEST_CASES}) # Write clang tidy file that disables all but one check to avoid false positives file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/.clang-tidy" "Checks: '-*,misc-static-assert'") add_executable(${driver_exe_name} ${CMAKE_CURRENT_BINARY_DIR}/test_runner.c ${TESTS}) aws_set_common_properties(${driver_exe_name} NO_WEXTRA NO_PEDANTIC) # Some versions of CMake (3.9-3.11) generate a test_runner.c file with # a strncpy() call that triggers the "stringop-overflow" warning in GCC 8.1+ # This warning doesn't exist until GCC 7 though, so test for it before disabling. if (NOT MSVC) check_c_compiler_flag(-Wno-stringop-overflow HAS_WNO_STRINGOP_OVERFLOW) if (HAS_WNO_STRINGOP_OVERFLOW) SET_SOURCE_FILES_PROPERTIES(test_runner.c PROPERTIES COMPILE_FLAGS -Wno-stringop-overflow) endif() endif() aws_add_sanitizers(${driver_exe_name} ${${PROJECT_NAME}_SANITIZERS}) target_link_libraries(${driver_exe_name} PRIVATE ${PROJECT_NAME}) set_target_properties(${driver_exe_name} PROPERTIES LINKER_LANGUAGE C C_STANDARD 99) target_compile_definitions(${driver_exe_name} PRIVATE AWS_UNSTABLE_TESTING_API=1) target_include_directories(${driver_exe_name} PRIVATE ${CMAKE_CURRENT_LIST_DIR}) foreach(name IN LISTS TEST_CASES) add_test(${name} ${driver_exe_name} "${name}") endforeach() # Clear test cases in case another driver needs to be generated unset(TEST_CASES PARENT_SCOPE) endfunction() function(generate_cpp_test_driver driver_exe_name) create_test_sourcelist(test_srclist test_runner.cpp ${TEST_CASES}) add_executable(${driver_exe_name} ${CMAKE_CURRENT_BINARY_DIR}/test_runner.cpp ${TESTS}) target_link_libraries(${driver_exe_name} PRIVATE ${PROJECT_NAME}) set_target_properties(${driver_exe_name} PROPERTIES LINKER_LANGUAGE CXX) if (MSVC) if(AWS_STATIC_MSVC_RUNTIME_LIBRARY OR STATIC_CRT) target_compile_options(${driver_exe_name} PRIVATE "/MT$<$:d>") else() target_compile_options(${driver_exe_name} PRIVATE "/MD$<$:d>") endif() endif() target_compile_definitions(${driver_exe_name} PRIVATE AWS_UNSTABLE_TESTING_API=1) target_include_directories(${driver_exe_name} PRIVATE ${CMAKE_CURRENT_LIST_DIR}) foreach(name IN LISTS TEST_CASES) add_test(${name} ${driver_exe_name} "${name}") set_tests_properties("${name}" PROPERTIES SKIP_RETURN_CODE ${SKIP_RETURN_CODE_VALUE}) endforeach() # Clear test cases in case another driver needs to be generated unset(TEST_CASES PARENT_SCOPE) endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsThreadAffinity.cmake000066400000000000000000000035731456575232400261660ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(CheckSymbolExists) # Check if the platform supports setting thread affinity # (important for hitting full NIC entitlement on NUMA architectures) function(aws_set_thread_affinity_method target) # Non-POSIX, Android, and Apple platforms do not support thread affinity. 
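# Usage sketch for the test-harness helpers defined in AwsTestHarness.cmake above
# (illustrative only; the test and driver names are hypothetical, and each name
# must match an AWS_TEST_CASE() registered in the C test sources):
#   add_test_case(foo_basic_lifecycle)
#   add_net_test_case(foo_fetch_over_network)   # skipped when ENABLE_NET_TESTS is OFF
#   generate_test_driver(${PROJECT_NAME}-tests)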
if (NOT UNIX OR ANDROID OR APPLE) target_compile_definitions(${target} PRIVATE -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE) return() endif() list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) list(APPEND CMAKE_REQUIRED_LIBRARIES pthread) set(headers "pthread.h") # BSDs put nonportable pthread declarations in a separate header. if(CMAKE_SYSTEM_NAME MATCHES BSD) set(headers "${headers};pthread_np.h") endif() # Using pthread attrs is the preferred method, but is glibc-specific. check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY) if (USE_PTHREAD_ATTR_SETAFFINITY) target_compile_definitions(${target} PRIVATE -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR) return() endif() # This method is still nonportable, but is supported by musl and BSDs. check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY) if (USE_PTHREAD_SETAFFINITY) target_compile_definitions(${target} PRIVATE -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD) return() endif() # If we got here, we expected thread affinity support but didn't find it. # We still build with degraded NUMA performance, but show a warning. message(WARNING "No supported method for setting thread affinity") target_compile_definitions(${target} PRIVATE -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE) endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/AwsThreadName.cmake000066400000000000000000000104051456575232400252650ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include(CheckSymbolExists) # Check how the platform supports setting thread name function(aws_set_thread_name_method target) function(aws_set_thread_name_setter_method target) if (APPLE) # All Apple platforms we support have 1 arg version of the function. # So skip compile time check here and instead check if its apple in # the thread code. return() endif() # pthread_setname_np() usually takes 2 args check_c_source_compiles(" ${c_source_start} pthread_setname_np(thread_id, \"asdf\"); ${c_source_end}" PTHREAD_SETNAME_TAKES_2ARGS) if (PTHREAD_SETNAME_TAKES_2ARGS) target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS) return() endif() # OpenBSD's function takes 2 args, but has a different name. check_c_source_compiles(" ${c_source_start} pthread_set_name_np(thread_id, \"asdf\"); ${c_source_end}" PTHREAD_SET_NAME_TAKES_2ARGS) if (PTHREAD_SET_NAME_TAKES_2ARGS) target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SET_NAME_TAKES_2ARGS) return() endif() # But on NetBSD it takes 3! check_c_source_compiles(" ${c_source_start} pthread_setname_np(thread_id, \"asdf\", NULL); ${c_source_end} " PTHREAD_SETNAME_TAKES_3ARGS) if (PTHREAD_SETNAME_TAKES_3ARGS) target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS) return() endif() # And on many older/weirder platforms it's just not supported # Consider using prctl if we really want to support those endfunction() function(aws_set_thread_name_getter_method target) if (APPLE) # All Apple platforms we support have the same function, so no need for # compile-time check. 
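# Usage sketch for the thread-configuration helpers above (illustrative only;
# the target name is hypothetical):
#   aws_set_thread_affinity_method(aws-c-common)   # selects an AWS_AFFINITY_METHOD_* compile definition
#   aws_set_thread_name_method(aws-c-common)       # selects the matching pthread_(get|set)name_np variant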
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_GETNAME_TAKES_3ARGS) return() endif() # Some platforms have 2 arg version check_c_source_compiles(" ${c_source_start} char name[16] = {0}; pthread_getname_np(thread_id, name); ${c_source_end} " PTHREAD_GETNAME_TAKES_2ARGS) if (PTHREAD_GETNAME_TAKES_2ARGS) target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_GETNAME_TAKES_2ARGS) return() endif() # Some platforms have 2 arg version but with a different name (eg, OpenBSD) check_c_source_compiles(" ${c_source_start} char name[16] = {0}; pthread_get_name_np(thread_id, name); ${c_source_end} " PTHREAD_GET_NAME_TAKES_2ARGS) if (PTHREAD_GET_NAME_TAKES_2ARGS) target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_GET_NAME_TAKES_2ARGS) return() endif() # But majority have 3 check_c_source_compiles(" ${c_source_start} char name[16] = {0}; pthread_getname_np(thread_id, name, 16); ${c_source_end} " PTHREAD_GETNAME_TAKES_3ARGS) if (PTHREAD_GETNAME_TAKES_3ARGS) target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_GETNAME_TAKES_3ARGS) return() endif() endfunction() if (WIN32) # On Windows we do a runtime check for both getter and setter, instead of compile-time check return() endif() list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) list(APPEND CMAKE_REQUIRED_LIBRARIES pthread) # The start of the test program set(c_source_start " #define _GNU_SOURCE #include #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) #include #endif int main() { pthread_t thread_id; ") # The end of the test program set(c_source_end "}") aws_set_thread_name_setter_method(${target}) aws_set_thread_name_getter_method(${target}) endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/CPackConfig.cmake000066400000000000000000000050321456575232400247110ustar00rootroot00000000000000# Configuration for CPack packaging # --------------------------------- if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux") message(STATUS "Packaging is only supported on Linux") return() endif() # Check for a RedHat-based OS if (EXISTS /etc/redhat-release) set(CPACK_GENERATOR RPM) else() message(STATUS "Packaging currently only supported on Fedora.") return() endif() # We'll want 2 RPMS, one for runtime files and one for development files set(CPACK_RPM_COMPONENT_INSTALL ON) set(CPACK_COMPONENTS_ALL Development Runtime) set(CPACK_RPM_MAIN_COMPONENT Runtime) # Configure package names set(CPACK_PACKAGE_NAME ${PROJECT_NAME} CACHE STRING "") set(CPACK_RPM_Development_PACKAGE_NAME "${CPACK_PACKAGE_NAME}-devel") # Configure package summaries set(CPACK_PACKAGE_SUMMARY "Core c99 package for AWS SDK for C") set(CPACK_PACKAGE_DESCRIPTION ${CPACK_PACKAGE_SUMMARY}) set(CPACK_RPM_Development_PACKAGE_SUMMARY "Development files for ${CPACK_PACKAGE_NAME}") # Configure package versioning/metadata set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION}) set(CPACK_PACKAGE_RELEASE "1" CACHE STRING "") set(CPACK_PACKAGE_CONTACT "TODO ") set(CPACK_PACKAGE_VENDOR "Amazon") set(CPACK_RPM_PACKAGE_LICENSE "ASL 2.0") set(CPACK_RPM_PACKAGE_URL "https://github.com/awslabs/aws-c-common") # Configure the RPM filenames set(CPACK_RPM_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}.${CMAKE_SYSTEM_PROCESSOR}.rpm") set(CPACK_RPM_Development_FILE_NAME "${CPACK_RPM_Development_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}.${CMAKE_SYSTEM_PROCESSOR}.rpm") # Make the development package depend on the runtime one set(CPACK_RPM_Development_PACKAGE_REQUIRES "${CPACK_PACKAGE_NAME} = ${PROJECT_VERSION}") # Set the 
changelog file set(CPACK_RPM_CHANGELOG_FILE "${CMAKE_CURRENT_LIST_DIR}/rpm-scripts/changelog.txt") # If we are building shared libraries, we need to run ldconfig. Unfortunately, # we can't set this per-component yet, so we'll have to do it on the devel # package too if (BUILD_SHARED_LIBS) set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${CMAKE_CURRENT_LIST_DIR}/rpm-scripts/post.sh") set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${CMAKE_CURRENT_LIST_DIR}/rpm-scripts/postun.sh") endif() # By default, we'll try to claim the cmake directory under the library directory # and the aws include directory. We have to share both of these set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION /usr/${LIBRARY_DIRECTORY}/cmake /usr/include/aws) # Include CPack, which generates the package target include(CPack) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/aws-c-common-config.cmake000066400000000000000000000011471456575232400263500ustar00rootroot00000000000000set(THREADS_PREFER_PTHREAD_FLAG ON) if(WIN32 OR UNIX OR APPLE) find_package(Threads REQUIRED) endif() macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/rpm-scripts/000077500000000000000000000000001456575232400240635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/rpm-scripts/changelog.txt000066400000000000000000000001531456575232400265520ustar00rootroot00000000000000* Mon Dec 17 2018 Philip Salvaggio - 0.1.0-1 Initial RPM created with CPack aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/rpm-scripts/post.sh000066400000000000000000000000171456575232400254020ustar00rootroot00000000000000/sbin/ldconfig aws-crt-python-0.20.4+dfsg/crt/aws-c-common/cmake/rpm-scripts/postun.sh000066400000000000000000000000171456575232400257450ustar00rootroot00000000000000/sbin/ldconfig aws-crt-python-0.20.4+dfsg/crt/aws-c-common/format-check.sh000077500000000000000000000010271456575232400234220ustar00rootroot00000000000000#!/usr/bin/env bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if ! type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests verification -type f \( -name '*.h' -o -name '*.c' -o -name '*.inl' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/000077500000000000000000000000001456575232400221435ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/000077500000000000000000000000001456575232400227355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/000077500000000000000000000000001456575232400242255ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/allocator.h000066400000000000000000000223471456575232400263660ustar00rootroot00000000000000#ifndef AWS_COMMON_ALLOCATOR_H #define AWS_COMMON_ALLOCATOR_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /* * Quick guide to allocators: * CRT offers several flavours of allocators: * - default: basic allocator that invokes system one directly. * - aligned: basic allocator that aligns small allocations on 8 byte * boundary and big buffers on 32/64 byte (system dependent) boundary. * Aligned mem can improve perf on some operations, like memcpy or hashes. * Depending on a system, can result in higher peak memory count in heavy * acquire/free scenarios (ex. s3), due to memory fragmentation related to how * aligned allocators work (over allocate, find aligned offset, release extra memory) * - wrapped_cf: wraps MacOS's Security Framework allocator. * - mem_tracer: wraps any allocator and provides tracing functionality to allocations * - small_block_allocator: pools smaller allocations into preallocated buckets. * Not actively maintained. Avoid if possible. */ /* Allocator structure. An instance of this will be passed around for anything needing memory allocation */ struct aws_allocator { void *(*mem_acquire)(struct aws_allocator *allocator, size_t size); void (*mem_release)(struct aws_allocator *allocator, void *ptr); /* Optional method; if not supported, this pointer must be NULL */ void *(*mem_realloc)(struct aws_allocator *allocator, void *oldptr, size_t oldsize, size_t newsize); /* Optional method; if not supported, this pointer must be NULL */ void *(*mem_calloc)(struct aws_allocator *allocator, size_t num, size_t size); void *impl; }; /** * Inexpensive (constant time) check of data-structure invariants. */ AWS_COMMON_API bool aws_allocator_is_valid(const struct aws_allocator *alloc); AWS_COMMON_API struct aws_allocator *aws_default_allocator(void); /* * Allocator that align small allocations on 8 byte boundary and big allocations * on 32/64 byte boundary. */ AWS_COMMON_API struct aws_allocator *aws_aligned_allocator(void); #ifdef __MACH__ /* Avoid pulling in CoreFoundation headers in a header file. */ struct __CFAllocator; /* NOLINT(bugprone-reserved-identifier) */ typedef const struct __CFAllocator *CFAllocatorRef; /** * Wraps a CFAllocator around aws_allocator. For Mac only. Use this anytime you need a CFAllocatorRef for interacting * with Apple Frameworks. Unfortunately, it allocates memory so we can't make it static file scope, be sure to call * aws_wrapped_cf_allocator_destroy when finished. */ AWS_COMMON_API CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator); /** * Cleans up any resources alloced in aws_wrapped_cf_allocator_new. */ AWS_COMMON_API void aws_wrapped_cf_allocator_destroy(CFAllocatorRef allocator); #endif /** * Returns at least `size` of memory ready for usage. In versions v0.6.8 and prior, this function was allowed to return * NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle * conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a * GC etc...before returning. */ AWS_COMMON_API void *aws_mem_acquire(struct aws_allocator *allocator, size_t size); /** * Allocates a block of memory for an array of num elements, each of them size bytes long, and initializes all its bits * to zero. In versions v0.6.8 and prior, this function was allowed to return NULL. * In later versions, if allocator->mem_calloc() returns NULL, this function will assert and exit. 
To handle * conditions where OOM is not a fatal error, allocator->mem_calloc() is responsible for finding/reclaiming/running a * GC etc...before returning. */ AWS_COMMON_API void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size); /** * Allocates many chunks of bytes into a single block. Expects to be called with alternating void ** (dest), size_t * (size). The first void ** will be set to the root of the allocation. Alignment is assumed to be sizeof(intmax_t). * * This is useful for allocating structs using the pimpl pattern, as you may allocate the public object and impl object * in the same contiguous block of memory. * * Returns a pointer to the allocation. * * In versions v0.6.8 and prior, this function was allowed to return * NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle * conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a * GC etc...before returning. */ AWS_COMMON_API void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...); /** * Releases ptr back to whatever allocated it. * Nothing happens if ptr is NULL. */ AWS_COMMON_API void aws_mem_release(struct aws_allocator *allocator, void *ptr); /** * Attempts to adjust the size of the pointed-to memory buffer from oldsize to * newsize. The pointer (*ptr) may be changed if the memory needs to be * reallocated. * * In versions v0.6.8 and prior, this function was allowed to return * NULL. In later versions, if allocator->mem_realloc() returns NULL, this function will assert and exit. To handle * conditions where OOM is not a fatal error, allocator->mem_realloc() is responsible for finding/reclaiming/running a * GC etc...before returning. */ AWS_COMMON_API int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, size_t newsize); /* * Maintainer note: The above function doesn't return the pointer (as with * standard C realloc) as this pattern becomes error-prone when OOMs occur. * In particular, we want to avoid losing the old pointer when an OOM condition * occurs, so we prefer to take the old pointer as an in/out reference argument * that we can leave unchanged on failure. */ enum aws_mem_trace_level { AWS_MEMTRACE_NONE = 0, /* no tracing */ AWS_MEMTRACE_BYTES = 1, /* just track allocation sizes and total allocated */ AWS_MEMTRACE_STACKS = 2, /* capture callstacks for each allocation */ }; /* * Wraps an allocator and tracks all external allocations. If aws_mem_trace_dump() is called * and there are still allocations active, they will be reported to the aws_logger at TRACE level. * allocator - The allocator to wrap * deprecated - Deprecated arg, ignored. * level - The level to track allocations at * frames_per_stack is how many frames to store per callstack if AWS_MEMTRACE_STACKS is in use, * otherwise it is ignored. 8 tends to be a pretty good number balancing storage space vs useful stacks. * Returns the tracer allocator, which should be used for all allocations that should be tracked. */ AWS_COMMON_API struct aws_allocator *aws_mem_tracer_new( struct aws_allocator *allocator, struct aws_allocator *deprecated, enum aws_mem_trace_level level, size_t frames_per_stack); /* * Unwraps the traced allocator and cleans up the tracer. 
* Returns the original allocator */ AWS_COMMON_API struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator); /* * If there are outstanding allocations, dumps them to log, along with any information gathered * based on the trace level set when aws_mem_trace() was called. * Should be passed the tracer allocator returned from aws_mem_trace(). */ AWS_COMMON_API void aws_mem_tracer_dump(struct aws_allocator *trace_allocator); /* * Returns the current number of bytes in outstanding allocations */ AWS_COMMON_API size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator); /* * Returns the current number of outstanding allocations */ AWS_COMMON_API size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator); /* * Creates a new Small Block Allocator which fronts the supplied parent allocator. The SBA will intercept * and handle small allocs, and will forward anything larger to the parent allocator. * If multi_threaded is true, the internal allocator will protect its internal data structures with a mutex */ AWS_COMMON_API struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *allocator, bool multi_threaded); /* * Destroys a Small Block Allocator instance and frees its memory to the parent allocator. The parent * allocator will otherwise be unaffected. */ AWS_COMMON_API void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator); /* * Returns the number of bytes currently active in the SBA */ AWS_COMMON_API size_t aws_small_block_allocator_bytes_active(struct aws_allocator *sba_allocator); /* * Returns the number of bytes reserved in pages/bins inside the SBA, e.g. the * current system memory used by the SBA */ AWS_COMMON_API size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator); /* * Returns the page size that the SBA is using */ AWS_COMMON_API size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator); /* * Returns the amount of memory in each page available to user allocations */ AWS_COMMON_API size_t aws_small_block_allocator_page_size_available(struct aws_allocator *sba_allocator); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_ALLOCATOR_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/array_list.h000066400000000000000000000205531456575232400265540ustar00rootroot00000000000000#ifndef AWS_COMMON_ARRAY_LIST_H #define AWS_COMMON_ARRAY_LIST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL enum { AWS_ARRAY_LIST_DEBUG_FILL = 0xDD }; struct aws_array_list { struct aws_allocator *alloc; size_t current_size; size_t length; size_t item_size; void *data; }; /** * Prototype for a comparator function for sorting elements. * * a and b should be cast to pointers to the element type held in the list * before being dereferenced. The function should compare the elements and * return a positive number if a > b, zero if a = b, and a negative number * if a < b. */ typedef int(aws_array_list_comparator_fn)(const void *a, const void *b); AWS_EXTERN_C_BEGIN /** * Initializes an array list with an array of size initial_item_allocation * item_size. In this mode, the array size * will grow by a factor of 2 upon insertion if space is not available. initial_item_allocation is the number of * elements you want space allocated for. item_size is the size of each element in bytes. Mixing items types is not * supported by this API. 
*/ AWS_STATIC_IMPL int aws_array_list_init_dynamic( struct aws_array_list *AWS_RESTRICT list, struct aws_allocator *alloc, size_t initial_item_allocation, size_t item_size); /** * Initializes an array list with a preallocated array of void *. item_count is the number of elements in the array, * and item_size is the size in bytes of each element. Mixing items types is not supported * by this API. Once this list is full, new items will be rejected. */ AWS_STATIC_IMPL void aws_array_list_init_static( struct aws_array_list *AWS_RESTRICT list, void *raw_array, size_t item_count, size_t item_size); /** * Initializes an array list with a preallocated array of *already-initialized* elements. item_count is the number of * elements in the array, and item_size is the size in bytes of each element. * * Once initialized, nothing further can be added to the list, since it will be full and cannot resize. * * Primary use case is to treat an already-initialized C array as an array list. */ AWS_STATIC_IMPL void aws_array_list_init_static_from_initialized( struct aws_array_list *AWS_RESTRICT list, void *raw_array, size_t item_count, size_t item_size); /** * Set of properties of a valid aws_array_list. */ AWS_STATIC_IMPL bool aws_array_list_is_valid(const struct aws_array_list *AWS_RESTRICT list); /** * Deallocates any memory that was allocated for this list, and resets list for reuse or deletion. */ AWS_STATIC_IMPL void aws_array_list_clean_up(struct aws_array_list *AWS_RESTRICT list); /** * Erases and then deallocates any memory that was allocated for this list, and resets list for reuse or deletion. */ AWS_STATIC_IMPL void aws_array_list_clean_up_secure(struct aws_array_list *AWS_RESTRICT list); /** * Pushes the memory pointed to by val onto the end of internal list */ AWS_STATIC_IMPL int aws_array_list_push_back(struct aws_array_list *AWS_RESTRICT list, const void *val); /** * Copies the element at the front of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised */ AWS_STATIC_IMPL int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *val); /** * Pushes the memory pointed to by val onto the front of internal list. * This call results in shifting all of the elements in the list. Avoid this call unless that * is intended behavior. */ AWS_STATIC_IMPL int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val); /** * Deletes the element at the front of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised. * This call results in shifting all of the elements at the end of the array to the front. Avoid this call unless that * is intended behavior. */ AWS_STATIC_IMPL int aws_array_list_pop_front(struct aws_array_list *AWS_RESTRICT list); /** * Delete N elements from the front of the list. * Remaining elements are shifted to the front of the list. * If the list has less than N elements, the list is cleared. * This call is more efficient than calling aws_array_list_pop_front() N times. */ AWS_STATIC_IMPL void aws_array_list_pop_front_n(struct aws_array_list *AWS_RESTRICT list, size_t n); /** * Deletes the element this index in the list if it exists. * If element does not exist, AWS_ERROR_INVALID_INDEX will be raised. * This call results in shifting all remaining elements towards the front. * Avoid this call unless that is intended behavior. */ AWS_STATIC_IMPL int aws_array_list_erase(struct aws_array_list *AWS_RESTRICT list, size_t index); /** * Copies the element at the end of the list if it exists. 
If list is empty, AWS_ERROR_LIST_EMPTY will be raised. */ AWS_STATIC_IMPL int aws_array_list_back(const struct aws_array_list *AWS_RESTRICT list, void *val); /** * Deletes the element at the end of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised. */ AWS_STATIC_IMPL int aws_array_list_pop_back(struct aws_array_list *AWS_RESTRICT list); /** * Clears all elements in the array and resets length to zero. Size does not change in this operation. */ AWS_STATIC_IMPL void aws_array_list_clear(struct aws_array_list *AWS_RESTRICT list); /** * If in dynamic mode, shrinks the allocated array size to the minimum amount necessary to store its elements. */ AWS_COMMON_API int aws_array_list_shrink_to_fit(struct aws_array_list *AWS_RESTRICT list); /** * Copies the elements from from to to. If to is in static mode, it must at least be the same length as from. Any data * in to will be overwritten in this copy. */ AWS_COMMON_API int aws_array_list_copy(const struct aws_array_list *AWS_RESTRICT from, struct aws_array_list *AWS_RESTRICT to); /** * Swap contents between two dynamic lists. Both lists must use the same allocator. */ AWS_STATIC_IMPL void aws_array_list_swap_contents( struct aws_array_list *AWS_RESTRICT list_a, struct aws_array_list *AWS_RESTRICT list_b); /** * Returns the number of elements that can fit in the internal array. If list is initialized in dynamic mode, * the capacity changes over time. */ AWS_STATIC_IMPL size_t aws_array_list_capacity(const struct aws_array_list *AWS_RESTRICT list); /** * Returns the number of elements in the internal array. */ AWS_STATIC_IMPL size_t aws_array_list_length(const struct aws_array_list *AWS_RESTRICT list); /** * Copies the memory at index to val. If element does not exist, AWS_ERROR_INVALID_INDEX will be raised. */ AWS_STATIC_IMPL int aws_array_list_get_at(const struct aws_array_list *AWS_RESTRICT list, void *val, size_t index); /** * Copies the memory address of the element at index to *val. If element does not exist, AWS_ERROR_INVALID_INDEX will be * raised. */ AWS_STATIC_IMPL int aws_array_list_get_at_ptr(const struct aws_array_list *AWS_RESTRICT list, void **val, size_t index); /** * Ensures that the array list has enough capacity to store a value at the specified index. If there is not already * enough capacity, and the list is in dynamic mode, this function will attempt to allocate more memory, expanding the * list. In static mode, if 'index' is beyond the maximum index, AWS_ERROR_INVALID_INDEX will be raised. */ AWS_COMMON_API int aws_array_list_ensure_capacity(struct aws_array_list *AWS_RESTRICT list, size_t index); /** * Copies the the memory pointed to by val into the array at index. If in dynamic mode, the size will grow by a factor * of two when the array is full. In static mode, AWS_ERROR_INVALID_INDEX will be raised if the index is past the bounds * of the array. */ AWS_STATIC_IMPL int aws_array_list_set_at(struct aws_array_list *AWS_RESTRICT list, const void *val, size_t index); /** * Swap elements at the specified indices, which must be within the bounds of the array. */ AWS_COMMON_API void aws_array_list_swap(struct aws_array_list *AWS_RESTRICT list, size_t a, size_t b); /** * Sort elements in the list in-place according to the comparator function. 
*/ AWS_COMMON_API void aws_array_list_sort(struct aws_array_list *AWS_RESTRICT list, aws_array_list_comparator_fn *compare_fn); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_ARRAY_LIST_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/array_list.inl000066400000000000000000000340101456575232400271000ustar00rootroot00000000000000#ifndef AWS_COMMON_ARRAY_LIST_INL #define AWS_COMMON_ARRAY_LIST_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* This is implicitly included, but helps with editor highlighting */ #include /* * Do not add system headers here; add them to array_list.h. This file is included under extern "C" guards, * which might break system headers. */ AWS_EXTERN_C_BEGIN AWS_STATIC_IMPL int aws_array_list_init_dynamic( struct aws_array_list *AWS_RESTRICT list, struct aws_allocator *alloc, size_t initial_item_allocation, size_t item_size) { AWS_FATAL_PRECONDITION(list != NULL); AWS_FATAL_PRECONDITION(alloc != NULL); AWS_FATAL_PRECONDITION(item_size > 0); AWS_ZERO_STRUCT(*list); size_t allocation_size = 0; if (aws_mul_size_checked(initial_item_allocation, item_size, &allocation_size)) { goto error; } if (allocation_size > 0) { list->data = aws_mem_acquire(alloc, allocation_size); if (!list->data) { goto error; } #ifdef DEBUG_BUILD memset(list->data, AWS_ARRAY_LIST_DEBUG_FILL, allocation_size); #endif list->current_size = allocation_size; } list->item_size = item_size; list->alloc = alloc; AWS_FATAL_POSTCONDITION(list->current_size == 0 || list->data); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; error: AWS_POSTCONDITION(AWS_IS_ZEROED(*list)); return AWS_OP_ERR; } AWS_STATIC_IMPL void aws_array_list_init_static( struct aws_array_list *AWS_RESTRICT list, void *raw_array, size_t item_count, size_t item_size) { AWS_FATAL_PRECONDITION(list != NULL); AWS_FATAL_PRECONDITION(raw_array != NULL); AWS_FATAL_PRECONDITION(item_count > 0); AWS_FATAL_PRECONDITION(item_size > 0); AWS_ZERO_STRUCT(*list); list->alloc = NULL; size_t current_size = 0; int no_overflow = !aws_mul_size_checked(item_count, item_size, ¤t_size); AWS_FATAL_PRECONDITION(no_overflow); list->current_size = current_size; list->item_size = item_size; list->length = 0; list->data = raw_array; AWS_POSTCONDITION(aws_array_list_is_valid(list)); } AWS_STATIC_IMPL void aws_array_list_init_static_from_initialized( struct aws_array_list *AWS_RESTRICT list, void *raw_array, size_t item_count, size_t item_size) { aws_array_list_init_static(list, raw_array, item_count, item_size); list->length = item_count; AWS_POSTCONDITION(aws_array_list_is_valid(list)); } AWS_STATIC_IMPL bool aws_array_list_is_valid(const struct aws_array_list *AWS_RESTRICT list) { if (!list) { return false; } size_t required_size = 0; bool required_size_is_valid = (aws_mul_size_checked(list->length, list->item_size, &required_size) == AWS_OP_SUCCESS); bool current_size_is_valid = (list->current_size >= required_size); bool data_is_valid = AWS_IMPLIES(list->current_size == 0, list->data == NULL) && AWS_IMPLIES(list->current_size != 0, AWS_MEM_IS_WRITABLE(list->data, list->current_size)); bool item_size_is_valid = (list->item_size != 0); return required_size_is_valid && current_size_is_valid && data_is_valid && item_size_is_valid; } AWS_STATIC_IMPL void aws_array_list_clean_up(struct aws_array_list *AWS_RESTRICT list) { AWS_PRECONDITION(AWS_IS_ZEROED(*list) || 
aws_array_list_is_valid(list)); if (list->alloc && list->data) { aws_mem_release(list->alloc, list->data); } AWS_ZERO_STRUCT(*list); } AWS_STATIC_IMPL void aws_array_list_clean_up_secure(struct aws_array_list *AWS_RESTRICT list) { AWS_PRECONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list)); if (list->alloc && list->data) { aws_secure_zero((void *)list->data, list->current_size); aws_mem_release(list->alloc, list->data); } AWS_ZERO_STRUCT(*list); } AWS_STATIC_IMPL int aws_array_list_push_back(struct aws_array_list *AWS_RESTRICT list, const void *val) { AWS_PRECONDITION(aws_array_list_is_valid(list)); AWS_PRECONDITION( val && AWS_MEM_IS_READABLE(val, list->item_size), "Input pointer [val] must point writable memory of [list->item_size] bytes."); int err_code = aws_array_list_set_at(list, val, aws_array_list_length(list)); if (err_code && aws_last_error() == AWS_ERROR_INVALID_INDEX && !list->alloc) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE); } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return err_code; } AWS_STATIC_IMPL int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *val) { AWS_PRECONDITION(aws_array_list_is_valid(list)); AWS_PRECONDITION( val && AWS_MEM_IS_WRITABLE(val, list->item_size), "Input pointer [val] must point writable memory of [list->item_size] bytes."); if (aws_array_list_length(list) > 0) { memcpy(val, list->data, list->item_size); AWS_POSTCONDITION(AWS_BYTES_EQ(val, list->data, list->item_size)); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_EMPTY); } AWS_STATIC_IMPL int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val) { AWS_PRECONDITION(aws_array_list_is_valid(list)); AWS_PRECONDITION( val && AWS_MEM_IS_READABLE(val, list->item_size), "Input pointer [val] must point writable memory of [list->item_size] bytes."); size_t orig_len = aws_array_list_length(list); int err_code = aws_array_list_ensure_capacity(list, orig_len); if (err_code && aws_last_error() == AWS_ERROR_INVALID_INDEX && !list->alloc) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE); } else if (err_code) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return err_code; } if (orig_len) { memmove((uint8_t *)list->data + list->item_size, list->data, orig_len * list->item_size); } ++list->length; memcpy(list->data, val, list->item_size); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return err_code; } AWS_STATIC_IMPL int aws_array_list_pop_front(struct aws_array_list *AWS_RESTRICT list) { AWS_PRECONDITION(aws_array_list_is_valid(list)); if (aws_array_list_length(list) > 0) { aws_array_list_pop_front_n(list, 1); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_EMPTY); } AWS_STATIC_IMPL void aws_array_list_pop_front_n(struct aws_array_list *AWS_RESTRICT list, size_t n) { AWS_PRECONDITION(aws_array_list_is_valid(list)); if (n >= aws_array_list_length(list)) { aws_array_list_clear(list); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return; } if (n > 0) { size_t popping_bytes = list->item_size * n; size_t remaining_items = aws_array_list_length(list) - n; size_t remaining_bytes = remaining_items * list->item_size; memmove(list->data, (uint8_t *)list->data + popping_bytes, 
remaining_bytes); list->length = remaining_items; #ifdef DEBUG_BUILD memset((uint8_t *)list->data + remaining_bytes, AWS_ARRAY_LIST_DEBUG_FILL, popping_bytes); #endif } AWS_POSTCONDITION(aws_array_list_is_valid(list)); } int aws_array_list_erase(struct aws_array_list *AWS_RESTRICT list, size_t index) { AWS_PRECONDITION(aws_array_list_is_valid(list)); const size_t length = aws_array_list_length(list); if (index >= length) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_INVALID_INDEX); } if (index == 0) { /* Removing front element */ aws_array_list_pop_front(list); } else if (index == (length - 1)) { /* Removing back element */ aws_array_list_pop_back(list); } else { /* Removing middle element */ uint8_t *item_ptr = (uint8_t *)list->data + (index * list->item_size); uint8_t *next_item_ptr = item_ptr + list->item_size; size_t trailing_items = (length - index) - 1; size_t trailing_bytes = trailing_items * list->item_size; memmove(item_ptr, next_item_ptr, trailing_bytes); aws_array_list_pop_back(list); } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_STATIC_IMPL int aws_array_list_back(const struct aws_array_list *AWS_RESTRICT list, void *val) { AWS_PRECONDITION(aws_array_list_is_valid(list)); AWS_PRECONDITION( val && AWS_MEM_IS_WRITABLE(val, list->item_size), "Input pointer [val] must point writable memory of [list->item_size] bytes."); if (aws_array_list_length(list) > 0) { size_t last_item_offset = list->item_size * (aws_array_list_length(list) - 1); memcpy(val, (void *)((uint8_t *)list->data + last_item_offset), list->item_size); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_EMPTY); } AWS_STATIC_IMPL int aws_array_list_pop_back(struct aws_array_list *AWS_RESTRICT list) { AWS_PRECONDITION(aws_array_list_is_valid(list)); if (aws_array_list_length(list) > 0) { AWS_FATAL_PRECONDITION(list->data); size_t last_item_offset = list->item_size * (aws_array_list_length(list) - 1); memset((void *)((uint8_t *)list->data + last_item_offset), 0, list->item_size); list->length--; AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_EMPTY); } AWS_STATIC_IMPL void aws_array_list_clear(struct aws_array_list *AWS_RESTRICT list) { AWS_PRECONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list)); if (list->data) { #ifdef DEBUG_BUILD memset(list->data, AWS_ARRAY_LIST_DEBUG_FILL, list->current_size); #endif list->length = 0; } AWS_POSTCONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list)); } AWS_STATIC_IMPL void aws_array_list_swap_contents( struct aws_array_list *AWS_RESTRICT list_a, struct aws_array_list *AWS_RESTRICT list_b) { AWS_FATAL_PRECONDITION(list_a->alloc); AWS_FATAL_PRECONDITION(list_a->alloc == list_b->alloc); AWS_FATAL_PRECONDITION(list_a->item_size == list_b->item_size); AWS_FATAL_PRECONDITION(list_a != list_b); AWS_PRECONDITION(aws_array_list_is_valid(list_a)); AWS_PRECONDITION(aws_array_list_is_valid(list_b)); struct aws_array_list tmp = *list_a; *list_a = *list_b; *list_b = tmp; AWS_POSTCONDITION(aws_array_list_is_valid(list_a)); AWS_POSTCONDITION(aws_array_list_is_valid(list_b)); } AWS_STATIC_IMPL size_t aws_array_list_capacity(const struct aws_array_list *AWS_RESTRICT list) { AWS_FATAL_PRECONDITION(list->item_size); AWS_PRECONDITION(aws_array_list_is_valid(list)); size_t capacity = 
list->current_size / list->item_size; AWS_POSTCONDITION(aws_array_list_is_valid(list)); return capacity; } AWS_STATIC_IMPL size_t aws_array_list_length(const struct aws_array_list *AWS_RESTRICT list) { /* * This assert teaches clang-tidy and friends that list->data cannot be null in a non-empty * list. */ AWS_FATAL_PRECONDITION(!list->length || list->data); AWS_PRECONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list)); size_t len = list->length; AWS_POSTCONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list)); return len; } AWS_STATIC_IMPL int aws_array_list_get_at(const struct aws_array_list *AWS_RESTRICT list, void *val, size_t index) { AWS_PRECONDITION(aws_array_list_is_valid(list)); AWS_PRECONDITION( val && AWS_MEM_IS_WRITABLE(val, list->item_size), "Input pointer [val] must point writable memory of [list->item_size] bytes."); if (aws_array_list_length(list) > index) { memcpy(val, (void *)((uint8_t *)list->data + (list->item_size * index)), list->item_size); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_INVALID_INDEX); } AWS_STATIC_IMPL int aws_array_list_get_at_ptr(const struct aws_array_list *AWS_RESTRICT list, void **val, size_t index) { AWS_PRECONDITION(aws_array_list_is_valid(list)); AWS_PRECONDITION(val != NULL); if (aws_array_list_length(list) > index) { *val = (void *)((uint8_t *)list->data + (list->item_size * index)); AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_INVALID_INDEX); } AWS_STATIC_IMPL int aws_array_list_set_at(struct aws_array_list *AWS_RESTRICT list, const void *val, size_t index) { AWS_PRECONDITION(aws_array_list_is_valid(list)); AWS_PRECONDITION( val && AWS_MEM_IS_READABLE(val, list->item_size), "Input pointer [val] must point readable memory of [list->item_size] bytes."); if (aws_array_list_ensure_capacity(list, index)) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } AWS_FATAL_PRECONDITION(list->data); memcpy((void *)((uint8_t *)list->data + (list->item_size * index)), val, list->item_size); /* * This isn't perfect, but its the best I can come up with for detecting * length changes. */ if (index >= aws_array_list_length(list)) { if (aws_add_size_checked(index, 1, &list->length)) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_EXTERN_C_END #endif /* AWS_COMMON_ARRAY_LIST_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/assert.h000066400000000000000000000261251456575232400257050ustar00rootroot00000000000000#ifndef AWS_COMMON_ASSERT_H #define AWS_COMMON_ASSERT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN AWS_COMMON_API AWS_DECLSPEC_NORETURN void aws_fatal_assert(const char *cond_str, const char *file, int line) AWS_ATTRIBUTE_NORETURN; AWS_EXTERN_C_END #if defined(CBMC) # define AWS_PANIC_OOM(mem, msg) \ do { \ if (!(mem)) { \ fprintf(stderr, "%s: %s, line %d", msg, __FILE__, __LINE__); \ exit(-1); \ } \ } while (0) #else # define AWS_PANIC_OOM(mem, msg) \ do { \ if (!(mem)) { \ fprintf(stderr, "%s", msg); \ abort(); \ } \ } while (0) #endif /* defined(CBMC) */ #if defined(CBMC) # define AWS_ASSUME(cond) __CPROVER_assume(cond) #elif defined(_MSC_VER) # define AWS_ASSUME(cond) __assume(cond) # define AWS_UNREACHABLE() __assume(0) #elif defined(__clang__) # define AWS_ASSUME(cond) \ do { \ bool _result = (cond); \ __builtin_assume(_result); \ } while (false) # define AWS_UNREACHABLE() __builtin_unreachable() #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)) # define AWS_ASSUME(cond) ((cond) ? (void)0 : __builtin_unreachable()) # define AWS_UNREACHABLE() __builtin_unreachable() #else # define AWS_ASSUME(cond) # define AWS_UNREACHABLE() #endif #if defined(CBMC) # include # define AWS_ASSERT(cond) assert(cond) #elif defined(DEBUG_BUILD) || defined(__clang_analyzer__) # define AWS_ASSERT(cond) AWS_FATAL_ASSERT(cond) #else # define AWS_ASSERT(cond) #endif /* defined(CBMC) */ #if defined(CBMC) # define AWS_FATAL_ASSERT(cond) AWS_ASSERT(cond) #elif defined(__clang_analyzer__) # define AWS_FATAL_ASSERT(cond) \ if (!(cond)) { \ abort(); \ } #else # if defined(_MSC_VER) # define AWS_FATAL_ASSERT(cond) \ __pragma(warning(push)) __pragma(warning(disable : 4127)) /* conditional expression is constant */ \ if (!(cond)) { \ aws_fatal_assert(#cond, __FILE__, __LINE__); \ } \ __pragma(warning(pop)) # else # define AWS_FATAL_ASSERT(cond) \ do { \ if (!(cond)) { \ aws_fatal_assert(#cond, __FILE__, __LINE__); \ } \ } while (0) # endif /* defined(_MSC_VER) */ #endif /* defined(CBMC) */ /** * Define function contracts. * When the code is being verified using CBMC these contracts are formally verified; * When the code is built in debug mode, they are checked as much as possible using assertions * When the code is built in production mode, non-fatal contracts are not checked. * Violations of the function contracts are undefined behaviour. */ #ifdef CBMC // clang-format off // disable clang format, since it likes to break formatting of stringize macro. 
// seems to be fixed in v15 plus, but we are not ready to update to it yet # define AWS_PRECONDITION2(cond, explanation) __CPROVER_precondition((cond), (explanation)) # define AWS_PRECONDITION1(cond) __CPROVER_precondition((cond), #cond " check failed") # define AWS_FATAL_PRECONDITION2(cond, explanation) __CPROVER_precondition((cond), (explanation)) # define AWS_FATAL_PRECONDITION1(cond) __CPROVER_precondition((cond), #cond " check failed") # define AWS_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation)) # define AWS_POSTCONDITION1(cond) __CPROVER_assert((cond), #cond " check failed") # define AWS_FATAL_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation)) # define AWS_FATAL_POSTCONDITION1(cond) __CPROVER_assert((cond), #cond " check failed") # define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len)))) # define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len)))) // clang-format on #else # define AWS_PRECONDITION2(cond, expl) AWS_ASSERT(cond) # define AWS_PRECONDITION1(cond) AWS_ASSERT(cond) # define AWS_FATAL_PRECONDITION2(cond, expl) AWS_FATAL_ASSERT(cond) # define AWS_FATAL_PRECONDITION1(cond) AWS_FATAL_ASSERT(cond) # define AWS_POSTCONDITION2(cond, expl) AWS_ASSERT(cond) # define AWS_POSTCONDITION1(cond) AWS_ASSERT(cond) # define AWS_FATAL_POSTCONDITION2(cond, expl) AWS_FATAL_ASSERT(cond) # define AWS_FATAL_POSTCONDITION1(cond) AWS_FATAL_ASSERT(cond) /** * These macros should not be used in is_valid functions. * All validate functions are also used in assumptions for CBMC proofs, * which should not contain __CPROVER_*_ok primitives. The use of these primitives * in assumptions may lead to spurious results. * The C runtime does not give a way to check these properties, * but we can at least check that the pointer is valid. */ # define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (base)) # define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (base)) #endif /* CBMC */ /** * These macros can safely be used in validate functions. */ #define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (base)) #define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base)) /* Logical consequence. */ #define AWS_IMPLIES(a, b) (!(a) || (b)) /** * If and only if (iff) is a biconditional logical connective between statements a and b. * We need double negations (!!) here to work correctly for non-Boolean a and b values. * Equivalent to (AWS_IMPLIES(a, b) && AWS_IMPLIES(b, a)). */ #define AWS_IFF(a, b) (!!(a) == !!(b)) #define AWS_RETURN_ERROR_IF_IMPL(type, cond, err, explanation) \ do { \ if (!(cond)) { \ return aws_raise_error(err); \ } \ } while (0) #define AWS_RETURN_ERROR_IF3(cond, err, explanation) AWS_RETURN_ERROR_IF_IMPL("InternalCheck", cond, err, explanation) #define AWS_RETURN_ERROR_IF2(cond, err) AWS_RETURN_ERROR_IF3(cond, err, #cond " check failed") #define AWS_RETURN_ERROR_IF(...) 
CALL_OVERLOAD(AWS_RETURN_ERROR_IF, __VA_ARGS__) #define AWS_ERROR_PRECONDITION3(cond, err, explanation) AWS_RETURN_ERROR_IF_IMPL("Precondition", cond, err, explanation) #define AWS_ERROR_PRECONDITION2(cond, err) AWS_ERROR_PRECONDITION3(cond, err, #cond " check failed") #define AWS_ERROR_PRECONDITION1(cond) AWS_ERROR_PRECONDITION2(cond, AWS_ERROR_INVALID_ARGUMENT) #define AWS_ERROR_POSTCONDITION3(cond, err, explanation) \ AWS_RETURN_ERROR_IF_IMPL("Postcondition", cond, err, explanation) #define AWS_ERROR_POSTCONDITION2(cond, err) AWS_ERROR_POSTCONDITION3(cond, err, #cond " check failed") #define AWS_ERROR_POSTCONDITION1(cond) AWS_ERROR_POSTCONDITION2(cond, AWS_ERROR_INVALID_ARGUMENT) // The UNUSED is used to silence the complains of GCC for zero arguments in variadic macro #define AWS_PRECONDITION(...) CALL_OVERLOAD(AWS_PRECONDITION, __VA_ARGS__) #define AWS_FATAL_PRECONDITION(...) CALL_OVERLOAD(AWS_FATAL_PRECONDITION, __VA_ARGS__) #define AWS_POSTCONDITION(...) CALL_OVERLOAD(AWS_POSTCONDITION, __VA_ARGS__) #define AWS_FATAL_POSTCONDITION(...) CALL_OVERLOAD(AWS_FATAL_POSTCONDITION, __VA_ARGS__) #define AWS_ERROR_PRECONDITION(...) CALL_OVERLOAD(AWS_ERROR_PRECONDITION, __VA_ARGS__) #define AWS_ERROR_POSTCONDITION(...) CALL_OVERLOAD(AWS_ERROR_PRECONDITION, __VA_ARGS__) #define AWS_RETURN_WITH_POSTCONDITION(_rval, ...) \ do { \ AWS_POSTCONDITION(__VA_ARGS__); \ return _rval; \ } while (0) #define AWS_SUCCEED_WITH_POSTCONDITION(...) AWS_RETURN_WITH_POSTCONDITION(AWS_OP_SUCCESS, __VA_ARGS__) #define AWS_OBJECT_PTR_IS_READABLE(ptr) AWS_MEM_IS_READABLE((ptr), sizeof(*(ptr))) #define AWS_OBJECT_PTR_IS_WRITABLE(ptr) AWS_MEM_IS_WRITABLE((ptr), sizeof(*(ptr))) AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_ASSERT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/atomics.h000066400000000000000000000306361456575232400260450ustar00rootroot00000000000000#ifndef AWS_COMMON_ATOMICS_H #define AWS_COMMON_ATOMICS_H #include /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ AWS_PUSH_SANE_WARNING_LEVEL /** * struct aws_atomic_var represents an atomic variable - a value which can hold an integer or pointer * that can be manipulated atomically. struct aws_atomic_vars should normally only be manipulated * with atomics methods defined in this header. */ struct aws_atomic_var { void *value; }; /* Helpers for extracting the integer and pointer values from aws_atomic_var. */ #define AWS_ATOMIC_VAR_PTRVAL(var) ((var)->value) #define AWS_ATOMIC_VAR_INTVAL(var) (*(aws_atomic_impl_int_t *)(var)) /* * This enumeration specifies the memory ordering properties requested for a particular * atomic operation. The atomic operation may provide stricter ordering than requested. * Note that, within a single thread, all operations are still sequenced (that is, a thread * sees its own atomic writes and reads happening in program order, but other threads may * disagree on this ordering). * * The behavior of these memory orderings are the same as in the C11 atomics API; however, * we only implement a subset that can be portably implemented on the compilers we target. */ enum aws_memory_order { /** * No particular ordering constraints are guaranteed relative to other * operations at all; we merely ensure that the operation itself is atomic. */ aws_memory_order_relaxed = 0, /* aws_memory_order_consume - not currently implemented */ /** * Specifies acquire ordering. No reads or writes on the current thread can be * reordered to happen before this operation. 
This is typically paired with a release * ordering; any writes that happened on the releasing operation will be visible * after the paired acquire operation. * * Acquire ordering is only meaningful on load or load-store operations. */ aws_memory_order_acquire = 2, /* leave a spot for consume if we ever add it */ /** * Specifies release order. No reads or writes can be reordered to come after this * operation. Typically paired with an acquire operation. * * Release ordering is only meaningful on store or load-store operations. */ aws_memory_order_release, /** * Specifies acquire-release order; if this operation acts as a load, it acts as an * acquire operation; if it acts as a store, it acts as a release operation; if it's * a load-store, it does both. */ aws_memory_order_acq_rel, /* * Specifies sequentially consistent order. This behaves as acq_rel, but in addition, * all seq_cst operations appear to occur in some globally consistent order. * * TODO: Figure out how to correctly implement this in MSVC. It appears that interlocked * functions provide only acq_rel ordering. */ aws_memory_order_seq_cst }; /** * Statically initializes an aws_atomic_var to a given size_t value. */ #define AWS_ATOMIC_INIT_INT(x) \ { .value = (void *)(uintptr_t)(x) } /** * Statically initializes an aws_atomic_var to a given void * value. */ #define AWS_ATOMIC_INIT_PTR(x) \ { .value = (void *)(x) } AWS_EXTERN_C_BEGIN /* * Note: We do not use the C11 atomics API; this is because we want to make sure the representation * (and behavior) of atomic values is consistent, regardless of what --std= flag you pass to your compiler. * Since C11 atomics can silently introduce locks, we run the risk of creating such ABI inconsistencies * if we decide based on compiler features which atomics API to use, and in practice we expect to have * either the GNU or MSVC atomics anyway. * * As future work, we could test to see if the C11 atomics API on this platform behaves consistently * with the other APIs and use it if it does. */ /** * Initializes an atomic variable with an integer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. * * This operation does not imply a barrier. Ensure that you use an acquire-release barrier (or stronger) * when communicating the fact that initialization is complete to the other thread. Launching the thread * implies a sufficiently strong barrier. */ AWS_STATIC_IMPL void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n); /** * Initializes an atomic variable with a pointer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. * * This operation does not imply a barrier. Ensure that you use an acquire-release barrier (or stronger) * when communicating the fact that initialization is complete to the other thread. Launching the thread * implies a sufficiently strong barrier. */ AWS_STATIC_IMPL void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p); /** * Reads an atomic var as an integer, using the specified ordering, and returns the result. */ AWS_STATIC_IMPL size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order); /** * Reads an atomic var as an integer, using sequentially consistent ordering, and returns the result. 
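 *
 * For example (illustrative sketch only; "counter" is a hypothetical variable and the
 * worker-thread code is elided):
 *
 *     struct aws_atomic_var counter = AWS_ATOMIC_INIT_INT(0);
 *     // launch worker threads; thread launch implies a sufficiently strong barrier
 *     aws_atomic_fetch_add(&counter, 1);            // performed by any thread
 *     size_t seen = aws_atomic_load_int(&counter);  // sequentially consistent read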
*/ AWS_STATIC_IMPL size_t aws_atomic_load_int(volatile const struct aws_atomic_var *var); /** * Reads an atomic var as a pointer, using the specified ordering, and returns the result. */ AWS_STATIC_IMPL void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order); /** * Reads an atomic var as a pointer, using sequentially consistent ordering, and returns the result. */ AWS_STATIC_IMPL void *aws_atomic_load_ptr(volatile const struct aws_atomic_var *var); /** * Stores an integer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order); /** * Stores an integer into an atomic var, using sequentially consistent ordering. */ AWS_STATIC_IMPL void aws_atomic_store_int(volatile struct aws_atomic_var *var, size_t n); /** * Stores a pointer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order); /** * Stores a pointer into an atomic var, using sequentially consistent ordering. */ AWS_STATIC_IMPL void aws_atomic_store_ptr(volatile struct aws_atomic_var *var, void *p); /** * Exchanges an integer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL size_t aws_atomic_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order); /** * Exchanges an integer with the value in an atomic_var, using sequentially consistent ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL size_t aws_atomic_exchange_int(volatile struct aws_atomic_var *var, size_t n); /** * Exchanges a pointer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL void *aws_atomic_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order); /** * Exchanges an integer with the value in an atomic_var, using sequentially consistent ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL void *aws_atomic_exchange_ptr(volatile struct aws_atomic_var *var, void *p); /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. * Returns true if the compare was successful and the variable updated to desired. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t *expected, size_t desired, enum aws_memory_order order_success, enum aws_memory_order order_failure); /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure. * Returns true if the compare was successful and the variable updated to desired. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_int(volatile struct aws_atomic_var *var, size_t *expected, size_t desired); /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. 
Otherwise, *expected is set * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. * Returns true if the compare was successful and the variable updated to desired. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void **expected, void *desired, enum aws_memory_order order_success, enum aws_memory_order order_failure); /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure. * Returns true if the compare was successful and the variable updated to desired. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_ptr(volatile struct aws_atomic_var *var, void **expected, void *desired); /** * Atomically adds n to *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order); /** * Atomically subtracts n from *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order); /** * Atomically ORs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order); /** * Atomically ANDs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order); /** * Atomically XORs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order); /** * Atomically adds n to *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_add(volatile struct aws_atomic_var *var, size_t n); /** * Atomically subtracts n from *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_sub(volatile struct aws_atomic_var *var, size_t n); /** * Atomically ands n into *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_and(volatile struct aws_atomic_var *var, size_t n); /** * Atomically ors n into *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_or(volatile struct aws_atomic_var *var, size_t n); /** * Atomically xors n into *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_xor(volatile struct aws_atomic_var *var, size_t n); /** * Provides the same reordering guarantees as an atomic operation with the specified memory order, without * needing to actually perform an atomic operation. 
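 *
 * For example (illustrative sketch only; "shared_data" is ordinary memory and "ready" is a
 * hypothetical struct aws_atomic_var, neither defined here), a standalone release fence can
 * publish plain writes ahead of a relaxed store:
 *
 *     shared_data = 42;                                            // plain write
 *     aws_atomic_thread_fence(aws_memory_order_release);
 *     aws_atomic_store_int_explicit(&ready, 1, aws_memory_order_relaxed);
 *
 * The consumer pairs this with a relaxed load of "ready" followed by an acquire fence.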
*/ AWS_STATIC_IMPL void aws_atomic_thread_fence(enum aws_memory_order order); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/atomics.inl000066400000000000000000000121111456575232400263640ustar00rootroot00000000000000#ifndef AWS_COMMON_ATOMICS_INL #define AWS_COMMON_ATOMICS_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_EXTERN_C_BEGIN /** * Reads an atomic var as an integer, using sequentially consistent ordering, and returns the result. */ AWS_STATIC_IMPL size_t aws_atomic_load_int(volatile const struct aws_atomic_var *var) { return aws_atomic_load_int_explicit(var, aws_memory_order_seq_cst); } /** * Reads an atomic var as a pointer, using sequentially consistent ordering, and returns the result. */ AWS_STATIC_IMPL void *aws_atomic_load_ptr(volatile const struct aws_atomic_var *var) { return aws_atomic_load_ptr_explicit(var, aws_memory_order_seq_cst); } /** * Stores an integer into an atomic var, using sequentially consistent ordering. */ AWS_STATIC_IMPL void aws_atomic_store_int(volatile struct aws_atomic_var *var, size_t n) { aws_atomic_store_int_explicit(var, n, aws_memory_order_seq_cst); } /** * Stores a pointer into an atomic var, using sequentially consistent ordering. */ AWS_STATIC_IMPL void aws_atomic_store_ptr(volatile struct aws_atomic_var *var, void *p) { aws_atomic_store_ptr_explicit(var, p, aws_memory_order_seq_cst); } /** * Exchanges an integer with the value in an atomic_var, using sequentially consistent ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL size_t aws_atomic_exchange_int(volatile struct aws_atomic_var *var, size_t n) { return aws_atomic_exchange_int_explicit(var, n, aws_memory_order_seq_cst); } /** * Exchanges an integer with the value in an atomic_var, using sequentially consistent ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL void *aws_atomic_exchange_ptr(volatile struct aws_atomic_var *var, void *p) { return aws_atomic_exchange_ptr_explicit(var, p, aws_memory_order_seq_cst); } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure. * Returns true if the compare was successful and the variable updated to desired. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_int(volatile struct aws_atomic_var *var, size_t *expected, size_t desired) { return aws_atomic_compare_exchange_int_explicit( var, expected, desired, aws_memory_order_seq_cst, aws_memory_order_seq_cst); } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure. * Returns true if the compare was successful and the variable updated to desired. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_ptr(volatile struct aws_atomic_var *var, void **expected, void *desired) { return aws_atomic_compare_exchange_ptr_explicit( var, expected, desired, aws_memory_order_seq_cst, aws_memory_order_seq_cst); } /** * Atomically adds n to *var, and returns the previous value of *var. * Uses sequentially consistent ordering. 
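 *
 * A common use is reference counting. As a rough sketch (hypothetical "obj" with an
 * aws_atomic_var member "ref_count" and a hypothetical destroy() helper):
 *
 *     aws_atomic_fetch_add(&obj->ref_count, 1);             // take a reference
 *     if (aws_atomic_fetch_sub(&obj->ref_count, 1) == 1) {  // drop a reference
 *         destroy(obj);                                     // previous value 1 => last owner
 *     }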
*/ AWS_STATIC_IMPL size_t aws_atomic_fetch_add(volatile struct aws_atomic_var *var, size_t n) { return aws_atomic_fetch_add_explicit(var, n, aws_memory_order_seq_cst); } /** * Atomically subtracts n from *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_sub(volatile struct aws_atomic_var *var, size_t n) { return aws_atomic_fetch_sub_explicit(var, n, aws_memory_order_seq_cst); } /** * Atomically ands n into *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_and(volatile struct aws_atomic_var *var, size_t n) { return aws_atomic_fetch_and_explicit(var, n, aws_memory_order_seq_cst); } /** * Atomically ors n into *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_or(volatile struct aws_atomic_var *var, size_t n) { return aws_atomic_fetch_or_explicit(var, n, aws_memory_order_seq_cst); } /** * Atomically xors n into *var, and returns the previous value of *var. * Uses sequentially consistent ordering. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_xor(volatile struct aws_atomic_var *var, size_t n) { return aws_atomic_fetch_xor_explicit(var, n, aws_memory_order_seq_cst); } /* Include the backend implementation now, because we'll use its typedefs and #defines below */ #if defined(__GNUC__) || defined(__clang__) # if defined(__ATOMIC_RELAXED) # include # else # include # endif /* __ATOMIC_RELAXED */ #elif defined(_MSC_VER) # include #else # error No atomics implementation for your compiler is available #endif #include AWS_EXTERN_C_END #endif /* AWS_COMMON_ATOMICS_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/atomics_fallback.inl000066400000000000000000000012151456575232400302060ustar00rootroot00000000000000#ifndef AWS_COMMON_ATOMICS_FALLBACK_INL #define AWS_COMMON_ATOMICS_FALLBACK_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ AWS_EXTERN_C_BEGIN #ifndef AWS_ATOMICS_HAVE_THREAD_FENCE void aws_atomic_thread_fence(enum aws_memory_order order) { struct aws_atomic_var var; aws_atomic_int_t expected = 0; aws_atomic_store_int(&var, expected, aws_memory_order_relaxed); aws_atomic_compare_exchange_int(&var, &expected, 1, order, aws_memory_order_relaxed); } #endif /* AWS_ATOMICS_HAVE_THREAD_FENCE */ AWS_EXTERN_C_END #endif /* AWS_COMMON_ATOMICS_FALLBACK_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/atomics_gnu.inl000066400000000000000000000167111456575232400272470ustar00rootroot00000000000000#ifndef AWS_COMMON_ATOMICS_GNU_INL #define AWS_COMMON_ATOMICS_GNU_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ /* These are implicitly included, but help with editor highlighting */ #include #include #include #include AWS_EXTERN_C_BEGIN #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wc11-extensions" #else # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wpedantic" #endif typedef size_t aws_atomic_impl_int_t; static inline int aws_atomic_priv_xlate_order(enum aws_memory_order order) { switch (order) { case aws_memory_order_relaxed: return __ATOMIC_RELAXED; case aws_memory_order_acquire: return __ATOMIC_ACQUIRE; case aws_memory_order_release: return __ATOMIC_RELEASE; case aws_memory_order_acq_rel: return __ATOMIC_ACQ_REL; case aws_memory_order_seq_cst: return __ATOMIC_SEQ_CST; default: /* Unknown memory order */ abort(); } } /** * Initializes an atomic variable with an integer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. */ AWS_STATIC_IMPL void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n) { AWS_ATOMIC_VAR_INTVAL(var) = n; } /** * Initializes an atomic variable with a pointer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. */ AWS_STATIC_IMPL void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p) { AWS_ATOMIC_VAR_PTRVAL(var) = p; } /** * Reads an atomic var as an integer, using the specified ordering, and returns the result. */ AWS_STATIC_IMPL size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) { return __atomic_load_n(&AWS_ATOMIC_VAR_INTVAL(var), aws_atomic_priv_xlate_order(memory_order)); } /** * Reads an atomic var as a pointer, using the specified ordering, and returns the result. */ AWS_STATIC_IMPL void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) { return __atomic_load_n(&AWS_ATOMIC_VAR_PTRVAL(var), aws_atomic_priv_xlate_order(memory_order)); } /** * Stores an integer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) { __atomic_store_n(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(memory_order)); } /** * Stores an pointer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) { __atomic_store_n(&AWS_ATOMIC_VAR_PTRVAL(var), p, aws_atomic_priv_xlate_order(memory_order)); } /** * Exchanges an integer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL size_t aws_atomic_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) { return __atomic_exchange_n(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(memory_order)); } /** * Exchanges a pointer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. 
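 *
 * For example (illustrative sketch; "pending" is a hypothetical struct aws_atomic_var holding
 * a work-item pointer and process() is a hypothetical helper), an exchange lets exactly one
 * thread claim a pointer:
 *
 *     void *item = aws_atomic_exchange_ptr_explicit(&pending, NULL, aws_memory_order_acq_rel);
 *     if (item != NULL) {
 *         process(item);   // only the thread that observed the non-NULL value gets here
 *     }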
*/ AWS_STATIC_IMPL void *aws_atomic_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) { return __atomic_exchange_n(&AWS_ATOMIC_VAR_PTRVAL(var), p, aws_atomic_priv_xlate_order(memory_order)); } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t *expected, size_t desired, enum aws_memory_order order_success, enum aws_memory_order order_failure) { return __atomic_compare_exchange_n( &AWS_ATOMIC_VAR_INTVAL(var), expected, desired, false, aws_atomic_priv_xlate_order(order_success), aws_atomic_priv_xlate_order(order_failure)); } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void **expected, void *desired, enum aws_memory_order order_success, enum aws_memory_order order_failure) { return __atomic_compare_exchange_n( &AWS_ATOMIC_VAR_PTRVAL(var), expected, desired, false, aws_atomic_priv_xlate_order(order_success), aws_atomic_priv_xlate_order(order_failure)); } /** * Atomically adds n to *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __atomic_fetch_add(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order)); } /** * Atomically subtracts n from *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __atomic_fetch_sub(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order)); } /** * Atomically ORs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __atomic_fetch_or(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order)); } /** * Atomically ANDs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __atomic_fetch_and(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order)); } /** * Atomically XORs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __atomic_fetch_xor(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order)); } /** * Provides the same reordering guarantees as an atomic operation with the specified memory order, without * needing to actually perform an atomic operation. 
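 *
 * For example (illustrative sketch; "ready", "shared_data", and use() are hypothetical and
 * match the producer-side release-fence idiom), the consumer side looks like:
 *
 *     if (aws_atomic_load_int_explicit(&ready, aws_memory_order_relaxed) == 1) {
 *         aws_atomic_thread_fence(aws_memory_order_acquire);
 *         use(shared_data);   // plain reads are ordered after the acquire fence
 *     }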
*/ AWS_STATIC_IMPL void aws_atomic_thread_fence(enum aws_memory_order order) { __atomic_thread_fence(order); } #ifdef __clang__ # pragma clang diagnostic pop #else # pragma GCC diagnostic pop #endif #define AWS_ATOMICS_HAVE_THREAD_FENCE AWS_EXTERN_C_END #endif /* AWS_COMMON_ATOMICS_GNU_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/atomics_gnu_old.inl000066400000000000000000000223471456575232400301070ustar00rootroot00000000000000#ifndef AWS_COMMON_ATOMICS_GNU_OLD_INL #define AWS_COMMON_ATOMICS_GNU_OLD_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* These are implicitly included, but help with editor highlighting */ #include #include #include #include AWS_EXTERN_C_BEGIN #if defined(__GNUC__) # if (__GNUC__ < 4) # error GCC versions before 4.1.2 are not supported # elif (defined(__arm__) || defined(__ia64__)) && (__GNUC__ == 4 && __GNUC_MINOR__ < 4) /* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793 Itanium codegen */ /* https://bugs.launchpad.net/ubuntu/+source/gcc-4.4/+bug/491872 ARM codegen*/ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42263 ARM codegen */ # error GCC versions before 4.4.0 are not supported on ARM or Itanium # elif (defined(__x86_64__) || defined(__i386__)) && \ (__GNUC__ == 4 && (__GNUC_MINOR__ < 1 || (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ < 2))) /* 4.1.2 is the first gcc version with 100% working atomic intrinsics on Intel */ # error GCC versions before 4.1.2 are not supported on x86/x64 # endif #endif typedef size_t aws_atomic_impl_int_t; static inline void aws_atomic_private_compiler_barrier(void) { __asm__ __volatile__("" : : : "memory"); } static inline void aws_atomic_private_barrier_before(enum aws_memory_order order) { if (order == aws_memory_order_release || order == aws_memory_order_acq_rel || order == aws_memory_order_seq_cst) { __sync_synchronize(); } aws_atomic_private_compiler_barrier(); } static inline void aws_atomic_private_barrier_after(enum aws_memory_order order) { aws_atomic_private_compiler_barrier(); if (order == aws_memory_order_acquire || order == aws_memory_order_acq_rel || order == aws_memory_order_seq_cst) { __sync_synchronize(); } } /** * Initializes an atomic variable with an integer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. */ AWS_STATIC_IMPL void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n) { AWS_ATOMIC_VAR_INTVAL(var) = n; } /** * Initializes an atomic variable with a pointer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. */ AWS_STATIC_IMPL void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p) { AWS_ATOMIC_VAR_PTRVAL(var) = p; } /** * Reads an atomic var as an integer, using the specified ordering, and returns the result. */ AWS_STATIC_IMPL size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) { aws_atomic_private_barrier_before(memory_order); size_t retval = AWS_ATOMIC_VAR_INTVAL(var); /* Release barriers are not permitted for loads, so we just do a compiler barrier here */ aws_atomic_private_compiler_barrier(); return retval; } /** * Reads an atomic var as a pointer, using the specified ordering, and returns the result. 
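 *
 * For example (illustrative sketch; "slot" is a hypothetical struct aws_atomic_var and
 * struct my_obj is a hypothetical type), an acquire load pairs with the release store that
 * published the pointer:
 *
 *     // publisher: aws_atomic_store_ptr_explicit(&slot, obj, aws_memory_order_release);
 *     struct my_obj *obj = aws_atomic_load_ptr_explicit(&slot, aws_memory_order_acquire);
 *     if (obj != NULL) {
 *         // fields written before the release store are now visible to this thread
 *     }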
*/ AWS_STATIC_IMPL void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) { aws_atomic_private_barrier_before(memory_order); void *retval = AWS_ATOMIC_VAR_PTRVAL(var); /* Release barriers are not permitted for loads, so we just do a compiler barrier here */ aws_atomic_private_compiler_barrier(); return retval; } /** * Stores an integer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) { /* Acquire barriers are not permitted for stores, so just do a compiler barrier before */ aws_atomic_private_compiler_barrier(); AWS_ATOMIC_VAR_INTVAL(var) = n; aws_atomic_private_barrier_after(memory_order); } /** * Stores a pointer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) { /* Acquire barriers are not permitted for stores, so just do a compiler barrier before */ aws_atomic_private_compiler_barrier(); AWS_ATOMIC_VAR_PTRVAL(var) = p; aws_atomic_private_barrier_after(memory_order); } /** * Exchanges an integer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL size_t aws_atomic_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) { /* * GCC 4.6 and before have only __sync_lock_test_and_set as an exchange operation, * which may not support arbitrary values on all architectures. We simply emulate * with a CAS instead. */ size_t oldval; do { oldval = AWS_ATOMIC_VAR_INTVAL(var); } while (!__sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_INTVAL(var), oldval, n)); /* __sync_bool_compare_and_swap implies a full barrier */ return oldval; } /** * Exchanges a pointer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL void *aws_atomic_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) { /* * GCC 4.6 and before have only __sync_lock_test_and_set as an exchange operation, * which may not support arbitrary values on all architectures. We simply emulate * with a CAS instead. */ void *oldval; do { oldval = AWS_ATOMIC_VAR_PTRVAL(var); } while (!__sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_PTRVAL(var), oldval, p)); /* __sync_bool_compare_and_swap implies a full barrier */ return oldval; } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t *expected, size_t desired, enum aws_memory_order order_success, enum aws_memory_order order_failure) { bool result = __sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_INTVAL(var), *expected, desired); if (!result) { *expected = AWS_ATOMIC_VAR_INTVAL(var); } return result; } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. 
On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void **expected, void *desired, enum aws_memory_order order_success, enum aws_memory_order order_failure) { bool result = __sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_PTRVAL(var), *expected, desired); if (!result) { *expected = AWS_ATOMIC_VAR_PTRVAL(var); } return result; } /** * Atomically adds n to *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __sync_fetch_and_add(&AWS_ATOMIC_VAR_INTVAL(var), n); } /** * Atomically subtracts n from *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __sync_fetch_and_sub(&AWS_ATOMIC_VAR_INTVAL(var), n); } /** * Atomically ORs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __sync_fetch_and_or(&AWS_ATOMIC_VAR_INTVAL(var), n); } /** * Atomically ANDs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __sync_fetch_and_and(&AWS_ATOMIC_VAR_INTVAL(var), n); } /** * Atomically XORs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { return __sync_fetch_and_xor(&AWS_ATOMIC_VAR_INTVAL(var), n); } /** * Provides the same reordering guarantees as an atomic operation with the specified memory order, without * needing to actually perform an atomic operation. */ AWS_STATIC_IMPL void aws_atomic_thread_fence(enum aws_memory_order order) { /* On old versions of GCC we only have this one big hammer... */ __sync_synchronize(); } #define AWS_ATOMICS_HAVE_THREAD_FENCE AWS_EXTERN_C_END #endif /* AWS_COMMON_ATOMICS_GNU_OLD_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/atomics_msvc.inl000066400000000000000000000314471456575232400274310ustar00rootroot00000000000000#ifndef AWS_COMMON_ATOMICS_MSVC_INL #define AWS_COMMON_ATOMICS_MSVC_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* These are implicitly included, but helps with editor highlighting */ #include #include /* This file generates level 4 compiler warnings in Visual Studio 2017 and older */ #pragma warning(push, 3) #include #pragma warning(pop) #include #include AWS_EXTERN_C_BEGIN #if !(defined(_M_IX86) || defined(_M_X64)) # error Atomics are not currently supported for non-x86 MSVC platforms /* * In particular, it's not clear that seq_cst will work properly on non-x86 * memory models. We may need to make use of platform-specific intrinsics. * * NOTE: Before removing this #error, please make use of the Interlocked*[Acquire|Release] * variants (if applicable for the new platform)! This will (hopefully) help ensure that * code breaks before people take too much of a dependency on it. 
*/ #endif /** * Some general notes: * * On x86/x86_64, by default, windows uses acquire/release semantics for volatile accesses; * however, this is not the case on ARM, and on x86/x86_64 it can be disabled using the * /volatile:iso compile flag. * * Interlocked* functions implicitly have acq_rel semantics; there are ones with weaker * semantics as well, but because windows is generally used on x86, where there's not a lot * of performance difference between different ordering modes anyway, we just use the stronger * forms for now. Further, on x86, they actually have seq_cst semantics as they use locked instructions. * It is unclear if Interlocked functions guarantee seq_cst on non-x86 platforms. * * Since all loads and stores are acq and/or rel already, we can do non-seq_cst loads and stores * as just volatile variable accesses, but add the appropriate barriers for good measure. * * For seq_cst accesses, we take advantage of the facts that (on x86): * 1. Loads are not reordered with other loads * 2. Stores are not reordered with other stores * 3. Locked instructions (including swaps) have a total order * 4. Non-locked accesses are not reordered with locked instructions * * Therefore, if we ensure that all seq_cst stores are locked, we can establish * a total order on stores, and the intervening ordinary loads will not violate that total * order. * See http://www.cs.cmu.edu/~410-f10/doc/Intel_Reordering_318147.pdf 2.7, which covers * this use case. */ #ifdef _M_IX86 # define AWS_INTERLOCKED_INT(x) _Interlocked##x typedef long aws_atomic_impl_int_t; #else # define AWS_INTERLOCKED_INT(x) _Interlocked##x##64 typedef long long aws_atomic_impl_int_t; #endif static inline void aws_atomic_priv_check_order(enum aws_memory_order order) { #ifndef NDEBUG switch (order) { case aws_memory_order_relaxed: return; case aws_memory_order_acquire: return; case aws_memory_order_release: return; case aws_memory_order_acq_rel: return; case aws_memory_order_seq_cst: return; default: /* Unknown memory order */ abort(); } #endif (void)order; } enum aws_atomic_mode_priv { aws_atomic_priv_load, aws_atomic_priv_store }; static inline void aws_atomic_priv_barrier_before(enum aws_memory_order order, enum aws_atomic_mode_priv mode) { aws_atomic_priv_check_order(order); AWS_ASSERT(mode != aws_atomic_priv_load || order != aws_memory_order_release); if (order == aws_memory_order_relaxed) { /* no barriers required for relaxed mode */ return; } if (order == aws_memory_order_acquire || mode == aws_atomic_priv_load) { /* for acquire, we need only use a barrier afterward */ return; } /* * x86: only a compiler barrier is required. For seq_cst, we must use some form of interlocked operation for * writes, but that's the caller's responsibility. * * Volatile ops may or may not imply this barrier, depending on the /volatile: switch, but adding an extra * barrier doesn't hurt. */ _ReadWriteBarrier(); } static inline void aws_atomic_priv_barrier_after(enum aws_memory_order order, enum aws_atomic_mode_priv mode) { aws_atomic_priv_check_order(order); AWS_ASSERT(mode != aws_atomic_priv_store || order != aws_memory_order_acquire); if (order == aws_memory_order_relaxed) { /* no barriers required for relaxed mode */ return; } if (order == aws_memory_order_release || mode == aws_atomic_priv_store) { /* for release, we need only use a barrier before */ return; } /* * x86: only a compiler barrier is required. For seq_cst, we must use some form of interlocked operation for * writes, but that's the caller's responsibility. 
*/ _ReadWriteBarrier(); } /** * Initializes an atomic variable with an integer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. */ AWS_STATIC_IMPL void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n) { AWS_ATOMIC_VAR_INTVAL(var) = (aws_atomic_impl_int_t)n; } /** * Initializes an atomic variable with a pointer value. This operation should be done before any * other operations on this atomic variable, and must be done before attempting any parallel operations. */ AWS_STATIC_IMPL void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p) { AWS_ATOMIC_VAR_PTRVAL(var) = p; } /** * Reads an atomic var as an integer, using the specified ordering, and returns the result. */ AWS_STATIC_IMPL size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) { aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_load); size_t result = (size_t)AWS_ATOMIC_VAR_INTVAL(var); aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_load); return result; } /** * Reads an atomic var as an pointer, using the specified ordering, and returns the result. */ AWS_STATIC_IMPL void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) { aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_load); void *result = AWS_ATOMIC_VAR_PTRVAL(var); aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_load); return result; } /** * Stores an integer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) { if (memory_order != aws_memory_order_seq_cst) { aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_store); AWS_ATOMIC_VAR_INTVAL(var) = (aws_atomic_impl_int_t)n; aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_store); } else { AWS_INTERLOCKED_INT(Exchange)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n); } } /** * Stores an pointer into an atomic var, using the specified ordering. */ AWS_STATIC_IMPL void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) { aws_atomic_priv_check_order(memory_order); if (memory_order != aws_memory_order_seq_cst) { aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_store); AWS_ATOMIC_VAR_PTRVAL(var) = p; aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_store); } else { _InterlockedExchangePointer(&AWS_ATOMIC_VAR_PTRVAL(var), p); } } /** * Exchanges an integer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. */ AWS_STATIC_IMPL size_t aws_atomic_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) { aws_atomic_priv_check_order(memory_order); return (size_t)AWS_INTERLOCKED_INT(Exchange)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n); } /** * Exchanges a pointer with the value in an atomic_var, using the specified ordering. * Returns the value that was previously in the atomic_var. 
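 *
 * For example (illustrative sketch; "cache", "new_entry", and release_entry() are
 * hypothetical), swapping in a replacement pointer yields the old one exactly once:
 *
 *     void *old = aws_atomic_exchange_ptr_explicit(&cache, new_entry, aws_memory_order_acq_rel);
 *     if (old != NULL) {
 *         release_entry(old);   // each previous value is handed to exactly one caller
 *     }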
*/ AWS_STATIC_IMPL void *aws_atomic_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) { aws_atomic_priv_check_order(memory_order); return _InterlockedExchangePointer(&AWS_ATOMIC_VAR_PTRVAL(var), p); } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_int_explicit( volatile struct aws_atomic_var *var, size_t *expected, size_t desired, enum aws_memory_order order_success, enum aws_memory_order order_failure) { aws_atomic_priv_check_order(order_success); aws_atomic_priv_check_order(order_failure); size_t oldval = (size_t)AWS_INTERLOCKED_INT(CompareExchange)( &AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)desired, (aws_atomic_impl_int_t)*expected); bool successful = oldval == *expected; *expected = oldval; return successful; } /** * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure. * order_failure must be no stronger than order_success, and must not be release or acq_rel. */ AWS_STATIC_IMPL bool aws_atomic_compare_exchange_ptr_explicit( volatile struct aws_atomic_var *var, void **expected, void *desired, enum aws_memory_order order_success, enum aws_memory_order order_failure) { aws_atomic_priv_check_order(order_success); aws_atomic_priv_check_order(order_failure); void *oldval = _InterlockedCompareExchangePointer(&AWS_ATOMIC_VAR_PTRVAL(var), desired, *expected); bool successful = oldval == *expected; *expected = oldval; return successful; } /** * Atomically adds n to *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { aws_atomic_priv_check_order(order); return (size_t)AWS_INTERLOCKED_INT(ExchangeAdd)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n); } /** * Atomically subtracts n from *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { aws_atomic_priv_check_order(order); return (size_t)AWS_INTERLOCKED_INT(ExchangeAdd)(&AWS_ATOMIC_VAR_INTVAL(var), -(aws_atomic_impl_int_t)n); } /** * Atomically ORs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { aws_atomic_priv_check_order(order); return (size_t)AWS_INTERLOCKED_INT(Or)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n); } /** * Atomically ANDs n with *var, and returns the previous value of *var. */ AWS_STATIC_IMPL size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { aws_atomic_priv_check_order(order); return (size_t)AWS_INTERLOCKED_INT(And)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n); } /** * Atomically XORs n with *var, and returns the previous value of *var. 
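 *
 * For example (illustrative sketch; "flags" is a hypothetical struct aws_atomic_var and
 * FLAG_STARTED, FLAG_BUSY, FLAG_PARITY are hypothetical size_t bit constants), the
 * fetch-or/and/xor family can maintain a bit mask:
 *
 *     aws_atomic_fetch_or_explicit(&flags, FLAG_STARTED, aws_memory_order_acq_rel);   // set a bit
 *     aws_atomic_fetch_and_explicit(&flags, ~FLAG_BUSY, aws_memory_order_acq_rel);    // clear a bit
 *     size_t prev = aws_atomic_fetch_xor_explicit(&flags, FLAG_PARITY, aws_memory_order_acq_rel); // toggle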
*/ AWS_STATIC_IMPL size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) { aws_atomic_priv_check_order(order); return (size_t)AWS_INTERLOCKED_INT(Xor)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n); } /** * Provides the same reordering guarantees as an atomic operation with the specified memory order, without * needing to actually perform an atomic operation. */ AWS_STATIC_IMPL void aws_atomic_thread_fence(enum aws_memory_order order) { volatile aws_atomic_impl_int_t x = 0; aws_atomic_priv_check_order(order); /* On x86: A compiler barrier is sufficient for anything short of seq_cst */ switch (order) { case aws_memory_order_seq_cst: AWS_INTERLOCKED_INT(Exchange)(&x, 1); break; case aws_memory_order_release: case aws_memory_order_acquire: case aws_memory_order_acq_rel: _ReadWriteBarrier(); break; case aws_memory_order_relaxed: /* no-op */ break; } } #define AWS_ATOMICS_HAVE_THREAD_FENCE AWS_EXTERN_C_END #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/byte_buf.h000066400000000000000000001105351456575232400262020ustar00rootroot00000000000000#ifndef AWS_COMMON_BYTE_BUF_H #define AWS_COMMON_BYTE_BUF_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL /** * Represents a length-delimited binary string or buffer. If byte buffer points * to constant memory or memory that should otherwise not be freed by this * struct, set allocator to NULL and free function will be a no-op. * * This structure used to define the output for all functions that write to a buffer. * * Note that this structure allocates memory at the buffer pointer only. The * struct itself does not get dynamically allocated and must be either * maintained or copied to avoid losing access to the memory. */ struct aws_byte_buf { /* do not reorder this, this struct lines up nicely with windows buffer structures--saving us allocations.*/ size_t len; uint8_t *buffer; size_t capacity; struct aws_allocator *allocator; }; /** * Represents a movable pointer within a larger binary string or buffer. * * This structure is used to define buffers for reading. */ struct aws_byte_cursor { /* do not reorder this, this struct lines up nicely with windows buffer structures--saving us allocations */ size_t len; uint8_t *ptr; }; /** * Helper macro for passing aws_byte_cursor to the printf family of functions. * Intended for use with the PRInSTR format macro. * Ex: printf(PRInSTR "\n", AWS_BYTE_CURSOR_PRI(my_cursor)); */ #define AWS_BYTE_CURSOR_PRI(C) ((int)(C).len < 0 ? 0 : (int)(C).len), (const char *)(C).ptr /** * Helper macro for passing aws_byte_buf to the printf family of functions. * Intended for use with the PRInSTR format macro. * Ex: printf(PRInSTR "\n", AWS_BYTE_BUF_PRI(my_buf)); */ #define AWS_BYTE_BUF_PRI(B) ((int)(B).len < 0 ? 0 : (int)(B).len), (const char *)(B).buffer /** * Helper Macro for initializing a byte cursor from a string literal */ #define AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(literal) \ { .ptr = (uint8_t *)(const char *)(literal), .len = sizeof(literal) - 1 } /** * Signature for function argument to trim APIs */ typedef bool(aws_byte_predicate_fn)(uint8_t value); AWS_EXTERN_C_BEGIN /** * Compare two arrays. * Return whether their contents are equivalent. * NULL may be passed as the array pointer if its length is declared to be 0. 
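 *
 * For example (illustrative sketch only), two cursors built from string literals can be
 * compared via their ptr/len pairs:
 *
 *     struct aws_byte_cursor a = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("token");
 *     struct aws_byte_cursor b = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("token");
 *     bool same = aws_array_eq(a.ptr, a.len, b.ptr, b.len);   // true: identical bytes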
*/ AWS_COMMON_API bool aws_array_eq(const void *const array_a, const size_t len_a, const void *array_b, const size_t len_b); /** * Perform a case-insensitive string comparison of two arrays. * Return whether their contents are equivalent. * NULL may be passed as the array pointer if its length is declared to be 0. * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_array_eq_ignore_case( const void *const array_a, const size_t len_a, const void *const array_b, const size_t len_b); /** * Compare an array and a null-terminated string. * Returns true if their contents are equivalent. * The array should NOT contain a null-terminator, or the comparison will always return false. * NULL may be passed as the array pointer if its length is declared to be 0. */ AWS_COMMON_API bool aws_array_eq_c_str(const void *const array, const size_t array_len, const char *const c_str); /** * Perform a case-insensitive string comparison of an array and a null-terminated string. * Return whether their contents are equivalent. * The array should NOT contain a null-terminator, or the comparison will always return false. * NULL may be passed as the array pointer if its length is declared to be 0. * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_array_eq_c_str_ignore_case(const void *const array, const size_t array_len, const char *const c_str); AWS_COMMON_API int aws_byte_buf_init(struct aws_byte_buf *buf, struct aws_allocator *allocator, size_t capacity); /** * Initializes an aws_byte_buf structure based on another valid one. * Requires: *src and *allocator are valid objects. * Ensures: *dest is a valid aws_byte_buf with a new backing array dest->buffer * which is a copy of the elements from src->buffer. */ AWS_COMMON_API int aws_byte_buf_init_copy( struct aws_byte_buf *dest, struct aws_allocator *allocator, const struct aws_byte_buf *src); /** * Reads 'filename' into 'out_buf'. If successful, 'out_buf' is allocated and filled with the data; * It is your responsibility to call 'aws_byte_buf_clean_up()' on it. Otherwise, 'out_buf' remains * unused. In the very unfortunate case where some API needs to treat out_buf as a c_string, a null terminator * is appended, but is not included as part of the length field. */ AWS_COMMON_API int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename); /** * Same as aws_byte_buf_init_from_file(), but for reading "special files" like /proc/cpuinfo. * These files don't accurately report their size, so size_hint is used as initial buffer size, * and the buffer grows until the whole file is read. */ AWS_COMMON_API int aws_byte_buf_init_from_file_with_size_hint( struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename, size_t size_hint); /** * Evaluates the set of properties that define the shape of all valid aws_byte_buf structures. * It is also a cheap check, in the sense it runs in constant time (i.e., no loops or recursion). */ AWS_COMMON_API bool aws_byte_buf_is_valid(const struct aws_byte_buf *const buf); /** * Evaluates the set of properties that define the shape of all valid aws_byte_cursor structures. * It is also a cheap check, in the sense it runs in constant time (i.e., no loops or recursion).
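 *
 * Example usage (an illustrative sketch, not part of this header):
 *   struct aws_byte_cursor cur = aws_byte_cursor_from_c_str("hello");
 *   bool ok = aws_byte_cursor_is_valid(&cur); // true for cursors produced by this library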
*/ AWS_COMMON_API bool aws_byte_cursor_is_valid(const struct aws_byte_cursor *cursor); /** * Copies src buffer into dest and sets the correct len and capacity. * A new memory zone is allocated for dest->buffer. When dest is no longer needed it will have to be cleaned-up using * aws_byte_buf_clean_up(dest). * Dest capacity and len will be equal to the src len. Allocator of the dest will be identical with parameter allocator. * If src buffer is null the dest will have a null buffer with a len and a capacity of 0 * Returns AWS_OP_SUCCESS in case of success or AWS_OP_ERR when memory can't be allocated. */ AWS_COMMON_API int aws_byte_buf_init_copy_from_cursor( struct aws_byte_buf *dest, struct aws_allocator *allocator, struct aws_byte_cursor src); /** * Init buffer with contents of multiple cursors, and update cursors to reference the memory stored in the buffer. * Each cursor arg must be an `struct aws_byte_cursor *`. NULL must be passed as the final arg. * NOTE: Do not append/grow/resize buffers initialized this way, or the cursors will end up referencing invalid memory. * Returns AWS_OP_SUCCESS in case of success. * AWS_OP_ERR is returned if memory can't be allocated or the total cursor length exceeds SIZE_MAX. */ AWS_COMMON_API int aws_byte_buf_init_cache_and_update_cursors(struct aws_byte_buf *dest, struct aws_allocator *allocator, ...); AWS_COMMON_API void aws_byte_buf_clean_up(struct aws_byte_buf *buf); /** * Equivalent to calling aws_byte_buf_secure_zero and then aws_byte_buf_clean_up * on the buffer. */ AWS_COMMON_API void aws_byte_buf_clean_up_secure(struct aws_byte_buf *buf); /** * Resets the len of the buffer to 0, but does not free the memory. The buffer can then be reused. * Optionally zeroes the contents, if the "zero_contents" flag is true. */ AWS_COMMON_API void aws_byte_buf_reset(struct aws_byte_buf *buf, bool zero_contents); /** * Sets all bytes of buffer to zero and resets len to zero. */ AWS_COMMON_API void aws_byte_buf_secure_zero(struct aws_byte_buf *buf); /** * Compare two aws_byte_buf structures. * Return whether their contents are equivalent. */ AWS_COMMON_API bool aws_byte_buf_eq(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b); /** * Perform a case-insensitive string comparison of two aws_byte_buf structures. * Return whether their contents are equivalent. * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_byte_buf_eq_ignore_case(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b); /** * Compare an aws_byte_buf and a null-terminated string. * Returns true if their contents are equivalent. * The buffer should NOT contain a null-terminator, or the comparison will always return false. */ AWS_COMMON_API bool aws_byte_buf_eq_c_str(const struct aws_byte_buf *const buf, const char *const c_str); /** * Perform a case-insensitive string comparison of an aws_byte_buf and a null-terminated string. * Return whether their contents are equivalent. * The buffer should NOT contain a null-terminator, or the comparison will always return false. * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_byte_buf_eq_c_str_ignore_case(const struct aws_byte_buf *const buf, const char *const c_str); /** * No copies, no buffer allocations. 
Iterates over input_str, and returns the * next substring between split_on instances relative to previous substr. * Behaves similar to strtok with substr being used as state for next split. * * Returns true each time substr is set and false when there is no more splits * (substr is set to empty in that case). * * Example usage. * struct aws_byte_cursor substr = {0}; * while (aws_byte_cursor_next_split(&input_str, ';', &substr)) { * // ...use substr... * } * * Note: It is the user's responsibility zero-initialize substr before the first call. * * Edge case rules are as follows: * empty input will have single empty split. ex. "" splits into "" * if input starts with split_on then first split is empty. ex ";A" splits into "", "A" * adjacent split tokens result in empty split. ex "A;;B" splits into "A", "", "B" * If the input ends with split_on, last split is empty. ex. "A;" splits into "A", "" * * It is the user's responsibility to make sure the input buffer stays in memory * long enough to use the results. */ AWS_COMMON_API bool aws_byte_cursor_next_split( const struct aws_byte_cursor *AWS_RESTRICT input_str, char split_on, struct aws_byte_cursor *AWS_RESTRICT substr); /** * No copies, no buffer allocations. Fills in output with a list of * aws_byte_cursor instances where buffer is an offset into the input_str and * len is the length of that string in the original buffer. * * Edge case rules are as follows: * if the input begins with split_on, an empty cursor will be the first entry in * output. if the input has two adjacent split_on tokens, an empty cursor will * be inserted into the output. if the input ends with split_on, an empty cursor * will be appended to the output. * * It is the user's responsibility to properly initialize output. Recommended number of preallocated elements from * output is your most likely guess for the upper bound of the number of elements resulting from the split. * * The type that will be stored in output is struct aws_byte_cursor (you'll need * this for the item size param). * * It is the user's responsibility to make sure the input buffer stays in memory * long enough to use the results. */ AWS_COMMON_API int aws_byte_cursor_split_on_char( const struct aws_byte_cursor *AWS_RESTRICT input_str, char split_on, struct aws_array_list *AWS_RESTRICT output); /** * No copies, no buffer allocations. Fills in output with a list of aws_byte_cursor instances where buffer is * an offset into the input_str and len is the length of that string in the original buffer. N is the max number of * splits, if this value is zero, it will add all splits to the output. * * Edge case rules are as follows: * if the input begins with split_on, an empty cursor will be the first entry in output * if the input has two adjacent split_on tokens, an empty cursor will be inserted into the output. * if the input ends with split_on, an empty cursor will be appended to the output. * * It is the user's responsibility to properly initialize output. Recommended number of preallocated elements from * output is your most likely guess for the upper bound of the number of elements resulting from the split. * * If the output array is not large enough, input_str will be updated to point to the first character after the last * processed split_on instance. * * The type that will be stored in output is struct aws_byte_cursor (you'll need this for the item size param). * * It is the user's responsibility to make sure the input buffer stays in memory long enough to use the results. 
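 *
 * Example usage (an illustrative sketch, not part of this header; assumes aws/common/array_list.h,
 * that "allocator" is an initialized struct aws_allocator * such as aws_default_allocator(),
 * and omits error checking):
 *   struct aws_array_list splits;
 *   aws_array_list_init_dynamic(&splits, allocator, 4, sizeof(struct aws_byte_cursor));
 *   struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a;b;c");
 *   aws_byte_cursor_split_on_char_n(&input, ';', 0, &splits); // n == 0 adds all splits: "a", "b", "c"
 *   aws_array_list_clean_up(&splits);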
*/ AWS_COMMON_API int aws_byte_cursor_split_on_char_n( const struct aws_byte_cursor *AWS_RESTRICT input_str, char split_on, size_t n, struct aws_array_list *AWS_RESTRICT output); /** * Search for an exact byte match inside a cursor. The first match will be returned. Returns AWS_OP_SUCCESS * on successful match and first_find will be set to the offset in input_str, and length will be the remaining length * from input_str past the returned offset. If the match was not found, AWS_OP_ERR will be returned and * AWS_ERROR_STRING_MATCH_NOT_FOUND will be raised. */ AWS_COMMON_API int aws_byte_cursor_find_exact( const struct aws_byte_cursor *AWS_RESTRICT input_str, const struct aws_byte_cursor *AWS_RESTRICT to_find, struct aws_byte_cursor *first_find); /** * * Shrinks a byte cursor from the right for as long as the supplied predicate is true */ AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_right_trim_pred( const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate); /** * Shrinks a byte cursor from the left for as long as the supplied predicate is true */ AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_left_trim_pred( const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate); /** * Shrinks a byte cursor from both sides for as long as the supplied predicate is true */ AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_trim_pred( const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate); /** * Returns true if the byte cursor's range of bytes all satisfy the predicate */ AWS_COMMON_API bool aws_byte_cursor_satisfies_pred(const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate); /** * Copies from to to. If to is too small, AWS_ERROR_DEST_COPY_TOO_SMALL will be * returned. dest->len will contain the amount of data actually copied to dest. * * from and to may be the same buffer, permitting copying a buffer into itself. */ AWS_COMMON_API int aws_byte_buf_append(struct aws_byte_buf *to, const struct aws_byte_cursor *from); /** * Copies from to to while converting bytes via the passed in lookup table. * If to is too small, AWS_ERROR_DEST_COPY_TOO_SMALL will be * returned. to->len will contain its original size plus the amount of data actually copied to to. * * from and to should not be the same buffer (overlap is not handled) * lookup_table must be at least 256 bytes */ AWS_COMMON_API int aws_byte_buf_append_with_lookup( struct aws_byte_buf *AWS_RESTRICT to, const struct aws_byte_cursor *AWS_RESTRICT from, const uint8_t *lookup_table); /** * Copies from to to. If to is too small, the buffer will be grown appropriately and * the old contents copied to, before the new contents are appended. * * If the grow fails (overflow or OOM), then an error will be returned. * * from and to may be the same buffer, permitting copying a buffer into itself. */ AWS_COMMON_API int aws_byte_buf_append_dynamic(struct aws_byte_buf *to, const struct aws_byte_cursor *from); /** * Copies `from` to `to`. If `to` is too small, the buffer will be grown appropriately and * the old contents copied over, before the new contents are appended. * * If the grow fails (overflow or OOM), then an error will be returned. * * If the buffer is grown, the old buffer will be securely cleared before getting freed. * * `from` and `to` may be the same buffer, permitting copying a buffer into itself. */ AWS_COMMON_API int aws_byte_buf_append_dynamic_secure(struct aws_byte_buf *to, const struct aws_byte_cursor *from); /** * Copies a single byte into `to`. 
If `to` is too small, the buffer will be grown appropriately and * the old contents copied over, before the byte is appended. * * If the grow fails (overflow or OOM), then an error will be returned. */ AWS_COMMON_API int aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value); /** * Copies a single byte into `to`. If `to` is too small, the buffer will be grown appropriately and * the old contents copied over, before the byte is appended. * * If the grow fails (overflow or OOM), then an error will be returned. * * If the buffer is grown, the old buffer will be securely cleared before getting freed. */ AWS_COMMON_API int aws_byte_buf_append_byte_dynamic_secure(struct aws_byte_buf *buffer, uint8_t value); /** * Copy contents of cursor to buffer, then update cursor to reference the memory stored in the buffer. * If buffer is too small, AWS_ERROR_DEST_COPY_TOO_SMALL will be returned. * * The cursor is permitted to reference memory from earlier in the buffer. */ AWS_COMMON_API int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_cursor *from_and_update); /** * Appends '\0' at the end of the buffer. */ AWS_COMMON_API int aws_byte_buf_append_null_terminator(struct aws_byte_buf *buf); /** * Attempts to increase the capacity of a buffer to the requested capacity * * If the the buffer's capacity is currently larger than the request capacity, the * function does nothing (no shrink is performed). */ AWS_COMMON_API int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity); /** * Convenience function that attempts to increase the capacity of a buffer relative to the current * length. * * aws_byte_buf_reserve_relative(buf, x) ~~ aws_byte_buf_reserve(buf, buf->len + x) * */ AWS_COMMON_API int aws_byte_buf_reserve_relative(struct aws_byte_buf *buffer, size_t additional_length); /** * Concatenates a variable number of struct aws_byte_buf * into destination. * Number of args must be greater than 1. If dest is too small, * AWS_ERROR_DEST_COPY_TOO_SMALL will be returned. dest->len will contain the * amount of data actually copied to dest. */ AWS_COMMON_API int aws_byte_buf_cat(struct aws_byte_buf *dest, size_t number_of_args, ...); /** * Compare two aws_byte_cursor structures. * Return whether their contents are equivalent. */ AWS_COMMON_API bool aws_byte_cursor_eq(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b); /** * Perform a case-insensitive string comparison of two aws_byte_cursor structures. * Return whether their contents are equivalent. * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_byte_cursor_eq_ignore_case(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b); /** * Compare an aws_byte_cursor and an aws_byte_buf. * Return whether their contents are equivalent. */ AWS_COMMON_API bool aws_byte_cursor_eq_byte_buf(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b); /** * Perform a case-insensitive string comparison of an aws_byte_cursor and an aws_byte_buf. * Return whether their contents are equivalent. * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_byte_cursor_eq_byte_buf_ignore_case(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b); /** * Compare an aws_byte_cursor and a null-terminated string. * Returns true if their contents are equivalent. 
* The cursor should NOT contain a null-terminator, or the comparison will always return false. */ AWS_COMMON_API bool aws_byte_cursor_eq_c_str(const struct aws_byte_cursor *const cursor, const char *const c_str); /** * Perform a case-insensitive string comparison of an aws_byte_cursor and a null-terminated string. * Return whether their contents are equivalent. * The cursor should NOT contain a null-terminator, or the comparison will always return false. * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str); /** * Return true if the input starts with the prefix (exact byte comparison). */ AWS_COMMON_API bool aws_byte_cursor_starts_with(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix); /** * Return true if the input starts with the prefix (case-insensitive). * The "C" locale is used for comparing upper and lowercase letters. * Data is assumed to be ASCII text, UTF-8 will work fine too. */ AWS_COMMON_API bool aws_byte_cursor_starts_with_ignore_case(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix); /** * Case-insensitive hash function for array containing ASCII or UTF-8 text. */ AWS_COMMON_API uint64_t aws_hash_array_ignore_case(const void *array, const size_t len); /** * Case-insensitive hash function for aws_byte_cursors stored in an aws_hash_table. * For case-sensitive hashing, use aws_hash_byte_cursor_ptr(). */ AWS_COMMON_API uint64_t aws_hash_byte_cursor_ptr_ignore_case(const void *item); /** * Returns a lookup table for bytes that is the identity transformation with the exception * of uppercase ascii characters getting replaced with lowercase characters. Used in * caseless comparisons. */ AWS_COMMON_API const uint8_t *aws_lookup_table_to_lower_get(void); /** * Returns lookup table to go from ASCII/UTF-8 hex character to a number (0-15). * Non-hex characters map to 255. * Valid examples: * '0' -> 0 * 'F' -> 15 * 'f' -> 15 * Invalid examples: * ' ' -> 255 * 'Z' -> 255 * '\0' -> 255 */ AWS_COMMON_API const uint8_t *aws_lookup_table_hex_to_num_get(void); /** * Lexical (byte value) comparison of two byte cursors */ AWS_COMMON_API int aws_byte_cursor_compare_lexical(const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs); /** * Lexical (byte value) comparison of two byte cursors where the raw values are sent through a lookup table first */ AWS_COMMON_API int aws_byte_cursor_compare_lookup( const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs, const uint8_t *lookup_table); /** * For creating a byte buffer from a null-terminated string literal. */ AWS_COMMON_API struct aws_byte_buf aws_byte_buf_from_c_str(const char *c_str); AWS_COMMON_API struct aws_byte_buf aws_byte_buf_from_array(const void *bytes, size_t len); AWS_COMMON_API struct aws_byte_buf aws_byte_buf_from_empty_array(const void *bytes, size_t capacity); AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_from_buf(const struct aws_byte_buf *const buf); AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_from_c_str(const char *c_str); AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_from_array(const void *const bytes, const size_t len); /** * Tests if the given aws_byte_cursor has at least len bytes remaining. 
If so, * *buf is advanced by len bytes (incrementing ->ptr and decrementing ->len), * and an aws_byte_cursor referring to the first len bytes of the original *buf * is returned. Otherwise, an aws_byte_cursor with ->ptr = NULL, ->len = 0 is * returned. * * Note that if len is above (SIZE_MAX / 2), this function will also treat it as * a buffer overflow, and return NULL without changing *buf. */ AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cursor, const size_t len); /** * Behaves identically to aws_byte_cursor_advance, but avoids speculative * execution potentially reading out-of-bounds pointers (by returning an * empty ptr in such speculated paths). * * This should generally be done when using an untrusted or * data-dependent value for 'len', to avoid speculating into a path where * cursor->ptr points outside the true ptr length. */ AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *const cursor, size_t len); /** * Reads specified length of data from byte cursor and copies it to the * destination array. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read( struct aws_byte_cursor *AWS_RESTRICT cur, void *AWS_RESTRICT dest, const size_t len); /** * Reads as many bytes from cursor as size of buffer, and copies them to buffer. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_and_fill_buffer( struct aws_byte_cursor *AWS_RESTRICT cur, struct aws_byte_buf *AWS_RESTRICT dest); /** * Reads a single byte from cursor, placing it in *var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_u8(struct aws_byte_cursor *AWS_RESTRICT cur, uint8_t *AWS_RESTRICT var); /** * Reads a 16-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_be16(struct aws_byte_cursor *cur, uint16_t *var); /** * Reads an unsigned 24-bit value (3 bytes) in network byte order from cur, * and places it in host byte order into 32-bit var. * Ex: if cur's next 3 bytes are {0xAA, 0xBB, 0xCC}, then var becomes 0x00AABBCC. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_be24(struct aws_byte_cursor *cur, uint32_t *var); /** * Reads a 32-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_be32(struct aws_byte_cursor *cur, uint32_t *var); /** * Reads a 64-bit value in network byte order from cur, and places it in host * byte order into var. 
* * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_be64(struct aws_byte_cursor *cur, uint64_t *var); /** * Reads a 32-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_float_be32(struct aws_byte_cursor *cur, float *var); /** * Reads a 64-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_float_be64(struct aws_byte_cursor *cur, double *var); /** * Reads 2 hex characters from ASCII/UTF-8 text to produce an 8-bit number. * Accepts both lowercase 'a'-'f' and uppercase 'A'-'F'. * For example: "0F" produces 15. * * On success, returns true and advances the cursor by 2. * If there is insufficient space in the cursor or an invalid character * is encountered, returns false, leaving the cursor unchanged. */ AWS_COMMON_API bool aws_byte_cursor_read_hex_u8(struct aws_byte_cursor *cur, uint8_t *var); /** * Appends a sub-buffer to the specified buffer. * * If the buffer has at least `len' bytes remaining (buffer->capacity - buffer->len >= len), * then buffer->len is incremented by len, and an aws_byte_buf is assigned to *output corresponding * to the last len bytes of the input buffer. The aws_byte_buf at *output will have a null * allocator, a zero initial length, and a capacity of 'len'. The function then returns true. * * If there is insufficient space, then this function nulls all fields in *output and returns * false. */ AWS_COMMON_API bool aws_byte_buf_advance( struct aws_byte_buf *const AWS_RESTRICT buffer, struct aws_byte_buf *const AWS_RESTRICT output, const size_t len); /** * Write specified number of bytes from array to byte buffer. * * On success, returns true and updates the buffer length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write( struct aws_byte_buf *AWS_RESTRICT buf, const uint8_t *AWS_RESTRICT src, size_t len); /** * Copies all bytes from buffer to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_from_whole_buffer( struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_buf src); /** * Copies all bytes from buffer to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_from_whole_cursor( struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_cursor src); /** * Without increasing buf's capacity, write as much as possible from advancing_cursor into buf. * * buf's len is updated accordingly. * advancing_cursor is advanced so it contains the remaining unwritten parts. * Returns the section of advancing_cursor which was written. * * This function cannot fail. 
If buf is full (len == capacity) or advancing_len has 0 length, * then buf and advancing_cursor are not altered and a cursor with 0 length is returned. * * Example: Given a buf with 2 bytes of space available and advancing_cursor with contents "abc". * "ab" will be written to buf and buf->len will increase 2 and become equal to buf->capacity. * advancing_cursor will advance so its contents become the unwritten "c". * The returned cursor's contents will be the "ab" from the original advancing_cursor. */ AWS_COMMON_API struct aws_byte_cursor aws_byte_buf_write_to_capacity( struct aws_byte_buf *buf, struct aws_byte_cursor *advancing_cursor); /** * Copies one byte to buffer. * * On success, returns true and updates the cursor /length accordingly. * * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_u8(struct aws_byte_buf *AWS_RESTRICT buf, uint8_t c); /** * Writes one byte repeatedly to buffer (like memset) * * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_u8_n(struct aws_byte_buf *buf, uint8_t c, size_t count); /** * Writes a 16-bit integer in network byte order (big endian) to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_be16(struct aws_byte_buf *buf, uint16_t x); /** * Writes low 24-bits (3 bytes) of an unsigned integer in network byte order (big endian) to buffer. * Ex: If x is 0x00AABBCC then {0xAA, 0xBB, 0xCC} is written to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, or x's value cannot fit in 3 bytes, * returns false, leaving the buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_be24(struct aws_byte_buf *buf, uint32_t x); /** * Writes a 32-bit integer in network byte order (big endian) to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_be32(struct aws_byte_buf *buf, uint32_t x); /** * Writes a 32-bit float in network byte order (big endian) to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_float_be32(struct aws_byte_buf *buf, float x); /** * Writes a 64-bit integer in network byte order (big endian) to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_be64(struct aws_byte_buf *buf, uint64_t x); /** * Writes a 64-bit float in network byte order (big endian) to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_float_be64(struct aws_byte_buf *buf, double x); /** * Like isalnum(), but ignores C locale. * Returns true if ch has the value of ASCII/UTF-8: 'a'-'z', 'A'-'Z', or '0'-'9'. */ AWS_COMMON_API bool aws_isalnum(uint8_t ch); /** * Like isalpha(), but ignores C locale. 
* Returns true if ch has the value of ASCII/UTF-8: 'a'-'z' or 'A'-'Z'. */ AWS_COMMON_API bool aws_isalpha(uint8_t ch); /** * Like isdigit(). * Returns true if ch has the value of ASCII/UTF-8: '0'-'9'. * * Note: C's built-in isdigit() is also supposed to ignore the C locale, * but cppreference.com claims "some implementations (e.g. Microsoft in 1252 codepage) * may classify additional single-byte characters as digits" */ AWS_COMMON_API bool aws_isdigit(uint8_t ch); /** * Like isxdigit(). * Returns true if ch has the value of ASCII/UTF-8: '0'-'9', 'a'-'f', or 'A'-'F'. * * Note: C's built-in isxdigit() is also supposed to ignore the C locale, * but cppreference.com claims "some implementations (e.g. Microsoft in 1252 codepage) * may classify additional single-byte characters as digits" */ AWS_COMMON_API bool aws_isxdigit(uint8_t ch); /** * Like isspace(), but ignores C locale. * Return true if ch has the value of ASCII/UTF-8: space (0x20), form feed (0x0C), * line feed (0x0A), carriage return (0x0D), horizontal tab (0x09), or vertical tab (0x0B). */ AWS_COMMON_API bool aws_isspace(uint8_t ch); /** * Read entire cursor as ASCII/UTF-8 unsigned base-10 number. * Stricter than strtoull(), which allows whitespace and inputs that start with "0x" * * Examples: * "0" -> 0 * "123" -> 123 * "00004" -> 4 // leading zeros ok * * Rejects things like: * "-1" // negative numbers not allowed * "1,000" // only characters 0-9 allowed * "" // blank string not allowed * " 0 " // whitespace not allowed * "0x0" // hex not allowed * "FF" // hex not allowed * "999999999999999999999999999999999999999999" // larger than max u64 */ AWS_COMMON_API int aws_byte_cursor_utf8_parse_u64(struct aws_byte_cursor cursor, uint64_t *dst); /** * Read entire cursor as ASCII/UTF-8 unsigned base-16 number with NO "0x" prefix. * * Examples: * "F" -> 15 * "000000ff" -> 255 // leading zeros ok * "Ff" -> 255 // mixed case ok * "123" -> 291 * "FFFFFFFFFFFFFFFF" -> 18446744073709551616 // max u64 * * Rejects things like: * "0x0" // 0x prefix not allowed * "" // blank string not allowed * " F " // whitespace not allowed * "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" // larger than max u64 */ AWS_COMMON_API int aws_byte_cursor_utf8_parse_u64_hex(struct aws_byte_cursor cursor, uint64_t *dst); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_BYTE_BUF_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/byte_order.h000066400000000000000000000035561456575232400265450ustar00rootroot00000000000000#ifndef AWS_COMMON_BYTE_ORDER_H #define AWS_COMMON_BYTE_ORDER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * Returns 1 if machine is big endian, 0 if little endian. * If you compile with even -O1 optimization, this check is completely optimized * out at compile time and code which calls "if (aws_is_big_endian())" will do * the right thing without branching. */ AWS_STATIC_IMPL int aws_is_big_endian(void); /** * Convert 64 bit integer from host to network byte order. */ AWS_STATIC_IMPL uint64_t aws_hton64(uint64_t x); /** * Convert 64 bit integer from network to host byte order. */ AWS_STATIC_IMPL uint64_t aws_ntoh64(uint64_t x); /** * Convert 32 bit integer from host to network byte order. */ AWS_STATIC_IMPL uint32_t aws_hton32(uint32_t x); /** * Convert 32 bit float from host to network byte order. 
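 *
 * Example usage (an illustrative sketch, not part of this header):
 *   float on_wire = aws_htonf32(1.5f);  // bytes arranged in network (big-endian) order
 *   float back = aws_ntohf32(on_wire);  // round-trips back to 1.5f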
*/ AWS_STATIC_IMPL float aws_htonf32(float x); /** * Convert 64 bit double from host to network byte order. */ AWS_STATIC_IMPL double aws_htonf64(double x); /** * Convert 32 bit integer from network to host byte order. */ AWS_STATIC_IMPL uint32_t aws_ntoh32(uint32_t x); /** * Convert 32 bit float from network to host byte order. */ AWS_STATIC_IMPL float aws_ntohf32(float x); /** * Convert 64 bit double from network to host byte order. */ AWS_STATIC_IMPL double aws_ntohf64(double x); /** * Convert 16 bit integer from host to network byte order. */ AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x); /** * Convert 16 bit integer from network to host byte order. */ AWS_STATIC_IMPL uint16_t aws_ntoh16(uint16_t x); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_BYTE_ORDER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/byte_order.inl000066400000000000000000000071711456575232400270750ustar00rootroot00000000000000#ifndef AWS_COMMON_BYTE_ORDER_INL #define AWS_COMMON_BYTE_ORDER_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #ifdef _WIN32 # include #else # include #endif /* _MSC_VER */ AWS_EXTERN_C_BEGIN /** * Returns 1 if machine is big endian, 0 if little endian. * If you compile with even -O1 optimization, this check is completely optimized * out at compile time and code which calls "if (aws_is_big_endian())" will do * the right thing without branching. */ AWS_STATIC_IMPL int aws_is_big_endian(void) { const uint16_t z = 0x100; return *(const uint8_t *)&z; } /** * Convert 64 bit integer from host to network byte order. */ AWS_STATIC_IMPL uint64_t aws_hton64(uint64_t x) { if (aws_is_big_endian()) { return x; } #if defined(__x86_64__) && (defined(__GNUC__) || defined(__clang__)) && !defined(CBMC) uint64_t v; __asm__("bswap %q0" : "=r"(v) : "0"(x)); return v; #elif defined(_MSC_VER) return _byteswap_uint64(x); #else uint32_t low = x & UINT32_MAX; uint32_t high = (uint32_t)(x >> 32); return ((uint64_t)htonl(low)) << 32 | htonl(high); #endif } /** * Convert 64 bit integer from network to host byte order. */ AWS_STATIC_IMPL uint64_t aws_ntoh64(uint64_t x) { return aws_hton64(x); } /** * Convert 32 bit integer from host to network byte order. */ AWS_STATIC_IMPL uint32_t aws_hton32(uint32_t x) { #ifdef _WIN32 return aws_is_big_endian() ? x : _byteswap_ulong(x); #else return htonl(x); #endif } /** * Convert 32 bit float from host to network byte order. */ AWS_STATIC_IMPL float aws_htonf32(float x) { if (aws_is_big_endian()) { return x; } uint8_t *f_storage = (uint8_t *)&x; float ret_value; uint8_t *ret_storage = (uint8_t *)&ret_value; ret_storage[0] = f_storage[3]; ret_storage[1] = f_storage[2]; ret_storage[2] = f_storage[1]; ret_storage[3] = f_storage[0]; return ret_value; } /** * Convert 64 bit double from host to network byte order. */ AWS_STATIC_IMPL double aws_htonf64(double x) { if (aws_is_big_endian()) { return x; } uint8_t *f_storage = (uint8_t *)&x; double ret_value; uint8_t *ret_storage = (uint8_t *)&ret_value; ret_storage[0] = f_storage[7]; ret_storage[1] = f_storage[6]; ret_storage[2] = f_storage[5]; ret_storage[3] = f_storage[4]; ret_storage[4] = f_storage[3]; ret_storage[5] = f_storage[2]; ret_storage[6] = f_storage[1]; ret_storage[7] = f_storage[0]; return ret_value; } /** * Convert 32 bit integer from network to host byte order.
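 *
 * Example usage (an illustrative sketch, not part of this file):
 *   uint32_t wire = aws_hton32(0x01020304); // in-memory bytes become 01 02 03 04
 *   uint32_t host = aws_ntoh32(wire);       // 0x01020304 again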
*/ AWS_STATIC_IMPL uint32_t aws_ntoh32(uint32_t x) { #ifdef _WIN32 return aws_is_big_endian() ? x : _byteswap_ulong(x); #else return ntohl(x); #endif } /** * Convert 32 bit float from network to host byte order. */ AWS_STATIC_IMPL float aws_ntohf32(float x) { return aws_htonf32(x); } /** * Convert 64 bit double from network to host byte order. */ AWS_STATIC_IMPL double aws_ntohf64(double x) { return aws_htonf64(x); } /** * Convert 16 bit integer from host to network byte order. */ AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x) { #ifdef _WIN32 return aws_is_big_endian() ? x : _byteswap_ushort(x); #else return htons(x); #endif } /** * Convert 16 bit integer from network to host byte order. */ AWS_STATIC_IMPL uint16_t aws_ntoh16(uint16_t x) { #ifdef _WIN32 return aws_is_big_endian() ? x : _byteswap_ushort(x); #else return ntohs(x); #endif } AWS_EXTERN_C_END #endif /* AWS_COMMON_BYTE_ORDER_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/cache.h000066400000000000000000000050451456575232400254450ustar00rootroot00000000000000#ifndef AWS_COMMON_CACHE_H #define AWS_COMMON_CACHE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_cache; struct aws_cache_vtable { void (*destroy)(struct aws_cache *cache); int (*find)(struct aws_cache *cache, const void *key, void **p_value); int (*put)(struct aws_cache *cache, const void *key, void *p_value); int (*remove)(struct aws_cache *cache, const void *key); void (*clear)(struct aws_cache *cache); size_t (*get_element_count)(const struct aws_cache *cache); }; /** * Base structure for caches, using the linked hash table implementation. */ struct aws_cache { struct aws_allocator *allocator; const struct aws_cache_vtable *vtable; struct aws_linked_hash_table table; size_t max_items; void *impl; }; /* Default implementations */ void aws_cache_base_default_destroy(struct aws_cache *cache); int aws_cache_base_default_find(struct aws_cache *cache, const void *key, void **p_value); int aws_cache_base_default_remove(struct aws_cache *cache, const void *key); void aws_cache_base_default_clear(struct aws_cache *cache); size_t aws_cache_base_default_get_element_count(const struct aws_cache *cache); AWS_EXTERN_C_BEGIN /** * Cleans up the cache. Elements in the cache will be evicted and cleanup * callbacks will be invoked. */ AWS_COMMON_API void aws_cache_destroy(struct aws_cache *cache); /** * Finds element in the cache by key. If found, *p_value will hold the stored value, and AWS_OP_SUCCESS will be * returned. If not found, AWS_OP_SUCCESS will be returned and *p_value will be NULL. * * If any errors occur AWS_OP_ERR will be returned. */ AWS_COMMON_API int aws_cache_find(struct aws_cache *cache, const void *key, void **p_value); /** * Puts `p_value` at `key`. If an element is already stored at `key` it will be replaced. If the cache is already full, * an item will be removed based on the cache policy. */ AWS_COMMON_API int aws_cache_put(struct aws_cache *cache, const void *key, void *p_value); /** * Removes item at `key` from the cache. */ AWS_COMMON_API int aws_cache_remove(struct aws_cache *cache, const void *key); /** * Clears all items from the cache. */ AWS_COMMON_API void aws_cache_clear(struct aws_cache *cache); /** * Returns the number of elements in the cache.
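 *
 * Example usage (an illustrative sketch, not part of this header; assumes the LRU variant from
 * aws/common/lru_cache.h and the hash/equality helpers from aws/common/hash_table.h, and that
 * "allocator" and "value" are valid; error checking omitted):
 *   struct aws_cache *cache = aws_cache_new_lru(
 *       allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 100);
 *   aws_cache_put(cache, "key", value);
 *   size_t count = aws_cache_get_element_count(cache); // 1
 *   aws_cache_destroy(cache);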
*/ AWS_COMMON_API size_t aws_cache_get_element_count(const struct aws_cache *cache); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_CACHE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/clock.h000066400000000000000000000042751456575232400255010ustar00rootroot00000000000000#ifndef AWS_COMMON_CLOCK_H #define AWS_COMMON_CLOCK_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_timestamp_unit { AWS_TIMESTAMP_SECS = 1, AWS_TIMESTAMP_MILLIS = 1000, AWS_TIMESTAMP_MICROS = 1000000, AWS_TIMESTAMP_NANOS = 1000000000, }; AWS_EXTERN_C_BEGIN /** * Converts 'timestamp' from unit 'convert_from' to unit 'convert_to', if the units are the same then 'timestamp' is * returned. If 'remainder' is NOT NULL, it will be set to the remainder if convert_from is a more precise unit than * convert_to. To avoid unnecessary branching, 'remainder' is not zero initialized in this function, be sure to set it * to 0 first if you care about that kind of thing. If conversion would lead to integer overflow, the timestamp * returned will be the highest possible time that is representable, i.e. UINT64_MAX. */ AWS_STATIC_IMPL uint64_t aws_timestamp_convert( uint64_t timestamp, enum aws_timestamp_unit convert_from, enum aws_timestamp_unit convert_to, uint64_t *remainder); /** * More general form of aws_timestamp_convert that takes arbitrary frequencies rather than the timestamp enum. */ AWS_STATIC_IMPL uint64_t aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder); /** * Get ticks in nanoseconds (usually 100 nanosecond precision) on the high resolution clock (most-likely TSC). This * clock has no bearing on the actual system time. On success, timestamp will be set. */ AWS_COMMON_API int aws_high_res_clock_get_ticks(uint64_t *timestamp); /** * Get ticks in nanoseconds (usually 100 nanosecond precision) on the system clock. Reflects actual system time via * nanoseconds since unix epoch. Use with care since an inaccurately set clock will probably cause bugs. On success, * timestamp will be set. */ AWS_COMMON_API int aws_sys_clock_get_ticks(uint64_t *timestamp); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_CLOCK_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/clock.inl000066400000000000000000000076471456575232400260420ustar00rootroot00000000000000#ifndef AWS_COMMON_CLOCK_INL #define AWS_COMMON_CLOCK_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_EXTERN_C_BEGIN /** * Converts 'timestamp' from unit 'convert_from' to unit 'convert_to', if the units are the same then 'timestamp' is * returned. If 'remainder' is NOT NULL, it will be set to the remainder if convert_from is a more precise unit than * convert_to (but only if the old frequency is a multiple of the new one). If conversion would lead to integer * overflow, the timestamp returned will be the highest possible time that is representable, i.e. UINT64_MAX. 
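 *
 * Example usage (an illustrative sketch, not part of this file):
 *   uint64_t remainder = 0;
 *   uint64_t secs = aws_timestamp_convert_u64(2500, 1000, 1, &remainder);
 *   // 2500 ticks at 1000 Hz -> secs == 2, remainder == 500 (in ticks of the old frequency)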
*/ AWS_STATIC_IMPL uint64_t aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder) { AWS_FATAL_ASSERT(old_frequency > 0 && new_frequency > 0); /* * The remainder, as defined in the contract of the original version of this function, only makes mathematical * sense when the old frequency is a positive multiple of the new frequency. The new convert function needs to be * backwards compatible with the old version's remainder while being a lot more accurate with its conversions * in order to handle extreme edge cases of large numbers. */ if (remainder != NULL) { *remainder = 0; /* only calculate remainder when going from a higher to lower frequency */ if (new_frequency < old_frequency) { uint64_t frequency_remainder = old_frequency % new_frequency; /* only calculate remainder when the old frequency is evenly divisible by the new one */ if (frequency_remainder == 0) { uint64_t frequency_ratio = old_frequency / new_frequency; *remainder = ticks % frequency_ratio; } } } /* * Now do the actual conversion. */ uint64_t old_seconds_elapsed = ticks / old_frequency; uint64_t old_remainder = ticks - old_seconds_elapsed * old_frequency; uint64_t new_ticks_whole_part = aws_mul_u64_saturating(old_seconds_elapsed, new_frequency); /* * This could be done in one of three ways: * * (1) (old_remainder / old_frequency) * new_frequency - this would be completely wrong since we know that * (old_remainder / old_frequency) < 1 = 0 * * (2) old_remainder * (new_frequency / old_frequency) - this only gives a good solution when new_frequency is * a multiple of old_frequency * * (3) (old_remainder * new_frequency) / old_frequency - this is how we do it below, the primary concern is if * the initial multiplication can overflow. For that to be the case, we would need to be using old and new * frequencies in the billions. This does not appear to be the case in any current machine's hardware counters. * * Ignoring arbitrary frequencies, even a nanosecond to nanosecond conversion would not overflow either. * * If this did become an issue, we would potentially need to use intrinsics/platform support for 128 bit math. * * For review consideration: * (1) should we special case frequencies being a multiple of the other? * (2) should we special case frequencies being the same? A ns-to-ns conversion does the full math and * approaches overflow (but cannot actually do so). */ uint64_t new_ticks_remainder_part = aws_mul_u64_saturating(old_remainder, new_frequency) / old_frequency; return aws_add_u64_saturating(new_ticks_whole_part, new_ticks_remainder_part); } AWS_STATIC_IMPL uint64_t aws_timestamp_convert( uint64_t timestamp, enum aws_timestamp_unit convert_from, enum aws_timestamp_unit convert_to, uint64_t *remainder) { return aws_timestamp_convert_u64(timestamp, convert_from, convert_to, remainder); } AWS_EXTERN_C_END #endif /* AWS_COMMON_CLOCK_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/command_line_parser.h000066400000000000000000000105111456575232400303750ustar00rootroot00000000000000#ifndef AWS_COMMON_COMMAND_LINE_PARSER_H #define AWS_COMMON_COMMAND_LINE_PARSER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_cli_options_has_arg { AWS_CLI_OPTIONS_NO_ARGUMENT = 0, AWS_CLI_OPTIONS_REQUIRED_ARGUMENT = 1, AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT = 2, }; /** * Invoked when a subcommand is encountered. 
argc and argv[] begins at the command encountered. * command_name is the name of the command being handled. */ typedef int(aws_cli_options_subcommand_fn)(int argc, char *const argv[], const char *command_name, void *user_data); /** * Dispatch table to dispatch cli commands from. * command_name should be the exact string for the command you want to handle from the command line. */ struct aws_cli_subcommand_dispatch { aws_cli_options_subcommand_fn *subcommand_fn; const char *command_name; }; /* Ignoring padding since we're trying to maintain getopt.h compatibility */ /* NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) */ struct aws_cli_option { const char *name; enum aws_cli_options_has_arg has_arg; int *flag; int val; }; AWS_EXTERN_C_BEGIN /** * Initialized to 1 (for where the first argument would be). As arguments are parsed, this number is the index * of the next argument to parse. Reset this to 1 to parse another set of arguments, or to rerun the parser. */ AWS_COMMON_API extern int aws_cli_optind; /** * If an option has an argument, when the option is encountered, this will be set to the argument portion. */ AWS_COMMON_API extern const char *aws_cli_optarg; /** * If 0x02 was returned by aws_cli_getopt_long(), this value will be set to the argument encountered. */ AWS_COMMON_API extern const char *aws_cli_positional_arg; /** * A mostly compliant implementation of posix getopt_long(). Parses command-line arguments. argc is the number of * command line arguments passed in argv. optstring contains the legitimate option characters. The option characters * correspond to aws_cli_option::val. If the character is followed by a :, the option requires an argument. If it is * followed by '::', the argument is optional (not implemented yet). * * longopts, is an array of struct aws_cli_option. These are the allowed options for the program. * The last member of the array must be zero initialized. * * If longindex is non-null, it will be set to the index in longopts, for the found option. * * Returns option val if it was found, '?' if an option was encountered that was not specified in the option string, * 0x02 (START_OF_TEXT) will be returned if a positional argument was encountered. returns -1 when all arguments that * can be parsed have been parsed. */ AWS_COMMON_API int aws_cli_getopt_long( int argc, char *const argv[], const char *optstring, const struct aws_cli_option *longopts, int *longindex); /** * Resets global parser state for use in another parser run for the application. */ AWS_COMMON_API void aws_cli_reset_state(void); /** * Dispatches the current command line arguments with a subcommand from the second input argument in argv[], if * dispatch table contains a command that matches the argument. When the command is dispatched, argc and argv will be * updated to reflect the new argument count. The cli options are required to come after the subcommand. If either, no * dispatch was found or there was no argument passed to the program, this function will return AWS_OP_ERR. Check * aws_last_error() for details on the error. * @param argc number of arguments passed to int main() * @param argv the arguments passed to int main() * @param parse_cb, optional, specify NULL if you don't want to handle this. This argument is for parsing "meta" * commands from the command line options prior to dispatch occurring. * @param dispatch_table table containing functions and command name to dispatch on. * @param table_length number of entries in dispatch_table. 
* @return AWS_OP_SUCCESS(0) on success, AWS_OP_ERR(-1) on failure */ AWS_COMMON_API int aws_cli_dispatch_on_subcommand( int argc, char *const argv[], struct aws_cli_subcommand_dispatch *dispatch_table, int table_length, void *user_data); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_COMMAND_LINE_PARSER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/common.h000066400000000000000000000022161456575232400256670ustar00rootroot00000000000000#ifndef AWS_COMMON_COMMON_H #define AWS_COMMON_COMMON_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for abort() */ #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * Initializes internal data structures used by aws-c-common. * Must be called before using any functionality in aws-c-common. */ AWS_COMMON_API void aws_common_library_init(struct aws_allocator *allocator); /** * Shuts down the internal data structures used by aws-c-common. */ AWS_COMMON_API void aws_common_library_clean_up(void); AWS_COMMON_API void aws_common_fatal_assert_library_initialized(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_COMMON_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/condition_variable.h000066400000000000000000000063521456575232400302370ustar00rootroot00000000000000#ifndef AWS_COMMON_CONDITION_VARIABLE_H #define AWS_COMMON_CONDITION_VARIABLE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #ifndef _WIN32 # include #endif AWS_PUSH_SANE_WARNING_LEVEL struct aws_mutex; struct aws_condition_variable; typedef bool(aws_condition_predicate_fn)(void *); struct aws_condition_variable { #ifdef _WIN32 void *condition_handle; #else pthread_cond_t condition_handle; #endif bool initialized; }; /** * Static initializer for condition variable. * You can do something like struct aws_condition_variable var = * AWS_CONDITION_VARIABLE_INIT; * * If on Windows and you get an error about AWS_CONDITION_VARIABLE_INIT being undefined, please include windows.h to get * CONDITION_VARIABLE_INIT. */ #ifdef _WIN32 # define AWS_CONDITION_VARIABLE_INIT \ { .condition_handle = NULL, .initialized = true } #else # define AWS_CONDITION_VARIABLE_INIT \ { .condition_handle = PTHREAD_COND_INITIALIZER, .initialized = true } #endif AWS_EXTERN_C_BEGIN /** * Initializes a condition variable. */ AWS_COMMON_API int aws_condition_variable_init(struct aws_condition_variable *condition_variable); /** * Cleans up a condition variable. */ AWS_COMMON_API void aws_condition_variable_clean_up(struct aws_condition_variable *condition_variable); /** * Notifies/Wakes one waiting thread */ AWS_COMMON_API int aws_condition_variable_notify_one(struct aws_condition_variable *condition_variable); /** * Notifies/Wakes all waiting threads. */ AWS_COMMON_API int aws_condition_variable_notify_all(struct aws_condition_variable *condition_variable); /** * Waits the calling thread on a notification from another thread. */ AWS_COMMON_API int aws_condition_variable_wait(struct aws_condition_variable *condition_variable, struct aws_mutex *mutex); /** * Waits the calling thread on a notification from another thread. If predicate returns false, the wait is reentered, * otherwise control returns to the caller. 
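 *
 * Example usage (an illustrative sketch, not part of this header; "mutex", "cvar" and "done" are
 * assumed to be initialized elsewhere, and the mutex must already be held by the calling thread):
 *   static bool s_is_done(void *arg) { return *(bool *)arg; }
 *   ...
 *   aws_mutex_lock(&mutex);
 *   aws_condition_variable_wait_pred(&cvar, &mutex, s_is_done, &done);
 *   aws_mutex_unlock(&mutex);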
*/ AWS_COMMON_API int aws_condition_variable_wait_pred( struct aws_condition_variable *condition_variable, struct aws_mutex *mutex, aws_condition_predicate_fn *pred, void *pred_ctx); /** * Waits the calling thread on a notification from another thread. Times out after time_to_wait. time_to_wait is in * nanoseconds. */ AWS_COMMON_API int aws_condition_variable_wait_for( struct aws_condition_variable *condition_variable, struct aws_mutex *mutex, int64_t time_to_wait); /** * Waits the calling thread on a notification from another thread. Times out after time_to_wait. time_to_wait is in * nanoseconds. If predicate returns false, the wait is reentered, otherwise control returns to the caller. */ AWS_COMMON_API int aws_condition_variable_wait_for_pred( struct aws_condition_variable *condition_variable, struct aws_mutex *mutex, int64_t time_to_wait, aws_condition_predicate_fn *pred, void *pred_ctx); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_CONDITION_VARIABLE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/config.h.in000066400000000000000000000015041456575232400262500ustar00rootroot00000000000000#ifndef AWS_COMMON_CONFIG_H #define AWS_COMMON_CONFIG_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header exposes compiler feature test results determined during cmake * configure time to inline function implementations. The macros defined here * should be considered to be an implementation detail, and can change at any * time. */ #cmakedefine AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS #cmakedefine AWS_HAVE_GCC_INLINE_ASM #cmakedefine AWS_HAVE_MSVC_INTRINSICS_X64 #cmakedefine AWS_HAVE_POSIX_LARGE_FILE_SUPPORT #cmakedefine AWS_HAVE_EXECINFO #cmakedefine AWS_HAVE_WINAPI_DESKTOP #cmakedefine AWS_HAVE_LINUX_IF_LINK_H #cmakedefine AWS_HAVE_AVX2_INTRINSICS #cmakedefine AWS_HAVE_AVX512_INTRINSICS #cmakedefine AWS_HAVE_MM256_EXTRACT_EPI64 #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/cpuid.h000066400000000000000000000014021456575232400254770ustar00rootroot00000000000000#ifndef AWS_COMMON_CPUID_H #define AWS_COMMON_CPUID_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_cpu_feature_name { AWS_CPU_FEATURE_CLMUL, AWS_CPU_FEATURE_SSE_4_1, AWS_CPU_FEATURE_SSE_4_2, AWS_CPU_FEATURE_AVX2, AWS_CPU_FEATURE_AVX512, AWS_CPU_FEATURE_ARM_CRC, AWS_CPU_FEATURE_BMI2, AWS_CPU_FEATURE_VPCLMULQDQ, AWS_CPU_FEATURE_COUNT, }; AWS_EXTERN_C_BEGIN /** * Returns true if a cpu feature is supported, false otherwise. */ AWS_COMMON_API bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_CPUID_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/cross_process_lock.h000066400000000000000000000023441456575232400303000ustar00rootroot00000000000000#ifndef AWS_COMMON_CROSS_PROCESS_LOCK_H #define AWS_COMMON_CROSS_PROCESS_LOCK_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct aws_cross_process_lock; AWS_EXTERN_C_BEGIN /** * Attempts to acquire a system-wide (not per process or per user) lock scoped by instance_nonce. * For any given unique nonce, a lock will be returned by the first caller. 
Subsequent calls will * return NULL and raise AWS_ERROR_MUTEX_CALLER_NOT_OWNER * until the either the process owning the lock exits or the program owning the lock * calls aws_cross_process_lock_release() explicitly. * * If the process exits before the lock is released, the kernel will unlock it for the next consumer. */ AWS_COMMON_API struct aws_cross_process_lock *aws_cross_process_lock_try_acquire( struct aws_allocator *allocator, struct aws_byte_cursor instance_nonce); /** * Releases the lock so the next caller (may be another process) can get an instance of the lock. */ AWS_COMMON_API void aws_cross_process_lock_release(struct aws_cross_process_lock *instance_lock); AWS_EXTERN_C_END #endif /* AWS_COMMON_CROSS_PROCESS_LOCK_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/date_time.h000066400000000000000000000132101456575232400263260ustar00rootroot00000000000000#ifndef AWS_COMMON_DATE_TIME_H #define AWS_COMMON_DATE_TIME_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL enum { AWS_DATE_TIME_STR_MAX_LEN = 100, AWS_DATE_TIME_STR_MAX_BASIC_LEN = 20, }; struct aws_byte_buf; struct aws_byte_cursor; enum aws_date_format { AWS_DATE_FORMAT_RFC822, AWS_DATE_FORMAT_ISO_8601, AWS_DATE_FORMAT_ISO_8601_BASIC, AWS_DATE_FORMAT_AUTO_DETECT, }; enum aws_date_month { AWS_DATE_MONTH_JANUARY = 0, AWS_DATE_MONTH_FEBRUARY, AWS_DATE_MONTH_MARCH, AWS_DATE_MONTH_APRIL, AWS_DATE_MONTH_MAY, AWS_DATE_MONTH_JUNE, AWS_DATE_MONTH_JULY, AWS_DATE_MONTH_AUGUST, AWS_DATE_MONTH_SEPTEMBER, AWS_DATE_MONTH_OCTOBER, AWS_DATE_MONTH_NOVEMBER, AWS_DATE_MONTH_DECEMBER, }; enum aws_date_day_of_week { AWS_DATE_DAY_OF_WEEK_SUNDAY = 0, AWS_DATE_DAY_OF_WEEK_MONDAY, AWS_DATE_DAY_OF_WEEK_TUESDAY, AWS_DATE_DAY_OF_WEEK_WEDNESDAY, AWS_DATE_DAY_OF_WEEK_THURSDAY, AWS_DATE_DAY_OF_WEEK_FRIDAY, AWS_DATE_DAY_OF_WEEK_SATURDAY, }; struct aws_date_time { time_t timestamp; uint16_t milliseconds; char tz[6]; struct tm gmt_time; struct tm local_time; bool utc_assumed; }; AWS_EXTERN_C_BEGIN /** * Initializes dt to be the current system time. */ AWS_COMMON_API void aws_date_time_init_now(struct aws_date_time *dt); /** * Initializes dt to be the time represented in milliseconds since unix epoch. */ AWS_COMMON_API void aws_date_time_init_epoch_millis(struct aws_date_time *dt, uint64_t ms_since_epoch); /** * Initializes dt to be the time represented in seconds.millis since unix epoch. */ AWS_COMMON_API void aws_date_time_init_epoch_secs(struct aws_date_time *dt, double sec_ms); /** * Initializes dt to be the time represented by date_str in format 'fmt'. Returns AWS_OP_SUCCESS if the * string was successfully parsed, returns AWS_OP_ERR if parsing failed. * * Notes for AWS_DATE_FORMAT_RFC822: * If no time zone information is provided, it is assumed to be local time (please don't do this). * * If the time zone is something other than something indicating Universal Time (e.g. Z, UT, UTC, or GMT) or an offset * from UTC (e.g. +0100, -0700), parsing will fail. * * Really, it's just better if you always use Universal Time. 
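 *
 * A minimal parsing sketch (illustrative only; error handling abbreviated):
 *
 *     struct aws_date_time dt;
 *     struct aws_byte_buf date_buf = aws_byte_buf_from_c_str("2015-02-03T04:05:06Z");
 *     if (aws_date_time_init_from_str(&dt, &date_buf, AWS_DATE_FORMAT_ISO_8601) != AWS_OP_SUCCESS) {
 *         // parsing failed
 *     }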
*/ AWS_COMMON_API int aws_date_time_init_from_str( struct aws_date_time *dt, const struct aws_byte_buf *date_str, enum aws_date_format fmt); /** * aws_date_time_init variant that takes a byte_cursor rather than a byte_buf */ AWS_COMMON_API int aws_date_time_init_from_str_cursor( struct aws_date_time *dt, const struct aws_byte_cursor *date_str_cursor, enum aws_date_format fmt); /** * Copies the current time as a formatted date string in local time into output_buf. If buffer is too small, it will * return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not * allowed. */ AWS_COMMON_API int aws_date_time_to_local_time_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf); /** * Copies the current time as a formatted date string in utc time into output_buf. If buffer is too small, it will * return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not * allowed. */ AWS_COMMON_API int aws_date_time_to_utc_time_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf); /** * Copies the current time as a formatted short date string in local time into output_buf. If buffer is too small, it * will return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not * allowed. */ AWS_COMMON_API int aws_date_time_to_local_time_short_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf); /** * Copies the current time as a formatted short date string in utc time into output_buf. If buffer is too small, it will * return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not * allowed. */ AWS_COMMON_API int aws_date_time_to_utc_time_short_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf); AWS_COMMON_API double aws_date_time_as_epoch_secs(const struct aws_date_time *dt); AWS_COMMON_API uint64_t aws_date_time_as_nanos(const struct aws_date_time *dt); AWS_COMMON_API uint64_t aws_date_time_as_millis(const struct aws_date_time *dt); AWS_COMMON_API uint16_t aws_date_time_year(const struct aws_date_time *dt, bool local_time); AWS_COMMON_API enum aws_date_month aws_date_time_month(const struct aws_date_time *dt, bool local_time); AWS_COMMON_API uint8_t aws_date_time_month_day(const struct aws_date_time *dt, bool local_time); AWS_COMMON_API enum aws_date_day_of_week aws_date_time_day_of_week(const struct aws_date_time *dt, bool local_time); AWS_COMMON_API uint8_t aws_date_time_hour(const struct aws_date_time *dt, bool local_time); AWS_COMMON_API uint8_t aws_date_time_minute(const struct aws_date_time *dt, bool local_time); AWS_COMMON_API uint8_t aws_date_time_second(const struct aws_date_time *dt, bool local_time); AWS_COMMON_API bool aws_date_time_dst(const struct aws_date_time *dt, bool local_time); /** * returns the difference of a and b (a - b) in seconds. */ AWS_COMMON_API time_t aws_date_time_diff(const struct aws_date_time *a, const struct aws_date_time *b); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_DATE_TIME_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/device_random.h000066400000000000000000000026341456575232400272020ustar00rootroot00000000000000#ifndef AWS_COMMON_DEVICE_RANDOM_H #define AWS_COMMON_DEVICE_RANDOM_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_byte_buf; AWS_EXTERN_C_BEGIN /** * Get an unpredictably random 64bit number, suitable for cryptographic use. */ AWS_COMMON_API int aws_device_random_u64(uint64_t *output); /** * Get an unpredictably random 32bit number, suitable for cryptographic use. */ AWS_COMMON_API int aws_device_random_u32(uint32_t *output); /** * Get an unpredictably random 16bit number, suitable for cryptographic use. */ AWS_COMMON_API int aws_device_random_u16(uint16_t *output); /** * Get an unpredictably random 8bit number, suitable for cryptographic use. */ AWS_COMMON_API int aws_device_random_u8(uint8_t *output); /** * Fill the rest of a buffer with unpredictably random bytes, suitable for cryptographic use. */ AWS_COMMON_API int aws_device_random_buffer(struct aws_byte_buf *output); /** * Write N unpredictably random bytes to a buffer, suitable for cryptographic use. * If there is insufficient space in the buffer, AWS_ERROR_SHORT_BUFFER is raised * and the buffer will be unchanged. */ AWS_COMMON_API int aws_device_random_buffer_append(struct aws_byte_buf *output, size_t n); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_DEVICE_RANDOM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/encoding.h000066400000000000000000000210501456575232400261620ustar00rootroot00000000000000#ifndef AWS_COMMON_ENCODING_H #define AWS_COMMON_ENCODING_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /* * computes the length necessary to store the result of aws_hex_encode(). * returns -1 on failure, and 0 on success. encoded_length will be set on * success. */ AWS_COMMON_API int aws_hex_compute_encoded_len(size_t to_encode_len, size_t *encoded_length); /* * Base 16 (hex) encodes the contents of to_encode and stores the result in * output. 0 terminates the result. Assumes the buffer is empty and does not resize on * insufficient capacity. */ AWS_COMMON_API int aws_hex_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output); /* * Base 16 (hex) encodes the contents of to_encode and appends the result in * output. Does not 0-terminate. Grows the destination buffer dynamically if necessary. */ AWS_COMMON_API int aws_hex_encode_append_dynamic( const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output); /* * computes the length necessary to store the result of aws_hex_decode(). * returns -1 on failure, and 0 on success. decoded_len will be set on success. */ AWS_COMMON_API int aws_hex_compute_decoded_len(size_t to_decode_len, size_t *decoded_len); /* * Base 16 (hex) decodes the contents of to_decode and stores the result in * output. If output is NULL, output_size will be set to what the output_size * should be. */ AWS_COMMON_API int aws_hex_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, struct aws_byte_buf *AWS_RESTRICT output); /* * Computes the length necessary to store the output of aws_base64_encode call. * returns -1 on failure, and 0 on success. encoded_length will be set on * success. */ AWS_COMMON_API int aws_base64_compute_encoded_len(size_t to_encode_len, size_t *encoded_len); /* * Base 64 encodes the contents of to_encode and stores the result in output. 
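 * A typical call pattern, sizing the output buffer first (illustrative sketch;
 * error checks omitted and `allocator` is assumed to exist):
 *
 *     struct aws_byte_cursor input = aws_byte_cursor_from_c_str("hello");
 *     size_t encoded_len = 0;
 *     aws_base64_compute_encoded_len(input.len, &encoded_len);
 *     struct aws_byte_buf output;
 *     aws_byte_buf_init(&output, allocator, encoded_len);
 *     aws_base64_encode(&input, &output);
 *     aws_byte_buf_clean_up(&output);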
*/ AWS_COMMON_API int aws_base64_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output); /* * Computes the length necessary to store the output of aws_base64_decode call. * returns -1 on failure, and 0 on success. decoded_len will be set on success. */ AWS_COMMON_API int aws_base64_compute_decoded_len(const struct aws_byte_cursor *AWS_RESTRICT to_decode, size_t *decoded_len); /* * Base 64 decodes the contents of to_decode and stores the result in output. */ AWS_COMMON_API int aws_base64_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, struct aws_byte_buf *AWS_RESTRICT output); /* Add a 64 bit unsigned integer to the buffer, ensuring network - byte order * Assumes the buffer size is at least 8 bytes. */ AWS_STATIC_IMPL void aws_write_u64(uint64_t value, uint8_t *buffer); /* * Extracts a 64 bit unsigned integer from buffer. Ensures conversion from * network byte order to host byte order. Assumes buffer size is at least 8 * bytes. */ AWS_STATIC_IMPL uint64_t aws_read_u64(const uint8_t *buffer); /* Add a 32 bit unsigned integer to the buffer, ensuring network - byte order * Assumes the buffer size is at least 4 bytes. */ AWS_STATIC_IMPL void aws_write_u32(uint32_t value, uint8_t *buffer); /* * Extracts a 32 bit unsigned integer from buffer. Ensures conversion from * network byte order to host byte order. Assumes the buffer size is at least 4 * bytes. */ AWS_STATIC_IMPL uint32_t aws_read_u32(const uint8_t *buffer); /* Add a 24 bit unsigned integer to the buffer, ensuring network - byte order * return the new position in the buffer for the next operation. * Note, since this uses uint32_t for storage, the 3 least significant bytes * will be used. Assumes buffer is at least 3 bytes long. */ AWS_STATIC_IMPL void aws_write_u24(uint32_t value, uint8_t *buffer); /* * Extracts a 24 bit unsigned integer from buffer. Ensures conversion from * network byte order to host byte order. Assumes buffer is at least 3 bytes * long. */ AWS_STATIC_IMPL uint32_t aws_read_u24(const uint8_t *buffer); /* Add a 16 bit unsigned integer to the buffer, ensuring network-byte order * return the new position in the buffer for the next operation. * Assumes buffer is at least 2 bytes long. */ AWS_STATIC_IMPL void aws_write_u16(uint16_t value, uint8_t *buffer); /* * Extracts a 16 bit unsigned integer from buffer. Ensures conversion from * network byte order to host byte order. Assumes buffer is at least 2 bytes * long. */ AWS_STATIC_IMPL uint16_t aws_read_u16(const uint8_t *buffer); enum aws_text_encoding { AWS_TEXT_UNKNOWN, AWS_TEXT_UTF8, AWS_TEXT_UTF16, AWS_TEXT_UTF32, AWS_TEXT_ASCII, }; /* Checks the BOM in the buffer to see if encoding can be determined. If there is no BOM or * it is unrecognizable, then AWS_TEXT_UNKNOWN will be returned. */ AWS_STATIC_IMPL enum aws_text_encoding aws_text_detect_encoding(const uint8_t *bytes, size_t size); /* * Returns true if aws_text_detect_encoding() determines the text is UTF8 or ASCII. * Note that this immediately returns true if the UTF8 BOM is seen. * To fully validate every byte, use aws_decode_utf8(). */ AWS_STATIC_IMPL bool aws_text_is_utf8(const uint8_t *bytes, size_t size); struct aws_utf8_decoder_options { /** * Optional. * Callback invoked for each Unicode codepoint. * Use this callback to store codepoints as they're decoded, * or to perform additional validation. RFC-3629 is already enforced, * which forbids codepoints between U+D800 and U+DFFF, * but you may whish to forbid codepoints like U+0000. 
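 *
 * For example, a hypothetical callback that rejects embedded NUL codepoints:
 *
 *     static int s_reject_nul(uint32_t codepoint, void *user_data) {
 *         (void)user_data;
 *         if (codepoint == 0) {
 *             return aws_raise_error(AWS_ERROR_INVALID_UTF8);
 *         }
 *         return AWS_OP_SUCCESS;
 *     }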
* * @return AWS_OP_SUCCESS to continue processing the string, otherwise * return AWS_OP_ERROR and raise an error (i.e. AWS_ERROR_INVALID_UTF8) * to stop processing the string and report failure. */ int (*on_codepoint)(uint32_t codepoint, void *user_data); /* Optional. Pointer passed to on_codepoint callback. */ void *user_data; }; /** * Decode a complete string of UTF8/ASCII text. * Text is always validated according to RFC-3629 (you may perform additional * validation in the on_codepoint callback). * The text does not need to begin with a UTF8 BOM. * If you need to decode text incrementally as you receive it, use aws_utf8_decoder_new() instead. * * @param bytes Text to decode. * @param options Options for decoding. If NULL is passed, the text is simply validated. * * @return AWS_OP_SUCCESS if successful. * An error is raised if the text is not valid, or the on_codepoint callback raises an error. */ AWS_COMMON_API int aws_decode_utf8(struct aws_byte_cursor bytes, const struct aws_utf8_decoder_options *options); struct aws_utf8_decoder; /** * Create a UTF8/ASCII decoder, which can process text incrementally as you receive it. * Text is always validated according to RFC-3629 (you may perform additional * validation in the on_codepoint callback). * The text does not need to begin with a UTF8 BOM. * To decode text all at once, simply use aws_decode_utf8(). * * Feed bytes into the decoder with aws_utf8_decoder_update(), * and call aws_utf8_decoder_finalize() when the text is complete. * * @param allocator Allocator * @param options Options for decoder. If NULL is passed, the text is simply validated. */ AWS_COMMON_API struct aws_utf8_decoder *aws_utf8_decoder_new( struct aws_allocator *allocator, const struct aws_utf8_decoder_options *options); AWS_COMMON_API void aws_utf8_decoder_destroy(struct aws_utf8_decoder *decoder); AWS_COMMON_API void aws_utf8_decoder_reset(struct aws_utf8_decoder *decoder); /** * Update the decoder with more bytes of text. * The on_codepoint callback will be invoked for each codepoint encountered. * Raises an error if invalid UTF8 is encountered or the on_codepoint callback reports an error. * * Note: You must call aws_utf8_decoder_finalize() when the text is 100% complete, * to ensure the input was completely valid. */ AWS_COMMON_API int aws_utf8_decoder_update(struct aws_utf8_decoder *decoder, struct aws_byte_cursor bytes); /** * Tell the decoder that you've reached the end of your text. * Raises AWS_ERROR_INVALID_UTF8 if the text did not end with a complete UTF8 codepoint. * This also resets the decoder. */ AWS_COMMON_API int aws_utf8_decoder_finalize(struct aws_utf8_decoder *decoder); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_ENCODING_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/encoding.inl000066400000000000000000000107021456575232400265170ustar00rootroot00000000000000#ifndef AWS_COMMON_ENCODING_INL #define AWS_COMMON_ENCODING_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_EXTERN_C_BEGIN /* Add a 64 bit unsigned integer to the buffer, ensuring network - byte order * Assumes the buffer size is at least 8 bytes. */ AWS_STATIC_IMPL void aws_write_u64(uint64_t value, uint8_t *buffer) { value = aws_hton64(value); memcpy((void *)buffer, &value, sizeof(value)); } /* * Extracts a 64 bit unsigned integer from buffer. 
Ensures conversion from * network byte order to host byte order. Assumes buffer size is at least 8 * bytes. */ AWS_STATIC_IMPL uint64_t aws_read_u64(const uint8_t *buffer) { uint64_t value = 0; memcpy((void *)&value, (void *)buffer, sizeof(value)); return aws_ntoh64(value); } /* Add a 32 bit unsigned integer to the buffer, ensuring network - byte order * Assumes the buffer size is at least 4 bytes. */ AWS_STATIC_IMPL void aws_write_u32(uint32_t value, uint8_t *buffer) { value = aws_hton32(value); memcpy((void *)buffer, (void *)&value, sizeof(value)); } /* * Extracts a 32 bit unsigned integer from buffer. Ensures conversion from * network byte order to host byte order. Assumes the buffer size is at least 4 * bytes. */ AWS_STATIC_IMPL uint32_t aws_read_u32(const uint8_t *buffer) { uint32_t value = 0; memcpy((void *)&value, (void *)buffer, sizeof(value)); return aws_ntoh32(value); } /* Add a 24 bit unsigned integer to the buffer, ensuring network - byte order * return the new position in the buffer for the next operation. * Note, since this uses uint32_t for storage, the 3 least significant bytes * will be used. Assumes buffer is at least 3 bytes long. */ AWS_STATIC_IMPL void aws_write_u24(uint32_t value, uint8_t *buffer) { value = aws_hton32(value); memcpy((void *)buffer, (void *)((uint8_t *)&value + 1), sizeof(value) - 1); } /* * Extracts a 24 bit unsigned integer from buffer. Ensures conversion from * network byte order to host byte order. Assumes buffer is at least 3 bytes * long. */ AWS_STATIC_IMPL uint32_t aws_read_u24(const uint8_t *buffer) { uint32_t value = 0; memcpy((void *)((uint8_t *)&value + 1), (void *)buffer, sizeof(value) - 1); return aws_ntoh32(value); } /* Add a 16 bit unsigned integer to the buffer, ensuring network-byte order * return the new position in the buffer for the next operation. * Assumes buffer is at least 2 bytes long. */ AWS_STATIC_IMPL void aws_write_u16(uint16_t value, uint8_t *buffer) { value = aws_hton16(value); memcpy((void *)buffer, (void *)&value, sizeof(value)); } /* * Extracts a 16 bit unsigned integer from buffer. Ensures conversion from * network byte order to host byte order. Assumes buffer is at least 2 bytes * long. 
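 *
 * Round-trip sketch (illustrative only):
 *
 *     uint8_t wire[2];
 *     aws_write_u16(0xBEEF, wire);        // stores big-endian bytes 0xBE 0xEF
 *     uint16_t host = aws_read_u16(wire); // back to host order: 0xBEEF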
*/ AWS_STATIC_IMPL uint16_t aws_read_u16(const uint8_t *buffer) { uint16_t value = 0; memcpy((void *)&value, (void *)buffer, sizeof(value)); return aws_ntoh16(value); } /* Reference: https://unicodebook.readthedocs.io/guess_encoding.html */ AWS_STATIC_IMPL enum aws_text_encoding aws_text_detect_encoding(const uint8_t *bytes, size_t size) { static const char *UTF_8_BOM = "\xEF\xBB\xBF"; static const char *UTF_16_BE_BOM = "\xFE\xFF"; static const char *UTF_16_LE_BOM = "\xFF\xFE"; static const char *UTF_32_BE_BOM = "\x00\x00\xFE\xFF"; static const char *UTF_32_LE_BOM = "\xFF\xFE\x00\x00"; if (size >= 3) { if (memcmp(bytes, UTF_8_BOM, 3) == 0) return AWS_TEXT_UTF8; } if (size >= 4) { if (memcmp(bytes, UTF_32_LE_BOM, 4) == 0) return AWS_TEXT_UTF32; if (memcmp(bytes, UTF_32_BE_BOM, 4) == 0) return AWS_TEXT_UTF32; } if (size >= 2) { if (memcmp(bytes, UTF_16_LE_BOM, 2) == 0) return AWS_TEXT_UTF16; if (memcmp(bytes, UTF_16_BE_BOM, 2) == 0) return AWS_TEXT_UTF16; } size_t idx = 0; for (; idx < size; ++idx) { if (bytes[idx] & 0x80) { return AWS_TEXT_UNKNOWN; } } return AWS_TEXT_ASCII; } AWS_STATIC_IMPL bool aws_text_is_utf8(const uint8_t *bytes, size_t size) { enum aws_text_encoding encoding = aws_text_detect_encoding(bytes, size); return encoding == AWS_TEXT_UTF8 || encoding == AWS_TEXT_ASCII; } AWS_EXTERN_C_END #endif /* AWS_COMMON_ENCODING_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/environment.h000066400000000000000000000023761456575232400267520ustar00rootroot00000000000000#ifndef AWS_COMMON_ENVIRONMENT_H #define AWS_COMMON_ENVIRONMENT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_string; /* * Simple shims to the appropriate platform calls for environment variable manipulation. * * Not thread safe to use set/unset unsynced with get. Set/unset only used in unit tests. */ AWS_EXTERN_C_BEGIN /* * Get the value of an environment variable. If the variable is not set, the output string will be set to NULL. * Not thread-safe */ AWS_COMMON_API int aws_get_environment_value( struct aws_allocator *allocator, const struct aws_string *variable_name, struct aws_string **value_out); /* * Set the value of an environment variable. On Windows, setting a variable to the empty string will actually unset it. * Not thread-safe */ AWS_COMMON_API int aws_set_environment_value(const struct aws_string *variable_name, const struct aws_string *value); /* * Unset an environment variable. * Not thread-safe */ AWS_COMMON_API int aws_unset_environment_value(const struct aws_string *variable_name); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_ENVIRONMENT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/error.h000066400000000000000000000151431456575232400255330ustar00rootroot00000000000000#ifndef AWS_COMMON_ERROR_H #define AWS_COMMON_ERROR_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_OP_SUCCESS (0) #define AWS_OP_ERR (-1) /* Each library gets space for 2^^10 error entries */ #define AWS_ERROR_ENUM_STRIDE_BITS 10 #define AWS_ERROR_ENUM_STRIDE (1U << AWS_ERROR_ENUM_STRIDE_BITS) #define AWS_ERROR_ENUM_BEGIN_RANGE(x) ((x)*AWS_ERROR_ENUM_STRIDE) #define AWS_ERROR_ENUM_END_RANGE(x) (((x) + 1) * AWS_ERROR_ENUM_STRIDE - 1) struct aws_error_info { int error_code; const char *literal_name; const char *error_str; const char *lib_name; const char *formatted_name; }; struct aws_error_info_list { const struct aws_error_info *error_list; uint16_t count; }; #define AWS_DEFINE_ERROR_INFO(C, ES, LN) \ { \ .literal_name = #C, .error_code = (C), .error_str = (ES), .lib_name = (LN), \ .formatted_name = LN ": " #C ", " ES, \ } typedef void(aws_error_handler_fn)(int err, void *ctx); AWS_EXTERN_C_BEGIN /* * Returns the latest error code on the current thread, or 0 if none have * occurred. */ AWS_COMMON_API int aws_last_error(void); /* * Returns the error str corresponding to `err`. */ AWS_COMMON_API const char *aws_error_str(int err); /* * Returns the enum name corresponding to `err`. */ AWS_COMMON_API const char *aws_error_name(int err); /* * Returns the error lib name corresponding to `err`. */ AWS_COMMON_API const char *aws_error_lib_name(int err); /* * Returns libname concatenated with error string. */ AWS_COMMON_API const char *aws_error_debug_str(int err); /* * Internal implementation detail. */ AWS_COMMON_API void aws_raise_error_private(int err); /* * Raises `err` to the installed callbacks, and sets the thread's error. */ AWS_STATIC_IMPL int aws_raise_error(int err); /* * Resets the `err` back to defaults */ AWS_COMMON_API void aws_reset_error(void); /* * Sets `err` to the latest error. Does not invoke callbacks. */ AWS_COMMON_API void aws_restore_error(int err); /* * Sets an application wide error handler function. This will be overridden by * the thread local handler. The previous handler is returned, this can be used * for restoring an error handler if it needs to be overridden temporarily. * Setting this to NULL will turn off this error callback after it has been * enabled. */ AWS_COMMON_API aws_error_handler_fn *aws_set_global_error_handler_fn(aws_error_handler_fn *handler, void *ctx); /* * Sets a thread-local error handler function. This will override the global * handler. The previous handler is returned, this can be used for restoring an * error handler if it needs to be overridden temporarily. Setting this to NULL * will turn off this error callback after it has been enabled. */ AWS_COMMON_API aws_error_handler_fn *aws_set_thread_local_error_handler_fn(aws_error_handler_fn *handler, void *ctx); /** TODO: this needs to be a private function (wait till we have the cmake story * better before moving it though). It should be external for the purpose of * other libs we own, but customers should not be able to hit it without going * out of their way to do so. */ AWS_COMMON_API void aws_register_error_info(const struct aws_error_info_list *error_info); AWS_COMMON_API void aws_unregister_error_info(const struct aws_error_info_list *error_info); /** * Convert a c library io error into an aws error, and raise it. * If no conversion is found, fallback_aws_error_code is raised. * Always returns AWS_OP_ERR. */ AWS_COMMON_API int aws_translate_and_raise_io_error_or(int error_no, int fallback_aws_error_code); /** * Convert a c library io error into an aws error, and raise it. 
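 * For example, a hypothetical wrapper around a failing C call might do
 * (errno comes from errno.h):
 *
 *     if (fclose(fp) != 0) {
 *         return aws_translate_and_raise_io_error(errno);
 *     }
 *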
* If no conversion is found, AWS_ERROR_SYS_CALL_FAILURE is raised. * Always returns AWS_OP_ERR. */ AWS_COMMON_API int aws_translate_and_raise_io_error(int error_no); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END enum aws_common_error { AWS_ERROR_SUCCESS = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID), AWS_ERROR_OOM, AWS_ERROR_NO_SPACE, AWS_ERROR_UNKNOWN, AWS_ERROR_SHORT_BUFFER, AWS_ERROR_OVERFLOW_DETECTED, AWS_ERROR_UNSUPPORTED_OPERATION, AWS_ERROR_INVALID_BUFFER_SIZE, AWS_ERROR_INVALID_HEX_STR, AWS_ERROR_INVALID_BASE64_STR, AWS_ERROR_INVALID_INDEX, AWS_ERROR_THREAD_INVALID_SETTINGS, AWS_ERROR_THREAD_INSUFFICIENT_RESOURCE, AWS_ERROR_THREAD_NO_PERMISSIONS, AWS_ERROR_THREAD_NOT_JOINABLE, AWS_ERROR_THREAD_NO_SUCH_THREAD_ID, AWS_ERROR_THREAD_DEADLOCK_DETECTED, AWS_ERROR_MUTEX_NOT_INIT, AWS_ERROR_MUTEX_TIMEOUT, AWS_ERROR_MUTEX_CALLER_NOT_OWNER, AWS_ERROR_MUTEX_FAILED, AWS_ERROR_COND_VARIABLE_INIT_FAILED, AWS_ERROR_COND_VARIABLE_TIMED_OUT, AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN, AWS_ERROR_CLOCK_FAILURE, AWS_ERROR_LIST_EMPTY, AWS_ERROR_DEST_COPY_TOO_SMALL, AWS_ERROR_LIST_EXCEEDS_MAX_SIZE, AWS_ERROR_LIST_STATIC_MODE_CANT_SHRINK, AWS_ERROR_PRIORITY_QUEUE_FULL, AWS_ERROR_PRIORITY_QUEUE_EMPTY, AWS_ERROR_PRIORITY_QUEUE_BAD_NODE, AWS_ERROR_HASHTBL_ITEM_NOT_FOUND, AWS_ERROR_INVALID_DATE_STR, AWS_ERROR_INVALID_ARGUMENT, AWS_ERROR_RANDOM_GEN_FAILED, AWS_ERROR_MALFORMED_INPUT_STRING, AWS_ERROR_UNIMPLEMENTED, AWS_ERROR_INVALID_STATE, AWS_ERROR_ENVIRONMENT_GET, AWS_ERROR_ENVIRONMENT_SET, AWS_ERROR_ENVIRONMENT_UNSET, AWS_ERROR_STREAM_UNSEEKABLE, AWS_ERROR_NO_PERMISSION, AWS_ERROR_FILE_INVALID_PATH, AWS_ERROR_MAX_FDS_EXCEEDED, AWS_ERROR_SYS_CALL_FAILURE, AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED, AWS_ERROR_STRING_MATCH_NOT_FOUND, AWS_ERROR_DIVIDE_BY_ZERO, AWS_ERROR_INVALID_FILE_HANDLE, AWS_ERROR_OPERATION_INTERUPTED, AWS_ERROR_DIRECTORY_NOT_EMPTY, AWS_ERROR_PLATFORM_NOT_SUPPORTED, AWS_ERROR_INVALID_UTF8, AWS_ERROR_GET_HOME_DIRECTORY_FAILED, AWS_ERROR_INVALID_XML, AWS_ERROR_FILE_OPEN_FAILURE, AWS_ERROR_FILE_READ_FAILURE, AWS_ERROR_FILE_WRITE_FAILURE, AWS_ERROR_END_COMMON_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMMON_PACKAGE_ID) }; AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_ERROR_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/error.inl000066400000000000000000000013011456575232400260550ustar00rootroot00000000000000#ifndef AWS_COMMON_ERROR_INL #define AWS_COMMON_ERROR_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_EXTERN_C_BEGIN /* * Raises `err` to the installed callbacks, and sets the thread's error. */ AWS_STATIC_IMPL int aws_raise_error(int err) { /* * Certain static analyzers can't see through the out-of-line call to aws_raise_error, * and assume that this might return AWS_OP_SUCCESS. We'll put the return inline just * to help with their assumptions. */ aws_raise_error_private(err); return AWS_OP_ERR; } AWS_EXTERN_C_END #endif /* AWS_COMMON_ERROR_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/exports.h000066400000000000000000000025421456575232400261050ustar00rootroot00000000000000#ifndef AWS_COMMON_EXPORTS_H #define AWS_COMMON_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #if defined(AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined(_WIN32) # ifdef AWS_COMMON_USE_IMPORT_EXPORT # ifdef AWS_COMMON_EXPORTS # define AWS_COMMON_API __declspec(dllexport) # else # define AWS_COMMON_API __declspec(dllimport) # endif /* AWS_COMMON_EXPORTS */ # else # define AWS_COMMON_API # endif /* AWS_COMMON_USE_IMPORT_EXPORT */ #else /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32) */ # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_COMMON_USE_IMPORT_EXPORT) && defined(AWS_COMMON_EXPORTS) # define AWS_COMMON_API __attribute__((visibility("default"))) # else # define AWS_COMMON_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32) */ #ifdef AWS_NO_STATIC_IMPL # define AWS_STATIC_IMPL AWS_COMMON_API #endif #ifndef AWS_STATIC_IMPL /* * In order to allow us to export our inlinable methods in a DLL/.so, we have a designated .c * file where this AWS_STATIC_IMPL macro will be redefined to be non-static. */ # define AWS_STATIC_IMPL static inline #endif #endif /* AWS_COMMON_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/external/000077500000000000000000000000001456575232400260475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/external/.clang-format000066400000000000000000000000501456575232400304150ustar00rootroot00000000000000DisableFormat: true SortIncludes: false aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/external/ittnotify.h000066400000000000000000006005511456575232400302600ustar00rootroot00000000000000/* Copyright (C) 2005-2019 Intel Corporation SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */ /* Amazon.com has chosen to use this file under the terms of the BSD-3-Clause license. */ #ifndef _ITTNOTIFY_H_ #define _ITTNOTIFY_H_ /** @file @brief Public User API functions and types @mainpage The Instrumentation and Tracing Technology API (ITT API) is used to annotate a user's program with additional information that can be used by correctness and performance tools. The user inserts calls in their program. Those calls generate information that is collected at runtime, and used by Intel(R) Threading Tools. @section API Concepts The following general concepts are used throughout the API. @subsection Unicode Support Many API functions take character string arguments. On Windows, there are two versions of each such function. The function name is suffixed by W if Unicode support is enabled, and by A otherwise. Any API function that takes a character string argument adheres to this convention. @subsection Conditional Compilation Many users prefer having an option to modify ITT API code when linking it inside their runtimes. ITT API header file provides a mechanism to replace ITT API function names inside your code with empty strings. To do this, define the macros INTEL_NO_ITTNOTIFY_API during compilation and remove the static library from the linker script. @subsection Domains [see domains] Domains provide a way to separate notification for different modules or libraries in a program. Domains are specified by dotted character strings, e.g. TBB.Internal.Control. A mechanism (to be specified) is provided to enable and disable domains. By default, all domains are enabled. @subsection Named Entities and Instances Named entities (frames, regions, tasks, and markers) communicate information about the program to the analysis tools. 
A named entity often refers to a section of program code, or to some set of logical concepts that the programmer wants to group together. Named entities relate to the programmer's static view of the program. When the program actually executes, many instances of a given named entity may be created. The API annotations denote instances of named entities. The actual named entities are displayed using the analysis tools. In other words, the named entities come into existence when instances are created. Instances of named entities may have instance identifiers (IDs). Some API calls use instance identifiers to create relationships between different instances of named entities. Other API calls associate data with instances of named entities. Some named entities must always have instance IDs. In particular, regions and frames always have IDs. Task and markers need IDs only if the ID is needed in another API call (such as adding a relation or metadata). The lifetime of instance IDs is distinct from the lifetime of instances. This allows various relationships to be specified separate from the actual execution of instances. This flexibility comes at the expense of extra API calls. The same ID may not be reused for different instances, unless a previous [ref] __itt_id_destroy call for that ID has been issued. */ /** @cond exclude_from_documentation */ #ifndef ITT_OS_WIN # define ITT_OS_WIN 1 #endif /* ITT_OS_WIN */ #ifndef ITT_OS_LINUX # define ITT_OS_LINUX 2 #endif /* ITT_OS_LINUX */ #ifndef ITT_OS_MAC # define ITT_OS_MAC 3 #endif /* ITT_OS_MAC */ #ifndef ITT_OS_FREEBSD # define ITT_OS_FREEBSD 4 #endif /* ITT_OS_FREEBSD */ #ifndef ITT_OS_OPENBSD # define ITT_OS_OPENBSD 5 #endif /* ITT_OS_OPENBSD */ #ifndef ITT_OS # if defined WIN32 || defined _WIN32 # define ITT_OS ITT_OS_WIN # elif defined( __APPLE__ ) && defined( __MACH__ ) # define ITT_OS ITT_OS_MAC # elif defined( __FreeBSD__ ) # define ITT_OS ITT_OS_FREEBSD # elif defined( __OpenBSD__) # define ITT_OS ITT_OS_OPENBSD # else # define ITT_OS ITT_OS_LINUX # endif #endif /* ITT_OS */ #ifndef ITT_PLATFORM_WIN # define ITT_PLATFORM_WIN 1 #endif /* ITT_PLATFORM_WIN */ #ifndef ITT_PLATFORM_POSIX # define ITT_PLATFORM_POSIX 2 #endif /* ITT_PLATFORM_POSIX */ #ifndef ITT_PLATFORM_MAC # define ITT_PLATFORM_MAC 3 #endif /* ITT_PLATFORM_MAC */ #ifndef ITT_PLATFORM_FREEBSD # define ITT_PLATFORM_FREEBSD 4 #endif /* ITT_PLATFORM_FREEBSD */ #ifndef ITT_PLATFORM_OPENBSD # define ITT_PLATFORM_OPENBSD 5 #endif /* ITT_PLATFORM_OPENBSD */ #ifndef ITT_PLATFORM # if ITT_OS==ITT_OS_WIN # define ITT_PLATFORM ITT_PLATFORM_WIN # elif ITT_OS==ITT_OS_MAC # define ITT_PLATFORM ITT_PLATFORM_MAC # elif ITT_OS==ITT_OS_FREEBSD # define ITT_PLATFORM ITT_PLATFORM_FREEBSD # elif ITT_OS==ITT_OS_OPENBSD # define ITT_PLATFORM ITT_PLATFORM_OPENBSD # else # define ITT_PLATFORM ITT_PLATFORM_POSIX # endif #endif /* ITT_PLATFORM */ #if defined(_UNICODE) && !defined(UNICODE) #define UNICODE #endif #include #if ITT_PLATFORM==ITT_PLATFORM_WIN #include #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #include #if defined(UNICODE) || defined(_UNICODE) #include #endif /* UNICODE || _UNICODE */ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #ifndef ITTAPI_CDECL # if ITT_PLATFORM==ITT_PLATFORM_WIN # define ITTAPI_CDECL __cdecl # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # if defined _M_IX86 || defined __i386__ # define ITTAPI_CDECL __attribute__ ((cdecl)) # else /* _M_IX86 || __i386__ */ # define ITTAPI_CDECL /* actual only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* ITTAPI_CDECL */ #ifndef STDCALL # if ITT_PLATFORM==ITT_PLATFORM_WIN # define STDCALL __stdcall # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # if defined _M_IX86 || defined __i386__ # define STDCALL __attribute__ ((stdcall)) # else /* _M_IX86 || __i386__ */ # define STDCALL /* supported only on x86 platform */ # endif /* _M_IX86 || __i386__ */ # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* STDCALL */ #define ITTAPI ITTAPI_CDECL #define LIBITTAPI ITTAPI_CDECL /* TODO: Temporary for compatibility! */ #define ITTAPI_CALL ITTAPI_CDECL #define LIBITTAPI_CALL ITTAPI_CDECL #if ITT_PLATFORM==ITT_PLATFORM_WIN /* use __forceinline (VC++ specific) */ #if defined(__MINGW32__) && !defined(__cplusplus) #define ITT_INLINE static __inline__ __attribute__((__always_inline__,__gnu_inline__)) #else #define ITT_INLINE static __forceinline #endif /* __MINGW32__ */ #define ITT_INLINE_ATTRIBUTE /* nothing */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /* * Generally, functions are not inlined unless optimization is specified. * For functions declared inline, this attribute inlines the function even * if no optimization level was specified. */ #ifdef __STRICT_ANSI__ #define ITT_INLINE static #define ITT_INLINE_ATTRIBUTE __attribute__((unused)) #else /* __STRICT_ANSI__ */ #define ITT_INLINE static inline #define ITT_INLINE_ATTRIBUTE __attribute__((always_inline, unused)) #endif /* __STRICT_ANSI__ */ #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @endcond */ #ifdef INTEL_ITTNOTIFY_ENABLE_LEGACY # if ITT_PLATFORM==ITT_PLATFORM_WIN # pragma message("WARNING!!! Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro") # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # warning "Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro" # endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ # include "legacy/ittnotify.h" #endif /* INTEL_ITTNOTIFY_ENABLE_LEGACY */ /** @cond exclude_from_documentation */ /* Helper macro for joining tokens */ #define ITT_JOIN_AUX(p,n) p##n #define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) #ifdef ITT_MAJOR #undef ITT_MAJOR #endif #ifdef ITT_MINOR #undef ITT_MINOR #endif #define ITT_MAJOR 3 #define ITT_MINOR 0 /* Standard versioning of a token with major and minor version numbers */ #define ITT_VERSIONIZE(x) \ ITT_JOIN(x, \ ITT_JOIN(_, \ ITT_JOIN(ITT_MAJOR, \ ITT_JOIN(_, ITT_MINOR)))) #ifndef INTEL_ITTNOTIFY_PREFIX # define INTEL_ITTNOTIFY_PREFIX __itt_ #endif /* INTEL_ITTNOTIFY_PREFIX */ #ifndef INTEL_ITTNOTIFY_POSTFIX # define INTEL_ITTNOTIFY_POSTFIX _ptr_ #endif /* INTEL_ITTNOTIFY_POSTFIX */ #define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) #define ITTNOTIFY_NAME(n) ITT_VERSIONIZE(ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))) #define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) #define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) #define ITTNOTIFY_VOID_D0(n,d) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d) #define ITTNOTIFY_VOID_D1(n,d,x) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x) #define ITTNOTIFY_VOID_D2(n,d,x,y) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y) #define ITTNOTIFY_VOID_D3(n,d,x,y,z) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z) #define ITTNOTIFY_VOID_D4(n,d,x,y,z,a) (d == NULL) ? (void)0 : (!(d)->flags) ? 
(void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) #define ITTNOTIFY_VOID_D5(n,d,x,y,z,a,b) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) #define ITTNOTIFY_VOID_D6(n,d,x,y,z,a,b,c) (d == NULL) ? (void)0 : (!(d)->flags) ? (void)0 : (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) #define ITTNOTIFY_DATA_D0(n,d) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d) #define ITTNOTIFY_DATA_D1(n,d,x) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x) #define ITTNOTIFY_DATA_D2(n,d,x,y) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y) #define ITTNOTIFY_DATA_D3(n,d,x,y,z) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z) #define ITTNOTIFY_DATA_D4(n,d,x,y,z,a) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a) #define ITTNOTIFY_DATA_D5(n,d,x,y,z,a,b) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b) #define ITTNOTIFY_DATA_D6(n,d,x,y,z,a,b,c) (d == NULL) ? 0 : (!(d)->flags) ? 0 : (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n)(d,x,y,z,a,b,c) #ifdef ITT_STUB #undef ITT_STUB #endif #ifdef ITT_STUBV #undef ITT_STUBV #endif #define ITT_STUBV(api,type,name,args) \ typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); #define ITT_STUB ITT_STUBV /** @endcond */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /** @cond exclude_from_gpa_documentation */ /** * @defgroup public Public API * @{ * @} */ /** * @defgroup control Collection Control * @ingroup public * General behavior: application continues to run, but no profiling information is being collected * * Pausing occurs not only for the current thread but for all process as well as spawned processes * - Intel(R) Parallel Inspector and Intel(R) Inspector XE: * - Does not analyze or report errors that involve memory access. * - Other errors are reported as usual. Pausing data collection in * Intel(R) Parallel Inspector and Intel(R) Inspector XE * only pauses tracing and analyzing memory access. * It does not pause tracing or analyzing threading APIs. * . * - Intel(R) Parallel Amplifier and Intel(R) VTune(TM) Amplifier XE: * - Does continue to record when new threads are started. * . * - Other effects: * - Possible reduction of runtime overhead. * . 
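 * A typical pattern is to skip uninteresting phases and collect only the region of
 * interest (illustrative sketch; load_test_data is a hypothetical function):
 *
 *     __itt_pause();      // stop collecting during expensive setup
 *     load_test_data();
 *     __itt_resume();     // collect from here on
 *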
* @{ */ /** @brief Pause collection */ void ITTAPI __itt_pause(void); /** @brief Resume collection */ void ITTAPI __itt_resume(void); /** @brief Detach collection */ void ITTAPI __itt_detach(void); /** * @enum __itt_collection_scope * @brief Enumerator for collection scopes */ typedef enum { __itt_collection_scope_host = 1 << 0, __itt_collection_scope_offload = 1 << 1, __itt_collection_scope_all = 0x7FFFFFFF } __itt_collection_scope; /** @brief Pause scoped collection */ void ITTAPI __itt_pause_scoped(__itt_collection_scope); /** @brief Resume scoped collection */ void ITTAPI __itt_resume_scoped(__itt_collection_scope); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, pause, (void)) ITT_STUBV(ITTAPI, void, pause_scoped, (__itt_collection_scope)) ITT_STUBV(ITTAPI, void, resume, (void)) ITT_STUBV(ITTAPI, void, resume_scoped, (__itt_collection_scope)) ITT_STUBV(ITTAPI, void, detach, (void)) #define __itt_pause ITTNOTIFY_VOID(pause) #define __itt_pause_ptr ITTNOTIFY_NAME(pause) #define __itt_pause_scoped ITTNOTIFY_VOID(pause_scoped) #define __itt_pause_scoped_ptr ITTNOTIFY_NAME(pause_scoped) #define __itt_resume ITTNOTIFY_VOID(resume) #define __itt_resume_ptr ITTNOTIFY_NAME(resume) #define __itt_resume_scoped ITTNOTIFY_VOID(resume_scoped) #define __itt_resume_scoped_ptr ITTNOTIFY_NAME(resume_scoped) #define __itt_detach ITTNOTIFY_VOID(detach) #define __itt_detach_ptr ITTNOTIFY_NAME(detach) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_pause() #define __itt_pause_ptr 0 #define __itt_pause_scoped(scope) #define __itt_pause_scoped_ptr 0 #define __itt_resume() #define __itt_resume_ptr 0 #define __itt_resume_scoped(scope) #define __itt_resume_scoped_ptr 0 #define __itt_detach() #define __itt_detach_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_pause_ptr 0 #define __itt_pause_scoped_ptr 0 #define __itt_resume_ptr 0 #define __itt_resume_scoped_ptr 0 #define __itt_detach_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} control group */ /** @endcond */ /** * @defgroup Intel Processor Trace control * API from this group provides control over collection and analysis of Intel Processor Trace (Intel PT) data * Information about Intel Processor Trace technology can be found here (Volume 3 chapter 35): * https://software.intel.com/sites/default/files/managed/39/c5/325462-sdm-vol-1-2abcd-3abcd.pdf * Use this API to mark particular code regions for loading detailed performance statistics. * This mode makes your analysis faster and more accurate. * @{ */ typedef unsigned char __itt_pt_region; /** * @brief function saves a region name marked with Intel PT API and returns a region id. * Only 7 names can be registered. Attempts to register more names will be ignored and a region id with auto names will be returned. 
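 * A sketch of the intended pattern (illustrative only; run_hot_loop is hypothetical):
 *
 *     __itt_pt_region region = __itt_pt_region_create("hot_loop");
 *     __itt_mark_pt_region_begin(region);
 *     run_hot_loop();
 *     __itt_mark_pt_region_end(region);
 *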
* For automatic naming of regions pass NULL as function parameter */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_pt_region ITTAPI __itt_pt_region_createA(const char *name); __itt_pt_region ITTAPI __itt_pt_region_createW(const wchar_t *name); #if defined(UNICODE) || defined(_UNICODE) # define __itt_pt_region_create __itt_pt_region_createW #else /* UNICODE */ # define __itt_pt_region_create __itt_pt_region_createA #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_pt_region ITTAPI __itt_pt_region_create(const char *name); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createA, (const char *name)) ITT_STUB(ITTAPI, __itt_pt_region, pt_region_createW, (const wchar_t *name)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_pt_region, pt_region_create, (const char *name)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_pt_region_createA ITTNOTIFY_DATA(pt_region_createA) #define __itt_pt_region_createA_ptr ITTNOTIFY_NAME(pt_region_createA) #define __itt_pt_region_createW ITTNOTIFY_DATA(pt_region_createW) #define __itt_pt_region_createW_ptr ITTNOTIFY_NAME(pt_region_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_pt_region_create ITTNOTIFY_DATA(pt_region_create) #define __itt_pt_region_create_ptr ITTNOTIFY_NAME(pt_region_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_pt_region_createA(name) (__itt_pt_region)0 #define __itt_pt_region_createA_ptr 0 #define __itt_pt_region_createW(name) (__itt_pt_region)0 #define __itt_pt_region_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_pt_region_create(name) (__itt_pt_region)0 #define __itt_pt_region_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_pt_region_createA_ptr 0 #define __itt_pt_region_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_pt_region_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief function contains a special code pattern identified on the post-processing stage and * marks the beginning of a code region targeted for Intel PT analysis * @param[in] region - region id, 0 <= region < 8 */ void __itt_mark_pt_region_begin(__itt_pt_region region); /** * @brief function contains a special code pattern identified on the post-processing stage and * marks the end of a code region targeted for Intel PT analysis * @param[in] region - region id, 0 <= region < 8 */ void __itt_mark_pt_region_end(__itt_pt_region region); /** @} Intel PT control group*/ /** * @defgroup threads Threads * @ingroup public * Give names to threads * @{ */ /** * @brief Sets thread name of calling thread * @param[in] name - name of thread */ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_thread_set_nameA(const char *name); void ITTAPI __itt_thread_set_nameW(const wchar_t *name); #if defined(UNICODE) || defined(_UNICODE) # define __itt_thread_set_name __itt_thread_set_nameW # define __itt_thread_set_name_ptr __itt_thread_set_nameW_ptr #else /* UNICODE */ # define __itt_thread_set_name __itt_thread_set_nameA # define __itt_thread_set_name_ptr __itt_thread_set_nameA_ptr #endif /* UNICODE */ 
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ void ITTAPI __itt_thread_set_name(const char *name); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char *name)) ITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUBV(ITTAPI, void, thread_set_name, (const char *name)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_thread_set_nameA ITTNOTIFY_VOID(thread_set_nameA) #define __itt_thread_set_nameA_ptr ITTNOTIFY_NAME(thread_set_nameA) #define __itt_thread_set_nameW ITTNOTIFY_VOID(thread_set_nameW) #define __itt_thread_set_nameW_ptr ITTNOTIFY_NAME(thread_set_nameW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_thread_set_name ITTNOTIFY_VOID(thread_set_name) #define __itt_thread_set_name_ptr ITTNOTIFY_NAME(thread_set_name) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_thread_set_nameA(name) #define __itt_thread_set_nameA_ptr 0 #define __itt_thread_set_nameW(name) #define __itt_thread_set_nameW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_thread_set_name(name) #define __itt_thread_set_name_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_thread_set_nameA_ptr 0 #define __itt_thread_set_nameW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_thread_set_name_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @cond exclude_from_gpa_documentation */ /** * @brief Mark current thread as ignored from this point on, for the duration of its existence. 
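 *
 * For example (illustrative only), worker threads might be named while a
 * housekeeping thread opts out of analysis entirely:
 *
 *     __itt_thread_set_name("io-worker-0"); // in each worker thread
 *     __itt_thread_ignore();                // in the housekeeping thread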
*/ void ITTAPI __itt_thread_ignore(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, thread_ignore, (void)) #define __itt_thread_ignore ITTNOTIFY_VOID(thread_ignore) #define __itt_thread_ignore_ptr ITTNOTIFY_NAME(thread_ignore) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_thread_ignore() #define __itt_thread_ignore_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_thread_ignore_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} threads group */ /** * @defgroup suppress Error suppression * @ingroup public * General behavior: application continues to run, but errors are suppressed * * @{ */ /*****************************************************************//** * @name group of functions used for error suppression in correctness tools *********************************************************************/ /** @{ */ /** * @hideinitializer * @brief possible value for suppression mask */ #define __itt_suppress_all_errors 0x7fffffff /** * @hideinitializer * @brief possible value for suppression mask (suppresses errors from threading analysis) */ #define __itt_suppress_threading_errors 0x000000ff /** * @hideinitializer * @brief possible value for suppression mask (suppresses errors from memory analysis) */ #define __itt_suppress_memory_errors 0x0000ff00 /** * @brief Start suppressing errors identified in mask on this thread */ void ITTAPI __itt_suppress_push(unsigned int mask); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, suppress_push, (unsigned int mask)) #define __itt_suppress_push ITTNOTIFY_VOID(suppress_push) #define __itt_suppress_push_ptr ITTNOTIFY_NAME(suppress_push) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_suppress_push(mask) #define __itt_suppress_push_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_suppress_push_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Undo the effects of the matching call to __itt_suppress_push */ void ITTAPI __itt_suppress_pop(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, suppress_pop, (void)) #define __itt_suppress_pop ITTNOTIFY_VOID(suppress_pop) #define __itt_suppress_pop_ptr ITTNOTIFY_NAME(suppress_pop) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_suppress_pop() #define __itt_suppress_pop_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_suppress_pop_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @enum __itt_suppress_mode * @brief Enumerator for the suppressing modes */ typedef enum __itt_suppress_mode { __itt_unsuppress_range, __itt_suppress_range } __itt_suppress_mode_t; /** * @enum __itt_collection_state * @brief Enumerator for collection state. 
*/ typedef enum { __itt_collection_uninitialized = 0, /* uninitialized */ __itt_collection_init_fail = 1, /* failed to init */ __itt_collection_collector_absent = 2, /* non work state collector is absent */ __itt_collection_collector_exists = 3, /* work state collector exists */ __itt_collection_init_successful = 4 /* success to init */ } __itt_collection_state; /** * @brief Mark a range of memory for error suppression or unsuppression for error types included in mask */ void ITTAPI __itt_suppress_mark_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, suppress_mark_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size)) #define __itt_suppress_mark_range ITTNOTIFY_VOID(suppress_mark_range) #define __itt_suppress_mark_range_ptr ITTNOTIFY_NAME(suppress_mark_range) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_suppress_mark_range(mask) #define __itt_suppress_mark_range_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_suppress_mark_range_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Undo the effect of a matching call to __itt_suppress_mark_range. If not matching * call is found, nothing is changed. */ void ITTAPI __itt_suppress_clear_range(__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, suppress_clear_range, (__itt_suppress_mode_t mode, unsigned int mask, void * address, size_t size)) #define __itt_suppress_clear_range ITTNOTIFY_VOID(suppress_clear_range) #define __itt_suppress_clear_range_ptr ITTNOTIFY_NAME(suppress_clear_range) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_suppress_clear_range(mask) #define __itt_suppress_clear_range_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_suppress_clear_range_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} */ /** @} suppress group */ /** * @defgroup sync Synchronization * @ingroup public * Indicate user-written synchronization code * @{ */ /** * @hideinitializer * @brief possible value of attribute argument for sync object type */ #define __itt_attr_barrier 1 /** * @hideinitializer * @brief possible value of attribute argument for sync object type */ #define __itt_attr_mutex 2 /** @brief Name a synchronization object @param[in] addr Handle for the synchronization object. You should use a real address to uniquely identify the synchronization object. @param[in] objtype null-terminated object type string. If NULL is passed, the name will be "User Synchronization". @param[in] objname null-terminated object name string. If NULL, no name will be assigned to the object. 
@param[in] attribute one of [#__itt_attr_barrier, #__itt_attr_mutex] */ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_sync_createA(void *addr, const char *objtype, const char *objname, int attribute); void ITTAPI __itt_sync_createW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute); #if defined(UNICODE) || defined(_UNICODE) # define __itt_sync_create __itt_sync_createW # define __itt_sync_create_ptr __itt_sync_createW_ptr #else /* UNICODE */ # define __itt_sync_create __itt_sync_createA # define __itt_sync_create_ptr __itt_sync_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ void ITTAPI __itt_sync_create (void *addr, const char *objtype, const char *objname, int attribute); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char *objtype, const char *objname, int attribute)) ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUBV(ITTAPI, void, sync_create, (void *addr, const char* objtype, const char* objname, int attribute)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_sync_createA ITTNOTIFY_VOID(sync_createA) #define __itt_sync_createA_ptr ITTNOTIFY_NAME(sync_createA) #define __itt_sync_createW ITTNOTIFY_VOID(sync_createW) #define __itt_sync_createW_ptr ITTNOTIFY_NAME(sync_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_sync_create ITTNOTIFY_VOID(sync_create) #define __itt_sync_create_ptr ITTNOTIFY_NAME(sync_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_sync_createA(addr, objtype, objname, attribute) #define __itt_sync_createA_ptr 0 #define __itt_sync_createW(addr, objtype, objname, attribute) #define __itt_sync_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_sync_create(addr, objtype, objname, attribute) #define __itt_sync_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_sync_createA_ptr 0 #define __itt_sync_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_sync_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @brief Rename a synchronization object You can use the rename call to assign or reassign a name to a given synchronization object. @param[in] addr handle for the synchronization object. @param[in] name null-terminated object name string. 
*/ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_sync_renameA(void *addr, const char *name); void ITTAPI __itt_sync_renameW(void *addr, const wchar_t *name); #if defined(UNICODE) || defined(_UNICODE) # define __itt_sync_rename __itt_sync_renameW # define __itt_sync_rename_ptr __itt_sync_renameW_ptr #else /* UNICODE */ # define __itt_sync_rename __itt_sync_renameA # define __itt_sync_rename_ptr __itt_sync_renameA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ void ITTAPI __itt_sync_rename(void *addr, const char *name); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char *name)) ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUBV(ITTAPI, void, sync_rename, (void *addr, const char *name)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_sync_renameA ITTNOTIFY_VOID(sync_renameA) #define __itt_sync_renameA_ptr ITTNOTIFY_NAME(sync_renameA) #define __itt_sync_renameW ITTNOTIFY_VOID(sync_renameW) #define __itt_sync_renameW_ptr ITTNOTIFY_NAME(sync_renameW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_sync_rename ITTNOTIFY_VOID(sync_rename) #define __itt_sync_rename_ptr ITTNOTIFY_NAME(sync_rename) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_sync_renameA(addr, name) #define __itt_sync_renameA_ptr 0 #define __itt_sync_renameW(addr, name) #define __itt_sync_renameW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_sync_rename(addr, name) #define __itt_sync_rename_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_sync_renameA_ptr 0 #define __itt_sync_renameW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_sync_rename_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @brief Destroy a synchronization object. @param addr Handle for the synchronization object. 
*/ void ITTAPI __itt_sync_destroy(void *addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, sync_destroy, (void *addr)) #define __itt_sync_destroy ITTNOTIFY_VOID(sync_destroy) #define __itt_sync_destroy_ptr ITTNOTIFY_NAME(sync_destroy) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_sync_destroy(addr) #define __itt_sync_destroy_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_sync_destroy_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /*****************************************************************//** * @name group of functions is used for performance measurement tools *********************************************************************/ /** @{ */ /** * @brief Enter spin loop on user-defined sync object */ void ITTAPI __itt_sync_prepare(void* addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, sync_prepare, (void *addr)) #define __itt_sync_prepare ITTNOTIFY_VOID(sync_prepare) #define __itt_sync_prepare_ptr ITTNOTIFY_NAME(sync_prepare) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_sync_prepare(addr) #define __itt_sync_prepare_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_sync_prepare_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Quit spin loop without acquiring spin object */ void ITTAPI __itt_sync_cancel(void *addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, sync_cancel, (void *addr)) #define __itt_sync_cancel ITTNOTIFY_VOID(sync_cancel) #define __itt_sync_cancel_ptr ITTNOTIFY_NAME(sync_cancel) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_sync_cancel(addr) #define __itt_sync_cancel_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_sync_cancel_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Successful spin loop completion (sync object acquired) */ void ITTAPI __itt_sync_acquired(void *addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, sync_acquired, (void *addr)) #define __itt_sync_acquired ITTNOTIFY_VOID(sync_acquired) #define __itt_sync_acquired_ptr ITTNOTIFY_NAME(sync_acquired) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_sync_acquired(addr) #define __itt_sync_acquired_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_sync_acquired_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Start sync object releasing code. Is called before the lock release call. 
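 *
 * Example: an illustrative sketch of the prepare/cancel/acquired/releasing
 * protocol around a lock; the pthread mutex and the helper function are
 * assumptions, not part of this API.
 * @code
 *     #include <pthread.h>
 *     #include "ittnotify.h"
 *
 *     void locked_increment(pthread_mutex_t *lock, int *counter)
 *     {
 *         __itt_sync_prepare(lock);              // about to wait on the object
 *         if (pthread_mutex_trylock(lock) != 0) {
 *             __itt_sync_cancel(lock);           // gave up without acquiring
 *             return;
 *         }
 *         __itt_sync_acquired(lock);             // the wait ended with the lock held
 *         (*counter)++;
 *         __itt_sync_releasing(lock);            // reported before the release call
 *         pthread_mutex_unlock(lock);
 *     }
 * @endcode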
*/ void ITTAPI __itt_sync_releasing(void* addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, sync_releasing, (void *addr)) #define __itt_sync_releasing ITTNOTIFY_VOID(sync_releasing) #define __itt_sync_releasing_ptr ITTNOTIFY_NAME(sync_releasing) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_sync_releasing(addr) #define __itt_sync_releasing_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_sync_releasing_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} */ /** @} sync group */ /**************************************************************//** * @name group of functions is used for correctness checking tools ******************************************************************/ /** @{ */ /** * @ingroup legacy * @deprecated Legacy API * @brief Fast synchronization which does no require spinning. * - This special function is to be used by TBB and OpenMP libraries only when they know * there is no spin but they need to suppress TC warnings about shared variable modifications. * - It only has corresponding pointers in static library and does not have corresponding function * in dynamic library. * @see void __itt_sync_prepare(void* addr); */ void ITTAPI __itt_fsync_prepare(void* addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, fsync_prepare, (void *addr)) #define __itt_fsync_prepare ITTNOTIFY_VOID(fsync_prepare) #define __itt_fsync_prepare_ptr ITTNOTIFY_NAME(fsync_prepare) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_fsync_prepare(addr) #define __itt_fsync_prepare_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_fsync_prepare_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup legacy * @deprecated Legacy API * @brief Fast synchronization which does no require spinning. * - This special function is to be used by TBB and OpenMP libraries only when they know * there is no spin but they need to suppress TC warnings about shared variable modifications. * - It only has corresponding pointers in static library and does not have corresponding function * in dynamic library. * @see void __itt_sync_cancel(void *addr); */ void ITTAPI __itt_fsync_cancel(void *addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr)) #define __itt_fsync_cancel ITTNOTIFY_VOID(fsync_cancel) #define __itt_fsync_cancel_ptr ITTNOTIFY_NAME(fsync_cancel) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_fsync_cancel(addr) #define __itt_fsync_cancel_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_fsync_cancel_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup legacy * @deprecated Legacy API * @brief Fast synchronization which does no require spinning. * - This special function is to be used by TBB and OpenMP libraries only when they know * there is no spin but they need to suppress TC warnings about shared variable modifications. * - It only has corresponding pointers in static library and does not have corresponding function * in dynamic library. 
* @see void __itt_sync_acquired(void *addr); */ void ITTAPI __itt_fsync_acquired(void *addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, fsync_acquired, (void *addr)) #define __itt_fsync_acquired ITTNOTIFY_VOID(fsync_acquired) #define __itt_fsync_acquired_ptr ITTNOTIFY_NAME(fsync_acquired) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_fsync_acquired(addr) #define __itt_fsync_acquired_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_fsync_acquired_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup legacy * @deprecated Legacy API * @brief Fast synchronization which does no require spinning. * - This special function is to be used by TBB and OpenMP libraries only when they know * there is no spin but they need to suppress TC warnings about shared variable modifications. * - It only has corresponding pointers in static library and does not have corresponding function * in dynamic library. * @see void __itt_sync_releasing(void* addr); */ void ITTAPI __itt_fsync_releasing(void* addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, fsync_releasing, (void *addr)) #define __itt_fsync_releasing ITTNOTIFY_VOID(fsync_releasing) #define __itt_fsync_releasing_ptr ITTNOTIFY_NAME(fsync_releasing) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_fsync_releasing(addr) #define __itt_fsync_releasing_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_fsync_releasing_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} */ /** * @defgroup model Modeling by Intel(R) Parallel Advisor * @ingroup public * This is the subset of itt used for modeling by Intel(R) Parallel Advisor. * This API is called ONLY using annotate.h, by "Annotation" macros * the user places in their sources during the parallelism modeling steps. * * site_begin/end and task_begin/end take the address of handle variables, * which are writeable by the API. Handles must be 0 initialized prior * to the first call to begin, or may cause a run-time failure. * The handles are initialized in a multi-thread safe way by the API if * the handle is 0. The commonly expected idiom is one static handle to * identify a site or task. If a site or task of the same name has already * been started during this collection, the same handle MAY be returned, * but is not required to be - it is unspecified if data merging is done * based on name. These routines also take an instance variable. Like * the lexical instance, these must be 0 initialized. Unlike the lexical * instance, this is used to track a single dynamic instance. * * API used by the Intel(R) Parallel Advisor to describe potential concurrency * and related activities. User-added source annotations expand to calls * to these procedures to enable modeling of a hypothetical concurrent * execution serially. 
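 *
 * Example: a sketch of the zero-initialized static-handle idiom described
 * above. In normal use these calls are generated by the ANNOTATE_* macros in
 * annotate.h rather than written directly; the function and string names below
 * are illustrative only.
 * @code
 *     #include "ittnotify.h"
 *
 *     void process_items(int n)
 *     {
 *         static __itt_model_site          site      = 0;   // must start as 0
 *         static __itt_model_site_instance site_inst = 0;
 *         static __itt_model_task          task      = 0;
 *         static __itt_model_task_instance task_inst = 0;
 *         int i;
 *
 *         __itt_model_site_begin(&site, &site_inst, "process_items");
 *         for (i = 0; i < n; i++) {
 *             __itt_model_task_begin(&task, &task_inst, "process_one_item");
 *             // ... work that could become a parallel task ...
 *             __itt_model_task_end(&task, &task_inst);
 *         }
 *         __itt_model_site_end(&site, &site_inst);
 *     }
 * @endcode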
* @{ */ #if !defined(_ADVISOR_ANNOTATE_H_) || defined(ANNOTATE_EXPAND_NULL) typedef void* __itt_model_site; /*!< @brief handle for lexical site */ typedef void* __itt_model_site_instance; /*!< @brief handle for dynamic instance */ typedef void* __itt_model_task; /*!< @brief handle for lexical site */ typedef void* __itt_model_task_instance; /*!< @brief handle for dynamic instance */ /** * @enum __itt_model_disable * @brief Enumerator for the disable methods */ typedef enum { __itt_model_disable_observation, __itt_model_disable_collection } __itt_model_disable; #endif /* !_ADVISOR_ANNOTATE_H_ || ANNOTATE_EXPAND_NULL */ /** * @brief ANNOTATE_SITE_BEGIN/ANNOTATE_SITE_END support. * * site_begin/end model a potential concurrency site. * site instances may be recursively nested with themselves. * site_end exits the most recently started but unended site for the current * thread. The handle passed to end may be used to validate structure. * Instances of a site encountered on different threads concurrently * are considered completely distinct. If the site name for two different * lexical sites match, it is unspecified whether they are treated as the * same or different for data presentation. */ void ITTAPI __itt_model_site_begin(__itt_model_site *site, __itt_model_site_instance *instance, const char *name); #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_model_site_beginW(const wchar_t *name); #endif void ITTAPI __itt_model_site_beginA(const char *name); void ITTAPI __itt_model_site_beginAL(const char *name, size_t siteNameLen); void ITTAPI __itt_model_site_end (__itt_model_site *site, __itt_model_site_instance *instance); void ITTAPI __itt_model_site_end_2(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_site_begin, (__itt_model_site *site, __itt_model_site_instance *instance, const char *name)) #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, model_site_beginW, (const wchar_t *name)) #endif ITT_STUBV(ITTAPI, void, model_site_beginA, (const char *name)) ITT_STUBV(ITTAPI, void, model_site_beginAL, (const char *name, size_t siteNameLen)) ITT_STUBV(ITTAPI, void, model_site_end, (__itt_model_site *site, __itt_model_site_instance *instance)) ITT_STUBV(ITTAPI, void, model_site_end_2, (void)) #define __itt_model_site_begin ITTNOTIFY_VOID(model_site_begin) #define __itt_model_site_begin_ptr ITTNOTIFY_NAME(model_site_begin) #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_model_site_beginW ITTNOTIFY_VOID(model_site_beginW) #define __itt_model_site_beginW_ptr ITTNOTIFY_NAME(model_site_beginW) #endif #define __itt_model_site_beginA ITTNOTIFY_VOID(model_site_beginA) #define __itt_model_site_beginA_ptr ITTNOTIFY_NAME(model_site_beginA) #define __itt_model_site_beginAL ITTNOTIFY_VOID(model_site_beginAL) #define __itt_model_site_beginAL_ptr ITTNOTIFY_NAME(model_site_beginAL) #define __itt_model_site_end ITTNOTIFY_VOID(model_site_end) #define __itt_model_site_end_ptr ITTNOTIFY_NAME(model_site_end) #define __itt_model_site_end_2 ITTNOTIFY_VOID(model_site_end_2) #define __itt_model_site_end_2_ptr ITTNOTIFY_NAME(model_site_end_2) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_site_begin(site, instance, name) #define __itt_model_site_begin_ptr 0 #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_model_site_beginW(name) #define __itt_model_site_beginW_ptr 0 #endif #define __itt_model_site_beginA(name) #define __itt_model_site_beginA_ptr 0 #define __itt_model_site_beginAL(name, siteNameLen) #define 
__itt_model_site_beginAL_ptr 0 #define __itt_model_site_end(site, instance) #define __itt_model_site_end_ptr 0 #define __itt_model_site_end_2() #define __itt_model_site_end_2_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_site_begin_ptr 0 #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_model_site_beginW_ptr 0 #endif #define __itt_model_site_beginA_ptr 0 #define __itt_model_site_beginAL_ptr 0 #define __itt_model_site_end_ptr 0 #define __itt_model_site_end_2_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_TASK_BEGIN/ANNOTATE_TASK_END support * * task_begin/end model a potential task, which is contained within the most * closely enclosing dynamic site. task_end exits the most recently started * but unended task. The handle passed to end may be used to validate * structure. It is unspecified if bad dynamic nesting is detected. If it * is, it should be encoded in the resulting data collection. The collector * should not fail due to construct nesting issues, nor attempt to directly * indicate the problem. */ void ITTAPI __itt_model_task_begin(__itt_model_task *task, __itt_model_task_instance *instance, const char *name); #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_model_task_beginW(const wchar_t *name); void ITTAPI __itt_model_iteration_taskW(const wchar_t *name); #endif void ITTAPI __itt_model_task_beginA(const char *name); void ITTAPI __itt_model_task_beginAL(const char *name, size_t taskNameLen); void ITTAPI __itt_model_iteration_taskA(const char *name); void ITTAPI __itt_model_iteration_taskAL(const char *name, size_t taskNameLen); void ITTAPI __itt_model_task_end (__itt_model_task *task, __itt_model_task_instance *instance); void ITTAPI __itt_model_task_end_2(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_task_begin, (__itt_model_task *task, __itt_model_task_instance *instance, const char *name)) #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, model_task_beginW, (const wchar_t *name)) ITT_STUBV(ITTAPI, void, model_iteration_taskW, (const wchar_t *name)) #endif ITT_STUBV(ITTAPI, void, model_task_beginA, (const char *name)) ITT_STUBV(ITTAPI, void, model_task_beginAL, (const char *name, size_t taskNameLen)) ITT_STUBV(ITTAPI, void, model_iteration_taskA, (const char *name)) ITT_STUBV(ITTAPI, void, model_iteration_taskAL, (const char *name, size_t taskNameLen)) ITT_STUBV(ITTAPI, void, model_task_end, (__itt_model_task *task, __itt_model_task_instance *instance)) ITT_STUBV(ITTAPI, void, model_task_end_2, (void)) #define __itt_model_task_begin ITTNOTIFY_VOID(model_task_begin) #define __itt_model_task_begin_ptr ITTNOTIFY_NAME(model_task_begin) #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_model_task_beginW ITTNOTIFY_VOID(model_task_beginW) #define __itt_model_task_beginW_ptr ITTNOTIFY_NAME(model_task_beginW) #define __itt_model_iteration_taskW ITTNOTIFY_VOID(model_iteration_taskW) #define __itt_model_iteration_taskW_ptr ITTNOTIFY_NAME(model_iteration_taskW) #endif #define __itt_model_task_beginA ITTNOTIFY_VOID(model_task_beginA) #define __itt_model_task_beginA_ptr ITTNOTIFY_NAME(model_task_beginA) #define __itt_model_task_beginAL ITTNOTIFY_VOID(model_task_beginAL) #define __itt_model_task_beginAL_ptr ITTNOTIFY_NAME(model_task_beginAL) #define __itt_model_iteration_taskA ITTNOTIFY_VOID(model_iteration_taskA) #define __itt_model_iteration_taskA_ptr ITTNOTIFY_NAME(model_iteration_taskA) #define 
__itt_model_iteration_taskAL ITTNOTIFY_VOID(model_iteration_taskAL) #define __itt_model_iteration_taskAL_ptr ITTNOTIFY_NAME(model_iteration_taskAL) #define __itt_model_task_end ITTNOTIFY_VOID(model_task_end) #define __itt_model_task_end_ptr ITTNOTIFY_NAME(model_task_end) #define __itt_model_task_end_2 ITTNOTIFY_VOID(model_task_end_2) #define __itt_model_task_end_2_ptr ITTNOTIFY_NAME(model_task_end_2) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_task_begin(task, instance, name) #define __itt_model_task_begin_ptr 0 #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_model_task_beginW(name) #define __itt_model_task_beginW_ptr 0 #endif #define __itt_model_task_beginA(name) #define __itt_model_task_beginA_ptr 0 #define __itt_model_task_beginAL(name, siteNameLen) #define __itt_model_task_beginAL_ptr 0 #define __itt_model_iteration_taskA(name) #define __itt_model_iteration_taskA_ptr 0 #define __itt_model_iteration_taskAL(name, siteNameLen) #define __itt_model_iteration_taskAL_ptr 0 #define __itt_model_task_end(task, instance) #define __itt_model_task_end_ptr 0 #define __itt_model_task_end_2() #define __itt_model_task_end_2_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_task_begin_ptr 0 #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_model_task_beginW_ptr 0 #endif #define __itt_model_task_beginA_ptr 0 #define __itt_model_task_beginAL_ptr 0 #define __itt_model_iteration_taskA_ptr 0 #define __itt_model_iteration_taskAL_ptr 0 #define __itt_model_task_end_ptr 0 #define __itt_model_task_end_2_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_LOCK_ACQUIRE/ANNOTATE_LOCK_RELEASE support * * lock_acquire/release model a potential lock for both lockset and * performance modeling. Each unique address is modeled as a separate * lock, with invalid addresses being valid lock IDs. Specifically: * no storage is accessed by the API at the specified address - it is only * used for lock identification. Lock acquires may be self-nested and are * unlocked by a corresponding number of releases. * (These closely correspond to __itt_sync_acquired/__itt_sync_releasing, * but may not have identical semantics.) 
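 *
 * Example: a sketch of annotating a modeled critical section; as noted above
 * the address serves only as a lock identifier, and in normal use the
 * ANNOTATE_LOCK_* macros emit these calls. All names are illustrative only.
 * @code
 *     #include "ittnotify.h"
 *
 *     static int g_shared_total;
 *     static int g_lock_tag;    // address is used purely as the lock ID
 *
 *     void add_to_total(int value)
 *     {
 *         __itt_model_lock_acquire(&g_lock_tag);
 *         g_shared_total += value;              // modeled critical section
 *         __itt_model_lock_release(&g_lock_tag);
 *     }
 * @endcode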
*/ void ITTAPI __itt_model_lock_acquire(void *lock); void ITTAPI __itt_model_lock_acquire_2(void *lock); void ITTAPI __itt_model_lock_release(void *lock); void ITTAPI __itt_model_lock_release_2(void *lock); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock)) ITT_STUBV(ITTAPI, void, model_lock_acquire_2, (void *lock)) ITT_STUBV(ITTAPI, void, model_lock_release, (void *lock)) ITT_STUBV(ITTAPI, void, model_lock_release_2, (void *lock)) #define __itt_model_lock_acquire ITTNOTIFY_VOID(model_lock_acquire) #define __itt_model_lock_acquire_ptr ITTNOTIFY_NAME(model_lock_acquire) #define __itt_model_lock_acquire_2 ITTNOTIFY_VOID(model_lock_acquire_2) #define __itt_model_lock_acquire_2_ptr ITTNOTIFY_NAME(model_lock_acquire_2) #define __itt_model_lock_release ITTNOTIFY_VOID(model_lock_release) #define __itt_model_lock_release_ptr ITTNOTIFY_NAME(model_lock_release) #define __itt_model_lock_release_2 ITTNOTIFY_VOID(model_lock_release_2) #define __itt_model_lock_release_2_ptr ITTNOTIFY_NAME(model_lock_release_2) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_lock_acquire(lock) #define __itt_model_lock_acquire_ptr 0 #define __itt_model_lock_acquire_2(lock) #define __itt_model_lock_acquire_2_ptr 0 #define __itt_model_lock_release(lock) #define __itt_model_lock_release_ptr 0 #define __itt_model_lock_release_2(lock) #define __itt_model_lock_release_2_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_lock_acquire_ptr 0 #define __itt_model_lock_acquire_2_ptr 0 #define __itt_model_lock_release_ptr 0 #define __itt_model_lock_release_2_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_RECORD_ALLOCATION/ANNOTATE_RECORD_DEALLOCATION support * * record_allocation/deallocation describe user-defined memory allocator * behavior, which may be required for correctness modeling to understand * when storage is not expected to be actually reused across threads. 
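 *
 * Example: a possible sketch of annotating a user-defined allocator; malloc and
 * free stand in for the real pool implementation, and the wrapper names are
 * hypothetical.
 * @code
 *     #include <stdlib.h>
 *     #include "ittnotify.h"
 *
 *     void *pool_alloc(size_t size)
 *     {
 *         void *p = malloc(size);                       // stand-in for a custom pool
 *         if (p != NULL)
 *             __itt_model_record_allocation(p, size);   // describe the block to the model
 *         return p;
 *     }
 *
 *     void pool_free(void *p)
 *     {
 *         __itt_model_record_deallocation(p);
 *         free(p);
 *     }
 * @endcode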
*/ void ITTAPI __itt_model_record_allocation (void *addr, size_t size); void ITTAPI __itt_model_record_deallocation(void *addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_record_allocation, (void *addr, size_t size)) ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr)) #define __itt_model_record_allocation ITTNOTIFY_VOID(model_record_allocation) #define __itt_model_record_allocation_ptr ITTNOTIFY_NAME(model_record_allocation) #define __itt_model_record_deallocation ITTNOTIFY_VOID(model_record_deallocation) #define __itt_model_record_deallocation_ptr ITTNOTIFY_NAME(model_record_deallocation) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_record_allocation(addr, size) #define __itt_model_record_allocation_ptr 0 #define __itt_model_record_deallocation(addr) #define __itt_model_record_deallocation_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_record_allocation_ptr 0 #define __itt_model_record_deallocation_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_INDUCTION_USES support * * Note particular storage is inductive through the end of the current site */ void ITTAPI __itt_model_induction_uses(void* addr, size_t size); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_induction_uses, (void *addr, size_t size)) #define __itt_model_induction_uses ITTNOTIFY_VOID(model_induction_uses) #define __itt_model_induction_uses_ptr ITTNOTIFY_NAME(model_induction_uses) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_induction_uses(addr, size) #define __itt_model_induction_uses_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_induction_uses_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_REDUCTION_USES support * * Note particular storage is used for reduction through the end * of the current site */ void ITTAPI __itt_model_reduction_uses(void* addr, size_t size); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_reduction_uses, (void *addr, size_t size)) #define __itt_model_reduction_uses ITTNOTIFY_VOID(model_reduction_uses) #define __itt_model_reduction_uses_ptr ITTNOTIFY_NAME(model_reduction_uses) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_reduction_uses(addr, size) #define __itt_model_reduction_uses_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_reduction_uses_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_OBSERVE_USES support * * Have correctness modeling record observations about uses of storage * through the end of the current site */ void ITTAPI __itt_model_observe_uses(void* addr, size_t size); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_observe_uses, (void *addr, size_t size)) #define __itt_model_observe_uses ITTNOTIFY_VOID(model_observe_uses) #define __itt_model_observe_uses_ptr ITTNOTIFY_NAME(model_observe_uses) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_observe_uses(addr, size) #define __itt_model_observe_uses_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_observe_uses_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_CLEAR_USES 
support * * Clear the special handling of a piece of storage related to induction, * reduction or observe_uses */ void ITTAPI __itt_model_clear_uses(void* addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_clear_uses, (void *addr)) #define __itt_model_clear_uses ITTNOTIFY_VOID(model_clear_uses) #define __itt_model_clear_uses_ptr ITTNOTIFY_NAME(model_clear_uses) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_clear_uses(addr) #define __itt_model_clear_uses_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_clear_uses_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief ANNOTATE_DISABLE_*_PUSH/ANNOTATE_DISABLE_*_POP support * * disable_push/disable_pop push and pop disabling based on a parameter. * Disabling observations stops processing of memory references during * correctness modeling, and all annotations that occur in the disabled * region. This allows description of code that is expected to be handled * specially during conversion to parallelism or that is not recognized * by tools (e.g. some kinds of synchronization operations.) * This mechanism causes all annotations in the disabled region, other * than disable_push and disable_pop, to be ignored. (For example, this * might validly be used to disable an entire parallel site and the contained * tasks and locking in it for data collection purposes.) * The disable for collection is a more expensive operation, but reduces * collector overhead significantly. This applies to BOTH correctness data * collection and performance data collection. For example, a site * containing a task might only enable data collection for the first 10 * iterations. Both performance and correctness data should reflect this, * and the program should run as close to full speed as possible when * collection is disabled. 
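 *
 * Example: a sketch of limiting collection to the first few iterations, as
 * described above; the iteration threshold and helper name are illustrative
 * assumptions.
 * @code
 *     #include "ittnotify.h"
 *
 *     void run_iterations(int n)
 *     {
 *         int disabled = 0;
 *         int i;
 *         for (i = 0; i < n; i++) {
 *             if (i == 10 && !disabled) {
 *                 // Stop collecting after 10 modeled iterations.
 *                 __itt_model_disable_push(__itt_model_disable_collection);
 *                 disabled = 1;
 *             }
 *             // ... iteration body with its own annotations ...
 *         }
 *         if (disabled)
 *             __itt_model_disable_pop();
 *     }
 * @endcode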
*/ void ITTAPI __itt_model_disable_push(__itt_model_disable x); void ITTAPI __itt_model_disable_pop(void); void ITTAPI __itt_model_aggregate_task(size_t x); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x)) ITT_STUBV(ITTAPI, void, model_disable_pop, (void)) ITT_STUBV(ITTAPI, void, model_aggregate_task, (size_t x)) #define __itt_model_disable_push ITTNOTIFY_VOID(model_disable_push) #define __itt_model_disable_push_ptr ITTNOTIFY_NAME(model_disable_push) #define __itt_model_disable_pop ITTNOTIFY_VOID(model_disable_pop) #define __itt_model_disable_pop_ptr ITTNOTIFY_NAME(model_disable_pop) #define __itt_model_aggregate_task ITTNOTIFY_VOID(model_aggregate_task) #define __itt_model_aggregate_task_ptr ITTNOTIFY_NAME(model_aggregate_task) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_model_disable_push(x) #define __itt_model_disable_push_ptr 0 #define __itt_model_disable_pop() #define __itt_model_disable_pop_ptr 0 #define __itt_model_aggregate_task(x) #define __itt_model_aggregate_task_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_model_disable_push_ptr 0 #define __itt_model_disable_pop_ptr 0 #define __itt_model_aggregate_task_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} model group */ /** * @defgroup heap Heap * @ingroup public * Heap group * @{ */ typedef void* __itt_heap_function; /** * @brief Create an identification for heap function * @return non-zero identifier or NULL */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_heap_function ITTAPI __itt_heap_function_createA(const char* name, const char* domain); __itt_heap_function ITTAPI __itt_heap_function_createW(const wchar_t* name, const wchar_t* domain); #if defined(UNICODE) || defined(_UNICODE) # define __itt_heap_function_create __itt_heap_function_createW # define __itt_heap_function_create_ptr __itt_heap_function_createW_ptr #else # define __itt_heap_function_create __itt_heap_function_createA # define __itt_heap_function_create_ptr __itt_heap_function_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_heap_function ITTAPI __itt_heap_function_create(const char* name, const char* domain); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char* name, const char* domain)) ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t* name, const wchar_t* domain)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create, (const char* name, const char* domain)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_heap_function_createA ITTNOTIFY_DATA(heap_function_createA) #define __itt_heap_function_createA_ptr ITTNOTIFY_NAME(heap_function_createA) #define __itt_heap_function_createW ITTNOTIFY_DATA(heap_function_createW) #define __itt_heap_function_createW_ptr ITTNOTIFY_NAME(heap_function_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_heap_function_create ITTNOTIFY_DATA(heap_function_create) #define __itt_heap_function_create_ptr ITTNOTIFY_NAME(heap_function_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_heap_function_createA(name, domain) 
(__itt_heap_function)0 #define __itt_heap_function_createA_ptr 0 #define __itt_heap_function_createW(name, domain) (__itt_heap_function)0 #define __itt_heap_function_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_heap_function_create(name, domain) (__itt_heap_function)0 #define __itt_heap_function_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_heap_function_createA_ptr 0 #define __itt_heap_function_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_heap_function_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record an allocation begin occurrence. */ void ITTAPI __itt_heap_allocate_begin(__itt_heap_function h, size_t size, int initialized); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized)) #define __itt_heap_allocate_begin ITTNOTIFY_VOID(heap_allocate_begin) #define __itt_heap_allocate_begin_ptr ITTNOTIFY_NAME(heap_allocate_begin) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_allocate_begin(h, size, initialized) #define __itt_heap_allocate_begin_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_allocate_begin_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record an allocation end occurrence. */ void ITTAPI __itt_heap_allocate_end(__itt_heap_function h, void** addr, size_t size, int initialized); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void** addr, size_t size, int initialized)) #define __itt_heap_allocate_end ITTNOTIFY_VOID(heap_allocate_end) #define __itt_heap_allocate_end_ptr ITTNOTIFY_NAME(heap_allocate_end) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_allocate_end(h, addr, size, initialized) #define __itt_heap_allocate_end_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_allocate_end_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record a free begin occurrence. */ void ITTAPI __itt_heap_free_begin(__itt_heap_function h, void* addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr)) #define __itt_heap_free_begin ITTNOTIFY_VOID(heap_free_begin) #define __itt_heap_free_begin_ptr ITTNOTIFY_NAME(heap_free_begin) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_free_begin(h, addr) #define __itt_heap_free_begin_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_free_begin_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record a free end occurrence. 
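 *
 * Example: a sketch of wrapping an allocator with the begin/end pairs of this
 * group; malloc and free stand in for the real heap, the wrapper and domain
 * names are illustrative only, and on Windows UNICODE builds
 * __itt_heap_function_create maps to the wide-string variant.
 * @code
 *     #include <stdlib.h>
 *     #include "ittnotify.h"
 *
 *     static __itt_heap_function g_alloc_fn;
 *     static __itt_heap_function g_free_fn;
 *
 *     void my_heap_init(void)
 *     {
 *         g_alloc_fn = __itt_heap_function_create("my_malloc", "my_allocator");
 *         g_free_fn  = __itt_heap_function_create("my_free",   "my_allocator");
 *     }
 *
 *     void *my_malloc(size_t size)
 *     {
 *         void *p;
 *         __itt_heap_allocate_begin(g_alloc_fn, size, 0);   // 0: memory not initialized
 *         p = malloc(size);
 *         __itt_heap_allocate_end(g_alloc_fn, &p, size, 0);
 *         return p;
 *     }
 *
 *     void my_free(void *p)
 *     {
 *         __itt_heap_free_begin(g_free_fn, p);
 *         free(p);
 *         __itt_heap_free_end(g_free_fn, p);
 *     }
 * @endcode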
*/ void ITTAPI __itt_heap_free_end(__itt_heap_function h, void* addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr)) #define __itt_heap_free_end ITTNOTIFY_VOID(heap_free_end) #define __itt_heap_free_end_ptr ITTNOTIFY_NAME(heap_free_end) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_free_end(h, addr) #define __itt_heap_free_end_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_free_end_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record a reallocation begin occurrence. */ void ITTAPI __itt_heap_reallocate_begin(__itt_heap_function h, void* addr, size_t new_size, int initialized); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized)) #define __itt_heap_reallocate_begin ITTNOTIFY_VOID(heap_reallocate_begin) #define __itt_heap_reallocate_begin_ptr ITTNOTIFY_NAME(heap_reallocate_begin) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_reallocate_begin(h, addr, new_size, initialized) #define __itt_heap_reallocate_begin_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_reallocate_begin_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record a reallocation end occurrence. */ void ITTAPI __itt_heap_reallocate_end(__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void** new_addr, size_t new_size, int initialized)) #define __itt_heap_reallocate_end ITTNOTIFY_VOID(heap_reallocate_end) #define __itt_heap_reallocate_end_ptr ITTNOTIFY_NAME(heap_reallocate_end) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_reallocate_end(h, addr, new_addr, new_size, initialized) #define __itt_heap_reallocate_end_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_reallocate_end_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @brief internal access begin */ void ITTAPI __itt_heap_internal_access_begin(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_internal_access_begin, (void)) #define __itt_heap_internal_access_begin ITTNOTIFY_VOID(heap_internal_access_begin) #define __itt_heap_internal_access_begin_ptr ITTNOTIFY_NAME(heap_internal_access_begin) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_internal_access_begin() #define __itt_heap_internal_access_begin_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_internal_access_begin_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @brief internal access end */ void ITTAPI __itt_heap_internal_access_end(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_internal_access_end, (void)) #define __itt_heap_internal_access_end ITTNOTIFY_VOID(heap_internal_access_end) #define __itt_heap_internal_access_end_ptr ITTNOTIFY_NAME(heap_internal_access_end) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_internal_access_end() #define 
__itt_heap_internal_access_end_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_internal_access_end_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @brief record memory growth begin */ void ITTAPI __itt_heap_record_memory_growth_begin(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_record_memory_growth_begin, (void)) #define __itt_heap_record_memory_growth_begin ITTNOTIFY_VOID(heap_record_memory_growth_begin) #define __itt_heap_record_memory_growth_begin_ptr ITTNOTIFY_NAME(heap_record_memory_growth_begin) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_record_memory_growth_begin() #define __itt_heap_record_memory_growth_begin_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_record_memory_growth_begin_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @brief record memory growth end */ void ITTAPI __itt_heap_record_memory_growth_end(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_record_memory_growth_end, (void)) #define __itt_heap_record_memory_growth_end ITTNOTIFY_VOID(heap_record_memory_growth_end) #define __itt_heap_record_memory_growth_end_ptr ITTNOTIFY_NAME(heap_record_memory_growth_end) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_record_memory_growth_end() #define __itt_heap_record_memory_growth_end_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_record_memory_growth_end_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Specify the type of heap detection/reporting to modify. */ /** * @hideinitializer * @brief Report on memory leaks. */ #define __itt_heap_leaks 0x00000001 /** * @hideinitializer * @brief Report on memory growth. 
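 *
 * Example: one plausible way to combine the detection masks with the reset and
 * record calls below; the exact reporting behaviour is collector-defined, so
 * the call order shown is only an assumption, and run_phase is a hypothetical
 * callback.
 * @code
 *     #include "ittnotify.h"
 *
 *     void check_phase_memory(void (*run_phase)(void))
 *     {
 *         // Clear any previously accumulated leak/growth state.
 *         __itt_heap_reset_detection(__itt_heap_leaks | __itt_heap_growth);
 *
 *         __itt_heap_record_memory_growth_begin();
 *         run_phase();
 *         __itt_heap_record_memory_growth_end();
 *
 *         // Ask the collector to report what it observed during the phase.
 *         __itt_heap_record(__itt_heap_leaks | __itt_heap_growth);
 *     }
 * @endcode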
*/ #define __itt_heap_growth 0x00000002 /** @brief heap reset detection */ void ITTAPI __itt_heap_reset_detection(unsigned int reset_mask); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_reset_detection, (unsigned int reset_mask)) #define __itt_heap_reset_detection ITTNOTIFY_VOID(heap_reset_detection) #define __itt_heap_reset_detection_ptr ITTNOTIFY_NAME(heap_reset_detection) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_reset_detection() #define __itt_heap_reset_detection_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_reset_detection_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @brief report */ void ITTAPI __itt_heap_record(unsigned int record_mask); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, heap_record, (unsigned int record_mask)) #define __itt_heap_record ITTNOTIFY_VOID(heap_record) #define __itt_heap_record_ptr ITTNOTIFY_NAME(heap_record) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_heap_record() #define __itt_heap_record_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_heap_record_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} heap group */ /** @endcond */ /* ========================================================================== */ /** * @defgroup domains Domains * @ingroup public * Domains group * @{ */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_domain { volatile int flags; /*!< Zero if disabled, non-zero if enabled. The meaning of different non-zero values is reserved to the runtime */ const char* nameA; /*!< Copy of original name in ASCII. */ #if defined(UNICODE) || defined(_UNICODE) const wchar_t* nameW; /*!< Copy of original name in UNICODE. */ #else /* UNICODE || _UNICODE */ void* nameW; #endif /* UNICODE || _UNICODE */ int extra1; /*!< Reserved to the runtime */ void* extra2; /*!< Reserved to the runtime */ struct ___itt_domain* next; } __itt_domain; #pragma pack(pop) /** @endcond */ /** * @ingroup domains * @brief Create a domain. * Create domain using some domain name: the URI naming style is recommended. * Because the set of domains is expected to be static over the application's * execution time, there is no mechanism to destroy a domain. * Any domain can be accessed by any thread in the process, regardless of * which thread created the domain. This call is thread-safe. 
* @param[in] name name of domain */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_domain* ITTAPI __itt_domain_createA(const char *name); __itt_domain* ITTAPI __itt_domain_createW(const wchar_t *name); #if defined(UNICODE) || defined(_UNICODE) # define __itt_domain_create __itt_domain_createW # define __itt_domain_create_ptr __itt_domain_createW_ptr #else /* UNICODE */ # define __itt_domain_create __itt_domain_createA # define __itt_domain_create_ptr __itt_domain_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_domain* ITTAPI __itt_domain_create(const char *name); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_domain*, domain_createA, (const char *name)) ITT_STUB(ITTAPI, __itt_domain*, domain_createW, (const wchar_t *name)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_domain*, domain_create, (const char *name)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_domain_createA ITTNOTIFY_DATA(domain_createA) #define __itt_domain_createA_ptr ITTNOTIFY_NAME(domain_createA) #define __itt_domain_createW ITTNOTIFY_DATA(domain_createW) #define __itt_domain_createW_ptr ITTNOTIFY_NAME(domain_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_domain_create ITTNOTIFY_DATA(domain_create) #define __itt_domain_create_ptr ITTNOTIFY_NAME(domain_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_domain_createA(name) (__itt_domain*)0 #define __itt_domain_createA_ptr 0 #define __itt_domain_createW(name) (__itt_domain*)0 #define __itt_domain_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_domain_create(name) (__itt_domain*)0 #define __itt_domain_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_domain_createA_ptr 0 #define __itt_domain_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_domain_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} domains group */ /** * @defgroup ids IDs * @ingroup public * IDs group * @{ */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_id { unsigned long long d1, d2, d3; } __itt_id; #pragma pack(pop) /** @endcond */ static const __itt_id __itt_null = { 0, 0, 0 }; /** * @ingroup ids * @brief A convenience function is provided to create an ID without domain control. * @brief This is a convenience function to initialize an __itt_id structure. This function * does not affect the collector runtime in any way. After you make the ID with this * function, you still must create it with the __itt_id_create function before using the ID * to identify a named entity. * @param[in] addr The address of object; high QWORD of the ID value. * @param[in] extra The extra data to unique identify object; low QWORD of the ID value. */ ITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra) ITT_INLINE_ATTRIBUTE; ITT_INLINE __itt_id ITTAPI __itt_id_make(void* addr, unsigned long long extra) { __itt_id id = __itt_null; id.d1 = (unsigned long long)((uintptr_t)addr); id.d2 = (unsigned long long)extra; id.d3 = (unsigned long long)0; /* Reserved. 
Must be zero */ return id; } /** * @ingroup ids * @brief Create an instance of identifier. * This establishes the beginning of the lifetime of an instance of * the given ID in the trace. Once this lifetime starts, the ID * can be used to tag named entity instances in calls such as * __itt_task_begin, and to specify relationships among * identified named entity instances, using the \ref relations APIs. * Instance IDs are not domain specific! * @param[in] domain The domain controlling the execution of this call. * @param[in] id The ID to create. */ void ITTAPI __itt_id_create(const __itt_domain *domain, __itt_id id); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, id_create, (const __itt_domain *domain, __itt_id id)) #define __itt_id_create(d,x) ITTNOTIFY_VOID_D1(id_create,d,x) #define __itt_id_create_ptr ITTNOTIFY_NAME(id_create) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_id_create(domain,id) #define __itt_id_create_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_id_create_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup ids * @brief Destroy an instance of identifier. * This ends the lifetime of the current instance of the given ID value in the trace. * Any relationships that are established after this lifetime ends are invalid. * This call must be performed before the given ID value can be reused for a different * named entity instance. * @param[in] domain The domain controlling the execution of this call. * @param[in] id The ID to destroy. */ void ITTAPI __itt_id_destroy(const __itt_domain *domain, __itt_id id); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, id_destroy, (const __itt_domain *domain, __itt_id id)) #define __itt_id_destroy(d,x) ITTNOTIFY_VOID_D1(id_destroy,d,x) #define __itt_id_destroy_ptr ITTNOTIFY_NAME(id_destroy) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_id_destroy(domain,id) #define __itt_id_destroy_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_id_destroy_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} ids group */ /** * @defgroup handless String Handles * @ingroup public * String Handles group * @{ */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_string_handle { const char* strA; /*!< Copy of original string in ASCII. */ #if defined(UNICODE) || defined(_UNICODE) const wchar_t* strW; /*!< Copy of original string in UNICODE. */ #else /* UNICODE || _UNICODE */ void* strW; #endif /* UNICODE || _UNICODE */ int extra1; /*!< Reserved. Must be zero */ void* extra2; /*!< Reserved. Must be zero */ struct ___itt_string_handle* next; } __itt_string_handle; #pragma pack(pop) /** @endcond */ /** * @ingroup handles * @brief Create a string handle. * Create and return handle value that can be associated with a string. * Consecutive calls to __itt_string_handle_create with the same name * return the same value. Because the set of string handles is expected to remain * static during the application's execution time, there is no mechanism to destroy a string handle. * Any string handle can be accessed by any thread in the process, regardless of which thread created * the string handle. This call is thread-safe. 
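 *
 * Example: a minimal sketch of the usual pairing of a domain with string
 * handles to bracket work with the task API documented further below; the
 * domain and handle names are illustrative only.
 * @code
 *     #include "ittnotify.h"
 *
 *     static __itt_domain        *g_domain;
 *     static __itt_string_handle *g_parse_name;
 *
 *     void tracing_init(void)
 *     {
 *         // Both calls are thread-safe and return the same handle for the same name.
 *         g_domain     = __itt_domain_create("com.example.myapp");
 *         g_parse_name = __itt_string_handle_create("parse_input");
 *     }
 *
 *     void parse_input(void)
 *     {
 *         __itt_task_begin(g_domain, __itt_null, __itt_null, g_parse_name);
 *         // ... work measured as one task instance ...
 *         __itt_task_end(g_domain);
 *     }
 * @endcode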
* @param[in] name The input string */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_string_handle* ITTAPI __itt_string_handle_createA(const char *name); __itt_string_handle* ITTAPI __itt_string_handle_createW(const wchar_t *name); #if defined(UNICODE) || defined(_UNICODE) # define __itt_string_handle_create __itt_string_handle_createW # define __itt_string_handle_create_ptr __itt_string_handle_createW_ptr #else /* UNICODE */ # define __itt_string_handle_create __itt_string_handle_createA # define __itt_string_handle_create_ptr __itt_string_handle_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_string_handle* ITTAPI __itt_string_handle_create(const char *name); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createA, (const char *name)) ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_createW, (const wchar_t *name)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_string_handle*, string_handle_create, (const char *name)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_string_handle_createA ITTNOTIFY_DATA(string_handle_createA) #define __itt_string_handle_createA_ptr ITTNOTIFY_NAME(string_handle_createA) #define __itt_string_handle_createW ITTNOTIFY_DATA(string_handle_createW) #define __itt_string_handle_createW_ptr ITTNOTIFY_NAME(string_handle_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_string_handle_create ITTNOTIFY_DATA(string_handle_create) #define __itt_string_handle_create_ptr ITTNOTIFY_NAME(string_handle_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_string_handle_createA(name) (__itt_string_handle*)0 #define __itt_string_handle_createA_ptr 0 #define __itt_string_handle_createW(name) (__itt_string_handle*)0 #define __itt_string_handle_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_string_handle_create(name) (__itt_string_handle*)0 #define __itt_string_handle_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_string_handle_createA_ptr 0 #define __itt_string_handle_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_string_handle_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} handles group */ /** @cond exclude_from_documentation */ typedef unsigned long long __itt_timestamp; /** @endcond */ #define __itt_timestamp_none ((__itt_timestamp)-1LL) /** @cond exclude_from_gpa_documentation */ /** * @ingroup timestamps * @brief Return timestamp corresponding to the current moment. * This returns the timestamp in the format that is the most relevant for the current * host or platform (RDTSC, QPC, and others). You can use the "<" operator to * compare __itt_timestamp values. 
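 *
 * Example: a sketch of capturing an interval and reporting it through
 * __itt_frame_submit_v3 (declared further below); the render callback and the
 * use of a NULL frame ID are assumptions for illustration.
 * @code
 *     #include <stddef.h>
 *     #include "ittnotify.h"
 *
 *     void time_one_frame(const __itt_domain *domain, void (*render)(void))
 *     {
 *         __itt_timestamp begin, end;
 *
 *         begin = __itt_get_timestamp();
 *         render();
 *         end = __itt_get_timestamp();
 *
 *         // Passing __itt_timestamp_none as the end value would mean "now" instead.
 *         __itt_frame_submit_v3(domain, NULL, begin, end);
 *     }
 * @endcode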
*/ __itt_timestamp ITTAPI __itt_get_timestamp(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(ITTAPI, __itt_timestamp, get_timestamp, (void)) #define __itt_get_timestamp ITTNOTIFY_DATA(get_timestamp) #define __itt_get_timestamp_ptr ITTNOTIFY_NAME(get_timestamp) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_get_timestamp() #define __itt_get_timestamp_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_get_timestamp_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} timestamps */ /** @endcond */ /** @cond exclude_from_gpa_documentation */ /** * @defgroup regions Regions * @ingroup public * Regions group * @{ */ /** * @ingroup regions * @brief Begin of region instance. * Successive calls to __itt_region_begin with the same ID are ignored * until a call to __itt_region_end with the same ID * @param[in] domain The domain for this region instance * @param[in] id The instance ID for this region instance. Must not be __itt_null * @param[in] parentid The instance ID for the parent of this region instance, or __itt_null * @param[in] name The name of this region */ void ITTAPI __itt_region_begin(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name); /** * @ingroup regions * @brief End of region instance. * The first call to __itt_region_end with a given ID ends the * region. Successive calls with the same ID are ignored, as are * calls that do not have a matching __itt_region_begin call. * @param[in] domain The domain for this region instance * @param[in] id The instance ID for this region instance */ void ITTAPI __itt_region_end(const __itt_domain *domain, __itt_id id); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, region_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name)) ITT_STUBV(ITTAPI, void, region_end, (const __itt_domain *domain, __itt_id id)) #define __itt_region_begin(d,x,y,z) ITTNOTIFY_VOID_D3(region_begin,d,x,y,z) #define __itt_region_begin_ptr ITTNOTIFY_NAME(region_begin) #define __itt_region_end(d,x) ITTNOTIFY_VOID_D1(region_end,d,x) #define __itt_region_end_ptr ITTNOTIFY_NAME(region_end) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_region_begin(d,x,y,z) #define __itt_region_begin_ptr 0 #define __itt_region_end(d,x) #define __itt_region_end_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_region_begin_ptr 0 #define __itt_region_end_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} regions group */ /** * @defgroup frames Frames * @ingroup public * Frames are similar to regions, but are intended to be easier to use and to implement. * In particular: * - Frames always represent periods of elapsed time * - By default, frames have no nesting relationships * @{ */ /** * @ingroup frames * @brief Begin a frame instance. * Successive calls to __itt_frame_begin with the * same ID are ignored until a call to __itt_frame_end with the same ID. * @param[in] domain The domain for this frame instance * @param[in] id The instance ID for this frame instance or NULL */ void ITTAPI __itt_frame_begin_v3(const __itt_domain *domain, __itt_id *id); /** * @ingroup frames * @brief End a frame instance. * The first call to __itt_frame_end with a given ID * ends the frame. Successive calls with the same ID are ignored, as are * calls that do not have a matching __itt_frame_begin call. 
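 *
 * Example: an illustrative sketch of bracketing a main loop with frame
 * begin/end; the callbacks are hypothetical and NULL is passed as the frame
 * instance ID.
 * @code
 *     #include <stddef.h>
 *     #include "ittnotify.h"
 *
 *     void main_loop(const __itt_domain *domain,
 *                    int (*keep_running)(void),
 *                    void (*do_frame)(void))
 *     {
 *         while (keep_running()) {
 *             __itt_frame_begin_v3(domain, NULL);   // NULL: no explicit instance ID
 *             do_frame();
 *             __itt_frame_end_v3(domain, NULL);
 *         }
 *     }
 * @endcode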
* @param[in] domain The domain for this frame instance * @param[in] id The instance ID for this frame instance or NULL for current */ void ITTAPI __itt_frame_end_v3(const __itt_domain *domain, __itt_id *id); /** * @ingroup frames * @brief Submits a frame instance. * Successive calls to __itt_frame_begin or __itt_frame_submit with the * same ID are ignored until a call to __itt_frame_end or __itt_frame_submit * with the same ID. * Passing special __itt_timestamp_none value as "end" argument means * take the current timestamp as the end timestamp. * @param[in] domain The domain for this frame instance * @param[in] id The instance ID for this frame instance or NULL * @param[in] begin Timestamp of the beginning of the frame * @param[in] end Timestamp of the end of the frame */ void ITTAPI __itt_frame_submit_v3(const __itt_domain *domain, __itt_id *id, __itt_timestamp begin, __itt_timestamp end); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, frame_begin_v3, (const __itt_domain *domain, __itt_id *id)) ITT_STUBV(ITTAPI, void, frame_end_v3, (const __itt_domain *domain, __itt_id *id)) ITT_STUBV(ITTAPI, void, frame_submit_v3, (const __itt_domain *domain, __itt_id *id, __itt_timestamp begin, __itt_timestamp end)) #define __itt_frame_begin_v3(d,x) ITTNOTIFY_VOID_D1(frame_begin_v3,d,x) #define __itt_frame_begin_v3_ptr ITTNOTIFY_NAME(frame_begin_v3) #define __itt_frame_end_v3(d,x) ITTNOTIFY_VOID_D1(frame_end_v3,d,x) #define __itt_frame_end_v3_ptr ITTNOTIFY_NAME(frame_end_v3) #define __itt_frame_submit_v3(d,x,b,e) ITTNOTIFY_VOID_D3(frame_submit_v3,d,x,b,e) #define __itt_frame_submit_v3_ptr ITTNOTIFY_NAME(frame_submit_v3) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_frame_begin_v3(domain,id) #define __itt_frame_begin_v3_ptr 0 #define __itt_frame_end_v3(domain,id) #define __itt_frame_end_v3_ptr 0 #define __itt_frame_submit_v3(domain,id,begin,end) #define __itt_frame_submit_v3_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_frame_begin_v3_ptr 0 #define __itt_frame_end_v3_ptr 0 #define __itt_frame_submit_v3_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} frames group */ /** @endcond */ /** * @defgroup taskgroup Task Group * @ingroup public * Task Group * @{ */ /** * @ingroup task_groups * @brief Denotes a task_group instance. * Successive calls to __itt_task_group with the same ID are ignored. * @param[in] domain The domain for this task_group instance * @param[in] id The instance ID for this task_group instance. Must not be __itt_null. * @param[in] parentid The instance ID for the parent of this task_group instance, or __itt_null. 
 * @param[in] name The name of this task_group
 */
void ITTAPI __itt_task_group(const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name);

/** @cond exclude_from_documentation */
#ifndef INTEL_NO_MACRO_BODY
#ifndef INTEL_NO_ITTNOTIFY_API
ITT_STUBV(ITTAPI, void, task_group, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name))
#define __itt_task_group(d,x,y,z) ITTNOTIFY_VOID_D3(task_group,d,x,y,z)
#define __itt_task_group_ptr      ITTNOTIFY_NAME(task_group)
#else  /* INTEL_NO_ITTNOTIFY_API */
#define __itt_task_group(d,x,y,z)
#define __itt_task_group_ptr 0
#endif /* INTEL_NO_ITTNOTIFY_API */
#else  /* INTEL_NO_MACRO_BODY */
#define __itt_task_group_ptr 0
#endif /* INTEL_NO_MACRO_BODY */
/** @endcond */
/** @} taskgroup group */

/**
 * @defgroup tasks Tasks
 * @ingroup public
 * A task instance represents a piece of work performed by a particular
 * thread for a period of time. A call to __itt_task_begin creates a
 * task instance. This becomes the current instance for that task on that
 * thread. A following call to __itt_task_end on the same thread ends the
 * instance. There may be multiple simultaneous instances of tasks with the
 * same name on different threads. If an ID is specified, the task instance
 * receives that ID. Nested tasks are allowed.
 *
 * Note: The task is defined by the bracketing of __itt_task_begin and
 * __itt_task_end on the same thread. If some scheduling mechanism causes
 * task switching (the thread executes a different user task) or thread
 * switching (the user task switches to a different thread), then this breaks
 * the notion of current instance. Additional API calls are required to
 * deal with that possibility.
 * @{
 */

/**
 * @ingroup tasks
 * @brief Begin a task instance.
 * @param[in] domain The domain for this task
 * @param[in] taskid The instance ID for this task instance, or __itt_null
 * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null
 * @param[in] name The name of this task
 */
void ITTAPI __itt_task_begin(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name);

/**
 * @ingroup tasks
 * @brief Begin a task instance.
 * @param[in] domain The domain for this task
 * @param[in] taskid The identifier for this task instance (may be 0)
 * @param[in] parentid The parent of this task (may be 0)
 * @param[in] fn The pointer to the function you are tracing
 */
void ITTAPI __itt_task_begin_fn(const __itt_domain *domain, __itt_id taskid, __itt_id parentid, void* fn);

/**
 * @ingroup tasks
 * @brief End the current task instance.
 * @param[in] domain The domain for this task
 */
void ITTAPI __itt_task_end(const __itt_domain *domain);

/**
 * @ingroup tasks
 * @brief Begin an overlapped task instance.
 * @param[in] domain The domain for this task.
 * @param[in] taskid The identifier for this task instance, *cannot* be __itt_null.
 * @param[in] parentid The parent of this task, or __itt_null.
 * @param[in] name The name of this task.
 */
void ITTAPI __itt_task_begin_overlapped(const __itt_domain* domain, __itt_id taskid, __itt_id parentid, __itt_string_handle* name);

/**
 * @ingroup tasks
 * @brief End an overlapped task instance.
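 *
 * A minimal usage sketch (illustrative only): unlike plain tasks, overlapped task instances are
 * matched by explicit IDs rather than by nesting, so two instances may overlap on one thread.
 * The domain, the string handles sh_recv/sh_send and the IDs id_recv/id_send are assumed to be
 * created elsewhere, e.g. with __itt_domain_create(), __itt_string_handle_create() and
 * __itt_id_make()/__itt_id_create() declared earlier in this header.
 * @code
 *     __itt_task_begin_overlapped(domain, id_recv, __itt_null, sh_recv);
 *     __itt_task_begin_overlapped(domain, id_send, __itt_null, sh_send); // overlaps id_recv
 *     __itt_task_end_overlapped(domain, id_recv);                        // ends may come in any order
 *     __itt_task_end_overlapped(domain, id_send);
 * @endcode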
 * @param[in] domain The domain for this task
 * @param[in] taskid Explicit ID of finished task
 */
void ITTAPI __itt_task_end_overlapped(const __itt_domain *domain, __itt_id taskid);

/** @cond exclude_from_documentation */
#ifndef INTEL_NO_MACRO_BODY
#ifndef INTEL_NO_ITTNOTIFY_API
ITT_STUBV(ITTAPI, void, task_begin, (const __itt_domain *domain, __itt_id id, __itt_id parentid, __itt_string_handle *name))
ITT_STUBV(ITTAPI, void, task_begin_fn, (const __itt_domain *domain, __itt_id id, __itt_id parentid, void* fn))
ITT_STUBV(ITTAPI, void, task_end, (const __itt_domain *domain))
ITT_STUBV(ITTAPI, void, task_begin_overlapped, (const __itt_domain *domain, __itt_id taskid, __itt_id parentid, __itt_string_handle *name))
ITT_STUBV(ITTAPI, void, task_end_overlapped, (const __itt_domain *domain, __itt_id taskid))
#define __itt_task_begin(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin,d,x,y,z)
#define __itt_task_begin_ptr ITTNOTIFY_NAME(task_begin)
#define __itt_task_begin_fn(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_fn,d,x,y,z)
#define __itt_task_begin_fn_ptr ITTNOTIFY_NAME(task_begin_fn)
#define __itt_task_end(d) ITTNOTIFY_VOID_D0(task_end,d)
#define __itt_task_end_ptr ITTNOTIFY_NAME(task_end)
#define __itt_task_begin_overlapped(d,x,y,z) ITTNOTIFY_VOID_D3(task_begin_overlapped,d,x,y,z)
#define __itt_task_begin_overlapped_ptr ITTNOTIFY_NAME(task_begin_overlapped)
#define __itt_task_end_overlapped(d,x) ITTNOTIFY_VOID_D1(task_end_overlapped,d,x)
#define __itt_task_end_overlapped_ptr ITTNOTIFY_NAME(task_end_overlapped)
#else  /* INTEL_NO_ITTNOTIFY_API */
#define __itt_task_begin(domain,id,parentid,name)
#define __itt_task_begin_ptr 0
#define __itt_task_begin_fn(domain,id,parentid,fn)
#define __itt_task_begin_fn_ptr 0
#define __itt_task_end(domain)
#define __itt_task_end_ptr 0
#define __itt_task_begin_overlapped(domain,taskid,parentid,name)
#define __itt_task_begin_overlapped_ptr 0
#define __itt_task_end_overlapped(domain,taskid)
#define __itt_task_end_overlapped_ptr 0
#endif /* INTEL_NO_ITTNOTIFY_API */
#else  /* INTEL_NO_MACRO_BODY */
#define __itt_task_begin_ptr 0
#define __itt_task_begin_fn_ptr 0
#define __itt_task_end_ptr 0
#define __itt_task_begin_overlapped_ptr 0
#define __itt_task_end_overlapped_ptr 0
#endif /* INTEL_NO_MACRO_BODY */
/** @endcond */
/** @} tasks group */

/**
 * @defgroup markers Markers
 * Markers represent a single discrete event in time. Markers have a scope,
 * described by an enumerated type __itt_scope. Markers are created by
 * the API call __itt_marker. A marker instance can be given an ID for use in
 * adding metadata.
 * @{
 */

/**
 * @brief Describes the scope of an event object in the trace.
 */
typedef enum
{
    __itt_scope_unknown = 0,
    __itt_scope_global,
    __itt_scope_track_group,
    __itt_scope_track,
    __itt_scope_task,
    __itt_scope_marker
} __itt_scope;

/** @cond exclude_from_documentation */
#define __itt_marker_scope_unknown __itt_scope_unknown
#define __itt_marker_scope_global  __itt_scope_global
#define __itt_marker_scope_process __itt_scope_track_group
#define __itt_marker_scope_thread  __itt_scope_track
#define __itt_marker_scope_task    __itt_scope_task
/** @endcond */

/**
 * @ingroup markers
 * @brief Create a marker instance
 * @param[in] domain The domain for this marker
 * @param[in] id The instance ID for this marker or __itt_null
 * @param[in] name The name for this marker
 * @param[in] scope The scope for this marker
 */
void ITTAPI __itt_marker(const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope);

/** @cond exclude_from_documentation */
#ifndef INTEL_NO_MACRO_BODY
#ifndef INTEL_NO_ITTNOTIFY_API
ITT_STUBV(ITTAPI, void, marker, (const __itt_domain *domain, __itt_id id, __itt_string_handle *name, __itt_scope scope))
#define __itt_marker(d,x,y,z) ITTNOTIFY_VOID_D3(marker,d,x,y,z)
#define __itt_marker_ptr ITTNOTIFY_NAME(marker)
#else  /* INTEL_NO_ITTNOTIFY_API */
#define __itt_marker(domain,id,name,scope)
#define __itt_marker_ptr 0
#endif /* INTEL_NO_ITTNOTIFY_API */
#else  /* INTEL_NO_MACRO_BODY */
#define __itt_marker_ptr 0
#endif /* INTEL_NO_MACRO_BODY */
/** @endcond */
/** @} markers group */

/**
 * @defgroup metadata Metadata
 * The metadata API is used to attach extra information to named
 * entities. Metadata can be attached to an identified named entity by ID,
 * or to the current entity (which is always a task).
 *
 * Conceptually metadata has a type (what kind of metadata), a key (the
 * name of the metadata), and a value (the actual data). The encoding of
 * the value depends on the type of the metadata.
 *
 * The type of metadata is specified by an enumerated type __itt_metadata_type.
 * @{
 */

/**
 * @ingroup parameters
 * @brief Describes the type of metadata
 */
typedef enum {
    __itt_metadata_unknown = 0,
    __itt_metadata_u64,     /**< Unsigned 64-bit integer */
    __itt_metadata_s64,     /**< Signed 64-bit integer */
    __itt_metadata_u32,     /**< Unsigned 32-bit integer */
    __itt_metadata_s32,     /**< Signed 32-bit integer */
    __itt_metadata_u16,     /**< Unsigned 16-bit integer */
    __itt_metadata_s16,     /**< Signed 16-bit integer */
    __itt_metadata_float,   /**< Signed 32-bit floating-point */
    __itt_metadata_double   /**< Signed 64-bit floating-point */
} __itt_metadata_type;

/**
 * @ingroup parameters
 * @brief Add metadata to an instance of a named entity.
 * @param[in] domain The domain controlling the call
 * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task
 * @param[in] key The name of the metadata
 * @param[in] type The type of the metadata
 * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added.
* @param[in] data The metadata itself */ void ITTAPI __itt_metadata_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, metadata_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data)) #define __itt_metadata_add(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add,d,x,y,z,a,b) #define __itt_metadata_add_ptr ITTNOTIFY_NAME(metadata_add) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_metadata_add(d,x,y,z,a,b) #define __itt_metadata_add_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_metadata_add_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup parameters * @brief Add string metadata to an instance of a named entity. * @param[in] domain The domain controlling the call * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task * @param[in] key The name of the metadata * @param[in] data The metadata itself * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated */ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_metadata_str_addA(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length); void ITTAPI __itt_metadata_str_addW(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length); #if defined(UNICODE) || defined(_UNICODE) # define __itt_metadata_str_add __itt_metadata_str_addW # define __itt_metadata_str_add_ptr __itt_metadata_str_addW_ptr #else /* UNICODE */ # define __itt_metadata_str_add __itt_metadata_str_addA # define __itt_metadata_str_add_ptr __itt_metadata_str_addA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ void ITTAPI __itt_metadata_str_add(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length); #endif /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, metadata_str_addA, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length)) ITT_STUBV(ITTAPI, void, metadata_str_addW, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const wchar_t *data, size_t length)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_metadata_str_addA(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_addA,d,x,y,z,a) #define __itt_metadata_str_addA_ptr ITTNOTIFY_NAME(metadata_str_addA) #define __itt_metadata_str_addW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_addW,d,x,y,z,a) #define __itt_metadata_str_addW_ptr ITTNOTIFY_NAME(metadata_str_addW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add,d,x,y,z,a) #define __itt_metadata_str_add_ptr ITTNOTIFY_NAME(metadata_str_add) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_metadata_str_addA(d,x,y,z,a) #define __itt_metadata_str_addA_ptr 0 
#define __itt_metadata_str_addW(d,x,y,z,a) #define __itt_metadata_str_addW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add(d,x,y,z,a) #define __itt_metadata_str_add_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_metadata_str_addA_ptr 0 #define __itt_metadata_str_addW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup parameters * @brief Add metadata to an instance of a named entity. * @param[in] domain The domain controlling the call * @param[in] scope The scope of the instance to which the metadata is to be added * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task * @param[in] key The name of the metadata * @param[in] type The type of the metadata * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added. * @param[in] data The metadata itself */ void ITTAPI __itt_metadata_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, __itt_metadata_type type, size_t count, void *data)) #define __itt_metadata_add_with_scope(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(metadata_add_with_scope,d,x,y,z,a,b) #define __itt_metadata_add_with_scope_ptr ITTNOTIFY_NAME(metadata_add_with_scope) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_metadata_add_with_scope(d,x,y,z,a,b) #define __itt_metadata_add_with_scope_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_metadata_add_with_scope_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup parameters * @brief Add string metadata to an instance of a named entity. 
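 *
 * A minimal usage sketch (illustrative only): the domain and the key handle key_file are assumed
 * to be created elsewhere with __itt_domain_create() and __itt_string_handle_create(); strlen()
 * requires <string.h>.
 * @code
 *     const char* path = "/tmp/input.bin";
 *     __itt_metadata_str_add_with_scope(domain, __itt_scope_task, key_file, path, strlen(path));
 * @endcode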
* @param[in] domain The domain controlling the call * @param[in] scope The scope of the instance to which the metadata is to be added * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task * @param[in] key The name of the metadata * @param[in] data The metadata itself * @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated */ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_metadata_str_add_with_scopeA(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length); void ITTAPI __itt_metadata_str_add_with_scopeW(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length); #if defined(UNICODE) || defined(_UNICODE) # define __itt_metadata_str_add_with_scope __itt_metadata_str_add_with_scopeW # define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeW_ptr #else /* UNICODE */ # define __itt_metadata_str_add_with_scope __itt_metadata_str_add_with_scopeA # define __itt_metadata_str_add_with_scope_ptr __itt_metadata_str_add_with_scopeA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ void ITTAPI __itt_metadata_str_add_with_scope(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length); #endif /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeA, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length)) ITT_STUBV(ITTAPI, void, metadata_str_add_with_scopeW, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const wchar_t *data, size_t length)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUBV(ITTAPI, void, metadata_str_add_with_scope, (const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeA,d,x,y,z,a) #define __itt_metadata_str_add_with_scopeA_ptr ITTNOTIFY_NAME(metadata_str_add_with_scopeA) #define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scopeW,d,x,y,z,a) #define __itt_metadata_str_add_with_scopeW_ptr ITTNOTIFY_NAME(metadata_str_add_with_scopeW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add_with_scope(d,x,y,z,a) ITTNOTIFY_VOID_D4(metadata_str_add_with_scope,d,x,y,z,a) #define __itt_metadata_str_add_with_scope_ptr ITTNOTIFY_NAME(metadata_str_add_with_scope) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_metadata_str_add_with_scopeA(d,x,y,z,a) #define __itt_metadata_str_add_with_scopeA_ptr 0 #define __itt_metadata_str_add_with_scopeW(d,x,y,z,a) #define __itt_metadata_str_add_with_scopeW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add_with_scope(d,x,y,z,a) #define __itt_metadata_str_add_with_scope_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_metadata_str_add_with_scopeA_ptr 0 #define __itt_metadata_str_add_with_scopeW_ptr 0 #else /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_metadata_str_add_with_scope_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} metadata group */ /** * @defgroup relations Relations * Instances of named entities can be explicitly associated with other * instances using instance IDs and the relationship API calls. * * @{ */ /** * @ingroup relations * @brief The kind of relation between two instances is specified by the enumerated type __itt_relation. * Relations between instances can be added with an API call. The relation * API uses instance IDs. Relations can be added before or after the actual * instances are created and persist independently of the instances. This * is the motivation for having different lifetimes for instance IDs and * the actual instances. */ typedef enum { __itt_relation_is_unknown = 0, __itt_relation_is_dependent_on, /**< "A is dependent on B" means that A cannot start until B completes */ __itt_relation_is_sibling_of, /**< "A is sibling of B" means that A and B were created as a group */ __itt_relation_is_parent_of, /**< "A is parent of B" means that A created B */ __itt_relation_is_continuation_of, /**< "A is continuation of B" means that A assumes the dependencies of B */ __itt_relation_is_child_of, /**< "A is child of B" means that A was created by B (inverse of is_parent_of) */ __itt_relation_is_continued_by, /**< "A is continued by B" means that B assumes the dependencies of A (inverse of is_continuation_of) */ __itt_relation_is_predecessor_to /**< "A is predecessor to B" means that B cannot start until A completes (inverse of is_dependent_on) */ } __itt_relation; /** * @ingroup relations * @brief Add a relation to the current task instance. * The current task instance is the head of the relation. * @param[in] domain The domain controlling this call * @param[in] relation The kind of relation * @param[in] tail The ID for the tail of the relation */ void ITTAPI __itt_relation_add_to_current(const __itt_domain *domain, __itt_relation relation, __itt_id tail); /** * @ingroup relations * @brief Add a relation between two instance identifiers. 
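 *
 * A minimal usage sketch (illustrative only): the domain and the instance IDs id_produce and
 * id_consume are assumed to have been established earlier, e.g. with __itt_id_make() and
 * __itt_id_create() declared earlier in this header.
 * @code
 *     // Record that the consumer task cannot start until the producer task completes.
 *     __itt_relation_add(domain, id_consume, __itt_relation_is_dependent_on, id_produce);
 * @endcode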
* @param[in] domain The domain controlling this call * @param[in] head The ID for the head of the relation * @param[in] relation The kind of relation * @param[in] tail The ID for the tail of the relation */ void ITTAPI __itt_relation_add(const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, relation_add_to_current, (const __itt_domain *domain, __itt_relation relation, __itt_id tail)) ITT_STUBV(ITTAPI, void, relation_add, (const __itt_domain *domain, __itt_id head, __itt_relation relation, __itt_id tail)) #define __itt_relation_add_to_current(d,x,y) ITTNOTIFY_VOID_D2(relation_add_to_current,d,x,y) #define __itt_relation_add_to_current_ptr ITTNOTIFY_NAME(relation_add_to_current) #define __itt_relation_add(d,x,y,z) ITTNOTIFY_VOID_D3(relation_add,d,x,y,z) #define __itt_relation_add_ptr ITTNOTIFY_NAME(relation_add) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_relation_add_to_current(d,x,y) #define __itt_relation_add_to_current_ptr 0 #define __itt_relation_add(d,x,y,z) #define __itt_relation_add_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_relation_add_to_current_ptr 0 #define __itt_relation_add_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} relations group */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_clock_info { unsigned long long clock_freq; /*!< Clock domain frequency */ unsigned long long clock_base; /*!< Clock domain base timestamp */ } __itt_clock_info; #pragma pack(pop) /** @endcond */ /** @cond exclude_from_documentation */ typedef void (ITTAPI *__itt_get_clock_info_fn)(__itt_clock_info* clock_info, void* data); /** @endcond */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_clock_domain { __itt_clock_info info; /*!< Most recent clock domain info */ __itt_get_clock_info_fn fn; /*!< Callback function pointer */ void* fn_data; /*!< Input argument for the callback function */ int extra1; /*!< Reserved. Must be zero */ void* extra2; /*!< Reserved. Must be zero */ struct ___itt_clock_domain* next; } __itt_clock_domain; #pragma pack(pop) /** @endcond */ /** * @ingroup clockdomains * @brief Create a clock domain. * Certain applications require the capability to trace their application using * a clock domain different than the CPU, for instance the instrumentation of events * that occur on a GPU. * Because the set of domains is expected to be static over the application's execution time, * there is no mechanism to destroy a domain. * Any domain can be accessed by any thread in the process, regardless of which thread created * the domain. This call is thread-safe. 
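 *
 * A minimal usage sketch (illustrative only): the callback reports a hypothetical device clock;
 * read_device_frequency() and read_device_timestamp() stand in for whatever the user's platform
 * actually provides.
 * @code
 *     static void ITTAPI get_device_clock(__itt_clock_info* info, void* data)
 *     {
 *         (void)data;
 *         info->clock_freq = read_device_frequency(); // ticks per second
 *         info->clock_base = read_device_timestamp(); // device timestamp taken "now"
 *     }
 *
 *     __itt_clock_domain* device_clock = __itt_clock_domain_create(get_device_clock, NULL);
 * @endcode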
* @param[in] fn A pointer to a callback function which retrieves alternative CPU timestamps * @param[in] fn_data Argument for a callback function; may be NULL */ __itt_clock_domain* ITTAPI __itt_clock_domain_create(__itt_get_clock_info_fn fn, void* fn_data); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(ITTAPI, __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info_fn fn, void* fn_data)) #define __itt_clock_domain_create ITTNOTIFY_DATA(clock_domain_create) #define __itt_clock_domain_create_ptr ITTNOTIFY_NAME(clock_domain_create) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_clock_domain_create(fn,fn_data) (__itt_clock_domain*)0 #define __itt_clock_domain_create_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_clock_domain_create_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup clockdomains * @brief Recalculate clock domains frequencies and clock base timestamps. */ void ITTAPI __itt_clock_domain_reset(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, clock_domain_reset, (void)) #define __itt_clock_domain_reset ITTNOTIFY_VOID(clock_domain_reset) #define __itt_clock_domain_reset_ptr ITTNOTIFY_NAME(clock_domain_reset) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_clock_domain_reset() #define __itt_clock_domain_reset_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_clock_domain_reset_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup clockdomain * @brief Create an instance of identifier. This establishes the beginning of the lifetime of * an instance of the given ID in the trace. Once this lifetime starts, the ID can be used to * tag named entity instances in calls such as __itt_task_begin, and to specify relationships among * identified named entity instances, using the \ref relations APIs. * @param[in] domain The domain controlling the execution of this call. * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. * @param[in] id The ID to create. */ void ITTAPI __itt_id_create_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id); /** * @ingroup clockdomain * @brief Destroy an instance of identifier. This ends the lifetime of the current instance of the * given ID value in the trace. Any relationships that are established after this lifetime ends are * invalid. This call must be performed before the given ID value can be reused for a different * named entity instance. * @param[in] domain The domain controlling the execution of this call. * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. * @param[in] id The ID to destroy. 
*/ void ITTAPI __itt_id_destroy_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, id_create_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id)) ITT_STUBV(ITTAPI, void, id_destroy_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id)) #define __itt_id_create_ex(d,x,y,z) ITTNOTIFY_VOID_D3(id_create_ex,d,x,y,z) #define __itt_id_create_ex_ptr ITTNOTIFY_NAME(id_create_ex) #define __itt_id_destroy_ex(d,x,y,z) ITTNOTIFY_VOID_D3(id_destroy_ex,d,x,y,z) #define __itt_id_destroy_ex_ptr ITTNOTIFY_NAME(id_destroy_ex) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_id_create_ex(domain,clock_domain,timestamp,id) #define __itt_id_create_ex_ptr 0 #define __itt_id_destroy_ex(domain,clock_domain,timestamp,id) #define __itt_id_destroy_ex_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_id_create_ex_ptr 0 #define __itt_id_destroy_ex_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup clockdomain * @brief Begin a task instance. * @param[in] domain The domain for this task * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. * @param[in] taskid The instance ID for this task instance, or __itt_null * @param[in] parentid The parent instance to which this task instance belongs, or __itt_null * @param[in] name The name of this task */ void ITTAPI __itt_task_begin_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); /** * @ingroup clockdomain * @brief Begin a task instance. * @param[in] domain The domain for this task * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. * @param[in] taskid The identifier for this task instance, or __itt_null * @param[in] parentid The parent of this task, or __itt_null * @param[in] fn The pointer to the function you are tracing */ void ITTAPI __itt_task_begin_fn_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, void* fn); /** * @ingroup clockdomain * @brief End the current task instance. * @param[in] domain The domain for this task * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. 
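 *
 * A minimal usage sketch of the timestamped variants (illustrative only): device_clock is a
 * clock domain obtained from __itt_clock_domain_create(), t_begin/t_end are timestamps expressed
 * in that clock domain and captured by the user, and sh_kernel is an assumed string handle.
 * @code
 *     __itt_task_begin_ex(domain, device_clock, t_begin, __itt_null, __itt_null, sh_kernel);
 *     __itt_task_end_ex(domain, device_clock, t_end);
 * @endcode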
*/ void ITTAPI __itt_task_end_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, task_begin_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, __itt_string_handle *name)) ITT_STUBV(ITTAPI, void, task_begin_fn_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_id parentid, void* fn)) ITT_STUBV(ITTAPI, void, task_end_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp)) #define __itt_task_begin_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_ex,d,x,y,z,a,b) #define __itt_task_begin_ex_ptr ITTNOTIFY_NAME(task_begin_ex) #define __itt_task_begin_fn_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_fn_ex,d,x,y,z,a,b) #define __itt_task_begin_fn_ex_ptr ITTNOTIFY_NAME(task_begin_fn_ex) #define __itt_task_end_ex(d,x,y) ITTNOTIFY_VOID_D2(task_end_ex,d,x,y) #define __itt_task_end_ex_ptr ITTNOTIFY_NAME(task_end_ex) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_task_begin_ex(domain,clock_domain,timestamp,id,parentid,name) #define __itt_task_begin_ex_ptr 0 #define __itt_task_begin_fn_ex(domain,clock_domain,timestamp,id,parentid,fn) #define __itt_task_begin_fn_ex_ptr 0 #define __itt_task_end_ex(domain,clock_domain,timestamp) #define __itt_task_end_ex_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_task_begin_ex_ptr 0 #define __itt_task_begin_fn_ex_ptr 0 #define __itt_task_end_ex_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @defgroup counters Counters * @ingroup public * Counters are user-defined objects with a monotonically increasing * value. Counter values are 64-bit unsigned integers. * Counters have names that can be displayed in * the tools. 
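 *
 * A minimal usage sketch (illustrative only; the counter and domain names are placeholders, and
 * the char-string create variant is shown):
 * @code
 *     __itt_counter bytes_sent = __itt_counter_create("BytesSent", "Example.Network");
 *     __itt_counter_inc_delta(bytes_sent, 512); // add 512 to the counter
 *     __itt_counter_inc(bytes_sent);            // add 1 to the counter
 *     __itt_counter_destroy(bytes_sent);
 * @endcode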
* @{ */ /** * @brief opaque structure for counter identification */ /** @cond exclude_from_documentation */ typedef struct ___itt_counter* __itt_counter; /** * @brief Create an unsigned 64 bits integer counter with given name/domain * * After __itt_counter_create() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) * can be used to change the value of the counter, where value_ptr is a pointer to an unsigned 64 bits integer * * The call is equal to __itt_counter_create_typed(name, domain, __itt_metadata_u64) */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_counter ITTAPI __itt_counter_createA(const char *name, const char *domain); __itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain); #if defined(UNICODE) || defined(_UNICODE) # define __itt_counter_create __itt_counter_createW # define __itt_counter_create_ptr __itt_counter_createW_ptr #else /* UNICODE */ # define __itt_counter_create __itt_counter_createA # define __itt_counter_create_ptr __itt_counter_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain)) ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_createA ITTNOTIFY_DATA(counter_createA) #define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA) #define __itt_counter_createW ITTNOTIFY_DATA(counter_createW) #define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create ITTNOTIFY_DATA(counter_create) #define __itt_counter_create_ptr ITTNOTIFY_NAME(counter_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_createA(name, domain) #define __itt_counter_createA_ptr 0 #define __itt_counter_createW(name, domain) #define __itt_counter_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create(name, domain) #define __itt_counter_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_createA_ptr 0 #define __itt_counter_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Increment the unsigned 64 bits integer counter value * * Calling this function to non-unsigned 64 bits integer counters has no effect */ void ITTAPI __itt_counter_inc(__itt_counter id); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id)) #define __itt_counter_inc ITTNOTIFY_VOID(counter_inc) #define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_inc(id) #define __itt_counter_inc_ptr 0 #endif 
/* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_inc_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Increment the unsigned 64 bits integer counter value with x * * Calling this function to non-unsigned 64 bits integer counters has no effect */ void ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value)) #define __itt_counter_inc_delta ITTNOTIFY_VOID(counter_inc_delta) #define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_inc_delta(id, value) #define __itt_counter_inc_delta_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_inc_delta_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Decrement the unsigned 64 bits integer counter value * * Calling this function to non-unsigned 64 bits integer counters has no effect */ void ITTAPI __itt_counter_dec(__itt_counter id); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_dec, (__itt_counter id)) #define __itt_counter_dec ITTNOTIFY_VOID(counter_dec) #define __itt_counter_dec_ptr ITTNOTIFY_NAME(counter_dec) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_dec(id) #define __itt_counter_dec_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_dec_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Decrement the unsigned 64 bits integer counter value with x * * Calling this function to non-unsigned 64 bits integer counters has no effect */ void ITTAPI __itt_counter_dec_delta(__itt_counter id, unsigned long long value); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_dec_delta, (__itt_counter id, unsigned long long value)) #define __itt_counter_dec_delta ITTNOTIFY_VOID(counter_dec_delta) #define __itt_counter_dec_delta_ptr ITTNOTIFY_NAME(counter_dec_delta) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_dec_delta(id, value) #define __itt_counter_dec_delta_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_dec_delta_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup counters * @brief Increment a counter by one. * The first call with a given name creates a counter by that name and sets its * value to zero. Successive calls increment the counter value. * @param[in] domain The domain controlling the call. Counter names are not domain specific. * The domain argument is used only to enable or disable the API calls. * @param[in] name The name of the counter */ void ITTAPI __itt_counter_inc_v3(const __itt_domain *domain, __itt_string_handle *name); /** * @ingroup counters * @brief Increment a counter by the value specified in delta. * @param[in] domain The domain controlling the call. Counter names are not domain specific. * The domain argument is used only to enable or disable the API calls. 
* @param[in] name The name of the counter * @param[in] delta The amount by which to increment the counter */ void ITTAPI __itt_counter_inc_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_inc_v3, (const __itt_domain *domain, __itt_string_handle *name)) ITT_STUBV(ITTAPI, void, counter_inc_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) #define __itt_counter_inc_v3(d,x) ITTNOTIFY_VOID_D1(counter_inc_v3,d,x) #define __itt_counter_inc_v3_ptr ITTNOTIFY_NAME(counter_inc_v3) #define __itt_counter_inc_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_inc_delta_v3,d,x,y) #define __itt_counter_inc_delta_v3_ptr ITTNOTIFY_NAME(counter_inc_delta_v3) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_inc_v3(domain,name) #define __itt_counter_inc_v3_ptr 0 #define __itt_counter_inc_delta_v3(domain,name,delta) #define __itt_counter_inc_delta_v3_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_inc_v3_ptr 0 #define __itt_counter_inc_delta_v3_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup counters * @brief Decrement a counter by one. * The first call with a given name creates a counter by that name and sets its * value to zero. Successive calls decrement the counter value. * @param[in] domain The domain controlling the call. Counter names are not domain specific. * The domain argument is used only to enable or disable the API calls. * @param[in] name The name of the counter */ void ITTAPI __itt_counter_dec_v3(const __itt_domain *domain, __itt_string_handle *name); /** * @ingroup counters * @brief Decrement a counter by the value specified in delta. * @param[in] domain The domain controlling the call. Counter names are not domain specific. * The domain argument is used only to enable or disable the API calls. 
* @param[in] name The name of the counter * @param[in] delta The amount by which to decrement the counter */ void ITTAPI __itt_counter_dec_delta_v3(const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_dec_v3, (const __itt_domain *domain, __itt_string_handle *name)) ITT_STUBV(ITTAPI, void, counter_dec_delta_v3, (const __itt_domain *domain, __itt_string_handle *name, unsigned long long delta)) #define __itt_counter_dec_v3(d,x) ITTNOTIFY_VOID_D1(counter_dec_v3,d,x) #define __itt_counter_dec_v3_ptr ITTNOTIFY_NAME(counter_dec_v3) #define __itt_counter_dec_delta_v3(d,x,y) ITTNOTIFY_VOID_D2(counter_dec_delta_v3,d,x,y) #define __itt_counter_dec_delta_v3_ptr ITTNOTIFY_NAME(counter_dec_delta_v3) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_dec_v3(domain,name) #define __itt_counter_dec_v3_ptr 0 #define __itt_counter_dec_delta_v3(domain,name,delta) #define __itt_counter_dec_delta_v3_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_dec_v3_ptr 0 #define __itt_counter_dec_delta_v3_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} counters group */ /** * @brief Set the counter value */ void ITTAPI __itt_counter_set_value(__itt_counter id, void *value_ptr); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_set_value, (__itt_counter id, void *value_ptr)) #define __itt_counter_set_value ITTNOTIFY_VOID(counter_set_value) #define __itt_counter_set_value_ptr ITTNOTIFY_NAME(counter_set_value) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_set_value(id, value_ptr) #define __itt_counter_set_value_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_set_value_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Set the counter value */ void ITTAPI __itt_counter_set_value_ex(__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_set_value_ex, (__itt_counter id, __itt_clock_domain *clock_domain, unsigned long long timestamp, void *value_ptr)) #define __itt_counter_set_value_ex ITTNOTIFY_VOID(counter_set_value_ex) #define __itt_counter_set_value_ex_ptr ITTNOTIFY_NAME(counter_set_value_ex) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) #define __itt_counter_set_value_ex_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_set_value_ex_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Create a typed counter with given name/domain * * After __itt_counter_create_typed() is called, __itt_counter_inc(id), __itt_counter_inc_delta(id, delta), * __itt_counter_set_value(id, value_ptr) or __itt_counter_set_value_ex(id, clock_domain, timestamp, value_ptr) * can be used to change the value of the counter */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_counter ITTAPI __itt_counter_create_typedA(const char *name, const char *domain, __itt_metadata_type type); __itt_counter ITTAPI __itt_counter_create_typedW(const wchar_t *name, const wchar_t *domain, __itt_metadata_type type); #if defined(UNICODE) || defined(_UNICODE) # define __itt_counter_create_typed __itt_counter_create_typedW # define __itt_counter_create_typed_ptr 
__itt_counter_create_typedW_ptr #else /* UNICODE */ # define __itt_counter_create_typed __itt_counter_create_typedA # define __itt_counter_create_typed_ptr __itt_counter_create_typedA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_counter ITTAPI __itt_counter_create_typed(const char *name, const char *domain, __itt_metadata_type type); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_counter, counter_create_typedA, (const char *name, const char *domain, __itt_metadata_type type)) ITT_STUB(ITTAPI, __itt_counter, counter_create_typedW, (const wchar_t *name, const wchar_t *domain, __itt_metadata_type type)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_counter, counter_create_typed, (const char *name, const char *domain, __itt_metadata_type type)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_create_typedA ITTNOTIFY_DATA(counter_create_typedA) #define __itt_counter_create_typedA_ptr ITTNOTIFY_NAME(counter_create_typedA) #define __itt_counter_create_typedW ITTNOTIFY_DATA(counter_create_typedW) #define __itt_counter_create_typedW_ptr ITTNOTIFY_NAME(counter_create_typedW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create_typed ITTNOTIFY_DATA(counter_create_typed) #define __itt_counter_create_typed_ptr ITTNOTIFY_NAME(counter_create_typed) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_create_typedA(name, domain, type) #define __itt_counter_create_typedA_ptr 0 #define __itt_counter_create_typedW(name, domain, type) #define __itt_counter_create_typedW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create_typed(name, domain, type) #define __itt_counter_create_typed_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_create_typedA_ptr 0 #define __itt_counter_create_typedW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create_typed_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create() or * __itt_counter_create_typed() */ void ITTAPI __itt_counter_destroy(__itt_counter id); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id)) #define __itt_counter_destroy ITTNOTIFY_VOID(counter_destroy) #define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_destroy(id) #define __itt_counter_destroy_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_destroy_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} counters group */ /** * @ingroup markers * @brief Create a marker instance. * @param[in] domain The domain for this marker * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. 
* @param[in] id The instance ID for this marker, or __itt_null * @param[in] name The name for this marker * @param[in] scope The scope for this marker */ void ITTAPI __itt_marker_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, marker_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id id, __itt_string_handle *name, __itt_scope scope)) #define __itt_marker_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(marker_ex,d,x,y,z,a,b) #define __itt_marker_ex_ptr ITTNOTIFY_NAME(marker_ex) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_marker_ex(domain,clock_domain,timestamp,id,name,scope) #define __itt_marker_ex_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_marker_ex_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @ingroup clockdomain * @brief Add a relation to the current task instance. * The current task instance is the head of the relation. * @param[in] domain The domain controlling this call * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. * @param[in] relation The kind of relation * @param[in] tail The ID for the tail of the relation */ void ITTAPI __itt_relation_add_to_current_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail); /** * @ingroup clockdomain * @brief Add a relation between two instance identifiers. * @param[in] domain The domain controlling this call * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. 
* @param[in] head The ID for the head of the relation * @param[in] relation The kind of relation * @param[in] tail The ID for the tail of the relation */ void ITTAPI __itt_relation_add_ex(const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, relation_add_to_current_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_relation relation, __itt_id tail)) ITT_STUBV(ITTAPI, void, relation_add_ex, (const __itt_domain *domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id head, __itt_relation relation, __itt_id tail)) #define __itt_relation_add_to_current_ex(d,x,y,z,a) ITTNOTIFY_VOID_D4(relation_add_to_current_ex,d,x,y,z,a) #define __itt_relation_add_to_current_ex_ptr ITTNOTIFY_NAME(relation_add_to_current_ex) #define __itt_relation_add_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(relation_add_ex,d,x,y,z,a,b) #define __itt_relation_add_ex_ptr ITTNOTIFY_NAME(relation_add_ex) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_relation_add_to_current_ex(domain,clock_domain,timestame,relation,tail) #define __itt_relation_add_to_current_ex_ptr 0 #define __itt_relation_add_ex(domain,clock_domain,timestamp,head,relation,tail) #define __itt_relation_add_ex_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_relation_add_to_current_ex_ptr 0 #define __itt_relation_add_ex_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @cond exclude_from_documentation */ typedef enum ___itt_track_group_type { __itt_track_group_type_normal = 0 } __itt_track_group_type; /** @endcond */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_track_group { __itt_string_handle* name; /*!< Name of the track group */ struct ___itt_track* track; /*!< List of child tracks */ __itt_track_group_type tgtype; /*!< Type of the track group */ int extra1; /*!< Reserved. Must be zero */ void* extra2; /*!< Reserved. Must be zero */ struct ___itt_track_group* next; } __itt_track_group; #pragma pack(pop) /** @endcond */ /** * @brief Placeholder for custom track types. Currently, "normal" custom track * is the only available track type. */ typedef enum ___itt_track_type { __itt_track_type_normal = 0 #ifdef INTEL_ITTNOTIFY_API_PRIVATE , __itt_track_type_queue #endif /* INTEL_ITTNOTIFY_API_PRIVATE */ } __itt_track_type; /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_track { __itt_string_handle* name; /*!< Name of the track group */ __itt_track_group* group; /*!< Parent group to a track */ __itt_track_type ttype; /*!< Type of the track */ int extra1; /*!< Reserved. Must be zero */ void* extra2; /*!< Reserved. Must be zero */ struct ___itt_track* next; } __itt_track; #pragma pack(pop) /** @endcond */ /** * @brief Create logical track group. 
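 *
 * A minimal usage sketch for tracks (illustrative only): group related data onto a named track
 * so the tools can display it on its own row. The string handles sh_io and sh_disk are assumed
 * to come from __itt_string_handle_create() declared earlier in this header.
 * @code
 *     __itt_track_group* io_group = __itt_track_group_create(sh_io, __itt_track_group_type_normal);
 *     __itt_track*       io_track = __itt_track_create(io_group, sh_disk, __itt_track_type_normal);
 *     __itt_set_track(io_track); // subsequent data from this thread is attributed to io_track
 * @endcode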
*/ __itt_track_group* ITTAPI __itt_track_group_create(__itt_string_handle* name, __itt_track_group_type track_group_type); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(ITTAPI, __itt_track_group*, track_group_create, (__itt_string_handle* name, __itt_track_group_type track_group_type)) #define __itt_track_group_create ITTNOTIFY_DATA(track_group_create) #define __itt_track_group_create_ptr ITTNOTIFY_NAME(track_group_create) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_track_group_create(name) (__itt_track_group*)0 #define __itt_track_group_create_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_track_group_create_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Create logical track. */ __itt_track* ITTAPI __itt_track_create(__itt_track_group* track_group, __itt_string_handle* name, __itt_track_type track_type); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(ITTAPI, __itt_track*, track_create, (__itt_track_group* track_group,__itt_string_handle* name, __itt_track_type track_type)) #define __itt_track_create ITTNOTIFY_DATA(track_create) #define __itt_track_create_ptr ITTNOTIFY_NAME(track_create) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_track_create(track_group,name,track_type) (__itt_track*)0 #define __itt_track_create_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_track_create_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Set the logical track. */ void ITTAPI __itt_set_track(__itt_track* track); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, set_track, (__itt_track *track)) #define __itt_set_track ITTNOTIFY_VOID(set_track) #define __itt_set_track_ptr ITTNOTIFY_NAME(set_track) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_set_track(track) #define __itt_set_track_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_set_track_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /* ========================================================================== */ /** @cond exclude_from_gpa_documentation */ /** * @defgroup events Events * @ingroup public * Events group * @{ */ /** @brief user event type */ typedef int __itt_event; /** * @brief Create an event notification * @note name or namelen being null/name and namelen not matching, user event feature not enabled * @return non-zero event identifier upon success and __itt_err otherwise */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_event LIBITTAPI __itt_event_createA(const char *name, int namelen); __itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen); #if defined(UNICODE) || defined(_UNICODE) # define __itt_event_create __itt_event_createW # define __itt_event_create_ptr __itt_event_createW_ptr #else # define __itt_event_create __itt_event_createA # define __itt_event_create_ptr __itt_event_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_event LIBITTAPI __itt_event_create(const char *name, int namelen); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char *name, int namelen)) ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int 
namelen)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_event_createA ITTNOTIFY_DATA(event_createA) #define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA) #define __itt_event_createW ITTNOTIFY_DATA(event_createW) #define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_event_create ITTNOTIFY_DATA(event_create) #define __itt_event_create_ptr ITTNOTIFY_NAME(event_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_event_createA(name, namelen) (__itt_event)0 #define __itt_event_createA_ptr 0 #define __itt_event_createW(name, namelen) (__itt_event)0 #define __itt_event_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_event_create(name, namelen) (__itt_event)0 #define __itt_event_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_event_createA_ptr 0 #define __itt_event_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_event_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record an event occurrence. * @return __itt_err upon failure (invalid event id/user event feature not enabled) */ int LIBITTAPI __itt_event_start(__itt_event event); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event)) #define __itt_event_start ITTNOTIFY_DATA(event_start) #define __itt_event_start_ptr ITTNOTIFY_NAME(event_start) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_event_start(event) (int)0 #define __itt_event_start_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_event_start_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Record an event end occurrence. * @note It is optional if events do not have durations. 
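 *
 * A minimal usage sketch covering create/start/end (illustrative only; the char-string create
 * variant is shown and do_frame() is a placeholder for user work):
 * @code
 *     __itt_event frame_event = __itt_event_create("frame", 5); // 5 == strlen("frame")
 *     __itt_event_start(frame_event);
 *     do_frame();
 *     __itt_event_end(frame_event); // optional for instantaneous events
 * @endcode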
* @return __itt_err upon failure (invalid event id/user event feature not enabled) */ int LIBITTAPI __itt_event_end(__itt_event event); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event)) #define __itt_event_end ITTNOTIFY_DATA(event_end) #define __itt_event_end_ptr ITTNOTIFY_NAME(event_end) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_event_end(event) (int)0 #define __itt_event_end_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_event_end_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} events group */ /** * @defgroup arrays Arrays Visualizer * @ingroup public * Visualize arrays * @{ */ /** * @enum __itt_av_data_type * @brief Defines types of arrays data (for C/C++ intrinsic types) */ typedef enum { __itt_e_first = 0, __itt_e_char = 0, /* 1-byte integer */ __itt_e_uchar, /* 1-byte unsigned integer */ __itt_e_int16, /* 2-byte integer */ __itt_e_uint16, /* 2-byte unsigned integer */ __itt_e_int32, /* 4-byte integer */ __itt_e_uint32, /* 4-byte unsigned integer */ __itt_e_int64, /* 8-byte integer */ __itt_e_uint64, /* 8-byte unsigned integer */ __itt_e_float, /* 4-byte floating */ __itt_e_double, /* 8-byte floating */ __itt_e_last = __itt_e_double } __itt_av_data_type; /** * @brief Save an array data to a file. * Output format is defined by the file extension. The csv and bmp formats are supported (bmp - for 2-dimensional array only). * @param[in] data - pointer to the array data * @param[in] rank - the rank of the array * @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions. * The size of dimensions must be equal to the rank * @param[in] type - the type of the array, specified as one of the __itt_av_data_type values (for intrinsic types) * @param[in] filePath - the file path; the output format is defined by the file extension * @param[in] columnOrder - defines how the array is stored in the linear memory. * It should be 1 for column-major order (e.g. in FORTRAN) or 0 - for row-major order (e.g. in C). 
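 *
 * A minimal sketch (illustrative only; the array contents and output path
 * are hypothetical, and the char-based __itt_av_save entry point is
 * assumed). The .csv extension selects CSV output as described above:
 *
 *     double grid[2][3] = { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 } };
 *     int dims[2] = { 2, 3 };
 *     // rank 2, row-major storage (columnOrder = 0)
 *     int rc = __itt_av_save(grid, 2, dims, __itt_e_double, "grid.csv", 0);
 *     (void)rc;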
*/ #if ITT_PLATFORM==ITT_PLATFORM_WIN int ITTAPI __itt_av_saveA(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); int ITTAPI __itt_av_saveW(void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder); #if defined(UNICODE) || defined(_UNICODE) # define __itt_av_save __itt_av_saveW # define __itt_av_save_ptr __itt_av_saveW_ptr #else /* UNICODE */ # define __itt_av_save __itt_av_saveA # define __itt_av_save_ptr __itt_av_saveA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ int ITTAPI __itt_av_save(void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, int, av_saveA, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) ITT_STUB(ITTAPI, int, av_saveW, (void *data, int rank, const int *dimensions, int type, const wchar_t *filePath, int columnOrder)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, int, av_save, (void *data, int rank, const int *dimensions, int type, const char *filePath, int columnOrder)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_av_saveA ITTNOTIFY_DATA(av_saveA) #define __itt_av_saveA_ptr ITTNOTIFY_NAME(av_saveA) #define __itt_av_saveW ITTNOTIFY_DATA(av_saveW) #define __itt_av_saveW_ptr ITTNOTIFY_NAME(av_saveW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_av_save ITTNOTIFY_DATA(av_save) #define __itt_av_save_ptr ITTNOTIFY_NAME(av_save) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_av_saveA(name) #define __itt_av_saveA_ptr 0 #define __itt_av_saveW(name) #define __itt_av_saveW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_av_save(name) #define __itt_av_save_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_av_saveA_ptr 0 #define __itt_av_saveW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_av_save_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ void ITTAPI __itt_enable_attach(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, enable_attach, (void)) #define __itt_enable_attach ITTNOTIFY_VOID(enable_attach) #define __itt_enable_attach_ptr ITTNOTIFY_NAME(enable_attach) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_enable_attach() #define __itt_enable_attach_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_enable_attach_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @cond exclude_from_gpa_documentation */ /** @} arrays group */ /** @endcond */ /** * @brief Module load notification * This API is used to report necessary information in case of bypassing default system loader. * Notification should be done immidiatelly after this module is loaded to process memory. 
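 *
 * Illustrative sketch for a custom loader (the helper functions, base
 * address, length and path are hypothetical; the char-based entry point
 * is assumed):
 *
 *     void *base = my_loader_map("plugin.so");    // hypothetical helper
 *     size_t len = my_loader_size("plugin.so");   // hypothetical helper
 *     __itt_module_load(base, (char *)base + len, "/opt/app/plugin.so");
 *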
* @param[in] start_addr - module start address * @param[in] end_addr - module end address * @param[in] path - file system full path to the module */ #if ITT_PLATFORM==ITT_PLATFORM_WIN void ITTAPI __itt_module_loadA(void *start_addr, void *end_addr, const char *path); void ITTAPI __itt_module_loadW(void *start_addr, void *end_addr, const wchar_t *path); #if defined(UNICODE) || defined(_UNICODE) # define __itt_module_load __itt_module_loadW # define __itt_module_load_ptr __itt_module_loadW_ptr #else /* UNICODE */ # define __itt_module_load __itt_module_loadA # define __itt_module_load_ptr __itt_module_loadA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ void ITTAPI __itt_module_load(void *start_addr, void *end_addr, const char *path); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, void, module_loadA, (void *start_addr, void *end_addr, const char *path)) ITT_STUB(ITTAPI, void, module_loadW, (void *start_addr, void *end_addr, const wchar_t *path)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, void, module_load, (void *start_addr, void *end_addr, const char *path)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_module_loadA ITTNOTIFY_VOID(module_loadA) #define __itt_module_loadA_ptr ITTNOTIFY_NAME(module_loadA) #define __itt_module_loadW ITTNOTIFY_VOID(module_loadW) #define __itt_module_loadW_ptr ITTNOTIFY_NAME(module_loadW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_module_load ITTNOTIFY_VOID(module_load) #define __itt_module_load_ptr ITTNOTIFY_NAME(module_load) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_module_loadA(start_addr, end_addr, path) #define __itt_module_loadA_ptr 0 #define __itt_module_loadW(start_addr, end_addr, path) #define __itt_module_loadW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_module_load(start_addr, end_addr, path) #define __itt_module_load_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_module_loadA_ptr 0 #define __itt_module_loadW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_module_load_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Report module unload * This API is used to report necessary information in case of bypassing default system loader. * Notification should be done just before the module is unloaded from process memory. 
* @param[in] addr - base address of loaded module */ void ITTAPI __itt_module_unload(void *addr); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, module_unload, (void *addr)) #define __itt_module_unload ITTNOTIFY_VOID(module_unload) #define __itt_module_unload_ptr ITTNOTIFY_NAME(module_unload) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_module_unload(addr) #define __itt_module_unload_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_module_unload_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @cond exclude_from_documentation */ typedef enum { __itt_module_type_unknown = 0, __itt_module_type_elf, __itt_module_type_coff } __itt_module_type; /** @endcond */ /** @cond exclude_from_documentation */ typedef enum { itt_section_type_unknown, itt_section_type_bss, /* notifies that the section contains uninitialized data. These are the relevant section types and the modules that contain them: * ELF module: SHT_NOBITS section type * COFF module: IMAGE_SCN_CNT_UNINITIALIZED_DATA section type */ itt_section_type_data, /* notifies that section contains initialized data. These are the relevant section types and the modules that contain them: * ELF module: SHT_PROGBITS section type * COFF module: IMAGE_SCN_CNT_INITIALIZED_DATA section type */ itt_section_type_text /* notifies that the section contains executable code. These are the relevant section types and the modules that contain them: * ELF module: SHT_PROGBITS section type * COFF module: IMAGE_SCN_CNT_CODE section type */ } __itt_section_type; /** @endcond */ /** * @hideinitializer * @brief bit-mask, detects a section attribute that indicates whether a section can be executed as code: * These are the relevant section attributes and the modules that contain them: * ELF module: PF_X section attribute * COFF module: IMAGE_SCN_MEM_EXECUTE attribute */ #define __itt_section_exec 0x20000000 /** * @hideinitializer * @brief bit-mask, detects a section attribute that indicates whether a section can be read. * These are the relevant section attributes and the modules that contain them: * ELF module: PF_R attribute * COFF module: IMAGE_SCN_MEM_READ attribute */ #define __itt_section_read 0x40000000 /** * @hideinitializer * @brief bit-mask, detects a section attribute that indicates whether a section can be written to. * These are the relevant section attributes and the modules that contain them: * ELF module: PF_W attribute * COFF module: IMAGE_SCN_MEM_WRITE attribute */ #define __itt_section_write 0x80000000 /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_section_info { const char* name; /*!< Section name in UTF8 */ __itt_section_type type; /*!< Section content and semantics description */ size_t flags; /*!< Section bit flags that describe attributes using bit mask * Zero if disabled, non-zero if enabled */ void* start_addr; /*!< Section load(relocated) start address */ size_t size; /*!< Section file offset */ size_t file_offset; /*!< Section size */ } __itt_section_info; #pragma pack(pop) /** @endcond */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_module_object { unsigned int version; /*!< API version*/ __itt_id module_id; /*!< Unique identifier. 
This is unchanged for sections that belong to the same module */ __itt_module_type module_type; /*!< Binary module format */ const char* module_name; /*!< Unique module name or path to module in UTF8 * Contains module name when module_bufer and module_size exist * Contains module path when module_bufer and module_size absent * module_name remains the same for the certain module_id */ void* module_buffer; /*!< Module buffer content */ size_t module_size; /*!< Module buffer size */ /*!< If module_buffer and module_size exist, the binary module is dumped onto the system. * If module_buffer and module_size do not exist, * the binary module exists on the system already. * The module_name parameter contains the path to the module. */ __itt_section_info* section_array; /*!< Reference to section information */ size_t section_number; } __itt_module_object; #pragma pack(pop) /** @endcond */ /** * @brief Load module content and its loaded(relocated) sections. * This API is useful to save a module, or specify its location on the system and report information about loaded sections. * The target module is saved on the system if module buffer content and size are available. * If module buffer content and size are unavailable, the module name contains the path to the existing binary module. * @param[in] module_obj - provides module and section information, along with unique module identifiers (name,module ID) * which bind the binary module to particular sections. */ void ITTAPI __itt_module_load_with_sections(__itt_module_object* module_obj); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, module_load_with_sections, (__itt_module_object* module_obj)) #define __itt_module_load_with_sections ITTNOTIFY_VOID(module_load_with_sections) #define __itt_module_load_with_sections_ptr ITTNOTIFY_NAME(module_load_with_sections) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_module_load_with_sections(module_obj) #define __itt_module_load_with_sections_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_module_load_with_sections_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Unload a module and its loaded(relocated) sections. * This API notifies that the module and its sections were unloaded. * @param[in] module_obj - provides module and sections information, along with unique module identifiers (name,module ID) * which bind the binary module to particular sections. 
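 *
 * Minimal sketch of a load/unload pair (all values are hypothetical; the
 * module is assumed to already exist on disk, so module_buffer and
 * module_size stay zero and module_name carries the path):
 *
 *     // text_base, text_len, text_off: hypothetical values from the loader
 *     __itt_section_info text_sec = { ".text", itt_section_type_text,
 *         __itt_section_exec | __itt_section_read, text_base, text_len, text_off };
 *     __itt_module_object mod = { 0 };   // version and module_id left zeroed for brevity
 *     mod.module_type    = __itt_module_type_elf;
 *     mod.module_name    = "/opt/app/plugin.so";
 *     mod.section_array  = &text_sec;
 *     mod.section_number = 1;
 *     __itt_module_load_with_sections(&mod);
 *     // ... later, just before the module is unmapped ...
 *     __itt_module_unload_with_sections(&mod);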
*/ void ITTAPI __itt_module_unload_with_sections(__itt_module_object* module_obj); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, module_unload_with_sections, (__itt_module_object* module_obj)) #define __itt_module_unload_with_sections ITTNOTIFY_VOID(module_unload_with_sections) #define __itt_module_unload_with_sections_ptr ITTNOTIFY_NAME(module_unload_with_sections) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_module_unload_with_sections(module_obj) #define __itt_module_unload_with_sections_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_module_unload_with_sections_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_histogram { const __itt_domain* domain; /*!< Domain of the histogram*/ const char* nameA; /*!< Name of the histogram */ #if defined(UNICODE) || defined(_UNICODE) const wchar_t* nameW; #else /* UNICODE || _UNICODE */ void* nameW; #endif /* UNICODE || _UNICODE */ __itt_metadata_type x_type; /*!< Type of the histogram X axis */ __itt_metadata_type y_type; /*!< Type of the histogram Y axis */ int extra1; /*!< Reserved to the runtime */ void* extra2; /*!< Reserved to the runtime */ struct ___itt_histogram* next; } __itt_histogram; #pragma pack(pop) /** @endcond */ /** * @brief Create a typed histogram instance with given name/domain. * @param[in] domain The domain controlling the call. * @param[in] name The name of the histogram. * @param[in] x_type The type of the X axis in histogram (may be 0 to calculate batch statistics). * @param[in] y_type The type of the Y axis in histogram. */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_histogram* ITTAPI __itt_histogram_createA(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type); __itt_histogram* ITTAPI __itt_histogram_createW(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type); #if defined(UNICODE) || defined(_UNICODE) # define __itt_histogram_create __itt_histogram_createW # define __itt_histogram_create_ptr __itt_histogram_createW_ptr #else /* UNICODE */ # define __itt_histogram_create __itt_histogram_createA # define __itt_histogram_create_ptr __itt_histogram_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_histogram* ITTAPI __itt_histogram_create(const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_histogram*, histogram_createA, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) ITT_STUB(ITTAPI, __itt_histogram*, histogram_createW, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_histogram*, histogram_create, (const __itt_domain* domain, const char* name, __itt_metadata_type x_type, __itt_metadata_type y_type)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_histogram_createA ITTNOTIFY_DATA(histogram_createA) #define __itt_histogram_createA_ptr ITTNOTIFY_NAME(histogram_createA) #define __itt_histogram_createW 
ITTNOTIFY_DATA(histogram_createW) #define __itt_histogram_createW_ptr ITTNOTIFY_NAME(histogram_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_histogram_create ITTNOTIFY_DATA(histogram_create) #define __itt_histogram_create_ptr ITTNOTIFY_NAME(histogram_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_histogram_createA(domain, name, x_type, y_type) (__itt_histogram*)0 #define __itt_histogram_createA_ptr 0 #define __itt_histogram_createW(domain, name, x_type, y_type) (__itt_histogram*)0 #define __itt_histogram_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_histogram_create(domain, name, x_type, y_type) (__itt_histogram*)0 #define __itt_histogram_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_histogram_createA_ptr 0 #define __itt_histogram_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_histogram_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Submit statistics for a histogram instance. * @param[in] hist Pointer to the histogram instance to which the histogram statistic is to be dumped. * @param[in] length The number of elements in dumped axis data array. * @param[in] x_data The X axis dumped data itself (may be NULL to calculate batch statistics). * @param[in] y_data The Y axis dumped data itself. */ void ITTAPI __itt_histogram_submit(__itt_histogram* hist, size_t length, void* x_data, void* y_data); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, histogram_submit, (__itt_histogram* hist, size_t length, void* x_data, void* y_data)) #define __itt_histogram_submit ITTNOTIFY_VOID(histogram_submit) #define __itt_histogram_submit_ptr ITTNOTIFY_NAME(histogram_submit) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_histogram_submit(hist, length, x_data, y_data) #define __itt_histogram_submit_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_histogram_submit_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** * @brief function allows to obtain the current collection state at the moment * @return collection state as a enum __itt_collection_state */ __itt_collection_state __itt_get_collection_state(void); /** * @brief function releases resources allocated by ITT API static part * this API should be called from the library destructor * @return void */ void __itt_release_resources(void); /** @endcond */ /** * @brief Create a typed counter with given domain pointer, string name and counter type */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_counter ITTAPI __itt_counter_createA_v3(const __itt_domain* domain, const char* name, __itt_metadata_type type); __itt_counter ITTAPI __itt_counter_createW_v3(const __itt_domain* domain, const wchar_t* name, __itt_metadata_type type); #if defined(UNICODE) || defined(_UNICODE) # define __itt_counter_create_v3 __itt_counter_createW_v3 # define __itt_counter_create_v3_ptr __itt_counter_createW_v3_ptr #else /* UNICODE */ # define __itt_counter_create_v3 __itt_counter_createA_v3 # define __itt_counter_create_v3_ptr __itt_counter_createA_v3_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_counter ITTAPI __itt_counter_create_v3(const __itt_domain* domain, const char* name, __itt_metadata_type type); #endif 
/* ITT_PLATFORM==ITT_PLATFORM_WIN */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_counter, counter_createA_v3, (const __itt_domain* domain, const char* name, __itt_metadata_type type)) ITT_STUB(ITTAPI, __itt_counter, counter_createW_v3, (const __itt_domain* domain, const wchar_t* name, __itt_metadata_type type)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_counter, counter_create_v3, (const __itt_domain* domain, const char* name, __itt_metadata_type type)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_createA_v3 ITTNOTIFY_DATA(counter_createA_v3) #define __itt_counter_createA_v3_ptr ITTNOTIFY_NAME(counter_createA_v3) #define __itt_counter_createW_v3 ITTNOTIFY_DATA(counter_createW_v3) #define __itt_counter_createW_v3_ptr ITTNOTIFY_NAME(counter_createW_v3) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create_v3 ITTNOTIFY_DATA(counter_create_v3) #define __itt_counter_create_v3_ptr ITTNOTIFY_NAME(counter_create_v3) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_createA_v3(domain, name, type) (__itt_counter)0 #define __itt_counter_createA_v3_ptr 0 #define __itt_counter_createW_v3(domain, name, type) (__itt_counter)0 #define __itt_counter_create_typedW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create_v3(domain, name, type) (__itt_counter)0 #define __itt_counter_create_v3_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_counter_createA_v3_ptr 0 #define __itt_counter_createW_v3_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_counter_create_v3_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Set the counter value api */ void ITTAPI __itt_counter_set_value_v3(__itt_counter counter, void *value_ptr); #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, counter_set_value_v3, (__itt_counter counter, void *value_ptr)) #define __itt_counter_set_value_v3 ITTNOTIFY_VOID(counter_set_value_v3) #define __itt_counter_set_value_v3_ptr ITTNOTIFY_NAME(counter_set_value_v3) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_counter_set_value_v3(counter, value_ptr) #define __itt_counter_set_value_v3_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_counter_set_value_v3_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief describes the type of context metadata */ typedef enum { __itt_context_unknown = 0, /*!< Undefined type */ __itt_context_nameA, /*!< ASCII string char* type */ __itt_context_nameW, /*!< Unicode string wchar_t* type */ __itt_context_deviceA, /*!< ASCII string char* type */ __itt_context_deviceW, /*!< Unicode string wchar_t* type */ __itt_context_unitsA, /*!< ASCII string char* type */ __itt_context_unitsW, /*!< Unicode string wchar_t* type */ __itt_context_pci_addrA, /*!< ASCII string char* type */ __itt_context_pci_addrW, /*!< Unicode string wchar_t* type */ __itt_context_tid, /*!< Unsigned 64-bit integer type */ __itt_context_max_val, /*!< Unsigned 64-bit integer type */ __itt_context_bandwidth_flag, /*!< Unsigned 64-bit integer type */ __itt_context_latency_flag, /*!< Unsigned 64-bit integer type */ __itt_context_occupancy_flag, /*!< Unsigned 
64-bit integer type */ __itt_context_on_thread_flag, /*!< Unsigned 64-bit integer type */ __itt_context_is_abs_val_flag, /*!< Unsigned 64-bit integer type */ __itt_context_cpu_instructions_flag, /*!< Unsigned 64-bit integer type */ __itt_context_cpu_cycles_flag /*!< Unsigned 64-bit integer type */ } __itt_context_type; #if defined(UNICODE) || defined(_UNICODE) # define __itt_context_name __itt_context_nameW # define __itt_context_device __itt_context_deviceW # define __itt_context_units __itt_context_unitsW # define __itt_context_pci_addr __itt_context_pci_addrW #else /* UNICODE || _UNICODE */ # define __itt_context_name __itt_context_nameA # define __itt_context_device __itt_context_deviceA # define __itt_context_units __itt_context_unitsA # define __itt_context_pci_addr __itt_context_pci_addrA #endif /* UNICODE || _UNICODE */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_context_metadata { __itt_context_type type; /*!< Type of the context metadata value */ void* value; /*!< Pointer to context metadata value itself */ } __itt_context_metadata; #pragma pack(pop) /** @endcond */ /** @cond exclude_from_documentation */ #pragma pack(push, 8) typedef struct ___itt_counter_metadata { __itt_counter counter; /*!< Associated context metadata counter */ __itt_context_type type; /*!< Type of the context metadata value */ const char* str_valueA; /*!< String context metadata value */ #if defined(UNICODE) || defined(_UNICODE) const wchar_t* str_valueW; #else /* UNICODE || _UNICODE */ void* str_valueW; #endif /* UNICODE || _UNICODE */ unsigned long long value; /*!< Numeric context metadata value */ int extra1; /*!< Reserved to the runtime */ void* extra2; /*!< Reserved to the runtime */ struct ___itt_counter_metadata* next; } __itt_counter_metadata; #pragma pack(pop) /** @endcond */ /** * @brief Bind context metadata to counter instance * @param[in] counter Pointer to the counter instance to which the context metadata is to be associated. * @param[in] length The number of elements in context metadata array. * @param[in] metadata The context metadata itself. */ void ITTAPI __itt_bind_context_metadata_to_counter(__itt_counter counter, size_t length, __itt_context_metadata* metadata); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, bind_context_metadata_to_counter, (__itt_counter counter, size_t length, __itt_context_metadata* metadata)) #define __itt_bind_context_metadata_to_counter ITTNOTIFY_VOID(bind_context_metadata_to_counter) #define __itt_bind_context_metadata_to_counter_ptr ITTNOTIFY_NAME(bind_context_metadata_to_counter) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_bind_context_metadata_to_counter(counter, length, metadata) #define __itt_bind_context_metadata_to_counter_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_bind_context_metadata_to_counter_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _ITTNOTIFY_H_ */ #ifdef INTEL_ITTNOTIFY_API_PRIVATE #ifndef _ITTNOTIFY_PRIVATE_ #define _ITTNOTIFY_PRIVATE_ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /** * @ingroup clockdomain * @brief Begin an overlapped task instance. * @param[in] domain The domain for this task * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. 
* @param[in] taskid The identifier for this task instance, *cannot* be __itt_null. * @param[in] parentid The parent of this task, or __itt_null. * @param[in] name The name of this task. */ void ITTAPI __itt_task_begin_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name); /** * @ingroup clockdomain * @brief End an overlapped task instance. * @param[in] domain The domain for this task * @param[in] clock_domain The clock domain controlling the execution of this call. * @param[in] timestamp The user defined timestamp. * @param[in] taskid Explicit ID of finished task */ void ITTAPI __itt_task_end_overlapped_ex(const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, task_begin_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid, __itt_id parentid, __itt_string_handle* name)) ITT_STUBV(ITTAPI, void, task_end_overlapped_ex, (const __itt_domain* domain, __itt_clock_domain* clock_domain, unsigned long long timestamp, __itt_id taskid)) #define __itt_task_begin_overlapped_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(task_begin_overlapped_ex,d,x,y,z,a,b) #define __itt_task_begin_overlapped_ex_ptr ITTNOTIFY_NAME(task_begin_overlapped_ex) #define __itt_task_end_overlapped_ex(d,x,y,z) ITTNOTIFY_VOID_D3(task_end_overlapped_ex,d,x,y,z) #define __itt_task_end_overlapped_ex_ptr ITTNOTIFY_NAME(task_end_overlapped_ex) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_task_begin_overlapped_ex(domain,clock_domain,timestamp,taskid,parentid,name) #define __itt_task_begin_overlapped_ex_ptr 0 #define __itt_task_end_overlapped_ex(domain,clock_domain,timestamp,taskid) #define __itt_task_end_overlapped_ex_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_task_begin_overlapped_ex_ptr 0 #define __itt_task_end_overlapped_ptr 0 #define __itt_task_end_overlapped_ex_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @defgroup makrs_internal Marks * @ingroup internal * Marks group * @warning Internal API: * - It is not shipped to outside of Intel * - It is delivered to internal Intel teams using e-mail or SVN access only * @{ */ /** @brief user mark type */ typedef int __itt_mark_type; /** * @brief Creates a user mark type with the specified name using char or Unicode string. 
* @param[in] name - name of mark to create * @return Returns a handle to the mark type */ #if ITT_PLATFORM==ITT_PLATFORM_WIN __itt_mark_type ITTAPI __itt_mark_createA(const char *name); __itt_mark_type ITTAPI __itt_mark_createW(const wchar_t *name); #if defined(UNICODE) || defined(_UNICODE) # define __itt_mark_create __itt_mark_createW # define __itt_mark_create_ptr __itt_mark_createW_ptr #else /* UNICODE */ # define __itt_mark_create __itt_mark_createA # define __itt_mark_create_ptr __itt_mark_createA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ __itt_mark_type ITTAPI __itt_mark_create(const char *name); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char *name)) ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, __itt_mark_type, mark_create, (const char *name)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_mark_createA ITTNOTIFY_DATA(mark_createA) #define __itt_mark_createA_ptr ITTNOTIFY_NAME(mark_createA) #define __itt_mark_createW ITTNOTIFY_DATA(mark_createW) #define __itt_mark_createW_ptr ITTNOTIFY_NAME(mark_createW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark_create ITTNOTIFY_DATA(mark_create) #define __itt_mark_create_ptr ITTNOTIFY_NAME(mark_create) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_mark_createA(name) (__itt_mark_type)0 #define __itt_mark_createA_ptr 0 #define __itt_mark_createW(name) (__itt_mark_type)0 #define __itt_mark_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark_create(name) (__itt_mark_type)0 #define __itt_mark_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_mark_createA_ptr 0 #define __itt_mark_createW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark_create_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Creates a "discrete" user mark type of the specified type and an optional parameter using char or Unicode string. * * - The mark of "discrete" type is placed to collection results in case of success. It appears in overtime view(s) as a special tick sign. * - The call is "synchronous" - function returns after mark is actually added to results. * - This function is useful, for example, to mark different phases of application * (beginning of the next mark automatically meand end of current region). * - Can be used together with "continuous" marks (see below) at the same collection session * @param[in] mt - mark, created by __itt_mark_create(const char* name) function * @param[in] parameter - string parameter of mark * @return Returns zero value in case of success, non-zero value otherwise. 
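 *
 * Illustrative sketch of discrete phase marks (the mark name and phase
 * strings are hypothetical; the char-based __itt_mark entry point is
 * assumed):
 *
 *     __itt_mark_type phase = __itt_mark_create("app.phase");
 *     (void)__itt_mark(phase, "load-config");
 *     // ... configuration loading ...
 *     (void)__itt_mark(phase, "serve-requests");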
*/ #if ITT_PLATFORM==ITT_PLATFORM_WIN int ITTAPI __itt_markA(__itt_mark_type mt, const char *parameter); int ITTAPI __itt_markW(__itt_mark_type mt, const wchar_t *parameter); #if defined(UNICODE) || defined(_UNICODE) # define __itt_mark __itt_markW # define __itt_mark_ptr __itt_markW_ptr #else /* UNICODE */ # define __itt_mark __itt_markA # define __itt_mark_ptr __itt_markA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ int ITTAPI __itt_mark(__itt_mark_type mt, const char *parameter); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char *parameter)) ITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, int, mark, (__itt_mark_type mt, const char *parameter)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_markA ITTNOTIFY_DATA(markA) #define __itt_markA_ptr ITTNOTIFY_NAME(markA) #define __itt_markW ITTNOTIFY_DATA(markW) #define __itt_markW_ptr ITTNOTIFY_NAME(markW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark ITTNOTIFY_DATA(mark) #define __itt_mark_ptr ITTNOTIFY_NAME(mark) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_markA(mt, parameter) (int)0 #define __itt_markA_ptr 0 #define __itt_markW(mt, parameter) (int)0 #define __itt_markW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark(mt, parameter) (int)0 #define __itt_mark_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_markA_ptr 0 #define __itt_markW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Use this if necessary to create a "discrete" user event type (mark) for process * rather then for one thread * @see int __itt_mark(__itt_mark_type mt, const char* parameter); */ #if ITT_PLATFORM==ITT_PLATFORM_WIN int ITTAPI __itt_mark_globalA(__itt_mark_type mt, const char *parameter); int ITTAPI __itt_mark_globalW(__itt_mark_type mt, const wchar_t *parameter); #if defined(UNICODE) || defined(_UNICODE) # define __itt_mark_global __itt_mark_globalW # define __itt_mark_global_ptr __itt_mark_globalW_ptr #else /* UNICODE */ # define __itt_mark_global __itt_mark_globalA # define __itt_mark_global_ptr __itt_mark_globalA_ptr #endif /* UNICODE */ #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ int ITTAPI __itt_mark_global(__itt_mark_type mt, const char *parameter); #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #if ITT_PLATFORM==ITT_PLATFORM_WIN ITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char *parameter)) ITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter)) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ ITT_STUB(ITTAPI, int, mark_global, (__itt_mark_type mt, const char *parameter)) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_mark_globalA ITTNOTIFY_DATA(mark_globalA) #define __itt_mark_globalA_ptr ITTNOTIFY_NAME(mark_globalA) #define __itt_mark_globalW ITTNOTIFY_DATA(mark_globalW) 
#define __itt_mark_globalW_ptr ITTNOTIFY_NAME(mark_globalW) #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark_global ITTNOTIFY_DATA(mark_global) #define __itt_mark_global_ptr ITTNOTIFY_NAME(mark_global) #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #else /* INTEL_NO_ITTNOTIFY_API */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_mark_globalA(mt, parameter) (int)0 #define __itt_mark_globalA_ptr 0 #define __itt_mark_globalW(mt, parameter) (int)0 #define __itt_mark_globalW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark_global(mt, parameter) (int)0 #define __itt_mark_global_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #if ITT_PLATFORM==ITT_PLATFORM_WIN #define __itt_mark_globalA_ptr 0 #define __itt_mark_globalW_ptr 0 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #define __itt_mark_global_ptr 0 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Creates an "end" point for "continuous" mark with specified name. * * - Returns zero value in case of success, non-zero value otherwise. * Also returns non-zero value when preceding "begin" point for the * mark with the same name failed to be created or not created. * - The mark of "continuous" type is placed to collection results in * case of success. It appears in overtime view(s) as a special tick * sign (different from "discrete" mark) together with line from * corresponding "begin" mark to "end" mark. * @note Continuous marks can overlap and be nested inside each other. * Discrete mark can be nested inside marked region * @param[in] mt - mark, created by __itt_mark_create(const char* name) function * @return Returns zero value in case of success, non-zero value otherwise. */ int ITTAPI __itt_mark_off(__itt_mark_type mt); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt)) #define __itt_mark_off ITTNOTIFY_DATA(mark_off) #define __itt_mark_off_ptr ITTNOTIFY_NAME(mark_off) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_mark_off(mt) (int)0 #define __itt_mark_off_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_mark_off_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Use this if necessary to create an "end" point for mark of process * @see int __itt_mark_off(__itt_mark_type mt); */ int ITTAPI __itt_mark_global_off(__itt_mark_type mt); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt)) #define __itt_mark_global_off ITTNOTIFY_DATA(mark_global_off) #define __itt_mark_global_off_ptr ITTNOTIFY_NAME(mark_global_off) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_mark_global_off(mt) (int)0 #define __itt_mark_global_off_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_mark_global_off_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} marks group */ /** * @defgroup counters_internal Counters * @ingroup internal * Counters group * @{ */ /** * @defgroup stitch Stack Stitching * @ingroup internal * Stack Stitching group * @{ */ /** * @brief opaque structure for counter identification */ typedef struct ___itt_caller *__itt_caller; /** * @brief Create the stitch point e.g. a point in call stack where other stacks should be stitched to. 
* The function returns a unique identifier which is used to match the cut points with corresponding stitch points. */ __itt_caller ITTAPI __itt_stack_caller_create(void); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void)) #define __itt_stack_caller_create ITTNOTIFY_DATA(stack_caller_create) #define __itt_stack_caller_create_ptr ITTNOTIFY_NAME(stack_caller_create) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_stack_caller_create() (__itt_caller)0 #define __itt_stack_caller_create_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_stack_caller_create_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Destroy the information about stitch point identified by the pointer previously returned by __itt_stack_caller_create() */ void ITTAPI __itt_stack_caller_destroy(__itt_caller id); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id)) #define __itt_stack_caller_destroy ITTNOTIFY_VOID(stack_caller_destroy) #define __itt_stack_caller_destroy_ptr ITTNOTIFY_NAME(stack_caller_destroy) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_stack_caller_destroy(id) #define __itt_stack_caller_destroy_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_stack_caller_destroy_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief Sets the cut point. Stack from each event which occurs after this call will be cut * at the same stack level the function was called and stitched to the corresponding stitch point. */ void ITTAPI __itt_stack_callee_enter(__itt_caller id); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id)) #define __itt_stack_callee_enter ITTNOTIFY_VOID(stack_callee_enter) #define __itt_stack_callee_enter_ptr ITTNOTIFY_NAME(stack_callee_enter) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_stack_callee_enter(id) #define __itt_stack_callee_enter_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_stack_callee_enter_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** * @brief This function eliminates the cut point which was set by latest __itt_stack_callee_enter(). */ void ITTAPI __itt_stack_callee_leave(__itt_caller id); /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API ITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id)) #define __itt_stack_callee_leave ITTNOTIFY_VOID(stack_callee_leave) #define __itt_stack_callee_leave_ptr ITTNOTIFY_NAME(stack_callee_leave) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_stack_callee_leave(id) #define __itt_stack_callee_leave_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_stack_callee_leave_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ /** @} stitch group */ /* ***************************************************************************************************************************** */ #include /** @cond exclude_from_documentation */ typedef enum __itt_error_code { __itt_error_success = 0, /*!< no error */ __itt_error_no_module = 1, /*!< module can't be loaded */ /* %1$s -- library name; win: %2$d -- system error code; unx: %2$s -- system error message. 
*/ __itt_error_no_symbol = 2, /*!< symbol not found */ /* %1$s -- library name, %2$s -- symbol name. */ __itt_error_unknown_group = 3, /*!< unknown group specified */ /* %1$s -- env var name, %2$s -- group name. */ __itt_error_cant_read_env = 4, /*!< GetEnvironmentVariable() failed */ /* %1$s -- env var name, %2$d -- system error. */ __itt_error_env_too_long = 5, /*!< variable value too long */ /* %1$s -- env var name, %2$d -- actual length of the var, %3$d -- max allowed length. */ __itt_error_system = 6 /*!< pthread_mutexattr_init or pthread_mutex_init failed */ /* %1$s -- function name, %2$d -- errno. */ } __itt_error_code; typedef void (__itt_error_handler_t)(__itt_error_code code, va_list); __itt_error_handler_t* __itt_set_error_handler(__itt_error_handler_t*); const char* ITTAPI __itt_api_version(void); /** @endcond */ /** @cond exclude_from_documentation */ #ifndef INTEL_NO_MACRO_BODY #ifndef INTEL_NO_ITTNOTIFY_API #define __itt_error_handler ITT_JOIN(INTEL_ITTNOTIFY_PREFIX, error_handler) void __itt_error_handler(__itt_error_code code, va_list args); extern const int ITTNOTIFY_NAME(err); #define __itt_err ITTNOTIFY_NAME(err) ITT_STUB(ITTAPI, const char*, api_version, (void)) #define __itt_api_version ITTNOTIFY_DATA(api_version) #define __itt_api_version_ptr ITTNOTIFY_NAME(api_version) #else /* INTEL_NO_ITTNOTIFY_API */ #define __itt_api_version() (const char*)0 #define __itt_api_version_ptr 0 #endif /* INTEL_NO_ITTNOTIFY_API */ #else /* INTEL_NO_MACRO_BODY */ #define __itt_api_version_ptr 0 #endif /* INTEL_NO_MACRO_BODY */ /** @endcond */ #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _ITTNOTIFY_PRIVATE_ */ #endif /* INTEL_ITTNOTIFY_API_PRIVATE */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/fifo_cache.h000066400000000000000000000016361456575232400264520ustar00rootroot00000000000000#ifndef AWS_COMMON_FIFO_CACHE_H #define AWS_COMMON_FIFO_CACHE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * Initializes the first-in-first-out cache. Sets up the underlying linked hash table. * Once `max_items` elements have been added, the oldest(first-in) item will * be removed. For the other parameters, see aws/common/hash_table.h. Hash table * semantics of these arguments are preserved. */ AWS_COMMON_API struct aws_cache *aws_cache_new_fifo( struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t max_items); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_FIFO_CACHE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/file.h000066400000000000000000000162221456575232400253200ustar00rootroot00000000000000#ifndef AWS_COMMON_FILE_H #define AWS_COMMON_FILE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #ifdef AWS_OS_WINDOWS # define AWS_PATH_DELIM '\\' # define AWS_PATH_DELIM_STR "\\" #else # define AWS_PATH_DELIM '/' # define AWS_PATH_DELIM_STR "/" #endif struct aws_string; struct aws_directory_iterator; enum aws_file_type { AWS_FILE_TYPE_FILE = 1, AWS_FILE_TYPE_SYM_LINK = 2, AWS_FILE_TYPE_DIRECTORY = 4, }; struct aws_directory_entry { /** * Absolute path to the entry from the current process root. 
*/ struct aws_byte_cursor path; /** * Path to the entry relative to the current working directory. */ struct aws_byte_cursor relative_path; /** * Bit-field of enum aws_file_type */ int file_type; /** * Size of the file on disk. */ int64_t file_size; }; /** * Invoked during calls to aws_directory_traverse() as an entry is encountered. entry will contain * the parsed directory entry info. * * Return true to continue the traversal, or alternatively, if you have a reason to abort the traversal, return false. */ typedef bool(aws_on_directory_entry)(const struct aws_directory_entry *entry, void *user_data); AWS_EXTERN_C_BEGIN /** * Deprecated - Use aws_fopen_safe() instead, avoid const char * in public APIs. * Opens file at file_path using mode. Returns the FILE pointer if successful. * Otherwise, aws_last_error() will contain the error that occurred */ AWS_COMMON_API FILE *aws_fopen(const char *file_path, const char *mode); /** * Opens file at file_path using mode. Returns the FILE pointer if successful. * Otherwise, aws_last_error() will contain the error that occurred */ AWS_COMMON_API FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode); /** * Creates a directory if it doesn't currently exist. If the directory already exists, it's ignored and assumed * successful. * * Returns AWS_OP_SUCCESS on success. Otherwise, check aws_last_error(). */ AWS_COMMON_API int aws_directory_create(const struct aws_string *dir_path); /** * Returns true if the directory currently exists. Otherwise, it returns false. */ AWS_COMMON_API bool aws_directory_exists(const struct aws_string *dir_path); /** * Deletes a directory. If the directory is not empty, this will fail unless the recursive parameter is set to true. * If recursive is true then the entire directory and all of its contents will be deleted. If it is set to false, * the directory will be deleted only if it is empty. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise, * aws_last_error() will contain the error that occurred. If the directory doesn't exist, AWS_OP_SUCCESS is still * returned. */ AWS_COMMON_API int aws_directory_delete(const struct aws_string *dir_path, bool recursive); /** * Deletes a file. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise, * aws_last_error() will contain the error that occurred. If the file doesn't exist, AWS_OP_SUCCESS is still returned. */ AWS_COMMON_API int aws_file_delete(const struct aws_string *file_path); /** * Moves directory at from to to. * Returns AWS_OP_SUCCESS if the operation was successful. Otherwise, * aws_last_error() will contain the error that occurred. */ AWS_COMMON_API int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to); /** * Traverse a directory starting at path. * * If you want the traversal to recurse the entire directory, pass recursive as true. Passing false for this parameter * will only iterate the contents of the directory, but will not descend into any directories it encounters. * * If recursive is set to true, the traversal is performed post-order, depth-first * (for practical reasons such as deleting a directory that contains subdirectories or files). * * returns AWS_OP_SUCCESS(0) on success. */ AWS_COMMON_API int aws_directory_traverse( struct aws_allocator *allocator, const struct aws_string *path, bool recursive, aws_on_directory_entry *on_entry, void *user_data); /** * Creates a read-only iterator of a directory starting at path. 
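 *
 * A usage sketch (error handling trimmed; allocator and path are assumed
 * to exist, and a newly created iterator is assumed to already point at
 * the first entry):
 *
 *     struct aws_directory_iterator *it = aws_directory_entry_iterator_new(allocator, path);
 *     if (it != NULL) {
 *         do {
 *             const struct aws_directory_entry *entry = aws_directory_entry_iterator_get_value(it);
 *             if (entry != NULL) {
 *                 // inspect entry->relative_path, entry->file_type, ...
 *             }
 *         } while (aws_directory_entry_iterator_next(it) == AWS_OP_SUCCESS);
 *         aws_directory_entry_iterator_destroy(it);
 *     }
 *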
If path is invalid or there's any other error * condition, NULL will be returned. Call aws_last_error() for the exact error in that case. */ AWS_COMMON_API struct aws_directory_iterator *aws_directory_entry_iterator_new( struct aws_allocator *allocator, const struct aws_string *path); /** * Moves the iterator to the next entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with * AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available. */ AWS_COMMON_API int aws_directory_entry_iterator_next(struct aws_directory_iterator *iterator); /** * Moves the iterator to the previous entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with * AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available. */ AWS_COMMON_API int aws_directory_entry_iterator_previous(struct aws_directory_iterator *iterator); /** * Cleanup and deallocate iterator */ AWS_COMMON_API void aws_directory_entry_iterator_destroy(struct aws_directory_iterator *iterator); /** * Gets the aws_directory_entry value for iterator at the current position. Returns NULL if the iterator contains no * entries. */ AWS_COMMON_API const struct aws_directory_entry *aws_directory_entry_iterator_get_value( const struct aws_directory_iterator *iterator); /** * Returns true iff the character is a directory separator on ANY supported platform. */ AWS_COMMON_API bool aws_is_any_directory_separator(char value); /** * Returns the directory separator used by the local platform */ AWS_COMMON_API char aws_get_platform_directory_separator(void); /** * Normalizes the path by replacing any directory separator with the local platform's directory separator. * @param path path to normalize. Must be writeable. */ AWS_COMMON_API void aws_normalize_directory_separator(struct aws_byte_buf *path); /** * Returns the current user's home directory. */ AWS_COMMON_API struct aws_string *aws_get_home_directory(struct aws_allocator *allocator); /** * Returns true if a file or path exists, otherwise, false. */ AWS_COMMON_API bool aws_path_exists(const struct aws_string *path); /* * Wrapper for highest-resolution platform-dependent seek implementation. * Maps to: * * _fseeki64() on windows * fseeko() on linux * * whence can either be SEEK_SET or SEEK_END * * Returns AWS_OP_SUCCESS, or AWS_OP_ERR (after an error has been raised). */ AWS_COMMON_API int aws_fseek(FILE *file, int64_t offset, int whence); /* * Wrapper for os-specific file length query. We can't use fseek(END, 0) * because support for it is not technically required. * * Unix flavors call fstat, while Windows variants use GetFileSize on a * HANDLE queried from the libc FILE pointer. */ AWS_COMMON_API int aws_file_get_length(FILE *file, int64_t *length); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_FILE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/hash_table.h000066400000000000000000000363251456575232400265010ustar00rootroot00000000000000#ifndef AWS_COMMON_HASH_TABLE_H #define AWS_COMMON_HASH_TABLE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL enum { AWS_COMMON_HASH_TABLE_ITER_CONTINUE = (1 << 0), AWS_COMMON_HASH_TABLE_ITER_DELETE = (1 << 1), AWS_COMMON_HASH_TABLE_ITER_ERROR = (1 << 2), }; /** * Hash table data structure. This module provides an automatically resizing * hash table implementation for general purpose use. 
The hash table stores a * mapping between void * keys and values; it is expected that in most cases, * these will point to a structure elsewhere in the heap, instead of inlining a * key or value into the hash table element itself. * * Currently, this hash table implements a variant of robin hood hashing, but * we do not guarantee that this won't change in the future. * * Associated with each hash function are four callbacks: * * hash_fn - A hash function from the keys to a uint64_t. It is critical that * the hash function for a key does not change while the key is in the hash * table; violating this results in undefined behavior. Collisions are * tolerated, though naturally with reduced performance. * * equals_fn - An equality comparison function. This function must be * reflexive and consistent with hash_fn. * * destroy_key_fn, destroy_value_fn - Optional callbacks invoked when the * table is cleared or cleaned up and at the caller's option when an element * is removed from the table. Either or both may be set to NULL, which * has the same effect as a no-op destroy function. * * This datastructure can be safely moved between threads, subject to the * requirements of the underlying allocator. It is also safe to invoke * non-mutating operations on the hash table from multiple threads. A suitable * memory barrier must be used when transitioning from single-threaded mutating * usage to multithreaded usage. */ struct hash_table_state; /* Opaque pointer */ struct aws_hash_table { struct hash_table_state *p_impl; }; /** * Represents an element in the hash table. Various operations on the hash * table may provide pointers to elements stored within the hash table; * generally, calling code may alter value, but must not alter key (or any * information used to compute key's hash code). * * Pointers to elements within the hash are invalidated whenever an operation * which may change the number of elements in the hash is invoked (i.e. put, * delete, clear, and clean_up), regardless of whether the number of elements * actually changes. */ struct aws_hash_element { const void *key; void *value; }; enum aws_hash_iter_status { AWS_HASH_ITER_STATUS_DONE, AWS_HASH_ITER_STATUS_DELETE_CALLED, AWS_HASH_ITER_STATUS_READY_FOR_USE, }; struct aws_hash_iter { const struct aws_hash_table *map; struct aws_hash_element element; size_t slot; size_t limit; enum aws_hash_iter_status status; /* * Reserving extra fields for binary compatibility with future expansion of * iterator in case hash table implementation changes. */ int unused_0; void *unused_1; void *unused_2; }; /** * Prototype for a key hashing function pointer. */ typedef uint64_t(aws_hash_fn)(const void *key); /** * Prototype for a hash table equality check function pointer. * * This type is usually used for a function that compares two hash table * keys, but note that the same type is used for a function that compares * two hash table values in aws_hash_table_eq. * * Equality functions used in a hash table must be be reflexive (a == a), * symmetric (a == b => b == a), transitive (a == b, b == c => a == c) * and consistent (result does not change with time). */ typedef bool(aws_hash_callback_eq_fn)(const void *a, const void *b); /** * Prototype for a hash table key or value destructor function pointer. * * This function is used to destroy elements in the hash table when the * table is cleared or cleaned up. 
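 *
 * A sketch of a typical destructor pairing (assumes heap-allocated
 * struct aws_string keys and values, with aws_string_destroy() coming
 * from aws/common/string.h; illustrative only):
 *
 *     static void s_destroy_string(void *key_or_value) {
 *         aws_string_destroy(key_or_value);
 *     }
 *
 *     // pass as destroy_key_fn / destroy_value_fn to aws_hash_table_init()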
* * Note that functions which remove individual elements from the hash * table provide options of whether or not to invoke the destructors * on the key and value of a removed element. */ typedef void(aws_hash_callback_destroy_fn)(void *key_or_value); AWS_EXTERN_C_BEGIN /** * Initializes a hash map with initial capacity for 'size' elements * without resizing. Uses hash_fn to compute the hash of each element. * equals_fn to compute equality of two keys. Whenever an element is * removed without being returned, destroy_key_fn is run on the pointer * to the key and destroy_value_fn is run on the pointer to the value. * Either or both may be NULL if a callback is not desired in this case. */ AWS_COMMON_API int aws_hash_table_init( struct aws_hash_table *map, struct aws_allocator *alloc, size_t size, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn); /** * Deletes every element from map and frees all associated memory. * destroy_fn will be called for each element. aws_hash_table_init * must be called before reusing the hash table. * * This method is idempotent. */ AWS_COMMON_API void aws_hash_table_clean_up(struct aws_hash_table *map); /** * Safely swaps two hash tables. Note that we swap the entirety of the hash * table, including which allocator is associated. * * Neither hash table is required to be initialized; if one or both is * uninitialized, then the uninitialized state is also swapped. */ AWS_COMMON_API void aws_hash_table_swap(struct aws_hash_table *AWS_RESTRICT a, struct aws_hash_table *AWS_RESTRICT b); /** * Moves the hash table in 'from' to 'to'. After this move, 'from' will * be identical to the state of the original 'to' hash table, and 'to' * will be in the same state as if it had been passed to aws_hash_table_clean_up * (that is, it will have no memory allocated, and it will be safe to * either discard it or call aws_hash_table_clean_up again). * * Note that 'to' will not be cleaned up. You should make sure that 'to' * is either uninitialized or cleaned up before moving a hashtable into * it. */ AWS_COMMON_API void aws_hash_table_move(struct aws_hash_table *AWS_RESTRICT to, struct aws_hash_table *AWS_RESTRICT from); /** * Returns the current number of entries in the table. */ AWS_COMMON_API size_t aws_hash_table_get_entry_count(const struct aws_hash_table *map); /** * Returns an iterator to be used for iterating through a hash table. * Iterator will already point to the first element of the table it finds, * which can be accessed as iter.element. * * This function cannot fail, but if there are no elements in the table, * the returned iterator will return true for aws_hash_iter_done(&iter). */ AWS_COMMON_API struct aws_hash_iter aws_hash_iter_begin(const struct aws_hash_table *map); /** * Returns true if iterator is done iterating through table, false otherwise. * If this is true, the iterator will not include an element of the table. */ AWS_COMMON_API bool aws_hash_iter_done(const struct aws_hash_iter *iter); /** * Updates iterator so that it points to next element of hash table. * * This and the two previous functions are designed to be used together with * the following idiom: * * for (struct aws_hash_iter iter = aws_hash_iter_begin(&map); * !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { * const key_type key = *(const key_type *)iter.element.key; * value_type value = *(value_type *)iter.element.value; * // etc. 
* } * * Note that calling this on an iter which is "done" is idempotent: * i.e. it will return another iter which is "done". */ AWS_COMMON_API void aws_hash_iter_next(struct aws_hash_iter *iter); /** * Deletes the element currently pointed-to by the hash iterator. * After calling this method, the element member of the iterator * should not be accessed until the next call to aws_hash_iter_next. * * @param destroy_contents If true, the destructors for the key and value * will be called. */ AWS_COMMON_API void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents); /** * Attempts to locate an element at key. If the element is found, a * pointer to the value is placed in *p_elem; if it is not found, * *pElem is set to NULL. Either way, AWS_OP_SUCCESS is returned. * * This method does not change the state of the hash table. Therefore, it * is safe to call _find from multiple threads on the same hash table, * provided no mutating operations happen in parallel. * * Calling code may update the value in the hash table by modifying **pElem * after a successful find. However, this pointer is not guaranteed to * remain usable after a subsequent call to _put, _delete, _clear, or * _clean_up. */ AWS_COMMON_API int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem); /** * Attempts to locate an element at key. If no such element was found, * creates a new element, with value initialized to NULL. In either case, a * pointer to the element is placed in *p_elem. * * If was_created is non-NULL, *was_created is set to 0 if an existing * element was found, or 1 is a new element was created. * * Returns AWS_OP_SUCCESS if an item was found or created. * Raises AWS_ERROR_OOM if hash table expansion was required and memory * allocation failed. */ AWS_COMMON_API int aws_hash_table_create( struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem, int *was_created); /** * Inserts a new element at key, with the given value. If another element * exists at that key, the old element will be overwritten; both old key and * value objects will be destroyed. * * If was_created is non-NULL, *was_created is set to 0 if an existing * element was found, or 1 is a new element was created. * * Returns AWS_OP_SUCCESS if an item was found or created. * Raises AWS_ERROR_OOM if hash table expansion was required and memory * allocation failed. */ AWS_COMMON_API int aws_hash_table_put(struct aws_hash_table *map, const void *key, void *value, int *was_created); /** * Removes element at key. Always returns AWS_OP_SUCCESS. * * If pValue is non-NULL, the existing value (if any) is moved into * (*value) before removing from the table, and destroy_fn is _not_ * invoked. If pValue is NULL, then (if the element existed) destroy_fn * will be invoked on the element being removed. * * If was_present is non-NULL, it is set to 0 if the element was * not present, or 1 if it was present (and is now removed). */ AWS_COMMON_API int aws_hash_table_remove( struct aws_hash_table *map, const void *key, struct aws_hash_element *p_value, int *was_present); /** * Removes element already known (typically by find()). * * p_value should point to a valid element returned by create() or find(). * * NOTE: DO NOT call this method from inside of a aws_hash_table_foreach callback, return * AWS_COMMON_HASH_TABLE_ITER_DELETE instead. 
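 *
 * For deletion during iteration, a foreach callback might look like the following
 * (an illustrative sketch only; "s_should_drop" is a hypothetical predicate):
 *
 *     static int s_drop_matching(void *context, struct aws_hash_element *p_element) {
 *         (void)context;
 *         if (s_should_drop(p_element->key)) {
 *             return AWS_COMMON_HASH_TABLE_ITER_DELETE | AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
 *         }
 *         return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
 *     }
 *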
*/ AWS_COMMON_API int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_element *p_value); /** * Iterates through every element in the map and invokes the callback on * that item. Iteration is performed in an arbitrary, implementation-defined * order, and is not guaranteed to be consistent across invocations. * * The callback may change the value associated with the key by overwriting * the value pointed-to by value. In this case, the on_element_removed * callback will not be invoked, unless the callback invokes * AWS_COMMON_HASH_TABLE_ITER_DELETE (in which case the on_element_removed * is given the updated value). * * The callback must return a bitmask of zero or more of the following values * ORed together: * * # AWS_COMMON_HASH_TABLE_ITER_CONTINUE - Continues iteration to the next * element (if not set, iteration stops) * # AWS_COMMON_HASH_TABLE_ITER_DELETE - Deletes the current value and * continues iteration. destroy_fn will NOT be invoked. * # AWS_COMMON_HASH_TABLE_ITER_ERROR - Stop iteration with error. * No action will be taken for the current value and the value before this. * No rolling back. The deleted value before will NOT be back. * aws_hash_table_foreach returns AWS_OP_ERR after stropping the iteration. * * Invoking any method which may change the contents of the hashtable * during iteration results in undefined behavior. However, you may safely * invoke non-mutating operations during an iteration. * * This operation is mutating only if AWS_COMMON_HASH_TABLE_ITER_DELETE * is returned at some point during iteration. Otherwise, it is non-mutating * and is safe to invoke in parallel with other non-mutating operations. */ AWS_COMMON_API int aws_hash_table_foreach( struct aws_hash_table *map, int (*callback)(void *context, struct aws_hash_element *p_element), void *context); /** * Compares two hash tables for equality. Both hash tables must have equivalent * key comparators; values will be compared using the comparator passed into this * function. The key hash function does not need to be equivalent between the * two hash tables. */ AWS_COMMON_API bool aws_hash_table_eq( const struct aws_hash_table *a, const struct aws_hash_table *b, aws_hash_callback_eq_fn *value_eq); /** * Removes every element from the hash map. destroy_fn will be called for * each element. */ AWS_COMMON_API void aws_hash_table_clear(struct aws_hash_table *map); /** * Convenience hash function for NULL-terminated C-strings */ AWS_COMMON_API uint64_t aws_hash_c_string(const void *item); /** * Convenience hash function for struct aws_strings. * Hash is same as used on the string bytes by aws_hash_c_string. */ AWS_COMMON_API uint64_t aws_hash_string(const void *item); /** * Convenience hash function for struct aws_byte_cursor. * Hash is same as used on the string bytes by aws_hash_c_string. */ AWS_COMMON_API uint64_t aws_hash_byte_cursor_ptr(const void *item); /** * Convenience hash function which hashes the pointer value directly, * without dereferencing. This can be used in cases where pointer identity * is desired, or where a uintptr_t is encoded into a const void *. 
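 *
 * A minimal sketch of a pointer-identity keyed table, pairing this hash with
 * aws_ptr_eq (declared below); "allocator" is assumed to be caller-provided:
 *
 *     struct aws_hash_table by_ptr;
 *     aws_hash_table_init(&by_ptr, allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, NULL);
 *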
*/ AWS_COMMON_API uint64_t aws_hash_ptr(const void *item); AWS_COMMON_API uint64_t aws_hash_combine(uint64_t item1, uint64_t item2); /** * Convenience eq callback for NULL-terminated C-strings */ AWS_COMMON_API bool aws_hash_callback_c_str_eq(const void *a, const void *b); /** * Convenience eq callback for AWS strings */ AWS_COMMON_API bool aws_hash_callback_string_eq(const void *a, const void *b); /** * Convenience destroy callback for AWS strings */ AWS_COMMON_API void aws_hash_callback_string_destroy(void *a); /** * Equality function which compares pointer equality. */ AWS_COMMON_API bool aws_ptr_eq(const void *a, const void *b); /** * Best-effort check of hash_table_state data-structure invariants */ AWS_COMMON_API bool aws_hash_table_is_valid(const struct aws_hash_table *map); /** * Given a pointer to a hash_iter, checks that it is well-formed, with all data-structure invariants. */ AWS_COMMON_API bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_HASH_TABLE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/json.h000066400000000000000000000377461456575232400253700ustar00rootroot00000000000000#ifndef AWS_COMMON_JSON_H #define AWS_COMMON_JSON_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_json_value; AWS_EXTERN_C_BEGIN // ==================== // Create and pass type /** * Creates a new string aws_json_value with the given string and returns a pointer to it. * * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or * on the object/array containing the aws_json_value. * @param string A byte pointer to the string you want to store in the aws_json_value * @param allocator The allocator to use when creating the value * @return A new string aws_json_value */ AWS_COMMON_API struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string); /** * Creates a new number aws_json_value with the given number and returns a pointer to it. * * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or * on the object/array containing the aws_json_value. * @param number The number you want to store in the aws_json_value * @param allocator The allocator to use when creating the value * @return A new number aws_json_value */ AWS_COMMON_API struct aws_json_value *aws_json_value_new_number(struct aws_allocator *allocator, double number); /** * Creates a new array aws_json_value and returns a pointer to it. * * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or * on the object/array containing the aws_json_value. * Deleting this array will also destroy any aws_json_values it contains. * @param allocator The allocator to use when creating the value * @return A new array aws_json_value */ AWS_COMMON_API struct aws_json_value *aws_json_value_new_array(struct aws_allocator *allocator); /** * Creates a new boolean aws_json_value with the given boolean and returns a pointer to it. * * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or * on the object/array containing the aws_json_value. 
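 *
 * A short, non-authoritative sketch of that ownership model ("allocator" is assumed
 * to be caller-provided; aws_byte_cursor_from_c_str comes from aws/common/byte_buf.h):
 *
 *     struct aws_json_value *obj = aws_json_value_new_object(allocator);
 *     aws_json_value_add_to_object(
 *         obj, aws_byte_cursor_from_c_str("enabled"), aws_json_value_new_boolean(allocator, true));
 *     aws_json_value_destroy(obj); // also destroys the attached boolean value
 *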
* @param boolean The boolean you want to store in the aws_json_value * @param allocator The allocator to use when creating the value * @return A new boolean aws_json_value */ AWS_COMMON_API struct aws_json_value *aws_json_value_new_boolean(struct aws_allocator *allocator, bool boolean); /** * Creates a new null aws_json_value and returns a pointer to it. * * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or * on the object/array containing the aws_json_value. * @param allocator The allocator to use when creating the value * @return A new null aws_json_value */ AWS_COMMON_API struct aws_json_value *aws_json_value_new_null(struct aws_allocator *allocator); /** * Creates a new object aws_json_value and returns a pointer to it. * * Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or * on the object/array containing the aws_json_value. * Deleting this object will also destroy any aws_json_values it contains. * @param allocator The allocator to use when creating the value * @return A new object aws_json_value */ AWS_COMMON_API struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator); // ==================== // ==================== // Value getters /** * Gets the string of a string aws_json_value. * @param value The string aws_json_value. * @param output The string * @return AWS_OP_SUCCESS if the value is a string, otherwise AWS_OP_ERR. */ AWS_COMMON_API int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output); /** * Gets the number of a number aws_json_value. * @param value The number aws_json_value. * @param output The number * @return AWS_OP_SUCCESS if the value is a number, otherwise AWS_OP_ERR. */ AWS_COMMON_API int aws_json_value_get_number(const struct aws_json_value *value, double *output); /** * Gets the boolean of a boolean aws_json_value. * @param value The boolean aws_json_value. * @param output The boolean * @return AWS_OP_SUCCESS if the value is a boolean, otherwise AWS_OP_ERR. */ AWS_COMMON_API int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output); // ==================== // ==================== // Object API /** * Adds a aws_json_value to a object aws_json_value. * * Note that the aws_json_value will be destroyed when the aws_json_value object is destroyed * by calling "aws_json_destroy()" * @param object The object aws_json_value you want to add a value to. * @param key The key to add the aws_json_value at. * @param value The aws_json_value you want to add. * @return AWS_OP_SUCCESS if adding was successful. * Will return AWS_OP_ERROR if the object passed is invalid or if the passed key * is already in use in the object. */ AWS_COMMON_API int aws_json_value_add_to_object( struct aws_json_value *object, struct aws_byte_cursor key, struct aws_json_value *value); /** * Returns the aws_json_value at the given key. * @param object The object aws_json_value you want to get the value from. * @param key The key that the aws_json_value is at. Is case sensitive. * @return The aws_json_value at the given key, otherwise NULL. */ AWS_COMMON_API struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_value *object, struct aws_byte_cursor key); /** * Checks if there is a aws_json_value at the given key. * @param object The value aws_json_value you want to check a key in. * @param key The key that you want to check. Is case sensitive. 
* @return True if a aws_json_value is found. */ AWS_COMMON_API bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte_cursor key); /** * Removes the aws_json_value at the given key. * @param object The object aws_json_value you want to remove a aws_json_value in. * @param key The key that the aws_json_value is at. Is case sensitive. * @return AWS_OP_SUCCESS if the aws_json_value was removed. * Will return AWS_OP_ERR if the object passed is invalid or if the value * at the key cannot be found. */ AWS_COMMON_API int aws_json_value_remove_from_object(struct aws_json_value *object, struct aws_byte_cursor key); /** * @brief callback for iterating members of an object * Iteration can be controlled as follows: * - return AWS_OP_SUCCESS and out_should_continue is set to true (default value) - * continue iteration without error * - return AWS_OP_SUCCESS and out_continue is set to false - * stop iteration without error * - return AWS_OP_ERR - stop iteration with error */ typedef int(aws_json_on_member_encountered_const_fn)( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data); /** * @brief iterates through members of the object. * iteration is sequential in order fields were initially parsed. * @param object object to iterate over. * @param on_member callback for when member is encountered. * @param user_data user data to pass back in callback. * @return AWS_OP_SUCCESS when iteration finishes completely or exits early, * AWS_OP_ERR if value is not an object. */ AWS_COMMON_API int aws_json_const_iterate_object( const struct aws_json_value *object, aws_json_on_member_encountered_const_fn *on_member, void *user_data); // ==================== // ==================== // Array API /** * Adds a aws_json_value to the given array aws_json_value. * * Note that the aws_json_value will be destroyed when the aws_json_value array is destroyed * by calling "aws_json_destroy()" * @param array The array aws_json_value you want to add an aws_json_value to. * @param value The aws_json_value you want to add. * @return AWS_OP_SUCCESS if adding the aws_json_value was successful. * Will return AWS_OP_ERR if the array passed is invalid. */ AWS_COMMON_API int aws_json_value_add_array_element(struct aws_json_value *array, const struct aws_json_value *value); /** * Returns the aws_json_value at the given index in the array aws_json_value. * @param array The array aws_json_value. * @param index The index of the aws_json_value you want to access. * @return A pointer to the aws_json_value at the given index in the array, otherwise NULL. */ AWS_COMMON_API struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index); /** * Returns the number of items in the array aws_json_value. * @param array The array aws_json_value. * @return The number of items in the array_json_value. */ AWS_COMMON_API size_t aws_json_get_array_size(const struct aws_json_value *array); /** * Removes the aws_json_value at the given index in the array aws_json_value. * @param array The array aws_json_value. * @param index The index containing the aws_json_value you want to remove. * @return AWS_OP_SUCCESS if the aws_json_value at the index was removed. * Will return AWS_OP_ERR if the array passed is invalid or if the index * passed is out of range. */ AWS_COMMON_API int aws_json_value_remove_array_element(struct aws_json_value *array, size_t index); /** * @brief callback for iterating values of an array. 
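 *
 * For illustration only (a hedged sketch, not part of the API), a callback that stops
 * early at the first null element could be written as:
 *
 *     static int s_on_value(
 *         size_t index, const struct aws_json_value *value, bool *out_should_continue, void *user_data) {
 *         (void)index;
 *         (void)user_data;
 *         if (aws_json_value_is_null(value)) {
 *             *out_should_continue = false;
 *         }
 *         return AWS_OP_SUCCESS;
 *     }
 *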
* Iteration can be controlled as follows: * - return AWS_OP_SUCCESS and out_should_continue is set to true (default value) - * continue iteration without error * - return AWS_OP_SUCCESS and out_continue is set to false - * stop iteration without error * - return AWS_OP_ERR - stop iteration with error */ typedef int(aws_json_on_value_encountered_const_fn)( size_t index, const struct aws_json_value *value, bool *out_should_continue, void *user_data); /** * @brief iterates through values of an array. * iteration is sequential starting with 0th element. * @param array array to iterate over. * @param on_value callback for when value is encountered. * @param user_data user data to pass back in callback. * @return AWS_OP_SUCCESS when iteration finishes completely or exits early, * AWS_OP_ERR if value is not an array. */ AWS_COMMON_API int aws_json_const_iterate_array( const struct aws_json_value *array, aws_json_on_value_encountered_const_fn *on_value, void *user_data); // ==================== // ==================== // Checks /** * Checks whether two json values are equivalent. * @param a first value to compare. * @param b second value to compare. * @param is_case_sensitive case sensitive compare or not. * @return True is values are equal, false otherwise */ AWS_COMMON_API bool aws_json_value_compare(const struct aws_json_value *a, const struct aws_json_value *b, bool is_case_sensitive); /** * Duplicates json value. * @param value first value to compare. * @return duplicated value. NULL and last error set if value cannot be duplicated. */ AWS_COMMON_API struct aws_json_value *aws_json_value_duplicate(const struct aws_json_value *value); /** * Checks if the aws_json_value is a string. * @param value The aws_json_value to check. * @return True if the aws_json_value is a string aws_json_value, otherwise false. */ AWS_COMMON_API bool aws_json_value_is_string(const struct aws_json_value *value); /** * Checks if the aws_json_value is a number. * @param value The aws_json_value to check. * @return True if the aws_json_value is a number aws_json_value, otherwise false. */ AWS_COMMON_API bool aws_json_value_is_number(const struct aws_json_value *value); /** * Checks if the aws_json_value is a array. * @param value The aws_json_value to check. * @return True if the aws_json_value is a array aws_json_value, otherwise false. */ AWS_COMMON_API bool aws_json_value_is_array(const struct aws_json_value *value); /** * Checks if the aws_json_value is a boolean. * @param value The aws_json_value to check. * @return True if the aws_json_value is a boolean aws_json_value, otherwise false. */ AWS_COMMON_API bool aws_json_value_is_boolean(const struct aws_json_value *value); /** * Checks if the aws_json_value is a null aws_json_value. * @param value The aws_json_value to check. * @return True if the aws_json_value is a null aws_json_value, otherwise false. */ AWS_COMMON_API bool aws_json_value_is_null(const struct aws_json_value *value); /** * Checks if the aws_json_value is a object aws_json_value. * @param value The aws_json_value to check. * @return True if the aws_json_value is a object aws_json_value, otherwise false. */ AWS_COMMON_API bool aws_json_value_is_object(const struct aws_json_value *value); // ==================== // ==================== // Memory Management /** * Removes the aws_json_value from memory. If the aws_json_value is a object or array, it will also destroy * attached aws_json_values as well. 
* * For example, if you called "aws_json_array_add(b, a)" to add an object "a" to an array "b", if you call * "aws_json_destroy(b)" then it will also free "a" automatically. All children/attached aws_json_values are freed * when the parent/root aws_json_value is destroyed. * @param value The aws_json_value to destroy. */ AWS_COMMON_API void aws_json_value_destroy(struct aws_json_value *value); // ==================== // ==================== // Utility /** * Appends a unformatted JSON string representation of the aws_json_value into the passed byte buffer. * The byte buffer is expected to be already initialized so the function can append the JSON into it. * * Note: The byte buffer will automatically have its size extended if the JSON string is over the byte * buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not * have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned. * * Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free * the memory used, as it will NOT be called automatically. * @param value The aws_json_value to format. * @param output The destination for the JSON string * @return AWS_OP_SUCCESS if the JSON string was allocated to output without any errors * Will return AWS_OP_ERR if the value passed is not an aws_json_value or if there * was an error appending the JSON into the byte buffer. */ AWS_COMMON_API int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output); /** * Appends a formatted JSON string representation of the aws_json_value into the passed byte buffer. * The byte buffer is expected to already be initialized so the function can append the JSON into it. * * Note: The byte buffer will automatically have its size extended if the JSON string is over the byte * buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not * have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned. * * Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free * the memory used, as it will NOT be called automatically. * @param value The aws_json_value to format. * @param output The destination for the JSON string * @return AWS_OP_SUCCESS if the JSON string was allocated to output without any errors * Will return AWS_OP_ERR if the value passed is not an aws_json_value or if there * aws an error appending the JSON into the byte buffer. */ AWS_COMMON_API int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output); /** * Parses the JSON string and returns a aws_json_value containing the root of the JSON. * @param allocator The allocator used to create the value * @param string The string containing the JSON. * @return The root aws_json_value of the JSON. */ AWS_COMMON_API struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string); // ==================== AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif // AWS_COMMON_JSON_H aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/lifo_cache.h000066400000000000000000000016341456575232400264560ustar00rootroot00000000000000#ifndef AWS_COMMON_LIFO_CACHE_H #define AWS_COMMON_LIFO_CACHE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * Initializes the last-in-first-out cache. Sets up the underlying linked hash table. * Once `max_items` elements have been added, the latest(last-in) item will * be removed. For the other parameters, see aws/common/hash_table.h. Hash table * semantics of these arguments are preserved. */ AWS_COMMON_API struct aws_cache *aws_cache_new_lifo( struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t max_items); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LIFO_CACHE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/linked_hash_table.h000066400000000000000000000074551456575232400300310ustar00rootroot00000000000000#ifndef AWS_COMMON_LINKED_HASH_TABLE_H #define AWS_COMMON_LINKED_HASH_TABLE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL /** * Simple linked hash table. Preserves insertion order, and can be iterated in insertion order. * * You can also change the order safely without altering the shape of the underlying hash table. */ struct aws_linked_hash_table { struct aws_allocator *allocator; struct aws_linked_list list; struct aws_hash_table table; aws_hash_callback_destroy_fn *user_on_value_destroy; aws_hash_callback_destroy_fn *user_on_key_destroy; }; /** * Linked-List node stored in the table. This is the node type that will be returned in * aws_linked_hash_table_get_iteration_list(). */ struct aws_linked_hash_table_node { struct aws_linked_list_node node; struct aws_linked_hash_table *table; const void *key; void *value; }; AWS_EXTERN_C_BEGIN /** * Initializes the table. Sets up the underlying hash table and linked list. * For the other parameters, see aws/common/hash_table.h. Hash table * semantics of these arguments are preserved. */ AWS_COMMON_API int aws_linked_hash_table_init( struct aws_linked_hash_table *table, struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t initial_item_count); /** * Cleans up the table. Elements in the table will be evicted and cleanup * callbacks will be invoked. */ AWS_COMMON_API void aws_linked_hash_table_clean_up(struct aws_linked_hash_table *table); /** * Finds element in the table by key. If found, AWS_OP_SUCCESS will be * returned. If not found, AWS_OP_SUCCESS will be returned and *p_value will be * NULL. * * If any errors occur AWS_OP_ERR will be returned. */ AWS_COMMON_API int aws_linked_hash_table_find(struct aws_linked_hash_table *table, const void *key, void **p_value); /** * Finds element in the table by key. If found, AWS_OP_SUCCESS will be returned and the item will be moved to the back * of the list. * If not found, AWS_OP_SUCCESS will be returned and *p_value will be NULL. * * Note: this will change the order of elements */ AWS_COMMON_API int aws_linked_hash_table_find_and_move_to_back(struct aws_linked_hash_table *table, const void *key, void **p_value); /** * Puts `p_value` at `key`. If an element is already stored at `key` it will be replaced. */ AWS_COMMON_API int aws_linked_hash_table_put(struct aws_linked_hash_table *table, const void *key, void *p_value); /** * Removes item at `key` from the table. 
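 *
 * A brief usage sketch (assuming a table initialized with aws_hash_c_string /
 * aws_hash_callback_c_str_eq and NULL destroy callbacks; "config_ptr" is hypothetical):
 *
 *     aws_linked_hash_table_put(&table, "config", config_ptr);
 *     // ... later ...
 *     aws_linked_hash_table_remove(&table, "config");
 *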
*/ AWS_COMMON_API int aws_linked_hash_table_remove(struct aws_linked_hash_table *table, const void *key); /** * Clears all items from the table. */ AWS_COMMON_API void aws_linked_hash_table_clear(struct aws_linked_hash_table *table); /** * returns number of elements in the table. */ AWS_COMMON_API size_t aws_linked_hash_table_get_element_count(const struct aws_linked_hash_table *table); /** * Move the aws_linked_hash_table_node to the end of the list. * * Note: this will change the order of elements */ AWS_COMMON_API void aws_linked_hash_table_move_node_to_end_of_list( struct aws_linked_hash_table *table, struct aws_linked_hash_table_node *node); /** * returns the underlying linked list for iteration. * * The returned list has nodes of the type: aws_linked_hash_table_node. Use AWS_CONTAINER_OF for access to the element. */ AWS_COMMON_API const struct aws_linked_list *aws_linked_hash_table_get_iteration_list(const struct aws_linked_hash_table *table); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LINKED_HASH_TABLE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/linked_list.h000066400000000000000000000141351456575232400267030ustar00rootroot00000000000000#ifndef AWS_COMMON_LINKED_LIST_H #define AWS_COMMON_LINKED_LIST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_linked_list_node { struct aws_linked_list_node *next; struct aws_linked_list_node *prev; }; struct aws_linked_list { struct aws_linked_list_node head; struct aws_linked_list_node tail; }; AWS_EXTERN_C_BEGIN /** * Set node's next and prev pointers to NULL. */ AWS_STATIC_IMPL void aws_linked_list_node_reset(struct aws_linked_list_node *node); /** * These functions need to be defined first as they are used in pre * and post conditions. */ /** * Tests if the list is empty. */ AWS_STATIC_IMPL bool aws_linked_list_empty(const struct aws_linked_list *list); /** * Checks that a linked list is valid. */ AWS_STATIC_IMPL bool aws_linked_list_is_valid(const struct aws_linked_list *list); /** * Checks that the prev of the next pointer of a node points to the * node. As this checks whether the [next] connection of a node is * bidirectional, it returns false if used for the list tail. */ AWS_STATIC_IMPL bool aws_linked_list_node_next_is_valid(const struct aws_linked_list_node *node); /** * Checks that the next of the prev pointer of a node points to the * node. Similarly to the above, this returns false if used for the * head of a list. */ AWS_STATIC_IMPL bool aws_linked_list_node_prev_is_valid(const struct aws_linked_list_node *node); /** * Checks that a linked list satisfies double linked list connectivity * constraints. This check is O(n) as it traverses the whole linked * list to ensure that tail is reachable from head (and vice versa) * and that every connection is bidirectional. * * Note: This check *cannot* go into an infinite loop, because we * ensure that the connection to the next node is * bidirectional. Therefore, if a node's [a] a.next is a previous node * [b] in the list, b.prev != &a and so this check would fail, thus * terminating the loop. */ AWS_STATIC_IMPL bool aws_linked_list_is_valid_deep(const struct aws_linked_list *list); /** * Initializes the list. List will be empty after this call. */ AWS_STATIC_IMPL void aws_linked_list_init(struct aws_linked_list *list); /** * Returns an iteration pointer for the first element in the list. 
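 * Used to begin iterating the list forward, e.g. (a sketch; "struct my_item" and its
 * "node" member are hypothetical):
 *
 *     for (struct aws_linked_list_node *i = aws_linked_list_begin(list);
 *          i != aws_linked_list_end(list);
 *          i = aws_linked_list_next(i)) {
 *         struct my_item *item = AWS_CONTAINER_OF(i, struct my_item, node);
 *         // use item
 *     }
 *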
*/ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_begin(const struct aws_linked_list *list); /** * Returns an iteration pointer for one past the last element in the list. */ AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_end(const struct aws_linked_list *list); /** * Returns a pointer for the last element in the list. * Used to begin iterating the list in reverse. Ex: * for (i = aws_linked_list_rbegin(list); i != aws_linked_list_rend(list); i = aws_linked_list_prev(i)) {...} */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_rbegin(const struct aws_linked_list *list); /** * Returns the pointer to one before the first element in the list. * Used to end iterating the list in reverse. */ AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_rend(const struct aws_linked_list *list); /** * Returns the next element in the list. */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_next(const struct aws_linked_list_node *node); /** * Returns the previous element in the list. */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_prev(const struct aws_linked_list_node *node); /** * Inserts to_add immediately after after. */ AWS_STATIC_IMPL void aws_linked_list_insert_after( struct aws_linked_list_node *after, struct aws_linked_list_node *to_add); /** * Swaps the order two nodes in the linked list. */ AWS_STATIC_IMPL void aws_linked_list_swap_nodes(struct aws_linked_list_node *a, struct aws_linked_list_node *b); /** * Inserts to_add immediately before before. */ AWS_STATIC_IMPL void aws_linked_list_insert_before( struct aws_linked_list_node *before, struct aws_linked_list_node *to_add); /** * Removes the specified node from the list (prev/next point to each other) and * returns the next node in the list. */ AWS_STATIC_IMPL void aws_linked_list_remove(struct aws_linked_list_node *node); /** * Append new_node. */ AWS_STATIC_IMPL void aws_linked_list_push_back(struct aws_linked_list *list, struct aws_linked_list_node *node); /** * Returns the element in the back of the list. */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_back(const struct aws_linked_list *list); /** * Returns the element in the back of the list and removes it */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_back(struct aws_linked_list *list); /** * Prepend new_node. */ AWS_STATIC_IMPL void aws_linked_list_push_front(struct aws_linked_list *list, struct aws_linked_list_node *node); /** * Returns the element in the front of the list. */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_front(const struct aws_linked_list *list); /** * Returns the element in the front of the list and removes it */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_front(struct aws_linked_list *list); AWS_STATIC_IMPL void aws_linked_list_swap_contents( struct aws_linked_list *AWS_RESTRICT a, struct aws_linked_list *AWS_RESTRICT b); /** * Remove all nodes from one list, and add them to the back of another. * * Example: if dst={1,2} and src={3,4}, they become dst={1,2,3,4} and src={} */ AWS_STATIC_IMPL void aws_linked_list_move_all_back( struct aws_linked_list *AWS_RESTRICT dst, struct aws_linked_list *AWS_RESTRICT src); /** * Remove all nodes from one list, and add them to the front of another. 
* * Example: if dst={2,1} and src={4,3}, they become dst={4,3,2,1} and src={} */ AWS_STATIC_IMPL void aws_linked_list_move_all_front( struct aws_linked_list *AWS_RESTRICT dst, struct aws_linked_list *AWS_RESTRICT src); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LINKED_LIST_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/linked_list.inl000066400000000000000000000357721456575232400272500ustar00rootroot00000000000000#ifndef AWS_COMMON_LINKED_LIST_INL #define AWS_COMMON_LINKED_LIST_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_EXTERN_C_BEGIN /** * Set node's next and prev pointers to NULL. */ AWS_STATIC_IMPL void aws_linked_list_node_reset(struct aws_linked_list_node *node) { AWS_PRECONDITION(node != NULL); AWS_ZERO_STRUCT(*node); AWS_POSTCONDITION(AWS_IS_ZEROED(*node)); } /** * These functions need to be defined first as they are used in pre * and post conditions. */ /** * Tests if the list is empty. */ AWS_STATIC_IMPL bool aws_linked_list_empty(const struct aws_linked_list *list) { AWS_PRECONDITION(list); return list->head.next == &list->tail; } /** * Checks that a linked list is valid. */ AWS_STATIC_IMPL bool aws_linked_list_is_valid(const struct aws_linked_list *list) { if (list && list->head.next && list->head.prev == NULL && list->tail.prev && list->tail.next == NULL) { #if defined(AWS_DEEP_CHECKS) && (AWS_DEEP_CHECKS == 1) return aws_linked_list_is_valid_deep(list); #else return true; #endif } return false; } /** * Checks that the prev of the next pointer of a node points to the * node. As this checks whether the [next] connection of a node is * bidirectional, it returns false if used for the list tail. */ AWS_STATIC_IMPL bool aws_linked_list_node_next_is_valid(const struct aws_linked_list_node *node) { return node && node->next && node->next->prev == node; } /** * Checks that the next of the prev pointer of a node points to the * node. Similarly to the above, this returns false if used for the * head of a list. */ AWS_STATIC_IMPL bool aws_linked_list_node_prev_is_valid(const struct aws_linked_list_node *node) { return node && node->prev && node->prev->next == node; } /** * Checks that a linked list satisfies double linked list connectivity * constraints. This check is O(n) as it traverses the whole linked * list to ensure that tail is reachable from head (and vice versa) * and that every connection is bidirectional. * * Note: This check *cannot* go into an infinite loop, because we * ensure that the connection to the next node is * bidirectional. Therefore, if a node's [a] a.next is a previous node * [b] in the list, b.prev != &a and so this check would fail, thus * terminating the loop. 
*/ AWS_STATIC_IMPL bool aws_linked_list_is_valid_deep(const struct aws_linked_list *list) { if (!list) { return false; } /* This could go into an infinite loop for a circular list */ const struct aws_linked_list_node *temp = &list->head; /* Head must reach tail by following next pointers */ bool head_reaches_tail = false; /* By satisfying the above and that edges are bidirectional, we * also guarantee that tail reaches head by following prev * pointers */ while (temp) { if (temp == &list->tail) { head_reaches_tail = true; break; } else if (!aws_linked_list_node_next_is_valid(temp)) { /* Next and prev pointers should connect the same nodes */ return false; } temp = temp->next; } return head_reaches_tail; } /** * Initializes the list. List will be empty after this call. */ AWS_STATIC_IMPL void aws_linked_list_init(struct aws_linked_list *list) { AWS_PRECONDITION(list); list->head.next = &list->tail; list->head.prev = NULL; list->tail.prev = &list->head; list->tail.next = NULL; AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(aws_linked_list_empty(list)); } /** * Returns an iteration pointer for the first element in the list. */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_begin(const struct aws_linked_list *list) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); struct aws_linked_list_node *rval = list->head.next; AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(rval == list->head.next); return rval; } /** * Returns an iteration pointer for one past the last element in the list. */ AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_end(const struct aws_linked_list *list) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); const struct aws_linked_list_node *rval = &list->tail; AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(rval == &list->tail); return rval; } /** * Returns a pointer for the last element in the list. * Used to begin iterating the list in reverse. Ex: * for (i = aws_linked_list_rbegin(list); i != aws_linked_list_rend(list); i = aws_linked_list_prev(i)) {...} */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_rbegin(const struct aws_linked_list *list) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); struct aws_linked_list_node *rval = list->tail.prev; AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(rval == list->tail.prev); return rval; } /** * Returns the pointer to one before the first element in the list. * Used to end iterating the list in reverse. */ AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_rend(const struct aws_linked_list *list) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); const struct aws_linked_list_node *rval = &list->head; AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(rval == &list->head); return rval; } /** * Returns the next element in the list. */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_next(const struct aws_linked_list_node *node) { AWS_PRECONDITION(aws_linked_list_node_next_is_valid(node)); struct aws_linked_list_node *rval = node->next; AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(node)); AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(rval)); AWS_POSTCONDITION(rval == node->next); return rval; } /** * Returns the previous element in the list. 
*/ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_prev(const struct aws_linked_list_node *node) { AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(node)); struct aws_linked_list_node *rval = node->prev; AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(node)); AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(rval)); AWS_POSTCONDITION(rval == node->prev); return rval; } /** * Inserts to_add immediately after after. */ AWS_STATIC_IMPL void aws_linked_list_insert_after( struct aws_linked_list_node *after, struct aws_linked_list_node *to_add) { AWS_PRECONDITION(aws_linked_list_node_next_is_valid(after)); AWS_PRECONDITION(to_add != NULL); to_add->prev = after; to_add->next = after->next; after->next->prev = to_add; after->next = to_add; AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(after)); AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(to_add)); AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(to_add)); AWS_POSTCONDITION(after->next == to_add); } /** * Swaps the order two nodes in the linked list. */ AWS_STATIC_IMPL void aws_linked_list_swap_nodes(struct aws_linked_list_node *a, struct aws_linked_list_node *b) { AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(a)); AWS_PRECONDITION(aws_linked_list_node_next_is_valid(a)); AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(b)); AWS_PRECONDITION(aws_linked_list_node_next_is_valid(b)); if (a == b) { return; } /* snapshot b's value to avoid clobbering its next/prev pointers if a/b are adjacent */ struct aws_linked_list_node tmp = *b; a->prev->next = b; a->next->prev = b; tmp.prev->next = a; tmp.next->prev = a; tmp = *a; *a = *b; *b = tmp; AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(a)); AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(a)); AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(b)); AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(b)); } /** * Inserts to_add immediately before before. */ AWS_STATIC_IMPL void aws_linked_list_insert_before( struct aws_linked_list_node *before, struct aws_linked_list_node *to_add) { AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(before)); AWS_PRECONDITION(to_add != NULL); to_add->next = before; to_add->prev = before->prev; before->prev->next = to_add; before->prev = to_add; AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(before)); AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(to_add)); AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(to_add)); AWS_POSTCONDITION(before->prev == to_add); } /** * Removes the specified node from the list (prev/next point to each other) and * returns the next node in the list. */ AWS_STATIC_IMPL void aws_linked_list_remove(struct aws_linked_list_node *node) { AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(node)); AWS_PRECONDITION(aws_linked_list_node_next_is_valid(node)); node->prev->next = node->next; node->next->prev = node->prev; aws_linked_list_node_reset(node); AWS_POSTCONDITION(node->next == NULL && node->prev == NULL); } /** * Append new_node. */ AWS_STATIC_IMPL void aws_linked_list_push_back(struct aws_linked_list *list, struct aws_linked_list_node *node) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); AWS_PRECONDITION(node != NULL); aws_linked_list_insert_before(&list->tail, node); AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(list->tail.prev == node, "[node] is the new last element of [list]"); } /** * Returns the element in the back of the list. 
*/ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_back(const struct aws_linked_list *list) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); AWS_PRECONDITION(!aws_linked_list_empty(list)); struct aws_linked_list_node *rval = list->tail.prev; AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(rval)); AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(rval)); return rval; } /** * Returns the element in the back of the list and removes it */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_back(struct aws_linked_list *list) { AWS_PRECONDITION(!aws_linked_list_empty(list)); AWS_PRECONDITION(aws_linked_list_is_valid(list)); struct aws_linked_list_node *back = aws_linked_list_back(list); aws_linked_list_remove(back); AWS_POSTCONDITION(back->next == NULL && back->prev == NULL); AWS_POSTCONDITION(aws_linked_list_is_valid(list)); return back; } /** * Prepend new_node. */ AWS_STATIC_IMPL void aws_linked_list_push_front(struct aws_linked_list *list, struct aws_linked_list_node *node) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); AWS_PRECONDITION(node != NULL); aws_linked_list_insert_before(list->head.next, node); AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(list->head.next == node, "[node] is the new first element of [list]"); } /** * Returns the element in the front of the list. */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_front(const struct aws_linked_list *list) { AWS_PRECONDITION(aws_linked_list_is_valid(list)); AWS_PRECONDITION(!aws_linked_list_empty(list)); struct aws_linked_list_node *rval = list->head.next; AWS_POSTCONDITION(aws_linked_list_is_valid(list)); AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(rval)); AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(rval)); return rval; } /** * Returns the element in the front of the list and removes it */ AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_front(struct aws_linked_list *list) { AWS_PRECONDITION(!aws_linked_list_empty(list)); AWS_PRECONDITION(aws_linked_list_is_valid(list)); struct aws_linked_list_node *front = aws_linked_list_front(list); aws_linked_list_remove(front); AWS_POSTCONDITION(front->next == NULL && front->prev == NULL); AWS_POSTCONDITION(aws_linked_list_is_valid(list)); return front; } AWS_STATIC_IMPL void aws_linked_list_swap_contents( struct aws_linked_list *AWS_RESTRICT a, struct aws_linked_list *AWS_RESTRICT b) { AWS_PRECONDITION(aws_linked_list_is_valid(a)); AWS_PRECONDITION(aws_linked_list_is_valid(b)); AWS_PRECONDITION(a != b); struct aws_linked_list_node *a_first = a->head.next; struct aws_linked_list_node *a_last = a->tail.prev; /* Move B's contents into A */ if (aws_linked_list_empty(b)) { aws_linked_list_init(a); } else { a->head.next = b->head.next; a->head.next->prev = &a->head; a->tail.prev = b->tail.prev; a->tail.prev->next = &a->tail; } /* Move A's old contents into B */ if (a_first == &a->tail) { aws_linked_list_init(b); } else { b->head.next = a_first; b->head.next->prev = &b->head; b->tail.prev = a_last; b->tail.prev->next = &b->tail; } AWS_POSTCONDITION(aws_linked_list_is_valid(a)); AWS_POSTCONDITION(aws_linked_list_is_valid(b)); } AWS_STATIC_IMPL void aws_linked_list_move_all_back( struct aws_linked_list *AWS_RESTRICT dst, struct aws_linked_list *AWS_RESTRICT src) { AWS_PRECONDITION(aws_linked_list_is_valid(src)); AWS_PRECONDITION(aws_linked_list_is_valid(dst)); AWS_PRECONDITION(dst != src); if (!aws_linked_list_empty(src)) { /* splice src nodes 
into dst, between the back and tail nodes */ struct aws_linked_list_node *dst_back = dst->tail.prev; struct aws_linked_list_node *src_front = src->head.next; struct aws_linked_list_node *src_back = src->tail.prev; dst_back->next = src_front; src_front->prev = dst_back; dst->tail.prev = src_back; src_back->next = &dst->tail; /* reset src */ src->head.next = &src->tail; src->tail.prev = &src->head; } AWS_POSTCONDITION(aws_linked_list_is_valid(src)); AWS_POSTCONDITION(aws_linked_list_is_valid(dst)); } AWS_STATIC_IMPL void aws_linked_list_move_all_front( struct aws_linked_list *AWS_RESTRICT dst, struct aws_linked_list *AWS_RESTRICT src) { AWS_PRECONDITION(aws_linked_list_is_valid(src)); AWS_PRECONDITION(aws_linked_list_is_valid(dst)); AWS_PRECONDITION(dst != src); if (!aws_linked_list_empty(src)) { /* splice src nodes into dst, between the head and front nodes */ struct aws_linked_list_node *dst_front = dst->head.next; struct aws_linked_list_node *src_front = src->head.next; struct aws_linked_list_node *src_back = src->tail.prev; dst->head.next = src_front; src_front->prev = &dst->head; src_back->next = dst_front; dst_front->prev = src_back; /* reset src */ src->head.next = &src->tail; src->tail.prev = &src->head; } AWS_POSTCONDITION(aws_linked_list_is_valid(src)); AWS_POSTCONDITION(aws_linked_list_is_valid(dst)); } AWS_EXTERN_C_END #endif /* AWS_COMMON_LINKED_LIST_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/log_channel.h000066400000000000000000000036061456575232400266540ustar00rootroot00000000000000 #ifndef AWS_COMMON_LOG_CHANNEL_H #define AWS_COMMON_LOG_CHANNEL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_string; struct aws_log_writer; /* * Log channel interface and default implementations * * A log channel is an abstraction for the transfer of formatted log data between a source (formatter) * and a sink (writer). */ struct aws_log_channel; typedef int(aws_log_channel_send_fn)(struct aws_log_channel *channel, struct aws_string *output); typedef void(aws_log_channel_clean_up_fn)(struct aws_log_channel *channel); struct aws_log_channel_vtable { aws_log_channel_send_fn *send; aws_log_channel_clean_up_fn *clean_up; }; struct aws_log_channel { struct aws_log_channel_vtable *vtable; struct aws_allocator *allocator; struct aws_log_writer *writer; void *impl; }; AWS_EXTERN_C_BEGIN /* * Simple channel that results in log lines being written in the same thread they were generated in. * * The passed in log writer is not an ownership transfer. The log channel does not clean up the writer. */ AWS_COMMON_API int aws_log_channel_init_foreground( struct aws_log_channel *channel, struct aws_allocator *allocator, struct aws_log_writer *writer); /* * Simple channel that sends log lines to a background thread. * * The passed in log writer is not an ownership transfer. The log channel does not clean up the writer. 
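 *
 * A hedged wiring sketch (writer lifetime stays with the caller, per the note above;
 * "allocator" is assumed to be caller-provided):
 *
 *     struct aws_log_writer writer;
 *     struct aws_log_channel channel;
 *     aws_log_writer_init_stdout(&writer, allocator);
 *     aws_log_channel_init_background(&channel, allocator, &writer);
 *     // ... formatted log lines flow through the channel ...
 *     aws_log_channel_clean_up(&channel);
 *     aws_log_writer_clean_up(&writer); // caller still owns and cleans up the writer
 *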
*/ AWS_COMMON_API int aws_log_channel_init_background( struct aws_log_channel *channel, struct aws_allocator *allocator, struct aws_log_writer *writer); /* * Channel cleanup function */ AWS_COMMON_API void aws_log_channel_clean_up(struct aws_log_channel *channel); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LOG_CHANNEL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/log_formatter.h000066400000000000000000000050211456575232400272400ustar00rootroot00000000000000 #ifndef AWS_COMMON_LOG_FORMATTER_H #define AWS_COMMON_LOG_FORMATTER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; struct aws_string; /* * Log formatter interface and default implementation * * Log formatters are invoked by the LOGF_* macros to transform a set of arguments into * one or more lines of text to be output to a logging sink (writer). */ struct aws_log_formatter; typedef int(aws_log_formatter_format_fn)( struct aws_log_formatter *formatter, struct aws_string **formatted_output, enum aws_log_level level, aws_log_subject_t subject, const char *format, va_list args); typedef void(aws_log_formatter_clean_up_fn)(struct aws_log_formatter *logger); struct aws_log_formatter_vtable { aws_log_formatter_format_fn *format; aws_log_formatter_clean_up_fn *clean_up; }; struct aws_log_formatter { struct aws_log_formatter_vtable *vtable; struct aws_allocator *allocator; void *impl; }; struct aws_log_formatter_standard_options { enum aws_date_format date_format; }; struct aws_logging_standard_formatting_data { char *log_line_buffer; size_t total_length; enum aws_log_level level; const char *subject_name; const char *format; enum aws_date_format date_format; struct aws_allocator *allocator; /* not used, just there to make byte_bufs valid */ size_t amount_written; }; AWS_EXTERN_C_BEGIN /* * Initializes the default log formatter which outputs lines in the format: * * [] [] [] - \n */ AWS_COMMON_API int aws_log_formatter_init_default( struct aws_log_formatter *formatter, struct aws_allocator *allocator, struct aws_log_formatter_standard_options *options); /* * Cleans up a log formatter (minus the base structure memory) by calling the formatter's clean_up function * via the vtable. */ AWS_COMMON_API void aws_log_formatter_clean_up(struct aws_log_formatter *formatter); /* * Formats a single log line based on the input + the var args list. Output is written to a fixed-size * buffer supplied in the data struct. */ AWS_COMMON_API int aws_format_standard_log_line(struct aws_logging_standard_formatting_data *formatting_data, va_list args); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LOG_FORMATTER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/log_writer.h000066400000000000000000000036341456575232400265610ustar00rootroot00000000000000 #ifndef AWS_COMMON_LOG_WRITER_H #define AWS_COMMON_LOG_WRITER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; struct aws_string; /* * Log writer interface and default implementation(s) * * A log writer functions as a sink for formatted log lines. We provide * default implementations that go to stdout, stderr, and a specified file. 
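 *
 * For instance (a non-normative sketch; the filename is hypothetical), a file-backed
 * writer can be configured by name:
 *
 *     struct aws_log_writer_file_options options = {.filename = "app.log", .file = NULL};
 *     struct aws_log_writer writer;
 *     aws_log_writer_init_file(&writer, allocator, &options);
 *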
*/ struct aws_log_writer; typedef int(aws_log_writer_write_fn)(struct aws_log_writer *writer, const struct aws_string *output); typedef void(aws_log_writer_clean_up_fn)(struct aws_log_writer *writer); struct aws_log_writer_vtable { aws_log_writer_write_fn *write; aws_log_writer_clean_up_fn *clean_up; }; struct aws_log_writer { struct aws_log_writer_vtable *vtable; struct aws_allocator *allocator; void *impl; }; struct aws_log_writer_file_options { const char *filename; FILE *file; }; AWS_EXTERN_C_BEGIN /* * Initialize a log writer that sends log lines to stdout. Uses C library IO. */ AWS_COMMON_API int aws_log_writer_init_stdout(struct aws_log_writer *writer, struct aws_allocator *allocator); /* * Initialize a log writer that sends log lines to stderr. Uses C library IO. */ AWS_COMMON_API int aws_log_writer_init_stderr(struct aws_log_writer *writer, struct aws_allocator *allocator); /* * Initialize a log writer that sends log lines to a file. Uses C library IO. */ AWS_COMMON_API int aws_log_writer_init_file( struct aws_log_writer *writer, struct aws_allocator *allocator, struct aws_log_writer_file_options *options); /* * Frees all resources used by a log writer with the exception of the base structure memory */ AWS_COMMON_API void aws_log_writer_clean_up(struct aws_log_writer *writer); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LOG_WRITER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/logging.h000066400000000000000000000315301456575232400260260ustar00rootroot00000000000000#ifndef AWS_COMMON_LOGGING_H #define AWS_COMMON_LOGGING_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_LOG_LEVEL_NONE 0 #define AWS_LOG_LEVEL_FATAL 1 #define AWS_LOG_LEVEL_ERROR 2 #define AWS_LOG_LEVEL_WARN 3 #define AWS_LOG_LEVEL_INFO 4 #define AWS_LOG_LEVEL_DEBUG 5 #define AWS_LOG_LEVEL_TRACE 6 /** * Controls what log calls pass through the logger and what log calls get filtered out. * If a log level has a value of X, then all log calls using a level <= X will appear, while * those using a value > X will not occur. * * You can filter both dynamically (by setting the log level on the logger object) or statically * (by defining AWS_STATIC_LOG_LEVEL to be an appropriate integer module-wide). Statically filtered * log calls will be completely compiled out but require a rebuild if you want to get more detail * about what's happening. */ enum aws_log_level { AWS_LL_NONE = AWS_LOG_LEVEL_NONE, AWS_LL_FATAL = AWS_LOG_LEVEL_FATAL, AWS_LL_ERROR = AWS_LOG_LEVEL_ERROR, AWS_LL_WARN = AWS_LOG_LEVEL_WARN, AWS_LL_INFO = AWS_LOG_LEVEL_INFO, AWS_LL_DEBUG = AWS_LOG_LEVEL_DEBUG, AWS_LL_TRACE = AWS_LOG_LEVEL_TRACE, AWS_LL_COUNT }; /** * Log subject is a way of designating the topic of logging. * * The general idea is to support a finer-grained approach to log level control. The primary use case * is for situations that require more detailed logging within a specific domain, where enabling that detail * globally leads to an untenable flood of information. * * For example, enable TRACE logging for tls-related log statements (handshake binary payloads), but * only WARN logging everywhere else (because http payloads would blow up the log files). * * Log subject is an enum similar to aws error: each library has its own value-space and someone is * responsible for registering the value <-> string connections. 
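 *
 * For example, a library can describe and register its own subjects roughly as
 * follows (a sketch only; AWS_LS_MYLIB_GENERAL and the "mylib" names are
 * hypothetical placeholders):
 *
 *   static struct aws_log_subject_info s_mylib_subjects[] = {
 *       DEFINE_LOG_SUBJECT_INFO(AWS_LS_MYLIB_GENERAL, "mylib-general", "General mylib logging"),
 *   };
 *   static struct aws_log_subject_info_list s_mylib_subject_list = {
 *       .subject_list = s_mylib_subjects,
 *       .count = AWS_ARRAY_SIZE(s_mylib_subjects),
 *   };
 *   aws_register_log_subject_info_list(&s_mylib_subject_list);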
*/ typedef uint32_t aws_log_subject_t; /* Each library gets space for 2^^10 log subject entries */ enum { AWS_LOG_SUBJECT_STRIDE_BITS = 10, }; #define AWS_LOG_SUBJECT_STRIDE (1U << AWS_LOG_SUBJECT_STRIDE_BITS) #define AWS_LOG_SUBJECT_BEGIN_RANGE(x) ((x)*AWS_LOG_SUBJECT_STRIDE) #define AWS_LOG_SUBJECT_END_RANGE(x) (((x) + 1) * AWS_LOG_SUBJECT_STRIDE - 1) struct aws_log_subject_info { aws_log_subject_t subject_id; const char *subject_name; const char *subject_description; }; #define DEFINE_LOG_SUBJECT_INFO(id, name, desc) \ { .subject_id = (id), .subject_name = (name), .subject_description = (desc) } struct aws_log_subject_info_list { struct aws_log_subject_info *subject_list; size_t count; }; enum aws_common_log_subject { AWS_LS_COMMON_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID), AWS_LS_COMMON_TASK_SCHEDULER, AWS_LS_COMMON_THREAD, AWS_LS_COMMON_MEMTRACE, AWS_LS_COMMON_XML_PARSER, AWS_LS_COMMON_IO, AWS_LS_COMMON_BUS, AWS_LS_COMMON_TEST, AWS_LS_COMMON_JSON_PARSER, AWS_LS_COMMON_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_COMMON_PACKAGE_ID) }; struct aws_logger; struct aws_log_formatter; struct aws_log_channel; struct aws_log_writer; #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4623) /* default constructor was implicitly defined as deleted */ # pragma warning(disable : 4626) /* assignment operator was implicitly defined as deleted */ # pragma warning(disable : 5027) /* move assignment operator was implicitly defined as deleted */ #endif /** * We separate the log level function from the log call itself so that we can do the filter check in the macros (see * below) * * By doing so, we make it so that the variadic format arguments are not even evaluated if the filter check does not * succeed. */ struct aws_logger_vtable { int (*const log)( struct aws_logger *logger, enum aws_log_level log_level, aws_log_subject_t subject, const char *format, ...) #if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) __attribute__((format(printf, 4, 5))) #endif /* non-ms compilers: TODO - find out what versions format support was added in */ ; enum aws_log_level (*const get_log_level)(struct aws_logger *logger, aws_log_subject_t subject); void (*const clean_up)(struct aws_logger *logger); int (*set_log_level)(struct aws_logger *logger, enum aws_log_level); }; #ifdef _MSC_VER # pragma warning(pop) #endif struct aws_logger { struct aws_logger_vtable *vtable; struct aws_allocator *allocator; void *p_impl; }; /** * The base formatted logging macro that all other formatted logging macros resolve to. * Checks for a logger and filters based on log level. */ #define AWS_LOGF(log_level, subject, ...) \ do { \ AWS_ASSERT(log_level > 0); \ struct aws_logger *logger = aws_logger_get(); \ if (logger != NULL && logger->vtable->get_log_level(logger, (subject)) >= (log_level)) { \ logger->vtable->log(logger, log_level, subject, __VA_ARGS__); \ } \ } while (0) /** * Unconditional logging macro that takes a logger and does not do a level check or a null check. Intended for * situations when you need to log many things and do a single manual level check before beginning. */ #define AWS_LOGUF(logger, log_level, subject, ...) \ { logger->vtable->log(logger, log_level, subject, __VA_ARGS__); } /** * LOGF_ variants for each level. These are what should be used directly to do all logging. * * i.e. 
* * LOGF_FATAL("Device \"%s\" not found", device->name); * * * Later we will likely expose Subject-aware variants */ #if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_FATAL) # define AWS_LOGF_FATAL(subject, ...) AWS_LOGF(AWS_LL_FATAL, subject, __VA_ARGS__) #else # define AWS_LOGF_FATAL(subject, ...) #endif #if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_ERROR) # define AWS_LOGF_ERROR(subject, ...) AWS_LOGF(AWS_LL_ERROR, subject, __VA_ARGS__) #else # define AWS_LOGF_ERROR(subject, ...) #endif #if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_WARN) # define AWS_LOGF_WARN(subject, ...) AWS_LOGF(AWS_LL_WARN, subject, __VA_ARGS__) #else # define AWS_LOGF_WARN(subject, ...) #endif #if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_INFO) # define AWS_LOGF_INFO(subject, ...) AWS_LOGF(AWS_LL_INFO, subject, __VA_ARGS__) #else # define AWS_LOGF_INFO(subject, ...) #endif #if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_DEBUG) # define AWS_LOGF_DEBUG(subject, ...) AWS_LOGF(AWS_LL_DEBUG, subject, __VA_ARGS__) #else # define AWS_LOGF_DEBUG(subject, ...) #endif #if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_TRACE) # define AWS_LOGF_TRACE(subject, ...) AWS_LOGF(AWS_LL_TRACE, subject, __VA_ARGS__) #else # define AWS_LOGF_TRACE(subject, ...) #endif /* * Standard logger implementation composing three sub-components: * * The formatter takes var args input from the user and produces a formatted log line * The writer takes a formatted log line and outputs it somewhere * The channel is the transport between the two */ struct aws_logger_pipeline { struct aws_log_formatter *formatter; struct aws_log_channel *channel; struct aws_log_writer *writer; struct aws_allocator *allocator; struct aws_atomic_var level; }; /** * Options for aws_logger_init_standard(). * Set `filename` to open a file for logging and close it when the logger cleans up. * Set `file` to use a file that is already open, such as `stderr` or `stdout`. */ struct aws_logger_standard_options { enum aws_log_level level; const char *filename; FILE *file; }; AWS_EXTERN_C_BEGIN /** * Sets the aws logger used globally across the process. Not thread-safe. Must only be called once. */ AWS_COMMON_API void aws_logger_set(struct aws_logger *logger); /** * Gets the aws logger used globally across the process. */ AWS_COMMON_API struct aws_logger *aws_logger_get(void); /** * Gets the aws logger used globally across the process if the logging level is at least the inputted level. * * @param subject log subject to perform the level check versus, not currently used * @param level logging level to check against in order to return the logger * @return the current logger if the current logging level is at or more detailed then the supplied logging level */ AWS_COMMON_API struct aws_logger *aws_logger_get_conditional(aws_log_subject_t subject, enum aws_log_level level); /** * Cleans up all resources used by the logger; simply invokes the clean_up v-function */ AWS_COMMON_API void aws_logger_clean_up(struct aws_logger *logger); /** * Sets the current logging level for the logger. Loggers are not require to support this. 
* @param logger logger to set the log level for * @param level new log level for the logger * @return AWS_OP_SUCCESS if the level was successfully set, AWS_OP_ERR otherwise */ AWS_COMMON_API int aws_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level); /** * Converts a log level to a c-string constant. Intended primarily to support building log lines that * include the level in them, i.e. * * [ERROR] 10:34:54.642 01-31-19 - Json parse error.... */ AWS_COMMON_API int aws_log_level_to_string(enum aws_log_level log_level, const char **level_string); /** * Converts a c-string constant to a log level value. Uses case-insensitive comparison * and simply iterates all possibilities until a match or nothing remains. If no match * is found, AWS_OP_ERR is returned. */ AWS_COMMON_API int aws_string_to_log_level(const char *level_string, enum aws_log_level *log_level); /** * Converts an aws_thread_id_t to a c-string. For portability, aws_thread_id_t * must not be printed directly. Intended primarily to support building log * lines that include the thread id in them. The parameter `buffer` must * point-to a char buffer of length `bufsz == AWS_THREAD_ID_T_REPR_BUFSZ`. The * thread id representation is returned in `buffer`. */ AWS_COMMON_API int aws_thread_id_t_to_string(aws_thread_id_t thread_id, char *buffer, size_t bufsz); /** * Get subject name from log subject. */ AWS_COMMON_API const char *aws_log_subject_name(aws_log_subject_t subject); /** * Connects log subject strings with log subject integer values */ AWS_COMMON_API void aws_register_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list); /** * Disconnects log subject strings with log subject integer values */ AWS_COMMON_API void aws_unregister_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list); /* * Initializes a pipeline logger that is built from the default formatter, a background thread-based channel, and * a file writer. The default logger in almost all circumstances. */ AWS_COMMON_API int aws_logger_init_standard( struct aws_logger *logger, struct aws_allocator *allocator, struct aws_logger_standard_options *options); /* * Initializes a pipeline logger from components that have already been initialized. This is not an ownership transfer. * After the pipeline logger is cleaned up, the components will have to manually be cleaned up by the user. */ AWS_COMMON_API int aws_logger_init_from_external( struct aws_logger *logger, struct aws_allocator *allocator, struct aws_log_formatter *formatter, struct aws_log_channel *channel, struct aws_log_writer *writer, enum aws_log_level level); /* * Pipeline logger vtable for custom configurations */ AWS_COMMON_API extern struct aws_logger_vtable g_pipeline_logger_owned_vtable; /* * Initializes a logger that does not perform any allocation during logging. Log lines larger than the internal * constant are truncated. Formatting matches the standard logger. Used for memory tracing logging. * If no file or filename is set in the aws_logger_standard_options, then it will use stderr. 
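 *
 * A typical setup sketch (assumptions: `alloc` is a valid struct aws_allocator
 * pointer; error handling omitted):
 *
 *   struct aws_logger_standard_options options = {
 *       .level = AWS_LL_INFO,
 *       .file = stderr,
 *   };
 *   struct aws_logger logger;
 *   aws_logger_init_noalloc(&logger, alloc, &options);
 *   aws_logger_set(&logger);
 *   AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "no-alloc logger is installed");
 *   ...
 *   aws_logger_clean_up(&logger);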
*/ AWS_COMMON_API int aws_logger_init_noalloc( struct aws_logger *logger, struct aws_allocator *allocator, struct aws_logger_standard_options *options); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LOGGING_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/lru_cache.h000066400000000000000000000025131456575232400263240ustar00rootroot00000000000000#ifndef AWS_COMMON_LRU_CACHE_H #define AWS_COMMON_LRU_CACHE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * Initializes the Least-recently-used cache. Sets up the underlying linked hash table. * Once `max_items` elements have been added, the least recently used item will be removed. For the other parameters, * see aws/common/hash_table.h. Hash table semantics of these arguments are preserved.(Yes the one that was the answer * to that interview question that one time). */ AWS_COMMON_API struct aws_cache *aws_cache_new_lru( struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t max_items); /** * Accesses the least-recently-used element, sets it to most-recently-used * element, and returns the value. */ AWS_COMMON_API void *aws_lru_cache_use_lru_element(struct aws_cache *cache); /** * Accesses the most-recently-used element and returns its value. */ AWS_COMMON_API void *aws_lru_cache_get_mru_element(const struct aws_cache *cache); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_LRU_CACHE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/macros.h000066400000000000000000000151151456575232400256650ustar00rootroot00000000000000#ifndef AWS_COMMON_MACROS_H #define AWS_COMMON_MACROS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* clang-format off */ /* Use these macros in public header files to suppress unreasonable compiler * warnings. Public header files are included by external applications, * which may set their warning levels pedantically high. * * Developers of AWS libraries should hesitate before adding more warnings to this macro. * Prefer disabling the warning within a .c file, or in the library's CFLAGS, * or push/pop the warning around a single problematic declaration. 
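 *
 * The typical pattern in a public header looks like this (a sketch mirroring
 * the headers in this package):
 *
 *   AWS_PUSH_SANE_WARNING_LEVEL
 *   AWS_EXTERN_C_BEGIN
 *   ... public declarations ...
 *   AWS_EXTERN_C_END
 *   AWS_POP_SANE_WARNING_LEVEL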
*/ #if defined(_MSC_VER) # define AWS_PUSH_SANE_WARNING_LEVEL \ __pragma(warning(push)) \ __pragma(warning(disable : 4820)) /* padding added to struct */ \ __pragma(warning(disable : 4514)) /* unreferenced inline function has been removed */ \ __pragma(warning(disable : 5039)) /* reference to potentially throwing function passed to extern C function */ # define AWS_POP_SANE_WARNING_LEVEL __pragma(warning(pop)) #else # define AWS_PUSH_SANE_WARNING_LEVEL # define AWS_POP_SANE_WARNING_LEVEL #endif /* clang-format on */ #ifdef __cplusplus # define AWS_EXTERN_C_BEGIN extern "C" { # define AWS_EXTERN_C_END } #else # define AWS_EXTERN_C_BEGIN # define AWS_EXTERN_C_END #endif /* __cplusplus */ #define AWS_CONCAT(A, B) A##B #define AWS_STATIC_ASSERT0(cond, msg) typedef char AWS_CONCAT(static_assertion_, msg)[(!!(cond)) * 2 - 1] #define AWS_STATIC_ASSERT1(cond, line) AWS_STATIC_ASSERT0(cond, AWS_CONCAT(at_line_, line)) #define AWS_STATIC_ASSERT(cond) AWS_STATIC_ASSERT1(cond, __LINE__) /* https://stackoverflow.com/questions/9183993/msvc-variadic-macro-expansion */ #define GLUE(x, y) x y #define RETURN_ARG_COUNT(_1_, _2_, _3_, _4_, _5_, count, ...) count #define EXPAND_ARGS(args) RETURN_ARG_COUNT args #define COUNT_ARGS_MAX5(...) EXPAND_ARGS((__VA_ARGS__, 5, 4, 3, 2, 1, 0)) #define OVERLOAD_MACRO2(name, count) name##count #define OVERLOAD_MACRO1(name, count) OVERLOAD_MACRO2(name, count) #define OVERLOAD_MACRO(name, count) OVERLOAD_MACRO1(name, count) #define CALL_OVERLOAD(name, ...) GLUE(OVERLOAD_MACRO(name, COUNT_ARGS_MAX5(__VA_ARGS__)), (__VA_ARGS__)) #define CALL_OVERLOAD_TEST1(x) x #define CALL_OVERLOAD_TEST2(x, y) y #define CALL_OVERLOAD_TEST3(x, y, z) z #define CALL_OVERLOAD_TEST(...) CALL_OVERLOAD(CALL_OVERLOAD_TEST, __VA_ARGS__) AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1) == 1); AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2) == 2); AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3); enum { AWS_CACHE_LINE = 64 }; /** * Format macro for strings of a specified length. * Allows non null-terminated strings to be used with the printf family of functions. 
* Ex: printf("scheme is " PRInSTR, 4, "http://example.org"); // outputs: "scheme is http" */ #define PRInSTR "%.*s" #if defined(_MSC_VER) # include # define AWS_ALIGNED_TYPEDEF(from, to, alignment) typedef __declspec(align(alignment)) from to # define AWS_LIKELY(x) x # define AWS_UNLIKELY(x) x # define AWS_FORCE_INLINE __forceinline # define AWS_NO_INLINE __declspec(noinline) # define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = _alloca(sizeof(type) * (length)) # define AWS_DECLSPEC_NORETURN __declspec(noreturn) # define AWS_ATTRIBUTE_NORETURN #else # if defined(__GNUC__) || defined(__clang__) # define AWS_ALIGNED_TYPEDEF(from, to, alignment) typedef from to __attribute__((aligned(alignment))) # define AWS_TYPE_OF(a) __typeof__(a) # define AWS_LIKELY(x) __builtin_expect(!!(x), 1) # define AWS_UNLIKELY(x) __builtin_expect(!!(x), 0) # define AWS_FORCE_INLINE __attribute__((always_inline)) # define AWS_NO_INLINE __attribute__((noinline)) # define AWS_DECLSPEC_NORETURN # define AWS_ATTRIBUTE_NORETURN __attribute__((noreturn)) # if defined(__cplusplus) # define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = alloca(sizeof(type) * (length)) # else # define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type name[length] # endif /* defined(__cplusplus) */ # endif /* defined(__GNUC__) || defined(__clang__) */ #endif /* defined(_MSC_VER) */ #if defined(__has_feature) # if __has_feature(address_sanitizer) # define AWS_SUPPRESS_ASAN __attribute__((no_sanitize("address"))) # endif #elif defined(__SANITIZE_ADDRESS__) # if defined(__GNUC__) # define AWS_SUPPRESS_ASAN __attribute__((no_sanitize_address)) # elif defined(_MSC_VER) # define AWS_SUPPRESS_ASAN __declspec(no_sanitize_address) # endif #endif #if !defined(AWS_SUPPRESS_ASAN) # define AWS_SUPPRESS_ASAN #endif #if defined(__has_feature) # if __has_feature(thread_sanitizer) # define AWS_SUPPRESS_TSAN __attribute__((no_sanitize("thread"))) # endif #elif defined(__SANITIZE_THREAD__) # if defined(__GNUC__) # define AWS_SUPPRESS_TSAN __attribute__((no_sanitize_thread)) # else # define AWS_SUPPRESS_TSAN # endif #else # define AWS_SUPPRESS_TSAN #endif #if !defined(AWS_SUPPRESS_TSAN) # define AWS_SUPPRESS_TSAN #endif /* If this is C++, restrict isn't supported. If this is not at least C99 on gcc and clang, it isn't supported. * If visual C++ building in C mode, the restrict definition is __restrict. * This just figures all of that out based on who's including this header file. */ #if defined(__cplusplus) # define AWS_RESTRICT #else # if defined(_MSC_VER) # define AWS_RESTRICT __restrict # else # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L # define AWS_RESTRICT restrict # else # define AWS_RESTRICT # endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L */ # endif /* defined(_MSC_VER) */ #endif /* defined(__cplusplus) */ #if defined(_MSC_VER) # define AWS_THREAD_LOCAL __declspec(thread) #else # define AWS_THREAD_LOCAL __thread #endif #define AWS_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) /** * from a pointer and a type of the struct containing the node * this will get you back to the pointer of the object. member is the name of * the instance of struct aws_linked_list_node in your struct. 
*/ #define AWS_CONTAINER_OF(ptr, type, member) ((type *)((uint8_t *)(ptr)-offsetof(type, member))) #endif /* AWS_COMMON_MACROS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.cbmc.inl000066400000000000000000000056421456575232400265740ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_CBMC_INL #define AWS_COMMON_MATH_CBMC_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header is already included, but include it again to make editor * highlighting happier. */ #include AWS_EXTERN_C_BEGIN /* This header does safe operations. Supressing the checks within these functions * avoids unnecessary CBMC assertions */ #pragma CPROVER check push #pragma CPROVER check disable "unsigned-overflow" /** * Multiplies a * b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) { if (__CPROVER_overflow_mult(a, b)) return UINT64_MAX; return a * b; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { if (__CPROVER_overflow_mult(a, b)) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a * b; return AWS_OP_SUCCESS; } /** * Multiplies a * b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) { if (__CPROVER_overflow_mult(a, b)) return UINT32_MAX; return a * b; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if (__CPROVER_overflow_mult(a, b)) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a * b; return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) { if (__CPROVER_overflow_plus(a, b)) return UINT64_MAX; return a + b; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { if (__CPROVER_overflow_plus(a, b)) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a + b; return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) { if (__CPROVER_overflow_plus(a, b)) return UINT32_MAX; return a + b; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if (__CPROVER_overflow_plus(a, b)) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a + b; return AWS_OP_SUCCESS; } #pragma CPROVER check pop AWS_EXTERN_C_END #endif /* AWS_COMMON_MATH_CBMC_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.fallback.inl000066400000000000000000000117321456575232400274240ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_FALLBACK_INL #define AWS_COMMON_MATH_FALLBACK_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header is already included, but include it again to make editor * highlighting happier. 
*/ #include #include AWS_EXTERN_C_BEGIN /** * Multiplies a * b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) { if (a > 0 && b > 0 && a > (UINT64_MAX / b)) return UINT64_MAX; return a * b; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { if (a > 0 && b > 0 && a > (UINT64_MAX / b)) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a * b; return AWS_OP_SUCCESS; } /** * Multiplies a * b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) { if (a > 0 && b > 0 && a > (UINT32_MAX / b)) return UINT32_MAX; return a * b; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if (a > 0 && b > 0 && a > (UINT32_MAX / b)) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a * b; return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) { if ((b > 0) && (a > (UINT64_MAX - b))) return UINT64_MAX; return a + b; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { if ((b > 0) && (a > (UINT64_MAX - b))) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a + b; return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) { if ((b > 0) && (a > (UINT32_MAX - b))) return UINT32_MAX; return a + b; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if ((b > 0) && (a > (UINT32_MAX - b))) return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); *r = a + b; return AWS_OP_SUCCESS; } /* * These are pure C implementations of the count leading/trailing zeros calls * They should not be necessary unless using a really esoteric compiler with * no intrinsics for these functions whatsoever. 
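 *
 * The expected results match the intrinsic-backed versions, for example:
 *
 *   aws_clz_u32(1)          == 31
 *   aws_clz_u32(0x80000000) == 0
 *   aws_ctz_u32(8)          == 3
 *   aws_clz_u32(0) == aws_ctz_u32(0) == 32   (zero returns the bit-width)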
*/ #if !defined(__clang__) && !defined(__GNUC__) /** * Search from the MSB to LSB, looking for a 1 */ AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) { return aws_clz_i32((int32_t)n); } AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) { size_t idx = 0; if (n == 0) { return sizeof(n) * 8; } /* sign bit is the first bit */ if (n < 0) { return 0; } while (n >= 0) { ++idx; n <<= 1; } return idx; } AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) { return aws_clz_i64((int64_t)n); } AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) { size_t idx = 0; if (n == 0) { return sizeof(n) * 8; } /* sign bit is the first bit */ if (n < 0) { return 0; } while (n >= 0) { ++idx; n <<= 1; } return idx; } AWS_STATIC_IMPL size_t aws_clz_size(size_t n) { # if SIZE_BITS == 64 return aws_clz_u64(n); # else return aws_clz_u32(n); # endif } /** * Search from the LSB to MSB, looking for a 1 */ AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) { return aws_ctz_i32((int32_t)n); } AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) { int32_t idx = 0; const int32_t max_bits = (int32_t)(SIZE_BITS / sizeof(uint8_t)); if (n == 0) { return sizeof(n) * 8; } while (idx < max_bits) { if (n & (1 << idx)) { break; } ++idx; } return (size_t)idx; } AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) { return aws_ctz_i64((int64_t)n); } AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) { int64_t idx = 0; const int64_t max_bits = (int64_t)(SIZE_BITS / sizeof(uint8_t)); if (n == 0) { return sizeof(n) * 8; } while (idx < max_bits) { if (n & (1ULL << idx)) { break; } ++idx; } return (size_t)idx; } AWS_STATIC_IMPL size_t aws_ctz_size(size_t n) { # if SIZE_BITS == 64 return aws_ctz_u64(n); # else return aws_ctz_u32(n); # endif } #endif AWS_EXTERN_C_END #endif /* AWS_COMMON_MATH_FALLBACK_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.gcc_arm64_asm.inl000066400000000000000000000143351456575232400302740ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_GCC_ARM64_ASM_INL #define AWS_COMMON_MATH_GCC_ARM64_ASM_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header is already included, but include it again to make editor * highlighting happier. */ #include #include /* clang-format off */ AWS_EXTERN_C_BEGIN /** * Multiplies a * b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) { /* We can use inline assembly to do this efficiently on arm64 by doing a high-mul and checking the upper 64 bits of a 64x64->128b multiply are zero */ uint64_t tmp = 0, res = 0; __asm__("umulh %x[hmul], %x[arga], %x[argb]\n" "mul %x[res], %x[arga], %x[argb]\n" "cmp %x[hmul], #0\n" "csinv %x[res], %x[res], xzr, eq\n" : /* inout: hmul is upper 64b, r is the result */ [hmul] "+&r"(tmp), [res]"+&r"(res) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b) : /* clobbers: cc (cmp clobbers condition codes) */ "cc"); return res; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. 
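 *
 * A typical caller pattern (a sketch; `count` and `size` are hypothetical inputs):
 *
 *   uint64_t total_bytes = 0;
 *   if (aws_mul_u64_checked(count, size, &total_bytes)) {
 *       return AWS_OP_ERR;
 *   }
 *
 * On the failure path AWS_ERROR_OVERFLOW_DETECTED has already been raised via
 * aws_raise_error(), so callers only need to propagate AWS_OP_ERR.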
*/ AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { /* We can use inline assembly to do this efficiently on arm64 by doing a high-mul and checking the upper 64 bits of a 64x64->128b multiply are zero */ uint64_t tmp, res; __asm__("umulh %x[hmul], %x[arga], %x[argb]\n" "mul %x[res], %x[arga], %x[argb]\n" : /* inout: hmul is upper 64b, r is the result */ [hmul] "=&r"(tmp), [res]"=&r"(res) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)); *r = res; if (tmp) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Multiplies a * b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) { /* We can use inline assembly to do this efficiently on arm64 by doing a high-mul and checking the upper 32 bits of a 32x32->64b multiply are zero */ uint64_t res = 0; __asm__("umull %x[res], %w[arga], %w[argb]\n" "cmp xzr, %x[res], lsr #32\n" "csinv %w[res], %w[res], wzr, eq\n" : /* inout: res contains both lower/upper 32b */ [res]"+&r"(res) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b) : /* clobbers: cc (cmp clobbers condition codes) */ "cc"); return res & 0xffffffff; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { /* We can use inline assembly to do this efficiently on arm64 by doing a high-mul and checking the upper 32 bits of a 32x32->64b multiply are zero */ uint64_t res; __asm__("umull %x[res], %w[arga], %w[argb]\n" : /* inout: res is both upper/lower 32b */ [res]"=r"(res) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)); *r = res & 0xffffffff; if (res >> 32) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { /* We can use inline assembly to do this efficiently on arm64 by doing a * 64b + 64b add and checking the carry out */ uint64_t res, flag; __asm__("adds %x[res], %x[arga], %x[argb]\n" "csinv %x[flag], xzr, xzr, cc\n" : /* inout: res is the result of addition; flag is -1 if carry happened */ [res]"=&r"(res), [flag] "=r"(flag) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b) : /* clobbers: cc (cmp clobbers condition codes) */ "cc"); *r = res; if (flag) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) { /* We can use inline assembly to do this efficiently on arm64 by doing a * 64b + 64b add and checking the carry out */ uint64_t res; __asm__("adds %x[res], %x[arga], %x[argb]\n" "csinv %x[res], %x[res], xzr, cc\n" : /* inout: res is the result */ [res]"=&r"(res) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b) : /* clobbers: cc (cmp clobbers condition codes) */ "cc"); return res; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. 
*/ AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { /* We can use inline assembly to do this efficiently on arm64 by doing a * 32b + 32b add and checking the carry out */ uint32_t res, flag; __asm__("adds %w[res], %w[arga], %w[argb]\n" "csinv %w[flag], wzr, wzr, cc\n" : /* inout: res is 32b result */ [res]"=&r"(res), [flag] "=r"(flag) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b) : /* clobbers: cc (cmp clobbers condition codes) */ "cc"); *r = res; if (flag) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) { /* We can use inline assembly to do this efficiently on arm64 by doing a * 32b + 32b add and checking the carry out */ uint32_t res = 0; __asm__("adds %w[res], %w[arga], %w[argb]\n" "csinv %w[res], %w[res], wzr, cc\n" : /* inout: res is the result */ [res]"+&r"(res) : /* in: a and b */ [arga] "r"(a), [argb] "r"(b) : /* clobbers: cc (cmp clobbers condition codes) */ "cc"); return res; } AWS_EXTERN_C_END /* clang-format on */ #endif /* AWS_COMMON_MATH_GCC_ARM64_ASM_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.gcc_builtin.inl000066400000000000000000000036561456575232400301550ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_GCC_BUILTIN_INL #define AWS_COMMON_MATH_GCC_BUILTIN_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header is already included, but include it again to make editor * highlighting happier. */ #include #include /* clang-format off */ AWS_EXTERN_C_BEGIN /** * Search from the MSB to LSB, looking for a 1 */ AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_clz(n); } AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_clz(n); } AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_clzll(n); } AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_clzll(n); } AWS_STATIC_IMPL size_t aws_clz_size(size_t n) { #if SIZE_BITS == 64 return aws_clz_u64(n); #else return aws_clz_u32(n); #endif } /** * Search from the LSB to MSB, looking for a 1 */ AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_ctzl(n); } AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_ctz(n); } AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_ctzll(n); } AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) { if (n == 0) { return sizeof(n) * 8; } return __builtin_ctzll(n); } AWS_STATIC_IMPL size_t aws_ctz_size(size_t n) { #if SIZE_BITS == 64 return aws_ctz_u64(n); #else return aws_ctz_u32(n); #endif } AWS_EXTERN_C_END /* clang-format on */ #endif /* AWS_COMMON_MATH_GCC_BUILTIN_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.gcc_overflow.inl000066400000000000000000000055221456575232400303440ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_GCC_OVERFLOW_INL #define AWS_COMMON_MATH_GCC_OVERFLOW_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header is already included, but include it again to make editor * highlighting happier. 
*/ #include #include AWS_EXTERN_C_BEGIN /** * Multiplies a * b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) { uint64_t res; if (__builtin_mul_overflow(a, b, &res)) { res = UINT64_MAX; } return res; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { if (__builtin_mul_overflow(a, b, r)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Multiplies a * b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) { uint32_t res; if (__builtin_mul_overflow(a, b, &res)) { res = UINT32_MAX; } return res; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if (__builtin_mul_overflow(a, b, r)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { if (__builtin_add_overflow(a, b, r)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) { uint64_t res; if (__builtin_add_overflow(a, b, &res)) { res = UINT64_MAX; } return res; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if (__builtin_add_overflow(a, b, r)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) { uint32_t res; if (__builtin_add_overflow(a, b, &res)) { res = UINT32_MAX; } return res; } AWS_EXTERN_C_END #endif /* AWS_COMMON_MATH_GCC_OVERFLOW_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.gcc_x64_asm.inl000066400000000000000000000150361456575232400277630ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_GCC_X64_ASM_INL #define AWS_COMMON_MATH_GCC_X64_ASM_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header is already included, but include it again to make editor * highlighting happier. */ #include #include /* clang-format off */ AWS_EXTERN_C_BEGIN /** * Multiplies a * b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) { /* We can use inline assembly to do this efficiently on x86-64 and x86. 
we specify rdx as an output, rather than a clobber, because we want to allow it to be allocated as an input register */ uint64_t rdx; __asm__("mulq %q[arg2]\n" /* rax * b, result is in RDX:RAX, OF=CF=(RDX != 0) */ "cmovc %q[saturate], %%rax\n" : /* in/out: %rax = a, out: rdx (ignored) */ "+&a"(a), "=&d"(rdx) : /* in: register only */ [arg2] "r"(b), /* in: saturation value (reg/memory) */ [saturate] "rm"(~0LL) : /* clobbers: cc */ "cc"); (void)rdx; /* suppress unused warnings */ return a; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { /* We can use inline assembly to do this efficiently on x86-64 and x86. */ char flag; uint64_t result = a; __asm__("mulq %q[arg2]\n" /* rax * b, result is in RDX:RAX, OF=CF=(RDX != 0) */ "seto %[flag]\n" /* flag = overflow_bit */ : /* in/out: %rax (first arg & result), %d (flag) */ "+&a"(result), [flag] "=&d"(flag) : /* in: reg for 2nd operand */ [arg2] "r"(b) : /* clobbers: cc (d is used for flag so no need to clobber)*/ "cc"); *r = result; if (flag) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Multiplies a * b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) { /* We can use inline assembly to do this efficiently on x86-64 and x86. we specify edx as an output, rather than a clobber, because we want to allow it to be allocated as an input register */ uint32_t edx; __asm__("mull %k[arg2]\n" /* eax * b, result is in EDX:EAX, OF=CF=(EDX != 0) */ /* cmov isn't guaranteed to be available on x86-32 */ "jnc .1f%=\n" "mov $0xFFFFFFFF, %%eax\n" ".1f%=:" : /* in/out: %eax = result/a, out: edx (ignored) */ "+&a"(a), "=&d"(edx) : /* in: operand 2 in reg */ [arg2] "r"(b) : /* clobbers: cc */ "cc"); (void)edx; /* suppress unused warnings */ return a; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { /* We can use inline assembly to do this efficiently on x86-64 and x86. */ uint32_t result = a; char flag; /** * Note: We use SETNO which only takes a byte register. To make this easy, * we'll write it to dl (which we throw away anyway) and mask off the high bits. */ __asm__("mull %k[arg2]\n" /* eax * b, result is in EDX:EAX, OF=CF=(EDX != 0) */ "seto %[flag]\n" /* flag = overflow_bit */ : /* in/out: %eax (first arg & result), %d (flag) */ "+&a"(result), [flag] "=&d"(flag) : /* in: reg for 2nd operand */ [arg2] "r"(b) : /* clobbers: cc (d is used for flag so no need to clobber)*/ "cc"); *r = result; if (flag) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { /* We can use inline assembly to do this efficiently on x86-64 and x86. */ char flag; __asm__("addq %[argb], %[arga]\n" /* [arga] = [arga] + [argb] */ "setc %[flag]\n" /* [flag] = 1 if overflow, 0 otherwise */ : /* in/out: */ [arga] "+r"(a), [flag] "=&r"(flag) : /* in: */ [argb] "r"(b) : /* clobbers: */ "cc"); *r = a; if (flag) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Adds a + b. 
If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) { /* We can use inline assembly to do this efficiently on x86-64 and x86. */ __asm__("addq %[arg1], %[arg2]\n" /* [arga] = [arga] + [argb] */ "cmovc %q[saturate], %[arg2]\n" : /* in/out: %rax = a, out: rdx (ignored) */ [arg2] "+r"(b) : /* in: register only */ [arg1] "r"(a), /* in: saturation value (reg/memory) */ [saturate] "rm"(~0LL) : /* clobbers: cc */ "cc"); return b; } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { /* We can use inline assembly to do this efficiently on x86-64 and x86. */ char flag; __asm__("addl %[argb], %[arga]\n" /* [arga] = [arga] + [argb] */ "setc %[flag]\n" /* [flag] = 1 if overflow, 0 otherwise */ : /* in/out: */ [arga] "+r"(a), [flag] "=&r"(flag) : /* in: */ [argb] "r"(b) : /* clobbers: */ "cc"); *r = a; if (flag) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } /** * Adds a + b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) { /* We can use inline assembly to do this efficiently on x86-64 and x86. */ __asm__("addl %[arg1], %[arg2]\n" /* [arga] = [arga] + [argb] */ /* cmov isn't guaranteed to be available on x86-32 */ "jnc .1f%=\n" "mov $0xFFFFFFFF, %%eax\n" ".1f%=:" : /* in/out: %rax = a, out: rdx (ignored) */ [arg2] "+a"(b) : /* in: register only */ [arg1] "r"(a) : /* clobbers: cc */ "cc"); return b; } AWS_EXTERN_C_END /* clang-format on */ #endif /* AWS_COMMON_MATH_GCC_X64_ASM_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.h000066400000000000000000000164171456575232400253400ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_H #define AWS_COMMON_MATH_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL /* The number of bits in a size_t variable */ #if SIZE_MAX == UINT32_MAX # define SIZE_BITS 32 #elif SIZE_MAX == UINT64_MAX # define SIZE_BITS 64 #else # error "Target not supported" #endif /* The largest power of two that can be stored in a size_t */ #define SIZE_MAX_POWER_OF_TWO (((size_t)1) << (SIZE_BITS - 1)) AWS_EXTERN_C_BEGIN #if defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) && (defined(__clang__) || !defined(__cplusplus)) || \ (defined(__x86_64__) || defined(__aarch64__)) && defined(AWS_HAVE_GCC_INLINE_ASM) || \ defined(AWS_HAVE_MSVC_INTRINSICS_X64) || defined(CBMC) || !defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) /* In all these cases, we can use fast static inline versions of this code */ # define AWS_COMMON_MATH_API AWS_STATIC_IMPL #else /* * We got here because we are building in C++ mode but we only support overflow extensions * in C mode. Because the fallback is _slow_ (involving a division), we'd prefer to make a * non-inline call to the fast C intrinsics. */ # define AWS_COMMON_MATH_API AWS_COMMON_API #endif /** * Multiplies a * b. If the result overflows, returns 2^64 - 1. */ AWS_COMMON_MATH_API uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b); /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_COMMON_MATH_API int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r); /** * Multiplies a * b. 
If the result overflows, returns 2^32 - 1. */ AWS_COMMON_MATH_API uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b); /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_COMMON_MATH_API int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r); /** * Adds a + b. If the result overflows returns 2^64 - 1. */ AWS_COMMON_MATH_API uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b); /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_COMMON_MATH_API int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r); /** * Adds a + b. If the result overflows returns 2^32 - 1. */ AWS_COMMON_MATH_API uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b); /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_COMMON_MATH_API int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r); /** * Subtracts a - b. If the result overflows returns 0. */ AWS_STATIC_IMPL uint64_t aws_sub_u64_saturating(uint64_t a, uint64_t b); /** * If a - b overflows, returns AWS_OP_ERR; otherwise subtracts * a - b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_sub_u64_checked(uint64_t a, uint64_t b, uint64_t *r); /** * Subtracts a - b. If the result overflows returns 0. */ AWS_STATIC_IMPL uint32_t aws_sub_u32_saturating(uint32_t a, uint32_t b); /** * If a - b overflows, returns AWS_OP_ERR; otherwise subtracts * a - b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_sub_u32_checked(uint32_t a, uint32_t b, uint32_t *r); /** * Multiplies a * b. If the result overflows, returns SIZE_MAX. */ AWS_STATIC_IMPL size_t aws_mul_size_saturating(size_t a, size_t b); /** * Multiplies a * b and returns the result in *r. If the result * overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_size_checked(size_t a, size_t b, size_t *r); /** * Adds a + b. If the result overflows returns SIZE_MAX. */ AWS_STATIC_IMPL size_t aws_add_size_saturating(size_t a, size_t b); /** * Adds a + b and returns the result in *r. If the result * overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_size_checked(size_t a, size_t b, size_t *r); /** * Adds [num] arguments (expected to be of size_t), and returns the result in *r. * If the result overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS. */ AWS_COMMON_API int aws_add_size_checked_varargs(size_t num, size_t *r, ...); /** * Subtracts a - b. If the result overflows returns 0. */ AWS_STATIC_IMPL size_t aws_sub_size_saturating(size_t a, size_t b); /** * If a - b overflows, returns AWS_OP_ERR; otherwise subtracts * a - b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_sub_size_checked(size_t a, size_t b, size_t *r); /** * Function to check if x is power of 2 */ AWS_STATIC_IMPL bool aws_is_power_of_two(const size_t x); /** * Function to find the smallest result that is power of 2 >= n. Returns AWS_OP_ERR if this cannot * be done without overflow */ AWS_STATIC_IMPL int aws_round_up_to_power_of_two(size_t n, size_t *result); /** * Counts the number of leading 0 bits in an integer. 0 will return the size of the integer in bits. 
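 * e.g. aws_clz_u32(0x00010000) == 15 and aws_clz_u64(0) == 64.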
*/ AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n); AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n); AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n); AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n); AWS_STATIC_IMPL size_t aws_clz_size(size_t n); /** * Counts the number of trailing 0 bits in an integer. 0 will return the size of the integer in bits. */ AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n); AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n); AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n); AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n); AWS_STATIC_IMPL size_t aws_ctz_size(size_t n); AWS_STATIC_IMPL uint8_t aws_min_u8(uint8_t a, uint8_t b); AWS_STATIC_IMPL uint8_t aws_max_u8(uint8_t a, uint8_t b); AWS_STATIC_IMPL int8_t aws_min_i8(int8_t a, int8_t b); AWS_STATIC_IMPL int8_t aws_max_i8(int8_t a, int8_t b); AWS_STATIC_IMPL uint16_t aws_min_u16(uint16_t a, uint16_t b); AWS_STATIC_IMPL uint16_t aws_max_u16(uint16_t a, uint16_t b); AWS_STATIC_IMPL int16_t aws_min_i16(int16_t a, int16_t b); AWS_STATIC_IMPL int16_t aws_max_i16(int16_t a, int16_t b); AWS_STATIC_IMPL uint32_t aws_min_u32(uint32_t a, uint32_t b); AWS_STATIC_IMPL uint32_t aws_max_u32(uint32_t a, uint32_t b); AWS_STATIC_IMPL int32_t aws_min_i32(int32_t a, int32_t b); AWS_STATIC_IMPL int32_t aws_max_i32(int32_t a, int32_t b); AWS_STATIC_IMPL uint64_t aws_min_u64(uint64_t a, uint64_t b); AWS_STATIC_IMPL uint64_t aws_max_u64(uint64_t a, uint64_t b); AWS_STATIC_IMPL int64_t aws_min_i64(int64_t a, int64_t b); AWS_STATIC_IMPL int64_t aws_max_i64(int64_t a, int64_t b); AWS_STATIC_IMPL size_t aws_min_size(size_t a, size_t b); AWS_STATIC_IMPL size_t aws_max_size(size_t a, size_t b); AWS_STATIC_IMPL int aws_min_int(int a, int b); AWS_STATIC_IMPL int aws_max_int(int a, int b); AWS_STATIC_IMPL float aws_min_float(float a, float b); AWS_STATIC_IMPL float aws_max_float(float a, float b); AWS_STATIC_IMPL double aws_min_double(double a, double b); AWS_STATIC_IMPL double aws_max_double(double a, double b); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_MATH_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.inl000066400000000000000000000167251456575232400256750ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_INL #define AWS_COMMON_MATH_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include AWS_EXTERN_C_BEGIN #if defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) && (defined(__clang__) || !defined(__cplusplus)) /* * GCC and clang have these super convenient overflow checking builtins... * but (in the case of GCC) they're only available when building C source. * We'll fall back to one of the other inlinable variants (or a non-inlined version) * if we are building this header on G++. */ # include #elif defined(__x86_64__) && defined(AWS_HAVE_GCC_INLINE_ASM) # include #elif defined(__aarch64__) && defined(AWS_HAVE_GCC_INLINE_ASM) # include #elif defined(AWS_HAVE_MSVC_INTRINSICS_X64) # include #elif defined(CBMC) # include #else # ifndef AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS /* Fall back to the pure-C implementations */ # include # else /* * We got here because we are building in C++ mode but we only support overflow extensions * in C mode. Because the fallback is _slow_ (involving a division), we'd prefer to make a * non-inline call to the fast C intrinsics. 
*/ # endif /* AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS */ #endif /* defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) && (defined(__clang__) || !defined(__cplusplus)) */ #if defined(__clang__) || defined(__GNUC__) # include #endif #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4127) /*Disable "conditional expression is constant" */ #endif /* _MSC_VER */ AWS_STATIC_IMPL uint64_t aws_sub_u64_saturating(uint64_t a, uint64_t b) { return a <= b ? 0 : a - b; } AWS_STATIC_IMPL int aws_sub_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { if (a < b) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *r = a - b; return AWS_OP_SUCCESS; } AWS_STATIC_IMPL uint32_t aws_sub_u32_saturating(uint32_t a, uint32_t b) { return a <= b ? 0 : a - b; } AWS_STATIC_IMPL int aws_sub_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if (a < b) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *r = a - b; return AWS_OP_SUCCESS; } /** * Multiplies a * b. If the result overflows, returns SIZE_MAX. */ AWS_STATIC_IMPL size_t aws_mul_size_saturating(size_t a, size_t b) { #if SIZE_BITS == 32 return (size_t)aws_mul_u32_saturating(a, b); #elif SIZE_BITS == 64 return (size_t)aws_mul_u64_saturating(a, b); #else # error "Target not supported" #endif } /** * Multiplies a * b and returns the result in *r. If the result * overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_size_checked(size_t a, size_t b, size_t *r) { #if SIZE_BITS == 32 return aws_mul_u32_checked(a, b, (uint32_t *)r); #elif SIZE_BITS == 64 return aws_mul_u64_checked(a, b, (uint64_t *)r); #else # error "Target not supported" #endif } /** * Adds a + b. If the result overflows returns SIZE_MAX. */ AWS_STATIC_IMPL size_t aws_add_size_saturating(size_t a, size_t b) { #if SIZE_BITS == 32 return (size_t)aws_add_u32_saturating(a, b); #elif SIZE_BITS == 64 return (size_t)aws_add_u64_saturating(a, b); #else # error "Target not supported" #endif } /** * Adds a + b and returns the result in *r. If the result * overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_size_checked(size_t a, size_t b, size_t *r) { #if SIZE_BITS == 32 return aws_add_u32_checked(a, b, (uint32_t *)r); #elif SIZE_BITS == 64 return aws_add_u64_checked(a, b, (uint64_t *)r); #else # error "Target not supported" #endif } AWS_STATIC_IMPL size_t aws_sub_size_saturating(size_t a, size_t b) { #if SIZE_BITS == 32 return (size_t)aws_sub_u32_saturating(a, b); #elif SIZE_BITS == 64 return (size_t)aws_sub_u64_saturating(a, b); #else # error "Target not supported" #endif } AWS_STATIC_IMPL int aws_sub_size_checked(size_t a, size_t b, size_t *r) { #if SIZE_BITS == 32 return aws_sub_u32_checked(a, b, (uint32_t *)r); #elif SIZE_BITS == 64 return aws_sub_u64_checked(a, b, (uint64_t *)r); #else # error "Target not supported" #endif } /** * Function to check if x is power of 2 */ AWS_STATIC_IMPL bool aws_is_power_of_two(const size_t x) { /* First x in the below expression is for the case when x is 0 */ return x && (!(x & (x - 1))); } /** * Function to find the smallest result that is power of 2 >= n. 
Returns AWS_OP_ERR if this cannot * be done without overflow */ AWS_STATIC_IMPL int aws_round_up_to_power_of_two(size_t n, size_t *result) { if (n == 0) { *result = 1; return AWS_OP_SUCCESS; } if (n > SIZE_MAX_POWER_OF_TWO) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; #if SIZE_BITS == 64 n |= n >> 32; #endif n++; *result = n; return AWS_OP_SUCCESS; } #ifdef _MSC_VER # pragma warning(pop) #endif /* _MSC_VER */ AWS_STATIC_IMPL uint8_t aws_min_u8(uint8_t a, uint8_t b) { return a < b ? a : b; } AWS_STATIC_IMPL uint8_t aws_max_u8(uint8_t a, uint8_t b) { return a > b ? a : b; } AWS_STATIC_IMPL int8_t aws_min_i8(int8_t a, int8_t b) { return a < b ? a : b; } AWS_STATIC_IMPL int8_t aws_max_i8(int8_t a, int8_t b) { return a > b ? a : b; } AWS_STATIC_IMPL uint16_t aws_min_u16(uint16_t a, uint16_t b) { return a < b ? a : b; } AWS_STATIC_IMPL uint16_t aws_max_u16(uint16_t a, uint16_t b) { return a > b ? a : b; } AWS_STATIC_IMPL int16_t aws_min_i16(int16_t a, int16_t b) { return a < b ? a : b; } AWS_STATIC_IMPL int16_t aws_max_i16(int16_t a, int16_t b) { return a > b ? a : b; } AWS_STATIC_IMPL uint32_t aws_min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; } AWS_STATIC_IMPL uint32_t aws_max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; } AWS_STATIC_IMPL int32_t aws_min_i32(int32_t a, int32_t b) { return a < b ? a : b; } AWS_STATIC_IMPL int32_t aws_max_i32(int32_t a, int32_t b) { return a > b ? a : b; } AWS_STATIC_IMPL uint64_t aws_min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; } AWS_STATIC_IMPL uint64_t aws_max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; } AWS_STATIC_IMPL int64_t aws_min_i64(int64_t a, int64_t b) { return a < b ? a : b; } AWS_STATIC_IMPL int64_t aws_max_i64(int64_t a, int64_t b) { return a > b ? a : b; } AWS_STATIC_IMPL size_t aws_min_size(size_t a, size_t b) { return a < b ? a : b; } AWS_STATIC_IMPL size_t aws_max_size(size_t a, size_t b) { return a > b ? a : b; } AWS_STATIC_IMPL int aws_min_int(int a, int b) { return a < b ? a : b; } AWS_STATIC_IMPL int aws_max_int(int a, int b) { return a > b ? a : b; } AWS_STATIC_IMPL float aws_min_float(float a, float b) { return a < b ? a : b; } AWS_STATIC_IMPL float aws_max_float(float a, float b) { return a > b ? a : b; } AWS_STATIC_IMPL double aws_min_double(double a, double b) { return a < b ? a : b; } AWS_STATIC_IMPL double aws_max_double(double a, double b) { return a > b ? a : b; } AWS_EXTERN_C_END #endif /* AWS_COMMON_MATH_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/math.msvc.inl000066400000000000000000000163341456575232400266400ustar00rootroot00000000000000#ifndef AWS_COMMON_MATH_MSVC_INL #define AWS_COMMON_MATH_MSVC_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This header is already included, but include it again to make editor * highlighting happier. */ #include #include #include /* This file generates level 4 compiler warnings in Visual Studio 2017 and older */ #pragma warning(push, 3) #include #pragma warning(pop) AWS_EXTERN_C_BEGIN /** * Multiplies a * b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) { uint64_t out; uint64_t ret_val = _umul128(a, b, &out); return (out == 0) ? ret_val : UINT64_MAX; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. 
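 *
 * A minimal usage sketch (illustrative only; "count" and "elem_size" are
 * hypothetical caller variables, not part of this header):
 *
 *     uint64_t total_bytes = 0;
 *     if (aws_mul_u64_checked(count, elem_size, &total_bytes) != AWS_OP_SUCCESS) {
 *         return AWS_OP_ERR;  // AWS_ERROR_OVERFLOW_DETECTED has already been raised
 *     }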
*/ AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { uint64_t out; *r = _umul128(a, b, &out); if (out != 0) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } static uint32_t (*s_mul_u32_saturating_fn_ptr)(uint32_t a, uint32_t b) = NULL; static uint32_t s_mulx_u32_saturating(uint32_t a, uint32_t b) { uint32_t high_32; uint32_t ret_val = _mulx_u32(a, b, &high_32); return (high_32 == 0) ? ret_val : UINT32_MAX; } static uint32_t s_emulu_saturating(uint32_t a, uint32_t b) { uint64_t result = __emulu(a, b); return (result > UINT32_MAX) ? UINT32_MAX : (uint32_t)result; } /** * Multiplies a * b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) { if (AWS_UNLIKELY(!s_mul_u32_saturating_fn_ptr)) { if (aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2)) { s_mul_u32_saturating_fn_ptr = s_mulx_u32_saturating; } else { /* If BMI2 unavailable, use __emulu instead */ s_mul_u32_saturating_fn_ptr = s_emulu_saturating; } } return s_mul_u32_saturating_fn_ptr(a, b); } static int (*s_mul_u32_checked_fn_ptr)(uint32_t a, uint32_t b, uint32_t *r) = NULL; static int s_mulx_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { uint32_t high_32; *r = _mulx_u32(a, b, &high_32); if (high_32 != 0) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; } static int s_emulu_checked(uint32_t a, uint32_t b, uint32_t *r) { uint64_t result = __emulu(a, b); if (result > UINT32_MAX) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *r = (uint32_t)result; return AWS_OP_SUCCESS; } /** * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies * a * b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { if (AWS_UNLIKELY(!s_mul_u32_checked_fn_ptr)) { if (aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2)) { s_mul_u32_checked_fn_ptr = s_mulx_u32_checked; } else { /* If BMI2 unavailable, use __emulu instead */ s_mul_u32_checked_fn_ptr = s_emulu_checked; } } return s_mul_u32_checked_fn_ptr(a, b, r); } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. */ AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) { #if !defined(_MSC_VER) || _MSC_VER < 1920 /* Fallback MSVC 2017 and older, _addcarry doesn't work correctly for those compiler */ if ((b > 0) && (a > (UINT64_MAX - b))) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *r = a + b; return AWS_OP_SUCCESS; #else if (_addcarry_u64((uint8_t)0, a, b, r)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; #endif } /** * Adds a + b. If the result overflows, returns 2^64 - 1. */ AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) { #if !defined(_MSC_VER) || _MSC_VER < 1920 /* Fallback MSVC 2017 and older, _addcarry doesn't work correctly for those compiler */ if ((b > 0) && (a > (UINT64_MAX - b))) { return UINT64_MAX; } return a + b; #else uint64_t res = 0; if (_addcarry_u64((uint8_t)0, a, b, &res)) { res = UINT64_MAX; } return res; #endif } /** * If a + b overflows, returns AWS_OP_ERR; otherwise adds * a + b, returns the result in *r, and returns AWS_OP_SUCCESS. 
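 *
 * Usage mirrors the 64-bit checked add above; prefer the _saturating variants
 * when clamping to the maximum value is acceptable. Illustrative sketch only
 * ("offset" and "len" are hypothetical):
 *
 *     uint32_t end = 0;
 *     if (aws_add_u32_checked(offset, len, &end) != AWS_OP_SUCCESS) {
 *         return AWS_OP_ERR;  // the addition would have wrapped past UINT32_MAX
 *     }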
*/ AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) { #if !defined(_MSC_VER) || _MSC_VER < 1920 /* Fallback MSVC 2017 and older, _addcarry doesn't work correctly for those compiler */ if ((b > 0) && (a > (UINT32_MAX - b))) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *r = a + b; return AWS_OP_SUCCESS; #else if (_addcarry_u32((uint8_t)0, a, b, r)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } return AWS_OP_SUCCESS; #endif } /** * Adds a + b. If the result overflows, returns 2^32 - 1. */ AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) { #if !defined(_MSC_VER) || _MSC_VER < 1920 /* Fallback MSVC 2017 and older, _addcarry doesn't work correctly for those compiler */ if ((b > 0) && (a > (UINT32_MAX - b))) return UINT32_MAX; return a + b; #else uint32_t res = 0; if (_addcarry_u32((uint8_t)0, a, b, &res)) { res = UINT32_MAX; } return res; #endif } /** * Search from the MSB to LSB, looking for a 1 */ AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) { unsigned long idx = 0; if (_BitScanReverse(&idx, n)) { return 31 - idx; } return 32; } AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) { unsigned long idx = 0; if (_BitScanReverse(&idx, (unsigned long)n)) { return 31 - idx; } return 32; } AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) { unsigned long idx = 0; if (_BitScanReverse64(&idx, n)) { return 63 - idx; } return 64; } AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) { unsigned long idx = 0; if (_BitScanReverse64(&idx, (uint64_t)n)) { return 63 - idx; } return 64; } AWS_STATIC_IMPL size_t aws_clz_size(size_t n) { #if SIZE_BITS == 64 return aws_clz_u64(n); #else return aws_clz_u32(n); #endif } /** * Search from the LSB to MSB, looking for a 1 */ AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) { unsigned long idx = 0; if (_BitScanForward(&idx, n)) { return idx; } return 32; } AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) { unsigned long idx = 0; if (_BitScanForward(&idx, (uint32_t)n)) { return idx; } return 32; } AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) { unsigned long idx = 0; if (_BitScanForward64(&idx, n)) { return idx; } return 64; } AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) { unsigned long idx = 0; if (_BitScanForward64(&idx, (uint64_t)n)) { return idx; } return 64; } AWS_STATIC_IMPL size_t aws_ctz_size(size_t n) { #if SIZE_BITS == 64 return aws_ctz_u64(n); #else return aws_ctz_u32(n); #endif } AWS_EXTERN_C_END #endif /* WS_COMMON_MATH_MSVC_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/mutex.h000066400000000000000000000040621456575232400255420ustar00rootroot00000000000000#ifndef AWS_COMMON_MUTEX_H #define AWS_COMMON_MUTEX_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #ifdef _WIN32 /* NOTE: Do not use this macro before including windows.h */ # define AWSMUTEX_TO_WINDOWS(pMutex) (PSRWLOCK) & (pMutex)->mutex_handle #else # include #endif AWS_PUSH_SANE_WARNING_LEVEL struct aws_mutex { #ifdef _WIN32 void *mutex_handle; #else pthread_mutex_t mutex_handle; #endif bool initialized; }; #ifdef _WIN32 # define AWS_MUTEX_INIT \ { .mutex_handle = NULL, .initialized = true } #else # define AWS_MUTEX_INIT \ { .mutex_handle = PTHREAD_MUTEX_INITIALIZER, .initialized = true } #endif AWS_EXTERN_C_BEGIN /** * Initializes a new platform instance of mutex. */ AWS_COMMON_API int aws_mutex_init(struct aws_mutex *mutex); /** * Cleans up internal resources. 
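 *
 * Typical lifecycle, shown as an illustrative sketch only:
 *
 *     struct aws_mutex mutex = AWS_MUTEX_INIT;   // or: aws_mutex_init(&mutex);
 *     aws_mutex_lock(&mutex);
 *     // ... critical section ...
 *     aws_mutex_unlock(&mutex);
 *     aws_mutex_clean_up(&mutex);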
*/ AWS_COMMON_API void aws_mutex_clean_up(struct aws_mutex *mutex); /** * Blocks until it acquires the lock. While on some platforms such as Windows, * this may behave as a reentrant mutex, you should not treat it like one. On * platforms it is possible for it to be non-reentrant, it will be. */ AWS_COMMON_API int aws_mutex_lock(struct aws_mutex *mutex); /** * Attempts to acquire the lock but returns immediately if it can not. * While on some platforms such as Windows, this may behave as a reentrant mutex, * you should not treat it like one. On platforms it is possible for it to be non-reentrant, it will be. * Note: For windows, minimum support server version is Windows Server 2008 R2 [desktop apps | UWP apps] */ AWS_COMMON_API int aws_mutex_try_lock(struct aws_mutex *mutex); /** * Releases the lock. */ AWS_COMMON_API int aws_mutex_unlock(struct aws_mutex *mutex); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_MUTEX_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/package.h000066400000000000000000000012001456575232400257620ustar00rootroot00000000000000#ifndef AWS_COMMON_PACKAGE_H #define AWS_COMMON_PACKAGE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * Preliminary cap on the number of possible aws-c-libraries participating in shared enum ranges for * errors, log subjects, and other cross-library enums. Expandable as needed */ #define AWS_PACKAGE_SLOTS 16 /* * Each aws-c-* and aws-crt-* library has a unique package id starting from zero. These are used to macro-calculate * correct ranges for the cross-library enumerations. */ #define AWS_C_COMMON_PACKAGE_ID 0 #endif /* AWS_COMMON_PACKAGE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/platform.h000066400000000000000000000017021456575232400262220ustar00rootroot00000000000000#ifndef AWS_COMMON_PLATFORM_H #define AWS_COMMON_PLATFORM_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #ifdef _WIN32 # define AWS_OS_WINDOWS /* indicate whether this is for Windows desktop, or UWP or Windows S, or other Windows-like devices */ # if defined(AWS_HAVE_WINAPI_DESKTOP) # define AWS_OS_WINDOWS_DESKTOP # endif #elif __APPLE__ # define AWS_OS_APPLE # include "TargetConditionals.h" # if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE # define AWS_OS_IOS # elif defined(TARGET_OS_WATCH) && TARGET_OS_WATCH # define AWS_OS_WATCHOS # elif defined(TARGET_OS_TV) && TARGET_OS_TV # define AWS_OS_TVOS # else # define AWS_OS_MACOS # endif #elif __linux__ # define AWS_OS_LINUX #endif #if defined(_POSIX_VERSION) # define AWS_OS_POSIX #endif #endif /* AWS_COMMON_PLATFORM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/posix/000077500000000000000000000000001456575232400253675ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/posix/common.inl000066400000000000000000000017271456575232400273720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #ifndef AWS_COMMON_POSIX_COMMON_INL #define AWS_COMMON_POSIX_COMMON_INL #include #include AWS_EXTERN_C_BEGIN static inline int aws_private_convert_and_raise_error_code(int error_code) { switch (error_code) { case 0: return AWS_OP_SUCCESS; case EINVAL: return aws_raise_error(AWS_ERROR_MUTEX_NOT_INIT); case EBUSY: return aws_raise_error(AWS_ERROR_MUTEX_TIMEOUT); case EPERM: return aws_raise_error(AWS_ERROR_MUTEX_CALLER_NOT_OWNER); case ENOMEM: return aws_raise_error(AWS_ERROR_OOM); case EDEADLK: return aws_raise_error(AWS_ERROR_THREAD_DEADLOCK_DETECTED); default: return aws_raise_error(AWS_ERROR_MUTEX_FAILED); } } AWS_EXTERN_C_END #endif /* AWS_COMMON_POSIX_COMMON_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/predicates.h000066400000000000000000000021331456575232400265200ustar00rootroot00000000000000#ifndef AWS_COMMON_PREDICATES_H #define AWS_COMMON_PREDICATES_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * Returns whether all bytes of the two byte arrays match. */ #if defined(AWS_DEEP_CHECKS) && (AWS_DEEP_CHECKS == 1) # ifdef CBMC /* clang-format off */ # define AWS_BYTES_EQ(arr1, arr2, len) \ __CPROVER_forall { \ int i; \ (i >= 0 && i < len) ==> ((const uint8_t *)&arr1)[i] == ((const uint8_t *)&arr2)[i] \ } /* clang-format on */ # else # define AWS_BYTES_EQ(arr1, arr2, len) (memcmp(arr1, arr2, len) == 0) # endif /* CBMC */ #else # define AWS_BYTES_EQ(arr1, arr2, len) (1) #endif /* (AWS_DEEP_CHECKS == 1) */ #endif /* AWS_COMMON_PREDICATES_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/priority_queue.h000066400000000000000000000166341456575232400274750ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIORITY_QUEUE_H #define AWS_COMMON_PRIORITY_QUEUE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL /* The comparator should return a positive value if the second argument has a * higher priority than the first; Otherwise, it should return a negative value * or zero. NOTE: priority_queue pops its highest priority element first. For * example: int cmp(const void *a, const void *b) { return a < b; } would result * in a max heap, while: int cmp(const void *a, const void *b) { return a > b; } * would result in a min heap. */ typedef int(aws_priority_queue_compare_fn)(const void *a, const void *b); struct aws_priority_queue { /** * predicate that determines the priority of the elements in the queue. */ aws_priority_queue_compare_fn *pred; /** * The underlying container storing the queue elements. */ struct aws_array_list container; /** * An array of pointers to backpointer elements. This array is initialized when * the first call to aws_priority_queue_push_bp is made, and is subsequently maintained * through any heap node manipulations. * * Each element is a struct aws_priority_queue_node *, pointing to a backpointer field * owned by the calling code, or a NULL. The backpointer field is continually updated * with information needed to locate and remove a specific node later on. */ struct aws_array_list backpointers; }; struct aws_priority_queue_node { /** The current index of the node in question, or SIZE_MAX if the node has been removed. */ size_t current_index; }; AWS_EXTERN_C_BEGIN /** * Initializes a priority queue struct for use. 
This mode will grow memory automatically (exponential model) * Default size is the inital size of the queue * item_size is the size of each element in bytes. Mixing items types is not supported by this API. * pred is the function that will be used to determine priority. */ AWS_COMMON_API int aws_priority_queue_init_dynamic( struct aws_priority_queue *queue, struct aws_allocator *alloc, size_t default_size, size_t item_size, aws_priority_queue_compare_fn *pred); /** * Initializes a priority queue struct for use. This mode will not allocate any additional memory. When the heap fills * new enqueue operations will fail with AWS_ERROR_PRIORITY_QUEUE_FULL. * * Heaps initialized using this call do not support the aws_priority_queue_push_ref call with a non-NULL backpointer * parameter. * * heap is the raw memory allocated for this priority_queue * item_count is the maximum number of elements the raw heap can contain * item_size is the size of each element in bytes. Mixing items types is not supported by this API. * pred is the function that will be used to determine priority. */ AWS_COMMON_API void aws_priority_queue_init_static( struct aws_priority_queue *queue, void *heap, size_t item_count, size_t item_size, aws_priority_queue_compare_fn *pred); /** * Checks that the backpointer at a specific index of the queue is * NULL or points to a correctly allocated aws_priority_queue_node. */ bool aws_priority_queue_backpointer_index_valid(const struct aws_priority_queue *const queue, size_t index); /** * Checks that the backpointers of the priority queue are either NULL * or correctly allocated to point at aws_priority_queue_nodes. This * check is O(n), as it accesses every backpointer in a loop, and thus * shouldn't be used carelessly. */ bool aws_priority_queue_backpointers_valid_deep(const struct aws_priority_queue *const queue); /** * Checks that the backpointers of the priority queue satisfy validity * constraints. */ bool aws_priority_queue_backpointers_valid(const struct aws_priority_queue *const queue); /** * Set of properties of a valid aws_priority_queue. */ AWS_COMMON_API bool aws_priority_queue_is_valid(const struct aws_priority_queue *const queue); /** * Cleans up any internally allocated memory and resets the struct for reuse or deletion. */ AWS_COMMON_API void aws_priority_queue_clean_up(struct aws_priority_queue *queue); /** * Copies item into the queue and places it in the proper priority order. Complexity: O(log(n)). */ AWS_COMMON_API int aws_priority_queue_push(struct aws_priority_queue *queue, void *item); /** * Copies item into the queue and places it in the proper priority order. Complexity: O(log(n)). * * If the backpointer parameter is non-null, the heap will continually update the pointed-to field * with information needed to remove the node later on. *backpointer must remain valid until the node * is removed from the heap, and may be updated on any mutating operation on the priority queue. * * If the node is removed, the backpointer will be set to a sentinel value that indicates that the * node has already been removed. It is safe (and a no-op) to call aws_priority_queue_remove with * such a sentinel value. */ AWS_COMMON_API int aws_priority_queue_push_ref( struct aws_priority_queue *queue, void *item, struct aws_priority_queue_node *backpointer); /** * Copies the element of the highest priority, and removes it from the queue.. Complexity: O(log(n)). * If queue is empty, AWS_ERROR_PRIORITY_QUEUE_EMPTY will be raised. 
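 *
 * Illustrative push/pop sketch; "allocator" and the comparator "s_cmp_ints"
 * are assumed to be supplied by the caller (see the comparator contract at the
 * top of this header):
 *
 *     struct aws_priority_queue queue;
 *     aws_priority_queue_init_dynamic(&queue, allocator, 16, sizeof(int), s_cmp_ints);
 *     int value = 42;
 *     aws_priority_queue_push(&queue, &value);
 *     int top = 0;
 *     aws_priority_queue_pop(&queue, &top);   // copies out the highest-priority element
 *     aws_priority_queue_clean_up(&queue);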
*/ AWS_COMMON_API int aws_priority_queue_pop(struct aws_priority_queue *queue, void *item); /** * Removes a specific node from the priority queue. Complexity: O(log(n)) * After removing a node (using either _remove or _pop), the backpointer set at push_ref time is set * to a sentinel value. If this sentinel value is passed to aws_priority_queue_remove, * AWS_ERROR_PRIORITY_QUEUE_BAD_NODE will be raised. Note, however, that passing uninitialized * aws_priority_queue_nodes, or ones from different priority queues, results in undefined behavior. */ AWS_COMMON_API int aws_priority_queue_remove(struct aws_priority_queue *queue, void *item, const struct aws_priority_queue_node *node); /** * Obtains a pointer to the element of the highest priority. Complexity: constant time. * If queue is empty, AWS_ERROR_PRIORITY_QUEUE_EMPTY will be raised. */ AWS_COMMON_API int aws_priority_queue_top(const struct aws_priority_queue *queue, void **item); /** * Removes all elements from the queue, but does not free internal memory. */ AWS_COMMON_API void aws_priority_queue_clear(struct aws_priority_queue *queue); /** * Current number of elements in the queue */ AWS_COMMON_API size_t aws_priority_queue_size(const struct aws_priority_queue *queue); /** * Current allocated capacity for the queue, in dynamic mode this grows over time, in static mode, this will never * change. */ AWS_COMMON_API size_t aws_priority_queue_capacity(const struct aws_priority_queue *queue); /** * Initializes a queue node to a default value that indicates the node is not in the queue. * * @param node priority queue node to initialize with a default value */ AWS_COMMON_API void aws_priority_queue_node_init(struct aws_priority_queue_node *node); /** * Checks if a priority queue node is currently in a priority queue. * * @param node priority queue node to check usage for * * @return true if the node is in a queue, false otherwise */ AWS_COMMON_API bool aws_priority_queue_node_is_in_queue(const struct aws_priority_queue_node *node); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_PRIORITY_QUEUE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/000077500000000000000000000000001456575232400256775ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/array_list.h000066400000000000000000000010261456575232400302200ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_ARRAY_LIST_H #define AWS_COMMON_PRIVATE_ARRAY_LIST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ AWS_EXTERN_C_BEGIN /** * Helper function that calculates the number of bytes needed by an array_list, where "index" is the last valid * index. */ int aws_array_list_calc_necessary_size(struct aws_array_list *AWS_RESTRICT list, size_t index, size_t *necessary_size); AWS_EXTERN_C_END #endif /* AWS_COMMON_PRIVATE_ARRAY_LIST_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/byte_buf.h000066400000000000000000000011631456575232400276500ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_BYTE_BUF_H #define AWS_COMMON_PRIVATE_BYTE_BUF_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /** * If index >= bound, bound > (SIZE_MAX / 2), or index > (SIZE_MAX / 2), returns * 0. Otherwise, returns UINTPTR_MAX. 
This function is designed to return the correct * value even under CPU speculation conditions, and is intended to be used for * SPECTRE mitigation purposes. */ AWS_COMMON_API size_t aws_nospec_mask(size_t index, size_t bound); #endif /* AWS_COMMON_PRIVATE_BYTE_BUF_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/dlloads.h000066400000000000000000000012271456575232400274740ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_DLLOADS_H #define AWS_COMMON_PRIVATE_DLLOADS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * definition is here: https://linux.die.net/man/2/set_mempolicy */ #define AWS_MPOL_PREFERRED_ALIAS 1 struct bitmask; extern long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long); extern int (*g_numa_available_ptr)(void); extern int (*g_numa_num_configured_nodes_ptr)(void); extern int (*g_numa_num_possible_cpus_ptr)(void); extern int (*g_numa_node_of_cpu_ptr)(int cpu); extern void *g_libnuma_handle; #endif /* AWS_COMMON_PRIVATE_DLLOADS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/hash_table_impl.h000066400000000000000000000035251456575232400311700ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_HASH_TABLE_IMPL_H #define AWS_COMMON_PRIVATE_HASH_TABLE_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct hash_table_entry { struct aws_hash_element element; uint64_t hash_code; /* hash code (0 signals empty) */ }; /* Using a flexible array member is the C99 compliant way to have the hash_table_entries * immediately follow the struct. * * MSVC doesn't know this for some reason so we need to use a pragma to make * it happy. */ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4200) #endif struct hash_table_state { aws_hash_fn *hash_fn; aws_hash_callback_eq_fn *equals_fn; aws_hash_callback_destroy_fn *destroy_key_fn; aws_hash_callback_destroy_fn *destroy_value_fn; struct aws_allocator *alloc; size_t size, entry_count; size_t max_load; /* We AND a hash value with mask to get the slot index */ size_t mask; double max_load_factor; /* actually variable length */ struct hash_table_entry slots[]; }; #ifdef _MSC_VER # pragma warning(pop) #endif /** * Best-effort check of hash_table_state data-structure invariants * Some invariants, such as that the number of entries is actually the * same as the entry_count field, would require a loop to check */ bool hash_table_state_is_valid(const struct hash_table_state *map); /** * Determine the total number of bytes needed for a hash-table with * "size" slots. If the result would overflow a size_t, return * AWS_OP_ERR; otherwise, return AWS_OP_SUCCESS with the result in * "required_bytes". */ int hash_table_state_required_bytes(size_t size, size_t *required_bytes); #endif /* AWS_COMMON_PRIVATE_HASH_TABLE_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/json_impl.h000066400000000000000000000011061456575232400300400ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_JSON_IMPL_H #define AWS_COMMON_PRIVATE_JSON_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /** * Initializes the JSON module for use. * @param allocator The allocator to use for creating aws_json_value structs. */ void aws_json_module_init(struct aws_allocator *allocator); /** * Cleans up the JSON module. 
Should be called when finished using the module. */ void aws_json_module_cleanup(void); #endif // AWS_COMMON_PRIVATE_JSON_IMPL_H aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/lookup3.inl000066400000000000000000001112311456575232400277760ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_LOOKUP3_INL #define AWS_COMMON_PRIVATE_LOOKUP3_INL #include /* clang-format off */ /* * The following public domain code has been modified as follows: * # All functions have been made static. * # The self test harness has been turned off. * # stdint.h include removed for C89 compatibility. * * The original code was retrieved from http://burtleburtle.net/bob/c/lookup3.c */ /* ------------------------------------------------------------------------------- lookup3.c, by Bob Jenkins, May 2006, Public Domain. These are functions for producing 32-bit hashes for hash table lookup. hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are externally useful functions. Routines to test the hash are included if SELF_TEST is defined. You can use this free for any purpose. It's in the public domain. It has no warranty. You probably want to use hashlittle(). hashlittle() and hashbig() hash byte arrays. hashlittle() is is faster than hashbig() on little-endian machines. Intel and AMD are little-endian machines. On second thought, you probably want hashlittle2(), which is identical to hashlittle() except it returns two 32-bit hashes for the price of one. You could implement hashbig2() if you wanted but I haven't bothered here. If you want to find a hash of, say, exactly 7 integers, do a = i1; b = i2; c = i3; mix(a,b,c); a += i4; b += i5; c += i6; mix(a,b,c); a += i7; final(a,b,c); then use c as the hash value. If you have a variable length array of 4-byte integers to hash, use hashword(). If you have a byte array (like a character string), use hashlittle(). If you have several byte arrays, or a mix of things, see the comments above hashlittle(). Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then mix those integers. This is fast (you can do a lot more thorough mixing with 12*3 instructions on 3 integers than you can with 3 instructions on 1 byte), but shoehorning those bytes into integers efficiently is messy. ------------------------------------------------------------------------------- */ // #define SELF_TEST 1 #include /* defines printf for tests */ #include /* defines time_t for timings in the test */ #ifndef _MSC_VER #include /* attempt to define endianness */ #endif #ifdef linux # include /* attempt to define endianness */ #endif #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4127) /*Disable "conditional expression is constant" */ #endif /* _MSC_VER */ #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif /* CBMC */ /* * My best guess at if you are big-endian or little-endian. This may * need adjustment. 
*/ #if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ __BYTE_ORDER == __LITTLE_ENDIAN) || \ (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \ (defined(i386) || defined(__i386__) || defined(__i486__) || \ defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL) || \ defined(_M_IX86) || defined(_M_X64) || defined(_M_IA64) || defined(_M_ARM)) # define HASH_LITTLE_ENDIAN 1 # define HASH_BIG_ENDIAN 0 #elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ __BYTE_ORDER == __BIG_ENDIAN) || \ (defined(sparc) || defined(POWERPC) || defined(_M_PPC) || defined(mc68000) || defined(sel)) # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 1 #else # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 0 #endif #define hashsize(n) ((uint32_t)1<<(n)) #define hashmask(n) (hashsize(n)-1) #define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) /* ------------------------------------------------------------------------------- mix -- mix 3 32-bit values reversibly. This is reversible, so any information in (a,b,c) before mix() is still in (a,b,c) after mix(). If four pairs of (a,b,c) inputs are run through mix(), or through mix() in reverse, there are at least 32 bits of the output that are sometimes the same for one pair and different for another pair. This was tested for: * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that satisfy this are 4 6 8 16 19 4 9 15 3 18 27 15 14 9 3 7 17 3 Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for "differ" defined as + with a one-bit base and a two-bit delta. I used http://burtleburtle.net/bob/hash/avalanche.html to choose the operations, constants, and arrangements of the variables. This does not achieve avalanche. There are input bits of (a,b,c) that fail to affect some output bits of (a,b,c), especially of a. The most thoroughly mixed value is c, but it doesn't really even achieve avalanche in c. This allows some parallelism. Read-after-writes are good at doubling the number of bits affected, so the goal of mixing pulls in the opposite direction as the goal of parallelism. I did what I could. Rotates seem to cost as much as shifts on every machine I could lay my hands on, and rotates are much kinder to the top and bottom bits, so I used rotates. ------------------------------------------------------------------------------- */ #define mix(a,b,c) \ { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b, 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c ^= rot(b, 4); b += a; \ } /* ------------------------------------------------------------------------------- final -- final mixing of 3 32-bit values (a,b,c) into c Pairs of (a,b,c) values differing in only a few bits will usually produce values of c that look totally different. This was tested for * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. 
For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. These constants passed: 14 11 25 16 4 14 24 12 14 25 16 4 14 24 and these came close: 4 8 15 26 3 22 24 10 8 15 26 3 22 24 11 8 15 26 3 22 24 ------------------------------------------------------------------------------- */ #define final(a,b,c) \ { \ c ^= b; c -= rot(b,14); \ a ^= c; a -= rot(c,11); \ b ^= a; b -= rot(a,25); \ c ^= b; c -= rot(b,16); \ a ^= c; a -= rot(c,4); \ b ^= a; b -= rot(a,14); \ c ^= b; c -= rot(b,24); \ } /* -------------------------------------------------------------------- This works on all machines. To be useful, it requires -- that the key be an array of uint32_t's, and -- that the length be the number of uint32_t's in the key The function hashword() is identical to hashlittle() on little-endian machines, and identical to hashbig() on big-endian machines, except that the length has to be measured in uint32_ts rather than in bytes. hashlittle() is more complicated than hashword() only because hashlittle() has to dance around fitting the key bytes into registers. -------------------------------------------------------------------- */ static uint32_t hashword( const uint32_t *k, /* the key, an array of uint32_t values */ size_t length, /* the length of the key, in uint32_ts */ uint32_t initval) /* the previous hash, or an arbitrary value */ { uint32_t a,b,c; /* Set up the internal state */ a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval; /*------------------------------------------------- handle most of the key */ while (length > 3) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 3; k += 3; } /*------------------------------------------- handle the last 3 uint32_t's */ switch(length) /* all the case statements fall through */ { case 3 : c+=k[2]; case 2 : b+=k[1]; case 1 : a+=k[0]; final(a,b,c); case 0: /* case 0: nothing left to add */ break; } /*------------------------------------------------------ report the result */ return c; } /* -------------------------------------------------------------------- hashword2() -- same as hashword(), but take two seeds and return two 32-bit values. pc and pb must both be nonnull, and *pc and *pb must both be initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be the same as the return value from hashword(). 
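
 For example (illustrative only; "key" is a caller-supplied uint32_t array and
 "len" its length in uint32_t units):

   uint32_t pc = 13, pb = 0;
   hashword2(key, len, &pc, &pb);

 Afterwards pc equals hashword(key, len, 13), and pb holds a second,
 independent 32-bit hash.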
-------------------------------------------------------------------- */ static void hashword2 ( const uint32_t *k, /* the key, an array of uint32_t values */ size_t length, /* the length of the key, in uint32_ts */ uint32_t *pc, /* IN: seed OUT: primary hash value */ uint32_t *pb) /* IN: more seed OUT: secondary hash value */ { uint32_t a,b,c; /* Set up the internal state */ a = b = c = 0xdeadbeef + ((uint32_t)(length<<2)) + *pc; c += *pb; /*------------------------------------------------- handle most of the key */ while (length > 3) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 3; k += 3; } /*------------------------------------------- handle the last 3 uint32_t's */ switch(length) /* all the case statements fall through */ { case 3 : c+=k[2]; case 2 : b+=k[1]; case 1 : a+=k[0]; final(a,b,c); case 0: /* case 0: nothing left to add */ break; } /*------------------------------------------------------ report the result */ *pc=c; *pb=b; } /* ------------------------------------------------------------------------------- hashlittle() -- hash a variable-length key into a 32-bit value k : the key (the unaligned variable-length array of bytes) length : the length of the key, counting by bytes initval : can be any 4-byte value Returns a 32-bit value. Every bit of the key affects every bit of the return value. Two keys differing by one or two bits will have totally different hash values. The best hash table sizes are powers of 2. There is no need to do mod a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. For example, if you need only 10 bits, do h = (h & hashmask(10)); In which case, the hash table should have hashsize(10) elements. If you are hashing n strings (uint8_t **)k, do it like this: for (i=0, h=0; i 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]&0xffffff" actually reads beyond the end of the string, but * then masks off the part it's not allowed to read. Because the * string is aligned, the masked-off tail is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND and CBMC * will still catch it and complain. CBMC will ignore this type of error * in the code block between the pragmas "CPROVER check push" and * "CPROVER check pop". The masking trick does make the hash noticably * faster for short strings (like English words). 
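     *
     * For example, when length == 10 only two bytes of the final word belong
     * to the key, so the switch below adds "k[2]&0xffff" instead of all of k[2].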
*/ #ifndef VALGRIND #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "pointer" #endif // changed in aws-c-common: fix unused variable warning switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff; a+=k[0]; break; case 6 : b+=k[1]&0xffff; a+=k[0]; break; case 5 : b+=k[1]&0xff; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff; break; case 2 : a+=k[0]&0xffff; break; case 1 : a+=k[0]&0xff; break; case 0 : return c; /* zero length strings require no mixing */ } #ifdef CBMC # pragma CPROVER check pop #endif #else /* make valgrind happy */ const uint8_t *k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ case 9 : c+=k8[8]; /* fall through */ case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ case 5 : b+=k8[4]; /* fall through */ case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ case 1 : a+=k8[0]; break; case 0 : return c; } #endif /* !valgrind */ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ /*--------------- all but last block: aligned reads and different mixing */ while (length > 12) { a += k[0] + (((uint32_t)k[1])<<16); b += k[2] + (((uint32_t)k[3])<<16); c += k[4] + (((uint32_t)k[5])<<16); mix(a,b,c); length -= 12; k += 6; } /*----------------------------- handle the last (probably partial) block */ const uint8_t *k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[4]+(((uint32_t)k[5])<<16); b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ case 10: c+=k[4]; b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 9 : c+=k8[8]; /* fall through */ case 8 : b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ case 6 : b+=k[2]; a+=k[0]+(((uint32_t)k[1])<<16); break; case 5 : b+=k8[4]; /* fall through */ case 4 : a+=k[0]+(((uint32_t)k[1])<<16); break; case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ case 2 : a+=k[0]; break; case 1 : a+=k8[0]; break; case 0 : return c; /* zero length requires no mixing */ } } else { /* need to read the key one byte at a time */ const uint8_t *k = (const uint8_t *)key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; a += ((uint32_t)k[1])<<8; a += ((uint32_t)k[2])<<16; a += ((uint32_t)k[3])<<24; b += k[4]; b += ((uint32_t)k[5])<<8; b += ((uint32_t)k[6])<<16; b += ((uint32_t)k[7])<<24; c += k[8]; c += ((uint32_t)k[9])<<8; c += ((uint32_t)k[10])<<16; c += ((uint32_t)k[11])<<24; mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through */ { case 12: c+=((uint32_t)k[11])<<24; case 11: c+=((uint32_t)k[10])<<16; case 10: c+=((uint32_t)k[9])<<8; case 9 : c+=k[8]; case 8 : b+=((uint32_t)k[7])<<24; case 7 : b+=((uint32_t)k[6])<<16; case 6 : b+=((uint32_t)k[5])<<8; case 5 : b+=k[4]; case 4 : 
a+=((uint32_t)k[3])<<24; case 3 : a+=((uint32_t)k[2])<<16; case 2 : a+=((uint32_t)k[1])<<8; case 1 : a+=k[0]; break; case 0 : return c; } } final(a,b,c); return c; } /* * hashlittle2: return 2 32-bit hash values * * This is identical to hashlittle(), except it returns two 32-bit hash * values instead of just one. This is good enough for hash table * lookup with 2^^64 buckets, or if you want a second hash if you're not * happy with the first, or if you want a probably-unique 64-bit ID for * the key. *pc is better mixed than *pb, so use *pc first. If you want * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)". */ /* AddressSanitizer hates this implementation, even though it's innocuous */ AWS_SUPPRESS_ASAN static void hashlittle2( const void *key, /* the key to hash */ size_t length, /* length of the key */ uint32_t *pc, /* IN: primary initval, OUT: primary hash */ uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */ { uint32_t a,b,c; /* internal state */ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ /* Set up the internal state */ a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc; c += *pb; u.ptr = key; if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]&0xffffff" actually reads beyond the end of the string, but * then masks off the part it's not allowed to read. Because the * string is aligned, the masked-off tail is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND and CBMC * will still catch it and complain. CBMC will ignore this type of error * in the code block between the pragmas "CPROVER check push" and * "CPROVER check pop". The masking trick does make the hash noticeably * faster for short strings (like English words). 
*/ #ifndef VALGRIND #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "pointer" #endif // changed in aws-c-common: fix unused variable warning switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff; a+=k[0]; break; case 6 : b+=k[1]&0xffff; a+=k[0]; break; case 5 : b+=k[1]&0xff; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff; break; case 2 : a+=k[0]&0xffff; break; case 1 : a+=k[0]&0xff; break; case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ } #ifdef CBMC # pragma CPROVER check pop #endif #else /* make valgrind happy */ const uint8_t *k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ case 9 : c+=k8[8]; /* fall through */ case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ case 5 : b+=k8[4]; /* fall through */ case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ case 1 : a+=k8[0]; break; case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ } #endif /* !valgrind */ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ /*--------------- all but last block: aligned reads and different mixing */ while (length > 12) { a += k[0] + (((uint32_t)k[1])<<16); b += k[2] + (((uint32_t)k[3])<<16); c += k[4] + (((uint32_t)k[5])<<16); mix(a,b,c); length -= 12; k += 6; } /*----------------------------- handle the last (probably partial) block */ const uint8_t *k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[4]+(((uint32_t)k[5])<<16); b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ case 10: c+=k[4]; b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 9 : c+=k8[8]; /* fall through */ case 8 : b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ case 6 : b+=k[2]; a+=k[0]+(((uint32_t)k[1])<<16); break; case 5 : b+=k8[4]; /* fall through */ case 4 : a+=k[0]+(((uint32_t)k[1])<<16); break; case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ case 2 : a+=k[0]; break; case 1 : a+=k8[0]; break; case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ } } else { /* need to read the key one byte at a time */ const uint8_t *k = (const uint8_t *)key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; a += ((uint32_t)k[1])<<8; a += ((uint32_t)k[2])<<16; a += ((uint32_t)k[3])<<24; b += k[4]; b += ((uint32_t)k[5])<<8; b += ((uint32_t)k[6])<<16; b += ((uint32_t)k[7])<<24; c += k[8]; c += ((uint32_t)k[9])<<8; c += ((uint32_t)k[10])<<16; c += ((uint32_t)k[11])<<24; mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through */ { case 12: c+=((uint32_t)k[11])<<24; case 11: c+=((uint32_t)k[10])<<16; case 10: c+=((uint32_t)k[9])<<8; case 9 : c+=k[8]; case 8 : b+=((uint32_t)k[7])<<24; case 7 : 
b+=((uint32_t)k[6])<<16; case 6 : b+=((uint32_t)k[5])<<8; case 5 : b+=k[4]; case 4 : a+=((uint32_t)k[3])<<24; case 3 : a+=((uint32_t)k[2])<<16; case 2 : a+=((uint32_t)k[1])<<8; case 1 : a+=k[0]; break; case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */ } } final(a,b,c); *pc=c; *pb=b; } /* * hashbig(): * This is the same as hashword() on big-endian machines. It is different * from hashlittle() on all machines. hashbig() takes advantage of * big-endian byte ordering. */ static uint32_t hashbig( const void *key, size_t length, uint32_t initval) { uint32_t a,b,c; union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ /* Set up the internal state */ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval; u.ptr = key; if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]<<8" actually reads beyond the end of the string, but * then shifts out the part it's not allowed to read. Because the * string is aligned, the illegal read is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND and CBMC * will still catch it and complain. CBMC will ignore this type of error * in the code block between the pragmas "CPROVER check push" and * "CPROVER check pop". The masking trick does make the hash noticably * faster for short strings (like English words). */ #ifndef VALGRIND #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "pointer" #endif // changed in aws-c-common: fix unused variable warning switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; case 5 : b+=k[1]&0xff000000; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff00; break; case 2 : a+=k[0]&0xffff0000; break; case 1 : a+=k[0]&0xff000000; break; case 0 : return c; /* zero length strings require no mixing */ } #ifdef CBMC # pragma CPROVER check pop #endif #else /* make valgrind happy */ const uint8_t *k8 = (const uint8_t *)k; switch(length) /* all the case statements fall through */ { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ case 1 : a+=((uint32_t)k8[0])<<24; break; case 0 : return c; } #endif /* !VALGRIND */ } else { /* need to read the key one byte at a time */ const uint8_t *k = (const uint8_t *)key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += ((uint32_t)k[0])<<24; a += ((uint32_t)k[1])<<16; a += ((uint32_t)k[2])<<8; a += 
((uint32_t)k[3]); b += ((uint32_t)k[4])<<24; b += ((uint32_t)k[5])<<16; b += ((uint32_t)k[6])<<8; b += ((uint32_t)k[7]); c += ((uint32_t)k[8])<<24; c += ((uint32_t)k[9])<<16; c += ((uint32_t)k[10])<<8; c += ((uint32_t)k[11]); mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through */ { case 12: c+=k[11]; case 11: c+=((uint32_t)k[10])<<8; case 10: c+=((uint32_t)k[9])<<16; case 9 : c+=((uint32_t)k[8])<<24; case 8 : b+=k[7]; case 7 : b+=((uint32_t)k[6])<<8; case 6 : b+=((uint32_t)k[5])<<16; case 5 : b+=((uint32_t)k[4])<<24; case 4 : a+=k[3]; case 3 : a+=((uint32_t)k[2])<<8; case 2 : a+=((uint32_t)k[1])<<16; case 1 : a+=((uint32_t)k[0])<<24; break; case 0 : return c; } } final(a,b,c); return c; } #ifdef SELF_TEST /* used for timings */ void driver1() { uint8_t buf[256]; uint32_t i; uint32_t h=0; time_t a,z; time(&a); for (i=0; i<256; ++i) buf[i] = 'x'; for (i=0; i<1; ++i) { h = hashlittle(&buf[0],1,h); } time(&z); if (z-a > 0) printf("time %d %.8x\n", z-a, h); } /* check that every input bit changes every output bit half the time */ #define HASHSTATE 1 #define HASHLEN 1 #define MAXPAIR 60 #define MAXLEN 70 void driver2() { uint8_t qa[MAXLEN+1], qb[MAXLEN+2], *a = &qa[0], *b = &qb[1]; uint32_t c[HASHSTATE], d[HASHSTATE], i=0, j=0, k, l, m=0, z; uint32_t e[HASHSTATE],f[HASHSTATE],g[HASHSTATE],h[HASHSTATE]; uint32_t x[HASHSTATE],y[HASHSTATE]; uint32_t hlen; printf("No more than %d trials should ever be needed \n",MAXPAIR/2); for (hlen=0; hlen < MAXLEN; ++hlen) { z=0; for (i=0; i>(8-j)); c[0] = hashlittle(a, hlen, m); b[i] ^= ((k+1)<>(8-j)); d[0] = hashlittle(b, hlen, m); /* check every bit is 1, 0, set, and not set at least once */ for (l=0; lz) z=k; if (k==MAXPAIR) { printf("Some bit didn't change: "); printf("%.8x %.8x %.8x %.8x %.8x %.8x ", e[0],f[0],g[0],h[0],x[0],y[0]); printf("i %d j %d m %d len %d\n", i, j, m, hlen); } if (z==MAXPAIR) goto done; } } } done: if (z < MAXPAIR) { printf("Mix success %2d bytes %2d initvals ",i,m); printf("required %d trials\n", z/2); } } printf("\n"); } /* Check for reading beyond the end of the buffer and alignment problems */ void driver3() { uint8_t buf[MAXLEN+20], *b; uint32_t len; uint8_t q[] = "This is the time for all good men to come to the aid of their country..."; uint32_t h; uint8_t qq[] = "xThis is the time for all good men to come to the aid of their country..."; uint32_t i; uint8_t qqq[] = "xxThis is the time for all good men to come to the aid of their country..."; uint32_t j; uint8_t qqqq[] = "xxxThis is the time for all good men to come to the aid of their country..."; uint32_t ref,x,y; uint8_t *p; printf("Endianness. 
These lines should all be the same (for values filled in):\n"); printf("%.8x %.8x %.8x\n", hashword((const uint32_t *)q, (sizeof(q)-1)/4, 13), hashword((const uint32_t *)q, (sizeof(q)-5)/4, 13), hashword((const uint32_t *)q, (sizeof(q)-9)/4, 13)); p = q; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); p = &qq[1]; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); p = &qqq[2]; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); p = &qqqq[3]; printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13), hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13), hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13), hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13), hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13), hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13)); printf("\n"); /* check that hashlittle2 and hashlittle produce the same results */ i=47; j=0; hashlittle2(q, sizeof(q), &i, &j); if (hashlittle(q, sizeof(q), 47) != i) printf("hashlittle2 and hashlittle mismatch\n"); /* check that hashword2 and hashword produce the same results */ len = 0xdeadbeef; i=47, j=0; hashword2(&len, 1, &i, &j); if (hashword(&len, 1, 47) != i) printf("hashword2 and hashword mismatch %x %x\n", i, hashword(&len, 1, 47)); /* check hashlittle doesn't read before or after the ends of the string */ for (h=0, b=buf+1; h<8; ++h, ++b) { for (i=0; i #include #include #include struct aws_system_environment { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct aws_byte_buf virtualization_vendor; struct aws_byte_buf product_name; enum aws_platform_os os; size_t cpu_count; size_t cpu_group_count; void *impl; }; /** * For internal implementors. Fill in info in env that you're able to grab, such as dmi info, os version strings etc... * in here. The default just returns AWS_OP_SUCCESS. This is currently only implemented for linux. * * Returns AWS_OP_ERR if the implementation wasn't able to fill in required information for the platform. */ int aws_system_environment_load_platform_impl(struct aws_system_environment *env); /** * For internal implementors. Cleans up anything allocated in aws_system_environment_load_platform_impl, * but does not release the memory for env. 
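 *
 * A no-op platform pair (matching the documented default of simply returning
 * AWS_OP_SUCCESS) would look roughly like this sketch:
 *
 *     int aws_system_environment_load_platform_impl(struct aws_system_environment *env) {
 *         (void)env;
 *         return AWS_OP_SUCCESS;
 *     }
 *
 *     void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env) {
 *         (void)env;
 *     }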
*/ void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env); #endif // AWS_COMMON_PRIVATE_SYSTEM_INFO_PRIV_H aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/thread_shared.h000066400000000000000000000026031456575232400306460ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_THREAD_SHARED_H #define AWS_COMMON_PRIVATE_THREAD_SHARED_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_linked_list; struct aws_linked_list_node; /** * Iterates a list of thread wrappers, joining against each corresponding thread, and freeing the wrapper once * the join has completed. Do not hold the managed thread lock when invoking this function, instead swap the * pending join list into a local and call this on the local. * * @param wrapper_list list of thread wrappers to join and free */ AWS_COMMON_API void aws_thread_join_and_free_wrapper_list(struct aws_linked_list *wrapper_list); /** * Adds a thread (wrapper embedding a linked list node) to the global list of threads that have run to completion * and need a join in order to know that the OS has truly finished with the thread. * @param node linked list node embedded in the thread wrapper */ AWS_COMMON_API void aws_thread_pending_join_add(struct aws_linked_list_node *node); /** * Initializes the managed thread system. Called during library init. */ AWS_COMMON_API void aws_thread_initialize_thread_management(void); /** * Gets the current managed thread count */ AWS_COMMON_API size_t aws_thread_get_managed_thread_count(void); #endif /* AWS_COMMON_PRIVATE_THREAD_SHARED_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/private/xml_parser_impl.h000066400000000000000000000015401456575232400312450ustar00rootroot00000000000000#ifndef AWS_COMMON_PRIVATE_XML_PARSER_IMPL_H #define AWS_COMMON_PRIVATE_XML_PARSER_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_xml_node { struct aws_xml_parser *parser; struct aws_byte_cursor name; struct aws_array_list attributes; struct aws_byte_cursor doc_at_body; bool processed; }; struct aws_xml_parser { struct aws_allocator *allocator; struct aws_byte_cursor doc; struct aws_array_list callback_stack; /* maximum of 10 attributes */ struct aws_xml_attribute attributes[10]; /* splits on attributes and node name, so (10 attributes + 1 name) */ struct aws_byte_cursor split_scratch[11]; size_t max_depth; int error; }; #endif /* AWS_COMMON_PRIVATE_XML_PARSER_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/process.h000066400000000000000000000051371456575232400260620ustar00rootroot00000000000000#ifndef AWS_COMMON_PROCESS_H #define AWS_COMMON_PROCESS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_run_command_result { /* return code from running the command. */ int ret_code; /** * captured stdout message from running the command, * caller is responsible for releasing the memory. */ struct aws_string *std_out; /** * captured stderr message from running the command, * caller is responsible for releasing the memory. * It's currently not implemented and the value will be set to NULL. */ struct aws_string *std_err; }; struct aws_run_command_options { /** * command path and commandline options of running that command. 
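 *
 * For reference, a minimal usage sketch (illustrative only; the command string and
 * error handling are placeholders) pairing this options struct with
 * aws_run_command_result_init / aws_run_command / aws_run_command_result_cleanup
 * declared later in this header:
 *
 *     struct aws_run_command_result result;
 *     if (aws_run_command_result_init(allocator, &result) != AWS_OP_SUCCESS) {
 *         return AWS_OP_ERR;
 *     }
 *     struct aws_run_command_options options = {.command = "uname -a"};
 *     if (aws_run_command(allocator, &options, &result) == AWS_OP_SUCCESS && result.std_out != NULL) {
 *         printf("%s\n", aws_string_c_str(result.std_out)); // captured stdout
 *     }
 *     aws_run_command_result_cleanup(&result);
 *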
*/ const char *command; }; AWS_EXTERN_C_BEGIN /** * Returns the current process's PID (process id). * @return PID as int */ AWS_COMMON_API int aws_get_pid(void); /** * Returns the soft limit for max io handles (max fds in unix terminology). This limit is one more than the actual * limit. The soft limit can be changed up to the hard limit by any process regardless of permissions. */ AWS_COMMON_API size_t aws_get_soft_limit_io_handles(void); /** * Returns the hard limit for max io handles (max fds in unix terminology). This limit is one more than the actual * limit. This limit cannot be increased without sudo permissions. */ AWS_COMMON_API size_t aws_get_hard_limit_io_handles(void); /** * Sets the new soft limit for io_handles (max fds). This can be up to the hard limit but may not exceed it. * * This operation will always fail with AWS_ERROR_UNIMPLEMENTED error code on Windows. */ AWS_COMMON_API int aws_set_soft_limit_io_handles(size_t max_handles); AWS_COMMON_API int aws_run_command_result_init(struct aws_allocator *allocator, struct aws_run_command_result *result); AWS_COMMON_API void aws_run_command_result_cleanup(struct aws_run_command_result *result); /** * Currently this API is implemented using popen on Posix system and * _popen on Windows to capture output from running a command. Note * that popen only captures stdout, and doesn't provide an option to * capture stderr. We will add more options, such as acquire stderr * in the future so probably will alter the underlying implementation * as well. */ AWS_COMMON_API int aws_run_command( struct aws_allocator *allocator, struct aws_run_command_options *options, struct aws_run_command_result *result); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_PROCESS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/promise.h000066400000000000000000000062671456575232400260670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_COMMON_PROMISE_H #define AWS_COMMON_PROMISE_H #include AWS_PUSH_SANE_WARNING_LEVEL /* * Standard promise interface. Promise can be waited on by multiple threads, and as long as it is * ref-counted correctly, will provide the resultant value/error code to all waiters. * All promise API calls are internally thread-safe. */ struct aws_promise; AWS_EXTERN_C_BEGIN /* * Creates a new promise */ AWS_COMMON_API struct aws_promise *aws_promise_new(struct aws_allocator *allocator); /* * Indicate a new reference to a promise. At minimum, each new thread making use of the promise should * acquire it. */ AWS_COMMON_API struct aws_promise *aws_promise_acquire(struct aws_promise *promise); /* * Releases a reference on the promise. When the refcount hits 0, the promise is cleaned up and freed. */ AWS_COMMON_API void aws_promise_release(struct aws_promise *promise); /* * Waits infinitely for the promise to be completed */ AWS_COMMON_API void aws_promise_wait(struct aws_promise *promise); /* * Waits for the requested time in nanoseconds. Returns true if the promise was completed. */ AWS_COMMON_API bool aws_promise_wait_for(struct aws_promise *promise, size_t nanoseconds); /* * Completes the promise and stores the result along with an optional destructor. If the value * is not taken via `aws_promise_take_value`, it will be destroyed when the promise's reference * count reaches zero. 
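 *
 * Illustrative sketch (not part of the original header) of the typical
 * produce/consume flow, assuming a hypothetical heap-allocated `result` value and
 * destructor `s_result_destroy`:
 *
 *     // producer thread
 *     aws_promise_complete(promise, result, s_result_destroy);
 *
 *     // consumer thread
 *     aws_promise_wait(promise);
 *     if (aws_promise_error_code(promise) == 0) {
 *         void *value = aws_promise_take_value(promise); // caller now owns value
 *     }
 *     aws_promise_release(promise);
 *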
* NOTE: Promise cannot be completed twice */ AWS_COMMON_API void aws_promise_complete(struct aws_promise *promise, void *value, void (*dtor)(void *)); /* * Completes the promise and stores the error code * NOTE: Promise cannot be completed twice */ AWS_COMMON_API void aws_promise_fail(struct aws_promise *promise, int error_code); /* * Returns whether or not the promise has completed (regardless of success or failure) */ AWS_COMMON_API bool aws_promise_is_complete(struct aws_promise *promise); /* * Returns the error code recorded if the promise failed, or 0 if it succeeded * NOTE: It is fatal to attempt to retrieve the error code before the promise is completed */ AWS_COMMON_API int aws_promise_error_code(struct aws_promise *promise); /* * Returns the value provided to the promise if it succeeded, or NULL if none was provided * or the promise failed. Check `aws_promise_error_code` to be sure. * NOTE: The ownership of the value is retained by the promise. * NOTE: It is fatal to attempt to retrieve the value before the promise is completed */ AWS_COMMON_API void *aws_promise_value(struct aws_promise *promise); /* * Returns the value provided to the promise if it succeeded, or NULL if none was provided * or the promise failed. Check `aws_promise_error_code` to be sure. * NOTE: The promise relinquishes ownership of the value, the caller is now responsible for * freeing any resources associated with the value * NOTE: It is fatal to attempt to take the value before the promise is completed */ AWS_COMMON_API void *aws_promise_take_value(struct aws_promise *promise); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif // AWS_COMMON_PROMISE_H aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/ref_count.h000066400000000000000000000032741456575232400263700ustar00rootroot00000000000000#ifndef AWS_COMMON_REF_COUNT_H #define AWS_COMMON_REF_COUNT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL typedef void(aws_simple_completion_callback)(void *); /* * A utility type for making ref-counted types, reminiscent of std::shared_ptr in C++ */ struct aws_ref_count { struct aws_atomic_var ref_count; void *object; aws_simple_completion_callback *on_zero_fn; }; struct aws_shutdown_callback_options { aws_simple_completion_callback *shutdown_callback_fn; void *shutdown_callback_user_data; }; AWS_EXTERN_C_BEGIN /** * Initializes a ref-counter structure. After initialization, the ref count will be 1. * * @param ref_count ref-counter to initialize * @param object object being ref counted * @param on_zero_fn function to invoke when the ref count reaches zero */ AWS_COMMON_API void aws_ref_count_init( struct aws_ref_count *ref_count, void *object, aws_simple_completion_callback *on_zero_fn); /** * Increments a ref-counter's ref count * * @param ref_count ref-counter to increment the count for * @return the object being ref-counted */ AWS_COMMON_API void *aws_ref_count_acquire(struct aws_ref_count *ref_count); /** * Decrements a ref-counter's ref count. 
Invokes the on_zero callback if the ref count drops to zero * @param ref_count ref-counter to decrement the count for * @return the value of the decremented ref count */ AWS_COMMON_API size_t aws_ref_count_release(struct aws_ref_count *ref_count); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_REF_COUNT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/ring_buffer.h000066400000000000000000000075071456575232400266770ustar00rootroot00000000000000#ifndef AWS_COMMON_RING_BUFFER_H #define AWS_COMMON_RING_BUFFER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL /** * Lockless ring buffer implementation that is thread safe assuming a single thread acquires and a single thread * releases. For any other use case (other than the single-threaded use-case), you must manage thread-safety manually. * * Also, a very important note: release must happen in the same order as acquire. If you do not your application, and * possibly computers within a thousand mile radius, may die terrible deaths, and the local drinking water will be * poisoned for generations with fragments of what is left of your radioactive corrupted memory. */ struct aws_ring_buffer { struct aws_allocator *allocator; uint8_t *allocation; struct aws_atomic_var head; struct aws_atomic_var tail; uint8_t *allocation_end; }; struct aws_byte_buf; AWS_EXTERN_C_BEGIN /** * Initializes a ring buffer with an allocation of size `size`. Returns AWS_OP_SUCCESS on a successful initialization, * AWS_OP_ERR otherwise. */ AWS_COMMON_API int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size); /* * Checks whether atomic_ptr correctly points to a memory location within the bounds of the aws_ring_buffer */ AWS_STATIC_IMPL bool aws_ring_buffer_check_atomic_ptr( const struct aws_ring_buffer *ring_buf, const uint8_t *atomic_ptr); /** * Checks whether the ring buffer is empty */ AWS_STATIC_IMPL bool aws_ring_buffer_is_empty(const struct aws_ring_buffer *ring_buf); /** * Evaluates the set of properties that define the shape of all valid aws_ring_buffer structures. * It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion). */ AWS_STATIC_IMPL bool aws_ring_buffer_is_valid(const struct aws_ring_buffer *ring_buf); /** * Cleans up the ring buffer's resources. */ AWS_COMMON_API void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf); /** * Attempts to acquire `requested_size` buffer and stores the result in `dest` if successful. Returns AWS_OP_SUCCESS if * the requested size was available for use, AWS_OP_ERR otherwise. */ AWS_COMMON_API int aws_ring_buffer_acquire( struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest); /** * Attempts to acquire `requested_size` buffer and stores the result in `dest` if successful. If not available, it will * attempt to acquire anywhere from 1 byte to `requested_size`. Returns AWS_OP_SUCCESS if some buffer space is available * for use, AWS_OP_ERR otherwise. */ AWS_COMMON_API int aws_ring_buffer_acquire_up_to( struct aws_ring_buffer *ring_buf, size_t minimum_size, size_t requested_size, struct aws_byte_buf *dest); /** * Releases `buf` back to the ring buffer for further use. RELEASE MUST HAPPEN in the SAME ORDER AS ACQUIRE. 
* If you do not, your application, and possibly computers within a thousand mile radius, may die terrible deaths, * and the local drinking water will be poisoned for generations * with fragments of what is left of your radioactive corrupted memory. */ AWS_COMMON_API void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf); /** * Returns true if the memory in `buf` was vended by this ring buffer, false otherwise. * Make sure `buf->buffer` and `ring_buffer->allocation` refer to the same memory region. */ AWS_COMMON_API bool aws_ring_buffer_buf_belongs_to_pool( const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_RING_BUFFER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/ring_buffer.inl000066400000000000000000000035271456575232400272300ustar00rootroot00000000000000#ifndef AWS_COMMON_RING_BUFFER_INL #define AWS_COMMON_RING_BUFFER_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_EXTERN_C_BEGIN /* * Checks whether atomic_ptr correctly points to a memory location within the bounds of the aws_ring_buffer */ AWS_STATIC_IMPL bool aws_ring_buffer_check_atomic_ptr( const struct aws_ring_buffer *ring_buf, const uint8_t *atomic_ptr) { return ((atomic_ptr != NULL) && (atomic_ptr >= ring_buf->allocation && atomic_ptr <= ring_buf->allocation_end)); } /** * Checks whether the ring buffer is empty */ AWS_STATIC_IMPL bool aws_ring_buffer_is_empty(const struct aws_ring_buffer *ring_buf) { uint8_t *head = (uint8_t *)aws_atomic_load_ptr(&ring_buf->head); uint8_t *tail = (uint8_t *)aws_atomic_load_ptr(&ring_buf->tail); return head == tail; } /** * Evaluates the set of properties that define the shape of all valid aws_ring_buffer structures. * It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion). */ AWS_STATIC_IMPL bool aws_ring_buffer_is_valid(const struct aws_ring_buffer *ring_buf) { uint8_t *head = (uint8_t *)aws_atomic_load_ptr(&ring_buf->head); uint8_t *tail = (uint8_t *)aws_atomic_load_ptr(&ring_buf->tail); bool head_in_range = aws_ring_buffer_check_atomic_ptr(ring_buf, head); bool tail_in_range = aws_ring_buffer_check_atomic_ptr(ring_buf, tail); /* if head points-to the first element of the buffer then tail must too */ bool valid_head_tail = (head != ring_buf->allocation) || (tail == ring_buf->allocation); return ring_buf && (ring_buf->allocation != NULL) && head_in_range && tail_in_range && valid_head_tail && (ring_buf->allocator != NULL); } AWS_EXTERN_C_END #endif /* AWS_COMMON_RING_BUFFER_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/rw_lock.h000066400000000000000000000042661456575232400260460ustar00rootroot00000000000000#ifndef AWS_COMMON_RW_LOCK_H #define AWS_COMMON_RW_LOCK_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #ifdef _WIN32 /* NOTE: Do not use this macro before including windows.h */ # define AWSSRW_TO_WINDOWS(pCV) (PSRWLOCK) pCV #else # include #endif AWS_PUSH_SANE_WARNING_LEVEL struct aws_rw_lock { #ifdef _WIN32 void *lock_handle; #else pthread_rwlock_t lock_handle; #endif }; #ifdef _WIN32 # define AWS_RW_LOCK_INIT \ { .lock_handle = NULL } #else # define AWS_RW_LOCK_INIT \ { .lock_handle = PTHREAD_RWLOCK_INITIALIZER } #endif AWS_EXTERN_C_BEGIN /** * Initializes a new platform instance of mutex. */ AWS_COMMON_API int aws_rw_lock_init(struct aws_rw_lock *lock); /** * Cleans up internal resources. */ AWS_COMMON_API void aws_rw_lock_clean_up(struct aws_rw_lock *lock); /** * Blocks until it acquires the lock. While on some platforms such as Windows, * this may behave as a reentrant mutex, you should not treat it like one. On * platforms it is possible for it to be non-reentrant, it will be. */ AWS_COMMON_API int aws_rw_lock_rlock(struct aws_rw_lock *lock); AWS_COMMON_API int aws_rw_lock_wlock(struct aws_rw_lock *lock); /** * Attempts to acquire the lock but returns immediately if it can not. * While on some platforms such as Windows, this may behave as a reentrant mutex, * you should not treat it like one. On platforms it is possible for it to be non-reentrant, it will be. * Note: For windows, minimum support server version is Windows Server 2008 R2 [desktop apps | UWP apps] */ AWS_COMMON_API int aws_rw_lock_try_rlock(struct aws_rw_lock *lock); AWS_COMMON_API int aws_rw_lock_try_wlock(struct aws_rw_lock *lock); /** * Releases the lock. */ AWS_COMMON_API int aws_rw_lock_runlock(struct aws_rw_lock *lock); AWS_COMMON_API int aws_rw_lock_wunlock(struct aws_rw_lock *lock); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_RW_LOCK_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/statistics.h000066400000000000000000000124761456575232400266020ustar00rootroot00000000000000#ifndef AWS_COMMON_STATISTICS_H #define AWS_COMMON_STATISTICS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_array_list; typedef uint32_t aws_crt_statistics_category_t; /* Each library gets space for 2^^8 category entries */ enum { AWS_CRT_STATISTICS_CATEGORY_STRIDE_BITS = 8, }; #define AWS_CRT_STATISTICS_CATEGORY_STRIDE (1U << AWS_CRT_STATISTICS_CATEGORY_STRIDE_BITS) #define AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(x) ((x)*AWS_CRT_STATISTICS_CATEGORY_STRIDE) #define AWS_CRT_STATISTICS_CATEGORY_END_RANGE(x) (((x) + 1) * AWS_CRT_STATISTICS_CATEGORY_STRIDE - 1) /** * The common-specific range of the aws_crt_statistics_category cross-library enum. * * This enum functions as an RTTI value that lets statistics handler's interpret (via cast) a * specific statistics structure if the RTTI value is understood. * * Common doesn't have any statistics structures presently, so its range is essentially empty. * */ enum aws_crt_common_statistics_category { AWSCRT_STAT_CAT_INVALID = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID) }; /** * Pattern-struct that functions as a base "class" for all statistics structures. To conform * to the pattern, a statistics structure must have its first member be the category. In that * case it becomes "safe" to cast from aws_crt_statistics_base to the specific statistics structure * based on the category value. 
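 *
 * Illustrative sketch of the pattern (the aws_crt_statistics_example type and its
 * AWS_CRT_STAT_CAT_EXAMPLE category value are hypothetical, not definitions from
 * this library):
 *
 *     struct aws_crt_statistics_example {
 *         aws_crt_statistics_category_t category; // must be the first member
 *         uint64_t bytes_read;
 *         uint64_t bytes_written;
 *     };
 *
 *     static void s_process_one(struct aws_crt_statistics_base *base) {
 *         if (base->category == AWS_CRT_STAT_CAT_EXAMPLE) {
 *             struct aws_crt_statistics_example *stats = (struct aws_crt_statistics_example *)base;
 *             // stats->bytes_read and stats->bytes_written are now safe to read
 *         }
 *     }
 *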
*/ struct aws_crt_statistics_base { aws_crt_statistics_category_t category; }; /** * The start and end time, in milliseconds-since-epoch, that a set of statistics was gathered over. */ struct aws_crt_statistics_sample_interval { uint64_t begin_time_ms; uint64_t end_time_ms; }; struct aws_crt_statistics_handler; /* * Statistics intake function. The array_list is a list of pointers to aws_crt_statistics_base "derived" (via * pattern) objects. The handler should iterate the list and downcast elements whose RTTI category it understands, * while skipping those it does not understand. */ typedef void(aws_crt_statistics_handler_process_statistics_fn)( struct aws_crt_statistics_handler *handler, struct aws_crt_statistics_sample_interval *interval, struct aws_array_list *stats, void *context); /* * Destroys a statistics handler implementation */ typedef void(aws_crt_statistics_handler_destroy_fn)(struct aws_crt_statistics_handler *handler); /* * The period, in milliseconds, that the handler would like to be informed of statistics. Statistics generators are * not required to honor this value, but should if able. */ typedef uint64_t(aws_crt_statistics_handler_get_report_interval_ms_fn)(struct aws_crt_statistics_handler *); /** * Vtable for functions that all statistics handlers must implement */ struct aws_crt_statistics_handler_vtable { aws_crt_statistics_handler_process_statistics_fn *process_statistics; aws_crt_statistics_handler_destroy_fn *destroy; aws_crt_statistics_handler_get_report_interval_ms_fn *get_report_interval_ms; }; /** * Base structure for all statistics handler implementations. * * A statistics handler is an object that listens to a stream of polymorphic (via the category RTTI enum) statistics * structures emitted from some arbitrary source. In the initial implementation, statistics handlers are primarily * attached to channels, where they monitor IO throughput and state data (from channel handlers) to determine a * connection's health. * * Statistics handlers are a generalization of the timeout and bandwidth filters that are often associated with * SDK network connections. Configurable, default implementations are defined at the protocol level (http, etc...) * where they can be attached at connection (channel) creation time. */ struct aws_crt_statistics_handler { struct aws_crt_statistics_handler_vtable *vtable; struct aws_allocator *allocator; void *impl; }; AWS_EXTERN_C_BEGIN /** * Submits a list of statistics objects to a statistics handler for processing * * handler - the statistics handler that will process the statistics objects * interval - time period over which the statistics were gathered * stats - list of pointers to structures that can be case to aws_crt_statistics_base (i.e. have category as a first * member) * context - (optional) additional context specific to where the statistics handler has been attached */ AWS_COMMON_API void aws_crt_statistics_handler_process_statistics( struct aws_crt_statistics_handler *handler, struct aws_crt_statistics_sample_interval *interval, struct aws_array_list *stats, void *context); /** * Queries the frequency (via an interval in milliseconds) which a statistics handler would like to be informed * of statistics. */ AWS_COMMON_API uint64_t aws_crt_statistics_handler_get_report_interval_ms(struct aws_crt_statistics_handler *handler); /** * completely destroys a statistics handler. The handler's cleanup function must clean up the impl portion completely * (including its allocation, if done separately). 
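 *
 * Illustrative sketch of a conforming destroy implementation, assuming a handler
 * whose impl was allocated separately with the handler's own allocator (names are
 * hypothetical):
 *
 *     static void s_example_handler_destroy(struct aws_crt_statistics_handler *handler) {
 *         if (handler == NULL) {
 *             return;
 *         }
 *         aws_mem_release(handler->allocator, handler->impl); // impl allocated separately
 *         aws_mem_release(handler->allocator, handler);
 *     }
 *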
*/ AWS_COMMON_API void aws_crt_statistics_handler_destroy(struct aws_crt_statistics_handler *handler); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_STATISTICS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/stdbool.h000066400000000000000000000016261456575232400260510ustar00rootroot00000000000000/* clang-format off */ /* clang-format gets confused by the #define bool line, and gives crazy indenting */ #ifndef AWS_COMMON_STDBOOL_H #define AWS_COMMON_STDBOOL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef NO_STDBOOL # include /* NOLINT(fuchsia-restrict-system-includes) */ #else # ifndef __cplusplus # define bool _Bool # define true 1 # define false 0 # elif defined(__GNUC__) && !defined(__STRICT_ANSI__) # define _Bool bool # if __cplusplus < 201103L /* For C++98, define bool, false, true as a GNU extension. */ # define bool bool # define false false # define true true # endif /* __cplusplus < 201103L */ # endif /* __cplusplus */ #endif /* NO_STDBOOL */ #endif /* AWS_COMMON_STDBOOL_H */ /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/stdint.h000066400000000000000000000050131456575232400257020ustar00rootroot00000000000000#ifndef AWS_COMMON_STDINT_H #define AWS_COMMON_STDINT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #ifndef NO_STDINT # include /* NOLINT(fuchsia-restrict-system-includes) */ /* Android defines SIZE_MAX in limits.h, not stdint.h */ # ifdef ANDROID # include # endif #else # if defined(__x86_64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(__ia64__) || defined(__powerpc64__) # define PTR_SIZE 8 # else # define PTR_SIZE 4 # endif typedef signed char int8_t; typedef short int int16_t; typedef int int32_t; # if (PTR_SIZE == 8) typedef long int int64_t; # else typedef long long int int64_t; # endif /* (PTR_SIZE == 8) */ typedef unsigned char uint8_t; typedef unsigned short int uint16_t; typedef unsigned int uint32_t; # if (PTR_SIZE == 8) typedef unsigned long int uint64_t; # else typedef unsigned long long int uint64_t; # endif /* (PTR_SIZE == 8) */ # if (PTR_SIZE == 8) typedef long int intptr_t; typedef unsigned long int uintptr_t; # else typedef int intptr_t; typedef unsigned int uintptr_t; # endif # if (PTR_SIZE == 8) # define __INT64_C(c) c##L # define __UINT64_C(c) c##UL # else # define __INT64_C(c) c##LL # define __UINT64_C(c) c##ULL # endif # define INT8_MIN (-128) # define INT16_MIN (-32767 - 1) # define INT32_MIN (-2147483647 - 1) # define INT64_MIN (-__INT64_C(9223372036854775807) - 1) # define INT8_MAX (127) # define INT16_MAX (32767) # define INT32_MAX (2147483647) # define INT64_MAX (__INT64_C(9223372036854775807)) # define UINT8_MAX (255) # define UINT16_MAX (65535) # define UINT32_MAX (4294967295U) # define UINT64_MAX (__UINT64_C(18446744073709551615)) AWS_STATIC_ASSERT(sizeof(uint64_t) == 8); AWS_STATIC_ASSERT(sizeof(uint32_t) == 4); AWS_STATIC_ASSERT(sizeof(uint16_t) == 2); AWS_STATIC_ASSERT(sizeof(uint8_t) == 1); AWS_STATIC_ASSERT(sizeof(int64_t) == 8); AWS_STATIC_ASSERT(sizeof(int32_t) == 4); AWS_STATIC_ASSERT(sizeof(int16_t) == 2); AWS_STATIC_ASSERT(sizeof(int8_t) == 1); AWS_STATIC_ASSERT(sizeof(uintptr_t) == sizeof(void *)); AWS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(void *)); AWS_STATIC_ASSERT(sizeof(char) == 1); #endif /* NO_STDINT */ /** * @deprecated Use int64_t instead for offsets in 
public APIs. */ typedef int64_t aws_off_t; #endif /* AWS_COMMON_STDINT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/string.h000066400000000000000000000335101456575232400257060ustar00rootroot00000000000000#ifndef AWS_COMMON_STRING_H #define AWS_COMMON_STRING_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL /** * Represents an immutable string holding either text or binary data. If the * string is in constant memory or memory that should otherwise not be freed by * this struct, set allocator to NULL and destroy function will be a no-op. * * This is for use cases where the entire struct and the data bytes themselves * need to be held in dynamic memory, such as when held by a struct * aws_hash_table. The data bytes themselves are always held in contiguous * memory immediately after the end of the struct aws_string, and the memory for * both the header and the data bytes is allocated together. * * Use the aws_string_bytes function to access the data bytes. A null byte is * always included immediately after the data but not counted in the length, so * that the output of aws_string_bytes can be treated as a C-string in cases * where none of the the data bytes are null. * * Note that the fields of this structure are const; this ensures not only that * they cannot be modified, but also that you can't assign the structure using * the = operator accidentally. */ /* Using a flexible array member is the C99 compliant way to have the bytes of * the string immediately follow the header. * * MSVC doesn't know this for some reason so we need to use a pragma to make * it happy. */ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4623) /* default constructor was implicitly defined as deleted */ # pragma warning(disable : 4626) /* assignment operator was implicitly defined as deleted */ # pragma warning(disable : 5027) /* move assignment operator was implicitly defined as deleted */ #endif struct aws_string { struct aws_allocator *const allocator; /* size in bytes of `bytes` minus any null terminator. * NOTE: This is not the number of characters in the string. */ const size_t len; /* give this a storage specifier for C++ purposes. It will likely be larger after init. */ const uint8_t bytes[1]; }; #ifdef AWS_OS_WINDOWS struct aws_wstring { struct aws_allocator *const allocator; /* number of characters in the string not including the null terminator. */ const size_t len; /* give this a storage specifier for C++ purposes. It will likely be larger after init. */ const wchar_t bytes[1]; }; #endif /* AWS_OS_WINDOWS */ #ifdef _MSC_VER # pragma warning(pop) #endif AWS_EXTERN_C_BEGIN #ifdef AWS_OS_WINDOWS /** * For windows only. Converts `to_convert` to a windows whcar format (UTF-16) for use with windows OS interop. * * Note: `to_convert` is assumed to be UTF-8 or ASCII. * * returns NULL on failure. */ AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wstring( struct aws_allocator *allocator, const struct aws_string *to_convert); /** * For windows only. Converts `to_convert` to a windows whcar format (UTF-16) for use with windows OS interop. * * Note: `to_convert` is assumed to be UTF-8 or ASCII. * * returns NULL on failure. */ AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wchar_from_byte_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor *to_convert); /** * clean up str. 
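 *
 * Illustrative sketch (Windows-only; `path_str` and the consuming call are
 * hypothetical): convert a UTF-8 string to UTF-16 for an OS call, then destroy the
 * temporary wide string.
 *
 *     struct aws_wstring *w_path = aws_string_convert_to_wstring(allocator, path_str);
 *     if (w_path != NULL) {
 *         s_some_windows_call(aws_wstring_c_str(w_path)); // hypothetical wchar_t * consumer
 *         aws_wstring_destroy(w_path);
 *     }
 *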
*/ AWS_COMMON_API void aws_wstring_destroy(struct aws_wstring *str); /** * For windows only. Converts `to_convert` from a windows whcar format (UTF-16) to UTF-8. * * Note: `to_convert` is assumed to be wchar already. * * returns NULL on failure. */ AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_str( struct aws_allocator *allocator, const struct aws_wstring *to_convert); /** * For windows only. Converts `to_convert` from a windows whcar format (UTF-16) to UTF-8. * * Note: `to_convert` is assumed to be wchar already. * * returns NULL on failure. */ AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_byte_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor *to_convert); /** * For windows only. Converts `to_convert` from a windows whcar format (UTF-16) to UTF-8. * * Note: `to_convert` is assumed to be wchar already. * * returns NULL on failure. */ AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_c_str( struct aws_allocator *allocator, const wchar_t *to_convert); /** * Create a new wide string from a byte cursor. This assumes that w_str_cur is already in utf-16. * * returns NULL on failure. */ AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor *w_str_cur); /** * Create a new wide string from a utf-16 string enclosing array. The length field is in number of characters not * counting the null terminator. * * returns NULL on failure. */ AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_array( struct aws_allocator *allocator, const wchar_t *w_str, size_t length); /** * Returns a wchar_t * pointer for use with windows OS interop. */ AWS_COMMON_API const wchar_t *aws_wstring_c_str(const struct aws_wstring *str); /** * Returns the number of characters in the wchar string. NOTE: This is not the length in bytes or the buffer size. */ AWS_COMMON_API size_t aws_wstring_num_chars(const struct aws_wstring *str); /** * Returns the length in bytes for the buffer. */ AWS_COMMON_API size_t aws_wstring_size_bytes(const struct aws_wstring *str); /** * Verifies that str is a valid string. Returns true if it's valid and false otherwise. */ AWS_COMMON_API bool aws_wstring_is_valid(const struct aws_wstring *str); #endif /* AWS_OS_WINDOWS */ /** * Returns true if bytes of string are the same, false otherwise. */ AWS_COMMON_API bool aws_string_eq(const struct aws_string *a, const struct aws_string *b); /** * Returns true if bytes of string are equivalent, using a case-insensitive comparison. */ AWS_COMMON_API bool aws_string_eq_ignore_case(const struct aws_string *a, const struct aws_string *b); /** * Returns true if bytes of string and cursor are the same, false otherwise. */ AWS_COMMON_API bool aws_string_eq_byte_cursor(const struct aws_string *str, const struct aws_byte_cursor *cur); /** * Returns true if bytes of string and cursor are equivalent, using a case-insensitive comparison. */ AWS_COMMON_API bool aws_string_eq_byte_cursor_ignore_case(const struct aws_string *str, const struct aws_byte_cursor *cur); /** * Returns true if bytes of string and buffer are the same, false otherwise. */ AWS_COMMON_API bool aws_string_eq_byte_buf(const struct aws_string *str, const struct aws_byte_buf *buf); /** * Returns true if bytes of string and buffer are equivalent, using a case-insensitive comparison. 
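 *
 * Illustrative comparison sketch (not part of the original header):
 *
 *     struct aws_string *header_name = aws_string_new_from_c_str(allocator, "Content-Length");
 *     if (aws_string_eq_c_str_ignore_case(header_name, "content-length")) {
 *         // case-insensitive match
 *     }
 *     aws_string_destroy(header_name);
 *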
*/ AWS_COMMON_API bool aws_string_eq_byte_buf_ignore_case(const struct aws_string *str, const struct aws_byte_buf *buf); AWS_COMMON_API bool aws_string_eq_c_str(const struct aws_string *str, const char *c_str); /** * Returns true if bytes of strings are equivalent, using a case-insensitive comparison. */ AWS_COMMON_API bool aws_string_eq_c_str_ignore_case(const struct aws_string *str, const char *c_str); /** * Constructor functions which copy data from null-terminated C-string or array of bytes. */ AWS_COMMON_API struct aws_string *aws_string_new_from_c_str(struct aws_allocator *allocator, const char *c_str); /** * Allocate a new string with the same contents as array. */ AWS_COMMON_API struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, const uint8_t *bytes, size_t len); /** * Allocate a new string with the same contents as another string. */ AWS_COMMON_API struct aws_string *aws_string_new_from_string(struct aws_allocator *allocator, const struct aws_string *str); /** * Allocate a new string with the same contents as cursor. */ AWS_COMMON_API struct aws_string *aws_string_new_from_cursor(struct aws_allocator *allocator, const struct aws_byte_cursor *cursor); /** * Allocate a new string with the same contents as buf. */ AWS_COMMON_API struct aws_string *aws_string_new_from_buf(struct aws_allocator *allocator, const struct aws_byte_buf *buf); /** * Deallocate string. */ AWS_COMMON_API void aws_string_destroy(struct aws_string *str); /** * Zeroes out the data bytes of string and then deallocates the memory. * Not safe to run on a string created with AWS_STATIC_STRING_FROM_LITERAL. */ AWS_COMMON_API void aws_string_destroy_secure(struct aws_string *str); /** * Compares lexicographical ordering of two strings. This is a binary * byte-by-byte comparison, treating bytes as unsigned integers. It is suitable * for either textual or binary data and is unaware of unicode or any other byte * encoding. If both strings are identical in the bytes of the shorter string, * then the longer string is lexicographically after the shorter. * * Returns a positive number if string a > string b. (i.e., string a is * lexicographically after string b.) Returns zero if string a = string b. * Returns negative number if string a < string b. */ AWS_COMMON_API int aws_string_compare(const struct aws_string *a, const struct aws_string *b); /** * A convenience function for sorting lists of (const struct aws_string *) elements. This can be used as a * comparator for aws_array_list_sort. It is just a simple wrapper around aws_string_compare. */ AWS_COMMON_API int aws_array_list_comparator_string(const void *a, const void *b); /** * Defines a (static const struct aws_string *) with name specified in first * argument that points to constant memory and has data bytes containing the * string literal in the second argument. 
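 *
 * Typical usage looks like the following sketch (the identifier and literal are
 * illustrative):
 *
 *     AWS_STATIC_STRING_FROM_LITERAL(s_user_agent, "aws-crt-python");
 *     // s_user_agent can now be passed wherever a (const struct aws_string *) is expected
 *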
* * GCC allows direct initilization of structs with variable length final fields * However, this might not be portable, so we can do this instead * This will have to be updated whenever the aws_string structure changes */ #define AWS_STATIC_STRING_FROM_LITERAL(name, literal) \ static const struct { \ struct aws_allocator *const allocator; \ const size_t len; \ const uint8_t bytes[sizeof(literal)]; \ } name##_s = {NULL, sizeof(literal) - 1, literal}; \ static const struct aws_string *name = (struct aws_string *)(&name##_s) /* NOLINT(bugprone-macro-parentheses) */ /* NOLINT above is because clang-tidy complains that (name) isn't in parentheses, * but gcc8-c++ complains that the parentheses are unnecessary */ /* * A related macro that declares the string pointer without static, allowing it to be externed as a global constant */ #define AWS_STRING_FROM_LITERAL(name, literal) \ static const struct { \ struct aws_allocator *const allocator; \ const size_t len; \ const uint8_t bytes[sizeof(literal)]; \ } name##_s = {NULL, sizeof(literal) - 1, literal}; \ const struct aws_string *(name) = (struct aws_string *)(&name##_s) /** * Copies all bytes from string to buf. * * On success, returns true and updates the buf pointer/length * accordingly. If there is insufficient space in the buf, returns * false, leaving the buf unchanged. */ AWS_COMMON_API bool aws_byte_buf_write_from_whole_string( struct aws_byte_buf *AWS_RESTRICT buf, const struct aws_string *AWS_RESTRICT src); /** * Creates an aws_byte_cursor from an existing string. */ AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_from_string(const struct aws_string *src); /** * If the string was dynamically allocated, clones it. If the string was statically allocated (i.e. has no allocator), * returns the original string. */ AWS_COMMON_API struct aws_string *aws_string_clone_or_reuse(struct aws_allocator *allocator, const struct aws_string *str); /** Computes the length of a c string in bytes assuming the character set is either ASCII or UTF-8. If no NULL character * is found within max_read_len of str, AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED is raised. Otherwise, str_len * will contain the string length minus the NULL character, and AWS_OP_SUCCESS will be returned. */ AWS_COMMON_API int aws_secure_strlen(const char *str, size_t max_read_len, size_t *str_len); /** * Equivalent to str->bytes. */ AWS_STATIC_IMPL const uint8_t *aws_string_bytes(const struct aws_string *str); /** * Equivalent to `(const char *)str->bytes`. */ AWS_STATIC_IMPL const char *aws_string_c_str(const struct aws_string *str); /** * Evaluates the set of properties that define the shape of all valid aws_string structures. * It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion). */ AWS_STATIC_IMPL bool aws_string_is_valid(const struct aws_string *str); /** * Best-effort checks aws_string invariants, when the str->len is unknown */ AWS_STATIC_IMPL bool aws_c_string_is_valid(const char *str); /** * Evaluates if a char is a white character. */ AWS_STATIC_IMPL bool aws_char_is_space(uint8_t c); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_STRING_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/string.inl000066400000000000000000000032741456575232400262450ustar00rootroot00000000000000#ifndef AWS_COMMON_STRING_INL #define AWS_COMMON_STRING_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_EXTERN_C_BEGIN /** * Equivalent to str->bytes. */ AWS_STATIC_IMPL const uint8_t *aws_string_bytes(const struct aws_string *str) { AWS_PRECONDITION(aws_string_is_valid(str)); return str->bytes; } /** * Equivalent to `(const char *)str->bytes`. */ AWS_STATIC_IMPL const char *aws_string_c_str(const struct aws_string *str) { AWS_PRECONDITION(aws_string_is_valid(str)); return (const char *)str->bytes; } /** * Evaluates the set of properties that define the shape of all valid aws_string structures. * It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion). */ AWS_STATIC_IMPL bool aws_string_is_valid(const struct aws_string *str) { return str && AWS_MEM_IS_READABLE(&str->bytes[0], str->len + 1) && str->bytes[str->len] == 0; } /** * Best-effort checks aws_string invariants, when the str->len is unknown */ AWS_STATIC_IMPL bool aws_c_string_is_valid(const char *str) { /* Knowing the actual length to check would require strlen(), which is * a) linear time in the length of the string * b) could already cause a memory violation for a non-zero-terminated string. * But we know that a c-string must have at least one character, to store the null terminator */ return str && AWS_MEM_IS_READABLE(str, 1); } /** * Evaluates if a char is a white character. */ AWS_STATIC_IMPL bool aws_char_is_space(uint8_t c) { return aws_isspace(c); } AWS_EXTERN_C_END #endif /* AWS_COMMON_STRING_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/system_info.h000066400000000000000000000117531456575232400267440ustar00rootroot00000000000000#ifndef AWS_COMMON_SYSTEM_INFO_H #define AWS_COMMON_SYSTEM_INFO_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_platform_os { AWS_PLATFORM_OS_WINDOWS, AWS_PLATFORM_OS_MAC, AWS_PLATFORM_OS_UNIX, }; struct aws_cpu_info { int32_t cpu_id; bool suspected_hyper_thread; }; struct aws_system_environment; AWS_EXTERN_C_BEGIN /** * Allocates and initializes information about the system the current process is executing on. * If successful returns an instance of aws_system_environment. If it fails, it will return NULL. * * Note: This api is used internally and is still early in its evolution. * It may change in incompatible ways in the future. */ AWS_COMMON_API struct aws_system_environment *aws_system_environment_load(struct aws_allocator *allocator); AWS_COMMON_API struct aws_system_environment *aws_system_environment_acquire(struct aws_system_environment *env); AWS_COMMON_API void aws_system_environment_release(struct aws_system_environment *env); /** * Returns the virtualization vendor for the specified compute environment, e.g. "Xen, Amazon EC2, etc..." * * The return value may be empty and in that case no vendor was detected. */ AWS_COMMON_API struct aws_byte_cursor aws_system_environment_get_virtualization_vendor(const struct aws_system_environment *env); /** * Returns the product name for the specified compute environment. For example, the Amazon EC2 Instance type. * * The return value may be empty and in that case no vendor was detected. */ AWS_COMMON_API struct aws_byte_cursor aws_system_environment_get_virtualization_product_name(const struct aws_system_environment *env); /** * Returns the number of processors for the specified compute environment. 
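 *
 * Illustrative sketch (not part of the original header) of loading the environment,
 * querying it, and releasing it:
 *
 *     struct aws_system_environment *env = aws_system_environment_load(allocator);
 *     if (env != NULL) {
 *         size_t processors = aws_system_environment_get_processor_count(env);
 *         size_t groups = aws_system_environment_get_cpu_group_count(env);
 *         (void)processors;
 *         (void)groups;
 *         aws_system_environment_release(env);
 *     }
 *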
*/ AWS_COMMON_API size_t aws_system_environment_get_processor_count(struct aws_system_environment *env); /** * Returns the number of separate cpu groupings (multi-socket configurations or NUMA). */ AWS_COMMON_API size_t aws_system_environment_get_cpu_group_count(const struct aws_system_environment *env); /* Returns the OS this was built under */ AWS_COMMON_API enum aws_platform_os aws_get_platform_build_os(void); /* Returns the number of online processors available for usage. */ AWS_COMMON_API size_t aws_system_info_processor_count(void); /** * Returns the logical processor groupings on the system (such as multiple numa nodes). */ AWS_COMMON_API uint16_t aws_get_cpu_group_count(void); /** * For a group, returns the number of CPUs it contains. */ AWS_COMMON_API size_t aws_get_cpu_count_for_group(uint16_t group_idx); /** * Fills in cpu_ids_array with the cpu_id's for the group. To obtain the size to allocate for cpu_ids_array * and the value for argument for cpu_ids_array_length, call aws_get_cpu_count_for_group(). */ AWS_COMMON_API void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length); /* Returns true if a debugger is currently attached to the process. */ AWS_COMMON_API bool aws_is_debugger_present(void); /* If a debugger is attached to the process, trip a breakpoint. */ AWS_COMMON_API void aws_debug_break(void); #if defined(AWS_HAVE_EXECINFO) || defined(_WIN32) || defined(__APPLE__) # define AWS_BACKTRACE_STACKS_AVAILABLE #endif /* * Records a stack trace from the call site. * Returns the number of stack entries/stack depth captured, or 0 if the operation * is not supported on this platform */ AWS_COMMON_API size_t aws_backtrace(void **stack_frames, size_t num_frames); /* * Converts stack frame pointers to symbols, if symbols are available * Returns an array up to stack_depth long, that needs to be free()ed. * stack_depth should be the length of frames. * Returns NULL if the platform does not support stack frame translation * or an error occurs */ char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth); /* * Converts stack frame pointers to symbols, using all available system * tools to try to produce a human readable result. This call will not be * quick, as it shells out to addr2line or similar tools. * On Windows, this is the same as aws_backtrace_symbols() * Returns an array up to stack_depth long that needs to be free()ed. Missing * frames will be NULL. * Returns NULL if the platform does not support stack frame translation * or an error occurs */ char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth); /** * Print a backtrace from either the current stack, or (if provided) the current exception/signal * call_site_data is siginfo_t* on POSIX, and LPEXCEPTION_POINTERS on Windows, and can be null */ AWS_COMMON_API void aws_backtrace_print(FILE *fp, void *call_site_data); /* Log the callstack from the current stack to the currently configured aws_logger */ AWS_COMMON_API void aws_backtrace_log(int log_level); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_SYSTEM_INFO_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/system_resource_util.h000066400000000000000000000014351456575232400306710ustar00rootroot00000000000000#ifndef AWS_COMMON_SYSTEM_RESOURCE_UTIL_H #define AWS_COMMON_SYSTEM_RESOURCE_UTIL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN struct aws_memory_usage_stats { size_t maxrss; /* max resident set size in kilobytes since program start */ size_t page_faults; /* num of page faults since program start */ size_t _reserved[8]; }; /* * Get memory usage for current process. * Raises AWS_ERROR_SYS_CALL_FAILURE on failure. */ AWS_COMMON_API int aws_init_memory_usage_for_current_process(struct aws_memory_usage_stats *memory_usage); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_SYSTEM_RESOURCE_UTIL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/task_scheduler.h000066400000000000000000000076121456575232400274040ustar00rootroot00000000000000#ifndef AWS_COMMON_TASK_SCHEDULER_H #define AWS_COMMON_TASK_SCHEDULER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_task; typedef enum aws_task_status { AWS_TASK_STATUS_RUN_READY, AWS_TASK_STATUS_CANCELED, } aws_task_status; /** * A scheduled function. */ typedef void(aws_task_fn)(struct aws_task *task, void *arg, enum aws_task_status); /* * A task object. * Once added to the scheduler, a task must remain in memory until its function is executed. */ struct aws_task { aws_task_fn *fn; void *arg; uint64_t timestamp; struct aws_linked_list_node node; struct aws_priority_queue_node priority_queue_node; const char *type_tag; /* honor the ABI compat */ union { bool scheduled; size_t reserved; } abi_extension; }; struct aws_task_scheduler { struct aws_allocator *alloc; struct aws_priority_queue timed_queue; /* Tasks scheduled to run at specific times */ struct aws_linked_list timed_list; /* If timed_queue runs out of memory, further timed tests are stored here */ struct aws_linked_list asap_list; /* Tasks scheduled to run as soon as possible */ }; AWS_EXTERN_C_BEGIN /** * Init an aws_task */ AWS_COMMON_API void aws_task_init(struct aws_task *task, aws_task_fn *fn, void *arg, const char *type_tag); /* * Runs or cancels a task */ AWS_COMMON_API void aws_task_run(struct aws_task *task, enum aws_task_status status); /** * Initializes a task scheduler instance. */ AWS_COMMON_API int aws_task_scheduler_init(struct aws_task_scheduler *scheduler, struct aws_allocator *alloc); /** * Empties and executes all queued tasks, passing the AWS_TASK_STATUS_CANCELED status to the task function. * Cleans up any memory allocated, and prepares the instance for reuse or deletion. */ AWS_COMMON_API void aws_task_scheduler_clean_up(struct aws_task_scheduler *scheduler); AWS_COMMON_API bool aws_task_scheduler_is_valid(const struct aws_task_scheduler *scheduler); /** * Returns whether the scheduler has any scheduled tasks. * next_task_time (optional) will be set to time of the next task, note that 0 will be set if tasks were * added via aws_task_scheduler_schedule_now() and UINT64_MAX will be set if no tasks are scheduled at all. */ AWS_COMMON_API bool aws_task_scheduler_has_tasks(const struct aws_task_scheduler *scheduler, uint64_t *next_task_time); /** * Schedules a task to run immediately. * The task should not be cleaned up or modified until its function is executed. */ AWS_COMMON_API void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struct aws_task *task); /** * Schedules a task to run at time_to_run. * The task should not be cleaned up or modified until its function is executed. 
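 *
 * Illustrative sketch (function, task, and timestamp names are hypothetical):
 *
 *     static void s_on_timer(struct aws_task *task, void *arg, enum aws_task_status status) {
 *         if (status == AWS_TASK_STATUS_RUN_READY) {
 *             // do the scheduled work; AWS_TASK_STATUS_CANCELED means clean up only
 *         }
 *     }
 *
 *     aws_task_init(&my_task, s_on_timer, my_arg, "example_timer");
 *     aws_task_scheduler_schedule_future(&scheduler, &my_task, run_at_timestamp);
 *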
*/ AWS_COMMON_API void aws_task_scheduler_schedule_future( struct aws_task_scheduler *scheduler, struct aws_task *task, uint64_t time_to_run); /** * Removes task from the scheduler and invokes the task with the AWS_TASK_STATUS_CANCELED status. */ AWS_COMMON_API void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct aws_task *task); /** * Sequentially execute all tasks scheduled to run at, or before current_time. * AWS_TASK_STATUS_RUN_READY will be passed to the task function as the task status. * * If a task schedules another task, the new task will not be executed until the next call to this function. */ AWS_COMMON_API void aws_task_scheduler_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time); /** * Convert a status value to a c-string suitable for logging */ AWS_COMMON_API const char *aws_task_status_to_c_str(enum aws_task_status status); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_TASK_SCHEDULER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/thread.h000066400000000000000000000235771456575232400256630ustar00rootroot00000000000000#ifndef AWS_COMMON_THREAD_H #define AWS_COMMON_THREAD_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #ifndef _WIN32 # include #endif AWS_PUSH_SANE_WARNING_LEVEL enum aws_thread_detach_state { AWS_THREAD_NOT_CREATED = 1, AWS_THREAD_JOINABLE, AWS_THREAD_JOIN_COMPLETED, AWS_THREAD_MANAGED, }; /** * Specifies the join strategy used on an aws_thread, which in turn controls whether or not a thread participates * in the managed thread system. The managed thread system provides logic to guarantee a join on all participating * threads at the cost of laziness (the user cannot control when joins happen). * * Manual - thread does not participate in the managed thread system; any joins must be done by the user. This * is the default. The user must call aws_thread_clean_up(), but only after any desired join operation has completed. * Not doing so will cause the windows handle to leak. * * Managed - the managed thread system will automatically perform a join some time after the thread's run function * has completed. It is an error to call aws_thread_join on a thread configured with the managed join strategy. The * managed thread system will call aws_thread_clean_up() on the thread after the background join has completed. * * Additionally, an API exists, aws_thread_join_all_managed(), which blocks and returns when all outstanding threads * with the managed strategy have fully joined. This API is useful for tests (rather than waiting for many individual * signals) and program shutdown or DLL unload. This API is automatically invoked by the common library clean up * function. If the common library clean up is called from a managed thread, this will cause deadlock. * * Lazy thread joining is done only when threads finish their run function or when the user calls * aws_thread_join_all_managed(). This means it may be a long time between thread function completion and the join * being applied, but the queue of unjoined threads is always one or fewer so there is no critical resource * backlog. * * Currently, only event loop group async cleanup and host resolver threads participate in the managed thread system. 
* Additionally, event loop threads will increment and decrement the pending join count (they are manually joined * internally) in order to have an accurate view of internal thread usage and also to prevent failure to release * an event loop group fully from allowing aws_thread_join_all_managed() from running to completion when its * intent is such that it should block instead. */ enum aws_thread_join_strategy { AWS_TJS_MANUAL = 0, AWS_TJS_MANAGED, }; /** * Thread names should be 15 characters or less. * Longer names will not display on Linux. * This length does not include a null terminator. */ #define AWS_THREAD_NAME_RECOMMENDED_STRLEN 15 struct aws_thread_options { size_t stack_size; /* default is -1. If you set this to anything >= 0, and the platform supports it, the thread will be pinned to * that cpu. Also, we assume you're doing this for memory throughput purposes. On unix systems, * If libnuma.so is available, upon the thread launching, the memory policy for that thread will be set to * allocate on the numa node that cpu-core is on. * * On windows, this will cause the thread affinity to be set, but currently we don't do anything to tell the OS * how to allocate memory on a node. * * On Apple and Android platforms, this setting doesn't do anything at all. */ int32_t cpu_id; enum aws_thread_join_strategy join_strategy; /** * Thread name, for debugging purpose. * The length should not exceed AWS_THREAD_NAME_RECOMMENDED_STRLEN(15) * if you want it to display properly on all platforms. */ struct aws_byte_cursor name; }; #ifdef _WIN32 typedef union { void *ptr; } aws_thread_once; # define AWS_THREAD_ONCE_STATIC_INIT \ { NULL } typedef unsigned long aws_thread_id_t; #else typedef pthread_once_t aws_thread_once; # define AWS_THREAD_ONCE_STATIC_INIT PTHREAD_ONCE_INIT typedef pthread_t aws_thread_id_t; #endif /* * Buffer size needed to represent aws_thread_id_t as a string (2 hex chars per byte * plus '\0' terminator). Needed for portable printing because pthread_t is * opaque. */ #define AWS_THREAD_ID_T_REPR_BUFSZ (sizeof(aws_thread_id_t) * 2 + 1) struct aws_thread { struct aws_allocator *allocator; enum aws_thread_detach_state detach_state; #ifdef _WIN32 void *thread_handle; #endif aws_thread_id_t thread_id; }; AWS_EXTERN_C_BEGIN /** * Returns an instance of system default thread options. */ AWS_COMMON_API const struct aws_thread_options *aws_default_thread_options(void); AWS_COMMON_API void aws_thread_call_once(aws_thread_once *flag, void (*call_once)(void *), void *user_data); /** * Initializes a new platform specific thread object struct (not the os-level * thread itself). */ AWS_COMMON_API int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator); /** * Creates an OS level thread and associates it with func. context will be passed to func when it is executed. * options will be applied to the thread if they are applicable for the platform. * * After launch, you may join on the thread. A successfully launched thread must have clean_up called on it in order * to avoid a handle leak. If you do not join before calling clean_up, the thread will become detached. * * Managed threads must not have join or clean_up called on them by external code. */ AWS_COMMON_API int aws_thread_launch( struct aws_thread *thread, void (*func)(void *arg), void *arg, const struct aws_thread_options *options); /** * Gets the id of thread */ AWS_COMMON_API aws_thread_id_t aws_thread_get_id(struct aws_thread *thread); /** * Gets the detach state of the thread. 
For example, is it safe to call join on * this thread? Has it been detached()? */ AWS_COMMON_API enum aws_thread_detach_state aws_thread_get_detach_state(struct aws_thread *thread); /** * Joins the calling thread to a thread instance. Returns when thread is * finished. Calling this from the associated OS thread will cause a deadlock. */ AWS_COMMON_API int aws_thread_join(struct aws_thread *thread); /** * Blocking call that waits for all managed threads to complete their join call. This can only be called * from the main thread or a non-managed thread. * * This gets called automatically from library cleanup. * * By default the wait is unbounded, but that default can be overridden via aws_thread_set_managed_join_timeout_ns() */ AWS_COMMON_API int aws_thread_join_all_managed(void); /** * Overrides how long, in nanoseconds, that aws_thread_join_all_managed will wait for threads to complete. * A value of zero will result in an unbounded wait. */ AWS_COMMON_API void aws_thread_set_managed_join_timeout_ns(uint64_t timeout_in_ns); /** * Cleans up the thread handle. Don't call this on a managed thread. If you wish to join the thread, you must join * before calling this function. */ AWS_COMMON_API void aws_thread_clean_up(struct aws_thread *thread); /** * Returns the thread id of the calling thread. */ AWS_COMMON_API aws_thread_id_t aws_thread_current_thread_id(void); /** * Compare thread ids. */ AWS_COMMON_API bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2); /** * Sleeps the current thread by nanos. */ AWS_COMMON_API void aws_thread_current_sleep(uint64_t nanos); typedef void(aws_thread_atexit_fn)(void *user_data); /** * Adds a callback to the chain to be called when the current thread joins. * Callbacks are called from the current thread, in the reverse order they * were added, after the thread function returns. * If not called from within an aws_thread, has no effect. */ AWS_COMMON_API int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data); /** * Increments the count of unjoined threads in the managed thread system. Used by managed threads and * event loop threads. Additional usage requires the user to join corresponding threads themselves and * correctly increment/decrement even in the face of launch/join errors. * * aws_thread_join_all_managed() will not return until this count has gone to zero. */ AWS_COMMON_API void aws_thread_increment_unjoined_count(void); /** * Decrements the count of unjoined threads in the managed thread system. Used by managed threads and * event loop threads. Additional usage requires the user to join corresponding threads themselves and * correctly increment/decrement even in the face of launch/join errors. * * aws_thread_join_all_managed() will not return until this count has gone to zero. */ AWS_COMMON_API void aws_thread_decrement_unjoined_count(void); /** * Gets name of the current thread. * Caller is responsible for destroying returned string. * If thread does not have a name, AWS_OP_SUCCESS is returned and out_name is * set to NULL. * If underlying OS call fails, AWS_ERROR_SYS_CALL_FAILURE will be raised * If OS does not support getting thread name, AWS_ERROR_PLATFORM_NOT_SUPPORTED * will be raised */ AWS_COMMON_API int aws_thread_current_name(struct aws_allocator *allocator, struct aws_string **out_name); /** * Gets name of the thread. * Caller is responsible for destroying returned string. * If thread does not have a name, AWS_OP_SUCCESS is returned and out_name is * set to NULL. 
* If underlying OS call fails, AWS_ERROR_SYS_CALL_FAILURE will be raised * If OS does not support getting thread name, AWS_ERROR_PLATFORM_NOT_SUPPORTED * will be raised */ AWS_COMMON_API int aws_thread_name( struct aws_allocator *allocator, aws_thread_id_t thread_id, struct aws_string **out_name); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_THREAD_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/thread_scheduler.h000066400000000000000000000037231456575232400277100ustar00rootroot00000000000000#ifndef AWS_COMMON_THREAD_SCHEDULER_H #define AWS_COMMON_THREAD_SCHEDULER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_thread_scheduler; struct aws_thread_options; struct aws_task; AWS_EXTERN_C_BEGIN /** * Creates a new instance of a thread scheduler. This object receives scheduled tasks and executes them inside a * background thread. On success, this function returns an instance with a ref-count of 1. On failure it returns NULL. * * thread_options are optional. * * The semantics of this interface conform to the semantics of aws_task_scheduler. */ AWS_COMMON_API struct aws_thread_scheduler *aws_thread_scheduler_new( struct aws_allocator *allocator, const struct aws_thread_options *thread_options); /** * Acquire a reference to the scheduler. */ AWS_COMMON_API void aws_thread_scheduler_acquire(struct aws_thread_scheduler *scheduler); /** * Release a reference to the scheduler. */ AWS_COMMON_API void aws_thread_scheduler_release(const struct aws_thread_scheduler *scheduler); /** * Schedules a task to run in the future. time_to_run is the absolute time from the system hw_clock. */ AWS_COMMON_API void aws_thread_scheduler_schedule_future( struct aws_thread_scheduler *scheduler, struct aws_task *task, uint64_t time_to_run); /** * Schedules a task to run as soon as possible. */ AWS_COMMON_API void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task); /** * Cancel a task that has been scheduled. The cancellation callback will be invoked in the background thread. * This function is slow, so please don't do it in the hot path for your code. */ AWS_COMMON_API void aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_THREAD_SCHEDULER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/time.h000066400000000000000000000012611456575232400253340ustar00rootroot00000000000000#ifndef AWS_COMMON_TIME_H #define AWS_COMMON_TIME_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * Cross platform friendly version of timegm */ AWS_COMMON_API time_t aws_timegm(struct tm *const t); /** * Cross platform friendly version of localtime_r */ AWS_COMMON_API void aws_localtime(time_t time, struct tm *t); /** * Cross platform friendly version of gmtime_r */ AWS_COMMON_API void aws_gmtime(time_t time, struct tm *t); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_TIME_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/uri.h000066400000000000000000000157041456575232400252040ustar00rootroot00000000000000#ifndef AWS_COMMON_URI_H #define AWS_COMMON_URI_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL /** * Data representing a URI. uri_str is always allocated and filled in. * The other portions are merely storing offsets into uri_str. */ struct aws_uri { size_t self_size; struct aws_allocator *allocator; struct aws_byte_buf uri_str; struct aws_byte_cursor scheme; struct aws_byte_cursor authority; struct aws_byte_cursor userinfo; struct aws_byte_cursor user; struct aws_byte_cursor password; struct aws_byte_cursor host_name; uint32_t port; struct aws_byte_cursor path; struct aws_byte_cursor query_string; struct aws_byte_cursor path_and_query; }; /** * key/value pairs for a query string. If the query fragment was not in format key=value, the fragment value * will be stored in key */ struct aws_uri_param { struct aws_byte_cursor key; struct aws_byte_cursor value; }; /** * Arguments for building a URI instance. All members must * be initialized before passing them to aws_uri_init(). * * query_string and query_params are exclusive to each other. If you set * query_string, do not prepend it with '?' */ struct aws_uri_builder_options { struct aws_byte_cursor scheme; struct aws_byte_cursor path; struct aws_byte_cursor host_name; uint32_t port; struct aws_array_list *query_params; struct aws_byte_cursor query_string; }; AWS_EXTERN_C_BEGIN /** * Parses 'uri_str' and initializes uri. Returns AWS_OP_SUCCESS, on success, AWS_OP_ERR on failure. * After calling this function, the parts can be accessed. */ AWS_COMMON_API int aws_uri_init_parse( struct aws_uri *uri, struct aws_allocator *allocator, const struct aws_byte_cursor *uri_str); /** * Initializes uri to values specified in options. Returns AWS_OP_SUCCESS, on success, AWS_OP_ERR on failure. * After calling this function, the parts can be accessed. */ AWS_COMMON_API int aws_uri_init_from_builder_options( struct aws_uri *uri, struct aws_allocator *allocator, struct aws_uri_builder_options *options); AWS_COMMON_API void aws_uri_clean_up(struct aws_uri *uri); /** * Returns the scheme portion of the uri (e.g. http, https, ftp, ftps, etc...). If the scheme was not present * in the uri, the returned value will be empty. It is the users job to determine the appropriate defaults * if this field is empty, based on protocol, port, etc... */ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_scheme(const struct aws_uri *uri); /** * Returns the authority portion of the uri (host[:port]). If it was not present, this was a request uri. In that * case, the value will be empty. */ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_authority(const struct aws_uri *uri); /** * Returns the path portion of the uri, including any leading '/'. If not present, this value will be empty. */ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_path(const struct aws_uri *uri); /** * Returns the query string portion of the uri, minus the '?'. If not present, this value will be empty. */ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_query_string(const struct aws_uri *uri); /** * Returns the 'host_name' portion of the authority. If no authority was present, this value will be empty. */ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_host_name(const struct aws_uri *uri); /** * Returns the port portion of the authority if it was present, otherwise, returns 0. * If this is 0, it is the users job to determine the correct port based on scheme and protocol. 
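 *
 * For example (illustrative only): after parsing "https://example.com/index.html" with aws_uri_init_parse(),
 * aws_uri_port() returns 0, and a caller would typically fall back to 443 because the scheme is "https".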
*/ AWS_COMMON_API uint32_t aws_uri_port(const struct aws_uri *uri); /** * Returns the path and query portion of the uri (i.e., the thing you send across the wire). */ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_path_and_query(const struct aws_uri *uri); /** * For iterating over the params in the query string. * `param` is an in/out argument used to track progress, it MUST be zeroed out to start. * If true is returned, `param` contains the value of the next param. * If false is returned, there are no further params. * * Edge cases: * 1) Entries without '=' sign are treated as having a key and no value. * Example: First param in query string "a&b=c" has key="a" value="" * * 2) Blank entries are skipped. * Example: The only param in query string "&&a=b" is key="a" value="b" */ AWS_COMMON_API bool aws_query_string_next_param(struct aws_byte_cursor query_string, struct aws_uri_param *param); /** * Parses query string and stores the parameters in 'out_params'. Returns AWS_OP_SUCCESS on success and * AWS_OP_ERR on failure. The user is responsible for initializing out_params with item size of struct aws_query_param. * The user is also responsible for cleaning up out_params when finished. */ AWS_COMMON_API int aws_query_string_params(struct aws_byte_cursor query_string, struct aws_array_list *out_params); /** * For iterating over the params in the uri query string. * `param` is an in/out argument used to track progress, it MUST be zeroed out to start. * If true is returned, `param` contains the value of the next param. * If false is returned, there are no further params. * * Edge cases: * 1) Entries without '=' sign are treated as having a key and no value. * Example: First param in query string "a&b=c" has key="a" value="" * * 2) Blank entries are skipped. * Example: The only param in query string "&&a=b" is key="a" value="b" */ AWS_COMMON_API bool aws_uri_query_string_next_param(const struct aws_uri *uri, struct aws_uri_param *param); /** * Parses query string and stores the parameters in 'out_params'. Returns AWS_OP_SUCCESS on success and * AWS_OP_ERR on failure. The user is responsible for initializing out_params with item size of struct aws_query_param. * The user is also responsible for cleaning up out_params when finished. */ AWS_COMMON_API int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list *out_params); /** * Writes the uri path encoding of a cursor to a buffer. This is the modified version of rfc3986 used by * sigv4 signing. */ AWS_COMMON_API int aws_byte_buf_append_encoding_uri_path( struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor); /** * Writes the uri query param encoding (passthrough alnum + '-' '_' '~' '.') of a UTF-8 cursor to a buffer * For example, reading "a b_c" would write "a%20b_c". */ AWS_COMMON_API int aws_byte_buf_append_encoding_uri_param( struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor); /** * Writes the uri decoding of a UTF-8 cursor to a buffer, * replacing %xx escapes by their single byte equivalent. * For example, reading "a%20b_c" would write "a b_c". */ AWS_COMMON_API int aws_byte_buf_append_decoding_uri(struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_URI_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/uuid.h000066400000000000000000000015461456575232400253520ustar00rootroot00000000000000#ifndef AWS_COMMON_UUID_H #define AWS_COMMON_UUID_H /** * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_byte_cursor; struct aws_byte_buf; struct aws_uuid { uint8_t uuid_data[16]; }; /* 36 bytes for the UUID plus one more for the null terminator. */ enum { AWS_UUID_STR_LEN = 37 }; AWS_EXTERN_C_BEGIN AWS_COMMON_API int aws_uuid_init(struct aws_uuid *uuid); AWS_COMMON_API int aws_uuid_init_from_str(struct aws_uuid *uuid, const struct aws_byte_cursor *uuid_str); AWS_COMMON_API int aws_uuid_to_str(const struct aws_uuid *uuid, struct aws_byte_buf *output); AWS_COMMON_API bool aws_uuid_equals(const struct aws_uuid *a, const struct aws_uuid *b); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_UUID_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/xml_parser.h000066400000000000000000000053201456575232400265520ustar00rootroot00000000000000#ifndef AWS_COMMON_XML_PARSER_H #define AWS_COMMON_XML_PARSER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_xml_node; struct aws_xml_attribute { struct aws_byte_cursor name; struct aws_byte_cursor value; }; /** * Callback for when an xml node is encountered in the document. As a user you have a few options: * * 1. fail the parse by returning AWS_OP_ERR (after an error has been raised). This will stop any further parsing. * 2. call aws_xml_node_traverse() on the node to descend into the node with a new callback and user_data. * 3. call aws_xml_node_as_body() to retrieve the contents of the node as text. * * You MUST NOT call both aws_xml_node_traverse() and aws_xml_node_as_body() on the same node. * * return true to continue the parsing operation. */ typedef int(aws_xml_parser_on_node_encountered_fn)(struct aws_xml_node *node, void *user_data); struct aws_xml_parser_options { /* xml document to parse. */ struct aws_byte_cursor doc; /* Max node depth used for parsing document. */ size_t max_depth; /* Callback invoked on the root node */ aws_xml_parser_on_node_encountered_fn *on_root_encountered; /* User data for callback */ void *user_data; }; AWS_EXTERN_C_BEGIN /** * Parse an XML document. * WARNING: This is not a public API. It is only intended for use within the aws-c libraries. */ AWS_COMMON_API int aws_xml_parse(struct aws_allocator *allocator, const struct aws_xml_parser_options *options); /** * Writes the contents of the body of node into out_body. out_body is an output parameter in this case. Upon success, * out_body will contain the body of the node. */ AWS_COMMON_API int aws_xml_node_as_body(struct aws_xml_node *node, struct aws_byte_cursor *out_body); /** * Traverse node and invoke on_node_encountered when a nested node is encountered. */ AWS_COMMON_API int aws_xml_node_traverse( struct aws_xml_node *node, aws_xml_parser_on_node_encountered_fn *on_node_encountered, void *user_data); /* * Get the name of an xml node. */ AWS_COMMON_API struct aws_byte_cursor aws_xml_node_get_name(const struct aws_xml_node *node); /* * Get the number of attributes for an xml node. */ AWS_COMMON_API size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node); /* * Get an attribute for an xml node by its index. 
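 *
 * Illustrative sketch only, assuming node was received in an aws_xml_parser_on_node_encountered_fn callback:
 *
 *     size_t num_attrs = aws_xml_node_get_num_attributes(node);
 *     for (size_t i = 0; i < num_attrs; ++i) {
 *         struct aws_xml_attribute attr = aws_xml_node_get_attribute(node, i);
 *         // attr.name and attr.value are byte cursors for the attribute's name and value.
 *     }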
*/ AWS_COMMON_API struct aws_xml_attribute aws_xml_node_get_attribute(const struct aws_xml_node *node, size_t attribute_index); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_XML_PARSER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/zero.h000066400000000000000000000040761456575232400253640ustar00rootroot00000000000000#ifndef AWS_COMMON_ZERO_H #define AWS_COMMON_ZERO_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * Set each byte in the struct to zero. */ #define AWS_ZERO_STRUCT(object) \ do { \ memset(&(object), 0, sizeof(object)); \ } while (0) /** * Set each byte in the array to zero. * Does not work with arrays of unknown bound. */ #define AWS_ZERO_ARRAY(array) memset((void *)(array), 0, sizeof(array)) /** * Returns whether each byte in the object is zero. */ #ifdef CBMC /* clang-format off */ # define AWS_IS_ZEROED(object) \ __CPROVER_forall { \ int i; \ (i >= 0 && i < sizeof(object)) ==> ((const uint8_t *)&object)[i] == 0 \ } /* clang-format on */ #else # define AWS_IS_ZEROED(object) aws_is_mem_zeroed(&(object), sizeof(object)) #endif /** * Returns whether each byte is zero. */ AWS_STATIC_IMPL bool aws_is_mem_zeroed(const void *buf, size_t bufsize); /** * Securely zeroes a memory buffer. This function will attempt to ensure that * the compiler will not optimize away this zeroing operation. */ AWS_COMMON_API void aws_secure_zero(void *pBuf, size_t bufsize); #ifndef AWS_NO_STATIC_IMPL # include #endif /* AWS_NO_STATIC_IMPL */ AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_ZERO_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/common/zero.inl000066400000000000000000000022001456575232400257020ustar00rootroot00000000000000#ifndef AWS_COMMON_ZERO_INL #define AWS_COMMON_ZERO_INL /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_EXTERN_C_BEGIN /** * Returns whether each byte is zero. */ AWS_STATIC_IMPL bool aws_is_mem_zeroed(const void *buf, size_t bufsize) { /* Optimization idea: vectorized instructions to check more than 64 bits at a time. */ /* Check 64 bits at a time */ const uint64_t *buf_u64 = (const uint64_t *)buf; const size_t num_u64_checks = bufsize / 8; size_t i; for (i = 0; i < num_u64_checks; ++i) { if (buf_u64[i]) { return false; } } /* Update buf to where u64 checks left off */ buf = buf_u64 + num_u64_checks; bufsize = bufsize % 8; /* Check 8 bits at a time */ const uint8_t *buf_u8 = (const uint8_t *)buf; for (i = 0; i < bufsize; ++i) { if (buf_u8[i]) { return false; } } return true; } AWS_EXTERN_C_END #endif /* AWS_COMMON_ZERO_INL */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/testing/000077500000000000000000000000001456575232400244125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/include/aws/testing/aws_test_harness.h000066400000000000000000001212331456575232400301410ustar00rootroot00000000000000#ifndef AWS_TESTING_AWS_TEST_HARNESS_H #define AWS_TESTING_AWS_TEST_HARNESS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include /** * The return code for skipped tests. Use the return code if the test should be skipped. 
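 *
 * For example (illustrative only, using a hypothetical environment variable as the precondition):
 *
 *     if (getenv("MY_INTEGRATION_TEST_ENDPOINT") == NULL) {
 *         return AWS_OP_SKIP; // the test harness reports the test as skipped rather than failed
 *     }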
*/ #define AWS_OP_SKIP (-2) #ifndef AWS_UNSTABLE_TESTING_API # error The AWS Test Fixture is designed only for use by AWS owned libraries for the AWS C99 SDK. You are welcome to use it, \ but you should be aware we make no promises on the stability of this API. To enable use of the aws test fixtures, set \ the AWS_UNSTABLE_TESTING_API compiler flag #endif #ifndef AWS_TESTING_REPORT_FD # define AWS_TESTING_REPORT_FD stderr #endif #ifdef _MSC_VER # pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #if defined(__clang__) # pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" #endif /** Prints a message to AWS_TESTING_REPORT_FD using printf format that appends the function, file and line number. * If format is null, returns 0 without printing anything; otherwise returns 1. * If function or file are null, the function, file and line number are not appended. */ static int s_cunit_failure_message0( const char *prefix, const char *function, const char *file, int line, const char *format, ...) { if (!format) { return 0; } fprintf(AWS_TESTING_REPORT_FD, "%s", prefix); va_list ap; va_start(ap, format); vfprintf(AWS_TESTING_REPORT_FD, format, ap); va_end(ap); if (function && file) { fprintf(AWS_TESTING_REPORT_FD, " [%s(): %s:%d]\n", function, file, line); } else { fprintf(AWS_TESTING_REPORT_FD, "\n"); } return 1; } #define FAIL_PREFIX "***FAILURE*** " #define CUNIT_FAILURE_MESSAGE(func, file, line, format, ...) \ s_cunit_failure_message0(FAIL_PREFIX, func, file, line, format, #__VA_ARGS__) #define SUCCESS (0) #define FAILURE (-1) /* The exit code returned to ctest to indicate the test is skipped. Refer to cmake doc: * https://cmake.org/cmake/help/latest/prop_test/SKIP_RETURN_CODE.html * The value has no special meaning, it's just an arbitrary exit code reducing the chance of clashing with exit codes * that may be returned from various tools (e.g. sanitizer). */ #define SKIP (103) #define POSTSKIP_INTERNAL() \ do { \ return SKIP; \ } while (0) #define RETURN_SKIP(format, ...) \ do { \ printf(format, ##__VA_ARGS__); \ printf("\n"); \ POSTSKIP_INTERNAL(); \ } while (0) #define RETURN_SUCCESS(format, ...) \ do { \ printf(format, ##__VA_ARGS__); \ printf("\n"); \ return SUCCESS; \ } while (0) #define PRINT_FAIL_INTERNAL(...) CUNIT_FAILURE_MESSAGE(__func__, __FILE__, __LINE__, ##__VA_ARGS__, (const char *)NULL) #define PRINT_FAIL_INTERNAL0(...) \ s_cunit_failure_message0(FAIL_PREFIX, __func__, __FILE__, __LINE__, ##__VA_ARGS__, (const char *)NULL) #define PRINT_FAIL_WITHOUT_LOCATION(...) \ s_cunit_failure_message0(FAIL_PREFIX, NULL, NULL, __LINE__, ##__VA_ARGS__, (const char *)NULL) #define POSTFAIL_INTERNAL() \ do { \ return FAILURE; \ } while (0) #define FAIL(...) \ do { \ PRINT_FAIL_INTERNAL0(__VA_ARGS__); \ POSTFAIL_INTERNAL(); \ } while (0) #define ASSERT_TRUE(condition, ...) \ do { \ if (!(condition)) { \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("Expected condition to be true: " #condition); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_FALSE(condition, ...) \ do { \ if ((condition)) { \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("Expected condition to be false: " #condition); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_SUCCESS(condition, ...) 
\ do { \ int assert_rv = (condition); \ if (assert_rv != AWS_OP_SUCCESS) { \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0( \ "Expected success at %s; got return value %d with last error 0x%04x\n", \ #condition, \ assert_rv, \ aws_last_error()); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_FAILS(condition, ...) \ do { \ int assert_rv = (condition); \ if (assert_rv != AWS_OP_ERR) { \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0( \ "Expected failure at %s; got return value %d with last error 0x%04x\n", \ #condition, \ assert_rv, \ aws_last_error()); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_ERROR(error, condition, ...) \ do { \ int assert_rv = (condition); \ int assert_err = aws_last_error(); \ int assert_err_expect = (error); \ if (assert_rv != AWS_OP_ERR) { \ fprintf( \ AWS_TESTING_REPORT_FD, \ "%sExpected error but no error occurred; rv=%d, aws_last_error=%04x (expected %04x): ", \ FAIL_PREFIX, \ assert_rv, \ assert_err, \ assert_err_expect); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("%s", #condition); \ } \ POSTFAIL_INTERNAL(); \ } \ if (assert_err != assert_err_expect) { \ fprintf( \ AWS_TESTING_REPORT_FD, \ "%sIncorrect error code; aws_last_error=%04x (expected %04x): ", \ FAIL_PREFIX, \ assert_err, \ assert_err_expect); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("%s", #condition); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_NULL(ptr, ...) \ do { \ /* XXX: Some tests use ASSERT_NULL on ints... */ \ void *assert_p = (void *)(uintptr_t)(ptr); \ if (assert_p) { \ fprintf(AWS_TESTING_REPORT_FD, "%sExpected null but got %p: ", FAIL_PREFIX, assert_p); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("%s", #ptr); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_NOT_NULL(ptr, ...) \ do { \ /* XXX: Some tests use ASSERT_NULL on ints... */ \ void *assert_p = (void *)(uintptr_t)(ptr); \ if (!assert_p) { \ fprintf(AWS_TESTING_REPORT_FD, "%sExpected non-null but got null: ", FAIL_PREFIX); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("%s", #ptr); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_TYP_EQUALS(type, formatarg, expected, got, ...) \ do { \ type assert_expected = (expected); \ type assert_actual = (got); \ if (assert_expected != assert_actual) { \ fprintf( \ AWS_TESTING_REPORT_FD, \ "%s" formatarg " != " formatarg ": ", \ FAIL_PREFIX, \ assert_expected, \ assert_actual); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("%s != %s", #expected, #got); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #ifdef _MSC_VER # define ASSERT_INT_EQUALS(expected, got, ...) ASSERT_TYP_EQUALS(intmax_t, "%lld", expected, got, __VA_ARGS__) # define ASSERT_UINT_EQUALS(expected, got, ...) ASSERT_TYP_EQUALS(uintmax_t, "%llu", expected, got, __VA_ARGS__) #else /* For comparing any signed integer types */ # define ASSERT_INT_EQUALS(expected, got, ...) ASSERT_TYP_EQUALS(intmax_t, "%jd", expected, got, __VA_ARGS__) /* For comparing any unsigned integer types */ # define ASSERT_UINT_EQUALS(expected, got, ...) ASSERT_TYP_EQUALS(uintmax_t, "%ju", expected, got, __VA_ARGS__) #endif #define ASSERT_PTR_EQUALS(expected, got, ...) 
\ do { \ void *assert_expected = (void *)(uintptr_t)(expected); \ void *assert_actual = (void *)(uintptr_t)(got); \ if (assert_expected != assert_actual) { \ fprintf(AWS_TESTING_REPORT_FD, "%s%p != %p: ", FAIL_PREFIX, assert_expected, assert_actual); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("%s != %s", #expected, #got); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) /* note that uint8_t is promoted to unsigned int in varargs, so %02x is an acceptable format string */ #define ASSERT_BYTE_HEX_EQUALS(expected, got, ...) ASSERT_TYP_EQUALS(uint8_t, "%02X", expected, got, __VA_ARGS__) #define ASSERT_HEX_EQUALS(expected, got, ...) ASSERT_TYP_EQUALS(unsigned long long, "%llX", expected, got, __VA_ARGS__) #define ASSERT_STR_EQUALS(expected, got, ...) \ do { \ const char *assert_expected = (expected); \ const char *assert_got = (got); \ ASSERT_NOT_NULL(assert_expected); \ ASSERT_NOT_NULL(assert_got); \ if (strcmp(assert_expected, assert_got) != 0) { \ fprintf( \ AWS_TESTING_REPORT_FD, "%sExpected: \"%s\"; got: \"%s\": ", FAIL_PREFIX, assert_expected, assert_got); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("ASSERT_STR_EQUALS(%s, %s)", #expected, #got); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_BIN_ARRAYS_EQUALS(expected, expected_size, got, got_size, ...) \ do { \ const uint8_t *assert_ex_p = (const uint8_t *)(expected); \ size_t assert_ex_s = (expected_size); \ const uint8_t *assert_got_p = (const uint8_t *)(got); \ size_t assert_got_s = (got_size); \ if (assert_ex_s == 0 && assert_got_s == 0) { \ break; \ } \ if (assert_ex_s != assert_got_s) { \ fprintf(AWS_TESTING_REPORT_FD, "%sSize mismatch: %zu != %zu: ", FAIL_PREFIX, assert_ex_s, assert_got_s); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0( \ "ASSERT_BIN_ARRAYS_EQUALS(%s, %s, %s, %s)", #expected, #expected_size, #got, #got_size); \ } \ POSTFAIL_INTERNAL(); \ } \ if (memcmp(assert_ex_p, assert_got_p, assert_got_s) != 0) { \ if (assert_got_s <= 1024) { \ for (size_t assert_i = 0; assert_i < assert_ex_s; ++assert_i) { \ if (assert_ex_p[assert_i] != assert_got_p[assert_i]) { \ fprintf( \ AWS_TESTING_REPORT_FD, \ "%sMismatch at byte[%zu]: 0x%02X != 0x%02X: ", \ FAIL_PREFIX, \ assert_i, \ assert_ex_p[assert_i], \ assert_got_p[assert_i]); \ break; \ } \ } \ } else { \ fprintf(AWS_TESTING_REPORT_FD, "%sData mismatch: ", FAIL_PREFIX); \ } \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0( \ "ASSERT_BIN_ARRAYS_EQUALS(%s, %s, %s, %s)", #expected, #expected_size, #got, #got_size); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_CURSOR_VALUE_CSTRING_EQUALS(cursor, cstring, ...) 
\ do { \ const uint8_t *assert_ex_p = (const uint8_t *)((cursor).ptr); \ size_t assert_ex_s = (cursor).len; \ const uint8_t *assert_got_p = (const uint8_t *)cstring; \ size_t assert_got_s = strlen(cstring); \ if (assert_ex_s == 0 && assert_got_s == 0) { \ break; \ } \ if (assert_ex_s != assert_got_s) { \ fprintf(AWS_TESTING_REPORT_FD, "%sSize mismatch: %zu != %zu: \n", FAIL_PREFIX, assert_ex_s, assert_got_s); \ fprintf( \ AWS_TESTING_REPORT_FD, \ "%sGot: \"" PRInSTR "\"; Expected: \"%s\" \n", \ FAIL_PREFIX, \ AWS_BYTE_CURSOR_PRI(cursor), \ cstring); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("ASSERT_CURSOR_VALUE_STRING_EQUALS(%s, %s)", #cursor, #cstring); \ } \ POSTFAIL_INTERNAL(); \ } \ if (memcmp(assert_ex_p, assert_got_p, assert_got_s) != 0) { \ fprintf( \ AWS_TESTING_REPORT_FD, \ "%sData mismatch; Got: \"" PRInSTR "\"; Expected: \"%s\" \n", \ FAIL_PREFIX, \ AWS_BYTE_CURSOR_PRI(cursor), \ cstring); \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("ASSERT_CURSOR_VALUE_STRING_EQUALS(%s, %s)", #cursor, #cstring); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_CURSOR_VALUE_STRING_EQUALS(cursor, string, ...) \ ASSERT_CURSOR_VALUE_CSTRING_EQUALS(cursor, aws_string_c_str(string)); typedef int(aws_test_before_fn)(struct aws_allocator *allocator, void *ctx); typedef int(aws_test_run_fn)(struct aws_allocator *allocator, void *ctx); typedef int(aws_test_after_fn)(struct aws_allocator *allocator, int setup_result, void *ctx); struct aws_test_harness { aws_test_before_fn *on_before; aws_test_run_fn *run; aws_test_after_fn *on_after; void *ctx; const char *test_name; int suppress_memcheck; }; #if defined(_WIN32) # include static LONG WINAPI s_test_print_stack_trace(struct _EXCEPTION_POINTERS *exception_pointers) { # if !defined(AWS_HEADER_CHECKER) aws_backtrace_print(stderr, exception_pointers); # endif return EXCEPTION_EXECUTE_HANDLER; } #elif defined(AWS_HAVE_EXECINFO) # include static void s_print_stack_trace(int sig, siginfo_t *sig_info, void *user_data) { (void)sig; (void)sig_info; (void)user_data; # if !defined(AWS_HEADER_CHECKER) aws_backtrace_print(stderr, sig_info); # endif exit(-1); } #endif static inline int s_aws_run_test_case(struct aws_test_harness *harness) { AWS_ASSERT(harness->run); /* * MSVC compiler has a weird interactive pop-up in debug whenever 'abort()' is called, which can be triggered * by hitting any aws_assert or aws_pre_condition, causing the CI to hang. So disable the pop-up in tests. 
*/ #ifdef _MSC_VER _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); #endif #if defined(_WIN32) SetUnhandledExceptionFilter(s_test_print_stack_trace); /* Set working directory to path to this exe */ char cwd[512]; DWORD len = GetModuleFileNameA(NULL, cwd, sizeof(cwd)); DWORD idx = len - 1; while (idx && cwd[idx] != '\\') { idx--; } cwd[idx] = 0; SetCurrentDirectory(cwd); #elif defined(AWS_HAVE_EXECINFO) struct sigaction sa; memset(&sa, 0, sizeof(struct sigaction)); sigemptyset(&sa.sa_mask); sa.sa_flags = SA_NODEFER; sa.sa_sigaction = s_print_stack_trace; sigaction(SIGSEGV, &sa, NULL); #endif /* track allocations and report leaks in tests, unless suppressed */ struct aws_allocator *allocator = NULL; if (harness->suppress_memcheck) { allocator = aws_default_allocator(); } else { allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8); } /* wire up a logger to stderr by default, may be replaced by some tests */ struct aws_logger err_logger; struct aws_logger_standard_options options; options.file = AWS_TESTING_REPORT_FD; options.level = AWS_LL_TRACE; options.filename = NULL; aws_logger_init_standard(&err_logger, aws_default_allocator(), &options); aws_logger_set(&err_logger); int test_res = AWS_OP_ERR; int setup_res = AWS_OP_SUCCESS; if (harness->on_before) { setup_res = harness->on_before(allocator, harness->ctx); } if (!setup_res) { test_res = harness->run(allocator, harness->ctx); } if (harness->on_after) { test_res |= harness->on_after(allocator, setup_res, harness->ctx); } if (test_res != AWS_OP_SUCCESS && test_res != AWS_OP_SKIP) { goto fail; } if (!harness->suppress_memcheck) { /* Reset the logger, as test can set their own logger and clean it up, * but aws_mem_tracer_dump() needs a valid logger to be active */ aws_logger_set(&err_logger); const size_t leaked_allocations = aws_mem_tracer_count(allocator); const size_t leaked_bytes = aws_mem_tracer_bytes(allocator); if (leaked_bytes) { aws_mem_tracer_dump(allocator); PRINT_FAIL_WITHOUT_LOCATION( "Test leaked memory: %zu bytes %zu allocations", leaked_bytes, leaked_allocations); goto fail; } aws_mem_tracer_destroy(allocator); } aws_logger_set(NULL); aws_logger_clean_up(&err_logger); if (test_res == AWS_OP_SUCCESS) { RETURN_SUCCESS("%s [ \033[32mOK\033[0m ]", harness->test_name); } else if (test_res == AWS_OP_SKIP) { RETURN_SKIP("%s [ \033[32mSKIP\033[0m ]", harness->test_name); } fail: PRINT_FAIL_WITHOUT_LOCATION("%s [ \033[31mFAILED\033[0m ]", harness->test_name); /* Use _Exit() to terminate without cleaning up resources. * This prevents LeakSanitizer spam (yes, we know failing tests don't bother cleaning up). * It also prevents errors where threads that haven't cleaned are still using the logger declared in this fn. */ fflush(AWS_TESTING_REPORT_FD); fflush(stdout); fflush(stderr); _Exit(FAILURE); } /* Enables terminal escape sequences for text coloring on Windows. 
*/ /* https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences */ #ifdef _WIN32 # include # ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING # define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 # endif static inline int enable_vt_mode(void) { HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); if (hOut == INVALID_HANDLE_VALUE) { return AWS_OP_ERR; } DWORD dwMode = 0; if (!GetConsoleMode(hOut, &dwMode)) { return AWS_OP_ERR; } dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; if (!SetConsoleMode(hOut, dwMode)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } #else static inline int enable_vt_mode(void) { return AWS_OP_ERR; } #endif #define AWS_TEST_CASE_SUPRESSION(name, fn, s) \ static int fn(struct aws_allocator *allocator, void *ctx); \ static struct aws_test_harness name##_test = { \ NULL, \ fn, \ NULL, \ NULL, \ #name, \ s, \ }; \ int name(int argc, char *argv[]) { \ (void)argc, (void)argv; \ return s_aws_run_test_case(&name##_test); \ } #define AWS_TEST_CASE_FIXTURE_SUPPRESSION(name, b, fn, af, c, s) \ static int b(struct aws_allocator *allocator, void *ctx); \ static int fn(struct aws_allocator *allocator, void *ctx); \ static int af(struct aws_allocator *allocator, int setup_result, void *ctx); \ static struct aws_test_harness name##_test = { \ b, \ fn, \ af, \ c, \ #name, \ s, \ }; \ int name(int argc, char *argv[]) { \ (void)argc; \ (void)argv; \ return s_aws_run_test_case(&name##_test); \ } #define AWS_TEST_CASE(name, fn) AWS_TEST_CASE_SUPRESSION(name, fn, 0) #define AWS_TEST_CASE_FIXTURE(name, b, fn, af, c) AWS_TEST_CASE_FIXTURE_SUPPRESSION(name, b, fn, af, c, 0) #endif /* AWS_TESTING_AWS_TEST_HARNESS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/scripts/000077500000000000000000000000001456575232400222075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/scripts/appverifier_ctest.py000077500000000000000000000112461456575232400263060ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. # Built-in import argparse import subprocess import os import json import tempfile import sys import appverifier_xml def add_app_verifier_settings(app_verified_executables, app_verifier_tests): for test_executable in app_verified_executables: arguments = ["appverif", "-enable"] + app_verifier_tests + ["-for", test_executable] print (f'Calling AppVerifier with: {subprocess.list2cmdline(arguments)}') # NOTE: Needs elevated permissions. We need this for the XML dump below so I figured we might as well # also set AppVerifier here too then, simplifying the setup and running process subprocess.run(args=arguments) def remove_app_verifier_settings(app_verified_executables): for test_executable in app_verified_executables: arguments = ["appverif", "-delete", "settings", "-for", test_executable] print (f'Calling AppVerifier with: {subprocess.list2cmdline(arguments)}') # NOTE: Needs elevated permissions. 
We need this for the XML dump below so I figured we might as well # also set AppVerifier here too then, simplifying the setup and running process subprocess.run(args=arguments) def main(): argument_parser = argparse.ArgumentParser( description="AppVerifier Ctest runner util") argument_parser.add_argument("--build_directory", metavar="", required=True, default="../aws-c-common-build", help="Path to CMake build folder to run CTest in") parsed_commands = argument_parser.parse_args() ctest_execute_directory = parsed_commands.build_directory print (f"CTest execute directory: {ctest_execute_directory}") os.chdir(ctest_execute_directory) print (f"Current working directory {os.getcwd()}") tmp_xml_file_path = os.path.join(tempfile.gettempdir(), "tmp.xml") launch_arguments = ["ctest", "--show-only=json-v1"] print (f"Launching CTest with arguments: {subprocess.list2cmdline(launch_arguments)}") ctest_json_output = subprocess.run(args=launch_arguments, capture_output=True, encoding="utf8", check=True) output_json = json.loads(ctest_json_output.stdout) test_names = [] test_executables = [] # NOTE: Needs elevated permissions. We need this for the XML dump below so I figured we might as well # also set AppVerifier here too then, simplifying the setup and running process app_verified_executables = [] app_verifier_tests = ["Exceptions", "Handles", "Heaps", "Leak", "Locks", "Memory", "SRWLock", "Threadpool", "TLS"] json_tests_list = output_json["tests"] for test_data in json_tests_list: test_names.append(test_data["name"]) tmp_path = os.path.basename(test_data["command"][0]) test_executables.append(tmp_path) if not (tmp_path in app_verified_executables): app_verified_executables.append(tmp_path) if (len(test_names) <= 0): sys.exit("ERROR: No tests found via CTest") # Register with AppVerifier add_app_verifier_settings(app_verified_executables, app_verifier_tests) # Run all the tests! for i in range(0, len(test_names)): try: print (f"Running test {test_names[i]} ({i}/{len(test_names)})") ctest_args = ["ctest", "-R", "^" + test_names[i] + "$"] print (f"With arguments: {subprocess.list2cmdline(ctest_args)}") subprocess.run(args=ctest_args) appverif_xml_dump_args = ["appverif", "-export", "log", "-for", test_executables[i], "-with", "to="+ tmp_xml_file_path] print (f'Calling AppVerifier with: {subprocess.list2cmdline(appverif_xml_dump_args)}') # NOTE: Needs elevated permissions subprocess.run(args=appverif_xml_dump_args) xml_result = appverifier_xml.parseXML(tmp_xml_file_path, True) if (xml_result != 0): print (f"ERROR: Test {test_names[i]} - failed!") remove_app_verifier_settings(app_verified_executables) sys.exit(xml_result) finally: # Delete the temporary XML file AppVerifier made on each run, ensuring we have a new one each time. # We cannot use tempfile directly and just pass the path to it, because # AppVerifier freaks out - so we just have to make files in a temporary directory # and delete them when we're finished os.remove(tmp_xml_file_path) # Delete AppVerifier settings remove_app_verifier_settings(app_verified_executables) print ("SUCCESS: Finished running all tests!") if __name__ == "__main__": main() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/scripts/appverifier_xml.py000077500000000000000000000411551456575232400257660ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
# Built-in import xml.etree.ElementTree as ElementTree import argparse s_AppVerifier_LogText = "{Application Verifier}logSession" s_AppVerifier_EntryText = "{Application Verifier}logEntry" s_AppVerifier_ErrorSeverities = ["Warning", "Error", "UNKNOWN"] # A dictionary to take the error codes and convert them to basic information # on what went wrong. # # How to adjust/learn more: # To add/remove from this list, run "appverif" in a Windows terminal with # administrator privileges and then press F1 to get the help page. Then search # for the error code you got (minus the "0x" part at the beginning) and use the # information there to add/adjust the entry in the dictionary below. s_AppVerifier_ErrorCodeHelp = { "Exceptions": { "0x650": "The application is trying to run code from an address that is non-executable or free" }, "Handles": { "0x300": "The function on the top of the stack passed an invalid handle to system routines", "0x301": "The function on the top of the stack passed an invalid TLS index to TLS system routines", "0x302": "The function on the top of the stack called WaitForMultipleObjects with NULL as the address " "of the array of handles to wait for or with zero as the number of handles", "0x303": "The function on the top of the stack passed a NULL handle to system routines", "0x304": "The current thread is currently running code inside the DllMain function of one " "of the DLLs loaded in the current process and it calls WaitForSingleObject or " "WaitForMultipleObjects to wait on a thread handle in the same process", "0x305": "The current thread is calling an API with a handle to an object with an incorrect object type" }, "Heaps": { "0x01": "Unknown error encountered that cannot be determined/classified by AppVerifier", "0x02": "The application touched a non-accessible page. Typically caused by a buffer overrun error", "0x03": "A heap created with HEAP_NO_SERIALIZE flag was accessed simultaneously from two threads", "0x04": "The size of the block in a 'HeapAlloc' or 'HeapReAlloc' operation was above any reasonable value", "0x05": "Heap structure did not include magic value from AppVerifier - meaning somehow the internal heap " "structure was corrupted or a bogus value was used as heap handle", "0x06": "Typically means block was allocated in one heap and freed in another", "0x07": "Block was freed twice", "0x08": "Generic error due to corruption in the heap block that AppVerifier cannot place more specifically", "0x09": "Tried to destroy the default process heap", "0x0A": "Access violation raised while executing heap manager code", "0x0B": "AppVerifier could not determine any particular type of corruption for the block. " "Generally means heap points to non-accessible memory area", "0x0C": "AppVerifier could not determine any particular type of corruption for the block. " "Generally happens if during heap free operation you pass an address that points to a non-accessible memory area. " "Can also occur with double free situations", "0x0D": "Block of memory is written to after being freed", "0x0E": "Freed block marked as non-accessible had access attempt", "0x0F": "Magic pattern added by AppVerifier at end of heap block changed. " "Typically means buffer overrun errors", "0x10": "Buffer underruns", "0x11": "Buffer underruns", "0x12": "Buffer underruns", "0x13": "Non-accessible page at end of heap allocation was touched. 
Typically caused by a buffer overrun error", "0x14": "Page heap manager detected internal inconsistencies while calling GetProcessHeaps" }, "Leak": { "0x900": "Owner DLL of the allocation was dynamically unloaded while owning resources", "0x901": "Owner DLL of the handle was dynamically unloaded while owning resources", "0x902": "Owner DLL of the registry key was dynamically unloaded while owning resources", "0x903": "Owner DLL of the virtual reservation was dynamically unloaded while owning resources", "0x904": "Owner DLL of the SysString was dynamically unloaded while owning resources", "0x905": "DLL registered for power notification was dynamically unloaded without registering", "0x906": "Owner DLL of the COM allocation was dynamically unloaded while owning resources" }, "Locks": { "0x200": "A thread is terminated, suspended, or in a state in which it cannot hold a critical section", "0x201": "A DLL has a global variable containing a critical section and the DLL is unloaded but the " "critical section has not been deleted", "0x202": "A heap allocation contains a critical section, the allocation is freed, and the critical section " "has not been deleted", "0x203": "Typically means a critical section has been initialized more than once. May mean the critical section " "or its debug information structure has been corrupted", "0x204": "Memory containing a critical section was freed but the critical section has not been deleted using 'DeleteCriticalSection'", "0x205": "The DebugInfo field of the critical section is pointing to freed memory", "0x206": "The owner thread ID is invalid in the current context", "0x207": "The recursion count field of the critical section structure is invalid in the current context", "0x208": "A critical section is owned by a thread if it is deleted or if the critical section is uninitialized", "0x209": "A critical section is released more times than the current thread acquired it", "0x210": "A critical section is used without being initialized or after it has been deleted", "0x211": "A critical section is reinitialized by the current thread", "0x212": "The current thread is calling VirtualFree on a memory block that contains an active critical section", "0x213": "The current thread is calling UnmapViewOfFile on a memory block that contains an active critical section", "0x214": "The current thread is calling LeaveCriticalSection but does not own any critical section", "0x215": "The current thread tries to use a private lock that lives inside another DLL" }, "Memory": { "0x600": "AppVerifier detects a VirtualFree or a DLL unload with an invalid start address or size of the memory allocation", "0x601": "AppVerifier detects a VirtualAlloc call with an invalid start address or size of the memory allocation", "0x602": "AppVerifier detects a MapViewOfFile call with an invalid base address or size of the mapping", "0x603": "AppVerifier detects an IsBadXXXPtr call with an invalid address for the memory buffer to be probed", "0x604": "AppVerifier detects an IsBadXXXPtr call for a memory allocation that is free", "0x605": "AppVerifier detects an IsBadXXXPtr call for a memory allocation that contains at least one GUARD_PAGE", "0x606": "AppVerifier detects an IsBadXXXPtr call with a NULL address", "0x607": "AppVerifier detects an IsBadXXXPtr call with an invalid start address or invalid size for the memory buffer to be probed", "0x608": "AppVerifier detects a DLL unload with an invalid start address for the size of the DLL memory range", "0x609": "AppVerifier detects a VirtualFree 
for a block of memory that is actually part of the current thread's stack", "0x60A": "AppVerifier detects a VirtualFree with an incorrect value for the FreeType parameter", "0x60B": "AppVerifier detects a VirtualFree for an address that is already free", "0x60C": "AppVerifier detects a VirtualFree with a non-zero value for the dwSize parameter", "0x60D": "A DLL's entry point function is raising an exception", "0x60E": "A thread function is raising an exception", "0x60F": "An exception occurred during an IsBadXXXPtr call", "0x610": "AppVerifier detects a VirtualFree call with a NULL first parameter", "0x612": "AppVerifier detects a HeapFree for a block of memory that is actually part of the current thread's stack", "0x613": "AppVerifier detects an UnmapViewOfFile for a block of memory that is actually part of the current thread's stack", "0x614": "The application is trying to use NULL or some other incorrect address as the address of a valid object", "0x615": "The application is trying to use NULL or some other incorrect address as the address of a valid object", "0x616": "The application is trying to run code from an address that is non-executable or free", "0x617": "An exception occurred while initializing a buffer specified as output parameter for a Win32 or (non-AWS) CRT API", "0x618": "An exception occurred while calling HeapSize for a heap block that is being freed", "0x619": "The program is calling VirtualFree with an IpAddress parameter that is not the base address returned by " "the VirtualAlloc or VirtualAllocEx function when the region of pages was reserved", "0x61A": "The program is calling UnmapViewOfFile with an IpBaseAddress parameter that is not identical to the value returned " "by a previous call to the MapViewOfFile or MapViewOfFileEx function", "0x61B": "A callback function in the threadpool thread is raising an exception", "0x61C": "The application is trying to run code from an address that is non-executable or free", "0x61D": "The application is creating an executable heap", "0x61E": "The application is allocating executable memory" }, "SRWLock": { "0x250": "A thread tried to use SRW lock that is not initialized", "0x251": "The SRW lock is being re-initialized", "0x252": "The SRW lock is being released with a wrong release API", "0x253": "The SRW lock is being acquired recursively by the same thread", "0x254": "The thread that owns the SRW lock is exiting or being terminated", "0x255": "The SRW lock is being released by the thread that did not acquire the lock", "0x256": "The memory address being freed contains an active SRW lock that is still in use", "0x257": "The DLL being unloaded contains an active SRW lock that is still in use" }, "Threadpool": { "0x700": "Thread priority is changed when thread is returned to threadpool", "0x701": "Thread affinity is changed when thread is returned to threadpool", "0x702": "One or more messages left as unprocessed when threadpool thread is returned to the threadpool", "0x703": "Any window is kept alive when threadpool thread is returned to the threadpool", "0x704": "ExitThread is called on a threadpool thread", "0x705": "Callback function changed the thread token to impersonate another user and forgot to reset it before " "returning it to the threadpool", "0x706": "Windows API that requires dedicated or persistent thread called from threadpool", "0x707": "Callback function forgot to close or reset the current transaction handle", "0x708": "Callback function called CoInit and CoUnInit in differing amounts (unbalanced)", "0x709": "The 
period to signal the timer is not zero when the timer is set to signal only once with the WT_EXECUTEONLYONCE flag", "0x70A": "The loader lock is held within the callback and is not released when the thread is returned to the threadpool", "0x70B": "The preferred language is set within the callback and is not cleared when the thread is returned to the threadpool", "0x70C": "The background priority is set within the callback and is not disabled when the thread is returned to the threadpool", "0x70D": "TerminateThread called on a threadpool thread", }, "TLS": { "0x350": "A DLL that allocated a TLS index is being unloaded before freeing that TLS index", "0x351": "The internal verifier structures used to store the state of TLS slots for thread are corrupted", "0x352": "An invalid TLS index is used" } } def parseXML(filepath, dump_xml_on_error): xml_is_app_verifier = False app_verifier_entries = [] print("Looking for AppVerifier XML file...") xml_tree = ElementTree.parse(filepath) # Go through every element in the XML tree for elem in xml_tree.iter(): if (elem.tag == s_AppVerifier_LogText): xml_is_app_verifier = True elif (elem.tag == s_AppVerifier_EntryText): app_verifier_entries.append(elem) # If the XML does not have any AppVerifier data, then something went wrong! if (xml_is_app_verifier == False): print("ERROR: XML File from AppVerifier does not include a AppVerifier session!") return -1 # If we have AppVerifier entries, then a test or tests failed, so process the data, # print it, and then return with an error to stop the GitHub action from passing if (len(app_verifier_entries) > 0): print("WARNING: AppVerifier entries found:") severity_error_found = False for entry in app_verifier_entries: element_time = entry.attrib.get("Time", "UNKNOWN") element_layer_name = entry.attrib.get("LayerName", "UNKNOWN") element_code = entry.attrib.get("StopCode", "UNKNOWN") element_severity = entry.attrib.get("Severity", "UNKNOWN") print_red = False if (element_severity in s_AppVerifier_ErrorSeverities): severity_error_found = True print_red = True if (print_red): print( f"ERROR: [{element_time}] {element_severity.upper()} - Test: {element_layer_name} - Stop Code: {element_code}") else: print( f"[{element_time}] {element_severity.upper()} - Test: {element_layer_name} - Stop Code: {element_code}") print(f"\t{getErrorCodeMeaning(element_layer_name, element_code)}") print( "\nNOTE: The error codes and information provided are just guesses based on the error code.\n" "\tRun AppVerifier locally and use WinDBG combined with the AppVerifier help to discover more " "about the error from its error code and how to debug it.") if (severity_error_found == True and dump_xml_on_error != None): if (dump_xml_on_error == True): print("\nERROR: Raw XML output for errors found:\n") for entry in app_verifier_entries: print(ElementTree.tostring( entry, encoding="unicode")) if (severity_error_found == True): print( "\nERROR: Failed due to AppVerifier finding entries marked as severe") return -1 else: print("SUCCESS: AppVerifier entries were not marked as severe") return 0 else: print("SUCCESS: No AppVerifier entries found! 
AppVerifier ran successfully and did not generate any entries") return 0 def getErrorCodeMeaning(element_layer_name, element_code): if (element_layer_name in s_AppVerifier_ErrorCodeHelp): layer_codes = s_AppVerifier_ErrorCodeHelp[element_layer_name] if (element_code in layer_codes): return layer_codes[element_code] else: return "Util-script unknown error: " + element_code + " for layer " + element_layer_name return "Util-script unknown layer: " + element_layer_name + " and error code: " + element_code def booleanString(string): string = string.lower() if string not in {"false", "true"}: raise ValueError("Boolean is not true or false!") return string == "true" def main(): argument_parser = argparse.ArgumentParser( description="AppVerifier XML output util") argument_parser.add_argument("--xml_file", metavar="", required=False, help="Path to XML file from AppVerifier") argument_parser.add_argument("--dump_xml_on_error", metavar="", default=True, required=False, type=booleanString, help="If true, the XML for found issues will be printed to the console") parsed_commands = argument_parser.parse_args() print("\nStarting AppVerifier XML check...", flush=True) print(parsed_commands.dump_xml_on_error) xml_result = parseXML(parsed_commands.xml_file, parsed_commands.dump_xml_on_error) print("\n") exit(xml_result) if __name__ == "__main__": main() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/scripts/latest_submodules.py000077500000000000000000000114511456575232400263240ustar00rootroot00000000000000#!/usr/bin/env python3 import argparse import os import os.path import re import subprocess import sys def run(*args, check=True): return subprocess.run(args, capture_output=True, check=check, universal_newlines=True) def get_submodules(): """ Return list of submodules for current repo, sorted by name. Each item looks like: { 'name': 'aws-c-common', 'path': 'crt/aws-c-common', 'url': 'https://github.com/awslabs/aws-c-common.git', } """ if not os.path.exists('.gitmodules'): sys.exit(f'No .gitmodules found in {os.getcwd()}') submodules = [] start_pattern = re.compile(r'\[submodule') path_pattern = re.compile(r'\s+path = (\S+)') url_pattern = re.compile(r'\s+url = (\S+)') current = None with open('.gitmodules', 'r') as f: for line in f.readlines(): m = start_pattern.match(line) if m: current = {} submodules.append(current) continue m = path_pattern.match(line) if m: current['path'] = m.group(1) current['name'] = os.path.basename(current['path']) continue m = url_pattern.match(line) if m: current['url'] = m.group(1) continue return sorted(submodules, key=lambda x: x['name']) def get_release_tags(): """ Return list of release tags for current repo, sorted high to low. 
Each item looks like: { 'commit': 'e18f041a0c8d17189f2eae2a32f16e0a7a3f0f1c', 'version': 'v0.5.18' 'num_tuple': (0,5,18), } """ git_output = run('git', 'ls-remote', '--tags').stdout tags = [] for line in git_output.splitlines(): # line looks like: "e18f041a0c8d17189f2eae2a32f16e0a7a3f0f1c refs/tags/v0.5.18" match = re.match( r'([a-f0-9]+)\s+refs/tags/(v([0-9]+)\.([0-9]+)\.([0-9]+))$', line) if not match: # skip malformed release tags continue tags.append({ 'commit': match.group(1), 'version': match.group(2), 'num_tuple': (int(match.group(3)), int(match.group(4)), int(match.group(5))), }) # sort highest version first return sorted(tags, reverse=True, key=lambda tag: tag['num_tuple']) def get_current_commit(): git_output = run('git', 'rev-parse', 'HEAD').stdout return git_output.splitlines()[0] def is_ancestor(ancestor, descendant): """Return whether first commit is an ancestor to the second'""" result = run('git', 'merge-base', '--is-ancestor', ancestor, descendant, check=False) return result.returncode == 0 def get_tag_for_commit(tags, commit): for tag in tags: if tag['commit'] == commit: return tag return None def main(): parser = argparse.ArgumentParser( description="Update submodules to latest tags") parser.add_argument('ignore', nargs='*', help="submodules to ignore") parser.add_argument('--dry-run', action='store_true', help="print without actually updating") args = parser.parse_args() root_path = os.getcwd() submodules = get_submodules() name_pad = max([len(x['name']) for x in submodules]) for submodule in submodules: name = submodule['name'] os.chdir(os.path.join(root_path, submodule['path'])) tags = get_release_tags() current_commit = get_current_commit() current_tag = get_tag_for_commit(tags, current_commit) sync_from = current_tag['version'] if current_tag else current_commit if name in args.ignore: print(f"{name:<{name_pad}} {sync_from} (ignored)") continue latest_tag = tags[0] sync_to = latest_tag['version'] # The only time we don't want to sync to the latest release is: # The submodule is at some commit beyond the latest release, # and the CRT team doesn't control this repo so can't just cut a new release if sync_from != sync_to and current_tag is None: if name in ['aws-lc', 's2n', 's2n-tls']: # must fetch tags before we can check their ancestry run('git', 'fetch', '--tags', '--prune', '--prune-tags', '--force') if not is_ancestor(ancestor=current_commit, descendant=sync_to): sync_to = sync_from if sync_from == sync_to: print(f"{name:<{name_pad}} {sync_from} ✓") else: print(f"{name:<{name_pad}} {sync_from} -> {sync_to}") if not args.dry_run: run('git', 'fetch', '--tags', '--prune', '--prune-tags', '--force') run('git', 'checkout', sync_to) run('git', 'submodule', 'update') if __name__ == '__main__': main() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/000077500000000000000000000000001456575232400220205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/allocator.c000066400000000000000000000304411456575232400241460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #ifdef _WIN32 # include #endif #ifdef __MACH__ # include #endif /* turn off unused named parameter warning on msvc.*/ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4100) #endif #ifndef PAGE_SIZE # define PAGE_SIZE (4 * 1024) #endif bool aws_allocator_is_valid(const struct aws_allocator *alloc) { /* An allocator must define mem_acquire and mem_release. All other fields are optional */ return alloc && AWS_OBJECT_PTR_IS_READABLE(alloc) && alloc->mem_acquire && alloc->mem_release; } static void *s_aligned_malloc(struct aws_allocator *allocator, size_t size) { (void)allocator; /* larger allocations should be aligned so that AVX and friends can avoid * the extra preamble during unaligned versions of memcpy/memset on big buffers * This will also accelerate hardware CRC and SHA on ARM chips * * 64 byte alignment for > page allocations on 64 bit systems * 32 byte alignment for > page allocations on 32 bit systems * 16 byte alignment for <= page allocations on 64 bit systems * 8 byte alignment for <= page allocations on 32 bit systems * * We use PAGE_SIZE as the boundary because we are not aware of any allocations of * this size or greater that are not data buffers */ const size_t alignment = sizeof(void *) * (size > (size_t)PAGE_SIZE ? 8 : 2); #if !defined(_WIN32) void *result = NULL; int err = posix_memalign(&result, alignment, size); (void)err; AWS_PANIC_OOM(result, "posix_memalign failed to allocate memory"); return result; #else void *mem = _aligned_malloc(size, alignment); AWS_FATAL_POSTCONDITION(mem && "_aligned_malloc failed to allocate memory"); return mem; #endif } static void s_aligned_free(struct aws_allocator *allocator, void *ptr) { (void)allocator; #if !defined(_WIN32) free(ptr); #else _aligned_free(ptr); #endif } static void *s_aligned_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) { (void)allocator; (void)oldsize; AWS_FATAL_PRECONDITION(newsize); #if !defined(_WIN32) if (newsize <= oldsize) { return ptr; } /* newsize is > oldsize, need more memory */ void *new_mem = s_aligned_malloc(allocator, newsize); AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in s_aligned_malloc"); if (ptr) { memcpy(new_mem, ptr, oldsize); s_aligned_free(allocator, ptr); } return new_mem; #else const size_t alignment = sizeof(void *) * (newsize > (size_t)PAGE_SIZE ? 
8 : 2); void *new_mem = _aligned_realloc(ptr, newsize, alignment); AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in _aligned_realloc"); return new_mem; #endif } static void *s_aligned_calloc(struct aws_allocator *allocator, size_t num, size_t size) { void *mem = s_aligned_malloc(allocator, num * size); AWS_PANIC_OOM(mem, "Unhandled OOM encountered in s_aligned_calloc"); memset(mem, 0, num * size); return mem; } static void *s_non_aligned_malloc(struct aws_allocator *allocator, size_t size) { (void)allocator; void *result = malloc(size); AWS_PANIC_OOM(result, "malloc failed to allocate memory"); return result; } static void s_non_aligned_free(struct aws_allocator *allocator, void *ptr) { (void)allocator; free(ptr); } static void *s_non_aligned_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) { (void)allocator; (void)oldsize; AWS_FATAL_PRECONDITION(newsize); if (newsize <= oldsize) { return ptr; } /* newsize is > oldsize, need more memory */ void *new_mem = s_non_aligned_malloc(allocator, newsize); AWS_PANIC_OOM(new_mem, "Unhandled OOM encountered in s_non_aligned_realloc"); if (ptr) { memcpy(new_mem, ptr, oldsize); s_non_aligned_free(allocator, ptr); } return new_mem; } static void *s_non_aligned_calloc(struct aws_allocator *allocator, size_t num, size_t size) { (void)allocator; void *mem = calloc(num, size); AWS_PANIC_OOM(mem, "Unhandled OOM encountered in s_non_aligned_calloc"); return mem; } static struct aws_allocator default_allocator = { .mem_acquire = s_non_aligned_malloc, .mem_release = s_non_aligned_free, .mem_realloc = s_non_aligned_realloc, .mem_calloc = s_non_aligned_calloc, }; struct aws_allocator *aws_default_allocator(void) { return &default_allocator; } static struct aws_allocator aligned_allocator = { .mem_acquire = s_aligned_malloc, .mem_release = s_aligned_free, .mem_realloc = s_aligned_realloc, .mem_calloc = s_aligned_calloc, }; struct aws_allocator *aws_aligned_allocator(void) { return &aligned_allocator; } void *aws_mem_acquire(struct aws_allocator *allocator, size_t size) { AWS_FATAL_PRECONDITION(allocator != NULL); AWS_FATAL_PRECONDITION(allocator->mem_acquire != NULL); /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */ AWS_FATAL_PRECONDITION(size != 0); void *mem = allocator->mem_acquire(allocator, size); AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator"); return mem; } void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) { AWS_FATAL_PRECONDITION(allocator != NULL); AWS_FATAL_PRECONDITION(allocator->mem_calloc || allocator->mem_acquire); /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */ AWS_FATAL_PRECONDITION(num != 0 && size != 0); /* Defensive check: never use calloc with size * num that would overflow * https://wiki.sei.cmu.edu/confluence/display/c/MEM07-C.+Ensure+that+the+arguments+to+calloc%28%29%2C+when+multiplied%2C+do+not+wrap */ size_t required_bytes = 0; AWS_FATAL_POSTCONDITION(!aws_mul_size_checked(num, size, &required_bytes), "calloc computed size > SIZE_MAX"); /* If there is a defined calloc, use it */ if (allocator->mem_calloc) { void *mem = allocator->mem_calloc(allocator, num, size); AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire with allocator"); return mem; } /* Otherwise, emulate calloc */ void *mem = allocator->mem_acquire(allocator, required_bytes); AWS_PANIC_OOM(mem, "Unhandled OOM encountered in aws_mem_acquire 
with allocator"); memset(mem, 0, required_bytes); return mem; } #define AWS_ALIGN_ROUND_UP(value, alignment) (((value) + ((alignment)-1)) & ~((alignment)-1)) void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...) { enum { S_ALIGNMENT = sizeof(intmax_t) }; va_list args_size; va_start(args_size, count); va_list args_allocs; va_copy(args_allocs, args_size); size_t total_size = 0; for (size_t i = 0; i < count; ++i) { /* Ignore the pointer argument for now */ va_arg(args_size, void **); size_t alloc_size = va_arg(args_size, size_t); total_size += AWS_ALIGN_ROUND_UP(alloc_size, S_ALIGNMENT); } va_end(args_size); void *allocation = NULL; if (total_size > 0) { allocation = aws_mem_acquire(allocator, total_size); AWS_PANIC_OOM(allocation, "Unhandled OOM encountered in aws_mem_acquire with allocator"); uint8_t *current_ptr = allocation; for (size_t i = 0; i < count; ++i) { void **out_ptr = va_arg(args_allocs, void **); size_t alloc_size = va_arg(args_allocs, size_t); alloc_size = AWS_ALIGN_ROUND_UP(alloc_size, S_ALIGNMENT); *out_ptr = current_ptr; current_ptr += alloc_size; } } va_end(args_allocs); return allocation; } #undef AWS_ALIGN_ROUND_UP void aws_mem_release(struct aws_allocator *allocator, void *ptr) { AWS_FATAL_PRECONDITION(allocator != NULL); AWS_FATAL_PRECONDITION(allocator->mem_release != NULL); if (ptr != NULL) { allocator->mem_release(allocator, ptr); } } int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, size_t newsize) { AWS_FATAL_PRECONDITION(allocator != NULL); AWS_FATAL_PRECONDITION(allocator->mem_realloc || allocator->mem_acquire); AWS_FATAL_PRECONDITION(allocator->mem_release); /* Protect against https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations */ if (newsize == 0) { aws_mem_release(allocator, *ptr); *ptr = NULL; return AWS_OP_SUCCESS; } if (allocator->mem_realloc) { void *newptr = allocator->mem_realloc(allocator, *ptr, oldsize, newsize); AWS_PANIC_OOM(newptr, "Unhandled OOM encountered in aws_mem_acquire with allocator"); *ptr = newptr; return AWS_OP_SUCCESS; } /* Since the allocator doesn't support realloc, we'll need to emulate it (inefficiently). */ if (oldsize >= newsize) { return AWS_OP_SUCCESS; } void *newptr = allocator->mem_acquire(allocator, newsize); AWS_PANIC_OOM(newptr, "Unhandled OOM encountered in aws_mem_acquire with allocator"); memcpy(newptr, *ptr, oldsize); memset((uint8_t *)newptr + oldsize, 0, newsize - oldsize); aws_mem_release(allocator, *ptr); *ptr = newptr; return AWS_OP_SUCCESS; } /* Wraps a CFAllocator around aws_allocator. For Mac only. 
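 * The wrapper over-allocates by sizeof(size_t) and stashes the allocation size at the base of the block, handing
 * CoreFoundation a pointer just past that header; the deallocate/reallocate callbacks below step back by
 * sizeof(size_t) to recover the original allocation.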
*/ #ifdef __MACH__ static CFStringRef s_cf_allocator_description = CFSTR("CFAllocator wrapping aws_allocator."); /* note we don't have a standard specification stating sizeof(size_t) == sizeof(void *) so we have some extra casts */ static void *s_cf_allocator_allocate(CFIndex alloc_size, CFOptionFlags hint, void *info) { (void)hint; struct aws_allocator *allocator = info; void *mem = aws_mem_acquire(allocator, (size_t)alloc_size + sizeof(size_t)); size_t allocation_size = (size_t)alloc_size + sizeof(size_t); memcpy(mem, &allocation_size, sizeof(size_t)); return (void *)((uint8_t *)mem + sizeof(size_t)); } static void s_cf_allocator_deallocate(void *ptr, void *info) { struct aws_allocator *allocator = info; void *original_allocation = (uint8_t *)ptr - sizeof(size_t); aws_mem_release(allocator, original_allocation); } static void *s_cf_allocator_reallocate(void *ptr, CFIndex new_size, CFOptionFlags hint, void *info) { (void)hint; struct aws_allocator *allocator = info; AWS_ASSERT(allocator->mem_realloc); void *original_allocation = (uint8_t *)ptr - sizeof(size_t); size_t original_size = 0; memcpy(&original_size, original_allocation, sizeof(size_t)); aws_mem_realloc(allocator, &original_allocation, original_size, (size_t)new_size); AWS_FATAL_ASSERT(original_allocation); size_t new_allocation_size = (size_t)new_size; memcpy(original_allocation, &new_allocation_size, sizeof(size_t)); return (void *)((uint8_t *)original_allocation + sizeof(size_t)); } static CFStringRef s_cf_allocator_copy_description(const void *info) { (void)info; return s_cf_allocator_description; } static CFIndex s_cf_allocator_preferred_size(CFIndex size, CFOptionFlags hint, void *info) { (void)hint; (void)info; return (CFIndex)(size + sizeof(size_t)); } CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator) { CFAllocatorRef cf_allocator = NULL; CFAllocatorReallocateCallBack reallocate_callback = NULL; if (allocator->mem_realloc) { reallocate_callback = s_cf_allocator_reallocate; } CFAllocatorContext context = { .allocate = s_cf_allocator_allocate, .copyDescription = s_cf_allocator_copy_description, .deallocate = s_cf_allocator_deallocate, .reallocate = reallocate_callback, .info = allocator, .preferredSize = s_cf_allocator_preferred_size, .release = NULL, .retain = NULL, .version = 0, }; cf_allocator = CFAllocatorCreate(NULL, &context); AWS_FATAL_ASSERT(cf_allocator && "creation of cf allocator failed!"); return cf_allocator; } void aws_wrapped_cf_allocator_destroy(CFAllocatorRef allocator) { CFRelease(allocator); } #endif /*__MACH__ */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/allocator_sba.c000066400000000000000000000420271456575232400247760ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include /* * Small Block Allocator * This is a fairly standard approach, the idea is to always allocate aligned pages of memory so that for * any address you can round to the nearest page boundary to find the bookkeeping data. The idea is to reduce * overhead per alloc and greatly improve runtime speed by doing as little actual allocation work as possible, * preferring instead to re-use (hopefully still cached) chunks in FIFO order, or chunking up a page if there's * no free chunks. When all chunks in a page are freed, the page is returned to the OS. 
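 *
 * Concretely: a 100-byte request is rounded up to the 128-byte bin, the chunk handed out lives inside an aligned
 * AWS_SBA_PAGE_SIZE page whose base holds a page_header, and masking any chunk address with AWS_SBA_PAGE_MASK
 * recovers that header (and therefore the owning bin) on free.
 *
 * Illustrative usage sketch of the public API defined in this file (variable names are hypothetical):
 *
 *     struct aws_allocator *sba = aws_small_block_allocator_new(aws_default_allocator(), true);
 *     void *p = aws_mem_acquire(sba, 100);   /* served from the 128-byte bin */
 *     aws_mem_release(sba, p);
 *     aws_small_block_allocator_destroy(sba);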
* * The allocator itself is simply an array of bins, each representing a power of 2 size from 32 - N (512 tends to be * a good upper bound). Thread safety is guaranteed by a mutex per bin, and locks are only necessary around the * lowest level alloc and free operations. * * Note: this allocator gets its internal memory for data structures from the parent allocator, but does not * use the parent to allocate pages. Pages are allocated directly from the OS-specific aligned malloc implementation, * which allows the OS to do address re-mapping for us instead of over-allocating to fulfill alignment. */ #ifdef _WIN32 # include #elif __linux__ || __APPLE__ # include #endif #if !defined(AWS_SBA_PAGE_SIZE) # if defined(PAGE_SIZE) # define AWS_SBA_PAGE_SIZE ((uintptr_t)(PAGE_SIZE)) # else # define AWS_SBA_PAGE_SIZE ((uintptr_t)(4096)) # endif #endif #define AWS_SBA_PAGE_MASK ((uintptr_t) ~(AWS_SBA_PAGE_SIZE - 1)) #define AWS_SBA_TAG_VALUE 0x736f6d6570736575ULL /* list of sizes of bins, must be powers of 2, and less than AWS_SBA_PAGE_SIZE * 0.5 */ enum { AWS_SBA_BIN_COUNT = 5 }; static const size_t s_bin_sizes[AWS_SBA_BIN_COUNT] = {32, 64, 128, 256, 512}; static const size_t s_max_bin_size = 512; struct sba_bin { size_t size; /* size of allocs in this bin */ struct aws_mutex mutex; /* lock protecting this bin */ uint8_t *page_cursor; /* pointer to working page, currently being chunked from */ struct aws_array_list active_pages; /* all pages in use by this bin, could be optimized at scale by being a set */ struct aws_array_list free_chunks; /* free chunks available in this bin */ }; /* Header stored at the base of each page. * As long as this is under 32 bytes, all is well. * Above that, there's potentially more waste per page */ struct page_header { uint64_t tag; /* marker to identify/validate pages */ struct sba_bin *bin; /* bin this page belongs to */ uint32_t alloc_count; /* number of outstanding allocs from this page */ uint64_t tag2; }; /* This is the impl for the aws_allocator */ struct small_block_allocator { struct aws_allocator *allocator; /* parent allocator, for large allocs */ struct sba_bin bins[AWS_SBA_BIN_COUNT]; int (*lock)(struct aws_mutex *); int (*unlock)(struct aws_mutex *); }; static int s_null_lock(struct aws_mutex *mutex) { (void)mutex; /* NO OP */ return 0; } static int s_null_unlock(struct aws_mutex *mutex) { (void)mutex; /* NO OP */ return 0; } static int s_mutex_lock(struct aws_mutex *mutex) { return aws_mutex_lock(mutex); } static int s_mutex_unlock(struct aws_mutex *mutex) { return aws_mutex_unlock(mutex); } static void *s_page_base(const void *addr) { /* mask off the address to round it to page alignment */ uint8_t *page_base = (uint8_t *)(((uintptr_t)addr) & AWS_SBA_PAGE_MASK); return page_base; } static void *s_page_bind(void *addr, struct sba_bin *bin) { /* insert the header at the base of the page and advance past it */ struct page_header *page = (struct page_header *)addr; page->tag = page->tag2 = AWS_SBA_TAG_VALUE; page->bin = bin; page->alloc_count = 0; return (uint8_t *)addr + sizeof(struct page_header); } /* Wraps OS-specific aligned malloc implementation */ static void *s_aligned_alloc(size_t size, size_t align) { #ifdef _WIN32 return _aligned_malloc(size, align); #else void *mem = NULL; int return_code = posix_memalign(&mem, align, size); if (return_code) { aws_raise_error(AWS_ERROR_OOM); return NULL; } return mem; #endif } /* wraps OS-specific aligned free implementation */ static void s_aligned_free(void *addr) { #ifdef _WIN32 _aligned_free(addr); #else 
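    /* posix_memalign-ed memory is released with plain free() */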
free(addr); #endif } /* aws_allocator vtable template */ static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size); static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr); static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size); static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size); static struct aws_allocator s_sba_allocator = { .mem_acquire = s_sba_mem_acquire, .mem_release = s_sba_mem_release, .mem_realloc = s_sba_mem_realloc, .mem_calloc = s_sba_mem_calloc, }; static int s_sba_init(struct small_block_allocator *sba, struct aws_allocator *allocator, bool multi_threaded) { sba->allocator = allocator; AWS_ZERO_ARRAY(sba->bins); sba->lock = multi_threaded ? s_mutex_lock : s_null_lock; sba->unlock = multi_threaded ? s_mutex_unlock : s_null_unlock; for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) { struct sba_bin *bin = &sba->bins[idx]; bin->size = s_bin_sizes[idx]; if (multi_threaded && aws_mutex_init(&bin->mutex)) { goto cleanup; } if (aws_array_list_init_dynamic(&bin->active_pages, sba->allocator, 16, sizeof(void *))) { goto cleanup; } /* start with enough chunks for 1 page */ if (aws_array_list_init_dynamic( &bin->free_chunks, sba->allocator, aws_max_size(AWS_SBA_PAGE_SIZE / bin->size, 16), sizeof(void *))) { goto cleanup; } } return AWS_OP_SUCCESS; cleanup: for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) { struct sba_bin *bin = &sba->bins[idx]; aws_mutex_clean_up(&bin->mutex); aws_array_list_clean_up(&bin->active_pages); aws_array_list_clean_up(&bin->free_chunks); } return AWS_OP_ERR; } static void s_sba_clean_up(struct small_block_allocator *sba) { /* free all known pages, then free the working page */ for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) { struct sba_bin *bin = &sba->bins[idx]; for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) { void *page_addr = NULL; aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx); struct page_header *page = page_addr; AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (bin)"); s_aligned_free(page); } if (bin->page_cursor) { void *page_addr = s_page_base(bin->page_cursor); struct page_header *page = page_addr; AWS_ASSERT(page->alloc_count == 0 && "Memory still allocated in aws_sba_allocator (page)"); s_aligned_free(page); } aws_array_list_clean_up(&bin->active_pages); aws_array_list_clean_up(&bin->free_chunks); aws_mutex_clean_up(&bin->mutex); } } struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *allocator, bool multi_threaded) { struct small_block_allocator *sba = NULL; struct aws_allocator *sba_allocator = NULL; aws_mem_acquire_many( allocator, 2, &sba, sizeof(struct small_block_allocator), &sba_allocator, sizeof(struct aws_allocator)); if (!sba || !sba_allocator) { return NULL; } AWS_ZERO_STRUCT(*sba); AWS_ZERO_STRUCT(*sba_allocator); /* copy the template vtable */ *sba_allocator = s_sba_allocator; sba_allocator->impl = sba; if (s_sba_init(sba, allocator, multi_threaded)) { s_sba_clean_up(sba); aws_mem_release(allocator, sba); return NULL; } return sba_allocator; } void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator) { if (!sba_allocator) { return; } struct small_block_allocator *sba = sba_allocator->impl; if (!sba) { return; } struct aws_allocator *allocator = sba->allocator; s_sba_clean_up(sba); aws_mem_release(allocator, sba); } size_t aws_small_block_allocator_bytes_active(struct 
aws_allocator *sba_allocator) { AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_used requires a non-null allocator"); struct small_block_allocator *sba = sba_allocator->impl; AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_used: supplied allocator has invalid SBA impl"); size_t used = 0; for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) { struct sba_bin *bin = &sba->bins[idx]; sba->lock(&bin->mutex); for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) { void *page_addr = NULL; aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx); struct page_header *page = page_addr; used += page->alloc_count * bin->size; } if (bin->page_cursor) { void *page_addr = s_page_base(bin->page_cursor); struct page_header *page = page_addr; used += page->alloc_count * bin->size; } sba->unlock(&bin->mutex); } return used; } size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator) { AWS_FATAL_ASSERT(sba_allocator && "aws_small_block_allocator_bytes_used requires a non-null allocator"); struct small_block_allocator *sba = sba_allocator->impl; AWS_FATAL_ASSERT(sba && "aws_small_block_allocator_bytes_used: supplied allocator has invalid SBA impl"); size_t used = 0; for (unsigned idx = 0; idx < AWS_SBA_BIN_COUNT; ++idx) { struct sba_bin *bin = &sba->bins[idx]; sba->lock(&bin->mutex); used += (bin->active_pages.length + (bin->page_cursor != NULL)) * AWS_SBA_PAGE_SIZE; sba->unlock(&bin->mutex); } return used; } size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator) { (void)sba_allocator; return AWS_SBA_PAGE_SIZE; } size_t aws_small_block_allocator_page_size_available(struct aws_allocator *sba_allocator) { (void)sba_allocator; return AWS_SBA_PAGE_SIZE - sizeof(struct page_header); } /* NOTE: Expects the mutex to be held by the caller */ static void *s_sba_alloc_from_bin(struct sba_bin *bin) { /* check the free list, hand chunks out in FIFO order */ if (bin->free_chunks.length > 0) { void *chunk = NULL; if (aws_array_list_back(&bin->free_chunks, &chunk)) { return NULL; } if (aws_array_list_pop_back(&bin->free_chunks)) { return NULL; } AWS_ASSERT(chunk); struct page_header *page = s_page_base(chunk); page->alloc_count++; return chunk; } /* If there is a working page to chunk from, use it */ if (bin->page_cursor) { struct page_header *page = s_page_base(bin->page_cursor); AWS_ASSERT(page); size_t space_left = AWS_SBA_PAGE_SIZE - (bin->page_cursor - (uint8_t *)page); if (space_left >= bin->size) { void *chunk = bin->page_cursor; page->alloc_count++; bin->page_cursor += bin->size; space_left -= bin->size; if (space_left < bin->size) { aws_array_list_push_back(&bin->active_pages, &page); bin->page_cursor = NULL; } return chunk; } } /* Nothing free to use, allocate a page and restart */ uint8_t *new_page = s_aligned_alloc(AWS_SBA_PAGE_SIZE, AWS_SBA_PAGE_SIZE); new_page = s_page_bind(new_page, bin); bin->page_cursor = new_page; return s_sba_alloc_from_bin(bin); } /* NOTE: Expects the mutex to be held by the caller */ static void s_sba_free_to_bin(struct sba_bin *bin, void *addr) { AWS_PRECONDITION(addr); struct page_header *page = s_page_base(addr); AWS_ASSERT(page->bin == bin); page->alloc_count--; if (page->alloc_count == 0 && page != s_page_base(bin->page_cursor)) { /* empty page, free it */ uint8_t *page_start = (uint8_t *)page + sizeof(struct page_header); uint8_t *page_end = page_start + AWS_SBA_PAGE_SIZE; /* Remove all chunks in the page from the free list */ intptr_t chunk_idx = 
(intptr_t)bin->free_chunks.length; for (; chunk_idx >= 0; --chunk_idx) { uint8_t *chunk = NULL; aws_array_list_get_at(&bin->free_chunks, &chunk, chunk_idx); if (chunk >= page_start && chunk < page_end) { aws_array_list_swap(&bin->free_chunks, chunk_idx, bin->free_chunks.length - 1); aws_array_list_pop_back(&bin->free_chunks); } } /* Find page in pages list and remove it */ for (size_t page_idx = 0; page_idx < bin->active_pages.length; ++page_idx) { void *page_addr = NULL; aws_array_list_get_at(&bin->active_pages, &page_addr, page_idx); if (page_addr == page) { aws_array_list_swap(&bin->active_pages, page_idx, bin->active_pages.length - 1); aws_array_list_pop_back(&bin->active_pages); break; } } /* ensure that the page tag is erased, in case nearby memory is re-used */ page->tag = page->tag2 = 0; s_aligned_free(page); return; } aws_array_list_push_back(&bin->free_chunks, &addr); } /* No lock required for this function, it's all read-only access to constant data */ static struct sba_bin *s_sba_find_bin(struct small_block_allocator *sba, size_t size) { AWS_PRECONDITION(size <= s_max_bin_size); /* map bits 5(32) to 9(512) to indices 0-4 */ size_t next_pow2 = 0; aws_round_up_to_power_of_two(size, &next_pow2); size_t lz = aws_clz_i32((int32_t)next_pow2); size_t idx = aws_sub_size_saturating(31 - lz, 5); AWS_ASSERT(idx <= 4); struct sba_bin *bin = &sba->bins[idx]; AWS_ASSERT(bin->size >= size); return bin; } static void *s_sba_alloc(struct small_block_allocator *sba, size_t size) { if (size <= s_max_bin_size) { struct sba_bin *bin = s_sba_find_bin(sba, size); AWS_FATAL_ASSERT(bin); /* BEGIN CRITICAL SECTION */ sba->lock(&bin->mutex); void *mem = s_sba_alloc_from_bin(bin); sba->unlock(&bin->mutex); /* END CRITICAL SECTION */ return mem; } return aws_mem_acquire(sba->allocator, size); } AWS_SUPPRESS_ASAN AWS_SUPPRESS_TSAN static void s_sba_free(struct small_block_allocator *sba, void *addr) { if (!addr) { return; } struct page_header *page = (struct page_header *)s_page_base(addr); /* Check to see if this page is tagged by the sba */ /* this check causes a read of (possibly) memory we didn't allocate, but it will always be * heap memory, so should not cause any issues. 
TSan will see this as a data race, but it * is not, that's a false positive */ if (page->tag == AWS_SBA_TAG_VALUE && page->tag2 == AWS_SBA_TAG_VALUE) { struct sba_bin *bin = page->bin; /* BEGIN CRITICAL SECTION */ sba->lock(&bin->mutex); s_sba_free_to_bin(bin, addr); sba->unlock(&bin->mutex); /* END CRITICAL SECTION */ return; } /* large alloc, give back to underlying allocator */ aws_mem_release(sba->allocator, addr); } static void *s_sba_mem_acquire(struct aws_allocator *allocator, size_t size) { struct small_block_allocator *sba = allocator->impl; return s_sba_alloc(sba, size); } static void s_sba_mem_release(struct aws_allocator *allocator, void *ptr) { struct small_block_allocator *sba = allocator->impl; s_sba_free(sba, ptr); } static void *s_sba_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) { struct small_block_allocator *sba = allocator->impl; /* If both allocations come from the parent, let the parent do it */ if (old_size > s_max_bin_size && new_size > s_max_bin_size) { void *ptr = old_ptr; if (aws_mem_realloc(sba->allocator, &ptr, old_size, new_size)) { return NULL; } return ptr; } if (new_size == 0) { s_sba_free(sba, old_ptr); return NULL; } if (old_size > new_size) { return old_ptr; } void *new_mem = s_sba_alloc(sba, new_size); if (old_ptr && old_size) { memcpy(new_mem, old_ptr, old_size); s_sba_free(sba, old_ptr); } return new_mem; } static void *s_sba_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) { struct small_block_allocator *sba = allocator->impl; void *mem = s_sba_alloc(sba, size * num); memset(mem, 0, size * num); return mem; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/android/000077500000000000000000000000001456575232400234405ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/android/logging.c000066400000000000000000000123201456575232400252300ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #define LOGCAT_MAX_BUFFER_SIZE (4 * 1024) struct logcat_format_data { char *buffer; size_t bytes_written; size_t total_length; const char *format; }; static size_t s_advance_and_clamp_index(size_t current_index, int amount, size_t maximum) { size_t next_index = current_index + amount; if (next_index > maximum) { next_index = maximum; } return next_index; } /* Override this for Android, as time and log level are taken care of by logcat */ static int s_logcat_format(struct logcat_format_data *formatting_data, va_list args) { size_t current_index = 0; if (formatting_data->total_length == 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* * Use this length for all but the last write, so we guarantee room for the newline even if we get truncated */ size_t fake_total_length = formatting_data->total_length - 1; if (current_index < fake_total_length) { /* * Add thread id and user content separator (" - ") */ aws_thread_id_t current_thread_id = aws_thread_current_thread_id(); char thread_id[AWS_THREAD_ID_T_REPR_BUFSZ]; if (aws_thread_id_t_to_string(current_thread_id, thread_id, AWS_THREAD_ID_T_REPR_BUFSZ)) { return AWS_OP_ERR; } int thread_id_written = snprintf(formatting_data->buffer + current_index, fake_total_length - current_index, "[%s] ", thread_id); if (thread_id_written < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index = s_advance_and_clamp_index(current_index, thread_id_written, fake_total_length); } if (current_index < fake_total_length) { uint64_t now = 0; aws_high_res_clock_get_ticks(&now); int current_time_written = snprintf( formatting_data->buffer + current_index, fake_total_length - current_index, "(HRC:%" PRIu64 ") ", now); if (current_time_written < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index = s_advance_and_clamp_index(current_index, current_time_written, fake_total_length); } if (current_index < fake_total_length) { int separator_written = snprintf(formatting_data->buffer + current_index, fake_total_length - current_index, " - "); current_index = s_advance_and_clamp_index(current_index, separator_written, fake_total_length); } if (current_index < fake_total_length) { /* * Now write the actual data requested by the user */ int written_count = vsnprintf( formatting_data->buffer + current_index, fake_total_length - current_index, formatting_data->format, args); if (written_count < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index = s_advance_and_clamp_index(current_index, written_count, fake_total_length); } /* * End with a newline. */ int newline_written_count = snprintf(formatting_data->buffer + current_index, formatting_data->total_length - current_index, "\n"); if (newline_written_count < 0) { return aws_raise_error(AWS_ERROR_UNKNOWN); /* we saved space, so this would be crazy */ } formatting_data->bytes_written = current_index + newline_written_count; return AWS_OP_SUCCESS; } static struct aws_logger_logcat { enum aws_log_level level; } s_logcat_impl; static int s_logcat_log( struct aws_logger *logger, enum aws_log_level log_level, aws_log_subject_t subject, const char *format, ...) 
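/* Renders the message via s_logcat_format() into a stack buffer, then forwards it to logcat with a priority
 * derived from the aws log level; logcat itself supplies the timestamp and level text. */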
{ (void)logger; va_list format_args; va_start(format_args, format); char buffer[LOGCAT_MAX_BUFFER_SIZE]; struct logcat_format_data fmt = { .buffer = buffer, .total_length = AWS_ARRAY_SIZE(buffer), .format = format, }; int result = s_logcat_format(&fmt, format_args); va_end(format_args); if (result != AWS_OP_SUCCESS) { return AWS_OP_ERR; } /* ANDROID_LOG_VERBOSE = 2, ANDROID_LOG_FATAL = 7 */ const int prio = 0x8 - log_level; __android_log_write(prio, aws_log_subject_name(subject), buffer); return AWS_OP_SUCCESS; } static enum aws_log_level s_logcat_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) { (void)subject; struct aws_logger_logcat *impl = logger->p_impl; return impl->level; } static void s_logcat_clean_up(struct aws_logger *logger) { logger->p_impl = NULL; } static struct aws_logger_vtable s_logcat_vtable = { .log = s_logcat_log, .get_log_level = s_logcat_get_log_level, .clean_up = s_logcat_clean_up, }; int aws_logger_init_logcat( struct aws_logger *logger, struct aws_allocator *allocator, struct aws_logger_standard_options *options) { logger->allocator = allocator; logger->vtable = &s_logcat_vtable; logger->p_impl = &s_logcat_impl; s_logcat_impl.level = options->level; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/000077500000000000000000000000001456575232400227355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/arm/000077500000000000000000000000001456575232400235145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/arm/asm/000077500000000000000000000000001456575232400242745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/arm/asm/cpuid.c000066400000000000000000000042261456575232400255500ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include #include #if defined(__linux__) || defined(__FreeBSD__) # include static unsigned long s_hwcap[2]; static bool s_hwcap_cached; struct cap_bits { unsigned long cap; unsigned long bit; }; # if (defined(__aarch64__)) struct cap_bits s_check_cap[AWS_CPU_FEATURE_COUNT] = { [AWS_CPU_FEATURE_ARM_CRC] = {0, 1 << 7 /* HWCAP_CRC */}, }; # else struct cap_bits s_check_cap[AWS_CPU_FEATURE_COUNT] = { [AWS_CPU_FEATURE_ARM_CRC] = {1, 1 << 4 /* HWCAP_CRC */}, }; # endif # if (defined(__linux__)) static void s_cache_hwcap(void) { s_hwcap[0] = getauxval(AT_HWCAP); s_hwcap[1] = getauxval(AT_HWCAP2); s_hwcap_cached = true; } # elif (defined(__FreeBSD__)) static void s_cache_hwcap(void) { int ret; ret = elf_aux_info(AT_HWCAP, &s_hwcap[0], sizeof(unsigned long)); if (ret) s_hwcap[0] = 0; ret = elf_aux_info(AT_HWCAP2, &s_hwcap[1], sizeof(unsigned long)); if (ret) s_hwcap[1] = 0; s_hwcap_cached = true; } # else # error "Unknown method" # endif bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { if (!s_hwcap_cached) s_cache_hwcap(); switch (feature_name) { case AWS_CPU_FEATURE_ARM_CRC: return s_hwcap[s_check_cap[feature_name].cap] & s_check_cap[feature_name].bit; default: return false; } } #else /* defined(__linux__) || defined(__FreeBSD__) */ bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { return false; } #endif /* defined(__linux__) || defined(__FreeBSD__) */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/arm/msvc/000077500000000000000000000000001456575232400244645ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/arm/msvc/cpuid.c000066400000000000000000000013121456575232400257310ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { return false; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/generic/000077500000000000000000000000001456575232400243515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/generic/cpuid.c000066400000000000000000000006221456575232400256210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * MSVC wants us to use the non-portable _dupenv_s instead; since we need * to remain portable, tell MSVC to suppress this warning. */ #include bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { (void)feature_name; return false; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/intel/000077500000000000000000000000001456575232400240505ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/intel/asm/000077500000000000000000000000001456575232400246305ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/intel/asm/cpuid.c000066400000000000000000000014571456575232400261070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include void aws_run_cpuid(uint32_t eax, uint32_t ecx, uint32_t *abcd) { uint32_t ebx = 0; uint32_t edx = 0; #if defined(__i386__) && defined(__PIC__) /* in case of PIC under 32-bit EBX cannot be clobbered */ __asm__ __volatile__("movl %%ebx, %%edi \n\t " "cpuid \n\t " "xchgl %%ebx, %%edi" : "=D"(ebx), #else __asm__ __volatile__("cpuid" : "+b"(ebx), #endif "+a"(eax), "+c"(ecx), "=d"(edx)); abcd[0] = eax; abcd[1] = ebx; abcd[2] = ecx; abcd[3] = edx; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/intel/cpuid.c000066400000000000000000000105241456575232400253220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * MSVC wants us to use the non-portable _dupenv_s instead; since we need * to remain portable, tell MSVC to suppress this warning. */ #define _CRT_SECURE_NO_WARNINGS #include #include extern void aws_run_cpuid(uint32_t eax, uint32_t ecx, uint32_t *abcd); typedef bool(has_feature_fn)(void); static bool s_has_clmul(void) { uint32_t abcd[4]; uint32_t clmul_mask = 0x00000002; aws_run_cpuid(1, 0, abcd); if ((abcd[2] & clmul_mask) != clmul_mask) return false; return true; } static bool s_has_sse41(void) { uint32_t abcd[4]; uint32_t sse41_mask = 0x00080000; aws_run_cpuid(1, 0, abcd); if ((abcd[2] & sse41_mask) != sse41_mask) return false; return true; } static bool s_has_sse42(void) { uint32_t abcd[4]; uint32_t sse42_mask = 0x00100000; aws_run_cpuid(1, 0, abcd); if ((abcd[2] & sse42_mask) != sse42_mask) return false; return true; } static bool s_has_avx2(void) { uint32_t abcd[4]; /* Check AVX2: * CPUID.(EAX=07H, ECX=0H):EBX.AVX2[bit 5]==1 */ uint32_t avx2_mask = (1 << 5); aws_run_cpuid(7, 0, abcd); if ((abcd[1] & avx2_mask) != avx2_mask) { return false; } /* Also check AVX: * CPUID.(EAX=01H, ECX=0H):ECX.AVX[bit 28]==1 * * NOTE: It SHOULD be impossible for a CPU to support AVX2 without supporting AVX. * But we've received crash reports where the AVX2 feature check passed * and then an AVX instruction caused an "invalid instruction" crash. * * We diagnosed these machines by asking users to run the sample program from: * https://docs.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex?view=msvc-160 * and observed the following results: * * AVX not supported * AVX2 supported * * We don't know for sure what was up with those machines, but this extra * check should stop them from running our AVX/AVX2 code paths. 
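 *
 * Net effect: s_has_avx2() only reports AVX2 when both CPUID.(EAX=07H, ECX=0H):EBX[bit 5] (AVX2) and
 * CPUID.(EAX=01H, ECX=0H):ECX[bit 28] (AVX) are set.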
*/ uint32_t avx1_mask = (1 << 28); aws_run_cpuid(1, 0, abcd); if ((abcd[2] & avx1_mask) != avx1_mask) { return false; } return true; } static bool s_has_avx512(void) { uint32_t abcd[4]; /* Check AVX512F: * CPUID.(EAX=07H, ECX=0H):EBX.AVX512[bit 16]==1 */ uint32_t avx512_mask = (1 << 16); aws_run_cpuid(7, 0, abcd); if ((abcd[1] & avx512_mask) != avx512_mask) { return false; } return true; } static bool s_has_bmi2(void) { uint32_t abcd[4]; /* Check BMI2: * CPUID.(EAX=07H, ECX=0H):EBX.BMI2[bit 8]==1 */ uint32_t bmi2_mask = (1 << 8); aws_run_cpuid(7, 0, abcd); if ((abcd[1] & bmi2_mask) != bmi2_mask) { return false; } return true; } static bool s_has_vpclmulqdq(void) { uint32_t abcd[4]; /* Check VPCLMULQDQ: * CPUID.(EAX=07H, ECX=0H):ECX.VPCLMULQDQ[bit 20]==1 */ uint32_t vpclmulqdq_mask = (1 << 20); aws_run_cpuid(7, 0, abcd); if ((abcd[2] & vpclmulqdq_mask) != vpclmulqdq_mask) { return false; } return true; } has_feature_fn *s_check_cpu_feature[AWS_CPU_FEATURE_COUNT] = { [AWS_CPU_FEATURE_CLMUL] = s_has_clmul, [AWS_CPU_FEATURE_SSE_4_1] = s_has_sse41, [AWS_CPU_FEATURE_SSE_4_2] = s_has_sse42, [AWS_CPU_FEATURE_AVX2] = s_has_avx2, [AWS_CPU_FEATURE_AVX512] = s_has_avx512, [AWS_CPU_FEATURE_BMI2] = s_has_bmi2, [AWS_CPU_FEATURE_VPCLMULQDQ] = s_has_vpclmulqdq, }; bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) { if (s_check_cpu_feature[feature_name]) return s_check_cpu_feature[feature_name](); return false; } #define CPUID_AVAILABLE 0 #define CPUID_UNAVAILABLE 1 static int cpuid_state = 2; bool aws_common_private_has_avx2(void) { if (AWS_LIKELY(cpuid_state == 0)) { return true; } if (AWS_LIKELY(cpuid_state == 1)) { return false; } /* Provide a hook for testing fallbacks and benchmarking */ const char *env_avx2_enabled = getenv("AWS_COMMON_AVX2"); if (env_avx2_enabled) { int is_enabled = atoi(env_avx2_enabled); cpuid_state = !is_enabled; return is_enabled; } bool available = aws_cpu_has_feature(AWS_CPU_FEATURE_AVX2); cpuid_state = available ? CPUID_AVAILABLE : CPUID_UNAVAILABLE; return available; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/intel/encoding_avx2.c000066400000000000000000000306771456575232400267570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include /***** Decode logic *****/ /* * Decodes ranges of bytes in place * For each byte of 'in' that is between lo and hi (inclusive), adds offset and _adds_ it to the corresponding offset in * out. */ static inline __m256i translate_range(__m256i in, uint8_t lo, uint8_t hi, uint8_t offset) { __m256i lovec = _mm256_set1_epi8(lo); __m256i hivec = _mm256_set1_epi8((char)(hi - lo)); __m256i offsetvec = _mm256_set1_epi8(offset); __m256i tmp = _mm256_sub_epi8(in, lovec); /* * we'll use the unsigned min operator to do our comparison. Note that * there's no unsigned compare as a comparison intrinsic. */ __m256i mask = _mm256_min_epu8(tmp, hivec); /* if mask = tmp, then keep that byte */ mask = _mm256_cmpeq_epi8(mask, tmp); tmp = _mm256_add_epi8(tmp, offsetvec); tmp = _mm256_and_si256(tmp, mask); return tmp; } /* * For each 8-bit element in in, if the element equals match, add to the corresponding element in out the value decode. 
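 * e.g. translate_exact(in, '+', 62 + 1) yields 63 in every lane whose byte equals '+' and 0 in all other lanes.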
*/ static inline __m256i translate_exact(__m256i in, uint8_t match, uint8_t decode) { __m256i mask = _mm256_cmpeq_epi8(in, _mm256_set1_epi8(match)); return _mm256_and_si256(mask, _mm256_set1_epi8(decode)); } /* * Input: a pointer to a 256-bit vector of base64 characters * The pointed-to-vector is replaced by a 256-bit vector of 6-bit decoded parts; * on decode failure, returns false, else returns true on success. */ static inline bool decode_vec(__m256i *in) { __m256i tmp1, tmp2, tmp3; /* * Base64 decoding table, see RFC4648 * * Note that we use multiple vector registers to try to allow the CPU to * paralellize the merging ORs */ tmp1 = translate_range(*in, 'A', 'Z', 0 + 1); tmp2 = translate_range(*in, 'a', 'z', 26 + 1); tmp3 = translate_range(*in, '0', '9', 52 + 1); tmp1 = _mm256_or_si256(tmp1, translate_exact(*in, '+', 62 + 1)); tmp2 = _mm256_or_si256(tmp2, translate_exact(*in, '/', 63 + 1)); tmp3 = _mm256_or_si256(tmp3, _mm256_or_si256(tmp1, tmp2)); /* * We use 0 to mark decode failures, so everything is decoded to one higher * than normal. We'll shift this down now. */ *in = _mm256_sub_epi8(tmp3, _mm256_set1_epi8(1)); /* If any byte is now zero, we had a decode failure */ __m256i mask = _mm256_cmpeq_epi8(tmp3, _mm256_set1_epi8(0)); return _mm256_testz_si256(mask, mask); } AWS_ALIGNED_TYPEDEF(uint8_t, aligned256[32], 32); /* * Input: a 256-bit vector, interpreted as 32 * 6-bit values * Output: a 256-bit vector, the lower 24 bytes of which contain the packed version of the input */ static inline __m256i pack_vec(__m256i in) { /* * Our basic strategy is to split the input vector into three vectors, for each 6-bit component * of each 24-bit group, shift the groups into place, then OR the vectors together. Conveniently, * we can do this on a (32 bit) dword-by-dword basis. * * It's important to note that we're interpreting the vector as being little-endian. That is, * on entry, we have dwords that look like this: * * MSB LSB * 00DD DDDD 00CC CCCC 00BB BBBB 00AA AAAA * * And we want to translate to: * * MSB LSB * 0000 0000 AAAA AABB BBBB CCCC CCDD DDDD * * After which point we can pack these dwords together to produce our final output. */ __m256i maskA = _mm256_set1_epi32(0xFF); // low bits __m256i maskB = _mm256_set1_epi32(0xFF00); __m256i maskC = _mm256_set1_epi32(0xFF0000); __m256i maskD = _mm256_set1_epi32((int)0xFF000000); __m256i bitsA = _mm256_slli_epi32(_mm256_and_si256(in, maskA), 18); __m256i bitsB = _mm256_slli_epi32(_mm256_and_si256(in, maskB), 4); __m256i bitsC = _mm256_srli_epi32(_mm256_and_si256(in, maskC), 10); __m256i bitsD = _mm256_srli_epi32(_mm256_and_si256(in, maskD), 24); __m256i dwords = _mm256_or_si256(_mm256_or_si256(bitsA, bitsB), _mm256_or_si256(bitsC, bitsD)); /* * Now we have a series of dwords with empty MSBs. * We need to pack them together (and shift down) with a shuffle operation. * Unfortunately the shuffle operation operates independently within each 128-bit lane, * so we'll need to do this in two steps: First we compact dwords within each lane, then * we do a dword shuffle to compact the two lanes together. 
* 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00 <- byte index (little endian) * -- 09 0a 0b -- 06 07 08 -- 03 04 05 -- 00 01 02 <- data index * * We also reverse the order of 3-byte fragments within each lane; we've constructed * those fragments in little endian but the order of fragments within the overall * vector is in memory order (big endian) */ const aligned256 shufvec_buf = { /* clang-format off */ /* MSB */ 0xFF, 0xFF, 0xFF, 0xFF, /* Zero out the top 4 bytes of the lane */ 2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, 0xFF, 0xFF, 0xFF, 0xFF, /* Zero out the top 4 bytes of the lane */ 2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12 /* LSB */ /* clang-format on */ }; __m256i shufvec = _mm256_load_si256((__m256i const *)&shufvec_buf); dwords = _mm256_shuffle_epi8(dwords, shufvec); /* * Now shuffle the 32-bit words: * A B C 0 D E F 0 -> 0 0 A B C D E F */ __m256i shuf32 = _mm256_set_epi32(0, 0, 7, 6, 5, 3, 2, 1); dwords = _mm256_permutevar8x32_epi32(dwords, shuf32); return dwords; } static inline bool decode(const unsigned char *in, unsigned char *out) { __m256i vec = _mm256_loadu_si256((__m256i const *)in); if (!decode_vec(&vec)) { return false; } vec = pack_vec(vec); /* * We'll do overlapping writes to get both the low 128 bits and the high 64-bits written. * Input (memory order): 0 1 2 3 4 5 - - (dwords) * Input (little endian) - - 5 4 3 2 1 0 * Output in memory: * [0 1 2 3] [4 5] */ __m128i lo = _mm256_extracti128_si256(vec, 0); /* * Unfortunately some compilers don't support _mm256_extract_epi64, * so we'll just copy right out of the vector as a fallback */ #ifdef AWS_HAVE_MM256_EXTRACT_EPI64 uint64_t hi = _mm256_extract_epi64(vec, 2); const uint64_t *p_hi = &hi; #else const uint64_t *p_hi = (uint64_t *)&vec + 2; #endif _mm_storeu_si128((__m128i *)out, lo); memcpy(out + 16, p_hi, sizeof(*p_hi)); return true; } size_t aws_common_private_base64_decode_sse41(const unsigned char *in, unsigned char *out, size_t len) { if (len % 4) { return (size_t)-1; } size_t outlen = 0; while (len > 32) { if (!decode(in, out)) { return (size_t)-1; } len -= 32; in += 32; out += 24; outlen += 24; } if (len > 0) { unsigned char tmp_in[32]; unsigned char tmp_out[24]; memset(tmp_out, 0xEE, sizeof(tmp_out)); /* We need to ensure the vector contains valid b64 characters */ memset(tmp_in, 'A', sizeof(tmp_in)); memcpy(tmp_in, in, len); size_t final_out = (3 * len) / 4; /* Check for end-of-string padding (up to 2 characters) */ for (int i = 0; i < 2; i++) { if (tmp_in[len - 1] == '=') { tmp_in[len - 1] = 'A'; /* make sure the inner loop doesn't bail out */ len--; final_out--; } } if (!decode(tmp_in, tmp_out)) { return (size_t)-1; } /* Check that there are no trailing ones bits */ for (size_t i = final_out; i < sizeof(tmp_out); i++) { if (tmp_out[i]) { return (size_t)-1; } } memcpy(out, tmp_out, final_out); outlen += final_out; } return outlen; } /***** Encode logic *****/ static inline __m256i encode_chars(__m256i in) { __m256i tmp1, tmp2, tmp3; /* * Base64 encoding table, see RFC4648 * * We again use fan-in for the ORs here. 
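 * (0-25 -> 'A'-'Z', 26-51 -> 'a'-'z', 52-61 -> '0'-'9', 62 -> '+', 63 -> '/')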
*/ tmp1 = translate_range(in, 0, 25, 'A'); tmp2 = translate_range(in, 26, 26 + 25, 'a'); tmp3 = translate_range(in, 52, 61, '0'); tmp1 = _mm256_or_si256(tmp1, translate_exact(in, 62, '+')); tmp2 = _mm256_or_si256(tmp2, translate_exact(in, 63, '/')); return _mm256_or_si256(tmp3, _mm256_or_si256(tmp1, tmp2)); } /* * Input: A 256-bit vector, interpreted as 24 bytes (LSB) plus 8 bytes of high-byte padding * Output: A 256-bit vector of base64 characters */ static inline __m256i encode_stride(__m256i vec) { /* * First, since byte-shuffle operations operate within 128-bit subvectors, swap around the dwords * to balance the amount of actual data between 128-bit subvectors. * After this we want the LE representation to look like: -- XX XX XX -- XX XX XX */ __m256i shuf32 = _mm256_set_epi32(7, 5, 4, 3, 6, 2, 1, 0); vec = _mm256_permutevar8x32_epi32(vec, shuf32); /* * Next, within each group of 3 bytes, we need to byteswap into little endian form so our bitshifts * will work properly. We also shuffle around so that each dword has one 3-byte group, plus one byte * (MSB) of zero-padding. * Because this is a byte-shuffle, indexes are within each 128-bit subvector. * * -- -- -- -- 11 10 09 08 07 06 05 04 03 02 01 00 */ const aligned256 shufvec_buf = { /* clang-format off */ /* MSB */ 2, 1, 0, 0xFF, 5, 4, 3, 0xFF, 8, 7, 6, 0xFF, 11, 10, 9, 0xFF, 2, 1, 0, 0xFF, 5, 4, 3, 0xFF, 8, 7, 6, 0xFF, 11, 10, 9, 0xFF /* LSB */ /* clang-format on */ }; vec = _mm256_shuffle_epi8(vec, _mm256_load_si256((__m256i const *)&shufvec_buf)); /* * Now shift and mask to split out 6-bit groups. * We'll also do a second byteswap to get back into big-endian */ __m256i mask0 = _mm256_set1_epi32(0x3F); __m256i mask1 = _mm256_set1_epi32(0x3F << 6); __m256i mask2 = _mm256_set1_epi32(0x3F << 12); __m256i mask3 = _mm256_set1_epi32(0x3F << 18); __m256i digit0 = _mm256_and_si256(mask0, vec); __m256i digit1 = _mm256_and_si256(mask1, vec); __m256i digit2 = _mm256_and_si256(mask2, vec); __m256i digit3 = _mm256_and_si256(mask3, vec); /* * Because we want to byteswap, the low-order digit0 goes into the * high-order byte */ digit0 = _mm256_slli_epi32(digit0, 24); digit1 = _mm256_slli_epi32(digit1, 10); digit2 = _mm256_srli_epi32(digit2, 4); digit3 = _mm256_srli_epi32(digit3, 18); vec = _mm256_or_si256(_mm256_or_si256(digit0, digit1), _mm256_or_si256(digit2, digit3)); /* Finally translate to the base64 character set */ return encode_chars(vec); } void aws_common_private_base64_encode_sse41(const uint8_t *input, uint8_t *output, size_t inlen) { __m256i instride, outstride; while (inlen >= 32) { /* * Where possible, we'll load a full vector at a time and ignore the over-read. * However, if we have < 32 bytes left, this would result in a potential read * of unreadable pages, so we use bounce buffers below. */ instride = _mm256_loadu_si256((__m256i const *)input); outstride = encode_stride(instride); _mm256_storeu_si256((__m256i *)output, outstride); input += 24; output += 32; inlen -= 24; } while (inlen) { /* * We need to go through a bounce buffer for anything remaining, as we * don't want to over-read or over-write the ends of the buffers. */ size_t stridelen = inlen > 24 ? 
24 : inlen; size_t outlen = ((stridelen + 2) / 3) * 4; memset(&instride, 0, sizeof(instride)); memcpy(&instride, input, stridelen); outstride = encode_stride(instride); memcpy(output, &outstride, outlen); if (inlen < 24) { if (inlen % 3 >= 1) { /* AA== or AAA= */ output[outlen - 1] = '='; } if (inlen % 3 == 1) { /* AA== */ output[outlen - 2] = '='; } return; } input += stridelen; output += outlen; inlen -= stridelen; } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/intel/msvc/000077500000000000000000000000001456575232400250205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/arch/intel/msvc/cpuid.c000066400000000000000000000004301456575232400262650ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_run_cpuid(uint32_t eax, uint32_t ecx, uint32_t *abcd) { __cpuidex((int32_t *)abcd, eax, ecx); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/array_list.c000066400000000000000000000173501456575232400243430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include /* qsort */ int aws_array_list_calc_necessary_size(struct aws_array_list *AWS_RESTRICT list, size_t index, size_t *necessary_size) { AWS_PRECONDITION(aws_array_list_is_valid(list)); size_t index_inc = 0; if (aws_add_size_checked(index, 1, &index_inc)) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } if (aws_mul_size_checked(index_inc, list->item_size, necessary_size)) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } int aws_array_list_shrink_to_fit(struct aws_array_list *AWS_RESTRICT list) { AWS_PRECONDITION(aws_array_list_is_valid(list)); if (list->alloc) { size_t ideal_size; if (aws_mul_size_checked(list->length, list->item_size, &ideal_size)) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } if (ideal_size < list->current_size) { void *raw_data = NULL; if (ideal_size > 0) { raw_data = aws_mem_acquire(list->alloc, ideal_size); if (!raw_data) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } memcpy(raw_data, list->data, ideal_size); aws_mem_release(list->alloc, list->data); } list->data = raw_data; list->current_size = ideal_size; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_STATIC_MODE_CANT_SHRINK); } int aws_array_list_copy(const struct aws_array_list *AWS_RESTRICT from, struct aws_array_list *AWS_RESTRICT to) { AWS_FATAL_PRECONDITION(from->item_size == to->item_size); AWS_FATAL_PRECONDITION(from->data); AWS_PRECONDITION(aws_array_list_is_valid(from)); AWS_PRECONDITION(aws_array_list_is_valid(to)); size_t copy_size; if (aws_mul_size_checked(from->length, from->item_size, ©_size)) { AWS_POSTCONDITION(aws_array_list_is_valid(from)); AWS_POSTCONDITION(aws_array_list_is_valid(to)); return AWS_OP_ERR; } if (to->current_size >= copy_size) { if (copy_size > 0) { memcpy(to->data, from->data, copy_size); } to->length = from->length; AWS_POSTCONDITION(aws_array_list_is_valid(from)); AWS_POSTCONDITION(aws_array_list_is_valid(to)); return AWS_OP_SUCCESS; } /* if to is in dynamic mode, we can just reallocate it and copy */ if (to->alloc != NULL) { void *tmp = aws_mem_acquire(to->alloc, 
copy_size); if (!tmp) { AWS_POSTCONDITION(aws_array_list_is_valid(from)); AWS_POSTCONDITION(aws_array_list_is_valid(to)); return AWS_OP_ERR; } memcpy(tmp, from->data, copy_size); if (to->data) { aws_mem_release(to->alloc, to->data); } to->data = tmp; to->length = from->length; to->current_size = copy_size; AWS_POSTCONDITION(aws_array_list_is_valid(from)); AWS_POSTCONDITION(aws_array_list_is_valid(to)); return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_DEST_COPY_TOO_SMALL); } int aws_array_list_ensure_capacity(struct aws_array_list *AWS_RESTRICT list, size_t index) { AWS_PRECONDITION(aws_array_list_is_valid(list)); size_t necessary_size; if (aws_array_list_calc_necessary_size(list, index, &necessary_size)) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } if (list->current_size < necessary_size) { if (!list->alloc) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_INVALID_INDEX); } /* this will double capacity if the index isn't bigger than what the * next allocation would be, but allocates the exact requested size if * it is. This is largely because we don't have a good way to predict * the usage pattern to make a smart decision about it. However, if the * user * is doing this in an iterative fashion, necessary_size will never be * used.*/ size_t next_allocation_size = list->current_size << 1; size_t new_size = next_allocation_size > necessary_size ? next_allocation_size : necessary_size; if (new_size < list->current_size) { /* this means new_size overflowed. The only way this happens is on a * 32-bit system where size_t is 32 bits, in which case we're out of * addressable memory anyways, or we're on a 64 bit system and we're * most certainly out of addressable memory. But since we're simply * going to fail fast and say, sorry can't do it, we'll just tell * the user they can't grow the list anymore. 
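         * (For example, if current_size already exceeds SIZE_MAX / 2, the doubling
         * shift wraps around; a new_size that ends up smaller than current_size can
         * only mean such an overflow, which is what this check rejects.)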
*/ AWS_POSTCONDITION(aws_array_list_is_valid(list)); return aws_raise_error(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE); } void *temp = aws_mem_acquire(list->alloc, new_size); if (!temp) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_ERR; } if (list->data) { memcpy(temp, list->data, list->current_size); #ifdef DEBUG_BUILD memset( (void *)((uint8_t *)temp + list->current_size), AWS_ARRAY_LIST_DEBUG_FILL, new_size - list->current_size); #endif aws_mem_release(list->alloc, list->data); } list->data = temp; list->current_size = new_size; } AWS_POSTCONDITION(aws_array_list_is_valid(list)); return AWS_OP_SUCCESS; } static void aws_array_list_mem_swap(void *AWS_RESTRICT item1, void *AWS_RESTRICT item2, size_t item_size) { enum { SLICE = 128 }; AWS_FATAL_PRECONDITION(item1); AWS_FATAL_PRECONDITION(item2); /* copy SLICE sized bytes at a time */ size_t slice_count = item_size / SLICE; uint8_t temp[SLICE]; for (size_t i = 0; i < slice_count; i++) { memcpy((void *)temp, (void *)item1, SLICE); memcpy((void *)item1, (void *)item2, SLICE); memcpy((void *)item2, (void *)temp, SLICE); item1 = (uint8_t *)item1 + SLICE; item2 = (uint8_t *)item2 + SLICE; } size_t remainder = item_size & (SLICE - 1); /* item_size % SLICE */ memcpy((void *)temp, (void *)item1, remainder); memcpy((void *)item1, (void *)item2, remainder); memcpy((void *)item2, (void *)temp, remainder); } void aws_array_list_swap(struct aws_array_list *AWS_RESTRICT list, size_t a, size_t b) { AWS_FATAL_PRECONDITION(a < list->length); AWS_FATAL_PRECONDITION(b < list->length); AWS_PRECONDITION(aws_array_list_is_valid(list)); if (a == b) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return; } void *item1 = NULL; void *item2 = NULL; aws_array_list_get_at_ptr(list, &item1, a); aws_array_list_get_at_ptr(list, &item2, b); aws_array_list_mem_swap(item1, item2, list->item_size); AWS_POSTCONDITION(aws_array_list_is_valid(list)); } void aws_array_list_sort(struct aws_array_list *AWS_RESTRICT list, aws_array_list_comparator_fn *compare_fn) { AWS_PRECONDITION(aws_array_list_is_valid(list)); if (list->data) { qsort(list->data, aws_array_list_length(list), list->item_size, compare_fn); } AWS_POSTCONDITION(aws_array_list_is_valid(list)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/assert.c000066400000000000000000000007541456575232400234730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_fatal_assert(const char *cond_str, const char *file, int line) { aws_debug_break(); fprintf(stderr, "Fatal error condition occurred in %s:%d: %s\nExiting Application\n", file, line, cond_str); aws_backtrace_print(stderr, NULL); abort(); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/byte_buf.c000066400000000000000000001645751456575232400240050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #ifdef _MSC_VER /* disables warning non const declared initializers for Microsoft compilers */ # pragma warning(disable : 4204) # pragma warning(disable : 4706) #endif int aws_byte_buf_init(struct aws_byte_buf *buf, struct aws_allocator *allocator, size_t capacity) { AWS_PRECONDITION(buf); AWS_PRECONDITION(allocator); buf->buffer = (capacity == 0) ? 
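        /* a zero-capacity buffer starts with no allocation at all */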
NULL : aws_mem_acquire(allocator, capacity); if (capacity != 0 && buf->buffer == NULL) { AWS_ZERO_STRUCT(*buf); return AWS_OP_ERR; } buf->len = 0; buf->capacity = capacity; buf->allocator = allocator; AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return AWS_OP_SUCCESS; } int aws_byte_buf_init_copy(struct aws_byte_buf *dest, struct aws_allocator *allocator, const struct aws_byte_buf *src) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(dest); AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(src)); if (!src->buffer) { AWS_ZERO_STRUCT(*dest); dest->allocator = allocator; AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } *dest = *src; dest->allocator = allocator; dest->buffer = (uint8_t *)aws_mem_acquire(allocator, src->capacity); if (dest->buffer == NULL) { AWS_ZERO_STRUCT(*dest); return AWS_OP_ERR; } memcpy(dest->buffer, src->buffer, src->len); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } bool aws_byte_buf_is_valid(const struct aws_byte_buf *const buf) { return buf != NULL && ((buf->capacity == 0 && buf->len == 0 && buf->buffer == NULL) || (buf->capacity > 0 && buf->len <= buf->capacity && AWS_MEM_IS_WRITABLE(buf->buffer, buf->capacity))); } bool aws_byte_cursor_is_valid(const struct aws_byte_cursor *cursor) { return cursor != NULL && ((cursor->len == 0) || (cursor->len > 0 && cursor->ptr && AWS_MEM_IS_READABLE(cursor->ptr, cursor->len))); } void aws_byte_buf_reset(struct aws_byte_buf *buf, bool zero_contents) { if (zero_contents) { aws_byte_buf_secure_zero(buf); } buf->len = 0; } void aws_byte_buf_clean_up(struct aws_byte_buf *buf) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); if (buf->allocator && buf->buffer) { aws_mem_release(buf->allocator, (void *)buf->buffer); } buf->allocator = NULL; buf->buffer = NULL; buf->len = 0; buf->capacity = 0; } void aws_byte_buf_secure_zero(struct aws_byte_buf *buf) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); if (buf->buffer) { aws_secure_zero(buf->buffer, buf->capacity); } buf->len = 0; AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); } void aws_byte_buf_clean_up_secure(struct aws_byte_buf *buf) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); aws_byte_buf_secure_zero(buf); aws_byte_buf_clean_up(buf); AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); } bool aws_byte_buf_eq(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) { AWS_PRECONDITION(aws_byte_buf_is_valid(a)); AWS_PRECONDITION(aws_byte_buf_is_valid(b)); bool rval = aws_array_eq(a->buffer, a->len, b->buffer, b->len); AWS_POSTCONDITION(aws_byte_buf_is_valid(a)); AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); return rval; } bool aws_byte_buf_eq_ignore_case(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) { AWS_PRECONDITION(aws_byte_buf_is_valid(a)); AWS_PRECONDITION(aws_byte_buf_is_valid(b)); bool rval = aws_array_eq_ignore_case(a->buffer, a->len, b->buffer, b->len); AWS_POSTCONDITION(aws_byte_buf_is_valid(a)); AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); return rval; } bool aws_byte_buf_eq_c_str(const struct aws_byte_buf *const buf, const char *const c_str) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(c_str != NULL); bool rval = aws_array_eq_c_str(buf->buffer, buf->len, c_str); AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return rval; } bool aws_byte_buf_eq_c_str_ignore_case(const struct aws_byte_buf *const buf, const char *const c_str) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(c_str != NULL); bool rval = aws_array_eq_c_str_ignore_case(buf->buffer, buf->len, c_str); 
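    /* Case folding uses the ASCII-only s_tolower_table defined later in this file,
     * so bytes outside 'A'-'Z' and 'a'-'z' are compared verbatim. */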
AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return rval; } int aws_byte_buf_init_copy_from_cursor( struct aws_byte_buf *dest, struct aws_allocator *allocator, struct aws_byte_cursor src) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(dest); AWS_ERROR_PRECONDITION(aws_byte_cursor_is_valid(&src)); AWS_ZERO_STRUCT(*dest); dest->buffer = (src.len > 0) ? (uint8_t *)aws_mem_acquire(allocator, src.len) : NULL; if (src.len != 0 && dest->buffer == NULL) { return AWS_OP_ERR; } dest->len = src.len; dest->capacity = src.len; dest->allocator = allocator; if (src.len > 0) { memcpy(dest->buffer, src.ptr, src.len); } AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } int aws_byte_buf_init_cache_and_update_cursors(struct aws_byte_buf *dest, struct aws_allocator *allocator, ...) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(dest); AWS_ZERO_STRUCT(*dest); size_t total_len = 0; va_list args; va_start(args, allocator); /* Loop until final NULL arg is encountered */ struct aws_byte_cursor *cursor_i; while ((cursor_i = va_arg(args, struct aws_byte_cursor *)) != NULL) { AWS_ASSERT(aws_byte_cursor_is_valid(cursor_i)); if (aws_add_size_checked(total_len, cursor_i->len, &total_len)) { return AWS_OP_ERR; } } va_end(args); if (aws_byte_buf_init(dest, allocator, total_len)) { return AWS_OP_ERR; } va_start(args, allocator); while ((cursor_i = va_arg(args, struct aws_byte_cursor *)) != NULL) { /* Impossible for this call to fail, we pre-allocated sufficient space */ aws_byte_buf_append_and_update(dest, cursor_i); } va_end(args); return AWS_OP_SUCCESS; } bool aws_byte_cursor_next_split( const struct aws_byte_cursor *AWS_RESTRICT input_str, char split_on, struct aws_byte_cursor *AWS_RESTRICT substr) { AWS_PRECONDITION(aws_byte_cursor_is_valid(input_str)); /* If substr is zeroed-out, then this is the first run. */ const bool first_run = substr->ptr == NULL; /* It's legal for input_str to be zeroed out: {.ptr=NULL, .len=0} * Deal with this case separately */ if (AWS_UNLIKELY(input_str->ptr == NULL)) { if (first_run) { /* Set substr->ptr to something non-NULL so that next split() call doesn't look like the first run */ substr->ptr = (void *)""; substr->len = 0; return true; } /* done */ AWS_ZERO_STRUCT(*substr); return false; } /* Rest of function deals with non-NULL input_str->ptr */ if (first_run) { *substr = *input_str; } else { /* This is not the first run. * Advance substr past the previous split. */ const uint8_t *input_end = input_str->ptr + input_str->len; substr->ptr += substr->len + 1; /* Note that it's ok if substr->ptr == input_end, this happens in the * final valid split of an input_str that ends with the split_on character: * Ex: "AB&" split on '&' produces "AB" and "" */ if (substr->ptr > input_end || substr->ptr < input_str->ptr) { /* 2nd check is overflow check */ /* done */ AWS_ZERO_STRUCT(*substr); return false; } /* update len to be remainder of the string */ substr->len = input_str->len - (substr->ptr - input_str->ptr); } /* substr is now remainder of string, search for next split */ uint8_t *new_location = memchr(substr->ptr, split_on, substr->len); if (new_location) { /* Character found, update string length. 
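         * The split character itself is excluded from the returned substring.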
*/ substr->len = new_location - substr->ptr; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(substr)); return true; } int aws_byte_cursor_split_on_char_n( const struct aws_byte_cursor *AWS_RESTRICT input_str, char split_on, size_t n, struct aws_array_list *AWS_RESTRICT output) { AWS_ASSERT(aws_byte_cursor_is_valid(input_str)); AWS_ASSERT(output); AWS_ASSERT(output->item_size >= sizeof(struct aws_byte_cursor)); size_t max_splits = n > 0 ? n : SIZE_MAX; size_t split_count = 0; struct aws_byte_cursor substr; AWS_ZERO_STRUCT(substr); /* Until we run out of substrs or hit the max split count, keep iterating and pushing into the array list. */ while (split_count <= max_splits && aws_byte_cursor_next_split(input_str, split_on, &substr)) { if (split_count == max_splits) { /* If this is the last split, take the rest of the string. */ substr.len = input_str->len - (substr.ptr - input_str->ptr); } if (AWS_UNLIKELY(aws_array_list_push_back(output, (const void *)&substr))) { return AWS_OP_ERR; } ++split_count; } return AWS_OP_SUCCESS; } int aws_byte_cursor_split_on_char( const struct aws_byte_cursor *AWS_RESTRICT input_str, char split_on, struct aws_array_list *AWS_RESTRICT output) { return aws_byte_cursor_split_on_char_n(input_str, split_on, 0, output); } int aws_byte_cursor_find_exact( const struct aws_byte_cursor *AWS_RESTRICT input_str, const struct aws_byte_cursor *AWS_RESTRICT to_find, struct aws_byte_cursor *first_find) { if (to_find->len > input_str->len) { return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); } if (to_find->len < 1) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } struct aws_byte_cursor working_cur = *input_str; while (working_cur.len) { uint8_t *first_char_location = memchr(working_cur.ptr, (char)*to_find->ptr, working_cur.len); if (!first_char_location) { return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); } aws_byte_cursor_advance(&working_cur, first_char_location - working_cur.ptr); if (working_cur.len < to_find->len) { return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); } if (!memcmp(working_cur.ptr, to_find->ptr, to_find->len)) { *first_find = working_cur; return AWS_OP_SUCCESS; } aws_byte_cursor_advance(&working_cur, 1); } return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); } int aws_byte_buf_cat(struct aws_byte_buf *dest, size_t number_of_args, ...) 
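/* Appends each of the number_of_args aws_byte_buf varargs to dest in order;
 * returns AWS_OP_ERR as soon as one of them does not fit in dest's remaining
 * capacity (dest is never grown here). */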
{ AWS_PRECONDITION(aws_byte_buf_is_valid(dest)); va_list ap; va_start(ap, number_of_args); for (size_t i = 0; i < number_of_args; ++i) { struct aws_byte_buf *buffer = va_arg(ap, struct aws_byte_buf *); struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(buffer); if (aws_byte_buf_append(dest, &cursor)) { va_end(ap); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_ERR; } } va_end(ap); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } bool aws_byte_cursor_eq(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b) { AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); AWS_PRECONDITION(aws_byte_cursor_is_valid(b)); bool rv = aws_array_eq(a->ptr, a->len, b->ptr, b->len); AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(b)); return rv; } bool aws_byte_cursor_eq_ignore_case(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b) { AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); AWS_PRECONDITION(aws_byte_cursor_is_valid(b)); bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->ptr, b->len); AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(b)); return rv; } /* Every possible uint8_t value, lowercased */ static const uint8_t s_tolower_table[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 91, 92, 93, 94, 95, 96, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; AWS_STATIC_ASSERT(AWS_ARRAY_SIZE(s_tolower_table) == 256); const uint8_t *aws_lookup_table_to_lower_get(void) { return s_tolower_table; } bool aws_array_eq_ignore_case( const void *const array_a, const size_t len_a, const void *const array_b, const size_t len_b) { AWS_PRECONDITION( (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a]."); AWS_PRECONDITION( (len_b == 0) || AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b]."); if (len_a != len_b) { return false; } const uint8_t *bytes_a = array_a; const uint8_t *bytes_b = array_b; for (size_t i = 0; i < len_a; ++i) { if (s_tolower_table[bytes_a[i]] != s_tolower_table[bytes_b[i]]) { return false; } } return true; } bool aws_array_eq(const void *const array_a, const size_t len_a, const void *const array_b, const size_t len_b) { AWS_PRECONDITION( (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a]."); AWS_PRECONDITION( (len_b == 0) || 
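        /* a zero-length array may be NULL and need not be readable */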
AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b]."); if (len_a != len_b) { return false; } if (len_a == 0) { return true; } return !memcmp(array_a, array_b, len_a); } bool aws_array_eq_c_str_ignore_case(const void *const array, const size_t array_len, const char *const c_str) { AWS_PRECONDITION( array || (array_len == 0), "Either input pointer [array_a] mustn't be NULL or input [array_len] mustn't be zero."); AWS_PRECONDITION(c_str != NULL); /* Simpler implementation could have been: * return aws_array_eq_ignore_case(array, array_len, c_str, strlen(c_str)); * but that would have traversed c_str twice. * This implementation traverses c_str just once. */ const uint8_t *array_bytes = array; const uint8_t *str_bytes = (const uint8_t *)c_str; for (size_t i = 0; i < array_len; ++i) { uint8_t s = str_bytes[i]; if (s == '\0') { return false; } if (s_tolower_table[array_bytes[i]] != s_tolower_table[s]) { return false; } } return str_bytes[array_len] == '\0'; } bool aws_array_eq_c_str(const void *const array, const size_t array_len, const char *const c_str) { AWS_PRECONDITION( array || (array_len == 0), "Either input pointer [array_a] mustn't be NULL or input [array_len] mustn't be zero."); AWS_PRECONDITION(c_str != NULL); /* Simpler implementation could have been: * return aws_array_eq(array, array_len, c_str, strlen(c_str)); * but that would have traversed c_str twice. * This implementation traverses c_str just once. */ const uint8_t *array_bytes = array; const uint8_t *str_bytes = (const uint8_t *)c_str; for (size_t i = 0; i < array_len; ++i) { uint8_t s = str_bytes[i]; if (s == '\0') { return false; } if (array_bytes[i] != s) { return false; } } return str_bytes[array_len] == '\0'; } uint64_t aws_hash_array_ignore_case(const void *array, const size_t len) { AWS_PRECONDITION(AWS_MEM_IS_READABLE(array, len)); /* FNV-1a: https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function */ const uint64_t fnv_offset_basis = 0xcbf29ce484222325ULL; const uint64_t fnv_prime = 0x100000001b3ULL; const uint8_t *i = array; const uint8_t *end = (i == NULL) ? 
NULL : (i + len); uint64_t hash = fnv_offset_basis; while (i != end) { const uint8_t lower = s_tolower_table[*i++]; hash ^= lower; #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif hash *= fnv_prime; #ifdef CBMC # pragma CPROVER check pop #endif } return hash; } uint64_t aws_hash_byte_cursor_ptr_ignore_case(const void *item) { AWS_PRECONDITION(aws_byte_cursor_is_valid(item)); const struct aws_byte_cursor *const cursor = item; uint64_t rval = aws_hash_array_ignore_case(cursor->ptr, cursor->len); AWS_POSTCONDITION(aws_byte_cursor_is_valid(item)); return rval; } bool aws_byte_cursor_eq_byte_buf(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b) { AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); AWS_PRECONDITION(aws_byte_buf_is_valid(b)); bool rv = aws_array_eq(a->ptr, a->len, b->buffer, b->len); AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); return rv; } bool aws_byte_cursor_eq_byte_buf_ignore_case( const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b) { AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); AWS_PRECONDITION(aws_byte_buf_is_valid(b)); bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->buffer, b->len); AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); return rv; } bool aws_byte_cursor_eq_c_str(const struct aws_byte_cursor *const cursor, const char *const c_str) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); AWS_PRECONDITION(c_str != NULL); bool rv = aws_array_eq_c_str(cursor->ptr, cursor->len, c_str); AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); return rv; } bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); AWS_PRECONDITION(c_str != NULL); bool rv = aws_array_eq_c_str_ignore_case(cursor->ptr, cursor->len, c_str); AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); return rv; } bool aws_byte_cursor_starts_with(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix) { AWS_PRECONDITION(aws_byte_cursor_is_valid(input)); AWS_PRECONDITION(aws_byte_cursor_is_valid(prefix)); if (input->len < prefix->len) { return false; } struct aws_byte_cursor start = {.ptr = input->ptr, .len = prefix->len}; bool rv = aws_byte_cursor_eq(&start, prefix); AWS_POSTCONDITION(aws_byte_cursor_is_valid(input)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(prefix)); return rv; } bool aws_byte_cursor_starts_with_ignore_case( const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix) { AWS_PRECONDITION(aws_byte_cursor_is_valid(input)); AWS_PRECONDITION(aws_byte_cursor_is_valid(prefix)); if (input->len < prefix->len) { return false; } struct aws_byte_cursor start = {.ptr = input->ptr, .len = prefix->len}; bool rv = aws_byte_cursor_eq_ignore_case(&start, prefix); AWS_POSTCONDITION(aws_byte_cursor_is_valid(input)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(prefix)); return rv; } int aws_byte_buf_append(struct aws_byte_buf *to, const struct aws_byte_cursor *from) { AWS_PRECONDITION(aws_byte_buf_is_valid(to)); AWS_PRECONDITION(aws_byte_cursor_is_valid(from)); if (to->capacity - to->len < from->len) { AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return aws_raise_error(AWS_ERROR_DEST_COPY_TOO_SMALL); } if (from->len > 0) { /* This assert teaches clang-tidy that from->ptr and to->buffer cannot be null in a non-empty buffers */ 
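        /* capacity was checked above, so this memcpy cannot overflow the destination */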
AWS_ASSERT(from->ptr); AWS_ASSERT(to->buffer); memcpy(to->buffer + to->len, from->ptr, from->len); to->len += from->len; } AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return AWS_OP_SUCCESS; } int aws_byte_buf_append_with_lookup( struct aws_byte_buf *AWS_RESTRICT to, const struct aws_byte_cursor *AWS_RESTRICT from, const uint8_t *lookup_table) { AWS_PRECONDITION(aws_byte_buf_is_valid(to)); AWS_PRECONDITION(aws_byte_cursor_is_valid(from)); AWS_PRECONDITION( AWS_MEM_IS_READABLE(lookup_table, 256), "Input array [lookup_table] must be at least 256 bytes long."); if (to->capacity - to->len < from->len) { AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return aws_raise_error(AWS_ERROR_DEST_COPY_TOO_SMALL); } for (size_t i = 0; i < from->len; ++i) { to->buffer[to->len + i] = lookup_table[from->ptr[i]]; } if (aws_add_size_checked(to->len, from->len, &to->len)) { return AWS_OP_ERR; } AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return AWS_OP_SUCCESS; } static int s_aws_byte_buf_append_dynamic( struct aws_byte_buf *to, const struct aws_byte_cursor *from, bool clear_released_memory) { AWS_PRECONDITION(aws_byte_buf_is_valid(to)); AWS_PRECONDITION(aws_byte_cursor_is_valid(from)); AWS_ERROR_PRECONDITION(to->allocator); if (to->capacity - to->len < from->len) { /* * NewCapacity = Max(OldCapacity * 2, OldCapacity + MissingCapacity) */ size_t missing_capacity = from->len - (to->capacity - to->len); size_t required_capacity = 0; if (aws_add_size_checked(to->capacity, missing_capacity, &required_capacity)) { AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return AWS_OP_ERR; } /* * It's ok if this overflows, just clamp to max possible. * In theory this lets us still grow a buffer that's larger than 1/2 size_t space * at least enough to accommodate the append. */ size_t growth_capacity = aws_add_size_saturating(to->capacity, to->capacity); size_t new_capacity = required_capacity; if (new_capacity < growth_capacity) { new_capacity = growth_capacity; } /* * Attempt to resize - we intentionally do not use reserve() in order to preserve * the (unlikely) use case of from and to being the same buffer range. 
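         * (A realloc-based reserve could free the old allocation while "from" still
         * points into it; acquiring a fresh buffer and copying first keeps an
         * aliasing "from" cursor valid.)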
*/ /* * Try the max, but if that fails and the required is smaller, try it in fallback */ uint8_t *new_buffer = aws_mem_acquire(to->allocator, new_capacity); if (new_buffer == NULL) { if (new_capacity > required_capacity) { new_capacity = required_capacity; new_buffer = aws_mem_acquire(to->allocator, new_capacity); if (new_buffer == NULL) { AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return AWS_OP_ERR; } } else { AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return AWS_OP_ERR; } } /* * Copy old buffer -> new buffer */ if (to->len > 0) { memcpy(new_buffer, to->buffer, to->len); } /* * Copy what we actually wanted to append in the first place */ if (from->len > 0) { memcpy(new_buffer + to->len, from->ptr, from->len); } if (clear_released_memory) { aws_secure_zero(to->buffer, to->capacity); } /* * Get rid of the old buffer */ aws_mem_release(to->allocator, to->buffer); /* * Switch to the new buffer */ to->buffer = new_buffer; to->capacity = new_capacity; } else { if (from->len > 0) { /* This assert teaches clang-tidy that from->ptr and to->buffer cannot be null in a non-empty buffers */ AWS_ASSERT(from->ptr); AWS_ASSERT(to->buffer); memcpy(to->buffer + to->len, from->ptr, from->len); } } to->len += from->len; AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); return AWS_OP_SUCCESS; } int aws_byte_buf_append_dynamic(struct aws_byte_buf *to, const struct aws_byte_cursor *from) { return s_aws_byte_buf_append_dynamic(to, from, false); } int aws_byte_buf_append_dynamic_secure(struct aws_byte_buf *to, const struct aws_byte_cursor *from) { return s_aws_byte_buf_append_dynamic(to, from, true); } static int s_aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value, bool clear_released_memory) { #if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable : 4221) #endif /* _MSC_VER */ /* msvc isn't a fan of this pointer-to-local assignment */ struct aws_byte_cursor eq_cursor = {.len = 1, .ptr = &value}; #if defined(_MSC_VER) # pragma warning(pop) #endif /* _MSC_VER */ return s_aws_byte_buf_append_dynamic(buffer, &eq_cursor, clear_released_memory); } int aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value) { return s_aws_byte_buf_append_byte_dynamic(buffer, value, false); } int aws_byte_buf_append_byte_dynamic_secure(struct aws_byte_buf *buffer, uint8_t value) { return s_aws_byte_buf_append_byte_dynamic(buffer, value, true); } int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity) { AWS_ERROR_PRECONDITION(buffer->allocator); AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer)); if (requested_capacity <= buffer->capacity) { AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); return AWS_OP_SUCCESS; } if (!buffer->buffer && !buffer->capacity && requested_capacity > buffer->capacity) { if (aws_byte_buf_init(buffer, buffer->allocator, requested_capacity)) { return AWS_OP_ERR; } AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); return AWS_OP_SUCCESS; } if (aws_mem_realloc(buffer->allocator, (void **)&buffer->buffer, buffer->capacity, requested_capacity)) { return AWS_OP_ERR; } buffer->capacity = requested_capacity; AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); return AWS_OP_SUCCESS; } int aws_byte_buf_reserve_relative(struct aws_byte_buf *buffer, size_t additional_length) { AWS_ERROR_PRECONDITION(buffer->allocator); AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer)); size_t 
requested_capacity = 0; if (AWS_UNLIKELY(aws_add_size_checked(buffer->len, additional_length, &requested_capacity))) { AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); return AWS_OP_ERR; } return aws_byte_buf_reserve(buffer, requested_capacity); } struct aws_byte_cursor aws_byte_cursor_right_trim_pred( const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate) { AWS_PRECONDITION(aws_byte_cursor_is_valid(source)); AWS_PRECONDITION(predicate != NULL); struct aws_byte_cursor trimmed = *source; while (trimmed.len > 0 && predicate(*(trimmed.ptr + trimmed.len - 1))) { --trimmed.len; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed)); return trimmed; } struct aws_byte_cursor aws_byte_cursor_left_trim_pred( const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate) { AWS_PRECONDITION(aws_byte_cursor_is_valid(source)); AWS_PRECONDITION(predicate != NULL); struct aws_byte_cursor trimmed = *source; while (trimmed.len > 0 && predicate(*(trimmed.ptr))) { --trimmed.len; ++trimmed.ptr; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed)); return trimmed; } struct aws_byte_cursor aws_byte_cursor_trim_pred( const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate) { AWS_PRECONDITION(aws_byte_cursor_is_valid(source)); AWS_PRECONDITION(predicate != NULL); struct aws_byte_cursor left_trimmed = aws_byte_cursor_left_trim_pred(source, predicate); struct aws_byte_cursor dest = aws_byte_cursor_right_trim_pred(&left_trimmed, predicate); AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(&dest)); return dest; } bool aws_byte_cursor_satisfies_pred(const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate) { struct aws_byte_cursor trimmed = aws_byte_cursor_left_trim_pred(source, predicate); bool rval = (trimmed.len == 0); AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); return rval; } int aws_byte_cursor_compare_lexical(const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs) { AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs)); AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs)); /* make sure we don't pass NULL pointers to memcmp */ AWS_PRECONDITION(lhs->ptr != NULL); AWS_PRECONDITION(rhs->ptr != NULL); size_t comparison_length = lhs->len; if (comparison_length > rhs->len) { comparison_length = rhs->len; } int result = memcmp(lhs->ptr, rhs->ptr, comparison_length); AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs)); if (result != 0) { return result; } if (lhs->len != rhs->len) { return comparison_length == lhs->len ? 
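    /* the shorter input is a strict prefix of the longer one, so it sorts first */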
-1 : 1; } return 0; } int aws_byte_cursor_compare_lookup( const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs, const uint8_t *lookup_table) { AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs)); AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs)); AWS_PRECONDITION(AWS_MEM_IS_READABLE(lookup_table, 256)); if (lhs->len == 0 && rhs->len == 0) { return 0; } else if (lhs->len == 0) { return -1; } else if (rhs->len == 0) { return 1; } const uint8_t *lhs_curr = lhs->ptr; const uint8_t *lhs_end = lhs_curr + lhs->len; const uint8_t *rhs_curr = rhs->ptr; const uint8_t *rhs_end = rhs_curr + rhs->len; while (lhs_curr < lhs_end && rhs_curr < rhs_end) { uint8_t lhc = lookup_table[*lhs_curr]; uint8_t rhc = lookup_table[*rhs_curr]; AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs)); if (lhc < rhc) { return -1; } if (lhc > rhc) { return 1; } lhs_curr++; rhs_curr++; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs)); if (lhs_curr < lhs_end) { return 1; } if (rhs_curr < rhs_end) { return -1; } return 0; } /** * For creating a byte buffer from a null-terminated string literal. */ struct aws_byte_buf aws_byte_buf_from_c_str(const char *c_str) { struct aws_byte_buf buf; buf.len = (!c_str) ? 0 : strlen(c_str); buf.capacity = buf.len; buf.buffer = (buf.capacity == 0) ? NULL : (uint8_t *)c_str; buf.allocator = NULL; AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf)); return buf; } struct aws_byte_buf aws_byte_buf_from_array(const void *bytes, size_t len) { AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(bytes, len), "Input array [bytes] must be writable up to [len] bytes."); struct aws_byte_buf buf; buf.buffer = (len > 0) ? (uint8_t *)bytes : NULL; buf.len = len; buf.capacity = len; buf.allocator = NULL; AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf)); return buf; } struct aws_byte_buf aws_byte_buf_from_empty_array(const void *bytes, size_t capacity) { AWS_PRECONDITION( AWS_MEM_IS_WRITABLE(bytes, capacity), "Input array [bytes] must be writable up to [capacity] bytes."); struct aws_byte_buf buf; buf.buffer = (capacity > 0) ? (uint8_t *)bytes : NULL; buf.len = 0; buf.capacity = capacity; buf.allocator = NULL; AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf)); return buf; } struct aws_byte_cursor aws_byte_cursor_from_buf(const struct aws_byte_buf *const buf) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); struct aws_byte_cursor cur; cur.ptr = buf->buffer; cur.len = buf->len; AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur)); return cur; } struct aws_byte_cursor aws_byte_cursor_from_c_str(const char *c_str) { struct aws_byte_cursor cur; cur.ptr = (uint8_t *)c_str; cur.len = (cur.ptr) ? strlen(c_str) : 0; AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur)); return cur; } struct aws_byte_cursor aws_byte_cursor_from_array(const void *const bytes, const size_t len) { AWS_PRECONDITION(len == 0 || AWS_MEM_IS_READABLE(bytes, len), "Input array [bytes] must be readable up to [len]."); struct aws_byte_cursor cur; cur.ptr = (uint8_t *)bytes; cur.len = len; AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur)); return cur; } #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif /** * If index >= bound, bound > (SIZE_MAX / 2), or index > (SIZE_MAX / 2), returns * 0. Otherwise, returns UINTPTR_MAX. This function is designed to return the correct * value even under CPU speculation conditions, and is intended to be used for * SPECTRE mitigation purposes. 
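 *
 * Illustrative example (not part of the original documentation): with bound = 16,
 * aws_nospec_mask(3, 16) returns UINTPTR_MAX, so "index & mask" leaves an in-range
 * index untouched, while aws_nospec_mask(20, 16) returns 0, forcing an out-of-range
 * index to 0 even under misspeculation.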
*/ size_t aws_nospec_mask(size_t index, size_t bound) { /* * SPECTRE mitigation - we compute a mask that will be zero if len < 0 * or len >= buf->len, and all-ones otherwise, and AND it into the index. * It is critical that we avoid any branches in this logic. */ /* * Hide the index value from the optimizer. This helps ensure that all this * logic doesn't get eliminated. */ #if defined(__GNUC__) || defined(__clang__) __asm__ __volatile__("" : "+r"(index)); #endif #if defined(_MSVC_LANG) /* * MSVC doesn't have a good way for us to blind the optimizer, and doesn't * even have inline asm on x64. Some experimentation indicates that this * hack seems to confuse it sufficiently for our needs. */ *((volatile uint8_t *)&index) += 0; #endif /* * If len > (SIZE_MAX / 2), then we can end up with len - buf->len being * positive simply because the sign bit got inverted away. So we also check * that the sign bit isn't set from the start. * * We also check that bound <= (SIZE_MAX / 2) to catch cases where the * buffer is _already_ out of bounds. */ size_t negative_mask = index | bound; size_t toobig_mask = bound - index - (uintptr_t)1; size_t combined_mask = negative_mask | toobig_mask; /* * combined_mask needs to have its sign bit OFF for us to be in range. * We'd like to expand this to a mask we can AND into our index, so flip * that bit (and everything else), shift it over so it's the only bit in the * ones position, and multiply across the entire register. * * First, extract the (inverse) top bit and move it to the lowest bit. * Because there's no standard SIZE_BIT in C99, we'll divide by a mask with * just the top bit set instead. */ combined_mask = (~combined_mask) / (SIZE_MAX - (SIZE_MAX >> 1)); /* * Now multiply it to replicate it across all bits. * * Note that GCC is smart enough to optimize the divide-and-multiply into * an arithmetic right shift operation on x86. */ combined_mask = combined_mask * UINTPTR_MAX; return combined_mask; } #ifdef CBMC # pragma CPROVER check pop #endif /** * Tests if the given aws_byte_cursor has at least len bytes remaining. If so, * *buf is advanced by len bytes (incrementing ->ptr and decrementing ->len), * and an aws_byte_cursor referring to the first len bytes of the original *buf * is returned. Otherwise, an aws_byte_cursor with ->ptr = NULL, ->len = 0 is * returned. * * Note that if len is above (SIZE_MAX / 2), this function will also treat it as * a buffer overflow, and return NULL without changing *buf. */ struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cursor, const size_t len) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); struct aws_byte_cursor rv; if (cursor->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || len > cursor->len) { rv.ptr = NULL; rv.len = 0; } else { rv.ptr = cursor->ptr; rv.len = len; cursor->ptr = (cursor->ptr == NULL) ? NULL : cursor->ptr + len; cursor->len -= len; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv)); return rv; } /** * Behaves identically to aws_byte_cursor_advance, but avoids speculative * execution potentially reading out-of-bounds pointers (by returning an * empty ptr in such speculated paths). * * This should generally be done when using an untrusted or * data-dependent value for 'len', to avoid speculating into a path where * cursor->ptr points outside the true ptr length. 
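 *
 * The returned values match aws_byte_cursor_advance; only the behavior under
 * speculative execution differs.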
*/ struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *const cursor, size_t len) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); struct aws_byte_cursor rv; if (len <= cursor->len && len <= (SIZE_MAX >> 1) && cursor->len <= (SIZE_MAX >> 1)) { /* * If we're speculating past a failed bounds check, null out the pointer. This ensures * that we don't try to read past the end of the buffer and leak information about other * memory through timing side-channels. */ uintptr_t mask = aws_nospec_mask(len, cursor->len + 1); /* Make sure we don't speculate-underflow len either */ len = len & mask; cursor->ptr = (uint8_t *)((uintptr_t)cursor->ptr & mask); /* Make sure subsequent nospec accesses don't advance ptr past NULL */ cursor->len = cursor->len & mask; rv.ptr = cursor->ptr; /* Make sure anything acting upon the returned cursor _also_ doesn't advance past NULL */ rv.len = len & mask; cursor->ptr = (cursor->ptr == NULL) ? NULL : cursor->ptr + len; cursor->len -= len; } else { rv.ptr = NULL; rv.len = 0; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv)); return rv; } /** * Reads specified length of data from byte cursor and copies it to the * destination array. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read(struct aws_byte_cursor *AWS_RESTRICT cur, void *AWS_RESTRICT dest, const size_t len) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(dest, len)); if (len == 0) { return true; } struct aws_byte_cursor slice = aws_byte_cursor_advance_nospec(cur, len); if (slice.ptr) { memcpy(dest, slice.ptr, len); AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); AWS_POSTCONDITION(AWS_MEM_IS_READABLE(dest, len)); return true; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return false; } /** * Reads as many bytes from cursor as size of buffer, and copies them to buffer. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read_and_fill_buffer( struct aws_byte_cursor *AWS_RESTRICT cur, struct aws_byte_buf *AWS_RESTRICT dest) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(aws_byte_buf_is_valid(dest)); if (aws_byte_cursor_read(cur, dest->buffer, dest->capacity)) { dest->len = dest->capacity; AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return true; } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return false; } /** * Reads a single byte from cursor, placing it in *var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read_u8(struct aws_byte_cursor *AWS_RESTRICT cur, uint8_t *AWS_RESTRICT var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(var, 1)); bool rv = aws_byte_cursor_read(cur, var, 1); AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return rv; } /** * Reads a 16-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. 
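 * For example, reading the two bytes {0x12, 0x34} stores 0x1234 into *var on
 * both little- and big-endian hosts.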
* If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read_be16(struct aws_byte_cursor *cur, uint16_t *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); bool rv = aws_byte_cursor_read(cur, var, 2); if (AWS_LIKELY(rv)) { *var = aws_ntoh16(*var); } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return rv; } /** * Reads an unsigned 24-bit value (3 bytes) in network byte order from cur, * and places it in host byte order into 32-bit var. * Ex: if cur's next 3 bytes are {0xAA, 0xBB, 0xCC}, then var becomes 0x00AABBCC. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read_be24(struct aws_byte_cursor *cur, uint32_t *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); uint8_t *var_bytes = (void *)var; /* read into "lower" 3 bytes */ bool rv = aws_byte_cursor_read(cur, &var_bytes[1], 3); if (AWS_LIKELY(rv)) { /* zero out "highest" 4th byte*/ var_bytes[0] = 0; *var = aws_ntoh32(*var); } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return rv; } /** * Reads a 32-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read_be32(struct aws_byte_cursor *cur, uint32_t *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); bool rv = aws_byte_cursor_read(cur, var, 4); if (AWS_LIKELY(rv)) { *var = aws_ntoh32(*var); } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return rv; } /** * Reads a 32-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read_float_be32(struct aws_byte_cursor *cur, float *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); bool rv = aws_byte_cursor_read(cur, var, sizeof(float)); if (AWS_LIKELY(rv)) { *var = aws_ntohf32(*var); } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return rv; } /** * Reads a 64-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_cursor_read_float_be64(struct aws_byte_cursor *cur, double *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); bool rv = aws_byte_cursor_read(cur, var, sizeof(double)); if (AWS_LIKELY(rv)) { *var = aws_ntohf64(*var); } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return rv; } /** * Reads a 64-bit value in network byte order from cur, and places it in host * byte order into var. * * On success, returns true and updates the cursor pointer/length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. 
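 * (aws_ntoh64 performs the byte swap on little-endian hosts and is a no-op on
 * big-endian hosts.)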
*/ bool aws_byte_cursor_read_be64(struct aws_byte_cursor *cur, uint64_t *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); bool rv = aws_byte_cursor_read(cur, var, sizeof(*var)); if (AWS_LIKELY(rv)) { *var = aws_ntoh64(*var); } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return rv; } /* Lookup from '0' -> 0, 'f' -> 0xf, 'F' -> 0xF, etc * invalid characters have value 255 */ /* clang-format off */ static const uint8_t s_hex_to_num_table[] = { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, /* 0 - 9 */ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, 255, /* A - F */ 0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, /* a - f */ 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }; AWS_STATIC_ASSERT(AWS_ARRAY_SIZE(s_hex_to_num_table) == 256); /* clang-format on */ const uint8_t *aws_lookup_table_hex_to_num_get(void) { return s_hex_to_num_table; } bool aws_byte_cursor_read_hex_u8(struct aws_byte_cursor *cur, uint8_t *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); bool success = false; if (AWS_LIKELY(cur->len >= 2)) { const uint8_t hi = s_hex_to_num_table[cur->ptr[0]]; const uint8_t lo = s_hex_to_num_table[cur->ptr[1]]; /* table maps invalid characters to 255 */ if (AWS_LIKELY(hi != 255 && lo != 255)) { *var = (hi << 4) | lo; cur->ptr += 2; cur->len -= 2; success = true; } } AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); return success; } /** * Appends a sub-buffer to the specified buffer. * * If the buffer has at least `len' bytes remaining (buffer->capacity - buffer->len >= len), * then buffer->len is incremented by len, and an aws_byte_buf is assigned to *output corresponding * to the last len bytes of the input buffer. The aws_byte_buf at *output will have a null * allocator, a zero initial length, and a capacity of 'len'. The function then returns true. * * If there is insufficient space, then this function nulls all fields in *output and returns * false. */ bool aws_byte_buf_advance( struct aws_byte_buf *const AWS_RESTRICT buffer, struct aws_byte_buf *const AWS_RESTRICT output, const size_t len) { AWS_PRECONDITION(aws_byte_buf_is_valid(buffer)); AWS_PRECONDITION(aws_byte_buf_is_valid(output)); if (buffer->capacity - buffer->len >= len) { *output = aws_byte_buf_from_array((buffer->buffer == NULL) ? 
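            /* avoid pointer arithmetic on a NULL backing buffer */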
NULL : buffer->buffer + buffer->len, len); buffer->len += len; output->len = 0; AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return true; } else { AWS_ZERO_STRUCT(*output); AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return false; } } /** * Write specified number of bytes from array to byte buffer. * * On success, returns true and updates the buffer length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ bool aws_byte_buf_write(struct aws_byte_buf *AWS_RESTRICT buf, const uint8_t *AWS_RESTRICT src, size_t len) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(AWS_MEM_IS_READABLE(src, len), "Input array [src] must be readable up to [len] bytes."); if (len == 0) { AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return true; } if (buf->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || buf->len + len > buf->capacity) { AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return false; } memcpy(buf->buffer + buf->len, src, len); buf->len += len; AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return true; } /** * Copies all bytes from buffer to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ bool aws_byte_buf_write_from_whole_buffer(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_buf src) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(aws_byte_buf_is_valid(&src)); return aws_byte_buf_write(buf, src.buffer, src.len); } /** * Copies all bytes from buffer to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. */ bool aws_byte_buf_write_from_whole_cursor(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_cursor src) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&src)); return aws_byte_buf_write(buf, src.ptr, src.len); } struct aws_byte_cursor aws_byte_buf_write_to_capacity( struct aws_byte_buf *buf, struct aws_byte_cursor *advancing_cursor) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(aws_byte_cursor_is_valid(advancing_cursor)); size_t available = buf->capacity - buf->len; size_t write_size = aws_min_size(available, advancing_cursor->len); struct aws_byte_cursor write_cursor = aws_byte_cursor_advance(advancing_cursor, write_size); aws_byte_buf_write_from_whole_cursor(buf, write_cursor); return write_cursor; } /** * Copies one byte to buffer. * * On success, returns true and updates the cursor /length accordingly. * If there is insufficient space in the cursor, returns false, leaving the cursor unchanged. */ bool aws_byte_buf_write_u8(struct aws_byte_buf *AWS_RESTRICT buf, uint8_t c) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); return aws_byte_buf_write(buf, &c, 1); } /** * Writes one byte repeatedly to buffer (like memset) * * If there is insufficient space in the buffer, returns false, leaving the * buffer unchanged. 
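 * On success, returns true and advances the buffer length by count.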
*/ bool aws_byte_buf_write_u8_n(struct aws_byte_buf *buf, uint8_t c, size_t count) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); if (buf->len > (SIZE_MAX >> 1) || count > (SIZE_MAX >> 1) || buf->len + count > buf->capacity) { AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return false; } memset(buf->buffer + buf->len, c, count); buf->len += count; AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return true; } /** * Writes a 16-bit integer in network byte order (big endian) to buffer. * * On success, returns true and updates the cursor /length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_buf_write_be16(struct aws_byte_buf *buf, uint16_t x) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); x = aws_hton16(x); return aws_byte_buf_write(buf, (uint8_t *)&x, 2); } /** * Writes low 24-bits (3 bytes) of an unsigned integer in network byte order (big endian) to buffer. * Ex: If x is 0x00AABBCC then {0xAA, 0xBB, 0xCC} is written to buffer. * * On success, returns true and updates the buffer /length accordingly. * If there is insufficient space in the buffer, or x's value cannot fit in 3 bytes, * returns false, leaving the buffer unchanged. */ bool aws_byte_buf_write_be24(struct aws_byte_buf *buf, uint32_t x) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); if (x > 0x00FFFFFF) { return false; } uint32_t be32 = aws_hton32(x); uint8_t *be32_bytes = (uint8_t *)&be32; /* write "lower" 3 bytes */ return aws_byte_buf_write(buf, &be32_bytes[1], 3); } /** * Writes a 32-bit integer in network byte order (big endian) to buffer. * * On success, returns true and updates the cursor /length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_buf_write_be32(struct aws_byte_buf *buf, uint32_t x) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); x = aws_hton32(x); return aws_byte_buf_write(buf, (uint8_t *)&x, 4); } /** * Writes a 32-bit float in network byte order (big endian) to buffer. * * On success, returns true and updates the cursor /length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_buf_write_float_be32(struct aws_byte_buf *buf, float x) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); x = aws_htonf32(x); return aws_byte_buf_write(buf, (uint8_t *)&x, 4); } /** * Writes a 64-bit integer in network byte order (big endian) to buffer. * * On success, returns true and updates the cursor /length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. */ bool aws_byte_buf_write_be64(struct aws_byte_buf *buf, uint64_t x) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); x = aws_hton64(x); return aws_byte_buf_write(buf, (uint8_t *)&x, 8); } /** * Writes a 64-bit float in network byte order (big endian) to buffer. * * On success, returns true and updates the cursor /length accordingly. * If there is insufficient space in the cursor, returns false, leaving the * cursor unchanged. 
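 * (The double is converted with aws_htonf64 before its 8 bytes are written.)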
*/ bool aws_byte_buf_write_float_be64(struct aws_byte_buf *buf, double x) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); x = aws_htonf64(x); return aws_byte_buf_write(buf, (uint8_t *)&x, 8); } int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_cursor *from_and_update) { AWS_PRECONDITION(aws_byte_buf_is_valid(to)); AWS_PRECONDITION(aws_byte_cursor_is_valid(from_and_update)); if (aws_byte_buf_append(to, from_and_update)) { return AWS_OP_ERR; } from_and_update->ptr = to->buffer == NULL ? NULL : to->buffer + (to->len - from_and_update->len); return AWS_OP_SUCCESS; } static struct aws_byte_cursor s_null_terminator_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\0"); int aws_byte_buf_append_null_terminator(struct aws_byte_buf *buf) { return aws_byte_buf_append_dynamic(buf, &s_null_terminator_cursor); } bool aws_isalnum(uint8_t ch) { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9'); } bool aws_isalpha(uint8_t ch) { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'); } bool aws_isdigit(uint8_t ch) { return (ch >= '0' && ch <= '9'); } bool aws_isxdigit(uint8_t ch) { return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F'); } bool aws_isspace(uint8_t ch) { switch (ch) { case 0x20: /* ' ' - space */ case 0x09: /* '\t' - horizontal tab */ case 0x0A: /* '\n' - line feed */ case 0x0B: /* '\v' - vertical tab */ case 0x0C: /* '\f' - form feed */ case 0x0D: /* '\r' - carriage return */ return true; default: return false; } } static int s_read_unsigned(struct aws_byte_cursor cursor, uint64_t *dst, uint8_t base) { uint64_t val = 0; *dst = 0; if (cursor.len == 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } const uint8_t *hex_to_num_table = aws_lookup_table_hex_to_num_get(); /* read from left to right */ for (size_t i = 0; i < cursor.len; ++i) { const uint8_t c = cursor.ptr[i]; const uint8_t cval = hex_to_num_table[c]; if (cval >= base) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (aws_mul_u64_checked(val, base, &val)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } if (aws_add_u64_checked(val, cval, &val)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } } *dst = val; return AWS_OP_SUCCESS; } int aws_byte_cursor_utf8_parse_u64(struct aws_byte_cursor cursor, uint64_t *dst) { return s_read_unsigned(cursor, dst, 10 /*base*/); } int aws_byte_cursor_utf8_parse_u64_hex(struct aws_byte_cursor cursor, uint64_t *dst) { return s_read_unsigned(cursor, dst, 16 /*base*/); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/cache.c000066400000000000000000000036431456575232400232350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include void aws_cache_destroy(struct aws_cache *cache) { AWS_PRECONDITION(cache); cache->vtable->destroy(cache); } int aws_cache_find(struct aws_cache *cache, const void *key, void **p_value) { AWS_PRECONDITION(cache); return cache->vtable->find(cache, key, p_value); } int aws_cache_put(struct aws_cache *cache, const void *key, void *p_value) { AWS_PRECONDITION(cache); return cache->vtable->put(cache, key, p_value); } int aws_cache_remove(struct aws_cache *cache, const void *key) { AWS_PRECONDITION(cache); return cache->vtable->remove(cache, key); } void aws_cache_clear(struct aws_cache *cache) { AWS_PRECONDITION(cache); cache->vtable->clear(cache); } size_t aws_cache_get_element_count(const struct aws_cache *cache) { AWS_PRECONDITION(cache); return cache->vtable->get_element_count(cache); } void aws_cache_base_default_destroy(struct aws_cache *cache) { aws_linked_hash_table_clean_up(&cache->table); aws_mem_release(cache->allocator, cache); } int aws_cache_base_default_find(struct aws_cache *cache, const void *key, void **p_value) { return (aws_linked_hash_table_find(&cache->table, key, p_value)); } int aws_cache_base_default_remove(struct aws_cache *cache, const void *key) { /* allocated cache memory and the linked list entry will be removed in the * callback. */ return aws_linked_hash_table_remove(&cache->table, key); } void aws_cache_base_default_clear(struct aws_cache *cache) { /* clearing the table will remove all elements. That will also deallocate * any cache entries we currently have. */ aws_linked_hash_table_clear(&cache->table); } size_t aws_cache_base_default_get_element_count(const struct aws_cache *cache) { return aws_linked_hash_table_get_element_count(&cache->table); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/codegen.c000066400000000000000000000011551456575232400235720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * This file generates exportable implementations for inlineable functions. */ #define AWS_STATIC_IMPL AWS_COMMON_API #include #include #include #include #include #include #include #include #include #include #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/command_line_parser.c000066400000000000000000000103471456575232400261720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
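 *
 * This file provides a getopt_long-style option parser (aws_cli_getopt_long)
 * and aws_cli_dispatch_on_subcommand, which matches argv[1] against a table
 * of subcommand handlers.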
*/ #include #include #include int aws_cli_optind = 1; int aws_cli_opterr = -1; int aws_cli_optopt = 0; bool aws_cli_on_arg = false; const char *aws_cli_optarg = NULL; const char *aws_cli_positional_arg = NULL; static const struct aws_cli_option *s_find_option_from_char( const struct aws_cli_option *longopts, char search_for, int *longindex) { int index = 0; const struct aws_cli_option *option = &longopts[index]; while (option->val != 0 || option->name) { if (option->val == search_for) { if (longindex) { *longindex = index; } return option; } option = &longopts[++index]; } return NULL; } AWS_COMMON_API void aws_cli_reset_state(void) { aws_cli_optind = 1; aws_cli_opterr = -1; aws_cli_optopt = 0; aws_cli_on_arg = false; aws_cli_optarg = NULL; aws_cli_positional_arg = NULL; } static const struct aws_cli_option *s_find_option_from_c_str( const struct aws_cli_option *longopts, const char *search_for, int *longindex) { int index = 0; const struct aws_cli_option *option = &longopts[index]; while (option->name || option->val != 0) { if (option->name && !strcmp(search_for, option->name)) { if (longindex) { *longindex = index; } return option; } option = &longopts[++index]; } return NULL; } int aws_cli_getopt_long( int argc, char *const argv[], const char *optstring, const struct aws_cli_option *longopts, int *longindex) { aws_cli_optarg = NULL; if (aws_cli_optind >= argc) { return -1; } char first_char = argv[aws_cli_optind][0]; char second_char = argv[aws_cli_optind][1]; char *option_start = NULL; const struct aws_cli_option *option = NULL; bool positional_arg_encountered = false; if (first_char == '-' && second_char != '-') { aws_cli_on_arg = true; positional_arg_encountered = false; option_start = &argv[aws_cli_optind][1]; option = s_find_option_from_char(longopts, *option_start, longindex); } else if (first_char == '-' && second_char == '-') { aws_cli_on_arg = true; positional_arg_encountered = false; option_start = &argv[aws_cli_optind][2]; option = s_find_option_from_c_str(longopts, option_start, longindex); } else { if (!aws_cli_on_arg) { aws_cli_positional_arg = argv[aws_cli_optind]; positional_arg_encountered = true; } else { aws_cli_on_arg = false; aws_cli_positional_arg = NULL; } } aws_cli_optind++; if (option) { bool has_arg = false; aws_cli_on_arg = false; aws_cli_positional_arg = NULL; char *opt_value = memchr(optstring, option->val, strlen(optstring) + 1); if (!opt_value) { return '?'; } if (opt_value[1] == ':') { has_arg = true; } if (has_arg) { if (aws_cli_optind >= argc) { return '?'; } aws_cli_optarg = argv[aws_cli_optind++]; } return option->val; } /* start of text to indicate we just have a text argument. */ return positional_arg_encountered ? 0x02 : '?'; } int aws_cli_dispatch_on_subcommand( int argc, char *const argv[], struct aws_cli_subcommand_dispatch *dispatch_table, int table_length, void *user_data) { if (argc >= 2) { struct aws_byte_cursor arg_name = aws_byte_cursor_from_c_str(argv[1]); for (int i = 0; i < table_length; ++i) { struct aws_byte_cursor cmd_name = aws_byte_cursor_from_c_str(dispatch_table[i].command_name); if (aws_byte_cursor_eq_ignore_case(&arg_name, &cmd_name)) { return dispatch_table[i].subcommand_fn(argc - 1, &argv[1], (const char *)arg_name.ptr, user_data); } } return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/common.c000066400000000000000000000375231456575232400234660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #ifdef _WIN32 # include #else # include #endif #ifdef __MACH__ # include #endif /* turn off unused named parameter warning on msvc.*/ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4100) #endif long (*g_set_mempolicy_ptr)(int, const unsigned long *, unsigned long) = NULL; int (*g_numa_available_ptr)(void) = NULL; int (*g_numa_num_configured_nodes_ptr)(void) = NULL; int (*g_numa_num_possible_cpus_ptr)(void) = NULL; int (*g_numa_node_of_cpu_ptr)(int cpu) = NULL; void *g_libnuma_handle = NULL; void aws_secure_zero(void *pBuf, size_t bufsize) { /* don't pass NULL to memset(), it's undefined behavior */ if (pBuf == NULL || bufsize == 0) { AWS_ASSERT(bufsize == 0); /* if you believe your NULL buffer has a size, then you have issues */ return; } #if defined(_WIN32) SecureZeroMemory(pBuf, bufsize); #else /* We cannot use memset_s, even on a C11 compiler, because that would require * that __STDC_WANT_LIB_EXT1__ be defined before the _first_ inclusion of string.h. * * We'll try to work around this by using inline asm on GCC-like compilers, * and by exposing the buffer pointer in a volatile local pointer elsewhere. */ # if defined(__GNUC__) || defined(__clang__) memset(pBuf, 0, bufsize); /* This inline asm serves to convince the compiler that the buffer is (somehow) still * used after the zero, and therefore that the optimizer can't eliminate the memset. */ __asm__ __volatile__("" /* The asm doesn't actually do anything. */ : /* no outputs */ /* Tell the compiler that the asm code has access to the pointer to the buffer, * and therefore it might be reading the (now-zeroed) buffer. * Without this. clang/LLVM 9.0.0 optimizes away a memset of a stack buffer. */ : "r"(pBuf) /* Also clobber memory. While this seems like it might be unnecessary - after all, * it's enough that the asm might read the buffer, right? - in practice GCC 7.3.0 * seems to optimize a zero of a stack buffer without it. */ : "memory"); # else // not GCC/clang /* We don't have access to inline asm, since we're on a non-GCC platform. Move the pointer * through a volatile pointer in an attempt to confuse the optimizer. 
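 * Note that it is the volatile qualification of the pointer, not a cast at
 * the call site, that discourages the compiler from treating the memset
 * below as a dead store; this is a best-effort fallback compared to the
 * asm barrier used on GCC/clang above.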
*/ volatile void *pVolBuf = pBuf; memset(pVolBuf, 0, bufsize); # endif // #else not GCC/clang #endif // #else not windows } #define AWS_DEFINE_ERROR_INFO_COMMON(C, ES) [(C)-0x0000] = AWS_DEFINE_ERROR_INFO(C, ES, "aws-c-common") /* clang-format off */ static struct aws_error_info errors[] = { AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_SUCCESS, "Success."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_OOM, "Out of memory."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_NO_SPACE, "Out of space on disk."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_UNKNOWN, "Unknown error."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_SHORT_BUFFER, "Buffer is not large enough to hold result."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_OVERFLOW_DETECTED, "Fixed size value overflow was detected."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_UNSUPPORTED_OPERATION, "Unsupported operation."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_BUFFER_SIZE, "Invalid buffer size."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_HEX_STR, "Invalid hex string."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_BASE64_STR, "Invalid base64 string."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_INDEX, "Invalid index for list access."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_THREAD_INVALID_SETTINGS, "Invalid thread settings."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_THREAD_INSUFFICIENT_RESOURCE, "Insufficent resources for thread."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_THREAD_NO_PERMISSIONS, "Insufficient permissions for thread operation."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_THREAD_NOT_JOINABLE, "Thread not joinable."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_THREAD_NO_SUCH_THREAD_ID, "No such thread ID."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_THREAD_DEADLOCK_DETECTED, "Deadlock detected in thread."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_MUTEX_NOT_INIT, "Mutex not initialized."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_MUTEX_TIMEOUT, "Mutex operation timed out."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_MUTEX_CALLER_NOT_OWNER, "The caller of a mutex operation was not the owner."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_MUTEX_FAILED, "Mutex operation failed."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_COND_VARIABLE_INIT_FAILED, "Condition variable initialization failed."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_COND_VARIABLE_TIMED_OUT, "Condition variable wait timed out."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN, "Condition variable unknown error."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_CLOCK_FAILURE, "Clock operation failed."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_LIST_EMPTY, "Empty list."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_DEST_COPY_TOO_SMALL, "Destination of copy is too small."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_LIST_EXCEEDS_MAX_SIZE, "A requested operation on a list would exceed it's max size."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_LIST_STATIC_MODE_CANT_SHRINK, "Attempt to shrink a list in static mode."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_PRIORITY_QUEUE_FULL, "Attempt to add items to a full preallocated queue in static mode."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_PRIORITY_QUEUE_EMPTY, "Attempt to pop an item from an empty queue."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_PRIORITY_QUEUE_BAD_NODE, "Bad node handle passed to remove."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_HASHTBL_ITEM_NOT_FOUND, "Item not found in hash table."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_DATE_STR, "Date string is invalid and cannot be parsed." 
), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_ARGUMENT, "An invalid argument was passed to a function." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_RANDOM_GEN_FAILED, "A call to the random number generator failed. Retry later." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_MALFORMED_INPUT_STRING, "An input string was passed to a parser and the string was incorrectly formatted." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_UNIMPLEMENTED, "A function was called, but is not implemented." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_STATE, "An invalid state was encountered." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_ENVIRONMENT_GET, "System call failure when getting an environment variable." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_ENVIRONMENT_SET, "System call failure when setting an environment variable." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_ENVIRONMENT_UNSET, "System call failure when unsetting an environment variable." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_SYS_CALL_FAILURE, "System call failure."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_FILE_INVALID_PATH, "Invalid file path."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_MAX_FDS_EXCEEDED, "The maximum number of fds has been exceeded."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_NO_PERMISSION, "User does not have permission to perform the requested action."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_STREAM_UNSEEKABLE, "Stream does not support seek operations."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED, "A c-string like buffer was passed but a null terminator was not found within the bounds of the buffer."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_STRING_MATCH_NOT_FOUND, "The specified substring was not present in the input string."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_DIVIDE_BY_ZERO, "Attempt to divide a number by zero."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_FILE_HANDLE, "Invalid file handle."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_OPERATION_INTERUPTED, "The operation was interrupted." ), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_DIRECTORY_NOT_EMPTY, "An operation on a directory was attempted which is not allowed when the directory is not empty." 
), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_PLATFORM_NOT_SUPPORTED, "Feature not supported on this platform."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_UTF8, "Invalid UTF-8."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_GET_HOME_DIRECTORY_FAILED, "Failed to get home directory."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_INVALID_XML, "Invalid XML document."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_FILE_OPEN_FAILURE, "Failed opening file."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_FILE_READ_FAILURE, "Failed reading from file."), AWS_DEFINE_ERROR_INFO_COMMON( AWS_ERROR_FILE_WRITE_FAILURE, "Failed writing to file."), }; /* clang-format on */ static struct aws_error_info_list s_list = { .error_list = errors, .count = AWS_ARRAY_SIZE(errors), }; static struct aws_log_subject_info s_common_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO( AWS_LS_COMMON_GENERAL, "aws-c-common", "Subject for aws-c-common logging that doesn't belong to any particular category"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_COMMON_TASK_SCHEDULER, "task-scheduler", "Subject for task scheduler or task specific logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_THREAD, "thread", "Subject for logging thread related functions."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_MEMTRACE, "memtrace", "Output from the aws_mem_trace_dump function"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_XML_PARSER, "xml-parser", "Subject for xml parser specific logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_IO, "common-io", "Common IO utilities"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_BUS, "bus", "Message bus"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_TEST, "test", "Unit/integration testing"), }; static struct aws_log_subject_info_list s_common_log_subject_list = { .subject_list = s_common_log_subject_infos, .count = AWS_ARRAY_SIZE(s_common_log_subject_infos), }; static bool s_common_library_initialized = false; void aws_common_library_init(struct aws_allocator *allocator) { (void)allocator; if (!s_common_library_initialized) { s_common_library_initialized = true; aws_register_error_info(&s_list); aws_register_log_subject_info_list(&s_common_log_subject_list); aws_thread_initialize_thread_management(); aws_json_module_init(allocator); /* NUMA is funky and we can't rely on libnuma.so being available. We also don't want to take a hard dependency on it, * try and load it if we can. */ #ifdef AWS_OS_LINUX /* libnuma defines set_mempolicy() as a WEAK symbol. Loading into the global symbol table overwrites symbols and assumptions due to the way loaders and dlload are often implemented and those symbols are defined by things like libpthread.so on some unix distros. Sorry about the memory usage here, but it's our only safe choice. Also, please don't do numa configurations if memory is your economic bottleneck. 
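 * This is also why the dlopen() calls below pass RTLD_LOCAL: libnuma's
 * symbols stay out of the global namespace, and the few functions needed
 * are resolved explicitly via dlsym() into the g_* function pointers.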
*/ g_libnuma_handle = dlopen("libnuma.so", RTLD_LAZY | RTLD_LOCAL); /* turns out so versioning is really inconsistent these days */ if (!g_libnuma_handle) { g_libnuma_handle = dlopen("libnuma.so.1", RTLD_LAZY | RTLD_LOCAL); } if (!g_libnuma_handle) { g_libnuma_handle = dlopen("libnuma.so.2", RTLD_LAZY | RTLD_LOCAL); } if (g_libnuma_handle) { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so loaded"); *(void **)(&g_set_mempolicy_ptr) = dlsym(g_libnuma_handle, "set_mempolicy"); if (g_set_mempolicy_ptr) { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() loaded"); } else { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: set_mempolicy() failed to load"); } *(void **)(&g_numa_available_ptr) = dlsym(g_libnuma_handle, "numa_available"); if (g_numa_available_ptr) { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_available() loaded"); } else { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_available() failed to load"); } *(void **)(&g_numa_num_configured_nodes_ptr) = dlsym(g_libnuma_handle, "numa_num_configured_nodes"); if (g_numa_num_configured_nodes_ptr) { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_configured_nodes() loaded"); } else { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_configured_nodes() failed to load"); } *(void **)(&g_numa_num_possible_cpus_ptr) = dlsym(g_libnuma_handle, "numa_num_possible_cpus"); if (g_numa_num_possible_cpus_ptr) { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_possible_cpus() loaded"); } else { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_num_possible_cpus() failed to load"); } *(void **)(&g_numa_node_of_cpu_ptr) = dlsym(g_libnuma_handle, "numa_node_of_cpu"); if (g_numa_node_of_cpu_ptr) { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_node_of_cpu() loaded"); } else { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: numa_node_of_cpu() failed to load"); } } else { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "static: libnuma.so failed to load"); } #endif } } void aws_common_library_clean_up(void) { if (s_common_library_initialized) { s_common_library_initialized = false; aws_thread_join_all_managed(); aws_unregister_error_info(&s_list); aws_unregister_log_subject_info_list(&s_common_log_subject_list); aws_json_module_cleanup(); #ifdef AWS_OS_LINUX if (g_libnuma_handle) { dlclose(g_libnuma_handle); } #endif } } void aws_common_fatal_assert_library_initialized(void) { if (!s_common_library_initialized) { fprintf( stderr, "%s", "aws_common_library_init() must be called before using any functionality in aws-c-common."); AWS_FATAL_ASSERT(s_common_library_initialized); } } #ifdef _MSC_VER # pragma warning(pop) #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/condition_variable.c000066400000000000000000000016261456575232400260240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
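 *
 * The wrappers below simply loop, re-issuing aws_condition_variable_wait /
 * aws_condition_variable_wait_for until the caller's predicate returns true
 * or the wait itself reports an error.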
*/ #include int aws_condition_variable_wait_pred( struct aws_condition_variable *condition_variable, struct aws_mutex *mutex, aws_condition_predicate_fn *pred, void *pred_ctx) { int err_code = 0; while (!err_code && !pred(pred_ctx)) { err_code = aws_condition_variable_wait(condition_variable, mutex); } return err_code; } int aws_condition_variable_wait_for_pred( struct aws_condition_variable *condition_variable, struct aws_mutex *mutex, int64_t time_to_wait, aws_condition_predicate_fn *pred, void *pred_ctx) { int err_code = 0; while (!err_code && !pred(pred_ctx)) { err_code = aws_condition_variable_wait_for(condition_variable, mutex, time_to_wait); } return err_code; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/date_time.c000066400000000000000000000650371456575232400241320ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include static const char *RFC822_DATE_FORMAT_STR_MINUS_Z = "%a, %d %b %Y %H:%M:%S GMT"; static const char *RFC822_DATE_FORMAT_STR_WITH_Z = "%a, %d %b %Y %H:%M:%S %Z"; static const char *RFC822_SHORT_DATE_FORMAT_STR = "%a, %d %b %Y"; static const char *ISO_8601_LONG_DATE_FORMAT_STR = "%Y-%m-%dT%H:%M:%SZ"; static const char *ISO_8601_SHORT_DATE_FORMAT_STR = "%Y-%m-%d"; static const char *ISO_8601_LONG_BASIC_DATE_FORMAT_STR = "%Y%m%dT%H%M%SZ"; static const char *ISO_8601_SHORT_BASIC_DATE_FORMAT_STR = "%Y%m%d"; #define STR_TRIPLET_TO_INDEX(str) \ (((uint32_t)tolower((uint8_t)((str)[0])) << 0) | ((uint32_t)tolower((uint8_t)((str)[1])) << 8) | \ ((uint32_t)tolower((uint8_t)((str)[2])) << 16)) static uint32_t s_jan = 0; static uint32_t s_feb = 0; static uint32_t s_mar = 0; static uint32_t s_apr = 0; static uint32_t s_may = 0; static uint32_t s_jun = 0; static uint32_t s_jul = 0; static uint32_t s_aug = 0; static uint32_t s_sep = 0; static uint32_t s_oct = 0; static uint32_t s_nov = 0; static uint32_t s_dec = 0; static uint32_t s_utc = 0; static uint32_t s_gmt = 0; static void s_check_init_str_to_int(void) { if (!s_jan) { s_jan = STR_TRIPLET_TO_INDEX("jan"); s_feb = STR_TRIPLET_TO_INDEX("feb"); s_mar = STR_TRIPLET_TO_INDEX("mar"); s_apr = STR_TRIPLET_TO_INDEX("apr"); s_may = STR_TRIPLET_TO_INDEX("may"); s_jun = STR_TRIPLET_TO_INDEX("jun"); s_jul = STR_TRIPLET_TO_INDEX("jul"); s_aug = STR_TRIPLET_TO_INDEX("aug"); s_sep = STR_TRIPLET_TO_INDEX("sep"); s_oct = STR_TRIPLET_TO_INDEX("oct"); s_nov = STR_TRIPLET_TO_INDEX("nov"); s_dec = STR_TRIPLET_TO_INDEX("dec"); s_utc = STR_TRIPLET_TO_INDEX("utc"); s_gmt = STR_TRIPLET_TO_INDEX("gmt"); } } /* Get the 0-11 monthly number from a string representing Month. Case insensitive and will stop on abbreviation*/ static int get_month_number_from_str(const char *time_string, size_t start_index, size_t stop_index) { s_check_init_str_to_int(); if (stop_index - start_index < 3) { return -1; } /* This AND forces the string to lowercase (assuming ASCII) */ uint32_t comp_val = STR_TRIPLET_TO_INDEX(time_string + start_index); /* this can't be a switch, because I can't make it a constant expression. 
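 * For reference: STR_TRIPLET_TO_INDEX packs the three lowercased bytes into
 * bits 0-23 of a uint32_t (e.g. "jan" becomes 'j' | 'a' << 8 | 'n' << 16),
 * so each month abbreviation is compared as a single integer below.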
*/ if (s_jan == comp_val) { return 0; } if (s_feb == comp_val) { return 1; } if (s_mar == comp_val) { return 2; } if (s_apr == comp_val) { return 3; } if (s_may == comp_val) { return 4; } if (s_jun == comp_val) { return 5; } if (s_jul == comp_val) { return 6; } if (s_aug == comp_val) { return 7; } if (s_sep == comp_val) { return 8; } if (s_oct == comp_val) { return 9; } if (s_nov == comp_val) { return 10; } if (s_dec == comp_val) { return 11; } return -1; } /* Detects whether or not the passed in timezone string is a UTC zone. */ static bool is_utc_time_zone(const char *str) { s_check_init_str_to_int(); size_t len = strlen(str); if (len > 0) { if (str[0] == 'Z') { return true; } /* offsets count since their usable */ if (len == 5 && (str[0] == '+' || str[0] == '-')) { return true; } if (len == 2) { return tolower((uint8_t)str[0]) == 'u' && tolower((uint8_t)str[1]) == 't'; } if (len < 3) { return false; } uint32_t comp_val = STR_TRIPLET_TO_INDEX(str); if (comp_val == s_utc || comp_val == s_gmt) { return true; } } return false; } struct tm s_get_time_struct(struct aws_date_time *dt, bool local_time) { struct tm time; AWS_ZERO_STRUCT(time); if (local_time) { aws_localtime(dt->timestamp, &time); } else { aws_gmtime(dt->timestamp, &time); } return time; } void aws_date_time_init_now(struct aws_date_time *dt) { uint64_t current_time_ns = 0; aws_sys_clock_get_ticks(¤t_time_ns); aws_date_time_init_epoch_millis( dt, aws_timestamp_convert(current_time_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL)); } void aws_date_time_init_epoch_millis(struct aws_date_time *dt, uint64_t ms_since_epoch) { uint64_t milliseconds = 0; dt->timestamp = (time_t)aws_timestamp_convert(ms_since_epoch, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, &milliseconds); dt->milliseconds = (uint16_t)milliseconds; dt->gmt_time = s_get_time_struct(dt, false); dt->local_time = s_get_time_struct(dt, true); } void aws_date_time_init_epoch_secs(struct aws_date_time *dt, double sec_ms) { double integral = 0; dt->milliseconds = (uint16_t)(round(modf(sec_ms, &integral) * AWS_TIMESTAMP_MILLIS)); dt->timestamp = (time_t)integral; dt->gmt_time = s_get_time_struct(dt, false); dt->local_time = s_get_time_struct(dt, true); } enum parser_state { ON_WEEKDAY, ON_SPACE_DELIM, ON_YEAR, ON_MONTH, ON_MONTH_DAY, ON_HOUR, ON_MINUTE, ON_SECOND, ON_TZ, FINISHED, }; static int s_parse_iso_8601_basic(const struct aws_byte_cursor *date_str_cursor, struct tm *parsed_time) { size_t index = 0; size_t state_start_index = 0; enum parser_state state = ON_YEAR; bool error = false; AWS_ZERO_STRUCT(*parsed_time); while (state < FINISHED && !error && index < date_str_cursor->len) { char c = (char)date_str_cursor->ptr[index]; size_t sub_index = index - state_start_index; switch (state) { case ON_YEAR: if (aws_isdigit(c)) { parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0'); if (sub_index == 3) { state = ON_MONTH; state_start_index = index + 1; parsed_time->tm_year -= 1900; } } else { error = true; } break; case ON_MONTH: if (aws_isdigit(c)) { parsed_time->tm_mon = parsed_time->tm_mon * 10 + (c - '0'); if (sub_index == 1) { state = ON_MONTH_DAY; state_start_index = index + 1; parsed_time->tm_mon -= 1; } } else { error = true; } break; case ON_MONTH_DAY: if (c == 'T' && sub_index == 2) { state = ON_HOUR; state_start_index = index + 1; } else if (aws_isdigit(c)) { parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0'); } else { error = true; } break; case ON_HOUR: if (aws_isdigit(c)) { parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0'); if 
(sub_index == 1) { state = ON_MINUTE; state_start_index = index + 1; } } else { error = true; } break; case ON_MINUTE: if (aws_isdigit(c)) { parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0'); if (sub_index == 1) { state = ON_SECOND; state_start_index = index + 1; } } else { error = true; } break; case ON_SECOND: if (aws_isdigit(c)) { parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0'); if (sub_index == 1) { state = ON_TZ; state_start_index = index + 1; } } else { error = true; } break; case ON_TZ: if (c == 'Z' && (sub_index == 0 || sub_index == 3)) { state = FINISHED; } else if (!aws_isdigit(c) || sub_index > 3) { error = true; } break; default: error = true; break; } index++; } /* ISO8601 supports date only with no time portion. state ==ON_MONTH_DAY catches this case. */ return (state == FINISHED || state == ON_MONTH_DAY) && !error ? AWS_OP_SUCCESS : AWS_OP_ERR; } static int s_parse_iso_8601(const struct aws_byte_cursor *date_str_cursor, struct tm *parsed_time) { size_t index = 0; size_t state_start_index = 0; enum parser_state state = ON_YEAR; bool error = false; bool advance = true; AWS_ZERO_STRUCT(*parsed_time); while (state < FINISHED && !error && index < date_str_cursor->len) { char c = (char)date_str_cursor->ptr[index]; switch (state) { case ON_YEAR: if (c == '-' && index - state_start_index == 4) { state = ON_MONTH; state_start_index = index + 1; parsed_time->tm_year -= 1900; } else if (aws_isdigit(c)) { parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0'); } else { error = true; } break; case ON_MONTH: if (c == '-' && index - state_start_index == 2) { state = ON_MONTH_DAY; state_start_index = index + 1; parsed_time->tm_mon -= 1; } else if (aws_isdigit(c)) { parsed_time->tm_mon = parsed_time->tm_mon * 10 + (c - '0'); } else { error = true; } break; case ON_MONTH_DAY: if (c == 'T' && index - state_start_index == 2) { state = ON_HOUR; state_start_index = index + 1; } else if (aws_isdigit(c)) { parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0'); } else { error = true; } break; /* note: no time portion is spec compliant. */ case ON_HOUR: /* time parts can be delimited by ':' or just concatenated together, but must always be 2 digits. */ if (index - state_start_index == 2) { state = ON_MINUTE; state_start_index = index + 1; if (aws_isdigit(c)) { state_start_index = index; advance = false; } else if (c != ':') { error = true; } } else if (aws_isdigit(c)) { parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0'); } else { error = true; } break; case ON_MINUTE: /* time parts can be delimited by ':' or just concatenated together, but must always be 2 digits. */ if (index - state_start_index == 2) { state = ON_SECOND; state_start_index = index + 1; if (aws_isdigit(c)) { state_start_index = index; advance = false; } else if (c != ':') { error = true; } } else if (aws_isdigit(c)) { parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0'); } else { error = true; } break; case ON_SECOND: if (c == 'Z' && index - state_start_index == 2) { state = FINISHED; state_start_index = index + 1; } else if (c == '.' 
&& index - state_start_index == 2) { state = ON_TZ; state_start_index = index + 1; } else if (aws_isdigit(c)) { parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0'); } else { error = true; } break; case ON_TZ: if (c == 'Z') { state = FINISHED; state_start_index = index + 1; } else if (!aws_isdigit(c)) { error = true; } break; default: error = true; break; } if (advance) { index++; } else { advance = true; } } /* ISO8601 supports date only with no time portion. state ==ON_MONTH_DAY catches this case. */ return (state == FINISHED || state == ON_MONTH_DAY) && !error ? AWS_OP_SUCCESS : AWS_OP_ERR; } static int s_parse_rfc_822( const struct aws_byte_cursor *date_str_cursor, struct tm *parsed_time, struct aws_date_time *dt) { size_t len = date_str_cursor->len; size_t index = 0; size_t state_start_index = 0; int state = ON_WEEKDAY; bool error = false; AWS_ZERO_STRUCT(*parsed_time); while (!error && index < len) { char c = (char)date_str_cursor->ptr[index]; switch (state) { /* week day abbr is optional. */ case ON_WEEKDAY: if (c == ',') { state = ON_SPACE_DELIM; state_start_index = index + 1; } else if (aws_isdigit(c)) { state = ON_MONTH_DAY; } else if (!aws_isalpha(c)) { error = true; } break; case ON_SPACE_DELIM: if (aws_isspace(c)) { state = ON_MONTH_DAY; state_start_index = index + 1; } else { error = true; } break; case ON_MONTH_DAY: if (aws_isdigit(c)) { parsed_time->tm_mday = parsed_time->tm_mday * 10 + (c - '0'); } else if (aws_isspace(c)) { state = ON_MONTH; state_start_index = index + 1; } else { error = true; } break; case ON_MONTH: if (aws_isspace(c)) { int monthNumber = get_month_number_from_str((const char *)date_str_cursor->ptr, state_start_index, index + 1); if (monthNumber > -1) { state = ON_YEAR; state_start_index = index + 1; parsed_time->tm_mon = monthNumber; } else { error = true; } } else if (!aws_isalpha(c)) { error = true; } break; /* year can be 4 or 2 digits. */ case ON_YEAR: if (aws_isspace(c) && index - state_start_index == 4) { state = ON_HOUR; state_start_index = index + 1; parsed_time->tm_year -= 1900; } else if (aws_isspace(c) && index - state_start_index == 2) { state = 5; state_start_index = index + 1; parsed_time->tm_year += 2000 - 1900; } else if (aws_isdigit(c)) { parsed_time->tm_year = parsed_time->tm_year * 10 + (c - '0'); } else { error = true; } break; case ON_HOUR: if (c == ':' && index - state_start_index == 2) { state = ON_MINUTE; state_start_index = index + 1; } else if (aws_isdigit(c)) { parsed_time->tm_hour = parsed_time->tm_hour * 10 + (c - '0'); } else { error = true; } break; case ON_MINUTE: if (c == ':' && index - state_start_index == 2) { state = ON_SECOND; state_start_index = index + 1; } else if (aws_isdigit(c)) { parsed_time->tm_min = parsed_time->tm_min * 10 + (c - '0'); } else { error = true; } break; case ON_SECOND: if (aws_isspace(c) && index - state_start_index == 2) { state = ON_TZ; state_start_index = index + 1; } else if (aws_isdigit(c)) { parsed_time->tm_sec = parsed_time->tm_sec * 10 + (c - '0'); } else { error = true; } break; case ON_TZ: if ((aws_isalnum(c) || c == '-' || c == '+') && (index - state_start_index) < 5) { dt->tz[index - state_start_index] = c; } else { error = true; } break; default: error = true; break; } index++; } if (dt->tz[0] != 0) { if (is_utc_time_zone(dt->tz)) { dt->utc_assumed = true; } else { error = true; } } return error || state != ON_TZ ? 
AWS_OP_ERR : AWS_OP_SUCCESS; } int aws_date_time_init_from_str_cursor( struct aws_date_time *dt, const struct aws_byte_cursor *date_str_cursor, enum aws_date_format fmt) { AWS_ERROR_PRECONDITION(date_str_cursor->len <= AWS_DATE_TIME_STR_MAX_LEN, AWS_ERROR_OVERFLOW_DETECTED); AWS_ZERO_STRUCT(*dt); struct tm parsed_time; bool successfully_parsed = false; time_t seconds_offset = 0; if (fmt == AWS_DATE_FORMAT_ISO_8601 || fmt == AWS_DATE_FORMAT_AUTO_DETECT) { if (!s_parse_iso_8601(date_str_cursor, &parsed_time)) { dt->utc_assumed = true; successfully_parsed = true; } } if (fmt == AWS_DATE_FORMAT_ISO_8601_BASIC || (fmt == AWS_DATE_FORMAT_AUTO_DETECT && !successfully_parsed)) { if (!s_parse_iso_8601_basic(date_str_cursor, &parsed_time)) { dt->utc_assumed = true; successfully_parsed = true; } } if (fmt == AWS_DATE_FORMAT_RFC822 || (fmt == AWS_DATE_FORMAT_AUTO_DETECT && !successfully_parsed)) { if (!s_parse_rfc_822(date_str_cursor, &parsed_time, dt)) { successfully_parsed = true; if (dt->utc_assumed) { if (dt->tz[0] == '+' || dt->tz[0] == '-') { /* in this format, the offset is in format +/-HHMM so convert that to seconds and we'll use * the offset later. */ char min_str[3] = {0}; char hour_str[3] = {0}; hour_str[0] = dt->tz[1]; hour_str[1] = dt->tz[2]; min_str[0] = dt->tz[3]; min_str[1] = dt->tz[4]; long hour = strtol(hour_str, NULL, 10); long min = strtol(min_str, NULL, 10); seconds_offset = (time_t)(hour * 3600 + min * 60); if (dt->tz[0] == '-') { seconds_offset = -seconds_offset; } } } } } if (!successfully_parsed) { return aws_raise_error(AWS_ERROR_INVALID_DATE_STR); } if (dt->utc_assumed || seconds_offset) { dt->timestamp = aws_timegm(&parsed_time); } else { dt->timestamp = mktime(&parsed_time); } /* negative means we need to move west (increase the timestamp), positive means head east, so decrease the * timestamp. 
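 * Worked example: an RFC822 zone of "+0530" produces
 * seconds_offset = 5 * 3600 + 30 * 60 = 19800, so 19800 seconds are
 * subtracted below to convert the parsed wall-clock time to UTC.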
*/ dt->timestamp -= seconds_offset; dt->milliseconds = 0U; dt->gmt_time = s_get_time_struct(dt, false); dt->local_time = s_get_time_struct(dt, true); return AWS_OP_SUCCESS; } int aws_date_time_init_from_str( struct aws_date_time *dt, const struct aws_byte_buf *date_str, enum aws_date_format fmt) { AWS_ERROR_PRECONDITION(date_str->len <= AWS_DATE_TIME_STR_MAX_LEN, AWS_ERROR_OVERFLOW_DETECTED); struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(date_str); return aws_date_time_init_from_str_cursor(dt, &date_cursor, fmt); } static inline int s_date_to_str(const struct tm *tm, const char *format_str, struct aws_byte_buf *output_buf) { size_t remaining_space = output_buf->capacity - output_buf->len; size_t bytes_written = strftime((char *)output_buf->buffer + output_buf->len, remaining_space, format_str, tm); if (bytes_written == 0) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } output_buf->len += bytes_written; return AWS_OP_SUCCESS; } int aws_date_time_to_local_time_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf) { AWS_ASSERT(fmt != AWS_DATE_FORMAT_AUTO_DETECT); switch (fmt) { case AWS_DATE_FORMAT_RFC822: return s_date_to_str(&dt->local_time, RFC822_DATE_FORMAT_STR_WITH_Z, output_buf); case AWS_DATE_FORMAT_ISO_8601: return s_date_to_str(&dt->local_time, ISO_8601_LONG_DATE_FORMAT_STR, output_buf); case AWS_DATE_FORMAT_ISO_8601_BASIC: return s_date_to_str(&dt->local_time, ISO_8601_LONG_BASIC_DATE_FORMAT_STR, output_buf); default: return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } int aws_date_time_to_utc_time_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf) { AWS_ASSERT(fmt != AWS_DATE_FORMAT_AUTO_DETECT); switch (fmt) { case AWS_DATE_FORMAT_RFC822: return s_date_to_str(&dt->gmt_time, RFC822_DATE_FORMAT_STR_MINUS_Z, output_buf); case AWS_DATE_FORMAT_ISO_8601: return s_date_to_str(&dt->gmt_time, ISO_8601_LONG_DATE_FORMAT_STR, output_buf); case AWS_DATE_FORMAT_ISO_8601_BASIC: return s_date_to_str(&dt->gmt_time, ISO_8601_LONG_BASIC_DATE_FORMAT_STR, output_buf); default: return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } int aws_date_time_to_local_time_short_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf) { AWS_ASSERT(fmt != AWS_DATE_FORMAT_AUTO_DETECT); switch (fmt) { case AWS_DATE_FORMAT_RFC822: return s_date_to_str(&dt->local_time, RFC822_SHORT_DATE_FORMAT_STR, output_buf); case AWS_DATE_FORMAT_ISO_8601: return s_date_to_str(&dt->local_time, ISO_8601_SHORT_DATE_FORMAT_STR, output_buf); case AWS_DATE_FORMAT_ISO_8601_BASIC: return s_date_to_str(&dt->local_time, ISO_8601_SHORT_BASIC_DATE_FORMAT_STR, output_buf); default: return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } int aws_date_time_to_utc_time_short_str( const struct aws_date_time *dt, enum aws_date_format fmt, struct aws_byte_buf *output_buf) { AWS_ASSERT(fmt != AWS_DATE_FORMAT_AUTO_DETECT); switch (fmt) { case AWS_DATE_FORMAT_RFC822: return s_date_to_str(&dt->gmt_time, RFC822_SHORT_DATE_FORMAT_STR, output_buf); case AWS_DATE_FORMAT_ISO_8601: return s_date_to_str(&dt->gmt_time, ISO_8601_SHORT_DATE_FORMAT_STR, output_buf); case AWS_DATE_FORMAT_ISO_8601_BASIC: return s_date_to_str(&dt->gmt_time, ISO_8601_SHORT_BASIC_DATE_FORMAT_STR, output_buf); default: return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } double aws_date_time_as_epoch_secs(const struct aws_date_time *dt) { return (double)dt->timestamp + (double)(dt->milliseconds / 1000.0); } uint64_t 
aws_date_time_as_nanos(const struct aws_date_time *dt) { return aws_timestamp_convert((uint64_t)dt->timestamp, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL) + aws_timestamp_convert((uint64_t)dt->milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); } uint64_t aws_date_time_as_millis(const struct aws_date_time *dt) { return aws_timestamp_convert((uint64_t)dt->timestamp, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL) + (uint64_t)dt->milliseconds; } uint16_t aws_date_time_year(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return (uint16_t)(time->tm_year + 1900); } enum aws_date_month aws_date_time_month(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return time->tm_mon; } uint8_t aws_date_time_month_day(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return (uint8_t)time->tm_mday; } enum aws_date_day_of_week aws_date_time_day_of_week(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return time->tm_wday; } uint8_t aws_date_time_hour(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return (uint8_t)time->tm_hour; } uint8_t aws_date_time_minute(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return (uint8_t)time->tm_min; } uint8_t aws_date_time_second(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return (uint8_t)time->tm_sec; } bool aws_date_time_dst(const struct aws_date_time *dt, bool local_time) { const struct tm *time = local_time ? &dt->local_time : &dt->gmt_time; return (bool)time->tm_isdst; } time_t aws_date_time_diff(const struct aws_date_time *a, const struct aws_date_time *b) { return a->timestamp - b->timestamp; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/device_random.c000066400000000000000000000021461456575232400247660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #ifdef _MSC_VER /* disables warning non const declared initializers for Microsoft compilers */ # pragma warning(disable : 4204) # pragma warning(disable : 4706) #endif int aws_device_random_u64(uint64_t *output) { struct aws_byte_buf buf = aws_byte_buf_from_empty_array((uint8_t *)output, sizeof(uint64_t)); return aws_device_random_buffer(&buf); } int aws_device_random_u32(uint32_t *output) { struct aws_byte_buf buf = aws_byte_buf_from_empty_array((uint8_t *)output, sizeof(uint32_t)); return aws_device_random_buffer(&buf); } int aws_device_random_u16(uint16_t *output) { struct aws_byte_buf buf = aws_byte_buf_from_empty_array((uint8_t *)output, sizeof(uint16_t)); return aws_device_random_buffer(&buf); } int aws_device_random_u8(uint8_t *output) { struct aws_byte_buf buf = aws_byte_buf_from_empty_array((uint8_t *)output, sizeof(uint8_t)); return aws_device_random_buffer(&buf); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/encoding.c000066400000000000000000000462701456575232400237630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
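 *
 * This file implements hex and base64 encoding/decoding (with an optional
 * SSE4.1/AVX2 fast path) and a streaming RFC 3629 UTF-8 validator built
 * around aws_utf8_decoder.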
*/ #include #include #include #ifdef USE_SIMD_ENCODING size_t aws_common_private_base64_decode_sse41(const unsigned char *in, unsigned char *out, size_t len); void aws_common_private_base64_encode_sse41(const unsigned char *in, unsigned char *out, size_t len); bool aws_common_private_has_avx2(void); #else /* * When AVX2 compilation is unavailable, we use these stubs to fall back to the pure-C decoder. * Since we force aws_common_private_has_avx2 to return false, the encode and decode functions should * not be called - but we must provide them anyway to avoid link errors. */ static inline size_t aws_common_private_base64_decode_sse41(const unsigned char *in, unsigned char *out, size_t len) { (void)in; (void)out; (void)len; AWS_ASSERT(false); return (size_t)-1; /* unreachable */ } static inline void aws_common_private_base64_encode_sse41(const unsigned char *in, unsigned char *out, size_t len) { (void)in; (void)out; (void)len; AWS_ASSERT(false); } static inline bool aws_common_private_has_avx2(void) { return false; } #endif static const uint8_t *HEX_CHARS = (const uint8_t *)"0123456789abcdef"; static const uint8_t BASE64_SENTINEL_VALUE = 0xff; static const uint8_t BASE64_ENCODING_TABLE[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; /* in this table, 0xDD is an invalid decoded value, if you have to do byte counting for any reason, there's 16 bytes * per row. Reformatting is turned off to make sure this stays as 16 bytes per line. */ /* clang-format off */ static const uint8_t BASE64_DECODING_TABLE[256] = { 64, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 62, 0xDD, 0xDD, 0xDD, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 0xDD, 0xDD, 0xDD, 255, 0xDD, 0xDD, 0xDD, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD}; /* clang-format on */ int aws_hex_compute_encoded_len(size_t to_encode_len, size_t *encoded_length) { AWS_ASSERT(encoded_length); size_t temp = (to_encode_len << 1) + 1; if (AWS_UNLIKELY(temp < to_encode_len)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *encoded_length = temp; return AWS_OP_SUCCESS; } int aws_hex_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output) { AWS_PRECONDITION(aws_byte_cursor_is_valid(to_encode)); AWS_PRECONDITION(aws_byte_buf_is_valid(output)); size_t encoded_len = 0; if 
(AWS_UNLIKELY(aws_hex_compute_encoded_len(to_encode->len, &encoded_len))) { return AWS_OP_ERR; } if (AWS_UNLIKELY(output->capacity < encoded_len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t written = 0; for (size_t i = 0; i < to_encode->len; ++i) { output->buffer[written++] = HEX_CHARS[to_encode->ptr[i] >> 4 & 0x0f]; output->buffer[written++] = HEX_CHARS[to_encode->ptr[i] & 0x0f]; } output->buffer[written] = '\0'; output->len = encoded_len; return AWS_OP_SUCCESS; } int aws_hex_encode_append_dynamic( const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output) { AWS_ASSERT(to_encode->ptr); AWS_ASSERT(aws_byte_buf_is_valid(output)); size_t encoded_len = 0; if (AWS_UNLIKELY(aws_add_size_checked(to_encode->len, to_encode->len, &encoded_len))) { return AWS_OP_ERR; } if (AWS_UNLIKELY(aws_byte_buf_reserve_relative(output, encoded_len))) { return AWS_OP_ERR; } size_t written = output->len; for (size_t i = 0; i < to_encode->len; ++i) { output->buffer[written++] = HEX_CHARS[to_encode->ptr[i] >> 4 & 0x0f]; output->buffer[written++] = HEX_CHARS[to_encode->ptr[i] & 0x0f]; } output->len += encoded_len; return AWS_OP_SUCCESS; } static int s_hex_decode_char_to_int(char character, uint8_t *int_val) { if (character >= 'a' && character <= 'f') { *int_val = (uint8_t)(10 + (character - 'a')); return 0; } if (character >= 'A' && character <= 'F') { *int_val = (uint8_t)(10 + (character - 'A')); return 0; } if (character >= '0' && character <= '9') { *int_val = (uint8_t)(character - '0'); return 0; } return AWS_OP_ERR; } int aws_hex_compute_decoded_len(size_t to_decode_len, size_t *decoded_len) { AWS_ASSERT(decoded_len); size_t temp = (to_decode_len + 1); if (AWS_UNLIKELY(temp < to_decode_len)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *decoded_len = temp >> 1; return AWS_OP_SUCCESS; } int aws_hex_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, struct aws_byte_buf *AWS_RESTRICT output) { AWS_PRECONDITION(aws_byte_cursor_is_valid(to_decode)); AWS_PRECONDITION(aws_byte_buf_is_valid(output)); size_t decoded_length = 0; if (AWS_UNLIKELY(aws_hex_compute_decoded_len(to_decode->len, &decoded_length))) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } if (AWS_UNLIKELY(output->capacity < decoded_length)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t written = 0; size_t i = 0; uint8_t high_value = 0; uint8_t low_value = 0; /* if the buffer isn't even, prepend a 0 to the buffer. 
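 * For example, decoding the odd-length string "abc" behaves like decoding
 * "0abc" and produces the two bytes 0x0a 0xbc.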
*/ if (AWS_UNLIKELY(to_decode->len & 0x01)) { i = 1; if (s_hex_decode_char_to_int((char)to_decode->ptr[0], &low_value)) { return aws_raise_error(AWS_ERROR_INVALID_HEX_STR); } output->buffer[written++] = low_value; } for (; i < to_decode->len; i += 2) { if (AWS_UNLIKELY( s_hex_decode_char_to_int(to_decode->ptr[i], &high_value) || s_hex_decode_char_to_int(to_decode->ptr[i + 1], &low_value))) { return aws_raise_error(AWS_ERROR_INVALID_HEX_STR); } uint8_t value = (uint8_t)(high_value << 4); value |= low_value; output->buffer[written++] = value; } output->len = decoded_length; return AWS_OP_SUCCESS; } int aws_base64_compute_encoded_len(size_t to_encode_len, size_t *encoded_len) { AWS_ASSERT(encoded_len); size_t tmp = to_encode_len + 2; if (AWS_UNLIKELY(tmp < to_encode_len)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } tmp /= 3; size_t overflow_check = tmp; tmp = 4 * tmp + 1; /* plus one for the NULL terminator */ if (AWS_UNLIKELY(tmp < overflow_check)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *encoded_len = tmp; return AWS_OP_SUCCESS; } int aws_base64_compute_decoded_len(const struct aws_byte_cursor *AWS_RESTRICT to_decode, size_t *decoded_len) { AWS_ASSERT(to_decode); AWS_ASSERT(decoded_len); const size_t len = to_decode->len; const uint8_t *input = to_decode->ptr; if (len == 0) { *decoded_len = 0; return AWS_OP_SUCCESS; } if (AWS_UNLIKELY(len & 0x03)) { return aws_raise_error(AWS_ERROR_INVALID_BASE64_STR); } size_t tmp = len * 3; if (AWS_UNLIKELY(tmp < len)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } size_t padding = 0; if (len >= 2 && input[len - 1] == '=' && input[len - 2] == '=') { /*last two chars are = */ padding = 2; } else if (input[len - 1] == '=') { /*last char is = */ padding = 1; } *decoded_len = (tmp / 4 - padding); return AWS_OP_SUCCESS; } int aws_base64_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output) { AWS_ASSERT(to_encode->ptr); AWS_ASSERT(output->buffer); size_t terminated_length = 0; size_t encoded_length = 0; if (AWS_UNLIKELY(aws_base64_compute_encoded_len(to_encode->len, &terminated_length))) { return AWS_OP_ERR; } size_t needed_capacity = 0; if (AWS_UNLIKELY(aws_add_size_checked(output->len, terminated_length, &needed_capacity))) { return AWS_OP_ERR; } if (AWS_UNLIKELY(output->capacity < needed_capacity)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* * For convenience to standard C functions expecting a null-terminated * string, the output is terminated. As the encoding itself can be used in * various ways, however, its length should never account for that byte. 
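 * Concretely, aws_base64_compute_encoded_len() reports 4 * ceil(len / 3) + 1
 * to leave room for that terminator, and output->len below grows by one
 * byte less than that value.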
*/ encoded_length = (terminated_length - 1); if (aws_common_private_has_avx2()) { aws_common_private_base64_encode_sse41(to_encode->ptr, output->buffer + output->len, to_encode->len); output->buffer[output->len + encoded_length] = 0; output->len += encoded_length; return AWS_OP_SUCCESS; } size_t buffer_length = to_encode->len; size_t block_count = (buffer_length + 2) / 3; size_t remainder_count = (buffer_length % 3); size_t str_index = output->len; for (size_t i = 0; i < to_encode->len; i += 3) { uint32_t block = to_encode->ptr[i]; block <<= 8; if (AWS_LIKELY(i + 1 < buffer_length)) { block = block | to_encode->ptr[i + 1]; } block <<= 8; if (AWS_LIKELY(i + 2 < to_encode->len)) { block = block | to_encode->ptr[i + 2]; } output->buffer[str_index++] = BASE64_ENCODING_TABLE[(block >> 18) & 0x3F]; output->buffer[str_index++] = BASE64_ENCODING_TABLE[(block >> 12) & 0x3F]; output->buffer[str_index++] = BASE64_ENCODING_TABLE[(block >> 6) & 0x3F]; output->buffer[str_index++] = BASE64_ENCODING_TABLE[block & 0x3F]; } if (remainder_count > 0) { output->buffer[output->len + block_count * 4 - 1] = '='; if (remainder_count == 1) { output->buffer[output->len + block_count * 4 - 2] = '='; } } /* it's a string add the null terminator. */ output->buffer[output->len + encoded_length] = 0; output->len += encoded_length; return AWS_OP_SUCCESS; } static inline int s_base64_get_decoded_value(unsigned char to_decode, uint8_t *value, int8_t allow_sentinel) { uint8_t decode_value = BASE64_DECODING_TABLE[(size_t)to_decode]; if (decode_value != 0xDD && (decode_value != BASE64_SENTINEL_VALUE || allow_sentinel)) { *value = decode_value; return AWS_OP_SUCCESS; } return AWS_OP_ERR; } int aws_base64_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, struct aws_byte_buf *AWS_RESTRICT output) { size_t decoded_length = 0; if (AWS_UNLIKELY(aws_base64_compute_decoded_len(to_decode, &decoded_length))) { return AWS_OP_ERR; } if (output->capacity < decoded_length) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (aws_common_private_has_avx2()) { size_t result = aws_common_private_base64_decode_sse41(to_decode->ptr, output->buffer, to_decode->len); if (result == -1) { return aws_raise_error(AWS_ERROR_INVALID_BASE64_STR); } output->len = result; return AWS_OP_SUCCESS; } int64_t block_count = (int64_t)to_decode->len / 4; size_t string_index = 0; uint8_t value1 = 0, value2 = 0, value3 = 0, value4 = 0; int64_t buffer_index = 0; for (int64_t i = 0; i < block_count - 1; ++i) { if (AWS_UNLIKELY( s_base64_get_decoded_value(to_decode->ptr[string_index++], &value1, 0) || s_base64_get_decoded_value(to_decode->ptr[string_index++], &value2, 0) || s_base64_get_decoded_value(to_decode->ptr[string_index++], &value3, 0) || s_base64_get_decoded_value(to_decode->ptr[string_index++], &value4, 0))) { return aws_raise_error(AWS_ERROR_INVALID_BASE64_STR); } buffer_index = i * 3; output->buffer[buffer_index++] = (uint8_t)((value1 << 2) | ((value2 >> 4) & 0x03)); output->buffer[buffer_index++] = (uint8_t)(((value2 << 4) & 0xF0) | ((value3 >> 2) & 0x0F)); output->buffer[buffer_index] = (uint8_t)((value3 & 0x03) << 6 | value4); } buffer_index = (block_count - 1) * 3; if (buffer_index >= 0) { if (s_base64_get_decoded_value(to_decode->ptr[string_index++], &value1, 0) || s_base64_get_decoded_value(to_decode->ptr[string_index++], &value2, 0) || s_base64_get_decoded_value(to_decode->ptr[string_index++], &value3, 1) || s_base64_get_decoded_value(to_decode->ptr[string_index], &value4, 1)) { return aws_raise_error(AWS_ERROR_INVALID_BASE64_STR); } 
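    /* Final block: value3 and/or value4 may be the '=' padding sentinel, so one,
     * two, or three bytes are written below depending on how much padding was
     * present. */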
output->buffer[buffer_index++] = (uint8_t)((value1 << 2) | ((value2 >> 4) & 0x03)); if (value3 != BASE64_SENTINEL_VALUE) { output->buffer[buffer_index++] = (uint8_t)(((value2 << 4) & 0xF0) | ((value3 >> 2) & 0x0F)); if (value4 != BASE64_SENTINEL_VALUE) { output->buffer[buffer_index] = (uint8_t)((value3 & 0x03) << 6 | value4); } } } output->len = decoded_length; return AWS_OP_SUCCESS; } struct aws_utf8_decoder { struct aws_allocator *alloc; /* Value of current codepoint, updated as we read each byte */ uint32_t codepoint; /* Minimum value that current codepoint is allowed to end up with * (i.e. text cannot use 2 bytes to encode what would have fit in 1 byte) */ uint32_t min; /* Number of bytes remaining the current codepoint */ uint8_t remaining; /* Custom callback */ int (*on_codepoint)(uint32_t codepoint, void *user_data); /* user_data for on_codepoint */ void *user_data; }; struct aws_utf8_decoder *aws_utf8_decoder_new( struct aws_allocator *allocator, const struct aws_utf8_decoder_options *options) { struct aws_utf8_decoder *decoder = aws_mem_calloc(allocator, 1, sizeof(struct aws_utf8_decoder)); decoder->alloc = allocator; if (options) { decoder->on_codepoint = options->on_codepoint; decoder->user_data = options->user_data; } return decoder; } void aws_utf8_decoder_destroy(struct aws_utf8_decoder *decoder) { if (decoder) { aws_mem_release(decoder->alloc, decoder); } } void aws_utf8_decoder_reset(struct aws_utf8_decoder *decoder) { decoder->codepoint = 0; decoder->min = 0; decoder->remaining = 0; } /* Why yes, this could be optimized. */ int aws_utf8_decoder_update(struct aws_utf8_decoder *decoder, struct aws_byte_cursor bytes) { /* We're respecting RFC-3629, which uses 1 to 4 byte sequences (never 5 or 6) */ for (size_t i = 0; i < bytes.len; ++i) { uint8_t byte = bytes.ptr[i]; if (decoder->remaining == 0) { /* Check first byte of the codepoint to determine how many more bytes remain */ if ((byte & 0x80) == 0x00) { /* 1 byte codepoints start with 0xxxxxxx */ decoder->remaining = 0; decoder->codepoint = byte; decoder->min = 0; } else if ((byte & 0xE0) == 0xC0) { /* 2 byte codepoints start with 110xxxxx */ decoder->remaining = 1; decoder->codepoint = byte & 0x1F; decoder->min = 0x80; } else if ((byte & 0xF0) == 0xE0) { /* 3 byte codepoints start with 1110xxxx */ decoder->remaining = 2; decoder->codepoint = byte & 0x0F; decoder->min = 0x800; } else if ((byte & 0xF8) == 0xF0) { /* 4 byte codepoints start with 11110xxx */ decoder->remaining = 3; decoder->codepoint = byte & 0x07; decoder->min = 0x10000; } else { return aws_raise_error(AWS_ERROR_INVALID_UTF8); } } else { /* This is not the first byte of a codepoint. 
* Ensure it starts with 10xxxxxx*/ if ((byte & 0xC0) != 0x80) { return aws_raise_error(AWS_ERROR_INVALID_UTF8); } /* Insert the 6 newly decoded bits: * shifting left anything we've already decoded, and insert the new bits to the right */ decoder->codepoint = (decoder->codepoint << 6) | (byte & 0x3F); /* If we've decoded the whole codepoint, check it for validity * (don't need to do these particular checks on 1 byte codepoints) */ if (--decoder->remaining == 0) { /* Check that it's not "overlong" (encoded using more bytes than necessary) */ if (decoder->codepoint < decoder->min) { return aws_raise_error(AWS_ERROR_INVALID_UTF8); } /* UTF-8 prohibits encoding character numbers between U+D800 and U+DFFF, * which are reserved for use with the UTF-16 encoding form (as * surrogate pairs) and do not directly represent characters */ if (decoder->codepoint >= 0xD800 && decoder->codepoint <= 0xDFFF) { return aws_raise_error(AWS_ERROR_INVALID_UTF8); } } } /* Invoke user's on_codepoint callback */ if (decoder->on_codepoint && decoder->remaining == 0) { if (decoder->on_codepoint(decoder->codepoint, decoder->user_data)) { return AWS_OP_ERR; } } } return AWS_OP_SUCCESS; } int aws_utf8_decoder_finalize(struct aws_utf8_decoder *decoder) { bool valid = decoder->remaining == 0; aws_utf8_decoder_reset(decoder); if (AWS_LIKELY(valid)) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_INVALID_UTF8); } int aws_decode_utf8(struct aws_byte_cursor bytes, const struct aws_utf8_decoder_options *options) { struct aws_utf8_decoder decoder = { .on_codepoint = options ? options->on_codepoint : NULL, .user_data = options ? options->user_data : NULL, }; if (aws_utf8_decoder_update(&decoder, bytes)) { return AWS_OP_ERR; } if (aws_utf8_decoder_finalize(&decoder)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/error.c000066400000000000000000000162121456575232400233170ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include static AWS_THREAD_LOCAL int tl_last_error = 0; static aws_error_handler_fn *s_global_handler = NULL; static void *s_global_error_context = NULL; static AWS_THREAD_LOCAL aws_error_handler_fn *tl_thread_handler = NULL; AWS_THREAD_LOCAL void *tl_thread_handler_context = NULL; /* Since slot size is 00000100 00000000, to divide, we need to shift right by 10 * bits to find the slot, and to find the modulus, we use a binary and with * 00000011 11111111 to find the index in that slot. 
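 * Worked example with that stride of 1024: error code 1025 lands in slot
 * 1025 >> 10 == 1 at index 1025 & 0x3FF == 1.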
*/ #define SLOT_MASK (AWS_ERROR_ENUM_STRIDE - 1) static const int MAX_ERROR_CODE = AWS_ERROR_ENUM_STRIDE * AWS_PACKAGE_SLOTS; static const struct aws_error_info_list *volatile ERROR_SLOTS[AWS_PACKAGE_SLOTS] = {0}; int aws_last_error(void) { return tl_last_error; } static const struct aws_error_info *get_error_by_code(int err) { if (err >= MAX_ERROR_CODE || err < 0) { return NULL; } uint32_t slot_index = (uint32_t)err >> AWS_ERROR_ENUM_STRIDE_BITS; uint32_t error_index = (uint32_t)err & SLOT_MASK; const struct aws_error_info_list *error_slot = ERROR_SLOTS[slot_index]; if (!error_slot || error_index >= error_slot->count) { return NULL; } return &error_slot->error_list[error_index]; } const char *aws_error_str(int err) { const struct aws_error_info *error_info = get_error_by_code(err); if (error_info) { return error_info->error_str; } return "Unknown Error Code"; } const char *aws_error_name(int err) { const struct aws_error_info *error_info = get_error_by_code(err); if (error_info) { return error_info->literal_name; } return "Unknown Error Code"; } const char *aws_error_lib_name(int err) { const struct aws_error_info *error_info = get_error_by_code(err); if (error_info) { return error_info->lib_name; } return "Unknown Error Code"; } const char *aws_error_debug_str(int err) { const struct aws_error_info *error_info = get_error_by_code(err); if (error_info) { return error_info->formatted_name; } return "Unknown Error Code"; } void aws_raise_error_private(int err) { tl_last_error = err; if (tl_thread_handler) { tl_thread_handler(tl_last_error, tl_thread_handler_context); } else if (s_global_handler) { s_global_handler(tl_last_error, s_global_error_context); } } void aws_reset_error(void) { tl_last_error = 0; } void aws_restore_error(int err) { tl_last_error = err; } aws_error_handler_fn *aws_set_global_error_handler_fn(aws_error_handler_fn *handler, void *ctx) { aws_error_handler_fn *old_handler = s_global_handler; s_global_handler = handler; s_global_error_context = ctx; return old_handler; } aws_error_handler_fn *aws_set_thread_local_error_handler_fn(aws_error_handler_fn *handler, void *ctx) { aws_error_handler_fn *old_handler = tl_thread_handler; tl_thread_handler = handler; tl_thread_handler_context = ctx; return old_handler; } void aws_register_error_info(const struct aws_error_info_list *error_info) { /* * We're not so worried about these asserts being removed in an NDEBUG build * - we'll either segfault immediately (for the first two) or for the count * assert, the registration will be ineffective. */ AWS_FATAL_ASSERT(error_info); AWS_FATAL_ASSERT(error_info->error_list); AWS_FATAL_ASSERT(error_info->count); const int min_range = error_info->error_list[0].error_code; const int slot_index = min_range >> AWS_ERROR_ENUM_STRIDE_BITS; if (slot_index >= AWS_PACKAGE_SLOTS || slot_index < 0) { /* This is an NDEBUG build apparently. Kill the process rather than * corrupting heap. */ fprintf(stderr, "Bad error slot index %d\n", slot_index); AWS_FATAL_ASSERT(false); } #if DEBUG_BUILD /* Assert that first error has the right value */ const int expected_first_code = slot_index << AWS_ERROR_ENUM_STRIDE_BITS; if (error_info->error_list[0].error_code != expected_first_code) { fprintf( stderr, "Missing info: First error in list should be %d, not %d (%s)\n", expected_first_code, error_info->error_list[0].error_code, error_info->error_list[0].literal_name); AWS_FATAL_ASSERT(0); } /* Assert that error info entries are in the right order. 
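 *
 * The checks in this block verify exactly that layout for a registration
 * such as the following sketch (illustrative, not part of the original
 * source; it assumes the AWS_DEFINE_ERROR_INFO and AWS_ARRAY_SIZE helpers
 * from the aws-c-common headers, and MY_LIB_ERROR_* are hypothetical codes
 * whose first value sits on a stride boundary):
 *
 *   static const struct aws_error_info s_my_errors[] = {
 *       AWS_DEFINE_ERROR_INFO(MY_LIB_ERROR_SOMETHING_FAILED, "something failed", "my-lib"),
 *       AWS_DEFINE_ERROR_INFO(MY_LIB_ERROR_BAD_INPUT, "bad input", "my-lib"),
 *   };
 *   static const struct aws_error_info_list s_my_error_list = {
 *       .error_list = s_my_errors,
 *       .count = AWS_ARRAY_SIZE(s_my_errors),
 *   };
 *   aws_register_error_info(&s_my_error_list);
 *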
*/ for (int i = 0; i < error_info->count; ++i) { const int expected_code = min_range + i; const struct aws_error_info *info = &error_info->error_list[i]; if (info->error_code != expected_code) { if (info->error_code) { fprintf(stderr, "Error %s is at wrong index of error info list.\n", info->literal_name); } else { fprintf(stderr, "Error %d is missing from error info list.\n", expected_code); } AWS_FATAL_ASSERT(0); } } #endif /* DEBUG_BUILD */ ERROR_SLOTS[slot_index] = error_info; } void aws_unregister_error_info(const struct aws_error_info_list *error_info) { AWS_FATAL_ASSERT(error_info); AWS_FATAL_ASSERT(error_info->error_list); AWS_FATAL_ASSERT(error_info->count); const int min_range = error_info->error_list[0].error_code; const int slot_index = min_range >> AWS_ERROR_ENUM_STRIDE_BITS; if (slot_index >= AWS_PACKAGE_SLOTS || slot_index < 0) { /* This is an NDEBUG build apparently. Kill the process rather than * corrupting heap. */ fprintf(stderr, "Bad error slot index %d\n", slot_index); AWS_FATAL_ASSERT(0); } ERROR_SLOTS[slot_index] = NULL; } int aws_translate_and_raise_io_error(int error_no) { return aws_translate_and_raise_io_error_or(error_no, AWS_ERROR_SYS_CALL_FAILURE); } int aws_translate_and_raise_io_error_or(int error_no, int fallback_aws_error_code) { switch (error_no) { case EINVAL: /* If useful fallback code provided, raise that instead of AWS_ERROR_INVALID_ARGUMENT, * which isn't very useful when it bubbles out from deep within some complex system. */ if (fallback_aws_error_code != AWS_ERROR_SYS_CALL_FAILURE) { return aws_raise_error(fallback_aws_error_code); } else { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } case EPERM: case EACCES: return aws_raise_error(AWS_ERROR_NO_PERMISSION); case EISDIR: case ENAMETOOLONG: case ENOENT: case ENOTDIR: return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); case EMFILE: case ENFILE: return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED); case ENOMEM: return aws_raise_error(AWS_ERROR_OOM); case ENOSPC: return aws_raise_error(AWS_ERROR_NO_SPACE); case ENOTEMPTY: return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY); default: return aws_raise_error(fallback_aws_error_code); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/external/000077500000000000000000000000001456575232400236425ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/external/.clang-format000066400000000000000000000000501456575232400262100ustar00rootroot00000000000000DisableFormat: true SortIncludes: false aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/external/cJSON.c000066400000000000000000002322351456575232400247310ustar00rootroot00000000000000/* Copyright (c) 2009-2017 Dave Gamble and cJSON contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* * This file has been modified from its original version by Amazon: * (1) Remove cJSON_GetErrorPtr and global_error as they are not thread-safe. * (2) Add NOLINTBEGIN/NOLINTEND so clang-tidy ignores file. * (3) Replace sprintf() with snprintf() to make compilers happier. */ /* NOLINTBEGIN */ /* cJSON */ /* JSON parser in C. */ /* disable warnings about old C89 functions in MSVC */ #if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) #define _CRT_SECURE_NO_DEPRECATE #endif #ifdef __GNUC__ #pragma GCC visibility push(default) #endif #if defined(_MSC_VER) #pragma warning (push) /* disable warning about single line comments in system headers */ #pragma warning (disable : 4001) #endif #include #include #include #include #include #include #include #ifdef ENABLE_LOCALES #include #endif #if defined(_MSC_VER) #pragma warning (pop) #endif #ifdef __GNUC__ #pragma GCC visibility pop #endif #include "cJSON.h" /* define our own boolean type */ #ifdef true #undef true #endif #define true ((cJSON_bool)1) #ifdef false #undef false #endif #define false ((cJSON_bool)0) /* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has been defined in math.h */ #ifndef isinf #define isinf(d) (isnan((d - d)) && !isnan(d)) #endif #ifndef isnan #define isnan(d) (d != d) #endif #ifndef NAN #ifdef _WIN32 #define NAN sqrt(-1.0) #else #define NAN 0.0/0.0 #endif #endif typedef struct { const unsigned char *json; size_t position; } error; #if 0 /* Amazon edit */ static error global_error = { NULL, 0 }; CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) { return (const char*) (global_error.json + global_error.position); } #endif /* Amazon edit */ CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item) { if (!cJSON_IsString(item)) { return NULL; } return item->valuestring; } CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item) { if (!cJSON_IsNumber(item)) { return (double) NAN; } return item->valuedouble; } /* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ #if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 17) #error cJSON.h and cJSON.c have different versions. Make sure that both have the same. #endif CJSON_PUBLIC(const char*) cJSON_Version(void) { static char version[15]; snprintf(version, sizeof(version), "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); /* Amazon edit */ return version; } /* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) { if ((string1 == NULL) || (string2 == NULL)) { return 1; } if (string1 == string2) { return 0; } for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) { if (*string1 == '\0') { return 0; } } return tolower(*string1) - tolower(*string2); } typedef struct internal_hooks { void *(CJSON_CDECL *allocate)(size_t size); void (CJSON_CDECL *deallocate)(void *pointer); void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); } internal_hooks; #if defined(_MSC_VER) /* work around MSVC error C2322: '...' address of dllimport '...' 
is not static */ static void * CJSON_CDECL internal_malloc(size_t size) { return malloc(size); } static void CJSON_CDECL internal_free(void *pointer) { free(pointer); } static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) { return realloc(pointer, size); } #else #define internal_malloc malloc #define internal_free free #define internal_realloc realloc #endif /* strlen of character literals resolved at compile time */ #define static_strlen(string_literal) (sizeof(string_literal) - sizeof("")) static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) { size_t length = 0; unsigned char *copy = NULL; if (string == NULL) { return NULL; } length = strlen((const char*)string) + sizeof(""); copy = (unsigned char*)hooks->allocate(length); if (copy == NULL) { return NULL; } memcpy(copy, string, length); return copy; } CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) { if (hooks == NULL) { /* Reset hooks */ global_hooks.allocate = malloc; global_hooks.deallocate = free; global_hooks.reallocate = realloc; return; } global_hooks.allocate = malloc; if (hooks->malloc_fn != NULL) { global_hooks.allocate = hooks->malloc_fn; } global_hooks.deallocate = free; if (hooks->free_fn != NULL) { global_hooks.deallocate = hooks->free_fn; } /* use realloc only if both free and malloc are used */ global_hooks.reallocate = NULL; if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) { global_hooks.reallocate = realloc; } } /* Internal constructor. */ static cJSON *cJSON_New_Item(const internal_hooks * const hooks) { cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); if (node) { memset(node, '\0', sizeof(cJSON)); } return node; } /* Delete a cJSON structure. */ CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) { cJSON *next = NULL; while (item != NULL) { next = item->next; if (!(item->type & cJSON_IsReference) && (item->child != NULL)) { cJSON_Delete(item->child); } if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) { global_hooks.deallocate(item->valuestring); } if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) { global_hooks.deallocate(item->string); } global_hooks.deallocate(item); item = next; } } /* get the decimal point character of the current locale */ static unsigned char get_decimal_point(void) { #ifdef ENABLE_LOCALES struct lconv *lconv = localeconv(); return (unsigned char) lconv->decimal_point[0]; #else return '.'; #endif } typedef struct { const unsigned char *content; size_t length; size_t offset; size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */ internal_hooks hooks; } parse_buffer; /* check if the given size is left to read in a given parse buffer (starting with 1) */ #define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) /* check if the buffer can be accessed at the given index (starting with 0) */ #define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) #define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) /* get a pointer to the buffer at the position */ #define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) /* Parse the input text to generate a number, and populate the result into item. 
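 *
 * Illustrative note (not part of the original source): parsing "3e2" leaves
 * valuedouble = 300.0 and valueint = 300, while "9999999999" keeps the full
 * double value but saturates valueint to INT_MAX instead of overflowing.
 * The temporary copy made below also swaps '.' for the current locale's
 * decimal point so that strtod accepts the digits regardless of locale.
 *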
*/ static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) { double number = 0; unsigned char *after_end = NULL; unsigned char number_c_string[64]; unsigned char decimal_point = get_decimal_point(); size_t i = 0; if ((input_buffer == NULL) || (input_buffer->content == NULL)) { return false; } /* copy the number into a temporary buffer and replace '.' with the decimal point * of the current locale (for strtod) * This also takes care of '\0' not necessarily being available for marking the end of the input */ for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) { switch (buffer_at_offset(input_buffer)[i]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '+': case '-': case 'e': case 'E': number_c_string[i] = buffer_at_offset(input_buffer)[i]; break; case '.': number_c_string[i] = decimal_point; break; default: goto loop_end; } } loop_end: number_c_string[i] = '\0'; number = strtod((const char*)number_c_string, (char**)&after_end); if (number_c_string == after_end) { return false; /* parse_error */ } item->valuedouble = number; /* use saturation in case of overflow */ if (number >= INT_MAX) { item->valueint = INT_MAX; } else if (number <= (double)INT_MIN) { item->valueint = INT_MIN; } else { item->valueint = (int)number; } item->type = cJSON_Number; input_buffer->offset += (size_t)(after_end - number_c_string); return true; } /* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) { if (number >= INT_MAX) { object->valueint = INT_MAX; } else if (number <= (double)INT_MIN) { object->valueint = INT_MIN; } else { object->valueint = (int)number; } return object->valuedouble = number; } CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring) { char *copy = NULL; /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */ if ((object == NULL) || !(object->type & cJSON_String) || (object->type & cJSON_IsReference)) { return NULL; } /* return NULL if the object is corrupted */ if (object->valuestring == NULL) { return NULL; } if (strlen(valuestring) <= strlen(object->valuestring)) { strcpy(object->valuestring, valuestring); return object->valuestring; } copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks); if (copy == NULL) { return NULL; } if (object->valuestring != NULL) { cJSON_free(object->valuestring); } object->valuestring = copy; return copy; } typedef struct { unsigned char *buffer; size_t length; size_t offset; size_t depth; /* current nesting depth (for formatted printing) */ cJSON_bool noalloc; cJSON_bool format; /* is this print a formatted print */ internal_hooks hooks; } printbuffer; /* realloc printbuffer if necessary to have at least "needed" bytes more */ static unsigned char* ensure(printbuffer * const p, size_t needed) { unsigned char *newbuffer = NULL; size_t newsize = 0; if ((p == NULL) || (p->buffer == NULL)) { return NULL; } if ((p->length > 0) && (p->offset >= p->length)) { /* make sure that offset is valid */ return NULL; } if (needed > INT_MAX) { /* sizes bigger than INT_MAX are currently not supported */ return NULL; } needed += p->offset + 1; if (needed <= p->length) { return p->buffer + p->offset; } if (p->noalloc) { return NULL; } /* calculate new buffer size */ if (needed > (INT_MAX / 2)) { /* overflow of int, use INT_MAX if possible */ if (needed <= 
INT_MAX) { newsize = INT_MAX; } else { return NULL; } } else { newsize = needed * 2; } if (p->hooks.reallocate != NULL) { /* reallocate with realloc if available */ newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); if (newbuffer == NULL) { p->hooks.deallocate(p->buffer); p->length = 0; p->buffer = NULL; return NULL; } } else { /* otherwise reallocate manually */ newbuffer = (unsigned char*)p->hooks.allocate(newsize); if (!newbuffer) { p->hooks.deallocate(p->buffer); p->length = 0; p->buffer = NULL; return NULL; } memcpy(newbuffer, p->buffer, p->offset + 1); p->hooks.deallocate(p->buffer); } p->length = newsize; p->buffer = newbuffer; return newbuffer + p->offset; } /* calculate the new length of the string in a printbuffer and update the offset */ static void update_offset(printbuffer * const buffer) { const unsigned char *buffer_pointer = NULL; if ((buffer == NULL) || (buffer->buffer == NULL)) { return; } buffer_pointer = buffer->buffer + buffer->offset; buffer->offset += strlen((const char*)buffer_pointer); } /* securely comparison of floating-point variables */ static cJSON_bool compare_double(double a, double b) { double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); return (fabs(a - b) <= maxVal * DBL_EPSILON); } /* Render the number nicely from the given item into a string. */ static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output_pointer = NULL; double d = item->valuedouble; int length = 0; size_t i = 0; unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */ unsigned char decimal_point = get_decimal_point(); double test = 0.0; if (output_buffer == NULL) { return false; } /* This checks for NaN and Infinity */ if (isnan(d) || isinf(d)) { length = snprintf((char*)number_buffer, sizeof(number_buffer), "null"); /* Amazon edit */ } else if(d == (double)item->valueint) { length = snprintf((char*)number_buffer, sizeof(number_buffer), "%d", item->valueint); /* Amazon edit */ } else { /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.15g", d); /* Amazon edit */ /* Check whether the original double can be recovered */ if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d)) { /* If not, print with 17 decimal places of precision */ length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.17g", d); /* Amazon edit */ } } /* sprintf failed or buffer overrun occurred */ if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) { return false; } /* reserve appropriate space in the output */ output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); if (output_pointer == NULL) { return false; } /* copy the printed number to the output and replace locale * dependent decimal point with '.' 
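 *
 * Illustrative note (not part of the original source): compare_double above
 * uses a tolerance scaled by the larger magnitude, so for example
 * compare_double(0.1 + 0.2, 0.3) is true even though (0.1 + 0.2) == 0.3 is
 * false in IEEE-754 doubles (the difference, ~5.6e-17, is within
 * 0.3 * DBL_EPSILON). That is why the "%1.15g" attempt above is kept whenever
 * the reparsed value is merely close enough, and "%1.17g" is only a fallback.
 *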
*/ for (i = 0; i < ((size_t)length); i++) { if (number_buffer[i] == decimal_point) { output_pointer[i] = '.'; continue; } output_pointer[i] = number_buffer[i]; } output_pointer[i] = '\0'; output_buffer->offset += (size_t)length; return true; } /* parse 4 digit hexadecimal number */ static unsigned parse_hex4(const unsigned char * const input) { unsigned int h = 0; size_t i = 0; for (i = 0; i < 4; i++) { /* parse digit */ if ((input[i] >= '0') && (input[i] <= '9')) { h += (unsigned int) input[i] - '0'; } else if ((input[i] >= 'A') && (input[i] <= 'F')) { h += (unsigned int) 10 + input[i] - 'A'; } else if ((input[i] >= 'a') && (input[i] <= 'f')) { h += (unsigned int) 10 + input[i] - 'a'; } else /* invalid */ { return 0; } if (i < 3) { /* shift left to make place for the next nibble */ h = h << 4; } } return h; } /* converts a UTF-16 literal to UTF-8 * A literal can be one or two sequences of the form \uXXXX */ static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) { long unsigned int codepoint = 0; unsigned int first_code = 0; const unsigned char *first_sequence = input_pointer; unsigned char utf8_length = 0; unsigned char utf8_position = 0; unsigned char sequence_length = 0; unsigned char first_byte_mark = 0; if ((input_end - first_sequence) < 6) { /* input ends unexpectedly */ goto fail; } /* get the first utf16 sequence */ first_code = parse_hex4(first_sequence + 2); /* check that the code is valid */ if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) { goto fail; } /* UTF16 surrogate pair */ if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) { const unsigned char *second_sequence = first_sequence + 6; unsigned int second_code = 0; sequence_length = 12; /* \uXXXX\uXXXX */ if ((input_end - second_sequence) < 6) { /* input ends unexpectedly */ goto fail; } if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) { /* missing second half of the surrogate pair */ goto fail; } /* get the second utf16 sequence */ second_code = parse_hex4(second_sequence + 2); /* check that the code is valid */ if ((second_code < 0xDC00) || (second_code > 0xDFFF)) { /* invalid second half of the surrogate pair */ goto fail; } /* calculate the unicode codepoint from the surrogate pair */ codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); } else { sequence_length = 6; /* \uXXXX */ codepoint = first_code; } /* encode as UTF-8 * takes at maximum 4 bytes to encode: * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ if (codepoint < 0x80) { /* normal ascii, encoding 0xxxxxxx */ utf8_length = 1; } else if (codepoint < 0x800) { /* two bytes, encoding 110xxxxx 10xxxxxx */ utf8_length = 2; first_byte_mark = 0xC0; /* 11000000 */ } else if (codepoint < 0x10000) { /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ utf8_length = 3; first_byte_mark = 0xE0; /* 11100000 */ } else if (codepoint <= 0x10FFFF) { /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ utf8_length = 4; first_byte_mark = 0xF0; /* 11110000 */ } else { /* invalid unicode codepoint */ goto fail; } /* encode as utf8 */ for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) { /* 10xxxxxx */ (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); codepoint >>= 6; } /* encode first byte */ if (utf8_length > 1) { (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); } else { (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); } 
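    /*
     * Worked example (illustrative, not part of the original source): the JSON
     * escape sequence \uD83D\uDE00 is a surrogate pair, so
     *   codepoint = 0x10000 + (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF)) = 0x1F600
     * which falls in the 4-byte range above and is emitted as F0 9F 98 80.
     */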
*output_pointer += utf8_length; return sequence_length; fail: return 0; } /* Parse the input text into an unescaped cinput, and populate item. */ static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) { const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; unsigned char *output_pointer = NULL; unsigned char *output = NULL; /* not a string */ if (buffer_at_offset(input_buffer)[0] != '\"') { goto fail; } { /* calculate approximate size of the output (overestimate) */ size_t allocation_length = 0; size_t skipped_bytes = 0; while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) { /* is escape sequence */ if (input_end[0] == '\\') { if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) { /* prevent buffer overflow when last input character is a backslash */ goto fail; } skipped_bytes++; input_end++; } input_end++; } if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) { goto fail; /* string ended unexpectedly */ } /* This is at most how much we need for the output */ allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); if (output == NULL) { goto fail; /* allocation failure */ } } output_pointer = output; /* loop through the string literal */ while (input_pointer < input_end) { if (*input_pointer != '\\') { *output_pointer++ = *input_pointer++; } /* escape sequence */ else { unsigned char sequence_length = 2; if ((input_end - input_pointer) < 1) { goto fail; } switch (input_pointer[1]) { case 'b': *output_pointer++ = '\b'; break; case 'f': *output_pointer++ = '\f'; break; case 'n': *output_pointer++ = '\n'; break; case 'r': *output_pointer++ = '\r'; break; case 't': *output_pointer++ = '\t'; break; case '\"': case '\\': case '/': *output_pointer++ = input_pointer[1]; break; /* UTF-16 literal */ case 'u': sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); if (sequence_length == 0) { /* failed to convert UTF16-literal to UTF-8 */ goto fail; } break; default: goto fail; } input_pointer += sequence_length; } } /* zero terminate the output */ *output_pointer = '\0'; item->type = cJSON_String; item->valuestring = (char*)output; input_buffer->offset = (size_t) (input_end - input_buffer->content); input_buffer->offset++; return true; fail: if (output != NULL) { input_buffer->hooks.deallocate(output); } if (input_pointer != NULL) { input_buffer->offset = (size_t)(input_pointer - input_buffer->content); } return false; } /* Render the cstring provided to an escaped version that can be printed. 
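 *
 * Illustrative example (not part of the original source): for an input
 * containing   He said "hi"   followed by a tab and the control byte 0x01,
 * the rendered JSON string is
 *   "He said \"hi\"\t\u0001"
 * i.e. quotes, backslashes and the common control characters use their short
 * escapes, and any other byte below 0x20 becomes a six-character \uXXXX form.
 *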
*/ static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) { const unsigned char *input_pointer = NULL; unsigned char *output = NULL; unsigned char *output_pointer = NULL; size_t output_length = 0; /* numbers of additional characters needed for escaping */ size_t escape_characters = 0; if (output_buffer == NULL) { return false; } /* empty string */ if (input == NULL) { output = ensure(output_buffer, sizeof("\"\"")); if (output == NULL) { return false; } strcpy((char*)output, "\"\""); return true; } /* set "flag" to 1 if something needs to be escaped */ for (input_pointer = input; *input_pointer; input_pointer++) { switch (*input_pointer) { case '\"': case '\\': case '\b': case '\f': case '\n': case '\r': case '\t': /* one character escape sequence */ escape_characters++; break; default: if (*input_pointer < 32) { /* UTF-16 escape sequence uXXXX */ escape_characters += 5; } break; } } output_length = (size_t)(input_pointer - input) + escape_characters; output = ensure(output_buffer, output_length + sizeof("\"\"")); if (output == NULL) { return false; } /* no characters have to be escaped */ if (escape_characters == 0) { output[0] = '\"'; memcpy(output + 1, input, output_length); output[output_length + 1] = '\"'; output[output_length + 2] = '\0'; return true; } output[0] = '\"'; output_pointer = output + 1; /* copy the string */ for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) { if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) { /* normal character, copy */ *output_pointer = *input_pointer; } else { /* character needs to be escaped */ *output_pointer++ = '\\'; switch (*input_pointer) { case '\\': *output_pointer = '\\'; break; case '\"': *output_pointer = '\"'; break; case '\b': *output_pointer = 'b'; break; case '\f': *output_pointer = 'f'; break; case '\n': *output_pointer = 'n'; break; case '\r': *output_pointer = 'r'; break; case '\t': *output_pointer = 't'; break; default: /* escape and print as unicode codepoint */ snprintf((char*)output_pointer, 6, "u%04x", *input_pointer); /* Amazon edit */ output_pointer += 4; break; } } } output[output_length + 1] = '\"'; output[output_length + 2] = '\0'; return true; } /* Invoke print_string_ptr (which is useful) on an item. */ static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) { return print_string_ptr((unsigned char*)item->valuestring, p); } /* Predeclare these prototypes. 
*/ static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); /* Utility to jump whitespace and cr/lf */ static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) { if ((buffer == NULL) || (buffer->content == NULL)) { return NULL; } if (cannot_access_at_index(buffer, 0)) { return buffer; } while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) { buffer->offset++; } if (buffer->offset == buffer->length) { buffer->offset--; } return buffer; } /* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) { if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) { return NULL; } if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) { buffer->offset += 3; } return buffer; } CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) { size_t buffer_length; if (NULL == value) { return NULL; } /* Adding null character size due to require_null_terminated. */ buffer_length = strlen(value) + sizeof(""); return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated); } /* Parse an object - create a new root, and populate. */ CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated) { parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; cJSON *item = NULL; #if 0 /* Amazon edit */ /* reset error position */ global_error.json = NULL; global_error.position = 0; #endif /* Amazon edit */ if (value == NULL || 0 == buffer_length) { goto fail; } buffer.content = (const unsigned char*)value; buffer.length = buffer_length; buffer.offset = 0; buffer.hooks = global_hooks; item = cJSON_New_Item(&global_hooks); if (item == NULL) /* memory fail */ { goto fail; } if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) { /* parse failure. ep is set. 
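 *
 * Illustrative usage sketch (not part of the original source): because this
 * Amazon-modified copy removes the thread-unsafe cJSON_GetErrorPtr, callers
 * that want an error position pass return_parse_end instead, e.g.
 *
 *   const char *end = NULL;
 *   cJSON *root = cJSON_ParseWithOpts("{\"a\": 1,}", &end, 1);
 *   if (root == NULL) {
 *       "end" now points at or near the offending character
 *   }
 *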
*/ goto fail; } /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ if (require_null_terminated) { buffer_skip_whitespace(&buffer); if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') { goto fail; } } if (return_parse_end) { *return_parse_end = (const char*)buffer_at_offset(&buffer); } return item; fail: if (item != NULL) { cJSON_Delete(item); } if (value != NULL) { error local_error; local_error.json = (const unsigned char*)value; local_error.position = 0; if (buffer.offset < buffer.length) { local_error.position = buffer.offset; } else if (buffer.length > 0) { local_error.position = buffer.length - 1; } if (return_parse_end != NULL) { *return_parse_end = (const char*)local_error.json + local_error.position; } #if 0 /* Amazon edit */ global_error = local_error; #endif /* Amazon edit */ } return NULL; } /* Default options for cJSON_Parse */ CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) { return cJSON_ParseWithOpts(value, 0, 0); } CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length) { return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); } #define cjson_min(a, b) (((a) < (b)) ? (a) : (b)) static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) { static const size_t default_buffer_size = 256; printbuffer buffer[1]; unsigned char *printed = NULL; memset(buffer, 0, sizeof(buffer)); /* create buffer */ buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); buffer->length = default_buffer_size; buffer->format = format; buffer->hooks = *hooks; if (buffer->buffer == NULL) { goto fail; } /* print the value */ if (!print_value(item, buffer)) { goto fail; } update_offset(buffer); /* check if reallocate is available */ if (hooks->reallocate != NULL) { printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); if (printed == NULL) { goto fail; } buffer->buffer = NULL; } else /* otherwise copy the JSON over to a new buffer */ { printed = (unsigned char*) hooks->allocate(buffer->offset + 1); if (printed == NULL) { goto fail; } memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); printed[buffer->offset] = '\0'; /* just to be sure */ /* free the buffer */ hooks->deallocate(buffer->buffer); } return printed; fail: if (buffer->buffer != NULL) { hooks->deallocate(buffer->buffer); } if (printed != NULL) { hooks->deallocate(printed); } return NULL; } /* Render a cJSON item/entity/structure to text. 
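 *
 * Illustrative usage sketch (not part of the original source): for some
 * cJSON *root, the returned string is allocated through the configured hooks
 * and must be released by the caller, e.g.
 *
 *   char *pretty = cJSON_Print(root);             with indentation and newlines
 *   char *flat   = cJSON_PrintUnformatted(root);  single line
 *   cJSON_free(pretty);
 *   cJSON_free(flat);
 *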
*/ CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) { return (char*)print(item, true, &global_hooks); } CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) { return (char*)print(item, false, &global_hooks); } CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) { printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; if (prebuffer < 0) { return NULL; } p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); if (!p.buffer) { return NULL; } p.length = (size_t)prebuffer; p.offset = 0; p.noalloc = false; p.format = fmt; p.hooks = global_hooks; if (!print_value(item, &p)) { global_hooks.deallocate(p.buffer); return NULL; } return (char*)p.buffer; } CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format) { printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; if ((length < 0) || (buffer == NULL)) { return false; } p.buffer = (unsigned char*)buffer; p.length = (size_t)length; p.offset = 0; p.noalloc = true; p.format = format; p.hooks = global_hooks; return print_value(item, &p); } /* Parser core - when encountering text, process appropriately. */ static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) { if ((input_buffer == NULL) || (input_buffer->content == NULL)) { return false; /* no input */ } /* parse the different types of values */ /* null */ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) { item->type = cJSON_NULL; input_buffer->offset += 4; return true; } /* false */ if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) { item->type = cJSON_False; input_buffer->offset += 5; return true; } /* true */ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) { item->type = cJSON_True; item->valueint = 1; input_buffer->offset += 4; return true; } /* string */ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) { return parse_string(item, input_buffer); } /* number */ if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) { return parse_number(item, input_buffer); } /* array */ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) { return parse_array(item, input_buffer); } /* object */ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) { return parse_object(item, input_buffer); } return false; } /* Render a value to text. 
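 *
 * Illustrative sketch (not part of the original source): cJSON_PrintPreallocated
 * above writes into a caller-owned buffer and simply returns 0 when the output
 * does not fit, e.g.
 *
 *   char buf[256];
 *   if (!cJSON_PrintPreallocated(root, buf, (int)sizeof(buf), 1)) {
 *       output did not fit (or input was invalid)
 *   }
 *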
*/ static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output = NULL; if ((item == NULL) || (output_buffer == NULL)) { return false; } switch ((item->type) & 0xFF) { case cJSON_NULL: output = ensure(output_buffer, 5); if (output == NULL) { return false; } strcpy((char*)output, "null"); return true; case cJSON_False: output = ensure(output_buffer, 6); if (output == NULL) { return false; } strcpy((char*)output, "false"); return true; case cJSON_True: output = ensure(output_buffer, 5); if (output == NULL) { return false; } strcpy((char*)output, "true"); return true; case cJSON_Number: return print_number(item, output_buffer); case cJSON_Raw: { size_t raw_length = 0; if (item->valuestring == NULL) { return false; } raw_length = strlen(item->valuestring) + sizeof(""); output = ensure(output_buffer, raw_length); if (output == NULL) { return false; } memcpy(output, item->valuestring, raw_length); return true; } case cJSON_String: return print_string(item, output_buffer); case cJSON_Array: return print_array(item, output_buffer); case cJSON_Object: return print_object(item, output_buffer); default: return false; } } /* Build an array from input text. */ static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) { cJSON *head = NULL; /* head of the linked list */ cJSON *current_item = NULL; if (input_buffer->depth >= CJSON_NESTING_LIMIT) { return false; /* to deeply nested */ } input_buffer->depth++; if (buffer_at_offset(input_buffer)[0] != '[') { /* not an array */ goto fail; } input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) { /* empty array */ goto success; } /* check if we skipped to the end of the buffer */ if (cannot_access_at_index(input_buffer, 0)) { input_buffer->offset--; goto fail; } /* step back to character in front of the first element */ input_buffer->offset--; /* loop through the comma separated array elements */ do { /* allocate next item */ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); if (new_item == NULL) { goto fail; /* allocation failure */ } /* attach next item to list */ if (head == NULL) { /* start the linked list */ current_item = head = new_item; } else { /* add to the end and advance */ current_item->next = new_item; new_item->prev = current_item; current_item = new_item; } /* parse next value */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (!parse_value(current_item, input_buffer)) { goto fail; /* failed to parse value */ } buffer_skip_whitespace(input_buffer); } while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') { goto fail; /* expected end of array */ } success: input_buffer->depth--; if (head != NULL) { head->prev = current_item; } item->type = cJSON_Array; item->child = head; input_buffer->offset++; return true; fail: if (head != NULL) { cJSON_Delete(head); } return false; } /* Render an array to text */ static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output_pointer = NULL; size_t length = 0; cJSON *current_element = item->child; if (output_buffer == NULL) { return false; } /* Compose the output array. 
*/ /* opening square bracket */ output_pointer = ensure(output_buffer, 1); if (output_pointer == NULL) { return false; } *output_pointer = '['; output_buffer->offset++; output_buffer->depth++; while (current_element != NULL) { if (!print_value(current_element, output_buffer)) { return false; } update_offset(output_buffer); if (current_element->next) { length = (size_t) (output_buffer->format ? 2 : 1); output_pointer = ensure(output_buffer, length + 1); if (output_pointer == NULL) { return false; } *output_pointer++ = ','; if(output_buffer->format) { *output_pointer++ = ' '; } *output_pointer = '\0'; output_buffer->offset += length; } current_element = current_element->next; } output_pointer = ensure(output_buffer, 2); if (output_pointer == NULL) { return false; } *output_pointer++ = ']'; *output_pointer = '\0'; output_buffer->depth--; return true; } /* Build an object from the text. */ static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) { cJSON *head = NULL; /* linked list head */ cJSON *current_item = NULL; if (input_buffer->depth >= CJSON_NESTING_LIMIT) { return false; /* to deeply nested */ } input_buffer->depth++; if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) { goto fail; /* not an object */ } input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) { goto success; /* empty object */ } /* check if we skipped to the end of the buffer */ if (cannot_access_at_index(input_buffer, 0)) { input_buffer->offset--; goto fail; } /* step back to character in front of the first element */ input_buffer->offset--; /* loop through the comma separated array elements */ do { /* allocate next item */ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); if (new_item == NULL) { goto fail; /* allocation failure */ } /* attach next item to list */ if (head == NULL) { /* start the linked list */ current_item = head = new_item; } else { /* add to the end and advance */ current_item->next = new_item; new_item->prev = current_item; current_item = new_item; } /* parse the name of the child */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (!parse_string(current_item, input_buffer)) { goto fail; /* failed to parse name */ } buffer_skip_whitespace(input_buffer); /* swap valuestring and string, because we parsed the name */ current_item->string = current_item->valuestring; current_item->valuestring = NULL; if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) { goto fail; /* invalid object */ } /* parse the value */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (!parse_value(current_item, input_buffer)) { goto fail; /* failed to parse value */ } buffer_skip_whitespace(input_buffer); } while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) { goto fail; /* expected end of object */ } success: input_buffer->depth--; if (head != NULL) { head->prev = current_item; } item->type = cJSON_Object; item->child = head; input_buffer->offset++; return true; fail: if (head != NULL) { cJSON_Delete(head); } return false; } /* Render an object to text. 
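 *
 * Illustrative note (not part of the original source): a container's children
 * form a linked list reachable through child and next, so both the printers
 * here and user code walk it the same way, e.g.
 *
 *   for (cJSON *it = root->child; it != NULL; it = it->next) {
 *       it->string is the key when root is an object, usually NULL in arrays
 *   }
 *
 * (cJSON.h also ships a cJSON_ArrayForEach macro that wraps this pattern.)
 *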
*/ static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output_pointer = NULL; size_t length = 0; cJSON *current_item = item->child; if (output_buffer == NULL) { return false; } /* Compose the output: */ length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */ output_pointer = ensure(output_buffer, length + 1); if (output_pointer == NULL) { return false; } *output_pointer++ = '{'; output_buffer->depth++; if (output_buffer->format) { *output_pointer++ = '\n'; } output_buffer->offset += length; while (current_item) { if (output_buffer->format) { size_t i; output_pointer = ensure(output_buffer, output_buffer->depth); if (output_pointer == NULL) { return false; } for (i = 0; i < output_buffer->depth; i++) { *output_pointer++ = '\t'; } output_buffer->offset += output_buffer->depth; } /* print key */ if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) { return false; } update_offset(output_buffer); length = (size_t) (output_buffer->format ? 2 : 1); output_pointer = ensure(output_buffer, length); if (output_pointer == NULL) { return false; } *output_pointer++ = ':'; if (output_buffer->format) { *output_pointer++ = '\t'; } output_buffer->offset += length; /* print value */ if (!print_value(current_item, output_buffer)) { return false; } update_offset(output_buffer); /* print comma if not last */ length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0)); output_pointer = ensure(output_buffer, length + 1); if (output_pointer == NULL) { return false; } if (current_item->next) { *output_pointer++ = ','; } if (output_buffer->format) { *output_pointer++ = '\n'; } *output_pointer = '\0'; output_buffer->offset += length; current_item = current_item->next; } output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); if (output_pointer == NULL) { return false; } if (output_buffer->format) { size_t i; for (i = 0; i < (output_buffer->depth - 1); i++) { *output_pointer++ = '\t'; } } *output_pointer++ = '}'; *output_pointer = '\0'; output_buffer->depth--; return true; } /* Get Array size/item / object item. */ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) { cJSON *child = NULL; size_t size = 0; if (array == NULL) { return 0; } child = array->child; while(child != NULL) { size++; child = child->next; } /* FIXME: Can overflow here. 
Cannot be fixed without breaking the API */ return (int)size; } static cJSON* get_array_item(const cJSON *array, size_t index) { cJSON *current_child = NULL; if (array == NULL) { return NULL; } current_child = array->child; while ((current_child != NULL) && (index > 0)) { index--; current_child = current_child->next; } return current_child; } CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) { if (index < 0) { return NULL; } return get_array_item(array, (size_t)index); } static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) { cJSON *current_element = NULL; if ((object == NULL) || (name == NULL)) { return NULL; } current_element = object->child; if (case_sensitive) { while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0)) { current_element = current_element->next; } } else { while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) { current_element = current_element->next; } } if ((current_element == NULL) || (current_element->string == NULL)) { return NULL; } return current_element; } CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) { return get_object_item(object, string, false); } CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) { return get_object_item(object, string, true); } CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) { return cJSON_GetObjectItem(object, string) ? 1 : 0; } /* Utility for array list handling. */ static void suffix_object(cJSON *prev, cJSON *item) { prev->next = item; item->prev = prev; } /* Utility for handling references. */ static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) { cJSON *reference = NULL; if (item == NULL) { return NULL; } reference = cJSON_New_Item(hooks); if (reference == NULL) { return NULL; } memcpy(reference, item, sizeof(cJSON)); reference->string = NULL; reference->type |= cJSON_IsReference; reference->next = reference->prev = NULL; return reference; } static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) { cJSON *child = NULL; if ((item == NULL) || (array == NULL) || (array == item)) { return false; } child = array->child; /* * To find the last item in array quickly, we use prev in array */ if (child == NULL) { /* list is empty, start new one */ array->child = item; item->prev = item; item->next = NULL; } else { /* append to the end */ if (child->prev) { suffix_object(child->prev, item); array->child->prev = item; } } return true; } /* Add item to array/object. 
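 *
 * Illustrative usage sketch (not part of the original source): building
 * {"name":"aws-crt-python","ids":[1,2,3]} with the helpers defined below.
 *
 *   cJSON *root = cJSON_CreateObject();
 *   cJSON_AddStringToObject(root, "name", "aws-crt-python");
 *   int ids[] = {1, 2, 3};
 *   cJSON_AddItemToObject(root, "ids", cJSON_CreateIntArray(ids, 3));
 *   char *text = cJSON_PrintUnformatted(root);
 *   cJSON_free(text);
 *   cJSON_Delete(root);
 *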
*/ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) { return add_item_to_array(array, item); } #if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) #pragma GCC diagnostic push #endif #ifdef __GNUC__ #pragma GCC diagnostic ignored "-Wcast-qual" #endif /* helper function to cast away const */ static void* cast_away_const(const void* string) { return (void*)string; } #if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) #pragma GCC diagnostic pop #endif static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) { char *new_key = NULL; int new_type = cJSON_Invalid; if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item)) { return false; } if (constant_key) { new_key = (char*)cast_away_const(string); new_type = item->type | cJSON_StringIsConst; } else { new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); if (new_key == NULL) { return false; } new_type = item->type & ~cJSON_StringIsConst; } if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) { hooks->deallocate(item->string); } item->string = new_key; item->type = new_type; return add_item_to_array(object, item); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) { return add_item_to_object(object, string, item, &global_hooks, false); } /* Add an item to an object with constant string as key */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) { return add_item_to_object(object, string, item, &global_hooks, true); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) { if (array == NULL) { return false; } return add_item_to_array(array, create_reference(item, &global_hooks)); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) { if ((object == NULL) || (string == NULL)) { return false; } return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); } CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name) { cJSON *null = cJSON_CreateNull(); if (add_item_to_object(object, name, null, &global_hooks, false)) { return null; } cJSON_Delete(null); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) { cJSON *true_item = cJSON_CreateTrue(); if (add_item_to_object(object, name, true_item, &global_hooks, false)) { return true_item; } cJSON_Delete(true_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) { cJSON *false_item = cJSON_CreateFalse(); if (add_item_to_object(object, name, false_item, &global_hooks, false)) { return false_item; } cJSON_Delete(false_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) { cJSON *bool_item = cJSON_CreateBool(boolean); if (add_item_to_object(object, name, bool_item, &global_hooks, false)) { return bool_item; } cJSON_Delete(bool_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number) { cJSON *number_item = cJSON_CreateNumber(number); if (add_item_to_object(object, name, number_item, &global_hooks, 
false)) { return number_item; } cJSON_Delete(number_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string) { cJSON *string_item = cJSON_CreateString(string); if (add_item_to_object(object, name, string_item, &global_hooks, false)) { return string_item; } cJSON_Delete(string_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) { cJSON *raw_item = cJSON_CreateRaw(raw); if (add_item_to_object(object, name, raw_item, &global_hooks, false)) { return raw_item; } cJSON_Delete(raw_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) { cJSON *object_item = cJSON_CreateObject(); if (add_item_to_object(object, name, object_item, &global_hooks, false)) { return object_item; } cJSON_Delete(object_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) { cJSON *array = cJSON_CreateArray(); if (add_item_to_object(object, name, array, &global_hooks, false)) { return array; } cJSON_Delete(array); return NULL; } CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) { if ((parent == NULL) || (item == NULL)) { return NULL; } if (item != parent->child) { /* not the first element */ item->prev->next = item->next; } if (item->next != NULL) { /* not the last element */ item->next->prev = item->prev; } if (item == parent->child) { /* first element */ parent->child = item->next; } else if (item->next == NULL) { /* last element */ parent->child->prev = item->prev; } /* make sure the detached item doesn't point anywhere anymore */ item->prev = NULL; item->next = NULL; return item; } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) { if (which < 0) { return NULL; } return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); } CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) { cJSON_Delete(cJSON_DetachItemFromArray(array, which)); } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) { cJSON *to_detach = cJSON_GetObjectItem(object, string); return cJSON_DetachItemViaPointer(object, to_detach); } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) { cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); return cJSON_DetachItemViaPointer(object, to_detach); } CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) { cJSON_Delete(cJSON_DetachItemFromObject(object, string)); } CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) { cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); } /* Replace array/object items with new ones. 
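 *
 * Illustrative sketch (not part of the original source): the item being
 * replaced is detached and freed by the call, and the new item takes over
 * its position (and, for objects, its key).
 *
 *   cJSON_ReplaceItemInObject(root, "name", cJSON_CreateString("renamed"));
 *   cJSON_ReplaceItemInArray(ids, 0, cJSON_CreateNumber(42));
 *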
*/ CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) { cJSON *after_inserted = NULL; if (which < 0 || newitem == NULL) { return false; } after_inserted = get_array_item(array, (size_t)which); if (after_inserted == NULL) { return add_item_to_array(array, newitem); } if (after_inserted != array->child && after_inserted->prev == NULL) { /* return false if after_inserted is a corrupted array item */ return false; } newitem->next = after_inserted; newitem->prev = after_inserted->prev; after_inserted->prev = newitem; if (after_inserted == array->child) { array->child = newitem; } else { newitem->prev->next = newitem; } return true; } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) { if ((parent == NULL) || (parent->child == NULL) || (replacement == NULL) || (item == NULL)) { return false; } if (replacement == item) { return true; } replacement->next = item->next; replacement->prev = item->prev; if (replacement->next != NULL) { replacement->next->prev = replacement; } if (parent->child == item) { if (parent->child->prev == parent->child) { replacement->prev = replacement; } parent->child = replacement; } else { /* * To find the last item in array quickly, we use prev in array. * We can't modify the last item's next pointer where this item was the parent's child */ if (replacement->prev != NULL) { replacement->prev->next = replacement; } if (replacement->next == NULL) { parent->child->prev = replacement; } } item->next = NULL; item->prev = NULL; cJSON_Delete(item); return true; } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) { if (which < 0) { return false; } return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); } static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) { if ((replacement == NULL) || (string == NULL)) { return false; } /* replace the name in the replacement */ if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) { cJSON_free(replacement->string); } replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); if (replacement->string == NULL) { return false; } replacement->type &= ~cJSON_StringIsConst; return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) { return replace_item_in_object(object, string, newitem, false); } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) { return replace_item_in_object(object, string, newitem, true); } /* Create basic types: */ CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_NULL; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_True; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_False; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = boolean ? 
cJSON_True : cJSON_False; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_Number; item->valuedouble = num; /* use saturation in case of overflow */ if (num >= INT_MAX) { item->valueint = INT_MAX; } else if (num <= (double)INT_MIN) { item->valueint = INT_MIN; } else { item->valueint = (int)num; } } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_String; item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); if(!item->valuestring) { cJSON_Delete(item); return NULL; } } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) { cJSON *item = cJSON_New_Item(&global_hooks); if (item != NULL) { item->type = cJSON_String | cJSON_IsReference; item->valuestring = (char*)cast_away_const(string); } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) { cJSON *item = cJSON_New_Item(&global_hooks); if (item != NULL) { item->type = cJSON_Object | cJSON_IsReference; item->child = (cJSON*)cast_away_const(child); } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { cJSON *item = cJSON_New_Item(&global_hooks); if (item != NULL) { item->type = cJSON_Array | cJSON_IsReference; item->child = (cJSON*)cast_away_const(child); } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_Raw; item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); if(!item->valuestring) { cJSON_Delete(item); return NULL; } } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type=cJSON_Array; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) { cJSON *item = cJSON_New_Item(&global_hooks); if (item) { item->type = cJSON_Object; } return item; } /* Create Arrays: */ CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (numbers == NULL)) { return NULL; } a = cJSON_CreateArray(); for(i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateNumber(numbers[i]); if (!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p, n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (numbers == NULL)) { return NULL; } a = cJSON_CreateArray(); for(i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateNumber((double)numbers[i]); if(!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p, n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (numbers == NULL)) { return NULL; } a = cJSON_CreateArray(); for(i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateNumber(numbers[i]); if(!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p, n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const 
*strings, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (strings == NULL)) { return NULL; } a = cJSON_CreateArray(); for (i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateString(strings[i]); if(!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p,n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } /* Duplication */ CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) { cJSON *newitem = NULL; cJSON *child = NULL; cJSON *next = NULL; cJSON *newchild = NULL; /* Bail on bad ptr */ if (!item) { goto fail; } /* Create new item */ newitem = cJSON_New_Item(&global_hooks); if (!newitem) { goto fail; } /* Copy over all vars */ newitem->type = item->type & (~cJSON_IsReference); newitem->valueint = item->valueint; newitem->valuedouble = item->valuedouble; if (item->valuestring) { newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); if (!newitem->valuestring) { goto fail; } } if (item->string) { newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); if (!newitem->string) { goto fail; } } /* If non-recursive, then we're done! */ if (!recurse) { return newitem; } /* Walk the ->next chain for the child. */ child = item->child; while (child != NULL) { newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ if (!newchild) { goto fail; } if (next != NULL) { /* If newitem->child already set, then crosswire ->prev and ->next and move on */ next->next = newchild; newchild->prev = next; next = newchild; } else { /* Set newitem->child and move to it */ newitem->child = newchild; next = newchild; } child = child->next; } if (newitem && newitem->child) { newitem->child->prev = newchild; } return newitem; fail: if (newitem != NULL) { cJSON_Delete(newitem); } return NULL; } static void skip_oneline_comment(char **input) { *input += static_strlen("//"); for (; (*input)[0] != '\0'; ++(*input)) { if ((*input)[0] == '\n') { *input += static_strlen("\n"); return; } } } static void skip_multiline_comment(char **input) { *input += static_strlen("/*"); for (; (*input)[0] != '\0'; ++(*input)) { if (((*input)[0] == '*') && ((*input)[1] == '/')) { *input += static_strlen("*/"); return; } } } static void minify_string(char **input, char **output) { (*output)[0] = (*input)[0]; *input += static_strlen("\""); *output += static_strlen("\""); for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { (*output)[0] = (*input)[0]; if ((*input)[0] == '\"') { (*output)[0] = '\"'; *input += static_strlen("\""); *output += static_strlen("\""); return; } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { (*output)[1] = (*input)[1]; *input += static_strlen("\""); *output += static_strlen("\""); } } } CJSON_PUBLIC(void) cJSON_Minify(char *json) { char *into = json; if (json == NULL) { return; } while (json[0] != '\0') { switch (json[0]) { case ' ': case '\t': case '\r': case '\n': json++; break; case '/': if (json[1] == '/') { skip_oneline_comment(&json); } else if (json[1] == '*') { skip_multiline_comment(&json); } else { json++; } break; case '\"': minify_string(&json, (char**)&into); break; default: into[0] = json[0]; json++; into++; } } /* and null-terminate. 
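   An illustrative call (not part of upstream cJSON): the buffer is edited in
   place, so it must be writable -- a string constant would not do.

       char json[] = " { \"a\" : [1, 2] }   // comment";
       cJSON_Minify(json);      -- json now holds {"a":[1,2]}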
*/ *into = '\0'; } CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Invalid; } CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_False; } CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xff) == cJSON_True; } CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & (cJSON_True | cJSON_False)) != 0; } CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_NULL; } CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Number; } CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_String; } CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Array; } CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Object; } CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Raw; } CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) { if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF))) { return false; } /* check if type is valid */ switch (a->type & 0xFF) { case cJSON_False: case cJSON_True: case cJSON_NULL: case cJSON_Number: case cJSON_String: case cJSON_Raw: case cJSON_Array: case cJSON_Object: break; default: return false; } /* identical objects are equal */ if (a == b) { return true; } switch (a->type & 0xFF) { /* in these cases and equal type is enough */ case cJSON_False: case cJSON_True: case cJSON_NULL: return true; case cJSON_Number: if (compare_double(a->valuedouble, b->valuedouble)) { return true; } return false; case cJSON_String: case cJSON_Raw: if ((a->valuestring == NULL) || (b->valuestring == NULL)) { return false; } if (strcmp(a->valuestring, b->valuestring) == 0) { return true; } return false; case cJSON_Array: { cJSON *a_element = a->child; cJSON *b_element = b->child; for (; (a_element != NULL) && (b_element != NULL);) { if (!cJSON_Compare(a_element, b_element, case_sensitive)) { return false; } a_element = a_element->next; b_element = b_element->next; } /* one of the arrays is longer than the other */ if (a_element != b_element) { return false; } return true; } case cJSON_Object: { cJSON *a_element = NULL; cJSON *b_element = NULL; cJSON_ArrayForEach(a_element, a) { /* TODO This has O(n^2) runtime, which is horrible! 
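   Each element of a costs a linear get_object_item scan over b, and the same
   containment check is then run in the other direction below, hence the
   quadratic bound. An illustrative call (not part of upstream cJSON):

       cJSON *a = cJSON_Parse("{\"x\":1,\"y\":[true,null]}");
       cJSON *b = cJSON_Parse("{\"y\":[true,null],\"x\":1}");
       cJSON_Compare(a, b, 1);   -- returns 1: object member order is ignored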
*/ b_element = get_object_item(b, a_element->string, case_sensitive); if (b_element == NULL) { return false; } if (!cJSON_Compare(a_element, b_element, case_sensitive)) { return false; } } /* doing this twice, once on a and b to prevent true comparison if a subset of b * TODO: Do this the proper way, this is just a fix for now */ cJSON_ArrayForEach(b_element, b) { a_element = get_object_item(a, b_element->string, case_sensitive); if (a_element == NULL) { return false; } if (!cJSON_Compare(b_element, a_element, case_sensitive)) { return false; } } return true; } default: return false; } } CJSON_PUBLIC(void *) cJSON_malloc(size_t size) { return global_hooks.allocate(size); } CJSON_PUBLIC(void) cJSON_free(void *object) { global_hooks.deallocate(object); } /* Amazon edit */ /* NOLINTEND */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/external/cJSON.h000066400000000000000000000401771456575232400247400ustar00rootroot00000000000000/* Copyright (c) 2009-2017 Dave Gamble and cJSON contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* * This file has been modified from its original version by Amazon: * (1) Remove cJSON_GetErrorPtr and global_error as they are not thread-safe. * (2) Add NOLINTBEGIN/NOLINTEND so clang-tidy ignores file. */ /* NOLINTBEGIN */ #ifndef cJSON__h #define cJSON__h #ifdef __cplusplus extern "C" { #endif #if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) #define __WINDOWS__ #endif #ifdef __WINDOWS__ /* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. 
For windows you have 3 define options: CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol For *nix builds that support visibility attribute, you can define similar behavior by setting default visibility to hidden by adding -fvisibility=hidden (for gcc) or -xldscope=hidden (for sun cc) to CFLAGS then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does */ #define CJSON_CDECL __cdecl #define CJSON_STDCALL __stdcall /* export symbols by default, this is necessary for copy pasting the C and header file */ #if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) #define CJSON_EXPORT_SYMBOLS #endif #if defined(CJSON_HIDE_SYMBOLS) #define CJSON_PUBLIC(type) type CJSON_STDCALL #elif defined(CJSON_EXPORT_SYMBOLS) #define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL #elif defined(CJSON_IMPORT_SYMBOLS) #define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL #endif #else /* !__WINDOWS__ */ #define CJSON_CDECL #define CJSON_STDCALL #if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) #define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type #else #define CJSON_PUBLIC(type) type #endif #endif /* project version */ #define CJSON_VERSION_MAJOR 1 #define CJSON_VERSION_MINOR 7 #define CJSON_VERSION_PATCH 17 #include /* cJSON Types: */ #define cJSON_Invalid (0) #define cJSON_False (1 << 0) #define cJSON_True (1 << 1) #define cJSON_NULL (1 << 2) #define cJSON_Number (1 << 3) #define cJSON_String (1 << 4) #define cJSON_Array (1 << 5) #define cJSON_Object (1 << 6) #define cJSON_Raw (1 << 7) /* raw json */ #define cJSON_IsReference 256 #define cJSON_StringIsConst 512 /* The cJSON structure: */ typedef struct cJSON { /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ struct cJSON *next; struct cJSON *prev; /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ struct cJSON *child; /* The type of the item, as above. */ int type; /* The item's string, if type==cJSON_String and type == cJSON_Raw */ char *valuestring; /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ int valueint; /* The item's number, if type==cJSON_Number */ double valuedouble; /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ char *string; } cJSON; typedef struct cJSON_Hooks { /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ void *(CJSON_CDECL *malloc_fn)(size_t sz); void (CJSON_CDECL *free_fn)(void *ptr); } cJSON_Hooks; typedef int cJSON_bool; /* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. * This is to prevent stack overflows. 
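   Because the definition below is wrapped in #ifndef, the limit can be
   overridden at build time without editing this header -- an illustrative
   command, not an aws-crt-python build requirement:

       cc -DCJSON_NESTING_LIMIT=200 -c cJSON.c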
*/ #ifndef CJSON_NESTING_LIMIT #define CJSON_NESTING_LIMIT 1000 #endif /* returns the version of cJSON as a string */ CJSON_PUBLIC(const char*) cJSON_Version(void); /* Supply malloc, realloc and free functions to cJSON */ CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); /* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ /* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length); /* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ /* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */ CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated); /* Render a cJSON entity to text for transfer/storage. */ CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); /* Render a cJSON entity to text for transfer/storage without any formatting. */ CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); /* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */ CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); /* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ /* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); /* Delete a cJSON entity and all subentities. */ CJSON_PUBLIC(void) cJSON_Delete(cJSON *item); /* Returns the number of items in an array (or object). */ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); /* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */ CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); /* Get item "string" from object. Case insensitive. */ CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); #if 0 /* Amazon edit */ /* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
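   With this accessor compiled out (the Amazon edit removes the global error
   pointer because it is not thread-safe), callers that need the failure
   position can use the return_parse_end argument instead -- an illustrative
   sketch, not an officially prescribed replacement:

       const char *parse_end = NULL;
       cJSON *item = cJSON_ParseWithOpts(text, &parse_end, 1);
       if (item == NULL) {
           -- parse_end points at or near the offending character in text
       }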
*/ CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); #endif /* Amazon edit */ /* Check item type and return its value */ CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item); CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item); /* These functions check the type of an item */ CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); /* These calls create a cJSON item of the appropriate type. */ CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); /* raw json */ CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); /* Create a string where valuestring references a string so * it will not be freed by cJSON_Delete */ CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); /* Create an object/array that only references it's elements so * they will not be freed by cJSON_Delete */ CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); /* These utilities create an Array of count items. * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/ CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count); /* Append item to the specified array/object. */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item); CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); /* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before * writing to `item->string` */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); /* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); /* Remove/Detach items from Arrays/Objects. 
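   The Detach variants hand ownership of the removed item to the caller (free
   it with cJSON_Delete or attach it to another tree); the Delete variants
   free it immediately. Illustrative sketch with made-up variable names:

       cJSON *moved = cJSON_DetachItemFromObject(src, "config");
       if (moved != NULL) {
           cJSON_AddItemToObject(dst, "config", moved);
       }
       cJSON_DeleteItemFromArray(arr, 0);   -- removes and frees element 0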
*/ CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); /* Update array items. */ CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem); /* Duplicate a cJSON item */ CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); /* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will * need to be released. With recurse!=0, it will duplicate any children connected to the item. * The item->next and ->prev pointers are always zero on return from Duplicate. */ /* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); /* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from strings. * The input pointer json cannot point to a read-only address area, such as a string constant, * but should point to a readable and writable address area. */ CJSON_PUBLIC(void) cJSON_Minify(char *json); /* Helper functions for creating and adding items to an object at the same time. * They return the added item or NULL on failure. */ CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean); CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number); CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string); CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw); CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); /* When assigning an integer value, it needs to be propagated to valuedouble too. */ #define cJSON_SetIntValue(object, number) ((object) ? 
(object)->valueint = (object)->valuedouble = (number) : (number)) /* helper for the cJSON_SetNumberValue macro */ CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); #define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) /* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */ CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring); /* If the object is not a boolean type this does nothing and returns cJSON_Invalid else it returns the new type*/ #define cJSON_SetBoolValue(object, boolValue) ( \ (object != NULL && ((object)->type & (cJSON_False|cJSON_True))) ? \ (object)->type=((object)->type &(~(cJSON_False|cJSON_True)))|((boolValue)?cJSON_True:cJSON_False) : \ cJSON_Invalid\ ) /* Macro for iterating over an array or object */ #define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) /* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ CJSON_PUBLIC(void *) cJSON_malloc(size_t size); CJSON_PUBLIC(void) cJSON_free(void *object); #ifdef __cplusplus } #endif /* Amazon edit */ /* NOLINTEND */ #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/fifo_cache.c000066400000000000000000000044021456575232400242320ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include static int s_fifo_cache_put(struct aws_cache *cache, const void *key, void *p_value); static struct aws_cache_vtable s_fifo_cache_vtable = { .destroy = aws_cache_base_default_destroy, .find = aws_cache_base_default_find, .put = s_fifo_cache_put, .remove = aws_cache_base_default_remove, .clear = aws_cache_base_default_clear, .get_element_count = aws_cache_base_default_get_element_count, }; struct aws_cache *aws_cache_new_fifo( struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t max_items) { AWS_ASSERT(allocator); AWS_ASSERT(max_items); struct aws_cache *fifo_cache = aws_mem_calloc(allocator, 1, sizeof(struct aws_cache)); if (!fifo_cache) { return NULL; } fifo_cache->allocator = allocator; fifo_cache->max_items = max_items; fifo_cache->vtable = &s_fifo_cache_vtable; if (aws_linked_hash_table_init( &fifo_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) { return NULL; } return fifo_cache; } /* fifo cache put implementation */ static int s_fifo_cache_put(struct aws_cache *cache, const void *key, void *p_value) { if (aws_linked_hash_table_put(&cache->table, key, p_value)) { return AWS_OP_ERR; } /* Manage the space if we actually added a new element and the cache is full. */ if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) { /* we're over the cache size limit. 
Remove whatever is in the front of * the linked_hash_table, which is the oldest element */ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table); struct aws_linked_list_node *node = aws_linked_list_front(list); struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); return aws_linked_hash_table_remove(&cache->table, table_node->key); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/file.c000066400000000000000000000227251456575232400231130ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include /* For "special files", the OS often lies about size. * For example, on Amazon Linux 2: * /proc/cpuinfo: size is 0, but contents are several KB of data. * /sys/devices/virtual/dmi/id/product_name: size is 4096, but contents are "c5.2xlarge" * * Therefore, we may need to grow the buffer as we read until EOF. * This is the min/max step size for growth. */ #define MIN_BUFFER_GROWTH_READING_FILES 32 #define MAX_BUFFER_GROWTH_READING_FILES 4096 FILE *aws_fopen(const char *file_path, const char *mode) { if (!file_path || strlen(file_path) == 0) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. path is empty"); aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); return NULL; } if (!mode || strlen(mode) == 0) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. mode is empty"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_string *file_path_str = aws_string_new_from_c_str(aws_default_allocator(), file_path); struct aws_string *mode_str = aws_string_new_from_c_str(aws_default_allocator(), mode); FILE *file = aws_fopen_safe(file_path_str, mode_str); aws_string_destroy(mode_str); aws_string_destroy(file_path_str); return file; } /* Helper function used by aws_byte_buf_init_from_file() and aws_byte_buf_init_from_file_with_size_hint() */ static int s_byte_buf_init_from_file_impl( struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename, bool use_file_size_as_hint, size_t size_hint) { AWS_ZERO_STRUCT(*out_buf); FILE *fp = aws_fopen(filename, "rb"); if (fp == NULL) { goto error; } if (use_file_size_as_hint) { int64_t len64 = 0; if (aws_file_get_length(fp, &len64)) { AWS_LOGF_ERROR( AWS_LS_COMMON_IO, "static: Failed to get file length. file:'%s' error:%s", filename, aws_error_name(aws_last_error())); goto error; } if (len64 >= SIZE_MAX) { aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); AWS_LOGF_ERROR( AWS_LS_COMMON_IO, "static: File too large to read into memory. 
file:'%s' error:%s", filename, aws_error_name(aws_last_error())); goto error; } /* Leave space for null terminator at end of buffer */ size_hint = (size_t)len64 + 1; } aws_byte_buf_init(out_buf, alloc, size_hint); /* Read in a loop until we hit EOF */ while (true) { /* Expand buffer if necessary (at a reasonable rate) */ if (out_buf->len == out_buf->capacity) { size_t additional_capacity = out_buf->capacity; additional_capacity = aws_max_size(MIN_BUFFER_GROWTH_READING_FILES, additional_capacity); additional_capacity = aws_min_size(MAX_BUFFER_GROWTH_READING_FILES, additional_capacity); if (aws_byte_buf_reserve_relative(out_buf, additional_capacity)) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename); goto error; } } size_t space_available = out_buf->capacity - out_buf->len; size_t bytes_read = fread(out_buf->buffer + out_buf->len, 1, space_available, fp); out_buf->len += bytes_read; /* If EOF, we're done! */ if (feof(fp)) { break; } /* If no EOF but we read 0 bytes, there's been an error or at least we need * to treat it like one because we can't just infinitely loop. */ if (bytes_read == 0) { int errno_value = ferror(fp) ? errno : 0; /* Always cache errno before potential side-effect */ aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_READ_FAILURE); AWS_LOGF_ERROR( AWS_LS_COMMON_IO, "static: Failed reading file:'%s' errno:%d aws-error:%s", filename, errno_value, aws_error_name(aws_last_error())); goto error; } } /* A null terminator is appended, but is not included as part of the length field. */ if (out_buf->len == out_buf->capacity) { if (aws_byte_buf_reserve_relative(out_buf, 1)) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename); goto error; } } out_buf->buffer[out_buf->len] = 0; fclose(fp); return AWS_OP_SUCCESS; error: if (fp) { fclose(fp); } aws_byte_buf_clean_up_secure(out_buf); return AWS_OP_ERR; } int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) { return s_byte_buf_init_from_file_impl(out_buf, alloc, filename, true /*use_file_size_as_hint*/, 0 /*size_hint*/); } int aws_byte_buf_init_from_file_with_size_hint( struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename, size_t size_hint) { return s_byte_buf_init_from_file_impl(out_buf, alloc, filename, false /*use_file_size_as_hint*/, size_hint); } bool aws_is_any_directory_separator(char value) { return value == '\\' || value == '/'; } void aws_normalize_directory_separator(struct aws_byte_buf *path) { AWS_PRECONDITION(aws_byte_buf_is_valid(path)); const char local_platform_separator = aws_get_platform_directory_separator(); for (size_t i = 0; i < path->len; ++i) { if (aws_is_any_directory_separator((char)path->buffer[i])) { path->buffer[i] = local_platform_separator; } } AWS_POSTCONDITION(aws_byte_buf_is_valid(path)); } struct aws_directory_iterator { struct aws_linked_list list_data; struct aws_allocator *allocator; struct aws_linked_list_node *current_node; }; struct directory_entry_value { struct aws_directory_entry entry; struct aws_byte_buf path; struct aws_byte_buf relative_path; struct aws_linked_list_node node; }; static bool s_directory_iterator_directory_entry(const struct aws_directory_entry *entry, void *user_data) { struct aws_directory_iterator *iterator = user_data; struct directory_entry_value *value = aws_mem_calloc(iterator->allocator, 1, sizeof(struct directory_entry_value)); value->entry = *entry; 
aws_byte_buf_init_copy_from_cursor(&value->path, iterator->allocator, entry->path); value->entry.path = aws_byte_cursor_from_buf(&value->path); aws_byte_buf_init_copy_from_cursor(&value->relative_path, iterator->allocator, entry->relative_path); value->entry.relative_path = aws_byte_cursor_from_buf(&value->relative_path); aws_linked_list_push_back(&iterator->list_data, &value->node); return true; } struct aws_directory_iterator *aws_directory_entry_iterator_new( struct aws_allocator *allocator, const struct aws_string *path) { struct aws_directory_iterator *iterator = aws_mem_acquire(allocator, sizeof(struct aws_directory_iterator)); iterator->allocator = allocator; aws_linked_list_init(&iterator->list_data); /* the whole point of this iterator is to avoid recursion, so let's do that by passing recurse as false. */ if (AWS_OP_SUCCESS == aws_directory_traverse(allocator, path, false, s_directory_iterator_directory_entry, iterator)) { if (!aws_linked_list_empty(&iterator->list_data)) { iterator->current_node = aws_linked_list_front(&iterator->list_data); } return iterator; } aws_mem_release(allocator, iterator); return NULL; } int aws_directory_entry_iterator_next(struct aws_directory_iterator *iterator) { struct aws_linked_list_node *node = iterator->current_node; if (!node || node->next == aws_linked_list_end(&iterator->list_data)) { return aws_raise_error(AWS_ERROR_LIST_EMPTY); } iterator->current_node = aws_linked_list_next(node); return AWS_OP_SUCCESS; } int aws_directory_entry_iterator_previous(struct aws_directory_iterator *iterator) { struct aws_linked_list_node *node = iterator->current_node; if (!node || node == aws_linked_list_begin(&iterator->list_data)) { return aws_raise_error(AWS_ERROR_LIST_EMPTY); } iterator->current_node = aws_linked_list_prev(node); return AWS_OP_SUCCESS; } void aws_directory_entry_iterator_destroy(struct aws_directory_iterator *iterator) { while (!aws_linked_list_empty(&iterator->list_data)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&iterator->list_data); struct directory_entry_value *value = AWS_CONTAINER_OF(node, struct directory_entry_value, node); aws_byte_buf_clean_up(&value->path); aws_byte_buf_clean_up(&value->relative_path); aws_mem_release(iterator->allocator, value); } aws_mem_release(iterator->allocator, iterator); } const struct aws_directory_entry *aws_directory_entry_iterator_get_value( const struct aws_directory_iterator *iterator) { struct aws_linked_list_node *node = iterator->current_node; if (!iterator->current_node) { return NULL; } struct directory_entry_value *value = AWS_CONTAINER_OF(node, struct directory_entry_value, node); return &value->entry; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/hash_table.c000066400000000000000000001113351456575232400242620ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* For more information on how the RH hash works and in particular how we do * deletions, see: * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/ */ #include #include #include #include #include #include #include /* Include lookup3.c so we can (potentially) inline it and make use of the mix() * macro. */ #include static void s_suppress_unused_lookup3_func_warnings(void) { /* We avoid making changes to lookup3 if we can avoid it, but since it has functions * we're not using, reference them somewhere to suppress the unused function warning. 
*/ (void)hashword; (void)hashword2; (void)hashlittle; (void)hashbig; } /** * Calculate the hash for the given key. * Ensures a reasonable semantics for null keys. * Ensures that no object ever hashes to 0, which is the sentinal value for an empty hash element. */ static uint64_t s_hash_for(struct hash_table_state *state, const void *key) { AWS_PRECONDITION(hash_table_state_is_valid(state)); s_suppress_unused_lookup3_func_warnings(); if (key == NULL) { /* The best answer */ return 42; } uint64_t hash_code = state->hash_fn(key); if (!hash_code) { hash_code = 1; } AWS_RETURN_WITH_POSTCONDITION(hash_code, hash_code != 0); } /** * Check equality of two objects, with a reasonable semantics for null. */ static bool s_safe_eq_check(aws_hash_callback_eq_fn *equals_fn, const void *a, const void *b) { /* Short circuit if the pointers are the same */ if (a == b) { return true; } /* If one but not both are null, the objects are not equal */ if (a == NULL || b == NULL) { return false; } /* If both are non-null, call the underlying equals fn */ return equals_fn(a, b); } /** * Check equality of two hash keys, with a reasonable semantics for null keys. */ static bool s_hash_keys_eq(struct hash_table_state *state, const void *a, const void *b) { AWS_PRECONDITION(hash_table_state_is_valid(state)); bool rval = s_safe_eq_check(state->equals_fn, a, b); AWS_RETURN_WITH_POSTCONDITION(rval, hash_table_state_is_valid(state)); } static size_t s_index_for(struct hash_table_state *map, struct hash_table_entry *entry) { AWS_PRECONDITION(hash_table_state_is_valid(map)); size_t index = entry - map->slots; AWS_RETURN_WITH_POSTCONDITION(index, index < map->size && hash_table_state_is_valid(map)); } #if 0 /* Useful debugging code for anyone working on this in the future */ static uint64_t s_distance(struct hash_table_state *state, int index) { return (index - state->slots[index].hash_code) & state->mask; } void hash_dump(struct aws_hash_table *tbl) { struct hash_table_state *state = tbl->p_impl; printf("Dumping hash table contents:\n"); for (int i = 0; i < state->size; i++) { printf("%7d: ", i); struct hash_table_entry *e = &state->slots[i]; if (!e->hash_code) { printf("EMPTY\n"); } else { printf("k: %p v: %p hash_code: %lld displacement: %lld\n", e->element.key, e->element.value, e->hash_code, (i - e->hash_code) & state->mask); } } } #endif #if 0 /* Not currently exposed as an API. Should we have something like this? 
Useful for benchmarks */ AWS_COMMON_API void aws_hash_table_print_stats(struct aws_hash_table *table) { struct hash_table_state *state = table->p_impl; uint64_t total_disp = 0; uint64_t max_disp = 0; printf("\n=== Hash table statistics ===\n"); printf("Table size: %zu/%zu (max load %zu, remaining %zu)\n", state->entry_count, state->size, state->max_load, state->max_load - state->entry_count); printf("Load factor: %02.2lf%% (max %02.2lf%%)\n", 100.0 * ((double)state->entry_count / (double)state->size), state->max_load_factor); for (size_t i = 0; i < state->size; i++) { if (state->slots[i].hash_code) { int displacement = distance(state, i); total_disp += displacement; if (displacement > max_disp) { max_disp = displacement; } } } size_t *disp_counts = calloc(sizeof(*disp_counts), max_disp + 1); for (size_t i = 0; i < state->size; i++) { if (state->slots[i].hash_code) { disp_counts[distance(state, i)]++; } } uint64_t median = 0; uint64_t passed = 0; for (uint64_t i = 0; i <= max_disp && passed < total_disp / 2; i++) { median = i; passed += disp_counts[i]; } printf("Displacement statistics: Avg %02.2lf max %llu median %llu\n", (double)total_disp / (double)state->entry_count, max_disp, median); for (uint64_t i = 0; i <= max_disp; i++) { printf("Displacement %2lld: %zu entries\n", i, disp_counts[i]); } free(disp_counts); printf("\n"); } #endif size_t aws_hash_table_get_entry_count(const struct aws_hash_table *map) { struct hash_table_state *state = map->p_impl; return state->entry_count; } /* Given a header template, allocates space for a hash table of the appropriate * size, and copies the state header into this allocated memory, which is * returned. */ static struct hash_table_state *s_alloc_state(const struct hash_table_state *template) { size_t required_bytes; if (hash_table_state_required_bytes(template->size, &required_bytes)) { return NULL; } /* An empty slot has hashcode 0. So this marks all slots as empty */ struct hash_table_state *state = aws_mem_calloc(template->alloc, 1, required_bytes); if (state == NULL) { return state; } *state = *template; return state; } /* Computes the correct size and max_load based on a requested size. 
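 * For example, a request for 20 expected elements gives size = 32 (the next
 * power of two), max_load = (size_t)(0.95 * 32) = 30 with the default load
 * factor set in aws_hash_table_init, and mask = 31, so (index & mask) is the
 * same as (index % 32) in the probe arithmetic.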
*/ static int s_update_template_size(struct hash_table_state *template, size_t expected_elements) { size_t min_size = expected_elements; if (min_size < 2) { min_size = 2; } /* size is always a power of 2 */ size_t size; if (aws_round_up_to_power_of_two(min_size, &size)) { return AWS_OP_ERR; } /* Update the template once we've calculated everything successfully */ template->size = size; template->max_load = (size_t)(template->max_load_factor * (double)template->size); /* Ensure that there is always at least one empty slot in the hash table */ if (template->max_load >= size) { template->max_load = size - 1; } /* Since size is a power of 2: (index & (size - 1)) == (index % size) */ template->mask = size - 1; return AWS_OP_SUCCESS; } int aws_hash_table_init( struct aws_hash_table *map, struct aws_allocator *alloc, size_t size, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn) { AWS_PRECONDITION(map != NULL); AWS_PRECONDITION(alloc != NULL); AWS_PRECONDITION(hash_fn != NULL); AWS_PRECONDITION(equals_fn != NULL); struct hash_table_state template; template.hash_fn = hash_fn; template.equals_fn = equals_fn; template.destroy_key_fn = destroy_key_fn; template.destroy_value_fn = destroy_value_fn; template.alloc = alloc; template.entry_count = 0; template.max_load_factor = 0.95; /* TODO - make configurable? */ if (s_update_template_size(&template, size)) { return AWS_OP_ERR; } map->p_impl = s_alloc_state(&template); if (!map->p_impl) { return AWS_OP_ERR; } AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map)); } void aws_hash_table_clean_up(struct aws_hash_table *map) { AWS_PRECONDITION(map != NULL); AWS_PRECONDITION( map->p_impl == NULL || aws_hash_table_is_valid(map), "Input aws_hash_table [map] must be valid or hash_table_state pointer [map->p_impl] must be NULL, in case " "aws_hash_table_clean_up was called twice."); struct hash_table_state *state = map->p_impl; /* Ensure that we're idempotent */ if (!state) { return; } aws_hash_table_clear(map); aws_mem_release(map->p_impl->alloc, map->p_impl); map->p_impl = NULL; AWS_POSTCONDITION(map->p_impl == NULL); } void aws_hash_table_swap(struct aws_hash_table *AWS_RESTRICT a, struct aws_hash_table *AWS_RESTRICT b) { AWS_PRECONDITION(a != b); struct aws_hash_table tmp = *a; *a = *b; *b = tmp; } void aws_hash_table_move(struct aws_hash_table *AWS_RESTRICT to, struct aws_hash_table *AWS_RESTRICT from) { AWS_PRECONDITION(to != NULL); AWS_PRECONDITION(from != NULL); AWS_PRECONDITION(to != from); AWS_PRECONDITION(aws_hash_table_is_valid(from)); *to = *from; AWS_ZERO_STRUCT(*from); AWS_POSTCONDITION(aws_hash_table_is_valid(to)); } /* Tries to find where the requested key is or where it should go if put. * Returns AWS_ERROR_SUCCESS if the item existed (leaving it in *entry), * or AWS_ERROR_HASHTBL_ITEM_NOT_FOUND if it did not (putting its destination * in *entry). Note that this does not take care of displacing whatever was in * that entry before. * * probe_idx is set to the probe index of the entry found. */ static int s_find_entry1( struct hash_table_state *state, uint64_t hash_code, const void *key, struct hash_table_entry **p_entry, size_t *p_probe_idx); /* Inlined fast path: Check the first slot, only. */ /* TODO: Force inlining? 
*/ static int inline s_find_entry( struct hash_table_state *state, uint64_t hash_code, const void *key, struct hash_table_entry **p_entry, size_t *p_probe_idx) { struct hash_table_entry *entry = &state->slots[hash_code & state->mask]; if (entry->hash_code == 0) { if (p_probe_idx) { *p_probe_idx = 0; } *p_entry = entry; return AWS_ERROR_HASHTBL_ITEM_NOT_FOUND; } if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) { if (p_probe_idx) { *p_probe_idx = 0; } *p_entry = entry; return AWS_OP_SUCCESS; } return s_find_entry1(state, hash_code, key, p_entry, p_probe_idx); } static int s_find_entry1( struct hash_table_state *state, uint64_t hash_code, const void *key, struct hash_table_entry **p_entry, size_t *p_probe_idx) { size_t probe_idx = 1; /* If we find a deleted entry, we record that index and return it as our probe index (i.e. we'll keep searching to * see if it already exists, but if not we'll overwrite the deleted entry). */ int rv; struct hash_table_entry *entry; /* This loop is guaranteed to terminate because entry_probe is bounded above by state->mask (i.e. state->size - 1). * Since probe_idx increments every loop iteration, it will become larger than entry_probe after at most state->size * transitions and the loop will exit (if it hasn't already) */ while (1) { #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif uint64_t index = (hash_code + probe_idx) & state->mask; #ifdef CBMC # pragma CPROVER check pop #endif entry = &state->slots[index]; if (!entry->hash_code) { rv = AWS_ERROR_HASHTBL_ITEM_NOT_FOUND; break; } if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) { rv = AWS_ERROR_SUCCESS; break; } #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif uint64_t entry_probe = (index - entry->hash_code) & state->mask; #ifdef CBMC # pragma CPROVER check pop #endif if (entry_probe < probe_idx) { /* We now know that our target entry cannot exist; if it did exist, * it would be at the current location as it has a higher probe * length than the entry we are examining and thus would have * preempted that item */ rv = AWS_ERROR_HASHTBL_ITEM_NOT_FOUND; break; } probe_idx++; } *p_entry = entry; if (p_probe_idx) { *p_probe_idx = probe_idx; } return rv; } int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem) { AWS_PRECONDITION(aws_hash_table_is_valid(map)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(p_elem), "Input aws_hash_element pointer [p_elem] must be writable."); struct hash_table_state *state = map->p_impl; uint64_t hash_code = s_hash_for(state, key); struct hash_table_entry *entry; int rv = s_find_entry(state, hash_code, key, &entry, NULL); if (rv == AWS_ERROR_SUCCESS) { *p_elem = &entry->element; } else { *p_elem = NULL; } AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map)); } /** * Attempts to find a home for the given entry. * If the entry was empty (i.e. hash-code of 0), then the function does nothing and returns NULL * Otherwise, it emplaces the item, and returns a pointer to the newly emplaced entry. * This function is only called after the hash-table has been expanded to fit the new element, * so it should never fail. 
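 * The placement follows the Robin Hood rule: walk the probe sequence, and
 * whenever the resident entry sits closer to its home slot than the entry in
 * hand, swap them and continue probing with the displaced entry. The first
 * slot written in this way is the one returned.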
*/ static struct hash_table_entry *s_emplace_item( struct hash_table_state *state, struct hash_table_entry entry, size_t probe_idx) { AWS_PRECONDITION(hash_table_state_is_valid(state)); if (entry.hash_code == 0) { AWS_RETURN_WITH_POSTCONDITION(NULL, hash_table_state_is_valid(state)); } struct hash_table_entry *rval = NULL; /* Since a valid hash_table has at least one empty element, this loop will always terminate in at most linear time */ while (entry.hash_code != 0) { #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif size_t index = (size_t)(entry.hash_code + probe_idx) & state->mask; #ifdef CBMC # pragma CPROVER check pop #endif struct hash_table_entry *victim = &state->slots[index]; #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif size_t victim_probe_idx = (size_t)(index - victim->hash_code) & state->mask; #ifdef CBMC # pragma CPROVER check pop #endif if (!victim->hash_code || victim_probe_idx < probe_idx) { /* The first thing we emplace is the entry itself. A pointer to its location becomes the rval */ if (!rval) { rval = victim; } struct hash_table_entry tmp = *victim; *victim = entry; entry = tmp; probe_idx = victim_probe_idx + 1; } else { probe_idx++; } } AWS_RETURN_WITH_POSTCONDITION( rval, hash_table_state_is_valid(state) && rval >= &state->slots[0] && rval < &state->slots[state->size], "Output hash_table_entry pointer [rval] must point in the slots of [state]."); } static int s_expand_table(struct aws_hash_table *map) { struct hash_table_state *old_state = map->p_impl; struct hash_table_state template = *old_state; size_t new_size; if (aws_mul_size_checked(template.size, 2, &new_size)) { return AWS_OP_ERR; } if (s_update_template_size(&template, new_size)) { return AWS_OP_ERR; } struct hash_table_state *new_state = s_alloc_state(&template); if (!new_state) { return AWS_OP_ERR; } for (size_t i = 0; i < old_state->size; i++) { struct hash_table_entry entry = old_state->slots[i]; if (entry.hash_code) { /* We can directly emplace since we know we won't put the same item twice */ s_emplace_item(new_state, entry, 0); } } map->p_impl = new_state; aws_mem_release(new_state->alloc, old_state); return AWS_OP_SUCCESS; } int aws_hash_table_create( struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem, int *was_created) { struct hash_table_state *state = map->p_impl; uint64_t hash_code = s_hash_for(state, key); struct hash_table_entry *entry; size_t probe_idx; int ignored; if (!was_created) { was_created = &ignored; } int rv = s_find_entry(state, hash_code, key, &entry, &probe_idx); if (rv == AWS_ERROR_SUCCESS) { if (p_elem) { *p_elem = &entry->element; } *was_created = 0; return AWS_OP_SUCCESS; } /* Okay, we need to add an entry. Check the load factor first. */ size_t incr_entry_count; if (aws_add_size_checked(state->entry_count, 1, &incr_entry_count)) { return AWS_OP_ERR; } if (incr_entry_count > state->max_load) { rv = s_expand_table(map); if (rv != AWS_OP_SUCCESS) { /* Any error was already raised in expand_table */ return rv; } state = map->p_impl; /* If we expanded the table, we need to discard the probe index returned from find_entry, * as it's likely that we can find a more desirable slot. If we don't, then later gets will * terminate before reaching our probe index. * n.b. currently we ignore this probe_idx subsequently, but leaving this here so we don't * forget when we optimize later. 
*/ probe_idx = 0; } state->entry_count++; struct hash_table_entry new_entry; new_entry.element.key = key; new_entry.element.value = NULL; new_entry.hash_code = hash_code; entry = s_emplace_item(state, new_entry, probe_idx); if (p_elem) { *p_elem = &entry->element; } *was_created = 1; return AWS_OP_SUCCESS; } AWS_COMMON_API int aws_hash_table_put(struct aws_hash_table *map, const void *key, void *value, int *was_created) { struct aws_hash_element *p_elem; int was_created_fallback; if (!was_created) { was_created = &was_created_fallback; } if (aws_hash_table_create(map, key, &p_elem, was_created)) { return AWS_OP_ERR; } /* * aws_hash_table_create might resize the table, which results in map->p_impl changing. * It is therefore important to wait to read p_impl until after we return. */ struct hash_table_state *state = map->p_impl; if (!*was_created) { if (p_elem->key != key && state->destroy_key_fn) { state->destroy_key_fn((void *)p_elem->key); } if (state->destroy_value_fn) { state->destroy_value_fn((void *)p_elem->value); } } p_elem->key = key; p_elem->value = value; return AWS_OP_SUCCESS; } /* Clears an entry. Does _not_ invoke destructor callbacks. * Returns the last slot touched (note that if we wrap, we'll report an index * lower than the original entry's index) */ static size_t s_remove_entry(struct hash_table_state *state, struct hash_table_entry *entry) { AWS_PRECONDITION(hash_table_state_is_valid(state)); AWS_PRECONDITION(state->entry_count > 0); AWS_PRECONDITION( entry >= &state->slots[0] && entry < &state->slots[state->size], "Input hash_table_entry [entry] pointer must point in the available slots."); state->entry_count--; /* Shift subsequent entries back until we find an entry that belongs at its * current position. This is important to ensure that subsequent searches * don't terminate at the removed element. */ size_t index = s_index_for(state, entry); /* There is always at least one empty slot in the hash table, so this loop always terminates */ while (1) { size_t next_index = (index + 1) & state->mask; /* If we hit an empty slot, stop */ if (!state->slots[next_index].hash_code) { break; } /* If the next slot is at the start of the probe sequence, stop. * We know that nothing with an earlier home slot is after this; * otherwise this index-zero entry would have been evicted from its * home. 
*/ if ((state->slots[next_index].hash_code & state->mask) == next_index) { break; } /* Okay, shift this one back */ state->slots[index] = state->slots[next_index]; index = next_index; } /* Clear the entry we shifted out of */ AWS_ZERO_STRUCT(state->slots[index]); AWS_RETURN_WITH_POSTCONDITION(index, hash_table_state_is_valid(state) && index <= state->size); } int aws_hash_table_remove( struct aws_hash_table *map, const void *key, struct aws_hash_element *p_value, int *was_present) { AWS_PRECONDITION(aws_hash_table_is_valid(map)); AWS_PRECONDITION( p_value == NULL || AWS_OBJECT_PTR_IS_WRITABLE(p_value), "Input pointer [p_value] must be NULL or writable."); AWS_PRECONDITION( was_present == NULL || AWS_OBJECT_PTR_IS_WRITABLE(was_present), "Input pointer [was_present] must be NULL or writable."); struct hash_table_state *state = map->p_impl; uint64_t hash_code = s_hash_for(state, key); struct hash_table_entry *entry; int ignored; if (!was_present) { was_present = &ignored; } int rv = s_find_entry(state, hash_code, key, &entry, NULL); if (rv != AWS_ERROR_SUCCESS) { *was_present = 0; AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map)); } *was_present = 1; if (p_value) { *p_value = entry->element; } else { if (state->destroy_key_fn) { state->destroy_key_fn((void *)entry->element.key); } if (state->destroy_value_fn) { state->destroy_value_fn(entry->element.value); } } s_remove_entry(state, entry); AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map)); } int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_element *p_value) { AWS_PRECONDITION(aws_hash_table_is_valid(map)); AWS_PRECONDITION(p_value != NULL); struct hash_table_state *state = map->p_impl; struct hash_table_entry *entry = AWS_CONTAINER_OF(p_value, struct hash_table_entry, element); s_remove_entry(state, entry); AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map)); } int aws_hash_table_foreach( struct aws_hash_table *map, int (*callback)(void *context, struct aws_hash_element *pElement), void *context) { for (struct aws_hash_iter iter = aws_hash_iter_begin(map); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { int rv = callback(context, &iter.element); if (rv & AWS_COMMON_HASH_TABLE_ITER_ERROR) { int error = aws_last_error(); if (error == AWS_ERROR_SUCCESS) { aws_raise_error(AWS_ERROR_UNKNOWN); } return AWS_OP_ERR; } if (rv & AWS_COMMON_HASH_TABLE_ITER_DELETE) { aws_hash_iter_delete(&iter, false); } if (!(rv & AWS_COMMON_HASH_TABLE_ITER_CONTINUE)) { break; } } return AWS_OP_SUCCESS; } bool aws_hash_table_eq( const struct aws_hash_table *a, const struct aws_hash_table *b, aws_hash_callback_eq_fn *value_eq) { AWS_PRECONDITION(aws_hash_table_is_valid(a)); AWS_PRECONDITION(aws_hash_table_is_valid(b)); AWS_PRECONDITION(value_eq != NULL); if (aws_hash_table_get_entry_count(a) != aws_hash_table_get_entry_count(b)) { AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b)); } /* * Now that we have established that the two tables have the same number of * entries, we can simply iterate one and compare against the same key in * the other. 
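 * Checking one direction is enough: the entry counts match and keys within a
 * table are unique, so if every key of [a] appears in [b] with an equal
 * value, the two tables necessarily contain exactly the same entries.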
*/ for (size_t i = 0; i < a->p_impl->size; ++i) { const struct hash_table_entry *const a_entry = &a->p_impl->slots[i]; if (a_entry->hash_code == 0) { continue; } struct aws_hash_element *b_element = NULL; aws_hash_table_find(b, a_entry->element.key, &b_element); if (!b_element) { /* Key is present in A only */ AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b)); } if (!s_safe_eq_check(value_eq, a_entry->element.value, b_element->value)) { AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b)); } } AWS_RETURN_WITH_POSTCONDITION(true, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b)); } /** * Given an iterator, and a start slot, find the next available filled slot if it exists * Otherwise, return an iter that will return true for aws_hash_iter_done(). * Note that aws_hash_iter_is_valid() need not hold on entry to the function, since * it can be called on a partially constructed iter from aws_hash_iter_begin(). * * Note that calling this on an iterator which is "done" is idempotent: it will return another * iterator which is "done". */ static inline void s_get_next_element(struct aws_hash_iter *iter, size_t start_slot) { AWS_PRECONDITION(iter != NULL); AWS_PRECONDITION(aws_hash_table_is_valid(iter->map)); struct hash_table_state *state = iter->map->p_impl; size_t limit = iter->limit; for (size_t i = start_slot; i < limit; i++) { struct hash_table_entry *entry = &state->slots[i]; if (entry->hash_code) { iter->element = entry->element; iter->slot = i; iter->status = AWS_HASH_ITER_STATUS_READY_FOR_USE; return; } } iter->element.key = NULL; iter->element.value = NULL; iter->slot = iter->limit; iter->status = AWS_HASH_ITER_STATUS_DONE; AWS_POSTCONDITION(aws_hash_iter_is_valid(iter)); } struct aws_hash_iter aws_hash_iter_begin(const struct aws_hash_table *map) { AWS_PRECONDITION(aws_hash_table_is_valid(map)); struct hash_table_state *state = map->p_impl; struct aws_hash_iter iter; AWS_ZERO_STRUCT(iter); iter.map = map; iter.limit = state->size; s_get_next_element(&iter, 0); AWS_RETURN_WITH_POSTCONDITION( iter, aws_hash_iter_is_valid(&iter) && (iter.status == AWS_HASH_ITER_STATUS_DONE || iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE), "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE."); } bool aws_hash_iter_done(const struct aws_hash_iter *iter) { AWS_PRECONDITION(aws_hash_iter_is_valid(iter)); AWS_PRECONDITION( iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] must either be done, or ready to use."); /* * SIZE_MAX is a valid (non-terminal) value for iter->slot in the event that * we delete slot 0. See comments in aws_hash_iter_delete. * * As such we must use == rather than >= here. 
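 *
 * Concretely (hypothetical walk): deleting the element in slot 0 leaves iter->slot == SIZE_MAX;
 * the following aws_hash_iter_next computes iter->slot + 1, which wraps back to 0 (the
 * intentional unsigned wrap guarded by the CBMC pragmas below), and resumes the scan at slot 0.
 * A >= comparison here would instead report that iterator as already done.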
*/ bool rval = (iter->slot == iter->limit); AWS_POSTCONDITION( iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE."); AWS_POSTCONDITION( rval == (iter->status == AWS_HASH_ITER_STATUS_DONE), "Output bool [rval] must be true if and only if the status of [iter] is DONE."); AWS_POSTCONDITION(aws_hash_iter_is_valid(iter)); return rval; } void aws_hash_iter_next(struct aws_hash_iter *iter) { AWS_PRECONDITION(aws_hash_iter_is_valid(iter)); #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif s_get_next_element(iter, iter->slot + 1); #ifdef CBMC # pragma CPROVER check pop #endif AWS_POSTCONDITION( iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE."); AWS_POSTCONDITION(aws_hash_iter_is_valid(iter)); } void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents) { AWS_PRECONDITION( iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] must be ready for use."); AWS_PRECONDITION(aws_hash_iter_is_valid(iter)); AWS_PRECONDITION( iter->map->p_impl->entry_count > 0, "The hash_table_state pointed by input [iter] must contain at least one entry."); struct hash_table_state *state = iter->map->p_impl; if (destroy_contents) { if (state->destroy_key_fn) { state->destroy_key_fn((void *)iter->element.key); } if (state->destroy_value_fn) { state->destroy_value_fn(iter->element.value); } } size_t last_index = s_remove_entry(state, &state->slots[iter->slot]); /* If we shifted elements that are not part of the window we intend to iterate * over, it means we shifted an element that we already visited into the * iter->limit - 1 position. To avoid double iteration, we'll now reduce the * limit to compensate. * * Note that last_index cannot equal iter->slot, because slots[iter->slot] * is empty before we start walking the table. */ if (last_index < iter->slot || last_index >= iter->limit) { iter->limit--; } /* * After removing this entry, the next entry might be in the same slot, or * in some later slot, or we might have no further entries. * * We also expect that the caller will call aws_hash_iter_done and aws_hash_iter_next * after this delete call. This gets a bit tricky if we just deleted the value * in slot 0, and a new value has shifted in. * * To deal with this, we'll just step back one slot, and let _next start iteration * at our current slot. Note that if we just deleted slot 0, this will result in * underflowing to SIZE_MAX; we have to take care in aws_hash_iter_done to avoid * treating this as an end-of-iteration condition. 
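 *
 * A minimal delete-while-iterating sketch (my_table assumed initialized, should_drop() is a
 * hypothetical predicate):
 *
 *   for (struct aws_hash_iter it = aws_hash_iter_begin(&my_table); !aws_hash_iter_done(&it);
 *        aws_hash_iter_next(&it)) {
 *       if (should_drop(it.element.key)) {
 *           aws_hash_iter_delete(&it, true);
 *       }
 *   }
 *
 * aws_hash_iter_next() is still called after the delete; the slot step-back described above is
 * what keeps that pattern from skipping an entry that may have shifted into the freed slot.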
*/ #ifdef CBMC # pragma CPROVER check push # pragma CPROVER check disable "unsigned-overflow" #endif iter->slot--; #ifdef CBMC # pragma CPROVER check pop #endif iter->status = AWS_HASH_ITER_STATUS_DELETE_CALLED; AWS_POSTCONDITION( iter->status == AWS_HASH_ITER_STATUS_DELETE_CALLED, "The status of output aws_hash_iter [iter] must be DELETE_CALLED."); AWS_POSTCONDITION(aws_hash_iter_is_valid(iter)); } void aws_hash_table_clear(struct aws_hash_table *map) { AWS_PRECONDITION(aws_hash_table_is_valid(map)); struct hash_table_state *state = map->p_impl; /* Check that we have at least one destructor before iterating over the table */ if (state->destroy_key_fn || state->destroy_value_fn) { for (size_t i = 0; i < state->size; ++i) { struct hash_table_entry *entry = &state->slots[i]; if (!entry->hash_code) { continue; } if (state->destroy_key_fn) { state->destroy_key_fn((void *)entry->element.key); } if (state->destroy_value_fn) { state->destroy_value_fn(entry->element.value); } } } /* Since hash code 0 represents an empty slot we can just zero out the * entire table. */ memset(state->slots, 0, sizeof(*state->slots) * state->size); state->entry_count = 0; AWS_POSTCONDITION(aws_hash_table_is_valid(map)); } uint64_t aws_hash_c_string(const void *item) { AWS_PRECONDITION(aws_c_string_is_valid(item)); const char *str = item; /* first digits of pi in hex */ uint32_t b = 0x3243F6A8, c = 0x885A308D; hashlittle2(str, strlen(str), &c, &b); return ((uint64_t)b << 32) | c; } uint64_t aws_hash_string(const void *item) { AWS_PRECONDITION(aws_string_is_valid(item)); const struct aws_string *str = item; /* first digits of pi in hex */ uint32_t b = 0x3243F6A8, c = 0x885A308D; hashlittle2(aws_string_bytes(str), str->len, &c, &b); AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_string_is_valid(str)); } uint64_t aws_hash_byte_cursor_ptr(const void *item) { AWS_PRECONDITION(aws_byte_cursor_is_valid(item)); const struct aws_byte_cursor *cur = item; /* first digits of pi in hex */ uint32_t b = 0x3243F6A8, c = 0x885A308D; hashlittle2(cur->ptr, cur->len, &c, &b); AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_byte_cursor_is_valid(cur)); /* NOLINT */ } uint64_t aws_hash_ptr(const void *item) { /* Since the numeric value of the pointer is considered, not the memory behind it, 0 is an acceptable value */ /* first digits of e in hex * 2.b7e 1516 28ae d2a6 */ uint32_t b = 0x2b7e1516, c = 0x28aed2a6; hashlittle2(&item, sizeof(item), &c, &b); return ((uint64_t)b << 32) | c; } uint64_t aws_hash_combine(uint64_t item1, uint64_t item2) { uint32_t b = item2 & 0xFFFFFFFF; /* LSB */ uint32_t c = item2 >> 32; /* MSB */ hashlittle2(&item1, sizeof(item1), &c, &b); return ((uint64_t)b << 32) | c; } bool aws_hash_callback_c_str_eq(const void *a, const void *b) { AWS_PRECONDITION(aws_c_string_is_valid(a)); AWS_PRECONDITION(aws_c_string_is_valid(b)); bool rval = !strcmp(a, b); AWS_RETURN_WITH_POSTCONDITION(rval, aws_c_string_is_valid(a) && aws_c_string_is_valid(b)); } bool aws_hash_callback_string_eq(const void *a, const void *b) { AWS_PRECONDITION(aws_string_is_valid(a)); AWS_PRECONDITION(aws_string_is_valid(b)); bool rval = aws_string_eq(a, b); AWS_RETURN_WITH_POSTCONDITION(rval, aws_string_is_valid(a) && aws_string_is_valid(b)); } void aws_hash_callback_string_destroy(void *a) { AWS_PRECONDITION(aws_string_is_valid(a)); aws_string_destroy(a); } bool aws_ptr_eq(const void *a, const void *b) { return a == b; } /** * Best-effort check of hash_table_state data-structure invariants * Some invariants, such as that the 
number of entries is actually the * same as the entry_count field, would require a loop to check */ bool aws_hash_table_is_valid(const struct aws_hash_table *map) { return map && map->p_impl && hash_table_state_is_valid(map->p_impl); } /** * Best-effort check of hash_table_state data-structure invariants * Some invariants, such as that the number of entries is actually the * same as the entry_count field, would require a loop to check */ bool hash_table_state_is_valid(const struct hash_table_state *map) { if (!map) { return false; } bool hash_fn_nonnull = (map->hash_fn != NULL); bool equals_fn_nonnull = (map->equals_fn != NULL); /*destroy_key_fn and destroy_value_fn are both allowed to be NULL*/ bool alloc_nonnull = (map->alloc != NULL); bool size_at_least_two = (map->size >= 2); bool size_is_power_of_two = aws_is_power_of_two(map->size); bool entry_count = (map->entry_count <= map->max_load); bool max_load = (map->max_load < map->size); bool mask_is_correct = (map->mask == (map->size - 1)); bool max_load_factor_bounded = map->max_load_factor == 0.95; //(map->max_load_factor < 1.0); bool slots_allocated = AWS_MEM_IS_WRITABLE(&map->slots[0], sizeof(map->slots[0]) * map->size); return hash_fn_nonnull && equals_fn_nonnull && alloc_nonnull && size_at_least_two && size_is_power_of_two && entry_count && max_load && mask_is_correct && max_load_factor_bounded && slots_allocated; } /** * Given a pointer to a hash_iter, checks that it is well-formed, with all data-structure invariants. */ bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter) { if (!iter) { return false; } if (!iter->map) { return false; } if (!aws_hash_table_is_valid(iter->map)) { return false; } if (iter->limit > iter->map->p_impl->size) { return false; } switch (iter->status) { case AWS_HASH_ITER_STATUS_DONE: /* Done iff slot == limit */ return iter->slot == iter->limit; case AWS_HASH_ITER_STATUS_DELETE_CALLED: /* iter->slot can underflow to SIZE_MAX after a delete * see the comments for aws_hash_iter_delete() */ return iter->slot <= iter->limit || iter->slot == SIZE_MAX; case AWS_HASH_ITER_STATUS_READY_FOR_USE: /* A slot must point to a valid location (i.e. hash_code != 0) */ return iter->slot < iter->limit && iter->map->p_impl->slots[iter->slot].hash_code != 0; } /* Invalid status code */ return false; } /** * Determine the total number of bytes needed for a hash-table with * "size" slots. If the result would overflow a size_t, return * AWS_OP_ERR; otherwise, return AWS_OP_SUCCESS with the result in * "required_bytes". */ int hash_table_state_required_bytes(size_t size, size_t *required_bytes) { size_t elemsize; if (aws_mul_size_checked(size, sizeof(struct hash_table_entry), &elemsize)) { return AWS_OP_ERR; } if (aws_add_size_checked(elemsize, sizeof(struct hash_table_state), required_bytes)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/json.c000066400000000000000000000322341456575232400231410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
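 *
 * Implementation note: aws_json_value is a thin veneer over cJSON. The opaque aws_json_value
 * pointers handed back to callers are cJSON nodes cast directly, and aws_json_module_init()
 * at the bottom of this file installs cJSON allocation hooks so every node is allocated from
 * the allocator supplied by the caller.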
*/ #include #include #include #include #include "external/cJSON.h" static struct aws_allocator *s_aws_json_module_allocator = NULL; static bool s_aws_json_module_initialized = false; struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string) { struct aws_string *tmp = aws_string_new_from_cursor(allocator, &string); void *ret_val = cJSON_CreateString(aws_string_c_str(tmp)); aws_string_destroy_secure(tmp); return ret_val; } struct aws_json_value *aws_json_value_new_number(struct aws_allocator *allocator, double number) { (void)allocator; // prevent warnings over unused parameter return (void *)cJSON_CreateNumber(number); } struct aws_json_value *aws_json_value_new_array(struct aws_allocator *allocator) { (void)allocator; // prevent warnings over unused parameter return (void *)cJSON_CreateArray(); } struct aws_json_value *aws_json_value_new_boolean(struct aws_allocator *allocator, bool boolean) { (void)allocator; // prevent warnings over unused parameter return (void *)cJSON_CreateBool(boolean); } struct aws_json_value *aws_json_value_new_null(struct aws_allocator *allocator) { (void)allocator; // prevent warnings over unused parameter return (void *)cJSON_CreateNull(); } struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator) { (void)allocator; // prevent warnings over unused parameter return (void *)cJSON_CreateObject(); } int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output) { const struct cJSON *cjson = (const struct cJSON *)value; if (!cJSON_IsString(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *output = aws_byte_cursor_from_c_str(cJSON_GetStringValue(cjson)); return AWS_OP_SUCCESS; } int aws_json_value_get_number(const struct aws_json_value *value, double *output) { const struct cJSON *cjson = (const struct cJSON *)value; if (!cJSON_IsNumber(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *output = cjson->valuedouble; return AWS_OP_SUCCESS; } int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output) { const struct cJSON *cjson = (const struct cJSON *)value; if (!cJSON_IsBool(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *output = cjson->type == cJSON_True; return AWS_OP_SUCCESS; } int aws_json_value_add_to_object( struct aws_json_value *object, struct aws_byte_cursor key, struct aws_json_value *value) { int result = AWS_OP_ERR; struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key); struct cJSON *cjson = (struct cJSON *)object; if (!cJSON_IsObject(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } struct cJSON *cjson_value = (struct cJSON *)value; if (cJSON_IsInvalid(cjson_value)) { result = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } if (cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) { goto done; } cJSON_AddItemToObject(cjson, aws_string_c_str(tmp), cjson_value); result = AWS_OP_SUCCESS; done: aws_string_destroy_secure(tmp); return result; } struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_value *object, struct aws_byte_cursor key) { void *return_value = NULL; struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key); const struct cJSON *cjson = (const struct cJSON *)object; if (!cJSON_IsObject(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) { goto done; } return_value = (void 
*)cJSON_GetObjectItem(cjson, aws_string_c_str(tmp)); done: aws_string_destroy_secure(tmp); return return_value; } bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte_cursor key) { struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key); bool result = false; const struct cJSON *cjson = (const struct cJSON *)object; if (!cJSON_IsObject(cjson)) { goto done; } if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) { goto done; } result = true; done: aws_string_destroy_secure(tmp); return result; } int aws_json_value_remove_from_object(struct aws_json_value *object, struct aws_byte_cursor key) { int result = AWS_OP_ERR; struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key); struct cJSON *cjson = (struct cJSON *)object; if (!cJSON_IsObject(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } if (!cJSON_HasObjectItem(cjson, aws_string_c_str(tmp))) { goto done; } cJSON_DeleteItemFromObject(cjson, aws_string_c_str(tmp)); result = AWS_OP_SUCCESS; done: aws_string_destroy_secure(tmp); return result; } int aws_json_const_iterate_object( const struct aws_json_value *object, aws_json_on_member_encountered_const_fn *on_member, void *user_data) { int result = AWS_OP_ERR; const struct cJSON *cjson = (const struct cJSON *)object; if (!cJSON_IsObject(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } const cJSON *key = NULL; cJSON_ArrayForEach(key, cjson) { bool should_continue = true; struct aws_byte_cursor key_cur = aws_byte_cursor_from_c_str(key->string); if (on_member(&key_cur, (const struct aws_json_value *)key, &should_continue, user_data)) { goto done; } if (!should_continue) { break; } } result = AWS_OP_SUCCESS; done: return result; } int aws_json_value_add_array_element(struct aws_json_value *array, const struct aws_json_value *value) { struct cJSON *cjson = (struct cJSON *)array; if (!cJSON_IsArray(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct cJSON *cjson_value = (struct cJSON *)value; if (cJSON_IsInvalid(cjson_value)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } cJSON_AddItemToArray(cjson, cjson_value); return AWS_OP_SUCCESS; } struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index) { const struct cJSON *cjson = (const struct cJSON *)array; if (!cJSON_IsArray(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (index > (size_t)cJSON_GetArraySize(cjson)) { aws_raise_error(AWS_ERROR_INVALID_INDEX); return NULL; } return (void *)cJSON_GetArrayItem(cjson, (int)index); } size_t aws_json_get_array_size(const struct aws_json_value *array) { const struct cJSON *cjson = (const struct cJSON *)array; if (!cJSON_IsArray(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return 0; } return cJSON_GetArraySize(cjson); } int aws_json_value_remove_array_element(struct aws_json_value *array, size_t index) { struct cJSON *cjson = (struct cJSON *)array; if (!cJSON_IsArray(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (index > (size_t)cJSON_GetArraySize(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_INDEX); } cJSON_DeleteItemFromArray(cjson, (int)index); return AWS_OP_SUCCESS; } int aws_json_const_iterate_array( const struct aws_json_value *array, aws_json_on_value_encountered_const_fn *on_value, void *user_data) { int result = AWS_OP_ERR; const struct cJSON *cjson = (const struct cJSON *)array; if (!cJSON_IsArray(cjson)) { 
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } size_t idx = 0; const cJSON *value = NULL; cJSON_ArrayForEach(value, cjson) { bool should_continue = true; if (on_value(idx, (const struct aws_json_value *)value, &should_continue, user_data)) { goto done; } if (!should_continue) { break; } ++idx; } result = AWS_OP_SUCCESS; done: return result; } bool aws_json_value_compare(const struct aws_json_value *a, const struct aws_json_value *b, bool is_case_sensitive) { const struct cJSON *cjson_a = (const struct cJSON *)a; const struct cJSON *cjson_b = (const struct cJSON *)b; return cJSON_Compare(cjson_a, cjson_b, is_case_sensitive); } struct aws_json_value *aws_json_value_duplicate(const struct aws_json_value *value) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct cJSON *ret = cJSON_Duplicate(cjson, true); if (ret == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return (void *)ret; } bool aws_json_value_is_string(const struct aws_json_value *value) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } return cJSON_IsString(cjson); } bool aws_json_value_is_number(const struct aws_json_value *value) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } return cJSON_IsNumber(cjson); } bool aws_json_value_is_array(const struct aws_json_value *value) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } return cJSON_IsArray(cjson); } bool aws_json_value_is_boolean(const struct aws_json_value *value) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } return cJSON_IsBool(cjson); } bool aws_json_value_is_null(const struct aws_json_value *value) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } return cJSON_IsNull(cjson); } bool aws_json_value_is_object(const struct aws_json_value *value) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return false; } return cJSON_IsObject(cjson); } static void *s_aws_cJSON_alloc(size_t sz) { return aws_mem_acquire(s_aws_json_module_allocator, sz); } static void s_aws_cJSON_free(void *ptr) { aws_mem_release(s_aws_json_module_allocator, ptr); } void aws_json_module_init(struct aws_allocator *allocator) { if (!s_aws_json_module_initialized) { s_aws_json_module_allocator = allocator; struct cJSON_Hooks allocation_hooks = { .malloc_fn = s_aws_cJSON_alloc, .free_fn = s_aws_cJSON_free, }; cJSON_InitHooks(&allocation_hooks); s_aws_json_module_initialized = true; } } void aws_json_module_cleanup(void) { if (s_aws_json_module_initialized) { s_aws_json_module_allocator = NULL; s_aws_json_module_initialized = false; } } void aws_json_value_destroy(struct aws_json_value *value) { struct cJSON *cjson = (struct cJSON *)value; /* Note: cJSON_IsInvalid returns false for NULL values, so we need explicit check for NULL to skip delete */ if (cjson != NULL && !cJSON_IsInvalid(cjson)) { cJSON_Delete(cjson); } } int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } char *tmp = cJSON_PrintUnformatted(cjson); if (tmp == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } // 
Append the text to the byte buffer struct aws_byte_cursor tmp_cursor = aws_byte_cursor_from_c_str(tmp); int return_val = aws_byte_buf_append_dynamic_secure(output, &tmp_cursor); s_aws_cJSON_free(tmp); // free the char* now that we do not need it return return_val; } int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output) { const struct cJSON *cjson = (const struct cJSON *)value; if (cJSON_IsInvalid(cjson)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } char *tmp = cJSON_Print(cjson); if (tmp == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } // Append the text to the byte buffer struct aws_byte_cursor tmp_cursor = aws_byte_cursor_from_c_str(tmp); int return_val = aws_byte_buf_append_dynamic_secure(output, &tmp_cursor); s_aws_cJSON_free(tmp); // free the char* now that we do not need it return return_val; } struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string) { struct aws_string *tmp = aws_string_new_from_cursor(allocator, &string); struct cJSON *cjson = cJSON_Parse(aws_string_c_str(tmp)); aws_string_destroy_secure(tmp); return (void *)cjson; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/lifo_cache.c000066400000000000000000000046031456575232400242430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include static int s_lifo_cache_put(struct aws_cache *cache, const void *key, void *p_value); static struct aws_cache_vtable s_lifo_cache_vtable = { .destroy = aws_cache_base_default_destroy, .find = aws_cache_base_default_find, .put = s_lifo_cache_put, .remove = aws_cache_base_default_remove, .clear = aws_cache_base_default_clear, .get_element_count = aws_cache_base_default_get_element_count, }; struct aws_cache *aws_cache_new_lifo( struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t max_items) { AWS_ASSERT(allocator); AWS_ASSERT(max_items); struct aws_cache *lifo_cache = aws_mem_calloc(allocator, 1, sizeof(struct aws_cache)); if (!lifo_cache) { return NULL; } lifo_cache->allocator = allocator; lifo_cache->max_items = max_items; lifo_cache->vtable = &s_lifo_cache_vtable; if (aws_linked_hash_table_init( &lifo_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) { return NULL; } return lifo_cache; } /* lifo cache put implementation */ static int s_lifo_cache_put(struct aws_cache *cache, const void *key, void *p_value) { if (aws_linked_hash_table_put(&cache->table, key, p_value)) { return AWS_OP_ERR; } /* Manage the space if we actually added a new element and the cache is full. */ if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) { /* we're over the cache size limit. 
Remove whatever is in the one before the back of the linked_hash_table, * which was the latest element before we put the new one */ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table); struct aws_linked_list_node *node = aws_linked_list_back(list); if (!node->prev) { return AWS_OP_SUCCESS; } struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node->prev, struct aws_linked_hash_table_node, node); return aws_linked_hash_table_remove(&cache->table, table_node->key); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/linked_hash_table.c000066400000000000000000000124361456575232400256120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include static void s_element_destroy(void *value) { struct aws_linked_hash_table_node *node = value; if (node->table->user_on_value_destroy) { node->table->user_on_value_destroy(node->value); } aws_linked_list_remove(&node->node); aws_mem_release(node->table->allocator, node); } int aws_linked_hash_table_init( struct aws_linked_hash_table *table, struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t initial_item_count) { AWS_ASSERT(table); AWS_ASSERT(allocator); AWS_ASSERT(hash_fn); AWS_ASSERT(equals_fn); table->allocator = allocator; table->user_on_value_destroy = destroy_value_fn; table->user_on_key_destroy = destroy_key_fn; aws_linked_list_init(&table->list); return aws_hash_table_init( &table->table, allocator, initial_item_count, hash_fn, equals_fn, destroy_key_fn, s_element_destroy); } void aws_linked_hash_table_clean_up(struct aws_linked_hash_table *table) { /* clearing the table will remove all elements. That will also deallocate * any table entries we currently have. */ aws_hash_table_clean_up(&table->table); AWS_ZERO_STRUCT(*table); } int aws_linked_hash_table_find(struct aws_linked_hash_table *table, const void *key, void **p_value) { struct aws_hash_element *element = NULL; int err_val = aws_hash_table_find(&table->table, key, &element); if (err_val || !element) { *p_value = NULL; return err_val; } struct aws_linked_hash_table_node *linked_node = element->value; *p_value = linked_node->value; return AWS_OP_SUCCESS; } int aws_linked_hash_table_find_and_move_to_back(struct aws_linked_hash_table *table, const void *key, void **p_value) { struct aws_hash_element *element = NULL; int err_val = aws_hash_table_find(&table->table, key, &element); if (err_val || !element) { *p_value = NULL; return err_val; } struct aws_linked_hash_table_node *linked_node = element->value; *p_value = linked_node->value; /* on access, remove from current place in list and move it to the back. 
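 * This is the hook an LRU-style eviction policy can build on: with every access pushed to the
 * back of the iteration list, the front of the list is always the least recently used entry.
 * The plain aws_linked_hash_table_find above intentionally skips the move, so callers can look
 * up a value without disturbing that ordering.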
*/ aws_linked_hash_table_move_node_to_end_of_list(table, linked_node); return AWS_OP_SUCCESS; } int aws_linked_hash_table_put(struct aws_linked_hash_table *table, const void *key, void *p_value) { struct aws_linked_hash_table_node *node = aws_mem_calloc(table->allocator, 1, sizeof(struct aws_linked_hash_table_node)); if (!node) { return AWS_OP_ERR; } struct aws_hash_element *element = NULL; int was_added = 0; int err_val = aws_hash_table_create(&table->table, key, &element, &was_added); if (err_val) { aws_mem_release(table->allocator, node); return err_val; } if (element->value) { AWS_ASSERT(!was_added); /* * There's an existing element with a key that is "equal" to the submitted key. We need to destroy that * existing element's value if applicable. */ s_element_destroy(element->value); /* * We're reusing an old element. The keys might be different references but "equal" via comparison. In that * case we need to destroy the key (if appropriate) and point the element to the new key. This underhanded * mutation of the element is safe with respect to the hash table because the keys are "equal." */ if (table->user_on_key_destroy && element->key != key) { table->user_on_key_destroy((void *)element->key); } /* * Potentially a NOOP, but under certain circumstances (when the key and value are a part of the same structure * and we're overwriting the existing entry, for example), this is necessary. Equality via function does not * imply equal pointers. */ element->key = key; } node->value = p_value; node->key = key; node->table = table; element->value = node; aws_linked_list_push_back(&table->list, &node->node); return AWS_OP_SUCCESS; } int aws_linked_hash_table_remove(struct aws_linked_hash_table *table, const void *key) { /* allocated table memory and the linked list entry will be removed in the * callback. */ return aws_hash_table_remove(&table->table, key, NULL, NULL); } void aws_linked_hash_table_clear(struct aws_linked_hash_table *table) { /* clearing the table will remove all elements. That will also deallocate * any entries we currently have. */ aws_hash_table_clear(&table->table); } size_t aws_linked_hash_table_get_element_count(const struct aws_linked_hash_table *table) { return aws_hash_table_get_entry_count(&table->table); } void aws_linked_hash_table_move_node_to_end_of_list( struct aws_linked_hash_table *table, struct aws_linked_hash_table_node *node) { aws_linked_list_remove(&node->node); aws_linked_list_push_back(&table->list, &node->node); } const struct aws_linked_list *aws_linked_hash_table_get_iteration_list(const struct aws_linked_hash_table *table) { return &table->list; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/linux/000077500000000000000000000000001456575232400231575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/linux/system_info.c000066400000000000000000000022111456575232400256560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include int aws_system_environment_load_platform_impl(struct aws_system_environment *env) { /* provide size_hint when reading "special files", since some platforms mis-report these files' size as 4KB */ aws_byte_buf_init_from_file_with_size_hint( &env->virtualization_vendor, env->allocator, "/sys/devices/virtual/dmi/id/sys_vendor", 32 /*size_hint*/); /* whether this one works depends on if this is a sysfs filesystem. If it fails, it will just be empty * and these APIs are a best effort at the moment. 
We can add fallbacks as the loaders get more complicated. */ aws_byte_buf_init_from_file_with_size_hint( &env->product_name, env->allocator, "/sys/devices/virtual/dmi/id/product_name", 32 /*size_hint*/); return AWS_OP_SUCCESS; } void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env) { aws_byte_buf_clean_up(&env->virtualization_vendor); aws_byte_buf_clean_up(&env->product_name); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/log_channel.c000066400000000000000000000206041456575232400244370ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include /* * Basic channel implementations - synchronized foreground, synchronized background */ struct aws_log_foreground_channel { struct aws_mutex sync; }; static int s_foreground_channel_send(struct aws_log_channel *channel, struct aws_string *log_line) { struct aws_log_foreground_channel *impl = (struct aws_log_foreground_channel *)channel->impl; AWS_ASSERT(channel->writer->vtable->write); aws_mutex_lock(&impl->sync); (channel->writer->vtable->write)(channel->writer, log_line); aws_mutex_unlock(&impl->sync); /* * send is considered a transfer of ownership. write is not a transfer of ownership. * So it's always the channel's responsibility to clean up all log lines that enter * it as soon as they are no longer needed. */ aws_string_destroy(log_line); return AWS_OP_SUCCESS; } static void s_foreground_channel_clean_up(struct aws_log_channel *channel) { struct aws_log_foreground_channel *impl = (struct aws_log_foreground_channel *)channel->impl; aws_mutex_clean_up(&impl->sync); aws_mem_release(channel->allocator, impl); } static struct aws_log_channel_vtable s_foreground_channel_vtable = { .send = s_foreground_channel_send, .clean_up = s_foreground_channel_clean_up, }; int aws_log_channel_init_foreground( struct aws_log_channel *channel, struct aws_allocator *allocator, struct aws_log_writer *writer) { struct aws_log_foreground_channel *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_log_foreground_channel)); if (impl == NULL) { return AWS_OP_ERR; } if (aws_mutex_init(&impl->sync)) { aws_mem_release(allocator, impl); return AWS_OP_ERR; } channel->vtable = &s_foreground_channel_vtable; channel->allocator = allocator; channel->writer = writer; channel->impl = impl; return AWS_OP_SUCCESS; } struct aws_log_background_channel { struct aws_mutex sync; struct aws_thread background_thread; struct aws_array_list pending_log_lines; struct aws_condition_variable pending_line_signal; bool finished; }; static int s_background_channel_send(struct aws_log_channel *channel, struct aws_string *log_line) { struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl; aws_mutex_lock(&impl->sync); aws_array_list_push_back(&impl->pending_log_lines, &log_line); aws_condition_variable_notify_one(&impl->pending_line_signal); aws_mutex_unlock(&impl->sync); return AWS_OP_SUCCESS; } static void s_background_channel_clean_up(struct aws_log_channel *channel) { struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl; aws_mutex_lock(&impl->sync); impl->finished = true; aws_condition_variable_notify_one(&impl->pending_line_signal); aws_mutex_unlock(&impl->sync); aws_thread_join(&impl->background_thread); aws_thread_clean_up(&impl->background_thread); aws_condition_variable_clean_up(&impl->pending_line_signal); 
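/* The background thread was joined above, so nothing else can still be touching the pending
 * line list or the condition variable; the remaining members are torn down in roughly the
 * reverse order of their initialization. */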
aws_array_list_clean_up(&impl->pending_log_lines); aws_mutex_clean_up(&impl->sync); aws_mem_release(channel->allocator, impl); } static struct aws_log_channel_vtable s_background_channel_vtable = { .send = s_background_channel_send, .clean_up = s_background_channel_clean_up, }; static bool s_background_wait(void *context) { struct aws_log_background_channel *impl = (struct aws_log_background_channel *)context; /* * Condition variable predicates are checked under mutex protection */ return impl->finished || aws_array_list_length(&impl->pending_log_lines) > 0; } /** * This is where the background thread spends 99.999% of its time. * We broke this out into its own function so that the stacktrace clearly shows * what this thread is doing. We've had a lot of cases where users think this * thread is deadlocked because it's stuck here. We want it to be clear * that it's doing nothing on purpose. It's waiting for log messages... */ AWS_NO_INLINE static void aws_background_logger_listen_for_messages(struct aws_log_background_channel *impl) { aws_condition_variable_wait_pred(&impl->pending_line_signal, &impl->sync, s_background_wait, impl); } static void aws_background_logger_thread(void *thread_data) { (void)thread_data; struct aws_log_channel *channel = (struct aws_log_channel *)thread_data; AWS_ASSERT(channel->writer->vtable->write); struct aws_log_background_channel *impl = (struct aws_log_background_channel *)channel->impl; struct aws_array_list log_lines; AWS_FATAL_ASSERT(aws_array_list_init_dynamic(&log_lines, channel->allocator, 10, sizeof(struct aws_string *)) == 0); while (true) { aws_mutex_lock(&impl->sync); aws_background_logger_listen_for_messages(impl); size_t line_count = aws_array_list_length(&impl->pending_log_lines); bool finished = impl->finished; if (line_count == 0) { aws_mutex_unlock(&impl->sync); if (finished) { break; } continue; } aws_array_list_swap_contents(&impl->pending_log_lines, &log_lines); aws_mutex_unlock(&impl->sync); /* * Consider copying these into a page-sized stack buffer (string) and then making the write calls * against it rather than the individual strings. Might be a savings when > 1 lines (cut down on * write calls). */ for (size_t i = 0; i < line_count; ++i) { struct aws_string *log_line = NULL; AWS_FATAL_ASSERT(aws_array_list_get_at(&log_lines, &log_line, i) == AWS_OP_SUCCESS); (channel->writer->vtable->write)(channel->writer, log_line); /* * send is considered a transfer of ownership. write is not a transfer of ownership. * So it's always the channel's responsibility to clean up all log lines that enter * it as soon as they are no longer needed. 
*/ aws_string_destroy(log_line); } aws_array_list_clear(&log_lines); } aws_array_list_clean_up(&log_lines); } int aws_log_channel_init_background( struct aws_log_channel *channel, struct aws_allocator *allocator, struct aws_log_writer *writer) { struct aws_log_background_channel *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_log_background_channel)); if (impl == NULL) { return AWS_OP_ERR; } impl->finished = false; if (aws_mutex_init(&impl->sync)) { goto clean_up_sync_init_fail; } if (aws_array_list_init_dynamic(&impl->pending_log_lines, allocator, 10, sizeof(struct aws_string *))) { goto clean_up_pending_log_lines_init_fail; } if (aws_condition_variable_init(&impl->pending_line_signal)) { goto clean_up_pending_line_signal_init_fail; } if (aws_thread_init(&impl->background_thread, allocator)) { goto clean_up_background_thread_init_fail; } channel->vtable = &s_background_channel_vtable; channel->allocator = allocator; channel->impl = impl; channel->writer = writer; /* * Logging thread should need very little stack, but let's defer this to later */ struct aws_thread_options thread_options = *aws_default_thread_options(); thread_options.name = aws_byte_cursor_from_c_str("AwsLogger"); /* 15 characters is max for Linux */ if (aws_thread_launch(&impl->background_thread, aws_background_logger_thread, channel, &thread_options) == AWS_OP_SUCCESS) { return AWS_OP_SUCCESS; } aws_thread_clean_up(&impl->background_thread); clean_up_background_thread_init_fail: aws_condition_variable_clean_up(&impl->pending_line_signal); clean_up_pending_line_signal_init_fail: aws_array_list_clean_up(&impl->pending_log_lines); clean_up_pending_log_lines_init_fail: aws_mutex_clean_up(&impl->sync); clean_up_sync_init_fail: aws_mem_release(allocator, impl); return AWS_OP_ERR; } void aws_log_channel_clean_up(struct aws_log_channel *channel) { AWS_ASSERT(channel->vtable->clean_up); (channel->vtable->clean_up)(channel); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/log_formatter.c000066400000000000000000000231241456575232400250320ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include /* * Default formatter implementation */ #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif enum { /* (max) strlen of "[]" */ LOG_LEVEL_PREFIX_PADDING = 7, /* (max) strlen of "[]" */ THREAD_ID_PREFIX_PADDING = 22, /* strlen of (user-content separator) " - " + "\n" + spaces between prefix fields + brackets around timestamp + 1 + subject_name padding */ MISC_PADDING = 15, }; #define MAX_LOG_LINE_PREFIX_SIZE \ (LOG_LEVEL_PREFIX_PADDING + THREAD_ID_PREFIX_PADDING + MISC_PADDING + AWS_DATE_TIME_STR_MAX_LEN) static size_t s_advance_and_clamp_index(size_t current_index, int amount, size_t maximum) { size_t next_index = current_index + amount; if (next_index > maximum) { next_index = maximum; } return next_index; } /* Thread-local string representation of current thread id */ AWS_THREAD_LOCAL struct { bool is_valid; char repr[AWS_THREAD_ID_T_REPR_BUFSZ]; } tl_logging_thread_id = {.is_valid = false}; int aws_format_standard_log_line(struct aws_logging_standard_formatting_data *formatting_data, va_list args) { size_t current_index = 0; /* * Begin the log line with "[] [" */ const char *level_string = NULL; if (aws_log_level_to_string(formatting_data->level, &level_string)) { return AWS_OP_ERR; } if (formatting_data->total_length == 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* * Use this length for all but the last write, so we guarantee room for the newline even if we get truncated */ size_t fake_total_length = formatting_data->total_length - 1; int log_level_length = snprintf(formatting_data->log_line_buffer, fake_total_length, "[%s] [", level_string); if (log_level_length < 0) { return AWS_OP_ERR; } current_index = s_advance_and_clamp_index(current_index, log_level_length, fake_total_length); if (current_index < fake_total_length) { /* * Add the timestamp. To avoid copies and allocations, do some byte buffer tomfoolery. 
* * First, make a byte_buf that points to the current position in the output string */ struct aws_byte_buf timestamp_buffer = { .allocator = formatting_data->allocator, .buffer = (uint8_t *)formatting_data->log_line_buffer + current_index, .capacity = fake_total_length - current_index, .len = 0, }; /* * Output the current time to the byte_buf */ struct aws_date_time current_time; aws_date_time_init_now(¤t_time); int result = aws_date_time_to_utc_time_str(¤t_time, formatting_data->date_format, ×tamp_buffer); if (result != AWS_OP_SUCCESS) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index = s_advance_and_clamp_index(current_index, (int)timestamp_buffer.len, fake_total_length); } if (current_index < fake_total_length) { /* * Add thread id and user content separator (" - ") */ if (!tl_logging_thread_id.is_valid) { aws_thread_id_t current_thread_id = aws_thread_current_thread_id(); if (aws_thread_id_t_to_string(current_thread_id, tl_logging_thread_id.repr, AWS_THREAD_ID_T_REPR_BUFSZ)) { return AWS_OP_ERR; } tl_logging_thread_id.is_valid = true; } int thread_id_written = snprintf( formatting_data->log_line_buffer + current_index, fake_total_length - current_index, "] [%s] ", tl_logging_thread_id.repr); if (thread_id_written < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index = s_advance_and_clamp_index(current_index, thread_id_written, fake_total_length); } if (current_index < fake_total_length) { /* output subject name */ if (formatting_data->subject_name) { int subject_written = snprintf( formatting_data->log_line_buffer + current_index, fake_total_length - current_index, "[%s]", formatting_data->subject_name); if (subject_written < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index = s_advance_and_clamp_index(current_index, subject_written, fake_total_length); } } if (current_index < fake_total_length) { int separator_written = snprintf(formatting_data->log_line_buffer + current_index, fake_total_length - current_index, " - "); current_index = s_advance_and_clamp_index(current_index, separator_written, fake_total_length); } if (current_index < fake_total_length) { /* * Now write the actual data requested by the user */ #ifdef _WIN32 int written_count = vsnprintf_s( formatting_data->log_line_buffer + current_index, fake_total_length - current_index, _TRUNCATE, formatting_data->format, args); #else int written_count = vsnprintf( formatting_data->log_line_buffer + current_index, fake_total_length - current_index, formatting_data->format, args); #endif /* _WIN32 */ if (written_count < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index = s_advance_and_clamp_index(current_index, written_count, fake_total_length); } /* * End with a newline. 
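 * Every earlier snprintf in this function was capped at fake_total_length (total_length - 1),
 * so space for this terminating newline remains reserved even when the user's message gets
 * truncated; that is why the error branch below is treated as effectively unreachable.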
*/ int newline_written_count = snprintf(formatting_data->log_line_buffer + current_index, formatting_data->total_length - current_index, "\n"); if (newline_written_count < 0) { return aws_raise_error(AWS_ERROR_UNKNOWN); /* we saved space, so this would be crazy */ } formatting_data->amount_written = current_index + newline_written_count; return AWS_OP_SUCCESS; } struct aws_default_log_formatter_impl { enum aws_date_format date_format; }; static int s_default_aws_log_formatter_format( struct aws_log_formatter *formatter, struct aws_string **formatted_output, enum aws_log_level level, aws_log_subject_t subject, const char *format, va_list args) { (void)subject; struct aws_default_log_formatter_impl *impl = formatter->impl; if (formatted_output == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* * Calculate how much room we'll need to build the full log line. * You cannot consume a va_list twice, so we have to copy it. */ va_list tmp_args; va_copy(tmp_args, args); #ifdef _WIN32 int required_length = _vscprintf(format, tmp_args) + 1; #else int required_length = vsnprintf(NULL, 0, format, tmp_args) + 1; #endif va_end(tmp_args); /* * Allocate enough room to hold the line. Then we'll (unsafely) do formatted IO directly into the aws_string * memory. */ const char *subject_name = aws_log_subject_name(subject); int subject_name_len = 0; if (subject_name) { subject_name_len = (int)strlen(subject_name); } int total_length = required_length + MAX_LOG_LINE_PREFIX_SIZE + subject_name_len; struct aws_string *raw_string = aws_mem_calloc(formatter->allocator, 1, sizeof(struct aws_string) + total_length); if (raw_string == NULL) { goto error_clean_up; } struct aws_logging_standard_formatting_data format_data = { .log_line_buffer = (char *)raw_string->bytes, .total_length = total_length, .level = level, .subject_name = subject_name, .format = format, .date_format = impl->date_format, .allocator = formatter->allocator, .amount_written = 0, }; if (aws_format_standard_log_line(&format_data, args)) { goto error_clean_up; } *(struct aws_allocator **)(&raw_string->allocator) = formatter->allocator; *(size_t *)(&raw_string->len) = format_data.amount_written; *formatted_output = raw_string; return AWS_OP_SUCCESS; error_clean_up: if (raw_string != NULL) { aws_mem_release(formatter->allocator, raw_string); } return AWS_OP_ERR; } static void s_default_aws_log_formatter_clean_up(struct aws_log_formatter *formatter) { aws_mem_release(formatter->allocator, formatter->impl); } static struct aws_log_formatter_vtable s_default_log_formatter_vtable = { .format = s_default_aws_log_formatter_format, .clean_up = s_default_aws_log_formatter_clean_up, }; int aws_log_formatter_init_default( struct aws_log_formatter *formatter, struct aws_allocator *allocator, struct aws_log_formatter_standard_options *options) { struct aws_default_log_formatter_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_default_log_formatter_impl)); impl->date_format = options->date_format; formatter->vtable = &s_default_log_formatter_vtable; formatter->allocator = allocator; formatter->impl = impl; return AWS_OP_SUCCESS; } void aws_log_formatter_clean_up(struct aws_log_formatter *formatter) { AWS_ASSERT(formatter->vtable->clean_up); (formatter->vtable->clean_up)(formatter); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/log_writer.c000066400000000000000000000064671456575232400243560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include /* * Basic log writer implementations - stdout, stderr, arbitrary file */ struct aws_file_writer; struct aws_file_writer { FILE *log_file; bool close_file_on_cleanup; }; static int s_aws_file_writer_write(struct aws_log_writer *writer, const struct aws_string *output) { struct aws_file_writer *impl = (struct aws_file_writer *)writer->impl; size_t length = output->len; if (fwrite(output->bytes, 1, length, impl->log_file) < length) { int errno_value = ferror(impl->log_file) ? errno : 0; /* Always cache errno before potential side-effect */ return aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_WRITE_FAILURE); } return AWS_OP_SUCCESS; } static void s_aws_file_writer_clean_up(struct aws_log_writer *writer) { struct aws_file_writer *impl = (struct aws_file_writer *)writer->impl; if (impl->close_file_on_cleanup) { fclose(impl->log_file); } aws_mem_release(writer->allocator, impl); } static struct aws_log_writer_vtable s_aws_file_writer_vtable = { .write = s_aws_file_writer_write, .clean_up = s_aws_file_writer_clean_up, }; /* * Shared internal init implementation */ static int s_aws_file_writer_init_internal( struct aws_log_writer *writer, struct aws_allocator *allocator, const char *file_name_to_open, FILE *currently_open_file) { /* One or the other should be set */ if (!((file_name_to_open != NULL) ^ (currently_open_file != NULL))) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* Allocate and initialize the file writer */ struct aws_file_writer *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_file_writer)); if (impl == NULL) { return AWS_OP_ERR; } impl->log_file = NULL; impl->close_file_on_cleanup = false; /* Open file if name passed in */ if (file_name_to_open != NULL) { impl->log_file = aws_fopen(file_name_to_open, "a+"); if (impl->log_file == NULL) { aws_mem_release(allocator, impl); return AWS_OP_ERR; } impl->close_file_on_cleanup = true; } else { impl->log_file = currently_open_file; } writer->vtable = &s_aws_file_writer_vtable; writer->allocator = allocator; writer->impl = impl; return AWS_OP_SUCCESS; } /* * Public initialization interface */ int aws_log_writer_init_stdout(struct aws_log_writer *writer, struct aws_allocator *allocator) { return s_aws_file_writer_init_internal(writer, allocator, NULL, stdout); } int aws_log_writer_init_stderr(struct aws_log_writer *writer, struct aws_allocator *allocator) { return s_aws_file_writer_init_internal(writer, allocator, NULL, stderr); } int aws_log_writer_init_file( struct aws_log_writer *writer, struct aws_allocator *allocator, struct aws_log_writer_file_options *options) { return s_aws_file_writer_init_internal(writer, allocator, options->filename, options->file); } void aws_log_writer_clean_up(struct aws_log_writer *writer) { AWS_ASSERT(writer->vtable->clean_up); (writer->vtable->clean_up)(writer); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/logging.c000066400000000000000000000430641456575232400236210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif /* * Null logger implementation */ static enum aws_log_level s_null_logger_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) { (void)logger; (void)subject; return AWS_LL_NONE; } static int s_null_logger_log( struct aws_logger *logger, enum aws_log_level log_level, aws_log_subject_t subject, const char *format, ...) { (void)logger; (void)log_level; (void)subject; (void)format; return AWS_OP_SUCCESS; } static void s_null_logger_clean_up(struct aws_logger *logger) { (void)logger; } static struct aws_logger_vtable s_null_vtable = { .get_log_level = s_null_logger_get_log_level, .log = s_null_logger_log, .clean_up = s_null_logger_clean_up, }; static struct aws_logger s_null_logger = { .vtable = &s_null_vtable, .allocator = NULL, .p_impl = NULL, }; /* * Pipeline logger implementation */ static void s_aws_logger_pipeline_owned_clean_up(struct aws_logger *logger) { struct aws_logger_pipeline *impl = logger->p_impl; AWS_ASSERT(impl->channel->vtable->clean_up != NULL); (impl->channel->vtable->clean_up)(impl->channel); AWS_ASSERT(impl->formatter->vtable->clean_up != NULL); (impl->formatter->vtable->clean_up)(impl->formatter); AWS_ASSERT(impl->writer->vtable->clean_up != NULL); (impl->writer->vtable->clean_up)(impl->writer); aws_mem_release(impl->allocator, impl->channel); aws_mem_release(impl->allocator, impl->formatter); aws_mem_release(impl->allocator, impl->writer); aws_mem_release(impl->allocator, impl); } /* * Pipeline logger implementation */ static int s_aws_logger_pipeline_log( struct aws_logger *logger, enum aws_log_level log_level, aws_log_subject_t subject, const char *format, ...) 
{ va_list format_args; va_start(format_args, format); struct aws_logger_pipeline *impl = logger->p_impl; struct aws_string *output = NULL; AWS_ASSERT(impl->formatter->vtable->format != NULL); int result = (impl->formatter->vtable->format)(impl->formatter, &output, log_level, subject, format, format_args); va_end(format_args); if (result != AWS_OP_SUCCESS || output == NULL) { return AWS_OP_ERR; } AWS_ASSERT(impl->channel->vtable->send != NULL); if ((impl->channel->vtable->send)(impl->channel, output)) { /* * failure to send implies failure to transfer ownership */ aws_string_destroy(output); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static enum aws_log_level s_aws_logger_pipeline_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) { (void)subject; struct aws_logger_pipeline *impl = logger->p_impl; return (enum aws_log_level)aws_atomic_load_int(&impl->level); } static int s_aws_logger_pipeline_set_log_level(struct aws_logger *logger, enum aws_log_level level) { struct aws_logger_pipeline *impl = logger->p_impl; aws_atomic_store_int(&impl->level, (size_t)level); return AWS_OP_SUCCESS; } struct aws_logger_vtable g_pipeline_logger_owned_vtable = { .get_log_level = s_aws_logger_pipeline_get_log_level, .log = s_aws_logger_pipeline_log, .clean_up = s_aws_logger_pipeline_owned_clean_up, .set_log_level = s_aws_logger_pipeline_set_log_level, }; int aws_logger_init_standard( struct aws_logger *logger, struct aws_allocator *allocator, struct aws_logger_standard_options *options) { #ifdef ANDROID (void)options; extern int aws_logger_init_logcat( struct aws_logger *, struct aws_allocator *, struct aws_logger_standard_options *); return aws_logger_init_logcat(logger, allocator, options); #endif struct aws_logger_pipeline *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_logger_pipeline)); if (impl == NULL) { return AWS_OP_ERR; } struct aws_log_writer *writer = aws_mem_acquire(allocator, sizeof(struct aws_log_writer)); if (writer == NULL) { goto on_allocate_writer_failure; } struct aws_log_writer_file_options file_writer_options = { .filename = options->filename, .file = options->file, }; if (aws_log_writer_init_file(writer, allocator, &file_writer_options)) { goto on_init_writer_failure; } struct aws_log_formatter *formatter = aws_mem_acquire(allocator, sizeof(struct aws_log_formatter)); if (formatter == NULL) { goto on_allocate_formatter_failure; } struct aws_log_formatter_standard_options formatter_options = {.date_format = AWS_DATE_FORMAT_ISO_8601}; if (aws_log_formatter_init_default(formatter, allocator, &formatter_options)) { goto on_init_formatter_failure; } struct aws_log_channel *channel = aws_mem_acquire(allocator, sizeof(struct aws_log_channel)); if (channel == NULL) { goto on_allocate_channel_failure; } if (aws_log_channel_init_background(channel, allocator, writer) == AWS_OP_SUCCESS) { impl->formatter = formatter; impl->channel = channel; impl->writer = writer; impl->allocator = allocator; aws_atomic_store_int(&impl->level, (size_t)options->level); logger->vtable = &g_pipeline_logger_owned_vtable; logger->allocator = allocator; logger->p_impl = impl; return AWS_OP_SUCCESS; } aws_mem_release(allocator, channel); on_allocate_channel_failure: aws_log_formatter_clean_up(formatter); on_init_formatter_failure: aws_mem_release(allocator, formatter); on_allocate_formatter_failure: aws_log_writer_clean_up(writer); on_init_writer_failure: aws_mem_release(allocator, writer); on_allocate_writer_failure: aws_mem_release(allocator, impl); return AWS_OP_ERR; } /* * Pipeline logger 
implementation where all the components are externally owned. No clean up * is done on the components. Useful for tests where components are on the stack and often mocked. */ static void s_aws_pipeline_logger_unowned_clean_up(struct aws_logger *logger) { struct aws_logger_pipeline *impl = (struct aws_logger_pipeline *)logger->p_impl; aws_mem_release(impl->allocator, impl); } static struct aws_logger_vtable s_pipeline_logger_unowned_vtable = { .get_log_level = s_aws_logger_pipeline_get_log_level, .log = s_aws_logger_pipeline_log, .clean_up = s_aws_pipeline_logger_unowned_clean_up, .set_log_level = s_aws_logger_pipeline_set_log_level, }; int aws_logger_init_from_external( struct aws_logger *logger, struct aws_allocator *allocator, struct aws_log_formatter *formatter, struct aws_log_channel *channel, struct aws_log_writer *writer, enum aws_log_level level) { struct aws_logger_pipeline *impl = aws_mem_acquire(allocator, sizeof(struct aws_logger_pipeline)); if (impl == NULL) { return AWS_OP_ERR; } impl->formatter = formatter; impl->channel = channel; impl->writer = writer; impl->allocator = allocator; aws_atomic_store_int(&impl->level, (size_t)level); logger->vtable = &s_pipeline_logger_unowned_vtable; logger->allocator = allocator; logger->p_impl = impl; return AWS_OP_SUCCESS; } /* * Global API */ static struct aws_logger *s_root_logger_ptr = &s_null_logger; void aws_logger_set(struct aws_logger *logger) { if (logger != NULL) { s_root_logger_ptr = logger; } else { s_root_logger_ptr = &s_null_logger; } } struct aws_logger *aws_logger_get(void) { return s_root_logger_ptr; } struct aws_logger *aws_logger_get_conditional(aws_log_subject_t subject, enum aws_log_level level) { if (s_root_logger_ptr == NULL) { return NULL; } if (s_root_logger_ptr->vtable->get_log_level(s_root_logger_ptr, subject) < level) { return NULL; } return s_root_logger_ptr; } void aws_logger_clean_up(struct aws_logger *logger) { AWS_ASSERT(logger->vtable->clean_up != NULL); logger->vtable->clean_up(logger); } static const char *s_log_level_strings[AWS_LL_COUNT] = {"NONE", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"}; int aws_log_level_to_string(enum aws_log_level log_level, const char **level_string) { AWS_ERROR_PRECONDITION(log_level < AWS_LL_COUNT); if (level_string != NULL) { *level_string = s_log_level_strings[log_level]; } return AWS_OP_SUCCESS; } int aws_string_to_log_level(const char *level_string, enum aws_log_level *log_level) { if (level_string != NULL && log_level != NULL) { size_t level_length = strlen(level_string); for (int i = 0; i < AWS_LL_COUNT; ++i) { if (aws_array_eq_c_str_ignore_case(level_string, level_length, s_log_level_strings[i])) { *log_level = i; return AWS_OP_SUCCESS; } } } aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return AWS_OP_ERR; } int aws_thread_id_t_to_string(aws_thread_id_t thread_id, char *buffer, size_t bufsz) { AWS_ERROR_PRECONDITION(AWS_THREAD_ID_T_REPR_BUFSZ == bufsz); AWS_ERROR_PRECONDITION(buffer && AWS_MEM_IS_WRITABLE(buffer, bufsz)); size_t current_index = 0; unsigned char *bytes = (unsigned char *)&thread_id; for (size_t i = sizeof(aws_thread_id_t); i != 0; --i) { unsigned char c = bytes[i - 1]; int written = snprintf(buffer + current_index, bufsz - current_index, "%02x", c); if (written < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } current_index += written; if (bufsz <= current_index) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } return AWS_OP_SUCCESS; } #define AWS_LOG_SUBJECT_SPACE_MASK (AWS_LOG_SUBJECT_STRIDE - 1) static const uint32_t 
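/*
 * Log subjects are partitioned into per-package "slots": the high bits of a subject id
 * select the slot and the low bits index into that package's aws_log_subject_info_list,
 * i.e. slot_index = subject >> AWS_LOG_SUBJECT_STRIDE_BITS and
 * subject_index = subject & AWS_LOG_SUBJECT_SPACE_MASK, as done just below.
 * For illustration only, if the stride were 1024, subject id 1027 would resolve to
 * slot 1, index 3.
 */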
S_MAX_LOG_SUBJECT = AWS_LOG_SUBJECT_STRIDE * AWS_PACKAGE_SLOTS - 1; static const struct aws_log_subject_info_list *volatile s_log_subject_slots[AWS_PACKAGE_SLOTS] = {0}; static const struct aws_log_subject_info *s_get_log_subject_info_by_id(aws_log_subject_t subject) { if (subject > S_MAX_LOG_SUBJECT) { return NULL; } uint32_t slot_index = subject >> AWS_LOG_SUBJECT_STRIDE_BITS; uint32_t subject_index = subject & AWS_LOG_SUBJECT_SPACE_MASK; const struct aws_log_subject_info_list *subject_slot = s_log_subject_slots[slot_index]; if (!subject_slot || subject_index >= subject_slot->count) { return NULL; } return &subject_slot->subject_list[subject_index]; } const char *aws_log_subject_name(aws_log_subject_t subject) { const struct aws_log_subject_info *subject_info = s_get_log_subject_info_by_id(subject); if (subject_info != NULL) { return subject_info->subject_name; } return "Unknown"; } void aws_register_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list) { /* * We're not so worried about these asserts being removed in an NDEBUG build * - we'll either segfault immediately (for the first two) or for the count * assert, the registration will be ineffective. */ AWS_FATAL_ASSERT(log_subject_list); AWS_FATAL_ASSERT(log_subject_list->subject_list); AWS_FATAL_ASSERT(log_subject_list->count); const uint32_t min_range = log_subject_list->subject_list[0].subject_id; const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS; #if DEBUG_BUILD for (uint32_t i = 0; i < log_subject_list->count; ++i) { const struct aws_log_subject_info *info = &log_subject_list->subject_list[i]; uint32_t expected_id = min_range + i; if (expected_id != info->subject_id) { fprintf(stderr, "\"%s\" is at wrong index in aws_log_subject_info[]\n", info->subject_name); AWS_FATAL_ASSERT(0); } } #endif /* DEBUG_BUILD */ if (slot_index >= AWS_PACKAGE_SLOTS) { /* This is an NDEBUG build apparently. Kill the process rather than * corrupting heap. */ fprintf(stderr, "Bad log subject slot index 0x%016x\n", slot_index); abort(); } s_log_subject_slots[slot_index] = log_subject_list; } void aws_unregister_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list) { /* * We're not so worried about these asserts being removed in an NDEBUG build * - we'll either segfault immediately (for the first two) or for the count * assert, the registration will be ineffective. */ AWS_FATAL_ASSERT(log_subject_list); AWS_FATAL_ASSERT(log_subject_list->subject_list); AWS_FATAL_ASSERT(log_subject_list->count); const uint32_t min_range = log_subject_list->subject_list[0].subject_id; const uint32_t slot_index = min_range >> AWS_LOG_SUBJECT_STRIDE_BITS; if (slot_index >= AWS_PACKAGE_SLOTS) { /* This is an NDEBUG build apparently. Kill the process rather than * corrupting heap. */ fprintf(stderr, "Bad log subject slot index 0x%016x\n", slot_index); AWS_FATAL_ASSERT(false); } s_log_subject_slots[slot_index] = NULL; } /* * no alloc implementation */ struct aws_logger_noalloc { struct aws_atomic_var level; FILE *file; bool should_close; struct aws_mutex lock; }; static enum aws_log_level s_noalloc_stderr_logger_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) { (void)subject; struct aws_logger_noalloc *impl = logger->p_impl; return (enum aws_log_level)aws_atomic_load_int(&impl->level); } enum { MAXIMUM_NO_ALLOC_LOG_LINE_SIZE = 8192 }; static int s_noalloc_stderr_logger_log( struct aws_logger *logger, enum aws_log_level log_level, aws_log_subject_t subject, const char *format, ...) 
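/*
 * The no-alloc logger formats each line into the 8 KiB stack buffer below
 * (MAXIMUM_NO_ALLOC_LOG_LINE_SIZE) and serializes writes with a mutex, rather than
 * allocating a string per line like the pipeline logger above.  A hedged usage sketch
 * (the compound initializer and stderr target are illustrative, not taken from this file):
 *
 *   struct aws_logger logger;
 *   struct aws_logger_standard_options options = {.level = AWS_LL_TRACE, .file = stderr};
 *   if (aws_logger_init_noalloc(&logger, aws_default_allocator(), &options) == AWS_OP_SUCCESS) {
 *       aws_logger_set(&logger);
 *   }
 */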
{ char format_buffer[MAXIMUM_NO_ALLOC_LOG_LINE_SIZE]; va_list format_args; va_start(format_args, format); #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4221) /* allow struct member to reference format_buffer */ #endif struct aws_logging_standard_formatting_data format_data = { .log_line_buffer = format_buffer, .total_length = MAXIMUM_NO_ALLOC_LOG_LINE_SIZE, .level = log_level, .subject_name = aws_log_subject_name(subject), .format = format, .date_format = AWS_DATE_FORMAT_ISO_8601, .allocator = logger->allocator, .amount_written = 0, }; #ifdef _MSC_VER # pragma warning(pop) /* disallow struct member to reference local value */ #endif int result = aws_format_standard_log_line(&format_data, format_args); va_end(format_args); if (result == AWS_OP_ERR) { return AWS_OP_ERR; } struct aws_logger_noalloc *impl = logger->p_impl; aws_mutex_lock(&impl->lock); int write_result = AWS_OP_SUCCESS; if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) { int errno_value = ferror(impl->file) ? errno : 0; /* Always cache errno before potential side-effect */ aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_FILE_WRITE_FAILURE); write_result = AWS_OP_ERR; } aws_mutex_unlock(&impl->lock); return write_result; } static void s_noalloc_stderr_logger_clean_up(struct aws_logger *logger) { if (logger == NULL) { return; } struct aws_logger_noalloc *impl = logger->p_impl; if (impl->should_close) { fclose(impl->file); } aws_mutex_clean_up(&impl->lock); aws_mem_release(logger->allocator, impl); AWS_ZERO_STRUCT(*logger); } int s_no_alloc_stderr_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level) { struct aws_logger_noalloc *impl = logger->p_impl; aws_atomic_store_int(&impl->level, (size_t)level); return AWS_OP_SUCCESS; } static struct aws_logger_vtable s_noalloc_stderr_vtable = { .get_log_level = s_noalloc_stderr_logger_get_log_level, .log = s_noalloc_stderr_logger_log, .clean_up = s_noalloc_stderr_logger_clean_up, .set_log_level = s_no_alloc_stderr_logger_set_log_level, }; int aws_logger_init_noalloc( struct aws_logger *logger, struct aws_allocator *allocator, struct aws_logger_standard_options *options) { struct aws_logger_noalloc *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_logger_noalloc)); if (impl == NULL) { return AWS_OP_ERR; } aws_atomic_store_int(&impl->level, (size_t)options->level); if (options->file != NULL) { impl->file = options->file; impl->should_close = false; } else { /* _MSC_VER */ if (options->filename != NULL) { impl->file = aws_fopen(options->filename, "w"); if (!impl->file) { aws_mem_release(allocator, impl); return AWS_OP_ERR; } impl->should_close = true; } else { impl->file = stderr; impl->should_close = false; } } aws_mutex_init(&impl->lock); logger->vtable = &s_noalloc_stderr_vtable; logger->allocator = allocator; logger->p_impl = impl; return AWS_OP_SUCCESS; } int aws_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level) { if (logger == NULL || logger->vtable == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (logger->vtable->set_log_level == NULL) { return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } return logger->vtable->set_log_level(logger, level); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/lru_cache.c000066400000000000000000000107641456575232400241210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include static int s_lru_cache_put(struct aws_cache *cache, const void *key, void *p_value); static int s_lru_cache_find(struct aws_cache *cache, const void *key, void **p_value); static void *s_lru_cache_use_lru_element(struct aws_cache *cache); static void *s_lru_cache_get_mru_element(const struct aws_cache *cache); struct lru_cache_impl_vtable { void *(*use_lru_element)(struct aws_cache *cache); void *(*get_mru_element)(const struct aws_cache *cache); }; static struct aws_cache_vtable s_lru_cache_vtable = { .destroy = aws_cache_base_default_destroy, .find = s_lru_cache_find, .put = s_lru_cache_put, .remove = aws_cache_base_default_remove, .clear = aws_cache_base_default_clear, .get_element_count = aws_cache_base_default_get_element_count, }; struct aws_cache *aws_cache_new_lru( struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_key_fn, aws_hash_callback_destroy_fn *destroy_value_fn, size_t max_items) { AWS_ASSERT(allocator); AWS_ASSERT(max_items); struct aws_cache *lru_cache = NULL; struct lru_cache_impl_vtable *impl = NULL; if (!aws_mem_acquire_many( allocator, 2, &lru_cache, sizeof(struct aws_cache), &impl, sizeof(struct lru_cache_impl_vtable))) { return NULL; } impl->use_lru_element = s_lru_cache_use_lru_element; impl->get_mru_element = s_lru_cache_get_mru_element; lru_cache->allocator = allocator; lru_cache->max_items = max_items; lru_cache->vtable = &s_lru_cache_vtable; lru_cache->impl = impl; if (aws_linked_hash_table_init( &lru_cache->table, allocator, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn, max_items)) { return NULL; } return lru_cache; } /* implementation for lru cache put */ static int s_lru_cache_put(struct aws_cache *cache, const void *key, void *p_value) { if (aws_linked_hash_table_put(&cache->table, key, p_value)) { return AWS_OP_ERR; } /* Manage the space if we actually added a new element and the cache is full. */ if (aws_linked_hash_table_get_element_count(&cache->table) > cache->max_items) { /* we're over the cache size limit. 
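* (Illustrative sequence with max_items == 2 and hypothetical string keys:
*    put "a"; put "b"   -> iteration order is a, b; the front entry "a" is least recently used
*    find "a"           -> s_lru_cache_find moves the hit to the back, so the order becomes b, a
*    put "c"            -> the count exceeds max_items and the front entry "b" is evicted here.)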
Remove whatever is in the front of * the linked_hash_table, which is the LRU element */ const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table); struct aws_linked_list_node *node = aws_linked_list_front(list); struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); return aws_linked_hash_table_remove(&cache->table, table_node->key); } return AWS_OP_SUCCESS; } /* implementation for lru cache find */ static int s_lru_cache_find(struct aws_cache *cache, const void *key, void **p_value) { return (aws_linked_hash_table_find_and_move_to_back(&cache->table, key, p_value)); } static void *s_lru_cache_use_lru_element(struct aws_cache *cache) { const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table); if (aws_linked_list_empty(list)) { return NULL; } struct aws_linked_list_node *node = aws_linked_list_front(list); struct aws_linked_hash_table_node *lru_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); aws_linked_hash_table_move_node_to_end_of_list(&cache->table, lru_node); return lru_node->value; } static void *s_lru_cache_get_mru_element(const struct aws_cache *cache) { const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&cache->table); if (aws_linked_list_empty(list)) { return NULL; } struct aws_linked_list_node *node = aws_linked_list_back(list); struct aws_linked_hash_table_node *mru_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); return mru_node->value; } void *aws_lru_cache_use_lru_element(struct aws_cache *cache) { AWS_PRECONDITION(cache); AWS_PRECONDITION(cache->impl); struct lru_cache_impl_vtable *impl_vtable = cache->impl; return impl_vtable->use_lru_element(cache); } void *aws_lru_cache_get_mru_element(const struct aws_cache *cache) { AWS_PRECONDITION(cache); AWS_PRECONDITION(cache->impl); struct lru_cache_impl_vtable *impl_vtable = cache->impl; return impl_vtable->get_mru_element(cache); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/math.c000066400000000000000000000011311456575232400231110ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_COMMON_API int aws_add_size_checked_varargs(size_t num, size_t *r, ...) { va_list argp; va_start(argp, r); size_t accum = 0; for (size_t i = 0; i < num; ++i) { size_t next = va_arg(argp, size_t); if (aws_add_size_checked(accum, next, &accum) == AWS_OP_ERR) { va_end(argp); return AWS_OP_ERR; } } *r = accum; va_end(argp); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/memtrace.c000066400000000000000000000525471456575232400237760ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include /* describes a single live allocation. * allocated by aws_default_allocator() */ struct alloc_info { size_t size; uint64_t time; uint64_t stack; /* hash of stack frame pointers */ }; /* Using a flexible array member is the C99 compliant way to have the frames immediately follow the header. * * MSVC doesn't know this for some reason so we need to use a pragma to make * it happy. 
*/ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4200) /* nonstandard extension used: zero-sized array in struct/union */ #endif /* one of these is stored per unique stack * allocated by aws_default_allocator() */ struct stack_trace { size_t depth; /* length of frames[] */ void *const frames[]; /* rest of frames are allocated after */ }; #ifdef _MSC_VER # pragma warning(pop) #endif /* Tracking structure, used as the allocator impl. * This structure, and all its bookkeeping data structures, are created with the aws_default_allocator(). * This is not customizable because it's too expensive for every little allocation to store * a pointer back to its original allocator. */ struct alloc_tracer { struct aws_allocator *traced_allocator; /* underlying allocator */ enum aws_mem_trace_level level; /* level to trace at */ size_t frames_per_stack; /* how many frames to keep per stack */ struct aws_atomic_var allocated; /* bytes currently allocated */ struct aws_mutex mutex; /* protects everything below */ struct aws_hash_table allocs; /* live allocations, maps address -> alloc_info */ struct aws_hash_table stacks; /* unique stack traces, maps hash -> stack_trace */ }; /* number of frames to skip in call stacks (s_alloc_tracer_track, and the vtable function) */ enum { FRAMES_TO_SKIP = 2 }; static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size); static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr); static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size); static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size); static struct aws_allocator s_trace_allocator = { .mem_acquire = s_trace_mem_acquire, .mem_release = s_trace_mem_release, .mem_realloc = s_trace_mem_realloc, .mem_calloc = s_trace_mem_calloc, }; /* for the hash table, to destroy elements */ static void s_destroy_alloc(void *data) { struct alloc_info *alloc = data; aws_mem_release(aws_default_allocator(), alloc); } static void s_destroy_stacktrace(void *data) { struct stack_trace *stack = data; aws_mem_release(aws_default_allocator(), stack); } static void s_alloc_tracer_init( struct alloc_tracer *tracer, struct aws_allocator *traced_allocator, enum aws_mem_trace_level level, size_t frames_per_stack) { void *stack[1]; if (!aws_backtrace(stack, 1)) { /* clamp level if tracing isn't available */ level = level > AWS_MEMTRACE_BYTES ? AWS_MEMTRACE_BYTES : level; } tracer->traced_allocator = traced_allocator; tracer->level = level; if (tracer->level >= AWS_MEMTRACE_BYTES) { aws_atomic_init_int(&tracer->allocated, 0); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mutex_init(&tracer->mutex)); AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_init( &tracer->allocs, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_alloc)); } if (tracer->level == AWS_MEMTRACE_STACKS) { if (frames_per_stack > 128) { frames_per_stack = 128; } tracer->frames_per_stack = frames_per_stack ? 
frames_per_stack : 8; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_init( &tracer->stacks, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_stacktrace)); } } static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t size) { if (tracer->level == AWS_MEMTRACE_NONE) { return; } aws_atomic_fetch_add(&tracer->allocated, size); struct alloc_info *alloc = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct alloc_info)); AWS_FATAL_ASSERT(alloc); alloc->size = size; aws_high_res_clock_get_ticks(&alloc->time); if (tracer->level == AWS_MEMTRACE_STACKS) { /* capture stack frames, skip 2 for this function and the allocation vtable function */ AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack)); size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack); if (stack_depth) { /* hash the stack pointers */ struct aws_byte_cursor stack_cursor = aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *)); uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor); alloc->stack = stack_id; /* associate the stack with the alloc */ aws_mutex_lock(&tracer->mutex); struct aws_hash_element *item = NULL; int was_created = 0; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created)); /* If this is a new stack, save it to the hash */ if (was_created) { struct stack_trace *stack = aws_mem_calloc( aws_default_allocator(), 1, sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack)); AWS_FATAL_ASSERT(stack); memcpy( (void **)&stack->frames[0], &stack_frames[FRAMES_TO_SKIP], (stack_depth - FRAMES_TO_SKIP) * sizeof(void *)); stack->depth = stack_depth - FRAMES_TO_SKIP; item->value = stack; } aws_mutex_unlock(&tracer->mutex); } } aws_mutex_lock(&tracer->mutex); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_put(&tracer->allocs, ptr, alloc, NULL)); aws_mutex_unlock(&tracer->mutex); } static void s_alloc_tracer_untrack(struct alloc_tracer *tracer, void *ptr) { if (tracer->level == AWS_MEMTRACE_NONE) { return; } aws_mutex_lock(&tracer->mutex); struct aws_hash_element *item; AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(&tracer->allocs, ptr, &item)); /* because the tracer can be installed at any time, it is possible for an allocation to not * be tracked. 
Therefore, we make sure the find succeeds, but then check the returned * value */ if (item) { AWS_FATAL_ASSERT(item->key == ptr && item->value); struct alloc_info *alloc = item->value; aws_atomic_fetch_sub(&tracer->allocated, alloc->size); s_destroy_alloc(item->value); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_remove_element(&tracer->allocs, item)); } aws_mutex_unlock(&tracer->mutex); } /* used only to resolve stacks -> trace, count, size at dump time */ struct stack_metadata { struct aws_string *trace; size_t count; size_t size; }; static int s_collect_stack_trace(void *context, struct aws_hash_element *item) { struct alloc_tracer *tracer = context; struct aws_hash_table *all_stacks = &tracer->stacks; struct stack_metadata *stack_info = item->value; struct aws_hash_element *stack_item = NULL; AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(all_stacks, item->key, &stack_item)); AWS_FATAL_ASSERT(stack_item); struct stack_trace *stack = stack_item->value; void *const *stack_frames = &stack->frames[0]; /* convert the frame pointers to symbols, and concat into a buffer */ char buf[4096] = {0}; struct aws_byte_buf stacktrace = aws_byte_buf_from_empty_array(buf, AWS_ARRAY_SIZE(buf)); struct aws_byte_cursor newline = aws_byte_cursor_from_c_str("\n"); char **symbols = aws_backtrace_symbols(stack_frames, stack->depth); for (size_t idx = 0; idx < stack->depth; ++idx) { if (idx > 0) { aws_byte_buf_append(&stacktrace, &newline); } const char *caller = symbols[idx]; if (!caller || !caller[0]) { break; } struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller); aws_byte_buf_append(&stacktrace, &cursor); } aws_mem_release(aws_default_allocator(), symbols); /* record the resultant buffer as a string */ stack_info->trace = aws_string_new_from_array(aws_default_allocator(), stacktrace.buffer, stacktrace.len); AWS_FATAL_ASSERT(stack_info->trace); aws_byte_buf_clean_up(&stacktrace); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } static int s_stack_info_compare_size(const void *a, const void *b) { const struct stack_metadata *stack_a = *(const struct stack_metadata **)a; const struct stack_metadata *stack_b = *(const struct stack_metadata **)b; return stack_b->size > stack_a->size; } static int s_stack_info_compare_count(const void *a, const void *b) { const struct stack_metadata *stack_a = *(const struct stack_metadata **)a; const struct stack_metadata *stack_b = *(const struct stack_metadata **)b; return stack_b->count > stack_a->count; } static void s_stack_info_destroy(void *data) { struct stack_metadata *stack = data; struct aws_allocator *allocator = stack->trace->allocator; aws_string_destroy(stack->trace); aws_mem_release(allocator, stack); } /* tally up count/size per stack from all allocs */ static int s_collect_stack_stats(void *context, struct aws_hash_element *item) { struct aws_hash_table *stack_info = context; struct alloc_info *alloc = item->value; struct aws_hash_element *stack_item = NULL; int was_created = 0; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_create(stack_info, (void *)(uintptr_t)alloc->stack, &stack_item, &was_created)); if (was_created) { stack_item->value = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct stack_metadata)); AWS_FATAL_ASSERT(stack_item->value); } struct stack_metadata *stack = stack_item->value; stack->count++; stack->size += alloc->size; return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } static int s_insert_stacks(void *context, struct aws_hash_element *item) { struct aws_priority_queue *pq = context; struct stack_metadata *stack 
= item->value; AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(pq, &stack)); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } static int s_insert_allocs(void *context, struct aws_hash_element *item) { struct aws_priority_queue *allocs = context; struct alloc_info *alloc = item->value; AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(allocs, &alloc)); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } static int s_alloc_compare(const void *a, const void *b) { const struct alloc_info *alloc_a = *(const struct alloc_info **)a; const struct alloc_info *alloc_b = *(const struct alloc_info **)b; return alloc_a->time > alloc_b->time; } void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) { struct alloc_tracer *tracer = trace_allocator->impl; if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) { return; } aws_mutex_lock(&tracer->mutex); size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "################################################################################"); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #"); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "################################################################################"); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "tracer: %zu bytes still allocated in %zu allocations", aws_atomic_load_int(&tracer->allocated), num_allocs); /* convert stacks from pointers -> symbols */ struct aws_hash_table stack_info; AWS_ZERO_STRUCT(stack_info); if (tracer->level == AWS_MEMTRACE_STACKS) { AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_init( &stack_info, aws_default_allocator(), 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy)); /* collect active stacks, tally up sizes and counts */ aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, &stack_info); /* collect stack traces for active stacks */ aws_hash_table_foreach(&stack_info, s_collect_stack_trace, tracer); } /* sort allocs by time */ struct aws_priority_queue allocs; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_priority_queue_init_dynamic( &allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare)); aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs); /* dump allocs by time */ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:"); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); while (aws_priority_queue_size(&allocs)) { struct alloc_info *alloc = NULL; aws_priority_queue_pop(&allocs, &alloc); if (alloc->stack) { struct aws_hash_element *item = NULL; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item)); struct stack_metadata *stack = item->value; AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes, stacktrace:\n%s\n", alloc->size, aws_string_c_str(stack->trace)); } else { AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes", alloc->size); } } aws_priority_queue_clean_up(&allocs); if (tracer->level == AWS_MEMTRACE_STACKS) { size_t num_stacks = aws_hash_table_get_entry_count(&stack_info); /* sort stacks by total size leaked */ struct aws_priority_queue stacks_by_size; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_priority_queue_init_dynamic( &stacks_by_size, aws_default_allocator(), num_stacks, sizeof(struct stack_metadata *), s_stack_info_compare_size)); 
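/*
 * Note: this whole report is emitted through AWS_LOGF_TRACE on the AWS_LS_COMMON_MEMTRACE
 * subject, so a logger set at AWS_LL_TRACE must be installed for the dump to be visible.
 * A hedged end-to-end sketch of the tracer (the frame count of 8 is illustrative):
 *
 *   struct aws_allocator *traced =
 *       aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8);
 *   ... hand "traced" to the code under test instead of the real allocator ...
 *   aws_mem_tracer_dump(traced);              (leaks by allocation time, then stacks by size and count)
 *   size_t outstanding = aws_mem_tracer_bytes(traced);
 *   aws_mem_tracer_destroy(traced);
 */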
aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:"); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); while (aws_priority_queue_size(&stacks_by_size) > 0) { struct stack_metadata *stack = NULL; aws_priority_queue_pop(&stacks_by_size, &stack); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n%s\n", stack->size, stack->count, aws_string_c_str(stack->trace)); } aws_priority_queue_clean_up(&stacks_by_size); /* sort stacks by number of leaks */ struct aws_priority_queue stacks_by_count; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_priority_queue_init_dynamic( &stacks_by_count, aws_default_allocator(), num_stacks, sizeof(struct stack_metadata *), s_stack_info_compare_count)); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:"); AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count); while (aws_priority_queue_size(&stacks_by_count) > 0) { struct stack_metadata *stack = NULL; aws_priority_queue_pop(&stacks_by_count, &stack); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n%s\n", stack->count, stack->size, aws_string_c_str(stack->trace)); } aws_priority_queue_clean_up(&stacks_by_count); aws_hash_table_clean_up(&stack_info); } AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "################################################################################"); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #"); AWS_LOGF_TRACE( AWS_LS_COMMON_MEMTRACE, "################################################################################"); aws_mutex_unlock(&tracer->mutex); } static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size) { struct alloc_tracer *tracer = allocator->impl; void *ptr = aws_mem_acquire(tracer->traced_allocator, size); if (ptr) { s_alloc_tracer_track(tracer, ptr, size); } return ptr; } static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) { struct alloc_tracer *tracer = allocator->impl; s_alloc_tracer_untrack(tracer, ptr); aws_mem_release(tracer->traced_allocator, ptr); } static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) { struct alloc_tracer *tracer = allocator->impl; void *new_ptr = old_ptr; /* * Careful with the ordering of state clean up here. * Tracer keeps a hash table (alloc ptr as key) of meta info about each allocation. 
* To avoid race conditions during realloc state update needs to be done in * following order to avoid race conditions: * - remove meta info (other threads cant reuse that key, cause ptr is still valid ) * - realloc (cant fail, ptr might remain the same) * - add meta info for reallocated mem */ s_alloc_tracer_untrack(tracer, old_ptr); aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size); s_alloc_tracer_track(tracer, new_ptr, new_size); return new_ptr; } static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) { struct alloc_tracer *tracer = allocator->impl; void *ptr = aws_mem_calloc(tracer->traced_allocator, num, size); if (ptr) { s_alloc_tracer_track(tracer, ptr, num * size); } return ptr; } struct aws_allocator *aws_mem_tracer_new( struct aws_allocator *allocator, struct aws_allocator *deprecated, enum aws_mem_trace_level level, size_t frames_per_stack) { /* deprecated customizable bookkeeping allocator */ (void)deprecated; struct alloc_tracer *tracer = NULL; struct aws_allocator *trace_allocator = NULL; aws_mem_acquire_many( aws_default_allocator(), 2, &tracer, sizeof(struct alloc_tracer), &trace_allocator, sizeof(struct aws_allocator)); AWS_FATAL_ASSERT(trace_allocator); AWS_FATAL_ASSERT(tracer); AWS_ZERO_STRUCT(*trace_allocator); AWS_ZERO_STRUCT(*tracer); /* copy the template vtable s*/ *trace_allocator = s_trace_allocator; trace_allocator->impl = tracer; s_alloc_tracer_init(tracer, allocator, level, frames_per_stack); return trace_allocator; } struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator) { struct alloc_tracer *tracer = trace_allocator->impl; struct aws_allocator *allocator = tracer->traced_allocator; if (tracer->level != AWS_MEMTRACE_NONE) { aws_mutex_lock(&tracer->mutex); aws_hash_table_clean_up(&tracer->allocs); aws_hash_table_clean_up(&tracer->stacks); aws_mutex_unlock(&tracer->mutex); aws_mutex_clean_up(&tracer->mutex); } aws_mem_release(aws_default_allocator(), tracer); /* trace_allocator is freed as part of the block tracer was allocated in */ return allocator; } size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator) { struct alloc_tracer *tracer = trace_allocator->impl; if (tracer->level == AWS_MEMTRACE_NONE) { return 0; } return aws_atomic_load_int(&tracer->allocated); } size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator) { struct alloc_tracer *tracer = trace_allocator->impl; if (tracer->level == AWS_MEMTRACE_NONE) { return 0; } aws_mutex_lock(&tracer->mutex); size_t count = aws_hash_table_get_entry_count(&tracer->allocs); aws_mutex_unlock(&tracer->mutex); return count; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/platform_fallback_stubs/000077500000000000000000000000001456575232400267035ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/platform_fallback_stubs/system_info.c000066400000000000000000000011331456575232400314040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include int aws_system_environment_load_platform_impl(struct aws_system_environment *env) { (void)env; AWS_LOGF_DEBUG( AWS_LS_COMMON_GENERAL, "id=%p: platform specific environment loading is not implemented for this platform.", (void *)env); return AWS_OP_SUCCESS; } void aws_system_environment_destroy_platform_impl(struct aws_system_environment *env) { (void)env; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/000077500000000000000000000000001456575232400231625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/clock.c000066400000000000000000000100001456575232400244100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include static const uint64_t NS_PER_SEC = 1000000000; #if defined(CLOCK_MONOTONIC_RAW) # define HIGH_RES_CLOCK CLOCK_MONOTONIC_RAW #else # define HIGH_RES_CLOCK CLOCK_MONOTONIC #endif /* This entire compilation branch has two goals. First, prior to OSX Sierra, clock_gettime does not exist on OSX, so we * already need to branch on that. Second, even if we compile on a newer OSX, which we will always do for bindings (e.g. * python, dotnet, java etc...), we have to worry about the same lib being loaded on an older version, and thus, we'd * get linker errors at runtime. To avoid this, we do a dynamic load * to keep the function out of linker tables and only use the symbol if the current running process has access to the * function. */ #if defined(__MACH__) # include # include # include # include static int s_legacy_get_time(uint64_t *timestamp) { struct timeval tv; int ret_val = gettimeofday(&tv, NULL); if (ret_val) { return aws_raise_error(AWS_ERROR_CLOCK_FAILURE); } uint64_t secs = (uint64_t)tv.tv_sec; uint64_t u_secs = (uint64_t)tv.tv_usec; *timestamp = (secs * NS_PER_SEC) + (u_secs * 1000); return AWS_OP_SUCCESS; } # if MAC_OS_X_VERSION_MAX_ALLOWED >= 101200 static aws_thread_once s_thread_once_flag = AWS_THREAD_ONCE_STATIC_INIT; static int (*s_gettime_fn)(clockid_t clock_id, struct timespec *tp) = NULL; static void s_do_osx_loads(void *user_data) { (void)user_data; s_gettime_fn = (int (*)(clockid_t clock_id, struct timespec * tp)) dlsym(RTLD_DEFAULT, "clock_gettime"); } int aws_high_res_clock_get_ticks(uint64_t *timestamp) { aws_thread_call_once(&s_thread_once_flag, s_do_osx_loads, NULL); int ret_val = 0; if (s_gettime_fn) { struct timespec ts; ret_val = s_gettime_fn(HIGH_RES_CLOCK, &ts); if (ret_val) { return aws_raise_error(AWS_ERROR_CLOCK_FAILURE); } uint64_t secs = (uint64_t)ts.tv_sec; uint64_t n_secs = (uint64_t)ts.tv_nsec; *timestamp = (secs * NS_PER_SEC) + n_secs; return AWS_OP_SUCCESS; } return s_legacy_get_time(timestamp); } int aws_sys_clock_get_ticks(uint64_t *timestamp) { aws_thread_call_once(&s_thread_once_flag, s_do_osx_loads, NULL); int ret_val = 0; if (s_gettime_fn) { struct timespec ts; ret_val = s_gettime_fn(CLOCK_REALTIME, &ts); if (ret_val) { return aws_raise_error(AWS_ERROR_CLOCK_FAILURE); } uint64_t secs = (uint64_t)ts.tv_sec; uint64_t n_secs = (uint64_t)ts.tv_nsec; *timestamp = (secs * NS_PER_SEC) + n_secs; return AWS_OP_SUCCESS; } return s_legacy_get_time(timestamp); } # else int aws_high_res_clock_get_ticks(uint64_t *timestamp) { return s_legacy_get_time(timestamp); } int aws_sys_clock_get_ticks(uint64_t *timestamp) { return s_legacy_get_time(timestamp); } # endif /* MAC_OS_X_VERSION_MAX_ALLOWED >= 101200 */ /* Everywhere else, just link clock_gettime in directly */ #else int 
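/*
 * Both functions below return a single uint64_t tick count in nanoseconds
 * (seconds * NS_PER_SEC + nanoseconds), so elapsed time is a plain subtraction.
 * Illustrative sketch (error handling elided):
 *
 *   uint64_t start = 0, end = 0;
 *   aws_high_res_clock_get_ticks(&start);
 *   ... do work ...
 *   aws_high_res_clock_get_ticks(&end);
 *   uint64_t elapsed_ns = end - start;
 */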
aws_high_res_clock_get_ticks(uint64_t *timestamp) { int ret_val = 0; struct timespec ts; ret_val = clock_gettime(HIGH_RES_CLOCK, &ts); if (ret_val) { return aws_raise_error(AWS_ERROR_CLOCK_FAILURE); } uint64_t secs = (uint64_t)ts.tv_sec; uint64_t n_secs = (uint64_t)ts.tv_nsec; *timestamp = (secs * NS_PER_SEC) + n_secs; return AWS_OP_SUCCESS; } int aws_sys_clock_get_ticks(uint64_t *timestamp) { int ret_val = 0; struct timespec ts; ret_val = clock_gettime(CLOCK_REALTIME, &ts); if (ret_val) { return aws_raise_error(AWS_ERROR_CLOCK_FAILURE); } uint64_t secs = (uint64_t)ts.tv_sec; uint64_t n_secs = (uint64_t)ts.tv_nsec; *timestamp = (secs * NS_PER_SEC) + n_secs; return AWS_OP_SUCCESS; } #endif /* defined(__MACH__) */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/condition_variable.c000066400000000000000000000063031456575232400271630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include static int process_error_code(int err) { switch (err) { case ENOMEM: return aws_raise_error(AWS_ERROR_OOM); case ETIMEDOUT: return aws_raise_error(AWS_ERROR_COND_VARIABLE_TIMED_OUT); default: return aws_raise_error(AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN); } } int aws_condition_variable_init(struct aws_condition_variable *condition_variable) { AWS_PRECONDITION(condition_variable); if (pthread_cond_init(&condition_variable->condition_handle, NULL)) { AWS_ZERO_STRUCT(*condition_variable); return aws_raise_error(AWS_ERROR_COND_VARIABLE_INIT_FAILED); } condition_variable->initialized = true; return AWS_OP_SUCCESS; } void aws_condition_variable_clean_up(struct aws_condition_variable *condition_variable) { AWS_PRECONDITION(condition_variable); if (condition_variable->initialized) { pthread_cond_destroy(&condition_variable->condition_handle); } AWS_ZERO_STRUCT(*condition_variable); } int aws_condition_variable_notify_one(struct aws_condition_variable *condition_variable) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); int err_code = pthread_cond_signal(&condition_variable->condition_handle); if (err_code) { return process_error_code(err_code); } return AWS_OP_SUCCESS; } int aws_condition_variable_notify_all(struct aws_condition_variable *condition_variable) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); int err_code = pthread_cond_broadcast(&condition_variable->condition_handle); if (err_code) { return process_error_code(err_code); } return AWS_OP_SUCCESS; } int aws_condition_variable_wait(struct aws_condition_variable *condition_variable, struct aws_mutex *mutex) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); AWS_PRECONDITION(mutex && mutex->initialized); int err_code = pthread_cond_wait(&condition_variable->condition_handle, &mutex->mutex_handle); if (err_code) { return process_error_code(err_code); } return AWS_OP_SUCCESS; } int aws_condition_variable_wait_for( struct aws_condition_variable *condition_variable, struct aws_mutex *mutex, int64_t time_to_wait) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); AWS_PRECONDITION(mutex && mutex->initialized); uint64_t current_sys_time = 0; if (aws_sys_clock_get_ticks(¤t_sys_time)) { return AWS_OP_ERR; } struct timespec ts; uint64_t remainder = 0; ts.tv_sec = (time_t)aws_timestamp_convert( (uint64_t)(time_to_wait + current_sys_time), AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder); ts.tv_nsec = (long)remainder; int err_code = 
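/*
 * (The timespec built above is an absolute CLOCK_REALTIME deadline: the current system
 * time plus the caller's relative time_to_wait in nanoseconds, split into seconds and a
 * nanosecond remainder via aws_timestamp_convert.  An expired deadline surfaces as
 * AWS_ERROR_COND_VARIABLE_TIMED_OUT through process_error_code above.  A typical caller
 * pattern, with the names cv, mutex and signaled purely illustrative:
 *
 *   aws_mutex_lock(&mutex);
 *   while (!signaled) {
 *       if (aws_condition_variable_wait_for(&cv, &mutex, timeout_ns) != AWS_OP_SUCCESS) {
 *           break;
 *       }
 *   }
 *   aws_mutex_unlock(&mutex);
 * )
 */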
pthread_cond_timedwait(&condition_variable->condition_handle, &mutex->mutex_handle, &ts); if (err_code) { return process_error_code(err_code); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/cross_process_lock.c000066400000000000000000000122131456575232400272240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include struct aws_cross_process_lock { struct aws_allocator *allocator; int locked_fd; }; struct aws_cross_process_lock *aws_cross_process_lock_try_acquire( struct aws_allocator *allocator, struct aws_byte_cursor instance_nonce) { /* validate we don't have a directory slash. */ struct aws_byte_cursor to_find = aws_byte_cursor_from_c_str("/"); struct aws_byte_cursor found; AWS_ZERO_STRUCT(found); if (aws_byte_cursor_find_exact(&instance_nonce, &to_find, &found) != AWS_OP_ERR && aws_last_error() != AWS_ERROR_STRING_MATCH_NOT_FOUND) { AWS_LOGF_ERROR( AWS_LS_COMMON_GENERAL, "static: Lock " PRInSTR "creation has illegal character /", AWS_BYTE_CURSOR_PRI(instance_nonce)); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } /* * The unix standard says /tmp has to be there and be writable. However, while it may be tempting to just use the * /tmp/ directory, it often has the sticky bit set which would prevent a subprocess from being able to call open * with create on the file. The solution is simple, just write it to a subdirectory inside * /tmp and override umask via. chmod of 0777. */ struct aws_byte_cursor path_prefix = aws_byte_cursor_from_c_str("/tmp/aws_crt_cross_process_lock/"); struct aws_string *path_to_create = aws_string_new_from_cursor(allocator, &path_prefix); /* It's probably there already and we don't care if it is. */ if (!aws_directory_exists(path_to_create)) { /* if this call fails just let it fail on open below. */ aws_directory_create(path_to_create); /* bypass umask by setting the perms we actually requested */ chmod(aws_string_c_str(path_to_create), S_IRWXU | S_IRWXG | S_IRWXO); } aws_string_destroy(path_to_create); struct aws_byte_cursor path_suffix = aws_byte_cursor_from_c_str(".lock"); struct aws_byte_buf nonce_buf; aws_byte_buf_init_copy_from_cursor(&nonce_buf, allocator, path_prefix); aws_byte_buf_append_dynamic(&nonce_buf, &instance_nonce); aws_byte_buf_append_dynamic(&nonce_buf, &path_suffix); aws_byte_buf_append_null_terminator(&nonce_buf); struct aws_cross_process_lock *instance_lock = NULL; errno = 0; int fd = open((const char *)nonce_buf.buffer, O_CREAT | O_RDWR, 0666); if (fd < 0) { AWS_LOGF_DEBUG( AWS_LS_COMMON_GENERAL, "static: Lock file %s failed to open with errno %d", (const char *)nonce_buf.buffer, errno); aws_translate_and_raise_io_error_or(errno, AWS_ERROR_MUTEX_FAILED); if (aws_last_error() == AWS_ERROR_NO_PERMISSION) { AWS_LOGF_DEBUG( AWS_LS_COMMON_GENERAL, "static: Lock file %s couldn't be opened due to file ownership permissions. Attempting to open as read " "only", (const char *)nonce_buf.buffer); errno = 0; fd = open((const char *)nonce_buf.buffer, O_RDONLY); if (fd < 0) { AWS_LOGF_ERROR( AWS_LS_COMMON_GENERAL, "static: Lock file %s failed to open with read-only permissions with errno %d", (const char *)nonce_buf.buffer, errno); aws_translate_and_raise_io_error_or(errno, AWS_ERROR_MUTEX_FAILED); goto cleanup; } } else { AWS_LOGF_ERROR( AWS_LS_COMMON_GENERAL, "static: Lock file %s failed to open. 
The lock cannot be acquired.", (const char *)nonce_buf.buffer); goto cleanup; } } if (flock(fd, LOCK_EX | LOCK_NB) == -1) { AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "static: Lock file %s already acquired by another instance", (const char *)nonce_buf.buffer); close(fd); aws_raise_error(AWS_ERROR_MUTEX_CALLER_NOT_OWNER); goto cleanup; } instance_lock = aws_mem_calloc(allocator, 1, sizeof(struct aws_cross_process_lock)); instance_lock->locked_fd = fd; instance_lock->allocator = allocator; AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "static: Lock file %s acquired by this instance with fd %d", (const char *)nonce_buf.buffer, fd); cleanup: aws_byte_buf_clean_up(&nonce_buf); return instance_lock; } void aws_cross_process_lock_release(struct aws_cross_process_lock *instance_lock) { if (instance_lock) { flock(instance_lock->locked_fd, LOCK_UN); close(instance_lock->locked_fd); AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "static: Lock file released for fd %d", instance_lock->locked_fd); aws_mem_release(instance_lock->allocator, instance_lock); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/device_random.c000066400000000000000000000041561456575232400261330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include static int s_rand_fd = -1; static aws_thread_once s_rand_init = AWS_THREAD_ONCE_STATIC_INIT; #ifdef O_CLOEXEC # define OPEN_FLAGS (O_RDONLY | O_CLOEXEC) #else # define OPEN_FLAGS (O_RDONLY) #endif static void s_init_rand(void *user_data) { (void)user_data; s_rand_fd = open("/dev/urandom", OPEN_FLAGS); if (s_rand_fd == -1) { s_rand_fd = open("/dev/urandom", O_RDONLY); if (s_rand_fd == -1) { abort(); } } if (-1 == fcntl(s_rand_fd, F_SETFD, FD_CLOEXEC)) { abort(); } } int aws_device_random_buffer_append(struct aws_byte_buf *output, size_t n) { AWS_PRECONDITION(aws_byte_buf_is_valid(output)); aws_thread_call_once(&s_rand_init, s_init_rand, NULL); size_t space_available = output->capacity - output->len; if (space_available < n) { AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t original_len = output->len; /* read() can fail if N is too large (e.g. x64 macos fails if N > INT32_MAX), * so work in reasonably sized chunks. */ while (n > 0) { size_t capped_n = aws_min_size( n, 1024 * 1024 * 1024 * 1 /* 1GiB */); /* NOLINT(bugprone-implicit-widening-of-multiplication-result) */ ssize_t amount_read = read(s_rand_fd, output->buffer + output->len, capped_n); if (amount_read <= 0) { output->len = original_len; AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return aws_raise_error(AWS_ERROR_RANDOM_GEN_FAILED); } output->len += amount_read; n -= amount_read; } AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return AWS_OP_SUCCESS; } int aws_device_random_buffer(struct aws_byte_buf *output) { return aws_device_random_buffer_append(output, output->capacity - output->len); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/environment.c000066400000000000000000000022771456575232400257020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include int aws_get_environment_value( struct aws_allocator *allocator, const struct aws_string *variable_name, struct aws_string **value_out) { const char *value = getenv(aws_string_c_str(variable_name)); if (value == NULL) { *value_out = NULL; return AWS_OP_SUCCESS; } *value_out = aws_string_new_from_c_str(allocator, value); if (*value_out == NULL) { return aws_raise_error(AWS_ERROR_ENVIRONMENT_GET); } return AWS_OP_SUCCESS; } int aws_set_environment_value(const struct aws_string *variable_name, const struct aws_string *value) { if (setenv(aws_string_c_str(variable_name), aws_string_c_str(value), 1) != 0) { return aws_raise_error(AWS_ERROR_ENVIRONMENT_SET); } return AWS_OP_SUCCESS; } int aws_unset_environment_value(const struct aws_string *variable_name) { if (unsetenv(aws_string_c_str(variable_name)) != 0) { return aws_raise_error(AWS_ERROR_ENVIRONMENT_UNSET); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/file.c000066400000000000000000000244521456575232400242540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode) { FILE *f = fopen(aws_string_c_str(file_path), aws_string_c_str(mode)); if (!f) { int errno_cpy = errno; /* Always cache errno before potential side-effect */ aws_translate_and_raise_io_error_or(errno_cpy, AWS_ERROR_FILE_OPEN_FAILURE); AWS_LOGF_ERROR( AWS_LS_COMMON_IO, "static: Failed to open file. path:'%s' mode:'%s' errno:%d aws-error:%d(%s)", aws_string_c_str(file_path), aws_string_c_str(mode), errno_cpy, aws_last_error(), aws_error_name(aws_last_error())); } return f; } int aws_directory_create(const struct aws_string *dir_path) { int mkdir_ret = mkdir(aws_string_c_str(dir_path), S_IRWXU | S_IRWXG | S_IRWXO); int errno_value = errno; /* Always cache errno before potential side-effect */ /** nobody cares if it already existed. 
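* (mkdir() reporting EEXIST is deliberately treated as success below, which makes the
* call idempotent: for example, calling aws_directory_create() twice in a row on the
* same path succeeds both times.)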
*/ if (mkdir_ret != 0 && errno_value != EEXIST) { return aws_translate_and_raise_io_error(errno_value); } return AWS_OP_SUCCESS; } bool aws_directory_exists(const struct aws_string *dir_path) { struct stat dir_info; if (lstat(aws_string_c_str(dir_path), &dir_info) == 0 && S_ISDIR(dir_info.st_mode)) { return true; } return false; } static bool s_delete_file_or_directory(const struct aws_directory_entry *entry, void *user_data) { (void)user_data; struct aws_allocator *allocator = aws_default_allocator(); struct aws_string *path_str = aws_string_new_from_cursor(allocator, &entry->relative_path); int ret_val = AWS_OP_SUCCESS; if (entry->file_type & AWS_FILE_TYPE_FILE) { ret_val = aws_file_delete(path_str); } if (entry->file_type & AWS_FILE_TYPE_DIRECTORY) { ret_val = aws_directory_delete(path_str, false); } aws_string_destroy(path_str); return ret_val == AWS_OP_SUCCESS; } int aws_directory_delete(const struct aws_string *dir_path, bool recursive) { if (!aws_directory_exists(dir_path)) { return AWS_OP_SUCCESS; } int ret_val = AWS_OP_SUCCESS; if (recursive) { ret_val = aws_directory_traverse(aws_default_allocator(), dir_path, true, s_delete_file_or_directory, NULL); } if (ret_val && aws_last_error() == AWS_ERROR_FILE_INVALID_PATH) { aws_reset_error(); return AWS_OP_SUCCESS; } if (ret_val) { return AWS_OP_ERR; } int error_code = rmdir(aws_string_c_str(dir_path)); int errno_value = errno; /* Always cache errno before potential side-effect */ return error_code == 0 ? AWS_OP_SUCCESS : aws_translate_and_raise_io_error(errno_value); } int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to) { int error_code = rename(aws_string_c_str(from), aws_string_c_str(to)); int errno_value = errno; /* Always cache errno before potential side-effect */ return error_code == 0 ? AWS_OP_SUCCESS : aws_translate_and_raise_io_error(errno_value); } int aws_file_delete(const struct aws_string *file_path) { int error_code = unlink(aws_string_c_str(file_path)); int errno_value = errno; /* Always cache errno before potential side-effect */ if (!error_code || errno_value == ENOENT) { return AWS_OP_SUCCESS; } return aws_translate_and_raise_io_error(errno_value); } int aws_directory_traverse( struct aws_allocator *allocator, const struct aws_string *path, bool recursive, aws_on_directory_entry *on_entry, void *user_data) { DIR *dir = opendir(aws_string_c_str(path)); int errno_value = errno; /* Always cache errno before potential side-effect */ if (!dir) { return aws_translate_and_raise_io_error(errno_value); } struct aws_byte_cursor current_path = aws_byte_cursor_from_string(path); if (current_path.ptr[current_path.len - 1] == AWS_PATH_DELIM) { current_path.len -= 1; } struct dirent *dirent = NULL; int ret_val = AWS_ERROR_SUCCESS; errno = 0; while (!ret_val && (dirent = readdir(dir)) != NULL) { /* note: dirent->name_len is only defined on the BSDs, but not linux. It's not in the * required posix spec. So we use dirent->d_name as a c string here. 
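*
* An illustrative visitor showing how this traversal is consumed (the callback name and
* the printf call are hypothetical, not part of this file); returning false from the
* callback aborts the walk with AWS_ERROR_OPERATION_INTERUPTED, as handled further below:
*
*   static bool s_print_entry(const struct aws_directory_entry *entry, void *user_data) {
*       (void)user_data;
*       printf("%d " PRInSTR "\n", entry->file_type, AWS_BYTE_CURSOR_PRI(entry->relative_path));
*       return true;   (keep walking)
*   }
*   ...
*   aws_directory_traverse(allocator, dir_path, true, s_print_entry, NULL);
*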
*/ struct aws_byte_cursor name_component = aws_byte_cursor_from_c_str(dirent->d_name); if (aws_byte_cursor_eq_c_str(&name_component, "..") || aws_byte_cursor_eq_c_str(&name_component, ".")) { continue; } struct aws_byte_buf relative_path; aws_byte_buf_init_copy_from_cursor(&relative_path, allocator, current_path); aws_byte_buf_append_byte_dynamic(&relative_path, AWS_PATH_DELIM); aws_byte_buf_append_dynamic(&relative_path, &name_component); aws_byte_buf_append_byte_dynamic(&relative_path, 0); relative_path.len -= 1; struct aws_directory_entry entry; AWS_ZERO_STRUCT(entry); struct stat dir_info; if (!lstat((const char *)relative_path.buffer, &dir_info)) { if (S_ISDIR(dir_info.st_mode)) { entry.file_type |= AWS_FILE_TYPE_DIRECTORY; } if (S_ISLNK(dir_info.st_mode)) { entry.file_type |= AWS_FILE_TYPE_SYM_LINK; } if (S_ISREG(dir_info.st_mode)) { entry.file_type |= AWS_FILE_TYPE_FILE; entry.file_size = dir_info.st_size; } if (!entry.file_type) { AWS_ASSERT("Unknown file type encountered"); } entry.relative_path = aws_byte_cursor_from_buf(&relative_path); const char *full_path = realpath((const char *)relative_path.buffer, NULL); if (full_path) { entry.path = aws_byte_cursor_from_c_str(full_path); } if (recursive && entry.file_type & AWS_FILE_TYPE_DIRECTORY) { struct aws_string *rel_path_str = aws_string_new_from_cursor(allocator, &entry.relative_path); ret_val = aws_directory_traverse(allocator, rel_path_str, recursive, on_entry, user_data); aws_string_destroy(rel_path_str); } /* post order traversal, if a node below us ended the traversal, don't call the visitor again. */ if (ret_val && aws_last_error() == AWS_ERROR_OPERATION_INTERUPTED) { goto cleanup; } if (!on_entry(&entry, user_data)) { ret_val = aws_raise_error(AWS_ERROR_OPERATION_INTERUPTED); goto cleanup; } if (ret_val) { goto cleanup; } cleanup: /* per https://man7.org/linux/man-pages/man3/realpath.3.html, realpath must be freed, if NULL was passed * to the second argument. */ if (full_path) { free((void *)full_path); } aws_byte_buf_clean_up(&relative_path); } } closedir(dir); return ret_val; } char aws_get_platform_directory_separator(void) { return '/'; } AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME"); struct aws_string *aws_get_home_directory(struct aws_allocator *allocator) { /* First, check "HOME" environment variable. * If it's set, then return it, even if it's an empty string. */ struct aws_string *home_value = NULL; aws_get_environment_value(allocator, s_home_env_var, &home_value); if (home_value != NULL) { return home_value; } /* Next, check getpwuid_r(). * We need to allocate a tmp buffer to store the result strings, * and the max possible size for this thing can be pretty big, * so start with a reasonable allocation, and if that's not enough try something bigger. */ uid_t uid = getuid(); /* cannot fail */ struct passwd pwd; struct passwd *result = NULL; char *buf = NULL; int status = ERANGE; for (size_t bufsize = 1024; bufsize <= 16384 && status == ERANGE; bufsize *= 2) { if (buf) { aws_mem_release(allocator, buf); } buf = aws_mem_acquire(allocator, bufsize); /* Note: on newer GCC with address sanitizer on, getpwuid_r triggers * build error, since buf can in theory be null, but buffsize will be * nonzero. following if statement works around that. 
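*
* Typical call pattern for the surrounding function (illustrative only):
*   struct aws_string *home = aws_get_home_directory(allocator);
*   if (home != NULL) {
*       ... use aws_string_c_str(home) ...
*       aws_string_destroy(home);
*   }
* The caller owns the returned string; NULL means the lookup failed and an error
* such as AWS_ERROR_GET_HOME_DIRECTORY_FAILED was raised.
*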
*/ if (buf == NULL) { aws_raise_error(AWS_ERROR_GET_HOME_DIRECTORY_FAILED); return NULL; } status = getpwuid_r(uid, &pwd, buf, bufsize, &result); } if (status == 0 && result != NULL && result->pw_dir != NULL) { home_value = aws_string_new_from_c_str(allocator, result->pw_dir); } else { aws_raise_error(AWS_ERROR_GET_HOME_DIRECTORY_FAILED); } aws_mem_release(allocator, buf); return home_value; } bool aws_path_exists(const struct aws_string *path) { struct stat buffer; return stat(aws_string_c_str(path), &buffer) == 0; } int aws_fseek(FILE *file, int64_t offset, int whence) { #ifdef AWS_HAVE_POSIX_LARGE_FILE_SUPPORT int result = fseeko(file, offset, whence); #else /* must use fseek(), which takes offset as a long */ if (offset < LONG_MIN || offset > LONG_MAX) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } int result = fseek(file, offset, whence); #endif /* AWS_HAVE_POSIX_LFS */ int errno_value = errno; /* Always cache errno before potential side-effect */ if (result != 0) { return aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_STREAM_UNSEEKABLE); } return AWS_OP_SUCCESS; } int aws_file_get_length(FILE *file, int64_t *length) { struct stat file_stats; int fd = fileno(file); if (fd == -1) { return aws_raise_error(AWS_ERROR_INVALID_FILE_HANDLE); } if (fstat(fd, &file_stats)) { int errno_value = errno; /* Always cache errno before potential side-effect */ return aws_translate_and_raise_io_error(errno_value); } *length = file_stats.st_size; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/mutex.c000066400000000000000000000033071456575232400244730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_mutex_clean_up(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex); if (mutex->initialized) { pthread_mutex_destroy(&mutex->mutex_handle); } AWS_ZERO_STRUCT(*mutex); } int aws_mutex_init(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex); pthread_mutexattr_t attr; int err_code = pthread_mutexattr_init(&attr); int return_code = AWS_OP_SUCCESS; if (!err_code) { err_code = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL); if (!err_code) { err_code = pthread_mutex_init(&mutex->mutex_handle, &attr); } if (err_code) { return_code = aws_private_convert_and_raise_error_code(err_code); } pthread_mutexattr_destroy(&attr); } else { return_code = aws_private_convert_and_raise_error_code(err_code); } mutex->initialized = (return_code == AWS_OP_SUCCESS); return return_code; } int aws_mutex_lock(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex && mutex->initialized); return aws_private_convert_and_raise_error_code(pthread_mutex_lock(&mutex->mutex_handle)); } int aws_mutex_try_lock(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex && mutex->initialized); return aws_private_convert_and_raise_error_code(pthread_mutex_trylock(&mutex->mutex_handle)); } int aws_mutex_unlock(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex && mutex->initialized); return aws_private_convert_and_raise_error_code(pthread_mutex_unlock(&mutex->mutex_handle)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/process.c000066400000000000000000000024441456575232400250100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include int aws_get_pid(void) { return (int)getpid(); } size_t aws_get_soft_limit_io_handles(void) { struct rlimit rlimit; AWS_ZERO_STRUCT(rlimit); AWS_FATAL_ASSERT( !getrlimit(RLIMIT_NOFILE, &rlimit) && "getrlimit() should never fail for RLIMIT_NOFILE regardless of user permissions"); return rlimit.rlim_cur; } size_t aws_get_hard_limit_io_handles(void) { struct rlimit rlimit; AWS_ZERO_STRUCT(rlimit); AWS_FATAL_ASSERT( !getrlimit(RLIMIT_NOFILE, &rlimit) && "getrlimit() should never fail for RLIMIT_NOFILE regardless of user permissions"); return rlimit.rlim_max; } int aws_set_soft_limit_io_handles(size_t max_handles) { size_t hard_limit = aws_get_hard_limit_io_handles(); if (max_handles > hard_limit) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct rlimit rlimit = { .rlim_cur = max_handles, .rlim_max = hard_limit, }; if (setrlimit(RLIMIT_NOFILE, &rlimit)) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/rw_lock.c000066400000000000000000000025701456575232400247720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include int aws_rw_lock_init(struct aws_rw_lock *lock) { return aws_private_convert_and_raise_error_code(pthread_rwlock_init(&lock->lock_handle, NULL)); } void aws_rw_lock_clean_up(struct aws_rw_lock *lock) { pthread_rwlock_destroy(&lock->lock_handle); } int aws_rw_lock_rlock(struct aws_rw_lock *lock) { return aws_private_convert_and_raise_error_code(pthread_rwlock_rdlock(&lock->lock_handle)); } int aws_rw_lock_wlock(struct aws_rw_lock *lock) { return aws_private_convert_and_raise_error_code(pthread_rwlock_wrlock(&lock->lock_handle)); } int aws_rw_lock_try_rlock(struct aws_rw_lock *lock) { return aws_private_convert_and_raise_error_code(pthread_rwlock_tryrdlock(&lock->lock_handle)); } int aws_rw_lock_try_wlock(struct aws_rw_lock *lock) { return aws_private_convert_and_raise_error_code(pthread_rwlock_trywrlock(&lock->lock_handle)); } int aws_rw_lock_runlock(struct aws_rw_lock *lock) { return aws_private_convert_and_raise_error_code(pthread_rwlock_unlock(&lock->lock_handle)); } int aws_rw_lock_wunlock(struct aws_rw_lock *lock) { return aws_private_convert_and_raise_error_code(pthread_rwlock_unlock(&lock->lock_handle)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/system_info.c000066400000000000000000000352401456575232400256710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #if defined(__FreeBSD__) || defined(__NetBSD__) # define __BSD_VISIBLE 1 #endif #if defined(__linux__) # include #endif #if defined(__linux__) || defined(__unix__) # include #endif #include #if defined(HAVE_SYSCONF) size_t aws_system_info_processor_count(void) { long nprocs = sysconf(_SC_NPROCESSORS_ONLN); if (AWS_LIKELY(nprocs >= 0)) { return (size_t)nprocs; } AWS_FATAL_POSTCONDITION(nprocs >= 0); return 0; } #else size_t aws_system_info_processor_count(void) { # if defined(AWS_NUM_CPU_CORES) AWS_FATAL_PRECONDITION(AWS_NUM_CPU_CORES > 0); return AWS_NUM_CPU_CORES; # else return 1; # endif } #endif #include #include uint16_t aws_get_cpu_group_count(void) { if (g_numa_num_configured_nodes_ptr) { return aws_max_u16(1, (uint16_t)g_numa_num_configured_nodes_ptr()); } return 1U; } size_t aws_get_cpu_count_for_group(uint16_t group_idx) { if (g_numa_node_of_cpu_ptr) { size_t total_cpus = aws_system_info_processor_count(); uint16_t cpu_count = 0; for (size_t i = 0; i < total_cpus; ++i) { if (group_idx == g_numa_node_of_cpu_ptr((int)i)) { cpu_count++; } } return cpu_count; } return aws_system_info_processor_count(); } void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length) { AWS_PRECONDITION(cpu_ids_array); if (!cpu_ids_array_length) { return; } /* go ahead and initialize everything. */ for (size_t i = 0; i < cpu_ids_array_length; ++i) { cpu_ids_array[i].cpu_id = -1; cpu_ids_array[i].suspected_hyper_thread = false; } if (g_numa_node_of_cpu_ptr) { size_t total_cpus = aws_system_info_processor_count(); size_t current_array_idx = 0; for (size_t i = 0; i < total_cpus && current_array_idx < cpu_ids_array_length; ++i) { if ((int)group_idx == g_numa_node_of_cpu_ptr((int)i)) { cpu_ids_array[current_array_idx].cpu_id = (int32_t)i; /* looking for an index jump is a more reliable way to find these. If they're in the group and then * the index jumps, say from 17 to 36, we're most-likely in hyper-thread land. Also, inside a node, * once we find the first hyper-thread, the remaining cores are also likely hyper threads. */ if (current_array_idx > 0 && (cpu_ids_array[current_array_idx - 1].suspected_hyper_thread || cpu_ids_array[current_array_idx - 1].cpu_id < ((int)i - 1))) { cpu_ids_array[current_array_idx].suspected_hyper_thread = true; } current_array_idx += 1; } } return; } /* a crude hint, but hyper-threads are numbered as the second half of the cpu id listing. The assumption if you * hit here is that this is just listing all cpus on the system. 
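 *
 * Worked example of the heuristic below for an 8-entry array:
 *   hyper_thread_hint = 8 / 2 - 1 = 3
 *   entries 0..3 get suspected_hyper_thread = false, entries 4..7 get true,
 * i.e. the second half of the cpu id listing is flagged as suspected hyper-threads.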
*/ size_t hyper_thread_hint = cpu_ids_array_length / 2 - 1; for (size_t i = 0; i < cpu_ids_array_length; ++i) { cpu_ids_array[i].cpu_id = (int32_t)i; cpu_ids_array[i].suspected_hyper_thread = i > hyper_thread_hint; } } bool aws_is_debugger_present(void) { /* Open the status file */ const int status_fd = open("/proc/self/status", O_RDONLY); if (status_fd == -1) { return false; } /* Read its contents */ char buf[4096]; const ssize_t num_read = read(status_fd, buf, sizeof(buf) - 1); close(status_fd); if (num_read <= 0) { return false; } buf[num_read] = '\0'; /* Search for the TracerPid field, which will indicate the debugger process */ const char tracerPidString[] = "TracerPid:"; const char *tracer_pid = strstr(buf, tracerPidString); if (!tracer_pid) { return false; } /* If it's not 0, then there's a debugger */ for (const char *cur = tracer_pid + sizeof(tracerPidString) - 1; cur <= buf + num_read; ++cur) { if (!aws_isspace(*cur)) { return aws_isdigit(*cur) && *cur != '0'; } } return false; } #include #ifndef __has_builtin # define __has_builtin(x) 0 #endif void aws_debug_break(void) { #ifdef DEBUG_BUILD if (aws_is_debugger_present()) { # if __has_builtin(__builtin_debugtrap) __builtin_debugtrap(); # else raise(SIGTRAP); # endif } #endif /* DEBUG_BUILD */ } #if defined(AWS_HAVE_EXECINFO) # include # include # define AWS_BACKTRACE_DEPTH 128 struct aws_stack_frame_info { char exe[PATH_MAX]; char addr[32]; char base[32]; /* base addr for dylib/exe */ char function[128]; }; /* Ensure only safe characters in a path buffer in case someone tries to rename the exe and trigger shell execution via the sub commands used to resolve symbols */ char *s_whitelist_chars(char *path) { char *cur = path; while (*cur) { bool whitelisted = aws_isalnum(*cur) || aws_isspace(*cur) || *cur == '/' || *cur == '_' || *cur == '.' 
|| (cur > path && *cur == '-'); if (!whitelisted) { *cur = '_'; } ++cur; } return path; } # if defined(__APPLE__) # include # include # include static char s_exe_path[PATH_MAX]; static const char *s_get_executable_path(void) { static const char *s_exe = NULL; if (AWS_LIKELY(s_exe)) { return s_exe; } uint32_t len = sizeof(s_exe_path); if (!_NSGetExecutablePath(s_exe_path, &len)) { s_exe = s_exe_path; } return s_exe; } int s_parse_symbol(const char *symbol, void *addr, struct aws_stack_frame_info *frame) { /* symbols look like: + */ const char *current_exe = s_get_executable_path(); /* parse exe/shared lib */ const char *exe_start = strstr(symbol, " "); while (aws_isspace(*exe_start)) { ++exe_start; } const char *exe_end = strstr(exe_start, " "); strncpy(frame->exe, exe_start, exe_end - exe_start); /* executables get basename'd, so restore the path */ if (strstr(current_exe, frame->exe)) { strncpy(frame->exe, current_exe, strlen(current_exe)); } s_whitelist_chars(frame->exe); /* parse addr */ const char *addr_start = strstr(exe_end, "0x"); const char *addr_end = strstr(addr_start, " "); strncpy(frame->addr, addr_start, addr_end - addr_start); /* parse function */ const char *function_start = strstr(addr_end, " ") + 1; const char *function_end = strstr(function_start, " "); /* truncate function name if needed */ size_t function_len = function_end - function_start; if (function_len >= (sizeof(frame->function) - 1)) { function_len = sizeof(frame->function) - 1; } strncpy(frame->function, function_start, function_len); /* find base addr for library/exe */ Dl_info addr_info; dladdr(addr, &addr_info); snprintf(frame->base, sizeof(frame->base), "0x%p", addr_info.dli_fbase); return AWS_OP_SUCCESS; } void s_resolve_cmd(char *cmd, size_t len, struct aws_stack_frame_info *frame) { snprintf(cmd, len, "atos -o %s -l %s %s", frame->exe, frame->base, frame->addr); } # else int s_parse_symbol(const char *symbol, void *addr, struct aws_stack_frame_info *frame) { /* symbols look like: (+) [0x] * or: [0x] * or: [0x] */ (void)addr; const char *open_paren = strstr(symbol, "("); const char *close_paren = strstr(symbol, ")"); const char *exe_end = open_paren; /* there may not be a function in parens, or parens at all */ if (open_paren == NULL || close_paren == NULL) { exe_end = strstr(symbol, "["); if (!exe_end) { return AWS_OP_ERR; } /* if exe_end == symbol, there's no exe */ if (exe_end != symbol) { exe_end -= 1; } } ptrdiff_t exe_len = exe_end - symbol; if (exe_len > 0) { strncpy(frame->exe, symbol, exe_len); } s_whitelist_chars(frame->exe); long function_len = (open_paren && close_paren) ? close_paren - open_paren - 1 : 0; if (function_len > 0) { /* dynamic symbol was found */ /* there might be (+) or just () */ const char *function_start = open_paren + 1; const char *plus = strstr(function_start, "+"); const char *function_end = (plus) ? 
plus : close_paren; if (function_end > function_start) { function_len = function_end - function_start; strncpy(frame->function, function_start, function_len); } else if (plus) { long addr_len = close_paren - plus - 1; strncpy(frame->addr, plus + 1, addr_len); } } if (frame->addr[0] == 0) { /* use the address in []'s, since it's all we have */ const char *addr_start = strstr(exe_end, "[") + 1; char *addr_end = strstr(addr_start, "]"); if (!addr_end) { return AWS_OP_ERR; } strncpy(frame->addr, addr_start, addr_end - addr_start); } return AWS_OP_SUCCESS; } void s_resolve_cmd(char *cmd, size_t len, struct aws_stack_frame_info *frame) { snprintf(cmd, len, "addr2line -afips -e %s %s", frame->exe, frame->addr); } # endif size_t aws_backtrace(void **stack_frames, size_t num_frames) { return backtrace(stack_frames, (int)aws_min_size(num_frames, INT_MAX)); } char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth) { return backtrace_symbols(stack_frames, (int)aws_min_size(stack_depth, INT_MAX)); } char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) { char **symbols = aws_backtrace_symbols(stack_frames, stack_depth); AWS_FATAL_ASSERT(symbols); struct aws_byte_buf lines; aws_byte_buf_init(&lines, aws_default_allocator(), stack_depth * 256); /* insert pointers for each stack entry */ memset(lines.buffer, 0, stack_depth * sizeof(void *)); lines.len += stack_depth * sizeof(void *); /* symbols look like: (+) [0x] * or: [0x] * start at 1 to skip the current frame (this function) */ for (size_t frame_idx = 0; frame_idx < stack_depth; ++frame_idx) { struct aws_stack_frame_info frame; AWS_ZERO_STRUCT(frame); const char *symbol = symbols[frame_idx]; if (s_parse_symbol(symbol, stack_frames[frame_idx], &frame)) { goto parse_failed; } /* TODO: Emulate libunwind */ char cmd[sizeof(struct aws_stack_frame_info)] = {0}; s_resolve_cmd(cmd, sizeof(cmd), &frame); FILE *out = popen(cmd, "r"); if (!out) { goto parse_failed; } char output[1024]; if (fgets(output, sizeof(output), out)) { /* if addr2line or atos don't know what to do with an address, they just echo it */ /* if there are spaces in the output, then they resolved something */ if (strstr(output, " ")) { symbol = output; } } pclose(out); parse_failed: /* record the pointer to where the symbol will be */ *((char **)&lines.buffer[frame_idx * sizeof(void *)]) = (char *)lines.buffer + lines.len; struct aws_byte_cursor line_cursor = aws_byte_cursor_from_c_str(symbol); line_cursor.len += 1; /* strings must be null terminated, make sure we copy the null */ aws_byte_buf_append_dynamic(&lines, &line_cursor); } free(symbols); return (char **)lines.buffer; /* caller is responsible for freeing */ } void aws_backtrace_print(FILE *fp, void *call_site_data) { siginfo_t *siginfo = call_site_data; if (siginfo) { fprintf(fp, "Signal received: %d, errno: %d\n", siginfo->si_signo, siginfo->si_errno); if (siginfo->si_signo == SIGSEGV) { fprintf(fp, " SIGSEGV @ 0x%p\n", siginfo->si_addr); } } void *stack_frames[AWS_BACKTRACE_DEPTH]; size_t stack_depth = aws_backtrace(stack_frames, AWS_BACKTRACE_DEPTH); char **symbols = aws_backtrace_symbols(stack_frames, stack_depth); if (symbols == NULL) { fprintf(fp, "Unable to decode backtrace via backtrace_symbols\n"); return; } fprintf(fp, "################################################################################\n"); fprintf(fp, "Stack trace:\n"); fprintf(fp, "################################################################################\n"); for (size_t frame_idx = 1; frame_idx < 
stack_depth; ++frame_idx) { const char *symbol = symbols[frame_idx]; fprintf(fp, "%s\n", symbol); } fflush(fp); free(symbols); } void aws_backtrace_log(int log_level) { void *stack_frames[AWS_BACKTRACE_DEPTH]; size_t num_frames = aws_backtrace(stack_frames, AWS_BACKTRACE_DEPTH); if (!num_frames) { AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "Unable to capture backtrace"); return; } char **symbols = aws_backtrace_symbols(stack_frames, num_frames); for (size_t line = 0; line < num_frames; ++line) { const char *symbol = symbols[line]; AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "%s", symbol); } free(symbols); } #else void aws_backtrace_print(FILE *fp, void *call_site_data) { (void)call_site_data; fprintf(fp, "No call stack information available\n"); } size_t aws_backtrace(void **stack_frames, size_t num_frames) { (void)stack_frames; (void)num_frames; return 0; } char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth) { (void)stack_frames; (void)stack_depth; return NULL; } char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) { (void)stack_frames; (void)stack_depth; return NULL; } void aws_backtrace_log(int log_level) { AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "aws_backtrace_log: no execinfo compatible backtrace API available"); } #endif /* AWS_HAVE_EXECINFO */ #if defined(AWS_OS_APPLE) enum aws_platform_os aws_get_platform_build_os(void) { return AWS_PLATFORM_OS_MAC; } #else enum aws_platform_os aws_get_platform_build_os(void) { return AWS_PLATFORM_OS_UNIX; } #endif /* AWS_OS_APPLE */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/system_resource_utils.c000066400000000000000000000016021456575232400300000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include int aws_init_memory_usage_for_current_process(struct aws_memory_usage_stats *memory_usage) { AWS_PRECONDITION(memory_usage); AWS_ZERO_STRUCT(*memory_usage); struct rusage usage; if (getrusage(RUSAGE_SELF, &usage)) { return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } #if defined(AWS_OS_APPLE) /* * For some reason Apple switched to reporting this in bytes instead of KB * around MacOS 10.6. * Make it back to KB. Result might be slightly off due to rounding. */ memory_usage->maxrss = usage.ru_maxrss / 1024; #else memory_usage->maxrss = usage.ru_maxrss; #endif memory_usage->page_faults = usage.ru_majflt; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/thread.c000066400000000000000000000402241456575232400245770ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #if !defined(__MACH__) # define _GNU_SOURCE /* NOLINT(bugprone-reserved-identifier) */ #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) || defined(__NetBSD__) # include typedef cpuset_t cpu_set_t; #elif defined(__OpenBSD__) # include #endif #if !defined(AWS_AFFINITY_METHOD) # error "Must provide a method for setting thread affinity" #endif // Possible methods for setting thread affinity #define AWS_AFFINITY_METHOD_NONE 0 #define AWS_AFFINITY_METHOD_PTHREAD_ATTR 1 #define AWS_AFFINITY_METHOD_PTHREAD 2 // Ensure provided affinity method matches one of the supported values // clang-format off #if AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_NONE \ && AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_PTHREAD_ATTR \ && AWS_AFFINITY_METHOD != AWS_AFFINITY_METHOD_PTHREAD // clang-format on # error "Invalid thread affinity method" #endif static struct aws_thread_options s_default_options = { /* this will make sure platform default stack size is used. */ .stack_size = 0, .cpu_id = -1, .join_strategy = AWS_TJS_MANUAL, }; struct thread_atexit_callback { aws_thread_atexit_fn *callback; void *user_data; struct thread_atexit_callback *next; }; struct thread_wrapper { struct aws_allocator *allocator; struct aws_linked_list_node node; void (*func)(void *arg); void *arg; struct thread_atexit_callback *atexit; void (*call_once)(void *); void *once_arg; struct aws_string *name; /* * The managed thread system does lazy joins on threads once finished via their wrapper. For that to work * we need something to join against, so we keep a by-value copy of the original thread here. The tricky part * is how to set the threadid/handle of this copy since the copy must be injected into the thread function before * the threadid/handle is known. We get around that by just querying it at the top of the wrapper thread function. */ struct aws_thread thread_copy; bool membind; }; static AWS_THREAD_LOCAL struct thread_wrapper *tl_wrapper = NULL; static void s_thread_wrapper_destroy(struct thread_wrapper *wrapper) { if (!wrapper) { return; } aws_string_destroy(wrapper->name); aws_mem_release(wrapper->allocator, wrapper); } /* * thread_wrapper is platform-dependent so this function ends up being duplicated in each thread implementation */ void aws_thread_join_and_free_wrapper_list(struct aws_linked_list *wrapper_list) { struct aws_linked_list_node *iter = aws_linked_list_begin(wrapper_list); while (iter != aws_linked_list_end(wrapper_list)) { struct thread_wrapper *join_thread_wrapper = AWS_CONTAINER_OF(iter, struct thread_wrapper, node); /* * Can't do a for-loop since we need to advance to the next wrapper before we free the wrapper */ iter = aws_linked_list_next(iter); join_thread_wrapper->thread_copy.detach_state = AWS_THREAD_JOINABLE; aws_thread_join(&join_thread_wrapper->thread_copy); /* * This doesn't actually do anything when using posix threads, but it keeps us * in sync with the Windows version as well as the lifecycle contract we're * presenting for threads. */ aws_thread_clean_up(&join_thread_wrapper->thread_copy); s_thread_wrapper_destroy(join_thread_wrapper); aws_thread_decrement_unjoined_count(); } } /* This must be called from the thread itself. 
* (only necessary for Apple, but we'll do it that way on every platform for consistency) */ static void s_set_thread_name(pthread_t thread_id, const char *name) { #if defined(__APPLE__) (void)thread_id; pthread_setname_np(name); #elif defined(AWS_PTHREAD_SETNAME_TAKES_2ARGS) pthread_setname_np(thread_id, name); #elif defined(AWS_PTHREAD_SET_NAME_TAKES_2ARGS) pthread_set_name_np(thread_id, name); #elif defined(AWS_PTHREAD_SETNAME_TAKES_3ARGS) pthread_setname_np(thread_id, name, NULL); #else (void)thread_id; (void)name; #endif } static void *thread_fn(void *arg) { struct thread_wrapper *wrapper_ptr = arg; /* * Make sure the aws_thread copy has the right thread id stored in it. */ wrapper_ptr->thread_copy.thread_id = aws_thread_current_thread_id(); /* If there's a name, set it. * Then free the aws_string before we make copies of the wrapper struct */ if (wrapper_ptr->name) { s_set_thread_name(wrapper_ptr->thread_copy.thread_id, aws_string_c_str(wrapper_ptr->name)); aws_string_destroy(wrapper_ptr->name); wrapper_ptr->name = NULL; } struct thread_wrapper wrapper = *wrapper_ptr; struct aws_allocator *allocator = wrapper.allocator; tl_wrapper = &wrapper; if (wrapper.membind && g_set_mempolicy_ptr) { AWS_LOGF_INFO( AWS_LS_COMMON_THREAD, "a cpu affinity was specified when launching this thread and set_mempolicy() is available on this " "system. Setting the memory policy to MPOL_PREFERRED"); /* if a user set a cpu id in their thread options, we're going to make sure the numa policy honors that * and makes sure the numa node of the cpu we launched this thread on is where memory gets allocated. However, * we don't want to fail the application if this fails, so make the call, and ignore the result. */ long resp = g_set_mempolicy_ptr(AWS_MPOL_PREFERRED_ALIAS, NULL, 0); int errno_value = errno; /* Always cache errno before potential side-effect */ if (resp) { AWS_LOGF_WARN(AWS_LS_COMMON_THREAD, "call to set_mempolicy() failed with errno %d", errno_value); } } wrapper.func(wrapper.arg); /* * Managed threads don't free the wrapper yet. The thread management system does it later after the thread * is joined. */ bool is_managed_thread = wrapper.thread_copy.detach_state == AWS_THREAD_MANAGED; if (!is_managed_thread) { s_thread_wrapper_destroy(wrapper_ptr); wrapper_ptr = NULL; } struct thread_atexit_callback *exit_callback_data = wrapper.atexit; while (exit_callback_data) { aws_thread_atexit_fn *exit_callback = exit_callback_data->callback; void *exit_callback_user_data = exit_callback_data->user_data; struct thread_atexit_callback *next_exit_callback_data = exit_callback_data->next; aws_mem_release(allocator, exit_callback_data); exit_callback(exit_callback_user_data); exit_callback_data = next_exit_callback_data; } tl_wrapper = NULL; /* * Release this thread to the managed thread system for lazy join. 
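 *
 * For reference, a minimal sketch of how a caller opts into this managed path
 * (thread, alloc, thread_main and ctx are illustrative names, not part of this file):
 *
 *   aws_thread_init(&thread, alloc);
 *   struct aws_thread_options options = *aws_default_thread_options();
 *   options.join_strategy = AWS_TJS_MANAGED;
 *   aws_thread_launch(&thread, thread_main, ctx, &options);
 *
 * No explicit aws_thread_join() follows; the wrapper queued here is joined and freed
 * later by aws_thread_join_and_free_wrapper_list().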
*/ if (is_managed_thread) { aws_thread_pending_join_add(&wrapper_ptr->node); } return NULL; } const struct aws_thread_options *aws_default_thread_options(void) { return &s_default_options; } void aws_thread_clean_up(struct aws_thread *thread) { if (thread->detach_state == AWS_THREAD_JOINABLE) { pthread_detach(thread->thread_id); } } static void s_call_once(void) { tl_wrapper->call_once(tl_wrapper->once_arg); } void aws_thread_call_once(aws_thread_once *flag, void (*call_once)(void *), void *user_data) { // If this is a non-aws_thread, then gin up a temp thread wrapper struct thread_wrapper temp_wrapper; if (!tl_wrapper) { tl_wrapper = &temp_wrapper; } tl_wrapper->call_once = call_once; tl_wrapper->once_arg = user_data; pthread_once(flag, s_call_once); if (tl_wrapper == &temp_wrapper) { tl_wrapper = NULL; } } int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator) { *thread = (struct aws_thread){.allocator = allocator, .detach_state = AWS_THREAD_NOT_CREATED}; return AWS_OP_SUCCESS; } int aws_thread_launch( struct aws_thread *thread, void (*func)(void *arg), void *arg, const struct aws_thread_options *options) { pthread_attr_t attributes; pthread_attr_t *attributes_ptr = NULL; int attr_return = 0; struct thread_wrapper *wrapper = NULL; bool is_managed_thread = options != NULL && options->join_strategy == AWS_TJS_MANAGED; if (is_managed_thread) { thread->detach_state = AWS_THREAD_MANAGED; } if (options) { attr_return = pthread_attr_init(&attributes); if (attr_return) { goto cleanup; } attributes_ptr = &attributes; if (options->stack_size > PTHREAD_STACK_MIN) { attr_return = pthread_attr_setstacksize(attributes_ptr, options->stack_size); if (attr_return) { goto cleanup; } } /* AFAIK you can't set thread affinity on apple platforms, and it doesn't really matter since all memory * NUMA or not is setup in interleave mode. * Thread affinity is also not supported on Android systems, and honestly, if you're running android on a NUMA * configuration, you've got bigger problems. */ #if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR if (options->cpu_id >= 0) { AWS_LOGF_INFO( AWS_LS_COMMON_THREAD, "id=%p: cpu affinity of cpu_id %d was specified, attempting to honor the value.", (void *)thread, options->cpu_id); cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET((uint32_t)options->cpu_id, &cpuset); attr_return = pthread_attr_setaffinity_np(attributes_ptr, sizeof(cpuset), &cpuset); if (attr_return) { AWS_LOGF_ERROR( AWS_LS_COMMON_THREAD, "id=%p: pthread_attr_setaffinity_np() failed with %d.", (void *)thread, attr_return); goto cleanup; } } #endif /* AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR */ } wrapper = aws_mem_calloc(thread->allocator, 1, sizeof(struct thread_wrapper)); if (options) { if (options->cpu_id >= 0) { wrapper->membind = true; } if (options->name.len > 0) { wrapper->name = aws_string_new_from_cursor(thread->allocator, &options->name); } } wrapper->thread_copy = *thread; wrapper->allocator = thread->allocator; wrapper->func = func; wrapper->arg = arg; /* * Increment the count prior to spawning the thread. Decrement back if the create failed. 
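 *
 * (Doing the increment before pthread_create() means the unjoined count already
 * accounts for this thread by the time it could possibly run and be queued for a
 * lazy join; the error path right after the create call rolls the count back.)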
*/ if (is_managed_thread) { aws_thread_increment_unjoined_count(); } attr_return = pthread_create(&thread->thread_id, attributes_ptr, thread_fn, (void *)wrapper); if (attr_return) { AWS_LOGF_ERROR(AWS_LS_COMMON_THREAD, "id=%p: pthread_create() failed with %d", (void *)thread, attr_return); if (is_managed_thread) { aws_thread_decrement_unjoined_count(); } goto cleanup; } #if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD /* If we don't have pthread_attr_setaffinity_np, we may * still be able to set the thread affinity after creation. */ if (options && options->cpu_id >= 0) { AWS_LOGF_INFO( AWS_LS_COMMON_THREAD, "id=%p: cpu affinity of cpu_id %d was specified, attempting to honor the value.", (void *)thread, options->cpu_id); cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET((uint32_t)options->cpu_id, &cpuset); /* If this fails, just warn. We can't fail anymore, the thread has already launched. */ int setaffinity_return = pthread_setaffinity_np(thread->thread_id, sizeof(cpuset), &cpuset); if (setaffinity_return) { AWS_LOGF_WARN( AWS_LS_COMMON_THREAD, "id=%p: pthread_setaffinity_np() failed with %d. Running thread without CPU affinity.", (void *)thread, setaffinity_return); } } #endif /* AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD */ /* * Managed threads need to stay unjoinable from an external perspective. We'll handle it after thread function * completion. */ if (is_managed_thread) { aws_thread_clean_up(thread); } else { thread->detach_state = AWS_THREAD_JOINABLE; } cleanup: if (attributes_ptr) { pthread_attr_destroy(attributes_ptr); } if (attr_return) { s_thread_wrapper_destroy(wrapper); switch (attr_return) { case EINVAL: return aws_raise_error(AWS_ERROR_THREAD_INVALID_SETTINGS); case EAGAIN: return aws_raise_error(AWS_ERROR_THREAD_INSUFFICIENT_RESOURCE); case EPERM: return aws_raise_error(AWS_ERROR_THREAD_NO_PERMISSIONS); case ENOMEM: return aws_raise_error(AWS_ERROR_OOM); default: return aws_raise_error(AWS_ERROR_UNKNOWN); } } return AWS_OP_SUCCESS; } aws_thread_id_t aws_thread_get_id(struct aws_thread *thread) { return thread->thread_id; } enum aws_thread_detach_state aws_thread_get_detach_state(struct aws_thread *thread) { return thread->detach_state; } int aws_thread_join(struct aws_thread *thread) { if (thread->detach_state == AWS_THREAD_JOINABLE) { int err_no = pthread_join(thread->thread_id, 0); if (err_no) { if (err_no == EINVAL) { return aws_raise_error(AWS_ERROR_THREAD_NOT_JOINABLE); } if (err_no == ESRCH) { return aws_raise_error(AWS_ERROR_THREAD_NO_SUCH_THREAD_ID); } if (err_no == EDEADLK) { return aws_raise_error(AWS_ERROR_THREAD_DEADLOCK_DETECTED); } } thread->detach_state = AWS_THREAD_JOIN_COMPLETED; } return AWS_OP_SUCCESS; } aws_thread_id_t aws_thread_current_thread_id(void) { return pthread_self(); } bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2) { return pthread_equal(t1, t2) != 0; } void aws_thread_current_sleep(uint64_t nanos) { uint64_t nano = 0; time_t seconds = (time_t)aws_timestamp_convert(nanos, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &nano); struct timespec tm = { .tv_sec = seconds, .tv_nsec = (long)nano, }; struct timespec output; nanosleep(&tm, &output); } int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data) { if (!tl_wrapper) { return aws_raise_error(AWS_ERROR_THREAD_NOT_JOINABLE); } struct thread_atexit_callback *cb = aws_mem_calloc(tl_wrapper->allocator, 1, sizeof(struct thread_atexit_callback)); if (!cb) { return AWS_OP_ERR; } cb->callback = callback; cb->user_data = user_data; cb->next = 
tl_wrapper->atexit; tl_wrapper->atexit = cb; return AWS_OP_SUCCESS; } int aws_thread_current_name(struct aws_allocator *allocator, struct aws_string **out_name) { return aws_thread_name(allocator, aws_thread_current_thread_id(), out_name); } #define THREAD_NAME_BUFFER_SIZE 256 int aws_thread_name(struct aws_allocator *allocator, aws_thread_id_t thread_id, struct aws_string **out_name) { *out_name = NULL; #if defined(AWS_PTHREAD_GETNAME_TAKES_2ARGS) || defined(AWS_PTHREAD_GETNAME_TAKES_3ARGS) || \ defined(AWS_PTHREAD_GET_NAME_TAKES_2_ARGS) char name[THREAD_NAME_BUFFER_SIZE] = {0}; # ifdef AWS_PTHREAD_GETNAME_TAKES_3ARGS if (pthread_getname_np(thread_id, name, THREAD_NAME_BUFFER_SIZE)) { # elif AWS_PTHREAD_GETNAME_TAKES_2ARGS if (pthread_getname_np(thread_id, name)) { # elif AWS_PTHREAD_GET_NAME_TAKES_2ARGS if (pthread_get_name_np(thread_id, name)) { # endif return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } *out_name = aws_string_new_from_c_str(allocator, name); return AWS_OP_SUCCESS; #else return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/posix/time.c000066400000000000000000000054351456575232400242730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #if defined(__ANDROID__) && !defined(__LP64__) /* * This branch brought to you by the kind folks at google chromium. It's been modified a bit, but * gotta give credit where it's due.... I'm not a lawyer so I'm just gonna drop their copyright * notification here to avoid all of that. */ /* * Copyright 2014 The Chromium Authors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * From src/base/os_compat_android.cc: */ # include static const time_t s_time_max = ~(1L << ((sizeof(time_t) * __CHAR_BIT__ - 1))); static const time_t s_time_min = (1L << ((sizeof(time_t)) * __CHAR_BIT__ - 1)); /* 32-bit Android has only timegm64() and not timegm(). 
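 *
 * aws_timegm() below forwards to timegm64() and then range-checks the 64-bit result
 * against s_time_min/s_time_max: with a 32-bit time_t those bounds are
 * (time_t)0x80000000 and 0x7FFFFFFF, so a date outside the representable window
 * (for example, anything past January 2038) makes aws_timegm() return -1 instead of
 * silently truncating.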
*/ time_t aws_timegm(struct tm *const t) { time64_t result = timegm64(t); if (result < s_time_min || result > s_time_max) { return -1; } return (time_t)result; } #else # ifndef __APPLE__ /* glibc.... you disappoint me.. */ extern time_t timegm(struct tm *); # endif time_t aws_timegm(struct tm *const t) { return timegm(t); } #endif /* defined(__ANDROID__) && !defined(__LP64__) */ void aws_localtime(time_t time, struct tm *t) { localtime_r(&time, t); } void aws_gmtime(time_t time, struct tm *t) { gmtime_r(&time, t); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/priority_queue.c000066400000000000000000000361551456575232400252630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #define PARENT_OF(index) (((index)&1) ? (index) >> 1 : (index) > 1 ? ((index)-2) >> 1 : 0) #define LEFT_OF(index) (((index) << 1) + 1) #define RIGHT_OF(index) (((index) << 1) + 2) static void s_swap(struct aws_priority_queue *queue, size_t a, size_t b) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(a < queue->container.length); AWS_PRECONDITION(b < queue->container.length); AWS_PRECONDITION(aws_priority_queue_backpointer_index_valid(queue, a)); AWS_PRECONDITION(aws_priority_queue_backpointer_index_valid(queue, b)); aws_array_list_swap(&queue->container, a, b); /* Invariant: If the backpointer array is initialized, we have enough room for all elements */ if (!AWS_IS_ZEROED(queue->backpointers)) { AWS_ASSERT(queue->backpointers.length > a); AWS_ASSERT(queue->backpointers.length > b); struct aws_priority_queue_node **bp_a = &((struct aws_priority_queue_node **)queue->backpointers.data)[a]; struct aws_priority_queue_node **bp_b = &((struct aws_priority_queue_node **)queue->backpointers.data)[b]; struct aws_priority_queue_node *tmp = *bp_a; *bp_a = *bp_b; *bp_b = tmp; if (*bp_a) { (*bp_a)->current_index = a; } if (*bp_b) { (*bp_b)->current_index = b; } } AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); AWS_POSTCONDITION(aws_priority_queue_backpointer_index_valid(queue, a)); AWS_POSTCONDITION(aws_priority_queue_backpointer_index_valid(queue, b)); } /* Precondition: with the exception of the given root element, the container must be * in heap order */ static bool s_sift_down(struct aws_priority_queue *queue, size_t root) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(root < queue->container.length); bool did_move = false; size_t len = aws_array_list_length(&queue->container); while (LEFT_OF(root) < len) { size_t left = LEFT_OF(root); size_t right = RIGHT_OF(root); size_t first = root; void *first_item = NULL; void *other_item = NULL; aws_array_list_get_at_ptr(&queue->container, &first_item, root); aws_array_list_get_at_ptr(&queue->container, &other_item, left); if (queue->pred(first_item, other_item) > 0) { first = left; first_item = other_item; } if (right < len) { aws_array_list_get_at_ptr(&queue->container, &other_item, right); /* choose the larger/smaller of the two in case of a max/min heap * respectively */ if (queue->pred(first_item, other_item) > 0) { first = right; first_item = other_item; } } if (first != root) { s_swap(queue, first, root); did_move = true; root = first; } else { break; } } AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return did_move; } /* Precondition: Elements prior to the specified index must be in heap order. 
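 *
 * Index arithmetic for the array-backed heap (see PARENT_OF/LEFT_OF/RIGHT_OF above),
 * worked for index 5: PARENT_OF(5) = 2, LEFT_OF(5) = 11, RIGHT_OF(5) = 12.
 * s_sift_up() below repeatedly swaps the element at `index` with its parent while
 * pred(parent, child) > 0, i.e. while the parent should be ordered after the child.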
*/ static bool s_sift_up(struct aws_priority_queue *queue, size_t index) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(index < queue->container.length); bool did_move = false; void *parent_item = NULL; void *child_item = NULL; size_t parent = PARENT_OF(index); while (index) { /* * These get_ats are guaranteed to be successful; if they are not, we have * serious state corruption, so just abort. */ if (aws_array_list_get_at_ptr(&queue->container, &parent_item, parent) || aws_array_list_get_at_ptr(&queue->container, &child_item, index)) { abort(); } if (queue->pred(parent_item, child_item) > 0) { s_swap(queue, index, parent); did_move = true; index = parent; parent = PARENT_OF(index); } else { break; } } AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return did_move; } /* * Precondition: With the exception of the given index, the heap condition holds for all elements. * In particular, the parent of the current index is a predecessor of all children of the current index. */ static void s_sift_either(struct aws_priority_queue *queue, size_t index) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(index < queue->container.length); if (!index || !s_sift_up(queue, index)) { s_sift_down(queue, index); } AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); } int aws_priority_queue_init_dynamic( struct aws_priority_queue *queue, struct aws_allocator *alloc, size_t default_size, size_t item_size, aws_priority_queue_compare_fn *pred) { AWS_FATAL_PRECONDITION(queue != NULL); AWS_FATAL_PRECONDITION(alloc != NULL); AWS_FATAL_PRECONDITION(item_size > 0); queue->pred = pred; AWS_ZERO_STRUCT(queue->backpointers); int ret = aws_array_list_init_dynamic(&queue->container, alloc, default_size, item_size); if (ret == AWS_OP_SUCCESS) { AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); } else { AWS_POSTCONDITION(AWS_IS_ZEROED(queue->container)); AWS_POSTCONDITION(AWS_IS_ZEROED(queue->backpointers)); } return ret; } void aws_priority_queue_init_static( struct aws_priority_queue *queue, void *heap, size_t item_count, size_t item_size, aws_priority_queue_compare_fn *pred) { AWS_FATAL_PRECONDITION(queue != NULL); AWS_FATAL_PRECONDITION(heap != NULL); AWS_FATAL_PRECONDITION(item_count > 0); AWS_FATAL_PRECONDITION(item_size > 0); queue->pred = pred; AWS_ZERO_STRUCT(queue->backpointers); aws_array_list_init_static(&queue->container, heap, item_count, item_size); AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); } bool aws_priority_queue_backpointer_index_valid(const struct aws_priority_queue *const queue, size_t index) { if (AWS_IS_ZEROED(queue->backpointers)) { return true; } if (index < queue->backpointers.length) { struct aws_priority_queue_node *node = ((struct aws_priority_queue_node **)queue->backpointers.data)[index]; return (node == NULL) || AWS_MEM_IS_WRITABLE(node, sizeof(struct aws_priority_queue_node)); } return false; } bool aws_priority_queue_backpointers_valid_deep(const struct aws_priority_queue *const queue) { if (!queue) { return false; } for (size_t i = 0; i < queue->backpointers.length; i++) { if (!aws_priority_queue_backpointer_index_valid(queue, i)) { return false; } } return true; } bool aws_priority_queue_backpointers_valid(const struct aws_priority_queue *const queue) { if (!queue) { return false; } /* Internal container validity */ bool backpointer_list_is_valid = (aws_array_list_is_valid(&queue->backpointers) && (queue->backpointers.current_size != 0) && (queue->backpointers.data != NULL)); /* Backpointer struct should either 
be zero or should be * initialized to be at most as long as the container, and having * as elements potentially null pointers to * aws_priority_queue_nodes */ bool backpointer_list_item_size = queue->backpointers.item_size == sizeof(struct aws_priority_queue_node *); bool lists_equal_lengths = queue->backpointers.length == queue->container.length; bool backpointers_non_zero_current_size = queue->backpointers.current_size > 0; /* This check must be guarded, as it is not efficient, neither * when running tests nor CBMC */ #if (AWS_DEEP_CHECKS == 1) bool backpointers_valid_deep = aws_priority_queue_backpointers_valid_deep(queue); #else bool backpointers_valid_deep = true; #endif bool backpointers_zero = (queue->backpointers.current_size == 0 && queue->backpointers.length == 0 && queue->backpointers.data == NULL); bool backpointer_struct_is_valid = backpointers_zero || (backpointer_list_item_size && lists_equal_lengths && backpointers_non_zero_current_size && backpointers_valid_deep); return ((backpointer_list_is_valid && backpointer_struct_is_valid) || AWS_IS_ZEROED(queue->backpointers)); } bool aws_priority_queue_is_valid(const struct aws_priority_queue *const queue) { /* Pointer validity checks */ if (!queue) { return false; } bool pred_is_valid = (queue->pred != NULL); bool container_is_valid = aws_array_list_is_valid(&queue->container); bool backpointers_valid = aws_priority_queue_backpointers_valid(queue); return pred_is_valid && container_is_valid && backpointers_valid; } void aws_priority_queue_clean_up(struct aws_priority_queue *queue) { aws_array_list_clean_up(&queue->container); if (!AWS_IS_ZEROED(queue->backpointers)) { aws_array_list_clean_up(&queue->backpointers); } } int aws_priority_queue_push(struct aws_priority_queue *queue, void *item) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(item && AWS_MEM_IS_READABLE(item, queue->container.item_size)); int rval = aws_priority_queue_push_ref(queue, item, NULL); AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return rval; } int aws_priority_queue_push_ref( struct aws_priority_queue *queue, void *item, struct aws_priority_queue_node *backpointer) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(item && AWS_MEM_IS_READABLE(item, queue->container.item_size)); int err = aws_array_list_push_back(&queue->container, item); if (err) { AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return err; } size_t index = aws_array_list_length(&queue->container) - 1; if (backpointer && !queue->backpointers.alloc) { if (!queue->container.alloc) { aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); goto backpointer_update_failed; } if (aws_array_list_init_dynamic( &queue->backpointers, queue->container.alloc, index + 1, sizeof(struct aws_priority_queue_node *))) { goto backpointer_update_failed; } /* When we initialize the backpointers array we need to zero out all existing entries */ memset(queue->backpointers.data, 0, queue->backpointers.current_size); } /* * Once we have any backpointers, we want to make sure we always have room in the backpointers array * for all elements; otherwise, sift_down gets complicated if it runs out of memory when sifting an * element with a backpointer down in the array. 
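 *
 * A minimal usage sketch of the backpointer mechanism this guards (queue, item and
 * node names are illustrative):
 *
 *   struct aws_priority_queue_node node;
 *   aws_priority_queue_node_init(&node);
 *   aws_priority_queue_push_ref(&queue, &item, &node);
 *   ...
 *   aws_priority_queue_remove(&queue, &removed_item, &node);  // targeted O(log n) removal
 *
 * The node's current_index is kept up to date by s_swap() whenever the element moves.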
*/ if (!AWS_IS_ZEROED(queue->backpointers)) { if (aws_array_list_set_at(&queue->backpointers, &backpointer, index)) { goto backpointer_update_failed; } } if (backpointer) { backpointer->current_index = index; } s_sift_up(queue, aws_array_list_length(&queue->container) - 1); AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return AWS_OP_SUCCESS; backpointer_update_failed: /* Failed to initialize or grow the backpointer array, back out the node addition */ aws_array_list_pop_back(&queue->container); AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return AWS_OP_ERR; } static int s_remove_node(struct aws_priority_queue *queue, void *item, size_t item_index) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size)); if (aws_array_list_get_at(&queue->container, item, item_index)) { /* shouldn't happen, but if it does we've already raised an error... */ AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return AWS_OP_ERR; } size_t swap_with = aws_array_list_length(&queue->container) - 1; struct aws_priority_queue_node *backpointer = NULL; if (item_index != swap_with) { s_swap(queue, item_index, swap_with); } aws_array_list_pop_back(&queue->container); if (!AWS_IS_ZEROED(queue->backpointers)) { aws_array_list_get_at(&queue->backpointers, &backpointer, swap_with); if (backpointer) { backpointer->current_index = SIZE_MAX; } aws_array_list_pop_back(&queue->backpointers); } if (item_index != swap_with) { s_sift_either(queue, item_index); } AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return AWS_OP_SUCCESS; } int aws_priority_queue_remove( struct aws_priority_queue *queue, void *item, const struct aws_priority_queue_node *node) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size)); AWS_PRECONDITION(node && AWS_MEM_IS_READABLE(node, sizeof(struct aws_priority_queue_node))); AWS_ERROR_PRECONDITION( node->current_index < aws_array_list_length(&queue->container), AWS_ERROR_PRIORITY_QUEUE_BAD_NODE); AWS_ERROR_PRECONDITION(queue->backpointers.data, AWS_ERROR_PRIORITY_QUEUE_BAD_NODE); int rval = s_remove_node(queue, item, node->current_index); AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return rval; } int aws_priority_queue_pop(struct aws_priority_queue *queue, void *item) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size)); AWS_ERROR_PRECONDITION(aws_array_list_length(&queue->container) != 0, AWS_ERROR_PRIORITY_QUEUE_EMPTY); int rval = s_remove_node(queue, item, 0); AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return rval; } int aws_priority_queue_top(const struct aws_priority_queue *queue, void **item) { AWS_ERROR_PRECONDITION(aws_array_list_length(&queue->container) != 0, AWS_ERROR_PRIORITY_QUEUE_EMPTY); return aws_array_list_get_at_ptr(&queue->container, item, 0); } size_t aws_priority_queue_size(const struct aws_priority_queue *queue) { return aws_array_list_length(&queue->container); } size_t aws_priority_queue_capacity(const struct aws_priority_queue *queue) { return aws_array_list_capacity(&queue->container); } void aws_priority_queue_clear(struct aws_priority_queue *queue) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); size_t backpointer_count = aws_array_list_length(&queue->backpointers); for (size_t i = 0; i < backpointer_count; ++i) { struct aws_priority_queue_node *node = NULL; 
aws_array_list_get_at(&queue->backpointers, &node, i); if (node != NULL) { node->current_index = SIZE_MAX; } } aws_array_list_clear(&queue->backpointers); aws_array_list_clear(&queue->container); AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); } void aws_priority_queue_node_init(struct aws_priority_queue_node *node) { node->current_index = SIZE_MAX; } bool aws_priority_queue_node_is_in_queue(const struct aws_priority_queue_node *node) { return node->current_index != SIZE_MAX; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/process_common.c000066400000000000000000000053561456575232400252230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include enum { MAX_BUFFER_SIZE = 2048 }; int aws_run_command_result_init(struct aws_allocator *allocator, struct aws_run_command_result *result) { if (!allocator || !result) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } AWS_ZERO_STRUCT(*result); return AWS_OP_SUCCESS; } void aws_run_command_result_cleanup(struct aws_run_command_result *result) { if (!result) { return; } aws_string_destroy_secure(result->std_out); aws_string_destroy_secure(result->std_err); } #if defined(AWS_OS_WINDOWS) && !defined(AWS_OS_WINDOWS_DESKTOP) int aws_run_command( struct aws_allocator *allocator, struct aws_run_command_options *options, struct aws_run_command_result *result) { (void)allocator; (void)options; (void)result; return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } #else int aws_run_command( struct aws_allocator *allocator, struct aws_run_command_options *options, struct aws_run_command_result *result) { AWS_FATAL_ASSERT(allocator); AWS_FATAL_ASSERT(options); AWS_FATAL_ASSERT(result); FILE *output_stream; char output_buffer[MAX_BUFFER_SIZE]; struct aws_byte_buf result_buffer; int ret = AWS_OP_ERR; if (aws_byte_buf_init(&result_buffer, allocator, MAX_BUFFER_SIZE)) { goto on_finish; } # if defined(AWS_OS_WINDOWS) output_stream = _popen(options->command, "r"); # else output_stream = popen(options->command, "r"); # endif if (output_stream) { while (!feof(output_stream)) { if (fgets(output_buffer, MAX_BUFFER_SIZE, output_stream) != NULL) { struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(output_buffer); if (aws_byte_buf_append_dynamic(&result_buffer, &cursor)) { goto on_finish; } } } # if defined(AWS_OS_WINDOWS) result->ret_code = _pclose(output_stream); # else result->ret_code = pclose(output_stream); # endif } struct aws_byte_cursor trim_cursor = aws_byte_cursor_from_buf(&result_buffer); struct aws_byte_cursor trimmed_cursor = aws_byte_cursor_trim_pred(&trim_cursor, aws_char_is_space); if (trimmed_cursor.len) { result->std_out = aws_string_new_from_array(allocator, trimmed_cursor.ptr, trimmed_cursor.len); if (!result->std_out) { goto on_finish; } } ret = AWS_OP_SUCCESS; on_finish: aws_byte_buf_clean_up_secure(&result_buffer); return ret; } #endif /* !AWS_OS_WINDOWS */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/promise.c000066400000000000000000000077441456575232400236560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include struct aws_promise { struct aws_allocator *allocator; struct aws_mutex mutex; struct aws_condition_variable cv; struct aws_ref_count rc; bool complete; int error_code; void *value; /* destructor for value, will be invoked if the value is not taken */ void (*dtor)(void *); }; static void s_aws_promise_dtor(void *ptr) { struct aws_promise *promise = ptr; aws_condition_variable_clean_up(&promise->cv); aws_mutex_clean_up(&promise->mutex); if (promise->value && promise->dtor) { promise->dtor(promise->value); } aws_mem_release(promise->allocator, promise); } struct aws_promise *aws_promise_new(struct aws_allocator *allocator) { struct aws_promise *promise = aws_mem_calloc(allocator, 1, sizeof(struct aws_promise)); promise->allocator = allocator; aws_ref_count_init(&promise->rc, promise, s_aws_promise_dtor); aws_mutex_init(&promise->mutex); aws_condition_variable_init(&promise->cv); return promise; } struct aws_promise *aws_promise_acquire(struct aws_promise *promise) { aws_ref_count_acquire(&promise->rc); return promise; } void aws_promise_release(struct aws_promise *promise) { aws_ref_count_release(&promise->rc); } static bool s_promise_completed(void *user_data) { struct aws_promise *promise = user_data; return promise->complete; } void aws_promise_wait(struct aws_promise *promise) { aws_mutex_lock(&promise->mutex); aws_condition_variable_wait_pred(&promise->cv, &promise->mutex, s_promise_completed, promise); aws_mutex_unlock(&promise->mutex); } bool aws_promise_wait_for(struct aws_promise *promise, size_t nanoseconds) { aws_mutex_lock(&promise->mutex); aws_condition_variable_wait_for_pred( &promise->cv, &promise->mutex, (int64_t)nanoseconds, s_promise_completed, promise); const bool complete = promise->complete; aws_mutex_unlock(&promise->mutex); return complete; } bool aws_promise_is_complete(struct aws_promise *promise) { aws_mutex_lock(&promise->mutex); const bool complete = promise->complete; aws_mutex_unlock(&promise->mutex); return complete; } void aws_promise_complete(struct aws_promise *promise, void *value, void (*dtor)(void *)) { aws_mutex_lock(&promise->mutex); AWS_FATAL_ASSERT(!promise->complete && "aws_promise_complete: cannot complete a promise more than once"); promise->value = value; promise->dtor = dtor; promise->complete = true; /* Notify before unlocking to prevent a race condition where the recipient spuriously * awakens after the unlock, sees a fulfilled promise, and attempts to free its resources * before the notification has actually occured. 
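 *
 * A minimal sketch of the completer/waiter pairing this protects (names are
 * illustrative; error handling elided):
 *
 *   // completing thread
 *   aws_promise_complete(promise, value, value_dtor);   // or aws_promise_fail(promise, err)
 *
 *   // waiting thread
 *   aws_promise_wait(promise);
 *   if (aws_promise_error_code(promise) == 0) {
 *       void *value = aws_promise_take_value(promise);  // ownership moves to the caller
 *   }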
*/ aws_condition_variable_notify_all(&promise->cv); aws_mutex_unlock(&promise->mutex); } void aws_promise_fail(struct aws_promise *promise, int error_code) { AWS_FATAL_ASSERT(error_code != 0 && "aws_promise_fail: cannot fail a promise with a 0 error_code"); aws_mutex_lock(&promise->mutex); AWS_FATAL_ASSERT(!promise->complete && "aws_promise_fail: cannot complete a promise more than once"); promise->error_code = error_code; promise->complete = true; aws_condition_variable_notify_all(&promise->cv); aws_mutex_unlock(&promise->mutex); } int aws_promise_error_code(struct aws_promise *promise) { AWS_FATAL_ASSERT(aws_promise_is_complete(promise)); return promise->error_code; } void *aws_promise_value(struct aws_promise *promise) { AWS_FATAL_ASSERT(aws_promise_is_complete(promise)); return promise->value; } void *aws_promise_take_value(struct aws_promise *promise) { AWS_FATAL_ASSERT(aws_promise_is_complete(promise)); void *value = promise->value; promise->value = NULL; promise->dtor = NULL; return value; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/ref_count.c000066400000000000000000000021001456575232400241410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_ref_count_init(struct aws_ref_count *ref_count, void *object, aws_simple_completion_callback *on_zero_fn) { aws_atomic_init_int(&ref_count->ref_count, 1); ref_count->object = object; ref_count->on_zero_fn = on_zero_fn; } void *aws_ref_count_acquire(struct aws_ref_count *ref_count) { size_t old_value = aws_atomic_fetch_add(&ref_count->ref_count, 1); AWS_ASSERT(old_value > 0 && "refcount has been zero, it's invalid to use it again."); (void)old_value; return ref_count->object; } size_t aws_ref_count_release(struct aws_ref_count *ref_count) { size_t old_value = aws_atomic_fetch_sub(&ref_count->ref_count, 1); AWS_ASSERT(old_value > 0 && "refcount has gone negative"); if (old_value == 1) { ref_count->on_zero_fn(ref_count->object); } return old_value - 1; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/ring_buffer.c000066400000000000000000000277001456575232400244620ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #ifdef CBMC # define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \ dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order); \ assert(__CPROVER_same_object(dest_ptr, ring_buf->allocation)); \ assert(aws_ring_buffer_check_atomic_ptr(ring_buf, dest_ptr)); # define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \ assert(aws_ring_buffer_check_atomic_ptr(ring_buf, src_ptr)); \ aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order); #else # define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \ dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order); # define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \ aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order); #endif #define AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, dest_ptr) \ AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->tail, aws_memory_order_acquire); #define AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, src_ptr) \ AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->tail, src_ptr, aws_memory_order_release); #define AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, dest_ptr) \ AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->head, aws_memory_order_relaxed); #define AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, src_ptr) \ AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->head, src_ptr, aws_memory_order_relaxed); int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size) { AWS_PRECONDITION(ring_buf != NULL); AWS_PRECONDITION(allocator != NULL); AWS_PRECONDITION(size > 0); AWS_ZERO_STRUCT(*ring_buf); ring_buf->allocation = aws_mem_acquire(allocator, size); if (!ring_buf->allocation) { return AWS_OP_ERR; } ring_buf->allocator = allocator; aws_atomic_init_ptr(&ring_buf->head, ring_buf->allocation); aws_atomic_init_ptr(&ring_buf->tail, ring_buf->allocation); ring_buf->allocation_end = ring_buf->allocation + size; AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); return AWS_OP_SUCCESS; } void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf) { AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf)); if (ring_buf->allocation) { aws_mem_release(ring_buf->allocator, ring_buf->allocation); } AWS_ZERO_STRUCT(*ring_buf); } int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest) { AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_PRECONDITION(aws_byte_buf_is_valid(dest)); AWS_ERROR_PRECONDITION(requested_size != 0); uint8_t *tail_cpy; uint8_t *head_cpy; AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy); AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy); /* this branch is, we don't have any vended buffers. */ if (head_cpy == tail_cpy) { size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation; if (requested_size > ring_space) { AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return aws_raise_error(AWS_ERROR_OOM); } AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size); AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation); *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } /* you'll constantly bounce between the next two branches as the ring buffer is traversed. 
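 *
 * Pictorially (A = allocation, E = allocation_end, '#' = bytes currently vended):
 *
 *   after N wraps:      A ....tail########head......E   free: [head, E) then [A, tail)
 *   after N + 1 wraps:  A ####head..........tail####E   free: [head, tail - 1)
 *
 * Acquisition prefers the head-side region and only falls back to the start of the
 * allocation when the head side cannot satisfy the request.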
*/ /* after N + 1 wraps */ if (tail_cpy > head_cpy) { size_t space = tail_cpy - head_cpy - 1; if (space >= requested_size) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size); *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } /* After N wraps */ } else if (tail_cpy < head_cpy) { /* prefer the head space for efficiency. */ if ((size_t)(ring_buf->allocation_end - head_cpy) >= requested_size) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size); *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } if ((size_t)(tail_cpy - ring_buf->allocation) > requested_size) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size); *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } } AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return aws_raise_error(AWS_ERROR_OOM); } int aws_ring_buffer_acquire_up_to( struct aws_ring_buffer *ring_buf, size_t minimum_size, size_t requested_size, struct aws_byte_buf *dest) { AWS_PRECONDITION(requested_size >= minimum_size); AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_PRECONDITION(aws_byte_buf_is_valid(dest)); if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) { AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } uint8_t *tail_cpy; uint8_t *head_cpy; AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy); AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy); /* this branch is, we don't have any vended buffers. */ if (head_cpy == tail_cpy) { size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation; size_t allocation_size = ring_space > requested_size ? requested_size : ring_space; if (allocation_size < minimum_size) { AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return aws_raise_error(AWS_ERROR_OOM); } /* go as big as we can. */ /* we don't have any vended, so this should be safe. */ AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size); AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation); *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */ /* after N + 1 wraps */ if (tail_cpy > head_cpy) { size_t space = tail_cpy - head_cpy; /* this shouldn't be possible. */ AWS_ASSERT(space); space -= 1; size_t returnable_size = space > requested_size ? 
requested_size : space; if (returnable_size >= minimum_size) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size); *dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } /* after N wraps */ } else if (tail_cpy < head_cpy) { size_t head_space = ring_buf->allocation_end - head_cpy; size_t tail_space = tail_cpy - ring_buf->allocation; /* if you can vend the whole thing do it. Also prefer head space to tail space. */ if (head_space >= requested_size) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size); *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } if (tail_space > requested_size) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size); *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } /* now vend as much as possible, once again preferring head space. */ if (head_space >= minimum_size && head_space >= tail_space) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space); *dest = aws_byte_buf_from_empty_array(head_cpy, head_space); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } if (tail_space > minimum_size) { AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1); *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return AWS_OP_SUCCESS; } } AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf)); AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); return aws_raise_error(AWS_ERROR_OOM); } static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) { #ifdef CBMC /* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined * (C11 6.5.8) */ return ( __CPROVER_same_object(buf->buffer, ring_buffer->allocation) && AWS_IMPLIES( ring_buffer->allocation_end != NULL, __CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1))); #endif return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end && buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end; } void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf) { AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer)); AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(s_buf_belongs_to_pool(ring_buffer, buf)); AWS_ATOMIC_STORE_TAIL_PTR(ring_buffer, buf->buffer + buf->capacity); AWS_ZERO_STRUCT(*buf); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer)); } bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) { AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer)); AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); bool rval = s_buf_belongs_to_pool(ring_buffer, buf); AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer)); AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return rval; } 
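/*
 * Illustrative usage sketch, not part of the upstream ring_buffer.c above: it shows one way to acquire a
 * region from an aws_ring_buffer, fill it, and release it. Only functions and types defined above are used;
 * the static helper name s_ring_buffer_usage_sketch and the sizes are invented for the example, and error
 * handling is abbreviated.
 */
static void s_ring_buffer_usage_sketch(struct aws_allocator *allocator) {
    struct aws_ring_buffer ring;
    if (aws_ring_buffer_init(&ring, allocator, 1024) != AWS_OP_SUCCESS) {
        return; /* backing allocation failed */
    }

    struct aws_byte_buf region;
    AWS_ZERO_STRUCT(region);

    /* Ask for exactly 256 bytes. aws_ring_buffer_acquire_up_to() accepts a
     * minimum/maximum pair instead when a smaller region is acceptable. */
    if (aws_ring_buffer_acquire(&ring, 256, &region) == AWS_OP_SUCCESS) {
        /* ... write up to region.capacity bytes into region.buffer ... */

        /* Release advances the tail past this region and zeroes the byte buf. */
        aws_ring_buffer_release(&ring, &region);
    }

    aws_ring_buffer_clean_up(&ring);
}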
aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/statistics.c000066400000000000000000000014301456575232400243540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include void aws_crt_statistics_handler_process_statistics( struct aws_crt_statistics_handler *handler, struct aws_crt_statistics_sample_interval *interval, struct aws_array_list *stats, void *context) { handler->vtable->process_statistics(handler, interval, stats, context); } uint64_t aws_crt_statistics_handler_get_report_interval_ms(struct aws_crt_statistics_handler *handler) { return handler->vtable->get_report_interval_ms(handler); } void aws_crt_statistics_handler_destroy(struct aws_crt_statistics_handler *handler) { if (handler == NULL) { return; } handler->vtable->destroy(handler); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/string.c000066400000000000000000000357711456575232400235070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #ifdef _WIN32 # include struct aws_wstring *aws_string_convert_to_wstring( struct aws_allocator *allocator, const struct aws_string *to_convert) { AWS_PRECONDITION(to_convert); struct aws_byte_cursor convert_cur = aws_byte_cursor_from_string(to_convert); return aws_string_convert_to_wchar_from_byte_cursor(allocator, &convert_cur); } struct aws_wstring *aws_string_convert_to_wchar_from_byte_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor *to_convert) { AWS_PRECONDITION(to_convert); /* if a length is passed for the to_convert string, converted size does not include the null terminator, * which is a good thing. */ int converted_size = MultiByteToWideChar(CP_UTF8, 0, (const char *)to_convert->ptr, (int)to_convert->len, NULL, 0); if (!converted_size) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } size_t str_len_size = 0; size_t malloc_size = 0; /* double the size because the return value above is # of characters, not bytes size. */ if (aws_mul_size_checked(sizeof(wchar_t), converted_size, &str_len_size)) { return NULL; } /* UTF-16, the NULL terminator is two bytes. */ if (aws_add_size_checked(sizeof(struct aws_wstring) + 2, str_len_size, &malloc_size)) { return NULL; } struct aws_wstring *str = aws_mem_acquire(allocator, malloc_size); if (!str) { return NULL; } /* Fields are declared const, so we need to copy them in like this */ *(struct aws_allocator **)(&str->allocator) = allocator; *(size_t *)(&str->len) = (size_t)converted_size; int converted_res = MultiByteToWideChar( CP_UTF8, 0, (const char *)to_convert->ptr, (int)to_convert->len, (wchar_t *)str->bytes, converted_size); /* windows had its chance to do its thing, no take backsies. */ AWS_FATAL_ASSERT(converted_res > 0); *(wchar_t *)&str->bytes[converted_size] = 0; return str; } struct aws_wstring *aws_wstring_new_from_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor *w_str_cur) { AWS_PRECONDITION(allocator && aws_byte_cursor_is_valid(w_str_cur)); return aws_wstring_new_from_array(allocator, (wchar_t *)w_str_cur->ptr, w_str_cur->len / sizeof(wchar_t)); } struct aws_wstring *aws_wstring_new_from_array(struct aws_allocator *allocator, const wchar_t *w_str, size_t len) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(AWS_MEM_IS_READABLE(w_str, len)); size_t str_byte_len = 0; size_t malloc_size = 0; /* double the size because the return value above is # of characters, not bytes size. 
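       (On Windows wchar_t is two bytes, so the byte length here is len * sizeof(wchar_t); the
       multiplication is overflow-checked.)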
*/ if (aws_mul_size_checked(sizeof(wchar_t), len, &str_byte_len)) { return NULL; } /* UTF-16, the NULL terminator is two bytes. */ if (aws_add_size_checked(sizeof(struct aws_wstring) + 2, str_byte_len, &malloc_size)) { return NULL; } struct aws_wstring *str = aws_mem_acquire(allocator, malloc_size); /* Fields are declared const, so we need to copy them in like this */ *(struct aws_allocator **)(&str->allocator) = allocator; *(size_t *)(&str->len) = len; if (len > 0) { memcpy((void *)str->bytes, w_str, str_byte_len); } /* in case this is a utf-16 string in the array, allow that here. */ *(wchar_t *)&str->bytes[len] = 0; AWS_RETURN_WITH_POSTCONDITION(str, aws_wstring_is_valid(str)); } bool aws_wstring_is_valid(const struct aws_wstring *str) { return str && AWS_MEM_IS_READABLE(&str->bytes[0], str->len + 1) && str->bytes[str->len] == 0; } void aws_wstring_destroy(struct aws_wstring *str) { AWS_PRECONDITION(!str || aws_wstring_is_valid(str)); if (str && str->allocator) { aws_mem_release(str->allocator, str); } } static struct aws_string *s_convert_from_wchar( struct aws_allocator *allocator, const wchar_t *to_convert, int len_chars) { AWS_FATAL_PRECONDITION(to_convert); int bytes_size = WideCharToMultiByte(CP_UTF8, 0, to_convert, len_chars, NULL, 0, NULL, NULL); if (!bytes_size) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } size_t malloc_size = 0; /* bytes_size already contains the space for the null terminator */ if (aws_add_size_checked(sizeof(struct aws_string), bytes_size, &malloc_size)) { return NULL; } struct aws_string *str = aws_mem_acquire(allocator, malloc_size); if (!str) { return NULL; } /* Fields are declared const, so we need to copy them in like this */ *(struct aws_allocator **)(&str->allocator) = allocator; *(size_t *)(&str->len) = (size_t)bytes_size - 1; int converted_res = WideCharToMultiByte(CP_UTF8, 0, to_convert, len_chars, (char *)str->bytes, bytes_size, NULL, NULL); /* windows had its chance to do its thing, no take backsies. 
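       (If this second WideCharToMultiByte call fails after the successful size probe above, the state is
       considered unrecoverable, hence the fatal assert that follows.)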
*/ AWS_FATAL_ASSERT(converted_res > 0); *(uint8_t *)&str->bytes[str->len] = 0; return str; } struct aws_string *aws_string_convert_from_wchar_str( struct aws_allocator *allocator, const struct aws_wstring *to_convert) { AWS_FATAL_PRECONDITION(to_convert); return s_convert_from_wchar(allocator, aws_wstring_c_str(to_convert), (int)aws_wstring_num_chars(to_convert)); } struct aws_string *aws_string_convert_from_wchar_c_str(struct aws_allocator *allocator, const wchar_t *to_convert) { return s_convert_from_wchar(allocator, to_convert, -1); } const wchar_t *aws_wstring_c_str(const struct aws_wstring *str) { AWS_PRECONDITION(str); return str->bytes; } size_t aws_wstring_num_chars(const struct aws_wstring *str) { AWS_PRECONDITION(str); if (str->len == 0) { return 0; } return str->len; } size_t aws_wstring_size_bytes(const struct aws_wstring *str) { AWS_PRECONDITION(str); return aws_wstring_num_chars(str) * sizeof(wchar_t); } #endif /* _WIN32 */ struct aws_string *aws_string_new_from_c_str(struct aws_allocator *allocator, const char *c_str) { AWS_PRECONDITION(allocator && c_str); return aws_string_new_from_array(allocator, (const uint8_t *)c_str, strlen(c_str)); } struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, const uint8_t *bytes, size_t len) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(AWS_MEM_IS_READABLE(bytes, len)); size_t malloc_size; if (aws_add_size_checked(sizeof(struct aws_string) + 1, len, &malloc_size)) { return NULL; } struct aws_string *str = aws_mem_acquire(allocator, malloc_size); if (!str) { return NULL; } /* Fields are declared const, so we need to copy them in like this */ *(struct aws_allocator **)(&str->allocator) = allocator; *(size_t *)(&str->len) = len; if (len > 0) { memcpy((void *)str->bytes, bytes, len); } *(uint8_t *)&str->bytes[len] = 0; AWS_RETURN_WITH_POSTCONDITION(str, aws_string_is_valid(str)); } struct aws_string *aws_string_new_from_string(struct aws_allocator *allocator, const struct aws_string *str) { AWS_PRECONDITION(allocator && aws_string_is_valid(str)); return aws_string_new_from_array(allocator, str->bytes, str->len); } struct aws_string *aws_string_new_from_cursor(struct aws_allocator *allocator, const struct aws_byte_cursor *cursor) { AWS_PRECONDITION(allocator && aws_byte_cursor_is_valid(cursor)); return aws_string_new_from_array(allocator, cursor->ptr, cursor->len); } struct aws_string *aws_string_new_from_buf(struct aws_allocator *allocator, const struct aws_byte_buf *buf) { AWS_PRECONDITION(allocator && aws_byte_buf_is_valid(buf)); return aws_string_new_from_array(allocator, buf->buffer, buf->len); } void aws_string_destroy(struct aws_string *str) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); if (str && str->allocator) { aws_mem_release(str->allocator, str); } } void aws_string_destroy_secure(struct aws_string *str) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); if (str) { aws_secure_zero((void *)aws_string_bytes(str), str->len); if (str->allocator) { aws_mem_release(str->allocator, str); } } } int aws_string_compare(const struct aws_string *a, const struct aws_string *b) { AWS_PRECONDITION(!a || aws_string_is_valid(a)); AWS_PRECONDITION(!b || aws_string_is_valid(b)); if (a == b) { return 0; /* strings identical */ } if (a == NULL) { return -1; } if (b == NULL) { return 1; } size_t len_a = a->len; size_t len_b = b->len; size_t min_len = len_a < len_b ? 
len_a : len_b; int ret = memcmp(aws_string_bytes(a), aws_string_bytes(b), min_len); AWS_POSTCONDITION(aws_string_is_valid(a)); AWS_POSTCONDITION(aws_string_is_valid(b)); if (ret) { return ret; /* overlapping characters differ */ } if (len_a == len_b) { return 0; /* strings identical */ } if (len_a > len_b) { return 1; /* string b is first n characters of string a */ } return -1; /* string a is first n characters of string b */ } int aws_array_list_comparator_string(const void *a, const void *b) { if (a == b) { return 0; /* strings identical */ } if (a == NULL) { return -1; } if (b == NULL) { return 1; } const struct aws_string *str_a = *(const struct aws_string **)a; const struct aws_string *str_b = *(const struct aws_string **)b; return aws_string_compare(str_a, str_b); } /** * Returns true if bytes of string are the same, false otherwise. */ bool aws_string_eq(const struct aws_string *a, const struct aws_string *b) { AWS_PRECONDITION(!a || aws_string_is_valid(a)); AWS_PRECONDITION(!b || aws_string_is_valid(b)); if (a == b) { return true; } if (a == NULL || b == NULL) { return false; } return aws_array_eq(a->bytes, a->len, b->bytes, b->len); } /** * Returns true if bytes of string are equivalent, using a case-insensitive comparison. */ bool aws_string_eq_ignore_case(const struct aws_string *a, const struct aws_string *b) { AWS_PRECONDITION(!a || aws_string_is_valid(a)); AWS_PRECONDITION(!b || aws_string_is_valid(b)); if (a == b) { return true; } if (a == NULL || b == NULL) { return false; } return aws_array_eq_ignore_case(a->bytes, a->len, b->bytes, b->len); } /** * Returns true if bytes of string and cursor are the same, false otherwise. */ bool aws_string_eq_byte_cursor(const struct aws_string *str, const struct aws_byte_cursor *cur) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); AWS_PRECONDITION(!cur || aws_byte_cursor_is_valid(cur)); if (str == NULL && cur == NULL) { return true; } if (str == NULL || cur == NULL) { return false; } return aws_array_eq(str->bytes, str->len, cur->ptr, cur->len); } /** * Returns true if bytes of string and cursor are equivalent, using a case-insensitive comparison. */ bool aws_string_eq_byte_cursor_ignore_case(const struct aws_string *str, const struct aws_byte_cursor *cur) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); AWS_PRECONDITION(!cur || aws_byte_cursor_is_valid(cur)); if (str == NULL && cur == NULL) { return true; } if (str == NULL || cur == NULL) { return false; } return aws_array_eq_ignore_case(str->bytes, str->len, cur->ptr, cur->len); } /** * Returns true if bytes of string and buffer are the same, false otherwise. */ bool aws_string_eq_byte_buf(const struct aws_string *str, const struct aws_byte_buf *buf) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf)); if (str == NULL && buf == NULL) { return true; } if (str == NULL || buf == NULL) { return false; } return aws_array_eq(str->bytes, str->len, buf->buffer, buf->len); } /** * Returns true if bytes of string and buffer are equivalent, using a case-insensitive comparison. 
*/ bool aws_string_eq_byte_buf_ignore_case(const struct aws_string *str, const struct aws_byte_buf *buf) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf)); if (str == NULL && buf == NULL) { return true; } if (str == NULL || buf == NULL) { return false; } return aws_array_eq_ignore_case(str->bytes, str->len, buf->buffer, buf->len); } bool aws_string_eq_c_str(const struct aws_string *str, const char *c_str) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); if (str == NULL && c_str == NULL) { return true; } if (str == NULL || c_str == NULL) { return false; } return aws_array_eq_c_str(str->bytes, str->len, c_str); } /** * Returns true if bytes of strings are equivalent, using a case-insensitive comparison. */ bool aws_string_eq_c_str_ignore_case(const struct aws_string *str, const char *c_str) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); if (str == NULL && c_str == NULL) { return true; } if (str == NULL || c_str == NULL) { return false; } return aws_array_eq_c_str_ignore_case(str->bytes, str->len, c_str); } bool aws_byte_buf_write_from_whole_string( struct aws_byte_buf *AWS_RESTRICT buf, const struct aws_string *AWS_RESTRICT src) { AWS_PRECONDITION(!buf || aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(!src || aws_string_is_valid(src)); if (buf == NULL || src == NULL) { return false; } return aws_byte_buf_write(buf, aws_string_bytes(src), src->len); } /** * Creates an aws_byte_cursor from an existing string. */ struct aws_byte_cursor aws_byte_cursor_from_string(const struct aws_string *src) { AWS_PRECONDITION(aws_string_is_valid(src)); return aws_byte_cursor_from_array(aws_string_bytes(src), src->len); } struct aws_string *aws_string_clone_or_reuse(struct aws_allocator *allocator, const struct aws_string *str) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(aws_string_is_valid(str)); if (str->allocator == NULL) { /* Since the string cannot be deallocated, we assume that it will remain valid for the lifetime of the * application */ AWS_POSTCONDITION(aws_string_is_valid(str)); return (struct aws_string *)str; } AWS_POSTCONDITION(aws_string_is_valid(str)); return aws_string_new_from_string(allocator, str); } int aws_secure_strlen(const char *str, size_t max_read_len, size_t *str_len) { AWS_ERROR_PRECONDITION(str && str_len, AWS_ERROR_INVALID_ARGUMENT); /* why not strnlen? It doesn't work everywhere as it wasn't standardized til C11, and is considered * a GNU extension. This should be faster anyways. This should work for ascii and utf8. * Any other character sets in use deserve what they get. */ char *null_char_ptr = memchr(str, '\0', max_read_len); if (null_char_ptr) { *str_len = null_char_ptr - str; return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/system_info.c000066400000000000000000000053741456575232400245340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void s_destroy_env(void *arg) { struct aws_system_environment *env = arg; if (env) { aws_system_environment_destroy_platform_impl(env); aws_mem_release(env->allocator, env); } } struct aws_system_environment *aws_system_environment_load(struct aws_allocator *allocator) { struct aws_system_environment *env = aws_mem_calloc(allocator, 1, sizeof(struct aws_system_environment)); env->allocator = allocator; aws_ref_count_init(&env->ref_count, env, s_destroy_env); if (aws_system_environment_load_platform_impl(env)) { AWS_LOGF_ERROR( AWS_LS_COMMON_GENERAL, "id=%p: failed to load system environment with error %s.", (void *)env, aws_error_debug_str(aws_last_error())); goto error; } AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "id=%p: virtualization vendor detected as \"" PRInSTR "\"", (void *)env, AWS_BYTE_CURSOR_PRI(aws_system_environment_get_virtualization_vendor(env))); AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "id=%p: virtualization product name detected as \"" PRInSTR " \"", (void *)env, AWS_BYTE_CURSOR_PRI(aws_system_environment_get_virtualization_vendor(env))); env->os = aws_get_platform_build_os(); env->cpu_count = aws_system_info_processor_count(); env->cpu_group_count = aws_get_cpu_group_count(); return env; error: s_destroy_env(env); return NULL; } struct aws_system_environment *aws_system_environment_acquire(struct aws_system_environment *env) { aws_ref_count_acquire(&env->ref_count); return env; } void aws_system_environment_release(struct aws_system_environment *env) { aws_ref_count_release(&env->ref_count); } struct aws_byte_cursor aws_system_environment_get_virtualization_vendor(const struct aws_system_environment *env) { struct aws_byte_cursor vendor_string = aws_byte_cursor_from_buf(&env->virtualization_vendor); return aws_byte_cursor_trim_pred(&vendor_string, aws_char_is_space); } struct aws_byte_cursor aws_system_environment_get_virtualization_product_name( const struct aws_system_environment *env) { struct aws_byte_cursor product_name_str = aws_byte_cursor_from_buf(&env->product_name); return aws_byte_cursor_trim_pred(&product_name_str, aws_char_is_space); } size_t aws_system_environment_get_processor_count(struct aws_system_environment *env) { return env->cpu_count; } AWS_COMMON_API size_t aws_system_environment_get_cpu_group_count(const struct aws_system_environment *env) { return env->cpu_group_count; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/task_scheduler.c000066400000000000000000000231351456575232400251700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include static const size_t DEFAULT_QUEUE_SIZE = 7; void aws_task_init(struct aws_task *task, aws_task_fn *fn, void *arg, const char *type_tag) { AWS_ZERO_STRUCT(*task); task->fn = fn; task->arg = arg; task->type_tag = type_tag; } const char *aws_task_status_to_c_str(enum aws_task_status status) { switch (status) { case AWS_TASK_STATUS_RUN_READY: return ""; case AWS_TASK_STATUS_CANCELED: return ""; default: return ""; } } void aws_task_run(struct aws_task *task, enum aws_task_status status) { AWS_ASSERT(task->fn); AWS_LOGF_DEBUG( AWS_LS_COMMON_TASK_SCHEDULER, "id=%p: Running %s task with %s status", (void *)task, task->type_tag, aws_task_status_to_c_str(status)); task->abi_extension.scheduled = false; task->fn(task, task->arg, status); } static int s_compare_timestamps(const void *a, const void *b) { uint64_t a_time = (*(struct aws_task **)a)->timestamp; uint64_t b_time = (*(struct aws_task **)b)->timestamp; return a_time > b_time; /* min-heap */ } static void s_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time, enum aws_task_status status); int aws_task_scheduler_init(struct aws_task_scheduler *scheduler, struct aws_allocator *alloc) { AWS_ASSERT(alloc); AWS_ZERO_STRUCT(*scheduler); if (aws_priority_queue_init_dynamic( &scheduler->timed_queue, alloc, DEFAULT_QUEUE_SIZE, sizeof(struct aws_task *), &s_compare_timestamps)) { return AWS_OP_ERR; }; scheduler->alloc = alloc; aws_linked_list_init(&scheduler->timed_list); aws_linked_list_init(&scheduler->asap_list); AWS_POSTCONDITION(aws_task_scheduler_is_valid(scheduler)); return AWS_OP_SUCCESS; } void aws_task_scheduler_clean_up(struct aws_task_scheduler *scheduler) { AWS_ASSERT(scheduler); if (aws_task_scheduler_is_valid(scheduler)) { /* Execute all remaining tasks as CANCELED. 
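         * Each task's fn still runs, just with AWS_TASK_STATUS_CANCELED, so it can release whatever it owns.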
* Do this in a loop so that tasks scheduled by other tasks are executed */ while (aws_task_scheduler_has_tasks(scheduler, NULL)) { s_run_all(scheduler, UINT64_MAX, AWS_TASK_STATUS_CANCELED); } } aws_priority_queue_clean_up(&scheduler->timed_queue); AWS_ZERO_STRUCT(*scheduler); } bool aws_task_scheduler_is_valid(const struct aws_task_scheduler *scheduler) { return scheduler && scheduler->alloc && aws_priority_queue_is_valid(&scheduler->timed_queue) && aws_linked_list_is_valid(&scheduler->asap_list) && aws_linked_list_is_valid(&scheduler->timed_list); } bool aws_task_scheduler_has_tasks(const struct aws_task_scheduler *scheduler, uint64_t *next_task_time) { AWS_ASSERT(scheduler); uint64_t timestamp = UINT64_MAX; bool has_tasks = false; if (!aws_linked_list_empty(&scheduler->asap_list)) { timestamp = 0; has_tasks = true; } else { /* Check whether timed_list or timed_queue has the earlier task */ if (AWS_UNLIKELY(!aws_linked_list_empty(&scheduler->timed_list))) { struct aws_linked_list_node *node = aws_linked_list_front(&scheduler->timed_list); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); timestamp = task->timestamp; has_tasks = true; } struct aws_task **task_ptrptr = NULL; if (aws_priority_queue_top(&scheduler->timed_queue, (void **)&task_ptrptr) == AWS_OP_SUCCESS) { if ((*task_ptrptr)->timestamp < timestamp) { timestamp = (*task_ptrptr)->timestamp; } has_tasks = true; } } if (next_task_time) { *next_task_time = timestamp; } return has_tasks; } void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struct aws_task *task) { AWS_ASSERT(scheduler); AWS_ASSERT(task); AWS_ASSERT(task->fn); AWS_LOGF_DEBUG( AWS_LS_COMMON_TASK_SCHEDULER, "id=%p: Scheduling %s task for immediate execution", (void *)task, task->type_tag); aws_priority_queue_node_init(&task->priority_queue_node); aws_linked_list_node_reset(&task->node); task->timestamp = 0; aws_linked_list_push_back(&scheduler->asap_list, &task->node); task->abi_extension.scheduled = true; } void aws_task_scheduler_schedule_future( struct aws_task_scheduler *scheduler, struct aws_task *task, uint64_t time_to_run) { AWS_ASSERT(scheduler); AWS_ASSERT(task); AWS_ASSERT(task->fn); AWS_LOGF_DEBUG( AWS_LS_COMMON_TASK_SCHEDULER, "id=%p: Scheduling %s task for future execution at time %" PRIu64, (void *)task, task->type_tag, time_to_run); task->timestamp = time_to_run; aws_priority_queue_node_init(&task->priority_queue_node); aws_linked_list_node_reset(&task->node); int err = aws_priority_queue_push_ref(&scheduler->timed_queue, &task, &task->priority_queue_node); if (AWS_UNLIKELY(err)) { /* In the (very unlikely) case that we can't push into the timed_queue, * perform a sorted insertion into timed_list. */ struct aws_linked_list_node *node_i; for (node_i = aws_linked_list_begin(&scheduler->timed_list); node_i != aws_linked_list_end(&scheduler->timed_list); node_i = aws_linked_list_next(node_i)) { struct aws_task *task_i = AWS_CONTAINER_OF(node_i, struct aws_task, node); if (task_i->timestamp > time_to_run) { break; } } aws_linked_list_insert_before(node_i, &task->node); } task->abi_extension.scheduled = true; } void aws_task_scheduler_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time) { AWS_ASSERT(scheduler); s_run_all(scheduler, current_time, AWS_TASK_STATUS_RUN_READY); } static void s_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time, enum aws_task_status status) { /* Move scheduled tasks to running_list before executing. 
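     * Tasks are drained from asap_list first, then due tasks (timestamp <= current_time) are merged in from
     * timed_list and timed_queue, preferring whichever is scheduled sooner.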
* This gives us the desired behavior that: if executing a task results in another task being scheduled, * that new task is not executed until the next time run() is invoked. */ struct aws_linked_list running_list; aws_linked_list_init(&running_list); /* First move everything from asap_list */ aws_linked_list_swap_contents(&running_list, &scheduler->asap_list); /* Next move tasks from timed_queue and timed_list, based on whichever's next-task is sooner. * It's very unlikely that any tasks are in timed_list, so once it has no more valid tasks, * break out of this complex loop in favor of a simpler one. */ while (AWS_UNLIKELY(!aws_linked_list_empty(&scheduler->timed_list))) { struct aws_linked_list_node *timed_list_node = aws_linked_list_begin(&scheduler->timed_list); struct aws_task *timed_list_task = AWS_CONTAINER_OF(timed_list_node, struct aws_task, node); if (timed_list_task->timestamp > current_time) { /* timed_list is out of valid tasks, break out of complex loop */ break; } /* Check if timed_queue has a task which is sooner */ struct aws_task **timed_queue_task_ptrptr = NULL; if (aws_priority_queue_top(&scheduler->timed_queue, (void **)&timed_queue_task_ptrptr) == AWS_OP_SUCCESS) { if ((*timed_queue_task_ptrptr)->timestamp <= current_time) { if ((*timed_queue_task_ptrptr)->timestamp < timed_list_task->timestamp) { /* Take task from timed_queue */ struct aws_task *timed_queue_task; aws_priority_queue_pop(&scheduler->timed_queue, &timed_queue_task); aws_linked_list_push_back(&running_list, &timed_queue_task->node); continue; } } } /* Take task from timed_list */ aws_linked_list_pop_front(&scheduler->timed_list); aws_linked_list_push_back(&running_list, &timed_list_task->node); } /* Simpler loop that moves remaining valid tasks from timed_queue */ struct aws_task **timed_queue_task_ptrptr = NULL; while (aws_priority_queue_top(&scheduler->timed_queue, (void **)&timed_queue_task_ptrptr) == AWS_OP_SUCCESS) { if ((*timed_queue_task_ptrptr)->timestamp > current_time) { break; } struct aws_task *next_timed_task; aws_priority_queue_pop(&scheduler->timed_queue, &next_timed_task); aws_linked_list_push_back(&running_list, &next_timed_task->node); } /* Run tasks */ while (!aws_linked_list_empty(&running_list)) { struct aws_linked_list_node *task_node = aws_linked_list_pop_front(&running_list); struct aws_task *task = AWS_CONTAINER_OF(task_node, struct aws_task, node); aws_task_run(task, status); } } void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct aws_task *task) { /* attempt the linked lists first since those will be faster access and more likely to occur * anyways. */ if (task->node.next) { aws_linked_list_remove(&task->node); } else if (task->abi_extension.scheduled) { aws_priority_queue_remove(&scheduler->timed_queue, &task, &task->priority_queue_node); } /* * No need to log cancellation specially; it will get logged during the run call with the canceled status */ aws_task_run(task, AWS_TASK_STATUS_CANCELED); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/thread_scheduler.c000066400000000000000000000221031456575232400254670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include struct aws_thread_scheduler { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct aws_thread thread; struct aws_task_scheduler scheduler; struct aws_atomic_var should_exit; struct { struct aws_linked_list scheduling_queue; struct aws_linked_list cancel_queue; struct aws_mutex mutex; struct aws_condition_variable c_var; } thread_data; }; struct cancellation_node { struct aws_task *task_to_cancel; struct aws_linked_list_node node; }; static void s_destroy_callback(void *arg) { struct aws_thread_scheduler *scheduler = arg; aws_atomic_store_int(&scheduler->should_exit, 1U); aws_condition_variable_notify_all(&scheduler->thread_data.c_var); aws_thread_join(&scheduler->thread); aws_task_scheduler_clean_up(&scheduler->scheduler); aws_condition_variable_clean_up(&scheduler->thread_data.c_var); aws_mutex_clean_up(&scheduler->thread_data.mutex); aws_thread_clean_up(&scheduler->thread); aws_mem_release(scheduler->allocator, scheduler); } static bool s_thread_should_wake(void *arg) { struct aws_thread_scheduler *scheduler = arg; uint64_t current_time = 0; aws_high_res_clock_get_ticks(¤t_time); uint64_t next_scheduled_task = 0; aws_task_scheduler_has_tasks(&scheduler->scheduler, &next_scheduled_task); return aws_atomic_load_int(&scheduler->should_exit) || !aws_linked_list_empty(&scheduler->thread_data.scheduling_queue) || !aws_linked_list_empty(&scheduler->thread_data.cancel_queue) || (next_scheduled_task <= current_time); } static void s_thread_fn(void *arg) { struct aws_thread_scheduler *scheduler = arg; while (!aws_atomic_load_int(&scheduler->should_exit)) { /* move tasks from the mutex protected list to the scheduler. This is because we don't want to hold the lock * for the scheduler during run_all and then try and acquire the lock from another thread to schedule something * because that potentially would block the calling thread. */ struct aws_linked_list list_cpy; aws_linked_list_init(&list_cpy); struct aws_linked_list cancel_list_cpy; aws_linked_list_init(&cancel_list_cpy); AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!"); aws_linked_list_swap_contents(&scheduler->thread_data.scheduling_queue, &list_cpy); aws_linked_list_swap_contents(&scheduler->thread_data.cancel_queue, &cancel_list_cpy); AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!"); while (!aws_linked_list_empty(&list_cpy)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&list_cpy); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); if (task->timestamp) { aws_task_scheduler_schedule_future(&scheduler->scheduler, task, task->timestamp); } else { aws_task_scheduler_schedule_now(&scheduler->scheduler, task); } } /* now cancel the tasks. 
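           Each queued cancellation node hands its task to aws_task_scheduler_cancel_task(), which runs the
           task once with AWS_TASK_STATUS_CANCELED; the node itself is then freed.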
*/ while (!aws_linked_list_empty(&cancel_list_cpy)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&cancel_list_cpy); struct cancellation_node *cancellation_node = AWS_CONTAINER_OF(node, struct cancellation_node, node); aws_task_scheduler_cancel_task(&scheduler->scheduler, cancellation_node->task_to_cancel); aws_mem_release(scheduler->allocator, cancellation_node); } /* now run everything */ uint64_t current_time = 0; aws_high_res_clock_get_ticks(¤t_time); aws_task_scheduler_run_all(&scheduler->scheduler, current_time); uint64_t next_scheduled_task = 0; aws_task_scheduler_has_tasks(&scheduler->scheduler, &next_scheduled_task); int64_t timeout = 0; if (next_scheduled_task == UINT64_MAX) { /* at least wake up once per 30 seconds. */ timeout = (int64_t)30 * (int64_t)AWS_TIMESTAMP_NANOS; } else { timeout = (int64_t)(next_scheduled_task - current_time); } if (timeout > 0) { AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!"); aws_condition_variable_wait_for_pred( &scheduler->thread_data.c_var, &scheduler->thread_data.mutex, timeout, s_thread_should_wake, scheduler); AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!"); } } } struct aws_thread_scheduler *aws_thread_scheduler_new( struct aws_allocator *allocator, const struct aws_thread_options *thread_options) { struct aws_thread_scheduler *scheduler = aws_mem_calloc(allocator, 1, sizeof(struct aws_thread_scheduler)); if (!scheduler) { return NULL; } if (aws_thread_init(&scheduler->thread, allocator)) { goto clean_up; } AWS_FATAL_ASSERT(!aws_mutex_init(&scheduler->thread_data.mutex) && "mutex init failed!"); AWS_FATAL_ASSERT(!aws_condition_variable_init(&scheduler->thread_data.c_var) && "condition variable init failed!"); if (aws_task_scheduler_init(&scheduler->scheduler, allocator)) { goto thread_init; } scheduler->allocator = allocator; aws_atomic_init_int(&scheduler->should_exit, 0U); aws_ref_count_init(&scheduler->ref_count, scheduler, s_destroy_callback); aws_linked_list_init(&scheduler->thread_data.scheduling_queue); aws_linked_list_init(&scheduler->thread_data.cancel_queue); if (aws_thread_launch(&scheduler->thread, s_thread_fn, scheduler, thread_options)) { goto scheduler_init; } return scheduler; scheduler_init: aws_task_scheduler_clean_up(&scheduler->scheduler); thread_init: aws_condition_variable_clean_up(&scheduler->thread_data.c_var); aws_mutex_clean_up(&scheduler->thread_data.mutex); aws_thread_clean_up(&scheduler->thread); clean_up: aws_mem_release(allocator, scheduler); return NULL; } void aws_thread_scheduler_acquire(struct aws_thread_scheduler *scheduler) { aws_ref_count_acquire(&scheduler->ref_count); } void aws_thread_scheduler_release(const struct aws_thread_scheduler *scheduler) { aws_ref_count_release((struct aws_ref_count *)&scheduler->ref_count); } void aws_thread_scheduler_schedule_future( struct aws_thread_scheduler *scheduler, struct aws_task *task, uint64_t time_to_run) { task->timestamp = time_to_run; AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!"); aws_linked_list_push_back(&scheduler->thread_data.scheduling_queue, &task->node); AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!"); aws_condition_variable_notify_one(&scheduler->thread_data.c_var); } void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task) { aws_thread_scheduler_schedule_future(scheduler, task, 0U); } void 
aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task) { struct cancellation_node *cancellation_node = aws_mem_calloc(scheduler->allocator, 1, sizeof(struct cancellation_node)); AWS_FATAL_ASSERT(cancellation_node && "allocation failed for cancellation node!"); AWS_FATAL_ASSERT(!aws_mutex_lock(&scheduler->thread_data.mutex) && "mutex lock failed!"); struct aws_task *found_task = NULL; /* remove tasks that are still in the scheduling queue, but haven't made it to the scheduler yet. */ struct aws_linked_list_node *node = aws_linked_list_empty(&scheduler->thread_data.scheduling_queue) ? NULL : aws_linked_list_front(&scheduler->thread_data.scheduling_queue); while (node != NULL) { struct aws_task *potential_task = AWS_CONTAINER_OF(node, struct aws_task, node); if (potential_task == task) { found_task = potential_task; break; } if (aws_linked_list_node_next_is_valid(node)) { node = aws_linked_list_next(node); } else { node = NULL; } } if (found_task) { aws_linked_list_remove(&found_task->node); } cancellation_node->task_to_cancel = task; /* regardless put it in the cancel queue so the thread can call the task with canceled status. */ aws_linked_list_push_back(&scheduler->thread_data.cancel_queue, &cancellation_node->node); AWS_FATAL_ASSERT(!aws_mutex_unlock(&scheduler->thread_data.mutex) && "mutex unlock failed!"); /* notify so the loop knows to wakeup and process the cancellations. */ aws_condition_variable_notify_one(&scheduler->thread_data.c_var); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/thread_shared.c000066400000000000000000000133211456575232400247610ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include /* * lock guarding the unjoined thread count and pending join list */ static struct aws_mutex s_managed_thread_lock = AWS_MUTEX_INIT; static struct aws_condition_variable s_managed_thread_signal = AWS_CONDITION_VARIABLE_INIT; static uint64_t s_default_managed_join_timeout_ns = 0; /* * The number of successfully launched managed threads (or event loop threads which participate by inc/dec) that * have not been joined yet. */ static uint32_t s_unjoined_thread_count = 0; /* * A list of thread_wrapper structs for threads whose thread function has finished but join has not been called * yet for the thread. * * This list is only ever at most length one. 
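 * aws_thread_pending_join_add() swaps any previously pending wrapper out before pushing the caller's node and
 * then joins the swapped-out thread, so at most one finished-but-unjoined thread is ever parked here.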
*/ static struct aws_linked_list s_pending_join_managed_threads; void aws_thread_increment_unjoined_count(void) { aws_mutex_lock(&s_managed_thread_lock); ++s_unjoined_thread_count; aws_mutex_unlock(&s_managed_thread_lock); } void aws_thread_decrement_unjoined_count(void) { aws_mutex_lock(&s_managed_thread_lock); --s_unjoined_thread_count; aws_condition_variable_notify_one(&s_managed_thread_signal); aws_mutex_unlock(&s_managed_thread_lock); } size_t aws_thread_get_managed_thread_count(void) { size_t thread_count = 0; aws_mutex_lock(&s_managed_thread_lock); thread_count = s_unjoined_thread_count; aws_mutex_unlock(&s_managed_thread_lock); return thread_count; } static bool s_one_or_fewer_managed_threads_unjoined(void *context) { (void)context; return s_unjoined_thread_count <= 1; } void aws_thread_set_managed_join_timeout_ns(uint64_t timeout_in_ns) { aws_mutex_lock(&s_managed_thread_lock); s_default_managed_join_timeout_ns = timeout_in_ns; aws_mutex_unlock(&s_managed_thread_lock); } int aws_thread_join_all_managed(void) { struct aws_linked_list join_list; aws_mutex_lock(&s_managed_thread_lock); uint64_t timeout_in_ns = s_default_managed_join_timeout_ns; aws_mutex_unlock(&s_managed_thread_lock); uint64_t now_in_ns = 0; uint64_t timeout_timestamp_ns = 0; if (timeout_in_ns > 0) { aws_sys_clock_get_ticks(&now_in_ns); timeout_timestamp_ns = now_in_ns + timeout_in_ns; } bool successful = true; bool done = false; while (!done) { aws_mutex_lock(&s_managed_thread_lock); /* * We lazily join old threads as newer ones finish their thread function. This means that when called from * the main thread, there will always be one last thread (whichever completion serialized last) that is our * responsibility to join (as long as at least one managed thread was created). So we wait for a count <= 1 * rather than what you'd normally expect (0). * * Absent a timeout, we only terminate if there are no threads left so it is possible to spin-wait a while * if there is a single thread still running. */ if (timeout_timestamp_ns > 0) { uint64_t wait_ns = 0; /* * now_in_ns is always refreshed right before this either outside the loop before the first iteration or * after the previous wait when the overall timeout was checked. */ if (now_in_ns <= timeout_timestamp_ns) { wait_ns = timeout_timestamp_ns - now_in_ns; } aws_condition_variable_wait_for_pred( &s_managed_thread_signal, &s_managed_thread_lock, (int64_t)wait_ns, s_one_or_fewer_managed_threads_unjoined, NULL); } else { aws_condition_variable_wait_pred( &s_managed_thread_signal, &s_managed_thread_lock, s_one_or_fewer_managed_threads_unjoined, NULL); } done = s_unjoined_thread_count == 0; aws_sys_clock_get_ticks(&now_in_ns); if (timeout_timestamp_ns != 0 && now_in_ns >= timeout_timestamp_ns) { done = true; successful = false; } aws_linked_list_init(&join_list); aws_linked_list_swap_contents(&join_list, &s_pending_join_managed_threads); aws_mutex_unlock(&s_managed_thread_lock); /* * Join against any finished threads. These threads are guaranteed to: * (1) Not be the current thread * (2) Have already ran to user thread_function completion * * The number of finished threads on any iteration is at most one. */ aws_thread_join_and_free_wrapper_list(&join_list); } return successful ? 
AWS_OP_SUCCESS : AWS_OP_ERR; } void aws_thread_pending_join_add(struct aws_linked_list_node *node) { struct aws_linked_list join_list; aws_linked_list_init(&join_list); aws_mutex_lock(&s_managed_thread_lock); /* * Swap out the pending join threads before adding this, otherwise we'd join against ourselves which won't work */ aws_linked_list_swap_contents(&join_list, &s_pending_join_managed_threads); aws_linked_list_push_back(&s_pending_join_managed_threads, node); aws_mutex_unlock(&s_managed_thread_lock); /* * Join against any finished threads. This thread (it's only ever going to be at most one) * is guaranteed to: * (1) Not be the current thread * (2) Has already ran to user thread_function completion */ aws_thread_join_and_free_wrapper_list(&join_list); } void aws_thread_initialize_thread_management(void) { aws_linked_list_init(&s_pending_join_managed_threads); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/uri.c000066400000000000000000000473461456575232400230010ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif enum parser_state { ON_SCHEME, ON_AUTHORITY, ON_PATH, ON_QUERY_STRING, FINISHED, ERROR, }; struct uri_parser { struct aws_uri *uri; enum parser_state state; }; /* strlen of UINT32_MAX "4294967295" is 10, plus 1 for '\0' */ #define PORT_BUFFER_SIZE 11 typedef void(parse_fn)(struct uri_parser *parser, struct aws_byte_cursor *str); static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *str); static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor *str); static void s_parse_path(struct uri_parser *parser, struct aws_byte_cursor *str); static void s_parse_query_string(struct uri_parser *parser, struct aws_byte_cursor *str); static parse_fn *s_states[] = { [ON_SCHEME] = s_parse_scheme, [ON_AUTHORITY] = s_parse_authority, [ON_PATH] = s_parse_path, [ON_QUERY_STRING] = s_parse_query_string, }; static int s_init_from_uri_str(struct aws_uri *uri) { struct uri_parser parser = { .state = ON_SCHEME, .uri = uri, }; struct aws_byte_cursor uri_cur = aws_byte_cursor_from_buf(&uri->uri_str); while (parser.state < FINISHED) { s_states[parser.state](&parser, &uri_cur); } /* Each state function sets the next state, if something goes wrong it sets it to ERROR which is > FINISHED */ if (parser.state == FINISHED) { return AWS_OP_SUCCESS; } aws_byte_buf_clean_up(&uri->uri_str); AWS_ZERO_STRUCT(*uri); return AWS_OP_ERR; } int aws_uri_init_parse(struct aws_uri *uri, struct aws_allocator *allocator, const struct aws_byte_cursor *uri_str) { AWS_ZERO_STRUCT(*uri); uri->self_size = sizeof(struct aws_uri); uri->allocator = allocator; if (aws_byte_buf_init_copy_from_cursor(&uri->uri_str, allocator, *uri_str)) { return AWS_OP_ERR; } return s_init_from_uri_str(uri); } int aws_uri_init_from_builder_options( struct aws_uri *uri, struct aws_allocator *allocator, struct aws_uri_builder_options *options) { AWS_ZERO_STRUCT(*uri); if (options->query_string.len && options->query_params) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } uri->self_size = sizeof(struct aws_uri); uri->allocator = allocator; size_t buffer_size = 0; if (options->scheme.len) { /* 3 for :// */ buffer_size += options->scheme.len + 3; } buffer_size += 
options->host_name.len; if (options->port) { buffer_size += PORT_BUFFER_SIZE; } buffer_size += options->path.len; if (options->query_params) { size_t query_len = aws_array_list_length(options->query_params); if (query_len) { /* for the '?' */ buffer_size += 1; for (size_t i = 0; i < query_len; ++i) { struct aws_uri_param *uri_param_ptr = NULL; int result = aws_array_list_get_at_ptr(options->query_params, (void **)&uri_param_ptr, i); AWS_FATAL_ASSERT(result == AWS_OP_SUCCESS); /* 2 == 1 for '&' and 1 for '='. who cares if we over-allocate a little? */ buffer_size += uri_param_ptr->key.len + uri_param_ptr->value.len + 2; } } } else if (options->query_string.len) { /* for the '?' */ buffer_size += 1; buffer_size += options->query_string.len; } if (aws_byte_buf_init(&uri->uri_str, allocator, buffer_size)) { return AWS_OP_ERR; } uri->uri_str.len = 0; if (options->scheme.len) { aws_byte_buf_append(&uri->uri_str, &options->scheme); struct aws_byte_cursor scheme_app = aws_byte_cursor_from_c_str("://"); aws_byte_buf_append(&uri->uri_str, &scheme_app); } aws_byte_buf_append(&uri->uri_str, &options->host_name); struct aws_byte_cursor port_app = aws_byte_cursor_from_c_str(":"); if (options->port) { aws_byte_buf_append(&uri->uri_str, &port_app); char port_arr[PORT_BUFFER_SIZE] = {0}; snprintf(port_arr, sizeof(port_arr), "%" PRIu32, options->port); struct aws_byte_cursor port_csr = aws_byte_cursor_from_c_str(port_arr); aws_byte_buf_append(&uri->uri_str, &port_csr); } aws_byte_buf_append(&uri->uri_str, &options->path); struct aws_byte_cursor query_app = aws_byte_cursor_from_c_str("?"); if (options->query_params) { struct aws_byte_cursor query_param_app = aws_byte_cursor_from_c_str("&"); struct aws_byte_cursor key_value_delim = aws_byte_cursor_from_c_str("="); aws_byte_buf_append(&uri->uri_str, &query_app); size_t query_len = aws_array_list_length(options->query_params); for (size_t i = 0; i < query_len; ++i) { struct aws_uri_param *uri_param_ptr = NULL; aws_array_list_get_at_ptr(options->query_params, (void **)&uri_param_ptr, i); aws_byte_buf_append(&uri->uri_str, &uri_param_ptr->key); aws_byte_buf_append(&uri->uri_str, &key_value_delim); aws_byte_buf_append(&uri->uri_str, &uri_param_ptr->value); if (i < query_len - 1) { aws_byte_buf_append(&uri->uri_str, &query_param_app); } } } else if (options->query_string.len) { aws_byte_buf_append(&uri->uri_str, &query_app); aws_byte_buf_append(&uri->uri_str, &options->query_string); } return s_init_from_uri_str(uri); } void aws_uri_clean_up(struct aws_uri *uri) { if (uri->uri_str.allocator) { aws_byte_buf_clean_up(&uri->uri_str); } AWS_ZERO_STRUCT(*uri); } const struct aws_byte_cursor *aws_uri_scheme(const struct aws_uri *uri) { return &uri->scheme; } const struct aws_byte_cursor *aws_uri_authority(const struct aws_uri *uri) { return &uri->authority; } const struct aws_byte_cursor *aws_uri_path(const struct aws_uri *uri) { return &uri->path; } const struct aws_byte_cursor *aws_uri_query_string(const struct aws_uri *uri) { return &uri->query_string; } const struct aws_byte_cursor *aws_uri_path_and_query(const struct aws_uri *uri) { return &uri->path_and_query; } const struct aws_byte_cursor *aws_uri_host_name(const struct aws_uri *uri) { return &uri->host_name; } uint32_t aws_uri_port(const struct aws_uri *uri) { return uri->port; } bool aws_query_string_next_param(struct aws_byte_cursor query_string, struct aws_uri_param *param) { /* If param is zeroed, then this is the first run. 
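       Callers iterate by zero-initializing the struct once and then passing it back in unchanged until the
       function returns false.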
*/ bool first_run = param->value.ptr == NULL; /* aws_byte_cursor_next_split() is used to iterate over params in the query string. * It takes an in/out substring arg similar to how this function works */ struct aws_byte_cursor substr; if (first_run) { /* substring must be zeroed to start */ AWS_ZERO_STRUCT(substr); } else { /* re-assemble substring which contained key and value */ substr.ptr = param->key.ptr; substr.len = (param->value.ptr - param->key.ptr) + param->value.len; } /* The do-while is to skip over any empty substrings */ do { if (!aws_byte_cursor_next_split(&query_string, '&', &substr)) { /* no more splits, done iterating */ return false; } } while (substr.len == 0); uint8_t *delim = memchr(substr.ptr, '=', substr.len); if (delim) { param->key.ptr = substr.ptr; param->key.len = delim - substr.ptr; param->value.ptr = delim + 1; param->value.len = substr.len - param->key.len - 1; } else { /* no '=', key gets substring, value is blank */ param->key = substr; param->value.ptr = substr.ptr + substr.len; param->value.len = 0; } return true; } int aws_query_string_params(struct aws_byte_cursor query_string_cursor, struct aws_array_list *out_params) { struct aws_uri_param param; AWS_ZERO_STRUCT(param); while (aws_query_string_next_param(query_string_cursor, ¶m)) { if (aws_array_list_push_back(out_params, ¶m)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } bool aws_uri_query_string_next_param(const struct aws_uri *uri, struct aws_uri_param *param) { return aws_query_string_next_param(uri->query_string, param); } int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list *out_params) { return aws_query_string_params(uri->query_string, out_params); } static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *str) { const uint8_t *location_of_colon = memchr(str->ptr, ':', str->len); if (!location_of_colon) { parser->state = ON_AUTHORITY; return; } /* make sure we didn't just pick up the port by mistake */ if ((size_t)(location_of_colon - str->ptr) < str->len && *(location_of_colon + 1) != '/') { parser->state = ON_AUTHORITY; return; } const size_t scheme_len = location_of_colon - str->ptr; parser->uri->scheme = aws_byte_cursor_advance(str, scheme_len); if (str->len < 3 || str->ptr[0] != ':' || str->ptr[1] != '/' || str->ptr[2] != '/') { aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); parser->state = ERROR; return; } /* advance past the "://" */ aws_byte_cursor_advance(str, 3); parser->state = ON_AUTHORITY; } static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor *str) { const uint8_t *location_of_slash = memchr(str->ptr, '/', str->len); const uint8_t *location_of_qmark = memchr(str->ptr, '?', str->len); if (!location_of_slash && !location_of_qmark && str->len) { parser->uri->authority.ptr = str->ptr; parser->uri->authority.len = str->len; parser->uri->path.ptr = NULL; parser->uri->path.len = 0; parser->uri->path_and_query = parser->uri->path; parser->state = FINISHED; aws_byte_cursor_advance(str, parser->uri->authority.len); } else if (!str->len) { parser->state = ERROR; aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); return; } else { const uint8_t *end = str->ptr + str->len; if (location_of_slash) { parser->state = ON_PATH; end = location_of_slash; } else if (location_of_qmark) { parser->state = ON_QUERY_STRING; end = location_of_qmark; } parser->uri->authority = aws_byte_cursor_advance(str, end - str->ptr); } struct aws_byte_cursor authority_parse_csr = parser->uri->authority; if (authority_parse_csr.len) { /* 
RFC-3986 section 3.2: authority = [ userinfo "@" ] host [ ":" port ] */ const uint8_t *userinfo_delim = memchr(authority_parse_csr.ptr, '@', authority_parse_csr.len); if (userinfo_delim) { parser->uri->userinfo = aws_byte_cursor_advance(&authority_parse_csr, userinfo_delim - authority_parse_csr.ptr); /* For the "@" mark */ aws_byte_cursor_advance(&authority_parse_csr, 1); struct aws_byte_cursor userinfo_parse_csr = parser->uri->userinfo; uint8_t *info_delim = memchr(userinfo_parse_csr.ptr, ':', userinfo_parse_csr.len); /* RFC-3986 section 3.2.1: Use of the format "user:password" in the userinfo field is deprecated. But we * treat the userinfo as URL here, also, if the format is not following URL pattern, you have the whole * userinfo */ /* RFC-1738 section 3.1: : */ if (info_delim) { parser->uri->user.ptr = userinfo_parse_csr.ptr; parser->uri->user.len = info_delim - userinfo_parse_csr.ptr; parser->uri->password.ptr = info_delim + 1; parser->uri->password.len = parser->uri->userinfo.len - parser->uri->user.len - 1; } else { parser->uri->user = userinfo_parse_csr; } } /* RFC-3986 section 3.2: host identified by IPv6 literal address is * enclosed within square brackets. We must ignore any colons within * IPv6 literals and only search for port delimiter after closing bracket.*/ const uint8_t *port_search_start = authority_parse_csr.ptr; size_t port_search_len = authority_parse_csr.len; if (authority_parse_csr.len > 0 && authority_parse_csr.ptr[0] == '[') { port_search_start = memchr(authority_parse_csr.ptr, ']', authority_parse_csr.len); if (!port_search_start) { parser->state = ERROR; aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); return; } port_search_len = authority_parse_csr.len - (port_search_start - authority_parse_csr.ptr); } const uint8_t *port_delim = memchr(port_search_start, ':', port_search_len); if (!port_delim) { parser->uri->port = 0; parser->uri->host_name = authority_parse_csr; return; } parser->uri->host_name.ptr = authority_parse_csr.ptr; parser->uri->host_name.len = port_delim - authority_parse_csr.ptr; size_t port_len = authority_parse_csr.len - parser->uri->host_name.len - 1; port_delim += 1; uint64_t port_u64 = 0; if (port_len > 0) { struct aws_byte_cursor port_cursor = aws_byte_cursor_from_array(port_delim, port_len); if (aws_byte_cursor_utf8_parse_u64(port_cursor, &port_u64)) { parser->state = ERROR; aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); return; } if (port_u64 > UINT32_MAX) { parser->state = ERROR; aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); return; } } parser->uri->port = (uint32_t)port_u64; } } static void s_parse_path(struct uri_parser *parser, struct aws_byte_cursor *str) { parser->uri->path_and_query = *str; const uint8_t *location_of_q_mark = memchr(str->ptr, '?', str->len); if (!location_of_q_mark) { parser->uri->path.ptr = str->ptr; parser->uri->path.len = str->len; parser->state = FINISHED; aws_byte_cursor_advance(str, parser->uri->path.len); return; } if (!str->len) { parser->state = ERROR; aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); return; } parser->uri->path.ptr = str->ptr; parser->uri->path.len = location_of_q_mark - str->ptr; aws_byte_cursor_advance(str, parser->uri->path.len); parser->state = ON_QUERY_STRING; } static void s_parse_query_string(struct uri_parser *parser, struct aws_byte_cursor *str) { if (!parser->uri->path_and_query.ptr) { parser->uri->path_and_query = *str; } /* we don't want the '?' character. 
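       The stored query_string cursor therefore starts one byte past the '?' and its length excludes it.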
*/ if (str->len) { parser->uri->query_string.ptr = str->ptr + 1; parser->uri->query_string.len = str->len - 1; } aws_byte_cursor_advance(str, parser->uri->query_string.len + 1); parser->state = FINISHED; } static uint8_t s_to_uppercase_hex(uint8_t value) { AWS_ASSERT(value < 16); if (value < 10) { return (uint8_t)('0' + value); } return (uint8_t)('A' + value - 10); } typedef void(unchecked_append_canonicalized_character_fn)(struct aws_byte_buf *buffer, uint8_t value); /* * Appends a character or its hex encoding to the buffer. We reserve enough space up front so that * we can do this with raw pointers rather than multiple function calls/cursors/etc... * * This function is for the uri path */ static void s_unchecked_append_canonicalized_path_character(struct aws_byte_buf *buffer, uint8_t value) { AWS_ASSERT(buffer->len + 3 <= buffer->capacity); uint8_t *dest_ptr = buffer->buffer + buffer->len; if (aws_isalnum(value)) { ++buffer->len; *dest_ptr = value; return; } switch (value) { /* non-alpha-numeric unreserved, don't % encode them */ case '-': case '_': case '.': case '~': /* reserved characters that we should not % encode in the path component */ case '/': ++buffer->len; *dest_ptr = value; return; /* * everything else we should % encode, including from the reserved list */ default: buffer->len += 3; *dest_ptr++ = '%'; *dest_ptr++ = s_to_uppercase_hex(value >> 4); *dest_ptr = s_to_uppercase_hex(value & 0x0F); return; } } /* * Appends a character or its hex encoding to the buffer. We reserve enough space up front so that * we can do this with raw pointers rather than multiple function calls/cursors/etc... * * This function is for query params */ static void s_raw_append_canonicalized_param_character(struct aws_byte_buf *buffer, uint8_t value) { AWS_ASSERT(buffer->len + 3 <= buffer->capacity); uint8_t *dest_ptr = buffer->buffer + buffer->len; if (aws_isalnum(value)) { ++buffer->len; *dest_ptr = value; return; } switch (value) { case '-': case '_': case '.': case '~': { ++buffer->len; *dest_ptr = value; return; } default: buffer->len += 3; *dest_ptr++ = '%'; *dest_ptr++ = s_to_uppercase_hex(value >> 4); *dest_ptr = s_to_uppercase_hex(value & 0x0F); return; } } /* * Writes a cursor to a buffer using the supplied encoding function. 
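*
* Illustrative usage sketch (added by the editor, not part of the upstream
* source), assuming the usual aws/common byte_buf and uri declarations are in
* scope. It is fenced with #if 0 so it can never affect the build.
*/
#if 0
static void s_example_uri_path_encoding(struct aws_allocator *allocator) {
    struct aws_byte_buf out;
    aws_byte_buf_init(&out, allocator, 16); /* grows as needed via reserve_relative below */

    struct aws_byte_cursor path = aws_byte_cursor_from_c_str("/a b/c");

    /* ' ' is percent-encoded, '/' is preserved: out now holds "/a%20b/c" */
    aws_byte_buf_append_encoding_uri_path(&out, &path);

    aws_byte_buf_clean_up(&out);
}
#endif
/*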
*/ static int s_encode_cursor_to_buffer( struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor, unchecked_append_canonicalized_character_fn *append_canonicalized_character) { const uint8_t *current_ptr = cursor->ptr; const uint8_t *end_ptr = cursor->ptr + cursor->len; /* * reserve room up front for the worst possible case: everything gets % encoded */ size_t capacity_needed = 0; if (AWS_UNLIKELY(aws_mul_size_checked(3, cursor->len, &capacity_needed))) { return AWS_OP_ERR; } if (aws_byte_buf_reserve_relative(buffer, capacity_needed)) { return AWS_OP_ERR; } while (current_ptr < end_ptr) { append_canonicalized_character(buffer, *current_ptr); ++current_ptr; } return AWS_OP_SUCCESS; } int aws_byte_buf_append_encoding_uri_path(struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor) { return s_encode_cursor_to_buffer(buffer, cursor, s_unchecked_append_canonicalized_path_character); } int aws_byte_buf_append_encoding_uri_param(struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor) { return s_encode_cursor_to_buffer(buffer, cursor, s_raw_append_canonicalized_param_character); } int aws_byte_buf_append_decoding_uri(struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor) { /* reserve room up front for worst possible case: no % and everything copies over 1:1 */ if (aws_byte_buf_reserve_relative(buffer, cursor->len)) { return AWS_OP_ERR; } /* advance over cursor */ struct aws_byte_cursor advancing = *cursor; uint8_t c; while (aws_byte_cursor_read_u8(&advancing, &c)) { if (c == '%') { /* two hex characters following '%' are the byte's value */ if (AWS_UNLIKELY(aws_byte_cursor_read_hex_u8(&advancing, &c) == false)) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } } buffer->buffer[buffer->len++] = c; } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/uuid.c000066400000000000000000000064121456575232400231350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #define HEX_CHAR_FMT "%02" SCNx8 #define UUID_FORMAT \ HEX_CHAR_FMT HEX_CHAR_FMT HEX_CHAR_FMT HEX_CHAR_FMT \ "-" HEX_CHAR_FMT HEX_CHAR_FMT "-" HEX_CHAR_FMT HEX_CHAR_FMT "-" HEX_CHAR_FMT HEX_CHAR_FMT \ "-" HEX_CHAR_FMT HEX_CHAR_FMT HEX_CHAR_FMT HEX_CHAR_FMT HEX_CHAR_FMT HEX_CHAR_FMT #include #ifdef _MSC_VER /* disables warning non const declared initializers for Microsoft compilers */ # pragma warning(disable : 4204) # pragma warning(disable : 4706) /* sscanf warning */ # pragma warning(disable : 4996) #endif int aws_uuid_init(struct aws_uuid *uuid) { struct aws_byte_buf buf = aws_byte_buf_from_empty_array(uuid->uuid_data, sizeof(uuid->uuid_data)); return aws_device_random_buffer(&buf); } int aws_uuid_init_from_str(struct aws_uuid *uuid, const struct aws_byte_cursor *uuid_str) { AWS_ERROR_PRECONDITION(uuid_str->len >= AWS_UUID_STR_LEN - 1, AWS_ERROR_INVALID_BUFFER_SIZE); char cpy[AWS_UUID_STR_LEN] = {0}; memcpy(cpy, uuid_str->ptr, AWS_UUID_STR_LEN - 1); AWS_ZERO_STRUCT(*uuid); if (16 != sscanf( cpy, UUID_FORMAT, &uuid->uuid_data[0], &uuid->uuid_data[1], &uuid->uuid_data[2], &uuid->uuid_data[3], &uuid->uuid_data[4], &uuid->uuid_data[5], &uuid->uuid_data[6], &uuid->uuid_data[7], &uuid->uuid_data[8], &uuid->uuid_data[9], &uuid->uuid_data[10], &uuid->uuid_data[11], &uuid->uuid_data[12], &uuid->uuid_data[13], &uuid->uuid_data[14], &uuid->uuid_data[15])) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } return AWS_OP_SUCCESS; } int aws_uuid_to_str(const struct aws_uuid *uuid, struct aws_byte_buf *output) { size_t space_remaining = output->capacity - output->len; AWS_ERROR_PRECONDITION(space_remaining >= AWS_UUID_STR_LEN, AWS_ERROR_SHORT_BUFFER); snprintf( (char *)(output->buffer + output->len), space_remaining, UUID_FORMAT, uuid->uuid_data[0], uuid->uuid_data[1], uuid->uuid_data[2], uuid->uuid_data[3], uuid->uuid_data[4], uuid->uuid_data[5], uuid->uuid_data[6], uuid->uuid_data[7], uuid->uuid_data[8], uuid->uuid_data[9], uuid->uuid_data[10], uuid->uuid_data[11], uuid->uuid_data[12], uuid->uuid_data[13], uuid->uuid_data[14], uuid->uuid_data[15]); output->len += AWS_UUID_STR_LEN - 1; return AWS_OP_SUCCESS; } bool aws_uuid_equals(const struct aws_uuid *a, const struct aws_uuid *b) { return 0 == memcmp(a->uuid_data, b->uuid_data, sizeof(a->uuid_data)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/000077500000000000000000000000001456575232400235125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/clock.c000066400000000000000000000052661456575232400247620ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include static const uint64_t FILE_TIME_TO_NS = 100; static const uint64_t EC_TO_UNIX_EPOCH = 11644473600LL; static const uint64_t WINDOWS_TICK = 10000000; static INIT_ONCE s_timefunc_init_once = INIT_ONCE_STATIC_INIT; typedef VOID WINAPI timefunc_t(LPFILETIME); static VOID WINAPI s_get_system_time_func_lazy_init(LPFILETIME filetime_p); static timefunc_t *volatile s_p_time_func = s_get_system_time_func_lazy_init; /* Convert a string from a macro to a wide string */ #define WIDEN2(s) L## #s #define WIDEN(s) WIDEN2(s) static BOOL CALLBACK s_get_system_time_init_once(PINIT_ONCE init_once, PVOID param, PVOID *context) { (void)init_once; (void)param; (void)context; HMODULE kernel = GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"); timefunc_t *time_func = (timefunc_t *)GetProcAddress(kernel, "GetSystemTimePreciseAsFileTime"); if (time_func == NULL) { time_func = GetSystemTimeAsFileTime; } s_p_time_func = time_func; return TRUE; } static VOID WINAPI s_get_system_time_func_lazy_init(LPFILETIME filetime_p) { BOOL status = InitOnceExecuteOnce(&s_timefunc_init_once, s_get_system_time_init_once, NULL, NULL); if (status) { (*s_p_time_func)(filetime_p); } else { /* Something went wrong in static initialization? Should never happen, but deal with it somehow...*/ GetSystemTimeAsFileTime(filetime_p); } } int aws_high_res_clock_get_ticks(uint64_t *timestamp) { LARGE_INTEGER ticks, frequency; /* QPC runs on sub-microsecond precision, convert to nanoseconds */ if (QueryPerformanceFrequency(&frequency) && QueryPerformanceCounter(&ticks)) { *timestamp = aws_timestamp_convert_u64( (uint64_t)ticks.QuadPart, (uint64_t)frequency.QuadPart, AWS_TIMESTAMP_NANOS, NULL); return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_CLOCK_FAILURE); } int aws_sys_clock_get_ticks(uint64_t *timestamp) { FILETIME ticks; /*GetSystemTimePreciseAsFileTime() returns 100 nanosecond precision. Convert to nanoseconds. *Also, this function returns a different epoch than unix, so we add a conversion to handle that as well. */ (*s_p_time_func)(&ticks); /*if this looks unnecessary, see: * https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284(v=vs.85).aspx */ ULARGE_INTEGER int_conv; int_conv.LowPart = ticks.dwLowDateTime; int_conv.HighPart = ticks.dwHighDateTime; *timestamp = (int_conv.QuadPart - (WINDOWS_TICK * EC_TO_UNIX_EPOCH)) * FILE_TIME_TO_NS; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/condition_variable.c000066400000000000000000000050101456575232400275050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #define AWSCV_TO_WINDOWS(pCV) (PCONDITION_VARIABLE) & (pCV)->condition_handle int aws_condition_variable_init(struct aws_condition_variable *condition_variable) { /* Ensure our condition variable and Windows' condition variables are the same size */ AWS_STATIC_ASSERT(sizeof(CONDITION_VARIABLE) == sizeof(condition_variable->condition_handle)); AWS_PRECONDITION(condition_variable); InitializeConditionVariable(AWSCV_TO_WINDOWS(condition_variable)); condition_variable->initialized = true; return AWS_OP_SUCCESS; } void aws_condition_variable_clean_up(struct aws_condition_variable *condition_variable) { AWS_PRECONDITION(condition_variable); AWS_ZERO_STRUCT(*condition_variable); } int aws_condition_variable_notify_one(struct aws_condition_variable *condition_variable) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); WakeConditionVariable(AWSCV_TO_WINDOWS(condition_variable)); return AWS_OP_SUCCESS; } int aws_condition_variable_notify_all(struct aws_condition_variable *condition_variable) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); WakeAllConditionVariable(AWSCV_TO_WINDOWS(condition_variable)); return AWS_OP_SUCCESS; } int aws_condition_variable_wait(struct aws_condition_variable *condition_variable, struct aws_mutex *mutex) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); AWS_PRECONDITION(mutex && mutex->initialized); if (SleepConditionVariableSRW(AWSCV_TO_WINDOWS(condition_variable), AWSMUTEX_TO_WINDOWS(mutex), INFINITE, 0)) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN); } int aws_condition_variable_wait_for( struct aws_condition_variable *condition_variable, struct aws_mutex *mutex, int64_t time_to_wait) { AWS_PRECONDITION(condition_variable && condition_variable->initialized); AWS_PRECONDITION(mutex && mutex->initialized); DWORD time_ms = (DWORD)aws_timestamp_convert(time_to_wait, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); if (SleepConditionVariableSRW(AWSCV_TO_WINDOWS(condition_variable), AWSMUTEX_TO_WINDOWS(mutex), time_ms, 0)) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_COND_VARIABLE_TIMED_OUT); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/cross_process_lock.c000066400000000000000000000066161456575232400275660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include struct aws_cross_process_lock { struct aws_allocator *allocator; HANDLE mutex; }; struct aws_cross_process_lock *aws_cross_process_lock_try_acquire( struct aws_allocator *allocator, struct aws_byte_cursor instance_nonce) { /* validate we don't have a directory slash. */ struct aws_byte_cursor to_find = aws_byte_cursor_from_c_str("\\"); struct aws_byte_cursor found; AWS_ZERO_STRUCT(found); if (aws_byte_cursor_find_exact(&instance_nonce, &to_find, &found) != AWS_OP_ERR && aws_last_error() != AWS_ERROR_STRING_MATCH_NOT_FOUND) { AWS_LOGF_ERROR( AWS_LS_COMMON_GENERAL, "static: Lock " PRInSTR " creation has illegal character \\", AWS_BYTE_CURSOR_PRI(instance_nonce)); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_cross_process_lock *instance_lock = NULL; /* "Local\" prefix, per the docs, specifies user session scope (rather than "Global\" to the system). 
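For example (illustrative), an instance_nonce of "my-app" yields the kernel
object name "Local\aws_crt_cross_process_lock/my-app" built just below.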
*/ struct aws_byte_cursor path_prefix = aws_byte_cursor_from_c_str("Local\\aws_crt_cross_process_lock/"); struct aws_byte_buf nonce_buf; aws_byte_buf_init_copy_from_cursor(&nonce_buf, allocator, path_prefix); aws_byte_buf_append_dynamic(&nonce_buf, &instance_nonce); aws_byte_buf_append_null_terminator(&nonce_buf); HANDLE mutex = CreateMutexA(NULL, FALSE, (LPCSTR)nonce_buf.buffer); if (!mutex) { AWS_LOGF_WARN( AWS_LS_COMMON_GENERAL, "static: Lock %s creation failed with error %" PRIx32, (const char *)nonce_buf.buffer, GetLastError()); aws_translate_and_raise_io_error_or(GetLastError(), AWS_ERROR_MUTEX_FAILED); goto cleanup; } /* from the docs: * If the mutex is a named mutex and the object existed before this function call, the return value is a handle * to the existing object, and the GetLastError function returns ERROR_ALREADY_EXISTS. */ if (GetLastError() == ERROR_ALREADY_EXISTS) { AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "static: Lock %s is already acquired by another instance", (const char *)nonce_buf.buffer); CloseHandle(mutex); aws_raise_error(AWS_ERROR_MUTEX_CALLER_NOT_OWNER); goto cleanup; } instance_lock = aws_mem_calloc(allocator, 1, sizeof(struct aws_cross_process_lock)); instance_lock->mutex = mutex; instance_lock->allocator = allocator; AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "static: Lock %s acquired by this instance with HANDLE %p", (const char *)nonce_buf.buffer, (void *)mutex); cleanup: /* we could do this once above but then we'd lose logging for the buffer. */ aws_byte_buf_clean_up(&nonce_buf); return instance_lock; } void aws_cross_process_lock_release(struct aws_cross_process_lock *instance_lock) { if (instance_lock) { CloseHandle(instance_lock->mutex); AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "static: Lock released for handle %p", (void *)instance_lock->mutex); aws_mem_release(instance_lock->allocator, instance_lock); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/device_random.c000066400000000000000000000035301456575232400264560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include static BCRYPT_ALG_HANDLE s_alg_handle = NULL; static aws_thread_once s_rand_init = AWS_THREAD_ONCE_STATIC_INIT; static void s_init_rand(void *user_data) { (void)user_data; NTSTATUS status = 0; status = BCryptOpenAlgorithmProvider(&s_alg_handle, BCRYPT_RNG_ALGORITHM, NULL, 0); if (!BCRYPT_SUCCESS(status)) { abort(); } } int aws_device_random_buffer(struct aws_byte_buf *output) { return aws_device_random_buffer_append(output, output->capacity - output->len); } int aws_device_random_buffer_append(struct aws_byte_buf *output, size_t n) { AWS_PRECONDITION(aws_byte_buf_is_valid(output)); aws_thread_call_once(&s_rand_init, s_init_rand, NULL); size_t space_available = output->capacity - output->len; if (space_available < n) { AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t original_len = output->len; /* BCryptGenRandom() takes 32bit length, but we accept size_t, * so work in chunks if necessary. 
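E.g. a request for UINT32_MAX + 10 bytes is satisfied by one UINT32_MAX-byte
call followed by a 10-byte call; on any failure the buffer length is rolled
back to its original value.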
*/ while (n > 0) { uint32_t capped_n = (uint32_t)aws_min_size(n, UINT32_MAX); NTSTATUS status = BCryptGenRandom(s_alg_handle, output->buffer + output->len, capped_n, 0 /*flags*/); if (!BCRYPT_SUCCESS(status)) { output->len = original_len; AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return aws_raise_error(AWS_ERROR_RANDOM_GEN_FAILED); } output->len += capped_n; n -= capped_n; } AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/environment.c000066400000000000000000000026361456575232400262310ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include int aws_get_environment_value( struct aws_allocator *allocator, const struct aws_string *variable_name, struct aws_string **value_out) { #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4996) #endif const char *value = getenv(aws_string_c_str(variable_name)); #ifdef _MSC_VER # pragma warning(pop) #endif if (value == NULL) { *value_out = NULL; return AWS_OP_SUCCESS; } *value_out = aws_string_new_from_c_str(allocator, value); if (*value_out == NULL) { return aws_raise_error(AWS_ERROR_ENVIRONMENT_GET); } return AWS_OP_SUCCESS; } int aws_set_environment_value(const struct aws_string *variable_name, const struct aws_string *value) { if (_putenv_s(aws_string_c_str(variable_name), aws_string_c_str(value)) != 0) { return aws_raise_error(AWS_ERROR_ENVIRONMENT_SET); } return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_empty_string, ""); int aws_unset_environment_value(const struct aws_string *variable_name) { if (_putenv_s(aws_string_c_str(variable_name), aws_string_c_str(s_empty_string)) != 0) { return aws_raise_error(AWS_ERROR_ENVIRONMENT_UNSET); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/file.c000066400000000000000000000466131456575232400246070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include static bool s_is_string_empty(const struct aws_string *str) { return str == NULL || str->len == 0; } static bool s_is_wstring_empty(const struct aws_wstring *str) { return str == NULL || str->len == 0; } FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode) { if (s_is_string_empty(file_path)) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. path is empty"); aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); return NULL; } if (s_is_string_empty(mode)) { AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. mode is empty"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_wstring *w_file_path = aws_string_convert_to_wstring(aws_default_allocator(), file_path); struct aws_wstring *w_mode = aws_string_convert_to_wstring(aws_default_allocator(), mode); FILE *file = NULL; errno_t error = _wfopen_s(&file, aws_wstring_c_str(w_file_path), aws_wstring_c_str(w_mode)); aws_wstring_destroy(w_mode); aws_wstring_destroy(w_file_path); if (error) { aws_translate_and_raise_io_error_or(error, AWS_ERROR_FILE_OPEN_FAILURE); AWS_LOGF_ERROR( AWS_LS_COMMON_IO, "static: Failed to open file. 
path:'%s' mode:'%s' errno:%d aws-error:%d(%s)", aws_string_c_str(file_path), aws_string_c_str(mode), error, aws_last_error(), aws_error_name(aws_last_error())); } return file; } struct aws_wstring *s_to_long_path(struct aws_allocator *allocator, const struct aws_wstring *path) { if (s_is_wstring_empty(path)) { return NULL; } wchar_t prefix[] = L"\\\\?\\"; size_t prefix_size = sizeof(prefix); if (aws_wstring_num_chars(path) > MAX_PATH - prefix_size) { struct aws_byte_buf new_path; aws_byte_buf_init(&new_path, allocator, sizeof(prefix) + path->len + 2); struct aws_byte_cursor prefix_cur = aws_byte_cursor_from_array((uint8_t *)prefix, sizeof(prefix) - 2); aws_byte_buf_append_dynamic(&new_path, &prefix_cur); struct aws_byte_cursor path_cur = aws_byte_cursor_from_array((uint8_t *)aws_wstring_c_str(path), path->len); aws_byte_buf_append_dynamic(&new_path, &path_cur); struct aws_wstring *long_path = aws_wstring_new_from_array(allocator, (wchar_t *)new_path.buffer, new_path.len / sizeof(wchar_t)); aws_byte_buf_clean_up(&new_path); return long_path; } return aws_wstring_new_from_array(allocator, aws_wstring_c_str(path), aws_wstring_num_chars(path)); } int aws_directory_create(const struct aws_string *dir_path) { if (s_is_string_empty(dir_path)) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } struct aws_wstring *w_dir_path = aws_string_convert_to_wstring(aws_default_allocator(), dir_path); struct aws_wstring *long_dir_path = s_to_long_path(aws_default_allocator(), w_dir_path); aws_wstring_destroy(w_dir_path); BOOL create_dir_res = CreateDirectoryW(aws_wstring_c_str(long_dir_path), NULL); aws_wstring_destroy(long_dir_path); int error = GetLastError(); if (!create_dir_res) { if (error == ERROR_ALREADY_EXISTS) { return AWS_OP_SUCCESS; } if (error == ERROR_PATH_NOT_FOUND) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } if (error == ERROR_ACCESS_DENIED) { return aws_raise_error(AWS_ERROR_NO_PERMISSION); } return aws_raise_error(AWS_ERROR_UNKNOWN); } return AWS_OP_SUCCESS; } bool aws_directory_exists(const struct aws_string *dir_path) { if (s_is_string_empty(dir_path)) { return false; } struct aws_wstring *w_dir_path = aws_string_convert_to_wstring(aws_default_allocator(), dir_path); struct aws_wstring *long_dir_path = s_to_long_path(aws_default_allocator(), w_dir_path); aws_wstring_destroy(w_dir_path); DWORD attributes = GetFileAttributesW(aws_wstring_c_str(long_dir_path)); aws_wstring_destroy(long_dir_path); return (attributes != INVALID_FILE_ATTRIBUTES && (attributes & FILE_ATTRIBUTE_DIRECTORY)); } static bool s_delete_file_or_directory(const struct aws_directory_entry *entry, void *user_data) { (void)user_data; struct aws_allocator *allocator = aws_default_allocator(); struct aws_string *path_str = aws_string_new_from_cursor(allocator, &entry->relative_path); int ret_val = AWS_OP_SUCCESS; if (entry->file_type & AWS_FILE_TYPE_FILE) { ret_val = aws_file_delete(path_str); } if (entry->file_type & AWS_FILE_TYPE_DIRECTORY) { ret_val = aws_directory_delete(path_str, false); } aws_string_destroy(path_str); return ret_val == AWS_OP_SUCCESS; } int aws_directory_delete(const struct aws_string *dir_path, bool recursive) { if (s_is_string_empty(dir_path)) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } if (!aws_directory_exists(dir_path)) { return AWS_OP_SUCCESS; } int ret_val = AWS_OP_SUCCESS; if (recursive) { ret_val = aws_directory_traverse(aws_default_allocator(), dir_path, true, s_delete_file_or_directory, NULL); } if (ret_val && aws_last_error() == AWS_ERROR_FILE_INVALID_PATH) { 
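/* If the recursive pass failed only because the path could not be iterated
 * (AWS_ERROR_FILE_INVALID_PATH), clear the error and report success rather
 * than surfacing it. */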
aws_reset_error(); return AWS_OP_SUCCESS; } if (ret_val) { return AWS_OP_ERR; } struct aws_wstring *w_dir_path = aws_string_convert_to_wstring(aws_default_allocator(), dir_path); struct aws_wstring *long_dir_path = s_to_long_path(aws_default_allocator(), w_dir_path); aws_wstring_destroy(w_dir_path); BOOL remove_dir_res = RemoveDirectoryW(aws_wstring_c_str(long_dir_path)); aws_wstring_destroy(long_dir_path); if (!remove_dir_res) { int error = GetLastError(); if (error == ERROR_DIR_NOT_EMPTY) { return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY); } if (error == ERROR_ACCESS_DENIED) { return aws_raise_error(AWS_ERROR_NO_PERMISSION); } return aws_raise_error(AWS_ERROR_UNKNOWN); } return AWS_OP_SUCCESS; } int aws_file_delete(const struct aws_string *file_path) { if (s_is_string_empty(file_path)) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } struct aws_wstring *w_file_path = aws_string_convert_to_wstring(aws_default_allocator(), file_path); struct aws_wstring *long_file_path = s_to_long_path(aws_default_allocator(), w_file_path); aws_wstring_destroy(w_file_path); BOOL remove_file_res = DeleteFileW(aws_wstring_c_str(long_file_path)); aws_wstring_destroy(long_file_path); if (!remove_file_res) { int error = GetLastError(); if (error == ERROR_FILE_NOT_FOUND) { return AWS_OP_SUCCESS; } if (error == ERROR_ACCESS_DENIED) { return aws_raise_error(AWS_ERROR_NO_PERMISSION); } return aws_raise_error(AWS_ERROR_UNKNOWN); } return AWS_OP_SUCCESS; } int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to) { if (s_is_string_empty(from) || s_is_string_empty(to)) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } struct aws_wstring *w_from_path = aws_string_convert_to_wstring(aws_default_allocator(), from); struct aws_wstring *long_from_path = s_to_long_path(aws_default_allocator(), w_from_path); aws_wstring_destroy(w_from_path); struct aws_wstring *w_to_path = aws_string_convert_to_wstring(aws_default_allocator(), to); struct aws_wstring *long_to_path = s_to_long_path(aws_default_allocator(), w_to_path); aws_wstring_destroy(w_to_path); BOOL move_res = MoveFileW(aws_wstring_c_str(long_from_path), aws_wstring_c_str(long_to_path)); aws_wstring_destroy(long_from_path); aws_wstring_destroy(long_to_path); if (!move_res) { int error = GetLastError(); if (error == ERROR_FILE_NOT_FOUND) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } if (error == ERROR_ACCESS_DENIED) { return aws_raise_error(AWS_ERROR_NO_PERMISSION); } return aws_raise_error(AWS_ERROR_UNKNOWN); } return AWS_OP_SUCCESS; } int aws_directory_traverse( struct aws_allocator *allocator, const struct aws_string *path, bool recursive, aws_on_directory_entry *on_entry, void *user_data) { if (s_is_string_empty(path)) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } struct aws_wstring *w_path_wchar = aws_string_convert_to_wstring(allocator, path); struct aws_wstring *long_path_wchar = s_to_long_path(allocator, w_path_wchar); aws_wstring_destroy(w_path_wchar); /* windows doesn't fail in FindFirstFile if it's not a directory. Do the check here. We don't call the perfectly good function for this check because the string is already converted to utf-16 and it's trivial to reuse it. 
*/ DWORD attributes = GetFileAttributesW(aws_wstring_c_str(long_path_wchar)); if (!(attributes & FILE_ATTRIBUTE_DIRECTORY)) { aws_wstring_destroy(long_path_wchar); return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } WIN32_FIND_DATAW ffd; HANDLE find_handle = FindFirstFileW(aws_wstring_c_str(long_path_wchar), &ffd); if (find_handle == INVALID_HANDLE_VALUE) { aws_wstring_destroy(long_path_wchar); int error = GetLastError(); if (error == ERROR_FILE_NOT_FOUND) { return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } return aws_raise_error(AWS_ERROR_UNKNOWN); } FindClose(find_handle); /* create search path string */ struct aws_byte_cursor path_wchar_cur = aws_byte_cursor_from_array(aws_wstring_c_str(long_path_wchar), aws_wstring_size_bytes(long_path_wchar)); struct aws_byte_buf search_wchar_buf; aws_byte_buf_init_copy_from_cursor(&search_wchar_buf, allocator, path_wchar_cur); wchar_t search_wchar_pattern[] = L"\\*"; struct aws_byte_cursor search_char_wchar = aws_byte_cursor_from_array((uint8_t *)search_wchar_pattern, sizeof(search_wchar_pattern)); aws_byte_buf_append_dynamic(&search_wchar_buf, &search_char_wchar); struct aws_byte_cursor search_wchar_cur = aws_byte_cursor_from_buf(&search_wchar_buf); /* it's already converted to wide string */ struct aws_wstring *search_wchar_string = aws_wstring_new_from_cursor(allocator, &search_wchar_cur); find_handle = FindFirstFileW(aws_wstring_c_str(search_wchar_string), &ffd); aws_wstring_destroy(search_wchar_string); aws_byte_buf_clean_up(&search_wchar_buf); int ret_val = AWS_OP_SUCCESS; /* iterate each entry in the directory. Do a bunch of utf-16 conversions. Figure out the paths etc.... invoke the visitor, and continue recursing if the flag was set. */ do { struct aws_string *name_component_multi_char_str = aws_string_convert_from_wchar_c_str(allocator, ffd.cFileName); struct aws_byte_cursor name_component_multi_char = aws_byte_cursor_from_string(name_component_multi_char_str); /* disgard . and .. */ char *ascend_mark = ".."; char *cd_mark = "."; struct aws_byte_cursor ascend_mark_cur = aws_byte_cursor_from_c_str(ascend_mark); struct aws_byte_cursor cd_mark_cur = aws_byte_cursor_from_c_str(cd_mark); if (aws_byte_cursor_eq(&name_component_multi_char, &ascend_mark_cur) || aws_byte_cursor_eq(&name_component_multi_char, &cd_mark_cur)) { aws_string_destroy(name_component_multi_char_str); continue; } /* get the relative path as utf-16, so we can talk to windows. */ struct aws_byte_buf relative_path_wchar; aws_byte_buf_init_copy_from_cursor(&relative_path_wchar, allocator, path_wchar_cur); wchar_t unicode_delim[] = L"\\"; struct aws_byte_cursor delimiter_cur = aws_byte_cursor_from_array((uint8_t *)unicode_delim, sizeof(unicode_delim) - 2); aws_byte_buf_append_dynamic(&relative_path_wchar, &delimiter_cur); struct aws_byte_cursor name_str = aws_byte_cursor_from_array(ffd.cFileName, wcsnlen(ffd.cFileName, sizeof(ffd.cFileName)) * sizeof(wchar_t)); aws_byte_buf_append_dynamic(&relative_path_wchar, &name_str); aws_byte_buf_append_byte_dynamic(&relative_path_wchar, 0); aws_byte_buf_append_byte_dynamic(&relative_path_wchar, 0); relative_path_wchar.len -= 2; /* now get the absolute path from the relative path we just computed. 
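GetFullPathNameW() is called twice: the first call, with a zero-length buffer,
returns the required size in wchar_t characters including the null terminator,
and the second call fills the freshly sized buffer.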
*/ DWORD path_res = GetFullPathNameW((wchar_t *)relative_path_wchar.buffer, 0, NULL, NULL); AWS_FATAL_ASSERT(path_res > 0); struct aws_byte_buf full_path_wchar_buf; aws_byte_buf_init(&full_path_wchar_buf, allocator, (size_t)path_res * sizeof(wchar_t) + 2); full_path_wchar_buf.len = full_path_wchar_buf.capacity - 2; path_res = GetFullPathNameW( (wchar_t *)relative_path_wchar.buffer, (DWORD)path_res + 1, (wchar_t *)full_path_wchar_buf.buffer, NULL); AWS_FATAL_ASSERT(path_res > 0); aws_byte_buf_append_byte_dynamic(&full_path_wchar_buf, 0); aws_byte_buf_append_byte_dynamic(&full_path_wchar_buf, 0); /* now we have the data, convert the utf-16 strings we used to communicate with windows back to utf-8 for the user to actually consume. */ struct aws_string *full_path_name_multi_char = aws_string_convert_from_wchar_c_str(allocator, (wchar_t *)full_path_wchar_buf.buffer); aws_byte_buf_clean_up(&full_path_wchar_buf); struct aws_string *relative_path_multi_char = aws_string_convert_from_wchar_c_str(allocator, (wchar_t *)relative_path_wchar.buffer); struct aws_directory_entry entry; AWS_ZERO_STRUCT(entry); entry.relative_path = aws_byte_cursor_from_string(relative_path_multi_char); entry.path = aws_byte_cursor_from_string(full_path_name_multi_char); LARGE_INTEGER file_size; file_size.HighPart = ffd.nFileSizeHigh; file_size.LowPart = ffd.nFileSizeLow; entry.file_size = (int64_t)file_size.QuadPart; if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { entry.file_type |= AWS_FILE_TYPE_DIRECTORY; } else { entry.file_type |= AWS_FILE_TYPE_FILE; } if (recursive && entry.file_type & AWS_FILE_TYPE_DIRECTORY) { ret_val = aws_directory_traverse(allocator, relative_path_multi_char, recursive, on_entry, user_data); } /* post order traversal, if a node below us ended the traversal, don't call the visitor again. */ if (ret_val && aws_last_error() == AWS_ERROR_OPERATION_INTERUPTED) { goto cleanup; } if (!on_entry(&entry, user_data)) { ret_val = aws_raise_error(AWS_ERROR_OPERATION_INTERUPTED); goto cleanup; } if (ret_val) { goto cleanup; } cleanup: aws_string_destroy(relative_path_multi_char); aws_string_destroy(full_path_name_multi_char); aws_byte_buf_clean_up(&relative_path_wchar); aws_string_destroy(name_component_multi_char_str); } while (ret_val == AWS_OP_SUCCESS && FindNextFileW(find_handle, &ffd)); aws_wstring_destroy(long_path_wchar); if (find_handle != INVALID_HANDLE_VALUE) { FindClose(find_handle); } return ret_val; } char aws_get_platform_directory_separator(void) { return '\\'; } AWS_STATIC_STRING_FROM_LITERAL(s_userprofile_env_var, "USERPROFILE"); AWS_STATIC_STRING_FROM_LITERAL(s_homedrive_env_var, "HOMEDRIVE"); AWS_STATIC_STRING_FROM_LITERAL(s_homepath_env_var, "HOMEPATH"); AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME"); struct aws_string *aws_get_home_directory(struct aws_allocator *allocator) { /* * 1. Check HOME */ struct aws_string *home_env_var_value = NULL; if (aws_get_environment_value(allocator, s_home_env_var, &home_env_var_value) == 0 && home_env_var_value != NULL) { return home_env_var_value; } /* * 2. (Windows) Check USERPROFILE */ struct aws_string *userprofile_env_var_value = NULL; if (aws_get_environment_value(allocator, s_userprofile_env_var, &userprofile_env_var_value) == 0 && userprofile_env_var_value != NULL) { return userprofile_env_var_value; } /* * 3. 
(Windows) Check HOMEDRIVE ++ HOMEPATH */ struct aws_string *final_path = NULL; struct aws_string *homedrive_env_var_value = NULL; if (aws_get_environment_value(allocator, s_homedrive_env_var, &homedrive_env_var_value) == 0 && homedrive_env_var_value != NULL) { struct aws_string *homepath_env_var_value = NULL; if (aws_get_environment_value(allocator, s_homepath_env_var, &homepath_env_var_value) == 0 && homepath_env_var_value != NULL) { struct aws_byte_buf concatenated_dir; aws_byte_buf_init( &concatenated_dir, allocator, homedrive_env_var_value->len + homepath_env_var_value->len + 1); struct aws_byte_cursor drive_cursor = aws_byte_cursor_from_string(homedrive_env_var_value); struct aws_byte_cursor path_cursor = aws_byte_cursor_from_string(homepath_env_var_value); aws_byte_buf_append(&concatenated_dir, &drive_cursor); aws_byte_buf_append(&concatenated_dir, &path_cursor); final_path = aws_string_new_from_buf(allocator, &concatenated_dir); aws_byte_buf_clean_up(&concatenated_dir); aws_string_destroy(homepath_env_var_value); } aws_string_destroy(homedrive_env_var_value); } if (final_path != NULL) { return final_path; } return NULL; } bool aws_path_exists(const struct aws_string *path) { if (s_is_string_empty(path)) { return false; } struct aws_wstring *wchar_path = aws_string_convert_to_wstring(aws_default_allocator(), path); bool ret_val = PathFileExistsW(aws_wstring_c_str(wchar_path)) == TRUE; aws_wstring_destroy(wchar_path); return ret_val; } int aws_fseek(FILE *file, int64_t offset, int whence) { if (_fseeki64(file, offset, whence)) { int errno_value = errno; /* Always cache errno before potential side-effect */ return aws_translate_and_raise_io_error_or(errno_value, AWS_ERROR_STREAM_UNSEEKABLE); } return AWS_OP_SUCCESS; } int aws_file_get_length(FILE *file, int64_t *length) { if (file == NULL) { return aws_raise_error(AWS_ERROR_INVALID_FILE_HANDLE); } int fd = _fileno(file); if (fd == -1) { return aws_raise_error(AWS_ERROR_INVALID_FILE_HANDLE); } HANDLE os_file = (HANDLE)_get_osfhandle(fd); if (os_file == INVALID_HANDLE_VALUE) { int errno_value = errno; /* Always cache errno before potential side-effect */ return aws_translate_and_raise_io_error(errno_value); } LARGE_INTEGER os_size; if (!GetFileSizeEx(os_file, &os_size)) { return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } int64_t size = os_size.QuadPart; if (size < 0) { return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } *length = size; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/mutex.c000066400000000000000000000046411456575232400250250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include /* Convert a string from a macro to a wide string */ #define WIDEN2(s) L## #s #define WIDEN(s) WIDEN2(s) int aws_mutex_init(struct aws_mutex *mutex) { /* Ensure our mutex and Windows' mutex are the same size */ AWS_STATIC_ASSERT(sizeof(SRWLOCK) == sizeof(mutex->mutex_handle)); InitializeSRWLock(AWSMUTEX_TO_WINDOWS(mutex)); mutex->initialized = true; return AWS_OP_SUCCESS; } /* turn off unused named parameter warning on msvc.*/ #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4100) #endif void aws_mutex_clean_up(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex); AWS_ZERO_STRUCT(*mutex); } int aws_mutex_lock(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex && mutex->initialized); AcquireSRWLockExclusive(AWSMUTEX_TO_WINDOWS(mutex)); return AWS_OP_SUCCESS; } /* Check for functions that don't exist on ancient windows */ static aws_thread_once s_check_functions_once = INIT_ONCE_STATIC_INIT; typedef BOOLEAN WINAPI TryAcquireSRWLockExclusive_fn(PSRWLOCK SRWLock); static TryAcquireSRWLockExclusive_fn *s_TryAcquireSRWLockExclusive; static void s_check_try_lock_function(void *user_data) { (void)user_data; s_TryAcquireSRWLockExclusive = (TryAcquireSRWLockExclusive_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "TryAcquireSRWLockExclusive"); } int aws_mutex_try_lock(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex && mutex->initialized); /* Check for functions that don't exist on ancient Windows */ aws_thread_call_once(&s_check_functions_once, s_check_try_lock_function, NULL); if (!s_TryAcquireSRWLockExclusive) { return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } BOOL res = s_TryAcquireSRWLockExclusive(AWSMUTEX_TO_WINDOWS(mutex)); /* * Per Windows documentation, a return value of zero indicates a failure to acquire the lock. */ if (!res) { return aws_raise_error(AWS_ERROR_MUTEX_TIMEOUT); } return AWS_OP_SUCCESS; } int aws_mutex_unlock(struct aws_mutex *mutex) { AWS_PRECONDITION(mutex && mutex->initialized); ReleaseSRWLockExclusive(AWSMUTEX_TO_WINDOWS(mutex)); return AWS_OP_SUCCESS; } #ifdef _MSC_VER # pragma warning(pop) #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/process.c000066400000000000000000000014311456575232400253330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * this is just the value it's hard coded to in windows NT and later * see https://docs.microsoft.com/en-us/windows/win32/sysinfo/kernel-objects * for more information. */ static const size_t s_max_handles = 1 << 24; int aws_get_pid(void) { #if defined(AWS_OS_WINDOWS_DESKTOP) return _getpid(); #else return -1; #endif } size_t aws_get_soft_limit_io_handles(void) { return s_max_handles; } size_t aws_get_hard_limit_io_handles(void) { return s_max_handles; } int aws_set_soft_limit_io_handles(size_t max_handles) { (void)max_handles; return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/rw_lock.c000066400000000000000000000057101456575232400253210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include /* Convert a string from a macro to a wide string */ #define WIDEN2(s) L## #s #define WIDEN(s) WIDEN2(s) /* Ensure our rwlock and Windows' rwlocks are the same size */ AWS_STATIC_ASSERT(sizeof(SRWLOCK) == sizeof(struct aws_rw_lock)); int aws_rw_lock_init(struct aws_rw_lock *lock) { InitializeSRWLock(AWSSRW_TO_WINDOWS(lock)); return AWS_OP_SUCCESS; } void aws_rw_lock_clean_up(struct aws_rw_lock *lock) { (void)lock; } int aws_rw_lock_rlock(struct aws_rw_lock *lock) { AcquireSRWLockShared(AWSSRW_TO_WINDOWS(lock)); return AWS_OP_SUCCESS; } int aws_rw_lock_wlock(struct aws_rw_lock *lock) { AcquireSRWLockExclusive(AWSSRW_TO_WINDOWS(lock)); return AWS_OP_SUCCESS; } /* Check for functions that don't exist on ancient windows */ static aws_thread_once s_check_functions_once = INIT_ONCE_STATIC_INIT; typedef BOOLEAN WINAPI TryAcquireSRWLockExclusive_fn(PSRWLOCK SRWLock); static TryAcquireSRWLockExclusive_fn *s_TryAcquireSRWLockExclusive; typedef BOOLEAN WINAPI TryAcquireSRWLockShared_fn(PSRWLOCK SRWLock); static TryAcquireSRWLockShared_fn *s_TryAcquireSRWLockShared; static void s_check_try_lock_function(void *user_data) { (void)user_data; s_TryAcquireSRWLockExclusive = (TryAcquireSRWLockExclusive_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "TryAcquireSRWLockExclusive"); s_TryAcquireSRWLockShared = (TryAcquireSRWLockShared_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "TryAcquireSRWLockShared"); } int aws_rw_lock_try_rlock(struct aws_rw_lock *lock) { (void)lock; /* Check for functions that don't exist on ancient Windows */ aws_thread_call_once(&s_check_functions_once, s_check_try_lock_function, NULL); if (!s_TryAcquireSRWLockShared) { return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } if (s_TryAcquireSRWLockShared(AWSSRW_TO_WINDOWS(lock))) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_MUTEX_TIMEOUT); } int aws_rw_lock_try_wlock(struct aws_rw_lock *lock) { (void)lock; /* Check for functions that don't exist on ancient Windows */ aws_thread_call_once(&s_check_functions_once, s_check_try_lock_function, NULL); if (!s_TryAcquireSRWLockExclusive) { return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } if (s_TryAcquireSRWLockExclusive(AWSSRW_TO_WINDOWS(lock))) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_MUTEX_TIMEOUT); } int aws_rw_lock_runlock(struct aws_rw_lock *lock) { ReleaseSRWLockShared(AWSSRW_TO_WINDOWS(lock)); return AWS_OP_SUCCESS; } int aws_rw_lock_wunlock(struct aws_rw_lock *lock) { ReleaseSRWLockExclusive(AWSSRW_TO_WINDOWS(lock)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/system_info.c000066400000000000000000000233741456575232400262260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include enum aws_platform_os aws_get_platform_build_os(void) { return AWS_PLATFORM_OS_WINDOWS; } size_t aws_system_info_processor_count(void) { SYSTEM_INFO info; GetSystemInfo(&info); return info.dwNumberOfProcessors; } /* the next three functions need actual implementations before we can have proper numa alignment on windows. * For now leave them stubbed out. 
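As stubbed, they report a single CPU group containing every logical processor,
and aws_get_cpu_ids_for_group() flags the second half of the IDs as suspected
hyper-threads.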
*/ uint16_t aws_get_cpu_group_count(void) { return 1U; } size_t aws_get_cpu_count_for_group(uint16_t group_idx) { (void)group_idx; return aws_system_info_processor_count(); } void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length) { (void)group_idx; if (!cpu_ids_array_length) { return; } /* a crude hint, but hyper-threads are numbered as the second half of the cpu id listing. */ size_t hyper_threads_hint = cpu_ids_array_length / 2 - 1; for (size_t i = 0; i < cpu_ids_array_length; ++i) { cpu_ids_array[i].cpu_id = (int32_t)i; cpu_ids_array[i].suspected_hyper_thread = i > hyper_threads_hint; } } bool aws_is_debugger_present(void) { return IsDebuggerPresent(); } void aws_debug_break(void) { #ifdef DEBUG_BUILD if (aws_is_debugger_present()) { DebugBreak(); } #endif } #if defined(AWS_OS_WINDOWS_DESKTOP) /* If I meet the engineer that wrote the dbghelp.h file for the windows 8.1 SDK we're gonna have words! */ # ifdef _MSC_VER # pragma warning(disable : 4091) # endif # include struct win_symbol_data { struct _SYMBOL_INFO sym_info; char symbol_name[1024]; }; typedef BOOL __stdcall SymInitialize_fn(_In_ HANDLE hProcess, _In_opt_ PCSTR UserSearchPath, _In_ BOOL fInvadeProcess); typedef DWORD __stdcall SymSetOptions_fn(DWORD SymOptions); typedef BOOL __stdcall SymFromAddr_fn( _In_ HANDLE hProcess, _In_ DWORD64 Address, _Out_opt_ PDWORD64 Displacement, _Inout_ PSYMBOL_INFO Symbol); # if defined(_WIN64) typedef BOOL __stdcall SymGetLineFromAddr_fn( _In_ HANDLE hProcess, _In_ DWORD64 qwAddr, _Out_ PDWORD pdwDisplacement, _Out_ PIMAGEHLP_LINE64 Line64); # define SymGetLineFromAddrName "SymGetLineFromAddr64" # else typedef BOOL __stdcall SymGetLineFromAddr_fn( _In_ HANDLE hProcess, _In_ DWORD dwAddr, _Out_ PDWORD pdwDisplacement, _Out_ PIMAGEHLP_LINE Line); # define SymGetLineFromAddrName "SymGetLineFromAddr" # endif static SymInitialize_fn *s_SymInitialize = NULL; static SymSetOptions_fn *s_SymSetOptions = NULL; static SymFromAddr_fn *s_SymFromAddr = NULL; static SymGetLineFromAddr_fn *s_SymGetLineFromAddr = NULL; static aws_thread_once s_init_once = AWS_THREAD_ONCE_STATIC_INIT; static void s_init_dbghelp_impl(void *user_data) { (void)user_data; HMODULE dbghelp = LoadLibraryA("DbgHelp.dll"); if (!dbghelp) { fprintf(stderr, "Failed to load DbgHelp.dll.\n"); goto done; } s_SymInitialize = (SymInitialize_fn *)GetProcAddress(dbghelp, "SymInitialize"); if (!s_SymInitialize) { fprintf(stderr, "Failed to load SymInitialize from DbgHelp.dll.\n"); goto done; } s_SymSetOptions = (SymSetOptions_fn *)GetProcAddress(dbghelp, "SymSetOptions"); if (!s_SymSetOptions) { fprintf(stderr, "Failed to load SymSetOptions from DbgHelp.dll\n"); goto done; } s_SymFromAddr = (SymFromAddr_fn *)GetProcAddress(dbghelp, "SymFromAddr"); if (!s_SymFromAddr) { fprintf(stderr, "Failed to load SymFromAddr from DbgHelp.dll.\n"); goto done; } s_SymGetLineFromAddr = (SymGetLineFromAddr_fn *)GetProcAddress(dbghelp, SymGetLineFromAddrName); if (!s_SymGetLineFromAddr) { fprintf(stderr, "Failed to load " SymGetLineFromAddrName " from DbgHelp.dll.\n"); goto done; } HANDLE process = GetCurrentProcess(); AWS_FATAL_ASSERT(process); s_SymInitialize(process, NULL, TRUE); s_SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_LOAD_ANYTHING | SYMOPT_LOAD_LINES); return; done: if (dbghelp) { FreeLibrary(dbghelp); } return; } static bool s_init_dbghelp() { if (AWS_LIKELY(s_SymInitialize)) { return true; } aws_thread_call_once(&s_init_once, s_init_dbghelp_impl, NULL); return s_SymInitialize != NULL; } 
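/*
 * Illustrative usage sketch (added by the editor, not part of the upstream
 * source): the capture/symbolize functions below are typically combined as in
 * aws_backtrace_print() further down; the frame count of 64 is arbitrary.
 * Fenced with #if 0 so it can never affect the build.
 */
#if 0
static void s_example_log_backtrace(FILE *fp) {
    void *frames[64];
    size_t num_frames = aws_backtrace(frames, AWS_ARRAY_SIZE(frames));

    char **symbols = aws_backtrace_symbols(frames, num_frames);
    if (symbols != NULL) {
        for (size_t i = 0; i < num_frames; ++i) {
            fprintf(fp, "%s\n", symbols[i]);
        }
        /* the symbol table is returned as a single allocation from the default allocator */
        aws_mem_release(aws_default_allocator(), symbols);
    }
}
#endif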
size_t aws_backtrace(void **stack_frames, size_t num_frames) { return (int)CaptureStackBackTrace(0, (ULONG)num_frames, stack_frames, NULL); } char **aws_backtrace_symbols(void *const *stack_frames, size_t num_frames) { if (!s_init_dbghelp()) { return NULL; } struct aws_byte_buf symbols; aws_byte_buf_init(&symbols, aws_default_allocator(), num_frames * 256); /* pointers for each stack entry */ memset(symbols.buffer, 0, num_frames * sizeof(void *)); symbols.len += num_frames * sizeof(void *); DWORD64 displacement = 0; DWORD disp = 0; struct aws_byte_cursor null_term = aws_byte_cursor_from_array("", 1); HANDLE process = GetCurrentProcess(); AWS_FATAL_ASSERT(process); for (size_t i = 0; i < num_frames; ++i) { /* record a pointer to where the symbol will be */ *((char **)&symbols.buffer[i * sizeof(void *)]) = (char *)symbols.buffer + symbols.len; uintptr_t address = (uintptr_t)stack_frames[i]; struct win_symbol_data sym_info; AWS_ZERO_STRUCT(sym_info); sym_info.sym_info.MaxNameLen = sizeof(sym_info.symbol_name); sym_info.sym_info.SizeOfStruct = sizeof(struct _SYMBOL_INFO); char sym_buf[1024]; /* scratch space for extracting info */ if (s_SymFromAddr(process, address, &displacement, &sym_info.sym_info)) { /* record the address and name */ int len = snprintf( sym_buf, AWS_ARRAY_SIZE(sym_buf), "at 0x%llX: %s", sym_info.sym_info.Address, sym_info.sym_info.Name); if (len != -1) { struct aws_byte_cursor symbol = aws_byte_cursor_from_array(sym_buf, len); aws_byte_buf_append_dynamic(&symbols, &symbol); } IMAGEHLP_LINE line; line.SizeOfStruct = sizeof(IMAGEHLP_LINE); if (s_SymGetLineFromAddr(process, address, &disp, &line)) { /* record file/line info */ len = snprintf(sym_buf, AWS_ARRAY_SIZE(sym_buf), "(%s:%lu)", line.FileName, line.LineNumber); if (len != -1) { struct aws_byte_cursor symbol = aws_byte_cursor_from_array(sym_buf, len); aws_byte_buf_append_dynamic(&symbols, &symbol); } } } else { /* no luck, record the address and last error */ DWORD last_error = GetLastError(); int len = snprintf( sym_buf, AWS_ARRAY_SIZE(sym_buf), "at 0x%p: Failed to lookup symbol: error %u", stack_frames[i], last_error); if (len > 0) { struct aws_byte_cursor sym_cur = aws_byte_cursor_from_array(sym_buf, len); aws_byte_buf_append_dynamic(&symbols, &sym_cur); } } /* Null terminator */ aws_byte_buf_append_dynamic(&symbols, &null_term); } return (char **)symbols.buffer; /* buffer must be freed by the caller */ } char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) { return aws_backtrace_symbols(stack_frames, stack_depth); } void aws_backtrace_print(FILE *fp, void *call_site_data) { struct _EXCEPTION_POINTERS *exception_pointers = call_site_data; if (exception_pointers) { fprintf(fp, "** Exception 0x%x occured **\n", exception_pointers->ExceptionRecord->ExceptionCode); } if (!s_init_dbghelp()) { fprintf(fp, "Unable to initialize dbghelp.dll"); return; } void *stack[1024]; size_t num_frames = aws_backtrace(stack, 1024); char **symbols = aws_backtrace_symbols(stack, num_frames); for (size_t line = 0; line < num_frames; ++line) { const char *symbol = symbols[line]; fprintf(fp, "%s\n", symbol); } fflush(fp); aws_mem_release(aws_default_allocator(), symbols); } void aws_backtrace_log(int log_level) { if (!s_init_dbghelp()) { AWS_LOGF_ERROR(AWS_LS_COMMON_GENERAL, "Unable to initialize dbghelp.dll for backtrace"); return; } void *stack[1024]; size_t num_frames = aws_backtrace(stack, 1024); char **symbols = aws_backtrace_symbols(stack, num_frames); for (size_t line = 0; line < num_frames; ++line) { const 
char *symbol = symbols[line]; AWS_LOGF(log_level, AWS_LS_COMMON_GENERAL, "%s", symbol); } aws_mem_release(aws_default_allocator(), symbols); } #else /* !AWS_OS_WINDOWS_DESKTOP */ size_t aws_backtrace(void **stack_frames, size_t num_frames) { (void)stack_frames; (void)num_frames; return 0; } char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth) { (void)stack_frames; (void)stack_depth; return NULL; } char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) { return aws_backtrace_symbols(stack_frames, stack_depth); } void aws_backtrace_print(FILE *fp, void *call_site_data) { (void)fp; (void)call_site_data; AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "aws_backtrace_print: backtrace requested, but logging is unsupported on this platform"); } void aws_backtrace_log() { AWS_LOGF_TRACE( AWS_LS_COMMON_GENERAL, "aws_backtrace_log: backtrace requested, but logging is unsupported on this platform"); } #endif /* AWS_OS_WINDOWS_DESKTOP */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/system_resource_utils.c000066400000000000000000000013711456575232400303330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include int aws_init_memory_usage_for_current_process(struct aws_memory_usage_stats *memory_usage) { AWS_PRECONDITION(memory_usage); AWS_ZERO_STRUCT(*memory_usage); HANDLE hProcess = GetCurrentProcess(); PROCESS_MEMORY_COUNTERS pmc; BOOL ret = GetProcessMemoryInfo(hProcess, &pmc, sizeof(pmc)); CloseHandle(hProcess); if (!ret) { return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } memory_usage->maxrss = pmc.PeakWorkingSetSize; memory_usage->page_faults = pmc.PageFaultCount; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/thread.c000066400000000000000000000407641456575232400251400ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include /* Convert a string from a macro to a wide string */ #define WIDEN2(s) L## #s #define WIDEN(s) WIDEN2(s) static struct aws_thread_options s_default_options = { /* zero will make sure whatever the default for that version of windows is used. */ .stack_size = 0, .join_strategy = AWS_TJS_MANUAL, }; struct thread_atexit_callback { aws_thread_atexit_fn *callback; void *user_data; struct thread_atexit_callback *next; }; struct thread_wrapper { struct aws_allocator *allocator; struct aws_linked_list_node node; void (*func)(void *arg); void *arg; struct thread_atexit_callback *atexit; /* * The managed thread system does lazy joins on threads once finished via their wrapper. For that to work * we need something to join against, so we keep a by-value copy of the original thread here. The tricky part * is how to set the threadid/handle of this copy since the copy must be injected into the thread function before * the threadid/handle is known. We get around that by just querying it at the top of the wrapper thread function. 
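Concretely, thread_wrapper_fn() below calls GetCurrentThreadId() and
OpenThread() as its very first step and stores the resulting handle into this
copy before the user-supplied function runs.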
*/ struct aws_thread thread_copy; }; static AWS_THREAD_LOCAL struct thread_wrapper *tl_wrapper = NULL; /* * thread_wrapper is platform-dependent so this function ends up being duplicated in each thread implementation */ void aws_thread_join_and_free_wrapper_list(struct aws_linked_list *wrapper_list) { struct aws_linked_list_node *iter = aws_linked_list_begin(wrapper_list); while (iter != aws_linked_list_end(wrapper_list)) { struct thread_wrapper *join_thread_wrapper = AWS_CONTAINER_OF(iter, struct thread_wrapper, node); iter = aws_linked_list_next(iter); join_thread_wrapper->thread_copy.detach_state = AWS_THREAD_JOINABLE; aws_thread_join(&join_thread_wrapper->thread_copy); aws_thread_clean_up(&join_thread_wrapper->thread_copy); aws_mem_release(join_thread_wrapper->allocator, join_thread_wrapper); aws_thread_decrement_unjoined_count(); } } static DWORD WINAPI thread_wrapper_fn(LPVOID arg) { struct thread_wrapper *wrapper_ptr = arg; /* * Make sure the aws_thread copy has the right handle stored in it. * We can't just call GetCurrentThread since that returns a fake handle that always maps to the local thread which * isn't what we want. */ DWORD current_thread_id = GetCurrentThreadId(); wrapper_ptr->thread_copy.thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, current_thread_id); struct thread_wrapper thread_wrapper = *wrapper_ptr; struct aws_allocator *allocator = thread_wrapper.allocator; tl_wrapper = &thread_wrapper; thread_wrapper.func(thread_wrapper.arg); /* * Managed threads don't free the wrapper yet. The thread management system does it later after the thread * is joined. */ bool is_managed_thread = wrapper_ptr->thread_copy.detach_state == AWS_THREAD_MANAGED; if (!is_managed_thread) { aws_mem_release(allocator, arg); } struct thread_atexit_callback *exit_callback_data = thread_wrapper.atexit; while (exit_callback_data) { aws_thread_atexit_fn *exit_callback = exit_callback_data->callback; void *exit_callback_user_data = exit_callback_data->user_data; struct thread_atexit_callback *next_exit_callback_data = exit_callback_data->next; aws_mem_release(allocator, exit_callback_data); exit_callback(exit_callback_user_data); exit_callback_data = next_exit_callback_data; } tl_wrapper = NULL; /* * Release this thread to the managed thread system for lazy join. 
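For managed threads the heap-allocated wrapper is deliberately left alive
here; aws_thread_join_and_free_wrapper_list() releases it after joining on
thread_copy.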
*/ if (is_managed_thread) { aws_thread_pending_join_add(&wrapper_ptr->node); } return 0; } const struct aws_thread_options *aws_default_thread_options(void) { return &s_default_options; } struct callback_fn_wrapper { void (*call_once)(void *); void *user_data; }; BOOL WINAPI s_init_once_wrapper(PINIT_ONCE init_once, void *param, void **context) { (void)context; (void)init_once; struct callback_fn_wrapper *callback_fn_wrapper = param; callback_fn_wrapper->call_once(callback_fn_wrapper->user_data); return TRUE; } void aws_thread_call_once(aws_thread_once *flag, void (*call_once)(void *), void *user_data) { struct callback_fn_wrapper wrapper; wrapper.call_once = call_once; wrapper.user_data = user_data; InitOnceExecuteOnce((PINIT_ONCE)flag, s_init_once_wrapper, &wrapper, NULL); } int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator) { thread->thread_handle = 0; thread->thread_id = 0; thread->allocator = allocator; thread->detach_state = AWS_THREAD_NOT_CREATED; return AWS_OP_SUCCESS; } /* Check for functions that don't exist on ancient windows */ static aws_thread_once s_check_functions_once = INIT_ONCE_STATIC_INIT; #if defined(AWS_OS_WINDOWS_DESKTOP) static aws_thread_once s_check_active_processor_functions_once = INIT_ONCE_STATIC_INIT; typedef DWORD WINAPI GetActiveProcessorCount_fn(WORD); static GetActiveProcessorCount_fn *s_GetActiveProcessorCount; typedef WORD WINAPI GetActiveProcessorGroupCount_fn(void); static GetActiveProcessorGroupCount_fn *s_GetActiveProcessorGroupCount; static void s_check_active_processor_functions(void *user_data) { (void)user_data; s_GetActiveProcessorGroupCount = (GetActiveProcessorGroupCount_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "GetActiveProcessorGroupCount"); s_GetActiveProcessorCount = (GetActiveProcessorCount_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "GetActiveProcessorCount"); } #endif /* windows is weird because apparently no one ever considered computers having more than 64 processors. Instead they have processor groups per process. We need to find the mask in the correct group. 
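For example (illustrative), with two active groups of 32 logical processors
each, a desired_cpu of 40 is intended to resolve to group 1, processor number
8 within that group. Note that the loop below appears to increment group_count
where it presumably means to increment i.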
*/ static void s_get_group_and_cpu_id(uint32_t desired_cpu, uint16_t *group, uint8_t *proc_num) { (void)desired_cpu; *group = 0; *proc_num = 0; #if defined(AWS_OS_WINDOWS_DESKTOP) /* Check for functions that don't exist on ancient Windows */ aws_thread_call_once(&s_check_active_processor_functions_once, s_check_active_processor_functions, NULL); if (!s_GetActiveProcessorCount || !s_GetActiveProcessorGroupCount) { return; } unsigned group_count = s_GetActiveProcessorGroupCount(); unsigned total_processors_detected = 0; uint8_t group_with_desired_processor = 0; uint8_t group_mask_for_desired_processor = 0; /* for each group, keep counting til we find the group and the processor mask */ for (uint8_t i = 0; i < group_count; ++i) { DWORD processor_count_in_group = s_GetActiveProcessorCount((WORD)i); if (total_processors_detected + processor_count_in_group > desired_cpu) { group_with_desired_processor = i; group_mask_for_desired_processor = (uint8_t)(desired_cpu - total_processors_detected); break; } total_processors_detected += processor_count_in_group; } *proc_num = group_mask_for_desired_processor; *group = group_with_desired_processor; return; #endif /* non-desktop has no processor groups */ } typedef BOOL WINAPI SetThreadGroupAffinity_fn( HANDLE hThread, const GROUP_AFFINITY *GroupAffinity, PGROUP_AFFINITY PreviousGroupAffinity); static SetThreadGroupAffinity_fn *s_SetThreadGroupAffinity; typedef BOOL WINAPI SetThreadIdealProcessorEx_fn( HANDLE hThread, PPROCESSOR_NUMBER lpIdealProcessor, PPROCESSOR_NUMBER lpPreviousIdealProcessor); static SetThreadIdealProcessorEx_fn *s_SetThreadIdealProcessorEx; typedef HRESULT WINAPI SetThreadDescription_fn(HANDLE hThread, PCWSTR lpThreadDescription); static SetThreadDescription_fn *s_SetThreadDescription; typedef HRESULT WINAPI GetThreadDescription_fn(HANDLE hThread, PWSTR *lpThreadDescription); static GetThreadDescription_fn *s_GetThreadDescription; static void s_check_thread_functions(void *user_data) { (void)user_data; s_SetThreadGroupAffinity = (SetThreadGroupAffinity_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "SetThreadGroupAffinity"); s_SetThreadIdealProcessorEx = (SetThreadIdealProcessorEx_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "SetThreadIdealProcessorEx"); s_SetThreadDescription = (SetThreadDescription_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "SetThreadDescription"); s_GetThreadDescription = (GetThreadDescription_fn *)GetProcAddress( GetModuleHandleW(WIDEN(WINDOWS_KERNEL_LIB) L".dll"), "GetThreadDescription"); } int aws_thread_launch( struct aws_thread *thread, void (*func)(void *arg), void *arg, const struct aws_thread_options *options) { /* Check for functions that don't exist on ancient Windows */ aws_thread_call_once(&s_check_functions_once, s_check_thread_functions, NULL); SIZE_T stack_size = 0; if (options && options->stack_size > 0) { stack_size = (SIZE_T)options->stack_size; } bool is_managed_thread = options != NULL && options->join_strategy == AWS_TJS_MANAGED; if (is_managed_thread) { thread->detach_state = AWS_THREAD_MANAGED; } struct thread_wrapper *thread_wrapper = (struct thread_wrapper *)aws_mem_calloc(thread->allocator, 1, sizeof(struct thread_wrapper)); thread_wrapper->allocator = thread->allocator; thread_wrapper->arg = arg; thread_wrapper->func = func; thread_wrapper->thread_copy = *thread; /* * Increment the count prior to spawning the thread. Decrement back if the create failed.
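 *
 * A short note on ordering (reasoning sketch, no extra behavior): the unjoined count
 * is bumped before CreateThread() so that code waiting for all managed threads to be
 * joined can never observe a window in which the new thread is already running but
 * not yet counted; on CreateThread() failure the count is simply decremented again
 * below.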
*/ if (is_managed_thread) { aws_thread_increment_unjoined_count(); } thread->thread_handle = CreateThread(0, stack_size, thread_wrapper_fn, (LPVOID)thread_wrapper, 0, &thread->thread_id); if (!thread->thread_handle) { aws_thread_decrement_unjoined_count(); return aws_raise_error(AWS_ERROR_THREAD_INSUFFICIENT_RESOURCE); } if (options && (options->name.len > 0) && s_SetThreadDescription) { /* Don't particularly care if this fails, it's just for debugging */ struct aws_wstring *name = aws_string_convert_to_wchar_from_byte_cursor(thread->allocator, &options->name); if (name) { s_SetThreadDescription(thread->thread_handle, aws_wstring_c_str(name)); aws_wstring_destroy(name); } } if (options && options->cpu_id >= 0) { AWS_LOGF_INFO( AWS_LS_COMMON_THREAD, "id=%p: cpu affinity of cpu_id %" PRIi32 " was specified, attempting to honor the value.", (void *)thread, options->cpu_id); uint16_t group = 0; uint8_t proc_num = 0; s_get_group_and_cpu_id(options->cpu_id, &group, &proc_num); GROUP_AFFINITY group_afinity; AWS_ZERO_STRUCT(group_afinity); group_afinity.Group = (WORD)group; group_afinity.Mask = (KAFFINITY)((uint64_t)1 << proc_num); AWS_LOGF_DEBUG( AWS_LS_COMMON_THREAD, "id=%p: computed mask %" PRIx64 " on group %" PRIu16 ".", (void *)thread, (uint64_t)group_afinity.Mask, (uint16_t)group_afinity.Group); if (!s_SetThreadGroupAffinity || !s_SetThreadIdealProcessorEx) { goto no_thread_affinity; } BOOL set_group_val = s_SetThreadGroupAffinity(thread->thread_handle, &group_afinity, NULL); AWS_LOGF_DEBUG( AWS_LS_COMMON_THREAD, "id=%p: SetThreadGroupAffinity() result %" PRIi8 ".", (void *)thread, (int8_t)set_group_val); if (set_group_val) { PROCESSOR_NUMBER processor_number; AWS_ZERO_STRUCT(processor_number); processor_number.Group = (WORD)group; processor_number.Number = proc_num; BOOL set_processor_val = s_SetThreadIdealProcessorEx(thread->thread_handle, &processor_number, NULL); AWS_LOGF_DEBUG( AWS_LS_COMMON_THREAD, "id=%p: SetThreadIdealProcessorEx() result %" PRIi8 ".", (void *)thread, (int8_t)set_processor_val); if (!set_processor_val) { AWS_LOGF_WARN( AWS_LS_COMMON_THREAD, "id=%p: SetThreadIdealProcessorEx() failed with %" PRIx32 ".", (void *)thread, (uint32_t)GetLastError()); } } else { AWS_LOGF_WARN( AWS_LS_COMMON_THREAD, "id=%p: SetThreadGroupAffinity() failed with %" PRIx32 ".", (void *)thread, (uint32_t)GetLastError()); } } no_thread_affinity: /* * Managed threads need to stay unjoinable from an external perspective. We'll handle it after thread function * completion. 
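 *
 * Minimal usage sketch (illustrative only, kept inside this comment so it stays out
 * of the build; my_fn, my_arg and allocator are hypothetical caller-provided names,
 * the APIs are the ones visible in this file):
 *
 *   struct aws_thread_options options = *aws_default_thread_options();
 *   options.join_strategy = AWS_TJS_MANAGED;
 *   struct aws_thread thread;
 *   aws_thread_init(&thread, allocator);
 *   aws_thread_launch(&thread, my_fn, my_arg, &options);
 *   // No aws_thread_join() or aws_thread_clean_up() by the caller: the
 *   // managed-thread system joins the thread and frees its wrapper later.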
*/ if (is_managed_thread) { aws_thread_clean_up(thread); } else { thread->detach_state = AWS_THREAD_JOINABLE; } return AWS_OP_SUCCESS; } aws_thread_id_t aws_thread_get_id(struct aws_thread *thread) { return thread->thread_id; } enum aws_thread_detach_state aws_thread_get_detach_state(struct aws_thread *thread) { return thread->detach_state; } int aws_thread_join(struct aws_thread *thread) { if (thread->detach_state == AWS_THREAD_JOINABLE) { WaitForSingleObject(thread->thread_handle, INFINITE); thread->detach_state = AWS_THREAD_JOIN_COMPLETED; } return AWS_OP_SUCCESS; } void aws_thread_clean_up(struct aws_thread *thread) { CloseHandle(thread->thread_handle); thread->thread_handle = 0; } aws_thread_id_t aws_thread_current_thread_id(void) { return GetCurrentThreadId(); } bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2) { return t1 == t2; } void aws_thread_current_sleep(uint64_t nanos) { /* We don't really have a better option here for windows that isn't super * complex AND we don't have a use case yet where we should have sleeps * anywhere other than for context switches and testing. When that time * arises put the effort in here. */ Sleep((DWORD)aws_timestamp_convert(nanos, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL)); } int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data) { if (!tl_wrapper) { return aws_raise_error(AWS_ERROR_THREAD_NOT_JOINABLE); } struct thread_atexit_callback *cb = aws_mem_calloc(tl_wrapper->allocator, 1, sizeof(struct thread_atexit_callback)); if (!cb) { return AWS_OP_ERR; } cb->callback = callback; cb->user_data = user_data; cb->next = tl_wrapper->atexit; tl_wrapper->atexit = cb; return AWS_OP_SUCCESS; } int aws_thread_current_name(struct aws_allocator *allocator, struct aws_string **out_name) { if (s_GetThreadDescription) { PWSTR wname = NULL; if (SUCCEEDED(s_GetThreadDescription(GetCurrentThread(), &wname))) { *out_name = aws_string_convert_from_wchar_c_str(allocator, wname); LocalFree(wname); return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); } int aws_thread_name(struct aws_allocator *allocator, aws_thread_id_t thread_id, struct aws_string **out_name) { if (s_GetThreadDescription) { HANDLE thread_handle = OpenThread(THREAD_QUERY_LIMITED_INFORMATION, FALSE, thread_id); if (thread_handle == NULL) { AWS_LOGF_WARN( AWS_LS_COMMON_THREAD, "thread_id=%lu: OpenThread() failed with %" PRIx32 ".", thread_id, (uint32_t)GetLastError()); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } PWSTR wname = NULL; if (SUCCEEDED(s_GetThreadDescription(thread_handle, &wname))) { *out_name = aws_string_convert_from_wchar_c_str(allocator, wname); LocalFree(wname); CloseHandle(thread_handle); return AWS_OP_SUCCESS; } CloseHandle(thread_handle); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/windows/time.c000066400000000000000000000005551456575232400246210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include time_t aws_timegm(struct tm *const t) { return _mkgmtime(t); } void aws_localtime(time_t time, struct tm *t) { localtime_s(t, &time); } void aws_gmtime(time_t time, struct tm *t) { gmtime_s(t, &time); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/source/xml_parser.c000066400000000000000000000335711456575232400243510ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #ifdef _MSC_VER /* allow non-constant declared initializers. */ # pragma warning(disable : 4204) #endif static const size_t s_max_document_depth = 20; #define MAX_NAME_LEN ((size_t)256) #define NODE_CLOSE_OVERHEAD ((size_t)3) struct cb_stack_data { aws_xml_parser_on_node_encountered_fn *cb; void *user_data; }; int s_node_next_sibling(struct aws_xml_parser *parser); static bool s_double_quote_fn(uint8_t value) { return value == '"'; } /* load the node declaration line, parsing node name and attributes. * * something of the form: * * */ static int s_load_node_decl( struct aws_xml_parser *parser, struct aws_byte_cursor *decl_body, struct aws_xml_node *node) { AWS_PRECONDITION(parser); AWS_PRECONDITION(decl_body); AWS_PRECONDITION(node); struct aws_array_list splits; AWS_ZERO_STRUCT(splits); AWS_ZERO_ARRAY(parser->split_scratch); aws_array_list_init_static( &splits, parser->split_scratch, AWS_ARRAY_SIZE(parser->split_scratch), sizeof(struct aws_byte_cursor)); /* split by space, first split will be the node name, everything after will be attribute=value pairs. For now * we limit to 10 attributes, if this is exceeded we consider it invalid document. */ if (aws_byte_cursor_split_on_char(decl_body, ' ', &splits)) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); return aws_raise_error(AWS_ERROR_INVALID_XML); } size_t splits_count = aws_array_list_length(&splits); if (splits_count < 1) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); return aws_raise_error(AWS_ERROR_INVALID_XML); } aws_array_list_get_at(&splits, &node->name, 0); AWS_ZERO_ARRAY(parser->attributes); if (splits.length > 1) { aws_array_list_init_static( &node->attributes, parser->attributes, AWS_ARRAY_SIZE(parser->attributes), sizeof(struct aws_xml_attribute)); for (size_t i = 1; i < splits.length; ++i) { struct aws_byte_cursor attribute_pair; AWS_ZERO_STRUCT(attribute_pair); aws_array_list_get_at(&splits, &attribute_pair, i); struct aws_byte_cursor att_val_pair[2]; AWS_ZERO_ARRAY(att_val_pair); struct aws_array_list att_val_pair_lst; AWS_ZERO_STRUCT(att_val_pair_lst); aws_array_list_init_static(&att_val_pair_lst, att_val_pair, 2, sizeof(struct aws_byte_cursor)); if (!aws_byte_cursor_split_on_char(&attribute_pair, '=', &att_val_pair_lst)) { struct aws_xml_attribute attribute = { .name = att_val_pair[0], .value = aws_byte_cursor_trim_pred(&att_val_pair[1], s_double_quote_fn), }; aws_array_list_push_back(&node->attributes, &attribute); } } } return AWS_OP_SUCCESS; } int aws_xml_parse(struct aws_allocator *allocator, const struct aws_xml_parser_options *options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(options); AWS_PRECONDITION(options->on_root_encountered); struct aws_xml_parser parser = { .allocator = allocator, .doc = options->doc, .max_depth = options->max_depth ? options->max_depth : s_max_document_depth, .error = AWS_OP_SUCCESS, }; aws_array_list_init_dynamic(&parser.callback_stack, allocator, 4, sizeof(struct cb_stack_data)); /* burn everything that precedes the actual xml nodes. 
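 *
 * For example (illustrative input, not taken from the test suite), given a document
 * such as
 *
 *   <?xml version="1.0" encoding="UTF-8"?><!DOCTYPE note><Root>... content ...</Root>
 *
 * the loop below skips the '<?...>' and '<!...>' preamble statements and stops with
 * the cursor positioned at the '<' of <Root>, which is where real parsing begins.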
*/ while (parser.doc.len) { const uint8_t *start = memchr(parser.doc.ptr, '<', parser.doc.len); if (!start) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); parser.error = aws_raise_error(AWS_ERROR_INVALID_XML); goto clean_up; } const uint8_t *location = memchr(parser.doc.ptr, '>', parser.doc.len); if (!location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); parser.error = aws_raise_error(AWS_ERROR_INVALID_XML); goto clean_up; } aws_byte_cursor_advance(&parser.doc, start - parser.doc.ptr); /* if these are preamble statements, burn them. otherwise don't seek at all * and assume it's just the doc with no preamble statements. */ if (*(parser.doc.ptr + 1) == '?' || *(parser.doc.ptr + 1) == '!') { /* nobody cares about the preamble */ size_t advance = location - parser.doc.ptr + 1; aws_byte_cursor_advance(&parser.doc, advance); } else { break; } } /* now we should be at the start of the actual document. */ struct cb_stack_data stack_data = { .cb = options->on_root_encountered, .user_data = options->user_data, }; aws_array_list_push_back(&parser.callback_stack, &stack_data); parser.error = s_node_next_sibling(&parser); clean_up: aws_array_list_clean_up(&parser.callback_stack); return parser.error; } int s_advance_to_closing_tag( struct aws_xml_parser *parser, struct aws_xml_node *node, struct aws_byte_cursor *out_body) { AWS_PRECONDITION(parser); AWS_PRECONDITION(node); /* currently the max node name is 256 characters. This is arbitrary, but should be enough * for our uses. If we ever generalize this, we'll have to come back and rethink this. */ uint8_t name_close[MAX_NAME_LEN + NODE_CLOSE_OVERHEAD] = {0}; uint8_t name_open[MAX_NAME_LEN + NODE_CLOSE_OVERHEAD] = {0}; struct aws_byte_buf closing_cmp_buf = aws_byte_buf_from_empty_array(name_close, sizeof(name_close)); struct aws_byte_buf open_cmp_buf = aws_byte_buf_from_empty_array(name_open, sizeof(name_open)); size_t closing_name_len = node->name.len + NODE_CLOSE_OVERHEAD; if (closing_name_len > node->doc_at_body.len) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); parser->error = aws_raise_error(AWS_ERROR_INVALID_XML); return AWS_OP_ERR; } if (sizeof(name_close) < closing_name_len) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); parser->error = aws_raise_error(AWS_ERROR_INVALID_XML); return AWS_OP_ERR; } struct aws_byte_cursor open_bracket = aws_byte_cursor_from_c_str("<"); struct aws_byte_cursor close_token = aws_byte_cursor_from_c_str("/"); struct aws_byte_cursor close_bracket = aws_byte_cursor_from_c_str(">"); aws_byte_buf_append(&open_cmp_buf, &open_bracket); aws_byte_buf_append(&open_cmp_buf, &node->name); aws_byte_buf_append(&closing_cmp_buf, &open_bracket); aws_byte_buf_append(&closing_cmp_buf, &close_token); aws_byte_buf_append(&closing_cmp_buf, &node->name); aws_byte_buf_append(&closing_cmp_buf, &close_bracket); size_t depth_count = 1; struct aws_byte_cursor to_find_open = aws_byte_cursor_from_buf(&open_cmp_buf); struct aws_byte_cursor to_find_close = aws_byte_cursor_from_buf(&closing_cmp_buf); struct aws_byte_cursor close_find_result; AWS_ZERO_STRUCT(close_find_result); do { if (aws_byte_cursor_find_exact(&parser->doc, &to_find_close, &close_find_result)) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); return aws_raise_error(AWS_ERROR_INVALID_XML); } /* if we find an opening node with the same name, before the closing tag keep going. 
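 *
 * Illustrative case (hedged example; it mirrors the xml_parser_nested_node_same_name
 * test listed in tests/CMakeLists.txt): for a body like <Node>a<Node>b</Node>c</Node>,
 * the first closing tag found belongs to the inner element. Spotting the extra
 * opening <Node before it bumps depth_count, so the scan keeps going until the outer
 * closing tag brings depth_count back to zero.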
*/ struct aws_byte_cursor open_find_result; AWS_ZERO_STRUCT(open_find_result); while (parser->doc.len) { if (!aws_byte_cursor_find_exact(&parser->doc, &to_find_open, &open_find_result)) { if (open_find_result.ptr < close_find_result.ptr) { size_t skip_len = open_find_result.ptr - parser->doc.ptr; aws_byte_cursor_advance(&parser->doc, skip_len + 1); depth_count++; continue; } } size_t skip_len = close_find_result.ptr - parser->doc.ptr; aws_byte_cursor_advance(&parser->doc, skip_len + closing_cmp_buf.len); depth_count--; break; } } while (depth_count > 0); size_t len = close_find_result.ptr - node->doc_at_body.ptr; if (out_body) { *out_body = aws_byte_cursor_from_array(node->doc_at_body.ptr, len); } return parser->error; } int aws_xml_node_as_body(struct aws_xml_node *node, struct aws_byte_cursor *out_body) { AWS_PRECONDITION(node); AWS_FATAL_ASSERT(!node->processed && "XML node can be traversed, or read as body, but not both."); node->processed = true; return s_advance_to_closing_tag(node->parser, node, out_body); } int aws_xml_node_traverse( struct aws_xml_node *node, aws_xml_parser_on_node_encountered_fn *on_node_encountered, void *user_data) { AWS_PRECONDITION(node); AWS_PRECONDITION(on_node_encountered); struct aws_xml_parser *parser = node->parser; AWS_FATAL_ASSERT(!node->processed && "XML node can be traversed, or read as body, but not both."); node->processed = true; struct cb_stack_data stack_data = { .cb = on_node_encountered, .user_data = user_data, }; size_t doc_depth = aws_array_list_length(&parser->callback_stack); if (doc_depth >= parser->max_depth) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document exceeds max depth."); aws_raise_error(AWS_ERROR_INVALID_XML); goto error; } aws_array_list_push_back(&parser->callback_stack, &stack_data); /* look for the next node at the current level. do this until we encounter the parent node's * closing tag. */ while (!parser->error) { const uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len); if (!next_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); aws_raise_error(AWS_ERROR_INVALID_XML); goto error; } const uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len); if (!end_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); aws_raise_error(AWS_ERROR_INVALID_XML); goto error; } bool parent_closed = false; if (*(next_location + 1) == '/') { parent_closed = true; } size_t node_name_len = end_location - next_location; aws_byte_cursor_advance(&parser->doc, end_location - parser->doc.ptr + 1); if (parent_closed) { break; } struct aws_byte_cursor decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1); struct aws_xml_node next_node = { .parser = parser, .doc_at_body = parser->doc, .processed = false, }; if (s_load_node_decl(parser, &decl_body, &next_node)) { return AWS_OP_ERR; } if (on_node_encountered(&next_node, user_data)) { goto error; } /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. 
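 *
 * In other words (summary, no new behavior): a child callback that neither calls
 * aws_xml_node_traverse() nor aws_xml_node_as_body() leaves next_node.processed
 * false, so the parser advances past that whole element here to keep the outer
 * traversal aligned on the next sibling.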
*/ if (!next_node.processed) { if (s_advance_to_closing_tag(parser, &next_node, NULL)) { goto error; } } } aws_array_list_pop_back(&parser->callback_stack); return parser->error; error: parser->error = AWS_OP_ERR; return parser->error; } struct aws_byte_cursor aws_xml_node_get_name(const struct aws_xml_node *node) { AWS_PRECONDITION(node); return node->name; } size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node) { AWS_PRECONDITION(node); return aws_array_list_length(&node->attributes); } struct aws_xml_attribute aws_xml_node_get_attribute(const struct aws_xml_node *node, size_t attribute_index) { AWS_PRECONDITION(node); struct aws_xml_attribute attribute; if (aws_array_list_get_at(&node->attributes, &attribute, attribute_index)) { AWS_FATAL_ASSERT(0 && "Invalid XML attribute index"); } return attribute; } /* advance the parser to the next sibling node.*/ int s_node_next_sibling(struct aws_xml_parser *parser) { AWS_PRECONDITION(parser); const uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len); if (!next_location) { return parser->error; } aws_byte_cursor_advance(&parser->doc, next_location - parser->doc.ptr); const uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len); if (!end_location) { AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid."); return aws_raise_error(AWS_ERROR_INVALID_XML); } size_t node_name_len = end_location - next_location; aws_byte_cursor_advance(&parser->doc, end_location - parser->doc.ptr + 1); struct aws_byte_cursor node_decl_body = aws_byte_cursor_from_array(next_location + 1, node_name_len - 1); struct aws_xml_node sibling_node = { .parser = parser, .doc_at_body = parser->doc, .processed = false, }; if (s_load_node_decl(parser, &node_decl_body, &sibling_node)) { return AWS_OP_ERR; } struct cb_stack_data stack_data; AWS_ZERO_STRUCT(stack_data); aws_array_list_back(&parser->callback_stack, &stack_data); AWS_FATAL_ASSERT(stack_data.cb); if (stack_data.cb(&sibling_node, stack_data.user_data)) { return AWS_OP_ERR; } /* if the user simply returned while skipping the node altogether, go ahead and do the skip over. 
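 *
 * Rough callback sketch (illustrative only; s_on_root is a hypothetical name, the
 * callback shape matches aws_xml_parser_on_node_encountered_fn as used in this file):
 *
 *   static int s_on_root(struct aws_xml_node *node, void *user_data) {
 *       (void)user_data;
 *       struct aws_byte_cursor body;
 *       return aws_xml_node_as_body(node, &body); // or just return 0 to skip the node
 *   }
 *
 * Returning without touching the node at all lands in the branch below, which skips
 * to the element's closing tag on the caller's behalf.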
*/ if (!sibling_node.processed) { if (s_advance_to_closing_tag(parser, &sibling_node, NULL)) { return AWS_OP_ERR; } } return parser->error; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/000077500000000000000000000000001456575232400216625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/CMakeLists.txt000066400000000000000000000531601456575232400244270ustar00rootroot00000000000000include(AwsLibFuzzer) include(AwsTestHarness) enable_testing() file(GLOB META_TEST_SRC "assert_test.c") file(GLOB TEST_BASE_SRC "*.c") file(GLOB TEST_LOGGING_SRC "logging/*.c") file(GLOB TEST_SRC ${TEST_BASE_SRC} ${TEST_LOGGING_SRC}) list(REMOVE_ITEM TEST_SRC ${META_TEST_SRC}) file(GLOB TEST_BASE_HDRS "*.h") file(GLOB TEST_LOGGING_HDRS "logging/*.h") file(GLOB TEST_HDRS ${TEST_BASE_HDRS} ${TEST_LOGGING_HDRS}) file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) set(METATEST_BINARY_NAME ${PROJECT_NAME}-assert-tests) if(NOT LEGACY_COMPILER_SUPPORT) add_executable(${METATEST_BINARY_NAME} ${META_TEST_SRC}) aws_set_common_properties(${METATEST_BINARY_NAME} NO_WEXTRA NO_PEDANTIC) aws_add_sanitizers(${METATEST_BINARY_NAME} ${${PROJECT_NAME}_SANITIZERS}) target_link_libraries(${METATEST_BINARY_NAME} PRIVATE ${PROJECT_NAME}) target_compile_definitions(${METATEST_BINARY_NAME} PRIVATE AWS_UNSTABLE_TESTING_API=1) target_include_directories(${METATEST_BINARY_NAME} PRIVATE ${CMAKE_CURRENT_LIST_DIR}) if(MSVC) target_compile_definitions(${METATEST_BINARY_NAME} PRIVATE "-D_CRT_SECURE_NO_WARNINGS") endif() add_test(assert_test ${METATEST_BINARY_NAME} ${CMAKE_CURRENT_BINARY_DIR}/metatest.tmp) endif() add_test_case(raise_errors_test) add_test_case(reset_errors_test) add_test_case(error_callback_test) add_test_case(unknown_error_code_in_slot_test) add_test_case(unknown_error_code_no_slot_test) add_test_case(unknown_error_code_range_too_large_test) add_test_case(aws_load_error_strings_test) add_test_case(aws_assume_compiles_test) add_test_case(thread_creation_join_test) add_test_case(thread_atexit_test) add_test_case(test_managed_thread_join) add_test_case(test_managed_thread_join_timeout) add_test_case(mutex_aquire_release_test) add_test_case(mutex_is_actually_mutex_test) add_test_case(mutex_try_lock_is_correct_test) add_test_case(conditional_notify_one) add_test_case(conditional_notify_all) add_test_case(error_code_cross_thread_test) add_test_case(high_res_clock_increments_test) add_test_case(sys_clock_increments_test) add_test_case(test_sec_and_millis_conversions) add_test_case(test_sec_and_micros_conversions) add_test_case(test_sec_and_nanos_conversions) add_test_case(test_milli_and_micros_conversion) add_test_case(test_milli_and_nanos_conversion) add_test_case(test_micro_and_nanos_conversion) add_test_case(test_precision_loss_remainders_conversion) add_test_case(test_overflow_conversion) add_test_case(test_old_overflow_cases) add_test_case(array_list_zero_length) add_test_case(array_list_order_push_back_pop_front_test) add_test_case(array_list_order_push_back_pop_back_test) add_test_case(array_list_order_push_front_pop_front_test) add_test_case(array_list_pop_front_n_test) add_test_case(array_list_erase_test) add_test_case(array_list_exponential_mem_model_test) add_test_case(array_list_exponential_mem_model_iteration_test) add_test_case(array_list_set_at_overwrite_safety) add_test_case(array_list_iteration_by_ptr_test) add_test_case(array_list_iteration_test) add_test_case(array_list_preallocated_iteration_test) add_test_case(array_list_preallocated_push_test) add_test_case(array_list_shrink_to_fit_test) 
add_test_case(array_list_shrink_to_fit_static_test) add_test_case(array_list_clear_test) add_test_case(array_list_copy_test) add_test_case(array_list_swap_contents_test) add_test_case(array_list_not_enough_space_test) add_test_case(array_list_not_enough_space_test_failure) add_test_case(array_list_of_strings_sort) add_test_case(array_list_empty_sort) add_test_case(priority_queue_push_pop_order_test) add_test_case(priority_queue_random_values_test) add_test_case(priority_queue_size_and_capacity_test) add_test_case(priority_queue_remove_root_test) add_test_case(priority_queue_remove_leaf_test) add_test_case(priority_queue_remove_interior_sift_up_test) add_test_case(priority_queue_remove_interior_sift_down_test) add_test_case(priority_queue_clear_backpointers_test) add_test_case(linked_list_push_back_pop_front) add_test_case(linked_list_push_front_pop_back) add_test_case(linked_list_swap_nodes) add_test_case(linked_list_iteration) add_test_case(linked_list_reverse_iteration) add_test_case(linked_list_swap_contents) add_test_case(linked_list_move_all_back) add_test_case(linked_list_move_all_front) add_test_case(hex_encoding_test_case_empty_test) add_test_case(hex_encoding_test_case_f_test) add_test_case(hex_encoding_test_case_fo_test) add_test_case(hex_encoding_test_case_foo_test) add_test_case(hex_encoding_test_case_foob_test) add_test_case(hex_encoding_test_case_fooba_test) add_test_case(hex_encoding_test_case_foobar_test) add_test_case(hex_encoding_test_case_missing_leading_zero) add_test_case(hex_encoding_invalid_buffer_size_test) add_test_case(hex_encoding_highbyte_string_test) add_test_case(hex_encoding_overflow_test) add_test_case(hex_encoding_invalid_string_test) add_test_case(hex_encoding_append_dynamic_test_case_empty) add_test_case(hex_encoding_append_dynamic_test_case_fooba) add_test_case(base64_encoding_test_case_empty_test) add_test_case(base64_encoding_test_case_f_test) add_test_case(base64_encoding_test_case_fo_test) add_test_case(base64_encoding_test_case_foo_test) add_test_case(base64_encoding_test_case_foob_test) add_test_case(base64_encoding_test_case_fooba_test) add_test_case(base64_encoding_test_case_foobar_test) add_test_case(base64_encoding_test_case_32bytes_test) add_test_case(base64_encoding_buffer_size_too_small_test) add_test_case(base64_encoding_buffer_size_overflow_test) add_test_case(base64_encoding_buffer_size_invalid_test) add_test_case(base64_encoding_invalid_buffer_test) add_test_case(base64_encoding_highbyte_string_test) add_test_case(base64_encoding_invalid_padding_test) add_test_case(base64_encoding_test_zeros) add_test_case(base64_encoding_test_roundtrip) add_test_case(base64_encoding_test_all_values) add_test_case(uint64_buffer_test) add_test_case(uint64_buffer_non_aligned_test) add_test_case(uint32_buffer_test) add_test_case(uint32_buffer_non_aligned_test) add_test_case(uint24_buffer_test) add_test_case(uint24_buffer_non_aligned_test) add_test_case(uint16_buffer_test) add_test_case(uint16_buffer_non_aligned_test) add_test_case(uint16_buffer_signed_positive_test) add_test_case(uint16_buffer_signed_negative_test) add_test_case(text_encoding_utf8) add_test_case(text_encoding_utf16) add_test_case(text_encoding_ascii) add_test_case(text_encoding_is_utf8) add_test_case(text_is_valid_utf8) add_test_case(text_is_valid_utf8_callback) add_test_case(utf8_decoder) add_test_case(scheduler_cleanup_cancellation) add_test_case(scheduler_ordering_test) add_test_case(scheduler_pops_task_late_test) add_test_case(scheduler_has_tasks_test) 
add_test_case(scheduler_reentrant_safe) add_test_case(scheduler_cleanup_reentrants) add_test_case(scheduler_schedule_cancellation) add_test_case(scheduler_cleanup_idempotent) add_test_case(scheduler_task_delete_on_run) add_test_case(test_hash_table_create_find) add_test_case(test_hash_table_string_create_find) add_test_case(test_hash_table_put) add_test_case(test_hash_table_put_null_dtor) add_test_case(test_hash_table_swap_move) add_test_case(test_hash_table_string_clean_up) add_test_case(test_hash_table_hash_collision) add_test_case(test_hash_table_hash_overwrite) add_test_case(test_hash_table_hash_remove) add_test_case(test_hash_table_hash_clear_allows_cleanup) add_test_case(test_hash_table_on_resize_returns_correct_entry) add_test_case(test_hash_table_foreach) add_test_case(test_hash_table_iter) add_test_case(test_hash_table_empty_iter) add_test_case(test_hash_table_iter_detail) add_test_case(test_hash_table_eq) add_test_case(test_hash_churn) add_test_case(test_hash_table_cleanup_idempotent) add_test_case(test_hash_table_byte_cursor_create_find) add_test_case(test_hash_combine) add_test_case(test_linked_hash_table_preserves_insertion_order) add_test_case(test_linked_hash_table_entries_cleanup) add_test_case(test_linked_hash_table_entries_overwrite) add_test_case(test_linked_hash_table_entries_overwrite_reference_unequal) add_test_case(test_linked_hash_table_entries_overwrite_backed_cursor) add_test_case(test_lru_cache_overflow_static_members) add_test_case(test_lru_cache_lru_ness_static_members) add_test_case(test_lru_cache_element_access_members) add_test_case(test_fifo_cache_overflow_static_members) add_test_case(test_lifo_cache_overflow_static_members) add_test_case(test_cache_entries_cleanup) add_test_case(test_cache_entries_overwrite) add_test_case(test_is_power_of_two) add_test_case(test_round_up_to_power_of_two) add_test_case(test_mul_size_checked) add_test_case(test_mul_size_saturating) add_test_case(test_mul_u32_checked) add_test_case(test_mul_u32_saturating) add_test_case(test_mul_u64_checked) add_test_case(test_mul_u64_saturating) add_test_case(test_add_size_checked) add_test_case(test_aws_add_size_checked_varargs) add_test_case(test_add_size_saturating) add_test_case(test_add_u32_checked) add_test_case(test_add_u32_saturating) add_test_case(test_add_u64_checked) add_test_case(test_add_u64_saturating) add_test_case(test_min_max) add_test_case(test_clz) add_test_case(test_ctz) add_test_case(nospec_index_test) add_test_case(test_byte_cursor_advance) add_test_case(test_byte_cursor_advance_nospec) add_test_case(byte_cursor_write_tests) add_test_case(byte_cursor_read_tests) add_test_case(byte_cursor_limit_tests) add_test_case(test_byte_cursor_read_hex_u8) add_test_case(test_byte_cursor_right_trim_empty) add_test_case(test_byte_cursor_right_trim_all_whitespace) add_test_case(test_byte_cursor_right_trim_basic) add_test_case(test_byte_cursor_left_trim_empty) add_test_case(test_byte_cursor_left_trim_all_whitespace) add_test_case(test_byte_cursor_left_trim_basic) add_test_case(test_byte_cursor_trim_basic) add_test_case(string_tests) add_test_case(binary_string_test) add_test_case(string_compare_test) add_test_case(string_destroy_secure_test) add_test_case(secure_strlen_test) add_test_case(test_char_split_happy_path) add_test_case(test_char_split_ends_with_token) add_test_case(test_char_split_token_not_present) add_test_case(test_char_split_empty) add_test_case(test_char_split_zeroed) add_test_case(test_char_split_adj_tokens) add_test_case(test_char_split_begins_with_token) 
add_test_case(test_char_split_with_max_splits) add_test_case(test_char_split_output_too_small) add_test_case(test_byte_cursor_next_split) add_test_case(test_buffer_cat) add_test_case(test_buffer_cat_dest_too_small) add_test_case(test_buffer_cpy) add_test_case(test_buffer_cpy_dest_too_small) add_test_case(test_buffer_cpy_offsets) add_test_case(test_buffer_cpy_offsets_dest_too_small) add_test_case(test_buffer_eq) add_test_case(test_buffer_eq_same_content_different_len) add_test_case(test_buffer_eq_null_internal_byte_buffer) add_test_case(test_buffer_init_copy) add_test_case(test_buffer_init_copy_null_buffer) add_test_case(test_buffer_advance) add_test_case(test_buffer_printf) add_test_case(test_array_eq) add_test_case(test_array_eq_ignore_case) add_test_case(test_array_eq_c_str) add_test_case(test_array_eq_c_str_ignore_case) add_test_case(test_array_hash_ignore_case) add_test_case(test_byte_buf_write_to_capacity) add_test_case(test_byte_buf_init_cache_and_update_cursors) add_test_case(test_byte_buf_empty_appends) add_test_case(test_byte_buf_append_and_update_fail) add_test_case(test_byte_buf_append_and_update_success) add_test_case(test_byte_buf_append_dynamic) add_test_case(test_byte_buf_append_byte) add_test_case(test_byte_buf_append_lookup_success) add_test_case(test_byte_buf_append_lookup_failure) add_test_case(test_byte_buf_reserve) add_test_case(test_byte_buf_reserve_initial_capacity_zero) add_test_case(test_byte_buf_reserve_relative) add_test_case(test_byte_buf_reset) add_test_case(test_byte_cursor_compare_lexical) add_test_case(test_byte_cursor_compare_lookup) add_test_case(test_byte_cursor_starts_with) add_test_case(test_byte_cursor_starts_with_ignore_case) add_test_case(test_isalnum) add_test_case(test_isalpha) add_test_case(test_isdigit) add_test_case(test_isxdigit) add_test_case(test_isspace) add_test_case(test_byte_cursor_utf8_parse_u64) add_test_case(test_byte_cursor_utf8_parse_u64_hex) add_test_case(byte_swap_test) if(AWS_HAVE_AVX2_INTRINSICS) add_test_case(alignment32_test) else() add_test_case(alignment16_test) endif() add_test_case(test_cpu_count_at_least_works_superficially) add_test_case(test_stack_trace_decoding) add_test_case(test_platform_build_os) add_test_case(test_sanity_check_numa_discovery) add_test_case(test_sanity_check_environment_loader) add_test_case(test_realloc_fallback) add_test_case(test_realloc_passthrough) add_test_case(test_cf_allocator_wrapper) add_test_case(test_acquire_many) add_test_case(test_alloc_nothing) add_test_case(sba_alloc_free_once) add_test_case(sba_random_allocs_and_frees) add_test_case(sba_random_reallocs) add_test_case(sba_threaded_allocs_and_frees) add_test_case(sba_threaded_reallocs) add_test_case(sba_churn) add_test_case(sba_metrics) add_test_case(default_threaded_reallocs) add_test_case(default_threaded_allocs_and_frees) add_test_case(aligned_threaded_reallocs) add_test_case(aligned_threaded_allocs_and_frees) add_test_case(test_memtrace_none) add_test_case(test_memtrace_count) add_test_case(test_memtrace_stacks) add_test_case(test_memtrace_midstream) add_test_case(test_calloc_override) add_test_case(test_calloc_fallback_from_default_allocator) add_test_case(test_calloc_fallback_from_given) add_test_case(test_calloc_from_default_allocator) add_test_case(test_calloc_from_given_allocator) add_test_case(rw_lock_aquire_release_test) add_test_case(rw_lock_is_actually_rw_lock_test) add_test_case(rw_lock_many_readers_test) add_test_case(test_secure_zero) add_test_case(test_buffer_secure_zero) add_test_case(test_buffer_clean_up_secure) 
add_test_case(is_zeroed) add_test_case(atomics_semantics) add_test_case(atomics_semantics_implicit) add_test_case(atomics_static_init) add_test_case(atomics_acquire_to_release_one_direction) add_test_case(atomics_acquire_to_release_mixed) add_test_case(rfc822_utc_parsing) add_test_case(rfc822_utc_parsing_auto_detect) add_test_case(rfc822_local_time_east_of_gmt_parsing) add_test_case(rfc822_local_time_west_of_gmt_parsing) add_test_case(rfc822_utc_two_digit_year_parsing) add_test_case(rfc822_utc_no_dow_parsing) add_test_case(rfc822_utc_dos_prevented) add_test_case(rfc822_invalid_format) add_test_case(rfc822_invalid_tz) add_test_case(rfc822_invalid_auto_format) add_test_case(iso8601_utc_parsing) add_test_case(iso8601_basic_utc_parsing) add_test_case(iso8601_utc_parsing_auto_detect) add_test_case(iso8601_basic_utc_parsing_auto_detect) add_test_case(iso8601_date_only_parsing) add_test_case(iso8601_basic_date_only_parsing) add_test_case(iso8601_utc_no_colon_parsing) add_test_case(iso8601_utc_dos_prevented) add_test_case(iso8601_invalid_format) add_test_case(iso8601_invalid_auto_format) add_test_case(unix_epoch_parsing) add_test_case(millis_parsing) add_test_case(device_rand_u64_distribution) add_test_case(device_rand_u32_distribution) add_test_case(device_rand_u16_distribution) add_test_case(device_rand_buffer_distribution) add_test_case(device_rand_buffer_append_distribution) add_test_case(device_rand_buffer_append_short_buffer) add_test_case(uuid_string) add_test_case(prefilled_uuid_string) add_test_case(uuid_string_short_buffer) add_test_case(uuid_string_parse) add_test_case(uuid_string_parse_too_short) add_test_case(uuid_string_parse_malformed) add_test_case(test_environment_functions) add_test_case(short_argument_parse) add_test_case(long_argument_parse) add_test_case(unqualified_argument_parse) add_test_case(unknown_argument_parse) add_test_case(test_command_dispatch) add_test_case(ring_buffer_1_to_1_acquire_release_wraps_test) add_test_case(ring_buffer_release_after_full_test) add_test_case(ring_buffer_acquire_up_to_test) add_test_case(ring_buffer_acquire_tail_always_chases_head_test) add_test_case(ring_buffer_acquire_multi_threaded_test) add_test_case(ring_buffer_acquire_up_to_multi_threaded_test) add_test_case(string_to_log_level_success_test) add_test_case(string_to_log_level_failure_test) add_test_case(test_memory_usage_maxrss) if(NOT ANDROID) add_test_case(test_logging_filter_at_AWS_LL_NONE_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_FATAL_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_ERROR_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_WARN_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_INFO_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_DEBUG_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels_trace_cutoff) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels_debug_cutoff) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels_info_cutoff) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels_warn_cutoff) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels_error_cutoff) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels_fatal_cutoff) add_test_case(test_logging_filter_at_AWS_LL_TRACE_s_logf_all_levels_none_cutoff) add_test_case(test_log_formatter_s_formatter_empty_case) 
add_test_case(test_log_formatter_s_formatter_simple_case) add_test_case(test_log_formatter_s_formatter_number_case) add_test_case(test_log_formatter_s_formatter_string_case) add_test_case(test_log_formatter_s_formatter_newline_case) add_test_case(test_log_writer_simple_file_test) add_test_case(test_log_writer_existing_file_test) add_test_case(test_log_writer_bad_file_test) add_test_case(test_foreground_log_channel_single_line) add_test_case(test_foreground_log_channel_numbers) add_test_case(test_foreground_log_channel_words) add_test_case(test_foreground_log_channel_all) add_test_case(test_background_log_channel_single_line) add_test_case(test_background_log_channel_numbers) add_test_case(test_background_log_channel_words) add_test_case(test_background_log_channel_all) add_test_case(test_pipeline_logger_unformatted_test) add_test_case(test_pipeline_logger_formatted_test) add_test_case(dynamic_log_level_change_test) endif() # ANDROID add_test_case(get_pid_sanity_check_test) add_test_case(max_io_handles_sanity_check_test) add_test_case(run_command_test_success) add_test_case(run_command_test_bad_command) add_test_case(cpuid_test) add_test_case(xml_parser_root_with_text) add_test_case(xml_parser_child_with_text) add_test_case(xml_parser_siblings_with_text) add_test_case(xml_parser_preamble_and_attributes) add_test_case(xml_parser_nested_node_same_name_test) add_test_case(xml_parser_nested_node_deep_recursion_test) add_test_case(xml_parser_too_many_attributes_test) add_test_case(xml_parser_name_too_long_test) add_test_case(test_thread_scheduler_ordering) add_test_case(test_thread_scheduler_happy_path_cancellation) add_test_case(test_scheduler_cancellation_for_pending_scheduled_task) add_test_case(aws_fopen_non_ascii_read_existing_file_test) add_test_case(aws_fopen_non_ascii_test) add_test_case(aws_fopen_ascii_test) add_test_case(directory_traversal_test) add_test_case(directory_iteration_test) add_test_case(directory_iteration_non_existent_directory_test) add_test_case(directory_traversal_stop_traversal) add_test_case(directory_traversal_on_file_test) add_test_case(directory_existence_test) add_test_case(directory_creation_deletion_test) add_test_case(directory_non_empty_deletion_fails_test) add_test_case(directory_non_empty_deletion_recursively_succeeds_test) add_test_case(directory_move_succeeds_test) add_test_case(directory_move_src_non_existent_test) add_test_case(test_home_directory_not_null) add_test_case(test_normalize_posix_directory_separator) add_test_case(test_normalize_windows_directory_separator) add_test_case(test_byte_buf_init_from_file) add_test_case(promise_test_wait_forever) add_test_case(promise_test_wait_for_a_bit) add_test_case(promise_test_finish_immediately) add_test_case(promise_test_finish_before_wait) add_test_case(promise_test_multiple_waiters) add_test_case(test_json_parse_from_string) add_test_case(test_json_parse_to_string) add_test_case(uri_full_parse) add_test_case(uri_no_scheme_parse) add_test_case(uri_no_port_parse) add_test_case(uri_no_path_parse) add_test_case(uri_no_query_parse) add_test_case(uri_minimal_parse) add_test_case(uri_root_only_parse) add_test_case(uri_root_slash_only_path_parse) add_test_case(uri_path_and_query_only_parse) add_test_case(uri_userinfo_no_password_parse) add_test_case(uri_empty_user_parse) add_test_case(uri_query_params) add_test_case(uri_ipv6_parse) add_test_case(uri_ipv6_no_port_parse) add_test_case(uri_ipv4_parse) add_test_case(uri_invalid_scheme_parse) add_test_case(uri_invalid_port_parse) 
add_test_case(uri_port_too_large_parse) add_test_case(uri_builder) add_test_case(uri_builder_from_string) add_test_case(test_uri_encode_path_rfc3986) add_test_case(test_uri_encode_query) add_test_case(test_uri_decode) add_test_case(test_cross_process_lock_works_in_proc) add_test_case(test_cross_process_lock_works_cross_proc) #this one is here for use by test_instance_lock_works_cross_proc add_test_case(cross_process_lock_mp_test_runner) add_test_case(test_cross_process_lock_invalid_nonce_fails) generate_test_driver(${PROJECT_NAME}-tests) if(NOT MSVC AND NOT LEGACY_COMPILER_SUPPORT) # we have some tests here that purposely overflow target_compile_options(${PROJECT_NAME}-tests PRIVATE -Wno-overflow) endif() if(MSVC) target_compile_options(${PROJECT_NAME}-tests PRIVATE -D_CRT_SECURE_NO_WARNINGS) endif() file(GLOB FUZZ_TESTS "fuzz/*.c") aws_add_fuzz_tests("${FUZZ_TESTS}" "" "") # Resources to use for testing. add_custom_command(TARGET ${PROJECT_NAME}-tests PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/resources $) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/alloc_test.c000066400000000000000000000315161456575232400241650ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #ifdef __MACH__ # include #endif static void *s_test_alloc_acquire(struct aws_allocator *allocator, size_t size) { (void)allocator; return (size > 0) ? malloc(size) : NULL; } static void s_test_alloc_release(struct aws_allocator *allocator, void *ptr) { (void)allocator; free(ptr); } static void *s_test_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) { (void)allocator; (void)oldsize; /* Realloc should ensure that newsize is never 0 */ AWS_FATAL_ASSERT(newsize != 0); return realloc(ptr, newsize); } static void *s_test_calloc(struct aws_allocator *allocator, size_t num, size_t size) { (void)allocator; return (num > 0 && size > 0) ? 
calloc(num, size) : NULL; } /** * Check that we correctly protect against * https://wiki.sei.cmu.edu/confluence/display/c/MEM04-C.+Beware+of+zero-length+allocations * For now, can only test the realloc case, because it returns NULL on error * Test the remaining cases once https://github.com/awslabs/aws-c-common/issues/471 is solved */ AWS_TEST_CASE(test_alloc_nothing, s_test_alloc_nothing_fn) static int s_test_alloc_nothing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_allocator test_allocator = { .mem_acquire = s_test_alloc_acquire, .mem_release = s_test_alloc_release, .mem_realloc = s_test_realloc, .mem_calloc = s_test_calloc, }; /* realloc should handle the case correctly, return null, and free the memory */ void *p = aws_mem_acquire(&test_allocator, 12); ASSERT_SUCCESS(aws_mem_realloc(&test_allocator, &p, 12, 0)); ASSERT_NULL(p); return 0; } /* * Small Block Allocator tests */ static int s_sba_alloc_free_once(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_allocator *sba = aws_small_block_allocator_new(allocator, false); void *mem = aws_mem_acquire(sba, 42); ASSERT_NOT_NULL(mem); const size_t allocated = aws_mem_tracer_bytes(allocator); ASSERT_TRUE(allocated > 0); aws_mem_release(sba, mem); aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_alloc_free_once, s_sba_alloc_free_once) #define NUM_TEST_ALLOCS 10000 #define NUM_TEST_THREADS 8 static int s_sba_random_allocs_and_frees(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_allocator *sba = aws_small_block_allocator_new(allocator, false); srand(42); void *allocs[NUM_TEST_ALLOCS]; for (size_t count = 0; count < NUM_TEST_ALLOCS; ++count) { size_t size = aws_max_size(rand() % 512, 1); void *alloc = aws_mem_acquire(sba, size); ASSERT_NOT_NULL(alloc); allocs[count] = alloc; } for (size_t count = 0; count < NUM_TEST_ALLOCS; ++count) { void *alloc = allocs[count]; aws_mem_release(sba, alloc); } aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_random_allocs_and_frees, s_sba_random_allocs_and_frees) static int s_sba_random_reallocs(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_allocator *sba = aws_small_block_allocator_new(allocator, false); srand(128); void *alloc = NULL; size_t size = 0; for (size_t count = 0; count < NUM_TEST_ALLOCS; ++count) { size_t old_size = size; size = rand() % 4096; ASSERT_SUCCESS(aws_mem_realloc(sba, &alloc, old_size, size)); } ASSERT_SUCCESS(aws_mem_realloc(sba, &alloc, size, 0)); aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_random_reallocs, s_sba_random_reallocs) struct allocator_thread_test_data { struct aws_allocator *test_allocator; uint32_t thread_idx; }; static void s_threaded_alloc_worker(void *user_data) { struct aws_allocator *test_allocator = ((struct allocator_thread_test_data *)user_data)->test_allocator; void *allocs[NUM_TEST_ALLOCS]; for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { size_t size = aws_max_size(rand() % 512, 1); void *alloc = aws_mem_acquire(test_allocator, size); AWS_FATAL_ASSERT(alloc); allocs[count] = alloc; } for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { void *alloc = allocs[count]; aws_mem_release(test_allocator, alloc); } } static void s_thread_test( struct aws_allocator *allocator, void (*thread_fn)(void *), struct aws_allocator *test_allocator) { const struct aws_thread_options *thread_options = aws_default_thread_options(); struct aws_thread 
threads[NUM_TEST_THREADS]; struct allocator_thread_test_data thread_data[NUM_TEST_THREADS]; AWS_ZERO_ARRAY(threads); AWS_ZERO_ARRAY(thread_data); for (size_t thread_idx = 0; thread_idx < AWS_ARRAY_SIZE(threads); ++thread_idx) { struct aws_thread *thread = &threads[thread_idx]; aws_thread_init(thread, allocator); struct allocator_thread_test_data *data = &thread_data[thread_idx]; data->test_allocator = test_allocator; data->thread_idx = (uint32_t)thread_idx; aws_thread_launch(thread, thread_fn, data, thread_options); } for (size_t thread_idx = 0; thread_idx < AWS_ARRAY_SIZE(threads); ++thread_idx) { struct aws_thread *thread = &threads[thread_idx]; aws_thread_join(thread); } } static int s_sba_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) { (void)ctx; srand(96); struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true); s_thread_test(allocator, s_threaded_alloc_worker, sba); aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_threaded_allocs_and_frees, s_sba_threaded_allocs_and_frees) static void s_threaded_realloc_worker(void *user_data) { struct allocator_thread_test_data *thread_data = user_data; struct aws_allocator *test_allocator = thread_data->test_allocator; void *alloc = NULL; size_t size = 0; for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { size_t old_size = size; size = rand() % 1024; if (old_size) { AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1)); } AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, old_size, size)); /* If there was a value, make sure it's still there */ if (old_size && size) { AWS_FATAL_ASSERT(0 == memcmp(alloc, &thread_data->thread_idx, 1)); } if (size) { memset(alloc, (int)thread_data->thread_idx, size); } } AWS_FATAL_ASSERT(0 == aws_mem_realloc(test_allocator, &alloc, size, 0)); } static int s_sba_threaded_reallocs(struct aws_allocator *allocator, void *ctx) { (void)ctx; srand(12); struct aws_allocator *sba = aws_small_block_allocator_new(allocator, true); s_thread_test(allocator, s_threaded_realloc_worker, sba); aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_threaded_reallocs, s_sba_threaded_reallocs) static int s_sba_churn(struct aws_allocator *allocator, void *ctx) { (void)ctx; srand(9000); struct aws_array_list allocs; aws_array_list_init_dynamic(&allocs, allocator, NUM_TEST_ALLOCS, sizeof(void *)); struct aws_allocator *sba = aws_small_block_allocator_new(allocator, false); size_t alloc_count = 0; while (alloc_count++ < NUM_TEST_ALLOCS * 10) { size_t size = aws_max_size(rand() % (2 * 4096), 1); void *alloc = aws_mem_acquire(sba, size); aws_array_list_push_back(&allocs, &alloc); /* randomly free a previous allocation, simulating the real world a bit */ if ((rand() % allocs.length) > (allocs.length / 2)) { size_t idx = rand() % allocs.length; aws_array_list_get_at(&allocs, &alloc, idx); aws_array_list_erase(&allocs, idx); aws_mem_release(sba, alloc); } } /* free all remaining allocations */ for (size_t idx = 0; idx < allocs.length; ++idx) { void *alloc = NULL; aws_array_list_get_at(&allocs, &alloc, idx); aws_mem_release(sba, alloc); } aws_array_list_clean_up(&allocs); aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_churn, s_sba_churn) static int s_sba_metrics_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_allocator *sba = aws_small_block_allocator_new(allocator, false); size_t expected_active_size = 0; void *allocs[512] = {0}; for (int idx = 0; idx < 
AWS_ARRAY_SIZE(allocs); ++idx) { size_t size = idx + 1; size_t bin_size = 0; ASSERT_SUCCESS(aws_round_up_to_power_of_two(size, &bin_size)); expected_active_size += bin_size; allocs[idx] = aws_mem_acquire(sba, size); ASSERT_TRUE(aws_small_block_allocator_bytes_reserved(sba) > aws_small_block_allocator_bytes_active(sba)); ASSERT_TRUE(expected_active_size <= aws_small_block_allocator_bytes_active(sba)); } /* * There are * * 32 allocations of size < 32 # (bin 0) * 32 allocations of 32 < size <= 64 # (bin 1) * 64 allocations of 64 < size <= 128 # (bin 2) * 128 allocations of 128 < size <= 256 # (bin 3) * 256 allocations of 256 < size <= 512 # (bin 4) * * If we let actual_page_size = allocated_page_size - sizeof(page_header), then we expect to have reserved * * (32 + actual_page_size / 32 - 1) / (actual_page_size / 32) # (bin 0) * (32 + actual_page_size / 64 - 1) / (actual_page_size / 64) # (bin 1) * (64 + actual_page_size / 128 - 1) / (actual_page_size / 128) # (bin 2) * (128 + actual_page_size / 256 - 1) / (actual_page_size / 256) # (bin 3) * (256 + actual_page_size / 512 - 1) / (actual_page_size / 512) # (bin 4) * * total pages during the allocations. */ size_t actual_page_size = aws_small_block_allocator_page_size_available(sba); size_t bin0_pages = (32 + actual_page_size / 32 - 1) / (actual_page_size / 32); size_t bin1_pages = (32 + actual_page_size / 64 - 1) / (actual_page_size / 64); size_t bin2_pages = (64 + actual_page_size / 128 - 1) / (actual_page_size / 128); size_t bin3_pages = (128 + actual_page_size / 256 - 1) / (actual_page_size / 256); size_t bin4_pages = (256 + actual_page_size / 512 - 1) / (actual_page_size / 512); size_t expected_page_count = bin0_pages + bin1_pages + bin2_pages + bin3_pages + bin4_pages; ASSERT_INT_EQUALS( expected_page_count * aws_small_block_allocator_page_size(sba), aws_small_block_allocator_bytes_reserved(sba)); for (int idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { aws_mem_release(sba, allocs[idx]); } ASSERT_INT_EQUALS(0, aws_small_block_allocator_bytes_active(sba)); /* after freeing everything, we should have relinquished all but one page in each bin */ ASSERT_INT_EQUALS(5 * aws_small_block_allocator_page_size(sba), aws_small_block_allocator_bytes_reserved(sba)); aws_small_block_allocator_destroy(sba); return 0; } AWS_TEST_CASE(sba_metrics, s_sba_metrics_test) /* * Default allocator tests. */ static int s_default_threaded_reallocs(struct aws_allocator *allocator, void *ctx) { (void)ctx; srand(15); s_thread_test(allocator, s_threaded_realloc_worker, allocator); return 0; } AWS_TEST_CASE(default_threaded_reallocs, s_default_threaded_reallocs) static int s_default_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) { (void)ctx; srand(99); s_thread_test(allocator, s_threaded_alloc_worker, allocator); return 0; } AWS_TEST_CASE(default_threaded_allocs_and_frees, s_default_threaded_allocs_and_frees) /* * No align allocator tests. 
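 *
 * (Summary only, nothing new: the two cases below wrap aws_aligned_allocator() in a
 * memory tracer and reuse the threaded realloc and alloc/free workers defined above.)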
*/ static int s_aligned_threaded_reallocs(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; srand(15); struct aws_allocator *alloc = aws_mem_tracer_new(aws_aligned_allocator(), NULL, AWS_MEMTRACE_STACKS, 8); s_thread_test(alloc, s_threaded_realloc_worker, alloc); aws_mem_tracer_destroy(alloc); return 0; } AWS_TEST_CASE(aligned_threaded_reallocs, s_aligned_threaded_reallocs) static int s_aligned_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; srand(99); struct aws_allocator *alloc = aws_mem_tracer_new(aws_aligned_allocator(), NULL, AWS_MEMTRACE_STACKS, 8); s_thread_test(alloc, s_threaded_alloc_worker, alloc); aws_mem_tracer_destroy(alloc); return 0; } AWS_TEST_CASE(aligned_threaded_allocs_and_frees, s_aligned_threaded_allocs_and_frees) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/array_list_test.c000066400000000000000000001345321456575232400252460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include static int s_array_list_zero_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_array_list list; AWS_ZERO_STRUCT(list); ASSERT_INT_EQUALS(0, aws_array_list_length(&list)); aws_array_list_clear(&list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(array_list_zero_length, s_array_list_zero_length) static int s_array_list_order_push_back_pop_front_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; size_t list_size = 4; int first = 1, second = 2, third = 3, fourth = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List setup should have been successful. 
err code %d", aws_last_error()); ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&third), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&fourth), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(list_size, list.length, "List size should be %d.", (int)list_size); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); int item = 0; ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_INT_EQUALS(list_size - 1, list.length, "List size should be %d.", (int)list_size - 1); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_INT_EQUALS(list_size - 2, list.length, "List size should be %d.", (int)list_size - 2); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, item, "Item should have been the third item."); ASSERT_INT_EQUALS(list_size - 3, list.length, "List size should be %d.", (int)list_size - 3); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(fourth, item, "Item should have been the fourth item."); ASSERT_INT_EQUALS(list_size - 4, list.length, "List size should be %d.", (int)list_size - 4); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_order_push_back_pop_front_test, s_array_list_order_push_back_pop_front_fn) static int s_array_list_order_push_back_pop_back_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; static size_t list_size = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2, 
third = 3, fourth = 4; ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&third), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&fourth), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(list_size, list.length, "List size should be %d.", (int)list_size); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); int item = 0; ASSERT_SUCCESS(aws_array_list_back(&list, (void *)&item), "List back failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_back(&list), "List pop back failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(fourth, item, "Item should have been the fourth item."); ASSERT_INT_EQUALS(list_size - 1, list.length, "List size should be %d.", (int)list_size - 1); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS(aws_array_list_back(&list, (void *)&item), "List back failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_back(&list), "List pop back failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, item, "Item should have been the third item."); ASSERT_INT_EQUALS(list_size - 2, list.length, "List size should be %d.", (int)list_size - 2); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS(aws_array_list_back(&list, (void *)&item), "List back failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_back(&list), "List pop back failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_INT_EQUALS(list_size - 3, list.length, "List size should be %d.", (int)list_size - 3); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS(aws_array_list_back(&list, (void *)&item), "List back failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_back(&list), "List pop back failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_INT_EQUALS(list_size - 4, list.length, "List size should be %d.", (int)list_size - 4); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_order_push_back_pop_back_test, s_array_list_order_push_back_pop_back_fn) static int s_array_list_order_push_front_pop_front_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; size_t list_size = 4; size_t init_size = 2; int first = 1, second = 2, third = 3, fourth = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, init_size, sizeof(int)), "List setup should have been successful. 
err code %d", aws_last_error()); ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS( init_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)init_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_push_front(&list, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_front(&list, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_front(&list, (void *)&third), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_push_front(&list, (void *)&fourth), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(list_size, list.length, "List size should be %d.", (int)list_size); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); int item = 0; ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(fourth, item, "Item should have been the fourth item."); ASSERT_INT_EQUALS(list_size - 1, list.length, "List size should be %d.", (int)list_size - 1); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, item, "Item should have been the third item."); ASSERT_INT_EQUALS(list_size - 2, list.length, "List size should be %d.", (int)list_size - 2); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_INT_EQUALS(list_size - 3, list.length, "List size should be %d.", (int)list_size - 3); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_INT_EQUALS(list_size - 4, list.length, "List size should be %d.", (int)list_size - 4); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_order_push_front_pop_front_test, s_array_list_order_push_front_pop_front_fn) static int s_array_list_pop_front_n_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; ASSERT_SUCCESS(aws_array_list_init_dynamic(&list, allocator, 8, sizeof(int))); int first = 1, second = 2, third = 3, fourth = 4; int item = 0; ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&first)); 
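    /* The pushes above and below load four items; the assertions that follow then exercise
     * aws_array_list_pop_front_n in stages: popping zero items, popping a prefix, popping the exact
     * remainder, and (after refilling) popping more items than the list holds. */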
ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&second)); ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&third)); ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&fourth)); /* Popping 0 front elements should have no effect */ aws_array_list_pop_front_n(&list, 0); ASSERT_INT_EQUALS(4, aws_array_list_length(&list)); /* Pop 2/4 front elements. Third item should be in front. */ aws_array_list_pop_front_n(&list, 2); ASSERT_INT_EQUALS(2, aws_array_list_length(&list)); ASSERT_SUCCESS(aws_array_list_front(&list, &item)); ASSERT_INT_EQUALS(third, item); /* Pop last 2/2 elements. List should be empty. */ aws_array_list_pop_front_n(&list, 2); ASSERT_INT_EQUALS(0, aws_array_list_length(&list), "List should be empty after popping last 2 items"); /* Put some elements into list again. * Popping more items than list contains should just clear the list */ ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&first)); ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&second)); ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&third)); ASSERT_SUCCESS(aws_array_list_push_back(&list, (void *)&fourth)); aws_array_list_pop_front_n(&list, 99); ASSERT_INT_EQUALS(0, aws_array_list_length(&list)); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_pop_front_n_test, s_array_list_pop_front_n_fn) static int s_reset_list(struct aws_array_list *list, const int *array, size_t array_len) { aws_array_list_clear(list); for (size_t i = 0; i < array_len; ++i) { ASSERT_SUCCESS(aws_array_list_push_back(list, &array[i])); } return AWS_OP_SUCCESS; } static int s_check_list_eq(const struct aws_array_list *list, const int *array, size_t array_len) { ASSERT_UINT_EQUALS(array_len, aws_array_list_length(list)); for (size_t i = 0; i < array_len; ++i) { int item; ASSERT_SUCCESS(aws_array_list_get_at(list, &item, i)); ASSERT_INT_EQUALS(array[i], item); } return AWS_OP_SUCCESS; } static int s_array_list_erase_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; ASSERT_SUCCESS(aws_array_list_init_dynamic(&list, allocator, 10, sizeof(int))); { /* Attempts to erase invalid indices should fail */ const int starting_values[] = {1, 2, 3, 4}; ASSERT_SUCCESS(s_reset_list(&list, starting_values, AWS_ARRAY_SIZE(starting_values))); ASSERT_ERROR(AWS_ERROR_INVALID_INDEX, aws_array_list_erase(&list, AWS_ARRAY_SIZE(starting_values))); ASSERT_ERROR(AWS_ERROR_INVALID_INDEX, aws_array_list_erase(&list, AWS_ARRAY_SIZE(starting_values) + 100)); ASSERT_SUCCESS(s_check_list_eq(&list, starting_values, AWS_ARRAY_SIZE(starting_values))); } { /* Erase front item */ const int starting_values[] = {1, 2, 3, 4}; ASSERT_SUCCESS(s_reset_list(&list, starting_values, AWS_ARRAY_SIZE(starting_values))); ASSERT_SUCCESS(aws_array_list_erase(&list, 0)); const int expected_values[] = {2, 3, 4}; ASSERT_SUCCESS(s_check_list_eq(&list, expected_values, AWS_ARRAY_SIZE(expected_values))); } { /* Erase back item */ const int starting_values[] = {1, 2, 3, 4}; ASSERT_SUCCESS(s_reset_list(&list, starting_values, AWS_ARRAY_SIZE(starting_values))); ASSERT_SUCCESS(aws_array_list_erase(&list, 3)); const int expected_values[] = {1, 2, 3}; ASSERT_SUCCESS(s_check_list_eq(&list, expected_values, AWS_ARRAY_SIZE(expected_values))); } { /* Erase middle item */ const int starting_values[] = {1, 2, 3, 4}; ASSERT_SUCCESS(s_reset_list(&list, starting_values, AWS_ARRAY_SIZE(starting_values))); ASSERT_SUCCESS(aws_array_list_erase(&list, 1)); const int expected_values[] = {1, 3, 4}; 
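        /* After erasing index 1, the remaining elements should have shifted down with their relative order preserved. */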
ASSERT_SUCCESS(s_check_list_eq(&list, expected_values, AWS_ARRAY_SIZE(expected_values))); } aws_array_list_clean_up(&list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(array_list_erase_test, s_array_list_erase_fn) static int s_array_list_exponential_mem_model_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; static size_t list_size = 1; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2, third = 3; ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&first), "array list push back failed with error %d", aws_last_error()); ASSERT_INT_EQUALS(list_size, list.current_size / sizeof(int)); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&second), "array list push back failed with error %d", aws_last_error()); ASSERT_INT_EQUALS( list_size << 1, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)(list_size << 1) * sizeof(int)); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&third), "array list push back failed with error %d", aws_last_error()); ASSERT_INT_EQUALS( list_size << 2, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)(list_size << 2) * sizeof(int)); ASSERT_INT_EQUALS(3, list.length, "List size should be %d.", 3); int item = 0; ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, item, "Item should have been the third item."); ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS( list_size << 2, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)(list_size << 2) * sizeof(int)); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_exponential_mem_model_test, s_array_list_exponential_mem_model_test_fn) static int s_array_list_exponential_mem_model_iteration_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; static size_t list_size = 1; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2, third = 3; ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&first, 0), "array list push back failed with error %d", aws_last_error()); ASSERT_INT_EQUALS(list_size, 
list.current_size / sizeof(int)); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&second, 1), "array list push back failed with error %d", aws_last_error()); ASSERT_INT_EQUALS( list_size << 1, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)(list_size << 1) * sizeof(int)); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&third, 2), "array list push back failed with error %d", aws_last_error()); ASSERT_INT_EQUALS( list_size << 2, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)(list_size << 2) * sizeof(int)); ASSERT_INT_EQUALS(3, list.length, "List size should be %d.", 3); int item = 0; ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_SUCCESS( aws_array_list_front(&list, (void *)&item), "List front failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_pop_front(&list), "List pop front failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, item, "Item should have been the third item."); ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS( list_size << 2, list.current_size / sizeof(int), "Allocated list size should be %d.", (int)(list_size << 2) * sizeof(int)); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_exponential_mem_model_iteration_test, s_array_list_exponential_mem_model_iteration_test_fn) static int s_array_list_set_at_overwrite_safety_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_array_list list; size_t list_size = 4; int overwrite_data[5]; aws_array_list_init_static(&list, overwrite_data, list_size, sizeof(int)); memset(overwrite_data, 0x11, sizeof(overwrite_data)); list.current_size = list_size * sizeof(int); unsigned value = 0xFFFFFFFF; ASSERT_SUCCESS(aws_array_list_set_at(&list, (void *)&value, 3)); ASSERT_ERROR(AWS_ERROR_INVALID_INDEX, aws_array_list_set_at(&list, (void *)&value, 4)); ASSERT_INT_EQUALS(0x11111111, overwrite_data[4]); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_set_at_overwrite_safety, s_array_list_set_at_overwrite_safety_fn) static int s_array_list_iteration_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; static size_t list_size = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2, third = 3, fourth = 4; ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&first, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&second, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list.length, "List size should be %d.", 2); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&third, 2), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(3, 
list.length, "List size should be %d.", 3); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&fourth, 3), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(4, list.length, "List size should be %d.", 4); int item = 0; ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 2), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, item, "Item should have been the third item."); ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 3), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(fourth, item, "Item should have been the fourth item."); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_iteration_test, s_array_list_iteration_test_fn) static int s_array_list_iteration_by_ptr_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; static size_t list_size = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2, third = 3, fourth = 4; ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&first, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&second, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list.length, "List size should be %d.", 2); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&third, 2), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(3, list.length, "List size should be %d.", 3); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&fourth, 3), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(4, list.length, "List size should be %d.", 4); int *item; ASSERT_SUCCESS( aws_array_list_get_at_ptr(&list, (void **)&item, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, *item, "Item should have been the first item."); ASSERT_SUCCESS( aws_array_list_get_at_ptr(&list, (void **)&item, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, *item, "Item should have been the second item."); ASSERT_SUCCESS( aws_array_list_get_at_ptr(&list, (void **)&item, 2), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, *item, "Item should have been the third item."); ASSERT_SUCCESS( aws_array_list_get_at_ptr(&list, (void **)&item, 3), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(fourth, *item, "Item should have been the fourth item."); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_iteration_by_ptr_test, s_array_list_iteration_by_ptr_test_fn) static int s_array_list_preallocated_iteration_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_array_list list; int list_data[4]; size_t list_size = 4; aws_array_list_init_static(&list, (void *)list_data, list_size, sizeof(int)); int first = 1, second = 2, third = 3, fourth = 4; 
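    /* aws_array_list_init_static wraps caller-owned storage and never reallocates: set_at/get_at within the
     * declared capacity succeed, while index 4 and beyond must fail with AWS_ERROR_INVALID_INDEX, as the
     * assertions below verify. */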
ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&first, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&second, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list.length, "List size should be %d.", 2); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&third, 2), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(3, list.length, "List size should be %d.", 3); ASSERT_SUCCESS( aws_array_list_set_at(&list, (void *)&fourth, 3), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(4, list.length, "List size should be %d.", 4); ASSERT_FAILS(aws_array_list_set_at(&list, (void *)&fourth, 4), "Adding element past the end should have failed"); ASSERT_INT_EQUALS( AWS_ERROR_INVALID_INDEX, aws_last_error(), "Error code should have been INVALID_INDEX but was %d", aws_last_error()); int item = 0; ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 2), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(third, item, "Item should have been the third item."); ASSERT_SUCCESS( aws_array_list_get_at(&list, (void *)&item, 3), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(fourth, item, "Item should have been the fourth item."); ASSERT_FAILS(aws_array_list_get_at(&list, (void *)&item, 4), "Getting an element past the end should have failed"); ASSERT_INT_EQUALS( AWS_ERROR_INVALID_INDEX, aws_last_error(), "Error code should have been INVALID_INDEX but was %d", aws_last_error()); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_preallocated_iteration_test, s_array_list_preallocated_iteration_test_fn) static int s_array_list_preallocated_push_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_array_list list; int list_data[4]; const size_t list_size = 4; aws_array_list_init_static(&list, (void *)list_data, list_size, sizeof(int)); int first = 1, second = 2, third = 3, fourth = 4; ASSERT_INT_EQUALS(0, list.length, "List size should be 0."); ASSERT_INT_EQUALS(sizeof(list_data), list.current_size, "Allocated list size should be %d.", sizeof(list_data)); ASSERT_SUCCESS(aws_array_list_push_back(&list, &first), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_push_back(&list, &second), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_push_back(&list, &third), "List push failed with error code %d", aws_last_error()); ASSERT_SUCCESS(aws_array_list_push_back(&list, &fourth), "List push failed with error code %d", aws_last_error()); ASSERT_ERROR( AWS_ERROR_LIST_EXCEEDS_MAX_SIZE, aws_array_list_push_back(&list, &fourth), "List push past static size should have failed with AWS_ERROR_LIST_EXCEEDS_MAX_SIZE but was %d", aws_last_error()); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_preallocated_push_test, s_array_list_preallocated_push_test_fn) static int s_array_list_shrink_to_fit_test_fn(struct 
aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; static size_t list_size = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2; ASSERT_SUCCESS(aws_array_list_push_back(&list, &first), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list.length, "List size should be %d.", 1); ASSERT_SUCCESS(aws_array_list_push_back(&list, &second), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list.length, "List size should be %d.", 2); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "size before shrink should be %d.", list_size * sizeof(int)); ASSERT_SUCCESS( aws_array_list_shrink_to_fit(&list), "List shrink to fit failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list.length, "List size should be %d.", 2); ASSERT_INT_EQUALS(2, list.current_size / sizeof(int), "Shrunken size should be %d.", 2 * sizeof(int)); int item = 0; ASSERT_SUCCESS(aws_array_list_get_at(&list, &item, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_SUCCESS(aws_array_list_get_at(&list, &item, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_FAILS(aws_array_list_get_at(&list, &item, 2), "Getting an element past the end should have failed"); ASSERT_INT_EQUALS( AWS_ERROR_INVALID_INDEX, aws_last_error(), "Error code should have been INVALID_INDEX but was %d", aws_last_error()); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_shrink_to_fit_test, s_array_list_shrink_to_fit_test_fn) static int s_array_list_shrink_to_fit_static_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_array_list list; int list_data[4]; const size_t list_size = 4; aws_array_list_init_static(&list, (void *)list_data, list_size, sizeof(int)); int first = 1, second = 2; ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list.length, "List size should be %d.", 2); ASSERT_INT_EQUALS(sizeof(list_data), list.current_size, "size before shrink should be %d.", sizeof(list_data)); ASSERT_FAILS(aws_array_list_shrink_to_fit(&list), "List shrink of static list should have failed."); ASSERT_INT_EQUALS( AWS_ERROR_LIST_STATIC_MODE_CANT_SHRINK, aws_last_error(), "Error code should have been LIST_STATIC_MODE_CANT_SHRINK but was %d", aws_last_error()); ASSERT_PTR_EQUALS(&list_data, list.data, "The underlying allocation should not have changed"); ASSERT_INT_EQUALS(sizeof(list_data), list.current_size, "List size should not have been changed"); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_shrink_to_fit_static_test, s_array_list_shrink_to_fit_static_test_fn) static int s_array_list_clear_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; static size_t list_size = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2; ASSERT_SUCCESS( 
aws_array_list_push_back(&list, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list.length, "List size should be %d.", 2); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "size before clear should be %d.", list_size * sizeof(int)); aws_array_list_clear(&list); ASSERT_INT_EQUALS(0, list.length, "List size should be %d after clear.", 0); ASSERT_INT_EQUALS( list_size, list.current_size / sizeof(int), "cleared size should be %d.", (int)list_size * sizeof(int)); int item; ASSERT_FAILS(aws_array_list_front(&list, (void *)&item), "front() after a clear on list should have been an error"); ASSERT_INT_EQUALS( AWS_ERROR_LIST_EMPTY, aws_last_error(), "Error code should have been LIST_EMPTY but was %d", aws_last_error()); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_clear_test, s_array_list_clear_test_fn) static int s_array_list_copy_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list_a; struct aws_array_list list_b; static size_t list_size = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list_a, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_init_dynamic(&list_b, allocator, 0, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2; ASSERT_SUCCESS( aws_array_list_push_back(&list_a, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list_a.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_push_back(&list_a, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list_a.length, "List size should be %d.", 2); ASSERT_SUCCESS(aws_array_list_copy(&list_a, &list_b), "List copy failed with error code %d", aws_last_error()); int item = 0; ASSERT_SUCCESS( aws_array_list_get_at(&list_b, (void *)&item, 0), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(first, item, "Item should have been the first item."); ASSERT_SUCCESS( aws_array_list_get_at(&list_b, (void *)&item, 1), "Array set failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(second, item, "Item should have been the second item."); ASSERT_INT_EQUALS( aws_array_list_length(&list_a), aws_array_list_length(&list_b), "list lengths should have matched."); aws_array_list_clean_up(&list_a); aws_array_list_clean_up(&list_b); return 0; } AWS_TEST_CASE(array_list_copy_test, s_array_list_copy_test_fn) static int s_array_list_swap_contents_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* build lists */ struct aws_array_list list_a; int a_1 = 1; int a_capacity = 1; ASSERT_SUCCESS(aws_array_list_init_dynamic(&list_a, allocator, a_capacity, sizeof(int))); ASSERT_SUCCESS(aws_array_list_push_back(&list_a, (void *)&a_1)); struct aws_array_list list_b; int b_1 = 5; int b_2 = 6; int b_capacity = 3; ASSERT_SUCCESS(aws_array_list_init_dynamic(&list_b, allocator, b_capacity, sizeof(int))); ASSERT_SUCCESS(aws_array_list_push_back(&list_b, (void *)&b_1)); ASSERT_SUCCESS(aws_array_list_push_back(&list_b, (void *)&b_2)); void *a_buffer; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&list_a, &a_buffer, 0)); void *b_buffer; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&list_b, &b_buffer, 
0)); /* swap */ aws_array_list_swap_contents(&list_a, &list_b); /* compare state after swap */ void *a_buffer_after_swap; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&list_a, &a_buffer_after_swap, 0)); ASSERT_PTR_EQUALS(b_buffer, a_buffer_after_swap, "Lists A and B should have swapped buffer ownership, but did not"); void *b_buffer_after_swap; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&list_b, &b_buffer_after_swap, 0)); ASSERT_PTR_EQUALS(a_buffer, b_buffer_after_swap, "Lists A and B should have swapped buffer ownership, but did not"); int item; ASSERT_INT_EQUALS(2, aws_array_list_length(&list_a), "List A should have taken B's old length"); ASSERT_INT_EQUALS(b_capacity, aws_array_list_capacity(&list_a), "List A should have taken B's old capacity"); ASSERT_SUCCESS(aws_array_list_get_at(&list_a, &item, 0), "List A should have B's old first item"); ASSERT_INT_EQUALS(b_1, item, "List A should have B's old first item"); ASSERT_SUCCESS(aws_array_list_get_at(&list_a, &item, 1), "List A should have B's old second item"); ASSERT_INT_EQUALS(b_2, item, "List A should have B's old second item"); ASSERT_INT_EQUALS(1, aws_array_list_length(&list_b), "List B should have taken A's old length"); ASSERT_INT_EQUALS(a_capacity, aws_array_list_capacity(&list_b), "List B should have taken A's old capacity"); ASSERT_SUCCESS(aws_array_list_get_at(&list_b, &item, 0), "List B should have A's old first item"); ASSERT_INT_EQUALS(a_1, item, "List B should have A's old first item"); aws_array_list_clean_up(&list_a); aws_array_list_clean_up(&list_b); return 0; } AWS_TEST_CASE(array_list_swap_contents_test, s_array_list_swap_contents_test_fn) static int s_array_list_not_enough_space_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list_a; struct aws_array_list list_b; static size_t list_size = 4; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list_a, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); ASSERT_SUCCESS( aws_array_list_init_dynamic(&list_b, allocator, 1, sizeof(int)), "List initialization failed with error %d", aws_last_error()); int first = 1, second = 2; ASSERT_SUCCESS( aws_array_list_push_back(&list_a, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list_a.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_push_back(&list_a, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list_a.length, "List size should be %d.", 2); ASSERT_SUCCESS(aws_array_list_copy(&list_a, &list_b), "Copy from list_a to list_b should have succeeded"); ASSERT_INT_EQUALS(list_a.length, list_b.length, "List b should have grown to the length of list a"); ASSERT_INT_EQUALS( 2 * sizeof(int), list_b.current_size, "List b should have grown to the size of the number of elements in list a"); aws_array_list_clean_up(&list_a); aws_array_list_clean_up(&list_b); return 0; } AWS_TEST_CASE(array_list_not_enough_space_test, s_array_list_not_enough_space_test_fn) static int s_array_list_not_enough_space_test_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list_a; struct aws_array_list list_b; size_t list_size = 4; int static_list[1]; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list_a, allocator, list_size, sizeof(int)), "List initialization failed with error %d", aws_last_error()); ASSERT_TRUE(list_a.data); aws_array_list_init_static(&list_b, static_list, 1, sizeof(int)); ASSERT_TRUE(list_b.data); int first = 1, second 
= 2; ASSERT_SUCCESS( aws_array_list_push_back(&list_a, (void *)&first), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(1, list_a.length, "List size should be %d.", 1); ASSERT_SUCCESS( aws_array_list_push_back(&list_a, (void *)&second), "List push failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(2, list_a.length, "List size should be %d.", 2); ASSERT_ERROR( AWS_ERROR_DEST_COPY_TOO_SMALL, aws_array_list_copy(&list_a, &list_b), "Copying to a static list too small should have failed with TOO_SMALL but got %d instead", aws_last_error()); aws_array_list_clean_up(&list_a); aws_array_list_clean_up(&list_b); return 0; } AWS_TEST_CASE(array_list_not_enough_space_test_failure, s_array_list_not_enough_space_test_failure_fn) static int s_array_list_of_strings_sort_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; AWS_STATIC_STRING_FROM_LITERAL(empty, ""); AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); AWS_STATIC_STRING_FROM_LITERAL(bar, "bar"); AWS_STATIC_STRING_FROM_LITERAL(foobar, "foobar"); AWS_STATIC_STRING_FROM_LITERAL(foo2, "foo"); AWS_STATIC_STRING_FROM_LITERAL(foobaz, "foobaz"); AWS_STATIC_STRING_FROM_LITERAL(bar_food, "bar food"); AWS_STATIC_STRING_FROM_LITERAL(bar_null_food, "bar\0food"); AWS_STATIC_STRING_FROM_LITERAL(bar_null_back, "bar\0back"); const struct aws_string *strings[] = { empty, foo, bar, foobar, foo2, foobaz, bar_food, bar_null_food, bar_null_back}; const struct aws_string *sorted[] = {empty, bar, bar_null_back, bar_null_food, bar_food, foo, foo2, foobar, foobaz}; int num_strings = AWS_ARRAY_SIZE(strings); struct aws_array_list list; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, num_strings, sizeof(const struct aws_string *)), "List initialization failed with error %d", aws_last_error()); for (int idx = 0; idx < num_strings; ++idx) { ASSERT_SUCCESS( aws_array_list_push_back(&list, (void *)(strings + idx)), "List push failed with error code %d", aws_last_error()); } aws_array_list_sort(&list, aws_array_list_comparator_string); /* No control over whether foo or foo2 will be first, but checking for * string equality with sorted array makes that irrelevant. */ for (int idx = 0; idx < num_strings; ++idx) { const struct aws_string *str; ASSERT_SUCCESS( aws_array_list_get_at(&list, (void **)&str, idx), "List get failed with error code %d", aws_last_error()); ASSERT_INT_EQUALS(0, aws_string_compare(str, sorted[idx]), "Strings should be equal"); } aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_of_strings_sort, s_array_list_of_strings_sort_fn) static int s_array_list_empty_sort_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list list; ASSERT_SUCCESS( aws_array_list_init_dynamic(&list, allocator, 5, sizeof(const struct aws_string *)), "List initialization failed with error %d", aws_last_error()); /* Nothing much to check, just want to make sure sort run on empty list * doesn't crash. 
*/ ASSERT_INT_EQUALS(0, aws_array_list_length(&list)); ASSERT_INT_EQUALS(5, aws_array_list_capacity(&list)); aws_array_list_sort(&list, aws_array_list_comparator_string); ASSERT_INT_EQUALS(0, aws_array_list_length(&list)); ASSERT_INT_EQUALS(5, aws_array_list_capacity(&list)); aws_array_list_clean_up(&list); return 0; } AWS_TEST_CASE(array_list_empty_sort, s_array_list_empty_sort_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/assert_test.c000066400000000000000000000221101456575232400243620ustar00rootroot00000000000000/** This standalone test harness tests that the asserts themselves function properly */ #define AWS_TESTING_REPORT_FD g_test_filedes #include #include FILE *g_test_filedes; #include #include #define NO_MORE_TESTS 12345 #define BAILED_OUT 98765 #ifdef _MSC_VER /* disable warning about unreferenced formal parameter */ # pragma warning(disable : 4100) #endif const char *g_test_filename; int g_cur_line; int g_expected_return; int g_bail_out; #define TEST_SUCCESS(name) \ if (g_bail_out) \ return BAILED_OUT; \ if (begin_test(index, #name, __FILE__, __LINE__, 0)) #define TEST_FAILURE(name) \ if (g_bail_out) \ return BAILED_OUT; \ if (begin_test(index, #name, __FILE__, __LINE__, -1)) const char *g_cur_testname, *g_cur_file; int begin_test(int *index, const char *testname, const char *file, int line, int expected) { if (*index <= line) { *index = line; g_cur_testname = testname; g_cur_file = file; g_cur_line = line; g_expected_return = expected == 0 ? BAILED_OUT : expected; g_bail_out = 1; return 1; } return 0; } static int side_effect_ctr = 0; int side_effect(void) { if (side_effect_ctr++) { fprintf( stderr, "***FAILURE*** Side effects triggered multiple times, after %s:%d (%s)", g_cur_file, g_cur_line, g_cur_testname); abort(); } return 0; } /* NOLINTNEXTLINE(readability-function-size) */ int test_asserts(int *index) { TEST_SUCCESS(null_test) {} TEST_FAILURE(null_failure_test) { fprintf(AWS_TESTING_REPORT_FD, "***FAILURE*** test\n"); return FAILURE; } TEST_FAILURE(basic_fail_1) { FAIL("Failed: %d", 42); } TEST_FAILURE(assert_bool) { ASSERT_TRUE(0); } TEST_FAILURE(assert_bool) { ASSERT_TRUE(0, "foo %d", 42); } TEST_FAILURE(assert_bool) { ASSERT_FALSE(1); } TEST_FAILURE(assert_bool) { ASSERT_FALSE(1, "foo %d", 42); } TEST_SUCCESS(assert_bool) { ASSERT_TRUE(1); } TEST_SUCCESS(assert_bool) { ASSERT_TRUE(2); } TEST_SUCCESS(assert_bool) { ASSERT_FALSE(0); } TEST_SUCCESS(assert_success) { ASSERT_SUCCESS(AWS_OP_SUCCESS); } TEST_SUCCESS(assert_success) { ASSERT_SUCCESS(AWS_OP_SUCCESS, "foo"); } TEST_FAILURE(assert_success) { ASSERT_SUCCESS(aws_raise_error(AWS_ERROR_OOM), "foo"); } TEST_SUCCESS(assert_fails) { ASSERT_FAILS(aws_raise_error(AWS_ERROR_OOM)); } TEST_SUCCESS(assert_fails) { ASSERT_FAILS(aws_raise_error(AWS_ERROR_OOM), "foo"); } TEST_FAILURE(assert_fails) { ASSERT_FAILS(AWS_OP_SUCCESS, "foo"); } TEST_SUCCESS(assert_error) { ASSERT_ERROR(AWS_ERROR_OOM, aws_raise_error(AWS_ERROR_OOM)); } TEST_SUCCESS(assert_error_side_effect) { ASSERT_ERROR((side_effect(), AWS_ERROR_OOM), aws_raise_error(AWS_ERROR_OOM)); } TEST_SUCCESS(assert_error_side_effect) { ASSERT_ERROR(AWS_ERROR_OOM, (side_effect(), aws_raise_error(AWS_ERROR_OOM))); } TEST_SUCCESS(assert_error) { ASSERT_ERROR(AWS_ERROR_OOM, aws_raise_error(AWS_ERROR_OOM), "foo"); } TEST_FAILURE(assert_error) { ASSERT_ERROR(AWS_ERROR_CLOCK_FAILURE, aws_raise_error(AWS_ERROR_OOM), "foo"); } aws_raise_error(AWS_ERROR_CLOCK_FAILURE); // set last error TEST_FAILURE(assert_error) { ASSERT_ERROR(AWS_ERROR_CLOCK_FAILURE, AWS_OP_SUCCESS, "foo"); } 
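    /* Illustrative sketch only (not part of the original suite; the case names are arbitrary): a new
     * self-test case pairs TEST_SUCCESS with a body whose asserts pass, or TEST_FAILURE with a body that
     * is expected to report failure through the harness. */
    TEST_SUCCESS(example_passing_case) { ASSERT_INT_EQUALS(42, 40 + 2); }
    TEST_FAILURE(example_failing_case) { ASSERT_INT_EQUALS(42, 43, "intentionally mismatched for illustration"); }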
TEST_SUCCESS(assert_null) { ASSERT_NULL(NULL); } { const struct forward_decl *nullp2 = NULL; TEST_SUCCESS(assert_null) { void *nullp = NULL; ASSERT_NULL(nullp); } TEST_SUCCESS(assert_null) { ASSERT_NULL(nullp2); } TEST_SUCCESS(assert_null_sideeffects) { ASSERT_NULL((side_effect(), nullp2)); } } TEST_SUCCESS(assert_null) { ASSERT_NULL(0, "foo"); } TEST_FAILURE(assert_null) { ASSERT_NULL("hello world", "foo"); } TEST_SUCCESS(inteq) { ASSERT_INT_EQUALS(4321, 4321); } TEST_SUCCESS(inteq) { ASSERT_INT_EQUALS(4321, 4321, "foo"); } TEST_SUCCESS(inteq_side_effects) { int increment = 4321; ASSERT_INT_EQUALS(4321, increment++, "foo"); ASSERT_INT_EQUALS(4322, increment++, "foo"); } TEST_FAILURE(inteq_difference) { ASSERT_INT_EQUALS(0, 1, "foo"); } // UINT/PTR/BYTE_HEX/HEX are the same backend, so just test that the format string doesn't break TEST_FAILURE(uinteq) { ASSERT_UINT_EQUALS(0, 1, "Foo"); } TEST_FAILURE(ptreq) { ASSERT_PTR_EQUALS("x", "y", "Foo"); } TEST_FAILURE(bytehex) { ASSERT_BYTE_HEX_EQUALS('a', 'b'); } TEST_FAILURE(hex) { ASSERT_HEX_EQUALS((uint64_t)-1, 0); } TEST_SUCCESS(streq) { ASSERT_STR_EQUALS((side_effect(), "x"), "x"); } TEST_SUCCESS(streq) { char str_x[2] = "x"; ASSERT_STR_EQUALS("x", (side_effect(), str_x), "foo"); } TEST_FAILURE(streq) { ASSERT_STR_EQUALS("x", "xy", "bar"); } TEST_FAILURE(streq) { ASSERT_STR_EQUALS("xy", "x"); } uint8_t bin1[] = {0, 1, 2}; uint8_t bin2[] = {0, 1, 2}; TEST_SUCCESS(bineq) { ASSERT_BIN_ARRAYS_EQUALS((side_effect(), bin1), 3, bin2, 3); side_effect_ctr = 0; ASSERT_BIN_ARRAYS_EQUALS(bin1, (side_effect(), 3), bin2, 3); side_effect_ctr = 0; ASSERT_BIN_ARRAYS_EQUALS(bin1, 3, (side_effect(), bin2), 3); side_effect_ctr = 0; ASSERT_BIN_ARRAYS_EQUALS(bin1, 3, bin2, (side_effect(), 3)); } TEST_FAILURE(bineq_samesize) { uint8_t bin3[] = {0, 1, 3}; ASSERT_BIN_ARRAYS_EQUALS(bin1, 3, bin3, 3, "foo"); } TEST_FAILURE(bineq_diffsize) { ASSERT_BIN_ARRAYS_EQUALS(bin1, 3, bin2, 2); } TEST_FAILURE(bineq_diffsize) { ASSERT_BIN_ARRAYS_EQUALS(bin1, 2, bin2, 3); } TEST_SUCCESS(bineq_empty) { ASSERT_BIN_ARRAYS_EQUALS(bin1, 0, bin2, 0, "foo"); } TEST_SUCCESS(bineq_same) { ASSERT_BIN_ARRAYS_EQUALS(bin1, 3, bin1, 3); } return NO_MORE_TESTS; } void reset(void) { g_cur_testname = "UNKNOWN"; g_cur_file = "UNKNOWN"; g_bail_out = 0; if (g_test_filedes) { fclose(g_test_filedes); } g_test_filedes = aws_fopen(g_test_filename, "w"); if (!g_test_filedes) { perror("***INTERNAL ERROR*** Failed to open temporary file"); abort(); } side_effect_ctr = 0; } int check_failure_output(const char *expected) { fclose(g_test_filedes); g_test_filedes = NULL; FILE *readfd = aws_fopen(g_test_filename, "r"); static char tmpbuf[256]; char *rv = fgets(tmpbuf, sizeof(tmpbuf), readfd); fclose(readfd); if (!expected) { return rv == NULL; } if (!rv) { return 0; } return !strncmp(tmpbuf, expected, strlen(expected)); } int main(int argc, char **argv) { int index = 0; if (argc < 2) { return 1; } g_test_filename = argv[1]; // Suppress unused function warnings (void)s_aws_run_test_case; // Sanity checks for our own test macros reset(); if (test_asserts(&index) != BAILED_OUT) { fprintf( stderr, "***FAILURE*** Initial case did not succeed; stopped at %s:%d (%s)\n", g_cur_file, index, g_cur_testname); return 1; } index++; reset(); if (test_asserts(&index) != FAILURE) { fprintf( stderr, "***FAILURE*** Second case did not fail; stopped at %s:%d (%s)\n", g_cur_file, index, g_cur_testname); return 1; } index = 0; for (;;) { reset(); int rv = test_asserts(&index); if (rv == NO_MORE_TESTS) { break; } if (rv != 
g_expected_return) { fprintf( stderr, "***FAILURE*** Wrong result (%d expected, %d got) after %s:%d (%s)\n", g_expected_return, rv, g_cur_file, index, g_cur_testname); return 1; } if (g_expected_return == FAILURE) { if (!check_failure_output("***FAILURE*** ")) { fprintf( stderr, "***FAILURE*** Output did not start with ***FAILURE*** after %s:%d (%s)\n", g_cur_file, index, g_cur_testname); return 1; } } else { if (!check_failure_output(NULL)) { fprintf( stderr, "***FAILURE*** Output was not empty after %s:%d (%s)\n", g_cur_file, index, g_cur_testname); return 1; } } index++; } return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/atomics_test.c000066400000000000000000000455211456575232400245330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #ifdef _WIN32 # include # ifndef alloca # define alloca _alloca # endif #elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) # include #else # include #endif AWS_TEST_CASE(atomics_semantics, t_semantics) static int t_semantics(struct aws_allocator *allocator, void *ctx) { /* * This test verifies that the atomics work properly on a single thread. * Because we're only accessing from a single thread, program order fully constrains * the order in which all loads and stores can happen, and so our memory order selection * doesn't matter. We do however use a variety of memory orders to ensure that they * are accepted. */ (void)ctx; (void)allocator; /* These provide us with some unique test pointers */ int dummy_1, dummy_2, dummy_3; void *expected_ptr; size_t expected_int; struct aws_atomic_var var; /* First, pointer tests */ aws_atomic_init_ptr(&var, &dummy_1); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_load_ptr_explicit(&var, aws_memory_order_relaxed)); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_exchange_ptr_explicit(&var, &dummy_2, aws_memory_order_acq_rel)); ASSERT_PTR_EQUALS(&dummy_2, aws_atomic_load_ptr_explicit(&var, aws_memory_order_acquire)); aws_atomic_store_ptr_explicit(&var, &dummy_1, aws_memory_order_release); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_load_ptr_explicit(&var, aws_memory_order_acquire)); expected_ptr = &dummy_3; ASSERT_FALSE(aws_atomic_compare_exchange_ptr_explicit( &var, &expected_ptr, &dummy_3, aws_memory_order_release, aws_memory_order_relaxed)); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_load_ptr_explicit(&var, aws_memory_order_acquire)); ASSERT_PTR_EQUALS(&dummy_1, expected_ptr); ASSERT_TRUE(aws_atomic_compare_exchange_ptr_explicit( &var, &expected_ptr, &dummy_3, aws_memory_order_release, aws_memory_order_relaxed)); ASSERT_PTR_EQUALS(&dummy_3, aws_atomic_load_ptr_explicit(&var, aws_memory_order_acquire)); /* Integer tests */ aws_atomic_init_int(&var, 12345); ASSERT_INT_EQUALS(12345, aws_atomic_load_int_explicit(&var, aws_memory_order_relaxed)); aws_atomic_store_int_explicit(&var, 54321, aws_memory_order_release); ASSERT_INT_EQUALS(54321, aws_atomic_load_int_explicit(&var, aws_memory_order_acquire)); ASSERT_INT_EQUALS(54321, aws_atomic_exchange_int_explicit(&var, 9999, aws_memory_order_acq_rel)); ASSERT_INT_EQUALS(9999, aws_atomic_load_int_explicit(&var, aws_memory_order_acquire)); expected_int = 1111; ASSERT_FALSE(aws_atomic_compare_exchange_int_explicit( &var, &expected_int, 0, aws_memory_order_acq_rel, aws_memory_order_relaxed)); ASSERT_INT_EQUALS(9999, aws_atomic_load_int_explicit(&var, aws_memory_order_acquire)); ASSERT_INT_EQUALS(9999, expected_int); 
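    /* Note: on failure, aws_atomic_compare_exchange_int_explicit writes the value it actually observed back
     * into *expected (hence expected_int is now 9999 rather than 1111), so the exchange attempted below with
     * the corrected expected value succeeds. */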
ASSERT_TRUE(aws_atomic_compare_exchange_int_explicit( &var, &expected_int, 0x7000, aws_memory_order_acq_rel, aws_memory_order_relaxed)); ASSERT_INT_EQUALS(0x7000, aws_atomic_load_int_explicit(&var, aws_memory_order_acquire)); ASSERT_INT_EQUALS(0x7000, aws_atomic_fetch_add_explicit(&var, 6, aws_memory_order_relaxed)); ASSERT_INT_EQUALS(0x7006, aws_atomic_fetch_sub_explicit(&var, 0x16, aws_memory_order_relaxed)); ASSERT_INT_EQUALS(0x6ff0, aws_atomic_fetch_or_explicit(&var, 0x14, aws_memory_order_relaxed)); ASSERT_INT_EQUALS(0x6ff4, aws_atomic_fetch_and_explicit(&var, 0x2115, aws_memory_order_relaxed)); ASSERT_INT_EQUALS(0x2114, aws_atomic_fetch_xor_explicit(&var, 0x3356, aws_memory_order_relaxed)); ASSERT_INT_EQUALS(0x1242, aws_atomic_load_int_explicit(&var, aws_memory_order_acquire)); /* Proving that atomic_thread_fence works is hard, for now just demonstrate that it doesn't crash */ aws_atomic_thread_fence(aws_memory_order_relaxed); aws_atomic_thread_fence(aws_memory_order_release); aws_atomic_thread_fence(aws_memory_order_acquire); aws_atomic_thread_fence(aws_memory_order_acq_rel); return 0; } AWS_TEST_CASE(atomics_semantics_implicit, t_semantics_implicit) static int t_semantics_implicit(struct aws_allocator *allocator, void *ctx) { /* * This test verifies that the non-_explicit atomics work properly on a single thread. */ (void)ctx; (void)allocator; /* These provide us with some unique test pointers */ int dummy_1, dummy_2, dummy_3; void *expected_ptr; size_t expected_int; struct aws_atomic_var var; /* First, pointer tests */ aws_atomic_init_ptr(&var, &dummy_1); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_load_ptr(&var)); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_exchange_ptr(&var, &dummy_2)); ASSERT_PTR_EQUALS(&dummy_2, aws_atomic_load_ptr(&var)); aws_atomic_store_ptr(&var, &dummy_1); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_load_ptr(&var)); expected_ptr = &dummy_3; ASSERT_FALSE(aws_atomic_compare_exchange_ptr(&var, &expected_ptr, &dummy_3)); ASSERT_PTR_EQUALS(&dummy_1, aws_atomic_load_ptr(&var)); ASSERT_PTR_EQUALS(&dummy_1, expected_ptr); ASSERT_TRUE(aws_atomic_compare_exchange_ptr(&var, &expected_ptr, &dummy_3)); ASSERT_PTR_EQUALS(&dummy_3, aws_atomic_load_ptr(&var)); /* Integer tests */ aws_atomic_init_int(&var, 12345); ASSERT_INT_EQUALS(12345, aws_atomic_load_int(&var)); aws_atomic_store_int(&var, 54321); ASSERT_INT_EQUALS(54321, aws_atomic_load_int(&var)); ASSERT_INT_EQUALS(54321, aws_atomic_exchange_int(&var, 9999)); ASSERT_INT_EQUALS(9999, aws_atomic_load_int(&var)); expected_int = 1111; ASSERT_FALSE(aws_atomic_compare_exchange_int(&var, &expected_int, 0)); ASSERT_INT_EQUALS(9999, aws_atomic_load_int(&var)); ASSERT_INT_EQUALS(9999, expected_int); ASSERT_TRUE(aws_atomic_compare_exchange_int(&var, &expected_int, 0x7000)); ASSERT_INT_EQUALS(0x7000, aws_atomic_load_int(&var)); ASSERT_INT_EQUALS(0x7000, aws_atomic_fetch_add(&var, 6)); ASSERT_INT_EQUALS(0x7006, aws_atomic_fetch_sub(&var, 0x16)); ASSERT_INT_EQUALS(0x6ff0, aws_atomic_fetch_or(&var, 0x14)); ASSERT_INT_EQUALS(0x6ff4, aws_atomic_fetch_and(&var, 0x2115)); ASSERT_INT_EQUALS(0x2114, aws_atomic_fetch_xor(&var, 0x3356)); ASSERT_INT_EQUALS(0x1242, aws_atomic_load_int(&var)); /* Proving that atomic_thread_fence works is hard, for now just demonstrate that it doesn't crash */ aws_atomic_thread_fence(aws_memory_order_relaxed); aws_atomic_thread_fence(aws_memory_order_release); aws_atomic_thread_fence(aws_memory_order_acquire); aws_atomic_thread_fence(aws_memory_order_acq_rel); return 0; } AWS_TEST_CASE(atomics_static_init, t_static_init) 
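/* AWS_ATOMIC_INIT_INT and AWS_ATOMIC_INIT_PTR are the static-initializer counterparts of
 * aws_atomic_init_int/aws_atomic_init_ptr; the case below also checks that a small unsigned initializer
 * such as (uint8_t)0x80 is zero-extended rather than sign-extended. */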
static int t_static_init(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Verify that we have the right sign extension behavior - we should see zero extension here */ struct aws_atomic_var int_init = AWS_ATOMIC_INIT_INT((uint8_t)0x80); struct aws_atomic_var ptr_init = AWS_ATOMIC_INIT_PTR(&int_init); ASSERT_INT_EQUALS(0x80, aws_atomic_load_int(&int_init)); ASSERT_PTR_EQUALS(&int_init, aws_atomic_load_ptr(&ptr_init)); return 0; } union padded_var { struct aws_atomic_var var; char pad[32]; }; /* * We define the race loop in a macro to encourage inlining; performance matters when trying to tickle low-level data * races */ struct one_race { struct aws_atomic_var *wait; struct aws_atomic_var **vars; struct aws_atomic_var **observations; }; static struct one_race *races; static size_t n_races, n_vars, n_observations; static int n_participants; static int done_racing; static struct aws_mutex done_mutex = AWS_MUTEX_INIT; static struct aws_condition_variable done_cvar = AWS_CONDITION_VARIABLE_INIT; static struct aws_atomic_var last_race_index; static struct aws_atomic_var *alloc_var(struct aws_allocator *alloc, const struct aws_atomic_var *template) { struct aws_atomic_var *var = aws_mem_acquire(alloc, sizeof(union padded_var)); if (!var) { abort(); } memcpy(var, template, sizeof(*var)); return var; } static void setup_races( struct aws_allocator *alloc, size_t n_races_v, size_t n_vars_v, size_t n_observations_v, const struct aws_atomic_var *init_vars, const struct aws_atomic_var *init_observations) { struct aws_atomic_var init_wait; aws_atomic_init_int(&init_wait, 0); n_races = n_races_v; n_vars = n_vars_v; n_observations = n_observations_v; races = aws_mem_acquire(alloc, n_races * sizeof(*races)); if (!races) { abort(); } for (size_t i = 0; i < n_races; i++) { races[i].wait = alloc_var(alloc, &init_wait); races[i].vars = aws_mem_acquire(alloc, n_vars * sizeof(*races[i].vars)); races[i].observations = aws_mem_acquire(alloc, n_observations * sizeof(*races[i].observations)); if (!races[i].vars || !races[i].observations) { abort(); } for (size_t j = 0; j < n_vars; j++) { races[i].vars[j] = alloc_var(alloc, &init_vars[j]); } for (size_t j = 0; j < n_observations; j++) { races[i].observations[j] = alloc_var(alloc, &init_observations[j]); } } } static void free_races(struct aws_allocator *alloc) { for (size_t i = 0; i < n_races; i++) { for (size_t j = 0; j < n_vars; j++) { aws_mem_release(alloc, races[i].vars[j]); } for (size_t j = 0; j < n_observations; j++) { aws_mem_release(alloc, races[i].observations[j]); } aws_mem_release(alloc, races[i].wait); aws_mem_release(alloc, races[i].vars); aws_mem_release(alloc, races[i].observations); } aws_mem_release(alloc, races); } static bool are_races_done(void *ignored) { (void)ignored; return done_racing >= n_participants; } static int run_races( size_t *last_race, struct aws_allocator *alloc, int n_participants_local, void (*race_fn)(void *vp_participant)) { int *participant_indexes = alloca(n_participants_local * sizeof(*participant_indexes)); struct aws_thread *threads = alloca(n_participants_local * sizeof(struct aws_thread)); *last_race = (size_t)-1; n_participants = n_participants_local; done_racing = false; aws_atomic_init_int(&last_race_index, 0); for (int i = 0; i < n_participants; i++) { participant_indexes[i] = i; ASSERT_SUCCESS(aws_thread_init(&threads[i], alloc)); ASSERT_SUCCESS(aws_thread_launch(&threads[i], race_fn, &participant_indexes[i], NULL)); } ASSERT_SUCCESS(aws_mutex_lock(&done_mutex)); if 
(aws_condition_variable_wait_for_pred(&done_cvar, &done_mutex, 1000000000ULL /* 1s */, are_races_done, NULL) == AWS_OP_ERR) { ASSERT_TRUE(aws_last_error() == AWS_ERROR_COND_VARIABLE_TIMED_OUT); } if (done_racing >= n_participants) { *last_race = n_races; } else { *last_race = (size_t)aws_atomic_load_int_explicit(&last_race_index, aws_memory_order_relaxed); if (*last_race == (size_t)-1) { /* We didn't even see the first race complete */ *last_race = 0; } } ASSERT_SUCCESS(aws_mutex_unlock(&done_mutex)); /* Poison all remaining races to make sure the threads exit quickly */ for (size_t i = 0; i < n_races; i++) { aws_atomic_store_int_explicit(races[i].wait, n_participants, aws_memory_order_relaxed); } for (int i = 0; i < n_participants; i++) { ASSERT_SUCCESS(aws_thread_join(&threads[i])); aws_thread_clean_up(&threads[i]); } aws_atomic_thread_fence(aws_memory_order_acq_rel); return 0; } static void notify_race_completed(void) { if (aws_mutex_lock(&done_mutex)) { abort(); } done_racing++; if (done_racing >= n_participants) { if (aws_condition_variable_notify_all(&done_cvar)) { abort(); } } if (aws_mutex_unlock(&done_mutex)) { abort(); } } #define DEFINE_RACE(race_name, vn_participant, vn_race) \ static void race_name##_iter(int participant, struct one_race *race); \ static void race_name(void *vp_participant) { \ int participant = *(int *)vp_participant; \ size_t n_races_local = n_races; \ size_t n_participants_local = n_participants; \ for (size_t i = 0; i < n_races_local; i++) { \ while (i > 0 && \ aws_atomic_load_int_explicit(races[i - 1].wait, aws_memory_order_relaxed) < n_participants_local) { \ /* spin */ \ } \ if (participant == 0) { \ aws_atomic_store_int_explicit(&last_race_index, i - 1, aws_memory_order_relaxed); \ } \ race_name##_iter(participant, &races[i]); \ aws_atomic_fetch_add_explicit(races[i].wait, 1, aws_memory_order_relaxed); \ } \ notify_race_completed(); \ aws_atomic_thread_fence(aws_memory_order_release); \ } \ static void race_name##_iter(int vn_participant, struct one_race *vn_race) /* NOLINT */ /* * The following race races these two threads: * * Thread 1: * DATA <- 1 [relaxed] * FLAG <- 2 [release] * Thread 2: * Read FLAG [acquire] * Read DATA [relaxed] * * We expect that, if FLAG is observed to be 2, then DATA must be 1, due to * acquire-release ordering. * * Note however, that this race never fails on x86; on x86 all loads have acquire semantics, * and all stores have release semantics. 
*/
DEFINE_RACE(acquire_to_release_one_direction, participant, race) {
    struct aws_atomic_var *flag = race->vars[0];
    struct aws_atomic_var *protected_data = race->vars[1];
    struct aws_atomic_var *observation = race->observations[0];

    if (participant == 0) {
        aws_atomic_store_int_explicit(protected_data, 1, aws_memory_order_relaxed);
        aws_atomic_store_int_explicit(flag, 2, aws_memory_order_release);
    } else {
        size_t flagval = aws_atomic_load_int_explicit(flag, aws_memory_order_acquire);
        size_t dataval = aws_atomic_load_int_explicit(protected_data, aws_memory_order_relaxed);
        aws_atomic_store_int_explicit(observation, flagval ^ dataval, aws_memory_order_relaxed);
    }
}

AWS_TEST_CASE(atomics_acquire_to_release_one_direction, t_acquire_to_release_one_direction)
static int t_acquire_to_release_one_direction(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_atomic_var template[2];
    size_t last_race;

    aws_atomic_init_int(&template[0], 0);
    aws_atomic_init_int(&template[1], 0);

    setup_races(allocator, 100000, 2, 1, template, template);
    run_races(&last_race, allocator, 2, acquire_to_release_one_direction);

    for (size_t i = 0; i < last_race; i++) {
        size_t a = aws_atomic_load_int_explicit(races[i].observations[0], aws_memory_order_relaxed);
        /*
         * The observation is FLAG XOR DATA. If we see that flag == 2, then the data value must be 1,
         * so an observation of exactly 2 (flag == 2, data == 0) means acquire-release ordering was violated.
         * If flag == 0, then the data value may be anything.
         */
        ASSERT_FALSE(a == 2, "Acquire-release ordering failed at iteration %zu", i);
    }

    free_races(allocator);
    return 0;
}

/*
 * The following race races these two threads:
 *
 * Thread 1:
 *   Read DATA [relaxed] (observation 0)
 *   FLAG <- 2 [release]
 * Thread 2:
 *   Read FLAG [acquire] (observation 1)
 *   DATA <- 1 [relaxed]
 *
 * We expect that, if FLAG is observed to be 2, then DATA must have been 0 when thread 1 read it:
 * the release store to FLAG synchronizes with the acquire load, so thread 1's read of DATA
 * happens-before thread 2's write of DATA.
 *
 * Note however, that this race never fails on x86; on x86 all loads have acquire semantics,
 * and all stores have release semantics.
 */
DEFINE_RACE(acquire_to_release_mixed, participant, race) {
    struct aws_atomic_var *flag = race->vars[0];
    struct aws_atomic_var *protected_data = race->vars[1];

    if (participant == 0) {
        aws_atomic_store_int_explicit(
            race->observations[0],
            aws_atomic_load_int_explicit(protected_data, aws_memory_order_relaxed),
            aws_memory_order_relaxed);
        aws_atomic_store_int_explicit(flag, 2, aws_memory_order_release);
    } else {
        aws_atomic_store_int_explicit(
            race->observations[1],
            aws_atomic_load_int_explicit(flag, aws_memory_order_acquire),
            aws_memory_order_relaxed);
        aws_atomic_store_int_explicit(protected_data, 1, aws_memory_order_relaxed);
    }
}

AWS_TEST_CASE(atomics_acquire_to_release_mixed, t_acquire_to_release_mixed)
static int t_acquire_to_release_mixed(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_atomic_var template[2];
    size_t last_race;

    aws_atomic_init_int(&template[0], 0);
    aws_atomic_init_int(&template[1], 0);

    setup_races(allocator, 100000, 2, 2, template, template);
    run_races(&last_race, allocator, 2, acquire_to_release_mixed);

    for (size_t i = 0; i < last_race; i++) {
        size_t data_observation = aws_atomic_load_int_explicit(races[i].observations[0], aws_memory_order_relaxed);
        size_t flag_observation = aws_atomic_load_int_explicit(races[i].observations[1], aws_memory_order_relaxed);

        /*
         * If thread 2 saw flag == 2, then thread 1's read of DATA happened before thread 2's write,
         * so the data observation must be 0. If flag == 0, the data observation may be anything.
         */
        ASSERT_FALSE(flag_observation && data_observation, "Acquire-release ordering failed at iteration %zu", i);
    }

    free_races(allocator);
    return 0;
}

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/byte_buf_test.c

/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include
#include
#include
#include

AWS_TEST_CASE(test_buffer_cat, s_test_buffer_cat_fn)
static int s_test_buffer_cat_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_byte_buf str1 = aws_byte_buf_from_c_str("testa");
    struct aws_byte_buf str2 = aws_byte_buf_from_c_str(";testb");
    struct aws_byte_buf str3 = aws_byte_buf_from_c_str(";testc");
    const char expected[] = "testa;testb;testc";

    struct aws_byte_buf destination;
    ASSERT_SUCCESS(aws_byte_buf_init(&destination, allocator, str1.len + str2.len + str3.len + 10));
    ASSERT_SUCCESS(aws_byte_buf_cat(&destination, 3, &str1, &str2, &str3));
    ASSERT_INT_EQUALS(strlen(expected), destination.len);
    ASSERT_INT_EQUALS(strlen(expected) + 10, destination.capacity);
    ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), destination.buffer, destination.len);

    destination.len = 0;
    ASSERT_SUCCESS(aws_byte_buf_cat(&destination, 1, &str1));
    ASSERT_INT_EQUALS(str1.len, destination.len);
    ASSERT_BIN_ARRAYS_EQUALS(str1.buffer, str1.len, destination.buffer, destination.len);

    destination.len = 0;
    ASSERT_SUCCESS(aws_byte_buf_cat(&destination, 0));
    ASSERT_INT_EQUALS(0, destination.len);

    aws_byte_buf_clean_up(&destination);
    return 0;
}

AWS_TEST_CASE(test_buffer_cat_dest_too_small, s_test_buffer_cat_dest_too_small_fn)
static int s_test_buffer_cat_dest_too_small_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_byte_buf str1 = aws_byte_buf_from_c_str("testa");
    struct aws_byte_buf str2 = aws_byte_buf_from_c_str(";testb");
    struct aws_byte_buf str3 = aws_byte_buf_from_c_str(";testc");

    struct aws_byte_buf destination;
    ASSERT_SUCCESS(aws_byte_buf_init(&destination, allocator, str1.len + str2.len));
    ASSERT_INT_EQUALS(0, destination.len);
    ASSERT_INT_EQUALS(str1.len + str2.len, destination.capacity);

    ASSERT_ERROR(AWS_ERROR_DEST_COPY_TOO_SMALL, aws_byte_buf_cat(&destination, 3, &str1, &str2, &str3));

    aws_byte_buf_clean_up(&destination);
    return 0;
}

AWS_TEST_CASE(test_buffer_cpy, s_test_buffer_cpy_fn)
static int s_test_buffer_cpy_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_byte_buf from_buf = aws_byte_buf_from_c_str("testa");
    struct aws_byte_cursor from = aws_byte_cursor_from_buf(&from_buf);

    struct aws_byte_buf destination;
    ASSERT_SUCCESS(aws_byte_buf_init(&destination, allocator, from.len + 10));
    ASSERT_SUCCESS(aws_byte_buf_append(&destination, &from));
    ASSERT_INT_EQUALS(from.len, destination.len);
    ASSERT_INT_EQUALS(from.len + 10, destination.capacity);
    ASSERT_BIN_ARRAYS_EQUALS(from.ptr, from.len, destination.buffer, destination.len);

    aws_byte_buf_clean_up(&destination);
    return 0;
}

AWS_TEST_CASE(test_buffer_cpy_offsets, s_test_buffer_cpy_offsets_fn)
static int s_test_buffer_cpy_offsets_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_byte_buf from_buf = aws_byte_buf_from_c_str("testa");
    struct aws_byte_cursor from = aws_byte_cursor_from_buf(&from_buf);
    aws_byte_cursor_advance(&from, 2);

    struct aws_byte_buf destination;
    ASSERT_SUCCESS(aws_byte_buf_init(&destination, allocator, from_buf.len + 10));
    ASSERT_SUCCESS(aws_byte_buf_append(&destination, &from));
    ASSERT_INT_EQUALS(from_buf.len - 2,
destination.len); ASSERT_INT_EQUALS(from_buf.len + 10, destination.capacity); char expected[] = "sta"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), destination.buffer, destination.len); aws_byte_buf_clean_up(&destination); return 0; } AWS_TEST_CASE(test_buffer_cpy_dest_too_small, s_test_buffer_cpy_dest_too_small_fn) static int s_test_buffer_cpy_dest_too_small_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf from_buf = aws_byte_buf_from_c_str("testa"); struct aws_byte_cursor from = aws_byte_cursor_from_buf(&from_buf); struct aws_byte_buf destination; ASSERT_SUCCESS(aws_byte_buf_init(&destination, allocator, from.len - 1)); ASSERT_ERROR(AWS_ERROR_DEST_COPY_TOO_SMALL, aws_byte_buf_append(&destination, &from)); ASSERT_INT_EQUALS(0, destination.len); aws_byte_buf_clean_up(&destination); return 0; } AWS_TEST_CASE(test_buffer_cpy_offsets_dest_too_small, s_test_buffer_cpy_offsets_dest_too_small_fn) static int s_test_buffer_cpy_offsets_dest_too_small_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf from_buf = aws_byte_buf_from_c_str("testa"); struct aws_byte_cursor from = aws_byte_cursor_from_buf(&from_buf); struct aws_byte_buf destination; ASSERT_SUCCESS(aws_byte_buf_init(&destination, allocator, from.len)); destination.len = 1; ASSERT_ERROR(AWS_ERROR_DEST_COPY_TOO_SMALL, aws_byte_buf_append(&destination, &from)); ASSERT_INT_EQUALS(1, destination.len); aws_byte_buf_clean_up(&destination); return 0; } AWS_TEST_CASE(test_buffer_eq, s_test_buffer_eq_fn) static int s_test_buffer_eq_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf b1 = aws_byte_buf_from_c_str("testa"); struct aws_byte_buf b1_equal = aws_byte_buf_from_c_str("testa"); struct aws_byte_buf b2 = aws_byte_buf_from_c_str("testb"); b1.capacity = 5; b1_equal.allocator = allocator; ASSERT_TRUE(aws_byte_buf_eq(&b1, &b1_equal)); ASSERT_TRUE(aws_byte_buf_eq(&b1, &b1)); ASSERT_FALSE(aws_byte_buf_eq(&b1, &b2)); return 0; } AWS_TEST_CASE(test_buffer_eq_same_content_different_len, s_test_buffer_eq_same_content_different_len_fn) static int s_test_buffer_eq_same_content_different_len_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_buf b1 = aws_byte_buf_from_c_str("testa"); struct aws_byte_buf b2 = aws_byte_buf_from_c_str("testa"); b2.len--; ASSERT_FALSE(aws_byte_buf_eq(&b1, &b2)); return 0; } AWS_TEST_CASE(test_buffer_eq_null_internal_byte_buffer, s_test_buffer_eq_null_internal_byte_buffer_fn) static int s_test_buffer_eq_null_internal_byte_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_buf b1 = aws_byte_buf_from_array(NULL, 0); struct aws_byte_buf b2 = aws_byte_buf_from_array(NULL, 0); ASSERT_TRUE(aws_byte_buf_eq(&b1, &b2)); ASSERT_TRUE(aws_byte_buf_eq(&b2, &b1)); struct aws_byte_buf b3 = aws_byte_buf_from_c_str("abc"); ASSERT_FALSE(aws_byte_buf_eq(&b1, &b3)); return 0; } AWS_TEST_CASE(test_buffer_init_copy, s_test_buffer_init_copy_fn) static int s_test_buffer_init_copy_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf src = aws_byte_buf_from_c_str("test_string"); struct aws_byte_buf dest; ASSERT_SUCCESS(aws_byte_buf_init_copy(&dest, allocator, &src)); ASSERT_TRUE(aws_byte_buf_eq(&src, &dest)); ASSERT_INT_EQUALS(src.len, dest.capacity); ASSERT_PTR_EQUALS(allocator, dest.allocator); aws_byte_buf_clean_up(&dest); return 0; } AWS_TEST_CASE(test_buffer_init_copy_null_buffer, s_test_buffer_init_copy_null_buffer_fn) static int 
s_test_buffer_init_copy_null_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf src; AWS_ZERO_STRUCT(src); struct aws_byte_buf dest; ASSERT_SUCCESS(aws_byte_buf_init_copy(&dest, allocator, &src)); return 0; } AWS_TEST_CASE(test_buffer_advance, s_test_buffer_advance) static int s_test_buffer_advance(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; uint8_t arr[16]; AWS_ZERO_ARRAY(arr); struct aws_byte_buf src_buf = aws_byte_buf_from_empty_array(arr, sizeof(arr)); struct aws_byte_buf dst_buf = {0}; ASSERT_TRUE(aws_byte_buf_advance(&src_buf, &dst_buf, 4)); ASSERT_NULL(dst_buf.allocator); ASSERT_INT_EQUALS(src_buf.len, 4); ASSERT_INT_EQUALS(dst_buf.len, 0); ASSERT_INT_EQUALS(dst_buf.capacity, 4); ASSERT_PTR_EQUALS(src_buf.buffer, arr); ASSERT_PTR_EQUALS(dst_buf.buffer, arr); ASSERT_TRUE(aws_byte_buf_advance(&src_buf, &dst_buf, 12)); ASSERT_PTR_EQUALS(dst_buf.buffer, arr + 4); ASSERT_INT_EQUALS(src_buf.len, 16); src_buf.len = 12; ASSERT_FALSE(aws_byte_buf_advance(&src_buf, &dst_buf, 5)); ASSERT_PTR_EQUALS(dst_buf.buffer, NULL); ASSERT_NULL(dst_buf.allocator); ASSERT_INT_EQUALS(dst_buf.len, 0); ASSERT_INT_EQUALS(dst_buf.capacity, 0); ASSERT_INT_EQUALS(src_buf.len, 12); return 0; } AWS_TEST_CASE(test_buffer_printf, s_test_buffer_printf) static int s_test_buffer_printf(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char src[] = "abcdefg"; char dst[100] = {0}; /* check PRInSTR */ memset(dst, 0, sizeof(dst)); snprintf(dst, sizeof(dst), PRInSTR, 2, src); ASSERT_UINT_EQUALS('a', dst[0]); ASSERT_UINT_EQUALS('b', dst[1]); ASSERT_UINT_EQUALS(0, dst[2]); /* check AWS_BYTE_CURSOR_PRI() */ struct aws_byte_cursor cursor = aws_byte_cursor_from_array(src, 2); memset(dst, 0, sizeof(dst)); snprintf(dst, sizeof(dst), PRInSTR, AWS_BYTE_CURSOR_PRI(cursor)); ASSERT_UINT_EQUALS('a', dst[0]); ASSERT_UINT_EQUALS('b', dst[1]); ASSERT_UINT_EQUALS(0, dst[2]); /* check AWS_BYTE_BUF_PRI() */ struct aws_byte_buf buf = aws_byte_buf_from_array(src, 2); memset(dst, 0, sizeof(dst)); snprintf(dst, sizeof(dst), PRInSTR, AWS_BYTE_BUF_PRI(buf)); ASSERT_UINT_EQUALS('a', dst[0]); ASSERT_UINT_EQUALS('b', dst[1]); ASSERT_UINT_EQUALS(0, dst[2]); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_array_eq, s_test_array_eq) static int s_test_array_eq(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; uint8_t a[] = {1, 2, 3}; uint8_t b[] = {1, 2, 3}; uint8_t c[] = {7, 8, 9}; uint8_t d[] = {1, 2, 3, 4}; /* Simple */ ASSERT_TRUE(aws_array_eq(a, 3, b, 3)); ASSERT_FALSE(aws_array_eq(a, 3, c, 3)); ASSERT_FALSE(aws_array_eq(a, 3, d, 4)); /* Comparisons agains self */ ASSERT_TRUE(aws_array_eq(a, 3, a, 3)); ASSERT_FALSE(aws_array_eq(a, 3, a, 2)); /* Different data but size is 0 */ ASSERT_TRUE(aws_array_eq(a, 0, c, 0)); /* NULL inputs are OK if length is 0 */ ASSERT_TRUE(aws_array_eq(NULL, 0, NULL, 0)); ASSERT_TRUE(aws_array_eq(a, 0, NULL, 0)); ASSERT_TRUE(aws_array_eq(NULL, 0, b, 0)); return 0; } AWS_TEST_CASE(test_array_eq_ignore_case, s_test_array_eq_ignore_case) static int s_test_array_eq_ignore_case(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; { uint8_t a[] = {'a', 'B', 'c', 'D', '1'}; uint8_t b[] = {'a', 'b', 'C', 'D', '1'}; /* same as a */ uint8_t c[] = {'a', 'b', 'c', 'd', '9'}; /* different */ /* Simple */ ASSERT_TRUE(aws_array_eq_ignore_case(a, 5, b, 5)); ASSERT_FALSE(aws_array_eq_ignore_case(a, 5, c, 5)); ASSERT_FALSE(aws_array_eq_ignore_case(a, 5, b, 3)); /* Comparisons against self */ 
ASSERT_TRUE(aws_array_eq_ignore_case(a, 5, a, 5)); ASSERT_FALSE(aws_array_eq_ignore_case(a, 5, a, 4)); /* Different data but size is 0 */ ASSERT_TRUE(aws_array_eq_ignore_case(a, 0, c, 0)); /* NULL inputs are OK if length is 0 */ ASSERT_TRUE(aws_array_eq_ignore_case(NULL, 0, NULL, 0)); ASSERT_TRUE(aws_array_eq_ignore_case(a, 0, NULL, 0)); ASSERT_TRUE(aws_array_eq_ignore_case(NULL, 0, b, 0)); } { /* Comparison should continue beyond null-terminator */ uint8_t a[] = {'a', 0, 'b'}; uint8_t b[] = {'a', 0, 'c'}; uint8_t c[] = {'a', 0, 'b'}; ASSERT_FALSE(aws_array_eq_ignore_case(&a, 3, &b, 3)); ASSERT_TRUE(aws_array_eq_ignore_case(&a, 3, &c, 3)); } { /* Compare every possible uint8_t value, then lower against upper, then upper against lower. * Ex: * a_src = {0 ... 255, 'a' ... 'z', 'A' ... 'Z'}; * b_src = {0 ... 255, 'A' ... 'Z', 'a' ... 'z'}; */ uint8_t a[256 + 26 + 26]; uint8_t b[256 + 26 + 26]; for (size_t i = 0; i < 256; ++i) { a[i] = (uint8_t)i; b[i] = (uint8_t)i; } for (size_t i = 0, c = 'a'; c <= 'z'; ++i, ++c) { a[256 + i] = (uint8_t)c; b[256 + 26 + i] = (uint8_t)c; } for (size_t i = 0, c = 'A'; c <= 'Z'; ++i, ++c) { a[256 + 26 + i] = (uint8_t)c; b[256 + i] = (uint8_t)c; } ASSERT_TRUE(aws_array_eq_ignore_case(&a, sizeof(a), &b, sizeof(b))); } return 0; } AWS_TEST_CASE(test_array_eq_c_str, s_test_array_eq_c_str) static int s_test_array_eq_c_str(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; { uint8_t arr_a[] = {'a', 'b', 'c'}; const char *str_a = "abc"; const char *str_b = "xyz"; const char *str_c = "abcd"; const char *empty = ""; /* Simple */ ASSERT_TRUE(aws_array_eq_c_str(arr_a, 3, str_a)); ASSERT_FALSE(aws_array_eq_c_str(arr_a, 3, str_b)); ASSERT_FALSE(aws_array_eq_c_str(arr_a, 3, str_c)); /* Referencing self */ ASSERT_TRUE(aws_array_eq_c_str(str_a, 3, str_a)); ASSERT_FALSE(aws_array_eq_c_str(str_a, 2, str_a)); /* Check length 0 */ ASSERT_TRUE(aws_array_eq_c_str(arr_a, 0, empty)); ASSERT_FALSE(aws_array_eq_c_str(arr_a, 0, str_a)); /* NULL array is OK if length is 0 */ ASSERT_TRUE(aws_array_eq_c_str(NULL, 0, empty)); ASSERT_FALSE(aws_array_eq_c_str(NULL, 0, str_a)); } { /* Array is not expected to contain null-terminator */ uint8_t arr_a[] = {'a', 'b', 'c', 0}; const char *str_a = "abc"; ASSERT_FALSE(aws_array_eq_c_str(arr_a, 4, str_a)); } return 0; } AWS_TEST_CASE(test_array_eq_c_str_ignore_case, s_test_array_eq_c_str_ignore_case) static int s_test_array_eq_c_str_ignore_case(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; { uint8_t arr_a[] = {'a', 'B', 'c'}; const char *str_a = "Abc"; const char *str_b = "xyz"; const char *str_c = "aBcd"; const char *empty = ""; /* Simple */ ASSERT_TRUE(aws_array_eq_c_str_ignore_case(arr_a, 3, str_a)); ASSERT_FALSE(aws_array_eq_c_str_ignore_case(arr_a, 3, str_b)); ASSERT_FALSE(aws_array_eq_c_str_ignore_case(arr_a, 3, str_c)); /* Referencing self */ ASSERT_TRUE(aws_array_eq_c_str_ignore_case(str_a, 3, str_a)); ASSERT_FALSE(aws_array_eq_c_str_ignore_case(str_a, 2, str_a)); /* Check length 0 */ ASSERT_TRUE(aws_array_eq_c_str_ignore_case(arr_a, 0, empty)); ASSERT_FALSE(aws_array_eq_c_str_ignore_case(arr_a, 0, str_a)); /* NULL array is OK if length is 0 */ ASSERT_TRUE(aws_array_eq_c_str_ignore_case(NULL, 0, empty)); ASSERT_FALSE(aws_array_eq_c_str_ignore_case(NULL, 0, str_a)); } { /* Array is not expected to contain null-terminator */ uint8_t arr_a[] = {'a', 'b', 'c', 0}; const char *str_a = "abc"; ASSERT_FALSE(aws_array_eq_c_str_ignore_case(arr_a, 4, str_a)); } return 0; } 
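/*
 * Illustrative sketch, not from the upstream test suite: the case-insensitive comparisons exercised
 * above should agree with mapping both inputs through the shared to-lower lookup table and comparing
 * byte-for-byte. The test name below is a hypothetical addition; it assumes only APIs already used
 * elsewhere in this file (aws_array_eq_ignore_case and aws_lookup_table_to_lower_get) and is not
 * registered with the test runner.
 */
AWS_TEST_CASE(test_array_eq_ignore_case_agrees_with_lower_table, s_test_array_eq_ignore_case_agrees_with_lower_table)
static int s_test_array_eq_ignore_case_agrees_with_lower_table(struct aws_allocator *allocator, void *ctx) {
    (void)allocator;
    (void)ctx;

    const uint8_t *to_lower = aws_lookup_table_to_lower_get();

    /* For every pair of single bytes, the dedicated API and the lookup-table definition must agree. */
    for (size_t i = 0; i <= UINT8_MAX; ++i) {
        for (size_t j = 0; j <= UINT8_MAX; ++j) {
            uint8_t a = (uint8_t)i;
            uint8_t b = (uint8_t)j;
            bool api_says_equal = aws_array_eq_ignore_case(&a, 1, &b, 1);
            bool table_says_equal = to_lower[a] == to_lower[b];
            ASSERT_TRUE(api_says_equal == table_says_equal);
        }
    }

    return 0;
}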
AWS_TEST_CASE(test_array_hash_ignore_case, s_test_array_hash_ignore_case) static int s_test_array_hash_ignore_case(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; { /* Check against known FNV-1A values */ uint8_t a[] = {'a', 'b', 'c'}; ASSERT_UINT_EQUALS(0xe71fa2190541574bULL, aws_hash_array_ignore_case(a, 3)); uint8_t b[] = {'A', 'B', 'C'}; ASSERT_UINT_EQUALS(0xe71fa2190541574bULL, aws_hash_array_ignore_case(&b, 3)); } { uint8_t a[] = {'a', 'B', 'c', 1, 2, 3}; uint8_t b[] = {'A', 'b', 'c', 1, 2, 3}; ASSERT_TRUE(aws_hash_array_ignore_case(a, 6) == aws_hash_array_ignore_case(b, 6)); } { uint8_t a[] = {'a', 'b', 'c'}; uint8_t b[] = {'x', 'y', 'z'}; ASSERT_FALSE(aws_hash_array_ignore_case(a, 3) == aws_hash_array_ignore_case(b, 3)); } return 0; } static int s_do_append_dynamic_test( struct aws_allocator *allocator, size_t starting_size, size_t append_size, size_t iterations, int (*append_dynamic)(struct aws_byte_buf *, const struct aws_byte_cursor *)) { struct aws_byte_buf accum_buf; aws_byte_buf_init(&accum_buf, allocator, starting_size); memset(accum_buf.buffer, 0, starting_size); accum_buf.len = starting_size; struct aws_byte_buf append_buf; aws_byte_buf_init(&append_buf, allocator, append_size); append_buf.len = append_size; struct aws_byte_cursor append_cursor = aws_byte_cursor_from_buf(&append_buf); for (size_t i = 0; i < iterations; ++i) { /* * Initialize the source and dest buffers to different, easily recognizable byte blocks */ memset(append_buf.buffer, 255, append_buf.capacity); memset(accum_buf.buffer, 0, accum_buf.capacity); size_t before_size = accum_buf.len; ASSERT_TRUE(append_dynamic(&accum_buf, &append_cursor) == AWS_OP_SUCCESS); size_t after_size = accum_buf.len; size_t expected_len = starting_size + (i + 1) * append_size; ASSERT_TRUE(accum_buf.capacity >= expected_len); ASSERT_TRUE(after_size == expected_len); /* * Verify post-append contents. * * Check that the result has the right number of 0s followed by the right number of * 255s. */ for (size_t bi = 0; bi < before_size; ++bi) { ASSERT_TRUE(accum_buf.buffer[bi] == 0); } for (size_t ai = before_size; ai < after_size; ++ai) { ASSERT_TRUE(accum_buf.buffer[ai] == 255); } } aws_byte_buf_clean_up(&accum_buf); aws_byte_buf_clean_up(&append_buf); return AWS_OP_SUCCESS; } static int s_test_byte_buf_write_to_capacity(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t buf_storage[5]; AWS_ZERO_ARRAY(buf_storage); struct aws_byte_buf buf = aws_byte_buf_from_empty_array(buf_storage, sizeof(buf_storage)); /* Test a write to a fresh buffer with plenty of space */ struct aws_byte_cursor original_abc = aws_byte_cursor_from_c_str("abc"); struct aws_byte_cursor advancing_abc = original_abc; struct aws_byte_cursor written = aws_byte_buf_write_to_capacity(&buf, &advancing_abc); ASSERT_BIN_ARRAYS_EQUALS("abc", 3, buf.buffer, buf.len); ASSERT_UINT_EQUALS(0, advancing_abc.len); ASSERT_BIN_ARRAYS_EQUALS("abc", 3, written.ptr, written.len); ASSERT_PTR_EQUALS(original_abc.ptr, written.ptr); /* Test writing again to same buffer, but we can't fit it all */ struct aws_byte_cursor advancing_def = aws_byte_cursor_from_c_str("def"); written = aws_byte_buf_write_to_capacity(&buf, &advancing_def); ASSERT_UINT_EQUALS(buf.len, buf.capacity); ASSERT_BIN_ARRAYS_EQUALS("abcde", 5, buf.buffer, buf.len); ASSERT_BIN_ARRAYS_EQUALS("f", 1, advancing_def.ptr, advancing_def.len); ASSERT_BIN_ARRAYS_EQUALS("de", 2, written.ptr, written.len); /* Test writing a cursor that exactly matches capacity. 
*/ aws_byte_buf_reset(&buf, false); struct aws_byte_cursor advancing_filler = aws_byte_cursor_from_c_str("12345"); written = aws_byte_buf_write_to_capacity(&buf, &advancing_filler); ASSERT_BIN_ARRAYS_EQUALS("12345", 5, buf.buffer, buf.len); ASSERT_UINT_EQUALS(0, advancing_filler.len); ASSERT_BIN_ARRAYS_EQUALS("12345", 5, written.ptr, written.len); /* Test passing an empty cursor. Nothing should happen. */ aws_byte_buf_reset(&buf, false); struct aws_byte_cursor advancing_zeroed; AWS_ZERO_STRUCT(advancing_zeroed); written = aws_byte_buf_write_to_capacity(&buf, &advancing_zeroed); ASSERT_UINT_EQUALS(0, buf.len); ASSERT_UINT_EQUALS(0, advancing_zeroed.len); ASSERT_NULL(advancing_zeroed.ptr); ASSERT_UINT_EQUALS(0, written.len); ASSERT_NULL(written.ptr); /* Test writing to a full buffer. Nothing should happen. */ buf.len = buf.capacity; struct aws_byte_cursor original_nope = aws_byte_cursor_from_c_str("nope"); struct aws_byte_cursor advancing_nope = original_nope; written = aws_byte_buf_write_to_capacity(&buf, &advancing_nope); ASSERT_UINT_EQUALS(buf.capacity, buf.len); ASSERT_UINT_EQUALS(original_nope.len, advancing_nope.len); ASSERT_PTR_EQUALS(original_nope.ptr, advancing_nope.ptr); ASSERT_PTR_EQUALS(original_nope.ptr, written.ptr); ASSERT_UINT_EQUALS(0, written.len); aws_byte_buf_clean_up(&buf); return 0; } AWS_TEST_CASE(test_byte_buf_write_to_capacity, s_test_byte_buf_write_to_capacity); static int s_test_byte_buf_append_dynamic(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* * Throw a small sample of different growth request profiles at the functions */ /* * regular append */ ASSERT_TRUE(s_do_append_dynamic_test(allocator, 1, 10000, 1, aws_byte_buf_append_dynamic) == AWS_OP_SUCCESS); ASSERT_TRUE(s_do_append_dynamic_test(allocator, 1, 1, 1000, aws_byte_buf_append_dynamic) == AWS_OP_SUCCESS); ASSERT_TRUE(s_do_append_dynamic_test(allocator, 10000, 1, 2, aws_byte_buf_append_dynamic) == AWS_OP_SUCCESS); ASSERT_TRUE(s_do_append_dynamic_test(allocator, 100, 10, 100, aws_byte_buf_append_dynamic) == AWS_OP_SUCCESS); /* * secure append - note we don't attempt to check if the memory was actually zeroed */ ASSERT_TRUE(s_do_append_dynamic_test(allocator, 1, 10000, 1, aws_byte_buf_append_dynamic_secure) == AWS_OP_SUCCESS); ASSERT_TRUE(s_do_append_dynamic_test(allocator, 1, 1, 1000, aws_byte_buf_append_dynamic_secure) == AWS_OP_SUCCESS); ASSERT_TRUE(s_do_append_dynamic_test(allocator, 10000, 1, 2, aws_byte_buf_append_dynamic_secure) == AWS_OP_SUCCESS); ASSERT_TRUE( s_do_append_dynamic_test(allocator, 100, 10, 100, aws_byte_buf_append_dynamic_secure) == AWS_OP_SUCCESS); return 0; } AWS_TEST_CASE(test_byte_buf_append_dynamic, s_test_byte_buf_append_dynamic) static uint8_t s_append_byte_array[] = {0xFF, 0xFE, 0xAB, 0x00, 0x55, 0x62}; static int s_test_byte_buf_append_byte(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 1); for (size_t i = 0; i < AWS_ARRAY_SIZE(s_append_byte_array); ++i) { ASSERT_SUCCESS(aws_byte_buf_append_byte_dynamic(&buffer, s_append_byte_array[i])); ASSERT_BIN_ARRAYS_EQUALS(s_append_byte_array, i + 1, buffer.buffer, buffer.len); } aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_append_byte, s_test_byte_buf_append_byte) AWS_STATIC_STRING_FROM_LITERAL(s_to_lower_test, "UPPerANdLowercASE"); static int s_test_byte_buf_append_lookup_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, s_to_lower_test->len); 
struct aws_byte_cursor to_lower_cursor = aws_byte_cursor_from_c_str((char *)s_to_lower_test->bytes); ASSERT_TRUE( aws_byte_buf_append_with_lookup(&buffer, &to_lower_cursor, aws_lookup_table_to_lower_get()) == AWS_OP_SUCCESS); ASSERT_TRUE(buffer.len == s_to_lower_test->len); for (size_t i = 0; i < s_to_lower_test->len; ++i) { uint8_t value = buffer.buffer[i]; ASSERT_TRUE(value > 'Z' || value < 'A'); } aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_append_lookup_success, s_test_byte_buf_append_lookup_success) static int s_test_reset_body(struct aws_byte_buf *buffer) { struct aws_byte_cursor to_lower_cursor = aws_byte_cursor_from_c_str((char *)s_to_lower_test->bytes); ASSERT_TRUE( aws_byte_buf_append_with_lookup(buffer, &to_lower_cursor, aws_lookup_table_to_lower_get()) == AWS_OP_SUCCESS); ASSERT_TRUE(buffer->len == s_to_lower_test->len); for (size_t i = 0; i < s_to_lower_test->len; ++i) { uint8_t value = buffer->buffer[i]; ASSERT_TRUE(value > 'Z' || value < 'A'); } return 0; } static int s_test_byte_buf_reset(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, s_to_lower_test->len); ASSERT_SUCCESS(s_test_reset_body(&buffer)); size_t old_cap = buffer.capacity; aws_byte_buf_reset(&buffer, false); ASSERT_TRUE(buffer.len == 0); ASSERT_TRUE(buffer.capacity == old_cap); ASSERT_SUCCESS(s_test_reset_body(&buffer)); old_cap = buffer.capacity; aws_byte_buf_reset(&buffer, true); ASSERT_TRUE(buffer.len == 0); ASSERT_TRUE(buffer.capacity == old_cap); for (size_t i = 0; i < buffer.capacity; i++) { ASSERT_TRUE(buffer.buffer[i] == 0); } ASSERT_SUCCESS(s_test_reset_body(&buffer)); aws_byte_buf_clean_up(&buffer); /* check that reset succeeds even on an empty buffer */ aws_byte_buf_reset(&buffer, true); return 0; } AWS_TEST_CASE(test_byte_buf_reset, s_test_byte_buf_reset) static int s_test_byte_buf_append_lookup_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 3); struct aws_byte_cursor to_lower_cursor = aws_byte_cursor_from_c_str((char *)s_to_lower_test->bytes); ASSERT_TRUE( aws_byte_buf_append_with_lookup(&buffer, &to_lower_cursor, aws_lookup_table_to_lower_get()) == AWS_OP_ERR); aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_append_lookup_failure, s_test_byte_buf_append_lookup_failure) AWS_STATIC_STRING_FROM_LITERAL(s_reserve_test_suffix, "ReserveTest"); AWS_STATIC_STRING_FROM_LITERAL(s_reserve_test_prefix, "Successful"); AWS_STATIC_STRING_FROM_LITERAL(s_reserve_test_prefix_concatenated, "SuccessfulReserveTest"); static int s_test_byte_buf_reserve(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, s_reserve_test_prefix->len); struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_reserve_test_prefix); ASSERT_TRUE(aws_byte_buf_append(&buffer, &prefix_cursor) == AWS_OP_SUCCESS); struct aws_byte_cursor suffix_cursor = aws_byte_cursor_from_string(s_reserve_test_suffix); ASSERT_TRUE(aws_byte_buf_append(&buffer, &suffix_cursor) == AWS_OP_ERR); aws_byte_buf_reserve(&buffer, s_reserve_test_prefix_concatenated->len); ASSERT_TRUE(aws_byte_buf_append(&buffer, &suffix_cursor) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_byte_buf_eq_c_str(&buffer, (char *)s_reserve_test_prefix_concatenated->bytes)); aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_reserve, s_test_byte_buf_reserve) static int 
s_test_byte_buf_reserve_initial_capacity_zero(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buffer; AWS_ZERO_STRUCT(buffer); size_t capacity = 1; ASSERT_SUCCESS(aws_byte_buf_init(&buffer, allocator, 0)); ASSERT_SUCCESS(aws_byte_buf_reserve(&buffer, capacity)); ASSERT_TRUE(buffer.capacity == capacity); aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_reserve_initial_capacity_zero, s_test_byte_buf_reserve_initial_capacity_zero) static int s_test_byte_buf_reserve_relative(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 1); struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_reserve_test_prefix); ASSERT_TRUE(aws_byte_buf_reserve_relative(&buffer, prefix_cursor.len) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_byte_buf_append(&buffer, &prefix_cursor) == AWS_OP_SUCCESS); struct aws_byte_cursor suffix_cursor = aws_byte_cursor_from_string(s_reserve_test_suffix); ASSERT_TRUE(aws_byte_buf_reserve_relative(&buffer, suffix_cursor.len) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_byte_buf_append(&buffer, &suffix_cursor) == AWS_OP_SUCCESS); ASSERT_TRUE(aws_byte_buf_eq_c_str(&buffer, (char *)s_reserve_test_prefix_concatenated->bytes)); aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_reserve_relative, s_test_byte_buf_reserve_relative) static int s_test_byte_cursor_starts_with(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor a = aws_byte_cursor_from_c_str("a"); struct aws_byte_cursor abcd = aws_byte_cursor_from_c_str("abcd"); struct aws_byte_cursor ab = aws_byte_cursor_from_c_str("ab"); struct aws_byte_cursor abcde = aws_byte_cursor_from_c_str("abcde"); struct aws_byte_cursor ABCD = aws_byte_cursor_from_c_str("ABCD"); struct aws_byte_cursor AB = aws_byte_cursor_from_c_str("AB"); struct aws_byte_cursor empty_string = aws_byte_cursor_from_c_str(""); struct aws_byte_cursor null_char_string = aws_byte_cursor_from_array("\0", 1); /* TRUE */ ASSERT_TRUE(aws_byte_cursor_starts_with(&a, &a)); ASSERT_TRUE(aws_byte_cursor_starts_with(&abcd, &ab)); ASSERT_TRUE(aws_byte_cursor_starts_with(&abcd, &abcd)); ASSERT_TRUE(aws_byte_cursor_starts_with(&abcd, &empty_string)); ASSERT_TRUE(aws_byte_cursor_starts_with(&empty_string, &empty_string)); /* FALSE */ ASSERT_FALSE(aws_byte_cursor_starts_with(&abcd, &abcde)); ASSERT_FALSE(aws_byte_cursor_starts_with(&abcd, &ABCD)); ASSERT_FALSE(aws_byte_cursor_starts_with(&abcd, &AB)); ASSERT_FALSE(aws_byte_cursor_starts_with(&empty_string, &a)); ASSERT_FALSE(aws_byte_cursor_starts_with(&empty_string, &null_char_string)); ASSERT_FALSE(aws_byte_cursor_starts_with(&abcd, &null_char_string)); return 0; } AWS_TEST_CASE(test_byte_cursor_starts_with, s_test_byte_cursor_starts_with) static int s_test_byte_cursor_starts_with_ignore_case(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor a = aws_byte_cursor_from_c_str("a"); struct aws_byte_cursor A = aws_byte_cursor_from_c_str("A"); struct aws_byte_cursor abcd = aws_byte_cursor_from_c_str("abcd"); struct aws_byte_cursor ABCD = aws_byte_cursor_from_c_str("ABCD"); struct aws_byte_cursor abcde = aws_byte_cursor_from_c_str("abcde"); struct aws_byte_cursor azcd = aws_byte_cursor_from_c_str("azcd"); struct aws_byte_cursor empty_string = aws_byte_cursor_from_c_str(""); struct aws_byte_cursor null_char_string = aws_byte_cursor_from_array("\0", 1); /* TRUE */ 
ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&abcd, &abcd)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&abcd, &ABCD)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&ABCD, &abcd)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&ABCD, &ABCD)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&abcd, &a)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&abcd, &A)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&ABCD, &a)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&ABCD, &A)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&abcd, &empty_string)); ASSERT_TRUE(aws_byte_cursor_starts_with_ignore_case(&empty_string, &empty_string)); /* FALSE */ ASSERT_FALSE(aws_byte_cursor_starts_with_ignore_case(&abcd, &abcde)); ASSERT_FALSE(aws_byte_cursor_starts_with_ignore_case(&abcd, &azcd)); ASSERT_FALSE(aws_byte_cursor_starts_with_ignore_case(&empty_string, &a)); ASSERT_FALSE(aws_byte_cursor_starts_with_ignore_case(&empty_string, &null_char_string)); return 0; } AWS_TEST_CASE(test_byte_cursor_starts_with_ignore_case, s_test_byte_cursor_starts_with_ignore_case) static int s_test_byte_cursor_compare_lexical(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test"); struct aws_byte_cursor test_cursor2 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test"); struct aws_byte_cursor test1_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test1"); struct aws_byte_cursor test2_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test2"); struct aws_byte_cursor abc_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("abc"); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&test_cursor, &test_cursor2) == 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&test_cursor, &abc_cursor) > 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&abc_cursor, &test_cursor) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&test_cursor, &test2_cursor) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&test2_cursor, &test_cursor) > 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&test1_cursor, &test2_cursor) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&test2_cursor, &test1_cursor) > 0); struct aws_byte_cursor ff_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xFF\xFF"); struct aws_byte_cursor one_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x01\x01"); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&ff_cursor, &one_cursor) > 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&one_cursor, &ff_cursor) < 0); struct aws_byte_cursor Test_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Test"); struct aws_byte_cursor tesT_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("tesT"); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&Test_cursor, &tesT_cursor) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&tesT_cursor, &Test_cursor) > 0); return 0; } AWS_TEST_CASE(test_byte_cursor_compare_lexical, s_test_byte_cursor_compare_lexical) static int s_test_byte_cursor_compare_lookup(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor Test_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Test"); struct aws_byte_cursor tesT_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("tesT"); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&Test_cursor, &tesT_cursor, aws_lookup_table_to_lower_get()) == 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&tesT_cursor, &Test_cursor, aws_lookup_table_to_lower_get()) == 0); struct aws_byte_cursor ABC_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ABC"); struct 
aws_byte_cursor abc_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("abc"); ASSERT_TRUE(aws_byte_cursor_compare_lexical(&ABC_cursor, &abc_cursor) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&ABC_cursor, &abc_cursor, aws_lookup_table_to_lower_get()) == 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&abc_cursor, &ABC_cursor, aws_lookup_table_to_lower_get()) == 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&ABC_cursor, &tesT_cursor, aws_lookup_table_to_lower_get()) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&tesT_cursor, &ABC_cursor, aws_lookup_table_to_lower_get()) > 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&abc_cursor, &tesT_cursor, aws_lookup_table_to_lower_get()) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&tesT_cursor, &abc_cursor, aws_lookup_table_to_lower_get()) > 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&abc_cursor, &Test_cursor, aws_lookup_table_to_lower_get()) < 0); ASSERT_TRUE(aws_byte_cursor_compare_lookup(&Test_cursor, &abc_cursor, aws_lookup_table_to_lower_get()) > 0); return 0; } AWS_TEST_CASE(test_byte_cursor_compare_lookup, s_test_byte_cursor_compare_lookup) static int s_test_byte_buf_init_cache_and_update_cursors(struct aws_allocator *allocator, void *ctx) { (void)ctx; { /* store one cursor */ struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str("asdf"); struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_cache_and_update_cursors(&buf, allocator, &cursor, NULL)); ASSERT_BIN_ARRAYS_EQUALS("asdf", 4, cursor.ptr, cursor.len); ASSERT_TRUE(cursor.ptr >= buf.buffer && cursor.ptr < buf.buffer + buf.len); ASSERT_BIN_ARRAYS_EQUALS("asdf", 4, buf.buffer, buf.len); ASSERT_UINT_EQUALS(buf.len, buf.capacity); aws_byte_buf_clean_up(&buf); } { /* store no cursors */ struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_cache_and_update_cursors(&buf, allocator, NULL)); aws_byte_buf_clean_up(&buf); } { /* store multiple cursors */ struct aws_byte_cursor cursor1 = aws_byte_cursor_from_c_str("one"); struct aws_byte_cursor cursor2 = aws_byte_cursor_from_c_str("two"); struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_cache_and_update_cursors(&buf, allocator, &cursor1, &cursor2, NULL)); ASSERT_BIN_ARRAYS_EQUALS("one", 3, cursor1.ptr, cursor1.len); ASSERT_TRUE(cursor1.ptr >= buf.buffer && cursor1.ptr < buf.buffer + buf.len); ASSERT_BIN_ARRAYS_EQUALS("two", 3, cursor2.ptr, cursor2.len); ASSERT_TRUE(cursor2.ptr >= buf.buffer && cursor2.ptr < buf.buffer + buf.len); ASSERT_BIN_ARRAYS_EQUALS("onetwo", 6, buf.buffer, buf.len); ASSERT_UINT_EQUALS(buf.len, buf.capacity); aws_byte_buf_clean_up(&buf); } { /* store empty string cursor */ struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(""); struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_cache_and_update_cursors(&buf, allocator, &cursor, NULL)); ASSERT_UINT_EQUALS(0, cursor.len); ASSERT_UINT_EQUALS(0, buf.len); aws_byte_buf_clean_up(&buf); } { /* store zeroed out cursor */ struct aws_byte_cursor cursor; AWS_ZERO_STRUCT(cursor); struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_cache_and_update_cursors(&buf, allocator, &cursor, NULL)); ASSERT_UINT_EQUALS(0, cursor.len); ASSERT_UINT_EQUALS(0, buf.len); aws_byte_buf_clean_up(&buf); } { /* store something valid after some empty cursors */ struct aws_byte_cursor cursor_empty = aws_byte_cursor_from_c_str(""); struct aws_byte_cursor cursor_zeroed; AWS_ZERO_STRUCT(cursor_zeroed); struct aws_byte_cursor cursor_normal = aws_byte_cursor_from_c_str("normal"); struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_cache_and_update_cursors( 
&buf, allocator, &cursor_empty, &cursor_zeroed, &cursor_normal, NULL)); ASSERT_UINT_EQUALS(0, cursor_empty.len); ASSERT_UINT_EQUALS(0, cursor_zeroed.len); ASSERT_BIN_ARRAYS_EQUALS("normal", 6, cursor_normal.ptr, cursor_normal.len); ASSERT_TRUE(cursor_normal.ptr >= buf.buffer && cursor_normal.ptr < buf.buffer + buf.len); ASSERT_BIN_ARRAYS_EQUALS("normal", 6, buf.buffer, buf.len); ASSERT_UINT_EQUALS(buf.len, buf.capacity); aws_byte_buf_clean_up(&buf); } return 0; } AWS_TEST_CASE(test_byte_buf_init_cache_and_update_cursors, s_test_byte_buf_init_cache_and_update_cursors) static int s_test_byte_buf_empty_appends(struct aws_allocator *allocator, void *ctx) { (void)ctx; { /* append */ struct aws_byte_buf buffer; AWS_ZERO_STRUCT(buffer); struct aws_byte_cursor zeroed_out; AWS_ZERO_STRUCT(zeroed_out); ASSERT_SUCCESS(aws_byte_buf_append(&buffer, &zeroed_out)); ASSERT_UINT_EQUALS(buffer.len, 0); struct aws_byte_cursor empty = aws_byte_cursor_from_c_str(""); ASSERT_SUCCESS(aws_byte_buf_append(&buffer, &empty)); ASSERT_UINT_EQUALS(buffer.len, 0); } { /* dynamic append */ struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 0); struct aws_byte_cursor zeroed_out; AWS_ZERO_STRUCT(zeroed_out); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&buffer, &zeroed_out)); ASSERT_UINT_EQUALS(buffer.len, 0); struct aws_byte_cursor empty = aws_byte_cursor_from_c_str(""); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&buffer, &empty)); ASSERT_UINT_EQUALS(buffer.len, 0); } { /* append with lookup */ struct aws_byte_buf buffer; AWS_ZERO_STRUCT(buffer); struct aws_byte_cursor zeroed_out; AWS_ZERO_STRUCT(zeroed_out); ASSERT_SUCCESS(aws_byte_buf_append_with_lookup(&buffer, &zeroed_out, aws_lookup_table_to_lower_get())); ASSERT_UINT_EQUALS(buffer.len, 0); struct aws_byte_cursor empty = aws_byte_cursor_from_c_str(""); ASSERT_SUCCESS(aws_byte_buf_append_with_lookup(&buffer, &empty, aws_lookup_table_to_lower_get())); ASSERT_UINT_EQUALS(buffer.len, 0); } { /* append and update */ struct aws_byte_buf buffer; AWS_ZERO_STRUCT(buffer); struct aws_byte_cursor zeroed_out; AWS_ZERO_STRUCT(zeroed_out); ASSERT_SUCCESS(aws_byte_buf_append_and_update(&buffer, &zeroed_out)); ASSERT_UINT_EQUALS(buffer.len, 0); ASSERT_NULL(zeroed_out.ptr, 0); ASSERT_UINT_EQUALS(zeroed_out.len, 0); struct aws_byte_cursor empty = aws_byte_cursor_from_c_str(""); ASSERT_SUCCESS(aws_byte_buf_append_and_update(&buffer, &empty)); ASSERT_NULL(empty.ptr, 0); ASSERT_UINT_EQUALS(empty.len, 0); } return 0; } AWS_TEST_CASE(test_byte_buf_empty_appends, s_test_byte_buf_empty_appends) static int s_test_byte_buf_append_and_update_fail(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 10); struct aws_byte_cursor test_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("TOOOOOOOO LONG"); struct aws_byte_cursor test_cursor_copy = test_cursor; ASSERT_FAILS(aws_byte_buf_append_and_update(&buffer, &test_cursor)); ASSERT_TRUE((test_cursor.ptr == test_cursor_copy.ptr) && (test_cursor.len == test_cursor_copy.len)); aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_append_and_update_fail, s_test_byte_buf_append_and_update_fail) static int s_test_byte_buf_append_and_update_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 12); struct aws_byte_cursor test_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SHORT"); struct aws_byte_cursor test_cursor_copy = test_cursor; 
ASSERT_SUCCESS(aws_byte_buf_append_and_update(&buffer, &test_cursor)); ASSERT_TRUE(test_cursor.ptr == buffer.buffer); ASSERT_TRUE(aws_byte_cursor_eq(&test_cursor, &test_cursor_copy)); struct aws_byte_cursor test_cursor2 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STOP"); struct aws_byte_cursor test_cursor_copy2 = test_cursor2; ASSERT_SUCCESS(aws_byte_buf_append_and_update(&buffer, &test_cursor2)); ASSERT_TRUE(test_cursor2.ptr == buffer.buffer + test_cursor.len); ASSERT_TRUE(aws_byte_cursor_eq(&test_cursor2, &test_cursor_copy2)); ASSERT_TRUE(buffer.len == test_cursor.len + test_cursor2.len); aws_byte_buf_clean_up(&buffer); return 0; } AWS_TEST_CASE(test_byte_buf_append_and_update_success, s_test_byte_buf_append_and_update_success) static int s_test_isalnum(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_TRUE(aws_isalnum('0')); ASSERT_TRUE(aws_isalnum('a')); ASSERT_TRUE(aws_isalnum('A')); ASSERT_FALSE(aws_isalnum(' ')); ASSERT_FALSE(aws_isalnum('\0')); size_t count = 0; for (size_t i = 0; i <= UINT8_MAX; ++i) { if (aws_isalnum((uint8_t)i)) { count++; } } ASSERT_UINT_EQUALS(62, count); /* should not be affected by C locale */ setlocale(LC_CTYPE, "de_DE.iso88591"); ASSERT_FALSE(aws_isalnum((uint8_t)'\xdf')); /* German letter ß in ISO-8859-1 */ return 0; } AWS_TEST_CASE(test_isalnum, s_test_isalnum) static int s_test_isalpha(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_TRUE(aws_isalpha('a')); ASSERT_TRUE(aws_isalpha('A')); ASSERT_FALSE(aws_isalpha('0')); ASSERT_FALSE(aws_isalpha('\0')); ASSERT_FALSE(aws_isalpha(' ')); size_t count = 0; for (size_t i = 0; i <= UINT8_MAX; ++i) { if (aws_isalpha((uint8_t)i)) { count++; } } ASSERT_UINT_EQUALS(52, count); /* should not be affected by C locale */ setlocale(LC_CTYPE, "de_DE.iso88591"); ASSERT_FALSE(aws_isalpha((uint8_t)'\xdf')); /* German letter ß in ISO-8859-1 */ return 0; } AWS_TEST_CASE(test_isalpha, s_test_isalpha) static int s_test_isdigit(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_TRUE(aws_isdigit('0')); ASSERT_TRUE(aws_isdigit('9')); ASSERT_FALSE(aws_isdigit('a')); ASSERT_FALSE(aws_isdigit('A')); ASSERT_FALSE(aws_isdigit('\0')); ASSERT_FALSE(aws_isdigit(' ')); size_t count = 0; for (size_t i = 0; i <= UINT8_MAX; ++i) { if (aws_isdigit((uint8_t)i)) { count++; } } ASSERT_UINT_EQUALS(10, count); return 0; } AWS_TEST_CASE(test_isdigit, s_test_isdigit) static int s_test_isxdigit(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_TRUE(aws_isxdigit('0')); ASSERT_TRUE(aws_isxdigit('9')); ASSERT_TRUE(aws_isxdigit('a')); ASSERT_TRUE(aws_isxdigit('A')); ASSERT_TRUE(aws_isxdigit('f')); ASSERT_TRUE(aws_isxdigit('F')); ASSERT_FALSE(aws_isxdigit('g')); ASSERT_FALSE(aws_isxdigit('G')); ASSERT_FALSE(aws_isxdigit('\0')); ASSERT_FALSE(aws_isxdigit(' ')); size_t count = 0; for (size_t i = 0; i <= UINT8_MAX; ++i) { if (aws_isxdigit((uint8_t)i)) { count++; } } ASSERT_UINT_EQUALS(22, count); return 0; } AWS_TEST_CASE(test_isxdigit, s_test_isxdigit) static int s_test_isspace(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_TRUE(aws_isspace(' ')); ASSERT_TRUE(aws_isspace('\t')); ASSERT_TRUE(aws_isspace('\n')); ASSERT_TRUE(aws_isspace('\v')); ASSERT_TRUE(aws_isspace('\f')); ASSERT_TRUE(aws_isspace('\r')); ASSERT_FALSE(aws_isspace('\0')); ASSERT_FALSE(aws_isspace('a')); ASSERT_FALSE(aws_isspace(0xA0)); /* NBSP in some code-pages */ size_t count = 0; for (size_t i = 0; i <= UINT8_MAX; ++i) { if 
(aws_isspace((uint8_t)i)) { count++; } } ASSERT_UINT_EQUALS(6, count); return 0; } AWS_TEST_CASE(test_isspace, s_test_isspace) AWS_TEST_CASE(test_byte_cursor_utf8_parse_u64, s_byte_cursor_utf8_parse_u64); static int s_byte_cursor_utf8_parse_u64(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t val; /* sanity check */ ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("0"), &val)); ASSERT_UINT_EQUALS(0, val); /* every acceptable character */ ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("9876543210"), &val)); ASSERT_UINT_EQUALS(9876543210, val); /* max value */ ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("18446744073709551615"), &val)); ASSERT_UINT_EQUALS(UINT64_MAX, val); /* leading zeros should have no effect */ ASSERT_SUCCESS( aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("00000000000018446744073709551615"), &val)); ASSERT_UINT_EQUALS(UINT64_MAX, val); /* one bigger than max */ ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("18446744073709551616"), &val)); /* overflow on base multiply */ ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("184467440737095516150"), &val)); /* whitespace is not ok */ ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str(" 0"), &val)); ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("0 "), &val)); ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("0 0"), &val)); /* blank strings are not ok */ ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str(""), &val)); /* hex is not ok */ ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("0x0"), &val)); ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_byte_cursor_utf8_parse_u64(aws_byte_cursor_from_c_str("FF"), &val)); return 0; } AWS_TEST_CASE(test_byte_cursor_utf8_parse_u64_hex, s_byte_cursor_parse_uint64_hex); static int s_byte_cursor_parse_uint64_hex(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t val; /* sanity check */ ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("0"), &val)); ASSERT_UINT_EQUALS(0x0, val); /* every possible character */ ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("9876543210"), &val)); ASSERT_UINT_EQUALS(0x9876543210, val); ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("ABCDEFabcdef"), &val)); ASSERT_UINT_EQUALS(0xABCDEFabcdefULL, val); /* max value */ ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("ffffffffffffffff"), &val)); ASSERT_UINT_EQUALS(UINT64_MAX, val); /* ignore leading zeroes */ ASSERT_SUCCESS( aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("0000000000000000ffffffffffffffff"), &val)); ASSERT_UINT_EQUALS(UINT64_MAX, val); /* overflow */ ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("10000000000000000"), &val)); /* overflow - regression test */ ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("fffffffffffffffff"), &val)); /* invalid character */ ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, 
aws_byte_cursor_utf8_parse_u64_hex(aws_byte_cursor_from_c_str("g"), &val)); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/byte_cursor_find_test.c000066400000000000000000000053021456575232400264250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include static int s_test_byte_cursor_find_str_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char *string_with_match = "This is a string and we want to find a substring of it."; const char *to_find = "and we want"; struct aws_byte_cursor string_with_match_cur = aws_byte_cursor_from_c_str(string_with_match); struct aws_byte_cursor to_find_cur = aws_byte_cursor_from_c_str(to_find); struct aws_byte_cursor find_res; AWS_ZERO_STRUCT(find_res); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&string_with_match_cur, &to_find_cur, &find_res)); ASSERT_BIN_ARRAYS_EQUALS(to_find_cur.ptr, to_find_cur.len, find_res.ptr, to_find_cur.len); ASSERT_UINT_EQUALS(string_with_match_cur.len - (find_res.ptr - string_with_match_cur.ptr), find_res.len); ASSERT_PTR_EQUALS(string_with_match_cur.ptr + (find_res.ptr - string_with_match_cur.ptr), find_res.ptr); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_byte_cursor_find_str, s_test_byte_cursor_find_str_fn) static int s_test_byte_cursor_find_str_not_found_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char *string_with_match = "This is a string and we want to find a substring of it."; const char *to_find = "and we went"; struct aws_byte_cursor string_with_match_cur = aws_byte_cursor_from_c_str(string_with_match); struct aws_byte_cursor to_find_cur = aws_byte_cursor_from_c_str(to_find); struct aws_byte_cursor find_res; AWS_ZERO_STRUCT(find_res); ASSERT_ERROR( AWS_ERROR_STRING_MATCH_NOT_FOUND, aws_byte_cursor_find_exact(&string_with_match_cur, &to_find_cur, &find_res)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_byte_cursor_find_str_not_found, s_test_byte_cursor_find_str_not_found_fn) static int s_test_byte_cursor_find_str_longer_than_input_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char *string_with_match = "This "; const char *to_find = "and we want"; struct aws_byte_cursor string_with_match_cur = aws_byte_cursor_from_c_str(string_with_match); struct aws_byte_cursor to_find_cur = aws_byte_cursor_from_c_str(to_find); struct aws_byte_cursor find_res; AWS_ZERO_STRUCT(find_res); ASSERT_ERROR( AWS_ERROR_STRING_MATCH_NOT_FOUND, aws_byte_cursor_find_exact(&string_with_match_cur, &to_find_cur, &find_res)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_byte_cursor_find_str_longer_than_input, s_test_byte_cursor_find_str_longer_than_input_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/byte_order_test.c000066400000000000000000000041151456575232400252240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/

#include
#include

#ifdef _MSC_VER
#    pragma warning(disable : 4324) /* structure was padded due to alignment specifier */
#endif

static int s_byte_swap_test_fn(struct aws_allocator *allocator, void *ctx) {
    (void)allocator;
    (void)ctx;

    uint64_t ans_x = 0x1122334455667788ULL;
    uint32_t ans_y = 0xaabbccdd;
    uint16_t ans_w = 0xeeff;

    uint8_t x[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
    uint8_t y[] = {0xaa, 0xbb, 0xcc, 0xdd};
    uint8_t w[] = {0xee, 0xff};

    uint64_t x64;
    uint32_t y32;
    uint16_t w16;
    memcpy(&x64, x, sizeof(x));
    memcpy(&y32, y, sizeof(y));
    memcpy(&w16, w, sizeof(w));

    ASSERT_UINT_EQUALS(aws_ntoh64(x64), ans_x);
    ASSERT_UINT_EQUALS(aws_hton64(x64), ans_x);
    ASSERT_UINT_EQUALS(aws_ntoh32(y32), ans_y);
    ASSERT_UINT_EQUALS(aws_hton32(y32), ans_y);
    ASSERT_UINT_EQUALS(aws_ntoh16(w16), ans_w);
    ASSERT_UINT_EQUALS(aws_hton16(w16), ans_w);

    return 0;
}
AWS_TEST_CASE(byte_swap_test, s_byte_swap_test_fn);

AWS_ALIGNED_TYPEDEF(uint8_t, aligned32_storage[64], 32);

struct padding32_disaster {
    uint8_t dumb;
    aligned32_storage b;
};

static int s_alignment32_test_fn(struct aws_allocator *allocator, void *ctx) {
    (void)allocator;
    (void)ctx;

    size_t spacing = offsetof(struct padding32_disaster, b) - offsetof(struct padding32_disaster, dumb);
    ASSERT_UINT_EQUALS(0, spacing % 32);

    return 0;
}
AWS_TEST_CASE(alignment32_test, s_alignment32_test_fn)

AWS_ALIGNED_TYPEDEF(uint8_t, aligned16_storage[64], 16);

struct padding16_disaster {
    uint8_t dumb;
    aligned16_storage b;
};

static int s_alignment16_test_fn(struct aws_allocator *allocator, void *ctx) {
    (void)allocator;
    (void)ctx;

    size_t spacing = offsetof(struct padding16_disaster, b) - offsetof(struct padding16_disaster, dumb);
    ASSERT_UINT_EQUALS(0, spacing % 16);

    return 0;
}
AWS_TEST_CASE(alignment16_test, s_alignment16_test_fn)

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/cache_test.c

/*
 * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
*/ #include #include #include #include static int s_test_lru_cache_overflow_static_members_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_cache *lru_cache = aws_cache_new_lru(allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 3); ASSERT_NOT_NULL(lru_cache); const char *first_key = "first"; const char *second_key = "second"; const char *third_key = "third"; const char *fourth_key = "fourth"; int first = 1; int second = 2; int third = 3; int fourth = 4; ASSERT_SUCCESS(aws_cache_put(lru_cache, first_key, &first)); ASSERT_SUCCESS(aws_cache_put(lru_cache, second_key, &second)); ASSERT_SUCCESS(aws_cache_put(lru_cache, third_key, &third)); ASSERT_INT_EQUALS(3, aws_cache_get_element_count(lru_cache)); int *value = NULL; ASSERT_SUCCESS(aws_cache_find(lru_cache, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); ASSERT_SUCCESS(aws_cache_find(lru_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_find(lru_cache, third_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(third, *value); ASSERT_SUCCESS(aws_cache_put(lru_cache, fourth_key, (void **)&fourth)); /* make sure the oldest entry was purged. Note, value should now be NULL but * the call succeeds. */ ASSERT_SUCCESS(aws_cache_find(lru_cache, first_key, (void **)&value)); ASSERT_NULL(value); ASSERT_SUCCESS(aws_cache_find(lru_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_find(lru_cache, third_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(third, *value); ASSERT_SUCCESS(aws_cache_find(lru_cache, fourth_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(fourth, *value); aws_cache_destroy(lru_cache); return 0; } AWS_TEST_CASE(test_lru_cache_overflow_static_members, s_test_lru_cache_overflow_static_members_fn) static int s_test_lru_cache_lru_ness_static_members_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_cache *lru_cache = aws_cache_new_lru(allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 3); ASSERT_NOT_NULL(lru_cache); const char *first_key = "first"; const char *second_key = "second"; const char *third_key = "third"; const char *fourth_key = "fourth"; int first = 1; int second = 2; int third = 3; int fourth = 4; ASSERT_SUCCESS(aws_cache_put(lru_cache, first_key, &first)); ASSERT_SUCCESS(aws_cache_put(lru_cache, second_key, &second)); ASSERT_SUCCESS(aws_cache_put(lru_cache, third_key, &third)); ASSERT_INT_EQUALS(3, aws_cache_get_element_count(lru_cache)); int *value = NULL; ASSERT_SUCCESS(aws_cache_find(lru_cache, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); ASSERT_SUCCESS(aws_cache_find(lru_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_put(lru_cache, fourth_key, (void **)&fourth)); /* The third element is the LRU element (see above). Note, value should now * be NULL but the call succeeds. 
*/ ASSERT_SUCCESS(aws_cache_find(lru_cache, third_key, (void **)&value)); ASSERT_NULL(value); ASSERT_SUCCESS(aws_cache_find(lru_cache, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); ASSERT_SUCCESS(aws_cache_find(lru_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_find(lru_cache, fourth_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(fourth, *value); aws_cache_destroy(lru_cache); return 0; } AWS_TEST_CASE(test_lru_cache_lru_ness_static_members, s_test_lru_cache_lru_ness_static_members_fn) static int s_test_lru_cache_element_access_members_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_cache *lru_cache = aws_cache_new_lru(allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 3); ASSERT_NOT_NULL(lru_cache); int *value = NULL; ASSERT_NULL(aws_lru_cache_use_lru_element(lru_cache)); ASSERT_NULL(aws_lru_cache_get_mru_element(lru_cache)); const char *first_key = "first"; const char *second_key = "second"; const char *third_key = "third"; int first = 1; int second = 2; int third = 3; ASSERT_SUCCESS(aws_cache_put(lru_cache, first_key, &first)); ASSERT_SUCCESS(aws_cache_put(lru_cache, second_key, &second)); ASSERT_SUCCESS(aws_cache_put(lru_cache, third_key, &third)); ASSERT_INT_EQUALS(3, aws_cache_get_element_count(lru_cache)); value = aws_lru_cache_get_mru_element(lru_cache); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(third, *value); value = aws_lru_cache_use_lru_element(lru_cache); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); value = aws_lru_cache_get_mru_element(lru_cache); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); aws_cache_destroy(lru_cache); return 0; } AWS_TEST_CASE(test_lru_cache_element_access_members, s_test_lru_cache_element_access_members_fn) static int s_test_fifo_cache_overflow_static_members_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_cache *fifo_cache = aws_cache_new_fifo(allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 3); ASSERT_NOT_NULL(fifo_cache); const char *first_key = "first"; const char *second_key = "second"; const char *third_key = "third"; const char *fourth_key = "fourth"; int first = 1; int second = 2; int third = 3; int fourth = 4; ASSERT_SUCCESS(aws_cache_put(fifo_cache, first_key, &first)); ASSERT_SUCCESS(aws_cache_put(fifo_cache, second_key, &second)); ASSERT_SUCCESS(aws_cache_put(fifo_cache, third_key, &third)); ASSERT_INT_EQUALS(3, aws_cache_get_element_count(fifo_cache)); int *value = NULL; ASSERT_SUCCESS(aws_cache_find(fifo_cache, third_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(third, *value); ASSERT_SUCCESS(aws_cache_find(fifo_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_find(fifo_cache, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); ASSERT_SUCCESS(aws_cache_put(fifo_cache, fourth_key, (void **)&fourth)); /* make sure the oldest entry was purged. Note, value should now be NULL but * the call succeeds. 
*/ ASSERT_SUCCESS(aws_cache_find(fifo_cache, first_key, (void **)&value)); ASSERT_NULL(value); ASSERT_SUCCESS(aws_cache_find(fifo_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_find(fifo_cache, third_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(third, *value); ASSERT_SUCCESS(aws_cache_find(fifo_cache, fourth_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(fourth, *value); aws_cache_destroy(fifo_cache); return 0; } AWS_TEST_CASE(test_fifo_cache_overflow_static_members, s_test_fifo_cache_overflow_static_members_fn) static int s_test_lifo_cache_overflow_static_members_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_cache *lifo_cache = aws_cache_new_lifo(allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 3); ASSERT_NOT_NULL(lifo_cache); const char *first_key = "first"; const char *second_key = "second"; const char *third_key = "third"; const char *fourth_key = "fourth"; int first = 1; int second = 2; int third = 3; int fourth = 4; ASSERT_SUCCESS(aws_cache_put(lifo_cache, first_key, &first)); ASSERT_SUCCESS(aws_cache_put(lifo_cache, second_key, &second)); ASSERT_SUCCESS(aws_cache_put(lifo_cache, third_key, &third)); ASSERT_INT_EQUALS(3, aws_cache_get_element_count(lifo_cache)); int *value = NULL; ASSERT_SUCCESS(aws_cache_find(lifo_cache, third_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(third, *value); ASSERT_SUCCESS(aws_cache_find(lifo_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_find(lifo_cache, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); ASSERT_SUCCESS(aws_cache_put(lifo_cache, fourth_key, (void **)&fourth)); /* make sure the latest entry was purged. Note, value should now be NULL but * the call succeeds. 
*/ ASSERT_SUCCESS(aws_cache_find(lifo_cache, third_key, (void **)&value)); ASSERT_NULL(value); ASSERT_SUCCESS(aws_cache_find(lifo_cache, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); ASSERT_SUCCESS(aws_cache_find(lifo_cache, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_cache_find(lifo_cache, fourth_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(fourth, *value); aws_cache_destroy(lifo_cache); return 0; } AWS_TEST_CASE(test_lifo_cache_overflow_static_members, s_test_lifo_cache_overflow_static_members_fn) struct cache_test_value_element { bool value_removed; }; static void s_cache_element_value_destroy(void *value) { struct cache_test_value_element *value_element = value; value_element->value_removed = true; } static int s_test_cache_entries_cleanup_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* take fifo cache as example, others are the same as it */ struct aws_cache *cache = aws_cache_new_fifo( allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, s_cache_element_value_destroy, 3); ASSERT_NOT_NULL(cache); const char *first_key = "first"; const char *second_key = "second"; struct cache_test_value_element first = {.value_removed = false}; struct cache_test_value_element second = {.value_removed = false}; ASSERT_SUCCESS(aws_cache_put(cache, first_key, &first)); ASSERT_SUCCESS(aws_cache_put(cache, second_key, &second)); ASSERT_INT_EQUALS(2, aws_cache_get_element_count(cache)); ASSERT_SUCCESS(aws_cache_remove(cache, second_key)); ASSERT_TRUE(second.value_removed); ASSERT_INT_EQUALS(1, aws_cache_get_element_count(cache)); aws_cache_clear(cache); ASSERT_INT_EQUALS(0, aws_cache_get_element_count(cache)); ASSERT_TRUE(first.value_removed); aws_cache_destroy(cache); return 0; } AWS_TEST_CASE(test_cache_entries_cleanup, s_test_cache_entries_cleanup_fn) static int s_test_cache_entries_overwrite_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* take fifo cache as example, others are the same as it */ struct aws_cache *cache = aws_cache_new_fifo( allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, s_cache_element_value_destroy, 3); ASSERT_NOT_NULL(cache); const char *first_key = "first"; struct cache_test_value_element first = {.value_removed = false}; struct cache_test_value_element second = {.value_removed = false}; ASSERT_SUCCESS(aws_cache_put(cache, first_key, &first)); ASSERT_SUCCESS(aws_cache_put(cache, first_key, &second)); ASSERT_INT_EQUALS(1, aws_cache_get_element_count(cache)); ASSERT_TRUE(first.value_removed); ASSERT_FALSE(second.value_removed); struct cache_test_value_element *value = NULL; ASSERT_SUCCESS(aws_cache_find(cache, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_PTR_EQUALS(&second, value); aws_cache_destroy(cache); return 0; } AWS_TEST_CASE(test_cache_entries_overwrite, s_test_cache_entries_overwrite_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/calloc_test.c000066400000000000000000000052131456575232400243230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include static void *s_calloc_stub(struct aws_allocator *allocator, size_t num, size_t size) { allocator->impl = (void *)(num * size); return calloc(num, size); } static void s_mem_release_stub(struct aws_allocator *allocator, void *ptr) { allocator->impl = 0; free(ptr); } static int s_test_calloc_on_given_allocator(struct aws_allocator *allocator, bool using_calloc_stub_impl) { /* Check that calloc gives 0ed memory */ char *p = aws_mem_calloc(allocator, 2, 4); ASSERT_NOT_NULL(p); for (size_t i = 0; i < 2 * 4; ++i) { ASSERT_TRUE(p[i] == 0); } if (using_calloc_stub_impl) { ASSERT_TRUE((intptr_t)allocator->impl == 8); } aws_mem_release(allocator, p); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_calloc_override, s_test_calloc_override_fn) static int s_test_calloc_override_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_allocator my_alloc = { .mem_calloc = s_calloc_stub, .mem_release = s_mem_release_stub, }; return s_test_calloc_on_given_allocator(&my_alloc, true); } AWS_TEST_CASE(test_calloc_fallback_from_default_allocator, s_test_calloc_fallback_from_default_allocator_fn) static int s_test_calloc_fallback_from_default_allocator_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_allocator my_alloc = *aws_default_allocator(); my_alloc.mem_calloc = NULL; return s_test_calloc_on_given_allocator(&my_alloc, false); } AWS_TEST_CASE(test_calloc_fallback_from_given, s_test_calloc_fallback_from_given_fn) static int s_test_calloc_fallback_from_given_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_allocator my_alloc = *allocator; my_alloc.mem_calloc = NULL; return s_test_calloc_on_given_allocator(&my_alloc, false); } AWS_TEST_CASE(test_calloc_from_default_allocator, s_test_calloc_from_default_allocator_fn) static int s_test_calloc_from_default_allocator_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; return s_test_calloc_on_given_allocator(aws_default_allocator(), false); } AWS_TEST_CASE(test_calloc_from_given_allocator, s_test_calloc_from_given_allocator_fn) static int s_test_calloc_from_given_allocator_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_calloc_on_given_allocator(allocator, false); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/clock_test.c000066400000000000000000000212351456575232400241630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include static int s_test_high_res_clock_increments(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t ticks = 0, prev = 0; for (unsigned i = 0; i < 100; ++i) { ASSERT_SUCCESS( aws_high_res_clock_get_ticks(&ticks), "High res get ticks failed with error %d", aws_last_error()); ASSERT_TRUE( ticks >= prev, "Next get ticks should have been greater than or equal to previous. previous %llu current %llu", (long long unsigned int)prev, (long long unsigned int)ticks); aws_thread_current_sleep(1000000); prev = ticks; } return 0; } static int s_test_sys_clock_increments(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t ticks = 0, prev = 0; for (unsigned i = 0; i < 100; ++i) { ASSERT_SUCCESS( aws_sys_clock_get_ticks(&ticks), "Sys clock res get ticks failed with error %d", aws_last_error()); ASSERT_TRUE( ticks >= prev, "Next get ticks should have been greater than or equal to previous. 
previous %llu current %llu", (long long unsigned int)prev, (long long unsigned int)ticks); aws_thread_current_sleep(1000000); prev = ticks; } return 0; } static int s_test_sec_and_millis_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t secs = 10; ASSERT_UINT_EQUALS(10000, aws_timestamp_convert(secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL)); ASSERT_UINT_EQUALS(secs, aws_timestamp_convert(10000, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, NULL)); return 0; } static int s_test_sec_and_micros_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t secs = 10; ASSERT_UINT_EQUALS(10000000, aws_timestamp_convert(secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MICROS, NULL)); ASSERT_UINT_EQUALS(secs, aws_timestamp_convert(10000000, AWS_TIMESTAMP_MICROS, AWS_TIMESTAMP_SECS, NULL)); return 0; } static int s_test_sec_and_nanos_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t secs = 10; ASSERT_UINT_EQUALS(10000000000, aws_timestamp_convert(secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); ASSERT_UINT_EQUALS(secs, aws_timestamp_convert(10000000000, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL)); return 0; } static int s_test_milli_and_micros_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t ms = 10; ASSERT_UINT_EQUALS(10000, aws_timestamp_convert(ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_MICROS, NULL)); ASSERT_UINT_EQUALS(ms, aws_timestamp_convert(10000, AWS_TIMESTAMP_MICROS, AWS_TIMESTAMP_MILLIS, NULL)); return 0; } static int s_test_milli_and_nanos_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t ms = 10; ASSERT_UINT_EQUALS(10000000, aws_timestamp_convert(ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); ASSERT_UINT_EQUALS(ms, aws_timestamp_convert(10000000, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL)); return 0; } static int s_test_micro_and_nanos_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t micros = 10; ASSERT_UINT_EQUALS(10000, aws_timestamp_convert(micros, AWS_TIMESTAMP_MICROS, AWS_TIMESTAMP_NANOS, NULL)); ASSERT_UINT_EQUALS(micros, aws_timestamp_convert(10000, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MICROS, NULL)); return 0; } static int s_test_precision_loss_remainders_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t nanos = 10123456789; uint64_t remainder = 0; ASSERT_UINT_EQUALS(10, aws_timestamp_convert(nanos, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder)); ASSERT_UINT_EQUALS(123456789, remainder); return 0; } static int s_test_overflow_conversion(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_UINT_EQUALS( UINT64_MAX, aws_timestamp_convert(100000000000ULL, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); ASSERT_UINT_EQUALS( UINT64_MAX, aws_timestamp_convert(100000000000000ULL, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MICROS, NULL)); ASSERT_UINT_EQUALS( UINT64_MAX, aws_timestamp_convert(100000000000000ULL, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); ASSERT_UINT_EQUALS( UINT64_MAX, aws_timestamp_convert(100000000000000000ULL, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL)); ASSERT_UINT_EQUALS( UINT64_MAX, aws_timestamp_convert(100000000000000000ULL, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_MICROS, NULL)); ASSERT_UINT_EQUALS( UINT64_MAX, aws_timestamp_convert(100000000000000000ULL, AWS_TIMESTAMP_MICROS, AWS_TIMESTAMP_NANOS, NULL)); return 0; } 
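/*
 * Illustrative note (not part of the original test file): aws_timestamp_convert() scales a timestamp
 * between units and, when a non-NULL remainder pointer is supplied, hands back the truncated portion
 * in the source unit instead of silently dropping it. A minimal sketch of splitting nanoseconds into
 * whole seconds plus leftover nanoseconds, using the same values as s_test_precision_loss_remainders_conversion above:
 *
 *   uint64_t remainder = 0;
 *   uint64_t secs = aws_timestamp_convert(10123456789ULL, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder);
 *   // expected: secs == 10, remainder == 123456789
 *
 * Conversions that would overflow a uint64_t appear to saturate to UINT64_MAX, as exercised by
 * s_test_overflow_conversion above.
 */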
AWS_TEST_CASE(high_res_clock_increments_test, s_test_high_res_clock_increments) AWS_TEST_CASE(sys_clock_increments_test, s_test_sys_clock_increments) AWS_TEST_CASE(test_sec_and_millis_conversions, s_test_sec_and_millis_conversion) AWS_TEST_CASE(test_sec_and_micros_conversions, s_test_sec_and_micros_conversion) AWS_TEST_CASE(test_sec_and_nanos_conversions, s_test_sec_and_nanos_conversion) AWS_TEST_CASE(test_milli_and_micros_conversion, s_test_milli_and_micros_conversion) AWS_TEST_CASE(test_milli_and_nanos_conversion, s_test_milli_and_nanos_conversion) AWS_TEST_CASE(test_micro_and_nanos_conversion, s_test_micro_and_nanos_conversion) AWS_TEST_CASE(test_precision_loss_remainders_conversion, s_test_precision_loss_remainders_conversion) AWS_TEST_CASE(test_overflow_conversion, s_test_overflow_conversion) #define ONE_MHZ 1000000ULL #define THREE_MHZ 3000000ULL #define TEN_MHZ 10000000ULL #define SIXTEEN_MHZ 16000000ULL #define ONE_GHZ 1000000000ULL #define TWO_GHZ 2000000000ULL #define THIRTY_DAYS_IN_SECONDS (30ULL * 24 * 3600) #define SIXTY_DAYS_IN_SECONDS (60ULL * 24 * 3600) #define FIVE_YEARS_IN_SECONDS (5ULL * 365 * 24 * 3600) /* * A test for a variety of edge cases that would unnecessarily overflow the old conversion logic. */ static int s_test_old_overflow_cases(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t timestamp = 0; /* * https://github.com/awslabs/aws-c-common/issues/790 */ timestamp = aws_timestamp_convert_u64(18446744073710ULL, ONE_MHZ, AWS_TIMESTAMP_NANOS, NULL); ASSERT_UINT_EQUALS(timestamp, 18446744073710000ULL); /* * 30 days of ticks at 3Mhz to nanos: https://github.com/awslabs/aws-c-common/pull/791#issuecomment-821784745 */ timestamp = aws_timestamp_convert_u64(THIRTY_DAYS_IN_SECONDS * THREE_MHZ, THREE_MHZ, AWS_TIMESTAMP_NANOS, NULL); ASSERT_UINT_EQUALS(timestamp, 2592000000000000ULL); /* * Same duration, but at 16Mhz */ timestamp = aws_timestamp_convert_u64(THIRTY_DAYS_IN_SECONDS * SIXTEEN_MHZ, SIXTEEN_MHZ, AWS_TIMESTAMP_NANOS, NULL); ASSERT_UINT_EQUALS(timestamp, 2592000000000000ULL); /* * 60 days at 1ghz (could be shortcutted internally since frequencies are equal) */ timestamp = aws_timestamp_convert_u64(SIXTY_DAYS_IN_SECONDS * ONE_GHZ, ONE_GHZ, AWS_TIMESTAMP_NANOS, NULL); ASSERT_UINT_EQUALS(timestamp, 5184000000000000ULL); /* * 60 days at 2ghz */ timestamp = aws_timestamp_convert_u64(SIXTY_DAYS_IN_SECONDS * TWO_GHZ, TWO_GHZ, AWS_TIMESTAMP_NANOS, NULL); ASSERT_UINT_EQUALS(timestamp, 5184000000000000ULL); /* * 60 days at 2ghz with a little bit more for some remainder */ timestamp = aws_timestamp_convert_u64(SIXTY_DAYS_IN_SECONDS * TWO_GHZ + 123, TWO_GHZ, AWS_TIMESTAMP_NANOS, NULL); ASSERT_UINT_EQUALS(timestamp, 5184000000000061ULL); /* * Five years at 10mhz + remainder */ timestamp = aws_timestamp_convert_u64(FIVE_YEARS_IN_SECONDS * TEN_MHZ + 5, TEN_MHZ, AWS_TIMESTAMP_NANOS, NULL); ASSERT_UINT_EQUALS(timestamp, 157680000000000500ULL); /* * large ns -> 1mhz */ timestamp = aws_timestamp_convert_u64(THIRTY_DAYS_IN_SECONDS * ONE_GHZ + 123456789, ONE_GHZ, ONE_MHZ, NULL); ASSERT_UINT_EQUALS(timestamp, 2592000000000ULL + 123456); /* * large ns -> 3mhz */ timestamp = aws_timestamp_convert_u64(FIVE_YEARS_IN_SECONDS * ONE_GHZ + 1001, ONE_GHZ, THREE_MHZ, NULL); ASSERT_UINT_EQUALS(timestamp, 473040000000000ULL + 3); return 0; } AWS_TEST_CASE(test_old_overflow_cases, s_test_old_overflow_cases) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/command_line_parser_test.c000066400000000000000000000205471456575232400270760ustar00rootroot00000000000000/** * 
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* If this is tested from a dynamic library, the static state needs to be reset */ static void s_reset_static_state(void) { aws_cli_optind = 1; } static int s_test_short_argument_parse_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_cli_option options[] = { {.name = NULL, .has_arg = AWS_CLI_OPTIONS_NO_ARGUMENT, .flag = NULL, .val = 'a'}, {.name = "beeee", .has_arg = AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, .flag = NULL, .val = 'b'}, {.name = NULL, .has_arg = AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT, .flag = NULL, .val = 'c'}, {.name = NULL, .has_arg = 0, .flag = NULL, .val = 0}, }; char *const args[] = { "prog-name", "-a", "-b", "bval", "-c", }; int argc = 5; int longindex = 0; s_reset_static_state(); int arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('a', arg); ASSERT_INT_EQUALS(0, longindex); ASSERT_INT_EQUALS(2, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('b', arg); ASSERT_STR_EQUALS("bval", aws_cli_optarg); ASSERT_INT_EQUALS(1, longindex); ASSERT_INT_EQUALS(4, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('c', arg); ASSERT_INT_EQUALS(2, longindex); ASSERT_INT_EQUALS(-1, aws_cli_getopt_long(argc, args, "ab:c", options, &longindex)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(short_argument_parse, s_test_short_argument_parse_fn) static int s_test_long_argument_parse_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_cli_option options[] = { {.name = "aaee", .has_arg = AWS_CLI_OPTIONS_NO_ARGUMENT, .flag = NULL, .val = 'a'}, {.name = "beeee", .has_arg = AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, .flag = NULL, .val = 'b'}, {.name = "cceeee", .has_arg = AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT, .flag = NULL, .val = 'c'}, {.name = NULL, .has_arg = 0, .flag = NULL, .val = 0}, }; char *const args[] = { "prog-name", "--aaee", "--beeee", "bval", "-cceeee", }; int argc = 5; int longindex = 0; s_reset_static_state(); int arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('a', arg); ASSERT_INT_EQUALS(0, longindex); ASSERT_INT_EQUALS(2, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('b', arg); ASSERT_STR_EQUALS("bval", aws_cli_optarg); ASSERT_INT_EQUALS(1, longindex); ASSERT_INT_EQUALS(4, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('c', arg); ASSERT_INT_EQUALS(2, longindex); ASSERT_INT_EQUALS(-1, aws_cli_getopt_long(argc, args, "ab:c", options, &longindex)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(long_argument_parse, s_test_long_argument_parse_fn) static int s_test_unqualified_argument_parse_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_cli_option options[] = { {.name = "aaee", .has_arg = AWS_CLI_OPTIONS_NO_ARGUMENT, .flag = NULL, .val = 'a'}, {.name = "beeee", .has_arg = AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, .flag = NULL, .val = 'b'}, {.name = "cceeee", .has_arg = AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT, .flag = NULL, .val = 'c'}, {.name = NULL, .has_arg = 0, .flag = NULL, .val = 0}, }; char *const args[] = {"prog-name", "-a", "--beeee", "bval", "-c", "operand"}; int argc = 6; int longindex = 0; s_reset_static_state(); int arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); 
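/* Descriptive note: each aws_cli_getopt_long() call consumes the next recognized option. Here "-a"
 * should come back first with aws_cli_optind advanced past it, then "--beeee" with its required
 * argument "bval", then "-c", before the positional "operand" is reported via aws_cli_positional_arg. */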
ASSERT_INT_EQUALS('a', arg); ASSERT_INT_EQUALS(0, longindex); ASSERT_INT_EQUALS(2, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('b', arg); ASSERT_STR_EQUALS("bval", aws_cli_optarg); ASSERT_INT_EQUALS(1, longindex); ASSERT_INT_EQUALS(4, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('c', arg); ASSERT_INT_EQUALS(2, longindex); ASSERT_INT_EQUALS(0x02, aws_cli_getopt_long(argc, args, "ab:c", options, &longindex)); ASSERT_TRUE(aws_cli_optind == argc); ASSERT_STR_EQUALS("operand", aws_cli_positional_arg); return AWS_OP_SUCCESS; } AWS_TEST_CASE(unqualified_argument_parse, s_test_unqualified_argument_parse_fn) static int s_test_unknown_argument_parse_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_cli_option options[] = { {.name = "aaee", .has_arg = AWS_CLI_OPTIONS_NO_ARGUMENT, .flag = NULL, .val = 'a'}, {.name = "beeee", .has_arg = AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, .flag = NULL, .val = 'b'}, {.name = "cceeee", .has_arg = AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT, .flag = NULL, .val = 'c'}, {.name = NULL, .has_arg = 0, .flag = NULL, .val = 0}, }; char *const args[] = {"prog-name", "-BOO!", "--beeee", "bval", "-c", "operand"}; int argc = 6; int longindex = 0; s_reset_static_state(); int arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('?', arg); ASSERT_INT_EQUALS(0, longindex); ASSERT_INT_EQUALS(2, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('b', arg); ASSERT_STR_EQUALS("bval", aws_cli_optarg); ASSERT_INT_EQUALS(1, longindex); ASSERT_INT_EQUALS(4, aws_cli_optind); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_INT_EQUALS('c', arg); ASSERT_INT_EQUALS(2, longindex); arg = aws_cli_getopt_long(argc, args, "ab:c", options, &longindex); ASSERT_TRUE(arg == 0x02); ASSERT_TRUE(aws_cli_optind == argc); ASSERT_STR_EQUALS("operand", aws_cli_positional_arg); return AWS_OP_SUCCESS; } AWS_TEST_CASE(unknown_argument_parse, s_test_unknown_argument_parse_fn) struct subcommand_dispatch_data { const char *command_name; int argc; char *const *argv; }; static int s_subcommand_callback(int argc, char *const argv[], const char *command_name, void *user_data) { struct subcommand_dispatch_data *dispatch_data = user_data; dispatch_data->command_name = command_name; dispatch_data->argc = argc; dispatch_data->argv = argv; return AWS_OP_SUCCESS; } static int s_test_command_dispatch_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct subcommand_dispatch_data dispatch_data; AWS_ZERO_STRUCT(dispatch_data); struct aws_cli_subcommand_dispatch dispatch_table[] = { { .command_name = "command1", .subcommand_fn = s_subcommand_callback, }, { .command_name = "command2", .subcommand_fn = s_subcommand_callback, }, }; char *const args_1[] = {"prog-name", "command1", "-BOO!", "--beeee", "bval", "-c", "operand"}; ASSERT_SUCCESS(aws_cli_dispatch_on_subcommand(7, args_1, dispatch_table, 2, &dispatch_data)); ASSERT_STR_EQUALS("command1", dispatch_data.command_name); ASSERT_INT_EQUALS(6, dispatch_data.argc); ASSERT_STR_EQUALS("command1", dispatch_data.argv[0]); AWS_ZERO_STRUCT(dispatch_data); char *const args_2[] = {"prog-name", "command2", "-BOO!", "--beeee", "bval", "-c", "operand"}; ASSERT_SUCCESS(aws_cli_dispatch_on_subcommand(7, args_2, dispatch_table, 2, &dispatch_data)); ASSERT_STR_EQUALS("command2", dispatch_data.command_name); 
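/* The dispatcher is expected to strip the program name, so the subcommand callback sees argc reduced
 * by one (6 instead of 7) and argv[0] pointing at the subcommand token itself. */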
ASSERT_INT_EQUALS(6, dispatch_data.argc); ASSERT_STR_EQUALS("command2", dispatch_data.argv[0]); char *const args_3[] = {"prog-name", "command3", "-BOO!", "--beeee", "bval", "-c", "operand"}; ASSERT_ERROR(AWS_ERROR_UNIMPLEMENTED, aws_cli_dispatch_on_subcommand(7, args_3, dispatch_table, 2, &dispatch_data)); char *const args_4[] = {"prog-name"}; ASSERT_ERROR( AWS_ERROR_INVALID_ARGUMENT, aws_cli_dispatch_on_subcommand(1, args_4, dispatch_table, 2, &dispatch_data)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_command_dispatch, s_test_command_dispatch_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/condition_variable_test.c000066400000000000000000000122541456575232400267240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include struct condition_predicate_args { int call_count; }; struct conditional_test_data { struct aws_mutex mutex; struct aws_condition_variable condition_variable_1; struct aws_condition_variable condition_variable_2; struct condition_predicate_args *predicate_args; int thread_1; int thread_2; int thread_3; }; static bool s_conditional_predicate(void *arg) { struct condition_predicate_args *condition_predicate_args = (struct condition_predicate_args *)arg; condition_predicate_args->call_count++; return condition_predicate_args->call_count % 2 == 0; } static void s_conditional_thread_2_fn(void *arg) { struct conditional_test_data *test_data = (struct conditional_test_data *)arg; aws_mutex_lock(&test_data->mutex); while (!test_data->thread_1) { aws_condition_variable_wait_pred( &test_data->condition_variable_1, &test_data->mutex, s_conditional_predicate, test_data->predicate_args); } test_data->thread_2 = 1; aws_condition_variable_notify_one(&test_data->condition_variable_2); aws_mutex_unlock(&test_data->mutex); } static void s_conditional_thread_3_fn(void *arg) { struct conditional_test_data *test_data = (struct conditional_test_data *)arg; aws_mutex_lock(&test_data->mutex); while (!test_data->thread_1) { aws_condition_variable_wait_pred( &test_data->condition_variable_1, &test_data->mutex, s_conditional_predicate, test_data->predicate_args); } test_data->thread_3 = 1; aws_condition_variable_notify_one(&test_data->condition_variable_2); aws_mutex_unlock(&test_data->mutex); } static int s_test_conditional_notify_one_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct condition_predicate_args predicate_args = {.call_count = 0}; struct conditional_test_data test_data = {.condition_variable_1 = AWS_CONDITION_VARIABLE_INIT, .condition_variable_2 = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .predicate_args = &predicate_args, .thread_1 = 0, .thread_2 = 0, .thread_3 = 0}; ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex)); struct aws_thread thread; ASSERT_SUCCESS(aws_thread_init(&thread, allocator)); ASSERT_SUCCESS(aws_thread_launch(&thread, s_conditional_thread_2_fn, &test_data, NULL)); test_data.thread_1 = 1; ASSERT_SUCCESS(aws_condition_variable_notify_one(&test_data.condition_variable_1)); while (!test_data.thread_2) { ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_data.condition_variable_2, &test_data.mutex, s_conditional_predicate, &predicate_args)); } ASSERT_SUCCESS(aws_mutex_unlock(&test_data.mutex)); aws_thread_join(&thread); aws_thread_clean_up(&thread); ASSERT_TRUE(predicate_args.call_count >= 2); return AWS_OP_SUCCESS; } AWS_TEST_CASE(conditional_notify_one, s_test_conditional_notify_one_fn) static int 
s_test_conditional_notify_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct condition_predicate_args predicate_args = {.call_count = 0}; struct conditional_test_data test_data = {.condition_variable_1 = AWS_CONDITION_VARIABLE_INIT, .condition_variable_2 = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .predicate_args = &predicate_args, .thread_1 = 0, .thread_2 = 0, .thread_3 = 0}; ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex)); struct aws_thread thread_2; ASSERT_SUCCESS(aws_thread_init(&thread_2, allocator)); ASSERT_SUCCESS(aws_thread_launch(&thread_2, s_conditional_thread_2_fn, &test_data, NULL)); struct aws_thread thread_3; ASSERT_SUCCESS(aws_thread_init(&thread_3, allocator)); ASSERT_SUCCESS(aws_thread_launch(&thread_3, s_conditional_thread_3_fn, &test_data, NULL)); test_data.thread_1 = 1; ASSERT_SUCCESS(aws_condition_variable_notify_all(&test_data.condition_variable_1)); while (!test_data.thread_2 && !test_data.thread_3) { ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_data.condition_variable_2, &test_data.mutex, s_conditional_predicate, &predicate_args)); } ASSERT_SUCCESS(aws_mutex_unlock(&test_data.mutex)); aws_thread_join(&thread_2); aws_thread_join(&thread_3); aws_thread_clean_up(&thread_2); aws_thread_clean_up(&thread_3); ASSERT_TRUE(predicate_args.call_count >= 2); return AWS_OP_SUCCESS; } AWS_TEST_CASE(conditional_notify_all, s_test_conditional_notify_all_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/cpuid_test.c000066400000000000000000000023341456575232400241730ustar00rootroot00000000000000/* * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include static int s_cpuid_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* TODO: make sure those check returns the expected value. */ aws_cpu_has_feature(AWS_CPU_FEATURE_CLMUL); aws_cpu_has_feature(AWS_CPU_FEATURE_SSE_4_1); aws_cpu_has_feature(AWS_CPU_FEATURE_SSE_4_2); aws_cpu_has_feature(AWS_CPU_FEATURE_AVX2); aws_cpu_has_feature(AWS_CPU_FEATURE_AVX512); aws_cpu_has_feature(AWS_CPU_FEATURE_ARM_CRC); aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2); return AWS_OP_SUCCESS; } AWS_TEST_CASE(cpuid_test, s_cpuid_test_fn); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/cross_process_lock_tests.c000066400000000000000000000114721456575232400271540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include static int s_test_cross_process_lock_works_in_proc(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); struct aws_byte_cursor lock_nonce = aws_byte_cursor_from_c_str("lock_nonce"); struct aws_cross_process_lock *instance_lock = aws_cross_process_lock_try_acquire(allocator, lock_nonce); ASSERT_NOT_NULL(instance_lock); struct aws_cross_process_lock *should_be_null = aws_cross_process_lock_try_acquire(allocator, lock_nonce); ASSERT_NULL(should_be_null); aws_cross_process_lock_release(instance_lock); struct aws_cross_process_lock *should_not_be_null = aws_cross_process_lock_try_acquire(allocator, lock_nonce); ASSERT_NOT_NULL(should_not_be_null); aws_cross_process_lock_release(should_not_be_null); aws_common_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_cross_process_lock_works_in_proc, s_test_cross_process_lock_works_in_proc) static int s_cross_process_lock_mp_test_runner(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); /* so the test runner doesn't actually run the portion of the test we want to test unless we're invoking it as a * subprocess. */ struct aws_string *test_run_gate = aws_string_new_from_c_str(allocator, "aws_crt_test_run_gate"); struct aws_string *output_val = NULL; if (aws_get_environment_value(allocator, test_run_gate, &output_val) == AWS_OP_SUCCESS && output_val) { aws_string_destroy(output_val); struct aws_byte_cursor lock_nonce = aws_byte_cursor_from_c_str("lock_mp_nonce"); struct aws_cross_process_lock *instance_lock = aws_cross_process_lock_try_acquire(allocator, lock_nonce); ASSERT_NOT_NULL(instance_lock); aws_cross_process_lock_release(instance_lock); } aws_string_destroy(test_run_gate); aws_common_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(cross_process_lock_mp_test_runner, s_cross_process_lock_mp_test_runner) static int s_test_cross_process_lock_works_cross_proc(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); struct aws_string *test_run_gate = aws_string_new_from_c_str(allocator, "aws_crt_test_run_gate"); struct aws_string *test_run_gate_val = aws_string_new_from_c_str(allocator, "ON"); /* so the test runner doesn't actually run the portion of the test we want to test unless we're invoking it from * here. */ ASSERT_SUCCESS(aws_set_environment_value(test_run_gate, test_run_gate_val)); aws_string_destroy(test_run_gate_val); aws_string_destroy(test_run_gate); /* Invoke the test runner in a new process for ease so cmake automatically does the work for us. 
*/ struct aws_run_command_options command_options = { #ifdef _WIN32 .command = "aws-c-common-tests cross_process_lock_mp_test_runner", #else .command = "./aws-c-common-tests cross_process_lock_mp_test_runner", #endif /* _WIN32 */ }; struct aws_run_command_result result; AWS_ZERO_STRUCT(result); ASSERT_SUCCESS(aws_run_command(allocator, &command_options, &result)); ASSERT_TRUE(result.ret_code == 0); aws_run_command_result_cleanup(&result); AWS_ZERO_STRUCT(result); struct aws_byte_cursor lock_nonce = aws_byte_cursor_from_c_str("lock_mp_nonce"); struct aws_cross_process_lock *instance_lock = aws_cross_process_lock_try_acquire(allocator, lock_nonce); ASSERT_NOT_NULL(instance_lock); ASSERT_SUCCESS(aws_run_command(allocator, &command_options, &result)); ASSERT_FALSE(result.ret_code == 0); aws_run_command_result_cleanup(&result); aws_cross_process_lock_release(instance_lock); aws_common_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_cross_process_lock_works_cross_proc, s_test_cross_process_lock_works_cross_proc) static int s_test_cross_process_lock_invalid_nonce(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); #ifdef _WIN32 struct aws_byte_cursor lock_nonce = aws_byte_cursor_from_c_str("invalid\\lock_nonce"); #else struct aws_byte_cursor lock_nonce = aws_byte_cursor_from_c_str("invalid/lock_nonce"); #endif /* _WIN32 */ struct aws_cross_process_lock *instance_lock = aws_cross_process_lock_try_acquire(allocator, lock_nonce); ASSERT_NULL(instance_lock); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); aws_common_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_cross_process_lock_invalid_nonce_fails, s_test_cross_process_lock_invalid_nonce) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/cursor_test.c000066400000000000000000000411121456575232400244010ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #ifndef SSIZE_MAX # define SSIZE_MAX (SIZE_MAX >> 1) #endif AWS_TEST_CASE(nospec_index_test, s_nospec_index_test_fn) static int s_nospec_index_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_UINT_EQUALS(UINTPTR_MAX, aws_nospec_mask(0, 1)); ASSERT_UINT_EQUALS(0, aws_nospec_mask(0, 0)); ASSERT_UINT_EQUALS(UINTPTR_MAX, aws_nospec_mask(0, SSIZE_MAX)); ASSERT_UINT_EQUALS(0, aws_nospec_mask(0, SIZE_MAX)); ASSERT_UINT_EQUALS(0, aws_nospec_mask(1, 1)); ASSERT_UINT_EQUALS(UINTPTR_MAX, aws_nospec_mask(1, 2)); ASSERT_UINT_EQUALS(UINTPTR_MAX, aws_nospec_mask(1, 4)); ASSERT_UINT_EQUALS(UINTPTR_MAX, aws_nospec_mask(1, SSIZE_MAX)); ASSERT_UINT_EQUALS(0, aws_nospec_mask(1, SIZE_MAX)); ASSERT_UINT_EQUALS(0, aws_nospec_mask(1, 0)); ASSERT_UINT_EQUALS(0, aws_nospec_mask(4, 3)); ASSERT_UINT_EQUALS(0, aws_nospec_mask(4, 4)); ASSERT_UINT_EQUALS(UINTPTR_MAX, aws_nospec_mask(4, 5)); ASSERT_UINT_EQUALS(UINTPTR_MAX, aws_nospec_mask((SIZE_MAX >> 1) - 1, (SIZE_MAX >> 1))); ASSERT_UINT_EQUALS(0, aws_nospec_mask((SIZE_MAX >> 1) + 1, (SIZE_MAX >> 1))); ASSERT_UINT_EQUALS(0, aws_nospec_mask((SIZE_MAX >> 1), (SIZE_MAX >> 1) + 1)); return 0; } #define ASSERT_NOADVANCE(advlen, cursorlen) \ do { \ struct aws_byte_cursor cursor; \ cursor.ptr = (uint8_t *)&cursor; \ cursor.len = (cursorlen); \ struct aws_byte_cursor rv = advance(&cursor, (advlen)); \ ASSERT_NULL(rv.ptr, "advance(cursorlen=%s, advlen=%s) should fail", #cursorlen, #advlen); \ ASSERT_UINT_EQUALS(0, rv.len, "advance(cursorlen=%s, advlen=%s) should fail", #cursorlen, #advlen); \ } while (0) #define ASSERT_ADVANCE(advlen, cursorlen) \ do { \ uint8_t *orig_cursor; \ struct aws_byte_cursor cursor; \ cursor.len = (cursorlen); \ cursor.ptr = orig_cursor = malloc(cursor.len); \ if (!cursor.ptr) { \ abort(); \ } \ struct aws_byte_cursor rv = advance(&cursor, (advlen)); \ ASSERT_PTR_EQUALS(orig_cursor, rv.ptr, "Wrong ptr in advance(cursorlen=%s, advlen=%s)", #cursorlen, #advlen); \ ASSERT_PTR_EQUALS(orig_cursor + (advlen), cursor.ptr, "Wrong new cursorptr in advance"); \ ASSERT_UINT_EQUALS((advlen), rv.len, "Wrong returned length"); \ ASSERT_UINT_EQUALS((cursorlen) - (advlen), cursor.len, "Wrong residual length"); \ free(orig_cursor); \ } while (0) static int s_test_byte_cursor_advance_internal( struct aws_byte_cursor (*advance)(struct aws_byte_cursor *cursor, size_t len)) { ASSERT_ADVANCE(0, 1); ASSERT_ADVANCE(1, 1); ASSERT_NOADVANCE(2, 1); ASSERT_ADVANCE(4, 5); ASSERT_ADVANCE(5, 5); ASSERT_NOADVANCE(6, 5); ASSERT_NOADVANCE((SIZE_MAX >> 1) + 1, (SIZE_MAX >> 1)); ASSERT_NOADVANCE((SIZE_MAX >> 1), (SIZE_MAX >> 1) + 1); return 0; } AWS_TEST_CASE(test_byte_cursor_advance, s_test_byte_cursor_advance_fn) static int s_test_byte_cursor_advance_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; return s_test_byte_cursor_advance_internal(aws_byte_cursor_advance); } AWS_TEST_CASE(test_byte_cursor_advance_nospec, s_test_byte_cursor_advance_nospec_fn) static int s_test_byte_cursor_advance_nospec_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; return s_test_byte_cursor_advance_internal(aws_byte_cursor_advance_nospec); } static const uint8_t TEST_VECTOR[] = { 0xaa, 0xbb, 0xaa, /* aba */ 0xbb, 0xcc, 0xbb, /* bcb */ 0x42, /* u8 */ 0x12, 0x34, /* be16 */ 0xab, 0xcd, 0xef, /* be24 */ 0x45, 0x67, 0x89, 0xab, /* be32 */ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, /* be64 */ 0x42, 0x42, 0x42, /* u8_n */ }; AWS_TEST_CASE(byte_cursor_write_tests, 
s_byte_cursor_write_tests_fn); static int s_byte_cursor_write_tests_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t buf[sizeof(TEST_VECTOR) + 1]; memset(buf, 0, sizeof(buf)); buf[sizeof(buf) - 1] = 0x99; struct aws_byte_buf cur = aws_byte_buf_from_empty_array(buf, sizeof(buf) - 1); uint8_t aba[] = {0xaa, 0xbb, 0xaa}; uint8_t bcb[] = {0xbb, 0xcc, 0xbb}; ASSERT_TRUE(aws_byte_buf_write(&cur, aba, sizeof(aba))); struct aws_byte_buf bcb_buf = aws_byte_buf_from_array(bcb, sizeof(bcb)); ASSERT_TRUE(aws_byte_buf_write_from_whole_buffer(&cur, bcb_buf)); ASSERT_TRUE(aws_byte_buf_write_u8(&cur, 0x42)); ASSERT_TRUE(aws_byte_buf_write_be16(&cur, 0x1234)); ASSERT_TRUE(aws_byte_buf_write_be24(&cur, 0xabcdef)); ASSERT_TRUE(aws_byte_buf_write_be32(&cur, 0x456789ab)); ASSERT_TRUE(aws_byte_buf_write_be64(&cur, (uint64_t)0x1122334455667788ULL)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&cur, 0x42, 3)); ASSERT_FALSE(aws_byte_buf_write_u8(&cur, 0xFF)); ASSERT_UINT_EQUALS(0x99, buf[sizeof(buf) - 1]); ASSERT_BIN_ARRAYS_EQUALS(TEST_VECTOR, sizeof(TEST_VECTOR), buf, sizeof(TEST_VECTOR)); return 0; } AWS_TEST_CASE(byte_cursor_read_tests, s_byte_cursor_read_tests_fn); static int s_byte_cursor_read_tests_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_cursor cur = aws_byte_cursor_from_array(TEST_VECTOR, sizeof(TEST_VECTOR)); uint8_t aba[3] = {0}; uint8_t bcb[3] = {0}; ASSERT_TRUE(aws_byte_cursor_read(&cur, aba, sizeof(aba))); struct aws_byte_buf buf = aws_byte_buf_from_empty_array(bcb, sizeof(bcb)); ASSERT_TRUE(aws_byte_cursor_read_and_fill_buffer(&cur, &buf)); uint8_t aba_expect[] = {0xaa, 0xbb, 0xaa}, bcb_expect[] = {0xbb, 0xcc, 0xbb}; ASSERT_BIN_ARRAYS_EQUALS(aba_expect, 3, aba, 3); ASSERT_BIN_ARRAYS_EQUALS(bcb_expect, 3, bcb, 3); uint8_t u8; ASSERT_TRUE(aws_byte_cursor_read_u8(&cur, &u8)); ASSERT_UINT_EQUALS(u8, 0x42); uint16_t u16; ASSERT_TRUE(aws_byte_cursor_read_be16(&cur, &u16)); ASSERT_UINT_EQUALS(u16, 0x1234); uint32_t u24; ASSERT_TRUE(aws_byte_cursor_read_be24(&cur, &u24)); ASSERT_UINT_EQUALS(u24, 0xabcdef); uint32_t u32; ASSERT_TRUE(aws_byte_cursor_read_be32(&cur, &u32)); ASSERT_UINT_EQUALS(u32, 0x456789ab); uint64_t u64; ASSERT_TRUE(aws_byte_cursor_read_be64(&cur, &u64)); ASSERT_UINT_EQUALS(u64, (uint64_t)0x1122334455667788ULL); /* advance past "u8_n" data */ ASSERT_UINT_EQUALS(3, aws_byte_cursor_advance(&cur, 3).len); ASSERT_FALSE(aws_byte_cursor_read_u8(&cur, &u8)); ASSERT_UINT_EQUALS(u8, 0x42); return 0; } AWS_TEST_CASE(byte_cursor_limit_tests, s_byte_cursor_limit_tests_fn); static int s_byte_cursor_limit_tests_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t buf[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}; uint8_t starting_buf[sizeof(buf)]; memcpy(starting_buf, buf, sizeof(buf)); struct aws_byte_cursor cur = aws_byte_cursor_from_array(buf, sizeof(buf)); struct aws_byte_buf buffer = aws_byte_buf_from_empty_array(buf, sizeof(buf)); uint64_t u64 = 0; uint32_t u32 = 0; uint16_t u16 = 0; uint8_t u8 = 0; uint8_t arr[2] = {0}; struct aws_byte_buf arrbuf = aws_byte_buf_from_array(arr, sizeof(arr)); cur.len = 7; buffer.capacity = 7; ASSERT_FALSE(aws_byte_cursor_read_be64(&cur, &u64)); ASSERT_UINT_EQUALS(0, u64); ASSERT_FALSE(aws_byte_buf_write_be64(&buffer, 0)); ASSERT_BIN_ARRAYS_EQUALS(buf, sizeof(buf), starting_buf, sizeof(starting_buf)); cur.len = 3; buffer.capacity = 3; ASSERT_FALSE(aws_byte_cursor_read_be32(&cur, &u32)); ASSERT_UINT_EQUALS(0, u32); 
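/* With only 3 bytes of capacity remaining, a 32-bit write should likewise be rejected and the backing
 * storage should stay untouched, which the comparison against starting_buf below verifies. */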
ASSERT_FALSE(aws_byte_buf_write_be32(&buffer, 0)); ASSERT_BIN_ARRAYS_EQUALS(buf, sizeof(buf), starting_buf, sizeof(starting_buf)); cur.len = 2; buffer.capacity = 2; ASSERT_FALSE(aws_byte_cursor_read_be32(&cur, &u32)); ASSERT_UINT_EQUALS(0, u32); ASSERT_FALSE(aws_byte_buf_write_be24(&buffer, 0)); ASSERT_BIN_ARRAYS_EQUALS(buf, sizeof(buf), starting_buf, sizeof(starting_buf)); cur.len = 1; buffer.capacity = 1; ASSERT_FALSE(aws_byte_cursor_read_be16(&cur, &u16)); ASSERT_UINT_EQUALS(0, u16); ASSERT_FALSE(aws_byte_buf_write_be16(&buffer, 0)); ASSERT_FALSE(aws_byte_cursor_read(&cur, arr, sizeof(arr))); ASSERT_FALSE(aws_byte_buf_write_from_whole_buffer(&buffer, arrbuf)); ASSERT_FALSE(aws_byte_cursor_read_and_fill_buffer(&cur, &arrbuf)); ASSERT_BIN_ARRAYS_EQUALS(buf, sizeof(buf), starting_buf, sizeof(starting_buf)); ASSERT_UINT_EQUALS(0, arr[0]); ASSERT_UINT_EQUALS(0, arr[1]); cur.len = 0; aws_byte_buf_clean_up(&buffer); ASSERT_FALSE(aws_byte_cursor_read_u8(&cur, &u8)); ASSERT_UINT_EQUALS(0, u8); ASSERT_FALSE(aws_byte_buf_write_u8(&buffer, 0)); ASSERT_BIN_ARRAYS_EQUALS(buf, sizeof(buf), starting_buf, sizeof(starting_buf)); ASSERT_FALSE(aws_byte_buf_write_u8_n(&buffer, 0x0, 8)); ASSERT_BIN_ARRAYS_EQUALS(buf, sizeof(buf), starting_buf, sizeof(starting_buf)); ASSERT_TRUE(aws_byte_cursor_read(&cur, arr, 0)); ASSERT_TRUE(aws_byte_buf_write(&buffer, arr, 0)); aws_byte_buf_clean_up(&arrbuf); ASSERT_TRUE(aws_byte_cursor_read_and_fill_buffer(&cur, &arrbuf)); ASSERT_TRUE(aws_byte_buf_write_from_whole_buffer(&buffer, arrbuf)); ASSERT_UINT_EQUALS(0, arr[0]); ASSERT_UINT_EQUALS(0, arr[1]); return 0; } AWS_TEST_CASE(test_byte_cursor_read_hex_u8, s_test_byte_cursor_read_hex_u8) static int s_test_byte_cursor_read_hex_u8(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_cursor cur; uint8_t val = 0; cur = aws_byte_cursor_from_c_str("90"); ASSERT_TRUE(aws_byte_cursor_read_hex_u8(&cur, &val)); ASSERT_UINT_EQUALS(0x90, val); ASSERT_UINT_EQUALS(0, cur.len); cur = aws_byte_cursor_from_c_str("001"); ASSERT_TRUE(aws_byte_cursor_read_hex_u8(&cur, &val)); ASSERT_UINT_EQUALS(0x00, val); ASSERT_UINT_EQUALS(1, cur.len); ASSERT_UINT_EQUALS('1', cur.ptr[0]); cur = aws_byte_cursor_from_c_str("Fa"); ASSERT_TRUE(aws_byte_cursor_read_hex_u8(&cur, &val)); ASSERT_UINT_EQUALS(0xFA, val); ASSERT_UINT_EQUALS(0, cur.len); /* bad short buffer */ cur = aws_byte_cursor_from_c_str("0"); ASSERT_FALSE(aws_byte_cursor_read_hex_u8(&cur, &val)); ASSERT_UINT_EQUALS(1, cur.len); cur.len = 0; ASSERT_FALSE(aws_byte_cursor_read_hex_u8(&cur, &val)); ASSERT_UINT_EQUALS(0, cur.len); /* bad characters */ uint8_t bad_chars[][2] = { {'0', 0}, {'-', '0'}, {'/', '0'}, {'g', '0'}, {'x', '0'}, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(bad_chars); ++i) { cur = aws_byte_cursor_from_array(bad_chars[i], 2); ASSERT_FALSE(aws_byte_cursor_read_hex_u8(&cur, &val)); ASSERT_UINT_EQUALS(2, cur.len); } return 0; } #define TEST_STRING "hello" static const char *s_empty = ""; static const char *s_all_whitespace = " \t\r\n "; static const char *s_left_whitespace = "\t \r" TEST_STRING; static const char *s_right_whitespace = TEST_STRING " \r \t \n"; static const char *s_both_whitespace = " \t \r\n " TEST_STRING " \r \t \n"; static const char *expected_non_empty_result = TEST_STRING; static int s_test_byte_cursor_right_trim_empty(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = aws_byte_cursor_from_c_str(s_empty); struct aws_byte_cursor result = 
aws_byte_cursor_right_trim_pred(&test_cursor, aws_isspace); ASSERT_TRUE(result.len == 0); return 0; } AWS_TEST_CASE(test_byte_cursor_right_trim_empty, s_test_byte_cursor_right_trim_empty) static int s_test_byte_cursor_right_trim_all_whitespace(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = aws_byte_cursor_from_c_str(s_all_whitespace); struct aws_byte_cursor result = aws_byte_cursor_right_trim_pred(&test_cursor, aws_isspace); ASSERT_TRUE(result.len == 0); return 0; } AWS_TEST_CASE(test_byte_cursor_right_trim_all_whitespace, s_test_byte_cursor_right_trim_all_whitespace) static int s_test_byte_cursor_right_trim_basic(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = aws_byte_cursor_from_c_str(s_right_whitespace); struct aws_byte_cursor result = aws_byte_cursor_right_trim_pred(&test_cursor, aws_isspace); size_t expected_length = strlen(expected_non_empty_result); ASSERT_TRUE(strncmp((const char *)result.ptr, expected_non_empty_result, expected_length) == 0); ASSERT_TRUE(result.len == expected_length); return 0; } AWS_TEST_CASE(test_byte_cursor_right_trim_basic, s_test_byte_cursor_right_trim_basic) static int s_test_byte_cursor_left_trim_empty(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = aws_byte_cursor_from_c_str(s_empty); struct aws_byte_cursor result = aws_byte_cursor_right_trim_pred(&test_cursor, aws_isspace); ASSERT_TRUE(result.len == 0); return 0; } AWS_TEST_CASE(test_byte_cursor_left_trim_empty, s_test_byte_cursor_left_trim_empty) static int s_test_byte_cursor_left_trim_all_whitespace(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = aws_byte_cursor_from_c_str(s_all_whitespace); struct aws_byte_cursor result = aws_byte_cursor_right_trim_pred(&test_cursor, aws_isspace); ASSERT_TRUE(result.len == 0); return 0; } AWS_TEST_CASE(test_byte_cursor_left_trim_all_whitespace, s_test_byte_cursor_left_trim_all_whitespace) static int s_test_byte_cursor_left_trim_basic(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = aws_byte_cursor_from_c_str(s_left_whitespace); struct aws_byte_cursor result = aws_byte_cursor_left_trim_pred(&test_cursor, aws_isspace); ASSERT_TRUE(strcmp((const char *)result.ptr, expected_non_empty_result) == 0); return 0; } AWS_TEST_CASE(test_byte_cursor_left_trim_basic, s_test_byte_cursor_left_trim_basic) static int s_test_byte_cursor_trim_basic(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor test_cursor = aws_byte_cursor_from_c_str(s_both_whitespace); struct aws_byte_cursor result = aws_byte_cursor_trim_pred(&test_cursor, aws_isspace); size_t expected_length = strlen(expected_non_empty_result); ASSERT_TRUE(strncmp((const char *)result.ptr, expected_non_empty_result, expected_length) == 0); ASSERT_TRUE(result.len == expected_length); return 0; } AWS_TEST_CASE(test_byte_cursor_trim_basic, s_test_byte_cursor_trim_basic) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/date_time_test.c000066400000000000000000000747061456575232400250360ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include static int s_test_rfc822_utc_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char *valid_utc_dates[] = { "Wed, 02 Oct 2002 08:05:09 GMT", "Wed, 02 Oct 2002 08:05:09 UT", "Wed, 02 Oct 2002 08:05:09 Z", "Wed, 02 Oct 2002 08:05:09 UTC", }; for (size_t i = 0; i < 4; ++i) { struct aws_date_time date_time; const char *date_str = valid_utc_dates[i]; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_RFC822, &str_output)); const char *expected_long_str = "Wed, 02 Oct 2002 08:05:09 GMT"; struct aws_byte_buf expected_long_buf = aws_byte_buf_from_c_str(expected_long_str); ASSERT_BIN_ARRAYS_EQUALS(expected_long_buf.buffer, expected_long_buf.len, str_output.buffer, str_output.len); AWS_ZERO_ARRAY(date_output); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_short_str(&date_time, AWS_DATE_FORMAT_RFC822, &str_output)); const char *expected_short_str = "Wed, 02 Oct 2002"; struct aws_byte_buf expected_short_buf = aws_byte_buf_from_c_str(expected_short_str); ASSERT_BIN_ARRAYS_EQUALS(expected_short_buf.buffer, expected_short_buf.len, str_output.buffer, str_output.len); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_utc_parsing, s_test_rfc822_utc_parsing_fn) static int s_test_rfc822_utc_parsing_auto_detect_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Wed, 02 Oct 2002 08:05:09 GMT"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_AUTO_DETECT)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_RFC822, &str_output)); ASSERT_BIN_ARRAYS_EQUALS(date_buf.buffer, date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_utc_parsing_auto_detect, s_test_rfc822_utc_parsing_auto_detect_fn) static int s_test_rfc822_local_time_east_of_gmt_parsing_fn(struct 
aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Wed, 02 Oct 2002 09:35:09 +0130"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_RFC822, &str_output)); const char *expected_str = "Wed, 02 Oct 2002 08:05:09 GMT"; struct aws_byte_buf expected_buf = aws_byte_buf_from_c_str(expected_str); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.buffer, expected_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_local_time_east_of_gmt_parsing, s_test_rfc822_local_time_east_of_gmt_parsing_fn) static int s_test_rfc822_local_time_west_of_gmt_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Wed, 02 Oct 2002 07:05:09 -0100"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_RFC822, &str_output)); const char *expected_str = "Wed, 02 Oct 2002 08:05:09 GMT"; struct aws_byte_buf expected_buf = aws_byte_buf_from_c_str(expected_str); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.buffer, expected_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_local_time_west_of_gmt_parsing, s_test_rfc822_local_time_west_of_gmt_parsing_fn) static int s_test_rfc822_utc_two_digit_year_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Wed, 02 Oct 02 08:05:09 GMT"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); 
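/* A two-digit year of "02" is expected to be interpreted as 2002, so the remaining assertions match
 * the four-digit-year variants above and the formatted output is normalized to a four-digit year. */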
ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_RFC822, &str_output)); const char *expected_date_str = "Wed, 02 Oct 2002 08:05:09 GMT"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_utc_two_digit_year_parsing, s_test_rfc822_utc_two_digit_year_parsing_fn) static int s_test_rfc822_utc_no_dow_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "02 Oct 02 08:05:09 GMT"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_RFC822, &str_output)); const char *expected_date_str = "Wed, 02 Oct 2002 08:05:09 GMT"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_utc_no_dow_parsing, s_test_rfc822_utc_no_dow_parsing_fn) static int s_test_rfc822_utc_dos_prevented_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Weddkasdiweijbnawei8eriojngsdgasdgsdf1gasd8asdgfasdfgsdikweisdfksdnsdksdklas" "dfsdklasdfdfsdfsdfsdfsadfasdafsdfgjjfgghdfgsdfsfsdfsdfasdfsdfasdfsdfasdfsdf"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_utc_dos_prevented, s_test_rfc822_utc_dos_prevented_fn) static int s_test_rfc822_invalid_format_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Wed, 02 Oct 2002"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_ERROR( AWS_ERROR_INVALID_DATE_STR, aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); return AWS_OP_SUCCESS; } 
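/*
 * Note (added for clarity, not part of the upstream test source): the RFC 822
 * offset tests above rely on the parser normalizing local times to UTC before
 * storing them. "+0130" means the local clock runs 1h30m ahead of GMT, so
 * "09:35:09 +0130" becomes 09:35:09 - 01:30 = 08:05:09 UTC; "-0100" runs 1h
 * behind, so "07:05:09 -0100" becomes 07:05:09 + 01:00 = 08:05:09 UTC. Both
 * therefore round-trip to the same canonical string,
 * "Wed, 02 Oct 2002 08:05:09 GMT".
 */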
AWS_TEST_CASE(rfc822_invalid_format, s_test_rfc822_invalid_format_fn) static int s_test_rfc822_invalid_tz_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Wed, 02 Oct 2002 08:05:09 DST"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_ERROR( AWS_ERROR_INVALID_DATE_STR, aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_RFC822)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_invalid_tz, s_test_rfc822_invalid_tz_fn) static int s_test_rfc822_invalid_auto_format_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Wed, 02 Oct 2002"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_ERROR( AWS_ERROR_INVALID_DATE_STR, aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_AUTO_DETECT)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rfc822_invalid_auto_format, s_test_rfc822_invalid_auto_format_fn) static int s_test_iso8601_utc_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "2002-10-02T08:05:09.000Z"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_ISO_8601)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601, &str_output)); const char *expected_date_str = "2002-10-02T08:05:09Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); AWS_ZERO_ARRAY(date_output); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_short_str(&date_time, AWS_DATE_FORMAT_ISO_8601, &str_output)); const char *expected_short_str = "2002-10-02"; struct aws_byte_buf expected_short_buf = aws_byte_buf_from_c_str(expected_short_str); ASSERT_BIN_ARRAYS_EQUALS(expected_short_buf.buffer, expected_short_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_utc_parsing, s_test_iso8601_utc_parsing_fn) static int s_test_iso8601_basic_utc_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "20021002T080509000Z"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_ISO_8601_BASIC)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); 
ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601_BASIC, &str_output)); const char *expected_date_str = "20021002T080509Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); AWS_ZERO_ARRAY(date_output); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_short_str(&date_time, AWS_DATE_FORMAT_ISO_8601_BASIC, &str_output)); const char *expected_short_str = "20021002"; struct aws_byte_buf expected_short_buf = aws_byte_buf_from_c_str(expected_short_str); ASSERT_BIN_ARRAYS_EQUALS(expected_short_buf.buffer, expected_short_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_basic_utc_parsing, s_test_iso8601_basic_utc_parsing_fn) static int s_test_iso8601_utc_parsing_auto_detect_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "2002-10-02T08:05:09.000Z"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_AUTO_DETECT)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601, &str_output)); const char *expected_date_str = "2002-10-02T08:05:09Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_utc_parsing_auto_detect, s_test_iso8601_utc_parsing_auto_detect_fn) static int s_test_iso8601_basic_utc_parsing_auto_detect_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "20021002T080509000Z"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_AUTO_DETECT)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, 
aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601_BASIC, &str_output)); const char *expected_date_str = "20021002T080509Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_basic_utc_parsing_auto_detect, s_test_iso8601_basic_utc_parsing_auto_detect_fn) static int s_test_iso8601_utc_no_colon_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "2002-10-02T080509.000Z"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_ISO_8601)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601, &str_output)); const char *expected_date_str = "2002-10-02T08:05:09Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_utc_no_colon_parsing, s_test_iso8601_utc_no_colon_parsing_fn) static int s_test_iso8601_date_only_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "2002-10-02"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_ISO_8601)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(0, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(0, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(0, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601, &str_output)); const char *expected_date_str = "2002-10-02T00:00:00Z"; struct 
aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_date_only_parsing, s_test_iso8601_date_only_parsing_fn) static int s_test_iso8601_basic_date_only_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "20021002"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_SUCCESS(aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_ISO_8601_BASIC)); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(0, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(0, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(0, aws_date_time_second(&date_time, false)); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601_BASIC, &str_output)); const char *expected_date_str = "20021002T000000Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_basic_date_only_parsing, s_test_iso8601_basic_date_only_parsing_fn) static int s_test_iso8601_utc_dos_prevented_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "Weddkasdiweijbnawei8eriojngsdgasdgsdf1gasd8asdgfasdfgsdikweisdfksdnsdksdklas" "dfsdklasdfdfsdfsdfsdfsadfasdafsdfgjjfgghdfgsdfsfsdfsdfasdfsdfasdfsdfasdfsdf"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_ISO_8601)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_utc_dos_prevented, s_test_iso8601_utc_dos_prevented_fn) static int s_test_iso8601_invalid_format_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "2002-10-02T"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_ERROR( AWS_ERROR_INVALID_DATE_STR, aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_ISO_8601)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_invalid_format, s_test_iso8601_invalid_format_fn) static int s_test_iso8601_invalid_auto_format_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; const char *date_str = "2002-10-02T"; struct aws_byte_buf date_buf = aws_byte_buf_from_c_str(date_str); ASSERT_ERROR( AWS_ERROR_INVALID_DATE_STR, aws_date_time_init_from_str(&date_time, &date_buf, AWS_DATE_FORMAT_AUTO_DETECT)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(iso8601_invalid_auto_format, s_test_iso8601_invalid_auto_format_fn) static int s_test_unix_epoch_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; /* Test 
milliseconds are zeroed out. */ aws_date_time_init_epoch_secs(&date_time, 1033545909.0); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); ASSERT_UINT_EQUALS(0U, date_time.milliseconds); /* Test milliseconds of `date_time` match the milliseconds from decimal portion of the input, double timestamp. */ aws_date_time_init_epoch_secs(&date_time, 1033545909.50542123); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); ASSERT_UINT_EQUALS(505U, date_time.milliseconds); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601, &str_output)); const char *expected_date_str = "2002-10-02T08:05:09Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(unix_epoch_parsing, s_test_unix_epoch_parsing_fn) static int s_test_millis_parsing_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_date_time date_time; /* Test milliseconds are zeroed. */ aws_date_time_init_epoch_millis(&date_time, 1033545909000); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); ASSERT_UINT_EQUALS(0U, date_time.milliseconds); /* Test milliseconds of `date_time` match the milliseconds from the 64-bit timestamp parameter. 
*/ aws_date_time_init_epoch_millis(&date_time, 1033545909619); ASSERT_INT_EQUALS(AWS_DATE_DAY_OF_WEEK_WEDNESDAY, aws_date_time_day_of_week(&date_time, false)); ASSERT_UINT_EQUALS(2, aws_date_time_month_day(&date_time, false)); ASSERT_UINT_EQUALS(AWS_DATE_MONTH_OCTOBER, aws_date_time_month(&date_time, false)); ASSERT_UINT_EQUALS(2002, aws_date_time_year(&date_time, false)); ASSERT_UINT_EQUALS(8, aws_date_time_hour(&date_time, false)); ASSERT_UINT_EQUALS(5, aws_date_time_minute(&date_time, false)); ASSERT_UINT_EQUALS(9, aws_date_time_second(&date_time, false)); ASSERT_UINT_EQUALS(619U, date_time.milliseconds); uint8_t date_output[AWS_DATE_TIME_STR_MAX_LEN]; AWS_ZERO_ARRAY(date_output); struct aws_byte_buf str_output = aws_byte_buf_from_array(date_output, sizeof(date_output)); str_output.len = 0; ASSERT_SUCCESS(aws_date_time_to_utc_time_str(&date_time, AWS_DATE_FORMAT_ISO_8601, &str_output)); const char *expected_date_str = "2002-10-02T08:05:09Z"; struct aws_byte_buf expected_date_buf = aws_byte_buf_from_c_str(expected_date_str); ASSERT_BIN_ARRAYS_EQUALS(expected_date_buf.buffer, expected_date_buf.len, str_output.buffer, str_output.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(millis_parsing, s_test_millis_parsing_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/device_random_test.c000066400000000000000000000147741456575232400257010ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /* Number of random numbers to generate and put in buckets. Higher numbers mean more tolerance */ #define DISTRIBUTION_PUT_COUNT 1000000 /* Must be a power of 2. Lower numbers mean more tolerance. */ #define DISTRIBUTION_BUCKET_COUNT 16 /* Fail if a bucket's contents vary from expected by more than this ratio. Higher ratio means more tolerance. * For example, if putting 1000 numbers into 10 buckets, we expect 100 in each bucket. * If ratio is 0.25 than accept 75 -> 125 numbers per bucket. */ #define DISTRIBUTION_ACCEPTED_DEVIATION_RATIO 0.05 /* For testing that random number generator has a uniform distribution. 
* They're RANDOM numbers, so to avoid RANDOM failures use lots of inputs and be tolerate some deviance */ struct distribution_tester { uint64_t max_value; uint64_t buckets[DISTRIBUTION_BUCKET_COUNT]; uint64_t num_puts; }; static int s_distribution_tester_put(struct distribution_tester *tester, uint64_t rand_num) { ASSERT_TRUE(rand_num <= tester->max_value); uint64_t bucket_size = (tester->max_value / DISTRIBUTION_BUCKET_COUNT) + 1; uint64_t bucket_idx = rand_num / bucket_size; ASSERT_TRUE(bucket_idx < DISTRIBUTION_BUCKET_COUNT); tester->buckets[bucket_idx]++; tester->num_puts++; return AWS_OP_SUCCESS; } static int s_distribution_tester_check_results(const struct distribution_tester *tester) { ASSERT_TRUE(tester->num_puts == DISTRIBUTION_PUT_COUNT); double expected_numbers_per_bucket = (double)DISTRIBUTION_PUT_COUNT / DISTRIBUTION_BUCKET_COUNT; uint64_t max_acceptable_numbers_per_bucket = (uint64_t)ceil(expected_numbers_per_bucket * (1.0 + DISTRIBUTION_ACCEPTED_DEVIATION_RATIO)); uint64_t min_acceptable_numbers_per_bucket = (uint64_t)floor(expected_numbers_per_bucket * (1.0 - DISTRIBUTION_ACCEPTED_DEVIATION_RATIO)); for (uint64_t i = 0; i < DISTRIBUTION_BUCKET_COUNT; ++i) { uint64_t numbers_in_bucket = tester->buckets[i]; ASSERT_TRUE(numbers_in_bucket <= max_acceptable_numbers_per_bucket); ASSERT_TRUE(numbers_in_bucket >= min_acceptable_numbers_per_bucket); } return AWS_OP_SUCCESS; } static int s_device_rand_u64_distribution_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct distribution_tester tester = {.max_value = UINT64_MAX}; for (size_t i = 0; i < DISTRIBUTION_PUT_COUNT; ++i) { uint64_t next_value = 0; ASSERT_SUCCESS(aws_device_random_u64(&next_value)); ASSERT_SUCCESS(s_distribution_tester_put(&tester, next_value)); } ASSERT_SUCCESS(s_distribution_tester_check_results(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(device_rand_u64_distribution, s_device_rand_u64_distribution_fn) static int s_device_rand_u32_distribution_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct distribution_tester tester = {.max_value = UINT32_MAX}; for (size_t i = 0; i < DISTRIBUTION_PUT_COUNT; ++i) { uint32_t next_value = 0; ASSERT_SUCCESS(aws_device_random_u32(&next_value)); ASSERT_SUCCESS(s_distribution_tester_put(&tester, next_value)); } ASSERT_SUCCESS(s_distribution_tester_check_results(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(device_rand_u32_distribution, s_device_rand_u32_distribution_fn) static int s_device_rand_u16_distribution_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct distribution_tester tester = {.max_value = UINT16_MAX}; for (size_t i = 0; i < DISTRIBUTION_PUT_COUNT; ++i) { uint16_t next_value = 0; ASSERT_SUCCESS(aws_device_random_u16(&next_value)); ASSERT_SUCCESS(s_distribution_tester_put(&tester, next_value)); } ASSERT_SUCCESS(s_distribution_tester_check_results(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(device_rand_u16_distribution, s_device_rand_u16_distribution_fn) static int s_device_rand_buffer_distribution_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t array[DISTRIBUTION_PUT_COUNT] = {0}; struct aws_byte_buf buf = aws_byte_buf_from_empty_array(array, sizeof(array)); ASSERT_SUCCESS(aws_device_random_buffer(&buf)); /* Test each byte in the buffer */ struct distribution_tester tester = {.max_value = UINT8_MAX}; for (size_t i = 0; i < DISTRIBUTION_PUT_COUNT; ++i) { ASSERT_SUCCESS(s_distribution_tester_put(&tester, array[i])); } 
ASSERT_SUCCESS(s_distribution_tester_check_results(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(device_rand_buffer_distribution, s_device_rand_buffer_distribution_fn) static int s_device_rand_buffer_append_distribution_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Create array full of zeroes, but only partially fill it with randomness */ uint8_t array[DISTRIBUTION_PUT_COUNT + 100] = {0}; struct aws_byte_buf buf = aws_byte_buf_from_empty_array(array, sizeof(array)); ASSERT_SUCCESS(aws_device_random_buffer_append(&buf, DISTRIBUTION_PUT_COUNT)); /* Test that first half of buffer has randomness */ struct distribution_tester tester = {.max_value = UINT8_MAX}; for (size_t i = 0; i < DISTRIBUTION_PUT_COUNT; ++i) { ASSERT_SUCCESS(s_distribution_tester_put(&tester, array[i])); } ASSERT_SUCCESS(s_distribution_tester_check_results(&tester)); /* Test that second half of buffer is untouched (still full of zeroes) */ ASSERT_UINT_EQUALS(DISTRIBUTION_PUT_COUNT, buf.len); ASSERT_UINT_EQUALS(sizeof(array), buf.capacity); for (size_t i = buf.len; i < buf.capacity; ++i) { ASSERT_UINT_EQUALS(0, buf.buffer[i]); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(device_rand_buffer_append_distribution, s_device_rand_buffer_append_distribution_fn) static int s_device_rand_buffer_append_short_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t array[200] = {0}; struct aws_byte_buf buf = aws_byte_buf_from_empty_array(array, sizeof(array)); ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_device_random_buffer_append(&buf, sizeof(array) + 1)); ASSERT_UINT_EQUALS(0, buf.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(device_rand_buffer_append_short_buffer, s_device_rand_buffer_append_short_buffer_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/encoding_test.c000066400000000000000000001517371456575232400246710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include /* Test cases from rfc4648 for Base 16 Encoding */ static int s_run_hex_encoding_test_case( struct aws_allocator *allocator, const char *test_str, size_t test_str_size, const char *expected, size_t expected_size) { size_t output_size = 0; ASSERT_SUCCESS( aws_hex_compute_encoded_len(test_str_size - 1, &output_size), "compute hex encoded len failed with error %d", aws_last_error()); ASSERT_INT_EQUALS(expected_size, output_size, "Output size on string should be %d", expected_size); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(test_str, test_str_size - 1); struct aws_byte_buf allocation; ASSERT_SUCCESS(aws_byte_buf_init(&allocation, allocator, output_size + 2)); memset(allocation.buffer, 0xdd, allocation.capacity); struct aws_byte_buf output = aws_byte_buf_from_empty_array(allocation.buffer + 1, output_size); ASSERT_SUCCESS(aws_hex_encode(&to_encode, &output), "encode call should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( expected, expected_size, output.buffer, output_size, "Encode output should have been {%s}, was {%s}.", expected, output.buffer); ASSERT_INT_EQUALS(output_size, output.len); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer), (unsigned char)0xdd, "Write should not have occurred before the start of the buffer."); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer + output_size + 1), (unsigned char)0xdd, "Write should not have occurred after the start of the buffer."); ASSERT_SUCCESS( aws_hex_compute_decoded_len(expected_size - 1, &output_size), "compute hex decoded len failed with error %d", aws_last_error()); memset(allocation.buffer, 0xdd, allocation.capacity); ASSERT_INT_EQUALS(test_str_size - 1, output_size, "Output size on string should be %d", test_str_size - 1); aws_byte_buf_reset(&output, false); struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, expected_size - 1); ASSERT_SUCCESS(aws_hex_decode(&expected_buf, &output), "decode call should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( test_str, test_str_size - 1, output.buffer, output_size, "Decode output should have been %s.", test_str); ASSERT_INT_EQUALS(output_size, output.len); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer), (unsigned char)0xdd, "Write should not have occurred before the start of the buffer."); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer + output_size + 1), (unsigned char)0xdd, "Write should not have occurred after the start of the buffer."); aws_byte_buf_clean_up(&allocation); return 0; } static int s_hex_encoding_test_case_empty(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = ""; char expected[] = ""; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected)); } AWS_TEST_CASE(hex_encoding_test_case_empty_test, s_hex_encoding_test_case_empty) static int s_hex_encoding_test_case_f(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "f"; char expected[] = "66"; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected)); } AWS_TEST_CASE(hex_encoding_test_case_f_test, s_hex_encoding_test_case_f) static int s_hex_encoding_test_case_fo(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "fo"; char expected[] = "666f"; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected)); } AWS_TEST_CASE(hex_encoding_test_case_fo_test, s_hex_encoding_test_case_fo) static int 
s_hex_encoding_test_case_foo(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "foo"; char expected[] = "666f6f"; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected)); } AWS_TEST_CASE(hex_encoding_test_case_foo_test, s_hex_encoding_test_case_foo) static int s_hex_encoding_test_case_foob(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "foob"; char expected[] = "666f6f62"; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected)); } AWS_TEST_CASE(hex_encoding_test_case_foob_test, s_hex_encoding_test_case_foob) static int s_hex_encoding_test_case_fooba(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "fooba"; char expected[] = "666f6f6261"; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected)); } AWS_TEST_CASE(hex_encoding_test_case_fooba_test, s_hex_encoding_test_case_fooba) static int s_hex_encoding_test_case_foobar(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "foobar"; char expected[] = "666f6f626172"; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected)); } AWS_TEST_CASE(hex_encoding_test_case_foobar_test, s_hex_encoding_test_case_foobar) static int s_hex_encoding_append_test_case(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "foobar"; char expected[] = "666f6f626172"; return s_run_hex_encoding_test_case(allocator, test_data, sizeof(test_data), expected, sizeof(expected) - 1); } AWS_TEST_CASE(hex_encoding_append_test_case, s_hex_encoding_append_test_case) static int s_hex_encoding_test_case_missing_leading_zero_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t expected[] = {0x01, 0x02, 0x03, 0x04}; char test_data[] = "1020304"; uint8_t output[sizeof(expected)] = {0}; struct aws_byte_cursor test_buf = aws_byte_cursor_from_c_str(test_data); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(expected)); ASSERT_SUCCESS( aws_hex_decode(&test_buf, &output_buf), "Hex decoding failed with " "error code %d", aws_last_error()); ASSERT_BIN_ARRAYS_EQUALS( expected, sizeof(expected), output, sizeof(output), "Hex decode expected output did not match actual output"); return 0; } AWS_TEST_CASE(hex_encoding_test_case_missing_leading_zero, s_hex_encoding_test_case_missing_leading_zero_fn) static int s_hex_encoding_invalid_buffer_size_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char test_data[] = "foobar"; size_t size_too_small = 2; uint8_t output[] = {0, 0}; struct aws_byte_cursor test_buf = aws_byte_cursor_from_c_str(test_data); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, size_too_small); ASSERT_ERROR( AWS_ERROR_SHORT_BUFFER, aws_hex_encode(&test_buf, &output_buf), "Invalid buffer size should have failed with AWS_ERROR_SHORT_BUFFER"); ASSERT_ERROR( AWS_ERROR_SHORT_BUFFER, aws_hex_decode(&test_buf, &output_buf), "Invalid buffer size should have failed with AWS_ERROR_SHORT_BUFFER"); return 0; } AWS_TEST_CASE(hex_encoding_invalid_buffer_size_test, s_hex_encoding_invalid_buffer_size_test_fn) static int s_hex_encoding_highbyte_string_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char bad_input[] = "66\xb6\xb6" "6f6f6617"; uint8_t output[sizeof(bad_input)] = {0}; struct aws_byte_cursor bad_buf = 
aws_byte_cursor_from_c_str(bad_input); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR(AWS_ERROR_INVALID_HEX_STR, aws_hex_decode(&bad_buf, &output_buf)); return 0; } AWS_TEST_CASE(hex_encoding_highbyte_string_test, s_hex_encoding_highbyte_string_test_fn) static int s_hex_encoding_overflow_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char test_data[] = "foobar"; /* kill off the last two bits, so the not a multiple of 4 check doesn't * trigger first */ size_t overflow = (SIZE_MAX - 1); uint8_t output[] = {0, 0}; struct aws_byte_cursor test_buf = aws_byte_cursor_from_array(test_data, overflow); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_hex_encode(&test_buf, &output_buf), "overflow buffer size should have failed with AWS_ERROR_OVERFLOW_DETECTED"); return 0; } AWS_TEST_CASE(hex_encoding_overflow_test, s_hex_encoding_overflow_test_fn) static int s_hex_encoding_invalid_string_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char bad_input[] = "666f6f6x6172"; uint8_t output[sizeof(bad_input)] = {0}; struct aws_byte_cursor bad_buf = aws_byte_cursor_from_c_str(bad_input); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR( AWS_ERROR_INVALID_HEX_STR, aws_hex_decode(&bad_buf, &output_buf), "An invalid string should have failed with AWS_ERROR_INVALID_HEX_STR"); return 0; } AWS_TEST_CASE(hex_encoding_invalid_string_test, s_hex_encoding_invalid_string_test_fn) AWS_STATIC_STRING_FROM_LITERAL(s_base64_encode_prefix, "Prefix"); /*base64 encoding test cases */ static int s_run_base64_encoding_test_case( struct aws_allocator *allocator, const char *test_str, size_t test_str_size, const char *expected, size_t expected_size) { size_t output_size = 0; size_t terminated_size = (expected_size + 1); /* Part 1: encoding */ ASSERT_SUCCESS( aws_base64_compute_encoded_len(test_str_size, &output_size), "Compute base64 encoded length failed with %d", aws_last_error()); ASSERT_INT_EQUALS(terminated_size, output_size, "Output size on string should be %d", terminated_size); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(test_str, test_str_size); struct aws_byte_buf allocation; ASSERT_SUCCESS(aws_byte_buf_init(&allocation, allocator, output_size + 2)); memset(allocation.buffer, 0xdd, allocation.capacity); struct aws_byte_buf output = aws_byte_buf_from_empty_array(allocation.buffer + 1, output_size); ASSERT_SUCCESS(aws_base64_encode(&to_encode, &output), "encode call should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( expected, expected_size, output.buffer, output.len, "Encode output should have been {%s}, was {%s}.", expected, output.buffer); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer), (unsigned char)0xdd, "Write should not have occurred before the start of the buffer."); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer + output_size + 1), (unsigned char)0xdd, "Write should not have occurred after the start of the buffer."); aws_byte_buf_clean_up(&allocation); /* part 2 - encoding properly appends rather than overwrites */ ASSERT_SUCCESS(aws_byte_buf_init(&allocation, allocator, output_size + s_base64_encode_prefix->len)); struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_base64_encode_prefix); ASSERT_SUCCESS(aws_byte_buf_append(&allocation, &prefix_cursor)); ASSERT_SUCCESS(aws_base64_encode(&to_encode, 
&allocation), "encode call should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( expected, expected_size, allocation.buffer + s_base64_encode_prefix->len, expected_size, "Encode output should have been {%s}, was {%s}.", expected, allocation.buffer + s_base64_encode_prefix->len); struct aws_byte_cursor prefix_output = {.ptr = allocation.buffer, .len = s_base64_encode_prefix->len}; ASSERT_BIN_ARRAYS_EQUALS( s_base64_encode_prefix->bytes, s_base64_encode_prefix->len, allocation.buffer, s_base64_encode_prefix->len, "Encode prefix should have been {%s}, was {" PRInSTR "}.", s_base64_encode_prefix->bytes, AWS_BYTE_CURSOR_PRI(prefix_output)); aws_byte_buf_clean_up(&allocation); /* Part 3: decoding */ struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, expected_size); ASSERT_SUCCESS( aws_base64_compute_decoded_len(&expected_cur, &output_size), "Compute base64 decoded length failed with %d", aws_last_error()); ASSERT_INT_EQUALS(test_str_size, output_size, "Output size on string should be %d", test_str_size); ASSERT_SUCCESS(aws_byte_buf_init(&allocation, allocator, output_size + 2)); memset(allocation.buffer, 0xdd, allocation.capacity); output = aws_byte_buf_from_empty_array(allocation.buffer + 1, output_size); struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, expected_size); ASSERT_SUCCESS(aws_base64_decode(&expected_buf, &output), "decode call should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( test_str, test_str_size, output.buffer, output_size, "Decode output should have been {%s} (len=%zu).", test_str, test_str_size); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer), (unsigned char)0xdd, "Write should not have occurred before the start of the buffer."); ASSERT_INT_EQUALS( (unsigned char)*(allocation.buffer + output_size + 1), (unsigned char)0xdd, "Write should not have occurred after the start of the buffer."); aws_byte_buf_clean_up(&allocation); return 0; } static int s_base64_encoding_test_case_empty(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = ""; char expected[] = ""; return s_run_base64_encoding_test_case(allocator, test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_empty_test, s_base64_encoding_test_case_empty) static int s_base64_encoding_test_case_f(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "f"; char expected[] = "Zg=="; return s_run_base64_encoding_test_case(allocator, test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_f_test, s_base64_encoding_test_case_f) static int s_base64_encoding_test_case_fo(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "fo"; char expected[] = "Zm8="; return s_run_base64_encoding_test_case(allocator, test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_fo_test, s_base64_encoding_test_case_fo) static int s_base64_encoding_test_case_foo(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "foo"; char expected[] = "Zm9v"; return s_run_base64_encoding_test_case(allocator, test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_foo_test, s_base64_encoding_test_case_foo) static int s_base64_encoding_test_case_foob(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "foob"; char expected[] = "Zm9vYg=="; return s_run_base64_encoding_test_case(allocator, 
test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_foob_test, s_base64_encoding_test_case_foob) static int s_base64_encoding_test_case_fooba(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "fooba"; char expected[] = "Zm9vYmE="; return s_run_base64_encoding_test_case(allocator, test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_fooba_test, s_base64_encoding_test_case_fooba) static int s_base64_encoding_test_case_foobar(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "foobar"; char expected[] = "Zm9vYmFy"; return s_run_base64_encoding_test_case(allocator, test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_foobar_test, s_base64_encoding_test_case_foobar) static int s_base64_encoding_test_case_32bytes(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* 01234567890123456789012345678901 */ char test_data[] = "this is a 32 byte long string!!!"; char expected[] = "dGhpcyBpcyBhIDMyIGJ5dGUgbG9uZyBzdHJpbmchISE="; return s_run_base64_encoding_test_case(allocator, test_data, sizeof(test_data) - 1, expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_case_32bytes_test, s_base64_encoding_test_case_32bytes) static int s_base64_encoding_test_zeros_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t test_data[6] = {0}; char expected[] = "AAAAAAAA"; return s_run_base64_encoding_test_case( allocator, (char *)test_data, sizeof(test_data), expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_zeros, s_base64_encoding_test_zeros_fn) static int s_base64_encoding_test_roundtrip(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; fprintf(stderr, "--test\n"); uint8_t test_data[32]; for (size_t i = 0; i < sizeof(test_data); i++) { /* 0000 0100 0010 0000 1100 0100 */ #if 0 test_data[i] = 0x; test_data[i + 1] = 0x20; test_data[i + 2] = 0xc4; #endif test_data[i] = (uint8_t)i; /* b64 nibbles: 1 2 3 4 (BCDE) */ } struct aws_byte_cursor original_data = aws_byte_cursor_from_array(test_data, sizeof(test_data)); uint8_t test_hex[65] = {0}; struct aws_byte_buf hex = aws_byte_buf_from_empty_array(test_hex, sizeof(test_hex)); uint8_t test_b64[128] = {0}; struct aws_byte_buf b64_data = aws_byte_buf_from_empty_array(test_b64, sizeof(test_b64)); aws_base64_encode(&original_data, &b64_data); b64_data.len--; uint8_t decoded_data[32] = {0}; struct aws_byte_buf decoded_buf = aws_byte_buf_from_empty_array(decoded_data, sizeof(decoded_data)); struct aws_byte_cursor b64_cur = aws_byte_cursor_from_buf(&b64_data); aws_base64_decode(&b64_cur, &decoded_buf); if (memcmp(decoded_buf.buffer, original_data.ptr, decoded_buf.len) != 0) { aws_hex_encode(&original_data, &hex); fprintf(stderr, "Base64 round-trip failed\n"); fprintf(stderr, "Original: %s\n", (char *)test_hex); fprintf(stderr, "Base64 : "); for (size_t i = 0; i < sizeof(test_b64); i++) { if (!test_b64[i]) { break; } fprintf(stderr, " %c", test_b64[i]); } fprintf(stderr, "\n"); memset(test_hex, 0, sizeof(test_hex)); struct aws_byte_cursor decoded_cur = aws_byte_cursor_from_buf(&decoded_buf); aws_hex_encode(&decoded_cur, &hex); fprintf(stderr, "Decoded : %s\n", (char *)test_hex); return 1; } return 0; } AWS_TEST_CASE(base64_encoding_test_roundtrip, s_base64_encoding_test_roundtrip) /* this test is here because I manually touched the decoding table with sentinal * values for efficiency 
reasons and I want to make sure it matches the encoded * string. This checks that none of those values that were previously 0 which I * moved to a sentinal value of 0xDD, were actually supposed to be a 0 other * than character value of 65 -> "A" -> 0. */ static int s_base64_encoding_test_all_values_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t test_data[255] = {0}; for (uint8_t i = 0; i < (uint8_t)sizeof(test_data); ++i) { test_data[i] = i; } char expected[] = "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERU" "ZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouM" "jY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0t" "PU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+"; return s_run_base64_encoding_test_case( allocator, (char *)test_data, sizeof(test_data), expected, sizeof(expected) - 1); } AWS_TEST_CASE(base64_encoding_test_all_values, s_base64_encoding_test_all_values_fn) static int s_base64_encoding_buffer_size_too_small_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char test_data[] = "foobar"; char encoded_data[] = "Zm9vYmFy"; size_t size_too_small = 4; uint8_t output[] = {0, 0}; struct aws_byte_cursor test_buf = aws_byte_cursor_from_c_str(test_data); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, size_too_small); ASSERT_ERROR( AWS_ERROR_SHORT_BUFFER, aws_base64_encode(&test_buf, &output_buf), "Invalid buffer size should have failed with AWS_ERROR_SHORT_BUFFER"); struct aws_byte_cursor encoded_buf = aws_byte_cursor_from_c_str(encoded_data); ASSERT_ERROR( AWS_ERROR_SHORT_BUFFER, aws_base64_decode(&encoded_buf, &output_buf), "Invalid buffer size should have failed with AWS_ERROR_SHORT_BUFFER"); return 0; } AWS_TEST_CASE(base64_encoding_buffer_size_too_small_test, s_base64_encoding_buffer_size_too_small_test_fn) static int s_base64_encoding_buffer_size_overflow_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char test_data[] = "foobar"; char encoded_data[] = "Zm9vYmFy"; /* kill off the last two bits, so the not a multiple of 4 check doesn't * trigger first */ size_t overflow = (SIZE_MAX - 1) & ~0x03; uint8_t output[] = {0, 0}; struct aws_byte_cursor test_buf = aws_byte_cursor_from_array(test_data, overflow + 2); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_base64_encode(&test_buf, &output_buf), "overflow buffer size should have failed with AWS_ERROR_OVERFLOW_DETECTED"); struct aws_byte_cursor encoded_buf = aws_byte_cursor_from_array(encoded_data, overflow); ASSERT_ERROR( AWS_ERROR_OVERFLOW_DETECTED, aws_base64_decode(&encoded_buf, &output_buf), "overflow buffer size should have failed with AWS_ERROR_OVERFLOW_DETECTED"); return 0; } AWS_TEST_CASE(base64_encoding_buffer_size_overflow_test, s_base64_encoding_buffer_size_overflow_test_fn) static int s_base64_encoding_buffer_size_invalid_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char encoded_data[] = "Zm9vYmFy"; /* kill off the last two bits, so the not a multiple of 4 check doesn't * trigger first */ uint8_t output[] = {0, 0}; struct aws_byte_cursor encoded_buf = aws_byte_cursor_from_array(encoded_data, sizeof(encoded_data)); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR( AWS_ERROR_INVALID_BASE64_STR, 
aws_base64_decode(&encoded_buf, &output_buf), "Non multiple of 4 buffer size should have failed with AWS_ERROR_INVALID_BASE64_STR"); return 0; } AWS_TEST_CASE(base64_encoding_buffer_size_invalid_test, s_base64_encoding_buffer_size_invalid_test_fn) static int s_base64_encoding_invalid_buffer_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char encoded_data[] = "Z\n9vYmFy"; uint8_t output[sizeof(encoded_data)] = {0}; struct aws_byte_cursor encoded_buf = aws_byte_cursor_from_c_str(encoded_data); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR( AWS_ERROR_INVALID_BASE64_STR, aws_base64_decode(&encoded_buf, &output_buf), "buffer with invalid character should have failed with AWS_ERROR_INVALID_BASE64_STR"); return 0; } AWS_TEST_CASE(base64_encoding_invalid_buffer_test, s_base64_encoding_invalid_buffer_test_fn) static int s_base64_encoding_highbyte_string_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char bad_input[] = "AAAA\xC1" "AAA"; uint8_t output[sizeof(bad_input)] = {0}; struct aws_byte_cursor bad_buf = aws_byte_cursor_from_c_str(bad_input); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR(AWS_ERROR_INVALID_BASE64_STR, aws_base64_decode(&bad_buf, &output_buf)); return 0; } AWS_TEST_CASE(base64_encoding_highbyte_string_test, s_base64_encoding_highbyte_string_test_fn) static int s_base64_encoding_invalid_padding_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char encoded_data[] = "Zm9vY==="; uint8_t output[sizeof(encoded_data)] = {0}; struct aws_byte_cursor encoded_buf = aws_byte_cursor_from_c_str(encoded_data); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output, sizeof(output)); ASSERT_ERROR( AWS_ERROR_INVALID_BASE64_STR, aws_base64_decode(&encoded_buf, &output_buf), "buffer with invalid padding should have failed with AWS_ERROR_INVALID_BASE64_STR"); return 0; } AWS_TEST_CASE(base64_encoding_invalid_padding_test, s_base64_encoding_invalid_padding_test_fn) /* network integer encoding tests */ static int s_uint64_buffer_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t test_value = 0x1020304050607080; uint8_t buffer[8] = {0}; aws_write_u64(test_value, buffer); uint8_t expected[] = {0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), buffer, sizeof(buffer), "Uint64_t to buffer failed"); uint64_t unmarshalled_value = aws_read_u64(buffer); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); return 0; } AWS_TEST_CASE(uint64_buffer_test, s_uint64_buffer_test_fn) static int s_uint64_buffer_non_aligned_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t test_value = 0x1020304050607080; uint8_t *buffer = (uint8_t *)aws_mem_acquire(allocator, 9); ASSERT_FALSE((size_t)buffer & 0x07, "Heap allocated buffer should have been 8-byte aligned."); aws_write_u64(test_value, buffer + 1); uint8_t expected[] = {0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), (buffer + 1), sizeof(expected), "Uint64_t to buffer failed"); uint64_t unmarshalled_value = aws_read_u64(buffer + 1); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); aws_mem_release(allocator, (void *)buffer); return 0; } 
AWS_TEST_CASE(uint64_buffer_non_aligned_test, s_uint64_buffer_non_aligned_test_fn) static int s_uint32_buffer_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint32_t test_value = 0x10203040; uint8_t buffer[4] = {0}; aws_write_u32(test_value, buffer); uint8_t expected[] = {0x10, 0x20, 0x30, 0x40}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), buffer, sizeof(buffer), "Uint32_t to buffer failed"); uint32_t unmarshalled_value = aws_read_u32(buffer); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); return 0; } AWS_TEST_CASE(uint32_buffer_test, s_uint32_buffer_test_fn) static int s_uint32_buffer_non_aligned_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t test_value = 0x10203040; uint8_t *buffer = (uint8_t *)aws_mem_acquire(allocator, 9); ASSERT_FALSE((size_t)buffer & 0x07, "Heap allocated buffer should have been 8-byte aligned."); aws_write_u32(test_value, buffer + 5); uint8_t expected[] = {0x10, 0x20, 0x30, 0x40}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), (buffer + 5), sizeof(expected), "Uint32_t to buffer failed"); uint64_t unmarshalled_value = aws_read_u32(buffer + 5); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); aws_mem_release(allocator, (void *)buffer); return 0; } AWS_TEST_CASE(uint32_buffer_non_aligned_test, s_uint32_buffer_non_aligned_test_fn) static int s_uint24_buffer_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint32_t test_value = 0x102030; uint8_t buffer[3] = {0}; aws_write_u24(test_value, buffer); uint8_t expected[] = {0x10, 0x20, 0x30}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), buffer, sizeof(buffer), "24 bit int to buffer failed"); uint32_t unmarshalled_value = aws_read_u24(buffer); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); return 0; } AWS_TEST_CASE(uint24_buffer_test, s_uint24_buffer_test_fn) static int s_uint24_buffer_non_aligned_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t test_value = 0x102030; uint8_t *buffer = (uint8_t *)aws_mem_acquire(allocator, 9); ASSERT_FALSE((size_t)buffer & 0x07, "Heap allocated buffer should have been 8-byte aligned."); aws_write_u24(test_value, buffer + 6); uint8_t expected[] = {0x10, 0x20, 0x30}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), (buffer + 6), sizeof(expected), "24 bit int to buffer failed"); uint32_t unmarshalled_value = aws_read_u24(buffer + 6); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); aws_mem_release(allocator, (void *)buffer); return 0; } AWS_TEST_CASE(uint24_buffer_non_aligned_test, s_uint24_buffer_non_aligned_test_fn) static int s_uint16_buffer_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint16_t test_value = 0x1020; uint8_t buffer[2] = {0}; aws_write_u16(test_value, buffer); uint8_t expected[] = {0x10, 0x20}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), buffer, sizeof(buffer), "Uint16_t to buffer failed"); uint16_t unmarshalled_value = aws_read_u16(buffer); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); return 0; } AWS_TEST_CASE(uint16_buffer_test, s_uint16_buffer_test_fn) static int s_uint16_buffer_non_aligned_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint16_t test_value = 0x1020; uint8_t 
*buffer = (uint8_t *)aws_mem_acquire(allocator, 9); ASSERT_FALSE((size_t)buffer & 0x07, "Heap allocated buffer should have been 8-byte aligned."); aws_write_u16(test_value, buffer + 7); uint8_t expected[] = {0x10, 0x20}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), (buffer + 7), sizeof(expected), "16 bit int to buffer failed"); uint16_t unmarshalled_value = aws_read_u16(buffer + 7); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); aws_mem_release(allocator, (void *)buffer); return 0; } AWS_TEST_CASE(uint16_buffer_non_aligned_test, s_uint16_buffer_non_aligned_test_fn) /* sanity check that signed/unsigned work the same */ static int s_uint16_buffer_signed_positive_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int16_t test_value = 0x4030; uint8_t buffer[2] = {0}; aws_write_u16((uint16_t)test_value, buffer); uint8_t expected[] = {0x40, 0x30}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), buffer, sizeof(buffer), "Uint16_t to buffer failed"); int16_t unmarshalled_value = (int16_t)aws_read_u16(buffer); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); return 0; } AWS_TEST_CASE(uint16_buffer_signed_positive_test, s_uint16_buffer_signed_positive_test_fn) static int s_uint16_buffer_signed_negative_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int16_t test_value = -2; uint8_t buffer[2] = {0}; aws_write_u16((uint16_t)test_value, buffer); uint8_t expected[] = {0xFF, 0xFE}; ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), buffer, sizeof(buffer), "Uint16_t to buffer failed"); int16_t unmarshalled_value = (int16_t)aws_read_u16(buffer); ASSERT_INT_EQUALS(test_value, unmarshalled_value, "After unmarshalling the encoded data, it didn't match"); return 0; } AWS_TEST_CASE(uint16_buffer_signed_negative_test, s_uint16_buffer_signed_negative_test_fn) static int s_run_hex_encoding_append_dynamic_test_case( struct aws_allocator *allocator, const char *test_str, const char *expected, size_t initial_capacity, size_t starting_offset) { size_t output_size = 2 * strlen(test_str); struct aws_byte_cursor to_encode = aws_byte_cursor_from_c_str(test_str); struct aws_byte_buf dest; ASSERT_SUCCESS(aws_byte_buf_init(&dest, allocator, initial_capacity)); memset(dest.buffer, 0xdd, dest.capacity); dest.len = starting_offset; ASSERT_SUCCESS(aws_hex_encode_append_dynamic(&to_encode, &dest), "encode call should have succeeded"); size_t expected_size = strlen(expected); ASSERT_BIN_ARRAYS_EQUALS( expected, expected_size, dest.buffer + starting_offset, output_size, "Encode output should have been {%s}, was {%s}.", expected, dest.buffer + starting_offset); ASSERT_INT_EQUALS(output_size, dest.len - starting_offset); for (size_t i = 0; i < starting_offset; ++i) { ASSERT_INT_EQUALS( (unsigned char)*(dest.buffer + i), (unsigned char)0xdd, "Write should not have occurred before the the encoding's starting position."); } for (size_t i = starting_offset + output_size; i < dest.capacity; ++i) { ASSERT_INT_EQUALS( (unsigned char)*(dest.buffer + i), (unsigned char)0xdd, "Write should not have occurred after the encoding's final position."); } aws_byte_buf_clean_up(&dest); return 0; } static int s_hex_encoding_append_dynamic_test_case_fooba(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = "fooba"; char expected[] = "666f6f6261"; ASSERT_TRUE(s_run_hex_encoding_append_dynamic_test_case(allocator, test_data, expected, 
5, 3) == AWS_OP_SUCCESS); ASSERT_TRUE(s_run_hex_encoding_append_dynamic_test_case(allocator, test_data, expected, 50, 3) == AWS_OP_SUCCESS); return 0; } AWS_TEST_CASE(hex_encoding_append_dynamic_test_case_fooba, s_hex_encoding_append_dynamic_test_case_fooba) static int s_hex_encoding_append_dynamic_test_case_empty(struct aws_allocator *allocator, void *ctx) { (void)ctx; char test_data[] = ""; char expected[] = ""; ASSERT_TRUE(s_run_hex_encoding_append_dynamic_test_case(allocator, test_data, expected, 5, 3) == AWS_OP_SUCCESS); ASSERT_TRUE(s_run_hex_encoding_append_dynamic_test_case(allocator, test_data, expected, 50, 3) == AWS_OP_SUCCESS); return 0; } AWS_TEST_CASE(hex_encoding_append_dynamic_test_case_empty, s_hex_encoding_append_dynamic_test_case_empty) static int read_file_contents(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename) { AWS_ZERO_STRUCT(*out_buf); FILE *fp = aws_fopen(filename, "r"); ASSERT_NOT_NULL(fp); if (fp) { if (fseek(fp, 0L, SEEK_END)) { fclose(fp); ASSERT_FALSE(true, "Failed to seek to end"); return AWS_OP_ERR; } size_t allocation_size = (size_t)ftell(fp) + 1; /* Tell the user that we allocate here and if success they're responsible for the free. */ if (aws_byte_buf_init(out_buf, alloc, allocation_size)) { fclose(fp); ASSERT_FALSE(true, "Failed to init buffer"); return AWS_OP_ERR; } /* Ensure compatibility with null-terminated APIs, but don't consider * the null terminator part of the length of the payload */ out_buf->len = out_buf->capacity - 1; out_buf->buffer[out_buf->len] = 0; if (fseek(fp, 0L, SEEK_SET)) { aws_byte_buf_clean_up(out_buf); fclose(fp); ASSERT_FALSE(true, "Failed to seek to start"); return AWS_OP_ERR; } size_t read = fread(out_buf->buffer, 1, out_buf->len, fp); fclose(fp); if (read < (out_buf->len - 1)) { ASSERT_INT_EQUALS(read, out_buf->len); aws_byte_buf_clean_up(out_buf); return AWS_OP_ERR; } out_buf->len = read; return AWS_OP_SUCCESS; } return AWS_OP_ERR; } static int s_text_encoding_utf8(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf contents; ASSERT_SUCCESS(read_file_contents(&contents, allocator, "utf8.txt")); ASSERT_INT_EQUALS(AWS_TEXT_UTF8, aws_text_detect_encoding(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); return 0; } AWS_TEST_CASE(text_encoding_utf8, s_text_encoding_utf8) static int s_text_encoding_utf16(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf contents; ASSERT_SUCCESS(read_file_contents(&contents, allocator, "utf16le.txt")); ASSERT_INT_EQUALS(AWS_TEXT_UTF16, aws_text_detect_encoding(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); ASSERT_SUCCESS(read_file_contents(&contents, allocator, "utf16be.txt")); ASSERT_INT_EQUALS(AWS_TEXT_UTF16, aws_text_detect_encoding(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); return 0; } AWS_TEST_CASE(text_encoding_utf16, s_text_encoding_utf16) static int s_text_encoding_ascii(struct aws_allocator *allocator, void *ctx) { (void)ctx; char all_ascii_chars[128]; for (char c = 0; c < AWS_ARRAY_SIZE(all_ascii_chars); ++c) { all_ascii_chars[(int)c] = (c + 1) % 128; } ASSERT_INT_EQUALS( AWS_TEXT_ASCII, aws_text_detect_encoding((const uint8_t *)all_ascii_chars, AWS_ARRAY_SIZE(all_ascii_chars))); struct aws_byte_buf contents; ASSERT_SUCCESS(read_file_contents(&contents, allocator, "ascii.txt")); ASSERT_INT_EQUALS(AWS_TEXT_ASCII, aws_text_detect_encoding(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); return 0; } 
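/*
 * Illustrative sketch, not upstream code: the detection helpers in this file are
 * exercised against files on disk, but they work equally well on in-memory
 * buffers, as s_text_encoding_ascii shows with its all_ascii_chars array.
 * The test name and function below are hypothetical additions following the
 * file's own conventions; only behavior already asserted elsewhere in this
 * file is checked (pure 7-bit input is reported as AWS_TEXT_ASCII and is
 * also well-formed UTF-8).
 */
static int s_text_encoding_in_memory_sketch(struct aws_allocator *allocator, void *ctx) {
    (void)allocator;
    (void)ctx;
    const char *sample = "plain 7-bit text";
    /* Pure ASCII bytes are detected as AWS_TEXT_ASCII... */
    ASSERT_INT_EQUALS(AWS_TEXT_ASCII, aws_text_detect_encoding((const uint8_t *)sample, strlen(sample)));
    /* ...and every ASCII string is by definition also valid UTF-8. */
    ASSERT_TRUE(aws_text_is_utf8((const uint8_t *)sample, strlen(sample)));
    return 0;
}
AWS_TEST_CASE(text_encoding_in_memory_sketch, s_text_encoding_in_memory_sketch)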
AWS_TEST_CASE(text_encoding_ascii, s_text_encoding_ascii) static int s_text_encoding_is_utf8(struct aws_allocator *allocator, void *ctx) { (void)ctx; { struct aws_byte_buf contents; ASSERT_SUCCESS(read_file_contents(&contents, allocator, "utf8.txt")); ASSERT_TRUE(aws_text_is_utf8(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); } { struct aws_byte_buf contents; ASSERT_SUCCESS(read_file_contents(&contents, allocator, "ascii.txt")); ASSERT_TRUE(aws_text_is_utf8(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); } { struct aws_byte_buf contents; ASSERT_SUCCESS(read_file_contents(&contents, allocator, "utf16be.txt")); ASSERT_FALSE(aws_text_is_utf8(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); } { struct aws_byte_buf contents; ASSERT_SUCCESS(read_file_contents(&contents, allocator, "utf16le.txt")); ASSERT_FALSE(aws_text_is_utf8(contents.buffer, contents.len)); aws_byte_buf_clean_up(&contents); } return 0; } AWS_TEST_CASE(text_encoding_is_utf8, s_text_encoding_is_utf8) struct utf8_example { const char *name; struct aws_byte_cursor text; }; static struct utf8_example s_valid_utf8_examples[] = { { .name = "1 letter", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("a"), }, { .name = "Several ascii letters", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ascii word"), }, { .name = "empty string", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), }, { .name = "Embedded null byte", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("a\x00b"), }, { .name = "2 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2\xA3"), }, { .name = "3 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xE2\x82\xAC"), }, { .name = "4 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF0\x90\x8D\x88"), }, { .name = "A variety of different length codepoints", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "\xF0\x90\x8D\x88\xE2\x82\xAC\xC2\xA3\x24\xC2\xA3\xE2\x82\xAC\xF0\x90\x8D\x88"), }, { .name = "UTF8 BOM", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBB\xBF"), }, { .name = "UTF8 BOM plus extra", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBB\xBF\x24\xC2\xA3"), }, { .name = "First possible 1 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x00"), }, { .name = "First possible 2 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2\x80"), }, { .name = "First possible 3 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xE0\xA0\x80"), }, { .name = "First possible 4 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF0\x90\x80\x80"), }, { .name = "Last possible 1 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x7F"), }, { .name = "Last possible 2 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xDF\xBF"), }, { .name = "Last possible 3 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBF\xBF"), }, { .name = "Last possible 4 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF7\xBF\xBF\xBF"), }, { .name = "Last valid codepoint before prohibited range U+D800 - U+DFFF", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xED\x9F\xBF"), }, { .name = "Next valid codepoint after prohibited range U+D800 - U+DFFF", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEE\x80\x80"), }, { .name = "Boundary condition", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBF\xBD"), }, { .name = "Boundary condition", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF4\x8F\xBF\xBF"), }, { .name = "Boundary 
condition", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF4\x90\x80\x80"), }, }; static struct utf8_example s_illegal_utf8_examples[] = { { .name = "Missing last byte of 2 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2"), }, { .name = "Missing last byte of 3 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xE2\x82"), }, { .name = "Missing last byte of 4 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF0\x90\x8D"), }, { .name = "5 byte codepoints not allowed by RFC-3629", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF8\x88\x80\x80\x80"), }, { .name = "6 byte codepoints not allowed by RFC-3629", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xFC\x84\x80\x80\x80\x80"), }, { .name = "Illegal first byte", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xFF"), }, { .name = "Overlong 2 byte encoding of U+00", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC0\x80"), }, { .name = "Overlong 3 byte encoding of U+00", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xE0\x80\x80"), }, { .name = "Overlong 4 byte encoding of U+00", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF0\x80\x80\x80"), }, { .name = "Continuation byte as first byte (lowest possible value)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x80"), }, { .name = "Continuation byte as first byte (highest possible value)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xBF"), }, { .name = "Unexpected continuation byte after valid codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x61\x80"), }, { .name = "Illegal value for continuation byte (starts 11xxxxxx)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2\xC0"), }, { .name = "Illegal value for continuation byte (starts 00xxxxxx)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2\x3F"), }, { .name = "Codepoint in prohibited range U+D800 - U+DFFF (lowest possible value)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xED\xA0\x80"), }, { .name = "Codepoint in prohibited range U+D800 - U+DFFF (highest possible value)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xED\xBF\xBF"), }, { .name = "Codepoint in prohibited range U+D800 - U+DFFF (in the middle)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xED\xA3\xBF"), }, }; static int s_utf8_validation_callback_always_fails(const uint32_t codepoint, void *user_data) { (void)codepoint; (void)user_data; return aws_raise_error(AWS_ERROR_INVALID_UTF8); } static int s_utf8_validation_callback_always_passes(const uint32_t codepoint, void *user_data) { (void)codepoint; (void)user_data; return AWS_OP_SUCCESS; } static struct utf8_example s_valid_utf8_examples_for_callback[] = { { .name = "one byte", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x01"), }, { .name = "empty string", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), }, { .name = "Several valid bytes", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x01\x02\x02\x01\x01"), }}; static int s_utf8_validation_callback(const uint32_t codepoint, void *user_data) { (void)user_data; return (codepoint >= 0x01 && codepoint <= 0x02) ? 
AWS_ERROR_SUCCESS : aws_raise_error(AWS_ERROR_INVALID_UTF8); } static int s_text_is_valid_utf8(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Check the valid test cases */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_utf8_examples); ++i) { struct utf8_example example = s_valid_utf8_examples[i]; printf("valid example [%zu]: %s\n", i, example.name); ASSERT_SUCCESS(aws_decode_utf8(example.text, NULL /*options*/)); } /* Glue all the valid test cases together, they ought to pass */ struct aws_byte_buf all_good_text; aws_byte_buf_init(&all_good_text, allocator, 1024); for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_utf8_examples); ++i) { aws_byte_buf_append_dynamic(&all_good_text, &s_valid_utf8_examples[i].text); } ASSERT_SUCCESS(aws_decode_utf8(aws_byte_cursor_from_buf(&all_good_text), NULL /*options*/)); aws_byte_buf_clean_up(&all_good_text); /* Check the illegal test cases */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_illegal_utf8_examples); ++i) { struct utf8_example example = s_illegal_utf8_examples[i]; printf("illegal example [%zu]: %s\n", i, example.name); ASSERT_FAILS(aws_decode_utf8(example.text, NULL /*options*/)); } return 0; } AWS_TEST_CASE(text_is_valid_utf8, s_text_is_valid_utf8); static int s_text_is_valid_utf8_callback(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_utf8_decoder_options with_validation_callback = { .on_codepoint = s_utf8_validation_callback, }; struct aws_utf8_decoder_options with_validation_callback_always_fails = { .on_codepoint = s_utf8_validation_callback_always_fails, }; struct aws_utf8_decoder_options with_validation_callback_always_passes = { .on_codepoint = s_utf8_validation_callback_always_passes, }; /* s_valid_utf8_examples_for_callback which would pass the validation callback */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_utf8_examples_for_callback); ++i) { struct utf8_example example = s_valid_utf8_examples_for_callback[i]; printf("valid example [%zu]: %s\n", i, example.name); ASSERT_SUCCESS(aws_decode_utf8(example.text, &with_validation_callback)); } /* s_valid_utf8_examples which would failed by the callback */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_utf8_examples); ++i) { struct utf8_example example = s_valid_utf8_examples[i]; if (example.text.len == 0) { /* The validation will always be true for empty string */ printf("empty string would be always valid[%zu]: %s\n", i, example.name); ASSERT_SUCCESS(aws_decode_utf8(example.text, &with_validation_callback)); } else { printf("valid example should still failed by the callback[%zu]: %s\n", i, example.name); ASSERT_FAILS(aws_decode_utf8(example.text, &with_validation_callback)); } } /* The callback should failed the valid test cases */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_utf8_examples); ++i) { struct utf8_example example = s_valid_utf8_examples[i]; if (example.text.len == 0) { /* The validation will always be true for empty string */ printf("empty string would be always valid[%zu]: %s\n", i, example.name); ASSERT_SUCCESS(aws_decode_utf8(example.text, &with_validation_callback)); } else { printf("The callback should fail the valid example [%zu]: %s\n", i, example.name); ASSERT_FAILS(aws_decode_utf8(example.text, &with_validation_callback_always_fails)); } } /* Glue all the valid test cases together, they ought to failed by the always false callback */ struct aws_byte_buf all_good_text; aws_byte_buf_init(&all_good_text, allocator, 1024); for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_utf8_examples); ++i) { aws_byte_buf_append_dynamic(&all_good_text, 
&s_valid_utf8_examples[i].text); } ASSERT_FAILS(aws_decode_utf8(aws_byte_cursor_from_buf(&all_good_text), &with_validation_callback_always_fails)); aws_byte_buf_clean_up(&all_good_text); /* Check the illegal test cases with always true callbck, it should still fail*/ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_illegal_utf8_examples); ++i) { struct utf8_example example = s_illegal_utf8_examples[i]; printf("illegal example [%zu]: %s\n", i, example.name); ASSERT_FAILS(aws_decode_utf8(example.text, &with_validation_callback_always_passes)); } return 0; } AWS_TEST_CASE(text_is_valid_utf8_callback, s_text_is_valid_utf8_callback); static int s_utf8_decoder_update_in_chunks( struct aws_utf8_decoder *decoder, struct aws_byte_cursor text, size_t chunk_size) { while (text.len > 0) { struct aws_byte_cursor chunk = aws_byte_cursor_advance(&text, aws_min_size(chunk_size, text.len)); if (aws_utf8_decoder_update(decoder, chunk)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_utf8_decoder(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_utf8_decoder *decoder = aws_utf8_decoder_new(allocator, NULL /*options*/); ASSERT_NOT_NULL(decoder); /* Check valid examples, streaming the text in at various sized chunks*/ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_utf8_examples); ++i) { struct utf8_example example = s_valid_utf8_examples[i]; printf("valid example [%zu]: %s\n", i, example.name); aws_utf8_decoder_reset(decoder); for (size_t chunk_size = 1; chunk_size <= example.text.len; ++chunk_size) { printf(" processing %zu byte chunks\n", chunk_size); ASSERT_SUCCESS(s_utf8_decoder_update_in_chunks(decoder, example.text, chunk_size)); } ASSERT_SUCCESS(aws_utf8_decoder_finalize(decoder)); } /* Check illegal examples, streaming the text in at various sized chunks*/ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_illegal_utf8_examples); ++i) { struct utf8_example example = s_illegal_utf8_examples[i]; printf("illegal example [%zu]: %s\n", i, example.name); aws_utf8_decoder_reset(decoder); bool decoder_error = false; for (size_t chunk_size = 1; chunk_size <= example.text.len; ++chunk_size) { printf(" processing %zu byte chunks\n", chunk_size); if (s_utf8_decoder_update_in_chunks(decoder, example.text, chunk_size)) { ASSERT_INT_EQUALS(AWS_ERROR_INVALID_UTF8, aws_last_error()); decoder_error = true; break; } } if (!decoder_error) { ASSERT_ERROR(AWS_ERROR_INVALID_UTF8, aws_utf8_decoder_finalize(decoder)); } } aws_utf8_decoder_destroy(decoder); return 0; } AWS_TEST_CASE(utf8_decoder, s_utf8_decoder); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/environment_test.c000066400000000000000000000025211456575232400254310ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_test_variable, "AWS_TEST_VAR"); AWS_STATIC_STRING_FROM_LITERAL(s_test_value, "SOME_VALUE"); static int s_test_environment_functions_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *value; int result = aws_get_environment_value(allocator, s_test_variable, &value); ASSERT_TRUE(result == AWS_OP_SUCCESS); ASSERT_TRUE(value == NULL); result = aws_set_environment_value(s_test_variable, (struct aws_string *)s_test_value); ASSERT_TRUE(result == AWS_OP_SUCCESS); result = aws_get_environment_value(allocator, s_test_variable, &value); ASSERT_TRUE(result == AWS_OP_SUCCESS); ASSERT_TRUE(aws_string_compare(value, s_test_value) == 0); aws_string_destroy(value); result = aws_unset_environment_value(s_test_variable); ASSERT_TRUE(result == AWS_OP_SUCCESS); result = aws_get_environment_value(allocator, s_test_variable, &value); ASSERT_TRUE(result == AWS_OP_SUCCESS); ASSERT_TRUE(value == NULL); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_environment_functions, s_test_environment_functions_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/error_test.c000066400000000000000000000425431456575232400242260ustar00rootroot00000000000000/* * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include #include #include static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO(1024, "test error 1", "test lib"), AWS_DEFINE_ERROR_INFO(1025, "test error 2", "test lib"), }; static struct aws_error_info_list s_errors_list = { .error_list = s_errors, .count = AWS_ARRAY_SIZE(s_errors), }; static int s_setup_errors_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; aws_reset_error(); aws_set_global_error_handler_fn(NULL, NULL); aws_set_thread_local_error_handler_fn(NULL, NULL); aws_register_error_info(&s_errors_list); return AWS_OP_SUCCESS; } static int s_teardown_errors_test_fn(struct aws_allocator *allocator, int setup_res, void *ctx) { (void)allocator; (void)setup_res; (void)ctx; aws_reset_error(); aws_set_global_error_handler_fn(NULL, NULL); aws_set_thread_local_error_handler_fn(NULL, NULL); return AWS_OP_SUCCESS; } static int s_raise_errors_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int error = aws_last_error(); ASSERT_NULL(error, "error should be initialized to NULL"); ASSERT_INT_EQUALS(0, aws_last_error(), "error code should be initialized to 0"); struct aws_error_info test_error_1 = s_errors[0]; struct aws_error_info test_error_2 = s_errors[1]; ASSERT_INT_EQUALS(-1, aws_raise_error(test_error_1.error_code), "Raise error should return failure code."); error = aws_last_error(); ASSERT_INT_EQUALS( test_error_1.error_code, error, "Expected error code %d, but was %d", test_error_1.error_code, error); ASSERT_STR_EQUALS( test_error_1.error_str, aws_error_str(error), "Expected error string %s, but got %s", test_error_1.error_str, aws_error_str(error)); ASSERT_STR_EQUALS( test_error_1.lib_name, aws_error_lib_name(error), "Expected error libname %s, but got %s", test_error_1.lib_name, aws_error_lib_name(error)); ASSERT_INT_EQUALS(-1, aws_raise_error(test_error_2.error_code), "Raise error should return failure code."); error = aws_last_error(); ASSERT_INT_EQUALS( test_error_2.error_code, error, "Expected error code %d, but was %d", test_error_2.error_code, error); error = aws_last_error(); ASSERT_NOT_NULL(error, "last error should not have been null"); ASSERT_STR_EQUALS( test_error_2.error_str, aws_error_str(error), "Expected error string %s, but got %s", test_error_2.error_str, aws_error_str(error)); ASSERT_STR_EQUALS( test_error_2.lib_name, aws_error_lib_name(error), "Expected error libname %s, but got %s", test_error_2.lib_name, aws_error_lib_name(error)); aws_reset_error(); error = aws_last_error(); ASSERT_NULL(error, "error should be reset to NULL"); ASSERT_INT_EQUALS(0, aws_last_error(), "error code should be reset to 0"); return 0; } static int s_reset_errors_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_error_info test_error_1 = s_errors[0]; struct aws_error_info test_error_2 = s_errors[1]; aws_raise_error(test_error_2.error_code); aws_restore_error(test_error_1.error_code); int error = aws_last_error(); ASSERT_NOT_NULL(error, "last error should not have been null"); ASSERT_INT_EQUALS( test_error_1.error_code, error, "Expected error code %d, but was %d", test_error_1.error_code, error); ASSERT_STR_EQUALS( test_error_1.error_str, aws_error_str(error), "Expected error string %s, but got %s", test_error_1.error_str, aws_error_str(error)); ASSERT_STR_EQUALS( test_error_1.lib_name, aws_error_lib_name(error), "Expected error libname %s, but got %s", test_error_1.lib_name, aws_error_lib_name(error)); return 0; } struct error_test_cb_data { int global_cb_called; 
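/* The remaining fields record whether the thread-local handler fired and the
 * error code most recently delivered to either handler. */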
int tl_cb_called; int last_seen; }; static void s_error_test_global_cb(int err, void *ctx) { struct error_test_cb_data *cb_data = (struct error_test_cb_data *)ctx; cb_data->global_cb_called = 1; cb_data->last_seen = err; } static void s_error_test_thread_local_cb(int err, void *ctx) { struct error_test_cb_data *cb_data = (struct error_test_cb_data *)ctx; cb_data->tl_cb_called = 1; cb_data->last_seen = err; } static int s_error_callback_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct error_test_cb_data cb_data = {.last_seen = 0, .global_cb_called = 0, .tl_cb_called = 0}; struct aws_error_info test_error_1 = s_errors[0]; struct aws_error_info test_error_2 = s_errors[1]; aws_error_handler_fn *old_fn = aws_set_global_error_handler_fn(s_error_test_global_cb, &cb_data); ASSERT_NULL(old_fn, "setting the global error callback the first time should return null"); aws_raise_error(test_error_1.error_code); ASSERT_NOT_NULL(cb_data.last_seen, "last error should not have been null"); ASSERT_TRUE(cb_data.global_cb_called, "Global Callback should have been invoked"); ASSERT_FALSE(cb_data.tl_cb_called, "Thread Local Callback should not have been invoked"); ASSERT_INT_EQUALS( test_error_1.error_code, cb_data.last_seen, "Expected error code %d, but was %d", test_error_1.error_code, cb_data.last_seen); ASSERT_STR_EQUALS( test_error_1.error_str, aws_error_str(cb_data.last_seen), "Expected error string %s, but got %s", test_error_1.error_str, aws_error_str(cb_data.last_seen)); ASSERT_STR_EQUALS( test_error_1.lib_name, aws_error_lib_name(cb_data.last_seen), "Expected error libname %s, but got %s", test_error_1.lib_name, aws_error_lib_name(cb_data.last_seen)); cb_data.last_seen = 0; cb_data.global_cb_called = 0; old_fn = aws_set_thread_local_error_handler_fn(s_error_test_thread_local_cb, &cb_data); ASSERT_NULL(old_fn, "setting the global error callback the first time should return null"); aws_raise_error(test_error_2.error_code); ASSERT_INT_EQUALS( test_error_2.error_code, aws_last_error(), "Expected error code %d, but was %d", test_error_2.error_code, aws_last_error()); ASSERT_NOT_NULL(cb_data.last_seen, "last error should not have been null"); ASSERT_FALSE(cb_data.global_cb_called, "Global Callback should not have been invoked"); ASSERT_TRUE(cb_data.tl_cb_called, "Thread local Callback should have been invoked"); ASSERT_INT_EQUALS( test_error_2.error_code, cb_data.last_seen, "Expected error code %d, but was %d", test_error_2.error_code, cb_data.last_seen); ASSERT_STR_EQUALS( test_error_2.error_str, aws_error_str(cb_data.last_seen), "Expected error string %s, but got %s", test_error_2.error_str, aws_error_str(cb_data.last_seen)); ASSERT_STR_EQUALS( test_error_2.lib_name, aws_error_lib_name(cb_data.last_seen), "Expected error libname %s, but got %s", test_error_2.lib_name, aws_error_lib_name(cb_data.last_seen)); old_fn = aws_set_thread_local_error_handler_fn(NULL, NULL); ASSERT_PTR_EQUALS( s_error_test_thread_local_cb, old_fn, "Setting a new thread local error callback should have returned the most recent value"); old_fn = aws_set_global_error_handler_fn(NULL, NULL); ASSERT_PTR_EQUALS( s_error_test_global_cb, old_fn, "Setting a new global error callback should have returned the most recent value"); return 0; } static int s_unknown_error_code_in_slot_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int error = aws_last_error(); ASSERT_NULL(error, "error should be initialized to NULL"); ASSERT_INT_EQUALS(0, aws_last_error(), "error code 
should be initialized to 0"); struct aws_error_info test_error_2 = s_errors[1]; aws_raise_error(test_error_2.error_code + 1); error = aws_last_error(); /* error code should still propogate */ ASSERT_INT_EQUALS( test_error_2.error_code + 1, error, "Expected error code %d, but was %d", test_error_2.error_code + 1, error); /* string should be invalid though */ ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_str(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_str(error)); ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_lib_name(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_lib_name(error)); ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_debug_str(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_debug_str(error)); return 0; } static int s_unknown_error_code_no_slot_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int error = aws_last_error(); ASSERT_NULL(error, "error should be initialized to NULL"); ASSERT_INT_EQUALS(0, aws_last_error(), "error code should be initialized to 0"); int non_slotted_error_code = 3000; aws_raise_error(non_slotted_error_code); error = aws_last_error(); /* error code should still propogate */ ASSERT_INT_EQUALS( non_slotted_error_code, error, "Expected error code %d, but was %d", non_slotted_error_code, error); /* string should be invalid though */ ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_str(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_str(error)); ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_lib_name(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_lib_name(error)); ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_debug_str(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_debug_str(error)); return 0; } static int s_unknown_error_code_range_too_large_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int error = aws_last_error(); ASSERT_NULL(error, "error should be initialized to NULL"); ASSERT_INT_EQUALS(0, aws_last_error(), "error code should be initialized to 0"); int oor_error_code = 10001; aws_raise_error(oor_error_code); error = aws_last_error(); /* error code should still propogate */ ASSERT_INT_EQUALS(oor_error_code, error, "Expected error code %d, but was %d", oor_error_code, error); /* string should be invalid though */ ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_str(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_str(error)); ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_lib_name(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_lib_name(error)); ASSERT_STR_EQUALS( "Unknown Error Code", aws_error_debug_str(error), "Expected error string %s, but got %s", "Unknown Error Code", aws_error_debug_str(error)); return 0; } struct error_thread_test_data { int thread_1_code; int thread_1_get_last_code; aws_thread_id_t thread_1_id; int thread_1_encountered_count; int thread_2_code; int thread_2_get_last_code; int thread_2_encountered_count; aws_thread_id_t thread_2_id; }; static void s_error_thread_test_thread_local_cb(int err, void *ctx) { struct error_thread_test_data *cb_data = (struct error_thread_test_data *)ctx; aws_thread_id_t thread_id = aws_thread_current_thread_id(); if (aws_thread_thread_id_equal(thread_id, cb_data->thread_1_id)) { cb_data->thread_1_code = err; cb_data->thread_1_get_last_code = 
aws_last_error(); cb_data->thread_1_encountered_count += 1; return; } cb_data->thread_2_code = err; cb_data->thread_2_get_last_code = aws_last_error(); cb_data->thread_2_id = aws_thread_current_thread_id(); cb_data->thread_2_encountered_count += 1; } static void s_error_thread_fn(void *arg) { aws_set_thread_local_error_handler_fn(s_error_thread_test_thread_local_cb, arg); aws_raise_error(15); } static int s_error_code_cross_thread_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct error_thread_test_data test_data = {.thread_1_code = 0, .thread_1_get_last_code = 0, .thread_1_encountered_count = 0, .thread_2_code = 0, .thread_2_get_last_code = 0, .thread_2_encountered_count = 0, .thread_2_id = 0}; test_data.thread_1_id = aws_thread_current_thread_id(); aws_set_thread_local_error_handler_fn(s_error_thread_test_thread_local_cb, &test_data); int thread_1_error_code_expected = 5; aws_raise_error(thread_1_error_code_expected); struct aws_thread thread; aws_thread_init(&thread, allocator); ASSERT_SUCCESS( aws_thread_launch(&thread, s_error_thread_fn, &test_data, NULL), "Thread creation failed with error %d", aws_last_error()); ASSERT_SUCCESS(aws_thread_join(&thread), "Thread join failed with error %d", aws_last_error()); aws_thread_clean_up(&thread); ASSERT_INT_EQUALS( 1, test_data.thread_1_encountered_count, "The thread local CB should only have triggered for the first thread once."); ASSERT_INT_EQUALS( 1, test_data.thread_2_encountered_count, "The thread local CB should only have triggered for the second thread once."); ASSERT_FALSE(test_data.thread_2_id == 0, "thread 2 id should have been set to something other than 0"); ASSERT_FALSE(test_data.thread_2_id == test_data.thread_1_id, "threads 1 and 2 should be different ids"); ASSERT_INT_EQUALS( thread_1_error_code_expected, aws_last_error(), "Thread 1's error should not have changed when thread 2 raised an error."); ASSERT_INT_EQUALS( thread_1_error_code_expected, test_data.thread_1_code, "Thread 1 code should have matched the original error."); ASSERT_INT_EQUALS( thread_1_error_code_expected, test_data.thread_1_get_last_code, "Thread 1 get last error code should have matched the original error."); ASSERT_INT_EQUALS(15, test_data.thread_2_code, "Thread 2 code should have matched the thread 2 error."); ASSERT_INT_EQUALS( 15, test_data.thread_2_get_last_code, "Thread 2 get last error code should have matched the thread 2 error."); return 0; } static int s_aws_load_error_strings_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Load aws-c-common's actual error info. * This will fail if the error info list is out of sync with the error enums. 
*/ aws_common_library_init(allocator); return AWS_OP_SUCCESS; } static int s_aws_assume_compiles_test(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; AWS_ASSUME(true); if (false) { AWS_UNREACHABLE(); } return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( raise_errors_test, s_setup_errors_test_fn, s_raise_errors_test_fn, s_teardown_errors_test_fn, NULL) AWS_TEST_CASE_FIXTURE( error_callback_test, s_setup_errors_test_fn, s_error_callback_test_fn, s_teardown_errors_test_fn, NULL) AWS_TEST_CASE_FIXTURE( reset_errors_test, s_setup_errors_test_fn, s_reset_errors_test_fn, s_teardown_errors_test_fn, NULL) AWS_TEST_CASE_FIXTURE( unknown_error_code_in_slot_test, s_setup_errors_test_fn, s_unknown_error_code_in_slot_test_fn, s_teardown_errors_test_fn, NULL) AWS_TEST_CASE_FIXTURE( unknown_error_code_no_slot_test, s_setup_errors_test_fn, s_unknown_error_code_no_slot_test_fn, s_teardown_errors_test_fn, NULL) AWS_TEST_CASE_FIXTURE( unknown_error_code_range_too_large_test, s_setup_errors_test_fn, s_unknown_error_code_range_too_large_test_fn, s_teardown_errors_test_fn, NULL) AWS_TEST_CASE_FIXTURE( error_code_cross_thread_test, s_setup_errors_test_fn, s_error_code_cross_thread_test_fn, s_teardown_errors_test_fn, NULL) AWS_TEST_CASE(aws_load_error_strings_test, s_aws_load_error_strings_test) AWS_TEST_CASE(aws_assume_compiles_test, s_aws_assume_compiles_test) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/file_test.c000066400000000000000000000513631456575232400240140ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include static int s_aws_fopen_test_helper(char *file_path, char *content) { char read_result[100]; AWS_ZERO_ARRAY(read_result); FILE *file = aws_fopen(file_path, "w+"); ASSERT_NOT_NULL(file); fprintf(file, "%s", content); fclose(file); FILE *readfile = aws_fopen(file_path, "r"); ASSERT_NOT_NULL(readfile); size_t read_len = fread(read_result, sizeof(char), strlen(content), readfile); ASSERT_UINT_EQUALS(strlen(content), read_len); fclose(readfile); ASSERT_SUCCESS(strcmp(content, read_result)); #ifdef _WIN32 wchar_t w_file_path[1000]; /* plus one for the EOS */ size_t file_path_len = strlen(file_path) + 1; MultiByteToWideChar(CP_UTF8, 0, file_path, -1, w_file_path, (int)file_path_len); ASSERT_SUCCESS(_wremove(w_file_path)); #else ASSERT_SUCCESS(remove(file_path)); #endif return AWS_OP_SUCCESS; } static int s_aws_fopen_content_matches(char *file_path, char *content) { char read_result[100]; AWS_ZERO_ARRAY(read_result); FILE *file = aws_fopen(file_path, "rb"); ASSERT_NOT_NULL(file); size_t read_len = fread(read_result, sizeof(char), strlen(content), file); ASSERT_UINT_EQUALS(strlen(content), read_len); fclose(file); ASSERT_SUCCESS(strcmp(content, read_result)); return AWS_OP_SUCCESS; } static int s_aws_fopen_non_ascii_read_existing_file_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char expected_content[] = "This is a non-ascii file path file."; char file_path[] = "Å Éxample.txt"; char read_result[100]; AWS_ZERO_ARRAY(read_result); FILE *readfile = aws_fopen(file_path, "r"); ASSERT_NOT_NULL(readfile); size_t read_len = fread(read_result, sizeof(char), strlen(expected_content), readfile); ASSERT_UINT_EQUALS(strlen(expected_content), read_len); fclose(readfile); ASSERT_SUCCESS(strcmp(expected_content, read_result)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aws_fopen_non_ascii_read_existing_file_test, 
s_aws_fopen_non_ascii_read_existing_file_test_fn) static int s_aws_fopen_non_ascii_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char file_path[] = "Éxample.txt"; char content[] = "samples"; ASSERT_SUCCESS(s_aws_fopen_test_helper(file_path, content)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aws_fopen_non_ascii_test, s_aws_fopen_non_ascii_test_fn) static int s_aws_fopen_ascii_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; char file_path[] = "sample.txt"; char content[] = "samples"; ASSERT_SUCCESS(s_aws_fopen_test_helper(file_path, content)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(aws_fopen_ascii_test, s_aws_fopen_ascii_test_fn) struct directory_traversal_test_data { bool child_dir_verified; bool child_file_verified; bool root_file_verified; }; static const char *s_first_child_dir_path = "dir_traversal_test" AWS_PATH_DELIM_STR "first_child_dir"; static const char *s_first_child_file_path = "dir_traversal_test" AWS_PATH_DELIM_STR "first_child_dir" AWS_PATH_DELIM_STR "child.txt"; static const char *s_root_child_path = "dir_traversal_test" AWS_PATH_DELIM_STR "root_child.txt"; bool s_on_directory_entry(const struct aws_directory_entry *entry, void *user_data) { struct directory_traversal_test_data *test_data = user_data; if (aws_byte_cursor_eq_c_str(&entry->relative_path, s_root_child_path)) { test_data->root_file_verified = entry->file_type & AWS_FILE_TYPE_FILE && entry->file_size && s_aws_fopen_content_matches((char *)entry->relative_path.ptr, "dir_traversal_test->root_child.txt") == AWS_OP_SUCCESS; return true; } if (aws_byte_cursor_eq_c_str(&entry->relative_path, s_first_child_file_path)) { test_data->child_file_verified = entry->file_type & AWS_FILE_TYPE_FILE && entry->file_size && s_aws_fopen_content_matches( (char *)entry->relative_path.ptr, "dir_traversal_test->first_child_dir->child.txt") == AWS_OP_SUCCESS; return true; } if (aws_byte_cursor_eq_c_str(&entry->relative_path, s_first_child_dir_path)) { test_data->child_dir_verified = entry->file_type & AWS_FILE_TYPE_DIRECTORY; return true; } return false; } static int s_directory_traversal_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "dir_traversal_test"); struct directory_traversal_test_data test_data; AWS_ZERO_STRUCT(test_data); ASSERT_SUCCESS(aws_directory_traverse(allocator, path, true, s_on_directory_entry, &test_data)); ASSERT_TRUE(test_data.child_dir_verified); ASSERT_TRUE(test_data.root_file_verified); ASSERT_TRUE(test_data.child_file_verified); AWS_ZERO_STRUCT(test_data); ASSERT_SUCCESS(aws_directory_traverse(allocator, path, false, s_on_directory_entry, &test_data)); ASSERT_TRUE(test_data.child_dir_verified); ASSERT_TRUE(test_data.root_file_verified); ASSERT_FALSE(test_data.child_file_verified); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_traversal_test, s_directory_traversal_test_fn) static int s_directory_iteration_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "dir_traversal_test"); struct aws_directory_iterator *iterator = aws_directory_entry_iterator_new(allocator, path); ASSERT_NOT_NULL(iterator); const struct aws_directory_entry *first_entry = aws_directory_entry_iterator_get_value(iterator); ASSERT_NOT_NULL(first_entry); bool first_child_dir_found = false; bool root_file_found = false; do { const struct aws_directory_entry *entry = 
aws_directory_entry_iterator_get_value(iterator); if (entry->file_type == AWS_FILE_TYPE_DIRECTORY) { struct aws_byte_cursor first_child_dir_path_cur = aws_byte_cursor_from_c_str(s_first_child_dir_path); ASSERT_BIN_ARRAYS_EQUALS( first_child_dir_path_cur.ptr, first_child_dir_path_cur.len, entry->relative_path.ptr, entry->relative_path.len); first_child_dir_found = true; struct aws_string *next_path = aws_string_new_from_cursor(allocator, &entry->relative_path); struct aws_directory_iterator *next_iter = aws_directory_entry_iterator_new(allocator, next_path); aws_string_destroy(next_path); ASSERT_NOT_NULL(next_iter); entry = aws_directory_entry_iterator_get_value(next_iter); struct aws_byte_cursor first_child_file_path_cur = aws_byte_cursor_from_c_str(s_first_child_file_path); ASSERT_BIN_ARRAYS_EQUALS( first_child_file_path_cur.ptr, first_child_file_path_cur.len, entry->relative_path.ptr, entry->relative_path.len); ASSERT_INT_EQUALS(AWS_FILE_TYPE_FILE, entry->file_type); ASSERT_ERROR(AWS_ERROR_LIST_EMPTY, aws_directory_entry_iterator_next(next_iter)); aws_directory_entry_iterator_destroy(next_iter); } else { struct aws_byte_cursor root_child_file_path_cur = aws_byte_cursor_from_c_str(s_root_child_path); ASSERT_BIN_ARRAYS_EQUALS( root_child_file_path_cur.ptr, root_child_file_path_cur.len, entry->relative_path.ptr, entry->relative_path.len); ASSERT_INT_EQUALS(AWS_FILE_TYPE_FILE, entry->file_type); root_file_found = true; } } while (aws_directory_entry_iterator_next(iterator) == AWS_OP_SUCCESS); ASSERT_ERROR(AWS_ERROR_LIST_EMPTY, aws_directory_entry_iterator_next(iterator)); ASSERT_SUCCESS(aws_directory_entry_iterator_previous(iterator)); ASSERT_PTR_EQUALS(first_entry, aws_directory_entry_iterator_get_value(iterator)); aws_directory_entry_iterator_destroy(iterator); aws_string_destroy(path); ASSERT_TRUE(root_file_found); ASSERT_TRUE(first_child_dir_found); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_iteration_test, s_directory_iteration_test_fn) static int s_directory_iteration_non_existent_directory_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "dir_traversal_test_non_existent"); struct aws_directory_iterator *iterator = aws_directory_entry_iterator_new(allocator, path); ASSERT_NULL(iterator); ASSERT_INT_EQUALS(aws_last_error(), AWS_ERROR_FILE_INVALID_PATH); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_iteration_non_existent_directory_test, s_directory_iteration_non_existent_directory_test_fn) struct directory_traversal_abort_test_data { int times_called; }; bool directory_traversal_abort_test_data(const struct aws_directory_entry *entry, void *user_data) { (void)entry; struct directory_traversal_abort_test_data *test_data = user_data; test_data->times_called += 1; return false; } static int s_directory_traversal_stop_traversal_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "dir_traversal_test"); struct directory_traversal_abort_test_data test_data; AWS_ZERO_STRUCT(test_data); ASSERT_ERROR( AWS_ERROR_OPERATION_INTERUPTED, aws_directory_traverse(allocator, path, true, directory_traversal_abort_test_data, &test_data)); ASSERT_INT_EQUALS(1, test_data.times_called); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_traversal_stop_traversal, s_directory_traversal_stop_traversal_fn) static int s_directory_traversal_on_file_test_fn(struct aws_allocator *allocator, void *ctx) { 
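/* Traversal is only defined for directories: handing aws_directory_traverse()
 * a path to a regular file is expected to fail with AWS_ERROR_FILE_INVALID_PATH below. */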
(void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "dir_traversal_test/root_child.txt"); struct directory_traversal_test_data test_data; AWS_ZERO_STRUCT(test_data); ASSERT_ERROR( AWS_ERROR_FILE_INVALID_PATH, aws_directory_traverse(allocator, path, true, s_on_directory_entry, &test_data)); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_traversal_on_file_test, s_directory_traversal_on_file_test_fn) static int s_directory_existence_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "dir_traversal_test"); ASSERT_TRUE(aws_directory_exists(path)); aws_string_destroy(path); path = aws_string_new_from_c_str(allocator, "dir_traversal_test_blah"); ASSERT_FALSE(aws_directory_exists(path)); aws_string_destroy(path); path = aws_string_new_from_c_str(allocator, "dir_traversal_test/root_child.txt"); ASSERT_FALSE(aws_directory_exists(path)); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_existence_test, s_directory_existence_test_fn) static int s_directory_creation_deletion_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "temp_dir"); ASSERT_SUCCESS(aws_directory_create(path)); /* should be idempotent */ ASSERT_SUCCESS(aws_directory_create(path)); ASSERT_TRUE(aws_directory_exists(path)); ASSERT_SUCCESS(aws_directory_delete(path, false)); ASSERT_FALSE(aws_directory_exists(path)); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_creation_deletion_test, s_directory_creation_deletion_test_fn) static int s_directory_non_empty_deletion_fails_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "dir_traversal_test"); ASSERT_TRUE(aws_directory_exists(path)); ASSERT_ERROR(AWS_ERROR_DIRECTORY_NOT_EMPTY, aws_directory_delete(path, false)); ASSERT_TRUE(aws_directory_exists(path)); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_non_empty_deletion_fails_test, s_directory_non_empty_deletion_fails_test_fn) static int s_directory_non_empty_deletion_recursively_succeeds_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "non_empty_dir_del_test_dir_1"); ASSERT_SUCCESS(aws_directory_create(path)); const char *nested_dir = "non_empty_dir_del_test_dir_1" AWS_PATH_DELIM_STR "test_dir_2"; struct aws_string *nested_dir_path = aws_string_new_from_c_str(allocator, nested_dir); ASSERT_SUCCESS(aws_directory_create(nested_dir_path)); const char *nested_file = "non_empty_dir_del_test_dir_1" AWS_PATH_DELIM_STR "test_dir_2" AWS_PATH_DELIM_STR "nested_file.txt"; FILE *nested_file_ptr = aws_fopen(nested_file, "w"); ASSERT_NOT_NULL(nested_file_ptr); fclose(nested_file_ptr); ASSERT_SUCCESS(aws_directory_delete(path, true)); ASSERT_FALSE(aws_directory_exists(path)); aws_string_destroy(nested_dir_path); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE( directory_non_empty_deletion_recursively_succeeds_test, s_directory_non_empty_deletion_recursively_succeeds_test_fn) static int s_directory_move_succeeds_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "directory_move_succeeds_test_dir_1"); ASSERT_SUCCESS(aws_directory_create(path)); struct aws_string *to_path = aws_string_new_from_c_str(allocator, 
"directory_move_succeeds_test_dir_2"); ASSERT_SUCCESS(aws_directory_or_file_move(path, to_path)); ASSERT_FALSE(aws_directory_exists(path)); ASSERT_TRUE(aws_directory_exists(to_path)); ASSERT_SUCCESS(aws_directory_delete(to_path, true)); aws_string_destroy(to_path); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_move_succeeds_test, s_directory_move_succeeds_test_fn) static int s_directory_move_src_non_existent_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *path = aws_string_new_from_c_str(allocator, "directory_move_src_non_existent_test_dir_1"); struct aws_string *to_path = aws_string_new_from_c_str(allocator, "directory_move_src_non_existent_test_dir_2"); ASSERT_ERROR(AWS_ERROR_FILE_INVALID_PATH, aws_directory_or_file_move(path, to_path)); aws_string_destroy(to_path); aws_string_destroy(path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(directory_move_src_non_existent_test, s_directory_move_src_non_existent_test_fn) static int s_test_home_directory_not_null(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *home_directory = aws_get_home_directory(allocator); ASSERT_TRUE(home_directory != NULL); aws_string_destroy(home_directory); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_home_directory_not_null, s_test_home_directory_not_null); static int s_test_normalize_posix_directory_separator(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *buffer = aws_string_new_from_c_str(allocator, "./test/path/abc"); struct aws_byte_buf path_buf = aws_byte_buf_from_array(buffer->bytes, buffer->len); aws_normalize_directory_separator(&path_buf); for (size_t i = 0; i < path_buf.len; ++i) { if (aws_is_any_directory_separator((char)path_buf.buffer[i])) { ASSERT_INT_EQUALS(aws_get_platform_directory_separator(), path_buf.buffer[i]); } } aws_string_destroy(buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_normalize_posix_directory_separator, s_test_normalize_posix_directory_separator); static int s_test_normalize_windows_directory_separator(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *buffer = aws_string_new_from_c_str(allocator, ".\\test\\path\\abc"); struct aws_byte_buf path_buf = aws_byte_buf_from_array(buffer->bytes, buffer->len); aws_normalize_directory_separator(&path_buf); for (size_t i = 0; i < path_buf.len; ++i) { if (aws_is_any_directory_separator((char)path_buf.buffer[i])) { ASSERT_INT_EQUALS(aws_get_platform_directory_separator(), path_buf.buffer[i]); } } aws_string_destroy(buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_normalize_windows_directory_separator, s_test_normalize_windows_directory_separator); static int s_check_byte_buf_from_file(const struct aws_byte_buf *buf, struct aws_byte_cursor expected_contents) { ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&expected_contents, buf), "Contents should match"); ASSERT_TRUE(buf->capacity > buf->len, "Buffer should end with null-terminator"); ASSERT_UINT_EQUALS(0, buf->buffer[buf->len], "Buffer should end with null-terminator"); return AWS_OP_SUCCESS; } static int s_create_file_then_read_it(struct aws_allocator *allocator, struct aws_byte_cursor contents) { /* create file */ const char *filename = "testy"; FILE *f = aws_fopen(filename, "wb"); ASSERT_UINT_EQUALS(contents.len, fwrite(contents.ptr, 1, contents.len, f)); ASSERT_INT_EQUALS(0, fclose(f)); struct aws_byte_buf buf; /* check aws_byte_buf_init_from_file() */ ASSERT_SUCCESS(aws_byte_buf_init_from_file(&buf, allocator, filename)); 
ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); aws_byte_buf_clean_up(&buf); /* now check aws_byte_buf_init_from_file_with_size_hint() ... */ /* size_hint more then big enough */ size_t size_hint = contents.len * 2; ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); aws_byte_buf_clean_up(&buf); /* size_hint not big enough for null-terminator */ size_hint = contents.len; ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); aws_byte_buf_clean_up(&buf); /* size_hint 0 */ size_hint = 0; ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); aws_byte_buf_clean_up(&buf); /* size_hint 1 */ size_hint = 1; ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint)); ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents)); aws_byte_buf_clean_up(&buf); remove(filename); return AWS_OP_SUCCESS; } /* Read an actual "special file" (if it exists on this machine) */ static int s_read_special_file(struct aws_allocator *allocator, const char *filename) { struct aws_string *filename_str = aws_string_new_from_c_str(allocator, filename); bool exists = aws_path_exists(filename_str); aws_string_destroy(filename_str); if (!exists) { return AWS_OP_SUCCESS; } struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_from_file(&buf, allocator, filename)); ASSERT_TRUE(buf.capacity > buf.len, "Buffer should end with null-terminator"); ASSERT_UINT_EQUALS(0, buf.buffer[buf.len], "Buffer should end with null-terminator"); if (strcmp("/dev/null", filename) == 0) { ASSERT_UINT_EQUALS(0, buf.len, "expected /dev/null to be empty"); } else { ASSERT_TRUE(buf.len > 0, "expected special file to have data"); } aws_byte_buf_clean_up(&buf); return AWS_OP_SUCCESS; } static int s_test_byte_buf_init_from_file(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* simple text file */ ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_c_str("asdf"))); /* empty file */ ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_c_str(""))); /* large 3MB+1byte binary file */ struct aws_byte_buf big_rando; aws_byte_buf_init(&big_rando, allocator, (1024 * 1024 * 3) + 1); ASSERT_SUCCESS(aws_device_random_buffer(&big_rando)); ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_buf(&big_rando))); aws_byte_buf_clean_up(&big_rando); /* test some "special files" (if they exist) */ ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/cpuinfo")); ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/net/tcp")); ASSERT_SUCCESS(s_read_special_file(allocator, "/sys/devices/virtual/dmi/id/sys_vendor")); ASSERT_SUCCESS(s_read_special_file(allocator, "/dev/null")); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_byte_buf_init_from_file, s_test_byte_buf_init_from_file) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/fuzz/000077500000000000000000000000001456575232400226605ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/fuzz/base64_encoding_transitive.c000066400000000000000000000027531456575232400302350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include /* NOLINTNEXTLINE(readability-identifier-naming) */ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { struct aws_allocator *allocator = aws_default_allocator(); size_t output_size = 0; int result = aws_base64_compute_encoded_len(size, &output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(data, size); struct aws_byte_buf encode_output; result = aws_byte_buf_init(&encode_output, allocator, output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); result = aws_base64_encode(&to_encode, &encode_output); AWS_ASSERT(result == AWS_OP_SUCCESS); struct aws_byte_cursor to_decode = aws_byte_cursor_from_buf(&encode_output); result = aws_base64_compute_decoded_len(&to_decode, &output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); AWS_ASSERT(output_size == size); struct aws_byte_buf decode_output; result = aws_byte_buf_init(&decode_output, allocator, output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); result = aws_base64_decode(&to_decode, &decode_output); AWS_ASSERT(result == AWS_OP_SUCCESS); AWS_ASSERT(output_size == decode_output.len); AWS_ASSERT(memcmp(decode_output.buffer, data, size) == 0); aws_byte_buf_clean_up(&encode_output); aws_byte_buf_clean_up(&decode_output); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/fuzz/hex_encoding_transitive.c000066400000000000000000000030421456575232400277250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /* NOLINTNEXTLINE(readability-identifier-naming) */ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { struct aws_allocator *allocator = aws_default_allocator(); size_t output_size = 0; int result = aws_hex_compute_encoded_len(size, &output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(data, size); struct aws_byte_buf encode_output; result = aws_byte_buf_init(&encode_output, allocator, output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); result = aws_hex_encode(&to_encode, &encode_output); AWS_ASSERT(result == AWS_OP_SUCCESS); --encode_output.len; /* Remove null terminator */ result = aws_hex_compute_decoded_len(encode_output.len, &output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); AWS_ASSERT(output_size == size); struct aws_byte_buf decode_output; result = aws_byte_buf_init(&decode_output, allocator, output_size); AWS_ASSERT(result == AWS_OP_SUCCESS); struct aws_byte_cursor decode_input = aws_byte_cursor_from_buf(&encode_output); result = aws_hex_decode(&decode_input, &decode_output); AWS_ASSERT(result == AWS_OP_SUCCESS); AWS_ASSERT(output_size == decode_output.len); AWS_ASSERT(memcmp(decode_output.buffer, data, size) == 0); aws_byte_buf_clean_up(&encode_output); aws_byte_buf_clean_up(&decode_output); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/hash_table_test.c000066400000000000000000001423521456575232400251660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include static const char *TEST_STR_1 = "test 1"; static const char *TEST_STR_2 = "test 2"; static const char *TEST_VAL_STR_1 = "value 1"; static const char *TEST_VAL_STR_2 = "value 2"; #define ASSERT_HASH_TABLE_ENTRY_COUNT(map, count) \ ASSERT_UINT_EQUALS(count, aws_hash_table_get_entry_count(map), "Hash map should have %d entries", count) #define ASSERT_NO_KEY(hash_table, key) \ do { \ AWS_STATIC_STRING_FROM_LITERAL(assert_key, key); \ struct aws_hash_element *pElem_assert; \ ASSERT_SUCCESS(aws_hash_table_find((hash_table), (void *)assert_key, &pElem_assert)); \ ASSERT_NULL(pElem_assert, "Expected key to not be present: " key); \ } while (0) #define ASSERT_KEY_VALUE(hash_table, key, expected) \ do { \ AWS_STATIC_STRING_FROM_LITERAL(assert_key, key); \ AWS_STATIC_STRING_FROM_LITERAL(assert_value, expected); \ struct aws_hash_element *pElem_assert; \ ASSERT_SUCCESS(aws_hash_table_find((hash_table), (void *)assert_key, &pElem_assert)); \ ASSERT_NOT_NULL(pElem_assert, "Expected key to be present: " key); \ ASSERT_TRUE( \ aws_string_eq(assert_value, (const struct aws_string *)pElem_assert->value), \ "Expected key \"" key "\" to have value \"" expected "\"; actually had value \"%s\"", \ aws_string_bytes((const struct aws_string *)pElem_assert->value)); \ } while (0) AWS_TEST_CASE(test_hash_table_create_find, s_test_hash_table_create_find_fn) static int s_test_hash_table_create_find_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; int err_code = aws_hash_table_init(&hash_table, allocator, 10, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL); struct aws_hash_element *pElem; int was_created; ASSERT_SUCCESS(err_code, "Hash Map init should have succeeded."); ASSERT_HASH_TABLE_ENTRY_COUNT(&hash_table, 0); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_1, &pElem, &was_created); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(1, was_created, "Hash Map put should have created a new element."); pElem->value = (void *)TEST_VAL_STR_1; ASSERT_HASH_TABLE_ENTRY_COUNT(&hash_table, 1); /* Try passing a NULL was_created this time */ err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_2, &pElem, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); pElem->value = (void *)TEST_VAL_STR_2; ASSERT_HASH_TABLE_ENTRY_COUNT(&hash_table, 2); err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_1, &pElem); ASSERT_SUCCESS(err_code, "Hash Map get should have succeeded."); ASSERT_STR_EQUALS( TEST_VAL_STR_1, (const char *)pElem->value, "Returned value for %s, should have been %s", TEST_STR_1, TEST_VAL_STR_1); err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_2, &pElem); ASSERT_SUCCESS(err_code, "Hash Map get should have succeeded."); ASSERT_BIN_ARRAYS_EQUALS( TEST_VAL_STR_2, strlen(TEST_VAL_STR_2) + 1, (const char *)pElem->value, strlen(pElem->value) + 1, "Returned value for %s, should have been %s", TEST_STR_2, TEST_VAL_STR_2); ASSERT_HASH_TABLE_ENTRY_COUNT(&hash_table, 2); err_code = aws_hash_table_remove_element(&hash_table, pElem); ASSERT_SUCCESS(err_code, "Hash Map remove element should have succeeded."); ASSERT_HASH_TABLE_ENTRY_COUNT(&hash_table, 1); aws_hash_table_clean_up(&hash_table); return 0; } AWS_TEST_CASE(test_hash_table_string_create_find, s_test_hash_table_string_create_find_fn) static int s_test_hash_table_string_create_find_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; 
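/* Same create/find round trip as above, but keyed on aws_string objects: the
 * destroy callbacks passed to aws_hash_table_init() let the table clean up
 * dynamically allocated keys and values. */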
struct aws_hash_element *pElem; int was_created; int ret = aws_hash_table_init( &hash_table, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, aws_hash_callback_string_destroy); ASSERT_SUCCESS(ret, "Hash Map init should have succeeded."); /* First element of hash, both key and value are statically allocated strings */ AWS_STATIC_STRING_FROM_LITERAL(key_1, "tweedle dee"); AWS_STATIC_STRING_FROM_LITERAL(val_1, "tweedle dum"); /* Second element of hash, only value is dynamically allocated string */ AWS_STATIC_STRING_FROM_LITERAL(key_2, "what's for dinner?"); const struct aws_string *val_2 = aws_string_new_from_c_str(allocator, "deadbeef"); /* Third element of hash, only key is dynamically allocated string */ uint8_t bytes[] = {0x88, 0x00, 0xaa, 0x13, 0xb7, 0x93, 0x7f, 0xdd, 0xbb, 0x62}; const struct aws_string *key_3 = aws_string_new_from_array(allocator, bytes, 10); AWS_STATIC_STRING_FROM_LITERAL(val_3, "hunter2"); ret = aws_hash_table_create(&hash_table, (void *)key_1, &pElem, &was_created); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(1, was_created, "Hash Map put should have created a new element."); pElem->value = (void *)val_1; /* Try passing a NULL was_created this time */ ret = aws_hash_table_create(&hash_table, (void *)key_2, &pElem, NULL); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); pElem->value = (void *)val_2; ret = aws_hash_table_create(&hash_table, (void *)key_3, &pElem, NULL); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); pElem->value = (void *)val_3; ret = aws_hash_table_find(&hash_table, (void *)key_1, &pElem); ASSERT_SUCCESS(ret, "Hash Map get should have succeeded."); ASSERT_BIN_ARRAYS_EQUALS( "tweedle dee", strlen("tweedle dee"), aws_string_bytes(pElem->key), ((struct aws_string *)pElem->key)->len, "Returned key for %s, should have been %s", "tweedle dee", "tweedle dee"); ASSERT_BIN_ARRAYS_EQUALS( "tweedle dum", strlen("tweedle dum"), aws_string_bytes(pElem->value), ((struct aws_string *)pElem->value)->len, "Returned value for %s, should have been %s", "tweedle dee", "tweedle dum"); ret = aws_hash_table_find(&hash_table, (void *)key_2, &pElem); ASSERT_SUCCESS(ret, "Hash Map get should have succeeded."); ASSERT_BIN_ARRAYS_EQUALS( "what's for dinner?", strlen("what's for dinner?"), aws_string_bytes(pElem->key), ((struct aws_string *)pElem->key)->len, "Returned key for %s, should have been %s", "what's for dinner?", "what's for dinner?"); ASSERT_BIN_ARRAYS_EQUALS( "deadbeef", strlen("deadbeef"), aws_string_bytes(pElem->value), ((struct aws_string *)pElem->value)->len, "Returned value for %s, should have been %s", "what's for dinner?", "deadbeef"); ret = aws_hash_table_find(&hash_table, (void *)key_3, &pElem); ASSERT_SUCCESS(ret, "Hash Map get should have succeeded."); ASSERT_BIN_ARRAYS_EQUALS( bytes, 10, aws_string_bytes(pElem->key), ((struct aws_string *)pElem->key)->len, "Returned key for %02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx should have been same", bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]); ASSERT_BIN_ARRAYS_EQUALS( "hunter2", strlen("hunter2"), aws_string_bytes(pElem->value), ((struct aws_string *)pElem->value)->len, "Returned value for binary bytes should have been %s", "hunter2"); aws_string_destroy((struct aws_string *)pElem->key); aws_string_destroy(pElem->value); ret = aws_hash_table_remove_element(&hash_table, pElem); ASSERT_SUCCESS(ret, "Hash Map remove element should have 
succeeded."); ASSERT_HASH_TABLE_ENTRY_COUNT(&hash_table, 2); aws_hash_table_clean_up(&hash_table); return 0; } static const void *last_key, *last_value; static void destroy_key_record(void *key) { last_key = key; } static void destroy_value_record(void *value) { last_value = value; } AWS_TEST_CASE(test_hash_table_put, s_test_hash_table_put_fn) static int s_test_hash_table_put_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; struct aws_hash_element *pElem; int was_created; int ret = aws_hash_table_init( &hash_table, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, destroy_key_record, destroy_value_record); ASSERT_SUCCESS(ret, "Hash Map init should have succeeded."); AWS_STATIC_STRING_FROM_LITERAL(sentinel, ""); AWS_STATIC_STRING_FROM_LITERAL(key_a_1, "a"); AWS_STATIC_STRING_FROM_LITERAL(value_b_1, "b"); ASSERT_NO_KEY(&hash_table, "a"); last_key = last_value = sentinel; aws_hash_table_put(&hash_table, key_a_1, (void *)value_b_1, &was_created); ASSERT_INT_EQUALS(was_created, 1); ASSERT_KEY_VALUE(&hash_table, "a", "b"); /* dtors were not called, even with nulls */ ASSERT_PTR_EQUALS(last_key, sentinel); ASSERT_PTR_EQUALS(last_value, sentinel); AWS_STATIC_STRING_FROM_LITERAL(key_a_2, "a"); AWS_STATIC_STRING_FROM_LITERAL(value_c_1, "c"); last_key = last_value = NULL; aws_hash_table_put(&hash_table, key_a_2, (void *)value_c_1, &was_created); ASSERT_INT_EQUALS(was_created, 0); ASSERT_KEY_VALUE(&hash_table, "a", "c"); ASSERT_SUCCESS(aws_hash_table_find(&hash_table, (void *)key_a_1, &pElem)); ASSERT_PTR_EQUALS(key_a_2, pElem->key); /* verify dtor was called on the old key ptr */ ASSERT_PTR_EQUALS(last_key, key_a_1); ASSERT_PTR_EQUALS(last_value, value_b_1); last_key = last_value = NULL; aws_hash_table_put(&hash_table, key_a_2, (void *)value_b_1, NULL); ASSERT_KEY_VALUE(&hash_table, "a", "b"); /* Since the key ptr did not change, it was not destroyed */ ASSERT_PTR_EQUALS(last_key, NULL); /* The value was destroyed however */ ASSERT_PTR_EQUALS(last_value, value_c_1); aws_hash_table_clean_up(&hash_table); return 0; } AWS_TEST_CASE(test_hash_table_put_null_dtor, s_test_hash_table_put_null_dtor_fn) static int s_test_hash_table_put_null_dtor_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; int ret = aws_hash_table_init(&hash_table, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, NULL, NULL); ASSERT_SUCCESS(ret, "Hash Map init should have succeeded."); AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); ASSERT_SUCCESS(aws_hash_table_put(&hash_table, foo, (void *)foo, NULL)); ASSERT_SUCCESS(aws_hash_table_put(&hash_table, foo, (void *)foo, NULL)); aws_hash_table_clean_up(&hash_table); return 0; } AWS_TEST_CASE(test_hash_table_swap_move, s_test_hash_table_swap_move) static int s_test_hash_table_swap_move(struct aws_allocator *allocator, void *ctx) { (void)ctx; AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); AWS_STATIC_STRING_FROM_LITERAL(bar, "bar"); AWS_STATIC_STRING_FROM_LITERAL(key, "key"); struct aws_hash_table table1, table2, tmp; ASSERT_SUCCESS( aws_hash_table_init(&table1, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, NULL, NULL)); ASSERT_SUCCESS( aws_hash_table_init(&table2, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, NULL, NULL)); ASSERT_SUCCESS(aws_hash_table_put(&table1, key, (void *)foo, NULL)); ASSERT_SUCCESS(aws_hash_table_put(&table2, key, (void *)bar, NULL)); aws_hash_table_swap(&table1, &table2); ASSERT_KEY_VALUE(&table1, "key", "bar"); 
ASSERT_KEY_VALUE(&table2, "key", "foo"); aws_hash_table_clean_up(&table2); ASSERT_KEY_VALUE(&table1, "key", "bar"); /* Swap is safe with freed/uninitialized tables */ aws_hash_table_swap(&table1, &table2); ASSERT_KEY_VALUE(&table2, "key", "bar"); memset(&table1, 0xDD, sizeof(table1)); aws_hash_table_swap(&table1, &table2); ASSERT_KEY_VALUE(&table1, "key", "bar"); /* Move is safe with freed/uninitialized destination */ aws_hash_table_move(&table2, &table1); ASSERT_KEY_VALUE(&table2, "key", "bar"); /* After move, source can be cleaned up as a no-op */ memcpy(&tmp, &table1, sizeof(table1)); aws_hash_table_clean_up(&table1); ASSERT_INT_EQUALS(0, memcmp(&tmp, &table1, sizeof(table1))); aws_hash_table_clean_up(&table2); return 0; } AWS_TEST_CASE(test_hash_table_string_clean_up, s_test_hash_table_string_clean_up_fn) static int s_test_hash_table_string_clean_up_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Verify that clean up happens properly when a destructor function is used only on keys or only on values. */ struct aws_hash_table hash_table; struct aws_hash_element *pElem; int was_created; const struct aws_string *key_1 = aws_string_new_from_c_str(allocator, "Once upon a midnight dreary,"); AWS_STATIC_STRING_FROM_LITERAL(val_1, "while I pondered, weak and weary,"); const struct aws_string *key_2 = aws_string_new_from_c_str(allocator, "Over many a quaint and curious"); AWS_STATIC_STRING_FROM_LITERAL(val_2, "volume of forgotten lore--"); const struct aws_string *key_3 = aws_string_new_from_c_str(allocator, "While I nodded, nearly napping,"); AWS_STATIC_STRING_FROM_LITERAL(val_3, "suddenly there came a tapping,"); const struct aws_string *dyn_keys[] = {key_1, key_2, key_3}; const struct aws_string *static_vals[] = {val_1, val_2, val_3}; int ret = aws_hash_table_init( &hash_table, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, NULL); /* destroy keys not values */ ASSERT_SUCCESS(ret, "Hash Map init should have succeeded."); for (int idx = 0; idx < 3; ++idx) { ret = aws_hash_table_create(&hash_table, (void *)dyn_keys[idx], &pElem, &was_created); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(1, was_created, "Hash Map put should have created a new element."); pElem->value = (void *)static_vals[idx]; } aws_hash_table_clean_up(&hash_table); AWS_STATIC_STRING_FROM_LITERAL(key_4, "As of some one gently rapping,"); const struct aws_string *val_4 = aws_string_new_from_c_str(allocator, "rapping at my chamber door."); AWS_STATIC_STRING_FROM_LITERAL(key_5, "\"'Tis some visitor,\" I muttered,"); const struct aws_string *val_5 = aws_string_new_from_c_str(allocator, "\"tapping at my chamber door--"); AWS_STATIC_STRING_FROM_LITERAL(key_6, "Only this and nothing more.\""); const struct aws_string *val_6 = aws_string_new_from_c_str(allocator, "from The Raven by Edgar Allan Poe (1845)"); const struct aws_string *static_keys[] = {key_4, key_5, key_6}; const struct aws_string *dyn_vals[] = {val_4, val_5, val_6}; ret = aws_hash_table_init( &hash_table, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, NULL, aws_hash_callback_string_destroy); /* destroy values not keys */ ASSERT_SUCCESS(ret, "Hash Map init should have succeeded."); for (int idx = 0; idx < 3; ++idx) { ret = aws_hash_table_create(&hash_table, (void *)static_keys[idx], &pElem, &was_created); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(1, was_created, "Hash Map put should have created a new element."); pElem->value = 
(void *)dyn_vals[idx]; } aws_hash_table_clean_up(&hash_table); return 0; } static uint64_t hash_collide(const void *a) { (void)a; return 4; } AWS_TEST_CASE(test_hash_table_hash_collision, s_test_hash_table_hash_collision_fn) static int s_test_hash_table_hash_collision_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; struct aws_hash_element *pElem; int err_code = aws_hash_table_init(&hash_table, allocator, 10, hash_collide, aws_hash_callback_c_str_eq, NULL, NULL); ASSERT_SUCCESS(err_code, "Hash Map init should have succeeded."); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_1, &pElem, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); pElem->value = (void *)TEST_VAL_STR_1; err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_2, &pElem, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); pElem->value = (void *)TEST_VAL_STR_2; err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_1, &pElem); ASSERT_SUCCESS(err_code, "Hash Map get should have succeeded."); ASSERT_STR_EQUALS( TEST_VAL_STR_1, pElem->value, "Returned value for %s, should have been %s", TEST_STR_1, TEST_VAL_STR_1); err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_2, &pElem); ASSERT_SUCCESS(err_code, "Hash Map get should have succeeded."); ASSERT_STR_EQUALS( TEST_VAL_STR_2, pElem->value, "Returned value for %s, should have been %s", TEST_STR_2, TEST_VAL_STR_2); aws_hash_table_clean_up(&hash_table); return 0; } AWS_TEST_CASE(test_hash_table_hash_overwrite, s_test_hash_table_hash_overwrite_fn) static int s_test_hash_table_hash_overwrite_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; struct aws_hash_element *pElem; int err_code = aws_hash_table_init(&hash_table, allocator, 10, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL); int was_created = 42; ASSERT_SUCCESS(err_code, "Hash Map init should have succeeded."); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_1, &pElem, &was_created); //(void *)TEST_VAL_STR_1); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(1, was_created, "Hash Map create should have created a new element."); pElem->value = (void *)TEST_VAL_STR_1; err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_1, &pElem, &was_created); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(0, was_created, "Hash Map create should not have created a new element."); ASSERT_PTR_EQUALS(TEST_VAL_STR_1, pElem->value, "Create should have returned the old value."); pElem->value = (void *)TEST_VAL_STR_2; pElem = NULL; err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_1, &pElem); ASSERT_SUCCESS(err_code, "Hash Map get should have succeeded."); ASSERT_PTR_EQUALS(TEST_VAL_STR_2, pElem->value, "The new value should have been preserved on get"); aws_hash_table_clean_up(&hash_table); return 0; } static void *s_last_removed_key; static void *s_last_removed_value; static int s_key_removal_counter = 0; static int s_value_removal_counter = 0; static void s_destroy_key_fn(void *key) { s_last_removed_key = key; ++s_key_removal_counter; } static void s_destroy_value_fn(void *value) { s_last_removed_value = value; ++s_value_removal_counter; } static void s_reset_destroy_ck(void) { s_key_removal_counter = 0; s_value_removal_counter = 0; s_last_removed_key = NULL; s_last_removed_value = NULL; } AWS_TEST_CASE(test_hash_table_hash_remove, s_test_hash_table_hash_remove_fn) 
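/* The next test relies on when the key/value destructors installed above actually run: they fire only for
 * elements the table discards itself, not for elements handed back to the caller. An illustrative sketch of the
 * two removal flavors exercised below (mirroring the calls in this file, not additional API surface):
 *
 *     struct aws_hash_element out;
 *     int was_present = 0;
 *     aws_hash_table_remove(&table, key, &out, &was_present);  // element returned to caller, destructors not run
 *     aws_hash_table_remove(&table, key, NULL, NULL);          // element discarded, destructors invoked
 */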
static int s_test_hash_table_hash_remove_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; struct aws_hash_element *pElem, elem; int err_code = aws_hash_table_init( &hash_table, allocator, 10, aws_hash_c_string, aws_hash_callback_c_str_eq, s_destroy_key_fn, s_destroy_value_fn); int was_present = 42; s_reset_destroy_ck(); ASSERT_SUCCESS(err_code, "Hash Map init should have succeeded."); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_1, NULL, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_2, &pElem, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); pElem->value = (void *)TEST_VAL_STR_2; /* Create a second time; this should not invoke destroy */ err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_2, &pElem, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(0, s_key_removal_counter, "No keys should be destroyed at this point"); ASSERT_INT_EQUALS(0, s_value_removal_counter, "No values should be destroyed at this point"); err_code = aws_hash_table_remove(&hash_table, (void *)TEST_STR_1, &elem, &was_present); ASSERT_SUCCESS(err_code, "Hash Map remove should have succeeded."); ASSERT_INT_EQUALS(0, s_key_removal_counter, "No keys should be destroyed at this point"); ASSERT_INT_EQUALS(0, s_value_removal_counter, "No values should be destroyed at this point"); ASSERT_INT_EQUALS(1, was_present, "Item should have been removed"); err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_1, &pElem); ASSERT_SUCCESS(err_code, "Find for nonexistent item should still succeed"); ASSERT_NULL(pElem, "Expected item to be nonexistent"); err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_2, &pElem); ASSERT_SUCCESS(err_code, "Hash Map get should have succeeded."); ASSERT_PTR_EQUALS(TEST_VAL_STR_2, pElem->value, "Wrong value returned from second get"); /* If we delete and discard the element, destroy_fn should be invoked */ err_code = aws_hash_table_remove(&hash_table, (void *)TEST_STR_2, NULL, NULL); ASSERT_SUCCESS(err_code, "Remove should have succeeded."); ASSERT_INT_EQUALS(1, s_key_removal_counter, "One key should be destroyed at this point"); ASSERT_INT_EQUALS(1, s_value_removal_counter, "One value should be destroyed at this point"); ASSERT_PTR_EQUALS(s_last_removed_value, TEST_VAL_STR_2, "Wrong element destroyed"); /* If we delete an element that's not there, we shouldn't invoke destroy_fn */ err_code = aws_hash_table_remove(&hash_table, (void *)TEST_STR_1, NULL, &was_present); ASSERT_SUCCESS(err_code, "Remove still should succeed on nonexistent items"); ASSERT_INT_EQUALS(0, was_present, "Remove should indicate item not present"); ASSERT_INT_EQUALS(1, s_key_removal_counter, "We shouldn't delete an item if none was found"); ASSERT_INT_EQUALS(1, s_value_removal_counter, "We shouldn't delete an item if none was found"); aws_hash_table_clean_up(&hash_table); return 0; } AWS_TEST_CASE(test_hash_table_hash_clear_allows_cleanup, s_test_hash_table_hash_clear_allows_cleanup_fn) static int s_test_hash_table_hash_clear_allows_cleanup_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; int err_code = aws_hash_table_init( &hash_table, allocator, 10, aws_hash_c_string, aws_hash_callback_c_str_eq, s_destroy_key_fn, s_destroy_value_fn); ASSERT_SUCCESS(err_code, "Hash Map init should have succeeded."); s_reset_destroy_ck(); err_code = 
aws_hash_table_create(&hash_table, (void *)TEST_STR_1, NULL, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_2, NULL, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(2, aws_hash_table_get_entry_count(&hash_table)); aws_hash_table_clear(&hash_table); ASSERT_INT_EQUALS(2, s_key_removal_counter, "Clear should destroy all keys"); ASSERT_INT_EQUALS(2, s_value_removal_counter, "Clear should destroy all values"); ASSERT_INT_EQUALS(0, aws_hash_table_get_entry_count(&hash_table)); struct aws_hash_element *pElem; err_code = aws_hash_table_find(&hash_table, (void *)TEST_STR_1, &pElem); ASSERT_SUCCESS(err_code, "Find should still succeed after clear"); ASSERT_NULL(pElem, "Element should not be found"); s_reset_destroy_ck(); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_1, NULL, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); err_code = aws_hash_table_create(&hash_table, (void *)TEST_STR_2, NULL, NULL); ASSERT_SUCCESS(err_code, "Hash Map put should have succeeded."); aws_hash_table_clean_up(&hash_table); ASSERT_INT_EQUALS(2, s_key_removal_counter, "Cleanup should destroy all keys"); ASSERT_INT_EQUALS(2, s_value_removal_counter, "Cleanup should destroy all values"); return 0; } AWS_TEST_CASE(test_hash_table_on_resize_returns_correct_entry, s_test_hash_table_on_resize_returns_correct_entry_fn) static int s_test_hash_table_on_resize_returns_correct_entry_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; int err_code = aws_hash_table_init(&hash_table, allocator, 10, aws_hash_ptr, aws_ptr_eq, NULL, NULL); ASSERT_SUCCESS(err_code, "Hash Map init should have succeeded."); for (int i = 0; i < 20; i++) { struct aws_hash_element *pElem; int was_created; err_code = aws_hash_table_create(&hash_table, (void *)(intptr_t)i, &pElem, &was_created); ASSERT_SUCCESS(err_code, "Create should have succeeded"); ASSERT_INT_EQUALS(1, was_created, "Create should have created new element"); ASSERT_PTR_EQUALS(NULL, pElem->value, "New element should have null value"); pElem->value = &hash_table; } aws_hash_table_clean_up(&hash_table); return 0; } static int s_foreach_cb_tomask(void *context, struct aws_hash_element *p_element) { int *p_mask = context; uintptr_t index = (uintptr_t)p_element->key; *p_mask |= (1 << index); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } static int s_foreach_cb_error_and_delete(void *context, struct aws_hash_element *p_element) { (void)context; (void)p_element; return AWS_COMMON_HASH_TABLE_ITER_ERROR | AWS_COMMON_HASH_TABLE_ITER_DELETE; } static int s_iter_count = 0; static int s_foreach_cb_deltarget(void *context, struct aws_hash_element *p_element) { void **pTarget = context; int rv = AWS_COMMON_HASH_TABLE_ITER_CONTINUE; if (p_element->key == *pTarget) { rv |= AWS_COMMON_HASH_TABLE_ITER_DELETE; } s_iter_count++; return rv; } static int s_foreach_cb_cutoff(void *context, struct aws_hash_element *p_element) { (void)p_element; int *p_remain = context; s_iter_count++; if (--*p_remain) { return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } return 0; } static int s_foreach_cb_cutoff_del(void *context, struct aws_hash_element *p_element) { int *p_remain = context; s_iter_count++; if (--*p_remain) { return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } *p_remain = (int)(intptr_t)p_element->key; return AWS_COMMON_HASH_TABLE_ITER_DELETE; } AWS_TEST_CASE(test_hash_table_foreach, s_test_hash_table_foreach_fn) static 
int s_test_hash_table_foreach_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; ASSERT_SUCCESS( aws_hash_table_init(&hash_table, allocator, 10, aws_hash_ptr, aws_ptr_eq, NULL, NULL), "hash table init"); for (int i = 0; i < 8; i++) { struct aws_hash_element *pElem; ASSERT_SUCCESS(aws_hash_table_create(&hash_table, (void *)(intptr_t)i, &pElem, NULL), "insert element"); pElem->value = NULL; } // delete will not work as long as the error has set ASSERT_FAILS( aws_hash_table_foreach(&hash_table, s_foreach_cb_error_and_delete, NULL), "foreach error from callback"); // We should find all eight elements int mask = 0; ASSERT_SUCCESS(aws_hash_table_foreach(&hash_table, s_foreach_cb_tomask, &mask), "foreach invocation"); ASSERT_INT_EQUALS(0xff, mask, "bitmask"); void *target = (void *)(uintptr_t)3; s_iter_count = 0; ASSERT_SUCCESS(aws_hash_table_foreach(&hash_table, s_foreach_cb_deltarget, &target), "foreach invocation"); ASSERT_INT_EQUALS(8, s_iter_count, "iteration should not stop when deleting"); mask = 0; ASSERT_SUCCESS(aws_hash_table_foreach(&hash_table, s_foreach_cb_tomask, &mask), "foreach invocation"); ASSERT_INT_EQUALS(0xf7, mask, "element 3 deleted"); s_iter_count = 0; int remain = 4; ASSERT_SUCCESS(aws_hash_table_foreach(&hash_table, s_foreach_cb_cutoff, &remain), "foreach invocation"); ASSERT_INT_EQUALS(0, remain, "no more remaining iterations"); ASSERT_INT_EQUALS(4, s_iter_count, "correct iteration count"); s_iter_count = 0; remain = 4; ASSERT_SUCCESS(aws_hash_table_foreach(&hash_table, s_foreach_cb_cutoff_del, &remain), "foreach invocation"); ASSERT_INT_EQUALS(4, s_iter_count, "correct iteration count"); // we use remain as a side channel to report which element we deleted int expected_mask = 0xf7 & ~(1 << remain); mask = 0; ASSERT_SUCCESS(aws_hash_table_foreach(&hash_table, s_foreach_cb_tomask, &mask), "foreach invocation"); ASSERT_INT_EQUALS(expected_mask, mask, "stop element deleted"); aws_hash_table_clean_up(&hash_table); return 0; } /* * Convenience functions for a hash table which uses uint64_t as keys, and whose * hash function is just the identity function. */ static uint64_t s_hash_uint64_identity(const void *a) { return *(uint64_t *)a; } static bool s_hash_uint64_eq(const void *a, const void *b) { uint64_t my_a = *(uint64_t *)a; uint64_t my_b = *(uint64_t *)b; return my_a == my_b; } AWS_TEST_CASE(test_hash_table_iter, s_test_hash_table_iter_fn) static int s_test_hash_table_iter_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Table entries are: (2^0 -> 2^10), (2^1 -> 2^11), (2^2 -> 2^12), ..., (2^9 -> 2^19). * We will iterate through the table and AND all the keys and all the values together * to ensure that we have hit every element of the table. 
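* (In the code this is a bitwise OR into keys_bitflags / values_bitflags, which are then checked against 0x3ff and 0xffc00 to confirm every entry was visited.)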
*/ uint64_t powers_of_2[20]; uint64_t x = 1; for (int i = 0; i < 20; ++i, x <<= 1) { powers_of_2[i] = x; } struct aws_hash_table map; ASSERT_SUCCESS( aws_hash_table_init(&map, allocator, 10, s_hash_uint64_identity, s_hash_uint64_eq, NULL, NULL), "hash table init"); struct aws_hash_element *elem; for (int i = 0; i < 10; ++i) { int ret = aws_hash_table_create(&map, (void *)(powers_of_2 + i), &elem, NULL); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); elem->value = (void *)(powers_of_2 + 10 + i); } uint64_t keys_bitflags = 0; uint64_t values_bitflags = 0; int num_elements = 0; for (struct aws_hash_iter iter = aws_hash_iter_begin(&map); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { uint64_t key = *(const uint64_t *)iter.element.key; uint64_t value = *(uint64_t *)iter.element.value; keys_bitflags |= key; values_bitflags |= value; ++num_elements; } ASSERT_INT_EQUALS(num_elements, 10); ASSERT_UINT_EQUALS(keys_bitflags, 0x3ffULL); // keys are bottom 10 bits ASSERT_UINT_EQUALS(values_bitflags, 0xffc00ULL); // values are next 10 bits aws_hash_table_clean_up(&map); return 0; } AWS_TEST_CASE(test_hash_table_empty_iter, s_test_hash_table_empty_iter_fn) static int s_test_hash_table_empty_iter_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table map; ASSERT_SUCCESS(aws_hash_table_init(&map, allocator, 10, s_hash_uint64_identity, s_hash_uint64_eq, NULL, NULL)); struct aws_hash_iter iter = aws_hash_iter_begin(&map); ASSERT_TRUE(aws_hash_iter_done(&iter)); aws_hash_iter_next(&iter); ASSERT_TRUE(aws_hash_iter_done(&iter)); aws_hash_table_clean_up(&map); return 0; } AWS_TEST_CASE(test_hash_table_iter_detail, s_test_hash_table_iter_detail) static int s_test_hash_table_iter_detail(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint64_t keys[32], vals[32]; for (uint64_t i = 0; i < 32; i++) { keys[i] = i; vals[i] = i + 100; } struct aws_hash_table map; ASSERT_SUCCESS(aws_hash_table_init( &map, allocator, 10, s_hash_uint64_identity, s_hash_uint64_eq, destroy_key_record, destroy_value_record)); /* * We'll fill hash table entries as follows: * Slot Value * 0 16 * 1 17 * 2 18 * 3 (empty) * 4 (empty) * 5 5 * 6 6 * 7 7 * 8 8 * 9 9 * 10 10 * 11 11 * 12 12 * 13 13 * 14 14 * 15 15 */ for (size_t i = 5; i <= 18; i++) { ASSERT_SUCCESS(aws_hash_table_put(&map, &keys[i], &vals[i], NULL)); } /* Verify that we have the correct set of values in the right order, first of all */ #define ASSERT_ORDER(iter, ...) \ do { \ uint64_t expected[] = {__VA_ARGS__}; \ size_t count = sizeof(expected) / sizeof(*expected); \ for (size_t i = 0; i < count; i++) { \ ASSERT_FALSE(aws_hash_iter_done(&(iter))); \ ASSERT_INT_EQUALS(expected[i], *(const uint64_t *)(iter).element.key); \ ASSERT_INT_EQUALS(expected[i] + 100, *(const uint64_t *)(iter).element.value); \ aws_hash_iter_next(&(iter)); \ } \ } while (0) struct aws_hash_iter iter = aws_hash_iter_begin(&map); ASSERT_ORDER(iter, 16, 17, 18, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); ASSERT_TRUE(aws_hash_iter_done(&(iter))); /* If we delete the very first slot, we expect that we'll see the remaining elements. 
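* Passing true to aws_hash_iter_delete() below also hands the deleted key/value to the table's destructors, which the last_key / last_value checks confirm.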
*/ iter = aws_hash_iter_begin(&map); last_key = last_value = NULL; aws_hash_iter_delete(&iter, true); aws_hash_iter_next(&iter); /* Since we passed true to delete, we should have destroyed the key and value */ ASSERT_PTR_EQUALS(&keys[16], last_key); ASSERT_PTR_EQUALS(&vals[16], last_value); ASSERT_ORDER(iter, 17, 18, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); /* * If we delete one of the later elements (in this case, 5), the deletion has to wrap * around the hash table. Verify that we don't see the element that wrapped around * (in this case 17) twice. */ iter = aws_hash_iter_begin(&map); last_key = last_value = NULL; aws_hash_iter_next(&iter); /* 17 => 18 */ aws_hash_iter_next(&iter); /* 18 => 5 */ aws_hash_iter_delete(&iter, false); ASSERT_NULL(last_key); ASSERT_NULL(last_value); aws_hash_iter_next(&iter); ASSERT_ORDER(iter, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); /* Now verify that we did in fact wrap the element around */ iter = aws_hash_iter_begin(&map); ASSERT_ORDER(iter, 17, 18, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); aws_hash_table_clean_up(&map); #undef ASSERT_ORDER return 0; } static uint64_t bad_hash_fn(const void *key) { (void)key; return 4; // chosen by fair dice roll // guaranteed to be random } static bool everything_is_eq(const void *a, const void *b) { (void)a; (void)b; return true; } AWS_TEST_CASE(test_hash_table_eq, s_test_hash_table_eq) static int s_test_hash_table_eq(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table table_a, table_b; ASSERT_SUCCESS( aws_hash_table_init(&table_a, allocator, 16, aws_hash_string, aws_hash_callback_string_eq, NULL, NULL)); ASSERT_SUCCESS(aws_hash_table_init(&table_b, allocator, 16, bad_hash_fn, aws_hash_callback_string_eq, NULL, NULL)); AWS_STATIC_STRING_FROM_LITERAL(foo_a, "foo"); AWS_STATIC_STRING_FROM_LITERAL(foo_b, "foo"); AWS_STATIC_STRING_FROM_LITERAL(bar_a, "bar"); AWS_STATIC_STRING_FROM_LITERAL(bar_b, "bar"); AWS_STATIC_STRING_FROM_LITERAL(quux_a, "quux"); AWS_STATIC_STRING_FROM_LITERAL(quux_b, "quux"); ASSERT_SUCCESS(aws_hash_table_put(&table_a, foo_a, (void *)bar_a, NULL)); ASSERT_SUCCESS(aws_hash_table_put(&table_b, foo_b, (void *)bar_b, NULL)); ASSERT_SUCCESS(aws_hash_table_put(&table_a, bar_a, (void *)quux_a, NULL)); ASSERT_SUCCESS(aws_hash_table_put(&table_b, bar_a, (void *)quux_a, NULL)); ASSERT_TRUE(aws_hash_table_eq(&table_a, &table_b, aws_hash_callback_string_eq)); ASSERT_TRUE(aws_hash_table_eq(&table_a, &table_b, everything_is_eq)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, aws_ptr_eq)); /* Non-equal: Table B has extra members */ ASSERT_SUCCESS(aws_hash_table_put(&table_b, quux_a, (void *)quux_b, NULL)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, aws_hash_callback_string_eq)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, everything_is_eq)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, aws_ptr_eq)); /* Non-equal: Same number of members, but different keys */ ASSERT_SUCCESS(aws_hash_table_remove(&table_b, bar_a, NULL, NULL)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, aws_hash_callback_string_eq)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, everything_is_eq)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, aws_ptr_eq)); /* Non-equal: Same keys, values differ */ ASSERT_SUCCESS(aws_hash_table_remove(&table_b, quux_a, NULL, NULL)); ASSERT_SUCCESS(aws_hash_table_put(&table_b, bar_a, (void *)foo_b, NULL)); ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, aws_hash_callback_string_eq)); ASSERT_TRUE(aws_hash_table_eq(&table_a, &table_b, everything_is_eq)); 
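/* With the key sets identical, everything_is_eq still reports the tables equal because it never looks at the
 * values, while aws_ptr_eq (next) fails since the stored value pointers differ. */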
ASSERT_FALSE(aws_hash_table_eq(&table_a, &table_b, aws_ptr_eq)); aws_hash_table_clean_up(&table_b); aws_hash_table_clean_up(&table_a); return 0; } struct churn_entry { void *key; int original_index; void *value; int is_removed; }; static int s_qsort_churn_entry(const void *a, const void *b) { const struct churn_entry *const *p1 = a, *const *p2 = b; const struct churn_entry *e1 = *p1, *e2 = *p2; if (e1->key < e2->key) { return -1; } if (e1->key > e2->key) { return 1; } if (e1->original_index < e2->original_index) { return -1; } if (e1->original_index > e2->original_index) { return 1; } return 0; } static long s_timestamp(void) { uint64_t time = 0; aws_sys_clock_get_ticks(&time); return (long)(time / 1000); } AWS_TEST_CASE(test_hash_churn, s_test_hash_churn_fn) static int s_test_hash_churn_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; int i = 0; struct aws_hash_table hash_table; int nentries = 2 * 512 * 1024; int err_code = aws_hash_table_init(&hash_table, allocator, nentries, aws_hash_ptr, aws_ptr_eq, NULL, NULL); if (AWS_ERROR_SUCCESS != err_code) { FAIL("hash table creation failed: %d", err_code); } /* Probability that we deliberately try to overwrite. Note that random collisions can occur, and are not explicitly avoided. */ double pOverwrite = 0.05; double pDelete = 0.05; struct churn_entry *entries = calloc(sizeof(*entries), nentries); struct churn_entry **permuted = calloc(sizeof(*permuted), nentries); for (i = 0; i < nentries; i++) { struct churn_entry *e = &entries[i]; permuted[i] = e; e->original_index = i; int mode = 0; /* 0 = new entry, 1 = overwrite, 2 = delete */ if (i != 0) { double p = (double)rand(); if (p < pOverwrite) { mode = 1; } else if (p < pOverwrite + pDelete) { mode = 2; } } e->is_removed = 0; if (mode == 0) { e->key = (void *)(uintptr_t)rand(); e->value = (void *)(uintptr_t)rand(); } else if (mode == 1) { e->key = entries[(size_t)rand() % i].key; /* not evenly distributed but close enough */ e->value = (void *)(uintptr_t)rand(); } else if (mode == 2) { e->key = entries[(size_t)rand() % i].key; /* not evenly distributed but close enough */ e->value = 0; e->is_removed = 1; } } qsort(permuted, nentries, sizeof(*permuted), s_qsort_churn_entry); long start = s_timestamp(); for (i = 0; i < nentries; i++) { if (!(i % 100000)) { printf("Put progress: %d/%d\n", i, nentries); } struct churn_entry *e = &entries[i]; if (e->is_removed) { int was_present; err_code = aws_hash_table_remove(&hash_table, e->key, NULL, &was_present); ASSERT_SUCCESS(err_code, "Unexpected failure removing element"); if (i == 0 && entries[i - 1].key == e->key && entries[i - 1].is_removed) { ASSERT_INT_EQUALS(0, was_present, "Expected item to be missing"); } else { ASSERT_INT_EQUALS(1, was_present, "Expected item to be present"); } } else { struct aws_hash_element *pElem; int was_created; err_code = aws_hash_table_create(&hash_table, e->key, &pElem, &was_created); ASSERT_SUCCESS(err_code, "Unexpected failure adding element"); pElem->value = e->value; } } for (i = 0; i < nentries; i++) { if (!(i % 100000)) { printf("Check progress: %d/%d\n", i, nentries); } struct churn_entry *e = permuted[i]; if (i < nentries - 1 && permuted[i + 1]->key == e->key) { // overwritten on subsequent step continue; } struct aws_hash_element *pElem; aws_hash_table_find(&hash_table, e->key, &pElem); if (e->is_removed) { ASSERT_NULL(pElem, "expected item to be deleted"); } else { ASSERT_NOT_NULL(pElem, "expected item to be present"); ASSERT_PTR_EQUALS(e->value, pElem->value, "wrong value for item"); } } 
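/* s_timestamp() was sampled before the put/remove churn began, so the elapsed figure printed below covers the
 * churn, the verification pass above, and the table cleanup that follows. */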
aws_hash_table_clean_up(&hash_table); long end = s_timestamp(); free(entries); free(permuted); printf("elapsed=%ld us\n", end - start); return 0; } AWS_TEST_CASE(test_hash_table_cleanup_idempotent, s_test_hash_table_cleanup_idempotent_fn) static int s_test_hash_table_cleanup_idempotent_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; ASSERT_SUCCESS( aws_hash_table_init(&hash_table, allocator, 10, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL)); aws_hash_table_clean_up(&hash_table); aws_hash_table_clean_up(&hash_table); return 0; } struct hash_table_entry { struct aws_allocator *allocator; struct aws_byte_cursor key; }; static void s_hash_table_entry_destroy(void *item) { struct hash_table_entry *entry = item; aws_mem_release(entry->allocator, entry); } AWS_TEST_CASE(test_hash_table_byte_cursor_create_find, s_test_hash_table_byte_cursor_create_find_fn) static int s_test_hash_table_byte_cursor_create_find_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_hash_table hash_table; struct aws_hash_element *pElem; int was_created; int ret = aws_hash_table_init( &hash_table, allocator, 10, aws_hash_byte_cursor_ptr, (aws_hash_callback_eq_fn *)aws_byte_cursor_eq, NULL, s_hash_table_entry_destroy); ASSERT_SUCCESS(ret, "Hash Map init should have succeeded."); /* First element of hash, both key and value are statically allocated * strings */ AWS_STATIC_STRING_FROM_LITERAL(key_1_str, "tweedle dee"); struct hash_table_entry *val_1 = aws_mem_acquire(allocator, sizeof(struct hash_table_entry)); val_1->allocator = allocator; val_1->key = aws_byte_cursor_from_string(key_1_str); /* Second element of hash, only value is dynamically allocated string */ AWS_STATIC_STRING_FROM_LITERAL(key_2_str, "what's for dinner?"); struct hash_table_entry *val_2 = aws_mem_acquire(allocator, sizeof(struct hash_table_entry)); val_2->allocator = allocator; val_2->key = aws_byte_cursor_from_string(key_2_str); /* Third element of hash, only key is dynamically allocated string */ uint8_t bytes[] = {0x88, 0x00, 0xaa, 0x13, 0xb7, 0x93, 0x7f, 0xdd, 0xbb, 0x62}; struct aws_string *key_3_str = aws_string_new_from_array(allocator, bytes, 10); struct hash_table_entry *val_3 = aws_mem_acquire(allocator, sizeof(struct hash_table_entry)); val_3->allocator = allocator; val_3->key = aws_byte_cursor_from_string(key_3_str); ret = aws_hash_table_create(&hash_table, (void *)&val_1->key, &pElem, &was_created); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); ASSERT_INT_EQUALS(1, was_created, "Hash Map put should have created a new element."); pElem->value = (void *)val_1; /* Try passing a NULL was_created this time */ ret = aws_hash_table_create(&hash_table, (void *)&val_2->key, &pElem, NULL); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); pElem->value = (void *)val_2; ret = aws_hash_table_create(&hash_table, (void *)&val_3->key, &pElem, NULL); ASSERT_SUCCESS(ret, "Hash Map put should have succeeded."); pElem->value = (void *)val_3; ret = aws_hash_table_find(&hash_table, (void *)&val_1->key, &pElem); ASSERT_SUCCESS(ret, "Hash Map get should have succeeded."); ASSERT_BIN_ARRAYS_EQUALS( "tweedle dee", strlen("tweedle dee"), ((struct aws_byte_cursor *)pElem->key)->ptr, ((struct aws_byte_cursor *)pElem->key)->len, "Returned key for %s, should have been %s", "tweedle dee", "tweedle dee"); ASSERT_PTR_EQUALS(val_1, pElem->value); ret = aws_hash_table_find(&hash_table, (void *)&val_2->key, &pElem); ASSERT_SUCCESS(ret, "Hash Map get should have succeeded."); 
ASSERT_BIN_ARRAYS_EQUALS( "what's for dinner?", strlen("what's for dinner?"), ((struct aws_byte_cursor *)pElem->key)->ptr, ((struct aws_byte_cursor *)pElem->key)->len, "Returned key for %s, should have been %s", "what's for dinner?", "what's for dinner?"); ASSERT_PTR_EQUALS(val_2, pElem->value); ret = aws_hash_table_find(&hash_table, (void *)&val_3->key, &pElem); ASSERT_SUCCESS(ret, "Hash Map get should have succeeded."); ASSERT_BIN_ARRAYS_EQUALS( bytes, 10, ((struct aws_byte_cursor *)pElem->key)->ptr, ((struct aws_byte_cursor *)pElem->key)->len, "Returned key for %02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx should have been same", bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]); ASSERT_PTR_EQUALS(val_3, pElem->value); aws_hash_table_clean_up(&hash_table); aws_string_destroy(key_3_str); return 0; } AWS_TEST_CASE(test_hash_combine, s_test_hash_combine_fn) static int s_test_hash_combine_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* We're assuming that the underlying hashing function works well. * This test just makes sure we hooked it up right for 2 64bit values */ uint64_t a = 0x123456789abcdef; uint64_t b = 0xfedcba987654321; uint64_t c = aws_hash_combine(a, b); /* Sanity check */ ASSERT_TRUE(c != a); ASSERT_TRUE(c != b); /* Same inputs gets same results, right? */ ASSERT_UINT_EQUALS(c, aws_hash_combine(a, b)); /* Result spread across all bytes, right? */ uint8_t *c_bytes = (uint8_t *)&c; for (size_t i = 0; i < sizeof(c); ++i) { ASSERT_TRUE(c_bytes[i] != 0); } /* Hash should NOT be commutative */ ASSERT_TRUE(aws_hash_combine(a, b) != aws_hash_combine(b, a)); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/json_test.c000066400000000000000000000253651456575232400240510ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include static char *s_test_json = "{\"array\":[1,2,3],\"boolean\":true,\"color\":\"gold\",\"null\":null,\"number\":123," "\"object\":{\"a\":\"b\",\"c\":\"d\"}}"; struct json_parse_test_data { int elements_encountered; bool all_elements_are_strings; bool all_elements_are_numbers; }; static int s_on_obj_member( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)key; (void)out_should_continue; struct json_parse_test_data *data = user_data; ++(data->elements_encountered); data->all_elements_are_strings &= aws_json_value_is_string(value); data->all_elements_are_numbers &= aws_json_value_is_number(value); return AWS_OP_SUCCESS; } static int s_on_array_value( size_t index, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)index; (void)out_should_continue; struct json_parse_test_data *data = user_data; ++(data->elements_encountered); data->all_elements_are_strings &= aws_json_value_is_string(value); data->all_elements_are_numbers &= aws_json_value_is_number(value); return AWS_OP_SUCCESS; } static int s_on_obj_member_error( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)key; (void)value; (void)out_should_continue; (void)user_data; return AWS_OP_ERR; } static int s_on_array_value_error( size_t index, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)index; (void)value; (void)out_should_continue; (void)user_data; return AWS_OP_ERR; } static int s_on_obj_member_early( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)key; (void)value; (void)user_data; *out_should_continue = false; return AWS_OP_SUCCESS; } static int s_on_array_value_early( size_t index, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)index; (void)value; (void)user_data; *out_should_continue = false; return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_json_parse_from_string, s_test_json_parse_from_string) static int s_test_json_parse_from_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); struct aws_json_value *root = aws_json_value_new_from_string(allocator, aws_byte_cursor_from_c_str(s_test_json)); ASSERT_NOT_NULL(root); ASSERT_TRUE(aws_json_value_is_object(root)); ASSERT_TRUE(aws_json_value_compare(root, root, true)); ASSERT_TRUE(aws_json_value_compare(root, root, false)); struct aws_json_value *temp = aws_json_value_new_null(allocator); ASSERT_FALSE(aws_json_value_compare(root, temp, true)); aws_json_value_destroy(temp); // Testing valid array struct aws_json_value *array_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("array")); ASSERT_NOT_NULL(array_node); ASSERT_TRUE(aws_json_value_is_array(array_node)); ASSERT_TRUE(aws_json_get_array_size(array_node) == 3); struct aws_json_value *array_node_one = aws_json_get_array_element(array_node, 0); ASSERT_NOT_NULL(array_node_one); ASSERT_TRUE(aws_json_value_is_number(array_node_one)); double double_check_value = 0; ASSERT_INT_EQUALS(AWS_OP_SUCCESS, aws_json_value_get_number(array_node_one, &double_check_value)); ASSERT_NOT_NULL(double_check_value); ASSERT_TRUE(double_check_value == (double)1); struct json_parse_test_data array_test_data; array_test_data.elements_encountered = 0; array_test_data.all_elements_are_strings = true; 
array_test_data.all_elements_are_numbers = true; ASSERT_INT_EQUALS(AWS_OP_SUCCESS, aws_json_const_iterate_array(array_node, s_on_array_value, &array_test_data)); ASSERT_INT_EQUALS(array_test_data.elements_encountered, 3); ASSERT_FALSE(array_test_data.all_elements_are_strings); ASSERT_TRUE(array_test_data.all_elements_are_numbers); ASSERT_INT_EQUALS(AWS_OP_ERR, aws_json_const_iterate_array(array_node, s_on_array_value_error, &array_test_data)); array_test_data.elements_encountered = 0; ASSERT_INT_EQUALS( AWS_OP_SUCCESS, aws_json_const_iterate_array(array_node, s_on_array_value_early, &array_test_data)); ASSERT_INT_EQUALS(array_test_data.elements_encountered, 0); // Testing valid boolean struct aws_json_value *boolean_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("boolean")); ASSERT_NOT_NULL(boolean_node); ASSERT_TRUE(aws_json_value_is_boolean(boolean_node)); bool bool_check_value = false; aws_json_value_get_boolean(boolean_node, &bool_check_value); ASSERT_TRUE(bool_check_value); ASSERT_INT_EQUALS(AWS_OP_ERR, aws_json_const_iterate_object(boolean_node, s_on_obj_member, NULL)); ASSERT_INT_EQUALS(AWS_OP_ERR, aws_json_const_iterate_array(boolean_node, s_on_array_value, NULL)); // Testing valid string struct aws_json_value *string_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("color")); ASSERT_NOT_NULL(string_node); ASSERT_TRUE(aws_json_value_is_string(string_node)); struct aws_byte_cursor str_string_check_value; aws_json_value_get_string(string_node, &str_string_check_value); struct aws_string *tmp_str = aws_string_new_from_cursor(allocator, &str_string_check_value); ASSERT_TRUE(strcmp(aws_string_c_str(tmp_str), "gold") == 0); aws_string_destroy_secure(tmp_str); // Testing valid number struct aws_json_value *number_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("number")); ASSERT_NOT_NULL(number_node); ASSERT_TRUE(aws_json_value_is_number(number_node)); double double_test_two = 0; aws_json_value_get_number(number_node, &double_test_two); ASSERT_TRUE(double_test_two == (double)123); // Testing valid object struct aws_json_value *object_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("object")); ASSERT_NOT_NULL(object_node); ASSERT_TRUE(aws_json_value_is_object(object_node)); struct aws_json_value *sub_object_node = aws_json_value_get_from_object(object_node, aws_byte_cursor_from_c_str("a")); ASSERT_NOT_NULL(sub_object_node); ASSERT_TRUE(aws_json_value_is_string(sub_object_node)); struct aws_byte_cursor str_a_value_cursor; aws_json_value_get_string(sub_object_node, &str_a_value_cursor); struct aws_string *sub_a_string = aws_string_new_from_cursor(allocator, &str_a_value_cursor); ASSERT_TRUE(strcmp(aws_string_c_str(sub_a_string), "b") == 0); aws_string_destroy_secure(sub_a_string); struct aws_json_value *duplicate = aws_json_value_duplicate(object_node); ASSERT_TRUE(aws_json_value_compare(object_node, duplicate, true)); aws_json_value_destroy(duplicate); struct json_parse_test_data test_data; test_data.elements_encountered = 0; test_data.all_elements_are_strings = true; test_data.all_elements_are_numbers = true; ASSERT_INT_EQUALS(AWS_OP_SUCCESS, aws_json_const_iterate_object(object_node, s_on_obj_member, &test_data)); ASSERT_INT_EQUALS(test_data.elements_encountered, 2); ASSERT_TRUE(test_data.all_elements_are_strings); ASSERT_FALSE(test_data.all_elements_are_numbers); ASSERT_INT_EQUALS(AWS_OP_ERR, aws_json_const_iterate_object(object_node, s_on_obj_member_error, &test_data)); 
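/* A member callback that returns AWS_OP_ERR makes aws_json_const_iterate_object() fail with AWS_OP_ERR, while a
 * callback that clears *out_should_continue (s_on_obj_member_early) stops after the current member and the call
 * still reports success. */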
test_data.elements_encountered = 0; ASSERT_INT_EQUALS(AWS_OP_SUCCESS, aws_json_const_iterate_object(object_node, s_on_obj_member_early, &test_data)); ASSERT_INT_EQUALS(test_data.elements_encountered, 0); // Testing invalid object struct aws_json_value *invalid_object = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("invalid")); ASSERT_NULL(invalid_object); ASSERT_INT_EQUALS(aws_json_value_get_number(invalid_object, NULL), AWS_OP_ERR); // Test getting invalid type of data ASSERT_INT_EQUALS(aws_json_value_get_number(string_node, NULL), AWS_OP_ERR); aws_json_value_destroy(root); // Make sure that destroying NULL does not have any bad effects. aws_json_value_destroy(NULL); aws_common_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_json_parse_to_string, s_test_json_parse_to_string) static int s_test_json_parse_to_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); struct aws_json_value *root = aws_json_value_new_object(allocator); struct aws_json_value *array = aws_json_value_new_array(allocator); aws_json_value_add_array_element(array, aws_json_value_new_number(allocator, 1)); aws_json_value_add_array_element(array, aws_json_value_new_number(allocator, 2)); aws_json_value_add_array_element(array, aws_json_value_new_number(allocator, 3)); aws_json_value_add_to_object(root, aws_byte_cursor_from_c_str("array"), array); aws_json_value_add_to_object( root, aws_byte_cursor_from_c_str("boolean"), aws_json_value_new_boolean(allocator, true)); aws_json_value_add_to_object( root, aws_byte_cursor_from_c_str("color"), aws_json_value_new_string(allocator, aws_byte_cursor_from_c_str("gold"))); aws_json_value_add_to_object(root, aws_byte_cursor_from_c_str("null"), aws_json_value_new_null(allocator)); aws_json_value_add_to_object(root, aws_byte_cursor_from_c_str("number"), aws_json_value_new_number(allocator, 123)); struct aws_json_value *object = aws_json_value_new_object(allocator); aws_json_value_add_to_object( object, aws_byte_cursor_from_c_str("a"), aws_json_value_new_string(allocator, aws_byte_cursor_from_c_str("b"))); aws_json_value_add_to_object( object, aws_byte_cursor_from_c_str("c"), aws_json_value_new_string(allocator, aws_byte_cursor_from_c_str("d"))); aws_json_value_add_to_object(root, aws_byte_cursor_from_c_str("object"), object); struct aws_byte_buf result_string_buf; aws_byte_buf_init(&result_string_buf, allocator, 0); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, aws_byte_buf_append_json_string(root, &result_string_buf)); struct aws_string *result_string = aws_string_new_from_buf(allocator, &result_string_buf); ASSERT_STR_EQUALS(s_test_json, aws_string_c_str(result_string)); aws_byte_buf_clean_up_secure(&result_string_buf); aws_string_destroy_secure(result_string); aws_json_value_destroy(root); aws_common_library_clean_up(); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/linked_hash_table_test.c000066400000000000000000000250521456575232400265110ustar00rootroot00000000000000/* * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include static int s_test_linked_hash_table_preserves_insertion_order_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_linked_hash_table table; ASSERT_SUCCESS( aws_linked_hash_table_init(&table, allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, NULL, 3)); const char *first_key = "first"; const char *second_key = "second"; const char *third_key = "third"; const char *fourth_key = "fourth"; int first = 1; int second = 2; int third = 3; int fourth = 4; ASSERT_SUCCESS(aws_linked_hash_table_put(&table, first_key, &first)); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, second_key, &second)); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, third_key, &third)); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, fourth_key, &fourth)); ASSERT_INT_EQUALS(4, aws_linked_hash_table_get_element_count(&table)); int *value = NULL; ASSERT_SUCCESS(aws_linked_hash_table_find(&table, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(first, *value); ASSERT_SUCCESS(aws_linked_hash_table_find(&table, second_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(second, *value); ASSERT_SUCCESS(aws_linked_hash_table_find(&table, third_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(third, *value); ASSERT_SUCCESS(aws_linked_hash_table_find(&table, fourth_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_INT_EQUALS(fourth, *value); const struct aws_linked_list *list = aws_linked_hash_table_get_iteration_list(&table); ASSERT_NOT_NULL(list); struct aws_linked_list_node *node = aws_linked_list_front(list); struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); ASSERT_INT_EQUALS(first, *(int *)table_node->value); node = aws_linked_list_next(node); ASSERT_NOT_NULL(node); table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); ASSERT_INT_EQUALS(second, *(int *)table_node->value); node = aws_linked_list_next(node); ASSERT_NOT_NULL(node); table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); ASSERT_INT_EQUALS(third, *(int *)table_node->value); node = aws_linked_list_next(node); ASSERT_NOT_NULL(node); table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); ASSERT_INT_EQUALS(fourth, *(int *)table_node->value); node = aws_linked_list_next(node); ASSERT_PTR_EQUALS(aws_linked_list_end(list), node); aws_linked_hash_table_clean_up(&table); return 0; } AWS_TEST_CASE(test_linked_hash_table_preserves_insertion_order, s_test_linked_hash_table_preserves_insertion_order_fn) struct linked_hash_table_test_value_element { bool value_removed; }; static void s_linked_hash_table_element_value_destroy(void *value) { struct linked_hash_table_test_value_element *value_element = value; value_element->value_removed = true; } static int s_test_linked_hash_table_entries_cleanup_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_linked_hash_table table; ASSERT_SUCCESS(aws_linked_hash_table_init( &table, allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, s_linked_hash_table_element_value_destroy, 2)); const char *first_key = "first"; const char *second_key = "second"; struct linked_hash_table_test_value_element first = {.value_removed = false}; struct linked_hash_table_test_value_element second = {.value_removed = false}; ASSERT_SUCCESS(aws_linked_hash_table_put(&table, first_key, 
&first)); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, second_key, &second)); ASSERT_INT_EQUALS(2, aws_linked_hash_table_get_element_count(&table)); ASSERT_SUCCESS(aws_linked_hash_table_remove(&table, second_key)); ASSERT_TRUE(second.value_removed); ASSERT_INT_EQUALS(1, aws_linked_hash_table_get_element_count(&table)); aws_linked_hash_table_clear(&table); ASSERT_INT_EQUALS(0, aws_linked_hash_table_get_element_count(&table)); ASSERT_TRUE(first.value_removed); ASSERT_TRUE(aws_linked_list_empty(aws_linked_hash_table_get_iteration_list(&table))); aws_linked_hash_table_clean_up(&table); return 0; } AWS_TEST_CASE(test_linked_hash_table_entries_cleanup, s_test_linked_hash_table_entries_cleanup_fn) static int s_test_linked_hash_table_entries_overwrite_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_linked_hash_table table; ASSERT_SUCCESS(aws_linked_hash_table_init( &table, allocator, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL, s_linked_hash_table_element_value_destroy, 2)); const char *first_key = "first"; struct linked_hash_table_test_value_element first = {.value_removed = false}; struct linked_hash_table_test_value_element second = {.value_removed = false}; ASSERT_SUCCESS(aws_linked_hash_table_put(&table, first_key, &first)); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, first_key, &second)); ASSERT_INT_EQUALS(1, aws_linked_hash_table_get_element_count(&table)); ASSERT_TRUE(first.value_removed); ASSERT_FALSE(second.value_removed); struct linked_hash_table_test_value_element *value = NULL; ASSERT_SUCCESS(aws_linked_hash_table_find(&table, first_key, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_PTR_EQUALS(&second, value); aws_linked_hash_table_clean_up(&table); return 0; } AWS_TEST_CASE(test_linked_hash_table_entries_overwrite, s_test_linked_hash_table_entries_overwrite_fn) static int s_test_linked_hash_table_entries_overwrite_reference_unequal_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_linked_hash_table table; ASSERT_SUCCESS(aws_linked_hash_table_init( &table, allocator, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, s_linked_hash_table_element_value_destroy, 2)); struct aws_string *first_key = aws_string_new_from_c_str(allocator, "first"); struct aws_string *first_key_v2 = aws_string_new_from_c_str(allocator, "first"); struct linked_hash_table_test_value_element first = {.value_removed = false}; struct linked_hash_table_test_value_element second = {.value_removed = false}; ASSERT_SUCCESS(aws_linked_hash_table_put(&table, first_key, &first)); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, first_key_v2, &second)); ASSERT_INT_EQUALS(1, aws_linked_hash_table_get_element_count(&table)); ASSERT_TRUE(first.value_removed); ASSERT_FALSE(second.value_removed); struct linked_hash_table_test_value_element *value = NULL; struct aws_string *first_key_v3 = aws_string_new_from_c_str(allocator, "first"); ASSERT_SUCCESS(aws_linked_hash_table_find(&table, first_key_v3, (void **)&value)); ASSERT_NOT_NULL(value); ASSERT_PTR_EQUALS(&second, value); aws_linked_hash_table_clean_up(&table); aws_string_destroy(first_key_v3); return 0; } AWS_TEST_CASE( test_linked_hash_table_entries_overwrite_reference_unequal, s_test_linked_hash_table_entries_overwrite_reference_unequal_fn) struct backed_cursor_element { struct aws_byte_cursor name_cursor; struct aws_byte_buf name; uint32_t value; struct aws_allocator *allocator; }; static void s_backed_cursor_element_value_destroy(void *value) { struct backed_cursor_element 
*element = value; if (element == NULL) { return; } aws_byte_buf_clean_up(&element->name); aws_mem_release(element->allocator, element); } static struct backed_cursor_element *s_backed_cursor_element_new( struct aws_allocator *allocator, struct aws_byte_cursor name, uint32_t value) { struct backed_cursor_element *element = aws_mem_calloc(allocator, 1, sizeof(struct backed_cursor_element)); element->allocator = allocator; element->value = value; if (aws_byte_buf_init_copy_from_cursor(&element->name, allocator, name)) { goto on_error; } element->name_cursor = aws_byte_cursor_from_buf(&element->name); return element; on_error: s_backed_cursor_element_value_destroy(element); return NULL; } static bool s_cursor_hash_equality_fn(const void *a, const void *b) { const struct aws_byte_cursor *a_cursor = a; const struct aws_byte_cursor *b_cursor = b; return aws_byte_cursor_eq(a_cursor, b_cursor); } static int s_test_linked_hash_table_entries_overwrite_backed_cursor_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_linked_hash_table table; ASSERT_SUCCESS(aws_linked_hash_table_init( &table, allocator, aws_hash_byte_cursor_ptr, s_cursor_hash_equality_fn, NULL, s_backed_cursor_element_value_destroy, 2)); struct aws_byte_cursor shared_name = aws_byte_cursor_from_c_str("Hello"); struct backed_cursor_element *element1 = s_backed_cursor_element_new(allocator, shared_name, 1); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, &element1->name_cursor, element1)); struct backed_cursor_element *element1_prime = s_backed_cursor_element_new(allocator, shared_name, 2); ASSERT_SUCCESS(aws_linked_hash_table_put(&table, &element1_prime->name_cursor, element1_prime)); ASSERT_INT_EQUALS(1, aws_linked_hash_table_get_element_count(&table)); void *element = NULL; ASSERT_SUCCESS(aws_linked_hash_table_find(&table, &shared_name, (void **)&element)); ASSERT_NOT_NULL(element); struct backed_cursor_element *found_element = element; ASSERT_INT_EQUALS(2, found_element->value); aws_linked_hash_table_clean_up(&table); return 0; } AWS_TEST_CASE( test_linked_hash_table_entries_overwrite_backed_cursor, s_test_linked_hash_table_entries_overwrite_backed_cursor_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/linked_list_test.c000066400000000000000000000377041456575232400254010ustar00rootroot00000000000000/* * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include #include struct int_value { int value; struct aws_linked_list_node node; }; static int s_test_linked_list_order_push_back_pop_front(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list list; aws_linked_list_init(&list); ASSERT_TRUE(aws_linked_list_empty(&list)); struct int_value first = (struct int_value){.value = 1}; struct int_value second = (struct int_value){.value = 2}; struct int_value third = (struct int_value){.value = 3}; struct int_value fourth = (struct int_value){.value = 4}; aws_linked_list_push_back(&list, &first.node); aws_linked_list_push_back(&list, &second.node); aws_linked_list_push_back(&list, &third.node); aws_linked_list_push_back(&list, &fourth.node); int item; struct aws_linked_list_node *node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(first.value, item); node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(second.value, item); node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(third.value, item); node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(fourth.value, item); ASSERT_TRUE(aws_linked_list_empty(&list)); return 0; } static int s_test_linked_list_order_push_front_pop_back(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list list; aws_linked_list_init(&list); ASSERT_TRUE(aws_linked_list_empty(&list)); struct int_value first = (struct int_value){.value = 1}; struct int_value second = (struct int_value){.value = 2}; struct int_value third = (struct int_value){.value = 3}; struct int_value fourth = (struct int_value){.value = 4}; aws_linked_list_push_front(&list, &first.node); aws_linked_list_push_front(&list, &second.node); aws_linked_list_push_front(&list, &third.node); aws_linked_list_push_front(&list, &fourth.node); ASSERT_FALSE(aws_linked_list_empty(&list)); int item; struct aws_linked_list_node *node = aws_linked_list_pop_back(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(first.value, item); node = aws_linked_list_pop_back(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(second.value, item); node = aws_linked_list_pop_back(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(third.value, item); node = aws_linked_list_pop_back(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(fourth.value, item); ASSERT_TRUE(aws_linked_list_empty(&list)); return 0; } static int s_test_linked_list_swap_nodes(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list list; aws_linked_list_init(&list); ASSERT_TRUE(aws_linked_list_empty(&list)); struct int_value first = (struct int_value){.value = 1}; struct int_value second = (struct int_value){.value = 2}; struct int_value third = (struct int_value){.value = 3}; struct int_value fourth = (struct int_value){.value = 4}; aws_linked_list_push_back(&list, &first.node); aws_linked_list_push_back(&list, &second.node); aws_linked_list_push_back(&list, &third.node); aws_linked_list_push_back(&list, &fourth.node); /* non-adjacent swap: new order becomes 3, 2, 1, 4 */ aws_linked_list_swap_nodes(&first.node, &third.node); /* adjacent swap: new order becomes 3, 2, 4, 1 */ 
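/* (After the first swap the list reads {3, 2, 1, 4}, so first.node and fourth.node now sit
 * next to each other; swapping them below therefore covers the adjacent-node case in
 * addition to the non-adjacent case exercised above.) */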
aws_linked_list_swap_nodes(&first.node, &fourth.node); int item; struct aws_linked_list_node *node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(third.value, item); node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(second.value, item); node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(fourth.value, item); node = aws_linked_list_pop_front(&list); item = AWS_CONTAINER_OF(node, struct int_value, node)->value; ASSERT_INT_EQUALS(first.value, item); ASSERT_TRUE(aws_linked_list_empty(&list)); return 0; } static int s_test_linked_list_iteration(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list list; aws_linked_list_init(&list); ASSERT_TRUE(aws_linked_list_empty(&list)); ASSERT_PTR_EQUALS(aws_linked_list_begin(&list), aws_linked_list_end(&list)); struct int_value first = (struct int_value){.value = 1}; struct int_value second = (struct int_value){.value = 2}; struct int_value third = (struct int_value){.value = 3}; struct int_value fourth = (struct int_value){.value = 4}; aws_linked_list_push_back(&list, &first.node); aws_linked_list_push_back(&list, &second.node); aws_linked_list_push_back(&list, &third.node); aws_linked_list_push_back(&list, &fourth.node); ASSERT_FALSE(aws_linked_list_empty(&list)); ASSERT_FALSE(aws_linked_list_begin(&list) == aws_linked_list_end(&list)); int count = 1; for (struct aws_linked_list_node *iter = aws_linked_list_begin(&list); iter != aws_linked_list_end(&list); iter = aws_linked_list_next(iter)) { int item = AWS_CONTAINER_OF(iter, struct int_value, node)->value; ASSERT_INT_EQUALS(count, item); ++count; } return 0; } static int s_test_linked_list_reverse_iteration(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list list; aws_linked_list_init(&list); ASSERT_TRUE(aws_linked_list_empty(&list)); ASSERT_PTR_EQUALS(aws_linked_list_rbegin(&list), aws_linked_list_rend(&list)); struct int_value first = (struct int_value){.value = 1}; struct int_value second = (struct int_value){.value = 2}; struct int_value third = (struct int_value){.value = 3}; struct int_value fourth = (struct int_value){.value = 4}; aws_linked_list_push_back(&list, &first.node); aws_linked_list_push_back(&list, &second.node); aws_linked_list_push_back(&list, &third.node); aws_linked_list_push_back(&list, &fourth.node); ASSERT_FALSE(aws_linked_list_empty(&list)); ASSERT_FALSE(aws_linked_list_rbegin(&list) == aws_linked_list_rend(&list)); int count = 4; for (struct aws_linked_list_node *iter = aws_linked_list_rbegin(&list); iter != aws_linked_list_rend(&list); iter = aws_linked_list_prev(iter)) { int item = AWS_CONTAINER_OF(iter, struct int_value, node)->value; ASSERT_INT_EQUALS(count, item); --count; } return 0; } static int s_test_linked_list_swap_contents(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list a, b; struct aws_linked_list_node a1, a2, b1, b2; /* Setup lists like: * a = {a1, a2} * b = {b1, b2} * * After swap should be like: * a = {b1, b2} * b = {a1, a2} */ aws_linked_list_init(&a); aws_linked_list_push_back(&a, &a1); aws_linked_list_push_back(&a, &a2); aws_linked_list_init(&b); aws_linked_list_push_back(&b, &b1); aws_linked_list_push_back(&b, &b2); aws_linked_list_swap_contents(&a, &b); ASSERT_PTR_EQUALS(&b1, aws_linked_list_pop_front(&a)); 
ASSERT_PTR_EQUALS(&b2, aws_linked_list_pop_front(&a)); ASSERT_TRUE(aws_linked_list_empty(&a)); ASSERT_PTR_EQUALS(&a1, aws_linked_list_pop_front(&b)); ASSERT_PTR_EQUALS(&a2, aws_linked_list_pop_front(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); /* Setup lists like: * a = {a1, a2} * b = {} * * After swap should be like: * a = {} * b = {a1, a2} */ aws_linked_list_init(&a); aws_linked_list_push_back(&a, &a1); aws_linked_list_push_back(&a, &a2); aws_linked_list_init(&b); aws_linked_list_swap_contents(&a, &b); ASSERT_TRUE(aws_linked_list_empty(&a)); ASSERT_PTR_EQUALS(&a1, aws_linked_list_pop_front(&b)); ASSERT_PTR_EQUALS(&a2, aws_linked_list_pop_front(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); /* Setup lists like: * a = {} * b = {b1, b2} * * After swap should be like: * a = {b1, b2} * b = {} */ aws_linked_list_init(&a); aws_linked_list_init(&b); aws_linked_list_push_back(&b, &b1); aws_linked_list_push_back(&b, &b2); aws_linked_list_swap_contents(&a, &b); ASSERT_PTR_EQUALS(&b1, aws_linked_list_pop_front(&a)); ASSERT_PTR_EQUALS(&b2, aws_linked_list_pop_front(&a)); ASSERT_TRUE(aws_linked_list_empty(&a)); ASSERT_TRUE(aws_linked_list_empty(&b)); /* Setup two empty lists, after swap they should both still be ok. */ aws_linked_list_init(&a); aws_linked_list_init(&b); aws_linked_list_swap_contents(&a, &b); ASSERT_TRUE(aws_linked_list_empty(&a)); ASSERT_TRUE(aws_linked_list_empty(&b)); return AWS_OP_SUCCESS; } static int s_test_linked_list_move_all_back(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list a, b; struct aws_linked_list_node a1, a2, b1, b2; /* Setup lists like: * a = {a1, a2} * b = {b1, b2} * * After move should be like: * a = {a1, a2, b1, b2} * b = {} */ aws_linked_list_init(&a); aws_linked_list_push_back(&a, &a1); aws_linked_list_push_back(&a, &a2); aws_linked_list_init(&b); aws_linked_list_push_back(&b, &b1); aws_linked_list_push_back(&b, &b2); aws_linked_list_move_all_back(&a, &b); ASSERT_TRUE(aws_linked_list_is_valid_deep(&a)); ASSERT_TRUE(aws_linked_list_is_valid_deep(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); struct aws_linked_list_node *expected_a1a2b1b2[] = {&a1, &a2, &b1, &b2}; struct aws_linked_list_node *it = aws_linked_list_begin(&a); size_t i = 0; while (it != aws_linked_list_end(&a)) { ASSERT_PTR_EQUALS(expected_a1a2b1b2[i], it); it = aws_linked_list_next(it); i++; }; ASSERT_UINT_EQUALS(AWS_ARRAY_SIZE(expected_a1a2b1b2), i); /* Setup lists like: * a = {} * b = {b1, b2} * * After move should be like: * a = {b1, b2} * b = {} */ aws_linked_list_init(&a); aws_linked_list_init(&b); aws_linked_list_push_back(&b, &b1); aws_linked_list_push_back(&b, &b2); aws_linked_list_move_all_back(&a, &b); ASSERT_TRUE(aws_linked_list_is_valid_deep(&a)); ASSERT_TRUE(aws_linked_list_is_valid_deep(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); struct aws_linked_list_node *expected_b1b2[] = {&b1, &b2}; it = aws_linked_list_begin(&a); i = 0; while (it != aws_linked_list_end(&a)) { ASSERT_PTR_EQUALS(expected_b1b2[i], it); it = aws_linked_list_next(it); i++; }; ASSERT_UINT_EQUALS(AWS_ARRAY_SIZE(expected_b1b2), i); /* Setup lists like: * a = {a1} * b = {b1} * * After move should be like: * a = {a1, b1} * b = {} */ aws_linked_list_init(&a); aws_linked_list_push_back(&a, &a1); aws_linked_list_init(&b); aws_linked_list_push_back(&b, &b1); aws_linked_list_move_all_back(&a, &b); ASSERT_TRUE(aws_linked_list_is_valid_deep(&a)); ASSERT_TRUE(aws_linked_list_is_valid_deep(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); struct aws_linked_list_node *expected_a1b1[] = 
{&a1, &b1}; it = aws_linked_list_begin(&a); i = 0; while (it != aws_linked_list_end(&a)) { ASSERT_PTR_EQUALS(expected_a1b1[i], it); it = aws_linked_list_next(it); i++; }; ASSERT_UINT_EQUALS(AWS_ARRAY_SIZE(expected_a1b1), i); return AWS_OP_SUCCESS; } static int s_test_linked_list_move_all_front(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_linked_list a, b; struct aws_linked_list_node a1, a2, b1, b2; /* Setup lists like: * a = {a2, a1} * b = {b2, b1} * * After move should be like: * a = {b2, b1, a2, a1} * b = {} */ aws_linked_list_init(&a); aws_linked_list_push_front(&a, &a1); aws_linked_list_push_front(&a, &a2); aws_linked_list_init(&b); aws_linked_list_push_front(&b, &b1); aws_linked_list_push_front(&b, &b2); aws_linked_list_move_all_front(&a, &b); ASSERT_TRUE(aws_linked_list_is_valid_deep(&a)); ASSERT_TRUE(aws_linked_list_is_valid_deep(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); struct aws_linked_list_node *expected_b2b1a2a1[] = {&b2, &b1, &a2, &a1}; struct aws_linked_list_node *it = aws_linked_list_begin(&a); size_t i = 0; while (it != aws_linked_list_end(&a)) { ASSERT_PTR_EQUALS(expected_b2b1a2a1[i], it); it = aws_linked_list_next(it); i++; }; ASSERT_UINT_EQUALS(AWS_ARRAY_SIZE(expected_b2b1a2a1), i); /* Setup lists like: * a = {} * b = {b2, b1} * * After move should be like: * a = {b2, b1} * b = {} */ aws_linked_list_init(&a); aws_linked_list_init(&b); aws_linked_list_push_front(&b, &b1); aws_linked_list_push_front(&b, &b2); aws_linked_list_move_all_front(&a, &b); ASSERT_TRUE(aws_linked_list_is_valid_deep(&a)); ASSERT_TRUE(aws_linked_list_is_valid_deep(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); struct aws_linked_list_node *expected_b2b1[] = {&b2, &b1}; it = aws_linked_list_begin(&a); i = 0; while (it != aws_linked_list_end(&a)) { ASSERT_PTR_EQUALS(expected_b2b1[i], it); it = aws_linked_list_next(it); i++; }; ASSERT_UINT_EQUALS(AWS_ARRAY_SIZE(expected_b2b1), i); /* Setup lists like: * a = {a1} * b = {b1} * * After move should be like: * a = {b1, a1} * b = {} */ aws_linked_list_init(&a); aws_linked_list_push_front(&a, &a1); aws_linked_list_init(&b); aws_linked_list_push_front(&b, &b1); aws_linked_list_move_all_front(&a, &b); ASSERT_TRUE(aws_linked_list_is_valid_deep(&a)); ASSERT_TRUE(aws_linked_list_is_valid_deep(&b)); ASSERT_TRUE(aws_linked_list_empty(&b)); struct aws_linked_list_node *expected_b1a1[] = {&b1, &a1}; it = aws_linked_list_begin(&a); i = 0; while (it != aws_linked_list_end(&a)) { ASSERT_PTR_EQUALS(expected_b1a1[i], it); it = aws_linked_list_next(it); i++; }; ASSERT_UINT_EQUALS(AWS_ARRAY_SIZE(expected_b1a1), i); return AWS_OP_SUCCESS; } AWS_TEST_CASE(linked_list_push_back_pop_front, s_test_linked_list_order_push_back_pop_front) AWS_TEST_CASE(linked_list_push_front_pop_back, s_test_linked_list_order_push_front_pop_back) AWS_TEST_CASE(linked_list_swap_nodes, s_test_linked_list_swap_nodes) AWS_TEST_CASE(linked_list_iteration, s_test_linked_list_iteration) AWS_TEST_CASE(linked_list_reverse_iteration, s_test_linked_list_reverse_iteration) AWS_TEST_CASE(linked_list_swap_contents, s_test_linked_list_swap_contents) AWS_TEST_CASE(linked_list_move_all_back, s_test_linked_list_move_all_back) AWS_TEST_CASE(linked_list_move_all_front, s_test_linked_list_move_all_front) 
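/*
 * Illustrative sketch (editor's addition, not part of the upstream test suite): the tests in
 * this file all rely on the intrusive-list pattern, in which an aws_linked_list_node is
 * embedded inside the caller's struct and AWS_CONTAINER_OF recovers the owning struct from a
 * node pointer. The helper below shows that pattern on its own, using only the APIs already
 * exercised above; the function name is made up for illustration.
 */
static int s_sum_int_values(struct aws_linked_list *list) {
    int sum = 0;
    /* Drain the list from the front; each popped node lives inside a struct int_value. */
    while (!aws_linked_list_empty(list)) {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(list);
        struct int_value *owner = AWS_CONTAINER_OF(node, struct int_value, node);
        sum += owner->value;
    }
    return sum;
}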
aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/000077500000000000000000000000001456575232400233105ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/log_channel_test.c000066400000000000000000000224321456575232400267670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include /* * Mock writer to capture what gets passed through */ struct mock_log_writer_impl { struct aws_array_list log_lines; }; static int s_mock_log_writer_write(struct aws_log_writer *writer, const struct aws_string *output) { struct mock_log_writer_impl *impl = (struct mock_log_writer_impl *)writer->impl; struct aws_string *output_copy = aws_string_new_from_string(writer->allocator, output); if (output_copy == NULL) { return AWS_OP_ERR; } aws_array_list_push_back(&impl->log_lines, &output_copy); return AWS_OP_SUCCESS; } static void s_mock_log_writer_clean_up(struct aws_log_writer *writer) { struct mock_log_writer_impl *impl = (struct mock_log_writer_impl *)writer->impl; size_t line_count = aws_array_list_length(&impl->log_lines); for (size_t i = 0; i < line_count; ++i) { struct aws_string *line = NULL; if (aws_array_list_get_at(&impl->log_lines, &line, i)) { continue; } aws_string_destroy(line); } aws_array_list_clean_up(&impl->log_lines); aws_mem_release(writer->allocator, impl); } static struct aws_log_writer_vtable s_mock_writer_vtable = { .write = s_mock_log_writer_write, .clean_up = s_mock_log_writer_clean_up, }; static int s_aws_mock_log_writer_init(struct aws_log_writer *writer, struct aws_allocator *allocator) { struct mock_log_writer_impl *impl = (struct mock_log_writer_impl *)aws_mem_acquire(allocator, sizeof(struct mock_log_writer_impl)); if (impl == NULL) { return AWS_OP_ERR; } if (aws_array_list_init_dynamic(&impl->log_lines, allocator, 10, sizeof(struct aws_string *))) { aws_mem_release(allocator, impl); return AWS_OP_ERR; } writer->vtable = &s_mock_writer_vtable; writer->allocator = allocator; writer->impl = impl; return AWS_OP_SUCCESS; } /* * Test utilities */ static char s_test_error_message[4096]; static bool s_verify_mock_equal( struct aws_log_writer *writer, const struct aws_string ***test_lines, size_t array_length) { struct mock_log_writer_impl *impl = (struct mock_log_writer_impl *)writer->impl; size_t line_count = aws_array_list_length(&impl->log_lines); if (line_count != array_length) { snprintf( s_test_error_message, sizeof(s_test_error_message), "Expected %" PRIu64 " lines, but received %" PRIu64 "", (uint64_t)array_length, (uint64_t)line_count); return false; } for (size_t i = 0; i < array_length; ++i) { struct aws_string *captured = NULL; if (aws_array_list_get_at(&impl->log_lines, &captured, i)) { snprintf(s_test_error_message, sizeof(s_test_error_message), "Unable to fetch log line from array list"); return false; } const struct aws_string *original = *(test_lines[i]); if (!aws_string_eq(original, captured)) { snprintf( s_test_error_message, sizeof(s_test_error_message), "Expected log line:\n%s\nbut received log line:\n%s", (char *)original->bytes, (char *)captured->bytes); return false; } } return true; } typedef int ( *init_channel_fn)(struct aws_log_channel *channel, struct aws_allocator *allocator, struct aws_log_writer *writer); static int s_do_channel_test( struct aws_allocator *allocator, init_channel_fn init_fn, const struct aws_string ***test_lines, size_t test_lines_length, int *sleep_times) { 
struct aws_log_writer mock_writer; if (s_aws_mock_log_writer_init(&mock_writer, allocator)) { return AWS_OP_ERR; } struct aws_log_channel log_channel; if (init_fn(&log_channel, allocator, &mock_writer)) { return AWS_OP_ERR; } int result = AWS_OP_SUCCESS; for (size_t i = 0; i < test_lines_length; ++i) { struct aws_string *test_line_copy = aws_string_new_from_string(log_channel.allocator, *(test_lines[i])); if ((log_channel.vtable->send)(&log_channel, test_line_copy)) { snprintf(s_test_error_message, sizeof(s_test_error_message), "Failed call to log channel send"); result = AWS_OP_ERR; } /* * For background case, optionally sleep to vary the push timing */ if (sleep_times && sleep_times[i] > 0) { aws_thread_current_sleep(sleep_times[i]); } } aws_log_channel_clean_up(&log_channel); if (!s_verify_mock_equal(log_channel.writer, test_lines, test_lines_length)) { result = AWS_OP_ERR; } aws_log_writer_clean_up(&mock_writer); return result; } /* * Test body helper macros */ #define DEFINE_FOREGROUND_LOG_CHANNEL_TEST(test_name, string_array_name) \ static int s_foreground_log_channel_##test_name(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return s_do_channel_test( \ allocator, \ aws_log_channel_init_foreground, \ string_array_name, \ sizeof(string_array_name) / sizeof(struct aws_string **), \ NULL); \ } \ AWS_TEST_CASE(test_foreground_log_channel_##test_name, s_foreground_log_channel_##test_name); #define DEFINE_BACKGROUND_LOG_CHANNEL_TEST(test_name, string_array_name, sleep_times) \ static int s_background_log_channel_##test_name(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return s_do_channel_test( \ allocator, \ aws_log_channel_init_background, \ string_array_name, \ sizeof(string_array_name) / sizeof(struct aws_string **), \ sleep_times); \ } \ AWS_TEST_CASE(test_background_log_channel_##test_name, s_background_log_channel_##test_name); /* * Test data */ AWS_STATIC_STRING_FROM_LITERAL(s_log_line_1, "1"); AWS_STATIC_STRING_FROM_LITERAL(s_log_line_2, "2"); AWS_STATIC_STRING_FROM_LITERAL(s_log_line_3, "3"); AWS_STATIC_STRING_FROM_LITERAL(s_log_line_4, "4"); AWS_STATIC_STRING_FROM_LITERAL(s_log_line_simple, "A simple line.\n"); AWS_STATIC_STRING_FROM_LITERAL(s_log_line_multiline, "There's\na lot\n\tof snow outside.\n"); AWS_STATIC_STRING_FROM_LITERAL(s_log_line_fake, "[DEBUG] [??] 
[1234567] - Time to crash\n"); const struct aws_string **s_channel_test_one_line[] = {&s_log_line_1}; const struct aws_string **s_channel_test_numbers[] = {&s_log_line_1, &s_log_line_2, &s_log_line_3, &s_log_line_4}; const struct aws_string **s_channel_test_words[] = {&s_log_line_simple, &s_log_line_multiline, &s_log_line_fake}; const struct aws_string **s_channel_test_all[] = { &s_log_line_1, &s_log_line_2, &s_log_line_3, &s_log_line_4, &s_log_line_simple, &s_log_line_multiline, &s_log_line_fake, }; static int s_background_sleep_times_ns[] = {0, 100000, 0, 0, 1000000, 0, 1000000}; /* * Foreground channel tests */ DEFINE_FOREGROUND_LOG_CHANNEL_TEST(single_line, s_channel_test_one_line) DEFINE_FOREGROUND_LOG_CHANNEL_TEST(numbers, s_channel_test_numbers) DEFINE_FOREGROUND_LOG_CHANNEL_TEST(words, s_channel_test_words) DEFINE_FOREGROUND_LOG_CHANNEL_TEST(all, s_channel_test_all) /* * Background channel tests */ DEFINE_BACKGROUND_LOG_CHANNEL_TEST(single_line, s_channel_test_one_line, NULL) DEFINE_BACKGROUND_LOG_CHANNEL_TEST(numbers, s_channel_test_numbers, s_background_sleep_times_ns) DEFINE_BACKGROUND_LOG_CHANNEL_TEST(words, s_channel_test_words, s_background_sleep_times_ns) DEFINE_BACKGROUND_LOG_CHANNEL_TEST(all, s_channel_test_all, s_background_sleep_times_ns) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/log_formatter_test.c000066400000000000000000000205741456575232400273670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #define TEST_FORMATTER_MAX_BUFFER_SIZE 4096 typedef int(log_formatter_test_fn)(struct aws_log_formatter *formatter, struct aws_string **output); int do_default_log_formatter_test( struct aws_allocator *allocator, log_formatter_test_fn *test_fn, const char *expected_user_output, enum aws_log_level log_level, enum aws_date_format date_format) { /* Initialize a default formatter*/ struct aws_log_formatter_standard_options options = {.date_format = date_format}; struct aws_log_formatter formatter; aws_log_formatter_init_default(&formatter, allocator, &options); struct aws_date_time test_time; aws_date_time_init_now(&test_time); /* Output something via the callback */ struct aws_string *output = NULL; int result = (*test_fn)(&formatter, &output); aws_log_formatter_clean_up(&formatter); char buffer[TEST_FORMATTER_MAX_BUFFER_SIZE]; snprintf(buffer, TEST_FORMATTER_MAX_BUFFER_SIZE, "%s", aws_string_c_str(output)); aws_string_destroy(output); /* Check that the format call was successful */ ASSERT_TRUE(result == AWS_OP_SUCCESS, "Formatting operation failed"); /* * Make sure there's an endline as the final character(s). * Move the end-of-string marker on top of the endline. * Otherwise our failure outputs include the endline and become confusing. 
*/ size_t line_length = strlen(buffer); ASSERT_TRUE(line_length >= 2, "Log line \"%s\" is too short", buffer); ASSERT_TRUE(buffer[line_length - 1] == '\n', "Log line did not end with a newline"); buffer[line_length - 1] = 0; /* * Check that the log level appears properly */ const char *log_level_start = strstr(buffer, "["); ASSERT_TRUE(log_level_start != NULL, "Could not find start of log level in output line \"%s\"", buffer); const char *level_string = NULL; ASSERT_SUCCESS( aws_log_level_to_string(log_level, &level_string), "Failed to convert log level %d to string", (int)log_level); ASSERT_TRUE( strncmp(log_level_start + 1, level_string, strlen(level_string)) == 0, "Incorrect value for log level in output line \"%s\"", buffer); /** * Find the timestamp substring. */ const char *time_start = strstr(log_level_start + 1, "["); ASSERT_TRUE(time_start != NULL, "Could not find start of timestamp in output line \"%s\"", buffer); time_start += 1; const char *time_end = strstr(time_start, "]"); ASSERT_TRUE(time_end != NULL, "Could not find end of timestamp in output line \"%s\"", buffer); size_t time_length = time_end - time_start; /* * Fake a buffer pointing to the logged timestamp string; convert it to a date time */ struct aws_byte_buf timestamp_buffer; timestamp_buffer.allocator = formatter.allocator; timestamp_buffer.buffer = (uint8_t *)time_start; timestamp_buffer.capacity = time_length; timestamp_buffer.len = time_length; struct aws_date_time log_time; ASSERT_SUCCESS( aws_date_time_init_from_str(&log_time, ×tamp_buffer, date_format), "Could not parse timestamp value starting at \"%s\"", time_start); /* * Check that the timestamp, when converted back, is close to the current time. */ time_t time_diff = aws_date_time_diff(&log_time, &test_time); ASSERT_TRUE(time_diff <= 1, "Log timestamp deviated too far from test timestamp: %d seconds", (int)time_diff); /* * Find the thread id substring */ const char *thread_id_start = strstr(time_end + 1, "["); ASSERT_TRUE(thread_id_start != NULL, "Could not find start of thread id in output line \"%s\"", buffer); thread_id_start += 1; char *thread_id_end = strstr(thread_id_start, "]"); ASSERT_TRUE(thread_id_end != NULL, "Could not find end of thread id in output line \"%s\"", buffer); ASSERT_TRUE((thread_id_end - thread_id_start + 1) == AWS_THREAD_ID_T_REPR_BUFSZ, "Unexpected thread id length"); aws_thread_id_t current_thread_id = aws_thread_current_thread_id(); char repr[AWS_THREAD_ID_T_REPR_BUFSZ]; ASSERT_SUCCESS( aws_thread_id_t_to_string(current_thread_id, repr, AWS_THREAD_ID_T_REPR_BUFSZ), "Could not convert aws_thread_id_t to string repr"); char logged_id[AWS_THREAD_ID_T_REPR_BUFSZ]; memcpy(logged_id, thread_id_start, AWS_THREAD_ID_T_REPR_BUFSZ - 1); logged_id[AWS_THREAD_ID_T_REPR_BUFSZ - 1] = '\0'; ASSERT_SUCCESS( strncmp(repr, logged_id, AWS_THREAD_ID_T_REPR_BUFSZ), "Expected logged thread id to be \"%s\" but it was actually \"%s\"", repr, logged_id); /* * Check that the user content is what was expected */ const char *separator = strstr(thread_id_end, " - "); ASSERT_TRUE(separator != NULL, ""); const char *user_content = separator + 3; size_t expected_user_content_length = strlen(expected_user_output); ASSERT_SUCCESS( strncmp(user_content, expected_user_output, expected_user_content_length), "Expected user content \"%s\" but received \"%s\"", expected_user_output, user_content); return AWS_OP_SUCCESS; } #define DEFINE_LOG_FORMATTER_TEST(test_function, log_level, date_format, expected_user_string) \ static int 
s_log_formatter_##test_function(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return do_default_log_formatter_test(allocator, test_function, expected_user_string, log_level, date_format); \ } \ AWS_TEST_CASE(test_log_formatter_##test_function, s_log_formatter_##test_function); static int invoke_formatter( struct aws_log_formatter *formatter, struct aws_string **output, enum aws_log_level log_level, const char *format, ...) { va_list args; va_start(args, format); int result = formatter->vtable->format(formatter, output, log_level, AWS_LS_COMMON_GENERAL, format, args); va_end(args); return result; } /* * Tests */ /* * Empty string case */ static int s_formatter_empty_case(struct aws_log_formatter *formatter, struct aws_string **output) { return invoke_formatter(formatter, output, AWS_LL_WARN, ""); } DEFINE_LOG_FORMATTER_TEST(s_formatter_empty_case, AWS_LL_WARN, AWS_DATE_FORMAT_RFC822, "") /* * Simple string */ static int s_formatter_simple_case(struct aws_log_formatter *formatter, struct aws_string **output) { return invoke_formatter(formatter, output, AWS_LL_DEBUG, "Sample log output"); } DEFINE_LOG_FORMATTER_TEST(s_formatter_simple_case, AWS_LL_DEBUG, AWS_DATE_FORMAT_ISO_8601, "Sample log output") /* * Format string with numbers */ static int s_formatter_number_case(struct aws_log_formatter *formatter, struct aws_string **output) { return invoke_formatter( formatter, output, AWS_LL_FATAL, "%d bottles of milk on the wall. Take %.4f bottles down.", 99, .9999f); } DEFINE_LOG_FORMATTER_TEST( s_formatter_number_case, AWS_LL_FATAL, AWS_DATE_FORMAT_RFC822, "99 bottles of milk on the wall. Take 0.9999 bottles down.") /* * Format string with strings */ static int s_formatter_string_case(struct aws_log_formatter *formatter, struct aws_string **output) { return invoke_formatter( formatter, output, AWS_LL_INFO, "Once there was, if %s there was, and just perhaps there %s was", "ever", "never"); } DEFINE_LOG_FORMATTER_TEST( s_formatter_string_case, AWS_LL_INFO, AWS_DATE_FORMAT_ISO_8601, "Once there was, if ever there was, and just perhaps there never was") /* * Format string with newlines */ static int s_formatter_newline_case(struct aws_log_formatter *formatter, struct aws_string **output) { return invoke_formatter(formatter, output, AWS_LL_TRACE, "\nMaking sure \nnewlines don't mess things\nup"); } DEFINE_LOG_FORMATTER_TEST( s_formatter_newline_case, AWS_LL_TRACE, AWS_DATE_FORMAT_RFC822, "\nMaking sure \nnewlines don't mess things\nup") aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/log_writer_test.c000066400000000000000000000113131456575232400266670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "logging_test_utilities.h" #include #include #include #include #include #include #ifndef _WIN32 # include #endif /* _WIN32 */ #define TEST_WRITER_MAX_BUFFER_SIZE 4096 int do_default_log_writer_test( struct aws_log_writer *writer, const char *test_file_name, const char *expected_file_content, const struct aws_string *output, FILE *close_fp) { int result = writer->vtable->write(writer, output); aws_log_writer_clean_up(writer); /* * When we redirect stdout/stderr to a file, we need to close the file manually since the writer implementations do * not do so. 
*/ if (close_fp != NULL) { fclose(close_fp); } char buffer[TEST_WRITER_MAX_BUFFER_SIZE]; FILE *file = aws_fopen(test_file_name, "r"); int open_error = errno; size_t bytes_read = 0; if (file != NULL) { bytes_read = fread(buffer, 1, TEST_WRITER_MAX_BUFFER_SIZE - 1, file); fclose(file); } remove(test_file_name); /* * Check that the write call was successful */ ASSERT_TRUE(result == AWS_OP_SUCCESS, "Writing operation failed"); /* * Check the file was read successfully */ ASSERT_TRUE( file != NULL, "Unable to open output file \"%s\" to verify contents. Error: %d", test_file_name, open_error); ASSERT_TRUE(bytes_read >= 0, "Failed to read test output file \"%s\"", test_file_name); /* * add end of string marker */ buffer[bytes_read] = 0; /* * Check file contents */ ASSERT_TRUE( strcmp(buffer, expected_file_content) == 0, "Expected log file to contain:\n\n%s\n\nbut instead it contained:\n\n%s\n", expected_file_content, buffer); return AWS_OP_SUCCESS; } /* * Test cases */ #define EXISTING_TEXT "Some existing text\n" #define SIMPLE_FILE_CONTENT "A few\nlog lines.\n" static const char *s_combined_text = EXISTING_TEXT SIMPLE_FILE_CONTENT; AWS_STATIC_STRING_FROM_LITERAL(s_simple_file_content, SIMPLE_FILE_CONTENT); /* * Simple file test */ static int s_log_writer_simple_file_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *test_file_str = aws_string_new_log_writer_test_filename(allocator); const char *test_file_cstr = aws_string_c_str(test_file_str); remove(test_file_cstr); struct aws_log_writer_file_options options = {.filename = test_file_cstr}; struct aws_log_writer writer; ASSERT_SUCCESS(aws_log_writer_init_file(&writer, allocator, &options)); ASSERT_SUCCESS( do_default_log_writer_test(&writer, test_file_cstr, SIMPLE_FILE_CONTENT, s_simple_file_content, NULL)); aws_string_destroy(test_file_str); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_log_writer_simple_file_test, s_log_writer_simple_file_test); /* * Existing file test (verifies append is being used) */ static int s_log_writer_existing_file_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_string *test_file_str = aws_string_new_log_writer_test_filename(allocator); const char *test_file_cstr = aws_string_c_str(test_file_str); remove(test_file_cstr); FILE *fp = aws_fopen(test_file_cstr, "w+"); fprintf(fp, EXISTING_TEXT); fclose(fp); struct aws_log_writer_file_options options = {.filename = test_file_cstr}; struct aws_log_writer writer; ASSERT_SUCCESS(aws_log_writer_init_file(&writer, allocator, &options)); ASSERT_SUCCESS(do_default_log_writer_test(&writer, test_file_cstr, s_combined_text, s_simple_file_content, NULL)); aws_string_destroy(test_file_str); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_log_writer_existing_file_test, s_log_writer_existing_file_test); /* * (Error case) Bad filename test */ static int s_log_writer_bad_file_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_log_writer_file_options options = {.filename = "."}; struct aws_log_writer writer; int result = aws_log_writer_init_file(&writer, allocator, &options); int aws_error = aws_last_error(); ASSERT_TRUE(result == AWS_OP_ERR, "Log file open succeeded despite an invalid file name"); #ifdef _WIN32 ASSERT_TRUE(aws_error == AWS_ERROR_NO_PERMISSION, "File open error was not no permission as expected"); #else ASSERT_TRUE(aws_error == AWS_ERROR_FILE_INVALID_PATH, "File open error was not invalid path as expected"); #endif /* _WIN32 */ return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_log_writer_bad_file_test, 
s_log_writer_bad_file_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_filter_debug_static_test.c000066400000000000000000000012451456575232400322250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_STATIC_LOG_LEVEL 5 #include "logging_test_utilities.h" /** * A log testing callback that makes a LOGF_ call for each level. * * Because AWS_STATIC_LOG_LEVEL is 5 (DEBUG) in this translation unit, we expect one * of the log calls to be removed at compile time. * * So even though our test sets the dynamic level to TRACE, only the * {FATAL, ERROR, WARN, INFO, DEBUG} log calls will be recorded. */ DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels_debug_cutoff) TEST_LEVEL_FILTER(AWS_LL_TRACE, "12345", s_logf_all_levels_debug_cutoff) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_filter_error_static_test.c000066400000000000000000000012301456575232400322620ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_STATIC_LOG_LEVEL 2 #include "logging_test_utilities.h" /** * A log testing callback that makes a LOGF_ call for each level. * * Because AWS_STATIC_LOG_LEVEL is 2 (ERROR) in this translation unit, we expect all but two * of the log calls to be removed at compile time. * * So even though our test sets the dynamic level to TRACE, only the FATAL and ERROR log calls will * be recorded. */ DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels_error_cutoff) TEST_LEVEL_FILTER(AWS_LL_TRACE, "12", s_logf_all_levels_error_cutoff) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_filter_fatal_static_test.c000066400000000000000000000012141456575232400322220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_STATIC_LOG_LEVEL 1 #include "logging_test_utilities.h" /** * A log testing callback that makes a LOGF_ call for each level. * * Because AWS_STATIC_LOG_LEVEL is 1 (FATAL) in this translation unit, we expect all but one * of the log calls to be removed at compile time. * * So even though our test sets the dynamic level to TRACE, only the FATAL log call will * be recorded. */ DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels_fatal_cutoff) TEST_LEVEL_FILTER(AWS_LL_TRACE, "1", s_logf_all_levels_fatal_cutoff) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_filter_info_static_test.c000066400000000000000000000012321456575232400320660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_STATIC_LOG_LEVEL 4 #include "logging_test_utilities.h" /** * A log testing callback that makes a LOGF_ call for each level. * * Because AWS_STATIC_LOG_LEVEL is 4 (INFO) in this translation unit, we expect two * of the log calls to be removed at compile time. * * So even though our test sets the dynamic level to TRACE, only the * {FATAL, ERROR, WARN, INFO} log calls will be recorded. */ DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels_info_cutoff) TEST_LEVEL_FILTER(AWS_LL_TRACE, "1234", s_logf_all_levels_info_cutoff) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_filter_none_static_test.c000066400000000000000000000011601456575232400320720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_STATIC_LOG_LEVEL 0 #include "logging_test_utilities.h" /** * A log testing callback that makes a LOGF_ call for each level. * * Because AWS_STATIC_LOG_LEVEL is 0 (NONE) in this translation unit, we expect all of the log * calls to be removed at compile time. * * So even though our test sets the dynamic level to TRACE, nothing will * be recorded. */ DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels_none_cutoff) TEST_LEVEL_FILTER(AWS_LL_TRACE, "", s_logf_all_levels_none_cutoff) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_filter_trace_static_test.c000066400000000000000000000010471456575232400322350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_STATIC_LOG_LEVEL 6 #include "logging_test_utilities.h" /** * A log testing callback that makes a LOGF_ call for each level. * * Because AWS_STATIC_LOG_LEVEL is 6 (TRACE) in this translation unit, we do not expect any * of the log calls to be removed at compile time. * */ DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels_trace_cutoff) TEST_LEVEL_FILTER(AWS_LL_TRACE, "123456", s_logf_all_levels_trace_cutoff) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_filter_warn_static_test.c000066400000000000000000000012351456575232400321050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_STATIC_LOG_LEVEL 3 #include "logging_test_utilities.h" /** * A log testing callback that makes a LOGF_ call for each level. * * Because AWS_STATIC_LOG_LEVEL is 3 (WARN) in this translation unit, we expect all but three * of the log calls to be removed at compile time. * * So even though our test sets the dynamic level to TRACE, only the {FATAL, ERROR, WARN} log calls will * be recorded. */ DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels_warn_cutoff) TEST_LEVEL_FILTER(AWS_LL_TRACE, "123", s_logf_all_levels_warn_cutoff) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_general_test.c000066400000000000000000000046621456575232400276460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "logging_test_utilities.h" #include "test_logger.h" DECLARE_LOGF_ALL_LEVELS_FUNCTION(s_logf_all_levels) /** * These tests check the dynamic (run-time) filtering capabilities of the logging * system. * * In each case, we use the test logger and invoke a log operation at each supported level. * Using the level (integer value) itself as the log text, we can easily check to see * what got filtered and what got through. 
* * For example, when the log level is AWS_LL_INFO, we expect * {AWS_LL_FATAL, AWS_LL_ERROR, AWS_LL_WARN, AWS_LL_INFO} calls to all go through ("1234") * but * {AWS_LL_DEBUG, AWS_LL_TRACE} to be filtered out ("56") */ TEST_LEVEL_FILTER(AWS_LL_TRACE, "123456", s_logf_all_levels) TEST_LEVEL_FILTER(AWS_LL_DEBUG, "12345", s_logf_all_levels) TEST_LEVEL_FILTER(AWS_LL_INFO, "1234", s_logf_all_levels) TEST_LEVEL_FILTER(AWS_LL_WARN, "123", s_logf_all_levels) TEST_LEVEL_FILTER(AWS_LL_ERROR, "12", s_logf_all_levels) TEST_LEVEL_FILTER(AWS_LL_FATAL, "1", s_logf_all_levels) TEST_LEVEL_FILTER(AWS_LL_NONE, "", s_logf_all_levels) /* * Dynamic level change testing */ #define TEST_LOGGER_MAX_BUFFER_SIZE 4096 static int s_dynamic_log_level_change_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; /* Create and attach a logger for testing*/ struct aws_logger test_logger; test_logger_init(&test_logger, allocator, AWS_LOG_LEVEL_ERROR, TEST_LOGGER_MAX_BUFFER_SIZE); aws_logger_set(&test_logger); /* Perform logging operations */ s_logf_all_levels(AWS_LOG_LEVEL_DEBUG); aws_logger_set_log_level(&test_logger, AWS_LOG_LEVEL_DEBUG); s_logf_all_levels(AWS_LOG_LEVEL_DEBUG); aws_logger_set_log_level(&test_logger, AWS_LOG_LEVEL_WARN); s_logf_all_levels(AWS_LOG_LEVEL_DEBUG); /* Pull out what was logged before clean up */ char buffer[TEST_LOGGER_MAX_BUFFER_SIZE]; test_logger_get_contents(&test_logger, buffer, TEST_LOGGER_MAX_BUFFER_SIZE); /* clean up */ aws_logger_set(NULL); aws_logger_clean_up(&test_logger); /* Check the test results last */ static const char *expected_result = "1212345123"; ASSERT_SUCCESS(strcmp(buffer, expected_result), "Expected \"%s\" but received \"%s\"", expected_result, buffer); return 0; } AWS_TEST_CASE(dynamic_log_level_change_test, s_dynamic_log_level_change_test) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_misc.c000066400000000000000000000035561456575232400261260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include static int s_string_to_log_level_success_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; enum aws_log_level level = AWS_LL_NONE; ASSERT_SUCCESS(aws_string_to_log_level("DeBug", &level)); ASSERT_INT_EQUALS(level, AWS_LL_DEBUG); ASSERT_SUCCESS(aws_string_to_log_level("TRACE", &level)); ASSERT_INT_EQUALS(level, AWS_LL_TRACE); ASSERT_SUCCESS(aws_string_to_log_level("warn", &level)); ASSERT_INT_EQUALS(level, AWS_LL_WARN); ASSERT_SUCCESS(aws_string_to_log_level("InFo", &level)); ASSERT_INT_EQUALS(level, AWS_LL_INFO); ASSERT_SUCCESS(aws_string_to_log_level("errOr", &level)); ASSERT_INT_EQUALS(level, AWS_LL_ERROR); ASSERT_SUCCESS(aws_string_to_log_level("FATAL", &level)); ASSERT_INT_EQUALS(level, AWS_LL_FATAL); ASSERT_SUCCESS(aws_string_to_log_level("none", &level)); ASSERT_INT_EQUALS(level, AWS_LL_NONE); return 0; } AWS_TEST_CASE(string_to_log_level_success_test, s_string_to_log_level_success_test) static int s_string_to_log_level_failure_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; enum aws_log_level level = AWS_LL_NONE; ASSERT_FAILS(aws_string_to_log_level("", &level)); ASSERT_FAILS(aws_string_to_log_level("Tracee", &level)); ASSERT_FAILS(aws_string_to_log_level("war", &level)); ASSERT_FAILS(aws_string_to_log_level("Not a log level", &level)); ASSERT_FAILS(aws_string_to_log_level(NULL, &level)); ASSERT_FAILS(aws_string_to_log_level(NULL, NULL)); ASSERT_FAILS(aws_string_to_log_level("FATAL", NULL)); return 0; } AWS_TEST_CASE(string_to_log_level_failure_test, s_string_to_log_level_failure_test) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_test_utilities.c000066400000000000000000000040251456575232400302350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "logging_test_utilities.h" #include "test_logger.h" #include #include #define TEST_LOGGER_MAX_BUFFER_SIZE 4096 int do_log_test( struct aws_allocator *allocator, enum aws_log_level level, const char *expected_result, void (*callback)(enum aws_log_level)) { /* Create and attach a logger for testing*/ struct aws_logger test_logger; test_logger_init(&test_logger, allocator, level, TEST_LOGGER_MAX_BUFFER_SIZE); aws_logger_set(&test_logger); /* Perform logging operations */ (*callback)(level); /* Pull out what was logged before clean up */ char buffer[TEST_LOGGER_MAX_BUFFER_SIZE]; test_logger_get_contents(&test_logger, buffer, TEST_LOGGER_MAX_BUFFER_SIZE); /* clean up */ aws_logger_set(NULL); aws_logger_clean_up(&test_logger); /* Check the test results last */ ASSERT_SUCCESS(strcmp(buffer, expected_result), "Expected \"%s\" but received \"%s\"", expected_result, buffer); return AWS_OP_SUCCESS; } struct aws_string *aws_string_new_log_writer_test_filename(struct aws_allocator *allocator) { char filename_array[64]; AWS_ZERO_ARRAY(filename_array); struct aws_byte_buf filename_buf = aws_byte_buf_from_empty_array(filename_array, sizeof(filename_array)); #ifndef _WIN32 AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_cursor(&filename_buf, aws_byte_cursor_from_c_str("./"))); #endif AWS_FATAL_ASSERT( aws_byte_buf_write_from_whole_cursor(&filename_buf, aws_byte_cursor_from_c_str("aws_log_writer_test_"))); struct aws_uuid uuid; AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &filename_buf) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_cursor(&filename_buf, aws_byte_cursor_from_c_str(".log"))); return aws_string_new_from_array(allocator, filename_buf.buffer, filename_buf.len); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/logging_test_utilities.h000066400000000000000000000064241456575232400302470ustar00rootroot00000000000000/* NOLINTNEXTLINE(llvm-header-guard) */ #ifndef AWS_COMMON_LOGGING_TEST_UTILITIES_H #define AWS_COMMON_LOGGING_TEST_UTILITIES_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * A staging function for basic logging tests. It * (1) Initializes and globally attaches a test logger * (2) Invokes the supplied callback, which should perform the logging operations under test * (3) Detaches and cleans up the test logger * (4) Checks if what was recorded by the test logger matches what the test expected. */ int do_log_test( struct aws_allocator *allocator, enum aws_log_level level, const char *expected_result, void (*callback)(enum aws_log_level)); /** * A macro capable of defining simple logging tests that follow the do_log_test function pattern */ #define TEST_LEVEL_FILTER(log_level, expected, action_fn) \ static int s_logging_filter_at_##log_level##_##action_fn(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return do_log_test(allocator, log_level, expected, action_fn); \ } \ AWS_TEST_CASE(test_logging_filter_at_##log_level##_##action_fn, s_logging_filter_at_##log_level##_##action_fn); /** * A macro that defines a function that invokes all 6 LOGF_ variants * * Needs to be a macro and not just a function because the compile-time filtering tests require a private implementation * that is compiled with AWS_STATIC_LOG_LEVEL at the level to be tested. There's no way to shared a single definition * that does so. 
*/ #define DECLARE_LOGF_ALL_LEVELS_FUNCTION(fn_name) \ static void fn_name(enum aws_log_level level) { \ (void)level; \ AWS_LOGF_FATAL(AWS_LS_COMMON_GENERAL, "%d", (int)AWS_LL_FATAL); \ AWS_LOGF_ERROR(AWS_LS_COMMON_GENERAL, "%d", (int)AWS_LL_ERROR); \ AWS_LOGF_WARN(AWS_LS_COMMON_GENERAL, "%d", (int)AWS_LL_WARN); \ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "%d", (int)AWS_LL_INFO); \ AWS_LOGF_DEBUG(AWS_LS_COMMON_GENERAL, "%d", (int)AWS_LL_DEBUG); \ AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%d", (int)AWS_LL_TRACE); \ } /** * Return new string with format "./aws_log_writer_test_{UUID}.log" * This function cannot fail. */ struct aws_string *aws_string_new_log_writer_test_filename(struct aws_allocator *allocator); #endif /* AWS_COMMON_LOGGING_TEST_UTILITIES_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/pipeline_logger_test.c000066400000000000000000000116311456575232400276610ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "logging_test_utilities.h" #include #include #include #include #include #include #define TEST_PIPELINE_MAX_BUFFER_SIZE 4096 typedef void(log_test_fn)(void); int do_pipeline_logger_test( struct aws_allocator *allocator, log_test_fn *log_fn, const char **expected_user_content, size_t user_content_count) { struct aws_string *test_file_str = aws_string_new_log_writer_test_filename(allocator); const char *test_file_cstr = aws_string_c_str(test_file_str); remove(test_file_cstr); struct aws_logger_standard_options options = {.level = AWS_LL_TRACE, .filename = test_file_cstr}; struct aws_logger logger; if (aws_logger_init_standard(&logger, allocator, &options)) { return AWS_OP_ERR; } aws_logger_set(&logger); (*log_fn)(); aws_logger_set(NULL); aws_logger_clean_up(&logger); char buffer[TEST_PIPELINE_MAX_BUFFER_SIZE]; FILE *file = aws_fopen(test_file_cstr, "r"); int open_error = errno; size_t bytes_read = 0; if (file != NULL) { bytes_read = fread(buffer, 1, TEST_PIPELINE_MAX_BUFFER_SIZE - 1, file); fclose(file); } remove(test_file_cstr); /* * Check the file was read successfully */ ASSERT_TRUE( file != NULL, "Unable to open log file \"%s\" to verify contents. Error: %d", test_file_str, open_error); ASSERT_TRUE(bytes_read >= 0, "Failed to read log file \"%s\"", test_file_str); /* * add end of string marker */ buffer[bytes_read] = 0; /* * Timestamps prevent us from doing simple string comparisons to check the log file and writing a parser to pull out * log lines in the face of multi-line arbitrary content seems overkill. Since we've already validated * the formatter via the formatter tests, the main thing to do here is just verify that the user part of the log * lines is making it to the log file. */ const char *buffer_ptr = buffer; for (size_t i = 0; i < user_content_count; ++i) { buffer_ptr = strstr(buffer_ptr, expected_user_content[i]); ASSERT_TRUE( buffer_ptr != NULL, "Expected to find \"%s\" in log file but could not. 
Content is either missing or out-of-order.", expected_user_content[i]); } aws_string_destroy(test_file_str); return AWS_OP_SUCCESS; } static void s_unformatted_pipeline_logger_test_callback(void) { AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "trace log call"); AWS_LOGF_DEBUG(AWS_LS_COMMON_GENERAL, "debug log call"); AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "info log call"); AWS_LOGF_WARN(AWS_LS_COMMON_GENERAL, "warn log call"); AWS_LOGF_ERROR(AWS_LS_COMMON_GENERAL, "error log call"); AWS_LOGF_FATAL(AWS_LS_COMMON_GENERAL, "fatal log call"); } static void s_formatted_pipeline_logger_test_callback(void) { AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%s log call", "trace"); AWS_LOGF_DEBUG(AWS_LS_COMMON_GENERAL, "%s log call", "debug"); AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "%s log call", "info"); AWS_LOGF_WARN(AWS_LS_COMMON_GENERAL, "%s log call", "warn"); AWS_LOGF_ERROR(AWS_LS_COMMON_GENERAL, "%s log call", "error"); AWS_LOGF_FATAL(AWS_LS_COMMON_GENERAL, "%s log call", "fatal"); } static const char *expected_test_user_content[] = {"trace log call", "debug log call", "info log call", "warn log call", "error log call", "fatal log call"}; #define DEFINE_PIPELINE_LOGGER_TEST(test_name, callback_function) \ static int s_pipeline_logger_##test_name(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return do_pipeline_logger_test( \ allocator, \ callback_function, \ expected_test_user_content, \ sizeof(expected_test_user_content) / sizeof(expected_test_user_content[0])); \ } \ AWS_TEST_CASE(test_pipeline_logger_##test_name, s_pipeline_logger_##test_name); DEFINE_PIPELINE_LOGGER_TEST(unformatted_test, s_unformatted_pipeline_logger_test_callback) DEFINE_PIPELINE_LOGGER_TEST(formatted_test, s_formatted_pipeline_logger_test_callback) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/test_logger.c000066400000000000000000000072551456575232400260030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "test_logger.h" #include #include #include /** * A real logger wouldn't have such size restrictions, but these are good enough for our tests */ #define TEST_LOGGER_MAX_LOG_LINE_SIZE 2048 int s_test_logger_log( struct aws_logger *logger, enum aws_log_level log_level, aws_log_subject_t subject, const char *format, ...) 
{ (void)subject; (void)log_level; va_list format_args; va_start(format_args, format); static char buffer[TEST_LOGGER_MAX_LOG_LINE_SIZE]; #ifdef _WIN32 int written = vsnprintf_s(buffer, TEST_LOGGER_MAX_LOG_LINE_SIZE, _TRUNCATE, format, format_args); #else int written = vsnprintf(buffer, TEST_LOGGER_MAX_LOG_LINE_SIZE, format, format_args); #endif /* _WIN32 */ va_end(format_args); if (written < 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct test_logger_impl *impl = (struct test_logger_impl *)logger->p_impl; struct aws_byte_cursor line = aws_byte_cursor_from_array(buffer, written); if (impl->max_size) { if (aws_byte_buf_write(&impl->log_buffer, line.ptr, line.len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } else { if (aws_byte_buf_append_dynamic(&impl->log_buffer, &line)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } return AWS_OP_SUCCESS; } enum aws_log_level s_test_logger_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) { (void)subject; struct test_logger_impl *impl = (struct test_logger_impl *)logger->p_impl; return impl->level; } void s_test_logger_clean_up(struct aws_logger *logger) { struct test_logger_impl *impl = (struct test_logger_impl *)logger->p_impl; aws_byte_buf_clean_up(&impl->log_buffer); struct aws_allocator *allocator = logger->allocator; aws_mem_release(allocator, impl); } int s_test_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level) { struct test_logger_impl *impl = (struct test_logger_impl *)logger->p_impl; impl->level = level; return AWS_OP_SUCCESS; } static struct aws_logger_vtable s_test_logger_vtable = { .get_log_level = s_test_logger_get_log_level, .log = s_test_logger_log, .clean_up = s_test_logger_clean_up, .set_log_level = s_test_logger_set_log_level, }; int test_logger_init( struct aws_logger *logger, struct aws_allocator *allocator, enum aws_log_level level, size_t max_size) { struct test_logger_impl *impl = (struct test_logger_impl *)aws_mem_acquire(allocator, sizeof(struct test_logger_impl)); if (impl == NULL) { return AWS_OP_ERR; } if (aws_byte_buf_init(&impl->log_buffer, allocator, max_size ? max_size : 4096)) { aws_mem_release(allocator, impl); return AWS_OP_ERR; } impl->level = level; impl->max_size = max_size; logger->vtable = &s_test_logger_vtable; logger->allocator = allocator; logger->p_impl = impl; return AWS_OP_SUCCESS; } int test_logger_get_contents(struct aws_logger *logger, char *buffer, size_t max_length) { struct test_logger_impl *impl = (struct test_logger_impl *)logger->p_impl; if (max_length == 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } size_t copy_length = max_length - 1; if (impl->log_buffer.len < copy_length) { copy_length = impl->log_buffer.len; } if (copy_length > 0) { memcpy(buffer, impl->log_buffer.buffer, copy_length); } buffer[copy_length] = 0; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/logging/test_logger.h000066400000000000000000000020061456575232400257750ustar00rootroot00000000000000/* NOLINTNEXTLINE(llvm-header-guard) */ #ifndef AWS_COMMON_TEST_LOGGER_H #define AWS_COMMON_TEST_LOGGER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * The test logger is a simple forwarding logger that just records what was passed to it. * We provide an extraction function for easy test validation. 
*/ struct test_logger_impl { enum aws_log_level level; struct aws_byte_buf log_buffer; size_t max_size; }; /** * Given a pointer to a logger, initializes it as a test logger using the supplied log level. * max_size of 0 is unlimited */ int test_logger_init( struct aws_logger *logger, struct aws_allocator *allocator, enum aws_log_level level, size_t max_size); /** * Extracts logged content from a test logger. */ int test_logger_get_contents(struct aws_logger *logger, char *buffer, size_t max_length); #endif /* AWS_COMMON_TEST_LOGGER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/math_test.c000066400000000000000000001413441456575232400240250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #define CHECK_SAT(fn, a, b, result) \ do { \ ASSERT_UINT_EQUALS( \ (result), \ fn((a), (b)), \ "%s(0x%016llx, 0x%016llx) = 0x%016llx", \ #fn, \ (unsigned long long)(a), \ (unsigned long long)(b), \ (unsigned long long)(result)); \ ASSERT_UINT_EQUALS( \ (result), \ fn((b), (a)), \ "%s(0x%016llx, 0x%016llx) = 0x%016llx", \ #fn, \ (unsigned long long)(b), \ (unsigned long long)(a), \ (unsigned long long)(result)); \ } while (0) AWS_TEST_CASE(test_is_power_of_two, s_test_is_power_of_two_fn) static int s_test_is_power_of_two_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_FALSE(aws_is_power_of_two(0)); for (size_t i = 0; i < SIZE_BITS; ++i) { const size_t ith_power = (size_t)1 << i; ASSERT_TRUE(aws_is_power_of_two(ith_power)); ASSERT_FALSE(aws_is_power_of_two(ith_power + 9)); ASSERT_FALSE(aws_is_power_of_two(ith_power - 9)); ASSERT_FALSE(aws_is_power_of_two(ith_power + 100)); ASSERT_FALSE(aws_is_power_of_two(ith_power - 100)); } return 0; } #define CHECK_ROUND_OVERFLOWS(a) \ do { \ size_t result_val; \ ASSERT_TRUE(aws_round_up_to_power_of_two((a), &result_val)); \ } while (0) #define CHECK_ROUND_SUCCEEDS(a, r) \ do { \ size_t result_val; \ ASSERT_FALSE(aws_round_up_to_power_of_two((a), &result_val)); \ ASSERT_TRUE(aws_is_power_of_two(result_val)); \ ASSERT_INT_EQUALS( \ (uint64_t)result_val, \ (uint64_t)(r), \ "Expected %s(%016llx) = %016llx; got %016llx", \ "aws_round_up_to_power_of_two", \ (unsigned long long)(a), \ (unsigned long long)(r), \ (unsigned long long)(result_val)); \ } while (0) AWS_TEST_CASE(test_round_up_to_power_of_two, s_test_round_up_to_power_of_two_fn) static int s_test_round_up_to_power_of_two_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /*special case 0, 1 and 2, where subtracting from the number doesn't cause it to round back up */ CHECK_ROUND_SUCCEEDS(0, 1); CHECK_ROUND_SUCCEEDS(1, 1); CHECK_ROUND_SUCCEEDS(2, 2); for (size_t i = 2; i < SIZE_BITS - 1; ++i) { const size_t ith_power = (size_t)1 << i; CHECK_ROUND_SUCCEEDS(ith_power, ith_power); CHECK_ROUND_SUCCEEDS(ith_power - 1, ith_power); CHECK_ROUND_SUCCEEDS(ith_power + 1, ith_power << 1); } /* Special case for the largest representable power of two, where addition causes overflow */ CHECK_ROUND_SUCCEEDS(SIZE_MAX_POWER_OF_TWO, SIZE_MAX_POWER_OF_TWO); CHECK_ROUND_SUCCEEDS(SIZE_MAX_POWER_OF_TWO - 1, SIZE_MAX_POWER_OF_TWO); CHECK_ROUND_OVERFLOWS(SIZE_MAX_POWER_OF_TWO + 1); return 0; } AWS_TEST_CASE(test_mul_u64_saturating, s_test_mul_u64_saturating_fn) static int s_test_mul_u64_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; CHECK_SAT(aws_mul_u64_saturating, 0, 0, 0); 
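/* CHECK_SAT evaluates the operation with its arguments in both orders and
 * asserts that both calls produce the expected value, so each multiplication
 * below doubles as a commutativity check. */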
CHECK_SAT(aws_mul_u64_saturating, 0, 1, 0); CHECK_SAT(aws_mul_u64_saturating, 0, ~0LLU, 0); CHECK_SAT(aws_mul_u64_saturating, 4, 5, 20); CHECK_SAT(aws_mul_u64_saturating, 1234, 4321, 5332114); CHECK_SAT(aws_mul_u64_saturating, 0xFFFFFFFF, 1, 0xFFFFFFFF); CHECK_SAT(aws_mul_u64_saturating, 0xFFFFFFFF, 0xFFFFFFFF, 0xfffffffe00000001LLU); CHECK_SAT(aws_mul_u64_saturating, 0x100000000, 0xFFFFFFFF, 0xFFFFFFFF00000000LLU); CHECK_SAT(aws_mul_u64_saturating, 0x100000001, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFFLLU); CHECK_SAT(aws_mul_u64_saturating, 0x100000001, 0xFFFFFFFE, 0xFFFFFFFEFFFFFFFELLU); CHECK_SAT(aws_mul_u64_saturating, 0x100000002, 0xFFFFFFFE, 0xFFFFFFFFFFFFFFFCLLU); CHECK_SAT(aws_mul_u64_saturating, 0x100000003, 0xFFFFFFFE, 0xFFFFFFFFFFFFFFFFLLU); CHECK_SAT(aws_mul_u64_saturating, 0xFFFFFFFE, 0xFFFFFFFE, 0xFFFFFFFC00000004LLU); CHECK_SAT(aws_mul_u64_saturating, 0x1FFFFFFFF, 0x1FFFFFFFF, 0xFFFFFFFFFFFFFFFFLLU); CHECK_SAT(aws_mul_u64_saturating, ~0LLU, ~0LLU, ~0LLU); return 0; } AWS_TEST_CASE(test_mul_u32_saturating, s_test_mul_u32_saturating_fn) static int s_test_mul_u32_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; CHECK_SAT(aws_mul_u32_saturating, 0, 0, 0); CHECK_SAT(aws_mul_u32_saturating, 0, 1, 0); CHECK_SAT(aws_mul_u32_saturating, 0, ~0U, 0); CHECK_SAT(aws_mul_u32_saturating, 4, 5, 20); CHECK_SAT(aws_mul_u32_saturating, 1234, 4321, 5332114); CHECK_SAT(aws_mul_u32_saturating, 0xFFFFFFFF, 1, 0xFFFFFFFF); CHECK_SAT(aws_mul_u32_saturating, 0xFFFF, 1, 0xFFFF); CHECK_SAT(aws_mul_u32_saturating, 0xFFFF, 0xFFFF, 0xfffe0001); CHECK_SAT(aws_mul_u32_saturating, 0x10000, 0xFFFF, 0xFFFF0000U); CHECK_SAT(aws_mul_u32_saturating, 0x10001, 0xFFFF, 0xFFFFFFFFU); CHECK_SAT(aws_mul_u32_saturating, 0x10001, 0xFFFE, 0xFFFEFFFEU); CHECK_SAT(aws_mul_u32_saturating, 0x10002, 0xFFFE, 0xFFFFFFFCU); CHECK_SAT(aws_mul_u32_saturating, 0x10003, 0xFFFE, 0xFFFFFFFFU); CHECK_SAT(aws_mul_u32_saturating, 0xFFFE, 0xFFFE, 0xFFFC0004U); CHECK_SAT(aws_mul_u32_saturating, 0x1FFFF, 0x1FFFF, 0xFFFFFFFFU); CHECK_SAT(aws_mul_u32_saturating, ~0U, ~0U, ~0U); return 0; } AWS_TEST_CASE(test_mul_size_saturating, s_test_mul_size_saturating_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_mul_size_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #if SIZE_BITS == 32 CHECK_SAT(aws_mul_size_saturating, 0, 0, 0); CHECK_SAT(aws_mul_size_saturating, 0, 1, 0); CHECK_SAT(aws_mul_size_saturating, 0, ~0U, 0); CHECK_SAT(aws_mul_size_saturating, 4, 5, 20); CHECK_SAT(aws_mul_size_saturating, 1234, 4321, 5332114); CHECK_SAT(aws_mul_size_saturating, 0xFFFFFFFF, 1, 0xFFFFFFFF); CHECK_SAT(aws_mul_size_saturating, 0xFFFF, 1, 0xFFFF); CHECK_SAT(aws_mul_size_saturating, 0xFFFF, 0xFFFF, 0xfffe0001); CHECK_SAT(aws_mul_size_saturating, 0x10000, 0xFFFF, 0xFFFF0000U); CHECK_SAT(aws_mul_size_saturating, 0x10001, 0xFFFF, 0xFFFFFFFFU); CHECK_SAT(aws_mul_size_saturating, 0x10001, 0xFFFE, 0xFFFEFFFEU); CHECK_SAT(aws_mul_size_saturating, 0x10002, 0xFFFE, 0xFFFFFFFCU); CHECK_SAT(aws_mul_size_saturating, 0x10003, 0xFFFE, 0xFFFFFFFFU); CHECK_SAT(aws_mul_size_saturating, 0xFFFE, 0xFFFE, 0xFFFC0004U); CHECK_SAT(aws_mul_size_saturating, 0x1FFFF, 0x1FFFF, 0xFFFFFFFFU); CHECK_SAT(aws_mul_size_saturating, ~0U, ~0U, ~0U); #elif SIZE_BITS == 64 CHECK_SAT(aws_mul_size_saturating, 0, 0, 0); CHECK_SAT(aws_mul_size_saturating, 0, 1, 0); CHECK_SAT(aws_mul_size_saturating, 0, ~0LLU, 0); CHECK_SAT(aws_mul_size_saturating, 4, 5, 20); CHECK_SAT(aws_mul_size_saturating, 1234, 4321, 5332114); 
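/* With SIZE_BITS == 64, size_t is as wide as uint64_t, so these expectations
 * simply mirror the aws_mul_u64_saturating cases above. */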
CHECK_SAT(aws_mul_size_saturating, 0xFFFFFFFF, 1, 0xFFFFFFFF); CHECK_SAT(aws_mul_size_saturating, 0xFFFFFFFF, 0xFFFFFFFF, 0xfffffffe00000001LLU); CHECK_SAT(aws_mul_size_saturating, 0x100000000, 0xFFFFFFFF, 0xFFFFFFFF00000000LLU); CHECK_SAT(aws_mul_size_saturating, 0x100000001, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFFLLU); CHECK_SAT(aws_mul_size_saturating, 0x100000001, 0xFFFFFFFE, 0xFFFFFFFEFFFFFFFELLU); CHECK_SAT(aws_mul_size_saturating, 0x100000002, 0xFFFFFFFE, 0xFFFFFFFFFFFFFFFCLLU); CHECK_SAT(aws_mul_size_saturating, 0x100000003, 0xFFFFFFFE, 0xFFFFFFFFFFFFFFFFLLU); CHECK_SAT(aws_mul_size_saturating, 0xFFFFFFFE, 0xFFFFFFFE, 0xFFFFFFFC00000004LLU); CHECK_SAT(aws_mul_size_saturating, 0x1FFFFFFFF, 0x1FFFFFFFF, 0xFFFFFFFFFFFFFFFFLLU); CHECK_SAT(aws_mul_size_saturating, ~0LLU, ~0LLU, ~0LLU); #else FAIL("Unexpected size for size_t: %zu", sizeof(size_t)); #endif return 0; } #define CHECK_OVF_0(fn, type, a, b) \ do { \ type result_val = 0; \ ASSERT_TRUE(fn((a), (b), &result_val)); \ } while (0) #define CHECK_OVF(fn, type, a, b) \ do { \ CHECK_OVF_0(fn, type, a, b); \ CHECK_OVF_0(fn, type, b, a); \ } while (0) #define CHECK_NO_OVF_0(fn, type, a, b, r) \ do { \ type result_val = 0; \ ASSERT_FALSE(fn((a), (b), &result_val)); \ ASSERT_INT_EQUALS( \ (uint64_t)result_val, \ (uint64_t)(r), \ "Expected %s(%016llx, %016llx) = %016llx; got %016llx", \ #fn, \ (unsigned long long)(a), \ (unsigned long long)(b), \ (unsigned long long)(r), \ (unsigned long long)(result_val)); \ } while (0) #define CHECK_NO_OVF(fn, type, a, b, r) \ do { \ CHECK_NO_OVF_0(fn, type, a, b, r); \ CHECK_NO_OVF_0(fn, type, b, a, r); \ } while (0) AWS_TEST_CASE(test_mul_u64_checked, s_test_mul_u64_checked_fn) static int s_test_mul_u64_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0, 0, 0); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0, 1, 0); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0, ~0LLU, 0); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 4, 5, 20); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 1234, 4321, 5332114); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0xFFFFFFFFLLU, 1LLU, 0xFFFFFFFFLLU); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0xFFFFFFFFLLU, 0xFFFFFFFFLLU, 0xfffffffe00000001LLU); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0x100000000LLU, 0xFFFFFFFFLLU, 0xFFFFFFFF00000000LLU); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0x100000001LLU, 0xFFFFFFFFLLU, 0xFFFFFFFFFFFFFFFFLLU); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0x100000001LLU, 0xFFFFFFFELLU, 0xFFFFFFFEFFFFFFFELLU); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0x100000002LLU, 0xFFFFFFFELLU, 0xFFFFFFFFFFFFFFFCLLU); CHECK_OVF(aws_mul_u64_checked, uint64_t, 0x100000003LLU, 0xFFFFFFFELLU); CHECK_NO_OVF(aws_mul_u64_checked, uint64_t, 0xFFFFFFFELLU, 0xFFFFFFFELLU, 0xFFFFFFFC00000004LLU); CHECK_OVF(aws_mul_u64_checked, uint64_t, 0x1FFFFFFFFLLU, 0x1FFFFFFFFLLU); CHECK_OVF(aws_mul_u64_checked, uint64_t, ~0LLU, ~0LLU); return 0; } AWS_TEST_CASE(test_mul_u32_checked, s_test_mul_u32_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_mul_u32_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0, 0, 0); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0, 1, 0); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0, ~0U, 0); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 4, 5, 20); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 1234, 4321, 5332114); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0xFFFFFFFF, 1, 0xFFFFFFFF); 
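/* Unlike the saturating variants above, the *_checked functions do not clamp:
 * they return zero and write the result through the out-pointer on success,
 * and return nonzero when the product would overflow (see CHECK_OVF /
 * CHECK_NO_OVF). A caller-side sketch, variable names purely illustrative:
 *
 *   uint32_t total = 0;
 *   if (aws_mul_u32_checked(count, elem_size, &total)) {
 *       return AWS_OP_ERR;
 *   }
 */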
CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0xFFFF, 1, 0xFFFF); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0xFFFF, 0xFFFF, 0xfffe0001U); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0x10000, 0xFFFF, 0xFFFF0000U); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0x10001, 0xFFFF, 0xFFFFFFFFu); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0x10001, 0xFFFE, 0xFFFEFFFEu); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0x10002, 0xFFFE, 0xFFFFFFFCu); CHECK_OVF(aws_mul_u32_checked, uint32_t, 0x10003, 0xFFFE); CHECK_NO_OVF(aws_mul_u32_checked, uint32_t, 0xFFFE, 0xFFFE, 0xFFFC0004u); CHECK_OVF(aws_mul_u32_checked, uint32_t, 0x1FFFF, 0x1FFFF); CHECK_OVF(aws_mul_u32_checked, uint32_t, ~0U, ~0U); return 0; } AWS_TEST_CASE(test_mul_size_checked, s_test_mul_size_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_mul_size_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #if SIZE_BITS == 32 CHECK_NO_OVF(aws_mul_size_checked, size_t, 0, 0, 0); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0, 1, 0); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0, ~0U, 0); CHECK_NO_OVF(aws_mul_size_checked, size_t, 4, 5, 20); CHECK_NO_OVF(aws_mul_size_checked, size_t, 1234, 4321, 5332114); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0xFFFFFFFF, 1, 0xFFFFFFFF); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0xFFFF, 1, 0xFFFF); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0xFFFF, 0xFFFF, 0xfffe0001U); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x10000, 0xFFFF, 0xFFFF0000U); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x10001, 0xFFFF, 0xFFFFFFFFu); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x10001, 0xFFFE, 0xFFFEFFFEu); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x10002, 0xFFFE, 0xFFFFFFFCu); CHECK_OVF(aws_mul_size_checked, size_t, 0x10003, 0xFFFE); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0xFFFE, 0xFFFE, 0xFFFC0004u); CHECK_OVF(aws_mul_size_checked, size_t, 0x1FFFF, 0x1FFFF); CHECK_OVF(aws_mul_size_checked, size_t, ~0U, ~0U); #elif SIZE_BITS == 64 CHECK_NO_OVF(aws_mul_size_checked, size_t, 0, 0, 0); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0, 1, 0); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0, ~0LLU, 0); CHECK_NO_OVF(aws_mul_size_checked, size_t, 4, 5, 20); CHECK_NO_OVF(aws_mul_size_checked, size_t, 1234, 4321, 5332114); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0xFFFFFFFFLLU, 1LLU, 0xFFFFFFFFLLU); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0xFFFFFFFFLLU, 0xFFFFFFFFLLU, 0xfffffffe00000001LLU); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x100000000LLU, 0xFFFFFFFFLLU, 0xFFFFFFFF00000000LLU); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x100000001LLU, 0xFFFFFFFFLLU, 0xFFFFFFFFFFFFFFFFLLU); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x100000001LLU, 0xFFFFFFFELLU, 0xFFFFFFFEFFFFFFFELLU); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0x100000002LLU, 0xFFFFFFFELLU, 0xFFFFFFFFFFFFFFFCLLU); CHECK_OVF(aws_mul_size_checked, size_t, 0x100000003LLU, 0xFFFFFFFELLU); CHECK_NO_OVF(aws_mul_size_checked, size_t, 0xFFFFFFFELLU, 0xFFFFFFFELLU, 0xFFFFFFFC00000004LLU); CHECK_OVF(aws_mul_size_checked, size_t, 0x1FFFFFFFFLLU, 0x1FFFFFFFFLLU); CHECK_OVF(aws_mul_size_checked, size_t, ~0LLU, ~0LLU); #else FAIL("Unexpected size for size_t: %zu", sizeof(size_t)); #endif return 0; } AWS_TEST_CASE(test_add_size_checked, s_test_add_size_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_add_size_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #if SIZE_BITS == 32 const uint32_t HALF_MAX = UINT32_MAX / 2; const uint32_t ACTUAL_MAX = UINT32_MAX; #elif 
SIZE_BITS == 64 const uint64_t HALF_MAX = UINT64_MAX / 2; const uint64_t ACTUAL_MAX = UINT64_MAX; #else FAIL("Unexpected size for size_t: %zu", sizeof(size_t)); #endif CHECK_NO_OVF(aws_add_size_checked, size_t, 0, 0, 0); CHECK_NO_OVF(aws_add_size_checked, size_t, 0, 1, 1); CHECK_NO_OVF(aws_add_size_checked, size_t, 4, 5, 9); CHECK_NO_OVF(aws_add_size_checked, size_t, 1234, 4321, 5555); CHECK_NO_OVF(aws_add_size_checked, size_t, 0, ACTUAL_MAX, ACTUAL_MAX); CHECK_NO_OVF(aws_add_size_checked, size_t, HALF_MAX, HALF_MAX, ACTUAL_MAX - 1); CHECK_NO_OVF(aws_add_size_checked, size_t, HALF_MAX + 1, HALF_MAX, ACTUAL_MAX); CHECK_NO_OVF(aws_add_size_checked, size_t, 100, ACTUAL_MAX - 102, ACTUAL_MAX - 2); CHECK_NO_OVF(aws_add_size_checked, size_t, 100, ACTUAL_MAX - 100, ACTUAL_MAX); CHECK_OVF(aws_add_size_checked, size_t, 1, ACTUAL_MAX); CHECK_OVF(aws_add_size_checked, size_t, 100, ACTUAL_MAX); CHECK_OVF(aws_add_size_checked, size_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_size_checked, size_t, ACTUAL_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_size_checked, size_t, HALF_MAX + 1, HALF_MAX + 1); CHECK_OVF(aws_add_size_checked, size_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_size_checked, size_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_size_checked, size_t, 100, ACTUAL_MAX - 99); CHECK_OVF(aws_add_size_checked, size_t, 100, ACTUAL_MAX - 1); return 0; } AWS_TEST_CASE(test_sub_size_checked, s_test_sub_size_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_sub_size_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const size_t HALF_MAX = SIZE_MAX / 2; const size_t ACTUAL_MAX = SIZE_MAX; /* No overflow expected */ CHECK_NO_OVF(aws_sub_size_checked, size_t, 0, 0, 0); CHECK_NO_OVF(aws_sub_size_checked, size_t, 1, 0, 1); CHECK_NO_OVF(aws_sub_size_checked, size_t, 9, 4, 5); CHECK_NO_OVF(aws_sub_size_checked, size_t, 5555, 1234, 4321); CHECK_NO_OVF(aws_sub_size_checked, size_t, ACTUAL_MAX, 0, ACTUAL_MAX); CHECK_NO_OVF(aws_sub_size_checked, size_t, ACTUAL_MAX - 1, HALF_MAX, HALF_MAX); CHECK_NO_OVF(aws_sub_size_checked, size_t, ACTUAL_MAX, HALF_MAX + 1, HALF_MAX); CHECK_NO_OVF(aws_sub_size_checked, size_t, ACTUAL_MAX - 2, 100, ACTUAL_MAX - 102); CHECK_NO_OVF(aws_sub_size_checked, size_t, ACTUAL_MAX, 100, ACTUAL_MAX - 100); /* Overflow expected */ CHECK_OVF(aws_sub_size_checked, size_t, 0, 1); CHECK_OVF(aws_sub_size_checked, size_t, 0, 100); CHECK_OVF(aws_sub_size_checked, size_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_sub_size_checked, size_t, 0, ACTUAL_MAX); CHECK_OVF(aws_sub_size_checked, size_t, HALF_MAX, HALF_MAX + 1); CHECK_OVF(aws_sub_size_checked, size_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_sub_size_checked, size_t, 99, 100); CHECK_OVF(aws_sub_size_checked, size_t, 1, 100); return 0; } #define CHECK_OVF_VARARGS(fn, type, num, ...) \ do { \ type result_val; \ ASSERT_TRUE(fn(num, &result_val, __VA_ARGS__)); \ } while (0) #define CHECK_NO_OVF_VARARGS(fn, type, num, r, ...) 
\ do { \ type result_val; \ ASSERT_FALSE(fn(num, &result_val, __VA_ARGS__)); \ ASSERT_INT_EQUALS( \ (uint64_t)result_val, \ (uint64_t)(r), \ "From fn %s num %016llx: Expected %016llx; got %016llx", \ #fn, \ (unsigned long long)(num), \ (unsigned long long)(r), \ (unsigned long long)(result_val)); \ } while (0) void print_array(size_t *a, size_t len) { for (size_t i = 0; i < len; ++i) { fprintf(AWS_TESTING_REPORT_FD, "a: %zu\t%zu\n", i, a[i]); } } static int check_add_varargs_no_overflow(size_t a, size_t b, size_t r) { // Check for 2 inputs for (size_t i = 0; i < 2; ++i) { for (size_t j = 0; i < 2; ++i) { size_t array[2] = {0}; if (i != j) { // fprintf(AWS_TESTING_REPORT_FD,"i: %zu j: %zu\n", i, j); array[i] = a; array[j] = b; // print_array(array,2); CHECK_NO_OVF_VARARGS(aws_add_size_checked_varargs, size_t, 2, r, array[0], array[1]); } } } // Check for 3 inputs for (size_t i = 0; i < 3; ++i) { for (size_t j = 0; i < 3; ++i) { size_t array[3] = {0}; if (i != j) { array[i] = a; array[j] = b; CHECK_NO_OVF_VARARGS(aws_add_size_checked_varargs, size_t, 3, r, array[0], array[1], array[2]); } } } // Check for 5 inputs for (size_t i = 0; i < 5; ++i) { for (size_t j = 0; i < 5; ++i) { size_t array[5] = {0}; if (i != j) { array[i] = a; array[j] = b; CHECK_NO_OVF_VARARGS( aws_add_size_checked_varargs, size_t, 5, r, array[0], array[1], array[2], array[3], array[4]); } } } return 0; } static int check_add_varargs_overflow(size_t a, size_t b) { // Check for 2 inputs for (size_t i = 0; i < 2; ++i) { for (size_t j = 0; i < 2; ++i) { size_t array[2] = {0}; if (i != j) { // fprintf(AWS_TESTING_REPORT_FD,"i: %zu j: %zu\n", i, j); array[i] = a; array[j] = b; // print_array(array,2); CHECK_OVF_VARARGS(aws_add_size_checked_varargs, size_t, 2, array[0], array[1]); } } } // Check for 3 inputs for (size_t i = 0; i < 3; ++i) { for (size_t j = 0; i < 3; ++i) { size_t array[3] = {0}; if (i != j) { array[i] = a; array[j] = b; CHECK_OVF_VARARGS(aws_add_size_checked_varargs, size_t, 3, array[0], array[1], array[2]); } } } // Check for 5 inputs for (size_t i = 0; i < 5; ++i) { for (size_t j = 0; i < 5; ++i) { size_t array[5] = {0}; if (i != j) { array[i] = a; array[j] = b; CHECK_OVF_VARARGS( aws_add_size_checked_varargs, size_t, 5, array[0], array[1], array[2], array[3], array[4]); } } } return 0; } AWS_TEST_CASE(test_aws_add_size_checked_varargs, s_test_add_size_checked_varargs_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_add_size_checked_varargs_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #if SIZE_BITS == 32 const uint32_t HALF_MAX = UINT32_MAX / 2; const uint32_t ACTUAL_MAX = UINT32_MAX; #elif SIZE_BITS == 64 const uint64_t HALF_MAX = UINT64_MAX / 2; const uint64_t ACTUAL_MAX = UINT64_MAX; #else FAIL("Unexpected size for size_t: %zu", sizeof(size_t)); #endif ASSERT_SUCCESS(check_add_varargs_no_overflow(0, 0, 0)); ASSERT_SUCCESS(check_add_varargs_no_overflow(0, 1, 1)); ASSERT_SUCCESS(check_add_varargs_no_overflow(4, 5, 9)); ASSERT_SUCCESS(check_add_varargs_no_overflow(1234, 4321, 5555)); ASSERT_SUCCESS(check_add_varargs_no_overflow(0, ACTUAL_MAX, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_no_overflow(HALF_MAX, HALF_MAX, ACTUAL_MAX - 1)); ASSERT_SUCCESS(check_add_varargs_no_overflow(HALF_MAX + 1, HALF_MAX, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_no_overflow(100, ACTUAL_MAX - 102, ACTUAL_MAX - 2)); ASSERT_SUCCESS(check_add_varargs_no_overflow(100, ACTUAL_MAX - 100, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_overflow(1, ACTUAL_MAX)); 
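/* check_add_varargs_no_overflow/check_add_varargs_overflow scatter the two
 * interesting operands across zero-filled argument lists of length 2, 3 and 5
 * before calling aws_add_size_checked_varargs, so each expectation is exercised
 * at several argument positions. (Note that the inner placement loops advance i
 * rather than j, so in practice only the j == 0 placements run.) */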
ASSERT_SUCCESS(check_add_varargs_overflow(100, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_overflow(HALF_MAX, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_overflow(ACTUAL_MAX, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_overflow(HALF_MAX + 1, HALF_MAX + 1)); ASSERT_SUCCESS(check_add_varargs_overflow(HALF_MAX, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_overflow(HALF_MAX, ACTUAL_MAX)); ASSERT_SUCCESS(check_add_varargs_overflow(100, ACTUAL_MAX - 99)); ASSERT_SUCCESS(check_add_varargs_overflow(100, ACTUAL_MAX - 1)); return 0; } AWS_TEST_CASE(test_add_size_saturating, s_test_add_size_saturating_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_add_size_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #if SIZE_BITS == 32 const uint32_t HALF_MAX = UINT32_MAX / 2; const uint32_t ACTUAL_MAX = UINT32_MAX; #elif SIZE_BITS == 64 const uint64_t HALF_MAX = UINT64_MAX / 2; const uint64_t ACTUAL_MAX = UINT64_MAX; #else FAIL("Unexpected size for size_t: %zu", sizeof(size_t)); #endif (void)HALF_MAX; (void)ACTUAL_MAX; /* No overflow expected */ CHECK_SAT(aws_add_size_saturating, 0, 0, 0); CHECK_SAT(aws_add_size_saturating, 0, 1, 1); CHECK_SAT(aws_add_size_saturating, 4, 5, 9); CHECK_SAT(aws_add_size_saturating, 1234, 4321, 5555); CHECK_SAT(aws_add_size_saturating, 0, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, HALF_MAX, HALF_MAX, ACTUAL_MAX - 1); CHECK_SAT(aws_add_size_saturating, HALF_MAX + 1, HALF_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, 100, ACTUAL_MAX - 102, ACTUAL_MAX - 2); CHECK_SAT(aws_add_size_saturating, 100, ACTUAL_MAX - 100, ACTUAL_MAX); /* Overflow expected */ CHECK_SAT(aws_add_size_saturating, 1, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, 100, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, ACTUAL_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, HALF_MAX + 1, HALF_MAX + 1, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, 100, ACTUAL_MAX - 99, ACTUAL_MAX); CHECK_SAT(aws_add_size_saturating, 100, ACTUAL_MAX - 1, ACTUAL_MAX); return 0; } AWS_TEST_CASE(test_sub_size_saturating, s_test_sub_size_saturating_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_sub_size_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const size_t HALF_MAX = SIZE_MAX / 2; const size_t ACTUAL_MAX = SIZE_MAX; /* No overflow expected */ CHECK_SAT(aws_sub_size_saturating, 0, 0, 0); CHECK_SAT(aws_sub_size_saturating, 1, 0, 1); CHECK_SAT(aws_sub_size_saturating, 9, 4, 5); CHECK_SAT(aws_sub_size_saturating, 5555, 1234, 4321); CHECK_SAT(aws_sub_size_saturating, ACTUAL_MAX, 0, ACTUAL_MAX); CHECK_SAT(aws_sub_size_saturating, ACTUAL_MAX - 1, HALF_MAX, HALF_MAX); CHECK_SAT(aws_sub_size_saturating, ACTUAL_MAX, HALF_MAX + 1, HALF_MAX); CHECK_SAT(aws_sub_size_saturating, ACTUAL_MAX - 2, 100, ACTUAL_MAX - 102); CHECK_SAT(aws_sub_size_saturating, ACTUAL_MAX, 100, ACTUAL_MAX - 100); /* Overflow expected */ CHECK_SAT(aws_sub_size_saturating, 0, 1, 0); CHECK_SAT(aws_sub_size_saturating, 0, 100, 0); CHECK_SAT(aws_sub_size_saturating, HALF_MAX, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_size_saturating, 0, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_size_saturating, HALF_MAX, HALF_MAX + 1, 0); CHECK_SAT(aws_sub_size_saturating, HALF_MAX, ACTUAL_MAX, 0); 
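/* Saturating subtraction clamps to zero whenever the subtrahend exceeds the
 * minuend, which is why every underflowing case here expects exactly 0. */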
CHECK_SAT(aws_sub_size_saturating, 99, 100, 0); CHECK_SAT(aws_sub_size_saturating, 1, 100, 0); return 0; } AWS_TEST_CASE(test_add_u32_checked, s_test_add_u32_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_add_u32_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint32_t HALF_MAX = UINT32_MAX / 2; const uint32_t ACTUAL_MAX = UINT32_MAX; CHECK_NO_OVF(aws_add_u32_checked, uint32_t, 0, 0, 0); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, 0, 1, 1); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, 4, 5, 9); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, 1234, 4321, 5555); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, 0, ACTUAL_MAX, ACTUAL_MAX); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, HALF_MAX, HALF_MAX, ACTUAL_MAX - 1); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, HALF_MAX + 1, HALF_MAX, ACTUAL_MAX); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, 100, ACTUAL_MAX - 102, ACTUAL_MAX - 2); CHECK_NO_OVF(aws_add_u32_checked, uint32_t, 100, ACTUAL_MAX - 100, ACTUAL_MAX); CHECK_OVF(aws_add_u32_checked, uint32_t, 1, ACTUAL_MAX); CHECK_OVF(aws_add_u32_checked, uint32_t, 100, ACTUAL_MAX); CHECK_OVF(aws_add_u32_checked, uint32_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u32_checked, uint32_t, ACTUAL_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u32_checked, uint32_t, HALF_MAX + 1, HALF_MAX + 1); CHECK_OVF(aws_add_u32_checked, uint32_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u32_checked, uint32_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u32_checked, uint32_t, 100, ACTUAL_MAX - 99); CHECK_OVF(aws_add_u32_checked, uint32_t, 100, ACTUAL_MAX - 1); return 0; } AWS_TEST_CASE(test_add_u32_saturating, s_test_add_u32_saturating_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_add_u32_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint32_t HALF_MAX = UINT32_MAX / 2; const uint32_t ACTUAL_MAX = UINT32_MAX; /* No overflow expected */ CHECK_SAT(aws_add_u32_saturating, 0, 0, 0); CHECK_SAT(aws_add_u32_saturating, 0, 1, 1); CHECK_SAT(aws_add_u32_saturating, 4, 5, 9); CHECK_SAT(aws_add_u32_saturating, 1234, 4321, 5555); CHECK_SAT(aws_add_u32_saturating, 0, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, HALF_MAX, HALF_MAX, ACTUAL_MAX - 1); CHECK_SAT(aws_add_u32_saturating, HALF_MAX + 1, HALF_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, 100, ACTUAL_MAX - 102, ACTUAL_MAX - 2); CHECK_SAT(aws_add_u32_saturating, 100, ACTUAL_MAX - 100, ACTUAL_MAX); /* Overflow expected */ CHECK_SAT(aws_add_u32_saturating, 1, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, 100, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, ACTUAL_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, HALF_MAX + 1, HALF_MAX + 1, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, 100, ACTUAL_MAX - 99, ACTUAL_MAX); CHECK_SAT(aws_add_u32_saturating, 100, ACTUAL_MAX - 1, ACTUAL_MAX); return 0; } AWS_TEST_CASE(test_sub_u32_checked, s_test_sub_u32_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_sub_u32_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint32_t HALF_MAX = UINT32_MAX / 2; const uint32_t ACTUAL_MAX = UINT32_MAX; CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, 0, 0, 0); CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, 1, 0, 1); 
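/* For the checked subtraction, "overflow" really means unsigned underflow: the
 * cases further down expect aws_sub_u32_checked to report failure for any
 * b > a rather than wrapping around. */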
CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, 9, 4, 5); CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, 5555, 1234, 4321); CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, ACTUAL_MAX, 0, ACTUAL_MAX); CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, ACTUAL_MAX - 1, HALF_MAX, HALF_MAX); CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, ACTUAL_MAX, HALF_MAX + 1, HALF_MAX); CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, ACTUAL_MAX - 2, 100, ACTUAL_MAX - 102); CHECK_NO_OVF(aws_sub_u32_checked, uint32_t, ACTUAL_MAX, 100, ACTUAL_MAX - 100); CHECK_OVF(aws_sub_u32_checked, uint32_t, 0, 1); CHECK_OVF(aws_sub_u32_checked, uint32_t, 0, 100); CHECK_OVF(aws_sub_u32_checked, uint32_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_sub_u32_checked, uint32_t, 0, ACTUAL_MAX); CHECK_OVF(aws_sub_u32_checked, uint32_t, HALF_MAX, HALF_MAX + 1); CHECK_OVF(aws_sub_u32_checked, uint32_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_sub_u32_checked, uint32_t, 99, 100); CHECK_OVF(aws_sub_u32_checked, uint32_t, 1, 100); return 0; } AWS_TEST_CASE(test_sub_u32_saturating, s_test_sub_u32_saturating_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_sub_u32_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint32_t HALF_MAX = UINT32_MAX / 2; const uint32_t ACTUAL_MAX = UINT32_MAX; /* No overflow expected */ CHECK_SAT(aws_sub_u32_saturating, 0, 0, 0); CHECK_SAT(aws_sub_u32_saturating, 1, 0, 1); CHECK_SAT(aws_sub_u32_saturating, 9, 4, 5); CHECK_SAT(aws_sub_u32_saturating, 5555, 1234, 4321); CHECK_SAT(aws_sub_u32_saturating, ACTUAL_MAX, 0, ACTUAL_MAX); CHECK_SAT(aws_sub_u32_saturating, ACTUAL_MAX - 1, HALF_MAX, HALF_MAX); CHECK_SAT(aws_sub_u32_saturating, ACTUAL_MAX, HALF_MAX + 1, HALF_MAX); CHECK_SAT(aws_sub_u32_saturating, ACTUAL_MAX - 2, 100, ACTUAL_MAX - 102); CHECK_SAT(aws_sub_u32_saturating, ACTUAL_MAX, 100, ACTUAL_MAX - 100); /* Overflow expected */ CHECK_SAT(aws_sub_u32_saturating, 0, 1, 0); CHECK_SAT(aws_sub_u32_saturating, 0, 100, 0); CHECK_SAT(aws_sub_u32_saturating, HALF_MAX, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_u32_saturating, 0, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_u32_saturating, HALF_MAX, HALF_MAX + 1, 0); CHECK_SAT(aws_sub_u32_saturating, HALF_MAX, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_u32_saturating, 99, 100, 0); CHECK_SAT(aws_sub_u32_saturating, 1, 100, 0); return 0; } AWS_TEST_CASE(test_add_u64_checked, s_test_add_u64_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_add_u64_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint64_t HALF_MAX = UINT64_MAX / 2; const uint64_t ACTUAL_MAX = UINT64_MAX; CHECK_NO_OVF(aws_add_u64_checked, uint64_t, 0, 0, 0); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, 0, 1, 1); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, 4, 5, 9); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, 1234, 4321, 5555); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, 0, ACTUAL_MAX, ACTUAL_MAX); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, HALF_MAX, HALF_MAX, ACTUAL_MAX - 1); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, HALF_MAX + 1, HALF_MAX, ACTUAL_MAX); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, 100, ACTUAL_MAX - 102, ACTUAL_MAX - 2); CHECK_NO_OVF(aws_add_u64_checked, uint64_t, 100, ACTUAL_MAX - 100, ACTUAL_MAX); CHECK_OVF(aws_add_u64_checked, uint64_t, 1, ACTUAL_MAX); CHECK_OVF(aws_add_u64_checked, uint64_t, 100, ACTUAL_MAX); CHECK_OVF(aws_add_u64_checked, uint64_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u64_checked, uint64_t, ACTUAL_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u64_checked, uint64_t, HALF_MAX + 1, HALF_MAX + 1); 
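/* (HALF_MAX + 1) + (HALF_MAX + 1) equals 2^64, one past UINT64_MAX, so it is
 * the smallest pair of equal operands that must be flagged as overflow. */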
CHECK_OVF(aws_add_u64_checked, uint64_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u64_checked, uint64_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_add_u64_checked, uint64_t, 100, ACTUAL_MAX - 99); CHECK_OVF(aws_add_u64_checked, uint64_t, 100, ACTUAL_MAX - 1); return 0; } AWS_TEST_CASE(test_add_u64_saturating, s_test_add_u64_saturating_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_add_u64_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint64_t HALF_MAX = UINT64_MAX / 2; const uint64_t ACTUAL_MAX = UINT64_MAX; /* No overflow expected */ CHECK_SAT(aws_add_u64_saturating, 0, 0, 0); CHECK_SAT(aws_add_u64_saturating, 0, 1, 1); CHECK_SAT(aws_add_u64_saturating, 4, 5, 9); CHECK_SAT(aws_add_u64_saturating, 1234, 4321, 5555); CHECK_SAT(aws_add_u64_saturating, 0, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, HALF_MAX, HALF_MAX, ACTUAL_MAX - 1); CHECK_SAT(aws_add_u64_saturating, HALF_MAX + 1, HALF_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, 100, ACTUAL_MAX - 102, ACTUAL_MAX - 2); CHECK_SAT(aws_add_u64_saturating, 100, ACTUAL_MAX - 100, ACTUAL_MAX); /* Overflow expected */ CHECK_SAT(aws_add_u64_saturating, 1, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, 100, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, ACTUAL_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, HALF_MAX + 1, HALF_MAX + 1, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, HALF_MAX, ACTUAL_MAX, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, 100, ACTUAL_MAX - 99, ACTUAL_MAX); CHECK_SAT(aws_add_u64_saturating, 100, ACTUAL_MAX - 1, ACTUAL_MAX); return 0; } AWS_TEST_CASE(test_sub_u64_checked, s_test_sub_u64_checked_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_sub_u64_checked_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint64_t HALF_MAX = UINT64_MAX / 2; const uint64_t ACTUAL_MAX = UINT64_MAX; /* No overflow expected */ CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, 0, 0, 0); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, 1, 0, 1); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, 9, 4, 5); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, 5555, 1234, 4321); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, ACTUAL_MAX, 0, ACTUAL_MAX); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, ACTUAL_MAX - 1, HALF_MAX, HALF_MAX); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, ACTUAL_MAX, HALF_MAX + 1, HALF_MAX); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, ACTUAL_MAX - 2, 100, ACTUAL_MAX - 102); CHECK_NO_OVF(aws_sub_u64_checked, uint64_t, ACTUAL_MAX, 100, ACTUAL_MAX - 100); /* Overflow expected */ CHECK_OVF(aws_sub_u64_checked, uint64_t, 0, 1); CHECK_OVF(aws_sub_u64_checked, uint64_t, 0, 100); CHECK_OVF(aws_sub_u64_checked, uint64_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_sub_u64_checked, uint64_t, 0, ACTUAL_MAX); CHECK_OVF(aws_sub_u64_checked, uint64_t, HALF_MAX, HALF_MAX + 1); CHECK_OVF(aws_sub_u64_checked, uint64_t, HALF_MAX, ACTUAL_MAX); CHECK_OVF(aws_sub_u64_checked, uint64_t, 99, 100); CHECK_OVF(aws_sub_u64_checked, uint64_t, 1, 100); return 0; } AWS_TEST_CASE(test_sub_u64_saturating, s_test_sub_u64_saturating_fn) /* NOLINTNEXTLINE(readability-function-size) */ static int s_test_sub_u64_saturating_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const uint64_t HALF_MAX = UINT64_MAX / 2; const uint64_t ACTUAL_MAX = UINT64_MAX; /* No 
overflow expected */ CHECK_SAT(aws_sub_u64_saturating, 0, 0, 0); CHECK_SAT(aws_sub_u64_saturating, 1, 0, 1); CHECK_SAT(aws_sub_u64_saturating, 9, 4, 5); CHECK_SAT(aws_sub_u64_saturating, 5555, 1234, 4321); CHECK_SAT(aws_sub_u64_saturating, ACTUAL_MAX, 0, ACTUAL_MAX); CHECK_SAT(aws_sub_u64_saturating, ACTUAL_MAX - 1, HALF_MAX, HALF_MAX); CHECK_SAT(aws_sub_u64_saturating, ACTUAL_MAX, HALF_MAX + 1, HALF_MAX); CHECK_SAT(aws_sub_u64_saturating, ACTUAL_MAX - 2, 100, ACTUAL_MAX - 102); CHECK_SAT(aws_sub_u64_saturating, ACTUAL_MAX, 100, ACTUAL_MAX - 100); /* Overflow expected */ CHECK_SAT(aws_sub_u64_saturating, 0, 1, 0); CHECK_SAT(aws_sub_u64_saturating, 0, 100, 0); CHECK_SAT(aws_sub_u64_saturating, HALF_MAX, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_u64_saturating, 0, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_u64_saturating, HALF_MAX, HALF_MAX + 1, 0); CHECK_SAT(aws_sub_u64_saturating, HALF_MAX, ACTUAL_MAX, 0); CHECK_SAT(aws_sub_u64_saturating, 99, 100, 0); CHECK_SAT(aws_sub_u64_saturating, 1, 100, 0); return 0; } AWS_TEST_CASE(test_min_max, s_test_min_max) static int s_test_min_max(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_UINT_EQUALS(0, aws_min_u8(0, UINT8_MAX)); ASSERT_UINT_EQUALS(0, aws_min_u8(UINT8_MAX, 0)); ASSERT_UINT_EQUALS(UINT8_MAX, aws_max_u8(0, UINT8_MAX)); ASSERT_UINT_EQUALS(UINT8_MAX, aws_max_u8(UINT8_MAX, 0)); ASSERT_INT_EQUALS(INT8_MIN, aws_min_i8(INT8_MIN, INT8_MAX)); ASSERT_INT_EQUALS(INT8_MIN, aws_min_i8(INT8_MAX, INT8_MIN)); ASSERT_INT_EQUALS(INT8_MAX, aws_max_i8(INT8_MIN, INT8_MAX)); ASSERT_INT_EQUALS(INT8_MAX, aws_max_i8(INT8_MAX, INT8_MIN)); ASSERT_UINT_EQUALS(0, aws_min_u16(0, UINT16_MAX)); ASSERT_UINT_EQUALS(0, aws_min_u16(UINT16_MAX, 0)); ASSERT_UINT_EQUALS(UINT16_MAX, aws_max_u16(0, UINT16_MAX)); ASSERT_UINT_EQUALS(UINT16_MAX, aws_max_u16(UINT16_MAX, 0)); ASSERT_INT_EQUALS(INT16_MIN, aws_min_i16(INT16_MIN, INT16_MAX)); ASSERT_INT_EQUALS(INT16_MIN, aws_min_i16(INT16_MAX, INT16_MIN)); ASSERT_INT_EQUALS(INT16_MAX, aws_max_i16(INT16_MIN, INT16_MAX)); ASSERT_INT_EQUALS(INT16_MAX, aws_max_i16(INT16_MAX, INT16_MIN)); ASSERT_UINT_EQUALS(0, aws_min_u32(0, UINT32_MAX)); ASSERT_UINT_EQUALS(0, aws_min_u32(UINT32_MAX, 0)); ASSERT_UINT_EQUALS(UINT32_MAX, aws_max_u32(0, UINT32_MAX)); ASSERT_UINT_EQUALS(UINT32_MAX, aws_max_u32(UINT32_MAX, 0)); ASSERT_INT_EQUALS(INT32_MIN, aws_min_i32(INT32_MIN, INT32_MAX)); ASSERT_INT_EQUALS(INT32_MIN, aws_min_i32(INT32_MAX, INT32_MIN)); ASSERT_INT_EQUALS(INT32_MAX, aws_max_i32(INT32_MIN, INT32_MAX)); ASSERT_INT_EQUALS(INT32_MAX, aws_max_i32(INT32_MAX, INT32_MIN)); ASSERT_UINT_EQUALS(0, aws_min_u64(0, UINT64_MAX)); ASSERT_UINT_EQUALS(0, aws_min_u64(UINT64_MAX, 0)); ASSERT_UINT_EQUALS(UINT64_MAX, aws_max_u64(0, UINT64_MAX)); ASSERT_UINT_EQUALS(UINT64_MAX, aws_max_u64(UINT64_MAX, 0)); ASSERT_INT_EQUALS(INT64_MIN, aws_min_i64(INT64_MIN, INT64_MAX)); ASSERT_INT_EQUALS(INT64_MIN, aws_min_i64(INT64_MAX, INT64_MIN)); ASSERT_INT_EQUALS(INT64_MAX, aws_max_i64(INT64_MIN, INT64_MAX)); ASSERT_INT_EQUALS(INT64_MAX, aws_max_i64(INT64_MAX, INT64_MIN)); ASSERT_UINT_EQUALS(0, aws_min_size(0, SIZE_MAX)); ASSERT_UINT_EQUALS(0, aws_min_size(SIZE_MAX, 0)); ASSERT_UINT_EQUALS(SIZE_MAX, aws_max_size(0, SIZE_MAX)); ASSERT_UINT_EQUALS(SIZE_MAX, aws_max_size(SIZE_MAX, 0)); ASSERT_INT_EQUALS(INT_MIN, aws_min_int(INT_MIN, INT_MAX)); ASSERT_INT_EQUALS(INT_MIN, aws_min_int(INT_MAX, INT_MIN)); ASSERT_INT_EQUALS(INT_MAX, aws_max_int(INT_MIN, INT_MAX)); ASSERT_INT_EQUALS(INT_MAX, aws_max_int(INT_MAX, INT_MIN)); ASSERT_TRUE(FLT_MIN == aws_min_float(FLT_MIN, 
FLT_MAX)); ASSERT_TRUE(FLT_MIN == aws_min_float(FLT_MAX, FLT_MIN)); ASSERT_TRUE(FLT_MAX == aws_max_float(FLT_MIN, FLT_MAX)); ASSERT_TRUE(FLT_MAX == aws_max_float(FLT_MAX, FLT_MIN)); ASSERT_TRUE(DBL_MIN == aws_min_double(DBL_MIN, DBL_MAX)); ASSERT_TRUE(DBL_MIN == aws_min_double(DBL_MAX, DBL_MIN)); ASSERT_TRUE(DBL_MAX == aws_max_double(DBL_MIN, DBL_MAX)); ASSERT_TRUE(DBL_MAX == aws_max_double(DBL_MAX, DBL_MIN)); return 0; } AWS_TEST_CASE(test_clz, s_test_clz) static int s_test_clz(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_UINT_EQUALS(0, aws_clz_u32(UINT32_MAX)); ASSERT_UINT_EQUALS(1, aws_clz_u32(INT32_MAX)); ASSERT_UINT_EQUALS(32, aws_clz_u32(0)); ASSERT_UINT_EQUALS(0, aws_clz_i32(-1)); ASSERT_UINT_EQUALS(1, aws_clz_i32(INT32_MAX)); ASSERT_UINT_EQUALS(32, aws_clz_i32(0)); ASSERT_UINT_EQUALS(0, aws_clz_u64(UINT64_MAX)); ASSERT_UINT_EQUALS(1, aws_clz_u64(INT64_MAX)); ASSERT_UINT_EQUALS(64, aws_clz_u64(0)); ASSERT_UINT_EQUALS(0, aws_clz_i64(-1)); ASSERT_UINT_EQUALS(1, aws_clz_i64(INT64_MAX)); ASSERT_UINT_EQUALS(64, aws_clz_i64(0)); return 0; } AWS_TEST_CASE(test_ctz, s_test_ctz) static int s_test_ctz(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_UINT_EQUALS(0, aws_ctz_u32(1)); ASSERT_UINT_EQUALS(1, aws_ctz_u32(2)); ASSERT_UINT_EQUALS(32, aws_ctz_u32(0)); ASSERT_UINT_EQUALS(0, aws_ctz_i32(1)); ASSERT_UINT_EQUALS(1, aws_ctz_i32(2)); ASSERT_UINT_EQUALS(32, aws_ctz_i32(0)); ASSERT_UINT_EQUALS(0, aws_ctz_u64(1)); ASSERT_UINT_EQUALS(1, aws_ctz_u64(2)); ASSERT_UINT_EQUALS(64, aws_ctz_u64(0)); ASSERT_UINT_EQUALS(0, aws_ctz_i64(1)); ASSERT_UINT_EQUALS(1, aws_ctz_i64(2)); ASSERT_UINT_EQUALS(64, aws_ctz_i64(0)); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/memtrace_test.c000066400000000000000000000215121456575232400246630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include "logging/test_logger.h" #define NUM_ALLOCS 100 static int s_test_memtrace_count(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_BYTES, 0); void *allocs[NUM_ALLOCS] = {0}; size_t sizes[NUM_ALLOCS] = {0}; size_t total = 0; for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { uint32_t size = 0; aws_device_random_u32(&size); size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ allocs[idx] = aws_mem_acquire(tracer, size); sizes[idx] = size; total += size; } ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); ASSERT_UINT_EQUALS(NUM_ALLOCS, aws_mem_tracer_count(tracer)); size_t freed = 0; for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { uint32_t roll = 0; aws_device_random_u32(&roll); if (roll % 3 == 0) { aws_mem_release(tracer, allocs[idx]); allocs[idx] = NULL; total -= sizes[idx]; ++freed; } } ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); ASSERT_UINT_EQUALS(NUM_ALLOCS - freed, aws_mem_tracer_count(tracer)); for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { if (allocs[idx]) { aws_mem_release(tracer, allocs[idx]); } } ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer)); struct aws_allocator *original = aws_mem_tracer_destroy(tracer); ASSERT_PTR_EQUALS(allocator, original); return 0; } AWS_TEST_CASE(test_memtrace_count, s_test_memtrace_count) #if defined(__GNUC__) || defined(__clang__) # define AWS_PREVENT_OPTIMIZATION __asm__ __volatile__("" ::: "memory") #else # define AWS_PREVENT_OPTIMIZATION #endif AWS_NO_INLINE void *s_alloc_1(struct aws_allocator *allocator, size_t size) { AWS_PREVENT_OPTIMIZATION; return aws_mem_acquire(allocator, size); } AWS_NO_INLINE void *s_alloc_2(struct aws_allocator *allocator, size_t size) { AWS_PREVENT_OPTIMIZATION; return aws_mem_acquire(allocator, size); } AWS_NO_INLINE void *s_alloc_3(struct aws_allocator *allocator, size_t size) { AWS_PREVENT_OPTIMIZATION; return aws_mem_acquire(allocator, size); } AWS_NO_INLINE void *s_alloc_4(struct aws_allocator *allocator, size_t size) { AWS_PREVENT_OPTIMIZATION; return aws_mem_acquire(allocator, size); } static struct aws_logger s_test_logger; static int s_test_memtrace_stacks(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* only bother to run this test if the platform can do a backtrace */ void *probe_stack[1]; if (!aws_backtrace(probe_stack, 1)) { return 0; } test_logger_init(&s_test_logger, allocator, AWS_LL_TRACE, 0); aws_logger_set(&s_test_logger); struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_STACKS, 8); void *allocs[NUM_ALLOCS] = {0}; size_t total = 0; for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { uint32_t size = 0; aws_device_random_u32(&size); size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ void *(*allocate)(struct aws_allocator *, size_t) = NULL; switch (idx % 4) { case 0: allocate = s_alloc_1; break; case 1: allocate = s_alloc_2; break; case 2: allocate = s_alloc_3; break; case 3: allocate = s_alloc_4; break; } allocs[idx] = allocate(tracer, size); total += size; } ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); ASSERT_UINT_EQUALS(NUM_ALLOCS, aws_mem_tracer_count(tracer)); aws_mem_tracer_dump(tracer); /* make sure all of the functions that allocated are found */ struct test_logger_impl *test_logger = s_test_logger.p_impl; /* if this is not a debug build, there may not be 
symbols, so the test cannot * verify if a best effort was made */ #if defined(DEBUG_BUILD) /* fprintf(stderr, "%s\n", test_logger->log_buffer.buffer); */ char s_alloc_1_addr[32]; char s_alloc_2_addr[32]; char s_alloc_3_addr[32]; char s_alloc_4_addr[32]; # if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable : 4054) /* type cast function pointer to data pointer */ snprintf(s_alloc_1_addr, AWS_ARRAY_SIZE(s_alloc_1_addr), "0x%tx", (uintptr_t)(void *)s_alloc_1); snprintf(s_alloc_2_addr, AWS_ARRAY_SIZE(s_alloc_2_addr), "0x%tx", (uintptr_t)(void *)s_alloc_2); snprintf(s_alloc_3_addr, AWS_ARRAY_SIZE(s_alloc_3_addr), "0x%tx", (uintptr_t)(void *)s_alloc_3); snprintf(s_alloc_4_addr, AWS_ARRAY_SIZE(s_alloc_4_addr), "0x%tx", (uintptr_t)(void *)s_alloc_4); # pragma warning(pop) # endif /* defined(_MSC_VER) */ const char *log_buffer = (const char *)test_logger->log_buffer.buffer; ASSERT_TRUE(strstr(log_buffer, "s_alloc_1") || strstr(log_buffer, s_alloc_1_addr)); ASSERT_TRUE(strstr(log_buffer, "s_alloc_2") || strstr(log_buffer, s_alloc_2_addr)); ASSERT_TRUE(strstr(log_buffer, "s_alloc_3") || strstr(log_buffer, s_alloc_3_addr)); ASSERT_TRUE(strstr(log_buffer, "s_alloc_4") || strstr(log_buffer, s_alloc_4_addr)); #endif /* reset log */ aws_byte_buf_reset(&test_logger->log_buffer, true); for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { if (allocs[idx]) { aws_mem_release(tracer, allocs[idx]); } } ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer)); aws_mem_tracer_dump(tracer); /* Make sure no known allocs are left */ ASSERT_UINT_EQUALS(0, test_logger->log_buffer.len); struct aws_allocator *original = aws_mem_tracer_destroy(tracer); ASSERT_PTR_EQUALS(allocator, original); aws_logger_clean_up(&s_test_logger); return 0; } AWS_TEST_CASE(test_memtrace_stacks, s_test_memtrace_stacks) static int s_test_memtrace_none(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_NONE, 0); void *allocs[NUM_ALLOCS] = {0}; for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { uint32_t size = 0; aws_device_random_u32(&size); size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ allocs[idx] = aws_mem_acquire(tracer, size); } ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { if (allocs[idx]) { aws_mem_release(tracer, allocs[idx]); } } ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); struct aws_allocator *original = aws_mem_tracer_destroy(tracer); ASSERT_PTR_EQUALS(allocator, original); return 0; } AWS_TEST_CASE(test_memtrace_none, s_test_memtrace_none) static int s_test_memtrace_midstream(struct aws_allocator *allocator, void *ctx) { (void)ctx; void *allocs[NUM_ALLOCS] = {0}; /* allocate some from the base allocator first */ for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs) / 4; ++idx) { uint32_t size = 0; aws_device_random_u32(&size); size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ allocs[idx] = aws_mem_acquire(allocator, size); } struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_BYTES, 0); /* Now allocate from the tracer, and make sure everything still works */ size_t total = 0; size_t tracked_allocs = 0; for (size_t idx = AWS_ARRAY_SIZE(allocs) / 4 + 1; idx < AWS_ARRAY_SIZE(allocs); ++idx) { uint32_t size = 0; aws_device_random_u32(&size); size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ 
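/* Allocations made through the tracer must show up in aws_mem_tracer_bytes()
 * and aws_mem_tracer_count(); the first quarter of the array, allocated
 * straight from the base allocator before the tracer existed, stays invisible
 * to it. */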
allocs[idx] = aws_mem_acquire(tracer, size); total += size; ++tracked_allocs; } ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); ASSERT_UINT_EQUALS(tracked_allocs, aws_mem_tracer_count(tracer)); for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { if (allocs[idx]) { aws_mem_release(tracer, allocs[idx]); } } ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer)); struct aws_allocator *original = aws_mem_tracer_destroy(tracer); ASSERT_PTR_EQUALS(allocator, original); return 0; } AWS_TEST_CASE(test_memtrace_midstream, s_test_memtrace_midstream) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/mutex_test.c000066400000000000000000000120271456575232400242310ustar00rootroot00000000000000/* * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include static int s_test_mutex_acquire_release(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_mutex mutex; aws_mutex_init(&mutex); ASSERT_SUCCESS(aws_mutex_lock(&mutex), "Mutex acquire should have returned success."); ASSERT_SUCCESS(aws_mutex_unlock(&mutex), "Mutex release should have returned success."); aws_mutex_clean_up(&mutex); return 0; } struct thread_mutex_data { struct aws_mutex mutex; int counter; int max_counts; /* To ensure both threads are in their loops at the same time, fighting over the mutex, * the main thread will wait for the spawned thread to tick the counter from 0->1 * and the spawned thread will wait for the main thread to tick the counter from 1->2. * Without this, it's possible for either thread to do all the work before * the other one enters its loop. */ int thread_fn_increments; int main_fn_increments; }; static void s_mutex_thread_fn(void *mutex_data) { struct thread_mutex_data *p_mutex = (struct thread_mutex_data *)mutex_data; int finished = 0; while (!finished) { aws_mutex_lock(&p_mutex->mutex); if (p_mutex->counter != p_mutex->max_counts) { if (p_mutex->counter == 1) { /* wait for the main thread to tick the counter from 1->2. (see notes on thread_mutex_data)*/ } else { int counter = p_mutex->counter + 1; p_mutex->counter = counter; p_mutex->thread_fn_increments += 1; finished = p_mutex->counter == p_mutex->max_counts; } } else { finished = 1; } aws_mutex_unlock(&p_mutex->mutex); } } static int s_test_mutex_is_actually_mutex(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct thread_mutex_data mutex_data = { .counter = 0, .max_counts = 1000000, .thread_fn_increments = 0, .main_fn_increments = 0, }; aws_mutex_init(&mutex_data.mutex); struct aws_thread thread; aws_thread_init(&thread, allocator); ASSERT_SUCCESS( aws_thread_launch(&thread, s_mutex_thread_fn, &mutex_data, 0), "thread creation failed with error %d", aws_last_error()); int finished = 0; while (!finished) { aws_mutex_lock(&mutex_data.mutex); /* wait for the spawned thread to tick the counter from 0->1. 
(see notes on thread_mutex_data)*/ if (!mutex_data.thread_fn_increments) { aws_mutex_unlock(&mutex_data.mutex); continue; } if (mutex_data.counter != mutex_data.max_counts) { mutex_data.main_fn_increments += 1; int counter = mutex_data.counter + 1; mutex_data.counter = counter; finished = mutex_data.counter == mutex_data.max_counts; } else { finished = 1; } aws_mutex_unlock(&mutex_data.mutex); } ASSERT_SUCCESS(aws_thread_join(&thread), "Thread join failed with error code %d.", aws_last_error()); ASSERT_TRUE(mutex_data.thread_fn_increments > 0, "Thread 2 should have written some"); ASSERT_TRUE(mutex_data.main_fn_increments > 0, "Main thread should have written some"); ASSERT_INT_EQUALS( mutex_data.max_counts, mutex_data.counter, "Both threads should have written exactly the max counts."); ASSERT_INT_EQUALS( mutex_data.counter, mutex_data.thread_fn_increments + mutex_data.main_fn_increments, "Both threads should have written up to the max count"); aws_thread_clean_up(&thread); aws_mutex_clean_up(&mutex_data.mutex); return 0; } AWS_TEST_CASE(mutex_aquire_release_test, s_test_mutex_acquire_release) AWS_TEST_CASE(mutex_is_actually_mutex_test, s_test_mutex_is_actually_mutex) static int s_test_mutex_try_lock_is_correct(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_mutex lock; ASSERT_SUCCESS(aws_mutex_init(&lock)); ASSERT_SUCCESS(aws_mutex_lock(&lock)); ASSERT_FAILS(aws_mutex_try_lock(&lock)); ASSERT_SUCCESS(aws_mutex_unlock(&lock)); ASSERT_SUCCESS(aws_mutex_try_lock(&lock)); ASSERT_FAILS(aws_mutex_try_lock(&lock)); ASSERT_SUCCESS(aws_mutex_unlock(&lock)); aws_mutex_clean_up(&lock); return 0; } AWS_TEST_CASE(mutex_try_lock_is_correct_test, s_test_mutex_try_lock_is_correct) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/priority_queue_test.c000066400000000000000000000401421456575232400261530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include static int s_compare_ints(const void *a, const void *b) { int arg1 = *(const int *)a; int arg2 = *(const int *)b; if (arg1 < arg2) { return -1; } if (arg1 > arg2) { return 1; } return 0; } static int s_test_priority_queue_preserves_order(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_priority_queue queue; int err = aws_priority_queue_init_dynamic(&queue, allocator, 10, sizeof(int), s_compare_ints); ASSERT_SUCCESS(err, "Priority queue initialization failed with error %d", err); int first = 45, second = 67, third = 80, fourth = 120, fifth = 10000; ASSERT_SUCCESS(aws_priority_queue_push(&queue, &third), "Push operation failed for item %d", third); ASSERT_SUCCESS(aws_priority_queue_push(&queue, &fourth), "Push operation failed for item %d", fourth); ASSERT_SUCCESS(aws_priority_queue_push(&queue, &second), "Push operation failed for item %d", second); ASSERT_SUCCESS(aws_priority_queue_push(&queue, &fifth), "Push operation failed for item %d", fifth); ASSERT_SUCCESS(aws_priority_queue_push(&queue, &first), "Push operation failed for item %d", first); size_t num_elements = aws_priority_queue_size(&queue); ASSERT_INT_EQUALS(5, num_elements, "Priority queue size should have been %d but was %d", 5, num_elements); int pop_val, top_val, *top_val_ptr; err = aws_priority_queue_top(&queue, (void **)&top_val_ptr); ASSERT_SUCCESS(err, "Top operation failed with error %d", err); top_val = *top_val_ptr; err = aws_priority_queue_pop(&queue, &pop_val); ASSERT_SUCCESS(err, "Pop operation failed with error %d", err); ASSERT_INT_EQUALS(first, pop_val, "First element returned should have been %d but was %d", first, pop_val); ASSERT_INT_EQUALS( pop_val, top_val, "Popped element should have been the top element. expected %d but was %d", pop_val, top_val); err = aws_priority_queue_top(&queue, (void **)&top_val_ptr); ASSERT_SUCCESS(err, "Top operation failed with error %d", err); top_val = *top_val_ptr; err = aws_priority_queue_pop(&queue, &pop_val); ASSERT_SUCCESS(err, "Pop operation failed with error %d", err); ASSERT_INT_EQUALS(second, pop_val, "Second element returned should have been %d but was %d", second, pop_val); ASSERT_INT_EQUALS( pop_val, top_val, "Popped element should have been the top element. expected %d but was %d", pop_val, top_val); err = aws_priority_queue_top(&queue, (void **)&top_val_ptr); ASSERT_SUCCESS(err, "Top operation failed with error %d", err); top_val = *top_val_ptr; err = aws_priority_queue_pop(&queue, &pop_val); ASSERT_SUCCESS(err, "Pop operation failed with error %d", err); ASSERT_INT_EQUALS(third, pop_val, "Third element returned should have been %d but was %d", third, pop_val); ASSERT_INT_EQUALS( pop_val, top_val, "Popped element should have been the top element. expected %d but was %d", pop_val, top_val); err = aws_priority_queue_top(&queue, (void **)&top_val_ptr); ASSERT_SUCCESS(err, "Top operation failed with error %d", err); top_val = *top_val_ptr; err = aws_priority_queue_pop(&queue, &pop_val); ASSERT_SUCCESS(err, "Pop operation failed with error %d", err); ASSERT_INT_EQUALS(fourth, pop_val, "Fourth element returned should have been %d but was %d", fourth, pop_val); ASSERT_INT_EQUALS( pop_val, top_val, "Popped element should have been the top element. 
expected %d but was %d", pop_val, top_val); err = aws_priority_queue_top(&queue, (void **)&top_val_ptr); ASSERT_SUCCESS(err, "Top operation failed with error %d", err); top_val = *top_val_ptr; err = aws_priority_queue_pop(&queue, &pop_val); ASSERT_SUCCESS(err, "Pop operation failed with error %d", err); ASSERT_INT_EQUALS(fifth, pop_val, "Fifth element returned should have been %d but was %d", fifth, pop_val); ASSERT_INT_EQUALS( pop_val, top_val, "Popped element should have been the top element. expected %d but was %d", pop_val, top_val); ASSERT_ERROR( AWS_ERROR_PRIORITY_QUEUE_EMPTY, aws_priority_queue_pop(&queue, &pop_val), "Popping from empty queue should result in error"); aws_priority_queue_clean_up(&queue); return 0; } static int s_test_priority_queue_random_values(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; enum { SIZE = 20 }; struct aws_priority_queue queue; int storage[SIZE], err; aws_priority_queue_init_static(&queue, storage, SIZE, sizeof(int), s_compare_ints); int values[SIZE]; srand((unsigned)(uintptr_t)&queue); for (int i = 0; i < SIZE; i++) { values[i] = rand() % 1000; err = aws_priority_queue_push(&queue, &values[i]); ASSERT_SUCCESS(err, "Push operation failed with error %d", err); } qsort(values, SIZE, sizeof(int), s_compare_ints); /* pop only half */ for (int i = 0; i < SIZE / 2; i++) { int top; err = aws_priority_queue_pop(&queue, &top); ASSERT_SUCCESS(err, "Pop operation failed with error %d", err); ASSERT_INT_EQUALS(values[i], top, "Elements priority are out of order. Expected: %d Actual %d", values[i], top); } /* push new random values in that first half*/ for (int i = 0; i < SIZE / 2; i++) { values[i] = rand() % 1000; err = aws_priority_queue_push(&queue, &values[i]); ASSERT_SUCCESS(err, "Push operation failed with error %d", err); } /* sort again so we can verify correct order on pop */ qsort(values, SIZE, sizeof(int), s_compare_ints); /* pop all the queue */ for (int i = 0; i < SIZE; i++) { int top; err = aws_priority_queue_pop(&queue, &top); ASSERT_SUCCESS(err, "Pop operation failed with error %d", err); ASSERT_INT_EQUALS(values[i], top, "Elements priority are out of order. Expected: %d Actual %d", values[i], top); } aws_priority_queue_clean_up(&queue); return 0; } static int s_test_priority_queue_size_and_capacity(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_priority_queue queue; int err = aws_priority_queue_init_dynamic(&queue, allocator, 5, sizeof(int), s_compare_ints); ASSERT_SUCCESS(err, "Dynamic init failed with error %d", err); size_t capacity = aws_priority_queue_capacity(&queue); ASSERT_INT_EQUALS(5, capacity, "Expected Capacity %d but was %d", 5, capacity); for (int i = 0; i < 15; i++) { err = aws_priority_queue_push(&queue, &i); ASSERT_SUCCESS(err, "Push operation failed with error %d", err); } size_t size = aws_priority_queue_size(&queue); ASSERT_INT_EQUALS(15, size, "Expected Size %d but was %d", 15, capacity); capacity = aws_priority_queue_capacity(&queue); ASSERT_INT_EQUALS(20, capacity, "Expected Capacity %d but was %d", 20, capacity); aws_priority_queue_clean_up(&queue); return 0; } #define ADD_ELEMS(pq, ...) \ do { \ static int ADD_ELEMS_elems[] = {__VA_ARGS__}; \ for (size_t ADD_ELEMS_i = 0; ADD_ELEMS_i < sizeof(ADD_ELEMS_elems) / sizeof(*ADD_ELEMS_elems); \ ADD_ELEMS_i++) { \ ASSERT_SUCCESS(aws_priority_queue_push(&(pq), &ADD_ELEMS_elems[ADD_ELEMS_i])); \ } \ } while (0) #define CHECK_ORDER(pq, ...) 
\ do { \ static int CHECK_ORDER_elems[] = {__VA_ARGS__}; \ size_t CHECK_ORDER_count = sizeof(CHECK_ORDER_elems) / sizeof(*CHECK_ORDER_elems); \ size_t CHECK_ORDER_i = 0; \ int CHECK_ORDER_val; \ while (aws_priority_queue_pop(&(pq), &CHECK_ORDER_val) == AWS_OP_SUCCESS) { \ ASSERT_TRUE(CHECK_ORDER_i < CHECK_ORDER_count); \ ASSERT_INT_EQUALS(CHECK_ORDER_val, CHECK_ORDER_elems[CHECK_ORDER_i]); \ CHECK_ORDER_i++; \ } \ ASSERT_INT_EQUALS(CHECK_ORDER_i, CHECK_ORDER_count); \ } while (0) static int s_test_remove_root(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_priority_queue queue; struct aws_priority_queue_node node = {12345}; int val = 0; ASSERT_SUCCESS(aws_priority_queue_init_dynamic(&queue, allocator, 16, sizeof(int), s_compare_ints)); ASSERT_SUCCESS(aws_priority_queue_push_ref(&queue, &val, &node)); ADD_ELEMS(queue, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); val = 42; ASSERT_SUCCESS(aws_priority_queue_remove(&queue, &val, &node)); ASSERT_INT_EQUALS(val, 0); ASSERT_ERROR(AWS_ERROR_PRIORITY_QUEUE_BAD_NODE, aws_priority_queue_remove(&queue, &val, &node)); CHECK_ORDER(queue, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); aws_priority_queue_clean_up(&queue); return 0; } static int s_test_remove_leaf(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_priority_queue queue; struct aws_priority_queue_node node = {12345}; ASSERT_SUCCESS(aws_priority_queue_init_dynamic(&queue, allocator, 16, sizeof(int), s_compare_ints)); ADD_ELEMS(queue, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); int val = 16; ASSERT_SUCCESS(aws_priority_queue_push_ref(&queue, &val, &node)); val = 42; ASSERT_SUCCESS(aws_priority_queue_remove(&queue, &val, &node)); ASSERT_INT_EQUALS(val, 16); ASSERT_ERROR(AWS_ERROR_PRIORITY_QUEUE_BAD_NODE, aws_priority_queue_remove(&queue, &val, &node)); CHECK_ORDER(queue, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); aws_priority_queue_clean_up(&queue); return 0; } /* * Here we force the heap to sift a value up to its parents when removing an interior node. * * 0 * 20 * 22 * 222 <- Removed, swapped with 15 * 2222 * 2221 * 221 * 2212 * 2211 * 21 * 212 * 2122 * 2121 * 211 * 2112 * 2111 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 */ static int s_test_remove_interior_sift_up(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_priority_queue queue; struct aws_priority_queue_node node = {12345}; ASSERT_SUCCESS(aws_priority_queue_init_dynamic(&queue, allocator, 16, sizeof(int), s_compare_ints)); ADD_ELEMS(queue, 0, 20, 1, 22, 21, 2, 9); int val = 222; ASSERT_SUCCESS(aws_priority_queue_push_ref(&queue, &val, &node)); ADD_ELEMS( queue, 221, 212, 211, 3, 6, 10, 13, 2222, 2221, 2212, 2211, 2122, 2121, 2112, 2111, 4, 5, 7, 8, 11, 12, 14, 15); val = 42; ASSERT_SUCCESS(aws_priority_queue_remove(&queue, &val, &node)); ASSERT_INT_EQUALS(val, 222); ASSERT_ERROR(AWS_ERROR_PRIORITY_QUEUE_BAD_NODE, aws_priority_queue_remove(&queue, &val, &node)); CHECK_ORDER( queue, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 20, 21, 22, 211, 212, 221, /* 222, */ 2111, 2112, 2121, 2122, 2211, 2212, 2221, 2222); aws_priority_queue_clean_up(&queue); return 0; } /* * Here we force the heap to sift a value down to a leaf when removing an interior node. 
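 * aws_priority_queue_remove back-fills the removed slot with the last element of the heap
 * (the diagram below shows 1 being swapped with 30). Because the swapped-in value is larger
 * than everything beneath that slot, it must sift all the way down to a leaf, which is the
 * code path this test pins down.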
* * 0 * 1 <- Removed, swapped with 30 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9 * 10 * 11 * 12 * 13 * 14 * 15 * 16 * 17 * 18 * 19 * 20 * 21 * 22 * 23 * 24 * 25 * 26 * 27 * 28 * 29 * 30 */ static int s_test_remove_interior_sift_down(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_priority_queue queue; struct aws_priority_queue_node node = {12345}; ASSERT_SUCCESS(aws_priority_queue_init_dynamic(&queue, allocator, 16, sizeof(int), s_compare_ints)); ADD_ELEMS(queue, 0); int val = 1; ASSERT_SUCCESS(aws_priority_queue_push_ref(&queue, &val, &node)); ADD_ELEMS( queue, 16, 2, 9, 17, 24, 3, 6, 10, 13, 18, 21, 25, 28, 4, 5, 7, 8, 11, 12, 14, 15, 19, 20, 22, 23, 26, 27, 29, 30); val = 42; ASSERT_SUCCESS(aws_priority_queue_remove(&queue, &val, &node)); ASSERT_INT_EQUALS(val, 1); ASSERT_ERROR(AWS_ERROR_PRIORITY_QUEUE_BAD_NODE, aws_priority_queue_remove(&queue, &val, &node)); CHECK_ORDER( queue, 0, /* 1, */ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30); aws_priority_queue_clean_up(&queue); return 0; } #define BACKPOINTER_CLEAR_NODE_COUNT 16 static int s_priority_queue_clear_backpointers_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_priority_queue queue; ASSERT_SUCCESS(aws_priority_queue_init_dynamic(&queue, allocator, 16, sizeof(int), s_compare_ints)); struct aws_priority_queue_node queue_nodes[BACKPOINTER_CLEAR_NODE_COUNT]; for (size_t i = 0; i < BACKPOINTER_CLEAR_NODE_COUNT; ++i) { aws_priority_queue_node_init(&queue_nodes[i]); } for (int i = 0; i < BACKPOINTER_CLEAR_NODE_COUNT; i++) { aws_priority_queue_push_ref(&queue, &i, &queue_nodes[i]); } for (size_t i = 0; i < BACKPOINTER_CLEAR_NODE_COUNT; ++i) { ASSERT_TRUE(aws_priority_queue_node_is_in_queue(&queue_nodes[i])); } aws_priority_queue_clear(&queue); ASSERT_INT_EQUALS(0, aws_priority_queue_size(&queue)); for (size_t i = 0; i < BACKPOINTER_CLEAR_NODE_COUNT; ++i) { ASSERT_FALSE(aws_priority_queue_node_is_in_queue(&queue_nodes[i])); } aws_priority_queue_clean_up(&queue); return 0; } AWS_TEST_CASE(priority_queue_remove_interior_sift_down_test, s_test_remove_interior_sift_down); AWS_TEST_CASE(priority_queue_remove_interior_sift_up_test, s_test_remove_interior_sift_up); AWS_TEST_CASE(priority_queue_remove_leaf_test, s_test_remove_leaf); AWS_TEST_CASE(priority_queue_remove_root_test, s_test_remove_root); AWS_TEST_CASE(priority_queue_push_pop_order_test, s_test_priority_queue_preserves_order); AWS_TEST_CASE(priority_queue_random_values_test, s_test_priority_queue_random_values); AWS_TEST_CASE(priority_queue_size_and_capacity_test, s_test_priority_queue_size_and_capacity); AWS_TEST_CASE(priority_queue_clear_backpointers_test, s_priority_queue_clear_backpointers_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/process_test.c000066400000000000000000000061121456575232400245430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include static int s_get_pid_sanity_check_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int pid = aws_get_pid(); ASSERT_TRUE(pid > 0); return AWS_OP_SUCCESS; } AWS_TEST_CASE(get_pid_sanity_check_test, s_get_pid_sanity_check_test_fn) static int s_max_io_handles_sanity_check_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; size_t soft_max_handles = aws_get_soft_limit_io_handles(); ASSERT_TRUE(soft_max_handles > 0); size_t hard_max_handles = aws_get_hard_limit_io_handles(); ASSERT_TRUE(hard_max_handles >= soft_max_handles); int error = aws_set_soft_limit_io_handles(soft_max_handles - 1); if (error) { /* this operation does nothing on some platforms such as windows, let's make sure that's why this failed. */ ASSERT_UINT_EQUALS(AWS_ERROR_UNIMPLEMENTED, aws_last_error()); } else { ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_set_soft_limit_io_handles(hard_max_handles + 1)); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(max_io_handles_sanity_check_test, s_max_io_handles_sanity_check_test_fn) #ifdef _WIN32 AWS_STATIC_STRING_FROM_LITERAL(s_test_command, "echo {\"Version\": 1, \"AccessKeyId\": \"AccessKey123\"}"); #else AWS_STATIC_STRING_FROM_LITERAL(s_test_command, "echo '{\"Version\": 1, \"AccessKeyId\": \"AccessKey123\"}'"); #endif AWS_STATIC_STRING_FROM_LITERAL(s_expected_output, "{\"Version\": 1, \"AccessKeyId\": \"AccessKey123\"}"); static int s_run_command_test_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_run_command_options options = {.command = aws_string_c_str(s_test_command)}; struct aws_run_command_result result; ASSERT_SUCCESS(aws_run_command_result_init(allocator, &result)); ASSERT_SUCCESS(aws_run_command(allocator, &options, &result)); ASSERT_TRUE(result.ret_code == 0); ASSERT_NOT_NULL(result.std_out); ASSERT_BIN_ARRAYS_EQUALS( result.std_out->bytes, result.std_out->len, s_expected_output->bytes, s_expected_output->len); aws_run_command_result_cleanup(&result); return AWS_OP_SUCCESS; } AWS_TEST_CASE(run_command_test_success, s_run_command_test_success_fn) AWS_STATIC_STRING_FROM_LITERAL(s_bad_command, "/i/dont/know/what/is/this/command"); static int s_run_command_test_bad_command_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_run_command_options options = {.command = aws_string_c_str(s_bad_command)}; struct aws_run_command_result result; ASSERT_SUCCESS(aws_run_command_result_init(allocator, &result)); ASSERT_SUCCESS(aws_run_command(allocator, &options, &result)); ASSERT_TRUE(result.ret_code != 0); ASSERT_NULL(result.std_out); aws_run_command_result_cleanup(&result); return AWS_OP_SUCCESS; } AWS_TEST_CASE(run_command_test_bad_command, s_run_command_test_bad_command_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/promise_test.c000066400000000000000000000131471456575232400245510ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include struct promise_test_work { struct aws_allocator *allocator; struct aws_promise *promise; uint64_t work_time; int error_code; void *value; void (*dtor)(void *); }; static void s_promise_test_worker(void *data) { struct promise_test_work *work = data; aws_promise_acquire(work->promise); aws_thread_current_sleep(work->work_time); if (work->error_code) { aws_promise_fail(work->promise, work->error_code); } else { aws_promise_complete(work->promise, work->value, work->dtor); } aws_promise_release(work->promise); } static struct aws_thread s_promise_test_launch_worker(struct promise_test_work *work) { const struct aws_thread_options *thread_options = aws_default_thread_options(); AWS_FATAL_ASSERT(thread_options); struct aws_thread worker_thread; AWS_FATAL_ASSERT(aws_thread_init(&worker_thread, work->allocator) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_thread_launch(&worker_thread, s_promise_test_worker, work, thread_options) == AWS_OP_SUCCESS); return worker_thread; } struct pmr_payload { struct aws_allocator *allocator; }; void s_promise_test_free(void *ptr) { struct pmr_payload *payload = ptr; aws_mem_release(payload->allocator, payload); } static int s_promise_test_wait_forever(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_promise *promise = aws_promise_new(allocator); ASSERT_NOT_NULL(promise); struct pmr_payload *payload = aws_mem_acquire(allocator, 42); payload->allocator = allocator; struct promise_test_work work = { .allocator = allocator, .promise = promise, .work_time = 2 * 1000 * 1000, .value = payload, .dtor = s_promise_test_free, }; struct aws_thread worker_thread = s_promise_test_launch_worker(&work); aws_promise_wait(promise); ASSERT_SUCCESS(aws_thread_join(&worker_thread)); aws_promise_release(promise); return 0; } AWS_TEST_CASE(promise_test_wait_forever, s_promise_test_wait_forever) static int s_promise_test_wait_for_a_bit(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_promise *promise = aws_promise_new(allocator); ASSERT_NOT_NULL(promise); struct promise_test_work work = { .allocator = allocator, .promise = promise, .work_time = 3 * 1000 * 1000, }; struct aws_thread worker_thread = s_promise_test_launch_worker(&work); /* wait until the worker finishes, in 500ms intervals */ while (!aws_promise_wait_for(promise, 500)) ; ASSERT_TRUE(aws_promise_error_code(promise) == 0); ASSERT_NULL(aws_promise_value(promise)); ASSERT_SUCCESS(aws_thread_join(&worker_thread)); aws_promise_release(promise); return 0; } AWS_TEST_CASE(promise_test_wait_for_a_bit, s_promise_test_wait_for_a_bit) static int s_promise_test_finish_immediately(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_promise *promise = aws_promise_new(allocator); ASSERT_NOT_NULL(promise); struct promise_test_work work = { .allocator = allocator, .promise = promise, .work_time = 0, }; struct aws_thread worker_thread = s_promise_test_launch_worker(&work); aws_promise_wait(promise); ASSERT_TRUE(aws_promise_error_code(promise) == 0); ASSERT_NULL(aws_promise_value(promise)); aws_promise_release(promise); ASSERT_SUCCESS(aws_thread_join(&worker_thread)); return 0; } AWS_TEST_CASE(promise_test_finish_immediately, s_promise_test_finish_immediately) static int s_promise_test_finish_before_wait(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_promise *promise = aws_promise_new(allocator); ASSERT_NOT_NULL(promise); aws_promise_fail(promise, 1024); aws_promise_wait(promise); ASSERT_TRUE(aws_promise_error_code(promise) == 1024); 
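    /* The promise was failed before wait was ever called, so aws_promise_wait() returns
     * immediately and the stored error code (1024) is observable; since the promise was
     * failed rather than completed, no value is attached, as checked next. */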
ASSERT_NULL(aws_promise_value(promise)); aws_promise_release(promise); return 0; } AWS_TEST_CASE(promise_test_finish_before_wait, s_promise_test_finish_before_wait) void s_promise_test_waiter(void *data) { struct promise_test_work *work = data; aws_promise_acquire(work->promise); /* sleep 0.2 seconds */ aws_thread_current_sleep(1000 * 1000 * 2); aws_promise_wait(work->promise); AWS_FATAL_ASSERT(aws_promise_error_code(work->promise) == 0); aws_promise_release(work->promise); } static int s_promise_test_multiple_waiters(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_promise *promise = aws_promise_new(allocator); ASSERT_NOT_NULL(promise); struct promise_test_work work = { .allocator = allocator, .promise = promise, .work_time = 2 * 1000 * 1000, .value = promise, }; struct aws_thread threads[8]; const struct aws_thread_options *worker_options = aws_default_thread_options(); for (int idx = 0; idx < AWS_ARRAY_SIZE(threads); ++idx) { aws_thread_init(&threads[idx], allocator); aws_thread_launch(&threads[idx], s_promise_test_waiter, &work, worker_options); } aws_thread_current_sleep(1000 * 1000 * 4); aws_promise_complete(promise, promise, NULL); aws_promise_release(promise); for (int idx = 0; idx < AWS_ARRAY_SIZE(threads); ++idx) { ASSERT_SUCCESS(aws_thread_join(&threads[idx])); } return 0; } AWS_TEST_CASE(promise_test_multiple_waiters, s_promise_test_multiple_waiters) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/realloc_test.c000066400000000000000000000132011456575232400245030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #ifdef __MACH__ # include #endif static size_t s_alloc_counter, s_alloc_total_size, s_call_ct_malloc, s_call_ct_free, s_call_ct_realloc; static void *s_test_alloc_acquire(struct aws_allocator *allocator, size_t size) { (void)allocator; s_alloc_counter++; s_call_ct_malloc++; s_alloc_total_size += size; uint8_t *buf = malloc(size + 16); *(size_t *)buf = size; buf += 16; return buf; } static void s_test_alloc_release(struct aws_allocator *allocator, void *ptr) { (void)allocator; uint8_t *buf = ptr; s_call_ct_free++; buf -= 16; size_t old_size = *(size_t *)buf; s_alloc_counter--; s_alloc_total_size -= old_size; free(buf); } static size_t s_original_size, s_reported_oldsize; static void *s_test_realloc(struct aws_allocator *allocator, void *ptr, size_t oldsize, size_t newsize) { (void)allocator; uint8_t *buf = ptr; buf -= 16; s_call_ct_realloc++; s_original_size = *(size_t *)buf; s_reported_oldsize = oldsize; /* Always pick a new pointer for test purposes */ void *newbuf = malloc(newsize); if (!newbuf) { abort(); } memcpy(newbuf, buf, 16 + (oldsize > newsize ? 
newsize : oldsize)); free(buf); buf = newbuf; *(size_t *)buf = newsize; s_alloc_total_size += (newsize - oldsize); return buf + 16; } static const uint8_t TEST_PATTERN[32] = {0xa5, 0x41, 0xcb, 0xe7, 0x00, 0x19, 0xd9, 0xf3, 0x60, 0x4a, 0x2b, 0x68, 0x55, 0x46, 0xb7, 0xe0, 0x74, 0x91, 0x2a, 0xbe, 0x5e, 0x41, 0x06, 0x39, 0x02, 0x02, 0xf6, 0x79, 0x1c, 0x4a, 0x08, 0xa9}; AWS_TEST_CASE(test_realloc_fallback, s_test_realloc_fallback_fn) static int s_test_realloc_fallback_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_allocator test_allocator = { .mem_acquire = s_test_alloc_acquire, .mem_release = s_test_alloc_release, .mem_realloc = NULL, }; s_call_ct_malloc = s_call_ct_free = s_call_ct_realloc = 0; void *buf = aws_mem_acquire(&test_allocator, 32); void *oldbuf = buf; memcpy(buf, TEST_PATTERN, 32); ASSERT_SUCCESS(aws_mem_realloc(&test_allocator, &buf, 32, 64)); ASSERT_INT_EQUALS(s_call_ct_malloc, 2); ASSERT_INT_EQUALS(s_call_ct_free, 1); ASSERT_INT_EQUALS(s_alloc_counter, 1); ASSERT_INT_EQUALS(s_alloc_total_size, 64); ASSERT_INT_EQUALS(memcmp(buf, TEST_PATTERN, 32), 0); ASSERT_FALSE(buf == oldbuf); aws_mem_release(&test_allocator, buf); return 0; } AWS_TEST_CASE(test_realloc_passthrough, s_test_realloc_passthrough_fn) static int s_test_realloc_passthrough_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_allocator test_allocator = { .mem_acquire = s_test_alloc_acquire, .mem_release = s_test_alloc_release, .mem_realloc = s_test_realloc, }; s_call_ct_malloc = s_call_ct_free = s_call_ct_realloc = 0; void *buf = aws_mem_acquire(&test_allocator, 32); void *oldbuf = buf; memcpy(buf, TEST_PATTERN, 32); ASSERT_SUCCESS(aws_mem_realloc(&test_allocator, &buf, 32, 64)); ASSERT_INT_EQUALS(memcmp(buf, TEST_PATTERN, 32), 0); ASSERT_INT_EQUALS(s_reported_oldsize, 32); ASSERT_INT_EQUALS(s_original_size, 32); ASSERT_INT_EQUALS(s_call_ct_malloc, 1); ASSERT_INT_EQUALS(s_call_ct_free, 0); ASSERT_FALSE(buf == oldbuf); aws_mem_release(&test_allocator, buf); return 0; } AWS_TEST_CASE(test_cf_allocator_wrapper, s_test_cf_allocator_wrapper_fn) static int s_test_cf_allocator_wrapper_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #ifdef __MACH__ CFAllocatorRef cf_allocator = aws_wrapped_cf_allocator_new(allocator); ASSERT_NOT_NULL(cf_allocator); char test_prefix[] = "test_string"; CFStringRef test_str = CFStringCreateWithCString(cf_allocator, test_prefix, kCFStringEncodingUTF8); ASSERT_NOT_NULL(test_str); /* NOLINTNEXTLINE */ ASSERT_BIN_ARRAYS_EQUALS( test_prefix, sizeof(test_prefix) - 1, CFStringGetCStringPtr(test_str, kCFStringEncodingUTF8), CFStringGetLength(test_str)); CFRelease(test_str); aws_wrapped_cf_allocator_destroy(cf_allocator); #endif return 0; } AWS_TEST_CASE(test_acquire_many, s_test_acquire_many_fn) static int s_test_acquire_many_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; void *a = NULL; void *b = NULL; void *buffer = aws_mem_acquire_many(allocator, 2, &a, (size_t)64, &b, (size_t)64); ASSERT_NOT_NULL(buffer); ASSERT_NOT_NULL(a); ASSERT_NOT_NULL(b); ASSERT_UINT_EQUALS((uintptr_t)a, (uintptr_t)buffer); ASSERT_UINT_EQUALS((uintptr_t)b, (uintptr_t)buffer + 64); ASSERT_UINT_EQUALS((uintptr_t)a % sizeof(intmax_t), 0); ASSERT_UINT_EQUALS((uintptr_t)b % sizeof(intmax_t), 0); aws_mem_release(allocator, buffer); a = NULL; b = NULL; buffer = aws_mem_acquire_many(allocator, 2, &a, (size_t)1, &b, (size_t)1); ASSERT_NOT_NULL(buffer); ASSERT_NOT_NULL(a); ASSERT_NOT_NULL(b); 
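    /* Even for these 1-byte sub-allocations, aws_mem_acquire_many spaces the returned pointers
     * sizeof(intmax_t) apart and keeps each one aligned on an intmax_t boundary, which is what
     * the offset and modulo checks below verify. */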
ASSERT_UINT_EQUALS((uintptr_t)a, (uintptr_t)buffer); ASSERT_UINT_EQUALS((uintptr_t)b, (uintptr_t)buffer + sizeof(intmax_t)); ASSERT_UINT_EQUALS((uintptr_t)a % sizeof(intmax_t), 0); ASSERT_UINT_EQUALS((uintptr_t)b % sizeof(intmax_t), 0); aws_mem_release(allocator, buffer); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/000077500000000000000000000000001456575232400236745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/ascii.txt000066400000000000000000000000441456575232400255230ustar00rootroot00000000000000This is some text encoded as ASCII. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/dir_traversal_test/000077500000000000000000000000001456575232400275745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/dir_traversal_test/first_child_dir/000077500000000000000000000000001456575232400327245ustar00rootroot00000000000000child.txt000066400000000000000000000000561456575232400344720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/dir_traversal_test/first_child_dirdir_traversal_test->first_child_dir->child.txtaws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/dir_traversal_test/root_child.txt000066400000000000000000000000421456575232400324570ustar00rootroot00000000000000dir_traversal_test->root_child.txtaws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/utf16be.txt000066400000000000000000000001201456575232400257020ustar00rootroot00000000000000This is some text encoded as UTF16 BE. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/utf16le.txt000066400000000000000000000001201456575232400257140ustar00rootroot00000000000000This is some text encoded as UTF16 LE. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/utf8.txt000066400000000000000000000000461456575232400253230ustar00rootroot00000000000000This is some text encoded in UTF8. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/resources/Å Éxample.txt000066400000000000000000000000431456575232400272040ustar00rootroot00000000000000This is a non-ascii file path file.aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/ring_buffer_test.c000066400000000000000000000362731456575232400253700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include static int s_test_1_to_1_acquire_release_wraps(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_ring_buffer ring_buffer; size_t buf_size = 16; ASSERT_SUCCESS(aws_ring_buffer_init(&ring_buffer, allocator, buf_size)); struct aws_byte_buf vended_buffer; AWS_ZERO_STRUCT(vended_buffer); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 4, &vended_buffer)); uint8_t *ptr = vended_buffer.buffer; ASSERT_UINT_EQUALS(4, vended_buffer.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer)); aws_ring_buffer_release(&ring_buffer, &vended_buffer); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer)); ASSERT_PTR_EQUALS(ptr, vended_buffer.buffer); ASSERT_UINT_EQUALS(8, vended_buffer.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer)); aws_ring_buffer_release(&ring_buffer, &vended_buffer); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 4, &vended_buffer)); ASSERT_PTR_EQUALS(ptr, vended_buffer.buffer); ASSERT_UINT_EQUALS(4, vended_buffer.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer)); aws_ring_buffer_release(&ring_buffer, &vended_buffer); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer)); ASSERT_PTR_EQUALS(ptr, vended_buffer.buffer); ASSERT_UINT_EQUALS(8, vended_buffer.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer)); aws_ring_buffer_release(&ring_buffer, &vended_buffer); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer)); ASSERT_PTR_EQUALS(ptr, vended_buffer.buffer); ASSERT_UINT_EQUALS(8, vended_buffer.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer)); aws_ring_buffer_release(&ring_buffer, &vended_buffer); aws_ring_buffer_clean_up(&ring_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ring_buffer_1_to_1_acquire_release_wraps_test, s_test_1_to_1_acquire_release_wraps) static int s_test_release_after_full(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_ring_buffer ring_buffer; size_t buf_size = 16; ASSERT_SUCCESS(aws_ring_buffer_init(&ring_buffer, allocator, buf_size)); struct aws_byte_buf vended_buffer_1; AWS_ZERO_STRUCT(vended_buffer_1); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 12, &vended_buffer_1)); uint8_t *ptr = vended_buffer_1.buffer; ASSERT_UINT_EQUALS(12, vended_buffer_1.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_1)); struct aws_byte_buf vended_buffer_2; AWS_ZERO_STRUCT(vended_buffer_2); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 4, &vended_buffer_2)); ASSERT_PTR_EQUALS(ptr + 12, vended_buffer_2.buffer); ASSERT_UINT_EQUALS(4, vended_buffer_2.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_2)); ASSERT_ERROR(AWS_ERROR_OOM, aws_ring_buffer_acquire(&ring_buffer, 1, &vended_buffer_1)); aws_ring_buffer_release(&ring_buffer, &vended_buffer_1); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer_2)); ASSERT_PTR_EQUALS(ptr, vended_buffer_2.buffer); ASSERT_UINT_EQUALS(8, vended_buffer_2.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_2)); aws_ring_buffer_release(&ring_buffer, &vended_buffer_2); aws_ring_buffer_clean_up(&ring_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ring_buffer_release_after_full_test, s_test_release_after_full) static int s_test_acquire_up_to(struct 
aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_ring_buffer ring_buffer; size_t buf_size = 16; ASSERT_SUCCESS(aws_ring_buffer_init(&ring_buffer, allocator, buf_size)); struct aws_byte_buf vended_buffer_1; AWS_ZERO_STRUCT(vended_buffer_1); ASSERT_SUCCESS(aws_ring_buffer_acquire_up_to(&ring_buffer, 1, 12, &vended_buffer_1)); uint8_t *ptr = vended_buffer_1.buffer; ASSERT_UINT_EQUALS(12, vended_buffer_1.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_1)); struct aws_byte_buf vended_buffer_2; AWS_ZERO_STRUCT(vended_buffer_2); /* only 4 are available, so this should error. */ ASSERT_ERROR(AWS_ERROR_OOM, aws_ring_buffer_acquire_up_to(&ring_buffer, 5, 8, &vended_buffer_2)); ASSERT_SUCCESS(aws_ring_buffer_acquire_up_to(&ring_buffer, 4, 8, &vended_buffer_2)); ASSERT_PTR_EQUALS(ptr + 12, vended_buffer_2.buffer); ASSERT_UINT_EQUALS(4, vended_buffer_2.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_2)); ASSERT_ERROR(AWS_ERROR_OOM, aws_ring_buffer_acquire_up_to(&ring_buffer, 1, 1, &vended_buffer_1)); aws_ring_buffer_release(&ring_buffer, &vended_buffer_1); aws_ring_buffer_release(&ring_buffer, &vended_buffer_2); ASSERT_SUCCESS(aws_ring_buffer_acquire_up_to(&ring_buffer, 1, 8, &vended_buffer_1)); ASSERT_PTR_EQUALS(ptr, vended_buffer_1.buffer); ASSERT_UINT_EQUALS(8, vended_buffer_1.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_1)); ASSERT_SUCCESS(aws_ring_buffer_acquire_up_to(&ring_buffer, 1, 8, &vended_buffer_2)); ASSERT_PTR_EQUALS(ptr + 8, vended_buffer_2.buffer); ASSERT_UINT_EQUALS(8, vended_buffer_2.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_2)); aws_ring_buffer_release(&ring_buffer, &vended_buffer_1); aws_ring_buffer_release(&ring_buffer, &vended_buffer_2); aws_ring_buffer_clean_up(&ring_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ring_buffer_acquire_up_to_test, s_test_acquire_up_to) static int s_test_acquire_tail_always_chases_head(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_ring_buffer ring_buffer; size_t buf_size = 16; ASSERT_SUCCESS(aws_ring_buffer_init(&ring_buffer, allocator, buf_size)); struct aws_byte_buf vended_buffer_1; AWS_ZERO_STRUCT(vended_buffer_1); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 12, &vended_buffer_1)); uint8_t *ptr = vended_buffer_1.buffer; ASSERT_UINT_EQUALS(12, vended_buffer_1.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_1)); struct aws_byte_buf vended_buffer_2; AWS_ZERO_STRUCT(vended_buffer_2); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 4, &vended_buffer_2)); ASSERT_PTR_EQUALS(ptr + 12, vended_buffer_2.buffer); ASSERT_UINT_EQUALS(4, vended_buffer_2.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_2)); ASSERT_ERROR(AWS_ERROR_OOM, aws_ring_buffer_acquire(&ring_buffer, 1, &vended_buffer_1)); aws_ring_buffer_release(&ring_buffer, &vended_buffer_1); /* we should turn over right here*/ ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer_1)); ASSERT_PTR_EQUALS(ptr, vended_buffer_1.buffer); ASSERT_UINT_EQUALS(8, vended_buffer_1.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_1)); aws_ring_buffer_release(&ring_buffer, &vended_buffer_2); ASSERT_ERROR(AWS_ERROR_OOM, aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer_2)); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 7, &vended_buffer_2)); 
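    /* Only 7 of the 8 remaining bytes could be vended: the implementation presumably never lets
     * the head advance to meet the tail exactly (so a full buffer stays distinguishable from an
     * empty one), hence the 8-byte acquire just above failed with AWS_ERROR_OOM while the 7-byte
     * acquire succeeded, landing at ptr + 8 as asserted next. */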
ASSERT_PTR_EQUALS(ptr + 8, vended_buffer_2.buffer); ASSERT_UINT_EQUALS(7, vended_buffer_2.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_2)); /* tail will flip here. */ aws_ring_buffer_release(&ring_buffer, &vended_buffer_1); ASSERT_ERROR(AWS_ERROR_OOM, aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer_1)); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 7, &vended_buffer_1)); ASSERT_PTR_EQUALS(ptr, vended_buffer_1.buffer); ASSERT_UINT_EQUALS(7, vended_buffer_1.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_1)); aws_ring_buffer_release(&ring_buffer, &vended_buffer_2); ASSERT_ERROR(AWS_ERROR_OOM, aws_ring_buffer_acquire(&ring_buffer, 8, &vended_buffer_2)); ASSERT_SUCCESS(aws_ring_buffer_acquire(&ring_buffer, 7, &vended_buffer_2)); ASSERT_PTR_EQUALS(ptr + 7, vended_buffer_2.buffer); ASSERT_UINT_EQUALS(7, vended_buffer_2.capacity); ASSERT_TRUE(aws_ring_buffer_buf_belongs_to_pool(&ring_buffer, &vended_buffer_2)); aws_ring_buffer_clean_up(&ring_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(ring_buffer_acquire_tail_always_chases_head_test, s_test_acquire_tail_always_chases_head) struct mt_test_data { struct aws_ring_buffer ring_buf; struct aws_linked_list buffer_queue; struct aws_mutex mutex; struct aws_condition_variable termination_signal; int consumer_count; int max_count; bool consumer_finished; bool match_failed; }; struct mt_test_buffer_node { struct aws_linked_list_node node; struct aws_byte_buf buf; }; /* why so high? because the up_to allocs can get REALLY fragmented. */ #define MT_BUFFER_COUNT 60 #define MT_TEST_BUFFER_SIZE 16 static void s_consumer_thread(void *args) { struct mt_test_data *test_data = args; while (test_data->consumer_count < test_data->max_count) { aws_mutex_lock(&test_data->mutex); struct aws_linked_list_node *node = NULL; if (!aws_linked_list_empty(&test_data->buffer_queue)) { node = aws_linked_list_pop_front(&test_data->buffer_queue); } aws_mutex_unlock(&test_data->mutex); if (!node) { continue; } struct mt_test_buffer_node *buffer_node = AWS_CONTAINER_OF(node, struct mt_test_buffer_node, node); char counter_data[MT_TEST_BUFFER_SIZE + 1]; AWS_ZERO_ARRAY(counter_data); size_t written = 0; int num_to_write = test_data->consumer_count++; /* all this does is print count out as far as it can to fill the buffer. 
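 * The consumer then regenerates the pattern the producer should have written for this counter
 * value (producer and consumer counters advance in lockstep through the FIFO queue) and
 * memcmp()s it against the vended buffer: any mismatch means the ring buffer handed out or
 * recycled memory that was still in use, so match_failed is set and the thread bails out.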
*/ while (written < buffer_node->buf.capacity) { int bytes_written = snprintf(counter_data + written, buffer_node->buf.capacity - written, "%d", num_to_write); if (bytes_written > 0 && bytes_written < (int)(buffer_node->buf.capacity - written)) { written += bytes_written; } else { break; } } int not_matched = memcmp(buffer_node->buf.buffer, counter_data, written); if (not_matched) { fprintf(stderr, "match failed!\n"); fprintf(stderr, "produced buffer was "); fwrite(buffer_node->buf.buffer, 1, buffer_node->buf.capacity, stderr); fprintf(stderr, " but we were expecting %s\n", counter_data); test_data->match_failed = true; aws_ring_buffer_release(&test_data->ring_buf, &buffer_node->buf); break; } aws_ring_buffer_release(&test_data->ring_buf, &buffer_node->buf); } aws_mutex_lock(&test_data->mutex); test_data->consumer_finished = true; aws_mutex_unlock(&test_data->mutex); aws_condition_variable_notify_one(&test_data->termination_signal); } static bool s_termination_predicate(void *args) { struct mt_test_data *test_data = args; return test_data->consumer_finished; } static int s_acquire_up_to_wrapper(struct aws_ring_buffer *ring_buf, size_t requested, struct aws_byte_buf *dest) { if (requested >= 4) { return aws_ring_buffer_acquire_up_to(ring_buf, 4, requested, dest); } return aws_ring_buffer_acquire_up_to(ring_buf, 1, requested, dest); } static int s_test_acquire_any_muti_threaded( struct aws_allocator *allocator, int (*acquire_fn)(struct aws_ring_buffer *, size_t, struct aws_byte_buf *)) { /* spin up a consumer thread, let current thread be the producer. Let them fight it out and give a chance * for race conditions to happen and explode the universe. */ struct mt_test_data test_data = { .match_failed = false, .consumer_count = 0, .mutex = AWS_MUTEX_INIT, .max_count = 1000000, .consumer_finished = false, .termination_signal = AWS_CONDITION_VARIABLE_INIT, }; static struct mt_test_buffer_node s_buffer_nodes[MT_BUFFER_COUNT]; /* 3 16 byte acquirable buffers + 15 bytes == 63 */ ASSERT_SUCCESS(aws_ring_buffer_init(&test_data.ring_buf, allocator, 3 * MT_TEST_BUFFER_SIZE + 15)); aws_linked_list_init(&test_data.buffer_queue); struct aws_thread consumer_thread; ASSERT_SUCCESS(aws_thread_init(&consumer_thread, allocator)); ASSERT_SUCCESS(aws_thread_launch(&consumer_thread, s_consumer_thread, &test_data, NULL)); int counter = 0; /* consumer_finished isn't protected and we don't need it to be immediately and it won't rip, * we just need it eventually if the consumer thread fails prematurely. */ while (counter < test_data.max_count && !test_data.consumer_finished) { struct aws_byte_buf dest; AWS_ZERO_STRUCT(dest); if (!acquire_fn(&test_data.ring_buf, MT_TEST_BUFFER_SIZE, &dest)) { size_t written = 0; memset(dest.buffer, 0, dest.capacity); /* all this does is print count out as far as it can to fill the buffer. 
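 * This mirrors the consumer-side check: the producer stamps each acquired buffer with the
 * repeated decimal counter, parks it in one of the MT_BUFFER_COUNT statically allocated queue
 * nodes, and pushes the node onto the shared queue under the mutex; the consumer verifies the
 * same pattern before releasing the buffer back to the ring.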
*/ while (written < dest.capacity) { int bytes_written = snprintf((char *)dest.buffer + written, dest.capacity - written, "%d", counter); if (bytes_written > 0 && bytes_written < (int)(dest.capacity - written)) { written += bytes_written; } else { break; } } int index = counter % MT_BUFFER_COUNT; s_buffer_nodes[index].buf = dest; counter++; aws_mutex_lock(&test_data.mutex); aws_linked_list_push_back(&test_data.buffer_queue, &s_buffer_nodes[index].node); aws_mutex_unlock(&test_data.mutex); } } aws_mutex_lock(&test_data.mutex); aws_condition_variable_wait_pred( &test_data.termination_signal, &test_data.mutex, s_termination_predicate, &test_data); aws_mutex_unlock(&test_data.mutex); aws_thread_join(&consumer_thread); aws_ring_buffer_clean_up(&test_data.ring_buf); aws_thread_clean_up(&consumer_thread); ASSERT_FALSE(test_data.match_failed); return AWS_OP_SUCCESS; } static int s_test_acquire_multi_threaded(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_acquire_any_muti_threaded(allocator, aws_ring_buffer_acquire); } AWS_TEST_CASE(ring_buffer_acquire_multi_threaded_test, s_test_acquire_multi_threaded) static int s_test_acquire_up_to_multi_threaded(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_acquire_any_muti_threaded(allocator, s_acquire_up_to_wrapper); } AWS_TEST_CASE(ring_buffer_acquire_up_to_multi_threaded_test, s_test_acquire_up_to_multi_threaded) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/rw_lock_test.c000066400000000000000000000116641456575232400245350ustar00rootroot00000000000000/* * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include #include #include static int s_test_rw_lock_acquire_release(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_rw_lock rw_lock; aws_rw_lock_init(&rw_lock); ASSERT_SUCCESS(aws_rw_lock_wlock(&rw_lock), "rw_lock acquire should have returned success."); ASSERT_SUCCESS(aws_rw_lock_wunlock(&rw_lock), "rw_lock release should have returned success."); ASSERT_SUCCESS(aws_rw_lock_rlock(&rw_lock), "rw_lock acquire should have returned success."); ASSERT_SUCCESS(aws_rw_lock_runlock(&rw_lock), "rw_lock release should have returned success."); aws_rw_lock_clean_up(&rw_lock); return 0; } AWS_TEST_CASE(rw_lock_aquire_release_test, s_test_rw_lock_acquire_release) struct thread_rw_lock_data { struct aws_rw_lock rw_lock; volatile int counter; int max_counts; volatile int thread_fn_increments; }; static void s_rw_lock_thread_fn(void *rw_lock_data) { struct thread_rw_lock_data *p_rw_lock = (struct thread_rw_lock_data *)rw_lock_data; int finished = 0; while (!finished) { aws_rw_lock_rlock(&p_rw_lock->rw_lock); if (p_rw_lock->counter != p_rw_lock->max_counts) { int counter = p_rw_lock->counter + 1; aws_rw_lock_runlock(&p_rw_lock->rw_lock); aws_rw_lock_wlock(&p_rw_lock->rw_lock); p_rw_lock->counter = counter; p_rw_lock->thread_fn_increments += 1; aws_rw_lock_wunlock(&p_rw_lock->rw_lock); aws_rw_lock_rlock(&p_rw_lock->rw_lock); finished = p_rw_lock->counter == p_rw_lock->max_counts; } else { finished = 1; } aws_rw_lock_runlock(&p_rw_lock->rw_lock); } } static int s_test_rw_lock_is_actually_rw_lock(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct thread_rw_lock_data rw_lock_data = { .counter = 0, .max_counts = 1000000, .thread_fn_increments = 0, }; aws_rw_lock_init(&rw_lock_data.rw_lock); struct aws_thread thread; aws_thread_init(&thread, allocator); ASSERT_SUCCESS( aws_thread_launch(&thread, s_rw_lock_thread_fn, &rw_lock_data, 0), "thread creation failed with error %d", aws_last_error()); int finished = 0; while (!finished) { aws_rw_lock_rlock(&rw_lock_data.rw_lock); finished = rw_lock_data.counter == rw_lock_data.max_counts; aws_rw_lock_runlock(&rw_lock_data.rw_lock); } ASSERT_SUCCESS(aws_thread_join(&thread), "Thread join failed with error code %d.", aws_last_error()); ASSERT_INT_EQUALS( rw_lock_data.thread_fn_increments, rw_lock_data.max_counts, "Thread 2 should have written all data"); ASSERT_INT_EQUALS( rw_lock_data.max_counts, rw_lock_data.counter, "Both threads should have written exactly the max counts."); aws_thread_clean_up(&thread); aws_rw_lock_clean_up(&rw_lock_data.rw_lock); return 0; } AWS_TEST_CASE(rw_lock_is_actually_rw_lock_test, s_test_rw_lock_is_actually_rw_lock) static int s_iterations = 0; static void s_thread_reader_fn(void *ud) { struct aws_rw_lock *lock = ud; int finished = 0; while (!finished) { aws_rw_lock_rlock(lock); finished = s_iterations == 10000; aws_rw_lock_runlock(lock); } } static int s_test_rw_lock_many_readers(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_rw_lock lock; aws_rw_lock_init(&lock); struct aws_thread threads[2]; AWS_ZERO_ARRAY(threads); for (size_t i = 0; i < AWS_ARRAY_SIZE(threads); ++i) { aws_thread_init(&threads[i], allocator); ASSERT_SUCCESS( aws_thread_launch(&threads[i], s_thread_reader_fn, &lock, 0), "thread creation failed with error %d", aws_last_error()); } int finished = 0; while (!finished) { aws_rw_lock_wlock(&lock); finished = ++s_iterations == 10000; aws_rw_lock_wunlock(&lock); } for (size_t i = 0; i < AWS_ARRAY_SIZE(threads); ++i) { 
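        /* By this point the writer loop has driven s_iterations to 10000, so each reader has
         * observed the terminal value under its read lock and exited; the join should return
         * promptly rather than block. */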
ASSERT_SUCCESS(aws_thread_join(&threads[i]), "Thread join failed with error code %d.", aws_last_error()); aws_thread_clean_up(&threads[i]); } aws_rw_lock_clean_up(&lock); return 0; } AWS_TEST_CASE(rw_lock_many_readers_test, s_test_rw_lock_many_readers) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/split_test.c000066400000000000000000000300531456575232400242210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_TEST_CASE(test_char_split_happy_path, s_test_char_split_happy_path_fn) static int s_test_char_split_happy_path_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char str_to_split[] = "testa;testb;testc"; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(3, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); char *expected = "testa"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 1)); expected = "testb"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 2)); expected = "testc"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_ends_with_token, s_test_char_split_ends_with_token_fn) static int s_test_char_split_ends_with_token_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char str_to_split[] = "testa;testb;testc;"; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(4, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); char *expected = "testa"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 1)); expected = "testb"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 2)); expected = "testc"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 3)); expected = ""; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_begins_with_token, s_test_char_split_begins_with_token_fn) static int s_test_char_split_begins_with_token_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char str_to_split[] = ";testa;testb;testc"; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(4, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; 
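    /* A delimiter at the very start yields an empty cursor at index 0; the split helpers report
     * every boundary rather than collapsing empty segments, so ";testa;testb;testc" splits into
     * { "", "testa", "testb", "testc" }. */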
ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); char *expected = ""; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 1)); expected = "testa"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 2)); expected = "testb"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 3)); expected = "testc"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_token_not_present, s_test_char_split_token_not_present_fn) static int s_test_char_split_token_not_present_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char str_to_split[] = "testa"; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(1, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); char *expected = "testa"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_empty, s_test_char_split_empty_fn) static int s_test_char_split_empty_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char str_to_split[] = ""; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(1, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); ASSERT_INT_EQUALS(0, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_zeroed, s_test_char_split_zeroed_fn) static int s_test_char_split_zeroed_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor to_split; AWS_ZERO_STRUCT(to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(1, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); ASSERT_INT_EQUALS(0, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_adj_tokens, s_test_char_split_adj_tokens_fn) static int s_test_char_split_adj_tokens_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char str_to_split[] = "testa;;testb;testc"; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(4, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); char *expected = "testa"; ASSERT_BIN_ARRAYS_EQUALS(expected, 
strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 1)); expected = ""; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 2)); expected = "testb"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 3)); expected = "testc"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_with_max_splits, s_test_char_split_with_max_splits_fn) static int s_test_char_split_with_max_splits_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char str_to_split[] = ";testa;testb;testc"; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; ASSERT_SUCCESS(aws_array_list_init_dynamic(&output, allocator, 4, sizeof(struct aws_byte_cursor))); ASSERT_SUCCESS(aws_byte_cursor_split_on_char_n(&to_split, ';', 2, &output)); ASSERT_INT_EQUALS(3, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); char *expected = ""; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 1)); expected = "testa"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 2)); expected = "testb;testc"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); aws_array_list_clean_up(&output); return 0; } AWS_TEST_CASE(test_char_split_output_too_small, s_test_char_split_output_too_small_fn) static int s_test_char_split_output_too_small_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char str_to_split[] = "testa;testb;testc;"; struct aws_byte_cursor to_split = aws_byte_cursor_from_c_str(str_to_split); struct aws_array_list output; struct aws_byte_cursor output_array[3] = {{0}}; aws_array_list_init_static(&output, output_array, 3, sizeof(struct aws_byte_cursor)); ASSERT_ERROR(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE, aws_byte_cursor_split_on_char(&to_split, ';', &output)); ASSERT_INT_EQUALS(3, aws_array_list_length(&output)); struct aws_byte_cursor value = {0}; ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 0)); char *expected = "testa"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 1)); expected = "testb"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); ASSERT_SUCCESS(aws_array_list_get_at(&output, &value, 2)); expected = "testc"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), value.ptr, value.len); return 0; } AWS_TEST_CASE(test_byte_cursor_next_split, s_test_byte_cursor_next_split) static int s_test_byte_cursor_next_split(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_cursor to_split1 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("testa;testb;testc;a"); struct aws_byte_cursor result1 = {0}; ASSERT_TRUE(aws_byte_cursor_next_split(&to_split1, ';', &result1)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result1, "testa"); ASSERT_TRUE(aws_byte_cursor_next_split(&to_split1, ';', &result1)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result1, "testb"); ASSERT_TRUE(aws_byte_cursor_next_split(&to_split1, ';', &result1)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result1, "testc"); 
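    /* aws_byte_cursor_next_split is the iterator-style counterpart: each successful call advances
     * the result cursor to the next ';'-delimited segment ("testa", "testb", "testc", then "a"),
     * and once the input is exhausted it returns false with an empty result, as the remaining
     * asserts show. */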
ASSERT_TRUE(aws_byte_cursor_next_split(&to_split1, ';', &result1)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result1, "a"); ASSERT_FALSE(aws_byte_cursor_next_split(&to_split1, ';', &result1)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result1, ""); struct aws_byte_cursor to_split2 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""); struct aws_byte_cursor result2 = {0}; ASSERT_TRUE(aws_byte_cursor_next_split(&to_split2, ';', &result2)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result2, ""); ASSERT_FALSE(aws_byte_cursor_next_split(&to_split2, ';', &result2)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result2, ""); struct aws_byte_cursor to_split3 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(";;"); struct aws_byte_cursor result3 = {0}; ASSERT_TRUE(aws_byte_cursor_next_split(&to_split3, ';', &result3)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result3, ""); ASSERT_TRUE(aws_byte_cursor_next_split(&to_split3, ';', &result3)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result3, ""); ASSERT_TRUE(aws_byte_cursor_next_split(&to_split3, ';', &result3)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result3, ""); ASSERT_FALSE(aws_byte_cursor_next_split(&to_split3, ';', &result3)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result3, ""); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/string_test.c000066400000000000000000000216021456575232400243740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_TEST_CASE(string_tests, s_string_tests_fn); static int s_string_tests_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Test: static string creation from macro works. */ AWS_STATIC_STRING_FROM_LITERAL(test_string_1, "foofaraw"); ASSERT_NULL(test_string_1->allocator, "Static string should have no allocator."); ASSERT_INT_EQUALS(test_string_1->len, 8, "Length should have been set correctly."); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(test_string_1), test_string_1->len, "foofaraw", 8, "Data bytes should have been set correctly."); ASSERT_INT_EQUALS( aws_string_bytes(test_string_1)[test_string_1->len], '\0', "Static string should have null byte at end."); /* Test: string creation works. */ struct aws_string *test_string_2 = aws_string_new_from_c_str(allocator, "foofaraw"); ASSERT_NOT_NULL(test_string_2, "Memory allocation of string should have succeeded."); ASSERT_PTR_EQUALS(test_string_2->allocator, allocator, "Allocator should have been set correctly."); ASSERT_INT_EQUALS(test_string_2->len, 8, "Length should have been set correctly."); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(test_string_2), test_string_2->len, "foofaraw", 8, "Data bytes should have been set correctly."); ASSERT_INT_EQUALS( aws_string_bytes(test_string_2)[test_string_2->len], '\0', "String from C-string should have null byte at end."); /* Test: strings from first two tests are equal and have same hashes. */ ASSERT_TRUE(aws_string_eq(test_string_1, test_string_2), "Buffers should be equal."); ASSERT_INT_EQUALS( aws_hash_string(test_string_1), aws_hash_string(test_string_2), "Hash values of byte buffers should be equal."); /* Test: write from string to byte cursor works. */ uint8_t dest[8] = {0}; struct aws_byte_buf dest_cur = aws_byte_buf_from_empty_array(dest, sizeof(dest)); ASSERT_TRUE( aws_byte_buf_write_from_whole_string(&dest_cur, test_string_2), "Write from whole string should have succeeded."); ASSERT_BIN_ARRAYS_EQUALS(dest, 8, "foofaraw", 8); /* Test: write from string fails cleanly when byte cursor too short. 
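 * The destination buffer holds only 7 bytes for an 8-byte string, so the write is expected to be
 * rejected as a whole: the buffer length stays 0 and no partial bytes are copied in.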
*/ int8_t short_dest[7] = {0}; struct aws_byte_buf short_dest_buf = aws_byte_buf_from_empty_array(short_dest, sizeof(short_dest)); ASSERT_FALSE( aws_byte_buf_write_from_whole_string(&short_dest_buf, test_string_2), "Write from whole buffer should have failed."); ASSERT_INT_EQUALS(short_dest_buf.len, 0, "Destination cursor length should be unchanged."); ASSERT_INT_EQUALS(0, short_dest_buf.buffer[0], "Destination cursor should not have received data."); /* Test: can duplicate both a static string and an allocated one. */ struct aws_string *dup_string_1 = aws_string_new_from_string(allocator, test_string_1); ASSERT_NOT_NULL(dup_string_1, "Memory allocation of string should have succeeded."); ASSERT_TRUE(aws_string_eq(test_string_1, dup_string_1), "Strings should be equal."); struct aws_string *dup_string_2 = aws_string_new_from_string(allocator, test_string_2); ASSERT_NOT_NULL(dup_string_2, "Memory allocation of string should have succeeded."); ASSERT_TRUE(aws_string_eq(test_string_2, dup_string_2), "Strings should be equal."); /* Test: can clone_or_reuse both a static string and an allocated one. */ struct aws_string *clone_string_1 = aws_string_clone_or_reuse(allocator, test_string_1); ASSERT_NOT_NULL(clone_string_1, "Memory allocation of string should have succeeded."); ASSERT_TRUE(aws_string_eq(test_string_1, clone_string_1), "Strings should be equal."); ASSERT_TRUE(test_string_1 == clone_string_1, "Static strings should be reused"); struct aws_string *clone_string_2 = aws_string_clone_or_reuse(allocator, test_string_2); ASSERT_NOT_NULL(clone_string_2, "Memory allocation of string should have succeeded."); ASSERT_TRUE(aws_string_eq(test_string_2, clone_string_2), "Strings should be equal."); ASSERT_TRUE(test_string_2 != clone_string_2, "Dynamic strings should not be reused"); /* Test: all allocated memory is deallocated properly. 
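 * Note that clone_string_1 aliases the static test_string_1; destroying it should be a no-op
 * because a static string carries a NULL allocator, while the heap-backed strings are genuinely
 * released here.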
*/ aws_string_destroy(test_string_2); aws_string_destroy(dup_string_1); aws_string_destroy(dup_string_2); aws_string_destroy(clone_string_1); aws_string_destroy(clone_string_2); return 0; } AWS_TEST_CASE(binary_string_test, s_binary_string_test_fn); static int s_binary_string_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t test_array[] = {0x86, 0x75, 0x30, 0x90, 0x00, 0xde, 0xad, 0xbe, 0xef}; size_t len = sizeof(test_array); struct aws_string *binary_string = aws_string_new_from_array(allocator, test_array, len); ASSERT_NOT_NULL(binary_string, "Memory allocation of string should have succeeded."); ASSERT_PTR_EQUALS(allocator, binary_string->allocator, "Allocator should have been set correctly."); ASSERT_BIN_ARRAYS_EQUALS( test_array, len, aws_string_bytes(binary_string), binary_string->len, "Binary string bytes should be same as source array."); ASSERT_INT_EQUALS( aws_string_bytes(binary_string)[binary_string->len], 0x00, "String from binary array should have null byte at end"); aws_string_destroy(binary_string); return 0; } AWS_TEST_CASE(string_compare_test, s_string_compare_test_fn); static int s_string_compare_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; AWS_STATIC_STRING_FROM_LITERAL(empty, ""); AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); AWS_STATIC_STRING_FROM_LITERAL(bar, "bar"); AWS_STATIC_STRING_FROM_LITERAL(foobar, "foobar"); AWS_STATIC_STRING_FROM_LITERAL(foo2, "foo"); AWS_STATIC_STRING_FROM_LITERAL(foobaz, "foobaz"); AWS_STATIC_STRING_FROM_LITERAL(bar_food, "bar food"); AWS_STATIC_STRING_FROM_LITERAL(bar_null_food, "bar\0food"); AWS_STATIC_STRING_FROM_LITERAL(bar_null_back, "bar\0back"); ASSERT_TRUE(aws_string_compare(empty, bar) < 0); ASSERT_TRUE(aws_string_compare(foo, bar) > 0); ASSERT_TRUE(aws_string_compare(bar, foo) < 0); ASSERT_TRUE(aws_string_compare(foo, foobar) < 0); ASSERT_TRUE(aws_string_compare(foo, foo2) == 0); ASSERT_TRUE(aws_string_compare(foobar, foobaz) < 0); ASSERT_TRUE(aws_string_compare(foobaz, empty) > 0); ASSERT_TRUE(aws_string_compare(empty, empty) == 0); ASSERT_TRUE(aws_string_compare(foo, bar_food) > 0); ASSERT_TRUE(aws_string_compare(bar_food, bar) > 0); ASSERT_TRUE(aws_string_compare(bar_null_food, bar) > 0); ASSERT_TRUE(aws_string_compare(bar_null_food, bar_food) < 0); ASSERT_TRUE(aws_string_compare(bar_null_food, bar_null_back) > 0); /* Test that bytes are being compared as unsigned integers. */ AWS_STATIC_STRING_FROM_LITERAL(x80, "\x80"); AWS_STATIC_STRING_FROM_LITERAL(x7f, "\x79"); ASSERT_TRUE(aws_string_compare(x80, x7f) > 0); ASSERT_TRUE(aws_string_compare(x7f, x80) < 0); return 0; } AWS_TEST_CASE(string_destroy_secure_test, string_destroy_secure_test_fn); static int string_destroy_secure_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Just verifies all memory was freed. 
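 * The _secure variant is meant to zero the string's bytes before releasing them; this test only
 * exercises the release path (empty, ordinary, and binary strings) and leaves the actual leak
 * detection to the test harness's allocator tracking.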
*/ struct aws_string *empty = aws_string_new_from_c_str(allocator, ""); struct aws_string *logorrhea = aws_string_new_from_c_str(allocator, "logorrhea"); const uint8_t bytes[] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x86, 0x75, 0x30, 0x90}; struct aws_string *deadbeef = aws_string_new_from_array(allocator, bytes, sizeof(bytes)); ASSERT_NOT_NULL(empty, "Memory allocation of string should have succeeded."); ASSERT_NOT_NULL(logorrhea, "Memory allocation of string should have succeeded."); ASSERT_NOT_NULL(deadbeef, "Memory allocation of string should have succeeded."); aws_string_destroy_secure(empty); aws_string_destroy_secure(logorrhea); aws_string_destroy_secure(deadbeef); return 0; } static int secure_strlen_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; size_t str_len = 0; const char test_string[] = "HelloWorld!"; ASSERT_SUCCESS(aws_secure_strlen(test_string, sizeof(test_string), &str_len)); ASSERT_UINT_EQUALS(sizeof(test_string) - 1, str_len); ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_secure_strlen(NULL, sizeof(test_string), &str_len)); ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_secure_strlen(test_string, sizeof(test_string), NULL)); ASSERT_ERROR( AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED, aws_secure_strlen(test_string, sizeof(test_string) - 1, &str_len)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(secure_strlen_test, secure_strlen_test_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/system_info_tests.c000066400000000000000000000150361456575232400256140ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "logging/test_logger.h" #include static int s_test_cpu_count_at_least_works_superficially_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; size_t processor_count = aws_system_info_processor_count(); /* I think this is a fairly reasonable assumption given the circumstances * (you know this test is part of a program * that must be running on at least one core).... 
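* aws_system_info_processor_count() presumably reports the count of online logical processors, so any
* value greater than zero is acceptable here.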
*/ ASSERT_TRUE(processor_count > 0); return 0; } AWS_TEST_CASE(test_cpu_count_at_least_works_superficially, s_test_cpu_count_at_least_works_superficially_fn) #if defined(_WIN32) # include # define DIRSEP "\\" #else # define DIRSEP "/" #endif static int s_test_stack_trace_decoding(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_logger test_log; test_logger_init(&test_log, allocator, AWS_LL_TRACE, 0); aws_logger_set(&test_log); int line = 0; /* captured on line of aws_backtrace_log call to match call site */ (void)line; /* may not be used if debug info is unavailable */ aws_backtrace_log(AWS_LL_TRACE), (line = __LINE__); /* NOLINT */ struct test_logger_impl *log = test_log.p_impl; ASSERT_NOT_NULL(log); struct aws_byte_buf *buffer = &log->log_buffer; (void)buffer; #if defined(AWS_BACKTRACE_STACKS_AVAILABLE) && defined(DEBUG_BUILD) /* ensure that this file/function is found */ char *file = __FILE__; char *next = strstr(file, DIRSEP); /* strip path info, just filename will be found */ while (next) { file = next + 1; next = strstr(file, DIRSEP); } struct aws_byte_cursor null_term = aws_byte_cursor_from_array("", 1); aws_byte_buf_append_dynamic(buffer, &null_term); fprintf(stderr, "%s", (const char *)buffer->buffer); const char *func = __func__; if (func[0] == 's' && func[1] == '_') { func += 2; /* skip over s_ */ } ASSERT_NOT_NULL(strstr((const char *)buffer->buffer, func)); /* if this is not a debug build, there may not be symbols, so the test cannot * verify if a best effort was made */ if (strstr((const char *)buffer->buffer, file)) { /* check for the call site of aws_backtrace_print. Note that line numbers are off by one * in both directions depending on compiler, so we check a range around the call site __LINE__ * The line number can also be ? on old compilers */ char fileline[4096]; uint32_t found_file_line = 0; for (int lineno = line - 1; lineno <= line + 1; ++lineno) { snprintf(fileline, sizeof(fileline), "%s:%d", file, lineno); found_file_line = strstr((const char *)buffer->buffer, fileline) != NULL; if (found_file_line) { break; } } if (!found_file_line) { snprintf(fileline, sizeof(fileline), "%s:?", file); found_file_line = strstr((const char *)buffer->buffer, fileline) != NULL; } ASSERT_TRUE(found_file_line); } #endif aws_logger_clean_up(&test_log); return 0; } AWS_TEST_CASE(test_stack_trace_decoding, s_test_stack_trace_decoding); static int s_test_platform_build_os_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; enum aws_platform_os build_os = aws_get_platform_build_os(); #if defined(AWS_OS_APPLE) ASSERT_INT_EQUALS(build_os, AWS_PLATFORM_OS_MAC); #elif defined(_WIN32) ASSERT_INT_EQUALS(build_os, AWS_PLATFORM_OS_WINDOWS); #else ASSERT_INT_EQUALS(build_os, AWS_PLATFORM_OS_UNIX); #endif return 0; } AWS_TEST_CASE(test_platform_build_os, s_test_platform_build_os_fn) static int s_test_sanity_check_numa_discovery(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); size_t processor_count = aws_system_info_processor_count(); ASSERT_TRUE(processor_count > 0); uint16_t group_count = aws_get_cpu_group_count(); ASSERT_TRUE(group_count > 0); /* log for the test output since it's the only way I can verify on certain platforms this looks correct. 
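* The loop below queries each CPU group reported by aws_get_cpu_group_count(), fetches the per-group CPU
* count and ids, requires at least one valid cpu_id per group, and finally checks that the NUMA-derived
* total never exceeds aws_system_info_processor_count().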
*/ AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "found %d cpu groups", (int)group_count); size_t total_cpus_found_via_numa = 0; for (uint16_t i = 0; i < group_count; ++i) { size_t cpus_per_group = aws_get_cpu_count_for_group(i); AWS_LOGF_INFO( AWS_LS_COMMON_GENERAL, "found cpu count %d, which lives on group node %d", (int)cpus_per_group, (int)i); ASSERT_TRUE(cpus_per_group > 0); total_cpus_found_via_numa += cpus_per_group; struct aws_cpu_info *cpus_for_group = aws_mem_calloc(allocator, cpus_per_group, sizeof(struct aws_cpu_info)); ASSERT_NOT_NULL(cpus_per_group); aws_get_cpu_ids_for_group(i, cpus_for_group, cpus_per_group); /* make sure at least one is set */ bool at_least_one = false; for (size_t cpu_idx = 0; cpu_idx < cpus_per_group; ++cpu_idx) { AWS_LOGF_INFO( AWS_LS_COMMON_GENERAL, "found cpu_id %d, which lives on group node %d. Is it likely a hyper-thread ? %s", (int)cpus_for_group[cpu_idx].cpu_id, (int)i, cpus_for_group[cpu_idx].suspected_hyper_thread ? "Yes" : "No"); if (cpus_for_group[cpu_idx].cpu_id >= 0) { at_least_one = true; } } ASSERT_TRUE(at_least_one); aws_mem_release(allocator, cpus_for_group); } ASSERT_TRUE(total_cpus_found_via_numa <= processor_count); aws_common_library_clean_up(); return 0; } AWS_TEST_CASE(test_sanity_check_numa_discovery, s_test_sanity_check_numa_discovery) static int s_test_sanity_check_environment_loader(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); struct aws_system_environment *env = aws_system_environment_load(allocator); ASSERT_NOT_NULL(env); struct aws_byte_cursor virt_vendor = aws_system_environment_get_virtualization_vendor(env); ASSERT_TRUE(aws_byte_cursor_is_valid(&virt_vendor)); struct aws_byte_cursor virt_product = aws_system_environment_get_virtualization_product_name(env); ASSERT_TRUE(aws_byte_cursor_is_valid(&virt_product)); aws_system_environment_release(env); aws_common_library_clean_up(); return 0; } AWS_TEST_CASE(test_sanity_check_environment_loader, s_test_sanity_check_environment_loader) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/system_resource_util_test.c000066400000000000000000000022241456575232400273550ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include static int s_test_memory_usage_maxrss(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* * Note: mem usage apis currently rely on getrusage on posix systems. * On freebsd maxrss seems to return current process rss based on testing, * while on every other posix platform maxrss is high water mark for rss * over the program lifetime. * Workaround it by allocating a buffer first. Long term using procfs should * avoid the issue. */ struct aws_byte_buf temp; aws_byte_buf_init(&temp, allocator, 8 * 1024 * 1024); ASSERT_SUCCESS(aws_device_random_buffer(&temp)); struct aws_memory_usage_stats mu; ASSERT_SUCCESS(aws_init_memory_usage_for_current_process(&mu)); ASSERT_TRUE(mu.maxrss > 0); aws_byte_buf_clean_up_secure(&temp); return 0; } AWS_TEST_CASE(test_memory_usage_maxrss, s_test_memory_usage_maxrss) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/task_scheduler_test.c000066400000000000000000000357331456575232400261000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include struct executed_task_data { struct aws_task *task; void *arg; enum aws_task_status status; }; static struct executed_task_data s_executed_tasks[16]; static size_t s_executed_tasks_n; /* Updates tl_executed_tasks and tl_executed_task_n when function is executed */ static void s_task_n_fn(struct aws_task *task, void *arg, enum aws_task_status status) { if (s_executed_tasks_n > AWS_ARRAY_SIZE(s_executed_tasks)) { AWS_ASSERT(0); } struct executed_task_data *data = &s_executed_tasks[s_executed_tasks_n++]; data->task = task; data->arg = arg; data->status = status; } static int s_test_scheduler_ordering(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_executed_tasks_n = 0; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); struct aws_task task2; aws_task_init(&task2, s_task_n_fn, (void *)2, "scheduler_ordering_1"); /* schedule 250 ns in the future. */ uint64_t task2_timestamp = 250; aws_task_scheduler_schedule_future(&scheduler, &task2, task2_timestamp); struct aws_task task1; aws_task_init(&task1, s_task_n_fn, (void *)1, "scheduler_ordering_2"); /* schedule now. */ aws_task_scheduler_schedule_now(&scheduler, &task1); struct aws_task task3; aws_task_init(&task3, s_task_n_fn, (void *)3, "scheduler_ordering_3"); /* schedule 500 ns in the future. */ uint64_t task3_timestamp = 500; aws_task_scheduler_schedule_future(&scheduler, &task3, task3_timestamp); /* run tasks 1 and 2 (but not 3) */ aws_task_scheduler_run_all(&scheduler, task2_timestamp); ASSERT_UINT_EQUALS(2, s_executed_tasks_n); struct executed_task_data *task_data = &s_executed_tasks[0]; ASSERT_PTR_EQUALS(&task1, task_data->task); ASSERT_PTR_EQUALS(task1.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); task_data = &s_executed_tasks[1]; ASSERT_PTR_EQUALS(&task2, task_data->task); ASSERT_PTR_EQUALS(task2.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); /* run task 3 */ aws_task_scheduler_run_all(&scheduler, task3.timestamp); ASSERT_UINT_EQUALS(3, s_executed_tasks_n); task_data = &s_executed_tasks[2]; ASSERT_PTR_EQUALS(&task3, task_data->task); ASSERT_PTR_EQUALS(task3.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); aws_task_scheduler_clean_up(&scheduler); return 0; } static void s_null_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)arg; (void)status; } static int s_test_scheduler_has_tasks(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); /* Check when no tasks scheduled */ uint64_t next_task_time = 123456; ASSERT_FALSE(aws_task_scheduler_has_tasks(&scheduler, &next_task_time)); ASSERT_UINT_EQUALS(UINT64_MAX, next_task_time); /* Check when a task is scheduled for the future */ struct aws_task timed_task; aws_task_init(&timed_task, s_null_fn, (void *)1, "scheduler_has_tasks_1"); aws_task_scheduler_schedule_future(&scheduler, &timed_task, 10); ASSERT_TRUE(aws_task_scheduler_has_tasks(&scheduler, &next_task_time)); ASSERT_UINT_EQUALS(10, next_task_time); /* Check when a task is scheduled for now */ struct aws_task now_task; aws_task_init(&now_task, s_null_fn, (void *)2, "scheduler_has_tasks_2"); aws_task_scheduler_schedule_now(&scheduler, &now_task); ASSERT_TRUE(aws_task_scheduler_has_tasks(&scheduler, &next_task_time)); ASSERT_UINT_EQUALS(0, next_task_time); aws_task_scheduler_clean_up(&scheduler); return 0; } static int 
s_test_scheduler_pops_task_fashionably_late(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_executed_tasks_n = 0; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); struct aws_task task; aws_task_init(&task, s_task_n_fn, (void *)0, "scheduler_pops_task_fashionably_late"); aws_task_scheduler_schedule_future(&scheduler, &task, 10); /* Run scheduler before task is supposed to execute, check that it didn't execute */ aws_task_scheduler_run_all(&scheduler, 5); ASSERT_UINT_EQUALS(0, s_executed_tasks_n); /* Run scheduler long after task was due to execute, check that it executed */ aws_task_scheduler_run_all(&scheduler, 500); ASSERT_UINT_EQUALS(1, s_executed_tasks_n); struct executed_task_data *task_data = &s_executed_tasks[0]; ASSERT_PTR_EQUALS(&task, task_data->task); ASSERT_PTR_EQUALS(task.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); aws_task_scheduler_clean_up(&scheduler); return 0; } /* container for running a task that schedules another task when it executes */ struct task_scheduler_reentrancy_args { struct aws_task_scheduler *scheduler; struct aws_task task; bool executed; enum aws_task_status status; struct task_scheduler_reentrancy_args *next_task_args; }; static void s_reentrancy_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct task_scheduler_reentrancy_args *reentrancy_args = (struct task_scheduler_reentrancy_args *)arg; if (reentrancy_args->next_task_args) { aws_task_scheduler_schedule_now(reentrancy_args->scheduler, &reentrancy_args->next_task_args->task); } reentrancy_args->executed = 1; reentrancy_args->status = status; } static void s_reentrancy_args_init( struct task_scheduler_reentrancy_args *args, struct aws_task_scheduler *scheduler, struct task_scheduler_reentrancy_args *next_task_args) { AWS_ZERO_STRUCT(*args); args->scheduler = scheduler; aws_task_init(&args->task, s_reentrancy_fn, args, "scheduler_reentrancy"); args->status = -1; args->next_task_args = next_task_args; } static int s_test_scheduler_reentrant_safe(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); /* When task1 executes, it schedules task2 */ struct task_scheduler_reentrancy_args task2_args; s_reentrancy_args_init(&task2_args, &scheduler, NULL); struct task_scheduler_reentrancy_args task1_args; s_reentrancy_args_init(&task1_args, &scheduler, &task2_args); aws_task_scheduler_schedule_now(&scheduler, &task1_args.task); /* Run, only task1 should have executed */ aws_task_scheduler_run_all(&scheduler, 100); ASSERT_TRUE(task1_args.executed); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task1_args.status); ASSERT_FALSE(task2_args.executed); /* Run again, task2 should execute */ aws_task_scheduler_run_all(&scheduler, 200); ASSERT_TRUE(task2_args.executed); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task2_args.status); aws_task_scheduler_clean_up(&scheduler); return 0; } struct cancellation_args { enum aws_task_status status; }; static void s_cancellation_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct cancellation_args *cancellation_args = (struct cancellation_args *)arg; cancellation_args->status = status; } static int s_test_scheduler_cleanup_cancellation(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); struct cancellation_args now_task_args = {.status = 100000}; struct 
aws_task now_task; aws_task_init(&now_task, s_cancellation_fn, &now_task_args, "scheduler_cleanup_cancellation_1"); aws_task_scheduler_schedule_now(&scheduler, &now_task); struct cancellation_args future_task_args = {.status = 100000}; struct aws_task future_task; aws_task_init(&future_task, s_cancellation_fn, &future_task_args, "scheduler_cleanup_cancellation_2"); aws_task_scheduler_schedule_future(&scheduler, &future_task, 9999999999999); aws_task_scheduler_clean_up(&scheduler); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, now_task_args.status); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, future_task_args.status); return 0; } static int s_test_scheduler_cleanup_reentrants(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); /* When now_task1 executes, it schedules now_task2 */ struct task_scheduler_reentrancy_args now_task2_args; s_reentrancy_args_init(&now_task2_args, &scheduler, NULL); struct task_scheduler_reentrancy_args now_task1_args; s_reentrancy_args_init(&now_task1_args, &scheduler, &now_task2_args); aws_task_scheduler_schedule_now(&scheduler, &now_task1_args.task); /* When future_task1 executes, it schedules future_task2 */ struct task_scheduler_reentrancy_args future_task2_args; s_reentrancy_args_init(&future_task2_args, &scheduler, NULL); struct task_scheduler_reentrancy_args future_task1_args; s_reentrancy_args_init(&future_task1_args, &scheduler, &future_task2_args); aws_task_scheduler_schedule_future(&scheduler, &future_task1_args.task, 555555555555555555); /* Clean up scheduler. All tasks should be executed with CANCELLED status */ aws_task_scheduler_clean_up(&scheduler); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, now_task1_args.status); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, now_task2_args.status); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, future_task1_args.status); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, future_task2_args.status); return AWS_OP_SUCCESS; } struct task_cancelling_task_data { struct aws_task_scheduler *scheduler; struct aws_task *task_to_cancel; }; static void s_task_cancelling_task(struct aws_task *task, void *arg, enum aws_task_status status) { s_task_n_fn(task, arg, status); struct task_cancelling_task_data *task_data = arg; aws_task_scheduler_cancel_task(task_data->scheduler, task_data->task_to_cancel); } static int s_test_scheduler_schedule_cancellation(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_executed_tasks_n = 0; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); struct aws_task task2; aws_task_init(&task2, s_task_n_fn, (void *)2, "scheduler_schedule_cancellation1"); /* schedule 250 ns in the future. */ uint64_t task2_timestamp = 250; aws_task_scheduler_schedule_future(&scheduler, &task2, task2_timestamp); struct aws_task task1; aws_task_init(&task1, s_task_n_fn, (void *)1, "scheduler_schedule_cancellation2"); /* schedule now. */ aws_task_scheduler_schedule_now(&scheduler, &task1); struct aws_task task5; aws_task_init(&task5, s_task_n_fn, (void *)3, "scheduler_schedule_cancellation5"); /* schedule 500 ns in the future. 
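* Expected callback order: task1 and task2 fire immediately with CANCELED when cancel_task is invoked
* below, task3 runs and cancels task4 from within its own callback (so task4 fires with CANCELED), and
* task5 runs normally once run_all reaches its timestamp.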
*/ uint64_t task5_timestamp = 500; aws_task_scheduler_schedule_future(&scheduler, &task5, task5_timestamp); struct aws_task task4; aws_task_init(&task4, s_task_n_fn, (void *)5, "scheduler_schedule_cancellation4"); struct task_cancelling_task_data task_cancel_data = { .scheduler = &scheduler, .task_to_cancel = &task4, }; struct aws_task task3; aws_task_init(&task3, s_task_cancelling_task, &task_cancel_data, "scheduler_schedule_cancellation3"); aws_task_scheduler_schedule_now(&scheduler, &task3); aws_task_scheduler_schedule_now(&scheduler, &task4); aws_task_scheduler_cancel_task(&scheduler, &task1); aws_task_scheduler_cancel_task(&scheduler, &task2); aws_task_scheduler_run_all(&scheduler, task5_timestamp); ASSERT_UINT_EQUALS(5, s_executed_tasks_n); struct executed_task_data *task_data = &s_executed_tasks[0]; ASSERT_PTR_EQUALS(&task1, task_data->task); ASSERT_PTR_EQUALS(task1.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, task_data->status); task_data = &s_executed_tasks[1]; ASSERT_PTR_EQUALS(&task2, task_data->task); ASSERT_PTR_EQUALS(task2.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, task_data->status); task_data = &s_executed_tasks[2]; ASSERT_PTR_EQUALS(&task3, task_data->task); ASSERT_PTR_EQUALS(task3.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); task_data = &s_executed_tasks[3]; ASSERT_PTR_EQUALS(&task4, task_data->task); ASSERT_PTR_EQUALS(task4.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, task_data->status); task_data = &s_executed_tasks[4]; ASSERT_PTR_EQUALS(&task5, task_data->task); ASSERT_PTR_EQUALS(task5.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); aws_task_scheduler_clean_up(&scheduler); return 0; } static int s_test_scheduler_cleanup_idempotent(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_task_scheduler scheduler; ASSERT_SUCCESS(aws_task_scheduler_init(&scheduler, allocator)); aws_task_scheduler_clean_up(&scheduler); aws_task_scheduler_clean_up(&scheduler); return 0; } static void s_delete_myself_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct aws_allocator *allocator = arg; aws_mem_release(allocator, task); } static int s_test_scheduler_task_delete_on_run(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_task_scheduler scheduler; aws_task_scheduler_init(&scheduler, allocator); /* Check when no tasks scheduled */ uint64_t task_time = 10; /* Check when a task is scheduled for the future */ struct aws_task *self_deleting_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_task)); aws_task_init(self_deleting_task, s_delete_myself_fn, allocator, "self_deleting_task"); aws_task_scheduler_schedule_future(&scheduler, self_deleting_task, task_time); ASSERT_TRUE(aws_task_scheduler_has_tasks(&scheduler, &task_time)); aws_task_scheduler_run_all(&scheduler, task_time); aws_task_scheduler_clean_up(&scheduler); return 0; } AWS_TEST_CASE(scheduler_pops_task_late_test, s_test_scheduler_pops_task_fashionably_late); AWS_TEST_CASE(scheduler_ordering_test, s_test_scheduler_ordering); AWS_TEST_CASE(scheduler_has_tasks_test, s_test_scheduler_has_tasks); AWS_TEST_CASE(scheduler_reentrant_safe, s_test_scheduler_reentrant_safe); AWS_TEST_CASE(scheduler_cleanup_cancellation, s_test_scheduler_cleanup_cancellation); AWS_TEST_CASE(scheduler_cleanup_reentrants, s_test_scheduler_cleanup_reentrants); AWS_TEST_CASE(scheduler_schedule_cancellation, s_test_scheduler_schedule_cancellation); 
AWS_TEST_CASE(scheduler_cleanup_idempotent, s_test_scheduler_cleanup_idempotent); AWS_TEST_CASE(scheduler_task_delete_on_run, s_test_scheduler_task_delete_on_run); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/thread_scheduler_test.c000066400000000000000000000212361456575232400263760ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct executed_task_data { struct aws_task *task; void *arg; enum aws_task_status status; }; static struct executed_task_data s_executed_tasks[16]; static struct aws_mutex s_test_mutex = AWS_MUTEX_INIT; static struct aws_condition_variable s_test_c_var = AWS_CONDITION_VARIABLE_INIT; static size_t s_executed_tasks_n; /* Updates tl_executed_tasks and tl_executed_task_n when function is executed */ static void s_task_n_fn(struct aws_task *task, void *arg, enum aws_task_status status) { AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "Invoking task"); aws_mutex_lock(&s_test_mutex); AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "Mutex Acquired"); if (s_executed_tasks_n > AWS_ARRAY_SIZE(s_executed_tasks)) { AWS_ASSERT(0); } struct executed_task_data *data = &s_executed_tasks[s_executed_tasks_n++]; data->task = task; data->arg = arg; data->status = status; aws_mutex_unlock(&s_test_mutex); AWS_LOGF_INFO(AWS_LS_COMMON_GENERAL, "Mutex Released, notifying"); aws_condition_variable_notify_one(&s_test_c_var); } static bool s_scheduled_tasks_ran_predicate(void *arg) { size_t *waiting_for = arg; return *waiting_for == s_executed_tasks_n; } static int s_test_scheduler_ordering(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); s_executed_tasks_n = 0; struct aws_thread_scheduler *thread_scheduler = aws_thread_scheduler_new(allocator, NULL); ASSERT_NOT_NULL(thread_scheduler); struct aws_task task2; aws_task_init(&task2, s_task_n_fn, (void *)2, "scheduler_ordering_1"); /* schedule 250 ms in the future. */ uint64_t task2_timestamp = 0; aws_high_res_clock_get_ticks(&task2_timestamp); task2_timestamp += 250000000; aws_thread_scheduler_schedule_future(thread_scheduler, &task2, task2_timestamp); struct aws_task task1; aws_task_init(&task1, s_task_n_fn, (void *)1, "scheduler_ordering_2"); /* schedule now. */ aws_thread_scheduler_schedule_now(thread_scheduler, &task1); struct aws_task task3; aws_task_init(&task3, s_task_n_fn, (void *)3, "scheduler_ordering_3"); /* schedule 500 ms in the future. 
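* Unlike the in-process task scheduler tests above, the timestamps here are absolute ticks from
* aws_high_res_clock_get_ticks() because the thread scheduler runs tasks on its own background thread;
* the mutex/condition-variable pair is how the test waits for the expected number of callbacks.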
*/ uint64_t task3_timestamp = 0; aws_high_res_clock_get_ticks(&task3_timestamp); task3_timestamp += 500000000; aws_thread_scheduler_schedule_future(thread_scheduler, &task3, task3_timestamp); ASSERT_SUCCESS(aws_mutex_lock(&s_test_mutex)); size_t expected_runs = 2; ASSERT_SUCCESS(aws_condition_variable_wait_pred( &s_test_c_var, &s_test_mutex, s_scheduled_tasks_ran_predicate, &expected_runs)); ASSERT_UINT_EQUALS(2, s_executed_tasks_n); struct executed_task_data *task_data = &s_executed_tasks[0]; ASSERT_PTR_EQUALS(&task1, task_data->task); ASSERT_PTR_EQUALS(task1.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); task_data = &s_executed_tasks[1]; ASSERT_PTR_EQUALS(&task2, task_data->task); ASSERT_PTR_EQUALS(task2.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); expected_runs = 3; ASSERT_SUCCESS(aws_condition_variable_wait_pred( &s_test_c_var, &s_test_mutex, s_scheduled_tasks_ran_predicate, &expected_runs)); ASSERT_SUCCESS(aws_mutex_unlock(&s_test_mutex)); /* run task 3 */ ASSERT_UINT_EQUALS(3, s_executed_tasks_n); task_data = &s_executed_tasks[2]; ASSERT_PTR_EQUALS(&task3, task_data->task); ASSERT_PTR_EQUALS(task3.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); aws_thread_scheduler_release(thread_scheduler); aws_common_library_clean_up(); return 0; } AWS_TEST_CASE(test_thread_scheduler_ordering, s_test_scheduler_ordering) static int s_test_scheduler_happy_path_cancellation(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); s_executed_tasks_n = 0; struct aws_thread_scheduler *thread_scheduler = aws_thread_scheduler_new(allocator, NULL); ASSERT_NOT_NULL(thread_scheduler); struct aws_task task2; aws_task_init(&task2, s_task_n_fn, (void *)2, "scheduler_ordering_1"); /* schedule 250 ms in the future. */ uint64_t task2_timestamp = 0; aws_high_res_clock_get_ticks(&task2_timestamp); task2_timestamp += 250000000; aws_thread_scheduler_schedule_future(thread_scheduler, &task2, task2_timestamp); struct aws_task task1; aws_task_init(&task1, s_task_n_fn, (void *)1, "scheduler_ordering_2"); /* schedule now. */ aws_thread_scheduler_schedule_now(thread_scheduler, &task1); struct aws_task task3; aws_task_init(&task3, s_task_n_fn, (void *)3, "scheduler_ordering_3"); /* schedule 500 ms in the future. 
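* task3 is scheduled far enough out that the test can cancel it after the first two callbacks complete;
* its callback is then expected to fire with AWS_TASK_STATUS_CANCELED.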
*/ uint64_t task3_timestamp = 0; aws_high_res_clock_get_ticks(&task3_timestamp); task3_timestamp += 500000000; aws_thread_scheduler_schedule_future(thread_scheduler, &task3, task3_timestamp); ASSERT_SUCCESS(aws_mutex_lock(&s_test_mutex)); size_t expected_runs = 2; ASSERT_SUCCESS(aws_condition_variable_wait_pred( &s_test_c_var, &s_test_mutex, s_scheduled_tasks_ran_predicate, &expected_runs)); ASSERT_UINT_EQUALS(2, s_executed_tasks_n); struct executed_task_data *task_data = &s_executed_tasks[0]; ASSERT_PTR_EQUALS(&task1, task_data->task); ASSERT_PTR_EQUALS(task1.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); task_data = &s_executed_tasks[1]; ASSERT_PTR_EQUALS(&task2, task_data->task); ASSERT_PTR_EQUALS(task2.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); aws_thread_scheduler_cancel_task(thread_scheduler, &task3); expected_runs = 3; ASSERT_SUCCESS(aws_condition_variable_wait_pred( &s_test_c_var, &s_test_mutex, s_scheduled_tasks_ran_predicate, &expected_runs)); ASSERT_SUCCESS(aws_mutex_unlock(&s_test_mutex)); /* run task 3 */ ASSERT_UINT_EQUALS(3, s_executed_tasks_n); task_data = &s_executed_tasks[2]; ASSERT_PTR_EQUALS(&task3, task_data->task); ASSERT_PTR_EQUALS(task3.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, task_data->status); aws_thread_scheduler_release(thread_scheduler); aws_common_library_clean_up(); return 0; } AWS_TEST_CASE(test_thread_scheduler_happy_path_cancellation, s_test_scheduler_happy_path_cancellation) static struct aws_task s_cancel_task; static void s_schedule_and_cancel_task(struct aws_task *task, void *arg, enum aws_task_status status) { struct aws_thread_scheduler *scheduler = arg; aws_task_init(&s_cancel_task, s_task_n_fn, (void *)2, "scheduler_ordering_2"); aws_thread_scheduler_schedule_now(scheduler, &s_cancel_task); aws_thread_scheduler_cancel_task(scheduler, &s_cancel_task); s_task_n_fn(task, arg, status); } /* schedule a task. Inside that task schedule and then immediately cancel it. This will exercise the pending to be * scheduled code path. 
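* The cancelled task's callback still runs, just with AWS_TASK_STATUS_CANCELED, which is why the
* predicate below waits for two executed tasks.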
*/ static int s_test_scheduler_cancellation_for_pending_scheduled_task(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); s_executed_tasks_n = 0; struct aws_thread_scheduler *thread_scheduler = aws_thread_scheduler_new(allocator, NULL); ASSERT_NOT_NULL(thread_scheduler); struct aws_task task1; aws_task_init(&task1, s_schedule_and_cancel_task, thread_scheduler, "scheduler_ordering_1"); aws_thread_scheduler_schedule_now(thread_scheduler, &task1); ASSERT_SUCCESS(aws_mutex_lock(&s_test_mutex)); size_t expected_runs = 2; ASSERT_SUCCESS(aws_condition_variable_wait_pred( &s_test_c_var, &s_test_mutex, s_scheduled_tasks_ran_predicate, &expected_runs)); ASSERT_SUCCESS(aws_mutex_unlock(&s_test_mutex)); ASSERT_UINT_EQUALS(2, s_executed_tasks_n); struct executed_task_data *task_data = &s_executed_tasks[0]; ASSERT_PTR_EQUALS(&task1, task_data->task); ASSERT_PTR_EQUALS(task1.arg, task_data->arg); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task_data->status); task_data = &s_executed_tasks[1]; ASSERT_PTR_EQUALS(&s_cancel_task, task_data->task); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, task_data->status); aws_thread_scheduler_release(thread_scheduler); aws_common_library_clean_up(); return 0; } AWS_TEST_CASE( test_scheduler_cancellation_for_pending_scheduled_task, s_test_scheduler_cancellation_for_pending_scheduled_task) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/thread_test.c000066400000000000000000000166361456575232400243500ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include struct thread_test_data { struct aws_allocator *allocator; aws_thread_id_t thread_id; struct aws_string *thread_name; int get_thread_name_error; }; static void s_thread_fn(void *arg) { struct thread_test_data *test_data = (struct thread_test_data *)arg; test_data->thread_id = aws_thread_current_thread_id(); test_data->get_thread_name_error = AWS_OP_SUCCESS; if (aws_thread_name(test_data->allocator, test_data->thread_id, &test_data->thread_name)) { test_data->get_thread_name_error = aws_last_error(); } } static int s_test_thread_creation_join_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); struct thread_test_data test_data = {.allocator = allocator}; struct aws_thread thread; aws_thread_init(&thread, allocator); struct aws_thread_options thread_options = *aws_default_thread_options(); /* there should be at least 1 cpu on any machine running this test. Just bind that to make sure that code * path is exercised. 
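* A thread name is also set below to cover aws_thread_name() retrieval, which the test allows to fail
* with AWS_ERROR_PLATFORM_NOT_SUPPORTED on platforms that cannot report thread names.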
*/ thread_options.cpu_id = 0; /* Exercise the thread naming code path */ thread_options.name = aws_byte_cursor_from_c_str("MyThreadName"); ASSERT_SUCCESS( aws_thread_launch(&thread, s_thread_fn, (void *)&test_data, &thread_options), "thread creation failed"); ASSERT_INT_EQUALS( AWS_THREAD_JOINABLE, aws_thread_get_detach_state(&thread), "thread state should have returned JOINABLE"); ASSERT_SUCCESS(aws_thread_join(&thread), "thread join failed"); ASSERT_TRUE( aws_thread_thread_id_equal(test_data.thread_id, aws_thread_get_id(&thread)), "get_thread_id should have returned the same id as the thread calling current_thread_id"); ASSERT_INT_EQUALS( AWS_THREAD_JOIN_COMPLETED, aws_thread_get_detach_state(&thread), "thread state should have returned JOIN_COMPLETED"); if (AWS_OP_SUCCESS == test_data.get_thread_name_error) { ASSERT_CURSOR_VALUE_STRING_EQUALS( aws_byte_cursor_from_c_str("MyThreadName"), test_data.thread_name, "thread name equals"); } else { ASSERT_INT_EQUALS(test_data.get_thread_name_error, AWS_ERROR_PLATFORM_NOT_SUPPORTED); } aws_string_destroy(test_data.thread_name); aws_thread_clean_up(&thread); aws_common_library_clean_up(); return 0; } AWS_TEST_CASE(thread_creation_join_test, s_test_thread_creation_join_fn) static uint32_t s_atexit_call_count = 0; static void s_thread_atexit_fn(void *user_data) { (void)user_data; AWS_FATAL_ASSERT(s_atexit_call_count == 0); s_atexit_call_count = 1; } static void s_thread_atexit_fn2(void *user_data) { (void)user_data; AWS_FATAL_ASSERT(s_atexit_call_count == 1); s_atexit_call_count = 2; } static void s_thread_worker_with_atexit(void *arg) { (void)arg; AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_thread_current_at_exit(s_thread_atexit_fn2, NULL)); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_thread_current_at_exit(s_thread_atexit_fn, NULL)); } static int s_test_thread_atexit(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_thread thread; ASSERT_SUCCESS(aws_thread_init(&thread, allocator)); ASSERT_SUCCESS(aws_thread_launch(&thread, s_thread_worker_with_atexit, NULL, 0), "thread creation failed"); ASSERT_SUCCESS(aws_thread_join(&thread), "thread join failed"); ASSERT_INT_EQUALS(2, s_atexit_call_count); aws_thread_clean_up(&thread); return 0; } AWS_TEST_CASE(thread_atexit_test, s_test_thread_atexit) struct managed_thread_test_data { uint64_t sleep_time_in_ns; }; static void s_managed_thread_fn(void *arg) { struct managed_thread_test_data *test_data = (struct managed_thread_test_data *)arg; aws_thread_current_sleep(test_data->sleep_time_in_ns); } #define MAX_MANAGED_THREAD_TEST_QUANTITY 16 static int s_do_managed_thread_join_test(struct aws_allocator *allocator, size_t thread_count) { struct aws_thread threads[MAX_MANAGED_THREAD_TEST_QUANTITY]; struct managed_thread_test_data thread_data[MAX_MANAGED_THREAD_TEST_QUANTITY]; AWS_FATAL_ASSERT(thread_count <= MAX_MANAGED_THREAD_TEST_QUANTITY); for (size_t i = 0; i < thread_count; ++i) { thread_data[i].sleep_time_in_ns = aws_timestamp_convert(100 * (i / 2), AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); aws_thread_init(&threads[i], allocator); } struct aws_thread_options thread_options = *aws_default_thread_options(); thread_options.join_strategy = AWS_TJS_MANAGED; for (size_t i = 0; i < thread_count; ++i) { ASSERT_SUCCESS( aws_thread_launch(&threads[i], s_managed_thread_fn, (void *)&thread_data[i], &thread_options), "thread creation failed"); ASSERT_INT_EQUALS( AWS_THREAD_MANAGED, aws_thread_get_detach_state(&threads[i]), "thread state should have returned JOINABLE"); } aws_thread_join_all_managed(); 
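/* aws_thread_join_all_managed() waits for every thread launched above with AWS_TJS_MANAGED to exit, so
 * these threads never receive an explicit aws_thread_join() or aws_thread_clean_up() call. */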
return AWS_OP_SUCCESS; } static int s_test_managed_thread_join(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); for (size_t i = 1; i <= MAX_MANAGED_THREAD_TEST_QUANTITY; ++i) { ASSERT_SUCCESS(s_do_managed_thread_join_test(allocator, i)); } aws_common_library_clean_up(); return 0; } AWS_TEST_CASE(test_managed_thread_join, s_test_managed_thread_join) /* * Because this is unmocked time, this is technically not a purely deterministic test, but we set the time values * to extreme enough values that it should absurdly unlikely that an internal OS/CPU hiccup causes this test to fail. */ static int s_test_managed_thread_join_timeout(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_common_library_init(allocator); /* * Add a short timeout to managed thread join */ aws_thread_set_managed_join_timeout_ns(aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); /* * Spawn a managed thread that sleeps for significantly longer. */ struct managed_thread_test_data thread_data; AWS_ZERO_STRUCT(thread_data); thread_data.sleep_time_in_ns = aws_timestamp_convert(3, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); struct aws_thread thread; AWS_ZERO_STRUCT(thread); aws_thread_init(&thread, allocator); struct aws_thread_options thread_options = *aws_default_thread_options(); thread_options.join_strategy = AWS_TJS_MANAGED; ASSERT_SUCCESS( aws_thread_launch(&thread, s_managed_thread_fn, (void *)&thread_data, &thread_options), "thread creation failed"); ASSERT_TRUE(aws_thread_get_managed_thread_count() == 1); /* * Do a managed thread join, it should timeout */ aws_thread_join_all_managed(); /* * Check that the managed thread is still running */ ASSERT_TRUE(aws_thread_get_managed_thread_count() == 1); /* * Increase the timeout and shut down */ aws_thread_set_managed_join_timeout_ns(aws_timestamp_convert(5, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); aws_common_library_clean_up(); ASSERT_TRUE(aws_thread_get_managed_thread_count() == 0); return 0; } AWS_TEST_CASE(test_managed_thread_join_timeout, s_test_managed_thread_join_timeout) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/uri_test.c000066400000000000000000001173601456575232400236740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include static int s_test_uri_full_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://some_user:some_password@www.test.com:8443/path/to/" "resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("some_user:some_password@www.test.com:8443"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_userinfo = aws_byte_cursor_from_c_str("some_user:some_password"); ASSERT_BIN_ARRAYS_EQUALS(expected_userinfo.ptr, expected_userinfo.len, uri.userinfo.ptr, uri.userinfo.len); struct aws_byte_cursor expected_user = aws_byte_cursor_from_c_str("some_user"); ASSERT_BIN_ARRAYS_EQUALS(expected_user.ptr, expected_user.len, uri.user.ptr, uri.user.len); struct aws_byte_cursor expected_password = aws_byte_cursor_from_c_str("some_password"); ASSERT_BIN_ARRAYS_EQUALS(expected_password.ptr, expected_password.len, uri.password.ptr, uri.password.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(8443, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); struct aws_byte_cursor expected_query_str = aws_byte_cursor_from_c_str("test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_query_str.ptr, expected_query_str.len, uri.query_string.ptr, uri.query_string.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str( "/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, uri.path_and_query.ptr, uri.path_and_query.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_full_parse, s_test_uri_full_parse); static int s_test_uri_no_scheme_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "www.test.com:8443/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); ASSERT_UINT_EQUALS(0U, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com:8443"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(8443, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); struct aws_byte_cursor expected_query_str = 
aws_byte_cursor_from_c_str("test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_query_str.ptr, expected_query_str.len, uri.query_string.ptr, uri.query_string.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str( "/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, uri.path_and_query.ptr, uri.path_and_query.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_no_scheme_parse, s_test_uri_no_scheme_parse); static int s_test_uri_no_port_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(0, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); struct aws_byte_cursor expected_query_str = aws_byte_cursor_from_c_str("test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_query_str.ptr, expected_query_str.len, uri.query_string.ptr, uri.query_string.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str( "/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, uri.path_and_query.ptr, uri.path_and_query.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_no_port_parse, s_test_uri_no_port_parse); static int s_test_uri_no_path_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com:8443/?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com:8443"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(8443, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, 
uri.path.len); struct aws_byte_cursor expected_query_str = aws_byte_cursor_from_c_str("test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_query_str.ptr, expected_query_str.len, uri.query_string.ptr, uri.query_string.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str("/?test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, uri.path_and_query.ptr, uri.path_and_query.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_no_path_parse, s_test_uri_no_path_parse); static int s_test_uri_no_query_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com:8443/path/to/resource"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com:8443"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(8443, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, uri.path_and_query.ptr, uri.path_and_query.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_no_query_parse, s_test_uri_no_query_parse); static int s_test_uri_minimal_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "www.test.com/path/to/resource"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); ASSERT_UINT_EQUALS(0U, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(0, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, uri.path_and_query.ptr, uri.path_and_query.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_minimal_parse, s_test_uri_minimal_parse); static int s_test_uri_path_and_query_only_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; 
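/* A request-style URI with no scheme or authority: those cursors should come back empty while path,
 * query string, and path_and_query are still populated. */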
const char *str_uri = "/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); ASSERT_UINT_EQUALS(0U, uri.scheme.len); ASSERT_UINT_EQUALS(0U, uri.authority.len); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); struct aws_byte_cursor expected_query_str = aws_byte_cursor_from_c_str("test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_query_str.ptr, expected_query_str.len, uri.query_string.ptr, uri.query_string.len); struct aws_byte_cursor expected_path_and_query = aws_byte_cursor_from_c_str( "/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_path_and_query.ptr, expected_path_and_query.len, uri.path_and_query.ptr, uri.path_and_query.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_path_and_query_only_parse, s_test_uri_path_and_query_only_parse); static int s_test_uri_root_only_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(0, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(""); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_root_only_parse, s_test_uri_root_only_parse); static int s_test_uri_root_slash_only_path_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com/"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(0, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); 
aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_root_slash_only_path_parse, s_test_uri_root_slash_only_path_parse); static int s_test_uri_userinfo_no_password_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* RFC-3986 section 3.2.1: Use of the format "user:password" in the userinfo field is deprecated. * We will try to parse the userinfo with the format still, but if not happening, it will not be treated as an * error. The whole userinfo will still be available to access */ const char *str_uri = "https://some_name@www.test.com"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("some_name@www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_userinfo = aws_byte_cursor_from_c_str("some_name"); ASSERT_BIN_ARRAYS_EQUALS(expected_userinfo.ptr, expected_userinfo.len, uri.userinfo.ptr, uri.userinfo.len); struct aws_byte_cursor expected_user = aws_byte_cursor_from_c_str("some_name"); ASSERT_BIN_ARRAYS_EQUALS(expected_user.ptr, expected_user.len, uri.user.ptr, uri.user.len); ASSERT_UINT_EQUALS(0U, uri.password.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(0, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(""); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_userinfo_no_password_parse, s_test_uri_userinfo_no_password_parse); static int s_test_uri_empty_user_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* RFC-3986 section 3.2.1: Use of the format "user:password" in the userinfo field is deprecated. * We will try to parse the userinfo with the format still, but if not happening, it will not be treated as an * error. 
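* Here the userinfo component is present but empty ("@" with nothing before it), so userinfo, user,
* and password are all expected to be zero-length cursors.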
The whole userinfo will still be available to access */ const char *str_uri = "https://@www.test.com"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("@www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); ASSERT_UINT_EQUALS(0U, uri.userinfo.len); ASSERT_UINT_EQUALS(0U, uri.user.len); ASSERT_UINT_EQUALS(0U, uri.password.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(0, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(""); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_empty_user_parse, s_test_uri_empty_user_parse); static int s_test_uri_query_params(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com:8443/path/to/" "resource?test1=value1&testkeyonly&&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_uri_param params[5]; AWS_ZERO_ARRAY(params); struct aws_array_list params_list; aws_array_list_init_static(¶ms_list, ¶ms, 5, sizeof(struct aws_uri_param)); ASSERT_SUCCESS(aws_uri_query_string_params(&uri, ¶ms_list)); ASSERT_UINT_EQUALS(5u, aws_array_list_length(¶ms_list)); struct aws_byte_cursor expected_key = aws_byte_cursor_from_c_str("test1"); struct aws_byte_cursor expected_value = aws_byte_cursor_from_c_str("value1"); ASSERT_BIN_ARRAYS_EQUALS(expected_key.ptr, expected_key.len, params[0].key.ptr, params[0].key.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value.ptr, expected_value.len, params[0].value.ptr, params[0].value.len); expected_key = aws_byte_cursor_from_c_str("testkeyonly"); ASSERT_BIN_ARRAYS_EQUALS(expected_key.ptr, expected_key.len, params[1].key.ptr, params[1].key.len); ASSERT_UINT_EQUALS(0U, params[1].value.len); expected_key = aws_byte_cursor_from_c_str("test%20space"); expected_value = aws_byte_cursor_from_c_str("value%20space"); ASSERT_BIN_ARRAYS_EQUALS(expected_key.ptr, expected_key.len, params[2].key.ptr, params[2].key.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value.ptr, expected_value.len, params[2].value.ptr, params[2].value.len); expected_key = aws_byte_cursor_from_c_str("test2"); expected_value = aws_byte_cursor_from_c_str("value2"); ASSERT_BIN_ARRAYS_EQUALS(expected_key.ptr, expected_key.len, params[3].key.ptr, params[3].key.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value.ptr, expected_value.len, params[3].value.ptr, params[3].value.len); expected_key = aws_byte_cursor_from_c_str("test2"); expected_value = aws_byte_cursor_from_c_str("value3"); ASSERT_BIN_ARRAYS_EQUALS(expected_key.ptr, expected_key.len, params[4].key.ptr, params[4].key.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value.ptr, expected_value.len, params[4].value.ptr, params[4].value.len); aws_uri_clean_up(&uri); /* Test empty query string 
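* (with no '?' present, aws_uri_query_string_params() should leave the params list empty)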
*/ str_uri = "https://www.test.com:8443/path/to/resource"; uri_csr = aws_byte_cursor_from_c_str(str_uri); ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); aws_array_list_clear(¶ms_list); ASSERT_SUCCESS(aws_uri_query_string_params(&uri, ¶ms_list)); ASSERT_UINT_EQUALS(0, aws_array_list_length(¶ms_list)); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_query_params, s_test_uri_query_params); static int s_test_uri_ipv4_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://127.0.0.1:8443"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("127.0.0.1:8443"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("127.0.0.1"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(8443, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(""); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_ipv4_parse, s_test_uri_ipv4_parse); static int s_test_uri_ipv6_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://[2001:db8:85a3:8d3:1319:8a2e:370:7348%25en0]:443"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("[2001:db8:85a3:8d3:1319:8a2e:370:7348%25en0]:443"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("[2001:db8:85a3:8d3:1319:8a2e:370:7348%25en0]"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); ASSERT_UINT_EQUALS(443, uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(""); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_ipv6_parse, s_test_uri_ipv6_parse); static int s_test_uri_ipv6_no_port_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://[2001:db8:85a3:8d3:1319:8a2e:370:7348%25en0]"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, uri.scheme.ptr, uri.scheme.len); struct aws_byte_cursor expected_authority = 
aws_byte_cursor_from_c_str("[2001:db8:85a3:8d3:1319:8a2e:370:7348%25en0]"); ASSERT_BIN_ARRAYS_EQUALS(expected_authority.ptr, expected_authority.len, uri.authority.ptr, uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("[2001:db8:85a3:8d3:1319:8a2e:370:7348%25en0]"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, uri.host_name.ptr, uri.host_name.len); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(""); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, uri.path.ptr, uri.path.len); ASSERT_UINT_EQUALS(0U, uri.query_string.len); aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_ipv6_no_port_parse, s_test_uri_ipv6_no_port_parse); static int s_test_uri_invalid_scheme_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https:/www.test.com:8443/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_uri_init_parse(&uri, allocator, &uri_csr)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_invalid_scheme_parse, s_test_uri_invalid_scheme_parse); static int s_test_uri_invalid_port_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com:s8443/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_uri_init_parse(&uri, allocator, &uri_csr)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_invalid_port_parse, s_test_uri_invalid_port_parse); static int s_test_uri_port_too_large_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com:4294967296/path/to/" "resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_uri_init_parse(&uri, allocator, &uri_csr)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_port_too_large_parse, s_test_uri_port_too_large_parse); static int s_test_uri_builder(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com:8443/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_uri_param params[4]; AWS_ZERO_ARRAY(params); struct aws_array_list params_list; aws_array_list_init_static(¶ms_list, ¶ms, 4, sizeof(struct aws_uri_param)); ASSERT_SUCCESS(aws_uri_query_string_params(&uri, ¶ms_list)); struct aws_uri_builder_options builder_args = { .scheme = uri.scheme, .path = uri.path, .host_name = uri.host_name, .port = uri.port, .query_params = ¶ms_list, }; struct aws_uri built_uri; ASSERT_SUCCESS(aws_uri_init_from_builder_options(&built_uri, allocator, &builder_args)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, built_uri.scheme.ptr, built_uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com:8443"); ASSERT_BIN_ARRAYS_EQUALS( expected_authority.ptr, expected_authority.len, built_uri.authority.ptr, 
built_uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, built_uri.host_name.ptr, built_uri.host_name.len); ASSERT_UINT_EQUALS(8443, built_uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, built_uri.path.ptr, built_uri.path.len); struct aws_byte_cursor expected_query_str = aws_byte_cursor_from_c_str("test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_query_str.ptr, expected_query_str.len, built_uri.query_string.ptr, built_uri.query_string.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str( "/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, built_uri.path_and_query.ptr, built_uri.path_and_query.len); aws_uri_clean_up(&uri); aws_uri_clean_up(&built_uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_builder, s_test_uri_builder); static int s_test_uri_builder_from_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *str_uri = "https://www.test.com:8443/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"; struct aws_byte_cursor uri_csr = aws_byte_cursor_from_c_str(str_uri); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &uri_csr)); struct aws_uri_param params[4]; AWS_ZERO_ARRAY(params); struct aws_byte_cursor query_string = aws_byte_cursor_from_c_str("test1=value1&test%20space=value%20space&test2=value2&test2=value3"); struct aws_uri_builder_options builder_args = { .scheme = uri.scheme, .path = uri.path, .host_name = uri.host_name, .port = uri.port, .query_string = query_string, }; struct aws_uri built_uri; ASSERT_SUCCESS(aws_uri_init_from_builder_options(&built_uri, allocator, &builder_args)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("https"); ASSERT_BIN_ARRAYS_EQUALS(expected_scheme.ptr, expected_scheme.len, built_uri.scheme.ptr, built_uri.scheme.len); struct aws_byte_cursor expected_authority = aws_byte_cursor_from_c_str("www.test.com:8443"); ASSERT_BIN_ARRAYS_EQUALS( expected_authority.ptr, expected_authority.len, built_uri.authority.ptr, built_uri.authority.len); struct aws_byte_cursor expected_host = aws_byte_cursor_from_c_str("www.test.com"); ASSERT_BIN_ARRAYS_EQUALS(expected_host.ptr, expected_host.len, built_uri.host_name.ptr, built_uri.host_name.len); ASSERT_UINT_EQUALS(8443, built_uri.port); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/path/to/resource"); ASSERT_BIN_ARRAYS_EQUALS(expected_path.ptr, expected_path.len, built_uri.path.ptr, built_uri.path.len); ASSERT_BIN_ARRAYS_EQUALS( query_string.ptr, query_string.len, built_uri.query_string.ptr, built_uri.query_string.len); struct aws_byte_cursor expected_request_uri = aws_byte_cursor_from_c_str( "/path/to/resource?test1=value1&test%20space=value%20space&test2=value2&test2=value3"); ASSERT_BIN_ARRAYS_EQUALS( expected_request_uri.ptr, expected_request_uri.len, built_uri.path_and_query.ptr, built_uri.path_and_query.len); aws_uri_clean_up(&uri); aws_uri_clean_up(&built_uri); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uri_builder_from_string, s_test_uri_builder_from_string); static int s_test_uri_encode_path_case( struct aws_allocator *allocator, const char *input, const char *expected_output) { 
struct aws_byte_buf encoding; ASSERT_SUCCESS(aws_byte_buf_init(&encoding, allocator, 100)); struct aws_byte_cursor path_cursor = aws_byte_cursor_from_c_str(input); ASSERT_SUCCESS(aws_byte_buf_append_encoding_uri_path(&encoding, &path_cursor)); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(expected_output); ASSERT_BIN_ARRAYS_EQUALS(encoding.buffer, encoding.len, expected_path.ptr, expected_path.len); aws_byte_buf_clean_up(&encoding); return AWS_OP_SUCCESS; } static int s_test_uri_encode_path_rfc3986(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_uri_encode_path_case(allocator, "/path/1234/", "/path/1234/")); ASSERT_SUCCESS(s_test_uri_encode_path_case( allocator, "/abcdefghijklmnopqrstuvwxyz/1234567890/", "/abcdefghijklmnopqrstuvwxyz/1234567890/")); ASSERT_SUCCESS(s_test_uri_encode_path_case( allocator, "/ABCDEFGHIJKLMNOPQRSTUVWXYZ/1234567890/", "/ABCDEFGHIJKLMNOPQRSTUVWXYZ/1234567890/")); ASSERT_SUCCESS(s_test_uri_encode_path_case( allocator, "/ABCDEFGHIJKLMNOPQRSTUVWXYZ/_-~./$@&,:;=/", "/ABCDEFGHIJKLMNOPQRSTUVWXYZ/_-~./%24%40%26%2C%3A%3B%3D/")); ASSERT_SUCCESS(s_test_uri_encode_path_case(allocator, "/path/%^#! /", "/path/%25%5E%23%21%20/")); ASSERT_SUCCESS(s_test_uri_encode_path_case(allocator, "/path/ሴ", "/path/%E1%88%B4")); ASSERT_SUCCESS(s_test_uri_encode_path_case( allocator, "/path/\"'()*+<>[\\]`{|}/", "/path/%22%27%28%29%2A%2B%3C%3E%5B%5C%5D%60%7B%7C%7D/")); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_uri_encode_path_rfc3986, s_test_uri_encode_path_rfc3986); static int s_test_uri_encode_param_case( struct aws_allocator *allocator, const char *input, const char *expected_output) { struct aws_byte_buf encoding; ASSERT_SUCCESS(aws_byte_buf_init(&encoding, allocator, 10)); struct aws_byte_cursor path_cursor = aws_byte_cursor_from_c_str(input); ASSERT_SUCCESS(aws_byte_buf_append_encoding_uri_param(&encoding, &path_cursor)); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str(expected_output); ASSERT_BIN_ARRAYS_EQUALS(encoding.buffer, encoding.len, expected_path.ptr, expected_path.len); aws_byte_buf_clean_up(&encoding); return AWS_OP_SUCCESS; } static int s_test_uri_encode_query(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; ASSERT_SUCCESS(s_test_uri_encode_param_case( allocator, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")); ASSERT_SUCCESS(s_test_uri_encode_param_case(allocator, "1234567890", "1234567890")); ASSERT_SUCCESS(s_test_uri_encode_param_case(allocator, "_~.-", "_~.-")); ASSERT_SUCCESS(s_test_uri_encode_param_case(allocator, "%^#! 
", "%25%5E%23%21%20")); ASSERT_SUCCESS(s_test_uri_encode_param_case(allocator, "/$@&,:;=", "%2F%24%40%26%2C%3A%3B%3D")); ASSERT_SUCCESS(s_test_uri_encode_param_case(allocator, "ሴ", "%E1%88%B4")); ASSERT_SUCCESS( s_test_uri_encode_param_case(allocator, "\"'()*+<>[\\]`{|}", "%22%27%28%29%2A%2B%3C%3E%5B%5C%5D%60%7B%7C%7D")); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_uri_encode_query, s_test_uri_encode_query); static int s_test_uri_decode_ok(struct aws_allocator *allocator, const char *input, const char *expected_output) { struct aws_byte_buf decoding; ASSERT_SUCCESS(aws_byte_buf_init(&decoding, allocator, 10)); struct aws_byte_cursor input_cursor = aws_byte_cursor_from_c_str(input); ASSERT_SUCCESS(aws_byte_buf_append_decoding_uri(&decoding, &input_cursor)); ASSERT_BIN_ARRAYS_EQUALS(expected_output, strlen(expected_output), decoding.buffer, decoding.len); aws_byte_buf_clean_up(&decoding); return AWS_OP_SUCCESS; } static int s_test_uri_decode_err(struct aws_allocator *allocator, const char *input) { struct aws_byte_buf decoding; ASSERT_SUCCESS(aws_byte_buf_init(&decoding, allocator, 10)); struct aws_byte_cursor input_cursor = aws_byte_cursor_from_c_str(input); ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_byte_buf_append_decoding_uri(&decoding, &input_cursor)); aws_byte_buf_clean_up(&decoding); return AWS_OP_SUCCESS; } static int s_test_uri_roundtrip(struct aws_allocator *allocator, const char *input) { struct aws_byte_cursor input_cursor = aws_byte_cursor_from_c_str(input); struct aws_byte_buf encoding; ASSERT_SUCCESS(aws_byte_buf_init(&encoding, allocator, 10)); struct aws_byte_buf decoding; ASSERT_SUCCESS(aws_byte_buf_init(&decoding, allocator, 10)); /* test param roundtrip encode/decode */ ASSERT_SUCCESS(aws_byte_buf_append_encoding_uri_param(&encoding, &input_cursor)); struct aws_byte_cursor encoding_cursor = aws_byte_cursor_from_buf(&encoding); ASSERT_SUCCESS(aws_byte_buf_append_decoding_uri(&decoding, &encoding_cursor)); ASSERT_BIN_ARRAYS_EQUALS(input, strlen(input), decoding.buffer, decoding.len); /* test path roundtrip encode/decode */ aws_byte_buf_reset(&encoding, false); aws_byte_buf_reset(&decoding, false); ASSERT_SUCCESS(aws_byte_buf_append_encoding_uri_path(&encoding, &input_cursor)); encoding_cursor = aws_byte_cursor_from_buf(&encoding); ASSERT_SUCCESS(aws_byte_buf_append_decoding_uri(&decoding, &encoding_cursor)); ASSERT_BIN_ARRAYS_EQUALS(input, strlen(input), decoding.buffer, decoding.len); aws_byte_buf_clean_up(&encoding); aws_byte_buf_clean_up(&decoding); return AWS_OP_SUCCESS; } static int s_test_uri_decode(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* test against expected */ ASSERT_SUCCESS(s_test_uri_decode_ok(allocator, "", "")); ASSERT_SUCCESS(s_test_uri_decode_ok(allocator, "abc123", "abc123")); ASSERT_SUCCESS(s_test_uri_decode_ok(allocator, "%20", " ")); ASSERT_SUCCESS(s_test_uri_decode_ok(allocator, "%E1%88%B4", "ሴ")); ASSERT_SUCCESS(s_test_uri_decode_ok(allocator, "%e1%88%b4", "ሴ")); ASSERT_SUCCESS(s_test_uri_decode_ok(allocator, "%2520", "%20")); ASSERT_SUCCESS(s_test_uri_decode_ok(allocator, "ሴ", "ሴ")); /* odd input should just pass through */ ASSERT_SUCCESS(s_test_uri_decode_ok( allocator, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", /* long enough to resize output buffer */ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")); /* these should fail */ ASSERT_SUCCESS(s_test_uri_decode_err(allocator, "%")); ASSERT_SUCCESS(s_test_uri_decode_err(allocator, "%2")); ASSERT_SUCCESS(s_test_uri_decode_err(allocator, "%%20")); 
ASSERT_SUCCESS(s_test_uri_decode_err(allocator, "%fg")); ASSERT_SUCCESS(s_test_uri_decode_err(allocator, "%gf")); /* Test roundtrip encoding and decoding. Results should match original input */ ASSERT_SUCCESS(s_test_uri_roundtrip(allocator, "")); ASSERT_SUCCESS(s_test_uri_roundtrip(allocator, "abc123")); ASSERT_SUCCESS(s_test_uri_roundtrip(allocator, "a + b")); ASSERT_SUCCESS(s_test_uri_roundtrip(allocator, "ሴ")); /* do roundtrip test against every possible value (except 0 because helper functions use c-strings) */ uint8_t every_value[256]; for (size_t i = 0; i < 255; ++i) { every_value[i] = (uint8_t)(i + 1); } every_value[255] = 0; ASSERT_SUCCESS(s_test_uri_roundtrip(allocator, (const char *)every_value)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_uri_decode, s_test_uri_decode); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/uuid_test.c000066400000000000000000000100511456575232400240300ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include static int s_uuid_string_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_uuid uuid; ASSERT_SUCCESS(aws_uuid_init(&uuid)); uint8_t uuid_array[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_array(uuid_array, sizeof(uuid_array)); uuid_buf.len = 0; ASSERT_SUCCESS(aws_uuid_to_str(&uuid, &uuid_buf)); uint8_t zerod_buf[AWS_UUID_STR_LEN] = {0}; ASSERT_UINT_EQUALS(AWS_UUID_STR_LEN - 1, uuid_buf.len); ASSERT_FALSE(0 == memcmp(zerod_buf, uuid_array, sizeof(uuid_array))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uuid_string, s_uuid_string_fn) static int s_prefilled_uuid_string_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_uuid uuid = { .uuid_data = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}, }; uint8_t uuid_array[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_array(uuid_array, sizeof(uuid_array)); uuid_buf.len = 0; ASSERT_SUCCESS(aws_uuid_to_str(&uuid, &uuid_buf)); const char *expected_str = "01020304-0506-0708-090a-0b0c0d0e0f10"; struct aws_byte_buf expected = aws_byte_buf_from_c_str(expected_str); ASSERT_BIN_ARRAYS_EQUALS(expected.buffer, expected.len, uuid_buf.buffer, uuid_buf.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(prefilled_uuid_string, s_prefilled_uuid_string_fn) static int s_uuid_string_short_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_uuid uuid; ASSERT_SUCCESS(aws_uuid_init(&uuid)); uint8_t uuid_array[AWS_UUID_STR_LEN - 2] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_array(uuid_array, sizeof(uuid_array)); uuid_buf.len = 0; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_uuid_to_str(&uuid, &uuid_buf)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uuid_string_short_buffer, s_uuid_string_short_buffer_fn) static int s_uuid_string_parse_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t expected_uuid[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}; const char *uuid_str = "01020304-0506-0708-090a-0b0c0d0e0f10"; struct aws_byte_buf uuid_buf = aws_byte_buf_from_c_str(uuid_str); struct aws_byte_cursor uuid_cur = aws_byte_cursor_from_buf(&uuid_buf); struct aws_uuid uuid; ASSERT_SUCCESS(aws_uuid_init_from_str(&uuid, &uuid_cur)); ASSERT_BIN_ARRAYS_EQUALS(expected_uuid, sizeof(expected_uuid), uuid.uuid_data, 
sizeof(uuid.uuid_data)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uuid_string_parse, s_uuid_string_parse_fn) static int s_uuid_string_parse_too_short_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char *uuid_str = "01020304-0506-0708-090a-0b0c0d0e0f1"; struct aws_byte_buf uuid_buf = aws_byte_buf_from_c_str(uuid_str); struct aws_byte_cursor uuid_cur = aws_byte_cursor_from_buf(&uuid_buf); struct aws_uuid uuid; ASSERT_ERROR(AWS_ERROR_INVALID_BUFFER_SIZE, aws_uuid_init_from_str(&uuid, &uuid_cur)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uuid_string_parse_too_short, s_uuid_string_parse_too_short_fn) static int s_uuid_string_parse_malformed_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const char *uuid_str = "010203-040506-0708-090a-0b0c0d0e0f10"; struct aws_byte_buf uuid_buf = aws_byte_buf_from_c_str(uuid_str); struct aws_byte_cursor uuid_cur = aws_byte_cursor_from_buf(&uuid_buf); struct aws_uuid uuid; ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_uuid_init_from_str(&uuid, &uuid_cur)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(uuid_string_parse_malformed, s_uuid_string_parse_malformed_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/xml_parser_test.c000066400000000000000000000411441456575232400252450ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include const char *root_with_text = "TestBody"; struct root_with_text_capture { struct aws_byte_cursor capture; struct aws_byte_cursor node_name; }; int s_root_with_text_root_node(struct aws_xml_node *node, void *user_data) { struct root_with_text_capture *capture = user_data; if (aws_xml_node_as_body(node, &capture->capture)) { return AWS_OP_ERR; } capture->node_name = aws_xml_node_get_name(node); return AWS_OP_SUCCESS; } static int s_xml_parser_root_with_text_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct root_with_text_capture capture; AWS_ZERO_STRUCT(capture); struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(root_with_text), .on_root_encountered = s_root_with_text_root_node, .user_data = &capture, }; ASSERT_SUCCESS(aws_xml_parse(allocator, &options)); const char expected_name[] = "rootNode"; const char expected_value[] = "TestBody"; ASSERT_BIN_ARRAYS_EQUALS(expected_name, sizeof(expected_name) - 1, capture.node_name.ptr, capture.node_name.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value, sizeof(expected_value) - 1, capture.capture.ptr, capture.capture.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_root_with_text, s_xml_parser_root_with_text_test) const char *child_with_text = "TestBody"; struct child_text_capture { struct aws_byte_cursor capture; struct aws_byte_cursor node_name; }; int s_child_with_text_root_node(struct aws_xml_node *node, void *user_data) { struct child_text_capture *capture = user_data; if (aws_xml_node_as_body(node, &capture->capture)) { return AWS_OP_ERR; } capture->node_name = aws_xml_node_get_name(node); return AWS_OP_SUCCESS; } int s_root_with_child(struct aws_xml_node *node, void *user_data) { if (aws_xml_node_traverse(node, s_child_with_text_root_node, user_data)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_xml_parser_child_with_text_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct child_text_capture capture; AWS_ZERO_STRUCT(capture); struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(child_with_text), .on_root_encountered = 
s_root_with_child, .user_data = &capture, }; ASSERT_SUCCESS(aws_xml_parse(allocator, &options)); const char expected_name[] = "child1"; const char expected_value[] = "TestBody"; ASSERT_BIN_ARRAYS_EQUALS(expected_name, sizeof(expected_name) - 1, capture.node_name.ptr, capture.node_name.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value, sizeof(expected_value) - 1, capture.capture.ptr, capture.capture.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_child_with_text, s_xml_parser_child_with_text_test) const char *siblings_with_text = "TestBodyTestBody2"; struct sibling_text_capture { struct aws_byte_cursor capture1; struct aws_byte_cursor capture2; struct aws_byte_cursor node_name1; struct aws_byte_cursor node_name2; }; int s_sibling_with_text_root_node(struct aws_xml_node *node, void *user_data) { struct sibling_text_capture *capture = user_data; struct aws_byte_cursor child1_name = aws_byte_cursor_from_c_str("child1"); struct aws_byte_cursor child2_name = aws_byte_cursor_from_c_str("child2"); struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_ignore_case(&node_name, &child1_name)) { capture->node_name1 = node_name; if (aws_xml_node_as_body(node, &capture->capture1)) { return AWS_OP_ERR; } } else if (aws_byte_cursor_eq_ignore_case(&node_name, &child2_name)) { capture->node_name2 = node_name; if (aws_xml_node_as_body(node, &capture->capture2)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } int s_root_with_child_siblings(struct aws_xml_node *node, void *user_data) { return aws_xml_node_traverse(node, s_sibling_with_text_root_node, user_data); } static int s_xml_parser_siblings_with_text_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct sibling_text_capture capture; AWS_ZERO_STRUCT(capture); struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(siblings_with_text), .on_root_encountered = s_root_with_child_siblings, .user_data = &capture, }; ASSERT_SUCCESS(aws_xml_parse(allocator, &options)); const char expected_name1[] = "child1"; const char expected_value1[] = "TestBody"; const char expected_name2[] = "child2"; const char expected_value2[] = "TestBody2"; ASSERT_BIN_ARRAYS_EQUALS( expected_name1, sizeof(expected_name1) - 1, capture.node_name1.ptr, capture.node_name1.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value1, sizeof(expected_value1) - 1, capture.capture1.ptr, capture.capture1.len); ASSERT_BIN_ARRAYS_EQUALS( expected_name2, sizeof(expected_name2) - 1, capture.node_name2.ptr, capture.node_name2.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value2, sizeof(expected_value2) - 1, capture.capture2.ptr, capture.capture2.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_siblings_with_text, s_xml_parser_siblings_with_text_test) const char *preamble_and_attributes = "\n" " " "\n" "TestBodyTestBody2"; struct preamble_and_attributes_capture { struct aws_byte_cursor capture1; struct aws_byte_cursor capture2; struct aws_xml_attribute capture2_attr; struct aws_byte_cursor node_name1; struct aws_byte_cursor node_name2; struct aws_xml_attribute root_attr1; struct aws_xml_attribute root_attr2; }; int s_preamble_and_attributes_child_node(struct aws_xml_node *node, void *user_data) { struct preamble_and_attributes_capture *capture = user_data; struct aws_byte_cursor child1_name = aws_byte_cursor_from_c_str("child1"); struct aws_byte_cursor child2_name = aws_byte_cursor_from_c_str("child2"); struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_ignore_case(&node_name, &child1_name)) { 
capture->node_name1 = node_name; if (aws_xml_node_as_body(node, &capture->capture1)) { return AWS_OP_ERR; } } else if (aws_byte_cursor_eq_ignore_case(&node_name, &child2_name)) { capture->node_name2 = node_name; if (aws_xml_node_as_body(node, &capture->capture2)) { return AWS_OP_ERR; } ASSERT_TRUE(aws_xml_node_get_num_attributes(node) == 1); capture->capture2_attr = aws_xml_node_get_attribute(node, 0); } return AWS_OP_SUCCESS; } int s_preamble_and_attributes(struct aws_xml_node *node, void *user_data) { struct preamble_and_attributes_capture *capture = user_data; ASSERT_TRUE(aws_xml_node_get_num_attributes(node) == 2); capture->root_attr1 = aws_xml_node_get_attribute(node, 0); capture->root_attr2 = aws_xml_node_get_attribute(node, 1); return aws_xml_node_traverse(node, s_preamble_and_attributes_child_node, user_data); } static int s_xml_parser_preamble_and_attributes_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct preamble_and_attributes_capture capture; AWS_ZERO_STRUCT(capture); struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(preamble_and_attributes), .on_root_encountered = s_preamble_and_attributes, .user_data = &capture, }; ASSERT_SUCCESS(aws_xml_parse(allocator, &options)); const char expected_attr1_name[] = "attribute1"; const char expected_attr1_value1[] = "abc"; ASSERT_BIN_ARRAYS_EQUALS( expected_attr1_name, sizeof(expected_attr1_name) - 1, capture.root_attr1.name.ptr, capture.root_attr1.name.len); ASSERT_BIN_ARRAYS_EQUALS( expected_attr1_value1, sizeof(expected_attr1_value1) - 1, capture.root_attr1.value.ptr, capture.root_attr1.value.len); const char expected_attr2_name[] = "attribute2"; const char expected_attr2_value1[] = "def"; ASSERT_BIN_ARRAYS_EQUALS( expected_attr2_name, sizeof(expected_attr2_name) - 1, capture.root_attr2.name.ptr, capture.root_attr2.name.len); ASSERT_BIN_ARRAYS_EQUALS( expected_attr2_value1, sizeof(expected_attr2_value1) - 1, capture.root_attr2.value.ptr, capture.root_attr2.value.len); const char expected_name1[] = "child1"; const char expected_value1[] = "TestBody"; const char expected_name2[] = "child2"; const char expected_value2[] = "TestBody2"; ASSERT_BIN_ARRAYS_EQUALS( expected_name1, sizeof(expected_name1) - 1, capture.node_name1.ptr, capture.node_name1.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value1, sizeof(expected_value1) - 1, capture.capture1.ptr, capture.capture1.len); ASSERT_BIN_ARRAYS_EQUALS( expected_name2, sizeof(expected_name2) - 1, capture.node_name2.ptr, capture.node_name2.len); ASSERT_BIN_ARRAYS_EQUALS(expected_value2, sizeof(expected_value2) - 1, capture.capture2.ptr, capture.capture2.len); const char expected_attr3_name[] = "attribute3"; const char expected_attr3_value1[] = "childAttr"; ASSERT_BIN_ARRAYS_EQUALS( expected_attr3_name, sizeof(expected_attr2_name) - 1, capture.capture2_attr.name.ptr, capture.capture2_attr.name.len); ASSERT_BIN_ARRAYS_EQUALS( expected_attr3_value1, sizeof(expected_attr3_value1) - 1, capture.capture2_attr.value.ptr, capture.capture2_attr.value.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_preamble_and_attributes, s_xml_parser_preamble_and_attributes_test) const char *nested_nodes_same_name_doc = "\n" " " "\n" " \n" " \n" " TestBody\n" " \n" " \n" " \n" " TestBody2\n" " \n" ""; struct nested_node_capture { struct aws_byte_cursor node_body; }; int s_nested_node(struct aws_xml_node *node, void *user_data) { struct nested_node_capture *capture = user_data; return aws_xml_node_as_body(node, &capture->node_body); } static int 
s_xml_parser_nested_node_same_name_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct nested_node_capture capture; AWS_ZERO_STRUCT(capture); struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(nested_nodes_same_name_doc), .on_root_encountered = s_nested_node, .user_data = &capture, }; ASSERT_SUCCESS(aws_xml_parse(allocator, &options)); const char *expected_body = "\n \n" " \n" " TestBody\n" " \n" " \n" " \n" " TestBody2\n" " \n"; ASSERT_BIN_ARRAYS_EQUALS(expected_body, strlen(expected_body), capture.node_body.ptr, capture.node_body.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_nested_node_same_name_test, s_xml_parser_nested_node_same_name_test) const char *nested_nodes_deep_recursion_doc = "\n" " " "\n" " \n" " \n" " \n" ""; int s_nested_node_deep_recursion(struct aws_xml_node *node, void *user_data) { return aws_xml_node_traverse(node, s_nested_node_deep_recursion, user_data); } static int s_xml_parser_nested_node_deep_recursion_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(nested_nodes_deep_recursion_doc), .max_depth = 2, .on_root_encountered = s_nested_node_deep_recursion, .user_data = NULL, }; ASSERT_ERROR(AWS_ERROR_INVALID_XML, aws_xml_parse(allocator, &options)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_nested_node_deep_recursion_test, s_xml_parser_nested_node_deep_recursion_test) const char *too_many_attributes = "\n" " " "\n" ""; int s_too_many_attributes(struct aws_xml_node *node, void *user_data) { (void)node; (void)user_data; return AWS_OP_SUCCESS; } static int s_xml_parser_too_many_attributes_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(too_many_attributes), .on_root_encountered = s_too_many_attributes, .user_data = NULL, }; ASSERT_ERROR(AWS_ERROR_INVALID_XML, aws_xml_parse(allocator, &options)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_too_many_attributes_test, s_xml_parser_too_many_attributes_test) const char *node_name_too_long = "\n" " " "" ""; int s_too_long(struct aws_xml_node *node, void *user_data) { (void)node; (void)user_data; return AWS_OP_SUCCESS; } static int s_xml_parser_name_too_long_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_xml_parser_options options = { .doc = aws_byte_cursor_from_c_str(node_name_too_long), .on_root_encountered = s_too_long, .user_data = NULL, }; ASSERT_ERROR(AWS_ERROR_INVALID_XML, aws_xml_parse(allocator, &options)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(xml_parser_name_too_long_test, s_xml_parser_name_too_long_test) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/tests/zero_test.c000066400000000000000000000063421456575232400240510ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_TEST_CASE(test_secure_zero, s_test_secure_zero_fn) static int s_test_secure_zero_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* We can't actually test the secure part of the zero operation - anything * we do to observe the buffer will teach the compiler that it needs to * actually zero the buffer (or provide a convincing-enough simulation of * the same). So we'll just test that it behaves like memset. 
*/ unsigned char buf[16]; for (size_t i = 0; i < sizeof(buf); i++) { volatile unsigned char *ptr = buf; ptr += i; *ptr = (unsigned char)0xDD; } aws_secure_zero(buf, sizeof(buf) / 2); for (size_t i = 0; i < sizeof(buf); i++) { if (i < sizeof(buf) / 2) { ASSERT_INT_EQUALS(0, buf[i]); } else { ASSERT_INT_EQUALS((unsigned char)0xDD, (unsigned char)buf[i]); } } /* check that it's safe to pass NULL */ aws_secure_zero(NULL, 0); return SUCCESS; } AWS_TEST_CASE(test_buffer_secure_zero, s_test_buffer_secure_zero_fn) static int s_test_buffer_secure_zero_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buf; size_t len = 27; ASSERT_SUCCESS(aws_byte_buf_init(&buf, allocator, len)); buf.len = buf.capacity; for (size_t i = 0; i < len; ++i) { buf.buffer[i] = 0xDD; } aws_byte_buf_secure_zero(&buf); for (size_t i = 0; i < len; ++i) { ASSERT_INT_EQUALS(0, buf.buffer[i]); } ASSERT_INT_EQUALS(0, buf.len); aws_byte_buf_clean_up(&buf); return SUCCESS; } AWS_TEST_CASE(test_buffer_clean_up_secure, s_test_buffer_clean_up_secure_fn) static int s_test_buffer_clean_up_secure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* We cannot test the zeroing of data here, because that would require reading * memory that has already been freed. Simply verifies that there is no memory leak. */ struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init(&buf, allocator, 37)); aws_byte_buf_clean_up_secure(&buf); ASSERT_INT_EQUALS(buf.len, 0); ASSERT_INT_EQUALS(buf.capacity, 0); ASSERT_NULL(buf.buffer); ASSERT_NULL(buf.allocator); return SUCCESS; } AWS_TEST_CASE(is_zeroed, s_test_is_zeroed_fn) static int s_test_is_zeroed_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Using a value that's 2X the largest amount we check in a single CPU instruction */ enum { max_size = 64 * 2 }; uint8_t buf[max_size]; for (size_t size = 1; size <= max_size; ++size) { /* Zero out buffer and check */ memset(buf, 0, size); ASSERT_TRUE(aws_is_mem_zeroed(buf, size)); /* Set 1 byte to be non-zero and check */ for (size_t non_zero_byte = 0; non_zero_byte < size; ++non_zero_byte) { buf[non_zero_byte] = 1; ASSERT_FALSE(aws_is_mem_zeroed(buf, size)); buf[non_zero_byte] = 0; } } return SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/000077500000000000000000000000001456575232400232025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/000077500000000000000000000000001456575232400241065ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/.gitignore000066400000000000000000000005561456575232400261040ustar00rootroot00000000000000# Emitted when running CBMC proofs proofs/**/logs proofs/**/gotos proofs/**/report proofs/**/html proofs/output # Emitted by CBMC Viewer TAGS-* # Emitted by Arpa arpa_cmake/ arpa-validation-logs/ Makefile.arpa # Emitted by litani .ninja_deps .ninja_log .litani_cache_dir # These files should be overwritten whenever prepare.py runs cbmc-batch.yaml __pycache__/ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/README.md000066400000000000000000000024711456575232400253710ustar00rootroot00000000000000# CBMC Batch Running CBMC Batch jobs for a project. ## Expected Directory Structure project │ ... │ └───.cbmc-batch │ │ ... │ │ │ └───jobs │ └───job1 │ │ | Makefile │ │ | cbmc-batch.yaml │ └───job2 │ │ | Makefile │ │ | cbmc-batch.yaml │ ... It is expected that the repository contains a directory `.cbmc-batch`, which itself contains a directory `jobs`. 
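For illustration only (the job name, flags, and goto binary below are hypothetical, not prescribed by CBMC Batch), a single job's `cbmc-batch.yaml` might look something like the following; the `ci-yaml` rule in `proofs/Makefile.cbmc_batch` elsewhere in this tree emits a file with the same keys:

    # .cbmc-batch/jobs/job1/cbmc-batch.yaml (hypothetical example)
    cbmcflags: "--bounds-check;--pointer-check"
    expected: "SUCCESSFUL"
    goto: job1.goto
    jobos: ubuntu16
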
Each directory in `.cbmc-batch/jobs` should correspond to a CBMC Batch job. Each job directory must contain a `Makefile` to be used by CBMC Batch to build the goto for CBMC and a `cbmc-batch.yaml` file to provide CBMC Batch options and provide an expected substring in the result of the CBMC run. ## Running Locally In order to start the CBMC Batch jobs and check results locally, you need to have installed CBMC Batch. You can start the CBMC Batch jobs locally by running bash cbmc-batch.sh --start You can then check CBMC Batch results locally by running bash cbmc-batch.sh --end This will run until all the jobs have finished and output results in `results.txt`. You can clean up the local CBMC Batch bookkeeping files by running bash cbmc-batch.sh --cleanup aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/000077500000000000000000000000001456575232400255315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/README.md000066400000000000000000000003511456575232400270070ustar00rootroot00000000000000CBMC proof include files ======================== This directory contains include files written for CBMC proof. It is common to write some code to model aspects of the system under test, and the header files for this code go here. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/aws/000077500000000000000000000000001456575232400263235ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/aws/common/000077500000000000000000000000001456575232400276135ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/aws/common/config.h000066400000000000000000000004351456575232400312330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ // Disable all compiler, and go to bare C #undef AWS_CRYPTOSDK_P_USE_X86_64_ASM #undef AWS_CRYPTOSDK_P_SPECTRE_MITIGATIONS #undef AWS_CRYPTOSDK_P_HAVE_BUILTIN_EXPECT aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/proof_helpers/000077500000000000000000000000001456575232400304005ustar00rootroot00000000000000aws_byte_cursor_read_common.h000066400000000000000000000033161456575232400362520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/proof_helpers/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_read_common_harness() { /* parameters */ struct aws_byte_cursor cur; DEST_TYPE *dest = malloc(sizeof(*dest)); /* assumptions */ ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); __CPROVER_assume(cur.len >= BYTE_WIDTH); __CPROVER_assume(AWS_MEM_IS_READABLE(cur.ptr, BYTE_WIDTH)); __CPROVER_assume(dest != NULL); /* save current state of the data structure */ struct aws_byte_cursor old_cur = cur; struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); DEST_TYPE dest_copy; memcpy(&dest_copy, old_cur.ptr, BYTE_WIDTH); dest_copy = AWS_NTOH(dest_copy); /* operation under verification */ if (BYTE_CURSOR_READ(&cur, dest)) { assert_bytes_match((uint8_t *)&dest_copy, (uint8_t *)dest, BYTE_WIDTH); /* the following assertions are included, because aws_byte_cursor_read internally uses * aws_byte_cursor_advance_nospec and it copies the bytes from the advanced cursor to *dest */ assert(BYTE_WIDTH <= old_cur.len && old_cur.len <= (SIZE_MAX >> 1)); assert(cur.ptr == old_cur.ptr + BYTE_WIDTH); assert(cur.len == old_cur.len - BYTE_WIDTH); } else { assert(cur.len == old_cur.len); if (cur.len != 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } } /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); } make_common_data_structures.h000066400000000000000000000122751456575232400362620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/proof_helpers/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include #include #include #include #include #include #include /* Assume valid memory for ptr, if count > 0 and count < MAX_MALLOC. 
*/ #define ASSUME_VALID_MEMORY_COUNT(ptr, count) \ do { \ ptr = malloc(sizeof(*(ptr)) * (count)); \ __CPROVER_assume(ptr != NULL); \ } while (0) #define ASSUME_VALID_MEMORY(ptr) ASSUME_VALID_MEMORY_COUNT(ptr, sizeof(*(ptr))) #define ASSUME_DEFAULT_ALLOCATOR(allocator) allocator = aws_default_allocator() /* * Checks whether aws_byte_buf is bounded by max_size */ bool aws_byte_buf_is_bounded(const struct aws_byte_buf *const buf, const size_t max_size); /* * Checks whether aws_byte_buf has the correct allocator */ bool aws_byte_buf_has_allocator(const struct aws_byte_buf *const buf); /* * Ensures aws_byte_buf has a proper allocated buffer member */ void ensure_byte_buf_has_allocated_buffer_member(struct aws_byte_buf *const buf); /* * Ensures aws_ring_buffer has proper allocated members */ void ensure_ring_buffer_has_allocated_members(struct aws_ring_buffer *ring_buf, const size_t size); /* * Checks whether aws_byte_cursor is bounded by max_size */ bool aws_byte_cursor_is_bounded(const struct aws_byte_cursor *const cursor, const size_t max_size); /* * Ensure a byte_buf is allocated within a ring_buf (a relational invariant) */ void ensure_byte_buf_has_allocated_buffer_member_in_ring_buf( struct aws_byte_buf *buf, struct aws_ring_buffer *ring_buf); /* * Ensures aws_byte_cursor has a proper allocated buffer member */ void ensure_byte_cursor_has_allocated_buffer_member(struct aws_byte_cursor *const cursor); /* * Checks whether aws_array_list is bounded by max_initial_item_allocation and max_item_size */ bool aws_array_list_is_bounded( const struct aws_array_list *const list, const size_t max_initial_item_allocation, const size_t max_item_size); /** * Ensures the data member of an aws_array_list structure is correctly allocated */ void ensure_array_list_has_allocated_data_member(struct aws_array_list *const list); /** * Ensures that the aws_linked_list [list] is correctly allocated */ void ensure_linked_list_is_allocated(struct aws_linked_list *list, size_t max_length); /* * Checks whether aws_priority_queue is bounded by max_initial_item_allocation and max_item_size */ bool aws_priority_queue_is_bounded( const struct aws_priority_queue *const queue, const size_t max_initial_item_allocation, const size_t max_item_size); /** * Ensures members of an aws_priority_queue structure are correctly * allocated. */ void ensure_priority_queue_has_allocated_members(struct aws_priority_queue *const queue); /* * Ensures aws_hash_table has a proper allocated p_impl member */ void ensure_allocated_hash_table(struct aws_hash_table *map, size_t max_table_entries); /* * Ensures aws_hash_table has destroy function pointers that are enther null or valid */ void ensure_hash_table_has_valid_destroy_functions(struct aws_hash_table *map); /** * A correct hash table has max_load < size. This means that there is always one slot empty. * These functions are useful for assuming that there is some (nondet) slot which is empty * which is necessary to prove termination for hash-table deletion code. Should only be used inside * an assume because of the way it does nondet. */ bool aws_hash_table_has_an_empty_slot(const struct aws_hash_table *const map, size_t *const rval); bool hash_table_state_has_an_empty_slot(const struct hash_table_state *const state, size_t *const rval); /** * A correct implementation of the hash_destroy function should never have a memory * error on valid input. There is the question of whether the destroy functions themselves * are correctly called (i.e. only on valid input, no double free, etc.). 
Testing this would * require a stronger function here. */ void hash_proof_destroy_noop(void *p); /** * Ensures a valid string is allocated, with as much nondet as possible */ struct aws_string *ensure_string_is_allocated_nondet_length(); /** * Ensures a valid string is allocated, with as much nondet as possible, but len < max */ struct aws_string *nondet_allocate_string_bounded_length(size_t max_size); /** * Ensures a valid string is allocated, with as much nondet as possible, but fixed defined len */ struct aws_string *ensure_string_is_allocated(size_t size); /** * Ensures a valid const string is allocated, with as much nondet as possible, len < max_size */ const char *ensure_c_str_is_allocated(size_t max_size); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/proof_helpers/nondet.h000066400000000000000000000006771456575232400320520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include /** * Non-determinstic functions used in CBMC proofs */ bool nondet_bool(); int nondet_int(); size_t nondet_size_t(); uint16_t nondet_uint16_t(); uint32_t nondet_uint32_t(); uint64_t nondet_uint64_t(); uint8_t nondet_uint8_t(); void *nondet_voidp(); ring_buffer_abstract_states.h000066400000000000000000000023471456575232400362360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/proof_helpers/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once /** * Ring buffer is empty */ bool is_empty_state(struct aws_ring_buffer *ring_buf) { uint8_t *head = aws_atomic_load_ptr(&ring_buf->head); uint8_t *tail = aws_atomic_load_ptr(&ring_buf->tail); return tail == head; } /** * Ring buffer is valid [allocation==tail...head) */ bool is_front_valid_state(struct aws_ring_buffer *ring_buf) { uint8_t *head = aws_atomic_load_ptr(&ring_buf->head); uint8_t *tail = aws_atomic_load_ptr(&ring_buf->tail); return ring_buf->allocation == tail && tail < head; } /** * Ring buffer is valid [allocationhead); uint8_t *tail = aws_atomic_load_ptr(&ring_buf->tail); return ring_buf->allocation < tail && tail < head; } /** * Ring buffer is valid [allocation...head) and [tail...allocation_end) */ bool is_ends_valid_state(struct aws_ring_buffer *ring_buf) { uint8_t *head = aws_atomic_load_ptr(&ring_buf->head); uint8_t *tail = aws_atomic_load_ptr(&ring_buf->tail); return tail > head; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/include/proof_helpers/utils.h000066400000000000000000000114321456575232400317120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include /** * CBMC has an internal representation in which each object has an index and a (signed) offset * A buffer cannot be larger than the max size of the offset * The Makefile is expected to set CBMC_OBJECT_BITS to the value of --object-bits */ #define MAX_MALLOC (SIZE_MAX >> (CBMC_OBJECT_BITS + 1)) // The magical name __CPROVER_uninterpreted_* causes CBMC to give us an // uninterpreted function uint64_t __CPROVER_uninterpreted_hasher(void *); #define IMPLIES(a, b) (!(a) || (b)) struct store_byte_from_buffer { size_t index; uint8_t byte; }; /** * Asserts whether all bytes from two arrays of same length match. 
*/ void assert_bytes_match(const uint8_t *const a, const uint8_t *const b, const size_t len); /** * Asserts whether all bytes from an array are equal to c. */ void assert_all_bytes_are(const uint8_t *const a, const uint8_t c, const size_t len); /** * Asserts whether all bytes from an array are equal to 0. */ void assert_all_zeroes(const uint8_t *const a, const size_t len); /** * Asserts whether the byte in storage correspond to the byte in the same position in buffer. */ void assert_byte_from_buffer_matches(const uint8_t *const buffer, const struct store_byte_from_buffer *const b); /** * Nondeterministically selects a byte from array and stores it into a store_array_list_byte * structure. Afterwards, one can prove using the assert_array_list_equivalence function * whether no byte in the array has changed. */ void save_byte_from_array(const uint8_t *const array, const size_t size, struct store_byte_from_buffer *const storage); /** * Asserts two aws_array_list structures are equivalent. Prior to using this function, * it is necessary to select a non-deterministic byte from the rhs aws_array_list structure * (use save_byte_from_array function), so it can properly assert all bytes match. */ void assert_array_list_equivalence( const struct aws_array_list *const lhs, const struct aws_array_list *const rhs, const struct store_byte_from_buffer *const rhs_byte); /** * Asserts two aws_byte_buf structures are equivalent. In order to be considered equivalent, * all member from both structures must match (i.e., len, *buffer, capacity, and *allocator), * including all bytes from its underlying buffers. Prior to using this function, * it is necessary to select a non-deterministic byte from the rhs aws_byte_buf structure * (use save_byte_from_array function), so it can properly assert all bytes match. */ void assert_byte_cursor_equivalence( const struct aws_byte_cursor *const lhs, const struct aws_byte_cursor *const rhs, const struct store_byte_from_buffer *const rhs_byte); /** * Asserts two aws_byte_cursor structures are equivalent. Prior to using this function, * it is necessary to select a non-deterministic byte from the rhs aws_byte_cursor structure * (use save_byte_from_array function), so it can properly assert all bytes match. */ void assert_byte_buf_equivalence( const struct aws_byte_buf *const lhs, const struct aws_byte_buf *const rhs, const struct store_byte_from_buffer *const rhs_byte); /** * Nondeterministically selects a byte from a hash_table implementation and stores it into a * store_array_list_byte structure. */ void save_byte_from_hash_table(const struct aws_hash_table *map, struct store_byte_from_buffer *storage); /** * Checks that a no bytes in the hash_table have changed from when "storage" was stored. */ void check_hash_table_unchanged(const struct aws_hash_table *map, const struct store_byte_from_buffer *storage); /** * Standard stub function to compare two items. */ int nondet_compare(const void *const a, const void *const b); /** * Standard stub function to compare two items. */ int uninterpreted_compare(const void *const a, const void *const b); /** * Standard stub function to compare two items. */ bool nondet_equals(const void *const a, const void *const b); /** * Standard stub function to compare two items. * Also enforces uninterpreted_hasher() to be equal for equal values. 
*/ bool uninterpreted_equals(const void *const a, const void *const b); /** * uninterpreted_equals(), but with an extra assertion that a and b are both not null */ bool uninterpreted_equals_assert_inputs_nonnull(const void *const a, const void *const b); /** * Standard stub function to hash one item. */ uint64_t nondet_hasher(const void *a); /** * Standard stub function to hash one item. */ uint64_t uninterpreted_hasher(const void *a); /** * Standard stub function of a predicate */ bool uninterpreted_predicate_fn(uint8_t value); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/000077500000000000000000000000001456575232400254165ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile-project-defines000066400000000000000000000030471456575232400321410ustar00rootroot00000000000000# -*- mode: makefile -*- # The first line sets the emacs major mode to Makefile # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 ################################################################ # Use this file to give project-specific definitions of the command # line arguments to pass to CBMC tools like goto-cc to build the goto # binaries and cbmc to do the property and coverage checking. # # Use this file to override most default definitions of variables in # Makefile.common. ################################################################ # Flags to pass to goto-cc for compilation (typically those passed to gcc -c) # COMPILE_FLAGS = # Flags to pass to goto-cc for linking (typically those passed to gcc) # LINK_FLAGS = # Preprocessor include paths -I... # Consider adding # INCLUDES += -I$(CBMC_ROOT)/include # You will want to decide what order that comes in relative to the other # include directories in your project. # INCLUDES += -I$(CBMC_ROOT)/include INCLUDES += -I$(SRCDIR)/include # Preprocessor definitions -D... # Enables costly checks (e.g. ones that contain loops). # Don't execute deep checks by default. AWS_DEEP_CHECKS ?= 0 DEFINES += -DAWS_DEEP_CHECKS=$(AWS_DEEP_CHECKS) # Extra CBMC flags not enabled by Makefile.common # CHECKFLAGS += --enum-range-check CHECKFLAGS += --pointer-primitive-check # We override abort() to be assert(0), as it is not caught by # CBMC as a violation PROOF_SOURCES += $(PROOF_STUB)/abort_override_assert_false.c REMOVE_FUNCTION_BODY += abort aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile-project-targets000066400000000000000000000006741456575232400322000ustar00rootroot00000000000000# -*- mode: makefile -*- # The first line sets the emacs major mode to Makefile # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 ################################################################ # Use this file to give project-specific targets, including targets # that may depend on targets defined in Makefile.common. ################################################################ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile-project-testing000066400000000000000000000007471456575232400322050ustar00rootroot00000000000000# -*- mode: makefile -*- # The first line sets the emacs major mode to Makefile # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 ################################################################ # Use this file to define project-specific targets and definitions for # unit testing or continuous integration that may depend on targets # defined in Makefile.common ################################################################ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile-template-defines000066400000000000000000000006741456575232400323110ustar00rootroot00000000000000 # Absolute path to the root of the source tree. # SRCDIR ?= $(abspath $(PROOF_ROOT)/../../..) # Absolute path to the litani script. # LITANI ?= litani # Name of this proof project, displayed in proof reports. For example, # "s2n" or "Amazon FreeRTOS". For projects with multiple proof roots, # this may be overridden on the command-line to Make, for example # # make PROJECT_NAME="FreeRTOS MQTT" report # PROJECT_NAME = "AWS C Common" aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile.aws_array_list000066400000000000000000000007561456575232400321100ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########## # Sufficiently long to get full coverage on the aws_array_list APIs # short enough that all proofs complete quickly MAX_ITEM_SIZE ?= 2 # Necessary to get full coverage when using functions from math.h MAX_INITIAL_ITEM_ALLOCATION ?= 9223372036854775808ULL DEFINES += -DMAX_ITEM_SIZE=$(MAX_ITEM_SIZE) DEFINES += -DMAX_INITIAL_ITEM_ALLOCATION=$(MAX_INITIAL_ITEM_ALLOCATION) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile.aws_byte_buf000066400000000000000000000005231456575232400315260ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########## # Sufficently long to get full coverage on the aws_byte_buf and aws_byte_cursor APIs # short enough that all proofs complete in less than a minute MAX_BUFFER_SIZE ?= 10 DEFINES += -DMAX_BUFFER_SIZE=$(MAX_BUFFER_SIZE) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile.aws_hash_table000066400000000000000000000005071456575232400320230ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########## MAX_TABLE_SIZE ?= 4 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) #A table has 10 words for the struct, plus 3 words for each entry TABLE_SIZE_IN_WORDS=$(shell echo $$(($$((3 * $(MAX_TABLE_SIZE))) + 10))) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile.aws_linked_list000066400000000000000000000004021456575232400322240ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########## # Arbitrary limit MAX_LINKED_LIST_ITEM_ALLOCATION ?= 20 DEFINES += -DMAX_LINKED_LIST_ITEM_ALLOCATION=$(MAX_LINKED_LIST_ITEM_ALLOCATION) Makefile.aws_priority_queue_sift000066400000000000000000000005021456575232400337570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
################################# MAX_PRIORITY_QUEUE_ITEMS ?= 5 # This should be the ceil(1 + log2(MAX_PRIORITY_QUEUE_ITEMS)) MAX_HEAP_HEIGHT ?= 3 DEFINES += -DMAX_PRIORITY_QUEUE_ITEMS=$(MAX_PRIORITY_QUEUE_ITEMS) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile.aws_string000066400000000000000000000004471456575232400312420ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########## # Sufficently long to get 100% coverage on the string APIs # short enough that all proofs complete quickly MAX_STRING_LEN ?= 16 DEFINES += -DMAX_STRING_LEN=$(MAX_STRING_LEN) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile.cbmc_batch000066400000000000000000000040741456575232400311270ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. .PHONY: batch-yaml ci-yaml ################################################################ # Launching cbmc on cbmc-batch BATCH ?= cbmc-batch BATCHFLAGS ?= \ --batchpkg $(BATCHPKG) \ --blddir $(SRCDIR) \ --bucket $(BUCKET) \ --cbmcflags $(call encode_options,$(CBMCFLAGS)) \ --cbmcpkg $(CBMCPKG) \ --coverage-memory $(COVMEM) \ --goto $(ENTRY).goto \ --jobprefix $(ENTRY) \ --no-build \ --no-copysrc \ --property-memory $(PROPMEM) \ --srcdir $(SRCDIR) \ --viewerpkg $(VIEWERPKG) \ --wsdir $(WS) BATCHPKG ?= cbmc-batch.tar.gz BUCKET ?= cbmc CBMCPKG ?= cbmc.tar.gz COVMEM ?= 64000 define encode_options '=$(shell echo $(1) | sed 's/ ,/ /g' | sed 's/ /;/g')=' endef PROPMEM ?= 64000 VIEWERPKG ?= cbmc-viewer.tar.gz WS ?= ws define yaml_encode_options "$(shell echo $(1) | sed 's/ ,/ /g' | sed 's/ /;/g')" endef $(ENTRY).yaml: $(ENTRY).goto Makefile echo 'batchpkg: $(BATCHPKG)' > $@ echo 'build: true' >> $@ echo 'cbmcflags: $(call yaml_encode_options,$(CBMCFLAGS))' >> $@ echo 'cbmcpkg: $(CBMCPKG)' >> $@ echo 'coverage_memory: $(COVMEM)' >> $@ echo 'expected: "SUCCESSFUL"' >> $@ echo 'goto: $(ENTRY).goto' >> $@ echo 'jobos: ubuntu16' >> $@ echo 'property_memory: $(PROPMEM)' >> $@ echo 'viewerpkg: $(VIEWERPKG)' >> $@ batch-yaml: $(ENTRY).yaml cbmc-batch.yaml: $(ENTRY).goto Makefile echo 'cbmcflags: $(strip $(call yaml_encode_options,$(CBMCFLAGS)))' > $@ echo 'expected: "SUCCESSFUL"' >> $@ echo 'goto: $(ENTRY).goto' >> $@ echo 'jobos: ubuntu16' >> $@ ci-yaml: cbmc-batch.yaml launch: $(ENTRY).goto Makefile mkdir -p $(WS) cp $(ENTRY).goto $(WS) $(BATCH) $(BATCHFLAGS) launch-clean: for d in $(ENTRY)*; do \ if [ -d $$d ]; then \ for f in $$d.json $$d.yaml Makefile-$$d; do \ if [ -f $$f ]; then mv $$f $$d; fi \ done\ fi \ done $(RM) Makefile-$(ENTRY)-[0-7]*-[0-7]* $(RM) $(ENTRY)-[0-7]*-[0-7]*.json $(ENTRY)-[0-7]*-[0-7]*.yaml $(RM) -r $(WS) launch-veryclean: launch-clean $(RM) -r $(ENTRY)-[0-7]*-[0-7]* aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/Makefile.common000066400000000000000000001054251456575232400303540ustar00rootroot00000000000000# -*- mode: makefile -*- # The first line sets the emacs major mode to Makefile # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 CBMC_STARTER_KIT_VERSION = CBMC starter kit 2.5 ################################################################ # The CBMC Starter Kit depends on the files Makefile.common and # run-cbmc-proofs.py. They are installed by the setup script # cbmc-starter-kit-setup and updated to the latest version by the # update script cbmc-starter-kit-update. 
For more information about # the starter kit and these files and these scripts, see # https://model-checking.github.io/cbmc-starter-kit # # Makefile.common implements what we consider to be some best # practices for using cbmc for software verification. # # Section I gives default values for a large number of Makefile # variables that control # * how your code is built (include paths, etc), # * what program transformations are applied to your code (loop # unwinding, etc), and # * what properties cbmc checks for in your code (memory safety, etc). # # These variables are defined below with definitions of the form # VARIABLE ?= DEFAULT_VALUE # meaning VARIABLE is set to DEFAULT_VALUE if VARIABLE has not already # been given a value. # # For your project, you can override these default values with # project-specific definitions in Makefile-project-defines. # # For any individual proof, you can override these default values and # project-specific values with proof-specific definitions in the # Makefile for your proof. # # The definitions in the proof Makefile override definitions in the # project Makefile-project-defines which override definitions in this # Makefile.common. # # Section II uses the values defined in Section I to build your code, run # your proof, and build a report of your results. You should not need # to modify or override anything in Section II, but you may want to # read it to understand how the values defined in Section I control # things. # # To use Makefile.common, set variables as described above as needed, # and then for each proof, # # * Create a subdirectory . # * Write a proof harness (a function) with the name # in a file with the name /.c # * Write a makefile with the name /Makefile that looks # something like # # HARNESS_FILE= # HARNESS_ENTRY= # PROOF_UID= # # PROJECT_SOURCES += $(SRCDIR)/libraries/api_1.c # PROJECT_SOURCES += $(SRCDIR)/libraries/api_2.c # # PROOF_SOURCES += $(PROOFDIR)/harness.c # PROOF_SOURCES += $(SRCDIR)/cbmc/proofs/stub_a.c # PROOF_SOURCES += $(SRCDIR)/cbmc/proofs/stub_b.c # # UNWINDSET += foo.0:3 # UNWINDSET += bar.1:6 # # REMOVE_FUNCTION_BODY += api_stub_a # REMOVE_FUNCTION_BODY += api_stub_b # # DEFINES = -DDEBUG=0 # # include ../Makefile.common # # * Change directory to and run make # # The proof setup script cbmc-starter-kit-setup-proof from the CBMC # Starter Kit will do most of this for, creating a directory and # writing a basic Makefile and proof harness into it that you can edit # as described above. # # Warning: If you get results that are hard to explain, consider # running "make clean" or "make veryclean" before "make" if you get # results that are hard to explain. Dependency handling in this # Makefile.common may not be perfect. SHELL=/bin/bash default: report ################################################################ ################################################################ ## Section I: This section gives common variable definitions. ## ## Override these definitions in Makefile-project-defines or ## your proof Makefile. ## ## Remember that Makefile.common and Makefile-project-defines are ## included into the proof Makefile in your proof directory, so all ## relative pathnames defined there should be relative to your proof ## directory. 
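#
# The harness itself is an ordinary C function. As an illustrative sketch
# only (the names below are placeholders, not part of this project), it
# usually has the shape:
#
#   void my_api_harness() {
#       /* build nondeterministic inputs and __CPROVER_assume() the preconditions */
#       /* call the function under verification */
#       /* assert() the expected postconditions */
#   }
#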
################################################################ # Define the layout of the source tree and the proof subtree # # Generally speaking, # # SRCDIR = the root of the repository # CBMC_ROOT = /srcdir/cbmc # PROOF_ROOT = /srcdir/cbmc/proofs # PROOF_SOURCE = /srcdir/cbmc/sources # PROOF_INCLUDE = /srcdir/cbmc/include # PROOF_STUB = /srcdir/cbmc/stubs # PROOFDIR = the directory containing the Makefile for your proof # # The path /srcdir/cbmc used in the example above is determined by the # setup script cbmc-starter-kit-setup. Projects usually create a cbmc # directory somewhere in the source tree, and run the setup script in # that directory. The value of CBMC_ROOT becomes the absolute path to # that directory. # # The location of that cbmc directory in the source tree affects the # definition of SRCDIR, which is defined in terms of the relative path # from a proof directory to the repository root. The definition is # usually determined by the setup script cbmc-starter-kit-setup and # written to Makefile-template-defines, but you can override it for a # project in Makefile-project-defines and for a specific proof in the # Makefile for the proof. # Absolute path to the directory containing this Makefile.common # See https://ftp.gnu.org/old-gnu/Manuals/make-3.80/html_node/make_17.html # # Note: We compute the absolute paths to the makefiles in MAKEFILE_LIST # before we filter the list of makefiles for %/Makefile.common. # Otherwise an invocation of the form "make -f Makefile.common" will set # MAKEFILE_LIST to "Makefile.common" which will fail to match the # pattern %/Makefile.common. # MAKEFILE_PATHS = $(foreach makefile,$(MAKEFILE_LIST),$(abspath $(makefile))) PROOF_ROOT = $(dir $(filter %/Makefile.common,$(MAKEFILE_PATHS))) CBMC_ROOT = $(shell dirname $(PROOF_ROOT)) PROOF_SOURCE = $(CBMC_ROOT)/sources PROOF_INCLUDE = $(CBMC_ROOT)/include PROOF_STUB = $(CBMC_ROOT)/stubs # Project-specific definitions to override default definitions below # * Makefile-project-defines will never be overwritten # * Makefile-template-defines may be overwritten when the starter # kit is updated sinclude $(PROOF_ROOT)/Makefile-project-defines sinclude $(PROOF_ROOT)/Makefile-template-defines # SRCDIR is the path to the root of the source tree # This is a default definition that is frequently overridden in # another Makefile, see the discussion of SRCDIR above. SRCDIR ?= $(abspath ../..) # PROOFDIR is the path to the directory containing the proof harness PROOFDIR ?= $(abspath .) ################################################################ # Define how to run CBMC # Do property checking with the external SAT solver given by # EXTERNAL_SAT_SOLVER. Do coverage checking with the default solver, # since coverage checking requires the use of an incremental solver. # The EXTERNAL_SAT_SOLVER variable is typically set (if it is at all) # as an environment variable or as a makefile variable in # Makefile-project-defines. # # For a particular proof, if the default solver is faster, do property # checking with the default solver by including this definition in the # proof Makefile: # USE_EXTERNAL_SAT_SOLVER = # ifneq ($(strip $(EXTERNAL_SAT_SOLVER)),) USE_EXTERNAL_SAT_SOLVER ?= --external-sat-solver $(EXTERNAL_SAT_SOLVER) endif CHECKFLAGS += $(USE_EXTERNAL_SAT_SOLVER) # Job pools # For version of Litani that are new enough (where `litani print-capabilities` # prints "pools"), proofs for which `EXPENSIVE = true` is set can be added to a # "job pool" that restricts how many expensive proofs are run at a time. 
All # other proofs will be built in parallel as usual. # # In more detail: all compilation, instrumentation, and report jobs are run with # full parallelism as usual, even for expensive proofs. The CBMC jobs for # non-expensive proofs are also run in parallel. The only difference is that the # CBMC safety checks and coverage checks for expensive proofs are run with a # restricted parallelism level. At any one time, only N of these jobs are run at # once, amongst all the proofs. # # To configure N, Litani needs to be initialized with a pool called "expensive". # For example, to only run two CBMC safety/coverage jobs at a time from amongst # all the proofs, you would initialize litani like # litani init --pools expensive:2 # The run-cbmc-proofs.py script takes care of this initialization through the # --expensive-jobs-parallelism flag. # # To enable this feature, set # the ENABLE_POOLS variable when running Make, like # `make ENABLE_POOLS=true report` # The run-cbmc-proofs.py script takes care of this through the # --restrict-expensive-jobs flag. ifeq ($(strip $(ENABLE_POOLS)),) POOL = else ifeq ($(strip $(EXPENSIVE)),) POOL = else POOL = --pool expensive endif # Similar to the pool feature above. If Litani is new enough, enable # profiling CBMC's memory use. ifeq ($(strip $(ENABLE_MEMORY_PROFILING)),) MEMORY_PROFILING = else MEMORY_PROFILING = --profile-memory endif # Property checking flags # # Each variable below controls a specific property checking flag # within CBMC. If desired, a property flag can be disabled within # a particular proof by nulling the corresponding variable. For # instance, the following line: # # CHECK_FLAG_POINTER_CHECK = # # would disable the --pointer-check CBMC flag within: # * an entire project when added to Makefile-project-defines # * a specific proof when added to the harness Makefile CBMC_FLAG_MALLOC_MAY_FAIL ?= --malloc-may-fail CBMC_FLAG_MALLOC_FAIL_NULL ?= --malloc-fail-null CBMC_FLAG_BOUNDS_CHECK ?= --bounds-check CBMC_FLAG_CONVERSION_CHECK ?= --conversion-check CBMC_FLAG_DIV_BY_ZERO_CHECK ?= --div-by-zero-check CBMC_FLAG_FLOAT_OVERFLOW_CHECK ?= --float-overflow-check CBMC_FLAG_NAN_CHECK ?= --nan-check CBMC_FLAG_POINTER_CHECK ?= --pointer-check CBMC_FLAG_POINTER_OVERFLOW_CHECK ?= --pointer-overflow-check CBMC_FLAG_POINTER_PRIMITIVE_CHECK ?= --pointer-primitive-check CBMC_FLAG_SIGNED_OVERFLOW_CHECK ?= --signed-overflow-check CBMC_FLAG_UNDEFINED_SHIFT_CHECK ?= --undefined-shift-check CBMC_FLAG_UNSIGNED_OVERFLOW_CHECK ?= --unsigned-overflow-check CBMC_FLAG_UNWINDING_ASSERTIONS ?= --unwinding-assertions CBMC_FLAG_UNWIND ?= --unwind 1 CBMC_FLAG_FLUSH ?= --flush # CBMC flags used for property checking and coverage checking CBMCFLAGS += $(CBMC_FLAG_UNWIND) $(CBMC_UNWINDSET) $(CBMC_FLAG_FLUSH) # CBMC flags used for property checking CHECKFLAGS += $(CBMC_FLAG_MALLOC_MAY_FAIL) CHECKFLAGS += $(CBMC_FLAG_MALLOC_FAIL_NULL) CHECKFLAGS += $(CBMC_FLAG_BOUNDS_CHECK) CHECKFLAGS += $(CBMC_FLAG_CONVERSION_CHECK) CHECKFLAGS += $(CBMC_FLAG_DIV_BY_ZERO_CHECK) CHECKFLAGS += $(CBMC_FLAG_FLOAT_OVERFLOW_CHECK) CHECKFLAGS += $(CBMC_FLAG_NAN_CHECK) CHECKFLAGS += $(CBMC_FLAG_POINTER_CHECK) CHECKFLAGS += $(CBMC_FLAG_POINTER_OVERFLOW_CHECK) CHECKFLAGS += $(CBMC_FLAG_POINTER_PRIMITIVE_CHECK) CHECKFLAGS += $(CBMC_FLAG_SIGNED_OVERFLOW_CHECK) CHECKFLAGS += $(CBMC_FLAG_UNDEFINED_SHIFT_CHECK) CHECKFLAGS += $(CBMC_FLAG_UNSIGNED_OVERFLOW_CHECK) CHECKFLAGS += $(CBMC_FLAG_UNWINDING_ASSERTIONS) # CBMC flags used for coverage checking COVERFLAGS += $(CBMC_FLAG_MALLOC_MAY_FAIL) COVERFLAGS += 
$(CBMC_FLAG_MALLOC_FAIL_NULL) # Additional CBMC flag to CBMC control verbosity. # # Meaningful values are # 0 none # 1 only errors # 2 + warnings # 4 + results # 6 + status/phase information # 8 + statistical information # 9 + progress information # 10 + debug info # # Uncomment the following line or set in Makefile-project-defines # CBMC_VERBOSITY ?= --verbosity 4 # Additional CBMC flag to control how CBMC treats static variables. # # NONDET_STATIC is a list of flags of the form --nondet-static # and --nondet-static-exclude VAR. The --nondet-static flag causes # CBMC to initialize static variables with unconstrained value # (ignoring initializers and default zero-initialization). The # --nondet-static-exclude VAR excludes VAR for the variables # initialized with unconstrained values. NONDET_STATIC ?= # Flags to pass to goto-cc for compilation and linking COMPILE_FLAGS ?= -Wall LINK_FLAGS ?= -Wall EXPORT_FILE_LOCAL_SYMBOLS ?= --export-file-local-symbols # Preprocessor include paths -I... INCLUDES ?= # Preprocessor definitions -D... DEFINES ?= # CBMC object model # # CBMC_OBJECT_BITS is the number of bits in a pointer CBMC uses for # the id of the object to which a pointer is pointing. CBMC uses 8 # bits for the object id by default. The remaining bits in the pointer # are used for offset into the object. This limits the size of the # objects that CBMC can model. This Makefile defines this bound on # object size to be CBMC_MAX_OBJECT_SIZE. You are likely to get # unexpected results if you try to malloc an object larger than this # bound. CBMC_OBJECT_BITS ?= 8 # CBMC loop unwinding (Normally set in the proof Makefile) # # UNWINDSET is a list of pairs of the form foo.1:4 meaning that # CBMC should unwind loop 1 in function foo no more than 4 times. # For historical reasons, the number 4 is one more than the number # of times CBMC actually unwinds the loop. UNWINDSET ?= # CBMC early loop unwinding (Normally set in the proof Makefile) # # Most users can ignore this variable. # # This variable exists to support the use of loop and function # contracts, two features under development for CBMC. Checking the # assigns clause for function contracts and loop invariants currently # assumes loop-free bodies for loops and functions with contracts # (possibly after replacing nested loops with their own loop # contracts). To satisfy this requirement, it may be necessary to # unwind some loops before the function contract and loop invariant # transformations are applied to the goto program. This variable # EARLY_UNWINDSET is identical to UNWINDSET, and we assume that the # loops mentioned in EARLY_UNWINDSET and UNWINDSET are disjoint. EARLY_UNWINDSET ?= # CBMC function removal (Normally set set in the proof Makefile) # # REMOVE_FUNCTION_BODY is a list of function names. CBMC will "undefine" # the function, and CBMC will treat the function as having no side effects # and returning an unconstrained value of the appropriate return type. # The list should include the names of functions being stubbed out. REMOVE_FUNCTION_BODY ?= # CBMC function pointer restriction (Normally set in the proof Makefile) # # RESTRICT_FUNCTION_POINTER is a list of function pointer restriction # instructions of the form: # # .function_pointer_call./[,]* # # The function pointer call number in the specified function gets # rewritten to a case switch over a finite list of functions. # If some possible target functions are omitted from the list a counter # example trace will be found by CBMC, i.e. the transformation is sound. 
# If the target functions are file-local symbols, then mangled names must # be used. RESTRICT_FUNCTION_POINTER ?= # The project source files (Normally set set in the proof Makefile) # # PROJECT_SOURCES is the list of project source files to compile, # including the source file defining the function under test. PROJECT_SOURCES ?= # The proof source files (Normally set in the proof Makefile) # # PROOF_SOURCES is the list of proof source files to compile, including # the proof harness, and including any function stubs being used. PROOF_SOURCES ?= # The number of seconds that CBMC should be allowed to run for before # being forcefully terminated. Currently, this is set to be less than # the time limit for a CodeBuild job, which is eight hours. If a proof # run takes longer than the time limit of the CI environment, the # environment will halt the proof run without updating the Litani # report, making the proof run appear to "hang". CBMC_TIMEOUT ?= 21600 # Proof writers could add function contracts in their source code. # These contracts are ignored by default, but may be enabled in two distinct # contexts using the following two variables: # 1. To check whether one or more function contracts are sound with respect to # the function implementation, CHECK_FUNCTION_CONTRACTS should be a list of # function names. # 2. To replace calls to certain functions with their correspondent function # contracts, USE_FUNCTION_CONTRACTS should be a list of function names. # One must check separately whether a function contract is sound before # replacing it in calling contexts. CHECK_FUNCTION_CONTRACTS ?= CBMC_CHECK_FUNCTION_CONTRACTS := $(patsubst %,--enforce-contract %, $(CHECK_FUNCTION_CONTRACTS)) USE_FUNCTION_CONTRACTS ?= CBMC_USE_FUNCTION_CONTRACTS := $(patsubst %,--replace-call-with-contract %, $(USE_FUNCTION_CONTRACTS)) # Similarly, proof writers could also add loop contracts in their source code # to obtain unbounded correctness proofs. Unlike function contracts, loop # contracts are not reusable and thus are checked and used simultaneously. # These contracts are also ignored by default, but may be enabled by setting # the APPLY_LOOP_CONTRACTS variable to 1. APPLY_LOOP_CONTRACTS ?= 0 ifeq ($(APPLY_LOOP_CONTRACTS),1) CBMC_APPLY_LOOP_CONTRACTS ?= --apply-loop-contracts endif # Silence makefile output (eg, long litani commands) unless VERBOSE is set. ifndef VERBOSE MAKEFLAGS := $(MAKEFLAGS) -s endif ################################################################ ################################################################ ## Section II: This section defines the process of running a proof ## ## There should be no reason to edit anything below this line. 
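##
## The rules below compile the project and proof sources with goto-cc,
## apply a fixed sequence of goto-instrument transformations (producing
## the numbered intermediate goto binaries), run cbmc for property and
## coverage checking, and register every step as a Litani job so that the
## final report can be generated from the recorded results.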
################################################################ # Paths CBMC ?= cbmc GOTO_ANALYZER ?= goto-analyzer GOTO_CC ?= goto-cc GOTO_INSTRUMENT ?= goto-instrument CRANGLER ?= crangler VIEWER ?= cbmc-viewer MAKE_SOURCE ?= make-source VIEWER2 ?= cbmc-viewer CMAKE ?= cmake GOTODIR ?= $(PROOFDIR)/gotos LOGDIR ?= $(PROOFDIR)/logs PROJECT ?= project PROOF ?= proof HARNESS_GOTO ?= $(GOTODIR)/$(HARNESS_FILE) PROJECT_GOTO ?= $(GOTODIR)/$(PROJECT) PROOF_GOTO ?= $(GOTODIR)/$(PROOF) ################################################################ # Useful macros for values that are hard to reference SPACE :=$() $() COMMA :=, ################################################################ # Set C compiler defines CBMCFLAGS += --object-bits $(CBMC_OBJECT_BITS) COMPILE_FLAGS += --object-bits $(CBMC_OBJECT_BITS) DEFINES += -DCBMC=1 DEFINES += -DCBMC_OBJECT_BITS=$(CBMC_OBJECT_BITS) DEFINES += -DCBMC_MAX_OBJECT_SIZE="(SIZE_MAX>>(CBMC_OBJECT_BITS+1))" # CI currently assumes cbmc invocation has at most one --unwindset ifdef UNWINDSET ifneq ($(strip $(UNWINDSET)),"") CBMC_UNWINDSET := --unwindset $(subst $(SPACE),$(COMMA),$(strip $(UNWINDSET))) endif endif ifdef EARLY_UNWINDSET ifneq ($(strip $(EARLY_UNWINDSET)),"") CBMC_EARLY_UNWINDSET := --unwindset $(subst $(SPACE),$(COMMA),$(strip $(EARLY_UNWINDSET))) endif endif CBMC_REMOVE_FUNCTION_BODY := $(patsubst %,--remove-function-body %, $(REMOVE_FUNCTION_BODY)) CBMC_RESTRICT_FUNCTION_POINTER := $(patsubst %,--restrict-function-pointer %, $(RESTRICT_FUNCTION_POINTER)) ################################################################ # Targets for rewriting source files with crangler # Construct crangler configuration files # # REWRITTEN_SOURCES is a list of crangler output files source.i. # This target assumes that for each source.i # * source.i_SOURCE is the path to a source file, # * source.i_FUNCTIONS is a list of functions (may be empty) # * source.i_OBJECTS is a list of variables (may be empty) # This target constructs the crangler configuration file source.i.json # of the form # { # "sources": [ "/proj/code.c" ], # "includes": [ "/proj/include" ], # "defines": [ "VAR=1" ], # "functions": [ {"function_name": ["remove static"]} ], # "objects": [ {"variable_name": ["remove static"]} ], # "output": "source.i" # } # to remove the static attribute from function_name and variable_name # in the source file source.c and write the result to source.i. # # This target assumes that filenames include no spaces and that # the INCLUDES and DEFINES variables include no spaces after -I # and -D. For example, use "-DVAR=1" and not "-D VAR=1". # # Define *_SOURCE, *_FUNCTIONS, and *_OBJECTS in the proof Makefile. # The string source.i is usually an absolute path $(PROOFDIR)/code.i # to a file in the proof directory that contains the proof Makefile. # The proof Makefile usually includes the definitions # $(PROOFDIR)/code.i_SOURCE = /proj/code.c # $(PROOFDIR)/code.i_FUNCTIONS = function_name # $(PROOFDIR)/code.i_OBJECTS = variable_name # Because these definitions refer to PROOFDIR that is defined in this # Makefile.common, these definitions must appear after the inclusion # of Makefile.common in the proof Makefile. 
# $(foreach rs,$(REWRITTEN_SOURCES),$(eval $(rs).json: $($(rs)_SOURCE))) $(foreach rs,$(REWRITTEN_SOURCES),$(rs).json): echo '{'\ '"sources": ['\ '"$($(@:.json=)_SOURCE)"'\ '],'\ '"includes": ['\ '$(subst $(SPACE),$(COMMA),$(patsubst -I%,"%",$(strip $(INCLUDES))))' \ '],'\ '"defines": ['\ '$(subst $(SPACE),$(COMMA),$(patsubst -D%,"%",$(subst ",\",$(strip $(DEFINES)))))' \ '],'\ '"functions": ['\ '{'\ '$(subst ~, ,$(subst $(SPACE),$(COMMA),$(patsubst %,"%":["remove~static"],$($(@:.json=)_FUNCTIONS))))' \ '}'\ '],'\ '"objects": ['\ '{'\ '$(subst ~, ,$(subst $(SPACE),$(COMMA),$(patsubst %,"%":["remove~static"],$($(@:.json=)_OBJECTS))))' \ '}'\ '],'\ '"output": "$(@:.json=)"'\ '}' > $@ # Rewrite source files with crangler # $(foreach rs,$(REWRITTEN_SOURCES),$(eval $(rs): $(rs).json)) $(REWRITTEN_SOURCES): $(LITANI) add-job \ --command \ '$(CRANGLER) $@.json' \ --inputs $($@_SOURCE) \ --outputs $@ \ --stdout-file $(LOGDIR)/crangler-$(subst /,_,$(subst .,_,$@))-log.txt \ --interleave-stdout-stderr \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): removing static" ################################################################ # Build targets that make the relevant .goto files # Compile project sources $(PROJECT_GOTO)1.goto: $(PROJECT_SOURCES) $(REWRITTEN_SOURCES) $(LITANI) add-job \ --command \ '$(GOTO_CC) $(CBMC_VERBOSITY) $(COMPILE_FLAGS) $(EXPORT_FILE_LOCAL_SYMBOLS) $(INCLUDES) $(DEFINES) $^ -o $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/project_sources-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): building project binary" # Compile proof sources $(PROOF_GOTO)1.goto: $(PROOF_SOURCES) $(LITANI) add-job \ --command \ '$(GOTO_CC) $(CBMC_VERBOSITY) $(COMPILE_FLAGS) $(EXPORT_FILE_LOCAL_SYMBOLS) $(INCLUDES) $(DEFINES) $^ -o $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/proof_sources-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): building proof binary" # Remove function bodies from project sources $(PROJECT_GOTO)2.goto: $(PROJECT_GOTO)1.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) $(CBMC_REMOVE_FUNCTION_BODY) $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/remove_function_body-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): removing function bodies from project sources" # Link project and proof sources into the proof harness $(HARNESS_GOTO)1.goto: $(PROOF_GOTO)1.goto $(PROJECT_GOTO)2.goto $(LITANI) add-job \ --command '$(GOTO_CC) $(CBMC_VERBOSITY) --function $(HARNESS_ENTRY) $^ $(LINK_FLAGS) -o $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/link_proof_project-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): linking project to proof" # Restrict function pointers $(HARNESS_GOTO)2.goto: $(HARNESS_GOTO)1.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) $(CBMC_RESTRICT_FUNCTION_POINTER) $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/restrict_function_pointer-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): restricting function pointers in project sources" # Fill static variable with unconstrained values $(HARNESS_GOTO)3.goto: $(HARNESS_GOTO)2.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) $(NONDET_STATIC) $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/nondet_static-log.txt \ --pipeline-name 
"$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): setting static variables to nondet" # Omit unused functions (sharpens coverage calculations) $(HARNESS_GOTO)4.goto: $(HARNESS_GOTO)3.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) --drop-unused-functions $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/drop_unused_functions-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): dropping unused functions" # Omit initialization of unused global variables (reduces problem size) $(HARNESS_GOTO)5.goto: $(HARNESS_GOTO)4.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) --slice-global-inits $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/slice_global_inits-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): slicing global initializations" # Replace function calls with function contracts # This must be done before enforcing function contracts, # since contract enforcement inlines all function calls. $(HARNESS_GOTO)6.goto: $(HARNESS_GOTO)5.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) $(CBMC_USE_FUNCTION_CONTRACTS) $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/use_function_contracts-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): replacing function calls with function contracts" # Unwind loops for loop and function contracts $(HARNESS_GOTO)7.goto: $(HARNESS_GOTO)6.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) $(CBMC_EARLY_UNWINDSET) $(CBMC_FLAG_UNWINDING_ASSERTIONS) $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/unwind_loops-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): unwinding loops" # Apply loop contracts $(HARNESS_GOTO)8.goto: $(HARNESS_GOTO)7.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) $(CBMC_APPLY_LOOP_CONTRACTS) $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/apply_loop_contracts-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): applying loop contracts" # Check function contracts $(HARNESS_GOTO)9.goto: $(HARNESS_GOTO)8.goto $(LITANI) add-job \ --command \ '$(GOTO_INSTRUMENT) $(CBMC_VERBOSITY) $(CBMC_CHECK_FUNCTION_CONTRACTS) $^ $@' \ --inputs $^ \ --outputs $@ \ --stdout-file $(LOGDIR)/check_function_contracts-log.txt \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): checking function contracts" # Final name for proof harness $(HARNESS_GOTO).goto: $(HARNESS_GOTO)9.goto $(LITANI) add-job \ --command 'cp $< $@' \ --inputs $^ \ --outputs $@ \ --pipeline-name "$(PROOF_UID)" \ --ci-stage build \ --description "$(PROOF_UID): copying final goto-binary" ################################################################ # Targets to run the analysis commands $(LOGDIR)/result.txt: $(HARNESS_GOTO).goto $(LITANI) add-job \ $(POOL) \ --command \ '$(CBMC) $(CBMC_VERBOSITY) $(CBMCFLAGS) $(CBMC_FLAG_UNWINDING_ASSERTIONS) $(CHECKFLAGS) --trace $<' \ --inputs $^ \ --outputs $@ \ --ci-stage test \ --stdout-file $@ \ $(MEMORY_PROFILING) \ --ignore-returns 10 \ --timeout $(CBMC_TIMEOUT) \ --pipeline-name "$(PROOF_UID)" \ --tags "stats-group:safety checks" \ --stderr-file $(LOGDIR)/result-err-log.txt \ --description "$(PROOF_UID): checking safety properties" $(LOGDIR)/result.xml: $(HARNESS_GOTO).goto $(LITANI) add-job \ $(POOL) \ --command \ '$(CBMC) 
$(CBMC_VERBOSITY) $(CBMCFLAGS) $(CBMC_FLAG_UNWINDING_ASSERTIONS) $(CHECKFLAGS) --trace --xml-ui $<' \ --inputs $^ \ --outputs $@ \ --ci-stage test \ --stdout-file $@ \ $(MEMORY_PROFILING) \ --ignore-returns 10 \ --timeout $(CBMC_TIMEOUT) \ --pipeline-name "$(PROOF_UID)" \ --tags "stats-group:safety checks" \ --stderr-file $(LOGDIR)/result-err-log.txt \ --description "$(PROOF_UID): checking safety properties" $(LOGDIR)/property.xml: $(HARNESS_GOTO).goto $(LITANI) add-job \ --command \ '$(CBMC) $(CBMC_VERBOSITY) $(CBMCFLAGS) $(CBMC_FLAG_UNWINDING_ASSERTIONS) $(CHECKFLAGS) --show-properties --xml-ui $<' \ --inputs $^ \ --outputs $@ \ --ci-stage test \ --stdout-file $@ \ --ignore-returns 10 \ --pipeline-name "$(PROOF_UID)" \ --stderr-file $(LOGDIR)/property-err-log.txt \ --description "$(PROOF_UID): printing safety properties" $(LOGDIR)/coverage.xml: $(HARNESS_GOTO).goto $(LITANI) add-job \ $(POOL) \ --command \ '$(CBMC) $(CBMC_VERBOSITY) $(CBMCFLAGS) $(COVERFLAGS) --cover location --xml-ui $<' \ --inputs $^ \ --outputs $@ \ --ci-stage test \ --stdout-file $@ \ $(MEMORY_PROFILING) \ --ignore-returns 10 \ --timeout $(CBMC_TIMEOUT) \ --pipeline-name "$(PROOF_UID)" \ --tags "stats-group:coverage computation" \ --stderr-file $(LOGDIR)/coverage-err-log.txt \ --description "$(PROOF_UID): calculating coverage" define VIEWER_CMD $(VIEWER) \ --result $(LOGDIR)/result.txt \ --block $(LOGDIR)/coverage.xml \ --property $(LOGDIR)/property.xml \ --srcdir $(SRCDIR) \ --goto $(HARNESS_GOTO).goto \ --htmldir $(PROOFDIR)/html endef export VIEWER_CMD $(PROOFDIR)/html: $(LOGDIR)/result.txt $(LOGDIR)/property.xml $(LOGDIR)/coverage.xml $(LITANI) add-job \ --command "$$VIEWER_CMD" \ --inputs $^ \ --outputs $(PROOFDIR)/html \ --pipeline-name "$(PROOF_UID)" \ --ci-stage report \ --stdout-file $(LOGDIR)/viewer-log.txt \ --description "$(PROOF_UID): generating report" # Caution: run make-source before running property and coverage checking # The current make-source script removes the goto binary $(LOGDIR)/source.json: mkdir -p $(dir $@) $(RM) -r $(GOTODIR) $(MAKE_SOURCE) --srcdir $(SRCDIR) --wkdir $(PROOFDIR) > $@ $(RM) -r $(GOTODIR) define VIEWER2_CMD $(VIEWER2) \ --result $(LOGDIR)/result.xml \ --coverage $(LOGDIR)/coverage.xml \ --property $(LOGDIR)/property.xml \ --srcdir $(SRCDIR) \ --goto $(HARNESS_GOTO).goto \ --reportdir $(PROOFDIR)/report \ --config $(PROOFDIR)/cbmc-viewer.json endef export VIEWER2_CMD # Omit logs/source.json from report generation until make-sources # works correctly with Makefiles that invoke the compiler with # mutliple source files at once. $(PROOFDIR)/report: $(LOGDIR)/result.xml $(LOGDIR)/property.xml $(LOGDIR)/coverage.xml $(LITANI) add-job \ --command "$$VIEWER2_CMD" \ --inputs $^ \ --outputs $(PROOFDIR)/report \ --pipeline-name "$(PROOF_UID)" \ --stdout-file $(LOGDIR)/viewer-log.txt \ --ci-stage report \ --description "$(PROOF_UID): generating report" litani-path: @echo $(LITANI) # ############################################################## # Phony Rules # # These rules provide a convenient way to run a single proof up to a # certain stage. Users can browse into a proof directory and run # "make -Bj 3 report" to generate a report for just that proof, or # "make goto" to build the goto binary. Under the hood, this runs litani # for just that proof. 
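#
# For example, from inside a single proof directory (such as one of the
# aws_array_list_* directories in this tree):
#
#   make -Bj 3 report    # build, run the checks, and generate the report
#   make goto            # stop after building the goto binary
#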
_goto: $(HARNESS_GOTO).goto goto: @ echo Running 'litani init' $(LITANI) init --project $(PROJECT_NAME) @ echo Running 'litani add-job' $(MAKE) -B _goto @ echo Running 'litani build' $(LITANI) run-build _result: $(LOGDIR)/result.txt result: @ echo Running 'litani init' $(LITANI) init --project $(PROJECT_NAME) @ echo Running 'litani add-job' $(MAKE) -B _result @ echo Running 'litani build' $(LITANI) run-build _property: $(LOGDIR)/property.xml property: @ echo Running 'litani init' $(LITANI) init --project $(PROJECT_NAME) @ echo Running 'litani add-job' $(MAKE) -B _property @ echo Running 'litani build' $(LITANI) run-build _coverage: $(LOGDIR)/coverage.xml coverage: @ echo Running 'litani init' $(LITANI) init --project $(PROJECT_NAME) @ echo Running 'litani add-job' $(MAKE) -B _coverage @ echo Running 'litani build' $(LITANI) run-build # Choose the invocation of cbmc-viewer depending on which version of # cbmc-viewer is installed. The --version flag is not implemented in # version 1 --- it is an "unrecognized argument" --- but it is # implemented in version 2. _report1: $(PROOFDIR)/html _report2: $(PROOFDIR)/report _report: (cbmc-viewer --version 2>&1 | grep "unrecognized argument" > /dev/null) && \ $(MAKE) -B _report1 || $(MAKE) -B _report2 report report1 report2: @ echo Running 'litani init' $(LITANI) init --project $(PROJECT_NAME) @ echo Running 'litani add-job' $(MAKE) -B _report @ echo Running 'litani build' $(LITANI) run-build ################################################################ # Targets to clean up after ourselves clean: -$(RM) $(DEPENDENT_GOTOS) -$(RM) TAGS* -$(RM) *~ \#* -$(RM) $(REWRITTEN_SOURCES) $(foreach rs,$(REWRITTEN_SOURCES),$(rs).json) veryclean: clean -$(RM) -r html report -$(RM) -r $(LOGDIR) $(GOTODIR) .PHONY: \ _coverage \ _goto \ _property \ _report \ _report2 \ _result \ clean \ coverage \ goto \ litani-path \ property \ report \ report2 \ result \ setup_dependencies \ testdeps \ veryclean \ # ################################################################ # Rule for generating cbmc-batch.yaml, used by the CI at # https://github.com/awslabs/aws-batch-cbmc/ JOB_OS ?= ubuntu16 JOB_MEMORY ?= 32000 # Proofs that are expected to fail should set EXPECTED to # "FAILED" in their Makefile. Values other than SUCCESSFUL # or FAILED will cause a CI error. EXPECTED ?= SUCCESSFUL define yaml_encode_options "$(shell echo $(1) | sed 's/ ,/ /g' | sed 's/ /;/g')" endef CI_FLAGS = $(CBMCFLAGS) $(CHECKFLAGS) $(COVERFLAGS) cbmc-batch.yaml: @$(RM) $@ @echo 'build_memory: $(JOB_MEMORY)' > $@ @echo 'cbmcflags: $(strip $(call yaml_encode_options,$(CI_FLAGS)))' >> $@ @echo 'coverage_memory: $(JOB_MEMORY)' >> $@ @echo 'expected: $(EXPECTED)' >> $@ @echo 'goto: $(HARNESS_GOTO).goto' >> $@ @echo 'jobos: $(JOB_OS)' >> $@ @echo 'property_memory: $(JOB_MEMORY)' >> $@ @echo 'report_memory: $(JOB_MEMORY)' >> $@ .PHONY: cbmc-batch.yaml ################################################################ # Run "make echo-proof-uid" to print the proof ID of a proof. This can be # used by scripts to ensure that every proof has an ID, that there are # no duplicates, etc. 
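#
# For example, a wrapper script could flag duplicate proof IDs with
# something like the following (illustrative only):
#
#   for p in */cbmc-proof.txt; do make --no-print-directory -C "${p%/*}" echo-proof-uid; done | sort | uniq -d
#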
.PHONY: echo-proof-uid echo-proof-uid: @echo $(PROOF_UID) .PHONY: echo-project-name echo-project-name: @echo $(PROJECT_NAME) ################################################################ # Project-specific targets requiring values defined above sinclude $(PROOF_ROOT)/Makefile-project-targets # CI-specific targets to drive cbmc in CI sinclude $(PROOF_ROOT)/Makefile-project-testing ################################################################ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/README.md000066400000000000000000000017271456575232400267040ustar00rootroot00000000000000CBMC proofs =========== This directory contains the CBMC proofs. Each proof is in its own directory. This directory includes four Makefiles. One Makefile describes the basic workflow for building and running proofs: * Makefile.common: * make: builds the goto binary, does the cbmc property checking and coverage checking, and builds the final report. * make goto: builds the goto binary * make result: does cbmc property checking * make coverage: does cbmc coverage checking * make report: builds the final report Three included Makefiles describe project-specific settings and can override definitions in Makefile.common: * Makefile-project-defines: definitions like compiler flags required to build the goto binaries, and definitions to override definitions in Makefile.common. * Makefile-project-targets: other make targets needed for the project * Makefile-project-testing: other definitions and targets needed for unit testing or continuous integration. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_checked/000077500000000000000000000000001456575232400315205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_checked/Makefile000066400000000000000000000007011456575232400331560ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
PROOF_UID = aws_add_size_checked HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c CBMCFLAGS += PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c include ../Makefile.common aws_add_size_checked_harness.c000066400000000000000000000020701456575232400374310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_checked#include #include /** * Coverage: 1.00 (31 lines out of 31 statically-reachable lines in 5 functions reached) * Runtime: 0m2.025s * * Assumptions: * - given 2 non-deterministics unsigned integers * * Assertions: * - r does not overflow, if aws_add_u32_checked or * aws_add_u64_checked functions return AWS_OP_SUCCESS */ void aws_add_size_checked_harness() { if (nondet_bool()) { uint64_t a = nondet_uint64_t(); uint64_t b = nondet_uint64_t(); uint64_t r = nondet_uint64_t(); int rval = aws_add_u64_checked(a, b, &r); if (!rval) { assert(r == a + b); } else { assert((b > 0) && (a > (UINT64_MAX - b))); } } else { uint32_t a = nondet_uint32_t(); uint32_t b = nondet_uint32_t(); uint32_t r = nondet_uint32_t(); if (!aws_add_u32_checked(a, b, &r)) { assert(r == a + b); } else { assert((b > 0) && (a > (UINT32_MAX - b))); } } } cbmc-proof.txt000066400000000000000000000000711456575232400342270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_checkedThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_saturating/000077500000000000000000000000001456575232400323135ustar00rootroot00000000000000Makefile000066400000000000000000000010451456575232400336740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_saturating# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### #NOTE: If we don't use the unwindset, leave it empty #CBMC_UNWINDSET = CBMCFLAGS += PROOF_UID = aws_add_size_saturating HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_add_size_saturating_harness.c000066400000000000000000000020741456575232400410230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_saturating#include #include /** * Coverage: 1.00 (24 lines out of 24 statically-reachable lines in 3 functions reached) * Runtime: 0m2.698s * * Assumptions: * - given 2 non-deterministics unsigned integers * * Assertions: * - if a + b overflows, aws_add_u32_saturating and aws_add_u64_saturating * functions must always return the corresponding saturated value */ void aws_add_size_saturating_harness() { if (nondet_bool()) { uint64_t a = nondet_uint64_t(); uint64_t b = nondet_uint64_t(); uint64_t r = aws_add_u64_saturating(a, b); if ((b > 0) && (a > (UINT64_MAX - b))) { assert(r == UINT64_MAX); } else { assert(r == a + b); } } else { uint32_t a = nondet_uint32_t(); uint32_t b = nondet_uint32_t(); uint32_t r = aws_add_u32_saturating(a, b); if ((b > 0) && (a > (UINT32_MAX - b))) { assert(r == UINT32_MAX); } else { assert(r == a + b); } } } cbmc-proof.txt000066400000000000000000000000711456575232400350220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_add_size_saturatingThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq/000077500000000000000000000000001456575232400300735ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq/Makefile000066400000000000000000000012761456575232400315410ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_eq_harness.c000066400000000000000000000031531456575232400343620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
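 *
 * This harness follows the pattern used throughout these proofs: construct
 * nondeterministic inputs (the right-hand buffer may alias the left-hand
 * one), save one byte of each input buffer, call aws_array_eq(), and then
 * assert the expected equivalence properties and that both buffers are
 * left unchanged.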
*/ #include #include void aws_array_eq_harness() { /* assumptions */ size_t lhs_len; __CPROVER_assume(lhs_len <= MAX_BUFFER_SIZE); void *lhs = malloc(lhs_len); void *rhs; size_t rhs_len; if (nondet_bool()) { /* rhs could be equal to lhs */ rhs_len = lhs_len; rhs = lhs; } else { __CPROVER_assume(rhs_len <= MAX_BUFFER_SIZE); rhs = malloc(rhs_len); } /* save current state of the parameters */ struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array((uint8_t *)lhs, lhs_len, &old_byte_from_lhs); struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array((uint8_t *)rhs, rhs_len, &old_byte_from_rhs); /* pre-conditions */ __CPROVER_assume((lhs_len == 0) || AWS_MEM_IS_READABLE(lhs, lhs_len)); __CPROVER_assume((rhs_len == 0) || AWS_MEM_IS_READABLE(rhs, rhs_len)); /* operation under verification */ if (aws_array_eq(lhs, lhs_len, rhs, rhs_len)) { /* asserts equivalence */ assert(lhs_len == rhs_len); if (lhs_len > 0 && lhs) { assert_bytes_match((uint8_t *)lhs, (uint8_t *)rhs, lhs_len); } } /* asserts both parameters remain unchanged */ if (lhs_len > 0 && lhs) { assert_byte_from_buffer_matches((uint8_t *)lhs, &old_byte_from_lhs); } if (rhs_len > 0 && rhs) { assert_byte_from_buffer_matches((uint8_t *)rhs, &old_byte_from_rhs); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq/cbmc-proof.txt000066400000000000000000000000711456575232400326610ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_str/000077500000000000000000000000001456575232400312655ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_str/Makefile000066400000000000000000000014211456575232400327230ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_c_str.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_eq_c_str HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_eq_c_str_harness.c000066400000000000000000000027211456575232400367460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_str/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_array_eq_c_str_harness() { /* assumptions */ void *array; size_t array_len; __CPROVER_assume(array_len <= MAX_BUFFER_SIZE); array = malloc(array_len); const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* save current state of the parameters */ struct store_byte_from_buffer old_byte_from_array; save_byte_from_array((uint8_t *)array, array_len, &old_byte_from_array); size_t str_len = (c_str != NULL) ? 
strlen(c_str) : 0; struct store_byte_from_buffer old_byte_from_str; save_byte_from_array((uint8_t *)c_str, str_len, &old_byte_from_str); /* pre-conditions */ __CPROVER_assume(array || (array_len == 0)); __CPROVER_assume(c_str); /* operation under verification */ if (aws_array_eq_c_str(array, array_len, c_str)) { /* asserts equivalence */ assert(array_len == str_len); if (array_len > 0) { assert_bytes_match((uint8_t *)array, (uint8_t *)c_str, array_len); } } /* asserts both parameters remain unchanged */ if (array_len > 0) { assert_byte_from_buffer_matches((uint8_t *)array, &old_byte_from_array); } if (str_len > 0) { assert_byte_from_buffer_matches((uint8_t *)c_str, &old_byte_from_str); } } cbmc-proof.txt000066400000000000000000000000711456575232400337740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_strThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_str_ignore_case/000077500000000000000000000000001456575232400336235ustar00rootroot00000000000000Makefile000066400000000000000000000014521456575232400352060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_str_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_c_str_ignore_case.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_eq_c_str_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_eq_c_str_ignore_case_harness.c000066400000000000000000000025061456575232400436430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_str_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_array_eq_c_str_ignore_case_harness() { /* assumptions */ size_t array_len; __CPROVER_assume(array_len <= MAX_BUFFER_SIZE); void *array = malloc(array_len); const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* save current state of the parameters */ struct store_byte_from_buffer old_byte_from_array; save_byte_from_array((uint8_t *)array, array_len, &old_byte_from_array); size_t str_len = (c_str != NULL) ? 
strlen(c_str) : 0; struct store_byte_from_buffer old_byte_from_str; save_byte_from_array((uint8_t *)c_str, str_len, &old_byte_from_str); /* pre-conditions */ __CPROVER_assume(array || (array_len == 0)); __CPROVER_assume(c_str); /* operation under verification */ if (aws_array_eq_c_str_ignore_case(array, array_len, c_str)) { assert(array_len == str_len); } /* asserts both parameters remain unchanged */ if (array_len > 0) { assert_byte_from_buffer_matches((uint8_t *)array, &old_byte_from_array); } if (str_len > 0) { assert_byte_from_buffer_matches((uint8_t *)c_str, &old_byte_from_str); } } cbmc-proof.txt000066400000000000000000000000711456575232400363320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_c_str_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_ignore_case/000077500000000000000000000000001456575232400324315ustar00rootroot00000000000000Makefile000066400000000000000000000013351456575232400340140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_ignore_case.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_eq_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_eq_ignore_case_harness.c000066400000000000000000000027361456575232400412640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
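 *
 * On success the harness asserts only that the two lengths match, and in
 * all cases that neither input buffer was modified; byte-for-byte equality
 * is not required because the comparison is case-insensitive.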
*/ #include #include void aws_array_eq_ignore_case_harness() { /* assumptions */ size_t lhs_len; __CPROVER_assume(lhs_len <= MAX_BUFFER_SIZE); void *lhs = malloc(lhs_len); void *rhs; size_t rhs_len; if (nondet_bool()) { /* rhs could be equal to lhs */ rhs_len = lhs_len; rhs = lhs; } else { __CPROVER_assume(rhs_len <= MAX_BUFFER_SIZE); rhs = malloc(rhs_len); } /* save current state of the parameters */ struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array((uint8_t *)lhs, lhs_len, &old_byte_from_lhs); struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array((uint8_t *)rhs, rhs_len, &old_byte_from_rhs); /* pre-conditions */ __CPROVER_assume((lhs_len == 0) || AWS_MEM_IS_READABLE(lhs, lhs_len)); __CPROVER_assume((rhs_len == 0) || AWS_MEM_IS_READABLE(rhs, rhs_len)); /* operation under verification */ if (aws_array_eq_ignore_case(lhs, lhs_len, rhs, rhs_len)) { assert(lhs_len == rhs_len); } /* asserts both parameters remain unchanged */ if (lhs_len > 0) { assert_byte_from_buffer_matches((uint8_t *)lhs, &old_byte_from_lhs); } if (rhs_len > 0) { assert_byte_from_buffer_matches((uint8_t *)rhs, &old_byte_from_rhs); } } cbmc-proof.txt000066400000000000000000000000711456575232400351400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_eq_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_back/000077500000000000000000000000001456575232400314215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_back/Makefile000066400000000000000000000014001456575232400330540ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This bound allows us to reach 100% coverage rate UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_list_back HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_back_harness.c000066400000000000000000000025421456575232400372370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_back/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
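 *
 * The harness assumes a bounded, valid aws_array_list, saves one byte of
 * its backing storage, and calls aws_array_list_back(); it then asserts
 * that the list is still valid and unchanged, and that a successful call
 * implies the list was non-empty.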
*/ #include #include /** * Runtime: 19s */ void aws_array_list_back_harness() { /* data structure */ struct aws_array_list list; void *val = malloc(list.item_size); /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* assume preconditions */ __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(val && AWS_MEM_IS_WRITABLE(val, list.item_size)); /* perform operation under verification */ if (aws_array_list_back(&list, val) == AWS_OP_SUCCESS) { /* In the case aws_array_list_back is successful, we can ensure the list isn't empty */ assert(list.data != NULL); assert(list.length != 0); } /* assertions */ assert(aws_array_list_is_valid(&list)); assert_array_list_equivalence(&list, &old, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400341300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_backThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_capacity/000077500000000000000000000000001456575232400323165ustar00rootroot00000000000000Makefile000066400000000000000000000012321456575232400336750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_capacity# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_capacity HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_capacity_harness.c000066400000000000000000000021231456575232400410240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_capacity/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
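 *
 * Besides asserting that the list is left unchanged, the harness checks
 * that the returned capacity equals current_size / item_size, i.e. the
 * number of items that fit in the currently allocated storage.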
*/ #include #include /** * Runtime: 5s */ void aws_array_list_capacity_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(list.item_size > 0); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification */ size_t capacity = aws_array_list_capacity(&list); /* assertions */ assert(aws_array_list_is_valid(&list)); assert_array_list_equivalence(&list, &old, &old_byte); assert(capacity == list.current_size / list.item_size); } cbmc-proof.txt000066400000000000000000000000711456575232400350250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_capacityThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clean_up/000077500000000000000000000000001456575232400323075ustar00rootroot00000000000000Makefile000066400000000000000000000011611456575232400336670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clean_up# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_clean_up HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_clean_up_harness.c000066400000000000000000000013011456575232400410030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clean_up/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * Runtime: 4s */ void aws_array_list_clean_up_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* perform operation under verification */ aws_array_list_clean_up(&list); /* assertions */ assert(AWS_IS_ZEROED(list)); } cbmc-proof.txt000066400000000000000000000000711456575232400350160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clean_upThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clear/000077500000000000000000000000001456575232400316075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clear/Makefile000066400000000000000000000011561456575232400332520ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_clear HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_clear_harness.c000066400000000000000000000017461456575232400376200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clear/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * Runtime: 5s */ void aws_array_list_clear_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; /* perform operation under verification */ aws_array_list_clear(&list); /* assertions */ assert(aws_array_list_is_valid(&list)); assert(list.length == 0); assert(list.alloc == old.alloc); assert(list.current_size == old.current_size); assert(list.item_size == old.item_size); assert(list.data == old.data); } cbmc-proof.txt000066400000000000000000000000711456575232400343160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_clearThis file marks the directory as containing a CBMC proof aws_array_list_comparator_string/000077500000000000000000000000001456575232400341775ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014331456575232400356400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_comparator_string# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_array_list_comparator_string HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_array_list_comparator_string_harness.c000066400000000000000000000016431456575232400447320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_comparator_string/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_array_list_comparator_string_harness() { struct aws_string *str_a = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_string *str_b = nondet_bool() ? 
str_a : nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(aws_string_is_valid(str_a)); __CPROVER_assume(aws_string_is_valid(str_b)); bool nondet_parameter_a; bool nondet_parameter_b; if (aws_array_list_comparator_string(nondet_parameter_a ? &str_a : NULL, nondet_parameter_b ? &str_b : NULL) == 0) { if (nondet_parameter_a && nondet_parameter_b) { assert_bytes_match(str_a->bytes, str_b->bytes, str_a->len); } } } cbmc-proof.txt000066400000000000000000000000711456575232400367650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_comparator_stringThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_copy/000077500000000000000000000000001456575232400314735ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_copy/Makefile000066400000000000000000000013151456575232400331330ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_copy HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_copy_harness.c000066400000000000000000000025021456575232400373570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_copy/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
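 *
 * A minimal usage sketch for aws_array_list_comparator_string, whose proof
 * harness appears above. It assumes the comparator declared in
 * <aws/common/string.h> and a list whose elements are `struct aws_string *`;
 * the strings and sizes below are illustrative only:
 *
 *     struct aws_array_list names;
 *     aws_array_list_init_dynamic(&names, aws_default_allocator(), 4, sizeof(struct aws_string *));
 *     struct aws_string *a = aws_string_new_from_c_str(aws_default_allocator(), "beta");
 *     struct aws_string *b = aws_string_new_from_c_str(aws_default_allocator(), "alpha");
 *     aws_array_list_push_back(&names, &a);
 *     aws_array_list_push_back(&names, &b);
 *     aws_array_list_sort(&names, aws_array_list_comparator_string); /* "alpha" now sorts first */
 *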
*/ #include #include /** * Runtime: 10s */ void aws_array_list_copy_harness() { /* data structure */ struct aws_array_list from; struct aws_array_list to; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&from, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&from); __CPROVER_assume(aws_array_list_is_valid(&from)); __CPROVER_assume(aws_array_list_is_bounded(&to, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&to); __CPROVER_assume(aws_array_list_is_valid(&to)); __CPROVER_assume(from.item_size == to.item_size); __CPROVER_assume(from.data != NULL); /* perform operation under verification */ if (!aws_array_list_copy(&from, &to)) { /* In the case aws_array_list_copy is successful, both lists have the same length */ assert(to.length == from.length); assert(to.current_size >= (from.length * from.item_size)); } /* assertions */ assert(aws_array_list_is_valid(&from)); assert(aws_array_list_is_valid(&to)); assert(from.item_size == to.item_size); } cbmc-proof.txt000066400000000000000000000000711456575232400342020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_copyThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_ensure_capacity/000077500000000000000000000000001456575232400336775ustar00rootroot00000000000000Makefile000066400000000000000000000013301456575232400352550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_ensure_capacity# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_ensure_capacity HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_ensure_capacity_harness.c000066400000000000000000000025101456575232400437660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_ensure_capacity/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
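 *
 * A short sketch of the contract checked by the aws_array_list_copy proof
 * above, assuming the aws-c-common array-list API: `src` and `dst` must share
 * the same item_size, and on success `dst` holds a copy of every element.
 *
 *     struct aws_array_list src, dst;
 *     aws_array_list_init_dynamic(&src, aws_default_allocator(), 4, sizeof(int));
 *     aws_array_list_init_dynamic(&dst, aws_default_allocator(), 1, sizeof(int));
 *     int v = 7;
 *     aws_array_list_push_back(&src, &v);
 *     if (aws_array_list_copy(&src, &dst) == AWS_OP_SUCCESS) {
 *         /* dst was grown as needed and now has the same length as src */
 *         assert(aws_array_list_length(&dst) == aws_array_list_length(&src));
 *     }
 *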
*/ #include #include /** * Runtime: 9s */ void aws_array_list_ensure_capacity_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification */ size_t index; if (!aws_array_list_ensure_capacity(&list, index)) { /* assertions */ assert(aws_array_list_is_valid(&list)); assert(list.item_size == old.item_size); assert(list.alloc == old.alloc); assert(list.length == old.length); assert(list.current_size >= old.current_size); } else { /* In the case aws_array_list_ensure_capacity is not successful, the list must not change */ assert_array_list_equivalence(&list, &old, &old_byte); } } cbmc-proof.txt000066400000000000000000000000711456575232400364060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_ensure_capacityThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_erase/000077500000000000000000000000001456575232400316205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_erase/Makefile000066400000000000000000000014751456575232400332670ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_erase HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memmove_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_havoc.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_erase_harness.c000066400000000000000000000023201456575232400376270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_erase/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
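 *
 * A sketch of the post-condition verified above for
 * aws_array_list_ensure_capacity, assuming the aws-c-common API (the index 15
 * is illustrative): on success the backing storage covers the requested index
 * while the length is left unchanged.
 *
 *     struct aws_array_list list;
 *     aws_array_list_init_dynamic(&list, aws_default_allocator(), 1, sizeof(int));
 *     if (aws_array_list_ensure_capacity(&list, 15) == AWS_OP_SUCCESS) {
 *         assert(aws_array_list_capacity(&list) >= 16);
 *         assert(aws_array_list_length(&list) == 0);
 *     }
 *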
*/ #include #include void aws_array_list_erase_harness() { /* parameters */ struct aws_array_list list; size_t index; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification */ if (aws_array_list_erase(&list, index) == AWS_OP_SUCCESS) { assert(list.length == old.length - 1); assert(list.item_size == old.item_size); assert(list.alloc == old.alloc); assert(list.current_size == old.current_size); assert(index < old.length); } else { assert_array_list_equivalence(&list, &old, &old_byte); } assert(aws_array_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400343270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_eraseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_front/000077500000000000000000000000001456575232400316515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_front/Makefile000066400000000000000000000015031456575232400333100ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # Set deep checks to enable the AWS_BYTES_EQ AWS_DEEP_CHECKS = 1 # This bound allows us to reach 100% coverage rate UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_list_front HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_front_harness.c000066400000000000000000000026031456575232400377150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_front/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 6s */ void aws_array_list_front_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); void *val = malloc(list.item_size); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* assume preconditions */ __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(val && AWS_MEM_IS_WRITABLE(val, list.item_size)); /* perform operation under verification */ if (!aws_array_list_front(&list, val)) { /* In the case aws_array_list_front is successful, we can ensure the list isn't empty */ assert(AWS_BYTES_EQ(val, list.data, list.item_size)); assert(list.data); assert(list.length); } /* assertions */ assert(aws_array_list_is_valid(&list)); assert_array_list_equivalence(&list, &old, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400343600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_frontThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_at/000077500000000000000000000000001456575232400317645ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_at/Makefile000066400000000000000000000014021456575232400334210ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This bound allows us to reach 100% coverage rate UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_list_get_at HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_get_at_harness.c000066400000000000000000000026311456575232400401440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_at/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 9s */ void aws_array_list_get_at_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); void *val = malloc(list.item_size); size_t index; /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* assume preconditions */ __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(val && AWS_MEM_IS_WRITABLE(val, list.item_size)); /* perform operation under verification */ if (!aws_array_list_get_at(&list, val, index)) { /* In the case aws_array_list_get_at is successful, we can ensure the list isn't empty * and index is within bounds. */ assert(list.data); assert(list.length > index); } /* assertions */ assert(aws_array_list_is_valid(&list)); assert_array_list_equivalence(&list, &old, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400344730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_atThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_at_ptr/000077500000000000000000000000001456575232400326515ustar00rootroot00000000000000Makefile000066400000000000000000000014671456575232400342420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_at_ptr# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This bound allows us to reach 100% coverage rate UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_list_get_at_ptr HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_get_at_ptr_harness.c000066400000000000000000000025661456575232400417250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_at_ptr/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 6s */ void aws_array_list_get_at_ptr_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); void **val = malloc(sizeof(void *)); size_t index; /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* assume preconditions */ __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(val); /* perform operation under verification */ if (!aws_array_list_get_at_ptr(&list, val, index)) { /* In the case aws_array_list_get_at is successful, we can ensure the list isn't empty * and index is within bounds. */ assert(list.data); assert(list.length > index); } /* assertions */ assert(aws_array_list_is_valid(&list)); assert_array_list_equivalence(&list, &old, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400353600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_get_at_ptrThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_dynamic/000077500000000000000000000000001456575232400331705ustar00rootroot00000000000000Makefile000066400000000000000000000011661456575232400345550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_dynamic# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_init_dynamic HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_init_dynamic_harness.c000066400000000000000000000023211456575232400425500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_dynamic/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
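 *
 * A sketch contrasting the two accessors proved above, assuming `list` is an
 * already-initialized aws_array_list holding int elements:
 * aws_array_list_get_at copies the element out, while aws_array_list_get_at_ptr
 * returns a pointer into the list's own storage.
 *
 *     int copy_of_item = 0;
 *     if (aws_array_list_get_at(&list, &copy_of_item, 0) == AWS_OP_SUCCESS) {
 *         /* copy_of_item holds element 0; the list itself is unchanged */
 *     }
 *     int *item_in_place = NULL;
 *     if (aws_array_list_get_at_ptr(&list, (void **)&item_in_place, 0) == AWS_OP_SUCCESS) {
 *         *item_in_place = 7; /* writes through to the list's backing buffer */
 *     }
 *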
*/ #include #include /** * Runtime: 6s */ void aws_array_list_init_dynamic_harness() { /* data structure */ struct aws_array_list list; /* Precondition: list is non-null */ /* parameters */ struct aws_allocator *allocator = aws_default_allocator(); /* Precondition: allocator is non-null */ size_t item_size; size_t initial_item_allocation; /* assumptions */ __CPROVER_assume(initial_item_allocation <= MAX_INITIAL_ITEM_ALLOCATION); __CPROVER_assume(item_size > 0 && item_size <= MAX_ITEM_SIZE); /* perform operation under verification */ if (aws_array_list_init_dynamic(&list, allocator, initial_item_allocation, item_size) == AWS_OP_SUCCESS) { /* assertions */ assert(aws_array_list_is_valid(&list)); assert(list.alloc == allocator); assert(list.item_size == item_size); assert(list.length == 0); assert(list.current_size == item_size * initial_item_allocation); } else { /*assertions */ assert(AWS_IS_ZEROED(list)); } } cbmc-proof.txt000066400000000000000000000000711456575232400356770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_dynamicThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_static/000077500000000000000000000000001456575232400330335ustar00rootroot00000000000000Makefile000066400000000000000000000011601456575232400344120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_static# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_init_static HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c ########### include ../Makefile.common aws_array_list_init_static_harness.c000066400000000000000000000026231456575232400422630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_static/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 7s */ void aws_array_list_init_static_harness() { /* data structure */ struct aws_array_list list; /* Precondition: list is non-null */ /* parameters */ size_t item_size; size_t initial_item_allocation; size_t len; /* assumptions */ __CPROVER_assume(initial_item_allocation > 0 && initial_item_allocation <= MAX_INITIAL_ITEM_ALLOCATION); __CPROVER_assume(item_size > 0 && item_size <= MAX_ITEM_SIZE); __CPROVER_assume(!aws_mul_size_checked(initial_item_allocation, item_size, &len)); /* perform operation under verification */ uint8_t *raw_array = malloc(len); __CPROVER_assume(raw_array != NULL); struct store_byte_from_buffer old_byte; save_byte_from_array(raw_array, len, &old_byte); aws_array_list_init_static(&list, raw_array, initial_item_allocation, item_size); /* assertions */ assert(aws_array_list_is_valid(&list)); assert(list.alloc == NULL); assert(list.item_size == item_size); assert(list.length == 0); assert(list.current_size == initial_item_allocation * item_size); assert_bytes_match((uint8_t *)list.data, raw_array, len); assert_byte_from_buffer_matches(raw_array, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400355420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_init_staticThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_length/000077500000000000000000000000001456575232400320025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_length/Makefile000066400000000000000000000012301456575232400334360ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_length HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_length_harness.c000066400000000000000000000017441456575232400402040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_length/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
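 *
 * A sketch of the static-initialization pattern verified above, assuming the
 * aws-c-common API (the 16-slot buffer is illustrative): a static list borrows
 * caller-owned storage, keeps alloc == NULL, and can never grow.
 *
 *     uint8_t storage[16 * sizeof(int)];
 *     struct aws_array_list list;
 *     aws_array_list_init_static(&list, storage, 16, sizeof(int));
 *     assert(list.alloc == NULL);
 *     assert(aws_array_list_capacity(&list) == 16);
 *     /* aws_array_list_push_back starts failing once all 16 slots are in use */
 *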
*/ #include #include /** * Runtime: 7s */ void aws_array_list_length_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification */ size_t len = aws_array_list_length(&list); /* assertions */ assert(aws_array_list_is_valid(&list)); assert_array_list_equivalence(&list, &old, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400345110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_lengthThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_back/000077500000000000000000000000001456575232400322775ustar00rootroot00000000000000Makefile000066400000000000000000000014651456575232400336660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_back# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This bound allows us to reach 100% coverage rate UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_list_pop_back HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_pop_back_harness.c000066400000000000000000000024531456575232400407740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_back/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 9s */ void aws_array_list_pop_back_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification and assertions */ if (!aws_array_list_pop_back(&list)) { assert(list.length == old.length - 1); assert(list.data); assert(list.alloc == old.alloc); assert(list.current_size == old.current_size); assert(list.item_size == old.item_size); } else { /* In the case aws_array_list_pop_back is not successful, the list must not change */ assert_array_list_equivalence(&list, &old, &old_byte); } assert(aws_array_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400350060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_backThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_front/000077500000000000000000000000001456575232400325275ustar00rootroot00000000000000Makefile000066400000000000000000000013231456575232400341070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_front# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_pop_front HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memmove_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_pop_front_harness.c000066400000000000000000000024561456575232400414570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_front/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 9s */ void aws_array_list_pop_front_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification and assertions */ if (!aws_array_list_pop_front(&list)) { assert(list.length == old.length - 1); assert(list.data); assert(list.alloc == old.alloc); assert(list.current_size == old.current_size); assert(list.item_size == old.item_size); } else { /* In the case aws_array_list_pop_front is not successful, the list must not change */ assert_array_list_equivalence(&list, &old, &old_byte); } assert(aws_array_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400352360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_frontThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_front_n/000077500000000000000000000000001456575232400330445ustar00rootroot00000000000000Makefile000066400000000000000000000013251456575232400344260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_front_n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_pop_front_n HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memmove_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_pop_front_n_harness.c000066400000000000000000000024131456575232400423020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_front_n/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 11s */ void aws_array_list_pop_front_n_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification */ size_t n; aws_array_list_pop_front_n(&list, n); /* assertions */ assert(aws_array_list_is_valid(&list)); if (n == 0) { assert_array_list_equivalence(&list, &old, &old_byte); } else { assert(list.alloc == old.alloc); assert(list.current_size == old.current_size); assert(list.item_size == old.item_size); (n >= old.length) ? assert(list.length == 0) : assert(list.length == old.length - n); } } cbmc-proof.txt000066400000000000000000000000711456575232400355530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_pop_front_nThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_back/000077500000000000000000000000001456575232400324605ustar00rootroot00000000000000Makefile000066400000000000000000000012331456575232400340400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_back# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_push_back HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_push_back_harness.c000066400000000000000000000025701456575232400413360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_back/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
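 *
 * A sketch of the branch structure checked above for aws_array_list_pop_front_n,
 * assuming `list` is an initialized aws_array_list: popping fewer items than the
 * current length shortens the list, while popping at least `length` items
 * empties it.
 *
 *     size_t len_before = aws_array_list_length(&list);
 *     aws_array_list_pop_front_n(&list, 2);
 *     if (len_before >= 2) {
 *         assert(aws_array_list_length(&list) == len_before - 2);
 *     } else {
 *         assert(aws_array_list_length(&list) == 0);
 *     }
 *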
*/ #include #include /** * Runtime: 4 min */ void aws_array_list_push_back_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(list.data != NULL); void *val = malloc(list.item_size); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* assume preconditions */ __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(val && AWS_MEM_IS_READABLE(val, list.item_size)); /* perform operation under verification and assertions */ if (!aws_array_list_push_back(&list, val)) { assert(list.length == old.length + 1); } else { /* In the case aws_array_list_push_back is not successful, the list must not change */ assert_array_list_equivalence(&list, &old, &old_byte); } assert(aws_array_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400351670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_backThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_front/000077500000000000000000000000001456575232400327105ustar00rootroot00000000000000Makefile000066400000000000000000000012731456575232400342740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_front# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_push_front HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memmove_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c include ../Makefile.common aws_array_list_push_front_harness.c000066400000000000000000000031251456575232400420130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_front/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * Runtime: 2 min */ void aws_array_list_push_front_harness() { /* Data structure. */ struct aws_array_list list; /* * We need to bound the input to cope with the complexity of checking arithmetic operations * (i.e., multiplications) over item_size and length. This is a limitation of CBMC. */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); /* Non-deterministic allocations. */ ensure_array_list_has_allocated_data_member(&list); void *val = malloc(list.item_size); /* Save current state of the data structure. */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; if (list.data != NULL) { save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); } /* Assume preconditions. 
*/ __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(val && AWS_MEM_IS_READABLE(val, list.item_size)); /* Perform operation under verification and check postconditions. */ if (aws_array_list_push_front(&list, val) == AWS_OP_SUCCESS) { assert(list.length == old.length + 1); } else if (list.data != NULL) { /* In the case aws_array_list_push_front is not successful, the list must not change. */ assert_array_list_equivalence(&list, &old, &old_byte); } assert(aws_array_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400354170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_push_frontThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_set_at/000077500000000000000000000000001456575232400320005ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_set_at/Makefile000066400000000000000000000012301456575232400334340ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_set_at HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_set_at_harness.c000066400000000000000000000027001456575232400401710ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_set_at/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
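 *
 * A sketch of the success/failure contract shared by the push_back and
 * push_front proofs above, assuming `list` is an initialized aws_array_list of
 * int: on success the length grows by one; on failure (for example a full
 * static list) the list is left untouched.
 *
 *     int v = 42;
 *     size_t len_before = aws_array_list_length(&list);
 *     if (aws_array_list_push_back(&list, &v) == AWS_OP_SUCCESS) {
 *         assert(aws_array_list_length(&list) == len_before + 1);
 *     } else {
 *         assert(aws_array_list_length(&list) == len_before);
 *     }
 *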
*/ #include #include /** * Runtime: 3m 42s */ void aws_array_list_set_at_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(list.data != NULL); size_t malloc_size; void *val = malloc(list.item_size); size_t index; /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* assume preconditions */ __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(val && AWS_MEM_IS_READABLE(val, list.item_size)); /* perform operation under verification and assertions */ if (!aws_array_list_set_at(&list, val, index)) { if (index > old.length) assert(list.length == index + 1); } else { /* In the case aws_array_list_set_at is not successful, the list must not change */ assert_array_list_equivalence(&list, &old, &old_byte); } assert(aws_array_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400345070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_set_atThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_shrink_to_fit/000077500000000000000000000000001456575232400333635ustar00rootroot00000000000000Makefile000066400000000000000000000015051456575232400347450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_shrink_to_fit# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_shrink_to_fit HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/memmove_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_shrink_to_fit_harness.c000066400000000000000000000025771456575232400431530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_shrink_to_fit/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 13s */ void aws_array_list_shrink_to_fit_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* remove some elements before shrinking the data structure */ size_t n; aws_array_list_pop_front_n(&list, n); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification and assertions */ if (!aws_array_list_shrink_to_fit(&list)) { assert( (list.current_size == 0 && list.data == NULL) || (list.data != NULL && list.current_size == list.length * list.item_size)); } else { /* In the case aws_array_list_shrink_to_fit is not successful, the list must not change */ assert_array_list_equivalence(&list, &old, &old_byte); } assert(aws_array_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400360720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_shrink_to_fitThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_sort/000077500000000000000000000000001456575232400315105ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_sort/Makefile000066400000000000000000000013061456575232400331500ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_sort HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/qsort_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_sort_harness.c000066400000000000000000000025511456575232400374150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_sort/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Standard implementation of compare function for qsort */ size_t item_size; int compare(const void *a, const void *b) { __CPROVER_precondition(__CPROVER_r_ok(a, item_size), "first element readable in compare function"); __CPROVER_precondition(__CPROVER_r_ok(b, item_size), "second element readable in compare function"); return nondet_int(); } /** * Runtime: 12s */ void aws_array_list_sort_harness() { /* data structure */ struct aws_array_list list; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); /* save current state of the data structure */ struct aws_array_list old = list; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)list.data, list.current_size, &old_byte); /* perform operation under verification */ item_size = list.item_size; aws_array_list_sort(&list, compare); /* assertions */ assert(aws_array_list_is_valid(&list)); assert_array_list_equivalence(&list, &old, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400342170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_sortThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swap/000077500000000000000000000000001456575232400314735ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swap/Makefile000066400000000000000000000015011456575232400331300ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This bound allows us to reach 100% coverage rate UNWINDSET += aws_array_list_mem_swap.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_array_list_swap HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_swap_harness.c000066400000000000000000000022301456575232400373550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swap/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
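 *
 * The sort proof above deliberately uses a nondeterministic comparator; a
 * concrete comparator for a list of int (aws-c-common API assumed, names
 * illustrative) would look like this:
 *
 *     static int s_compare_int(const void *a, const void *b) {
 *         int lhs = *(const int *)a;
 *         int rhs = *(const int *)b;
 *         return (lhs > rhs) - (lhs < rhs);
 *     }
 *
 *     aws_array_list_sort(&int_list, s_compare_int); /* ascending order */
 *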
*/ #include #include /** * Runtime: 11s */ void aws_array_list_swap_harness() { /* data structure */ struct aws_array_list list; /* parameters */ size_t index_a; size_t index_b; /* assumptions */ __CPROVER_assume(aws_array_list_is_bounded(&list, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_array_list_has_allocated_data_member(&list); __CPROVER_assume(aws_array_list_is_valid(&list)); __CPROVER_assume(index_a < aws_array_list_length(&list)); __CPROVER_assume(index_b < aws_array_list_length(&list)); /* save current state of the data structure */ struct aws_array_list old = list; /* perform operation under verification */ aws_array_list_swap(&list, index_a, index_b); /* assertions */ assert(aws_array_list_is_valid(&list)); assert(list.alloc == old.alloc); assert(list.current_size == old.current_size); assert(list.length == old.length); assert(list.item_size == old.item_size); } cbmc-proof.txt000066400000000000000000000000711456575232400342020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swapThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swap_contents/000077500000000000000000000000001456575232400334105ustar00rootroot00000000000000Makefile000066400000000000000000000012371456575232400347740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swap_contents# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list UNWINDSET += CBMCFLAGS += PROOF_UID = aws_array_list_swap_contents HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_array_list_swap_contents_harness.c000066400000000000000000000033271456575232400432170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swap_contents/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
 */
#include <aws/common/array_list.h>
#include <proof_helpers/make_common_data_structures.h>

/**
 * Runtime: 7s
 */
void aws_array_list_swap_contents_harness() {
    /* data structure */
    struct aws_array_list from;
    struct aws_array_list to;

    /* assumptions */
    __CPROVER_assume(aws_array_list_is_bounded(&from, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE));
    ensure_array_list_has_allocated_data_member(&from);
    __CPROVER_assume(aws_array_list_is_valid(&from));
    __CPROVER_assume(aws_array_list_is_bounded(&to, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE));
    ensure_array_list_has_allocated_data_member(&to);
    __CPROVER_assume(aws_array_list_is_valid(&to));
    __CPROVER_assume(from.alloc != NULL);
    __CPROVER_assume(to.alloc != NULL);
    __CPROVER_assume(from.item_size > 0);
    __CPROVER_assume(to.item_size > 0);
    __CPROVER_assume(from.item_size == to.item_size);

    /* save current state of the data structure */
    struct aws_array_list old_from = from;
    struct store_byte_from_buffer old_byte_from;
    save_byte_from_array((uint8_t *)from.data, from.current_size, &old_byte_from);
    struct aws_array_list old_to = to;
    struct store_byte_from_buffer old_byte_to;
    save_byte_from_array((uint8_t *)to.data, to.current_size, &old_byte_to);

    /* perform operation under verification */
    aws_array_list_swap_contents(&from, &to);

    /* assertions */
    assert(aws_array_list_is_valid(&from));
    assert(aws_array_list_is_valid(&to));
    assert_array_list_equivalence(&from, &old_to, &old_byte_to);
    assert_array_list_equivalence(&to, &old_from, &old_byte_from);
}

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_array_list_swap_contents/cbmc-proof.txt
This file marks the directory as containing a CBMC proof

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_advance/Makefile
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
###########
include ../Makefile.aws_byte_buf

# Required to cover the size of aws_byte_buf structure
MAX_BUFFER_SIZE=40

# This bound allows us to reach 100% coverage rate
UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1)))

CBMCFLAGS +=

PROOF_UID = aws_byte_buf_advance
HARNESS_ENTRY = $(PROOF_UID)_harness
HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c

PROOF_SOURCES += $(HARNESS_FILE)
PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c
PROJECT_SOURCES += $(SRCDIR)/source/allocator.c
PROOF_SOURCES += $(PROOF_SOURCE)/utils.c
PROOF_SOURCES += $(PROOF_STUB)/error.c
PROOF_SOURCES += $(PROOF_STUB)/memset_override.c
PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c
PROJECT_SOURCES += $(SRCDIR)/source/common.c
###########
include ../Makefile.common

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_advance/aws_byte_buf_advance_harness.c
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include <aws/common/byte_buf.h>
#include <proof_helpers/make_common_data_structures.h>

void aws_byte_buf_advance_harness() {
    /* parameters */
    struct aws_byte_buf buf;
    struct aws_byte_buf output;
    size_t len;

    /* assumptions */
    __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE));
    ensure_byte_buf_has_allocated_buffer_member(&buf);
    __CPROVER_assume(aws_byte_buf_is_valid(&buf));
    if (nondet_bool()) {
        output = buf;
    } else {
        __CPROVER_assume(aws_byte_buf_is_bounded(&output, MAX_BUFFER_SIZE));
        ensure_byte_buf_has_allocated_buffer_member(&output);
        __CPROVER_assume(aws_byte_buf_is_valid(&output));
    }

    /* save current state of the parameters */
    struct aws_byte_buf old = buf;
    struct store_byte_from_buffer old_byte_from_buf;
    save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf);

    /* operation under verification */
    if (aws_byte_buf_advance(&buf, &output, len)) {
        assert(buf.len == old.len + len);
        assert(buf.capacity == old.capacity);
        assert(buf.allocator == old.allocator);
        if (old.len > 0) {
            assert_byte_from_buffer_matches(buf.buffer, &old_byte_from_buf);
        }
        assert(output.len == 0);
        assert(output.capacity == len);
        assert(output.allocator == NULL);
    } else {
        assert_byte_buf_equivalence(&buf, &old, &old_byte_from_buf);
        assert(output.len == 0);
        assert(output.capacity == 0);
        assert(output.allocator == NULL);
        assert(output.buffer == NULL);
    }

    assert(aws_byte_buf_is_valid(&buf));
    assert(aws_byte_buf_is_valid(&output));
}

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_advance/cbmc-proof.txt
This file marks the directory as containing a CBMC proof

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append/Makefile
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
###########
include ../Makefile.aws_byte_buf

UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1)))

CBMCFLAGS +=

PROOF_UID = aws_byte_buf_append
HARNESS_ENTRY = $(PROOF_UID)_harness
HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c

PROOF_SOURCES += $(HARNESS_FILE)
PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c
PROJECT_SOURCES += $(SRCDIR)/source/allocator.c
PROOF_SOURCES += $(PROOF_SOURCE)/utils.c
PROOF_SOURCES += $(PROOF_STUB)/error.c
PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c
PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c
PROJECT_SOURCES += $(SRCDIR)/source/common.c
###########
include ../Makefile.common

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append/aws_byte_buf_append_harness.c
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include <aws/common/byte_buf.h>
#include <proof_helpers/make_common_data_structures.h>

void aws_byte_buf_append_harness() {
    struct aws_byte_buf to;
    __CPROVER_assume(aws_byte_buf_is_bounded(&to, MAX_BUFFER_SIZE));
    ensure_byte_buf_has_allocated_buffer_member(&to);
    __CPROVER_assume(aws_byte_buf_is_valid(&to));

    /* save current state of the data structure */
    struct aws_byte_buf to_old = to;

    struct aws_byte_cursor from;
    __CPROVER_assume(aws_byte_cursor_is_bounded(&from, MAX_BUFFER_SIZE));
    ensure_byte_cursor_has_allocated_buffer_member(&from);
    __CPROVER_assume(aws_byte_cursor_is_valid(&from));

    /* save current state of the data structure */
    struct aws_byte_cursor from_old = from;

    if (aws_byte_buf_append(&to, &from) == AWS_OP_SUCCESS) {
        assert(to.len == to_old.len + from.len);
    } else {
        /* if the operation returns an error, "to" must not change */
        assert_bytes_match(to_old.buffer, to.buffer, to.len);
        assert(to_old.len == to.len);
    }

    assert(aws_byte_buf_is_valid(&to));
    assert(aws_byte_cursor_is_valid(&from));
    assert(to_old.allocator == to.allocator);
    assert(to_old.capacity == to.capacity);
    assert_bytes_match(from_old.ptr, from.ptr, from.len);
    assert(from_old.len == from.len);
}

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append/cbmc-proof.txt
This file marks the directory as containing a CBMC proof

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_and_update/Makefile
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
###########
include ../Makefile.aws_byte_buf

CBMCFLAGS +=

PROOF_UID = aws_byte_buf_append_and_update
HARNESS_ENTRY = $(PROOF_UID)_harness
HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c

PROOF_SOURCES += $(HARNESS_FILE)
PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c
PROOF_SOURCES += $(PROOF_SOURCE)/utils.c
PROOF_SOURCES += $(PROOF_STUB)/error.c
PROJECT_SOURCES += $(SRCDIR)/source/allocator.c
PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c
PROJECT_SOURCES += $(SRCDIR)/source/common.c

include ../Makefile.common

aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_and_update/aws_byte_buf_append_and_update_harness.c
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
*/ #include #include void aws_byte_buf_append_and_update_harness() { struct aws_byte_buf to; ensure_byte_buf_has_allocated_buffer_member(&to); __CPROVER_assume(aws_byte_buf_is_valid(&to)); /* save current state of the data structure */ struct aws_byte_buf to_old = to; struct aws_byte_cursor from_and_update; ensure_byte_cursor_has_allocated_buffer_member(&from_and_update); __CPROVER_assume(aws_byte_cursor_is_valid(&from_and_update)); /* save current state of the data structure */ struct aws_byte_cursor from_and_update_old = from_and_update; if (aws_byte_buf_append_and_update(&to, &from_and_update) == AWS_OP_SUCCESS) { assert(to.len == to_old.len + from_and_update.len); } else { /* if the operation return an error, to must not change */ assert_bytes_match(to_old.buffer, to.buffer, to.len); assert(to_old.len == to.len); } assert(aws_byte_buf_is_valid(&to)); assert(aws_byte_cursor_is_valid(&from_and_update)); assert(to_old.allocator == to.allocator); assert(to_old.capacity == to.capacity); assert_bytes_match(from_and_update_old.ptr, from_and_update.ptr, from_and_update.len); assert(from_and_update_old.len == from_and_update.len); } cbmc-proof.txt000066400000000000000000000000711456575232400363110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_and_updateThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_dynamic/000077500000000000000000000000001456575232400331225ustar00rootroot00000000000000Makefile000066400000000000000000000013211456575232400345000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_dynamic# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_buf_append_dynamic HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_append_dynamic_harness.c000066400000000000000000000023451456575232400424420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_dynamic/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_append_dynamic_harness() { struct aws_byte_buf to; ensure_byte_buf_has_allocated_buffer_member(&to); __CPROVER_assume(aws_byte_buf_is_valid(&to)); /* save current state of the data structure */ struct aws_byte_buf to_old = to; struct aws_byte_cursor from; ensure_byte_cursor_has_allocated_buffer_member(&from); __CPROVER_assume(aws_byte_cursor_is_valid(&from)); /* save current state of the data structure */ struct aws_byte_cursor from_old = from; if (aws_byte_buf_append_dynamic(&to, &from) == AWS_OP_SUCCESS) { assert(to.len == to_old.len + from.len); } else { /* if the operation return an error, to must not change */ assert_bytes_match(to_old.buffer, to.buffer, to.len); assert(to_old.len == to.len); } assert(aws_byte_buf_is_valid(&to)); assert(aws_byte_cursor_is_valid(&from)); assert(to_old.allocator == to.allocator); assert_bytes_match(from_old.ptr, from.ptr, from.len); assert(from_old.len == from.len); } cbmc-proof.txt000066400000000000000000000000711456575232400356310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_dynamicThis file marks the directory as containing a CBMC proof aws_byte_buf_append_with_lookup/000077500000000000000000000000001456575232400337635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014421456575232400354240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_with_lookup# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_byte_buf_append_with_lookup.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_append_with_lookup HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_append_with_lookup_harness.c000066400000000000000000000031061456575232400442760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_with_lookup/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_append_with_lookup_harness() { struct aws_byte_buf to; __CPROVER_assume(aws_byte_buf_is_bounded(&to, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&to); __CPROVER_assume(aws_byte_buf_is_valid(&to)); /* save current state of the data structure */ struct aws_byte_buf to_old = to; struct aws_byte_cursor from; __CPROVER_assume(aws_byte_cursor_is_bounded(&from, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&from); __CPROVER_assume(aws_byte_cursor_is_valid(&from)); /* save current state of the data structure */ struct aws_byte_cursor from_old = from; /** * The specification for the function requires that the buffer * be at least 256 bytes. 
*/ uint8_t lookup_table[256]; if (aws_byte_buf_append_with_lookup(&to, &from, lookup_table) == AWS_OP_SUCCESS) { assert(to.len == to_old.len + from.len); } else { /* if the operation return an error, to must not change */ assert_bytes_match(to_old.buffer, to.buffer, to.len); assert(to_old.len == to.len); } assert(aws_byte_buf_is_valid(&to)); assert(aws_byte_cursor_is_valid(&from)); assert(to_old.allocator == to.allocator); assert(to_old.capacity == to.capacity); assert_bytes_match(from_old.ptr, from.ptr, from.len); assert(from_old.len == from.len); } cbmc-proof.txt000066400000000000000000000000711456575232400365510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_append_with_lookupThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_cat/000077500000000000000000000000001456575232400307165ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_cat/Makefile000066400000000000000000000012441456575232400323570ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_byte_buf_cat.0:4 CBMCFLAGS += PROOF_UID = aws_byte_buf_cat HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_cat_harness.c000066400000000000000000000051671456575232400360370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_cat/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_cat_harness() { /* parameters */ struct aws_byte_buf buffer1; struct aws_byte_buf buffer2; struct aws_byte_buf buffer3; struct aws_byte_buf dest; size_t number_of_args = 3; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buffer1, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buffer1); __CPROVER_assume(aws_byte_buf_is_valid(&buffer1)); __CPROVER_assume(aws_byte_buf_is_bounded(&buffer2, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buffer2); __CPROVER_assume(aws_byte_buf_is_valid(&buffer2)); __CPROVER_assume(aws_byte_buf_is_bounded(&buffer3, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buffer3); __CPROVER_assume(aws_byte_buf_is_valid(&buffer3)); ensure_byte_buf_has_allocated_buffer_member(&dest); __CPROVER_assume(aws_byte_buf_is_valid(&dest)); /* save current state of the data structure */ struct aws_byte_buf old_buffer1 = buffer1; struct store_byte_from_buffer old_byte_from_buffer1; save_byte_from_array(buffer1.buffer, buffer1.len, &old_byte_from_buffer1); struct aws_byte_buf old_buffer2 = buffer2; struct store_byte_from_buffer old_byte_from_buffer2; save_byte_from_array(buffer2.buffer, buffer2.len, &old_byte_from_buffer2); struct aws_byte_buf old_buffer3 = buffer3; struct store_byte_from_buffer old_byte_from_buffer3; save_byte_from_array(buffer3.buffer, buffer3.len, &old_byte_from_buffer3); struct aws_byte_buf old_dest = dest; struct store_byte_from_buffer old_byte_from_dest; save_byte_from_array(dest.buffer, dest.len, &old_byte_from_dest); /* operation under verification */ if (aws_byte_buf_cat(&dest, number_of_args, &buffer1, &buffer2, &buffer3) == AWS_OP_SUCCESS) { assert((old_dest.capacity - old_dest.len) >= (buffer1.len + buffer2.len + buffer3.len)); } else { assert((old_dest.capacity - old_dest.len) < (buffer1.len + buffer2.len + buffer3.len)); } /* assertions */ assert(aws_byte_buf_is_valid(&buffer1)); assert(aws_byte_buf_is_valid(&buffer2)); assert(aws_byte_buf_is_valid(&buffer3)); assert(aws_byte_buf_is_valid(&dest)); assert_byte_buf_equivalence(&buffer1, &old_buffer1, &old_byte_from_buffer1); assert_byte_buf_equivalence(&buffer2, &old_buffer2, &old_byte_from_buffer2); assert_byte_buf_equivalence(&buffer3, &old_buffer3, &old_byte_from_buffer3); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_cat/cbmc-proof.txt000066400000000000000000000000711456575232400335040ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_up/000077500000000000000000000000001456575232400317355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_up/Makefile000066400000000000000000000014341456575232400333770ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_byte_buf MAX_BUFFER_SIZE=40 # This bound allows us to reach 100% coverage rate UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_clean_up HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_clean_up_harness.c000066400000000000000000000010371456575232400400650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_up/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_clean_up_harness() { struct aws_byte_buf buf; ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); aws_byte_buf_clean_up(&buf); assert(buf.allocator == NULL); assert(buf.buffer == NULL); assert(buf.len == 0); assert(buf.capacity == 0); } cbmc-proof.txt000066400000000000000000000000711456575232400344440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_upThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_up_secure/000077500000000000000000000000001456575232400333035ustar00rootroot00000000000000Makefile000066400000000000000000000014431456575232400346660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_up_secure# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf MAX_BUFFER_SIZE=40 # This bound allows us to reach 100% coverage rate UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_clean_up_secure HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_clean_up_secure_harness.c000066400000000000000000000013371456575232400430040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_up_secure/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_clean_up_secure_harness() { /* data structure */ struct aws_byte_buf buf; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* operation under verification */ aws_byte_buf_clean_up_secure(&buf); /* assertions */ assert(buf.allocator == NULL); assert(buf.buffer == NULL); assert(buf.len == 0); assert(buf.capacity == 0); } cbmc-proof.txt000066400000000000000000000000711456575232400360120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_clean_up_secureThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq/000077500000000000000000000000001456575232400305545ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq/Makefile000066400000000000000000000013021456575232400322100ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_eq_harness.c000066400000000000000000000031071456575232400355230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_eq_harness() { /* parameters */ struct aws_byte_buf lhs; struct aws_byte_buf rhs; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&lhs, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&lhs); __CPROVER_assume(aws_byte_buf_is_valid(&lhs)); if (nondet_bool()) { rhs = lhs; } else { __CPROVER_assume(aws_byte_buf_is_bounded(&rhs, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&rhs); __CPROVER_assume(aws_byte_buf_is_valid(&rhs)); } /* save current state of the data structure */ struct aws_byte_buf old_lhs = lhs; struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array(lhs.buffer, lhs.len, &old_byte_from_lhs); struct aws_byte_buf old_rhs = rhs; struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array(rhs.buffer, rhs.len, &old_byte_from_rhs); /* operation under verification */ if (aws_byte_buf_eq(&lhs, &rhs)) { assert(lhs.len == rhs.len); if (lhs.len > 0) { assert_bytes_match(lhs.buffer, rhs.buffer, lhs.len); } } /* assertions */ assert(aws_byte_buf_is_valid(&lhs)); assert(aws_byte_buf_is_valid(&rhs)); assert_byte_buf_equivalence(&lhs, &old_lhs, &old_byte_from_lhs); assert_byte_buf_equivalence(&rhs, &old_rhs, &old_byte_from_rhs); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq/cbmc-proof.txt000066400000000000000000000000711456575232400333420ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_c_str/000077500000000000000000000000001456575232400317465ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_c_str/Makefile000066400000000000000000000014251456575232400334100ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_c_str.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_eq_c_str HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_eq_c_str_harness.c000066400000000000000000000026621456575232400401140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_c_str/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_eq_c_str_harness() { /* parameters */ struct aws_byte_buf buf; const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* assumptions */ __CPROVER_assume(aws_c_string_is_valid(c_str)); __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte; save_byte_from_array(buf.buffer, buf.len, &old_byte); size_t str_len = strlen(c_str); struct store_byte_from_buffer old_byte_from_str; save_byte_from_array((uint8_t *)c_str, str_len, &old_byte_from_str); /* operation under verification */ if (aws_byte_buf_eq_c_str(&buf, c_str)) { assert(buf.len == str_len); if (buf.len > 0) { assert_bytes_match(buf.buffer, (uint8_t *)c_str, buf.len); } } /* asserts both parameters remain unchanged */ assert(aws_byte_buf_is_valid(&buf)); assert_byte_buf_equivalence(&buf, &old, &old_byte); if (str_len > 0) { assert_byte_from_buffer_matches((uint8_t *)c_str, &old_byte_from_str); } } cbmc-proof.txt000066400000000000000000000000711456575232400344550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_c_strThis file marks the directory as containing a CBMC proof aws_byte_buf_eq_c_str_ignore_case/000077500000000000000000000000001456575232400342255ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014551456575232400356720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_c_str_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_c_str_ignore_case.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_eq_c_str_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_eq_c_str_ignore_case_harness.c000066400000000000000000000025171456575232400450070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_c_str_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_eq_c_str_ignore_case_harness() { /* parameters */ struct aws_byte_buf buf; const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* assumptions */ __CPROVER_assume(c_str != NULL); __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte; save_byte_from_array(buf.buffer, buf.len, &old_byte); size_t str_len = strlen(c_str); struct store_byte_from_buffer old_byte_from_str; save_byte_from_array((uint8_t *)c_str, str_len, &old_byte_from_str); /* operation under verification */ if (aws_byte_buf_eq_c_str_ignore_case(&buf, c_str)) { assert(buf.len == str_len); } /* asserts both parameters remain unchanged */ assert(aws_byte_buf_is_valid(&buf)); assert_byte_buf_equivalence(&buf, &old, &old_byte); if (str_len > 0) { assert_byte_from_buffer_matches((uint8_t *)c_str, &old_byte_from_str); } } cbmc-proof.txt000066400000000000000000000000711456575232400370130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_c_str_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_ignore_case/000077500000000000000000000000001456575232400331125ustar00rootroot00000000000000Makefile000066400000000000000000000014411456575232400344730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += aws_array_eq_ignore_case.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_eq_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_eq_ignore_case_harness.c000066400000000000000000000027711456575232400424250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_eq_ignore_case_harness() { /* parameters */ struct aws_byte_buf lhs; struct aws_byte_buf rhs; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&lhs, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&lhs); __CPROVER_assume(aws_byte_buf_is_valid(&lhs)); if (nondet_bool()) { rhs = lhs; } else { __CPROVER_assume(aws_byte_buf_is_bounded(&rhs, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&rhs); __CPROVER_assume(aws_byte_buf_is_valid(&rhs)); } /* save current state of the data structure */ struct aws_byte_buf old_lhs = lhs; struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array(lhs.buffer, lhs.len, &old_byte_from_lhs); struct aws_byte_buf old_rhs = rhs; struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array(rhs.buffer, rhs.len, &old_byte_from_rhs); /* operation under verification */ if (aws_byte_buf_eq_ignore_case(&lhs, &rhs)) { assert(lhs.len == rhs.len); } /* assertions */ assert(aws_byte_buf_is_valid(&lhs)); assert(aws_byte_buf_is_valid(&rhs)); assert_byte_buf_equivalence(&lhs, &old_lhs, &old_byte_from_lhs); assert_byte_buf_equivalence(&rhs, &old_rhs, &old_byte_from_rhs); } cbmc-proof.txt000066400000000000000000000000711456575232400356210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_eq_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_array/000077500000000000000000000000001456575232400323105ustar00rootroot00000000000000Makefile000066400000000000000000000011641456575232400336730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_array# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_buf_from_array HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_from_array_harness.c000066400000000000000000000013441456575232400410140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_array/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_from_array_harness() { /* parameters */ size_t length; uint8_t *array; /* assumptions. 
*/ ASSUME_VALID_MEMORY_COUNT(array, length); /* operation under verification */ struct aws_byte_buf buf = aws_byte_buf_from_array(array, length); /* assertions */ assert(aws_byte_buf_is_valid(&buf)); assert(buf.len == length); assert(buf.capacity == length); assert(buf.allocator == NULL); if (buf.buffer) { assert_bytes_match(buf.buffer, array, buf.len); } } cbmc-proof.txt000066400000000000000000000000711456575232400350170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_arrayThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_c_str/000077500000000000000000000000001456575232400323045ustar00rootroot00000000000000Makefile000066400000000000000000000013121456575232400336620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_c_str# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_from_c_str HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_from_c_str_harness.c000066400000000000000000000015421456575232400410040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_c_str/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_from_c_str_harness() { /* parameter */ const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* operation under verification */ struct aws_byte_buf buf = aws_byte_buf_from_c_str(c_str); /* assertions */ assert(aws_byte_buf_is_valid(&buf)); assert(buf.allocator == NULL); if (buf.buffer) { assert(buf.len == strlen(c_str)); assert(buf.capacity == buf.len); assert_bytes_match(buf.buffer, (uint8_t *)c_str, buf.len); } else { if (c_str) { assert(strlen(c_str) == 0); } assert(buf.len == 0); assert(buf.capacity == 0); } } cbmc-proof.txt000066400000000000000000000000711456575232400350130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_c_strThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_empty_array/000077500000000000000000000000001456575232400335265ustar00rootroot00000000000000Makefile000066400000000000000000000011721456575232400351100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_empty_array# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_buf_from_empty_array HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_from_empty_array_harness.c000066400000000000000000000012101456575232400434400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_empty_array/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_from_empty_array_harness() { size_t capacity; void *array; ASSUME_VALID_MEMORY_COUNT(array, capacity); struct aws_byte_buf buf = aws_byte_buf_from_empty_array(array, capacity); assert(aws_byte_buf_is_valid(&buf)); assert(buf.len == 0); assert(buf.capacity == capacity); assert(buf.allocator == NULL); if (buf.buffer) { assert_bytes_match(buf.buffer, array, capacity); } } cbmc-proof.txt000066400000000000000000000000711456575232400362350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_from_empty_arrayThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init/000077500000000000000000000000001456575232400311125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init/Makefile000066400000000000000000000011471456575232400325550ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_buf_init HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_init_harness.c000066400000000000000000000013561456575232400364230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_init_harness() { /* data structure */ struct aws_byte_buf buf; /* Precondition: buf is non-null */ /* parameters */ struct aws_allocator *allocator = aws_default_allocator(); /* Precondition: allocator is non-null */ size_t capacity; if (aws_byte_buf_init(&buf, allocator, capacity) == AWS_OP_SUCCESS) { /* assertions */ assert(aws_byte_buf_is_valid(&buf)); assert(buf.allocator == allocator); assert(buf.len == 0); assert(buf.capacity == capacity); } } cbmc-proof.txt000066400000000000000000000000711456575232400336210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_initThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init_copy/000077500000000000000000000000001456575232400321445ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init_copy/Makefile000066400000000000000000000013771456575232400336140ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_init_copy HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_init_copy_harness.c000066400000000000000000000026511456575232400405060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init_copy/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_init_copy_harness() { /* data structure */ struct aws_byte_buf *dest; /* parameters */ struct aws_allocator *allocator; struct aws_byte_buf src; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&src, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&src); __CPROVER_assume(aws_byte_buf_is_valid(&src)); ASSUME_VALID_MEMORY(dest); ASSUME_DEFAULT_ALLOCATOR(allocator); /* save current state of the data structure */ struct aws_byte_buf old = src; struct store_byte_from_buffer old_byte; save_byte_from_array(src.buffer, src.len, &old_byte); /* operation under verification */ if (!aws_byte_buf_init_copy(dest, allocator, &src)) { /* assertions */ assert(aws_byte_buf_is_valid(dest)); assert(aws_byte_buf_has_allocator(dest)); assert(dest->len == src.len); assert(dest->capacity == src.capacity); assert_bytes_match(dest->buffer, src.buffer, dest->len); assert(aws_byte_buf_is_valid(&src)); if (src.len > 0) { assert_byte_from_buffer_matches(src.buffer, &old_byte); assert_byte_from_buffer_matches(dest->buffer, &old_byte); } } } cbmc-proof.txt000066400000000000000000000000711456575232400346530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init_copyThis file marks the directory as containing a CBMC proof aws_byte_buf_init_copy_from_cursor/000077500000000000000000000000001456575232400345055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014131456575232400361440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init_copy_from_cursor# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_init_copy_from_cursor HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_init_copy_from_cursor_harness.c000066400000000000000000000021321456575232400455400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init_copy_from_cursor/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_init_copy_from_cursor_harness() { /* data structure */ struct aws_byte_buf buf; /* parameters */ struct aws_allocator *allocator; struct aws_byte_cursor cursor; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cursor, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cursor); __CPROVER_assume(aws_byte_cursor_is_valid(&cursor)); ASSUME_DEFAULT_ALLOCATOR(allocator); if (aws_byte_buf_init_copy_from_cursor(&buf, allocator, cursor) == AWS_OP_SUCCESS) { /* assertions */ assert(aws_byte_buf_is_valid(&buf)); assert(aws_byte_cursor_is_valid(&cursor)); assert(buf.len == cursor.len); assert(buf.capacity == cursor.len); assert(buf.allocator == allocator); if (buf.buffer) { assert_bytes_match(buf.buffer, cursor.ptr, buf.len); } } } cbmc-proof.txt000066400000000000000000000000711456575232400372730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_init_copy_from_cursorThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserve/000077500000000000000000000000001456575232400316225ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserve/Makefile000066400000000000000000000014011456575232400332560ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_buf_reserve HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_reserve_harness.c000066400000000000000000000015171456575232400376420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserve/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_reserve_harness() { struct aws_byte_buf buf; ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); struct aws_byte_buf old = buf; size_t requested_capacity; if (aws_byte_buf_reserve(&buf, requested_capacity) == AWS_OP_SUCCESS) { assert(buf.capacity >= requested_capacity); assert(aws_byte_buf_has_allocator(&buf)); assert(aws_byte_buf_is_valid(&buf)); } assert(old.len == buf.len); assert(old.allocator == buf.allocator); if (!buf.buffer) { assert_bytes_match(old.buffer, buf.buffer, buf.len); } } cbmc-proof.txt000066400000000000000000000000711456575232400343310ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserveThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserve_relative/000077500000000000000000000000001456575232400335155ustar00rootroot00000000000000Makefile000066400000000000000000000013031456575232400350730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserve_relative# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### CBMC_UNWINDSET = CBMCFLAGS += PROOF_UID = aws_byte_buf_reserve_relative HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_reserve_relative_harness.c000066400000000000000000000013331456575232400434240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserve_relative/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_reserve_relative_harness() { struct aws_byte_buf buf; ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); struct aws_byte_buf old = buf; size_t requested_capacity; int rval = aws_byte_buf_reserve_relative(&buf, requested_capacity); if (rval == AWS_OP_SUCCESS) { assert(buf.capacity >= (old.len + requested_capacity)); assert(aws_byte_buf_has_allocator(&buf)); assert(aws_byte_buf_is_valid(&buf)); } } cbmc-proof.txt000066400000000000000000000000711456575232400362240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reserve_relativeThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reset/000077500000000000000000000000001456575232400312715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reset/Makefile000066400000000000000000000013721456575232400327340ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_byte_buf UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_reset HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_reset_harness.c000066400000000000000000000014761456575232400367640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_reset/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_byte_buf_reset_harness() { struct aws_byte_buf buf; __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); struct aws_byte_buf old = buf; bool zero_contents; aws_byte_buf_reset(&buf, zero_contents); assert(buf.len == 0); assert(buf.allocator == old.allocator); assert(buf.buffer == old.buffer); assert(buf.capacity == old.capacity); if (zero_contents) { assert_all_bytes_are(buf.buffer, 0, buf.capacity); } } cbmc-proof.txt000066400000000000000000000000711456575232400340000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_resetThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_secure_zero/000077500000000000000000000000001456575232400324745ustar00rootroot00000000000000Makefile000066400000000000000000000015101456575232400340520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_secure_zero# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf MAX_BUFFER_SIZE=40 # This bound allows us to reach 100% coverage rate UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_secure_zero HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_secure_zero_harness.c000066400000000000000000000012131456575232400413570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_secure_zero/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_secure_zero_harness() { struct aws_byte_buf buf; __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* operation under verification */ aws_byte_buf_secure_zero(&buf); assert(aws_byte_buf_is_valid(&buf)); assert_all_zeroes(buf.buffer, buf.capacity); assert(buf.len == 0); } cbmc-proof.txt000066400000000000000000000000711456575232400352030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_secure_zeroThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write/000077500000000000000000000000001456575232400313015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write/Makefile000066400000000000000000000012031456575232400327350ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf CBMCFLAGS += PROOF_UID = aws_byte_buf_write HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_write_harness.c000066400000000000000000000021601456575232400367730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_write_harness() { /* parameters */ struct aws_byte_buf buf; size_t len; uint8_t *array; /* assumptions */ ASSUME_VALID_MEMORY_COUNT(array, len); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); if (aws_byte_buf_write(&buf, array, len)) { assert(buf.len == old.len + len); assert(old.capacity == buf.capacity); assert(old.allocator == buf.allocator); if (len > 0 && buf.len > 0) { assert_bytes_match(buf.buffer + old.len, array, len); } } else { assert_byte_buf_equivalence(&buf, &old, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400340100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_writeThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be16/000077500000000000000000000000001456575232400321165ustar00rootroot00000000000000Makefile000066400000000000000000000014001456575232400334720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be16# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_write_be16 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_write_be16_harness.c000066400000000000000000000020531456575232400404260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be16/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_buf_write_be16_harness() { /* parameters */ struct aws_byte_buf buf; uint16_t x; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ if (aws_byte_buf_write_be16(&buf, x)) { assert(buf.len == old.len + 2); assert(old.capacity == buf.capacity); assert(old.allocator == buf.allocator); } else { assert_byte_buf_equivalence(&buf, &old, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400346250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be16This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be32/000077500000000000000000000000001456575232400321145ustar00rootroot00000000000000Makefile000066400000000000000000000014001456575232400334700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be32# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_write_be32 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_write_be32_harness.c000066400000000000000000000020531456575232400404220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be32/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_write_be32_harness() { /* parameters */ struct aws_byte_buf buf; uint32_t x; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ if (aws_byte_buf_write_be32(&buf, x)) { assert(buf.len == old.len + 4); assert(old.capacity == buf.capacity); assert(old.allocator == buf.allocator); } else { assert_byte_buf_equivalence(&buf, &old, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400346230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be32This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be64/000077500000000000000000000000001456575232400321215ustar00rootroot00000000000000Makefile000066400000000000000000000014001456575232400334750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be64# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_write_be64 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_write_be64_harness.c000066400000000000000000000020531456575232400404340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be64/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_write_be64_harness() { /* parameters */ struct aws_byte_buf buf; uint64_t x; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ if (aws_byte_buf_write_be64(&buf, x)) { assert(buf.len == old.len + 8); assert(old.capacity == buf.capacity); assert(old.allocator == buf.allocator); } else { assert_byte_buf_equivalence(&buf, &old, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400346300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_be64This file marks the directory as containing a CBMC proof aws_byte_buf_write_from_whole_buffer/000077500000000000000000000000001456575232400347745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014151456575232400364350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_buffer# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_write_from_whole_buffer HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_write_from_whole_buffer_harness.c000066400000000000000000000032561456575232400463260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_buffer/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_write_from_whole_buffer_harness() { /* parameters */ struct aws_byte_buf buf; struct aws_byte_buf src; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); __CPROVER_assume(aws_byte_buf_is_bounded(&src, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&src); __CPROVER_assume(aws_byte_buf_is_valid(&src)); /* save current state of the parameters */ struct aws_byte_buf buf_old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); struct aws_byte_buf src_old = src; struct store_byte_from_buffer old_byte_from_src; save_byte_from_array(src.buffer, src.len, &old_byte_from_src); /* operation under verification */ if (aws_byte_buf_write_from_whole_buffer(&buf, src)) { assert(buf.len == buf_old.len + src.len); assert(buf_old.capacity == buf.capacity); assert(buf_old.allocator == buf.allocator); if (src.len > 0 && buf.len > 0) { assert_bytes_match(buf.buffer + buf_old.len, src.buffer, src.len); } } else { assert_byte_buf_equivalence(&buf, &buf_old, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); assert(aws_byte_buf_is_valid(&src)); assert_byte_buf_equivalence(&src, &src_old, &old_byte_from_src); } cbmc-proof.txt000066400000000000000000000000711456575232400375620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_bufferThis file marks the directory as containing a CBMC proof aws_byte_buf_write_from_whole_cursor/000077500000000000000000000000001456575232400350405ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000013341456575232400365010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_cursor# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_write_from_whole_cursor HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_write_from_whole_cursor_harness.c000066400000000000000000000032751456575232400464370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_cursor/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_write_from_whole_cursor_harness() { /* parameters */ struct aws_byte_buf buf; struct aws_byte_cursor src; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); __CPROVER_assume(aws_byte_cursor_is_bounded(&src, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&src); __CPROVER_assume(aws_byte_cursor_is_valid(&src)); /* save current state of the parameters */ struct aws_byte_buf buf_old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); struct aws_byte_cursor src_old = src; struct store_byte_from_buffer old_byte_from_src; save_byte_from_array(src.ptr, src.len, &old_byte_from_src); /* operation under verification */ if (aws_byte_buf_write_from_whole_cursor(&buf, src)) { assert(buf.len == buf_old.len + src.len); assert(buf_old.capacity == buf.capacity); assert(buf_old.allocator == buf.allocator); if (src.len > 0 && buf.len > 0) { assert_bytes_match(buf.buffer + buf_old.len, src.ptr, src.len); } } else { assert_byte_buf_equivalence(&buf, &buf_old, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); assert(aws_byte_cursor_is_valid(&src)); assert_byte_cursor_equivalence(&src, &src_old, &old_byte_from_src); } cbmc-proof.txt000066400000000000000000000000711456575232400376260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_cursorThis file marks the directory as containing a CBMC proof aws_byte_buf_write_from_whole_string/000077500000000000000000000000001456575232400350315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000013341456575232400364720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_string# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string CBMCFLAGS += PROOF_UID = aws_byte_buf_write_from_whole_string HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_byte_buf_write_from_whole_string_harness.c000066400000000000000000000027211456575232400464140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_string/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include void aws_byte_buf_write_from_whole_string_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_byte_buf buf; ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the data structure */ struct aws_byte_buf old_buf = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); size_t available_cap = buf.capacity - buf.len; bool nondet_parameter; if (aws_byte_buf_write_from_whole_string(nondet_parameter ? &buf : NULL, str) && str) { assert(aws_string_is_valid(str)); assert(available_cap >= str->len); if (nondet_parameter) { assert(buf.len == old_buf.len + str->len); assert(old_buf.capacity == buf.capacity); assert(old_buf.allocator == buf.allocator); if (str->len > 0 && buf.len > 0) { assert_bytes_match(buf.buffer + old_buf.len, str->bytes, str->len); } } } else { assert_byte_buf_equivalence(&buf, &old_buf, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400376170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_from_whole_stringThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_u8/000077500000000000000000000000001456575232400317155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_u8/Makefile000066400000000000000000000013761456575232400333640ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_buf_write_u8 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_buf_write_u8_harness.c000066400000000000000000000020461456575232400400260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_u8/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_buf_write_u8_harness() { /* parameters */ struct aws_byte_buf buf; uint8_t x; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ if (aws_byte_buf_write_u8(&buf, x)) { assert(buf.len == old.len + 1); assert(old.capacity == buf.capacity); assert(old.allocator == buf.allocator); } else { assert_byte_buf_equivalence(&buf, &old, &old_byte_from_buf); } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400344240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_buf_write_u8This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advance/000077500000000000000000000000001456575232400323115ustar00rootroot00000000000000Makefile000066400000000000000000000012261456575232400336730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advance# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_advance HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_advance_harness.c000066400000000000000000000026651456575232400410250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advance/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_advance_harness() { /* data structure */ struct aws_byte_cursor cursor; size_t len; /* assumptions */ ensure_byte_cursor_has_allocated_buffer_member(&cursor); __CPROVER_assume(aws_byte_cursor_is_valid(&cursor)); /* save current state of cursor */ uint8_t *debug_ptr = cursor.ptr; size_t debug_len = cursor.len; struct aws_byte_cursor old = cursor; struct store_byte_from_buffer old_byte_from_cursor; save_byte_from_array(cursor.ptr, cursor.len, &old_byte_from_cursor); /* operation under verification */ struct aws_byte_cursor rv = aws_byte_cursor_advance(&cursor, len); /* assertions */ assert(aws_byte_cursor_is_valid(&rv)); if (old.len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || len > old.len) { assert(rv.ptr == NULL); assert(rv.len == 0); if (old.len != 0) { assert_byte_from_buffer_matches(cursor.ptr, &old_byte_from_cursor); } } else { assert(rv.ptr == old.ptr); assert(rv.len == len); if (old.ptr != NULL) { assert(cursor.ptr == old.ptr + len); } else { assert(cursor.ptr == NULL); } assert(cursor.len == old.len - len); } } cbmc-proof.txt000066400000000000000000000000711456575232400350200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advanceThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advance_nospec/000077500000000000000000000000001456575232400336605ustar00rootroot00000000000000Makefile000066400000000000000000000012351456575232400352420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advance_nospec# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_advance_nospec HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_advance_nospec_harness.c000066400000000000000000000027031456575232400437340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advance_nospec/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_advance_nospec_harness() { /* data structure */ struct aws_byte_cursor cursor; size_t len; /* assumptions */ ensure_byte_cursor_has_allocated_buffer_member(&cursor); __CPROVER_assume(aws_byte_cursor_is_valid(&cursor)); /* save current state of cursor */ uint8_t *debug_ptr = cursor.ptr; size_t debug_len = cursor.len; struct aws_byte_cursor old = cursor; struct store_byte_from_buffer old_byte_from_cursor; save_byte_from_array(cursor.ptr, cursor.len, &old_byte_from_cursor); /* operation under verification */ struct aws_byte_cursor rv = aws_byte_cursor_advance_nospec(&cursor, len); /* assertions */ assert(aws_byte_cursor_is_valid(&rv)); if (old.len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || len > old.len) { assert(rv.ptr == NULL); assert(rv.len == 0); if (old.len != 0) { assert_byte_from_buffer_matches(cursor.ptr, &old_byte_from_cursor); } } else { assert(rv.ptr == old.ptr); assert(rv.len == len); if (old.ptr != NULL) { assert(cursor.ptr == old.ptr + len); } else { assert(cursor.ptr == NULL); } assert(cursor.len == old.len - len); } } cbmc-proof.txt000066400000000000000000000000711456575232400363670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_advance_nospecThis file marks the directory as containing a CBMC proof aws_byte_cursor_compare_lexical/000077500000000000000000000000001456575232400337605ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000013221456575232400354160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_compare_lexical# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_compare_lexical HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_compare_lexical_harness.c000066400000000000000000000035221456575232400442720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_compare_lexical/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_compare_lexical_harness() { /* parameters */ struct aws_byte_cursor lhs; struct aws_byte_cursor rhs; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&lhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&lhs); __CPROVER_assume(aws_byte_cursor_is_valid(&lhs)); __CPROVER_assume(lhs.ptr != NULL); if (nondet_bool()) { rhs = lhs; } else { __CPROVER_assume(aws_byte_cursor_is_bounded(&rhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&rhs); __CPROVER_assume(aws_byte_cursor_is_valid(&rhs)); __CPROVER_assume(rhs.ptr != NULL); } /* save current state of the data structure */ struct aws_byte_cursor old_lhs = lhs; struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array(lhs.ptr, lhs.len, &old_byte_from_lhs); struct aws_byte_cursor old_rhs = rhs; struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array(rhs.ptr, rhs.len, &old_byte_from_rhs); /* operation under verification */ if (aws_byte_cursor_compare_lexical(&lhs, &rhs) == 0) { assert(lhs.len == rhs.len); if (lhs.len > 0) { assert_bytes_match(lhs.ptr, rhs.ptr, lhs.len); } } assert(aws_byte_cursor_compare_lexical(&lhs, &lhs) == 0); /* assertions */ assert(aws_byte_cursor_is_valid(&lhs)); assert(aws_byte_cursor_is_valid(&rhs)); if (lhs.len != 0) { assert_byte_from_buffer_matches(lhs.ptr, &old_byte_from_lhs); } if (rhs.len != 0) { assert_byte_from_buffer_matches(rhs.ptr, &old_byte_from_rhs); } } cbmc-proof.txt000066400000000000000000000000711456575232400365460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_compare_lexicalThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_compare_lookup/000077500000000000000000000000001456575232400337275ustar00rootroot00000000000000Makefile000066400000000000000000000013511456575232400353100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_compare_lookup# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_byte_cursor_compare_lookup.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_compare_lookup HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_compare_lookup_harness.c000066400000000000000000000035111456575232400440500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_compare_lookup/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_cursor_compare_lookup_harness() { /* parameters */ struct aws_byte_cursor lhs; struct aws_byte_cursor rhs; /** * The specification for the function requires that the buffer * be at least 256 bytes. 
*/ uint8_t lookup_table[256]; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&lhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&lhs); __CPROVER_assume(aws_byte_cursor_is_valid(&lhs)); if (nondet_bool()) { rhs = lhs; } else { __CPROVER_assume(aws_byte_cursor_is_bounded(&rhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&rhs); __CPROVER_assume(aws_byte_cursor_is_valid(&rhs)); } /* save current state of the data structure */ struct aws_byte_cursor old_lhs = lhs; struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array(lhs.ptr, lhs.len, &old_byte_from_lhs); struct aws_byte_cursor old_rhs = rhs; struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array(rhs.ptr, rhs.len, &old_byte_from_rhs); /* operation under verification */ if (aws_byte_cursor_compare_lookup(&lhs, &rhs, lookup_table) == 0) { assert(lhs.len == rhs.len); } assert(aws_byte_cursor_compare_lookup(&lhs, &lhs, lookup_table) == 0); /* assertions */ assert(aws_byte_cursor_is_valid(&lhs)); assert(aws_byte_cursor_is_valid(&rhs)); if (lhs.len != 0) { assert_byte_from_buffer_matches(lhs.ptr, &old_byte_from_lhs); } if (rhs.len != 0) { assert_byte_from_buffer_matches(rhs.ptr, &old_byte_from_rhs); } } cbmc-proof.txt000066400000000000000000000000711456575232400364360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_compare_lookupThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq/000077500000000000000000000000001456575232400313155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq/Makefile000066400000000000000000000013051456575232400327540ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_eq_harness.c000066400000000000000000000032431456575232400370260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_eq_harness() { /* parameters */ struct aws_byte_cursor lhs; struct aws_byte_cursor rhs; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&lhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&lhs); __CPROVER_assume(aws_byte_cursor_is_valid(&lhs)); if (nondet_bool()) { rhs = lhs; } else { __CPROVER_assume(aws_byte_cursor_is_bounded(&rhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&rhs); __CPROVER_assume(aws_byte_cursor_is_valid(&rhs)); } /* save current state of the data structure */ struct aws_byte_cursor old_lhs = lhs; struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array(lhs.ptr, lhs.len, &old_byte_from_lhs); struct aws_byte_cursor old_rhs = rhs; struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array(rhs.ptr, rhs.len, &old_byte_from_rhs); /* operation under verification */ if (aws_byte_cursor_eq(&lhs, &rhs)) { assert(lhs.len == rhs.len); if (lhs.len > 0) { assert_bytes_match(lhs.ptr, rhs.ptr, lhs.len); } } /* assertions */ assert(aws_byte_cursor_is_valid(&lhs)); assert(aws_byte_cursor_is_valid(&rhs)); if (lhs.len != 0) { assert_byte_from_buffer_matches(lhs.ptr, &old_byte_from_lhs); } if (rhs.len != 0) { assert_byte_from_buffer_matches(rhs.ptr, &old_byte_from_rhs); } } cbmc-proof.txt000066400000000000000000000000711456575232400340240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eqThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_byte_buf/000077500000000000000000000000001456575232400331745ustar00rootroot00000000000000Makefile000066400000000000000000000013161456575232400345560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_byte_buf# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_eq_byte_buf HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_eq_byte_buf_harness.c000066400000000000000000000030761456575232400425700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_byte_buf/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_eq_byte_buf_harness() { /* parameters */ struct aws_byte_cursor cur; struct aws_byte_buf buf; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the data structure */ struct aws_byte_cursor old_cur = cur; struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); struct aws_byte_buf old_buf = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ if (aws_byte_cursor_eq_byte_buf(&cur, &buf)) { assert(cur.len == buf.len); if (cur.len > 0) { assert_bytes_match(cur.ptr, buf.buffer, cur.len); } } /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); assert(aws_byte_buf_is_valid(&buf)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } assert_byte_buf_equivalence(&buf, &old_buf, &old_byte_from_buf); } cbmc-proof.txt000066400000000000000000000000711456575232400357030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_byte_bufThis file marks the directory as containing a CBMC proof aws_byte_cursor_eq_byte_buf_ignore_case/000077500000000000000000000000001456575232400354535ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014551456575232400371200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_byte_buf_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_ignore_case.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_eq_byte_buf_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_eq_byte_buf_ignore_case_harness.c000066400000000000000000000027631456575232400474660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_byte_buf_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_eq_byte_buf_ignore_case_harness() { /* parameters */ struct aws_byte_cursor cur; struct aws_byte_buf buf; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the data structure */ struct aws_byte_cursor old_cur = cur; struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); struct aws_byte_buf old_buf = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ if (aws_byte_cursor_eq_byte_buf_ignore_case(&cur, &buf)) { assert(cur.len == buf.len); } /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); assert(aws_byte_buf_is_valid(&buf)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } assert_byte_buf_equivalence(&buf, &old_buf, &old_byte_from_buf); } cbmc-proof.txt000066400000000000000000000000711456575232400402410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_byte_buf_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_c_str/000077500000000000000000000000001456575232400325075ustar00rootroot00000000000000Makefile000066400000000000000000000014301456575232400340660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_c_str# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_c_str.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_eq_c_str HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_eq_c_str_harness.c000066400000000000000000000027731456575232400414210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_c_str/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_eq_c_str_harness() { /* parameters */ struct aws_byte_cursor cur; const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* assumptions */ __CPROVER_assume(c_str != NULL); __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* save current state of the parameters */ struct aws_byte_cursor old = cur; struct store_byte_from_buffer old_byte_from_cursor; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cursor); size_t str_len = strlen(c_str); struct store_byte_from_buffer old_byte_from_str; save_byte_from_array((uint8_t *)c_str, str_len, &old_byte_from_str); /* operation under verification */ if (aws_byte_cursor_eq_c_str(&cur, c_str)) { assert(cur.len == str_len); if (cur.len > 0) { assert_bytes_match(cur.ptr, (uint8_t *)c_str, cur.len); } } /* asserts both parameters remain unchanged */ assert(aws_byte_cursor_is_valid(&cur)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cursor); } if (str_len > 0) { assert_byte_from_buffer_matches((uint8_t *)c_str, &old_byte_from_str); } } cbmc-proof.txt000066400000000000000000000000711456575232400352160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_c_strThis file marks the directory as containing a CBMC proof aws_byte_cursor_eq_c_str_ignore_case/000077500000000000000000000000001456575232400347665ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014601456575232400364270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_c_str_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_array_eq_c_str_ignore_case.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_eq_c_str_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_eq_c_str_ignore_case_harness.c000066400000000000000000000026521456575232400463110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_c_str_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_eq_c_str_ignore_case_harness() { /* parameters */ struct aws_byte_cursor cur; const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* assumptions */ __CPROVER_assume(c_str != NULL); __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* save current state of the parameters */ struct aws_byte_cursor old = cur; struct store_byte_from_buffer old_byte_from_cursor; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cursor); size_t str_len = strlen(c_str); struct store_byte_from_buffer old_byte_from_str; save_byte_from_array((uint8_t *)c_str, str_len, &old_byte_from_str); /* operation under verification */ if (aws_byte_cursor_eq_c_str_ignore_case(&cur, c_str)) { assert(cur.len == str_len); } /* asserts both parameters remain unchanged */ assert(aws_byte_cursor_is_valid(&cur)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cursor); } if (str_len > 0) { assert_byte_from_buffer_matches((uint8_t *)c_str, &old_byte_from_str); } } cbmc-proof.txt000066400000000000000000000000711456575232400375540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_c_str_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_ignore_case/000077500000000000000000000000001456575232400336535ustar00rootroot00000000000000Makefile000066400000000000000000000014131456575232400352330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_byte_buf UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += aws_array_eq_ignore_case.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_eq_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c include ../Makefile.common aws_byte_cursor_eq_ignore_case_harness.c000066400000000000000000000031331456575232400437200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_eq_ignore_case_harness() { /* parameters */ struct aws_byte_cursor lhs; struct aws_byte_cursor rhs; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&lhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&lhs); __CPROVER_assume(aws_byte_cursor_is_valid(&lhs)); if (nondet_bool()) { rhs = lhs; } else { __CPROVER_assume(aws_byte_cursor_is_bounded(&rhs, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&rhs); __CPROVER_assume(aws_byte_cursor_is_valid(&rhs)); } /* save current state of the data structure */ struct aws_byte_cursor old_lhs = lhs; struct store_byte_from_buffer old_byte_from_lhs; save_byte_from_array(lhs.ptr, lhs.len, &old_byte_from_lhs); struct aws_byte_cursor old_rhs = rhs; struct store_byte_from_buffer old_byte_from_rhs; save_byte_from_array(rhs.ptr, rhs.len, &old_byte_from_rhs); /* operation under verification */ if (aws_byte_cursor_eq_ignore_case(&lhs, &rhs)) { assert(lhs.len == rhs.len); } /* assertions */ assert(aws_byte_cursor_is_valid(&lhs)); assert(aws_byte_cursor_is_valid(&rhs)); if (lhs.len != 0) { assert_byte_from_buffer_matches(lhs.ptr, &old_byte_from_lhs); } if (rhs.len != 0) { assert_byte_from_buffer_matches(rhs.ptr, &old_byte_from_rhs); } } cbmc-proof.txt000066400000000000000000000000711456575232400363620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_eq_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_array/000077500000000000000000000000001456575232400330515ustar00rootroot00000000000000Makefile000066400000000000000000000012311456575232400344270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_array# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_from_array HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_from_array_harness.c000066400000000000000000000012411456575232400423120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_array/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_from_array_harness() { /* parameters */ size_t length; uint8_t *array; /* assumption */ ASSUME_VALID_MEMORY_COUNT(array, length); /* operation under verification */ struct aws_byte_cursor cur = aws_byte_cursor_from_array(array, length); /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); assert(cur.len == length); if (cur.ptr) { assert_bytes_match(cur.ptr, array, cur.len); } } cbmc-proof.txt000066400000000000000000000000711456575232400355600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_arrayThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_buf/000077500000000000000000000000001456575232400325075ustar00rootroot00000000000000Makefile000066400000000000000000000012271456575232400340720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_buf# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_from_buf HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_from_buf_harness.c000066400000000000000000000021071456575232400414100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_buf/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_cursor_from_buf_harness() { /* parameter */ struct aws_byte_buf buf; /* assumptions */ __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the parameters */ struct aws_byte_buf old = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ struct aws_byte_cursor cur = aws_byte_cursor_from_buf(&buf); /* assertions */ assert(aws_byte_buf_is_valid(&buf)); assert(aws_byte_cursor_is_valid(&cur)); assert_byte_buf_equivalence(&buf, &old, &old_byte_from_buf); assert(cur.len == buf.len); if (cur.ptr) { assert_bytes_match(cur.ptr, buf.buffer, buf.len); } } cbmc-proof.txt000066400000000000000000000000711456575232400352160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_bufThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_c_str/000077500000000000000000000000001456575232400330455ustar00rootroot00000000000000Makefile000066400000000000000000000013151456575232400344260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_c_str# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_byte_buf UNWINDSET += strlen.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_from_c_str HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_from_c_str_harness.c000066400000000000000000000013611456575232400423050ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_c_str/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_cursor_from_c_str_harness() { /* parameters */ const char *c_str = ensure_c_str_is_allocated(MAX_BUFFER_SIZE); /* operation under verification */ struct aws_byte_cursor cur = aws_byte_cursor_from_c_str(c_str); /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); if (cur.ptr) { /* if ptr is NULL len shoud be 0, otherwise equal to c_str */ assert(cur.len == strlen(c_str)); assert_bytes_match(cur.ptr, (uint8_t *)c_str, cur.len); } else { assert(cur.len == 0); } } cbmc-proof.txt000066400000000000000000000000711456575232400355540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_c_strThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_string/000077500000000000000000000000001456575232400332415ustar00rootroot00000000000000Makefile000066400000000000000000000012421456575232400346210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_string# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string CBMCFLAGS += PROOF_UID = aws_byte_cursor_from_string HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_byte_cursor_from_string_harness.c000066400000000000000000000013011456575232400426670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_string/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include void aws_byte_cursor_from_string_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(aws_string_is_valid(str)); struct aws_byte_cursor cursor = aws_byte_cursor_from_string(str); assert(aws_string_is_valid(str)); assert(aws_byte_cursor_is_valid(&cursor)); assert(cursor.len == str->len); assert(cursor.ptr == str->bytes); assert_bytes_match(str->bytes, cursor.ptr, str->len); } cbmc-proof.txt000066400000000000000000000000711456575232400357500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_from_stringThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_left_trim_pred/000077500000000000000000000000001456575232400337075ustar00rootroot00000000000000Makefile000066400000000000000000000013511456575232400352700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_left_trim_pred# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_byte_cursor_left_trim_pred.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_left_trim_pred HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_left_trim_pred_harness.c000066400000000000000000000020121456575232400440030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_left_trim_pred/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_cursor_left_trim_pred_harness() { /* parameters */ struct aws_byte_cursor cur; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* save current state of the data structure */ struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); /* operation under verification */ struct aws_byte_cursor rv = aws_byte_cursor_left_trim_pred(&cur, uninterpreted_predicate_fn); /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); assert(aws_byte_cursor_is_valid(&rv)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } } cbmc-proof.txt000066400000000000000000000000711456575232400364160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_left_trim_predThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read/000077500000000000000000000000001456575232400316235ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read/Makefile000066400000000000000000000012231456575232400332610ustar00rootroot00000000000000# Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_read HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_read_harness.c000066400000000000000000000031271456575232400376430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_cursor_read_harness() { /* parameters */ struct aws_byte_cursor cur; size_t length; void *dest = malloc(length); /* assumptions */ ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* precondition */ __CPROVER_assume(AWS_MEM_IS_WRITABLE(dest, length)); /* save current state of the data structure */ struct aws_byte_cursor old_cur = cur; struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); /* operation under verification */ if (aws_byte_cursor_read(&cur, dest, length)) { assert_bytes_match(old_cur.ptr, dest, length); } /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); /* the following assertions are included, because aws_byte_cursor_read internally uses * aws_byte_cursor_advance_nospec and it copies the bytes from the advanced cursor to *dest */ if (old_cur.len > (SIZE_MAX >> 1) || length > (SIZE_MAX >> 1) || length > old_cur.len) { if (old_cur.len != 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } } else { if (old_cur.ptr != NULL) { assert(cur.ptr == old_cur.ptr + length); } else { assert(cur.ptr == NULL); } assert(cur.len == old_cur.len - length); } } cbmc-proof.txt000066400000000000000000000000711456575232400343320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_readThis file marks the directory as containing a CBMC proof aws_byte_cursor_read_and_fill_buffer/000077500000000000000000000000001456575232400347255ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000012431456575232400363650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_and_fill_buffer# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_read_and_fill_buffer HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_read_and_fill_buffer_harness.c000066400000000000000000000037061456575232400462100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_and_fill_buffer/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_byte_cursor_read_and_fill_buffer_harness() { /* parameters */ struct aws_byte_cursor cur; struct aws_byte_buf buf; /* assumptions */ ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* save current state of the data structure */ struct aws_byte_cursor old_cur = cur; struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); struct aws_byte_buf old_buf = buf; struct store_byte_from_buffer old_byte_from_buf; save_byte_from_array(buf.buffer, buf.len, &old_byte_from_buf); /* operation under verification */ if (aws_byte_cursor_read_and_fill_buffer(&cur, &buf)) { assert(buf.len == buf.capacity); assert_bytes_match(old_cur.ptr, buf.buffer, buf.capacity); } /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); assert(aws_byte_buf_is_valid(&buf)); assert(old_buf.allocator == buf.allocator); /* the following assertions are included, because aws_byte_cursor_read internally uses * aws_byte_cursor_advance_nospec and it copies the bytes from the advanced cursor to *dest */ if (old_cur.len > (SIZE_MAX >> 1) || old_buf.capacity > (SIZE_MAX >> 1) || old_buf.capacity > old_cur.len) { if (old_cur.len != 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } } else { if (old_cur.ptr != NULL) { assert(cur.ptr == old_cur.ptr + old_buf.capacity); } else { assert(cur.ptr == NULL); } assert(cur.len == old_cur.len - old_buf.capacity); } } cbmc-proof.txt000066400000000000000000000000711456575232400375130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_and_fill_bufferThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be16/000077500000000000000000000000001456575232400324405ustar00rootroot00000000000000Makefile000066400000000000000000000012301456575232400340150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be16# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_read_be16 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_read_be16_harness.c000066400000000000000000000006071456575232400412750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be16/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define DEST_TYPE uint16_t #define BYTE_WIDTH 2 #define BYTE_CURSOR_READ aws_byte_cursor_read_be16 #define AWS_NTOH aws_ntoh16 #include void aws_byte_cursor_read_be16_harness() { aws_byte_cursor_read_common_harness(); } cbmc-proof.txt000066400000000000000000000000711456575232400351470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be16This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be32/000077500000000000000000000000001456575232400324365ustar00rootroot00000000000000Makefile000066400000000000000000000012301456575232400340130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be32# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_read_be32 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_read_be32_harness.c000066400000000000000000000006071456575232400412710ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be32/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define DEST_TYPE uint32_t #define BYTE_WIDTH 4 #define BYTE_CURSOR_READ aws_byte_cursor_read_be32 #define AWS_NTOH aws_ntoh32 #include void aws_byte_cursor_read_be32_harness() { aws_byte_cursor_read_common_harness(); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be64/000077500000000000000000000000001456575232400324435ustar00rootroot00000000000000Makefile000066400000000000000000000012301456575232400340200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be64# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
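# (Editorial note, not part of the upstream Makefile.) The read_be16/be32/be64 proofs
# share one width-parameterized harness: each per-width harness file only defines
# DEST_TYPE, BYTE_WIDTH, BYTE_CURSOR_READ and AWS_NTOH (for this proof uint64_t / 8 /
# aws_byte_cursor_read_be64 / aws_ntoh64) and then calls
# aws_byte_cursor_read_common_harness(), as the be16 and be32 harnesses earlier in this
# tree show.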
########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_read_be64 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_read_be64_harness.c000066400000000000000000000006071456575232400413030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be64/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define DEST_TYPE uint64_t #define BYTE_WIDTH 8 #define BYTE_CURSOR_READ aws_byte_cursor_read_be64 #define AWS_NTOH aws_ntoh64 #include void aws_byte_cursor_read_be64_harness() { aws_byte_cursor_read_common_harness(); } cbmc-proof.txt000066400000000000000000000000711456575232400351520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_be64This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_u8/000077500000000000000000000000001456575232400322375ustar00rootroot00000000000000Makefile000066400000000000000000000012261456575232400336210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_u8# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_byte_cursor_read_u8 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_read_u8_harness.c000066400000000000000000000025361456575232400406760ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_u8/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_read_u8_harness() { /* parameters */ struct aws_byte_cursor cur; size_t length; __CPROVER_assume(length >= 1); uint8_t *dest = malloc(length); /* assumptions */ ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* precondition */ __CPROVER_assume(AWS_MEM_IS_WRITABLE(dest, 1)); /* save current state of the data structure */ struct aws_byte_cursor old_cur = cur; struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); /* operation under verification */ if (aws_byte_cursor_read_u8(&cur, dest)) { assert_bytes_match(old_cur.ptr, dest, 1); } /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); /* the following assertions are included, because aws_byte_cursor_read internally uses * aws_byte_cursor_advance_nospec and it copies the bytes from the advanced cursor to *dest */ if (old_cur.len < (SIZE_MAX >> 1) && old_cur.len > 1) { assert(cur.ptr == old_cur.ptr + 1); assert(cur.len == old_cur.len - 1); } } cbmc-proof.txt000066400000000000000000000000711456575232400347460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_read_u8This file marks the directory as containing a CBMC proof aws_byte_cursor_right_trim_pred/000077500000000000000000000000001456575232400340135ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000013531456575232400354550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_right_trim_pred# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_byte_cursor_right_trim_pred.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_right_trim_pred HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_right_trim_pred_harness.c000066400000000000000000000020141456575232400443530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_right_trim_pred/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
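 *
 * (Editorial note, not upstream documentation.) As in the left-trim proof, the harness
 * below passes uninterpreted_predicate_fn as the trimming predicate, so CBMC treats the
 * predicate as an arbitrary function and the asserted properties are checked against
 * every possible predicate behaviour rather than one concrete implementation.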
*/ #include #include void aws_byte_cursor_right_trim_pred_harness() { /* parameters */ struct aws_byte_cursor cur; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* save current state of the data structure */ struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); /* operation under verification */ struct aws_byte_cursor rv = aws_byte_cursor_right_trim_pred(&cur, uninterpreted_predicate_fn); /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); assert(aws_byte_cursor_is_valid(&rv)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } } cbmc-proof.txt000066400000000000000000000000711456575232400366010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_right_trim_predThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_satisfies_pred/000077500000000000000000000000001456575232400337145ustar00rootroot00000000000000Makefile000066400000000000000000000013511456575232400352750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_satisfies_pred# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_byte_cursor_left_trim_pred.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_satisfies_pred HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_satisfies_pred_harness.c000066400000000000000000000017031456575232400440230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_satisfies_pred/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_satisfies_pred_harness() { /* parameters */ struct aws_byte_cursor cur; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* save current state of the data structure */ struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); /* operation under verification */ aws_byte_cursor_satisfies_pred(&cur, uninterpreted_predicate_fn); /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } } cbmc-proof.txt000066400000000000000000000000711456575232400364230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_satisfies_predThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_trim_pred/000077500000000000000000000000001456575232400326755ustar00rootroot00000000000000Makefile000066400000000000000000000014761456575232400342660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_trim_pred# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_byte_cursor_left_trim_pred.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) UNWINDSET += aws_byte_cursor_right_trim_pred.0:$(shell echo $$(($(MAX_BUFFER_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_byte_cursor_trim_pred HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_byte_cursor_trim_pred_harness.c000066400000000000000000000020001456575232400417540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_trim_pred/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_byte_cursor_trim_pred_harness() { /* parameters */ struct aws_byte_cursor cur; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* save current state of the data structure */ struct store_byte_from_buffer old_byte_from_cur; save_byte_from_array(cur.ptr, cur.len, &old_byte_from_cur); /* operation under verification */ struct aws_byte_cursor rv = aws_byte_cursor_trim_pred(&cur, uninterpreted_predicate_fn); /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); assert(aws_byte_cursor_is_valid(&rv)); if (cur.len > 0) { assert_byte_from_buffer_matches(cur.ptr, &old_byte_from_cur); } } cbmc-proof.txt000066400000000000000000000000711456575232400354040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_byte_cursor_trim_predThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_array_ignore_case/000077500000000000000000000000001456575232400327475ustar00rootroot00000000000000Makefile000066400000000000000000000013411456575232400343270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_array_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_hash_array_ignore_case.0:$(shell echo $$((1 + $(MAX_BUFFER_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_array_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_hash_array_ignore_case_harness.c000066400000000000000000000011421456575232400421060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_array_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_hash_array_ignore_case_harness() { /* parameters */ size_t length; __CPROVER_assume(length < MAX_BUFFER_SIZE); uint8_t *array = malloc(length); __CPROVER_assume(AWS_MEM_IS_READABLE(array, length)); /* operation under verification */ uint64_t rval = aws_hash_array_ignore_case(array, length); /* assertions */ assert(AWS_MEM_IS_READABLE(array, length)); } cbmc-proof.txt000066400000000000000000000000711456575232400354560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_array_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_byte_cursor_ptr/000077500000000000000000000000001456575232400325205ustar00rootroot00000000000000Makefile000066400000000000000000000021021456575232400340740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_byte_cursor_ptr# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
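# (Editorial note, not part of the upstream Makefile.) The three UNWINDSET entries below
# bound the loops of the file-local hashlittle2 function to MAX_CURSOR_SIZE / 12 + 1
# iterations; presumably the divisor reflects hashlittle2 consuming its input in 12-byte
# blocks, so a cursor bounded by MAX_CURSOR_SIZE needs at most that many iterations.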
########### MAX_CURSOR_SIZE ?= 32 DEFINES += -DMAX_CURSOR_SIZE=$(MAX_CURSOR_SIZE) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.0:$(shell echo $$(( $$(( $(MAX_CURSOR_SIZE) / 12 )) +1 )) ) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.1:$(shell echo $$(( $$(( $(MAX_CURSOR_SIZE) / 12 )) +1 )) ) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.2:$(shell echo $$(( $$(( $(MAX_CURSOR_SIZE) / 12 )) +1 )) ) CBMCFLAGS += PROOF_UID = aws_hash_byte_cursor_ptr HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_byte_cursor_ptr_harness.c000066400000000000000000000013011456575232400414250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_byte_cursor_ptr/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_byte_cursor_ptr_harness() { struct aws_byte_cursor cur; __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_CURSOR_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); /* This function has no pre or post conditions */ uint64_t rval = aws_hash_byte_cursor_ptr(&cur); assert(aws_byte_cursor_is_valid(&cur)); } cbmc-proof.txt000066400000000000000000000000711456575232400352270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_byte_cursor_ptrThis file marks the directory as containing a CBMC proof aws_hash_byte_cursor_ptr_ignore_case/000077500000000000000000000000001456575232400347775ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000013531456575232400364410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_byte_cursor_ptr_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += aws_hash_array_ignore_case.0:$(shell echo $$((1 + $(MAX_BUFFER_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_byte_cursor_ptr_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_hash_byte_cursor_ptr_ignore_case_harness.c000066400000000000000000000013441456575232400463300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_byte_cursor_ptr_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_hash_byte_cursor_ptr_ignore_case_harness() { /* parameters */ struct aws_byte_cursor cur; /* assumptions */ __CPROVER_assume(aws_byte_cursor_is_bounded(&cur, MAX_BUFFER_SIZE)); ensure_byte_cursor_has_allocated_buffer_member(&cur); __CPROVER_assume(aws_byte_cursor_is_valid(&cur)); __CPROVER_assume(AWS_MEM_IS_READABLE(cur.ptr, cur.len)); /* operation under verification */ aws_hash_byte_cursor_ptr_ignore_case(&cur); /* assertions */ assert(aws_byte_cursor_is_valid(&cur)); } cbmc-proof.txt000066400000000000000000000000711456575232400375650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_byte_cursor_ptr_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_c_string/000077500000000000000000000000001456575232400311035ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_c_string/Makefile000066400000000000000000000022511456575232400325430ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### #32: 4s #64: 5s #128 6s #256 11s #1024: 2m30s MAX_STRING_SIZE ?= 32 DEFINES += -DMAX_STRING_SIZE=$(MAX_STRING_SIZE) UNWINDSET += strlen.0:$(shell echo $$(( $(MAX_STRING_SIZE) + 1))) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.0:$(shell echo $$(( $$(( $(MAX_STRING_SIZE) / 12 )) +1 )) ) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.1:$(shell echo $$(( $$(( $(MAX_STRING_SIZE) / 12 )) +1 )) ) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.2:$(shell echo $$(( $$(( $(MAX_STRING_SIZE) / 12 )) +1 )) ) CBMCFLAGS += PROOF_UID = aws_hash_c_string HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c PROJECT_SOURCES += $(SRCDIR)/source/string.c ########### include ../Makefile.common aws_hash_c_string_harness.c000066400000000000000000000007521456575232400364040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_c_string/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_c_string_harness() { const char *str = ensure_c_str_is_allocated(MAX_STRING_SIZE); __CPROVER_assume(aws_c_string_is_valid(str)); uint64_t rval = aws_hash_c_string(str); } cbmc-proof.txt000066400000000000000000000000711456575232400336120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_c_stringThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_c_str_eq/000077500000000000000000000000001456575232400327065ustar00rootroot00000000000000Makefile000066400000000000000000000015721456575232400342740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_c_str_eq# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### #NOTE: Sufficient for full coverage #16: 4s #64: 14s #128 1m40ss MAX_STRING_LEN ?= 64 DEFINES += -DMAX_STRING_LEN=$(MAX_STRING_LEN) UNWINDSET += strcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_hash_callback_c_str_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c PROJECT_SOURCES += $(SRCDIR)/source/string.c ########### include ../Makefile.common aws_hash_callback_c_str_eq_harness.c000066400000000000000000000013221456575232400420040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_c_str_eq/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_callback_c_str_eq_harness() { const char *str1 = ensure_c_str_is_allocated(MAX_STRING_LEN); const char *str2 = nondet_bool() ? str1 : ensure_c_str_is_allocated(MAX_STRING_LEN); __CPROVER_assume(aws_c_string_is_valid(str1)); __CPROVER_assume(aws_c_string_is_valid(str2)); bool rval = aws_hash_callback_c_str_eq(str1, str2); if (rval) { size_t len = strlen(str1); assert_bytes_match(str1, str2, len); } } cbmc-proof.txt000066400000000000000000000000711456575232400354150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_c_str_eqThis file marks the directory as containing a CBMC proof aws_hash_callback_string_destroy/000077500000000000000000000000001456575232400341075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014021456575232400355440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_string_destroy# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### #NOTE: Sufficient for full coverage #16: 4s #64: 5s #128 10s #256 53s MAX_STRING_LEN ?= 128 DEFINES += -DMAX_STRING_LEN=$(MAX_STRING_LEN) CBMCFLAGS += PROOF_UID = aws_hash_callback_string_destroy HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c PROJECT_SOURCES += $(SRCDIR)/source/string.c ########### include ../Makefile.common aws_hash_callback_string_destroy_harness.c000066400000000000000000000007501456575232400445500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_string_destroy/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include void aws_hash_callback_string_destroy_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(aws_string_is_valid(str)); aws_hash_callback_string_destroy(str); } cbmc-proof.txt000066400000000000000000000000711456575232400366750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_string_destroyThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_string_eq/000077500000000000000000000000001456575232400331025ustar00rootroot00000000000000Makefile000066400000000000000000000015021456575232400344610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_string_eq# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### #NOTE: Sufficient for full coverage #32: 6 #64: 15s #96: 1m #128 2m 10s MAX_STRING_LEN ?= 96 DEFINES += -DMAX_STRING_LEN=$(MAX_STRING_LEN) UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_hash_callback_string_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c PROJECT_SOURCES += $(SRCDIR)/source/string.c ########### include ../Makefile.common aws_hash_callback_string_eq_harness.c000066400000000000000000000014331456575232400423770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_string_eq/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_callback_string_eq_harness() { const struct aws_string *str1 = nondet_allocate_string_bounded_length(MAX_STRING_LEN); const struct aws_string *str2 = nondet_bool() ? str1 : nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(aws_string_is_valid(str1)); __CPROVER_assume(aws_string_is_valid(str2)); bool rval = aws_hash_callback_string_eq(str1, str2); if (rval) { assert(str1->len == str2->len); assert_bytes_match(str1->bytes, str2->bytes, str1->len); } } cbmc-proof.txt000066400000000000000000000000711456575232400356110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_callback_string_eqThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_begin/000077500000000000000000000000001456575232400314025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_begin/Makefile000066400000000000000000000014751456575232400330510ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
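# (Editorial note, not part of the upstream Makefile.) The "#16: 6 seconds / #32: 10s /
# #128: 2m 45s" comments below appear to record observed proof runtimes for candidate
# MAX_TABLE_SIZE values; the chosen size is passed to the harness through DEFINES as
# -DMAX_TABLE_SIZE and also drives the unwind bound for the table-iteration helper.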
########### #16: 6 seconds #32: 10s #128: 2m 45s MAX_TABLE_SIZE ?= 32 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET += __CPROVER_file_local_hash_table_c_s_get_next_element.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_iter_begin HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_iter_begin_harness.c000066400000000000000000000015421456575232400372000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_begin/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_iter_begin_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); struct store_byte_from_buffer old_byte; save_byte_from_hash_table(&map, &old_byte); struct aws_hash_iter iter = aws_hash_iter_begin(&map); assert(aws_hash_iter_is_valid(&iter)); assert(iter.status == AWS_HASH_ITER_STATUS_DONE || iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE); assert(aws_hash_table_is_valid(&map)); check_hash_table_unchanged(&map, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400341110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_beginThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_delete/000077500000000000000000000000001456575232400315605ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_delete/Makefile000066400000000000000000000015051456575232400332210ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. #4: 4m40s (smallest size that gives full coverage) MAX_TABLE_SIZE ?= 4 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) CBMCFLAGS += PROOF_UID = aws_hash_iter_delete HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/s_remove_entry_override.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_remove_entry include ../Makefile.common aws_hash_iter_delete_harness.c000066400000000000000000000022361456575232400375350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_delete/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include void aws_hash_iter_delete_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); __CPROVER_assume(map.p_impl->destroy_key_fn == hash_proof_destroy_noop || !map.p_impl->destroy_key_fn); __CPROVER_assume(map.p_impl->destroy_value_fn == hash_proof_destroy_noop || !map.p_impl->destroy_value_fn); size_t empty_slot_idx; __CPROVER_assume(aws_hash_table_has_an_empty_slot(&map, &empty_slot_idx)); struct hash_table_state *state = map.p_impl; struct aws_hash_iter iter; iter.map = ↦ __CPROVER_assume(aws_hash_iter_is_valid(&iter)); __CPROVER_assume(iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE); aws_hash_iter_delete(&iter, nondet_bool()); assert(aws_hash_iter_is_valid(&iter)); assert(iter.status == AWS_HASH_ITER_STATUS_DELETE_CALLED); } cbmc-proof.txt000066400000000000000000000000711456575232400342670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_deleteThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_done/000077500000000000000000000000001456575232400312435ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_done/Makefile000066400000000000000000000013701456575232400327040ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # ensure_allocated_hash_table expects some max table size # using SIZE_MAX leaves it maximally unconstrained. DEFINES += -DMAX_TABLE_SIZE=SIZE_MAX CBMCFLAGS += PROOF_UID = aws_hash_iter_done HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_iter_done_harness.c000066400000000000000000000021421456575232400366770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_done/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
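 *
 * (Editorial note, not upstream documentation.) Besides checking the return value, the
 * harness below saves an arbitrary byte of the table (save_byte_from_hash_table) before
 * the call and re-checks it afterwards (check_hash_table_unchanged), which is how these
 * proofs show that a read-only query such as aws_hash_iter_done leaves the map intact.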
*/ #include #include #include #include void aws_hash_iter_done_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); struct aws_hash_iter iter; iter.map = ↦ __CPROVER_assume(aws_hash_iter_is_valid(&iter)); __CPROVER_assume(iter.status == AWS_HASH_ITER_STATUS_DONE || iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE); enum aws_hash_iter_status old_status = iter.status; struct store_byte_from_buffer old_byte; save_byte_from_hash_table(&map, &old_byte); bool rval = aws_hash_iter_done(&iter); assert(aws_hash_iter_is_valid(&iter)); assert(rval == (iter.status == AWS_HASH_ITER_STATUS_DONE)); assert(iter.status == old_status); assert(aws_hash_table_is_valid(&map)); check_hash_table_unchanged(&map, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400337520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_doneThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_next/000077500000000000000000000000001456575232400312745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_next/Makefile000066400000000000000000000014751456575232400327430ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### #4: 13 s #8: 40s #16: 1m 36s #24 3m 20s MAX_TABLE_SIZE ?= 8 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET += __CPROVER_file_local_hash_table_c_s_get_next_element.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_iter_next HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_iter_next_harness.c000066400000000000000000000021151456575232400367610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_next/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include void aws_hash_iter_next_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); struct aws_hash_iter iter; iter.map = ↦ __CPROVER_assume(aws_hash_iter_is_valid(&iter)); enum aws_hash_iter_status old_status = iter.status; struct store_byte_from_buffer old_byte; save_byte_from_hash_table(&map, &old_byte); aws_hash_iter_next(&iter); assert(aws_hash_iter_is_valid(&iter)); assert(iter.status == AWS_HASH_ITER_STATUS_DONE || iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE); assert(IMPLIES(old_status == AWS_HASH_ITER_STATUS_DONE, iter.status == AWS_HASH_ITER_STATUS_DONE)); assert(aws_hash_table_is_valid(&map)); check_hash_table_unchanged(&map, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400340030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_iter_nextThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_ptr/000077500000000000000000000000001456575232400301005ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_ptr/Makefile000066400000000000000000000007571456575232400315510ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### CBMCFLAGS += PROOF_UID = aws_hash_ptr HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_ptr_harness.c000066400000000000000000000006571456575232400344020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_ptr/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_ptr_harness() { void *ptr; /* This function has no pre or post conditions */ uint64_t rval = aws_hash_ptr(ptr); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_ptr/cbmc-proof.txt000066400000000000000000000000711456575232400326660ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_string/000077500000000000000000000000001456575232400306015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_string/Makefile000066400000000000000000000021461456575232400322440ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### #32: 4s #64: 5s #128 6s #256 11s #1024: 2m 30s MAX_STRING_SIZE ?= 32 DEFINES += -DMAX_STRING_SIZE=$(MAX_STRING_SIZE) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.0:$(shell echo $$(( $$(( $(MAX_STRING_SIZE) / 12 )) +1 )) ) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.1:$(shell echo $$(( $$(( $(MAX_STRING_SIZE) / 12 )) +1 )) ) UNWINDSET += __CPROVER_file_local_lookup3_inl_hashlittle2.2:$(shell echo $$(( $$(( $(MAX_STRING_SIZE) / 12 )) +1 )) ) CBMCFLAGS += PROOF_UID = aws_hash_string HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c PROJECT_SOURCES += $(SRCDIR)/source/string.c ########### include ../Makefile.common aws_hash_string_harness.c000066400000000000000000000020041456575232400355700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_string/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_string_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_SIZE); __CPROVER_assume(aws_string_is_valid(str)); /* * #TODO: Currently, CBMC is unable to check all possible paths in these proof. * aws_hash_string function calls hashlittle2 function, which calculates two 32-bit * hash values. Internally, it contains two conditions that test for alignment to * 4 byte/2 byte boundaries, but CBMC is unable to correctly evaluate such conditions, * due to its pointer encoding. A potential fix to this problem is under development. * For more details, see https://github.com/diffblue/cbmc/pull/1086. */ uint64_t rval = aws_hash_string(str); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_string/cbmc-proof.txt000066400000000000000000000000711456575232400333670ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clean_up/000077500000000000000000000000001456575232400322305ustar00rootroot00000000000000Makefile000066400000000000000000000014151456575232400336120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clean_up# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
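# (Editorial note, not part of the upstream Makefile.) The UNWINDSET entry below bounds
# the loop labelled aws_hash_table_clear.0 to MAX_TABLE_SIZE + 1 iterations, which
# suggests that aws_hash_table_clean_up reaches aws_hash_table_clear's per-slot loop
# while destroying entries; the timing comments track how that bound scales.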
########### #4: 60s #8: 1m50s #16: 6m30s MAX_TABLE_SIZE ?= 8 include ../Makefile.aws_hash_table UNWINDSET += aws_hash_table_clear.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_table_clean_up HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_table_clean_up_harness.c000066400000000000000000000025311456575232400406530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clean_up/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_table_clean_up_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); ensure_hash_table_has_valid_destroy_functions(&map); map.p_impl->alloc = aws_default_allocator(); struct hash_table_state *state = map.p_impl; size_t empty_slot_idx; size_t size_in_bytes = sizeof(struct hash_table_state) + sizeof(struct hash_table_entry) * state->size; __CPROVER_assume(aws_hash_table_has_an_empty_slot(&map, &empty_slot_idx)); aws_hash_table_clean_up(&map); assert(map.p_impl == NULL); // Check that the bytes were zeroed. // This would normally raise a warning since the memory was deallocated, so use the pragma. #pragma CPROVER check push #pragma CPROVER check disable "pointer" #pragma CPROVER check disable "pointer-overflow" size_t i; size_t len = state->size * sizeof(state->slots[0]); __CPROVER_assume(i < len); uint8_t *bytes = (uint8_t *)&state->slots[0]; assert(bytes[i] == 0); #pragma CPROVER check pop } cbmc-proof.txt000066400000000000000000000000711456575232400347370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clean_upThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clear/000077500000000000000000000000001456575232400315305ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clear/Makefile000066400000000000000000000016221456575232400331710ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### #4: 35s #8: 1m10s #16: 6m30s MAX_TABLE_SIZE ?= 4 include ../Makefile.aws_hash_table UNWINDSET += aws_hash_table_clear.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) UNWINDSET += memset_override_0_impl.0:$(shell echo $$((1 + $(TABLE_SIZE_IN_WORDS)))) CBMCFLAGS += PROOF_UID = aws_hash_table_clear HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_0.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_table_clear_harness.c000066400000000000000000000015151456575232400374540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clear/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_table_clear_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); ensure_hash_table_has_valid_destroy_functions(&map); size_t empty_slot_idx; __CPROVER_assume(aws_hash_table_has_an_empty_slot(&map, &empty_slot_idx)); aws_hash_table_clear(&map); assert(aws_hash_table_is_valid(&map)); struct hash_table_state *impl = map.p_impl; assert_all_zeroes((uint8_t *)&impl->slots[0], impl->size * sizeof(impl->slots[0])); } cbmc-proof.txt000066400000000000000000000000711456575232400342370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_clearThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_create/000077500000000000000000000000001456575232400317055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_create/Makefile000066400000000000000000000023361456575232400333510ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
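# Pattern shared by the create, put and remove proofs: the expensive internal
# helpers (s_emplace_item, s_expand_table) have their real bodies dropped via
# REMOVE_FUNCTION_BODY and are replaced by the matching *_override.c stubs
# listed under PROOF_SOURCES, so the property of interest is checked without
# also unwinding the table-growth machinery on every path.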
########### # 2: 2m30s # 4: 3m30s MAX_TABLE_SIZE ?= 4 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET +=__CPROVER_file_local_hash_table_c_s_find_entry1.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) UNWINDSET += __CPROVER_file_local_hash_table_c_s_emplace_item.0:$(shell echo $$((1 + $$(( 2 * $(MAX_TABLE_SIZE) )) )) ) CBMCFLAGS += PROOF_UID = aws_hash_table_create HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_emplace_item_override.c PROOF_SOURCES += $(PROOF_STUB)/s_expand_table_override.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_emplace_item REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_expand_table ########### include ../Makefile.common aws_hash_table_create_harness.c000066400000000000000000000031051456575232400400030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_create/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_table_create_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); map.p_impl->equals_fn = uninterpreted_equals_assert_inputs_nonnull; map.p_impl->hash_fn = uninterpreted_hasher; map.p_impl->alloc = aws_default_allocator(); size_t empty_slot_idx; __CPROVER_assume(aws_hash_table_has_an_empty_slot(&map, &empty_slot_idx)); void *key; struct aws_hash_element *p_elem; bool get_p_elem; bool track_was_created; int was_created; struct hash_table_state old_state = *map.p_impl; int rval = aws_hash_table_create(&map, key, get_p_elem ? &p_elem : NULL, track_was_created ? &was_created : NULL); assert(aws_hash_table_is_valid(&map)); if (rval == AWS_OP_SUCCESS) { if (track_was_created) { assert(map.p_impl->entry_count == old_state.entry_count + was_created); } else { assert( map.p_impl->entry_count == old_state.entry_count || map.p_impl->entry_count == old_state.entry_count + 1); } if (get_p_elem) { assert(uninterpreted_equals(p_elem->key, key)); } } else { assert(map.p_impl->entry_count == old_state.entry_count); } } cbmc-proof.txt000066400000000000000000000000711456575232400344140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_createThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_eq/000077500000000000000000000000001456575232400310475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_eq/Makefile000066400000000000000000000015511456575232400325110ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
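# Only one loop of aws_hash_table_eq (the one labelled aws_hash_table_eq.3,
# presumably the walk over one table's entries) is given an unwinding bound
# here; the per-key lookup is handled by swapping in
# aws_hash_table_find_override.c and removing the real aws_hash_table_find
# body, which keeps the probing loop out of the unwound program.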
########### # 8: 2m30s MAX_TABLE_SIZE ?= 8 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET += aws_hash_table_eq.3:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_table_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/aws_hash_table_find_override.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c REMOVE_FUNCTION_BODY += aws_hash_table_find ########### include ../Makefile.common aws_hash_table_eq_harness.c000066400000000000000000000027361456575232400363200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_eq/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_table_eq_harness() { struct aws_hash_table map_a; ensure_allocated_hash_table(&map_a, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map_a)); map_a.p_impl->equals_fn = uninterpreted_equals_assert_inputs_nonnull; map_a.p_impl->hash_fn = uninterpreted_hasher; struct store_byte_from_buffer old_byte_a; save_byte_from_hash_table(&map_a, &old_byte_a); struct aws_hash_table map_b; ensure_allocated_hash_table(&map_b, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map_b)); map_b.p_impl->equals_fn = uninterpreted_equals_assert_inputs_nonnull; map_b.p_impl->hash_fn = uninterpreted_hasher; struct store_byte_from_buffer old_byte_b; save_byte_from_hash_table(&map_b, &old_byte_b); /* assume the preconditions */ __CPROVER_assume(aws_hash_table_is_valid(&map_a)); __CPROVER_assume(aws_hash_table_is_valid(&map_b)); bool rval = aws_hash_table_eq(&map_a, &map_b, uninterpreted_equals_assert_inputs_nonnull); assert(aws_hash_table_is_valid(&map_a)); assert(aws_hash_table_is_valid(&map_b)); check_hash_table_unchanged(&map_a, &old_byte_a); check_hash_table_unchanged(&map_b, &old_byte_b); } cbmc-proof.txt000066400000000000000000000000711456575232400335560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_eqThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_find/000077500000000000000000000000001456575232400313625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_find/Makefile000066400000000000000000000014541456575232400330260ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
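# Naming note for the UNWINDSET entries used throughout these proofs: symbols
# of the form __CPROVER_file_local_<file>_<name> are how CBMC refers to
# functions declared static in <file> (here the probing helper s_find_entry1
# in source/hash_table.c), and the numeric suffix after the dot identifies
# which loop inside that function the bound applies to.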
########### #4: 25s #8: 1m #16: 5m45s MAX_TABLE_SIZE ?= 8 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET += __CPROVER_file_local_hash_table_c_s_find_entry1.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_table_find HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_table_find_harness.c000066400000000000000000000024461456575232400371440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_find/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_table_find_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); map.p_impl->equals_fn = uninterpreted_equals_assert_inputs_nonnull; map.p_impl->hash_fn = uninterpreted_hasher; size_t empty_slot_idx; __CPROVER_assume(aws_hash_table_has_an_empty_slot(&map, &empty_slot_idx)); struct store_byte_from_buffer old_byte; save_byte_from_hash_table(&map, &old_byte); void *key; struct aws_hash_element *p_elem; /* Preconditions */ __CPROVER_assume(aws_hash_table_is_valid(&map)); __CPROVER_assume(AWS_OBJECT_PTR_IS_WRITABLE(&p_elem)); int rval = aws_hash_table_find(&map, key, &p_elem); assert(rval == AWS_OP_SUCCESS); if (p_elem) { assert(AWS_OBJECT_PTR_IS_READABLE(p_elem)); assert(p_elem->key == key || uninterpreted_equals(p_elem->key, key)); } assert(aws_hash_table_is_valid(&map)); check_hash_table_unchanged(&map, &old_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400340710ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_findThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_foreach/000077500000000000000000000000001456575232400320515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_foreach/Makefile000066400000000000000000000020061456575232400335070ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
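# aws_hash_table_foreach is verified against nondeterministic iterator stubs:
# the real aws_hash_iter_* bodies are removed and aws_hash_iter_overrides.c is
# linked in their place, and the harness callback simply returns an arbitrary
# int, so the proof checks that the table stays valid for any iterator
# behaviour and any combination of callback return flags, with only the loop
# in foreach itself unwound.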
########### #4: 25s #16: 10s #32: 1m10s MAX_TABLE_SIZE ?= 16 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET += aws_hash_table_foreach.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_table_foreach HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/aws_hash_iter_overrides.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c REMOVE_FUNCTION_BODY += aws_hash_iter_begin REMOVE_FUNCTION_BODY += aws_hash_iter_delete REMOVE_FUNCTION_BODY += aws_hash_iter_done REMOVE_FUNCTION_BODY += aws_hash_iter_next ########### include ../Makefile.common aws_hash_table_foreach_harness.c000066400000000000000000000020751456575232400403200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_foreach/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include int hash_table_foreach_proof_callback(void *context, struct aws_hash_element *pElement) { AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(pElement), "Input pointer [pElement] must be writable."); return nondet_int(); } void aws_hash_table_foreach_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); map.p_impl->equals_fn = uninterpreted_equals_assert_inputs_nonnull; map.p_impl->hash_fn = uninterpreted_hasher; void *context; aws_hash_table_foreach(&map, hash_table_foreach_proof_callback, context); /* No obvious postconditions, other than that the map remains valid. But the iterator could have modified the table */ assert(aws_hash_table_is_valid(&map)); } cbmc-proof.txt000066400000000000000000000000711456575232400345600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_foreachThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_get_entry_count/000077500000000000000000000000001456575232400336525ustar00rootroot00000000000000Makefile000066400000000000000000000011571456575232400352370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_get_entry_count# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### CBMCFLAGS += PROOF_UID = aws_hash_table_get_entry_count HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_table_get_entry_count_harness.c000066400000000000000000000020671456575232400437230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_get_entry_count/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include void aws_hash_table_get_entry_count_harness() { struct aws_hash_table table; /* There are no loops in the code under test, so use the biggest possible value */ ensure_allocated_hash_table(&table, SIZE_MAX); __CPROVER_assume(aws_hash_table_is_valid(&table)); struct hash_table_state *state = table.p_impl; struct store_byte_from_buffer stored_byte; save_byte_from_hash_table(&table, &stored_byte); size_t old_entry_count = state->entry_count; size_t rval = aws_hash_table_get_entry_count(&table); assert(rval == old_entry_count); /* Ensure that the table remains valid */ assert(aws_hash_table_is_valid(&table)); /* Ensure that table is unchanged */ check_hash_table_unchanged(&table, &stored_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400363610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_get_entry_countThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_bounded/000077500000000000000000000000001456575232400331055ustar00rootroot00000000000000Makefile000066400000000000000000000016401456575232400344670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_bounded# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Using the new calloc model, get full coverage in about 5s on unbounded sizes! MAX_TABLE_SIZE ?= SIZE_MAX DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) PROOF_UID = aws_hash_table_init_bounded HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_allocator_c_s_cf_allocator_allocate REMOVE_FUNCTION_BODY += __CPROVER_file_local_allocator_c_s_cf_allocator_preferred_size ########### include ../Makefile.common aws_hash_table_init_bounded_harness.c000066400000000000000000000021761456575232400424120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_bounded/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /** * Assume an bounded size to enable using an accurate stub for memset. 
*/ void aws_hash_table_init_bounded_harness() { struct aws_allocator *allocator = aws_default_allocator(); size_t size; __CPROVER_assume(size <= MAX_TABLE_SIZE); aws_hash_fn *hash_fn; __CPROVER_assume(hash_fn); aws_hash_callback_eq_fn *equals_fn; __CPROVER_assume(equals_fn); aws_hash_callback_destroy_fn *destroy_key_fn; aws_hash_callback_destroy_fn *destroy_value_fn; struct aws_hash_table map; int rval = aws_hash_table_init(&map, allocator, size, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn); if (rval == AWS_OP_SUCCESS) { assert(aws_hash_table_is_valid(&map)); struct hash_table_state *impl = map.p_impl; assert_all_zeroes((uint8_t *)&impl->slots[0], impl->size * sizeof(impl->slots[0])); } } cbmc-proof.txt000066400000000000000000000000711456575232400356140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_boundedThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_unbounded/000077500000000000000000000000001456575232400334505ustar00rootroot00000000000000Makefile000066400000000000000000000012131456575232400350260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_unbounded# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. PROOF_UID = aws_hash_table_init_unbounded HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_table_init_unbounded_harness.c000066400000000000000000000017451456575232400433210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_unbounded/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /** * Assume an unbounded size to get better coverage. 
* We make this performant by using a stub for memset */ void aws_hash_table_init_unbounded_harness() { struct aws_allocator *allocator = aws_default_allocator(); size_t size; aws_hash_fn *hash_fn; __CPROVER_assume(hash_fn); aws_hash_callback_eq_fn *equals_fn; __CPROVER_assume(equals_fn); aws_hash_callback_destroy_fn *destroy_key_fn; aws_hash_callback_destroy_fn *destroy_value_fn; struct aws_hash_table map; int rval = aws_hash_table_init(&map, allocator, size, hash_fn, equals_fn, destroy_key_fn, destroy_value_fn); if (rval == AWS_OP_SUCCESS) { assert(aws_hash_table_is_valid(&map)); } } cbmc-proof.txt000066400000000000000000000000711456575232400361570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_init_unboundedThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_move/000077500000000000000000000000001456575232400314105ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_move/Makefile000066400000000000000000000011441456575232400330500ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### CBMCFLAGS += PROOF_UID = aws_hash_table_move HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_table_move_harness.c000066400000000000000000000016731456575232400372210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_move/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include void aws_hash_table_move_harness() { struct aws_hash_table to; struct aws_hash_table from; // There are no loops in the code under test, so use the biggest possible value ensure_allocated_hash_table(&from, SIZE_MAX); __CPROVER_assume(aws_hash_table_is_valid(&from)); struct store_byte_from_buffer stored_byte; save_byte_from_hash_table(&from, &stored_byte); aws_hash_table_move(&to, &from); // Ensure that the destination table is valid. assert(aws_hash_table_is_valid(&to)); // Ensure that the two tables were byte for byte swapped check_hash_table_unchanged(&to, &stored_byte); } cbmc-proof.txt000066400000000000000000000000711456575232400341170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_moveThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_put/000077500000000000000000000000001456575232400312525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_put/Makefile000066400000000000000000000025671456575232400327240ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
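# aws_hash_table_put is one of the slower proofs in this set, so the default
# bound is kept at 2 (the annotations below record roughly 6 minutes at 2 and
# 14 minutes at 4). The harness snapshots the old hash_table_state and checks
# the bookkeeping directly on success, essentially:
#   assert(map.p_impl->entry_count == old_state.entry_count + was_created);
# i.e. the count grows by exactly one for a fresh key and is unchanged when an
# existing key is overwritten.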
########### # 2: 6m # 4: 14m MAX_TABLE_SIZE ?= 2 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET +=__CPROVER_file_local_hash_table_c_s_find_entry1.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) UNWINDSET += __CPROVER_file_local_hash_table_c_s_emplace_item.0:$(shell echo $$((1 + $$(( 2 * $(MAX_TABLE_SIZE) )) )) ) CBMCFLAGS += PROOF_UID = aws_hash_table_put HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_emplace_item_override.c PROOF_SOURCES += $(PROOF_STUB)/s_expand_table_override.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_emplace_item REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_expand_table REMOVE_FUNCTION_BODY += __CPROVER_file_local_allocator_c_s_cf_allocator_copy_description REMOVE_FUNCTION_BODY += __CPROVER_file_local_allocator_c_s_cf_allocator_deallocate include ../Makefile.common aws_hash_table_put_harness.c000066400000000000000000000031341456575232400367170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_put/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include // Currently takes 4m40s void aws_hash_table_put_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); map.p_impl->destroy_key_fn = nondet_bool() ? NULL : hash_proof_destroy_noop; map.p_impl->destroy_value_fn = nondet_bool() ? NULL : hash_proof_destroy_noop; map.p_impl->equals_fn = uninterpreted_equals_assert_inputs_nonnull; map.p_impl->hash_fn = uninterpreted_hasher; map.p_impl->alloc = aws_default_allocator(); size_t empty_slot_idx; __CPROVER_assume(aws_hash_table_has_an_empty_slot(&map, &empty_slot_idx)); void *key; void *value; bool track_was_created; int was_created; struct hash_table_state old_state = *map.p_impl; int rval = aws_hash_table_put(&map, key, value, track_was_created ? &was_created : NULL); if (rval == AWS_OP_SUCCESS) { if (track_was_created) { assert(map.p_impl->entry_count == old_state.entry_count + was_created); } else { assert( map.p_impl->entry_count == old_state.entry_count || map.p_impl->entry_count == old_state.entry_count + 1); } } else { assert(map.p_impl->entry_count == old_state.entry_count); } assert(aws_hash_table_is_valid(&map)); } cbmc-proof.txt000066400000000000000000000000711456575232400337610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_putThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_remove/000077500000000000000000000000001456575232400317375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_remove/Makefile000066400000000000000000000023331456575232400334000ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
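# The remove proof mirrors the put proof: besides the emplace/expand stubs it
# also swaps in s_remove_entry_override.c and drops the real s_remove_entry
# body, and its harness checks the bookkeeping in the opposite direction. On
# success the entry count shrinks by exactly was_present, and when the caller
# asks for the removed element its key must compare equal to the key that was
# passed in.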
########### # 2: 2m30s # 4: 3m30s MAX_TABLE_SIZE ?= 4 DEFINES += -DMAX_TABLE_SIZE=$(MAX_TABLE_SIZE) UNWINDSET +=__CPROVER_file_local_hash_table_c_s_find_entry1.0:$(shell echo $$((1 + $(MAX_TABLE_SIZE)))) CBMCFLAGS += PROOF_UID = aws_hash_table_remove HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_emplace_item_override.c PROOF_SOURCES += $(PROOF_STUB)/s_expand_table_override.c PROOF_SOURCES += $(PROOF_STUB)/s_remove_entry_override.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_emplace_item REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_expand_table REMOVE_FUNCTION_BODY += __CPROVER_file_local_hash_table_c_s_remove_entry include ../Makefile.common aws_hash_table_remove_harness.c000066400000000000000000000037441456575232400401000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_remove/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_hash_table_remove_harness() { struct aws_hash_table map; ensure_allocated_hash_table(&map, MAX_TABLE_SIZE); __CPROVER_assume(aws_hash_table_is_valid(&map)); map.p_impl->destroy_key_fn = nondet_bool() ? NULL : hash_proof_destroy_noop; map.p_impl->destroy_value_fn = nondet_bool() ? NULL : hash_proof_destroy_noop; map.p_impl->equals_fn = uninterpreted_equals_assert_inputs_nonnull; map.p_impl->hash_fn = uninterpreted_hasher; map.p_impl->alloc = aws_default_allocator(); size_t empty_slot_idx; __CPROVER_assume(aws_hash_table_has_an_empty_slot(&map, &empty_slot_idx)); void *key; struct aws_hash_element p_elem; bool get_p_elem; bool track_was_present; int was_present; struct hash_table_state old_state = *map.p_impl; /* assume the preconditions */ __CPROVER_assume(aws_hash_table_is_valid(&map)); __CPROVER_assume(AWS_OBJECT_PTR_IS_WRITABLE(&p_elem)); __CPROVER_assume(AWS_OBJECT_PTR_IS_WRITABLE(&was_present)); int rval = aws_hash_table_remove(&map, key, get_p_elem ? &p_elem : NULL, track_was_present ? 
&was_present : NULL); assert(aws_hash_table_is_valid(&map)); if (rval == AWS_OP_SUCCESS) { if (track_was_present) { assert(map.p_impl->entry_count == old_state.entry_count - was_present); } else { assert( map.p_impl->entry_count == old_state.entry_count || map.p_impl->entry_count == old_state.entry_count - 1); } if (get_p_elem && track_was_present && was_present == 1) { assert(uninterpreted_equals(p_elem.key, key)); } } else { assert(map.p_impl->entry_count == old_state.entry_count); } } cbmc-proof.txt000066400000000000000000000000711456575232400344460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_removeThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_swap/000077500000000000000000000000001456575232400314145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_swap/Makefile000066400000000000000000000011441456575232400330540ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### CBMCFLAGS += PROOF_UID = aws_hash_table_swap HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws_hash_table_swap_harness.c000066400000000000000000000024061456575232400372240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_swap/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include void aws_hash_table_swap_harness() { struct aws_hash_table a; struct aws_hash_table b; bool inita; bool initb; struct store_byte_from_buffer stored_byte_a; struct store_byte_from_buffer stored_byte_b; /* There are no loops in the code under test, so use the biggest possible value */ if (inita) { ensure_allocated_hash_table(&a, SIZE_MAX); __CPROVER_assume(aws_hash_table_is_valid(&a)); save_byte_from_hash_table(&a, &stored_byte_a); } if (initb) { ensure_allocated_hash_table(&b, SIZE_MAX); __CPROVER_assume(aws_hash_table_is_valid(&b)); save_byte_from_hash_table(&b, &stored_byte_b); } aws_hash_table_swap(&a, &b); if (inita) { assert(aws_hash_table_is_valid(&b)); check_hash_table_unchanged(&b, &stored_byte_a); } if (initb) { assert(aws_hash_table_is_valid(&a)); check_hash_table_unchanged(&a, &stored_byte_b); } } cbmc-proof.txt000066400000000000000000000000711456575232400341230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_hash_table_swapThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_is_power_of_two/000077500000000000000000000000001456575232400314745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_is_power_of_two/Makefile000066400000000000000000000010551456575232400331350ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
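# The harness in this directory checks aws_is_power_of_two against an
# independent specification: a size_t value is a power of two exactly when it
# has a single bit set, which the proof measures with the compiler's
# population-count builtin, picking __builtin_popcountl or __builtin_popcountll
# to match the width of size_t. For reference, the classic branch-free test
# for the same property is: x != 0 && (x & (x - 1)) == 0.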
########### CBMCFLAGS += PROOF_UID = aws_is_power_of_two HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_is_power_of_two_harness.c000066400000000000000000000007311456575232400373630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_is_power_of_two/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include void aws_is_power_of_two_harness() { size_t test_val; bool rval = aws_is_power_of_two(test_val); #if ULONG_MAX == SIZE_MAX int popcount = __builtin_popcountl(test_val); #elif ULLONG_MAX == SIZE_MAX int popcount = __builtin_popcountll(test_val); #else # error #endif assert(rval == (popcount == 1)); } cbmc-proof.txt000066400000000000000000000000711456575232400342030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_is_power_of_twoThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_back/000077500000000000000000000000001456575232400315515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_back/Makefile000066400000000000000000000021101456575232400332030ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_back HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_back_harness.c000066400000000000000000000016701456575232400375200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_back/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_back_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Keep the old last node of the linked list */ struct aws_linked_list_node *old_last = list.tail.prev; /* Assume the preconditions. 
The function requires that list != NULL */ __CPROVER_assume(!aws_linked_list_empty(&list)); /* perform operation under verification */ struct aws_linked_list_node *back = aws_linked_list_back(&list); /* assertions */ assert(aws_linked_list_is_valid(&list)); assert(aws_linked_list_node_prev_is_valid(back)); assert(aws_linked_list_node_next_is_valid(back)); assert(back == old_last); } cbmc-proof.txt000066400000000000000000000000711456575232400342600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_backThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_begin/000077500000000000000000000000001456575232400317355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_begin/Makefile000066400000000000000000000021251456575232400333750ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list # Run deep validity checks in linked_list_is_valid AWS_DEEP_CHECKS = 1 UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_begin HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_begin_harness.c000066400000000000000000000014121456575232400400620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_begin/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_begin_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Assume the preconditions */ __CPROVER_assume(aws_linked_list_is_valid(&list)); /* Note: list can never be a NULL pointer as is_valid checks for that */ /* perform operation under verification */ struct aws_linked_list_node *rval = aws_linked_list_begin(&list); /* assertions */ assert(rval == list.head.next); assert(aws_linked_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400344440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_beginThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_end/000077500000000000000000000000001456575232400314175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_end/Makefile000066400000000000000000000021231456575232400330550ustar00rootroot00000000000000# Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list # Run deep validity checks in linked_list_is_valid AWS_DEEP_CHECKS = 1 UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_end HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_end_harness.c000066400000000000000000000014101456575232400372240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_end/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_end_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Assume the preconditions */ __CPROVER_assume(aws_linked_list_is_valid(&list)); /* Note: list can never be a NULL pointer as is_valid checks for that */ /* perform operation under verification */ struct aws_linked_list_node const *rval = aws_linked_list_end(&list); /* assertions */ assert(rval == &list.tail); assert(aws_linked_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400341260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_endThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_front/000077500000000000000000000000001456575232400320015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_front/Makefile000066400000000000000000000021111456575232400334340ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_front HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. 
PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_front_harness.c000066400000000000000000000017001456575232400401720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_front/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_front_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Keep the old last node of the linked list */ struct aws_linked_list_node *old_first = list.head.next; /* Assume the preconditions. The function requires that list != NULL */ __CPROVER_assume(!aws_linked_list_empty(&list)); /* perform operation under verification */ struct aws_linked_list_node *front = aws_linked_list_front(&list); /* assertions */ assert(aws_linked_list_is_valid(&list)); assert(aws_linked_list_node_prev_is_valid(front)); assert(aws_linked_list_node_next_is_valid(front)); assert(front == old_first); } cbmc-proof.txt000066400000000000000000000000711456575232400345100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_frontThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_init/000077500000000000000000000000001456575232400316145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_init/Makefile000066400000000000000000000020511456575232400332520ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in linked_list_is_valid AWS_DEEP_CHECKS = 1 # The loops have to be unwinded as many times as the elements of the # list + 2. In this case, aws_linked_list_init returns an empty list # so it is just 2. UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:2 CBMCFLAGS += PROOF_UID = aws_linked_list_init HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_init_harness.c000066400000000000000000000010541456575232400376220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_init/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_linked_list_init_harness() { /* data structure */ struct aws_linked_list list; /* Note: list is assumed to be a valid pointer in the function's * precondition */ /* perform operation under verification */ aws_linked_list_init(&list); /* assertions */ assert(aws_linked_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400343230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_initThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_after/000077500000000000000000000000001456575232400333365ustar00rootroot00000000000000Makefile000066400000000000000000000014341456575232400347210ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_after# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list CBMCFLAGS += PROOF_UID = aws_linked_list_insert_after HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_insert_after_harness.c000066400000000000000000000016001456575232400430630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_after/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_insert_after_harness() { /* data structure */ struct aws_linked_list_node after; struct aws_linked_list_node after_next; struct aws_linked_list_node to_add; after.next = &after_next; after_next.prev = &after; /* perform operation under verification */ aws_linked_list_insert_after(&after, &to_add); /* assertions */ assert(aws_linked_list_node_next_is_valid(&after)); assert(aws_linked_list_node_prev_is_valid(&to_add)); assert(aws_linked_list_node_next_is_valid(&to_add)); assert(aws_linked_list_node_prev_is_valid(&after_next)); assert(after.next == &to_add); assert(after_next.prev == &to_add); } cbmc-proof.txt000066400000000000000000000000711456575232400360450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_afterThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_before/000077500000000000000000000000001456575232400334775ustar00rootroot00000000000000Makefile000066400000000000000000000014351456575232400350630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_before# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
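# Like the insert_after proof just above, this harness needs no UNWINDSET: it
# wires up a minimal neighbourhood of stack-allocated nodes (before_prev and
# before, plus the detached to_add), calls aws_linked_list_insert_before, and
# then asserts that the links read before_prev -> to_add -> before in both
# directions, so no list allocation or loop unwinding is involved.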
########### include ../Makefile.aws_linked_list CBMCFLAGS += PROOF_UID = aws_linked_list_insert_before HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_insert_before_harness.c000066400000000000000000000016151456575232400433730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_before/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_insert_before_harness() { /* data structure */ struct aws_linked_list_node before; struct aws_linked_list_node before_prev; struct aws_linked_list_node to_add; before.prev = &before_prev; before_prev.next = &before; /* perform operation under verification */ aws_linked_list_insert_before(&before, &to_add); /* assertions */ assert(aws_linked_list_node_prev_is_valid(&before)); assert(aws_linked_list_node_prev_is_valid(&to_add)); assert(aws_linked_list_node_next_is_valid(&to_add)); assert(aws_linked_list_node_next_is_valid(&before_prev)); assert(before.prev == &to_add); assert(before_prev.next == &to_add); } cbmc-proof.txt000066400000000000000000000000711456575232400362060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_insert_beforeThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_next/000077500000000000000000000000001456575232400316275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_next/Makefile000066400000000000000000000014241456575232400332700ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list CBMCFLAGS += PROOF_UID = aws_linked_list_next HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_next_harness.c000066400000000000000000000014451456575232400376540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_next/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_linked_list_next_harness() { /* data structure */ struct aws_linked_list_node node; // Preconditions require node to not be NULL struct aws_linked_list_node after; // Preconditions require after to not be NULL /* Assume the preconditions */ node.next = &after; after.prev = &node; /* perform operation under verification */ struct aws_linked_list_node *rval = aws_linked_list_next(&node); /* assertions */ assert(aws_linked_list_node_next_is_valid(&node)); assert(aws_linked_list_node_prev_is_valid(rval)); assert(rval == &after); } cbmc-proof.txt000066400000000000000000000000711456575232400343360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_nextThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_node_reset/000077500000000000000000000000001456575232400330005ustar00rootroot00000000000000Makefile000066400000000000000000000014321456575232400343610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_node_reset# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list CBMCFLAGS += PROOF_UID = aws_linked_list_node_reset HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_node_reset_harness.c000066400000000000000000000007731456575232400422010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_node_reset/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_node_reset_harness() { /* data structure */ struct aws_linked_list_node node; // Preconditions require node to not be NULL /* perform operation under verification */ aws_linked_list_node_reset(&node); /* assertions */ assert(AWS_IS_ZEROED(node)); } cbmc-proof.txt000066400000000000000000000000711456575232400355070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_node_resetThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_back/000077500000000000000000000000001456575232400324275ustar00rootroot00000000000000Makefile000066400000000000000000000021141456575232400340060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_back# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
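# Unwinding scheme shared by the list-level proofs (back, front, begin, end
# and the pop harnesses): with AWS_DEEP_CHECKS=1, aws_linked_list_is_valid
# walks every node, so its loop is unwound 2 + MAX_LINKED_LIST_ITEM_ALLOCATION
# times and the allocation helper ensure_linked_list_is_allocated is unwound
# 1 + MAX_LINKED_LIST_ITEM_ALLOCATION times; the push_back proof later bumps
# the first bound to 3 + MAX_LINKED_LIST_ITEM_ALLOCATION because a new node
# has already been added by the time the deep check runs.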
########### # Run deep validity checks in is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_pop_back HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_pop_back_harness.c000066400000000000000000000016431456575232400412540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_back/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_pop_back_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Assume the preconditions. The function requires that list != NULL */ __CPROVER_assume(!aws_linked_list_empty(&list)); /* Keep the old last node of the linked list */ struct aws_linked_list_node *old_prev_last = (list.tail.prev)->prev; /* perform operation under verification */ struct aws_linked_list_node *ret = aws_linked_list_pop_back(&list); /* assertions */ assert(aws_linked_list_is_valid(&list)); assert(ret->next == NULL && ret->prev == NULL); assert(list.tail.prev == old_prev_last); } cbmc-proof.txt000066400000000000000000000000711456575232400351360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_backThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_front/000077500000000000000000000000001456575232400326575ustar00rootroot00000000000000Makefile000066400000000000000000000021151456575232400342370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_front# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_pop_front HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. 
Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_pop_front_harness.c000066400000000000000000000016471456575232400417400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_front/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_pop_front_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Assume the preconditions. The function requires that list != NULL */ __CPROVER_assume(!aws_linked_list_empty(&list)); /* Keep the old last node of the linked list */ struct aws_linked_list_node *old_next_first = (list.head.next)->next; /* perform operation under verification */ struct aws_linked_list_node *ret = aws_linked_list_pop_front(&list); /* assertions */ assert(aws_linked_list_is_valid(&list)); assert(ret->next == NULL && ret->prev == NULL); assert(list.head.next == old_next_first); } cbmc-proof.txt000066400000000000000000000000711456575232400353660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_pop_frontThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_prev/000077500000000000000000000000001456575232400316255ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_prev/Makefile000066400000000000000000000014241456575232400332660ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list CBMCFLAGS += PROOF_UID = aws_linked_list_prev HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_prev_harness.c000066400000000000000000000014531456575232400376470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_prev/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_linked_list_prev_harness() { /* data structure */ struct aws_linked_list_node node; // Preconditions require node to not be NULL struct aws_linked_list_node before; // Preconditions require before to not be NULL /* Assume the preconditions */ node.prev = &before; before.next = &node; /* perform operation under verification */ struct aws_linked_list_node *rval = aws_linked_list_prev(&node); /* assertions */ assert(aws_linked_list_node_prev_is_valid(&node)); assert(aws_linked_list_node_next_is_valid(rval)); assert(rval == &before); } cbmc-proof.txt000066400000000000000000000000711456575232400343340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_prevThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_back/000077500000000000000000000000001456575232400326105ustar00rootroot00000000000000Makefile000066400000000000000000000024321456575232400341720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_back# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in linked_list_is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list ## This has to take into account that a new element has been added to ## the list afterwards so we have to unwind one more time (instead of ## the standard 2 + MAX_LINKED_LIST_ITEM_ALLOCATION) UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((3 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_push_back HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_push_back_harness.c000066400000000000000000000016011456575232400416100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_back/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_linked_list_push_back_harness() { /* data structure */ struct aws_linked_list list; struct aws_linked_list_node to_add; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Keep the old last node of the linked list */ struct aws_linked_list_node *old_last = list.tail.prev; /* perform operation under verification */ aws_linked_list_push_back(&list, &to_add); /* assertions */ assert(aws_linked_list_is_valid(&list)); assert(aws_linked_list_node_prev_is_valid(&to_add)); assert(aws_linked_list_node_next_is_valid(&to_add)); assert(list.tail.prev == &to_add); assert(to_add.prev == old_last); } cbmc-proof.txt000066400000000000000000000000711456575232400353170ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_backThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_front/000077500000000000000000000000001456575232400330405ustar00rootroot00000000000000Makefile000066400000000000000000000024331456575232400344230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_front# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in linked_list_is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list ## This has to take into account that a new element has been added to ## the list afterwards so we have to unwind one more time (instead of ## the standard 2 + MAX_LINKED_LIST_ITEM_ALLOCATION) UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((3 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_push_front HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_push_front_harness.c000066400000000000000000000015201456575232400422700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_front/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_linked_list_push_front_harness() { /* data structure */ struct aws_linked_list list; struct aws_linked_list_node to_add; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); struct aws_linked_list_node *old_first = list.head.next; /* perform operation under verification */ aws_linked_list_push_front(&list, &to_add); /* assertions */ assert(aws_linked_list_is_valid(&list)); assert(aws_linked_list_node_prev_is_valid(&to_add)); assert(aws_linked_list_node_next_is_valid(&to_add)); assert(list.head.next == &to_add); assert(to_add.next == old_first); } cbmc-proof.txt000066400000000000000000000000711456575232400355470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_push_frontThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rbegin/000077500000000000000000000000001456575232400321175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rbegin/Makefile000066400000000000000000000021261456575232400335600ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in linked_list_is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_rbegin HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_rbegin_harness.c000066400000000000000000000014141456575232400404300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rbegin/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_linked_list_rbegin_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Assume the preconditions */ __CPROVER_assume(aws_linked_list_is_valid(&list)); /* Note: list can never be a NULL pointer as is_valid checks for that */ /* perform operation under verification */ struct aws_linked_list_node *rval = aws_linked_list_rbegin(&list); /* assertions */ assert(rval == list.tail.prev); assert(aws_linked_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400346260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rbeginThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_remove/000077500000000000000000000000001456575232400321465ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_remove/Makefile000066400000000000000000000014261456575232400336110ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list CBMCFLAGS += PROOF_UID = aws_linked_list_remove HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_remove_harness.c000066400000000000000000000016011456575232400405040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_remove/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_remove_harness() { /* data structure */ struct aws_linked_list_node prev; struct aws_linked_list_node next; struct aws_linked_list_node node; /* Assume the preconditions */ prev.next = &node; node.prev = &prev; next.prev = &node; node.next = &next; /* Note: The function has a precondition that node != NULL */ /* perform operation under verification */ aws_linked_list_remove(&node); /* assertions */ assert(aws_linked_list_node_next_is_valid(&prev)); assert(aws_linked_list_node_prev_is_valid(&next)); assert(prev.next == &next); assert(node.next == NULL); assert(node.prev == NULL); } cbmc-proof.txt000066400000000000000000000000711456575232400346550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_removeThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rend/000077500000000000000000000000001456575232400316015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rend/Makefile000066400000000000000000000021241456575232400332400ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
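The aws_linked_list_remove harness above pins down what removal must leave behind: the two neighbours re-linked to each other and the removed node's pointers reset to NULL. A self-contained sketch of an operation with exactly those postconditions — a hypothetical local_list_remove(), not the library's implementation:

#include <assert.h>
#include <stddef.h>

struct node { struct node *next, *prev; };

/* hypothetical stand-in: unlink n and reset its pointers, mirroring the
 * postconditions the harness asserts */
static void local_list_remove(struct node *n) {
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->next = NULL;
    n->prev = NULL;
}

int main(void) {
    struct node prev, mid, next;
    prev.next = &mid;  mid.prev = &prev;   /* prev <-> mid  */
    mid.next  = &next; next.prev = &mid;   /* mid  <-> next */
    prev.prev = NULL;  next.next = NULL;

    local_list_remove(&mid);

    assert(prev.next == &next && next.prev == &prev);
    assert(mid.next == NULL && mid.prev == NULL);
    return 0;
}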
# SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_linked_list # Run deep validity checks in linked_list_is_valid AWS_DEEP_CHECKS = 1 UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_rend HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_rend_harness.c000066400000000000000000000014121456575232400375720ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rend/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_rend_harness() { /* data structure */ struct aws_linked_list list; ensure_linked_list_is_allocated(&list, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Assume the preconditions */ __CPROVER_assume(aws_linked_list_is_valid(&list)); /* Note: list can never be a NULL pointer as is_valid checks for that */ /* perform operation under verification */ struct aws_linked_list_node const *rval = aws_linked_list_rend(&list); /* assertions */ assert(rval == &list.head); assert(aws_linked_list_is_valid(&list)); } cbmc-proof.txt000066400000000000000000000000711456575232400343100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_rendThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_swap_contents/000077500000000000000000000000001456575232400335405ustar00rootroot00000000000000Makefile000066400000000000000000000021211456575232400351150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_swap_contents# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Run deep validity checks in is_valid AWS_DEEP_CHECKS = 1 include ../Makefile.aws_linked_list UNWINDSET += __CPROVER_file_local_linked_list_inl_aws_linked_list_is_valid_deep.0:$(shell echo $$((2 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) UNWINDSET += ensure_linked_list_is_allocated.0:$(shell echo $$((1 + $(MAX_LINKED_LIST_ITEM_ALLOCATION)))) CBMCFLAGS += PROOF_UID = aws_linked_list_swap_contents HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. 
PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_linked_list_swap_contents_harness.c000066400000000000000000000027031456575232400434740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_swap_contents/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_linked_list_swap_contents_harness() { /* data structure */ struct aws_linked_list a, b; ensure_linked_list_is_allocated(&a, MAX_LINKED_LIST_ITEM_ALLOCATION); ensure_linked_list_is_allocated(&b, MAX_LINKED_LIST_ITEM_ALLOCATION); /* Keep the old first node of the linked lists. Note that we need * to save the old head address separately from the list itself * because &old_a.head != &a.head (since they are different * variables). */ struct aws_linked_list_node *old_a_head = &a.head; struct aws_linked_list old_a = a; struct aws_linked_list_node *old_b_head = &b.head; struct aws_linked_list old_b = b; /* perform operation under verification */ aws_linked_list_swap_contents(&a, &b); /* assertions */ assert(aws_linked_list_is_valid(&a)); assert(aws_linked_list_is_valid(&b)); if (aws_linked_list_empty(&a)) { assert(old_b.tail.prev == old_b_head); } else { assert(a.head.next == old_b.head.next); assert(a.tail.prev == old_b.tail.prev); } if (aws_linked_list_empty(&b)) { assert(old_a.tail.prev == old_a_head); } else { assert(b.head.next == old_a.head.next); assert(b.tail.prev == old_a.tail.prev); } } cbmc-proof.txt000066400000000000000000000000711456575232400362470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_linked_list_swap_contentsThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_checked/000077500000000000000000000000001456575232400315655ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_checked/Makefile000066400000000000000000000010421456575232400332220ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### #NOTE: If we don't use the unwindset, leave it empty #CBMC_UNWINDSET = CBMCFLAGS += PROOF_UID = aws_mul_size_checked HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_mul_size_checked_harness.c000066400000000000000000000026001456575232400375420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_checked#include #include /** * Coverage: 1.00 (31 lines out of 31 statically-reachable lines in 5 functions reached) * Runtime: 0m3.302s * * Assumptions: * - given 2 non-deterministics unsigned integers * * Assertions: * - r does not overflow, if aws_mul_u32_checked or * aws_mul_u64_checked functions return AWS_OP_SUCCESS */ void aws_mul_size_checked_harness() { if (nondet_bool()) { /* * In this particular case, full range of nondet inputs leads * to excessively long runtimes, so use 0 or UINT64_MAX instead. */ uint64_t a = (nondet_int()) ? 
0 : UINT64_MAX; uint64_t b = nondet_uint64_t(); uint64_t r = nondet_uint64_t(); if (!aws_mul_u64_checked(a, b, &r)) { assert(r == a * b); } else { assert(__CPROVER_overflow_mult(a, b)); } } else { /* * In this particular case, full range of nondet inputs leads * to excessively long runtimes, so use 0 or UINT32_MAX instead. */ uint32_t a = (nondet_bool()) ? 0 : UINT32_MAX; uint32_t b = nondet_uint32_t(); uint32_t r = nondet_uint32_t(); if (!aws_mul_u32_checked(a, b, &r)) { assert(r == a * b); } else { assert(__CPROVER_overflow_mult(a, b)); } } } cbmc-proof.txt000066400000000000000000000000711456575232400342740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_checkedThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_saturating/000077500000000000000000000000001456575232400323605ustar00rootroot00000000000000Makefile000066400000000000000000000010451456575232400337410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_saturating# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### #NOTE: If we don't use the unwindset, leave it empty #CBMC_UNWINDSET = CBMCFLAGS += PROOF_UID = aws_mul_size_saturating HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_mul_size_saturating_harness.c000066400000000000000000000026571456575232400411440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_saturating#include #include /** * Coverage: 1.00 (24 lines out of 24 statically-reachable lines in 3 functions reached) * Runtime: 0m2.962s * * Assumptions: * - given 2 non-deterministics unsigned integers * * Assertions: * - if a * b overflows, aws_mul_u32_saturating and aws_mul_u64_saturating * functions must always return the corresponding saturated value */ void aws_mul_size_saturating_harness() { if (nondet_bool()) { /* * In this particular case, full range of nondet inputs leads * to excessively long runtimes, so use 0 or UINT64_MAX instead. */ uint64_t a = (nondet_int()) ? 0 : UINT64_MAX; uint64_t b = nondet_uint64_t(); uint64_t r = aws_mul_u64_saturating(a, b); if (a > 0 && b > 0 && a > (UINT64_MAX / b)) { assert(r == UINT64_MAX); } else { assert(r == a * b); } } else { /* * In this particular case, full range of nondet inputs leads * to excessively long runtimes, so use 0 or UINT32_MAX instead. */ uint32_t a = (nondet_bool()) ? 
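Both multiply harnesses above lean on the divide-based overflow test for unsigned multiplication: for non-zero a and b, a * b overflows exactly when a > UINT64_MAX / b. A small executable sketch of a saturating multiply built on that test — a hypothetical sat_mul_u64(), shown only to illustrate the check, not the library's implementation:

#include <assert.h>
#include <stdint.h>

static uint64_t sat_mul_u64(uint64_t a, uint64_t b) {
    /* saturate instead of wrapping when a * b would not fit in 64 bits */
    if (a > 0 && b > 0 && a > UINT64_MAX / b) {
        return UINT64_MAX;
    }
    return a * b;
}

int main(void) {
    assert(sat_mul_u64(0, UINT64_MAX) == 0);           /* no overflow with a zero factor   */
    assert(sat_mul_u64(UINT64_MAX, 2) == UINT64_MAX);  /* overflow, so the result saturates */
    assert(sat_mul_u64(1u << 20, 1u << 20) == (uint64_t)1 << 40);
    return 0;
}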
0 : UINT32_MAX; uint32_t b = nondet_uint32_t(); uint32_t r = aws_mul_u32_saturating(a, b); if (a > 0 && b > 0 && a > (UINT32_MAX / b)) { assert(r == UINT32_MAX); } else { assert(r == a * b); } } } cbmc-proof.txt000066400000000000000000000000711456575232400350670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_mul_size_saturatingThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_nospec_mask/000077500000000000000000000000001456575232400305725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_nospec_mask/Makefile000066400000000000000000000011441456575232400322320ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_byte_buf UNWINDSET += CBMCFLAGS += PROOF_UID = aws_nospec_mask HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_nospec_mask_harness.c000066400000000000000000000013101456575232400355510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_nospec_mask/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_nospec_mask_harness() { /* parameters */ size_t index; size_t bound; /* operation under verification */ size_t rval = aws_nospec_mask(index, bound); /* assertions */ if (rval == 0) { assert((index >= bound) || (bound > (SIZE_MAX / 2)) || (index > (SIZE_MAX / 2))); } else { assert(rval == UINTPTR_MAX); assert(!((index >= bound) || (bound > (SIZE_MAX / 2)) || (index > (SIZE_MAX / 2)))); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_nospec_mask/cbmc-proof.txt000066400000000000000000000000711456575232400333600ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_capacity/000077500000000000000000000000001456575232400332325ustar00rootroot00000000000000Makefile000066400000000000000000000014341456575232400346150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_capacity# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
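The aws_nospec_mask harness above encodes the contract of a bounds mask: all-ones when index is safely below bound (with both values at most SIZE_MAX / 2), zero otherwise. Callers consume such a mask by AND-ing it with the index, so a speculative out-of-range access collapses to slot 0. A naive stand-in is enough to show that usage pattern — a hardened implementation would avoid the data-dependent branch; this is only an illustration:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

static size_t naive_mask(size_t index, size_t bound) {
    return (index < bound) ? SIZE_MAX : 0; /* hardened code avoids this branch */
}

int main(void) {
    uint8_t table[8] = {10, 11, 12, 13, 14, 15, 16, 17};
    size_t i = 12;                          /* out of range            */
    size_t safe = i & naive_mask(i, 8);     /* mask is 0, so safe is 0 */
    assert(safe < 8);
    assert(table[safe] == 10);
    return 0;
}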
########### include ../Makefile.aws_array_list # This is the minimum bound to reach full coverage rate UNWINDSET += memset_impl.0:41 CBMCFLAGS += PROOF_UID = aws_priority_queue_capacity HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_priority_queue_capacity_harness.c000066400000000000000000000032021456575232400426530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_capacity/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * Runtime: 12s */ void aws_priority_queue_capacity_harness() { /* data structure */ struct aws_priority_queue queue; /* assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); __CPROVER_assume(aws_priority_queue_is_valid(&queue)); __CPROVER_assume(queue.container.item_size > 0); /* save current state of the container structure */ struct aws_array_list old_container = queue.container; struct store_byte_from_buffer old_byte_container; save_byte_from_array((uint8_t *)old_container.data, old_container.current_size, &old_byte_container); /* save current state of the backpointers structure */ struct aws_array_list old_backpointers = queue.backpointers; struct store_byte_from_buffer old_byte_backpointers; save_byte_from_array((uint8_t *)old_backpointers.data, old_backpointers.current_size, &old_byte_backpointers); /* perform operation under verification */ size_t capacity = aws_priority_queue_capacity(&queue); /* assertions */ assert(aws_priority_queue_is_valid(&queue)); assert(capacity == queue.container.current_size / queue.container.item_size); assert_array_list_equivalence(&queue.container, &old_container, &old_byte_container); assert_array_list_equivalence(&queue.backpointers, &old_backpointers, &old_byte_backpointers); } cbmc-proof.txt000066400000000000000000000000711456575232400357410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_capacityThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_clean_up/000077500000000000000000000000001456575232400332235ustar00rootroot00000000000000Makefile000066400000000000000000000014341456575232400346060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_clean_up# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### include ../Makefile.aws_array_list # This is the minimum bound to reach full coverage rate UNWINDSET += memset_impl.0:41 CBMCFLAGS += PROOF_UID = aws_priority_queue_clean_up HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_priority_queue_clean_up_harness.c000066400000000000000000000014301456575232400426360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_clean_up/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * Runtime: 8s */ void aws_priority_queue_clean_up_harness() { /* data structure */ struct aws_priority_queue queue; /* assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); __CPROVER_assume(aws_priority_queue_is_valid(&queue)); /* perform operation under verification */ aws_priority_queue_clean_up(&queue); /* assertions */ assert(AWS_IS_ZEROED(queue.container)); assert(AWS_IS_ZEROED(queue.backpointers)); } cbmc-proof.txt000066400000000000000000000000711456575232400357320ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_clean_upThis file marks the directory as containing a CBMC proof aws_priority_queue_init_dynamic/000077500000000000000000000000001456575232400340255ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014321456575232400354650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_init_dynamic# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This bound allow us to reach full coverage rate UNWINDSET += memset_impl.0:41 CBMCFLAGS += PROOF_UID = aws_priority_queue_init_dynamic HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_priority_queue_init_dynamic_harness.c000066400000000000000000000031671456575232400444110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_init_dynamic/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 9s */ void aws_priority_queue_init_dynamic_harness() { /* data structure */ struct aws_priority_queue queue; /* Precondition: queue is non-null */ /* parameters */ struct aws_allocator *allocator = aws_default_allocator(); /* Precondition: allocator is non-null */ size_t item_size; size_t initial_item_allocation; size_t len; /* assumptions */ __CPROVER_assume(initial_item_allocation <= MAX_INITIAL_ITEM_ALLOCATION); __CPROVER_assume(item_size > 0 && item_size <= MAX_ITEM_SIZE); __CPROVER_assume(!aws_mul_size_checked(initial_item_allocation, item_size, &len)); /* perform operation under verification */ uint8_t *raw_array = malloc(len); if (aws_priority_queue_init_dynamic(&queue, allocator, initial_item_allocation, item_size, nondet_compare) == AWS_OP_SUCCESS) { /* assertions */ assert(aws_priority_queue_is_valid(&queue)); assert(queue.container.alloc == allocator); assert(queue.container.item_size == item_size); assert(queue.container.length == 0); assert( (queue.container.data == NULL && queue.container.current_size == 0) || (queue.container.data && queue.container.current_size == (initial_item_allocation * item_size))); } else { /* assertions */ assert(AWS_IS_ZEROED(queue.container)); assert(AWS_IS_ZEROED(queue.backpointers)); } } cbmc-proof.txt000066400000000000000000000000711456575232400366130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_init_dynamicThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_init_static/000077500000000000000000000000001456575232400337475ustar00rootroot00000000000000Makefile000066400000000000000000000014371456575232400353350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_init_static# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This is the minimum bound to reach full coverage rate UNWINDSET += memset_impl.0:41 CBMCFLAGS += PROOF_UID = aws_priority_queue_init_static HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_priority_queue_init_static_harness.c000066400000000000000000000025151456575232400441130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_init_static/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 8s */ void aws_priority_queue_init_static_harness() { /* data structure */ struct aws_priority_queue queue; /* Precondition: queue is non-null */ /* parameters */ size_t item_size; size_t initial_item_allocation; size_t len; uint8_t *raw_array; /* assumptions */ __CPROVER_assume(initial_item_allocation > 0 && initial_item_allocation <= MAX_INITIAL_ITEM_ALLOCATION); __CPROVER_assume(item_size > 0 && item_size <= MAX_ITEM_SIZE); __CPROVER_assume(!aws_mul_size_checked(initial_item_allocation, item_size, &len)); /* perform operation under verification */ ASSUME_VALID_MEMORY_COUNT(raw_array, len); aws_priority_queue_init_static(&queue, raw_array, initial_item_allocation, item_size, nondet_compare); /* assertions */ assert(aws_priority_queue_is_valid(&queue)); assert(queue.container.alloc == NULL); assert(queue.container.item_size == item_size); assert(queue.container.length == 0); assert(queue.container.current_size == initial_item_allocation * item_size); assert_bytes_match((uint8_t *)queue.container.data, raw_array, len); } cbmc-proof.txt000066400000000000000000000000711456575232400364560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_init_staticThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_pop/000077500000000000000000000000001456575232400322335ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_pop/Makefile000066400000000000000000000016531456575232400337000ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list ########### # # Runtime: 500s UNWINDSET += CBMCFLAGS += PROOF_UID = aws_priority_queue_pop HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_remove_node_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_priority_queue_c_s_remove_node ########### include ../Makefile.common aws_priority_queue_pop_harness.c000066400000000000000000000036421456575232400406650ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_pop/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_priority_queue_pop_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); /* Assume the function preconditions */ __CPROVER_assume(aws_priority_queue_is_valid(&queue)); void *item = malloc(queue.container.item_size); if (queue.backpointers.data) { /* Assume that the two backpointers 0, len-1 are valid, * either by being NULL or by allocating their objects. This * is important for the s_swap that happens in s_remove. 
*/ size_t len = queue.backpointers.length; if (0 < len) { ((struct aws_priority_queue_node **)queue.backpointers.data)[0] = malloc(sizeof(struct aws_priority_queue_node)); if (0 != len - 1) { ((struct aws_priority_queue_node **)queue.backpointers.data)[len - 1] = malloc(sizeof(struct aws_priority_queue_node)); } } } /* Save the old priority queue state */ struct aws_priority_queue old_queue = queue; /* Assume the preconditions */ __CPROVER_assume(item && AWS_MEM_IS_WRITABLE(item, queue.container.item_size)); /* Perform operation under verification */ if (aws_priority_queue_pop(&queue, item) == AWS_OP_SUCCESS) { assert(old_queue.container.length == 1 + queue.container.length); if (queue.backpointers.data) { assert(old_queue.backpointers.length == 1 + queue.backpointers.length); } } /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400347420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_popThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_push/000077500000000000000000000000001456575232400324145ustar00rootroot00000000000000Makefile000066400000000000000000000017341456575232400340020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_push# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list ########### # # Runtime: 1200s # This is the minimum bound to reach full coverage rate UNWINDSET += CBMCFLAGS += PROOF_UID = aws_priority_queue_push HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_sift_up_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_priority_queue_c_s_sift_up ########### include ../Makefile.common aws_priority_queue_push_harness.c000066400000000000000000000016311456575232400412230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_push/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
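The pop and remove harnesses above deliberately allocate the backpointer cells that a swap may touch. The underlying invariant is that a non-NULL entry i of the backpointers list points at a caller-owned aws_priority_queue_node whose current_index records i, so a heap swap has to patch the two affected nodes as well. A minimal analogue of that bookkeeping, with hypothetical names rather than the library's code:

#include <assert.h>
#include <stddef.h>

struct bp_node { size_t current_index; };

static void swap_with_backpointers(int *heap, struct bp_node **bps, size_t i, size_t j) {
    int tmp = heap[i]; heap[i] = heap[j]; heap[j] = tmp;          /* swap the items    */
    struct bp_node *bt = bps[i]; bps[i] = bps[j]; bps[j] = bt;    /* swap their cells  */
    if (bps[i]) { bps[i]->current_index = i; }                    /* cells may be NULL */
    if (bps[j]) { bps[j]->current_index = j; }
}

int main(void) {
    int heap[2] = {5, 7};
    struct bp_node n0 = {0}, n1 = {1};
    struct bp_node *bps[2] = {&n0, &n1};

    swap_with_backpointers(heap, bps, 0, 1);

    assert(heap[0] == 7 && heap[1] == 5);
    assert(n0.current_index == 1 && n1.current_index == 0);
    return 0;
}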
*/ #include #include void aws_priority_queue_push_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); __CPROVER_assume(aws_priority_queue_is_valid(&queue)); void *item = malloc(queue.container.item_size); /* Assume the function preconditions */ __CPROVER_assume(item && AWS_MEM_IS_READABLE(item, queue.container.item_size)); /* Perform operation under verification */ aws_priority_queue_push(&queue, item); /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400351230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_pushThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_push_ref/000077500000000000000000000000001456575232400332505ustar00rootroot00000000000000Makefile000066400000000000000000000017401456575232400346330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_push_ref# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list ########### # # Runtime: 1200s # This is the minimum bound to reach full coverage rate UNWINDSET += CBMCFLAGS += PROOF_UID = aws_priority_queue_push_ref HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_sift_up_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_priority_queue_c_s_sift_up ########### include ../Makefile.common aws_priority_queue_push_ref_harness.c000066400000000000000000000020201456575232400427040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_push_ref/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_priority_queue_push_ref_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); __CPROVER_assume(aws_priority_queue_is_valid(&queue)); void *item = malloc(queue.container.item_size); struct aws_priority_queue_node *backpointer = malloc(sizeof(struct aws_priority_queue_node)); /* Assume the function preconditions */ __CPROVER_assume(item && AWS_MEM_IS_READABLE(item, queue.container.item_size)); /* Perform operation under verification */ aws_priority_queue_push_ref(&queue, item, backpointer); /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400357570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_push_refThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_remove/000077500000000000000000000000001456575232400327325ustar00rootroot00000000000000Makefile000066400000000000000000000016531456575232400343200ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_remove# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list ########### # Runtime: 500s UNWINDSET += CBMCFLAGS += PROOF_UID = aws_priority_queue_remove HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_remove_node_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_priority_queue_c_s_remove_node ########### include ../Makefile.common aws_priority_queue_remove_harness.c000066400000000000000000000043301456575232400420560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_remove/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_priority_queue_remove_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); /* Assume the function preconditions */ __CPROVER_assume(aws_priority_queue_is_valid(&queue)); void *item = malloc(queue.container.item_size); struct aws_priority_queue_node *backpointer = malloc(sizeof(struct aws_priority_queue_node)); if (queue.backpointers.data && backpointer) { /* Assume that the two backpointers index, len-1 are valid, * either by being NULL or by allocating their objects. This * is important for the s_swap that happens in s_remove. 
*/ size_t index = backpointer->current_index; size_t len = queue.backpointers.length; if (index < len) { ((struct aws_priority_queue_node **)queue.backpointers.data)[index] = malloc(sizeof(struct aws_priority_queue_node)); if (index != len - 1) { ((struct aws_priority_queue_node **)queue.backpointers.data)[len - 1] = malloc(sizeof(struct aws_priority_queue_node)); } } } /* Save the old priority queue state */ struct aws_priority_queue old_queue = queue; /* Assume the preconditions */ __CPROVER_assume(item && AWS_MEM_IS_WRITABLE(item, queue.container.item_size)); __CPROVER_assume(backpointer && AWS_MEM_IS_READABLE(backpointer, sizeof(struct aws_priority_queue_node))); /* Perform operation under verification */ if (aws_priority_queue_remove(&queue, item, backpointer) == AWS_OP_SUCCESS) { assert(old_queue.container.length == 1 + queue.container.length); if (queue.backpointers.data) { assert(old_queue.backpointers.length == 1 + queue.backpointers.length); } } /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400354410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_removeThis file marks the directory as containing a CBMC proof aws_priority_queue_s_remove_node/000077500000000000000000000000001456575232400342025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000024641456575232400356500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_remove_node# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list ########### # # Runtime: 500s ## This is here for the backpointer array list which contains pointers ## to aws_priority_queue_node elements. As pointers are 64bits, it is ## adequate to unroll the memcpy loop twice. UNWINDSET += memcpy_using_uint64_impl.0:2 CBMCFLAGS += PROOF_UID = aws_priority_queue_s_remove_node HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_using_uint64.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_no_op.c PROOF_SOURCES += $(PROOF_STUB)/s_sift_either_override.c PROOF_SOURCES += $(PROOF_STUB)/aws_array_list_swap_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_priority_queue_c_s_sift_either REMOVE_FUNCTION_BODY += aws_array_list_swap_override ########### include ../Makefile.common aws_priority_queue_s_remove_node_harness.c000066400000000000000000000047171456575232400447450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_remove_node/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include int __CPROVER_file_local_priority_queue_c_s_remove_node(struct aws_priority_queue *queue, void *item, size_t index); void aws_priority_queue_s_remove_node_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); /* Assume the function preconditions */ __CPROVER_assume(aws_priority_queue_is_valid(&queue)); void *item = malloc(queue.container.item_size); size_t index; __CPROVER_assume(index < queue.container.length); struct aws_priority_queue_node *node = malloc(sizeof(struct aws_priority_queue_node)); if (queue.backpointers.data) { /* Assume that the two backpointers index, len-1 are valid, * either by being NULL or by allocating their objects. This * is important for the s_swap that happens in s_remove. */ size_t len = queue.backpointers.length; if (index < len) { ((struct aws_priority_queue_node **)queue.backpointers.data)[index] = node; if (index != len - 1) { ((struct aws_priority_queue_node **)queue.backpointers.data)[len - 1] = malloc(sizeof(struct aws_priority_queue_node)); } } } /* Save the old priority queue state */ struct aws_priority_queue old_queue = queue; /* Assume the preconditions */ __CPROVER_assume(item && AWS_MEM_IS_WRITABLE(item, queue.container.item_size)); /* Perform operation under verification */ if (__CPROVER_file_local_priority_queue_c_s_remove_node(&queue, item, index) == AWS_OP_SUCCESS) { assert(old_queue.container.length == 1 + queue.container.length); if (queue.backpointers.data) { assert(old_queue.backpointers.length == 1 + queue.backpointers.length); if (node) { /* The node pointing in the element of the priority queue * now points to SIZE_MAX to indicate that the item was * removed */ assert(node->current_index == SIZE_MAX); } } } /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400367700ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_remove_nodeThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_down/000077500000000000000000000000001456575232400337535ustar00rootroot00000000000000Makefile000066400000000000000000000030561456575232400353400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_down# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_array_list # Runtime: # - 300s for MAX_PRIORITY_QUEUE_ITEMS=3 items # - 300s for MAX_PRIORITY_QUEUE_ITEMS=4 items MAX_PRIORITY_QUEUE_ITEMS ?= 3 # This should be the ceil(1 + log2(MAX_PRIORITY_QUEUE_ITEMS)) MAX_HEAP_HEIGHT ?= 3 DEFINES += -DMAX_PRIORITY_QUEUE_ITEMS=$(MAX_PRIORITY_QUEUE_ITEMS) # Note: # In order to reach full coverage we need to unwind the harness loop # as many times as the number of queue items, and the sift down loop # log(NUMBER_PRIO_QUEUE_ITEMS) times. 
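The sift Makefiles above derive their unwind bounds from MAX_HEAP_HEIGHT, which the comment pins to ceil(1 + log2(MAX_PRIORITY_QUEUE_ITEMS)). A throwaway check of that arithmetic for the small item counts used in these proofs (compile with -lm):

#include <assert.h>
#include <math.h>

static int heap_height_bound(int items) {
    return (int)ceil(1.0 + log2((double)items));
}

int main(void) {
    assert(heap_height_bound(3) == 3); /* ceil(1 + 1.585) = 3, matching MAX_HEAP_HEIGHT ?= 3 */
    assert(heap_height_bound(4) == 3); /* 1 + log2(4) is exactly 3                           */
    assert(heap_height_bound(5) == 4); /* the bound only grows to 4 at five items            */
    return 0;
}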
UNWINDSET += aws_priority_queue_s_sift_down_harness.0:$(shell echo $$((1 + $(MAX_PRIORITY_QUEUE_ITEMS)))) UNWINDSET += __CPROVER_file_local_priority_queue_c_s_sift_down.0:$(MAX_HEAP_HEIGHT) UNWINDSET += aws_priority_queue_backpointers_valid_deep.0:$(shell echo $$((1 + $(MAX_PRIORITY_QUEUE_ITEMS)))) CBMCFLAGS += PROOF_UID = aws_priority_queue_s_sift_down HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/s_swap_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c include ../Makefile.common aws_priority_queue_s_sift_down_harness.c000066400000000000000000000026461456575232400441300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_down/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include _Bool __CPROVER_file_local_priority_queue_c_s_sift_down(struct aws_priority_queue *queue, size_t root); void aws_priority_queue_s_sift_down_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_PRIORITY_QUEUE_ITEMS, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); /* Assume the function preconditions */ __CPROVER_assume(aws_priority_queue_is_valid(&queue)); size_t root; __CPROVER_assume(root < queue.container.length); if (queue.backpointers.data) { /* Assume that all backpointers are valid valid, either by * being NULL or by allocating their objects. */ size_t i; for (i = 0; i < queue.container.length; i++) { ((struct aws_priority_queue_node **)queue.backpointers.data)[i] = malloc(sizeof(struct aws_priority_queue_node)); } } /* Perform operation under verification */ __CPROVER_file_local_priority_queue_c_s_sift_down(&queue, root); /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); assert(aws_priority_queue_backpointers_valid_deep(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400364620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_downThis file marks the directory as containing a CBMC proof aws_priority_queue_s_sift_either/000077500000000000000000000000001456575232400342055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000030551456575232400356500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_either# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list include ../Makefile.aws_priority_queue_sift ########### # # Runtime: # - 300s for MAX_PRIORITY_QUEUE_ITEMS=3 items # Note: # In order to reach full coverage we need to unwind the harness loop # as many times as the number of queue items, and the sift down loop # log(NUMBER_PRIO_QUEUE_ITEMS) times. 
UNWINDSET += __CPROVER_file_local_priority_queue_c_s_sift_down.0:$(MAX_HEAP_HEIGHT) UNWINDSET += __CPROVER_file_local_priority_queue_c_s_sift_up.0:$(MAX_HEAP_HEIGHT) UNWINDSET += aws_priority_queue_s_sift_either_harness.0:$(shell echo $$((1 + $(MAX_PRIORITY_QUEUE_ITEMS)))) UNWINDSET += aws_priority_queue_backpointers_valid_deep.0:$(shell echo $$((1 + $(MAX_PRIORITY_QUEUE_ITEMS)))) CBMCFLAGS += PROOF_UID = aws_priority_queue_s_sift_either HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/s_swap_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_priority_queue_c_s_swap ########### include ../Makefile.common aws_priority_queue_s_sift_either_harness.c000066400000000000000000000032601456575232400447430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_either/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void __CPROVER_file_local_priority_queue_c_s_sift_either(struct aws_priority_queue *queue, size_t root); void aws_priority_queue_s_sift_either_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_PRIORITY_QUEUE_ITEMS, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); /* Assume the function preconditions */ __CPROVER_assume(aws_priority_queue_is_valid(&queue)); size_t root; __CPROVER_assume(root < queue.container.length); if (queue.backpointers.data) { /* Ensuring that just the root cell is correctly allocated is * not enough, as the swap requires that both the swapped * cells are correctly allocated. Therefore, if swap is to * not be overriden, I have to ensure that all of the root * descendants at least are correctly allocated. For now it is * ensured that all of them are. */ size_t i; for (i = 0; i < queue.container.length; i++) { ((struct aws_priority_queue_node **)queue.backpointers.data)[i] = malloc(sizeof(struct aws_priority_queue_node)); } } /* Perform operation under verification */ __CPROVER_file_local_priority_queue_c_s_sift_either(&queue, root); /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); assert(aws_priority_queue_backpointers_valid_deep(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400367730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_eitherThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_up/000077500000000000000000000000001456575232400334305ustar00rootroot00000000000000Makefile000066400000000000000000000027771456575232400350260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_up# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
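# MAX_PRIORITY_QUEUE_ITEMS and MAX_HEAP_HEIGHT are not set in this Makefile;
# they are presumably provided by the shared Makefile.aws_priority_queue_sift
# included below.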
########### include ../Makefile.aws_array_list include ../Makefile.aws_priority_queue_sift ########### # # Runtime: # - 300s for MAX_PRIORITY_QUEUE_ITEMS=3 items # - 450s for MAX_PRIORITY_QUEUE_ITEMS=5 items # Note: # In order to reach full coverage we need to unwind the harness loop # as many times as the number of queue items, and the sift down loop # log(NUMBER_PRIO_QUEUE_ITEMS) times. UNWINDSET += aws_priority_queue_s_sift_up_harness.0:$(shell echo $$((1 + $(MAX_PRIORITY_QUEUE_ITEMS)))) UNWINDSET += __CPROVER_file_local_priority_queue_c_s_sift_up.0:$(MAX_HEAP_HEIGHT) UNWINDSET += aws_priority_queue_backpointers_valid_deep.0:$(shell echo $$((1 + $(MAX_PRIORITY_QUEUE_ITEMS)))) CBMCFLAGS += PROOF_UID = aws_priority_queue_s_sift_up HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override_havoc.c PROOF_SOURCES += $(PROOF_STUB)/s_swap_override_no_op.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c REMOVE_FUNCTION_BODY += __CPROVER_file_local_priority_queue_c_s_swap ########### include ../Makefile.common aws_priority_queue_s_sift_up_harness.c000066400000000000000000000032451456575232400432560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_up/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include _Bool __CPROVER_file_local_priority_queue_c_s_sift_up(struct aws_priority_queue *queue, size_t root); void aws_priority_queue_s_sift_up_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_PRIORITY_QUEUE_ITEMS, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); /* Assume the function preconditions */ __CPROVER_assume(aws_priority_queue_is_valid(&queue)); size_t root; __CPROVER_assume(root < queue.container.length); if (queue.backpointers.data) { /* Ensuring that just the root cell is correctly allocated is * not enough, as the swap requires that both the swapped * cells are correctly allocated. Therefore, if swap is to * not be overriden, I have to ensure that all of the root * descendants at least are correctly allocated. For now it is * ensured that all of them are. 
*/ size_t i; for (i = 0; i < queue.container.length; i++) { ((struct aws_priority_queue_node **)queue.backpointers.data)[i] = malloc(sizeof(struct aws_priority_queue_node)); } } /* Perform operation under verification */ __CPROVER_file_local_priority_queue_c_s_sift_up(&queue, root); /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); assert(aws_priority_queue_backpointers_valid_deep(&queue)); } cbmc-proof.txt000066400000000000000000000000711456575232400361370ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_sift_upThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_swap/000077500000000000000000000000001456575232400327315ustar00rootroot00000000000000Makefile000066400000000000000000000020021456575232400343040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_swap# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list ########### # # Runtime: 180s # This is the minimum bound to reach full coverage rate UNWINDSET += __CPROVER_file_local_array_list_c_aws_array_list_mem_swap.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX_ITEM_SIZE) + 1))) CBMCFLAGS += PROOF_UID = aws_priority_queue_s_swap HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROJECT_SOURCES += $(SRCDIR)/source/array_list.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c ########### include ../Makefile.common aws_priority_queue_s_swap_harness.c000066400000000000000000000054441456575232400420630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_swap/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void __CPROVER_file_local_priority_queue_c_s_swap(struct aws_priority_queue *queue, size_t a, size_t b); void aws_priority_queue_s_swap_harness() { /* Data structure */ struct aws_priority_queue queue; /* Assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); /* Assume the function preconditions */ __CPROVER_assume(aws_priority_queue_is_valid(&queue)); size_t a; size_t b; __CPROVER_assume(a < queue.container.length); __CPROVER_assume(b < queue.container.length); if (queue.backpointers.data) { /* Assume that the two backpointers a, b are valid, either by * being NULL or by allocating their objects with their correct * values. 
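     * Unlike the sift proofs, which allocate every backpointer, only the two
     * cells being swapped need real allocations here, since s_swap presumably
     * touches no other backpointer entries.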
*/ ((struct aws_priority_queue_node **)queue.backpointers.data)[a] = malloc(sizeof(struct aws_priority_queue_node)); ((struct aws_priority_queue_node **)queue.backpointers.data)[b] = malloc(sizeof(struct aws_priority_queue_node)); } /* save current state of the data structure */ struct aws_array_list old = queue.container; struct store_byte_from_buffer old_byte; save_byte_from_array((uint8_t *)old.data, old.current_size, &old_byte); size_t item_sz = queue.container.item_size; size_t offset; __CPROVER_assume(offset < item_sz); /* save a byte of the element at index a */ struct store_byte_from_buffer old_a_byte; old_a_byte.index = a * item_sz + offset; old_a_byte.byte = ((uint8_t *)queue.container.data)[old_a_byte.index]; /* save a byte of the element at index b */ struct store_byte_from_buffer old_b_byte; old_b_byte.index = b * item_sz + offset; old_b_byte.byte = ((uint8_t *)queue.container.data)[old_b_byte.index]; /* Perform operation under verification */ __CPROVER_file_local_priority_queue_c_s_swap(&queue, a, b); /* Assert the postconditions */ assert(aws_priority_queue_is_valid(&queue)); /* All the elements in the container except for a and b should stay unchanged */ size_t ob_i = old_byte.index; if ((ob_i < a * item_sz || ob_i >= (a + 1) * item_sz) && (ob_i < b * item_sz || ob_i >= (b + 1) * item_sz)) { assert_array_list_equivalence(&queue.container, &old, &old_byte); } /* The new element at index a must be equal to the old element in index b and vice versa */ assert(old_a_byte.byte == ((uint8_t *)queue.container.data)[old_b_byte.index]); assert(old_b_byte.byte == ((uint8_t *)queue.container.data)[old_a_byte.index]); } cbmc-proof.txt000066400000000000000000000000711456575232400354400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_s_swapThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_size/000077500000000000000000000000001456575232400324075ustar00rootroot00000000000000Makefile000066400000000000000000000014301456575232400337660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_size# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This is the minimum bound to reach full coverage rate UNWINDSET += memset_impl.0:41 CBMCFLAGS += PROOF_UID = aws_priority_queue_size HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_priority_queue_size_harness.c000066400000000000000000000027571456575232400412230ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_size/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 13s */ void aws_priority_queue_size_harness() { /* data structure */ struct aws_priority_queue queue; /* assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); __CPROVER_assume(aws_priority_queue_is_valid(&queue)); /* save current state of the container structure */ struct aws_array_list old_container = queue.container; struct store_byte_from_buffer old_byte_container; save_byte_from_array((uint8_t *)old_container.data, old_container.current_size, &old_byte_container); /* save current state of the backpointers structure */ struct aws_array_list old_backpointers = queue.backpointers; struct store_byte_from_buffer old_byte_backpointers; save_byte_from_array((uint8_t *)old_backpointers.data, old_backpointers.current_size, &old_byte_backpointers); /* perform operation under verification */ size_t size = aws_priority_queue_size(&queue); /* assertions */ assert(aws_priority_queue_is_valid(&queue)); assert_array_list_equivalence(&queue.container, &old_container, &old_byte_container); assert_array_list_equivalence(&queue.backpointers, &old_backpointers, &old_byte_backpointers); } cbmc-proof.txt000066400000000000000000000000711456575232400351160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_sizeThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_top/000077500000000000000000000000001456575232400322375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_top/Makefile000066400000000000000000000014271456575232400337030ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### include ../Makefile.aws_array_list # This is the minimum bound to reach full coverage rate UNWINDSET += memset_impl.0:41 CBMCFLAGS += PROOF_UID = aws_priority_queue_top HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROJECT_SOURCES += $(SRCDIR)/source/priority_queue.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_priority_queue_top_harness.c000066400000000000000000000030501456575232400406660ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_top/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /** * Runtime: 21s */ void aws_priority_queue_top_harness() { /* data structure */ struct aws_priority_queue queue; /* assumptions */ __CPROVER_assume(aws_priority_queue_is_bounded(&queue, MAX_INITIAL_ITEM_ALLOCATION, MAX_ITEM_SIZE)); ensure_priority_queue_has_allocated_members(&queue); __CPROVER_assume(aws_priority_queue_is_valid(&queue)); /* save current state of the container structure */ struct aws_array_list old_container = queue.container; struct store_byte_from_buffer old_byte_container; save_byte_from_array((uint8_t *)old_container.data, old_container.current_size, &old_byte_container); /* save current state of the backpointers structure */ struct aws_array_list old_backpointers = queue.backpointers; struct store_byte_from_buffer old_byte_backpointers; save_byte_from_array((uint8_t *)old_backpointers.data, old_backpointers.current_size, &old_byte_backpointers); /* perform operation under verification */ void *top_val_ptr = malloc(queue.container.item_size); aws_priority_queue_top(&queue, &top_val_ptr); /* assertions */ assert(aws_priority_queue_is_valid(&queue)); assert_array_list_equivalence(&queue.container, &old_container, &old_byte_container); assert_array_list_equivalence(&queue.backpointers, &old_backpointers, &old_byte_backpointers); } cbmc-proof.txt000066400000000000000000000000711456575232400347460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_priority_queue_topThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ptr_eq/000077500000000000000000000000001456575232400275625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ptr_eq/Makefile000066400000000000000000000005571456575232400312310ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### CBMCFLAGS += PROOF_UID = aws_ptr_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROJECT_SOURCES += $(SRCDIR)/source/hash_table.c ########### include ../Makefile.common aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ptr_eq/aws_ptr_eq_harness.c000066400000000000000000000006011456575232400336120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_ptr_eq_harness() { void *p1; void *p2; bool rval = aws_ptr_eq(p1, p2); assert(rval == (p1 == p2)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ptr_eq/cbmc-proof.txt000066400000000000000000000000711456575232400323500ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquire/000077500000000000000000000000001456575232400322715ustar00rootroot00000000000000Makefile000066400000000000000000000012461456575232400336550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquire# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
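# Note: no loop unwinding bounds are added below (UNWINDSET is left empty);
# the code paths exercised by this proof are presumably loop-free.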
########### UNWINDSET += CBMCFLAGS += PROOF_UID = aws_ring_buffer_acquire HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/ring_buffer.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_ring_buffer_acquire_harness.c000066400000000000000000000052421456575232400407570ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquire/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_ring_buffer_acquire_harness() { /* parameters */ struct aws_byte_buf buf; struct aws_ring_buffer ring_buf; size_t requested_size; size_t ring_buf_size; /* assumptions */ ensure_ring_buffer_has_allocated_members(&ring_buf, ring_buf_size); __CPROVER_assume(aws_ring_buffer_is_valid(&ring_buf)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* copy of state before call */ struct aws_ring_buffer ring_buf_old = ring_buf; int result = aws_ring_buffer_acquire(&ring_buf, requested_size, &buf); /* assertions */ uint8_t *old_head = aws_atomic_load_ptr(&ring_buf_old.head); uint8_t *old_tail = aws_atomic_load_ptr(&ring_buf_old.tail); uint8_t *new_head = aws_atomic_load_ptr(&ring_buf.head); uint8_t *new_tail = aws_atomic_load_ptr(&ring_buf.tail); if (result == AWS_OP_SUCCESS) { assert(aws_byte_buf_is_valid(&buf)); assert(buf.capacity == requested_size); assert(AWS_MEM_IS_WRITABLE(buf.buffer, buf.capacity)); assert(buf.len == 0); /* aws_byte_buf always created with aws_byte_buf_from_empty_array */ assert(aws_ring_buffer_buf_belongs_to_pool(&ring_buf, &buf)); if (aws_ring_buffer_is_empty(&ring_buf_old)) { assert(new_head == ring_buf_old.allocation + requested_size); assert(new_tail == ring_buf_old.allocation); } else { assert(new_head == ring_buf_old.allocation + requested_size || new_head == old_head + requested_size); assert(new_tail == old_tail); } assert(IMPLIES(is_empty_state(&ring_buf_old), is_front_valid_state(&ring_buf))); assert(IMPLIES(is_front_valid_state(&ring_buf_old), is_front_valid_state(&ring_buf))); assert(IMPLIES( is_middle_valid_state(&ring_buf_old), is_middle_valid_state(&ring_buf) || is_ends_valid_state(&ring_buf))); assert(IMPLIES(is_ends_valid_state(&ring_buf_old), is_ends_valid_state(&ring_buf))); assert(!(is_front_valid_state(&ring_buf_old) && is_middle_valid_state(&ring_buf))); } else { assert(ring_buf == ring_buf_old); } assert(aws_ring_buffer_is_valid(&ring_buf)); assert(ring_buf.allocator == ring_buf_old.allocator); assert(ring_buf.allocation == ring_buf_old.allocation); assert(ring_buf.allocation_end == ring_buf_old.allocation_end); } cbmc-proof.txt000066400000000000000000000000711456575232400350000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquireThis file marks the directory as containing a CBMC proof 
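The aws_priority_queue_s_swap, _size and _top harnesses above snapshot a single byte of the container before the call (save_byte_from_array) and compare it again afterwards (assert_array_list_equivalence). Under CBMC the saved index is a nondeterministic value, so that one comparison effectively checks every byte. A minimal, self-contained sketch of the idea follows; the buffer, the saved_byte struct and the no-op operation are toy stand-ins for illustration only, not the real proof helpers, and a plain loop replaces the symbolic index so the sketch also runs under an ordinary compiler.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Toy stand-in for the proof helpers: remember one byte of a buffer. */
struct saved_byte {
    size_t index;
    uint8_t byte;
};

static void save_byte(const uint8_t *buf, size_t len, size_t index, struct saved_byte *out) {
    assert(index < len);
    out->index = index;
    out->byte = buf[index];
}

/* Hypothetical operation under test; it is supposed to leave the buffer alone. */
static void operation_that_must_not_modify(uint8_t *buf, size_t len) {
    (void)buf;
    (void)len;
}

int main(void) {
    uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};

    /* Under CBMC `index` would be a single nondeterministic value, so one
     * assertion covers every position; with a normal compiler we loop. */
    for (size_t index = 0; index < sizeof(buf); ++index) {
        struct saved_byte before;
        save_byte(buf, sizeof(buf), index, &before);

        operation_that_must_not_modify(buf, sizeof(buf));

        /* The remembered byte must be unchanged after the call. */
        assert(buf[before.index] == before.byte);
    }
    return 0;
}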
aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquire_up_to/000077500000000000000000000000001456575232400334775ustar00rootroot00000000000000Makefile000066400000000000000000000012541456575232400350620ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquire_up_to# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### UNWINDSET += CBMCFLAGS += PROOF_UID = aws_ring_buffer_acquire_up_to HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/ring_buffer.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_ring_buffer_acquire_up_to_harness.c000066400000000000000000000041761456575232400434000ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquire_up_to/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_ring_buffer_acquire_up_to_harness() { /* parameters */ struct aws_byte_buf buf; struct aws_ring_buffer ring_buf; size_t minimum_size; size_t requested_size; size_t ring_buf_size; /* assumptions */ __CPROVER_assume(requested_size >= minimum_size); ensure_ring_buffer_has_allocated_members(&ring_buf, ring_buf_size); __CPROVER_assume(aws_ring_buffer_is_valid(&ring_buf)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* copy of state before call */ struct aws_ring_buffer ring_buf_old = ring_buf; int result = aws_ring_buffer_acquire_up_to(&ring_buf, minimum_size, requested_size, &buf); /* assertions */ if (result == AWS_OP_SUCCESS) { assert(aws_byte_buf_is_valid(&buf)); assert(buf.capacity >= minimum_size && buf.capacity <= requested_size); assert(buf.len == 0); /* aws_byte_buf always created with aws_byte_buf_from_empty_array */ assert(aws_ring_buffer_buf_belongs_to_pool(&ring_buf, &buf)); assert(IMPLIES(is_empty_state(&ring_buf_old), is_front_valid_state(&ring_buf))); assert(IMPLIES(is_front_valid_state(&ring_buf_old), is_front_valid_state(&ring_buf))); assert(IMPLIES( is_middle_valid_state(&ring_buf_old), is_middle_valid_state(&ring_buf) || is_ends_valid_state(&ring_buf))); assert(IMPLIES(is_ends_valid_state(&ring_buf_old), is_ends_valid_state(&ring_buf))); assert(!(is_front_valid_state(&ring_buf_old) && is_middle_valid_state(&ring_buf))); } else { assert(ring_buf == ring_buf_old); } assert(aws_ring_buffer_is_valid(&ring_buf)); assert(ring_buf.allocator == ring_buf_old.allocator); assert(ring_buf.allocation == ring_buf_old.allocation); assert(ring_buf.allocation_end == ring_buf_old.allocation_end); } cbmc-proof.txt000066400000000000000000000000711456575232400362060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_acquire_up_toThis file marks the directory as containing a CBMC proof 
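Every proof in this tree follows the same assume / call / assert shape seen in the two ring-buffer harnesses above: choose nondeterministic inputs, constrain them with __CPROVER_assume to the function's preconditions, perform the operation under verification, then assert the postconditions. Below is a minimal sketch of that shape for a hypothetical midpoint helper (not part of aws-c-common); the fallback definition of __CPROVER_assume exists only so the sketch also compiles outside CBMC.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#ifndef __CPROVER__
/* Outside CBMC there is no assume built-in; a no-op keeps the sketch compilable. */
static void __CPROVER_assume(int condition) {
    (void)condition;
}
#endif

/* Hypothetical function under verification: overflow-safe midpoint. */
static size_t midpoint(size_t lo, size_t hi) {
    return lo + (hi - lo) / 2;
}

void midpoint_harness(size_t lo, size_t hi) {
    /* Assume the function precondition: the interval is well formed. */
    __CPROVER_assume(lo <= hi);

    /* Perform operation under verification. */
    size_t mid = midpoint(lo, hi);

    /* Assert the postconditions. */
    assert(lo <= mid);
    assert(mid <= hi);
}

int main(void) {
    /* Outside CBMC, just exercise a few concrete cases. */
    midpoint_harness(0, 10);
    midpoint_harness(5, 5);
    midpoint_harness(0, SIZE_MAX);
    return 0;
}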
aws_ring_buffer_buf_belongs_to_pool/000077500000000000000000000000001456575232400346015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000012621456575232400362420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_buf_belongs_to_pool# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### UNWINDSET += CBMCFLAGS += PROOF_UID = aws_ring_buffer_buf_belongs_to_pool HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/ring_buffer.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_ring_buffer_buf_belongs_to_pool_harness.c000066400000000000000000000024601456575232400457340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_buf_belongs_to_pool/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_ring_buffer_buf_belongs_to_pool_harness() { /* parameters */ struct aws_byte_buf buf; struct aws_ring_buffer ring_buf; size_t ring_buf_size; /* assumptions */ ensure_ring_buffer_has_allocated_members(&ring_buf, ring_buf_size); bool is_member = nondet_bool(); /* nondet assignment required to force true/false */ if (is_member) { __CPROVER_assume(!aws_ring_buffer_is_empty(&ring_buf)); ensure_byte_buf_has_allocated_buffer_member_in_ring_buf(&buf, &ring_buf); } else { ensure_byte_buf_has_allocated_buffer_member(&buf); } __CPROVER_assume(aws_ring_buffer_is_valid(&ring_buf)); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); struct aws_ring_buffer ring_buf_old = ring_buf; struct aws_byte_buf buf_old = buf; bool result = aws_ring_buffer_buf_belongs_to_pool(&ring_buf, &buf); /* assertions */ assert(is_member == result); assert(aws_ring_buffer_is_valid(&ring_buf)); assert(aws_byte_buf_is_valid(&buf)); assert(ring_buf_old == ring_buf); assert(buf_old == buf); } cbmc-proof.txt000066400000000000000000000000711456575232400373670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_buf_belongs_to_poolThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_clean_up/000077500000000000000000000000001456575232400324265ustar00rootroot00000000000000Makefile000066400000000000000000000012471456575232400340130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_clean_up# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
########### UNWINDSET += CBMCFLAGS += PROOF_UID = aws_ring_buffer_clean_up HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/ring_buffer.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_ring_buffer_clean_up_harness.c000066400000000000000000000015451456575232400412530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_clean_up/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_ring_buffer_clean_up_harness() { /* parameters */ struct aws_ring_buffer ring_buf; size_t ring_buf_size; /* assumptions */ ensure_ring_buffer_has_allocated_members(&ring_buf, ring_buf_size); __CPROVER_assume(aws_ring_buffer_is_valid(&ring_buf)); /* operation under verification */ aws_ring_buffer_clean_up(&ring_buf); /* assertions */ assert(ring_buf.allocator == NULL); assert(ring_buf.allocation == NULL); assert(aws_atomic_load_ptr(&ring_buf.head) == NULL); assert(aws_atomic_load_ptr(&ring_buf.tail) == NULL); assert(ring_buf.allocation_end == NULL); } cbmc-proof.txt000066400000000000000000000000711456575232400351350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_clean_upThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_init/000077500000000000000000000000001456575232400316035ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_init/Makefile000066400000000000000000000014441456575232400332460ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. UNWINDSET += CBMCFLAGS += PROOF_UID = aws_ring_buffer_init HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/ring_buffer.c PROJECT_SOURCES += $(SRCDIR)/source/common.c # We abstract this function because manual inspection demonstrates it is unreachable. # Improves coverage metrics. REMOVE_FUNCTION_BODY += __CPROVER_file_local_ring_buffer_c_s_ring_buffer_mem_acquire include ../Makefile.common aws_ring_buffer_init_harness.c000066400000000000000000000016361456575232400376060ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_init/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_ring_buffer_init_harness() { /* Non-deterministic parameters. */ struct aws_ring_buffer *ring_buf = malloc(sizeof(*ring_buf)); struct aws_allocator *allocator = aws_default_allocator(); size_t size; /* Preconditions. 
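     * The allocation above may fail, so the assumptions below restrict the
     * proof to the case where both pointers are non-NULL and the requested
     * size is positive and below MAX_MALLOC.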
*/ __CPROVER_assume(ring_buf != NULL); __CPROVER_assume(allocator != NULL); __CPROVER_assume(size > 0 && size < MAX_MALLOC); /* Operation under verification. */ if (aws_ring_buffer_init(ring_buf, allocator, size) == AWS_OP_SUCCESS) { /* Postconditions. */ assert(aws_ring_buffer_is_valid(ring_buf)); assert(ring_buf->allocator == allocator); assert(ring_buf->allocation_end - ring_buf->allocation == size); } } cbmc-proof.txt000066400000000000000000000000711456575232400343120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_initThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_release/000077500000000000000000000000001456575232400322605ustar00rootroot00000000000000Makefile000066400000000000000000000012461456575232400336440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_release# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### UNWINDSET += CBMCFLAGS += PROOF_UID = aws_ring_buffer_release HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/ring_buffer.c PROJECT_SOURCES += $(SRCDIR)/source/common.c ########### include ../Makefile.common aws_ring_buffer_release_harness.c000066400000000000000000000041651456575232400407400ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_release/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include void aws_ring_buffer_release_harness() { /* parameters */ struct aws_byte_buf buf; struct aws_ring_buffer ring_buf; size_t ring_buf_size; /* assumptions */ ensure_ring_buffer_has_allocated_members(&ring_buf, ring_buf_size); __CPROVER_assume(!aws_ring_buffer_is_empty(&ring_buf)); __CPROVER_assume(aws_ring_buffer_is_valid(&ring_buf)); ensure_byte_buf_has_allocated_buffer_member_in_ring_buf(&buf, &ring_buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); /* copy of state before call */ struct aws_ring_buffer ring_buf_old = ring_buf; struct aws_byte_buf buf_old = buf; aws_ring_buffer_release(&ring_buf, &buf); /* assertions */ uint8_t *old_head = aws_atomic_load_ptr(&ring_buf_old.head); uint8_t *old_tail = aws_atomic_load_ptr(&ring_buf_old.tail); uint8_t *new_head = aws_atomic_load_ptr(&ring_buf.head); uint8_t *new_tail = aws_atomic_load_ptr(&ring_buf.tail); assert(aws_ring_buffer_is_valid(&ring_buf)); assert(ring_buf.allocator == ring_buf_old.allocator); assert(ring_buf.allocation == ring_buf_old.allocation); assert(new_head == old_head); assert(new_tail == buf_old.buffer + buf_old.capacity); assert(ring_buf.allocation_end == ring_buf_old.allocation_end); assert(buf.allocator == NULL); assert(buf.buffer == NULL); assert(buf.len == 0); assert(buf.capacity == 0); assert(IMPLIES(is_front_valid_state(&ring_buf_old), is_empty_state(&ring_buf) || is_middle_valid_state(&ring_buf))); assert(IMPLIES( is_middle_valid_state(&ring_buf_old), is_empty_state(&ring_buf) || is_middle_valid_state(&ring_buf) || is_ends_valid_state(&ring_buf))); assert(IMPLIES( is_ends_valid_state(&ring_buf_old), is_empty_state(&ring_buf) || is_middle_valid_state(&ring_buf) || is_ends_valid_state(&ring_buf))); } cbmc-proof.txt000066400000000000000000000000711456575232400347670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_ring_buffer_releaseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_round_up_to_power_of_two/000077500000000000000000000000001456575232400334165ustar00rootroot00000000000000Makefile000066400000000000000000000011351456575232400347770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_round_up_to_power_of_two# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### CBMCFLAGS += PROOF_UID = aws_round_up_to_power_of_two HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_STUB)/error.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common aws_round_up_to_power_of_two_harness.c000066400000000000000000000013611456575232400432270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_round_up_to_power_of_two/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include void aws_round_up_to_power_of_two_harness() { size_t test_val; size_t result; int rval = aws_round_up_to_power_of_two(test_val, &result); #if ULONG_MAX == SIZE_MAX int popcount = __builtin_popcountl(result); #elif ULLONG_MAX == SIZE_MAX int popcount = __builtin_popcountll(result); #else # error #endif if (rval == AWS_OP_SUCCESS) { assert(popcount == 1); assert(test_val <= result); assert(test_val >= result >> 1); } else { // Only fail if rounding up would cause us to overflow. assert(test_val > ((SIZE_MAX >> 1) + 1)); } } cbmc-proof.txt000066400000000000000000000000711456575232400361250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_round_up_to_power_of_twoThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_bytes/000077500000000000000000000000001456575232400310045ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_bytes/Makefile000066400000000000000000000007461456575232400324530ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string CBMCFLAGS += PROOF_UID = aws_string_bytes HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_bytes_harness.c000066400000000000000000000007021456575232400362010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_bytes/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_string_bytes_harness() { struct aws_string *str = ensure_string_is_allocated_nondet_length(); __CPROVER_assume(aws_string_is_valid(str)); assert(aws_string_bytes(str) == str->bytes); assert(aws_string_is_valid(str)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_bytes/cbmc-proof.txt000066400000000000000000000000711456575232400335720ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_compare/000077500000000000000000000000001456575232400313045ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_compare/Makefile000066400000000000000000000013331456575232400327440ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
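# The memcmp loop below is unwound MAX_STRING_LEN + 1 times: up to
# MAX_STRING_LEN byte comparisons, plus one more iteration so the loop guard
# can be evaluated a final time.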
include ../Makefile.aws_string UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_compare HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_compare_harness.c000066400000000000000000000020711456575232400370020ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_compare/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_string_compare_harness() { struct aws_string *str_a = nondet_bool() ? nondet_allocate_string_bounded_length(MAX_STRING_LEN) : NULL; struct aws_string *str_b = nondet_bool() ? (nondet_bool() ? str_a : NULL) : nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(IMPLIES(str_a != NULL, aws_string_is_valid(str_a))); __CPROVER_assume(IMPLIES(str_b != NULL, aws_string_is_valid(str_b))); bool nondet_parameter = nondet_bool(); if (aws_string_compare(str_a, nondet_parameter ? str_b : str_a) == AWS_OP_SUCCESS) { if (nondet_parameter && str_a && str_b) { assert_bytes_match(str_a->bytes, str_b->bytes, str_a->len); } } if (str_a) { assert(aws_string_is_valid(str_a)); } if (str_b) { assert(aws_string_is_valid(str_b)); } } cbmc-proof.txt000066400000000000000000000000711456575232400340130ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_compareThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroy/000077500000000000000000000000001456575232400313475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroy/Makefile000066400000000000000000000011611456575232400330060ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string CBMCFLAGS += PROOF_UID = aws_string_destroy HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_destroy_harness.c000066400000000000000000000006401456575232400371100ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroy/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include void aws_string_destroy_harness() { struct aws_string *str = ensure_string_is_allocated_nondet_length(); __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); aws_string_destroy(str); } cbmc-proof.txt000066400000000000000000000000711456575232400340560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroyThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroy_secure/000077500000000000000000000000001456575232400327155ustar00rootroot00000000000000Makefile000066400000000000000000000012411456575232400342740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroy_secure# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string CBMCFLAGS += PROOF_UID = aws_string_destroy_secure HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_destroy_secure_harness.c000066400000000000000000000022271456575232400420270ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroy_secure/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_string_destroy_secure_harness() { /* Non-deterministic parameters. */ struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); char const *bytes; size_t len; bool is_str_null = (str == NULL); /* Assumptions. */ __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); struct aws_string old_str = {0}; if (str != NULL) { old_str = *str; } /* Operation under verification. */ aws_string_destroy_secure(str); /* Check that all bytes are 0. Since the memory is freed, * this will trigger a use-after-free check * Disabiling the check only for this bit of the harness. */ #pragma CPROVER check push #pragma CPROVER check disable "pointer" if (old_str.bytes == NULL) { if (old_str.len > 0) { size_t i; __CPROVER_assume(i < old_str.len); assert(old_str.bytes[i] == 0); } } #pragma CPROVER check pop } cbmc-proof.txt000066400000000000000000000000711456575232400354240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_destroy_secureThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq/000077500000000000000000000000001456575232400302635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq/Makefile000066400000000000000000000011741456575232400317260ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
include ../Makefile.aws_string UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/string.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c include ../Makefile.common aws_string_eq_harness.c000066400000000000000000000015551456575232400347460ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_string_eq_harness() { struct aws_string *str_a = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_string *str_b = nondet_bool() ? str_a : nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(IMPLIES(str_a != NULL, aws_string_is_valid(str_a))); __CPROVER_assume(IMPLIES(str_b != NULL, aws_string_is_valid(str_b))); if (aws_string_eq(str_a, str_b) && str_a && str_b) { assert(str_a->len == str_b->len); assert_bytes_match(str_a->bytes, str_b->bytes, str_a->len); assert(aws_string_is_valid(str_a)); assert(aws_string_is_valid(str_b)); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq/cbmc-proof.txt000066400000000000000000000000711456575232400330510ustar00rootroot00000000000000This file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_buf/000077500000000000000000000000001456575232400321425ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_buf/Makefile000066400000000000000000000014361456575232400336060ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq_byte_buf HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_eq_byte_buf_harness.c000066400000000000000000000016351456575232400405030ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_buf/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include void aws_string_eq_byte_buf_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_byte_buf buf; __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_STRING_LEN)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); if (aws_string_eq_byte_buf(str, nondet_bool() ? &buf : NULL) && str) { assert(str->len == buf.len); assert_bytes_match(str->bytes, buf.buffer, str->len); assert(aws_string_is_valid(str)); } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400346510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_bufThis file marks the directory as containing a CBMC proof aws_string_eq_byte_buf_ignore_case/000077500000000000000000000000001456575232400344215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014741456575232400360670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_buf_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += aws_array_eq_ignore_case.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq_byte_buf_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_eq_byte_buf_ignore_case_harness.c000066400000000000000000000017031456575232400453730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_buf_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include void aws_string_eq_byte_buf_ignore_case_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_byte_buf buf; __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_STRING_LEN)); ensure_byte_buf_has_allocated_buffer_member(&buf); __CPROVER_assume(aws_byte_buf_is_valid(&buf)); bool nondet_parameter; if (aws_string_eq_byte_buf_ignore_case(str, nondet_parameter ? 
&buf : NULL) && str) { assert(aws_string_is_valid(str)); if (nondet_parameter) { assert(str->len == buf.len); } } assert(aws_byte_buf_is_valid(&buf)); } cbmc-proof.txt000066400000000000000000000000711456575232400372070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_buf_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_cursor/000077500000000000000000000000001456575232400327035ustar00rootroot00000000000000Makefile000066400000000000000000000013411456575232400342630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_cursor# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq_byte_cursor HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_eq_byte_cursor_harness.c000066400000000000000000000017511456575232400420040ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_cursor/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_string_eq_byte_cursor_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_byte_cursor cursor; __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); ensure_byte_cursor_has_allocated_buffer_member(&cursor); __CPROVER_assume(aws_byte_cursor_is_valid(&cursor)); bool nondet_parameter; if (aws_string_eq_byte_cursor(str, nondet_parameter ? &cursor : NULL) && str) { assert(aws_string_is_valid(str)); if (nondet_parameter) { assert(str->len == cursor.len); if (str->len > 0) { assert_bytes_match(str->bytes, cursor.ptr, str->len); } } } assert(aws_byte_cursor_is_valid(&cursor)); } cbmc-proof.txt000066400000000000000000000000711456575232400354120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_cursorThis file marks the directory as containing a CBMC proof aws_string_eq_byte_cursor_ignore_case/000077500000000000000000000000001456575232400351625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014771456575232400366330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_cursor_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
include ../Makefile.aws_string UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += aws_array_eq_ignore_case.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq_byte_cursor_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/error.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_eq_byte_cursor_ignore_case_harness.c000066400000000000000000000015541456575232400467010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_cursor_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void aws_string_eq_byte_cursor_ignore_case_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_byte_cursor cursor; __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); ensure_byte_cursor_has_allocated_buffer_member(&cursor); __CPROVER_assume(aws_byte_cursor_is_valid(&cursor)); bool nondet_parameter; if (aws_string_eq_byte_cursor_ignore_case(str, nondet_parameter ? &cursor : NULL) && str) { assert(aws_string_is_valid(str)); if (nondet_parameter) { assert(str->len == cursor.len); } } assert(aws_byte_cursor_is_valid(&cursor)); } cbmc-proof.txt000066400000000000000000000000711456575232400377500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_byte_cursor_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_c_str/000077500000000000000000000000001456575232400314555ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_c_str/Makefile000066400000000000000000000014161456575232400331170ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string UNWINDSET += aws_array_eq_c_str.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq_c_str HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/string.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c include ../Makefile.common aws_string_eq_c_str_harness.c000066400000000000000000000013601456575232400373240ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_c_str/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include void aws_string_eq_c_str_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); const char *c_str = ensure_c_str_is_allocated(MAX_STRING_LEN); if (aws_string_eq_c_str(str, c_str) && str && c_str) { assert(aws_string_is_valid(str)); assert(aws_c_string_is_valid(c_str)); assert(str->len == strlen(c_str)); assert_bytes_match(str->bytes, c_str, str->len); } } cbmc-proof.txt000066400000000000000000000000711456575232400341640ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_c_strThis file marks the directory as containing a CBMC proof aws_string_eq_c_str_ignore_case/000077500000000000000000000000001456575232400337345ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofsMakefile000066400000000000000000000014461456575232400354010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_c_str_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string UNWINDSET += aws_array_eq_c_str_ignore_case.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += strlen.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq_c_str_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/string.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c include ../Makefile.common aws_string_eq_c_str_ignore_case_harness.c000066400000000000000000000013171456575232400442220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_c_str_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_string_eq_c_str_ignore_case_harness() { struct aws_string *str = nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(IMPLIES(str != NULL, aws_string_is_valid(str))); const char *c_str = ensure_c_str_is_allocated(MAX_STRING_LEN); if (aws_string_eq_c_str_ignore_case(str, c_str) && str && c_str) { assert(aws_string_is_valid(str)); assert(aws_c_string_is_valid(c_str)); assert(str->len == strlen(c_str)); } } cbmc-proof.txt000066400000000000000000000000711456575232400365220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_c_str_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_ignore_case/000077500000000000000000000000001456575232400326215ustar00rootroot00000000000000Makefile000066400000000000000000000013321456575232400342010ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_ignore_case# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
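# This proof pulls in the shared aws_string proof settings and unwinds the
# aws_array_eq_ignore_case and memcmp loops MAX_STRING_LEN + 1 times, enough
# to cover the longest string the harness can allocate.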
include ../Makefile.aws_string UNWINDSET += aws_array_eq_ignore_case.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) UNWINDSET += memcmp.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_eq_ignore_case HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROJECT_SOURCES += $(SRCDIR)/source/string.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c include ../Makefile.common aws_string_eq_ignore_case_harness.c000066400000000000000000000015231456575232400416350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_ignore_case/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_string_eq_ignore_case_harness() { struct aws_string *str_a = nondet_allocate_string_bounded_length(MAX_STRING_LEN); struct aws_string *str_b = nondet_bool() ? str_a : nondet_allocate_string_bounded_length(MAX_STRING_LEN); __CPROVER_assume(IMPLIES(str_a != NULL, aws_string_is_valid(str_a))); __CPROVER_assume(IMPLIES(str_b != NULL && str_a != str_b, aws_string_is_valid(str_b))); if (aws_string_eq_ignore_case(str_a, str_b) && str_a && str_b) { assert(aws_string_is_valid(str_a)); assert(aws_string_is_valid(str_b)); assert(str_a->len == str_b->len); } } cbmc-proof.txt000066400000000000000000000000711456575232400353300ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_eq_ignore_caseThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_array/000077500000000000000000000000001456575232400326705ustar00rootroot00000000000000Makefile000066400000000000000000000012331456575232400342500ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_array# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string CBMCFLAGS += PROOF_UID = aws_string_new_from_array HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_new_from_array_harness.c000066400000000000000000000016461456575232400417610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_array/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
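 *
 * Harness for aws_string_new_from_array(): it allocates a nondeterministic
 * byte array, picks a reported size no larger than that allocation, and
 * checks that any string returned has that length, is NUL-terminated,
 * copies the source bytes, and satisfies aws_string_is_valid().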
*/ #include #include #include void aws_string_new_from_array_harness() { /* parameters */ size_t alloc_size; uint8_t *array; struct aws_allocator *allocator; size_t reported_size; /* precondition */ ASSUME_VALID_MEMORY_COUNT(array, alloc_size); ASSUME_DEFAULT_ALLOCATOR(allocator); __CPROVER_assume(reported_size <= alloc_size); /* operation under verification */ struct aws_string *str = aws_string_new_from_array(allocator, array, reported_size); /* assertions */ if (str) { assert(str->len == reported_size); assert(str->bytes[str->len] == 0); assert_bytes_match(str->bytes, array, str->len); assert(aws_string_is_valid(str)); } } cbmc-proof.txt000066400000000000000000000000711456575232400353770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_arrayThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_c_str/000077500000000000000000000000001456575232400326645ustar00rootroot00000000000000Makefile000066400000000000000000000013351456575232400342470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_c_str# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string UNWINDSET += strlen.0:$(shell echo $$(($(MAX_STRING_LEN) + 1))) CBMCFLAGS += PROOF_UID = aws_string_new_from_c_str HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_new_from_c_str_harness.c000066400000000000000000000015771456575232400417540ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_c_str/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_string_new_from_c_str_harness() { /* parameters */ const char *c_str = ensure_c_str_is_allocated(MAX_STRING_LEN); struct aws_allocator *allocator; /* assumptions */ __CPROVER_assume(c_str != NULL); ASSUME_DEFAULT_ALLOCATOR(allocator); /* operation under verification */ struct aws_string *str = aws_string_new_from_c_str(allocator, c_str); /* assertions */ if (str) { assert(str->len <= MAX_STRING_LEN); assert(str->bytes[str->len] == 0); assert_bytes_match(str->bytes, c_str, str->len); assert(aws_string_is_valid(str)); } assert(aws_c_string_is_valid(c_str)); } cbmc-proof.txt000066400000000000000000000000711456575232400353730ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_c_strThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_string/000077500000000000000000000000001456575232400330605ustar00rootroot00000000000000Makefile000066400000000000000000000012351456575232400344420ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_string# Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. include ../Makefile.aws_string CBMCFLAGS += PROOF_UID = aws_string_new_from_string HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/make_common_data_structures.c PROJECT_SOURCES += $(SRCDIR)/source/allocator.c PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/error.c PROJECT_SOURCES += $(SRCDIR)/source/byte_buf.c PROJECT_SOURCES += $(SRCDIR)/source/common.c PROJECT_SOURCES += $(SRCDIR)/source/string.c include ../Makefile.common aws_string_new_from_string_harness.c000066400000000000000000000017241456575232400423360ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_string/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include void aws_string_new_from_string_harness() { /* parameters */ struct aws_string *source = ensure_string_is_allocated_nondet_length(); __CPROVER_assume(aws_string_is_valid(source)); struct aws_allocator *allocator = (source->allocator) ? source->allocator : aws_default_allocator(); /* operation under verification */ struct aws_string *str = aws_string_new_from_string(allocator, source); /* assertions */ if (str) { assert(source->len == str->len); assert(str->allocator == allocator); assert(str->bytes[str->len] == '\0'); assert_bytes_match(source->bytes, str->bytes, source->len); assert(aws_string_is_valid(str)); } assert(aws_string_is_valid(source)); } cbmc-proof.txt000066400000000000000000000000711456575232400355670ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/aws_string_new_from_stringThis file marks the directory as containing a CBMC proof aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/build-buildspec.sh000066400000000000000000000000351456575232400310170ustar00rootroot00000000000000for d in $(find . -name dir) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/lib/000077500000000000000000000000001456575232400261645ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/lib/__init__.py000066400000000000000000000000001456575232400302630ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/lib/print_tool_versions.py000077500000000000000000000035521456575232400326670ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import logging import pathlib import shutil import subprocess _TOOLS = [ "cadical", "cbmc", "cbmc-viewer", "cbmc-starter-kit-update", "kissat", "litani", ] def _format_versions(table): lines = [ "", '', ] for tool, version in table.items(): if version: v_str = f'
{version}
' else: v_str = 'not found' lines.append( f'' f'') lines.append("
Tool Versions
{tool}:{v_str}
") return "\n".join(lines) def _get_tool_versions(): ret = {} for tool in _TOOLS: err = f"Could not determine version of {tool}: " ret[tool] = None if not shutil.which(tool): logging.error("%s'%s' not found on $PATH", err, tool) continue cmd = [tool, "--version"] proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) try: out, _ = proc.communicate(timeout=10) except subprocess.TimeoutExpired: logging.error("%s'%s --version' timed out", err, tool) continue if proc.returncode: logging.error( "%s'%s --version' returned %s", err, tool, str(proc.returncode)) continue ret[tool] = out.strip() return ret def main(): exe_name = pathlib.Path(__file__).name logging.basicConfig(format=f"{exe_name}: %(message)s") table = _get_tool_versions() out = _format_versions(table) print(out) if __name__ == "__main__": main() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/lib/summarize.py000066400000000000000000000106401456575232400305530ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import argparse import json import logging import os import sys DESCRIPTION = """Print 2 tables in GitHub-flavored Markdown that summarize an execution of CBMC proofs.""" def get_args(): """Parse arguments for summarize script.""" parser = argparse.ArgumentParser(description=DESCRIPTION) for arg in [{ "flags": ["--run-file"], "help": "path to the Litani run.json file", "required": True, }]: flags = arg.pop("flags") parser.add_argument(*flags, **arg) return parser.parse_args() def _get_max_length_per_column_list(data): ret = [len(item) + 1 for item in data[0]] for row in data[1:]: for idx, item in enumerate(row): ret[idx] = max(ret[idx], len(item) + 1) return ret def _get_table_header_separator(max_length_per_column_list): line_sep = "" for max_length_of_word_in_col in max_length_per_column_list: line_sep += "|" + "-" * (max_length_of_word_in_col + 1) line_sep += "|\n" return line_sep def _get_entries(max_length_per_column_list, row_data): entries = [] for row in row_data: entry = "" for idx, word in enumerate(row): max_length_of_word_in_col = max_length_per_column_list[idx] space_formatted_word = (max_length_of_word_in_col - len(word)) * " " entry += "| " + word + space_formatted_word entry += "|\n" entries.append(entry) return entries def _get_rendered_table(data): table = [] max_length_per_column_list = _get_max_length_per_column_list(data) entries = _get_entries(max_length_per_column_list, data) for idx, entry in enumerate(entries): if idx == 1: line_sep = _get_table_header_separator(max_length_per_column_list) table.append(line_sep) table.append(entry) table.append("\n") return "".join(table) def _get_status_and_proof_summaries(run_dict): """Parse a dict representing a Litani run and create lists summarizing the proof results. Parameters ---------- run_dict A dictionary representing a Litani run. Returns ------- A list of 2 lists. The first sub-list maps a status to the number of proofs with that status. The second sub-list maps each proof to its status. 
""" count_statuses = {} proofs = [["Proof", "Status"]] for proof_pipeline in run_dict["pipelines"]: status_pretty_name = proof_pipeline["status"].title().replace("_", " ") try: count_statuses[status_pretty_name] += 1 except KeyError: count_statuses[status_pretty_name] = 1 if proof_pipeline["name"] == "print_tool_versions": continue proofs.append([proof_pipeline["name"], status_pretty_name]) statuses = [["Status", "Count"]] for status, count in count_statuses.items(): statuses.append([status, str(count)]) return [statuses, proofs] def print_proof_results(out_file): """ Print 2 strings that summarize the proof results. When printing, each string will render as a GitHub flavored Markdown table. """ output = "## Summary of CBMC proof results\n\n" with open(out_file, encoding='utf-8') as run_json: run_dict = json.load(run_json) status_table, proof_table = _get_status_and_proof_summaries(run_dict) for summary in (status_table, proof_table): output += _get_rendered_table(summary) print(output) sys.stdout.flush() github_summary_file = os.getenv("GITHUB_STEP_SUMMARY") if github_summary_file: with open(github_summary_file, "a") as handle: print(output, file=handle) handle.flush() else: logging.warning( "$GITHUB_STEP_SUMMARY not set, not writing summary file") msg = ( "Click the 'Summary' button to view a Markdown table " "summarizing all proof results") if run_dict["status"] != "success": logging.error("Not all proofs passed.") logging.error(msg) sys.exit(1) logging.info(msg) if __name__ == '__main__': args = get_args() logging.basicConfig(format="%(levelname)s: %(message)s") try: print_proof_results(args.run_file) except Exception as ex: # pylint: disable=broad-except logging.critical("Could not print results. Exception: %s", str(ex)) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memcpy_using_uint64/000077500000000000000000000000001456575232400313265ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memcpy_using_uint64/Makefile000066400000000000000000000020111456575232400327600ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Max needs to be big enough to have multiple loop unrollings to have full coverage # 160 is well larger than that, and still completes quite fast: ~ 40s MAX = 160 DEFINES += -DMAX=$(MAX) UNWINDSET += memcpy_impl.0:$(shell echo $$(($(MAX) + 1))) UNWINDSET += memcpy_using_uint64_impl.0:$(shell echo $$(( $$(( $(MAX) / 8 )) + 1))) CBMCFLAGS += PROOF_UID = memcpy_using_uint64 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_override.c PROOF_SOURCES += $(PROOF_STUB)/memcpy_using_uint64.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. 
PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common cbmc-proof.txt000066400000000000000000000000711456575232400340350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memcpy_using_uint64This file marks the directory as containing a CBMC proof memcpy_using_uint64_harness.c000066400000000000000000000012331456575232400370450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memcpy_using_uint64/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void *memcpy_impl(void *dst, const void *src, size_t n); void *memcpy_using_uint64_impl(void *dst, const void *src, size_t n); /* * Check that the optimized version of memcpy is memory safe * And that it matches the naive version */ void memcpy_using_uint64_harness() { char s[MAX]; char d1[MAX]; char d2[MAX]; unsigned size; __CPROVER_assume(size < MAX); memcpy_impl(d1, s, size); memcpy_using_uint64_impl(d2, s, size); assert_bytes_match(d1, d2, size); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_override_0/000077500000000000000000000000001456575232400310265ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_override_0/Makefile000066400000000000000000000020031456575232400324610ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Max needs to be big enough to have multiple loop unrollings to have full coverage # 160 is well larger than that, and still completes quite fast: ~ 40s MAX = 160 DEFINES += -DMAX=$(MAX) UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX) + 1))) UNWINDSET += memset_override_0_impl.0:$(shell echo $$(( $$(( $(MAX) / 8 )) + 1))) CBMCFLAGS += PROOF_UID = memset_override_0 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROOF_SOURCES += $(PROOF_STUB)/memset_override_0.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files in the project. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common cbmc-proof.txt000066400000000000000000000000711456575232400335350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_override_0This file marks the directory as containing a CBMC proof memset_override_0_harness.c000066400000000000000000000015211456575232400362450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_override_0/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
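 *
 * This harness constrains the fill byte to 0 and the length to a multiple
 * of 8 (both asserted in the implementation under test), then checks that
 * the optimized memset_override_0_impl writes the same bytes as the naive
 * memset_impl and that every written byte is the fill value, i.e. zero.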
*/ #include #include void *memset_impl(void *dst, int c, size_t n); void *memset_override_0_impl(void *dst, int c, size_t n); /* * Check that the optimized version of memset is memory safe * And that it matches the naive version */ void memset_override_0_harness() { short d1[MAX]; short d2[MAX]; int c; __CPROVER_assume(c == 0); // asserted in the implementation unsigned size; __CPROVER_assume((size & 0x7) == 0); // asserted in the implementation __CPROVER_assume(size < MAX); memset_impl(d1, c, size); memset_override_0_impl(d2, c, size); assert_bytes_match((uint8_t *)d1, (uint8_t *)d2, size); assert_all_bytes_are((uint8_t *)d2, c, size); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_using_uint64/000077500000000000000000000000001456575232400313265ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_using_uint64/Makefile000066400000000000000000000020101456575232400327570ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. ########### # Max needs to be big enough to have multiple loop unrollings to have full coverage # 160 is well larger than that, and still completes quite fast: ~ 40s MAX = 160 DEFINES += -DMAX=$(MAX) UNWINDSET += memset_impl.0:$(shell echo $$(($(MAX) + 1))) UNWINDSET += memset_using_uint64_impl.0:$(shell echo $$(( $$(( $(MAX) / 8 )) + 1))) CBMCFLAGS += PROOF_UID = memset_using_uint64 HARNESS_ENTRY = $(PROOF_UID)_harness HARNESS_FILE = $(PROOFDIR)/$(HARNESS_ENTRY).c PROOF_SOURCES += $(HARNESS_FILE) PROOF_SOURCES += $(PROOF_SOURCE)/utils.c PROOF_SOURCES += $(PROOF_STUB)/memset_override.c PROOF_SOURCES += $(PROOF_STUB)/memset_using_uint64.c # The actual implementation that we're proving comes from .inl files # that the stubs pull in. Link against an empty file, since we're not # using any other files from c-common. PROJECT_SOURCES += $(PROOF_STUB)/empty-source-file.c ########### include ../Makefile.common cbmc-proof.txt000066400000000000000000000000711456575232400340350ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_using_uint64This file marks the directory as containing a CBMC proof memset_using_uint64_harness.c000066400000000000000000000012601456575232400370450ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/memset_using_uint64/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include void *memset_impl(void *dst, int c, size_t n); void *memset_using_uint64_impl(void *dst, int c, size_t n); /* * Check that the optimized version of memset is memory safe * And that it matches the naive version */ void memset_using_uint64_harness() { uint8_t d1[MAX]; uint8_t d2[MAX]; uint8_t c; size_t size; __CPROVER_assume(size < MAX); memset_impl(d1, c, size); memset_using_uint64_impl(d2, c, size); assert_bytes_match(d1, d2, size); assert_all_bytes_are(d2, c, size); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/run-cbmc-proofs.py000077500000000000000000000322631456575232400310150ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: MIT-0 import argparse import asyncio import json import logging import math import os import pathlib import re import subprocess import sys import tempfile from lib.summarize import print_proof_results DESCRIPTION = "Configure and run all CBMC proofs in parallel" # Keep the epilog hard-wrapped at 70 characters, as it gets printed # verbatim in the terminal. 70 characters stops here --------------> | EPILOG = """ This tool automates the process of running `make report` in each of the CBMC proof directories. The tool calculates the dependency graph of all tasks needed to build, run, and report on all the proofs, and executes these tasks in parallel. The tool is roughly equivalent to doing this: litani init --project "my-cool-project"; find . -name cbmc-proof.txt | while read -r proof; do pushd $(dirname ${proof}); # The `make _report` rule adds a single proof to litani # without running it make _report; popd; done litani run-build; except that it is much faster and provides some convenience options. The CBMC CI runs this script with no arguments to build and run all proofs in parallel. The value of "my-cool-project" is taken from the PROJECT_NAME variable in Makefile-project-defines. The --no-standalone argument omits the `litani init` and `litani run-build`; use it when you want to add additional proof jobs, not just the CBMC ones. In that case, you would run `litani init` yourself; then run `run-cbmc-proofs --no-standalone`; add any additional jobs that you want to execute with `litani add-job`; and finally run `litani run-build`. The litani dashboard will be written under the `output` directory; the cbmc-viewer reports remain in the `$PROOF_DIR/report` directory. The HTML dashboard from the latest Litani run will always be symlinked to `output/latest/html/index.html`, so you can keep that page open in your browser and reload the page whenever you re-run this script. """ # 70 characters stops here ----------------------------------------> | def get_project_name(): cmd = [ "make", "--no-print-directory", "-f", "Makefile.common", "echo-project-name", ] logging.debug(" ".join(cmd)) proc = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE, check=False) if proc.returncode: logging.critical("could not run make to determine project name") sys.exit(1) if not proc.stdout.strip(): logging.warning( "project name has not been set; using generic name instead. " "Set the PROJECT_NAME value in Makefile-project-defines to " "remove this warning") return "" return proc.stdout.strip() def get_args(): pars = argparse.ArgumentParser( description=DESCRIPTION, epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter) for arg in [{ "flags": ["-j", "--parallel-jobs"], "type": int, "metavar": "N", "help": "run at most N proof jobs in parallel", }, { "flags": ["--fail-on-proof-failure"], "action": "store_true", "help": "exit with return code `10' if any proof failed" " (default: exit 0)", }, { "flags": ["--no-standalone"], "action": "store_true", "help": "only configure proofs: do not initialize nor run", }, { "flags": ["-p", "--proofs"], "nargs": "+", "metavar": "DIR", "help": "only run proof in directory DIR (can pass more than one)", }, { "flags": ["--project-name"], "metavar": "NAME", "default": get_project_name(), "help": "project name for report. Default: %(default)s", }, { "flags": ["--marker-file"], "metavar": "FILE", "default": "cbmc-proof.txt", "help": ( "name of file that marks proof directories. 
Default: " "%(default)s"), }, { "flags": ["--no-memory-profile"], "action": "store_true", "help": "disable memory profiling, even if Litani supports it" }, { "flags": ["--no-expensive-limit"], "action": "store_true", "help": "do not limit parallelism of 'EXPENSIVE' jobs", }, { "flags": ["--expensive-jobs-parallelism"], "metavar": "N", "default": 1, "type": int, "help": ( "how many proof jobs marked 'EXPENSIVE' to run in parallel. " "Default: %(default)s"), }, { "flags": ["--verbose"], "action": "store_true", "help": "verbose output", }, { "flags": ["--debug"], "action": "store_true", "help": "debug output", }, { "flags": ["--summarize"], "action": "store_true", "help": "summarize proof results with two tables on stdout", }, { "flags": ["--version"], "action": "version", "version": "CBMC starter kit 2.5", "help": "display version and exit" }]: flags = arg.pop("flags") pars.add_argument(*flags, **arg) return pars.parse_args() def set_up_logging(verbose): if verbose: level = logging.DEBUG else: level = logging.WARNING logging.basicConfig( format="run-cbmc-proofs: %(message)s", level=level) def task_pool_size(): ret = os.cpu_count() if ret is None or ret < 3: return 1 return ret - 2 def print_counter(counter): # pylint: disable=consider-using-f-string print("\rConfiguring CBMC proofs: " "{complete:{width}} / {total:{width}}".format(**counter), end="", file=sys.stderr) def get_proof_dirs(proof_root, proof_list, marker_file): if proof_list is not None: proofs_remaining = list(proof_list) else: proofs_remaining = [] for root, _, fyles in os.walk(proof_root): proof_name = str(pathlib.Path(root).name) if root != str(proof_root) and ".litani_cache_dir" in fyles: pathlib.Path(f"{root}/.litani_cache_dir").unlink() if proof_list and proof_name not in proof_list: continue if proof_list and proof_name in proofs_remaining: proofs_remaining.remove(proof_name) if marker_file in fyles: yield root if proofs_remaining: logging.critical( "The following proofs were not found: %s", ", ".join(proofs_remaining)) sys.exit(1) def run_build(litani, jobs, fail_on_proof_failure, summarize): cmd = [str(litani), "run-build"] if jobs: cmd.extend(["-j", str(jobs)]) if fail_on_proof_failure: cmd.append("--fail-on-pipeline-failure") if summarize: out_file = pathlib.Path(tempfile.gettempdir(), "run.json").resolve() cmd.extend(["--out-file", str(out_file)]) logging.debug(" ".join(cmd)) proc = subprocess.run(cmd, check=False) if proc.returncode and not fail_on_proof_failure: logging.critical("Failed to run litani run-build") sys.exit(1) if summarize: print_proof_results(out_file) out_file.unlink() if proc.returncode: logging.error("One or more proofs failed") sys.exit(10) def get_litani_path(proof_root): cmd = [ "make", "--no-print-directory", f"PROOF_ROOT={proof_root}", "-f", "Makefile.common", "litani-path", ] logging.debug(" ".join(cmd)) proc = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE, check=False) if proc.returncode: logging.critical("Could not determine path to litani") sys.exit(1) return proc.stdout.strip() def get_litani_capabilities(litani_path): cmd = [litani_path, "print-capabilities"] proc = subprocess.run( cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=False) if proc.returncode: return [] try: return json.loads(proc.stdout) except RuntimeError: logging.warning("Could not load litani capabilities: '%s'", proc.stdout) return [] def check_uid_uniqueness(proof_dir, proof_uids): with (pathlib.Path(proof_dir) / "Makefile").open() as handle: for line in handle: match = 
re.match(r"^PROOF_UID\s*=\s*(?P\w+)", line) if not match: continue if match["uid"] not in proof_uids: proof_uids[match["uid"]] = proof_dir return logging.critical( "The Makefile in directory '%s' should have a different " "PROOF_UID than the Makefile in directory '%s'", proof_dir, proof_uids[match["uid"]]) sys.exit(1) logging.critical( "The Makefile in directory '%s' should contain a line like", proof_dir) logging.critical("PROOF_UID = ...") logging.critical("with a unique identifier for the proof.") sys.exit(1) def should_enable_memory_profiling(litani_caps, args): if args.no_memory_profile: return False return "memory_profile" in litani_caps def should_enable_pools(litani_caps, args): if args.no_expensive_limit: return False return "pools" in litani_caps async def configure_proof_dirs( # pylint: disable=too-many-arguments queue, counter, proof_uids, enable_pools, enable_memory_profiling, debug): while True: print_counter(counter) path = str(await queue.get()) check_uid_uniqueness(path, proof_uids) pools = ["ENABLE_POOLS=true"] if enable_pools else [] profiling = [ "ENABLE_MEMORY_PROFILING=true"] if enable_memory_profiling else [] # Allow interactive tasks to preempt proof configuration proc = await asyncio.create_subprocess_exec( "nice", "-n", "15", "make", *pools, *profiling, "-B", "_report", "" if debug else "--quiet", cwd=path, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) stdout, stderr = await proc.communicate() logging.debug("returncode: %s", str(proc.returncode)) logging.debug("stdout:") for line in stdout.decode().splitlines(): logging.debug(line) logging.debug("stderr:") for line in stderr.decode().splitlines(): logging.debug(line) counter["fail" if proc.returncode else "pass"].append(path) counter["complete"] += 1 print_counter(counter) queue.task_done() async def main(): # pylint: disable=too-many-locals args = get_args() set_up_logging(args.verbose) proof_root = pathlib.Path(os.getcwd()) litani = get_litani_path(proof_root) litani_caps = get_litani_capabilities(litani) enable_pools = should_enable_pools(litani_caps, args) init_pools = [ "--pools", f"expensive:{args.expensive_jobs_parallelism}" ] if enable_pools else [] if not args.no_standalone: cmd = [ str(litani), "init", *init_pools, "--project", args.project_name, "--no-print-out-dir", ] if "output_directory_flags" in litani_caps: out_prefix = proof_root / "output" out_symlink = out_prefix / "latest" out_index = out_symlink / "html" / "index.html" cmd.extend([ "--output-prefix", str(out_prefix), "--output-symlink", str(out_symlink), ]) print( "\nFor your convenience, the output of this run will be symbolically linked to ", out_index, "\n") logging.debug(" ".join(cmd)) proc = subprocess.run(cmd, check=False) if proc.returncode: logging.critical("Failed to run litani init") sys.exit(1) proof_dirs = list(get_proof_dirs( proof_root, args.proofs, args.marker_file)) if not proof_dirs: logging.critical("No proof directories found") sys.exit(1) proof_queue = asyncio.Queue() for proof_dir in proof_dirs: proof_queue.put_nowait(proof_dir) counter = { "pass": [], "fail": [], "complete": 0, "total": len(proof_dirs), "width": int(math.log10(len(proof_dirs))) + 1 } proof_uids = {} tasks = [] enable_memory_profiling = should_enable_memory_profiling(litani_caps, args) for _ in range(task_pool_size()): task = asyncio.create_task(configure_proof_dirs( proof_queue, counter, proof_uids, enable_pools, enable_memory_profiling, args.debug)) tasks.append(task) await proof_queue.join() print_counter(counter) print("", 
file=sys.stderr) if counter["fail"]: logging.critical( "Failed to configure the following proofs:\n%s", "\n".join( [str(f) for f in counter["fail"]])) sys.exit(1) if not args.no_standalone: run_build(litani, args.parallel_jobs, args.fail_on_proof_failure, args.summarize) if __name__ == "__main__": asyncio.run(main()) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/scripts/000077500000000000000000000000001456575232400271055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/scripts/fixup_makefiles.py000077500000000000000000000031641456575232400326410ustar00rootroot00000000000000#!/usr/bin/env python3 import re import sys import pathlib def replace(makefile, included): if included == "../Makefile.common": return "../../Makefile.common" if included == "../Makefile.aws_array_list": return "../../aws_array_list/Makefile" if included == "../Makefile.aws_string": return "../../aws_string/Makefile" if included == "../Makefile.aws_byte_buf": return "../../aws_byte_buf/Makefile" if included == "../Makefile.aws_linked_list": return "../../aws_linked_list/Makefile" if included == "../Makefile.aws_hash_table": return "../../aws_hash_table/Makefile" if included == "../Makefile.aws_priority_queue_sift": return "../../aws_priority_queue_sift/Makefile" return None ok = True pat = re.compile("include\s+(?P[-/.\w]+)") for fyle in pathlib.Path(".").rglob("Makefile"): this_ok = True buf = [] with open(fyle) as handle: for line in handle: line = line.rstrip() m = pat.match(line) if not m: buf.append(line) continue included = fyle.parent / m["path"] if included.exists(): buf.append(line) else: rep = replace(fyle, m["path"]) if rep and (fyle.parent / rep).exists(): buf.append(f"include {rep}") else: print(f"{fyle}: {line}", file=sys.stderr) ok = False if this_ok: with open(fyle, "w") as handle: print("\n".join(buf), file=handle) if not ok: sys.exit(1) aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/proofs/scripts/move_proofs.py000077500000000000000000000026431456575232400320250ustar00rootroot00000000000000#!/usr/bin/env python3 import os import pathlib import shutil def main(): jobs_dir = pathlib.Path(".") suffixes = [m.name[9:] for m in jobs_dir.glob("Makefile.*")] suffixes.sort(key=lambda x: len(x), reverse=True) misc = pathlib.Path("aws_misc") moves = {misc: []} for dyr in jobs_dir.iterdir(): if not dyr.is_dir(): continue found = False for suffix in suffixes: if not dyr.name.startswith(suffix): continue group = pathlib.Path(suffix) pair = (dyr, group / dyr.name[len(suffix) + 1:]) try: moves[group].append(pair) except KeyError: moves[group] = [pair] found = True break if not found: moves[misc].append((dyr, misc / dyr.name)) for group, dyrs in moves.items(): if len(dyrs) != len(set(dyrs)): print(f"group {group} contains duplicates") exit(1) for group, dyrs in moves.items(): print() print(group.name) group.mkdir(exist_ok=True, parents=True) makefile = pathlib.Path(f"Makefile.{group.name}") if makefile.exists(): makefile.rename(group / makefile.name) for src, dst in sorted(dyrs): if dst.name == group.name: continue src.rename(dst) if __name__ == "__main__": main() aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/sources/000077500000000000000000000000001456575232400255715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/sources/README.md000066400000000000000000000003211456575232400270440ustar00rootroot00000000000000CBMC proof source code ====================== This directory 
contains source code written for CBMC proofs. It is common to write some code to model aspects of the system under test, and this code goes here. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/sources/make_common_data_structures.c000066400000000000000000000204241456575232400335200ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include bool aws_byte_buf_is_bounded(const struct aws_byte_buf *const buf, const size_t max_size) { return (buf->capacity <= max_size); } bool aws_byte_buf_has_allocator(const struct aws_byte_buf *const buf) { return (buf->allocator == aws_default_allocator()); } void ensure_byte_buf_has_allocated_buffer_member(struct aws_byte_buf *const buf) { if (buf) { buf->allocator = (nondet_bool()) ? NULL : aws_default_allocator(); buf->buffer = (buf->capacity == 0) ? NULL : malloc(sizeof(*(buf->buffer)) * buf->capacity); } } void ensure_ring_buffer_has_allocated_members(struct aws_ring_buffer *ring_buf, const size_t size) { ring_buf->allocator = aws_default_allocator(); /* The `aws_ring_buffer_init` function requires size > 0. */ __CPROVER_assume(size > 0 && size <= MAX_MALLOC); ring_buf->allocation = malloc(sizeof(*(ring_buf->allocation)) * size); size_t position_head; size_t position_tail; __CPROVER_assume(position_head < size); __CPROVER_assume(position_tail < size); if (ring_buf->allocation != NULL) { aws_atomic_store_ptr(&ring_buf->head, (ring_buf->allocation + position_head)); aws_atomic_store_ptr(&ring_buf->tail, (ring_buf->allocation + position_tail)); ring_buf->allocation_end = ring_buf->allocation + size; } else { aws_atomic_store_ptr(&ring_buf->head, NULL); aws_atomic_store_ptr(&ring_buf->tail, NULL); ring_buf->allocation_end = NULL; } } /** * Constrain a buffer to point-into and be contained within a range [lo,hi] */ void ensure_byte_buf_has_allocated_buffer_member_in_range(struct aws_byte_buf *buf, uint8_t *lo, uint8_t *hi) { assert(lo < hi); size_t space = hi - lo; size_t pos; __CPROVER_assume(pos < space); buf->buffer = lo + pos; size_t max_capacity = hi - buf->buffer; assert(0 < max_capacity); __CPROVER_assume(0 < buf->capacity && buf->capacity <= max_capacity); } /** * Constrain a buffer to point-into the valid elements of a ring_buffer */ void ensure_byte_buf_has_allocated_buffer_member_in_ring_buf( struct aws_byte_buf *buf, struct aws_ring_buffer *ring_buf) { buf->allocator = (nondet_bool()) ? NULL : aws_default_allocator(); uint8_t *head = aws_atomic_load_ptr(&ring_buf->head); uint8_t *tail = aws_atomic_load_ptr(&ring_buf->tail); if (head < tail) { /* [....H T....] 
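 * head precedes tail, so the valid region wraps past the end of the
 * allocation; the buffer is therefore constrained to point into either
 * [tail, allocation_end) or [allocation, head).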
*/ if (nondet_bool()) { __CPROVER_assume(tail < ring_buf->allocation_end); ensure_byte_buf_has_allocated_buffer_member_in_range(buf, tail, ring_buf->allocation_end); } else { __CPROVER_assume(ring_buf->allocation < head); ensure_byte_buf_has_allocated_buffer_member_in_range(buf, ring_buf->allocation, head); } } else { /* [ T....H ] */ ensure_byte_buf_has_allocated_buffer_member_in_range(buf, tail, head); } } bool aws_byte_cursor_is_bounded(const struct aws_byte_cursor *const cursor, const size_t max_size) { return cursor->len <= max_size; } void ensure_byte_cursor_has_allocated_buffer_member(struct aws_byte_cursor *const cursor) { if (cursor != NULL) { cursor->ptr = malloc(cursor->len); } } bool aws_array_list_is_bounded( const struct aws_array_list *const list, const size_t max_initial_item_allocation, const size_t max_item_size) { bool item_size_is_bounded = list->item_size <= max_item_size; bool length_is_bounded = list->length <= max_initial_item_allocation; return item_size_is_bounded && length_is_bounded; } void ensure_array_list_has_allocated_data_member(struct aws_array_list *const list) { list->data = malloc(list->current_size); list->alloc = nondet_bool() ? NULL : aws_default_allocator(); } void ensure_linked_list_is_allocated(struct aws_linked_list *const list, size_t max_length) { list->head.prev = NULL; list->tail.next = NULL; struct aws_linked_list_node *curr = &list->head; for (size_t i = 0; i < max_length; i++) { struct aws_linked_list_node *node = malloc(sizeof(struct aws_linked_list_node)); if (!node) break; curr->next = node; node->prev = curr; curr = node; } curr->next = &list->tail; list->tail.prev = curr; } bool aws_priority_queue_is_bounded( const struct aws_priority_queue *const queue, const size_t max_initial_item_allocation, const size_t max_item_size) { bool container_is_bounded = aws_array_list_is_bounded(&queue->container, max_initial_item_allocation, max_item_size); /* The backpointer list holds pointers to [struct * aws_priority_queue_node] and so the max_item_size should be * larger than the pointer size. */ bool backpointers_list_is_bounded = aws_array_list_is_bounded( &queue->backpointers, max_initial_item_allocation, sizeof(struct aws_priority_queue_node *)); return container_is_bounded && backpointers_list_is_bounded; } void ensure_priority_queue_has_allocated_members(struct aws_priority_queue *const queue) { ensure_array_list_has_allocated_data_member(&queue->container); ensure_array_list_has_allocated_data_member(&queue->backpointers); queue->pred = nondet_compare; } void ensure_allocated_hash_table(struct aws_hash_table *map, size_t max_table_entries) { if (map == NULL) { return; } size_t num_entries; __CPROVER_assume(num_entries <= max_table_entries); __CPROVER_assume(aws_is_power_of_two(num_entries)); size_t required_bytes; __CPROVER_assume(!hash_table_state_required_bytes(num_entries, &required_bytes)); struct hash_table_state *impl = malloc(required_bytes); if (impl) { impl->size = num_entries; map->p_impl = impl; } else { map->p_impl = NULL; } } void ensure_hash_table_has_valid_destroy_functions(struct aws_hash_table *map) { map->p_impl->destroy_key_fn = nondet_bool() ? NULL : hash_proof_destroy_noop; map->p_impl->destroy_value_fn = nondet_bool() ? 
NULL : hash_proof_destroy_noop; } bool aws_hash_table_has_an_empty_slot(const struct aws_hash_table *const map, size_t *const rval) { return hash_table_state_has_an_empty_slot(map->p_impl, rval); } bool hash_table_state_has_an_empty_slot(const struct hash_table_state *const state, size_t *const rval) { __CPROVER_assume(state->entry_count > 0); size_t empty_slot_idx; __CPROVER_assume(empty_slot_idx < state->size); *rval = empty_slot_idx; return state->slots[empty_slot_idx].hash_code == 0; } /** * A correct implementation of the hash_destroy function should never have a memory * error on valid input. There is the question of whether the destroy functions themselves * are correctly called (i.e. only on valid input, no double free, etc.). This will be tested in * future proofs. */ void hash_proof_destroy_noop(void *p) {} struct aws_string *ensure_string_is_allocated_nondet_length() { /* Considers any size up to the maximum possible size for the array [bytes] in aws_string */ return nondet_allocate_string_bounded_length(SIZE_MAX - 1 - sizeof(struct aws_string)); } struct aws_string *nondet_allocate_string_bounded_length(size_t max_size) { size_t len; __CPROVER_assume(len < max_size); return ensure_string_is_allocated(len); } struct aws_string *ensure_string_is_allocated(size_t len) { struct aws_string *str = malloc(sizeof(struct aws_string) + len + 1); if (str == NULL) { return NULL; } /* Fields are declared const, so we need to copy them in like this */ if (str != NULL) { *(struct aws_allocator **)(&str->allocator) = nondet_bool() ? aws_default_allocator() : NULL; *(size_t *)(&str->len) = len; *(uint8_t *)&str->bytes[len] = '\0'; } return str; } const char *ensure_c_str_is_allocated(size_t max_size) { size_t cap; __CPROVER_assume(cap > 0 && cap <= max_size); const char *str = malloc(cap); /* Ensure that its a valid c string. Since all bytes are nondeterminstic, the actual * string length is 0..str_cap */ __CPROVER_assume(IMPLIES(str != NULL, str[cap - 1] == '\0')); return str; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/sources/utils.c000066400000000000000000000133351456575232400271020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
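 *
 * Helper routines shared by the CBMC harnesses: byte-wise assertion
 * helpers, equivalence checks for aws_array_list / aws_byte_buf /
 * aws_byte_cursor, and nondeterministic or uninterpreted comparator,
 * equality, and hash stubs.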
*/ #include #include void assert_bytes_match(const uint8_t *const a, const uint8_t *const b, const size_t len) { assert(len == 0 || !a == !b); if (len > 0 && a != NULL && b != NULL) { size_t i; __CPROVER_assume(i < len && len < MAX_MALLOC); /* prevent spurious pointer overflows */ assert(a[i] == b[i]); } } void assert_all_bytes_are(const uint8_t *const a, const uint8_t c, const size_t len) { if (len > 0 && a != NULL) { size_t i; __CPROVER_assume(i < len); assert(a[i] == c); } } void assert_all_zeroes(const uint8_t *const a, const size_t len) { assert_all_bytes_are(a, 0, len); } void assert_byte_from_buffer_matches(const uint8_t *const buffer, const struct store_byte_from_buffer *const b) { if (buffer && b) { assert(*(buffer + b->index) == b->byte); } } void save_byte_from_array(const uint8_t *const array, const size_t size, struct store_byte_from_buffer *const storage) { if (size > 0 && array && storage) { storage->index = nondet_size_t(); __CPROVER_assume(storage->index < size); storage->byte = array[storage->index]; } } void assert_array_list_equivalence( const struct aws_array_list *const lhs, const struct aws_array_list *const rhs, const struct store_byte_from_buffer *const rhs_byte) { /* In order to be equivalent, either both are NULL or both are non-NULL */ if (lhs == rhs) { return; } else { assert(lhs && rhs); /* if only one is null, they differ */ } assert(lhs->alloc == rhs->alloc); assert(lhs->current_size == rhs->current_size); assert(lhs->length == rhs->length); assert(lhs->item_size == rhs->item_size); if (lhs->current_size > 0) { assert_byte_from_buffer_matches((uint8_t *)lhs->data, rhs_byte); } } void assert_byte_buf_equivalence( const struct aws_byte_buf *const lhs, const struct aws_byte_buf *const rhs, const struct store_byte_from_buffer *const rhs_byte) { /* In order to be equivalent, either both are NULL or both are non-NULL */ if (lhs == rhs) { return; } else { assert(lhs && rhs); /* if only one is null, they differ */ } assert(lhs->len == rhs->len); assert(lhs->capacity == rhs->capacity); assert(lhs->allocator == rhs->allocator); if (lhs->len > 0) { assert_byte_from_buffer_matches(lhs->buffer, rhs_byte); } } void assert_byte_cursor_equivalence( const struct aws_byte_cursor *const lhs, const struct aws_byte_cursor *const rhs, const struct store_byte_from_buffer *const rhs_byte) { assert(!lhs == !rhs); if (lhs && rhs) { assert(lhs->len == rhs->len); if (lhs->len > 0) { assert_byte_from_buffer_matches(lhs->ptr, rhs_byte); } } } void save_byte_from_hash_table(const struct aws_hash_table *map, struct store_byte_from_buffer *storage) { struct hash_table_state *state = map->p_impl; size_t size_in_bytes; __CPROVER_assume(hash_table_state_required_bytes(state->size, &size_in_bytes) == AWS_OP_SUCCESS); save_byte_from_array((uint8_t *)state, size_in_bytes, storage); } void check_hash_table_unchanged(const struct aws_hash_table *map, const struct store_byte_from_buffer *storage) { struct hash_table_state *state = map->p_impl; uint8_t *byte_array = (uint8_t *)state; assert(byte_array[storage->index] == storage->byte); } int nondet_compare(const void *const a, const void *const b) { assert(a != NULL); assert(b != NULL); return nondet_int(); } int __CPROVER_uninterpreted_compare(const void *const a, const void *const b); int uninterpreted_compare(const void *const a, const void *const b) { assert(a != NULL); assert(b != NULL); int rval = __CPROVER_uninterpreted_compare(a, b); /* Compare is reflexive */ __CPROVER_assume(IMPLIES(a == b, rval == 0)); /* Compare is anti-symmetric*/ 
__CPROVER_assume(__CPROVER_uninterpreted_compare(b, a) == -rval); /* If two things are equal, their hashes are also equal */ if (rval == 0) { __CPROVER_assume(__CPROVER_uninterpreted_hasher(a) == __CPROVER_uninterpreted_hasher(b)); } return rval; } bool nondet_equals(const void *const a, const void *const b) { assert(a != NULL); assert(b != NULL); return nondet_bool(); } bool __CPROVER_uninterpreted_equals(const void *const a, const void *const b); uint64_t __CPROVER_uninterpreted_hasher(const void *const a); /** * Add assumptions that equality is reflexive and symmetric. Don't bother with * transitivity because it doesn't cause any spurious proof failures on hash-table */ bool uninterpreted_equals(const void *const a, const void *const b) { bool rval = __CPROVER_uninterpreted_equals(a, b); /* Equals is reflexive */ __CPROVER_assume(IMPLIES(a == b, rval)); /* Equals is symmetric */ __CPROVER_assume(__CPROVER_uninterpreted_equals(b, a) == rval); /* If two things are equal, their hashes are also equal */ if (rval) { __CPROVER_assume(__CPROVER_uninterpreted_hasher(a) == __CPROVER_uninterpreted_hasher(b)); } return rval; } bool uninterpreted_equals_assert_inputs_nonnull(const void *const a, const void *const b) { assert(a != NULL); assert(b != NULL); return uninterpreted_equals(a, b); } uint64_t nondet_hasher(const void *a) { assert(a != NULL); return nondet_uint64_t(); } /** * Standard stub function to hash one item. */ uint64_t uninterpreted_hasher(const void *a) { assert(a != NULL); return __CPROVER_uninterpreted_hasher(a); } bool uninterpreted_predicate_fn(uint8_t value); aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/000077500000000000000000000000001456575232400252465ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/README.md000066400000000000000000000003651456575232400265310ustar00rootroot00000000000000CBMC proof stubs ====================== This directory contains the stubs written for CBMC proofs. It is common to stub out functionality like network send and receive methods when writing a CBMC proof, and the code for these stubs goes here. aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/abort_override_assert_false.c000066400000000000000000000004201456575232400331470ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: abort * * We override abort to be an assert(0) so that it is caught by CBMC */ #include void abort() { assert(0); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/aws_array_list_swap_override.c000066400000000000000000000025151456575232400333710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: aws_array_list_swap * * We override aws_array_list_swap because mem_swap makes CBMC * struggle (because of the many memcpys) and because the * array_list_get_at in before the mem_swap are unneccessary if we * stub out mem_swap. Instead we add a havoc assumption on the two * swapped byted to ensure that no assertion on the values of the two * cells is made afterwards. 
*/ #include #include void aws_array_list_swap(struct aws_array_list *AWS_RESTRICT list, size_t a, size_t b) { assert(a < list->length); assert(b < list->length); assert(aws_array_list_is_valid(list)); if (a == b) { AWS_POSTCONDITION(aws_array_list_is_valid(list)); return; } /* Havoc one byte of each item so that assertions on their value * fail */ if (list->item_size > 0) { size_t item_sz = list->item_size; size_t offset_a; __CPROVER_assume(offset_a < item_sz); ((uint8_t *)list->data)[(a * item_sz) + offset_a] = nondet_uint8_t(); size_t offset_b; __CPROVER_assume(offset_b < item_sz); ((uint8_t *)list->data)[(b * item_sz) + offset_b] = nondet_uint8_t(); } return; } aws_byte_cursor_read_be16_override.c000066400000000000000000000035131456575232400342560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * The signature for the value generator, if it is used. */ #ifdef AWS_BYTE_CURSOR_READ_BE16_GENERATOR uint16_t AWS_BYTE_CURSOR_READ_BE16_GENERATOR(const struct aws_byte_cursor *cursor); #endif /** * This function is used in deserializing values from a byte cursor. * Sometimes, for CBMC proof, it is expected that certain values in byte stream are constrained. * For example, in the aws_cryptosdk_enc_ctx_deserilize() proof, the first value we read is the number of elements, * which we need to be constrained in order to ensure that CBMC can fully unwind all loops. All other values can be left * nondet. In this case, define -DAWS_BYTE_CURSOR_READ_BE16_GENERATOR=generator_name, and the correct generator will be * called. If there is no structure that must be followed, AWS_BYTE_CURSOR_READ_BE16_GENERATOR can be left undefined, * and the var will be set to a nondet value. */ bool aws_byte_cursor_read_be16(struct aws_byte_cursor *cursor, uint16_t *var) { AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); #ifdef AWS_BYTE_CURSOR_READ_BE16_GENERATOR *var = AWS_BYTE_CURSOR_READ_BE16_GENERATOR(cursor); #else *var = nondet_uint16_t(); #endif const size_t len = sizeof(uint16_t); /* If there are not 2 bytes left, or if we nondet fail */ if (cursor->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || len > cursor->len || nondet_bool()) { AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); return false; } /* Otherwise, succeed */ cursor->ptr += len; cursor->len -= len; AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); return true; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/aws_hash_iter_overrides.c000066400000000000000000000100261456575232400323130ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include /* If the consumer of the iterator doesn't use the .element in the iterator, we can just leave it undef * This is sound, as it gives you a totally nondet value every time you call the iterator, and is the default behaviour * of CBMC. But if it is used, we need a way for the harness to specify valid values for the element, for example if * they are copying values out of the table. They can do this by defining * -DHASH_ITER_ELEMENT_GENERATOR=the_generator_fn, where the_generator_fn has signature: * the_generator_fn(struct aws_hash_iter *new_iter, const struct aws_hash_iter* old_iter). 
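 *
 * For illustration only (not part of the stub), a generator in the simple
 * style (a) described below could allocate a fresh nondeterministic element
 * on every call; KEY_SIZE and VALUE_SIZE are hypothetical placeholders for
 * the proof's key and value sizes:
 *
 *     void my_element_generator(struct aws_hash_iter *new_iter,
 *                               const struct aws_hash_iter *old_iter) {
 *         (void)old_iter; /* fresh nondet element; the previous one is ignored */
 *         new_iter->element.key = malloc(KEY_SIZE);
 *         new_iter->element.value = malloc(VALUE_SIZE);
 *     }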
* * [new_iter] is a pointer to the iterator that will be returned from this function, and the generator function can * modify it in any way it sees fit. In particular, it can update the [new_iter->element] field to be valid for the * type of hash-table being proved. Two obvious generators are: * (a) one that simply creates a new non-determinsitic value each time its called using malloc * (b) one that uses a value stored in the underlying map, and copies it into the iterator. * * [old_iter] is a pointer to the old iterator, in the case of a "aws_hash_iter_next" call, and null in the case of * "aws_hash_iter_begin". */ #ifdef HASH_ITER_ELEMENT_GENERATOR void HASH_ITER_ELEMENT_GENERATOR(struct aws_hash_iter *new_iter, const struct aws_hash_iter *old_iter); #endif /* Simple map for what the iterator does: it just chugs along, returns a nondet value, until its gone at most map->size * times */ struct aws_hash_iter aws_hash_iter_begin(const struct aws_hash_table *map) { /* Leave it as non-det as possible */ AWS_PRECONDITION(aws_hash_table_is_valid(map)); /* Build a nondet iterator, set the required fields, and return it */ struct aws_hash_iter rval; rval.map = map; rval.limit = map->p_impl->size; __CPROVER_assume(rval.slot <= rval.limit); rval.status = (rval.slot == rval.limit) ? AWS_HASH_ITER_STATUS_DONE : AWS_HASH_ITER_STATUS_READY_FOR_USE; #ifdef HASH_ITER_ELEMENT_GENERATOR HASH_ITER_ELEMENT_GENERATOR(&rval, NULL); #endif return rval; } bool aws_hash_iter_done(const struct aws_hash_iter *iter) { AWS_PRECONDITION( iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] status should either be done or ready to use."); bool rval = iter->slot == iter->limit; assert(rval == (iter->status == AWS_HASH_ITER_STATUS_DONE)); return rval; } void aws_hash_iter_next(struct aws_hash_iter *iter) { if (iter->slot == iter->limit) { iter->status = AWS_HASH_ITER_STATUS_DONE; return; } /* Build a nondet iterator, set the required fields, and copy it over */ struct aws_hash_iter rval; rval.map = iter->map; rval.limit = iter->limit; size_t next_step; __CPROVER_assume(next_step > 0 && next_step <= iter->limit - iter->slot); rval.limit = iter->limit; rval.slot = iter->slot + next_step; rval.status = (rval.slot == rval.limit) ? 
AWS_HASH_ITER_STATUS_DONE : AWS_HASH_ITER_STATUS_READY_FOR_USE; #ifdef HASH_ITER_ELEMENT_GENERATOR HASH_ITER_ELEMENT_GENERATOR(&rval, iter); #endif *iter = rval; } /* delete always leaves the element unusable, so we'll just leave the element totally nondet */ void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents) { /* Build a nondet iterator, set the required fields, and copy it over */ struct aws_hash_iter rval; rval.map = iter->map; rval.slot = iter->slot; rval.limit = iter->limit - 1; rval.status = AWS_HASH_ITER_STATUS_DELETE_CALLED; rval.map->p_impl->entry_count = iter->map->p_impl->entry_count; if (rval.map->p_impl->entry_count) rval.map->p_impl->entry_count--; *iter = rval; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/aws_hash_table_find_override.c000066400000000000000000000023261456575232400332600ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include bool __CPROVER_file_local_hash_table_c_s_hash_keys_eq(struct hash_table_state *state, const void *a, const void *b); int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem) { AWS_PRECONDITION(aws_hash_table_is_valid(map), "Input hash_table [map] must be valid."); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(p_elem), "Input pointer [p_elem] must be writable."); const struct hash_table_state *const state = map->p_impl; size_t i; __CPROVER_assume(i < state->size); if (nondet_bool()) { *p_elem = NULL; } else { const struct hash_table_entry *const entry = &state->slots[i]; __CPROVER_assume(__CPROVER_file_local_hash_table_c_s_hash_keys_eq(state, key, entry->element.key)); *p_elem = &entry->element; } AWS_POSTCONDITION(aws_hash_table_is_valid(map), "Output hash_table [map] must be valid."); return AWS_OP_SUCCESS; } aws_hash_table_no_slots_override.c000066400000000000000000000212301456575232400341140ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /******************************************************************************** * This module represents a hash-table that is not backed by any actual memory * It just takes non-det actions when given inputs. We know this is safe because * we've already proven the c-common hash-table memory safe under these * pre/post conditions. * * Since we're making almost everything nondet, the only externally visible property * of the hash-table is the entry_count. */ /** * As noted above the only externally visible property of the hash-table is the [entry_count]. And it can vary between * 0-SIZE_MAX. So there is really nothing to assert here */ bool aws_hash_table_is_valid(const struct aws_hash_table *map) { return map && map->p_impl; } /** * Given a pointer to a hash_iter, checks that it is well-formed, with all data-structure invariants. 
*/ bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter) { if (!iter) { return false; } if (!iter->map) { return false; } if (!aws_hash_table_is_valid(iter->map)) { return false; } if (iter->limit != iter->map->p_impl->entry_count) { return false; } switch (iter->status) { case AWS_HASH_ITER_STATUS_DONE: /* Done iff slot == limit */ return iter->slot == iter->limit; case AWS_HASH_ITER_STATUS_DELETE_CALLED: /* iter->slot can underflow to SIZE_MAX after a delete * see the comments for aws_hash_iter_delete() */ return iter->slot <= iter->limit; case AWS_HASH_ITER_STATUS_READY_FOR_USE: /* A slot must point to a valid location (i.e. hash_code != 0) */ return iter->slot < iter->limit; } /* Invalid status code */ return false; } /** * Allocate a hash_table_state with no memory for the slots. * Since CBMC runs with memory safety assertions on, * CBMC will detect any attempt to use the slots. * This ensures that no code will ever accidentally use the values * in the slots, ensuring maximal nondeterminism. */ void make_hash_table_with_no_backing_store(struct aws_hash_table *map, size_t max_table_entries) { if (map != NULL) { map->p_impl = malloc(sizeof(struct hash_table_state)); __CPROVER_assume(map->p_impl != NULL); __CPROVER_assume(map->p_impl->entry_count <= max_table_entries); } } /** * Nondet clear. Since the only externally visible property of this * table is the entry_count, just set it to 0 */ void aws_hash_table_clear(struct aws_hash_table *map) { AWS_PRECONDITION(aws_hash_table_is_valid(map)); struct hash_table_state *state = map->p_impl; state->entry_count = 0; AWS_POSTCONDITION(aws_hash_table_is_valid(map)); } /** * Nondet put. Since there is no backing store, we just non-determinstically either add it or don't. */ int aws_hash_table_put(struct aws_hash_table *map, const void *key, void *value, int *was_created) { AWS_PRECONDITION(aws_hash_table_is_valid(map)); int track_was_created; if (was_created) { *was_created = track_was_created; } int rval; struct hash_table_state *state = map->p_impl; /* Avoid overflow */ if (state->entry_count == SIZE_MAX) { return track_was_created ? AWS_OP_ERR : rval; } if (rval == AWS_OP_SUCCESS) { state->entry_count++; } AWS_POSTCONDITION(aws_hash_table_is_valid(map)); return rval; } /** * Not yet implemented */ int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_element *p_value); /** * Not yet implemented */ int aws_hash_table_remove( struct aws_hash_table *map, const void *key, struct aws_hash_element *p_value, int *was_present); /** * Not yet implemented */ int aws_hash_table_create( struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem, int *was_created); /** * Implements a version of aws_hash_table_find() that non-determinstially either: * 1. Return NULL, indicating that the element didn't exist * 2. Returns a newly created element. By default, just create a totally non-determinstic element. * However, if the consumer of the stub uses the element, this may be insufficient. Use the same * style of generator as the hash_iterator stubs, except with a different signature due to the different * calling context. * * To declare a genarator: * -DHASH_TABLE_FIND_ELEMENT_GENERATOR=the_generator_fn, where the_generator_fn has signature: * the_generator_fnconst struct aws_hash_table *map, const void *key, struct aws_hash_element *p_elem). 
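 *
 * As a minimal illustrative sketch (the generator name is hypothetical; the body mirrors
 * hash_find_string_string_generator() in hash_table_generators.c), such a generator could be:
 *
 *     void my_find_element_generator(const struct aws_hash_table *map, const void *key, struct aws_hash_element *p_elem) {
 *         (void)map;
 *         (void)key;
 *         if (p_elem) {
 *             p_elem->key = ensure_string_is_allocated_nondet_length();
 *             p_elem->value = ensure_string_is_allocated_nondet_length();
 *         }
 *     }
 *
 * built into the proof with -DHASH_TABLE_FIND_ELEMENT_GENERATOR=my_find_element_generator.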
* * NOTE: If you want a version of aws_hash_table_find() that that ensures that the table actually has the found value * when find returns success, that can be found in aws_hash_table_find_override.c */ #ifdef HASH_TABLE_FIND_ELEMENT_GENERATOR void HASH_TABLE_FIND_ELEMENT_GENERATOR( const struct aws_hash_table *map, const void *key, struct aws_hash_element *p_elem); #endif int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem) { AWS_PRECONDITION(aws_hash_table_is_valid(map), "Input hash_table [map] must be valid."); AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(p_elem), "Input pointer [p_elem] must be writable."); const struct hash_table_state *const state = map->p_impl; if (nondet_bool()) { *p_elem = NULL; } else { *p_elem = malloc(sizeof(struct aws_hash_element)); #ifdef HASH_TABLE_FIND_ELEMENT_GENERATOR HASH_TABLE_FIND_ELEMENT_GENERATOR(map, key, *p_elem); #endif } AWS_POSTCONDITION(aws_hash_table_is_valid(map), "Output hash_table [map] must be valid."); return AWS_OP_SUCCESS; } #ifdef HASH_ITER_ELEMENT_GENERATOR void HASH_ITER_ELEMENT_GENERATOR(struct aws_hash_iter *new_iter, const struct aws_hash_iter *old_iter); #endif /* Simple map for what the iterator does: it just chugs along, returns a nondet value, until its gone at most * map->entry_count times */ struct aws_hash_iter aws_hash_iter_begin(const struct aws_hash_table *map) { /* Leave it as non-det as possible */ AWS_PRECONDITION(aws_hash_table_is_valid(map)); /* Build a nondet iterator, set the required fields, and return it */ struct aws_hash_iter rval; rval.map = map; rval.limit = map->p_impl->entry_count; rval.slot = 0; rval.status = (rval.slot == rval.limit) ? AWS_HASH_ITER_STATUS_DONE : AWS_HASH_ITER_STATUS_READY_FOR_USE; #ifdef HASH_ITER_ELEMENT_GENERATOR HASH_ITER_ELEMENT_GENERATOR(&rval, NULL); #endif return rval; } bool aws_hash_iter_done(const struct aws_hash_iter *iter) { AWS_PRECONDITION(aws_hash_iter_is_valid(iter)); AWS_PRECONDITION( iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] status should either be done or ready to use."); bool rval = iter->slot == iter->limit; assert(rval == (iter->status == AWS_HASH_ITER_STATUS_DONE)); return rval; } void aws_hash_iter_next(struct aws_hash_iter *iter) { AWS_PRECONDITION(aws_hash_iter_is_valid(iter)); if (iter->slot == iter->limit) { iter->status = AWS_HASH_ITER_STATUS_DONE; return; } /* Build a nondet iterator, set the required fields, and copy it over */ struct aws_hash_iter rval; rval.map = iter->map; rval.limit = iter->limit; rval.slot = iter->slot + 1; rval.status = (rval.slot == rval.limit) ? 
AWS_HASH_ITER_STATUS_DONE : AWS_HASH_ITER_STATUS_READY_FOR_USE; #ifdef HASH_ITER_ELEMENT_GENERATOR HASH_ITER_ELEMENT_GENERATOR(&rval, iter); #endif *iter = rval; } /* delete always leaves the element unusable, so we'll just leave the element totally nondet */ void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents) { AWS_PRECONDITION(aws_hash_iter_is_valid(iter)); AWS_PRECONDITION( iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] must be ready for use."); AWS_PRECONDITION(aws_hash_iter_is_valid(iter)); AWS_PRECONDITION( iter->map->p_impl->entry_count > 0, "The hash_table_state pointed by input [iter] must contain at least one entry."); /* reduce the size of the underlying map */ iter->map->p_impl->entry_count--; /* Build a nondet iterator, set the required fields, and copy it over */ struct aws_hash_iter rval; rval.map = iter->map; rval.slot = iter->slot; rval.limit = iter->limit - 1; rval.status = AWS_HASH_ITER_STATUS_DELETE_CALLED; *iter = rval; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/aws_string_destroy_override.c000066400000000000000000000014411456575232400332420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /** * Allocation/Deallocation using the standard aws-c-common allocators requires * calls through function pointers. Until we get better function pointer support in * CBMC, this is expensive. Instead, since we know we're using "malloc" to do allocation * in CBMC proofs, we can just directly use "free" here, saving the function pointer derefences. * Otherwise the same as the real function. */ void aws_string_destroy(struct aws_string *str) { AWS_PRECONDITION(!str || aws_string_is_valid(str)); /* If the string has no allocator, its a static string and can't be freed */ if (str && str->allocator) { free(str); } } aws_string_new_from_array_override.c000066400000000000000000000020471456575232400345070ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** * Override the aws_string_new_from_array to just give non-det bytes, rather than doing the memcpy. * Since we already check AWS_MEM_IS_READABLE(bytes, len) in the precondition, this is sound - it overapproximates * the behaviour of the real function, and has all the same memory safety checks. */ struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, const uint8_t *bytes, size_t len) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(AWS_MEM_IS_READABLE(bytes, len)); size_t malloc_size = sizeof(struct aws_string) + 1 + len; struct aws_string *str = malloc(malloc_size); if (str == NULL) { return NULL; } __CPROVER_assume(str->allocator == allocator); __CPROVER_assume(str->len == len); __CPROVER_assume(str->bytes[len] == '\0'); AWS_RETURN_WITH_POSTCONDITION(str, aws_string_is_valid(str)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/empty-source-file.c000066400000000000000000000005111456575232400307600ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. * * Empty source file. This is required because some proofs, like * memset_using_uint64, do not link against any project files at all. 
This file * is used to fill in the PROJECT_SOURCES array in such cases. */ aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/error.c000066400000000000000000000010431456575232400265410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include static AWS_THREAD_LOCAL int tl_last_error = 0; /** * It overrides the original aws_raise_error_private implementation, to avoid * error handler functions (unnecessary for the verification process). */ void aws_raise_error_private(int err) { tl_last_error = err; } /** * It overrides the original aws_last_error implementation. */ int aws_last_error(void) { return tl_last_error; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/hash_table_generators.c000066400000000000000000000056251456575232400317450ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include /* This file contains generators useful in hash-table stubs. See * aws_hash_table_non_slots_override.c and aws_hash_iter_overrides.c for how these are used */ /** * The common case for the hash-table is that it maps strings to strings. This generates * fully non-deterministic strings for both key and value */ void hash_iterator_string_string_generator(struct aws_hash_iter *new_iter, const struct aws_hash_iter *old_iter) { (void)old_iter; if (new_iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE) { new_iter->element.key = ensure_string_is_allocated_nondet_length(); __CPROVER_assume(aws_string_is_valid(new_iter->element.key)); new_iter->element.value = ensure_string_is_allocated_nondet_length(); __CPROVER_assume(aws_string_is_valid(new_iter->element.value)); } } /** * The common case for the hash-table is that it maps strings to strings. This generates * fully non-deterministic strings for both key and value */ void hash_find_string_string_generator( const struct aws_hash_table *map, const void *key, struct aws_hash_element *p_elem) { if (p_elem) { p_elem->key = ensure_string_is_allocated_nondet_length(); __CPROVER_assume(aws_string_is_valid(p_elem->key)); p_elem->value = ensure_string_is_allocated_nondet_length(); __CPROVER_assume(aws_string_is_valid(p_elem->value)); } } /** * The common case for the hash-table is that it maps strings to strings. * Some code (for e.g. the aws_cryptosdk_enc_ctx_size() function in the ESDK uses the string header * but not the actual string itself. So this allocates the header, but not the string. * This ensures that no successful proof CAN use the bytes of the string. */ void hash_iterator_unbacked_string_string_generator( struct aws_hash_iter *new_iter, const struct aws_hash_iter *old_iter) { (void)old_iter; if (new_iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE) { new_iter->element.key = malloc(sizeof(struct aws_string)); new_iter->element.value = malloc(sizeof(struct aws_string)); } } /** * The common case for the hash-table is that it maps strings to strings. * Some code (for e.g. the aws_cryptosdk_enc_ctx_size() function in the ESDK uses the string header * but not the actual string itself. So this allocates the header, but not the string. * This ensures that no successful proof CAN use the bytes of the string. 
*/ void hash_find_unbacked_string_string_generator( const struct aws_hash_table *map, const void *key, struct aws_hash_element *p_elem) { p_elem->key = malloc(sizeof(struct aws_string)); p_elem->value = malloc(sizeof(struct aws_string)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memcpy_override.c000066400000000000000000000026051456575232400306060ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: memcpy * * This function overrides the original implementation of the memcpy function * from string.h. It copies the values of n bytes from the *src to the *dst. * It also checks if the size of the arrays pointed to by both the *dst and * *src parameters are at least n bytes and if they overlap. */ #undef memcpy #include #include /** * Override the version of memcpy used by CBMC. */ void *memcpy_impl(void *dst, const void *src, size_t n) { __CPROVER_precondition( __CPROVER_POINTER_OBJECT(dst) != __CPROVER_POINTER_OBJECT(src) || ((const char *)src >= (const char *)dst + n) || ((const char *)dst >= (const char *)src + n), "memcpy src/dst overlap"); __CPROVER_precondition(__CPROVER_r_ok(src, n), "memcpy source region readable"); __CPROVER_precondition(__CPROVER_w_ok(dst, n), "memcpy destination region writeable"); for (__CPROVER_size_t i = 0; i < n; ++i) ((char *)dst)[i] = ((const char *)src)[i]; return dst; } void *memcpy(void *dst, const void *src, size_t n) { return memcpy_impl(dst, src, n); } void *__builtin___memcpy_chk(void *dst, const void *src, __CPROVER_size_t n, __CPROVER_size_t size) { (void)size; return memcpy_impl(dst, src, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memcpy_override_havoc.c000066400000000000000000000025421456575232400317660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #undef memcpy #include #include /** * Override the version of memcpy used by CBMC. Users may not want to pay * for the cost of performing the computation of memcpy in proofs. In that * case, this stub at least checks for the preconditions and make sure to * havoc all elements pointed by *dst up to n. */ void *memcpy_impl(void *dst, const void *src, size_t n) { __CPROVER_precondition( __CPROVER_POINTER_OBJECT(dst) != __CPROVER_POINTER_OBJECT(src) || ((const char *)src >= (const char *)dst + n) || ((const char *)dst >= (const char *)src + n), "memcpy src/dst overlap"); __CPROVER_precondition(src != NULL && __CPROVER_r_ok(src, n), "memcpy source region readable"); __CPROVER_precondition(dst != NULL && __CPROVER_w_ok(dst, n), "memcpy destination region writeable"); if (n > 0) { size_t index; __CPROVER_assume(index < n); ((uint8_t *)dst)[index] = nondet_uint8_t(); } return dst; } void *memcpy(void *dst, const void *src, size_t n) { return memcpy_impl(dst, src, n); } void *__builtin___memcpy_chk(void *dst, const void *src, __CPROVER_size_t n, __CPROVER_size_t size) { (void)size; return memcpy_impl(dst, src, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memcpy_override_no_op.c000066400000000000000000000025021456575232400317740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: memcpy * * This function overrides the original implementation of the memcpy function * from string.h. 
It copies the values of n bytes from the *src to the *dst. * It also checks if the size of the arrays pointed to by both the *dst and * *src parameters are at least n bytes and if they overlap. */ #undef memcpy #include #include /** * Override the version of memcpy used by CBMC. */ void *memcpy_impl(void *dst, const void *src, size_t n) { __CPROVER_precondition( __CPROVER_POINTER_OBJECT(dst) != __CPROVER_POINTER_OBJECT(src) || ((const char *)src >= (const char *)dst + n) || ((const char *)dst >= (const char *)src + n), "memcpy src/dst overlap"); __CPROVER_precondition(src != NULL && __CPROVER_r_ok(src, n), "memcpy source region readable"); __CPROVER_precondition(dst != NULL && __CPROVER_w_ok(dst, n), "memcpy destination region writeable"); return dst; } void *memcpy(void *dst, const void *src, size_t n) { return memcpy_impl(dst, src, n); } void *__builtin___memcpy_chk(void *dst, const void *src, __CPROVER_size_t n, __CPROVER_size_t size) { (void)size; return memcpy_impl(dst, src, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memcpy_using_uint64.c000066400000000000000000000041311456575232400313210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: memcpy * * This function overrides the original implementation of the memcpy function * from string.h. It copies the values of n bytes from the *src to the *dst. * It also checks if the size of the arrays pointed to by both the *dst and * *src parameters are at least n bytes and if they overlap. * * This takes advantage of the fact that 64bit operations require fewer array updates, * which can make this version faster than the naive unrolling when used in CBMC. * Benchmark your particular proof to know for sure. */ #include #include void *memcpy_using_uint64_impl(void *dst, const void *src, size_t n) { __CPROVER_precondition( __CPROVER_POINTER_OBJECT(dst) != __CPROVER_POINTER_OBJECT(src) || ((const char *)src >= (const char *)dst + n) || ((const char *)dst >= (const char *)src + n), "memcpy src/dst overlap"); __CPROVER_precondition(__CPROVER_r_ok(src, n), "memcpy source region readable"); __CPROVER_precondition(__CPROVER_w_ok(dst, n), "memcpy destination region writeable"); size_t num_uint64s = n >> 3; size_t rem = n & 0x7; uint8_t *d = (uint8_t *)dst; const uint8_t *s = (const uint8_t *)src; // Use fallthrough to unroll the remainder loop switch (rem) { case 7: d[6] = s[6]; case 6: d[5] = s[5]; case 5: d[4] = s[4]; case 4: d[3] = s[3]; case 3: d[2] = s[2]; case 2: d[1] = s[1]; case 1: d[0] = s[0]; } d += rem; s += rem; for (size_t i = 0; i < num_uint64s; ++i) { ((uint64_t *)d)[i] = ((const uint64_t *)s)[i]; } return dst; } void *memcpy(void *dst, const void *src, size_t n) { return memcpy_using_uint64_impl(dst, src, n); } void *__builtin___memcpy_chk(void *dst, const void *src, __CPROVER_size_t n, __CPROVER_size_t size) { (void)size; return memcpy_using_uint64_impl(dst, src, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memmove_override.c000066400000000000000000000024761456575232400307670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #undef memmove #include #include /** * Override the version of memmove used by CBMC. 
* Source: public domain code copied from https://clc-wiki.net/wiki/memmove */ void *memmove_impl(void *dest, const void *src, size_t n) { if (n > 0) { (void)*(char *)dest; /* check that the memory is accessible */ (void)*(const char *)src; /* check that the memory is accessible */ (void)*(((unsigned char *)dest) + n - 1); /* check that the memory is accessible */ (void)*(((const unsigned char *)src) + n - 1); /* check that the memory is accessible */ unsigned char *pd = dest; const unsigned char *ps = src; if ((ps) < (pd)) { for (pd += n, ps += n; n--;) *--pd = *--ps; } else { while (n) { *pd++ = *ps++; n--; } } } return dest; } void *memmove(void *dest, const void *src, size_t n) { return memmove_impl(dest, src, n); } void *__builtin___memmove_chk(void *dest, const void *src, size_t n, size_t size) { (void)size; return memmove_impl(dest, src, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memmove_override_havoc.c000066400000000000000000000021601456575232400321350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #undef memmove #include #include /** * Override the version of memmove used by CBMC. Users may not want to pay * for the cost of performing the computation of memmove in proofs. In that * case, this stub at least checks for the preconditions and make sure to * havoc all elements pointed by *dest up to n. */ void *memmove_impl(void *dest, const void *src, size_t n) { __CPROVER_precondition(src != NULL && __CPROVER_r_ok(src, n), "memmove source region readable"); __CPROVER_precondition(dest != NULL && __CPROVER_w_ok(dest, n), "memmove destination region writeable"); if (n > 0) { size_t index; __CPROVER_assume(index < n); ((uint8_t *)dest)[index] = nondet_uint8_t(); } return dest; } void *memmove(void *dest, const void *src, size_t n) { return memmove_impl(dest, src, n); } void *__builtin___memmove_chk(void *dest, const void *src, size_t n, size_t size) { (void)size; return memmove_impl(dest, src, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memmove_override_no_op.c000066400000000000000000000017201456575232400321500ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #undef memmove #include #include /** * Override the version of memmove used by CBMC. */ void *memmove_impl(void *dest, const void *src, size_t n) { if (n > 0) { (void)*(char *)dest; /* check that the memory is accessible */ (void)*(const char *)src; /* check that the memory is accessible */ (void)*(((unsigned char *)dest) + n - 1); /* check that the memory is accessible */ (void)*(((const unsigned char *)src) + n - 1); /* check that the memory is accessible */ } return dest; } void *memmove(void *dest, const void *src, size_t n) { return memmove_impl(dest, src, n); } void *__builtin___memmove_chk(void *dest, const void *src, size_t n, size_t size) { (void)size; return memmove_impl(dest, src, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memset_override.c000066400000000000000000000013521456575232400306040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #undef memset #include #include /** * Override the version of memset used by CBMC. 
*/ void *memset_impl(void *s, int c, size_t n) { __CPROVER_precondition(__CPROVER_w_ok(s, n), "memset destination region writeable"); if (n > 0) { unsigned char *sp = (unsigned char *)s; for (__CPROVER_size_t i = 0; i < n; i++) sp[i] = c & UINT8_MAX; } return s; } void *memset(void *s, int c, size_t n) { return memset_impl(s, c, n); } void *__builtin___memset_chk(void *s, int c, size_t n, size_t os) { (void)os; return memset_impl(s, c, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memset_override_0.c000066400000000000000000000020661456575232400310260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: memset * * Override the version of memset used by CBMC. * This takes advantage of the fact that 64bit operations require fewer array updates, * which can make this version faster than the naive unrolling when used in CBMC. * Benchmark your particular proof to know for sure. */ #include #include #include void *memset_override_0_impl(void *dst, int c, size_t n) { __CPROVER_precondition(__CPROVER_w_ok(dst, n), "memset destination region writeable"); assert(c == 0); size_t num_uint64s = n >> 3; size_t rem = n & 0x7; assert(rem == 0); uint64_t *d = (uint64_t *)dst; for (size_t i = 0; i < num_uint64s; ++i) { d[i] = 0; } return dst; } void *memset(void *s, int c, size_t n) { return memset_override_0_impl(s, c, n); } void *__builtin___memset_chk(void *s, int c, size_t n, size_t os) { (void)os; return memset_override_0_impl(s, c, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memset_override_havoc.c000066400000000000000000000016571456575232400317740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #undef memset #include #include /** * Override the version of memset used by CBMC. Users may not want to pay * for the cost of performing the computation of memset in proofs. In that * case, this stub at least checks for the preconditions and make sure to * havoc all elements pointed by *s up to n. */ void *memset_impl(void *s, int c, size_t n) { __CPROVER_precondition(__CPROVER_w_ok(s, n), "memset destination region writeable"); if (n > 0) { size_t index; __CPROVER_assume(index < n); ((uint8_t *)s)[index] = nondet_uint8_t(); } return s; } void *memset(void *s, int c, size_t n) { return memset_impl(s, c, n); } void *__builtin___memset_chk(void *s, int c, size_t n, size_t os) { (void)os; return memset_impl(s, c, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memset_override_no_op.c000066400000000000000000000011201456575232400317670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #undef memset #include #include /** * Override the version of memset used by CBMC. */ void *memset_impl(void *s, int c, size_t n) { __CPROVER_precondition(__CPROVER_w_ok(s, n), "memset destination region writeable"); return s; } void *memset(void *s, int c, size_t n) { return memset_impl(s, c, n); } void *__builtin___memset_chk(void *s, int c, size_t n, size_t os) { (void)os; return memset_impl(s, c, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/memset_using_uint64.c000066400000000000000000000032441456575232400313250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: memset * * Override the version of memset used by CBMC. * This takes advantage of the fact that 64bit operations require fewer array updates, * which can make this version faster than the naive unrolling when used in CBMC. * Benchmark your particular proof to know for sure. */ #include #include void *memset_using_uint64_impl(void *dst, int c, size_t n) { __CPROVER_precondition(__CPROVER_w_ok(dst, n), "memset destination region writeable"); size_t num_uint64s = n >> 3; size_t rem = n & 0x7; uint8_t *d = (uint8_t *)dst; // Use fallthrough to unroll the remainder loop switch (rem) { case 7: d[6] = c; case 6: d[5] = c; case 5: d[4] = c; case 4: d[3] = c; case 3: d[2] = c; case 2: d[1] = c; case 1: d[0] = c; } d += rem; uint64_t compounded = 0; if (num_uint64s > 0 && c != 0) { uint8_t *chars = (uint8_t *)&compounded; chars[0] = c; chars[1] = c; chars[2] = c; chars[3] = c; chars[4] = c; chars[5] = c; chars[6] = c; chars[7] = c; } for (size_t i = 0; i < num_uint64s; ++i) { ((uint64_t *)d)[i] = compounded; } return dst; } void *memset(void *s, int c, size_t n) { return memset_using_uint64_impl(s, c, n); } void *__builtin___memset_chk(void *s, int c, size_t n, size_t os) { (void)os; return memset_using_uint64_impl(s, c, n); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/qsort_override.c000066400000000000000000000012761456575232400304670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include void qsort(void *base, __CPROVER_size_t num, __CPROVER_size_t size, int (*compare)(const void *, const void *)) { __CPROVER_precondition(__CPROVER_r_ok(base, num * size), "qsort base region readable"); __CPROVER_precondition(__CPROVER_w_ok(base, num * size), "qsort base region writeable"); __CPROVER_size_t index_a; __CPROVER_assume(index_a < num); __CPROVER_size_t index_b; __CPROVER_assume(index_b < num); __CPROVER_assume(index_a != index_b); compare((uint8_t *)base + (size * index_a), (uint8_t *)base + (size * index_b)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/s_emplace_item_override.c000066400000000000000000000031411456575232400322560ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include /** * Attempts to locate an element at key. If no such element was found, * creates a new element, with value initialized to NULL. In either case, a * pointer to the element is placed in *p_elem. * * If was_created is non-NULL, *was_created is set to 0 if an existing * element was found, or 1 is a new element was created. * * Returns AWS_OP_SUCCESS if an item was found or created. * Raises AWS_ERROR_OOM if hash table expansion was required and memory * allocation failed. 
*/ struct hash_table_entry *__CPROVER_file_local_hash_table_c_s_emplace_item( struct hash_table_state *state, struct hash_table_entry entry, size_t probe_idx) { AWS_PRECONDITION(hash_table_state_is_valid(state), "Input hash_table_state [state] must be valid."); if (entry.hash_code == 0) { AWS_POSTCONDITION(hash_table_state_is_valid(state), "Output hash_table_state [state] must be valid."); return NULL; } size_t index; __CPROVER_assume(index < state->size); __CPROVER_assume(state->slots[index].hash_code == 0); state->slots[index] = entry; size_t empty_slot_idx; __CPROVER_assume(hash_table_state_has_an_empty_slot(state, &empty_slot_idx)); AWS_POSTCONDITION(hash_table_state_is_valid(state), "Output hash_table_state [state] must be valid."); return &state->slots[index]; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/s_expand_table_override.c000066400000000000000000000030741456575232400322650ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include int __CPROVER_file_local_hash_table_c_s_update_template_size( struct hash_table_state *template, size_t expected_elements); int __CPROVER_file_local_hash_table_c_s_expand_table(struct aws_hash_table *map) { struct hash_table_state *old_state = map->p_impl; struct hash_table_state template = *old_state; if (__CPROVER_file_local_hash_table_c_s_update_template_size(&template, template.size * 2) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } /* Don't use s_alloc_state because that will call calloc and we want non-det values for the entries */ size_t required_bytes = sizeof(struct hash_table_state) + template.size * sizeof(struct hash_table_entry); /* An empty slot has hashcode 0. So this marks all slots as empty */ struct hash_table_state *new_state = aws_mem_acquire(template.alloc, required_bytes); if (new_state == NULL) { return AWS_OP_ERR; } struct hash_table_state *d1 = new_state; *new_state = template; map->p_impl = new_state; void *d2 = map; void *d3 = map->p_impl; aws_mem_release(new_state->alloc, old_state); __CPROVER_assume(aws_hash_table_is_valid(map)); size_t empty_slot_idx; __CPROVER_assume(aws_hash_table_has_an_empty_slot(map, &empty_slot_idx)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/s_remove_entry_override.c000066400000000000000000000021521456575232400323510ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include /* Clears an entry. Does _not_ invoke destructor callbacks. * Returns the last slot touched (note that if we wrap, we'll report an index * lower than the original entry's index) */ size_t __CPROVER_file_local_hash_table_c_s_remove_entry( struct hash_table_state *state, struct hash_table_entry *entry) { AWS_PRECONDITION(hash_table_state_is_valid(state)); AWS_PRECONDITION(state->entry_count > 0); AWS_PRECONDITION( entry >= &state->slots[0] && entry < &state->slots[state->size], "Input hash_table_entry [entry] pointer must point in the available slots."); state->entry_count--; /* This stub does not update any slots. */ size_t index; __CPROVER_assume(index <= state->size); AWS_RETURN_WITH_POSTCONDITION(index, hash_table_state_is_valid(state) && index <= state->size); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/s_remove_node_override.c000066400000000000000000000034641456575232400321440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ /** * FUNCTION: s_remove_node * * This function overrides the original implementation of the * s_remove_node function from priority_queue.h with a no_op. * * It is safe to use as long as there are no assertions on the * positions of elements in the priority queue after its invocation, * as it doesn't really remove an element and reorders the rest, but * it just reduces the length of both of the array lists of the * priority queue by 1. */ #include int __CPROVER_file_local_priority_queue_c_s_remove_node( struct aws_priority_queue *queue, void *item, size_t item_index) { assert(aws_priority_queue_is_valid(queue)); assert(item && AWS_MEM_IS_WRITABLE(item, queue->container.item_size)); /* If aws_array_list_get_at succeeds, it means that the item_index * is in range of the container, and thus the queue has at least * item_index + 1 elements */ if (aws_array_list_get_at(&queue->container, item, item_index)) { /* shouldn't happen, but if it does we've already raised an error... */ assert(aws_priority_queue_is_valid(queue)); return AWS_OP_ERR; } /* This can never underflow, as aws_array_list_get_at has * succeeded, which means that the container has at least one * item. Also if the backpointers array_list is initialized, it is * constrained to have the same length as the container array_list * (as queue is a valid priority queue) and thus is guaranteed to * not underflow. */ queue->container.length -= 1; if (queue->backpointers.data) { queue->backpointers.length -= 1; } assert(aws_priority_queue_is_valid(queue)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/s_sift_either_override.c000066400000000000000000000015601456575232400321420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: s_sift_either * * This function overrides the original implementation of the * s_sift_either function from priority_queue.h with a no_op. * * It is necessary to stub out s_sift_either because in order for it * to work, we have to have initialized all backpointers of the * priority queue correctly (which needs a loop). * * It is safe to stub out s_sift_either as long as we don't make any * assertions on the positions of elements in the priority queue and * their relative values. * */ #include void __CPROVER_file_local_priority_queue_c_s_sift_either(struct aws_priority_queue *queue, size_t index) { assert(aws_priority_queue_is_valid(queue)); assert(index < queue->container.length); } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/s_sift_up_override.c000066400000000000000000000011671456575232400313110ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: s_sift_up * * This function overrides the original implementation of the * s_sift_up function from priority_queue.h with a no_op. */ #include bool __CPROVER_file_local_priority_queue_c_s_sift_up(struct aws_priority_queue *queue, size_t index) { AWS_PRECONDITION(aws_priority_queue_is_valid(queue)); AWS_PRECONDITION(index < queue->container.length); bool did_move; AWS_POSTCONDITION(aws_priority_queue_is_valid(queue)); return did_move; } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/stubs/s_swap_override_no_op.c000066400000000000000000000025321456575232400320010ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ /** * FUNCTION: s_swap * * This function overrides s_swap and only swaps the backpointer * indexes. It is used to improve the running time of the CBMC proofs, * as this function adds heavy overhead on the proofs, especially the * ones that loop over the elements of the priority queue (such as * s_sift_up, s_sift_down, s_sift_either). * * It is safe to stub s_swap out, as long as no assertions about the * values of the elements in the priority queue are made * afterwards. Therefore, this stub cannot be used in proofs about the * order of the elements in the priority queue, or any other * functional correctness property concerning the values of the * elements in the queue. * */ #include void __CPROVER_file_local_priority_queue_c_s_swap(struct aws_priority_queue *queue, size_t a, size_t b) { assert(aws_priority_queue_is_valid(queue)); assert(a < queue->container.length); assert(b < queue->container.length); assert(aws_priority_queue_backpointer_index_valid(queue, a)); assert(aws_priority_queue_backpointer_index_valid(queue, b)); if (queue->backpointers.data) { assert(queue->backpointers.length > a); assert(queue->backpointers.length > b); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/000077500000000000000000000000001456575232400257275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/array_list.c000066400000000000000000000002341456575232400302430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/atomics.c000066400000000000000000000002311456575232400275260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/byte_order.c000066400000000000000000000002341456575232400302300ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/clock.c000066400000000000000000000002271456575232400271670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/encoding.c000066400000000000000000000002321456575232400276560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/error.c000066400000000000000000000002271456575232400272250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/linked_list.c000066400000000000000000000002351456575232400303740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/math.c000066400000000000000000000002261456575232400270240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/ring_buffer.c000066400000000000000000000002351456575232400303630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/string.c000066400000000000000000000002301456575232400273740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-common/verification/cbmc/uninline/zero.c000066400000000000000000000002261456575232400270520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/000077500000000000000000000000001456575232400215715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.clang-format000066400000000000000000000031611456575232400241450ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... 
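As an illustration of the style the .clang-format settings above produce (the function and parameter names here are hypothetical, chosen only so that the declaration overflows the 120-column limit): with AlignAfterOpenBracket: AlwaysBreak, BinPackParameters: false, PointerAlignment: Right, and an indent/continuation width of 4, a long declaration is broken after the opening parenthesis with one parameter per line:

int aws_compression_example_decode(
    struct aws_allocator *allocator,
    const struct aws_byte_cursor *input,
    struct aws_byte_buf *output,
    size_t *out_bytes_written);

A declaration that fits within 120 columns is left on a single line.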
aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.clang-tidy000066400000000000000000000013521456575232400236260ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers' WarningsAsErrors: '*' HeaderFilterRegex: '\./*' FormatStyle: 'file' # Use empty line filter to skip linting code we don't own CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSufix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h,-assert.h' ... aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/000077500000000000000000000000001456575232400231315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400253145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045351456575232400301340ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? A clear and concise description of the bug. validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. 
validations: required: false - type: input id: aws-c-compression-version attributes: label: aws-c-compression version used validations: required: true - type: input id: compiler-version attributes: label: Compiler and version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003361456575232400273060ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-c-compression/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400307050ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400311630ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400267300ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/workflows/000077500000000000000000000000001456575232400251665ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/workflows/ci.yml000066400000000000000000000144261456575232400263130ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-compression LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: linux-compat: runs-on: ubuntu-20.04 # latest strategy: fail-fast: false matrix: image: - manylinux1-x64 - manylinux1-x86 - manylinux2014-x64 - manylinux2014-x86 - fedora-34-x64 - opensuse-leap - rhel8-x64 - al2-x64 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} clang-sanitizers: runs-on: ubuntu-20.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc14: runs-on: windows-2019 # windows-2019 is last 
env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-compression\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-compression\build\aws-c-compression osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. downstream: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400302720ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400317350ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400326250ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.github/workflows/stale_issue.yml000066400000000000000000000045011456575232400302310ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deserves. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in a few days. We encourage you to check if this is still an issue in the latest release. Because it has been a few days since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! It looks like this PR hasn’t been active in a few days, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. 
dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/.gitignore000066400000000000000000000010271456575232400235610ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/CMakeLists.txt000066400000000000000000000064261456575232400243410ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.1) project(aws-c-compression C) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(CheckCCompilerFlag) include(AwsFindPackage) file(GLOB AWS_COMPRESSION_HEADERS "include/aws/compression/*.h" ) file(GLOB AWS_COMPRESSION_PRIV_HEADERS "include/aws/compression/private/*.h" ) file(GLOB AWS_COMPRESSION_TESTING_HEADERS "include/aws/testing/compression/*.h" "include/aws/testing/compression/*.inl" ) file(GLOB AWS_COMPRESSION_SRC "source/*.c" ) file(GLOB COMPRESSION_HEADERS ${AWS_COMPRESSION_HEADERS} ${AWS_COMPRESSION_PRIV_HEADERS} ${AWS_COMPRESSION_TESTING_HEADERS} ) file(GLOB COMPRESSION_SRC ${AWS_COMPRESSION_SRC} ) add_library(${PROJECT_NAME} ${LIBTYPE} ${COMPRESSION_HEADERS} ${COMPRESSION_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_COMPRESSION") aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_include_directories(${PROJECT_NAME} PUBLIC $ $) aws_use_package(aws-c-common) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) aws_check_headers(${PROJECT_NAME} ${AWS_COMPRESSION_HEADERS}) install(FILES ${AWS_COMPRESSION_HEADERS} DESTINATION "include/aws/compression") install(FILES ${AWS_COMPRESSION_TESTING_HEADERS} DESTINATION "include/aws/testing/compression") if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION 
"${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) option(BUILD_HUFFMAN_GENERATOR "Whether or not to build the aws-c-common-huffman-generator tool" OFF) if (BUILD_HUFFMAN_GENERATOR) add_subdirectory(source/huffman_generator) endif() include(CTest) if (BUILD_TESTING) add_subdirectory(tests) endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/CODE_OF_CONDUCT.md000066400000000000000000000004671456575232400243770ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/CONTRIBUTING.md000066400000000000000000000070001456575232400240170ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-compression/issues), or [recently closed](https://github.com/awslabs/aws-c-compression/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-compression/labels/help%20wanted) issues is a great place to start. 
## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-compression/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/LICENSE000066400000000000000000000261361456575232400226060ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/NOTICE000066400000000000000000000001721456575232400224750ustar00rootroot00000000000000AWS C Compression Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/README.md000066400000000000000000000160261456575232400230550ustar00rootroot00000000000000## AWS C Compression This is a cross-platform C99 implementation of compression algorithms such as gzip, and huffman encoding/decoding. Currently only huffman is implemented. ## License This library is licensed under the Apache 2.0 License. ## Usage ### Building Note that aws-c-compression has a dependency on aws-c-common: ``` git clone git@github.com:awslabs/aws-c-common.git cmake -DCMAKE_PREFIX_PATH= -DCMAKE_INSTALL_PREFIX= -S aws-c-common -B aws-c-common/build cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-c-compression.git cmake -DCMAKE_PREFIX_PATH= -DCMAKE_INSTALL_PREFIX= -S aws-c-compression -B aws-c-compression/build cmake --build aws-c-compression/build --target install ``` ### Huffman The Huffman implemention in this library is designed around the concept of a generic "symbol coder" object, which defines how each symbol (value between 0 and 255) is encoded and decoded. This object looks like this: ```c typedef struct aws_huffman_code (*aws_huffman_symbol_encoder)(uint8_t symbol, void *userdata); typedef uint8_t (*aws_huffman_symbol_decoder)(uint32_t bits, uint8_t *symbol, void *userdata); struct aws_huffman_symbol_coder { aws_huffman_symbol_encoder encode; aws_huffman_symbol_decoder decode; void *userdata; }; ``` These callbacks may be implemented manually, or you may use the included Huffman coder generator to generate one from a table definition file. The generator expects to be called with the following arguments: ```shell $ aws-c-compression-huffman-generator path/to/table.def path/to/generated.c coder_name ``` The table definition file should be in the following format: ```c /* sym bits code len */ HUFFMAN_CODE( 0, "1100101110", 0x32e, 10) HUFFMAN_CODE( 1, "1100101111", 0x32f, 10) /* ... 
*/ ``` The HUFFMAN_CODE macro expects 4 arguments: * sym: the symbol value [0-255] * bits: the bits representing the symbol in string form * code: the bits representing the symbol in numeric form * len: the number of bits used to represent the symbol > #### Note > This file may also be `#include`d in the following way to generate a static > list of codes: > ```c > /* Provides the HUFFMAN_CODE macro */ > #include > > static struct huffman_test_code_point code_points[] = { > #include "test_huffman_static_table.def" > }; > ``` This will emit a c file which exports a function with the following signiture: ```c struct aws_huffman_symbol_coder *{coder_name}_get_coder(); ``` Note that this function does not allocate, but maintains a static instance of the coder. An example implementation of this file is provided in `tests/test_huffman_static_table.def`. To use the coder, forward declare that function, and pass the result as the second argument to `aws_huffman_encoder_init` and `aws_huffman_decoder_init`. ```c struct aws_huffman_encoder encoder; aws_huffman_encoder_init(&encoder, {coder_name}_get_coder()); struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, {coder_name}_get_coder()) ``` #### Encoding ```c /** * Encode a symbol buffer into the output buffer. * * \param[in] encoder The encoder object to use * \param[in] to_encode The symbol buffer to encode * \param[in,out] length In: The length of to_decode. Out: The number of bytes read from to_encode * \param[in] output The buffer to write encoded bytes to * \param[in,out] output_size In: The size of output. Out: The number of bytes written to output * * \return AWS_OP_SUCCESS if encoding is successful, AWS_OP_ERR the code for the error that occured */ int aws_huffman_encode(struct aws_huffman_encoder *encoder, const char *to_encode, size_t *length, uint8_t *output, size_t *output_size); ``` The encoder is built to support partial encoding. This means that if there isn't enough space in `output`, the encoder will encode as much as possible, update `length` to indicate how much was consumed, `output_size` won't change, and `AWS_ERROR_SHORT_BUFFER` will be raised. `aws_huffman_encode` may then be called again like the following pseudo-code: ```c void encode_and_send(const char *to_encode, size_t size) { while (size > 0) { uint8_t output[some_chunk_size]; size_t output_size = sizeof(output); size_t bytes_read = size; aws_huffman_encode(encoder, to_encode, &bytes_read, output, &output_size); /* AWS_ERROR_SHORT_BUFFER was raised... */ send_output_to_someone_else(output, output_size); to_encode += bytes_read; size -= bytes_read; } /* Be sure to reset the encoder after use */ aws_huffman_encoder_reset(encoder); } ``` `aws_huffman_encoder` also has a `uint8_t` field called `eos_padding` that defines how any unwritten bits in the last byte of output are filled. The most significant bits will used. For example, if the last byte contains only 3 bits and `eos_padding` is `0b01010101`, `01010` will be appended to the byte. #### Decoding ```c /** * Decodes a byte buffer into the provided symbol array. * * \param[in] decoder The decoder object to use * \param[in] to_decode The encoded byte buffer to read from * \param[in,out] length In: The length of to_decode. Out: The number of bytes read from to_decode * \param[in] output The buffer to write decoded symbols to * \param[in,out] output_size In: The size of output. 
Out: The number of bytes written to output * * \return AWS_OP_SUCCESS if encoding is successful, AWS_OP_ERR the code for the error that occured */ int aws_huffman_decode(struct aws_huffman_decoder *decoder, const uint8_t *to_decode, size_t *length, char *output, size_t *output_size); ``` The decoder is built to support partial encoding. This means that if there isn't enough space in `output`, the decoder will decode as much as possible, update `length` to indicate how much was consumed, `output_size` won't change, and `AWS_ERROR_SHORT_BUFFER` will be raised. `aws_huffman_decode` may then be called again like the following pseudo-code: ```c void decode_and_send(const char *to_decode, size_t size) { while (size > 0) { uint8_t output[some_chunk_size]; size_t output_size = sizeof(output); size_t bytes_read = size; aws_huffman_decode(decoder, to_decode, &bytes_read, output, &output_size); /* AWS_ERROR_SHORT_BUFFER was raised... */ send_output_to_someone_else(output, output_size); to_decode += bytes_read; size -= bytes_read; } /* Be sure to reset the decoder after use */ aws_huffman_decoder_reset(decoder); } ``` Upon completion of a decode, the most significant bits of `decoder->working_bits` will contain the final bits of `to_decode` that could not match a symbol. This is useful for verifying the padding bits of a stream. For example, to validate that a stream ends in all 1's (like HPACK requires), you could do the following: ```c AWS_ASSERT(decoder->working_bits == UINT64_MAX << (64 - decoder->num_bits)); ``` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/builder.json000066400000000000000000000002341456575232400241110ustar00rootroot00000000000000{ "name": "aws-c-compression", "upstream": [ { "name": "aws-c-common" } ], "downstream": [ { "name": "aws-c-http" } ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/cmake/000077500000000000000000000000001456575232400226515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/cmake/aws-c-compression-config.cmake000066400000000000000000000010751456575232400304720ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-common) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. 
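# Illustrative consumer-side sketch (not part of this config file; my_app is a
# placeholder target name): after installing aws-c-common and aws-c-compression
# to a common prefix, a consuming project would typically write
#
#   find_package(aws-c-compression REQUIRED)
#   target_link_libraries(my_app PRIVATE AWS::aws-c-compression)
#
# and the block below then loads the shared or static exported targets,
# falling back to whichever variant is actually installed.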
if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/000077500000000000000000000000001456575232400235235ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/common-posix.sh000077500000000000000000000013661456575232400265200ustar00rootroot00000000000000#!/bin/bash set -e CMAKE_ARGS="$@" BUILD_PATH="/tmp/builds" mkdir -p $BUILD_PATH INSTALL_PATH="$BUILD_PATH/install" mkdir -p $INSTALL_PATH function install_library { pushd $BUILD_PATH git clone https://github.com/awslabs/$1.git cd $1 if [ -n "$2" ]; then git checkout $2 fi cmake -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH -DENABLE_SANITIZERS=ON $CMAKE_ARGS ./ make install popd } install_library aws-c-common if [ "$CODEBUILD_SRC_DIR" ]; then cd $CODEBUILD_SRC_DIR fi mkdir build cd build cmake -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH -DENABLE_SANITIZERS=ON $CMAKE_ARGS ../ make LSAN_OPTIONS=verbosity=1:log_threads=1 ctest --output-on-failure aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/common-windows.bat000066400000000000000000000016321456575232400271750ustar00rootroot00000000000000 set CMAKE_ARGS=%* set BUILDS_DIR=%TEMP%\builds set INSTALL_DIR=%BUILDS_DIR%\install mkdir %BUILDS_DIR% mkdir %INSTALL_DIR% CALL :install_library aws-c-common mkdir %BUILDS_DIR%\aws-c-compression-build cd %BUILDS_DIR%\aws-c-compression-build cmake %CMAKE_ARGS% -DCMAKE_BUILD_TYPE="RelWithDebInfo" -DCMAKE_INSTALL_PREFIX="%INSTALL_DIR%" -DCMAKE_PREFIX_PATH="%INSTALL_DIR%" %CODEBUILD_SRC_DIR% || goto error cmake --build . --config RelWithDebInfo || goto error ctest -V || goto error goto :EOF :install_library mkdir %BUILDS_DIR%\%~1-build cd %BUILDS_DIR%\%~1-build git clone https://github.com/awslabs/%~1.git cmake %CMAKE_ARGS% -DCMAKE_BUILD_TYPE="RelWithDebInfo" -DCMAKE_INSTALL_PREFIX="%INSTALL_DIR%" -DCMAKE_PREFIX_PATH="%INSTALL_DIR%" %~1 || goto error cmake --build . --target install --config RelWithDebInfo || goto error exit /b %errorlevel% :error echo Failed with error #%errorlevel%. 
exit /b %errorlevel%aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/linux-clang3-x64.yml000066400000000000000000000010051456575232400271650ustar00rootroot00000000000000version: 0.2 #this buildspec assumes the ubuntu 14.04 trusty image phases: install: commands: - sudo apt-get update -y - sudo apt-get update - sudo apt-get install clang-3.9 cmake3 clang-tidy-3.9 -y pre_build: commands: - export CC=clang-3.9 build: commands: - echo Build started on `date` - ./codebuild/common-posix.sh -DCMAKE_EXPORT_COMPILE_COMMANDS=ON - clang-tidy-3.9 -p=build **/*.c post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/linux-clang6-x64.yml000066400000000000000000000015121456575232400271730ustar00rootroot00000000000000version: 0.2 #this buildspec assumes the ubuntu 14.04 trusty image phases: install: commands: - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - - sudo add-apt-repository ppa:ubuntu-toolchain-r/test - sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-6.0 main" - sudo apt-get update -y - sudo apt-get install clang-6.0 cmake3 clang-tidy-6.0 clang-format-6.0 -y -f pre_build: commands: - export CC=clang-6.0 - export CLANG_FORMAT=clang-format-6.0 build: commands: - echo Build started on `date` - ./codebuild/common-posix.sh -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_FUZZ_TESTS=ON - clang-tidy-6.0 -p=build **/*.c - ./format-check.sh post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/linux-gcc-4x-x64.yml000066400000000000000000000006041456575232400271070ustar00rootroot00000000000000version: 0.2 #this build spec assumes the ubuntu 14.04 trusty image phases: install: commands: - sudo apt-get update -y - sudo apt-get install gcc cmake3 -y pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - ./codebuild/common-posix.sh post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/linux-gcc-4x-x86.yml000066400000000000000000000007021456575232400271120ustar00rootroot00000000000000version: 0.2 #this build spec assumes the ubuntu 14.04 trusty image phases: install: commands: - sudo apt-get update -y - sudo apt-get install gcc gcc-multilib cmake3 -y pre_build: commands: - export CC=gcc build: commands: - echo Build started on `date` - ./codebuild/common-posix.sh -DCMAKE_C_FLAGS="-m32" -DCMAKE_LINK_FLAGS="-m32" post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/linux-gcc-5x-x64.yml000066400000000000000000000007041456575232400271110ustar00rootroot00000000000000version: 0.2 #this build spec assumes the ubuntu 14.04 trusty image phases: install: commands: - sudo add-apt-repository ppa:ubuntu-toolchain-r/test - sudo apt-get update -y - sudo apt-get install gcc-5 cmake3 -y pre_build: commands: - export CC=gcc-5 build: commands: - echo Build started on `date` - ./codebuild/common-posix.sh post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/linux-gcc-6x-x64.yml000066400000000000000000000007041456575232400271120ustar00rootroot00000000000000version: 0.2 #this build spec assumes the ubuntu 14.04 trusty image phases: install: commands: - sudo add-apt-repository ppa:ubuntu-toolchain-r/test - sudo apt-get update -y - sudo apt-get install gcc-6 cmake3 -y pre_build: commands: - export CC=gcc-6 build: commands: - echo Build started on 
`date` - ./codebuild/common-posix.sh post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/linux-gcc-7x-x64.yml000066400000000000000000000007041456575232400271130ustar00rootroot00000000000000version: 0.2 #this build spec assumes the ubuntu 14.04 trusty image phases: install: commands: - sudo add-apt-repository ppa:ubuntu-toolchain-r/test - sudo apt-get update -y - sudo apt-get install gcc-7 cmake3 -y pre_build: commands: - export CC=gcc-7 build: commands: - echo Build started on `date` - ./codebuild/common-posix.sh post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/windows-msvc-2015-x86.yml000066400000000000000000000001601456575232400277130ustar00rootroot00000000000000version: 0.2 phases: build: commands: - .\codebuild\common-windows.bat -G "Visual Studio 14 2015" aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/windows-msvc-2015.yml000066400000000000000000000001661456575232400272760ustar00rootroot00000000000000version: 0.2 phases: build: commands: - .\codebuild\common-windows.bat -G "Visual Studio 14 2015 Win64" aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/codebuild/windows-msvc-2017.yml000066400000000000000000000001661456575232400273000ustar00rootroot00000000000000version: 0.2 phases: build: commands: - .\codebuild\common-windows.bat -G "Visual Studio 15 2017 Win64" aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/format-check.sh000077500000000000000000000010361456575232400244730ustar00rootroot00000000000000#!/usr/bin/env bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests -type f \( -name '*.h' -o -name '*.c' \) -not -name 'test_huffman_static.c'` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/000077500000000000000000000000001456575232400232145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/aws/000077500000000000000000000000001456575232400240065ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/aws/compression/000077500000000000000000000000001456575232400263475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/aws/compression/compression.h000066400000000000000000000021501456575232400310570ustar00rootroot00000000000000#ifndef AWS_COMPRESSION_COMPRESSION_H #define AWS_COMPRESSION_COMPRESSION_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_COMPRESSION_PACKAGE_ID 3 enum aws_compression_error { AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMPRESSION_PACKAGE_ID), AWS_ERROR_END_COMPRESSION_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMPRESSION_PACKAGE_ID) }; AWS_EXTERN_C_BEGIN /** * Initializes internal datastructures used by aws-c-compression. * Must be called before using any functionality in aws-c-compression. */ AWS_COMPRESSION_API void aws_compression_library_init(struct aws_allocator *alloc); /** * Clean up internal datastructures used by aws-c-compression. * Must not be called until application is done using functionality in aws-c-compression. 
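 *
 * Illustrative usage sketch (an assumption added for clarity, not text from
 * this header; aws_default_allocator() comes from aws-c-common):
 *
 *     aws_compression_library_init(aws_default_allocator());
 *     ...use the aws-c-compression APIs, e.g. the huffman coder...
 *     aws_compression_library_clean_up();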
*/ AWS_COMPRESSION_API void aws_compression_library_clean_up(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMPRESSION_COMPRESSION_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/aws/compression/exports.h000066400000000000000000000021401456575232400302210ustar00rootroot00000000000000#ifndef AWS_COMPRESSION_EXPORTS_H #define AWS_COMPRESSION_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_COMPRESSION_USE_IMPORT_EXPORT # ifdef AWS_COMPRESSION_EXPORTS # define AWS_COMPRESSION_API __declspec(dllexport) # else # define AWS_COMPRESSION_API __declspec(dllimport) # endif /* AWS_COMPRESSION_EXPORTS */ # else # define AWS_COMPRESSION_API # endif /* AWS_COMPRESSION_USE_IMPORT_EXPORT */ #else /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_COMPRESSION_USE_IMPORT_EXPORT) && \ defined(AWS_COMPRESSION_EXPORTS) # define AWS_COMPRESSION_API __attribute__((visibility("default"))) # else # define AWS_COMPRESSION_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ #endif /* AWS_COMPRESSION_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/aws/compression/huffman.h000066400000000000000000000112201456575232400301400ustar00rootroot00000000000000#ifndef AWS_COMPRESSION_HUFFMAN_H #define AWS_COMPRESSION_HUFFMAN_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL /** * Represents an encoded code */ struct aws_huffman_code { /** * The value of the code * \note The pattern is stored in the least significant bits */ uint32_t pattern; /** The number of bits in pattern to use */ uint8_t num_bits; }; /** * Function used to encode a single symbol to an aws_huffman_code * * \param[in] symbol The symbol to encode * \param[in] userdata Optional userdata (aws_huffman_symbol_coder.userdata) * * \returns The code representing the symbol. If this symbol is not recognized, * return a code with num_bits set to 0. */ typedef struct aws_huffman_code(aws_huffman_symbol_encoder_fn)(uint8_t symbol, void *userdata); /** * Function used to decode a code into a symbol * * \param[in] bits The bits to attept to decode a symbol from * \param[out] symbol The symbol found. Do not write to if no valid symbol * found \param[in] userdata Optional userdata * (aws_huffman_symbol_coder.userdata) * * \returns The number of bits read from bits */ typedef uint8_t(aws_huffman_symbol_decoder_fn)(uint32_t bits, uint8_t *symbol, void *userdata); /** * Structure used to define how symbols are encoded and decoded */ struct aws_huffman_symbol_coder { aws_huffman_symbol_encoder_fn *encode; aws_huffman_symbol_decoder_fn *decode; void *userdata; }; /** * Structure used for persistent encoding. * Allows for reading from or writing to incomplete buffers. */ struct aws_huffman_encoder { /* Params */ struct aws_huffman_symbol_coder *coder; uint8_t eos_padding; /* State */ struct aws_huffman_code overflow_bits; }; /** * Structure used for persistent decoding. * Allows for reading from or writing to incomplete buffers. 
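 *
 * Typical lifecycle (illustrative sketch; the functions named are declared
 * later in this header, and my_coder stands in for any aws_huffman_symbol_coder):
 *
 *     struct aws_huffman_decoder decoder;
 *     aws_huffman_decoder_init(&decoder, my_coder);
 *     aws_huffman_decode(&decoder, &to_decode, &output);  (repeat as encoded bytes arrive)
 *     aws_huffman_decoder_reset(&decoder);                (before reusing on a new stream)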
*/ struct aws_huffman_decoder { /* Param */ struct aws_huffman_symbol_coder *coder; bool allow_growth; /* State */ uint64_t working_bits; uint8_t num_bits; }; AWS_EXTERN_C_BEGIN /** * Initialize a encoder object with a symbol coder. */ AWS_COMPRESSION_API void aws_huffman_encoder_init(struct aws_huffman_encoder *encoder, struct aws_huffman_symbol_coder *coder); /** * Resets a decoder for use with a new binary stream */ AWS_COMPRESSION_API void aws_huffman_encoder_reset(struct aws_huffman_encoder *encoder); /** * Initialize a decoder object with a symbol coder. */ AWS_COMPRESSION_API void aws_huffman_decoder_init(struct aws_huffman_decoder *decoder, struct aws_huffman_symbol_coder *coder); /** * Resets a decoder for use with a new binary stream */ AWS_COMPRESSION_API void aws_huffman_decoder_reset(struct aws_huffman_decoder *decoder); /** * Get the byte length of to_encode post-encoding. * * \param[in] encoder The encoder object to use * \param[in] to_encode The symbol buffer to encode * * \return The length of the encoded string. */ AWS_COMPRESSION_API size_t aws_huffman_get_encoded_length(struct aws_huffman_encoder *encoder, struct aws_byte_cursor to_encode); /** * Encode a symbol buffer into the output buffer. * * \param[in] encoder The encoder object to use * \param[in] to_encode The symbol buffer to encode * \param[in] output The buffer to write encoded bytes to * * \return AWS_OP_SUCCESS if encoding is successful, AWS_OP_ERR otherwise */ AWS_COMPRESSION_API int aws_huffman_encode( struct aws_huffman_encoder *encoder, struct aws_byte_cursor *to_encode, struct aws_byte_buf *output); /** * Decodes a byte buffer into the provided symbol array. * * \param[in] decoder The decoder object to use * \param[in] to_decode The encoded byte buffer to read from * \param[in] output The buffer to write decoded symbols to. * If decoder is set to allow growth, capacity will be increased when necessary. * * \return AWS_OP_SUCCESS if encoding is successful, AWS_OP_ERR otherwise */ AWS_COMPRESSION_API int aws_huffman_decode( struct aws_huffman_decoder *decoder, struct aws_byte_cursor *to_decode, struct aws_byte_buf *output); /** * Set whether or not to increase capacity when the output buffer fills up while decoding. * This is false by default. */ AWS_COMPRESSION_API void aws_huffman_decoder_allow_growth(struct aws_huffman_decoder *decoder, bool allow_growth); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMPRESSION_HUFFMAN_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/aws/compression/private/000077500000000000000000000000001456575232400300215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/include/aws/compression/private/huffman_testing.h000066400000000000000000000073121456575232400333560ustar00rootroot00000000000000#ifndef AWS_COMPRESSION_HUFFMAN_TESTING_H #define AWS_COMPRESSION_HUFFMAN_TESTING_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /** * The intended use of file is to allow testing of huffman character coders. 
* By doing the following, you can ensure the output of encoders decoders are * correct: * * \code{c} * static struct huffman_test_code_point code_points[] = { * #include "test_huffman_static_table.def" * }; * \endcode * * You may then iterate over each code point in the array, and test the * following (pseudo-code): * * \code{c} for (cp in code_points) { * AWS_ASSERT(my_coder->encode(cp.symbol) == cp.pattern); * AWS_ASSERT(my_coder->decode(cp.pattern) == cp.symbol); * } * \endcode */ /** * Structure containing all relevant information about a code point */ struct huffman_test_code_point { uint8_t symbol; struct aws_huffman_code code; }; /** * Macro to be used when including a table def file, populates an array of * huffman_test_code_points */ #define HUFFMAN_CODE(psymbol, pbit_string, pbit_pattern, pnum_bits) \ { \ .symbol = (psymbol), \ .code = \ { \ .pattern = (pbit_pattern), \ .num_bits = (pnum_bits), \ }, \ }, /** * Function to test a huffman coder to ensure the transitive property applies * (input == decode(incode(input))) * * \param[in] coder The symbol coder to test * \param[in] input The buffer to test * \param[in] size The size of input * \param[in] encoded_size The length of the encoded buffer. Pass 0 to skip check. * \param[out] error_string In case of failure, the error string to report * * \return AWS_OP_SUCCESS on success, AWS_OP_FAILURE on failure (error_string * will be set) */ AWS_COMPRESSION_API int huffman_test_transitive( struct aws_huffman_symbol_coder *coder, const char *input, size_t size, size_t encoded_size, const char **error_string); /** * Function to test a huffman coder to ensure the transitive property applies * when doing partial encodes/decodes (input == decode(incode(input))) * * \param[in] coder The symbol coder to test * \param[in] input The buffer to test * \param[in] size The size of input * \param[in] encoded_size The length of the encoded buffer. Pass 0 to skip check. * \param[in] output_chunk_size The amount of output to write at once * \param[out] error_string In case of failure, the error string to * report * * \return AWS_OP_SUCCESS on success, AWS_OP_FAILURE on failure (error_string * will be set) */ AWS_COMPRESSION_API int huffman_test_transitive_chunked( struct aws_huffman_symbol_coder *coder, const char *input, size_t size, size_t encoded_size, size_t output_chunk_size, const char **error_string); #endif /* AWS_COMPRESSION_HUFFMAN_TESTING_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/source/000077500000000000000000000000001456575232400230715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/source/compression.c000066400000000000000000000024331456575232400256000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #define DEFINE_ERROR_INFO(CODE, STR) \ [(CODE)-AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMPRESSION_PACKAGE_ID)] = \ AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-compression") /* clang-format off */ static struct aws_error_info s_errors[] = { DEFINE_ERROR_INFO( AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL, "Compression encountered an unknown symbol."), }; /* clang-format on */ static struct aws_error_info_list s_error_list = { .error_list = s_errors, .count = AWS_ARRAY_SIZE(s_errors), }; static bool s_library_initialized = false; void aws_compression_library_init(struct aws_allocator *alloc) { if (s_library_initialized) { return; } s_library_initialized = true; aws_common_library_init(alloc); aws_register_error_info(&s_error_list); } void aws_compression_library_clean_up(void) { if (!s_library_initialized) { return; } s_library_initialized = false; aws_unregister_error_info(&s_error_list); aws_common_library_clean_up(); } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/source/huffman.c000066400000000000000000000214651456575232400246710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #define BITSIZEOF(val) (sizeof(val) * 8) static uint8_t MAX_PATTERN_BITS = BITSIZEOF(((struct aws_huffman_code *)0)->pattern); void aws_huffman_encoder_init(struct aws_huffman_encoder *encoder, struct aws_huffman_symbol_coder *coder) { AWS_ASSERT(encoder); AWS_ASSERT(coder); AWS_ZERO_STRUCT(*encoder); encoder->coder = coder; encoder->eos_padding = UINT8_MAX; } void aws_huffman_encoder_reset(struct aws_huffman_encoder *encoder) { AWS_ASSERT(encoder); AWS_ZERO_STRUCT(encoder->overflow_bits); } void aws_huffman_decoder_init(struct aws_huffman_decoder *decoder, struct aws_huffman_symbol_coder *coder) { AWS_ASSERT(decoder); AWS_ASSERT(coder); AWS_ZERO_STRUCT(*decoder); decoder->coder = coder; } void aws_huffman_decoder_reset(struct aws_huffman_decoder *decoder) { decoder->working_bits = 0; decoder->num_bits = 0; } void aws_huffman_decoder_allow_growth(struct aws_huffman_decoder *decoder, bool allow_growth) { decoder->allow_growth = allow_growth; } /* Much of encode is written in a helper function, so this struct helps avoid passing all the parameters through by hand */ struct encoder_state { struct aws_huffman_encoder *encoder; struct aws_byte_buf *output_buf; uint8_t working; uint8_t bit_pos; }; /* Helper function to write a single bit_pattern to memory (or working_bits if * out of buffer space) */ static int encode_write_bit_pattern(struct encoder_state *state, struct aws_huffman_code bit_pattern) { AWS_PRECONDITION(state->output_buf->len < state->output_buf->capacity); if (bit_pattern.num_bits == 0) { return aws_raise_error(AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL); } uint8_t bits_to_write = bit_pattern.num_bits; while (bits_to_write > 0) { uint8_t bits_for_current = bits_to_write > state->bit_pos ? 
state->bit_pos : bits_to_write; /* Chop off the top 0s and bits that have already been read */ uint8_t bits_to_cut = (BITSIZEOF(bit_pattern.pattern) - bit_pattern.num_bits) + (bit_pattern.num_bits - bits_to_write); /* Write the appropiate number of bits to this byte Shift to the left to cut any unneeded bits Shift to the right to position the bits correctly */ state->working |= (bit_pattern.pattern << bits_to_cut) >> (MAX_PATTERN_BITS - state->bit_pos); bits_to_write -= bits_for_current; state->bit_pos -= bits_for_current; if (state->bit_pos == 0) { /* Save the whole byte */ aws_byte_buf_write_u8(state->output_buf, state->working); state->bit_pos = 8; state->working = 0; if (state->output_buf->len == state->output_buf->capacity) { state->encoder->overflow_bits.num_bits = bits_to_write; if (bits_to_write) { /* If buffer is full and there are remaining bits, save them to overflow and return */ bits_to_cut += bits_for_current; state->encoder->overflow_bits.pattern = (bit_pattern.pattern << bits_to_cut) >> (MAX_PATTERN_BITS - bits_to_write); return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } } } return AWS_OP_SUCCESS; } size_t aws_huffman_get_encoded_length(struct aws_huffman_encoder *encoder, struct aws_byte_cursor to_encode) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode)); size_t num_bits = 0; while (to_encode.len) { uint8_t new_byte = 0; aws_byte_cursor_read_u8(&to_encode, &new_byte); struct aws_huffman_code code_point = encoder->coder->encode(new_byte, encoder->coder->userdata); num_bits += code_point.num_bits; } size_t length = num_bits / 8; /* Round up */ if (num_bits % 8) { ++length; } return length; } int aws_huffman_encode( struct aws_huffman_encoder *encoder, struct aws_byte_cursor *to_encode, struct aws_byte_buf *output) { AWS_ASSERT(encoder); AWS_ASSERT(encoder->coder); AWS_ASSERT(to_encode); AWS_ASSERT(output); struct encoder_state state = { .working = 0, .bit_pos = 8, }; state.encoder = encoder; state.output_buf = output; /* Write any bits leftover from previous invocation */ if (encoder->overflow_bits.num_bits) { if (output->len == output->capacity) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (encode_write_bit_pattern(&state, encoder->overflow_bits)) { return AWS_OP_ERR; } encoder->overflow_bits.num_bits = 0; } while (to_encode->len) { if (output->len == output->capacity) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } uint8_t new_byte = 0; aws_byte_cursor_read_u8(to_encode, &new_byte); struct aws_huffman_code code_point = encoder->coder->encode(new_byte, encoder->coder->userdata); if (encode_write_bit_pattern(&state, code_point)) { return AWS_OP_ERR; } } /* The following code only runs when the buffer has written successfully */ /* If whole buffer processed, write EOS */ if (state.bit_pos != 8) { struct aws_huffman_code eos_cp; eos_cp.pattern = encoder->eos_padding; eos_cp.num_bits = state.bit_pos; encode_write_bit_pattern(&state, eos_cp); AWS_ASSERT(state.bit_pos == 8); } return AWS_OP_SUCCESS; } /* Decode's reading is written in a helper function, so this struct helps avoid passing all the parameters through by hand */ struct huffman_decoder_state { struct aws_huffman_decoder *decoder; struct aws_byte_cursor *input_cursor; }; static void decode_fill_working_bits(struct huffman_decoder_state *state) { /* Read from bytes in the buffer until there are enough bytes to process */ while (state->decoder->num_bits < MAX_PATTERN_BITS && state->input_cursor->len) { /* Read the appropiate number of bits from this byte */ uint8_t 
new_byte = 0; aws_byte_cursor_read_u8(state->input_cursor, &new_byte); uint64_t positioned = ((uint64_t)new_byte) << (BITSIZEOF(state->decoder->working_bits) - 8 - state->decoder->num_bits); state->decoder->working_bits |= positioned; state->decoder->num_bits += 8; } } int aws_huffman_decode( struct aws_huffman_decoder *decoder, struct aws_byte_cursor *to_decode, struct aws_byte_buf *output) { AWS_ASSERT(decoder); AWS_ASSERT(decoder->coder); AWS_ASSERT(to_decode); AWS_ASSERT(output); struct huffman_decoder_state state; state.decoder = decoder; state.input_cursor = to_decode; /* Measures how much of the input was read */ size_t bits_left = decoder->num_bits + to_decode->len * 8; while (1) { decode_fill_working_bits(&state); uint8_t symbol; uint8_t bits_read = decoder->coder->decode( (uint32_t)(decoder->working_bits >> (BITSIZEOF(decoder->working_bits) - MAX_PATTERN_BITS)), &symbol, decoder->coder->userdata); if (bits_read == 0) { if (bits_left < MAX_PATTERN_BITS) { /* More input is needed to continue */ return AWS_OP_SUCCESS; } /* Unknown symbol found */ return aws_raise_error(AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL); } if (bits_read > bits_left) { /* Check if the buffer has been overrun. Note: because of the check in decode_fill_working_bits, the buffer won't actually overrun, instead there will be 0's in the bottom of working_bits. */ return AWS_OP_SUCCESS; } if (output->len == output->capacity) { /* Check if we've hit the end of the output buffer. * Grow buffer, or raise error, depending on settings */ if (decoder->allow_growth) { /* Double the capacity */ if (aws_byte_buf_reserve_relative(output, output->capacity)) { return AWS_OP_ERR; } } else { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } bits_left -= bits_read; decoder->working_bits <<= bits_read; decoder->num_bits -= bits_read; /* Store the found symbol */ aws_byte_buf_write_u8(output, symbol); /* Successfully decoded whole buffer */ if (bits_left == 0) { return AWS_OP_SUCCESS; } } /* This case is unreachable */ AWS_ASSERT(0); return aws_raise_error(AWS_ERROR_INVALID_STATE); } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/source/huffman_generator/000077500000000000000000000000001456575232400265635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/source/huffman_generator/CMakeLists.txt000066400000000000000000000011221456575232400313170ustar00rootroot00000000000000file(GLOB GENERATIR_SRC "generator.c") set(GENERATOR_BINARY_NAME ${CMAKE_PROJECT_NAME}-huffman-generator) add_executable(${GENERATOR_BINARY_NAME} ${GENERATIR_SRC}) aws_set_common_properties(${GENERATOR_BINARY_NAME}) aws_add_sanitizers(${GENERATOR_BINARY_NAME}) target_include_directories(${GENERATOR_BINARY_NAME} PRIVATE ${PROJECT_SOURCE_DIR}/include ) if (MSVC) target_compile_definitions(${GENERATOR_BINARY_NAME} PRIVATE "-D_CRT_SECURE_NO_WARNINGS") endif () install( TARGETS ${GENERATOR_BINARY_NAME} RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin ) aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/source/huffman_generator/generator.c000066400000000000000000000245101456575232400307170ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include /* NOLINT(fuchsia-restrict-system-includes) */ #include #include #include /* NOLINT(fuchsia-restrict-system-includes) */ #include #include #include struct huffman_code { uint8_t num_bits; uint32_t bits; }; struct huffman_code_point { uint8_t symbol; struct huffman_code code; }; enum { num_code_points = 256 }; static struct huffman_code_point code_points[num_code_points]; static size_t skip_whitespace(const char *str) { size_t offset = 0; while (str[offset] == ' ' || str[offset] == '\t') { ++offset; } return offset; } static size_t read_past_comma(const char *str) { size_t offset = 0; while (str[offset] != ',') { ++offset; } return offset + 1; } int read_code_points(const char *input_path) { memset(code_points, 0, sizeof(code_points)); FILE *file = aws_fopen(input_path, "r"); if (!file) { printf("Failed to open file '%s' for read.", input_path); return 1; } static const char HC_KEYWORD[] = "HUFFMAN_CODE"; static const size_t HC_KW_LEN = sizeof(HC_KEYWORD) - 1; int is_comment = 0; char line[120]; while (fgets(line, sizeof(line), file) != NULL) { const size_t line_length = strlen(line); if (line[0] == '#') { /* Ignore preprocessor directives */ continue; } for (size_t i = 0; i < line_length - 1; ++i) { if (!is_comment) { if (line[i] == '/' && line[i + 1] == '*') { is_comment = 1; } else if (strncmp(&line[i], HC_KEYWORD, HC_KW_LEN) == 0) { /* Found code, parse it */ /* Skip macro */ const char *current_char = &line[i + HC_KW_LEN]; current_char += skip_whitespace(current_char); /* Skip ( */ assert(*current_char == '('); ++current_char; /* Parse symbol */ uint8_t symbol = (uint8_t)atoi(current_char); struct huffman_code_point *code_point = &code_points[symbol]; assert(!code_point->symbol && "Symbol already found!"); code_point->symbol = symbol; current_char += read_past_comma(current_char); /* Skip the binary string form */ current_char += read_past_comma(current_char); /* Parse bits */ code_point->code.bits = (uint32_t)strtol(current_char, NULL, 16); current_char += read_past_comma(current_char); code_point->code.num_bits = (uint8_t)atoi(current_char); } } else if (line[i] == '*' && line[i + 1] == '/') { is_comment = 0; } } } fclose(file); return 0; } void code_write(struct huffman_code *code, FILE *file) { for (int bit_idx = code->num_bits - 1; bit_idx >= 0; --bit_idx) { char bit = ((code->bits >> bit_idx) & 0x1) ? '1' : '0'; fprintf(file, "%c", bit); } } struct huffman_node { struct huffman_code_point *value; struct huffman_code code; struct huffman_node *children[2]; }; struct huffman_node *huffman_node_new(struct huffman_code code) { struct huffman_node *node = malloc(sizeof(struct huffman_node)); memset(node, 0, sizeof(struct huffman_node)); node->code = code; return node; } struct huffman_node *huffman_node_new_value(struct huffman_code_point *value) { struct huffman_node *node = malloc(sizeof(struct huffman_node)); memset(node, 0, sizeof(struct huffman_node)); node->value = value; node->code = value->code; return node; } /* note: Does not actually free the memory. This is useful so this function may be called on the tree root. 
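 * In main() below, for example, the root is a stack variable: clean up frees
 * the heap-allocated child nodes and only zeroes the node that was passed in.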
*/ void huffman_node_clean_up(struct huffman_node *node) { for (int i = 0; i < 2; ++i) { if (node->children[i]) { huffman_node_clean_up(node->children[i]); free(node->children[i]); } } memset(node, 0, sizeof(struct huffman_node)); } /* This function writes what to do if the pattern for node is a match */ void huffman_node_write_decode_handle_value(struct huffman_node *node, FILE *file) { if (!node) { /* Invalid node, return 0 */ fprintf(file, " return 0; /* invalid node */\n"); } else if (node->value) { /* Attempt to inline value return */ fprintf( file, " *symbol = %u;\n" " return %u;\n", node->value->symbol, node->value->code.num_bits); } else { /* Otherwise go to branch check */ fprintf(file, " goto node_"); code_write(&node->code, file); fprintf(file, ";\n"); } } void huffman_node_write_decode(struct huffman_node *node, FILE *file, uint8_t current_bit) { /* Value nodes should have been inlined into parent branch checks */ assert(!node->value); assert(node->children[0] || node->children[1]); static int write_label = 0; if (write_label) { /* Write this node's label after the first run */ fprintf(file, "node_"); code_write(&node->code, file); fprintf(file, ":\n"); } write_label = 1; /* Check 1 bit pattern */ uint32_t single_bit_mask = (uint32_t)(1ull << (31 - current_bit)); uint32_t left_aligned_pattern = ((node->code.bits << 1) + 1) << (31 - node->code.num_bits); uint32_t check_pattern = left_aligned_pattern & single_bit_mask; fprintf(file, " if (bits & 0x%x) {\n", check_pattern); huffman_node_write_decode_handle_value(node->children[1], file); fprintf(file, " } else {\n"); /* Child 0 is valid, go there */ huffman_node_write_decode_handle_value(node->children[0], file); fprintf(file, " }\n\n"); /* Recursively write child nodes */ for (uint8_t i = 0; i < 2; ++i) { struct huffman_node *child = node->children[i]; if (child && !child->value) { huffman_node_write_decode(child, file, current_bit + 1); } } } int main(int argc, char *argv[]) { if (argc != 4) { fprintf( stderr, "generator expects 3 arguments: [input file] [output file] " "[encoding name]\n" "A function of the following signature will be exported:\n" "struct aws_huffman_symbol_coder *[encoding name]_get_coder()\n"); return 1; } const char *input_file = argv[1]; const char *output_file = argv[2]; const char *decoder_name = argv[3]; if (read_code_points(input_file)) { return 1; } struct huffman_node tree_root; memset(&tree_root, 0, sizeof(struct huffman_node)); /* Populate the tree */ for (size_t i = 0; i < num_code_points; ++i) { struct huffman_code_point *value = &code_points[i]; if (value->code.num_bits == 0) { continue; } struct huffman_node *current = &tree_root; uint8_t bit_idx = value->code.num_bits - 1; while (1) { struct huffman_code code = value->code; code.bits >>= bit_idx; code.num_bits = value->code.num_bits - bit_idx; uint8_t encoded_bit = code.bits & 0x01; assert(encoded_bit == 0 || encoded_bit == 1); if (bit_idx == 0) { /* Done traversing, add value as leaf */ assert(!current->children[encoded_bit]); current->children[encoded_bit] = huffman_node_new_value(value); break; } if (current->children[encoded_bit]) { /* Not at the end yet, keep traversing */ current = current->children[encoded_bit]; } else { /* Not at the end yet, but this is the first time down this * path. 
*/ struct huffman_node *new_node = huffman_node_new(code); current->children[encoded_bit] = new_node; current = new_node; } --bit_idx; } } /* Open the file */ FILE *file = aws_fopen(output_file, "w"); if (!file) { printf("Failed to open file '%s' for write.", output_file); return 1; } /* Write the file/function header */ fprintf( file, "/**\n" " * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n" " * SPDX-License-Identifier: Apache-2.0.\n" " */\n" "\n" "/* WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT. */\n" "/* clang-format off */\n" "\n" "#include \n" "\n" "static struct aws_huffman_code code_points[] = {\n"); for (size_t i = 0; i < num_code_points; ++i) { struct huffman_code_point *cp = &code_points[i]; fprintf( file, " { .pattern = 0x%x, .num_bits = %u }, /* '%c' %u */\n", cp->code.bits, cp->code.num_bits, isprint(cp->symbol) ? cp->symbol : ' ', cp->symbol); } fprintf( file, "};\n" "\n" "static struct aws_huffman_code encode_symbol(uint8_t symbol, void " "*userdata) {\n" " (void)userdata;\n\n" " return code_points[symbol];\n" "}\n" "\n" "/* NOLINTNEXTLINE(readability-function-size) */\n" "static uint8_t decode_symbol(uint32_t bits, uint8_t *symbol, void " "*userdata) {\n" " (void)userdata;\n\n"); /* Traverse the tree */ huffman_node_write_decode(&tree_root, file, 0); /* Write the function footer & encode header */ fprintf( file, "}\n" "\n" "struct aws_huffman_symbol_coder *%s_get_coder(void) {\n" "\n" " static struct aws_huffman_symbol_coder coder = {\n" " .encode = encode_symbol,\n" " .decode = decode_symbol,\n" " .userdata = NULL,\n" " };\n" " return &coder;\n" "}\n", decoder_name); fclose(file); huffman_node_clean_up(&tree_root); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/source/huffman_testing.c000066400000000000000000000134101456575232400264150ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /** * See aws/testing/compression/huffman.h for docs. 
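 *
 * Rough usage sketch (the coder pointer is a placeholder; both helpers are
 * defined below in this file):
 *
 *     const char *error = NULL;
 *     int result = huffman_test_transitive(coder, "www.example.com", 15, 0, &error);
 *
 * A non-zero result means the encode/decode round trip failed and error points
 * at a static string naming the failing step; passing 0 for encoded_size skips
 * the encoded-length check.
 *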
*/ #define AWS_UNSTABLE_TESTING_API #include #include #include int huffman_test_transitive( struct aws_huffman_symbol_coder *coder, const char *input, size_t size, size_t encoded_size, const char **error_string) { struct aws_huffman_encoder encoder; aws_huffman_encoder_init(&encoder, coder); struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, coder); const size_t intermediate_buffer_size = size * 2; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, intermediate_buffer, intermediate_buffer_size); memset(intermediate_buffer, 0, intermediate_buffer_size); AWS_VARIABLE_LENGTH_ARRAY(char, output_buffer, size); memset(output_buffer, 0, size); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array((uint8_t *)input, size); struct aws_byte_buf intermediate_buf = aws_byte_buf_from_empty_array(intermediate_buffer, intermediate_buffer_size); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, size); int result = aws_huffman_encode(&encoder, &to_encode, &intermediate_buf); if (result != AWS_OP_SUCCESS) { *error_string = "aws_huffman_encode failed"; return AWS_OP_ERR; } if (to_encode.len != 0) { *error_string = "not all data encoded"; return AWS_OP_ERR; } if (encoded_size && intermediate_buf.len != encoded_size) { *error_string = "encoded length is incorrect"; return AWS_OP_ERR; } struct aws_byte_cursor intermediate_cur = aws_byte_cursor_from_buf(&intermediate_buf); result = aws_huffman_decode(&decoder, &intermediate_cur, &output_buf); if (result != AWS_OP_SUCCESS) { *error_string = "aws_huffman_decode failed"; return AWS_OP_ERR; } if (intermediate_cur.len != 0) { *error_string = "not all encoded data was decoded"; return AWS_OP_ERR; } if (output_buf.len != size) { *error_string = "decode output size incorrect"; return AWS_OP_ERR; } if (memcmp(input, output_buffer, size) != 0) { *error_string = "decoded data does not match input data"; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int huffman_test_transitive_chunked( struct aws_huffman_symbol_coder *coder, const char *input, size_t size, size_t encoded_size, size_t output_chunk_size, const char **error_string) { struct aws_huffman_encoder encoder; aws_huffman_encoder_init(&encoder, coder); struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, coder); const size_t intermediate_buffer_size = size * 2; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, intermediate_buffer, intermediate_buffer_size); memset(intermediate_buffer, 0, intermediate_buffer_size); AWS_VARIABLE_LENGTH_ARRAY(char, output_buffer, size); memset(output_buffer, 0, size); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(input, size); struct aws_byte_buf intermediate_buf = aws_byte_buf_from_empty_array(intermediate_buffer, (size_t)-1); intermediate_buf.capacity = 0; struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, (size_t)-1); output_buf.capacity = 0; int result = AWS_OP_SUCCESS; { do { const size_t previous_intermediate_len = intermediate_buf.len; intermediate_buf.capacity += output_chunk_size; result = aws_huffman_encode(&encoder, &to_encode, &intermediate_buf); if (intermediate_buf.len == previous_intermediate_len) { *error_string = "encode didn't write any data"; return AWS_OP_ERR; } if (result != AWS_OP_SUCCESS && aws_last_error() != AWS_ERROR_SHORT_BUFFER) { *error_string = "encode returned wrong error code"; return AWS_OP_ERR; } } while (result != AWS_OP_SUCCESS); } if (result != AWS_OP_SUCCESS) { *error_string = "aws_huffman_encode failed"; return AWS_OP_ERR; } if (intermediate_buf.len > 
intermediate_buffer_size) { *error_string = "too much data encoded"; return AWS_OP_ERR; } if (encoded_size && intermediate_buf.len != encoded_size) { *error_string = "encoded length is incorrect"; return AWS_OP_ERR; } struct aws_byte_cursor intermediate_cur = aws_byte_cursor_from_buf(&intermediate_buf); { do { const size_t previous_output_len = output_buf.len; output_buf.capacity += output_chunk_size; if (output_buf.capacity > size) { output_buf.capacity = size; } result = aws_huffman_decode(&decoder, &intermediate_cur, &output_buf); if (output_buf.len == previous_output_len) { *error_string = "decode didn't write any data"; return AWS_OP_ERR; } if (result != AWS_OP_SUCCESS && aws_last_error() != AWS_ERROR_SHORT_BUFFER) { *error_string = "decode returned wrong error code"; return AWS_OP_ERR; } } while (result != AWS_OP_SUCCESS); } if (result != AWS_OP_SUCCESS) { *error_string = "aws_huffman_decode failed"; return AWS_OP_ERR; } if (output_buf.len != size) { *error_string = "decode output size incorrect"; return AWS_OP_ERR; } if (memcmp(input, output_buffer, size) != 0) { *error_string = "decoded data does not match input data"; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/000077500000000000000000000000001456575232400227335ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/CMakeLists.txt000066400000000000000000000020741456575232400254760ustar00rootroot00000000000000include(CTest) include(AwsTestHarness) include(AwsLibFuzzer) enable_testing() file(GLOB TEST_SRC "*.c") file(GLOB TEST_HDRS "*.h") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) add_test_case(library_init) add_test_case(huffman_symbol_encoder) add_test_case(huffman_encoder) add_test_case(huffman_encoder_all_code_points) add_test_case(huffman_encoder_partial_output) add_test_case(huffman_encoder_exact_output) add_test_case(huffman_symbol_decoder) add_test_case(huffman_decoder) add_test_case(huffman_decoder_all_code_points) add_test_case(huffman_decoder_partial_input) add_test_case(huffman_decoder_partial_output) add_test_case(huffman_decoder_allow_growth) add_test_case(huffman_transitive) add_test_case(huffman_transitive_even_bytes) add_test_case(huffman_transitive_all_code_points) add_test_case(huffman_transitive_chunked) generate_test_driver(${PROJECT_NAME}-tests) if(MSVC) target_compile_definitions(${PROJECT_NAME}-tests PRIVATE "-D_CRT_SECURE_NO_WARNINGS") endif() file(GLOB FUZZ_TESTS "fuzz/*.c") aws_add_fuzz_tests("${FUZZ_TESTS}" "test_huffman_static.c" "") aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/fuzz/000077500000000000000000000000001456575232400237315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/fuzz/decode.c000066400000000000000000000017501456575232400253230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include struct aws_huffman_symbol_coder *test_get_coder(void); int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { if (!size) { return 0; } struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, test_get_coder()); size_t output_buffer_size = size * 2; char output_buffer[output_buffer_size]; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(data, size); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, AWS_ARRAY_SIZE(output_buffer)); /* Don't really care about result, just make sure there's no crash */ aws_huffman_decode(&decoder, &to_decode, &output_buf); return 0; // Non-zero return values are reserved for future use. } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/fuzz/transitive.c000066400000000000000000000012411456575232400262630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct aws_huffman_symbol_coder *test_get_coder(void); int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { if (!size) { return 0; } const char *error_message = NULL; int result = huffman_test_transitive(test_get_coder(), (const char *)data, size, 0, &error_message); ASSERT_SUCCESS(result, error_message); return 0; // Non-zero return values are reserved for future use. } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/fuzz/transitive_chunked.c000066400000000000000000000016121456575232400277660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct aws_huffman_symbol_coder *test_get_coder(void); int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { if (!size) { return 0; } static const size_t step_sizes[] = {1, 2, 4, 8, 16, 32, 64, 128}; for (size_t i = 0; i < sizeof(step_sizes) / sizeof(size_t); ++i) { size_t step_size = step_sizes[i]; const char *error_message = NULL; int result = huffman_test_transitive_chunked(test_get_coder(), (const char *)data, size, 0, step_size, &error_message); ASSERT_SUCCESS(result, error_message); } return 0; // Non-zero return values are reserved for future use. } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/huffman_test.c000066400000000000000000000424471456575232400255750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include /* Exported by generated file */ struct aws_huffman_symbol_coder *test_get_coder(void); static struct huffman_test_code_point s_code_points[] = { #include "test_huffman_static_table.def" }; enum { NUM_CODE_POINTS = sizeof(s_code_points) / sizeof(s_code_points[0]) }; /* Useful data for testing */ static const char s_url_string[] = "www.example.com"; enum { URL_STRING_LEN = sizeof(s_url_string) - 1 }; static uint8_t s_encoded_url[] = {0x9e, 0x79, 0xeb, 0x9b, 0x04, 0xb3, 0x5a, 0x94, 0xd5, 0xe0, 0x4c, 0xdf}; enum { ENCODED_URL_LEN = sizeof(s_encoded_url) }; static const char s_all_codes[] = " !\"#$%&'()*+,-./" "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[" "\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"; enum { ALL_CODES_LEN = sizeof(s_all_codes) - 1 }; static uint8_t s_encoded_codes[] = { 0x26, 0x9b, 0xa7, 0x69, 0xfa, 0x86, 0xa3, 0xa9, 0x56, 0xd4, 0xf5, 0x4d, 0x57, 0x56, 0xb9, 0xc4, 0x57, 0xd5, 0xf5, 0x8d, 0x67, 0x5a, 0xd6, 0xf5, 0xcd, 0x77, 0x5e, 0xd7, 0xf6, 0x0d, 0x87, 0x62, 0xd8, 0xf6, 0x4d, 0x97, 0x66, 0xba, 0xd9, 0xf6, 0x8b, 0xbc, 0x4e, 0x2b, 0x17, 0x8c, 0xc6, 0xe3, 0xaf, 0x36, 0x9d, 0xab, 0x1f, 0x90, 0xda, 0xf6, 0xcc, 0x8e, 0xdb, 0xb7, 0x6d, 0xf7, 0xbb, 0x86, 0x4a, 0xfb, 0x71, 0xc9, 0xee, 0x5b, 0x9e, 0xe9, 0xba, 0xee, 0xdb, 0xbe, 0xf0, 0x5b, 0x10, 0x42, 0x68, 0xac, 0xc6, 0x7b, 0xf9, 0x25, 0x99, 0x09, 0xb5, 0x94, 0x52, 0xd8, 0xdc, 0x09, 0xf0, 0x68, 0xde, 0x77, 0xad, 0xef, 0x7c, 0xdf, 0x7f}; enum { ENCODED_CODES_LEN = sizeof(s_encoded_codes) }; static const size_t s_step_sizes[] = {1, 2, 4, 8, 16, 32, 64, 128}; enum { NUM_STEP_SIZES = sizeof(s_step_sizes) / sizeof(s_step_sizes[0]) }; AWS_TEST_CASE(huffman_symbol_encoder, test_huffman_symbol_encoder) static int test_huffman_symbol_encoder(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding each character */ struct aws_huffman_symbol_coder *coder = test_get_coder(); for (size_t i = 0; i < NUM_CODE_POINTS; ++i) { struct huffman_test_code_point *value = &s_code_points[i]; struct aws_huffman_code code = coder->encode(value->symbol, NULL); ASSERT_UINT_EQUALS(value->code.pattern, code.pattern); ASSERT_UINT_EQUALS(value->code.num_bits, code.num_bits); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_encoder, test_huffman_encoder) static int test_huffman_encoder(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding a short url */ uint8_t output_buffer[ENCODED_URL_LEN + 1]; AWS_ZERO_ARRAY(output_buffer); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, ENCODED_URL_LEN); struct aws_huffman_symbol_coder *coder = test_get_coder(); struct aws_huffman_encoder encoder; aws_huffman_encoder_init(&encoder, coder); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(s_url_string, URL_STRING_LEN); const size_t encoded_length = aws_huffman_get_encoded_length(&encoder, to_encode); ASSERT_UINT_EQUALS(ENCODED_URL_LEN, encoded_length); int result = aws_huffman_encode(&encoder, &to_encode, &output_buf); ASSERT_SUCCESS(result); ASSERT_UINT_EQUALS(ENCODED_URL_LEN, output_buf.len); ASSERT_UINT_EQUALS(0, output_buffer[ENCODED_URL_LEN]); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_url, ENCODED_URL_LEN, output_buf.buffer, output_buf.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_encoder_all_code_points, test_huffman_encoder_all_code_points) static int test_huffman_encoder_all_code_points(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding a sequence of all character values expressable as * characters 
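 * (i.e. the printable ASCII characters in s_all_codes, 0x20 ' ' through 0x7e '~')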
*/ uint8_t output_buffer[ENCODED_CODES_LEN + 1]; AWS_ZERO_ARRAY(output_buffer); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, ENCODED_CODES_LEN); struct aws_huffman_symbol_coder *coder = test_get_coder(); struct aws_huffman_encoder encoder; aws_huffman_encoder_init(&encoder, coder); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(s_all_codes, ALL_CODES_LEN); const size_t encoded_length = aws_huffman_get_encoded_length(&encoder, to_encode); ASSERT_UINT_EQUALS(ENCODED_CODES_LEN, encoded_length); int result = aws_huffman_encode(&encoder, &to_encode, &output_buf); ASSERT_SUCCESS(result); ASSERT_UINT_EQUALS(ENCODED_CODES_LEN, output_buf.len); ASSERT_UINT_EQUALS(0, output_buffer[ENCODED_CODES_LEN]); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_codes, ENCODED_CODES_LEN, output_buf.buffer, output_buf.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_encoder_partial_output, test_huffman_encoder_partial_output) static int test_huffman_encoder_partial_output(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding when the output buffer size is limited */ struct aws_huffman_encoder encoder; aws_huffman_encoder_init(&encoder, test_get_coder()); uint8_t output_buffer[ENCODED_CODES_LEN]; for (size_t i = 0; i < NUM_STEP_SIZES; ++i) { const size_t step_size = s_step_sizes[i]; aws_huffman_encoder_reset(&encoder); struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(s_all_codes, ALL_CODES_LEN); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, (size_t)-1); output_buf.capacity = 0; AWS_ZERO_ARRAY(output_buffer); do { output_buf.capacity += step_size; if (output_buf.capacity > ENCODED_CODES_LEN) { output_buf.capacity = ENCODED_CODES_LEN; } const size_t previous_output_len = output_buf.len; int result = aws_huffman_encode(&encoder, &to_encode, &output_buf); ASSERT_TRUE(output_buf.len > previous_output_len); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_codes, output_buf.len, output_buf.buffer, output_buf.len); if (output_buf.len == ENCODED_CODES_LEN) { ASSERT_SUCCESS(result); } else { ASSERT_UINT_EQUALS(AWS_ERROR_SHORT_BUFFER, aws_last_error()); aws_reset_error(); } } while (output_buf.len < ENCODED_CODES_LEN); ASSERT_UINT_EQUALS(ENCODED_CODES_LEN, output_buf.len); ASSERT_BIN_ARRAYS_EQUALS(s_encoded_codes, ENCODED_CODES_LEN, output_buf.buffer, output_buf.len); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_encoder_exact_output, test_huffman_encoder_exact_output) static int test_huffman_encoder_exact_output(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding when the output buffer size is exactly the necessary size */ struct aws_huffman_encoder encoder; aws_huffman_encoder_init(&encoder, test_get_coder()); uint8_t output_buffer[2]; struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, 2); /* Encode a character that uses 8 bits into a 1 byte buffer */ struct aws_byte_cursor to_encode = aws_byte_cursor_from_array("?", 1); uint8_t expected_1byte[] = {0xba}; output_buf.capacity = 1; ASSERT_SUCCESS(aws_huffman_encode(&encoder, &to_encode, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_1byte, 1, output_buf.buffer, output_buf.len); /* Encode 2 characters that sum to 16 bits, into a 2 byte buffer * y: 101000 * z: 1101111001 * combined: 1010001101111001 == 0xa379 */ to_encode = aws_byte_cursor_from_array("yz", 2); uint8_t expected_2byte[] = {0xa3, 0x79}; output_buf.capacity = 2; aws_byte_buf_reset(&output_buf, true /*zero*/); 
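    /* Per the generated table, 'y' (6 bits) and 'z' (10 bits) fill the two
     * bytes exactly (0xa3 0x79), so the encode below should succeed with no
     * EOS padding and no short-buffer error. */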
ASSERT_SUCCESS(aws_huffman_encode(&encoder, &to_encode, &output_buf)); ASSERT_BIN_ARRAYS_EQUALS(expected_2byte, 2, output_buf.buffer, output_buf.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_symbol_decoder, test_huffman_symbol_decoder) static int test_huffman_symbol_decoder(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test decoding each character */ struct aws_huffman_symbol_coder *coder = test_get_coder(); for (size_t i = 0; i < NUM_CODE_POINTS; ++i) { struct huffman_test_code_point *value = &s_code_points[i]; uint32_t bit_pattern = value->code.pattern << (32 - value->code.num_bits); uint8_t out; size_t bits_read = coder->decode(bit_pattern, &out, NULL); ASSERT_UINT_EQUALS(value->symbol, out); ASSERT_UINT_EQUALS(value->code.num_bits, bits_read); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_decoder, test_huffman_decoder) static int test_huffman_decoder(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test decoding a short url */ char output_buffer[URL_STRING_LEN + 1]; AWS_ZERO_ARRAY(output_buffer); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, URL_STRING_LEN); struct aws_huffman_symbol_coder *coder = test_get_coder(); struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, coder); struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(s_encoded_url, ENCODED_URL_LEN); int result = aws_huffman_decode(&decoder, &to_decode, &output_buf); ASSERT_SUCCESS(result); ASSERT_UINT_EQUALS(URL_STRING_LEN, output_buf.len); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_UINT_EQUALS(output_buffer[URL_STRING_LEN], 0); ASSERT_BIN_ARRAYS_EQUALS(s_url_string, URL_STRING_LEN, output_buf.buffer, output_buf.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_decoder_all_code_points, test_huffman_decoder_all_code_points) static int test_huffman_decoder_all_code_points(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test decoding a sequence of all character values expressable as * characters */ char output_buffer[ALL_CODES_LEN + 1]; AWS_ZERO_ARRAY(output_buffer); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, ALL_CODES_LEN); struct aws_huffman_symbol_coder *coder = test_get_coder(); struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, coder); struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(s_encoded_codes, ENCODED_CODES_LEN); int result = aws_huffman_decode(&decoder, &to_decode, &output_buf); ASSERT_SUCCESS(result); ASSERT_UINT_EQUALS(ALL_CODES_LEN, output_buf.len); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_UINT_EQUALS(output_buffer[ALL_CODES_LEN], 0); ASSERT_BIN_ARRAYS_EQUALS(s_all_codes, ALL_CODES_LEN, output_buf.buffer, output_buf.len); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_decoder_partial_input, test_huffman_decoder_partial_input) static int test_huffman_decoder_partial_input(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test decoding a buffer in chunks */ struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, test_get_coder()); char output_buffer[150]; for (size_t i = 0; i < NUM_STEP_SIZES; ++i) { const size_t step_size = s_step_sizes[i]; aws_huffman_decoder_reset(&decoder); struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(s_encoded_codes, ENCODED_CODES_LEN); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, ALL_CODES_LEN); AWS_ZERO_ARRAY(output_buffer); do { const size_t chunk_size = step_size < 
to_decode.len ? step_size : to_decode.len; struct aws_byte_cursor to_decode_chunk = aws_byte_cursor_advance(&to_decode, chunk_size); int result = aws_huffman_decode(&decoder, &to_decode_chunk, &output_buf); ASSERT_UINT_EQUALS(0, to_decode_chunk.len); ASSERT_BIN_ARRAYS_EQUALS(s_all_codes, output_buf.len, output_buf.buffer, output_buf.len); if (output_buf.len == ALL_CODES_LEN) { ASSERT_SUCCESS(result); } } while (output_buf.len < ALL_CODES_LEN); ASSERT_UINT_EQUALS(ALL_CODES_LEN, output_buf.len); ASSERT_BIN_ARRAYS_EQUALS(s_all_codes, ALL_CODES_LEN, output_buf.buffer, output_buf.len); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_decoder_partial_output, test_huffman_decoder_partial_output) static int test_huffman_decoder_partial_output(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test decoding when the output buffer size is limited */ struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, test_get_coder()); char output_buffer[150]; for (size_t i = 0; i < NUM_STEP_SIZES; ++i) { const size_t step_size = s_step_sizes[i]; aws_huffman_decoder_reset(&decoder); struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(s_encoded_codes, ENCODED_CODES_LEN); struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, (size_t)-1); output_buf.capacity = 0; /* Can't set above because it sets buffer to 0 */ AWS_ZERO_ARRAY(output_buffer); do { output_buf.capacity += step_size; if (output_buf.capacity > ALL_CODES_LEN) { output_buf.capacity = ALL_CODES_LEN; } const size_t previous_output_size = output_buf.len; int result = aws_huffman_decode(&decoder, &to_decode, &output_buf); ASSERT_TRUE(output_buf.len > previous_output_size); ASSERT_BIN_ARRAYS_EQUALS(s_all_codes, output_buf.len, output_buf.buffer, output_buf.len); if (output_buf.len == ALL_CODES_LEN) { ASSERT_SUCCESS(result); } else { ASSERT_UINT_EQUALS(AWS_ERROR_SHORT_BUFFER, aws_last_error()); aws_reset_error(); } } while (output_buf.len < ALL_CODES_LEN); ASSERT_UINT_EQUALS(ALL_CODES_LEN, output_buf.len); ASSERT_BIN_ARRAYS_EQUALS(s_all_codes, ALL_CODES_LEN, output_buf.buffer, output_buf.len); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_decoder_allow_growth, test_huffman_decoder_allow_growth) static int test_huffman_decoder_allow_growth(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Test that decoder will grow output buffer if allow-growth is set */ struct aws_huffman_decoder decoder; aws_huffman_decoder_init(&decoder, test_get_coder()); aws_huffman_decoder_allow_growth(&decoder, true); struct aws_byte_buf output_buf; ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, 1 /* way too small */)); struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(s_encoded_url, ENCODED_URL_LEN); ASSERT_SUCCESS(aws_huffman_decode(&decoder, &to_decode, &output_buf)); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_BIN_ARRAYS_EQUALS(s_url_string, URL_STRING_LEN, output_buf.buffer, output_buf.len); aws_byte_buf_clean_up(&output_buf); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_transitive, test_huffman_transitive) static int test_huffman_transitive(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding a short url and immediately decoding it */ const char *error_message = NULL; int result = huffman_test_transitive(test_get_coder(), s_url_string, URL_STRING_LEN, ENCODED_URL_LEN, &error_message); ASSERT_SUCCESS(result, error_message); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_transitive_even_bytes, test_huffman_transitive_even_bytes) 
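/* Per the generated table, 'c', 'd', 'f', and 'h' are all 6-bit codes, so
 * "cdfh" encodes to exactly 24 bits (3 bytes); that is where the expected
 * encoded size of 3 in the call below comes from. */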
static int test_huffman_transitive_even_bytes(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding a string that encodes to a multiple of 8 bits */ const char *error_message = NULL; int result = huffman_test_transitive(test_get_coder(), "cdfh", 4, 3, &error_message); ASSERT_SUCCESS(result, error_message); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_transitive_all_code_points, test_huffman_transitive_all_code_points) static int test_huffman_transitive_all_code_points(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding a sequence of all character values expressable as * characters and immediately decoding it */ const char *error_message = NULL; int result = huffman_test_transitive(test_get_coder(), s_all_codes, ALL_CODES_LEN, ENCODED_CODES_LEN, &error_message); ASSERT_SUCCESS(result, error_message); return AWS_OP_SUCCESS; } AWS_TEST_CASE(huffman_transitive_chunked, test_huffman_transitive_chunked) static int test_huffman_transitive_chunked(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding a sequence of all character values expressable as * characters and immediately decoding it */ for (size_t i = 0; i < NUM_STEP_SIZES; ++i) { const size_t step_size = s_step_sizes[i]; const char *error_message = NULL; int result = huffman_test_transitive_chunked( test_get_coder(), s_all_codes, ALL_CODES_LEN, ENCODED_CODES_LEN, step_size, &error_message); ASSERT_SUCCESS(result, error_message); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/library_test.c000066400000000000000000000013441456575232400256040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_TEST_CASE(library_init, s_test_library_init) static int s_test_library_init(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_compression_library_init(allocator); /* Ensure that errors were registered */ const char *err_name = aws_error_name(AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL); const char *expected = "AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL"; ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), err_name, strlen(err_name)); aws_compression_library_clean_up(); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/test_huffman_static.c000066400000000000000000001375171456575232400271470ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT. 
*/ #include static struct aws_huffman_code code_points[] = { {.pattern = 0x32e, .num_bits = 10}, /* ' ' 0 */ {.pattern = 0x32f, .num_bits = 10}, /* ' ' 1 */ {.pattern = 0x330, .num_bits = 10}, /* ' ' 2 */ {.pattern = 0x331, .num_bits = 10}, /* ' ' 3 */ {.pattern = 0x332, .num_bits = 10}, /* ' ' 4 */ {.pattern = 0x333, .num_bits = 10}, /* ' ' 5 */ {.pattern = 0x334, .num_bits = 10}, /* ' ' 6 */ {.pattern = 0x335, .num_bits = 10}, /* ' ' 7 */ {.pattern = 0x336, .num_bits = 10}, /* ' ' 8 */ {.pattern = 0x337, .num_bits = 10}, /* ' ' 9 */ {.pattern = 0xb8, .num_bits = 8}, /* ' ' 10 */ {.pattern = 0x338, .num_bits = 10}, /* ' ' 11 */ {.pattern = 0x339, .num_bits = 10}, /* ' ' 12 */ {.pattern = 0x33a, .num_bits = 10}, /* ' ' 13 */ {.pattern = 0x33b, .num_bits = 10}, /* ' ' 14 */ {.pattern = 0x33c, .num_bits = 10}, /* ' ' 15 */ {.pattern = 0x33d, .num_bits = 10}, /* ' ' 16 */ {.pattern = 0x33e, .num_bits = 10}, /* ' ' 17 */ {.pattern = 0x33f, .num_bits = 10}, /* ' ' 18 */ {.pattern = 0x340, .num_bits = 10}, /* ' ' 19 */ {.pattern = 0x341, .num_bits = 10}, /* ' ' 20 */ {.pattern = 0x342, .num_bits = 10}, /* ' ' 21 */ {.pattern = 0x343, .num_bits = 10}, /* ' ' 22 */ {.pattern = 0x344, .num_bits = 10}, /* ' ' 23 */ {.pattern = 0x345, .num_bits = 10}, /* ' ' 24 */ {.pattern = 0x346, .num_bits = 10}, /* ' ' 25 */ {.pattern = 0x347, .num_bits = 10}, /* ' ' 26 */ {.pattern = 0x348, .num_bits = 10}, /* ' ' 27 */ {.pattern = 0x349, .num_bits = 10}, /* ' ' 28 */ {.pattern = 0x34a, .num_bits = 10}, /* ' ' 29 */ {.pattern = 0x34b, .num_bits = 10}, /* ' ' 30 */ {.pattern = 0x34c, .num_bits = 10}, /* ' ' 31 */ {.pattern = 0x4, .num_bits = 5}, /* ' ' 32 */ {.pattern = 0x34d, .num_bits = 10}, /* '!' 33 */ {.pattern = 0x34e, .num_bits = 10}, /* '"' 34 */ {.pattern = 0x34f, .num_bits = 10}, /* '#' 35 */ {.pattern = 0x350, .num_bits = 10}, /* '$' 36 */ {.pattern = 0x351, .num_bits = 10}, /* '%' 37 */ {.pattern = 0x352, .num_bits = 10}, /* '&' 38 */ {.pattern = 0x56, .num_bits = 7}, /* ''' 39 */ {.pattern = 0x353, .num_bits = 10}, /* '(' 40 */ {.pattern = 0x354, .num_bits = 10}, /* ')' 41 */ {.pattern = 0x355, .num_bits = 10}, /* '*' 42 */ {.pattern = 0x356, .num_bits = 10}, /* '+' 43 */ {.pattern = 0xb9, .num_bits = 8}, /* ',' 44 */ {.pattern = 0x188, .num_bits = 9}, /* '-' 45 */ {.pattern = 0x57, .num_bits = 7}, /* '.' 46 */ {.pattern = 0x357, .num_bits = 10}, /* '/' 47 */ {.pattern = 0x358, .num_bits = 10}, /* '0' 48 */ {.pattern = 0x359, .num_bits = 10}, /* '1' 49 */ {.pattern = 0x35a, .num_bits = 10}, /* '2' 50 */ {.pattern = 0x35b, .num_bits = 10}, /* '3' 51 */ {.pattern = 0x35c, .num_bits = 10}, /* '4' 52 */ {.pattern = 0x35d, .num_bits = 10}, /* '5' 53 */ {.pattern = 0x35e, .num_bits = 10}, /* '6' 54 */ {.pattern = 0x35f, .num_bits = 10}, /* '7' 55 */ {.pattern = 0x360, .num_bits = 10}, /* '8' 56 */ {.pattern = 0x361, .num_bits = 10}, /* '9' 57 */ {.pattern = 0x362, .num_bits = 10}, /* ':' 58 */ {.pattern = 0x363, .num_bits = 10}, /* ';' 59 */ {.pattern = 0x364, .num_bits = 10}, /* '<' 60 */ {.pattern = 0x365, .num_bits = 10}, /* '=' 61 */ {.pattern = 0x366, .num_bits = 10}, /* '>' 62 */ {.pattern = 0xba, .num_bits = 8}, /* '?' 
63 */ {.pattern = 0x367, .num_bits = 10}, /* '@' 64 */ {.pattern = 0x368, .num_bits = 10}, /* 'A' 65 */ {.pattern = 0xbb, .num_bits = 8}, /* 'B' 66 */ {.pattern = 0x189, .num_bits = 9}, /* 'C' 67 */ {.pattern = 0x18a, .num_bits = 9}, /* 'D' 68 */ {.pattern = 0x18b, .num_bits = 9}, /* 'E' 69 */ {.pattern = 0x18c, .num_bits = 9}, /* 'F' 70 */ {.pattern = 0x18d, .num_bits = 9}, /* 'G' 71 */ {.pattern = 0x18e, .num_bits = 9}, /* 'H' 72 */ {.pattern = 0xbc, .num_bits = 8}, /* 'I' 73 */ {.pattern = 0x369, .num_bits = 10}, /* 'J' 74 */ {.pattern = 0x36a, .num_bits = 10}, /* 'K' 75 */ {.pattern = 0x18f, .num_bits = 9}, /* 'L' 76 */ {.pattern = 0x190, .num_bits = 9}, /* 'M' 77 */ {.pattern = 0x36b, .num_bits = 10}, /* 'N' 78 */ {.pattern = 0x36c, .num_bits = 10}, /* 'O' 79 */ {.pattern = 0x191, .num_bits = 9}, /* 'P' 80 */ {.pattern = 0x36d, .num_bits = 10}, /* 'Q' 81 */ {.pattern = 0x36e, .num_bits = 10}, /* 'R' 82 */ {.pattern = 0x36f, .num_bits = 10}, /* 'S' 83 */ {.pattern = 0xbd, .num_bits = 8}, /* 'T' 84 */ {.pattern = 0x370, .num_bits = 10}, /* 'U' 85 */ {.pattern = 0x192, .num_bits = 9}, /* 'V' 86 */ {.pattern = 0xbe, .num_bits = 8}, /* 'W' 87 */ {.pattern = 0x371, .num_bits = 10}, /* 'X' 88 */ {.pattern = 0x193, .num_bits = 9}, /* 'Y' 89 */ {.pattern = 0x372, .num_bits = 10}, /* 'Z' 90 */ {.pattern = 0x373, .num_bits = 10}, /* '[' 91 */ {.pattern = 0x374, .num_bits = 10}, /* '\' 92 */ {.pattern = 0x375, .num_bits = 10}, /* ']' 93 */ {.pattern = 0x376, .num_bits = 10}, /* '^' 94 */ {.pattern = 0x377, .num_bits = 10}, /* '_' 95 */ {.pattern = 0x378, .num_bits = 10}, /* '`' 96 */ {.pattern = 0x5, .num_bits = 5}, /* 'a' 97 */ {.pattern = 0x58, .num_bits = 7}, /* 'b' 98 */ {.pattern = 0x20, .num_bits = 6}, /* 'c' 99 */ {.pattern = 0x21, .num_bits = 6}, /* 'd' 100 */ {.pattern = 0x6, .num_bits = 5}, /* 'e' 101 */ {.pattern = 0x22, .num_bits = 6}, /* 'f' 102 */ {.pattern = 0x59, .num_bits = 7}, /* 'g' 103 */ {.pattern = 0x23, .num_bits = 6}, /* 'h' 104 */ {.pattern = 0x7, .num_bits = 5}, /* 'i' 105 */ {.pattern = 0xbf, .num_bits = 8}, /* 'j' 106 */ {.pattern = 0x24, .num_bits = 6}, /* 'k' 107 */ {.pattern = 0x25, .num_bits = 6}, /* 'l' 108 */ {.pattern = 0x26, .num_bits = 6}, /* 'm' 109 */ {.pattern = 0x8, .num_bits = 5}, /* 'n' 110 */ {.pattern = 0x9, .num_bits = 5}, /* 'o' 111 */ {.pattern = 0x5a, .num_bits = 7}, /* 'p' 112 */ {.pattern = 0x194, .num_bits = 9}, /* 'q' 113 */ {.pattern = 0xa, .num_bits = 5}, /* 'r' 114 */ {.pattern = 0xb, .num_bits = 5}, /* 's' 115 */ {.pattern = 0xc, .num_bits = 5}, /* 't' 116 */ {.pattern = 0xd, .num_bits = 5}, /* 'u' 117 */ {.pattern = 0xc0, .num_bits = 8}, /* 'v' 118 */ {.pattern = 0x27, .num_bits = 6}, /* 'w' 119 */ {.pattern = 0xc1, .num_bits = 8}, /* 'x' 120 */ {.pattern = 0x28, .num_bits = 6}, /* 'y' 121 */ {.pattern = 0x379, .num_bits = 10}, /* 'z' 122 */ {.pattern = 0x37a, .num_bits = 10}, /* '{' 123 */ {.pattern = 0x37b, .num_bits = 10}, /* '|' 124 */ {.pattern = 0x37c, .num_bits = 10}, /* '}' 125 */ {.pattern = 0x37d, .num_bits = 10}, /* '~' 126 */ {.pattern = 0x37e, .num_bits = 10}, /* ' ' 127 */ {.pattern = 0x37f, .num_bits = 10}, /* ' ' 128 */ {.pattern = 0x380, .num_bits = 10}, /* ' ' 129 */ {.pattern = 0x381, .num_bits = 10}, /* ' ' 130 */ {.pattern = 0x382, .num_bits = 10}, /* ' ' 131 */ {.pattern = 0x383, .num_bits = 10}, /* ' ' 132 */ {.pattern = 0x384, .num_bits = 10}, /* ' ' 133 */ {.pattern = 0x385, .num_bits = 10}, /* ' ' 134 */ {.pattern = 0x386, .num_bits = 10}, /* ' ' 135 */ {.pattern = 0x387, .num_bits = 10}, /* ' ' 136 */ {.pattern = 
0x388, .num_bits = 10}, /* ' ' 137 */ {.pattern = 0x389, .num_bits = 10}, /* ' ' 138 */ {.pattern = 0x38a, .num_bits = 10}, /* ' ' 139 */ {.pattern = 0x38b, .num_bits = 10}, /* ' ' 140 */ {.pattern = 0x38c, .num_bits = 10}, /* ' ' 141 */ {.pattern = 0x38d, .num_bits = 10}, /* ' ' 142 */ {.pattern = 0x38e, .num_bits = 10}, /* ' ' 143 */ {.pattern = 0x38f, .num_bits = 10}, /* ' ' 144 */ {.pattern = 0x390, .num_bits = 10}, /* ' ' 145 */ {.pattern = 0x391, .num_bits = 10}, /* ' ' 146 */ {.pattern = 0x392, .num_bits = 10}, /* ' ' 147 */ {.pattern = 0x393, .num_bits = 10}, /* ' ' 148 */ {.pattern = 0x394, .num_bits = 10}, /* ' ' 149 */ {.pattern = 0x395, .num_bits = 10}, /* ' ' 150 */ {.pattern = 0x396, .num_bits = 10}, /* ' ' 151 */ {.pattern = 0x397, .num_bits = 10}, /* ' ' 152 */ {.pattern = 0x398, .num_bits = 10}, /* ' ' 153 */ {.pattern = 0x399, .num_bits = 10}, /* ' ' 154 */ {.pattern = 0x39a, .num_bits = 10}, /* ' ' 155 */ {.pattern = 0x39b, .num_bits = 10}, /* ' ' 156 */ {.pattern = 0x39c, .num_bits = 10}, /* ' ' 157 */ {.pattern = 0x39d, .num_bits = 10}, /* ' ' 158 */ {.pattern = 0x39e, .num_bits = 10}, /* ' ' 159 */ {.pattern = 0x39f, .num_bits = 10}, /* ' ' 160 */ {.pattern = 0x3a0, .num_bits = 10}, /* ' ' 161 */ {.pattern = 0x3a1, .num_bits = 10}, /* ' ' 162 */ {.pattern = 0x3a2, .num_bits = 10}, /* ' ' 163 */ {.pattern = 0x3a3, .num_bits = 10}, /* ' ' 164 */ {.pattern = 0x3a4, .num_bits = 10}, /* ' ' 165 */ {.pattern = 0x3a5, .num_bits = 10}, /* ' ' 166 */ {.pattern = 0x3a6, .num_bits = 10}, /* ' ' 167 */ {.pattern = 0x3a7, .num_bits = 10}, /* ' ' 168 */ {.pattern = 0x3a8, .num_bits = 10}, /* ' ' 169 */ {.pattern = 0x3a9, .num_bits = 10}, /* ' ' 170 */ {.pattern = 0x3aa, .num_bits = 10}, /* ' ' 171 */ {.pattern = 0x3ab, .num_bits = 10}, /* ' ' 172 */ {.pattern = 0x3ac, .num_bits = 10}, /* ' ' 173 */ {.pattern = 0x3ad, .num_bits = 10}, /* ' ' 174 */ {.pattern = 0x3ae, .num_bits = 10}, /* ' ' 175 */ {.pattern = 0x3af, .num_bits = 10}, /* ' ' 176 */ {.pattern = 0x3b0, .num_bits = 10}, /* ' ' 177 */ {.pattern = 0x3b1, .num_bits = 10}, /* ' ' 178 */ {.pattern = 0x3b2, .num_bits = 10}, /* ' ' 179 */ {.pattern = 0x3b3, .num_bits = 10}, /* ' ' 180 */ {.pattern = 0x3b4, .num_bits = 10}, /* ' ' 181 */ {.pattern = 0x3b5, .num_bits = 10}, /* ' ' 182 */ {.pattern = 0x3b6, .num_bits = 10}, /* ' ' 183 */ {.pattern = 0x3b7, .num_bits = 10}, /* ' ' 184 */ {.pattern = 0x3b8, .num_bits = 10}, /* ' ' 185 */ {.pattern = 0x3b9, .num_bits = 10}, /* ' ' 186 */ {.pattern = 0x3ba, .num_bits = 10}, /* ' ' 187 */ {.pattern = 0x3bb, .num_bits = 10}, /* ' ' 188 */ {.pattern = 0x3bc, .num_bits = 10}, /* ' ' 189 */ {.pattern = 0x3bd, .num_bits = 10}, /* ' ' 190 */ {.pattern = 0x3be, .num_bits = 10}, /* ' ' 191 */ {.pattern = 0x3bf, .num_bits = 10}, /* ' ' 192 */ {.pattern = 0x3c0, .num_bits = 10}, /* ' ' 193 */ {.pattern = 0x3c1, .num_bits = 10}, /* ' ' 194 */ {.pattern = 0x3c2, .num_bits = 10}, /* ' ' 195 */ {.pattern = 0x3c3, .num_bits = 10}, /* ' ' 196 */ {.pattern = 0x3c4, .num_bits = 10}, /* ' ' 197 */ {.pattern = 0x3c5, .num_bits = 10}, /* ' ' 198 */ {.pattern = 0x3c6, .num_bits = 10}, /* ' ' 199 */ {.pattern = 0x3c7, .num_bits = 10}, /* ' ' 200 */ {.pattern = 0x3c8, .num_bits = 10}, /* ' ' 201 */ {.pattern = 0x3c9, .num_bits = 10}, /* ' ' 202 */ {.pattern = 0x3ca, .num_bits = 10}, /* ' ' 203 */ {.pattern = 0x3cb, .num_bits = 10}, /* ' ' 204 */ {.pattern = 0x3cc, .num_bits = 10}, /* ' ' 205 */ {.pattern = 0x3cd, .num_bits = 10}, /* ' ' 206 */ {.pattern = 0x3ce, .num_bits = 10}, /* ' ' 207 */ {.pattern = 
0x3cf, .num_bits = 10}, /* ' ' 208 */ {.pattern = 0x3d0, .num_bits = 10}, /* ' ' 209 */ {.pattern = 0x3d1, .num_bits = 10}, /* ' ' 210 */ {.pattern = 0x3d2, .num_bits = 10}, /* ' ' 211 */ {.pattern = 0x3d3, .num_bits = 10}, /* ' ' 212 */ {.pattern = 0x3d4, .num_bits = 10}, /* ' ' 213 */ {.pattern = 0x3d5, .num_bits = 10}, /* ' ' 214 */ {.pattern = 0x3d6, .num_bits = 10}, /* ' ' 215 */ {.pattern = 0x3d7, .num_bits = 10}, /* ' ' 216 */ {.pattern = 0x3d8, .num_bits = 10}, /* ' ' 217 */ {.pattern = 0x3d9, .num_bits = 10}, /* ' ' 218 */ {.pattern = 0x3da, .num_bits = 10}, /* ' ' 219 */ {.pattern = 0x3db, .num_bits = 10}, /* ' ' 220 */ {.pattern = 0x3dc, .num_bits = 10}, /* ' ' 221 */ {.pattern = 0x3dd, .num_bits = 10}, /* ' ' 222 */ {.pattern = 0x3de, .num_bits = 10}, /* ' ' 223 */ {.pattern = 0x3df, .num_bits = 10}, /* ' ' 224 */ {.pattern = 0x3e0, .num_bits = 10}, /* ' ' 225 */ {.pattern = 0x3e1, .num_bits = 10}, /* ' ' 226 */ {.pattern = 0x3e2, .num_bits = 10}, /* ' ' 227 */ {.pattern = 0x3e3, .num_bits = 10}, /* ' ' 228 */ {.pattern = 0x3e4, .num_bits = 10}, /* ' ' 229 */ {.pattern = 0x3e5, .num_bits = 10}, /* ' ' 230 */ {.pattern = 0x3e6, .num_bits = 10}, /* ' ' 231 */ {.pattern = 0x3e7, .num_bits = 10}, /* ' ' 232 */ {.pattern = 0x3e8, .num_bits = 10}, /* ' ' 233 */ {.pattern = 0x3e9, .num_bits = 10}, /* ' ' 234 */ {.pattern = 0x3ea, .num_bits = 10}, /* ' ' 235 */ {.pattern = 0x3eb, .num_bits = 10}, /* ' ' 236 */ {.pattern = 0x3ec, .num_bits = 10}, /* ' ' 237 */ {.pattern = 0x3ed, .num_bits = 10}, /* ' ' 238 */ {.pattern = 0x3ee, .num_bits = 10}, /* ' ' 239 */ {.pattern = 0x3ef, .num_bits = 10}, /* ' ' 240 */ {.pattern = 0x3f0, .num_bits = 10}, /* ' ' 241 */ {.pattern = 0x3f1, .num_bits = 10}, /* ' ' 242 */ {.pattern = 0x3f2, .num_bits = 10}, /* ' ' 243 */ {.pattern = 0x3f3, .num_bits = 10}, /* ' ' 244 */ {.pattern = 0x3f4, .num_bits = 10}, /* ' ' 245 */ {.pattern = 0x3f5, .num_bits = 10}, /* ' ' 246 */ {.pattern = 0x3f6, .num_bits = 10}, /* ' ' 247 */ {.pattern = 0x3f7, .num_bits = 10}, /* ' ' 248 */ {.pattern = 0x3f8, .num_bits = 10}, /* ' ' 249 */ {.pattern = 0x3f9, .num_bits = 10}, /* ' ' 250 */ {.pattern = 0x3fa, .num_bits = 10}, /* ' ' 251 */ {.pattern = 0x3fb, .num_bits = 10}, /* ' ' 252 */ {.pattern = 0x3fc, .num_bits = 10}, /* ' ' 253 */ {.pattern = 0x3fd, .num_bits = 10}, /* ' ' 254 */ {.pattern = 0x3fe, .num_bits = 10}, /* ' ' 255 */ }; static struct aws_huffman_code encode_symbol(uint8_t symbol, void *userdata) { (void)userdata; return code_points[symbol]; } /* NOLINTNEXTLINE(readability-function-size) */ static uint8_t decode_symbol(uint32_t bits, uint8_t *symbol, void *userdata) { (void)userdata; if (bits & 0x80000000) { goto node_1; } else { goto node_0; } node_0: if (bits & 0x40000000) { goto node_01; } else { goto node_00; } node_00: if (bits & 0x20000000) { goto node_001; } else { return 0; /* invalid node */ } node_001: if (bits & 0x10000000) { goto node_0011; } else { goto node_0010; } node_0010: if (bits & 0x8000000) { *symbol = 97; return 5; } else { *symbol = 32; return 5; } node_0011: if (bits & 0x8000000) { *symbol = 105; return 5; } else { *symbol = 101; return 5; } node_01: if (bits & 0x20000000) { goto node_011; } else { goto node_010; } node_010: if (bits & 0x10000000) { goto node_0101; } else { goto node_0100; } node_0100: if (bits & 0x8000000) { *symbol = 111; return 5; } else { *symbol = 110; return 5; } node_0101: if (bits & 0x8000000) { *symbol = 115; return 5; } else { *symbol = 114; return 5; } node_011: if (bits & 0x10000000) { return 0; /* invalid 
node */ } else { goto node_0110; } node_0110: if (bits & 0x8000000) { *symbol = 117; return 5; } else { *symbol = 116; return 5; } node_1: if (bits & 0x40000000) { goto node_11; } else { goto node_10; } node_10: if (bits & 0x20000000) { goto node_101; } else { goto node_100; } node_100: if (bits & 0x10000000) { goto node_1001; } else { goto node_1000; } node_1000: if (bits & 0x8000000) { goto node_10001; } else { goto node_10000; } node_10000: if (bits & 0x4000000) { *symbol = 100; return 6; } else { *symbol = 99; return 6; } node_10001: if (bits & 0x4000000) { *symbol = 104; return 6; } else { *symbol = 102; return 6; } node_1001: if (bits & 0x8000000) { goto node_10011; } else { goto node_10010; } node_10010: if (bits & 0x4000000) { *symbol = 108; return 6; } else { *symbol = 107; return 6; } node_10011: if (bits & 0x4000000) { *symbol = 119; return 6; } else { *symbol = 109; return 6; } node_101: if (bits & 0x10000000) { goto node_1011; } else { goto node_1010; } node_1010: if (bits & 0x8000000) { goto node_10101; } else { goto node_10100; } node_10100: if (bits & 0x4000000) { return 0; /* invalid node */ } else { *symbol = 121; return 6; } node_10101: if (bits & 0x4000000) { goto node_101011; } else { return 0; /* invalid node */ } node_101011: if (bits & 0x2000000) { *symbol = 46; return 7; } else { *symbol = 39; return 7; } node_1011: if (bits & 0x8000000) { goto node_10111; } else { goto node_10110; } node_10110: if (bits & 0x4000000) { goto node_101101; } else { goto node_101100; } node_101100: if (bits & 0x2000000) { *symbol = 103; return 7; } else { *symbol = 98; return 7; } node_101101: if (bits & 0x2000000) { return 0; /* invalid node */ } else { *symbol = 112; return 7; } node_10111: if (bits & 0x4000000) { goto node_101111; } else { goto node_101110; } node_101110: if (bits & 0x2000000) { goto node_1011101; } else { goto node_1011100; } node_1011100: if (bits & 0x1000000) { *symbol = 44; return 8; } else { *symbol = 10; return 8; } node_1011101: if (bits & 0x1000000) { *symbol = 66; return 8; } else { *symbol = 63; return 8; } node_101111: if (bits & 0x2000000) { goto node_1011111; } else { goto node_1011110; } node_1011110: if (bits & 0x1000000) { *symbol = 84; return 8; } else { *symbol = 73; return 8; } node_1011111: if (bits & 0x1000000) { *symbol = 106; return 8; } else { *symbol = 87; return 8; } node_11: if (bits & 0x20000000) { goto node_111; } else { goto node_110; } node_110: if (bits & 0x10000000) { goto node_1101; } else { goto node_1100; } node_1100: if (bits & 0x8000000) { goto node_11001; } else { goto node_11000; } node_11000: if (bits & 0x4000000) { goto node_110001; } else { goto node_110000; } node_110000: if (bits & 0x2000000) { return 0; /* invalid node */ } else { goto node_1100000; } node_1100000: if (bits & 0x1000000) { *symbol = 120; return 8; } else { *symbol = 118; return 8; } node_110001: if (bits & 0x2000000) { goto node_1100011; } else { goto node_1100010; } node_1100010: if (bits & 0x1000000) { goto node_11000101; } else { goto node_11000100; } node_11000100: if (bits & 0x800000) { *symbol = 67; return 9; } else { *symbol = 45; return 9; } node_11000101: if (bits & 0x800000) { *symbol = 69; return 9; } else { *symbol = 68; return 9; } node_1100011: if (bits & 0x1000000) { goto node_11000111; } else { goto node_11000110; } node_11000110: if (bits & 0x800000) { *symbol = 71; return 9; } else { *symbol = 70; return 9; } node_11000111: if (bits & 0x800000) { *symbol = 76; return 9; } else { *symbol = 72; return 9; } node_11001: if (bits & 0x4000000) 
{ goto node_110011; } else { goto node_110010; } node_110010: if (bits & 0x2000000) { goto node_1100101; } else { goto node_1100100; } node_1100100: if (bits & 0x1000000) { goto node_11001001; } else { goto node_11001000; } node_11001000: if (bits & 0x800000) { *symbol = 80; return 9; } else { *symbol = 77; return 9; } node_11001001: if (bits & 0x800000) { *symbol = 89; return 9; } else { *symbol = 86; return 9; } node_1100101: if (bits & 0x1000000) { goto node_11001011; } else { goto node_11001010; } node_11001010: if (bits & 0x800000) { return 0; /* invalid node */ } else { *symbol = 113; return 9; } node_11001011: if (bits & 0x800000) { goto node_110010111; } else { return 0; /* invalid node */ } node_110010111: if (bits & 0x400000) { *symbol = 1; return 10; } else { *symbol = 0; return 10; } node_110011: if (bits & 0x2000000) { goto node_1100111; } else { goto node_1100110; } node_1100110: if (bits & 0x1000000) { goto node_11001101; } else { goto node_11001100; } node_11001100: if (bits & 0x800000) { goto node_110011001; } else { goto node_110011000; } node_110011000: if (bits & 0x400000) { *symbol = 3; return 10; } else { *symbol = 2; return 10; } node_110011001: if (bits & 0x400000) { *symbol = 5; return 10; } else { *symbol = 4; return 10; } node_11001101: if (bits & 0x800000) { goto node_110011011; } else { goto node_110011010; } node_110011010: if (bits & 0x400000) { *symbol = 7; return 10; } else { *symbol = 6; return 10; } node_110011011: if (bits & 0x400000) { *symbol = 9; return 10; } else { *symbol = 8; return 10; } node_1100111: if (bits & 0x1000000) { goto node_11001111; } else { goto node_11001110; } node_11001110: if (bits & 0x800000) { goto node_110011101; } else { goto node_110011100; } node_110011100: if (bits & 0x400000) { *symbol = 12; return 10; } else { *symbol = 11; return 10; } node_110011101: if (bits & 0x400000) { *symbol = 14; return 10; } else { *symbol = 13; return 10; } node_11001111: if (bits & 0x800000) { goto node_110011111; } else { goto node_110011110; } node_110011110: if (bits & 0x400000) { *symbol = 16; return 10; } else { *symbol = 15; return 10; } node_110011111: if (bits & 0x400000) { *symbol = 18; return 10; } else { *symbol = 17; return 10; } node_1101: if (bits & 0x8000000) { goto node_11011; } else { goto node_11010; } node_11010: if (bits & 0x4000000) { goto node_110101; } else { goto node_110100; } node_110100: if (bits & 0x2000000) { goto node_1101001; } else { goto node_1101000; } node_1101000: if (bits & 0x1000000) { goto node_11010001; } else { goto node_11010000; } node_11010000: if (bits & 0x800000) { goto node_110100001; } else { goto node_110100000; } node_110100000: if (bits & 0x400000) { *symbol = 20; return 10; } else { *symbol = 19; return 10; } node_110100001: if (bits & 0x400000) { *symbol = 22; return 10; } else { *symbol = 21; return 10; } node_11010001: if (bits & 0x800000) { goto node_110100011; } else { goto node_110100010; } node_110100010: if (bits & 0x400000) { *symbol = 24; return 10; } else { *symbol = 23; return 10; } node_110100011: if (bits & 0x400000) { *symbol = 26; return 10; } else { *symbol = 25; return 10; } node_1101001: if (bits & 0x1000000) { goto node_11010011; } else { goto node_11010010; } node_11010010: if (bits & 0x800000) { goto node_110100101; } else { goto node_110100100; } node_110100100: if (bits & 0x400000) { *symbol = 28; return 10; } else { *symbol = 27; return 10; } node_110100101: if (bits & 0x400000) { *symbol = 30; return 10; } else { *symbol = 29; return 10; } node_11010011: if (bits & 
0x800000) { goto node_110100111; } else { goto node_110100110; } node_110100110: if (bits & 0x400000) { *symbol = 33; return 10; } else { *symbol = 31; return 10; } node_110100111: if (bits & 0x400000) { *symbol = 35; return 10; } else { *symbol = 34; return 10; } node_110101: if (bits & 0x2000000) { goto node_1101011; } else { goto node_1101010; } node_1101010: if (bits & 0x1000000) { goto node_11010101; } else { goto node_11010100; } node_11010100: if (bits & 0x800000) { goto node_110101001; } else { goto node_110101000; } node_110101000: if (bits & 0x400000) { *symbol = 37; return 10; } else { *symbol = 36; return 10; } node_110101001: if (bits & 0x400000) { *symbol = 40; return 10; } else { *symbol = 38; return 10; } node_11010101: if (bits & 0x800000) { goto node_110101011; } else { goto node_110101010; } node_110101010: if (bits & 0x400000) { *symbol = 42; return 10; } else { *symbol = 41; return 10; } node_110101011: if (bits & 0x400000) { *symbol = 47; return 10; } else { *symbol = 43; return 10; } node_1101011: if (bits & 0x1000000) { goto node_11010111; } else { goto node_11010110; } node_11010110: if (bits & 0x800000) { goto node_110101101; } else { goto node_110101100; } node_110101100: if (bits & 0x400000) { *symbol = 49; return 10; } else { *symbol = 48; return 10; } node_110101101: if (bits & 0x400000) { *symbol = 51; return 10; } else { *symbol = 50; return 10; } node_11010111: if (bits & 0x800000) { goto node_110101111; } else { goto node_110101110; } node_110101110: if (bits & 0x400000) { *symbol = 53; return 10; } else { *symbol = 52; return 10; } node_110101111: if (bits & 0x400000) { *symbol = 55; return 10; } else { *symbol = 54; return 10; } node_11011: if (bits & 0x4000000) { goto node_110111; } else { goto node_110110; } node_110110: if (bits & 0x2000000) { goto node_1101101; } else { goto node_1101100; } node_1101100: if (bits & 0x1000000) { goto node_11011001; } else { goto node_11011000; } node_11011000: if (bits & 0x800000) { goto node_110110001; } else { goto node_110110000; } node_110110000: if (bits & 0x400000) { *symbol = 57; return 10; } else { *symbol = 56; return 10; } node_110110001: if (bits & 0x400000) { *symbol = 59; return 10; } else { *symbol = 58; return 10; } node_11011001: if (bits & 0x800000) { goto node_110110011; } else { goto node_110110010; } node_110110010: if (bits & 0x400000) { *symbol = 61; return 10; } else { *symbol = 60; return 10; } node_110110011: if (bits & 0x400000) { *symbol = 64; return 10; } else { *symbol = 62; return 10; } node_1101101: if (bits & 0x1000000) { goto node_11011011; } else { goto node_11011010; } node_11011010: if (bits & 0x800000) { goto node_110110101; } else { goto node_110110100; } node_110110100: if (bits & 0x400000) { *symbol = 74; return 10; } else { *symbol = 65; return 10; } node_110110101: if (bits & 0x400000) { *symbol = 78; return 10; } else { *symbol = 75; return 10; } node_11011011: if (bits & 0x800000) { goto node_110110111; } else { goto node_110110110; } node_110110110: if (bits & 0x400000) { *symbol = 81; return 10; } else { *symbol = 79; return 10; } node_110110111: if (bits & 0x400000) { *symbol = 83; return 10; } else { *symbol = 82; return 10; } node_110111: if (bits & 0x2000000) { goto node_1101111; } else { goto node_1101110; } node_1101110: if (bits & 0x1000000) { goto node_11011101; } else { goto node_11011100; } node_11011100: if (bits & 0x800000) { goto node_110111001; } else { goto node_110111000; } node_110111000: if (bits & 0x400000) { *symbol = 88; return 10; } else { *symbol = 
85; return 10; } node_110111001: if (bits & 0x400000) { *symbol = 91; return 10; } else { *symbol = 90; return 10; } node_11011101: if (bits & 0x800000) { goto node_110111011; } else { goto node_110111010; } node_110111010: if (bits & 0x400000) { *symbol = 93; return 10; } else { *symbol = 92; return 10; } node_110111011: if (bits & 0x400000) { *symbol = 95; return 10; } else { *symbol = 94; return 10; } node_1101111: if (bits & 0x1000000) { goto node_11011111; } else { goto node_11011110; } node_11011110: if (bits & 0x800000) { goto node_110111101; } else { goto node_110111100; } node_110111100: if (bits & 0x400000) { *symbol = 122; return 10; } else { *symbol = 96; return 10; } node_110111101: if (bits & 0x400000) { *symbol = 124; return 10; } else { *symbol = 123; return 10; } node_11011111: if (bits & 0x800000) { goto node_110111111; } else { goto node_110111110; } node_110111110: if (bits & 0x400000) { *symbol = 126; return 10; } else { *symbol = 125; return 10; } node_110111111: if (bits & 0x400000) { *symbol = 128; return 10; } else { *symbol = 127; return 10; } node_111: if (bits & 0x10000000) { goto node_1111; } else { goto node_1110; } node_1110: if (bits & 0x8000000) { goto node_11101; } else { goto node_11100; } node_11100: if (bits & 0x4000000) { goto node_111001; } else { goto node_111000; } node_111000: if (bits & 0x2000000) { goto node_1110001; } else { goto node_1110000; } node_1110000: if (bits & 0x1000000) { goto node_11100001; } else { goto node_11100000; } node_11100000: if (bits & 0x800000) { goto node_111000001; } else { goto node_111000000; } node_111000000: if (bits & 0x400000) { *symbol = 130; return 10; } else { *symbol = 129; return 10; } node_111000001: if (bits & 0x400000) { *symbol = 132; return 10; } else { *symbol = 131; return 10; } node_11100001: if (bits & 0x800000) { goto node_111000011; } else { goto node_111000010; } node_111000010: if (bits & 0x400000) { *symbol = 134; return 10; } else { *symbol = 133; return 10; } node_111000011: if (bits & 0x400000) { *symbol = 136; return 10; } else { *symbol = 135; return 10; } node_1110001: if (bits & 0x1000000) { goto node_11100011; } else { goto node_11100010; } node_11100010: if (bits & 0x800000) { goto node_111000101; } else { goto node_111000100; } node_111000100: if (bits & 0x400000) { *symbol = 138; return 10; } else { *symbol = 137; return 10; } node_111000101: if (bits & 0x400000) { *symbol = 140; return 10; } else { *symbol = 139; return 10; } node_11100011: if (bits & 0x800000) { goto node_111000111; } else { goto node_111000110; } node_111000110: if (bits & 0x400000) { *symbol = 142; return 10; } else { *symbol = 141; return 10; } node_111000111: if (bits & 0x400000) { *symbol = 144; return 10; } else { *symbol = 143; return 10; } node_111001: if (bits & 0x2000000) { goto node_1110011; } else { goto node_1110010; } node_1110010: if (bits & 0x1000000) { goto node_11100101; } else { goto node_11100100; } node_11100100: if (bits & 0x800000) { goto node_111001001; } else { goto node_111001000; } node_111001000: if (bits & 0x400000) { *symbol = 146; return 10; } else { *symbol = 145; return 10; } node_111001001: if (bits & 0x400000) { *symbol = 148; return 10; } else { *symbol = 147; return 10; } node_11100101: if (bits & 0x800000) { goto node_111001011; } else { goto node_111001010; } node_111001010: if (bits & 0x400000) { *symbol = 150; return 10; } else { *symbol = 149; return 10; } node_111001011: if (bits & 0x400000) { *symbol = 152; return 10; } else { *symbol = 151; return 10; } node_1110011: if 
(bits & 0x1000000) { goto node_11100111; } else { goto node_11100110; } node_11100110: if (bits & 0x800000) { goto node_111001101; } else { goto node_111001100; } node_111001100: if (bits & 0x400000) { *symbol = 154; return 10; } else { *symbol = 153; return 10; } node_111001101: if (bits & 0x400000) { *symbol = 156; return 10; } else { *symbol = 155; return 10; } node_11100111: if (bits & 0x800000) { goto node_111001111; } else { goto node_111001110; } node_111001110: if (bits & 0x400000) { *symbol = 158; return 10; } else { *symbol = 157; return 10; } node_111001111: if (bits & 0x400000) { *symbol = 160; return 10; } else { *symbol = 159; return 10; } node_11101: if (bits & 0x4000000) { goto node_111011; } else { goto node_111010; } node_111010: if (bits & 0x2000000) { goto node_1110101; } else { goto node_1110100; } node_1110100: if (bits & 0x1000000) { goto node_11101001; } else { goto node_11101000; } node_11101000: if (bits & 0x800000) { goto node_111010001; } else { goto node_111010000; } node_111010000: if (bits & 0x400000) { *symbol = 162; return 10; } else { *symbol = 161; return 10; } node_111010001: if (bits & 0x400000) { *symbol = 164; return 10; } else { *symbol = 163; return 10; } node_11101001: if (bits & 0x800000) { goto node_111010011; } else { goto node_111010010; } node_111010010: if (bits & 0x400000) { *symbol = 166; return 10; } else { *symbol = 165; return 10; } node_111010011: if (bits & 0x400000) { *symbol = 168; return 10; } else { *symbol = 167; return 10; } node_1110101: if (bits & 0x1000000) { goto node_11101011; } else { goto node_11101010; } node_11101010: if (bits & 0x800000) { goto node_111010101; } else { goto node_111010100; } node_111010100: if (bits & 0x400000) { *symbol = 170; return 10; } else { *symbol = 169; return 10; } node_111010101: if (bits & 0x400000) { *symbol = 172; return 10; } else { *symbol = 171; return 10; } node_11101011: if (bits & 0x800000) { goto node_111010111; } else { goto node_111010110; } node_111010110: if (bits & 0x400000) { *symbol = 174; return 10; } else { *symbol = 173; return 10; } node_111010111: if (bits & 0x400000) { *symbol = 176; return 10; } else { *symbol = 175; return 10; } node_111011: if (bits & 0x2000000) { goto node_1110111; } else { goto node_1110110; } node_1110110: if (bits & 0x1000000) { goto node_11101101; } else { goto node_11101100; } node_11101100: if (bits & 0x800000) { goto node_111011001; } else { goto node_111011000; } node_111011000: if (bits & 0x400000) { *symbol = 178; return 10; } else { *symbol = 177; return 10; } node_111011001: if (bits & 0x400000) { *symbol = 180; return 10; } else { *symbol = 179; return 10; } node_11101101: if (bits & 0x800000) { goto node_111011011; } else { goto node_111011010; } node_111011010: if (bits & 0x400000) { *symbol = 182; return 10; } else { *symbol = 181; return 10; } node_111011011: if (bits & 0x400000) { *symbol = 184; return 10; } else { *symbol = 183; return 10; } node_1110111: if (bits & 0x1000000) { goto node_11101111; } else { goto node_11101110; } node_11101110: if (bits & 0x800000) { goto node_111011101; } else { goto node_111011100; } node_111011100: if (bits & 0x400000) { *symbol = 186; return 10; } else { *symbol = 185; return 10; } node_111011101: if (bits & 0x400000) { *symbol = 188; return 10; } else { *symbol = 187; return 10; } node_11101111: if (bits & 0x800000) { goto node_111011111; } else { goto node_111011110; } node_111011110: if (bits & 0x400000) { *symbol = 190; return 10; } else { *symbol = 189; return 10; } node_111011111: if 
(bits & 0x400000) { *symbol = 192; return 10; } else { *symbol = 191; return 10; } node_1111: if (bits & 0x8000000) { goto node_11111; } else { goto node_11110; } node_11110: if (bits & 0x4000000) { goto node_111101; } else { goto node_111100; } node_111100: if (bits & 0x2000000) { goto node_1111001; } else { goto node_1111000; } node_1111000: if (bits & 0x1000000) { goto node_11110001; } else { goto node_11110000; } node_11110000: if (bits & 0x800000) { goto node_111100001; } else { goto node_111100000; } node_111100000: if (bits & 0x400000) { *symbol = 194; return 10; } else { *symbol = 193; return 10; } node_111100001: if (bits & 0x400000) { *symbol = 196; return 10; } else { *symbol = 195; return 10; } node_11110001: if (bits & 0x800000) { goto node_111100011; } else { goto node_111100010; } node_111100010: if (bits & 0x400000) { *symbol = 198; return 10; } else { *symbol = 197; return 10; } node_111100011: if (bits & 0x400000) { *symbol = 200; return 10; } else { *symbol = 199; return 10; } node_1111001: if (bits & 0x1000000) { goto node_11110011; } else { goto node_11110010; } node_11110010: if (bits & 0x800000) { goto node_111100101; } else { goto node_111100100; } node_111100100: if (bits & 0x400000) { *symbol = 202; return 10; } else { *symbol = 201; return 10; } node_111100101: if (bits & 0x400000) { *symbol = 204; return 10; } else { *symbol = 203; return 10; } node_11110011: if (bits & 0x800000) { goto node_111100111; } else { goto node_111100110; } node_111100110: if (bits & 0x400000) { *symbol = 206; return 10; } else { *symbol = 205; return 10; } node_111100111: if (bits & 0x400000) { *symbol = 208; return 10; } else { *symbol = 207; return 10; } node_111101: if (bits & 0x2000000) { goto node_1111011; } else { goto node_1111010; } node_1111010: if (bits & 0x1000000) { goto node_11110101; } else { goto node_11110100; } node_11110100: if (bits & 0x800000) { goto node_111101001; } else { goto node_111101000; } node_111101000: if (bits & 0x400000) { *symbol = 210; return 10; } else { *symbol = 209; return 10; } node_111101001: if (bits & 0x400000) { *symbol = 212; return 10; } else { *symbol = 211; return 10; } node_11110101: if (bits & 0x800000) { goto node_111101011; } else { goto node_111101010; } node_111101010: if (bits & 0x400000) { *symbol = 214; return 10; } else { *symbol = 213; return 10; } node_111101011: if (bits & 0x400000) { *symbol = 216; return 10; } else { *symbol = 215; return 10; } node_1111011: if (bits & 0x1000000) { goto node_11110111; } else { goto node_11110110; } node_11110110: if (bits & 0x800000) { goto node_111101101; } else { goto node_111101100; } node_111101100: if (bits & 0x400000) { *symbol = 218; return 10; } else { *symbol = 217; return 10; } node_111101101: if (bits & 0x400000) { *symbol = 220; return 10; } else { *symbol = 219; return 10; } node_11110111: if (bits & 0x800000) { goto node_111101111; } else { goto node_111101110; } node_111101110: if (bits & 0x400000) { *symbol = 222; return 10; } else { *symbol = 221; return 10; } node_111101111: if (bits & 0x400000) { *symbol = 224; return 10; } else { *symbol = 223; return 10; } node_11111: if (bits & 0x4000000) { goto node_111111; } else { goto node_111110; } node_111110: if (bits & 0x2000000) { goto node_1111101; } else { goto node_1111100; } node_1111100: if (bits & 0x1000000) { goto node_11111001; } else { goto node_11111000; } node_11111000: if (bits & 0x800000) { goto node_111110001; } else { goto node_111110000; } node_111110000: if (bits & 0x400000) { *symbol = 226; return 10; } else 
{ *symbol = 225; return 10; } node_111110001: if (bits & 0x400000) { *symbol = 228; return 10; } else { *symbol = 227; return 10; } node_11111001: if (bits & 0x800000) { goto node_111110011; } else { goto node_111110010; } node_111110010: if (bits & 0x400000) { *symbol = 230; return 10; } else { *symbol = 229; return 10; } node_111110011: if (bits & 0x400000) { *symbol = 232; return 10; } else { *symbol = 231; return 10; } node_1111101: if (bits & 0x1000000) { goto node_11111011; } else { goto node_11111010; } node_11111010: if (bits & 0x800000) { goto node_111110101; } else { goto node_111110100; } node_111110100: if (bits & 0x400000) { *symbol = 234; return 10; } else { *symbol = 233; return 10; } node_111110101: if (bits & 0x400000) { *symbol = 236; return 10; } else { *symbol = 235; return 10; } node_11111011: if (bits & 0x800000) { goto node_111110111; } else { goto node_111110110; } node_111110110: if (bits & 0x400000) { *symbol = 238; return 10; } else { *symbol = 237; return 10; } node_111110111: if (bits & 0x400000) { *symbol = 240; return 10; } else { *symbol = 239; return 10; } node_111111: if (bits & 0x2000000) { goto node_1111111; } else { goto node_1111110; } node_1111110: if (bits & 0x1000000) { goto node_11111101; } else { goto node_11111100; } node_11111100: if (bits & 0x800000) { goto node_111111001; } else { goto node_111111000; } node_111111000: if (bits & 0x400000) { *symbol = 242; return 10; } else { *symbol = 241; return 10; } node_111111001: if (bits & 0x400000) { *symbol = 244; return 10; } else { *symbol = 243; return 10; } node_11111101: if (bits & 0x800000) { goto node_111111011; } else { goto node_111111010; } node_111111010: if (bits & 0x400000) { *symbol = 246; return 10; } else { *symbol = 245; return 10; } node_111111011: if (bits & 0x400000) { *symbol = 248; return 10; } else { *symbol = 247; return 10; } node_1111111: if (bits & 0x1000000) { goto node_11111111; } else { goto node_11111110; } node_11111110: if (bits & 0x800000) { goto node_111111101; } else { goto node_111111100; } node_111111100: if (bits & 0x400000) { *symbol = 250; return 10; } else { *symbol = 249; return 10; } node_111111101: if (bits & 0x400000) { *symbol = 252; return 10; } else { *symbol = 251; return 10; } node_11111111: if (bits & 0x800000) { goto node_111111111; } else { goto node_111111110; } node_111111110: if (bits & 0x400000) { *symbol = 254; return 10; } else { *symbol = 253; return 10; } node_111111111: if (bits & 0x400000) { return 0; /* invalid node */ } else { *symbol = 255; return 10; } } struct aws_huffman_symbol_coder *test_get_coder(void) { static struct aws_huffman_symbol_coder coder = { .encode = encode_symbol, .decode = decode_symbol, .userdata = NULL, }; return &coder; } aws-crt-python-0.20.4+dfsg/crt/aws-c-compression/tests/test_huffman_static_table.def000066400000000000000000000304201456575232400306130ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef HUFFMAN_CODE #error "Macro HUFFMAN_CODE must be defined before including this header file!" 
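/*
 * Illustrative note (not part of the upstream file): each HUFFMAN_CODE entry
 * below lists the symbol value, its code as a bit string, the same code as an
 * integer, and the code length in bits. A consumer is expected to define the
 * HUFFMAN_CODE(psymbol, pbit_string, pcode, plen) macro before including this
 * .def, in the usual X-macro style. A minimal hypothetical sketch (the names
 * code_point and s_code_points are made up for illustration, and <stdint.h>
 * is assumed for the fixed-width types):
 *
 *     struct code_point {
 *         uint16_t code; // right-aligned code bits (10 bits max here)
 *         uint8_t len;   // number of significant bits in 'code'
 *     };
 *
 *     static const struct code_point s_code_points[] = {
 *     #define HUFFMAN_CODE(psymbol, pbit_string, pcode, plen) {.code = (pcode), .len = (plen)},
 *     #include "test_huffman_static_table.def"
 *     #undef HUFFMAN_CODE
 *     };
 *
 * Because the entries appear in symbol order, s_code_points[sym] would yield
 * the code/length pair that an encoder and the generated decode tree above
 * must agree on.
 */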
#endif /* sym bits code len */ HUFFMAN_CODE( 0, "1100101110", 0x32e, 10) HUFFMAN_CODE( 1, "1100101111", 0x32f, 10) HUFFMAN_CODE( 2, "1100110000", 0x330, 10) HUFFMAN_CODE( 3, "1100110001", 0x331, 10) HUFFMAN_CODE( 4, "1100110010", 0x332, 10) HUFFMAN_CODE( 5, "1100110011", 0x333, 10) HUFFMAN_CODE( 6, "1100110100", 0x334, 10) HUFFMAN_CODE( 7, "1100110101", 0x335, 10) HUFFMAN_CODE( 8, "1100110110", 0x336, 10) HUFFMAN_CODE( 9, "1100110111", 0x337, 10) HUFFMAN_CODE( 10, "10111000", 0xb8, 8) HUFFMAN_CODE( 11, "1100111000", 0x338, 10) HUFFMAN_CODE( 12, "1100111001", 0x339, 10) HUFFMAN_CODE( 13, "1100111010", 0x33a, 10) HUFFMAN_CODE( 14, "1100111011", 0x33b, 10) HUFFMAN_CODE( 15, "1100111100", 0x33c, 10) HUFFMAN_CODE( 16, "1100111101", 0x33d, 10) HUFFMAN_CODE( 17, "1100111110", 0x33e, 10) HUFFMAN_CODE( 18, "1100111111", 0x33f, 10) HUFFMAN_CODE( 19, "1101000000", 0x340, 10) HUFFMAN_CODE( 20, "1101000001", 0x341, 10) HUFFMAN_CODE( 21, "1101000010", 0x342, 10) HUFFMAN_CODE( 22, "1101000011", 0x343, 10) HUFFMAN_CODE( 23, "1101000100", 0x344, 10) HUFFMAN_CODE( 24, "1101000101", 0x345, 10) HUFFMAN_CODE( 25, "1101000110", 0x346, 10) HUFFMAN_CODE( 26, "1101000111", 0x347, 10) HUFFMAN_CODE( 27, "1101001000", 0x348, 10) HUFFMAN_CODE( 28, "1101001001", 0x349, 10) HUFFMAN_CODE( 29, "1101001010", 0x34a, 10) HUFFMAN_CODE( 30, "1101001011", 0x34b, 10) HUFFMAN_CODE( 31, "1101001100", 0x34c, 10) HUFFMAN_CODE( 32, "00100", 0x4, 5) HUFFMAN_CODE( 33, "1101001101", 0x34d, 10) HUFFMAN_CODE( 34, "1101001110", 0x34e, 10) HUFFMAN_CODE( 35, "1101001111", 0x34f, 10) HUFFMAN_CODE( 36, "1101010000", 0x350, 10) HUFFMAN_CODE( 37, "1101010001", 0x351, 10) HUFFMAN_CODE( 38, "1101010010", 0x352, 10) HUFFMAN_CODE( 39, "1010110", 0x56, 7) HUFFMAN_CODE( 40, "1101010011", 0x353, 10) HUFFMAN_CODE( 41, "1101010100", 0x354, 10) HUFFMAN_CODE( 42, "1101010101", 0x355, 10) HUFFMAN_CODE( 43, "1101010110", 0x356, 10) HUFFMAN_CODE( 44, "10111001", 0xb9, 8) HUFFMAN_CODE( 45, "110001000", 0x188, 9) HUFFMAN_CODE( 46, "1010111", 0x57, 7) HUFFMAN_CODE( 47, "1101010111", 0x357, 10) HUFFMAN_CODE( 48, "1101011000", 0x358, 10) HUFFMAN_CODE( 49, "1101011001", 0x359, 10) HUFFMAN_CODE( 50, "1101011010", 0x35a, 10) HUFFMAN_CODE( 51, "1101011011", 0x35b, 10) HUFFMAN_CODE( 52, "1101011100", 0x35c, 10) HUFFMAN_CODE( 53, "1101011101", 0x35d, 10) HUFFMAN_CODE( 54, "1101011110", 0x35e, 10) HUFFMAN_CODE( 55, "1101011111", 0x35f, 10) HUFFMAN_CODE( 56, "1101100000", 0x360, 10) HUFFMAN_CODE( 57, "1101100001", 0x361, 10) HUFFMAN_CODE( 58, "1101100010", 0x362, 10) HUFFMAN_CODE( 59, "1101100011", 0x363, 10) HUFFMAN_CODE( 60, "1101100100", 0x364, 10) HUFFMAN_CODE( 61, "1101100101", 0x365, 10) HUFFMAN_CODE( 62, "1101100110", 0x366, 10) HUFFMAN_CODE( 63, "10111010", 0xba, 8) HUFFMAN_CODE( 64, "1101100111", 0x367, 10) HUFFMAN_CODE( 65, "1101101000", 0x368, 10) HUFFMAN_CODE( 66, "10111011", 0xbb, 8) HUFFMAN_CODE( 67, "110001001", 0x189, 9) HUFFMAN_CODE( 68, "110001010", 0x18a, 9) HUFFMAN_CODE( 69, "110001011", 0x18b, 9) HUFFMAN_CODE( 70, "110001100", 0x18c, 9) HUFFMAN_CODE( 71, "110001101", 0x18d, 9) HUFFMAN_CODE( 72, "110001110", 0x18e, 9) HUFFMAN_CODE( 73, "10111100", 0xbc, 8) HUFFMAN_CODE( 74, "1101101001", 0x369, 10) HUFFMAN_CODE( 75, "1101101010", 0x36a, 10) HUFFMAN_CODE( 76, "110001111", 0x18f, 9) HUFFMAN_CODE( 77, "110010000", 0x190, 9) HUFFMAN_CODE( 78, "1101101011", 0x36b, 10) HUFFMAN_CODE( 79, "1101101100", 0x36c, 10) HUFFMAN_CODE( 80, "110010001", 0x191, 9) HUFFMAN_CODE( 81, "1101101101", 0x36d, 10) HUFFMAN_CODE( 82, "1101101110", 0x36e, 10) HUFFMAN_CODE( 83, 
"1101101111", 0x36f, 10) HUFFMAN_CODE( 84, "10111101", 0xbd, 8) HUFFMAN_CODE( 85, "1101110000", 0x370, 10) HUFFMAN_CODE( 86, "110010010", 0x192, 9) HUFFMAN_CODE( 87, "10111110", 0xbe, 8) HUFFMAN_CODE( 88, "1101110001", 0x371, 10) HUFFMAN_CODE( 89, "110010011", 0x193, 9) HUFFMAN_CODE( 90, "1101110010", 0x372, 10) HUFFMAN_CODE( 91, "1101110011", 0x373, 10) HUFFMAN_CODE( 92, "1101110100", 0x374, 10) HUFFMAN_CODE( 93, "1101110101", 0x375, 10) HUFFMAN_CODE( 94, "1101110110", 0x376, 10) HUFFMAN_CODE( 95, "1101110111", 0x377, 10) HUFFMAN_CODE( 96, "1101111000", 0x378, 10) HUFFMAN_CODE( 97, "00101", 0x5, 5) HUFFMAN_CODE( 98, "1011000", 0x58, 7) HUFFMAN_CODE( 99, "100000", 0x20, 6) HUFFMAN_CODE(100, "100001", 0x21, 6) HUFFMAN_CODE(101, "00110", 0x6, 5) HUFFMAN_CODE(102, "100010", 0x22, 6) HUFFMAN_CODE(103, "1011001", 0x59, 7) HUFFMAN_CODE(104, "100011", 0x23, 6) HUFFMAN_CODE(105, "00111", 0x7, 5) HUFFMAN_CODE(106, "10111111", 0xbf, 8) HUFFMAN_CODE(107, "100100", 0x24, 6) HUFFMAN_CODE(108, "100101", 0x25, 6) HUFFMAN_CODE(109, "100110", 0x26, 6) HUFFMAN_CODE(110, "01000", 0x8, 5) HUFFMAN_CODE(111, "01001", 0x9, 5) HUFFMAN_CODE(112, "1011010", 0x5a, 7) HUFFMAN_CODE(113, "110010100", 0x194, 9) HUFFMAN_CODE(114, "01010", 0xa, 5) HUFFMAN_CODE(115, "01011", 0xb, 5) HUFFMAN_CODE(116, "01100", 0xc, 5) HUFFMAN_CODE(117, "01101", 0xd, 5) HUFFMAN_CODE(118, "11000000", 0xc0, 8) HUFFMAN_CODE(119, "100111", 0x27, 6) HUFFMAN_CODE(120, "11000001", 0xc1, 8) HUFFMAN_CODE(121, "101000", 0x28, 6) HUFFMAN_CODE(122, "1101111001", 0x379, 10) HUFFMAN_CODE(123, "1101111010", 0x37a, 10) HUFFMAN_CODE(124, "1101111011", 0x37b, 10) HUFFMAN_CODE(125, "1101111100", 0x37c, 10) HUFFMAN_CODE(126, "1101111101", 0x37d, 10) HUFFMAN_CODE(127, "1101111110", 0x37e, 10) HUFFMAN_CODE(128, "1101111111", 0x37f, 10) HUFFMAN_CODE(129, "1110000000", 0x380, 10) HUFFMAN_CODE(130, "1110000001", 0x381, 10) HUFFMAN_CODE(131, "1110000010", 0x382, 10) HUFFMAN_CODE(132, "1110000011", 0x383, 10) HUFFMAN_CODE(133, "1110000100", 0x384, 10) HUFFMAN_CODE(134, "1110000101", 0x385, 10) HUFFMAN_CODE(135, "1110000110", 0x386, 10) HUFFMAN_CODE(136, "1110000111", 0x387, 10) HUFFMAN_CODE(137, "1110001000", 0x388, 10) HUFFMAN_CODE(138, "1110001001", 0x389, 10) HUFFMAN_CODE(139, "1110001010", 0x38a, 10) HUFFMAN_CODE(140, "1110001011", 0x38b, 10) HUFFMAN_CODE(141, "1110001100", 0x38c, 10) HUFFMAN_CODE(142, "1110001101", 0x38d, 10) HUFFMAN_CODE(143, "1110001110", 0x38e, 10) HUFFMAN_CODE(144, "1110001111", 0x38f, 10) HUFFMAN_CODE(145, "1110010000", 0x390, 10) HUFFMAN_CODE(146, "1110010001", 0x391, 10) HUFFMAN_CODE(147, "1110010010", 0x392, 10) HUFFMAN_CODE(148, "1110010011", 0x393, 10) HUFFMAN_CODE(149, "1110010100", 0x394, 10) HUFFMAN_CODE(150, "1110010101", 0x395, 10) HUFFMAN_CODE(151, "1110010110", 0x396, 10) HUFFMAN_CODE(152, "1110010111", 0x397, 10) HUFFMAN_CODE(153, "1110011000", 0x398, 10) HUFFMAN_CODE(154, "1110011001", 0x399, 10) HUFFMAN_CODE(155, "1110011010", 0x39a, 10) HUFFMAN_CODE(156, "1110011011", 0x39b, 10) HUFFMAN_CODE(157, "1110011100", 0x39c, 10) HUFFMAN_CODE(158, "1110011101", 0x39d, 10) HUFFMAN_CODE(159, "1110011110", 0x39e, 10) HUFFMAN_CODE(160, "1110011111", 0x39f, 10) HUFFMAN_CODE(161, "1110100000", 0x3a0, 10) HUFFMAN_CODE(162, "1110100001", 0x3a1, 10) HUFFMAN_CODE(163, "1110100010", 0x3a2, 10) HUFFMAN_CODE(164, "1110100011", 0x3a3, 10) HUFFMAN_CODE(165, "1110100100", 0x3a4, 10) HUFFMAN_CODE(166, "1110100101", 0x3a5, 10) HUFFMAN_CODE(167, "1110100110", 0x3a6, 10) HUFFMAN_CODE(168, "1110100111", 0x3a7, 10) HUFFMAN_CODE(169, "1110101000", 0x3a8, 
10) HUFFMAN_CODE(170, "1110101001", 0x3a9, 10) HUFFMAN_CODE(171, "1110101010", 0x3aa, 10) HUFFMAN_CODE(172, "1110101011", 0x3ab, 10) HUFFMAN_CODE(173, "1110101100", 0x3ac, 10) HUFFMAN_CODE(174, "1110101101", 0x3ad, 10) HUFFMAN_CODE(175, "1110101110", 0x3ae, 10) HUFFMAN_CODE(176, "1110101111", 0x3af, 10) HUFFMAN_CODE(177, "1110110000", 0x3b0, 10) HUFFMAN_CODE(178, "1110110001", 0x3b1, 10) HUFFMAN_CODE(179, "1110110010", 0x3b2, 10) HUFFMAN_CODE(180, "1110110011", 0x3b3, 10) HUFFMAN_CODE(181, "1110110100", 0x3b4, 10) HUFFMAN_CODE(182, "1110110101", 0x3b5, 10) HUFFMAN_CODE(183, "1110110110", 0x3b6, 10) HUFFMAN_CODE(184, "1110110111", 0x3b7, 10) HUFFMAN_CODE(185, "1110111000", 0x3b8, 10) HUFFMAN_CODE(186, "1110111001", 0x3b9, 10) HUFFMAN_CODE(187, "1110111010", 0x3ba, 10) HUFFMAN_CODE(188, "1110111011", 0x3bb, 10) HUFFMAN_CODE(189, "1110111100", 0x3bc, 10) HUFFMAN_CODE(190, "1110111101", 0x3bd, 10) HUFFMAN_CODE(191, "1110111110", 0x3be, 10) HUFFMAN_CODE(192, "1110111111", 0x3bf, 10) HUFFMAN_CODE(193, "1111000000", 0x3c0, 10) HUFFMAN_CODE(194, "1111000001", 0x3c1, 10) HUFFMAN_CODE(195, "1111000010", 0x3c2, 10) HUFFMAN_CODE(196, "1111000011", 0x3c3, 10) HUFFMAN_CODE(197, "1111000100", 0x3c4, 10) HUFFMAN_CODE(198, "1111000101", 0x3c5, 10) HUFFMAN_CODE(199, "1111000110", 0x3c6, 10) HUFFMAN_CODE(200, "1111000111", 0x3c7, 10) HUFFMAN_CODE(201, "1111001000", 0x3c8, 10) HUFFMAN_CODE(202, "1111001001", 0x3c9, 10) HUFFMAN_CODE(203, "1111001010", 0x3ca, 10) HUFFMAN_CODE(204, "1111001011", 0x3cb, 10) HUFFMAN_CODE(205, "1111001100", 0x3cc, 10) HUFFMAN_CODE(206, "1111001101", 0x3cd, 10) HUFFMAN_CODE(207, "1111001110", 0x3ce, 10) HUFFMAN_CODE(208, "1111001111", 0x3cf, 10) HUFFMAN_CODE(209, "1111010000", 0x3d0, 10) HUFFMAN_CODE(210, "1111010001", 0x3d1, 10) HUFFMAN_CODE(211, "1111010010", 0x3d2, 10) HUFFMAN_CODE(212, "1111010011", 0x3d3, 10) HUFFMAN_CODE(213, "1111010100", 0x3d4, 10) HUFFMAN_CODE(214, "1111010101", 0x3d5, 10) HUFFMAN_CODE(215, "1111010110", 0x3d6, 10) HUFFMAN_CODE(216, "1111010111", 0x3d7, 10) HUFFMAN_CODE(217, "1111011000", 0x3d8, 10) HUFFMAN_CODE(218, "1111011001", 0x3d9, 10) HUFFMAN_CODE(219, "1111011010", 0x3da, 10) HUFFMAN_CODE(220, "1111011011", 0x3db, 10) HUFFMAN_CODE(221, "1111011100", 0x3dc, 10) HUFFMAN_CODE(222, "1111011101", 0x3dd, 10) HUFFMAN_CODE(223, "1111011110", 0x3de, 10) HUFFMAN_CODE(224, "1111011111", 0x3df, 10) HUFFMAN_CODE(225, "1111100000", 0x3e0, 10) HUFFMAN_CODE(226, "1111100001", 0x3e1, 10) HUFFMAN_CODE(227, "1111100010", 0x3e2, 10) HUFFMAN_CODE(228, "1111100011", 0x3e3, 10) HUFFMAN_CODE(229, "1111100100", 0x3e4, 10) HUFFMAN_CODE(230, "1111100101", 0x3e5, 10) HUFFMAN_CODE(231, "1111100110", 0x3e6, 10) HUFFMAN_CODE(232, "1111100111", 0x3e7, 10) HUFFMAN_CODE(233, "1111101000", 0x3e8, 10) HUFFMAN_CODE(234, "1111101001", 0x3e9, 10) HUFFMAN_CODE(235, "1111101010", 0x3ea, 10) HUFFMAN_CODE(236, "1111101011", 0x3eb, 10) HUFFMAN_CODE(237, "1111101100", 0x3ec, 10) HUFFMAN_CODE(238, "1111101101", 0x3ed, 10) HUFFMAN_CODE(239, "1111101110", 0x3ee, 10) HUFFMAN_CODE(240, "1111101111", 0x3ef, 10) HUFFMAN_CODE(241, "1111110000", 0x3f0, 10) HUFFMAN_CODE(242, "1111110001", 0x3f1, 10) HUFFMAN_CODE(243, "1111110010", 0x3f2, 10) HUFFMAN_CODE(244, "1111110011", 0x3f3, 10) HUFFMAN_CODE(245, "1111110100", 0x3f4, 10) HUFFMAN_CODE(246, "1111110101", 0x3f5, 10) HUFFMAN_CODE(247, "1111110110", 0x3f6, 10) HUFFMAN_CODE(248, "1111110111", 0x3f7, 10) HUFFMAN_CODE(249, "1111111000", 0x3f8, 10) HUFFMAN_CODE(250, "1111111001", 0x3f9, 10) HUFFMAN_CODE(251, "1111111010", 0x3fa, 10) HUFFMAN_CODE(252, 
"1111111011", 0x3fb, 10) HUFFMAN_CODE(253, "1111111100", 0x3fc, 10) HUFFMAN_CODE(254, "1111111101", 0x3fd, 10) HUFFMAN_CODE(255, "1111111110", 0x3fe, 10) aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/000077500000000000000000000000001456575232400216425ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.clang-format000066400000000000000000000031611456575232400242160ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.clang-tidy000066400000000000000000000012751456575232400237030ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*' WarningsAsErrors: '*' HeaderFilterRegex: '\./*' FormatStyle: 'file' # Use empty line filter to skip linting code we don't own CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSufix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h,-assert.h' ... aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/000077500000000000000000000000001456575232400232025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400253655ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045371456575232400302070ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? A clear and concise description of the bug. 
validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. validations: required: false - type: input id: aws-c-event-stream-version attributes: label: aws-c-event-stream version used validations: required: true - type: input id: compiler-version attributes: label: Compiler and version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003371456575232400273600ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-c-event-stream/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400307560ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400312340ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." 
validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400270010ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/workflows/000077500000000000000000000000001456575232400252375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/workflows/ci.yml000066400000000000000000000144411456575232400263610ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-event-stream LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: linux-compat: runs-on: ubuntu-20.04 # latest strategy: fail-fast: false matrix: image: - manylinux1-x64 - manylinux1-x86 - manylinux2014-x64 - manylinux2014-x86 - al2-x64 - fedora-34-x64 - opensuse-leap - rhel8-x64 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} clang-sanitizers: runs-on: ubuntu-20.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ 
env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-event-stream\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-event-stream\build\aws-c-event-stream osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. 
downstream: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/workflows/clang-format.yml000066400000000000000000000005621456575232400303370ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.13 with: # List of extensions to check extensions: c,h exclude: './bin' clangFormatVersion: 11.1.0 aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400320060ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400326760ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.github/workflows/stale_issue.yml000066400000000000000000000045011456575232400303020ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deserves. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in a few days. We encourage you to check if this is still an issue in the latest release. Because it has been a few days since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! 
It looks like this PR hasn’t been active in a few days, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/.gitignore000066400000000000000000000010461456575232400236330ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags .vscode #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* *-build # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/CMakeLists.txt000066400000000000000000000061531456575232400244070ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
cmake_minimum_required (VERSION 3.1) project (aws-c-event-stream C) if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsSharedLibSetup) include(AwsSanitizers) include(CheckCCompilerFlag) include(AwsFindPackage) include(AwsCheckHeaders) if(NOT MSVC) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) endif() file(GLOB AWS_EVENT_STREAM_HEADERS "include/aws/event-stream/*.h" ) file(GLOB AWS_EVENT_STREAM_SRC "source/*.c" ) if(WIN32) if(MSVC) source_group("Header Files\\aws\\event-stream" FILES ${AWS_EVENT_STREAM_HEADERS}) source_group("Source Files" FILES ${AWS_EVENT_STREAM_SRC}) endif() endif() file(GLOB EVENT_STREAM_HEADERS ${AWS_EVENT_STREAM_HEADERS} ) file(GLOB EVENT_STREAM_SRC ${AWS_EVENT_STREAM_SRC} ) add_library(${PROJECT_NAME} ${EVENT_STREAM_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_add_sanitizers(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_EVENT_STREAM") aws_check_headers(${PROJECT_NAME} ${AWS_EVENT_STREAM_HEADERS}) target_include_directories(${PROJECT_NAME} PUBLIC $ $) set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) aws_use_package(aws-c-io) aws_use_package(aws-c-common) aws_use_package(aws-checksums) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) install(FILES ${AWS_EVENT_STREAM_HEADERS} DESTINATION "include/aws/event-stream" COMPONENT Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}/" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) include(CTest) enable_testing() if (BUILD_TESTING) add_subdirectory(tests) if(NOT MSVC) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) endif() add_subdirectory(bin) endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/CODE_OF_CONDUCT.md000066400000000000000000000004671456575232400244500ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/CONTRIBUTING.md000066400000000000000000000070041456575232400240740ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. 
Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-event-stream/issues), or [recently closed](https://github.com/awslabs/aws-c-event-stream/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-event-stream/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-event-stream/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/LICENSE000066400000000000000000000261361456575232400226570ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/NOTICE000066400000000000000000000001731456575232400225470ustar00rootroot00000000000000AWS C Event Stream Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/README.md000066400000000000000000000035321456575232400231240ustar00rootroot00000000000000## AWS C Event Stream C99 implementation of the vnd.amazon.event-stream content-type. ## License This library is licensed under the Apache 2.0 License. ## Usage ### Building CMake 3.1+ is required to build. `` must be an absolute path in the following instructions. #### Linux-Only Dependencies If you are building on Linux, you will need to build aws-lc and s2n-tls first. ``` git clone git@github.com:awslabs/aws-lc.git cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-lc/build --target install git clone git@github.com:aws/s2n-tls.git cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build s2n-tls/build --target install ``` #### Building aws-c-event-stream and Remaining Dependencies ``` git clone git@github.com:awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-checksums.git cmake -S aws-checksums -B aws-checksums/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-checksums/build --target install git clone git@github.com:awslabs/aws-c-cal.git cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-cal/build --target install git clone git@github.com:awslabs/aws-c-io.git cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-io/build --target install git clone git@github.com:awslabs/aws-c-event-stream.git cmake -S aws-c-event-stream -B aws-c-event-stream/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-event-stream/build --target install ``` aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/bin/000077500000000000000000000000001456575232400224125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/bin/CMakeLists.txt000066400000000000000000000015001456575232400251460ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
add_executable(aws-c-event-stream-pipe "event_stream_pipe.c") aws_set_common_properties(aws-c-event-stream-pipe) target_link_libraries(aws-c-event-stream-pipe PRIVATE ${PROJECT_NAME}) set_target_properties(aws-c-event-stream-pipe PROPERTIES LINKER_LANGUAGE C) set_property(TARGET aws-c-event-stream-pipe PROPERTY C_STANDARD 99) add_executable(aws-c-event-stream-write-test-case "event_stream_write_test_case.c") aws_set_common_properties(aws-c-event-stream-write-test-case) target_link_libraries(aws-c-event-stream-write-test-case PRIVATE ${PROJECT_NAME}) set_target_properties(aws-c-event-stream-write-test-case PROPERTIES LINKER_LANGUAGE C) set_property(TARGET aws-c-event-stream-write-test-case PROPERTY C_STANDARD 99) aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/bin/event_stream_pipe.c000066400000000000000000000116351456575232400262750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include /** * 4996 is to disable unsafe function fopen vs fopen_s * 4706 is to disable assignment expression inside condition expression at line 133. */ #ifdef _MSC_VER #pragma warning (disable: 4996 4706) #endif static void s_on_payload_segment( struct aws_event_stream_streaming_decoder *decoder, struct aws_byte_buf *data, int8_t final_segment, void *user_data) { (void)decoder; (void)final_segment; (void)user_data; if (data->len) { fwrite(data->buffer, sizeof(uint8_t), data->len, stdout); } } static void s_on_prelude_received( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, void *user_data) { (void)decoder; (void)user_data; fprintf(stdout, "\n--------------------------------------------------------------------------------\n"); fprintf( stdout, "total_length = 0x%08" PRIx32 "\nheaders_len = 0x%08" PRIx32 "\nprelude_crc = 0x%08" PRIx32 "\n\n", prelude->total_len, prelude->headers_len, prelude->prelude_crc); } static void s_on_header_received( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, struct aws_event_stream_header_value_pair *header, void *user_data) { (void)decoder; (void)prelude; (void)user_data; fwrite(header->header_name, sizeof(uint8_t), (size_t)header->header_name_len, stdout); fprintf(stdout, ": "); if (header->header_value_type == AWS_EVENT_STREAM_HEADER_BOOL_FALSE) { fprintf(stdout, "false"); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_BOOL_TRUE) { fprintf(stdout, "true"); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE) { int8_t int_value = aws_event_stream_header_value_as_byte(header); fprintf(stdout, "%d", (int)int_value); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_INT16) { int16_t int_value = aws_event_stream_header_value_as_int16(header); fprintf(stdout, "%d", (int)int_value); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_INT32) { int32_t int_value = aws_event_stream_header_value_as_int32(header); fprintf(stdout, "%d", (int)int_value); } else if ( header->header_value_type == AWS_EVENT_STREAM_HEADER_INT64 || header->header_value_type == AWS_EVENT_STREAM_HEADER_TIMESTAMP) { int64_t int_value = aws_event_stream_header_value_as_int64(header); fprintf(stdout, "%lld", (long long)int_value); } else { if (header->header_value_type == AWS_EVENT_STREAM_HEADER_UUID) { struct aws_byte_buf uuid = aws_event_stream_header_value_as_uuid(header); fwrite(uuid.buffer, sizeof(uint8_t), uuid.len, 
stdout); } else { struct aws_byte_buf byte_buf = aws_event_stream_header_value_as_bytebuf(header); fwrite(byte_buf.buffer, sizeof(uint8_t), byte_buf.len, stdout); } } fprintf(stdout, "\n"); } static void s_on_error( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, int error_code, const char *message, void *user_data) { (void)decoder; (void)prelude; (void)user_data; fprintf( stderr, "Error encountered: Code: %d, Error Str: %s, Message: %s\n", error_code, aws_error_debug_str(error_code), message); exit(-1); } int main(void) { struct aws_allocator *alloc = aws_default_allocator(); aws_event_stream_library_init(alloc); struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init( &decoder, alloc, s_on_payload_segment, s_on_prelude_received, s_on_header_received, s_on_error, NULL); setvbuf(stdin, NULL, _IONBF, 0); uint8_t data_buffer[1024]; size_t read_val = 0; while ((read_val = fread(data_buffer, sizeof(uint8_t), sizeof(data_buffer), stdin))) { if (read_val > 0) { struct aws_byte_buf decode_data = aws_byte_buf_from_array(data_buffer, read_val); int err_code = aws_event_stream_streaming_decoder_pump(&decoder, &decode_data); if (err_code) { fprintf(stderr, "Error occurred during parsing. Error code: %d\n", err_code); aws_event_stream_streaming_decoder_clean_up(&decoder); return -1; } continue; } if (feof(stdin)) { fprintf(stdout, "\n"); return 0; } if (ferror(stdin)) { perror("Error reading from stdin\n"); return ferror(stdin); } } return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/bin/event_stream_write_test_case.c000066400000000000000000000217711456575232400305260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #ifdef _WIN32 # define DELIM "\\" #else # define DELIM "/" #endif /** * 4996 is to disable unsafe function fopen * 4310 is to disable type casting to smaller type at line 215, which is needed to avoid gcc overflow warning when casting int to int8_t */ #ifdef _MSC_VER #pragma warning (disable: 4996 4310) #endif static void write_negative_test_case( const char *root_dir, const char *test_name, const uint8_t *buffer, size_t buffer_size, const char *err_msg) { size_t dir_len = strlen(root_dir); size_t encoded_len = strlen("encoded") + strlen("negative") + strlen(test_name) + 2; size_t decoded_len = strlen("decoded") + strlen("negative") + strlen(test_name) + 2; size_t enc_output_file_size = dir_len + 1 + encoded_len + 1; char *enc_output_file = (char *)malloc(enc_output_file_size); snprintf(enc_output_file, enc_output_file_size, "%s%s%s%s%s%s%s", root_dir, DELIM, "encoded", DELIM, "negative", DELIM, test_name); size_t dec_output_file_size = dir_len + 1 + decoded_len + 1; char *dec_output_file = (char *)malloc(dec_output_file_size); snprintf(dec_output_file, dec_output_file_size, "%s%s%s%s%s%s%s", root_dir, DELIM, "decoded", DELIM, "negative", DELIM, test_name); FILE *enc = fopen(enc_output_file, "w"); if (!enc) { fprintf(stderr, "couldn't write to %s", enc_output_file); exit(-1); } fwrite(buffer, sizeof(uint8_t), buffer_size, enc); fflush(enc); fclose(enc); FILE *dec = fopen(dec_output_file, "w"); if (!dec) { fprintf(stderr, "couldn't write to %s", dec_output_file); exit(-1); } fwrite(err_msg, sizeof(char), strlen(err_msg), dec); fflush(dec); fclose(dec); free(enc_output_file); free(dec_output_file); } static void write_positive_test_case( const char *root_dir, const char *test_name, struct aws_event_stream_message *message) { size_t dir_len = strlen(root_dir); size_t encoded_len = strlen("encoded") + strlen("positive") + strlen(test_name) + 2; size_t decoded_len = strlen("decoded") + strlen("positive") + strlen(test_name) + 2; size_t enc_output_file_size = dir_len + 1 + encoded_len + 1; char *enc_output_file = (char *)malloc(enc_output_file_size); snprintf(enc_output_file, enc_output_file_size, "%s%s%s%s%s%s%s", root_dir, DELIM, "encoded", DELIM, "positive", DELIM, test_name); size_t dec_output_file_size = dir_len + 1 + decoded_len + 1; char *dec_output_file = (char *)malloc(dec_output_file_size); snprintf(dec_output_file, dec_output_file_size, "%s%s%s%s%s%s%s", root_dir, DELIM, "decoded", DELIM, "positive", DELIM, test_name); FILE *enc = fopen(enc_output_file, "w"); if (!enc) { fprintf(stderr, "couldn't write to %s", enc_output_file); exit(-1); } fwrite( aws_event_stream_message_buffer(message), sizeof(uint8_t), aws_event_stream_message_total_length(message), enc); fflush(enc); fclose(enc); FILE *dec = fopen(dec_output_file, "w"); if (!dec) { fprintf(stderr, "couldn't write to %s", dec_output_file); exit(-1); } aws_event_stream_message_to_debug_str(dec, message); fflush(dec); fclose(dec); free(enc_output_file); free(dec_output_file); } int main(void) { struct aws_array_list headers; struct aws_allocator *alloc = aws_default_allocator(); aws_event_stream_headers_list_init(&headers, alloc); struct aws_event_stream_message msg; aws_event_stream_message_init(&msg, alloc, &headers, NULL); write_positive_test_case(".", "empty_message", &msg); struct aws_byte_buf payload = aws_byte_buf_from_c_str("{'foo':'bar'}"); aws_event_stream_message_clean_up(&msg); aws_event_stream_message_init(&msg, alloc, &headers, &payload); write_positive_test_case(".", 
"payload_no_headers", &msg); aws_event_stream_message_clean_up(&msg); static const char content_type[] = "content-type"; static const char json[] = "application/json"; aws_event_stream_add_string_header(&headers, content_type, sizeof(content_type) - 1, json, sizeof(json) - 1, 0); aws_event_stream_message_init(&msg, alloc, &headers, &payload); write_positive_test_case(".", "payload_one_str_header", &msg); /* corrupt length */ uint32_t original_length = aws_event_stream_message_total_length(&msg); uint8_t *buffer_cpy = aws_mem_acquire(alloc, original_length); memcpy(buffer_cpy, aws_event_stream_message_buffer(&msg), original_length); aws_write_u32(original_length + 1, buffer_cpy); write_negative_test_case(".", "corrupted_length", buffer_cpy, original_length, "Prelude checksum mismatch"); aws_mem_release(alloc, buffer_cpy); /* corrupt header length */ buffer_cpy = aws_mem_acquire(alloc, original_length); memcpy(buffer_cpy, aws_event_stream_message_buffer(&msg), original_length); uint32_t original_hdr_len = aws_event_stream_message_headers_len(&msg); aws_write_u32(original_hdr_len + 1, buffer_cpy + 4); write_negative_test_case(".", "corrupted_header_len", buffer_cpy, original_length, "Prelude checksum mismatch"); aws_mem_release(alloc, buffer_cpy); /* corrupt headers */ buffer_cpy = aws_mem_acquire(alloc, original_length); memcpy(buffer_cpy, aws_event_stream_message_buffer(&msg), original_length); uint32_t hdr_len = aws_event_stream_message_headers_len(&msg); buffer_cpy[hdr_len + AWS_EVENT_STREAM_PRELUDE_LENGTH + 1] = 'a'; write_negative_test_case(".", "corrupted_headers", buffer_cpy, original_length, "Message checksum mismatch"); aws_mem_release(alloc, buffer_cpy); buffer_cpy = aws_mem_acquire(alloc, original_length); memcpy(buffer_cpy, aws_event_stream_message_buffer(&msg), original_length); aws_event_stream_message_clean_up(&msg); /* corrupt payload */ aws_event_stream_message_init(&msg, alloc, NULL, &payload); ((uint8_t *)aws_event_stream_message_payload(&msg))[0] = '['; write_negative_test_case( ".", "corrupted_payload", aws_event_stream_message_buffer(&msg), aws_event_stream_message_total_length(&msg), "Message checksum mismatch"); aws_event_stream_message_clean_up(&msg); /* int header */ static const char event_type[] = "event-type"; aws_array_list_clear(&headers); aws_event_stream_add_int32_header(&headers, event_type, sizeof(event_type) - 1, 0x0000A00C); aws_event_stream_message_init(&msg, alloc, &headers, &payload); write_positive_test_case(".", "int32_header", &msg); aws_event_stream_message_clean_up(&msg); aws_array_list_clear(&headers); /* one of every header type */ aws_event_stream_add_int32_header(&headers, event_type, sizeof(event_type) - 1, 0x0000A00C); aws_event_stream_add_string_header(&headers, content_type, sizeof(content_type) - 1, json, sizeof(json) - 1, 0); static const char bool_false[] = "bool false"; aws_event_stream_add_bool_header(&headers, bool_false, sizeof(bool_false) - 1, 0); static const char bool_true[] = "bool true"; aws_event_stream_add_bool_header(&headers, bool_true, sizeof(bool_true) - 1, 1); static const char byte_hdr[] = "byte"; aws_event_stream_add_byte_header(&headers, byte_hdr, sizeof(byte_hdr) - 1, (int8_t)0xcf); static const char byte_buf_hdr[] = "byte buf"; static const char byte_buf[] = "I'm a little teapot!"; aws_event_stream_add_bytebuf_header( &headers, byte_buf_hdr, sizeof(byte_buf_hdr) - 1, (uint8_t *)byte_buf, sizeof(byte_buf) - 1, 0); static const char timestamp_hdr[] = "timestamp"; aws_event_stream_add_timestamp_header(&headers, 
timestamp_hdr, sizeof(timestamp_hdr) - 1, 8675309); static const char int16_hdr[] = "int16"; aws_event_stream_add_int16_header(&headers, int16_hdr, sizeof(int16_hdr) - 1, 42); static const char int64_hdr[] = "int64"; aws_event_stream_add_int64_header(&headers, int64_hdr, sizeof(int64_hdr) - 1, 42424242); static const char uuid_hdr[] = "uuid"; static const uint8_t uuid[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; aws_event_stream_add_uuid_header(&headers, uuid_hdr, sizeof(uuid_hdr) - 1, (uint8_t *)uuid); aws_event_stream_message_init(&msg, alloc, &headers, &payload); struct aws_event_stream_message sanity_check_message; struct aws_byte_buf message_buffer = aws_byte_buf_from_array(aws_event_stream_message_buffer(&msg), aws_event_stream_message_total_length(&msg)); int err = aws_event_stream_message_from_buffer(&sanity_check_message, alloc, &message_buffer); if (err) { fprintf(stderr, "failed to parse what should have been a valid message\n"); exit(-1); } write_positive_test_case(".", "all_headers", &msg); aws_event_stream_message_clean_up(&msg); aws_event_stream_headers_list_cleanup(&headers); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/builder.json000066400000000000000000000002461456575232400241650ustar00rootroot00000000000000{ "name": "aws-c-event-stream", "upstream": [ { "name": "aws-c-common" }, { "name": "aws-c-io" }, { "name": "aws-checksums" } ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/clang-tidy/000077500000000000000000000000001456575232400236755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/clang-tidy/run-clang-tidy.sh000077500000000000000000000004211456575232400270660ustar00rootroot00000000000000#!/bin/bash SOURCE_FILES=`find source include tests -type f -name '*.h' -o -name '*.c'` for i in $SOURCE_FILES do clang-tidy -config="`cat .clang-tidy`" $i -- -Iinclude if [ $? -ne 1 ] then echo "$i failed clang-tidy check." FAIL=1 fi done aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/cmake/000077500000000000000000000000001456575232400227225ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/cmake/aws-c-event-stream-config.cmake000066400000000000000000000011271456575232400306120ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-io) find_dependency(aws-checksums) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/format-check.sh000077500000000000000000000007631456575232400245520ustar00rootroot00000000000000#!/bin/bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." 
FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/000077500000000000000000000000001456575232400232655ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/000077500000000000000000000000001456575232400240575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/000077500000000000000000000000001456575232400264715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/event_stream.h000066400000000000000000000561361456575232400313510ustar00rootroot00000000000000#ifndef AWS_EVENT_STREAM_H_ #define AWS_EVENT_STREAM_H_ /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_EVENT_STREAM_PACKAGE_ID 4 /* max message size is 16MB */ #define AWS_EVENT_STREAM_MAX_MESSAGE_SIZE (16 * 1024 * 1024) /* max header size is 128kb */ #define AWS_EVENT_STREAM_MAX_HEADERS_SIZE (128 * 1024) /* Max header name length is 127 bytes */ #define AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX (INT8_MAX) /* Max header static value length is 16 bytes */ #define AWS_EVENT_STREAM_HEADER_STATIC_VALUE_LEN_MAX (16) enum aws_event_stream_errors { AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_EVENT_STREAM_PACKAGE_ID), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN, AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED, AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE, AWS_ERROR_EVENT_STREAM_MESSAGE_CHECKSUM_FAILURE, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN, AWS_ERROR_EVENT_STREAM_MESSAGE_UNKNOWN_HEADER_TYPE, AWS_ERROR_EVENT_STREAM_MESSAGE_PARSER_ILLEGAL_STATE, AWS_ERROR_EVENT_STREAM_RPC_CONNECTION_CLOSED, AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR, AWS_ERROR_EVENT_STREAM_RPC_STREAM_CLOSED, AWS_ERROR_EVENT_STREAM_RPC_STREAM_NOT_ACTIVATED, AWS_ERROR_EVENT_STREAM_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_EVENT_STREAM_PACKAGE_ID), }; enum aws_event_stream_log_subject { AWS_LS_EVENT_STREAM_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_EVENT_STREAM_PACKAGE_ID), AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, AWS_LS_EVENT_STREAM_RPC_SERVER, AWS_LS_EVENT_STREAM_RPC_CLIENT, AWS_LS_EVENT_STREAM_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_EVENT_STREAM_PACKAGE_ID), }; struct aws_event_stream_message_prelude { uint32_t total_len; uint32_t headers_len; uint32_t prelude_crc; }; struct aws_event_stream_message { struct aws_allocator *alloc; struct aws_byte_buf message_buffer; }; #define AWS_EVENT_STREAM_PRELUDE_LENGTH (uint32_t)(sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t)) #define AWS_EVENT_STREAM_TRAILER_LENGTH (uint32_t)(sizeof(uint32_t)) enum aws_event_stream_header_value_type { AWS_EVENT_STREAM_HEADER_BOOL_TRUE = 0, AWS_EVENT_STREAM_HEADER_BOOL_FALSE, AWS_EVENT_STREAM_HEADER_BYTE, AWS_EVENT_STREAM_HEADER_INT16, AWS_EVENT_STREAM_HEADER_INT32, AWS_EVENT_STREAM_HEADER_INT64, AWS_EVENT_STREAM_HEADER_BYTE_BUF, AWS_EVENT_STREAM_HEADER_STRING, /* 64 bit integer (millis since epoch) */ AWS_EVENT_STREAM_HEADER_TIMESTAMP, AWS_EVENT_STREAM_HEADER_UUID }; struct aws_event_stream_header_value_pair { uint8_t header_name_len; char header_name[INT8_MAX]; enum aws_event_stream_header_value_type header_value_type; union { uint8_t *variable_len_val; uint8_t static_val[AWS_EVENT_STREAM_HEADER_STATIC_VALUE_LEN_MAX]; } header_value; uint16_t header_value_len; int8_t value_owned; }; struct aws_event_stream_streaming_decoder; 
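/*
 * A minimal usage sketch for the message-building APIs declared in this header
 * (illustrative only, not part of the original header; error handling is
 * omitted, the function name and the header name/value literals are example
 * values chosen for the sketch).
 */
#if 0 /* example code, excluded from compilation */
static void s_example_build_message(void) {
    struct aws_allocator *alloc = aws_default_allocator();
    aws_event_stream_library_init(alloc);

    /* collect headers in a dynamically sized list */
    struct aws_array_list headers;
    aws_event_stream_headers_list_init(&headers, alloc);

    static const char name[] = "content-type";
    static const char value[] = "application/json";
    aws_event_stream_add_string_header(&headers, name, sizeof(name) - 1, value, sizeof(value) - 1, 0);

    /* wrap a payload; aws_event_stream_message_init() computes both CRCs */
    struct aws_byte_buf payload = aws_byte_buf_from_c_str("{'foo':'bar'}");
    struct aws_event_stream_message msg;
    aws_event_stream_message_init(&msg, alloc, &headers, &payload);

    /* the encoded bytes, ready for transport */
    const uint8_t *wire_bytes = aws_event_stream_message_buffer(&msg);
    uint32_t wire_len = aws_event_stream_message_total_length(&msg);
    (void)wire_bytes;
    (void)wire_len;

    aws_event_stream_message_clean_up(&msg);
    aws_event_stream_headers_list_cleanup(&headers);
    aws_event_stream_library_clean_up();
}
#endif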
typedef int(aws_event_stream_process_state_fn)( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed); /** * Called by aws_event_stream_streaming_decoder when payload data has been received. * 'data' doesn't belong to you, so copy the data if it is needed beyond the scope of your callback. * final_segment == 1 indicates the current data is the last payload buffer for that message. */ typedef void(aws_event_stream_process_on_payload_segment_fn)( struct aws_event_stream_streaming_decoder *decoder, struct aws_byte_buf *payload, int8_t final_segment, void *user_data); /** * Called by aws_event_stream_streaming_decoder when a new message has arrived. The prelude will contain metadata * about the message. At this point no headers or payload have been received. prelude is copyable. */ typedef void(aws_event_stream_prelude_received_fn)( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, void *user_data); /** * Called by aws_event_stream_streaming_decoder when a header is encountered. 'header' is not yours. Copy the data * you want from it if your scope extends beyond your callback. */ typedef void(aws_event_stream_header_received_fn)( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, struct aws_event_stream_header_value_pair *header, void *user_data); /** * Called by aws_event_stream_streaming_decoder when a message decoding is complete * and crc is verified. */ typedef void(aws_event_stream_on_complete_fn)( struct aws_event_stream_streaming_decoder *decoder, uint32_t message_crc, void *user_data); /** * Called by aws_event_stream_streaming_decoder when an error is encountered. The decoder is not in a good state for * usage after this callback. */ typedef void(aws_event_stream_on_error_fn)( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, int error_code, const char *message, void *user_data); struct aws_event_stream_streaming_decoder { struct aws_allocator *alloc; uint8_t working_buffer[AWS_EVENT_STREAM_PRELUDE_LENGTH]; size_t message_pos; uint32_t running_crc; size_t current_header_name_offset; size_t current_header_value_offset; struct aws_event_stream_header_value_pair current_header; struct aws_event_stream_message_prelude prelude; aws_event_stream_process_state_fn *state; aws_event_stream_process_on_payload_segment_fn *on_payload; aws_event_stream_prelude_received_fn *on_prelude; aws_event_stream_header_received_fn *on_header; aws_event_stream_on_complete_fn *on_complete; aws_event_stream_on_error_fn *on_error; void *user_context; }; struct aws_event_stream_streaming_decoder_options { /** * (Required) * Invoked repeatedly as payload segments are received. * See `aws_event_stream_process_on_payload_segment_fn`. */ aws_event_stream_process_on_payload_segment_fn *on_payload_segment; /** * (Required) * Invoked when a new message has arrived. The prelude will contain metadata about the message. * See `aws_event_stream_prelude_received_fn`. */ aws_event_stream_prelude_received_fn *on_prelude; /** * (Required) * Invoked repeatedly as headers are received. * See `aws_event_stream_header_received_fn`. */ aws_event_stream_header_received_fn *on_header; /** * (Optional) * Invoked if a message is decoded successfully. * See `aws_event_stream_on_complete_fn`. */ aws_event_stream_on_complete_fn *on_complete; /** * (Required) * Invoked when an error is encountered.
The decoder is not in a good state for usage after this callback. * See `aws_event_stream_on_error_fn`. */ aws_event_stream_on_error_fn *on_error; /** * (Optional) * user_data passed to callbacks. */ void *user_data; }; AWS_EXTERN_C_BEGIN /** * Initializes with a list of headers, the payload, and a payload length. CRCs will be computed for you. * If headers or payload is NULL, then the fields will be appropriately set to indicate no headers and/or no payload. * Both payload and headers will result in an allocation. */ AWS_EVENT_STREAM_API int aws_event_stream_message_init( struct aws_event_stream_message *message, struct aws_allocator *alloc, const struct aws_array_list *headers, const struct aws_byte_buf *payload); /** * Zero allocation, Zero copy. The message will simply wrap the buffer. The message functions are only useful as long as * buffer is referencable memory. */ AWS_EVENT_STREAM_API int aws_event_stream_message_from_buffer( struct aws_event_stream_message *message, struct aws_allocator *alloc, struct aws_byte_buf *buffer); /** * Allocates memory and copies buffer. Otherwise the same as aws_aws_event_stream_message_from_buffer. This is slower, * but possibly safer. */ AWS_EVENT_STREAM_API int aws_event_stream_message_from_buffer_copy( struct aws_event_stream_message *message, struct aws_allocator *alloc, const struct aws_byte_buf *buffer); /** * Cleans up any internally allocated memory. Always call this for API compatibility reasons, even if you only used the * aws_aws_event_stream_message_from_buffer function. */ AWS_EVENT_STREAM_API void aws_event_stream_message_clean_up(struct aws_event_stream_message *message); /** * Returns the total length of the message (including the length field). */ AWS_EVENT_STREAM_API uint32_t aws_event_stream_message_total_length(const struct aws_event_stream_message *message); /** * Returns the length of the headers portion of the message. */ AWS_EVENT_STREAM_API uint32_t aws_event_stream_message_headers_len(const struct aws_event_stream_message *message); /** * Returns the prelude crc (crc32) */ AWS_EVENT_STREAM_API uint32_t aws_event_stream_message_prelude_crc(const struct aws_event_stream_message *message); /** * Writes the message to fd in json format. All strings and binary fields are base64 encoded. */ AWS_EVENT_STREAM_API int aws_event_stream_message_to_debug_str( FILE *fd, const struct aws_event_stream_message *message); /** * Adds the headers for the message to list. The memory in each header is owned as part of the message, do not free it * or pass its address around. */ AWS_EVENT_STREAM_API int aws_event_stream_message_headers( const struct aws_event_stream_message *message, struct aws_array_list *headers); /** * Returns a pointer to the beginning of the message payload. */ AWS_EVENT_STREAM_API const uint8_t *aws_event_stream_message_payload(const struct aws_event_stream_message *message); /** * Returns the length of the message payload. */ AWS_EVENT_STREAM_API uint32_t aws_event_stream_message_payload_len(const struct aws_event_stream_message *message); /** * Returns the checksum of the entire message (crc32) */ AWS_EVENT_STREAM_API uint32_t aws_event_stream_message_message_crc(const struct aws_event_stream_message *message); /** * Returns the message as a buffer ready for transport. 
*/ AWS_EVENT_STREAM_API const uint8_t *aws_event_stream_message_buffer(const struct aws_event_stream_message *message); AWS_EVENT_STREAM_API uint32_t aws_event_stream_compute_headers_required_buffer_len(const struct aws_array_list *headers); /** * Writes headers to buf assuming buf is large enough to hold the data. Prefer this function over the unsafe variant * 'aws_event_stream_write_headers_to_buffer'. * * Returns AWS_OP_SUCCESS if the headers were successfully and completely written and AWS_OP_ERR otherwise. */ AWS_EVENT_STREAM_API int aws_event_stream_write_headers_to_buffer_safe( const struct aws_array_list *headers, struct aws_byte_buf *buf); /** * Deprecated in favor of 'aws_event_stream_write_headers_to_buffer_safe' as this API is unsafe. * * Writes headers to buffer and returns the length of bytes written to buffer. Assumes buffer is large enough to * store the headers. */ AWS_EVENT_STREAM_API size_t aws_event_stream_write_headers_to_buffer(const struct aws_array_list *headers, uint8_t *buffer); /** Get the headers from the buffer, store them in the headers list. * the user's responsibility to cleanup the list when they are finished with it. * no buffer copies happen here, the lifetime of the buffer, must outlive the usage of the headers. * returns error codes defined in the public interface. */ AWS_EVENT_STREAM_API int aws_event_stream_read_headers_from_buffer( struct aws_array_list *headers, const uint8_t *buffer, size_t headers_len); /** * Initialize a streaming decoder for messages with callbacks for usage * and an optional user context pointer. */ AWS_EVENT_STREAM_API void aws_event_stream_streaming_decoder_init_from_options( struct aws_event_stream_streaming_decoder *decoder, struct aws_allocator *allocator, const struct aws_event_stream_streaming_decoder_options *options); /** * Deprecated. Use aws_event_stream_streaming_decoder_init_from_options instead. * Initialize a streaming decoder for messages with callbacks for usage and an optional user context pointer. */ AWS_EVENT_STREAM_API void aws_event_stream_streaming_decoder_init( struct aws_event_stream_streaming_decoder *decoder, struct aws_allocator *alloc, aws_event_stream_process_on_payload_segment_fn *on_payload_segment, aws_event_stream_prelude_received_fn *on_prelude, aws_event_stream_header_received_fn *on_header, aws_event_stream_on_error_fn *on_error, void *user_data); /** * Currently, no memory is allocated inside aws_aws_event_stream_streaming_decoder, but for future API compatibility, * you should call this when finished with it. */ AWS_EVENT_STREAM_API void aws_event_stream_streaming_decoder_clean_up( struct aws_event_stream_streaming_decoder *decoder); /** * initializes a headers list for you. It will default to a capacity of 4 in dynamic mode. */ AWS_EVENT_STREAM_API int aws_event_stream_headers_list_init( struct aws_array_list *headers, struct aws_allocator *allocator); /** * Cleans up the headers list. Also deallocates any headers that were the result of a copy flag for strings or buffer. */ AWS_EVENT_STREAM_API void aws_event_stream_headers_list_cleanup(struct aws_array_list *headers); /** * Adds a string header to the list of headers. If copy is set to true, this will result in an allocation for the header * value. Otherwise, the value will be set to the memory address of 'value'. 
*/ AWS_EVENT_STREAM_API int aws_event_stream_add_string_header( struct aws_array_list *headers, const char *name, uint8_t name_len, const char *value, uint16_t value_len, int8_t copy); AWS_EVENT_STREAM_API struct aws_event_stream_header_value_pair aws_event_stream_create_string_header( struct aws_byte_cursor name, struct aws_byte_cursor value); AWS_EVENT_STREAM_API struct aws_event_stream_header_value_pair aws_event_stream_create_int32_header( struct aws_byte_cursor name, int32_t value); /** * Adds a byte header to the list of headers. */ AWS_EVENT_STREAM_API int aws_event_stream_add_byte_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int8_t value); /** * Adds a bool header to the list of headers. */ AWS_EVENT_STREAM_API int aws_event_stream_add_bool_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int8_t value); /** * adds a 16 bit integer to the list of headers. */ AWS_EVENT_STREAM_API int aws_event_stream_add_int16_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int16_t value); /** * adds a 32 bit integer to the list of headers. */ AWS_EVENT_STREAM_API int aws_event_stream_add_int32_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int32_t value); /** * adds a 64 bit integer to the list of headers. */ AWS_EVENT_STREAM_API int aws_event_stream_add_int64_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int64_t value); /** * Adds a byte-buffer header to the list of headers. If copy is set to true, this will result in an allocation for the * header value. Otherwise, the value will be set to the memory address of 'value'. */ AWS_EVENT_STREAM_API int aws_event_stream_add_bytebuf_header( struct aws_array_list *headers, const char *name, uint8_t name_len, uint8_t *value, uint16_t value_len, int8_t copy); /** * adds a 64 bit integer representing milliseconds since unix epoch to the list of headers. */ AWS_EVENT_STREAM_API int aws_event_stream_add_timestamp_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int64_t value); /** * adds a uuid buffer to the list of headers. Value must always be 16 bytes long. */ AWS_EVENT_STREAM_API int aws_event_stream_add_uuid_header( struct aws_array_list *headers, const char *name, uint8_t name_len, const uint8_t *value); /** * Adds a generic header to the list of headers. * Makes a copy of the underlaying data. 
*/ AWS_EVENT_STREAM_API int aws_event_stream_add_header( struct aws_array_list *headers, const struct aws_event_stream_header_value_pair *header); /* Cursor-based header APIs */ /** * Adds a boolean-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_bool_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, bool value); /** * Adds a byte-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_byte_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int8_t value); /** * Adds a int16-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_int16_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int16_t value); /** * Adds a int32-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_int32_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int32_t value); /** * Adds a int64-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_int64_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int64_t value); /** * Adds a string-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_string_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); /** * Adds a byte_buf-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_byte_buf_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); /** * Adds a timestamp-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int aws_event_stream_add_timestamp_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int64_t value); /** * Adds a uuid-valued header to a header list * * @param headers header list to add to * @param name name of the header to add * @param value value of the header to add * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure */ AWS_EVENT_STREAM_API int 
aws_event_stream_add_uuid_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); /** * Returns the header name. Note: this value is not null terminated */ AWS_EVENT_STREAM_API struct aws_byte_buf aws_event_stream_header_name( struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a string. Note: this value is not null terminated. */ AWS_EVENT_STREAM_API struct aws_byte_buf aws_event_stream_header_value_as_string( struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a byte */ AWS_EVENT_STREAM_API int8_t aws_event_stream_header_value_as_byte(struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a boolean value. */ AWS_EVENT_STREAM_API int8_t aws_event_stream_header_value_as_bool(struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a 16 bit integer. */ AWS_EVENT_STREAM_API int16_t aws_event_stream_header_value_as_int16(struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a 32 bit integer. */ AWS_EVENT_STREAM_API int32_t aws_event_stream_header_value_as_int32(struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a 64 bit integer. */ AWS_EVENT_STREAM_API int64_t aws_event_stream_header_value_as_int64(struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a pointer to a byte buffer, call aws_event_stream_header_value_length to determine * the length of the buffer. */ AWS_EVENT_STREAM_API struct aws_byte_buf aws_event_stream_header_value_as_bytebuf( struct aws_event_stream_header_value_pair *header); /** * Returns the header value as a 64 bit integer representing milliseconds since unix epoch. */ AWS_EVENT_STREAM_API int64_t aws_event_stream_header_value_as_timestamp(struct aws_event_stream_header_value_pair *header); /** * Returns the header value a byte buffer which is 16 bytes long. Represents a UUID. */ AWS_EVENT_STREAM_API struct aws_byte_buf aws_event_stream_header_value_as_uuid( struct aws_event_stream_header_value_pair *header); /** * Returns the length of the header value buffer. This is mostly intended for string and byte buffer types. */ AWS_EVENT_STREAM_API uint16_t aws_event_stream_header_value_length(struct aws_event_stream_header_value_pair *header); /** * The main driver of the decoder. Pass data that should be decoded with its length. A likely use-case here is in * response to a read event from an io-device */ AWS_EVENT_STREAM_API int aws_event_stream_streaming_decoder_pump( struct aws_event_stream_streaming_decoder *decoder, const struct aws_byte_buf *data); /** * Initializes internal datastructures used by aws-c-event-stream. * Must be called before using any functionality in aws-c-event-stream. */ AWS_EVENT_STREAM_API void aws_event_stream_library_init(struct aws_allocator *allocator); /** * Clean up internal datastructures used by aws-c-event-stream. * Must not be called until application is done using functionality in aws-c-event-stream. */ AWS_EVENT_STREAM_API void aws_event_stream_library_clean_up(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_EVENT_STREAM_H_ */ event_stream_channel_handler.h000066400000000000000000000066741456575232400344610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream#ifndef AWS_EVENT_STREAM_CHANNEL_HANDLER_H #define AWS_EVENT_STREAM_CHANNEL_HANDLER_H /** * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_event_stream_channel_handler; struct aws_channel_handler; /** * Invoked when an aws_event_stream_message is encountered. If the message * parsed successfully, message will be non-null and error_code will be AWS_ERROR_SUCCESS. * Otherwise message will be null and error_code will represent the error that was encountered. * Note that any case that error_code was not AWS_OP_SUCCESS, the channel also shuts down. */ typedef void(aws_event_stream_channel_handler_on_message_received_fn)( struct aws_event_stream_message *message, int error_code, void *user_data); /** * Invoked when an aws_event_stream_message is flushed to the IO interface. When error_code is AWS_ERROR_SUCCESS the * write happened successfuly. Regardless, message is held from the aws_event_stream_channel_handler_write_message() * call and should likely be freed in this callback. If error_code is non-zero, the channel will be shutdown immediately * after this callback returns. */ typedef void(aws_event_stream_channel_handler_on_message_written_fn)( struct aws_event_stream_message *message, int error_code, void *user_data); struct aws_event_stream_channel_handler_options { /** Callback for when messages are received. Can not be null. */ aws_event_stream_channel_handler_on_message_received_fn *on_message_received; /** user data passed to message callback. Optional */ void *user_data; /** initial window size to use for the channel. If automatic window management is set to true, this value is * ignored. */ size_t initial_window_size; /** * if set to false (the default), windowing will be managed automatically for the user. * Otherwise, after any on_message_received, the user must invoke * aws_event_stream_channel_handler_increment_read_window() */ bool manual_window_management; }; AWS_EXTERN_C_BEGIN /** * Allocates and initializes a new channel handler for processing aws_event_stream_message() events. Handler options * must not be null. */ AWS_EVENT_STREAM_API struct aws_channel_handler *aws_event_stream_channel_handler_new( struct aws_allocator *allocator, const struct aws_event_stream_channel_handler_options *handler_options); /** * Writes an aws_event_stream_message() to the channel. Once the channel flushes or an error occurs, on_message_written * will be invoked. message should stay valid until the callback is invoked. If an error an occurs, the channel will * automatically be shutdown. */ AWS_EVENT_STREAM_API int aws_event_stream_channel_handler_write_message( struct aws_channel_handler *handler, struct aws_event_stream_message *message, aws_event_stream_channel_handler_on_message_written_fn *on_message_written, void *user_data); /** * Updates the read window for the channel if automatic_window_managemanet was set to false. */ AWS_EVENT_STREAM_API void aws_event_stream_channel_handler_increment_read_window( struct aws_channel_handler *handler, size_t window_update_size); AWS_EVENT_STREAM_API void *aws_event_stream_channel_handler_get_user_data(struct aws_channel_handler *handler); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_EVENT_STREAM_CHANNEL_HANDLER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/event_stream_exports.h000066400000000000000000000021601456575232400331210ustar00rootroot00000000000000#ifndef AWS_EVENT_STREAM_EXPORTS_H_ #define AWS_EVENT_STREAM_EXPORTS_H_ /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_EVENT_STREAM_USE_IMPORT_EXPORT # ifdef AWS_EVENT_STREAM_EXPORTS # define AWS_EVENT_STREAM_API __declspec(dllexport) # else # define AWS_EVENT_STREAM_API __declspec(dllimport) # endif /* AWS_EVENT_STREAM_EXPORTS */ # else # define AWS_EVENT_STREAM_API # endif /* AWS_EVENT_STREAM_USE_IMPORT_EXPORT */ #else /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_EVENT_STREAM_USE_IMPORT_EXPORT) && \ defined(AWS_EVENT_STREAM_EXPORTS) # define AWS_EVENT_STREAM_API __attribute__((visibility("default"))) # else # define AWS_EVENT_STREAM_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ #endif /* AWS_EVENT_STREAM_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/event_stream_rpc.h000066400000000000000000000047261456575232400322130ustar00rootroot00000000000000#ifndef AWS_EVENT_STREAM_RPC_H #define AWS_EVENT_STREAM_RPC_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL /** * :message-type header name */ extern AWS_EVENT_STREAM_API const struct aws_byte_cursor aws_event_stream_rpc_message_type_name; /** * :message-flags header name */ extern AWS_EVENT_STREAM_API const struct aws_byte_cursor aws_event_stream_rpc_message_flags_name; /** * :stream-id header name */ extern AWS_EVENT_STREAM_API const struct aws_byte_cursor aws_event_stream_rpc_stream_id_name; /** * operation header name. */ extern AWS_EVENT_STREAM_API const struct aws_byte_cursor aws_event_stream_rpc_operation_name; enum aws_event_stream_rpc_message_type { AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PING, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PING_RESPONSE, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_COUNT, }; enum aws_event_stream_rpc_message_flag { AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED = 1, AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM = 2, }; struct aws_event_stream_rpc_message_args { /** array of headers for an event-stream message. */ struct aws_event_stream_header_value_pair *headers; /** number of headers in the headers array. * headers are copied in aws_event_stream_rpc_*_send_message() * so you can free the memory immediately after calling it if you need to.*/ size_t headers_count; /** payload buffer for the message, payload is copied in aws_event_stream_rpc_*_send_message() * so you can free the memory immediately after calling it if you need to. */ struct aws_byte_buf *payload; /** message type for the message. This will be added to the headers array * and the ":message-type" header should not be included in headers */ enum aws_event_stream_rpc_message_type message_type; /** message flags for the message. 
This will be added to the headers array * and the ":message-flags" header should not be included in headers */ uint32_t message_flags; }; AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_EVENT_STREAM_RPC_SERVER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_client.h000066400000000000000000000223131456575232400335410ustar00rootroot00000000000000#ifndef AWS_EVENT_STREAM_RPC_CLIENT_H #define AWS_EVENT_STREAM_RPC_CLIENT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_channel; struct aws_event_stream_rpc_client_connection; struct aws_event_stream_rpc_client_continuation_token; /** * Invoked when a connection receives a message on an existing stream. message_args contains the * message data. */ typedef void(aws_event_stream_rpc_client_stream_continuation_fn)( struct aws_event_stream_rpc_client_continuation_token *token, const struct aws_event_stream_rpc_message_args *message_args, void *user_data); /** * Invoked when a continuation has either been closed with the TERMINATE_STREAM flag, or when the connection * shuts down and deletes the continuation. */ typedef void(aws_event_stream_rpc_client_stream_continuation_closed_fn)( struct aws_event_stream_rpc_client_continuation_token *token, void *user_data); struct aws_event_stream_rpc_client_stream_continuation_options { aws_event_stream_rpc_client_stream_continuation_fn *on_continuation; aws_event_stream_rpc_client_stream_continuation_closed_fn *on_continuation_closed; void *user_data; }; /** * Invoked when a non-stream level message is received on a connection. */ typedef void(aws_event_stream_rpc_client_connection_protocol_message_fn)( struct aws_event_stream_rpc_client_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data); /** * Invoked when a successfully created connection is shutdown. error_code will indicate the reason for the shutdown. */ typedef void(aws_event_stream_rpc_client_on_connection_shutdown_fn)( struct aws_event_stream_rpc_client_connection *connection, int error_code, void *user_data); /** * Invoked when a connection attempt completes. * * If the attempt was unsuccessful, the error_code will be non-zero and the connection pointer will be NULL, * and aws_event_stream_rpc_client_on_connection_shutdown_fn will not be invoked. * * If the attempt was successful, error_code will be 0 and the connection pointer will be valid. * You must call aws_event_stream_rpc_client_connection_acquire() * to prevent the pointer's memory from being destroyed before you are ready. * When you are completely done with the connection pointer you must call * aws_event_stream_rpc_client_connection_release() or its memory will leak. * aws_event_stream_rpc_client_on_connection_shutdown_fn will be invoked * when the network connection has closed. If you are done with the connection, * but it is still open, you must call aws_aws_event_stream_rpc_client_close() * or network connection will remain open, even if you call release(). */ typedef void(aws_event_stream_rpc_client_on_connection_setup_fn)( struct aws_event_stream_rpc_client_connection *connection, int error_code, void *user_data); /** * Invoked whenever a message has been flushed to the channel. 
*/ typedef void(aws_event_stream_rpc_client_message_flush_fn)(int error_code, void *user_data); struct aws_client_bootstrap; struct aws_event_stream_rpc_client_connection_options { /** host name to use for the connection. This depends on your socket type. */ const char *host_name; /** port to use for your connection, assuming for the appropriate socket type. */ uint32_t port; /** socket options for establishing the connection to the RPC server. */ const struct aws_socket_options *socket_options; /** optional: tls options for using when establishing your connection. */ const struct aws_tls_connection_options *tls_options; struct aws_client_bootstrap *bootstrap; aws_event_stream_rpc_client_on_connection_setup_fn *on_connection_setup; aws_event_stream_rpc_client_connection_protocol_message_fn *on_connection_protocol_message; aws_event_stream_rpc_client_on_connection_shutdown_fn *on_connection_shutdown; void *user_data; }; AWS_EXTERN_C_BEGIN /** * Initiate a new connection. If this function returns AWS_OP_SUCESSS, the * aws_event_stream_rpc_client_connection_options::on_connection_setup is guaranteed to be called exactly once. If that * callback successfully creates a connection, aws_event_stream_rpc_client_connection_options::on_connection_shutdown * will be invoked upon connection closure. However if the connection was never successfully setup, * aws_event_stream_rpc_client_connection_options::on_connection_shutdown will not be invoked later. */ AWS_EVENT_STREAM_API int aws_event_stream_rpc_client_connection_connect( struct aws_allocator *allocator, const struct aws_event_stream_rpc_client_connection_options *conn_options); AWS_EVENT_STREAM_API void aws_event_stream_rpc_client_connection_acquire( const struct aws_event_stream_rpc_client_connection *connection); AWS_EVENT_STREAM_API void aws_event_stream_rpc_client_connection_release( const struct aws_event_stream_rpc_client_connection *connection); /** * Closes the connection if it is open and aws_event_stream_rpc_client_connection_options::on_connection_shutdown will * be invoked upon shutdown. shutdown_error_code will indicate the reason for shutdown. For a graceful shutdown pass 0 * or AWS_ERROR_SUCCESS. */ AWS_EVENT_STREAM_API void aws_event_stream_rpc_client_connection_close( struct aws_event_stream_rpc_client_connection *connection, int shutdown_error_code); /** * Returns true if the connection is open, false otherwise. */ AWS_EVENT_STREAM_API bool aws_event_stream_rpc_client_connection_is_open( const struct aws_event_stream_rpc_client_connection *connection); /** * Sends a message on the connection. These must be connection level messages (not application messages). * * flush_fn will be invoked when the message has been successfully writen to the wire or when it fails. * * returns AWS_OP_SUCCESS if the message was successfully created and queued, and in that case flush_fn will always be * invoked. Otherwise, flush_fn will not be invoked. */ AWS_EVENT_STREAM_API int aws_event_stream_rpc_client_connection_send_protocol_message( struct aws_event_stream_rpc_client_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_client_message_flush_fn *flush_fn, void *user_data); /** * Create a new stream. continuation_option's callbacks will not be invoked, and nothing will be sent across the wire * until aws_event_stream_rpc_client_continuation_activate() is invoked. * * returns an instance of a aws_event_stream_rpc_client_continuation_token on success with a reference count of 1. 
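 * A typical stream lifecycle, as a rough sketch (s_on_stream_message, s_on_stream_closed,
 * s_on_flush, my_state, first_message_args, and the operation name are hypothetical; error
 * handling is omitted; aws_byte_cursor_from_c_str() is from aws-c-common):
 *
 *     struct aws_event_stream_rpc_client_stream_continuation_options stream_options = {
 *         .on_continuation = s_on_stream_message,
 *         .on_continuation_closed = s_on_stream_closed,
 *         .user_data = my_state,
 *     };
 *     struct aws_event_stream_rpc_client_continuation_token *token =
 *         aws_event_stream_rpc_client_connection_new_stream(connection, &stream_options);
 *     aws_event_stream_rpc_client_continuation_activate(
 *         token,
 *         aws_byte_cursor_from_c_str("example#HypotheticalOperation"),
 *         &first_message_args,
 *         s_on_flush,
 *         my_state);
 *     // ... exchange messages with aws_event_stream_rpc_client_continuation_send_message() ...
 *     aws_event_stream_rpc_client_continuation_release(token);
 *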
You * must call aws_event_stream_rpc_client_continuation_release() when you're finished with it. Returns NULL on failure. */ AWS_EVENT_STREAM_API struct aws_event_stream_rpc_client_continuation_token * aws_event_stream_rpc_client_connection_new_stream( struct aws_event_stream_rpc_client_connection *connection, const struct aws_event_stream_rpc_client_stream_continuation_options *continuation_options); AWS_EVENT_STREAM_API void aws_event_stream_rpc_client_continuation_acquire( const struct aws_event_stream_rpc_client_continuation_token *continuation); AWS_EVENT_STREAM_API void aws_event_stream_rpc_client_continuation_release( const struct aws_event_stream_rpc_client_continuation_token *continuation); /** * returns true if the continuation has been closed. */ AWS_EVENT_STREAM_API bool aws_event_stream_rpc_client_continuation_is_closed( const struct aws_event_stream_rpc_client_continuation_token *continuation); /** * Actually sends the initial stream to the peer. Callbacks from aws_event_stream_rpc_client_connection_new_stream() * will actually be invoked if this function returns AWS_OP_SUCCESS, otherwise, the stream has not been queued and no * callbacks will be invoked. * * operation_name is the name to identify which logical rpc call you want to kick off with the peer. It must be * non-empty. flush_fn will be invoked once the message has either been written to the wire or it fails. */ AWS_EVENT_STREAM_API int aws_event_stream_rpc_client_continuation_activate( struct aws_event_stream_rpc_client_continuation_token *continuation, struct aws_byte_cursor operation_name, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_client_message_flush_fn *flush_fn, void *user_data); AWS_EVENT_STREAM_API void *aws_event_stream_rpc_client_continuation_get_user_data( struct aws_event_stream_rpc_client_continuation_token *continuation); /** * Sends a message on the continuation. aws_event_stream_rpc_client_continuation_activate() must be successfully invoked * prior to calling this function. * * If this function returns AWS_OP_SUCCESS, flush_fn will be invoked once the message has either been written to the * wire or it fails. */ AWS_EVENT_STREAM_API int aws_event_stream_rpc_client_continuation_send_message( struct aws_event_stream_rpc_client_continuation_token *continuation, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_client_message_flush_fn *flush_fn, void *user_data); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_EVENT_STREAM_RPC_CLIENT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_server.h000066400000000000000000000222431456575232400335730ustar00rootroot00000000000000#ifndef AWS_EVENT_STREAM_RPC_SERVER_H #define AWS_EVENT_STREAM_RPC_SERVER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_channel; struct aws_event_stream_rpc_server_connection; struct aws_event_stream_rpc_server_continuation_token; /** * Invoked when a connection receives a message on an existing stream. message_args contains the * message data. 
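 *
 * A handler sketch that simply echoes each application message back on the same stream
 * (s_on_stream_message and s_on_flush are hypothetical names, error handling is omitted, and
 * whether echoing is appropriate depends entirely on your service's protocol):
 *
 *     static void s_on_stream_message(
 *         struct aws_event_stream_rpc_server_continuation_token *token,
 *         const struct aws_event_stream_rpc_message_args *message_args,
 *         void *user_data) {
 *         struct aws_event_stream_rpc_message_args reply = *message_args;
 *         reply.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE;
 *         aws_event_stream_rpc_server_continuation_send_message(token, &reply, s_on_flush, user_data);
 *     }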
*/ typedef void(aws_event_stream_rpc_server_stream_continuation_fn)( struct aws_event_stream_rpc_server_continuation_token *token, const struct aws_event_stream_rpc_message_args *message_args, void *user_data); /** * Invoked when a continuation has either been closed with the TERMINATE_STREAM flag, or when the connection * shutsdown and deletes the continuation. */ typedef void(aws_event_stream_rpc_server_stream_continuation_closed_fn)( struct aws_event_stream_rpc_server_continuation_token *token, void *user_data); struct aws_event_stream_rpc_server_stream_continuation_options { aws_event_stream_rpc_server_stream_continuation_fn *on_continuation; aws_event_stream_rpc_server_stream_continuation_closed_fn *on_continuation_closed; void *user_data; }; /** * Invoked when a non-stream level message is received on a connection. */ typedef void(aws_event_stream_rpc_server_connection_protocol_message_fn)( struct aws_event_stream_rpc_server_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data); /** * Invoked when a new stream has been received on the connection. If you return AWS_OP_SUCCESS (0), * You must fill in the fields for continuation options or the program will assert and exit. * * A failure path MUST leave the ref count of the continuation alone. * * A success path should probably take a ref which will leave the continuation (assuming no other interference) * at two AFTER creation is complete: 1 for the connection's continuation table, and one for the callback * recipient which is presumably tracking it as well. */ typedef int(aws_event_stream_rpc_server_on_incoming_stream_fn)( struct aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_rpc_server_continuation_token *token, struct aws_byte_cursor operation_name, struct aws_event_stream_rpc_server_stream_continuation_options *continuation_options, void *user_data); struct aws_event_stream_rpc_connection_options { aws_event_stream_rpc_server_on_incoming_stream_fn *on_incoming_stream; aws_event_stream_rpc_server_connection_protocol_message_fn *on_connection_protocol_message; void *user_data; }; /** * Invoked when a new connection is received on a server listener. If you return AWS_OP_SUCCESS, * You must fill in the fields for connection_options or the program will assert and exit. * * If error_code is non-zero, an error occurred upon setting up the channel and connection will be NULL. Otherwise, * connection is non-null. If you intend to seat a pointer to connection, you MUST call * aws_event_stream_rpc_server_connection_acquire() and when you're finished with the connection you MUST call * aws_event_stream_server_connection_release(). */ typedef int(aws_event_stream_rpc_server_on_new_connection_fn)( struct aws_event_stream_rpc_server_connection *connection, int error_code, struct aws_event_stream_rpc_connection_options *connection_options, void *user_data); /** * Invoked when a successfully created connection is shutdown. error_code will indicate the reason for the shutdown. */ typedef void(aws_event_stream_rpc_server_on_connection_shutdown_fn)( struct aws_event_stream_rpc_server_connection *connection, int error_code, void *user_data); /** * Invoked whenever a message has been flushed to the channel. */ typedef void(aws_event_stream_rpc_server_message_flush_fn)(int error_code, void *user_data); struct aws_server_bootstrap; struct aws_event_stream_rpc_server_listener; /** * (Optional). 
Invoked when the listener has been successfully shutdown (after the last ref count release). */ typedef void(aws_event_stream_rpc_server_on_listener_destroy_fn)( struct aws_event_stream_rpc_server_listener *server, void *user_data); struct aws_event_stream_rpc_server_listener_options { /** host name to use for the listener. This depends on your socket type. */ const char *host_name; /** port to use for your listener, assuming for the appropriate socket type. */ uint32_t port; const struct aws_socket_options *socket_options; /** optional: tls options for using when setting up your server. */ const struct aws_tls_connection_options *tls_options; struct aws_server_bootstrap *bootstrap; aws_event_stream_rpc_server_on_new_connection_fn *on_new_connection; aws_event_stream_rpc_server_on_connection_shutdown_fn *on_connection_shutdown; aws_event_stream_rpc_server_on_listener_destroy_fn *on_destroy_callback; void *user_data; }; AWS_EXTERN_C_BEGIN /** * Creates a listener with a ref count of 1. You are responsible for calling * aws_event_stream_rpc_server_listener_release() when you're finished with the listener. Returns NULL if an error * occurs. */ AWS_EVENT_STREAM_API struct aws_event_stream_rpc_server_listener *aws_event_stream_rpc_server_new_listener( struct aws_allocator *allocator, struct aws_event_stream_rpc_server_listener_options *options); AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_listener_acquire( struct aws_event_stream_rpc_server_listener *listener); AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_listener_release( struct aws_event_stream_rpc_server_listener *listener); /** * Get the local port which the listener's socket is bound to. */ AWS_EVENT_STREAM_API uint32_t aws_event_stream_rpc_server_listener_get_bound_port( const struct aws_event_stream_rpc_server_listener *listener); /** * Bypasses server, and creates a connection on an already existing channel. No connection lifetime callbacks will be * invoked on the returned connection. Returns NULL if an error occurs. If and only if, you use this API, the returned * connection is already ref counted and you must call aws_event_stream_rpc_server_connection_release() even if you did * not explictly call aws_event_stream_rpc_server_connection_acquire() */ AWS_EVENT_STREAM_API struct aws_event_stream_rpc_server_connection * aws_event_stream_rpc_server_connection_from_existing_channel( struct aws_event_stream_rpc_server_listener *server, struct aws_channel *channel, const struct aws_event_stream_rpc_connection_options *connection_options); AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_connection_acquire( struct aws_event_stream_rpc_server_connection *connection); AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_connection_release( struct aws_event_stream_rpc_server_connection *connection); AWS_EVENT_STREAM_API void *aws_event_stream_rpc_server_connection_get_user_data( struct aws_event_stream_rpc_server_connection *connection); /** * returns true if the connection is open. False otherwise. */ AWS_EVENT_STREAM_API bool aws_event_stream_rpc_server_connection_is_open( struct aws_event_stream_rpc_server_connection *connection); /** * Closes the connection (including all continuations on the connection), and releases the connection ref count. * shutdown_error_code is the error code to use when shutting down the channel. Use AWS_ERROR_SUCCESS for non-error * cases. 
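 * For a graceful, application-initiated shutdown the call is typically just:
 *
 *     aws_event_stream_rpc_server_connection_close(connection, AWS_ERROR_SUCCESS);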
*/ AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_connection_close( struct aws_event_stream_rpc_server_connection *connection, int shutdown_error_code); /** * Sends a protocol message on the connection (not application data). If the message is valid and successfully written * to the channel, flush_fn will be invoked. */ AWS_EVENT_STREAM_API int aws_event_stream_rpc_server_connection_send_protocol_message( struct aws_event_stream_rpc_server_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_server_message_flush_fn *flush_fn, void *user_data); AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_continuation_acquire( struct aws_event_stream_rpc_server_continuation_token *continuation); AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_continuation_release( struct aws_event_stream_rpc_server_continuation_token *continuation); /** * returns true if the continuation is still in an open state. */ AWS_EVENT_STREAM_API bool aws_event_stream_rpc_server_continuation_is_closed( struct aws_event_stream_rpc_server_continuation_token *continuation); /** * Sends an application message on the continuation. If the message is valid and successfully written * to the channel, flush_fn will be invoked. */ AWS_EVENT_STREAM_API int aws_event_stream_rpc_server_continuation_send_message( struct aws_event_stream_rpc_server_continuation_token *continuation, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_server_message_flush_fn *flush_fn, void *user_data); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_EVENT_STREAM_RPC_SERVER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/private/000077500000000000000000000000001456575232400301435ustar00rootroot00000000000000event_stream_rpc_priv.h000066400000000000000000000057561456575232400346520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/private#ifndef AWS_EVENT_STREAM_RPC_PRIV_H #define AWS_EVENT_STREAM_RPC_PRIV_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include enum aws_event_stream_connection_handshake_state { CONNECTION_HANDSHAKE_STATE_INITIALIZED = 0u, CONNECTION_HANDSHAKE_STATE_CONNECT_PROCESSED = 1u, CONNECTION_HANDSHAKE_STATE_CONNECT_ACK_PROCESSED = 2u, }; int aws_event_stream_rpc_extract_message_metadata( const struct aws_array_list *message_headers, int32_t *stream_id, int32_t *message_type, int32_t *message_flags, struct aws_byte_buf *operation_name); uint64_t aws_event_stream_rpc_hash_streamid(const void *to_hash); bool aws_event_stream_rpc_streamid_eq(const void *a, const void *b); static const struct aws_byte_cursor s_json_content_type_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":content-type"); static const struct aws_byte_cursor s_json_content_type_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/json"); static const struct aws_byte_cursor s_invalid_stream_id_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"non-zero stream-id field is only allowed for messages of " "type APPLICATION_MESSAGE. The stream id max value is INT32_MAX.\" }"); static const struct aws_byte_cursor s_invalid_client_stream_id_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"stream-id values must be monotonically incrementing. 
A " "stream-id arrived that was lower than the last seen stream-id.\" }"); static const struct aws_byte_cursor s_invalid_new_client_stream_id_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"stream-id values must be monotonically incrementing. A new " "stream-id arrived that was incremented by more than 1.\" }"); static const struct aws_byte_cursor s_invalid_message_type_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"an invalid value for message-type field was received.\" }"); static const struct aws_byte_cursor s_invalid_message_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "{ \"message\": \"A message was received with missing required fields. Check that your client is sending at least, " ":message-type, :message-flags, and :stream-id\" }"); static const struct aws_byte_cursor s_internal_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "{ \"message\": \"An error occurred on the peer endpoint. This is not likely caused by your endpoint.\" }"); static const struct aws_byte_cursor s_connect_not_completed_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "{ \"message\": \"A CONNECT message must be received, and the CONNECT_ACK must be sent in response, before any " "other message-types can be sent on this connection. In addition, only one CONNECT message is allowed on a " "connection.\" }"); #endif /* #define AWS_EVENT_STREAM_RPC_PRIV_H */ event_stream_rpc_test_helper.h000066400000000000000000000013521456575232400361740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/include/aws/event-stream/private#ifndef AWS_EVENT_STREAM_RPC_TEST_HELPER_H #define AWS_EVENT_STREAM_RPC_TEST_HELPER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #ifndef AWS_UNSTABLE_TESTING_API # error The functions in this header file are for testing purposes only! #endif AWS_EXTERN_C_BEGIN /** This is for testing edge cases around stream id exhaustion. Don't ever include this file outside of a unit test. */ AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_override_last_stream_id( struct aws_event_stream_rpc_server_connection *connection, int32_t value); AWS_EXTERN_C_END #endif /* AWS_EVENT_STREAM_RPC_TEST_HELPER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/source/000077500000000000000000000000001456575232400231425ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/source/event_stream.c000066400000000000000000001761751456575232400260230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #define LIB_NAME "libaws-c-event-stream" #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ # pragma warning(disable : 4306) /* msft doesn't trust us to do pointer arithmetic. 
*/ #endif static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO(AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH, "Buffer length mismatch", LIB_NAME), AWS_DEFINE_ERROR_INFO(AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN, "insufficient buffer length", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED, "a field for the message was too large", LIB_NAME), AWS_DEFINE_ERROR_INFO(AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE, "prelude checksum was incorrect", LIB_NAME), AWS_DEFINE_ERROR_INFO(AWS_ERROR_EVENT_STREAM_MESSAGE_CHECKSUM_FAILURE, "message checksum was incorrect", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN, "message headers length was incorrect", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_MESSAGE_UNKNOWN_HEADER_TYPE, "An unknown header type was encountered", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_MESSAGE_PARSER_ILLEGAL_STATE, "message parser encountered an illegal state", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_RPC_CONNECTION_CLOSED, "event stream rpc connection has been closed", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR, "event stream rpc connection has encountered a protocol error", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_RPC_STREAM_CLOSED, "event stream rpc connection stream is closed.", LIB_NAME), AWS_DEFINE_ERROR_INFO( AWS_ERROR_EVENT_STREAM_RPC_STREAM_NOT_ACTIVATED, "event stream rpc stream continuation was not successfully activated before use. Call " "aws_event_stream_rpc_client_continuation_activate()" " before using a stream continuation token.", LIB_NAME), }; static struct aws_error_info_list s_list = { .error_list = s_errors, .count = sizeof(s_errors) / sizeof(struct aws_error_info), }; static bool s_event_stream_library_initialized = false; static struct aws_log_subject_info s_event_stream_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO( AWS_LS_EVENT_STREAM_GENERAL, "event-stream-general", "Subject for aws-c-event-stream logging that defies categorization."), DEFINE_LOG_SUBJECT_INFO( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "event-stream-channel-handler", "Subject for event-stream channel handler related logging."), DEFINE_LOG_SUBJECT_INFO( AWS_LS_EVENT_STREAM_RPC_SERVER, "event-stream-rpc-server", "Subject for event-stream rpc server."), DEFINE_LOG_SUBJECT_INFO( AWS_LS_EVENT_STREAM_RPC_CLIENT, "event-stream-rpc-client", "Subject for event-stream rpc client."), }; static struct aws_log_subject_info_list s_event_stream_log_subject_list = { .subject_list = s_event_stream_log_subject_infos, .count = AWS_ARRAY_SIZE(s_event_stream_log_subject_infos), }; static const uint16_t UUID_LEN = 16U; void aws_event_stream_library_init(struct aws_allocator *allocator) { if (!s_event_stream_library_initialized) { s_event_stream_library_initialized = true; aws_io_library_init(allocator); aws_register_error_info(&s_list); aws_register_log_subject_info_list(&s_event_stream_log_subject_list); } } void aws_event_stream_library_clean_up(void) { if (s_event_stream_library_initialized) { s_event_stream_library_initialized = false; aws_unregister_error_info(&s_list); aws_io_library_clean_up(); } } #define TOTAL_LEN_OFFSET 0 #define PRELUDE_CRC_OFFSET (sizeof(uint32_t) + sizeof(uint32_t)) #define HEADER_LEN_OFFSET sizeof(uint32_t) /* Computes the byte length necessary to store the headers represented in the headers list. * returns that length. 
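 * Each header contributes: 1 byte of name length, the name bytes, and 1 byte of value type;
 * string and byte-buffer headers add a 2-byte value length; every type except the two boolean
 * types (whose value is carried by the type byte itself) then adds the value bytes.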
*/ uint32_t aws_event_stream_compute_headers_required_buffer_len(const struct aws_array_list *headers) { if (!headers || !aws_array_list_length(headers)) { return 0; } size_t headers_count = aws_array_list_length(headers); size_t headers_len = 0; for (size_t i = 0; i < headers_count; ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(headers, (void **)&header, i); AWS_FATAL_ASSERT( !aws_add_size_checked(headers_len, sizeof(header->header_name_len), &headers_len) && "integer overflow occurred computing total headers length."); AWS_FATAL_ASSERT( !aws_add_size_checked(headers_len, header->header_name_len + 1, &headers_len) && "integer overflow occurred computing total headers length."); if (header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING || header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF) { AWS_FATAL_ASSERT( !aws_add_size_checked(headers_len, sizeof(header->header_value_len), &headers_len) && "integer overflow occurred computing total headers length."); } if (header->header_value_type != AWS_EVENT_STREAM_HEADER_BOOL_FALSE && header->header_value_type != AWS_EVENT_STREAM_HEADER_BOOL_TRUE) { AWS_FATAL_ASSERT( !aws_add_size_checked(headers_len, header->header_value_len, &headers_len) && "integer overflow occurred computing total headers length."); } } return (uint32_t)headers_len; } int aws_event_stream_write_headers_to_buffer_safe(const struct aws_array_list *headers, struct aws_byte_buf *buf) { AWS_FATAL_PRECONDITION(buf); if (!headers || !aws_array_list_length(headers)) { return AWS_OP_SUCCESS; } size_t headers_count = aws_array_list_length(headers); for (size_t i = 0; i < headers_count; ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(headers, (void **)&header, i); AWS_RETURN_ERROR_IF( aws_byte_buf_write_u8(buf, header->header_name_len), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN); AWS_RETURN_ERROR_IF( aws_byte_buf_write(buf, (uint8_t *)header->header_name, (size_t)header->header_name_len), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN); AWS_RETURN_ERROR_IF( aws_byte_buf_write_u8(buf, (uint8_t)header->header_value_type), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN); switch (header->header_value_type) { case AWS_EVENT_STREAM_HEADER_BOOL_FALSE: case AWS_EVENT_STREAM_HEADER_BOOL_TRUE: break; /* additions of integers here assume the endianness conversion has already happened */ case AWS_EVENT_STREAM_HEADER_BYTE: case AWS_EVENT_STREAM_HEADER_INT16: case AWS_EVENT_STREAM_HEADER_INT32: case AWS_EVENT_STREAM_HEADER_INT64: case AWS_EVENT_STREAM_HEADER_TIMESTAMP: case AWS_EVENT_STREAM_HEADER_UUID: AWS_RETURN_ERROR_IF( aws_byte_buf_write(buf, header->header_value.static_val, header->header_value_len), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN); break; case AWS_EVENT_STREAM_HEADER_BYTE_BUF: case AWS_EVENT_STREAM_HEADER_STRING: AWS_RETURN_ERROR_IF( aws_byte_buf_write_be16(buf, header->header_value_len), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN); AWS_RETURN_ERROR_IF( aws_byte_buf_write(buf, header->header_value.variable_len_val, header->header_value_len), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN); break; default: AWS_FATAL_ASSERT(false && !"Unknown header type!"); break; } } return AWS_OP_SUCCESS; } /* adds the headers represented in the headers list to the buffer. returns the new buffer offset for use elsewhere. 
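 * A typical call pattern, sketched with a hypothetical scratch allocation:
 *
 *     uint32_t needed = aws_event_stream_compute_headers_required_buffer_len(&headers);
 *     uint8_t *scratch = aws_mem_acquire(allocator, needed);
 *     size_t written = aws_event_stream_write_headers_to_buffer(&headers, scratch);
 *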
Assumes buffer length is at least the length of the return value from compute_headers_length() */ size_t aws_event_stream_write_headers_to_buffer(const struct aws_array_list *headers, uint8_t *buffer) { AWS_FATAL_PRECONDITION(buffer); uint32_t min_buffer_len_assumption = aws_event_stream_compute_headers_required_buffer_len(headers); struct aws_byte_buf safer_buf = aws_byte_buf_from_empty_array(buffer, min_buffer_len_assumption); if (aws_event_stream_write_headers_to_buffer_safe(headers, &safer_buf)) { return 0; } return safer_buf.len; } int aws_event_stream_read_headers_from_buffer( struct aws_array_list *headers, const uint8_t *buffer, size_t headers_len) { AWS_FATAL_PRECONDITION(headers); AWS_FATAL_PRECONDITION(buffer); if (AWS_UNLIKELY(headers_len > (size_t)AWS_EVENT_STREAM_MAX_HEADERS_SIZE)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED); } struct aws_byte_cursor buffer_cur = aws_byte_cursor_from_array(buffer, headers_len); /* iterate the buffer per header. */ while (buffer_cur.len) { struct aws_event_stream_header_value_pair header; AWS_ZERO_STRUCT(header); /* get the header info from the buffer, make sure to increment buffer offset. */ aws_byte_cursor_read_u8(&buffer_cur, &header.header_name_len); AWS_RETURN_ERROR_IF(header.header_name_len <= INT8_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); AWS_RETURN_ERROR_IF( aws_byte_cursor_read(&buffer_cur, header.header_name, (size_t)header.header_name_len), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); AWS_RETURN_ERROR_IF( aws_byte_cursor_read_u8(&buffer_cur, (uint8_t *)&header.header_value_type), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); switch (header.header_value_type) { case AWS_EVENT_STREAM_HEADER_BOOL_FALSE: header.header_value_len = 0; header.header_value.static_val[0] = 0; break; case AWS_EVENT_STREAM_HEADER_BOOL_TRUE: header.header_value_len = 0; header.header_value.static_val[0] = 1; break; case AWS_EVENT_STREAM_HEADER_BYTE: header.header_value_len = sizeof(uint8_t); AWS_RETURN_ERROR_IF( aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); break; case AWS_EVENT_STREAM_HEADER_INT16: header.header_value_len = sizeof(uint16_t); AWS_RETURN_ERROR_IF( aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); break; case AWS_EVENT_STREAM_HEADER_INT32: header.header_value_len = sizeof(uint32_t); AWS_RETURN_ERROR_IF( aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); break; case AWS_EVENT_STREAM_HEADER_INT64: case AWS_EVENT_STREAM_HEADER_TIMESTAMP: header.header_value_len = sizeof(uint64_t); AWS_RETURN_ERROR_IF( aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); break; case AWS_EVENT_STREAM_HEADER_BYTE_BUF: case AWS_EVENT_STREAM_HEADER_STRING: AWS_RETURN_ERROR_IF( aws_byte_cursor_read_be16(&buffer_cur, &header.header_value_len), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); AWS_RETURN_ERROR_IF( header.header_value_len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); AWS_RETURN_ERROR_IF( buffer_cur.len >= header.header_value_len, AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); header.header_value.variable_len_val = (uint8_t *)buffer_cur.ptr; aws_byte_cursor_advance(&buffer_cur, header.header_value_len); break; case 
AWS_EVENT_STREAM_HEADER_UUID: header.header_value_len = UUID_LEN; AWS_RETURN_ERROR_IF( aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, UUID_LEN), AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); break; } if (aws_array_list_push_back(headers, (const void *)&header)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } /* initialize message with the arguments * the underlying buffer will be allocated and payload will be copied. * see specification, this code should simply add these fields according to that.*/ int aws_event_stream_message_init( struct aws_event_stream_message *message, struct aws_allocator *alloc, const struct aws_array_list *headers, const struct aws_byte_buf *payload) { AWS_FATAL_PRECONDITION(message); AWS_FATAL_PRECONDITION(alloc); size_t payload_len = payload ? payload->len : 0; uint32_t headers_length = aws_event_stream_compute_headers_required_buffer_len(headers); if (AWS_UNLIKELY(headers_length > AWS_EVENT_STREAM_MAX_HEADERS_SIZE)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED); } uint32_t total_length = (uint32_t)(AWS_EVENT_STREAM_PRELUDE_LENGTH + headers_length + payload_len + AWS_EVENT_STREAM_TRAILER_LENGTH); if (AWS_UNLIKELY(total_length < headers_length || total_length < payload_len)) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } if (AWS_UNLIKELY(total_length > AWS_EVENT_STREAM_MAX_MESSAGE_SIZE)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED); } message->alloc = alloc; aws_byte_buf_init(&message->message_buffer, message->alloc, total_length); aws_byte_buf_write_be32(&message->message_buffer, total_length); aws_byte_buf_write_be32(&message->message_buffer, headers_length); uint32_t running_crc = aws_checksums_crc32(message->message_buffer.buffer, (int)message->message_buffer.len, 0); const uint8_t *pre_prelude_marker = message->message_buffer.buffer + message->message_buffer.len; size_t pre_prelude_position_marker = message->message_buffer.len; aws_byte_buf_write_be32(&message->message_buffer, running_crc); if (headers_length) { if (aws_event_stream_write_headers_to_buffer_safe(headers, &message->message_buffer)) { aws_event_stream_message_clean_up(message); return AWS_OP_ERR; } } if (payload) { aws_byte_buf_write_from_whole_buffer(&message->message_buffer, *payload); } running_crc = aws_checksums_crc32( pre_prelude_marker, (int)(message->message_buffer.len - pre_prelude_position_marker), running_crc); aws_byte_buf_write_be32(&message->message_buffer, running_crc); return AWS_OP_SUCCESS; } /* add buffer to the message (non-owning). Verify buffer crcs and that length fields are reasonable. 
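 * A rough usage sketch (incoming_buf is a hypothetical buffer already holding one complete
 * message; no copy is made, so incoming_buf must outlive the parsed message):
 *
 *     struct aws_event_stream_message msg;
 *     if (aws_event_stream_message_from_buffer(&msg, alloc, &incoming_buf) == AWS_OP_SUCCESS) {
 *         const uint8_t *payload = aws_event_stream_message_payload(&msg);
 *         uint32_t payload_len = aws_event_stream_message_payload_len(&msg);
 *         // ... use the payload, then clean up (the caller still owns incoming_buf) ...
 *         aws_event_stream_message_clean_up(&msg);
 *     }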
*/ int aws_event_stream_message_from_buffer( struct aws_event_stream_message *message, struct aws_allocator *alloc, struct aws_byte_buf *buffer) { AWS_FATAL_PRECONDITION(message); AWS_FATAL_PRECONDITION(alloc); AWS_FATAL_PRECONDITION(buffer); message->alloc = alloc; if (AWS_UNLIKELY(buffer->len < AWS_EVENT_STREAM_PRELUDE_LENGTH + AWS_EVENT_STREAM_TRAILER_LENGTH)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); } struct aws_byte_cursor parsing_cur = aws_byte_cursor_from_buf(buffer); uint32_t message_length = 0; aws_byte_cursor_read_be32(&parsing_cur, &message_length); if (AWS_UNLIKELY(message_length != buffer->len)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH); } if (AWS_UNLIKELY(message_length > AWS_EVENT_STREAM_MAX_MESSAGE_SIZE)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED); } /* skip the headers for the moment, we'll handle those later. */ aws_byte_cursor_advance(&parsing_cur, sizeof(uint32_t)); uint32_t running_crc = aws_checksums_crc32(buffer->buffer, (int)PRELUDE_CRC_OFFSET, 0); uint32_t prelude_crc = 0; const uint8_t *start_of_payload_checksum = parsing_cur.ptr; size_t start_of_payload_checksum_pos = PRELUDE_CRC_OFFSET; aws_byte_cursor_read_be32(&parsing_cur, &prelude_crc); if (running_crc != prelude_crc) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE); } running_crc = aws_checksums_crc32( start_of_payload_checksum, (int)(message_length - start_of_payload_checksum_pos - AWS_EVENT_STREAM_TRAILER_LENGTH), running_crc); uint32_t message_crc = aws_read_u32(buffer->buffer + message_length - AWS_EVENT_STREAM_TRAILER_LENGTH); if (running_crc != message_crc) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_CHECKSUM_FAILURE); } message->message_buffer = *buffer; /* we don't own this buffer, this is a zero allocation/copy path. Setting allocator to null will prevent the * clean_up from attempting to free it */ message->message_buffer.allocator = NULL; if (aws_event_stream_message_headers_len(message) > message_length - AWS_EVENT_STREAM_PRELUDE_LENGTH - AWS_EVENT_STREAM_TRAILER_LENGTH) { AWS_ZERO_STRUCT(message->message_buffer); return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); } return AWS_OP_SUCCESS; } /* Verify buffer crcs and that length fields are reasonable. Once that is done, the buffer is copied to the message. */ int aws_event_stream_message_from_buffer_copy( struct aws_event_stream_message *message, struct aws_allocator *alloc, const struct aws_byte_buf *buffer) { int parse_value = aws_event_stream_message_from_buffer(message, alloc, (struct aws_byte_buf *)buffer); if (!parse_value) { aws_byte_buf_init_copy(&message->message_buffer, alloc, buffer); message->alloc = alloc; return AWS_OP_SUCCESS; } return parse_value; } /* if buffer is owned, release the memory. 
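 * A build-then-clean-up sketch (alloc and the literal header values are illustrative;
 * aws_byte_buf_from_c_str() is from aws-c-common):
 *
 *     struct aws_array_list headers;
 *     aws_event_stream_headers_list_init(&headers, alloc);
 *     aws_event_stream_add_string_header(&headers, "operation", 9, "ping", 4, 0);
 *     struct aws_byte_buf payload = aws_byte_buf_from_c_str("{}");
 *     struct aws_event_stream_message msg;
 *     aws_event_stream_message_init(&msg, alloc, &headers, &payload);
 *     // ... send aws_event_stream_message_buffer() / aws_event_stream_message_total_length() bytes ...
 *     aws_event_stream_message_clean_up(&msg);
 *     aws_event_stream_headers_list_cleanup(&headers);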
*/ void aws_event_stream_message_clean_up(struct aws_event_stream_message *message) { aws_byte_buf_clean_up(&message->message_buffer); } uint32_t aws_event_stream_message_total_length(const struct aws_event_stream_message *message) { struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer); aws_byte_cursor_advance(&read_cur, TOTAL_LEN_OFFSET); uint32_t total_len = 0; aws_byte_cursor_read_be32(&read_cur, &total_len); return total_len; } uint32_t aws_event_stream_message_headers_len(const struct aws_event_stream_message *message) { struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer); aws_byte_cursor_advance(&read_cur, HEADER_LEN_OFFSET); uint32_t headers_len = 0; aws_byte_cursor_read_be32(&read_cur, &headers_len); return headers_len; } uint32_t aws_event_stream_message_prelude_crc(const struct aws_event_stream_message *message) { struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer); aws_byte_cursor_advance(&read_cur, PRELUDE_CRC_OFFSET); uint32_t prelude_crc = 0; aws_byte_cursor_read_be32(&read_cur, &prelude_crc); return prelude_crc; } int aws_event_stream_message_headers(const struct aws_event_stream_message *message, struct aws_array_list *headers) { struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer); aws_byte_cursor_advance(&read_cur, AWS_EVENT_STREAM_PRELUDE_LENGTH); return aws_event_stream_read_headers_from_buffer( headers, read_cur.ptr, aws_event_stream_message_headers_len(message)); } const uint8_t *aws_event_stream_message_payload(const struct aws_event_stream_message *message) { AWS_FATAL_PRECONDITION(message); struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer); aws_byte_cursor_advance(&read_cur, AWS_EVENT_STREAM_PRELUDE_LENGTH + aws_event_stream_message_headers_len(message)); return read_cur.ptr; } uint32_t aws_event_stream_message_payload_len(const struct aws_event_stream_message *message) { AWS_FATAL_PRECONDITION(message); return aws_event_stream_message_total_length(message) - (AWS_EVENT_STREAM_PRELUDE_LENGTH + aws_event_stream_message_headers_len(message) + AWS_EVENT_STREAM_TRAILER_LENGTH); } uint32_t aws_event_stream_message_message_crc(const struct aws_event_stream_message *message) { AWS_FATAL_PRECONDITION(message); struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer); aws_byte_cursor_advance( &read_cur, aws_event_stream_message_total_length(message) - AWS_EVENT_STREAM_TRAILER_LENGTH); uint32_t message_crc = 0; aws_byte_cursor_read_be32(&read_cur, &message_crc); return message_crc; } const uint8_t *aws_event_stream_message_buffer(const struct aws_event_stream_message *message) { AWS_FATAL_PRECONDITION(message); return message->message_buffer.buffer; } #define DEBUG_STR_PRELUDE_TOTAL_LEN "\"total_length\": " #define DEBUG_STR_PRELUDE_HDRS_LEN "\"headers_length\": " #define DEBUG_STR_PRELUDE_CRC "\"prelude_crc\": " #define DEBUG_STR_MESSAGE_CRC "\"message_crc\": " #define DEBUG_STR_HEADER_NAME "\"name\": " #define DEBUG_STR_HEADER_VALUE "\"value\": " #define DEBUG_STR_HEADER_TYPE "\"type\": " int aws_event_stream_message_to_debug_str(FILE *fd, const struct aws_event_stream_message *message) { AWS_FATAL_PRECONDITION(fd); AWS_FATAL_PRECONDITION(message); struct aws_array_list headers; aws_event_stream_headers_list_init(&headers, message->alloc); aws_event_stream_message_headers(message, &headers); fprintf( fd, "{\n " DEBUG_STR_PRELUDE_TOTAL_LEN "%d,\n " DEBUG_STR_PRELUDE_HDRS_LEN "%d,\n " 
DEBUG_STR_PRELUDE_CRC "%d,\n", aws_event_stream_message_total_length(message), aws_event_stream_message_headers_len(message), aws_event_stream_message_prelude_crc(message)); int count = 0; uint16_t headers_count = (uint16_t)aws_array_list_length(&headers); fprintf(fd, " \"headers\": ["); for (uint16_t i = 0; i < headers_count; ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers, (void **)&header, i); fprintf(fd, " {\n"); fprintf(fd, " " DEBUG_STR_HEADER_NAME "\""); fwrite(header->header_name, sizeof(char), (size_t)header->header_name_len, fd); fprintf(fd, "\",\n"); fprintf(fd, " " DEBUG_STR_HEADER_TYPE "%d,\n", header->header_value_type); if (header->header_value_type == AWS_EVENT_STREAM_HEADER_BOOL_FALSE) { fprintf(fd, " " DEBUG_STR_HEADER_VALUE "false\n"); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_BOOL_TRUE) { fprintf(fd, " " DEBUG_STR_HEADER_VALUE "true\n"); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE) { int8_t int_value = (int8_t)header->header_value.static_val[0]; fprintf(fd, " " DEBUG_STR_HEADER_VALUE "%d\n", (int)int_value); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_INT16) { int16_t int_value = aws_read_u16(header->header_value.static_val); fprintf(fd, " " DEBUG_STR_HEADER_VALUE "%d\n", (int)int_value); } else if (header->header_value_type == AWS_EVENT_STREAM_HEADER_INT32) { int32_t int_value = (int32_t)aws_read_u32(header->header_value.static_val); fprintf(fd, " " DEBUG_STR_HEADER_VALUE "%d\n", (int)int_value); } else if ( header->header_value_type == AWS_EVENT_STREAM_HEADER_INT64 || header->header_value_type == AWS_EVENT_STREAM_HEADER_TIMESTAMP) { int64_t int_value = (int64_t)aws_read_u64(header->header_value.static_val); fprintf(fd, " " DEBUG_STR_HEADER_VALUE "%lld\n", (long long)int_value); } else { size_t buffer_len = 0; aws_base64_compute_encoded_len(header->header_value_len, &buffer_len); char *encoded_buffer = (char *)aws_mem_acquire(message->alloc, buffer_len); struct aws_byte_buf encode_output = aws_byte_buf_from_array((uint8_t *)encoded_buffer, buffer_len); if (header->header_value_type == AWS_EVENT_STREAM_HEADER_UUID) { struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(header->header_value.static_val, header->header_value_len); aws_base64_encode(&to_encode, &encode_output); } else { struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(header->header_value.variable_len_val, header->header_value_len); aws_base64_encode(&to_encode, &encode_output); } fprintf(fd, " " DEBUG_STR_HEADER_VALUE "\"%s\"\n", encoded_buffer); aws_mem_release(message->alloc, encoded_buffer); } fprintf(fd, " }"); if (count < headers_count - 1) { fprintf(fd, ","); } fprintf(fd, "\n"); count++; } aws_event_stream_headers_list_cleanup(&headers); fprintf(fd, " ],\n"); size_t payload_len = aws_event_stream_message_payload_len(message); const uint8_t *payload = aws_event_stream_message_payload(message); size_t encoded_len = 0; aws_base64_compute_encoded_len(payload_len, &encoded_len); char *encoded_payload = (char *)aws_mem_acquire(message->alloc, encoded_len); struct aws_byte_cursor payload_buffer = aws_byte_cursor_from_array(payload, payload_len); struct aws_byte_buf encoded_payload_buffer = aws_byte_buf_from_array((uint8_t *)encoded_payload, encoded_len); aws_base64_encode(&payload_buffer, &encoded_payload_buffer); fprintf(fd, " \"payload\": \"%s\",\n", encoded_payload); fprintf(fd, " " DEBUG_STR_MESSAGE_CRC "%d\n}\n", aws_event_stream_message_message_crc(message)); 
return AWS_OP_SUCCESS; } int aws_event_stream_headers_list_init(struct aws_array_list *headers, struct aws_allocator *allocator) { AWS_FATAL_PRECONDITION(headers); AWS_FATAL_PRECONDITION(allocator); return aws_array_list_init_dynamic(headers, allocator, 4, sizeof(struct aws_event_stream_header_value_pair)); } void aws_event_stream_headers_list_cleanup(struct aws_array_list *headers) { AWS_FATAL_PRECONDITION(headers); if (AWS_UNLIKELY(!headers || !aws_array_list_is_valid(headers))) { return; } for (size_t i = 0; i < aws_array_list_length(headers); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(headers, (void **)&header, i); if (header->value_owned) { aws_mem_release(headers->alloc, (void *)header->header_value.variable_len_val); } } aws_array_list_clean_up(headers); } static int s_add_variable_len_header( struct aws_array_list *headers, struct aws_event_stream_header_value_pair *header, const char *name, uint8_t name_len, uint8_t *value, uint16_t value_len, int8_t copy) { memcpy((void *)header->header_name, (void *)name, (size_t)name_len); if (value_len != 0 && copy) { header->header_value.variable_len_val = aws_mem_acquire(headers->alloc, value_len); header->value_owned = 1; memcpy((void *)header->header_value.variable_len_val, (void *)value, value_len); } else { header->value_owned = 0; header->header_value.variable_len_val = value; } if (aws_array_list_push_back(headers, (void *)header)) { if (header->value_owned) { aws_mem_release(headers->alloc, (void *)header->header_value.variable_len_val); } return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_event_stream_add_string_header( struct aws_array_list *headers, const char *name, uint8_t name_len, const char *value, uint16_t value_len, int8_t copy) { AWS_FATAL_PRECONDITION(headers); AWS_RETURN_ERROR_IF( name_len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); AWS_RETURN_ERROR_IF(value_len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); struct aws_event_stream_header_value_pair header = { .header_name_len = name_len, .header_value_len = value_len, .value_owned = copy, .header_value_type = AWS_EVENT_STREAM_HEADER_STRING, }; return s_add_variable_len_header(headers, &header, name, name_len, (uint8_t *)value, value_len, copy); } struct aws_event_stream_header_value_pair aws_event_stream_create_string_header( struct aws_byte_cursor name, struct aws_byte_cursor value) { AWS_FATAL_PRECONDITION(name.len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX); AWS_FATAL_PRECONDITION(value.len <= INT16_MAX); struct aws_event_stream_header_value_pair header = { .header_value_type = AWS_EVENT_STREAM_HEADER_STRING, .header_value.variable_len_val = value.ptr, .header_value_len = (uint16_t)value.len, .header_name_len = (uint8_t)name.len, .value_owned = 0, }; memcpy(header.header_name, name.ptr, name.len); return header; } struct aws_event_stream_header_value_pair aws_event_stream_create_int32_header( struct aws_byte_cursor name, int32_t value) { AWS_FATAL_PRECONDITION(name.len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX); struct aws_event_stream_header_value_pair header = { .header_value_type = AWS_EVENT_STREAM_HEADER_INT32, .header_value_len = (uint16_t)sizeof(int32_t), .header_name_len = (uint8_t)name.len, .value_owned = 0, }; memcpy(header.header_name, name.ptr, name.len); aws_write_u32((uint32_t)value, header.header_value.static_val); return header; } int aws_event_stream_add_bool_header(struct aws_array_list *headers, const char *name, uint8_t name_len, 
int8_t value) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len); return aws_event_stream_add_bool_header_by_cursor(headers, name_cursor, value != 0); } #define AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name_cursor) \ AWS_FATAL_PRECONDITION(name_cursor.len > 0); \ AWS_FATAL_PRECONDITION(name_cursor.ptr != NULL); \ AWS_RETURN_ERROR_IF( \ name_cursor.len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); int aws_event_stream_add_bool_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, bool value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = 0, .value_owned = 0, .header_value_type = value ? AWS_EVENT_STREAM_HEADER_BOOL_TRUE : AWS_EVENT_STREAM_HEADER_BOOL_FALSE, }; memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len); return aws_array_list_push_back(headers, (void *)&header); } int aws_event_stream_add_byte_header(struct aws_array_list *headers, const char *name, uint8_t name_len, int8_t value) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len); return aws_event_stream_add_byte_header_by_cursor(headers, name_cursor, value); } int aws_event_stream_add_byte_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int8_t value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = 1, .value_owned = 0, .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE, }; header.header_value.static_val[0] = (uint8_t)value; memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len); return aws_array_list_push_back(headers, (void *)&header); } int aws_event_stream_add_int16_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int16_t value) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len); return aws_event_stream_add_int16_header_by_cursor(headers, name_cursor, value); } int aws_event_stream_add_int16_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int16_t value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = sizeof(value), .value_owned = 0, .header_value_type = AWS_EVENT_STREAM_HEADER_INT16, }; aws_write_u16((uint16_t)value, header.header_value.static_val); memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len); return aws_array_list_push_back(headers, (void *)&header); } int aws_event_stream_add_int32_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int32_t value) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len); return aws_event_stream_add_int32_header_by_cursor(headers, name_cursor, value); } int aws_event_stream_add_int32_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int32_t value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = sizeof(value), .value_owned = 0, .header_value_type = AWS_EVENT_STREAM_HEADER_INT32, }; 
aws_write_u32((uint32_t)value, header.header_value.static_val); memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len); return aws_array_list_push_back(headers, (void *)&header); } int aws_event_stream_add_int64_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int64_t value) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len); return aws_event_stream_add_int64_header_by_cursor(headers, name_cursor, value); } int aws_event_stream_add_int64_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int64_t value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = sizeof(value), .value_owned = 0, .header_value_type = AWS_EVENT_STREAM_HEADER_INT64, }; aws_write_u64((uint64_t)value, header.header_value.static_val); memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len); return aws_array_list_push_back(headers, (void *)&header); } int aws_event_stream_add_string_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); AWS_RETURN_ERROR_IF(value.len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = (uint16_t)value.len, .value_owned = 1, .header_value_type = AWS_EVENT_STREAM_HEADER_STRING, }; return s_add_variable_len_header( headers, &header, (const char *)name.ptr, (uint8_t)name.len, value.ptr, (uint16_t)value.len, 1); } int aws_event_stream_add_byte_buf_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); AWS_RETURN_ERROR_IF(value.len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = (uint16_t)value.len, .value_owned = 1, .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE_BUF, }; return s_add_variable_len_header( headers, &header, (const char *)name.ptr, (uint8_t)name.len, value.ptr, (uint16_t)value.len, 1); } int aws_event_stream_add_timestamp_header( struct aws_array_list *headers, const char *name, uint8_t name_len, int64_t value) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len); return aws_event_stream_add_timestamp_header_by_cursor(headers, name_cursor, value); } int aws_event_stream_add_timestamp_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, int64_t value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = sizeof(value), .value_owned = 0, .header_value_type = AWS_EVENT_STREAM_HEADER_TIMESTAMP, }; aws_write_u64((uint64_t)value, header.header_value.static_val); memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len); return aws_array_list_push_back(headers, (void *)&header); } int aws_event_stream_add_uuid_header( struct aws_array_list *headers, const char *name, uint8_t name_len, const uint8_t *value) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, 
(size_t)name_len); struct aws_byte_cursor value_cursor = aws_byte_cursor_from_array(value, (size_t)UUID_LEN); return aws_event_stream_add_uuid_header_by_cursor(headers, name_cursor, value_cursor); } int aws_event_stream_add_uuid_header_by_cursor( struct aws_array_list *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { AWS_FATAL_PRECONDITION(headers); AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name); AWS_RETURN_ERROR_IF(value.len == UUID_LEN, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); struct aws_event_stream_header_value_pair header = { .header_name_len = (uint8_t)name.len, .header_value_len = UUID_LEN, .value_owned = 0, .header_value_type = AWS_EVENT_STREAM_HEADER_UUID, }; memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len); memcpy((void *)header.header_value.static_val, value.ptr, UUID_LEN); return aws_array_list_push_back(headers, (void *)&header); } int aws_event_stream_add_bytebuf_header( struct aws_array_list *headers, const char *name, uint8_t name_len, uint8_t *value, uint16_t value_len, int8_t copy) { AWS_FATAL_PRECONDITION(headers); AWS_FATAL_PRECONDITION(name); AWS_RETURN_ERROR_IF( name_len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); AWS_RETURN_ERROR_IF(value_len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN); struct aws_event_stream_header_value_pair header = { .header_name_len = name_len, .header_value_len = value_len, .value_owned = copy, .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE_BUF, }; return s_add_variable_len_header(headers, &header, name, name_len, value, value_len, copy); } int aws_event_stream_add_header( struct aws_array_list *headers, const struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(headers); AWS_FATAL_PRECONDITION(header); struct aws_event_stream_header_value_pair header_copy = *header; if (header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING || header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF) { return s_add_variable_len_header( headers, &header_copy, header->header_name, header->header_name_len, header->header_value.variable_len_val, header->header_value_len, 1); /* Copy the header value */ } return aws_array_list_push_back(headers, (void *)&header_copy); } struct aws_byte_buf aws_event_stream_header_name(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return aws_byte_buf_from_array((uint8_t *)header->header_name, header->header_name_len); } int8_t aws_event_stream_header_value_as_byte(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return (int8_t)header->header_value.static_val[0]; } struct aws_byte_buf aws_event_stream_header_value_as_string(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return aws_event_stream_header_value_as_bytebuf(header); } int8_t aws_event_stream_header_value_as_bool(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return header->header_value_type == AWS_EVENT_STREAM_HEADER_BOOL_TRUE ? 
(int8_t)1 : (int8_t)0; } int16_t aws_event_stream_header_value_as_int16(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return (int16_t)aws_read_u16(header->header_value.static_val); } int32_t aws_event_stream_header_value_as_int32(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return (int32_t)aws_read_u32(header->header_value.static_val); } int64_t aws_event_stream_header_value_as_int64(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return (int64_t)aws_read_u64(header->header_value.static_val); } struct aws_byte_buf aws_event_stream_header_value_as_bytebuf(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return aws_byte_buf_from_array(header->header_value.variable_len_val, header->header_value_len); } int64_t aws_event_stream_header_value_as_timestamp(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return aws_event_stream_header_value_as_int64(header); } struct aws_byte_buf aws_event_stream_header_value_as_uuid(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return aws_byte_buf_from_array(header->header_value.static_val, UUID_LEN); } uint16_t aws_event_stream_header_value_length(struct aws_event_stream_header_value_pair *header) { AWS_FATAL_PRECONDITION(header); return header->header_value_len; } static struct aws_event_stream_message_prelude s_empty_prelude = {.total_len = 0, .headers_len = 0, .prelude_crc = 0}; static void s_reset_header_state(struct aws_event_stream_streaming_decoder *decoder, uint8_t free_header_data) { if (free_header_data && decoder->current_header.value_owned) { aws_mem_release(decoder->alloc, (void *)decoder->current_header.header_value.variable_len_val); } memset((void *)&decoder->current_header, 0, sizeof(struct aws_event_stream_header_value_pair)); } static void s_reset_state(struct aws_event_stream_streaming_decoder *decoder); static int s_headers_state( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed); static int s_read_header_value( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { size_t current_pos = decoder->message_pos; /* amount that we've already read */ size_t length_read = current_pos - decoder->current_header_value_offset; struct aws_event_stream_header_value_pair *current_header = &decoder->current_header; if (!length_read && (current_header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF || current_header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING)) { /* save an allocation, this can only happen if the data we were handed is larger than the length of the header * value. we don't really need to handle offsets in this case. This expects the user is living by the contract * that they cannot act like they own this memory beyond the lifetime of their callback, and they should not * mutate it */ if (len >= current_header->header_value_len) { /* this part works regardless of type since the layout of the union will line up. 
*/ current_header->header_value.variable_len_val = (uint8_t *)data; current_header->value_owned = 0; decoder->on_header(decoder, &decoder->prelude, &decoder->current_header, decoder->user_context); *processed += current_header->header_value_len; decoder->message_pos += current_header->header_value_len; decoder->running_crc = aws_checksums_crc32(data, (int)current_header->header_value_len, decoder->running_crc); s_reset_header_state(decoder, 1); decoder->state = s_headers_state; return AWS_OP_SUCCESS; } /* a possible optimization later would be to only allocate this once, and then keep reusing the same buffer. for * subsequent messages.*/ current_header->header_value.variable_len_val = aws_mem_acquire(decoder->alloc, decoder->current_header.header_value_len); current_header->value_owned = 1; } size_t max_read = len >= current_header->header_value_len - length_read ? current_header->header_value_len - length_read : len; const uint8_t *header_value_alias = current_header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF || current_header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING ? current_header->header_value.variable_len_val : current_header->header_value.static_val; memcpy((void *)(header_value_alias + length_read), data, max_read); decoder->running_crc = aws_checksums_crc32(data, (int)max_read, decoder->running_crc); *processed += max_read; decoder->message_pos += max_read; length_read += max_read; if (length_read == current_header->header_value_len) { decoder->on_header(decoder, &decoder->prelude, current_header, decoder->user_context); s_reset_header_state(decoder, 1); decoder->state = s_headers_state; } return AWS_OP_SUCCESS; } static int s_read_header_value_len( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { size_t current_pos = decoder->message_pos; size_t length_portion_read = current_pos - decoder->current_header_value_offset; if (length_portion_read < sizeof(uint16_t)) { size_t max_to_read = len > sizeof(uint16_t) - length_portion_read ? 
sizeof(uint16_t) - length_portion_read : len; memcpy(decoder->working_buffer + length_portion_read, data, max_to_read); decoder->running_crc = aws_checksums_crc32(data, (int)max_to_read, decoder->running_crc); *processed += max_to_read; decoder->message_pos += max_to_read; length_portion_read = decoder->message_pos - decoder->current_header_value_offset; } if (length_portion_read == sizeof(uint16_t)) { decoder->current_header.header_value_len = aws_read_u16(decoder->working_buffer); decoder->current_header_value_offset = decoder->message_pos; decoder->state = s_read_header_value; } return AWS_OP_SUCCESS; } static int s_read_header_type( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { (void)len; uint8_t type = *data; decoder->running_crc = aws_checksums_crc32(data, 1, decoder->running_crc); *processed += 1; decoder->message_pos++; decoder->current_header_value_offset++; struct aws_event_stream_header_value_pair *current_header = &decoder->current_header; current_header->header_value_type = (enum aws_event_stream_header_value_type)type; switch (type) { case AWS_EVENT_STREAM_HEADER_STRING: case AWS_EVENT_STREAM_HEADER_BYTE_BUF: decoder->state = s_read_header_value_len; break; case AWS_EVENT_STREAM_HEADER_BOOL_FALSE: current_header->header_value_len = 0; current_header->header_value.static_val[0] = 0; decoder->on_header(decoder, &decoder->prelude, current_header, decoder->user_context); s_reset_header_state(decoder, 1); decoder->state = s_headers_state; break; case AWS_EVENT_STREAM_HEADER_BOOL_TRUE: current_header->header_value_len = 0; current_header->header_value.static_val[0] = 1; decoder->on_header(decoder, &decoder->prelude, current_header, decoder->user_context); s_reset_header_state(decoder, 1); decoder->state = s_headers_state; break; case AWS_EVENT_STREAM_HEADER_BYTE: current_header->header_value_len = 1; decoder->state = s_read_header_value; break; case AWS_EVENT_STREAM_HEADER_INT16: current_header->header_value_len = sizeof(uint16_t); decoder->state = s_read_header_value; break; case AWS_EVENT_STREAM_HEADER_INT32: current_header->header_value_len = sizeof(uint32_t); decoder->state = s_read_header_value; break; case AWS_EVENT_STREAM_HEADER_INT64: case AWS_EVENT_STREAM_HEADER_TIMESTAMP: current_header->header_value_len = sizeof(uint64_t); decoder->state = s_read_header_value; break; case AWS_EVENT_STREAM_HEADER_UUID: current_header->header_value_len = 16; decoder->state = s_read_header_value; break; default: return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_UNKNOWN_HEADER_TYPE); } return AWS_OP_SUCCESS; } static int s_read_header_name( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { size_t current_pos = decoder->message_pos; size_t length_read = current_pos - decoder->current_header_name_offset; size_t max_read = len >= decoder->current_header.header_name_len - length_read ? 
decoder->current_header.header_name_len - length_read : len; memcpy((void *)(decoder->current_header.header_name + length_read), data, max_read); decoder->running_crc = aws_checksums_crc32(data, (int)max_read, decoder->running_crc); *processed += max_read; decoder->message_pos += max_read; length_read += max_read; if (length_read == decoder->current_header.header_name_len) { decoder->state = s_read_header_type; decoder->current_header_value_offset = decoder->message_pos; } return AWS_OP_SUCCESS; } static int s_read_header_name_len( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { (void)len; decoder->current_header.header_name_len = *data; decoder->message_pos++; decoder->current_header_name_offset++; *processed += 1; decoder->state = s_read_header_name; decoder->running_crc = aws_checksums_crc32(data, 1, decoder->running_crc); return AWS_OP_SUCCESS; } static int s_start_header( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) /* NOLINT */ { (void)data; (void)len; (void)processed; decoder->state = s_read_header_name_len; decoder->current_header_name_offset = decoder->message_pos; return AWS_OP_SUCCESS; } static int s_payload_state( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed); /*Handles the initial state for header parsing. will oscillate between multiple other states as well. after all headers have been handled, payload will be set as the next state. */ static int s_headers_state( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) /* NOLINT */ { (void)data; (void)len; (void)processed; size_t current_pos = decoder->message_pos; size_t headers_boundary = decoder->prelude.headers_len + AWS_EVENT_STREAM_PRELUDE_LENGTH; if (current_pos < headers_boundary) { decoder->state = s_start_header; return AWS_OP_SUCCESS; } if (current_pos == headers_boundary) { decoder->state = s_payload_state; return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_PARSER_ILLEGAL_STATE); } /* handles reading the trailer. Once it has been read, it will be compared to the running checksum. If successful, * the state will be reset. */ static int s_read_trailer_state( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { size_t remaining_amount = decoder->prelude.total_len - decoder->message_pos; size_t segment_length = len > remaining_amount ? remaining_amount : len; size_t offset = sizeof(uint32_t) - remaining_amount; memcpy(decoder->working_buffer + offset, data, segment_length); decoder->message_pos += segment_length; *processed += segment_length; if (decoder->message_pos == decoder->prelude.total_len) { uint32_t message_crc = aws_read_u32(decoder->working_buffer); if (message_crc == decoder->running_crc) { if (decoder->on_complete) { decoder->on_complete(decoder, message_crc, decoder->user_context); } s_reset_state(decoder); } else { char error_message[70]; snprintf( error_message, sizeof(error_message), "CRC Mismatch. message_crc was 0x08%" PRIX32 ", but computed 0x08%" PRIX32, message_crc, decoder->running_crc); aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_CHECKSUM_FAILURE); decoder->on_error( decoder, &decoder->prelude, AWS_ERROR_EVENT_STREAM_MESSAGE_CHECKSUM_FAILURE, error_message, decoder->user_context); return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } /* handles the reading of the payload up to the final checksum. 
Sets read_trailer_state as the new state once * the payload has been processed. */ static int s_payload_state( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { if (decoder->message_pos < decoder->prelude.total_len - AWS_EVENT_STREAM_TRAILER_LENGTH) { size_t remaining_amount = decoder->prelude.total_len - decoder->message_pos - AWS_EVENT_STREAM_TRAILER_LENGTH; size_t segment_length = len > remaining_amount ? remaining_amount : len; int8_t final_segment = (segment_length + decoder->message_pos) == (decoder->prelude.total_len - AWS_EVENT_STREAM_TRAILER_LENGTH); struct aws_byte_buf payload_buf = aws_byte_buf_from_array(data, segment_length); decoder->on_payload(decoder, &payload_buf, final_segment, decoder->user_context); decoder->message_pos += segment_length; decoder->running_crc = aws_checksums_crc32(data, (int)segment_length, decoder->running_crc); *processed += segment_length; } if (decoder->message_pos == decoder->prelude.total_len - AWS_EVENT_STREAM_TRAILER_LENGTH) { decoder->state = s_read_trailer_state; } return AWS_OP_SUCCESS; } /* Parses the payload and verifies checksums. Sets the next state if successful. */ static int s_verify_prelude_state( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) /* NOLINT */ { (void)data; (void)len; (void)processed; decoder->prelude.headers_len = aws_read_u32(decoder->working_buffer + HEADER_LEN_OFFSET); decoder->prelude.prelude_crc = aws_read_u32(decoder->working_buffer + PRELUDE_CRC_OFFSET); decoder->prelude.total_len = aws_read_u32(decoder->working_buffer + TOTAL_LEN_OFFSET); decoder->running_crc = aws_checksums_crc32(decoder->working_buffer, PRELUDE_CRC_OFFSET, 0); if (AWS_LIKELY(decoder->running_crc == decoder->prelude.prelude_crc)) { if (AWS_UNLIKELY( decoder->prelude.headers_len > AWS_EVENT_STREAM_MAX_HEADERS_SIZE || decoder->prelude.total_len > AWS_EVENT_STREAM_MAX_MESSAGE_SIZE)) { aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED); char error_message[] = "Maximum message field size exceeded"; decoder->on_error( decoder, &decoder->prelude, AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED, error_message, decoder->user_context); return AWS_OP_ERR; } /* Should only call on_prelude() after passing crc check and limitation check, otherwise call on_prelude() with * incorrect prelude is error prune. */ decoder->on_prelude(decoder, &decoder->prelude, decoder->user_context); decoder->running_crc = aws_checksums_crc32( decoder->working_buffer + PRELUDE_CRC_OFFSET, (int)sizeof(decoder->prelude.prelude_crc), decoder->running_crc); memset(decoder->working_buffer, 0, sizeof(decoder->working_buffer)); decoder->state = decoder->prelude.headers_len > 0 ? s_headers_state : s_payload_state; } else { char error_message[70]; snprintf( error_message, sizeof(error_message), "CRC Mismatch. 
prelude_crc was 0x08%" PRIX32 ", but computed 0x08%" PRIX32, decoder->prelude.prelude_crc, decoder->running_crc); aws_raise_error(AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE); decoder->on_error( decoder, &decoder->prelude, AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE, error_message, decoder->user_context); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* initial state handles up to the reading of the prelude */ static int s_start_state( struct aws_event_stream_streaming_decoder *decoder, const uint8_t *data, size_t len, size_t *processed) { size_t previous_position = decoder->message_pos; if (decoder->message_pos < AWS_EVENT_STREAM_PRELUDE_LENGTH) { if (len >= AWS_EVENT_STREAM_PRELUDE_LENGTH - decoder->message_pos) { memcpy( decoder->working_buffer + decoder->message_pos, data, AWS_EVENT_STREAM_PRELUDE_LENGTH - decoder->message_pos); decoder->message_pos += AWS_EVENT_STREAM_PRELUDE_LENGTH - decoder->message_pos; } else { memcpy(decoder->working_buffer + decoder->message_pos, data, len); decoder->message_pos += len; } *processed += decoder->message_pos - previous_position; } if (decoder->message_pos == AWS_EVENT_STREAM_PRELUDE_LENGTH) { decoder->state = s_verify_prelude_state; } return AWS_OP_SUCCESS; } static void s_reset_state(struct aws_event_stream_streaming_decoder *decoder) { memset(decoder->working_buffer, 0, sizeof(decoder->working_buffer)); decoder->message_pos = 0; decoder->running_crc = 0; decoder->current_header_name_offset = 0; decoder->current_header_value_offset = 0; AWS_ZERO_STRUCT(decoder->current_header); decoder->prelude = s_empty_prelude; decoder->state = s_start_state; } void aws_event_stream_streaming_decoder_init_from_options( struct aws_event_stream_streaming_decoder *decoder, struct aws_allocator *allocator, const struct aws_event_stream_streaming_decoder_options *options) { AWS_ASSERT(decoder); AWS_ASSERT(allocator); AWS_ASSERT(options); AWS_ASSERT(options->on_error); AWS_ASSERT(options->on_header); AWS_ASSERT(options->on_payload_segment); AWS_ASSERT(options->on_prelude); AWS_ASSERT(options->on_prelude); s_reset_state(decoder); decoder->alloc = allocator; decoder->on_error = options->on_error; decoder->on_header = options->on_header; decoder->on_payload = options->on_payload_segment; decoder->on_prelude = options->on_prelude; decoder->on_complete = options->on_complete; decoder->user_context = options->user_data; } void aws_event_stream_streaming_decoder_init( struct aws_event_stream_streaming_decoder *decoder, struct aws_allocator *alloc, aws_event_stream_process_on_payload_segment_fn *on_payload_segment, aws_event_stream_prelude_received_fn *on_prelude, aws_event_stream_header_received_fn *on_header, aws_event_stream_on_error_fn *on_error, void *user_data) { struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = on_payload_segment, .on_prelude = on_prelude, .on_header = on_header, .on_error = on_error, .user_data = user_data, }; aws_event_stream_streaming_decoder_init_from_options(decoder, alloc, &decoder_options); } void aws_event_stream_streaming_decoder_clean_up(struct aws_event_stream_streaming_decoder *decoder) { s_reset_state(decoder); decoder->on_error = 0; decoder->on_header = 0; decoder->on_payload = 0; decoder->on_prelude = 0; decoder->user_context = 0; decoder->on_complete = 0; } /* Simply sends the data to the state machine until all has been processed or an error is returned. 
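Typical call pattern: initialize the decoder via aws_event_stream_streaming_decoder_init_from_options(), then pump each buffer read from the transport through this function; on_prelude, on_header, and on_payload_segment fire as the message is decoded, on_complete fires once the trailing CRC is verified, and on_error reports checksum or size-limit violations.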
*/ int aws_event_stream_streaming_decoder_pump( struct aws_event_stream_streaming_decoder *decoder, const struct aws_byte_buf *data) { size_t processed = 0; int err_val = 0; while (!err_val && data->buffer && data->len && processed < data->len) { err_val = decoder->state(decoder, data->buffer + processed, data->len - processed, &processed); } return err_val; } #ifdef _MSC_VER # pragma warning(pop) #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/source/event_stream_channel_handler.c000066400000000000000000000553531456575232400312020ustar00rootroot00000000000000/* * Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include #include #include static const size_t s_default_payload_size = 1024; /* an event stream message has overhead of * [msg len (uint32_t)] * [headers len (uint32_t)] * [prelude crc (uint32_t)] * ... headers and payload .... * [message crc (uint32_t)] */ static const size_t s_message_overhead_size = AWS_EVENT_STREAM_PRELUDE_LENGTH + AWS_EVENT_STREAM_TRAILER_LENGTH; struct aws_event_stream_channel_handler { struct aws_channel_handler handler; struct aws_byte_buf message_buf; uint32_t running_crc; uint32_t current_message_len; aws_event_stream_channel_handler_on_message_received_fn *on_message_received; void *user_data; size_t initial_window_size; bool manual_window_management; }; static int s_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: received message of size %zu", (void *)handler, message->message_data.len); struct aws_event_stream_channel_handler *event_stream_handler = handler->impl; struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); int error_code = AWS_ERROR_SUCCESS; while (message_cursor.len) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: processing chunk of size %zu", (void *)handler, message_cursor.len); /* first read only the prelude so we can do checks before reading the entire buffer. 
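The prelude carries the total message length and its own CRC, so validating those first bounds how much data we buffer before parsing the rest of the message.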
*/ if (event_stream_handler->message_buf.len < AWS_EVENT_STREAM_PRELUDE_LENGTH) { size_t remaining_prelude = AWS_EVENT_STREAM_PRELUDE_LENGTH - event_stream_handler->message_buf.len; size_t to_copy = aws_min_size(message_cursor.len, remaining_prelude); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: processing prelude, %zu bytes of an expected 12.", (void *)handler, to_copy); if (!aws_byte_buf_write(&event_stream_handler->message_buf, message_cursor.ptr, to_copy)) { error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: writing to prelude buffer failed with error %s", (void *)handler, aws_error_debug_str(error_code)); goto finished; } aws_byte_cursor_advance(&message_cursor, to_copy); } /* we need to get the prelude so we can get the message length to know how much to read and also * to check the prelude CRC to protect against bit-flips causing us to read to much memory */ if (event_stream_handler->message_buf.len == AWS_EVENT_STREAM_PRELUDE_LENGTH) { AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: processing prelude buffer", (void *)handler); struct aws_byte_cursor prelude_cursor = aws_byte_cursor_from_buf(&event_stream_handler->message_buf); event_stream_handler->running_crc = aws_checksums_crc32(prelude_cursor.ptr, sizeof(uint32_t) + sizeof(uint32_t), 0); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: calculated prelude CRC of %" PRIu32, (void *)handler, event_stream_handler->running_crc); aws_byte_cursor_read_be32(&prelude_cursor, &event_stream_handler->current_message_len); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: read total message length of %" PRIu32, (void *)handler, event_stream_handler->current_message_len); if (event_stream_handler->current_message_len > AWS_EVENT_STREAM_MAX_MESSAGE_SIZE) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: message length of %" PRIu32 " exceeds the max size of %zu", (void *)handler, event_stream_handler->current_message_len, (size_t)AWS_EVENT_STREAM_MAX_MESSAGE_SIZE); aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED); error_code = aws_last_error(); goto finished; } /* advance past the headers field since we don't really care about it at this point */ aws_byte_cursor_advance(&prelude_cursor, sizeof(uint32_t)); uint32_t prelude_crc = 0; aws_byte_cursor_read_be32(&prelude_cursor, &prelude_crc); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: read prelude CRC of %" PRIu32, (void *)handler, prelude_crc); /* make sure the checksum matches before processing any further */ if (event_stream_handler->running_crc != prelude_crc) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: prelude CRC mismatch. 
calculated %" PRIu32 " but the crc for the message was %" PRIu32, (void *)handler, event_stream_handler->running_crc, prelude_crc); aws_raise_error(AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE); error_code = aws_last_error(); goto finished; } } /* read whatever is remaining from the message */ if (event_stream_handler->message_buf.len < event_stream_handler->current_message_len) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: processing remaining message buffer", (void *)handler); size_t remaining = event_stream_handler->current_message_len - event_stream_handler->message_buf.len; size_t to_copy = aws_min_size(message_cursor.len, remaining); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: of the remaining %zu, processing %zu from the " "current message.", (void *)handler, remaining, to_copy); struct aws_byte_cursor to_append = aws_byte_cursor_advance(&message_cursor, to_copy); if (aws_byte_buf_append_dynamic(&event_stream_handler->message_buf, &to_append)) { error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Appending to the message buffer failed with error %s.", (void *)handler, aws_error_debug_str(error_code)); goto finished; } } /* If we read the entire message, parse it and give it back to the subscriber. Keep in mind, once we're to this * point the aws_event_stream API handles the rest of the message parsing and validation. */ if (event_stream_handler->message_buf.len == event_stream_handler->current_message_len) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: An entire message has been read. Parsing the message now.", (void *)handler); struct aws_event_stream_message received_message; AWS_ZERO_STRUCT(received_message); if (aws_event_stream_message_from_buffer( &received_message, event_stream_handler->handler.alloc, &event_stream_handler->message_buf)) { error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Parsing the message failed with error %s.", (void *)handler, aws_error_debug_str(error_code)); goto finished; } size_t message_size = event_stream_handler->message_buf.len; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Invoking on_message_received callback.", (void *)handler); event_stream_handler->on_message_received( &received_message, AWS_ERROR_SUCCESS, event_stream_handler->user_data); aws_event_stream_message_clean_up(&received_message); event_stream_handler->current_message_len = 0; event_stream_handler->running_crc = 0; aws_byte_buf_reset(&event_stream_handler->message_buf, true); if (!event_stream_handler->manual_window_management) { aws_channel_slot_increment_read_window(slot, message_size); } } } finished: if (error_code) { event_stream_handler->on_message_received(NULL, error_code, event_stream_handler->user_data); aws_channel_shutdown(slot->channel, error_code); } aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } struct message_write_data { struct aws_allocator *allocator; struct aws_channel_task task; struct aws_event_stream_channel_handler *handler; struct aws_event_stream_message *message; aws_event_stream_channel_handler_on_message_written_fn *on_message_written; void *user_data; }; static void s_on_message_write_completed_fn( struct aws_channel *channel, struct aws_io_message *message, int err_code, void *user_data) { (void)channel; (void)message; struct message_write_data *message_data = user_data; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "channel=%p: Message write completed. 
Invoking " "on_message_written callback.", (void *)channel); message_data->on_message_written(message_data->message, err_code, message_data->user_data); aws_mem_release(message_data->allocator, message_data); } static void s_write_handler_message(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct message_write_data *message_data = arg; AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "static: Write message task invoked."); if (status == AWS_TASK_STATUS_RUN_READY) { struct aws_event_stream_message *message = message_data->message; struct aws_event_stream_channel_handler *handler = message_data->handler; struct aws_byte_cursor message_cur = aws_byte_cursor_from_array( aws_event_stream_message_buffer(message), aws_event_stream_message_total_length(message)); while (message_cur.len) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: writing message chunk of size %zu.", (void *)&handler->handler, message_cur.len); /* io messages from the pool are allowed to be smaller than the requested size. */ struct aws_io_message *io_message = aws_channel_acquire_message_from_pool( handler->handler.slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, message_cur.len); if (!io_message) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Error occurred while acquiring io message %s.", (void *)&handler->handler, aws_error_debug_str(error_code)); message_data->on_message_written(message, error_code, message_data->user_data); aws_mem_release(message_data->allocator, message_data); aws_channel_shutdown(handler->handler.slot->channel, error_code); break; } aws_byte_buf_write_to_capacity(&io_message->message_data, &message_cur); /* if that was the end of the buffer we want to write, attach the completion callback to that io message */ if (message_cur.len == 0) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Message completely written to all io buffers.", (void *)&handler->handler); io_message->on_completion = s_on_message_write_completed_fn; io_message->user_data = message_data; } /* note if this fails the io message will not be queued and as a result will not have it's completion * callback invoked. */ if (aws_channel_slot_send_message(handler->handler.slot, io_message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(io_message->allocator, io_message); int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Error occurred while sending message to channel %s.", (void *)&handler->handler, aws_error_debug_str(error_code)); message_data->on_message_written(message, error_code, message_data->user_data); aws_mem_release(message_data->allocator, message_data); aws_channel_shutdown(handler->handler.slot->channel, error_code); break; } AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Message sent to channel", (void *)&handler->handler); } } else { AWS_LOGF_WARN(AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "static: Channel was shutdown. 
Message not sent"); message_data->on_message_written( message_data->message, AWS_ERROR_IO_OPERATION_CANCELLED, message_data->user_data); aws_mem_release(message_data->allocator, message_data); } } int aws_event_stream_channel_handler_write_message( struct aws_channel_handler *channel_handler, struct aws_event_stream_message *message, aws_event_stream_channel_handler_on_message_written_fn *on_message_written, void *user_data) { AWS_PRECONDITION(channel_handler); AWS_PRECONDITION(message); AWS_PRECONDITION(on_message_written); struct aws_event_stream_channel_handler *handler = channel_handler->impl; struct message_write_data *write_data = aws_mem_calloc(handler->handler.alloc, 1, sizeof(struct message_write_data)); if (!write_data) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Error occurred while allocating callback data %s.", (void *)channel_handler, aws_error_debug_str(aws_last_error())); aws_channel_shutdown(channel_handler->slot->channel, aws_last_error()); return AWS_OP_ERR; } write_data->handler = handler; write_data->user_data = user_data; write_data->message = message; write_data->on_message_written = on_message_written; write_data->allocator = handler->handler.alloc; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Scheduling message write task", (void *)channel_handler); aws_channel_task_init( &write_data->task, s_write_handler_message, write_data, "aws_event_stream_channel_handler_write_message"); aws_channel_schedule_task_now_serialized(handler->handler.slot->channel, &write_data->task); return AWS_OP_SUCCESS; } void *aws_event_stream_channel_handler_get_user_data(struct aws_channel_handler *channel_handler) { struct aws_event_stream_channel_handler *handler = channel_handler->impl; return handler->user_data; } struct window_update_data { struct aws_allocator *allocator; struct aws_channel_task task; struct aws_event_stream_channel_handler *handler; size_t window_update_size; }; static void s_update_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct window_update_data *update_data = arg; if (status == AWS_TASK_STATUS_RUN_READY) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "static: updating window. increment of %zu", update_data->window_update_size); aws_channel_slot_increment_read_window(update_data->handler->handler.slot, update_data->window_update_size); } aws_mem_release(update_data->allocator, update_data); } void aws_event_stream_channel_handler_increment_read_window( struct aws_channel_handler *channel_handler, size_t window_update_size) { AWS_PRECONDITION(channel_handler); struct aws_event_stream_channel_handler *handler = channel_handler->impl; if (!handler->manual_window_management) { return; } AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: A user requested window update and manual window management is specified. 
Updating size of %zu", (void *)channel_handler, window_update_size); if (aws_channel_thread_is_callers_thread(handler->handler.slot->channel)) { if (aws_channel_slot_increment_read_window(handler->handler.slot, window_update_size)) { aws_channel_shutdown(handler->handler.slot->channel, aws_last_error()); return; } } struct window_update_data *update_data = aws_mem_calloc(handler->handler.alloc, 1, sizeof(struct window_update_data)); if (!update_data) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Error occurred while allocating update window data %s.", (void *)channel_handler, aws_error_debug_str(aws_last_error())); aws_channel_shutdown(handler->handler.slot->channel, aws_last_error()); return; } update_data->allocator = handler->handler.alloc; update_data->handler = handler; update_data->window_update_size = window_update_size; aws_channel_task_init( &update_data->task, s_update_window_task, update_data, "aws_event_stream_channel_handler_increment_read_window"); aws_channel_schedule_task_now(handler->handler.slot->channel, &update_data->task); } static int s_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)handler; (void)slot; (void)message; AWS_FATAL_ASSERT(!"The event-stream-channel-handler is not designed to be a mid-channel handler."); return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } static int s_increment_read_window(struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)handler; return aws_channel_slot_increment_read_window(slot, size); } static size_t s_initial_window_size(struct aws_channel_handler *handler) { struct aws_event_stream_channel_handler *message_handler = handler->impl; return message_handler->initial_window_size; } static size_t s_message_overhead(struct aws_channel_handler *handler) { (void)handler; return s_message_overhead_size; } static void s_destroy(struct aws_channel_handler *handler) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: destroying event-stream message channel handler.", (void *)handler); struct aws_event_stream_channel_handler *event_stream_handler = handler->impl; aws_byte_buf_clean_up(&event_stream_handler->message_buf); aws_mem_release(handler->alloc, event_stream_handler); } static int s_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { (void)handler; AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: shutdown called on event-stream channel handler with error %s.", (void *)handler, aws_error_debug_str(error_code)); return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } static struct aws_channel_handler_vtable vtable = { .destroy = s_destroy, .increment_read_window = s_increment_read_window, .initial_window_size = s_initial_window_size, .process_read_message = s_process_read_message, .process_write_message = s_process_write_message, .message_overhead = s_message_overhead, .shutdown = s_shutdown, }; struct aws_channel_handler *aws_event_stream_channel_handler_new( struct aws_allocator *allocator, const struct aws_event_stream_channel_handler_options *handler_options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(handler_options); AWS_PRECONDITION(handler_options->on_message_received); AWS_LOGF_INFO(AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "static: creating new event-stream message channel handler."); struct 
aws_event_stream_channel_handler *event_stream_handler = aws_mem_calloc(allocator, 1, sizeof(struct aws_event_stream_channel_handler)); if (!event_stream_handler) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "static: Error occurred while allocating handler %s.", aws_error_debug_str(aws_last_error())); return NULL; } AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_CLIENT, "static: new handler is %p", (void *)&event_stream_handler->handler); if (aws_byte_buf_init( &event_stream_handler->message_buf, allocator, s_default_payload_size + s_message_overhead_size)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Error occurred while allocating scratch buffer %s.", (void *)&event_stream_handler->handler, aws_error_debug_str(aws_last_error())); aws_mem_release(allocator, event_stream_handler); return NULL; } event_stream_handler->on_message_received = handler_options->on_message_received; event_stream_handler->user_data = handler_options->user_data; event_stream_handler->initial_window_size = handler_options->initial_window_size > 0 ? handler_options->initial_window_size : SIZE_MAX; event_stream_handler->manual_window_management = handler_options->manual_window_management; event_stream_handler->handler.vtable = &vtable; event_stream_handler->handler.alloc = allocator; event_stream_handler->handler.impl = event_stream_handler; return &event_stream_handler->handler; } aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/source/event_stream_rpc.c000066400000000000000000000123241456575232400266500ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include const struct aws_byte_cursor aws_event_stream_rpc_message_type_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":message-type"); const struct aws_byte_cursor aws_event_stream_rpc_message_flags_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":message-flags"); const struct aws_byte_cursor aws_event_stream_rpc_stream_id_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":stream-id"); const struct aws_byte_cursor aws_event_stream_rpc_operation_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("operation"); /* just a convenience function for fetching message metadata from the event stream headers on a single iteration. */ int aws_event_stream_rpc_extract_message_metadata( const struct aws_array_list *message_headers, int32_t *stream_id, int32_t *message_type, int32_t *message_flags, struct aws_byte_buf *operation_name) { size_t length = aws_array_list_length(message_headers); bool message_type_found = 0; bool message_flags_found = 0; bool stream_id_found = 0; bool operation_name_found = 0; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_GENERAL, "processing message headers for rpc protocol. 
%zu headers to process.", length); for (size_t i = 0; i < length; ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(message_headers, (void **)&header, i); struct aws_byte_buf name_buf = aws_event_stream_header_name(header); AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_GENERAL, "processing header name " PRInSTR, AWS_BYTE_BUF_PRI(name_buf)); /* check type first since that's cheaper than a string compare */ if (header->header_value_type == AWS_EVENT_STREAM_HEADER_INT32) { struct aws_byte_buf stream_id_field = aws_byte_buf_from_array( aws_event_stream_rpc_stream_id_name.ptr, aws_event_stream_rpc_stream_id_name.len); if (aws_byte_buf_eq_ignore_case(&name_buf, &stream_id_field)) { *stream_id = aws_event_stream_header_value_as_int32(header); AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_GENERAL, "stream id header value %" PRId32, *stream_id); stream_id_found += 1; goto found; } struct aws_byte_buf message_type_field = aws_byte_buf_from_array( aws_event_stream_rpc_message_type_name.ptr, aws_event_stream_rpc_message_type_name.len); if (aws_byte_buf_eq_ignore_case(&name_buf, &message_type_field)) { *message_type = aws_event_stream_header_value_as_int32(header); AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_GENERAL, "message type header value %" PRId32, *message_type); message_type_found += 1; goto found; } struct aws_byte_buf message_flags_field = aws_byte_buf_from_array( aws_event_stream_rpc_message_flags_name.ptr, aws_event_stream_rpc_message_flags_name.len); if (aws_byte_buf_eq_ignore_case(&name_buf, &message_flags_field)) { *message_flags = aws_event_stream_header_value_as_int32(header); AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_GENERAL, "message flags header value %" PRId32, *message_flags); message_flags_found += 1; goto found; } } if (header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING) { struct aws_byte_buf operation_field = aws_byte_buf_from_array( aws_event_stream_rpc_operation_name.ptr, aws_event_stream_rpc_operation_name.len); if (aws_byte_buf_eq_ignore_case(&name_buf, &operation_field)) { *operation_name = aws_event_stream_header_value_as_string(header); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_GENERAL, "operation name header value" PRInSTR, AWS_BYTE_BUF_PRI(*operation_name)); operation_name_found += 1; goto found; } } continue; found: if (message_flags_found && message_type_found && stream_id_found && operation_name_found) { return AWS_OP_SUCCESS; } } return message_flags_found && message_type_found && stream_id_found ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); } static const uint32_t s_bit_scrambling_magic = 0x45d9f3bU; static const uint32_t s_bit_shift_magic = 16U; /* this is a repurposed hash function based on the technique in splitmix64. The magic number was a result of numerical * analysis on maximum bit entropy. */ uint64_t aws_event_stream_rpc_hash_streamid(const void *to_hash) { uint32_t int_to_hash = *(const uint32_t *)to_hash; uint32_t hash = ((int_to_hash >> s_bit_shift_magic) ^ int_to_hash) * s_bit_scrambling_magic; hash = ((hash >> s_bit_shift_magic) ^ hash) * s_bit_scrambling_magic; hash = (hash >> s_bit_shift_magic) ^ hash; return (uint64_t)hash; } bool aws_event_stream_rpc_streamid_eq(const void *a, const void *b) { return *(const uint32_t *)a == *(const uint32_t *)b; } aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/source/event_stream_rpc_client.c000066400000000000000000001243551456575232400302160ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #ifdef _MSC_VER /* allow declared initializer using address of automatic variable */ # pragma warning(disable : 4221) /* allow non-constant aggregate initializers */ # pragma warning(disable : 4204) #endif static void s_clear_continuation_table(struct aws_event_stream_rpc_client_connection *connection); struct aws_event_stream_rpc_client_connection { struct aws_allocator *allocator; struct aws_hash_table continuation_table; struct aws_client_bootstrap *bootstrap_ref; struct aws_atomic_var ref_count; struct aws_channel *channel; struct aws_channel_handler *event_stream_handler; uint32_t latest_stream_id; struct aws_mutex stream_lock; struct aws_atomic_var is_open; struct aws_atomic_var handshake_state; size_t initial_window_size; aws_event_stream_rpc_client_on_connection_setup_fn *on_connection_setup; aws_event_stream_rpc_client_connection_protocol_message_fn *on_connection_protocol_message; aws_event_stream_rpc_client_on_connection_shutdown_fn *on_connection_shutdown; void *user_data; bool bootstrap_owned; bool enable_read_back_pressure; }; struct aws_event_stream_rpc_client_continuation_token { uint32_t stream_id; struct aws_event_stream_rpc_client_connection *connection; aws_event_stream_rpc_client_stream_continuation_fn *continuation_fn; aws_event_stream_rpc_client_stream_continuation_closed_fn *closed_fn; void *user_data; struct aws_atomic_var ref_count; struct aws_atomic_var is_closed; struct aws_atomic_var is_complete; }; static void s_on_message_received(struct aws_event_stream_message *message, int error_code, void *user_data); static int s_create_connection_on_channel( struct aws_event_stream_rpc_client_connection *connection, struct aws_channel *channel) { struct aws_channel_handler *event_stream_handler = NULL; struct aws_channel_slot *slot = NULL; struct aws_event_stream_channel_handler_options handler_options = { .on_message_received = s_on_message_received, .user_data = connection, .initial_window_size = connection->initial_window_size, .manual_window_management = connection->enable_read_back_pressure, }; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: creating an event-stream handler on channel %p", (void *)connection, (void *)channel); event_stream_handler = aws_event_stream_channel_handler_new(connection->allocator, &handler_options); if (!event_stream_handler) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: creating an event-stream handler failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } slot = aws_channel_slot_new(channel); if (!slot) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: creating channel slot failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } aws_channel_slot_insert_end(channel, slot); if (aws_channel_slot_set_handler(slot, event_stream_handler)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: setting handler on channel slot failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } connection->event_stream_handler = event_stream_handler; connection->channel = channel; aws_channel_acquire_hold(channel); return AWS_OP_SUCCESS; error: if (!slot && event_stream_handler) { aws_channel_handler_destroy(event_stream_handler); } return AWS_OP_ERR; } static void s_on_channel_setup_fn( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { 
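/* Channel setup callback from the client bootstrap: on success, install the event-stream handler on the
     * new channel and hand the connection to the user via on_connection_setup; on any failure, invoke
     * on_connection_setup with a NULL connection and the error code. */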
(void)bootstrap; struct aws_event_stream_rpc_client_connection *connection = user_data; AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: on_channel_setup_fn invoked with error_code %d with channel %p", (void *)connection, error_code, (void *)channel); if (!error_code) { connection->bootstrap_owned = true; if (s_create_connection_on_channel(connection, channel)) { int last_error = aws_last_error(); connection->on_connection_setup(NULL, last_error, connection->user_data); aws_channel_shutdown(channel, last_error); return; } AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: successful event-stream channel setup %p", (void *)connection, (void *)channel); aws_event_stream_rpc_client_connection_acquire(connection); connection->on_connection_setup(connection, AWS_OP_SUCCESS, connection->user_data); aws_event_stream_rpc_client_connection_release(connection); } else { connection->on_connection_setup(NULL, error_code, connection->user_data); aws_event_stream_rpc_client_connection_release(connection); } } static void s_on_channel_shutdown_fn( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct aws_event_stream_rpc_client_connection *connection = user_data; AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: on_channel_shutdown_fn invoked with error_code %d with channel %p", (void *)connection, error_code, (void *)channel); aws_atomic_store_int(&connection->is_open, 0u); if (connection->bootstrap_owned) { s_clear_continuation_table(connection); aws_event_stream_rpc_client_connection_acquire(connection); connection->on_connection_shutdown(connection, error_code, connection->user_data); aws_event_stream_rpc_client_connection_release(connection); } aws_channel_release_hold(channel); aws_event_stream_rpc_client_connection_release(connection); } /* Set each continuation's is_closed=true. * A lock MUST be held while calling this. * For use with aws_hash_table_foreach(). */ static int s_mark_each_continuation_closed(void *context, struct aws_hash_element *p_element) { (void)context; struct aws_event_stream_rpc_client_continuation_token *continuation = p_element->value; aws_atomic_store_int(&continuation->is_closed, 1U); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } /* Invoke continuation's on_closed() callback. * A lock must NOT be hold while calling this */ static void s_complete_continuation(struct aws_event_stream_rpc_client_continuation_token *token) { size_t expect_not_complete = 0U; if (aws_atomic_compare_exchange_int(&token->is_complete, &expect_not_complete, 1U)) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "token=%p: completing continuation with stream-id %" PRIu32, (void *)token, token->stream_id); if (token->stream_id) { token->closed_fn(token, token->user_data); } aws_event_stream_rpc_client_continuation_release(token); } } static int s_complete_and_clear_each_continuation(void *context, struct aws_hash_element *p_element) { (void)context; struct aws_event_stream_rpc_client_continuation_token *continuation = p_element->value; s_complete_continuation(continuation); return AWS_COMMON_HASH_TABLE_ITER_DELETE | AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } /* Remove each continuation from hash-table and invoke its on_closed() callback. * The connection->is_open must be set false before calling this. 
*/ static void s_clear_continuation_table(struct aws_event_stream_rpc_client_connection *connection) { AWS_ASSERT(!aws_event_stream_rpc_client_connection_is_open(connection)); /* Use lock to ensure synchronization with code that adds entries to table. * Since connection was just marked closed, no further entries will be * added to table once we acquire the lock. */ aws_mutex_lock(&connection->stream_lock); aws_hash_table_foreach(&connection->continuation_table, s_mark_each_continuation_closed, NULL); aws_mutex_unlock(&connection->stream_lock); /* Now release lock before invoking callbacks. * It's safe to alter the table now without a lock, since no further * entries can be added, and we've gone through the critical section * above to ensure synchronization */ aws_hash_table_foreach(&connection->continuation_table, s_complete_and_clear_each_continuation, NULL); } int aws_event_stream_rpc_client_connection_connect( struct aws_allocator *allocator, const struct aws_event_stream_rpc_client_connection_options *conn_options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(conn_options); AWS_PRECONDITION(conn_options->on_connection_protocol_message); AWS_PRECONDITION(conn_options->on_connection_setup); AWS_PRECONDITION(conn_options->on_connection_shutdown); struct aws_event_stream_rpc_client_connection *connection = aws_mem_calloc(allocator, 1, sizeof(struct aws_event_stream_rpc_client_connection)); AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: creating new connection", (void *)connection); if (!connection) { return AWS_OP_ERR; } connection->allocator = allocator; aws_atomic_init_int(&connection->ref_count, 1); connection->bootstrap_ref = conn_options->bootstrap; /* this is released in the connection release which gets called regardless of if this function is successful or * not*/ aws_client_bootstrap_acquire(connection->bootstrap_ref); aws_atomic_init_int(&connection->handshake_state, CONNECTION_HANDSHAKE_STATE_INITIALIZED); aws_atomic_init_int(&connection->is_open, 1); aws_mutex_init(&connection->stream_lock); connection->on_connection_shutdown = conn_options->on_connection_shutdown; connection->on_connection_protocol_message = conn_options->on_connection_protocol_message; connection->on_connection_setup = conn_options->on_connection_setup; connection->user_data = conn_options->user_data; if (aws_hash_table_init( &connection->continuation_table, allocator, 64, aws_event_stream_rpc_hash_streamid, aws_event_stream_rpc_streamid_eq, NULL, NULL)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: failed initializing continuation table with error %s.", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } struct aws_socket_channel_bootstrap_options bootstrap_options = { .bootstrap = connection->bootstrap_ref, .tls_options = conn_options->tls_options, .socket_options = conn_options->socket_options, .user_data = connection, .host_name = conn_options->host_name, .port = conn_options->port, .enable_read_back_pressure = false, .setup_callback = s_on_channel_setup_fn, .shutdown_callback = s_on_channel_shutdown_fn, }; if (aws_client_bootstrap_new_socket_channel(&bootstrap_options)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: failed creating new socket channel with error %s.", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } return AWS_OP_SUCCESS; error: aws_event_stream_rpc_client_connection_release(connection); return AWS_OP_ERR; } void aws_event_stream_rpc_client_connection_acquire(const struct 
aws_event_stream_rpc_client_connection *connection) { AWS_PRECONDITION(connection); size_t current_count = aws_atomic_fetch_add_explicit( &((struct aws_event_stream_rpc_client_connection *)connection)->ref_count, 1, aws_memory_order_relaxed); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: connection acquired, new ref count is %zu.", (void *)connection, current_count + 1); } static void s_destroy_connection(struct aws_event_stream_rpc_client_connection *connection) { AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: destroying connection.", (void *)connection); aws_hash_table_clean_up(&connection->continuation_table); aws_client_bootstrap_release(connection->bootstrap_ref); aws_mem_release(connection->allocator, connection); } void aws_event_stream_rpc_client_connection_release(const struct aws_event_stream_rpc_client_connection *connection) { if (!connection) { return; } struct aws_event_stream_rpc_client_connection *connection_mut = (struct aws_event_stream_rpc_client_connection *)connection; size_t ref_count = aws_atomic_fetch_sub_explicit(&connection_mut->ref_count, 1, aws_memory_order_seq_cst); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: connection released, new ref count is %zu.", (void *)connection, ref_count - 1); AWS_FATAL_ASSERT(ref_count != 0 && "Connection ref count has gone negative"); if (ref_count == 1) { s_destroy_connection(connection_mut); } } void aws_event_stream_rpc_client_connection_close( struct aws_event_stream_rpc_client_connection *connection, int shutdown_error_code) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: connection close invoked with reason %s.", (void *)connection, aws_error_debug_str(shutdown_error_code)); size_t expect_open = 1U; if (aws_atomic_compare_exchange_int(&connection->is_open, &expect_open, 0U)) { aws_channel_shutdown(connection->channel, shutdown_error_code); if (!connection->bootstrap_owned) { s_clear_continuation_table(connection); aws_event_stream_rpc_client_connection_release(connection); } } else { AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: connection already closed.", (void *)connection); } } bool aws_event_stream_rpc_client_connection_is_open(const struct aws_event_stream_rpc_client_connection *connection) { return aws_atomic_load_int(&connection->is_open) == 1U; } struct event_stream_connection_send_message_args { struct aws_allocator *allocator; struct aws_event_stream_message message; enum aws_event_stream_rpc_message_type message_type; struct aws_event_stream_rpc_client_connection *connection; struct aws_event_stream_rpc_client_continuation_token *continuation; aws_event_stream_rpc_client_message_flush_fn *flush_fn; void *user_data; bool end_stream; bool terminate_connection; }; static void s_on_protocol_message_written_fn( struct aws_event_stream_message *message, int error_code, void *user_data) { (void)message; struct event_stream_connection_send_message_args *message_args = user_data; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: message %p flushed to channel.", (void *)message_args->connection, (void *)message); if (message_args->message_type == AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: connect message flushed to the wire.", (void *)message_args->connection); } if (message_args->end_stream) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: the end stream flag was set, closing continuation %p.", (void *)message_args->connection, (void *)message_args->continuation); 
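/* The outgoing message carried the terminate-stream flag: mark the continuation closed and remove it from
         * the continuation table while holding the stream lock, then complete it only after the lock is released. */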
AWS_FATAL_ASSERT(message_args->continuation && "end stream flag was set but it wasn't on a continuation"); aws_atomic_store_int(&message_args->continuation->is_closed, 1U); aws_mutex_lock(&message_args->connection->stream_lock); aws_hash_table_remove( &message_args->connection->continuation_table, &message_args->continuation->stream_id, NULL, NULL); aws_mutex_unlock(&message_args->connection->stream_lock); /* Lock must NOT be held while invoking callback */ s_complete_continuation(message_args->continuation); } message_args->flush_fn(error_code, message_args->user_data); if (message_args->terminate_connection) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: terminate_connection flag was specified. Shutting down the connection.", (void *)message_args->connection); aws_event_stream_rpc_client_connection_close(message_args->connection, AWS_ERROR_SUCCESS); } aws_event_stream_rpc_client_connection_release(message_args->connection); if (message_args->continuation) { aws_event_stream_rpc_client_continuation_release(message_args->continuation); } aws_event_stream_message_clean_up(&message_args->message); aws_mem_release(message_args->allocator, message_args); } static int s_send_protocol_message( struct aws_event_stream_rpc_client_connection *connection, struct aws_event_stream_rpc_client_continuation_token *continuation, struct aws_byte_cursor *operation_name, const struct aws_event_stream_rpc_message_args *message_args, int32_t stream_id, aws_event_stream_rpc_client_message_flush_fn *flush_fn, void *user_data) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: sending message. continuation: %p, stream id %" PRId32, (void *)connection, (void *)continuation, stream_id); size_t connect_handshake_state = aws_atomic_load_int(&connection->handshake_state); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: handshake completion value %zu", (void *)connection, connect_handshake_state); /* handshake step 1 is a connect message being received. Handshake 2 is the connect ack being sent. * no messages other than connect and connect ack are allowed until this count reaches 2. 
*/ if (connect_handshake_state != CONNECTION_HANDSHAKE_STATE_CONNECT_ACK_PROCESSED && message_args->message_type < AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: handshake not completed, only a connect message can be sent.", (void *)connection); return aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); } struct event_stream_connection_send_message_args *args = aws_mem_calloc(connection->allocator, 1, sizeof(struct event_stream_connection_send_message_args)); args->allocator = connection->allocator; args->user_data = user_data; args->message_type = message_args->message_type; args->connection = connection; args->flush_fn = flush_fn; if (continuation) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: sending message on continuation %p", (void *)connection, (void *)continuation); args->continuation = continuation; aws_event_stream_rpc_client_continuation_acquire(continuation); if (message_args->message_flags & AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p:end stream flag was specified on continuation %p", (void *)connection, (void *)continuation); args->end_stream = true; } } if (message_args->message_type == AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK && !(message_args->message_flags & AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED)) { AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: terminating connection", (void *)connection); args->terminate_connection = true; } if (message_args->message_type == AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: sending connect message, waiting on connect ack", (void *)connection); aws_atomic_store_int(&connection->handshake_state, CONNECTION_HANDSHAKE_STATE_CONNECT_PROCESSED); } args->flush_fn = flush_fn; size_t headers_count = 0; if (operation_name) { if (aws_add_size_checked(message_args->headers_count, 4, &headers_count)) { return AWS_OP_ERR; } } else { if (aws_add_size_checked(message_args->headers_count, 3, &headers_count)) { return AWS_OP_ERR; } } struct aws_array_list headers_list; AWS_ZERO_STRUCT(headers_list); if (aws_array_list_init_dynamic( &headers_list, connection->allocator, headers_count, sizeof(struct aws_event_stream_header_value_pair))) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: an error occurred while initializing the headers list %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto args_allocated_before_failure; } /* since we preallocated the space for the headers, these can't fail, but we'll go ahead an assert on them just in * case */ for (size_t i = 0; i < message_args->headers_count; ++i) { AWS_FATAL_ASSERT(!aws_array_list_push_back(&headers_list, &message_args->headers[i])); } AWS_FATAL_ASSERT(!aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, message_args->message_type)); AWS_FATAL_ASSERT(!aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, message_args->message_flags)); AWS_FATAL_ASSERT(!aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, stream_id)); if (operation_name) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: operation name specified " PRInSTR, (void *)connection, 
AWS_BYTE_CURSOR_PRI(*operation_name)); AWS_FATAL_ASSERT(!aws_event_stream_add_string_header( &headers_list, (const char *)aws_event_stream_rpc_operation_name.ptr, (uint8_t)aws_event_stream_rpc_operation_name.len, (const char *)operation_name->ptr, (uint16_t)operation_name->len, 0)); } int message_init_err_code = aws_event_stream_message_init(&args->message, connection->allocator, &headers_list, message_args->payload); aws_array_list_clean_up(&headers_list); if (message_init_err_code) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: message init failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto args_allocated_before_failure; } aws_event_stream_rpc_client_connection_acquire(connection); if (aws_event_stream_channel_handler_write_message( connection->event_stream_handler, &args->message, s_on_protocol_message_written_fn, args)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: writing message failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto message_initialized_before_failure; } return AWS_OP_SUCCESS; message_initialized_before_failure: aws_event_stream_message_clean_up(&args->message); args_allocated_before_failure: aws_mem_release(args->allocator, args); aws_event_stream_rpc_client_connection_release(connection); return AWS_OP_ERR; } int aws_event_stream_rpc_client_connection_send_protocol_message( struct aws_event_stream_rpc_client_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_client_message_flush_fn *flush_fn, void *user_data) { if (!aws_event_stream_rpc_client_connection_is_open(connection)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_CONNECTION_CLOSED); } return s_send_protocol_message(connection, NULL, NULL, message_args, 0, flush_fn, user_data); } static void s_connection_error_message_flush_fn(int error_code, void *user_data) { (void)error_code; struct aws_event_stream_rpc_client_connection *connection = user_data; aws_event_stream_rpc_client_connection_close(connection, AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); } static void s_send_connection_level_error( struct aws_event_stream_rpc_client_connection *connection, uint32_t message_type, uint32_t message_flags, const struct aws_byte_cursor *message) { struct aws_byte_buf payload_buf = aws_byte_buf_from_array(message->ptr, message->len); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: sending connection-level error\n" PRInSTR, (void *)connection, AWS_BYTE_BUF_PRI(payload_buf)); struct aws_event_stream_header_value_pair content_type_header = aws_event_stream_create_string_header(s_json_content_type_name, s_json_content_type_value); struct aws_event_stream_header_value_pair headers[] = { content_type_header, }; struct aws_event_stream_rpc_message_args message_args = { .message_type = message_type, .message_flags = message_flags, .payload = &payload_buf, .headers_count = 1, .headers = headers, }; aws_event_stream_rpc_client_connection_send_protocol_message( connection, &message_args, s_connection_error_message_flush_fn, connection); } static void s_route_message_by_type( struct aws_event_stream_rpc_client_connection *connection, struct aws_event_stream_message *message, struct aws_array_list *headers_list, uint32_t stream_id, uint32_t message_type, uint32_t message_flags) { struct aws_byte_buf payload_buf = aws_byte_buf_from_array( aws_event_stream_message_payload(message), aws_event_stream_message_payload_len(message)); struct aws_event_stream_rpc_message_args 
message_args = { .headers = headers_list->data, .headers_count = aws_array_list_length(headers_list), .payload = &payload_buf, .message_flags = message_flags, .message_type = message_type, }; size_t handshake_complete = aws_atomic_load_int(&connection->handshake_state); /* make sure if this is not a CONNECT message being received, the handshake has been completed. */ if (handshake_complete < CONNECTION_HANDSHAKE_STATE_CONNECT_ACK_PROCESSED && message_type != AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: a message was received on this connection prior to the " "connect handshake completing", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_connect_not_completed_error); return; } /* stream_id being non zero ALWAYS indicates APPLICATION_DATA or APPLICATION_ERROR. */ if (stream_id > 0) { AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: stream id %" PRIu32, (void *)connection, stream_id); struct aws_event_stream_rpc_client_continuation_token *continuation = NULL; if (message_type > AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: only application messages can be sent on a stream id, " "but this message is the incorrect type", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_stream_id_error); return; } aws_mutex_lock(&connection->stream_lock); struct aws_hash_element *continuation_element = NULL; if (aws_hash_table_find(&connection->continuation_table, &stream_id, &continuation_element) || !continuation_element) { bool old_stream_id = stream_id <= connection->latest_stream_id; aws_mutex_unlock(&connection->stream_lock); if (!old_stream_id) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: a stream id was received that was not created by this client", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_client_stream_id_error); } else { AWS_LOGF_WARN( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: a stream id was received that corresponds to an already-closed stream", (void *)connection); } return; } continuation = continuation_element->value; AWS_FATAL_ASSERT(continuation != NULL); aws_event_stream_rpc_client_continuation_acquire(continuation); aws_mutex_unlock(&connection->stream_lock); continuation->continuation_fn(continuation, &message_args, continuation->user_data); aws_event_stream_rpc_client_continuation_release(continuation); /* if it was a terminal stream message purge it from the hash table. The delete will decref the continuation. 
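 * The is_closed flag is flipped before the table entry is removed, so a concurrent send on this continuation
 * fails fast with a stream-closed error instead of racing the teardown.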
*/ if (message_flags & AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: the terminate stream flag was specified for continuation %p", (void *)connection, (void *)continuation); aws_atomic_store_int(&continuation->is_closed, 1U); aws_mutex_lock(&connection->stream_lock); aws_hash_table_remove(&connection->continuation_table, &stream_id, NULL, NULL); aws_mutex_unlock(&connection->stream_lock); /* Note that we do not invoke callback while holding lock */ s_complete_continuation(continuation); } } else { if (message_type <= AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR || message_type >= AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_COUNT) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: a zero stream id was received with an invalid message-type %" PRIu32, (void *)connection, message_type); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_message_type_error); return; } if (message_type == AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK) { if (handshake_complete != CONNECTION_HANDSHAKE_STATE_CONNECT_PROCESSED) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: connect ack received but the handshake is already completed. Only one is allowed.", (void *)connection); /* only one connect is allowed. This would be a duplicate. */ s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_connect_not_completed_error); return; } aws_atomic_store_int(&connection->handshake_state, CONNECTION_HANDSHAKE_STATE_CONNECT_ACK_PROCESSED); AWS_LOGF_INFO( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: connect ack received, connection handshake completed", (void *)connection); } connection->on_connection_protocol_message(connection, &message_args, connection->user_data); } } /* invoked by the event stream channel handler when a complete message has been read from the channel. 
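 * Any failure below to read the headers or extract the message metadata is answered with a connection-level
 * error message whose flush callback then closes the connection.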
*/ static void s_on_message_received(struct aws_event_stream_message *message, int error_code, void *user_data) { if (!error_code) { struct aws_event_stream_rpc_client_connection *connection = user_data; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: message received on connection of length %" PRIu32, (void *)connection, aws_event_stream_message_total_length(message)); struct aws_array_list headers; if (aws_array_list_init_dynamic( &headers, connection->allocator, 8, sizeof(struct aws_event_stream_header_value_pair))) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: error initializing headers %s", (void *)connection, aws_error_debug_str(aws_last_error())); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error); return; } if (aws_event_stream_message_headers(message, &headers)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: error fetching headers %s", (void *)connection, aws_error_debug_str(aws_last_error())); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error); goto clean_up; } int32_t stream_id = -1; int32_t message_type = -1; int32_t message_flags = -1; struct aws_byte_buf operation_name_buf; AWS_ZERO_STRUCT(operation_name_buf); if (aws_event_stream_rpc_extract_message_metadata( &headers, &stream_id, &message_type, &message_flags, &operation_name_buf)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: invalid protocol message with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_message_error); goto clean_up; } (void)operation_name_buf; AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: routing message", (void *)connection); s_route_message_by_type(connection, message, &headers, stream_id, message_type, message_flags); clean_up: aws_event_stream_headers_list_cleanup(&headers); } } struct aws_event_stream_rpc_client_continuation_token *aws_event_stream_rpc_client_connection_new_stream( struct aws_event_stream_rpc_client_connection *connection, const struct aws_event_stream_rpc_client_stream_continuation_options *continuation_options) { AWS_PRECONDITION(continuation_options->on_continuation_closed); AWS_PRECONDITION(continuation_options->on_continuation); AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: creating a new stream on connection", (void *)connection); struct aws_event_stream_rpc_client_continuation_token *continuation = aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_event_stream_rpc_client_continuation_token)); if (!continuation) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: error while allocating continuation %s", (void *)connection, aws_error_debug_str(aws_last_error())); return NULL; } AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: continuation created %p", (void *)connection, (void *)continuation); continuation->connection = connection; aws_event_stream_rpc_client_connection_acquire(continuation->connection); aws_atomic_init_int(&continuation->ref_count, 1); aws_atomic_init_int(&continuation->is_closed, 0); aws_atomic_init_int(&continuation->is_complete, 0); continuation->continuation_fn = continuation_options->on_continuation; continuation->closed_fn = continuation_options->on_continuation_closed; continuation->user_data = continuation_options->user_data; return continuation; } void *aws_event_stream_rpc_client_continuation_get_user_data( struct 
aws_event_stream_rpc_client_continuation_token *continuation) { return continuation->user_data; } void aws_event_stream_rpc_client_continuation_acquire( const struct aws_event_stream_rpc_client_continuation_token *continuation) { size_t current_count = aws_atomic_fetch_add_explicit( &((struct aws_event_stream_rpc_client_continuation_token *)continuation)->ref_count, 1u, aws_memory_order_relaxed); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: continuation acquired, new ref count is %zu.", (void *)continuation, current_count + 1); } void aws_event_stream_rpc_client_continuation_release( const struct aws_event_stream_rpc_client_continuation_token *continuation) { if (AWS_UNLIKELY(!continuation)) { return; } struct aws_event_stream_rpc_client_continuation_token *continuation_mut = (struct aws_event_stream_rpc_client_continuation_token *)continuation; size_t ref_count = aws_atomic_fetch_sub_explicit(&continuation_mut->ref_count, 1, aws_memory_order_seq_cst); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: continuation released, new ref count is %zu.", (void *)continuation, ref_count - 1); AWS_FATAL_ASSERT(ref_count != 0 && "Continuation ref count has gone negative"); if (ref_count == 1) { struct aws_allocator *allocator = continuation_mut->connection->allocator; aws_event_stream_rpc_client_connection_release(continuation_mut->connection); aws_mem_release(allocator, continuation_mut); } } bool aws_event_stream_rpc_client_continuation_is_closed( const struct aws_event_stream_rpc_client_continuation_token *continuation) { return aws_atomic_load_int(&continuation->is_closed) == 1u; } int aws_event_stream_rpc_client_continuation_activate( struct aws_event_stream_rpc_client_continuation_token *continuation, struct aws_byte_cursor operation_name, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_client_message_flush_fn *flush_fn, void *user_data) { AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: activating continuation", (void *)continuation); int ret_val = AWS_OP_ERR; aws_mutex_lock(&continuation->connection->stream_lock); if (continuation->stream_id) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: stream has already been activated", (void *)continuation); aws_raise_error(AWS_ERROR_INVALID_STATE); goto clean_up; } /* Even though is_open is atomic, we need to hold a lock while checking it. * This lets us coordinate with code that sets is_open to false. */ if (!aws_event_stream_rpc_client_connection_is_open(continuation->connection)) { AWS_LOGF_ERROR(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: stream's connection is not open", (void *)continuation); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_CONNECTION_CLOSED); goto clean_up; } /* we cannot update the connection's stream id until we're certain the message at least made it to the wire, because * the next stream id must be consecutively increasing by 1. So send the message then update the connection state * once we've made it to the wire. 
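 * If the send fails, the continuation is removed from the table and latest_stream_id is left untouched, so the
 * same id can be handed out by the next activation attempt.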
*/ continuation->stream_id = continuation->connection->latest_stream_id + 1; AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: continuation's new stream id is %" PRIu32, (void *)continuation, continuation->stream_id); if (aws_hash_table_put( &continuation->connection->continuation_table, &continuation->stream_id, continuation, NULL)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: storing the new stream failed with %s", (void *)continuation, aws_error_debug_str(aws_last_error())); continuation->stream_id = 0; goto clean_up; } if (s_send_protocol_message( continuation->connection, continuation, &operation_name, message_args, continuation->stream_id, flush_fn, user_data)) { aws_hash_table_remove(&continuation->connection->continuation_table, &continuation->stream_id, NULL, NULL); continuation->stream_id = 0; AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: failed to flush the new stream to the channel with error %s", (void *)continuation, aws_error_debug_str(aws_last_error())); goto clean_up; } /* The continuation table gets a ref count on the continuation. Take it here. */ aws_event_stream_rpc_client_continuation_acquire(continuation); continuation->connection->latest_stream_id = continuation->stream_id; ret_val = AWS_OP_SUCCESS; clean_up: aws_mutex_unlock(&continuation->connection->stream_lock); return ret_val; } int aws_event_stream_rpc_client_continuation_send_message( struct aws_event_stream_rpc_client_continuation_token *continuation, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_client_message_flush_fn *flush_fn, void *user_data) { if (aws_event_stream_rpc_client_continuation_is_closed(continuation)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_STREAM_CLOSED); } if (!continuation->stream_id) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_STREAM_NOT_ACTIVATED); } return s_send_protocol_message( continuation->connection, continuation, NULL, message_args, continuation->stream_id, flush_fn, user_data); } aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/source/event_stream_rpc_server.c000066400000000000000000001400221456575232400302330ustar00rootroot00000000000000/* * Copyright 2010-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include #include #include #include #include #include #include #if defined(_MSC_VER) /* allow non-constant aggregate initializer */ # pragma warning(disable : 4204) /* allow passing a pointer to an automatically allocated variable around, cause I'm smarter than the compiler. 
*/ # pragma warning(disable : 4221) #endif static const struct aws_byte_cursor s_missing_operation_name_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "{ \"message\": \"The first message for on a non-zero :stream-id must contain an operation header value.\"; }"); struct aws_event_stream_rpc_server_listener { struct aws_allocator *allocator; struct aws_socket *listener; struct aws_server_bootstrap *bootstrap; struct aws_atomic_var ref_count; aws_event_stream_rpc_server_on_new_connection_fn *on_new_connection; aws_event_stream_rpc_server_on_connection_shutdown_fn *on_connection_shutdown; aws_event_stream_rpc_server_on_listener_destroy_fn *on_destroy_callback; size_t initial_window_size; bool enable_read_backpressure; bool initialized; void *user_data; }; struct aws_event_stream_rpc_server_connection { struct aws_allocator *allocator; struct aws_hash_table continuation_table; struct aws_event_stream_rpc_server_listener *server; struct aws_atomic_var ref_count; aws_event_stream_rpc_server_on_incoming_stream_fn *on_incoming_stream; aws_event_stream_rpc_server_connection_protocol_message_fn *on_connection_protocol_message; struct aws_channel *channel; struct aws_channel_handler *event_stream_handler; uint32_t latest_stream_id; void *user_data; struct aws_atomic_var is_open; struct aws_atomic_var handshake_state; bool bootstrap_owned; }; struct aws_event_stream_rpc_server_continuation_token { uint32_t stream_id; struct aws_event_stream_rpc_server_connection *connection; aws_event_stream_rpc_server_stream_continuation_fn *continuation_fn; aws_event_stream_rpc_server_stream_continuation_closed_fn *closed_fn; void *user_data; struct aws_atomic_var ref_count; struct aws_atomic_var is_closed; }; /** This is the destructor callback invoked by the connections continuation table when a continuation is removed * from the hash table. */ void s_continuation_destroy(void *value) { struct aws_event_stream_rpc_server_continuation_token *continuation = value; AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: destroying continuation", (void *)continuation); /* * When creating a stream, we end up putting the continuation in the table before we finish initializing it. * If an error occurs in the on incoming stream callback, we end up with a continuation with no user data or * callbacks. This means we have to check closed_fn for validity even though the success path does a fatal assert * on validity. */ if (continuation->closed_fn != NULL) { continuation->closed_fn(continuation, continuation->user_data); } aws_event_stream_rpc_server_continuation_release(continuation); } static void s_on_message_received(struct aws_event_stream_message *message, int error_code, void *user_data); /* We have two paths for creating a connection on a channel. The first is an incoming connection on the server listener. * The second is adding a connection to an already existing channel. This is the code common to both cases. 
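 * It allocates the connection, installs an event-stream channel handler at the end of the channel, and takes
 * references on both the listener and the channel; the protocol-message callbacks are wired up by the caller
 * afterwards.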
*/ static struct aws_event_stream_rpc_server_connection *s_create_connection_on_channel( struct aws_event_stream_rpc_server_listener *server, struct aws_channel *channel) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: creating connection on channel %p", (void *)server, (void *)channel); struct aws_event_stream_rpc_server_connection *connection = aws_mem_calloc(server->allocator, 1, sizeof(struct aws_event_stream_rpc_server_connection)); struct aws_channel_handler *event_stream_handler = NULL; struct aws_channel_slot *slot = NULL; if (!connection) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: allocation failed for connection with error %s", (void *)server, aws_error_debug_str(aws_last_error())); return NULL; } AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: new connection is %p", (void *)server, (void *)connection); aws_atomic_init_int(&connection->ref_count, 1u); aws_atomic_init_int(&connection->is_open, 1u); /* handshake step 1 is a connect message being received. Handshake 2 is the connect ack being sent. * no messages other than connect and connect ack are allowed until this count reaches 2. */ aws_atomic_init_int(&connection->handshake_state, CONNECTION_HANDSHAKE_STATE_INITIALIZED); connection->allocator = server->allocator; if (aws_hash_table_init( &connection->continuation_table, server->allocator, 64, aws_event_stream_rpc_hash_streamid, aws_event_stream_rpc_streamid_eq, NULL, s_continuation_destroy)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: initialization of connection stream table failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } struct aws_event_stream_channel_handler_options handler_options = { .on_message_received = s_on_message_received, .user_data = connection, .initial_window_size = server->initial_window_size, .manual_window_management = server->enable_read_backpressure, }; event_stream_handler = aws_event_stream_channel_handler_new(server->allocator, &handler_options); if (!event_stream_handler) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: initialization of event-stream handler failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } slot = aws_channel_slot_new(channel); if (!slot) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: initialization of channel slot failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } aws_channel_slot_insert_end(channel, slot); if (aws_channel_slot_set_handler(slot, event_stream_handler)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: setting the handler on the slot failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto error; } aws_event_stream_rpc_server_listener_acquire(server); connection->server = server; connection->event_stream_handler = event_stream_handler; connection->channel = channel; aws_channel_acquire_hold(channel); return connection; error: if (!slot && event_stream_handler) { aws_channel_handler_destroy(event_stream_handler); } if (connection) { aws_event_stream_rpc_server_connection_release(connection); } return NULL; } struct aws_event_stream_rpc_server_connection *aws_event_stream_rpc_server_connection_from_existing_channel( struct aws_event_stream_rpc_server_listener *server, struct aws_channel *channel, const struct aws_event_stream_rpc_connection_options *connection_options) { AWS_FATAL_ASSERT( connection_options->on_connection_protocol_message && "on_connection_protocol_message must 
be specified!"); AWS_FATAL_ASSERT(connection_options->on_incoming_stream && "on_incoming_stream must be specified"); struct aws_event_stream_rpc_server_connection *connection = s_create_connection_on_channel(server, channel); if (!connection) { return NULL; } connection->on_incoming_stream = connection_options->on_incoming_stream; connection->on_connection_protocol_message = connection_options->on_connection_protocol_message; connection->user_data = connection_options->user_data; aws_event_stream_rpc_server_connection_acquire(connection); return connection; } void aws_event_stream_rpc_server_connection_acquire(struct aws_event_stream_rpc_server_connection *connection) { size_t current_count = aws_atomic_fetch_add_explicit(&connection->ref_count, 1, aws_memory_order_relaxed); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: connection acquired, new ref count is %zu.", (void *)connection, current_count + 1); } void aws_event_stream_rpc_server_connection_release(struct aws_event_stream_rpc_server_connection *connection) { if (!connection) { return; } size_t value = aws_atomic_fetch_sub_explicit(&connection->ref_count, 1, aws_memory_order_seq_cst); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: connection released, new ref count is %zu.", (void *)connection, value - 1); if (value == 1) { AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: destroying connection.", (void *)connection); aws_channel_release_hold(connection->channel); aws_hash_table_clean_up(&connection->continuation_table); aws_event_stream_rpc_server_listener_release(connection->server); aws_mem_release(connection->allocator, connection); } } /* incoming from a socket on this listener. */ static void s_on_accept_channel_setup( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct aws_event_stream_rpc_server_listener *server = user_data; if (!error_code) { AWS_LOGF_INFO( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: incoming connection with channel %p.", (void *)server, (void *)channel); AWS_FATAL_ASSERT(channel && "Channel should never be null with a 0 error code."); struct aws_event_stream_rpc_server_connection *connection = s_create_connection_on_channel(server, channel); if (!connection) { int error = aws_last_error(); server->on_new_connection(NULL, error, NULL, server->user_data); aws_channel_shutdown(channel, error); } struct aws_event_stream_rpc_connection_options connection_options; AWS_ZERO_STRUCT(connection_options); aws_event_stream_rpc_server_connection_acquire(connection); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: invoking on_new_connection with connection %p.", (void *)server, (void *)connection); if (server->on_new_connection(connection, AWS_ERROR_SUCCESS, &connection_options, server->user_data)) { aws_channel_shutdown(channel, aws_last_error()); aws_event_stream_rpc_server_connection_release(connection); return; } AWS_FATAL_ASSERT( connection_options.on_connection_protocol_message && "on_connection_protocol_message must be specified!"); AWS_FATAL_ASSERT(connection_options.on_incoming_stream && "on_incoming_stream must be specified"); connection->on_incoming_stream = connection_options.on_incoming_stream; connection->on_connection_protocol_message = connection_options.on_connection_protocol_message; connection->user_data = connection_options.user_data; connection->bootstrap_owned = true; aws_event_stream_rpc_server_connection_release(connection); } else { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: 
invoking on_new_connection with error %s", (void *)server, aws_error_debug_str(error_code)); server->on_new_connection(NULL, error_code, NULL, server->user_data); } } /* this is just to get the connection object off of the channel. */ static inline struct aws_event_stream_rpc_server_connection *s_rpc_connection_from_channel( struct aws_channel *channel) { struct aws_channel_slot *our_slot = NULL; struct aws_channel_slot *current_slot = aws_channel_get_first_slot(channel); AWS_FATAL_ASSERT( current_slot && "It should be logically impossible to have a channel in this callback that doesn't have a slot in it"); while (current_slot->adj_right) { current_slot = current_slot->adj_right; } our_slot = current_slot; struct aws_channel_handler *our_handler = our_slot->handler; return aws_event_stream_channel_handler_get_user_data(our_handler); } static void s_on_accept_channel_shutdown( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct aws_event_stream_rpc_server_listener *server = user_data; struct aws_event_stream_rpc_server_connection *connection = s_rpc_connection_from_channel(channel); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: channel %p and connection %p shutdown occurred with error %s", (void *)server, (void *)channel, (void *)connection, aws_error_debug_str(error_code)); aws_atomic_store_int(&connection->is_open, 0U); aws_hash_table_clear(&connection->continuation_table); aws_event_stream_rpc_server_connection_acquire(connection); server->on_connection_shutdown(connection, error_code, server->user_data); aws_event_stream_rpc_server_connection_release(connection); aws_event_stream_rpc_server_connection_release(connection); } static void s_on_server_listener_destroy(struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; struct aws_event_stream_rpc_server_listener *listener = user_data; AWS_LOGF_INFO(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: destroying server", (void *)listener); /* server bootstrap invokes this callback regardless of if the listener was successfully created, so * just check that we successfully set it up before freeing anything. When that's fixed in aws-c-io, this * code will still be correct, so just leave it here for now. 
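 * (The initialized flag is only set once aws_server_bootstrap_new_socket_listener has succeeded.)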
*/ if (listener->initialized) { if (listener->on_destroy_callback) { listener->on_destroy_callback(listener, listener->user_data); } aws_mem_release(listener->allocator, listener); } } struct aws_event_stream_rpc_server_listener *aws_event_stream_rpc_server_new_listener( struct aws_allocator *allocator, struct aws_event_stream_rpc_server_listener_options *options) { struct aws_event_stream_rpc_server_listener *server = aws_mem_calloc(allocator, 1, sizeof(struct aws_event_stream_rpc_server_listener)); if (!server) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "static: failed to allocate new server with error %s", aws_error_debug_str(aws_last_error())); return NULL; } AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_SERVER, "static: new server is %p", (void *)server); aws_atomic_init_int(&server->ref_count, 1); struct aws_server_socket_channel_bootstrap_options bootstrap_options = { .bootstrap = options->bootstrap, .socket_options = options->socket_options, .tls_options = options->tls_options, .enable_read_back_pressure = false, .host_name = options->host_name, .port = options->port, .incoming_callback = s_on_accept_channel_setup, .shutdown_callback = s_on_accept_channel_shutdown, .destroy_callback = s_on_server_listener_destroy, .user_data = server, }; server->bootstrap = options->bootstrap; server->allocator = allocator; server->on_destroy_callback = options->on_destroy_callback; server->on_new_connection = options->on_new_connection; server->on_connection_shutdown = options->on_connection_shutdown; server->user_data = options->user_data; server->listener = aws_server_bootstrap_new_socket_listener(&bootstrap_options); if (!server->listener) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "static: failed to allocate new socket listener with error %s", aws_error_debug_str(aws_last_error())); goto error; } server->initialized = true; return server; error: if (server->listener) { aws_server_bootstrap_destroy_socket_listener(options->bootstrap, server->listener); } aws_mem_release(server->allocator, server); return NULL; } uint32_t aws_event_stream_rpc_server_listener_get_bound_port( const struct aws_event_stream_rpc_server_listener *server) { struct aws_socket_endpoint address; AWS_ZERO_STRUCT(address); /* not checking error code because it can't fail when called on a listening socket */ aws_socket_get_bound_address(server->listener, &address); return address.port; } void aws_event_stream_rpc_server_listener_acquire(struct aws_event_stream_rpc_server_listener *server) { size_t current_count = aws_atomic_fetch_add_explicit(&server->ref_count, 1, aws_memory_order_relaxed); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: server acquired, new ref count is %zu.", (void *)server, current_count + 1); } static void s_destroy_server(struct aws_event_stream_rpc_server_listener *server) { if (server) { AWS_LOGF_INFO(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: destroying server", (void *)server); /* the memory for this is cleaned up in the listener shutdown complete callback. 
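 * (s_on_server_listener_destroy above, which also fires the user's on_destroy_callback if one was provided.)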
*/ aws_server_bootstrap_destroy_socket_listener(server->bootstrap, server->listener); } } void aws_event_stream_rpc_server_listener_release(struct aws_event_stream_rpc_server_listener *server) { if (!server) { return; } size_t ref_count = aws_atomic_fetch_sub_explicit(&server->ref_count, 1, aws_memory_order_seq_cst); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: server released, new ref count is %zu.", (void *)server, ref_count - 1); if (ref_count == 1) { s_destroy_server(server); } } struct event_stream_connection_send_message_args { struct aws_allocator *allocator; struct aws_event_stream_message message; enum aws_event_stream_rpc_message_type message_type; struct aws_event_stream_rpc_server_connection *connection; struct aws_event_stream_rpc_server_continuation_token *continuation; aws_event_stream_rpc_server_message_flush_fn *flush_fn; void *user_data; bool end_stream; bool terminate_connection; }; static void s_on_protocol_message_written_fn( struct aws_event_stream_message *message, int error_code, void *user_data) { (void)message; struct event_stream_connection_send_message_args *message_args = user_data; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: message flushed to channel with error %s", (void *)message_args->connection, aws_error_debug_str(error_code)); if (message_args->message_type == AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: connect ack message flushed to wire", (void *)message_args->connection); } if (message_args->end_stream) { AWS_FATAL_ASSERT(message_args->continuation && "end stream flag was set but it wasn't on a continuation"); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: end_stream flag for continuation %p was set, closing", (void *)message_args->connection, (void *)message_args->continuation); aws_atomic_store_int(&message_args->continuation->is_closed, 1U); aws_hash_table_remove( &message_args->connection->continuation_table, &message_args->continuation->stream_id, NULL, NULL); } message_args->flush_fn(error_code, message_args->user_data); if (message_args->terminate_connection) { AWS_LOGF_INFO( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: terminate connection flag was set. closing", (void *)message_args->connection); aws_event_stream_rpc_server_connection_close(message_args->connection, AWS_ERROR_SUCCESS); } aws_event_stream_rpc_server_connection_release(message_args->connection); if (message_args->continuation) { aws_event_stream_rpc_server_continuation_release(message_args->continuation); } aws_event_stream_message_clean_up(&message_args->message); aws_mem_release(message_args->allocator, message_args); } static int s_send_protocol_message( struct aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_rpc_server_continuation_token *continuation, const struct aws_event_stream_rpc_message_args *message_args, int32_t stream_id, aws_event_stream_rpc_server_message_flush_fn *flush_fn, void *user_data) { size_t connect_handshake_state = aws_atomic_load_int(&connection->handshake_state); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: connect handshake state %zu", (void *)connection, connect_handshake_state); /* handshake step 1 is a connect message being received. Handshake 2 is the connect ack being sent. * no messages other than connect and connect ack are allowed until this count reaches 2. 
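 * The comparison below still lets the connect ack and the connection-level error message types through (they
 * appear after CONNECT_ACK in the message-type enum), so errors can be reported before the handshake finishes.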
*/ if (connect_handshake_state != CONNECTION_HANDSHAKE_STATE_CONNECT_ACK_PROCESSED && message_args->message_type < AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK) { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: invalid state, a message was received prior to connect handshake completion", (void *)connection); return aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); } struct event_stream_connection_send_message_args *args = aws_mem_calloc(connection->allocator, 1, sizeof(struct event_stream_connection_send_message_args)); args->allocator = connection->allocator; args->user_data = user_data; args->message_type = message_args->message_type; args->connection = connection; args->flush_fn = flush_fn; if (continuation) { args->continuation = continuation; aws_event_stream_rpc_server_continuation_acquire(continuation); if (message_args->message_flags & AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: continuation with terminate stream flag was specified closing", (void *)continuation); args->end_stream = true; } } if (message_args->message_type == AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK) { AWS_LOGF_INFO( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: sending connect ack message, the connect handshake is completed", (void *)connection); aws_atomic_store_int(&connection->handshake_state, CONNECTION_HANDSHAKE_STATE_CONNECT_ACK_PROCESSED); if (!(message_args->message_flags & AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED)) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: connection ack was rejected closing connection", (void *)connection); args->terminate_connection = true; } } args->flush_fn = flush_fn; size_t headers_count = 0; if (aws_add_size_checked(message_args->headers_count, 3, &headers_count)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: integer overflow detected when using headers_count %zu", (void *)connection, message_args->headers_count); goto args_allocated_before_failure; } struct aws_array_list headers_list; AWS_ZERO_STRUCT(headers_list); if (aws_array_list_init_dynamic( &headers_list, connection->allocator, headers_count, sizeof(struct aws_event_stream_header_value_pair))) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: allocation of headers failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto args_allocated_before_failure; } /* since we preallocated the space for the headers, these can't fail, but we'll go ahead an assert on them just in * case */ for (size_t i = 0; i < message_args->headers_count; ++i) { AWS_FATAL_ASSERT(!aws_array_list_push_back(&headers_list, &message_args->headers[i])); } AWS_FATAL_ASSERT(!aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, message_args->message_type)); AWS_FATAL_ASSERT(!aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, message_args->message_flags)); AWS_FATAL_ASSERT(!aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, stream_id)); int message_init_err_code = aws_event_stream_message_init(&args->message, connection->allocator, &headers_list, message_args->payload); aws_array_list_clean_up(&headers_list); if (message_init_err_code) { AWS_LOGF_ERROR( 
AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: initialization of message failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto args_allocated_before_failure; } aws_event_stream_rpc_server_connection_acquire(connection); if (aws_event_stream_channel_handler_write_message( connection->event_stream_handler, &args->message, s_on_protocol_message_written_fn, args)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: message send failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto message_initialized_before_failure; } return AWS_OP_SUCCESS; message_initialized_before_failure: aws_event_stream_message_clean_up(&args->message); args_allocated_before_failure: aws_mem_release(args->allocator, args); aws_event_stream_rpc_server_connection_release(connection); return AWS_OP_ERR; } int aws_event_stream_rpc_server_connection_send_protocol_message( struct aws_event_stream_rpc_server_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_server_message_flush_fn *flush_fn, void *user_data) { if (!aws_event_stream_rpc_server_connection_is_open(connection)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_CONNECTION_CLOSED); } return s_send_protocol_message(connection, NULL, message_args, 0, flush_fn, user_data); } void *aws_event_stream_rpc_server_connection_get_user_data(struct aws_event_stream_rpc_server_connection *connection) { return connection->user_data; } AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_override_last_stream_id( struct aws_event_stream_rpc_server_connection *connection, int32_t value) { connection->latest_stream_id = value; } void aws_event_stream_rpc_server_connection_close( struct aws_event_stream_rpc_server_connection *connection, int shutdown_error_code) { if (aws_event_stream_rpc_server_connection_is_open(connection)) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: closing connection with error %s", (void *)connection, aws_error_debug_str(shutdown_error_code)); aws_atomic_store_int(&connection->is_open, 0U); aws_channel_shutdown(connection->channel, shutdown_error_code); if (!connection->bootstrap_owned) { aws_hash_table_clear(&connection->continuation_table); aws_event_stream_rpc_server_connection_release(connection); } } } bool aws_event_stream_rpc_server_continuation_is_closed( struct aws_event_stream_rpc_server_continuation_token *continuation) { return aws_atomic_load_int(&continuation->is_closed) == 1U; } bool aws_event_stream_rpc_server_connection_is_open(struct aws_event_stream_rpc_server_connection *connection) { return aws_atomic_load_int(&connection->is_open) == 1U; } void aws_event_stream_rpc_server_continuation_acquire( struct aws_event_stream_rpc_server_continuation_token *continuation) { size_t current_count = aws_atomic_fetch_add_explicit(&continuation->ref_count, 1, aws_memory_order_relaxed); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: continuation acquired, new ref count is %zu.", (void *)continuation, current_count + 1); } void aws_event_stream_rpc_server_continuation_release( struct aws_event_stream_rpc_server_continuation_token *continuation) { size_t value = aws_atomic_fetch_sub_explicit(&continuation->ref_count, 1, aws_memory_order_seq_cst); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: continuation released, new ref count is %zu.", (void *)continuation, value - 1); if (value == 1) { AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: destroying continuation.", (void *)continuation); struct 
aws_allocator *allocator = continuation->connection->allocator; aws_event_stream_rpc_server_connection_release(continuation->connection); aws_mem_release(allocator, continuation); } } int aws_event_stream_rpc_server_continuation_send_message( struct aws_event_stream_rpc_server_continuation_token *continuation, const struct aws_event_stream_rpc_message_args *message_args, aws_event_stream_rpc_server_message_flush_fn *flush_fn, void *user_data) { AWS_FATAL_PRECONDITION(continuation->continuation_fn); AWS_FATAL_PRECONDITION(continuation->closed_fn); if (aws_event_stream_rpc_server_continuation_is_closed(continuation)) { return aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_STREAM_CLOSED); } return s_send_protocol_message( continuation->connection, continuation, message_args, continuation->stream_id, flush_fn, user_data); } static void s_connection_error_message_flush_fn(int error_code, void *user_data) { (void)error_code; struct aws_event_stream_rpc_server_connection *connection = user_data; aws_event_stream_rpc_server_connection_close(connection, AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); } static void s_send_connection_level_error( struct aws_event_stream_rpc_server_connection *connection, uint32_t message_type, uint32_t message_flags, const struct aws_byte_cursor *message) { struct aws_byte_buf payload_buf = aws_byte_buf_from_array(message->ptr, message->len); AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: sending connection-level error\n" PRInSTR, (void *)connection, AWS_BYTE_BUF_PRI(payload_buf)); struct aws_event_stream_header_value_pair content_type_header = aws_event_stream_create_string_header(s_json_content_type_name, s_json_content_type_value); struct aws_event_stream_header_value_pair headers[] = { content_type_header, }; struct aws_event_stream_rpc_message_args message_args = { .message_type = message_type, .message_flags = message_flags, .payload = &payload_buf, .headers_count = 1, .headers = headers, }; aws_event_stream_rpc_server_connection_send_protocol_message( connection, &message_args, s_connection_error_message_flush_fn, connection); } /* TODO: come back and make this a proper state pattern. For now it's branches all over the place until we nail * down the spec. */ static void s_route_message_by_type( struct aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_message *message, struct aws_array_list *headers_list, uint32_t stream_id, uint32_t message_type, uint32_t message_flags, struct aws_byte_cursor operation_name) { struct aws_byte_buf payload_buf = aws_byte_buf_from_array( aws_event_stream_message_payload(message), aws_event_stream_message_payload_len(message)); struct aws_event_stream_rpc_message_args message_args = { .headers = headers_list->data, .headers_count = aws_array_list_length(headers_list), .payload = &payload_buf, .message_flags = message_flags, .message_type = message_type, }; size_t handshake_state = aws_atomic_load_int(&connection->handshake_state); /* make sure if this is not a CONNECT message being received, the handshake has been completed. 
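 * Unlike the client (which waits for a CONNECT_ACK), the server only lets a CONNECT through before the
 * handshake has finished.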
*/ if (handshake_state < CONNECTION_HANDSHAKE_STATE_CONNECT_ACK_PROCESSED && message_type != AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: a message was received on this connection prior to the " "connect handshake completing", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_connect_not_completed_error); return; } /* stream_id being non zero ALWAYS indicates APPLICATION_DATA or APPLICATION_ERROR. */ if (stream_id > 0) { AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream id %" PRIu32, (void *)connection, stream_id); struct aws_event_stream_rpc_server_continuation_token *continuation = NULL; if (message_type > AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: only application messages can be sent on a stream id, " "but this message is the incorrect type", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_stream_id_error); return; } /* INT32_MAX is the max stream id. */ if (stream_id > INT32_MAX) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream_id is larger than the max acceptable value", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_stream_id_error); return; } /* if the stream is in the past, look it up from the continuation table. If it's not there, that's an error. * if it is, find it and notify the user a message arrived */ if (stream_id <= connection->latest_stream_id) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream_id is an already seen stream_id, looking for existing continuation", (void *)connection); struct aws_hash_element *continuation_element = NULL; if (aws_hash_table_find(&connection->continuation_table, &stream_id, &continuation_element) || !continuation_element) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream_id does not have a corresponding continuation", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_client_stream_id_error); return; } continuation = continuation_element->value; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream_id corresponds to continuation %p", (void *)connection, (void *)continuation); /* * I don't think it's possible for the continuation_fn to be NULL at this point, but given the * multiple partially-initialized object crashes we've had, let's be safe. */ if (continuation->continuation_fn != NULL) { aws_event_stream_rpc_server_continuation_acquire(continuation); continuation->continuation_fn(continuation, &message_args, continuation->user_data); aws_event_stream_rpc_server_continuation_release(continuation); } /* now these are potentially new streams. Make sure they're in bounds, create a new continuation * and notify the user the stream has been created, then send them the message. 
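 * A new stream id must be exactly latest_stream_id + 1 and must carry an operation name header; anything else
 * is rejected with a connection-level protocol error.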
*/ } else { AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream_id is unknown, attempting to create a continuation for it", (void *)connection); if (stream_id != connection->latest_stream_id + 1) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream_id is invalid because it's not sequentially increasing", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_new_client_stream_id_error); return; } /* new streams must always have an operation name. */ if (operation_name.len == 0) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: new stream_id encountered, but an operation name was not received", (void *)connection); aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_missing_operation_name_error); return; } AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: stream_id is a valid new stream. Creating continuation", (void *)connection); continuation = aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_event_stream_rpc_server_continuation_token)); if (!continuation) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: continuation allocation failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error); return; } AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: new continuation is %p", (void *)connection, (void *)continuation); continuation->stream_id = stream_id; continuation->connection = connection; aws_event_stream_rpc_server_connection_acquire(continuation->connection); aws_atomic_init_int(&continuation->ref_count, 1); if (aws_hash_table_put(&connection->continuation_table, &continuation->stream_id, continuation, NULL)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: continuation table update failed with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); /* continuation release will drop the connection reference as well */ aws_event_stream_rpc_server_continuation_release(continuation); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error); return; } struct aws_event_stream_rpc_server_stream_continuation_options options; AWS_ZERO_STRUCT(options); aws_event_stream_rpc_server_continuation_acquire(continuation); AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: invoking on_incoming_stream callback", (void *)connection); /* * This callback must only keep a ref to the continuation on a success path. 
On a failure, it must * leave the ref count alone so that the release + removal destroys the continuation */ if (connection->on_incoming_stream == NULL || connection->on_incoming_stream( continuation->connection, continuation, operation_name, &options, connection->user_data)) { AWS_FATAL_ASSERT(aws_atomic_load_int(&continuation->ref_count) == 2); /* undo the continuation acquire that was done a few lines above */ aws_event_stream_rpc_server_continuation_release(continuation); /* removing the continuation from the table will do the final decref on the continuation */ aws_hash_table_remove(&connection->continuation_table, &continuation->stream_id, NULL, NULL); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error); return; } AWS_FATAL_ASSERT(options.on_continuation); AWS_FATAL_ASSERT(options.on_continuation_closed); continuation->continuation_fn = options.on_continuation; continuation->closed_fn = options.on_continuation_closed; continuation->user_data = options.user_data; connection->latest_stream_id = stream_id; continuation->continuation_fn(continuation, &message_args, continuation->user_data); /* undo the acquire made before the on_incoming_stream callback invocation */ aws_event_stream_rpc_server_continuation_release(continuation); } /* if it was a terminal stream message purge it from the hash table. The delete will decref the continuation. */ if (message_flags & AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM) { AWS_LOGF_DEBUG( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: the terminate_stream flag was received for continuation %p, closing", (void *)connection, (void *)continuation); aws_atomic_store_int(&continuation->is_closed, 1U); aws_hash_table_remove(&connection->continuation_table, &stream_id, NULL, NULL); } } else { if (message_type <= AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR || message_type >= AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_COUNT) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: a zero stream id was received with an invalid message-type %" PRIu32, (void *)connection, message_type); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_message_type_error); return; } if (message_type == AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT) { if (handshake_state) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: connect received but the handshake is already completed. Only one is allowed.", (void *)connection); /* only one connect is allowed. This would be a duplicate. */ s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_connect_not_completed_error); return; } aws_atomic_store_int(&connection->handshake_state, CONNECTION_HANDSHAKE_STATE_CONNECT_PROCESSED); AWS_LOGF_INFO( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: connect received, connection handshake completion pending the server sending an ack.", (void *)connection); } if (connection->on_connection_protocol_message != NULL) { connection->on_connection_protocol_message(connection, &message_args, connection->user_data); } } } /* invoked by the event stream channel handler when a complete message has been read from the channel. 
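 * The server variant also pulls the operation name out of the headers and hands it to the routing code so that
 * brand-new streams can be dispatched to the user's on_incoming_stream callback.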
*/ static void s_on_message_received(struct aws_event_stream_message *message, int error_code, void *user_data) { if (!error_code) { struct aws_event_stream_rpc_server_connection *connection = user_data; AWS_LOGF_TRACE( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: message received on connection of length %" PRIu32, (void *)connection, aws_event_stream_message_total_length(message)); struct aws_array_list headers; if (aws_array_list_init_dynamic( &headers, connection->allocator, 8, sizeof(struct aws_event_stream_header_value_pair))) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: error initializing headers %s", (void *)connection, aws_error_debug_str(aws_last_error())); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error); return; } if (aws_event_stream_message_headers(message, &headers)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: error fetching headers %s", (void *)connection, aws_error_debug_str(aws_last_error())); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error); goto clean_up; } int32_t stream_id = -1; int32_t message_type = -1; int32_t message_flags = -1; struct aws_byte_buf operation_name_buf; AWS_ZERO_STRUCT(operation_name_buf); if (aws_event_stream_rpc_extract_message_metadata( &headers, &stream_id, &message_type, &message_flags, &operation_name_buf)) { AWS_LOGF_ERROR( AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: invalid protocol message with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); s_send_connection_level_error( connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_message_error); goto clean_up; } AWS_LOGF_TRACE(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: routing message", (void *)connection); s_route_message_by_type( connection, message, &headers, stream_id, message_type, message_flags, aws_byte_cursor_from_buf(&operation_name_buf)); clean_up: aws_event_stream_headers_list_cleanup(&headers); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/000077500000000000000000000000001456575232400230045ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/CMakeLists.txt000066400000000000000000000070271456575232400255520ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
include(AwsTestHarness) enable_testing() file(GLOB TEST_SRC "*.c") file(GLOB TESTS ${TEST_SRC}) set(TEST_BINARY_NAME ${PROJECT_NAME}-tests) add_test_case(test_incoming_no_op_valid) add_test_case(test_incoming_application_data_no_headers_valid) add_test_case(test_incoming_application_one_compressed_header_pair_valid) add_test_case(test_incoming_application_int32_header_valid) add_test_case(test_outgoing_no_op_valid) add_test_case(test_outgoing_application_data_no_headers_valid) add_test_case(test_outgoing_application_one_compressed_header_pair_valid) add_test_case(test_streaming_decoder_incoming_no_op_valid_single_message) add_test_case(test_streaming_decoder_incoming_application_no_headers) add_test_case(test_streaming_decoder_incoming_application_one_compressed_header_pair_valid) add_test_case(test_streaming_decoder_incoming_application_one_int32_header_pair_valid) add_test_case(test_streaming_decoder_incoming_application_variable_headers_with_empty_length_pair_valid) add_test_case(test_streaming_decoder_incoming_application_one_bool_header_pair_valid) add_test_case(test_streaming_decoder_incoming_multiple_messages) add_test_case(test_channel_handler_single_valid_messages_parse) add_test_case(test_channel_handler_multiple_valid_messages_parse) add_test_case(test_channel_handler_corrupted_crc_fails) add_test_case(test_channel_handler_msg_too_large_fails) add_test_case(test_channel_handler_write_message) add_net_test_case(test_event_stream_rpc_server_connection_setup_and_teardown) add_net_test_case(test_event_stream_rpc_server_connection_setup_and_teardown_with_bind_to_zero_port) add_net_test_case(test_event_stream_rpc_server_connection_connect_flow) add_net_test_case(test_event_stream_rpc_server_connection_connect_reject_flow) add_net_test_case(test_event_stream_rpc_server_connection_messages_before_connect_received) add_net_test_case(test_event_stream_rpc_server_connection_messages_before_connect_ack_sent) add_net_test_case(test_event_stream_rpc_server_connection_unknown_message_type) add_net_test_case(test_event_stream_rpc_server_connection_missing_message_type) add_net_test_case(test_event_stream_rpc_server_connection_missing_message_flags) add_net_test_case(test_event_stream_rpc_server_connection_continuation_missing_operation) add_net_test_case(test_event_stream_rpc_server_connection_missing_stream_id) add_net_test_case(test_event_stream_rpc_server_connection_continuation_messages_flow) add_net_test_case(test_event_stream_rpc_server_connection_continuation_failure) add_net_test_case(test_event_stream_rpc_server_connection_stream_id_ahead) add_net_test_case(test_event_stream_rpc_server_connection_continuation_reused_stream_id_fails) add_net_test_case(test_event_stream_rpc_server_connection_continuation_max_stream_id_reached) add_net_test_case(test_event_stream_rpc_client_connection_setup_and_teardown) add_net_test_case(test_event_stream_rpc_client_connection_connect) add_net_test_case(test_event_stream_rpc_client_connection_message_before_connect) add_net_test_case(test_event_stream_rpc_client_connection_protocol_message) add_net_test_case(test_event_stream_rpc_client_connection_continuation_flow) add_net_test_case(test_event_stream_rpc_client_connection_unactivated_continuation_fails) add_net_test_case(test_event_stream_rpc_client_connection_continuation_send_message_on_closed_fails) add_net_test_case(test_event_stream_rpc_client_connection_continuation_duplicated_activate_fails) generate_test_driver(${TEST_BINARY_NAME}) 
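For orientation: each name registered above with add_test_case()/add_net_test_case() is assumed to correspond to a test implemented in one of the globbed .c files via the AWS test-harness macros, and generate_test_driver() produces the single ${PROJECT_NAME}-tests runner that dispatches on those names. A minimal sketch of such a pairing on the C side (my_new_case and s_test_my_new_case are hypothetical names, not part of this suite):

#include <aws/testing/aws_test_harness.h>

/* Hypothetical example only: a case registered in CMake as add_test_case(my_new_case)
 * is expected to be defined like this in one of the test sources. The allocator comes
 * from the harness, ctx is the optional fixture pointer, and returning AWS_OP_SUCCESS
 * marks the case as passed. */
static int s_test_my_new_case(struct aws_allocator *allocator, void *ctx) {
    (void)allocator;
    (void)ctx;
    return AWS_OP_SUCCESS;
}
AWS_TEST_CASE(my_new_case, s_test_my_new_case)

The fixture variant used throughout the files below, AWS_TEST_CASE_FIXTURE(name, setup, test, shutdown, ctx), additionally runs a setup/shutdown pair around the test body and hands it shared state, as s_fixture_setup/s_fixture_shutdown do.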
aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/channel_handler_test.c000066400000000000000000000520651456575232400273240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct test_data { struct aws_allocator *allocator; struct testing_channel testing_channel; struct aws_channel_handler *handler; aws_event_stream_channel_handler_on_message_received_fn *received_fn; void *user_data; }; static struct test_data s_test_data; static void s_fixture_on_message(struct aws_event_stream_message *message, int error_code, void *user_data) { struct test_data *test_data = user_data; test_data->received_fn(message, error_code, test_data->user_data); } static int s_fixture_setup(struct aws_allocator *allocator, void *ctx) { aws_event_stream_library_init(allocator); struct test_data *test_data = ctx; AWS_ZERO_STRUCT(*test_data); test_data->allocator = allocator; struct aws_testing_channel_options testing_channel_options = { .clock_fn = aws_high_res_clock_get_ticks, }; ASSERT_SUCCESS(testing_channel_init(&test_data->testing_channel, allocator, &testing_channel_options)); struct aws_channel_slot *slot = aws_channel_slot_new(test_data->testing_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(test_data->testing_channel.channel, slot)); struct aws_event_stream_channel_handler_options options = { .initial_window_size = SIZE_MAX, .user_data = ctx, .on_message_received = s_fixture_on_message, }; test_data->handler = aws_event_stream_channel_handler_new(allocator, &options); ASSERT_NOT_NULL(test_data->handler); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, test_data->handler)); testing_channel_run_currently_queued_tasks(&test_data->testing_channel); return AWS_OP_SUCCESS; } static int s_fixture_shutdown(struct aws_allocator *allocator, int setup_result, void *ctx) { (void)allocator; struct test_data *test_data = ctx; if (!setup_result) { testing_channel_clean_up(&test_data->testing_channel); } aws_event_stream_library_clean_up(); return AWS_OP_SUCCESS; } struct single_message_test_data { int last_error_code; struct aws_event_stream_message received_msg_cpy; }; static void s_test_on_single_message(struct aws_event_stream_message *message, int error_code, void *user_data) { struct single_message_test_data *msg_test_data = user_data; msg_test_data->last_error_code = error_code; if (!error_code) { struct aws_byte_buf message_buf = aws_byte_buf_from_array( aws_event_stream_message_buffer(message), aws_event_stream_message_total_length(message)); aws_event_stream_message_from_buffer_copy( &msg_test_data->received_msg_cpy, s_test_data.allocator, &message_buf); } } /* send various valid messages in serial to make sure the happy path of message parsing is correct. 
*/ static int s_test_channel_handler_single_valid_messages_parse(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct single_message_test_data message_test_data; AWS_ZERO_STRUCT(message_test_data); test_data->received_fn = s_test_on_single_message; test_data->user_data = &message_test_data; uint8_t empty_message[] = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc2, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff, }; struct aws_byte_cursor empty_message_cursor = aws_byte_cursor_from_array(empty_message, sizeof(empty_message)); ASSERT_SUCCESS(testing_channel_push_read_data(&s_test_data.testing_channel, empty_message_cursor)); ASSERT_UINT_EQUALS(AWS_OP_SUCCESS, message_test_data.last_error_code); ASSERT_UINT_EQUALS(0x00000010, aws_event_stream_message_total_length(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x00000000, aws_event_stream_message_headers_len(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x05c248eb, aws_event_stream_message_prelude_crc(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x7d98c8ff, aws_event_stream_message_message_crc(&message_test_data.received_msg_cpy)); aws_event_stream_message_clean_up(&message_test_data.received_msg_cpy); AWS_ZERO_STRUCT(message_test_data); uint8_t no_headers_data[] = { 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x52, 0x8c, 0x5a, 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0xc3, 0x65, 0x39, 0x36, }; struct aws_byte_cursor no_headers_cur = aws_byte_cursor_from_array(no_headers_data, sizeof(no_headers_data)); ASSERT_SUCCESS(testing_channel_push_read_data(&s_test_data.testing_channel, no_headers_cur)); ASSERT_UINT_EQUALS(AWS_OP_SUCCESS, message_test_data.last_error_code); ASSERT_UINT_EQUALS(0x0000001D, aws_event_stream_message_total_length(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x00000000, aws_event_stream_message_headers_len(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0xfd528c5a, aws_event_stream_message_prelude_crc(&message_test_data.received_msg_cpy)); const char *expected_str = "{'foo':'bar'}"; ASSERT_UINT_EQUALS(strlen(expected_str), aws_event_stream_message_payload_len(&message_test_data.received_msg_cpy)); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), aws_event_stream_message_payload(&message_test_data.received_msg_cpy), aws_event_stream_message_payload_len(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0xc3653936, aws_event_stream_message_message_crc(&message_test_data.received_msg_cpy)); aws_event_stream_message_clean_up(&message_test_data.received_msg_cpy); AWS_ZERO_STRUCT(message_test_data); uint8_t headers_test_data[] = { 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x20, 0x07, 0xFD, 0x83, 0x96, 0x0C, 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 0x07, 0x00, 0x10, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'j', 's', 'o', 'n', 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0x8D, 0x9C, 0x08, 0xB1, }; struct aws_byte_cursor headers_test_cur = aws_byte_cursor_from_array(headers_test_data, sizeof(headers_test_data)); ASSERT_SUCCESS(testing_channel_push_read_data(&s_test_data.testing_channel, headers_test_cur)); ASSERT_UINT_EQUALS(AWS_OP_SUCCESS, message_test_data.last_error_code); ASSERT_UINT_EQUALS(0x0000003D, aws_event_stream_message_total_length(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x00000020, aws_event_stream_message_headers_len(&message_test_data.received_msg_cpy)); 
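    /* Rough decode of the 0x20 header bytes in headers_test_data, for orientation with the
     * event-stream framing ([total length][headers length][prelude crc] prelude, then headers,
     * then payload, then a trailing message crc):
     *   0x0C               -> header-name length (12)
     *   "content-type"     -> header name
     *   0x07               -> header-value type (string)
     *   0x00 0x10          -> value length (16, big-endian)
     *   "application/json" -> header value
     * The assertions below verify the same name/value pair through the parsed headers list. */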
ASSERT_UINT_EQUALS(0x07FD8396, aws_event_stream_message_prelude_crc(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(strlen(expected_str), aws_event_stream_message_payload_len(&message_test_data.received_msg_cpy)); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), aws_event_stream_message_payload(&message_test_data.received_msg_cpy), aws_event_stream_message_payload_len(&message_test_data.received_msg_cpy)); struct aws_array_list headers; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers, allocator)); ASSERT_SUCCESS(aws_event_stream_message_headers(&message_test_data.received_msg_cpy, &headers)); ASSERT_UINT_EQUALS(1, headers.length, ); struct aws_event_stream_header_value_pair header; ASSERT_SUCCESS(aws_array_list_front(&headers, &header)); const char *content_type = "content-type"; const char *content_type_value = "application/json"; struct aws_byte_buf header_name_buf = aws_event_stream_header_name(&header); ASSERT_BIN_ARRAYS_EQUALS(content_type, strlen(content_type), header_name_buf.buffer, header_name_buf.len); struct aws_byte_buf header_value_buf = aws_event_stream_header_value_as_string(&header); ASSERT_BIN_ARRAYS_EQUALS( content_type_value, strlen(content_type_value), header_value_buf.buffer, header_value_buf.len); ASSERT_UINT_EQUALS(0x8D9C08B1, aws_event_stream_message_message_crc(&message_test_data.received_msg_cpy)); aws_event_stream_headers_list_cleanup(&headers); aws_event_stream_message_clean_up(&message_test_data.received_msg_cpy); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_channel_handler_single_valid_messages_parse, s_fixture_setup, s_test_channel_handler_single_valid_messages_parse, s_fixture_shutdown, &s_test_data) struct multiple_message_test_data { int last_error_code; struct aws_event_stream_message *received_msgs_cpy; size_t msg_count; }; static void s_test_on_multiple_message(struct aws_event_stream_message *message, int error_code, void *user_data) { struct multiple_message_test_data *msg_test_data = user_data; msg_test_data->last_error_code = error_code; if (!error_code) { struct aws_byte_buf message_buf = aws_byte_buf_from_array( aws_event_stream_message_buffer(message), aws_event_stream_message_total_length(message)); struct aws_event_stream_message msg_cpy; AWS_ZERO_STRUCT(msg_cpy); aws_event_stream_message_from_buffer_copy(&msg_cpy, s_test_data.allocator, &message_buf); msg_test_data->received_msgs_cpy[msg_test_data->msg_count++] = msg_cpy; } } /* send various valid messages as a batch to make sure the happy path of message parsing is correct. 
*/ static int s_test_channel_handler_multiple_valid_messages_parse(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; uint8_t multi_message[] = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc2, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x52, 0x8c, 0x5a, 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0xc3, 0x65, 0x39, 0x36, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x20, 0x07, 0xFD, 0x83, 0x96, 0x0C, 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 0x07, 0x00, 0x10, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'j', 's', 'o', 'n', 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0x8D, 0x9C, 0x08, 0xB1, }; /* fuzz the parser's boundary conditions */ for (size_t fragment_size = 1; fragment_size <= sizeof(multi_message); ++fragment_size) { struct multiple_message_test_data message_test_data; AWS_ZERO_STRUCT(message_test_data); struct aws_event_stream_message messages[100]; AWS_ZERO_ARRAY(messages); message_test_data.received_msgs_cpy = messages; test_data->received_fn = s_test_on_multiple_message; test_data->user_data = &message_test_data; size_t processed = 0; while (processed < sizeof(multi_message)) { size_t remaining = sizeof(multi_message) - processed; size_t to_copy = fragment_size < remaining ? fragment_size : remaining; struct aws_byte_cursor multi_message_cursor = aws_byte_cursor_from_array(multi_message + processed, to_copy); processed += to_copy; ASSERT_SUCCESS(testing_channel_push_read_data(&s_test_data.testing_channel, multi_message_cursor)); } ASSERT_UINT_EQUALS(AWS_OP_SUCCESS, message_test_data.last_error_code); ASSERT_UINT_EQUALS(3, message_test_data.msg_count); ASSERT_UINT_EQUALS(0x00000010, aws_event_stream_message_total_length(&message_test_data.received_msgs_cpy[0])); ASSERT_UINT_EQUALS(0x00000000, aws_event_stream_message_headers_len(&message_test_data.received_msgs_cpy[0])); ASSERT_UINT_EQUALS(0x05c248eb, aws_event_stream_message_prelude_crc(&message_test_data.received_msgs_cpy[0])); ASSERT_UINT_EQUALS(0x7d98c8ff, aws_event_stream_message_message_crc(&message_test_data.received_msgs_cpy[0])); aws_event_stream_message_clean_up(&message_test_data.received_msgs_cpy[0]); ASSERT_UINT_EQUALS(0x0000001D, aws_event_stream_message_total_length(&message_test_data.received_msgs_cpy[1])); ASSERT_UINT_EQUALS(0x00000000, aws_event_stream_message_headers_len(&message_test_data.received_msgs_cpy[1])); ASSERT_UINT_EQUALS(0xfd528c5a, aws_event_stream_message_prelude_crc(&message_test_data.received_msgs_cpy[1])); const char *expected_str = "{'foo':'bar'}"; ASSERT_UINT_EQUALS( strlen(expected_str), aws_event_stream_message_payload_len(&message_test_data.received_msgs_cpy[1])); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), aws_event_stream_message_payload(&message_test_data.received_msgs_cpy[1]), aws_event_stream_message_payload_len(&message_test_data.received_msgs_cpy[1])); ASSERT_UINT_EQUALS(0xc3653936, aws_event_stream_message_message_crc(&message_test_data.received_msgs_cpy[1])); aws_event_stream_message_clean_up(&message_test_data.received_msgs_cpy[1]); ASSERT_UINT_EQUALS(0x0000003D, aws_event_stream_message_total_length(&message_test_data.received_msgs_cpy[2])); ASSERT_UINT_EQUALS(0x00000020, aws_event_stream_message_headers_len(&message_test_data.received_msgs_cpy[2])); ASSERT_UINT_EQUALS(0x07FD8396, aws_event_stream_message_prelude_crc(&message_test_data.received_msgs_cpy[2])); ASSERT_UINT_EQUALS( 
strlen(expected_str), aws_event_stream_message_payload_len(&message_test_data.received_msgs_cpy[2])); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), aws_event_stream_message_payload(&message_test_data.received_msgs_cpy[2]), aws_event_stream_message_payload_len(&message_test_data.received_msgs_cpy[2])); struct aws_array_list headers; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers, allocator)); ASSERT_SUCCESS(aws_event_stream_message_headers(&message_test_data.received_msgs_cpy[2], &headers)); ASSERT_UINT_EQUALS(1, headers.length, ); struct aws_event_stream_header_value_pair header; ASSERT_SUCCESS(aws_array_list_front(&headers, &header)); const char *content_type = "content-type"; const char *content_type_value = "application/json"; struct aws_byte_buf header_name_buf = aws_event_stream_header_name(&header); ASSERT_BIN_ARRAYS_EQUALS(content_type, strlen(content_type), header_name_buf.buffer, header_name_buf.len); struct aws_byte_buf header_value_buf = aws_event_stream_header_value_as_string(&header); ASSERT_BIN_ARRAYS_EQUALS( content_type_value, strlen(content_type_value), header_value_buf.buffer, header_value_buf.len); ASSERT_UINT_EQUALS(0x8D9C08B1, aws_event_stream_message_message_crc(&message_test_data.received_msgs_cpy[2])); aws_event_stream_headers_list_cleanup(&headers); aws_event_stream_message_clean_up(&message_test_data.received_msgs_cpy[2]); } return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_channel_handler_multiple_valid_messages_parse, s_fixture_setup, s_test_channel_handler_multiple_valid_messages_parse, s_fixture_shutdown, &s_test_data) /* send various valid messages in serial to make sure the happy path of message parsing is correct. */ static int s_test_channel_handler_corrupted_crc_fails(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct test_data *test_data = ctx; struct single_message_test_data message_test_data; AWS_ZERO_STRUCT(message_test_data); test_data->received_fn = s_test_on_single_message; test_data->user_data = &message_test_data; /* altered the 9th byte to a single bit flip */ uint8_t empty_message[] = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc3, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff, }; struct aws_byte_cursor empty_message_cursor = aws_byte_cursor_from_array(empty_message, sizeof(empty_message)); ASSERT_SUCCESS(testing_channel_push_read_data(&s_test_data.testing_channel, empty_message_cursor)); ASSERT_UINT_EQUALS(AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE, message_test_data.last_error_code); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_channel_handler_corrupted_crc_fails, s_fixture_setup, s_test_channel_handler_corrupted_crc_fails, s_fixture_shutdown, &s_test_data) /* send various valid messages in serial to make sure the happy path of message parsing is correct. 
*/ static int s_test_channel_handler_msg_too_large_fails(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct test_data *test_data = ctx; struct single_message_test_data message_test_data; AWS_ZERO_STRUCT(message_test_data); test_data->received_fn = s_test_on_single_message; test_data->user_data = &message_test_data; /* message is 1 byte too large */ uint8_t empty_message[] = { 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x94, 0xE8, 0xF6, 0x47, 0xC8, 0x86, 0xFB, 0xA0, }; struct aws_byte_cursor empty_message_cursor = aws_byte_cursor_from_array(empty_message, sizeof(empty_message)); ASSERT_SUCCESS(testing_channel_push_read_data(&s_test_data.testing_channel, empty_message_cursor)); ASSERT_UINT_EQUALS(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED, message_test_data.last_error_code); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_channel_handler_msg_too_large_fails, s_fixture_setup, s_test_channel_handler_msg_too_large_fails, s_fixture_shutdown, &s_test_data) static void s_on_message_written(struct aws_event_stream_message *message, int error_code, void *user_data) { struct single_message_test_data *msg_test_data = user_data; msg_test_data->last_error_code = error_code; if (!error_code) { struct aws_byte_buf message_buf = aws_byte_buf_from_array( aws_event_stream_message_buffer(message), aws_event_stream_message_total_length(message)); aws_event_stream_message_from_buffer_copy( &msg_test_data->received_msg_cpy, s_test_data.allocator, &message_buf); } } static int s_test_channel_handler_write_message(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct single_message_test_data message_test_data; AWS_ZERO_STRUCT(message_test_data); uint8_t empty_message[] = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc2, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff, }; struct aws_byte_buf empty_message_buf = aws_byte_buf_from_array(empty_message, sizeof(empty_message)); struct aws_event_stream_message msg_to_send; AWS_ZERO_STRUCT(msg_to_send); ASSERT_SUCCESS(aws_event_stream_message_from_buffer(&msg_to_send, allocator, &empty_message_buf)); ASSERT_SUCCESS(aws_event_stream_channel_handler_write_message( test_data->handler, &msg_to_send, s_on_message_written, &message_test_data)); testing_channel_drain_queued_tasks(&s_test_data.testing_channel); ASSERT_UINT_EQUALS(0x00000010, aws_event_stream_message_total_length(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x00000000, aws_event_stream_message_headers_len(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x05c248eb, aws_event_stream_message_prelude_crc(&message_test_data.received_msg_cpy)); ASSERT_UINT_EQUALS(0x7d98c8ff, aws_event_stream_message_message_crc(&message_test_data.received_msg_cpy)); aws_event_stream_message_clean_up(&message_test_data.received_msg_cpy); aws_event_stream_message_clean_up(&msg_to_send); struct aws_linked_list *list = testing_channel_get_written_message_queue(&test_data->testing_channel); ASSERT_FALSE(aws_linked_list_empty(list)); struct aws_linked_list_node *node = aws_linked_list_front(list); ASSERT_NOT_NULL(node); struct aws_io_message *message = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); struct aws_event_stream_message sent_msg; AWS_ZERO_STRUCT(sent_msg); ASSERT_SUCCESS(aws_event_stream_message_from_buffer(&sent_msg, allocator, &message->message_data)); ASSERT_UINT_EQUALS(0x00000010, aws_event_stream_message_total_length(&sent_msg)); ASSERT_UINT_EQUALS(0x00000000, aws_event_stream_message_headers_len(&sent_msg)); 
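    /* sent_msg was re-parsed from the aws_io_message drained off the testing channel's write
     * queue, so it should be byte-for-byte the frame submitted above; the remaining prelude and
     * message CRC assertions below check that round trip. */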
ASSERT_UINT_EQUALS(0x05c248eb, aws_event_stream_message_prelude_crc(&sent_msg)); ASSERT_UINT_EQUALS(0x7d98c8ff, aws_event_stream_message_message_crc(&sent_msg)); aws_event_stream_message_clean_up(&sent_msg); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_channel_handler_write_message, s_fixture_setup, s_test_channel_handler_write_message, s_fixture_shutdown, &s_test_data) aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/event_stream_rpc_client_connection_test.c000066400000000000000000001642171456575232400333370ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include static const char *s_test_host_name = "127.0.0.1"; struct client_test_data { struct aws_allocator *allocator; int received_message_flags; struct aws_mutex sync_lock; struct aws_condition_variable sync_cvar; enum aws_event_stream_rpc_message_type received_message_type; struct aws_byte_buf received_payload; struct aws_event_stream_rpc_server_continuation_token *server_token; struct aws_byte_buf last_seen_operation_name; bool client_message_sent; bool client_message_received; bool server_message_sent; bool server_message_received; bool client_token_closed; bool server_token_closed; }; struct test_data { struct aws_allocator *allocator; struct aws_event_loop_group *el_group; struct aws_server_bootstrap *server_bootstrap; struct aws_client_bootstrap *client_bootstrap; struct aws_host_resolver *resolver; struct aws_event_stream_rpc_server_listener *listener; struct aws_event_stream_rpc_server_connection *server_connection; struct aws_event_stream_rpc_client_connection *client_connection; aws_event_stream_rpc_server_connection_protocol_message_fn *on_server_message_received; aws_event_stream_rpc_server_on_incoming_stream_fn *on_new_server_stream; aws_event_stream_rpc_server_stream_continuation_fn *on_new_server_continuation; aws_event_stream_rpc_server_stream_continuation_closed_fn *on_server_continuation_closed; aws_event_stream_rpc_client_connection_protocol_message_fn *on_client_message_received; struct client_test_data client_test_data; void *user_data; void *server_continuation_user_data; struct aws_mutex shutdown_lock; struct aws_condition_variable shutdown_cvar; struct aws_mutex setup_lock; struct aws_condition_variable setup_cvar; bool client_setup_completed; bool server_setup_completed; bool client_connection_shutdown; bool server_connection_shutdown_completed; bool event_loop_shutdown_completed; bool listener_shutdown_completed; bool resolver_shutdown_completed; }; static struct test_data s_test_data; static void s_fixture_on_server_protocol_message( struct aws_event_stream_rpc_server_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { struct test_data *test_data = user_data; test_data->on_server_message_received(connection, message_args, test_data->user_data); } static void s_on_stream_server_continuation_shim( struct aws_event_stream_rpc_server_continuation_token *token, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { struct test_data *test_data = user_data; test_data->on_new_server_continuation(token, message_args, test_data->server_continuation_user_data); } static void s_stream_server_continuation_closed_shim( struct aws_event_stream_rpc_server_continuation_token *token, void *user_data) { struct test_data *test_data = user_data; 
test_data->on_server_continuation_closed(token, test_data->server_continuation_user_data); } static int s_on_server_incoming_stream_shim( struct aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_rpc_server_continuation_token *token, struct aws_byte_cursor operation_name, struct aws_event_stream_rpc_server_stream_continuation_options *continuation_options, void *user_data) { struct test_data *test_data = user_data; continuation_options->on_continuation = s_on_stream_server_continuation_shim; continuation_options->on_continuation_closed = s_stream_server_continuation_closed_shim; continuation_options->user_data = test_data; if (test_data->on_new_server_stream) { test_data->on_new_server_stream( connection, token, operation_name, continuation_options, test_data->server_continuation_user_data); } return AWS_OP_SUCCESS; } static int s_fixture_on_new_server_connection( struct aws_event_stream_rpc_server_connection *connection, int error_code, struct aws_event_stream_rpc_connection_options *connection_options, void *user_data) { (void)error_code; struct test_data *test_data = user_data; test_data->server_connection = connection; aws_mutex_lock(&test_data->setup_lock); test_data->server_setup_completed = true; aws_mutex_unlock(&test_data->setup_lock); aws_event_stream_rpc_server_connection_acquire(connection); connection_options->on_connection_protocol_message = s_fixture_on_server_protocol_message; connection_options->on_incoming_stream = s_on_server_incoming_stream_shim; connection_options->user_data = user_data; aws_condition_variable_notify_one(&test_data->setup_cvar); return AWS_OP_SUCCESS; } static void s_fixture_on_server_connection_shutdown( struct aws_event_stream_rpc_server_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; struct test_data *test_data = user_data; aws_mutex_lock(&test_data->shutdown_lock); test_data->server_connection_shutdown_completed = true; aws_mutex_unlock(&test_data->shutdown_lock); aws_condition_variable_notify_one(&test_data->shutdown_cvar); } static bool s_server_connection_shutdown_completed(void *args) { struct test_data *test_data = args; return test_data->server_connection_shutdown_completed; } static void s_on_listener_destroy(struct aws_event_stream_rpc_server_listener *server, void *user_data) { (void)server; struct test_data *test_data = user_data; aws_mutex_lock(&test_data->shutdown_lock); test_data->listener_shutdown_completed = true; aws_mutex_unlock(&test_data->shutdown_lock); aws_condition_variable_notify_one(&test_data->shutdown_cvar); } static bool s_listener_shutdown_pred(void *arg) { struct test_data *test_data = arg; return test_data->listener_shutdown_completed; } static void s_event_loop_shutdown_callback(void *user_data) { struct test_data *test_data = user_data; aws_mutex_lock(&test_data->shutdown_lock); test_data->event_loop_shutdown_completed = true; aws_mutex_unlock(&test_data->shutdown_lock); aws_condition_variable_notify_one(&test_data->shutdown_cvar); } static bool s_event_loop_shutdown_pred(void *arg) { struct test_data *test_data = arg; return test_data->event_loop_shutdown_completed; } static void s_client_on_connection_setup( struct aws_event_stream_rpc_client_connection *connection, int error_code, void *user_data) { (void)error_code; struct test_data *test_data = user_data; aws_mutex_lock(&test_data->setup_lock); test_data->client_connection = connection; if (connection) { aws_event_stream_rpc_client_connection_acquire(connection); } 
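    /* hold an extra reference on the client connection for the lifetime of the test;
     * s_fixture_shutdown releases it after both shutdown callbacks have fired. */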
test_data->client_setup_completed = true; aws_mutex_unlock(&test_data->setup_lock); aws_condition_variable_notify_one(&test_data->setup_cvar); } static void s_client_on_connection_shutdown( struct aws_event_stream_rpc_client_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; struct test_data *test_data = user_data; aws_mutex_lock(&test_data->shutdown_lock); test_data->client_connection_shutdown = true; aws_mutex_unlock(&test_data->shutdown_lock); aws_condition_variable_notify_one(&test_data->shutdown_cvar); } static bool s_client_connection_shutdown_completed(void *args) { struct test_data *test_data = args; return test_data->client_connection_shutdown; } static void s_client_connection_protocol_message( struct aws_event_stream_rpc_client_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { struct test_data *test_data = user_data; test_data->on_client_message_received(connection, message_args, test_data->user_data); } static bool s_setup_completed_pred(void *arg) { struct test_data *test_data = arg; return test_data->client_setup_completed && test_data->server_setup_completed; } static void s_resolver_shutdown_completion_callback(void *arg) { struct test_data *test_data = arg; aws_mutex_lock(&test_data->shutdown_lock); test_data->resolver_shutdown_completed = true; aws_mutex_unlock(&test_data->shutdown_lock); aws_condition_variable_notify_one(&test_data->shutdown_cvar); } static bool s_resolver_shutdown_completed_pred(void *arg) { struct test_data *test_data = arg; return test_data->resolver_shutdown_completed; } static int s_fixture_setup(struct aws_allocator *allocator, void *ctx) { aws_event_stream_library_init(allocator); struct test_data *test_data = ctx; AWS_ZERO_STRUCT(*test_data); aws_mutex_init(&test_data->setup_lock); aws_mutex_init(&test_data->shutdown_lock); aws_condition_variable_init(&test_data->setup_cvar); aws_condition_variable_init(&test_data->shutdown_cvar); struct aws_shutdown_callback_options el_shutdown_options = { .shutdown_callback_fn = s_event_loop_shutdown_callback, .shutdown_callback_user_data = test_data, }; test_data->el_group = aws_event_loop_group_new_default(allocator, 1, &el_shutdown_options); ASSERT_NOT_NULL(test_data->el_group); test_data->server_bootstrap = aws_server_bootstrap_new(allocator, test_data->el_group); ASSERT_NOT_NULL(test_data->server_bootstrap); struct aws_shutdown_callback_options host_resolver_shutdown_options = { .shutdown_callback_fn = s_resolver_shutdown_completion_callback, .shutdown_callback_user_data = test_data, }; struct aws_host_resolver_default_options resolver_options = { .el_group = test_data->el_group, .max_entries = 1, .shutdown_options = &host_resolver_shutdown_options, }; test_data->resolver = aws_host_resolver_new_default(allocator, &resolver_options); ASSERT_NOT_NULL(test_data->resolver); struct aws_client_bootstrap_options client_bootstrap_options = { .user_data = test_data, .event_loop_group = test_data->el_group, .host_resolver = test_data->resolver, }; test_data->client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options); ASSERT_NOT_NULL(test_data->client_bootstrap); ASSERT_SUCCESS(aws_mutex_init(&test_data->shutdown_lock)); ASSERT_SUCCESS(aws_condition_variable_init(&test_data->shutdown_cvar)); ASSERT_SUCCESS(aws_mutex_init(&test_data->client_test_data.sync_lock)); ASSERT_SUCCESS(aws_condition_variable_init(&test_data->client_test_data.sync_cvar)); test_data->client_test_data.allocator = allocator; 
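    /* everything below stands up a real localhost listener plus a client connection on the
     * single event loop created above, then blocks on setup_cvar until both the server and
     * client setup callbacks have run. */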
struct aws_socket_options socket_options = { .connect_timeout_ms = 3000, .domain = AWS_SOCKET_IPV4, .type = AWS_SOCKET_STREAM, }; /* Find a random open port */ uint16_t test_port = 0; while (!test_data->listener) { aws_device_random_u16(&test_port); test_port |= 0x8000; /* Use high numbers */ struct aws_event_stream_rpc_server_listener_options listener_options = { .socket_options = &socket_options, .host_name = s_test_host_name, .port = test_port, .bootstrap = test_data->server_bootstrap, .user_data = test_data, .on_new_connection = s_fixture_on_new_server_connection, .on_connection_shutdown = s_fixture_on_server_connection_shutdown, .on_destroy_callback = s_on_listener_destroy, }; test_data->listener = aws_event_stream_rpc_server_new_listener(allocator, &listener_options); if (!test_data->listener) { int error_code = aws_last_error(); ASSERT_TRUE(error_code == AWS_IO_SOCKET_ADDRESS_IN_USE || error_code == AWS_ERROR_NO_PERMISSION); } } test_data->allocator = allocator; struct aws_event_stream_rpc_client_connection_options connection_options = { .socket_options = &socket_options, .user_data = test_data, .bootstrap = test_data->client_bootstrap, .host_name = s_test_host_name, .port = test_port, .on_connection_setup = s_client_on_connection_setup, .on_connection_shutdown = s_client_on_connection_shutdown, .on_connection_protocol_message = s_client_connection_protocol_message, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_connect(allocator, &connection_options)); aws_mutex_lock(&test_data->setup_lock); aws_condition_variable_wait_pred(&test_data->setup_cvar, &test_data->setup_lock, s_setup_completed_pred, test_data); aws_mutex_unlock(&test_data->setup_lock); ASSERT_NOT_NULL(&test_data->client_connection); return AWS_OP_SUCCESS; } static int s_fixture_shutdown(struct aws_allocator *allocator, int setup_result, void *ctx) { (void)allocator; struct test_data *test_data = ctx; if (!setup_result) { aws_mutex_lock(&test_data->shutdown_lock); aws_condition_variable_wait_pred( &test_data->shutdown_cvar, &test_data->shutdown_lock, s_server_connection_shutdown_completed, test_data); aws_condition_variable_wait_pred( &test_data->shutdown_cvar, &test_data->shutdown_lock, s_client_connection_shutdown_completed, test_data); aws_event_stream_rpc_client_connection_release(test_data->client_connection); aws_event_stream_rpc_server_connection_release(test_data->server_connection); aws_event_stream_rpc_server_listener_release(test_data->listener); aws_condition_variable_wait_pred( &test_data->shutdown_cvar, &test_data->shutdown_lock, s_listener_shutdown_pred, test_data); aws_server_bootstrap_release(test_data->server_bootstrap); aws_client_bootstrap_release(test_data->client_bootstrap); aws_host_resolver_release(test_data->resolver); aws_condition_variable_wait_pred( &test_data->shutdown_cvar, &test_data->shutdown_lock, s_resolver_shutdown_completed_pred, test_data); aws_event_loop_group_release(test_data->el_group); aws_condition_variable_wait_pred( &test_data->shutdown_cvar, &test_data->shutdown_lock, s_event_loop_shutdown_pred, test_data); aws_mutex_unlock(&test_data->shutdown_lock); aws_thread_join_all_managed(); aws_mutex_clean_up(&test_data->shutdown_lock); aws_condition_variable_clean_up(&test_data->shutdown_cvar); aws_mutex_clean_up(&test_data->setup_lock); aws_condition_variable_clean_up(&test_data->setup_cvar); aws_mutex_clean_up(&test_data->client_test_data.sync_lock); aws_condition_variable_clean_up(&test_data->client_test_data.sync_cvar); } aws_event_stream_library_clean_up(); return 
AWS_OP_SUCCESS; } static int s_test_event_stream_rpc_client_connection_setup_and_teardown(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; (void)allocator; /* just let setup and shutdown run to make sure the basic init/cleanup flow references are properly counted without * having to worry about continuation reference counts. */ aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_setup_and_teardown, s_fixture_setup, s_test_event_stream_rpc_client_connection_setup_and_teardown, s_fixture_shutdown, &s_test_data) static void s_rpc_client_message_flush(int error_code, void *user_data) { (void)error_code; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->client_message_sent = true; aws_condition_variable_notify_one(&client_test_data->sync_cvar); /* make these pessimistic to prevent a cleanup race. */ aws_mutex_unlock(&client_test_data->sync_lock); } static void s_rpc_server_message_flush(int error_code, void *user_data) { (void)error_code; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->server_message_sent = true; aws_condition_variable_notify_one(&client_test_data->sync_cvar); /* make these pessimistic to prevent a cleanup race. */ aws_mutex_unlock(&client_test_data->sync_lock); } static bool s_rpc_client_message_transmission_completed_pred(void *arg) { struct client_test_data *client_test_data = arg; return client_test_data->client_message_sent && client_test_data->server_message_received; } static bool s_rpc_server_message_transmission_completed_pred(void *arg) { struct client_test_data *client_test_data = arg; return client_test_data->server_message_sent && client_test_data->client_message_received; } static void s_rpc_server_connection_protocol_message( struct aws_event_stream_rpc_server_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { (void)connection; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->server_message_received = true; client_test_data->received_message_type = message_args->message_type; aws_byte_buf_init_copy(&client_test_data->received_payload, client_test_data->allocator, message_args->payload); aws_mutex_unlock(&client_test_data->sync_lock); aws_condition_variable_notify_one(&client_test_data->sync_cvar); } static void s_rpc_client_connection_protocol_message( struct aws_event_stream_rpc_client_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { (void)connection; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->client_message_received = true; client_test_data->received_message_type = message_args->message_type; client_test_data->received_message_flags = message_args->message_flags; aws_byte_buf_init_copy(&client_test_data->received_payload, client_test_data->allocator, message_args->payload); aws_mutex_unlock(&client_test_data->sync_lock); aws_condition_variable_notify_one(&client_test_data->sync_cvar); } static int s_test_event_stream_rpc_client_connection_connect(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data 
= ctx; test_data->on_server_message_received = s_rpc_server_connection_protocol_message; test_data->on_client_message_received = s_rpc_client_connection_protocol_message; struct client_test_data client_test_data = { .allocator = allocator, .sync_cvar = AWS_CONDITION_VARIABLE_INIT, .sync_lock = AWS_MUTEX_INIT, }; test_data->user_data = &client_test_data; struct aws_byte_buf connect_payload = aws_byte_buf_from_c_str("{ \"message\": \" connect message \" }"); struct aws_event_stream_rpc_message_args connect_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, .payload = &connect_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_send_protocol_message( test_data->client_connection, &connect_args, s_rpc_client_message_flush, &client_test_data)); aws_mutex_lock(&client_test_data.sync_lock); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; connect_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK; connect_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->server_connection, &connect_args, s_rpc_server_message_flush, &client_test_data)); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_server_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); aws_mutex_unlock(&client_test_data.sync_lock); aws_mutex_clean_up(&client_test_data.sync_lock); aws_condition_variable_clean_up(&client_test_data.sync_cvar); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_connect, s_fixture_setup, s_test_event_stream_rpc_client_connection_connect, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_client_connection_message_before_connect( struct aws_allocator *allocator, void *ctx) { (void)allocator; struct test_data *test_data = ctx; struct aws_byte_buf ping_payload = aws_byte_buf_from_c_str("{ \"message\": \" ping message \" }"); struct aws_event_stream_rpc_message_args connect_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PING, .payload = &ping_payload, }; ASSERT_ERROR( AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR, aws_event_stream_rpc_client_connection_send_protocol_message( test_data->client_connection, &connect_args, 
s_rpc_client_message_flush, NULL)); aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_message_before_connect, s_fixture_setup, s_test_event_stream_rpc_client_connection_message_before_connect, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_client_connection_protocol_message(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; test_data->on_server_message_received = s_rpc_server_connection_protocol_message; test_data->on_client_message_received = s_rpc_client_connection_protocol_message; struct client_test_data client_test_data = { .allocator = allocator, .sync_cvar = AWS_CONDITION_VARIABLE_INIT, .sync_lock = AWS_MUTEX_INIT, }; test_data->user_data = &client_test_data; struct aws_byte_buf connect_payload = aws_byte_buf_from_c_str("{ \"message\": \" connect message \" }"); struct aws_event_stream_rpc_message_args connect_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, .payload = &connect_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_send_protocol_message( test_data->client_connection, &connect_args, s_rpc_client_message_flush, &client_test_data)); aws_mutex_lock(&client_test_data.sync_lock); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; connect_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK; connect_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->server_connection, &connect_args, s_rpc_server_message_flush, &client_test_data)); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_server_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; struct aws_byte_buf ping_payload = aws_byte_buf_from_c_str("{ \"message\": \"hello device that will further isolate humans from each other " "into an ever increasing digital dystopia.\" }"); struct aws_event_stream_rpc_message_args ping_args = { .headers_count = 0, .headers = NULL, .message_type = 
AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PING, .payload = &ping_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_send_protocol_message( test_data->client_connection, &ping_args, s_rpc_client_message_flush, &client_test_data)); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PING, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( ping_payload.buffer, ping_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); ping_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PING_RESPONSE; client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->server_connection, &ping_args, s_rpc_server_message_flush, &client_test_data)); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_server_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PING_RESPONSE, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( ping_payload.buffer, ping_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); aws_mutex_unlock(&client_test_data.sync_lock); aws_mutex_clean_up(&client_test_data.sync_lock); aws_condition_variable_clean_up(&client_test_data.sync_cvar); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_protocol_message, s_fixture_setup, s_test_event_stream_rpc_client_connection_protocol_message, s_fixture_shutdown, &s_test_data) static void s_rpc_client_stream_continuation( struct aws_event_stream_rpc_client_continuation_token *token, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { (void)token; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->client_message_received = true; client_test_data->received_message_type = message_args->message_type; aws_byte_buf_init_copy(&client_test_data->received_payload, client_test_data->allocator, message_args->payload); aws_mutex_unlock(&client_test_data->sync_lock); aws_condition_variable_notify_one(&client_test_data->sync_cvar); } static void s_rpc_client_stream_continuation_closed( struct aws_event_stream_rpc_client_continuation_token *token, void *user_data) { (void)token; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->client_token_closed = true; aws_mutex_unlock(&client_test_data->sync_lock); aws_condition_variable_notify_one(&client_test_data->sync_cvar); } static bool s_rpc_client_continuation_token_closed_pred(void *arg) { struct client_test_data *client_test_data = arg; return client_test_data->client_token_closed && client_test_data->server_token_closed; } static int s_rpc_server_on_incoming_stream( struct 
aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_rpc_server_continuation_token *token, struct aws_byte_cursor operation_name, struct aws_event_stream_rpc_server_stream_continuation_options *continuation_options, void *user_data) { (void)connection; (void)continuation_options; struct client_test_data *client_test_data = user_data; client_test_data->server_token = token; aws_byte_buf_init_copy_from_cursor( &client_test_data->last_seen_operation_name, client_test_data->allocator, operation_name); return AWS_OP_SUCCESS; } static void s_rpc_server_stream_continuation( struct aws_event_stream_rpc_server_continuation_token *token, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { (void)token; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->server_message_received = true; client_test_data->received_message_type = message_args->message_type; aws_byte_buf_init_copy(&client_test_data->received_payload, client_test_data->allocator, message_args->payload); aws_mutex_unlock(&client_test_data->sync_lock); aws_condition_variable_notify_one(&client_test_data->sync_cvar); } static void s_rpc_server_stream_continuation_closed( struct aws_event_stream_rpc_server_continuation_token *token, void *user_data) { (void)token; struct client_test_data *client_test_data = user_data; aws_mutex_lock(&client_test_data->sync_lock); client_test_data->server_token_closed = true; aws_mutex_unlock(&client_test_data->sync_lock); aws_condition_variable_notify_one(&client_test_data->sync_cvar); } static int s_test_event_stream_rpc_client_connection_continuation_flow(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; test_data->on_server_message_received = s_rpc_server_connection_protocol_message; test_data->on_client_message_received = s_rpc_client_connection_protocol_message; test_data->on_new_server_stream = s_rpc_server_on_incoming_stream; test_data->on_new_server_continuation = s_rpc_server_stream_continuation; test_data->on_server_continuation_closed = s_rpc_server_stream_continuation_closed; struct client_test_data client_test_data = { .allocator = allocator, .sync_cvar = AWS_CONDITION_VARIABLE_INIT, .sync_lock = AWS_MUTEX_INIT, }; test_data->user_data = &client_test_data; test_data->server_continuation_user_data = &client_test_data; struct aws_byte_buf connect_payload = aws_byte_buf_from_c_str("{ \"message\": \" connect message \" }"); struct aws_event_stream_rpc_message_args connect_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, .payload = &connect_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_send_protocol_message( test_data->client_connection, &connect_args, s_rpc_client_message_flush, &client_test_data)); aws_mutex_lock(&client_test_data.sync_lock); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; 
client_test_data.server_message_received = false; client_test_data.server_message_sent = false; connect_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK; connect_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->server_connection, &connect_args, s_rpc_server_message_flush, &client_test_data)); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_server_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; struct aws_event_stream_rpc_client_stream_continuation_options continuation_options = { .user_data = &client_test_data, .on_continuation = s_rpc_client_stream_continuation, .on_continuation_closed = s_rpc_client_stream_continuation_closed, }; struct aws_event_stream_rpc_client_continuation_token *client_token = aws_event_stream_rpc_client_connection_new_stream(test_data->client_connection, &continuation_options); ASSERT_NOT_NULL(client_token); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("test_operation"); struct aws_byte_buf operation_payload = aws_byte_buf_from_c_str("{ \"message\": \" operation payload \" }"); struct aws_event_stream_rpc_message_args operation_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, .payload = &operation_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_continuation_activate( client_token, operation_name, &operation_args, s_rpc_client_message_flush, &client_test_data)); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( operation_payload.buffer, operation_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); ASSERT_BIN_ARRAYS_EQUALS( operation_name.ptr, operation_name.len, client_test_data.last_seen_operation_name.buffer, client_test_data.last_seen_operation_name.len); aws_byte_buf_clean_up(&client_test_data.last_seen_operation_name); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; operation_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM; operation_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR; ASSERT_SUCCESS(aws_event_stream_rpc_server_continuation_send_message( client_test_data.server_token, &operation_args, s_rpc_server_message_flush, &client_test_data)); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, 
s_rpc_server_message_transmission_completed_pred, &client_test_data); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_continuation_token_closed_pred, &client_test_data); ASSERT_BIN_ARRAYS_EQUALS( operation_payload.buffer, operation_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR, client_test_data.received_message_type); aws_byte_buf_clean_up(&client_test_data.received_payload); aws_event_stream_rpc_client_continuation_release(client_token); aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); aws_mutex_unlock(&client_test_data.sync_lock); aws_mutex_clean_up(&client_test_data.sync_lock); aws_condition_variable_clean_up(&client_test_data.sync_cvar); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_continuation_flow, s_fixture_setup, s_test_event_stream_rpc_client_connection_continuation_flow, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_client_connection_unactivated_continuation_fails( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; test_data->on_server_message_received = s_rpc_server_connection_protocol_message; test_data->on_client_message_received = s_rpc_client_connection_protocol_message; test_data->on_new_server_stream = s_rpc_server_on_incoming_stream; test_data->on_new_server_continuation = s_rpc_server_stream_continuation; test_data->on_server_continuation_closed = s_rpc_server_stream_continuation_closed; struct client_test_data client_test_data = { .allocator = allocator, .sync_cvar = AWS_CONDITION_VARIABLE_INIT, .sync_lock = AWS_MUTEX_INIT, }; test_data->user_data = &client_test_data; test_data->server_continuation_user_data = &client_test_data; struct aws_byte_buf connect_payload = aws_byte_buf_from_c_str("{ \"message\": \" connect message \" }"); struct aws_event_stream_rpc_message_args connect_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, .payload = &connect_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_send_protocol_message( test_data->client_connection, &connect_args, s_rpc_client_message_flush, &client_test_data)); aws_mutex_lock(&client_test_data.sync_lock); aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; connect_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK; connect_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->server_connection, &connect_args, s_rpc_server_message_flush, &client_test_data)); 
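/* Wait (still holding sync_lock) for s_rpc_server_message_transmission_completed_pred to report that
 * the CONNECT_ACK round trip has finished before inspecting what the client received. That predicate
 * is defined earlier in this file; a predicate of this shape is assumed to boil down to something like
 * the following sketch (assumption for illustration only, not the actual definition):
 *
 *   static bool s_rpc_server_message_transmission_completed_pred(void *arg) {
 *       struct client_test_data *d = arg;
 *       return d->server_message_sent && d->client_message_received;
 *   }
 */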
aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_server_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; struct aws_event_stream_rpc_client_stream_continuation_options continuation_options = { .user_data = &client_test_data, .on_continuation = s_rpc_client_stream_continuation, .on_continuation_closed = s_rpc_client_stream_continuation_closed, }; struct aws_event_stream_rpc_client_continuation_token *client_token = aws_event_stream_rpc_client_connection_new_stream(test_data->client_connection, &continuation_options); ASSERT_NOT_NULL(client_token); struct aws_byte_buf operation_payload = aws_byte_buf_from_c_str("{ \"message\": \" operation payload \" }"); struct aws_event_stream_rpc_message_args operation_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, .payload = &operation_payload, }; ASSERT_ERROR( AWS_ERROR_EVENT_STREAM_RPC_STREAM_NOT_ACTIVATED, aws_event_stream_rpc_client_continuation_send_message( client_token, &operation_args, s_rpc_client_message_flush, &client_test_data)); aws_event_stream_rpc_client_continuation_release(client_token); aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); aws_mutex_unlock(&client_test_data.sync_lock); aws_mutex_clean_up(&client_test_data.sync_lock); aws_condition_variable_clean_up(&client_test_data.sync_cvar); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_unactivated_continuation_fails, s_fixture_setup, s_test_event_stream_rpc_client_connection_unactivated_continuation_fails, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_client_connection_continuation_send_message_on_closed_fails( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; test_data->on_server_message_received = s_rpc_server_connection_protocol_message; test_data->on_client_message_received = s_rpc_client_connection_protocol_message; test_data->on_new_server_stream = s_rpc_server_on_incoming_stream; test_data->on_new_server_continuation = s_rpc_server_stream_continuation; test_data->on_server_continuation_closed = s_rpc_server_stream_continuation_closed; struct client_test_data client_test_data = { .allocator = allocator, .sync_cvar = AWS_CONDITION_VARIABLE_INIT, .sync_lock = AWS_MUTEX_INIT, }; test_data->user_data = &client_test_data; test_data->server_continuation_user_data = &client_test_data; /* client sends CONNECT */ struct aws_byte_buf connect_payload = aws_byte_buf_from_c_str("{ \"message\": \" connect message \" }"); struct aws_event_stream_rpc_message_args connect_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, .payload = &connect_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_send_protocol_message( 
test_data->client_connection, &connect_args, s_rpc_client_message_flush, &client_test_data)); aws_mutex_lock(&client_test_data.sync_lock); /* ...wait until sent and received... */ aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); /* server sends CONNECT_ACK */ client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; connect_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK; connect_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->server_connection, &connect_args, s_rpc_server_message_flush, &client_test_data)); /* ...wait until sent and received... */ aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_server_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); /* client sends message creating new stream */ client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; struct aws_event_stream_rpc_client_stream_continuation_options continuation_options = { .user_data = &client_test_data, .on_continuation = s_rpc_client_stream_continuation, .on_continuation_closed = s_rpc_client_stream_continuation_closed, }; struct aws_event_stream_rpc_client_continuation_token *client_token = aws_event_stream_rpc_client_connection_new_stream(test_data->client_connection, &continuation_options); ASSERT_NOT_NULL(client_token); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("test_operation"); struct aws_byte_buf operation_payload = aws_byte_buf_from_c_str("{ \"message\": \" operation payload \" }"); struct aws_event_stream_rpc_message_args operation_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, .payload = &operation_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_continuation_activate( client_token, operation_name, &operation_args, s_rpc_client_message_flush, &client_test_data)); /* ...wait until sent and received... 
*/ aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_message_transmission_completed_pred, &client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, client_test_data.received_message_type); ASSERT_BIN_ARRAYS_EQUALS( operation_payload.buffer, operation_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); aws_byte_buf_clean_up(&client_test_data.received_payload); ASSERT_BIN_ARRAYS_EQUALS( operation_name.ptr, operation_name.len, client_test_data.last_seen_operation_name.buffer, client_test_data.last_seen_operation_name.len); aws_byte_buf_clean_up(&client_test_data.last_seen_operation_name); /* server sends response with TERMINATE_STREAM flag set */ client_test_data.received_message_type = 0; client_test_data.client_message_received = false; client_test_data.client_message_sent = false; client_test_data.server_message_received = false; client_test_data.server_message_sent = false; operation_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM; operation_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR; ASSERT_SUCCESS(aws_event_stream_rpc_server_continuation_send_message( client_test_data.server_token, &operation_args, s_rpc_server_message_flush, &client_test_data)); /* ...wait until sent and received... */ aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_server_message_transmission_completed_pred, &client_test_data); /* ...wait until client stream closed... */ aws_condition_variable_wait_pred( &client_test_data.sync_cvar, &client_test_data.sync_lock, s_rpc_client_continuation_token_closed_pred, &client_test_data); ASSERT_BIN_ARRAYS_EQUALS( operation_payload.buffer, operation_payload.len, client_test_data.received_payload.buffer, client_test_data.received_payload.len); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_ERROR, client_test_data.received_message_type); aws_byte_buf_clean_up(&client_test_data.received_payload); /* should not be allowed to send further stream messages */ ASSERT_ERROR( AWS_ERROR_EVENT_STREAM_RPC_STREAM_CLOSED, aws_event_stream_rpc_client_continuation_send_message( client_token, &operation_args, s_rpc_client_message_flush, &client_test_data)); aws_event_stream_rpc_client_continuation_release(client_token); aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); aws_mutex_unlock(&client_test_data.sync_lock); aws_mutex_clean_up(&client_test_data.sync_lock); aws_condition_variable_clean_up(&client_test_data.sync_cvar); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_continuation_send_message_on_closed_fails, s_fixture_setup, s_test_event_stream_rpc_client_connection_continuation_send_message_on_closed_fails, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_client_connection_continuation_duplicated_activate_fails( struct aws_allocator *allocator, void *ctx) { (void)allocator; struct test_data *test_data = ctx; test_data->on_server_message_received = s_rpc_server_connection_protocol_message; test_data->on_client_message_received = s_rpc_client_connection_protocol_message; test_data->on_new_server_stream = s_rpc_server_on_incoming_stream; test_data->on_new_server_continuation = s_rpc_server_stream_continuation; test_data->on_server_continuation_closed = 
s_rpc_server_stream_continuation_closed; struct client_test_data *client_test_data = &test_data->client_test_data; test_data->user_data = client_test_data; test_data->server_continuation_user_data = client_test_data; struct aws_byte_buf connect_payload = aws_byte_buf_from_c_str("{ \"message\": \" connect message \" }"); struct aws_event_stream_rpc_message_args connect_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, .payload = &connect_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_connection_send_protocol_message( test_data->client_connection, &connect_args, s_rpc_client_message_flush, client_test_data)); aws_mutex_lock(&client_test_data->sync_lock); aws_condition_variable_wait_pred( &client_test_data->sync_cvar, &client_test_data->sync_lock, s_rpc_client_message_transmission_completed_pred, client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, client_test_data->received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data->received_payload.buffer, client_test_data->received_payload.len); aws_byte_buf_clean_up(&client_test_data->received_payload); client_test_data->received_message_type = 0; client_test_data->client_message_received = false; client_test_data->client_message_sent = false; client_test_data->server_message_received = false; client_test_data->server_message_sent = false; connect_args.message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK; connect_args.message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->server_connection, &connect_args, s_rpc_server_message_flush, client_test_data)); aws_condition_variable_wait_pred( &client_test_data->sync_cvar, &client_test_data->sync_lock, s_rpc_server_message_transmission_completed_pred, client_test_data); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, client_test_data->received_message_type); ASSERT_BIN_ARRAYS_EQUALS( connect_payload.buffer, connect_payload.len, client_test_data->received_payload.buffer, client_test_data->received_payload.len); aws_byte_buf_clean_up(&client_test_data->received_payload); client_test_data->received_message_type = 0; client_test_data->client_message_received = false; client_test_data->client_message_sent = false; client_test_data->server_message_received = false; client_test_data->server_message_sent = false; struct aws_event_stream_rpc_client_stream_continuation_options continuation_options = { .user_data = client_test_data, .on_continuation = s_rpc_client_stream_continuation, .on_continuation_closed = s_rpc_client_stream_continuation_closed, }; struct aws_event_stream_rpc_client_continuation_token *client_token = aws_event_stream_rpc_client_connection_new_stream(test_data->client_connection, &continuation_options); ASSERT_NOT_NULL(client_token); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("test_operation"); struct aws_byte_buf operation_payload = aws_byte_buf_from_c_str("{ \"message\": \" operation payload \" }"); struct aws_event_stream_rpc_message_args operation_args = { .headers_count = 0, .headers = NULL, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, .payload = &operation_payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_client_continuation_activate( client_token, operation_name, &operation_args, s_rpc_client_message_flush, client_test_data)); aws_condition_variable_wait_pred( 
&client_test_data->sync_cvar, &client_test_data->sync_lock, s_rpc_client_message_transmission_completed_pred, client_test_data); aws_byte_buf_clean_up(&client_test_data->received_payload); aws_byte_buf_clean_up(&client_test_data->last_seen_operation_name); ASSERT_ERROR( AWS_ERROR_INVALID_STATE, aws_event_stream_rpc_client_continuation_activate( client_token, operation_name, &operation_args, s_rpc_client_message_flush, client_test_data)); aws_event_stream_rpc_client_continuation_release(client_token); aws_event_stream_rpc_client_connection_close(test_data->client_connection, AWS_ERROR_SUCCESS); aws_event_stream_rpc_server_connection_close(test_data->server_connection, AWS_ERROR_SUCCESS); aws_condition_variable_wait_pred( &client_test_data->sync_cvar, &client_test_data->sync_lock, s_rpc_client_continuation_token_closed_pred, client_test_data); aws_mutex_unlock(&client_test_data->sync_lock); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_client_connection_continuation_duplicated_activate_fails, s_fixture_setup, s_test_event_stream_rpc_client_connection_continuation_duplicated_activate_fails, s_fixture_shutdown, &s_test_data) aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/event_stream_rpc_server_connection_test.c000066400000000000000000002373211456575232400333640ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include struct test_data { struct aws_allocator *allocator; struct testing_channel testing_channel; struct aws_event_loop_group *el_group; struct aws_server_bootstrap *server_bootstrap; struct aws_event_stream_rpc_server_listener *listener; struct aws_event_stream_rpc_server_connection *connection; aws_event_stream_rpc_server_connection_protocol_message_fn *received_fn; aws_event_stream_rpc_server_on_incoming_stream_fn *on_new_stream; aws_event_stream_rpc_server_stream_continuation_fn *on_continuation; aws_event_stream_rpc_server_stream_continuation_closed_fn *on_continuation_closed; void *user_data; void *continuation_user_data; struct aws_mutex shutdown_lock; struct aws_condition_variable shutdown_cvar; bool shutdown_completed; }; static struct test_data s_test_data; static void s_fixture_on_protocol_message( struct aws_event_stream_rpc_server_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { struct test_data *test_data = user_data; test_data->received_fn(connection, message_args, test_data->user_data); } static void s_on_stream_continuation_shim( struct aws_event_stream_rpc_server_continuation_token *token, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { struct test_data *test_data = user_data; test_data->on_continuation(token, message_args, test_data->continuation_user_data); } static void s_server_stream_continuation_closed_shim( struct aws_event_stream_rpc_server_continuation_token *token, void *user_data) { struct test_data *test_data = user_data; test_data->on_continuation_closed(token, test_data->continuation_user_data); } static int s_on_server_incoming_stream_shim( struct aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_rpc_server_continuation_token *token, struct aws_byte_cursor operation_name, struct aws_event_stream_rpc_server_stream_continuation_options *continuation_options, void *user_data) { struct test_data *test_data = user_data; 
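/* Install the fixture's shim callbacks on the new continuation so stream events are routed back
 * through test_data, then forward to the test-specific on_new_stream hook if one was registered. */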
continuation_options->on_continuation = s_on_stream_continuation_shim; continuation_options->on_continuation_closed = s_server_stream_continuation_closed_shim; continuation_options->user_data = test_data; if (test_data->on_new_stream) { test_data->on_new_stream( connection, token, operation_name, continuation_options, test_data->continuation_user_data); } return AWS_OP_SUCCESS; } static int s_fixture_on_new_server_connection( struct aws_event_stream_rpc_server_connection *connection, int error_code, struct aws_event_stream_rpc_connection_options *connection_options, void *user_data) { (void)connection; (void)error_code; (void)connection_options; (void)user_data; return AWS_OP_SUCCESS; } static void s_fixture_on_server_connection_shutdown( struct aws_event_stream_rpc_server_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; (void)user_data; } static void s_on_listener_destroy(struct aws_event_stream_rpc_server_listener *server, void *user_data) { (void)server; struct test_data *test_data = user_data; aws_mutex_lock(&test_data->shutdown_lock); test_data->shutdown_completed = true; aws_mutex_unlock(&test_data->shutdown_lock); aws_condition_variable_notify_one(&test_data->shutdown_cvar); } static void s_event_loop_shutdown_callback(void *user_data) { struct test_data *test_data = user_data; aws_mutex_lock(&test_data->shutdown_lock); test_data->shutdown_completed = true; aws_mutex_unlock(&test_data->shutdown_lock); aws_condition_variable_notify_one(&test_data->shutdown_cvar); } static int s_fixture_setup_shared( struct aws_allocator *allocator, void *ctx, aws_event_stream_rpc_server_on_incoming_stream_fn *on_incoming_stream) { aws_event_stream_library_init(allocator); struct test_data *test_data = ctx; AWS_ZERO_STRUCT(*test_data); struct aws_shutdown_callback_options el_shutdown_options = { .shutdown_callback_fn = s_event_loop_shutdown_callback, .shutdown_callback_user_data = test_data, }; test_data->el_group = aws_event_loop_group_new_default(allocator, 0, &el_shutdown_options); ASSERT_NOT_NULL(test_data->el_group); test_data->server_bootstrap = aws_server_bootstrap_new(allocator, test_data->el_group); ASSERT_NOT_NULL(test_data->server_bootstrap); ASSERT_SUCCESS(aws_mutex_init(&test_data->shutdown_lock)); ASSERT_SUCCESS(aws_condition_variable_init(&test_data->shutdown_cvar)); struct aws_socket_options socket_options = { .connect_timeout_ms = 3000, .domain = AWS_SOCKET_IPV4, .type = AWS_SOCKET_STREAM, }; /* Find a random open port */ uint16_t test_port = 0; while (!test_data->listener) { aws_device_random_u16(&test_port); test_port |= 0x8000; /* Use high numbers */ struct aws_event_stream_rpc_server_listener_options listener_options = { .socket_options = &socket_options, .host_name = "127.0.0.1", .port = test_port, .bootstrap = test_data->server_bootstrap, .user_data = test_data, .on_new_connection = s_fixture_on_new_server_connection, .on_connection_shutdown = s_fixture_on_server_connection_shutdown, .on_destroy_callback = s_on_listener_destroy, }; test_data->listener = aws_event_stream_rpc_server_new_listener(allocator, &listener_options); if (!test_data->listener) { int error_code = aws_last_error(); ASSERT_TRUE(error_code == AWS_IO_SOCKET_ADDRESS_IN_USE || error_code == AWS_ERROR_NO_PERMISSION); } } test_data->allocator = allocator; struct aws_testing_channel_options testing_channel_options = { .clock_fn = aws_high_res_clock_get_ticks, }; ASSERT_SUCCESS(testing_channel_init(&test_data->testing_channel, allocator, &testing_channel_options)); struct 
aws_event_stream_rpc_connection_options connection_options = { .on_connection_protocol_message = s_fixture_on_protocol_message, .on_incoming_stream = on_incoming_stream, .user_data = test_data, }; test_data->connection = aws_event_stream_rpc_server_connection_from_existing_channel( test_data->listener, test_data->testing_channel.channel, &connection_options); ASSERT_NOT_NULL(test_data->connection); testing_channel_run_currently_queued_tasks(&test_data->testing_channel); return AWS_OP_SUCCESS; } static int s_on_server_incoming_stream_failure( struct aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_rpc_server_continuation_token *token, struct aws_byte_cursor operation_name, struct aws_event_stream_rpc_server_stream_continuation_options *continuation_options, void *user_data) { (void)connection; (void)token; (void)operation_name; (void)continuation_options; (void)user_data; return AWS_OP_ERR; } static int s_fixture_setup_new_stream_failure(struct aws_allocator *allocator, void *ctx) { return s_fixture_setup_shared(allocator, ctx, s_on_server_incoming_stream_failure); } static int s_fixture_setup(struct aws_allocator *allocator, void *ctx) { return s_fixture_setup_shared(allocator, ctx, s_on_server_incoming_stream_shim); } static int s_fixture_setup_port0(struct aws_allocator *allocator, void *ctx) { aws_event_stream_library_init(allocator); struct test_data *test_data = ctx; AWS_ZERO_STRUCT(*test_data); struct aws_shutdown_callback_options el_shutdown_options = { .shutdown_callback_fn = s_event_loop_shutdown_callback, .shutdown_callback_user_data = test_data, }; test_data->el_group = aws_event_loop_group_new_default(allocator, 0, &el_shutdown_options); ASSERT_NOT_NULL(test_data->el_group); test_data->server_bootstrap = aws_server_bootstrap_new(allocator, test_data->el_group); ASSERT_NOT_NULL(test_data->server_bootstrap); ASSERT_SUCCESS(aws_mutex_init(&test_data->shutdown_lock)); ASSERT_SUCCESS(aws_condition_variable_init(&test_data->shutdown_cvar)); struct aws_socket_options socket_options = { .connect_timeout_ms = 3000, .domain = AWS_SOCKET_IPV4, .type = AWS_SOCKET_STREAM, }; /* Find a random open port directly by ask bind() with port 0 */ uint16_t test_port = 0; struct aws_event_stream_rpc_server_listener_options listener_options = { .socket_options = &socket_options, .host_name = "127.0.0.1", .port = test_port, .bootstrap = test_data->server_bootstrap, .user_data = test_data, .on_new_connection = s_fixture_on_new_server_connection, .on_connection_shutdown = s_fixture_on_server_connection_shutdown, .on_destroy_callback = s_on_listener_destroy, }; test_data->listener = aws_event_stream_rpc_server_new_listener(allocator, &listener_options); ASSERT_NOT_NULL(test_data->listener); uint32_t actual_port = aws_event_stream_rpc_server_listener_get_bound_port(test_data->listener); ASSERT_TRUE(actual_port > 0); test_data->allocator = allocator; struct aws_testing_channel_options testing_channel_options = { .clock_fn = aws_high_res_clock_get_ticks, }; ASSERT_SUCCESS(testing_channel_init(&test_data->testing_channel, allocator, &testing_channel_options)); struct aws_event_stream_rpc_connection_options connection_options = { .on_connection_protocol_message = s_fixture_on_protocol_message, .on_incoming_stream = s_on_server_incoming_stream_shim, .user_data = test_data, }; test_data->connection = aws_event_stream_rpc_server_connection_from_existing_channel( test_data->listener, test_data->testing_channel.channel, &connection_options); ASSERT_NOT_NULL(test_data->connection); 
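/* Unlike the fixture above that probes random high ports, this fixture binds to port 0 so the kernel
 * picks an ephemeral port, which is then read back via aws_event_stream_rpc_server_listener_get_bound_port;
 * the remaining setup (testing channel plus a server connection wrapped around it) is identical. */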
testing_channel_run_currently_queued_tasks(&test_data->testing_channel); return AWS_OP_SUCCESS; } static bool s_shutdown_predicate_fn(void *user_data) { struct test_data *test_data = user_data; return test_data->shutdown_completed; } static int s_fixture_shutdown(struct aws_allocator *allocator, int setup_result, void *ctx) { (void)allocator; struct test_data *test_data = ctx; if (!setup_result) { aws_event_stream_rpc_server_connection_release(test_data->connection); testing_channel_clean_up(&test_data->testing_channel); aws_event_stream_rpc_server_listener_release(test_data->listener); aws_mutex_lock(&test_data->shutdown_lock); aws_condition_variable_wait_pred( &test_data->shutdown_cvar, &test_data->shutdown_lock, s_shutdown_predicate_fn, test_data); test_data->shutdown_completed = false; aws_mutex_unlock(&test_data->shutdown_lock); aws_server_bootstrap_release(test_data->server_bootstrap); aws_event_loop_group_release(test_data->el_group); aws_mutex_lock(&test_data->shutdown_lock); aws_condition_variable_wait_pred( &test_data->shutdown_cvar, &test_data->shutdown_lock, s_shutdown_predicate_fn, test_data); aws_mutex_unlock(&test_data->shutdown_lock); aws_thread_join_all_managed(); aws_mutex_clean_up(&test_data->shutdown_lock); aws_condition_variable_clean_up(&test_data->shutdown_cvar); } aws_event_stream_library_clean_up(); return AWS_OP_SUCCESS; } static int s_test_event_stream_rpc_server_connection_setup_and_teardown(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; (void)allocator; /* just let setup and shutdown run to make sure the basic init/cleanup flow references are properly counted without * having to worry about continuation reference counts. */ aws_event_stream_rpc_server_connection_close(test_data->connection, AWS_ERROR_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_setup_and_teardown, s_fixture_setup, s_test_event_stream_rpc_server_connection_setup_and_teardown, s_fixture_shutdown, &s_test_data) AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_setup_and_teardown_with_bind_to_zero_port, s_fixture_setup_port0, s_test_event_stream_rpc_server_connection_setup_and_teardown, s_fixture_shutdown, &s_test_data) struct received_protocol_message_data { struct aws_allocator *allocator; enum aws_event_stream_rpc_message_type message_type; int message_flags; struct aws_byte_buf payload_cpy; bool message_flushed; int message_flush_err_code; bool continuation_closed; struct aws_event_stream_rpc_server_continuation_token *continuation_token; struct aws_byte_buf last_seen_operation_name; }; static void s_on_recieved_protocol_message( struct aws_event_stream_rpc_server_connection *connection, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { (void)connection; struct received_protocol_message_data *message_data = user_data; message_data->message_type = message_args->message_type; message_data->message_flags = message_args->message_flags; aws_byte_buf_init_copy(&message_data->payload_cpy, message_data->allocator, message_args->payload); } static void s_on_message_flush_fn(int error_code, void *user_data) { (void)error_code; struct received_protocol_message_data *message_data = user_data; message_data->message_flushed = true; message_data->message_flush_err_code = AWS_ERROR_SUCCESS; } static int s_do_connect( struct aws_allocator *allocator, struct test_data *test_data, struct received_protocol_message_data *message_data) { struct aws_byte_buf payload = 
aws_byte_buf_from_c_str("test connect message payload"); struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 0)); struct aws_event_stream_message message; ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, message_data->message_type); ASSERT_INT_EQUALS(0, message_data->message_flags); ASSERT_BIN_ARRAYS_EQUALS( payload.buffer, payload.len, message_data->payload_cpy.buffer, message_data->payload_cpy.len); aws_event_stream_message_clean_up(&message); aws_array_list_clear(&headers_list); aws_byte_buf_clean_up(&message_data->payload_cpy); struct aws_event_stream_rpc_message_args connect_ack_args = { .message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_CONNECTION_ACCEPTED, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, .payload = &payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->connection, &connect_ack_args, s_on_message_flush_fn, message_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); ASSERT_TRUE(message_data->message_flushed); ASSERT_INT_EQUALS(0, message_data->message_flush_err_code); ASSERT_TRUE(aws_event_stream_rpc_server_connection_is_open(test_data->connection)); struct aws_byte_buf connect_ack_data; ASSERT_SUCCESS(aws_byte_buf_init(&connect_ack_data, allocator, 1024)); testing_channel_drain_written_messages(&test_data->testing_channel, &connect_ack_data); ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&message, allocator, &connect_ack_data)); aws_byte_buf_clean_up(&connect_ack_data); ASSERT_SUCCESS(aws_event_stream_message_headers(&message, &headers_list)); enum aws_event_stream_rpc_message_type message_type = -1; for (size_t i = 0; aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type = aws_event_stream_header_value_as_int32(header); break; } } ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, message_type); aws_array_list_clean_up(&headers_list); aws_event_stream_message_clean_up(&message); return AWS_OP_SUCCESS; } static int s_test_event_stream_rpc_server_connection_connect_flow(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = 
allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; ASSERT_SUCCESS(s_do_connect(allocator, test_data, &message_data)); aws_event_stream_rpc_server_connection_close(test_data->connection, AWS_ERROR_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_connect_flow, s_fixture_setup, s_test_event_stream_rpc_server_connection_connect_flow, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_server_connection_connect_reject_flow(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; struct aws_byte_buf payload = aws_byte_buf_from_c_str("test connect message payload"); struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 0)); struct aws_event_stream_message message; ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, message_data.message_type); ASSERT_INT_EQUALS(0, message_data.message_flags); ASSERT_BIN_ARRAYS_EQUALS( payload.buffer, payload.len, message_data.payload_cpy.buffer, message_data.payload_cpy.len); aws_event_stream_message_clean_up(&message); aws_event_stream_headers_list_cleanup(&headers_list); aws_byte_buf_clean_up(&message_data.payload_cpy); struct aws_event_stream_rpc_message_args connect_ack_args = { .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT_ACK, .payload = &payload, }; ASSERT_SUCCESS(aws_event_stream_rpc_server_connection_send_protocol_message( test_data->connection, &connect_ack_args, s_on_message_flush_fn, &message_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); ASSERT_TRUE(message_data.message_flushed); ASSERT_INT_EQUALS(0, message_data.message_flush_err_code); ASSERT_FALSE(aws_event_stream_rpc_server_connection_is_open(test_data->connection)); aws_event_stream_rpc_server_connection_close(test_data->connection, AWS_ERROR_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_connect_reject_flow, s_fixture_setup, s_test_event_stream_rpc_server_connection_connect_reject_flow, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_server_connection_messages_before_connect_received( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; 
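/* This test skips the CONNECT handshake entirely: it pushes an APPLICATION_MESSAGE on stream id 1
 * straight into the channel and expects the server to reject it, close the connection, and write a
 * PROTOCOL_ERROR back, which the assertions below verify. */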
test_data->user_data = &message_data;
test_data->received_fn = s_on_recieved_protocol_message;
struct aws_byte_buf payload = aws_byte_buf_from_c_str("test connect message payload");
struct aws_array_list headers_list;
ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1));
struct aws_event_stream_message message;
ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload));
struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message));
ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data));
testing_channel_drain_queued_tasks(&test_data->testing_channel);
/* message should have just been outright rejected */
ASSERT_INT_EQUALS(0, message_data.message_type);
ASSERT_UINT_EQUALS(0, message_data.payload_cpy.len);
aws_event_stream_message_clean_up(&message);
ASSERT_FALSE(aws_event_stream_rpc_server_connection_is_open(test_data->connection));
struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel);
ASSERT_FALSE(aws_linked_list_empty(message_queue));
struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue);
struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle);
struct aws_event_stream_message written_message;
ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data));
aws_array_list_clear(&headers_list);
ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list));
enum aws_event_stream_rpc_message_type message_type = -1;
for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type = aws_event_stream_header_value_as_int32(header); break; } }
aws_event_stream_headers_list_cleanup(&headers_list);
aws_event_stream_message_clean_up(&written_message);
ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, message_type);
return AWS_OP_SUCCESS; }
AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_messages_before_connect_received, s_fixture_setup, s_test_event_stream_rpc_server_connection_messages_before_connect_received, s_fixture_shutdown, &s_test_data)
static int s_test_event_stream_rpc_server_connection_messages_before_connect_ack_sent( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = 
s_on_recieved_protocol_message; struct aws_byte_buf payload = aws_byte_buf_from_c_str("test connect message payload"); struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 0)); struct aws_event_stream_message message; ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT, message_data.message_type); ASSERT_INT_EQUALS(0, message_data.message_flags); ASSERT_BIN_ARRAYS_EQUALS( payload.buffer, payload.len, message_data.payload_cpy.buffer, message_data.payload_cpy.len); aws_event_stream_message_clean_up(&message); aws_event_stream_headers_list_cleanup(&headers_list); aws_byte_buf_clean_up(&message_data.payload_cpy); AWS_ZERO_STRUCT(message_data); ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1)); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_byte_buf_clean_up(&message_data.payload_cpy); ASSERT_INT_EQUALS(0, message_data.message_type); ASSERT_UINT_EQUALS(0, message_data.payload_cpy.len); aws_event_stream_message_clean_up(&message); ASSERT_FALSE(aws_event_stream_rpc_server_connection_is_open(test_data->connection)); struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel); ASSERT_FALSE(aws_linked_list_empty(message_queue)); struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue); struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle); struct aws_event_stream_message written_message; ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, 
&io_message->message_data));
aws_array_list_clear(&headers_list);
ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list));
enum aws_event_stream_rpc_message_type message_type = -1;
for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type = aws_event_stream_header_value_as_int32(header); break; } }
aws_event_stream_headers_list_cleanup(&headers_list);
aws_event_stream_message_clean_up(&written_message);
ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, message_type);
return AWS_OP_SUCCESS; }
AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_messages_before_connect_ack_sent, s_fixture_setup, s_test_event_stream_rpc_server_connection_messages_before_connect_ack_sent, s_fixture_shutdown, &s_test_data)
static int s_test_event_stream_rpc_server_connection_unknown_message_type(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message;
struct aws_byte_buf payload = aws_byte_buf_from_c_str("test connect message payload");
struct aws_array_list headers_list;
ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, 200));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 0));
struct aws_event_stream_message message;
ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload));
struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message));
ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data));
testing_channel_drain_queued_tasks(&test_data->testing_channel);
ASSERT_INT_EQUALS(0, message_data.message_type);
ASSERT_UINT_EQUALS(0, message_data.payload_cpy.len);
aws_event_stream_message_clean_up(&message);
ASSERT_FALSE(aws_event_stream_rpc_server_connection_is_open(test_data->connection));
struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel);
ASSERT_FALSE(aws_linked_list_empty(message_queue));
struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue);
struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle);
struct aws_event_stream_message written_message;
ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data));
aws_array_list_clear(&headers_list);
ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list));
enum aws_event_stream_rpc_message_type message_type = 
-1;
for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type = aws_event_stream_header_value_as_int32(header); break; } }
aws_event_stream_headers_list_cleanup(&headers_list);
aws_event_stream_message_clean_up(&written_message);
ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, message_type);
return AWS_OP_SUCCESS; }
AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_unknown_message_type, s_fixture_setup, s_test_event_stream_rpc_server_connection_unknown_message_type, s_fixture_shutdown, &s_test_data)
static int s_test_event_stream_rpc_server_connection_missing_message_type(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message;
struct aws_byte_buf payload = aws_byte_buf_from_c_str("test connect message payload");
struct aws_array_list headers_list;
ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 0));
struct aws_event_stream_message message;
ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload));
struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message));
ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data));
testing_channel_drain_queued_tasks(&test_data->testing_channel);
ASSERT_INT_EQUALS(0, message_data.message_type);
ASSERT_UINT_EQUALS(0, message_data.payload_cpy.len);
aws_event_stream_message_clean_up(&message);
ASSERT_FALSE(aws_event_stream_rpc_server_connection_is_open(test_data->connection));
struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel);
ASSERT_FALSE(aws_linked_list_empty(message_queue));
struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue);
struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle);
struct aws_event_stream_message written_message;
ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data));
aws_array_list_clear(&headers_list);
ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list));
enum aws_event_stream_rpc_message_type message_type = -1;
for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type = 
aws_event_stream_header_value_as_int32(header); break; } }
aws_event_stream_headers_list_cleanup(&headers_list);
aws_event_stream_message_clean_up(&written_message);
ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, message_type);
return AWS_OP_SUCCESS; }
AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_missing_message_type, s_fixture_setup, s_test_event_stream_rpc_server_connection_missing_message_type, s_fixture_shutdown, &s_test_data)
static int s_test_event_stream_rpc_server_connection_missing_message_flags(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message;
struct aws_byte_buf payload = aws_byte_buf_from_c_str("test connect message payload");
struct aws_array_list headers_list;
ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 0));
struct aws_event_stream_message message;
ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload));
struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message));
ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data));
testing_channel_drain_queued_tasks(&test_data->testing_channel);
ASSERT_INT_EQUALS(0, message_data.message_type);
ASSERT_UINT_EQUALS(0, message_data.payload_cpy.len);
aws_event_stream_message_clean_up(&message);
ASSERT_FALSE(aws_event_stream_rpc_server_connection_is_open(test_data->connection));
struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel);
ASSERT_FALSE(aws_linked_list_empty(message_queue));
struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue);
struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle);
struct aws_event_stream_message written_message;
ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data));
aws_array_list_clear(&headers_list);
ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list));
enum aws_event_stream_rpc_message_type message_type = -1;
for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type = aws_event_stream_header_value_as_int32(header); break; } }
aws_event_stream_headers_list_cleanup(&headers_list);
aws_event_stream_message_clean_up(&written_message);
ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, message_type);
return AWS_OP_SUCCESS; }
AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_missing_message_flags, 
s_fixture_setup, s_test_event_stream_rpc_server_connection_missing_message_flags, s_fixture_shutdown, &s_test_data)
static int s_test_event_stream_rpc_server_connection_missing_stream_id(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message;
struct aws_byte_buf payload = aws_byte_buf_from_c_str("test connect message payload");
struct aws_array_list headers_list;
ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_CONNECT));
ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0));
struct aws_event_stream_message message;
ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload));
struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message));
ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data));
testing_channel_drain_queued_tasks(&test_data->testing_channel);
ASSERT_INT_EQUALS(0, message_data.message_type);
ASSERT_UINT_EQUALS(0, message_data.payload_cpy.len);
aws_event_stream_message_clean_up(&message);
ASSERT_FALSE(aws_event_stream_rpc_server_connection_is_open(test_data->connection));
struct aws_byte_buf connect_ack_data;
ASSERT_SUCCESS(aws_byte_buf_init(&connect_ack_data, allocator, 1024));
testing_channel_drain_written_messages(&test_data->testing_channel, &connect_ack_data);
ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&message, allocator, &connect_ack_data));
aws_byte_buf_clean_up(&connect_ack_data);
aws_array_list_clear(&headers_list);
ASSERT_SUCCESS(aws_event_stream_message_headers(&message, &headers_list));
enum aws_event_stream_rpc_message_type message_type = -1;
for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type = aws_event_stream_header_value_as_int32(header); break; } }
aws_event_stream_headers_list_cleanup(&headers_list);
aws_event_stream_message_clean_up(&message);
ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, message_type);
return AWS_OP_SUCCESS; }
AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_missing_stream_id, s_fixture_setup, s_test_event_stream_rpc_server_connection_missing_stream_id, s_fixture_shutdown, &s_test_data)
static int s_on_incoming_stream( struct aws_event_stream_rpc_server_connection *connection, struct aws_event_stream_rpc_server_continuation_token *token, struct aws_byte_cursor operation_name, struct aws_event_stream_rpc_server_stream_continuation_options *continuation_options, void *user_data) { (void)connection; (void)continuation_options; struct received_protocol_message_data *message_data = user_data; message_data->continuation_token = token; 
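/* Hold on to the continuation: take a reference so the token remains valid after this callback
 * returns, and copy the operation name so the test body can assert on it later. */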
aws_event_stream_rpc_server_continuation_acquire(token); aws_byte_buf_init_copy_from_cursor( &message_data->last_seen_operation_name, message_data->allocator, operation_name); return AWS_OP_SUCCESS; } static void s_on_continuation_message( struct aws_event_stream_rpc_server_continuation_token *token, const struct aws_event_stream_rpc_message_args *message_args, void *user_data) { (void)token; struct received_protocol_message_data *message_data = user_data; message_data->message_type = message_args->message_type; message_data->message_flags = message_args->message_flags; aws_byte_buf_init_copy(&message_data->payload_cpy, message_data->allocator, message_args->payload); } static void s_on_continuation_closed(struct aws_event_stream_rpc_server_continuation_token *token, void *user_data) { (void)token; struct received_protocol_message_data *message_data = user_data; message_data->continuation_closed = true; } static int s_test_event_stream_rpc_server_connection_continuation_messages_flow( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; test_data->continuation_user_data = &message_data; test_data->on_continuation = s_on_continuation_message; test_data->on_continuation_closed = s_on_continuation_closed; test_data->on_new_stream = s_on_incoming_stream; s_do_connect(allocator, test_data, &message_data); struct aws_byte_buf payload = aws_byte_buf_from_c_str("test operation payload!"); struct aws_event_stream_message message; struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("testOperation"); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1)); ASSERT_SUCCESS(aws_event_stream_add_string_header( &headers_list, (const char *)aws_event_stream_rpc_operation_name.ptr, (uint8_t)aws_event_stream_rpc_operation_name.len, (const char *)operation_name.ptr, (uint16_t)operation_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clear(&headers_list); ASSERT_NOT_NULL(message_data.continuation_token); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, message_data.message_type); ASSERT_BIN_ARRAYS_EQUALS( operation_name.ptr, operation_name.len, message_data.last_seen_operation_name.buffer, message_data.last_seen_operation_name.len); ASSERT_BIN_ARRAYS_EQUALS( payload.buffer, payload.len, 
message_data.payload_cpy.buffer, message_data.payload_cpy.len); aws_byte_buf_clean_up(&message_data.last_seen_operation_name); aws_byte_buf_clean_up(&message_data.payload_cpy); message_data.message_flushed = 0; message_data.message_flush_err_code = 0; struct aws_byte_buf dummy_buf; aws_byte_buf_init(&dummy_buf, allocator, 1024); testing_channel_drain_written_messages(&test_data->testing_channel, &dummy_buf); aws_byte_buf_clean_up(&dummy_buf); struct aws_byte_buf server_msg_payload = aws_byte_buf_from_c_str("message from the server on continuation"); struct aws_byte_cursor server_header_name = aws_byte_cursor_from_c_str("testHeader1"); int32_t server_header_value = 6; struct aws_event_stream_header_value_pair server_payload_header = aws_event_stream_create_int32_header(server_header_name, server_header_value); struct aws_event_stream_header_value_pair server_headers[] = { server_payload_header, }; struct aws_event_stream_rpc_message_args message_args = { .payload = &server_msg_payload, .headers = server_headers, .headers_count = 1, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, }; ASSERT_SUCCESS(aws_event_stream_rpc_server_continuation_send_message( message_data.continuation_token, &message_args, s_on_message_flush_fn, &message_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel); ASSERT_FALSE(aws_linked_list_empty(message_queue)); struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue); struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle); struct aws_event_stream_message written_message; ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data)); ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list)); bool message_type_found = false; bool stream_id_found = false; bool custom_header_found = false; for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type_found = true; ASSERT_INT_EQUALS( AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, aws_event_stream_header_value_as_int32(header)); } if (aws_byte_cursor_eq(&aws_event_stream_rpc_stream_id_name, &header_name)) { stream_id_found = true; ASSERT_INT_EQUALS(1, aws_event_stream_header_value_as_int32(header)); } if (aws_byte_cursor_eq(&server_header_name, &header_name)) { custom_header_found = true; ASSERT_INT_EQUALS(6, aws_event_stream_header_value_as_int32(header)); } } aws_event_stream_headers_list_cleanup(&headers_list); ASSERT_TRUE(message_type_found); ASSERT_TRUE(stream_id_found); ASSERT_TRUE(custom_header_found); ASSERT_BIN_ARRAYS_EQUALS( server_msg_payload.buffer, server_msg_payload.len, aws_event_stream_message_payload(&written_message), aws_event_stream_message_payload_len(&written_message)); aws_event_stream_message_clean_up(&written_message); /* now send a terminal stream from the client. 
*/ ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1)); struct aws_byte_buf closing_payload = aws_byte_buf_from_c_str("final message for this stream"); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &closing_payload)); send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clean_up(&headers_list); ASSERT_BIN_ARRAYS_EQUALS( closing_payload.buffer, closing_payload.len, message_data.payload_cpy.buffer, message_data.payload_cpy.len); ASSERT_TRUE(message_data.continuation_closed); aws_byte_buf_clean_up(&message_data.payload_cpy); aws_event_stream_rpc_server_connection_close(test_data->connection, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_rpc_server_continuation_release(message_data.continuation_token); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_continuation_messages_flow, s_fixture_setup, s_test_event_stream_rpc_server_connection_continuation_messages_flow, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_server_connection_continuation_failure(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; test_data->continuation_user_data = &message_data; test_data->on_continuation = s_on_continuation_message; test_data->on_continuation_closed = s_on_continuation_closed; s_do_connect(allocator, test_data, &message_data); struct aws_byte_buf payload = aws_byte_buf_from_c_str("test operation payload!"); struct aws_event_stream_message message; struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("testOperation"); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1)); ASSERT_SUCCESS(aws_event_stream_add_string_header( &headers_list, (const char 
*)aws_event_stream_rpc_operation_name.ptr, (uint8_t)aws_event_stream_rpc_operation_name.len, (const char *)operation_name.ptr, (uint16_t)operation_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clean_up(&headers_list); ASSERT_NULL(message_data.continuation_token); aws_event_stream_rpc_server_connection_close(test_data->connection, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&test_data->testing_channel); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_continuation_failure, s_fixture_setup_new_stream_failure, s_test_event_stream_rpc_server_connection_continuation_failure, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_server_connection_continuation_missing_operation( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; test_data->continuation_user_data = &message_data; test_data->on_continuation = s_on_continuation_message; test_data->on_continuation_closed = s_on_continuation_closed; test_data->on_new_stream = s_on_incoming_stream; s_do_connect(allocator, test_data, &message_data); struct aws_byte_buf payload = aws_byte_buf_from_c_str("test operation payload!"); struct aws_event_stream_message message; struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1)); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clear(&headers_list); ASSERT_NULL(message_data.continuation_token); struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel); ASSERT_FALSE(aws_linked_list_empty(message_queue)); struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue); struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle); struct aws_event_stream_message written_message; 
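    /* The stream message pushed above deliberately omits the operation-name header, so no
     * continuation should have been created; parse the message the server wrote back and
     * verify that it reports a protocol error. */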
ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data)); ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list)); bool message_type_found = false; for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type_found = true; ASSERT_INT_EQUALS( AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, aws_event_stream_header_value_as_int32(header)); } } aws_event_stream_headers_list_cleanup(&headers_list); aws_event_stream_message_clean_up(&written_message); ASSERT_TRUE(message_type_found); testing_channel_drain_queued_tasks(&test_data->testing_channel); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_continuation_missing_operation, s_fixture_setup, s_test_event_stream_rpc_server_connection_continuation_missing_operation, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_server_connection_stream_id_ahead(struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; test_data->continuation_user_data = &message_data; test_data->on_continuation = s_on_continuation_message; test_data->on_continuation_closed = s_on_continuation_closed; test_data->on_new_stream = s_on_incoming_stream; s_do_connect(allocator, test_data, &message_data); struct aws_byte_buf payload = aws_byte_buf_from_c_str("test operation payload!"); struct aws_event_stream_message message; struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("testOperation"); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 2)); ASSERT_SUCCESS(aws_event_stream_add_string_header( &headers_list, (const char *)aws_event_stream_rpc_operation_name.ptr, (uint8_t)aws_event_stream_rpc_operation_name.len, (const char *)operation_name.ptr, (uint16_t)operation_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clear(&headers_list); ASSERT_NULL(message_data.continuation_token); struct aws_linked_list *message_queue = 
testing_channel_get_written_message_queue(&test_data->testing_channel); ASSERT_FALSE(aws_linked_list_empty(message_queue)); struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue); struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle); struct aws_event_stream_message written_message; ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data)); ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list)); bool message_type_found = false; for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type_found = true; ASSERT_INT_EQUALS( AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, aws_event_stream_header_value_as_int32(header)); } } aws_event_stream_headers_list_cleanup(&headers_list); aws_event_stream_message_clean_up(&written_message); ASSERT_TRUE(message_type_found); testing_channel_drain_queued_tasks(&test_data->testing_channel); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_stream_id_ahead, s_fixture_setup, s_test_event_stream_rpc_server_connection_stream_id_ahead, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_server_connection_continuation_reused_stream_id_fails( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; test_data->continuation_user_data = &message_data; test_data->on_continuation = s_on_continuation_message; test_data->on_continuation_closed = s_on_continuation_closed; test_data->on_new_stream = s_on_incoming_stream; s_do_connect(allocator, test_data, &message_data); struct aws_byte_buf payload = aws_byte_buf_from_c_str("test operation payload!"); struct aws_event_stream_message message; struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("testOperation"); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1)); ASSERT_SUCCESS(aws_event_stream_add_string_header( &headers_list, (const char *)aws_event_stream_rpc_operation_name.ptr, (uint8_t)aws_event_stream_rpc_operation_name.len, (const char *)operation_name.ptr, (uint16_t)operation_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), 
aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clear(&headers_list); ASSERT_NOT_NULL(message_data.continuation_token); ASSERT_INT_EQUALS(AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, message_data.message_type); ASSERT_BIN_ARRAYS_EQUALS( operation_name.ptr, operation_name.len, message_data.last_seen_operation_name.buffer, message_data.last_seen_operation_name.len); ASSERT_BIN_ARRAYS_EQUALS( payload.buffer, payload.len, message_data.payload_cpy.buffer, message_data.payload_cpy.len); aws_byte_buf_clean_up(&message_data.last_seen_operation_name); aws_byte_buf_clean_up(&message_data.payload_cpy); message_data.message_flushed = 0; message_data.message_flush_err_code = 0; struct aws_byte_buf dummy_buf; aws_byte_buf_init(&dummy_buf, allocator, 1024); testing_channel_drain_written_messages(&test_data->testing_channel, &dummy_buf); aws_byte_buf_clean_up(&dummy_buf); struct aws_byte_buf server_msg_payload = aws_byte_buf_from_c_str("message from the server on continuation"); struct aws_byte_cursor server_header_name = aws_byte_cursor_from_c_str("testHeader1"); int32_t server_header_value = 6; struct aws_event_stream_header_value_pair server_payload_header = aws_event_stream_create_int32_header(server_header_name, server_header_value); struct aws_event_stream_header_value_pair server_headers[] = { server_payload_header, }; struct aws_event_stream_rpc_message_args message_args = { .payload = &server_msg_payload, .headers = server_headers, .headers_count = 1, .message_type = AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, .message_flags = AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM, }; ASSERT_SUCCESS(aws_event_stream_rpc_server_continuation_send_message( message_data.continuation_token, &message_args, s_on_message_flush_fn, &message_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); ASSERT_TRUE(message_data.continuation_closed); ASSERT_TRUE(aws_event_stream_rpc_server_continuation_is_closed(message_data.continuation_token)); struct aws_linked_list *message_queue = testing_channel_get_written_message_queue(&test_data->testing_channel); ASSERT_FALSE(aws_linked_list_empty(message_queue)); struct aws_byte_buf final_message; aws_byte_buf_init(&final_message, allocator, 1024); testing_channel_drain_written_messages(&test_data->testing_channel, &final_message); struct aws_event_stream_message written_message; ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &final_message)); aws_byte_buf_clean_up(&final_message); ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list)); bool message_type_found = false; bool stream_id_found = false; bool custom_header_found = false; bool message_flags_found = false; for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type_found = true; ASSERT_INT_EQUALS( AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE, aws_event_stream_header_value_as_int32(header)); } if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_flags_name, 
&header_name)) { message_flags_found = true; ASSERT_INT_EQUALS( AWS_EVENT_STREAM_RPC_MESSAGE_FLAG_TERMINATE_STREAM, aws_event_stream_header_value_as_int32(header)); } if (aws_byte_cursor_eq(&aws_event_stream_rpc_stream_id_name, &header_name)) { stream_id_found = true; ASSERT_INT_EQUALS(1, aws_event_stream_header_value_as_int32(header)); } if (aws_byte_cursor_eq(&server_header_name, &header_name)) { custom_header_found = true; ASSERT_INT_EQUALS(6, aws_event_stream_header_value_as_int32(header)); } } aws_event_stream_headers_list_cleanup(&headers_list); ASSERT_TRUE(message_type_found); ASSERT_TRUE(stream_id_found); ASSERT_TRUE(custom_header_found); ASSERT_TRUE(message_flags_found); ASSERT_BIN_ARRAYS_EQUALS( server_msg_payload.buffer, server_msg_payload.len, aws_event_stream_message_payload(&written_message), aws_event_stream_message_payload_len(&written_message)); aws_event_stream_message_clean_up(&written_message); /* now send a message on the same stream from the client. */ ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, 1)); struct aws_byte_buf closing_payload = aws_byte_buf_from_c_str("final message for this stream"); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &closing_payload)); send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clear(&headers_list); struct aws_linked_list_node *written_message_node = aws_linked_list_front(message_queue); struct aws_io_message *io_message = AWS_CONTAINER_OF(written_message_node, struct aws_io_message, queueing_handle); ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&written_message, allocator, &io_message->message_data)); ASSERT_SUCCESS(aws_event_stream_message_headers(&written_message, &headers_list)); message_type_found = false; for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type_found = true; ASSERT_INT_EQUALS( AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, aws_event_stream_header_value_as_int32(header)); } } aws_event_stream_headers_list_cleanup(&headers_list); aws_event_stream_message_clean_up(&written_message); ASSERT_TRUE(message_type_found); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_rpc_server_continuation_release(message_data.continuation_token); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( 
test_event_stream_rpc_server_connection_continuation_reused_stream_id_fails, s_fixture_setup, s_test_event_stream_rpc_server_connection_continuation_reused_stream_id_fails, s_fixture_shutdown, &s_test_data) static int s_test_event_stream_rpc_server_connection_continuation_max_stream_id_reached( struct aws_allocator *allocator, void *ctx) { struct test_data *test_data = ctx; struct received_protocol_message_data message_data = { .allocator = allocator, }; test_data->user_data = &message_data; test_data->received_fn = s_on_recieved_protocol_message; test_data->continuation_user_data = &message_data; test_data->on_continuation = s_on_continuation_message; test_data->on_continuation_closed = s_on_continuation_closed; test_data->on_new_stream = s_on_incoming_stream; s_do_connect(allocator, test_data, &message_data); aws_event_stream_rpc_server_override_last_stream_id(test_data->connection, INT32_MAX); struct aws_byte_buf payload = aws_byte_buf_from_c_str("test operation payload!"); struct aws_event_stream_message message; struct aws_array_list headers_list; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers_list, allocator)); struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("testOperation"); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_type_name.ptr, (uint8_t)aws_event_stream_rpc_message_type_name.len, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_APPLICATION_MESSAGE)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_message_flags_name.ptr, (uint8_t)aws_event_stream_rpc_message_flags_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_add_int32_header( &headers_list, (const char *)aws_event_stream_rpc_stream_id_name.ptr, (uint8_t)aws_event_stream_rpc_stream_id_name.len, (int32_t)((uint32_t)(INT32_MAX) + 1))); ASSERT_SUCCESS(aws_event_stream_add_string_header( &headers_list, (const char *)aws_event_stream_rpc_operation_name.ptr, (uint8_t)aws_event_stream_rpc_operation_name.len, (const char *)operation_name.ptr, (uint16_t)operation_name.len, 0)); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers_list, &payload)); struct aws_byte_cursor send_data = aws_byte_cursor_from_array( aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message)); ASSERT_SUCCESS(testing_channel_push_read_data(&test_data->testing_channel, send_data)); testing_channel_drain_queued_tasks(&test_data->testing_channel); aws_event_stream_message_clean_up(&message); aws_array_list_clear(&headers_list); ASSERT_NULL(message_data.continuation_token); struct aws_byte_buf final_message; ASSERT_SUCCESS(aws_byte_buf_init(&final_message, allocator, 1024)); testing_channel_drain_written_messages(&test_data->testing_channel, &final_message); ASSERT_SUCCESS(aws_event_stream_message_from_buffer_copy(&message, allocator, &final_message)); aws_byte_buf_clean_up(&final_message); ASSERT_SUCCESS(aws_event_stream_message_headers(&message, &headers_list)); bool message_type_found = false; for (size_t i = 0; i < aws_array_list_length(&headers_list); ++i) { struct aws_event_stream_header_value_pair *header = NULL; aws_array_list_get_at_ptr(&headers_list, (void **)&header, i); struct aws_byte_cursor header_name = aws_byte_cursor_from_array(header->header_name, header->header_name_len); if (aws_byte_cursor_eq(&aws_event_stream_rpc_message_type_name, &header_name)) { message_type_found = true; ASSERT_INT_EQUALS( AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 
aws_event_stream_header_value_as_int32(header)); } } ASSERT_TRUE(message_type_found); aws_event_stream_headers_list_cleanup(&headers_list); aws_event_stream_message_clean_up(&message); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_event_stream_rpc_server_connection_continuation_max_stream_id_reached, s_fixture_setup, s_test_event_stream_rpc_server_connection_continuation_max_stream_id_reached, s_fixture_shutdown, &s_test_data) aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/message_deserializer_test.c000066400000000000000000000152761456575232400304100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include static int s_test_outgoing_no_op_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t test_data[] = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc2, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff}; struct aws_event_stream_message message; struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_message_from_buffer(&message, allocator, &test_buf), "Message validation should have succeeded"); ASSERT_INT_EQUALS( 0x00000010, aws_event_stream_message_total_length(&message), "Message length should have been 0x10"); ASSERT_INT_EQUALS( 0x00000000, aws_event_stream_message_headers_len(&message), "Headers Length should have been 0x00"); ASSERT_INT_EQUALS( 0x05c248eb, aws_event_stream_message_prelude_crc(&message), "Prelude CRC should have been 0x05c248eb"); ASSERT_INT_EQUALS( 0x7d98c8ff, aws_event_stream_message_message_crc(&message), "Message CRC should have been 0x7d98c8ff"); aws_event_stream_message_clean_up(&message); return 0; } AWS_TEST_CASE(test_outgoing_no_op_valid, s_test_outgoing_no_op_valid_fn) static int s_test_outgoing_application_data_no_headers_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t test_data[] = {0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x52, 0x8c, 0x5a, 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0xc3, 0x65, 0x39, 0x36}; struct aws_event_stream_message message; struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_message_from_buffer(&message, allocator, &test_buf), "Message validation should have succeeded"); ASSERT_INT_EQUALS( 0x0000001D, aws_event_stream_message_total_length(&message), "Message length should have been 0x0000001D"); ASSERT_INT_EQUALS( 0x00000000, aws_event_stream_message_headers_len(&message), "Headers Length should have been 0x00"); ASSERT_INT_EQUALS( 0xfd528c5a, aws_event_stream_message_prelude_crc(&message), "Prelude CRC should have been 0xfd528c5a"); const char *expected_str = "{'foo':'bar'}"; ASSERT_INT_EQUALS( strlen(expected_str), aws_event_stream_message_payload_len(&message), "payload length should have been %d", (int)(strlen(expected_str))); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), aws_event_stream_message_payload(&message), aws_event_stream_message_payload_len(&message), "payload should have been %s", expected_str); ASSERT_INT_EQUALS( 0xc3653936, aws_event_stream_message_message_crc(&message), "Message CRC should have been 0xc3653936"); aws_event_stream_message_clean_up(&message); return 0; } AWS_TEST_CASE(test_outgoing_application_data_no_headers_valid, s_test_outgoing_application_data_no_headers_valid_fn) static int 
s_test_outgoing_application_one_compressed_header_pair_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint8_t test_data[] = {0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x20, 0x07, 0xFD, 0x83, 0x96, 0x0C, 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 0x07, 0x00, 0x10, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'j', 's', 'o', 'n', 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0x8D, 0x9C, 0x08, 0xB1}; struct aws_event_stream_message message; struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_message_from_buffer(&message, allocator, &test_buf), "Message validation should have succeeded"); ASSERT_INT_EQUALS( 0x0000003D, aws_event_stream_message_total_length(&message), "Message length should have been 0x0000003D"); ASSERT_INT_EQUALS( 0x00000020, aws_event_stream_message_headers_len(&message), "Headers Length should have been 0x00000020"); ASSERT_INT_EQUALS( 0x07FD8396, aws_event_stream_message_prelude_crc(&message), "Prelude CRC should have been 0x07FD8396"); const char *expected_str = "{'foo':'bar'}"; ASSERT_INT_EQUALS( strlen(expected_str), aws_event_stream_message_payload_len(&message), "payload length should have been %d", (int)(strlen(expected_str))); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), aws_event_stream_message_payload(&message), aws_event_stream_message_payload_len(&message), "payload should have been %s", expected_str); struct aws_array_list headers; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers, allocator), "Header initialization failed"); ASSERT_SUCCESS(aws_event_stream_message_headers(&message, &headers), "Header parsing should have succeeded"); ASSERT_INT_EQUALS(1, headers.length, "There should be exactly one header found"); struct aws_event_stream_header_value_pair header; ASSERT_SUCCESS( aws_array_list_front(&headers, &header), "accessing the first element of an array of size 1 should have succeeded"); const char *content_type = "content-type"; const char *content_type_value = "application/json"; struct aws_byte_buf header_name_buf = aws_event_stream_header_name(&header); ASSERT_BIN_ARRAYS_EQUALS( content_type, strlen(content_type), header_name_buf.buffer, header_name_buf.len, "header name should have been %s", content_type); struct aws_byte_buf header_value_buf = aws_event_stream_header_value_as_string(&header); ASSERT_BIN_ARRAYS_EQUALS( content_type_value, strlen(content_type_value), header_value_buf.buffer, header_value_buf.len, "header value should have been %s", content_type_value); ASSERT_INT_EQUALS( 0x8D9C08B1, aws_event_stream_message_message_crc(&message), "Message CRC should have been 0x8D9C08B1"); aws_event_stream_headers_list_cleanup(&headers); aws_event_stream_message_clean_up(&message); return 0; } AWS_TEST_CASE( test_outgoing_application_one_compressed_header_pair_valid, s_test_outgoing_application_one_compressed_header_pair_valid_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/message_serializer_test.c000066400000000000000000000127611456575232400300730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include static int s_test_incoming_no_op_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t expected_data[] = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc2, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff}; struct aws_event_stream_message message; ASSERT_SUCCESS( aws_event_stream_message_init(&message, allocator, NULL, NULL), "Message validation should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( expected_data, sizeof(expected_data), aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message), "buffers didn't match"); aws_event_stream_message_clean_up(&message); return 0; } AWS_TEST_CASE(test_incoming_no_op_valid, s_test_incoming_no_op_valid_fn) static int s_test_incoming_application_data_no_headers_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t expected_data[] = {0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x52, 0x8c, 0x5a, 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0xc3, 0x65, 0x39, 0x36}; const char *test_str = "{'foo':'bar'}"; struct aws_event_stream_message message; struct aws_byte_buf test_buf = aws_byte_buf_from_c_str(test_str); ASSERT_SUCCESS( aws_event_stream_message_init(&message, allocator, NULL, &test_buf), "Message validation should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( expected_data, sizeof(expected_data), aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message), "buffers didn't match"); aws_event_stream_message_clean_up(&message); return 0; } AWS_TEST_CASE(test_incoming_application_data_no_headers_valid, s_test_incoming_application_data_no_headers_valid_fn) static int s_test_incoming_application_one_compressed_header_pair_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t expected_data[] = {0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x20, 0x07, 0xFD, 0x83, 0x96, 0x0C, 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 0x07, 0x00, 0x10, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'j', 's', 'o', 'n', 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0x8D, 0x9C, 0x08, 0xB1}; const char *test_str = "{'foo':'bar'}"; struct aws_event_stream_message message; struct aws_array_list headers; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers, allocator), "Header initialization failed"); const char *header_name = "content-type"; const char *header_value = "application/json"; ASSERT_SUCCESS( aws_event_stream_add_string_header( &headers, header_name, (int8_t)strlen(header_name), header_value, (uint16_t)strlen(header_value), 0), "Adding a header should have succeeded."); struct aws_byte_buf test_buf = aws_byte_buf_from_c_str(test_str); ASSERT_SUCCESS( aws_event_stream_message_init(&message, allocator, &headers, &test_buf), "Message validation should have succeeded"); ASSERT_BIN_ARRAYS_EQUALS( expected_data, sizeof(expected_data), aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message), "buffers didn't match"); aws_event_stream_headers_list_cleanup(&headers); aws_event_stream_message_clean_up(&message); return 0; } AWS_TEST_CASE( test_incoming_application_one_compressed_header_pair_valid, s_test_incoming_application_one_compressed_header_pair_valid_fn) static int s_test_incoming_application_int32_header_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t expected_data[] = {0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x0E, 0x34, 0x8B, 0xEC, 0x7B, 0x08, 'e', 'v', 'e', 
'n', 't', '-', 'i', 'd', 0x04, 0x00, 0x00, 0xA0, 0x0C, 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0xD3, 0x89, 0x02, 0x85}; const char *test_str = "{'foo':'bar'}"; struct aws_event_stream_message message; struct aws_array_list headers; ASSERT_SUCCESS(aws_event_stream_headers_list_init(&headers, allocator), "Header initialization failed"); const char *header_name = "event-id"; ASSERT_SUCCESS( aws_event_stream_add_int32_header(&headers, header_name, (int8_t)strlen(header_name), 0x0000A00c), "Adding a header should have succeeded."); struct aws_byte_buf test_buf = aws_byte_buf_from_c_str(test_str); ASSERT_SUCCESS(aws_event_stream_message_init(&message, allocator, &headers, &test_buf), "buffers didn't match"); ASSERT_BIN_ARRAYS_EQUALS( expected_data, sizeof(expected_data), aws_event_stream_message_buffer(&message), aws_event_stream_message_total_length(&message), "buffers didn't match"); aws_array_list_clean_up(&headers); aws_event_stream_message_clean_up(&message); return 0; } AWS_TEST_CASE(test_incoming_application_int32_header_valid, s_test_incoming_application_int32_header_valid_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-event-stream/tests/message_streaming_decoder_test.c000066400000000000000000000667351456575232400314120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct test_decoder_data { struct aws_event_stream_message_prelude latest_prelude; struct aws_array_list headers_list; uint8_t *latest_payload; size_t written; struct aws_allocator *alloc; int latest_error; uint32_t message_crc; }; static void s_decoder_test_on_payload_segment( struct aws_event_stream_streaming_decoder *decoder, struct aws_byte_buf *data, int8_t final_segment, void *user_data) { (void)final_segment; (void)decoder; struct test_decoder_data *decoder_data = (struct test_decoder_data *)user_data; memcpy(decoder_data->latest_payload + decoder_data->written, data->buffer, data->len); decoder_data->written += data->len; } static void s_decoder_test_on_prelude_received( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, void *user_data) { (void)decoder; struct test_decoder_data *decoder_data = (struct test_decoder_data *)user_data; decoder_data->latest_prelude = *prelude; if (decoder_data->latest_payload) { aws_mem_release(decoder_data->alloc, decoder_data->latest_payload); } const size_t payload_size = decoder_data->latest_prelude.total_len - AWS_EVENT_STREAM_PRELUDE_LENGTH - AWS_EVENT_STREAM_TRAILER_LENGTH - decoder_data->latest_prelude.headers_len; if (payload_size) { decoder_data->latest_payload = aws_mem_acquire(decoder_data->alloc, payload_size); } else { decoder_data->latest_payload = NULL; } decoder_data->written = 0; } static void s_decoder_test_header_received( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, struct aws_event_stream_header_value_pair *header, void *user_data) { (void)decoder; (void)prelude; struct test_decoder_data *decoder_data = (struct test_decoder_data *)user_data; aws_event_stream_add_header(&decoder_data->headers_list, header); } static void s_decoder_test_on_complete( struct aws_event_stream_streaming_decoder *decoder, uint32_t message_crc, void *user_data) { (void)decoder; struct test_decoder_data *decoder_data = (struct test_decoder_data *)user_data; decoder_data->message_crc = message_crc; } static void 
s_decoder_test_on_error( struct aws_event_stream_streaming_decoder *decoder, struct aws_event_stream_message_prelude *prelude, int error_code, const char *message, void *user_data) { (void)decoder; (void)prelude; (void)message; struct test_decoder_data *decoder_data = (struct test_decoder_data *)user_data; decoder_data->latest_error = error_code; } static int s_test_streaming_decoder_incoming_no_op_valid_single_message_fn(struct aws_allocator *allocator, void *ctx) { uint8_t test_data[] = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc2, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff, }; (void)ctx; struct test_decoder_data decoder_data = {.latest_payload = 0, .written = 0, .alloc = allocator, .latest_error = 0}; struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = s_decoder_test_on_payload_segment, .on_prelude = s_decoder_test_on_prelude_received, .on_header = s_decoder_test_header_received, .on_complete = s_decoder_test_on_complete, .on_error = s_decoder_test_on_error, .user_data = &decoder_data}; struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init_from_options(&decoder, allocator, &decoder_options); struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_streaming_decoder_pump(&decoder, &test_buf), "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x00000010, decoder_data.latest_prelude.total_len, "Message length should have been 0x10"); ASSERT_INT_EQUALS(0x00000000, decoder_data.latest_prelude.headers_len, "Headers Length should have been 0x00"); ASSERT_INT_EQUALS(0x05c248eb, decoder_data.latest_prelude.prelude_crc, "Prelude CRC should have been 0x8c335472"); ASSERT_INT_EQUALS(0, decoder_data.written, "No payload data should have been written"); if (decoder_data.latest_payload) { aws_mem_release(allocator, decoder_data.latest_payload); } ASSERT_UINT_EQUALS(0x7D98C8FF, decoder_data.message_crc); aws_event_stream_streaming_decoder_clean_up(&decoder); return 0; } AWS_TEST_CASE( test_streaming_decoder_incoming_no_op_valid_single_message, s_test_streaming_decoder_incoming_no_op_valid_single_message_fn) static int s_test_streaming_decoder_incoming_application_no_headers_fn(struct aws_allocator *allocator, void *ctx) { uint8_t test_data[] = { 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x52, 0x8c, 0x5a, 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0xc3, 0x65, 0x39, 0x36, }; (void)ctx; struct test_decoder_data decoder_data = {.latest_payload = 0, .written = 0, .alloc = allocator, .latest_error = 0}; struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = s_decoder_test_on_payload_segment, .on_prelude = s_decoder_test_on_prelude_received, .on_header = s_decoder_test_header_received, .on_complete = s_decoder_test_on_complete, .on_error = s_decoder_test_on_error, .user_data = &decoder_data}; struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init_from_options(&decoder, allocator, &decoder_options); struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_streaming_decoder_pump(&decoder, &test_buf), "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x0000001D, 
decoder_data.latest_prelude.total_len, "Message length should have been 0x1D"); ASSERT_INT_EQUALS(0x00000000, decoder_data.latest_prelude.headers_len, "Headers Length should have been 0x00"); ASSERT_INT_EQUALS(0xfd528c5a, decoder_data.latest_prelude.prelude_crc, "Prelude CRC should have been 0xfd528c5a"); const char *expected_str = "{'foo':'bar'}"; size_t payload_len = decoder_data.latest_prelude.total_len - AWS_EVENT_STREAM_PRELUDE_LENGTH - AWS_EVENT_STREAM_TRAILER_LENGTH - decoder_data.latest_prelude.headers_len; ASSERT_INT_EQUALS( strlen(expected_str), payload_len, "payload length should have been %d", (int)(strlen(expected_str))); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), decoder_data.latest_payload, payload_len, "payload should have been %s", expected_str); if (decoder_data.latest_payload) { aws_mem_release(allocator, decoder_data.latest_payload); } ASSERT_UINT_EQUALS(0xC3653936, decoder_data.message_crc); aws_event_stream_streaming_decoder_clean_up(&decoder); return 0; } AWS_TEST_CASE( test_streaming_decoder_incoming_application_no_headers, s_test_streaming_decoder_incoming_application_no_headers_fn) static int s_test_streaming_decoder_incoming_application_one_compressed_header_pair_valid_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t test_data[] = { 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x20, 0x07, 0xFD, 0x83, 0x96, 0x0C, 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 0x07, 0x00, 0x10, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'j', 's', 'o', 'n', 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0x8D, 0x9C, 0x08, 0xB1, }; struct test_decoder_data decoder_data = { .latest_payload = 0, .written = 0, .alloc = allocator, .latest_error = 0, }; aws_event_stream_headers_list_init(&decoder_data.headers_list, allocator); struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = s_decoder_test_on_payload_segment, .on_prelude = s_decoder_test_on_prelude_received, .on_header = s_decoder_test_header_received, .on_complete = s_decoder_test_on_complete, .on_error = s_decoder_test_on_error, .user_data = &decoder_data}; struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init_from_options(&decoder, allocator, &decoder_options); struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_streaming_decoder_pump(&decoder, &test_buf), "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x0000003D, decoder_data.latest_prelude.total_len, "Message length should have been 0x3D"); ASSERT_INT_EQUALS(0x00000020, decoder_data.latest_prelude.headers_len, "Headers Length should have been 0x20"); ASSERT_INT_EQUALS(0x07FD8396, decoder_data.latest_prelude.prelude_crc, "Prelude CRC should have been 0x07FD8396"); const char *content_type = "content-type"; const char *content_type_value = "application/json"; struct aws_event_stream_header_value_pair latest_header; aws_array_list_get_at(&decoder_data.headers_list, &latest_header, 0); struct aws_byte_buf latest_header_value = aws_event_stream_header_value_as_string(&latest_header); ASSERT_BIN_ARRAYS_EQUALS( content_type, strlen(content_type), latest_header.header_name, latest_header.header_name_len, "header name should have been %s", content_type); ASSERT_BIN_ARRAYS_EQUALS( content_type_value, strlen(content_type_value), 
latest_header_value.buffer, latest_header_value.len, "header value should have been %s", content_type_value); const char *expected_str = "{'foo':'bar'}"; size_t payload_len = decoder_data.latest_prelude.total_len - AWS_EVENT_STREAM_PRELUDE_LENGTH - AWS_EVENT_STREAM_TRAILER_LENGTH - decoder_data.latest_prelude.headers_len; ASSERT_INT_EQUALS( strlen(expected_str), payload_len, "payload length should have been %d", (int)(strlen(expected_str))); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), decoder_data.latest_payload, payload_len, "payload should have been %s", expected_str); if (decoder_data.latest_payload) { aws_mem_release(allocator, decoder_data.latest_payload); } ASSERT_UINT_EQUALS(0x8D9C08B1, decoder_data.message_crc); aws_event_stream_headers_list_cleanup(&decoder_data.headers_list); return 0; } AWS_TEST_CASE( test_streaming_decoder_incoming_application_one_compressed_header_pair_valid, s_test_streaming_decoder_incoming_application_one_compressed_header_pair_valid_fn) static int s_test_streaming_decoder_incoming_application_one_int32_header_pair_valid_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; /* clang-format off */ uint8_t test_data[] = { 0x00, 0x00, 0x00, 0x1b, /* total length */ 0x00, 0x00, 0x00, 0x0b, /* headers length */ 0xe5, 0xc0, 0xa0, 0x72, /* prelude crc */ 0x05, /* header name length */ 'e', 'v', 'e', 'n', 't', /* header name */ 0x04, /* header value type */ 0x00, 0x00, /* header value length */ 0x00, 0x20, /* header value */ 0x04, 0xa1, 0xd4, 0x7c /* message crc */ }; /* clang-format on */ struct test_decoder_data decoder_data = { .latest_payload = 0, .written = 0, .alloc = allocator, .latest_error = 0, }; aws_event_stream_headers_list_init(&decoder_data.headers_list, allocator); struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = s_decoder_test_on_payload_segment, .on_prelude = s_decoder_test_on_prelude_received, .on_header = s_decoder_test_header_received, .on_complete = s_decoder_test_on_complete, .on_error = s_decoder_test_on_error, .user_data = &decoder_data}; struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init_from_options(&decoder, allocator, &decoder_options); struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_streaming_decoder_pump(&decoder, &test_buf), "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x0000001B, decoder_data.latest_prelude.total_len, "Message length should have been 0x1B"); ASSERT_INT_EQUALS(0x0000000B, decoder_data.latest_prelude.headers_len, "Headers Length should have been 0xB"); ASSERT_INT_EQUALS(0xE5C0A072, decoder_data.latest_prelude.prelude_crc, "Prelude CRC should have been 0xE5C0A072"); const char *expected_header_name = "event"; struct aws_event_stream_header_value_pair latest_header; aws_array_list_get_at(&decoder_data.headers_list, &latest_header, 0); ASSERT_BIN_ARRAYS_EQUALS( expected_header_name, strlen(expected_header_name), latest_header.header_name, latest_header.header_name_len, "header name should have been %s", expected_header_name); int32_t latest_header_value = aws_event_stream_header_value_as_int32(&latest_header); ASSERT_INT_EQUALS(0x00000020, latest_header_value, "Header value should have been 0x00000020"); ASSERT_UINT_EQUALS(0x04A1D47C, decoder_data.message_crc); aws_event_stream_headers_list_cleanup(&decoder_data.headers_list); return 0; } 
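/* Note on the test vector above: an int32 header (value type 0x04) is encoded as the one-byte name
 * length, the name, the type byte, and then four big-endian value bytes (0x00 0x00 0x00 0x20 here);
 * only the variable-length byte-buffer and string value types carry a separate two-byte value-length
 * field, as the serializer test for the "event-id" header elsewhere in these tests shows. */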
AWS_TEST_CASE( test_streaming_decoder_incoming_application_one_int32_header_pair_valid, s_test_streaming_decoder_incoming_application_one_int32_header_pair_valid_fn) static int s_test_streaming_decoder_incoming_application_variable_headers_with_empty_length_pair_valid_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; /* clang-format off */ uint8_t test_data[] = { 0x00, 0x00, 0x00, 0x22, /* total length */ 0x00, 0x00, 0x00, 0x12, /* headers length */ 0x2D, 0x9A, 0xD2, 0x45, /* prelude crc */ 0x04, /* header name length */ 'b', 'u', 'f', 'f', /* header name */ 0x06, /* header value type (BYTE ARRAY)*/ 0x00, 0x00, /* header value length */ 0x06, /* header name length */ 's', 't', 'r', 'i','n','g', /* header name */ 0x07, /* header value type (String)*/ 0x00, 0x00, /* header value length */ 0xC8, 0x4C, 0xF8, 0x53 /* message crc */ }; /* clang-format on */ struct test_decoder_data decoder_data = { .latest_payload = 0, .written = 0, .alloc = allocator, .latest_error = 0, }; aws_event_stream_headers_list_init(&decoder_data.headers_list, allocator); struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = s_decoder_test_on_payload_segment, .on_prelude = s_decoder_test_on_prelude_received, .on_header = s_decoder_test_header_received, .on_complete = s_decoder_test_on_complete, .on_error = s_decoder_test_on_error, .user_data = &decoder_data}; struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init_from_options(&decoder, allocator, &decoder_options); struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_streaming_decoder_pump(&decoder, &test_buf), "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x00000022, decoder_data.latest_prelude.total_len); ASSERT_INT_EQUALS(0x00000012, decoder_data.latest_prelude.headers_len); ASSERT_INT_EQUALS(0x2D9AD245, decoder_data.latest_prelude.prelude_crc); ASSERT_UINT_EQUALS(0xC84CF853, decoder_data.message_crc); const char *expected_header_name = "buff"; struct aws_event_stream_header_value_pair latest_header; aws_array_list_get_at(&decoder_data.headers_list, &latest_header, 0); ASSERT_BIN_ARRAYS_EQUALS( expected_header_name, strlen(expected_header_name), latest_header.header_name, latest_header.header_name_len, "header name should have been %s", expected_header_name); struct aws_byte_buf latest_header_value = aws_event_stream_header_value_as_bytebuf(&latest_header); ASSERT_INT_EQUALS(0, latest_header_value.len); ASSERT_NULL(latest_header_value.buffer); const char *expected_string_header_name = "string"; aws_array_list_get_at(&decoder_data.headers_list, &latest_header, 1); ASSERT_BIN_ARRAYS_EQUALS( expected_string_header_name, strlen(expected_string_header_name), latest_header.header_name, latest_header.header_name_len, "header name should have been %s", expected_header_name); latest_header_value = aws_event_stream_header_value_as_bytebuf(&latest_header); ASSERT_INT_EQUALS(0, latest_header_value.len); ASSERT_NULL(latest_header_value.buffer); aws_event_stream_headers_list_cleanup(&decoder_data.headers_list); return 0; } AWS_TEST_CASE( test_streaming_decoder_incoming_application_variable_headers_with_empty_length_pair_valid, s_test_streaming_decoder_incoming_application_variable_headers_with_empty_length_pair_valid_fn) static int s_test_streaming_decoder_incoming_application_one_bool_header_pair_valid_fn( struct 
aws_allocator *allocator, void *ctx) { (void)ctx; /* clang-format off */ uint8_t test_data[] = { 0x00, 0x00, 0x00, 0x17, /* total length */ 0x00, 0x00, 0x00, 0x07, /* headers length */ 0x29, 0x86, 0x01, 0x58, /* prelude crc */ 0x05, /* header name length */ 'e', 'v', 'e', 'n', 't', /* header name */ 0x00, /* header value type */ 0x4b, 0x4d, 0x2b, 0xe7 /* message crc */ }; /* clang-format on */ struct test_decoder_data decoder_data = { .latest_payload = 0, .written = 0, .alloc = allocator, .latest_error = 0, }; aws_event_stream_headers_list_init(&decoder_data.headers_list, allocator); struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = s_decoder_test_on_payload_segment, .on_prelude = s_decoder_test_on_prelude_received, .on_header = s_decoder_test_header_received, .on_complete = s_decoder_test_on_complete, .on_error = s_decoder_test_on_error, .user_data = &decoder_data}; struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init_from_options(&decoder, allocator, &decoder_options); struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data, sizeof(test_data)); ASSERT_SUCCESS( aws_event_stream_streaming_decoder_pump(&decoder, &test_buf), "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x00000017, decoder_data.latest_prelude.total_len, "Message length should have been 0x17"); ASSERT_INT_EQUALS(0x00000007, decoder_data.latest_prelude.headers_len, "Headers Length should have been 0x7"); ASSERT_INT_EQUALS(0x29860158, decoder_data.latest_prelude.prelude_crc, "Prelude CRC should have been 0x29860158"); const char *expected_header_name = "event"; struct aws_event_stream_header_value_pair latest_header; aws_array_list_get_at(&decoder_data.headers_list, &latest_header, 0); ASSERT_BIN_ARRAYS_EQUALS( expected_header_name, strlen(expected_header_name), latest_header.header_name, latest_header.header_name_len, "header name should have been %s", expected_header_name); int8_t latest_header_value = aws_event_stream_header_value_as_bool(&latest_header); ASSERT_INT_EQUALS(1, latest_header_value, "Header value should have been true"); ASSERT_UINT_EQUALS(0x4B4D2BE7, decoder_data.message_crc); aws_event_stream_headers_list_cleanup(&decoder_data.headers_list); return 0; } AWS_TEST_CASE( test_streaming_decoder_incoming_application_one_bool_header_pair_valid, s_test_streaming_decoder_incoming_application_one_bool_header_pair_valid_fn) static int s_test_streaming_decoder_incoming_multiple_messages_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint8_t test_data[] = { /* message 1 */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, 0xc2, 0x48, 0xeb, 0x7d, 0x98, 0xc8, 0xff, /* message 2 */ 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x20, 0x07, 0xFD, 0x83, 0x96, 0x0C, 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 0x07, 0x00, 0x10, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'j', 's', 'o', 'n', 0x7b, 0x27, 0x66, 0x6f, 0x6f, 0x27, 0x3a, 0x27, 0x62, 0x61, 0x72, 0x27, 0x7d, 0x8D, 0x9C, 0x08, 0xB1, }; size_t first_message_size = 0x10; size_t read_size = 7; /* make this a weird number to force edge case coverage in the parser. This will fall into the middle of message boundaries and preludes. 
*/ struct test_decoder_data decoder_data = {.latest_payload = 0, .written = 0, .alloc = allocator, .latest_error = 0}; aws_event_stream_headers_list_init(&decoder_data.headers_list, allocator); struct aws_event_stream_streaming_decoder_options decoder_options = { .on_payload_segment = s_decoder_test_on_payload_segment, .on_prelude = s_decoder_test_on_prelude_received, .on_header = s_decoder_test_header_received, .on_complete = s_decoder_test_on_complete, .on_error = s_decoder_test_on_error, .user_data = &decoder_data}; struct aws_event_stream_streaming_decoder decoder; aws_event_stream_streaming_decoder_init_from_options(&decoder, allocator, &decoder_options); size_t current_written = 0; int err_code = 0; while (current_written < first_message_size && !err_code) { struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data + current_written, read_size); err_code = aws_event_stream_streaming_decoder_pump(&decoder, &test_buf); current_written += read_size; } /* we should have written into the second message, but prior to the new prelude being found. check first message was parsed correctly */ ASSERT_SUCCESS(err_code, "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x00000010, decoder_data.latest_prelude.total_len, "Message length should have been 0x10"); ASSERT_INT_EQUALS(0x00000000, decoder_data.latest_prelude.headers_len, "Headers Length should have been 0x00"); ASSERT_INT_EQUALS(0x05c248eb, decoder_data.latest_prelude.prelude_crc, "Prelude CRC should have been 0x8c335472"); ASSERT_INT_EQUALS(0, decoder_data.written, "No payload data should have been written"); ASSERT_UINT_EQUALS(0x7D98C8FF, decoder_data.message_crc); while (current_written < sizeof(test_data) && !err_code) { size_t to_write = current_written + read_size < sizeof(test_data) ? read_size : sizeof(test_data) - current_written; struct aws_byte_buf test_buf = aws_byte_buf_from_array(test_data + current_written, to_write); err_code = aws_event_stream_streaming_decoder_pump(&decoder, &test_buf); current_written += to_write; } /* Second message should have been found and fully parsed at this point. 
*/ ASSERT_SUCCESS(err_code, "Message validation should have succeeded"); ASSERT_SUCCESS(decoder_data.latest_error, "No Error callback shouldn't have been called"); ASSERT_INT_EQUALS(0x0000003D, decoder_data.latest_prelude.total_len, "Message length should have been 0x3D"); ASSERT_INT_EQUALS(0x00000020, decoder_data.latest_prelude.headers_len, "Headers Length should have been 0x20"); ASSERT_INT_EQUALS(0x07FD8396, decoder_data.latest_prelude.prelude_crc, "Prelude CRC should have been 0x07FD8396"); const char *content_type = "content-type"; const char *content_type_value = "application/json"; struct aws_event_stream_header_value_pair latest_header; aws_array_list_get_at(&decoder_data.headers_list, &latest_header, 0); struct aws_byte_buf latest_header_value = aws_event_stream_header_value_as_string(&latest_header); ASSERT_BIN_ARRAYS_EQUALS( content_type, strlen(content_type), latest_header.header_name, latest_header.header_name_len, "header name should have been %s", content_type); ASSERT_BIN_ARRAYS_EQUALS( content_type_value, strlen(content_type_value), latest_header_value.buffer, latest_header_value.len, "header value should have been %s", content_type_value); const char *expected_str = "{'foo':'bar'}"; size_t payload_len = decoder_data.latest_prelude.total_len - AWS_EVENT_STREAM_PRELUDE_LENGTH - AWS_EVENT_STREAM_TRAILER_LENGTH - decoder_data.latest_prelude.headers_len; ASSERT_INT_EQUALS( strlen(expected_str), payload_len, "payload length should have been %d", (int)(strlen(expected_str))); ASSERT_BIN_ARRAYS_EQUALS( expected_str, strlen(expected_str), decoder_data.latest_payload, payload_len, "payload should have been %s", expected_str); if (decoder_data.latest_payload) { aws_mem_release(allocator, decoder_data.latest_payload); } ASSERT_UINT_EQUALS(0x8D9C08B1, decoder_data.message_crc); aws_event_stream_streaming_decoder_clean_up(&decoder); aws_event_stream_headers_list_cleanup(&decoder_data.headers_list); return 0; } AWS_TEST_CASE(test_streaming_decoder_incoming_multiple_messages, s_test_streaming_decoder_incoming_multiple_messages_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-http/000077500000000000000000000000001456575232400202075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.builder/000077500000000000000000000000001456575232400217135ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.builder/action/000077500000000000000000000000001456575232400231705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.builder/action/aws-c-http-test.py000066400000000000000000000033511456575232400265100ustar00rootroot00000000000000import Builder import sys import os import re class AWSCHttpTest(Builder.Action): def _export_env_var(self, filename, env): with open(filename) as file: pattern = re.compile("(\w+) (\w+)=(.+)") for i in file.readlines(): match = pattern.match(i) if match != None: env.shell.setenv(match.groups()[1], match.groups()[2]) print(match.groups()[1]) def run(self, env): actions = [] if os.path.exists('/tmp/setup_proxy_test_env.sh'): # For proxy integration test, we download the setup script to tmp/ from codebuild/linux-integration-tests.yml # aws s3 cp s3://aws-crt-test-stuff/setup_proxy_test_env.sh /tmp/setup_proxy_test_env.sh print("setting proxy integration test environment") self._export_env_var('/tmp/setup_proxy_test_env.sh', env) env.shell.setenv('AWS_PROXY_NO_VERIFY_PEER', 'on') if os.path.exists('./build/aws-c-http/'): # This is the directory (relative to repo root) that will contain the build when the repo is built directly 
by the # builder os.chdir('./build/aws-c-http/') elif os.path.exists('../../aws-c-http'): # This is the directory (relative to repo root) that will contain the build when the repo is built as an upstream # consumer os.chdir('../../aws-c-http') actions.append(['ctest', '--output-on-failure']) # generate the test coverage report whenever possible, will be ignored by ctest if there is no test coverage data available. actions.append(['ctest', '-T', 'coverage']) return Builder.Script(actions, name='aws-c-http-test') aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.builder/action/local-server-setup.py000066400000000000000000000032621456575232400273010ustar00rootroot00000000000000""" Setup local server for tests """ import Builder import os import sys import subprocess import atexit class LocalServerSetup(Builder.Action): """ Set up this machine for running the local h2 server test. To run the local server related test, use `--cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON` from builder. Not running local server tests for every CI as it takes a while. This action should be run in the 'pre_build_steps' or 'build_steps' stage. """ def run(self, env): if not env.project.needs_tests(env): print("Skipping local server setup because tests disabled for project") return self.env = env python_path = sys.executable # Install dependency for mock server. # Okay to fail, and if it fails, you will know when you enable the localhost tests. # We don't need it to succeed on every platform we have. result = self.env.shell.exec(python_path, '-m', 'pip', 'install', 'h2') if result.returncode != 0: print( "Could not install python HTTP/2 server." + " The localhost integration tests will fail if you run them.", file=sys.stderr) return base_dir = os.path.dirname(os.path.realpath(__file__)) dir = os.path.join(base_dir, "..", "..", "tests", "py_localhost") os.chdir(dir) p_server = subprocess.Popen([python_path, "server.py"]) p_non_tls_server = subprocess.Popen([python_path, "non_tls_server.py"]) @atexit.register def close_local_server(): p_server.terminate() p_non_tls_server.terminate() aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.clang-format000066400000000000000000000031611456575232400225630ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false 
SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.clang-tidy000066400000000000000000000014271456575232400222470ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-else-after-return,-readability-magic-numbers,-misc-unused-parameters' WarningsAsErrors: '*' HeaderFilterRegex: '.*\.[h|inl]$' FormatStyle: 'file' # Use empty line filter to skip linting code we don't own CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSufix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h,-assert.h' ... aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/000077500000000000000000000000001456575232400215475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400237325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045171456575232400265520ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? A clear and concise description of the bug. validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. 
validations: required: false - type: input id: aws-c-http-version attributes: label: aws-c-http version used validations: required: true - type: input id: compiler-version attributes: label: Compiler and version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003271456575232400257240ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-c-http/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400273230ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400276010ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400253460ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/workflows/000077500000000000000000000000001456575232400236045ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/workflows/ci.yml000066400000000000000000000202471456575232400247270ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-http LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: linux-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: image: - manylinux1-x64 - manylinux1-x86 - manylinux2014-x64 - manylinux2014-x86 - al2-x64 - fedora-34-x64 - opensuse-leap - rhel8-x64 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} clang-sanitizers: runs-on: ubuntu-20.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON byo-crypto: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build 
-p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBYO_CRYPTO=ON # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. downstream: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-http\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-http\build\aws-c-http osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} localhost-test-linux: runs-on: ubuntu-20.04 # latest steps: - name: Checkout uses: actions/checkout@v3 - name: Build and test run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python3 builder.pyz build -p aws-c-http --cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON --config Debug localhost-test-mac: runs-on: macos-11 # latest steps: - name: Checkout uses: actions/checkout@v3 - name: Build and test run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION 
}}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python3 builder.pyz build -p aws-c-http --cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON --config Debug localhost-test-win: runs-on: windows-2022 # latest steps: - name: Checkout uses: actions/checkout@v3 - name: Build and test run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p aws-c-http --cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON --config Debug aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400267100ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/workflows/closed-issue-message.yml000066400000000000000000000012261456575232400303510ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400312430ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.github/workflows/stale_issue.yml000066400000000000000000000045331456575232400266540ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deservers. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! 
It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 10 days-before-close: 4 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.gitignore000066400000000000000000000010471456575232400222010ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags .vscode #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app corpus/ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.travis/000077500000000000000000000000001456575232400215755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.travis/travis_build.sh000077500000000000000000000013271456575232400246260ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. # set -e PROJECT_DIR=`pwd` cd .. #build aws-c-common git clone https://github.com/awslabs/aws-c-common.git mkdir common-build && cd common-build cmake ../aws-c-common make && make test cd .. #build s2n git clone https://github.com/awslabs/s2n.git mkdir s2n-build && cd s2n-build cmake ../s2n make && make test cd .. #build aws-c-io cd $PROJECT_DIR cppcheck --enable=all --std=c99 --language=c --suppress=unusedFunction -I include ../aws-c-common/include --force --error-exitcode=-1 ./ cd .. mkdir build && cd build cmake -Ds2n_DIR="../s2n-build" -Daws-c-common_DIR="../common-build" $PROJECT_DIR make && make test aws-crt-python-0.20.4+dfsg/crt/aws-c-http/.tsan_suppressions.txt000066400000000000000000000005631456575232400246340ustar00rootroot00000000000000# threads created here are not explicitly joined but they are part of a ref-count mechanism that # decrements (with a possible signal of a shutdown callback based on state and count) on thread # exit function. For now, there is no reasonable way to integrate thread join into the host resolver # logic, and so this is a false positive. 
thread:create_and_init_host_entry aws-crt-python-0.20.4+dfsg/crt/aws-c-http/CMakeLists.txt000066400000000000000000000060671456575232400227600ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.1) project(aws-c-http C) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() option(ENABLE_PROXY_INTEGRATION_TESTS "Whether to run the proxy integration tests that rely on pre-configured proxy" OFF) option(ENABLE_LOCALHOST_INTEGRATION_TESTS "Whether to run the integration tests that rely on pre-configured localhost" OFF) if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(CheckCCompilerFlag) include(AwsFindPackage) file(GLOB AWS_HTTP_HEADERS "include/aws/http/*.h" ) file(GLOB AWS_HTTP_PRIV_HEADERS "include/aws/http/private/*.h" ) file(GLOB AWS_HTTP_SRC "source/*.c" ) file(GLOB HTTP_HEADERS ${AWS_HTTP_HEADERS} ${AWS_HTTP_PRIV_HEADERS} ) file(GLOB HTTP_SRC ${AWS_HTTP_SRC} ) add_library(${PROJECT_NAME} ${HTTP_HEADERS} ${HTTP_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_HTTP") aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_include_directories(${PROJECT_NAME} PUBLIC $ $) aws_use_package(aws-c-io) aws_use_package(aws-c-compression) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) aws_check_headers(${PROJECT_NAME} ${AWS_HTTP_HEADERS}) install(FILES ${AWS_HTTP_HEADERS} DESTINATION "include/aws/http") if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}/" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) include(CTest) if (NOT BYO_CRYPTO AND BUILD_TESTING) add_subdirectory(tests) if (NOT CMAKE_CROSSCOMPILING) add_subdirectory(bin/elasticurl) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-http/CODE_OF_CONDUCT.md000066400000000000000000000004671456575232400230150ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/CONTRIBUTING.md000066400000000000000000000067441456575232400224530ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-http/issues), or [recently closed](https://github.com/awslabs/aws-c-http/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-http/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-http/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 
We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-http/LICENSE000066400000000000000000000261361456575232400212240ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-c-http/NOTICE000066400000000000000000000001631456575232400211130ustar00rootroot00000000000000AWS C Http Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. aws-crt-python-0.20.4+dfsg/crt/aws-c-http/README.md000066400000000000000000000044171456575232400214730ustar00rootroot00000000000000## AWS C Http C99 implementation of the HTTP/1.1 and HTTP/2 specifications ## License This library is licensed under the Apache 2.0 License. ## Usage ### Building CMake 3.1+ is required to build. `<install-path>` must be an absolute path in the following instructions. #### Linux-Only Dependencies If you are building on Linux, you will need to build aws-lc and s2n-tls first. ``` git clone git@github.com:awslabs/aws-lc.git cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path> cmake --build aws-lc/build --target install git clone git@github.com:aws/s2n-tls.git cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path> cmake --build s2n-tls/build --target install ``` #### Building aws-c-http and Remaining Dependencies ``` git clone git@github.com:awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path> cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-c-cal.git cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path> cmake --build aws-c-cal/build --target install git clone git@github.com:awslabs/aws-c-io.git cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path> cmake --build aws-c-io/build --target install git clone git@github.com:awslabs/aws-c-compression.git cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path> cmake --build aws-c-compression/build --target install git clone git@github.com:awslabs/aws-c-http.git cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path> cmake --build aws-c-http/build --target install ``` #### Run Integration Tests with localhost To run some of the integration tests (names starting with localhost_integ_*), you first need to set up a localhost server that echoes the request headers back from `/echo`. To do that, check the [localhost](./tests/py_localhost/) scripts we provide. After that, configure and build your CMake project with `-DENABLE_LOCALHOST_INTEGRATION_TESTS=true` to build the localhost tests, and run them with `ctest --output-on-failure -R localhost_integ_*`.
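#### Minimal API Sketch

This is not one of the upstream samples (see `bin/elasticurl` for a complete client); it is a minimal sketch that only builds an HTTP request description, assuming the library and its dependencies were installed under `<install-path>` as above and that the program is linked against aws-c-http and its dependencies (aws-c-io, aws-c-cal, aws-c-compression, aws-c-common, and s2n/aws-lc on Linux).

```
#include <aws/common/allocator.h>
#include <aws/common/byte_buf.h>
#include <aws/http/http.h>
#include <aws/http/request_response.h>

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();

    /* One-time global initialization (also boots the underlying aws-c-* dependencies). */
    aws_http_library_init(allocator);

    /* Describe a GET request. Actually sending it requires an established
     * aws_http_connection; bin/elasticurl shows how to set one up. */
    struct aws_http_message *request = aws_http_message_new_request(allocator);
    aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"));
    aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"));

    struct aws_http_header host_header = {
        .name = aws_byte_cursor_from_c_str("Host"),
        .value = aws_byte_cursor_from_c_str("example.com"),
    };
    aws_http_message_add_header(request, host_header);

    aws_http_message_release(request);
    aws_http_library_clean_up();
    return 0;
}
```

Driving an actual exchange additionally needs an event loop group, host resolver, and client bootstrap from aws-c-io plus `aws_http_client_connect()`; `bin/elasticurl/main.c` walks through that full flow.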
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/bin/000077500000000000000000000000001456575232400207575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/bin/elasticurl/000077500000000000000000000000001456575232400231265ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/bin/elasticurl/CMakeLists.txt000066400000000000000000000016261456575232400256730ustar00rootroot00000000000000project(elasticurl C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB ELASTICURL_SRC "*.c" ) set(ELASTICURL_PROJECT_NAME elasticurl) add_executable(${ELASTICURL_PROJECT_NAME} ${ELASTICURL_SRC}) aws_set_common_properties(${ELASTICURL_PROJECT_NAME}) target_include_directories(${ELASTICURL_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${ELASTICURL_PROJECT_NAME} PRIVATE aws-c-http) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " elasticurl will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${ELASTICURL_PROJECT_NAME} EXPORT ${ELASTICURL_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime) aws-crt-python-0.20.4+dfsg/crt/aws-c-http/bin/elasticurl/README.md000066400000000000000000000053141456575232400244100ustar00rootroot00000000000000## ElastiCurl This is a sample application showing how to use `aws-c-http` in client mode. It's intended to replicate the command-line interface of curl's http support. ### Usage ### Examples Dump the body of example.com to stdout elasticurl example.com Make a POST request with a header and payload, logging ERROR and FATAL messages: elasticurl -v ERROR -P -H "content-type: application/json" -i -d "{'test':'testval'}" http://httpbin.org/post Download an http resource to a file on disk, logging INFO, WARN, ERROR, and FATAL messages: elasticurl -v INFO -o elastigirl.png https://upload.wikimedia.org/wikipedia/en/thumb/e/ef/Helen_Parr.png/220px-Helen_Parr.png ### Command Line Interface elasticurl [options] url Note: https is always the default. If you want plain-text http, either specify `http` manually, or set ports `80` or `8080` #### Options ##### --cacert Path to a PEM Armored PKCS#7 CA Certificate file. ##### --capath Path to a directory containing ca certificates (only supported on Unix systems). ##### --cert Path for a certificate to use with mTLS. Usually this is a path to a PEM armored PKCS#7 file. On windows this can also be a registry path for certificate manager. ##### --key Key corresponding to `--cert`. Usually this is a path to a PEM armored PKCS#7 file, if using a certificate manager registry path for `--cert`, this should be empty. ##### --connect-timeout Amount of time to wait for a connection. The default value is 3000 (3 seconds). This value is specified in milliseconds. ##### -H, --header Line to send as a header in format `[header-key]: [header-value]`. This option can be specified multiple times. The max number of supported values is currently 10. ##### -d, --data String to send as the payload body for a POST or PUT method. ##### --data-file Path to a file to send as the payload body for a POST or PUT method. ##### -M, --method Http method to use for the request (e.g. GET, POST, PUT, DELETE etc...). GET is the default. ##### -G, --get Uses GET as the method for the http request. ##### -P, --post Uses POST as the method for the http request. ##### -I, --head Uses HEAD as the method for the http request. ##### -i, --include Includes the response headers in the output to stdout. 
##### -k, --insecure Turns off TLS certificate validation. ##### -o, --output Sends the response body to the path specified instead of stdout. ##### -t, --trace Sends log message to the path specified instead of stderr. ##### -v, --verbose Sets the verbosity level of logs. Options are: ERROR|INFO|DEBUG|TRACE. Default is no logging. If you set this option, without the `--trace` argument, logs will be written to stderr. ##### -h, --help Displays the help message and exits the program. aws-crt-python-0.20.4+dfsg/crt/aws-c-http/bin/elasticurl/main.c000066400000000000000000000701531456575232400242240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */ # pragma warning(disable : 4204) /* Declared initializers */ # pragma warning(disable : 4221) /* Local var in declared initializer */ #endif #define ELASTICURL_VERSION "0.2.0" struct elasticurl_ctx { struct aws_allocator *allocator; const char *verb; struct aws_uri uri; struct aws_mutex mutex; struct aws_condition_variable c_var; bool response_code_written; const char *cacert; const char *capath; const char *cert; const char *key; int connect_timeout; const char *header_lines[10]; size_t header_line_count; FILE *input_file; struct aws_input_stream *input_body; struct aws_http_message *request; struct aws_http_connection *connection; const char *signing_library_path; struct aws_shared_library signing_library; const char *signing_function_name; struct aws_hash_table signing_context; aws_http_message_transform_fn *signing_function; const char *alpn; bool include_headers; bool insecure; FILE *output; const char *trace_file; enum aws_log_level log_level; enum aws_http_version required_http_version; bool exchange_completed; }; static void s_usage(int exit_code) { fprintf(stderr, "usage: elasticurl [options] url\n"); fprintf(stderr, " url: url to make a request to. 
The default is a GET request.\n"); fprintf(stderr, "\n Options:\n\n"); fprintf(stderr, " --cacert FILE: path to a CA certficate file.\n"); fprintf(stderr, " --capath PATH: path to a directory containing CA files.\n"); fprintf(stderr, " --cert FILE: path to a PEM encoded certificate to use with mTLS\n"); fprintf(stderr, " --key FILE: Path to a PEM encoded private key that matches cert.\n"); fprintf(stderr, " --connect-timeout INT: time in milliseconds to wait for a connection.\n"); fprintf(stderr, " -H, --header LINE: line to send as a header in format [header-key]: [header-value]\n"); fprintf(stderr, " -d, --data STRING: Data to POST or PUT\n"); fprintf(stderr, " --data-file FILE: File to read from file and POST or PUT\n"); fprintf(stderr, " -M, --method STRING: Http Method verb to use for the request\n"); fprintf(stderr, " -G, --get: uses GET for the verb.\n"); fprintf(stderr, " -P, --post: uses POST for the verb.\n"); fprintf(stderr, " -I, --head: uses HEAD for the verb.\n"); fprintf(stderr, " -i, --include: includes headers in output.\n"); fprintf(stderr, " -k, --insecure: turns off SSL/TLS validation.\n"); fprintf(stderr, " --signing-lib: path to a shared library with an exported signing function to use\n"); fprintf(stderr, " --signing-func: name of the signing function to use within the signing library\n"); fprintf( stderr, " --signing-context: key=value pair to pass to the signing function; may be used multiple times\n"); fprintf(stderr, " -o, --output FILE: dumps content-body to FILE instead of stdout.\n"); fprintf(stderr, " -t, --trace FILE: dumps logs to FILE instead of stderr.\n"); fprintf(stderr, " -v, --verbose: ERROR|INFO|DEBUG|TRACE: log level to configure. Default is none.\n"); fprintf(stderr, " --version: print the version of elasticurl.\n"); fprintf(stderr, " --http2: HTTP/2 connection required\n"); fprintf(stderr, " --http1_1: HTTP/1.1 connection required\n"); fprintf(stderr, " -h, --help\n"); fprintf(stderr, " Display this message and quit.\n"); exit(exit_code); } static struct aws_cli_option s_long_options[] = { {"cacert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'a'}, {"capath", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'b'}, {"cert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'c'}, {"key", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'e'}, {"connect-timeout", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'f'}, {"header", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'H'}, {"data", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'd'}, {"data-file", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'g'}, {"method", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'M'}, {"get", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'G'}, {"post", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'P'}, {"head", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'I'}, {"signing-lib", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'j'}, {"include", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'i'}, {"insecure", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'k'}, {"signing-func", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'l'}, {"signing-context", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'm'}, {"output", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'o'}, {"trace", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 't'}, {"verbose", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'v'}, {"version", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'V'}, {"http2", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'w'}, {"http1_1", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'W'}, {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, 
}; static int s_parse_signing_context( struct aws_hash_table *signing_context, struct aws_allocator *allocator, const char *context_argument) { (void)signing_context; (void)context_argument; char *delimiter = memchr(context_argument, ':', strlen(context_argument)); if (!delimiter) { fprintf(stderr, "invalid signing context line \"%s\".", context_argument); exit(1); } struct aws_string *key = aws_string_new_from_array(allocator, (const uint8_t *)context_argument, delimiter - context_argument); struct aws_string *value = aws_string_new_from_array(allocator, (const uint8_t *)delimiter + 1, strlen(delimiter + 1)); if (key == NULL || value == NULL) { fprintf(stderr, "failure allocating signing context kv pair"); exit(1); } aws_hash_table_put(signing_context, key, value, NULL); return AWS_OP_SUCCESS; } static void s_parse_options(int argc, char **argv, struct elasticurl_ctx *ctx) { bool uri_found = false; while (true) { int option_index = 0; int c = aws_cli_getopt_long(argc, argv, "a:b:c:e:f:H:d:g:j:l:m:M:GPHiko:t:v:VwWh", s_long_options, &option_index); if (c == -1) { break; } switch (c) { case 0: /* getopt_long() returns 0 if an option.flag is non-null */ break; case 'a': ctx->cacert = aws_cli_optarg; break; case 'b': ctx->capath = aws_cli_optarg; break; case 'c': ctx->cert = aws_cli_optarg; break; case 'e': ctx->key = aws_cli_optarg; break; case 'f': ctx->connect_timeout = atoi(aws_cli_optarg); break; case 'H': if (ctx->header_line_count >= sizeof(ctx->header_lines) / sizeof(const char *)) { fprintf(stderr, "currently only 10 header lines are supported.\n"); s_usage(1); } ctx->header_lines[ctx->header_line_count++] = aws_cli_optarg; break; case 'd': { struct aws_byte_cursor data_cursor = aws_byte_cursor_from_c_str(aws_cli_optarg); ctx->input_body = aws_input_stream_new_from_cursor(ctx->allocator, &data_cursor); break; } case 'g': ctx->input_file = fopen(aws_cli_optarg, "rb"); ctx->input_body = aws_input_stream_new_from_open_file(ctx->allocator, ctx->input_file); if (!ctx->input_file) { fprintf(stderr, "unable to open file %s.\n", aws_cli_optarg); s_usage(1); } break; case 'j': ctx->signing_library_path = aws_cli_optarg; if (aws_shared_library_init(&ctx->signing_library, aws_cli_optarg)) { fprintf(stderr, "unable to open signing library %s.\n", aws_cli_optarg); s_usage(1); } break; case 'l': ctx->signing_function_name = aws_cli_optarg; break; case 'm': if (s_parse_signing_context(&ctx->signing_context, ctx->allocator, aws_cli_optarg)) { fprintf(stderr, "error parsing signing context \"%s\"\n", aws_cli_optarg); s_usage(1); } break; case 'M': ctx->verb = aws_cli_optarg; break; case 'G': ctx->verb = "GET"; break; case 'P': ctx->verb = "POST"; break; case 'I': ctx->verb = "HEAD"; break; case 'i': ctx->include_headers = true; break; case 'k': ctx->insecure = true; break; case 'o': ctx->output = fopen(aws_cli_optarg, "wb"); if (!ctx->output) { fprintf(stderr, "unable to open file %s.\n", aws_cli_optarg); s_usage(1); } break; case 't': ctx->trace_file = aws_cli_optarg; break; case 'v': if (!strcmp(aws_cli_optarg, "TRACE")) { ctx->log_level = AWS_LL_TRACE; } else if (!strcmp(aws_cli_optarg, "INFO")) { ctx->log_level = AWS_LL_INFO; } else if (!strcmp(aws_cli_optarg, "DEBUG")) { ctx->log_level = AWS_LL_DEBUG; } else if (!strcmp(aws_cli_optarg, "ERROR")) { ctx->log_level = AWS_LL_ERROR; } else { fprintf(stderr, "unsupported log level %s.\n", aws_cli_optarg); s_usage(1); } break; case 'V': fprintf(stderr, "elasticurl %s\n", ELASTICURL_VERSION); exit(0); case 'w': ctx->alpn = "h2"; 
ctx->required_http_version = AWS_HTTP_VERSION_2; break; case 'W': ctx->alpn = "http/1.1"; ctx->required_http_version = AWS_HTTP_VERSION_1_1; break; case 'h': s_usage(0); break; case 0x02: { struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str(aws_cli_positional_arg); if (aws_uri_init_parse(&ctx->uri, ctx->allocator, &uri_cursor)) { fprintf( stderr, "Failed to parse uri %s with error %s\n", (char *)uri_cursor.ptr, aws_error_debug_str(aws_last_error())); s_usage(1); } uri_found = true; } break; default: fprintf(stderr, "Unknown option\n"); s_usage(1); } } if (ctx->signing_function_name != NULL) { if (ctx->signing_library_path == NULL) { fprintf( stderr, "To sign a request made by Elasticurl you must supply both a signing library path and a signing " "function name\n"); s_usage(1); } if (aws_shared_library_find_function( &ctx->signing_library, ctx->signing_function_name, (aws_generic_function *)&ctx->signing_function)) { fprintf( stderr, "Unable to find function %s in signing library %s", ctx->signing_function_name, ctx->signing_library_path); s_usage(1); } } if (ctx->input_body == NULL) { struct aws_byte_cursor empty_cursor; AWS_ZERO_STRUCT(empty_cursor); ctx->input_body = aws_input_stream_new_from_cursor(ctx->allocator, &empty_cursor); } if (!uri_found) { fprintf(stderr, "A URI for the request must be supplied.\n"); s_usage(1); } } static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct elasticurl_ctx *app_ctx = user_data; fwrite(data->ptr, 1, data->len, app_ctx->output); return AWS_OP_SUCCESS; } static int s_on_incoming_headers_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { struct elasticurl_ctx *app_ctx = user_data; (void)app_ctx; (void)stream; /* Ignore informational headers */ if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) { return AWS_OP_SUCCESS; } if (app_ctx->include_headers) { if (!app_ctx->response_code_written) { int status = 0; aws_http_stream_get_incoming_response_status(stream, &status); fprintf(stdout, "Response Status: %d\n", status); app_ctx->response_code_written = true; } for (size_t i = 0; i < num_headers; ++i) { fwrite(header_array[i].name.ptr, 1, header_array[i].name.len, stdout); fprintf(stdout, ": "); fwrite(header_array[i].value.ptr, 1, header_array[i].value.len, stdout); fprintf(stdout, "\n"); } } return AWS_OP_SUCCESS; } static int s_on_incoming_header_block_done_fn( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)stream; (void)header_block; (void)user_data; return AWS_OP_SUCCESS; } static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) { (void)error_code; (void)user_data; aws_http_stream_release(stream); } static struct aws_http_message *s_build_http_request( struct elasticurl_ctx *app_ctx, enum aws_http_version protocol_version) { struct aws_http_message *request = protocol_version == AWS_HTTP_VERSION_2 ? 
aws_http2_message_new_request(app_ctx->allocator) : aws_http_message_new_request(app_ctx->allocator); if (request == NULL) { fprintf(stderr, "failed to allocate request\n"); exit(1); } aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str(app_ctx->verb)); if (app_ctx->uri.path_and_query.len != 0) { aws_http_message_set_request_path(request, app_ctx->uri.path_and_query); } else { aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/")); } if (protocol_version == AWS_HTTP_VERSION_2) { struct aws_http_headers *h2_headers = aws_http_message_get_headers(request); aws_http2_headers_set_request_scheme(h2_headers, app_ctx->uri.scheme); aws_http2_headers_set_request_authority(h2_headers, app_ctx->uri.host_name); } else { struct aws_http_header host_header = { .name = aws_byte_cursor_from_c_str("host"), .value = app_ctx->uri.host_name, }; aws_http_message_add_header(request, host_header); } struct aws_http_header accept_header = { .name = aws_byte_cursor_from_c_str("accept"), .value = aws_byte_cursor_from_c_str("*/*"), }; aws_http_message_add_header(request, accept_header); struct aws_http_header user_agent_header = { .name = aws_byte_cursor_from_c_str("user-agent"), .value = aws_byte_cursor_from_c_str("elasticurl 1.0, Powered by the AWS Common Runtime."), }; aws_http_message_add_header(request, user_agent_header); if (app_ctx->input_body) { int64_t data_len = 0; if (aws_input_stream_get_length(app_ctx->input_body, &data_len)) { fprintf(stderr, "failed to get length of input stream.\n"); exit(1); } if (data_len > 0) { char content_length[64]; AWS_ZERO_ARRAY(content_length); snprintf(content_length, sizeof(content_length), "%" PRIi64, data_len); struct aws_http_header content_length_header = { .name = aws_byte_cursor_from_c_str("content-length"), .value = aws_byte_cursor_from_c_str(content_length), }; aws_http_message_add_header(request, content_length_header); aws_http_message_set_body_stream(request, app_ctx->input_body); } } AWS_ASSERT(app_ctx->header_line_count <= 10); for (size_t i = 0; i < app_ctx->header_line_count; ++i) { char *delimiter = memchr(app_ctx->header_lines[i], ':', strlen(app_ctx->header_lines[i])); if (!delimiter) { fprintf(stderr, "invalid header line %s configured.", app_ctx->header_lines[i]); exit(1); } struct aws_http_header custom_header = { .name = aws_byte_cursor_from_array(app_ctx->header_lines[i], delimiter - app_ctx->header_lines[i]), .value = aws_byte_cursor_from_c_str(delimiter + 1), }; aws_http_message_add_header(request, custom_header); } return request; } static void s_on_signing_complete(struct aws_http_message *request, int error_code, void *user_data); static void s_on_client_connection_setup(struct aws_http_connection *connection, int error_code, void *user_data) { struct elasticurl_ctx *app_ctx = user_data; if (error_code) { fprintf(stderr, "Connection failed with error %s\n", aws_error_debug_str(error_code)); aws_mutex_lock(&app_ctx->mutex); app_ctx->exchange_completed = true; aws_mutex_unlock(&app_ctx->mutex); aws_condition_variable_notify_all(&app_ctx->c_var); return; } if (app_ctx->required_http_version) { if (aws_http_connection_get_version(connection) != app_ctx->required_http_version) { fprintf(stderr, "Error. The requested HTTP version, %s, is not supported by the peer.", app_ctx->alpn); exit(1); } } app_ctx->connection = connection; app_ctx->request = s_build_http_request(app_ctx, aws_http_connection_get_version(connection)); /* If async signing function is set, invoke it. 
It must invoke the signing complete callback when it's done. */ if (app_ctx->signing_function) { app_ctx->signing_function(app_ctx->request, &app_ctx->signing_context, s_on_signing_complete, app_ctx); } else { /* If no signing function, proceed immediately to next step. */ s_on_signing_complete(app_ctx->request, AWS_ERROR_SUCCESS, app_ctx); } } static void s_on_signing_complete(struct aws_http_message *request, int error_code, void *user_data) { struct elasticurl_ctx *app_ctx = user_data; AWS_FATAL_ASSERT(request == app_ctx->request); if (error_code) { fprintf(stderr, "Signing failure\n"); exit(1); } struct aws_http_make_request_options final_request = { .self_size = sizeof(final_request), .user_data = app_ctx, .request = app_ctx->request, .on_response_headers = s_on_incoming_headers_fn, .on_response_header_block_done = s_on_incoming_header_block_done_fn, .on_response_body = s_on_incoming_body_fn, .on_complete = s_on_stream_complete_fn, }; app_ctx->response_code_written = false; struct aws_http_stream *stream = aws_http_connection_make_request(app_ctx->connection, &final_request); if (!stream) { fprintf(stderr, "failed to create request."); exit(1); } aws_http_stream_activate(stream); /* Connection will stay alive until stream completes */ aws_http_connection_release(app_ctx->connection); app_ctx->connection = NULL; } static void s_on_client_connection_shutdown(struct aws_http_connection *connection, int error_code, void *user_data) { (void)error_code; (void)connection; struct elasticurl_ctx *app_ctx = user_data; aws_mutex_lock(&app_ctx->mutex); app_ctx->exchange_completed = true; aws_mutex_unlock(&app_ctx->mutex); aws_condition_variable_notify_all(&app_ctx->c_var); } static bool s_completion_predicate(void *arg) { struct elasticurl_ctx *app_ctx = arg; return app_ctx->exchange_completed; } int main(int argc, char **argv) { struct aws_allocator *allocator = aws_default_allocator(); aws_http_library_init(allocator); struct elasticurl_ctx app_ctx; AWS_ZERO_STRUCT(app_ctx); app_ctx.allocator = allocator; app_ctx.c_var = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; app_ctx.connect_timeout = 3000; app_ctx.output = stdout; app_ctx.verb = "GET"; app_ctx.alpn = "h2;http/1.1"; aws_mutex_init(&app_ctx.mutex); aws_hash_table_init( &app_ctx.signing_context, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, aws_hash_callback_string_destroy); s_parse_options(argc, argv, &app_ctx); struct aws_logger logger; AWS_ZERO_STRUCT(logger); if (app_ctx.log_level) { struct aws_logger_standard_options options = { .level = app_ctx.log_level, }; if (app_ctx.trace_file) { options.filename = app_ctx.trace_file; } else { options.file = stderr; } if (aws_logger_init_standard(&logger, allocator, &options)) { fprintf(stderr, "Failed to initialize logger with error %s\n", aws_error_debug_str(aws_last_error())); exit(1); } aws_logger_set(&logger); } bool use_tls = true; uint32_t port = 443; if (!app_ctx.uri.scheme.len && (app_ctx.uri.port == 80 || app_ctx.uri.port == 8080)) { use_tls = false; } else { if (aws_byte_cursor_eq_c_str_ignore_case(&app_ctx.uri.scheme, "http")) { use_tls = false; } } struct aws_tls_ctx *tls_ctx = NULL; struct aws_tls_ctx_options tls_ctx_options; AWS_ZERO_STRUCT(tls_ctx_options); struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); struct aws_tls_connection_options *tls_options = NULL; if (use_tls) { if (app_ctx.cert && app_ctx.key) { if (aws_tls_ctx_options_init_client_mtls_from_path( 
&tls_ctx_options, allocator, app_ctx.cert, app_ctx.key)) { fprintf( stderr, "Failed to load %s and %s with error %s.", app_ctx.cert, app_ctx.key, aws_error_debug_str(aws_last_error())); exit(1); } } #ifdef _WIN32 else if (app_ctx.cert && !app_ctx.key) { aws_tls_ctx_options_init_client_mtls_from_system_path(&tls_ctx_options, allocator, app_ctx.cert); } #endif else { aws_tls_ctx_options_init_default_client(&tls_ctx_options, allocator); } if (app_ctx.capath || app_ctx.cacert) { if (aws_tls_ctx_options_override_default_trust_store_from_path( &tls_ctx_options, app_ctx.capath, app_ctx.cacert)) { fprintf( stderr, "Failed to load %s and %s with error %s", app_ctx.capath, app_ctx.cacert, aws_error_debug_str(aws_last_error())); exit(1); } } if (app_ctx.insecure) { aws_tls_ctx_options_set_verify_peer(&tls_ctx_options, false); } if (aws_tls_ctx_options_set_alpn_list(&tls_ctx_options, app_ctx.alpn)) { fprintf(stderr, "Failed to load alpn list with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); if (!tls_ctx) { fprintf(stderr, "Failed to initialize TLS context with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } aws_tls_connection_options_init_from_ctx(&tls_connection_options, tls_ctx); if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &app_ctx.uri.host_name)) { fprintf(stderr, "Failed to set servername with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } tls_options = &tls_connection_options; if (app_ctx.uri.port) { port = app_ctx.uri.port; } } else { port = 80; if (app_ctx.uri.port) { port = app_ctx.uri.port; } } struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .connect_timeout_ms = (uint32_t)app_ctx.connect_timeout, .keep_alive_timeout_sec = 0, .keepalive = false, .keep_alive_interval_sec = 0, }; struct aws_http_client_connection_options http_client_options = { .self_size = sizeof(struct aws_http_client_connection_options), .socket_options = &socket_options, .allocator = allocator, .port = port, .host_name = app_ctx.uri.host_name, .bootstrap = bootstrap, .initial_window_size = SIZE_MAX, .tls_options = tls_options, .user_data = &app_ctx, .on_setup = s_on_client_connection_setup, .on_shutdown = s_on_client_connection_shutdown, }; if (app_ctx.required_http_version == AWS_HTTP_VERSION_2 && !use_tls) { /* Use prior knowledge to connect */ http_client_options.prior_knowledge_http2 = true; } aws_http_client_connect(&http_client_options); aws_mutex_lock(&app_ctx.mutex); aws_condition_variable_wait_pred(&app_ctx.c_var, &app_ctx.mutex, s_completion_predicate, &app_ctx); aws_mutex_unlock(&app_ctx.mutex); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); if (tls_ctx) { aws_tls_connection_options_clean_up(&tls_connection_options); aws_tls_ctx_release(tls_ctx); aws_tls_ctx_options_clean_up(&tls_ctx_options); } aws_http_library_clean_up(); if (app_ctx.log_level) { 
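        /* the logger was only initialized above when a log level was requested, so it is only cleaned up in that case */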
aws_logger_clean_up(&logger); } aws_uri_clean_up(&app_ctx.uri); aws_http_message_destroy(app_ctx.request); aws_shared_library_clean_up(&app_ctx.signing_library); if (app_ctx.output != stdout) { fclose(app_ctx.output); } if (app_ctx.input_body) { aws_input_stream_release(app_ctx.input_body); } if (app_ctx.input_file) { fclose(app_ctx.input_file); } aws_hash_table_clean_up(&app_ctx.signing_context); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/builder.json000066400000000000000000000012331456575232400225270ustar00rootroot00000000000000{ "name": "aws-c-http", "targets": { "android": { "enabled": false, "_comment": "disabled until we need to support it. LibCrypto needs to be configured on build machine." } }, "upstream": [ { "name": "aws-c-io" }, { "name": "aws-c-compression" } ], "downstream": [ { "name": "aws-c-auth" }, { "name": "aws-c-mqtt" }, { "name": "aws-c-s3" } ], "pre_build_steps": ["local-server-setup"], "test_steps": [ "aws-c-http-test", ["{python}", "{source_dir}/integration-testing/http_client_test.py", "{install_dir}/bin/elasticurl{exe}"] ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/cmake/000077500000000000000000000000001456575232400212675ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/cmake/aws-c-http-config.cmake000066400000000000000000000011341456575232400255220ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-io) find_dependency(aws-c-compression) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-http/codebuild/000077500000000000000000000000001456575232400221415ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/codebuild/linux-integration-tests.yml000066400000000000000000000025241456575232400275070ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 phases: install: commands: - add-apt-repository ppa:ubuntu-toolchain-r/test - apt-get update -y - apt-get install gcc-7 cmake ninja-build python3 -y pre_build: commands: - export CC=gcc-7 - export BUILDER_VERSION=$(cat .github/workflows/ci.yml | grep 'BUILDER_VERSION:' | sed 's/\s*BUILDER_VERSION:\s*\(.*\)/\1/') - export BUILDER_SOURCE=$(cat .github/workflows/ci.yml | grep 'BUILDER_SOURCE:' | sed 's/\s*BUILDER_SOURCE:\s*\(.*\)/\1/') - echo "Using builder version='${BUILDER_VERSION}' source='${BUILDER_SOURCE}'" - export BUILDER_HOST=https://d19elf31gohf1l.cloudfront.net build: commands: - echo Build started on `date` - aws s3 cp s3://aws-crt-test-stuff/setup_proxy_test_env_h2.sh /tmp/setup_proxy_test_env.sh - chmod a+xr /tmp/setup_proxy_test_env.sh - python3 -c "from urllib.request import urlretrieve; urlretrieve('$BUILDER_HOST/$BUILDER_SOURCE/$BUILDER_VERSION/builder.pyz', 'builder.pyz')" - python3 builder.pyz build -p aws-c-http --cmake-extra=-DENABLE_PROXY_INTEGRATION_TESTS=ON --cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON --coverage post_build: commands: - echo Build completed on `date` 
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/format-check.sh000077500000000000000000000007671456575232400231230ustar00rootroot00000000000000#!/bin/bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find bin source include tests -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/000077500000000000000000000000001456575232400216325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/000077500000000000000000000000001456575232400224245ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/000077500000000000000000000000001456575232400234035ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/connection.h000066400000000000000000000645251456575232400257270ustar00rootroot00000000000000#ifndef AWS_HTTP_CONNECTION_H #define AWS_HTTP_CONNECTION_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_client_bootstrap; struct aws_socket_options; struct aws_socket_endpoint; struct aws_tls_connection_options; struct aws_http2_setting; struct proxy_env_var_settings; /** * An HTTP connection. * This type is used by both server-side and client-side connections. * This type is also used by all supported versions of HTTP. */ struct aws_http_connection; /** * Invoked when connect completes. * * If unsuccessful, error_code will be set, connection will be NULL, * and the on_shutdown callback will never be invoked. * * If successful, error_code will be 0 and connection will be valid. * The user is now responsible for the connection and must * call aws_http_connection_release() when they are done with it. * * The connection uses one event-loop thread to do all its work. * The thread invoking this callback will be the same thread that invokes all * future callbacks for this connection and its streams. */ typedef void( aws_http_on_client_connection_setup_fn)(struct aws_http_connection *connection, int error_code, void *user_data); /** * Invoked when the connection has finished shutting down. * Never invoked if on_setup failed. * This is always invoked on connection's event-loop thread. * Note that the connection is not completely done until on_shutdown has been invoked * AND aws_http_connection_release() has been called. */ typedef void( aws_http_on_client_connection_shutdown_fn)(struct aws_http_connection *connection, int error_code, void *user_data); /** * Invoked when the HTTP/2 settings change is complete. * If connection setup successfully this will always be invoked whether settings change successfully or unsuccessfully. * If error_code is AWS_ERROR_SUCCESS (0), then the peer has acknowledged the settings and the change has been applied. * If error_code is non-zero, then a connection error occurred before the settings could be fully acknowledged and * applied. This is always invoked on the connection's event-loop thread. */ typedef void(aws_http2_on_change_settings_complete_fn)( struct aws_http_connection *http2_connection, int error_code, void *user_data); /** * Invoked when the HTTP/2 PING completes, whether peer has acknowledged it or not. 
* If error_code is AWS_ERROR_SUCCESS (0), then the peer has acknowledged the PING and round_trip_time_ns will be the * round trip time in nano seconds for the connection. * If error_code is non-zero, then a connection error occurred before the PING get acknowledgment and round_trip_time_ns * will be useless in this case. */ typedef void(aws_http2_on_ping_complete_fn)( struct aws_http_connection *http2_connection, uint64_t round_trip_time_ns, int error_code, void *user_data); /** * Invoked when an HTTP/2 GOAWAY frame is received from peer. * Implies that the peer has initiated shutdown, or encountered a serious error. * Once a GOAWAY is received, no further streams may be created on this connection. * * @param http2_connection This HTTP/2 connection. * @param last_stream_id ID of the last locally-initiated stream that peer will * process. Any locally-initiated streams with a higher ID are ignored by * peer, and are safe to retry on another connection. * @param http2_error_code The HTTP/2 error code (RFC-7540 section 7) sent by peer. * `enum aws_http2_error_code` lists official codes. * @param debug_data The debug data sent by peer. It can be empty. (NOTE: this data is only valid for the lifetime of * the callback. Make a deep copy if you wish to keep it longer.) * @param user_data User-data passed to the callback. */ typedef void(aws_http2_on_goaway_received_fn)( struct aws_http_connection *http2_connection, uint32_t last_stream_id, uint32_t http2_error_code, struct aws_byte_cursor debug_data, void *user_data); /** * Invoked when new HTTP/2 settings from peer have been applied. * Settings_array is the array of aws_http2_settings that contains all the settings we just changed in the order we * applied (the order settings arrived). Num_settings is the number of elements in that array. */ typedef void(aws_http2_on_remote_settings_change_fn)( struct aws_http_connection *http2_connection, const struct aws_http2_setting *settings_array, size_t num_settings, void *user_data); /** * Callback invoked on each statistics sample. * * connection_nonce is unique to each connection for disambiguation of each callback per connection. */ typedef void( aws_http_statistics_observer_fn)(size_t connection_nonce, const struct aws_array_list *stats_list, void *user_data); /** * Configuration options for connection monitoring */ struct aws_http_connection_monitoring_options { /** * minimum required throughput of the connection. Throughput is only measured against the interval of time where * there is actual io to perform. Read and write throughput are measured and checked independently of one another. */ uint64_t minimum_throughput_bytes_per_second; /* * amount of time, in seconds, throughput is allowed to drop below the minimum before the connection is shut down * as unhealthy. */ uint32_t allowable_throughput_failure_interval_seconds; /** * invoked on each statistics publish by the underlying IO channel. Install this callback to receive the statistics * for observation. This field is optional. */ aws_http_statistics_observer_fn *statistics_observer_fn; /** * user_data to be passed to statistics_observer_fn. */ void *statistics_observer_user_data; }; /** * Options specific to HTTP/1.x connections. */ struct aws_http1_connection_options { /** * Optional * Capacity in bytes of the HTTP/1 connection's read buffer. * The buffer grows if the flow-control window of the incoming HTTP-stream * reaches zero. 
If the buffer reaches capacity, no further socket data is * read until the HTTP-stream's window opens again, allowing data to resume flowing. * * Ignored if `manual_window_management` is false. * If zero is specified (the default) then a default capacity is chosen. * A capacity that is too small may hinder throughput. * A capacity that is too big may waste memory without helping throughput. */ size_t read_buffer_capacity; }; /** * Options specific to HTTP/2 connections. */ struct aws_http2_connection_options { /** * Optional * The data of settings to change for initial settings. * Note: each setting has its boundary. If settings_array is not set, num_settings has to be 0 to send an empty * SETTINGS frame. */ struct aws_http2_setting *initial_settings_array; /** * Required * The num of settings to change (Length of the initial_settings_array). */ size_t num_initial_settings; /** * Optional. * Invoked when the HTTP/2 initial settings change is complete. * If failed to setup the connection, this will not be invoked. * Otherwise, this will be invoked, whether settings change successfully or unsuccessfully. * See `aws_http2_on_change_settings_complete_fn`. */ aws_http2_on_change_settings_complete_fn *on_initial_settings_completed; /** * Optional * The max number of recently-closed streams to remember. * Set it to zero to use the default setting, AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS * * If the connection receives a frame for a closed stream, * the frame will be ignored or cause a connection error, * depending on the frame type and how the stream was closed. * Remembering more streams reduces the chances that a late frame causes * a connection error, but costs some memory. */ size_t max_closed_streams; /** * Optional. * Invoked when a valid GOAWAY frame received. * See `aws_http2_on_goaway_received_fn`. */ aws_http2_on_goaway_received_fn *on_goaway_received; /** * Optional. * Invoked when new settings from peer have been applied. * See `aws_http2_on_remote_settings_change_fn`. */ aws_http2_on_remote_settings_change_fn *on_remote_settings_change; /** * Optional. * Set to true to manually manage the flow-control window of whole HTTP/2 connection. * * If false, the connection will maintain its flow-control windows such that * no back-pressure is applied and data arrives as fast as possible. * * If true, the flow-control window of the whole connection will shrink as body data * is received (headers, padding, and other metadata do not affect the window) for every streams * created on this connection. * The initial connection flow-control window is 65,535. * Once the connection's flow-control window reaches to 0, all the streams on the connection stop receiving any * further data. * The user must call aws_http2_connection_update_window() to increment the connection's * window and keep data flowing. * Note: the padding of data frame counts to the flow-control window. * But, the client will always automatically update the window for padding even for manual window update. */ bool conn_manual_window_management; }; /** * Options for creating an HTTP client connection. * Initialize with AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT to set default values. */ struct aws_http_client_connection_options { /** * The sizeof() this struct, used for versioning. * Set by AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT. */ size_t self_size; /** * Required. * Must outlive the connection. */ struct aws_allocator *allocator; /** * Required. * The connection keeps the bootstrap alive via ref-counting. 
*/ struct aws_client_bootstrap *bootstrap; /** * Required. * aws_http_client_connect() makes a copy. */ struct aws_byte_cursor host_name; /** * Required. */ uint32_t port; /** * Required. * aws_http_client_connect() makes a copy. */ const struct aws_socket_options *socket_options; /** * Optional. * aws_http_client_connect() deep-copies all contents, * and keeps `aws_tls_ctx` alive via ref-counting. */ const struct aws_tls_connection_options *tls_options; /** * Optional * Configuration options related to http proxy usage. * Relevant fields are copied internally. */ const struct aws_http_proxy_options *proxy_options; /* * Optional. * Configuration for using proxy from environment variable. * Only works when proxy_options is not set. */ const struct proxy_env_var_settings *proxy_ev_settings; /** * Optional * Configuration options related to connection health monitoring */ const struct aws_http_connection_monitoring_options *monitoring_options; /** * Optional (ignored if 0). * After a request is fully sent, if the server does not begin responding within N milliseconds, * then fail with AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT. * This can be overridden per-request by aws_http_make_request_options.response_first_byte_timeout_ms. * TODO: Only supported in HTTP/1.1 now, support it in HTTP/2 */ uint64_t response_first_byte_timeout_ms; /** * Set to true to manually manage the flow-control window of each stream. * * If false, the connection will maintain its flow-control windows such that * no back-pressure is applied and data arrives as fast as possible. * * If true, the flow-control window of each stream will shrink as body data * is received (headers, padding, and other metadata do not affect the window). * `initial_window_size` determines the starting size of each stream's window for HTTP/1 stream, while HTTP/2 stream * will use the settings AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE to inform the other side about read back pressure * * If a stream's flow-control window reaches 0, no further data will be received. The user must call * aws_http_stream_update_window() to increment the stream's window and keep data flowing. * * If a HTTP/2 connection created, it will ONLY control the stream window * management. Connection window management is controlled by * conn_manual_window_management. Note: the padding of data frame counts to the flow-control window. * But, the client will always automatically update the window for padding even for manual window update. */ bool manual_window_management; /** * The starting size of each HTTP stream's flow-control window for HTTP/1 connection. * Required if `manual_window_management` is true, * ignored if `manual_window_management` is false. * * Always ignored when HTTP/2 connection created. The initial window size is controlled by the settings, * `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE` */ size_t initial_window_size; /** * User data for callbacks * Optional. */ void *user_data; /** * Invoked when connect completes. * Required. * See `aws_http_on_client_connection_setup_fn`. */ aws_http_on_client_connection_setup_fn *on_setup; /** * Invoked when the connection has finished shutting down. * Never invoked if setup failed. * Optional. * See `aws_http_on_client_connection_shutdown_fn`. */ aws_http_on_client_connection_shutdown_fn *on_shutdown; /** * Optional. * When true, use prior knowledge to set up an HTTP/2 connection on a cleartext * connection. 
* When TLS is set and this is true, the connection will failed to be established, * as prior knowledge only works for cleartext TLS. * Refer to RFC7540 3.4 */ bool prior_knowledge_http2; /** * Optional. * Pointer to the hash map containing the ALPN string to protocol to use. * Hash from `struct aws_string *` to `enum aws_http_version`. * If not set, only the predefined string `h2` and `http/1.1` will be recognized. Other negotiated ALPN string will * result in a HTTP1/1 connection * Note: Connection will keep a deep copy of the table and the strings. */ struct aws_hash_table *alpn_string_map; /** * Options specific to HTTP/1.x connections. * Optional. * Ignored if connection is not HTTP/1.x. * If connection is HTTP/1.x and options were not specified, default values are used. */ const struct aws_http1_connection_options *http1_options; /** * Options specific to HTTP/2 connections. * Optional. * Ignored if connection is not HTTP/2. * If connection is HTTP/2 and options were not specified, default values are used. */ const struct aws_http2_connection_options *http2_options; /** * Optional. * Requests the channel/connection be bound to a specific event loop rather than chosen sequentially from the * event loop group associated with the client bootstrap. */ struct aws_event_loop *requested_event_loop; /** * Optional * Host resolution override that allows the user to override DNS behavior for this particular connection. */ const struct aws_host_resolution_config *host_resolution_config; }; /* Predefined settings identifiers (RFC-7540 6.5.2) */ enum aws_http2_settings_id { AWS_HTTP2_SETTINGS_BEGIN_RANGE = 0x1, /* Beginning of known values */ AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE = 0x1, AWS_HTTP2_SETTINGS_ENABLE_PUSH = 0x2, AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS = 0x3, AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE = 0x4, AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE = 0x5, AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE = 0x6, AWS_HTTP2_SETTINGS_END_RANGE, /* End of known values */ }; /* A HTTP/2 setting and its value, used in SETTINGS frame */ struct aws_http2_setting { enum aws_http2_settings_id id; uint32_t value; }; /** * HTTP/2: Default value for max closed streams we will keep in memory. */ #define AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS (32) /** * HTTP/2: The size of payload for HTTP/2 PING frame. */ #define AWS_HTTP2_PING_DATA_SIZE (8) /** * HTTP/2: The number of known settings. */ #define AWS_HTTP2_SETTINGS_COUNT (6) /** * Initializes aws_http_client_connection_options with default values. */ #define AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT \ { .self_size = sizeof(struct aws_http_client_connection_options), .initial_window_size = SIZE_MAX, } AWS_EXTERN_C_BEGIN /** * Asynchronously establish a client connection. * The on_setup callback is invoked when the operation has created a connection or failed. */ AWS_HTTP_API int aws_http_client_connect(const struct aws_http_client_connection_options *options); /** * Users must release the connection when they are done with it. * The connection's memory cannot be reclaimed until this is done. * If the connection was not already shutting down, it will be shut down. * * Users should always wait for the on_shutdown() callback to be called before releasing any data passed to the * http_connection (Eg aws_tls_connection_options, aws_socket_options) otherwise there will be race conditions between * http_connection shutdown tasks and memory release tasks, causing Segfaults. 
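 *
 * A hedged ordering sketch (not a definitive recipe; the surrounding setup code is assumed, not shown):
 *
 *     // 1. when done issuing requests, drop the application's reference:
 *     aws_http_connection_release(connection);
 *     // 2. wait for the on_shutdown callback supplied at connect time to fire
 *     // 3. only then clean up the aws_tls_connection_options / aws_socket_options
 *     //    that were passed to aws_http_client_connect()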
*/ AWS_HTTP_API void aws_http_connection_release(struct aws_http_connection *connection); /** * Begin shutdown sequence of the connection if it hasn't already started. This will schedule shutdown tasks on the * EventLoop that may send HTTP/TLS/TCP shutdown messages to peers if necessary, and will eventually cause internal * connection memory to stop being accessed and on_shutdown() callback to be called. * * It's safe to call this function regardless of the connection state as long as you hold a reference to the connection. */ AWS_HTTP_API void aws_http_connection_close(struct aws_http_connection *connection); /** * Stop accepting new requests for the connection. It will NOT start the shutdown process for the connection. The * requests that are already open can still wait to be completed, but new requests will fail to be created, */ AWS_HTTP_API void aws_http_connection_stop_new_requests(struct aws_http_connection *connection); /** * Returns true unless the connection is closed or closing. */ AWS_HTTP_API bool aws_http_connection_is_open(const struct aws_http_connection *connection); /** * Return whether the connection can make a new requests. * If false, then a new connection must be established to make further requests. */ AWS_HTTP_API bool aws_http_connection_new_requests_allowed(const struct aws_http_connection *connection); /** * Returns true if this is a client connection. */ AWS_HTTP_API bool aws_http_connection_is_client(const struct aws_http_connection *connection); AWS_HTTP_API enum aws_http_version aws_http_connection_get_version(const struct aws_http_connection *connection); /** * Returns the channel hosting the HTTP connection. * Do not expose this function to language bindings. */ AWS_HTTP_API struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection *connection); /** * Returns the remote endpoint of the HTTP connection. */ AWS_HTTP_API const struct aws_socket_endpoint *aws_http_connection_get_remote_endpoint(const struct aws_http_connection *connection); /** * Initialize an map copied from the *src map, which maps `struct aws_string *` to `enum aws_http_version`. */ AWS_HTTP_API int aws_http_alpn_map_init_copy( struct aws_allocator *allocator, struct aws_hash_table *dest, struct aws_hash_table *src); /** * Initialize an empty hash-table that maps `struct aws_string *` to `enum aws_http_version`. * This map can used in aws_http_client_connections_options.alpn_string_map. */ AWS_HTTP_API int aws_http_alpn_map_init(struct aws_allocator *allocator, struct aws_hash_table *map); /** * Checks http proxy options for correctness */ AWS_HTTP_API int aws_http_options_validate_proxy_configuration(const struct aws_http_client_connection_options *options); /** * Send a SETTINGS frame (HTTP/2 only). * SETTINGS will be applied locally when SETTINGS ACK is received from peer. * * @param http2_connection HTTP/2 connection. * @param settings_array The array of settings to change. Note: each setting has its boundary. * @param num_settings The num of settings to change in settings_array. * @param on_completed Optional callback, see `aws_http2_on_change_settings_complete_fn`. * @param user_data User-data pass to on_completed callback. */ AWS_HTTP_API int aws_http2_connection_change_settings( struct aws_http_connection *http2_connection, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data); /** * Send a PING frame (HTTP/2 only). 
* Round-trip-time is calculated when PING ACK is received from peer. * * @param http2_connection HTTP/2 connection. * @param optional_opaque_data Optional payload for PING frame. * Must be NULL, or exactly 8 bytes (AWS_HTTP2_PING_DATA_SIZE). * If NULL, the 8 byte payload will be all zeroes. * @param on_completed Optional callback, invoked when PING ACK is received from peer, * or when a connection error prevents the PING ACK from being received. * Callback always fires on the connection's event-loop thread. * @param user_data User-data pass to on_completed callback. */ AWS_HTTP_API int aws_http2_connection_ping( struct aws_http_connection *http2_connection, const struct aws_byte_cursor *optional_opaque_data, aws_http2_on_ping_complete_fn *on_completed, void *user_data); /** * Get the local settings we are using to affect the decoding. * * @param http2_connection HTTP/2 connection. * @param out_settings fixed size array of aws_http2_setting gets set to the local settings */ AWS_HTTP_API void aws_http2_connection_get_local_settings( const struct aws_http_connection *http2_connection, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]); /** * Get the settings received from remote peer, which we are using to restricts the message to send. * * @param http2_connection HTTP/2 connection. * @param out_settings fixed size array of aws_http2_setting gets set to the remote settings */ AWS_HTTP_API void aws_http2_connection_get_remote_settings( const struct aws_http_connection *http2_connection, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]); /** * Send a custom GOAWAY frame (HTTP/2 only). * * Note that the connection automatically attempts to send a GOAWAY during * shutdown (unless a GOAWAY with a valid Last-Stream-ID has already been sent). * * This call can be used to gracefully warn the peer of an impending shutdown * (http2_error=0, allow_more_streams=true), or to customize the final GOAWAY * frame that is sent by this connection. * * The other end may not receive the goaway, if the connection already closed. * * @param http2_connection HTTP/2 connection. * @param http2_error The HTTP/2 error code (RFC-7540 section 7) to send. * `enum aws_http2_error_code` lists official codes. * @param allow_more_streams If true, new peer-initiated streams will continue * to be acknowledged and the GOAWAY's Last-Stream-ID will be set to a max value. * If false, new peer-initiated streams will be ignored and the GOAWAY's * Last-Stream-ID will be set to the latest acknowledged stream. * @param optional_debug_data Optional debug data to send. Size must not exceed 16KB. */ AWS_HTTP_API void aws_http2_connection_send_goaway( struct aws_http_connection *http2_connection, uint32_t http2_error, bool allow_more_streams, const struct aws_byte_cursor *optional_debug_data); /** * Get data about the latest GOAWAY frame sent to peer (HTTP/2 only). * If no GOAWAY has been sent, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE will be raised. * Note that GOAWAY frames are typically sent automatically by the connection * during shutdown. * * @param http2_connection HTTP/2 connection. * @param out_http2_error Gets set to HTTP/2 error code sent in most recent GOAWAY. * @param out_last_stream_id Gets set to Last-Stream-ID sent in most recent GOAWAY. */ AWS_HTTP_API int aws_http2_connection_get_sent_goaway( struct aws_http_connection *http2_connection, uint32_t *out_http2_error, uint32_t *out_last_stream_id); /** * Get data about the latest GOAWAY frame received from peer (HTTP/2 only). 
* If no GOAWAY has been received, or the GOAWAY payload is still in transmitting, * AWS_ERROR_HTTP_DATA_NOT_AVAILABLE will be raised. * * @param http2_connection HTTP/2 connection. * @param out_http2_error Gets set to HTTP/2 error code received in most recent GOAWAY. * @param out_last_stream_id Gets set to Last-Stream-ID received in most recent GOAWAY. */ AWS_HTTP_API int aws_http2_connection_get_received_goaway( struct aws_http_connection *http2_connection, uint32_t *out_http2_error, uint32_t *out_last_stream_id); /** * Increment the connection's flow-control window to keep data flowing (HTTP/2 only). * * If the connection was created with `conn_manual_window_management` set true, * the flow-control window of the connection will shrink as body data is received for all the streams created on it. * (headers, padding, and other metadata do not affect the window). * The initial connection flow-control window is 65,535. * Once the connection's flow-control window reaches to 0, all the streams on the connection stop receiving any further * data. * * If `conn_manual_window_management` is false, this call will have no effect. * The connection maintains its flow-control windows such that * no back-pressure is applied and data arrives as fast as possible. * * If you are not connected, this call will have no effect. * * Crashes when the connection is not http2 connection. * The limit of the Maximum Size is 2**31 - 1. If the increment size cause the connection flow window exceeds the * Maximum size, this call will result in the connection lost. * * @param http2_connection HTTP/2 connection. * @param increment_size The size to increment for the connection's flow control window */ AWS_HTTP_API void aws_http2_connection_update_window(struct aws_http_connection *http2_connection, uint32_t increment_size); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_CONNECTION_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/connection_manager.h000066400000000000000000000156131456575232400274130ustar00rootroot00000000000000#ifndef AWS_HTTP_CONNECTION_MANAGER_H #define AWS_HTTP_CONNECTION_MANAGER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_client_bootstrap; struct aws_http_connection; struct aws_http_connection_manager; struct aws_socket_options; struct aws_tls_connection_options; struct proxy_env_var_settings; struct aws_http2_setting; typedef void(aws_http_connection_manager_on_connection_setup_fn)( struct aws_http_connection *connection, int error_code, void *user_data); typedef void(aws_http_connection_manager_shutdown_complete_fn)(void *user_data); /** * Metrics for logging and debugging purpose. */ struct aws_http_manager_metrics { /** * The number of additional concurrent requests that can be supported by the HTTP manager without needing to * establish additional connections to the target server. * * For connection manager, it equals to connections that's idle. * For stream manager, it equals to the number of streams that are possible to be made without creating new * connection, although the implementation can create new connection without fully filling it. */ size_t available_concurrency; /* The number of requests that are awaiting concurrency to be made available from the HTTP manager. */ size_t pending_concurrency_acquires; /* The number of connections (http/1.1) or streams (for h2 via. stream manager) currently vended to user. 
*/ size_t leased_concurrency; }; /* * Connection manager configuration struct. * * Contains all of the configuration needed to create an http connection as well as * the maximum number of connections to ever have in existence. */ struct aws_http_connection_manager_options { /* * http connection configuration, check `struct aws_http_client_connection_options` for details of each config */ struct aws_client_bootstrap *bootstrap; size_t initial_window_size; const struct aws_socket_options *socket_options; /** * Options to create secure (HTTPS) connections. * For secure connections, set "h2" in the ALPN string for HTTP/2, otherwise HTTP/1.1 is used. * * Leave NULL to create cleartext (HTTP) connections. * For cleartext connections, use `http2_prior_knowledge` (RFC-7540 3.4) * to control whether that are treated as HTTP/1.1 or HTTP/2. */ const struct aws_tls_connection_options *tls_connection_options; /** * Specify whether you have prior knowledge that cleartext (HTTP) connections are HTTP/2 (RFC-7540 3.4). * If false, then cleartext connections are treated as HTTP/1.1. * It is illegal to set this true when secure connections are being used. * Note that upgrading from HTTP/1.1 to HTTP/2 is not supported (RFC-7540 3.2). */ bool http2_prior_knowledge; const struct aws_http_connection_monitoring_options *monitoring_options; struct aws_byte_cursor host; uint32_t port; /** * Optional. * HTTP/2 specific configuration. Check `struct aws_http2_connection_options` for details of each config */ const struct aws_http2_setting *initial_settings_array; size_t num_initial_settings; size_t max_closed_streams; bool http2_conn_manual_window_management; /* Proxy configuration for http connection */ const struct aws_http_proxy_options *proxy_options; /* * Optional. * Configuration for using proxy from environment variable. * Only works when proxy_options is not set. */ const struct proxy_env_var_settings *proxy_ev_settings; /* * Maximum number of connections this manager is allowed to contain */ size_t max_connections; /* * Callback and associated user data to invoke when the connection manager has * completely shutdown and has finished deleting itself. * Technically optional, but correctness may be impossible without it. */ void *shutdown_complete_user_data; aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback; /** * If set to true, the read back pressure mechanism will be enabled. */ bool enable_read_back_pressure; /** * If set to a non-zero value, then connections that stay in the pool longer than the specified * timeout will be closed automatically. */ uint64_t max_connection_idle_in_milliseconds; }; AWS_EXTERN_C_BEGIN /* * Connection managers are ref counted. Adds one external ref to the manager. */ AWS_HTTP_API void aws_http_connection_manager_acquire(struct aws_http_connection_manager *manager); /* * Connection managers are ref counted. Removes one external ref from the manager. * * When the ref count goes to zero, the connection manager begins its shut down * process. All pending connection acquisitions are failed (with callbacks * invoked) and any (erroneous) subsequent attempts to acquire a connection * fail immediately. The connection manager destroys itself once all pending * asynchronous activities have resolved. */ AWS_HTTP_API void aws_http_connection_manager_release(struct aws_http_connection_manager *manager); /* * Creates a new connection manager with the supplied configuration options. * * The returned connection manager begins with a ref count of 1. 
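 *
 * Minimal usage sketch (illustrative only; the concrete option values are assumptions, not recommendations,
 * and error handling is omitted):
 *
 *     struct aws_http_connection_manager_options options = {
 *         .bootstrap = bootstrap,
 *         .socket_options = &socket_options,
 *         .host = aws_byte_cursor_from_c_str("example.com"),
 *         .port = 80,
 *         .max_connections = 4,
 *     };
 *     struct aws_http_connection_manager *manager = aws_http_connection_manager_new(allocator, &options);
 *     // vend connections with aws_http_connection_manager_acquire_connection(),
 *     // return them with aws_http_connection_manager_release_connection(),
 *     // and drop the last reference with aws_http_connection_manager_release().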
*/ AWS_HTTP_API struct aws_http_connection_manager *aws_http_connection_manager_new( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options); /* * Requests a connection from the manager. The requester is notified of * an acquired connection (or failure to acquire) via the supplied callback. * * For HTTP/2 connections, the callback will not fire until the server's settings have been received. * * Once a connection has been successfully acquired from the manager it * must be released back (via aws_http_connection_manager_release_connection) * at some point. Failure to do so will cause a resource leak. */ AWS_HTTP_API void aws_http_connection_manager_acquire_connection( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data); /* * Returns a connection back to the manager. All acquired connections must * eventually be released back to the manager in order to avoid a resource leak. * * Note: it can lead to another acquired callback to be invoked within the thread. */ AWS_HTTP_API int aws_http_connection_manager_release_connection( struct aws_http_connection_manager *manager, struct aws_http_connection *connection); /** * Fetch the current manager metrics from connection manager. */ AWS_HTTP_API void aws_http_connection_manager_fetch_metrics( const struct aws_http_connection_manager *manager, struct aws_http_manager_metrics *out_metrics); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_CONNECTION_MANAGER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/exports.h000066400000000000000000000016301456575232400252600ustar00rootroot00000000000000#ifndef AWS_HTTP_EXPORTS_H #define AWS_HTTP_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_HTTP_USE_IMPORT_EXPORT # ifdef AWS_HTTP_EXPORTS # define AWS_HTTP_API __declspec(dllexport) # else # define AWS_HTTP_API __declspec(dllimport) # endif /* AWS_HTTP_EXPORTS */ # else # define AWS_HTTP_API # endif /* USE_IMPORT_EXPORT */ #else # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_HTTP_USE_IMPORT_EXPORT) && defined(AWS_HTTP_EXPORTS) # define AWS_HTTP_API __attribute__((visibility("default"))) # else # define AWS_HTTP_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */ #endif /* AWS_HTTP_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/http.h000066400000000000000000000126711456575232400245420ustar00rootroot00000000000000#ifndef AWS_HTTP_H #define AWS_HTTP_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_HTTP_PACKAGE_ID 2 enum aws_http_errors { AWS_ERROR_HTTP_UNKNOWN = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID), AWS_ERROR_HTTP_HEADER_NOT_FOUND, AWS_ERROR_HTTP_INVALID_HEADER_FIELD, AWS_ERROR_HTTP_INVALID_HEADER_NAME, AWS_ERROR_HTTP_INVALID_HEADER_VALUE, AWS_ERROR_HTTP_INVALID_METHOD, AWS_ERROR_HTTP_INVALID_PATH, AWS_ERROR_HTTP_INVALID_STATUS_CODE, AWS_ERROR_HTTP_MISSING_BODY_STREAM, AWS_ERROR_HTTP_INVALID_BODY_STREAM, AWS_ERROR_HTTP_CONNECTION_CLOSED, AWS_ERROR_HTTP_SWITCHED_PROTOCOLS, AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL, AWS_ERROR_HTTP_REACTION_REQUIRED, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE, AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT, AWS_ERROR_HTTP_CALLBACK_FAILURE, AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE, AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT, AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER, AWS_ERROR_HTTP_CONNECTION_MANAGER_INVALID_STATE_FOR_ACQUIRE, AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW, AWS_ERROR_HTTP_SERVER_CLOSED, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN, AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE, AWS_ERROR_HTTP_PROTOCOL_ERROR, AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED, AWS_ERROR_HTTP_GOAWAY_RECEIVED, AWS_ERROR_HTTP_RST_STREAM_RECEIVED, AWS_ERROR_HTTP_RST_STREAM_SENT, AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED, AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING, AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE, AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE, AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED, AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN, AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE, AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION, AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED, AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT, AWS_ERROR_HTTP_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_HTTP_PACKAGE_ID) }; /* Error codes that may be present in HTTP/2 RST_STREAM and GOAWAY frames (RFC-7540 7). */ enum aws_http2_error_code { AWS_HTTP2_ERR_NO_ERROR = 0x00, AWS_HTTP2_ERR_PROTOCOL_ERROR = 0x01, AWS_HTTP2_ERR_INTERNAL_ERROR = 0x02, AWS_HTTP2_ERR_FLOW_CONTROL_ERROR = 0x03, AWS_HTTP2_ERR_SETTINGS_TIMEOUT = 0x04, AWS_HTTP2_ERR_STREAM_CLOSED = 0x05, AWS_HTTP2_ERR_FRAME_SIZE_ERROR = 0x06, AWS_HTTP2_ERR_REFUSED_STREAM = 0x07, AWS_HTTP2_ERR_CANCEL = 0x08, AWS_HTTP2_ERR_COMPRESSION_ERROR = 0x09, AWS_HTTP2_ERR_CONNECT_ERROR = 0x0A, AWS_HTTP2_ERR_ENHANCE_YOUR_CALM = 0x0B, AWS_HTTP2_ERR_INADEQUATE_SECURITY = 0x0C, AWS_HTTP2_ERR_HTTP_1_1_REQUIRED = 0x0D, AWS_HTTP2_ERR_COUNT, }; enum aws_http_log_subject { AWS_LS_HTTP_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID), AWS_LS_HTTP_CONNECTION, AWS_LS_HTTP_ENCODER, AWS_LS_HTTP_DECODER, AWS_LS_HTTP_SERVER, AWS_LS_HTTP_STREAM, AWS_LS_HTTP_CONNECTION_MANAGER, AWS_LS_HTTP_STREAM_MANAGER, AWS_LS_HTTP_WEBSOCKET, AWS_LS_HTTP_WEBSOCKET_SETUP, AWS_LS_HTTP_PROXY_NEGOTIATION, }; enum aws_http_version { AWS_HTTP_VERSION_UNKNOWN, /* Invalid version. */ AWS_HTTP_VERSION_1_0, AWS_HTTP_VERSION_1_1, AWS_HTTP_VERSION_2, AWS_HTTP_VERSION_COUNT, }; AWS_EXTERN_C_BEGIN /** * Initializes internal datastructures used by aws-c-http. * Must be called before using any functionality in aws-c-http. */ AWS_HTTP_API void aws_http_library_init(struct aws_allocator *alloc); /** * Clean up internal datastructures used by aws-c-http. 
* Must not be called until application is done using functionality in aws-c-http. */ AWS_HTTP_API void aws_http_library_clean_up(void); /** * Returns the description of common status codes. * Ex: 404 -> "Not Found" * An empty string is returned if the status code is not recognized. */ AWS_HTTP_API const char *aws_http_status_text(int status_code); /** * Shortcuts for common HTTP request methods */ AWS_HTTP_API extern const struct aws_byte_cursor aws_http_method_get; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_method_head; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_method_post; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_method_put; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_method_delete; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_method_connect; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_method_options; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_method; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_scheme; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_authority; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_path; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_status; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_scheme_http; AWS_HTTP_API extern const struct aws_byte_cursor aws_http_scheme_https; AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/http2_stream_manager.h000066400000000000000000000176621456575232400276760ustar00rootroot00000000000000#ifndef AWS_HTTP2_STREAM_MANAGER_H #define AWS_HTTP2_STREAM_MANAGER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_http2_stream_manager; struct aws_client_bootstrap; struct aws_http_connection; struct aws_http_connection_manager; struct aws_socket_options; struct aws_tls_connection_options; struct proxy_env_var_settings; struct aws_http2_setting; struct aws_http_make_request_options; struct aws_http_stream; struct aws_http_manager_metrics; /** * Always invoked asynchronously when the stream was created, successfully or not. * When stream is NULL, error code will be set to indicate what happened. * If there is a stream returned, you own the stream completely. * Invoked on the same thread as other callback of the stream, which will be the thread of the connection, ideally. * If there is no connection made, the callback will be invoked from a sperate thread. */ typedef void( aws_http2_stream_manager_on_stream_acquired_fn)(struct aws_http_stream *stream, int error_code, void *user_data); /** * Invoked asynchronously when the stream manager has been shutdown completely. * Never invoked when `aws_http2_stream_manager_new` failed. */ typedef void(aws_http2_stream_manager_shutdown_complete_fn)(void *user_data); /** * HTTP/2 stream manager configuration struct. * * Contains all of the configuration needed to create an http2 connection as well as * connection manager under the hood. */ struct aws_http2_stream_manager_options { /** * basic http connection configuration */ struct aws_client_bootstrap *bootstrap; const struct aws_socket_options *socket_options; /** * Options to create secure (HTTPS) connections. * For secure connections, the ALPN string must be "h2". * * To create cleartext (HTTP) connections, leave this NULL * and set `http2_prior_knowledge` (RFC-7540 3.4). 
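 *
 * Illustrative sketch (not from this codebase): an HTTPS stream manager points this field at
 * TLS options whose ALPN list was configured elsewhere to "h2" (here the assumed variable
 * `tls_options`), while a cleartext HTTP/2 stream manager leaves it NULL and relies on prior
 * knowledge instead. Other required members (bootstrap, socket_options, host, port,
 * max_connections, shutdown callback) are omitted for brevity.
 *
 *     struct aws_http2_stream_manager_options secure_options = {
 *         .tls_connection_options = &tls_options,
 *         .http2_prior_knowledge = false,
 *     };
 *
 *     struct aws_http2_stream_manager_options cleartext_options = {
 *         .tls_connection_options = NULL,
 *         .http2_prior_knowledge = true,
 *     };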
*/ const struct aws_tls_connection_options *tls_connection_options; /** * Specify whether you have prior knowledge that cleartext (HTTP) connections are HTTP/2 (RFC-7540 3.4). * It is illegal to set this true when secure connections are being used. * Note that upgrading from HTTP/1.1 to HTTP/2 is not supported (RFC-7540 3.2). */ bool http2_prior_knowledge; struct aws_byte_cursor host; uint32_t port; /** * Optional. * HTTP/2 connection configuration. Check `struct aws_http2_connection_options` for details of each config. * Notes for window control: * - By default, the client will maintain its flow-control windows such that no back-pressure is applied and data * arrives as fast as possible. * - For connection level window control, `conn_manual_window_management` will enable manual control. The * initial window size is not controllable. * - For stream level window control, `enable_read_back_pressure` will enable manual control. The initial window * size needs to be set through `initial_settings_array`. */ const struct aws_http2_setting *initial_settings_array; size_t num_initial_settings; size_t max_closed_streams; bool conn_manual_window_management; /** * HTTP/2 Stream window control. * If set to true, the read back pressure mechanism will be enabled for streams created. * The initial window size can be set by `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE` via `initial_settings_array` */ bool enable_read_back_pressure; /* Connection monitor for the underlying connections made */ const struct aws_http_connection_monitoring_options *monitoring_options; /* Optional. Proxy configuration for underlying http connection */ const struct aws_http_proxy_options *proxy_options; const struct proxy_env_var_settings *proxy_ev_settings; /** * Required. * When the stream manager finishes deleting all the resources, the callback will be invoked. */ void *shutdown_complete_user_data; aws_http2_stream_manager_shutdown_complete_fn *shutdown_complete_callback; /** * Optional. * When set, the connection will be closed if a 5xx response is received from the server. */ bool close_connection_on_server_error; /** * Optional. * The period, in milliseconds, at which all the connections held by the stream manager send a PING. * If you specify 0, the manager will NOT send any PING. * Note: if set, it must be larger than the ping timeout setting. */ size_t connection_ping_period_ms; /** * Optional. * Network connection will be closed if a ping response is not received * within this amount of time (milliseconds). * If you specify 0, a default value will be used. */ size_t connection_ping_timeout_ms; /* TODO: More flexible policy about the connections, but will always have these three values below. */ /** * Optional. * 0 will be considered as using a default value. * The ideal number of concurrent streams for a connection. Stream manager will try to create a new connection if * one connection reaches this number. But, if the max connection count is reached, the manager will reuse connections to create * the acquired streams as much as possible. */ size_t ideal_concurrent_streams_per_connection; /** * Optional. * Default is no limit, which will use the limit from the server. 0 will be considered as using the default value. * The real number of concurrent streams per connection will be controlled by the minimum of the setting from the * other end and the value here. */ size_t max_concurrent_streams_per_connection; /** * Required. * The max number of connections that will be open at the same time.
If all the connections are full, manager will wait until * available to vender more streams */ size_t max_connections; }; struct aws_http2_stream_manager_acquire_stream_options { /** * Required. * Invoked when the stream finishes acquiring by stream manager. */ aws_http2_stream_manager_on_stream_acquired_fn *callback; /** * Optional. * User data for the callback. */ void *user_data; /* Required. see `aws_http_make_request_options` */ const struct aws_http_make_request_options *options; }; AWS_EXTERN_C_BEGIN /** * Acquire a refcount from the stream manager, stream manager will start to destroy after the refcount drops to zero. * NULL is acceptable. Initial refcount after new is 1. * * @param manager * @return The same pointer acquiring. */ AWS_HTTP_API struct aws_http2_stream_manager *aws_http2_stream_manager_acquire(struct aws_http2_stream_manager *manager); /** * Release a refcount from the stream manager, stream manager will start to destroy after the refcount drops to zero. * NULL is acceptable. Initial refcount after new is 1. * * @param manager * @return NULL */ AWS_HTTP_API struct aws_http2_stream_manager *aws_http2_stream_manager_release(struct aws_http2_stream_manager *manager); AWS_HTTP_API struct aws_http2_stream_manager *aws_http2_stream_manager_new( struct aws_allocator *allocator, const struct aws_http2_stream_manager_options *options); /** * Acquire a stream from stream manager asynchronously. * * @param http2_stream_manager * @param acquire_stream_option see `aws_http2_stream_manager_acquire_stream_options` */ AWS_HTTP_API void aws_http2_stream_manager_acquire_stream( struct aws_http2_stream_manager *http2_stream_manager, const struct aws_http2_stream_manager_acquire_stream_options *acquire_stream_option); /** * Fetch the current metrics from stream manager. * * @param http2_stream_manager * @param out_metrics The metrics to be fetched */ AWS_HTTP_API void aws_http2_stream_manager_fetch_metrics( const struct aws_http2_stream_manager *http2_stream_manager, struct aws_http_manager_metrics *out_metrics); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP2_STREAM_MANAGER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/000077500000000000000000000000001456575232400250555ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/connection_impl.h000066400000000000000000000200521456575232400304050ustar00rootroot00000000000000#ifndef AWS_HTTP_CONNECTION_IMPL_H #define AWS_HTTP_CONNECTION_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include struct aws_http_message; struct aws_http_make_request_options; struct aws_http_request_handler_options; struct aws_http_stream; /* vtable of functions that aws_http_connection uses to interact with external systems. * tests override the vtable to mock those systems */ struct aws_http_connection_system_vtable { int (*aws_client_bootstrap_new_socket_channel)(struct aws_socket_channel_bootstrap_options *options); }; struct aws_http_connection_vtable { struct aws_channel_handler_vtable channel_handler_vtable; /* This is a callback I wish was in aws_channel_handler_vtable. 
*/ void (*on_channel_handler_installed)(struct aws_channel_handler *handler, struct aws_channel_slot *slot); struct aws_http_stream *(*make_request)( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); struct aws_http_stream *(*new_server_request_handler_stream)( const struct aws_http_request_handler_options *options); int (*stream_send_response)(struct aws_http_stream *stream, struct aws_http_message *response); void (*close)(struct aws_http_connection *connection); void (*stop_new_requests)(struct aws_http_connection *connection); bool (*is_open)(const struct aws_http_connection *connection); bool (*new_requests_allowed)(const struct aws_http_connection *connection); /* HTTP/2 specific functions */ void (*update_window)(struct aws_http_connection *connection, uint32_t increment_size); int (*change_settings)( struct aws_http_connection *http2_connection, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data); int (*send_ping)( struct aws_http_connection *http2_connection, const struct aws_byte_cursor *optional_opaque_data, aws_http2_on_ping_complete_fn *on_completed, void *user_data); void (*send_goaway)( struct aws_http_connection *http2_connection, uint32_t http2_error, bool allow_more_streams, const struct aws_byte_cursor *optional_debug_data); int (*get_sent_goaway)( struct aws_http_connection *http2_connection, uint32_t *out_http2_error, uint32_t *out_last_stream_id); int (*get_received_goaway)( struct aws_http_connection *http2_connection, uint32_t *out_http2_error, uint32_t *out_last_stream_id); void (*get_local_settings)( const struct aws_http_connection *http2_connection, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]); void (*get_remote_settings)( const struct aws_http_connection *http2_connection, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]); }; typedef int(aws_http_proxy_request_transform_fn)(struct aws_http_message *request, void *user_data); /** * Base class for connections. * There are specific implementations for each HTTP version. */ struct aws_http_connection { const struct aws_http_connection_vtable *vtable; struct aws_channel_handler channel_handler; struct aws_channel_slot *channel_slot; struct aws_allocator *alloc; enum aws_http_version http_version; aws_http_proxy_request_transform_fn *proxy_request_transform; void *user_data; /* Connection starts with 1 hold for the user. * aws_http_streams will also acquire holds on their connection for the duration of their lifetime */ struct aws_atomic_var refcount; /* Starts at either 1 or 2, increments by two with each new stream */ uint32_t next_stream_id; union { struct aws_http_connection_client_data { uint64_t response_first_byte_timeout_ms; } client; struct aws_http_connection_server_data { aws_http_on_incoming_request_fn *on_incoming_request; aws_http_on_server_connection_shutdown_fn *on_shutdown; } server; } client_or_server_data; /* On client connections, `client_data` points to client_or_server_data.client and `server_data` is null. * Opposite is true on server connections */ struct aws_http_connection_client_data *client_data; struct aws_http_connection_server_data *server_data; bool stream_manual_window_management; }; /* Gets a client connection up and running. * Responsible for firing on_setup and on_shutdown callbacks. 
*/ struct aws_http_client_bootstrap { struct aws_allocator *alloc; bool is_using_tls; bool stream_manual_window_management; bool prior_knowledge_http2; size_t initial_window_size; struct aws_http_connection_monitoring_options monitoring_options; void *user_data; aws_http_on_client_connection_setup_fn *on_setup; aws_http_on_client_connection_shutdown_fn *on_shutdown; aws_http_proxy_request_transform_fn *proxy_request_transform; uint64_t response_first_byte_timeout_ms; struct aws_http1_connection_options http1_options; struct aws_http2_connection_options http2_options; /* allocated with bootstrap */ struct aws_hash_table *alpn_string_map; /* allocated with bootstrap */ struct aws_http_connection *connection; }; AWS_EXTERN_C_BEGIN AWS_HTTP_API void aws_http_client_bootstrap_destroy(struct aws_http_client_bootstrap *bootstrap); AWS_HTTP_API void aws_http_connection_set_system_vtable(const struct aws_http_connection_system_vtable *system_vtable); AWS_HTTP_API int aws_http_client_connect_internal( const struct aws_http_client_connection_options *options, aws_http_proxy_request_transform_fn *proxy_request_transform); /** * Internal API for adding a reference to a connection */ AWS_HTTP_API void aws_http_connection_acquire(struct aws_http_connection *connection); /** * Allow tests to fake stats data */ AWS_HTTP_API struct aws_crt_statistics_http1_channel *aws_h1_connection_get_statistics(struct aws_http_connection *connection); /** * Gets the next available stream id within the connection. Valid for creating both h1 and h2 streams. * * This function is not thread-safe. * * Returns 0 if there was an error. */ AWS_HTTP_API uint32_t aws_http_connection_get_next_stream_id(struct aws_http_connection *connection); /** * Layers an http channel handler/connection onto a channel. Moved from internal to private so that the proxy * logic could apply a new http connection/handler after tunneling proxy negotiation (into http) is finished. * This is a synchronous operation. * * @param alloc memory allocator to use * @param channel channel to apply the http handler/connection to * @param is_server should the handler behave like an http server * @param is_using_tls is tls is being used (do an alpn check of the to-the-left channel handler) * @param manual_window_management is manual window management enabled * @param prior_knowledge_http2 prior knowledge about http2 connection to be used * @param initial_window_size what should the initial window size be * @param alpn_string_map the customized ALPN string map from `struct aws_string *` to `enum aws_http_version`. * @param http1_options http1 options * @param http2_options http2 options * @return a new http connection or NULL on failure */ AWS_HTTP_API struct aws_http_connection *aws_http_connection_new_channel_handler( struct aws_allocator *alloc, struct aws_channel *channel, bool is_server, bool is_using_tls, bool manual_window_management, bool prior_knowledge_http2, size_t initial_window_size, const struct aws_hash_table *alpn_string_map, const struct aws_http1_connection_options *http1_options, const struct aws_http2_connection_options *http2_options, void *connection_user_data); AWS_EXTERN_C_END #endif /* AWS_HTTP_CONNECTION_IMPL_H */ connection_manager_system_vtable.h000066400000000000000000000034031456575232400337410ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private#ifndef AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H #define AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H /** * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct aws_http_connection_manager; /* vtable of functions that aws_http_connection_manager uses to interact with external systems. * tests override the vtable to mock those systems */ struct aws_http_connection_manager_system_vtable { /* * Downstream http functions */ int (*aws_http_client_connect)(const struct aws_http_client_connection_options *options); void (*aws_http_connection_close)(struct aws_http_connection *connection); void (*aws_http_connection_release)(struct aws_http_connection *connection); bool (*aws_http_connection_new_requests_allowed)(const struct aws_http_connection *connection); int (*aws_high_res_clock_get_ticks)(uint64_t *timestamp); bool (*aws_channel_thread_is_callers_thread)(struct aws_channel *channel); struct aws_channel *(*aws_http_connection_get_channel)(struct aws_http_connection *connection); enum aws_http_version (*aws_http_connection_get_version)(const struct aws_http_connection *connection); }; AWS_HTTP_API bool aws_http_connection_manager_system_vtable_is_valid(const struct aws_http_connection_manager_system_vtable *table); AWS_HTTP_API void aws_http_connection_manager_set_system_vtable( struct aws_http_connection_manager *manager, const struct aws_http_connection_manager_system_vtable *system_vtable); AWS_HTTP_API extern const struct aws_http_connection_manager_system_vtable *g_aws_http_connection_manager_default_system_vtable_ptr; #endif /* AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/connection_monitor.h000066400000000000000000000024161456575232400311370ustar00rootroot00000000000000#ifndef AWS_HTTP_HTTP_MONITOR_H #define AWS_HTTP_HTTP_MONITOR_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct aws_allocator; struct aws_crt_statistics_handler; /* * Needed by tests */ struct aws_statistics_handler_http_connection_monitor_impl { struct aws_http_connection_monitoring_options options; uint64_t throughput_failure_time_ms; uint32_t last_incoming_stream_id; uint32_t last_outgoing_stream_id; uint64_t last_measured_throughput; }; AWS_EXTERN_C_BEGIN /** * Creates a new http connection monitor that regularly checks the connection's throughput and shuts the connection * down if the a minimum threshold is not met for a configurable number of seconds. */ AWS_HTTP_API struct aws_crt_statistics_handler *aws_crt_statistics_handler_new_http_connection_monitor( struct aws_allocator *allocator, struct aws_http_connection_monitoring_options *options); /** * Validates monitoring options to ensure they are sensible */ AWS_HTTP_API bool aws_http_connection_monitoring_options_is_valid(const struct aws_http_connection_monitoring_options *options); AWS_EXTERN_C_END #endif /* AWS_HTTP_HTTP_MONITOR_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h1_connection.h000066400000000000000000000175331456575232400277660ustar00rootroot00000000000000#ifndef AWS_HTTP_H1_CONNECTION_H #define AWS_HTTP_H1_CONNECTION_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4214) /* nonstandard extension used: bit field types other than int */ #endif struct aws_h1_connection { struct aws_http_connection base; size_t initial_stream_window_size; /* Task responsible for sending data. 
* As long as there is data available to send, the task will be "active" and repeatedly: * 1) Encode outgoing stream data to an aws_io_message and send it up the channel. * 2) Wait until the aws_io_message's write_complete callback fires. * 3) Reschedule the task to run again. * * `thread_data.is_outgoing_stream_task_active` tells whether the task is "active". * * If there is no data available to write (waiting for user to add more streams or chunks), * then the task stops being active. The task is made active again when the user * adds more outgoing data. */ struct aws_channel_task outgoing_stream_task; /* Task that removes items from `synced_data` and does their on-thread work. * Runs once and waits until it's scheduled again. * Any function that wants to schedule this task MUST: * - acquire the synced_data.lock * - check whether `synced_data.is_cross_thread_work_scheduled` was true or false. * - set `synced_data.is_cross_thread_work_scheduled = true` * - release synced_data.lock * - ONLY IF `synced_data.is_cross_thread_work_scheduled` CHANGED from false to true: * - then schedule the task */ struct aws_channel_task cross_thread_work_task; /* Only the event-loop thread may touch this data */ struct { /* List of streams being worked on. */ struct aws_linked_list stream_list; /* Points to the stream whose data is currently being sent. * This stream is ALWAYS in the `stream_list`. * HTTP pipelining is supported, so once the stream is completely written * we'll start working on the next stream in the list */ struct aws_h1_stream *outgoing_stream; /* Points to the stream being decoded. * This stream is ALWAYS in the `stream_list`. */ struct aws_h1_stream *incoming_stream; struct aws_h1_decoder *incoming_stream_decoder; /* Used to encode requests and responses */ struct aws_h1_encoder encoder; /** * All aws_io_messages arriving in the read direction are queued here before processing. * This allows the connection to receive more data than the current HTTP-stream might allow, * and process the data later when the HTTP-stream's window opens or the next stream begins. * * The `aws_io_message.copy_mark` is used to track progress on partially processed messages. * `pending_bytes` is the sum of all unprocessed bytes across all queued messages. * `capacity` is the limit for how many unprocessed bytes we'd like in the queue. */ struct { struct aws_linked_list messages; size_t pending_bytes; size_t capacity; } read_buffer; /** * The connection's current window size. * We use this variable, instead of the existing `aws_channel_slot.window_size`, * because that variable is not updated immediately; the channel uses a task to update it. * Since we use the difference between current and desired window size when deciding * how much to increment, we need the most up-to-date values possible. */ size_t connection_window; /* Only used by tests. Sum of window_increments issued by this slot. Resets each time it's queried */ size_t recent_window_increments; struct aws_crt_statistics_http1_channel stats; uint64_t outgoing_stream_timestamp_ns; uint64_t incoming_stream_timestamp_ns; /* True when reading and/or writing has stopped, whether due to errors or normal channel shutdown. */ bool is_reading_stopped : 1; bool is_writing_stopped : 1; /* If true, the connection has upgraded to another protocol. * It will pass data to adjacent channel handlers without altering it. * The connection can no longer service request/response streams. */ bool has_switched_protocols : 1; /* Server-only.
Request-handler streams can only be created while this is true. */ bool can_create_request_handler_stream : 1; /* see `outgoing_stream_task` */ bool is_outgoing_stream_task_active : 1; bool is_processing_read_messages : 1; } thread_data; /* Any thread may touch this data, but the lock must be held */ struct { struct aws_mutex lock; /* New client streams that have not been moved to `stream_list` yet. * This list is not used on servers. */ struct aws_linked_list new_client_stream_list; /* If non-zero, then window_update_task is scheduled */ size_t window_update_size; /* If non-zero, reason to immediately reject new streams. (ex: closing) */ int new_stream_error_code; /* See `cross_thread_work_task` */ bool is_cross_thread_work_task_scheduled : 1; /* For checking status from outside the event-loop thread. */ bool is_open : 1; } synced_data; }; /* Allow tests to check current window stats */ struct aws_h1_window_stats { size_t connection_window; size_t recent_window_increments; /* Resets to 0 each time window stats are queried*/ size_t buffer_capacity; size_t buffer_pending_bytes; uint64_t stream_window; bool has_incoming_stream; }; AWS_EXTERN_C_BEGIN /* The functions below are exported so they can be accessed from tests. */ AWS_HTTP_API struct aws_http_connection *aws_http_connection_new_http1_1_server( struct aws_allocator *allocator, bool manual_window_management, size_t initial_window_size, const struct aws_http1_connection_options *http1_options); AWS_HTTP_API struct aws_http_connection *aws_http_connection_new_http1_1_client( struct aws_allocator *allocator, bool manual_window_management, size_t initial_window_size, const struct aws_http1_connection_options *http1_options); /* Allow tests to check current window stats */ AWS_HTTP_API struct aws_h1_window_stats aws_h1_connection_window_stats(struct aws_http_connection *connection_base); AWS_EXTERN_C_END /* DO NOT export functions below. They're only used by other .c files in this library */ /* TODO: introduce naming conventions for private header functions */ void aws_h1_connection_lock_synced_data(struct aws_h1_connection *connection); void aws_h1_connection_unlock_synced_data(struct aws_h1_connection *connection); /** * Try to kick off the outgoing-stream-task. * If task is already active, nothing happens. * If there's nothing to do, the task will immediately stop itself. * Call this whenever the user provides new outgoing data (ex: new stream, new chunk). * MUST be called from the connection's event-loop thread. */ void aws_h1_connection_try_write_outgoing_stream(struct aws_h1_connection *connection); /** * If any read messages are queued, and the downstream window is non-zero, * process data and send it downstream. Then calculate the connection's * desired window size and increment it if necessary. * * During normal operations "downstream" means the current incoming stream. * If the connection has switched protocols "downstream" means the next * channel handler in the read direction. */ void aws_h1_connection_try_process_read_messages(struct aws_h1_connection *connection); #endif /* AWS_HTTP_H1_CONNECTION_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h1_decoder.h000066400000000000000000000065461456575232400272360ustar00rootroot00000000000000#ifndef AWS_HTTP_H1_DECODER_H #define AWS_HTTP_H1_DECODER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct aws_h1_decoded_header { /* Name of the header. 
If the type is `AWS_HTTP_HEADER_NAME_UNKNOWN` then `name_data` must be parsed manually. */ enum aws_http_header_name name; /* Raw buffer storing the header's name. */ struct aws_byte_cursor name_data; /* Raw buffer storing the header's value. */ struct aws_byte_cursor value_data; /* Raw buffer storing the entire header. */ struct aws_byte_cursor data; }; struct aws_h1_decoder_vtable { /** * Called from `aws_h*_decode` when an http header has been received. * All pointers are strictly *read only*; any data that needs to persist must be copied out into user-owned memory. */ int (*on_header)(const struct aws_h1_decoded_header *header, void *user_data); /** * Called from `aws_h1_decode` when a portion of the http body has been received. * `finished` is true if this is the last section of the http body, and false if more body data is yet to be * received. All pointers are strictly *read only*; any data that needs to persist must be copied out into * user-owned memory. */ int (*on_body)(const struct aws_byte_cursor *data, bool finished, void *user_data); /* Only needed for requests, can be NULL for responses. */ int (*on_request)( enum aws_http_method method_enum, const struct aws_byte_cursor *method_str, const struct aws_byte_cursor *uri, void *user_data); /* Only needed for responses, can be NULL for requests. */ int (*on_response)(int status_code, void *user_data); int (*on_done)(void *user_data); }; /** * Structure used to initialize an `aws_h1_decoder`. */ struct aws_h1_decoder_params { struct aws_allocator *alloc; size_t scratch_space_initial_size; /* Set false if decoding responses */ bool is_decoding_requests; void *user_data; struct aws_h1_decoder_vtable vtable; }; struct aws_h1_decoder; AWS_EXTERN_C_BEGIN AWS_HTTP_API struct aws_h1_decoder *aws_h1_decoder_new(struct aws_h1_decoder_params *params); AWS_HTTP_API void aws_h1_decoder_destroy(struct aws_h1_decoder *decoder); AWS_HTTP_API int aws_h1_decode(struct aws_h1_decoder *decoder, struct aws_byte_cursor *data); AWS_HTTP_API void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id); AWS_HTTP_API void aws_h1_decoder_set_body_headers_ignored(struct aws_h1_decoder *decoder, bool body_headers_ignored); /* RFC-7230 section 4.2 Message Format */ #define AWS_HTTP_TRANSFER_ENCODING_CHUNKED (1 << 0) #define AWS_HTTP_TRANSFER_ENCODING_GZIP (1 << 1) #define AWS_HTTP_TRANSFER_ENCODING_DEFLATE (1 << 2) #define AWS_HTTP_TRANSFER_ENCODING_DEPRECATED_COMPRESS (1 << 3) AWS_HTTP_API int aws_h1_decoder_get_encoding_flags(const struct aws_h1_decoder *decoder); AWS_HTTP_API uint64_t aws_h1_decoder_get_content_length(const struct aws_h1_decoder *decoder); AWS_HTTP_API bool aws_h1_decoder_get_body_headers_ignored(const struct aws_h1_decoder *decoder); AWS_HTTP_API enum aws_http_header_block aws_h1_decoder_get_header_block(const struct aws_h1_decoder *decoder); AWS_EXTERN_C_END #endif /* AWS_HTTP_H1_DECODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h1_encoder.h000066400000000000000000000114031456575232400272340ustar00rootroot00000000000000#ifndef AWS_HTTP_H1_ENCODER_H #define AWS_HTTP_H1_ENCODER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include struct aws_h1_chunk { struct aws_allocator *allocator; struct aws_input_stream *data; uint64_t data_size; aws_http1_stream_write_chunk_complete_fn *on_complete; void *user_data; struct aws_linked_list_node node; /* Buffer containing pre-encoded start line: chunk-size [chunk-ext] CRLF */ struct aws_byte_buf chunk_line; }; struct aws_h1_trailer { struct aws_allocator *allocator; struct aws_byte_buf trailer_data; }; /** * Message to be submitted to encoder. * Contains data necessary for encoder to write an outgoing request or response. */ struct aws_h1_encoder_message { /* Upon creation, the "head" (everything preceding body) is buffered here. */ struct aws_byte_buf outgoing_head_buf; /* Single stream used for unchunked body */ struct aws_input_stream *body; /* Pointer to list of `struct aws_h1_chunk`, used for chunked encoding. * List is owned by aws_h1_stream. * Encoder completes/frees/pops front chunk when it's done sending. * If list goes empty, encoder waits for more chunks to arrive. * A chunk with data_size=0 means "final chunk" */ struct aws_linked_list *pending_chunk_list; /* Pointer to chunked_trailer, used for chunked_trailer. */ struct aws_h1_trailer *trailer; /* If non-zero, length of unchunked body to send */ uint64_t content_length; bool has_connection_close_header; bool has_chunked_encoding_header; }; enum aws_h1_encoder_state { AWS_H1_ENCODER_STATE_INIT, AWS_H1_ENCODER_STATE_HEAD, AWS_H1_ENCODER_STATE_UNCHUNKED_BODY, AWS_H1_ENCODER_STATE_CHUNK_NEXT, AWS_H1_ENCODER_STATE_CHUNK_LINE, AWS_H1_ENCODER_STATE_CHUNK_BODY, AWS_H1_ENCODER_STATE_CHUNK_END, AWS_H1_ENCODER_STATE_CHUNK_TRAILER, AWS_H1_ENCODER_STATE_DONE, }; struct aws_h1_encoder { struct aws_allocator *allocator; enum aws_h1_encoder_state state; /* Current message being encoded */ struct aws_h1_encoder_message *message; /* Used by some states to track progress. Reset to 0 whenever state changes */ uint64_t progress_bytes; /* Current chunk */ struct aws_h1_chunk *current_chunk; /* Number of chunks sent, just used for logging */ size_t chunk_count; /* Encoder logs with this stream ptr as the ID, and passes this ptr to the chunk_complete callback */ struct aws_http_stream *current_stream; }; struct aws_h1_chunk *aws_h1_chunk_new(struct aws_allocator *allocator, const struct aws_http1_chunk_options *options); struct aws_h1_trailer *aws_h1_trailer_new( struct aws_allocator *allocator, const struct aws_http_headers *trailing_headers); void aws_h1_trailer_destroy(struct aws_h1_trailer *trailer); /* Just destroy the chunk (don't fire callback) */ void aws_h1_chunk_destroy(struct aws_h1_chunk *chunk); /* Destroy chunk and fire its completion callback */ void aws_h1_chunk_complete_and_destroy(struct aws_h1_chunk *chunk, struct aws_http_stream *http_stream, int error_code); int aws_chunk_line_from_options(struct aws_http1_chunk_options *options, struct aws_byte_buf *chunk_line); AWS_EXTERN_C_BEGIN /* Validate request and cache any info the encoder will need later in the "encoder message". 
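 *
 * Rough life-cycle sketch for orientation only (this is an internal API normally driven by the
 * connection code, not by library users; `allocator`, `request`, `stream`, and `out_buf` are
 * assumed to already exist):
 *
 *     struct aws_linked_list chunk_list;
 *     aws_linked_list_init(&chunk_list);
 *
 *     struct aws_h1_encoder encoder;
 *     aws_h1_encoder_init(&encoder, allocator);
 *
 *     struct aws_h1_encoder_message message;
 *     if (aws_h1_encoder_message_init_from_request(&message, allocator, request, &chunk_list) == AWS_OP_SUCCESS &&
 *         aws_h1_encoder_start_message(&encoder, &message, stream) == AWS_OP_SUCCESS) {
 *
 *         while (aws_h1_encoder_is_message_in_progress(&encoder)) {
 *             if (aws_h1_encoder_process(&encoder, &out_buf) != AWS_OP_SUCCESS) {
 *                 break;
 *             }
 *             (send the filled out_buf up the channel, then reset it before the next pass)
 *         }
 *     }
 *
 *     aws_h1_encoder_message_clean_up(&message);
 *     aws_h1_encoder_clean_up(&encoder);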
*/ AWS_HTTP_API int aws_h1_encoder_message_init_from_request( struct aws_h1_encoder_message *message, struct aws_allocator *allocator, const struct aws_http_message *request, struct aws_linked_list *pending_chunk_list); int aws_h1_encoder_message_init_from_response( struct aws_h1_encoder_message *message, struct aws_allocator *allocator, const struct aws_http_message *response, bool body_headers_ignored, struct aws_linked_list *pending_chunk_list); AWS_HTTP_API void aws_h1_encoder_message_clean_up(struct aws_h1_encoder_message *message); AWS_HTTP_API void aws_h1_encoder_init(struct aws_h1_encoder *encoder, struct aws_allocator *allocator); AWS_HTTP_API void aws_h1_encoder_clean_up(struct aws_h1_encoder *encoder); AWS_HTTP_API int aws_h1_encoder_start_message( struct aws_h1_encoder *encoder, struct aws_h1_encoder_message *message, struct aws_http_stream *stream); AWS_HTTP_API int aws_h1_encoder_process(struct aws_h1_encoder *encoder, struct aws_byte_buf *out_buf); AWS_HTTP_API bool aws_h1_encoder_is_message_in_progress(const struct aws_h1_encoder *encoder); /* Return true if the encoder is stuck waiting for more chunks to be added to the current message */ AWS_HTTP_API bool aws_h1_encoder_is_waiting_for_chunks(const struct aws_h1_encoder *encoder); AWS_EXTERN_C_END #endif /* AWS_HTTP_H1_ENCODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h1_stream.h000066400000000000000000000112441456575232400271130ustar00rootroot00000000000000#ifndef AWS_HTTP_H1_STREAM_H #define AWS_HTTP_H1_STREAM_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4214) /* nonstandard extension used: bit field types other than int */ #endif /* Simple view of stream's state. * Used to determine whether it's safe for a user to call functions that alter state. */ enum aws_h1_stream_api_state { AWS_H1_STREAM_API_STATE_INIT, AWS_H1_STREAM_API_STATE_ACTIVE, AWS_H1_STREAM_API_STATE_COMPLETE, }; struct aws_h1_stream { struct aws_http_stream base; struct aws_linked_list_node node; /* Task that removes items from `synced_data` and does their on-thread work. * Runs once and wait until it's scheduled again. * Any function that wants to schedule this task MUST: * - acquire the synced_data.lock * - check whether `synced_data.is_cross_thread_work_scheduled` was true or false. * - set `synced_data.is_cross_thread_work_scheduled = true` * - release synced_data.lock * - ONLY IF `synced_data.is_cross_thread_work_scheduled` CHANGED from false to true: * - increment the stream's refcount, to keep stream alive until task runs * - schedule the task */ struct aws_channel_task cross_thread_work_task; /* Message (derived from outgoing request or response) to be submitted to encoder */ struct aws_h1_encoder_message encoder_message; bool is_outgoing_message_done; bool is_incoming_message_done; bool is_incoming_head_done; /* If true, this is the last stream the connection should process. * See RFC-7230 Section 6: Connection Management. */ bool is_final_stream; /* Buffer for incoming data that needs to stick around. */ struct aws_byte_buf incoming_storage_buf; struct { /* TODO: move most other members in here */ /* List of `struct aws_h1_chunk`, used for chunked encoding. * Encoder completes/frees/pops front chunk when it's done sending. */ struct aws_linked_list pending_chunk_list; struct aws_h1_encoder_message message; /* Size of stream's flow-control window. 
* Only body data (not headers, etc) counts against the stream's flow-control window. */ uint64_t stream_window; /* Whether a "request handler" stream has a response to send. * Has mirror variable in synced_data */ bool has_outgoing_response : 1; } thread_data; /* Any thread may touch this data, but the connection's lock must be held. * Sharing a lock is fine because it's rare for an HTTP/1 connection * to have more than one stream at a time. */ struct { /* List of `struct aws_h1_chunk` which have been submitted by user, * but haven't yet moved to encoder_message.pending_chunk_list where the encoder will find them. */ struct aws_linked_list pending_chunk_list; /* trailing headers which have been submitted by user, * but haven't yet moved to encoder_message where the encoder will find them. */ struct aws_h1_trailer *pending_trailer; enum aws_h1_stream_api_state api_state; /* Sum of all aws_http_stream_update_window() calls that haven't yet moved to thread_data.stream_window */ uint64_t pending_window_update; /* See `cross_thread_work_task` */ bool is_cross_thread_work_task_scheduled : 1; /* Whether a "request handler" stream has a response to send. * Has mirror variable in thread_data */ bool has_outgoing_response : 1; /* Whether the outgoing message is using chunked encoding */ bool using_chunked_encoding : 1; /* Whether the final 0 length chunk has already been sent */ bool has_final_chunk : 1; /* Whether the chunked trailer has already been sent */ bool has_added_trailer : 1; } synced_data; }; /* DO NOT export functions below. They're only used by other .c files in this library */ struct aws_h1_stream *aws_h1_stream_new_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); struct aws_h1_stream *aws_h1_stream_new_request_handler(const struct aws_http_request_handler_options *options); int aws_h1_stream_activate(struct aws_http_stream *stream); void aws_h1_stream_cancel(struct aws_http_stream *stream, int error_code); int aws_h1_stream_send_response(struct aws_h1_stream *stream, struct aws_http_message *response); #endif /* AWS_HTTP_H1_STREAM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h2_connection.h000066400000000000000000000264621456575232400277700ustar00rootroot00000000000000#ifndef AWS_HTTP_H2_CONNECTION_H #define AWS_HTTP_H2_CONNECTION_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include struct aws_h2_decoder; struct aws_h2_stream; struct aws_h2_connection { struct aws_http_connection base; aws_http2_on_goaway_received_fn *on_goaway_received; aws_http2_on_remote_settings_change_fn *on_remote_settings_change; struct aws_channel_task cross_thread_work_task; struct aws_channel_task outgoing_frames_task; bool conn_manual_window_management; /* Only the event-loop thread may touch this data */ struct { struct aws_h2_decoder *decoder; struct aws_h2_frame_encoder encoder; /* True when reading/writing has stopped, whether due to errors or normal channel shutdown. 
*/ bool is_reading_stopped; bool is_writing_stopped; bool is_outgoing_frames_task_active; /* Settings received from peer, which restricts the message to send */ uint32_t settings_peer[AWS_HTTP2_SETTINGS_END_RANGE]; /* Local settings to send/sent to peer, which affects the decoding */ uint32_t settings_self[AWS_HTTP2_SETTINGS_END_RANGE]; /* List using aws_h2_pending_settings.node * Contains settings waiting to be ACKed by peer and applied */ struct aws_linked_list pending_settings_queue; /* List using aws_h2_pending_ping.node * Pings waiting to be ACKed by peer */ struct aws_linked_list pending_ping_queue; /* Most recent stream-id that was initiated by peer */ uint32_t latest_peer_initiated_stream_id; /* Maps stream-id to aws_h2_stream*. * Contains all streams in the open, reserved, and half-closed states (terms from RFC-7540 5.1). * Once a stream enters closed state, it is removed from this map. */ struct aws_hash_table active_streams_map; /* List using aws_h2_stream.node. * Contains all streams with DATA frames to send. * Any stream in this list is also in the active_streams_map. */ struct aws_linked_list outgoing_streams_list; /* List using aws_h2_stream.node. * Contains all streams with DATA frames to send, and cannot send now due to flow control. * Waiting for WINDOW_UPDATE to set them free */ struct aws_linked_list stalled_window_streams_list; /* List using aws_h2_stream.node. * Contains all streams that are open, but are only sending data when notified, rather than polling * for it (e.g. event streams) * Streams are moved to the outgoing_streams_list until they send pending data, then are moved back * to this list to sleep until more data comes in */ struct aws_linked_list waiting_streams_list; /* List using aws_h2_frame.node. * Queues all frames (except DATA frames) for connection to send. * When queue is empty, then we send DATA frames from the outgoing_streams_list */ struct aws_linked_list outgoing_frames_queue; /* FIFO cache for closed stream, key: stream-id, value: aws_h2_stream_closed_when. * Contains data about streams that were recently closed. * The oldest entry will be removed if the cache is full */ struct aws_cache *closed_streams; /* Flow-control of connection from peer. Indicating the buffer capacity of our peer. * Reduce the space after sending a flow-controlled frame. Increment after receiving WINDOW_UPDATE for * connection */ size_t window_size_peer; /* Flow-control of connection for this side. * Reduce the space after receiving a flow-controlled frame. Increment after sending WINDOW_UPDATE for * connection */ size_t window_size_self; /* Highest self-initiated stream-id that peer might have processed. * Defaults to max stream-id, may be lowered when GOAWAY frame received. */ uint32_t goaway_received_last_stream_id; /* Last-stream-id sent in most recent GOAWAY frame. Defaults to max stream-id. */ uint32_t goaway_sent_last_stream_id; /* Frame we are encoding now. NULL if we are not encoding anything. */ struct aws_h2_frame *current_outgoing_frame; /* Pointer to initial pending settings. If ACKed by peer, it will be NULL. */ struct aws_h2_pending_settings *init_pending_settings; /* Cached channel shutdown values. * If possible, we delay shutdown-in-the-write-dir until GOAWAY is written. 
*/ int channel_shutdown_error_code; bool channel_shutdown_immediately; bool channel_shutdown_waiting_for_goaway_to_be_written; /* TODO: Consider adding stream monitor */ struct aws_crt_statistics_http2_channel stats; /* Timestamp when connection has data to send, which is when there is an active stream with body to send */ uint64_t outgoing_timestamp_ns; /* Timestamp when connection has data to receive, which is when there is an active stream */ uint64_t incoming_timestamp_ns; } thread_data; /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */ struct { struct aws_mutex lock; /* New `aws_h2_stream *` that haven't moved to `thread_data` yet */ struct aws_linked_list pending_stream_list; /* New `aws_h2_frames *`, connection control frames created by user that haven't moved to `thread_data` yet */ struct aws_linked_list pending_frame_list; /* New `aws_h2_pending_settings *` created by user that haven't moved to `thread_data` yet */ struct aws_linked_list pending_settings_list; /* New `aws_h2_pending_ping *` created by user that haven't moved to `thread_data` yet */ struct aws_linked_list pending_ping_list; /* New `aws_h2_pending_goaway *` created by user that haven't sent yet */ struct aws_linked_list pending_goaway_list; bool is_cross_thread_work_task_scheduled; /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ size_t window_update_size; /* For checking status from outside the event-loop thread. */ bool is_open; /* If non-zero, reason to immediately reject new streams. (ex: closing) */ int new_stream_error_code; /* Last-stream-id sent in most recent GOAWAY frame. Defaults to AWS_H2_STREAM_ID_MAX + 1 indicates no GOAWAY has * been sent so far.*/ uint32_t goaway_sent_last_stream_id; /* aws_http2_error_code sent in most recent GOAWAY frame. Defaults to 0, check goaway_sent_last_stream_id for * any GOAWAY has sent or not */ uint32_t goaway_sent_http2_error_code; /* Last-stream-id received in most recent GOAWAY frame. Defaults to AWS_H2_STREAM_ID_MAX + 1 indicates no GOAWAY * has been received so far.*/ uint32_t goaway_received_last_stream_id; /* aws_http2_error_code received in most recent GOAWAY frame. Defaults to 0, check * goaway_received_last_stream_id for any GOAWAY has received or not */ uint32_t goaway_received_http2_error_code; /* For checking settings received from peer from outside the event-loop thread. */ uint32_t settings_peer[AWS_HTTP2_SETTINGS_END_RANGE]; /* For checking local settings to send/sent to peer from outside the event-loop thread. */ uint32_t settings_self[AWS_HTTP2_SETTINGS_END_RANGE]; } synced_data; }; struct aws_h2_pending_settings { struct aws_http2_setting *settings_array; size_t num_settings; struct aws_linked_list_node node; /* user callback */ void *user_data; aws_http2_on_change_settings_complete_fn *on_completed; }; struct aws_h2_pending_ping { uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]; /* For calculating round-trip time */ uint64_t started_time; struct aws_linked_list_node node; /* user callback */ void *user_data; aws_http2_on_ping_complete_fn *on_completed; }; struct aws_h2_pending_goaway { bool allow_more_streams; uint32_t http2_error; struct aws_byte_cursor debug_data; struct aws_linked_list_node node; }; /** * The action which caused the stream to close. 
*/ enum aws_h2_stream_closed_when { AWS_H2_STREAM_CLOSED_UNKNOWN, AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT, }; enum aws_h2_data_encode_status { AWS_H2_DATA_ENCODE_COMPLETE, AWS_H2_DATA_ENCODE_ONGOING, AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED, /* stalled reading from body stream */ AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES, /* waiting for next manual write */ AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED, /* stalled due to reduced window size */ }; /* When window size is too small to fit the possible padding into it, we stop sending data and wait for WINDOW_UPDATE */ #define AWS_H2_MIN_WINDOW_SIZE (256) /* Private functions called from tests... */ AWS_EXTERN_C_BEGIN AWS_HTTP_API struct aws_http_connection *aws_http_connection_new_http2_server( struct aws_allocator *allocator, bool manual_window_management, const struct aws_http2_connection_options *http2_options); AWS_HTTP_API struct aws_http_connection *aws_http_connection_new_http2_client( struct aws_allocator *allocator, bool manual_window_management, const struct aws_http2_connection_options *http2_options); AWS_EXTERN_C_END /* Private functions called from multiple .c files... */ /** * Enqueue outgoing frame. * Connection takes ownership of frame. * Frames are sent into FIFO order. * Do not enqueue DATA frames, these are sent by other means when the frame queue is empty. */ void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame); /** * Invoked immediately after a stream enters the CLOSED state. * The connection will remove the stream from its "active" datastructures, * guaranteeing that no further decoder callbacks are invoked on the stream. * * This should NOT be invoked in the case of a "Connection Error", * though a "Stream Error", in which a RST_STREAM is sent and the stream * is closed early, would invoke this. */ int aws_h2_connection_on_stream_closed( struct aws_h2_connection *connection, struct aws_h2_stream *stream, enum aws_h2_stream_closed_when closed_when, int aws_error_code); /** * Send RST_STREAM and close a stream reserved via PUSH_PROMISE. */ int aws_h2_connection_send_rst_and_close_reserved_stream( struct aws_h2_connection *connection, uint32_t stream_id, uint32_t h2_error_code); /** * Error happens while writing into channel, shutdown the connection. Only called within the eventloop thread */ void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *connection, int error_code); /** * Try to write outgoing frames, if the outgoing-frames-task isn't scheduled, run it immediately. */ void aws_h2_try_write_outgoing_frames(struct aws_h2_connection *connection); #endif /* AWS_HTTP_H2_CONNECTION_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h2_decoder.h000066400000000000000000000127021456575232400272260ustar00rootroot00000000000000#ifndef AWS_HTTP_H2_DECODER_H #define AWS_HTTP_H2_DECODER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* Decoder design goals: * - Minimize state tracking and verification required by user. * For example, we have _begin()/_i()/_end() callbacks when something happens N times. * The _begin() and _end() callbacks tell the user when to transition states. * Without them the user needs to be like, oh, I was doing X but now I'm doing Y, * so I guess I need to end X and start Y. 
* - A callback should result in 1 distinct action. * For example, we have distinct callbacks for `on_ping()` and `on_ping_ack()`. * We COULD have had just one `on_ping(bool ack)` callback, but since user must * take two complete different actions based on the ACK, we opted for two callbacks. */ /* Return a failed aws_h2err from any callback to stop the decoder and cause a Connection Error */ struct aws_h2_decoder_vtable { /* For HEADERS header-block: _begin() is called, then 0+ _i() calls, then _end(). * No other decoder callbacks will occur in this time. * If something is malformed, no further _i() calls occur, and it is reported in _end() */ struct aws_h2err (*on_headers_begin)(uint32_t stream_id, void *userdata); struct aws_h2err (*on_headers_i)( uint32_t stream_id, const struct aws_http_header *header, enum aws_http_header_name name_enum, enum aws_http_header_block block_type, void *userdata); struct aws_h2err ( *on_headers_end)(uint32_t stream_id, bool malformed, enum aws_http_header_block block_type, void *userdata); /* For PUSH_PROMISE header-block: _begin() is called, then 0+ _i() calls, then _end(). * No other decoder callbacks will occur in this time. * If something is malformed, no further _i() calls occur, and it is reported in _end() */ struct aws_h2err (*on_push_promise_begin)(uint32_t stream_id, uint32_t promised_stream_id, void *userdata); struct aws_h2err (*on_push_promise_i)( uint32_t stream_id, const struct aws_http_header *header, enum aws_http_header_name name_enum, void *userdata); struct aws_h2err (*on_push_promise_end)(uint32_t stream_id, bool malformed, void *userdata); /* For DATA frame: _begin() is called, then 0+ _i() calls, then _end(). * No other decoder callbacks will occur in this time */ struct aws_h2err (*on_data_begin)( uint32_t stream_id, uint32_t payload_len, /* Whole payload length including padding and padding length */ uint32_t total_padding_bytes, /* The length of padding and the byte for padding length */ bool end_stream, void *userdata); struct aws_h2err (*on_data_i)(uint32_t stream_id, struct aws_byte_cursor data, void *userdata); struct aws_h2err (*on_data_end)(uint32_t stream_id, void *userdata); /* Called at end of DATA frame containing the END_STREAM flag. * OR called at end of header-block which began with HEADERS frame containing the END_STREAM flag */ struct aws_h2err (*on_end_stream)(uint32_t stream_id, void *userdata); /* Called once for RST_STREAM frame */ struct aws_h2err (*on_rst_stream)(uint32_t stream_id, uint32_t error_code, void *userdata); /* Called once For PING frame with ACK flag set */ struct aws_h2err (*on_ping_ack)(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata); /* Called once for PING frame (no ACK flag set)*/ struct aws_h2err (*on_ping)(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata); /* Called once for SETTINGS frame with ACK flag */ struct aws_h2err (*on_settings_ack)(void *userdata); /* Called once for SETTINGS frame, without ACK flag */ struct aws_h2err ( *on_settings)(const struct aws_http2_setting *settings_array, size_t num_settings, void *userdata); /* Called once for GOAWAY frame */ struct aws_h2err ( *on_goaway)(uint32_t last_stream, uint32_t error_code, struct aws_byte_cursor debug_data, void *userdata); /* Called once for WINDOW_UPDATE frame */ struct aws_h2err (*on_window_update)(uint32_t stream_id, uint32_t window_size_increment, void *userdata); }; /** * Structure used to initialize an `aws_h2_decoder`. 
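 *
 * Typical wiring, shown only as an orientation sketch (this is an internal API; `allocator`,
 * `s_decoder_vtable`, and `connection` are assumed to be defined by the caller, and `incoming`
 * is assumed to be an aws_byte_cursor over bytes just read from the channel):
 *
 *     struct aws_h2_decoder_params params = {
 *         .alloc = allocator,
 *         .vtable = &s_decoder_vtable,
 *         .userdata = connection,
 *         .is_server = false,
 *     };
 *     struct aws_h2_decoder *decoder = aws_h2_decoder_new(&params);
 *
 *     struct aws_h2err err = aws_h2_decode(decoder, &incoming);
 *     if (aws_h2err_failed(err)) {
 *         (treat as a Connection Error and begin connection shutdown)
 *     }
 *
 *     aws_h2_decoder_destroy(decoder);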
*/ struct aws_h2_decoder_params { struct aws_allocator *alloc; const struct aws_h2_decoder_vtable *vtable; void *userdata; const void *logging_id; bool is_server; /* If true, do not expect the connection preface and immediately accept any frame type. * Only set this when testing the decoder itself */ bool skip_connection_preface; }; struct aws_h2_decoder; AWS_EXTERN_C_BEGIN AWS_HTTP_API struct aws_h2_decoder *aws_h2_decoder_new(struct aws_h2_decoder_params *params); AWS_HTTP_API void aws_h2_decoder_destroy(struct aws_h2_decoder *decoder); /* If failed aws_h2err returned, it is a Connection Error */ AWS_HTTP_API struct aws_h2err aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data); AWS_HTTP_API void aws_h2_decoder_set_setting_header_table_size(struct aws_h2_decoder *decoder, uint32_t data); AWS_HTTP_API void aws_h2_decoder_set_setting_enable_push(struct aws_h2_decoder *decoder, uint32_t data); AWS_HTTP_API void aws_h2_decoder_set_setting_max_frame_size(struct aws_h2_decoder *decoder, uint32_t data); AWS_EXTERN_C_END #endif /* AWS_HTTP_H2_DECODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h2_frames.h000066400000000000000000000223511456575232400270770ustar00rootroot00000000000000#ifndef AWS_HTTP_H2_FRAMES_H #define AWS_HTTP_H2_FRAMES_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /* Ids for each frame type (RFC-7540 6) */ enum aws_h2_frame_type { AWS_H2_FRAME_T_DATA = 0x00, AWS_H2_FRAME_T_HEADERS = 0x01, AWS_H2_FRAME_T_PRIORITY = 0x02, AWS_H2_FRAME_T_RST_STREAM = 0x03, AWS_H2_FRAME_T_SETTINGS = 0x04, AWS_H2_FRAME_T_PUSH_PROMISE = 0x05, AWS_H2_FRAME_T_PING = 0x06, AWS_H2_FRAME_T_GOAWAY = 0x07, AWS_H2_FRAME_T_WINDOW_UPDATE = 0x08, AWS_H2_FRAME_T_CONTINUATION = 0x09, AWS_H2_FRAME_T_UNKNOWN, AWS_H2_FRAME_TYPE_COUNT, }; /* Represents flags that may be set on a frame (RFC-7540 6) */ enum aws_h2_frame_flag { AWS_H2_FRAME_F_ACK = 0x01, AWS_H2_FRAME_F_END_STREAM = 0x01, AWS_H2_FRAME_F_END_HEADERS = 0x04, AWS_H2_FRAME_F_PADDED = 0x08, AWS_H2_FRAME_F_PRIORITY = 0x20, }; /* Pairs the AWS_ERROR_* to show our API user, * along with the AWS_HTTP2_ERR_* that should * be sent to the peer via RST_STREAM or GOAWAY. * * Used in place of normal error handling in functions that may result * in an HTTP/2 Connection Error or Stream Error. */ struct aws_h2err { enum aws_http2_error_code h2_code; int aws_code; }; #define AWS_H2ERR_SUCCESS \ (struct aws_h2err) { .h2_code = 0, .aws_code = 0 } #define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) /* must fit in 3 bytes */ #define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) /* cannot use high bit */ #define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) /* cannot use high bit */ #define AWS_H2_FRAME_PREFIX_SIZE (9) #define AWS_H2_INIT_WINDOW_SIZE (65535) /* Defined initial window size */ /* Legal min(inclusive) and max(inclusive) for each setting */ extern const uint32_t aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_END_RANGE][2]; /* Initial values for settings RFC-7540 6.5.2 */ AWS_HTTP_API extern const uint32_t aws_h2_settings_initial[AWS_HTTP2_SETTINGS_END_RANGE]; /* This magic string must be the very first thing a client sends to the server. * See RFC-7540 3.5 - HTTP/2 Connection Preface. 
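 * (The preface consists of the octets "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n",
 * per RFC-7540 3.5.)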
* Exported for tests */ AWS_HTTP_API extern const struct aws_byte_cursor aws_h2_connection_preface_client_string; /** * Present in all frames that may have set AWS_H2_FRAME_F_PRIORITY * * Encoded as: * +-+-------------------------------------------------------------+ * |E| Stream Dependency (31) | * +-+-------------+-----------------------------------------------+ * | Weight (8) | * +-+-------------+ */ struct aws_h2_frame_priority_settings { uint32_t stream_dependency; bool stream_dependency_exclusive; uint8_t weight; }; /** * A frame to be encoded. * (in the case of HEADERS and PUSH_PROMISE, it might turn into multiple frames due to CONTINUATION) */ struct aws_h2_frame { const struct aws_h2_frame_vtable *vtable; struct aws_allocator *alloc; struct aws_linked_list_node node; enum aws_h2_frame_type type; uint32_t stream_id; /* If true, frame will be sent before those with normal priority. * Useful for frames like PING ACK where low latency is important. */ bool high_priority; }; /* Used to encode a frame */ struct aws_h2_frame_encoder { struct aws_allocator *allocator; const void *logging_id; struct aws_hpack_encoder hpack; struct aws_h2_frame *current_frame; /* Settings for frame encoder, which is based on the settings received from peer */ struct { /* the size of the largest frame payload */ uint32_t max_frame_size; } settings; bool has_errored; }; typedef void aws_h2_frame_destroy_fn(struct aws_h2_frame *frame_base); typedef int aws_h2_frame_encode_fn( struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, bool *complete); struct aws_h2_frame_vtable { aws_h2_frame_destroy_fn *destroy; aws_h2_frame_encode_fn *encode; }; AWS_EXTERN_C_BEGIN AWS_HTTP_API const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type); AWS_HTTP_API const char *aws_http2_error_code_to_str(enum aws_http2_error_code h2_error_code); /** * Specify which HTTP/2 error-code will be sent to the peer in a GOAWAY or RST_STREAM frame. * * The AWS_ERROR reported to the API user will be AWS_ERROR_HTTP_PROTOCOL_ERROR. */ AWS_HTTP_API struct aws_h2err aws_h2err_from_h2_code(enum aws_http2_error_code h2_error_code); /** * Specify which AWS_ERROR will be reported to the API user. * * The peer will be sent a GOAWAY or RST_STREAM with the INTERNAL_ERROR HTTP/2 error-code. */ AWS_HTTP_API struct aws_h2err aws_h2err_from_aws_code(int aws_error_code); AWS_HTTP_API struct aws_h2err aws_h2err_from_last_error(void); AWS_HTTP_API bool aws_h2err_success(struct aws_h2err err); AWS_HTTP_API bool aws_h2err_failed(struct aws_h2err err); /* Raises AWS_ERROR_INVALID_ARGUMENT if stream_id is 0 or exceeds AWS_H2_MAX_STREAM_ID */ AWS_HTTP_API int aws_h2_validate_stream_id(uint32_t stream_id); /** * The process of encoding a frame looks like: * 1. Create a encoder object on the stack and initialize with aws_h2_frame_encoder_init * 2. Encode the frame using aws_h2_encode_frame() */ AWS_HTTP_API int aws_h2_frame_encoder_init( struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator, const void *logging_id); AWS_HTTP_API void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder); /** * Attempt to encode frame into output buffer. * AWS_OP_ERR is returned if encoder encounters an unrecoverable error. * frame_complete will be set true if the frame finished encoding. * * If frame_complete is false then we MUST call aws_h2_encode_frame() again * with all the same inputs, when we have a fresh buffer (it would be illegal * to encode a different frame). 
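 *
 * Illustrative encode loop (a sketch only; `allocator` and `frame` are assumed
 * to be in scope, and get_fresh_buffer() stands in for however the caller
 * obtains output space and is not part of this API):
 *
 *     struct aws_h2_frame_encoder encoder;
 *     aws_h2_frame_encoder_init(&encoder, allocator, NULL);
 *     bool frame_complete = false;
 *     while (!frame_complete) {
 *         struct aws_byte_buf *output = get_fresh_buffer();
 *         if (aws_h2_encode_frame(&encoder, frame, output, &frame_complete)) {
 *             ...unrecoverable encoder error, tear down the connection...
 *         }
 *         ...send the bytes written to output...
 *     }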
*/ AWS_HTTP_API int aws_h2_encode_frame( struct aws_h2_frame_encoder *encoder, struct aws_h2_frame *frame, struct aws_byte_buf *output, bool *frame_complete); /** * Attempt to encode a DATA frame into the output buffer. * The body_stream will be read into the available space (up to MAX_FRAME_SIZE). * AWS_OP_ERR is returned if encoder encounters an unrecoverable error. * body_complete will be set true if encoder reaches the end of the body_stream. * body_stalled will be true if aws_input_stream_read() stopped early (didn't * complete, though more space was available). * * Each call to this function encodes a complete DATA frame, or nothing at all, * so it's always safe to encode a different frame type or the body of a different stream * after calling this. */ AWS_HTTP_API int aws_h2_encode_data_frame( struct aws_h2_frame_encoder *encoder, uint32_t stream_id, struct aws_input_stream *body_stream, bool body_ends_stream, uint8_t pad_length, int32_t *stream_window_size_peer, size_t *connection_window_size_peer, struct aws_byte_buf *output, bool *body_complete, bool *body_stalled); AWS_HTTP_API void aws_h2_frame_destroy(struct aws_h2_frame *frame); /** * This frame type may actually end up encoding multiple frames * (HEADERS followed by 0 or more CONTINUATION frames). */ AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_headers( struct aws_allocator *allocator, uint32_t stream_id, const struct aws_http_headers *headers, bool end_stream, uint8_t pad_length, const struct aws_h2_frame_priority_settings *optional_priority); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_priority( struct aws_allocator *allocator, uint32_t stream_id, const struct aws_h2_frame_priority_settings *priority); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_rst_stream( struct aws_allocator *allocator, uint32_t stream_id, uint32_t error_code); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_settings( struct aws_allocator *allocator, const struct aws_http2_setting *settings_array, size_t num_settings, bool ack); /** * This frame type may actually end up encoding multiple frames * (PUSH_PROMISE followed 0 or more CONTINUATION frames). */ AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_push_promise( struct aws_allocator *allocator, uint32_t stream_id, uint32_t promised_stream_id, const struct aws_http_headers *headers, uint8_t pad_length); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_ping( struct aws_allocator *allocator, bool ack, const uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_goaway( struct aws_allocator *allocator, uint32_t last_stream_id, uint32_t error_code, struct aws_byte_cursor debug_data); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_window_update( struct aws_allocator *allocator, uint32_t stream_id, uint32_t window_size_increment); AWS_HTTP_API void aws_h2_frame_encoder_set_setting_header_table_size( struct aws_h2_frame_encoder *encoder, uint32_t data); AWS_HTTP_API void aws_h2_frame_encoder_set_setting_max_frame_size(struct aws_h2_frame_encoder *encoder, uint32_t data); AWS_EXTERN_C_END #endif /* AWS_HTTP_H2_FRAMES_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/h2_stream.h000066400000000000000000000200471456575232400271150ustar00rootroot00000000000000#ifndef AWS_HTTP_H2_STREAM_H #define AWS_HTTP_H2_STREAM_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #define AWS_H2_STREAM_LOGF(level, stream, text, ...) \ AWS_LOGF_##level( \ AWS_LS_HTTP_STREAM, \ "id=%" PRIu32 " connection=%p state=%s: " text, \ (stream)->base.id, \ (void *)(stream)->base.owning_connection, \ aws_h2_stream_state_to_str((stream)->thread_data.state), \ __VA_ARGS__) #define AWS_H2_STREAM_LOG(level, stream, text) AWS_H2_STREAM_LOGF(level, (stream), "%s", (text)) enum aws_h2_stream_state { /* Initial state, before anything sent or received. */ AWS_H2_STREAM_STATE_IDLE, /* (server-only) stream-id was reserved via PUSH_PROMISE on another stream, * but HEADERS for this stream have not been sent yet */ AWS_H2_STREAM_STATE_RESERVED_LOCAL, /* (client-only) stream-id was reserved via PUSH_PROMISE on another stream, * but HEADERS for this stream have not been received yet */ AWS_H2_STREAM_STATE_RESERVED_REMOTE, /* Neither side is done sending their message. */ AWS_H2_STREAM_STATE_OPEN, /* This side is done sending message (END_STREAM), but peer is not done. */ AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL, /* Peer is done sending message (END_STREAM), but this side is not done */ AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE, /* Both sides done sending message (END_STREAM), * or either side has sent RST_STREAM */ AWS_H2_STREAM_STATE_CLOSED, AWS_H2_STREAM_STATE_COUNT, }; /* simplified stream state for API implementation */ enum aws_h2_stream_api_state { AWS_H2_STREAM_API_STATE_INIT, AWS_H2_STREAM_API_STATE_ACTIVE, AWS_H2_STREAM_API_STATE_COMPLETE, }; /* Indicates the state of the body of the HTTP/2 stream */ enum aws_h2_stream_body_state { AWS_H2_STREAM_BODY_STATE_NONE, /* Has no body for the HTTP/2 stream */ AWS_H2_STREAM_BODY_STATE_WAITING_WRITES, /* Has no active body, but waiting for more to be write */ AWS_H2_STREAM_BODY_STATE_ONGOING, /* Has active ongoing body */ }; /* represents a write operation, which will be turned into a data frame */ struct aws_h2_stream_data_write { struct aws_linked_list_node node; struct aws_input_stream *data_stream; aws_http2_stream_write_data_complete_fn *on_complete; void *user_data; bool end_stream; }; struct aws_h2_stream { struct aws_http_stream base; struct aws_linked_list_node node; struct aws_channel_task cross_thread_work_task; /* Only the event-loop thread may touch this data */ struct { enum aws_h2_stream_state state; int32_t window_size_peer; /* The local window size. * We allow this value exceed the max window size (int64 can hold much more than 0x7FFFFFFF), * We leave it up to the remote peer to detect whether the max window size has been exceeded. */ int64_t window_size_self; struct aws_http_message *outgoing_message; /* All queued writes. If the message provides a body stream, it will be first in this list * This list can drain, which results in the stream being put to sleep (moved to waiting_streams_list in * h2_connection). */ struct aws_linked_list outgoing_writes; /* aws_http2_stream_data_write */ bool received_main_headers; bool content_length_received; /* Set if incoming message has content-length header */ uint64_t incoming_content_length; /* The total length of payload of data frame received */ uint64_t incoming_data_length; /* Indicates that the stream is currently in the waiting_streams_list and is * asleep. 
When stream needs to be awaken, moving the stream back to the outgoing_streams_list and set this bool * to false */ bool waiting_for_writes; } thread_data; /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */ struct { struct aws_mutex lock; bool is_cross_thread_work_task_scheduled; /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ size_t window_update_size; /* The combined aws_http2_error_code user wanted to send to remote peer via rst_stream and internal aws error * code we want to inform user about. */ struct aws_h2err reset_error; bool reset_called; bool manual_write_ended; /* Simplified stream state. */ enum aws_h2_stream_api_state api_state; /* any data streams sent manually via aws_http2_stream_write_data */ struct aws_linked_list pending_write_list; /* aws_h2_stream_pending_data */ } synced_data; bool manual_write; /* Store the sent reset HTTP/2 error code, set to -1, if none has sent so far */ int64_t sent_reset_error_code; /* Store the received reset HTTP/2 error code, set to -1, if none has received so far */ int64_t received_reset_error_code; }; const char *aws_h2_stream_state_to_str(enum aws_h2_stream_state state); struct aws_h2_stream *aws_h2_stream_new_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *stream); struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self); /* Connection is ready to send frames from stream now */ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_body_state *body_state); /* Completes stream for one reason or another, clean up any pending writes/resources. */ void aws_h2_stream_complete(struct aws_h2_stream *stream, int error_code); /* Connection is ready to send data from stream now. * Stream may complete itself during this call. 
* data_encode_status: see `aws_h2_data_encode_status` */ int aws_h2_stream_encode_data_frame( struct aws_h2_stream *stream, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, int *data_encode_status); struct aws_h2err aws_h2_stream_on_decoder_headers_begin(struct aws_h2_stream *stream); struct aws_h2err aws_h2_stream_on_decoder_headers_i( struct aws_h2_stream *stream, const struct aws_http_header *header, enum aws_http_header_name name_enum, enum aws_http_header_block block_type); struct aws_h2err aws_h2_stream_on_decoder_headers_end( struct aws_h2_stream *stream, bool malformed, enum aws_http_header_block block_type); struct aws_h2err aws_h2_stream_on_decoder_push_promise(struct aws_h2_stream *stream, uint32_t promised_stream_id); struct aws_h2err aws_h2_stream_on_decoder_data_begin( struct aws_h2_stream *stream, uint32_t payload_len, uint32_t total_padding_bytes, bool end_stream); struct aws_h2err aws_h2_stream_on_decoder_data_i(struct aws_h2_stream *stream, struct aws_byte_cursor data); struct aws_h2err aws_h2_stream_on_decoder_window_update( struct aws_h2_stream *stream, uint32_t window_size_increment, bool *window_resume); struct aws_h2err aws_h2_stream_on_decoder_end_stream(struct aws_h2_stream *stream); struct aws_h2err aws_h2_stream_on_decoder_rst_stream(struct aws_h2_stream *stream, uint32_t h2_error_code); int aws_h2_stream_activate(struct aws_http_stream *stream); #endif /* AWS_HTTP_H2_STREAM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/hpack.h000066400000000000000000000224741456575232400263250ustar00rootroot00000000000000#ifndef AWS_HTTP_HPACK_H #define AWS_HTTP_HPACK_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include /** * Result of aws_hpack_decode() call. * If a complete entry has not been decoded yet, type is ONGOING. * Otherwise, type informs which data to look at. */ struct aws_hpack_decode_result { enum aws_hpack_decode_type { AWS_HPACK_DECODE_T_ONGOING, AWS_HPACK_DECODE_T_HEADER_FIELD, AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE, } type; union { /* If type is AWS_HPACK_DECODE_T_HEADER_FIELD */ struct aws_http_header header_field; /* If type is AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE */ size_t dynamic_table_resize; } data; }; /** * Controls whether non-indexed strings will use Huffman encoding. * In SMALLEST mode, strings will only be sent with Huffman encoding if it makes them smaller. * * Note: This does not control compression via "indexing", * for that, see `aws_http_header_compression`. * This only controls how string values are encoded when they're not already in a table. */ enum aws_hpack_huffman_mode { AWS_HPACK_HUFFMAN_SMALLEST, AWS_HPACK_HUFFMAN_NEVER, AWS_HPACK_HUFFMAN_ALWAYS, }; /** * Maintains the dynamic table. * Insertion is backwards, indexing is forwards */ struct aws_hpack_context { struct aws_allocator *allocator; enum aws_http_log_subject log_subject; const void *log_id; struct { /* Array of headers, pointers to memory we alloced, which needs to be cleaned up whenever we move an entry out */ struct aws_http_header *buffer; size_t buffer_capacity; /* Number of http_headers that can fit in buffer */ size_t num_elements; size_t index_0; /* Size in bytes, according to [4.1] */ size_t size; size_t max_size; /* aws_http_header * -> size_t */ struct aws_hash_table reverse_lookup; /* aws_byte_cursor * -> size_t */ struct aws_hash_table reverse_lookup_name_only; } dynamic_table; }; /** * Encodes outgoing headers. 
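 *
 * Illustrative lifecycle (a sketch only; `allocator`, `request_headers`, and
 * the caller-owned aws_byte_buf `out_buf` are placeholder names):
 *
 *     struct aws_hpack_encoder encoder;
 *     aws_hpack_encoder_init(&encoder, allocator, NULL);
 *     if (aws_hpack_encode_header_block(&encoder, request_headers, &out_buf)) {
 *         ...the encoder is now broken and must not be reused...
 *     }
 *     aws_hpack_encoder_clean_up(&encoder);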
*/ struct aws_hpack_encoder { const void *log_id; struct aws_huffman_encoder huffman_encoder; enum aws_hpack_huffman_mode huffman_mode; struct aws_hpack_context context; struct { size_t latest_value; size_t smallest_value; bool pending; } dynamic_table_size_update; }; /** * Decodes incoming headers */ struct aws_hpack_decoder { const void *log_id; struct aws_huffman_decoder huffman_decoder; struct aws_hpack_context context; /* TODO: check the new (RFC 9113 - 4.3.1) to make sure we did it right */ /* SETTINGS_HEADER_TABLE_SIZE from http2 */ size_t dynamic_table_protocol_max_size_setting; /* PRO TIP: Don't union progress_integer and progress_string together, since string_decode calls integer_decode */ struct hpack_progress_integer { enum { HPACK_INTEGER_STATE_INIT, HPACK_INTEGER_STATE_VALUE, } state; uint8_t bit_count; } progress_integer; struct hpack_progress_string { enum { HPACK_STRING_STATE_INIT, HPACK_STRING_STATE_LENGTH, HPACK_STRING_STATE_VALUE, } state; bool use_huffman; uint64_t length; } progress_string; struct hpack_progress_entry { enum { HPACK_ENTRY_STATE_INIT, /* Indexed header field: just 1 state. read index, find name and value at index */ HPACK_ENTRY_STATE_INDEXED, /* Literal header field: name may be indexed OR literal, value is always literal */ HPACK_ENTRY_STATE_LITERAL_BEGIN, HPACK_ENTRY_STATE_LITERAL_NAME_STRING, HPACK_ENTRY_STATE_LITERAL_VALUE_STRING, /* Dynamic table resize: just 1 state. read new size */ HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE, /* Done */ HPACK_ENTRY_STATE_COMPLETE, } state; union { struct { uint64_t index; } indexed; struct hpack_progress_literal { uint8_t prefix_size; enum aws_http_header_compression compression; uint64_t name_index; size_t name_length; } literal; struct { uint64_t size; } dynamic_table_resize; } u; enum aws_hpack_decode_type type; /* Scratch holds header name and value while decoding */ struct aws_byte_buf scratch; } progress_entry; }; AWS_EXTERN_C_BEGIN /* Library-level init and shutdown */ void aws_hpack_static_table_init(struct aws_allocator *allocator); void aws_hpack_static_table_clean_up(void); AWS_HTTP_API void aws_hpack_context_init( struct aws_hpack_context *aws_hpack_context, struct aws_allocator *allocator, enum aws_http_log_subject log_subject, const void *log_id); AWS_HTTP_API void aws_hpack_context_clean_up(struct aws_hpack_context *context); /* Returns the hpack size of a header (name.len + value.len + 32) [4.1] */ AWS_HTTP_API size_t aws_hpack_get_header_size(const struct aws_http_header *header); /* Returns the number of elements in dynamic table now */ AWS_HTTP_API size_t aws_hpack_get_dynamic_table_num_elements(const struct aws_hpack_context *context); size_t aws_hpack_get_dynamic_table_max_size(const struct aws_hpack_context *context); AWS_HTTP_API const struct aws_http_header *aws_hpack_get_header(const struct aws_hpack_context *context, size_t index); /* A return value of 0 indicates that the header wasn't found */ AWS_HTTP_API size_t aws_hpack_find_index( const struct aws_hpack_context *context, const struct aws_http_header *header, bool search_value, bool *found_value); AWS_HTTP_API int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_http_header *header); /** * Set the max size of the dynamic table (in octets). The size of each header is name.len + value.len + 32 [4.1]. 
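 * For example, the header field ("content-type", "text/html") occupies
 * 12 + 9 + 32 = 53 octets of dynamic-table space.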
*/ AWS_HTTP_API int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size); AWS_HTTP_API void aws_hpack_encoder_init(struct aws_hpack_encoder *encoder, struct aws_allocator *allocator, const void *log_id); AWS_HTTP_API void aws_hpack_encoder_clean_up(struct aws_hpack_encoder *encoder); /* Call this after receiving SETTINGS_HEADER_TABLE_SIZE from peer and sending the ACK. * The hpack-encoder remembers all size updates, and makes sure to encode the proper * number of Dynamic Table Size Updates the next time a header block is sent. */ AWS_HTTP_API void aws_hpack_encoder_update_max_table_size(struct aws_hpack_encoder *encoder, uint32_t new_max_size); AWS_HTTP_API void aws_hpack_encoder_set_huffman_mode(struct aws_hpack_encoder *encoder, enum aws_hpack_huffman_mode mode); /** * Encode header-block into the output. * This function will mutate hpack, so an error means hpack can no longer be used. * Note that output will be dynamically resized if it's too short. */ AWS_HTTP_API int aws_hpack_encode_header_block( struct aws_hpack_encoder *encoder, const struct aws_http_headers *headers, struct aws_byte_buf *output); AWS_HTTP_API void aws_hpack_decoder_init(struct aws_hpack_decoder *decoder, struct aws_allocator *allocator, const void *log_id); AWS_HTTP_API void aws_hpack_decoder_clean_up(struct aws_hpack_decoder *decoder); /* Call this after sending SETTINGS_HEADER_TABLE_SIZE and receiving ACK from the peer. * The hpack-decoder remembers all size updates, and makes sure that the peer * sends the appropriate Dynamic Table Size Updates in the next header block we receive. */ AWS_HTTP_API void aws_hpack_decoder_update_max_table_size(struct aws_hpack_decoder *decoder, uint32_t new_max_size); /** * Decode the next entry in the header-block-fragment. * If result->type is ONGOING, then call decode() again with more data to resume decoding. * Otherwise, type is either a HEADER_FIELD or a DYNAMIC_TABLE_RESIZE. * * If an error occurs, the decoder is broken and decode() must not be called again. */ AWS_HTTP_API int aws_hpack_decode( struct aws_hpack_decoder *decoder, struct aws_byte_cursor *to_decode, struct aws_hpack_decode_result *result); /******************************************************************************* * Private functions for encoder/decoder, but public for testing purposes ******************************************************************************/ /* Output will be dynamically resized if it's too short */ AWS_HTTP_API int aws_hpack_encode_integer(uint64_t integer, uint8_t starting_bits, uint8_t prefix_size, struct aws_byte_buf *output); /* Output will be dynamically resized if it's too short */ AWS_HTTP_API int aws_hpack_encode_string( struct aws_hpack_encoder *encoder, struct aws_byte_cursor to_encode, struct aws_byte_buf *output); AWS_HTTP_API int aws_hpack_decode_integer( struct aws_hpack_decoder *decoder, struct aws_byte_cursor *to_decode, uint8_t prefix_size, uint64_t *integer, bool *complete); AWS_HTTP_API int aws_hpack_decode_string( struct aws_hpack_decoder *decoder, struct aws_byte_cursor *to_decode, struct aws_byte_buf *output, bool *complete); AWS_EXTERN_C_END #endif /* AWS_HTTP_HPACK_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/hpack_header_static_table.def000066400000000000000000000041651456575232400326570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #ifndef HEADER #error "Macro HEADER(index, name) must be defined before including this header file!" #endif #ifndef HEADER_WITH_VALUE #error "Macro HEADER_WITH_VALUE(index, name, value) must be defined before including this header file!" #endif HEADER(1, ":authority") HEADER_WITH_VALUE(2, ":method", "GET") HEADER_WITH_VALUE(3, ":method", "POST") HEADER_WITH_VALUE(4, ":path", "/") HEADER_WITH_VALUE(5, ":path", "/index.html") HEADER_WITH_VALUE(6, ":scheme", "http") HEADER_WITH_VALUE(7, ":scheme", "https") HEADER_WITH_VALUE(8, ":status", "200") HEADER_WITH_VALUE(9, ":status", "204") HEADER_WITH_VALUE(10, ":status", "206") HEADER_WITH_VALUE(11, ":status", "304") HEADER_WITH_VALUE(12, ":status", "400") HEADER_WITH_VALUE(13, ":status", "404") HEADER_WITH_VALUE(14, ":status", "500") HEADER(15, "accept-charset") HEADER_WITH_VALUE(16, "accept-encoding", "gzip,deflate") HEADER(17, "accept-language") HEADER(18, "accept-ranges") HEADER(19, "accept") HEADER(20, "access-control-allow-origin") HEADER(21, "age") HEADER(22, "allow") HEADER(23, "authorization") HEADER(24, "cache-control") HEADER(25, "content-disposition") HEADER(26, "content-encoding") HEADER(27, "content-language") HEADER(28, "content-length") HEADER(29, "content-location") HEADER(30, "content-range") HEADER(31, "content-type") HEADER(32, "cookie") HEADER(33, "date") HEADER(34, "etag") HEADER(35, "expect") HEADER(36, "expires") HEADER(37, "from") HEADER(38, "host") HEADER(39, "if-match") HEADER(40, "if-modified-since") HEADER(41, "if-none-match") HEADER(42, "if-range") HEADER(43, "if-unmodified-since") HEADER(44, "last-modified") HEADER(45, "link") HEADER(46, "location") HEADER(47, "max-forwards") HEADER(48, "proxy-authenticate") HEADER(49, "proxy-authorization") HEADER(50, "range") HEADER(51, "referer") HEADER(52, "refresh") HEADER(53, "retry-after") HEADER(54, "server") HEADER(55, "set-cookie") HEADER(56, "strict-transport-security") HEADER(57, "transfer-encoding") HEADER(58, "user-agent") HEADER(59, "vary") HEADER(60, "via") HEADER(61, "www-authenticate") aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/hpack_huffman_static_table.def000066400000000000000000000444561456575232400330620ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef HUFFMAN_CODE #error "Macro HUFFMAN_CODE must be defined before including this header file!" 
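/* Typical consumption of this X-macro file (an illustrative sketch only;
 * `code_entry` is a placeholder type, not part of these sources):
 *
 *     #define HUFFMAN_CODE(sym, bits_str, code, num_bits) {(code), (num_bits)},
 *     static const struct code_entry s_codes[] = {
 *     #include "hpack_huffman_static_table.def"
 *     };
 *     #undef HUFFMAN_CODE
 */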
#endif /* sym bits code len */ HUFFMAN_CODE( 0, "1111111111000", 0x1ff8, 13) HUFFMAN_CODE( 1, "11111111111111111011000", 0x7fffd8, 23) HUFFMAN_CODE( 2, "1111111111111111111111100010", 0xfffffe2, 28) HUFFMAN_CODE( 3, "1111111111111111111111100011", 0xfffffe3, 28) HUFFMAN_CODE( 4, "1111111111111111111111100100", 0xfffffe4, 28) HUFFMAN_CODE( 5, "1111111111111111111111100101", 0xfffffe5, 28) HUFFMAN_CODE( 6, "1111111111111111111111100110", 0xfffffe6, 28) HUFFMAN_CODE( 7, "1111111111111111111111100111", 0xfffffe7, 28) HUFFMAN_CODE( 8, "1111111111111111111111101000", 0xfffffe8, 28) HUFFMAN_CODE( 9, "111111111111111111101010", 0xffffea, 24) HUFFMAN_CODE( 10, "111111111111111111111111111100", 0x3ffffffc, 30) HUFFMAN_CODE( 11, "1111111111111111111111101001", 0xfffffe9, 28) HUFFMAN_CODE( 12, "1111111111111111111111101010", 0xfffffea, 28) HUFFMAN_CODE( 13, "111111111111111111111111111101", 0x3ffffffd, 30) HUFFMAN_CODE( 14, "1111111111111111111111101011", 0xfffffeb, 28) HUFFMAN_CODE( 15, "1111111111111111111111101100", 0xfffffec, 28) HUFFMAN_CODE( 16, "1111111111111111111111101101", 0xfffffed, 28) HUFFMAN_CODE( 17, "1111111111111111111111101110", 0xfffffee, 28) HUFFMAN_CODE( 18, "1111111111111111111111101111", 0xfffffef, 28) HUFFMAN_CODE( 19, "1111111111111111111111110000", 0xffffff0, 28) HUFFMAN_CODE( 20, "1111111111111111111111110001", 0xffffff1, 28) HUFFMAN_CODE( 21, "1111111111111111111111110010", 0xffffff2, 28) HUFFMAN_CODE( 22, "111111111111111111111111111110", 0x3ffffffe, 30) HUFFMAN_CODE( 23, "1111111111111111111111110011", 0xffffff3, 28) HUFFMAN_CODE( 24, "1111111111111111111111110100", 0xffffff4, 28) HUFFMAN_CODE( 25, "1111111111111111111111110101", 0xffffff5, 28) HUFFMAN_CODE( 26, "1111111111111111111111110110", 0xffffff6, 28) HUFFMAN_CODE( 27, "1111111111111111111111110111", 0xffffff7, 28) HUFFMAN_CODE( 28, "1111111111111111111111111000", 0xffffff8, 28) HUFFMAN_CODE( 29, "1111111111111111111111111001", 0xffffff9, 28) HUFFMAN_CODE( 30, "1111111111111111111111111010", 0xffffffa, 28) HUFFMAN_CODE( 31, "1111111111111111111111111011", 0xffffffb, 28) HUFFMAN_CODE( 32, "010100", 0x14, 6) HUFFMAN_CODE( 33, "1111111000", 0x3f8, 10) HUFFMAN_CODE( 34, "1111111001", 0x3f9, 10) HUFFMAN_CODE( 35, "111111111010", 0xffa, 12) HUFFMAN_CODE( 36, "1111111111001", 0x1ff9, 13) HUFFMAN_CODE( 37, "010101", 0x15, 6) HUFFMAN_CODE( 38, "11111000", 0xf8, 8) HUFFMAN_CODE( 39, "11111111010", 0x7fa, 11) HUFFMAN_CODE( 40, "1111111010", 0x3fa, 10) HUFFMAN_CODE( 41, "1111111011", 0x3fb, 10) HUFFMAN_CODE( 42, "11111001", 0xf9, 8) HUFFMAN_CODE( 43, "11111111011", 0x7fb, 11) HUFFMAN_CODE( 44, "11111010", 0xfa, 8) HUFFMAN_CODE( 45, "010110", 0x16, 6) HUFFMAN_CODE( 46, "010111", 0x17, 6) HUFFMAN_CODE( 47, "011000", 0x18, 6) HUFFMAN_CODE( 48, "00000", 0x0, 5) HUFFMAN_CODE( 49, "00001", 0x1, 5) HUFFMAN_CODE( 50, "00010", 0x2, 5) HUFFMAN_CODE( 51, "011001", 0x19, 6) HUFFMAN_CODE( 52, "011010", 0x1a, 6) HUFFMAN_CODE( 53, "011011", 0x1b, 6) HUFFMAN_CODE( 54, "011100", 0x1c, 6) HUFFMAN_CODE( 55, "011101", 0x1d, 6) HUFFMAN_CODE( 56, "011110", 0x1e, 6) HUFFMAN_CODE( 57, "011111", 0x1f, 6) HUFFMAN_CODE( 58, "1011100", 0x5c, 7) HUFFMAN_CODE( 59, "11111011", 0xfb, 8) HUFFMAN_CODE( 60, "111111111111100", 0x7ffc, 15) HUFFMAN_CODE( 61, "100000", 0x20, 6) HUFFMAN_CODE( 62, "111111111011", 0xffb, 12) HUFFMAN_CODE( 63, "1111111100", 0x3fc, 10) HUFFMAN_CODE( 64, "1111111111010", 0x1ffa, 13) HUFFMAN_CODE( 65, "100001", 0x21, 6) HUFFMAN_CODE( 66, "1011101", 0x5d, 7) HUFFMAN_CODE( 67, "1011110", 0x5e, 7) HUFFMAN_CODE( 68, "1011111", 0x5f, 7) 
HUFFMAN_CODE( 69, "1100000", 0x60, 7) HUFFMAN_CODE( 70, "1100001", 0x61, 7) HUFFMAN_CODE( 71, "1100010", 0x62, 7) HUFFMAN_CODE( 72, "1100011", 0x63, 7) HUFFMAN_CODE( 73, "1100100", 0x64, 7) HUFFMAN_CODE( 74, "1100101", 0x65, 7) HUFFMAN_CODE( 75, "1100110", 0x66, 7) HUFFMAN_CODE( 76, "1100111", 0x67, 7) HUFFMAN_CODE( 77, "1101000", 0x68, 7) HUFFMAN_CODE( 78, "1101001", 0x69, 7) HUFFMAN_CODE( 79, "1101010", 0x6a, 7) HUFFMAN_CODE( 80, "1101011", 0x6b, 7) HUFFMAN_CODE( 81, "1101100", 0x6c, 7) HUFFMAN_CODE( 82, "1101101", 0x6d, 7) HUFFMAN_CODE( 83, "1101110", 0x6e, 7) HUFFMAN_CODE( 84, "1101111", 0x6f, 7) HUFFMAN_CODE( 85, "1110000", 0x70, 7) HUFFMAN_CODE( 86, "1110001", 0x71, 7) HUFFMAN_CODE( 87, "1110010", 0x72, 7) HUFFMAN_CODE( 88, "11111100", 0xfc, 8) HUFFMAN_CODE( 89, "1110011", 0x73, 7) HUFFMAN_CODE( 90, "11111101", 0xfd, 8) HUFFMAN_CODE( 91, "1111111111011", 0x1ffb, 13) HUFFMAN_CODE( 92, "1111111111111110000", 0x7fff0, 19) HUFFMAN_CODE( 93, "1111111111100", 0x1ffc, 13) HUFFMAN_CODE( 94, "11111111111100", 0x3ffc, 14) HUFFMAN_CODE( 95, "100010", 0x22, 6) HUFFMAN_CODE( 96, "111111111111101", 0x7ffd, 15) HUFFMAN_CODE( 97, "00011", 0x3, 5) HUFFMAN_CODE( 98, "100011", 0x23, 6) HUFFMAN_CODE( 99, "00100", 0x4, 5) HUFFMAN_CODE(100, "100100", 0x24, 6) HUFFMAN_CODE(101, "00101", 0x5, 5) HUFFMAN_CODE(102, "100101", 0x25, 6) HUFFMAN_CODE(103, "100110", 0x26, 6) HUFFMAN_CODE(104, "100111", 0x27, 6) HUFFMAN_CODE(105, "00110", 0x6, 5) HUFFMAN_CODE(106, "1110100", 0x74, 7) HUFFMAN_CODE(107, "1110101", 0x75, 7) HUFFMAN_CODE(108, "101000", 0x28, 6) HUFFMAN_CODE(109, "101001", 0x29, 6) HUFFMAN_CODE(110, "101010", 0x2a, 6) HUFFMAN_CODE(111, "00111", 0x7, 5) HUFFMAN_CODE(112, "101011", 0x2b, 6) HUFFMAN_CODE(113, "1110110", 0x76, 7) HUFFMAN_CODE(114, "101100", 0x2c, 6) HUFFMAN_CODE(115, "01000", 0x8, 5) HUFFMAN_CODE(116, "01001", 0x9, 5) HUFFMAN_CODE(117, "101101", 0x2d, 6) HUFFMAN_CODE(118, "1110111", 0x77, 7) HUFFMAN_CODE(119, "1111000", 0x78, 7) HUFFMAN_CODE(120, "1111001", 0x79, 7) HUFFMAN_CODE(121, "1111010", 0x7a, 7) HUFFMAN_CODE(122, "1111011", 0x7b, 7) HUFFMAN_CODE(123, "111111111111110", 0x7ffe, 15) HUFFMAN_CODE(124, "11111111100", 0x7fc, 11) HUFFMAN_CODE(125, "11111111111101", 0x3ffd, 14) HUFFMAN_CODE(126, "1111111111101", 0x1ffd, 13) HUFFMAN_CODE(127, "1111111111111111111111111100", 0xffffffc, 28) HUFFMAN_CODE(128, "11111111111111100110", 0xfffe6, 20) HUFFMAN_CODE(129, "1111111111111111010010", 0x3fffd2, 22) HUFFMAN_CODE(130, "11111111111111100111", 0xfffe7, 20) HUFFMAN_CODE(131, "11111111111111101000", 0xfffe8, 20) HUFFMAN_CODE(132, "1111111111111111010011", 0x3fffd3, 22) HUFFMAN_CODE(133, "1111111111111111010100", 0x3fffd4, 22) HUFFMAN_CODE(134, "1111111111111111010101", 0x3fffd5, 22) HUFFMAN_CODE(135, "11111111111111111011001", 0x7fffd9, 23) HUFFMAN_CODE(136, "1111111111111111010110", 0x3fffd6, 22) HUFFMAN_CODE(137, "11111111111111111011010", 0x7fffda, 23) HUFFMAN_CODE(138, "11111111111111111011011", 0x7fffdb, 23) HUFFMAN_CODE(139, "11111111111111111011100", 0x7fffdc, 23) HUFFMAN_CODE(140, "11111111111111111011101", 0x7fffdd, 23) HUFFMAN_CODE(141, "11111111111111111011110", 0x7fffde, 23) HUFFMAN_CODE(142, "111111111111111111101011", 0xffffeb, 24) HUFFMAN_CODE(143, "11111111111111111011111", 0x7fffdf, 23) HUFFMAN_CODE(144, "111111111111111111101100", 0xffffec, 24) HUFFMAN_CODE(145, "111111111111111111101101", 0xffffed, 24) HUFFMAN_CODE(146, "1111111111111111010111", 0x3fffd7, 22) HUFFMAN_CODE(147, "11111111111111111100000", 0x7fffe0, 23) HUFFMAN_CODE(148, "111111111111111111101110", 0xffffee, 24) 
HUFFMAN_CODE(149, "11111111111111111100001", 0x7fffe1, 23) HUFFMAN_CODE(150, "11111111111111111100010", 0x7fffe2, 23) HUFFMAN_CODE(151, "11111111111111111100011", 0x7fffe3, 23) HUFFMAN_CODE(152, "11111111111111111100100", 0x7fffe4, 23) HUFFMAN_CODE(153, "111111111111111011100", 0x1fffdc, 21) HUFFMAN_CODE(154, "1111111111111111011000", 0x3fffd8, 22) HUFFMAN_CODE(155, "11111111111111111100101", 0x7fffe5, 23) HUFFMAN_CODE(156, "1111111111111111011001", 0x3fffd9, 22) HUFFMAN_CODE(157, "11111111111111111100110", 0x7fffe6, 23) HUFFMAN_CODE(158, "11111111111111111100111", 0x7fffe7, 23) HUFFMAN_CODE(159, "111111111111111111101111", 0xffffef, 24) HUFFMAN_CODE(160, "1111111111111111011010", 0x3fffda, 22) HUFFMAN_CODE(161, "111111111111111011101", 0x1fffdd, 21) HUFFMAN_CODE(162, "11111111111111101001", 0xfffe9, 20) HUFFMAN_CODE(163, "1111111111111111011011", 0x3fffdb, 22) HUFFMAN_CODE(164, "1111111111111111011100", 0x3fffdc, 22) HUFFMAN_CODE(165, "11111111111111111101000", 0x7fffe8, 23) HUFFMAN_CODE(166, "11111111111111111101001", 0x7fffe9, 23) HUFFMAN_CODE(167, "111111111111111011110", 0x1fffde, 21) HUFFMAN_CODE(168, "11111111111111111101010", 0x7fffea, 23) HUFFMAN_CODE(169, "1111111111111111011101", 0x3fffdd, 22) HUFFMAN_CODE(170, "1111111111111111011110", 0x3fffde, 22) HUFFMAN_CODE(171, "111111111111111111110000", 0xfffff0, 24) HUFFMAN_CODE(172, "111111111111111011111", 0x1fffdf, 21) HUFFMAN_CODE(173, "1111111111111111011111", 0x3fffdf, 22) HUFFMAN_CODE(174, "11111111111111111101011", 0x7fffeb, 23) HUFFMAN_CODE(175, "11111111111111111101100", 0x7fffec, 23) HUFFMAN_CODE(176, "111111111111111100000", 0x1fffe0, 21) HUFFMAN_CODE(177, "111111111111111100001", 0x1fffe1, 21) HUFFMAN_CODE(178, "1111111111111111100000", 0x3fffe0, 22) HUFFMAN_CODE(179, "111111111111111100010", 0x1fffe2, 21) HUFFMAN_CODE(180, "11111111111111111101101", 0x7fffed, 23) HUFFMAN_CODE(181, "1111111111111111100001", 0x3fffe1, 22) HUFFMAN_CODE(182, "11111111111111111101110", 0x7fffee, 23) HUFFMAN_CODE(183, "11111111111111111101111", 0x7fffef, 23) HUFFMAN_CODE(184, "11111111111111101010", 0xfffea, 20) HUFFMAN_CODE(185, "1111111111111111100010", 0x3fffe2, 22) HUFFMAN_CODE(186, "1111111111111111100011", 0x3fffe3, 22) HUFFMAN_CODE(187, "1111111111111111100100", 0x3fffe4, 22) HUFFMAN_CODE(188, "11111111111111111110000", 0x7ffff0, 23) HUFFMAN_CODE(189, "1111111111111111100101", 0x3fffe5, 22) HUFFMAN_CODE(190, "1111111111111111100110", 0x3fffe6, 22) HUFFMAN_CODE(191, "11111111111111111110001", 0x7ffff1, 23) HUFFMAN_CODE(192, "11111111111111111111100000", 0x3ffffe0, 26) HUFFMAN_CODE(193, "11111111111111111111100001", 0x3ffffe1, 26) HUFFMAN_CODE(194, "11111111111111101011", 0xfffeb, 20) HUFFMAN_CODE(195, "1111111111111110001", 0x7fff1, 19) HUFFMAN_CODE(196, "1111111111111111100111", 0x3fffe7, 22) HUFFMAN_CODE(197, "11111111111111111110010", 0x7ffff2, 23) HUFFMAN_CODE(198, "1111111111111111101000", 0x3fffe8, 22) HUFFMAN_CODE(199, "1111111111111111111101100", 0x1ffffec, 25) HUFFMAN_CODE(200, "11111111111111111111100010", 0x3ffffe2, 26) HUFFMAN_CODE(201, "11111111111111111111100011", 0x3ffffe3, 26) HUFFMAN_CODE(202, "11111111111111111111100100", 0x3ffffe4, 26) HUFFMAN_CODE(203, "111111111111111111111011110", 0x7ffffde, 27) HUFFMAN_CODE(204, "111111111111111111111011111", 0x7ffffdf, 27) HUFFMAN_CODE(205, "11111111111111111111100101", 0x3ffffe5, 26) HUFFMAN_CODE(206, "111111111111111111110001", 0xfffff1, 24) HUFFMAN_CODE(207, "1111111111111111111101101", 0x1ffffed, 25) HUFFMAN_CODE(208, "1111111111111110010", 0x7fff2, 19) HUFFMAN_CODE(209, 
"111111111111111100011", 0x1fffe3, 21) HUFFMAN_CODE(210, "11111111111111111111100110", 0x3ffffe6, 26) HUFFMAN_CODE(211, "111111111111111111111100000", 0x7ffffe0, 27) HUFFMAN_CODE(212, "111111111111111111111100001", 0x7ffffe1, 27) HUFFMAN_CODE(213, "11111111111111111111100111", 0x3ffffe7, 26) HUFFMAN_CODE(214, "111111111111111111111100010", 0x7ffffe2, 27) HUFFMAN_CODE(215, "111111111111111111110010", 0xfffff2, 24) HUFFMAN_CODE(216, "111111111111111100100", 0x1fffe4, 21) HUFFMAN_CODE(217, "111111111111111100101", 0x1fffe5, 21) HUFFMAN_CODE(218, "11111111111111111111101000", 0x3ffffe8, 26) HUFFMAN_CODE(219, "11111111111111111111101001", 0x3ffffe9, 26) HUFFMAN_CODE(220, "1111111111111111111111111101", 0xffffffd, 28) HUFFMAN_CODE(221, "111111111111111111111100011", 0x7ffffe3, 27) HUFFMAN_CODE(222, "111111111111111111111100100", 0x7ffffe4, 27) HUFFMAN_CODE(223, "111111111111111111111100101", 0x7ffffe5, 27) HUFFMAN_CODE(224, "11111111111111101100", 0xfffec, 20) HUFFMAN_CODE(225, "111111111111111111110011", 0xfffff3, 24) HUFFMAN_CODE(226, "11111111111111101101", 0xfffed, 20) HUFFMAN_CODE(227, "111111111111111100110", 0x1fffe6, 21) HUFFMAN_CODE(228, "1111111111111111101001", 0x3fffe9, 22) HUFFMAN_CODE(229, "111111111111111100111", 0x1fffe7, 21) HUFFMAN_CODE(230, "111111111111111101000", 0x1fffe8, 21) HUFFMAN_CODE(231, "11111111111111111110011", 0x7ffff3, 23) HUFFMAN_CODE(232, "1111111111111111101010", 0x3fffea, 22) HUFFMAN_CODE(233, "1111111111111111101011", 0x3fffeb, 22) HUFFMAN_CODE(234, "1111111111111111111101110", 0x1ffffee, 25) HUFFMAN_CODE(235, "1111111111111111111101111", 0x1ffffef, 25) HUFFMAN_CODE(236, "111111111111111111110100", 0xfffff4, 24) HUFFMAN_CODE(237, "111111111111111111110101", 0xfffff5, 24) HUFFMAN_CODE(238, "11111111111111111111101010", 0x3ffffea, 26) HUFFMAN_CODE(239, "11111111111111111110100", 0x7ffff4, 23) HUFFMAN_CODE(240, "11111111111111111111101011", 0x3ffffeb, 26) HUFFMAN_CODE(241, "111111111111111111111100110", 0x7ffffe6, 27) HUFFMAN_CODE(242, "11111111111111111111101100", 0x3ffffec, 26) HUFFMAN_CODE(243, "11111111111111111111101101", 0x3ffffed, 26) HUFFMAN_CODE(244, "111111111111111111111100111", 0x7ffffe7, 27) HUFFMAN_CODE(245, "111111111111111111111101000", 0x7ffffe8, 27) HUFFMAN_CODE(246, "111111111111111111111101001", 0x7ffffe9, 27) HUFFMAN_CODE(247, "111111111111111111111101010", 0x7ffffea, 27) HUFFMAN_CODE(248, "111111111111111111111101011", 0x7ffffeb, 27) HUFFMAN_CODE(249, "1111111111111111111111111110", 0xffffffe, 28) HUFFMAN_CODE(250, "111111111111111111111101100", 0x7ffffec, 27) HUFFMAN_CODE(251, "111111111111111111111101101", 0x7ffffed, 27) HUFFMAN_CODE(252, "111111111111111111111101110", 0x7ffffee, 27) HUFFMAN_CODE(253, "111111111111111111111101111", 0x7ffffef, 27) HUFFMAN_CODE(254, "111111111111111111111110000", 0x7fffff0, 27) HUFFMAN_CODE(255, "11111111111111111111101110", 0x3ffffee, 26) aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h000066400000000000000000000203121456575232400323530ustar00rootroot00000000000000#ifndef AWS_HTTP2_STREAM_MANAGER_IMPL_H #define AWS_HTTP2_STREAM_MANAGER_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include enum aws_h2_sm_state_type { AWS_H2SMST_READY, AWS_H2SMST_DESTROYING, /* On zero external ref count, can destroy */ }; enum aws_h2_sm_connection_state_type { AWS_H2SMCST_IDEAL, AWS_H2SMCST_NEARLY_FULL, AWS_H2SMCST_FULL, }; /* Live with the streams opening, and if there no outstanding pending acquisition and no opening streams on the * connection, this structure should die */ struct aws_h2_sm_connection { struct aws_allocator *allocator; struct aws_http2_stream_manager *stream_manager; struct aws_http_connection *connection; uint32_t num_streams_assigned; /* From a stream assigned to the connection until the stream completed or failed to be created from the connection. */ uint32_t max_concurrent_streams; /* lower bound between user configured and the other side */ /* task to send ping periodically from connection thread. */ struct aws_ref_count ref_count; struct aws_channel_task ping_task; struct aws_channel_task ping_timeout_task; struct { bool ping_received; bool stopped_new_requests; uint64_t next_ping_task_time; } thread_data; enum aws_h2_sm_connection_state_type state; }; /* Live from the user request to acquire a stream to the stream completed. */ struct aws_h2_sm_pending_stream_acquisition { struct aws_allocator *allocator; struct aws_linked_list_node node; struct aws_http_make_request_options options; struct aws_h2_sm_connection *sm_connection; /* The connection to make request to. Keep NULL, until find available one and move it to the pending_make_requests list. */ struct aws_http_message *request; struct aws_channel_task make_request_task; aws_http2_stream_manager_on_stream_acquired_fn *callback; void *user_data; }; /* connections_acquiring_count, open_stream_count, pending_make_requests_count AND pending_stream_acquisition_count */ enum aws_sm_count_type { AWS_SMCT_CONNECTIONS_ACQUIRING, AWS_SMCT_OPEN_STREAM, AWS_SMCT_PENDING_MAKE_REQUESTS, AWS_SMCT_PENDING_ACQUISITION, AWS_SMCT_COUNT, }; struct aws_http2_stream_manager { struct aws_allocator *allocator; void *shutdown_complete_user_data; aws_http2_stream_manager_shutdown_complete_fn *shutdown_complete_callback; /** * Underlying connection manager. Always has the same life time with the stream manager who owns it. */ struct aws_http_connection_manager *connection_manager; /** * Refcount managed by user. Once this drops to zero, the manager state transitions to shutting down */ struct aws_ref_count external_ref_count; /** * Internal refcount that keeps connection manager alive. * * It's a sum of connections_acquiring_count, open_stream_count, pending_make_requests_count and * pending_stream_acquisition_count, besides the number of `struct aws_http2_stream_management_transaction` alive. * And one for external usage. * * Once this refcount drops to zero, stream manager should either be cleaned up all the memory all waiting for * the last task to clean un the memory and do nothing else. */ struct aws_ref_count internal_ref_count; struct aws_client_bootstrap *bootstrap; /* Configurations */ size_t max_connections; /* Connection will be closed if 5xx response received from server. */ bool close_connection_on_server_error; uint64_t connection_ping_period_ns; uint64_t connection_ping_timeout_ns; /** * Default is no limit. 0 will be considered as using the default value. * The ideal number of concurrent streams for a connection. Stream manager will try to create a new connection if * one connection reaches this number. 
But, if the max connections reaches, manager will reuse connections to create * the acquired steams as much as possible. */ size_t ideal_concurrent_streams_per_connection; /** * Default is no limit. 0 will be considered as using the default value. * The real number of concurrent streams per connection will be controlled by the minmal value of the setting from * other end and the value here. */ size_t max_concurrent_streams_per_connection; /** * Task to invoke pending acquisition callbacks asynchronously if stream manager is shutting. */ struct aws_event_loop *finish_pending_stream_acquisitions_task_event_loop; /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */ struct { struct aws_mutex lock; /* * A manager can be in one of two states, READY or SHUTTING_DOWN. The state transition * takes place when ref_count drops to zero. */ enum aws_h2_sm_state_type state; /** * A set of all connections that meet all requirement to use. Note: there will be connections not in this set, * but hold by the stream manager, which can be tracked by the streams created on it. Set of `struct * aws_h2_sm_connection *` */ struct aws_random_access_set ideal_available_set; /** * A set of all available connections that exceed the soft limits set by users. Note: there will be connections * not in this set, but hold by the stream manager, which can be tracked by the streams created. Set of `struct * aws_h2_sm_connection *` */ struct aws_random_access_set nonideal_available_set; /* We don't mantain set for connections that is full or "dead" (Cannot make any new streams). We have streams * opening from the connection tracking them */ /** * The set of all incomplete stream acquisition requests (haven't decide what connection to make the request * to), list of `struct aws_h2_sm_pending_stream_acquisition*` */ struct aws_linked_list pending_stream_acquisitions; /** * The number of connections acquired from connection manager and not released yet. */ size_t holding_connections_count; /** * Counts that contributes to the internal refcount. * When the value changes, s_sm_count_increase/decrease_synced needed. * * AWS_SMCT_CONNECTIONS_ACQUIRING: The number of new connections we acquiring from the connection manager. * AWS_SMCT_OPEN_STREAM: The number of streams that opened and not completed yet. * AWS_SMCT_PENDING_MAKE_REQUESTS: The number of streams that scheduled to be made from a connection but haven't * been executed yet. * AWS_SMCT_PENDING_ACQUISITION: The number of all incomplete stream acquisition requests (haven't decide what * connection to make the request to). So that we don't have compute the size of a linked list every time. */ size_t internal_refcount_stats[AWS_SMCT_COUNT]; bool finish_pending_stream_acquisitions_task_scheduled; } synced_data; }; /** * Encompasses all of the external operations that need to be done for various * events: * - User level: * stream manager release * stream acquire * - Internal eventloop (anther thread): * connection_acquired * stream_completed * - Internal (can happen from any thread): * connection acquire * connection release * * The transaction is built under the manager's lock (and the internal state is updated optimistically), * but then executed outside of it. 
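 *
 * Illustrative shape of that pattern (a sketch only, assuming a `stream_manager`
 * pointer in scope; the real build/execute helpers live in the .c file):
 *
 *     struct aws_http2_stream_management_transaction work;
 *     aws_mutex_lock(&stream_manager->synced_data.lock);
 *     ...inspect and update synced_data, record the decisions in `work`...
 *     aws_mutex_unlock(&stream_manager->synced_data.lock);
 *     ...execute `work`: acquire or release connections, schedule make-request tasks...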
*/ struct aws_http2_stream_management_transaction { struct aws_http2_stream_manager *stream_manager; struct aws_allocator *allocator; size_t new_connections; struct aws_h2_sm_connection *sm_connection_to_release; struct aws_linked_list pending_make_requests; /* List of aws_h2_sm_pending_stream_acquisition with chosen connection */ }; #endif /* AWS_HTTP2_STREAM_MANAGER_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/http_impl.h000066400000000000000000000054371456575232400272370ustar00rootroot00000000000000#ifndef AWS_HTTP_IMPL_H #define AWS_HTTP_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /** * Methods that affect internal processing. * This is NOT a definitive list of methods. */ enum aws_http_method { AWS_HTTP_METHOD_UNKNOWN, /* Unrecognized value. */ AWS_HTTP_METHOD_GET, AWS_HTTP_METHOD_HEAD, AWS_HTTP_METHOD_CONNECT, AWS_HTTP_METHOD_COUNT, /* Number of enums */ }; /** * Headers that affect internal processing. * This is NOT a definitive list of headers. */ enum aws_http_header_name { AWS_HTTP_HEADER_UNKNOWN, /* Unrecognized value */ /* Request pseudo-headers */ AWS_HTTP_HEADER_METHOD, AWS_HTTP_HEADER_SCHEME, AWS_HTTP_HEADER_AUTHORITY, AWS_HTTP_HEADER_PATH, /* Response pseudo-headers */ AWS_HTTP_HEADER_STATUS, /* Regular headers */ AWS_HTTP_HEADER_CONNECTION, AWS_HTTP_HEADER_CONTENT_LENGTH, AWS_HTTP_HEADER_EXPECT, AWS_HTTP_HEADER_TRANSFER_ENCODING, AWS_HTTP_HEADER_COOKIE, AWS_HTTP_HEADER_SET_COOKIE, AWS_HTTP_HEADER_HOST, AWS_HTTP_HEADER_CACHE_CONTROL, AWS_HTTP_HEADER_MAX_FORWARDS, AWS_HTTP_HEADER_PRAGMA, AWS_HTTP_HEADER_RANGE, AWS_HTTP_HEADER_TE, AWS_HTTP_HEADER_CONTENT_ENCODING, AWS_HTTP_HEADER_CONTENT_TYPE, AWS_HTTP_HEADER_CONTENT_RANGE, AWS_HTTP_HEADER_TRAILER, AWS_HTTP_HEADER_WWW_AUTHENTICATE, AWS_HTTP_HEADER_AUTHORIZATION, AWS_HTTP_HEADER_PROXY_AUTHENTICATE, AWS_HTTP_HEADER_PROXY_AUTHORIZATION, AWS_HTTP_HEADER_AGE, AWS_HTTP_HEADER_EXPIRES, AWS_HTTP_HEADER_DATE, AWS_HTTP_HEADER_LOCATION, AWS_HTTP_HEADER_RETRY_AFTER, AWS_HTTP_HEADER_VARY, AWS_HTTP_HEADER_WARNING, AWS_HTTP_HEADER_UPGRADE, AWS_HTTP_HEADER_KEEP_ALIVE, AWS_HTTP_HEADER_PROXY_CONNECTION, AWS_HTTP_HEADER_COUNT, /* Number of enums */ }; AWS_EXTERN_C_BEGIN AWS_HTTP_API void aws_http_fatal_assert_library_initialized(void); AWS_HTTP_API struct aws_byte_cursor aws_http_version_to_str(enum aws_http_version version); /** * Returns appropriate enum, or AWS_HTTP_METHOD_UNKNOWN if no match found. * Case-sensitive */ AWS_HTTP_API enum aws_http_method aws_http_str_to_method(struct aws_byte_cursor cursor); /** * Returns appropriate enum, or AWS_HTTP_HEADER_UNKNOWN if no match found. * Not case-sensitive */ AWS_HTTP_API enum aws_http_header_name aws_http_str_to_header_name(struct aws_byte_cursor cursor); /** * Returns appropriate enum, or AWS_HTTP_HEADER_UNKNOWN if no match found. * Case-sensitive (ex: "Connection" -> AWS_HTTP_HEADER_UNKNOWN because we looked for "connection"). */ AWS_HTTP_API enum aws_http_header_name aws_http_lowercase_str_to_header_name(struct aws_byte_cursor cursor); AWS_EXTERN_C_END #endif /* AWS_HTTP_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/proxy_impl.h000066400000000000000000000212321456575232400274300ustar00rootroot00000000000000#ifndef AWS_HTTP_PROXY_IMPL_H #define AWS_HTTP_PROXY_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include struct aws_http_connection_manager_options; struct aws_http_message; struct aws_channel_slot; struct aws_string; struct aws_tls_connection_options; struct aws_http_proxy_negotiator; struct aws_http_proxy_strategy; struct aws_http_proxy_strategy_tunneling_sequence_options; struct aws_http_proxy_strategy_tunneling_kerberos_options; struct aws_http_proxy_strategy_tunneling_ntlm_options; /* * (Successful) State transitions for proxy connections * * Http : None -> Socket Connect -> Success * Https: None -> Socket Connect -> Http Connect -> Tls Negotiation -> Success */ enum aws_proxy_bootstrap_state { AWS_PBS_NONE = 0, AWS_PBS_SOCKET_CONNECT, AWS_PBS_HTTP_CONNECT, AWS_PBS_TLS_NEGOTIATION, AWS_PBS_SUCCESS, AWS_PBS_FAILURE, }; /** * A persistent copy of the aws_http_proxy_options struct. Clones everything appropriate. */ struct aws_http_proxy_config { struct aws_allocator *allocator; enum aws_http_proxy_connection_type connection_type; struct aws_byte_buf host; uint32_t port; struct aws_tls_connection_options *tls_options; struct aws_http_proxy_strategy *proxy_strategy; }; /* * When a proxy connection is made, we wrap the user-supplied user data with this * proxy user data. Callbacks are passed properly to the user. By having this data * available, the proxy request transform that was attached to the connection can extract * the proxy settings it needs in order to properly transform the requests. * * Another possibility would be to fold this data into the connection itself. */ struct aws_http_proxy_user_data { struct aws_allocator *allocator; /* * dynamic proxy connection resolution state */ enum aws_proxy_bootstrap_state state; int error_code; enum aws_http_status_code connect_status_code; /* * The initial http connection object between the client and the proxy. */ struct aws_http_connection *proxy_connection; /* * The http connection object that gets surfaced to callers if http is the final protocol of proxy * negotiation. * * In the case of a forwarding proxy, proxy_connection and final_connection are the same. */ struct aws_http_connection *final_connection; struct aws_http_message *connect_request; struct aws_http_stream *connect_stream; struct aws_http_proxy_negotiator *proxy_negotiator; /* * Cached original connect options */ struct aws_string *original_host; uint32_t original_port; void *original_user_data; struct aws_tls_connection_options *original_tls_options; struct aws_client_bootstrap *original_bootstrap; struct aws_socket_options original_socket_options; bool original_manual_window_management; size_t original_initial_window_size; bool prior_knowledge_http2; struct aws_http1_connection_options original_http1_options; struct aws_http2_connection_options original_http2_options; /* the resource within options are allocated with userdata */ struct aws_hash_table alpn_string_map; /* * setup/shutdown callbacks. We enforce via fatal assert that either the http callbacks are supplied or * the channel callbacks are supplied but never both. * * When using a proxy to ultimately establish an http connection, use the http callbacks. * When using a proxy to establish any other protocol connection, use the raw channel callbacks. * * In the future, we might consider a further refactor which only use raw channel callbacks. 
*/ aws_http_on_client_connection_setup_fn *original_http_on_setup; aws_http_on_client_connection_shutdown_fn *original_http_on_shutdown; aws_client_bootstrap_on_channel_event_fn *original_channel_on_setup; aws_client_bootstrap_on_channel_event_fn *original_channel_on_shutdown; struct aws_http_proxy_config *proxy_config; struct aws_event_loop *requested_event_loop; const struct aws_host_resolution_config *host_resolution_config; }; /* vtable of functions that proxy uses to interact with external systems. * tests override the vtable to mock those systems */ struct aws_http_proxy_system_vtable { int (*aws_channel_setup_client_tls)( struct aws_channel_slot *right_of_slot, struct aws_tls_connection_options *tls_options); }; AWS_EXTERN_C_BEGIN AWS_HTTP_API struct aws_http_proxy_user_data *aws_http_proxy_user_data_new( struct aws_allocator *allocator, const struct aws_http_client_connection_options *options, aws_client_bootstrap_on_channel_event_fn *on_channel_setup, aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown); AWS_HTTP_API void aws_http_proxy_user_data_destroy(struct aws_http_proxy_user_data *user_data); AWS_HTTP_API int aws_http_client_connect_via_proxy(const struct aws_http_client_connection_options *options); AWS_HTTP_API int aws_http_rewrite_uri_for_proxy_request( struct aws_http_message *request, struct aws_http_proxy_user_data *proxy_user_data); AWS_HTTP_API void aws_http_proxy_system_set_vtable(struct aws_http_proxy_system_vtable *vtable); /** * Checks if tunneling proxy negotiation should continue to try and connect * @param proxy_negotiator negotiator to query * @return true if another connect request should be attempted, false otherwise */ AWS_HTTP_API enum aws_http_proxy_negotiation_retry_directive aws_http_proxy_negotiator_get_retry_directive( struct aws_http_proxy_negotiator *proxy_negotiator); /** * Constructor for a tunnel-only proxy strategy that applies no changes to outbound CONNECT requests. Intended to be * the first link in an adaptive sequence for a tunneling proxy: first try a basic CONNECT, then based on the response, * later links are allowed to make attempts. * * @param allocator memory allocator to use * @return a new proxy strategy if successfully constructed, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_one_time_identity( struct aws_allocator *allocator); /** * Constructor for a forwarding-only proxy strategy that does nothing. Exists so that all proxy logic uses a * strategy. * * @param allocator memory allocator to use * @return a new proxy strategy if successfully constructed, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_forwarding_identity(struct aws_allocator *allocator); /** * Constructor for a tunneling proxy strategy that contains a set of sub-strategies which are tried * sequentially in order. Each strategy has the choice to either proceed on a fresh connection or * reuse the current one. * * @param allocator memory allocator to use * @param config sequence configuration options * @return a new proxy strategy if successfully constructed, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_sequence( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_sequence_options *config); /** * A constructor for a proxy strategy that performs kerberos authentication by adding the appropriate * header and header value to CONNECT requests. 
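 * (Typically this takes the shape of a Proxy-Authorization header carrying the
 * negotiated token on the outgoing CONNECT request; the exact header name and
 * value format are an implementation detail of the strategy.)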
* * Currently only supports synchronous fetch of kerberos token values. * * @param allocator memory allocator to use * @param config kerberos authentication configuration info * @return a new proxy strategy if successfully constructed, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_kerberos( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_kerberos_options *config); /** * Constructor for an NTLM proxy strategy. Because ntlm is a challenge-response authentication protocol, this * strategy will only succeed in a chain in a non-leading position. The strategy extracts the challenge from the * proxy's response to a previous CONNECT request in the chain. * * Currently only supports synchronous fetch of token values. * * @param allocator memory allocator to use * @param config configuration options for the strategy * @return a new proxy strategy if successfully constructed, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_ntlm_options *config); AWS_EXTERN_C_END #endif /* AWS_HTTP_PROXY_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/random_access_set.h000066400000000000000000000060151456575232400307040ustar00rootroot00000000000000#ifndef AWS_HTTP_RANDOM_ACCESS_SET_H #define AWS_HTTP_RANDOM_ACCESS_SET_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include /* TODO: someday, if you want to use it from other repo, move it to aws-c-common. */ struct aws_random_access_set_impl; struct aws_random_access_set { struct aws_random_access_set_impl *impl; }; AWS_EXTERN_C_BEGIN /** * Initialize the set, which support constant time of insert, remove and get random element * from the data structure. * * The underlying hash map will use hash_fn to compute the hash of each element. equals_fn to compute equality of two * keys. * * @param set Pointer of structure to initialize with * @param allocator Allocator * @param hash_fn Compute the hash of each element * @param equals_fn Compute equality of two elements * @param destroy_element_fn Optional. Called when the element is removed * @param initial_item_allocation The initial number of item to allocate. * @return AWS_OP_ERR if any fails to initialize, AWS_OP_SUCCESS on success. */ AWS_HTTP_API int aws_random_access_set_init( struct aws_random_access_set *set, struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_element_fn, size_t initial_item_allocation); AWS_HTTP_API void aws_random_access_set_clean_up(struct aws_random_access_set *set); /** * Insert the element to the end of the array list. A map from the element to the index of it to the hash table. */ AWS_HTTP_API int aws_random_access_set_add(struct aws_random_access_set *set, const void *element, bool *added); /** * Find and remove the element from the table. If the element does not exist, or the table is empty, nothing will * happen. Switch the element with the end of the arraylist if needed. Remove the end of the arraylist */ AWS_HTTP_API int aws_random_access_set_remove(struct aws_random_access_set *set, const void *element); /** * Get the pointer to a random element from the data structure. Fails when the data structure is empty. 
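 */

/*
 * Illustrative sketch only (not part of the upstream header): one init/add/random-get/clean-up
 * cycle of the set declared in this file, keyed by C strings. The hash and equality callbacks
 * (aws_hash_c_string, aws_hash_callback_c_str_eq) come from aws-c-common's hash_table.h; the
 * helper name and sample elements are made up for the example.
 */
#include <aws/common/common.h>
#include <aws/common/hash_table.h>
#include <aws/http/private/random_access_set.h>

#include <stdio.h>

static void s_random_access_set_example(struct aws_allocator *alloc) {
    struct aws_random_access_set set;
    if (aws_random_access_set_init(
            &set, alloc, aws_hash_c_string, aws_hash_callback_c_str_eq, NULL /* no destroy callback */, 4)) {
        return; /* error already raised */
    }

    bool added = false;
    aws_random_access_set_add(&set, "connection-a", &added);
    aws_random_access_set_add(&set, "connection-b", &added);

    /* Constant-time random pick, e.g. to choose an idle connection. */
    void *picked = NULL;
    if (aws_random_access_set_random_get_ptr(&set, &picked) == AWS_OP_SUCCESS) {
        printf("picked: %s\n", (const char *)picked);
    }

    aws_random_access_set_clean_up(&set);
}

/*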
*/ AWS_HTTP_API int aws_random_access_set_random_get_ptr(const struct aws_random_access_set *set, void **out); AWS_HTTP_API size_t aws_random_access_set_get_size(const struct aws_random_access_set *set); /** * Check the element exist in the data structure or not. */ AWS_HTTP_API int aws_random_access_set_exist(const struct aws_random_access_set *set, const void *element, bool *exist); /** * Get the pointer to an element that currently stored at that index. It may change if operations like remove and add * happens. Helpful for debugging and iterating through the whole set. */ AWS_HTTP_API int aws_random_access_set_random_get_ptr_index(const struct aws_random_access_set *set, void **out, size_t index); AWS_EXTERN_C_END #endif /* AWS_HTTP_RANDOM_ACCESS_SET_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/request_response_impl.h000066400000000000000000000057441456575232400316670ustar00rootroot00000000000000#ifndef AWS_HTTP_REQUEST_RESPONSE_IMPL_H #define AWS_HTTP_REQUEST_RESPONSE_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include struct aws_http_stream_vtable { void (*destroy)(struct aws_http_stream *stream); void (*update_window)(struct aws_http_stream *stream, size_t increment_size); int (*activate)(struct aws_http_stream *stream); void (*cancel)(struct aws_http_stream *stream, int error_code); int (*http1_write_chunk)(struct aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options); int (*http1_add_trailer)(struct aws_http_stream *http1_stream, const struct aws_http_headers *trailing_headers); int (*http2_reset_stream)(struct aws_http_stream *http2_stream, uint32_t http2_error); int (*http2_get_received_error_code)(struct aws_http_stream *http2_stream, uint32_t *http2_error); int (*http2_get_sent_error_code)(struct aws_http_stream *http2_stream, uint32_t *http2_error); int (*http2_write_data)( struct aws_http_stream *http2_stream, const struct aws_http2_stream_write_data_options *options); }; /** * Base class for streams. * There are specific implementations for each HTTP version. */ struct aws_http_stream { const struct aws_http_stream_vtable *vtable; struct aws_allocator *alloc; struct aws_http_connection *owning_connection; uint32_t id; void *user_data; aws_http_on_incoming_headers_fn *on_incoming_headers; aws_http_on_incoming_header_block_done_fn *on_incoming_header_block_done; aws_http_on_incoming_body_fn *on_incoming_body; aws_http_on_stream_metrics_fn *on_metrics; aws_http_on_stream_complete_fn *on_complete; aws_http_on_stream_destroy_fn *on_destroy; struct aws_atomic_var refcount; enum aws_http_method request_method; struct aws_http_stream_metrics metrics; union { struct aws_http_stream_client_data { int response_status; uint64_t response_first_byte_timeout_ms; /* Using aws_task instead of aws_channel_task because, currently, channel-tasks can't be canceled. * We only touch this from the connection's thread */ struct aws_task response_first_byte_timeout_task; } client; struct aws_http_stream_server_data { struct aws_byte_cursor request_method_str; struct aws_byte_cursor request_path; aws_http_on_incoming_request_done_fn *on_request_done; } server; } client_or_server_data; /* On client connections, `client_data` points to client_or_server_data.client and `server_data` is null. 
* Opposite is true on server connections */ struct aws_http_stream_client_data *client_data; struct aws_http_stream_server_data *server_data; }; #endif /* AWS_HTTP_REQUEST_RESPONSE_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/strutil.h000066400000000000000000000051351456575232400267400ustar00rootroot00000000000000#ifndef AWS_HTTP_STRUTIL_H #define AWS_HTTP_STRUTIL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_EXTERN_C_BEGIN /** * Return a cursor with all leading and trailing SPACE and TAB characters removed. * RFC7230 section 3.2.3 Whitespace * Examples: * " \t a \t " -> "a" * "a \t a" -> "a \t a" */ AWS_HTTP_API struct aws_byte_cursor aws_strutil_trim_http_whitespace(struct aws_byte_cursor cursor); /** * Return whether this is a valid token, as defined by RFC7230 section 3.2.6: * token = 1*tchar * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" * / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" * / DIGIT / ALPHA */ AWS_HTTP_API bool aws_strutil_is_http_token(struct aws_byte_cursor token); /** * Same as aws_strutil_is_http_token(), but uppercase letters are forbidden. */ AWS_HTTP_API bool aws_strutil_is_lowercase_http_token(struct aws_byte_cursor token); /** * Return whether this ASCII/UTF-8 sequence is a valid HTTP header field-value. * * As defined in RFC7230 section 3.2 (except we are ALWAYS forbidding obs-fold): * * field-value = *( field-content / obs-fold ) * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] * field-vchar = VCHAR / obs-text * VCHAR = %x21-7E ; visible (printing) characters * obs-text = %x80-FF * * Note that we ALWAYS forbid obs-fold. Section 3.2.4 explains how * obs-fold is deprecated "except within the message/http media type". */ AWS_HTTP_API bool aws_strutil_is_http_field_value(struct aws_byte_cursor cursor); /** * Return whether this ASCII/UTF-8 sequence is a valid HTTP response status reason-phrase. * * As defined in RFC7230 section 3.1.2: * * reason-phrase = *( HTAB / SP / VCHAR / obs-text ) * VCHAR = %x21-7E ; visible (printing) characters * obs-text = %x80-FF */ AWS_HTTP_API bool aws_strutil_is_http_reason_phrase(struct aws_byte_cursor cursor); /** * Return whether this ASCII/UTF-8 sequence is a valid HTTP request-target. * * TODO: Actually check the complete grammar as defined in RFC7230 5.3 and * RFC3986. Currently this just checks whether the sequence is blatantly illegal * (ex: contains CR or LF) */ AWS_HTTP_API bool aws_strutil_is_http_request_target(struct aws_byte_cursor cursor); /** * Return whether this ASCII/UTF-8 sequence start with ":" or not as the requirement for pseudo headers. */ AWS_HTTP_API bool aws_strutil_is_http_pseudo_header_name(struct aws_byte_cursor cursor); AWS_EXTERN_C_END #endif /* AWS_HTTP_STRUTIL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/websocket_decoder.h000066400000000000000000000057001456575232400307030ustar00rootroot00000000000000#ifndef AWS_HTTP_WEBSOCKET_DECODER_H #define AWS_HTTP_WEBSOCKET_DECODER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /* Called when the non-payload portion of a frame has been decoded. */ typedef int(aws_websocket_decoder_frame_fn)(const struct aws_websocket_frame *frame, void *user_data); /* Called repeatedly as the payload is decoded. If a mask was used, the data has been unmasked. 
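 */

/*
 * Illustrative sketch only (not part of the upstream header): exercising the trim and token
 * checks declared in strutil.h earlier in this tree. aws_byte_cursor_from_c_str() comes from
 * aws-c-common; the sample strings are arbitrary.
 */
#include <aws/common/byte_buf.h>
#include <aws/http/private/strutil.h>

#include <assert.h>

static void s_strutil_example(void) {
    /* Leading/trailing SPACE and TAB are stripped: " \t gzip \t " -> "gzip". */
    struct aws_byte_cursor raw = aws_byte_cursor_from_c_str(" \t gzip \t ");
    struct aws_byte_cursor trimmed = aws_strutil_trim_http_whitespace(raw);
    assert(trimmed.len == 4);

    /* Header names must be RFC7230 tokens; the lowercase variant also rejects uppercase letters. */
    assert(aws_strutil_is_http_token(aws_byte_cursor_from_c_str("Content-Length")));
    assert(!aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str("Content-Length")));
    assert(aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str("content-length")));
}

/*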
*/ typedef int(aws_websocket_decoder_payload_fn)(struct aws_byte_cursor data, void *user_data); /** * Each state consumes data and/or moves decoder to a subsequent state. */ enum aws_websocket_decoder_state { AWS_WEBSOCKET_DECODER_STATE_INIT, AWS_WEBSOCKET_DECODER_STATE_OPCODE_BYTE, AWS_WEBSOCKET_DECODER_STATE_LENGTH_BYTE, AWS_WEBSOCKET_DECODER_STATE_EXTENDED_LENGTH, AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK, AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY, AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK, AWS_WEBSOCKET_DECODER_STATE_PAYLOAD, AWS_WEBSOCKET_DECODER_STATE_FRAME_END, AWS_WEBSOCKET_DECODER_STATE_DONE, }; struct aws_websocket_decoder { enum aws_websocket_decoder_state state; uint64_t state_bytes_processed; /* For multi-byte states, the number of bytes processed so far */ uint8_t state_cache[8]; /* For multi-byte states to cache data that might be split across packets */ struct aws_websocket_frame current_frame; /* Data about current frame being decoded */ bool expecting_continuation_data_frame; /* True when the next data frame must be CONTINUATION frame */ /* True while processing a TEXT "message" (from the start of a TEXT frame, * until the end of the TEXT or CONTINUATION frame with the FIN bit set). */ bool processing_text_message; struct aws_utf8_decoder *text_message_validator; void *user_data; aws_websocket_decoder_frame_fn *on_frame; aws_websocket_decoder_payload_fn *on_payload; }; AWS_EXTERN_C_BEGIN AWS_HTTP_API void aws_websocket_decoder_init( struct aws_websocket_decoder *decoder, struct aws_allocator *alloc, aws_websocket_decoder_frame_fn *on_frame, aws_websocket_decoder_payload_fn *on_payload, void *user_data); AWS_HTTP_API void aws_websocket_decoder_clean_up(struct aws_websocket_decoder *decoder); /** * Returns when all data is processed, or a frame and its payload have completed. * `data` will be advanced to reflect the amount of data processed by this call. * `frame_complete` will be set true if this call returned due to completion of a frame. * The `on_frame` and `on_payload` callbacks may each be invoked once as a result of this call. * If an error occurs, the decoder is invalid forevermore. */ AWS_HTTP_API int aws_websocket_decoder_process( struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data, bool *frame_complete); AWS_EXTERN_C_END #endif /* AWS_HTTP_WEBSOCKET_DECODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/websocket_encoder.h000066400000000000000000000033521456575232400307160ustar00rootroot00000000000000#ifndef AWS_HTTP_WEBSOCKET_ENCODER_H #define AWS_HTTP_WEBSOCKET_ENCODER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
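 */

/*
 * Illustrative sketch only (not part of the upstream header): driving the websocket decoder
 * declared above over a buffer of wire bytes. The callback and helper names are made up;
 * struct aws_websocket_frame comes from websocket_impl.h in this same tree.
 */
#include <aws/common/byte_buf.h>
#include <aws/http/private/websocket_decoder.h>
#include <aws/http/private/websocket_impl.h>

#include <stdio.h>

static int s_on_frame(const struct aws_websocket_frame *frame, void *user_data) {
    (void)user_data;
    printf(
        "frame: opcode=%u fin=%d payload_length=%llu\n",
        (unsigned)frame->opcode,
        (int)frame->fin,
        (unsigned long long)frame->payload_length);
    return AWS_OP_SUCCESS;
}

static int s_on_payload(struct aws_byte_cursor data, void *user_data) {
    (void)user_data;
    fwrite(data.ptr, 1, data.len, stdout); /* already unmasked by the decoder */
    return AWS_OP_SUCCESS;
}

static int s_decode_buffer(struct aws_allocator *alloc, struct aws_byte_cursor wire_bytes) {
    struct aws_websocket_decoder decoder;
    aws_websocket_decoder_init(&decoder, alloc, s_on_frame, s_on_payload, NULL /* user_data */);

    /* Each call advances the cursor; keep feeding until everything is consumed. */
    while (wire_bytes.len > 0) {
        bool frame_complete = false;
        if (aws_websocket_decoder_process(&decoder, &wire_bytes, &frame_complete)) {
            aws_websocket_decoder_clean_up(&decoder);
            return AWS_OP_ERR; /* the decoder is invalid after an error */
        }
    }

    aws_websocket_decoder_clean_up(&decoder);
    return AWS_OP_SUCCESS;
}

/*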
*/ #include typedef int(aws_websocket_encoder_payload_fn)(struct aws_byte_buf *out_buf, void *user_data); enum aws_websocket_encoder_state { AWS_WEBSOCKET_ENCODER_STATE_INIT, AWS_WEBSOCKET_ENCODER_STATE_OPCODE_BYTE, AWS_WEBSOCKET_ENCODER_STATE_LENGTH_BYTE, AWS_WEBSOCKET_ENCODER_STATE_EXTENDED_LENGTH, AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK, AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY, AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK, AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD, AWS_WEBSOCKET_ENCODER_STATE_DONE, }; struct aws_websocket_encoder { enum aws_websocket_encoder_state state; uint64_t state_bytes_processed; struct aws_websocket_frame frame; bool is_frame_in_progress; /* True when the next data frame must be a CONTINUATION frame */ bool expecting_continuation_data_frame; void *user_data; aws_websocket_encoder_payload_fn *stream_outgoing_payload; }; AWS_EXTERN_C_BEGIN AWS_HTTP_API void aws_websocket_encoder_init( struct aws_websocket_encoder *encoder, aws_websocket_encoder_payload_fn *stream_outgoing_payload, void *user_data); AWS_HTTP_API int aws_websocket_encoder_start_frame(struct aws_websocket_encoder *encoder, const struct aws_websocket_frame *frame); AWS_HTTP_API bool aws_websocket_encoder_is_frame_in_progress(const struct aws_websocket_encoder *encoder); AWS_HTTP_API int aws_websocket_encoder_process(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf); AWS_EXTERN_C_END #endif /* AWS_HTTP_WEBSOCKET_ENCODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/private/websocket_impl.h000066400000000000000000000106761456575232400302470ustar00rootroot00000000000000#ifndef AWS_HTTP_WEBSOCKET_IMPL_H #define AWS_HTTP_WEBSOCKET_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_http_client_connection_options; struct aws_http_connection; struct aws_http_make_request_options; /* RFC-6455 Section 5.2 Base Framing Protocol * Payload length: 7 bits, 7+16 bits, or 7+64 bits * * The length of the "Payload data", in bytes: if 0-125, that is the * payload length. If 126, the following 2 bytes interpreted as a * 16-bit unsigned integer are the payload length. If 127, the * following 8 bytes interpreted as a 64-bit unsigned integer (the * most significant bit MUST be 0) are the payload length. Multibyte * length quantities are expressed in network byte order. Note that * in all cases, the minimal number of bytes MUST be used to encode * the length, for example, the length of a 124-byte-long string * can't be encoded as the sequence 126, 0, 124. The payload length * is the length of the "Extension data" + the length of the * "Application data". The length of the "Extension data" may be * zero, in which case the payload length is the length of the * "Application data". */ #define AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH 126 #define AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH 127 #define AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH #define AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE 0x000000000000FFFF #define AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE 0x0000000000010000 #define AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE 0x7FFFFFFFFFFFFFFF /* Max bytes necessary to send non-payload parts of a frame */ #define AWS_WEBSOCKET_MAX_FRAME_OVERHEAD (2 + 8 + 4) /* base + extended-length + masking-key */ /** * Full contents of a websocket frame, excluding the payload. 
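 */

/*
 * Illustrative sketch only (not part of the upstream header): encoding a single unmasked TEXT
 * frame with the encoder from websocket_encoder.h above. The payload-source struct and helper
 * names are made up, and aws_byte_buf_write_to_capacity() is assumed from aws-c-common's
 * byte_buf.h.
 */
#include <aws/common/byte_buf.h>
#include <aws/http/private/websocket_encoder.h>
#include <aws/http/private/websocket_impl.h>

struct s_payload_source {
    struct aws_byte_cursor remaining;
};

/* Invoked by the encoder whenever it wants more payload bytes written into out_buf. */
static int s_stream_payload(struct aws_byte_buf *out_buf, void *user_data) {
    struct s_payload_source *src = user_data;
    aws_byte_buf_write_to_capacity(out_buf, &src->remaining);
    return AWS_OP_SUCCESS;
}

static int s_encode_text_frame(struct aws_allocator *alloc, struct aws_byte_cursor payload, struct aws_byte_buf *out) {
    struct s_payload_source src = {.remaining = payload};

    struct aws_websocket_encoder encoder;
    aws_websocket_encoder_init(&encoder, s_stream_payload, &src);

    struct aws_websocket_frame frame = {
        .fin = true,
        .opcode = 1, /* TEXT, per RFC-6455; no masking in this sketch (as a server would send) */
        .payload_length = payload.len,
    };
    if (aws_websocket_encoder_start_frame(&encoder, &frame)) {
        return AWS_OP_ERR;
    }

    /* Room for the payload plus the maximum non-payload overhead defined above. */
    if (aws_byte_buf_init(out, alloc, payload.len + AWS_WEBSOCKET_MAX_FRAME_OVERHEAD)) {
        return AWS_OP_ERR;
    }
    return aws_websocket_encoder_process(&encoder, out);
}

/*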
*/ struct aws_websocket_frame { bool fin; bool rsv[3]; bool masked; uint8_t opcode; uint64_t payload_length; uint8_t masking_key[4]; }; struct aws_websocket_handler_options { struct aws_allocator *allocator; struct aws_channel *channel; size_t initial_window_size; void *user_data; aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin; aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload; aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete; bool is_server; bool manual_window_update; }; struct aws_websocket_client_bootstrap_system_vtable { int (*aws_http_client_connect)(const struct aws_http_client_connection_options *options); void (*aws_http_connection_release)(struct aws_http_connection *connection); void (*aws_http_connection_close)(struct aws_http_connection *connection); struct aws_channel *(*aws_http_connection_get_channel)(struct aws_http_connection *connection); struct aws_http_stream *(*aws_http_connection_make_request)( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); int (*aws_http_stream_activate)(struct aws_http_stream *stream); void (*aws_http_stream_release)(struct aws_http_stream *stream); struct aws_http_connection *(*aws_http_stream_get_connection)(const struct aws_http_stream *stream); void (*aws_http_stream_update_window)(struct aws_http_stream *stream, size_t increment_size); int (*aws_http_stream_get_incoming_response_status)(const struct aws_http_stream *stream, int *out_status); struct aws_websocket *(*aws_websocket_handler_new)(const struct aws_websocket_handler_options *options); }; AWS_EXTERN_C_BEGIN /** * Returns printable name for opcode as c-string. */ AWS_HTTP_API const char *aws_websocket_opcode_str(uint8_t opcode); /** * Return total number of bytes needed to encode frame and its payload */ AWS_HTTP_API uint64_t aws_websocket_frame_encoded_size(const struct aws_websocket_frame *frame); /** * Create a websocket channel-handler and insert it into the channel. */ AWS_HTTP_API struct aws_websocket *aws_websocket_handler_new(const struct aws_websocket_handler_options *options); /** * Override the functions that websocket bootstrap uses to interact with external systems. * Used for unit testing. */ AWS_HTTP_API void aws_websocket_client_bootstrap_set_system_vtable( const struct aws_websocket_client_bootstrap_system_vtable *system_vtable); AWS_EXTERN_C_END #endif /* AWS_HTTP_WEBSOCKET_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/proxy.h000066400000000000000000000471251456575232400247460ustar00rootroot00000000000000#ifndef AWS_PROXY_H #define AWS_PROXY_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_http_client_connection_options; struct aws_http_connection_manager_options; struct aws_http_message; struct aws_http_header; struct aws_http_proxy_config; struct aws_http_proxy_negotiator; struct aws_http_proxy_strategy; struct aws_socket_channel_bootstrap_options; /** * @Deprecated - Supported proxy authentication modes. Superceded by proxy strategy. */ enum aws_http_proxy_authentication_type { AWS_HPAT_NONE = 0, AWS_HPAT_BASIC, }; enum aws_http_proxy_env_var_type { /** * Default. * Disable reading from environment variable for proxy. */ AWS_HPEV_DISABLE = 0, /** * Enable get proxy URL from environment variable, when the manual proxy options of connection manager is not set. 
* env HTTPS_PROXY/https_proxy will be checked when the main connection use tls. * env HTTP_PROXY/http_proxy will be checked when the main connection NOT use tls. * The lower case version has precedence. */ AWS_HPEV_ENABLE, }; /** * Supported proxy connection types */ enum aws_http_proxy_connection_type { /** * Deprecated, but 0-valued for backwards compatibility * * If tls options are provided (for the main connection) then treat the proxy as a tunneling proxy * If tls options are not provided (for the main connection), then treat the proxy as a forwarding proxy */ AWS_HPCT_HTTP_LEGACY = 0, /** * Use the proxy to forward http requests. Attempting to use both this mode and TLS on the tunnel destination * is a configuration error. */ AWS_HPCT_HTTP_FORWARD, /** * Use the proxy to establish a connection to a remote endpoint via a CONNECT request through the proxy. * Works for both plaintext and tls connections. */ AWS_HPCT_HTTP_TUNNEL, }; /* * Configuration for using proxy from environment variable. * Zero out as default settings. */ struct proxy_env_var_settings { enum aws_http_proxy_env_var_type env_var_type; /* * Optional. * If not set: * If tls options are provided (for the main connection) use tunnel proxy type * If tls options are not provided (for the main connection) use forward proxy type */ enum aws_http_proxy_connection_type connection_type; /* * Optional. * If not set, a default tls option will be created. when https used for Local to proxy connection. * Must be distinct from the the tls_connection_options from aws_http_connection_manager_options */ const struct aws_tls_connection_options *tls_options; }; struct aws_http_proxy_strategy; /** * Options for http proxy server usage */ struct aws_http_proxy_options { /** * Type of proxy connection to make */ enum aws_http_proxy_connection_type connection_type; /** * Proxy host to connect to */ struct aws_byte_cursor host; /** * Port to make the proxy connection to */ uint32_t port; /** * Optional. * TLS configuration for the Local <-> Proxy connection * Must be distinct from the the TLS options in the parent aws_http_connection_options struct */ const struct aws_tls_connection_options *tls_options; /** * Optional * Advanced option that allows the user to create a custom strategy that gives low-level control of * certain logical flows within the proxy logic. * * For tunneling proxies it allows custom retry and adaptive negotiation of CONNECT requests. * For forwarding proxies it allows custom request transformations. */ struct aws_http_proxy_strategy *proxy_strategy; /** * @Deprecated - What type of proxy authentication to use, if any. * Replaced by instantiating a proxy_strategy */ enum aws_http_proxy_authentication_type auth_type; /** * @Deprecated - Optional user name to use for basic authentication * Replaced by instantiating a proxy_strategy via aws_http_proxy_strategy_new_basic_auth() */ struct aws_byte_cursor auth_username; /** * @Deprecated - Optional password to use for basic authentication * Replaced by instantiating a proxy_strategy via aws_http_proxy_strategy_new_basic_auth() */ struct aws_byte_cursor auth_password; }; /** * Synchronous (for now) callback function to fetch a token used in modifying CONNECT requests */ typedef struct aws_string *(aws_http_proxy_negotiation_get_token_sync_fn)(void *user_data, int *out_error_code); /** * Synchronous (for now) callback function to fetch a token used in modifying CONNECT request. Includes a (byte string) * context intended to be used as part of a challenge-response flow. 
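 */

/*
 * Illustrative sketch only (not part of the upstream header): filling out aws_http_proxy_options
 * for a tunneling (CONNECT) proxy, as would be attached to client connection or connection
 * manager options. The host and port are placeholders.
 */
#include <aws/common/byte_buf.h>
#include <aws/http/proxy.h>

static struct aws_http_proxy_options s_make_tunneling_proxy_options(void) {
    struct aws_http_proxy_options proxy_options = {
        .connection_type = AWS_HPCT_HTTP_TUNNEL,
        .host = aws_byte_cursor_from_c_str("proxy.example.com"),
        .port = 8080,
        /* .tls_options left NULL: plaintext between the local endpoint and the proxy.
         * .proxy_strategy left NULL: a plain CONNECT with no proxy authentication. */
    };
    return proxy_options;
}

/*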
*/ typedef struct aws_string *(aws_http_proxy_negotiation_get_challenge_token_sync_fn)( void *user_data, const struct aws_byte_cursor *challenge_context, int *out_error_code); /** * Proxy negotiation logic must call this function to indicate an unsuccessful outcome */ typedef void(aws_http_proxy_negotiation_terminate_fn)( struct aws_http_message *message, int error_code, void *internal_proxy_user_data); /** * Proxy negotiation logic must call this function to forward the potentially-mutated request back to the proxy * connection logic. */ typedef void(aws_http_proxy_negotiation_http_request_forward_fn)( struct aws_http_message *message, void *internal_proxy_user_data); /** * User-supplied transform callback which implements the proxy request flow and ultimately, across all execution * pathways, invokes either the terminate function or the forward function appropriately. * * For tunneling proxy connections, this request flow transform only applies to the CONNECT stage of proxy * connection establishment. * * For forwarding proxy connections, this request flow transform applies to every single http request that goes * out on the connection. * * Forwarding proxy connections cannot yet support a truly async request transform without major surgery on http * stream creation, so for now, we split into an async version (for tunneling proxies) and a separate * synchronous version for forwarding proxies. Also forwarding proxies are a kind of legacy dead-end in some * sense. * */ typedef void(aws_http_proxy_negotiation_http_request_transform_async_fn)( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message, aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback, aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback, void *internal_proxy_user_data); typedef int(aws_http_proxy_negotiation_http_request_transform_fn)( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message); /** * Tunneling proxy connections only. A callback that lets the negotiator examine the headers in the * response to the most recent CONNECT request as they arrive. */ typedef int(aws_http_proxy_negotiation_connect_on_incoming_headers_fn)( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers); /** * Tunneling proxy connections only. A callback that lets the negotiator examine the status code of the * response to the most recent CONNECT request. */ typedef int(aws_http_proxy_negotiator_connect_status_fn)( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_status_code status_code); /** * Tunneling proxy connections only. A callback that lets the negotiator examine the body of the response * to the most recent CONNECT request. */ typedef int(aws_http_proxy_negotiator_connect_on_incoming_body_fn)( struct aws_http_proxy_negotiator *proxy_negotiator, const struct aws_byte_cursor *data); /* * Control value that lets the http proxy implementation know if and how to retry a CONNECT request based on * the proxy negotiator's state. */ enum aws_http_proxy_negotiation_retry_directive { /* * Stop trying to connect through the proxy and give up. 
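 */

/*
 * Illustrative sketch only (not part of the upstream header): the smallest possible tunneling
 * connect_request_transform. It changes nothing and immediately forwards the CONNECT request,
 * much like the "one time identity" strategy mentioned earlier in this tree. A real negotiator
 * (basic auth, kerberos, ntlm) would mutate `message` here, possibly asynchronously, and must
 * always finish by invoking exactly one of the two callbacks it was handed.
 */
#include <aws/http/proxy.h>

static void s_identity_connect_transform(
    struct aws_http_proxy_negotiator *proxy_negotiator,
    struct aws_http_message *message,
    aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
    aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
    void *internal_proxy_user_data) {

    (void)proxy_negotiator;
    (void)negotiation_termination_callback; /* would be called instead on failure */

    negotiation_http_request_forward_callback(message, internal_proxy_user_data);
}

/*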
*/ AWS_HPNRD_STOP, /* * Establish a new connection to the proxy before making the next CONNECT request */ AWS_HPNRD_NEW_CONNECTION, /* * Reuse the existing connection to make the next CONNECT request */ AWS_HPNRD_CURRENT_CONNECTION, }; typedef enum aws_http_proxy_negotiation_retry_directive(aws_http_proxy_negotiator_get_retry_directive_fn)( struct aws_http_proxy_negotiator *proxy_negotiator); /** * Vtable for forwarding-based proxy negotiators */ struct aws_http_proxy_negotiator_forwarding_vtable { aws_http_proxy_negotiation_http_request_transform_fn *forward_request_transform; }; /** * Vtable for tunneling-based proxy negotiators */ struct aws_http_proxy_negotiator_tunnelling_vtable { aws_http_proxy_negotiation_http_request_transform_async_fn *connect_request_transform; aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers_callback; aws_http_proxy_negotiator_connect_status_fn *on_status_callback; aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body_callback; aws_http_proxy_negotiator_get_retry_directive_fn *get_retry_directive; }; /* * Base definition of a proxy negotiator. * * A negotiator works differently based on what kind of proxy connection is being asked for: * * (1) Tunneling - In a tunneling proxy connection, the connect_request_transform is invoked on every CONNECT request. * The connect_request_transform implementation *MUST*, in turn, eventually call one of the terminate or forward * functions it gets supplied with. * * Every CONNECT request, if a response is obtained, will properly invoke the response handling callbacks supplied * in the tunneling vtable. * * (2) Forwarding - In a forwarding proxy connection, the forward_request_transform is invoked on every request sent out * on the connection. 
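 */

/*
 * Illustrative sketch only (not part of the upstream header): a negotiator's retry-directive
 * hook that allows one follow-up CONNECT on the proxy connection that is already established
 * before giving up. The impl struct and its bookkeeping are assumptions about a hypothetical
 * negotiator, not upstream types.
 */
#include <aws/common/common.h>
#include <aws/http/proxy.h>

struct s_my_negotiator_state {
    uint32_t connect_attempts; /* incremented elsewhere, e.g. in the connect_request_transform */
};

static enum aws_http_proxy_negotiation_retry_directive s_get_retry_directive(
    struct aws_http_proxy_negotiator *proxy_negotiator) {

    struct s_my_negotiator_state *state = proxy_negotiator->impl;

    if (state->connect_attempts < 2) {
        return AWS_HPNRD_CURRENT_CONNECTION; /* reuse the existing proxy connection */
    }
    return AWS_HPNRD_STOP;
}

/*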
*/ struct aws_http_proxy_negotiator { struct aws_ref_count ref_count; void *impl; union { struct aws_http_proxy_negotiator_forwarding_vtable *forwarding_vtable; struct aws_http_proxy_negotiator_tunnelling_vtable *tunnelling_vtable; } strategy_vtable; }; /*********************************************************************************************/ typedef struct aws_http_proxy_negotiator *(aws_http_proxy_strategy_create_negotiator_fn)( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator); struct aws_http_proxy_strategy_vtable { aws_http_proxy_strategy_create_negotiator_fn *create_negotiator; }; struct aws_http_proxy_strategy { struct aws_ref_count ref_count; struct aws_http_proxy_strategy_vtable *vtable; void *impl; enum aws_http_proxy_connection_type proxy_connection_type; }; /* * Options necessary to create a basic authentication proxy strategy */ struct aws_http_proxy_strategy_basic_auth_options { /* type of proxy connection being established, must be forwarding or tunnel */ enum aws_http_proxy_connection_type proxy_connection_type; /* user name to use in basic authentication */ struct aws_byte_cursor user_name; /* password to use in basic authentication */ struct aws_byte_cursor password; }; /* * Options necessary to create a (synchronous) kerberos authentication proxy strategy */ struct aws_http_proxy_strategy_tunneling_kerberos_options { aws_http_proxy_negotiation_get_token_sync_fn *get_token; void *get_token_user_data; }; /* * Options necessary to create a (synchronous) ntlm authentication proxy strategy */ struct aws_http_proxy_strategy_tunneling_ntlm_options { aws_http_proxy_negotiation_get_token_sync_fn *get_token; aws_http_proxy_negotiation_get_challenge_token_sync_fn *get_challenge_token; void *get_challenge_token_user_data; }; /* * Options necessary to create an adaptive sequential strategy that tries one or more of kerberos and ntlm (in that * order, if both are active). If an options struct is NULL, then that strategy will not be used. */ struct aws_http_proxy_strategy_tunneling_adaptive_options { /* * If non-null, will insert a kerberos proxy strategy into the adaptive sequence */ struct aws_http_proxy_strategy_tunneling_kerberos_options *kerberos_options; /* * If non-null will insert an ntlm proxy strategy into the adaptive sequence */ struct aws_http_proxy_strategy_tunneling_ntlm_options *ntlm_options; }; /* * Options necessary to create a sequential proxy strategy. 
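 */

/*
 * Illustrative sketch only (not part of the upstream header): building the basic-auth proxy
 * strategy declared below and plugging it into aws_http_proxy_options. The host, port and
 * credentials are placeholders, and the ownership note reflects the usual CRT ref-counting
 * convention rather than anything stated here.
 */
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
#include <aws/http/proxy.h>

static int s_configure_basic_auth_proxy(struct aws_allocator *alloc, struct aws_http_proxy_options *out_options) {
    struct aws_http_proxy_strategy_basic_auth_options auth_config = {
        .proxy_connection_type = AWS_HPCT_HTTP_TUNNEL,
        .user_name = aws_byte_cursor_from_c_str("alice"),
        .password = aws_byte_cursor_from_c_str("hunter2"),
    };

    struct aws_http_proxy_strategy *strategy = aws_http_proxy_strategy_new_basic_auth(alloc, &auth_config);
    if (strategy == NULL) {
        return AWS_OP_ERR;
    }

    out_options->connection_type = AWS_HPCT_HTTP_TUNNEL;
    out_options->host = aws_byte_cursor_from_c_str("proxy.example.com");
    out_options->port = 8080;
    out_options->proxy_strategy = strategy;

    /* The reference stored in out_options->proxy_strategy should eventually be released
     * with aws_http_proxy_strategy_release() once the options have been handed off. */
    return AWS_OP_SUCCESS;
}

/*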
*/ struct aws_http_proxy_strategy_tunneling_sequence_options { struct aws_http_proxy_strategy **strategies; uint32_t strategy_count; }; AWS_EXTERN_C_BEGIN /** * Take a reference to an http proxy negotiator * @param proxy_negotiator negotiator to take a reference to * @return the strategy */ AWS_HTTP_API struct aws_http_proxy_negotiator *aws_http_proxy_negotiator_acquire(struct aws_http_proxy_negotiator *proxy_negotiator); /** * Release a reference to an http proxy negotiator * @param proxy_negotiator negotiator to release a reference to */ AWS_HTTP_API void aws_http_proxy_negotiator_release(struct aws_http_proxy_negotiator *proxy_negotiator); /** * Creates a new proxy negotiator from a proxy strategy * @param allocator memory allocator to use * @param strategy strategy to creation a new negotiator for * @return a new proxy negotiator if successful, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_negotiator *aws_http_proxy_strategy_create_negotiator( struct aws_http_proxy_strategy *strategy, struct aws_allocator *allocator); /** * Take a reference to an http proxy strategy * @param proxy_strategy strategy to take a reference to * @return the strategy */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_acquire(struct aws_http_proxy_strategy *proxy_strategy); /** * Release a reference to an http proxy strategy * @param proxy_strategy strategy to release a reference to */ AWS_HTTP_API void aws_http_proxy_strategy_release(struct aws_http_proxy_strategy *proxy_strategy); /** * A constructor for a proxy strategy that performs basic authentication by adding the appropriate * header and header value to requests or CONNECT requests. * * @param allocator memory allocator to use * @param config basic authentication configuration info * @return a new proxy strategy if successfully constructed, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_basic_auth( struct aws_allocator *allocator, struct aws_http_proxy_strategy_basic_auth_options *config); /** * Constructor for an adaptive tunneling proxy strategy. This strategy attempts a vanilla CONNECT and if that * fails it may make followup CONNECT attempts using kerberos or ntlm tokens, based on configuration and proxy * response properties. 
* * @param allocator memory allocator to use * @param config configuration options for the strategy * @return a new proxy strategy if successfully constructed, otherwise NULL */ AWS_HTTP_API struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_adaptive( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_adaptive_options *config); /* * aws_http_proxy_config is the persistent, memory-managed version of aws_http_proxy_options * * This is a set of APIs for creating, destroying and converting between them */ /** * Create a persistent proxy configuration from http connection options * @param allocator memory allocator to use * @param options http connection options to source proxy configuration from * @return */ AWS_HTTP_API struct aws_http_proxy_config *aws_http_proxy_config_new_from_connection_options( struct aws_allocator *allocator, const struct aws_http_client_connection_options *options); /** * Create a persistent proxy configuration from http connection manager options * @param allocator memory allocator to use * @param options http connection manager options to source proxy configuration from * @return */ AWS_HTTP_API struct aws_http_proxy_config *aws_http_proxy_config_new_from_manager_options( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options); /** * Create a persistent proxy configuration from non-persistent proxy options. The resulting * proxy configuration assumes a tunneling connection type. * * @param allocator memory allocator to use * @param options http proxy options to source proxy configuration from * @return */ AWS_HTTP_API struct aws_http_proxy_config *aws_http_proxy_config_new_tunneling_from_proxy_options( struct aws_allocator *allocator, const struct aws_http_proxy_options *options); /** * Create a persistent proxy configuration from non-persistent proxy options. * Legacy connection type of proxy options will be rejected. * * @param allocator memory allocator to use * @param options http proxy options to source proxy configuration from * @return */ AWS_HTTP_API struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options( struct aws_allocator *allocator, const struct aws_http_proxy_options *options); /** * Create a persistent proxy configuration from non-persistent proxy options. * * @param allocator memory allocator to use * @param options http proxy options to source proxy configuration from * @param is_tls_connection tls connection info of the main connection to determine connection_type * when the connection_type is legacy. * @return */ AWS_HTTP_API struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options_with_tls_info( struct aws_allocator *allocator, const struct aws_http_proxy_options *proxy_options, bool is_tls_connection); /** * Clones an existing proxy configuration. A refactor could remove this (do a "move" between the old and new user * data in the one spot it's used) but that should wait until we have better test cases for the logic where this * gets invoked (ntlm/kerberos chains). 
* * @param allocator memory allocator to use * @param proxy_config http proxy configuration to clone * @return */ AWS_HTTP_API struct aws_http_proxy_config *aws_http_proxy_config_new_clone( struct aws_allocator *allocator, const struct aws_http_proxy_config *proxy_config); /** * Destroys an http proxy configuration * @param config http proxy configuration to destroy */ AWS_HTTP_API void aws_http_proxy_config_destroy(struct aws_http_proxy_config *config); /** * Initializes non-persistent http proxy options from a persistent http proxy configuration * @param options http proxy options to initialize * @param config the http proxy config to use as an initialization source */ AWS_HTTP_API void aws_http_proxy_options_init_from_config( struct aws_http_proxy_options *options, const struct aws_http_proxy_config *config); /** * Establish an arbitrary protocol connection through an http proxy via tunneling CONNECT. Alpn is * not required for this connection process to succeed, but we encourage its use if available. * * @param channel_options configuration options for the socket level connection * @param proxy_options configuration options for the proxy connection * * @return AWS_OP_SUCCESS if the asynchronous channel kickoff succeeded, AWS_OP_ERR otherwise */ AWS_HTTP_API int aws_http_proxy_new_socket_channel( struct aws_socket_channel_bootstrap_options *channel_options, const struct aws_http_proxy_options *proxy_options); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_PROXY_STRATEGY_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/request_response.h000066400000000000000000001252561456575232400271750ustar00rootroot00000000000000#ifndef AWS_HTTP_REQUEST_RESPONSE_H #define AWS_HTTP_REQUEST_RESPONSE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_http_connection; struct aws_input_stream; /** * A stream exists for the duration of a request/response exchange. * A client creates a stream to send a request and receive a response. * A server creates a stream to receive a request and send a response. * In http/2, a push-promise stream can be sent by a server and received by a client. */ struct aws_http_stream; /** * Controls whether a header's strings may be compressed by encoding the index of * strings in a cache, rather than encoding the literal string. * * This setting has no effect on HTTP/1.x connections. * On HTTP/2 connections this controls HPACK behavior. * See RFC-7541 Section 7.1 for security considerations. */ enum aws_http_header_compression { /** * Compress header by encoding the cached index of its strings, * or by updating the cache to contain these strings for future reference. * Best for headers that are sent repeatedly. * This is the default setting. */ AWS_HTTP_HEADER_COMPRESSION_USE_CACHE, /** * Encode header strings literally. * If an intermediary re-broadcasts the headers, it is permitted to use cache. * Best for unique headers that are unlikely to repeat. */ AWS_HTTP_HEADER_COMPRESSION_NO_CACHE, /** * Encode header strings literally and forbid all intermediaries from using * cache when re-broadcasting. * Best for header fields that are highly valuable or sensitive to recovery. */ AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE, }; /** * A lightweight HTTP header struct. * Note that the underlying strings are not owned by the byte cursors. 
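 */

/*
 * Illustrative sketch only: the persistent-config round trip described in proxy.h above.
 * Non-persistent options are captured into an aws_http_proxy_config; is_tls_connection
 * resolves the legacy connection type; later the config can be turned back into options.
 * Host/port are placeholders, and the aliasing note is an inference from the byte_buf/cursor
 * types involved rather than something stated in the header.
 */
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
#include <aws/http/proxy.h>

static int s_proxy_config_round_trip(struct aws_allocator *alloc) {
    struct aws_http_proxy_options original = {
        .connection_type = AWS_HPCT_HTTP_LEGACY, /* resolved by the tls flag below */
        .host = aws_byte_cursor_from_c_str("proxy.example.com"),
        .port = 3128,
    };

    /* Legacy type + "main connection uses TLS" resolves to a tunneling configuration. */
    struct aws_http_proxy_config *config =
        aws_http_proxy_config_new_from_proxy_options_with_tls_info(alloc, &original, true /* is_tls_connection */);
    if (config == NULL) {
        return AWS_OP_ERR;
    }

    /* Rebuild non-persistent options from the stored config. The cursors in `rebuilt`
     * point into memory owned by `config`, so use them before destroying it. */
    struct aws_http_proxy_options rebuilt;
    AWS_ZERO_STRUCT(rebuilt);
    aws_http_proxy_options_init_from_config(&rebuilt, config);

    aws_http_proxy_config_destroy(config);
    return AWS_OP_SUCCESS;
}

/*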
*/ struct aws_http_header { struct aws_byte_cursor name; struct aws_byte_cursor value; /* Controls whether the header's strings may be compressed via caching. */ enum aws_http_header_compression compression; }; /** * A transformable block of HTTP headers. * Provides a nice API for getting/setting header names and values. * * All strings are copied and stored within this datastructure. * The index of a given header may change any time headers are modified. * When iterating headers, the following ordering rules apply: * * - Headers with the same name will always be in the same order, relative to one another. * If "A: one" is added before "A: two", then "A: one" will always precede "A: two". * * - Headers with different names could be in any order, relative to one another. * If "A: one" is seen before "B: bee" in one iteration, you might see "B: bee" before "A: one" on the next. */ struct aws_http_headers; /** * Header block type. * INFORMATIONAL: Header block for 1xx informational (interim) responses. * MAIN: Main header block sent with request or response. * TRAILING: Headers sent after the body of a request or response. */ enum aws_http_header_block { AWS_HTTP_HEADER_BLOCK_MAIN, AWS_HTTP_HEADER_BLOCK_INFORMATIONAL, AWS_HTTP_HEADER_BLOCK_TRAILING, }; /** * The definition for an outgoing HTTP request or response. * The message may be transformed (ex: signing the request) before its data is eventually sent. * * The message keeps internal copies of its trivial strings (method, path, headers) * but does NOT take ownership of its body stream. * * A language binding would likely present this as an HttpMessage base class with * HttpRequest and HttpResponse subclasses. */ struct aws_http_message; /** * Function to invoke when a message transformation completes. * This function MUST be invoked or the application will soft-lock. * `message` and `complete_ctx` must be the same pointers provided to the `aws_http_message_transform_fn`. * `error_code` should should be AWS_ERROR_SUCCESS if transformation was successful, * otherwise pass a different AWS_ERROR_X value. */ typedef void( aws_http_message_transform_complete_fn)(struct aws_http_message *message, int error_code, void *complete_ctx); /** * A function that may modify a request or response before it is sent. * The transformation may be asynchronous or immediate. * The user MUST invoke the `complete_fn` when transformation is complete or the application will soft-lock. * When invoking the `complete_fn`, pass along the `message` and `complete_ctx` provided here and an error code. * The error code should be AWS_ERROR_SUCCESS if transformation was successful, * otherwise pass a different AWS_ERROR_X value. */ typedef void(aws_http_message_transform_fn)( struct aws_http_message *message, void *user_data, aws_http_message_transform_complete_fn *complete_fn, void *complete_ctx); /** * Invoked repeatedly times as headers are received. * At this point, aws_http_stream_get_incoming_response_status() can be called for the client. * And aws_http_stream_get_incoming_request_method() and aws_http_stream_get_incoming_request_uri() can be called for * the server. * This is always invoked on the HTTP connection's event-loop thread. * * Return AWS_OP_SUCCESS to continue processing the stream. * Return aws_raise_error(E) to indicate failure and cancel the stream. * The error you raise will be reflected in the error_code passed to the on_complete callback. 
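 */

/*
 * Illustrative sketch only (not part of the upstream header): an aws_http_on_incoming_headers_fn
 * that follows the contract above, printing the main response header block as it arrives. The
 * PRInSTR / AWS_BYTE_CURSOR_PRI printf helpers are assumed from aws-c-common's byte_buf.h, and
 * the function name is made up.
 */
#include <aws/common/byte_buf.h>
#include <aws/http/request_response.h>

#include <stdio.h>

static int s_on_response_headers(
    struct aws_http_stream *stream,
    enum aws_http_header_block header_block,
    const struct aws_http_header *header_array,
    size_t num_headers,
    void *user_data) {

    (void)stream;
    (void)user_data;

    if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) {
        return AWS_OP_SUCCESS; /* ignore 1xx informational and trailing blocks in this sketch */
    }

    for (size_t i = 0; i < num_headers; ++i) {
        printf(
            PRInSTR ": " PRInSTR "\n",
            AWS_BYTE_CURSOR_PRI(header_array[i].name),
            AWS_BYTE_CURSOR_PRI(header_array[i].value));
    }

    /* Returning aws_raise_error(SOME_ERROR) instead would cancel the stream and surface that
     * error code in the on_complete callback, as documented above. */
    return AWS_OP_SUCCESS;
}

/*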
*/ typedef int(aws_http_on_incoming_headers_fn)( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data); /** * Invoked when the incoming header block of this type(informational/main/trailing) has been completely read. * This is always invoked on the HTTP connection's event-loop thread. * * Return AWS_OP_SUCCESS to continue processing the stream. * Return aws_raise_error(E) to indicate failure and cancel the stream. * The error you raise will be reflected in the error_code passed to the on_complete callback. */ typedef int(aws_http_on_incoming_header_block_done_fn)( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data); /** * Called repeatedly as body data is received. * The data must be copied immediately if you wish to preserve it. * This is always invoked on the HTTP connection's event-loop thread. * * Note that, if the connection is using manual_window_management then the window * size has shrunk by the amount of body data received. If the window size * reaches 0 no further data will be received. Increment the window size with * aws_http_stream_update_window(). * * Return AWS_OP_SUCCESS to continue processing the stream. * Return aws_raise_error(E) to indicate failure and cancel the stream. * The error you raise will be reflected in the error_code passed to the on_complete callback. */ typedef int( aws_http_on_incoming_body_fn)(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data); /** * Invoked when request has been completely read. * This is always invoked on the HTTP connection's event-loop thread. * * Return AWS_OP_SUCCESS to continue processing the stream. * Return aws_raise_error(E) to indicate failure and cancel the stream. * The error you raise will be reflected in the error_code passed to the on_complete callback. */ typedef int(aws_http_on_incoming_request_done_fn)(struct aws_http_stream *stream, void *user_data); /** * Invoked when a request/response stream is complete, whether successful or unsuccessful * This is always invoked on the HTTP connection's event-loop thread. * This will not be invoked if the stream is never activated. */ typedef void(aws_http_on_stream_complete_fn)(struct aws_http_stream *stream, int error_code, void *user_data); /** * Invoked when request/response stream destroy completely. * This can be invoked within the same thead who release the refcount on http stream. * This is invoked even if the stream is never activated. */ typedef void(aws_http_on_stream_destroy_fn)(void *user_data); /** * Tracing metrics for aws_http_stream. * Data maybe not be available if the data of stream was never sent/received before it completes. */ struct aws_http_stream_metrics { /* The time stamp when the request started to be encoded. -1 means data not available. Timestamp * are from `aws_high_res_clock_get_ticks` */ int64_t send_start_timestamp_ns; /* The time stamp when the request finished to be encoded. -1 means data not available. * Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t send_end_timestamp_ns; /* The time duration for the request from start encoding to finish encoding (send_end_timestamp_ns - * send_start_timestamp_ns). -1 means data not available. */ int64_t sending_duration_ns; /* The time stamp when the response started to be received from the network channel. -1 means data not available. 
* Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t receive_start_timestamp_ns; /* The time stamp when the response finished to be received from the network channel. -1 means data not available. * Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t receive_end_timestamp_ns; /* The time duration for the request from start receiving to finish receiving. receive_end_timestamp_ns - * receive_start_timestamp_ns. -1 means data not available. */ int64_t receiving_duration_ns; /* The stream-id on the connection when this stream was activated. */ uint32_t stream_id; }; /** * Invoked right before request/response stream is complete to report the tracing metrics for aws_http_stream. * This may be invoked synchronously when aws_http_stream_release() is called. * This is invoked even if the stream is never activated. * See `aws_http_stream_metrics` for details. */ typedef void(aws_http_on_stream_metrics_fn)( struct aws_http_stream *stream, const struct aws_http_stream_metrics *metrics, void *user_data); /** * Options for creating a stream which sends a request from the client and receives a response from the server. */ struct aws_http_make_request_options { /** * The sizeof() this struct, used for versioning. * Required. */ size_t self_size; /** * Definition for outgoing request. * Required. * The request will be kept alive via refcounting until the request completes. */ struct aws_http_message *request; void *user_data; /** * Invoked repeatedly times as headers are received. * Optional. * See `aws_http_on_incoming_headers_fn`. */ aws_http_on_incoming_headers_fn *on_response_headers; /** * Invoked when response header block has been completely read. * Optional. * See `aws_http_on_incoming_header_block_done_fn`. */ aws_http_on_incoming_header_block_done_fn *on_response_header_block_done; /** * Invoked repeatedly as body data is received. * Optional. * See `aws_http_on_incoming_body_fn`. */ aws_http_on_incoming_body_fn *on_response_body; /** * Invoked right before stream is complete, whether successful or unsuccessful * Optional. * See `aws_http_on_stream_metrics_fn` */ aws_http_on_stream_metrics_fn *on_metrics; /** * Invoked when request/response stream is complete, whether successful or unsuccessful * Optional. * See `aws_http_on_stream_complete_fn`. */ aws_http_on_stream_complete_fn *on_complete; /* Callback for when the request/response stream is completely destroyed. */ aws_http_on_stream_destroy_fn *on_destroy; /** * When using HTTP/2, request body data will be provided over time. The stream will only be polled for writing * when data has been supplied via `aws_http2_stream_write_data` */ bool http2_use_manual_data_writes; /** * Optional (ignored if 0). * After a request is fully sent, if the server does not begin responding within N milliseconds, then fail with * AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT. * It override the connection level settings, when the request completes, the * original monitoring options will be applied back to the connection. * TODO: Only supported in HTTP/1.1 now, support it in HTTP/2 */ uint64_t response_first_byte_timeout_ms; }; struct aws_http_request_handler_options { /* Set to sizeof() this struct, used for versioning. */ size_t self_size; /** * Required. */ struct aws_http_connection *server_connection; /** * user_data passed to callbacks. * Optional. */ void *user_data; /** * Invoked repeatedly times as headers are received. * Optional. * See `aws_http_on_incoming_headers_fn`. 
*/ aws_http_on_incoming_headers_fn *on_request_headers; /** * Invoked when the request header block has been completely read. * Optional. * See `aws_http_on_incoming_header_block_done_fn`. */ aws_http_on_incoming_header_block_done_fn *on_request_header_block_done; /** * Invoked as body data is received. * Optional. * See `aws_http_on_incoming_body_fn`. */ aws_http_on_incoming_body_fn *on_request_body; /** * Invoked when request has been completely read. * Optional. * See `aws_http_on_incoming_request_done_fn`. */ aws_http_on_incoming_request_done_fn *on_request_done; /** * Invoked when request/response stream is complete, whether successful or unsuccessful * Optional. * See `aws_http_on_stream_complete_fn`. */ aws_http_on_stream_complete_fn *on_complete; /* Callback for when the request/response stream is completely destroyed. */ aws_http_on_stream_destroy_fn *on_destroy; }; /** * Invoked when the data stream of an outgoing HTTP write operation is no longer in use. * This is always invoked on the HTTP connection's event-loop thread. * * @param stream HTTP-stream this write operation was submitted to. * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent. * Any other error_code indicates that the HTTP-stream is in the process of terminating. * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED, * the stream's termination has nothing to do with this write operation. * Any other non-zero error code indicates a problem with this particular write * operation's data. * @param user_data User data for this write operation. */ typedef void aws_http_stream_write_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data); /** * Invoked when the data of an outgoing HTTP/1.1 chunk is no longer in use. * This is always invoked on the HTTP connection's event-loop thread. * * @param stream HTTP-stream this chunk was submitted to. * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent. * Any other error_code indicates that the HTTP-stream is in the process of terminating. * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED, * the stream's termination has nothing to do with this chunk. * Any other non-zero error code indicates a problem with this particular chunk's data. * @param user_data User data for this chunk. */ typedef aws_http_stream_write_complete_fn aws_http1_stream_write_chunk_complete_fn; /** * HTTP/1.1 chunk extension for chunked encoding. * Note that the underlying strings are not owned by the byte cursors. */ struct aws_http1_chunk_extension { struct aws_byte_cursor key; struct aws_byte_cursor value; }; /** * Encoding options for an HTTP/1.1 chunked transfer encoding chunk. */ struct aws_http1_chunk_options { /* * The data stream to be sent in a single chunk. * The aws_input_stream must remain valid until on_complete is invoked. * May be NULL in the final chunk with size 0. * * Note that, for Transfer-Encodings other than "chunked", the data is * expected to already have that encoding applied. For example, if * "Transfer-Encoding: gzip, chunked" then the data from aws_input_stream * should already be in gzip format. */ struct aws_input_stream *chunk_data; /* * Size of the chunk_data input stream in bytes. */ uint64_t chunk_data_size; /** * A pointer to an array of chunked extensions. * The num_extensions must match the length of the array. * This data is deep-copied by aws_http1_stream_write_chunk(), * it does not need to remain valid until on_complete is invoked. 
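 */

/*
 * Illustrative sketch only: submitting one chunk with the options struct above. It assumes
 * aws_http1_stream_write_chunk() (whose shape matches the http1_write_chunk vtable entry seen
 * earlier in this dump) and aws-c-io's aws_input_stream_new_from_cursor() /
 * aws_input_stream_release(); neither declaration appears in this excerpt. It also assumes
 * on_complete is not invoked when the submit call itself fails.
 */
#include <aws/common/byte_buf.h>
#include <aws/http/request_response.h>
#include <aws/io/stream.h>

static void s_on_chunk_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
    (void)stream;
    (void)error_code;
    /* The chunk's data stream is no longer in use; drop our reference to it. */
    aws_input_stream_release(user_data);
}

static int s_write_one_chunk(struct aws_http_stream *http1_stream, struct aws_byte_cursor body_part) {
    struct aws_input_stream *chunk_stream =
        aws_input_stream_new_from_cursor(aws_default_allocator(), &body_part);
    if (chunk_stream == NULL) {
        return AWS_OP_ERR;
    }

    struct aws_http1_chunk_options chunk_options = {
        .chunk_data = chunk_stream,
        .chunk_data_size = body_part.len,
        .on_complete = s_on_chunk_complete,
        .user_data = chunk_stream, /* released in s_on_chunk_complete */
    };

    int result = aws_http1_stream_write_chunk(http1_stream, &chunk_options);
    if (result != AWS_OP_SUCCESS) {
        aws_input_stream_release(chunk_stream);
    }
    return result;
}

/*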
*/ struct aws_http1_chunk_extension *extensions; /** * The number of elements defined in the extensions array. */ size_t num_extensions; /** * Invoked when the chunk data is no longer in use, whether or not it was successfully sent. * Optional. * See `aws_http1_stream_write_chunk_complete_fn`. */ aws_http1_stream_write_chunk_complete_fn *on_complete; /** * User provided data passed to the on_complete callback on its invocation. */ void *user_data; }; /** * Invoked when the data of an outgoing HTTP2 data frame is no longer in use. * This is always invoked on the HTTP connection's event-loop thread. * * @param stream HTTP2-stream this write was submitted to. * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent. * Any other error_code indicates that the HTTP-stream is in the process of terminating. * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED, * the stream's termination has nothing to do with this write. * Any other non-zero error code indicates a problem with this particular write's data. * @param user_data User data for this write. */ typedef aws_http_stream_write_complete_fn aws_http2_stream_write_data_complete_fn; /** * Encoding options for manual H2 data frame writes */ struct aws_http2_stream_write_data_options { /** * The data to be sent. * Optional. * If not set, input stream with length 0 will be used. */ struct aws_input_stream *data; /** * Set true when it's the last chunk to be sent. * After a write with end_stream, no more data write will be accepted. */ bool end_stream; /** * Invoked when the data stream is no longer in use, whether or not it was successfully sent. * Optional. * See `aws_http2_stream_write_data_complete_fn`. */ aws_http2_stream_write_data_complete_fn *on_complete; /** * User provided data passed to the on_complete callback on its invocation. */ void *user_data; }; #define AWS_HTTP_REQUEST_HANDLER_OPTIONS_INIT \ { .self_size = sizeof(struct aws_http_request_handler_options), } AWS_EXTERN_C_BEGIN /** * Return whether both names are equivalent. * This is a case-insensitive string comparison. * * Example Matches: * "Content-Length" == "content-length" // upper or lower case ok * Example Mismatches: * "Content-Length" != " Content-Length" // leading whitespace bad */ AWS_HTTP_API bool aws_http_header_name_eq(struct aws_byte_cursor name_a, struct aws_byte_cursor name_b); /** * Create a new headers object. * The caller has a hold on the object and must call aws_http_headers_release() when they are done with it. */ AWS_HTTP_API struct aws_http_headers *aws_http_headers_new(struct aws_allocator *allocator); /** * Acquire a hold on the object, preventing it from being deleted until * aws_http_headers_release() is called by all those with a hold on it. */ AWS_HTTP_API void aws_http_headers_acquire(struct aws_http_headers *headers); /** * Release a hold on the object. * The object is deleted when all holds on it are released. */ AWS_HTTP_API void aws_http_headers_release(struct aws_http_headers *headers); /** * Add a header. * The underlying strings are copied. */ AWS_HTTP_API int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header); /** * Add a header. * The underlying strings are copied. */ AWS_HTTP_API int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); /** * Add an array of headers. * The underlying strings are copied. 
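 */

/*
 * Illustrative sketch only (not part of the upstream header): the basic aws_http_headers
 * lifecycle using the functions declared around this point: create, add, look up, release.
 * The header values are placeholders.
 */
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
#include <aws/http/request_response.h>

static int s_headers_example(struct aws_allocator *alloc) {
    struct aws_http_headers *headers = aws_http_headers_new(alloc);
    if (headers == NULL) {
        return AWS_OP_ERR;
    }

    /* The underlying strings are copied into the headers object. */
    aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com"));
    aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Accept"), aws_byte_cursor_from_c_str("*/*"));

    /* Fetch the first value stored for a name. */
    struct aws_byte_cursor host_value;
    if (aws_http_headers_get(headers, aws_byte_cursor_from_c_str("Host"), &host_value) == AWS_OP_SUCCESS) {
        (void)host_value; /* points at the stored copy of "example.com" */
    }

    size_t count = aws_http_headers_count(headers); /* 2 */
    (void)count;

    aws_http_headers_release(headers);
    return AWS_OP_SUCCESS;
}

/*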
*/ AWS_HTTP_API int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aws_http_header *array, size_t count); /** * Set a header value. * The header is added if necessary and any existing values for this name are removed. * The underlying strings are copied. */ AWS_HTTP_API int aws_http_headers_set(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); /** * Get the total number of headers. */ AWS_HTTP_API size_t aws_http_headers_count(const struct aws_http_headers *headers); /** * Get the header at the specified index. * The index of a given header may change any time headers are modified. * When iterating headers, the following ordering rules apply: * * - Headers with the same name will always be in the same order, relative to one another. * If "A: one" is added before "A: two", then "A: one" will always precede "A: two". * * - Headers with different names could be in any order, relative to one another. * If "A: one" is seen before "B: bee" in one iteration, you might see "B: bee" before "A: one" on the next. * * AWS_ERROR_INVALID_INDEX is raised if the index is invalid. */ AWS_HTTP_API int aws_http_headers_get_index( const struct aws_http_headers *headers, size_t index, struct aws_http_header *out_header); /** * * Get all values with this name, combined into one new aws_string that you are responsible for destroying. * If there are multiple headers with this name, their values are appended with comma-separators. * If there are no headers with this name, NULL is returned and AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised. */ AWS_HTTP_API struct aws_string *aws_http_headers_get_all(const struct aws_http_headers *headers, struct aws_byte_cursor name); /** * Get the first value for this name, ignoring any additional values. * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if the name is not found. */ AWS_HTTP_API int aws_http_headers_get( const struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor *out_value); /** * Test if header name exists or not in headers */ AWS_HTTP_API bool aws_http_headers_has(const struct aws_http_headers *headers, struct aws_byte_cursor name); /** * Remove all headers with this name. * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if no headers with this name are found. */ AWS_HTTP_API int aws_http_headers_erase(struct aws_http_headers *headers, struct aws_byte_cursor name); /** * Remove the first header found with this name and value. * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if no such header is found. */ AWS_HTTP_API int aws_http_headers_erase_value( struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); /** * Remove the header at the specified index. * * AWS_ERROR_INVALID_INDEX is raised if the index is invalid. */ AWS_HTTP_API int aws_http_headers_erase_index(struct aws_http_headers *headers, size_t index); /** * Clear all headers. */ AWS_HTTP_API void aws_http_headers_clear(struct aws_http_headers *headers); /** * Get the `:method` value (HTTP/2 headers only). */ AWS_HTTP_API int aws_http2_headers_get_request_method(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_method); /** * Set `:method` (HTTP/2 headers only). * The headers makes its own copy of the underlying string. */ AWS_HTTP_API int aws_http2_headers_set_request_method(struct aws_http_headers *h2_headers, struct aws_byte_cursor method); /* * Get the `:scheme` value (HTTP/2 headers only). 
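 *
 * For example, the scheme can be read back like this (a sketch; `h2_headers` is
 * assumed to come from an HTTP/2 message):
 *
 *   struct aws_byte_cursor scheme;
 *   if (aws_http2_headers_get_request_scheme(h2_headers, &scheme) == AWS_OP_SUCCESS) {
 *       // scheme now points at the value that was set, e.g. "https"
 *   }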
*/ AWS_HTTP_API int aws_http2_headers_get_request_scheme(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_scheme); /** * Set `:scheme` (request pseudo headers only). * The pseudo headers makes its own copy of the underlying string. */ AWS_HTTP_API int aws_http2_headers_set_request_scheme(struct aws_http_headers *h2_headers, struct aws_byte_cursor scheme); /* * Get the `:authority` value (request pseudo headers only). */ AWS_HTTP_API int aws_http2_headers_get_request_authority( const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_authority); /** * Set `:authority` (request pseudo headers only). * The pseudo headers makes its own copy of the underlying string. */ AWS_HTTP_API int aws_http2_headers_set_request_authority(struct aws_http_headers *h2_headers, struct aws_byte_cursor authority); /* * Get the `:path` value (request pseudo headers only). */ AWS_HTTP_API int aws_http2_headers_get_request_path(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_path); /** * Set `:path` (request pseudo headers only). * The pseudo headers makes its own copy of the underlying string. */ AWS_HTTP_API int aws_http2_headers_set_request_path(struct aws_http_headers *h2_headers, struct aws_byte_cursor path); /** * Get `:status` (response pseudo headers only). * If no status is set, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE is raised. */ AWS_HTTP_API int aws_http2_headers_get_response_status(const struct aws_http_headers *h2_headers, int *out_status_code); /** * Set `:status` (response pseudo headers only). */ AWS_HTTP_API int aws_http2_headers_set_response_status(struct aws_http_headers *h2_headers, int status_code); /** * Create a new HTTP/1.1 request message. * The message is blank, all properties (method, path, etc) must be set individually. * If HTTP/1.1 message used in HTTP/2 connection, the transformation will be automatically applied. * A HTTP/2 message will created and sent based on the HTTP/1.1 message. * * The caller has a hold on the object and must call aws_http_message_release() when they are done with it. */ AWS_HTTP_API struct aws_http_message *aws_http_message_new_request(struct aws_allocator *allocator); /** * Like aws_http_message_new_request(), but uses existing aws_http_headers instead of creating a new one. * Acquires a hold on the headers, and releases it when the request is destroyed. */ AWS_HTTP_API struct aws_http_message *aws_http_message_new_request_with_headers( struct aws_allocator *allocator, struct aws_http_headers *existing_headers); /** * Create a new HTTP/1.1 response message. * The message is blank, all properties (status, headers, etc) must be set individually. * * The caller has a hold on the object and must call aws_http_message_release() when they are done with it. */ AWS_HTTP_API struct aws_http_message *aws_http_message_new_response(struct aws_allocator *allocator); /** * Create a new HTTP/2 request message. * pseudo headers need to be set from aws_http2_headers_set_request_* to the headers of the aws_http_message. * Will be errored out if used in HTTP/1.1 connection. * * The caller has a hold on the object and must call aws_http_message_release() when they are done with it. */ AWS_HTTP_API struct aws_http_message *aws_http2_message_new_request(struct aws_allocator *allocator); /** * Create a new HTTP/2 response message. * pseudo headers need to be set from aws_http2_headers_set_response_status to the headers of the aws_http_message. * Will be errored out if used in HTTP/1.1 connection. 
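 *
 * A minimal server-side sketch (error handling omitted; `allocator` and the 200
 * status are assumptions for illustration):
 *
 *   struct aws_http_message *response = aws_http2_message_new_response(allocator);
 *   struct aws_http_headers *response_headers = aws_http_message_get_headers(response);
 *   aws_http2_headers_set_response_status(response_headers, AWS_HTTP_STATUS_CODE_200_OK);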
* * The caller has a hold on the object and must call aws_http_message_release() when they are done with it. */ AWS_HTTP_API struct aws_http_message *aws_http2_message_new_response(struct aws_allocator *allocator); /** * Create an HTTP/2 message from HTTP/1.1 message. * pseudo headers will be created from the context and added to the headers of new message. * Normal headers will be copied to the headers of new message. * Note: * - if `host` exist, it will be removed and `:authority` will be added using the information. * - `:scheme` always defaults to "https". To use a different scheme create the HTTP/2 message directly */ AWS_HTTP_API struct aws_http_message *aws_http2_message_new_from_http1( struct aws_allocator *alloc, const struct aws_http_message *http1_msg); /** * Acquire a hold on the object, preventing it from being deleted until * aws_http_message_release() is called by all those with a hold on it. * * This function returns the passed in message (possibly NULL) so that acquire-and-assign can be done with a single * statement. */ AWS_HTTP_API struct aws_http_message *aws_http_message_acquire(struct aws_http_message *message); /** * Release a hold on the object. * The object is deleted when all holds on it are released. * * This function always returns NULL so that release-and-assign-NULL can be done with a single statement. */ AWS_HTTP_API struct aws_http_message *aws_http_message_release(struct aws_http_message *message); /** * Deprecated. This is equivalent to aws_http_message_release(). */ AWS_HTTP_API void aws_http_message_destroy(struct aws_http_message *message); AWS_HTTP_API bool aws_http_message_is_request(const struct aws_http_message *message); AWS_HTTP_API bool aws_http_message_is_response(const struct aws_http_message *message); /** * Get the protocol version of the http message. */ AWS_HTTP_API enum aws_http_version aws_http_message_get_protocol_version(const struct aws_http_message *message); /** * Get the method (request messages only). */ AWS_HTTP_API int aws_http_message_get_request_method( const struct aws_http_message *request_message, struct aws_byte_cursor *out_method); /** * Set the method (request messages only). * The request makes its own copy of the underlying string. */ AWS_HTTP_API int aws_http_message_set_request_method(struct aws_http_message *request_message, struct aws_byte_cursor method); /* * Get the path-and-query value (request messages only). */ AWS_HTTP_API int aws_http_message_get_request_path(const struct aws_http_message *request_message, struct aws_byte_cursor *out_path); /** * Set the path-and-query value (request messages only). * The request makes its own copy of the underlying string. */ AWS_HTTP_API int aws_http_message_set_request_path(struct aws_http_message *request_message, struct aws_byte_cursor path); /** * Get the status code (response messages only). * If no status is set, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE is raised. */ AWS_HTTP_API int aws_http_message_get_response_status(const struct aws_http_message *response_message, int *out_status_code); /** * Set the status code (response messages only). */ AWS_HTTP_API int aws_http_message_set_response_status(struct aws_http_message *response_message, int status_code); /** * Get the body stream. * Returns NULL if no body stream is set. */ AWS_HTTP_API struct aws_input_stream *aws_http_message_get_body_stream(const struct aws_http_message *message); /** * Set the body stream. * NULL is an acceptable value for messages with no body. 
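 *
 * For example (a sketch; aws_input_stream_new_from_cursor() from aws-c-io is one
 * convenient way to wrap an in-memory payload held in `payload_cursor`):
 *
 *   struct aws_input_stream *body = aws_input_stream_new_from_cursor(allocator, &payload_cursor);
 *   aws_http_message_set_body_stream(message, body);
 *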
* Note: The message does NOT take ownership of the body stream. * The stream must not be destroyed until the message is complete. */ AWS_HTTP_API void aws_http_message_set_body_stream(struct aws_http_message *message, struct aws_input_stream *body_stream); /** * aws_future */ AWS_FUTURE_T_POINTER_WITH_RELEASE_DECLARATION(aws_future_http_message, struct aws_http_message, AWS_HTTP_API) /** * Submit a chunk of data to be sent on an HTTP/1.1 stream. * The stream must have specified "chunked" in a "transfer-encoding" header. * For client streams, activate() must be called before any chunks are submitted. * For server streams, the response must be submitted before any chunks. * A final chunk with size 0 must be submitted to successfully complete the HTTP-stream. * * Returns AWS_OP_SUCCESS if the chunk has been submitted. The chunk's completion * callback will be invoked when the HTTP-stream is done with the chunk data, * whether or not it was successfully sent (see `aws_http1_stream_write_chunk_complete_fn`). * The chunk data must remain valid until the completion callback is invoked. * * Returns AWS_OP_ERR and raises an error if the chunk could not be submitted. * In this case, the chunk's completion callback will never be invoked. * Note that it is always possible for the HTTP-stream to terminate unexpectedly * prior to this call being made, in which case the error raised is * AWS_ERROR_HTTP_STREAM_HAS_COMPLETED. */ AWS_HTTP_API int aws_http1_stream_write_chunk( struct aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options); /** * The stream must have specified `http2_use_manual_data_writes` during request creation. * For client streams, activate() must be called before any frames are submitted. * For server streams, the response headers must be submitted before any frames. * A write with options that has end_stream set to be true will end the stream and prevent any further write. * * @return AWS_OP_SUCCESS if the write was queued * AWS_OP_ERROR indicating the attempt raised an error code. * AWS_ERROR_INVALID_STATE will be raised for invalid usage. * AWS_ERROR_HTTP_STREAM_HAS_COMPLETED will be raised if the stream ended for reasons behind the scenes. * * Typical usage will be something like: * options.http2_use_manual_data_writes = true; * stream = aws_http_connection_make_request(connection, &options); * aws_http_stream_activate(stream); * ... * struct aws_http2_stream_write_data_options write; * aws_http2_stream_write_data(stream, &write); * ... * struct aws_http2_stream_write_data_options last_write; * last_write.end_stream = true; * aws_http2_stream_write_data(stream, &write); * ... * aws_http_stream_release(stream); */ AWS_HTTP_API int aws_http2_stream_write_data( struct aws_http_stream *http2_stream, const struct aws_http2_stream_write_data_options *options); /** * Add a list of headers to be added as trailing headers sent after the last chunk is sent. * a "Trailer" header field which indicates the fields present in the trailer. * * Certain headers are forbidden in the trailer (e.g., Transfer-Encoding, Content-Length, Host). See RFC-7541 * Section 4.1.2 for more details. * * For client streams, activate() must be called before any chunks are submitted. * * For server streams, the response must be submitted before the trailer can be added * * aws_http1_stream_add_chunked_trailer must be called before the final size 0 chunk, and at the moment can only * be called once, though this could change if need be. * * Returns AWS_OP_SUCCESS if the chunk has been submitted. 
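 *
 * A rough sketch (the trailer name and value are illustrative only; `allocator`
 * and `stream` are assumed to exist):
 *
 *   struct aws_http_headers *trailers = aws_http_headers_new(allocator);
 *   aws_http_headers_add(
 *       trailers,
 *       aws_byte_cursor_from_c_str("example-trailer"),
 *       aws_byte_cursor_from_c_str("example-value"));
 *   aws_http1_stream_add_chunked_trailer(stream, trailers);
 *   // followed by a final chunk of size 0 to complete the stream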
*/ AWS_HTTP_API int aws_http1_stream_add_chunked_trailer( struct aws_http_stream *http1_stream, const struct aws_http_headers *trailing_headers); /** * * This datastructure has more functions for inspecting and modifying headers than * are available on the aws_http_message datastructure. */ AWS_HTTP_API struct aws_http_headers *aws_http_message_get_headers(const struct aws_http_message *message); /** * Get the message's const aws_http_headers. */ AWS_HTTP_API const struct aws_http_headers *aws_http_message_get_const_headers(const struct aws_http_message *message); /** * Get the number of headers. */ AWS_HTTP_API size_t aws_http_message_get_header_count(const struct aws_http_message *message); /** * Get the header at the specified index. * This function cannot fail if a valid index is provided. * Otherwise, AWS_ERROR_INVALID_INDEX will be raised. * * The underlying strings are stored within the message. */ AWS_HTTP_API int aws_http_message_get_header( const struct aws_http_message *message, struct aws_http_header *out_header, size_t index); /** * Add a header to the end of the array. * The message makes its own copy of the underlying strings. */ AWS_HTTP_API int aws_http_message_add_header(struct aws_http_message *message, struct aws_http_header header); /** * Add an array of headers to the end of the header array. * The message makes its own copy of the underlying strings. * * This is a helper function useful when it's easier to define headers as a stack array, rather than calling add_header * repeatedly. */ AWS_HTTP_API int aws_http_message_add_header_array( struct aws_http_message *message, const struct aws_http_header *headers, size_t num_headers); /** * Remove the header at the specified index. * Headers after this index are all shifted back one position. * * This function cannot fail if a valid index is provided. * Otherwise, AWS_ERROR_INVALID_INDEX will be raised. */ AWS_HTTP_API int aws_http_message_erase_header(struct aws_http_message *message, size_t index); /** * Create a stream, with a client connection sending a request. * The request does not start sending automatically once the stream is created. You must call * aws_http_stream_activate to begin execution of the request. * * The `options` are copied during this call. * * Tip for language bindings: Do not bind the `options` struct. Use something more natural for your language, * such as Builder Pattern in Java, or Python's ability to take many optional arguments by name. * * Note: The header of the request will be sent as it is when the message to send protocol matches the protocol of the * connection. * - No `user-agent` will be added. * - No security check will be enforced. eg: `referer` header privacy should be enforced by the user-agent who adds the * header * - When HTTP/1 message sent on HTTP/2 connection, `aws_http2_message_new_from_http1` will be applied under the hood. * - When HTTP/2 message sent on HTTP/1 connection, no change will be made. */ AWS_HTTP_API struct aws_http_stream *aws_http_connection_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); /** * Create a stream, with a server connection receiving and responding to a request. * This function can only be called from the `aws_http_on_incoming_request_fn` callback. * aws_http_stream_send_response() should be used to send a response. 
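 *
 * A hedged sketch of an `aws_http_on_incoming_request_fn` implementation
 * (`s_on_request_done` and `my_user_data` are placeholders, not library symbols):
 *
 *   struct aws_http_request_handler_options options = AWS_HTTP_REQUEST_HANDLER_OPTIONS_INIT;
 *   options.server_connection = connection;
 *   options.user_data = my_user_data;
 *   options.on_request_done = s_on_request_done;
 *   return aws_http_stream_new_server_request_handler(&options);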
*/ AWS_HTTP_API struct aws_http_stream *aws_http_stream_new_server_request_handler( const struct aws_http_request_handler_options *options); /** * Acquire refcount on the stream to prevent it from being cleaned up until it is released. */ AWS_HTTP_API struct aws_http_stream *aws_http_stream_acquire(struct aws_http_stream *stream); /** * Users must release the stream when they are done with it, or its memory will never be cleaned up. * This will not cancel the stream, its callbacks will still fire if the stream is still in progress. * * Tips for language bindings: * - Invoke this from the wrapper class's finalizer/destructor. * - Do not let the wrapper class be destroyed until on_complete() has fired. */ AWS_HTTP_API void aws_http_stream_release(struct aws_http_stream *stream); /** * Only used for client initiated streams (immediately following a call to aws_http_connection_make_request). * * Activates the request's outgoing stream processing. */ AWS_HTTP_API int aws_http_stream_activate(struct aws_http_stream *stream); AWS_HTTP_API struct aws_http_connection *aws_http_stream_get_connection(const struct aws_http_stream *stream); /* Only valid in "request" streams, once response headers start arriving */ AWS_HTTP_API int aws_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status); /* Only valid in "request handler" streams, once request headers start arriving */ AWS_HTTP_API int aws_http_stream_get_incoming_request_method( const struct aws_http_stream *stream, struct aws_byte_cursor *out_method); AWS_HTTP_API int aws_http_stream_get_incoming_request_uri(const struct aws_http_stream *stream, struct aws_byte_cursor *out_uri); /** * Send response (only callable from "request handler" streams) * The response object must stay alive at least until the stream's on_complete is called. */ AWS_HTTP_API int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response); /** * Increment the stream's flow-control window to keep data flowing. * * If the connection was created with `manual_window_management` set true, * the flow-control window of each stream will shrink as body data is received * (headers, padding, and other metadata do not affect the window). * The connection's `initial_window_size` determines the starting size of each stream's window. * If a stream's flow-control window reaches 0, no further data will be received. * * If `manual_window_management` is false, this call will have no effect. * The connection maintains its flow-control windows such that * no back-pressure is applied and data arrives as fast as possible. */ AWS_HTTP_API void aws_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size); /** * Gets the HTTP/2 id associated with a stream. Even h1 streams have an id (using the same allocation procedure * as http/2) for easier tracking purposes. For client streams, this will only be non-zero after a successful call * to aws_http_stream_activate() */ AWS_HTTP_API uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream); /** * Cancel the stream in flight. * For HTTP/1.1 streams, it's equivalent to closing the connection. * For HTTP/2 streams, it's equivalent to calling reset on the stream with `AWS_HTTP2_ERR_CANCEL`. * * the stream will complete with the error code provided, unless the stream is * already completing for other reasons, or the stream is not activated, * in which case this call will have no impact. 
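 *
 * For example (a sketch; the choice of error code is up to the caller):
 *
 *   aws_http_stream_cancel(stream, AWS_ERROR_HTTP_CONNECTION_CLOSED);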
*/ AWS_HTTP_API void aws_http_stream_cancel(struct aws_http_stream *stream, int error_code); /** * Reset the HTTP/2 stream (HTTP/2 only). * Note that if the stream closes before this async call is fully processed, the RST_STREAM frame will not be sent. * * @param http2_stream HTTP/2 stream. * @param http2_error aws_http2_error_code. Reason to reset the stream. */ AWS_HTTP_API int aws_http2_stream_reset(struct aws_http_stream *http2_stream, uint32_t http2_error); /** * Get the error code received in rst_stream. * Only valid if the stream has completed, and an RST_STREAM frame has received. * * @param http2_stream HTTP/2 stream. * @param out_http2_error Gets to set to HTTP/2 error code received in rst_stream. */ AWS_HTTP_API int aws_http2_stream_get_received_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error); /** * Get the HTTP/2 error code sent in the RST_STREAM frame (HTTP/2 only). * Only valid if the stream has completed, and has sent an RST_STREAM frame. * * @param http2_stream HTTP/2 stream. * @param out_http2_error Gets to set to HTTP/2 error code sent in rst_stream. */ AWS_HTTP_API int aws_http2_stream_get_sent_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_REQUEST_RESPONSE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/server.h000066400000000000000000000137031456575232400250660ustar00rootroot00000000000000#ifndef AWS_HTTP_SERVER_H #define AWS_HTTP_SERVER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_http_connection; struct aws_server_bootstrap; struct aws_socket_options; struct aws_tls_connection_options; /** * A listening socket which accepts incoming HTTP connections, * creating a server-side aws_http_connection to handle each one. */ struct aws_http_server; struct aws_http_stream; typedef void(aws_http_server_on_incoming_connection_fn)( struct aws_http_server *server, struct aws_http_connection *connection, int error_code, void *user_data); typedef void(aws_http_server_on_destroy_fn)(void *user_data); /** * Options for creating an HTTP server. * Initialize with AWS_HTTP_SERVER_OPTIONS_INIT to set default values. */ struct aws_http_server_options { /** * The sizeof() this struct, used for versioning. * Set by AWS_HTTP_SERVER_OPTIONS_INIT. */ size_t self_size; /** * Required. * Must outlive server. */ struct aws_allocator *allocator; /** * Required. * Must outlive server. */ struct aws_server_bootstrap *bootstrap; /** * Required. * Server makes copy. */ struct aws_socket_endpoint *endpoint; /** * Required. * Server makes a copy. */ struct aws_socket_options *socket_options; /** * Optional. * Server copies all contents except the `aws_tls_ctx`, which must outlive the server. */ struct aws_tls_connection_options *tls_options; /** * Initial window size for incoming connections. * Optional. * A default size is set by AWS_HTTP_SERVER_OPTIONS_INIT. */ size_t initial_window_size; /** * User data passed to callbacks. * Optional. */ void *server_user_data; /** * Invoked when an incoming connection has been set up, or when setup has failed. * Required. * If setup succeeds, the user must call aws_http_connection_configure_server(). */ aws_http_server_on_incoming_connection_fn *on_incoming_connection; /** * Invoked when the server finishes the destroy operation. * Optional. 
*/ aws_http_server_on_destroy_fn *on_destroy_complete; /** * Set to true to manually manage the read window size. * * If this is false, the connection will maintain a constant window size. * * If this is true, the caller must manually increment the window size using aws_http_stream_update_window(). * If the window is not incremented, it will shrink by the amount of body data received. If the window size * reaches 0, no further data will be received. **/ bool manual_window_management; }; /** * Initializes aws_http_server_options with default values. */ #define AWS_HTTP_SERVER_OPTIONS_INIT \ { .self_size = sizeof(struct aws_http_server_options), .initial_window_size = SIZE_MAX, } /** * Invoked at the start of an incoming request. * To process the request, the user must create a request handler stream and return it to the connection. * If NULL is returned, the request will not be processed and the last error will be reported as the reason for failure. */ typedef struct aws_http_stream *( aws_http_on_incoming_request_fn)(struct aws_http_connection *connection, void *user_data); typedef void(aws_http_on_server_connection_shutdown_fn)( struct aws_http_connection *connection, int error_code, void *connection_user_data); /** * Options for configuring a server-side aws_http_connection. * Initialized with AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT to set default values. */ struct aws_http_server_connection_options { /** * The sizeof() this struct, used for versioning. * Set by AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT. */ size_t self_size; /** * User data specific to this connection. * Optional. */ void *connection_user_data; /** * Invoked at the start of an incoming request. * Required. * The user must create a request handler stream and return it to the connection. * See `aws_http_on_incoming_request_fn`. */ aws_http_on_incoming_request_fn *on_incoming_request; /** * Invoked when the connection is shut down. * Optional. */ aws_http_on_server_connection_shutdown_fn *on_shutdown; }; /** * Initializes aws_http_server_connection_options with default values. */ #define AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT \ { .self_size = sizeof(struct aws_http_server_connection_options), } AWS_EXTERN_C_BEGIN /** * Create server, a listening socket that accepts incoming connections. */ AWS_HTTP_API struct aws_http_server *aws_http_server_new(const struct aws_http_server_options *options); /** * Release the server. It will close the listening socket and all the connections existing in the server. * The on_destroy_complete will be invoked when the destroy operation completes */ AWS_HTTP_API void aws_http_server_release(struct aws_http_server *server); /** * Configure a server connection. * This must be called from the server's on_incoming_connection callback. */ AWS_HTTP_API int aws_http_connection_configure_server( struct aws_http_connection *connection, const struct aws_http_server_connection_options *options); /** * Returns true if this is a server connection. */ AWS_HTTP_API bool aws_http_connection_is_server(const struct aws_http_connection *connection); /** * Returns the local listener endpoint of the HTTP server. Only valid as long as the server remains valid. 
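 *
 * For example, to log the port that was actually bound (a sketch; the field names
 * come from aws-c-io's aws_socket_endpoint):
 *
 *   const struct aws_socket_endpoint *bound = aws_http_server_get_listener_endpoint(server);
 *   printf("listening on %s:%u\n", bound->address, (unsigned)bound->port);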
*/ AWS_HTTP_API const struct aws_socket_endpoint *aws_http_server_get_listener_endpoint(const struct aws_http_server *server); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_SERVER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/statistics.h000066400000000000000000000041321456575232400257460ustar00rootroot00000000000000#ifndef AWS_HTTP_STATISTICS_H #define AWS_HTTP_STATISTICS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_crt_http_statistics_category { AWSCRT_STAT_CAT_HTTP1_CHANNEL = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID), AWSCRT_STAT_CAT_HTTP2_CHANNEL, }; /** * A statistics struct for http handlers. Tracks the actual amount of time that incoming and outgoing requests are * waiting for their IO to complete. */ struct aws_crt_statistics_http1_channel { aws_crt_statistics_category_t category; uint64_t pending_outgoing_stream_ms; uint64_t pending_incoming_stream_ms; uint32_t current_outgoing_stream_id; uint32_t current_incoming_stream_id; }; struct aws_crt_statistics_http2_channel { aws_crt_statistics_category_t category; uint64_t pending_outgoing_stream_ms; uint64_t pending_incoming_stream_ms; /* True if during the time of report, there has ever been no active streams on the connection */ bool was_inactive; }; AWS_EXTERN_C_BEGIN /** * Initializes a http channel handler statistics struct */ AWS_HTTP_API int aws_crt_statistics_http1_channel_init(struct aws_crt_statistics_http1_channel *stats); /** * Cleans up a http channel handler statistics struct */ AWS_HTTP_API void aws_crt_statistics_http1_channel_cleanup(struct aws_crt_statistics_http1_channel *stats); /** * Resets a http channel handler statistics struct's statistics */ AWS_HTTP_API void aws_crt_statistics_http1_channel_reset(struct aws_crt_statistics_http1_channel *stats); /** * Initializes a HTTP/2 channel handler statistics struct */ AWS_HTTP_API void aws_crt_statistics_http2_channel_init(struct aws_crt_statistics_http2_channel *stats); /** * Resets a HTTP/2 channel handler statistics struct's statistics */ AWS_HTTP_API void aws_crt_statistics_http2_channel_reset(struct aws_crt_statistics_http2_channel *stats); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_STATISTICS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/status_code.h000066400000000000000000000074351456575232400261020ustar00rootroot00000000000000#ifndef AWS_HTTP_STATUS_CODE_H #define AWS_HTTP_STATUS_CODE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* * Define most of the http response codes we probably will use. * https://www.iana.org/assignments/http-status-codes/http-status-codes.txt * This is NOT a definitive list of codes. */ enum aws_http_status_code { /* * This is a special response code defined for convenience in error processing, * indicating processing of http request met error and didn't reach server. 
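 *
 * For example, it is a convenient initializer when reading a response status
 * (a sketch using aws_http_stream_get_incoming_response_status() from request_response.h):
 *
 *   int status = AWS_HTTP_STATUS_CODE_UNKNOWN;
 *   aws_http_stream_get_incoming_response_status(stream, &status);
 *   if (status == AWS_HTTP_STATUS_CODE_200_OK) {
 *       // got a successful response
 *   }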
*/ AWS_HTTP_STATUS_CODE_UNKNOWN = -1, AWS_HTTP_STATUS_CODE_100_CONTINUE = 100, AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS = 101, AWS_HTTP_STATUS_CODE_102_PROCESSING = 102, AWS_HTTP_STATUS_CODE_103_EARLY_HINTS = 103, AWS_HTTP_STATUS_CODE_200_OK = 200, AWS_HTTP_STATUS_CODE_201_CREATED = 201, AWS_HTTP_STATUS_CODE_202_ACCEPTED = 202, AWS_HTTP_STATUS_CODE_203_NON_AUTHORITATIVE_INFORMATION = 203, AWS_HTTP_STATUS_CODE_204_NO_CONTENT = 204, AWS_HTTP_STATUS_CODE_205_RESET_CONTENT = 205, AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT = 206, AWS_HTTP_STATUS_CODE_207_MULTI_STATUS = 207, AWS_HTTP_STATUS_CODE_208_ALREADY_REPORTED = 208, AWS_HTTP_STATUS_CODE_226_IM_USED = 226, AWS_HTTP_STATUS_CODE_300_MULTIPLE_CHOICES = 300, AWS_HTTP_STATUS_CODE_301_MOVED_PERMANENTLY = 301, AWS_HTTP_STATUS_CODE_302_FOUND = 302, AWS_HTTP_STATUS_CODE_303_SEE_OTHER = 303, AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED = 304, AWS_HTTP_STATUS_CODE_305_USE_PROXY = 305, AWS_HTTP_STATUS_CODE_307_TEMPORARY_REDIRECT = 307, AWS_HTTP_STATUS_CODE_308_PERMANENT_REDIRECT = 308, AWS_HTTP_STATUS_CODE_400_BAD_REQUEST = 400, AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED = 401, AWS_HTTP_STATUS_CODE_402_PAYMENT_REQUIRED = 402, AWS_HTTP_STATUS_CODE_403_FORBIDDEN = 403, AWS_HTTP_STATUS_CODE_404_NOT_FOUND = 404, AWS_HTTP_STATUS_CODE_405_METHOD_NOT_ALLOWED = 405, AWS_HTTP_STATUS_CODE_406_NOT_ACCEPTABLE = 406, AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED = 407, AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT = 408, AWS_HTTP_STATUS_CODE_409_CONFLICT = 409, AWS_HTTP_STATUS_CODE_410_GONE = 410, AWS_HTTP_STATUS_CODE_411_LENGTH_REQUIRED = 411, AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED = 412, AWS_HTTP_STATUS_CODE_413_REQUEST_ENTITY_TOO_LARGE = 413, AWS_HTTP_STATUS_CODE_414_REQUEST_URI_TOO_LONG = 414, AWS_HTTP_STATUS_CODE_415_UNSUPPORTED_MEDIA_TYPE = 415, AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416, AWS_HTTP_STATUS_CODE_417_EXPECTATION_FAILED = 417, AWS_HTTP_STATUS_CODE_421_MISDIRECTED_REQUEST = 421, AWS_HTTP_STATUS_CODE_422_UNPROCESSABLE_ENTITY = 422, AWS_HTTP_STATUS_CODE_423_LOCKED = 423, AWS_HTTP_STATUS_CODE_424_FAILED_DEPENDENCY = 424, AWS_HTTP_STATUS_CODE_425_TOO_EARLY = 425, AWS_HTTP_STATUS_CODE_426_UPGRADE_REQUIRED = 426, AWS_HTTP_STATUS_CODE_428_PRECONDITION_REQUIRED = 428, AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS = 429, AWS_HTTP_STATUS_CODE_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431, AWS_HTTP_STATUS_CODE_451_UNAVAILABLE_FOR_LEGAL_REASON = 451, AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR = 500, AWS_HTTP_STATUS_CODE_501_NOT_IMPLEMENTED = 501, AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY = 502, AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE = 503, AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT = 504, AWS_HTTP_STATUS_CODE_505_HTTP_VERSION_NOT_SUPPORTED = 505, AWS_HTTP_STATUS_CODE_506_VARIANT_ALSO_NEGOTIATES = 506, AWS_HTTP_STATUS_CODE_507_INSUFFICIENT_STORAGE = 507, AWS_HTTP_STATUS_CODE_508_LOOP_DETECTED = 508, AWS_HTTP_STATUS_CODE_510_NOT_EXTENDED = 510, AWS_HTTP_STATUS_CODE_511_NETWORK_AUTHENTICATION_REQUIRED = 511, }; #endif /* AWS_HTTP_STATUS_CODE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/include/aws/http/websocket.h000066400000000000000000000436661456575232400255610ustar00rootroot00000000000000#ifndef AWS_HTTP_WEBSOCKET_H #define AWS_HTTP_WEBSOCKET_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_http_header; struct aws_http_message; /* TODO: Document lifetime stuff */ /* TODO: Document CLOSE frame behavior (when auto-sent during close, when auto-closed) */ /* TODO: Accept payload as aws_input_stream */ /** * A websocket connection. */ struct aws_websocket; /** * Opcode describing the type of a websocket frame. * RFC-6455 Section 5.2 */ enum aws_websocket_opcode { AWS_WEBSOCKET_OPCODE_CONTINUATION = 0x0, AWS_WEBSOCKET_OPCODE_TEXT = 0x1, AWS_WEBSOCKET_OPCODE_BINARY = 0x2, AWS_WEBSOCKET_OPCODE_CLOSE = 0x8, AWS_WEBSOCKET_OPCODE_PING = 0x9, AWS_WEBSOCKET_OPCODE_PONG = 0xA, }; #define AWS_WEBSOCKET_MAX_PAYLOAD_LENGTH 0x7FFFFFFFFFFFFFFF #define AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH 25 #define AWS_WEBSOCKET_CLOSE_TIMEOUT 1000000000 // nanos -> 1 sec /** * Data passed to the websocket on_connection_setup callback. * * An error_code of zero indicates that setup was completely successful. * You own the websocket pointer now and must call aws_websocket_release() when you are done with it. * You can inspect the response headers, if you're interested. * * A non-zero error_code indicates that setup failed. * The websocket pointer will be NULL. * If the server sent a response, you can inspect its status-code, headers, and body, * but this data will NULL if setup failed before a full response could be received. * If you wish to persist data from the response make a deep copy. * The response data becomes invalid once the callback completes. */ struct aws_websocket_on_connection_setup_data { int error_code; struct aws_websocket *websocket; const int *handshake_response_status; const struct aws_http_header *handshake_response_header_array; size_t num_handshake_response_headers; const struct aws_byte_cursor *handshake_response_body; }; /** * Called when websocket setup is complete. * Called exactly once on the websocket's event-loop thread. * See `aws_websocket_on_connection_setup_data`. */ typedef void( aws_websocket_on_connection_setup_fn)(const struct aws_websocket_on_connection_setup_data *setup, void *user_data); /** * Called when the websocket has finished shutting down. * Called once on the websocket's event-loop thread if setup succeeded. * If setup failed, this is never called. */ typedef void(aws_websocket_on_connection_shutdown_fn)(struct aws_websocket *websocket, int error_code, void *user_data); /** * Data about an incoming frame. * See RFC-6455 Section 5.2. */ struct aws_websocket_incoming_frame { uint64_t payload_length; uint8_t opcode; bool fin; }; /** * Called when a new frame arrives. * Invoked once per frame on the websocket's event-loop thread. * Each incoming-frame-begin call will eventually be followed by an incoming-frame-complete call, * before the next frame begins and before the websocket shuts down. * * Return true to proceed normally. If false is returned, the websocket will read no further data, * the frame will complete with an error-code, and the connection will close. */ typedef bool(aws_websocket_on_incoming_frame_begin_fn)( struct aws_websocket *websocket, const struct aws_websocket_incoming_frame *frame, void *user_data); /** * Called repeatedly as payload data arrives. * Invoked 0 or more times on the websocket's event-loop thread. * Payload data will not be valid after this call, so copy if necessary. * The payload data is always unmasked at this point. * * NOTE: If you created the websocket with `manual_window_management` set true, you must maintain the read window. 
* Whenever the read window reaches 0, you will stop receiving anything. * The websocket's `initial_window_size` determines the starting size of the read window. * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION). * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing. * Maintain a larger window to keep up high throughput. * You only need to worry about the payload from "data" frames. * The websocket automatically increments the window to account for any * other incoming bytes, including other parts of a frame (opcode, payload-length, etc) * and the payload of other frame types (PING, PONG, CLOSE). * * Return true to proceed normally. If false is returned, the websocket will read no further data, * the frame will complete with an error-code, and the connection will close. */ typedef bool(aws_websocket_on_incoming_frame_payload_fn)( struct aws_websocket *websocket, const struct aws_websocket_incoming_frame *frame, struct aws_byte_cursor data, void *user_data); /** * Called when done processing an incoming frame. * If error_code is non-zero, an error occurred and the payload may not have been completely received. * Invoked once per frame on the websocket's event-loop thread. * * Return true to proceed normally. If false is returned, the websocket will read no further data * and the connection will close. */ typedef bool(aws_websocket_on_incoming_frame_complete_fn)( struct aws_websocket *websocket, const struct aws_websocket_incoming_frame *frame, int error_code, void *user_data); /** * Options for creating a websocket client connection. */ struct aws_websocket_client_connection_options { /** * Required. * Must outlive the connection. */ struct aws_allocator *allocator; /** * Required. * The connection keeps the bootstrap alive via ref-counting. */ struct aws_client_bootstrap *bootstrap; /** * Required. * aws_websocket_client_connect() makes a copy. */ const struct aws_socket_options *socket_options; /** * Optional. * aws_websocket_client_connect() deep-copies all contents, * and keeps the `aws_tls_ctx` alive via ref-counting. */ const struct aws_tls_connection_options *tls_options; /** * Optional * Configuration options related to http proxy usage. */ const struct aws_http_proxy_options *proxy_options; /** * Required. * aws_websocket_client_connect() makes a copy. */ struct aws_byte_cursor host; /** * Optional. * Defaults to 443 if tls_options is present, 80 if it is not. */ uint32_t port; /** * Required. * The request will be kept alive via ref-counting until the handshake completes. * Suggestion: create via aws_http_message_new_websocket_handshake_request() * * The method MUST be set to GET. * The following headers are required (replace values in []): * * Host: [server.example.com] * Upgrade: websocket * Connection: Upgrade * Sec-WebSocket-Key: [dGhlIHNhbXBsZSBub25jZQ==] * Sec-WebSocket-Version: 13 * * Sec-Websocket-Key should be a random 16 bytes value, Base64 encoded. */ struct aws_http_message *handshake_request; /** * Initial size of the websocket's read window. * Ignored unless `manual_window_management` is true. * Set to 0 to prevent any incoming websocket frames until aws_websocket_increment_read_window() is called. */ size_t initial_window_size; /** * User data for callbacks. * Optional. */ void *user_data; /** * Called when connect completes. * Required. * If unsuccessful, error_code will be set, connection will be NULL, * and the on_connection_shutdown callback will never be called. 
* If successful, the user is now responsible for the websocket and must * call aws_websocket_release() when they are done with it. */ aws_websocket_on_connection_setup_fn *on_connection_setup; /** * Called when connection has finished shutting down. * Optional. * Never called if `on_connection_setup` reported failure. * Note that the connection is not completely done until `on_connection_shutdown` has been called * AND aws_websocket_release() has been called. */ aws_websocket_on_connection_shutdown_fn *on_connection_shutdown; /** * Called when each new frame arrives. * Optional. * See `aws_websocket_on_incoming_frame_begin_fn`. */ aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin; /** * Called repeatedly as payload data arrives. * Optional. * See `aws_websocket_on_incoming_frame_payload_fn`. */ aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload; /** * Called when done processing an incoming frame. * Optional. * See `aws_websocket_on_incoming_frame_complete_fn`. */ aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete; /** * Set to true to manually manage the read window size. * * If this is false, no backpressure is applied and frames will arrive as fast as possible. * * If this is true, then whenever the read window reaches 0 you will stop receiving anything. * The websocket's `initial_window_size` determines the starting size of the read window. * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION). * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing. * Maintain a larger window to keep up high throughput. * You only need to worry about the payload from "data" frames. * The websocket automatically increments the window to account for any * other incoming bytes, including other parts of a frame (opcode, payload-length, etc) * and the payload of other frame types (PING, PONG, CLOSE). */ bool manual_window_management; /** * Optional * If set, requests that a specific event loop be used to seat the connection, rather than the next one * in the event loop group. Useful for serializing all io and external events related to a client onto * a single thread. */ struct aws_event_loop *requested_event_loop; /** * Optional * Host resolution override that allows the user to override DNS behavior for this particular connection. */ const struct aws_host_resolution_config *host_resolution_config; }; /** * Called repeatedly as the websocket's payload is streamed out. * The user should write payload data to out_buf, up to available capacity. * The websocket will mask this data for you, if necessary. * Invoked repeatedly on the websocket's event-loop thread. * * Return true to proceed normally. If false is returned, the websocket will send no further data, * the frame will complete with an error-code, and the connection will close. */ typedef bool(aws_websocket_stream_outgoing_payload_fn)( struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data); /** * Called when a aws_websocket_send_frame() operation completes. * error_code will be zero if the operation was successful. * "Success" does not guarantee that the peer actually received or processed the frame. * Invoked exactly once per sent frame on the websocket's event-loop thread. */ typedef void( aws_websocket_outgoing_frame_complete_fn)(struct aws_websocket *websocket, int error_code, void *user_data); /** * Options for sending a websocket frame. 
* This structure is copied immediately by aws_websocket_send(). * For descriptions of opcode, fin, and payload_length see in RFC-6455 Section 5.2. */ struct aws_websocket_send_frame_options { /** * Size of payload to be sent via `stream_outgoing_payload` callback. */ uint64_t payload_length; /** * User data passed to callbacks. */ void *user_data; /** * Callback for sending payload data. * See `aws_websocket_stream_outgoing_payload_fn`. * Required if `payload_length` is non-zero. */ aws_websocket_stream_outgoing_payload_fn *stream_outgoing_payload; /** * Callback for completion of send operation. * See `aws_websocket_outgoing_frame_complete_fn`. * Optional. */ aws_websocket_outgoing_frame_complete_fn *on_complete; /** * Frame type. * `aws_websocket_opcode` enum provides standard values. */ uint8_t opcode; /** * Indicates that this is the final fragment in a message. The first fragment MAY also be the final fragment. */ bool fin; }; AWS_EXTERN_C_BEGIN /** * Return true if opcode is for a data frame, false if opcode if for a control frame. */ AWS_HTTP_API bool aws_websocket_is_data_frame(uint8_t opcode); /** * Asynchronously establish a client websocket connection. * The on_connection_setup callback is invoked when the operation has finished creating a connection, or failed. */ AWS_HTTP_API int aws_websocket_client_connect(const struct aws_websocket_client_connection_options *options); /** * Increment the websocket's ref-count, preventing it from being destroyed. * @return Always returns the same pointer that is passed in. */ AWS_HTTP_API struct aws_websocket *aws_websocket_acquire(struct aws_websocket *websocket); /** * Decrement the websocket's ref-count. * When the ref-count reaches zero, the connection will shut down, if it hasn't already. * Users must release the websocket when they are done with it. * The websocket's memory cannot be reclaimed until this is done. * Callbacks may continue firing after this is called, with "shutdown" being the final callback. * This function may be called from any thread. * * It is safe to pass NULL, nothing will happen. */ AWS_HTTP_API void aws_websocket_release(struct aws_websocket *websocket); /** * Close the websocket connection. * It is safe to call this, even if the connection is already closed or closing. * The websocket will attempt to send a CLOSE frame during normal shutdown. * If `free_scarce_resources_immediately` is true, the connection will be torn down as quickly as possible. * This function may be called from any thread. */ AWS_HTTP_API void aws_websocket_close(struct aws_websocket *websocket, bool free_scarce_resources_immediately); /** * Send a websocket frame. * The `options` struct is copied. * A callback will be invoked when the operation completes. * This function may be called from any thread. */ AWS_HTTP_API int aws_websocket_send_frame(struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options); /** * Manually increment the read window to keep frames flowing. * * If the websocket was created with `manual_window_management` set true, * then whenever the read window reaches 0 you will stop receiving data. * The websocket's `initial_window_size` determines the starting size of the read window. * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION). * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing. * Maintain a larger window to keep up high throughput. 
* You only need to worry about the payload from "data" frames. * The websocket automatically increments the window to account for any * other incoming bytes, including other parts of a frame (opcode, payload-length, etc) * and the payload of other frame types (PING, PONG, CLOSE). * * If the websocket was created with `manual_window_management` set false, this function does nothing. * * This function may be called from any thread. */ AWS_HTTP_API void aws_websocket_increment_read_window(struct aws_websocket *websocket, size_t size); /** * Convert the websocket into a mid-channel handler. * The websocket will stop being usable via its public API and become just another handler in the channel. * The caller will likely install a channel handler to the right. * This must not be called in the middle of an incoming frame (between "frame begin" and "frame complete" callbacks). * This MUST be called from the websocket's thread. * * If successful: * - Other than aws_websocket_release(), all calls to aws_websocket_x() functions are ignored. * - The websocket will no longer invoke any "incoming frame" callbacks. * - aws_io_messages written by a downstream handler will be wrapped in binary data frames and sent upstream. * The data may be split/combined as it is sent along. * - aws_io_messages read from upstream handlers will be scanned for binary data frames. * The payloads of these frames will be sent downstream. * The payloads may be split/combined as they are sent along. * - An incoming close frame will automatically result in channel-shutdown. * - aws_websocket_release() must still be called or the websocket and its channel will never be cleaned up. * - The websocket will still invoke its "on connection shutdown" callback when channel shutdown completes. * * If unsuccessful, NULL is returned and the websocket is unchanged. */ AWS_HTTP_API int aws_websocket_convert_to_midchannel_handler(struct aws_websocket *websocket); /** * Returns the websocket's underlying I/O channel. */ AWS_HTTP_API struct aws_channel *aws_websocket_get_channel(const struct aws_websocket *websocket); /** * Generate value for a Sec-WebSocket-Key header and write it into `dst` buffer. * The buffer should have at least AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH space available. * * This value is the base64 encoding of a random 16-byte value. * RFC-6455 Section 4.1 */ AWS_HTTP_API int aws_websocket_random_handshake_key(struct aws_byte_buf *dst); /** * Create request with all required fields for a websocket upgrade request. * The method and path are set, and the the following headers are added: * * Host: * Upgrade: websocket * Connection: Upgrade * Sec-WebSocket-Key: * Sec-WebSocket-Version: 13 */ AWS_HTTP_API struct aws_http_message *aws_http_message_new_websocket_handshake_request( struct aws_allocator *allocator, struct aws_byte_cursor path, struct aws_byte_cursor host); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_HTTP_WEBSOCKET_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/integration-testing/000077500000000000000000000000001456575232400242055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/integration-testing/http_client_test.py000066400000000000000000000132571456575232400301430ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. 
import filecmp import subprocess import sys import urllib.request import unittest import os.path TIMEOUT = 100 # Accepting multiple args so we can pass something like: python elasticurl.py elasticurl_cmd_prefix = sys.argv[1:] if not elasticurl_cmd_prefix: print('You must pass the elasticurl cmd prefix') sys.exit(-1) program_to_run = elasticurl_cmd_prefix[0] if 'bin' in program_to_run: if not os.path.exists(program_to_run): print('the program_to_run is not found, skip integration test') sys.exit(0) # Remove args from sys.argv so that unittest doesn't also try to parse them. sys.argv = sys.argv[:1] def run_command(args): # gather all stderr and stdout to a single string that we print only if things go wrong process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) timedout = False try: output = process.communicate(timeout=TIMEOUT)[0] except subprocess.TimeoutExpired: timedout = True process.kill() args_str = subprocess.list2cmdline(args) output = process.communicate()[0] finally: if process.returncode != 0 or timedout: args_str = subprocess.list2cmdline(args) print(args_str) for line in output.splitlines(): print(line.decode()) if timedout: raise RuntimeError("Timeout happened after {secs} secs from: {cmd}".format(secs=TIMEOUT, cmd=args_str)) else: raise RuntimeError("Return code {code} from: {cmd}".format(code=process.returncode, cmd=args_str)) def compare_files(filename_expected, filename_other): if not filecmp.cmp(filename_expected, filename_other, shallow=False): # Give a helpful error message with open(filename_expected, 'rb') as expected: try: bytes_expected = bytearray(expected.read()) except: raise RuntimeError("Failed to open %s" % filename_expected) with open(filename_other, 'rb') as other: try: bytes_other = bytearray(other.read()) except: raise RuntimeError("Failed to open %s" % filename_other) if len(bytes_expected) != len(bytes_other): raise RuntimeError("File lengths differ. Expected %d, got %d" % (len(bytes_expected), len(bytes_other))) for i in range(len(bytes_expected)): if bytes_expected[i] != bytes_other[i]: raise RuntimeError("Files differ at byte[%d]. Expected %d, got %d." % (i, bytes_expected[i], bytes_other[i])) print("filecmp says these files differ, but they are identical. what the heck.") class SimpleTests(unittest.TestCase): def test_simple_get_amazon(self): """make a simple GET request via alpn h2;http/1.1 to amazon and make sure it succeeds""" simple_get_args = elasticurl_cmd_prefix + ['-v', 'TRACE', 'https://www.amazon.com'] run_command(simple_get_args) def test_simple_get_google(self): """make a simple GET request via alpn h2;http/1.1 to google and make sure it succeeds""" simple_get_args = elasticurl_cmd_prefix + ['-v', 'TRACE', 'https://www.google.com'] run_command(simple_get_args) def test_simple_get_h1(self): """make a simple GET request via HTTP/1.1 and make sure it succeeds""" simple_get_args = elasticurl_cmd_prefix + ['-v', 'TRACE', '--http1_1', 'http://postman-echo.com/get'] run_command(simple_get_args) def test_simple_post_h1(self): """make a simple POST request via HTTP/1.1 to make sure sending data succeeds""" simple_post_args = elasticurl_cmd_prefix + ['-v', 'TRACE', '--http1_1', '-P', '-H', 'content-type: application/json', '-i', '-d', '\"{\'test\':\'testval\'}\"', 'http://postman-echo.com/post'] run_command(simple_post_args) def test_simple_download_h1(self): """download a large file via HTTP/1.1 and compare the results with something we assume works (e.g. 
urllib)""" elasticurl_download_args = elasticurl_cmd_prefix + ['-v', 'TRACE', '--http1_1', '-o', 'elastigirl.png', 'https://s3.amazonaws.com/code-sharing-aws-crt/elastigirl.png'] run_command(elasticurl_download_args) urllib.request.urlretrieve('https://s3.amazonaws.com/code-sharing-aws-crt/elastigirl.png', 'elastigirl_expected.png') compare_files('elastigirl_expected.png', 'elastigirl.png') def test_simple_get_h2(self): """make a simple GET request via HTTP2 and make sure it succeeds""" simple_get_args = elasticurl_cmd_prefix + ['-v', 'TRACE', '--http2', 'https://postman-echo.com/get'] run_command(simple_get_args) def test_simple_post_h2(self): """make a simple POST request via HTTP2 to make sure sending data succeeds""" simple_post_args = elasticurl_cmd_prefix + ['-v', 'TRACE', '--http2', '-P', '-H', 'content-type: application/json', '-i', '-d', '\"{\'test\':\'testval\'}\"', 'https://postman-echo.com/post'] run_command(simple_post_args) def test_simple_download_h2(self): """download a large file via HTTP2 and compare the results with something we assume works (e.g. urllib)""" elasticurl_download_args = elasticurl_cmd_prefix + ['-v', 'TRACE', '--http2', '-o', 'elastigirl_h2.png', 'https://d1cz66xoahf9cl.cloudfront.net/elastigirl.png'] run_command(elasticurl_download_args) urllib.request.urlretrieve('https://d1cz66xoahf9cl.cloudfront.net/elastigirl.png', 'elastigirl_expected.png') compare_files('elastigirl_expected.png', 'elastigirl_h2.png') if __name__ == '__main__': unittest.main(verbosity=2) aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/000077500000000000000000000000001456575232400215075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/connection.c000066400000000000000000001322001456575232400240100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif static struct aws_http_connection_system_vtable s_default_system_vtable = { .aws_client_bootstrap_new_socket_channel = aws_client_bootstrap_new_socket_channel, }; static const struct aws_http_connection_system_vtable *s_system_vtable_ptr = &s_default_system_vtable; void aws_http_client_bootstrap_destroy(struct aws_http_client_bootstrap *bootstrap) { /* During allocating, the underlying stuctures should be allocated with the bootstrap by aws_mem_acquire_many. 
Thus, * we only need to clean up the first pointer which is the bootstrap */ if (bootstrap->alpn_string_map) { aws_hash_table_clean_up(bootstrap->alpn_string_map); } aws_mem_release(bootstrap->alloc, bootstrap); } void aws_http_connection_set_system_vtable(const struct aws_http_connection_system_vtable *system_vtable) { s_system_vtable_ptr = system_vtable; } AWS_STATIC_STRING_FROM_LITERAL(s_alpn_protocol_http_1_1, "http/1.1"); AWS_STATIC_STRING_FROM_LITERAL(s_alpn_protocol_http_2, "h2"); struct aws_http_server { struct aws_allocator *alloc; struct aws_server_bootstrap *bootstrap; bool is_using_tls; bool manual_window_management; size_t initial_window_size; void *user_data; aws_http_server_on_incoming_connection_fn *on_incoming_connection; aws_http_server_on_destroy_fn *on_destroy_complete; struct aws_socket *socket; /* Any thread may touch this data, but the lock must be held */ struct { struct aws_mutex lock; bool is_shutting_down; struct aws_hash_table channel_to_connection_map; } synced_data; }; static void s_server_lock_synced_data(struct aws_http_server *server) { int err = aws_mutex_lock(&server->synced_data.lock); AWS_ASSERT(!err); (void)err; } static void s_server_unlock_synced_data(struct aws_http_server *server) { int err = aws_mutex_unlock(&server->synced_data.lock); AWS_ASSERT(!err); (void)err; } /* Determine the http-version, create appropriate type of connection, and insert it into the channel. */ struct aws_http_connection *aws_http_connection_new_channel_handler( struct aws_allocator *alloc, struct aws_channel *channel, bool is_server, bool is_using_tls, bool manual_window_management, bool prior_knowledge_http2, size_t initial_window_size, const struct aws_hash_table *alpn_string_map, const struct aws_http1_connection_options *http1_options, const struct aws_http2_connection_options *http2_options, void *connection_user_data) { struct aws_channel_slot *connection_slot = NULL; struct aws_http_connection *connection = NULL; /* Create slot for connection. 
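 * (The HTTP handler is installed at the right end of the channel, after the socket handler and, when TLS is
 * used, the TLS handler; the ALPN query further down relies on connection_slot->adj_left being that TLS slot.)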
*/ connection_slot = aws_channel_slot_new(channel); if (!connection_slot) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to create slot in channel %p, error %d (%s).", (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto error; } int err = aws_channel_slot_insert_end(channel, connection_slot); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to insert slot into channel %p, error %d (%s).", (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Determine HTTP version */ enum aws_http_version version = AWS_HTTP_VERSION_1_1; if (is_using_tls) { /* Query TLS channel handler (immediately to left in the channel) for negotiated ALPN protocol */ if (!connection_slot->adj_left || !connection_slot->adj_left->handler) { aws_raise_error(AWS_ERROR_INVALID_STATE); AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to find TLS handler in channel %p.", (void *)channel); goto error; } struct aws_channel_slot *tls_slot = connection_slot->adj_left; struct aws_channel_handler *tls_handler = tls_slot->handler; struct aws_byte_buf protocol = aws_tls_handler_protocol(tls_handler); if (protocol.len) { bool customized = false; if (alpn_string_map) { customized = true; struct aws_string *negotiated_result = aws_string_new_from_buf(alloc, &protocol); struct aws_hash_element *found = NULL; aws_hash_table_find(alpn_string_map, (void *)negotiated_result, &found); if (found) { version = (enum aws_http_version)(size_t)found->value; AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION, "static: Customized ALPN protocol " PRInSTR " used. " PRInSTR " client connection established.", AWS_BYTE_BUF_PRI(protocol), AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version))); } else { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Customized ALPN protocol " PRInSTR " used. However the it's not found in the ALPN map provided.", AWS_BYTE_BUF_PRI(protocol)); version = AWS_HTTP_VERSION_UNKNOWN; } aws_string_destroy(negotiated_result); } if (customized) { /* Do nothing */ } else if (aws_string_eq_byte_buf(s_alpn_protocol_http_1_1, &protocol)) { version = AWS_HTTP_VERSION_1_1; } else if (aws_string_eq_byte_buf(s_alpn_protocol_http_2, &protocol)) { version = AWS_HTTP_VERSION_2; } else { AWS_LOGF_WARN(AWS_LS_HTTP_CONNECTION, "static: Unrecognized ALPN protocol. 
Assuming HTTP/1.1"); AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION, "static: Unrecognized ALPN protocol " PRInSTR, AWS_BYTE_BUF_PRI(protocol)); version = AWS_HTTP_VERSION_1_1; } } } else { if (prior_knowledge_http2) { AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "Using prior knowledge to start HTTP/2 connection"); version = AWS_HTTP_VERSION_2; } } /* Create connection/handler */ switch (version) { case AWS_HTTP_VERSION_1_1: if (is_server) { connection = aws_http_connection_new_http1_1_server( alloc, manual_window_management, initial_window_size, http1_options); } else { connection = aws_http_connection_new_http1_1_client( alloc, manual_window_management, initial_window_size, http1_options); } break; case AWS_HTTP_VERSION_2: if (is_server) { connection = aws_http_connection_new_http2_server(alloc, manual_window_management, http2_options); } else { connection = aws_http_connection_new_http2_client(alloc, manual_window_management, http2_options); } break; default: AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Unsupported version " PRInSTR, AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version))); aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL); goto error; } if (!connection) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to create " PRInSTR " %s connection object, error %d (%s).", AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version)), is_server ? "server" : "client", aws_last_error(), aws_error_name(aws_last_error())); goto error; } connection->user_data = connection_user_data; /* Connect handler and slot */ if (aws_channel_slot_set_handler(connection_slot, &connection->channel_handler)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to set HTTP handler into slot on channel %p, error %d (%s).", (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Success! 
Inform connection that installation is complete */ connection->vtable->on_channel_handler_installed(&connection->channel_handler, connection_slot); return connection; error: if (connection_slot) { if (!connection_slot->handler && connection) { aws_channel_handler_destroy(&connection->channel_handler); } aws_channel_slot_remove(connection_slot); } return NULL; } void aws_http_connection_close(struct aws_http_connection *connection) { AWS_ASSERT(connection); connection->vtable->close(connection); } void aws_http_connection_stop_new_requests(struct aws_http_connection *connection) { AWS_ASSERT(connection); connection->vtable->stop_new_requests(connection); } bool aws_http_connection_is_open(const struct aws_http_connection *connection) { AWS_ASSERT(connection); return connection->vtable->is_open(connection); } bool aws_http_connection_new_requests_allowed(const struct aws_http_connection *connection) { AWS_ASSERT(connection); return connection->vtable->new_requests_allowed(connection); } bool aws_http_connection_is_client(const struct aws_http_connection *connection) { return connection->client_data; } bool aws_http_connection_is_server(const struct aws_http_connection *connection) { return connection->server_data; } int aws_http2_connection_change_settings( struct aws_http_connection *http2_connection, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); return http2_connection->vtable->change_settings( http2_connection, settings_array, num_settings, on_completed, user_data); } int aws_http2_connection_ping( struct aws_http_connection *http2_connection, const struct aws_byte_cursor *optional_opaque_data, aws_http2_on_ping_complete_fn *on_ack, void *user_data) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); return http2_connection->vtable->send_ping(http2_connection, optional_opaque_data, on_ack, user_data); } void aws_http2_connection_send_goaway( struct aws_http_connection *http2_connection, uint32_t http2_error, bool allow_more_streams, const struct aws_byte_cursor *optional_debug_data) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); http2_connection->vtable->send_goaway(http2_connection, http2_error, allow_more_streams, optional_debug_data); } int aws_http2_connection_get_sent_goaway( struct aws_http_connection *http2_connection, uint32_t *out_http2_error, uint32_t *out_last_stream_id) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(out_http2_error); AWS_PRECONDITION(out_last_stream_id); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); return http2_connection->vtable->get_sent_goaway(http2_connection, out_http2_error, out_last_stream_id); } int aws_http2_connection_get_received_goaway( struct aws_http_connection *http2_connection, uint32_t *out_http2_error, uint32_t *out_last_stream_id) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(out_http2_error); AWS_PRECONDITION(out_last_stream_id); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); return http2_connection->vtable->get_received_goaway(http2_connection, out_http2_error, 
out_last_stream_id); } void aws_http2_connection_get_local_settings( const struct aws_http_connection *http2_connection, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); http2_connection->vtable->get_local_settings(http2_connection, out_settings); } void aws_http2_connection_get_remote_settings( const struct aws_http_connection *http2_connection, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); http2_connection->vtable->get_remote_settings(http2_connection, out_settings); } void aws_http2_connection_update_window(struct aws_http_connection *http2_connection, uint32_t increment_size) { AWS_ASSERT(http2_connection); AWS_PRECONDITION(http2_connection->vtable); AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2); http2_connection->vtable->update_window(http2_connection, increment_size); } struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection *connection) { AWS_ASSERT(connection); return connection->channel_slot->channel; } const struct aws_socket_endpoint *aws_http_connection_get_remote_endpoint( const struct aws_http_connection *connection) { AWS_ASSERT(connection); struct aws_channel *channel = connection->channel_slot->channel; /* The first slot for an HTTP connection is always socket */ struct aws_channel_slot *socket_slot = aws_channel_get_first_slot(channel); const struct aws_socket *socket = aws_socket_handler_get_socket(socket_slot->handler); return &socket->remote_endpoint; } int aws_http_alpn_map_init(struct aws_allocator *allocator, struct aws_hash_table *map) { AWS_ASSERT(allocator); AWS_ASSERT(map); int result = aws_hash_table_init( map, allocator, 5 /* initial size */, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, NULL); if (result) { /* OOM will crash */ int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "Failed to initialize ALPN map with error code %d (%s)", error_code, aws_error_name(error_code)); } return result; } void aws_http_connection_acquire(struct aws_http_connection *connection) { AWS_ASSERT(connection); aws_atomic_fetch_add(&connection->refcount, 1); } void aws_http_connection_release(struct aws_http_connection *connection) { if (!connection) { return; } size_t prev_refcount = aws_atomic_fetch_sub(&connection->refcount, 1); if (prev_refcount == 1) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Final connection refcount released, shut down if necessary.", (void *)connection); /* Channel might already be shut down, but make sure */ aws_channel_shutdown(connection->channel_slot->channel, AWS_ERROR_SUCCESS); /* When the channel's refcount reaches 0, it destroys its slots/handlers, which will destroy the connection */ aws_channel_release_hold(connection->channel_slot->channel); } else { AWS_FATAL_ASSERT(prev_refcount != 0); AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Connection refcount released, %zu remaining.", (void *)connection, prev_refcount - 1); } } /* At this point, the server bootstrapper has accepted an incoming connection from a client and set up a channel. * Now we need to create an aws_http_connection and insert it into the channel as a channel-handler. 
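 * The steps below: create the connection handler, record it in synced_data.channel_to_connection_map while
 * holding the server lock, and then, with the lock released, invoke the user's on_incoming_connection callback.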
* Note: Be careful not to access server->socket until lock is acquired to avoid race conditions */ static void s_server_bootstrap_on_accept_channel_setup( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; AWS_ASSERT(user_data); struct aws_http_server *server = user_data; bool user_cb_invoked = false; struct aws_http_connection *connection = NULL; if (error_code) { AWS_LOGF_ERROR( AWS_LS_HTTP_SERVER, "%p: Incoming connection failed with error code %d (%s)", (void *)server, error_code, aws_error_name(error_code)); goto error; } /* Create connection */ /* TODO: expose http1/2 options to server API */ struct aws_http1_connection_options http1_options; AWS_ZERO_STRUCT(http1_options); struct aws_http2_connection_options http2_options; AWS_ZERO_STRUCT(http2_options); connection = aws_http_connection_new_channel_handler( server->alloc, channel, true, server->is_using_tls, server->manual_window_management, false, /* prior_knowledge_http2 */ server->initial_window_size, NULL, /* alpn_string_map */ &http1_options, &http2_options, NULL /* connection_user_data */); if (!connection) { AWS_LOGF_ERROR( AWS_LS_HTTP_SERVER, "%p: Failed to create connection object, error %d (%s).", (void *)server, aws_last_error(), aws_error_name(aws_last_error())); goto error; } int put_err = 0; /* BEGIN CRITICAL SECTION */ s_server_lock_synced_data(server); if (server->synced_data.is_shutting_down) { error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; } if (!error_code) { put_err = aws_hash_table_put(&server->synced_data.channel_to_connection_map, channel, connection, NULL); } s_server_unlock_synced_data(server); /* END CRITICAL SECTION */ if (error_code) { AWS_LOGF_ERROR( AWS_ERROR_HTTP_SERVER_CLOSED, "id=%p: Incoming connection failed. The server is shutting down.", (void *)server); goto error; } if (put_err) { AWS_LOGF_ERROR( AWS_LS_HTTP_SERVER, "%p: %s:%u: Failed to store connection object, error %d (%s).", (void *)server, server->socket->local_endpoint.address, server->socket->local_endpoint.port, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Tell user of successful connection. */ AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "id=%p: " PRInSTR " server connection established at %p %s:%u.", (void *)connection, AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->http_version)), (void *)server, server->socket->local_endpoint.address, server->socket->local_endpoint.port); server->on_incoming_connection(server, connection, AWS_ERROR_SUCCESS, server->user_data); user_cb_invoked = true; /* If user failed to configure the server during callback, shut down the channel. 
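 * A conforming callback configures the connection before returning, roughly like the sketch below
 * (the s_on_... names are placeholders for user-supplied callbacks; error handling omitted):
 *     static void s_on_incoming_connection(struct aws_http_server *server,
 *             struct aws_http_connection *connection, int error_code, void *user_data) {
 *         struct aws_http_server_connection_options options = {
 *             .connection_user_data = user_data,
 *             .on_incoming_request = s_on_incoming_request,
 *             .on_shutdown = s_on_connection_shutdown,
 *         };
 *         aws_http_connection_configure_server(connection, &options);
 *     }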
*/ if (!connection->server_data->on_incoming_request) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Caller failed to invoke aws_http_connection_configure_server() during on_incoming_connection " "callback, closing connection.", (void *)connection); aws_raise_error(AWS_ERROR_HTTP_REACTION_REQUIRED); goto error; } return; error: if (!error_code) { error_code = aws_last_error(); } if (!user_cb_invoked) { server->on_incoming_connection(server, NULL, error_code, server->user_data); } if (channel) { aws_channel_shutdown(channel, error_code); } if (connection) { /* release the ref count for the user side */ aws_http_connection_release(connection); } } /* clean the server memory up */ static void s_http_server_clean_up(struct aws_http_server *server) { if (!server) { return; } aws_server_bootstrap_release(server->bootstrap); /* invoke the user callback */ if (server->on_destroy_complete) { server->on_destroy_complete(server->user_data); } aws_hash_table_clean_up(&server->synced_data.channel_to_connection_map); aws_mutex_clean_up(&server->synced_data.lock); aws_mem_release(server->alloc, server); } /* At this point, the channel for a server connection has completed shutdown, but hasn't been destroyed yet. */ static void s_server_bootstrap_on_accept_channel_shutdown( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; AWS_ASSERT(user_data); struct aws_http_server *server = user_data; /* Figure out which connection this was, and remove that entry from the map. * It won't be in the map if something went wrong while setting up the connection. */ struct aws_hash_element map_elem; int was_present; /* BEGIN CRITICAL SECTION */ s_server_lock_synced_data(server); int remove_err = aws_hash_table_remove(&server->synced_data.channel_to_connection_map, channel, &map_elem, &was_present); s_server_unlock_synced_data(server); /* END CRITICAL SECTION */ if (!remove_err && was_present) { struct aws_http_connection *connection = map_elem.value; AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "id=%p: Server connection shut down.", (void *)connection); /* Tell user about shutdown */ if (connection->server_data->on_shutdown) { connection->server_data->on_shutdown(connection, error_code, connection->user_data); } } } /* the server listener has finished the destroy process, no existing connections * finally safe to clean the server up */ static void s_server_bootstrap_on_server_listener_destroy(struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; AWS_ASSERT(user_data); struct aws_http_server *server = user_data; s_http_server_clean_up(server); } struct aws_http_server *aws_http_server_new(const struct aws_http_server_options *options) { aws_http_fatal_assert_library_initialized(); struct aws_http_server *server = NULL; if (!options || options->self_size == 0 || !options->allocator || !options->bootstrap || !options->socket_options || !options->on_incoming_connection || !options->endpoint) { AWS_LOGF_ERROR(AWS_LS_HTTP_SERVER, "static: Invalid options, cannot create server."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); /* nothing to clean up */ return NULL; } server = aws_mem_calloc(options->allocator, 1, sizeof(struct aws_http_server)); if (!server) { /* nothing to clean up */ return NULL; } server->alloc = options->allocator; server->bootstrap = aws_server_bootstrap_acquire(options->bootstrap); server->is_using_tls = options->tls_options != NULL; server->initial_window_size = options->initial_window_size; server->user_data = 
options->server_user_data; server->on_incoming_connection = options->on_incoming_connection; server->on_destroy_complete = options->on_destroy_complete; server->manual_window_management = options->manual_window_management; int err = aws_mutex_init(&server->synced_data.lock); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_SERVER, "static: Failed to initialize mutex, error %d (%s).", err, aws_error_name(err)); goto mutex_error; } err = aws_hash_table_init( &server->synced_data.channel_to_connection_map, server->alloc, 16, aws_hash_ptr, aws_ptr_eq, NULL, NULL); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_SERVER, "static: Cannot create server, error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto hash_table_error; } /* Protect against callbacks firing before server->socket is set */ s_server_lock_synced_data(server); if (options->tls_options) { server->is_using_tls = true; } struct aws_server_socket_channel_bootstrap_options bootstrap_options = { .enable_read_back_pressure = options->manual_window_management, .tls_options = options->tls_options, .bootstrap = options->bootstrap, .socket_options = options->socket_options, .incoming_callback = s_server_bootstrap_on_accept_channel_setup, .shutdown_callback = s_server_bootstrap_on_accept_channel_shutdown, .destroy_callback = s_server_bootstrap_on_server_listener_destroy, .host_name = options->endpoint->address, .port = options->endpoint->port, .user_data = server, }; server->socket = aws_server_bootstrap_new_socket_listener(&bootstrap_options); s_server_unlock_synced_data(server); if (!server->socket) { AWS_LOGF_ERROR( AWS_LS_HTTP_SERVER, "static: Failed creating new socket listener, error %d (%s). Cannot create server.", aws_last_error(), aws_error_name(aws_last_error())); goto socket_error; } AWS_LOGF_INFO( AWS_LS_HTTP_SERVER, "%p %s:%u: Server setup complete, listening for incoming connections.", (void *)server, server->socket->local_endpoint.address, server->socket->local_endpoint.port); return server; socket_error: aws_hash_table_clean_up(&server->synced_data.channel_to_connection_map); hash_table_error: aws_mutex_clean_up(&server->synced_data.lock); mutex_error: aws_mem_release(server->alloc, server); return NULL; } void aws_http_server_release(struct aws_http_server *server) { if (!server) { return; } bool already_shutting_down = false; /* BEGIN CRITICAL SECTION */ s_server_lock_synced_data(server); if (server->synced_data.is_shutting_down) { already_shutting_down = true; } else { server->synced_data.is_shutting_down = true; } if (!already_shutting_down) { /* shutdown all existing channels */ for (struct aws_hash_iter iter = aws_hash_iter_begin(&server->synced_data.channel_to_connection_map); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct aws_channel *channel = (struct aws_channel *)iter.element.key; aws_channel_shutdown(channel, AWS_ERROR_HTTP_CONNECTION_CLOSED); } } s_server_unlock_synced_data(server); /* END CRITICAL SECTION */ if (already_shutting_down) { /* The service is already shutting down, not shutting it down again */ AWS_LOGF_TRACE(AWS_LS_HTTP_SERVER, "id=%p: The server is already shutting down", (void *)server); return; } /* stop listening, clean up the socket, after all existing connections finish shutting down, the * s_server_bootstrap_on_server_listener_destroy will be invoked, clean up of the server will be there */ AWS_LOGF_INFO( AWS_LS_HTTP_SERVER, "%p %s:%u: Shutting down the server.", (void *)server, server->socket->local_endpoint.address, server->socket->local_endpoint.port); 
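    /* Destroying the listener is asynchronous; once it and all accepted channels are done,
     * s_server_bootstrap_on_server_listener_destroy() runs and frees the server, so `server`
     * must not be touched after the call below returns. */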
aws_server_bootstrap_destroy_socket_listener(server->bootstrap, server->socket); /* wait for connections to finish shutting down * clean up will be called from eventloop */ } const struct aws_socket_endpoint *aws_http_server_get_listener_endpoint(const struct aws_http_server *server) { AWS_FATAL_ASSERT(server); return &server->socket->local_endpoint; } /* At this point, the channel bootstrapper has established a connection to the server and set up a channel. * Now we need to create the aws_http_connection and insert it into the channel as a channel-handler. */ static void s_client_bootstrap_on_channel_setup( struct aws_client_bootstrap *channel_bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)channel_bootstrap; AWS_ASSERT(user_data); struct aws_http_client_bootstrap *http_bootstrap = user_data; /* Contract for setup callbacks is: channel is NULL if error_code is non-zero. */ AWS_FATAL_ASSERT((error_code != 0) == (channel == NULL)); if (error_code) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Client connection failed with error %d (%s).", error_code, aws_error_name(error_code)); /* Immediately tell user of failed connection. * No channel exists, so there will be no channel_shutdown callback. */ http_bootstrap->on_setup(NULL, error_code, http_bootstrap->user_data); /* Clean up the http_bootstrap, it has no more work to do. */ aws_http_client_bootstrap_destroy(http_bootstrap); return; } AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "static: Socket connected, creating client connection object."); http_bootstrap->connection = aws_http_connection_new_channel_handler( http_bootstrap->alloc, channel, false, http_bootstrap->is_using_tls, http_bootstrap->stream_manual_window_management, http_bootstrap->prior_knowledge_http2, http_bootstrap->initial_window_size, http_bootstrap->alpn_string_map, &http_bootstrap->http1_options, &http_bootstrap->http2_options, http_bootstrap->user_data); if (!http_bootstrap->connection) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to create the client connection object, error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error; } if (aws_http_connection_monitoring_options_is_valid(&http_bootstrap->monitoring_options)) { /* * On creation we validate monitoring options, if they exist, and fail if they're not * valid. So at this point, is_valid() functions as an is-monitoring-on? check. A false * value here is not an error, it's just not enabled. */ struct aws_crt_statistics_handler *http_connection_monitor = aws_crt_statistics_handler_new_http_connection_monitor( http_bootstrap->alloc, &http_bootstrap->monitoring_options); if (http_connection_monitor == NULL) { goto error; } aws_channel_set_statistics_handler(channel, http_connection_monitor); } http_bootstrap->connection->proxy_request_transform = http_bootstrap->proxy_request_transform; http_bootstrap->connection->client_data->response_first_byte_timeout_ms = http_bootstrap->response_first_byte_timeout_ms; AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "id=%p: " PRInSTR " client connection established.", (void *)http_bootstrap->connection, AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(http_bootstrap->connection->http_version))); /* Tell user of successful connection. * Then clear the on_setup callback so that we know it's been called */ http_bootstrap->on_setup(http_bootstrap->connection, AWS_ERROR_SUCCESS, http_bootstrap->user_data); http_bootstrap->on_setup = NULL; return; error: /* Something went wrong. Invoke channel shutdown. 
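 * (Specifically, the shutdown completion is delivered to s_client_bootstrap_on_channel_shutdown(), which sees
 * that on_setup was never cleared and therefore reports the setup failure.)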
Then wait for channel shutdown to complete * before informing the user that setup failed and cleaning up the http_bootstrap.*/ aws_channel_shutdown(channel, aws_last_error()); } /* At this point, the channel for a client connection has completed its shutdown */ static void s_client_bootstrap_on_channel_shutdown( struct aws_client_bootstrap *channel_bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)channel_bootstrap; (void)channel; AWS_ASSERT(user_data); struct aws_http_client_bootstrap *http_bootstrap = user_data; /* If on_setup hasn't been called yet, inform user of failed setup. * If on_setup was already called, inform user that it's shut down now. */ if (http_bootstrap->on_setup) { /* make super duper sure that failed setup receives a non-zero error_code */ if (error_code == 0) { error_code = AWS_ERROR_UNKNOWN; } AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Client setup failed with error %d (%s).", error_code, aws_error_name(error_code)); http_bootstrap->on_setup(NULL, error_code, http_bootstrap->user_data); } else if (http_bootstrap->on_shutdown) { AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "%p: Client shutdown completed with error %d (%s).", (void *)http_bootstrap->connection, error_code, aws_error_name(error_code)); http_bootstrap->on_shutdown(http_bootstrap->connection, error_code, http_bootstrap->user_data); } /* Clean up bootstrapper */ aws_http_client_bootstrap_destroy(http_bootstrap); } int s_validate_http_client_connection_options(const struct aws_http_client_connection_options *options) { if (options->self_size == 0) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, self size not initialized"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!options->allocator) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, no allocator supplied"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (options->host_name.len == 0) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, empty host name."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!options->socket_options) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, socket options are null."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!options->on_setup) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, setup callback is null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* http2_options cannot be NULL here, calling function adds them if they were missing */ if (options->http2_options->num_initial_settings > 0 && options->http2_options->initial_settings_array) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, h2 settings count is non-zero but settings array is null"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (options->monitoring_options && !aws_http_connection_monitoring_options_is_valid(options->monitoring_options)) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, invalid monitoring options"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (options->prior_knowledge_http2 && options->tls_options) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: HTTP/2 prior knowledge only works with cleartext TCP."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } struct s_copy_alpn_string_map_context { struct aws_hash_table *map; struct aws_allocator *allocator; }; /* put every item into the source to make a deep copy 
of the map */ static int s_copy_alpn_string_map(void *context, struct aws_hash_element *item) { struct s_copy_alpn_string_map_context *func_context = context; struct aws_hash_table *dest = func_context->map; /* make a deep copy of the string and hash map will own the copy */ struct aws_string *key_copy = aws_string_new_from_string(func_context->allocator, item->key); int was_created; if (aws_hash_table_put(dest, key_copy, item->value, &was_created)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "Failed to copy ALPN map with error code %d (%s)", error_code, aws_error_name(error_code)); /* failed to put into the table, we need to clean up the copy ourselves */ aws_string_destroy(key_copy); /* return error to stop iteration */ return AWS_COMMON_HASH_TABLE_ITER_ERROR; } if (!was_created) { /* no new entry created, clean up the copy ourselves */ aws_string_destroy(key_copy); } return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } int aws_http_alpn_map_init_copy( struct aws_allocator *allocator, struct aws_hash_table *dest, struct aws_hash_table *src) { if (!src) { AWS_ZERO_STRUCT(*dest); return AWS_OP_SUCCESS; } if (!src->p_impl) { AWS_ZERO_STRUCT(*dest); return AWS_OP_SUCCESS; } if (aws_http_alpn_map_init(allocator, dest)) { return AWS_OP_ERR; } struct s_copy_alpn_string_map_context context; context.allocator = allocator; context.map = dest; /* make a deep copy of the map */ if (aws_hash_table_foreach(src, s_copy_alpn_string_map, &context)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "Failed to copy ALPN map with error code %d (%s)", error_code, aws_error_name(error_code)); aws_hash_table_clean_up(dest); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_http_client_connect_internal( const struct aws_http_client_connection_options *orig_options, aws_http_proxy_request_transform_fn *proxy_request_transform) { if (!orig_options) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: http connection options are null."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_http_client_bootstrap *http_bootstrap = NULL; struct aws_string *host_name = NULL; int err = 0; /* make copy of options, and add defaults for missing optional structs */ struct aws_http_client_connection_options options = *orig_options; struct aws_http1_connection_options default_http1_options; AWS_ZERO_STRUCT(default_http1_options); if (options.http1_options == NULL) { options.http1_options = &default_http1_options; } struct aws_http2_connection_options default_http2_options; AWS_ZERO_STRUCT(default_http2_options); if (options.http2_options == NULL) { options.http2_options = &default_http2_options; } /* validate options */ if (s_validate_http_client_connection_options(&options)) { goto error; } AWS_FATAL_ASSERT(options.proxy_options == NULL); /* bootstrap_new() functions requires a null-terminated c-str */ host_name = aws_string_new_from_cursor(options.allocator, &options.host_name); if (!host_name) { goto error; } struct aws_http2_setting *setting_array = NULL; struct aws_hash_table *alpn_string_map = NULL; aws_mem_acquire_many( options.allocator, 3, &http_bootstrap, sizeof(struct aws_http_client_bootstrap), &setting_array, options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting), &alpn_string_map, sizeof(struct aws_hash_table)); AWS_ZERO_STRUCT(*http_bootstrap); http_bootstrap->alloc = options.allocator; http_bootstrap->is_using_tls = options.tls_options != NULL; http_bootstrap->stream_manual_window_management = 
options.manual_window_management; http_bootstrap->prior_knowledge_http2 = options.prior_knowledge_http2; http_bootstrap->initial_window_size = options.initial_window_size; http_bootstrap->user_data = options.user_data; http_bootstrap->on_setup = options.on_setup; http_bootstrap->on_shutdown = options.on_shutdown; http_bootstrap->proxy_request_transform = proxy_request_transform; http_bootstrap->http1_options = *options.http1_options; http_bootstrap->http2_options = *options.http2_options; http_bootstrap->response_first_byte_timeout_ms = options.response_first_byte_timeout_ms; /* keep a copy of the settings array if it's not NULL */ if (options.http2_options->num_initial_settings > 0) { memcpy( setting_array, options.http2_options->initial_settings_array, options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting)); http_bootstrap->http2_options.initial_settings_array = setting_array; } if (options.alpn_string_map) { if (aws_http_alpn_map_init_copy(options.allocator, alpn_string_map, options.alpn_string_map)) { goto error; } http_bootstrap->alpn_string_map = alpn_string_map; } if (options.monitoring_options) { http_bootstrap->monitoring_options = *options.monitoring_options; } AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "static: attempting to initialize a new client channel to %s:%u", aws_string_c_str(host_name), options.port); struct aws_socket_channel_bootstrap_options channel_options = { .bootstrap = options.bootstrap, .host_name = aws_string_c_str(host_name), .port = options.port, .socket_options = options.socket_options, .tls_options = options.tls_options, .setup_callback = s_client_bootstrap_on_channel_setup, .shutdown_callback = s_client_bootstrap_on_channel_shutdown, .enable_read_back_pressure = options.manual_window_management, .user_data = http_bootstrap, .requested_event_loop = options.requested_event_loop, .host_resolution_override_config = options.host_resolution_config, }; err = s_system_vtable_ptr->aws_client_bootstrap_new_socket_channel(&channel_options); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to initiate socket channel for new client connection, error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error; } aws_string_destroy(host_name); return AWS_OP_SUCCESS; error: if (http_bootstrap) { aws_http_client_bootstrap_destroy(http_bootstrap); } if (host_name) { aws_string_destroy(host_name); } return AWS_OP_ERR; } int aws_http_client_connect(const struct aws_http_client_connection_options *options) { aws_http_fatal_assert_library_initialized(); if (options->prior_knowledge_http2 && options->tls_options) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: HTTP/2 prior knowledge only works with cleartext TCP."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (options->proxy_options != NULL) { return aws_http_client_connect_via_proxy(options); } else { if (!options->proxy_ev_settings || options->proxy_ev_settings->env_var_type != AWS_HPEV_ENABLE) { return aws_http_client_connect_internal(options, NULL); } else { /* Proxy through envrionment variable is enabled */ return aws_http_client_connect_via_proxy(options); } } } enum aws_http_version aws_http_connection_get_version(const struct aws_http_connection *connection) { return connection->http_version; } int aws_http_connection_configure_server( struct aws_http_connection *connection, const struct aws_http_server_connection_options *options) { if (!connection || !options || !options->on_incoming_request) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "id=%p: Invalid 
server configuration options.", (void *)connection); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!connection->server_data) { AWS_LOGF_WARN( AWS_LS_HTTP_CONNECTION, "id=%p: Server-only function invoked on client, ignoring call.", (void *)connection); return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (connection->server_data->on_incoming_request) { AWS_LOGF_WARN( AWS_LS_HTTP_CONNECTION, "id=%p: Connection is already configured, ignoring call.", (void *)connection); return aws_raise_error(AWS_ERROR_INVALID_STATE); } connection->user_data = options->connection_user_data; connection->server_data->on_incoming_request = options->on_incoming_request; connection->server_data->on_shutdown = options->on_shutdown; return AWS_OP_SUCCESS; } /* Stream IDs are only 31 bits [5.1.1] */ static const uint32_t MAX_STREAM_ID = UINT32_MAX >> 1; uint32_t aws_http_connection_get_next_stream_id(struct aws_http_connection *connection) { uint32_t next_id = connection->next_stream_id; if (AWS_UNLIKELY(next_id > MAX_STREAM_ID)) { AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "id=%p: All available stream ids are gone", (void *)connection); next_id = 0; aws_raise_error(AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED); } else { connection->next_stream_id += 2; } return next_id; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/connection_manager.c000066400000000000000000002000041456575232400255000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif /* * Established connections not currently in use are tracked via this structure. */ struct aws_idle_connection { struct aws_allocator *allocator; struct aws_linked_list_node node; uint64_t cull_timestamp; struct aws_http_connection *connection; }; /* * System vtable to use under normal circumstances */ static struct aws_http_connection_manager_system_vtable s_default_system_vtable = { .aws_http_client_connect = aws_http_client_connect, .aws_http_connection_release = aws_http_connection_release, .aws_http_connection_close = aws_http_connection_close, .aws_http_connection_new_requests_allowed = aws_http_connection_new_requests_allowed, .aws_high_res_clock_get_ticks = aws_high_res_clock_get_ticks, .aws_channel_thread_is_callers_thread = aws_channel_thread_is_callers_thread, .aws_http_connection_get_channel = aws_http_connection_get_channel, .aws_http_connection_get_version = aws_http_connection_get_version, }; const struct aws_http_connection_manager_system_vtable *g_aws_http_connection_manager_default_system_vtable_ptr = &s_default_system_vtable; bool aws_http_connection_manager_system_vtable_is_valid(const struct aws_http_connection_manager_system_vtable *table) { return table->aws_http_client_connect && table->aws_http_connection_close && table->aws_http_connection_release && table->aws_http_connection_new_requests_allowed; } enum aws_http_connection_manager_state_type { AWS_HCMST_UNINITIALIZED, AWS_HCMST_READY, AWS_HCMST_SHUTTING_DOWN }; /* * AWS_HCMCT_VENDED_CONNECTION: The number of connections currently being used by external users. * AWS_HCMCT_PENDING_CONNECTIONS: The number of pending new connection requests we have outstanding to the http * layer. 
* AWS_HCMCT_OPEN_CONNECTION: Always equal to # of connection shutdown callbacks not yet invoked * or equivalently: * * # of connections ever created by the manager - # shutdown callbacks received */ enum aws_http_connection_manager_count_type { AWS_HCMCT_VENDED_CONNECTION, AWS_HCMCT_PENDING_CONNECTIONS, AWS_HCMCT_OPEN_CONNECTION, AWS_HCMCT_COUNT, }; /** * Vocabulary * Acquisition - a request by a user for a connection * Pending Acquisition - a request by a user for a new connection that has not been completed. It may be * waiting on http, a release by another user, or the manager itself. * Pending Connect - a request to the http layer for a new connection that has not been resolved yet * Vended Connection - a successfully established connection that is currently in use by something; must * be released (through the connection manager) by the user before anyone else can use it. The connection * manager does not explicitly track vended connections. * Task Set - A set of operations that should be attempted once the lock is released. A task set includes * completion callbacks (which can't fail) and connection attempts (which can fail either immediately or * asynchronously). * * Requirements/Assumptions * (1) Don't invoke user callbacks while holding the internal state lock * (2) Don't invoke downstream http calls while holding the internal state lock * (3) Only log unusual or rare events while the lock is held. Common-path logging should be while it is * not held. * (4) Don't crash or do awful things (leaking resources is ok though) if the interface contract * (ref counting + balanced acquire/release of connections) is violated by the user * * In order to fulfill (1) and (2), all side-effecting operations within the connection manager follow a pattern: * * (1) Lock * (2) Make state changes based on the operation * (3) Build a set of work (completions, connect calls, releases, self-destruction) as appropriate to the operation * (4) Unlock * (5) Execute the task set * * Asynchronous work order failures are handled in the async callback, but immediate failures require * us to relock and update the internal state. When there's an immediate connect failure, we use a * conservative policy to fail all excess (beyond the # of pending connects) acquisitions; this allows us * to avoid a possible recursive invocation (and potential failures) to connect again. * * Lifecycle * Our connection manager implementation has a reasonably complex lifecycle. * * All state around the life cycle is protected by a lock. It seemed too risky and error-prone * to try and mix an atomic ref count with the internal tracking counters we need. * * Over the course of its lifetime, a connection manager moves through two states: * * READY - connections may be acquired and released. When the external ref count for the manager * drops to zero, the manager moves to: * * TODO: Seems like connections can still be release while shutting down. * SHUTTING_DOWN - connections may no longer be acquired and released (how could they if the external * ref count was accurate?) but in case of user ref errors, we simply fail attempts to do so rather * than crash or underflow. 
While in this state, we wait for a set of tracking counters to all fall to zero: * * pending_connect_count - the # of unresolved calls to the http layer's connect logic * open_connection_count - the # of connections for whom the shutdown callback (from http) has not been invoked * vended_connection_count - the # of connections held by external users that haven't been released. Under correct * usage this should be zero before SHUTTING_DOWN is entered, but we attempt to handle incorrect usage gracefully. * * While all the counter fall to zero and no outlife transition, connection manager will detroy itself. * * While shutting down, as pending connects resolve, we immediately release new incoming (from http) connections * * During the transition from READY to SHUTTING_DOWN, we flush the pending acquisition queue (with failure callbacks) * and since we disallow new acquires, pending_acquisition_count should always be zero after the transition. * */ struct aws_http_connection_manager { struct aws_allocator *allocator; /* * A union of external downstream dependencies (primarily global http API functions) and * internal implementation references. Selectively overridden by tests in order to * enable strong coverage of internal implementation details. */ const struct aws_http_connection_manager_system_vtable *system_vtable; /* * Callback to invoke when shutdown has completed and all resources have been cleaned up. */ aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback; /* * User data to pass to the shutdown completion callback. */ void *shutdown_complete_user_data; /* * Controls access to all mutable state on the connection manager */ struct aws_mutex lock; /* * A manager can be in one of two states, READY or SHUTTING_DOWN. The state transition * takes place when ref_count drops to zero. */ enum aws_http_connection_manager_state_type state; /* * The number of all established, idle connections. So * that we don't have compute the size of a linked list every time. * It doesn't contribute to internal refcount as AWS_HCMCT_OPEN_CONNECTION includes all idle connections as well. */ size_t idle_connection_count; /* * The set of all available, ready-to-be-used connections, as aws_idle_connection structs. * * This must be a LIFO stack. When connections are released by the user, they must be added on to the back. * When we vend connections to the user, they must be removed from the back first. * In this way, the list will always be sorted from oldest (in terms of time spent idle) to newest. This means * we can always use the cull timestamp of the front connection as the next scheduled time for culling. * It also means that when we cull connections, we can quit the loop as soon as we find a connection * whose timestamp is greater than the current timestamp. */ struct aws_linked_list idle_connections; /* * The set of all incomplete connection acquisition requests */ struct aws_linked_list pending_acquisitions; /* * The number of all incomplete connection acquisition requests. So * that we don't have compute the size of a linked list every time. */ size_t pending_acquisition_count; /* * Counts that contributes to the internal refcount. * When the value changes, s_connection_manager_internal_ref_increase/decrease needed. * * AWS_HCMCT_VENDED_CONNECTION: The number of connections currently being used by external users. * AWS_HCMCT_PENDING_CONNECTIONS: The number of pending new connection requests we have outstanding to the http * layer. 
* AWS_HCMCT_OPEN_CONNECTION: Always equal to # of connection shutdown callbacks not yet invoked * or equivalently: * * # of connections ever created by the manager - # shutdown callbacks received */ size_t internal_ref[AWS_HCMCT_COUNT]; /* * The number of established new HTTP/2 connections we have waiting for SETTINGS from the http layer * It doesn't contribute to internal refcount as AWS_HCMCT_OPEN_CONNECTION inclues all connections waiting for * settings as well. */ size_t pending_settings_count; /* * All the options needed to create an http connection */ struct aws_client_bootstrap *bootstrap; size_t initial_window_size; struct aws_socket_options socket_options; struct aws_tls_connection_options *tls_connection_options; struct aws_http_proxy_config *proxy_config; struct aws_http_connection_monitoring_options monitoring_options; struct aws_string *host; struct proxy_env_var_settings proxy_ev_settings; struct aws_tls_connection_options *proxy_ev_tls_options; uint32_t port; /* * HTTP/2 specific. */ bool http2_prior_knowledge; struct aws_array_list *initial_settings; size_t max_closed_streams; bool http2_conn_manual_window_management; /* * The maximum number of connections this manager should ever have at once. */ size_t max_connections; /* * Lifecycle tracking for the connection manager. Starts at 1. * * Once this drops to zero, the manager state transitions to shutting down * * The manager is deleted when all other tracking counters have returned to zero. * * We don't use an atomic here because the shutdown phase wants to check many different * values. You could argue that we could use a sum of everything, but we still need the * individual values for proper behavior and error checking during the ready state. Also, * a hybrid atomic/lock solution felt excessively complicated and delicate. */ size_t external_ref_count; /* * Internal refcount that keeps connection manager alive. * * It's a sum of all internal_ref, the `struct aws_connection_management_transaction` alive and one for any external * usage. * * Once this refcount drops to zero, connection manager should either be cleaned up all the memory all waiting for * the last task to clean un the memory and do nothing else. */ struct aws_ref_count internal_ref_count; /* * if set to true, read back pressure mechanism will be enabled. */ bool enable_read_back_pressure; /** * If set to a non-zero value, then connections that stay in the pool longer than the specified * timeout will be closed automatically. */ uint64_t max_connection_idle_in_milliseconds; /* * Task to cull idle connections. This task is run periodically on the cull_event_loop if a non-zero * culling time interval is specified. */ struct aws_task *cull_task; struct aws_event_loop *cull_event_loop; }; struct aws_http_connection_manager_snapshot { enum aws_http_connection_manager_state_type state; size_t idle_connection_count; size_t pending_acquisition_count; size_t pending_settings_count; /* From internal_ref */ size_t pending_connects_count; size_t vended_connection_count; size_t open_connection_count; size_t external_ref_count; }; /* * Correct usage requires AWS_ZERO_STRUCT to have been called beforehand. 
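 * A call site therefore looks like (sketch; the manager's lock should be held since mutable state is read):
 *     struct aws_http_connection_manager_snapshot snapshot;
 *     AWS_ZERO_STRUCT(snapshot);
 *     s_aws_http_connection_manager_get_snapshot(manager, &snapshot);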
*/ static void s_aws_http_connection_manager_get_snapshot( struct aws_http_connection_manager *manager, struct aws_http_connection_manager_snapshot *snapshot) { snapshot->state = manager->state; snapshot->idle_connection_count = manager->idle_connection_count; snapshot->pending_acquisition_count = manager->pending_acquisition_count; snapshot->pending_settings_count = manager->pending_settings_count; snapshot->pending_connects_count = manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS]; snapshot->vended_connection_count = manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION]; snapshot->open_connection_count = manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION]; snapshot->external_ref_count = manager->external_ref_count; } static void s_aws_http_connection_manager_log_snapshot( struct aws_http_connection_manager *manager, struct aws_http_connection_manager_snapshot *snapshot) { if (snapshot->state != AWS_HCMST_UNINITIALIZED) { AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: snapshot - state=%d, idle_connection_count=%zu, pending_acquire_count=%zu, " "pending_settings_count=%zu, pending_connect_count=%zu, vended_connection_count=%zu, " "open_connection_count=%zu, ref_count=%zu", (void *)manager, (int)snapshot->state, snapshot->idle_connection_count, snapshot->pending_acquisition_count, snapshot->pending_settings_count, snapshot->pending_connects_count, snapshot->vended_connection_count, snapshot->open_connection_count, snapshot->external_ref_count); } else { AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: snapshot not initialized by control flow", (void *)manager); } } void aws_http_connection_manager_set_system_vtable( struct aws_http_connection_manager *manager, const struct aws_http_connection_manager_system_vtable *system_vtable) { AWS_FATAL_ASSERT(aws_http_connection_manager_system_vtable_is_valid(system_vtable)); manager->system_vtable = system_vtable; } /* * A struct that functions as both the pending acquisition tracker and the about-to-complete data. * * The list in the connection manager (pending_acquisitions) is the set of all acquisition requests that we * haven't yet resolved. * * In order to make sure we never invoke callbacks while holding the manager's lock, in a number of places * we build a list of one or more acquisitions to complete. Once the lock is released * we complete all the acquisitions in the list using the data within the struct (hence why we have * "result-oriented" members like connection and error_code). This means we can fail an acquisition * simply by setting the error_code and moving it to the current transaction's completion list. */ struct aws_http_connection_acquisition { struct aws_allocator *allocator; struct aws_linked_list_node node; struct aws_http_connection_manager *manager; /* Only used by logging */ aws_http_connection_manager_on_connection_setup_fn *callback; void *user_data; struct aws_http_connection *connection; int error_code; struct aws_channel_task acquisition_task; }; static void s_connection_acquisition_task( struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; struct aws_http_connection_acquisition *pending_acquisition = arg; /* this is a channel task. If it is canceled, that means the channel shutdown. In that case, that's equivalent * to a closed connection. 
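 * (AWS_TASK_STATUS_CANCELED is delivered when the channel shuts down before the task runs; the branch below
 * maps that to AWS_ERROR_HTTP_CONNECTION_CLOSED and releases the connection back to the manager so the vended
 * connection count stays balanced.)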
*/ if (status != AWS_TASK_STATUS_RUN_READY) { AWS_LOGF_WARN( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Failed to complete connection acquisition because the connection was closed", (void *)pending_acquisition->manager); pending_acquisition->callback(NULL, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_acquisition->user_data); /* release it back to prevent a leak of the connection count. */ aws_http_connection_manager_release_connection(pending_acquisition->manager, pending_acquisition->connection); } else { AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Successfully completed connection acquisition with connection id=%p", (void *)pending_acquisition->manager, (void *)pending_acquisition->connection); pending_acquisition->callback( pending_acquisition->connection, pending_acquisition->error_code, pending_acquisition->user_data); } aws_mem_release(pending_acquisition->allocator, pending_acquisition); } /* * Invokes a set of connection acquisition completion callbacks. * * Soft Requirement: The manager's lock must not be held in the callstack. * * Assumes that internal state (like pending_acquisition_count, vended_connection_count, etc...) have already been * updated according to the list's contents. */ static void s_aws_http_connection_manager_complete_acquisitions( struct aws_linked_list *acquisitions, struct aws_allocator *allocator) { while (!aws_linked_list_empty(acquisitions)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(acquisitions); struct aws_http_connection_acquisition *pending_acquisition = AWS_CONTAINER_OF(node, struct aws_http_connection_acquisition, node); if (pending_acquisition->error_code == AWS_OP_SUCCESS) { struct aws_channel *channel = pending_acquisition->manager->system_vtable->aws_http_connection_get_channel( pending_acquisition->connection); AWS_PRECONDITION(channel); /* For some workloads, going ahead and moving the connection callback to the connection's thread is a * substantial performance improvement so let's do that */ if (!pending_acquisition->manager->system_vtable->aws_channel_thread_is_callers_thread(channel)) { aws_channel_task_init( &pending_acquisition->acquisition_task, s_connection_acquisition_task, pending_acquisition, "s_connection_acquisition_task"); aws_channel_schedule_task_now(channel, &pending_acquisition->acquisition_task); return; } AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Successfully completed connection acquisition with connection id=%p", (void *)pending_acquisition->manager, (void *)pending_acquisition->connection); } else { AWS_LOGF_WARN( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Failed to complete connection acquisition with error_code %d(%s)", (void *)pending_acquisition->manager, pending_acquisition->error_code, aws_error_str(pending_acquisition->error_code)); } pending_acquisition->callback( pending_acquisition->connection, pending_acquisition->error_code, pending_acquisition->user_data); aws_mem_release(allocator, pending_acquisition); } } /* * Moves the first pending connection acquisition into a (task set) list. Call this while holding the lock to * build the set of callbacks to be completed once the lock is released. 
* * Hard Requirement: Manager's lock must held somewhere in the call stack * * If this was a successful acquisition then connection is non-null * If this was a failed acquisition then connection is null and error_code is hopefully a useful diagnostic (extreme * edge cases exist where it may not be though) */ static void s_aws_http_connection_manager_move_front_acquisition( struct aws_http_connection_manager *manager, struct aws_http_connection *connection, int error_code, struct aws_linked_list *output_list) { AWS_FATAL_ASSERT(!aws_linked_list_empty(&manager->pending_acquisitions)); struct aws_linked_list_node *node = aws_linked_list_pop_front(&manager->pending_acquisitions); AWS_FATAL_ASSERT(manager->pending_acquisition_count > 0); --manager->pending_acquisition_count; if (error_code == AWS_ERROR_SUCCESS && connection == NULL) { AWS_LOGF_FATAL( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Connection acquisition completed with NULL connection and no error code. Investigate.", (void *)manager); error_code = AWS_ERROR_UNKNOWN; } struct aws_http_connection_acquisition *pending_acquisition = AWS_CONTAINER_OF(node, struct aws_http_connection_acquisition, node); pending_acquisition->connection = connection; pending_acquisition->error_code = error_code; aws_linked_list_push_back(output_list, node); } /* * Encompasses all of the external operations that need to be done for various * events: * manager release * connection release * connection acquire * connection_setup * connection_shutdown * * The transaction is built under the manager's lock (and the internal state is updated optimistically), * but then executed outside of it. */ struct aws_connection_management_transaction { struct aws_http_connection_manager *manager; struct aws_allocator *allocator; struct aws_linked_list completions; struct aws_http_connection *connection_to_release; struct aws_linked_list connections_to_release; /* */ struct aws_http_connection_manager_snapshot snapshot; size_t new_connections; }; static void s_aws_connection_management_transaction_init( struct aws_connection_management_transaction *work, struct aws_http_connection_manager *manager) { AWS_ZERO_STRUCT(*work); aws_linked_list_init(&work->connections_to_release); aws_linked_list_init(&work->completions); work->manager = manager; work->allocator = manager->allocator; aws_ref_count_acquire(&manager->internal_ref_count); } static void s_aws_connection_management_transaction_clean_up(struct aws_connection_management_transaction *work) { AWS_FATAL_ASSERT(aws_linked_list_empty(&work->connections_to_release)); AWS_FATAL_ASSERT(aws_linked_list_empty(&work->completions)); AWS_ASSERT(work->manager); aws_ref_count_release(&work->manager->internal_ref_count); } /* The count acquire and release all needs to be invoked helding the lock */ static void s_connection_manager_internal_ref_increase( struct aws_http_connection_manager *manager, enum aws_http_connection_manager_count_type count_type, size_t num) { manager->internal_ref[count_type] += num; for (size_t i = 0; i < num; i++) { aws_ref_count_acquire(&manager->internal_ref_count); } } static void s_connection_manager_internal_ref_decrease( struct aws_http_connection_manager *manager, enum aws_http_connection_manager_count_type count_type, size_t num) { manager->internal_ref[count_type] -= num; for (size_t i = 0; i < num; i++) { /* This only happens between transcation init and transcation clean up. 
As transcation always has a internal * refcount, this will never bring the refcount to zero */ aws_ref_count_release(&manager->internal_ref_count); } } /* Only invoked with the lock held */ static void s_aws_http_connection_manager_build_transaction(struct aws_connection_management_transaction *work) { struct aws_http_connection_manager *manager = work->manager; if (manager->state == AWS_HCMST_READY) { /* * Step 1 - If there's free connections, complete acquisition requests */ while (!aws_linked_list_empty(&manager->idle_connections) > 0 && manager->pending_acquisition_count > 0) { AWS_FATAL_ASSERT(manager->idle_connection_count >= 1); /* * It is absolutely critical that this is pop_back and not front. By making the idle connections * a LIFO stack, the list will always be sorted from oldest (in terms of idle time) to newest. This means * we can always use the cull timestamp of the first connection as the next scheduled time for culling. * It also means that when we cull connections, we can quit the loop as soon as we find a connection * whose timestamp is greater than the current timestamp. */ struct aws_linked_list_node *node = aws_linked_list_pop_back(&manager->idle_connections); struct aws_idle_connection *idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node); struct aws_http_connection *connection = idle_connection->connection; AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Grabbing pooled connection (%p)", (void *)manager, (void *)connection); s_aws_http_connection_manager_move_front_acquisition( manager, connection, AWS_ERROR_SUCCESS, &work->completions); s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_VENDED_CONNECTION, 1); --manager->idle_connection_count; aws_mem_release(idle_connection->allocator, idle_connection); } /* * Step 2 - if there's excess pending acquisitions and we have room to make more, make more */ if (manager->pending_acquisition_count > manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count) { AWS_FATAL_ASSERT( manager->max_connections >= manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] + manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count); work->new_connections = manager->pending_acquisition_count - manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] - manager->pending_settings_count; size_t max_new_connections = manager->max_connections - (manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] + manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count); if (work->new_connections > max_new_connections) { work->new_connections = max_new_connections; } s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_PENDING_CONNECTIONS, work->new_connections); } } else { /* * swap our internal connection set with the empty work set */ AWS_FATAL_ASSERT(aws_linked_list_empty(&work->connections_to_release)); aws_linked_list_swap_contents(&manager->idle_connections, &work->connections_to_release); manager->idle_connection_count = 0; /* * Move all manager pending acquisitions to the work completion list */ while (!aws_linked_list_empty(&manager->pending_acquisitions)) { AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Failing pending connection acquisition due to manager shut down", (void *)manager); s_aws_http_connection_manager_move_front_acquisition( manager, NULL, AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN, &work->completions); } AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: manager release, failing %zu 
pending acquisitions", (void *)manager, manager->pending_acquisition_count); manager->pending_acquisition_count = 0; } s_aws_http_connection_manager_get_snapshot(manager, &work->snapshot); } static void s_aws_http_connection_manager_execute_transaction(struct aws_connection_management_transaction *work); /* * The final last gasp of a connection manager where memory is cleaned up. Destruction is split up into two parts, * a begin and a finish. Idle connection culling requires a scheduled task on an arbitrary event loop. If idle * connection culling is on then this task must be cancelled before destruction can finish, but you can only cancel * a task from the same event loop that it is scheduled on. To resolve this, when using idle connection culling, * we schedule a finish destruction task on the event loop that the culling task is on. This finish task * cancels the culling task and then calls this function. If we are not using idle connection culling, we can * call this function immediately from the start of destruction. */ static void s_aws_http_connection_manager_finish_destroy(struct aws_http_connection_manager *manager) { if (manager == NULL) { return; } AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Destroying self", (void *)manager); AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] == 0); AWS_FATAL_ASSERT(manager->pending_settings_count == 0); AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] == 0); AWS_FATAL_ASSERT(manager->pending_acquisition_count == 0); AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION] == 0); AWS_FATAL_ASSERT(aws_linked_list_empty(&manager->pending_acquisitions)); AWS_FATAL_ASSERT(aws_linked_list_empty(&manager->idle_connections)); aws_string_destroy(manager->host); if (manager->initial_settings) { aws_array_list_clean_up(manager->initial_settings); aws_mem_release(manager->allocator, manager->initial_settings); } if (manager->tls_connection_options) { aws_tls_connection_options_clean_up(manager->tls_connection_options); aws_mem_release(manager->allocator, manager->tls_connection_options); } if (manager->proxy_ev_tls_options) { aws_tls_connection_options_clean_up(manager->proxy_ev_tls_options); aws_mem_release(manager->allocator, manager->proxy_ev_tls_options); } if (manager->proxy_config) { aws_http_proxy_config_destroy(manager->proxy_config); } /* * If this task exists then we are actually in the corresponding event loop running the final destruction task. * In that case, we've already cancelled this task and when you cancel, it runs synchronously. So in that * case the task has run as cancelled, it was not rescheduled, and so we can safely release the memory. */ if (manager->cull_task) { aws_mem_release(manager->allocator, manager->cull_task); } aws_mutex_clean_up(&manager->lock); aws_client_bootstrap_release(manager->bootstrap); if (manager->shutdown_complete_callback) { manager->shutdown_complete_callback(manager->shutdown_complete_user_data); } aws_mem_release(manager->allocator, manager); } /* This is scheduled to run on the cull task's event loop. 
Should only be scheduled to run if we have one */ static void s_final_destruction_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct aws_http_connection_manager *manager = arg; struct aws_allocator *allocator = manager->allocator; AWS_FATAL_ASSERT(manager->cull_task != NULL); AWS_FATAL_ASSERT(manager->cull_event_loop != NULL); aws_event_loop_cancel_task(manager->cull_event_loop, manager->cull_task); aws_mem_release(allocator, task); /* release the refcount on manager as the culling task will not run again */ aws_ref_count_release(&manager->internal_ref_count); } static void s_cull_task(struct aws_task *task, void *arg, enum aws_task_status status); static void s_schedule_connection_culling(struct aws_http_connection_manager *manager) { if (manager->max_connection_idle_in_milliseconds == 0) { return; } if (manager->cull_task == NULL) { manager->cull_task = aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_task)); aws_task_init(manager->cull_task, s_cull_task, manager, "cull_idle_connections"); /* For the task to properly run and cancel, we need to keep manager alive */ aws_ref_count_acquire(&manager->internal_ref_count); } if (manager->cull_event_loop == NULL) { manager->cull_event_loop = aws_event_loop_group_get_next_loop(manager->bootstrap->event_loop_group); } AWS_FATAL_ASSERT(manager->cull_event_loop != NULL); uint64_t cull_task_time = 0; aws_mutex_lock(&manager->lock); const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections); struct aws_linked_list_node *oldest_node = aws_linked_list_begin(&manager->idle_connections); if (oldest_node != end) { /* * Since the connections are in LIFO order in the list, the front of the list has the closest * cull time. */ struct aws_idle_connection *oldest_idle_connection = AWS_CONTAINER_OF(oldest_node, struct aws_idle_connection, node); cull_task_time = oldest_idle_connection->cull_timestamp; } else { /* * There are no connections in the list, so the absolute minimum anything could be culled is the full * culling interval from now. 
*/ uint64_t now = 0; manager->system_vtable->aws_high_res_clock_get_ticks(&now); cull_task_time = now + aws_timestamp_convert( manager->max_connection_idle_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); } aws_mutex_unlock(&manager->lock); aws_event_loop_schedule_task_future(manager->cull_event_loop, manager->cull_task, cull_task_time); return; } struct aws_http_connection_manager *aws_http_connection_manager_new( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { aws_http_fatal_assert_library_initialized(); if (!options) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - options is null"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (!options->socket_options) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - socket_options is null"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (options->max_connections == 0) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - max_connections cannot be 0"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (options->tls_connection_options && options->http2_prior_knowledge) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - HTTP/2 prior knowledge cannot be set when TLS is used"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_connection_manager *manager = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_connection_manager)); if (manager == NULL) { return NULL; } manager->allocator = allocator; if (aws_mutex_init(&manager->lock)) { goto on_error; } aws_ref_count_init( &manager->internal_ref_count, manager, (aws_simple_completion_callback *)s_aws_http_connection_manager_finish_destroy); aws_linked_list_init(&manager->idle_connections); aws_linked_list_init(&manager->pending_acquisitions); manager->host = aws_string_new_from_cursor(allocator, &options->host); if (manager->host == NULL) { goto on_error; } if (options->tls_connection_options) { manager->tls_connection_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options)); if (aws_tls_connection_options_copy(manager->tls_connection_options, options->tls_connection_options)) { goto on_error; } } if (options->proxy_options) { manager->proxy_config = aws_http_proxy_config_new_from_manager_options(allocator, options); if (manager->proxy_config == NULL) { goto on_error; } } if (options->monitoring_options) { manager->monitoring_options = *options->monitoring_options; } manager->state = AWS_HCMST_READY; manager->initial_window_size = options->initial_window_size; manager->port = options->port; manager->max_connections = options->max_connections; manager->socket_options = *options->socket_options; manager->bootstrap = aws_client_bootstrap_acquire(options->bootstrap); manager->system_vtable = g_aws_http_connection_manager_default_system_vtable_ptr; manager->external_ref_count = 1; manager->shutdown_complete_callback = options->shutdown_complete_callback; manager->shutdown_complete_user_data = options->shutdown_complete_user_data; manager->enable_read_back_pressure = options->enable_read_back_pressure; manager->max_connection_idle_in_milliseconds = options->max_connection_idle_in_milliseconds; if (options->proxy_ev_settings) { manager->proxy_ev_settings = *options->proxy_ev_settings; } if (manager->proxy_ev_settings.tls_options) { manager->proxy_ev_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options)); if 
(aws_tls_connection_options_copy(manager->proxy_ev_tls_options, manager->proxy_ev_settings.tls_options)) { goto on_error; } manager->proxy_ev_settings.tls_options = manager->proxy_ev_tls_options; } manager->http2_prior_knowledge = options->http2_prior_knowledge; if (options->num_initial_settings > 0) { manager->initial_settings = aws_mem_calloc(allocator, 1, sizeof(struct aws_array_list)); aws_array_list_init_dynamic( manager->initial_settings, allocator, options->num_initial_settings, sizeof(struct aws_http2_setting)); memcpy( manager->initial_settings->data, options->initial_settings_array, options->num_initial_settings * sizeof(struct aws_http2_setting)); } manager->max_closed_streams = options->max_closed_streams; manager->http2_conn_manual_window_management = options->http2_conn_manual_window_management; /* NOTHING can fail after here */ s_schedule_connection_culling(manager); AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Successfully created", (void *)manager); return manager; on_error: s_aws_http_connection_manager_finish_destroy(manager); return NULL; } void aws_http_connection_manager_acquire(struct aws_http_connection_manager *manager) { aws_mutex_lock(&manager->lock); AWS_FATAL_ASSERT(manager->external_ref_count > 0); manager->external_ref_count += 1; aws_mutex_unlock(&manager->lock); } void aws_http_connection_manager_release(struct aws_http_connection_manager *manager) { struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: release", (void *)manager); aws_mutex_lock(&manager->lock); if (manager->external_ref_count > 0) { manager->external_ref_count -= 1; if (manager->external_ref_count == 0) { AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: ref count now zero, starting shut down process", (void *)manager); manager->state = AWS_HCMST_SHUTTING_DOWN; s_aws_http_connection_manager_build_transaction(&work); if (manager->cull_task != NULL) { /* When manager shutting down, schedule the task to cancel the cull task if exist. 
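 *
 * Sequence (matching s_final_destruction_task and s_schedule_connection_culling above): the
 * final-destruction task runs on the cull event loop, cancels the cull task (a cancelled task
 * runs synchronously and is not rescheduled), and releases the internal ref that the cull
 * task was holding; once every internal ref is gone,
 * s_aws_http_connection_manager_finish_destroy() performs the actual cleanup.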
*/ AWS_FATAL_ASSERT(manager->cull_event_loop); struct aws_task *final_destruction_task = aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_task)); aws_task_init(final_destruction_task, s_final_destruction_task, manager, "final_scheduled_destruction"); aws_event_loop_schedule_task_now(manager->cull_event_loop, final_destruction_task); } aws_ref_count_release(&manager->internal_ref_count); } } else { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Connection manager release called with a zero reference count", (void *)manager); } aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); } static void s_aws_http_connection_manager_on_connection_setup( struct aws_http_connection *connection, int error_code, void *user_data); static void s_aws_http_connection_manager_on_connection_shutdown( struct aws_http_connection *connection, int error_code, void *user_data); static void s_aws_http_connection_manager_h2_on_goaway_received( struct aws_http_connection *http2_connection, uint32_t last_stream_id, uint32_t http2_error_code, struct aws_byte_cursor debug_data, void *user_data); static void s_aws_http_connection_manager_h2_on_initial_settings_completed( struct aws_http_connection *http2_connection, int error_code, void *user_data); static int s_aws_http_connection_manager_new_connection(struct aws_http_connection_manager *manager) { struct aws_http_client_connection_options options; AWS_ZERO_STRUCT(options); options.self_size = sizeof(struct aws_http_client_connection_options); options.bootstrap = manager->bootstrap; options.tls_options = manager->tls_connection_options; options.allocator = manager->allocator; options.user_data = manager; options.host_name = aws_byte_cursor_from_string(manager->host); options.port = manager->port; options.initial_window_size = manager->initial_window_size; options.socket_options = &manager->socket_options; options.on_setup = s_aws_http_connection_manager_on_connection_setup; options.on_shutdown = s_aws_http_connection_manager_on_connection_shutdown; options.manual_window_management = manager->enable_read_back_pressure; options.proxy_ev_settings = &manager->proxy_ev_settings; options.prior_knowledge_http2 = manager->http2_prior_knowledge; struct aws_http2_connection_options h2_options; AWS_ZERO_STRUCT(h2_options); if (manager->initial_settings) { h2_options.initial_settings_array = manager->initial_settings->data; h2_options.num_initial_settings = aws_array_list_length(manager->initial_settings); } h2_options.max_closed_streams = manager->max_closed_streams; h2_options.conn_manual_window_management = manager->http2_conn_manual_window_management; /* The initial_settings_completed invoked after the other side acknowledges it, and will always be invoked if the * connection set up */ h2_options.on_initial_settings_completed = s_aws_http_connection_manager_h2_on_initial_settings_completed; h2_options.on_goaway_received = s_aws_http_connection_manager_h2_on_goaway_received; options.http2_options = &h2_options; if (aws_http_connection_monitoring_options_is_valid(&manager->monitoring_options)) { options.monitoring_options = &manager->monitoring_options; } struct aws_http_proxy_options proxy_options; AWS_ZERO_STRUCT(proxy_options); if (manager->proxy_config) { aws_http_proxy_options_init_from_config(&proxy_options, manager->proxy_config); options.proxy_options = &proxy_options; } if (manager->system_vtable->aws_http_client_connect(&options)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: http connection creation 
failed with error code %d(%s)", (void *)manager, aws_last_error(), aws_error_str(aws_last_error())); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_aws_http_connection_manager_execute_transaction(struct aws_connection_management_transaction *work) { struct aws_http_connection_manager *manager = work->manager; int representative_error = 0; size_t new_connection_failures = 0; /* * Step 1 - Logging */ s_aws_http_connection_manager_log_snapshot(manager, &work->snapshot); /* * Step 2 - Perform any requested connection releases */ while (!aws_linked_list_empty(&work->connections_to_release)) { struct aws_linked_list_node *node = aws_linked_list_pop_back(&work->connections_to_release); struct aws_idle_connection *idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node); AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Releasing connection (id=%p)", (void *)manager, (void *)idle_connection->connection); manager->system_vtable->aws_http_connection_release(idle_connection->connection); aws_mem_release(idle_connection->allocator, idle_connection); } if (work->connection_to_release) { AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Releasing connection (id=%p)", (void *)manager, (void *)work->connection_to_release); manager->system_vtable->aws_http_connection_release(work->connection_to_release); } /* * Step 3 - Make new connections */ struct aws_array_list errors; AWS_ZERO_STRUCT(errors); /* Even if we can't init this array, we still need to invoke error callbacks properly */ bool push_errors = false; if (work->new_connections > 0) { AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Requesting %zu new connections from http", (void *)manager, work->new_connections); push_errors = aws_array_list_init_dynamic(&errors, work->allocator, work->new_connections, sizeof(int)) == AWS_ERROR_SUCCESS; } for (size_t i = 0; i < work->new_connections; ++i) { if (s_aws_http_connection_manager_new_connection(manager)) { ++new_connection_failures; representative_error = aws_last_error(); if (push_errors) { AWS_FATAL_ASSERT(aws_array_list_push_back(&errors, &representative_error) == AWS_OP_SUCCESS); } } } if (new_connection_failures > 0) { /* * We failed and aren't going to receive a callback, but the current state assumes we will receive * a callback. So we need to re-lock and update the state ourselves. */ aws_mutex_lock(&manager->lock); AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] >= new_connection_failures); s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_PENDING_CONNECTIONS, new_connection_failures); /* * Rather than failing one acquisition for each connection failure, if there's at least one * connection failure, we instead fail all excess acquisitions, since there's no pending * connect that will necessarily resolve them. * * Try to correspond an error with the acquisition failure, but as a fallback just use the * representative error. 
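 *
 * For example (hypothetical numbers): if 4 new connections were requested and 3 of the
 * aws_http_client_connect() calls failed, the errors array holds those 3 codes; the loop
 * below drains every excess acquisition, pairing the i-th failure with errors[i] while
 * entries remain and falling back to the representative error after that.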
*/ size_t i = 0; while (manager->pending_acquisition_count > manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS]) { int error = representative_error; if (i < aws_array_list_length(&errors)) { aws_array_list_get_at(&errors, &error, i); } AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Failing excess connection acquisition with error code %d", (void *)manager, (int)error); s_aws_http_connection_manager_move_front_acquisition(manager, NULL, error, &work->completions); ++i; } aws_mutex_unlock(&manager->lock); } /* * Step 4 - Perform acquisition callbacks */ s_aws_http_connection_manager_complete_acquisitions(&work->completions, work->allocator); aws_array_list_clean_up(&errors); /* * Step 5 - Clean up work. Do this here rather than at the end of every caller. Destroy the manager if necessary */ s_aws_connection_management_transaction_clean_up(work); } void aws_http_connection_manager_acquire_connection( struct aws_http_connection_manager *manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Acquire connection", (void *)manager); struct aws_http_connection_acquisition *request = aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_http_connection_acquisition)); request->allocator = manager->allocator; request->callback = callback; request->user_data = user_data; request->manager = manager; struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); aws_mutex_lock(&manager->lock); /* It's a use after free crime, we don't want to handle */ AWS_FATAL_ASSERT(manager->state == AWS_HCMST_READY); aws_linked_list_push_back(&manager->pending_acquisitions, &request->node); ++manager->pending_acquisition_count; s_aws_http_connection_manager_build_transaction(&work); aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); } /* Only invoke with lock held */ static int s_idle_connection(struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { struct aws_idle_connection *idle_connection = aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_idle_connection)); idle_connection->allocator = manager->allocator; idle_connection->connection = connection; uint64_t idle_start_timestamp = 0; if (manager->system_vtable->aws_high_res_clock_get_ticks(&idle_start_timestamp)) { goto on_error; } idle_connection->cull_timestamp = idle_start_timestamp + aws_timestamp_convert( manager->max_connection_idle_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); aws_linked_list_push_back(&manager->idle_connections, &idle_connection->node); ++manager->idle_connection_count; return AWS_OP_SUCCESS; on_error: aws_mem_release(idle_connection->allocator, idle_connection); return AWS_OP_ERR; } int aws_http_connection_manager_release_connection( struct aws_http_connection_manager *manager, struct aws_http_connection *connection) { struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); int result = AWS_OP_ERR; bool should_release_connection = !manager->system_vtable->aws_http_connection_new_requests_allowed(connection); AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: User releasing connection (id=%p)", (void *)manager, (void *)connection); aws_mutex_lock(&manager->lock); /* We're probably hosed in this case, but let's not underflow */ if (manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] == 0) { AWS_LOGF_FATAL( 
AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Connection released when vended connection count is zero", (void *)manager); aws_raise_error(AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW); goto release; } result = AWS_OP_SUCCESS; s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_VENDED_CONNECTION, 1); if (!should_release_connection) { if (s_idle_connection(manager, connection)) { should_release_connection = true; } } s_aws_http_connection_manager_build_transaction(&work); if (should_release_connection) { work.connection_to_release = connection; } release: aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); return result; } static void s_aws_http_connection_manager_h2_on_goaway_received( struct aws_http_connection *http2_connection, uint32_t last_stream_id, uint32_t http2_error_code, struct aws_byte_cursor debug_data, void *user_data) { struct aws_http_connection_manager *manager = user_data; /* We don't offer user the details, but we can still log it out for debugging */ AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: HTTP/2 connection (id=%p) received GOAWAY with: last stream id - %u, error code - %u, debug data - " "\"%.*s\"", (void *)manager, (void *)http2_connection, last_stream_id, http2_error_code, (int)debug_data.len, debug_data.ptr); struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); aws_mutex_lock(&manager->lock); /* Goaway received, remove the connection from idle and release it, if it's there. But, not decrease the * open_connection_count as the shutdown callback will be invoked, we still need the manager to be alive */ const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections); for (struct aws_linked_list_node *node = aws_linked_list_begin(&manager->idle_connections); node != end; node = aws_linked_list_next(node)) { struct aws_idle_connection *current_idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node); if (current_idle_connection->connection == http2_connection) { aws_linked_list_remove(node); work.connection_to_release = http2_connection; aws_mem_release(current_idle_connection->allocator, current_idle_connection); --manager->idle_connection_count; break; } } s_aws_http_connection_manager_build_transaction(&work); aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); } /* Only invoke with lock held */ static void s_cm_on_connection_ready_or_failed( struct aws_http_connection_manager *manager, int error_code, struct aws_http_connection *connection, struct aws_connection_management_transaction *work) { bool is_shutting_down = manager->state == AWS_HCMST_SHUTTING_DOWN; if (!error_code) { if (is_shutting_down || s_idle_connection(manager, connection)) { /* * release it immediately */ AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: New connection (id=%p) releasing immediately", (void *)manager, (void *)connection); work->connection_to_release = connection; } } else { /* fail acquisition as one connection cannot be used any more */ while (manager->pending_acquisition_count > manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count) { AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Failing excess connection acquisition with error code %d", (void *)manager, (int)error_code); s_aws_http_connection_manager_move_front_acquisition(manager, NULL, error_code, &work->completions); } /* Since the connection never being idle, we 
need to release the connection here. */ if (connection) { work->connection_to_release = connection; } } } static void s_aws_http_connection_manager_h2_on_initial_settings_completed( struct aws_http_connection *http2_connection, int error_code, void *user_data) { struct aws_http_connection_manager *manager = user_data; /* The other side acknowledge about the settings which also means we received the settings from other side at this * point, because the settings should be the fist frame to be sent */ struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: HTTP/2 connection (id=%p) completed initial settings", (void *)manager, (void *)http2_connection); aws_mutex_lock(&manager->lock); AWS_FATAL_ASSERT(manager->pending_settings_count > 0); --manager->pending_settings_count; s_cm_on_connection_ready_or_failed(manager, error_code, http2_connection, &work); s_aws_http_connection_manager_build_transaction(&work); aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); } static void s_aws_http_connection_manager_on_connection_setup( struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_http_connection_manager *manager = user_data; struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); if (connection != NULL) { AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Received new connection (id=%p) from http layer", (void *)manager, (void *)connection); } else { AWS_LOGF_WARN( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Failed to obtain new connection from http layer, error %d(%s)", (void *)manager, error_code, aws_error_str(error_code)); } aws_mutex_lock(&manager->lock); AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] > 0); s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_PENDING_CONNECTIONS, 1); if (!error_code) { /* Shutdown will not be invoked if setup completed with error */ s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_OPEN_CONNECTION, 1); } if (connection != NULL && manager->system_vtable->aws_http_connection_get_version(connection) == AWS_HTTP_VERSION_2) { /* If the manager is shutting down, we will still wait for the settings, since we don't have map for connections */ ++manager->pending_settings_count; /* For http/2 connection, we vent the connection after the initial settings completed for the user to make * sure the connection is really ready to use. So, we can revert the counting and act like nothing happens * here and wait for the on_initial_settings_completed, which will ALWAYS be invoked before shutdown. BUT, * we increase the open_connection_count, as the shutdown will be invoked no matter what happens. 
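 *
 * Rough lifecycle of an HTTP/2 connection as seen by this manager (a summary of the callbacks
 * wired up in s_aws_http_connection_manager_new_connection, not new behavior):
 *
 *     on_connection_setup            -> ++pending_settings_count, ++open_connection_count
 *     on_initial_settings_completed  -> --pending_settings_count, connection idled (vendable) or released
 *     on_goaway_received (if any)    -> connection removed from the idle pool and released
 *     on_connection_shutdown         -> --open_connection_count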
*/ AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: New HTTP/2 connection (id=%p) set up, waiting for initial settings to complete", (void *)manager, (void *)connection); } else { /* If there is no connection, error code cannot be zero */ AWS_ASSERT(connection || error_code); s_cm_on_connection_ready_or_failed(manager, error_code, connection, &work); } s_aws_http_connection_manager_build_transaction(&work); aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); } static void s_aws_http_connection_manager_on_connection_shutdown( struct aws_http_connection *connection, int error_code, void *user_data) { (void)error_code; struct aws_http_connection_manager *manager = user_data; AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: shutdown received for connection (id=%p)", (void *)manager, (void *)connection); struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); aws_mutex_lock(&manager->lock); AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION] > 0); s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_OPEN_CONNECTION, 1); /* * Find and, if found, remove it from idle connections */ const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections); for (struct aws_linked_list_node *node = aws_linked_list_begin(&manager->idle_connections); node != end; node = aws_linked_list_next(node)) { struct aws_idle_connection *current_idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node); if (current_idle_connection->connection == connection) { aws_linked_list_remove(node); work.connection_to_release = connection; aws_mem_release(current_idle_connection->allocator, current_idle_connection); --manager->idle_connection_count; break; } } s_aws_http_connection_manager_build_transaction(&work); aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); } static void s_cull_idle_connections(struct aws_http_connection_manager *manager) { AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: culling idle connections", (void *)manager); if (manager == NULL || manager->max_connection_idle_in_milliseconds == 0) { return; } uint64_t now = 0; if (manager->system_vtable->aws_high_res_clock_get_ticks(&now)) { return; } struct aws_connection_management_transaction work; s_aws_connection_management_transaction_init(&work, manager); aws_mutex_lock(&manager->lock); /* Only if we're not shutting down */ if (manager->state == AWS_HCMST_READY) { const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections); struct aws_linked_list_node *current_node = aws_linked_list_begin(&manager->idle_connections); while (current_node != end) { struct aws_linked_list_node *node = current_node; struct aws_idle_connection *current_idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node); if (current_idle_connection->cull_timestamp > now) { break; } current_node = aws_linked_list_next(current_node); aws_linked_list_remove(node); aws_linked_list_push_back(&work.connections_to_release, node); --manager->idle_connection_count; AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: culling idle connection (%p)", (void *)manager, (void *)current_idle_connection->connection); } } s_aws_http_connection_manager_get_snapshot(manager, &work.snapshot); aws_mutex_unlock(&manager->lock); s_aws_http_connection_manager_execute_transaction(&work); } static void s_cull_task(struct aws_task *task, void *arg, enum 
aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_http_connection_manager *manager = arg; s_cull_idle_connections(manager); s_schedule_connection_culling(manager); } void aws_http_connection_manager_fetch_metrics( const struct aws_http_connection_manager *manager, struct aws_http_manager_metrics *out_metrics) { AWS_PRECONDITION(manager); AWS_PRECONDITION(out_metrics); AWS_FATAL_ASSERT(aws_mutex_lock((struct aws_mutex *)(void *)&manager->lock) == AWS_OP_SUCCESS); out_metrics->available_concurrency = manager->idle_connection_count; out_metrics->pending_concurrency_acquires = manager->pending_acquisition_count; out_metrics->leased_concurrency = manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION]; AWS_FATAL_ASSERT(aws_mutex_unlock((struct aws_mutex *)(void *)&manager->lock) == AWS_OP_SUCCESS); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/connection_monitor.c000066400000000000000000000204641456575232400255670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include static void s_process_statistics( struct aws_crt_statistics_handler *handler, struct aws_crt_statistics_sample_interval *interval, struct aws_array_list *stats_list, void *context) { (void)interval; struct aws_statistics_handler_http_connection_monitor_impl *impl = handler->impl; if (!aws_http_connection_monitoring_options_is_valid(&impl->options)) { return; } uint64_t pending_read_interval_ms = 0; uint64_t pending_write_interval_ms = 0; uint64_t bytes_read = 0; uint64_t bytes_written = 0; uint32_t h1_current_outgoing_stream_id = 0; uint32_t h1_current_incoming_stream_id = 0; /* * Pull out the data needed to perform the throughput calculation */ size_t stats_count = aws_array_list_length(stats_list); bool h2 = false; bool h2_was_inactive = false; for (size_t i = 0; i < stats_count; ++i) { struct aws_crt_statistics_base *stats_base = NULL; if (aws_array_list_get_at(stats_list, &stats_base, i)) { continue; } switch (stats_base->category) { case AWSCRT_STAT_CAT_SOCKET: { struct aws_crt_statistics_socket *socket_stats = (struct aws_crt_statistics_socket *)stats_base; bytes_read = socket_stats->bytes_read; bytes_written = socket_stats->bytes_written; break; } case AWSCRT_STAT_CAT_HTTP1_CHANNEL: { AWS_ASSERT(!h2); struct aws_crt_statistics_http1_channel *http1_stats = (struct aws_crt_statistics_http1_channel *)stats_base; pending_read_interval_ms = http1_stats->pending_incoming_stream_ms; pending_write_interval_ms = http1_stats->pending_outgoing_stream_ms; h1_current_outgoing_stream_id = http1_stats->current_outgoing_stream_id; h1_current_incoming_stream_id = http1_stats->current_incoming_stream_id; break; } case AWSCRT_STAT_CAT_HTTP2_CHANNEL: { struct aws_crt_statistics_http2_channel *h2_stats = (struct aws_crt_statistics_http2_channel *)stats_base; pending_read_interval_ms = h2_stats->pending_incoming_stream_ms; pending_write_interval_ms = h2_stats->pending_outgoing_stream_ms; h2_was_inactive |= h2_stats->was_inactive; h2 = true; break; } default: break; } } if (impl->options.statistics_observer_fn) { impl->options.statistics_observer_fn( (size_t)(uintptr_t)(context), stats_list, impl->options.statistics_observer_user_data); } struct aws_channel *channel = context; uint64_t bytes_per_second = 0; uint64_t max_pending_io_interval_ms = 0; if (pending_write_interval_ms > 0) { double fractional_bytes_written_per_second = 
(double)bytes_written * (double)AWS_TIMESTAMP_MILLIS / (double)pending_write_interval_ms; if (fractional_bytes_written_per_second >= (double)UINT64_MAX) { bytes_per_second = UINT64_MAX; } else { bytes_per_second = (uint64_t)fractional_bytes_written_per_second; } max_pending_io_interval_ms = pending_write_interval_ms; } if (pending_read_interval_ms > 0) { double fractional_bytes_read_per_second = (double)bytes_read * (double)AWS_TIMESTAMP_MILLIS / (double)pending_read_interval_ms; if (fractional_bytes_read_per_second >= (double)UINT64_MAX) { bytes_per_second = UINT64_MAX; } else { bytes_per_second = aws_add_u64_saturating(bytes_per_second, (uint64_t)fractional_bytes_read_per_second); } if (pending_read_interval_ms > max_pending_io_interval_ms) { max_pending_io_interval_ms = pending_read_interval_ms; } } AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL, "id=%p: channel throughput - %" PRIu64 " bytes per second", (void *)channel, bytes_per_second); /* * Check throughput only if the connection has active stream and no gap between. */ bool check_throughput = false; if (h2) { /* For HTTP/2, check throughput only if there always has any active stream on the connection */ check_throughput = !h2_was_inactive; } else { /* For HTTP/1, check throughput only if at least one stream exists and was observed in that role previously */ check_throughput = (h1_current_incoming_stream_id != 0 && h1_current_incoming_stream_id == impl->last_incoming_stream_id) || (h1_current_outgoing_stream_id != 0 && h1_current_outgoing_stream_id == impl->last_outgoing_stream_id); impl->last_outgoing_stream_id = h1_current_outgoing_stream_id; impl->last_incoming_stream_id = h1_current_incoming_stream_id; } impl->last_measured_throughput = bytes_per_second; if (!check_throughput) { AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL, "id=%p: channel throughput does not need to be checked", (void *)channel); impl->throughput_failure_time_ms = 0; return; } if (bytes_per_second >= impl->options.minimum_throughput_bytes_per_second) { impl->throughput_failure_time_ms = 0; return; } impl->throughput_failure_time_ms = aws_add_u64_saturating(impl->throughput_failure_time_ms, max_pending_io_interval_ms); AWS_LOGF_INFO( AWS_LS_IO_CHANNEL, "id=%p: Channel low throughput warning. Currently %" PRIu64 " milliseconds of consecutive failure time", (void *)channel, impl->throughput_failure_time_ms); uint64_t maximum_failure_time_ms = aws_timestamp_convert( impl->options.allowable_throughput_failure_interval_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); if (impl->throughput_failure_time_ms <= maximum_failure_time_ms) { return; } AWS_LOGF_INFO( AWS_LS_IO_CHANNEL, "id=%p: Channel low throughput threshold exceeded (< %" PRIu64 " bytes per second for more than %u seconds). 
Shutting down.", (void *)channel, impl->options.minimum_throughput_bytes_per_second, impl->options.allowable_throughput_failure_interval_seconds); aws_channel_shutdown(channel, AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE); } static void s_destroy(struct aws_crt_statistics_handler *handler) { if (handler == NULL) { return; } aws_mem_release(handler->allocator, handler); } static uint64_t s_get_report_interval_ms(struct aws_crt_statistics_handler *handler) { (void)handler; return 1000; } static struct aws_crt_statistics_handler_vtable s_http_connection_monitor_vtable = { .process_statistics = s_process_statistics, .destroy = s_destroy, .get_report_interval_ms = s_get_report_interval_ms, }; struct aws_crt_statistics_handler *aws_crt_statistics_handler_new_http_connection_monitor( struct aws_allocator *allocator, struct aws_http_connection_monitoring_options *options) { struct aws_crt_statistics_handler *handler = NULL; struct aws_statistics_handler_http_connection_monitor_impl *impl = NULL; if (!aws_mem_acquire_many( allocator, 2, &handler, sizeof(struct aws_crt_statistics_handler), &impl, sizeof(struct aws_statistics_handler_http_connection_monitor_impl))) { return NULL; } AWS_ZERO_STRUCT(*handler); AWS_ZERO_STRUCT(*impl); impl->options = *options; handler->vtable = &s_http_connection_monitor_vtable; handler->allocator = allocator; handler->impl = impl; return handler; } bool aws_http_connection_monitoring_options_is_valid(const struct aws_http_connection_monitoring_options *options) { if (options == NULL) { return false; } return options->allowable_throughput_failure_interval_seconds > 0 && options->minimum_throughput_bytes_per_second > 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h1_connection.c000066400000000000000000002662671456575232400244250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif enum { DECODER_INITIAL_SCRATCH_SIZE = 256, }; static int s_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); static int s_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); static int s_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size); static int s_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately); static size_t s_handler_initial_window_size(struct aws_channel_handler *handler); static size_t s_handler_message_overhead(struct aws_channel_handler *handler); static void s_handler_destroy(struct aws_channel_handler *handler); static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot); static struct aws_http_stream *s_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); static struct aws_http_stream *s_new_server_request_handler_stream( const struct aws_http_request_handler_options *options); static int s_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response); static void s_connection_close(struct aws_http_connection *connection_base); static void s_connection_stop_new_request(struct aws_http_connection *connection_base); static bool s_connection_is_open(const struct aws_http_connection *connection_base); static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base); static int s_decoder_on_request( enum aws_http_method method_enum, const struct aws_byte_cursor *method_str, const struct aws_byte_cursor *uri, void *user_data); static int s_decoder_on_response(int status_code, void *user_data); static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void *user_data); static int s_decoder_on_body(const struct aws_byte_cursor *data, bool finished, void *user_data); static int s_decoder_on_done(void *user_data); static void s_reset_statistics(struct aws_channel_handler *handler); static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats); static void s_write_outgoing_stream(struct aws_h1_connection *connection, bool first_try); static int s_try_process_next_stream_read_message(struct aws_h1_connection *connection, bool *out_stop_processing); static struct aws_http_connection_vtable s_h1_connection_vtable = { .channel_handler_vtable = { .process_read_message = s_handler_process_read_message, .process_write_message = s_handler_process_write_message, .increment_read_window = s_handler_increment_read_window, .shutdown = s_handler_shutdown, .initial_window_size = s_handler_initial_window_size, .message_overhead = s_handler_message_overhead, .destroy = s_handler_destroy, .reset_statistics = s_reset_statistics, .gather_statistics = s_gather_statistics, }, .on_channel_handler_installed = s_handler_installed, .make_request = s_make_request, .new_server_request_handler_stream = s_new_server_request_handler_stream, .stream_send_response = s_stream_send_response, .close = s_connection_close, .stop_new_requests = 
s_connection_stop_new_request, .is_open = s_connection_is_open, .new_requests_allowed = s_connection_new_requests_allowed, .change_settings = NULL, .send_ping = NULL, .send_goaway = NULL, .get_sent_goaway = NULL, .get_received_goaway = NULL, .get_local_settings = NULL, .get_remote_settings = NULL, }; static const struct aws_h1_decoder_vtable s_h1_decoder_vtable = { .on_request = s_decoder_on_request, .on_response = s_decoder_on_response, .on_header = s_decoder_on_header, .on_body = s_decoder_on_body, .on_done = s_decoder_on_done, }; void aws_h1_connection_lock_synced_data(struct aws_h1_connection *connection) { int err = aws_mutex_lock(&connection->synced_data.lock); AWS_ASSERT(!err); (void)err; } void aws_h1_connection_unlock_synced_data(struct aws_h1_connection *connection) { int err = aws_mutex_unlock(&connection->synced_data.lock); AWS_ASSERT(!err); (void)err; } /** * Internal function for bringing connection to a stop. * Invoked multiple times, including when: * - Channel is shutting down in the read direction. * - Channel is shutting down in the write direction. * - An error occurs. * - User wishes to close the connection (this is the only case where the function may run off-thread). */ static void s_stop( struct aws_h1_connection *connection, bool stop_reading, bool stop_writing, bool schedule_shutdown, int error_code) { AWS_ASSERT(stop_reading || stop_writing || schedule_shutdown); /* You are required to stop at least 1 thing */ if (stop_reading) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); connection->thread_data.is_reading_stopped = true; } if (stop_writing) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); connection->thread_data.is_writing_stopped = true; } { /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); /* Even if we're not scheduling shutdown just yet (ex: sent final request but waiting to read final response) * we don't consider the connection "open" anymore so user can't create more streams */ connection->synced_data.is_open = false; connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (schedule_shutdown) { AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "id=%p: Shutting down connection with error code %d (%s).", (void *)&connection->base, error_code, aws_error_name(error_code)); aws_channel_shutdown(connection->base.channel_slot->channel, error_code); } } static void s_shutdown_due_to_error(struct aws_h1_connection *connection, int error_code) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (!error_code) { error_code = AWS_ERROR_UNKNOWN; } /* Stop reading AND writing if an error occurs. * * It doesn't currently seem worth the complexity to distinguish between read errors and write errors. * The only scenarios that would benefit from this are pipelining scenarios (ex: A server * could continue sending a response to request A if there was an error reading request B). * But pipelining in HTTP/1.1 is known to be fragile with regards to errors, so let's just keep it simple. */ s_stop(connection, true /*stop_reading*/, true /*stop_writing*/, true /*schedule_shutdown*/, error_code); } /** * Public function for closing connection. 
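 *
 * Reached through the connection vtable (.close = s_connection_close in s_h1_connection_vtable
 * above). Note that this does not stop reading or writing immediately; it only marks the
 * connection as no longer open and schedules channel shutdown, letting the shutdown process
 * stop I/O naturally.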
*/ static void s_connection_close(struct aws_http_connection *connection_base) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. */ s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_SUCCESS); } static void s_connection_stop_new_request(struct aws_http_connection *connection_base) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); { /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); if (!connection->synced_data.new_stream_error_code) { connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; } aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ } static bool s_connection_is_open(const struct aws_http_connection *connection_base) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); bool is_open; { /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); is_open = connection->synced_data.is_open; aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ return is_open; } static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); int new_stream_error_code; { /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); new_stream_error_code = connection->synced_data.new_stream_error_code; aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ return new_stream_error_code == 0; } static int s_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response) { AWS_PRECONDITION(stream); AWS_PRECONDITION(response); struct aws_h1_stream *h1_stream = AWS_CONTAINER_OF(stream, struct aws_h1_stream, base); return aws_h1_stream_send_response(h1_stream, response); } /* Calculate the desired window size for connection that has switched protocols and become a midchannel handler. */ static size_t s_calculate_midchannel_desired_connection_window(struct aws_h1_connection *connection) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_ASSERT(connection->thread_data.has_switched_protocols); if (!connection->base.channel_slot->adj_right) { /* No downstream handler installed. */ return 0; } /* Connection is just dumbly forwarding aws_io_messages, so try to match downstream handler. */ return aws_channel_slot_downstream_read_window(connection->base.channel_slot); } /* Calculate the desired window size for a connection that is processing data for aws_http_streams. 
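 *
 * In stream mode the desired window tracks the free space left in the read buffer (this is
 * what the body below computes):
 *
 *     desired = stream_manual_window_management ? read_buffer.capacity - read_buffer.pending_bytes
 *                                               : SIZE_MAX
 *
 * with the subtraction saturating at zero.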
*/ static size_t s_calculate_stream_mode_desired_connection_window(struct aws_h1_connection *connection) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_ASSERT(!connection->thread_data.has_switched_protocols); if (!connection->base.stream_manual_window_management) { return SIZE_MAX; } /* Connection window should match the available space in the read-buffer */ AWS_ASSERT( connection->thread_data.read_buffer.pending_bytes <= connection->thread_data.read_buffer.capacity && "This isn't fatal, but our math is off"); const size_t desired_connection_window = aws_sub_size_saturating( connection->thread_data.read_buffer.capacity, connection->thread_data.read_buffer.pending_bytes); AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Window stats: connection=%zu+%zu stream=%" PRIu64 " buffer=%zu/%zu", (void *)&connection->base, connection->thread_data.connection_window, desired_connection_window - connection->thread_data.connection_window /*increment_size*/, connection->thread_data.incoming_stream ? connection->thread_data.incoming_stream->thread_data.stream_window : 0, connection->thread_data.read_buffer.pending_bytes, connection->thread_data.read_buffer.capacity); return desired_connection_window; } /* Increment connection window, if necessary */ static int s_update_connection_window(struct aws_h1_connection *connection) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (connection->thread_data.is_reading_stopped) { return AWS_OP_SUCCESS; } const size_t desired_size = connection->thread_data.has_switched_protocols ? s_calculate_midchannel_desired_connection_window(connection) : s_calculate_stream_mode_desired_connection_window(connection); const size_t increment_size = aws_sub_size_saturating(desired_size, connection->thread_data.connection_window); if (increment_size > 0) { /* Update local `connection_window`. See comments at variable's declaration site * on why we use this instead of the official `aws_channel_slot.window_size` */ connection->thread_data.connection_window += increment_size; connection->thread_data.recent_window_increments = aws_add_size_saturating(connection->thread_data.recent_window_increments, increment_size); if (aws_channel_slot_increment_read_window(connection->base.channel_slot, increment_size)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Failed to increment read window, error %d (%s). Closing connection.", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } int aws_h1_stream_activate(struct aws_http_stream *stream) { struct aws_h1_stream *h1_stream = AWS_CONTAINER_OF(stream, struct aws_h1_stream, base); struct aws_http_connection *base_connection = stream->owning_connection; struct aws_h1_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h1_connection, base); bool should_schedule_task = false; { /* BEGIN CRITICAL SECTION */ /* Note: We're touching both the connection's and stream's synced_data in this section, * which is OK because an h1_connection and all its h1_streams share a single lock. */ aws_h1_connection_lock_synced_data(connection); if (stream->id) { /* stream has already been activated. 
*/ aws_h1_connection_unlock_synced_data(connection); return AWS_OP_SUCCESS; } if (connection->synced_data.new_stream_error_code) { aws_h1_connection_unlock_synced_data(connection); AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Failed to activate the stream id=%p, new streams are not allowed now. error %d (%s)", (void *)&connection->base, (void *)stream, connection->synced_data.new_stream_error_code, aws_error_name(connection->synced_data.new_stream_error_code)); return aws_raise_error(connection->synced_data.new_stream_error_code); } stream->id = aws_http_connection_get_next_stream_id(base_connection); if (!stream->id) { aws_h1_connection_unlock_synced_data(connection); /* aws_http_connection_get_next_stream_id() raises its own error. */ return AWS_OP_ERR; } /* ID successfully assigned */ h1_stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_ACTIVE; aws_linked_list_push_back(&connection->synced_data.new_client_stream_list, &h1_stream->node); if (!connection->synced_data.is_cross_thread_work_task_scheduled) { connection->synced_data.is_cross_thread_work_task_scheduled = true; should_schedule_task = true; } aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ /* connection keeps activated stream alive until stream completes */ aws_atomic_fetch_add(&stream->refcount, 1); stream->metrics.stream_id = stream->id; if (should_schedule_task) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Scheduling connection cross-thread work task.", (void *)base_connection); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task); } else { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Connection cross-thread work task was already scheduled", (void *)base_connection); } return AWS_OP_SUCCESS; } void aws_h1_stream_cancel(struct aws_http_stream *stream, int error_code) { struct aws_h1_stream *h1_stream = AWS_CONTAINER_OF(stream, struct aws_h1_stream, base); struct aws_http_connection *base_connection = stream->owning_connection; struct aws_h1_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h1_connection, base); { /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); if (h1_stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE || connection->synced_data.is_open == false) { /* Not active, nothing to cancel. 
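 * (Either the stream was never activated, it has already completed, or the connection
 * is no longer open.)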
*/ aws_h1_connection_unlock_synced_data(connection); AWS_LOGF_DEBUG(AWS_LS_HTTP_STREAM, "id=%p: Stream not active, nothing to cancel.", (void *)stream); return; } aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "id=%p: Connection shutting down due to stream=%p cancelled with error code %d (%s).", (void *)&connection->base, (void *)stream, error_code, aws_error_name(error_code)); s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, error_code); } struct aws_http_stream *s_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { struct aws_h1_stream *stream = aws_h1_stream_new_request(client_connection, options); if (!stream) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Cannot create request stream, error %d (%s)", (void *)client_connection, aws_last_error(), aws_error_name(aws_last_error())); return NULL; } struct aws_h1_connection *connection = AWS_CONTAINER_OF(client_connection, struct aws_h1_connection, base); /* Insert new stream into pending list, and schedule outgoing_stream_task if it's not already running. */ int new_stream_error_code; { /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); new_stream_error_code = connection->synced_data.new_stream_error_code; aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (new_stream_error_code) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Cannot create request stream, error %d (%s)", (void *)client_connection, new_stream_error_code, aws_error_name(new_stream_error_code)); aws_raise_error(new_stream_error_code); goto error; } /* Success! */ struct aws_byte_cursor method; aws_http_message_get_request_method(options->request, &method); stream->base.request_method = aws_http_str_to_method(method); struct aws_byte_cursor path; aws_http_message_get_request_path(options->request, &path); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Created client request on connection=%p: " PRInSTR " " PRInSTR " " PRInSTR, (void *)&stream->base, (void *)client_connection, AWS_BYTE_CURSOR_PRI(method), AWS_BYTE_CURSOR_PRI(path), AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->base.http_version))); return &stream->base; error: /* Force destruction of the stream, avoiding ref counting */ stream->base.vtable->destroy(&stream->base); return NULL; } /* Extract work items from synced_data, and perform the work on-thread. 
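 * The cross-thread work here consists of moving newly activated client streams from
 * synced_data.new_client_stream_list onto thread_data.stream_list, then kicking the
 * outgoing-stream task if anything was moved.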
*/ static void s_cross_thread_work_task(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; struct aws_h1_connection *connection = arg; if (status != AWS_TASK_STATUS_RUN_READY) { return; } AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Running connection cross-thread work task.", (void *)&connection->base); /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); connection->synced_data.is_cross_thread_work_task_scheduled = false; bool has_new_client_streams = !aws_linked_list_empty(&connection->synced_data.new_client_stream_list); aws_linked_list_move_all_back( &connection->thread_data.stream_list, &connection->synced_data.new_client_stream_list); aws_h1_connection_unlock_synced_data(connection); /* END CRITICAL SECTION */ /* Kick off outgoing-stream task if necessary */ if (has_new_client_streams) { aws_h1_connection_try_write_outgoing_stream(connection); } } static bool s_aws_http_stream_was_successful_connect(struct aws_h1_stream *stream) { struct aws_http_stream *base = &stream->base; if (base->request_method != AWS_HTTP_METHOD_CONNECT) { return false; } if (base->client_data == NULL) { return false; } if (base->client_data->response_status != AWS_HTTP_STATUS_CODE_200_OK) { return false; } return true; } /** * Validate and perform a protocol switch on a connection. Protocol switching essentially turns the connection's * handler into a dummy pass-through. It is valid to switch protocols to the same protocol resulting in a channel * that has a "dead" http handler in the middle of the channel (which negotiated the CONNECT through the proxy) and * a "live" handler on the end which takes the actual http requests. By doing this, we get the exact same * behavior whether we're transitioning to http or any other protocol: once the CONNECT succeeds * the first http handler is put in pass-through mode and a new protocol (which could be http) is tacked onto the end. */ static int s_aws_http1_switch_protocols(struct aws_h1_connection *connection) { AWS_FATAL_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); /* Switching protocols while there are multiple streams is too complex to deal with. * Ensure stream_list has exactly this 1 stream in it. 
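 * (The begin()/rbegin() comparison below is a cheap "more than one node?" check:
 * for a single-element list both calls return the same node.)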
 */
    if (aws_linked_list_begin(&connection->thread_data.stream_list) !=
        aws_linked_list_rbegin(&connection->thread_data.stream_list)) {
        AWS_LOGF_ERROR(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Cannot switch protocols while further streams are pending, closing connection.",
            (void *)&connection->base);

        return aws_raise_error(AWS_ERROR_INVALID_STATE);
    }

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_CONNECTION,
        "id=%p: Connection has switched protocols, another channel handler must be installed to"
        " deal with further data.",
        (void *)&connection->base);

    connection->thread_data.has_switched_protocols = true;

    { /* BEGIN CRITICAL SECTION */
        aws_h1_connection_lock_synced_data(connection);
        connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_SWITCHED_PROTOCOLS;
        aws_h1_connection_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */

    return AWS_OP_SUCCESS;
}

static void s_stream_complete(struct aws_h1_stream *stream, int error_code) {
    struct aws_h1_connection *connection =
        AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h1_connection, base);

    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));

    /*
     * If this is the end of a successful CONNECT request, mark ourselves as pass-through since the proxy layer
     * will be tacking on a new http handler (and possibly a tls handler in-between).
     */
    if (error_code == AWS_ERROR_SUCCESS && s_aws_http_stream_was_successful_connect(stream)) {
        if (s_aws_http1_switch_protocols(connection)) {
            error_code = AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE;
            s_shutdown_due_to_error(connection, error_code);
        }
    }

    if (stream->base.client_data && stream->base.client_data->response_first_byte_timeout_task.fn != NULL) {
        /* The stream completed while a response timeout task is still outstanding, so cancel it now. This is
         * safe because the task is always scheduled and cancelled on the connection's thread. */
        struct aws_event_loop *connection_loop = aws_channel_get_event_loop(connection->base.channel_slot->channel);
        /* The task will be zeroed out within the call */
        aws_event_loop_cancel_task(connection_loop, &stream->base.client_data->response_first_byte_timeout_task);
    }

    if (error_code != AWS_ERROR_SUCCESS) {
        if (stream->base.client_data && stream->is_incoming_message_done) {
            /* The client request already finished receiving its response, so ignore the error
             * and consider the stream successful. */
            AWS_LOGF_DEBUG(
                AWS_LS_HTTP_STREAM,
                "id=%p: Ignoring error code %d (%s). The response has been fully received, "
                "so the stream will complete successfully.",
                (void *)&stream->base,
                error_code,
                aws_error_name(error_code));
            error_code = AWS_ERROR_SUCCESS;
        }
        if (stream->base.server_data && stream->is_outgoing_message_done) {
            /* The server finished sending its response, even though the request was never fully received,
             * so ignore the error and consider the stream successful. */
            AWS_LOGF_DEBUG(
                AWS_LS_HTTP_STREAM,
                "id=%p: Ignoring error code %d (%s). The response has been fully sent,"
                " so the stream will complete successfully",
                (void *)&stream->base,
                error_code,
                aws_error_name(error_code));
            error_code = AWS_ERROR_SUCCESS;
        }
    }

    /* Remove stream from list.
*/ aws_linked_list_remove(&stream->node); /* Nice logging */ if (error_code) { AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Stream completed with error code %d (%s).", (void *)&stream->base, error_code, aws_error_name(error_code)); } else if (stream->base.client_data) { AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Client request complete, response status: %d (%s).", (void *)&stream->base, stream->base.client_data->response_status, aws_http_status_text(stream->base.client_data->response_status)); } else { AWS_ASSERT(stream->base.server_data); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Server response to " PRInSTR " request complete.", (void *)&stream->base, AWS_BYTE_CURSOR_PRI(stream->base.server_data->request_method_str)); } /* If connection must shut down, do it BEFORE invoking stream-complete callback. * That way, if aws_http_connection_is_open() is called from stream-complete callback, it returns false. */ if (stream->is_final_stream) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Closing connection due to completion of final stream.", (void *)&connection->base); s_connection_close(&connection->base); } { /* BEGIN CRITICAL SECTION */ /* Note: We're touching the stream's synced_data here, which is OK * because an h1_connection and all its h1_streams share a single lock. */ aws_h1_connection_lock_synced_data(connection); /* Mark stream complete */ stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_COMPLETE; /* Move chunks out of synced data */ aws_linked_list_move_all_back(&stream->thread_data.pending_chunk_list, &stream->synced_data.pending_chunk_list); aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ /* Complete any leftover chunks */ while (!aws_linked_list_empty(&stream->thread_data.pending_chunk_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.pending_chunk_list); struct aws_h1_chunk *chunk = AWS_CONTAINER_OF(node, struct aws_h1_chunk, node); aws_h1_chunk_complete_and_destroy(chunk, &stream->base, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED); } if (stream->base.on_metrics) { stream->base.on_metrics(&stream->base, &stream->base.metrics, stream->base.user_data); } /* Invoke callback and clean up stream. 
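 *
 * Illustrative sketch of user code that relies on the ordering above (names such as
 * s_my_on_complete are hypothetical; aws_http_stream_get_connection() and
 * aws_http_connection_is_open() are public aws-c-http APIs):
 *
 *   static void s_my_on_complete(struct aws_http_stream *user_stream, int stream_error_code, void *user_data) {
 *       struct aws_http_connection *conn = aws_http_stream_get_connection(user_stream);
 *       if (!aws_http_connection_is_open(conn)) {
 *           // The final stream already closed the connection; react here (e.g. reconnect).
 *       }
 *   }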
*/ if (stream->base.on_complete) { stream->base.on_complete(&stream->base, error_code, stream->base.user_data); } aws_http_stream_release(&stream->base); } static void s_add_time_measurement_to_stats(uint64_t start_ns, uint64_t end_ns, uint64_t *output_ms) { if (end_ns > start_ns) { *output_ms += aws_timestamp_convert(end_ns - start_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); } } static void s_set_outgoing_stream_ptr( struct aws_h1_connection *connection, struct aws_h1_stream *next_outgoing_stream) { struct aws_h1_stream *prev = connection->thread_data.outgoing_stream; uint64_t now_ns = 0; aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns); if (prev == NULL && next_outgoing_stream != NULL) { /* transition from nothing to write -> something to write */ connection->thread_data.outgoing_stream_timestamp_ns = now_ns; } else if (prev != NULL && next_outgoing_stream == NULL) { /* transition from something to write -> nothing to write */ s_add_time_measurement_to_stats( connection->thread_data.outgoing_stream_timestamp_ns, now_ns, &connection->thread_data.stats.pending_outgoing_stream_ms); } connection->thread_data.outgoing_stream = next_outgoing_stream; } static void s_set_incoming_stream_ptr( struct aws_h1_connection *connection, struct aws_h1_stream *next_incoming_stream) { struct aws_h1_stream *prev = connection->thread_data.incoming_stream; uint64_t now_ns = 0; aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns); if (prev == NULL && next_incoming_stream != NULL) { /* transition from nothing to read -> something to read */ connection->thread_data.incoming_stream_timestamp_ns = now_ns; } else if (prev != NULL && next_incoming_stream == NULL) { /* transition from something to read -> nothing to read */ s_add_time_measurement_to_stats( connection->thread_data.incoming_stream_timestamp_ns, now_ns, &connection->thread_data.stats.pending_incoming_stream_ms); } connection->thread_data.incoming_stream = next_incoming_stream; } /** * Ensure `incoming_stream` is pointing at the correct stream, and update state if it changes. */ static void s_client_update_incoming_stream_ptr(struct aws_h1_connection *connection) { struct aws_linked_list *list = &connection->thread_data.stream_list; struct aws_h1_stream *desired; if (connection->thread_data.is_reading_stopped) { desired = NULL; } else if (aws_linked_list_empty(list)) { desired = NULL; } else { desired = AWS_CONTAINER_OF(aws_linked_list_begin(list), struct aws_h1_stream, node); } if (connection->thread_data.incoming_stream == desired) { return; } AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Current incoming stream is now %p.", (void *)&connection->base, desired ? (void *)&desired->base : NULL); s_set_incoming_stream_ptr(connection, desired); } static void s_http_stream_response_first_byte_timeout_task( struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_h1_stream *stream = arg; struct aws_http_connection *connection_base = stream->base.owning_connection; /* zero-out task to indicate that it's no longer scheduled */ AWS_ZERO_STRUCT(stream->base.client_data->response_first_byte_timeout_task); if (status == AWS_TASK_STATUS_CANCELED) { return; } struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); /* Timeout happened, close the connection */ uint64_t response_first_byte_timeout_ms = stream->base.client_data->response_first_byte_timeout_ms == 0 ? 
connection_base->client_data->response_first_byte_timeout_ms : stream->base.client_data->response_first_byte_timeout_ms; AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "id=%p: Closing connection as timeout after request sent to the first byte received happened. " "response_first_byte_timeout_ms is %" PRIu64 ".", (void *)connection_base, response_first_byte_timeout_ms); /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. */ s_stop( connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); } static void s_set_outgoing_message_done(struct aws_h1_stream *stream) { struct aws_http_connection *connection = stream->base.owning_connection; struct aws_channel *channel = aws_http_connection_get_channel(connection); AWS_ASSERT(aws_channel_thread_is_callers_thread(channel)); if (stream->is_outgoing_message_done) { /* Already did the job */ return; } stream->is_outgoing_message_done = true; AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns == -1); aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_end_timestamp_ns); AWS_ASSERT(stream->base.metrics.send_start_timestamp_ns != -1); AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns >= stream->base.metrics.send_start_timestamp_ns); stream->base.metrics.sending_duration_ns = stream->base.metrics.send_end_timestamp_ns - stream->base.metrics.send_start_timestamp_ns; if (stream->base.metrics.receive_start_timestamp_ns == -1) { /* We haven't receive any message, schedule the response timeout task */ uint64_t response_first_byte_timeout_ms = 0; if (stream->base.client_data != NULL && connection->client_data != NULL) { response_first_byte_timeout_ms = stream->base.client_data->response_first_byte_timeout_ms == 0 ? connection->client_data->response_first_byte_timeout_ms : stream->base.client_data->response_first_byte_timeout_ms; } if (response_first_byte_timeout_ms != 0) { /* The task should not be initialized before. */ AWS_ASSERT(stream->base.client_data->response_first_byte_timeout_task.fn == NULL); aws_task_init( &stream->base.client_data->response_first_byte_timeout_task, s_http_stream_response_first_byte_timeout_task, stream, "http_stream_response_first_byte_timeout_task"); uint64_t now_ns = 0; aws_channel_current_clock_time(channel, &now_ns); struct aws_event_loop *connection_loop = aws_channel_get_event_loop(channel); aws_event_loop_schedule_task_future( connection_loop, &stream->base.client_data->response_first_byte_timeout_task, now_ns + aws_timestamp_convert( response_first_byte_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); } } } /** * If necessary, update `outgoing_stream` so it is pointing at a stream * with data to send, or NULL if all streams are done sending data. * * Called from event-loop thread. * This function has lots of side effects. */ static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct aws_h1_connection *connection) { struct aws_h1_stream *current = connection->thread_data.outgoing_stream; bool current_changed = false; int err; /* If current stream is done sending data... */ if (current && !aws_h1_encoder_is_message_in_progress(&connection->thread_data.encoder)) { s_set_outgoing_message_done(current); /* RFC-7230 section 6.6: Tear-down. 
     * If this was the final stream, don't allow any further streams to be sent */
        if (current->is_final_stream) {
            AWS_LOGF_TRACE(
                AWS_LS_HTTP_CONNECTION,
                "id=%p: Done sending final stream, no further streams will be sent.",
                (void *)&connection->base);

            s_stop(
                connection,
                false /*stop_reading*/,
                true /*stop_writing*/,
                false /*schedule_shutdown*/,
                AWS_ERROR_SUCCESS);
        }

        /* If it's also done receiving data, then it's complete! */
        if (current->is_incoming_message_done) {
            /* Only 1st stream in list could finish receiving before it finished sending */
            AWS_ASSERT(&current->node == aws_linked_list_begin(&connection->thread_data.stream_list));

            /* This removes stream from list */
            s_stream_complete(current, AWS_ERROR_SUCCESS);
        }

        current = NULL;
        current_changed = true;
    }

    /* If current stream is NULL, look for more work. */
    if (!current && !connection->thread_data.is_writing_stopped) {

        /* Look for next stream we can work on. */
        for (struct aws_linked_list_node *node = aws_linked_list_begin(&connection->thread_data.stream_list);
             node != aws_linked_list_end(&connection->thread_data.stream_list);
             node = aws_linked_list_next(node)) {

            struct aws_h1_stream *stream = AWS_CONTAINER_OF(node, struct aws_h1_stream, node);

            /* If we already sent this stream's data, keep looking... */
            if (stream->is_outgoing_message_done) {
                continue;
            }

            /* STOP if we're a server, and this stream's response isn't ready to send.
             * It's not like we can skip this and start on the next stream because responses must be sent in order.
             * Don't need a check like this for clients because their streams always start with data to send. */
            if (connection->base.server_data && !stream->thread_data.has_outgoing_response) {
                break;
            }

            /* We found a stream to work on! */
            current = stream;
            current_changed = true;
            break;
        }
    }

    /* Update current incoming and outgoing streams. */
    if (current_changed) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Current outgoing stream is now %p.",
            (void *)&connection->base,
            current ? (void *)&current->base : NULL);

        s_set_outgoing_stream_ptr(connection, current);

        if (current) {
            AWS_ASSERT(current->base.metrics.send_start_timestamp_ns == -1);
            aws_high_res_clock_get_ticks((uint64_t *)&current->base.metrics.send_start_timestamp_ns);

            err = aws_h1_encoder_start_message(
                &connection->thread_data.encoder, &current->encoder_message, &current->base);
            (void)err;
            AWS_ASSERT(connection->thread_data.encoder.state == AWS_H1_ENCODER_STATE_INIT);
            AWS_ASSERT(!err);
        }

        /* incoming_stream update is only for client */
        if (connection->base.client_data) {
            s_client_update_incoming_stream_ptr(connection);
        }
    }

    return current;
}

/* Runs after an aws_io_message containing HTTP has completed (written to the network, or failed).
 * This does NOT run after switching protocols, when we're dumbly forwarding aws_io_messages
 * as a midchannel handler. */
static void s_on_channel_write_complete(
    struct aws_channel *channel,
    struct aws_io_message *message,
    int err_code,
    void *user_data) {

    (void)message;
    struct aws_h1_connection *connection = user_data;
    AWS_ASSERT(connection->thread_data.is_outgoing_stream_task_active);
    AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));

    if (err_code) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Message did not write to network, error %d (%s)",
            (void *)&connection->base,
            err_code,
            aws_error_name(err_code));

        s_shutdown_due_to_error(connection, err_code);
        return;
    }

    AWS_LOGF_TRACE(
        AWS_LS_HTTP_CONNECTION,
        "id=%p: Message finished writing to network.
Rescheduling outgoing stream task.", (void *)&connection->base); /* To avoid wasting memory, we only want ONE of our written aws_io_messages in the channel at a time. * Therefore, we wait until it's written to the network before trying to send another * by running the outgoing-stream-task again. * * We also want to share the network with other channels. * Therefore, when the write completes, we SCHEDULE the outgoing-stream-task * to run again instead of calling the function directly. * This way, if the message completes synchronously, * we're not hogging the network by writing message after message in a tight loop */ aws_channel_schedule_task_now(channel, &connection->outgoing_stream_task); } static void s_outgoing_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_h1_connection *connection = arg; AWS_ASSERT(connection->thread_data.is_outgoing_stream_task_active); AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); s_write_outgoing_stream(connection, false /*first_try*/); } void aws_h1_connection_try_write_outgoing_stream(struct aws_h1_connection *connection) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (connection->thread_data.is_outgoing_stream_task_active) { /* Task is already active */ return; } connection->thread_data.is_outgoing_stream_task_active = true; s_write_outgoing_stream(connection, true /*first_try*/); } /* Do the actual work of the outgoing-stream-task */ static void s_write_outgoing_stream(struct aws_h1_connection *connection, bool first_try) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_PRECONDITION(connection->thread_data.is_outgoing_stream_task_active); /* Just stop if we're no longer writing stream data */ if (connection->thread_data.is_writing_stopped || connection->thread_data.has_switched_protocols) { return; } /* Determine whether we have data available to send, and end task immediately if there's not. * The outgoing stream task will be kicked off again when user adds more data (new stream, new chunk, etc) */ struct aws_h1_stream *outgoing_stream = s_update_outgoing_stream_ptr(connection); bool waiting_for_chunks = aws_h1_encoder_is_waiting_for_chunks(&connection->thread_data.encoder); if (!outgoing_stream || waiting_for_chunks) { if (!first_try) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Outgoing stream task stopped. outgoing_stream=%p waiting_for_chunks:%d", (void *)&connection->base, outgoing_stream ? (void *)&outgoing_stream->base : NULL, waiting_for_chunks); } connection->thread_data.is_outgoing_stream_task_active = false; return; } if (first_try) { AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "id=%p: Outgoing stream task has begun.", (void *)&connection->base); } struct aws_io_message *msg = aws_channel_slot_acquire_max_message_for_write(connection->base.channel_slot); if (!msg) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Failed to acquire message from pool, error %d (%s). Closing connection.", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Set up callback so we can send another message when this one completes */ msg->on_completion = s_on_channel_write_complete; msg->user_data = connection; /* * Fill message data from the outgoing stream. * Note that we might be resuming work on a stream from a previous run of this task. 
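 * aws_h1_encoder_process() writes as much as currently fits into msg->message_data and keeps its
 * own progress, so a partially sent message simply continues the next time this task runs.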
*/ if (AWS_OP_SUCCESS != aws_h1_encoder_process(&connection->thread_data.encoder, &msg->message_data)) { /* Error sending data, abandon ship */ goto error; } if (msg->message_data.len > 0) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Outgoing stream task is sending message of size %zu.", (void *)&connection->base, msg->message_data.len); if (aws_channel_slot_send_message(connection->base.channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Failed to send message in write direction, error %d (%s). Closing connection.", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); goto error; } } else { /* If message is empty, warn that no work is being done * and reschedule the task to try again next tick. * It's likely that body isn't ready, so body streaming function has no data to write yet. * If this scenario turns out to be common we should implement a "pause" feature. */ AWS_LOGF_WARN( AWS_LS_HTTP_CONNECTION, "id=%p: Current outgoing stream %p sent no data, will try again next tick.", (void *)&connection->base, outgoing_stream ? (void *)&outgoing_stream->base : NULL); aws_mem_release(msg->allocator, msg); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->outgoing_stream_task); } return; error: if (msg) { aws_mem_release(msg->allocator, msg); } s_shutdown_due_to_error(connection, aws_last_error()); } static int s_decoder_on_request( enum aws_http_method method_enum, const struct aws_byte_cursor *method_str, const struct aws_byte_cursor *uri, void *user_data) { struct aws_h1_connection *connection = user_data; struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream; AWS_FATAL_ASSERT(connection->thread_data.incoming_stream->base.server_data); /* Request but I'm a client?!?!? */ AWS_ASSERT(incoming_stream->base.server_data->request_method_str.len == 0); AWS_ASSERT(incoming_stream->base.server_data->request_path.len == 0); AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Incoming request: method=" PRInSTR " uri=" PRInSTR, (void *)&incoming_stream->base, AWS_BYTE_CURSOR_PRI(*method_str), AWS_BYTE_CURSOR_PRI(*uri)); /* Copy strings to internal buffer */ struct aws_byte_buf *storage_buf = &incoming_stream->incoming_storage_buf; AWS_ASSERT(storage_buf->capacity == 0); size_t storage_size = 0; int err = aws_add_size_checked(uri->len, method_str->len, &storage_size); if (err) { goto error; } err = aws_byte_buf_init(storage_buf, incoming_stream->base.alloc, storage_size); if (err) { goto error; } aws_byte_buf_write_from_whole_cursor(storage_buf, *method_str); incoming_stream->base.server_data->request_method_str = aws_byte_cursor_from_buf(storage_buf); aws_byte_buf_write_from_whole_cursor(storage_buf, *uri); incoming_stream->base.server_data->request_path = aws_byte_cursor_from_buf(storage_buf); aws_byte_cursor_advance(&incoming_stream->base.server_data->request_path, storage_buf->len - uri->len); incoming_stream->base.request_method = method_enum; /* No user callbacks, so we're not checking for shutdown */ return AWS_OP_SUCCESS; error: AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Failed to process new incoming request, error %d (%s).", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } static int s_decoder_on_response(int status_code, void *user_data) { struct aws_h1_connection *connection = user_data; AWS_FATAL_ASSERT(connection->thread_data.incoming_stream->base.client_data); /* Response but I'm a server?!?!? 
*/ AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Incoming response status: %d (%s).", (void *)&connection->thread_data.incoming_stream->base, status_code, aws_http_status_text(status_code)); connection->thread_data.incoming_stream->base.client_data->response_status = status_code; /* No user callbacks, so we're not checking for shutdown */ return AWS_OP_SUCCESS; } static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void *user_data) { struct aws_h1_connection *connection = user_data; struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream; AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Incoming header: " PRInSTR ": " PRInSTR, (void *)&incoming_stream->base, AWS_BYTE_CURSOR_PRI(header->name_data), AWS_BYTE_CURSOR_PRI(header->value_data)); enum aws_http_header_block header_block = aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder); /* RFC-7230 section 6.1. * "Connection: close" header signals that a connection will not persist after the current request/response */ if (header->name == AWS_HTTP_HEADER_CONNECTION) { /* Certain L7 proxies send a connection close header on a 200/OK response to a CONNECT request. This is nutty * behavior, but the obviously desired behavior on a 200 CONNECT response is to leave the connection open * for the tunneling. */ bool ignore_connection_close = incoming_stream->base.request_method == AWS_HTTP_METHOD_CONNECT && incoming_stream->base.client_data && incoming_stream->base.client_data->response_status == AWS_HTTP_STATUS_CODE_200_OK; if (!ignore_connection_close && aws_byte_cursor_eq_c_str_ignore_case(&header->value_data, "close")) { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Received 'Connection: close' header. This will be the final stream on this connection.", (void *)&incoming_stream->base); incoming_stream->is_final_stream = true; { /* BEGIN CRITICAL SECTION */ aws_h1_connection_lock_synced_data(connection); connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; aws_h1_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (connection->base.client_data) { /** * RFC-9112 section 9.6. * A client that receives a "close" connection option MUST cease sending * requests on that connection and close the connection after reading the * response message containing the "close" connection option. * * Mark the stream's outgoing message as complete, * so that we stop sending, and stop waiting for it to finish sending. **/ if (!incoming_stream->is_outgoing_message_done) { AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Received 'Connection: close' header, no more request data will be sent.", (void *)&incoming_stream->base); s_set_outgoing_message_done(incoming_stream); } /* Stop writing right now. 
* Shutdown will be scheduled after we finishing parsing the response */ s_stop( connection, false /*stop_reading*/, true /*stop_writing*/, false /*schedule_shutdown*/, AWS_ERROR_SUCCESS); } } } if (incoming_stream->base.on_incoming_headers) { struct aws_http_header deliver = { .name = header->name_data, .value = header->value_data, }; int err = incoming_stream->base.on_incoming_headers( &incoming_stream->base, header_block, &deliver, 1, incoming_stream->base.user_data); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming header callback raised error %d (%s).", (void *)&incoming_stream->base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_mark_head_done(struct aws_h1_stream *incoming_stream) { /* Bail out if we've already done this */ if (incoming_stream->is_incoming_head_done) { return AWS_OP_SUCCESS; } struct aws_h1_connection *connection = AWS_CONTAINER_OF(incoming_stream->base.owning_connection, struct aws_h1_connection, base); enum aws_http_header_block header_block = aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder); if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Main header block done.", (void *)&incoming_stream->base); incoming_stream->is_incoming_head_done = true; } else if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) { AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Informational header block done.", (void *)&incoming_stream->base); /* Only clients can receive informational headers. * Check whether we're switching protocols */ if (incoming_stream->base.client_data->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS) { if (s_aws_http1_switch_protocols(connection)) { return AWS_OP_ERR; } } } /* Invoke user cb */ if (incoming_stream->base.on_incoming_header_block_done) { int err = incoming_stream->base.on_incoming_header_block_done( &incoming_stream->base, header_block, incoming_stream->base.user_data); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming-header-block-done callback raised error %d (%s).", (void *)&incoming_stream->base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_decoder_on_body(const struct aws_byte_cursor *data, bool finished, void *user_data) { (void)finished; struct aws_h1_connection *connection = user_data; struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream; AWS_ASSERT(incoming_stream); int err = s_mark_head_done(incoming_stream); if (err) { return AWS_OP_ERR; } /* No need to invoke callback for 0-length data */ if (data->len == 0) { return AWS_OP_SUCCESS; } AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Incoming body: %zu bytes received.", (void *)&incoming_stream->base, data->len); if (connection->base.stream_manual_window_management) { /* Let stream window shrink by amount of body data received */ if (data->len > incoming_stream->thread_data.stream_window) { /* This error shouldn't be possible, but it's all complicated, so do runtime check to be safe. */ AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Internal error. Data exceeds HTTP-stream's window.", (void *)&incoming_stream->base); return aws_raise_error(AWS_ERROR_INVALID_STATE); } incoming_stream->thread_data.stream_window -= data->len; if (incoming_stream->thread_data.stream_window == 0) { AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Flow-control window has reached 0. 
No more data can be received until window is updated.",
                (void *)&incoming_stream->base);
        }
    }

    if (incoming_stream->base.on_incoming_body) {
        err = incoming_stream->base.on_incoming_body(&incoming_stream->base, data, incoming_stream->base.user_data);
        if (err) {
            AWS_LOGF_ERROR(
                AWS_LS_HTTP_STREAM,
                "id=%p: Incoming body callback raised error %d (%s).",
                (void *)&incoming_stream->base,
                aws_last_error(),
                aws_error_name(aws_last_error()));

            return AWS_OP_ERR;
        }
    }

    return AWS_OP_SUCCESS;
}

static int s_decoder_on_done(void *user_data) {
    struct aws_h1_connection *connection = user_data;
    struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
    AWS_ASSERT(incoming_stream);

    /* Ensure head was marked done */
    int err = s_mark_head_done(incoming_stream);
    if (err) {
        return AWS_OP_ERR;
    }

    /* If this is an informational response, stop here and keep waiting for the final response */
    enum aws_http_header_block header_block =
        aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder);
    if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
        return AWS_OP_SUCCESS;
    }

    /* Otherwise, the incoming stream has finished decoding; update it as needed */
    incoming_stream->is_incoming_message_done = true;
    aws_high_res_clock_get_ticks((uint64_t *)&incoming_stream->base.metrics.receive_end_timestamp_ns);
    AWS_ASSERT(incoming_stream->base.metrics.receive_start_timestamp_ns != -1);
    AWS_ASSERT(
        incoming_stream->base.metrics.receive_end_timestamp_ns >=
        incoming_stream->base.metrics.receive_start_timestamp_ns);
    incoming_stream->base.metrics.receiving_duration_ns = incoming_stream->base.metrics.receive_end_timestamp_ns -
                                                          incoming_stream->base.metrics.receive_start_timestamp_ns;

    /* RFC-7230 section 6.6
     * After reading the final message, the connection must not read any more */
    if (incoming_stream->is_final_stream) {
        AWS_LOGF_TRACE(
            AWS_LS_HTTP_CONNECTION,
            "id=%p: Done reading final stream, no further streams will be read.",
            (void *)&connection->base);

        s_stop(
            connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, AWS_ERROR_SUCCESS);
    }

    if (connection->base.server_data) {
        /* Server side */
        aws_http_on_incoming_request_done_fn *on_request_done = incoming_stream->base.server_data->on_request_done;
        if (on_request_done) {
            err = on_request_done(&incoming_stream->base, incoming_stream->base.user_data);
            if (err) {
                AWS_LOGF_ERROR(
                    AWS_LS_HTTP_STREAM,
                    "id=%p: Incoming request done callback raised error %d (%s).",
                    (void *)&incoming_stream->base,
                    aws_last_error(),
                    aws_error_name(aws_last_error()));
                return AWS_OP_ERR;
            }
        }
        if (incoming_stream->is_outgoing_message_done) {
            AWS_ASSERT(&incoming_stream->node == aws_linked_list_begin(&connection->thread_data.stream_list));
            s_stream_complete(incoming_stream, AWS_ERROR_SUCCESS);
        }
        s_set_incoming_stream_ptr(connection, NULL);

    } else if (incoming_stream->is_outgoing_message_done) {
        /* Client side */
        AWS_ASSERT(&incoming_stream->node == aws_linked_list_begin(&connection->thread_data.stream_list));
        s_stream_complete(incoming_stream, AWS_ERROR_SUCCESS);

        s_client_update_incoming_stream_ptr(connection);
    }

    /* Report success even if the user's on_complete() callback shuts down the connection.
     * We don't want it to look like something went wrong while decoding.
* The decode() function returns after each message completes, * and we won't call decode() again if the connection has been shut down */ return AWS_OP_SUCCESS; } /* Common new() logic for server & client */ static struct aws_h1_connection *s_connection_new( struct aws_allocator *alloc, bool manual_window_management, size_t initial_window_size, const struct aws_http1_connection_options *http1_options, bool server) { struct aws_h1_connection *connection = aws_mem_calloc(alloc, 1, sizeof(struct aws_h1_connection)); if (!connection) { goto error_connection_alloc; } connection->base.vtable = &s_h1_connection_vtable; connection->base.alloc = alloc; connection->base.channel_handler.vtable = &s_h1_connection_vtable.channel_handler_vtable; connection->base.channel_handler.alloc = alloc; connection->base.channel_handler.impl = connection; connection->base.http_version = AWS_HTTP_VERSION_1_1; connection->base.stream_manual_window_management = manual_window_management; /* Init the next stream id (server must use even ids, client odd [RFC 7540 5.1.1])*/ connection->base.next_stream_id = server ? 2 : 1; /* 1 refcount for user */ aws_atomic_init_int(&connection->base.refcount, 1); if (manual_window_management) { connection->initial_stream_window_size = initial_window_size; if (http1_options->read_buffer_capacity > 0) { connection->thread_data.read_buffer.capacity = http1_options->read_buffer_capacity; } else { /* User did not set capacity, choose something reasonable based on initial_window_size */ /* NOTE: These values are currently guesses, we should test to find good values */ const size_t clamp_min = aws_min_size(g_aws_channel_max_fragment_size * 4, /*256KB*/ 256 * 1024); const size_t clamp_max = /*1MB*/ 1 * 1024 * 1024; connection->thread_data.read_buffer.capacity = aws_max_size(clamp_min, aws_min_size(clamp_max, initial_window_size)); } connection->thread_data.connection_window = connection->thread_data.read_buffer.capacity; } else { /* No backpressure, keep connection window at SIZE_MAX */ connection->initial_stream_window_size = SIZE_MAX; connection->thread_data.read_buffer.capacity = SIZE_MAX; connection->thread_data.connection_window = SIZE_MAX; } aws_h1_encoder_init(&connection->thread_data.encoder, alloc); aws_channel_task_init( &connection->outgoing_stream_task, s_outgoing_stream_task, connection, "http1_connection_outgoing_stream"); aws_channel_task_init( &connection->cross_thread_work_task, s_cross_thread_work_task, connection, "http1_connection_cross_thread_work"); aws_linked_list_init(&connection->thread_data.stream_list); aws_linked_list_init(&connection->thread_data.read_buffer.messages); aws_crt_statistics_http1_channel_init(&connection->thread_data.stats); int err = aws_mutex_init(&connection->synced_data.lock); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to initialize mutex, error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error_mutex; } aws_linked_list_init(&connection->synced_data.new_client_stream_list); connection->synced_data.is_open = true; struct aws_h1_decoder_params options = { .alloc = alloc, .is_decoding_requests = server, .user_data = connection, .vtable = s_h1_decoder_vtable, .scratch_space_initial_size = DECODER_INITIAL_SCRATCH_SIZE, }; connection->thread_data.incoming_stream_decoder = aws_h1_decoder_new(&options); if (!connection->thread_data.incoming_stream_decoder) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to create decoder, error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto 
error_decoder; } return connection; error_decoder: aws_mutex_clean_up(&connection->synced_data.lock); error_mutex: aws_mem_release(alloc, connection); error_connection_alloc: return NULL; } struct aws_http_connection *aws_http_connection_new_http1_1_server( struct aws_allocator *allocator, bool manual_window_management, size_t initial_window_size, const struct aws_http1_connection_options *http1_options) { struct aws_h1_connection *connection = s_connection_new(allocator, manual_window_management, initial_window_size, http1_options, true /*is_server*/); if (!connection) { return NULL; } connection->base.server_data = &connection->base.client_or_server_data.server; return &connection->base; } struct aws_http_connection *aws_http_connection_new_http1_1_client( struct aws_allocator *allocator, bool manual_window_management, size_t initial_window_size, const struct aws_http1_connection_options *http1_options) { struct aws_h1_connection *connection = s_connection_new(allocator, manual_window_management, initial_window_size, http1_options, false /*is_server*/); if (!connection) { return NULL; } connection->base.client_data = &connection->base.client_or_server_data.client; return &connection->base; } static void s_handler_destroy(struct aws_channel_handler *handler) { struct aws_h1_connection *connection = handler->impl; AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "id=%p: Destroying connection.", (void *)&connection->base); AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.stream_list)); AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.new_client_stream_list)); /* Clean up any buffered read messages. */ while (!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.read_buffer.messages); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); aws_mem_release(msg->allocator, msg); } aws_h1_decoder_destroy(connection->thread_data.incoming_stream_decoder); aws_h1_encoder_clean_up(&connection->thread_data.encoder); aws_mutex_clean_up(&connection->synced_data.lock); aws_mem_release(connection->base.alloc, connection); } static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) { struct aws_h1_connection *connection = handler->impl; connection->base.channel_slot = slot; /* Acquire a hold on the channel to prevent its destruction until the user has * given the go-ahead via aws_http_connection_release() */ aws_channel_acquire_hold(slot->channel); } /* Try to send the next queued aws_io_message to the downstream handler. * This can only be called after the connection has switched protocols and becoming a midchannel handler. 
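 * If the downstream read window is smaller than the queued message, only that many bytes are
 * copied into a fresh aws_io_message and queued_msg->copy_mark records how far we got; the
 * remainder is retried once the downstream window grows. Otherwise the buffered message is
 * forwarded as-is, with no copy.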
*/ static int s_try_process_next_midchannel_read_message(struct aws_h1_connection *connection, bool *out_stop_processing) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_ASSERT(connection->thread_data.has_switched_protocols); AWS_ASSERT(!connection->thread_data.is_reading_stopped); AWS_ASSERT(!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)); *out_stop_processing = false; struct aws_io_message *sending_msg = NULL; if (!connection->base.channel_slot->adj_right) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Connection has switched protocols, but no handler is installed to deal with this data.", (void *)connection); return aws_raise_error(AWS_ERROR_HTTP_SWITCHED_PROTOCOLS); } size_t downstream_window = aws_channel_slot_downstream_read_window(connection->base.channel_slot); if (downstream_window == 0) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Downstream window is 0, cannot send switched-protocol message now.", (void *)&connection->base); *out_stop_processing = true; return AWS_OP_SUCCESS; } struct aws_linked_list_node *queued_msg_node = aws_linked_list_front(&connection->thread_data.read_buffer.messages); struct aws_io_message *queued_msg = AWS_CONTAINER_OF(queued_msg_node, struct aws_io_message, queueing_handle); /* Note that copy_mark is used to mark the progress of partially sent messages. */ AWS_ASSERT(queued_msg->message_data.len > queued_msg->copy_mark); size_t sending_bytes = aws_min_size(queued_msg->message_data.len - queued_msg->copy_mark, downstream_window); AWS_ASSERT(connection->thread_data.read_buffer.pending_bytes >= sending_bytes); connection->thread_data.read_buffer.pending_bytes -= sending_bytes; /* If we can't send the whole entire queued_msg, copy its data into a new aws_io_message and send that. */ if (sending_bytes != queued_msg->message_data.len) { sending_msg = aws_channel_acquire_message_from_pool( connection->base.channel_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, sending_bytes); if (!sending_msg) { goto error; } aws_byte_buf_write( &sending_msg->message_data, queued_msg->message_data.buffer + queued_msg->copy_mark, sending_bytes); queued_msg->copy_mark += sending_bytes; AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Sending %zu bytes switched-protocol message to downstream handler, %zu bytes remain.", (void *)&connection->base, sending_bytes, queued_msg->message_data.len - queued_msg->copy_mark); /* If the last of queued_msg has been copied, it can be deleted now. */ if (queued_msg->copy_mark == queued_msg->message_data.len) { aws_linked_list_remove(queued_msg_node); aws_mem_release(queued_msg->allocator, queued_msg); } } else { /* Sending all of queued_msg along. 
*/ AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Sending full switched-protocol message of size %zu to downstream handler.", (void *)&connection->base, queued_msg->message_data.len); aws_linked_list_remove(queued_msg_node); sending_msg = queued_msg; } int err = aws_channel_slot_send_message(connection->base.channel_slot, sending_msg, AWS_CHANNEL_DIR_READ); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Failed to send message in read direction, error %d (%s).", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); goto error; } return AWS_OP_SUCCESS; error: if (sending_msg) { aws_mem_release(sending_msg->allocator, sending_msg); } return AWS_OP_ERR; } static struct aws_http_stream *s_new_server_request_handler_stream( const struct aws_http_request_handler_options *options) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(options->server_connection, struct aws_h1_connection, base); if (!aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel) || !connection->thread_data.can_create_request_handler_stream) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: aws_http_stream_new_server_request_handler() can only be called during incoming request callback.", (void *)&connection->base); aws_raise_error(AWS_ERROR_INVALID_STATE); return NULL; } struct aws_h1_stream *stream = aws_h1_stream_new_request_handler(options); if (!stream) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Failed to create request handler stream, error %d (%s).", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); return NULL; } /* * Success! * Everything beyond this point cannot fail */ /* Prevent further streams from being created until it's ok to do so. */ connection->thread_data.can_create_request_handler_stream = false; /* Stream is waiting for response. */ aws_linked_list_push_back(&connection->thread_data.stream_list, &stream->node); /* Connection owns stream, and must outlive stream */ aws_http_connection_acquire(&connection->base); AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Created request handler stream on server connection=%p", (void *)&stream->base, (void *)&connection->base); return &stream->base; } /* Invokes the on_incoming_request callback and returns new stream. */ static struct aws_h1_stream *s_server_invoke_on_incoming_request(struct aws_h1_connection *connection) { AWS_PRECONDITION(connection->base.server_data); AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_PRECONDITION(!connection->thread_data.can_create_request_handler_stream); AWS_PRECONDITION(!connection->thread_data.incoming_stream); /** * The user MUST create the new request-handler stream during the on-incoming-request callback. */ connection->thread_data.can_create_request_handler_stream = true; struct aws_http_stream *new_stream = connection->base.server_data->on_incoming_request(&connection->base, connection->base.user_data); connection->thread_data.can_create_request_handler_stream = false; return new_stream ? 
AWS_CONTAINER_OF(new_stream, struct aws_h1_stream, base) : NULL; } static int s_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)slot; struct aws_h1_connection *connection = handler->impl; const size_t message_size = message->message_data.len; AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Incoming message of size %zu.", (void *)&connection->base, message_size); /* Shrink connection window by amount of data received. See comments at variable's * declaration site on why we use this instead of the official `aws_channel_slot.window_size`. */ if (message_size > connection->thread_data.connection_window) { /* This error shouldn't be possible, but this is all complicated so check at runtime to be safe. */ AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Internal error. Message exceeds connection's window.", (void *)&connection->base); return aws_raise_error(AWS_ERROR_INVALID_STATE); } connection->thread_data.connection_window -= message_size; /* Push message into queue of buffered messages */ aws_linked_list_push_back(&connection->thread_data.read_buffer.messages, &message->queueing_handle); connection->thread_data.read_buffer.pending_bytes += message_size; /* Try to process messages in queue */ aws_h1_connection_try_process_read_messages(connection); return AWS_OP_SUCCESS; } void aws_h1_connection_try_process_read_messages(struct aws_h1_connection *connection) { /* Protect against this function being called recursively. */ if (connection->thread_data.is_processing_read_messages) { return; } connection->thread_data.is_processing_read_messages = true; /* Process queued messages */ while (!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)) { if (connection->thread_data.is_reading_stopped) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Cannot process message because connection is shutting down.", (void *)&connection->base); aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); goto shutdown; } bool stop_processing = false; /* When connection has switched protocols, messages are processed very differently. * We need to do this check in the middle of the normal processing loop, * in case the switch happens in the middle of processing a message. */ if (connection->thread_data.has_switched_protocols) { if (s_try_process_next_midchannel_read_message(connection, &stop_processing)) { goto shutdown; } } else { if (s_try_process_next_stream_read_message(connection, &stop_processing)) { goto shutdown; } } /* Break out of loop if we can't process any more data */ if (stop_processing) { break; } } /* Increment connection window, if necessary */ if (s_update_connection_window(connection)) { goto shutdown; } connection->thread_data.is_processing_read_messages = false; return; shutdown: s_shutdown_due_to_error(connection, aws_last_error()); } /* Try to process the next queued aws_io_message as normal HTTP data for an aws_http_stream. * This MUST NOT be called if the connection has switched protocols and become a midchannel handler. 
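 * Incoming bytes are clamped to the current stream's flow-control window and fed to the HTTP/1
 * decoder. If no incoming stream exists yet, a client connection treats that as an error, while
 * a server connection first asks the application for a new request-handler stream.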
*/ static int s_try_process_next_stream_read_message(struct aws_h1_connection *connection, bool *out_stop_processing) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_ASSERT(!connection->thread_data.has_switched_protocols); AWS_ASSERT(!connection->thread_data.is_reading_stopped); AWS_ASSERT(!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)); *out_stop_processing = false; /* Ensure that an incoming stream exists to receive the data */ if (!connection->thread_data.incoming_stream) { if (aws_http_connection_is_client(&connection->base)) { /* Client side */ AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Cannot process message because no requests are currently awaiting response, closing " "connection.", (void *)&connection->base); return aws_raise_error(AWS_ERROR_INVALID_STATE); } else { /* Server side. * Invoke on-incoming-request callback. The user MUST create a new stream from this callback. * The new stream becomes the current incoming stream */ s_set_incoming_stream_ptr(connection, s_server_invoke_on_incoming_request(connection)); if (!connection->thread_data.incoming_stream) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Incoming request callback failed to provide a new stream, last error %d (%s). " "Closing connection.", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } } } struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream; /* Stop processing if stream's window reaches 0. */ const uint64_t stream_window = incoming_stream->thread_data.stream_window; if (stream_window == 0) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: HTTP-stream's window is 0, cannot process message now.", (void *)&connection->base); *out_stop_processing = true; return AWS_OP_SUCCESS; } struct aws_linked_list_node *queued_msg_node = aws_linked_list_front(&connection->thread_data.read_buffer.messages); struct aws_io_message *queued_msg = AWS_CONTAINER_OF(queued_msg_node, struct aws_io_message, queueing_handle); /* Note that copy_mark is used to mark the progress of partially decoded messages */ struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&queued_msg->message_data); aws_byte_cursor_advance(&message_cursor, queued_msg->copy_mark); /* Don't process more data than the stream's window can accept. * * TODO: Let the decoder know about stream-window size so it can stop itself, * instead of limiting the amount of data we feed into the decoder at a time. * This would be more optimal, AND avoid an edge-case where the stream-window goes * to 0 as the body ends, and the connection can't proceed to the trailing headers. 
*/ message_cursor.len = (size_t)aws_min_u64(message_cursor.len, stream_window); const size_t prev_cursor_len = message_cursor.len; /* Set some decoder state, based on current stream */ aws_h1_decoder_set_logging_id(connection->thread_data.incoming_stream_decoder, incoming_stream); bool body_headers_ignored = incoming_stream->base.request_method == AWS_HTTP_METHOD_HEAD; aws_h1_decoder_set_body_headers_ignored(connection->thread_data.incoming_stream_decoder, body_headers_ignored); if (incoming_stream->base.metrics.receive_start_timestamp_ns == -1) { /* That's the first time for the stream receives any message */ aws_high_res_clock_get_ticks((uint64_t *)&incoming_stream->base.metrics.receive_start_timestamp_ns); if (incoming_stream->base.client_data && incoming_stream->base.client_data->response_first_byte_timeout_task.fn != NULL) { /* There is an outstanding response timeout task, as we already received the data, we can cancel it now. We * are safe to do it as we always on connection thread to schedule the task or cancel it */ struct aws_event_loop *connection_loop = aws_channel_get_event_loop(connection->base.channel_slot->channel); /* The task will be zeroed out within the call */ aws_event_loop_cancel_task( connection_loop, &incoming_stream->base.client_data->response_first_byte_timeout_task); } } /* As decoder runs, it invokes the internal s_decoder_X callbacks, which in turn invoke user callbacks. * The decoder will stop once it hits the end of the request/response OR the end of the message data. */ if (aws_h1_decode(connection->thread_data.incoming_stream_decoder, &message_cursor)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Message processing failed, error %d (%s). Closing connection.", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } size_t bytes_processed = prev_cursor_len - message_cursor.len; queued_msg->copy_mark += bytes_processed; AWS_ASSERT(connection->thread_data.read_buffer.pending_bytes >= bytes_processed); connection->thread_data.read_buffer.pending_bytes -= bytes_processed; AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Decoded %zu bytes of message, %zu bytes remain.", (void *)&connection->base, bytes_processed, queued_msg->message_data.len - queued_msg->copy_mark); /* If the last of queued_msg has been processed, it can be deleted now. * Otherwise, it remains in the queue for further processing later. */ if (queued_msg->copy_mark == queued_msg->message_data.len) { aws_linked_list_remove(&queued_msg->queueing_handle); aws_mem_release(queued_msg->allocator, queued_msg); } return AWS_OP_SUCCESS; } static int s_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct aws_h1_connection *connection = handler->impl; if (connection->thread_data.is_writing_stopped) { aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); goto error; } if (!connection->thread_data.has_switched_protocols) { aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } /* Pass the message right along. 
*/ int err = aws_channel_slot_send_message(slot, message, AWS_CHANNEL_DIR_WRITE); if (err) { goto error; } return AWS_OP_SUCCESS; error: AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Destroying write message without passing it along, error %d (%s)", (void *)&connection->base, aws_last_error(), aws_error_name(aws_last_error())); if (message->on_completion) { message->on_completion(connection->base.channel_slot->channel, message, aws_last_error(), message->user_data); } aws_mem_release(message->allocator, message); s_shutdown_due_to_error(connection, aws_last_error()); return AWS_OP_SUCCESS; } static int s_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)slot; struct aws_h1_connection *connection = handler->impl; if (!connection->thread_data.has_switched_protocols) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: HTTP connection cannot have a downstream handler without first switching protocols", (void *)&connection->base); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Handler in read direction incremented read window by %zu. Sending queued messages, if any.", (void *)&connection->base, size); /* Send along any queued messages, and increment connection's window if necessary */ aws_h1_connection_try_process_read_messages(connection); return AWS_OP_SUCCESS; error: s_shutdown_due_to_error(connection, aws_last_error()); return AWS_OP_SUCCESS; } static int s_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { (void)free_scarce_resources_immediately; struct aws_h1_connection *connection = handler->impl; AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Channel shutting down in %s direction with error code %d (%s).", (void *)&connection->base, (dir == AWS_CHANNEL_DIR_READ) ? "read" : "write", error_code, aws_error_name(error_code)); if (dir == AWS_CHANNEL_DIR_READ) { /* This call ensures that no further streams will be created or worked on. */ s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, error_code); } else /* dir == AWS_CHANNEL_DIR_WRITE */ { s_stop(connection, false /*stop_reading*/, true /*stop_writing*/, false /*schedule_shutdown*/, error_code); /* Mark all pending streams as complete. */ int stream_error_code = error_code == AWS_ERROR_SUCCESS ? AWS_ERROR_HTTP_CONNECTION_CLOSED : error_code; while (!aws_linked_list_empty(&connection->thread_data.stream_list)) { struct aws_linked_list_node *node = aws_linked_list_front(&connection->thread_data.stream_list); s_stream_complete(AWS_CONTAINER_OF(node, struct aws_h1_stream, node), stream_error_code); } /* It's OK to access synced_data.new_client_stream_list without holding the lock because * no more streams can be added after s_stop() has been invoked. 
*/ while (!aws_linked_list_empty(&connection->synced_data.new_client_stream_list)) { struct aws_linked_list_node *node = aws_linked_list_front(&connection->synced_data.new_client_stream_list); s_stream_complete(AWS_CONTAINER_OF(node, struct aws_h1_stream, node), stream_error_code); } } aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); return AWS_OP_SUCCESS; } static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) { struct aws_h1_connection *connection = handler->impl; return connection->thread_data.connection_window; } static size_t s_handler_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static void s_reset_statistics(struct aws_channel_handler *handler) { struct aws_h1_connection *connection = handler->impl; aws_crt_statistics_http1_channel_reset(&connection->thread_data.stats); } static void s_pull_up_stats_timestamps(struct aws_h1_connection *connection) { uint64_t now_ns = 0; if (aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns)) { return; } if (connection->thread_data.outgoing_stream) { s_add_time_measurement_to_stats( connection->thread_data.outgoing_stream_timestamp_ns, now_ns, &connection->thread_data.stats.pending_outgoing_stream_ms); connection->thread_data.outgoing_stream_timestamp_ns = now_ns; connection->thread_data.stats.current_outgoing_stream_id = aws_http_stream_get_id(&connection->thread_data.outgoing_stream->base); } if (connection->thread_data.incoming_stream) { s_add_time_measurement_to_stats( connection->thread_data.incoming_stream_timestamp_ns, now_ns, &connection->thread_data.stats.pending_incoming_stream_ms); connection->thread_data.incoming_stream_timestamp_ns = now_ns; connection->thread_data.stats.current_incoming_stream_id = aws_http_stream_get_id(&connection->thread_data.incoming_stream->base); } } static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) { struct aws_h1_connection *connection = handler->impl; /* TODO: Need update the way we calculate statistics, to account for user-controlled pauses. * If user is adding chunks 1 by 1, there can naturally be a gap in the upload. * If the user lets the stream-window go to zero, there can naturally be a gap in the download. */ s_pull_up_stats_timestamps(connection); void *stats_base = &connection->thread_data.stats; aws_array_list_push_back(stats, &stats_base); } struct aws_crt_statistics_http1_channel *aws_h1_connection_get_statistics(struct aws_http_connection *connection) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->channel_slot->channel)); struct aws_h1_connection *h1_conn = (void *)connection; return &h1_conn->thread_data.stats; } struct aws_h1_window_stats aws_h1_connection_window_stats(struct aws_http_connection *connection_base) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); struct aws_h1_window_stats stats = { .connection_window = connection->thread_data.connection_window, .buffer_capacity = connection->thread_data.read_buffer.capacity, .buffer_pending_bytes = connection->thread_data.read_buffer.pending_bytes, .recent_window_increments = connection->thread_data.recent_window_increments, .has_incoming_stream = connection->thread_data.incoming_stream != NULL, .stream_window = connection->thread_data.incoming_stream ? 
connection->thread_data.incoming_stream->thread_data.stream_window : 0, }; /* Resets each time it's queried */ connection->thread_data.recent_window_increments = 0; return stats; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h1_decoder.c000066400000000000000000000715051456575232400236600ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_chunked, "chunked"); AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_compress, "compress"); AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_x_compress, "x-compress"); AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_deflate, "deflate"); AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_gzip, "gzip"); AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_x_gzip, "x-gzip"); /* Decoder runs a state machine. * Each state consumes data until it sets the next state. * A common state is the "line state", which handles consuming one line ending in CRLF * and feeding the line to a linestate_fn, which should process data and set the next state. */ typedef int(state_fn)(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input); typedef int(linestate_fn)(struct aws_h1_decoder *decoder, struct aws_byte_cursor input); struct aws_h1_decoder { /* Implementation data. */ struct aws_allocator *alloc; struct aws_byte_buf scratch_space; state_fn *run_state; linestate_fn *process_line; int transfer_encoding; uint64_t content_processed; uint64_t content_length; uint64_t chunk_processed; uint64_t chunk_size; bool doing_trailers; bool is_done; bool body_headers_ignored; bool body_headers_forbidden; enum aws_http_header_block header_block; const void *logging_id; /* User callbacks and settings. */ struct aws_h1_decoder_vtable vtable; bool is_decoding_requests; void *user_data; }; static int s_linestate_request(struct aws_h1_decoder *decoder, struct aws_byte_cursor input); static int s_linestate_response(struct aws_h1_decoder *decoder, struct aws_byte_cursor input); static int s_linestate_header(struct aws_h1_decoder *decoder, struct aws_byte_cursor input); static int s_linestate_chunk_size(struct aws_h1_decoder *decoder, struct aws_byte_cursor input); static bool s_scan_for_crlf(struct aws_h1_decoder *decoder, struct aws_byte_cursor input, size_t *bytes_processed) { AWS_ASSERT(input.len > 0); /* In a loop, scan for "\n", then look one char back for "\r" */ uint8_t *ptr = input.ptr; uint8_t *end = input.ptr + input.len; while (ptr != end) { uint8_t *newline = (uint8_t *)memchr(ptr, '\n', end - ptr); if (!newline) { break; } uint8_t prev_char; if (newline == input.ptr) { /* If "\n" is first character check scratch_space for previous character */ if (decoder->scratch_space.len > 0) { prev_char = decoder->scratch_space.buffer[decoder->scratch_space.len - 1]; } else { prev_char = 0; } } else { prev_char = *(newline - 1); } if (prev_char == '\r') { *bytes_processed = 1 + (newline - input.ptr); return true; } ptr = newline + 1; } *bytes_processed = input.len; return false; } /* This state consumes an entire line, then calls a linestate_fn to process the line. */ static int s_state_getline(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) { /* If preceding runs of this state failed to find CRLF, their data is stored in the scratch_space * and new data needs to be combined with the old data for processing. 
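 * Worked example (illustrative): if one aws_io_message ends with the partial line "HTTP/1.1 20"
 * and the next begins "0 OK\r\n", the first pass finds no CRLF and copies "HTTP/1.1 20" into
 * scratch_space; the second pass appends "0 OK\r\n" and the complete line (minus the CRLF) is
 * handed to process_line. s_scan_for_crlf also consults scratch_space, so a CRLF split across
 * two messages ("...\r" then "\n...") is still detected.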
*/ bool has_prev_data = decoder->scratch_space.len; size_t line_length = 0; bool found_crlf = s_scan_for_crlf(decoder, *input, &line_length); /* Found end of line! Run the line processor on it */ struct aws_byte_cursor line = aws_byte_cursor_advance(input, line_length); bool use_scratch = !found_crlf | has_prev_data; if (AWS_UNLIKELY(use_scratch)) { if (aws_byte_buf_append_dynamic(&decoder->scratch_space, &line)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Internal buffer write failed with error code %d (%s)", decoder->logging_id, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } /* Line is actually the entire scratch buffer now */ line = aws_byte_cursor_from_buf(&decoder->scratch_space); } if (AWS_LIKELY(found_crlf)) { /* Backup so "\r\n" is not included. */ /* RFC-7230 section 3 Message Format */ AWS_ASSERT(line.len >= 2); line.len -= 2; return decoder->process_line(decoder, line); } /* Didn't find crlf, we'll continue scanning when more data comes in */ return AWS_OP_SUCCESS; } static int s_cursor_split_impl( struct aws_byte_cursor input, char split_on, struct aws_byte_cursor *cursor_array, size_t num_cursors, bool error_if_more_splits_possible) { struct aws_byte_cursor split; AWS_ZERO_STRUCT(split); for (size_t i = 0; i < num_cursors; ++i) { if (!aws_byte_cursor_next_split(&input, split_on, &split)) { return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } cursor_array[i] = split; } if (error_if_more_splits_possible) { if (aws_byte_cursor_next_split(&input, split_on, &split)) { return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } } else { /* Otherwise, the last cursor will contain the remainder of the string */ struct aws_byte_cursor *last_cursor = &cursor_array[num_cursors - 1]; last_cursor->len = (input.ptr + input.len) - last_cursor->ptr; } return AWS_OP_SUCCESS; } /* Final cursor contains remainder of input. 
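 * e.g. splitting "HTTP/1.1 200 OK Everything Fine" on ' ' into 3 cursors yields
 * "HTTP/1.1", "200", and "OK Everything Fine" (the reason-phrase keeps its spaces),
 * whereas s_cursor_split_exactly_n_times would reject that input because more splits
 * are possible.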
*/ static int s_cursor_split_first_n_times( struct aws_byte_cursor input, char split_on, struct aws_byte_cursor *cursor_array, size_t num_cursors) { return s_cursor_split_impl(input, split_on, cursor_array, num_cursors, false); } /* Error if input could have been split more times */ static int s_cursor_split_exactly_n_times( struct aws_byte_cursor input, char split_on, struct aws_byte_cursor *cursor_array, size_t num_cursors) { return s_cursor_split_impl(input, split_on, cursor_array, num_cursors, true); } static void s_set_state(struct aws_h1_decoder *decoder, state_fn *state) { decoder->scratch_space.len = 0; decoder->run_state = state; decoder->process_line = NULL; } /* Set next state to capture a full line, then call the specified linestate_fn on it */ static void s_set_line_state(struct aws_h1_decoder *decoder, linestate_fn *line_processor) { s_set_state(decoder, s_state_getline); decoder->process_line = line_processor; } static int s_mark_done(struct aws_h1_decoder *decoder) { decoder->is_done = true; return decoder->vtable.on_done(decoder->user_data); } /* Reset state, in preparation for processing a new message */ static void s_reset_state(struct aws_h1_decoder *decoder) { if (decoder->is_decoding_requests) { s_set_line_state(decoder, s_linestate_request); } else { s_set_line_state(decoder, s_linestate_response); } decoder->transfer_encoding = 0; decoder->content_processed = 0; decoder->content_length = 0; decoder->chunk_processed = 0; decoder->chunk_size = 0; decoder->doing_trailers = false; decoder->is_done = false; decoder->body_headers_ignored = false; decoder->body_headers_forbidden = false; /* set to normal by default */ decoder->header_block = AWS_HTTP_HEADER_BLOCK_MAIN; } static int s_state_unchunked_body(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) { size_t processed_bytes = 0; AWS_FATAL_ASSERT(decoder->content_processed < decoder->content_length); /* shouldn't be possible */ if (input->len > (decoder->content_length - decoder->content_processed)) { processed_bytes = (size_t)(decoder->content_length - decoder->content_processed); } else { processed_bytes = input->len; } decoder->content_processed += processed_bytes; bool finished = decoder->content_processed == decoder->content_length; struct aws_byte_cursor body = aws_byte_cursor_advance(input, processed_bytes); int err = decoder->vtable.on_body(&body, finished, decoder->user_data); if (err) { return AWS_OP_ERR; } if (AWS_LIKELY(finished)) { err = s_mark_done(decoder); if (err) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_linestate_chunk_terminator(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) { /* Expecting CRLF at end of each chunk */ /* RFC-7230 section 4.1 Chunked Transfer Encoding */ if (AWS_UNLIKELY(input.len != 0)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming chunk is invalid, does not end with CRLF.", decoder->logging_id); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } s_set_line_state(decoder, s_linestate_chunk_size); return AWS_OP_SUCCESS; } static int s_state_chunk(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) { size_t processed_bytes = 0; AWS_ASSERT(decoder->chunk_processed < decoder->chunk_size); if (input->len > (decoder->chunk_size - decoder->chunk_processed)) { processed_bytes = (size_t)(decoder->chunk_size - decoder->chunk_processed); } else { processed_bytes = input->len; } decoder->chunk_processed += processed_bytes; bool finished = decoder->chunk_processed == decoder->chunk_size; struct aws_byte_cursor body = 
aws_byte_cursor_advance(input, processed_bytes); int err = decoder->vtable.on_body(&body, false, decoder->user_data); if (err) { return AWS_OP_ERR; } if (AWS_LIKELY(finished)) { s_set_line_state(decoder, s_linestate_chunk_terminator); } return AWS_OP_SUCCESS; } static int s_linestate_chunk_size(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) { struct aws_byte_cursor size; AWS_ZERO_STRUCT(size); if (!aws_byte_cursor_next_split(&input, ';', &size)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming chunk is invalid, first line is malformed.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad chunk line is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } int err = aws_byte_cursor_utf8_parse_u64_hex(size, &decoder->chunk_size); if (err) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Failed to parse size of incoming chunk.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad chunk size is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(size)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } decoder->chunk_processed = 0; /* Empty chunk signifies all chunks have been read. */ if (AWS_UNLIKELY(decoder->chunk_size == 0)) { struct aws_byte_cursor cursor; cursor.ptr = NULL; cursor.len = 0; err = decoder->vtable.on_body(&cursor, true, decoder->user_data); if (err) { return AWS_OP_ERR; } /* Expected empty newline and end of message. */ decoder->doing_trailers = true; s_set_line_state(decoder, s_linestate_header); return AWS_OP_SUCCESS; } /* Skip all chunk extensions, as they are optional. */ /* RFC-7230 section 4.1.1 Chunk Extensions */ s_set_state(decoder, s_state_chunk); return AWS_OP_SUCCESS; } static int s_linestate_header(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) { int err; /* The \r\n was just processed by `s_state_getline`. */ /* Empty line signifies end of headers, and beginning of body or end of trailers. */ /* RFC-7230 section 3 Message Format */ if (input.len == 0) { if (AWS_LIKELY(!decoder->doing_trailers)) { if (decoder->body_headers_ignored) { err = s_mark_done(decoder); if (err) { return AWS_OP_ERR; } } else if (decoder->transfer_encoding & AWS_HTTP_TRANSFER_ENCODING_CHUNKED) { s_set_line_state(decoder, s_linestate_chunk_size); } else if (decoder->content_length > 0) { s_set_state(decoder, s_state_unchunked_body); } else { err = s_mark_done(decoder); if (err) { return AWS_OP_ERR; } } } else { /* Empty line means end of message. */ err = s_mark_done(decoder); if (err) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } /* Each header field consists of a case-insensitive field name followed by a colon (":"), * optional leading whitespace, the field value, and optional trailing whitespace. 
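 * e.g. the line "Content-Length:   42  " is split on the first ':' into name "Content-Length"
 * and raw value "   42  "; the value is whitespace-trimmed to "42" before validation, while any
 * further ':' characters remain part of the value (e.g. "Host: example.com:8080").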
* RFC-7230 3.2 */ struct aws_byte_cursor splits[2]; err = s_cursor_split_first_n_times(input, ':', splits, 2); /* value may contain more colons */ if (err) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, missing colon.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } struct aws_byte_cursor name = splits[0]; if (!aws_strutil_is_http_token(name)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, bad name.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } struct aws_byte_cursor value = aws_strutil_trim_http_whitespace(splits[1]); if (!aws_strutil_is_http_field_value(value)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, bad value.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } struct aws_h1_decoded_header header; header.name = aws_http_str_to_header_name(name); header.name_data = name; header.value_data = value; header.data = input; switch (header.name) { case AWS_HTTP_HEADER_CONTENT_LENGTH: if (decoder->transfer_encoding) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming headers for both content-length and transfer-encoding received. This is illegal.", decoder->logging_id); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } if (aws_byte_cursor_utf8_parse_u64(header.value_data, &decoder->content_length)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming content-length header has invalid value.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad content-length value is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(header.value_data)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } if (decoder->body_headers_forbidden && decoder->content_length != 0) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming headers for content-length received, but it is illegal for this message to have a " "body", decoder->logging_id); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } break; case AWS_HTTP_HEADER_TRANSFER_ENCODING: { if (decoder->content_length) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming headers for both content-length and transfer-encoding received. This is illegal.", decoder->logging_id); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } if (decoder->body_headers_forbidden) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming headers for transfer-encoding received, but it is illegal for this message to " "have a body", decoder->logging_id); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } /* RFC-7230 section 3.3.1 Transfer-Encoding */ /* RFC-7230 section 4.2 Compression Codings */ /* Note that it's possible for multiple Transfer-Encoding headers to exist, in which case the values * should be appended with those from any previously encountered Transfer-Encoding headers. 
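 * e.g. receiving "Transfer-Encoding: gzip" followed by "Transfer-Encoding: chunked" is handled
 * like the single header "Transfer-Encoding: gzip, chunked": each recognized coding ORs another
 * AWS_HTTP_TRANSFER_ENCODING_* flag into decoder->transfer_encoding, and any coding listed
 * after "chunked" is rejected as a protocol error.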
*/ struct aws_byte_cursor split; AWS_ZERO_STRUCT(split); while (aws_byte_cursor_next_split(&header.value_data, ',', &split)) { struct aws_byte_cursor coding = aws_strutil_trim_http_whitespace(split); int prev_flags = decoder->transfer_encoding; if (aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_chunked, &coding)) { decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_CHUNKED; } else if ( aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_compress, &coding) || aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_x_compress, &coding)) { /* A recipient SHOULD consider "x-compress" to be equivalent to "compress". RFC-7230 4.2.1 */ decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_DEPRECATED_COMPRESS; } else if (aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_deflate, &coding)) { decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_DEFLATE; } else if ( aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_gzip, &coding) || aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_x_gzip, &coding)) { /* A recipient SHOULD consider "x-gzip" to be equivalent to "gzip". RFC-7230 4.2.3 */ decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_GZIP; } else if (coding.len > 0) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming transfer-encoding header lists unrecognized coding.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Unrecognized coding is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(coding)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } /* If any transfer coding other than chunked is applied to a request payload body, the sender MUST * apply chunked as the final transfer coding to ensure that the message is properly framed. * RFC-7230 3.3.1 */ if ((prev_flags & AWS_HTTP_TRANSFER_ENCODING_CHUNKED) && (decoder->transfer_encoding != prev_flags)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming transfer-encoding header lists a coding after 'chunked', this is illegal.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Misplaced coding is '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(coding)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } } /* TODO: deal with body of indeterminate length, marking it as successful when connection is closed: * * A response that has neither chunked transfer coding nor Content-Length is terminated by closure of * the connection and, thus, is considered complete regardless of the number of message body octets * received, provided that the header section was received intact. 
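 * e.g. as written, a response of "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n<data until EOF>"
 * is marked done with an empty body as soon as the blank line after the headers is seen,
 * because content_length stays 0 and no chunked coding was set.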
* RFC-7230 3.4 */ } break; default: break; } err = decoder->vtable.on_header(&header, decoder->user_data); if (err) { return AWS_OP_ERR; } s_set_line_state(decoder, s_linestate_header); return AWS_OP_SUCCESS; } static int s_linestate_request(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) { struct aws_byte_cursor cursors[3]; int err = s_cursor_split_exactly_n_times(input, ' ', cursors, 3); /* extra spaces not allowed */ if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming request line has wrong number of spaces.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad request line is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } for (size_t i = 0; i < AWS_ARRAY_SIZE(cursors); ++i) { if (cursors[i].len == 0) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request line has empty values.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad request line is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } } struct aws_byte_cursor method = cursors[0]; struct aws_byte_cursor uri = cursors[1]; struct aws_byte_cursor version = cursors[2]; if (!aws_strutil_is_http_token(method)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request has invalid method.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad request line is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } if (!aws_strutil_is_http_request_target(uri)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request has invalid path.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad request line is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } struct aws_byte_cursor version_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_1); if (!aws_byte_cursor_eq(&version, &version_expected)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming request uses unsupported HTTP version.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Unsupported version is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(version)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } err = decoder->vtable.on_request(aws_http_str_to_method(method), &method, &uri, decoder->user_data); if (err) { return AWS_OP_ERR; } s_set_line_state(decoder, s_linestate_header); return AWS_OP_SUCCESS; } static bool s_check_info_response_status_code(int code_val) { return code_val >= 100 && code_val < 200; } static int s_linestate_response(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) { struct aws_byte_cursor cursors[3]; int err = s_cursor_split_first_n_times(input, ' ', cursors, 3); /* phrase may contain spaces */ if (err) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response status line is invalid.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad status line is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } struct aws_byte_cursor version = cursors[0]; struct aws_byte_cursor code = cursors[1]; struct aws_byte_cursor phrase = cursors[2]; struct aws_byte_cursor version_1_1_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_1); struct aws_byte_cursor version_1_0_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_0); if 
(!aws_byte_cursor_eq(&version, &version_1_1_expected) && !aws_byte_cursor_eq(&version, &version_1_0_expected)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Incoming response uses unsupported HTTP version.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Unsupported version is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(version)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } /* Validate phrase */ if (!aws_strutil_is_http_reason_phrase(phrase)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response has invalid reason phrase.", decoder->logging_id); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } /* Status-code is a 3-digit integer. RFC7230 section 3.1.2 */ uint64_t code_val_u64; err = aws_byte_cursor_utf8_parse_u64(code, &code_val_u64); if (err || code.len != 3 || code_val_u64 > 999) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response has invalid status code.", decoder->logging_id); AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Bad status code is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(code)); return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); } int code_val = (int)code_val_u64; /* RFC-7230 section 3.3 Message Body */ decoder->body_headers_ignored |= code_val == AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED; decoder->body_headers_forbidden = code_val == AWS_HTTP_STATUS_CODE_204_NO_CONTENT || code_val / 100 == 1; if (s_check_info_response_status_code(code_val)) { decoder->header_block = AWS_HTTP_HEADER_BLOCK_INFORMATIONAL; } err = decoder->vtable.on_response(code_val, decoder->user_data); if (err) { return AWS_OP_ERR; } s_set_line_state(decoder, s_linestate_header); return AWS_OP_SUCCESS; } struct aws_h1_decoder *aws_h1_decoder_new(struct aws_h1_decoder_params *params) { AWS_ASSERT(params); struct aws_h1_decoder *decoder = aws_mem_acquire(params->alloc, sizeof(struct aws_h1_decoder)); if (!decoder) { return NULL; } AWS_ZERO_STRUCT(*decoder); decoder->alloc = params->alloc; decoder->user_data = params->user_data; decoder->vtable = params->vtable; decoder->is_decoding_requests = params->is_decoding_requests; aws_byte_buf_init(&decoder->scratch_space, params->alloc, params->scratch_space_initial_size); s_reset_state(decoder); return decoder; } void aws_h1_decoder_destroy(struct aws_h1_decoder *decoder) { if (!decoder) { return; } aws_byte_buf_clean_up(&decoder->scratch_space); aws_mem_release(decoder->alloc, decoder); } int aws_h1_decode(struct aws_h1_decoder *decoder, struct aws_byte_cursor *data) { AWS_ASSERT(decoder); AWS_ASSERT(data); struct aws_byte_cursor backup = *data; while (data->len && !decoder->is_done) { int err = decoder->run_state(decoder, data); if (err) { /* Reset the data param to how we found it */ *data = backup; return AWS_OP_ERR; } } if (decoder->is_done) { s_reset_state(decoder); } return AWS_OP_SUCCESS; } int aws_h1_decoder_get_encoding_flags(const struct aws_h1_decoder *decoder) { return decoder->transfer_encoding; } uint64_t aws_h1_decoder_get_content_length(const struct aws_h1_decoder *decoder) { return decoder->content_length; } bool aws_h1_decoder_get_body_headers_ignored(const struct aws_h1_decoder *decoder) { return decoder->body_headers_ignored; } enum aws_http_header_block aws_h1_decoder_get_header_block(const struct aws_h1_decoder *decoder) { return decoder->header_block; } void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id) { decoder->logging_id = id; } void aws_h1_decoder_set_body_headers_ignored(struct aws_h1_decoder *decoder, bool 
body_headers_ignored) { decoder->body_headers_ignored = body_headers_ignored; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h1_encoder.c000066400000000000000000001100721456575232400236630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #define ENCODER_LOGF(level, encoder, text, ...) \ AWS_LOGF_##level(AWS_LS_HTTP_STREAM, "id=%p: " text, (void *)encoder->current_stream, __VA_ARGS__) #define ENCODER_LOG(level, encoder, text) ENCODER_LOGF(level, encoder, "%s", text) #define MAX_ASCII_HEX_CHUNK_STR_SIZE (sizeof(uint64_t) * 2 + 1) #define CRLF_SIZE 2 /** * Scan headers to detect errors and determine anything we'll need to know later (ex: total length). */ static int s_scan_outgoing_headers( struct aws_h1_encoder_message *encoder_message, const struct aws_http_message *message, size_t *out_header_lines_len, bool body_headers_ignored, bool body_headers_forbidden) { size_t total = 0; bool has_body_stream = aws_http_message_get_body_stream(message); bool has_content_length_header = false; bool has_transfer_encoding_header = false; const size_t num_headers = aws_http_message_get_header_count(message); for (size_t i = 0; i < num_headers; ++i) { struct aws_http_header header; aws_http_message_get_header(message, &header, i); /* Validate header field-name (RFC-7230 3.2): field-name = token */ if (!aws_strutil_is_http_token(header.name)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Header name is invalid"); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME); } /* Validate header field-value. * The value itself isn't supposed to have whitespace on either side, * but we'll trim it off before validation so we don't start needlessly * failing requests that used to work before we added validation. 
* This should be OK because field-value can be sent with any amount * of whitespace around it, which the other side will just ignore (RFC-7230 3.2): * header-field = field-name ":" OWS field-value OWS */ struct aws_byte_cursor field_value = aws_strutil_trim_http_whitespace(header.value); if (!aws_strutil_is_http_field_value(field_value)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Header '" PRInSTR "' has invalid value", AWS_BYTE_CURSOR_PRI(header.name)); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } enum aws_http_header_name name_enum = aws_http_str_to_header_name(header.name); switch (name_enum) { case AWS_HTTP_HEADER_CONNECTION: { if (aws_byte_cursor_eq_c_str(&field_value, "close")) { encoder_message->has_connection_close_header = true; } } break; case AWS_HTTP_HEADER_CONTENT_LENGTH: { has_content_length_header = true; if (aws_byte_cursor_utf8_parse_u64(field_value, &encoder_message->content_length)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Invalid Content-Length"); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } } break; case AWS_HTTP_HEADER_TRANSFER_ENCODING: { has_transfer_encoding_header = true; if (0 == field_value.len) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding must include a valid value"); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } struct aws_byte_cursor substr; AWS_ZERO_STRUCT(substr); while (aws_byte_cursor_next_split(&field_value, ',', &substr)) { struct aws_byte_cursor trimmed = aws_strutil_trim_http_whitespace(substr); if (0 == trimmed.len) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding header whitespace only " "comma delimited header value"); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } if (encoder_message->has_chunked_encoding_header) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding header must end with \"chunked\""); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } if (aws_byte_cursor_eq_c_str(&trimmed, "chunked")) { encoder_message->has_chunked_encoding_header = true; } } } break; default: break; } /* header-line: "{name}: {value}\r\n" */ int err = 0; err |= aws_add_size_checked(header.name.len, total, &total); err |= aws_add_size_checked(header.value.len, total, &total); err |= aws_add_size_checked(4, total, &total); /* ": " + "\r\n" */ if (err) { return AWS_OP_ERR; } } if (!encoder_message->has_chunked_encoding_header && has_transfer_encoding_header) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding header must include \"chunked\""); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } /* Per RFC 7230: A sender MUST NOT send a Content-Length header field in any message that contains a * Transfer-Encoding header field. */ if (encoder_message->has_chunked_encoding_header && has_content_length_header) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Both Content-Length and Transfer-Encoding are set. Only one may be used"); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } if (encoder_message->has_chunked_encoding_header && has_body_stream) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Both Transfer-Encoding chunked header and body stream is set. 
" "chunked data must use the chunk API to write the body stream."); return aws_raise_error(AWS_ERROR_HTTP_INVALID_BODY_STREAM); } if (body_headers_forbidden && (encoder_message->content_length > 0 || has_transfer_encoding_header)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding or Content-Length headers may not be present in such a message"); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_FIELD); } if (body_headers_ignored) { /* Don't send body, no matter what the headers are */ encoder_message->content_length = 0; encoder_message->has_chunked_encoding_header = false; } if (encoder_message->content_length > 0 && !has_body_stream) { return aws_raise_error(AWS_ERROR_HTTP_MISSING_BODY_STREAM); } *out_header_lines_len = total; return AWS_OP_SUCCESS; } static int s_scan_outgoing_trailer(const struct aws_http_headers *headers, size_t *out_size) { const size_t num_headers = aws_http_headers_count(headers); size_t total = 0; for (size_t i = 0; i < num_headers; i++) { struct aws_http_header header; aws_http_headers_get_index(headers, i, &header); /* Validate header field-name (RFC-7230 3.2): field-name = token */ if (!aws_strutil_is_http_token(header.name)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Header name is invalid"); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME); } /* Validate header field-value. * The value itself isn't supposed to have whitespace on either side, * but we'll trim it off before validation so we don't start needlessly * failing requests that used to work before we added validation. * This should be OK because field-value can be sent with any amount * of whitespace around it, which the other side will just ignore (RFC-7230 3.2): * header-field = field-name ":" OWS field-value OWS */ struct aws_byte_cursor field_value = aws_strutil_trim_http_whitespace(header.value); if (!aws_strutil_is_http_field_value(field_value)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Header '" PRInSTR "' has invalid value", AWS_BYTE_CURSOR_PRI(header.name)); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE); } enum aws_http_header_name name_enum = aws_http_str_to_header_name(header.name); if (name_enum == AWS_HTTP_HEADER_TRANSFER_ENCODING || name_enum == AWS_HTTP_HEADER_CONTENT_LENGTH || name_enum == AWS_HTTP_HEADER_HOST || name_enum == AWS_HTTP_HEADER_EXPECT || name_enum == AWS_HTTP_HEADER_CACHE_CONTROL || name_enum == AWS_HTTP_HEADER_MAX_FORWARDS || name_enum == AWS_HTTP_HEADER_PRAGMA || name_enum == AWS_HTTP_HEADER_RANGE || name_enum == AWS_HTTP_HEADER_TE || name_enum == AWS_HTTP_HEADER_CONTENT_ENCODING || name_enum == AWS_HTTP_HEADER_CONTENT_TYPE || name_enum == AWS_HTTP_HEADER_CONTENT_RANGE || name_enum == AWS_HTTP_HEADER_TRAILER || name_enum == AWS_HTTP_HEADER_WWW_AUTHENTICATE || name_enum == AWS_HTTP_HEADER_AUTHORIZATION || name_enum == AWS_HTTP_HEADER_PROXY_AUTHENTICATE || name_enum == AWS_HTTP_HEADER_PROXY_AUTHORIZATION || name_enum == AWS_HTTP_HEADER_SET_COOKIE || name_enum == AWS_HTTP_HEADER_COOKIE || name_enum == AWS_HTTP_HEADER_AGE || name_enum == AWS_HTTP_HEADER_EXPIRES || name_enum == AWS_HTTP_HEADER_DATE || name_enum == AWS_HTTP_HEADER_LOCATION || name_enum == AWS_HTTP_HEADER_RETRY_AFTER || name_enum == AWS_HTTP_HEADER_VARY || name_enum == AWS_HTTP_HEADER_WARNING) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=static: Trailing Header '" PRInSTR "' has invalid value", AWS_BYTE_CURSOR_PRI(header.name)); return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_FIELD); } int err = 0; err |= aws_add_size_checked(header.name.len, total, 
&total); err |= aws_add_size_checked(header.value.len, total, &total); err |= aws_add_size_checked(4, total, &total); /* ": " + "\r\n" */ if (err) { return AWS_OP_ERR; } } if (aws_add_size_checked(2, total, &total)) { /* "\r\n" */ return AWS_OP_ERR; } *out_size = total; return AWS_OP_SUCCESS; } static bool s_write_crlf(struct aws_byte_buf *dst) { AWS_PRECONDITION(aws_byte_buf_is_valid(dst)); struct aws_byte_cursor crlf_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\r\n"); return aws_byte_buf_write_from_whole_cursor(dst, crlf_cursor); } static void s_write_headers(struct aws_byte_buf *dst, const struct aws_http_headers *headers) { const size_t num_headers = aws_http_headers_count(headers); bool wrote_all = true; for (size_t i = 0; i < num_headers; ++i) { struct aws_http_header header; aws_http_headers_get_index(headers, i, &header); /* header-line: "{name}: {value}\r\n" */ wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, header.name); wrote_all &= aws_byte_buf_write_u8(dst, ':'); wrote_all &= aws_byte_buf_write_u8(dst, ' '); wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, header.value); wrote_all &= s_write_crlf(dst); } AWS_ASSERT(wrote_all); (void)wrote_all; } int aws_h1_encoder_message_init_from_request( struct aws_h1_encoder_message *message, struct aws_allocator *allocator, const struct aws_http_message *request, struct aws_linked_list *pending_chunk_list) { AWS_PRECONDITION(aws_linked_list_is_valid(pending_chunk_list)); AWS_ZERO_STRUCT(*message); message->body = aws_input_stream_acquire(aws_http_message_get_body_stream(request)); message->pending_chunk_list = pending_chunk_list; struct aws_byte_cursor method; int err = aws_http_message_get_request_method(request, &method); if (err) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request method not set"); aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD); goto error; } /* RFC-7230 3.1.1: method = token */ if (!aws_strutil_is_http_token(method)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request method is invalid"); aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD); goto error; } struct aws_byte_cursor uri; err = aws_http_message_get_request_path(request, &uri); if (err) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request path not set"); aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH); goto error; } if (!aws_strutil_is_http_request_target(uri)) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request path is invalid"); aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH); goto error; } struct aws_byte_cursor version = aws_http_version_to_str(AWS_HTTP_VERSION_1_1); /** * Calculate total size needed for outgoing_head_buffer, then write to buffer. 
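 * Illustrative layout for "GET /index.html HTTP/1.1" with one header "Host: example.com":
 *     "GET /index.html HTTP/1.1\r\n"   request-line: method + SP + uri + SP + version + CRLF
 *     "Host: example.com\r\n"          one header-line per header: name + ": " + value + CRLF
 *     "\r\n"                           head-end
 * head_total_len is the exact sum of those pieces, so the writes below are expected to fit
 * exactly (checked via wrote_all).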
*/ size_t header_lines_len; err = s_scan_outgoing_headers( message, request, &header_lines_len, false /*body_headers_ignored*/, false /*body_headers_forbidden*/); if (err) { goto error; } /* request-line: "{method} {uri} {version}\r\n" */ size_t request_line_len = 4; /* 2 spaces + "\r\n" */ err |= aws_add_size_checked(method.len, request_line_len, &request_line_len); err |= aws_add_size_checked(uri.len, request_line_len, &request_line_len); err |= aws_add_size_checked(version.len, request_line_len, &request_line_len); /* head-end: "\r\n" */ size_t head_end_len = 2; size_t head_total_len = request_line_len; err |= aws_add_size_checked(header_lines_len, head_total_len, &head_total_len); err |= aws_add_size_checked(head_end_len, head_total_len, &head_total_len); if (err) { goto error; } err = aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len); if (err) { goto error; } bool wrote_all = true; wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, method); wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' '); wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, uri); wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' '); wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, version); wrote_all &= s_write_crlf(&message->outgoing_head_buf); s_write_headers(&message->outgoing_head_buf, aws_http_message_get_const_headers(request)); wrote_all &= s_write_crlf(&message->outgoing_head_buf); (void)wrote_all; AWS_ASSERT(wrote_all); return AWS_OP_SUCCESS; error: aws_h1_encoder_message_clean_up(message); return AWS_OP_ERR; } int aws_h1_encoder_message_init_from_response( struct aws_h1_encoder_message *message, struct aws_allocator *allocator, const struct aws_http_message *response, bool body_headers_ignored, struct aws_linked_list *pending_chunk_list) { AWS_PRECONDITION(aws_linked_list_is_valid(pending_chunk_list)); AWS_ZERO_STRUCT(*message); message->body = aws_input_stream_acquire(aws_http_message_get_body_stream(response)); message->pending_chunk_list = pending_chunk_list; struct aws_byte_cursor version = aws_http_version_to_str(AWS_HTTP_VERSION_1_1); int status_int; int err = aws_http_message_get_response_status(response, &status_int); if (err) { return aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE); } /* Status code must fit in 3 digits */ AWS_ASSERT(status_int >= 0 && status_int <= 999); /* aws_http_message should have already checked this */ char status_code_str[4] = "XXX"; snprintf(status_code_str, sizeof(status_code_str), "%03d", status_int); struct aws_byte_cursor status_code = aws_byte_cursor_from_c_str(status_code_str); struct aws_byte_cursor status_text = aws_byte_cursor_from_c_str(aws_http_status_text(status_int)); /** * Calculate total size needed for outgoing_head_buffer, then write to buffer. 
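 * Same scheme as the request path, but with a response-line, e.g. for an illustrative 204:
 *     "HTTP/1.1 204 No Content\r\n"    version + SP + 3-digit status + SP + reason + CRLF
 *     ...header-lines...
 *     "\r\n"                           head-end
 * The status code is always rendered as exactly three digits via the "%03d" format below.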
*/ size_t header_lines_len; /** * no body needed in the response * RFC-7230 section 3.3 Message Body */ body_headers_ignored |= status_int == AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED; bool body_headers_forbidden = status_int == AWS_HTTP_STATUS_CODE_204_NO_CONTENT || status_int / 100 == 1; err = s_scan_outgoing_headers(message, response, &header_lines_len, body_headers_ignored, body_headers_forbidden); if (err) { goto error; } /* valid status must be three digital code, change it into byte_cursor */ /* response-line: "{version} {status} {status_text}\r\n" */ size_t response_line_len = 4; /* 2 spaces + "\r\n" */ err |= aws_add_size_checked(version.len, response_line_len, &response_line_len); err |= aws_add_size_checked(status_code.len, response_line_len, &response_line_len); err |= aws_add_size_checked(status_text.len, response_line_len, &response_line_len); /* head-end: "\r\n" */ size_t head_end_len = 2; size_t head_total_len = response_line_len; err |= aws_add_size_checked(header_lines_len, head_total_len, &head_total_len); err |= aws_add_size_checked(head_end_len, head_total_len, &head_total_len); if (err) { goto error; } aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len); bool wrote_all = true; wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, version); wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' '); wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, status_code); wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' '); wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, status_text); wrote_all &= s_write_crlf(&message->outgoing_head_buf); s_write_headers(&message->outgoing_head_buf, aws_http_message_get_const_headers(response)); wrote_all &= s_write_crlf(&message->outgoing_head_buf); (void)wrote_all; AWS_ASSERT(wrote_all); /* Success! 
*/ return AWS_OP_SUCCESS; error: aws_h1_encoder_message_clean_up(message); return AWS_OP_ERR; } void aws_h1_encoder_message_clean_up(struct aws_h1_encoder_message *message) { aws_input_stream_release(message->body); aws_byte_buf_clean_up(&message->outgoing_head_buf); aws_h1_trailer_destroy(message->trailer); AWS_ZERO_STRUCT(*message); } void aws_h1_encoder_init(struct aws_h1_encoder *encoder, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*encoder); encoder->allocator = allocator; } void aws_h1_encoder_clean_up(struct aws_h1_encoder *encoder) { AWS_ZERO_STRUCT(*encoder); } int aws_h1_encoder_start_message( struct aws_h1_encoder *encoder, struct aws_h1_encoder_message *message, struct aws_http_stream *stream) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(message); if (encoder->message) { ENCODER_LOG(ERROR, encoder, "Attempting to start new request while previous request is in progress."); return aws_raise_error(AWS_ERROR_INVALID_STATE); } encoder->current_stream = stream; encoder->message = message; return AWS_OP_SUCCESS; } static bool s_write_chunk_size(struct aws_byte_buf *dst, uint64_t chunk_size) { AWS_PRECONDITION(dst); AWS_PRECONDITION(aws_byte_buf_is_valid(dst)); char ascii_hex_chunk_size_str[MAX_ASCII_HEX_CHUNK_STR_SIZE] = {0}; snprintf(ascii_hex_chunk_size_str, sizeof(ascii_hex_chunk_size_str), "%" PRIX64, chunk_size); return aws_byte_buf_write_from_whole_cursor(dst, aws_byte_cursor_from_c_str(ascii_hex_chunk_size_str)); } static bool s_write_chunk_extension(struct aws_byte_buf *dst, struct aws_http1_chunk_extension *chunk_extension) { AWS_PRECONDITION(chunk_extension); AWS_PRECONDITION(aws_byte_buf_is_valid(dst)); bool wrote_all = true; wrote_all &= aws_byte_buf_write_u8(dst, ';'); wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, chunk_extension->key); wrote_all &= aws_byte_buf_write_u8(dst, '='); wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, chunk_extension->value); return wrote_all; } static size_t s_calculate_chunk_line_size(const struct aws_http1_chunk_options *options) { size_t chunk_line_size = MAX_ASCII_HEX_CHUNK_STR_SIZE + CRLF_SIZE; for (size_t i = 0; i < options->num_extensions; ++i) { struct aws_http1_chunk_extension *chunk_extension = options->extensions + i; chunk_line_size += 1 /* ; */; chunk_line_size += chunk_extension->key.len; chunk_line_size += 1 /* = */; chunk_line_size += chunk_extension->value.len; } return chunk_line_size; } static void s_populate_chunk_line_buffer( struct aws_byte_buf *chunk_line, const struct aws_http1_chunk_options *options) { bool wrote_chunk_line = true; wrote_chunk_line &= s_write_chunk_size(chunk_line, options->chunk_data_size); for (size_t i = 0; i < options->num_extensions; ++i) { wrote_chunk_line &= s_write_chunk_extension(chunk_line, options->extensions + i); } wrote_chunk_line &= s_write_crlf(chunk_line); AWS_ASSERT(wrote_chunk_line); (void)wrote_chunk_line; } struct aws_h1_trailer *aws_h1_trailer_new( struct aws_allocator *allocator, const struct aws_http_headers *trailing_headers) { /* Allocate trailer along with storage for the trailer-line */ size_t trailer_size = 0; if (s_scan_outgoing_trailer(trailing_headers, &trailer_size)) { return NULL; } struct aws_h1_trailer *trailer = aws_mem_calloc(allocator, 1, sizeof(struct aws_h1_trailer)); trailer->allocator = allocator; aws_byte_buf_init(&trailer->trailer_data, allocator, trailer_size); /* cannot fail */ s_write_headers(&trailer->trailer_data, trailing_headers); s_write_crlf(&trailer->trailer_data); /* \r\n */ return trailer; } void 
aws_h1_trailer_destroy(struct aws_h1_trailer *trailer) { if (trailer == NULL) { return; } aws_byte_buf_clean_up(&trailer->trailer_data); aws_mem_release(trailer->allocator, trailer); } struct aws_h1_chunk *aws_h1_chunk_new(struct aws_allocator *allocator, const struct aws_http1_chunk_options *options) { /* Allocate chunk along with storage for the chunk-line */ struct aws_h1_chunk *chunk; size_t chunk_line_size = s_calculate_chunk_line_size(options); void *chunk_line_storage; if (!aws_mem_acquire_many( allocator, 2, &chunk, sizeof(struct aws_h1_chunk), &chunk_line_storage, chunk_line_size)) { return NULL; } chunk->allocator = allocator; chunk->data = aws_input_stream_acquire(options->chunk_data); chunk->data_size = options->chunk_data_size; chunk->on_complete = options->on_complete; chunk->user_data = options->user_data; chunk->chunk_line = aws_byte_buf_from_empty_array(chunk_line_storage, chunk_line_size); s_populate_chunk_line_buffer(&chunk->chunk_line, options); return chunk; } void aws_h1_chunk_destroy(struct aws_h1_chunk *chunk) { AWS_PRECONDITION(chunk); aws_input_stream_release(chunk->data); aws_mem_release(chunk->allocator, chunk); } void aws_h1_chunk_complete_and_destroy( struct aws_h1_chunk *chunk, struct aws_http_stream *http_stream, int error_code) { AWS_PRECONDITION(chunk); aws_http1_stream_write_chunk_complete_fn *on_complete = chunk->on_complete; void *user_data = chunk->user_data; /* Clean up before firing callback */ aws_h1_chunk_destroy(chunk); if (NULL != on_complete) { on_complete(http_stream, error_code, user_data); } } static void s_clean_up_current_chunk(struct aws_h1_encoder *encoder, int error_code) { AWS_PRECONDITION(encoder->current_chunk); AWS_PRECONDITION(&encoder->current_chunk->node == aws_linked_list_front(encoder->message->pending_chunk_list)); aws_linked_list_remove(&encoder->current_chunk->node); aws_h1_chunk_complete_and_destroy(encoder->current_chunk, encoder->current_stream, error_code); encoder->current_chunk = NULL; } /* Write as much as possible from src_buf to dst, using encoder->progress_len to track progress. * Returns true if the entire src_buf has been copied */ static bool s_encode_buf(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst, const struct aws_byte_buf *src) { /* advance src_cursor to current position in src_buf */ struct aws_byte_cursor src_cursor = aws_byte_cursor_from_buf(src); aws_byte_cursor_advance(&src_cursor, (size_t)encoder->progress_bytes); /* write as much as possible to dst, src_cursor is advanced as write occurs */ struct aws_byte_cursor written = aws_byte_buf_write_to_capacity(dst, &src_cursor); encoder->progress_bytes += written.len; return src_cursor.len == 0; } /* Write as much body stream as possible into dst buffer. 
* Increments encoder->progress_bytes to track progress */ static int s_encode_stream( struct aws_h1_encoder *encoder, struct aws_byte_buf *dst, struct aws_input_stream *stream, uint64_t total_length, bool *out_done) { *out_done = false; if (dst->capacity == dst->len) { /* Return success because we want to try again later */ return AWS_OP_SUCCESS; } /* Read from stream */ ENCODER_LOG(TRACE, encoder, "Reading from body stream."); const size_t prev_len = dst->len; int err = aws_input_stream_read(stream, dst); const size_t amount_read = dst->len - prev_len; if (err) { ENCODER_LOGF( ERROR, encoder, "Failed to read body stream, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } /* Increment progress_bytes, and make sure we haven't written too much */ int add_err = aws_add_u64_checked(encoder->progress_bytes, amount_read, &encoder->progress_bytes); if (add_err || encoder->progress_bytes > total_length) { ENCODER_LOGF(ERROR, encoder, "Body stream has exceeded expected length: %" PRIu64, total_length); return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT); } ENCODER_LOGF( TRACE, encoder, "Sending %zu bytes of body, progress: %" PRIu64 "/%" PRIu64, amount_read, encoder->progress_bytes, total_length); /* Return if we're done sending stream */ if (encoder->progress_bytes == total_length) { *out_done = true; return AWS_OP_SUCCESS; } /* Return if stream failed to write anything. Maybe the data isn't ready yet. */ if (amount_read == 0) { /* Ensure we're not at end-of-stream too early */ struct aws_stream_status status; err = aws_input_stream_get_status(stream, &status); if (err) { ENCODER_LOGF( TRACE, encoder, "Failed to query body stream status, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } if (status.is_end_of_stream) { ENCODER_LOGF( ERROR, encoder, "Reached end of body stream but sent less than declared length %" PRIu64 "/%" PRIu64, encoder->progress_bytes, total_length); return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT); } } /* Not done streaming data out yet */ return AWS_OP_SUCCESS; } /* A state function should: * - Raise an error only if unrecoverable error occurs. * - `return s_switch_state(...)` to switch states. * - `return AWS_OP_SUCCESS` if it can't progress any further (waiting for more * space to write into, waiting for more chunks, etc). */ typedef int encoder_state_fn(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst); /* Switch state. * The only reason this returns a value is so it can be called with `return` to conclude a state function */ static int s_switch_state(struct aws_h1_encoder *encoder, enum aws_h1_encoder_state state) { encoder->state = state; encoder->progress_bytes = 0; return AWS_OP_SUCCESS; } /* Initial state. Waits until a new message is set */ static int s_state_fn_init(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { (void)dst; if (!encoder->message) { /* Remain in this state. */ return AWS_OP_SUCCESS; } /* Start encoding message */ ENCODER_LOG(TRACE, encoder, "Starting to send data."); return s_switch_state(encoder, AWS_H1_ENCODER_STATE_HEAD); } /* Write out first line of request/response, plus all the headers. * These have been pre-encoded in aws_h1_encoder_message->outgoing_head_buf. 
*/ static int s_state_fn_head(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { bool done = s_encode_buf(encoder, dst, &encoder->message->outgoing_head_buf); if (!done) { /* Remain in this state */ return AWS_OP_SUCCESS; } /* Don't NEED to free this buffer now, but we don't need it anymore, so why not */ aws_byte_buf_clean_up(&encoder->message->outgoing_head_buf); /* Pick next state */ if (encoder->message->body && encoder->message->content_length) { return s_switch_state(encoder, AWS_H1_ENCODER_STATE_UNCHUNKED_BODY); } else if (encoder->message->has_chunked_encoding_header) { return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_NEXT); } else { return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE); } } /* Write out body (not using chunked encoding). */ static int s_state_fn_unchunked_body(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { bool done; if (s_encode_stream(encoder, dst, encoder->message->body, encoder->message->content_length, &done)) { return AWS_OP_ERR; } if (!done) { /* Remain in this state until we're done writing out body */ return AWS_OP_SUCCESS; } /* Message is done */ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE); } /* Select next chunk to work on. * Encoder is essentially "paused" here if no chunks are available. */ static int s_state_fn_chunk_next(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { (void)dst; if (aws_linked_list_empty(encoder->message->pending_chunk_list)) { /* Remain in this state until more chunks arrive */ ENCODER_LOG(TRACE, encoder, "No chunks ready to send, waiting for more..."); return AWS_OP_SUCCESS; } /* Set next chunk and go to next state */ struct aws_linked_list_node *node = aws_linked_list_front(encoder->message->pending_chunk_list); encoder->current_chunk = AWS_CONTAINER_OF(node, struct aws_h1_chunk, node); encoder->chunk_count++; ENCODER_LOGF( TRACE, encoder, "Begin sending chunk %zu with size %" PRIu64, encoder->chunk_count, encoder->current_chunk->data_size); return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_LINE); } /* Write out "chunk-size [chunk-ext] CRLF". * This data is pre-encoded in the chunk's chunk_line buffer */ static int s_state_fn_chunk_line(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { bool done = s_encode_buf(encoder, dst, &encoder->current_chunk->chunk_line); if (!done) { /* Remain in state until done writing line */ return AWS_OP_SUCCESS; } /* Pick next state */ if (encoder->current_chunk->data_size == 0) { /* If data_size is 0, then this was the last chunk, which has no body. * Mark it complete and move on to trailer. 
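 * (In HTTP/1.1 chunked encoding the last-chunk is just a zero chunk-size line, "0\r\n",
 * so there is no chunk body to stream before the trailer section.)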
*/ ENCODER_LOG(TRACE, encoder, "Final chunk complete"); s_clean_up_current_chunk(encoder, AWS_ERROR_SUCCESS); return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_TRAILER); } return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_BODY); } /* Write out data for current chunk */ static int s_state_fn_chunk_body(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { bool done; if (s_encode_stream(encoder, dst, encoder->current_chunk->data, encoder->current_chunk->data_size, &done)) { int error_code = aws_last_error(); /* The error was caused by the chunk itself, report that specific error in its completion callback */ s_clean_up_current_chunk(encoder, error_code); /* Re-raise error, in case it got cleared during user callback */ return aws_raise_error(error_code); } if (!done) { /* Remain in this state until we're done writing out body */ return AWS_OP_SUCCESS; } return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_END); } /* Write CRLF and mark chunk as complete */ static int s_state_fn_chunk_end(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { bool done = s_write_crlf(dst); if (!done) { /* Remain in this state until done writing out CRLF */ return AWS_OP_SUCCESS; } ENCODER_LOG(TRACE, encoder, "Chunk complete"); s_clean_up_current_chunk(encoder, AWS_ERROR_SUCCESS); /* Pick next chunk to work on */ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_NEXT); } /* Write out trailer after last chunk */ static int s_state_fn_chunk_trailer(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { bool done; /* if a chunked trailer was set */ if (encoder->message->trailer) { done = s_encode_buf(encoder, dst, &encoder->message->trailer->trailer_data); } else { done = s_write_crlf(dst); } if (!done) { /* Remain in this state until we're done writing out trailer */ return AWS_OP_SUCCESS; } return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE); } /* Message is done, loop back to start of state machine */ static int s_state_fn_done(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) { (void)dst; ENCODER_LOG(TRACE, encoder, "Done sending data."); encoder->message = NULL; return s_switch_state(encoder, AWS_H1_ENCODER_STATE_INIT); } struct encoder_state_def { encoder_state_fn *fn; const char *name; }; static struct encoder_state_def s_encoder_states[] = { [AWS_H1_ENCODER_STATE_INIT] = {.fn = s_state_fn_init, .name = "INIT"}, [AWS_H1_ENCODER_STATE_HEAD] = {.fn = s_state_fn_head, .name = "HEAD"}, [AWS_H1_ENCODER_STATE_UNCHUNKED_BODY] = {.fn = s_state_fn_unchunked_body, .name = "BODY"}, [AWS_H1_ENCODER_STATE_CHUNK_NEXT] = {.fn = s_state_fn_chunk_next, .name = "CHUNK_NEXT"}, [AWS_H1_ENCODER_STATE_CHUNK_LINE] = {.fn = s_state_fn_chunk_line, .name = "CHUNK_LINE"}, [AWS_H1_ENCODER_STATE_CHUNK_BODY] = {.fn = s_state_fn_chunk_body, .name = "CHUNK_BODY"}, [AWS_H1_ENCODER_STATE_CHUNK_END] = {.fn = s_state_fn_chunk_end, .name = "CHUNK_END"}, [AWS_H1_ENCODER_STATE_CHUNK_TRAILER] = {.fn = s_state_fn_chunk_trailer, .name = "CHUNK_TRAILER"}, [AWS_H1_ENCODER_STATE_DONE] = {.fn = s_state_fn_done, .name = "DONE"}, }; int aws_h1_encoder_process(struct aws_h1_encoder *encoder, struct aws_byte_buf *out_buf) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(out_buf); if (!encoder->message) { ENCODER_LOG(ERROR, encoder, "No message is currently set for encoding."); return aws_raise_error(AWS_ERROR_INVALID_STATE); } /* Run state machine until states stop changing. 
(due to out_buf running * out of space, input_stream stalling, waiting for more chunks, etc) */ enum aws_h1_encoder_state prev_state; do { prev_state = encoder->state; if (s_encoder_states[encoder->state].fn(encoder, out_buf)) { return AWS_OP_ERR; } } while (prev_state != encoder->state); return AWS_OP_SUCCESS; } bool aws_h1_encoder_is_message_in_progress(const struct aws_h1_encoder *encoder) { return encoder->message; } bool aws_h1_encoder_is_waiting_for_chunks(const struct aws_h1_encoder *encoder) { return encoder->state == AWS_H1_ENCODER_STATE_CHUNK_NEXT && aws_linked_list_empty(encoder->message->pending_chunk_list); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h1_stream.c000066400000000000000000000530431456575232400235430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include static void s_stream_destroy(struct aws_http_stream *stream_base) { struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base); AWS_ASSERT( stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE && "Stream should be complete (or never-activated) when stream destroyed"); AWS_ASSERT( aws_linked_list_empty(&stream->thread_data.pending_chunk_list) && aws_linked_list_empty(&stream->synced_data.pending_chunk_list) && "Chunks should be marked complete before stream destroyed"); aws_h1_encoder_message_clean_up(&stream->encoder_message); aws_byte_buf_clean_up(&stream->incoming_storage_buf); aws_mem_release(stream->base.alloc, stream); } static struct aws_h1_connection *s_get_h1_connection(const struct aws_h1_stream *stream) { return AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h1_connection, base); } static void s_stream_lock_synced_data(struct aws_h1_stream *stream) { aws_h1_connection_lock_synced_data(s_get_h1_connection(stream)); } static void s_stream_unlock_synced_data(struct aws_h1_stream *stream) { aws_h1_connection_unlock_synced_data(s_get_h1_connection(stream)); } static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_h1_stream *stream = arg; struct aws_h1_connection *connection = s_get_h1_connection(stream); if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Running stream cross-thread work task.", (void *)&stream->base); /* BEGIN CRITICAL SECTION */ s_stream_lock_synced_data(stream); stream->synced_data.is_cross_thread_work_task_scheduled = false; int api_state = stream->synced_data.api_state; bool found_chunks = !aws_linked_list_empty(&stream->synced_data.pending_chunk_list); aws_linked_list_move_all_back(&stream->thread_data.pending_chunk_list, &stream->synced_data.pending_chunk_list); stream->encoder_message.trailer = stream->synced_data.pending_trailer; stream->synced_data.pending_trailer = NULL; bool has_outgoing_response = stream->synced_data.has_outgoing_response; uint64_t pending_window_update = stream->synced_data.pending_window_update; stream->synced_data.pending_window_update = 0; s_stream_unlock_synced_data(stream); /* END CRITICAL SECTION */ /* If we have any new outgoing data, prompt the connection to try and send it. 
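 * Chunks, trailers, and window updates submitted from other threads were copied out of
 * synced_data above; from here on only thread_data is touched, on the channel's event-loop thread.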
*/ bool new_outgoing_data = found_chunks; /* If we JUST learned about having an outgoing response, that's a reason to try sending data */ if (has_outgoing_response && !stream->thread_data.has_outgoing_response) { stream->thread_data.has_outgoing_response = true; new_outgoing_data = true; } if (new_outgoing_data && (api_state == AWS_H1_STREAM_API_STATE_ACTIVE)) { aws_h1_connection_try_write_outgoing_stream(connection); } /* Add to window size using saturated sum to prevent overflow. * Saturating is fine because it's a u64, the stream could never receive that much data. */ stream->thread_data.stream_window = aws_add_u64_saturating(stream->thread_data.stream_window, pending_window_update); if ((pending_window_update > 0) && (api_state == AWS_H1_STREAM_API_STATE_ACTIVE)) { /* Now that stream window is larger, connection might have buffered * data to send, or might need to increment its own window */ aws_h1_connection_try_process_read_messages(connection); } done: /* Release reference that kept stream alive until task ran */ aws_http_stream_release(&stream->base); } /* Note the update in synced_data, and schedule the cross_thread_work_task if necessary */ static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size) { if (increment_size == 0) { return; } if (!stream_base->owning_connection->stream_manual_window_management) { return; } struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base); bool should_schedule_task = false; { /* BEGIN CRITICAL SECTION */ s_stream_lock_synced_data(stream); /* Saturated sum. It's a u64. The stream could never receive that much data. */ stream->synced_data.pending_window_update = aws_add_u64_saturating(stream->synced_data.pending_window_update, increment_size); /* Don't alert the connection unless the stream is active */ if (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_ACTIVE) { if (!stream->synced_data.is_cross_thread_work_task_scheduled) { stream->synced_data.is_cross_thread_work_task_scheduled = true; should_schedule_task = true; } } s_stream_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (should_schedule_task) { /* Keep stream alive until task completes */ aws_atomic_fetch_add(&stream->base.refcount, 1); AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base); aws_channel_schedule_task_now( stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task); } } static int s_stream_write_chunk(struct aws_http_stream *stream_base, const struct aws_http1_chunk_options *options) { AWS_PRECONDITION(stream_base); AWS_PRECONDITION(options); struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base); if (options->chunk_data == NULL && options->chunk_data_size > 0) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Chunk data cannot be NULL if data size is non-zero", (void *)stream_base); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_h1_chunk *chunk = aws_h1_chunk_new(stream_base->alloc, options); if (AWS_UNLIKELY(NULL == chunk)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Failed to initialize streamed chunk, error %d (%s).", (void *)stream_base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } int error_code = 0; bool should_schedule_task = false; { /* BEGIN CRITICAL SECTION */ s_stream_lock_synced_data(stream); /* Can only add chunks while stream is active. 
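 * A stream that was never activated fails with AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED,
 * while a stream that already completed fails with AWS_ERROR_HTTP_STREAM_HAS_COMPLETED.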
*/ if (stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE) { error_code = (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_INIT) ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED; goto unlock; } /* Prevent user trying to submit chunks without having set the required headers. * This check also prevents a server-user submitting chunks before the response has been submitted. */ if (!stream->synced_data.using_chunked_encoding) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Cannot write chunks without 'transfer-encoding: chunked' header.", (void *)stream_base); error_code = AWS_ERROR_INVALID_STATE; goto unlock; } if (stream->synced_data.has_final_chunk) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Cannot write additional chunk after final chunk.", (void *)stream_base); error_code = AWS_ERROR_INVALID_STATE; goto unlock; } /* success */ if (chunk->data_size == 0) { stream->synced_data.has_final_chunk = true; } aws_linked_list_push_back(&stream->synced_data.pending_chunk_list, &chunk->node); should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; unlock: s_stream_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (error_code) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Failed to add chunk, error %d (%s)", (void *)stream_base, error_code, aws_error_name(error_code)); aws_h1_chunk_destroy(chunk); return aws_raise_error(error_code); } AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Adding chunk with size %" PRIu64 " to stream", (void *)stream, options->chunk_data_size); if (should_schedule_task) { /* Keep stream alive until task completes */ aws_atomic_fetch_add(&stream->base.refcount, 1); AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base); aws_channel_schedule_task_now( stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task); } else { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)stream_base); } return AWS_OP_SUCCESS; } static int s_stream_add_trailer(struct aws_http_stream *stream_base, const struct aws_http_headers *trailing_headers) { AWS_PRECONDITION(stream_base); AWS_PRECONDITION(trailing_headers); struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base); struct aws_h1_trailer *trailer = aws_h1_trailer_new(stream_base->alloc, trailing_headers); if (AWS_UNLIKELY(NULL == trailer)) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Failed to initialize streamed trailer, error %d (%s).", (void *)stream_base, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } int error_code = 0; bool should_schedule_task = false; { /* BEGIN CRITICAL SECTION */ s_stream_lock_synced_data(stream); /* Can only add trailers while stream is active. */ if (stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE) { error_code = (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_INIT) ? 
AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED; goto unlock; } if (!stream->synced_data.using_chunked_encoding) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Cannot write trailers without 'transfer-encoding: chunked' header.", (void *)stream_base); error_code = AWS_ERROR_INVALID_STATE; goto unlock; } if (stream->synced_data.has_added_trailer) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Cannot write trailers twice.", (void *)stream_base); error_code = AWS_ERROR_INVALID_STATE; goto unlock; } if (stream->synced_data.has_final_chunk) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Cannot write trailers after final chunk.", (void *)stream_base); error_code = AWS_ERROR_INVALID_STATE; goto unlock; } stream->synced_data.has_added_trailer = true; stream->synced_data.pending_trailer = trailer; should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; unlock: s_stream_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (error_code) { AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Failed to add trailer, error %d (%s)", (void *)stream_base, error_code, aws_error_name(error_code)); aws_h1_trailer_destroy(trailer); return aws_raise_error(error_code); } AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Adding trailer to stream", (void *)stream); if (should_schedule_task) { /* Keep stream alive until task completes */ aws_atomic_fetch_add(&stream->base.refcount, 1); AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base); aws_channel_schedule_task_now( stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task); } else { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)stream_base); } return AWS_OP_SUCCESS; } static const struct aws_http_stream_vtable s_stream_vtable = { .destroy = s_stream_destroy, .update_window = s_stream_update_window, .activate = aws_h1_stream_activate, .cancel = aws_h1_stream_cancel, .http1_write_chunk = s_stream_write_chunk, .http1_add_trailer = s_stream_add_trailer, .http2_reset_stream = NULL, .http2_get_received_error_code = NULL, .http2_get_sent_error_code = NULL, }; static struct aws_h1_stream *s_stream_new_common( struct aws_http_connection *connection_base, void *user_data, aws_http_on_incoming_headers_fn *on_incoming_headers, aws_http_on_incoming_header_block_done_fn *on_incoming_header_block_done, aws_http_on_incoming_body_fn *on_incoming_body, aws_http_on_stream_complete_fn *on_complete, aws_http_on_stream_destroy_fn *on_destroy) { struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base); struct aws_h1_stream *stream = aws_mem_calloc(connection_base->alloc, 1, sizeof(struct aws_h1_stream)); if (!stream) { return NULL; } stream->base.vtable = &s_stream_vtable; stream->base.alloc = connection_base->alloc; stream->base.owning_connection = connection_base; stream->base.user_data = user_data; stream->base.on_incoming_headers = on_incoming_headers; stream->base.on_incoming_header_block_done = on_incoming_header_block_done; stream->base.on_incoming_body = on_incoming_body; stream->base.on_complete = on_complete; stream->base.on_destroy = on_destroy; stream->base.metrics.send_start_timestamp_ns = -1; stream->base.metrics.send_end_timestamp_ns = -1; stream->base.metrics.sending_duration_ns = -1; stream->base.metrics.receive_start_timestamp_ns = -1; 
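    /* -1 marks a metric timestamp/duration as "not recorded yet". */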
stream->base.metrics.receive_end_timestamp_ns = -1; stream->base.metrics.receiving_duration_ns = -1; aws_channel_task_init( &stream->cross_thread_work_task, s_stream_cross_thread_work_task, stream, "http1_stream_cross_thread_work"); aws_linked_list_init(&stream->thread_data.pending_chunk_list); aws_linked_list_init(&stream->synced_data.pending_chunk_list); stream->thread_data.stream_window = connection->initial_stream_window_size; /* Stream refcount starts at 1 for user and is incremented upon activation for the connection */ aws_atomic_init_int(&stream->base.refcount, 1); return stream; } struct aws_h1_stream *aws_h1_stream_new_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { struct aws_h1_stream *stream = s_stream_new_common( client_connection, options->user_data, options->on_response_headers, options->on_response_header_block_done, options->on_response_body, options->on_complete, options->on_destroy); if (!stream) { return NULL; } /* Transform request if necessary */ if (client_connection->proxy_request_transform) { if (client_connection->proxy_request_transform(options->request, client_connection->user_data)) { goto error; } } stream->base.client_data = &stream->base.client_or_server_data.client; stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN; stream->base.client_data->response_first_byte_timeout_ms = options->response_first_byte_timeout_ms; stream->base.on_metrics = options->on_metrics; /* Validate request and cache info that the encoder will eventually need */ if (aws_h1_encoder_message_init_from_request( &stream->encoder_message, client_connection->alloc, options->request, &stream->thread_data.pending_chunk_list)) { goto error; } /* RFC-7230 Section 6.3: The "close" connection option is used to signal * that a connection will not persist after the current request/response*/ if (stream->encoder_message.has_connection_close_header) { stream->is_final_stream = true; } stream->synced_data.using_chunked_encoding = stream->encoder_message.has_chunked_encoding_header; return stream; error: s_stream_destroy(&stream->base); return NULL; } struct aws_h1_stream *aws_h1_stream_new_request_handler(const struct aws_http_request_handler_options *options) { struct aws_h1_stream *stream = s_stream_new_common( options->server_connection, options->user_data, options->on_request_headers, options->on_request_header_block_done, options->on_request_body, options->on_complete, options->on_destroy); if (!stream) { return NULL; } /* This code is only executed in server mode and can only be invoked from the event-loop thread so don't worry * with the lock here. */ stream->base.id = aws_http_connection_get_next_stream_id(options->server_connection); /* Request-handler (server) streams don't need user to call activate() on them. * Since these these streams can only be created on the event-loop thread, * it's not possible for callbacks to fire before the stream pointer is returned. 
* (Clients must call stream.activate() because they might create a stream on any thread) */ stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_ACTIVE; stream->base.server_data = &stream->base.client_or_server_data.server; stream->base.server_data->on_request_done = options->on_request_done; aws_atomic_fetch_add(&stream->base.refcount, 1); return stream; } int aws_h1_stream_send_response(struct aws_h1_stream *stream, struct aws_http_message *response) { struct aws_h1_connection *connection = s_get_h1_connection(stream); int error_code = 0; /* Validate the response and cache info that encoder will eventually need. * The encoder_message object will be moved into the stream later while holding the lock */ struct aws_h1_encoder_message encoder_message; bool body_headers_ignored = stream->base.request_method == AWS_HTTP_METHOD_HEAD; if (aws_h1_encoder_message_init_from_response( &encoder_message, stream->base.alloc, response, body_headers_ignored, &stream->thread_data.pending_chunk_list)) { error_code = aws_last_error(); goto error; } bool should_schedule_task = false; { /* BEGIN CRITICAL SECTION */ s_stream_lock_synced_data(stream); if (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_COMPLETE) { error_code = AWS_ERROR_HTTP_STREAM_HAS_COMPLETED; } else if (stream->synced_data.has_outgoing_response) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Response already created on the stream", (void *)&stream->base); error_code = AWS_ERROR_INVALID_STATE; } else { stream->synced_data.has_outgoing_response = true; stream->encoder_message = encoder_message; if (encoder_message.has_connection_close_header) { /* This will be the last stream connection will process, new streams will be rejected */ stream->is_final_stream = true; /* Note: We're touching the connection's synced_data, which is OK * because an h1_connection and all its h1_streams share a single lock. */ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; } stream->synced_data.using_chunked_encoding = stream->encoder_message.has_chunked_encoding_header; should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; } s_stream_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (error_code) { goto error; } /* Success! */ AWS_LOGF_DEBUG( AWS_LS_HTTP_STREAM, "id=%p: Created response on connection=%p: ", (void *)stream, (void *)connection); if (should_schedule_task) { /* Keep stream alive until task completes */ aws_atomic_fetch_add(&stream->base.refcount, 1); AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)&stream->base); aws_channel_schedule_task_now( stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task); } else { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)&stream->base); } return AWS_OP_SUCCESS; error: AWS_LOGF_ERROR( AWS_LS_HTTP_STREAM, "id=%p: Sending response on the stream failed, error %d (%s)", (void *)&stream->base, error_code, aws_error_name(error_code)); aws_h1_encoder_message_clean_up(&encoder_message); return aws_raise_error(error_code); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h2_connection.c000066400000000000000000003600771456575232400244200ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif /* Apple toolchains such as xcode and swiftpm define the DEBUG symbol. undef it here so we can actually use the token */ #undef DEBUG #define CONNECTION_LOGF(level, connection, text, ...) \ AWS_LOGF_##level(AWS_LS_HTTP_CONNECTION, "id=%p: " text, (void *)(connection), __VA_ARGS__) #define CONNECTION_LOG(level, connection, text) CONNECTION_LOGF(level, connection, "%s", text) static int s_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); static int s_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); static int s_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size); static int s_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately); static size_t s_handler_initial_window_size(struct aws_channel_handler *handler); static size_t s_handler_message_overhead(struct aws_channel_handler *handler); static void s_handler_destroy(struct aws_channel_handler *handler); static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot); static struct aws_http_stream *s_connection_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); static void s_connection_close(struct aws_http_connection *connection_base); static void s_connection_stop_new_request(struct aws_http_connection *connection_base); static bool s_connection_is_open(const struct aws_http_connection *connection_base); static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base); static void s_connection_update_window(struct aws_http_connection *connection_base, uint32_t increment_size); static int s_connection_change_settings( struct aws_http_connection *connection_base, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data); static int s_connection_send_ping( struct aws_http_connection *connection_base, const struct aws_byte_cursor *optional_opaque_data, aws_http2_on_ping_complete_fn *on_completed, void *user_data); static void s_connection_send_goaway( struct aws_http_connection *connection_base, uint32_t http2_error, bool allow_more_streams, const struct aws_byte_cursor *optional_debug_data); static int s_connection_get_sent_goaway( struct aws_http_connection *connection_base, uint32_t *out_http2_error, uint32_t *out_last_stream_id); static int s_connection_get_received_goaway( struct aws_http_connection *connection_base, uint32_t *out_http2_error, uint32_t *out_last_stream_id); static void s_connection_get_local_settings( const struct aws_http_connection *connection_base, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]); static void s_connection_get_remote_settings( const struct aws_http_connection *connection_base, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]); static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static int 
s_encode_outgoing_frames_queue(struct aws_h2_connection *connection, struct aws_byte_buf *output); static int s_encode_data_from_outgoing_streams(struct aws_h2_connection *connection, struct aws_byte_buf *output); static int s_record_closed_stream( struct aws_h2_connection *connection, uint32_t stream_id, enum aws_h2_stream_closed_when closed_when); static void s_stream_complete(struct aws_h2_connection *connection, struct aws_h2_stream *stream, int error_code); static void s_write_outgoing_frames(struct aws_h2_connection *connection, bool first_try); static void s_finish_shutdown(struct aws_h2_connection *connection); static void s_send_goaway( struct aws_h2_connection *connection, uint32_t h2_error_code, bool allow_more_streams, const struct aws_byte_cursor *optional_debug_data); static struct aws_h2_pending_settings *s_new_pending_settings( struct aws_allocator *allocator, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data); static struct aws_h2err s_decoder_on_headers_begin(uint32_t stream_id, void *userdata); static struct aws_h2err s_decoder_on_headers_i( uint32_t stream_id, const struct aws_http_header *header, enum aws_http_header_name name_enum, enum aws_http_header_block block_type, void *userdata); static struct aws_h2err s_decoder_on_headers_end( uint32_t stream_id, bool malformed, enum aws_http_header_block block_type, void *userdata); static struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised_stream_id, void *userdata); static struct aws_h2err s_decoder_on_data_begin( uint32_t stream_id, uint32_t payload_len, uint32_t total_padding_bytes, bool end_stream, void *userdata); static struct aws_h2err s_decoder_on_data_i(uint32_t stream_id, struct aws_byte_cursor data, void *userdata); static struct aws_h2err s_decoder_on_end_stream(uint32_t stream_id, void *userdata); static struct aws_h2err s_decoder_on_rst_stream(uint32_t stream_id, uint32_t h2_error_code, void *userdata); static struct aws_h2err s_decoder_on_ping_ack(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata); static struct aws_h2err s_decoder_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata); static struct aws_h2err s_decoder_on_settings( const struct aws_http2_setting *settings_array, size_t num_settings, void *userdata); static struct aws_h2err s_decoder_on_settings_ack(void *userdata); static struct aws_h2err s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata); struct aws_h2err s_decoder_on_goaway( uint32_t last_stream, uint32_t error_code, struct aws_byte_cursor debug_data, void *userdata); static void s_reset_statistics(struct aws_channel_handler *handler); static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats); static struct aws_http_connection_vtable s_h2_connection_vtable = { .channel_handler_vtable = { .process_read_message = s_handler_process_read_message, .process_write_message = s_handler_process_write_message, .increment_read_window = s_handler_increment_read_window, .shutdown = s_handler_shutdown, .initial_window_size = s_handler_initial_window_size, .message_overhead = s_handler_message_overhead, .destroy = s_handler_destroy, .reset_statistics = s_reset_statistics, .gather_statistics = s_gather_statistics, }, .on_channel_handler_installed = s_handler_installed, .make_request = s_connection_make_request, .new_server_request_handler_stream = NULL, .stream_send_response = 
NULL, .close = s_connection_close, .stop_new_requests = s_connection_stop_new_request, .is_open = s_connection_is_open, .new_requests_allowed = s_connection_new_requests_allowed, .update_window = s_connection_update_window, .change_settings = s_connection_change_settings, .send_ping = s_connection_send_ping, .send_goaway = s_connection_send_goaway, .get_sent_goaway = s_connection_get_sent_goaway, .get_received_goaway = s_connection_get_received_goaway, .get_local_settings = s_connection_get_local_settings, .get_remote_settings = s_connection_get_remote_settings, }; static const struct aws_h2_decoder_vtable s_h2_decoder_vtable = { .on_headers_begin = s_decoder_on_headers_begin, .on_headers_i = s_decoder_on_headers_i, .on_headers_end = s_decoder_on_headers_end, .on_push_promise_begin = s_decoder_on_push_promise, .on_data_begin = s_decoder_on_data_begin, .on_data_i = s_decoder_on_data_i, .on_end_stream = s_decoder_on_end_stream, .on_rst_stream = s_decoder_on_rst_stream, .on_ping_ack = s_decoder_on_ping_ack, .on_ping = s_decoder_on_ping, .on_settings = s_decoder_on_settings, .on_settings_ack = s_decoder_on_settings_ack, .on_window_update = s_decoder_on_window_update, .on_goaway = s_decoder_on_goaway, }; static void s_lock_synced_data(struct aws_h2_connection *connection) { int err = aws_mutex_lock(&connection->synced_data.lock); AWS_ASSERT(!err && "lock failed"); (void)err; } static void s_unlock_synced_data(struct aws_h2_connection *connection) { int err = aws_mutex_unlock(&connection->synced_data.lock); AWS_ASSERT(!err && "unlock failed"); (void)err; } static void s_acquire_stream_and_connection_lock(struct aws_h2_stream *stream, struct aws_h2_connection *connection) { int err = aws_mutex_lock(&stream->synced_data.lock); err |= aws_mutex_lock(&connection->synced_data.lock); AWS_ASSERT(!err && "lock connection and stream failed"); (void)err; } static void s_release_stream_and_connection_lock(struct aws_h2_stream *stream, struct aws_h2_connection *connection) { int err = aws_mutex_unlock(&connection->synced_data.lock); err |= aws_mutex_unlock(&stream->synced_data.lock); AWS_ASSERT(!err && "unlock connection and stream failed"); (void)err; } static void s_add_time_measurement_to_stats(uint64_t start_ns, uint64_t end_ns, uint64_t *output_ms) { if (end_ns > start_ns) { *output_ms += aws_timestamp_convert(end_ns - start_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); } else { *output_ms = 0; } } /** * Internal function for bringing connection to a stop. * Invoked multiple times, including when: * - Channel is shutting down in the read direction. * - Channel is shutting down in the write direction. * - An error occurs that will shutdown the channel. * - User wishes to close the connection (this is the only case where the function may run off-thread). 
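 * stop_reading and stop_writing may only be set from the channel thread (asserted below);
 * schedule_shutdown hands the error code to aws_channel_shutdown().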
*/ static void s_stop( struct aws_h2_connection *connection, bool stop_reading, bool stop_writing, bool schedule_shutdown, int error_code) { AWS_ASSERT(stop_reading || stop_writing || schedule_shutdown); /* You are required to stop at least 1 thing */ if (stop_reading) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); connection->thread_data.is_reading_stopped = true; } if (stop_writing) { AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); connection->thread_data.is_writing_stopped = true; } /* Even if we're not scheduling shutdown just yet (ex: sent final request but waiting to read final response) * we don't consider the connection "open" anymore so user can't create more streams */ { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; connection->synced_data.is_open = false; s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (schedule_shutdown) { AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "id=%p: Shutting down connection with error code %d (%s).", (void *)&connection->base, error_code, aws_error_name(error_code)); aws_channel_shutdown(connection->base.channel_slot->channel, error_code); } } void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *connection, int error_code) { AWS_PRECONDITION(error_code); AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written) { /* If shutdown is waiting for writes to complete, but writes are now broken, * then we must finish shutdown now */ s_finish_shutdown(connection); } else { s_stop(connection, false /*stop_reading*/, true /*stop_writing*/, true /*schedule_shutdown*/, error_code); } } /* Common new() logic for server & client */ static struct aws_h2_connection *s_connection_new( struct aws_allocator *alloc, bool manual_window_management, const struct aws_http2_connection_options *http2_options, bool server) { AWS_PRECONDITION(http2_options); struct aws_h2_connection *connection = aws_mem_calloc(alloc, 1, sizeof(struct aws_h2_connection)); if (!connection) { return NULL; } connection->base.vtable = &s_h2_connection_vtable; connection->base.alloc = alloc; connection->base.channel_handler.vtable = &s_h2_connection_vtable.channel_handler_vtable; connection->base.channel_handler.alloc = alloc; connection->base.channel_handler.impl = connection; connection->base.http_version = AWS_HTTP_VERSION_2; /* Init the next stream id (server must use even ids, client odd [RFC 7540 5.1.1])*/ connection->base.next_stream_id = (server ? 
2 : 1); /* Stream window management */ connection->base.stream_manual_window_management = manual_window_management; /* Connection window management */ connection->conn_manual_window_management = http2_options->conn_manual_window_management; connection->on_goaway_received = http2_options->on_goaway_received; connection->on_remote_settings_change = http2_options->on_remote_settings_change; aws_channel_task_init( &connection->cross_thread_work_task, s_cross_thread_work_task, connection, "HTTP/2 cross-thread work"); aws_channel_task_init( &connection->outgoing_frames_task, s_outgoing_frames_task, connection, "HTTP/2 outgoing frames"); /* 1 refcount for user */ aws_atomic_init_int(&connection->base.refcount, 1); uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX; connection->synced_data.goaway_sent_last_stream_id = max_stream_id + 1; connection->synced_data.goaway_received_last_stream_id = max_stream_id + 1; aws_linked_list_init(&connection->synced_data.pending_stream_list); aws_linked_list_init(&connection->synced_data.pending_frame_list); aws_linked_list_init(&connection->synced_data.pending_settings_list); aws_linked_list_init(&connection->synced_data.pending_ping_list); aws_linked_list_init(&connection->synced_data.pending_goaway_list); aws_linked_list_init(&connection->thread_data.outgoing_streams_list); aws_linked_list_init(&connection->thread_data.pending_settings_queue); aws_linked_list_init(&connection->thread_data.pending_ping_queue); aws_linked_list_init(&connection->thread_data.stalled_window_streams_list); aws_linked_list_init(&connection->thread_data.waiting_streams_list); aws_linked_list_init(&connection->thread_data.outgoing_frames_queue); if (aws_mutex_init(&connection->synced_data.lock)) { CONNECTION_LOGF( ERROR, connection, "Mutex init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error; } if (aws_hash_table_init( &connection->thread_data.active_streams_map, alloc, 8, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) { CONNECTION_LOGF( ERROR, connection, "Hashtable init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error; } size_t max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS; if (http2_options->max_closed_streams) { max_closed_streams = http2_options->max_closed_streams; } connection->thread_data.closed_streams = aws_cache_new_fifo(alloc, aws_hash_ptr, aws_ptr_eq, NULL, NULL, max_closed_streams); if (!connection->thread_data.closed_streams) { CONNECTION_LOGF( ERROR, connection, "FIFO cache init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Initialize the value of settings */ memcpy(connection->thread_data.settings_peer, aws_h2_settings_initial, sizeof(aws_h2_settings_initial)); memcpy(connection->thread_data.settings_self, aws_h2_settings_initial, sizeof(aws_h2_settings_initial)); memcpy(connection->synced_data.settings_peer, aws_h2_settings_initial, sizeof(aws_h2_settings_initial)); memcpy(connection->synced_data.settings_self, aws_h2_settings_initial, sizeof(aws_h2_settings_initial)); connection->thread_data.window_size_peer = AWS_H2_INIT_WINDOW_SIZE; connection->thread_data.window_size_self = AWS_H2_INIT_WINDOW_SIZE; connection->thread_data.goaway_received_last_stream_id = AWS_H2_STREAM_ID_MAX; connection->thread_data.goaway_sent_last_stream_id = AWS_H2_STREAM_ID_MAX; aws_crt_statistics_http2_channel_init(&connection->thread_data.stats); connection->thread_data.stats.was_inactive = true; /* Start with non active streams */ connection->synced_data.is_open = true; 
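    /* While the connection is open, new_stream_error_code stays AWS_ERROR_SUCCESS; s_stop() later
     * switches it to AWS_ERROR_HTTP_CONNECTION_CLOSED so that new stream creation is rejected. */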
connection->synced_data.new_stream_error_code = AWS_ERROR_SUCCESS; /* Create a new decoder */ struct aws_h2_decoder_params params = { .alloc = alloc, .vtable = &s_h2_decoder_vtable, .userdata = connection, .logging_id = connection, .is_server = server, }; connection->thread_data.decoder = aws_h2_decoder_new(¶ms); if (!connection->thread_data.decoder) { CONNECTION_LOGF( ERROR, connection, "Decoder init error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto error; } if (aws_h2_frame_encoder_init(&connection->thread_data.encoder, alloc, &connection->base)) { CONNECTION_LOGF( ERROR, connection, "Encoder init error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* User data from connection base is not ready until the handler installed */ connection->thread_data.init_pending_settings = s_new_pending_settings( connection->base.alloc, http2_options->initial_settings_array, http2_options->num_initial_settings, http2_options->on_initial_settings_completed, NULL /* user_data is set later... */); if (!connection->thread_data.init_pending_settings) { goto error; } /* We enqueue the inital settings when handler get installed */ return connection; error: s_handler_destroy(&connection->base.channel_handler); return NULL; } struct aws_http_connection *aws_http_connection_new_http2_server( struct aws_allocator *allocator, bool manual_window_management, const struct aws_http2_connection_options *http2_options) { struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, true); if (!connection) { return NULL; } connection->base.server_data = &connection->base.client_or_server_data.server; return &connection->base; } struct aws_http_connection *aws_http_connection_new_http2_client( struct aws_allocator *allocator, bool manual_window_management, const struct aws_http2_connection_options *http2_options) { struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, false); if (!connection) { return NULL; } connection->base.client_data = &connection->base.client_or_server_data.client; return &connection->base; } static void s_handler_destroy(struct aws_channel_handler *handler) { struct aws_h2_connection *connection = handler->impl; CONNECTION_LOG(TRACE, connection, "Destroying connection"); /* No streams should be left in internal datastructures */ AWS_ASSERT( !aws_hash_table_is_valid(&connection->thread_data.active_streams_map) || aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0); AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.waiting_streams_list)); AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.stalled_window_streams_list)); AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.outgoing_streams_list)); AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_stream_list)); AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_frame_list)); AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_settings_list)); AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_ping_list)); AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_goaway_list)); AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.pending_ping_queue)); AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.pending_settings_queue)); /* Clean up any unsent frames and structures */ struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue; while 
(!aws_linked_list_empty(outgoing_frames_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(outgoing_frames_queue); struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node); aws_h2_frame_destroy(frame); } if (connection->thread_data.init_pending_settings) { /* if initial settings were never sent, we need to clear the memory here */ aws_mem_release(connection->base.alloc, connection->thread_data.init_pending_settings); } aws_h2_decoder_destroy(connection->thread_data.decoder); aws_h2_frame_encoder_clean_up(&connection->thread_data.encoder); aws_hash_table_clean_up(&connection->thread_data.active_streams_map); aws_cache_destroy(connection->thread_data.closed_streams); aws_mutex_clean_up(&connection->synced_data.lock); aws_mem_release(connection->base.alloc, connection); } static struct aws_h2_pending_settings *s_new_pending_settings( struct aws_allocator *allocator, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data) { size_t settings_storage_size = sizeof(struct aws_http2_setting) * num_settings; struct aws_h2_pending_settings *pending_settings; void *settings_storage; if (!aws_mem_acquire_many( allocator, 2, &pending_settings, sizeof(struct aws_h2_pending_settings), &settings_storage, settings_storage_size)) { return NULL; } AWS_ZERO_STRUCT(*pending_settings); /* We buffer the settings up, incase the caller has freed them when the ACK arrives */ pending_settings->settings_array = settings_storage; if (settings_array) { memcpy(pending_settings->settings_array, settings_array, num_settings * sizeof(struct aws_http2_setting)); } pending_settings->num_settings = num_settings; pending_settings->on_completed = on_completed; pending_settings->user_data = user_data; return pending_settings; } static struct aws_h2_pending_ping *s_new_pending_ping( struct aws_allocator *allocator, const struct aws_byte_cursor *optional_opaque_data, const uint64_t started_time, void *user_data, aws_http2_on_ping_complete_fn *on_completed) { struct aws_h2_pending_ping *pending_ping = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_pending_ping)); if (!pending_ping) { return NULL; } if (optional_opaque_data) { memcpy(pending_ping->opaque_data, optional_opaque_data->ptr, AWS_HTTP2_PING_DATA_SIZE); } pending_ping->started_time = started_time; pending_ping->on_completed = on_completed; pending_ping->user_data = user_data; return pending_ping; } static struct aws_h2_pending_goaway *s_new_pending_goaway( struct aws_allocator *allocator, uint32_t http2_error, bool allow_more_streams, const struct aws_byte_cursor *optional_debug_data) { struct aws_byte_cursor debug_data; AWS_ZERO_STRUCT(debug_data); if (optional_debug_data) { debug_data = *optional_debug_data; } struct aws_h2_pending_goaway *pending_goaway; void *debug_data_storage; /* mem acquire cannot fail anymore */ aws_mem_acquire_many( allocator, 2, &pending_goaway, sizeof(struct aws_h2_pending_goaway), &debug_data_storage, debug_data.len); if (debug_data.len) { memcpy(debug_data_storage, debug_data.ptr, debug_data.len); debug_data.ptr = debug_data_storage; } pending_goaway->debug_data = debug_data; pending_goaway->http2_error = http2_error; pending_goaway->allow_more_streams = allow_more_streams; return pending_goaway; } void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame) { AWS_PRECONDITION(frame->type != AWS_H2_FRAME_T_DATA); 
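    /* DATA frames never pass through this queue; stream bodies are encoded on demand from
     * outgoing_streams_list in s_encode_data_from_outgoing_streams(). */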
AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (frame->high_priority) { /* Check from the head of the queue, and find a node with normal priority, and insert before it */ struct aws_linked_list_node *iter = aws_linked_list_begin(&connection->thread_data.outgoing_frames_queue); /* one past the last element */ const struct aws_linked_list_node *end = aws_linked_list_end(&connection->thread_data.outgoing_frames_queue); while (iter != end) { struct aws_h2_frame *frame_i = AWS_CONTAINER_OF(iter, struct aws_h2_frame, node); if (connection->thread_data.current_outgoing_frame == frame_i) { iter = iter->next; continue; } if (!frame_i->high_priority) { break; } iter = iter->next; } aws_linked_list_insert_before(iter, &frame->node); } else { aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &frame->node); } } static void s_on_channel_write_complete( struct aws_channel *channel, struct aws_io_message *message, int err_code, void *user_data) { (void)message; struct aws_h2_connection *connection = user_data; if (err_code) { CONNECTION_LOGF(ERROR, connection, "Message did not write to network, error %s", aws_error_name(err_code)); aws_h2_connection_shutdown_due_to_write_err(connection, err_code); return; } CONNECTION_LOG(TRACE, connection, "Message finished writing to network. Rescheduling outgoing frame task"); /* To avoid wasting memory, we only want ONE of our written aws_io_messages in the channel at a time. * Therefore, we wait until it's written to the network before trying to send another * by running the outgoing-frame-task again. * * We also want to share the network with other channels. * Therefore, when the write completes, we SCHEDULE the outgoing-frame-task * to run again instead of calling the function directly. * This way, if the message completes synchronously, * we're not hogging the network by writing message after message in a tight loop */ aws_channel_schedule_task_now(channel, &connection->outgoing_frames_task); } static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_h2_connection *connection = arg; s_write_outgoing_frames(connection, false /*first_try*/); } static void s_write_outgoing_frames(struct aws_h2_connection *connection, bool first_try) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_PRECONDITION(connection->thread_data.is_outgoing_frames_task_active); struct aws_channel_slot *channel_slot = connection->base.channel_slot; struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue; struct aws_linked_list *outgoing_streams_list = &connection->thread_data.outgoing_streams_list; if (connection->thread_data.is_writing_stopped) { return; } /* Determine whether there's work to do, and end task immediately if there's not. 
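 * Control frames queued in outgoing_frames_queue are always eligible; DATA frames additionally
 * require peer flow-control window space.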
* Note that we stop writing DATA frames if the channel is trying to shut down */ bool has_control_frames = !aws_linked_list_empty(outgoing_frames_queue); bool has_data_frames = !aws_linked_list_empty(outgoing_streams_list); bool may_write_data_frames = (connection->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE) && !connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written; bool will_write = has_control_frames || (has_data_frames && may_write_data_frames); if (!will_write) { if (!first_try) { CONNECTION_LOGF( TRACE, connection, "Outgoing frames task stopped. has_control_frames:%d has_data_frames:%d may_write_data_frames:%d", has_control_frames, has_data_frames, may_write_data_frames); } connection->thread_data.is_outgoing_frames_task_active = false; if (connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written) { s_finish_shutdown(connection); } return; } if (first_try) { CONNECTION_LOG(TRACE, connection, "Starting outgoing frames task"); } /* Acquire aws_io_message, that we will attempt to fill up */ struct aws_io_message *msg = aws_channel_slot_acquire_max_message_for_write(channel_slot); if (AWS_UNLIKELY(!msg)) { CONNECTION_LOG(ERROR, connection, "Failed to acquire message from pool, closing connection."); goto error; } /* Set up callback so we can send another message when this one completes */ msg->on_completion = s_on_channel_write_complete; msg->user_data = connection; CONNECTION_LOGF( TRACE, connection, "Outgoing frames task acquired message with %zu bytes available", msg->message_data.capacity - msg->message_data.len); /* Write as many frames from outgoing_frames_queue as possible. */ if (s_encode_outgoing_frames_queue(connection, &msg->message_data)) { goto error; } /* If outgoing_frames_queue emptied, and connection is running normally, * then write as many DATA frames from outgoing_streams_list as possible. */ if (aws_linked_list_empty(outgoing_frames_queue) && may_write_data_frames) { if (s_encode_data_from_outgoing_streams(connection, &msg->message_data)) { goto error; } } if (msg->message_data.len) { /* Write message to channel. * outgoing_frames_task will resume when message completes. */ CONNECTION_LOGF(TRACE, connection, "Outgoing frames task sending message of size %zu", msg->message_data.len); if (aws_channel_slot_send_message(channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) { CONNECTION_LOGF( ERROR, connection, "Failed to send channel message: %s. Closing connection.", aws_error_name(aws_last_error())); goto error; } } else { /* Message is empty, warn that no work is being done and reschedule the task to try again next tick. * It's likely that body isn't ready, so body streaming function has no data to write yet. * If this scenario turns out to be common we should implement a "pause" feature. 
*/ CONNECTION_LOG(WARN, connection, "Outgoing frames task sent no data, will try again next tick."); aws_mem_release(msg->allocator, msg); aws_channel_schedule_task_now(channel_slot->channel, &connection->outgoing_frames_task); } return; error:; int error_code = aws_last_error(); if (msg) { aws_mem_release(msg->allocator, msg); } aws_h2_connection_shutdown_due_to_write_err(connection, error_code); } /* Write as many frames from outgoing_frames_queue as possible (contains all non-DATA frames) */ static int s_encode_outgoing_frames_queue(struct aws_h2_connection *connection, struct aws_byte_buf *output) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue; /* Write as many frames from outgoing_frames_queue as possible. */ while (!aws_linked_list_empty(outgoing_frames_queue)) { struct aws_linked_list_node *frame_node = aws_linked_list_front(outgoing_frames_queue); struct aws_h2_frame *frame = AWS_CONTAINER_OF(frame_node, struct aws_h2_frame, node); connection->thread_data.current_outgoing_frame = frame; bool frame_complete; if (aws_h2_encode_frame(&connection->thread_data.encoder, frame, output, &frame_complete)) { CONNECTION_LOGF( ERROR, connection, "Error encoding frame: type=%s stream=%" PRIu32 " error=%s", aws_h2_frame_type_to_str(frame->type), frame->stream_id, aws_error_name(aws_last_error())); return AWS_OP_ERR; } if (!frame_complete) { if (output->len == 0) { /* We're in trouble if an empty message isn't big enough for this frame to do any work with */ CONNECTION_LOGF( ERROR, connection, "Message is too small for encoder. frame-type=%s stream=%" PRIu32 " available-space=%zu", aws_h2_frame_type_to_str(frame->type), frame->stream_id, output->capacity); aws_raise_error(AWS_ERROR_INVALID_STATE); return AWS_OP_ERR; } CONNECTION_LOG(TRACE, connection, "Outgoing frames task filled message, and has more frames to send later"); break; } /* Done encoding frame, pop from queue and cleanup*/ aws_linked_list_remove(frame_node); aws_h2_frame_destroy(frame); connection->thread_data.current_outgoing_frame = NULL; } return AWS_OP_SUCCESS; } /* Write as many DATA frames from outgoing_streams_list as possible. */ static int s_encode_data_from_outgoing_streams(struct aws_h2_connection *connection, struct aws_byte_buf *output) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); struct aws_linked_list *outgoing_streams_list = &connection->thread_data.outgoing_streams_list; if (aws_linked_list_empty(outgoing_streams_list)) { return AWS_OP_SUCCESS; } struct aws_linked_list *stalled_window_streams_list = &connection->thread_data.stalled_window_streams_list; struct aws_linked_list *waiting_streams_list = &connection->thread_data.waiting_streams_list; /* If a stream stalls, put it in this list until the function ends so we don't keep trying to read from it. * We put it back at the end of function. */ struct aws_linked_list stalled_streams_list; aws_linked_list_init(&stalled_streams_list); int aws_error_code = 0; /* We simply round-robin through streams, instead of using stream priority. * Respecting priority is not required (RFC-7540 5.3), so we're ignoring it for now. 
This also keeps us safe
     * from priority DOS attacks: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9513 */
    while (!aws_linked_list_empty(outgoing_streams_list)) {
        if (connection->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
            CONNECTION_LOGF(
                DEBUG,
                connection,
                "Peer connection's flow-control window is too small now %zu. Connection will stop sending DATA until "
                "WINDOW_UPDATE is received.",
                connection->thread_data.window_size_peer);
            goto done;
        }

        /* Stop looping if message is so full it's not worth the bother */
        size_t space_available = output->capacity - output->len;
        size_t worth_trying_threshold = AWS_H2_FRAME_PREFIX_SIZE * 2;
        if (space_available < worth_trying_threshold) {
            CONNECTION_LOG(TRACE, connection, "Outgoing frames task filled message, and has more frames to send later");
            goto done;
        }

        struct aws_linked_list_node *node = aws_linked_list_pop_front(outgoing_streams_list);
        struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node);

        /* Ask stream to encode a data frame.
         * Stream may complete itself as a result of encoding its data,
         * in which case it will vanish from the connection's datastructures as a side-effect of this call.
         * But if stream has more data to send, push it back into the appropriate list. */
        int data_encode_status;
        if (aws_h2_stream_encode_data_frame(stream, &connection->thread_data.encoder, output, &data_encode_status)) {
            aws_error_code = aws_last_error();
            CONNECTION_LOGF(
                ERROR,
                connection,
                "Connection error while encoding DATA on stream %" PRIu32 ", %s",
                stream->base.id,
                aws_error_name(aws_error_code));
            goto done;
        }

        /* If stream has more data, push it into the appropriate list. */
        switch (data_encode_status) {
            case AWS_H2_DATA_ENCODE_COMPLETE:
                break;
            case AWS_H2_DATA_ENCODE_ONGOING:
                aws_linked_list_push_back(outgoing_streams_list, node);
                break;
            case AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED:
                aws_linked_list_push_back(&stalled_streams_list, node);
                break;
            case AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES:
                stream->thread_data.waiting_for_writes = true;
                aws_linked_list_push_back(waiting_streams_list, node);
                break;
            case AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED:
                aws_linked_list_push_back(stalled_window_streams_list, node);
                AWS_H2_STREAM_LOG(
                    DEBUG,
                    stream,
                    "Peer stream's flow-control window is too small. Data frames on this stream will not be sent until "
                    "WINDOW_UPDATE. ");
                break;
            default:
                CONNECTION_LOG(ERROR, connection, "Data encode status is invalid.");
                aws_error_code = AWS_ERROR_INVALID_STATE;
        }
    }

done:
    /* Return any stalled streams to outgoing_streams_list */
    while (!aws_linked_list_empty(&stalled_streams_list)) {
        aws_linked_list_push_back(outgoing_streams_list, aws_linked_list_pop_front(&stalled_streams_list));
    }

    if (aws_error_code) {
        return aws_raise_error(aws_error_code);
    }

    if (aws_linked_list_empty(outgoing_streams_list)) {
        /* transition from something to write -> nothing to write */
        uint64_t now_ns = 0;
        aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
        s_add_time_measurement_to_stats(
            connection->thread_data.outgoing_timestamp_ns,
            now_ns,
            &connection->thread_data.stats.pending_outgoing_stream_ms);
    }

    return AWS_OP_SUCCESS;
}

/* If the outgoing-frames-task isn't scheduled, run it immediately.
*/ void aws_h2_try_write_outgoing_frames(struct aws_h2_connection *connection) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (connection->thread_data.is_outgoing_frames_task_active) { return; } connection->thread_data.is_outgoing_frames_task_active = true; s_write_outgoing_frames(connection, true /*first_try*/); } /** * Returns successfully and sets `out_stream` if stream is currently active. * Returns successfully and sets `out_stream` to NULL if the frame should be ignored. * Returns failed aws_h2err if it is a connection error to receive this frame. */ struct aws_h2err s_get_active_stream_for_incoming_frame( struct aws_h2_connection *connection, uint32_t stream_id, enum aws_h2_frame_type frame_type, struct aws_h2_stream **out_stream) { *out_stream = NULL; /* Check active streams */ struct aws_hash_element *found = NULL; const void *stream_id_key = (void *)(size_t)stream_id; aws_hash_table_find(&connection->thread_data.active_streams_map, stream_id_key, &found); if (found) { /* Found it! return */ *out_stream = found->value; return AWS_H2ERR_SUCCESS; } bool client_initiated = (stream_id % 2) == 1; bool self_initiated_stream = client_initiated && (connection->base.client_data != NULL); bool peer_initiated_stream = !self_initiated_stream; if ((self_initiated_stream && stream_id >= connection->base.next_stream_id) || (peer_initiated_stream && stream_id > connection->thread_data.latest_peer_initiated_stream_id)) { /* Illegal to receive frames for a stream in the idle state (stream doesn't exist yet) * (except server receiving HEADERS to start a stream, but that's handled elsewhere) */ CONNECTION_LOGF( ERROR, connection, "Illegal to receive %s frame on stream id=%" PRIu32 " state=IDLE", aws_h2_frame_type_to_str(frame_type), stream_id); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } if (peer_initiated_stream && stream_id > connection->thread_data.goaway_sent_last_stream_id) { /* Once GOAWAY sent, ignore frames for peer-initiated streams whose id > last-stream-id */ CONNECTION_LOGF( TRACE, connection, "Ignoring %s frame on stream id=%" PRIu32 " because GOAWAY sent with last-stream-id=%" PRIu32, aws_h2_frame_type_to_str(frame_type), stream_id, connection->thread_data.goaway_sent_last_stream_id); return AWS_H2ERR_SUCCESS; } void *cached_value = NULL; /* Stream is closed, check whether it's legal for a few more frames to trickle in */ if (aws_cache_find(connection->thread_data.closed_streams, stream_id_key, &cached_value)) { return aws_h2err_from_last_error(); } if (cached_value) { if (frame_type == AWS_H2_FRAME_T_PRIORITY) { /* If we support PRIORITY, do something here. Right now just ignore it */ return AWS_H2ERR_SUCCESS; } enum aws_h2_stream_closed_when closed_when = (enum aws_h2_stream_closed_when)(size_t)cached_value; switch (closed_when) { case AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM: /* WINDOW_UPDATE or RST_STREAM frames can be received ... for a short period after * a DATA or HEADERS frame containing an END_STREAM flag is sent. 
                 * Endpoints MUST ignore WINDOW_UPDATE or RST_STREAM frames received in this state */
                if (frame_type == AWS_H2_FRAME_T_WINDOW_UPDATE || frame_type == AWS_H2_FRAME_T_RST_STREAM) {
                    CONNECTION_LOGF(
                        TRACE,
                        connection,
                        "Ignoring %s frame on stream id=%" PRIu32 " because END_STREAM flag was recently sent.",
                        aws_h2_frame_type_to_str(frame_type),
                        stream_id);
                    return AWS_H2ERR_SUCCESS;
                } else {
                    CONNECTION_LOGF(
                        ERROR,
                        connection,
                        "Illegal to receive %s frame on stream id=%" PRIu32 " after END_STREAM has been received.",
                        aws_h2_frame_type_to_str(frame_type),
                        stream_id);
                    return aws_h2err_from_h2_code(AWS_HTTP2_ERR_STREAM_CLOSED);
                }
                break;

            case AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED:
                /* An endpoint that receives any frame other than PRIORITY after receiving a RST_STREAM
                 * MUST treat that as a stream error (Section 5.4.2) of type STREAM_CLOSED */
                CONNECTION_LOGF(
                    ERROR,
                    connection,
                    "Illegal to receive %s frame on stream id=%" PRIu32 " after RST_STREAM has been received",
                    aws_h2_frame_type_to_str(frame_type),
                    stream_id);
                struct aws_h2_frame *rst_stream =
                    aws_h2_frame_new_rst_stream(connection->base.alloc, stream_id, AWS_HTTP2_ERR_STREAM_CLOSED);
                if (!rst_stream) {
                    CONNECTION_LOGF(
                        ERROR, connection, "Error creating RST_STREAM frame, %s", aws_error_name(aws_last_error()));
                    return aws_h2err_from_last_error();
                }
                aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream);
                return AWS_H2ERR_SUCCESS;

            case AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT:
                /* An endpoint MUST ignore frames that it receives on closed streams after it has sent a RST_STREAM
                 * frame */
                CONNECTION_LOGF(
                    TRACE,
                    connection,
                    "Ignoring %s frame on stream id=%" PRIu32 " because RST_STREAM was recently sent.",
                    aws_h2_frame_type_to_str(frame_type),
                    stream_id);
                return AWS_H2ERR_SUCCESS;
                break;

            default:
                CONNECTION_LOGF(
                    ERROR, connection, "Invalid state for cached closed stream, stream id=%" PRIu32, stream_id);
                return aws_h2err_from_h2_code(AWS_HTTP2_ERR_INTERNAL_ERROR);
                break;
        }
    }

    if (frame_type == AWS_H2_FRAME_T_PRIORITY) {
        /* ignored if the stream has been removed from the dependency tree */
        return AWS_H2ERR_SUCCESS;
    }

    /* Stream closed (purged from closed_streams, or implicitly closed when its ID was skipped) */
    CONNECTION_LOGF(
        ERROR,
        connection,
        "Illegal to receive %s frame on stream id=%" PRIu32
        ", no memory of closed stream (ID skipped, or removed from cache)",
        aws_h2_frame_type_to_str(frame_type),
        stream_id);

    return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
}

/* Decoder callbacks */

struct aws_h2err s_decoder_on_headers_begin(uint32_t stream_id, void *userdata) {
    struct aws_h2_connection *connection = userdata;

    if (connection->base.server_data) {
        /* Server would create new request-handler stream...
*/ return aws_h2err_from_aws_code(AWS_ERROR_UNIMPLEMENTED); } struct aws_h2_stream *stream; struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream); if (aws_h2err_failed(err)) { return err; } if (stream) { err = aws_h2_stream_on_decoder_headers_begin(stream); if (aws_h2err_failed(err)) { return err; } } return AWS_H2ERR_SUCCESS; } struct aws_h2err s_decoder_on_headers_i( uint32_t stream_id, const struct aws_http_header *header, enum aws_http_header_name name_enum, enum aws_http_header_block block_type, void *userdata) { struct aws_h2_connection *connection = userdata; struct aws_h2_stream *stream; struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream); if (aws_h2err_failed(err)) { return err; } if (stream) { err = aws_h2_stream_on_decoder_headers_i(stream, header, name_enum, block_type); if (aws_h2err_failed(err)) { return err; } } return AWS_H2ERR_SUCCESS; } struct aws_h2err s_decoder_on_headers_end( uint32_t stream_id, bool malformed, enum aws_http_header_block block_type, void *userdata) { struct aws_h2_connection *connection = userdata; struct aws_h2_stream *stream; struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream); if (aws_h2err_failed(err)) { return err; } if (stream) { err = aws_h2_stream_on_decoder_headers_end(stream, malformed, block_type); if (aws_h2err_failed(err)) { return err; } } return AWS_H2ERR_SUCCESS; } struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised_stream_id, void *userdata) { struct aws_h2_connection *connection = userdata; AWS_ASSERT(connection->base.client_data); /* decoder has already enforced this */ AWS_ASSERT(promised_stream_id % 2 == 0); /* decoder has already enforced this */ /* The identifier of a newly established stream MUST be numerically greater * than all streams that the initiating endpoint has opened or reserved (RFC-7540 5.1.1) */ if (promised_stream_id <= connection->thread_data.latest_peer_initiated_stream_id) { CONNECTION_LOGF( ERROR, connection, "Newly promised stream ID %" PRIu32 " must be higher than previously established ID %" PRIu32, promised_stream_id, connection->thread_data.latest_peer_initiated_stream_id); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } connection->thread_data.latest_peer_initiated_stream_id = promised_stream_id; /* If we ever fully support PUSH_PROMISE, this is where we'd add the * promised_stream_id to some reserved_streams datastructure */ struct aws_h2_stream *stream; struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_PUSH_PROMISE, &stream); if (aws_h2err_failed(err)) { return err; } if (stream) { err = aws_h2_stream_on_decoder_push_promise(stream, promised_stream_id); if (aws_h2err_failed(err)) { return err; } } return AWS_H2ERR_SUCCESS; } static int s_connection_send_update_window(struct aws_h2_connection *connection, uint32_t window_size) { struct aws_h2_frame *connection_window_update_frame = aws_h2_frame_new_window_update(connection->base.alloc, 0, window_size); if (!connection_window_update_frame) { CONNECTION_LOGF( ERROR, connection, "WINDOW_UPDATE frame on connection failed to be sent, error %s", aws_error_name(aws_last_error())); return AWS_OP_ERR; } aws_h2_connection_enqueue_outgoing_frame(connection, connection_window_update_frame); connection->thread_data.window_size_self += window_size; return AWS_OP_SUCCESS; } struct 
aws_h2err s_decoder_on_data_begin( uint32_t stream_id, uint32_t payload_len, uint32_t total_padding_bytes, bool end_stream, void *userdata) { struct aws_h2_connection *connection = userdata; /* A receiver that receives a flow-controlled frame MUST always account for its contribution against the connection * flow-control window, unless the receiver treats this as a connection error */ if (aws_sub_size_checked( connection->thread_data.window_size_self, payload_len, &connection->thread_data.window_size_self)) { CONNECTION_LOGF( ERROR, connection, "DATA length %" PRIu32 " exceeds flow-control window %zu", payload_len, connection->thread_data.window_size_self); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); } struct aws_h2_stream *stream; struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_DATA, &stream); if (aws_h2err_failed(err)) { return err; } if (stream) { err = aws_h2_stream_on_decoder_data_begin(stream, payload_len, total_padding_bytes, end_stream); if (aws_h2err_failed(err)) { return err; } } /* Handle automatic updates of the connection flow window */ uint32_t auto_window_update; if (connection->conn_manual_window_management) { /* Automatically update the flow-window to account for padding, even though "manual window management" * is enabled. We do this because the current API doesn't have any way to inform the user about padding, * so we can't expect them to manage it themselves. */ auto_window_update = total_padding_bytes; } else { /* Automatically update the full amount we just received */ auto_window_update = payload_len; } if (auto_window_update != 0) { if (s_connection_send_update_window(connection, auto_window_update)) { return aws_h2err_from_last_error(); } CONNECTION_LOGF( TRACE, connection, "Automatically updating connection window by %" PRIu32 "(%" PRIu32 " due to padding).", auto_window_update, total_padding_bytes); } return AWS_H2ERR_SUCCESS; } struct aws_h2err s_decoder_on_data_i(uint32_t stream_id, struct aws_byte_cursor data, void *userdata) { struct aws_h2_connection *connection = userdata; /* Pass data to stream */ struct aws_h2_stream *stream; struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_DATA, &stream); if (aws_h2err_failed(err)) { return err; } if (stream) { err = aws_h2_stream_on_decoder_data_i(stream, data); if (aws_h2err_failed(err)) { return err; } } return AWS_H2ERR_SUCCESS; } struct aws_h2err s_decoder_on_end_stream(uint32_t stream_id, void *userdata) { struct aws_h2_connection *connection = userdata; /* Not calling s_get_active_stream_for_incoming_frame() here because END_STREAM * isn't an actual frame type. It's a flag on DATA or HEADERS frames, and we * already checked the legality of those frames in their respective callbacks. 
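     * (On the wire, END_STREAM is simply the 0x1 flag bit on a DATA or HEADERS frame, per RFC-7540 6.1/6.2;
     * the decoder surfaces it as this separate callback for convenience.)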
*/ struct aws_hash_element *found = NULL; aws_hash_table_find(&connection->thread_data.active_streams_map, (void *)(size_t)stream_id, &found); if (found) { struct aws_h2_stream *stream = found->value; struct aws_h2err err = aws_h2_stream_on_decoder_end_stream(stream); if (aws_h2err_failed(err)) { return err; } } return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_rst_stream(uint32_t stream_id, uint32_t h2_error_code, void *userdata) { struct aws_h2_connection *connection = userdata; /* Pass RST_STREAM to stream */ struct aws_h2_stream *stream; struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_RST_STREAM, &stream); if (aws_h2err_failed(err)) { return err; } if (stream) { err = aws_h2_stream_on_decoder_rst_stream(stream, h2_error_code); if (aws_h2err_failed(err)) { return err; } } return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_ping_ack(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) { struct aws_h2_connection *connection = userdata; if (aws_linked_list_empty(&connection->thread_data.pending_ping_queue)) { CONNECTION_LOG(ERROR, connection, "Received extraneous PING ACK."); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } struct aws_h2err err; struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_ping_queue); struct aws_h2_pending_ping *pending_ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node); /* Check the payload */ if (!aws_array_eq(opaque_data, AWS_HTTP2_PING_DATA_SIZE, pending_ping->opaque_data, AWS_HTTP2_PING_DATA_SIZE)) { CONNECTION_LOG(ERROR, connection, "Received PING ACK with mismatched opaque-data."); err = aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); goto error; } uint64_t time_stamp; if (aws_high_res_clock_get_ticks(&time_stamp)) { CONNECTION_LOGF( ERROR, connection, "Failed getting the time stamp when PING ACK received, error %s", aws_error_name(aws_last_error())); err = aws_h2err_from_last_error(); goto error; } uint64_t rtt; if (aws_sub_u64_checked(time_stamp, pending_ping->started_time, &rtt)) { CONNECTION_LOGF( ERROR, connection, "Overflow from time stamp when PING ACK received, error %s", aws_error_name(aws_last_error())); err = aws_h2err_from_last_error(); goto error; } CONNECTION_LOGF(TRACE, connection, "Round trip time is %lf ms, approximately", (double)rtt / 1000000); /* fire the callback */ if (pending_ping->on_completed) { pending_ping->on_completed(&connection->base, rtt, AWS_ERROR_SUCCESS, pending_ping->user_data); } aws_mem_release(connection->base.alloc, pending_ping); return AWS_H2ERR_SUCCESS; error: if (pending_ping->on_completed) { pending_ping->on_completed(&connection->base, 0 /* fake rtt */, err.aws_code, pending_ping->user_data); } aws_mem_release(connection->base.alloc, pending_ping); return err; } static struct aws_h2err s_decoder_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) { struct aws_h2_connection *connection = userdata; /* send a PING frame with the ACK flag set in response, with an identical payload. 
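     * RFC-7540 6.7 requires the ACK to echo the sender's 8-byte opaque payload unchanged, which is why
     * opaque_data is passed straight through below. For the outbound direction, a rough caller-side sketch
     * (assuming the public wrapper over s_connection_send_ping() is named aws_http2_connection_ping();
     * the callback shape matches aws_http2_on_ping_complete_fn used elsewhere in this file):
     *
     *     static void s_on_ping_complete(
     *         struct aws_http_connection *conn, uint64_t rtt_ns, int error_code, void *user_data) {
     *         (void)conn;
     *         if (error_code == AWS_ERROR_SUCCESS) {
     *             record_rtt_ns(rtt_ns, user_data);   // record_rtt_ns() is a hypothetical helper
     *         }
     *     }
     *
     *     aws_http2_connection_ping(connection, NULL, s_on_ping_complete, NULL);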
*/ struct aws_h2_frame *ping_ack_frame = aws_h2_frame_new_ping(connection->base.alloc, true, opaque_data); if (!ping_ack_frame) { CONNECTION_LOGF( ERROR, connection, "Ping ACK frame failed to be sent, error %s", aws_error_name(aws_last_error())); return aws_h2err_from_last_error(); } aws_h2_connection_enqueue_outgoing_frame(connection, ping_ack_frame); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_settings( const struct aws_http2_setting *settings_array, size_t num_settings, void *userdata) { struct aws_h2_connection *connection = userdata; struct aws_h2err err; /* Once all values have been processed, the recipient MUST immediately emit a SETTINGS frame with the ACK flag * set.(RFC-7540 6.5.3) */ CONNECTION_LOG(TRACE, connection, "Setting frame processing ends"); struct aws_h2_frame *settings_ack_frame = aws_h2_frame_new_settings(connection->base.alloc, NULL, 0, true); if (!settings_ack_frame) { CONNECTION_LOGF( ERROR, connection, "Settings ACK frame failed to be sent, error %s", aws_error_name(aws_last_error())); return aws_h2err_from_last_error(); } aws_h2_connection_enqueue_outgoing_frame(connection, settings_ack_frame); /* Allocate a block of memory for settings_array in callback, which will only includes the settings we changed, * freed once the callback finished */ struct aws_http2_setting *callback_array = NULL; if (num_settings) { callback_array = aws_mem_acquire(connection->base.alloc, num_settings * sizeof(struct aws_http2_setting)); if (!callback_array) { return aws_h2err_from_last_error(); } } size_t callback_array_num = 0; /* Apply the change to encoder and connection */ struct aws_h2_frame_encoder *encoder = &connection->thread_data.encoder; for (size_t i = 0; i < num_settings; i++) { if (connection->thread_data.settings_peer[settings_array[i].id] == settings_array[i].value) { /* No change, don't do any work */ continue; } switch (settings_array[i].id) { case AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE: { aws_h2_frame_encoder_set_setting_header_table_size(encoder, settings_array[i].value); } break; case AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE: { /* When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream * flow-control windows that it maintains by the difference between the new value and the old value. 
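     *
     * For example, if the peer lowers INITIAL_WINDOW_SIZE from the default 65,535 to 16,384, every open
     * stream's send window shrinks by 49,151 and may legally become negative (RFC-7540 6.9.2); the loop
     * below applies exactly that signed difference via aws_h2_stream_window_size_change().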
*/ int32_t size_changed = settings_array[i].value - connection->thread_data.settings_peer[settings_array[i].id]; struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map); while (!aws_hash_iter_done(&stream_iter)) { struct aws_h2_stream *stream = stream_iter.element.value; aws_hash_iter_next(&stream_iter); err = aws_h2_stream_window_size_change(stream, size_changed, false /*self*/); if (aws_h2err_failed(err)) { CONNECTION_LOG( ERROR, connection, "Connection error, change to SETTINGS_INITIAL_WINDOW_SIZE caused a stream's flow-control " "window to exceed the maximum size"); goto error; } } } break; case AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE: { aws_h2_frame_encoder_set_setting_max_frame_size(encoder, settings_array[i].value); } break; default: break; } connection->thread_data.settings_peer[settings_array[i].id] = settings_array[i].value; callback_array[callback_array_num++] = settings_array[i]; } if (connection->on_remote_settings_change) { connection->on_remote_settings_change( &connection->base, callback_array, callback_array_num, connection->base.user_data); } { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); memcpy( connection->synced_data.settings_peer, connection->thread_data.settings_peer, sizeof(connection->thread_data.settings_peer)); s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ aws_mem_release(connection->base.alloc, callback_array); return AWS_H2ERR_SUCCESS; error: aws_mem_release(connection->base.alloc, callback_array); return err; } static struct aws_h2err s_decoder_on_settings_ack(void *userdata) { struct aws_h2_connection *connection = userdata; if (aws_linked_list_empty(&connection->thread_data.pending_settings_queue)) { CONNECTION_LOG(ERROR, connection, "Received a malicious extra SETTINGS acknowledgment"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } struct aws_h2err err; struct aws_h2_pending_settings *pending_settings = NULL; struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_settings_queue); pending_settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node); struct aws_http2_setting *settings_array = pending_settings->settings_array; /* Apply the settings */ struct aws_h2_decoder *decoder = connection->thread_data.decoder; for (size_t i = 0; i < pending_settings->num_settings; i++) { if (connection->thread_data.settings_self[settings_array[i].id] == settings_array[i].value) { /* No change, don't do any work */ continue; } switch (settings_array[i].id) { case AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE: { aws_h2_decoder_set_setting_header_table_size(decoder, settings_array[i].value); } break; case AWS_HTTP2_SETTINGS_ENABLE_PUSH: { aws_h2_decoder_set_setting_enable_push(decoder, settings_array[i].value); } break; case AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE: { /* When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream * flow-control windows that it maintains by the difference between the new value and the old value. 
*/ int32_t size_changed = settings_array[i].value - connection->thread_data.settings_self[settings_array[i].id]; struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map); while (!aws_hash_iter_done(&stream_iter)) { struct aws_h2_stream *stream = stream_iter.element.value; aws_hash_iter_next(&stream_iter); err = aws_h2_stream_window_size_change(stream, size_changed, true /*self*/); if (aws_h2err_failed(err)) { CONNECTION_LOG( ERROR, connection, "Connection error, change to SETTINGS_INITIAL_WINDOW_SIZE from internal caused a stream's " "flow-control window to exceed the maximum size"); goto error; } } } break; case AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE: { aws_h2_decoder_set_setting_max_frame_size(decoder, settings_array[i].value); } break; default: break; } connection->thread_data.settings_self[settings_array[i].id] = settings_array[i].value; } /* invoke the change settings completed user callback */ if (pending_settings->on_completed) { pending_settings->on_completed(&connection->base, AWS_ERROR_SUCCESS, pending_settings->user_data); } { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); memcpy( connection->synced_data.settings_self, connection->thread_data.settings_self, sizeof(connection->thread_data.settings_self)); s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ /* clean up the pending_settings */ aws_mem_release(connection->base.alloc, pending_settings); return AWS_H2ERR_SUCCESS; error: /* invoke the user callback with error code */ if (pending_settings->on_completed) { pending_settings->on_completed(&connection->base, err.aws_code, pending_settings->user_data); } /* clean up the pending settings here */ aws_mem_release(connection->base.alloc, pending_settings); return err; } static struct aws_h2err s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata) { struct aws_h2_connection *connection = userdata; if (stream_id == 0) { /* Let's update the connection flow-control window size */ if (window_size_increment == 0) { /* flow-control window increment of 0 MUST be treated as error (RFC7540 6.9.1) */ CONNECTION_LOG(ERROR, connection, "Window update frame with 0 increment size"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } if (connection->thread_data.window_size_peer + window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) { /* We MUST NOT allow a flow-control window to exceed the max */ CONNECTION_LOG( ERROR, connection, "Window update frame causes the connection flow-control window exceeding the maximum size"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); } if (connection->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) { CONNECTION_LOGF( DEBUG, connection, "Peer connection's flow-control window is resumed from too small to %" PRIu32 ". 
Connection will resume sending DATA.",
                window_size_increment);
        }
        connection->thread_data.window_size_peer += window_size_increment;
        return AWS_H2ERR_SUCCESS;
    } else {
        /* Update the flow-control window size for stream */
        struct aws_h2_stream *stream;
        bool window_resume;
        struct aws_h2err err =
            s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_WINDOW_UPDATE, &stream);
        if (aws_h2err_failed(err)) {
            return err;
        }
        if (stream) {
            err = aws_h2_stream_on_decoder_window_update(stream, window_size_increment, &window_resume);
            if (aws_h2err_failed(err)) {
                return err;
            }
            if (window_resume) {
                /* Set the stream free from stalled list */
                AWS_H2_STREAM_LOGF(
                    DEBUG,
                    stream,
                    "Peer stream's flow-control window is resumed from 0 or negative to %" PRIu32
                    " Stream will resume sending data.",
                    stream->thread_data.window_size_peer);
                aws_linked_list_remove(&stream->node);
                aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node);
            }
        }
    }
    return AWS_H2ERR_SUCCESS;
}

struct aws_h2err s_decoder_on_goaway(
    uint32_t last_stream,
    uint32_t error_code,
    struct aws_byte_cursor debug_data,
    void *userdata) {

    struct aws_h2_connection *connection = userdata;

    if (last_stream > connection->thread_data.goaway_received_last_stream_id) {
        CONNECTION_LOGF(
            ERROR,
            connection,
            "Received GOAWAY with invalid last-stream-id=%" PRIu32 ", must not exceed previous last-stream-id=%" PRIu32,
            last_stream,
            connection->thread_data.goaway_received_last_stream_id);
        return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
    }

    /* Stop sending any new streams and making new requests */
    { /* BEGIN CRITICAL SECTION */
        s_lock_synced_data(connection);
        connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_GOAWAY_RECEIVED;
        connection->synced_data.goaway_received_last_stream_id = last_stream;
        connection->synced_data.goaway_received_http2_error_code = error_code;
        s_unlock_synced_data(connection);
    } /* END CRITICAL SECTION */

    connection->thread_data.goaway_received_last_stream_id = last_stream;
    CONNECTION_LOGF(
        DEBUG,
        connection,
        "Received GOAWAY error-code=%s(0x%x) last-stream-id=%" PRIu32,
        aws_http2_error_code_to_str(error_code),
        error_code,
        last_stream);

    /* Complete activated streams whose id is higher than last_stream, since they will not be processed by the peer.
     * We should treat them as if they had never been created at all.
     * This would be more efficient if we could iterate streams in reverse-id order */
    struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
    while (!aws_hash_iter_done(&stream_iter)) {
        struct aws_h2_stream *stream = stream_iter.element.value;
        aws_hash_iter_next(&stream_iter);
        if (stream->base.id > last_stream) {
            AWS_H2_STREAM_LOG(
                DEBUG,
                stream,
                "stream ID is higher than GOAWAY last stream ID, please retry this stream on a new connection.");
            s_stream_complete(connection, stream, AWS_ERROR_HTTP_GOAWAY_RECEIVED);
        }
    }

    if (connection->on_goaway_received) {
        /* Inform user about goaway received and the error code. */
        connection->on_goaway_received(
            &connection->base, last_stream, error_code, debug_data, connection->base.user_data);
    }

    return AWS_H2ERR_SUCCESS;
}

/* End decoder callbacks */

static int s_send_connection_preface_client_string(struct aws_h2_connection *connection) {

    /* Just send the magic string on its own aws_io_message.
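     * The client connection preface is the fixed 24-octet sequence "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
     * (RFC-7540 3.5), carried here by aws_h2_connection_preface_client_string.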
*/ struct aws_io_message *msg = aws_channel_acquire_message_from_pool( connection->base.channel_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, aws_h2_connection_preface_client_string.len); if (!msg) { goto error; } if (!aws_byte_buf_write_from_whole_cursor(&msg->message_data, aws_h2_connection_preface_client_string)) { aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } if (aws_channel_slot_send_message(connection->base.channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) { goto error; } return AWS_OP_SUCCESS; error: if (msg) { aws_mem_release(msg->allocator, msg); } return AWS_OP_ERR; } static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(slot->channel)); struct aws_h2_connection *connection = handler->impl; connection->base.channel_slot = slot; /* Acquire a hold on the channel to prevent its destruction until the user has * given the go-ahead via aws_http_connection_release() */ aws_channel_acquire_hold(slot->channel); /* Send HTTP/2 connection preface (RFC-7540 3.5) * - clients must send magic string * - both client and server must send SETTINGS frame */ if (connection->base.client_data) { if (s_send_connection_preface_client_string(connection)) { CONNECTION_LOGF( ERROR, connection, "Failed to send client connection preface string, %s", aws_error_name(aws_last_error())); goto error; } } struct aws_h2_pending_settings *init_pending_settings = connection->thread_data.init_pending_settings; aws_linked_list_push_back(&connection->thread_data.pending_settings_queue, &init_pending_settings->node); connection->thread_data.init_pending_settings = NULL; /* Set user_data here, the user_data is valid now */ init_pending_settings->user_data = connection->base.user_data; struct aws_h2_frame *init_settings_frame = aws_h2_frame_new_settings( connection->base.alloc, init_pending_settings->settings_array, init_pending_settings->num_settings, false /*ACK*/); if (!init_settings_frame) { CONNECTION_LOGF( ERROR, connection, "Failed to create the initial settings frame, error %s", aws_error_name(aws_last_error())); aws_mem_release(connection->base.alloc, init_pending_settings); goto error; } /* enqueue the initial settings frame here */ aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &init_settings_frame->node); /* If not manual connection window management, update the connection window to max. 
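     * That is, a single WINDOW_UPDATE lifts the connection-level receive window from the RFC's initial
     * 65,535 bytes to the 2^31-1 maximum, so connection-level flow control never throttles incoming data;
     * per-stream windows are still tracked and updated separately.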
*/ if (!connection->conn_manual_window_management) { uint32_t initial_window_update_size = AWS_H2_WINDOW_UPDATE_MAX - AWS_H2_INIT_WINDOW_SIZE; struct aws_h2_frame *connection_window_update_frame = aws_h2_frame_new_window_update(connection->base.alloc, 0 /* stream_id */, initial_window_update_size); AWS_ASSERT(connection_window_update_frame); /* enqueue the windows update frame here */ aws_linked_list_push_back( &connection->thread_data.outgoing_frames_queue, &connection_window_update_frame->node); connection->thread_data.window_size_self += initial_window_update_size; } aws_h2_try_write_outgoing_frames(connection); return; error: aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error()); } static void s_stream_complete(struct aws_h2_connection *connection, struct aws_h2_stream *stream, int error_code) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); /* Nice logging */ if (error_code) { AWS_H2_STREAM_LOGF( ERROR, stream, "Stream completed with error %d (%s).", error_code, aws_error_name(error_code)); } else if (stream->base.client_data) { int status = stream->base.client_data->response_status; AWS_H2_STREAM_LOGF( DEBUG, stream, "Client stream complete, response status %d (%s)", status, aws_http_status_text(status)); } else { AWS_H2_STREAM_LOG(DEBUG, stream, "Server stream complete"); } /* Remove stream from active_streams_map and outgoing_stream_list (if it was in them at all) */ aws_hash_table_remove(&connection->thread_data.active_streams_map, (void *)(size_t)stream->base.id, NULL, NULL); if (stream->node.next) { aws_linked_list_remove(&stream->node); } if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0 && connection->thread_data.incoming_timestamp_ns != 0) { uint64_t now_ns = 0; aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns); /* transition from something to read -> nothing to read and nothing to write */ s_add_time_measurement_to_stats( connection->thread_data.incoming_timestamp_ns, now_ns, &connection->thread_data.stats.pending_incoming_stream_ms); connection->thread_data.stats.was_inactive = true; connection->thread_data.incoming_timestamp_ns = 0; } aws_h2_stream_complete(stream, error_code); /* release connection's hold on stream */ aws_http_stream_release(&stream->base); } int aws_h2_connection_on_stream_closed( struct aws_h2_connection *connection, struct aws_h2_stream *stream, enum aws_h2_stream_closed_when closed_when, int aws_error_code) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_PRECONDITION(stream->thread_data.state == AWS_H2_STREAM_STATE_CLOSED); AWS_PRECONDITION(stream->base.id != 0); uint32_t stream_id = stream->base.id; /* Mark stream complete. This removes the stream from any "active" datastructures, * invokes its completion callback, and releases its refcount. 
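     * Afterwards the stream's ID is recorded in the closed_streams cache (s_record_closed_stream() below),
     * so that frames which straggle in for this stream can be classified by
     * s_get_active_stream_for_incoming_frame() instead of being treated as protocol errors outright.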
*/ s_stream_complete(connection, stream, aws_error_code); stream = NULL; /* Reference released, do not touch again */ if (s_record_closed_stream(connection, stream_id, closed_when)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_record_closed_stream( struct aws_h2_connection *connection, uint32_t stream_id, enum aws_h2_stream_closed_when closed_when) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (aws_cache_put(connection->thread_data.closed_streams, (void *)(size_t)stream_id, (void *)(size_t)closed_when)) { CONNECTION_LOG(ERROR, connection, "Failed inserting ID into cache of recently closed streams"); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_h2_connection_send_rst_and_close_reserved_stream( struct aws_h2_connection *connection, uint32_t stream_id, uint32_t h2_error_code) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); struct aws_h2_frame *rst_stream = aws_h2_frame_new_rst_stream(connection->base.alloc, stream_id, h2_error_code); if (!rst_stream) { CONNECTION_LOGF(ERROR, connection, "Error creating RST_STREAM frame, %s", aws_error_name(aws_last_error())); return AWS_OP_ERR; } aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream); /* If we ever fully support PUSH_PROMISE, this is where we'd remove the * promised_stream_id from some reserved_streams datastructure */ return s_record_closed_stream(connection, stream_id, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT); } /* Move stream into "active" datastructures and notify stream that it can send frames now */ static void s_move_stream_to_thread( struct aws_h2_connection *connection, struct aws_h2_stream *stream, int new_stream_error_code) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); if (new_stream_error_code) { aws_raise_error(new_stream_error_code); AWS_H2_STREAM_LOGF( ERROR, stream, "Failed activating stream, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto error; } uint32_t max_concurrent_streams = connection->thread_data.settings_peer[AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]; if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) >= max_concurrent_streams) { AWS_H2_STREAM_LOG(ERROR, stream, "Failed activating stream, max concurrent streams are reached"); aws_raise_error(AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED); goto error; } if (aws_hash_table_put( &connection->thread_data.active_streams_map, (void *)(size_t)stream->base.id, stream, NULL)) { AWS_H2_STREAM_LOG(ERROR, stream, "Failed inserting stream into map"); goto error; } enum aws_h2_stream_body_state body_state = AWS_H2_STREAM_BODY_STATE_NONE; if (aws_h2_stream_on_activated(stream, &body_state)) { goto error; } if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 1) { /* transition from nothing to read -> something to read */ uint64_t now_ns = 0; aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns); connection->thread_data.incoming_timestamp_ns = now_ns; } switch (body_state) { case AWS_H2_STREAM_BODY_STATE_WAITING_WRITES: aws_linked_list_push_back(&connection->thread_data.waiting_streams_list, &stream->node); break; case AWS_H2_STREAM_BODY_STATE_ONGOING: aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node); break; default: break; } return; error: /* If the stream got into any datastructures, s_stream_complete() will remove it */ 
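    /* aws_last_error() still holds whichever error was raised above (activation failure, the
     * max-concurrent-streams check, or the hash-table insert), and s_stream_complete() forwards it to the
     * stream's completion callback. */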
s_stream_complete(connection, stream, aws_last_error()); } /* Perform on-thread work that is triggered by calls to the connection/stream API */ static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_h2_connection *connection = arg; struct aws_linked_list pending_frames; aws_linked_list_init(&pending_frames); struct aws_linked_list pending_streams; aws_linked_list_init(&pending_streams); struct aws_linked_list pending_settings; aws_linked_list_init(&pending_settings); struct aws_linked_list pending_ping; aws_linked_list_init(&pending_ping); struct aws_linked_list pending_goaway; aws_linked_list_init(&pending_goaway); size_t window_update_size; int new_stream_error_code; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); connection->synced_data.is_cross_thread_work_task_scheduled = false; aws_linked_list_swap_contents(&connection->synced_data.pending_frame_list, &pending_frames); aws_linked_list_swap_contents(&connection->synced_data.pending_stream_list, &pending_streams); aws_linked_list_swap_contents(&connection->synced_data.pending_settings_list, &pending_settings); aws_linked_list_swap_contents(&connection->synced_data.pending_ping_list, &pending_ping); aws_linked_list_swap_contents(&connection->synced_data.pending_goaway_list, &pending_goaway); window_update_size = connection->synced_data.window_update_size; connection->synced_data.window_update_size = 0; new_stream_error_code = connection->synced_data.new_stream_error_code; s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ /* Enqueue new pending control frames */ while (!aws_linked_list_empty(&pending_frames)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_frames); struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node); aws_h2_connection_enqueue_outgoing_frame(connection, frame); } /* We already enqueued the window_update frame, just apply the change and let our peer check this value, no matter * overflow happens or not. Peer will detect it for us. */ connection->thread_data.window_size_self = aws_add_size_saturating(connection->thread_data.window_size_self, window_update_size); /* Process new pending_streams */ while (!aws_linked_list_empty(&pending_streams)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_streams); struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node); s_move_stream_to_thread(connection, stream, new_stream_error_code); } /* Move pending settings to thread data */ while (!aws_linked_list_empty(&pending_settings)) { aws_linked_list_push_back( &connection->thread_data.pending_settings_queue, aws_linked_list_pop_front(&pending_settings)); } /* Move pending PING to thread data */ while (!aws_linked_list_empty(&pending_ping)) { aws_linked_list_push_back( &connection->thread_data.pending_ping_queue, aws_linked_list_pop_front(&pending_ping)); } /* Send user requested goaways */ while (!aws_linked_list_empty(&pending_goaway)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_goaway); struct aws_h2_pending_goaway *goaway = AWS_CONTAINER_OF(node, struct aws_h2_pending_goaway, node); s_send_goaway(connection, goaway->http2_error, goaway->allow_more_streams, &goaway->debug_data); aws_mem_release(connection->base.alloc, goaway); } /* It's likely that frames were queued while processing cross-thread work. 
* If so, try writing them now */ aws_h2_try_write_outgoing_frames(connection); } int aws_h2_stream_activate(struct aws_http_stream *stream) { struct aws_h2_stream *h2_stream = AWS_CONTAINER_OF(stream, struct aws_h2_stream, base); struct aws_http_connection *base_connection = stream->owning_connection; struct aws_h2_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h2_connection, base); int err; bool was_cross_thread_work_scheduled = false; { /* BEGIN CRITICAL SECTION */ s_acquire_stream_and_connection_lock(h2_stream, connection); if (stream->id) { /* stream has already been activated. */ s_release_stream_and_connection_lock(h2_stream, connection); return AWS_OP_SUCCESS; } err = connection->synced_data.new_stream_error_code; if (err) { s_release_stream_and_connection_lock(h2_stream, connection); goto error; } stream->id = aws_http_connection_get_next_stream_id(base_connection); if (stream->id) { /* success */ was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled; connection->synced_data.is_cross_thread_work_task_scheduled = true; aws_linked_list_push_back(&connection->synced_data.pending_stream_list, &h2_stream->node); h2_stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_ACTIVE; } s_release_stream_and_connection_lock(h2_stream, connection); } /* END CRITICAL SECTION */ if (!stream->id) { /* aws_http_connection_get_next_stream_id() raises its own error. */ return AWS_OP_ERR; } /* connection keeps activated stream alive until stream completes */ aws_atomic_fetch_add(&stream->refcount, 1); stream->metrics.stream_id = stream->id; if (!was_cross_thread_work_scheduled) { CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task"); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task); } return AWS_OP_SUCCESS; error: CONNECTION_LOGF( ERROR, connection, "Failed to activate the stream id=%p, new streams are not allowed now. error %d (%s)", (void *)stream, err, aws_error_name(err)); return aws_raise_error(err); } static struct aws_http_stream *s_connection_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(client_connection, struct aws_h2_connection, base); /* #TODO: http/2-ify the request (ex: add ":method" header). Should we mutate a copy or the original? Validate? * Or just pass pointer to headers struct and let encoder transform it while encoding? 
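     *
     * For reference, an "http/2-ified" request replaces the request line and Host header with
     * pseudo-headers (RFC-7540 8.1.2.3), e.g. (illustrative values only):
     *
     *     :method = GET
     *     :scheme = https
     *     :authority = example.com
     *     :path = /index.html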
*/ struct aws_h2_stream *stream = aws_h2_stream_new_request(client_connection, options); if (!stream) { CONNECTION_LOGF( ERROR, connection, "Failed to create stream, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); return NULL; } int new_stream_error_code; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); new_stream_error_code = connection->synced_data.new_stream_error_code; s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (new_stream_error_code) { aws_raise_error(new_stream_error_code); CONNECTION_LOGF( ERROR, connection, "Cannot create request stream, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto error; } AWS_H2_STREAM_LOG(DEBUG, stream, "Created HTTP/2 request stream"); /* #TODO: print method & path */ return &stream->base; error: /* Force destruction of the stream, avoiding ref counting */ stream->base.vtable->destroy(&stream->base); return NULL; } static void s_connection_close(struct aws_http_connection *connection_base) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. */ s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_SUCCESS); } static void s_connection_stop_new_request(struct aws_http_connection *connection_base) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); if (!connection->synced_data.new_stream_error_code) { connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; } s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ } static bool s_connection_is_open(const struct aws_http_connection *connection_base) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); bool is_open; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); is_open = connection->synced_data.is_open; s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ return is_open; } static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); int new_stream_error_code; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); new_stream_error_code = connection->synced_data.new_stream_error_code; s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ return new_stream_error_code == 0; } static void s_connection_update_window(struct aws_http_connection *connection_base, uint32_t increment_size) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); if (!increment_size) { /* Silently do nothing. */ return; } if (!connection->conn_manual_window_management) { /* auto-mode, manual update window is not supported, silently do nothing with warning log. 
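     * Connection-level manual window management is chosen when the connection is created (that choice is what
     * sets conn_manual_window_management, checked above); in the default automatic mode the connection window
     * is already opened to its maximum in s_handler_installed(), so a user-driven update would be meaningless.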
*/ CONNECTION_LOG( DEBUG, connection, "Connection manual window management is off, update window operations are not supported."); return; } struct aws_h2_frame *connection_window_update_frame = aws_h2_frame_new_window_update(connection->base.alloc, 0, increment_size); if (!connection_window_update_frame) { CONNECTION_LOGF( ERROR, connection, "Failed to create WINDOW_UPDATE frame on connection, error %s", aws_error_name(aws_last_error())); /* OOM should result in a crash. And the increment size is too huge is the only other failure case, which will * result in overflow. */ goto overflow; } int err = 0; bool cross_thread_work_should_schedule = false; bool connection_open = false; size_t sum_size = 0; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); err |= aws_add_size_checked(connection->synced_data.window_update_size, increment_size, &sum_size); err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX; connection_open = connection->synced_data.is_open; if (!err && connection_open) { cross_thread_work_should_schedule = !connection->synced_data.is_cross_thread_work_task_scheduled; connection->synced_data.is_cross_thread_work_task_scheduled = true; aws_linked_list_push_back( &connection->synced_data.pending_frame_list, &connection_window_update_frame->node); connection->synced_data.window_update_size = sum_size; } s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (err) { CONNECTION_LOG( ERROR, connection, "The connection's flow-control windows has been incremented beyond 2**31 -1, the max for HTTP/2. The "); aws_h2_frame_destroy(connection_window_update_frame); goto overflow; } if (cross_thread_work_should_schedule) { CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task"); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task); } if (!connection_open) { /* connection already closed, just do nothing */ aws_h2_frame_destroy(connection_window_update_frame); return; } CONNECTION_LOGF( TRACE, connection, "User requested to update the HTTP/2 connection's flow-control windows by %" PRIu32 ".", increment_size); return; overflow: /* Shutdown the connection as overflow detected */ s_stop( connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_OVERFLOW_DETECTED); } static int s_connection_change_settings( struct aws_http_connection *connection_base, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); if (!settings_array && num_settings) { CONNECTION_LOG(ERROR, connection, "Settings_array is NULL and num_settings is not zero."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_h2_pending_settings *pending_settings = s_new_pending_settings(connection->base.alloc, settings_array, num_settings, on_completed, user_data); if (!pending_settings) { return AWS_OP_ERR; } struct aws_h2_frame *settings_frame = aws_h2_frame_new_settings(connection->base.alloc, settings_array, num_settings, false /*ACK*/); if (!settings_frame) { CONNECTION_LOGF( ERROR, connection, "Failed to create settings frame, error %s", aws_error_name(aws_last_error())); aws_mem_release(connection->base.alloc, pending_settings); return AWS_OP_ERR; } bool was_cross_thread_work_scheduled = false; bool connection_open; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); connection_open = 
connection->synced_data.is_open; if (!connection_open) { s_unlock_synced_data(connection); goto closed; } was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled; connection->synced_data.is_cross_thread_work_task_scheduled = true; aws_linked_list_push_back(&connection->synced_data.pending_frame_list, &settings_frame->node); aws_linked_list_push_back(&connection->synced_data.pending_settings_list, &pending_settings->node); s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (!was_cross_thread_work_scheduled) { CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task"); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task); } return AWS_OP_SUCCESS; closed: CONNECTION_LOG(ERROR, connection, "Failed to change settings, connection is closed or closing."); aws_h2_frame_destroy(settings_frame); aws_mem_release(connection->base.alloc, pending_settings); return aws_raise_error(AWS_ERROR_INVALID_STATE); } static int s_connection_send_ping( struct aws_http_connection *connection_base, const struct aws_byte_cursor *optional_opaque_data, aws_http2_on_ping_complete_fn *on_completed, void *user_data) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); if (optional_opaque_data && optional_opaque_data->len != 8) { CONNECTION_LOG(ERROR, connection, "Only 8 bytes opaque data supported for PING in HTTP/2"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } uint64_t time_stamp; if (aws_high_res_clock_get_ticks(&time_stamp)) { CONNECTION_LOGF( ERROR, connection, "Failed getting the time stamp to start PING, error %s", aws_error_name(aws_last_error())); return AWS_OP_ERR; } struct aws_h2_pending_ping *pending_ping = s_new_pending_ping(connection->base.alloc, optional_opaque_data, time_stamp, user_data, on_completed); if (!pending_ping) { return AWS_OP_ERR; } struct aws_h2_frame *ping_frame = aws_h2_frame_new_ping(connection->base.alloc, false /*ACK*/, pending_ping->opaque_data); if (!ping_frame) { CONNECTION_LOGF(ERROR, connection, "Failed to create PING frame, error %s", aws_error_name(aws_last_error())); aws_mem_release(connection->base.alloc, pending_ping); return AWS_OP_ERR; } bool was_cross_thread_work_scheduled = false; bool connection_open; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); connection_open = connection->synced_data.is_open; if (!connection_open) { s_unlock_synced_data(connection); goto closed; } was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled; connection->synced_data.is_cross_thread_work_task_scheduled = true; aws_linked_list_push_back(&connection->synced_data.pending_frame_list, &ping_frame->node); aws_linked_list_push_back(&connection->synced_data.pending_ping_list, &pending_ping->node); s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (!was_cross_thread_work_scheduled) { CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task"); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task); } return AWS_OP_SUCCESS; closed: CONNECTION_LOG(ERROR, connection, "Failed to send ping, connection is closed or closing."); aws_h2_frame_destroy(ping_frame); aws_mem_release(connection->base.alloc, pending_ping); return aws_raise_error(AWS_ERROR_INVALID_STATE); } static void s_connection_send_goaway( struct aws_http_connection *connection_base, uint32_t http2_error, bool allow_more_streams, 
const struct aws_byte_cursor *optional_debug_data) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); struct aws_h2_pending_goaway *pending_goaway = s_new_pending_goaway(connection->base.alloc, http2_error, allow_more_streams, optional_debug_data); bool was_cross_thread_work_scheduled = false; bool connection_open; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); connection_open = connection->synced_data.is_open; if (!connection_open) { s_unlock_synced_data(connection); CONNECTION_LOG(DEBUG, connection, "Goaway not sent, connection is closed or closing."); aws_mem_release(connection->base.alloc, pending_goaway); return; } was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled; connection->synced_data.is_cross_thread_work_task_scheduled = true; aws_linked_list_push_back(&connection->synced_data.pending_goaway_list, &pending_goaway->node); s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (allow_more_streams && (http2_error != AWS_HTTP2_ERR_NO_ERROR)) { CONNECTION_LOGF( DEBUG, connection, "Send goaway with allow more streams on and non-zero error code %s(0x%x)", aws_http2_error_code_to_str(http2_error), http2_error); } if (!was_cross_thread_work_scheduled) { CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task"); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task); } } static void s_get_settings_general( const struct aws_http_connection *connection_base, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT], bool local) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); uint32_t synced_settings[AWS_HTTP2_SETTINGS_END_RANGE]; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); if (local) { memcpy( synced_settings, connection->synced_data.settings_self, sizeof(connection->synced_data.settings_self)); } else { memcpy( synced_settings, connection->synced_data.settings_peer, sizeof(connection->synced_data.settings_peer)); } s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ for (int i = AWS_HTTP2_SETTINGS_BEGIN_RANGE; i < AWS_HTTP2_SETTINGS_END_RANGE; i++) { /* settings range begin with 1, store them into 0-based array of aws_http2_setting */ out_settings[i - 1].id = i; out_settings[i - 1].value = synced_settings[i]; } return; } static void s_connection_get_local_settings( const struct aws_http_connection *connection_base, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) { s_get_settings_general(connection_base, out_settings, true /*local*/); } static void s_connection_get_remote_settings( const struct aws_http_connection *connection_base, struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) { s_get_settings_general(connection_base, out_settings, false /*local*/); } /* Send a GOAWAY with the lowest possible last-stream-id or graceful shutdown warning */ static void s_send_goaway( struct aws_h2_connection *connection, uint32_t h2_error_code, bool allow_more_streams, const struct aws_byte_cursor *optional_debug_data) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); uint32_t last_stream_id = allow_more_streams ? 
AWS_H2_STREAM_ID_MAX : aws_min_u32( connection->thread_data.latest_peer_initiated_stream_id, connection->thread_data.goaway_sent_last_stream_id); if (last_stream_id > connection->thread_data.goaway_sent_last_stream_id) { CONNECTION_LOG( DEBUG, connection, "GOAWAY frame with lower last stream id has been sent, ignoring sending graceful shutdown warning."); return; } struct aws_byte_cursor debug_data; AWS_ZERO_STRUCT(debug_data); if (optional_debug_data) { debug_data = *optional_debug_data; } struct aws_h2_frame *goaway = aws_h2_frame_new_goaway(connection->base.alloc, last_stream_id, h2_error_code, debug_data); if (!goaway) { CONNECTION_LOGF(ERROR, connection, "Error creating GOAWAY frame, %s", aws_error_name(aws_last_error())); goto error; } connection->thread_data.goaway_sent_last_stream_id = last_stream_id; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); connection->synced_data.goaway_sent_last_stream_id = last_stream_id; connection->synced_data.goaway_sent_http2_error_code = h2_error_code; s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ aws_h2_connection_enqueue_outgoing_frame(connection, goaway); return; error: aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error()); } static int s_connection_get_sent_goaway( struct aws_http_connection *connection_base, uint32_t *out_http2_error, uint32_t *out_last_stream_id) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); uint32_t sent_last_stream_id; uint32_t sent_http2_error; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); sent_last_stream_id = connection->synced_data.goaway_sent_last_stream_id; sent_http2_error = connection->synced_data.goaway_sent_http2_error_code; s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX; if (sent_last_stream_id == max_stream_id + 1) { CONNECTION_LOG(ERROR, connection, "No GOAWAY has been sent so far."); return aws_raise_error(AWS_ERROR_INVALID_STATE); } *out_http2_error = sent_http2_error; *out_last_stream_id = sent_last_stream_id; return AWS_OP_SUCCESS; } static int s_connection_get_received_goaway( struct aws_http_connection *connection_base, uint32_t *out_http2_error, uint32_t *out_last_stream_id) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); uint32_t received_last_stream_id = 0; uint32_t received_http2_error = 0; bool goaway_not_ready = false; uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); if (connection->synced_data.goaway_received_last_stream_id == max_stream_id + 1) { goaway_not_ready = true; } else { received_last_stream_id = connection->synced_data.goaway_received_last_stream_id; received_http2_error = connection->synced_data.goaway_received_http2_error_code; } s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (goaway_not_ready) { CONNECTION_LOG(ERROR, connection, "No GOAWAY has been received so far."); return aws_raise_error(AWS_ERROR_INVALID_STATE); } *out_http2_error = received_http2_error; *out_last_stream_id = received_last_stream_id; return AWS_OP_SUCCESS; } static int s_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)slot; struct aws_h2_connection *connection = handler->impl; CONNECTION_LOGF(TRACE, connection, "Begin processing message of size %zu.", message->message_data.len); if 
(connection->thread_data.is_reading_stopped) { CONNECTION_LOG(ERROR, connection, "Cannot process message because connection is shutting down."); goto clean_up; } /* Any error that bubbles up from the decoder or its callbacks is treated as * a Connection Error (a GOAWAY frames is sent, and the connection is closed) */ struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); struct aws_h2err err = aws_h2_decode(connection->thread_data.decoder, &message_cursor); if (aws_h2err_failed(err)) { CONNECTION_LOGF( ERROR, connection, "Failure while receiving frames, %s. Sending GOAWAY %s(0x%x) and closing connection", aws_error_name(err.aws_code), aws_http2_error_code_to_str(err.h2_code), err.h2_code); goto shutdown; } /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, * so we can just keep the aws_channel's read-window wide open */ if (aws_channel_slot_increment_read_window(slot, message->message_data.len)) { CONNECTION_LOGF( ERROR, connection, "Incrementing read window failed, error %d (%s). Closing connection", aws_last_error(), aws_error_name(aws_last_error())); err = aws_h2err_from_last_error(); goto shutdown; } goto clean_up; shutdown: s_send_goaway(connection, err.h2_code, false /*allow_more_streams*/, NULL /*optional_debug_data*/); aws_h2_try_write_outgoing_frames(connection); s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, err.aws_code); clean_up: aws_mem_release(message->allocator, message); /* Flush any outgoing frames that might have been queued as a result of decoder callbacks. */ aws_h2_try_write_outgoing_frames(connection); return AWS_OP_SUCCESS; } static int s_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)handler; (void)slot; (void)message; return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } static int s_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)handler; (void)slot; (void)size; return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } static int s_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { struct aws_h2_connection *connection = handler->impl; CONNECTION_LOGF( TRACE, connection, "Channel shutting down in %s direction with error code %d (%s).", (dir == AWS_CHANNEL_DIR_READ) ? "read" : "write", error_code, aws_error_name(error_code)); if (dir == AWS_CHANNEL_DIR_READ) { /* This call ensures that no further streams will be created. */ s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, error_code); /* Send user requested GOAWAY, if they haven't been sent before. It's OK to access * synced_data.pending_goaway_list without holding the lock because no more user_requested GOAWAY can be added * after s_stop() has been invoked. 
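 * (Recap of the producer side, as written in s_connection_send_goaway()/s_connection_send_ping()
 *  and the settings sender above: lock synced_data -> bail out if the connection is no longer
 *  open -> push onto the relevant pending_*_list -> unlock -> schedule cross_thread_work_task
 *  only if it was not already scheduled. Since s_stop() is what ends that "open" phase, nothing
 *  can be appended to these lists by the time this shutdown path walks them.)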
*/ if (!aws_linked_list_empty(&connection->synced_data.pending_goaway_list)) { while (!aws_linked_list_empty(&connection->synced_data.pending_goaway_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_goaway_list); struct aws_h2_pending_goaway *goaway = AWS_CONTAINER_OF(node, struct aws_h2_pending_goaway, node); s_send_goaway(connection, goaway->http2_error, goaway->allow_more_streams, &goaway->debug_data); aws_mem_release(connection->base.alloc, goaway); } aws_h2_try_write_outgoing_frames(connection); } /* Send GOAWAY if none have been sent so far, * or if we've only sent a "graceful shutdown warning" that didn't name a last-stream-id */ if (connection->thread_data.goaway_sent_last_stream_id == AWS_H2_STREAM_ID_MAX) { s_send_goaway( connection, error_code ? AWS_HTTP2_ERR_INTERNAL_ERROR : AWS_HTTP2_ERR_NO_ERROR, false /*allow_more_streams*/, NULL /*optional_debug_data*/); aws_h2_try_write_outgoing_frames(connection); } aws_channel_slot_on_handler_shutdown_complete( slot, AWS_CHANNEL_DIR_READ, error_code, free_scarce_resources_immediately); } else /* AWS_CHANNEL_DIR_WRITE */ { connection->thread_data.channel_shutdown_error_code = error_code; connection->thread_data.channel_shutdown_immediately = free_scarce_resources_immediately; connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written = true; /* We'd prefer to wait until we know GOAWAY has been written, but don't wait if... */ if (free_scarce_resources_immediately /* we must finish ASAP */ || connection->thread_data.is_writing_stopped /* write will never complete */ || !connection->thread_data.is_outgoing_frames_task_active /* write is already complete */) { s_finish_shutdown(connection); } else { CONNECTION_LOG(TRACE, connection, "HTTP/2 handler will finish shutdown once GOAWAY frame is written"); } } return AWS_OP_SUCCESS; } static void s_finish_shutdown(struct aws_h2_connection *connection) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); AWS_PRECONDITION(connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written); CONNECTION_LOG(TRACE, connection, "Finishing HTTP/2 handler shutdown"); connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written = false; s_stop( connection, false /*stop_reading*/, true /*stop_writing*/, false /*schedule_shutdown*/, connection->thread_data.channel_shutdown_error_code); /* Remove remaining streams from internal datastructures and mark them as complete. */ struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map); while (!aws_hash_iter_done(&stream_iter)) { struct aws_h2_stream *stream = stream_iter.element.value; aws_hash_iter_delete(&stream_iter, true); aws_hash_iter_next(&stream_iter); s_stream_complete(connection, stream, AWS_ERROR_HTTP_CONNECTION_CLOSED); } /* It's OK to access synced_data without holding the lock because * no more streams or user-requested control frames can be added after s_stop() has been invoked. 
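 * Every queued item is still completed below, so user callbacks fire exactly once even on
 * shutdown. A rough sketch of what a ping completion callback should tolerate, inferred from
 * the call sites below (parameter names and the rtt type are illustrative; rtt is reported as
 * 0 when completing with an error):
 *     static void s_on_ping_complete(
 *         struct aws_http_connection *conn, uint64_t rtt_ns, int error_code, void *user_data) {
 *         if (error_code == AWS_ERROR_HTTP_CONNECTION_CLOSED) {
 *             ... the connection went away before the PING ACK arrived ...
 *         }
 *     }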
*/ while (!aws_linked_list_empty(&connection->synced_data.pending_stream_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_stream_list); struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node); s_stream_complete(connection, stream, AWS_ERROR_HTTP_CONNECTION_CLOSED); } while (!aws_linked_list_empty(&connection->synced_data.pending_frame_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_frame_list); struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node); aws_h2_frame_destroy(frame); } /* invoke pending callbacks haven't moved into thread, and clean up the data */ while (!aws_linked_list_empty(&connection->synced_data.pending_settings_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_settings_list); struct aws_h2_pending_settings *settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node); if (settings->on_completed) { settings->on_completed(&connection->base, AWS_ERROR_HTTP_CONNECTION_CLOSED, settings->user_data); } aws_mem_release(connection->base.alloc, settings); } while (!aws_linked_list_empty(&connection->synced_data.pending_ping_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_ping_list); struct aws_h2_pending_ping *ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node); if (ping->on_completed) { ping->on_completed(&connection->base, 0 /*fake rtt*/, AWS_ERROR_HTTP_CONNECTION_CLOSED, ping->user_data); } aws_mem_release(connection->base.alloc, ping); } /* invoke pending callbacks moved into thread, and clean up the data */ while (!aws_linked_list_empty(&connection->thread_data.pending_settings_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_settings_queue); struct aws_h2_pending_settings *pending_settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node); /* fire the user callback with error */ if (pending_settings->on_completed) { pending_settings->on_completed( &connection->base, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_settings->user_data); } aws_mem_release(connection->base.alloc, pending_settings); } while (!aws_linked_list_empty(&connection->thread_data.pending_ping_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_ping_queue); struct aws_h2_pending_ping *pending_ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node); /* fire the user callback with error */ if (pending_ping->on_completed) { pending_ping->on_completed( &connection->base, 0 /*fake rtt*/, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_ping->user_data); } aws_mem_release(connection->base.alloc, pending_ping); } aws_channel_slot_on_handler_shutdown_complete( connection->base.channel_slot, AWS_CHANNEL_DIR_WRITE, connection->thread_data.channel_shutdown_error_code, connection->thread_data.channel_shutdown_immediately); } static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) { (void)handler; /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, * so we can just keep the aws_channel's read-window wide open */ return SIZE_MAX; } static size_t s_handler_message_overhead(struct aws_channel_handler *handler) { (void)handler; /* "All frames begin with a fixed 9-octet header followed by a variable-length payload" (RFC-7540 4.1) */ return 9; } static void 
s_reset_statistics(struct aws_channel_handler *handler) { struct aws_h2_connection *connection = handler->impl; aws_crt_statistics_http2_channel_reset(&connection->thread_data.stats); if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0) { /* Check the current state */ connection->thread_data.stats.was_inactive = true; } return; } static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) { struct aws_h2_connection *connection = handler->impl; AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); /* TODO: Need update the way we calculate statistics, to account for user-controlled pauses. * If user is adding chunks 1 by 1, there can naturally be a gap in the upload. * If the user lets the stream-window go to zero, there can naturally be a gap in the download. */ uint64_t now_ns = 0; if (aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns)) { return; } if (!aws_linked_list_empty(&connection->thread_data.outgoing_streams_list)) { s_add_time_measurement_to_stats( connection->thread_data.outgoing_timestamp_ns, now_ns, &connection->thread_data.stats.pending_outgoing_stream_ms); connection->thread_data.outgoing_timestamp_ns = now_ns; } if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) != 0) { s_add_time_measurement_to_stats( connection->thread_data.incoming_timestamp_ns, now_ns, &connection->thread_data.stats.pending_incoming_stream_ms); connection->thread_data.incoming_timestamp_ns = now_ns; } else { connection->thread_data.stats.was_inactive = true; } void *stats_base = &connection->thread_data.stats; aws_array_list_push_back(stats, &stats_base); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h2_decoder.c000066400000000000000000002123551456575232400236610ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* Declared initializers */ #endif /*********************************************************************************************************************** * Constants **********************************************************************************************************************/ /* The scratch buffers data for states with bytes_required > 0. Must be big enough for largest state */ static const size_t s_scratch_space_size = 9; /* Stream ids & dependencies should only write the bottom 31 bits */ static const uint32_t s_31_bit_mask = UINT32_MAX >> 1; /* initial size for cookie buffer, buffer will grow if needed */ static const size_t s_decoder_cookie_buffer_initial_size = 512; #define DECODER_LOGF(level, decoder, text, ...) \ AWS_LOGF_##level(AWS_LS_HTTP_DECODER, "id=%p " text, (decoder)->logging_id, __VA_ARGS__) #define DECODER_LOG(level, decoder, text) DECODER_LOGF(level, decoder, "%s", text) #define DECODER_CALL_VTABLE(decoder, fn) \ do { \ if ((decoder)->vtable->fn) { \ DECODER_LOG(TRACE, decoder, "Invoking callback " #fn); \ struct aws_h2err vtable_err = (decoder)->vtable->fn((decoder)->userdata); \ if (aws_h2err_failed(vtable_err)) { \ DECODER_LOGF( \ ERROR, \ decoder, \ "Error from callback " #fn ", %s->%s", \ aws_http2_error_code_to_str(vtable_err.h2_code), \ aws_error_name(vtable_err.aws_code)); \ return vtable_err; \ } \ } \ } while (false) #define DECODER_CALL_VTABLE_ARGS(decoder, fn, ...) 
\ do { \ if ((decoder)->vtable->fn) { \ DECODER_LOG(TRACE, decoder, "Invoking callback " #fn); \ struct aws_h2err vtable_err = (decoder)->vtable->fn(__VA_ARGS__, (decoder)->userdata); \ if (aws_h2err_failed(vtable_err)) { \ DECODER_LOGF( \ ERROR, \ decoder, \ "Error from callback " #fn ", %s->%s", \ aws_http2_error_code_to_str(vtable_err.h2_code), \ aws_error_name(vtable_err.aws_code)); \ return vtable_err; \ } \ } \ } while (false) #define DECODER_CALL_VTABLE_STREAM(decoder, fn) \ DECODER_CALL_VTABLE_ARGS(decoder, fn, (decoder)->frame_in_progress.stream_id) #define DECODER_CALL_VTABLE_STREAM_ARGS(decoder, fn, ...) \ DECODER_CALL_VTABLE_ARGS(decoder, fn, (decoder)->frame_in_progress.stream_id, __VA_ARGS__) /* for storing things in array without worrying about the specific values of the other AWS_HTTP_HEADER_XYZ enums */ enum pseudoheader_name { PSEUDOHEADER_UNKNOWN = -1, /* Unrecognized value */ /* Request pseudo-headers */ PSEUDOHEADER_METHOD, PSEUDOHEADER_SCHEME, PSEUDOHEADER_AUTHORITY, PSEUDOHEADER_PATH, /* Response pseudo-headers */ PSEUDOHEADER_STATUS, PSEUDOHEADER_COUNT, /* Number of valid enums */ }; static const struct aws_byte_cursor *s_pseudoheader_name_to_cursor[PSEUDOHEADER_COUNT] = { [PSEUDOHEADER_METHOD] = &aws_http_header_method, [PSEUDOHEADER_SCHEME] = &aws_http_header_scheme, [PSEUDOHEADER_AUTHORITY] = &aws_http_header_authority, [PSEUDOHEADER_PATH] = &aws_http_header_path, [PSEUDOHEADER_STATUS] = &aws_http_header_status, }; static const enum aws_http_header_name s_pseudoheader_to_header_name[PSEUDOHEADER_COUNT] = { [PSEUDOHEADER_METHOD] = AWS_HTTP_HEADER_METHOD, [PSEUDOHEADER_SCHEME] = AWS_HTTP_HEADER_SCHEME, [PSEUDOHEADER_AUTHORITY] = AWS_HTTP_HEADER_AUTHORITY, [PSEUDOHEADER_PATH] = AWS_HTTP_HEADER_PATH, [PSEUDOHEADER_STATUS] = AWS_HTTP_HEADER_STATUS, }; static enum pseudoheader_name s_header_to_pseudoheader_name(enum aws_http_header_name name) { /* The compiled switch statement is actually faster than array lookup with bounds-checking. 
* (the lookup arrays above don't need to do bounds-checking) */ switch (name) { case AWS_HTTP_HEADER_METHOD: return PSEUDOHEADER_METHOD; case AWS_HTTP_HEADER_SCHEME: return PSEUDOHEADER_SCHEME; case AWS_HTTP_HEADER_AUTHORITY: return PSEUDOHEADER_AUTHORITY; case AWS_HTTP_HEADER_PATH: return PSEUDOHEADER_PATH; case AWS_HTTP_HEADER_STATUS: return PSEUDOHEADER_STATUS; default: return PSEUDOHEADER_UNKNOWN; } } /*********************************************************************************************************************** * State Machine **********************************************************************************************************************/ typedef struct aws_h2err(state_fn)(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input); struct h2_decoder_state { state_fn *fn; uint32_t bytes_required; const char *name; }; #define DEFINE_STATE(_name, _bytes_required) \ static state_fn s_state_fn_##_name; \ enum { s_state_##_name##_requires_##_bytes_required##_bytes = _bytes_required }; \ static const struct h2_decoder_state s_state_##_name = { \ .fn = s_state_fn_##_name, \ .bytes_required = s_state_##_name##_requires_##_bytes_required##_bytes, \ .name = #_name, \ } /* Common states */ DEFINE_STATE(prefix, 9); DEFINE_STATE(padding_len, 1); DEFINE_STATE(padding, 0); DEFINE_STATE(priority_block, 5); DEFINE_STATE(header_block_loop, 0); DEFINE_STATE(header_block_entry, 1); /* requires 1 byte, but may consume more */ /* Frame-specific states */ DEFINE_STATE(frame_data, 0); DEFINE_STATE(frame_headers, 0); DEFINE_STATE(frame_priority, 0); DEFINE_STATE(frame_rst_stream, 4); DEFINE_STATE(frame_settings_begin, 0); DEFINE_STATE(frame_settings_loop, 0); DEFINE_STATE(frame_settings_i, 6); DEFINE_STATE(frame_push_promise, 4); DEFINE_STATE(frame_ping, 8); DEFINE_STATE(frame_goaway, 8); DEFINE_STATE(frame_goaway_debug_data, 0); DEFINE_STATE(frame_window_update, 4); DEFINE_STATE(frame_continuation, 0); DEFINE_STATE(frame_unknown, 0); /* States that have nothing to do with frames */ DEFINE_STATE(connection_preface_string, 1); /* requires 1 byte but may consume more */ /* Helper for states that need to transition to frame-type states */ static const struct h2_decoder_state *s_state_frames[AWS_H2_FRAME_TYPE_COUNT] = { [AWS_H2_FRAME_T_DATA] = &s_state_frame_data, [AWS_H2_FRAME_T_HEADERS] = &s_state_frame_headers, [AWS_H2_FRAME_T_PRIORITY] = &s_state_frame_priority, [AWS_H2_FRAME_T_RST_STREAM] = &s_state_frame_rst_stream, [AWS_H2_FRAME_T_SETTINGS] = &s_state_frame_settings_begin, [AWS_H2_FRAME_T_PUSH_PROMISE] = &s_state_frame_push_promise, [AWS_H2_FRAME_T_PING] = &s_state_frame_ping, [AWS_H2_FRAME_T_GOAWAY] = &s_state_frame_goaway, [AWS_H2_FRAME_T_WINDOW_UPDATE] = &s_state_frame_window_update, [AWS_H2_FRAME_T_CONTINUATION] = &s_state_frame_continuation, [AWS_H2_FRAME_T_UNKNOWN] = &s_state_frame_unknown, }; /*********************************************************************************************************************** * Struct **********************************************************************************************************************/ struct aws_h2_decoder { /* Implementation data. */ struct aws_allocator *alloc; const void *logging_id; struct aws_hpack_decoder hpack; bool is_server; struct aws_byte_buf scratch; const struct h2_decoder_state *state; bool state_changed; /* HTTP/2 connection preface must be first thing received (RFC-7540 3.5): * Server must receive (client must send): magic string, then SETTINGS frame. * Client must receive (server must send): SETTINGS frame. 
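 * (Per RFC-7540 3.5 the client magic is the 24-octet string "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
 *  aws_h2_connection_preface_client_string is a cursor over it, and the
 *  connection_preface_string state below consumes it incrementally before normal frame
 *  decoding begins.)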
*/ bool connection_preface_complete; /* Cursor over the canonical client connection preface string */ struct aws_byte_cursor connection_preface_cursor; /* Frame-in-progress */ struct aws_frame_in_progress { enum aws_h2_frame_type type; uint32_t stream_id; uint32_t payload_len; uint8_t padding_len; struct { bool ack; bool end_stream; bool end_headers; bool priority; } flags; } frame_in_progress; /* GOAWAY buffer */ struct aws_goaway_in_progress { uint32_t last_stream; uint32_t error_code; /* Buffer of the received debug data in the latest goaway frame */ struct aws_byte_buf debug_data; } goaway_in_progress; /* A header-block starts with a HEADERS or PUSH_PROMISE frame, followed by 0 or more CONTINUATION frames. * It's an error for any other frame-type or stream ID to arrive while a header-block is in progress. * The header-block ends when a frame has the END_HEADERS flag set. (RFC-7540 4.3) */ struct aws_header_block_in_progress { /* If 0, then no header-block in progress */ uint32_t stream_id; /* Whether these are informational (1xx), normal, or trailing headers */ enum aws_http_header_block block_type; /* Buffer up pseudo-headers and deliver them once they're all validated */ struct aws_string *pseudoheader_values[PSEUDOHEADER_COUNT]; enum aws_http_header_compression pseudoheader_compression[PSEUDOHEADER_COUNT]; /* All pseudo-header fields MUST appear in the header block before regular header fields. */ bool pseudoheaders_done; /* T: PUSH_PROMISE header-block * F: HEADERS header-block */ bool is_push_promise; /* If frame that starts header-block has END_STREAM flag, * then frame that ends header-block also ends the stream. */ bool ends_stream; /* True if something occurs that makes the header-block malformed (ex: invalid header name). * A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2). * We continue decoding and report that it's malformed in on_headers_end(). */ bool malformed; bool body_headers_forbidden; /* Buffer up cookie header fields to concatenate separate ones */ struct aws_byte_buf cookies; /* If separate cookie fields have different compression types, the concatenated cookie uses the strictest type. */ enum aws_http_header_compression cookie_header_compression_type; } header_block_in_progress; /* Settings for decoder, which is based on the settings sent to the peer and ACKed by peer */ struct { /* enable/disable server push */ uint32_t enable_push; /* the size of the largest frame payload */ uint32_t max_frame_size; } settings; struct aws_array_list settings_buffer_list; /* User callbacks and settings. 
*/ const struct aws_h2_decoder_vtable *vtable; void *userdata; /* If this is set to true, decode may no longer be called */ bool has_errored; }; /***********************************************************************************************************************/ struct aws_h2_decoder *aws_h2_decoder_new(struct aws_h2_decoder_params *params) { AWS_PRECONDITION(params); AWS_PRECONDITION(params->alloc); AWS_PRECONDITION(params->vtable); struct aws_h2_decoder *decoder = NULL; void *scratch_buf = NULL; void *allocation = aws_mem_acquire_many( params->alloc, 2, &decoder, sizeof(struct aws_h2_decoder), &scratch_buf, s_scratch_space_size); if (!allocation) { goto error; } AWS_ZERO_STRUCT(*decoder); decoder->alloc = params->alloc; decoder->vtable = params->vtable; decoder->userdata = params->userdata; decoder->logging_id = params->logging_id; decoder->is_server = params->is_server; decoder->connection_preface_complete = params->skip_connection_preface; decoder->scratch = aws_byte_buf_from_empty_array(scratch_buf, s_scratch_space_size); aws_hpack_decoder_init(&decoder->hpack, params->alloc, decoder); if (decoder->is_server && !params->skip_connection_preface) { decoder->state = &s_state_connection_preface_string; decoder->connection_preface_cursor = aws_h2_connection_preface_client_string; } else { decoder->state = &s_state_prefix; } decoder->settings.enable_push = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_ENABLE_PUSH]; decoder->settings.max_frame_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE]; if (aws_array_list_init_dynamic( &decoder->settings_buffer_list, decoder->alloc, 0, sizeof(struct aws_http2_setting))) { goto error; } if (aws_byte_buf_init( &decoder->header_block_in_progress.cookies, decoder->alloc, s_decoder_cookie_buffer_initial_size)) { goto error; } return decoder; error: if (decoder) { aws_hpack_decoder_clean_up(&decoder->hpack); aws_array_list_clean_up(&decoder->settings_buffer_list); aws_byte_buf_clean_up(&decoder->header_block_in_progress.cookies); } aws_mem_release(params->alloc, allocation); return NULL; } static void s_reset_header_block_in_progress(struct aws_h2_decoder *decoder) { for (size_t i = 0; i < PSEUDOHEADER_COUNT; ++i) { aws_string_destroy(decoder->header_block_in_progress.pseudoheader_values[i]); } struct aws_byte_buf cookie_backup = decoder->header_block_in_progress.cookies; AWS_ZERO_STRUCT(decoder->header_block_in_progress); decoder->header_block_in_progress.cookies = cookie_backup; aws_byte_buf_reset(&decoder->header_block_in_progress.cookies, false); } void aws_h2_decoder_destroy(struct aws_h2_decoder *decoder) { if (!decoder) { return; } aws_array_list_clean_up(&decoder->settings_buffer_list); aws_hpack_decoder_clean_up(&decoder->hpack); s_reset_header_block_in_progress(decoder); aws_byte_buf_clean_up(&decoder->header_block_in_progress.cookies); aws_byte_buf_clean_up(&decoder->goaway_in_progress.debug_data); aws_mem_release(decoder->alloc, decoder); } struct aws_h2err aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data) { AWS_PRECONDITION(decoder); AWS_PRECONDITION(data); AWS_FATAL_ASSERT(!decoder->has_errored); struct aws_h2err err = AWS_H2ERR_SUCCESS; /* Run decoder state machine until we're no longer changing states. * We don't simply loop `while(data->len)` because some states consume no data, * and these states should run even when there is no data left. 
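 * A minimal caller sketch, mirroring s_handler_process_read_message() in h2_connection.c
 * (msg here stands for the incoming aws_io_message):
 *     struct aws_byte_cursor in = aws_byte_cursor_from_buf(&msg->message_data);
 *     struct aws_h2err err = aws_h2_decode(decoder, &in);
 *     if (aws_h2err_failed(err)) {
 *         ... send GOAWAY with err.h2_code and shut the connection down ...
 *     }
 * The cursor is advanced as bytes are consumed; a partial frame simply leaves the decoder
 * parked in the same state (with any prefix bytes saved in scratch) until more data arrives.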
*/ do { decoder->state_changed = false; const uint32_t bytes_required = decoder->state->bytes_required; AWS_ASSERT(bytes_required <= decoder->scratch.capacity); const char *current_state_name = decoder->state->name; const size_t prev_data_len = data->len; (void)prev_data_len; if (!decoder->scratch.len && data->len >= bytes_required) { /* Easy case, there is no scratch and we have enough data, so just send it to the state */ DECODER_LOGF(TRACE, decoder, "Running state '%s' with %zu bytes available", current_state_name, data->len); err = decoder->state->fn(decoder, data); if (aws_h2err_failed(err)) { goto handle_error; } AWS_ASSERT(prev_data_len - data->len >= bytes_required && "Decoder state requested more data than it used"); } else { /* Otherwise, state requires a minimum amount of data and we have to use the scratch */ size_t bytes_to_read = bytes_required - decoder->scratch.len; bool will_finish_state = true; if (bytes_to_read > data->len) { /* Not enough in this cursor, need to read as much as possible and then come back */ bytes_to_read = data->len; will_finish_state = false; } if (AWS_LIKELY(bytes_to_read)) { /* Read the appropriate number of bytes into scratch */ struct aws_byte_cursor to_read = aws_byte_cursor_advance(data, bytes_to_read); bool succ = aws_byte_buf_write_from_whole_cursor(&decoder->scratch, to_read); AWS_ASSERT(succ); (void)succ; } /* If we have the correct number of bytes, call the state */ if (will_finish_state) { DECODER_LOGF(TRACE, decoder, "Running state '%s' (using scratch)", current_state_name); struct aws_byte_cursor state_data = aws_byte_cursor_from_buf(&decoder->scratch); err = decoder->state->fn(decoder, &state_data); if (aws_h2err_failed(err)) { goto handle_error; } AWS_ASSERT(state_data.len == 0 && "Decoder state requested more data than it used"); } else { DECODER_LOGF( TRACE, decoder, "State '%s' requires %" PRIu32 " bytes, but only %zu available, trying again later", current_state_name, bytes_required, decoder->scratch.len); } } } while (decoder->state_changed); return AWS_H2ERR_SUCCESS; handle_error: decoder->has_errored = true; return err; } /*********************************************************************************************************************** * State functions **********************************************************************************************************************/ static struct aws_h2err s_decoder_switch_state(struct aws_h2_decoder *decoder, const struct h2_decoder_state *state) { /* Ensure payload is big enough to enter next state. * If this fails, then the payload length we received is too small for this frame type. 
* (ex: a RST_STREAM frame with < 4 bytes) */ if (decoder->frame_in_progress.payload_len < state->bytes_required) { DECODER_LOGF( ERROR, decoder, "%s payload is too small", aws_h2_frame_type_to_str(decoder->frame_in_progress.type)); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR); } DECODER_LOGF(TRACE, decoder, "Moving from state '%s' to '%s'", decoder->state->name, state->name); decoder->scratch.len = 0; decoder->state = state; decoder->state_changed = true; return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_switch_to_frame_state(struct aws_h2_decoder *decoder) { AWS_ASSERT(decoder->frame_in_progress.type < AWS_H2_FRAME_TYPE_COUNT); return s_decoder_switch_state(decoder, s_state_frames[decoder->frame_in_progress.type]); } static struct aws_h2err s_decoder_reset_state(struct aws_h2_decoder *decoder) { /* Ensure we've consumed all payload (and padding) when state machine finishes this frame. * If this fails, the payload length we received is too large for this frame type. * (ex: a RST_STREAM frame with > 4 bytes) */ if (decoder->frame_in_progress.payload_len > 0 || decoder->frame_in_progress.padding_len > 0) { DECODER_LOGF( ERROR, decoder, "%s frame payload is too large", aws_h2_frame_type_to_str(decoder->frame_in_progress.type)); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR); } DECODER_LOGF(TRACE, decoder, "%s frame complete", aws_h2_frame_type_to_str(decoder->frame_in_progress.type)); decoder->scratch.len = 0; decoder->state = &s_state_prefix; decoder->state_changed = true; AWS_ZERO_STRUCT(decoder->frame_in_progress); return AWS_H2ERR_SUCCESS; } /* Returns as much of the current frame's payload as possible, and updates payload_len */ static struct aws_byte_cursor s_decoder_get_payload(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { struct aws_byte_cursor result; const uint32_t remaining_length = decoder->frame_in_progress.payload_len; if (input->len < remaining_length) { AWS_ASSERT(input->len <= UINT32_MAX); result = aws_byte_cursor_advance(input, input->len); } else { result = aws_byte_cursor_advance(input, remaining_length); } decoder->frame_in_progress.payload_len -= (uint32_t)result.len; return result; } /* clang-format off */ /* Mask of flags supported by each frame type. * Frames not listed have mask of 0, which means all flags will be ignored. */ static const uint8_t s_acceptable_flags_for_frame[AWS_H2_FRAME_TYPE_COUNT] = { [AWS_H2_FRAME_T_DATA] = AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_PADDED, [AWS_H2_FRAME_T_HEADERS] = AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_PRIORITY, [AWS_H2_FRAME_T_PRIORITY] = 0, [AWS_H2_FRAME_T_RST_STREAM] = 0, [AWS_H2_FRAME_T_SETTINGS] = AWS_H2_FRAME_F_ACK, [AWS_H2_FRAME_T_PUSH_PROMISE] = AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED, [AWS_H2_FRAME_T_PING] = AWS_H2_FRAME_F_ACK, [AWS_H2_FRAME_T_GOAWAY] = 0, [AWS_H2_FRAME_T_WINDOW_UPDATE] = 0, [AWS_H2_FRAME_T_CONTINUATION] = AWS_H2_FRAME_F_END_HEADERS, [AWS_H2_FRAME_T_UNKNOWN] = 0, }; enum stream_id_rules { STREAM_ID_REQUIRED, STREAM_ID_FORBIDDEN, STREAM_ID_EITHER_WAY, }; /* Frame-types generally either require a stream-id, or require that it be zero. 
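 * For example, DATA, HEADERS and RST_STREAM always refer to one particular stream, so their
 * stream-id must be nonzero; SETTINGS, PING and GOAWAY apply to the connection as a whole,
 * so their stream-id must be zero; WINDOW_UPDATE may legitimately target either the whole
 * connection (id 0) or a single stream, hence STREAM_ID_EITHER_WAY.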
*/ static const enum stream_id_rules s_stream_id_rules_for_frame[AWS_H2_FRAME_TYPE_COUNT] = { [AWS_H2_FRAME_T_DATA] = STREAM_ID_REQUIRED, [AWS_H2_FRAME_T_HEADERS] = STREAM_ID_REQUIRED, [AWS_H2_FRAME_T_PRIORITY] = STREAM_ID_REQUIRED, [AWS_H2_FRAME_T_RST_STREAM] = STREAM_ID_REQUIRED, [AWS_H2_FRAME_T_SETTINGS] = STREAM_ID_FORBIDDEN, [AWS_H2_FRAME_T_PUSH_PROMISE] = STREAM_ID_REQUIRED, [AWS_H2_FRAME_T_PING] = STREAM_ID_FORBIDDEN, [AWS_H2_FRAME_T_GOAWAY] = STREAM_ID_FORBIDDEN, [AWS_H2_FRAME_T_WINDOW_UPDATE] = STREAM_ID_EITHER_WAY, /* WINDOW_UPDATE is special and can do either */ [AWS_H2_FRAME_T_CONTINUATION] = STREAM_ID_REQUIRED, [AWS_H2_FRAME_T_UNKNOWN] = STREAM_ID_EITHER_WAY, /* Everything in an UNKNOWN frame type is ignored */ }; /* clang-format on */ /* All frames begin with a fixed 9-octet header followed by a variable-length payload. (RFC-7540 4.1) * This function processes everything preceding Frame Payload in the following diagram: * +-----------------------------------------------+ * | Length (24) | * +---------------+---------------+---------------+ * | Type (8) | Flags (8) | * +-+-------------+---------------+-------------------------------+ * |R| Stream Identifier (31) | * +=+=============================================================+ * | Frame Payload (0...) ... * +---------------------------------------------------------------+ */ static struct aws_h2err s_state_fn_prefix(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_prefix_requires_9_bytes); struct aws_frame_in_progress *frame = &decoder->frame_in_progress; uint8_t raw_type = 0; uint8_t raw_flags = 0; /* Read the raw values from the first 9 bytes */ bool all_read = true; all_read &= aws_byte_cursor_read_be24(input, &frame->payload_len); all_read &= aws_byte_cursor_read_u8(input, &raw_type); all_read &= aws_byte_cursor_read_u8(input, &raw_flags); all_read &= aws_byte_cursor_read_be32(input, &frame->stream_id); AWS_ASSERT(all_read); (void)all_read; /* Validate frame type */ frame->type = raw_type < AWS_H2_FRAME_T_UNKNOWN ? raw_type : AWS_H2_FRAME_T_UNKNOWN; /* Validate the frame's flags * Flags that have no defined semantics for a particular frame type MUST be ignored (RFC-7540 4.1) */ const uint8_t flags = raw_flags & s_acceptable_flags_for_frame[decoder->frame_in_progress.type]; bool is_padded = flags & AWS_H2_FRAME_F_PADDED; decoder->frame_in_progress.flags.ack = flags & AWS_H2_FRAME_F_ACK; decoder->frame_in_progress.flags.end_stream = flags & AWS_H2_FRAME_F_END_STREAM; decoder->frame_in_progress.flags.end_headers = flags & AWS_H2_FRAME_F_END_HEADERS; decoder->frame_in_progress.flags.priority = flags & AWS_H2_FRAME_F_PRIORITY || decoder->frame_in_progress.type == AWS_H2_FRAME_T_PRIORITY; /* Connection preface requires that SETTINGS be sent first (RFC-7540 3.5). * This should be the first error we check for, so that a connection sending * total garbage data is likely to trigger this PROTOCOL_ERROR */ if (!decoder->connection_preface_complete) { if (frame->type == AWS_H2_FRAME_T_SETTINGS && !frame->flags.ack) { DECODER_LOG(TRACE, decoder, "Connection preface satisfied."); decoder->connection_preface_complete = true; } else { DECODER_LOG(ERROR, decoder, "First frame must be SETTINGS"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } } /* Validate the frame's stream ID. 
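 * As a worked example, the 9 prefix octets 00 00 08 06 00 00 00 00 00 decode above as
 * payload_len=8, type=0x06 (PING), flags=0x00, stream-id=0, which is exactly what the
 * STREAM_ID_FORBIDDEN rule for PING expects; the same bytes with a nonzero stream-id
 * would be rejected below with PROTOCOL_ERROR.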
*/ /* Reserved bit (1st bit) MUST be ignored when receiving (RFC-7540 4.1) */ frame->stream_id &= s_31_bit_mask; /* Some frame types require a stream ID, some frame types require that stream ID be zero. */ const enum stream_id_rules stream_id_rules = s_stream_id_rules_for_frame[frame->type]; if (frame->stream_id) { if (stream_id_rules == STREAM_ID_FORBIDDEN) { DECODER_LOGF(ERROR, decoder, "Stream ID for %s frame must be 0.", aws_h2_frame_type_to_str(frame->type)); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } } else { if (stream_id_rules == STREAM_ID_REQUIRED) { DECODER_LOGF(ERROR, decoder, "Stream ID for %s frame cannot be 0.", aws_h2_frame_type_to_str(frame->type)); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } } /* A header-block starts with a HEADERS or PUSH_PROMISE frame, followed by 0 or more CONTINUATION frames. * It's an error for any other frame-type or stream ID to arrive while a header-block is in progress. * (RFC-7540 4.3) */ if (frame->type == AWS_H2_FRAME_T_CONTINUATION) { if (decoder->header_block_in_progress.stream_id != frame->stream_id) { DECODER_LOG(ERROR, decoder, "Unexpected CONTINUATION frame."); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } } else { if (decoder->header_block_in_progress.stream_id) { DECODER_LOG(ERROR, decoder, "Expected CONTINUATION frame."); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } } /* Validate payload length. */ uint32_t max_frame_size = decoder->settings.max_frame_size; if (frame->payload_len > max_frame_size) { DECODER_LOGF( ERROR, decoder, "Decoder's max frame size is %" PRIu32 ", but frame of size %" PRIu32 " was received.", max_frame_size, frame->payload_len); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR); } DECODER_LOGF( TRACE, decoder, "Done decoding frame prefix (type=%s stream-id=%" PRIu32 " payload-len=%" PRIu32 "), moving on to payload", aws_h2_frame_type_to_str(frame->type), frame->stream_id, frame->payload_len); if (is_padded) { /* Read padding length if necessary */ return s_decoder_switch_state(decoder, &s_state_padding_len); } if (decoder->frame_in_progress.type == AWS_H2_FRAME_T_DATA) { /* We invoke the on_data_begin here to report the whole payload size */ DECODER_CALL_VTABLE_STREAM_ARGS( decoder, on_data_begin, frame->payload_len, 0 /*padding_len*/, frame->flags.end_stream); } if (decoder->frame_in_progress.flags.priority) { /* Read the stream dependency and weight if PRIORITY is set */ return s_decoder_switch_state(decoder, &s_state_priority_block); } /* Set the state to the appropriate frame's state */ return s_decoder_switch_to_frame_state(decoder); } /* Frames that support padding, and have the PADDED flag set, begin with a 1-byte Pad Length. * (Actual padding comes later at the very end of the frame) * +---------------+ * |Pad Length? 
(8)| * +---------------+ */ static struct aws_h2err s_state_fn_padding_len(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_padding_len_requires_1_bytes); struct aws_frame_in_progress *frame = &decoder->frame_in_progress; /* Read the padding length */ bool succ = aws_byte_cursor_read_u8(input, &frame->padding_len); AWS_ASSERT(succ); (void)succ; /* Adjust payload size so it doesn't include padding (or the 1-byte padding length) */ uint32_t reduce_payload = s_state_padding_len_requires_1_bytes + frame->padding_len; if (reduce_payload > decoder->frame_in_progress.payload_len) { DECODER_LOG(ERROR, decoder, "Padding length exceeds payload length"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } if (frame->type == AWS_H2_FRAME_T_DATA) { /* We invoke the on_data_begin here to report the whole payload size and the padding size */ DECODER_CALL_VTABLE_STREAM_ARGS( decoder, on_data_begin, frame->payload_len, frame->padding_len + 1, frame->flags.end_stream); } frame->payload_len -= reduce_payload; DECODER_LOGF(TRACE, decoder, "Padding length of frame: %" PRIu32, frame->padding_len); if (frame->flags.priority) { /* Read the stream dependency and weight if PRIORITY is set */ return s_decoder_switch_state(decoder, &s_state_priority_block); } /* Set the state to the appropriate frame's state */ return s_decoder_switch_to_frame_state(decoder); } static struct aws_h2err s_state_fn_padding(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { const uint8_t remaining_len = decoder->frame_in_progress.padding_len; const uint8_t consuming_len = input->len < remaining_len ? (uint8_t)input->len : remaining_len; aws_byte_cursor_advance(input, consuming_len); decoder->frame_in_progress.padding_len -= consuming_len; if (remaining_len == consuming_len) { /* Done with the frame! */ return s_decoder_reset_state(decoder); } return AWS_H2ERR_SUCCESS; } /* Shared code for: * PRIORITY frame (RFC-7540 6.3) * Start of HEADERS frame IF the priority flag is set (RFC-7540 6.2) * +-+-------------+-----------------------------------------------+ * |E| Stream Dependency (31) | * +-+-------------+-----------------------------------------------+ * | Weight (8) | * +-+-------------+-----------------------------------------------+ */ static struct aws_h2err s_state_fn_priority_block(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_priority_block_requires_5_bytes); /* #NOTE: throw priority data on the GROUND. They make us hecka vulnerable to DDoS and stuff. * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9513 */ aws_byte_cursor_advance(input, s_state_priority_block_requires_5_bytes); decoder->frame_in_progress.payload_len -= s_state_priority_block_requires_5_bytes; return s_decoder_switch_to_frame_state(decoder); } static struct aws_h2err s_state_fn_frame_data(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { const struct aws_byte_cursor body_data = s_decoder_get_payload(decoder, input); if (body_data.len) { DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_data_i, body_data); } if (decoder->frame_in_progress.payload_len == 0) { DECODER_CALL_VTABLE_STREAM(decoder, on_data_end); /* If frame had END_STREAM flag, alert user now */ if (decoder->frame_in_progress.flags.end_stream) { DECODER_CALL_VTABLE_STREAM(decoder, on_end_stream); } /* Process padding if necessary, otherwise we're done! 
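 * Worked example of the padding math: a padded DATA frame arrives with payload_len=100 and a
 * Pad Length octet of 10. s_state_fn_padding_len() reports on_data_begin(100, 11, ...) (the 10
 * pad octets plus the length octet itself), then shrinks payload_len to 89; the data state
 * above delivers those 89 octets via on_data_i, and the padding state (s_state_fn_padding)
 * silently discards the trailing 10 octets before the frame is considered complete.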
*/ return s_decoder_switch_state(decoder, &s_state_padding); } return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_state_fn_frame_headers(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { (void)input; /* Start header-block and alert the user */ decoder->header_block_in_progress.stream_id = decoder->frame_in_progress.stream_id; decoder->header_block_in_progress.is_push_promise = false; decoder->header_block_in_progress.ends_stream = decoder->frame_in_progress.flags.end_stream; DECODER_CALL_VTABLE_STREAM(decoder, on_headers_begin); /* Read the header-block fragment */ return s_decoder_switch_state(decoder, &s_state_header_block_loop); } static struct aws_h2err s_state_fn_frame_priority(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { (void)input; /* We already processed this data in the shared priority_block state, so we're done! */ return s_decoder_reset_state(decoder); } /* RST_STREAM is just a 4-byte error code. * +---------------------------------------------------------------+ * | Error Code (32) | * +---------------------------------------------------------------+ */ static struct aws_h2err s_state_fn_frame_rst_stream(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_frame_rst_stream_requires_4_bytes); uint32_t error_code = 0; bool succ = aws_byte_cursor_read_be32(input, &error_code); AWS_ASSERT(succ); (void)succ; decoder->frame_in_progress.payload_len -= s_state_frame_rst_stream_requires_4_bytes; DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_rst_stream, error_code); return s_decoder_reset_state(decoder); } /* A SETTINGS frame may contain any number of 6-byte entries. * This state consumes no data, but sends us into the appropriate next state */ static struct aws_h2err s_state_fn_frame_settings_begin(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { (void)input; /* If ack is set, report and we're done */ if (decoder->frame_in_progress.flags.ack) { /* Receipt of a SETTINGS frame with the ACK flag set and a length field value other * than 0 MUST be treated as a connection error of type FRAME_SIZE_ERROR */ if (decoder->frame_in_progress.payload_len) { DECODER_LOGF( ERROR, decoder, "SETTINGS ACK frame received, but it has non-0 payload length %" PRIu32, decoder->frame_in_progress.payload_len); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR); } DECODER_CALL_VTABLE(decoder, on_settings_ack); return s_decoder_reset_state(decoder); } if (decoder->frame_in_progress.payload_len % s_state_frame_settings_i_requires_6_bytes != 0) { /* A SETTINGS frame with a length other than a multiple of 6 octets MUST be * treated as a connection error (Section 5.4.1) of type FRAME_SIZE_ERROR */ DECODER_LOGF( ERROR, decoder, "Settings frame payload length is %" PRIu32 ", but it must be divisible by %" PRIu32, decoder->frame_in_progress.payload_len, s_state_frame_settings_i_requires_6_bytes); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR); } /* Enter looping states until all entries are consumed. 
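 * For example, a 12-octet SETTINGS payload such as 00 03 00 00 00 64 00 04 00 00 ff ff holds
 * two entries: (0x3 MAX_CONCURRENT_STREAMS, value 100) and (0x4 INITIAL_WINDOW_SIZE, value
 * 65535). Each pass through the settings_i state consumes one 6-octet entry into
 * settings_buffer_list, and once payload_len hits zero the loop state fires on_settings with
 * the whole batch and clears the list.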
*/ return s_decoder_switch_state(decoder, &s_state_frame_settings_loop); } /* Check if we're done consuming settings */ static struct aws_h2err s_state_fn_frame_settings_loop(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { (void)input; if (decoder->frame_in_progress.payload_len == 0) { /* Huzzah, done with the frame, fire the callback */ struct aws_array_list *buffer = &decoder->settings_buffer_list; DECODER_CALL_VTABLE_ARGS( decoder, on_settings, buffer->data, aws_array_list_length(&decoder->settings_buffer_list)); /* clean up the buffer */ aws_array_list_clear(&decoder->settings_buffer_list); return s_decoder_reset_state(decoder); } return s_decoder_switch_state(decoder, &s_state_frame_settings_i); } /* Each run through this state consumes one 6-byte setting. * There may be multiple settings in a SETTINGS frame. * +-------------------------------+ * | Identifier (16) | * +-------------------------------+-------------------------------+ * | Value (32) | * +---------------------------------------------------------------+ */ static struct aws_h2err s_state_fn_frame_settings_i(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_frame_settings_i_requires_6_bytes); uint16_t id = 0; uint32_t value = 0; bool succ = aws_byte_cursor_read_be16(input, &id); AWS_ASSERT(succ); (void)succ; succ = aws_byte_cursor_read_be32(input, &value); AWS_ASSERT(succ); (void)succ; /* An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier MUST ignore that setting. * RFC-7540 6.5.2 */ if (id >= AWS_HTTP2_SETTINGS_BEGIN_RANGE && id < AWS_HTTP2_SETTINGS_END_RANGE) { /* check the value meets the settings bounds */ if (value < aws_h2_settings_bounds[id][0] || value > aws_h2_settings_bounds[id][1]) { DECODER_LOGF( ERROR, decoder, "A value of SETTING frame is invalid, id: %" PRIu16 ", value: %" PRIu32, id, value); if (id == AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE) { return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); } else { return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } } struct aws_http2_setting setting; setting.id = id; setting.value = value; /* array_list will keep a copy of setting, it is fine to be a local variable */ if (aws_array_list_push_back(&decoder->settings_buffer_list, &setting)) { DECODER_LOGF(ERROR, decoder, "Writing setting to buffer failed, %s", aws_error_name(aws_last_error())); return aws_h2err_from_last_error(); } } /* Update payload len */ decoder->frame_in_progress.payload_len -= s_state_frame_settings_i_requires_6_bytes; return s_decoder_switch_state(decoder, &s_state_frame_settings_loop); } /* Read 4-byte Promised Stream ID * The rest of the frame is just like HEADERS, so move on to shared states... 
* +-+-------------------------------------------------------------+ * |R| Promised Stream ID (31) | * +-+-----------------------------+-------------------------------+ */ static struct aws_h2err s_state_fn_frame_push_promise(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { if (decoder->settings.enable_push == 0) { /* treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR.(RFC-7540 6.5.2) */ DECODER_LOG(ERROR, decoder, "PUSH_PROMISE is invalid, the seting for enable push is 0"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } AWS_ASSERT(input->len >= s_state_frame_push_promise_requires_4_bytes); uint32_t promised_stream_id = 0; bool succ = aws_byte_cursor_read_be32(input, &promised_stream_id); AWS_ASSERT(succ); (void)succ; decoder->frame_in_progress.payload_len -= s_state_frame_push_promise_requires_4_bytes; /* Reserved bit (top bit) must be ignored when receiving (RFC-7540 4.1) */ promised_stream_id &= s_31_bit_mask; /* Promised stream ID must not be 0 (RFC-7540 6.6). * Promised stream ID (server-initiated) must be even-numbered (RFC-7540 5.1.1). */ if ((promised_stream_id == 0) || (promised_stream_id % 2) != 0) { DECODER_LOGF(ERROR, decoder, "PUSH_PROMISE is promising invalid stream ID %" PRIu32, promised_stream_id); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } /* Server cannot receive PUSH_PROMISE frames */ if (decoder->is_server) { DECODER_LOG(ERROR, decoder, "Server cannot receive PUSH_PROMISE frames"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } /* Start header-block and alert the user. */ decoder->header_block_in_progress.stream_id = decoder->frame_in_progress.stream_id; decoder->header_block_in_progress.is_push_promise = true; decoder->header_block_in_progress.ends_stream = false; DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_begin, promised_stream_id); /* Read the header-block fragment */ return s_decoder_switch_state(decoder, &s_state_header_block_loop); } /* PING frame is just 8-bytes of opaque data. * +---------------------------------------------------------------+ * | | * | Opaque Data (64) | * | | * +---------------------------------------------------------------+ */ static struct aws_h2err s_state_fn_frame_ping(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_frame_ping_requires_8_bytes); uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0}; bool succ = aws_byte_cursor_read(input, &opaque_data, AWS_HTTP2_PING_DATA_SIZE); AWS_ASSERT(succ); (void)succ; decoder->frame_in_progress.payload_len -= s_state_frame_ping_requires_8_bytes; if (decoder->frame_in_progress.flags.ack) { DECODER_CALL_VTABLE_ARGS(decoder, on_ping_ack, opaque_data); } else { DECODER_CALL_VTABLE_ARGS(decoder, on_ping, opaque_data); } return s_decoder_reset_state(decoder); } /* Read first 8 bytes of GOAWAY. * This may be followed by N bytes of debug data. 
* +-+-------------------------------------------------------------+ * |R| Last-Stream-ID (31) | * +-+-------------------------------------------------------------+ * | Error Code (32) | * +---------------------------------------------------------------+ */ static struct aws_h2err s_state_fn_frame_goaway(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_frame_goaway_requires_8_bytes); uint32_t last_stream = 0; uint32_t error_code = AWS_HTTP2_ERR_NO_ERROR; bool succ = aws_byte_cursor_read_be32(input, &last_stream); AWS_ASSERT(succ); (void)succ; last_stream &= s_31_bit_mask; succ = aws_byte_cursor_read_be32(input, &error_code); AWS_ASSERT(succ); (void)succ; decoder->frame_in_progress.payload_len -= s_state_frame_goaway_requires_8_bytes; uint32_t debug_data_length = decoder->frame_in_progress.payload_len; /* Received new GOAWAY, clean up the previous one. Buffer it up and invoke the callback once the debug data decoded * fully. */ decoder->goaway_in_progress.error_code = error_code; decoder->goaway_in_progress.last_stream = last_stream; int init_result = aws_byte_buf_init(&decoder->goaway_in_progress.debug_data, decoder->alloc, debug_data_length); AWS_ASSERT(init_result == 0); (void)init_result; return s_decoder_switch_state(decoder, &s_state_frame_goaway_debug_data); } /* Optional remainder of GOAWAY frame. * +---------------------------------------------------------------+ * | Additional Debug Data (*) | * +---------------------------------------------------------------+ */ static struct aws_h2err s_state_fn_frame_goaway_debug_data( struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { struct aws_byte_cursor debug_data = s_decoder_get_payload(decoder, input); if (debug_data.len > 0) { /* As we initialized the buffer to the size of debug data, we can safely append here */ aws_byte_buf_append(&decoder->goaway_in_progress.debug_data, &debug_data); } /* If this is the last data in the frame, reset decoder */ if (decoder->frame_in_progress.payload_len == 0) { struct aws_byte_cursor debug_cursor = aws_byte_cursor_from_buf(&decoder->goaway_in_progress.debug_data); DECODER_CALL_VTABLE_ARGS( decoder, on_goaway, decoder->goaway_in_progress.last_stream, decoder->goaway_in_progress.error_code, debug_cursor); aws_byte_buf_clean_up(&decoder->goaway_in_progress.debug_data); return s_decoder_reset_state(decoder); } return AWS_H2ERR_SUCCESS; } /* WINDOW_UPDATE frame. * +-+-------------------------------------------------------------+ * |R| Window Size Increment (31) | * +-+-------------------------------------------------------------+ */ static struct aws_h2err s_state_fn_frame_window_update(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { AWS_ASSERT(input->len >= s_state_frame_window_update_requires_4_bytes); uint32_t window_increment = 0; bool succ = aws_byte_cursor_read_be32(input, &window_increment); AWS_ASSERT(succ); (void)succ; decoder->frame_in_progress.payload_len -= s_state_frame_window_update_requires_4_bytes; window_increment &= s_31_bit_mask; DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_window_update, window_increment); return s_decoder_reset_state(decoder); } /* CONTINUATION is a lot like HEADERS, so it uses shared states. 
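 * For example, a HEADERS frame on stream 5 sent without END_HEADERS, followed by two
 * CONTINUATION frames on stream 5 where only the last sets END_HEADERS, forms a single
 * header-block: header_block_in_progress.stream_id stays 5 for all three frames, and any
 * other frame type (or a frame for a different stream) arriving in between is rejected in
 * s_state_fn_prefix() with PROTOCOL_ERROR.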
*/ static struct aws_h2err s_state_fn_frame_continuation(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { (void)input; /* Read the header-block fragment */ return s_decoder_switch_state(decoder, &s_state_header_block_loop); } /* Implementations MUST ignore and discard any frame that has a type that is unknown. */ static struct aws_h2err s_state_fn_frame_unknown(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { /* Read all data possible, and throw it on the floor */ s_decoder_get_payload(decoder, input); /* If there's no more data expected, end the frame */ if (decoder->frame_in_progress.payload_len == 0) { return s_decoder_reset_state(decoder); } return AWS_H2ERR_SUCCESS; } /* Perform analysis that can't be done until all pseudo-headers are received. * Then deliver buffered pseudoheaders via callback */ static struct aws_h2err s_flush_pseudoheaders(struct aws_h2_decoder *decoder) { struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress; if (current_block->malformed) { goto already_malformed; } if (current_block->pseudoheaders_done) { return AWS_H2ERR_SUCCESS; } current_block->pseudoheaders_done = true; /* s_process_header_field() already checked that we're not mixing request & response pseudoheaders */ bool has_request_pseudoheaders = false; for (int i = PSEUDOHEADER_METHOD; i <= PSEUDOHEADER_PATH; ++i) { if (current_block->pseudoheader_values[i] != NULL) { has_request_pseudoheaders = true; break; } } bool has_response_pseudoheaders = current_block->pseudoheader_values[PSEUDOHEADER_STATUS] != NULL; if (current_block->is_push_promise && !has_request_pseudoheaders) { DECODER_LOG(ERROR, decoder, "PUSH_PROMISE is missing :method"); goto malformed; } if (has_request_pseudoheaders) { /* Request header-block. */ current_block->block_type = AWS_HTTP_HEADER_BLOCK_MAIN; } else if (has_response_pseudoheaders) { /* Response header block. */ /* Determine whether this is an Informational (1xx) response */ struct aws_byte_cursor status_value = aws_byte_cursor_from_string(current_block->pseudoheader_values[PSEUDOHEADER_STATUS]); uint64_t status_code; if (status_value.len != 3 || aws_byte_cursor_utf8_parse_u64(status_value, &status_code)) { DECODER_LOG(ERROR, decoder, ":status header has invalid value"); DECODER_LOGF(DEBUG, decoder, "Bad :status value is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(status_value)); goto malformed; } if (status_code / 100 == 1) { current_block->block_type = AWS_HTTP_HEADER_BLOCK_INFORMATIONAL; if (current_block->ends_stream) { /* Informational headers do not constitute a full response (RFC-7540 8.1) */ DECODER_LOG(ERROR, decoder, "Informational (1xx) response cannot END_STREAM"); goto malformed; } current_block->body_headers_forbidden = true; } else { current_block->block_type = AWS_HTTP_HEADER_BLOCK_MAIN; } /** * RFC-9110 8.6. * A server MUST NOT send a Content-Length header field in any response with a status code of 1xx * (Informational) or 204 (No Content). */ current_block->body_headers_forbidden |= status_code == AWS_HTTP_STATUS_CODE_204_NO_CONTENT; } else { /* Trailing header block. */ if (!current_block->ends_stream) { DECODER_LOG(ERROR, decoder, "HEADERS appear to be trailer, but lack END_STREAM"); goto malformed; } current_block->block_type = AWS_HTTP_HEADER_BLOCK_TRAILING; } /* #TODO RFC-7540 8.1.2.3 & 8.3 Validate request has correct pseudoheaders. Note different rules for CONNECT */ /* #TODO validate pseudoheader values. 
each one has its own special rules */ /* Finally, deliver header-fields via callback */ for (size_t i = 0; i < PSEUDOHEADER_COUNT; ++i) { const struct aws_string *value_string = current_block->pseudoheader_values[i]; if (value_string) { struct aws_http_header header_field = { .name = *s_pseudoheader_name_to_cursor[i], .value = aws_byte_cursor_from_string(value_string), .compression = current_block->pseudoheader_compression[i], }; enum aws_http_header_name name_enum = s_pseudoheader_to_header_name[i]; if (current_block->is_push_promise) { DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, &header_field, name_enum); } else { DECODER_CALL_VTABLE_STREAM_ARGS( decoder, on_headers_i, &header_field, name_enum, current_block->block_type); } } } return AWS_H2ERR_SUCCESS; malformed: /* A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2). * We continue decoding and report that it's malformed in on_headers_end(). */ current_block->malformed = true; return AWS_H2ERR_SUCCESS; already_malformed: return AWS_H2ERR_SUCCESS; } /* Process single header-field. * If it's invalid, mark the header-block as malformed. * If it's valid, and header-block is not malformed, deliver via callback. */ static struct aws_h2err s_process_header_field( struct aws_h2_decoder *decoder, const struct aws_http_header *header_field) { struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress; if (current_block->malformed) { goto already_malformed; } const struct aws_byte_cursor name = header_field->name; if (name.len == 0) { DECODER_LOG(ERROR, decoder, "Header name is blank"); goto malformed; } enum aws_http_header_name name_enum = aws_http_lowercase_str_to_header_name(name); bool is_pseudoheader = name.ptr[0] == ':'; if (is_pseudoheader) { if (current_block->pseudoheaders_done) { /* Note: being careful not to leak possibly sensitive data except at DEBUG level and lower */ DECODER_LOG(ERROR, decoder, "Pseudo-headers must appear before regular fields."); DECODER_LOGF(DEBUG, decoder, "Misplaced pseudo-header is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name)); goto malformed; } enum pseudoheader_name pseudoheader_enum = s_header_to_pseudoheader_name(name_enum); if (pseudoheader_enum == PSEUDOHEADER_UNKNOWN) { DECODER_LOG(ERROR, decoder, "Unrecognized pseudo-header"); DECODER_LOGF(DEBUG, decoder, "Unrecognized pseudo-header is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name)); goto malformed; } /* Ensure request pseudo-headers vs response pseudoheaders were sent appropriately. * This also ensures that request and response pseudoheaders aren't being mixed. */ bool expect_request_pseudoheader = decoder->is_server || current_block->is_push_promise; bool is_request_pseudoheader = pseudoheader_enum != PSEUDOHEADER_STATUS; if (expect_request_pseudoheader != is_request_pseudoheader) { DECODER_LOGF( ERROR, /* ok to log name of recognized pseudo-header at ERROR level */ decoder, "'" PRInSTR "' pseudo-header cannot be in %s header-block to %s", AWS_BYTE_CURSOR_PRI(name), current_block->is_push_promise ? "PUSH_PROMISE" : "HEADERS", decoder->is_server ? "server" : "client"); goto malformed; } /* Protect against duplicates. */ if (current_block->pseudoheader_values[pseudoheader_enum] != NULL) { /* ok to log name of recognized pseudo-header at ERROR level */ DECODER_LOGF( ERROR, decoder, "'" PRInSTR "' pseudo-header occurred multiple times", AWS_BYTE_CURSOR_PRI(name)); goto malformed; } /* Buffer up pseudo-headers, we'll deliver them later once they're all validated. 
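 * Example of the ordering and duplicate rules enforced above (field values assumed
 * for illustration): the sequence
 *   :method: GET, :path: /, accept: text/html, :authority: example.com
 * is malformed because ":authority" appears after a regular field, and
 *   :method: GET, :method: POST
 * is malformed because a recognized pseudo-header occurred twice.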
*/ current_block->pseudoheader_compression[pseudoheader_enum] = header_field->compression; current_block->pseudoheader_values[pseudoheader_enum] = aws_string_new_from_cursor(decoder->alloc, &header_field->value); if (!current_block->pseudoheader_values[pseudoheader_enum]) { return aws_h2err_from_last_error(); } } else { /* Else regular header-field. */ /* Regular header-fields come after pseudo-headers, so make sure pseudo-headers are flushed */ if (!current_block->pseudoheaders_done) { struct aws_h2err err = s_flush_pseudoheaders(decoder); if (aws_h2err_failed(err)) { return err; } /* might have realized that header-block is malformed during flush */ if (current_block->malformed) { goto already_malformed; } } /* Validate header name (not necessary if string already matched against a known enum) */ if (name_enum == AWS_HTTP_HEADER_UNKNOWN) { if (!aws_strutil_is_lowercase_http_token(name)) { DECODER_LOG(ERROR, decoder, "Header name contains invalid characters"); DECODER_LOGF(DEBUG, decoder, "Bad header name is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name)); goto malformed; } } /* #TODO Validate characters used in header_field->value */ switch (name_enum) { case AWS_HTTP_HEADER_COOKIE: /* for a header cookie, we will not fire callback until we concatenate them all, let's store it at the * buffer */ if (header_field->compression > current_block->cookie_header_compression_type) { current_block->cookie_header_compression_type = header_field->compression; } if (current_block->cookies.len) { /* add a delimiter */ struct aws_byte_cursor delimiter = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("; "); if (aws_byte_buf_append_dynamic(¤t_block->cookies, &delimiter)) { return aws_h2err_from_last_error(); } } if (aws_byte_buf_append_dynamic(¤t_block->cookies, &header_field->value)) { return aws_h2err_from_last_error(); } /* Early return */ return AWS_H2ERR_SUCCESS; case AWS_HTTP_HEADER_TRANSFER_ENCODING: case AWS_HTTP_HEADER_UPGRADE: case AWS_HTTP_HEADER_KEEP_ALIVE: case AWS_HTTP_HEADER_PROXY_CONNECTION: { /* connection-specific header field are treated as malformed (RFC9113 8.2.2) */ DECODER_LOGF( ERROR, decoder, "Connection-specific header ('" PRInSTR "') found, not allowed in HTTP/2", AWS_BYTE_CURSOR_PRI(name)); goto malformed; } break; case AWS_HTTP_HEADER_CONTENT_LENGTH: if (current_block->body_headers_forbidden) { /* The content-length are forbidden */ DECODER_LOG(ERROR, decoder, "Unexpected Content-Length header found"); goto malformed; } break; default: break; } /* Deliver header-field via callback */ if (current_block->is_push_promise) { DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, header_field, name_enum); } else { DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_headers_i, header_field, name_enum, current_block->block_type); } } return AWS_H2ERR_SUCCESS; malformed: /* A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2). * We continue decoding and report that it's malformed in on_headers_end(). 
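 * The cookie handling above concatenates values instead of delivering each field.
 * Rough illustration (values assumed): receiving
 *   cookie: a=1   and then   cookie: b=2
 * buffers "a=1; b=2" in current_block->cookies, and a single combined "cookie"
 * header is delivered later by s_flush_cookie_header().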
*/ current_block->malformed = true; return AWS_H2ERR_SUCCESS; already_malformed: return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_flush_cookie_header(struct aws_h2_decoder *decoder) { struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress; if (current_block->malformed) { return AWS_H2ERR_SUCCESS; } if (current_block->cookies.len == 0) { /* Nothing to flush */ return AWS_H2ERR_SUCCESS; } struct aws_http_header concatenated_cookie; struct aws_byte_cursor header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cookie"); concatenated_cookie.name = header_name; concatenated_cookie.value = aws_byte_cursor_from_buf(¤t_block->cookies); concatenated_cookie.compression = current_block->cookie_header_compression_type; if (current_block->is_push_promise) { DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, &concatenated_cookie, AWS_HTTP_HEADER_COOKIE); } else { DECODER_CALL_VTABLE_STREAM_ARGS( decoder, on_headers_i, &concatenated_cookie, AWS_HTTP_HEADER_COOKIE, current_block->block_type); } return AWS_H2ERR_SUCCESS; } /* This state checks whether we've consumed the current frame's entire header-block fragment. * We revisit this state after each entry is decoded. * This state consumes no data. */ static struct aws_h2err s_state_fn_header_block_loop(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { (void)input; /* If we're out of payload data, handle frame complete */ if (decoder->frame_in_progress.payload_len == 0) { /* If this is the end of the header-block, invoke callback and clear header_block_in_progress */ if (decoder->frame_in_progress.flags.end_headers) { /* Ensure pseudo-headers have been flushed */ struct aws_h2err err = s_flush_pseudoheaders(decoder); if (aws_h2err_failed(err)) { return err; } /* flush the concatenated cookie header */ err = s_flush_cookie_header(decoder); if (aws_h2err_failed(err)) { return err; } bool malformed = decoder->header_block_in_progress.malformed; DECODER_LOGF(TRACE, decoder, "Done decoding header-block, malformed=%d", malformed); if (decoder->header_block_in_progress.is_push_promise) { DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_end, malformed); } else { DECODER_CALL_VTABLE_STREAM_ARGS( decoder, on_headers_end, malformed, decoder->header_block_in_progress.block_type); } /* If header-block began with END_STREAM flag, alert user now */ if (decoder->header_block_in_progress.ends_stream) { DECODER_CALL_VTABLE_STREAM(decoder, on_end_stream); } s_reset_header_block_in_progress(decoder); } else { DECODER_LOG(TRACE, decoder, "Done decoding header-block fragment, expecting CONTINUATION frames"); } /* Finish this frame */ return s_decoder_switch_state(decoder, &s_state_padding); } DECODER_LOGF( TRACE, decoder, "Decoding header-block entry, %" PRIu32 " bytes remaining in payload", decoder->frame_in_progress.payload_len); return s_decoder_switch_state(decoder, &s_state_header_block_entry); } /* We stay in this state until a single "entry" is decoded from the header-block fragment. * Then we return to the header_block_loop state */ static struct aws_h2err s_state_fn_header_block_entry(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { /* This state requires at least 1 byte, but will likely consume more */ AWS_ASSERT(input->len >= s_state_header_block_entry_requires_1_bytes); /* Feed header-block fragment to HPACK decoder. * Don't let decoder consume anything beyond payload_len. 
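 * Sketch of the clamp performed below (a paraphrase of the code, not additional logic):
 *   fragment = *input;
 *   fragment.len = min(fragment.len, frame_in_progress.payload_len);
 * so the HPACK decoder never reads past the current frame's payload, even when the
 * input buffer also contains bytes belonging to the next frame.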
*/ struct aws_byte_cursor fragment = *input; if (fragment.len > decoder->frame_in_progress.payload_len) { fragment.len = decoder->frame_in_progress.payload_len; } const size_t prev_fragment_len = fragment.len; struct aws_hpack_decode_result result; if (aws_hpack_decode(&decoder->hpack, &fragment, &result)) { DECODER_LOGF(ERROR, decoder, "Error decoding header-block fragment: %s", aws_error_name(aws_last_error())); /* Any possible error from HPACK decoder (except OOM) is treated as a COMPRESSION error. */ if (aws_last_error() == AWS_ERROR_OOM) { return aws_h2err_from_last_error(); } else { return aws_h2err_from_h2_code(AWS_HTTP2_ERR_COMPRESSION_ERROR); } } /* HPACK decoder returns when it reaches the end of an entry, or when it's consumed the whole fragment. * Update input & payload_len to reflect the number of bytes consumed. */ const size_t bytes_consumed = prev_fragment_len - fragment.len; aws_byte_cursor_advance(input, bytes_consumed); decoder->frame_in_progress.payload_len -= (uint32_t)bytes_consumed; if (result.type == AWS_HPACK_DECODE_T_ONGOING) { /* HPACK decoder hasn't finished entry */ if (decoder->frame_in_progress.payload_len > 0) { /* More payload is coming. Remain in state until it arrives */ DECODER_LOG(TRACE, decoder, "Header-block entry partially decoded, waiting for more data."); return AWS_H2ERR_SUCCESS; } if (decoder->frame_in_progress.flags.end_headers) { /* Reached end of the frame's payload, and this frame ends the header-block. * Error if we ended up with a partially decoded entry. */ DECODER_LOG(ERROR, decoder, "Compression error: incomplete entry at end of header-block"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_COMPRESSION_ERROR); } /* Reached end of this frame's payload, but CONTINUATION frames are expected to arrive. * We'll resume decoding this entry when we get them. */ DECODER_LOG(TRACE, decoder, "Header-block entry partially decoded, resumes in CONTINUATION frame"); return s_decoder_switch_state(decoder, &s_state_header_block_loop); } /* Finished decoding HPACK entry! */ /* #TODO Enforces dynamic table resize rules from RFC-7541 4.2 * If dynamic table size changed via SETTINGS frame, next header-block must start with DYNAMIC_TABLE_RESIZE entry. * Is it illegal to receive a resize entry at other times? */ /* #TODO The TE header field ... MUST NOT contain any value other than "trailers" */ if (result.type == AWS_HPACK_DECODE_T_HEADER_FIELD) { const struct aws_http_header *header_field = &result.data.header_field; DECODER_LOGF( TRACE, decoder, "Decoded header field: \"" PRInSTR ": " PRInSTR "\"", AWS_BYTE_CURSOR_PRI(header_field->name), AWS_BYTE_CURSOR_PRI(header_field->value)); struct aws_h2err err = s_process_header_field(decoder, header_field); if (aws_h2err_failed(err)) { return err; } } return s_decoder_switch_state(decoder, &s_state_header_block_loop); } /* The first thing a client sends on a connection is a 24 byte magic string (RFC-7540 3.5). * Note that this state doesn't "require" the full 24 bytes, it runs as data arrives. * This avoids hanging if < 24 bytes rolled in. */ static struct aws_h2err s_state_fn_connection_preface_string( struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) { size_t remaining_len = decoder->connection_preface_cursor.len; size_t consuming_len = input->len < remaining_len ? 
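    /* Equivalently, as a hedged paraphrase: consuming_len = aws_min_size(input->len, remaining_len),
     * i.e. compare only as many bytes of the 24-byte connection preface as have arrived so far. */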
input->len : remaining_len; struct aws_byte_cursor expected = aws_byte_cursor_advance(&decoder->connection_preface_cursor, consuming_len); struct aws_byte_cursor received = aws_byte_cursor_advance(input, consuming_len); if (!aws_byte_cursor_eq(&expected, &received)) { DECODER_LOG(ERROR, decoder, "Client connection preface is invalid"); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR); } if (decoder->connection_preface_cursor.len == 0) { /* Done receiving connection preface string, proceed to decoding normal frames. */ return s_decoder_reset_state(decoder); } /* Remain in state until more data arrives */ return AWS_H2ERR_SUCCESS; } void aws_h2_decoder_set_setting_header_table_size(struct aws_h2_decoder *decoder, uint32_t data) { /* Set the protocol_max_size_setting for hpack. */ aws_hpack_decoder_update_max_table_size(&decoder->hpack, data); } void aws_h2_decoder_set_setting_enable_push(struct aws_h2_decoder *decoder, uint32_t data) { decoder->settings.enable_push = data; } void aws_h2_decoder_set_setting_max_frame_size(struct aws_h2_decoder *decoder, uint32_t data) { decoder->settings.max_frame_size = data; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h2_frames.c000066400000000000000000001310001456575232400235140ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #define ENCODER_LOGF(level, encoder, text, ...) \ AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p " text, (encoder)->logging_id, __VA_ARGS__) #define ENCODER_LOG(level, encoder, text) ENCODER_LOGF(level, encoder, "%s", text) const struct aws_byte_cursor aws_h2_connection_preface_client_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"); /* Initial values and bounds are from RFC-7540 6.5.2 */ const uint32_t aws_h2_settings_initial[AWS_HTTP2_SETTINGS_END_RANGE] = { [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE] = 4096, [AWS_HTTP2_SETTINGS_ENABLE_PUSH] = 1, [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] = UINT32_MAX, /* "Initially there is no limit to this value" */ [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] = AWS_H2_INIT_WINDOW_SIZE, [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE] = 16384, [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE] = UINT32_MAX, /* "The initial value of this setting is unlimited" */ }; const uint32_t aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_END_RANGE][2] = { [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE][0] = 0, [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE][1] = UINT32_MAX, [AWS_HTTP2_SETTINGS_ENABLE_PUSH][0] = 0, [AWS_HTTP2_SETTINGS_ENABLE_PUSH][1] = 1, [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS][0] = 0, [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS][1] = UINT32_MAX, [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE][0] = 0, [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE][1] = AWS_H2_WINDOW_UPDATE_MAX, [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][0] = 16384, [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][1] = AWS_H2_PAYLOAD_MAX, [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE][0] = 0, [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE][1] = UINT32_MAX, }; /* Stream ids & dependencies should only write the bottom 31 bits */ static const uint32_t s_u32_top_bit_mask = UINT32_MAX << 31; /* Bytes to initially reserve for encoding of an entire header block. Buffer will grow if necessary. 
*/ static const size_t s_encoded_header_block_reserve = 128; /* Value pulled from thin air */ #define DEFINE_FRAME_VTABLE(NAME) \ static aws_h2_frame_destroy_fn s_frame_##NAME##_destroy; \ static aws_h2_frame_encode_fn s_frame_##NAME##_encode; \ static const struct aws_h2_frame_vtable s_frame_##NAME##_vtable = { \ .destroy = s_frame_##NAME##_destroy, \ .encode = s_frame_##NAME##_encode, \ } const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type) { switch (type) { case AWS_H2_FRAME_T_DATA: return "DATA"; case AWS_H2_FRAME_T_HEADERS: return "HEADERS"; case AWS_H2_FRAME_T_PRIORITY: return "PRIORITY"; case AWS_H2_FRAME_T_RST_STREAM: return "RST_STREAM"; case AWS_H2_FRAME_T_SETTINGS: return "SETTINGS"; case AWS_H2_FRAME_T_PUSH_PROMISE: return "PUSH_PROMISE"; case AWS_H2_FRAME_T_PING: return "PING"; case AWS_H2_FRAME_T_GOAWAY: return "GOAWAY"; case AWS_H2_FRAME_T_WINDOW_UPDATE: return "WINDOW_UPDATE"; case AWS_H2_FRAME_T_CONTINUATION: return "CONTINUATION"; default: return "**UNKNOWN**"; } } const char *aws_http2_error_code_to_str(enum aws_http2_error_code h2_error_code) { switch (h2_error_code) { case AWS_HTTP2_ERR_NO_ERROR: return "NO_ERROR"; case AWS_HTTP2_ERR_PROTOCOL_ERROR: return "PROTOCOL_ERROR"; case AWS_HTTP2_ERR_INTERNAL_ERROR: return "INTERNAL_ERROR"; case AWS_HTTP2_ERR_FLOW_CONTROL_ERROR: return "FLOW_CONTROL_ERROR"; case AWS_HTTP2_ERR_SETTINGS_TIMEOUT: return "SETTINGS_TIMEOUT"; case AWS_HTTP2_ERR_STREAM_CLOSED: return "STREAM_CLOSED"; case AWS_HTTP2_ERR_FRAME_SIZE_ERROR: return "FRAME_SIZE_ERROR"; case AWS_HTTP2_ERR_REFUSED_STREAM: return "REFUSED_STREAM"; case AWS_HTTP2_ERR_CANCEL: return "CANCEL"; case AWS_HTTP2_ERR_COMPRESSION_ERROR: return "COMPRESSION_ERROR"; case AWS_HTTP2_ERR_CONNECT_ERROR: return "CONNECT_ERROR"; case AWS_HTTP2_ERR_ENHANCE_YOUR_CALM: return "ENHANCE_YOUR_CALM"; case AWS_HTTP2_ERR_INADEQUATE_SECURITY: return "INADEQUATE_SECURITY"; case AWS_HTTP2_ERR_HTTP_1_1_REQUIRED: return "HTTP_1_1_REQUIRED"; default: return "UNKNOWN_ERROR"; } } struct aws_h2err aws_h2err_from_h2_code(enum aws_http2_error_code h2_error_code) { AWS_PRECONDITION(h2_error_code > AWS_HTTP2_ERR_NO_ERROR && h2_error_code < AWS_HTTP2_ERR_COUNT); return (struct aws_h2err){ .h2_code = h2_error_code, .aws_code = AWS_ERROR_HTTP_PROTOCOL_ERROR, }; } struct aws_h2err aws_h2err_from_aws_code(int aws_error_code) { AWS_PRECONDITION(aws_error_code != 0); return (struct aws_h2err){ .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR, .aws_code = aws_error_code, }; } struct aws_h2err aws_h2err_from_last_error(void) { return aws_h2err_from_aws_code(aws_last_error()); } bool aws_h2err_success(struct aws_h2err err) { return err.h2_code == 0 && err.aws_code == 0; } bool aws_h2err_failed(struct aws_h2err err) { return err.h2_code != 0 || err.aws_code != 0; } int aws_h2_validate_stream_id(uint32_t stream_id) { if (stream_id == 0 || stream_id > AWS_H2_STREAM_ID_MAX) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } /** * Determine max frame payload length that will: * 1) fit in output's available space * 2) obey encoders current MAX_FRAME_SIZE * * Assumes no part of the frame has been written yet to output. * The total length of the frame would be: returned-payload-len + AWS_H2_FRAME_PREFIX_SIZE * * Raises error if there is not enough space available for even a frame prefix. 
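 * Worked example (numbers assumed): with output->capacity - output->len == 1024 and
 * settings.max_frame_size == 16384, the result is
 *   min(1024 - AWS_H2_FRAME_PREFIX_SIZE, 16384) == 1015 bytes of payload,
 * assuming AWS_H2_FRAME_PREFIX_SIZE is the 9-byte frame prefix
 * (3-byte length + 1-byte type + 1-byte flags + 4-byte stream id).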
*/ static int s_get_max_contiguous_payload_length( const struct aws_h2_frame_encoder *encoder, const struct aws_byte_buf *output, size_t *max_payload_length) { const size_t space_available = output->capacity - output->len; size_t max_payload_given_space_available; if (aws_sub_size_checked(space_available, AWS_H2_FRAME_PREFIX_SIZE, &max_payload_given_space_available)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } size_t max_payload_given_settings = encoder->settings.max_frame_size; *max_payload_length = aws_min_size(max_payload_given_space_available, max_payload_given_settings); return AWS_OP_SUCCESS; } /*********************************************************************************************************************** * Priority **********************************************************************************************************************/ static size_t s_frame_priority_settings_size = 5; static void s_frame_priority_settings_encode( const struct aws_h2_frame_priority_settings *priority, struct aws_byte_buf *output) { AWS_PRECONDITION(priority); AWS_PRECONDITION(output); AWS_PRECONDITION((priority->stream_dependency & s_u32_top_bit_mask) == 0); (void)s_u32_top_bit_mask; /* PRIORITY is encoded as (RFC-7540 6.3): * +-+-------------------------------------------------------------+ * |E| Stream Dependency (31) | * +-+-------------+-----------------------------------------------+ * | Weight (8) | * +-+-------------+ */ bool writes_ok = true; /* Write the top 4 bytes */ uint32_t top_bytes = priority->stream_dependency | ((uint32_t)priority->stream_dependency_exclusive << 31); writes_ok &= aws_byte_buf_write_be32(output, top_bytes); /* Write the priority weight */ writes_ok &= aws_byte_buf_write_u8(output, priority->weight); AWS_ASSERT(writes_ok); (void)writes_ok; } /*********************************************************************************************************************** * Common Frame Prefix **********************************************************************************************************************/ static void s_init_frame_base( struct aws_h2_frame *frame_base, struct aws_allocator *alloc, enum aws_h2_frame_type type, const struct aws_h2_frame_vtable *vtable, uint32_t stream_id) { frame_base->vtable = vtable; frame_base->alloc = alloc; frame_base->type = type; frame_base->stream_id = stream_id; } static void s_frame_prefix_encode( enum aws_h2_frame_type type, uint32_t stream_id, size_t length, uint8_t flags, struct aws_byte_buf *output) { AWS_PRECONDITION(output); AWS_PRECONDITION(!(stream_id & s_u32_top_bit_mask), "Invalid stream ID"); AWS_PRECONDITION(length <= AWS_H2_PAYLOAD_MAX); /* Frame prefix is encoded like this (RFC-7540 4.1): * +-----------------------------------------------+ * | Length (24) | * +---------------+---------------+---------------+ * | Type (8) | Flags (8) | * +-+-------------+---------------+-------------------------------+ * |R| Stream Identifier (31) | * +=+=============================================================+ */ bool writes_ok = true; /* Write length */ writes_ok &= aws_byte_buf_write_be24(output, (uint32_t)length); /* Write type */ writes_ok &= aws_byte_buf_write_u8(output, type); /* Write flags */ writes_ok &= aws_byte_buf_write_u8(output, flags); /* Write stream id (with reserved first bit) */ writes_ok &= aws_byte_buf_write_be32(output, stream_id); AWS_ASSERT(writes_ok); (void)writes_ok; } /*********************************************************************************************************************** * 
Encoder **********************************************************************************************************************/ int aws_h2_frame_encoder_init( struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator, const void *logging_id) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(allocator); AWS_ZERO_STRUCT(*encoder); encoder->allocator = allocator; encoder->logging_id = logging_id; aws_hpack_encoder_init(&encoder->hpack, allocator, logging_id); encoder->settings.max_frame_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE]; return AWS_OP_SUCCESS; } void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder) { AWS_PRECONDITION(encoder); aws_hpack_encoder_clean_up(&encoder->hpack); } /*********************************************************************************************************************** * DATA **********************************************************************************************************************/ int aws_h2_encode_data_frame( struct aws_h2_frame_encoder *encoder, uint32_t stream_id, struct aws_input_stream *body_stream, bool body_ends_stream, uint8_t pad_length, int32_t *stream_window_size_peer, size_t *connection_window_size_peer, struct aws_byte_buf *output, bool *body_complete, bool *body_stalled) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(body_stream); AWS_PRECONDITION(output); AWS_PRECONDITION(body_complete); AWS_PRECONDITION(body_stalled); AWS_PRECONDITION(*stream_window_size_peer > 0); if (aws_h2_validate_stream_id(stream_id)) { return AWS_OP_ERR; } *body_complete = false; *body_stalled = false; uint8_t flags = 0; /* * Payload-length is the first thing encoded in a frame, but we don't know how * much data we'll get from the body-stream until we actually read it. * Therefore, we determine the exact location that the body data should go, * then stream the body directly into that part of the output buffer. * Then we will go and write the other parts of the frame in around it. 
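 * Resulting layout in the output buffer for a padded DATA frame (sketch; offsets assume
 * the 9-byte prefix written by s_frame_prefix_encode()):
 *   [ frame prefix (9) ][ pad length (1) ][ body bytes streamed directly ][ zero padding ]
 * The body is streamed into the middle first, then the prefix, pad-length byte and
 * trailing padding are written around it.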
*/ size_t bytes_preceding_body = AWS_H2_FRAME_PREFIX_SIZE; size_t payload_overhead = 0; /* Amount of "payload" that will not contain body (padding) */ if (pad_length > 0) { flags |= AWS_H2_FRAME_F_PADDED; /* Padding len is 1st byte of payload (padding itself goes at end of payload) */ bytes_preceding_body += 1; payload_overhead = 1 + pad_length; } /* Max amount allowed by stream and connection flow-control window */ size_t min_window_size = aws_min_size(*stream_window_size_peer, *connection_window_size_peer); /* Max amount of payload we can do right now */ size_t max_payload; if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) { goto handle_waiting_for_more_space; } /* The flow-control window will limit the size for max_payload of a flow-controlled frame */ max_payload = aws_min_size(max_payload, min_window_size); /* Max amount of body we can fit in the payload*/ size_t max_body; if (aws_sub_size_checked(max_payload, payload_overhead, &max_body) || max_body == 0) { goto handle_waiting_for_more_space; } /* Use a sub-buffer to limit where body can go */ struct aws_byte_buf body_sub_buf = aws_byte_buf_from_empty_array(output->buffer + output->len + bytes_preceding_body, max_body); /* Read body into sub-buffer */ if (aws_input_stream_read(body_stream, &body_sub_buf)) { goto error; } /* Check if we've reached the end of the body */ struct aws_stream_status body_status; if (aws_input_stream_get_status(body_stream, &body_status)) { goto error; } if (body_status.is_end_of_stream) { *body_complete = true; if (body_ends_stream) { flags |= AWS_H2_FRAME_F_END_STREAM; } } else { if (body_sub_buf.len < body_sub_buf.capacity) { /* Body stream was unable to provide as much data as it could have */ *body_stalled = true; if (body_sub_buf.len == 0) { /* This frame would have no useful information, don't even bother sending it */ goto handle_nothing_to_send_right_now; } } } ENCODER_LOGF( TRACE, encoder, "Encoding frame type=DATA stream_id=%" PRIu32 " data_len=%zu stalled=%d%s", stream_id, body_sub_buf.len, *body_stalled, (flags & AWS_H2_FRAME_F_END_STREAM) ? " END_STREAM" : ""); /* * Write in the other parts of the frame. 
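 * For reference, the body size chosen above is effectively (illustrative numbers,
 * not from a real run):
 *   min(space in output minus prefix, settings.max_frame_size,
 *       *stream_window_size_peer, *connection_window_size_peer) minus padding overhead,
 * e.g. min(30000, 16384, 65535, 100000) with no padding allows 16384 bytes of body.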
*/ bool writes_ok = true; /* Write the frame prefix */ const size_t payload_len = body_sub_buf.len + payload_overhead; s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output); /* Write pad length */ if (flags & AWS_H2_FRAME_F_PADDED) { writes_ok &= aws_byte_buf_write_u8(output, pad_length); } /* Increment output->len to jump over the body that we already wrote in */ AWS_ASSERT(output->buffer + output->len == body_sub_buf.buffer && "Streamed DATA to wrong position"); output->len += body_sub_buf.len; /* Write padding */ if (flags & AWS_H2_FRAME_F_PADDED) { writes_ok &= aws_byte_buf_write_u8_n(output, 0, pad_length); } /* update the connection window size now, we will update stream window size when this function returns */ AWS_ASSERT(payload_len <= min_window_size); *connection_window_size_peer -= payload_len; *stream_window_size_peer -= (int32_t)payload_len; AWS_ASSERT(writes_ok); (void)writes_ok; return AWS_OP_SUCCESS; handle_waiting_for_more_space: ENCODER_LOGF(TRACE, encoder, "Insufficient space to encode DATA for stream %" PRIu32 " right now", stream_id); return AWS_OP_SUCCESS; handle_nothing_to_send_right_now: ENCODER_LOGF(INFO, encoder, "Stream %" PRIu32 " produced 0 bytes of body data", stream_id); return AWS_OP_SUCCESS; error: return AWS_OP_ERR; } /*********************************************************************************************************************** * HEADERS / PUSH_PROMISE **********************************************************************************************************************/ DEFINE_FRAME_VTABLE(headers); /* Represents a HEADERS or PUSH_PROMISE frame (followed by zero or more CONTINUATION frames) */ struct aws_h2_frame_headers { struct aws_h2_frame base; /* Common data */ const struct aws_http_headers *headers; uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ /* HEADERS-only data */ bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */ bool has_priority; /* AWS_H2_FRAME_F_PRIORITY */ struct aws_h2_frame_priority_settings priority; /* PUSH_PROMISE-only data */ uint32_t promised_stream_id; /* State */ enum { AWS_H2_HEADERS_STATE_INIT, AWS_H2_HEADERS_STATE_FIRST_FRAME, /* header-block pre-encoded, no frames written yet */ AWS_H2_HEADERS_STATE_CONTINUATION, /* first frame written, need to write CONTINUATION frames now */ AWS_H2_HEADERS_STATE_COMPLETE, } state; struct aws_byte_buf whole_encoded_header_block; struct aws_byte_cursor header_block_cursor; /* tracks progress sending encoded header-block in fragments */ }; static struct aws_h2_frame *s_frame_new_headers_or_push_promise( struct aws_allocator *allocator, enum aws_h2_frame_type frame_type, uint32_t stream_id, const struct aws_http_headers *headers, uint8_t pad_length, bool end_stream, const struct aws_h2_frame_priority_settings *optional_priority, uint32_t promised_stream_id) { /* TODO: Host and ":authority" are no longer permitted to disagree. Should we enforce it here or sent it as * requested, let the server side reject the request? 
*/ AWS_PRECONDITION(allocator); AWS_PRECONDITION(frame_type == AWS_H2_FRAME_T_HEADERS || frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); AWS_PRECONDITION(headers); /* Validate args */ if (aws_h2_validate_stream_id(stream_id)) { return NULL; } if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) { if (aws_h2_validate_stream_id(promised_stream_id)) { return NULL; } } if (optional_priority && aws_h2_validate_stream_id(optional_priority->stream_dependency)) { return NULL; } /* Create */ struct aws_h2_frame_headers *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_headers)); if (!frame) { return NULL; } if (aws_byte_buf_init(&frame->whole_encoded_header_block, allocator, s_encoded_header_block_reserve)) { goto error; } if (frame_type == AWS_H2_FRAME_T_HEADERS) { frame->end_stream = end_stream; if (optional_priority) { frame->has_priority = true; frame->priority = *optional_priority; } } else { frame->promised_stream_id = promised_stream_id; } s_init_frame_base(&frame->base, allocator, frame_type, &s_frame_headers_vtable, stream_id); aws_http_headers_acquire((struct aws_http_headers *)headers); frame->headers = headers; frame->pad_length = pad_length; return &frame->base; error: s_frame_headers_destroy(&frame->base); return NULL; } struct aws_h2_frame *aws_h2_frame_new_headers( struct aws_allocator *allocator, uint32_t stream_id, const struct aws_http_headers *headers, bool end_stream, uint8_t pad_length, const struct aws_h2_frame_priority_settings *optional_priority) { return s_frame_new_headers_or_push_promise( allocator, AWS_H2_FRAME_T_HEADERS, stream_id, headers, pad_length, end_stream, optional_priority, 0 /* HEADERS doesn't have promised_stream_id */); } struct aws_h2_frame *aws_h2_frame_new_push_promise( struct aws_allocator *allocator, uint32_t stream_id, uint32_t promised_stream_id, const struct aws_http_headers *headers, uint8_t pad_length) { return s_frame_new_headers_or_push_promise( allocator, AWS_H2_FRAME_T_PUSH_PROMISE, stream_id, headers, pad_length, false /* PUSH_PROMISE doesn't have end_stream flag */, NULL /* PUSH_PROMISE doesn't have priority_settings */, promised_stream_id); } static void s_frame_headers_destroy(struct aws_h2_frame *frame_base) { struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base); aws_http_headers_release((struct aws_http_headers *)frame->headers); aws_byte_buf_clean_up(&frame->whole_encoded_header_block); aws_mem_release(frame->base.alloc, frame); } /* Encode the next frame for this header-block (or encode nothing if output buffer is too small). */ static void s_encode_single_header_block_frame( struct aws_h2_frame_headers *frame, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, bool *waiting_for_more_space) { /* * Figure out the details of the next frame to encode. * The first frame will be either HEADERS or PUSH_PROMISE. 
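 * Payload overhead for that first frame, per the branches below (worked example, values
 * assumed): a HEADERS frame with pad_length == 8 and a priority block has
 * payload_overhead == (1 + 8) + 5 == 14, so a 16384-byte max payload leaves 16370 bytes
 * for the header-block fragment; a PUSH_PROMISE instead adds 4 bytes for the promised
 * stream id.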
* All subsequent frames will be CONTINUATION */ enum aws_h2_frame_type frame_type; uint8_t flags = 0; uint8_t pad_length = 0; const struct aws_h2_frame_priority_settings *priority_settings = NULL; const uint32_t *promised_stream_id = NULL; size_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */ if (frame->state == AWS_H2_HEADERS_STATE_FIRST_FRAME) { frame_type = frame->base.type; if (frame->pad_length > 0) { flags |= AWS_H2_FRAME_F_PADDED; pad_length = frame->pad_length; payload_overhead += 1 + pad_length; } if (frame->has_priority) { priority_settings = &frame->priority; flags |= AWS_H2_FRAME_F_PRIORITY; payload_overhead += s_frame_priority_settings_size; } if (frame->end_stream) { flags |= AWS_H2_FRAME_F_END_STREAM; } if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) { promised_stream_id = &frame->promised_stream_id; payload_overhead += 4; } } else /* CONTINUATION */ { frame_type = AWS_H2_FRAME_T_CONTINUATION; } /* * Figure out what size header-block fragment should go in this frame. */ size_t max_payload; if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) { goto handle_waiting_for_more_space; } size_t max_fragment; if (aws_sub_size_checked(max_payload, payload_overhead, &max_fragment)) { goto handle_waiting_for_more_space; } const size_t fragment_len = aws_min_size(max_fragment, frame->header_block_cursor.len); if (fragment_len == frame->header_block_cursor.len) { /* This will finish the header-block */ flags |= AWS_H2_FRAME_F_END_HEADERS; } else { /* If we're not finishing the header-block, is it even worth trying to send this frame now? */ const size_t even_worth_sending_threshold = AWS_H2_FRAME_PREFIX_SIZE + payload_overhead; if (fragment_len < even_worth_sending_threshold) { goto handle_waiting_for_more_space; } } /* * Ok, it fits! Write the frame */ ENCODER_LOGF( TRACE, encoder, "Encoding frame type=%s stream_id=%" PRIu32 "%s%s", aws_h2_frame_type_to_str(frame_type), frame->base.stream_id, (flags & AWS_H2_FRAME_F_END_HEADERS) ? " END_HEADERS" : "", (flags & AWS_H2_FRAME_F_END_STREAM) ? " END_STREAM" : ""); bool writes_ok = true; /* Write the frame prefix */ const size_t payload_len = fragment_len + payload_overhead; s_frame_prefix_encode(frame_type, frame->base.stream_id, payload_len, flags, output); /* Write pad length */ if (flags & AWS_H2_FRAME_F_PADDED) { AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION); writes_ok &= aws_byte_buf_write_u8(output, pad_length); } /* Write priority */ if (flags & AWS_H2_FRAME_F_PRIORITY) { AWS_ASSERT(frame_type == AWS_H2_FRAME_T_HEADERS); s_frame_priority_settings_encode(priority_settings, output); } /* Write promised stream ID */ if (promised_stream_id) { AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); writes_ok &= aws_byte_buf_write_be32(output, *promised_stream_id); } /* Write header-block fragment */ if (fragment_len > 0) { struct aws_byte_cursor fragment = aws_byte_cursor_advance(&frame->header_block_cursor, fragment_len); writes_ok &= aws_byte_buf_write_from_whole_cursor(output, fragment); } /* Write padding */ if (flags & AWS_H2_FRAME_F_PADDED) { writes_ok &= aws_byte_buf_write_u8_n(output, 0, pad_length); } AWS_ASSERT(writes_ok); (void)writes_ok; /* Success! Wrote entire frame. It's safe to change state now */ frame->state = flags & AWS_H2_FRAME_F_END_HEADERS ? 
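    /* i.e. (hedged paraphrase): COMPLETE once END_HEADERS has been written,
     * otherwise CONTINUATION frames still need to be written for this header-block. */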
AWS_H2_HEADERS_STATE_COMPLETE : AWS_H2_HEADERS_STATE_CONTINUATION; *waiting_for_more_space = false; return; handle_waiting_for_more_space: ENCODER_LOGF( TRACE, encoder, "Insufficient space to encode %s for stream %" PRIu32 " right now", aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id); *waiting_for_more_space = true; } static int s_frame_headers_encode( struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, bool *complete) { struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base); /* Pre-encode the entire header-block into another buffer * the first time we're called. */ if (frame->state == AWS_H2_HEADERS_STATE_INIT) { if (aws_hpack_encode_header_block(&encoder->hpack, frame->headers, &frame->whole_encoded_header_block)) { ENCODER_LOGF( ERROR, encoder, "Error doing HPACK encoding on %s of stream %" PRIu32 ": %s", aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id, aws_error_name(aws_last_error())); goto error; } frame->header_block_cursor = aws_byte_cursor_from_buf(&frame->whole_encoded_header_block); frame->state = AWS_H2_HEADERS_STATE_FIRST_FRAME; } /* Write frames (HEADER or PUSH_PROMISE, followed by N CONTINUATION frames) * until we're done writing header-block or the buffer is too full to continue */ bool waiting_for_more_space = false; while (frame->state < AWS_H2_HEADERS_STATE_COMPLETE && !waiting_for_more_space) { s_encode_single_header_block_frame(frame, encoder, output, &waiting_for_more_space); } *complete = frame->state == AWS_H2_HEADERS_STATE_COMPLETE; return AWS_OP_SUCCESS; error: return AWS_OP_ERR; } /*********************************************************************************************************************** * aws_h2_frame_prebuilt - Used by small simple frame types that we can pre-encode at the time of creation. * The pre-encoded buffer is then just copied bit-by-bit during the actual "encode()" function. * * It's safe to pre-encode a frame if it doesn't query/mutate any external state. So PING is totally great * to pre-encode, but HEADERS (which queries MAX_FRAME_SIZE and mutates the HPACK table) would be a bad candidate. **********************************************************************************************************************/ struct aws_h2_frame_prebuilt { struct aws_h2_frame base; /* The whole entire frame is pre-encoded to this buffer during construction. * The buffer has the exact capacity necessary to hold the frame */ struct aws_byte_buf encoded_buf; /* After construction, this cursor points to the full contents of encoded_buf. * As encode() is called, we copy the contents to output and advance the cursor.*/ struct aws_byte_cursor cursor; }; DEFINE_FRAME_VTABLE(prebuilt); /* Can't pre-encode a frame unless it's guaranteed to fit, regardless of current settings. */ static size_t s_prebuilt_payload_max(void) { return aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][0]; } /* Create aws_h2_frame_prebuilt and encode frame prefix into frame->encoded_buf. * Caller must encode the payload to fill the rest of the encoded_buf. 
*/ static struct aws_h2_frame_prebuilt *s_h2_frame_new_prebuilt( struct aws_allocator *allocator, enum aws_h2_frame_type type, uint32_t stream_id, size_t payload_len, uint8_t flags) { AWS_PRECONDITION(payload_len <= s_prebuilt_payload_max()); const size_t encoded_frame_len = AWS_H2_FRAME_PREFIX_SIZE + payload_len; /* Use single allocation for frame and buffer storage */ struct aws_h2_frame_prebuilt *frame; void *storage; if (!aws_mem_acquire_many( allocator, 2, &frame, sizeof(struct aws_h2_frame_prebuilt), &storage, encoded_frame_len)) { return NULL; } AWS_ZERO_STRUCT(*frame); s_init_frame_base(&frame->base, allocator, type, &s_frame_prebuilt_vtable, stream_id); /* encoded_buf has the exact amount of space necessary for the full encoded frame. * The constructor of our subclass must finish filling up encoded_buf with the payload. */ frame->encoded_buf = aws_byte_buf_from_empty_array(storage, encoded_frame_len); /* cursor points to full capacity of encoded_buf. * Our subclass's constructor will finish writing the payload and fill encoded_buf to capacity. * When encode() is called, we'll copy cursor's contents into available output space and advance the cursor. */ frame->cursor = aws_byte_cursor_from_array(storage, encoded_frame_len); /* Write frame prefix */ s_frame_prefix_encode(type, stream_id, payload_len, flags, &frame->encoded_buf); return frame; } static void s_frame_prebuilt_destroy(struct aws_h2_frame *frame_base) { aws_mem_release(frame_base->alloc, frame_base); } static int s_frame_prebuilt_encode( struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, bool *complete) { (void)encoder; struct aws_h2_frame_prebuilt *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_prebuilt, base); /* encoded_buf should have been filled to capacity during construction */ AWS_ASSERT(frame->encoded_buf.len == frame->encoded_buf.capacity); /* After construction, cursor points to the full contents of encoded_buf. * As encode() is called, we copy the contents to output and advance the cursor. */ if (frame->cursor.len == frame->encoded_buf.len) { /* We haven't sent anything yet, announce start of frame */ ENCODER_LOGF( TRACE, encoder, "Encoding frame type=%s stream_id=%" PRIu32, aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id); } else { /* We've already sent a bit, announce that we're resuming */ ENCODER_LOGF( TRACE, encoder, "Resume encoding frame type=%s stream_id=%" PRIu32, aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id); } bool writes_ok = true; /* Copy as much as we can from cursor (pre-encoded frame contents) to output. * Advance the cursor to mark our progress. 
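 * Illustration (numbers assumed): if 17 bytes of pre-encoded frame remain in the cursor
 * but only 5 bytes of space remain in output, chunk_len == 5, *complete stays false,
 * and the next encode() call resumes from the advanced cursor with the remaining 12 bytes.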
*/ size_t chunk_len = aws_min_size(frame->cursor.len, output->capacity - output->len); struct aws_byte_cursor chunk = aws_byte_cursor_advance(&frame->cursor, chunk_len); writes_ok &= aws_byte_buf_write_from_whole_cursor(output, chunk); AWS_ASSERT(writes_ok); (void)writes_ok; if (frame->cursor.len == 0) { *complete = true; } else { ENCODER_LOGF( TRACE, encoder, "Incomplete encoding of frame type=%s stream_id=%" PRIu32 ", will resume later...", aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id); *complete = false; } return AWS_OP_SUCCESS; } /*********************************************************************************************************************** * PRIORITY **********************************************************************************************************************/ struct aws_h2_frame *aws_h2_frame_new_priority( struct aws_allocator *allocator, uint32_t stream_id, const struct aws_h2_frame_priority_settings *priority) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(priority); if (aws_h2_validate_stream_id(stream_id) || aws_h2_validate_stream_id(priority->stream_dependency)) { return NULL; } /* PRIORITY can be pre-encoded */ const uint8_t flags = 0; const size_t payload_len = s_frame_priority_settings_size; struct aws_h2_frame_prebuilt *frame = s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PRIORITY, stream_id, payload_len, flags); if (!frame) { return NULL; } /* Write the priority settings */ s_frame_priority_settings_encode(priority, &frame->encoded_buf); return &frame->base; } /*********************************************************************************************************************** * RST_STREAM **********************************************************************************************************************/ static const size_t s_frame_rst_stream_length = 4; struct aws_h2_frame *aws_h2_frame_new_rst_stream( struct aws_allocator *allocator, uint32_t stream_id, uint32_t error_code) { if (aws_h2_validate_stream_id(stream_id)) { return NULL; } /* RST_STREAM can be pre-encoded */ const uint8_t flags = 0; const size_t payload_len = s_frame_rst_stream_length; struct aws_h2_frame_prebuilt *frame = s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_RST_STREAM, stream_id, payload_len, flags); if (!frame) { return NULL; } /* Write RST_STREAM payload (RFC-7540 6.4): * +---------------------------------------------------------------+ * | Error Code (32) | * +---------------------------------------------------------------+ */ bool writes_ok = true; writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code); AWS_ASSERT(writes_ok); (void)writes_ok; return &frame->base; } /*********************************************************************************************************************** * SETTINGS **********************************************************************************************************************/ static const size_t s_frame_setting_length = 6; struct aws_h2_frame *aws_h2_frame_new_settings( struct aws_allocator *allocator, const struct aws_http2_setting *settings_array, size_t num_settings, bool ack) { AWS_PRECONDITION(settings_array || num_settings == 0); /* Cannot send settings in an ACK frame */ if (ack && num_settings > 0) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } /* Check against insane edge case of too many settings to fit in a frame. 
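 * With the values defined above this works out to
 *   max_settings == s_prebuilt_payload_max() / s_frame_setting_length == 16384 / 6 == 2730,
 * since a pre-built frame is capped at the minimum legal MAX_FRAME_SIZE and each setting
 * is encoded as a 2-byte identifier plus a 4-byte value.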
*/ const size_t max_settings = s_prebuilt_payload_max() / s_frame_setting_length; if (num_settings > max_settings) { AWS_LOGF_ERROR( AWS_LS_HTTP_ENCODER, "Cannot create SETTINGS frame with %zu settings, the limit is %zu.", num_settings, max_settings); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } /* SETTINGS can be pre-encoded */ const uint8_t flags = ack ? AWS_H2_FRAME_F_ACK : 0; const size_t payload_len = num_settings * s_frame_setting_length; const uint32_t stream_id = 0; struct aws_h2_frame_prebuilt *frame = s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_SETTINGS, stream_id, payload_len, flags); if (!frame) { return NULL; } /* Write the settings, each one is encoded like (RFC-7540 6.5.1): * +-------------------------------+ * | Identifier (16) | * +-------------------------------+-------------------------------+ * | Value (32) | * +---------------------------------------------------------------+ */ bool writes_ok = true; for (size_t i = 0; i < num_settings; ++i) { writes_ok &= aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id); writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value); } AWS_ASSERT(writes_ok); (void)writes_ok; return &frame->base; } /*********************************************************************************************************************** * PING **********************************************************************************************************************/ struct aws_h2_frame *aws_h2_frame_new_ping( struct aws_allocator *allocator, bool ack, const uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]) { /* PING can be pre-encoded */ const uint8_t flags = ack ? AWS_H2_FRAME_F_ACK : 0; const size_t payload_len = AWS_HTTP2_PING_DATA_SIZE; const uint32_t stream_id = 0; struct aws_h2_frame_prebuilt *frame = s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PING, stream_id, payload_len, flags); if (!frame) { return NULL; } /* Write the PING payload (RFC-7540 6.7): * +---------------------------------------------------------------+ * | | * | Opaque Data (64) | * | | * +---------------------------------------------------------------+ */ bool writes_ok = true; writes_ok &= aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_HTTP2_PING_DATA_SIZE); AWS_ASSERT(writes_ok); (void)writes_ok; /* PING responses SHOULD be given higher priority than any other frame */ frame->base.high_priority = ack; return &frame->base; } /*********************************************************************************************************************** * GOAWAY **********************************************************************************************************************/ static const size_t s_frame_goaway_length_min = 8; struct aws_h2_frame *aws_h2_frame_new_goaway( struct aws_allocator *allocator, uint32_t last_stream_id, uint32_t error_code, struct aws_byte_cursor debug_data) { /* If debug_data is too long, don't sent it. * It's more important that the GOAWAY frame gets sent. */ const size_t debug_data_max = s_prebuilt_payload_max() - s_frame_goaway_length_min; if (debug_data.len > debug_data_max) { AWS_LOGF_WARN( AWS_LS_HTTP_ENCODER, "Sending GOAWAY without debug-data. 
Debug-data size %zu exceeds internal limit of %zu", debug_data.len, debug_data_max); debug_data.len = 0; } /* It would be illegal to send a lower value, this is unrecoverable */ AWS_FATAL_ASSERT(last_stream_id <= AWS_H2_STREAM_ID_MAX); /* GOAWAY can be pre-encoded */ const uint8_t flags = 0; const size_t payload_len = debug_data.len + s_frame_goaway_length_min; const uint32_t stream_id = 0; struct aws_h2_frame_prebuilt *frame = s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_GOAWAY, stream_id, payload_len, flags); if (!frame) { return NULL; } /* Write the GOAWAY payload (RFC-7540 6.8): * +-+-------------------------------------------------------------+ * |R| Last-Stream-ID (31) | * +-+-------------------------------------------------------------+ * | Error Code (32) | * +---------------------------------------------------------------+ * | Additional Debug Data (*) | * +---------------------------------------------------------------+ */ bool writes_ok = true; writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, last_stream_id); writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code); writes_ok &= aws_byte_buf_write_from_whole_cursor(&frame->encoded_buf, debug_data); AWS_ASSERT(writes_ok); (void)writes_ok; return &frame->base; } /*********************************************************************************************************************** * WINDOW_UPDATE **********************************************************************************************************************/ static const size_t s_frame_window_update_length = 4; struct aws_h2_frame *aws_h2_frame_new_window_update( struct aws_allocator *allocator, uint32_t stream_id, uint32_t window_size_increment) { /* Note: stream_id may be zero or non-zero */ if (stream_id > AWS_H2_STREAM_ID_MAX) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) { AWS_LOGF_ERROR( AWS_LS_HTTP_ENCODER, "Window increment size %" PRIu32 " exceeds HTTP/2 max %" PRIu32, window_size_increment, AWS_H2_WINDOW_UPDATE_MAX); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } /* WINDOW_UPDATE can be pre-encoded */ const uint8_t flags = 0; const size_t payload_len = s_frame_window_update_length; struct aws_h2_frame_prebuilt *frame = s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, payload_len, flags); if (!frame) { return NULL; } /* Write the WINDOW_UPDATE payload (RFC-7540 6.9): * +-+-------------------------------------------------------------+ * |R| Window Size Increment (31) | * +-+-------------------------------------------------------------+ */ bool writes_ok = true; writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, window_size_increment); AWS_ASSERT(writes_ok); (void)writes_ok; return &frame->base; } void aws_h2_frame_destroy(struct aws_h2_frame *frame) { if (frame) { frame->vtable->destroy(frame); } } int aws_h2_encode_frame( struct aws_h2_frame_encoder *encoder, struct aws_h2_frame *frame, struct aws_byte_buf *output, bool *frame_complete) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(frame); AWS_PRECONDITION(output); AWS_PRECONDITION(frame_complete); if (encoder->has_errored) { ENCODER_LOG(ERROR, encoder, "Encoder cannot be used again after an error"); return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (encoder->current_frame && (encoder->current_frame != frame)) { ENCODER_LOG(ERROR, encoder, "Cannot encode new frame until previous frame completes"); return aws_raise_error(AWS_ERROR_INVALID_STATE); } *frame_complete = 
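    /* *frame_complete is cleared here before dispatching to frame->vtable->encode(), which
     * sets it once the whole frame has been written. Hedged caller-side sketch (illustration
     * only, not code from this file):
     *   bool done = false;
     *   do {
     *       ...flush output to the channel and make room when it fills up...
     *       if (aws_h2_encode_frame(encoder, frame, output, &done)) { break; }
     *   } while (!done);
     * Passing a different frame before done is true fails with AWS_ERROR_INVALID_STATE,
     * as checked above. */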
false; if (frame->vtable->encode(frame, encoder, output, frame_complete)) { ENCODER_LOGF( ERROR, encoder, "Failed to encode frame type=%s stream_id=%" PRIu32 ", %s", aws_h2_frame_type_to_str(frame->type), frame->stream_id, aws_error_name(aws_last_error())); encoder->has_errored = true; return AWS_OP_ERR; } encoder->current_frame = *frame_complete ? NULL : frame; return AWS_OP_SUCCESS; } void aws_h2_frame_encoder_set_setting_header_table_size(struct aws_h2_frame_encoder *encoder, uint32_t data) { /* Setting for dynamic table size changed from peer, we will update the dynamic table size when we encoder the next * header block */ aws_hpack_encoder_update_max_table_size(&encoder->hpack, data); } void aws_h2_frame_encoder_set_setting_max_frame_size(struct aws_h2_frame_encoder *encoder, uint32_t data) { encoder->settings.max_frame_size = data; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/h2_stream.c000066400000000000000000001666531456575232400235600ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include /* Apple toolchains such as xcode and swiftpm define the DEBUG symbol. undef it here so we can actually use the token */ #undef DEBUG static void s_stream_destroy(struct aws_http_stream *stream_base); static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size); static int s_stream_reset_stream(struct aws_http_stream *stream_base, uint32_t http2_error); static int s_stream_get_received_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error); static int s_stream_get_sent_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error); static int s_stream_write_data( struct aws_http_stream *stream_base, const struct aws_http2_stream_write_data_options *options); static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream, struct aws_h2err stream_error); static int s_stream_reset_stream_internal( struct aws_http_stream *stream_base, struct aws_h2err stream_error, bool cancelling); static void s_stream_cancel(struct aws_http_stream *stream, int error_code); struct aws_http_stream_vtable s_h2_stream_vtable = { .destroy = s_stream_destroy, .update_window = s_stream_update_window, .activate = aws_h2_stream_activate, .cancel = s_stream_cancel, .http1_write_chunk = NULL, .http2_reset_stream = s_stream_reset_stream, .http2_get_received_error_code = s_stream_get_received_error_code, .http2_get_sent_error_code = s_stream_get_sent_error_code, .http2_write_data = s_stream_write_data, }; const char *aws_h2_stream_state_to_str(enum aws_h2_stream_state state) { switch (state) { case AWS_H2_STREAM_STATE_IDLE: return "IDLE"; case AWS_H2_STREAM_STATE_RESERVED_LOCAL: return "RESERVED_LOCAL"; case AWS_H2_STREAM_STATE_RESERVED_REMOTE: return "RESERVED_REMOTE"; case AWS_H2_STREAM_STATE_OPEN: return "OPEN"; case AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL: return "HALF_CLOSED_LOCAL"; case AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE: return "HALF_CLOSED_REMOTE"; case AWS_H2_STREAM_STATE_CLOSED: return "CLOSED"; default: /* unreachable */ AWS_ASSERT(0); return "*** UNKNOWN ***"; } } static struct aws_h2_connection *s_get_h2_connection(const struct aws_h2_stream *stream) { return AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h2_connection, base); } static void 
s_lock_synced_data(struct aws_h2_stream *stream) { int err = aws_mutex_lock(&stream->synced_data.lock); AWS_ASSERT(!err && "lock failed"); (void)err; } static void s_unlock_synced_data(struct aws_h2_stream *stream) { int err = aws_mutex_unlock(&stream->synced_data.lock); AWS_ASSERT(!err && "unlock failed"); (void)err; } #define AWS_PRECONDITION_ON_CHANNEL_THREAD(STREAM) \ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(s_get_h2_connection(STREAM)->base.channel_slot->channel)) static bool s_client_state_allows_frame_type[AWS_H2_STREAM_STATE_COUNT][AWS_H2_FRAME_TYPE_COUNT] = { /* State before anything is sent or received */ [AWS_H2_STREAM_STATE_IDLE] = {0}, /* Client streams are never in reserved (local) state */ [AWS_H2_STREAM_STATE_RESERVED_LOCAL] = {0}, /* Client received push-request via PUSH_PROMISE on another stream. * Waiting for push-response to start arriving on this server-initiated stream. */ [AWS_H2_STREAM_STATE_RESERVED_REMOTE] = { [AWS_H2_FRAME_T_HEADERS] = true, [AWS_H2_FRAME_T_RST_STREAM] = true, }, /* Client is sending request and has not received full response yet. */ [AWS_H2_STREAM_STATE_OPEN] = { [AWS_H2_FRAME_T_DATA] = true, [AWS_H2_FRAME_T_HEADERS] = true, [AWS_H2_FRAME_T_RST_STREAM] = true, [AWS_H2_FRAME_T_PUSH_PROMISE] = true, [AWS_H2_FRAME_T_WINDOW_UPDATE] = true, }, /* Client has sent full request (END_STREAM), but has not received full response yet. */ [AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL] = { [AWS_H2_FRAME_T_DATA] = true, [AWS_H2_FRAME_T_HEADERS] = true, [AWS_H2_FRAME_T_RST_STREAM] = true, [AWS_H2_FRAME_T_PUSH_PROMISE] = true, [AWS_H2_FRAME_T_WINDOW_UPDATE] = true, }, /* Client has received full response (END_STREAM), but is still sending request (uncommon). */ [AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE] = { [AWS_H2_FRAME_T_RST_STREAM] = true, [AWS_H2_FRAME_T_WINDOW_UPDATE] = true, }, /* Full request sent (END_STREAM) and full response received (END_STREAM). * OR sent RST_STREAM. OR received RST_STREAM. */ [AWS_H2_STREAM_STATE_CLOSED] = {0}, }; static bool s_server_state_allows_frame_type[AWS_H2_STREAM_STATE_COUNT][AWS_H2_FRAME_TYPE_COUNT] = { /* State before anything is sent or received, waiting for request headers to arrives and start things off */ [AWS_H2_STREAM_STATE_IDLE] = { [AWS_H2_FRAME_T_HEADERS] = true, }, /* Server sent push-request via PUSH_PROMISE on a client-initiated stream, * but hasn't started sending the push-response on this server-initiated stream yet. */ [AWS_H2_STREAM_STATE_RESERVED_LOCAL] = { [AWS_H2_FRAME_T_RST_STREAM] = true, [AWS_H2_FRAME_T_WINDOW_UPDATE] = true, }, /* Server streams are never in reserved (remote) state */ [AWS_H2_STREAM_STATE_RESERVED_REMOTE] = {0}, /* Server is receiving request, and has sent full response yet. */ [AWS_H2_STREAM_STATE_OPEN] = { [AWS_H2_FRAME_T_HEADERS] = true, [AWS_H2_FRAME_T_DATA] = true, [AWS_H2_FRAME_T_RST_STREAM] = true, [AWS_H2_FRAME_T_WINDOW_UPDATE] = true, }, /* Server has sent full response (END_STREAM), but has not received full response yet (uncommon). */ [AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL] = { [AWS_H2_FRAME_T_HEADERS] = true, [AWS_H2_FRAME_T_DATA] = true, [AWS_H2_FRAME_T_RST_STREAM] = true, [AWS_H2_FRAME_T_WINDOW_UPDATE] = true, }, /* Server has received full request (END_STREAM), and is still sending response. */ [AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE] = { [AWS_H2_FRAME_T_RST_STREAM] = true, [AWS_H2_FRAME_T_WINDOW_UPDATE] = true, }, /* Full request received (END_STREAM) and full response sent (END_STREAM). * OR sent RST_STREAM. OR received RST_STREAM. 
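 * Example lookups against these tables (frame and state names taken from above):
 * a client stream in HALF_CLOSED_LOCAL may still receive DATA, HEADERS, RST_STREAM,
 * PUSH_PROMISE and WINDOW_UPDATE; the same frame types arriving on a CLOSED stream are
 * rejected, and s_check_state_allows_frame_type() below maps that case to
 * AWS_HTTP2_ERR_STREAM_CLOSED rather than PROTOCOL_ERROR.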
*/ [AWS_H2_STREAM_STATE_CLOSED] = {0}, }; /* Returns the appropriate Stream Error if given frame not allowed in current state */ static struct aws_h2err s_check_state_allows_frame_type( const struct aws_h2_stream *stream, enum aws_h2_frame_type frame_type) { AWS_PRECONDITION(frame_type < AWS_H2_FRAME_T_UNKNOWN); /* Decoder won't invoke callbacks for unknown frame types */ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); const enum aws_h2_stream_state state = stream->thread_data.state; bool allowed; if (stream->base.server_data) { allowed = s_server_state_allows_frame_type[state][frame_type]; } else { allowed = s_client_state_allows_frame_type[state][frame_type]; } if (allowed) { return AWS_H2ERR_SUCCESS; } /* Determine specific error code */ enum aws_http2_error_code h2_error_code = AWS_HTTP2_ERR_PROTOCOL_ERROR; /* If peer knows the state is closed, then it's a STREAM_CLOSED error */ if (state == AWS_H2_STREAM_STATE_CLOSED || state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE) { h2_error_code = AWS_HTTP2_ERR_STREAM_CLOSED; } AWS_H2_STREAM_LOGF( ERROR, stream, "Malformed message, cannot receive %s frame in %s state", aws_h2_frame_type_to_str(frame_type), aws_h2_stream_state_to_str(state)); return aws_h2err_from_h2_code(h2_error_code); } static int s_stream_send_update_window_frame(struct aws_h2_stream *stream, size_t increment_size) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); AWS_PRECONDITION(increment_size <= AWS_H2_WINDOW_UPDATE_MAX); struct aws_h2_connection *connection = s_get_h2_connection(stream); struct aws_h2_frame *stream_window_update_frame = aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, (uint32_t)increment_size); if (!stream_window_update_frame) { AWS_H2_STREAM_LOGF( ERROR, stream, "Failed to create WINDOW_UPDATE frame on connection, error %s", aws_error_name(aws_last_error())); return AWS_OP_ERR; } aws_h2_connection_enqueue_outgoing_frame(connection, stream_window_update_frame); return AWS_OP_SUCCESS; } struct aws_h2_stream *aws_h2_stream_new_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { AWS_PRECONDITION(client_connection); AWS_PRECONDITION(options); struct aws_h2_stream *stream = aws_mem_calloc(client_connection->alloc, 1, sizeof(struct aws_h2_stream)); /* Initialize base stream */ stream->base.vtable = &s_h2_stream_vtable; stream->base.alloc = client_connection->alloc; stream->base.owning_connection = client_connection; stream->base.user_data = options->user_data; stream->base.on_incoming_headers = options->on_response_headers; stream->base.on_incoming_header_block_done = options->on_response_header_block_done; stream->base.on_incoming_body = options->on_response_body; stream->base.on_metrics = options->on_metrics; stream->base.on_complete = options->on_complete; stream->base.on_destroy = options->on_destroy; stream->base.client_data = &stream->base.client_or_server_data.client; stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN; stream->base.metrics.send_start_timestamp_ns = -1; stream->base.metrics.send_end_timestamp_ns = -1; stream->base.metrics.sending_duration_ns = -1; stream->base.metrics.receive_start_timestamp_ns = -1; stream->base.metrics.receive_end_timestamp_ns = -1; stream->base.metrics.receiving_duration_ns = -1; aws_linked_list_init(&stream->thread_data.outgoing_writes); aws_linked_list_init(&stream->synced_data.pending_write_list); /* Stream refcount starts at 1, and gets incremented again for the connection upon a call to activate() */ 
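/*
 * Caller-side view of the lifecycle set up below (a rough, illustrative sketch; callback and variable
 * names are placeholders, and the exact public entry-point names are not verified here -- only the option
 * fields referenced in this file are shown):
 *
 *     struct aws_http_make_request_options opts = {
 *         .self_size = sizeof(opts),
 *         .request = request,                       // aws_http_message carrying HTTP/2 headers
 *         .http2_use_manual_data_writes = true,     // body will be supplied via write_data() later
 *         .on_response_headers = on_headers,
 *         .on_response_body = on_body,
 *         .on_complete = on_complete,
 *     };
 *     struct aws_http_stream *stream = aws_http_connection_make_request(connection, &opts);
 *     aws_http_stream_activate(stream);   // takes the second refcount mentioned above
 *
 *     struct aws_http2_stream_write_data_options write = {.data = body_stream, .end_stream = true};
 *     aws_http2_stream_write_data(stream, &write);
 *     aws_http_stream_release(stream);
 */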
aws_atomic_init_int(&stream->base.refcount, 1); enum aws_http_version message_version = aws_http_message_get_protocol_version(options->request); switch (message_version) { case AWS_HTTP_VERSION_1_1: /* TODO: don't automatic transform HTTP/1 message. Let user explicitly pass in HTTP/2 request */ stream->thread_data.outgoing_message = aws_http2_message_new_from_http1(stream->base.alloc, options->request); if (!stream->thread_data.outgoing_message) { AWS_H2_STREAM_LOG(ERROR, stream, "Stream failed to create the HTTP/2 message from HTTP/1.1 message"); goto error; } break; case AWS_HTTP_VERSION_2: stream->thread_data.outgoing_message = options->request; aws_http_message_acquire(stream->thread_data.outgoing_message); break; default: /* Not supported */ aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL); goto error; } struct aws_byte_cursor method; AWS_ZERO_STRUCT(method); if (aws_http_message_get_request_method(options->request, &method)) { goto error; } stream->base.request_method = aws_http_str_to_method(method); /* Init H2 specific stuff */ stream->thread_data.state = AWS_H2_STREAM_STATE_IDLE; /* stream end is implicit if the request isn't using manual data writes */ stream->synced_data.manual_write_ended = !options->http2_use_manual_data_writes; stream->manual_write = options->http2_use_manual_data_writes; /* if there's a request body to write, add it as the first outgoing write */ struct aws_input_stream *body_stream = aws_http_message_get_body_stream(options->request); if (body_stream) { struct aws_h2_stream_data_write *body_write = aws_mem_calloc(stream->base.alloc, 1, sizeof(struct aws_h2_stream_data_write)); body_write->data_stream = aws_input_stream_acquire(body_stream); body_write->end_stream = !stream->manual_write; aws_linked_list_push_back(&stream->thread_data.outgoing_writes, &body_write->node); } stream->sent_reset_error_code = -1; stream->received_reset_error_code = -1; stream->synced_data.reset_error.h2_code = AWS_HTTP2_ERR_COUNT; stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_INIT; if (aws_mutex_init(&stream->synced_data.lock)) { AWS_H2_STREAM_LOGF( ERROR, stream, "Mutex init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error; } aws_channel_task_init( &stream->cross_thread_work_task, s_stream_cross_thread_work_task, stream, "HTTP/2 stream cross-thread work"); return stream; error: s_stream_destroy(&stream->base); return NULL; } static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_h2_stream *stream = arg; if (status != AWS_TASK_STATUS_RUN_READY) { goto end; } struct aws_h2_connection *connection = s_get_h2_connection(stream); if (aws_h2_stream_get_state(stream) == AWS_H2_STREAM_STATE_CLOSED) { /* stream is closed, silently ignoring the requests from user */ AWS_H2_STREAM_LOG( TRACE, stream, "Stream closed before cross thread work task runs, ignoring everything was sent by user."); goto end; } /* Not sending window update at half closed remote state */ bool ignore_window_update = (aws_h2_stream_get_state(stream) == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE); bool reset_called; size_t window_update_size; struct aws_h2err reset_error; struct aws_linked_list pending_writes; aws_linked_list_init(&pending_writes); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream); stream->synced_data.is_cross_thread_work_task_scheduled = false; /* window_update_size is ensured to be not greater than AWS_H2_WINDOW_UPDATE_MAX */ window_update_size = 
stream->synced_data.window_update_size; stream->synced_data.window_update_size = 0; reset_called = stream->synced_data.reset_called; reset_error = stream->synced_data.reset_error; /* copy out pending writes */ aws_linked_list_swap_contents(&pending_writes, &stream->synced_data.pending_write_list); s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (window_update_size > 0 && !ignore_window_update) { if (s_stream_send_update_window_frame(stream, window_update_size)) { /* Treat this as a connection error */ aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error()); } } /* The largest legal value will be 2 * max window size, which is way less than INT64_MAX, so if the window_size_self * overflows, remote peer will find it out. So just apply the change and ignore the possible overflow.*/ stream->thread_data.window_size_self += window_update_size; if (reset_called) { struct aws_h2err returned_h2err = s_send_rst_and_close_stream(stream, reset_error); if (aws_h2err_failed(returned_h2err)) { aws_h2_connection_shutdown_due_to_write_err(connection, returned_h2err.aws_code); } } if (stream->thread_data.waiting_for_writes && !aws_linked_list_empty(&pending_writes)) { /* Got more to write, move the stream back to outgoing list */ aws_linked_list_remove(&stream->node); aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node); stream->thread_data.waiting_for_writes = false; } /* move any pending writes to the outgoing write queue */ aws_linked_list_move_all_back(&stream->thread_data.outgoing_writes, &pending_writes); /* It's likely that frames were queued while processing cross-thread work. * If so, try writing them now */ aws_h2_try_write_outgoing_frames(connection); end: aws_http_stream_release(&stream->base); } static void s_stream_data_write_destroy( struct aws_h2_stream *stream, struct aws_h2_stream_data_write *write, int error_code) { AWS_PRECONDITION(stream); AWS_PRECONDITION(write); if (write->on_complete) { write->on_complete(&stream->base, error_code, write->user_data); } if (write->data_stream) { aws_input_stream_release(write->data_stream); } aws_mem_release(stream->base.alloc, write); } static void s_h2_stream_destroy_pending_writes(struct aws_h2_stream *stream) { /** * Only called when stream is not active and will never be active afterward (destroying). 
* Under this assumption, we can safely touch `stream->synced_data.pending_write_list` without * lock, as the user can only add write to the list when the stream is ACTIVE */ AWS_ASSERT(stream->synced_data.api_state != AWS_H2_STREAM_API_STATE_ACTIVE); aws_linked_list_move_all_back( &stream->thread_data.outgoing_writes, &stream->synced_data.pending_write_list); /* clean up any outgoing writes */ while (!aws_linked_list_empty(&stream->thread_data.outgoing_writes)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.outgoing_writes); struct aws_h2_stream_data_write *write = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node); AWS_LOGF_DEBUG(AWS_LS_HTTP_STREAM, "Stream closing, cancelling write of stream %p", (void *)write->data_stream); s_stream_data_write_destroy(stream, write, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED); } } static void s_stream_destroy(struct aws_http_stream *stream_base) { AWS_PRECONDITION(stream_base); struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base); s_h2_stream_destroy_pending_writes(stream); AWS_H2_STREAM_LOG(DEBUG, stream, "Destroying stream"); aws_mutex_clean_up(&stream->synced_data.lock); aws_http_message_release(stream->thread_data.outgoing_message); aws_mem_release(stream->base.alloc, stream); } void aws_h2_stream_complete(struct aws_h2_stream *stream, int error_code) { { /* BEGIN CRITICAL SECTION */ /* clean up any pending writes */ s_lock_synced_data(stream); /* The stream is complete now, this will prevent further writes from being queued */ stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_COMPLETE; s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ s_h2_stream_destroy_pending_writes(stream); /* Invoke callback */ if (stream->base.on_metrics) { stream->base.on_metrics(&stream->base, &stream->base.metrics, stream->base.user_data); } if (stream->base.on_complete) { stream->base.on_complete(&stream->base, error_code, stream->base.user_data); } } static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size) { AWS_PRECONDITION(stream_base); struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base); struct aws_h2_connection *connection = s_get_h2_connection(stream); if (!increment_size) { return; } if (!connection->base.stream_manual_window_management) { /* auto-mode, manual update window is not supported */ AWS_H2_STREAM_LOG( DEBUG, stream, "Manual window management is off, update window operations are not supported."); return; } int err = 0; bool stream_is_init; bool cross_thread_work_should_schedule = false; size_t sum_size; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream); err |= aws_add_size_checked(stream->synced_data.window_update_size, increment_size, &sum_size); err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX; stream_is_init = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT; if (!err && !stream_is_init) { cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; stream->synced_data.window_update_size = sum_size; } s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (cross_thread_work_should_schedule) { AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task"); /* increment the refcount of stream to keep it alive until the task runs */ aws_atomic_fetch_add(&stream->base.refcount, 1); aws_channel_schedule_task_now(connection->base.channel_slot->channel, 
&stream->cross_thread_work_task); return; } if (stream_is_init) { AWS_H2_STREAM_LOG( ERROR, stream, "Stream update window failed. Stream is in initialized state, please activate the stream first."); aws_raise_error(AWS_ERROR_INVALID_STATE); return; } if (err) { /* The increment_size is still not 100% safe, since we cannot control the incoming data frame. So just * ruled out the value that is obviously wrong values */ AWS_H2_STREAM_LOG( ERROR, stream, "The stream's flow-control window has been incremented beyond 2**31 -1, the max for HTTP/2. The stream " "will close."); aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); struct aws_h2err stream_error = { .aws_code = AWS_ERROR_OVERFLOW_DETECTED, .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR, }; /* Only when stream is not initialized reset will fail. So, we can assert it to be succeed. */ AWS_FATAL_ASSERT( s_stream_reset_stream_internal(stream_base, stream_error, false /*cancelling*/) == AWS_OP_SUCCESS); } return; } static int s_stream_reset_stream_internal( struct aws_http_stream *stream_base, struct aws_h2err stream_error, bool cancelling) { struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base); struct aws_h2_connection *connection = s_get_h2_connection(stream); bool reset_called; bool stream_is_init; bool cross_thread_work_should_schedule = false; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream); reset_called = stream->synced_data.reset_called; stream_is_init = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT; if (!reset_called && !stream_is_init) { cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.reset_called = true; stream->synced_data.reset_error = stream_error; } s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (stream_is_init) { if (cancelling) { /* Not an error if we are just cancelling. */ AWS_LOGF_DEBUG(AWS_LS_HTTP_STREAM, "id=%p: Stream not in process, nothing to cancel.", (void *)stream); return AWS_OP_SUCCESS; } AWS_H2_STREAM_LOG( ERROR, stream, "Reset stream failed. Stream is in initialized state, please activate the stream first."); return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (reset_called) { AWS_H2_STREAM_LOG(DEBUG, stream, "Reset stream ignored. 
Reset stream has been called already."); } if (cross_thread_work_should_schedule) { AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task"); /* increment the refcount of stream to keep it alive until the task runs */ aws_atomic_fetch_add(&stream->base.refcount, 1); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task); } return AWS_OP_SUCCESS; } static int s_stream_reset_stream(struct aws_http_stream *stream_base, uint32_t http2_error) { struct aws_h2err stream_error = { .aws_code = AWS_ERROR_HTTP_RST_STREAM_SENT, .h2_code = http2_error, }; AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: User requested RST_STREAM with error code %s (0x%x)", (void *)stream_base, aws_http2_error_code_to_str(http2_error), http2_error); return s_stream_reset_stream_internal(stream_base, stream_error, false /*cancelling*/); } void s_stream_cancel(struct aws_http_stream *stream_base, int error_code) { struct aws_h2err stream_error = { .aws_code = error_code, .h2_code = AWS_HTTP2_ERR_CANCEL, }; s_stream_reset_stream_internal(stream_base, stream_error, true /*cancelling*/); return; } static int s_stream_get_received_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error) { struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base); if (stream->received_reset_error_code == -1) { return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE); } *out_http2_error = (uint32_t)stream->received_reset_error_code; return AWS_OP_SUCCESS; } static int s_stream_get_sent_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error) { struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base); if (stream->sent_reset_error_code == -1) { return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE); } *out_http2_error = (uint32_t)stream->sent_reset_error_code; return AWS_OP_SUCCESS; } enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *stream) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); return stream->thread_data.state; } /* Given a Stream Error, send RST_STREAM frame and close stream. * A Connection Error is returned if something goes catastrophically wrong */ static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream, struct aws_h2err stream_error) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); AWS_PRECONDITION(stream->thread_data.state != AWS_H2_STREAM_STATE_CLOSED); struct aws_h2_connection *connection = s_get_h2_connection(stream); stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED; AWS_H2_STREAM_LOGF( DEBUG, stream, "Sending RST_STREAM with error code %s (0x%x). 
State -> CLOSED", aws_http2_error_code_to_str(stream_error.h2_code), stream_error.h2_code); /* Send RST_STREAM */ struct aws_h2_frame *rst_stream_frame = aws_h2_frame_new_rst_stream(stream->base.alloc, stream->base.id, stream_error.h2_code); AWS_FATAL_ASSERT(rst_stream_frame != NULL); aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream_frame); /* connection takes ownership of frame */ stream->sent_reset_error_code = stream_error.h2_code; /* Tell connection that stream is now closed */ if (aws_h2_connection_on_stream_closed( connection, stream, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT, stream_error.aws_code)) { return aws_h2err_from_last_error(); } return AWS_H2ERR_SUCCESS; } struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self) { if (self) { if (stream->thread_data.window_size_self + size_changed > AWS_H2_WINDOW_UPDATE_MAX) { return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); } stream->thread_data.window_size_self += size_changed; } else { if ((int64_t)stream->thread_data.window_size_peer + size_changed > AWS_H2_WINDOW_UPDATE_MAX) { return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); } stream->thread_data.window_size_peer += size_changed; } return AWS_H2ERR_SUCCESS; } static inline bool s_h2_stream_has_outgoing_writes(struct aws_h2_stream *stream) { return !aws_linked_list_empty(&stream->thread_data.outgoing_writes); } static void s_h2_stream_write_data_complete(struct aws_h2_stream *stream, bool *waiting_writes) { AWS_PRECONDITION(waiting_writes); AWS_PRECONDITION(s_h2_stream_has_outgoing_writes(stream)); /* finish/clean up the current write operation */ struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.outgoing_writes); struct aws_h2_stream_data_write *write_op = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node); const bool ending_stream = write_op->end_stream; s_stream_data_write_destroy(stream, write_op, AWS_OP_SUCCESS); /* check to see if there are more queued writes or stream_end was called */ *waiting_writes = !ending_stream && !s_h2_stream_has_outgoing_writes(stream); } static struct aws_h2_stream_data_write *s_h2_stream_get_current_write(struct aws_h2_stream *stream) { AWS_PRECONDITION(s_h2_stream_has_outgoing_writes(stream)); struct aws_linked_list_node *node = aws_linked_list_front(&stream->thread_data.outgoing_writes); struct aws_h2_stream_data_write *write = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node); return write; } static struct aws_input_stream *s_h2_stream_get_data_stream(struct aws_h2_stream *stream) { struct aws_h2_stream_data_write *write = s_h2_stream_get_current_write(stream); return write->data_stream; } static bool s_h2_stream_does_current_write_end_stream(struct aws_h2_stream *stream) { struct aws_h2_stream_data_write *write = s_h2_stream_get_current_write(stream); return write->end_stream; } int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_body_state *body_state) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); struct aws_h2_connection *connection = s_get_h2_connection(stream); /* Create HEADERS frame */ struct aws_http_message *msg = stream->thread_data.outgoing_message; /* Should be ensured when the stream is created */ AWS_ASSERT(aws_http_message_get_protocol_version(msg) == AWS_HTTP_VERSION_2); /* If manual write, always has data to be sent. 
*/ bool with_data = aws_http_message_get_body_stream(msg) != NULL || stream->manual_write; struct aws_http_headers *h2_headers = aws_http_message_get_headers(msg); struct aws_h2_frame *headers_frame = aws_h2_frame_new_headers( stream->base.alloc, stream->base.id, h2_headers, !with_data /* end_stream */, 0 /* padding - not currently configurable via public API */, NULL /* priority - not currently configurable via public API */); if (!headers_frame) { AWS_H2_STREAM_LOGF(ERROR, stream, "Failed to create HEADERS frame: %s", aws_error_name(aws_last_error())); goto error; } AWS_ASSERT(stream->base.metrics.send_start_timestamp_ns == -1); aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_start_timestamp_ns); /* Initialize the flow-control window size */ stream->thread_data.window_size_peer = connection->thread_data.settings_peer[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; stream->thread_data.window_size_self = connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; if (with_data) { /* If stream has DATA to send, put it in the outgoing_streams_list, and we'll send data later */ stream->thread_data.state = AWS_H2_STREAM_STATE_OPEN; AWS_H2_STREAM_LOG(TRACE, stream, "Sending HEADERS. State -> OPEN"); } else { /* If stream has no body, then HEADERS frame marks the end of outgoing data */ stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL; AWS_H2_STREAM_LOG(TRACE, stream, "Sending HEADERS with END_STREAM. State -> HALF_CLOSED_LOCAL"); /* There is no further frames to be sent, now is the end timestamp of sending. */ AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns == -1); aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_end_timestamp_ns); stream->base.metrics.sending_duration_ns = stream->base.metrics.send_end_timestamp_ns - stream->base.metrics.send_start_timestamp_ns; } if (s_h2_stream_has_outgoing_writes(stream)) { *body_state = AWS_H2_STREAM_BODY_STATE_ONGOING; } else { if (stream->manual_write) { stream->thread_data.waiting_for_writes = true; *body_state = AWS_H2_STREAM_BODY_STATE_WAITING_WRITES; } else { *body_state = AWS_H2_STREAM_BODY_STATE_NONE; } } aws_h2_connection_enqueue_outgoing_frame(connection, headers_frame); return AWS_OP_SUCCESS; error: return AWS_OP_ERR; } int aws_h2_stream_encode_data_frame( struct aws_h2_stream *stream, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, int *data_encode_status) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); AWS_PRECONDITION( stream->thread_data.state == AWS_H2_STREAM_STATE_OPEN || stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE); struct aws_h2_connection *connection = s_get_h2_connection(stream); AWS_PRECONDITION(connection->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE); if (stream->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) { /* The stream is stalled now */ *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED; return AWS_OP_SUCCESS; } *data_encode_status = AWS_H2_DATA_ENCODE_COMPLETE; struct aws_input_stream *input_stream = s_h2_stream_get_data_stream(stream); AWS_ASSERT(input_stream); bool input_stream_complete = false; bool input_stream_stalled = false; bool ends_stream = s_h2_stream_does_current_write_end_stream(stream); if (aws_h2_encode_data_frame( encoder, stream->base.id, input_stream, ends_stream, 0 /*pad_length*/, &stream->thread_data.window_size_peer, &connection->thread_data.window_size_peer, output, &input_stream_complete, &input_stream_stalled)) { /* Failed to write DATA, treat it as a Stream 
Error */ AWS_H2_STREAM_LOGF(ERROR, stream, "Error encoding stream DATA, %s", aws_error_name(aws_last_error())); struct aws_h2err returned_h2err = s_send_rst_and_close_stream(stream, aws_h2err_from_last_error()); if (aws_h2err_failed(returned_h2err)) { aws_h2_connection_shutdown_due_to_write_err(connection, returned_h2err.aws_code); } return AWS_OP_SUCCESS; } bool waiting_writes = false; if (input_stream_complete) { s_h2_stream_write_data_complete(stream, &waiting_writes); } /* * input_stream_complete for manual writes just means the current outgoing_write is complete. The body is not * complete for real until the stream is told to close */ if (input_stream_complete && ends_stream) { /* Done sending data. No more data will be sent. */ AWS_ASSERT(stream->base.metrics.send_end_timestamp_ns == -1); aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.send_end_timestamp_ns); stream->base.metrics.sending_duration_ns = stream->base.metrics.send_end_timestamp_ns - stream->base.metrics.send_start_timestamp_ns; if (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE) { /* Both sides have sent END_STREAM */ stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED; AWS_H2_STREAM_LOG(TRACE, stream, "Sent END_STREAM. State -> CLOSED"); /* Tell connection that stream is now closed */ if (aws_h2_connection_on_stream_closed( connection, stream, AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM, AWS_ERROR_SUCCESS)) { return AWS_OP_ERR; } } else { /* Else can't close until we receive END_STREAM */ stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL; AWS_H2_STREAM_LOG(TRACE, stream, "Sent END_STREAM. State -> HALF_CLOSED_LOCAL"); } } else { *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING; if (input_stream_stalled) { AWS_ASSERT(!input_stream_complete); *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED; } if (stream->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) { /* if body and window both stalled, we take the window stalled status, which will take the stream out * from outgoing list */ *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED; } if (waiting_writes) { /* if window stalled and we waiting for manual writes, we take waiting writes status, which will be handled * properly if more writes coming, but windows is still stalled. But not the other way around. 
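 * (That is, AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES takes precedence: the stream drops off the
 * outgoing list until the user writes again, and a still-stalled window is then detected on the next
 * encode attempt.)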
*/ AWS_ASSERT(input_stream_complete); *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES; } } return AWS_OP_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_headers_begin(struct aws_h2_stream *stream) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_HEADERS); if (aws_h2err_failed(stream_err)) { return s_send_rst_and_close_stream(stream, stream_err); } aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.receive_start_timestamp_ns); return AWS_H2ERR_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_headers_i( struct aws_h2_stream *stream, const struct aws_http_header *header, enum aws_http_header_name name_enum, enum aws_http_header_block block_type) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); /* Not calling s_check_state_allows_frame_type() here because we already checked * at start of HEADERS frame in aws_h2_stream_on_decoder_headers_begin() */ bool is_server = stream->base.server_data; /* RFC-7540 8.1 - Message consists of: * - 0+ Informational 1xx headers (response-only, decoder validates that this only occurs in responses) * - 1 main headers with normal request or response. * - 0 or 1 trailing headers with no pseudo-headers */ switch (block_type) { case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL: if (stream->thread_data.received_main_headers) { AWS_H2_STREAM_LOG( ERROR, stream, "Malformed message, received informational (1xx) response after main response"); goto malformed; } break; case AWS_HTTP_HEADER_BLOCK_MAIN: if (stream->thread_data.received_main_headers) { AWS_H2_STREAM_LOG(ERROR, stream, "Malformed message, received second set of headers"); goto malformed; } break; case AWS_HTTP_HEADER_BLOCK_TRAILING: if (!stream->thread_data.received_main_headers) { /* A HEADERS frame without any pseudo-headers looks like trailing headers to the decoder */ AWS_H2_STREAM_LOG(ERROR, stream, "Malformed headers lack required pseudo-header fields."); goto malformed; } break; default: AWS_ASSERT(0); } if (is_server) { return aws_h2err_from_aws_code(AWS_ERROR_UNIMPLEMENTED); } else { /* Client */ switch (name_enum) { case AWS_HTTP_HEADER_STATUS: { uint64_t status_code = 0; int err = aws_byte_cursor_utf8_parse_u64(header->value, &status_code); AWS_ASSERT(!err && "Invalid :status value. 
Decoder should have already validated this"); (void)err; stream->base.client_data->response_status = (int)status_code; } break; case AWS_HTTP_HEADER_CONTENT_LENGTH: { if (stream->thread_data.content_length_received) { AWS_H2_STREAM_LOG(ERROR, stream, "Duplicate content-length value"); goto malformed; } if (aws_byte_cursor_utf8_parse_u64(header->value, &stream->thread_data.incoming_content_length)) { AWS_H2_STREAM_LOG(ERROR, stream, "Invalid content-length value"); goto malformed; } stream->thread_data.content_length_received = true; } break; default: break; } } if (stream->base.on_incoming_headers) { if (stream->base.on_incoming_headers(&stream->base, block_type, header, 1, stream->base.user_data)) { AWS_H2_STREAM_LOGF( ERROR, stream, "Incoming header callback raised error, %s", aws_error_name(aws_last_error())); return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error()); } } return AWS_H2ERR_SUCCESS; malformed: /* RFC-9113 8.1.1 Malformed requests or responses that are detected MUST be treated as a stream error * (Section 5.4.2) of type PROTOCOL_ERROR.*/ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } struct aws_h2err aws_h2_stream_on_decoder_headers_end( struct aws_h2_stream *stream, bool malformed, enum aws_http_header_block block_type) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); /* Not calling s_check_state_allows_frame_type() here because we already checked * at start of HEADERS frame in aws_h2_stream_on_decoder_headers_begin() */ if (malformed) { AWS_H2_STREAM_LOG(ERROR, stream, "Headers are malformed"); return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } switch (block_type) { case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL: AWS_H2_STREAM_LOG(TRACE, stream, "Informational 1xx header-block done."); break; case AWS_HTTP_HEADER_BLOCK_MAIN: AWS_H2_STREAM_LOG(TRACE, stream, "Main header-block done."); stream->thread_data.received_main_headers = true; break; case AWS_HTTP_HEADER_BLOCK_TRAILING: AWS_H2_STREAM_LOG(TRACE, stream, "Trailing 1xx header-block done."); break; default: AWS_ASSERT(0); } if (stream->base.on_incoming_header_block_done) { if (stream->base.on_incoming_header_block_done(&stream->base, block_type, stream->base.user_data)) { AWS_H2_STREAM_LOGF( ERROR, stream, "Incoming-header-block-done callback raised error, %s", aws_error_name(aws_last_error())); return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error()); } } return AWS_H2ERR_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_push_promise(struct aws_h2_stream *stream, uint32_t promised_stream_id) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_PUSH_PROMISE); if (aws_h2err_failed(stream_err)) { return s_send_rst_and_close_stream(stream, stream_err); } /* Note: Until we have a need for it, PUSH_PROMISE is not a fully supported feature. * Promised streams are automatically rejected in a manner compliant with RFC-7540. 
*/ AWS_H2_STREAM_LOG(DEBUG, stream, "Automatically rejecting promised stream, PUSH_PROMISE is not fully supported"); if (aws_h2_connection_send_rst_and_close_reserved_stream( s_get_h2_connection(stream), promised_stream_id, AWS_HTTP2_ERR_REFUSED_STREAM)) { return aws_h2err_from_last_error(); } return AWS_H2ERR_SUCCESS; } static int s_stream_send_update_window(struct aws_h2_stream *stream, uint32_t window_size) { struct aws_h2_frame *stream_window_update_frame = aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_size); if (!stream_window_update_frame) { AWS_H2_STREAM_LOGF( ERROR, stream, "WINDOW_UPDATE frame on stream failed to be sent, error %s", aws_error_name(aws_last_error())); return AWS_OP_ERR; } aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame); stream->thread_data.window_size_self += window_size; return AWS_OP_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_data_begin( struct aws_h2_stream *stream, uint32_t payload_len, uint32_t total_padding_bytes, bool end_stream) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_DATA); if (aws_h2err_failed(stream_err)) { return s_send_rst_and_close_stream(stream, stream_err); } if (!stream->thread_data.received_main_headers) { AWS_H2_STREAM_LOG(ERROR, stream, "Malformed message, received DATA before main HEADERS"); return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } if (stream->thread_data.content_length_received) { uint64_t data_len = payload_len - total_padding_bytes; if (aws_add_u64_checked( stream->thread_data.incoming_data_length, data_len, &stream->thread_data.incoming_data_length)) { return s_send_rst_and_close_stream(stream, aws_h2err_from_aws_code(AWS_ERROR_OVERFLOW_DETECTED)); } if (stream->thread_data.incoming_data_length > stream->thread_data.incoming_content_length) { AWS_H2_STREAM_LOGF( ERROR, stream, "Total received data payload=%" PRIu64 " has exceed the received content-length header, which=%" PRIi64 ". Closing malformed stream", stream->thread_data.incoming_data_length, stream->thread_data.incoming_content_length); return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } } /* RFC-7540 6.9.1: * The sender MUST NOT send a flow-controlled frame with a length that exceeds * the space available in either of the flow-control windows advertised by the receiver. * Frames with zero length with the END_STREAM flag set (that is, an empty DATA frame) * MAY be sent if there is no available space in either flow-control window. */ if ((int32_t)payload_len > stream->thread_data.window_size_self && payload_len != 0) { AWS_H2_STREAM_LOGF( ERROR, stream, "DATA length=%" PRIu32 " exceeds flow-control window=%" PRIi64, payload_len, stream->thread_data.window_size_self); return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR)); } stream->thread_data.window_size_self -= payload_len; /* If stream isn't over, we may need to send automatic window updates to keep data flowing */ if (!end_stream) { uint32_t auto_window_update; if (stream->base.owning_connection->stream_manual_window_management) { /* Automatically update the flow-window to account for padding, even though "manual window management" * is enabled, because the current API doesn't have any way to inform the user about padding, * so we can't expect them to manage it themselves. 
*/ auto_window_update = total_padding_bytes; } else { /* Automatically update the full amount we just received */ auto_window_update = payload_len; } if (auto_window_update != 0) { if (s_stream_send_update_window(stream, auto_window_update)) { return aws_h2err_from_last_error(); } AWS_H2_STREAM_LOGF( TRACE, stream, "Automatically updating stream window by %" PRIu32 "(%" PRIu32 " due to padding).", auto_window_update, total_padding_bytes); } } return AWS_H2ERR_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_data_i(struct aws_h2_stream *stream, struct aws_byte_cursor data) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); /* Not calling s_check_state_allows_frame_type() here because we already checked at start of DATA frame in * aws_h2_stream_on_decoder_data_begin() */ if (stream->base.on_incoming_body) { if (stream->base.on_incoming_body(&stream->base, &data, stream->base.user_data)) { AWS_H2_STREAM_LOGF( ERROR, stream, "Incoming body callback raised error, %s", aws_error_name(aws_last_error())); return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error()); } } return AWS_H2ERR_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_window_update( struct aws_h2_stream *stream, uint32_t window_size_increment, bool *window_resume) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); *window_resume = false; struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_WINDOW_UPDATE); if (aws_h2err_failed(stream_err)) { return s_send_rst_and_close_stream(stream, stream_err); } if (window_size_increment == 0) { /* flow-control window increment of 0 MUST be treated as error (RFC7540 6.9.1) */ AWS_H2_STREAM_LOG(ERROR, stream, "Window update frame with 0 increment size"); return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } int32_t old_window_size = stream->thread_data.window_size_peer; stream_err = (aws_h2_stream_window_size_change(stream, window_size_increment, false /*self*/)); if (aws_h2err_failed(stream_err)) { /* We MUST NOT allow a flow-control window to exceed the max */ AWS_H2_STREAM_LOG( ERROR, stream, "Window update frame causes the stream flow-control window to exceed the maximum size"); return s_send_rst_and_close_stream(stream, stream_err); } if (stream->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE && old_window_size <= AWS_H2_MIN_WINDOW_SIZE) { *window_resume = true; } return AWS_H2ERR_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_end_stream(struct aws_h2_stream *stream) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); /* Not calling s_check_state_allows_frame_type() here because END_STREAM isn't * an actual frame type. It's a flag on DATA or HEADERS frames, and we * already checked the legality of those frames in their respective callbacks. */ AWS_ASSERT(stream->base.metrics.receive_start_timestamp_ns != -1); AWS_ASSERT(stream->base.metrics.receive_end_timestamp_ns == -1); aws_high_res_clock_get_ticks((uint64_t *)&stream->base.metrics.receive_end_timestamp_ns); AWS_ASSERT(stream->base.metrics.receive_end_timestamp_ns >= stream->base.metrics.receive_start_timestamp_ns); stream->base.metrics.receiving_duration_ns = stream->base.metrics.receive_end_timestamp_ns - stream->base.metrics.receive_start_timestamp_ns; if (stream->thread_data.content_length_received) { if (stream->base.request_method != AWS_HTTP_METHOD_HEAD && stream->base.client_data->response_status != AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED) { /** * RFC-9110 8.6. 
* A server MAY send a Content-Length header field in a response to a HEAD request. * A server MAY send a Content-Length header field in a 304 (Not Modified) response. * But both of these condition will have no body receive. */ if (stream->thread_data.incoming_data_length != stream->thread_data.incoming_content_length) { /** * RFC-9113 8.1.1: * A request or response is also malformed if the value of a content-length header field does not equal * the sum of the DATA frame payload lengths that form the content, unless the message is defined as * having no content. * * Clients MUST NOT accept a malformed response. */ AWS_H2_STREAM_LOGF( ERROR, stream, "Total received data payload=%" PRIu64 " does not match the received content-length header, which=%" PRIi64 ". Closing malformed stream", stream->thread_data.incoming_data_length, stream->thread_data.incoming_content_length); return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } } } if (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL) { /* Both sides have sent END_STREAM */ stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED; AWS_H2_STREAM_LOG(TRACE, stream, "Received END_STREAM. State -> CLOSED"); /* Tell connection that stream is now closed */ if (aws_h2_connection_on_stream_closed( s_get_h2_connection(stream), stream, AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM, AWS_ERROR_SUCCESS)) { return aws_h2err_from_last_error(); } } else { /* Else can't close until our side sends END_STREAM */ stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE; AWS_H2_STREAM_LOG(TRACE, stream, "Received END_STREAM. State -> HALF_CLOSED_REMOTE"); } return AWS_H2ERR_SUCCESS; } struct aws_h2err aws_h2_stream_on_decoder_rst_stream(struct aws_h2_stream *stream, uint32_t h2_error_code) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); /* Check that this state allows RST_STREAM. */ struct aws_h2err err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_RST_STREAM); if (aws_h2err_failed(err)) { /* Usually we send a RST_STREAM when the state doesn't allow a frame type, but RFC-7540 5.4.2 says: * "To avoid looping, an endpoint MUST NOT send a RST_STREAM in response to a RST_STREAM frame." */ return err; } /* RFC-7540 8.1 - a server MAY request that the client abort transmission of a request without error by sending a * RST_STREAM with an error code of NO_ERROR after sending a complete response (i.e., a frame with the END_STREAM * flag). Clients MUST NOT discard responses as a result of receiving such a RST_STREAM */ int aws_error_code; if (stream->base.client_data && (h2_error_code == AWS_HTTP2_ERR_NO_ERROR) && (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE)) { aws_error_code = AWS_ERROR_SUCCESS; } else { aws_error_code = AWS_ERROR_HTTP_RST_STREAM_RECEIVED; AWS_H2_STREAM_LOGF( ERROR, stream, "Peer terminated stream with HTTP/2 RST_STREAM frame, error-code=0x%x(%s)", h2_error_code, aws_http2_error_code_to_str(h2_error_code)); } stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED; stream->received_reset_error_code = h2_error_code; AWS_H2_STREAM_LOGF( TRACE, stream, "Received RST_STREAM code=0x%x(%s). 
State -> CLOSED", h2_error_code, aws_http2_error_code_to_str(h2_error_code)); if (aws_h2_connection_on_stream_closed( s_get_h2_connection(stream), stream, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED, aws_error_code)) { return aws_h2err_from_last_error(); } return AWS_H2ERR_SUCCESS; } static int s_stream_write_data( struct aws_http_stream *stream_base, const struct aws_http2_stream_write_data_options *options) { struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base); if (!stream->manual_write) { AWS_H2_STREAM_LOG( ERROR, stream, "Manual writes are not enabled. You need to enable manual writes using by setting " "'http2_use_manual_data_writes' to true in 'aws_http_make_request_options'"); return aws_raise_error(AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED); } struct aws_h2_connection *connection = s_get_h2_connection(stream); /* queue this new write into the pending write list for the stream */ struct aws_h2_stream_data_write *pending_write = aws_mem_calloc(stream->base.alloc, 1, sizeof(struct aws_h2_stream_data_write)); if (options->data) { pending_write->data_stream = aws_input_stream_acquire(options->data); } else { struct aws_byte_cursor empty_cursor; AWS_ZERO_STRUCT(empty_cursor); pending_write->data_stream = aws_input_stream_new_from_cursor(stream->base.alloc, &empty_cursor); } bool schedule_cross_thread_work = false; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream); { if (stream->synced_data.api_state != AWS_H2_STREAM_API_STATE_ACTIVE) { s_unlock_synced_data(stream); int error_code = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED; s_stream_data_write_destroy(stream, pending_write, error_code); AWS_H2_STREAM_LOG(ERROR, stream, "Cannot write DATA frames to an inactive or closed stream"); return aws_raise_error(error_code); } if (stream->synced_data.manual_write_ended) { s_unlock_synced_data(stream); s_stream_data_write_destroy(stream, pending_write, AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED); AWS_H2_STREAM_LOG(ERROR, stream, "Cannot write DATA frames to a stream after manual write ended"); /* Fail with error, otherwise, people can wait for on_complete callback that will never be invoked. */ return aws_raise_error(AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED); } /* Not setting this until we're sure we succeeded, so that callback doesn't fire on cleanup if we fail */ if (options->end_stream) { stream->synced_data.manual_write_ended = true; } pending_write->end_stream = options->end_stream; pending_write->on_complete = options->on_complete; pending_write->user_data = options->user_data; aws_linked_list_push_back(&stream->synced_data.pending_write_list, &pending_write->node); schedule_cross_thread_work = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; } s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ if (schedule_cross_thread_work) { AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task"); /* increment the refcount of stream to keep it alive until the task runs */ aws_atomic_fetch_add(&stream->base.refcount, 1); aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/hpack.c000066400000000000000000000475571456575232400227630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include /* #TODO test empty strings */ /* #TODO remove all OOM error handling in HTTP/2 & HPACK. make functions void if possible */ /* RFC-7540 6.5.2 */ const size_t s_hpack_dynamic_table_initial_size = 4096; const size_t s_hpack_dynamic_table_initial_elements = 512; /* TODO: shouldn't be a hardcoded max_size, it should be driven by SETTINGS_HEADER_TABLE_SIZE */ const size_t s_hpack_dynamic_table_max_size = 16 * 1024 * 1024; /* Used for growing the dynamic table buffer when it fills up */ const float s_hpack_dynamic_table_buffer_growth_rate = 1.5F; struct aws_http_header s_static_header_table[] = { #define HEADER(_index, _name) \ [_index] = { \ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name), \ }, #define HEADER_WITH_VALUE(_index, _name, _value) \ [_index] = { \ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name), \ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \ }, #include #undef HEADER #undef HEADER_WITH_VALUE }; static const size_t s_static_header_table_size = AWS_ARRAY_SIZE(s_static_header_table); struct aws_byte_cursor s_static_header_table_name_only[] = { #define HEADER(_index, _name) [_index] = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name), #define HEADER_WITH_VALUE(_index, _name, _value) HEADER(_index, _name) #include #undef HEADER #undef HEADER_WITH_VALUE }; /* aws_http_header * -> size_t */ static struct aws_hash_table s_static_header_reverse_lookup; /* aws_byte_cursor * -> size_t */ static struct aws_hash_table s_static_header_reverse_lookup_name_only; static uint64_t s_header_hash(const void *key) { const struct aws_http_header *header = key; return aws_hash_combine(aws_hash_byte_cursor_ptr(&header->name), aws_hash_byte_cursor_ptr(&header->value)); } static bool s_header_eq(const void *a, const void *b) { const struct aws_http_header *left = a; const struct aws_http_header *right = b; if (!aws_byte_cursor_eq(&left->name, &right->name)) { return false; } /* If the header stored in the table doesn't have a value, then it's a match */ return aws_byte_cursor_eq(&left->value, &right->value); } void aws_hpack_static_table_init(struct aws_allocator *allocator) { int result = aws_hash_table_init( &s_static_header_reverse_lookup, allocator, s_static_header_table_size - 1, s_header_hash, s_header_eq, NULL, NULL); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result); result = aws_hash_table_init( &s_static_header_reverse_lookup_name_only, allocator, s_static_header_table_size - 1, aws_hash_byte_cursor_ptr, (aws_hash_callback_eq_fn *)aws_byte_cursor_eq, NULL, NULL); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result); /* Process in reverse so that name_only prefers lower indices */ for (size_t i = s_static_header_table_size - 1; i > 0; --i) { /* the tables are created as 1-based indexing */ result = aws_hash_table_put(&s_static_header_reverse_lookup, &s_static_header_table[i], (void *)i, NULL); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result); result = aws_hash_table_put( &s_static_header_reverse_lookup_name_only, &s_static_header_table_name_only[i], (void *)(i), NULL); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result); } } void aws_hpack_static_table_clean_up(void) { aws_hash_table_clean_up(&s_static_header_reverse_lookup); aws_hash_table_clean_up(&s_static_header_reverse_lookup_name_only); } #define HPACK_LOGF(level, hpack, text, ...) 
\ AWS_LOGF_##level((hpack)->log_subject, "id=%p [HPACK]: " text, (hpack)->log_id, __VA_ARGS__) #define HPACK_LOG(level, hpack, text) HPACK_LOGF(level, hpack, "%s", text) void aws_hpack_context_init( struct aws_hpack_context *context, struct aws_allocator *allocator, enum aws_http_log_subject log_subject, const void *log_id) { AWS_ZERO_STRUCT(*context); context->allocator = allocator; context->log_subject = log_subject; context->log_id = log_id; /* Initialize dynamic table */ context->dynamic_table.max_size = s_hpack_dynamic_table_initial_size; context->dynamic_table.buffer_capacity = s_hpack_dynamic_table_initial_elements; context->dynamic_table.buffer = aws_mem_calloc(allocator, context->dynamic_table.buffer_capacity, sizeof(struct aws_http_header)); aws_hash_table_init( &context->dynamic_table.reverse_lookup, allocator, s_hpack_dynamic_table_initial_elements, s_header_hash, s_header_eq, NULL, NULL); aws_hash_table_init( &context->dynamic_table.reverse_lookup_name_only, allocator, s_hpack_dynamic_table_initial_elements, aws_hash_byte_cursor_ptr, (aws_hash_callback_eq_fn *)aws_byte_cursor_eq, NULL, NULL); } static struct aws_http_header *s_dynamic_table_get(const struct aws_hpack_context *context, size_t index); static void s_clean_up_dynamic_table_buffer(struct aws_hpack_context *context) { while (context->dynamic_table.num_elements > 0) { struct aws_http_header *back = s_dynamic_table_get(context, context->dynamic_table.num_elements - 1); context->dynamic_table.num_elements -= 1; /* clean-up the memory we allocate for it */ aws_mem_release(context->allocator, back->name.ptr); } aws_mem_release(context->allocator, context->dynamic_table.buffer); } void aws_hpack_context_clean_up(struct aws_hpack_context *context) { if (context->dynamic_table.buffer) { s_clean_up_dynamic_table_buffer(context); } aws_hash_table_clean_up(&context->dynamic_table.reverse_lookup); aws_hash_table_clean_up(&context->dynamic_table.reverse_lookup_name_only); AWS_ZERO_STRUCT(*context); } size_t aws_hpack_get_header_size(const struct aws_http_header *header) { return header->name.len + header->value.len + 32; } size_t aws_hpack_get_dynamic_table_num_elements(const struct aws_hpack_context *context) { return context->dynamic_table.num_elements; } size_t aws_hpack_get_dynamic_table_max_size(const struct aws_hpack_context *context) { return context->dynamic_table.max_size; } /* * Gets the header from the dynamic table. * NOTE: This function only bounds checks on the buffer size, not the number of elements. 
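 * The table is used as a ring buffer: relative index i maps to buffer[(index_0 + i) % buffer_capacity],
 * and insertion decrements index_0 so the newest entry always sits at relative index 0. For example, with
 * buffer_capacity = 8 and index_0 = 6: index 0 -> slot 6, index 1 -> slot 7, index 2 wraps to slot 0.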
*/ static struct aws_http_header *s_dynamic_table_get(const struct aws_hpack_context *context, size_t index) { AWS_ASSERT(index < context->dynamic_table.buffer_capacity); return &context->dynamic_table .buffer[(context->dynamic_table.index_0 + index) % context->dynamic_table.buffer_capacity]; } const struct aws_http_header *aws_hpack_get_header(const struct aws_hpack_context *context, size_t index) { if (index == 0 || index >= s_static_header_table_size + context->dynamic_table.num_elements) { aws_raise_error(AWS_ERROR_INVALID_INDEX); return NULL; } /* Check static table */ if (index < s_static_header_table_size) { return &s_static_header_table[index]; } /* Check dynamic table */ return s_dynamic_table_get(context, index - s_static_header_table_size); } /* TODO: remove `bool search_value`, this option has no reason to exist */ size_t aws_hpack_find_index( const struct aws_hpack_context *context, const struct aws_http_header *header, bool search_value, bool *found_value) { *found_value = false; struct aws_hash_element *elem = NULL; if (search_value) { /* Check name-and-value first in static table */ aws_hash_table_find(&s_static_header_reverse_lookup, header, &elem); if (elem) { /* TODO: Maybe always set found_value to true? Who cares that the value is empty if they matched? */ /* If an element was found, check if it has a value */ *found_value = ((const struct aws_http_header *)elem->key)->value.len; return (size_t)elem->value; } /* Check name-and-value in dynamic table */ aws_hash_table_find(&context->dynamic_table.reverse_lookup, header, &elem); if (elem) { /* TODO: Maybe always set found_value to true? Who cares that the value is empty if they matched? */ *found_value = ((const struct aws_http_header *)elem->key)->value.len; goto trans_index_from_dynamic_table; } } /* Check the name-only table. Note, even if we search for value, when we fail in searching for name-and-value, we * should also check the name only table */ aws_hash_table_find(&s_static_header_reverse_lookup_name_only, &header->name, &elem); if (elem) { return (size_t)elem->value; } aws_hash_table_find(&context->dynamic_table.reverse_lookup_name_only, &header->name, &elem); if (elem) { goto trans_index_from_dynamic_table; } return 0; trans_index_from_dynamic_table: AWS_ASSERT(elem); size_t index; const size_t absolute_index = (size_t)elem->value; if (absolute_index >= context->dynamic_table.index_0) { index = absolute_index - context->dynamic_table.index_0; } else { index = (context->dynamic_table.buffer_capacity - context->dynamic_table.index_0) + absolute_index; } /* Need to add the static table size to re-base indicies */ index += s_static_header_table_size; return index; } /* Remove elements from the dynamic table until it fits in max_size bytes */ static int s_dynamic_table_shrink(struct aws_hpack_context *context, size_t max_size) { while (context->dynamic_table.size > max_size && context->dynamic_table.num_elements > 0) { struct aws_http_header *back = s_dynamic_table_get(context, context->dynamic_table.num_elements - 1); /* "Remove" the header from the table */ context->dynamic_table.size -= aws_hpack_get_header_size(back); context->dynamic_table.num_elements -= 1; /* Remove old header from hash tables */ if (aws_hash_table_remove(&context->dynamic_table.reverse_lookup, back, NULL, NULL)) { HPACK_LOG(ERROR, context, "Failed to remove header from the reverse lookup table"); goto error; } /* If the name-only lookup is pointing to the element we're removing, it needs to go. 
* If not, it's pointing to a younger, sexier element. */ struct aws_hash_element *elem = NULL; aws_hash_table_find(&context->dynamic_table.reverse_lookup_name_only, &back->name, &elem); if (elem && elem->key == back) { if (aws_hash_table_remove_element(&context->dynamic_table.reverse_lookup_name_only, elem)) { HPACK_LOG(ERROR, context, "Failed to remove header from the reverse lookup (name-only) table"); goto error; } } /* clean up the memory we allocated to hold the name and value string*/ aws_mem_release(context->allocator, back->name.ptr); } return AWS_OP_SUCCESS; error: return AWS_OP_ERR; } /* * Resizes the dynamic table storage buffer to new_max_elements. * Useful when inserting over capacity, or when downsizing. * Do shrink first, if you want to remove elements, or memory leak will happen. */ static int s_dynamic_table_resize_buffer(struct aws_hpack_context *context, size_t new_max_elements) { /* Clear the old hash tables */ aws_hash_table_clear(&context->dynamic_table.reverse_lookup); aws_hash_table_clear(&context->dynamic_table.reverse_lookup_name_only); struct aws_http_header *new_buffer = NULL; if (AWS_UNLIKELY(new_max_elements == 0)) { /* If new buffer is of size 0, don't both initializing, just clean up the old one. */ goto cleanup_old_buffer; } /* Allocate the new buffer */ new_buffer = aws_mem_calloc(context->allocator, new_max_elements, sizeof(struct aws_http_header)); if (!new_buffer) { return AWS_OP_ERR; } /* Don't bother copying data if old buffer was of size 0 */ if (AWS_UNLIKELY(context->dynamic_table.num_elements == 0)) { goto reset_dyn_table_state; } /* * Take a buffer that looks like this: * * Index 0 * ^ * +---------------------------+ * | Below Block | Above Block | * +---------------------------+ * And make it look like this: * * Index 0 * ^ * +-------------+-------------+ * | Above Block | Below Block | * +-------------+-------------+ */ /* Copy as much the above block as possible */ size_t above_block_size = context->dynamic_table.buffer_capacity - context->dynamic_table.index_0; if (above_block_size > new_max_elements) { above_block_size = new_max_elements; } memcpy( new_buffer, context->dynamic_table.buffer + context->dynamic_table.index_0, above_block_size * sizeof(struct aws_http_header)); /* Copy as much of below block as possible */ const size_t free_blocks_available = new_max_elements - above_block_size; const size_t old_blocks_to_copy = context->dynamic_table.buffer_capacity - above_block_size; const size_t below_block_size = aws_min_size(free_blocks_available, old_blocks_to_copy); if (below_block_size) { memcpy( new_buffer + above_block_size, context->dynamic_table.buffer, below_block_size * sizeof(struct aws_http_header)); } /* Free the old memory */ cleanup_old_buffer: aws_mem_release(context->allocator, context->dynamic_table.buffer); /* Reset state */ reset_dyn_table_state: if (context->dynamic_table.num_elements > new_max_elements) { context->dynamic_table.num_elements = new_max_elements; } context->dynamic_table.buffer_capacity = new_max_elements; context->dynamic_table.index_0 = 0; context->dynamic_table.buffer = new_buffer; /* Re-insert all of the reverse lookup elements */ for (size_t i = 0; i < context->dynamic_table.num_elements; ++i) { if (aws_hash_table_put( &context->dynamic_table.reverse_lookup, &context->dynamic_table.buffer[i], (void *)i, NULL)) { return AWS_OP_ERR; } if (aws_hash_table_put( &context->dynamic_table.reverse_lookup_name_only, &context->dynamic_table.buffer[i].name, (void *)i, NULL)) { return AWS_OP_ERR; } } return 
AWS_OP_SUCCESS; } int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_http_header *header) { /* Don't move forward if no elements allowed in the dynamic table */ if (AWS_UNLIKELY(context->dynamic_table.max_size == 0)) { return AWS_OP_SUCCESS; } const size_t header_size = aws_hpack_get_header_size(header); /* If for whatever reason this new header is bigger than the total table size, burn everything to the ground. */ if (AWS_UNLIKELY(header_size > context->dynamic_table.max_size)) { /* #TODO handle this. It's not an error. It should simply result in an empty table RFC-7541 4.4 */ goto error; } /* Rotate out headers until there's room for the new header (this function will return immediately if nothing needs * to be evicted) */ if (s_dynamic_table_shrink(context, context->dynamic_table.max_size - header_size)) { goto error; } /* If we're out of space in the buffer, grow it */ if (context->dynamic_table.num_elements == context->dynamic_table.buffer_capacity) { /* If the buffer is currently of 0 size, reset it back to its initial size */ const size_t new_size = context->dynamic_table.buffer_capacity ? (size_t)(context->dynamic_table.buffer_capacity * s_hpack_dynamic_table_buffer_growth_rate) : s_hpack_dynamic_table_initial_elements; if (s_dynamic_table_resize_buffer(context, new_size)) { goto error; } } /* Decrement index 0, wrapping if necessary */ if (context->dynamic_table.index_0 == 0) { context->dynamic_table.index_0 = context->dynamic_table.buffer_capacity - 1; } else { context->dynamic_table.index_0--; } /* Increment num_elements */ context->dynamic_table.num_elements++; /* Increment the size */ context->dynamic_table.size += header_size; /* Put the header at the "front" of the table */ struct aws_http_header *table_header = s_dynamic_table_get(context, 0); /* TODO:: We can optimize this with ring buffer. */ /* allocate memory for the name and value, which will be deallocated whenever the entry is evicted from the table or * the table is cleaned up. We keep the pointer in the name pointer of each entry */ const size_t buf_memory_size = header->name.len + header->value.len; if (buf_memory_size) { uint8_t *buf_memory = aws_mem_acquire(context->allocator, buf_memory_size); if (!buf_memory) { return AWS_OP_ERR; } struct aws_byte_buf buf = aws_byte_buf_from_empty_array(buf_memory, buf_memory_size); /* Copy header, then backup strings into our own allocation */ *table_header = *header; aws_byte_buf_append_and_update(&buf, &table_header->name); aws_byte_buf_append_and_update(&buf, &table_header->value); } else { /* if buf_memory_size is 0, no memory needed, we will insert the empty header into dynamic table */ *table_header = *header; table_header->name.ptr = NULL; table_header->value.ptr = NULL; } /* Write the new header to the look up tables */ if (aws_hash_table_put( &context->dynamic_table.reverse_lookup, table_header, (void *)context->dynamic_table.index_0, NULL)) { goto error; } /* Note that we can just blindly put here, we want to overwrite any older entry so it isn't accidentally removed. */ if (aws_hash_table_put( &context->dynamic_table.reverse_lookup_name_only, &table_header->name, (void *)context->dynamic_table.index_0, NULL)) { goto error; } return AWS_OP_SUCCESS; error: /* Do not attempt to handle the error, if something goes wrong, close the connection */ return AWS_OP_ERR; } int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size) { /* Nothing to see here! 
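 * (i.e. a resize to the current max size is a no-op; the early return below handles that case)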
*/ if (new_max_size == context->dynamic_table.max_size) { return AWS_OP_SUCCESS; } if (new_max_size > s_hpack_dynamic_table_max_size) { HPACK_LOGF( ERROR, context, "New dynamic table max size %zu is greater than the supported max size (%zu)", new_max_size, s_hpack_dynamic_table_max_size); aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); goto error; } /* If downsizing, remove elements until we're within the new size constraints */ if (s_dynamic_table_shrink(context, new_max_size)) { goto error; } /* Resize the buffer to the current size */ if (s_dynamic_table_resize_buffer(context, context->dynamic_table.num_elements)) { goto error; } /* Update the max size */ context->dynamic_table.max_size = new_max_size; return AWS_OP_SUCCESS; error: return AWS_OP_ERR; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/hpack_decoder.c000066400000000000000000000444161456575232400244370ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #define HPACK_LOGF(level, decoder, text, ...) \ AWS_LOGF_##level(AWS_LS_HTTP_DECODER, "id=%p [HPACK]: " text, (decoder)->log_id, __VA_ARGS__) #define HPACK_LOG(level, decoder, text) HPACK_LOGF(level, decoder, "%s", text) struct aws_huffman_symbol_coder *hpack_get_coder(void); /* Used while decoding the header name & value, grows if necessary */ const size_t s_hpack_decoder_scratch_initial_size = 512; void aws_hpack_decoder_init(struct aws_hpack_decoder *decoder, struct aws_allocator *allocator, const void *log_id) { AWS_ZERO_STRUCT(*decoder); decoder->log_id = log_id; aws_huffman_decoder_init(&decoder->huffman_decoder, hpack_get_coder()); aws_huffman_decoder_allow_growth(&decoder->huffman_decoder, true); aws_hpack_context_init(&decoder->context, allocator, AWS_LS_HTTP_DECODER, log_id); aws_byte_buf_init(&decoder->progress_entry.scratch, allocator, s_hpack_decoder_scratch_initial_size); decoder->dynamic_table_protocol_max_size_setting = aws_hpack_get_dynamic_table_max_size(&decoder->context); } void aws_hpack_decoder_clean_up(struct aws_hpack_decoder *decoder) { aws_hpack_context_clean_up(&decoder->context); aws_byte_buf_clean_up(&decoder->progress_entry.scratch); AWS_ZERO_STRUCT(*decoder); } static const struct aws_http_header *s_get_header_u64(const struct aws_hpack_decoder *decoder, uint64_t index) { if (index > SIZE_MAX) { HPACK_LOG(ERROR, decoder, "Header index is absurdly large"); aws_raise_error(AWS_ERROR_INVALID_INDEX); return NULL; } return aws_hpack_get_header(&decoder->context, (size_t)index); } void aws_hpack_decoder_update_max_table_size(struct aws_hpack_decoder *decoder, uint32_t setting_max_size) { decoder->dynamic_table_protocol_max_size_setting = setting_max_size; } /* Return a byte with the N right-most bits masked. 
* Ex: 2 -> 00000011 */ static uint8_t s_masked_right_bits_u8(uint8_t num_masked_bits) { AWS_ASSERT(num_masked_bits <= 8); const uint8_t cut_bits = 8 - num_masked_bits; return UINT8_MAX >> cut_bits; } int aws_hpack_decode_integer( struct aws_hpack_decoder *decoder, struct aws_byte_cursor *to_decode, uint8_t prefix_size, uint64_t *integer, bool *complete) { AWS_PRECONDITION(decoder); AWS_PRECONDITION(to_decode); AWS_PRECONDITION(prefix_size <= 8); AWS_PRECONDITION(integer); const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size); struct hpack_progress_integer *progress = &decoder->progress_integer; while (to_decode->len) { switch (progress->state) { case HPACK_INTEGER_STATE_INIT: { /* Read the first byte, and check whether this is it, or we need to continue */ uint8_t byte = 0; bool succ = aws_byte_cursor_read_u8(to_decode, &byte); AWS_FATAL_ASSERT(succ); /* Cut the prefix */ byte &= prefix_mask; /* No matter what, the first byte's value is always added to the integer */ *integer = byte; if (byte != prefix_mask) { goto handle_complete; } progress->state = HPACK_INTEGER_STATE_VALUE; } break; case HPACK_INTEGER_STATE_VALUE: { uint8_t byte = 0; bool succ = aws_byte_cursor_read_u8(to_decode, &byte); AWS_FATAL_ASSERT(succ); uint64_t new_byte_value = (uint64_t)(byte & 127) << progress->bit_count; if (*integer + new_byte_value < *integer) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } *integer += new_byte_value; /* Check if we're done */ if ((byte & 128) == 0) { goto handle_complete; } /* Increment the bit count */ progress->bit_count += 7; /* 7 Bits are expected to be used, so if we get to the point where any of * those bits can't be used it's a decoding error */ if (progress->bit_count > 64 - 7) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } } break; } } /* Fell out of data loop, must need more data */ *complete = false; return AWS_OP_SUCCESS; handle_complete: AWS_ZERO_STRUCT(decoder->progress_integer); *complete = true; return AWS_OP_SUCCESS; } int aws_hpack_decode_string( struct aws_hpack_decoder *decoder, struct aws_byte_cursor *to_decode, struct aws_byte_buf *output, bool *complete) { AWS_PRECONDITION(decoder); AWS_PRECONDITION(to_decode); AWS_PRECONDITION(output); AWS_PRECONDITION(complete); struct hpack_progress_string *progress = &decoder->progress_string; while (to_decode->len) { switch (progress->state) { case HPACK_STRING_STATE_INIT: { /* Do init stuff */ progress->state = HPACK_STRING_STATE_LENGTH; progress->use_huffman = *to_decode->ptr >> 7; aws_huffman_decoder_reset(&decoder->huffman_decoder); /* fallthrough, since we didn't consume any data */ } /* FALLTHRU */ case HPACK_STRING_STATE_LENGTH: { bool length_complete = false; if (aws_hpack_decode_integer(decoder, to_decode, 7, &progress->length, &length_complete)) { return AWS_OP_ERR; } if (!length_complete) { goto handle_ongoing; } if (progress->length == 0) { goto handle_complete; } if (progress->length > SIZE_MAX) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } progress->state = HPACK_STRING_STATE_VALUE; } break; case HPACK_STRING_STATE_VALUE: { /* Take either as much data as we need, or as much as we can */ size_t to_process = aws_min_size((size_t)progress->length, to_decode->len); progress->length -= to_process; struct aws_byte_cursor chunk = aws_byte_cursor_advance(to_decode, to_process); if (progress->use_huffman) { if (aws_huffman_decode(&decoder->huffman_decoder, &chunk, output)) { HPACK_LOGF(ERROR, decoder, "Error from Huffman decoder: %s", aws_error_name(aws_last_error())); return 
AWS_OP_ERR; } /* Decoder should consume all bytes we feed it. * EOS (end-of-string) symbol could stop it early, but HPACK says to treat EOS as error. */ if (chunk.len != 0) { HPACK_LOG(ERROR, decoder, "Huffman encoded end-of-string symbol is illegal"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } else { if (aws_byte_buf_append_dynamic(output, &chunk)) { return AWS_OP_ERR; } } /* If whole length consumed, we're done */ if (progress->length == 0) { /* #TODO Validate any padding bits left over in final byte of string. * "A padding not corresponding to the most significant bits of the * code for the EOS symbol MUST be treated as a decoding error" */ /* #TODO impose limits on string length */ goto handle_complete; } } break; } } handle_ongoing: /* Fell out of to_decode loop, must still be in progress */ AWS_ASSERT(to_decode->len == 0); *complete = false; return AWS_OP_SUCCESS; handle_complete: AWS_ASSERT(decoder->progress_string.length == 0); AWS_ZERO_STRUCT(decoder->progress_string); *complete = true; return AWS_OP_SUCCESS; } /* Implements RFC-7541 Section 6 - Binary Format */ int aws_hpack_decode( struct aws_hpack_decoder *decoder, struct aws_byte_cursor *to_decode, struct aws_hpack_decode_result *result) { AWS_PRECONDITION(decoder); AWS_PRECONDITION(to_decode); AWS_PRECONDITION(result); /* Run state machine until we decode a complete entry. * Every state requires data, so we can simply loop until no more data available. */ while (to_decode->len) { switch (decoder->progress_entry.state) { case HPACK_ENTRY_STATE_INIT: { /* Reset entry */ AWS_ZERO_STRUCT(decoder->progress_entry.u); decoder->progress_entry.scratch.len = 0; /* Determine next state by looking at first few bits of the next byte: * 1xxxxxxx: Indexed Header Field Representation * 01xxxxxx: Literal Header Field with Incremental Indexing * 001xxxxx: Dynamic Table Size Update * 0001xxxx: Literal Header Field Never Indexed * 0000xxxx: Literal Header Field without Indexing */ uint8_t first_byte = to_decode->ptr[0]; if (first_byte & (1 << 7)) { /* 1xxxxxxx: Indexed Header Field Representation */ decoder->progress_entry.state = HPACK_ENTRY_STATE_INDEXED; } else if (first_byte & (1 << 6)) { /* 01xxxxxx: Literal Header Field with Incremental Indexing */ decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_USE_CACHE; decoder->progress_entry.u.literal.prefix_size = 6; decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN; } else if (first_byte & (1 << 5)) { /* 001xxxxx: Dynamic Table Size Update */ decoder->progress_entry.state = HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE; } else if (first_byte & (1 << 4)) { /* 0001xxxx: Literal Header Field Never Indexed */ decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE; decoder->progress_entry.u.literal.prefix_size = 4; decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN; } else { /* 0000xxxx: Literal Header Field without Indexing */ decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_NO_CACHE; decoder->progress_entry.u.literal.prefix_size = 4; decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN; } } break; /* RFC-7541 6.1. Indexed Header Field Representation. * Decode one integer, which is an index into the table. * Result is the header name and value stored there. 
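 *
 * Illustrative example, taken from RFC-7541 Appendix C.2.4 rather than from this codebase:
 * the single byte 0x82 is an indexed representation carrying index 2, which maps to the
 * static-table entry ":method: GET".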
*/ case HPACK_ENTRY_STATE_INDEXED: { bool complete = false; uint64_t *index = &decoder->progress_entry.u.indexed.index; if (aws_hpack_decode_integer(decoder, to_decode, 7, index, &complete)) { return AWS_OP_ERR; } if (!complete) { break; } const struct aws_http_header *header = s_get_header_u64(decoder, *index); if (!header) { return AWS_OP_ERR; } result->type = AWS_HPACK_DECODE_T_HEADER_FIELD; result->data.header_field = *header; goto handle_complete; } break; /* RFC-7541 6.2. Literal Header Field Representation. * We use multiple states to decode a literal... * The header-name MAY come from the table and MAY be encoded as a string. * The header-value is ALWAYS encoded as a string. * * This BEGIN state decodes one integer. * If it's non-zero, then it's the index in the table where we'll get the header-name from. * If it's zero, then we move to the HEADER_NAME state and decode header-name as a string instead */ case HPACK_ENTRY_STATE_LITERAL_BEGIN: { struct hpack_progress_literal *literal = &decoder->progress_entry.u.literal; bool index_complete = false; if (aws_hpack_decode_integer( decoder, to_decode, literal->prefix_size, &literal->name_index, &index_complete)) { return AWS_OP_ERR; } if (!index_complete) { break; } if (literal->name_index == 0) { /* Index 0 means header-name is not in table. Need to decode header-name as a string instead */ decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_NAME_STRING; break; } /* Otherwise we found index of header-name in table. */ const struct aws_http_header *header = s_get_header_u64(decoder, literal->name_index); if (!header) { return AWS_OP_ERR; } /* Store the name in scratch. We don't just keep a pointer to it because it could be * evicted from the dynamic table later, when we save the literal. */ if (aws_byte_buf_append_dynamic(&decoder->progress_entry.scratch, &header->name)) { return AWS_OP_ERR; } /* Move on to decoding header-value. * Value will also decode into the scratch, so save where name ends. */ literal->name_length = header->name.len; decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_VALUE_STRING; } break; /* We only end up in this state if header-name is encoded as string. */ case HPACK_ENTRY_STATE_LITERAL_NAME_STRING: { bool string_complete = false; if (aws_hpack_decode_string(decoder, to_decode, &decoder->progress_entry.scratch, &string_complete)) { return AWS_OP_ERR; } if (!string_complete) { break; } /* Done decoding name string! Move on to decoding the value string. * Value will also decode into the scratch, so save where name ends. */ decoder->progress_entry.u.literal.name_length = decoder->progress_entry.scratch.len; decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_VALUE_STRING; } break; /* Final state for "literal" entries. * Decode the header-value string, then deliver the results. */ case HPACK_ENTRY_STATE_LITERAL_VALUE_STRING: { bool string_complete = false; if (aws_hpack_decode_string(decoder, to_decode, &decoder->progress_entry.scratch, &string_complete)) { return AWS_OP_ERR; } if (!string_complete) { break; } /* Done decoding value string. Done decoding entry. 
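 * Note: at this point the scratch buffer holds the name bytes immediately followed by the
 * value bytes. The code below builds one cursor over the whole buffer and advances it past
 * name_length, so header.name covers the name portion and header.value covers the remainder.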
*/ struct hpack_progress_literal *literal = &decoder->progress_entry.u.literal; /* Set up a header with name and value (which are packed one after the other in scratch) */ struct aws_http_header header; header.value = aws_byte_cursor_from_buf(&decoder->progress_entry.scratch); header.name = aws_byte_cursor_advance(&header.value, literal->name_length); header.compression = literal->compression; /* Save to table if necessary */ if (literal->compression == AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) { if (aws_hpack_insert_header(&decoder->context, &header)) { return AWS_OP_ERR; } } result->type = AWS_HPACK_DECODE_T_HEADER_FIELD; result->data.header_field = header; goto handle_complete; } break; /* RFC-7541 6.3. Dynamic Table Size Update * Read one integer, which is the new maximum size for the dynamic table. */ case HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE: { uint64_t *size64 = &decoder->progress_entry.u.dynamic_table_resize.size; bool size_complete = false; if (aws_hpack_decode_integer(decoder, to_decode, 5, size64, &size_complete)) { return AWS_OP_ERR; } if (!size_complete) { break; } /* The new maximum size MUST be lower than or equal to the limit determined by the protocol using HPACK. * A value that exceeds this limit MUST be treated as a decoding error. */ if (*size64 > decoder->dynamic_table_protocol_max_size_setting) { HPACK_LOG(ERROR, decoder, "Dynamic table update size is larger than the protocal setting"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } size_t size = (size_t)*size64; HPACK_LOGF(TRACE, decoder, "Dynamic table size update %zu", size); if (aws_hpack_resize_dynamic_table(&decoder->context, size)) { return AWS_OP_ERR; } result->type = AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE; result->data.dynamic_table_resize = size; goto handle_complete; } break; default: { AWS_ASSERT(0 && "invalid state"); } break; } } AWS_ASSERT(to_decode->len == 0); result->type = AWS_HPACK_DECODE_T_ONGOING; return AWS_OP_SUCCESS; handle_complete: AWS_ASSERT(result->type != AWS_HPACK_DECODE_T_ONGOING); decoder->progress_entry.state = HPACK_ENTRY_STATE_INIT; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/hpack_encoder.c000066400000000000000000000375061456575232400244530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #define HPACK_LOGF(level, encoder, text, ...) 
\ AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p [HPACK]: " text, (encoder)->log_id, __VA_ARGS__) #define HPACK_LOG(level, encoder, text) HPACK_LOGF(level, encoder, "%s", text) struct aws_huffman_symbol_coder *hpack_get_coder(void); void aws_hpack_encoder_init(struct aws_hpack_encoder *encoder, struct aws_allocator *allocator, const void *log_id) { AWS_ZERO_STRUCT(*encoder); encoder->log_id = log_id; aws_huffman_encoder_init(&encoder->huffman_encoder, hpack_get_coder()); aws_hpack_context_init(&encoder->context, allocator, AWS_LS_HTTP_ENCODER, log_id); encoder->dynamic_table_size_update.pending = false; encoder->dynamic_table_size_update.latest_value = SIZE_MAX; encoder->dynamic_table_size_update.smallest_value = SIZE_MAX; } void aws_hpack_encoder_clean_up(struct aws_hpack_encoder *encoder) { aws_hpack_context_clean_up(&encoder->context); AWS_ZERO_STRUCT(*encoder); } void aws_hpack_encoder_set_huffman_mode(struct aws_hpack_encoder *encoder, enum aws_hpack_huffman_mode mode) { encoder->huffman_mode = mode; } void aws_hpack_encoder_update_max_table_size(struct aws_hpack_encoder *encoder, uint32_t new_max_size) { if (!encoder->dynamic_table_size_update.pending) { encoder->dynamic_table_size_update.pending = true; } encoder->dynamic_table_size_update.smallest_value = aws_min_size(new_max_size, encoder->dynamic_table_size_update.smallest_value); /* TODO: don't necessarily go as high as possible. The peer said the encoder's * dynamic table COULD get this big, but it's not required to. * It's probably not a good idea to let the peer decide how much memory we allocate. * Not sure how to cap it though... Use a hardcoded number? * Match whatever SETTINGS_HEADER_TABLE_SIZE this side sends? */ encoder->dynamic_table_size_update.latest_value = new_max_size; } /* Return a byte with the N right-most bits masked. 
* Ex: 2 -> 00000011 */ static uint8_t s_masked_right_bits_u8(uint8_t num_masked_bits) { AWS_ASSERT(num_masked_bits <= 8); const uint8_t cut_bits = 8 - num_masked_bits; return UINT8_MAX >> cut_bits; } /* If buffer isn't big enough, grow it intelligently */ static int s_ensure_space(struct aws_byte_buf *output, size_t required_space) { size_t available_space = output->capacity - output->len; if (required_space <= available_space) { return AWS_OP_SUCCESS; } /* Capacity must grow to at least this size */ size_t required_capacity; if (aws_add_size_checked(output->len, required_space, &required_capacity)) { return AWS_OP_ERR; } /* Prefer to double capacity, but if that's not enough grow to exactly required_capacity */ size_t double_capacity = aws_add_size_saturating(output->capacity, output->capacity); size_t reserve = aws_max_size(required_capacity, double_capacity); return aws_byte_buf_reserve(output, reserve); } int aws_hpack_encode_integer( uint64_t integer, uint8_t starting_bits, uint8_t prefix_size, struct aws_byte_buf *output) { AWS_ASSERT(prefix_size <= 8); const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size); AWS_ASSERT((starting_bits & prefix_mask) == 0); const size_t original_len = output->len; if (integer < prefix_mask) { /* If the integer fits inside the specified number of bits but won't be all 1's, just write it */ /* Just write out the bits we care about */ uint8_t first_byte = starting_bits | (uint8_t)integer; if (aws_byte_buf_append_byte_dynamic(output, first_byte)) { goto error; } } else { /* Set all of the bits in the first octet to 1 */ uint8_t first_byte = starting_bits | prefix_mask; if (aws_byte_buf_append_byte_dynamic(output, first_byte)) { goto error; } integer -= prefix_mask; const uint64_t hi_57bit_mask = UINT64_MAX - (UINT8_MAX >> 1); do { /* Take top 7 bits from the integer */ uint8_t this_octet = integer % 128; if (integer & hi_57bit_mask) { /* If there's more after this octet, set the hi bit */ this_octet += 128; } if (aws_byte_buf_append_byte_dynamic(output, this_octet)) { goto error; } /* Remove the written bits */ integer >>= 7; } while (integer); } return AWS_OP_SUCCESS; error: output->len = original_len; return AWS_OP_ERR; } int aws_hpack_encode_string( struct aws_hpack_encoder *encoder, struct aws_byte_cursor to_encode, struct aws_byte_buf *output) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode)); AWS_PRECONDITION(output); const size_t original_len = output->len; /* Determine length of encoded string (and whether or not to use huffman) */ uint8_t use_huffman; size_t str_length; switch (encoder->huffman_mode) { case AWS_HPACK_HUFFMAN_NEVER: use_huffman = 0; str_length = to_encode.len; break; case AWS_HPACK_HUFFMAN_ALWAYS: use_huffman = 1; str_length = aws_huffman_get_encoded_length(&encoder->huffman_encoder, to_encode); break; case AWS_HPACK_HUFFMAN_SMALLEST: str_length = aws_huffman_get_encoded_length(&encoder->huffman_encoder, to_encode); if (str_length < to_encode.len) { use_huffman = 1; } else { str_length = to_encode.len; use_huffman = 0; } break; default: aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } /* * String literals are encoded like so (RFC-7541 5.2): * H is whether or not data is huffman-encoded. 
* * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | H | String Length (7+) | * +---+---------------------------+ * | String Data (Length octets) | * +-------------------------------+ */ /* Encode string length */ uint8_t starting_bits = use_huffman << 7; if (aws_hpack_encode_integer(str_length, starting_bits, 7, output)) { HPACK_LOGF(ERROR, encoder, "Error encoding HPACK integer: %s", aws_error_name(aws_last_error())); goto error; } /* Encode string data */ if (str_length > 0) { if (use_huffman) { /* Huffman encoder doesn't grow buffer, so we ensure it's big enough here */ if (s_ensure_space(output, str_length)) { goto error; } if (aws_huffman_encode(&encoder->huffman_encoder, &to_encode, output)) { HPACK_LOGF(ERROR, encoder, "Error from Huffman encoder: %s", aws_error_name(aws_last_error())); goto error; } } else { if (aws_byte_buf_append_dynamic(output, &to_encode)) { goto error; } } } return AWS_OP_SUCCESS; error: output->len = original_len; aws_huffman_encoder_reset(&encoder->huffman_encoder); return AWS_OP_ERR; } /* All types that HPACK might encode/decode (RFC-7541 6 - Binary Format) */ enum aws_hpack_entry_type { AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD, /* RFC-7541 6.1 */ AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING, /* RFC-7541 6.2.1 */ AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING, /* RFC-7541 6.2.2 */ AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED, /* RFC-7541 6.2.3 */ AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE, /* RFC-7541 6.3 */ AWS_HPACK_ENTRY_TYPE_COUNT, }; /** * First byte each entry type looks like this (RFC-7541 6): * The "xxxxx" part is the "N-bit prefix" of the entry's first encoded integer. * * 1xxxxxxx: Indexed Header Field Representation * 01xxxxxx: Literal Header Field with Incremental Indexing * 001xxxxx: Dynamic Table Size Update * 0001xxxx: Literal Header Field Never Indexed * 0000xxxx: Literal Header Field without Indexing */ static const uint8_t s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_TYPE_COUNT] = { [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 1 << 7, [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 1 << 6, [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 1 << 5, [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 1 << 4, [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 0 << 4, }; static const uint8_t s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_TYPE_COUNT] = { [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 7, [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 6, [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 5, [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 4, [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 4, }; static int s_convert_http_compression_to_literal_entry_type( enum aws_http_header_compression compression, enum aws_hpack_entry_type *out_entry_type) { switch (compression) { case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING; return AWS_OP_SUCCESS; case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING; return AWS_OP_SUCCESS; case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED; return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } static int s_encode_header_field( struct aws_hpack_encoder *encoder, const struct aws_http_header *header, struct aws_byte_buf *output) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(header); AWS_PRECONDITION(output); 
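    /* Possible encoding outcomes, sketched for illustration (bit patterns are from RFC-7541;
     * the concrete header below is an assumed example, not taken from this file):
     * - (":method", "GET") matches static-table index 2 by name and value, so it can be emitted
     *   as the single byte 0x82 (entry type 1xxxxxxx with the index in the 7-bit prefix).
     * - A header whose name is indexed but whose value is not becomes a literal representation:
     *   the name index is encoded in the integer prefix, then the value follows as a string.
     * - A header with an unindexed name encodes index 0, then the name string, then the value
     *   string. */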
size_t original_len = output->len; /* Search for header-field in tables */ bool found_indexed_value; size_t header_index = aws_hpack_find_index(&encoder->context, header, true, &found_indexed_value); if (header->compression != AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) { /* If user doesn't want to use indexed value, then don't use it */ found_indexed_value = false; } if (header_index && found_indexed_value) { /* Indexed header field */ const enum aws_hpack_entry_type entry_type = AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD; /* encode the one index (along with the entry type), and we're done! */ uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[entry_type]; uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[entry_type]; if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) { goto error; } return AWS_OP_SUCCESS; } /* Else, Literal header field... */ /* determine exactly which type of literal header-field to encode. */ enum aws_hpack_entry_type literal_entry_type = AWS_HPACK_ENTRY_TYPE_COUNT; if (s_convert_http_compression_to_literal_entry_type(header->compression, &literal_entry_type)) { goto error; } /* the entry type makes up the first few bits of the next integer we encode */ uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[literal_entry_type]; uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[literal_entry_type]; if (header_index) { /* Literal header field, indexed name */ /* first encode the index of name */ if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) { goto error; } } else { /* Literal header field, new name */ /* first encode index of 0 to indicate that header-name is not indexed */ if (aws_hpack_encode_integer(0, starting_bit_pattern, num_prefix_bits, output)) { goto error; } /* next encode header-name string */ if (aws_hpack_encode_string(encoder, header->name, output)) { goto error; } } /* then encode header-value string, and we're done encoding! */ if (aws_hpack_encode_string(encoder, header->value, output)) { goto error; } /* if "incremental indexing" type, insert header into the dynamic table. 
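 * Per RFC-7541 6.2.1, a decoder that receives this representation inserts the same entry into
 * its own dynamic table, so inserting here keeps the encoder's table in sync with the peer's.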
*/ if (AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING == literal_entry_type) { if (aws_hpack_insert_header(&encoder->context, header)) { goto error; } } return AWS_OP_SUCCESS; error: output->len = original_len; return AWS_OP_ERR; } int aws_hpack_encode_header_block( struct aws_hpack_encoder *encoder, const struct aws_http_headers *headers, struct aws_byte_buf *output) { /* Encode a dynamic table size update at the beginning of the first header-block * following the change to the dynamic table size RFC-7541 4.2 */ if (encoder->dynamic_table_size_update.pending) { if (encoder->dynamic_table_size_update.smallest_value != encoder->dynamic_table_size_update.latest_value) { size_t smallest_update_value = encoder->dynamic_table_size_update.smallest_value; HPACK_LOGF( TRACE, encoder, "Encoding smallest dynamic table size update entry size: %zu", smallest_update_value); if (aws_hpack_resize_dynamic_table(&encoder->context, smallest_update_value)) { HPACK_LOGF(ERROR, encoder, "Dynamic table resize failed, size: %zu", smallest_update_value); return AWS_OP_ERR; } uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; if (aws_hpack_encode_integer(smallest_update_value, starting_bit_pattern, num_prefix_bits, output)) { HPACK_LOGF( ERROR, encoder, "Integer encoding failed for table size update entry, integer: %zu", smallest_update_value); return AWS_OP_ERR; } } size_t last_update_value = encoder->dynamic_table_size_update.latest_value; HPACK_LOGF(TRACE, encoder, "Encoding last dynamic table size update entry size: %zu", last_update_value); if (aws_hpack_resize_dynamic_table(&encoder->context, last_update_value)) { HPACK_LOGF(ERROR, encoder, "Dynamic table resize failed, size: %zu", last_update_value); return AWS_OP_ERR; } uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; if (aws_hpack_encode_integer(last_update_value, starting_bit_pattern, num_prefix_bits, output)) { HPACK_LOGF( ERROR, encoder, "Integer encoding failed for table size update entry, integer: %zu", last_update_value); return AWS_OP_ERR; } encoder->dynamic_table_size_update.pending = false; encoder->dynamic_table_size_update.latest_value = SIZE_MAX; encoder->dynamic_table_size_update.smallest_value = SIZE_MAX; } const size_t num_headers = aws_http_headers_count(headers); for (size_t i = 0; i < num_headers; ++i) { struct aws_http_header header; aws_http_headers_get_index(headers, i, &header); if (s_encode_header_field(encoder, &header, output)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/hpack_huffman_static.c000066400000000000000000001472201456575232400260220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT. 
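 *
 * The code_points table below corresponds to the static Huffman code defined in RFC-7541
 * Appendix B, one entry per byte value (0-255). decode_symbol() is an unrolled walk of the
 * matching code tree, consuming bits starting from the most significant end of the input word.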
*/ /* clang-format off */ #include static struct aws_huffman_code code_points[] = { { .pattern = 0x1ff8, .num_bits = 13 }, /* ' ' 0 */ { .pattern = 0x7fffd8, .num_bits = 23 }, /* ' ' 1 */ { .pattern = 0xfffffe2, .num_bits = 28 }, /* ' ' 2 */ { .pattern = 0xfffffe3, .num_bits = 28 }, /* ' ' 3 */ { .pattern = 0xfffffe4, .num_bits = 28 }, /* ' ' 4 */ { .pattern = 0xfffffe5, .num_bits = 28 }, /* ' ' 5 */ { .pattern = 0xfffffe6, .num_bits = 28 }, /* ' ' 6 */ { .pattern = 0xfffffe7, .num_bits = 28 }, /* ' ' 7 */ { .pattern = 0xfffffe8, .num_bits = 28 }, /* ' ' 8 */ { .pattern = 0xffffea, .num_bits = 24 }, /* ' ' 9 */ { .pattern = 0x3ffffffc, .num_bits = 30 }, /* ' ' 10 */ { .pattern = 0xfffffe9, .num_bits = 28 }, /* ' ' 11 */ { .pattern = 0xfffffea, .num_bits = 28 }, /* ' ' 12 */ { .pattern = 0x3ffffffd, .num_bits = 30 }, /* ' ' 13 */ { .pattern = 0xfffffeb, .num_bits = 28 }, /* ' ' 14 */ { .pattern = 0xfffffec, .num_bits = 28 }, /* ' ' 15 */ { .pattern = 0xfffffed, .num_bits = 28 }, /* ' ' 16 */ { .pattern = 0xfffffee, .num_bits = 28 }, /* ' ' 17 */ { .pattern = 0xfffffef, .num_bits = 28 }, /* ' ' 18 */ { .pattern = 0xffffff0, .num_bits = 28 }, /* ' ' 19 */ { .pattern = 0xffffff1, .num_bits = 28 }, /* ' ' 20 */ { .pattern = 0xffffff2, .num_bits = 28 }, /* ' ' 21 */ { .pattern = 0x3ffffffe, .num_bits = 30 }, /* ' ' 22 */ { .pattern = 0xffffff3, .num_bits = 28 }, /* ' ' 23 */ { .pattern = 0xffffff4, .num_bits = 28 }, /* ' ' 24 */ { .pattern = 0xffffff5, .num_bits = 28 }, /* ' ' 25 */ { .pattern = 0xffffff6, .num_bits = 28 }, /* ' ' 26 */ { .pattern = 0xffffff7, .num_bits = 28 }, /* ' ' 27 */ { .pattern = 0xffffff8, .num_bits = 28 }, /* ' ' 28 */ { .pattern = 0xffffff9, .num_bits = 28 }, /* ' ' 29 */ { .pattern = 0xffffffa, .num_bits = 28 }, /* ' ' 30 */ { .pattern = 0xffffffb, .num_bits = 28 }, /* ' ' 31 */ { .pattern = 0x14, .num_bits = 6 }, /* ' ' 32 */ { .pattern = 0x3f8, .num_bits = 10 }, /* '!' 33 */ { .pattern = 0x3f9, .num_bits = 10 }, /* '"' 34 */ { .pattern = 0xffa, .num_bits = 12 }, /* '#' 35 */ { .pattern = 0x1ff9, .num_bits = 13 }, /* '$' 36 */ { .pattern = 0x15, .num_bits = 6 }, /* '%' 37 */ { .pattern = 0xf8, .num_bits = 8 }, /* '&' 38 */ { .pattern = 0x7fa, .num_bits = 11 }, /* ''' 39 */ { .pattern = 0x3fa, .num_bits = 10 }, /* '(' 40 */ { .pattern = 0x3fb, .num_bits = 10 }, /* ')' 41 */ { .pattern = 0xf9, .num_bits = 8 }, /* '*' 42 */ { .pattern = 0x7fb, .num_bits = 11 }, /* '+' 43 */ { .pattern = 0xfa, .num_bits = 8 }, /* ',' 44 */ { .pattern = 0x16, .num_bits = 6 }, /* '-' 45 */ { .pattern = 0x17, .num_bits = 6 }, /* '.' 46 */ { .pattern = 0x18, .num_bits = 6 }, /* '/' 47 */ { .pattern = 0x0, .num_bits = 5 }, /* '0' 48 */ { .pattern = 0x1, .num_bits = 5 }, /* '1' 49 */ { .pattern = 0x2, .num_bits = 5 }, /* '2' 50 */ { .pattern = 0x19, .num_bits = 6 }, /* '3' 51 */ { .pattern = 0x1a, .num_bits = 6 }, /* '4' 52 */ { .pattern = 0x1b, .num_bits = 6 }, /* '5' 53 */ { .pattern = 0x1c, .num_bits = 6 }, /* '6' 54 */ { .pattern = 0x1d, .num_bits = 6 }, /* '7' 55 */ { .pattern = 0x1e, .num_bits = 6 }, /* '8' 56 */ { .pattern = 0x1f, .num_bits = 6 }, /* '9' 57 */ { .pattern = 0x5c, .num_bits = 7 }, /* ':' 58 */ { .pattern = 0xfb, .num_bits = 8 }, /* ';' 59 */ { .pattern = 0x7ffc, .num_bits = 15 }, /* '<' 60 */ { .pattern = 0x20, .num_bits = 6 }, /* '=' 61 */ { .pattern = 0xffb, .num_bits = 12 }, /* '>' 62 */ { .pattern = 0x3fc, .num_bits = 10 }, /* '?' 
63 */ { .pattern = 0x1ffa, .num_bits = 13 }, /* '@' 64 */ { .pattern = 0x21, .num_bits = 6 }, /* 'A' 65 */ { .pattern = 0x5d, .num_bits = 7 }, /* 'B' 66 */ { .pattern = 0x5e, .num_bits = 7 }, /* 'C' 67 */ { .pattern = 0x5f, .num_bits = 7 }, /* 'D' 68 */ { .pattern = 0x60, .num_bits = 7 }, /* 'E' 69 */ { .pattern = 0x61, .num_bits = 7 }, /* 'F' 70 */ { .pattern = 0x62, .num_bits = 7 }, /* 'G' 71 */ { .pattern = 0x63, .num_bits = 7 }, /* 'H' 72 */ { .pattern = 0x64, .num_bits = 7 }, /* 'I' 73 */ { .pattern = 0x65, .num_bits = 7 }, /* 'J' 74 */ { .pattern = 0x66, .num_bits = 7 }, /* 'K' 75 */ { .pattern = 0x67, .num_bits = 7 }, /* 'L' 76 */ { .pattern = 0x68, .num_bits = 7 }, /* 'M' 77 */ { .pattern = 0x69, .num_bits = 7 }, /* 'N' 78 */ { .pattern = 0x6a, .num_bits = 7 }, /* 'O' 79 */ { .pattern = 0x6b, .num_bits = 7 }, /* 'P' 80 */ { .pattern = 0x6c, .num_bits = 7 }, /* 'Q' 81 */ { .pattern = 0x6d, .num_bits = 7 }, /* 'R' 82 */ { .pattern = 0x6e, .num_bits = 7 }, /* 'S' 83 */ { .pattern = 0x6f, .num_bits = 7 }, /* 'T' 84 */ { .pattern = 0x70, .num_bits = 7 }, /* 'U' 85 */ { .pattern = 0x71, .num_bits = 7 }, /* 'V' 86 */ { .pattern = 0x72, .num_bits = 7 }, /* 'W' 87 */ { .pattern = 0xfc, .num_bits = 8 }, /* 'X' 88 */ { .pattern = 0x73, .num_bits = 7 }, /* 'Y' 89 */ { .pattern = 0xfd, .num_bits = 8 }, /* 'Z' 90 */ { .pattern = 0x1ffb, .num_bits = 13 }, /* '[' 91 */ { .pattern = 0x7fff0, .num_bits = 19 }, /* '\' 92 */ { .pattern = 0x1ffc, .num_bits = 13 }, /* ']' 93 */ { .pattern = 0x3ffc, .num_bits = 14 }, /* '^' 94 */ { .pattern = 0x22, .num_bits = 6 }, /* '_' 95 */ { .pattern = 0x7ffd, .num_bits = 15 }, /* '`' 96 */ { .pattern = 0x3, .num_bits = 5 }, /* 'a' 97 */ { .pattern = 0x23, .num_bits = 6 }, /* 'b' 98 */ { .pattern = 0x4, .num_bits = 5 }, /* 'c' 99 */ { .pattern = 0x24, .num_bits = 6 }, /* 'd' 100 */ { .pattern = 0x5, .num_bits = 5 }, /* 'e' 101 */ { .pattern = 0x25, .num_bits = 6 }, /* 'f' 102 */ { .pattern = 0x26, .num_bits = 6 }, /* 'g' 103 */ { .pattern = 0x27, .num_bits = 6 }, /* 'h' 104 */ { .pattern = 0x6, .num_bits = 5 }, /* 'i' 105 */ { .pattern = 0x74, .num_bits = 7 }, /* 'j' 106 */ { .pattern = 0x75, .num_bits = 7 }, /* 'k' 107 */ { .pattern = 0x28, .num_bits = 6 }, /* 'l' 108 */ { .pattern = 0x29, .num_bits = 6 }, /* 'm' 109 */ { .pattern = 0x2a, .num_bits = 6 }, /* 'n' 110 */ { .pattern = 0x7, .num_bits = 5 }, /* 'o' 111 */ { .pattern = 0x2b, .num_bits = 6 }, /* 'p' 112 */ { .pattern = 0x76, .num_bits = 7 }, /* 'q' 113 */ { .pattern = 0x2c, .num_bits = 6 }, /* 'r' 114 */ { .pattern = 0x8, .num_bits = 5 }, /* 's' 115 */ { .pattern = 0x9, .num_bits = 5 }, /* 't' 116 */ { .pattern = 0x2d, .num_bits = 6 }, /* 'u' 117 */ { .pattern = 0x77, .num_bits = 7 }, /* 'v' 118 */ { .pattern = 0x78, .num_bits = 7 }, /* 'w' 119 */ { .pattern = 0x79, .num_bits = 7 }, /* 'x' 120 */ { .pattern = 0x7a, .num_bits = 7 }, /* 'y' 121 */ { .pattern = 0x7b, .num_bits = 7 }, /* 'z' 122 */ { .pattern = 0x7ffe, .num_bits = 15 }, /* '{' 123 */ { .pattern = 0x7fc, .num_bits = 11 }, /* '|' 124 */ { .pattern = 0x3ffd, .num_bits = 14 }, /* '}' 125 */ { .pattern = 0x1ffd, .num_bits = 13 }, /* '~' 126 */ { .pattern = 0xffffffc, .num_bits = 28 }, /* ' ' 127 */ { .pattern = 0xfffe6, .num_bits = 20 }, /* ' ' 128 */ { .pattern = 0x3fffd2, .num_bits = 22 }, /* ' ' 129 */ { .pattern = 0xfffe7, .num_bits = 20 }, /* ' ' 130 */ { .pattern = 0xfffe8, .num_bits = 20 }, /* ' ' 131 */ { .pattern = 0x3fffd3, .num_bits = 22 }, /* ' ' 132 */ { .pattern = 0x3fffd4, .num_bits = 22 }, /* ' ' 133 */ { .pattern = 0x3fffd5, 
.num_bits = 22 }, /* ' ' 134 */ { .pattern = 0x7fffd9, .num_bits = 23 }, /* ' ' 135 */ { .pattern = 0x3fffd6, .num_bits = 22 }, /* ' ' 136 */ { .pattern = 0x7fffda, .num_bits = 23 }, /* ' ' 137 */ { .pattern = 0x7fffdb, .num_bits = 23 }, /* ' ' 138 */ { .pattern = 0x7fffdc, .num_bits = 23 }, /* ' ' 139 */ { .pattern = 0x7fffdd, .num_bits = 23 }, /* ' ' 140 */ { .pattern = 0x7fffde, .num_bits = 23 }, /* ' ' 141 */ { .pattern = 0xffffeb, .num_bits = 24 }, /* ' ' 142 */ { .pattern = 0x7fffdf, .num_bits = 23 }, /* ' ' 143 */ { .pattern = 0xffffec, .num_bits = 24 }, /* ' ' 144 */ { .pattern = 0xffffed, .num_bits = 24 }, /* ' ' 145 */ { .pattern = 0x3fffd7, .num_bits = 22 }, /* ' ' 146 */ { .pattern = 0x7fffe0, .num_bits = 23 }, /* ' ' 147 */ { .pattern = 0xffffee, .num_bits = 24 }, /* ' ' 148 */ { .pattern = 0x7fffe1, .num_bits = 23 }, /* ' ' 149 */ { .pattern = 0x7fffe2, .num_bits = 23 }, /* ' ' 150 */ { .pattern = 0x7fffe3, .num_bits = 23 }, /* ' ' 151 */ { .pattern = 0x7fffe4, .num_bits = 23 }, /* ' ' 152 */ { .pattern = 0x1fffdc, .num_bits = 21 }, /* ' ' 153 */ { .pattern = 0x3fffd8, .num_bits = 22 }, /* ' ' 154 */ { .pattern = 0x7fffe5, .num_bits = 23 }, /* ' ' 155 */ { .pattern = 0x3fffd9, .num_bits = 22 }, /* ' ' 156 */ { .pattern = 0x7fffe6, .num_bits = 23 }, /* ' ' 157 */ { .pattern = 0x7fffe7, .num_bits = 23 }, /* ' ' 158 */ { .pattern = 0xffffef, .num_bits = 24 }, /* ' ' 159 */ { .pattern = 0x3fffda, .num_bits = 22 }, /* ' ' 160 */ { .pattern = 0x1fffdd, .num_bits = 21 }, /* ' ' 161 */ { .pattern = 0xfffe9, .num_bits = 20 }, /* ' ' 162 */ { .pattern = 0x3fffdb, .num_bits = 22 }, /* ' ' 163 */ { .pattern = 0x3fffdc, .num_bits = 22 }, /* ' ' 164 */ { .pattern = 0x7fffe8, .num_bits = 23 }, /* ' ' 165 */ { .pattern = 0x7fffe9, .num_bits = 23 }, /* ' ' 166 */ { .pattern = 0x1fffde, .num_bits = 21 }, /* ' ' 167 */ { .pattern = 0x7fffea, .num_bits = 23 }, /* ' ' 168 */ { .pattern = 0x3fffdd, .num_bits = 22 }, /* ' ' 169 */ { .pattern = 0x3fffde, .num_bits = 22 }, /* ' ' 170 */ { .pattern = 0xfffff0, .num_bits = 24 }, /* ' ' 171 */ { .pattern = 0x1fffdf, .num_bits = 21 }, /* ' ' 172 */ { .pattern = 0x3fffdf, .num_bits = 22 }, /* ' ' 173 */ { .pattern = 0x7fffeb, .num_bits = 23 }, /* ' ' 174 */ { .pattern = 0x7fffec, .num_bits = 23 }, /* ' ' 175 */ { .pattern = 0x1fffe0, .num_bits = 21 }, /* ' ' 176 */ { .pattern = 0x1fffe1, .num_bits = 21 }, /* ' ' 177 */ { .pattern = 0x3fffe0, .num_bits = 22 }, /* ' ' 178 */ { .pattern = 0x1fffe2, .num_bits = 21 }, /* ' ' 179 */ { .pattern = 0x7fffed, .num_bits = 23 }, /* ' ' 180 */ { .pattern = 0x3fffe1, .num_bits = 22 }, /* ' ' 181 */ { .pattern = 0x7fffee, .num_bits = 23 }, /* ' ' 182 */ { .pattern = 0x7fffef, .num_bits = 23 }, /* ' ' 183 */ { .pattern = 0xfffea, .num_bits = 20 }, /* ' ' 184 */ { .pattern = 0x3fffe2, .num_bits = 22 }, /* ' ' 185 */ { .pattern = 0x3fffe3, .num_bits = 22 }, /* ' ' 186 */ { .pattern = 0x3fffe4, .num_bits = 22 }, /* ' ' 187 */ { .pattern = 0x7ffff0, .num_bits = 23 }, /* ' ' 188 */ { .pattern = 0x3fffe5, .num_bits = 22 }, /* ' ' 189 */ { .pattern = 0x3fffe6, .num_bits = 22 }, /* ' ' 190 */ { .pattern = 0x7ffff1, .num_bits = 23 }, /* ' ' 191 */ { .pattern = 0x3ffffe0, .num_bits = 26 }, /* ' ' 192 */ { .pattern = 0x3ffffe1, .num_bits = 26 }, /* ' ' 193 */ { .pattern = 0xfffeb, .num_bits = 20 }, /* ' ' 194 */ { .pattern = 0x7fff1, .num_bits = 19 }, /* ' ' 195 */ { .pattern = 0x3fffe7, .num_bits = 22 }, /* ' ' 196 */ { .pattern = 0x7ffff2, .num_bits = 23 }, /* ' ' 197 */ { .pattern = 0x3fffe8, .num_bits = 22 }, /* ' ' 198 */ { 
.pattern = 0x1ffffec, .num_bits = 25 }, /* ' ' 199 */ { .pattern = 0x3ffffe2, .num_bits = 26 }, /* ' ' 200 */ { .pattern = 0x3ffffe3, .num_bits = 26 }, /* ' ' 201 */ { .pattern = 0x3ffffe4, .num_bits = 26 }, /* ' ' 202 */ { .pattern = 0x7ffffde, .num_bits = 27 }, /* ' ' 203 */ { .pattern = 0x7ffffdf, .num_bits = 27 }, /* ' ' 204 */ { .pattern = 0x3ffffe5, .num_bits = 26 }, /* ' ' 205 */ { .pattern = 0xfffff1, .num_bits = 24 }, /* ' ' 206 */ { .pattern = 0x1ffffed, .num_bits = 25 }, /* ' ' 207 */ { .pattern = 0x7fff2, .num_bits = 19 }, /* ' ' 208 */ { .pattern = 0x1fffe3, .num_bits = 21 }, /* ' ' 209 */ { .pattern = 0x3ffffe6, .num_bits = 26 }, /* ' ' 210 */ { .pattern = 0x7ffffe0, .num_bits = 27 }, /* ' ' 211 */ { .pattern = 0x7ffffe1, .num_bits = 27 }, /* ' ' 212 */ { .pattern = 0x3ffffe7, .num_bits = 26 }, /* ' ' 213 */ { .pattern = 0x7ffffe2, .num_bits = 27 }, /* ' ' 214 */ { .pattern = 0xfffff2, .num_bits = 24 }, /* ' ' 215 */ { .pattern = 0x1fffe4, .num_bits = 21 }, /* ' ' 216 */ { .pattern = 0x1fffe5, .num_bits = 21 }, /* ' ' 217 */ { .pattern = 0x3ffffe8, .num_bits = 26 }, /* ' ' 218 */ { .pattern = 0x3ffffe9, .num_bits = 26 }, /* ' ' 219 */ { .pattern = 0xffffffd, .num_bits = 28 }, /* ' ' 220 */ { .pattern = 0x7ffffe3, .num_bits = 27 }, /* ' ' 221 */ { .pattern = 0x7ffffe4, .num_bits = 27 }, /* ' ' 222 */ { .pattern = 0x7ffffe5, .num_bits = 27 }, /* ' ' 223 */ { .pattern = 0xfffec, .num_bits = 20 }, /* ' ' 224 */ { .pattern = 0xfffff3, .num_bits = 24 }, /* ' ' 225 */ { .pattern = 0xfffed, .num_bits = 20 }, /* ' ' 226 */ { .pattern = 0x1fffe6, .num_bits = 21 }, /* ' ' 227 */ { .pattern = 0x3fffe9, .num_bits = 22 }, /* ' ' 228 */ { .pattern = 0x1fffe7, .num_bits = 21 }, /* ' ' 229 */ { .pattern = 0x1fffe8, .num_bits = 21 }, /* ' ' 230 */ { .pattern = 0x7ffff3, .num_bits = 23 }, /* ' ' 231 */ { .pattern = 0x3fffea, .num_bits = 22 }, /* ' ' 232 */ { .pattern = 0x3fffeb, .num_bits = 22 }, /* ' ' 233 */ { .pattern = 0x1ffffee, .num_bits = 25 }, /* ' ' 234 */ { .pattern = 0x1ffffef, .num_bits = 25 }, /* ' ' 235 */ { .pattern = 0xfffff4, .num_bits = 24 }, /* ' ' 236 */ { .pattern = 0xfffff5, .num_bits = 24 }, /* ' ' 237 */ { .pattern = 0x3ffffea, .num_bits = 26 }, /* ' ' 238 */ { .pattern = 0x7ffff4, .num_bits = 23 }, /* ' ' 239 */ { .pattern = 0x3ffffeb, .num_bits = 26 }, /* ' ' 240 */ { .pattern = 0x7ffffe6, .num_bits = 27 }, /* ' ' 241 */ { .pattern = 0x3ffffec, .num_bits = 26 }, /* ' ' 242 */ { .pattern = 0x3ffffed, .num_bits = 26 }, /* ' ' 243 */ { .pattern = 0x7ffffe7, .num_bits = 27 }, /* ' ' 244 */ { .pattern = 0x7ffffe8, .num_bits = 27 }, /* ' ' 245 */ { .pattern = 0x7ffffe9, .num_bits = 27 }, /* ' ' 246 */ { .pattern = 0x7ffffea, .num_bits = 27 }, /* ' ' 247 */ { .pattern = 0x7ffffeb, .num_bits = 27 }, /* ' ' 248 */ { .pattern = 0xffffffe, .num_bits = 28 }, /* ' ' 249 */ { .pattern = 0x7ffffec, .num_bits = 27 }, /* ' ' 250 */ { .pattern = 0x7ffffed, .num_bits = 27 }, /* ' ' 251 */ { .pattern = 0x7ffffee, .num_bits = 27 }, /* ' ' 252 */ { .pattern = 0x7ffffef, .num_bits = 27 }, /* ' ' 253 */ { .pattern = 0x7fffff0, .num_bits = 27 }, /* ' ' 254 */ { .pattern = 0x3ffffee, .num_bits = 26 }, /* ' ' 255 */ }; static struct aws_huffman_code encode_symbol(uint8_t symbol, void *userdata) { (void)userdata; return code_points[symbol]; } /* NOLINTNEXTLINE(readability-function-size) */ static uint8_t decode_symbol(uint32_t bits, uint8_t *symbol, void *userdata) { (void)userdata; if (bits & 0x80000000) { goto node_1; } else { goto node_0; } node_0: if (bits & 0x40000000) { goto node_01; } else 
{ goto node_00; } node_00: if (bits & 0x20000000) { goto node_001; } else { goto node_000; } node_000: if (bits & 0x10000000) { goto node_0001; } else { goto node_0000; } node_0000: if (bits & 0x8000000) { *symbol = 49; return 5; } else { *symbol = 48; return 5; } node_0001: if (bits & 0x8000000) { *symbol = 97; return 5; } else { *symbol = 50; return 5; } node_001: if (bits & 0x10000000) { goto node_0011; } else { goto node_0010; } node_0010: if (bits & 0x8000000) { *symbol = 101; return 5; } else { *symbol = 99; return 5; } node_0011: if (bits & 0x8000000) { *symbol = 111; return 5; } else { *symbol = 105; return 5; } node_01: if (bits & 0x20000000) { goto node_011; } else { goto node_010; } node_010: if (bits & 0x10000000) { goto node_0101; } else { goto node_0100; } node_0100: if (bits & 0x8000000) { *symbol = 116; return 5; } else { *symbol = 115; return 5; } node_0101: if (bits & 0x8000000) { goto node_01011; } else { goto node_01010; } node_01010: if (bits & 0x4000000) { *symbol = 37; return 6; } else { *symbol = 32; return 6; } node_01011: if (bits & 0x4000000) { *symbol = 46; return 6; } else { *symbol = 45; return 6; } node_011: if (bits & 0x10000000) { goto node_0111; } else { goto node_0110; } node_0110: if (bits & 0x8000000) { goto node_01101; } else { goto node_01100; } node_01100: if (bits & 0x4000000) { *symbol = 51; return 6; } else { *symbol = 47; return 6; } node_01101: if (bits & 0x4000000) { *symbol = 53; return 6; } else { *symbol = 52; return 6; } node_0111: if (bits & 0x8000000) { goto node_01111; } else { goto node_01110; } node_01110: if (bits & 0x4000000) { *symbol = 55; return 6; } else { *symbol = 54; return 6; } node_01111: if (bits & 0x4000000) { *symbol = 57; return 6; } else { *symbol = 56; return 6; } node_1: if (bits & 0x40000000) { goto node_11; } else { goto node_10; } node_10: if (bits & 0x20000000) { goto node_101; } else { goto node_100; } node_100: if (bits & 0x10000000) { goto node_1001; } else { goto node_1000; } node_1000: if (bits & 0x8000000) { goto node_10001; } else { goto node_10000; } node_10000: if (bits & 0x4000000) { *symbol = 65; return 6; } else { *symbol = 61; return 6; } node_10001: if (bits & 0x4000000) { *symbol = 98; return 6; } else { *symbol = 95; return 6; } node_1001: if (bits & 0x8000000) { goto node_10011; } else { goto node_10010; } node_10010: if (bits & 0x4000000) { *symbol = 102; return 6; } else { *symbol = 100; return 6; } node_10011: if (bits & 0x4000000) { *symbol = 104; return 6; } else { *symbol = 103; return 6; } node_101: if (bits & 0x10000000) { goto node_1011; } else { goto node_1010; } node_1010: if (bits & 0x8000000) { goto node_10101; } else { goto node_10100; } node_10100: if (bits & 0x4000000) { *symbol = 109; return 6; } else { *symbol = 108; return 6; } node_10101: if (bits & 0x4000000) { *symbol = 112; return 6; } else { *symbol = 110; return 6; } node_1011: if (bits & 0x8000000) { goto node_10111; } else { goto node_10110; } node_10110: if (bits & 0x4000000) { *symbol = 117; return 6; } else { *symbol = 114; return 6; } node_10111: if (bits & 0x4000000) { goto node_101111; } else { goto node_101110; } node_101110: if (bits & 0x2000000) { *symbol = 66; return 7; } else { *symbol = 58; return 7; } node_101111: if (bits & 0x2000000) { *symbol = 68; return 7; } else { *symbol = 67; return 7; } node_11: if (bits & 0x20000000) { goto node_111; } else { goto node_110; } node_110: if (bits & 0x10000000) { goto node_1101; } else { goto node_1100; } node_1100: if (bits & 0x8000000) { goto node_11001; } else { 
goto node_11000; } node_11000: if (bits & 0x4000000) { goto node_110001; } else { goto node_110000; } node_110000: if (bits & 0x2000000) { *symbol = 70; return 7; } else { *symbol = 69; return 7; } node_110001: if (bits & 0x2000000) { *symbol = 72; return 7; } else { *symbol = 71; return 7; } node_11001: if (bits & 0x4000000) { goto node_110011; } else { goto node_110010; } node_110010: if (bits & 0x2000000) { *symbol = 74; return 7; } else { *symbol = 73; return 7; } node_110011: if (bits & 0x2000000) { *symbol = 76; return 7; } else { *symbol = 75; return 7; } node_1101: if (bits & 0x8000000) { goto node_11011; } else { goto node_11010; } node_11010: if (bits & 0x4000000) { goto node_110101; } else { goto node_110100; } node_110100: if (bits & 0x2000000) { *symbol = 78; return 7; } else { *symbol = 77; return 7; } node_110101: if (bits & 0x2000000) { *symbol = 80; return 7; } else { *symbol = 79; return 7; } node_11011: if (bits & 0x4000000) { goto node_110111; } else { goto node_110110; } node_110110: if (bits & 0x2000000) { *symbol = 82; return 7; } else { *symbol = 81; return 7; } node_110111: if (bits & 0x2000000) { *symbol = 84; return 7; } else { *symbol = 83; return 7; } node_111: if (bits & 0x10000000) { goto node_1111; } else { goto node_1110; } node_1110: if (bits & 0x8000000) { goto node_11101; } else { goto node_11100; } node_11100: if (bits & 0x4000000) { goto node_111001; } else { goto node_111000; } node_111000: if (bits & 0x2000000) { *symbol = 86; return 7; } else { *symbol = 85; return 7; } node_111001: if (bits & 0x2000000) { *symbol = 89; return 7; } else { *symbol = 87; return 7; } node_11101: if (bits & 0x4000000) { goto node_111011; } else { goto node_111010; } node_111010: if (bits & 0x2000000) { *symbol = 107; return 7; } else { *symbol = 106; return 7; } node_111011: if (bits & 0x2000000) { *symbol = 118; return 7; } else { *symbol = 113; return 7; } node_1111: if (bits & 0x8000000) { goto node_11111; } else { goto node_11110; } node_11110: if (bits & 0x4000000) { goto node_111101; } else { goto node_111100; } node_111100: if (bits & 0x2000000) { *symbol = 120; return 7; } else { *symbol = 119; return 7; } node_111101: if (bits & 0x2000000) { *symbol = 122; return 7; } else { *symbol = 121; return 7; } node_11111: if (bits & 0x4000000) { goto node_111111; } else { goto node_111110; } node_111110: if (bits & 0x2000000) { goto node_1111101; } else { goto node_1111100; } node_1111100: if (bits & 0x1000000) { *symbol = 42; return 8; } else { *symbol = 38; return 8; } node_1111101: if (bits & 0x1000000) { *symbol = 59; return 8; } else { *symbol = 44; return 8; } node_111111: if (bits & 0x2000000) { goto node_1111111; } else { goto node_1111110; } node_1111110: if (bits & 0x1000000) { *symbol = 90; return 8; } else { *symbol = 88; return 8; } node_1111111: if (bits & 0x1000000) { goto node_11111111; } else { goto node_11111110; } node_11111110: if (bits & 0x800000) { goto node_111111101; } else { goto node_111111100; } node_111111100: if (bits & 0x400000) { *symbol = 34; return 10; } else { *symbol = 33; return 10; } node_111111101: if (bits & 0x400000) { *symbol = 41; return 10; } else { *symbol = 40; return 10; } node_11111111: if (bits & 0x800000) { goto node_111111111; } else { goto node_111111110; } node_111111110: if (bits & 0x400000) { goto node_1111111101; } else { *symbol = 63; return 10; } node_1111111101: if (bits & 0x200000) { *symbol = 43; return 11; } else { *symbol = 39; return 11; } node_111111111: if (bits & 0x400000) { goto node_1111111111; } else { 
goto node_1111111110; } node_1111111110: if (bits & 0x200000) { goto node_11111111101; } else { *symbol = 124; return 11; } node_11111111101: if (bits & 0x100000) { *symbol = 62; return 12; } else { *symbol = 35; return 12; } node_1111111111: if (bits & 0x200000) { goto node_11111111111; } else { goto node_11111111110; } node_11111111110: if (bits & 0x100000) { goto node_111111111101; } else { goto node_111111111100; } node_111111111100: if (bits & 0x80000) { *symbol = 36; return 13; } else { *symbol = 0; return 13; } node_111111111101: if (bits & 0x80000) { *symbol = 91; return 13; } else { *symbol = 64; return 13; } node_11111111111: if (bits & 0x100000) { goto node_111111111111; } else { goto node_111111111110; } node_111111111110: if (bits & 0x80000) { *symbol = 126; return 13; } else { *symbol = 93; return 13; } node_111111111111: if (bits & 0x80000) { goto node_1111111111111; } else { goto node_1111111111110; } node_1111111111110: if (bits & 0x40000) { *symbol = 125; return 14; } else { *symbol = 94; return 14; } node_1111111111111: if (bits & 0x40000) { goto node_11111111111111; } else { goto node_11111111111110; } node_11111111111110: if (bits & 0x20000) { *symbol = 96; return 15; } else { *symbol = 60; return 15; } node_11111111111111: if (bits & 0x20000) { goto node_111111111111111; } else { *symbol = 123; return 15; } node_111111111111111: if (bits & 0x10000) { goto node_1111111111111111; } else { goto node_1111111111111110; } node_1111111111111110: if (bits & 0x8000) { goto node_11111111111111101; } else { goto node_11111111111111100; } node_11111111111111100: if (bits & 0x4000) { goto node_111111111111111001; } else { goto node_111111111111111000; } node_111111111111111000: if (bits & 0x2000) { *symbol = 195; return 19; } else { *symbol = 92; return 19; } node_111111111111111001: if (bits & 0x2000) { goto node_1111111111111110011; } else { *symbol = 208; return 19; } node_1111111111111110011: if (bits & 0x1000) { *symbol = 130; return 20; } else { *symbol = 128; return 20; } node_11111111111111101: if (bits & 0x4000) { goto node_111111111111111011; } else { goto node_111111111111111010; } node_111111111111111010: if (bits & 0x2000) { goto node_1111111111111110101; } else { goto node_1111111111111110100; } node_1111111111111110100: if (bits & 0x1000) { *symbol = 162; return 20; } else { *symbol = 131; return 20; } node_1111111111111110101: if (bits & 0x1000) { *symbol = 194; return 20; } else { *symbol = 184; return 20; } node_111111111111111011: if (bits & 0x2000) { goto node_1111111111111110111; } else { goto node_1111111111111110110; } node_1111111111111110110: if (bits & 0x1000) { *symbol = 226; return 20; } else { *symbol = 224; return 20; } node_1111111111111110111: if (bits & 0x1000) { goto node_11111111111111101111; } else { goto node_11111111111111101110; } node_11111111111111101110: if (bits & 0x800) { *symbol = 161; return 21; } else { *symbol = 153; return 21; } node_11111111111111101111: if (bits & 0x800) { *symbol = 172; return 21; } else { *symbol = 167; return 21; } node_1111111111111111: if (bits & 0x8000) { goto node_11111111111111111; } else { goto node_11111111111111110; } node_11111111111111110: if (bits & 0x4000) { goto node_111111111111111101; } else { goto node_111111111111111100; } node_111111111111111100: if (bits & 0x2000) { goto node_1111111111111111001; } else { goto node_1111111111111111000; } node_1111111111111111000: if (bits & 0x1000) { goto node_11111111111111110001; } else { goto node_11111111111111110000; } node_11111111111111110000: if (bits 
& 0x800) { *symbol = 177; return 21; } else { *symbol = 176; return 21; } node_11111111111111110001: if (bits & 0x800) { *symbol = 209; return 21; } else { *symbol = 179; return 21; } node_1111111111111111001: if (bits & 0x1000) { goto node_11111111111111110011; } else { goto node_11111111111111110010; } node_11111111111111110010: if (bits & 0x800) { *symbol = 217; return 21; } else { *symbol = 216; return 21; } node_11111111111111110011: if (bits & 0x800) { *symbol = 229; return 21; } else { *symbol = 227; return 21; } node_111111111111111101: if (bits & 0x2000) { goto node_1111111111111111011; } else { goto node_1111111111111111010; } node_1111111111111111010: if (bits & 0x1000) { goto node_11111111111111110101; } else { goto node_11111111111111110100; } node_11111111111111110100: if (bits & 0x800) { goto node_111111111111111101001; } else { *symbol = 230; return 21; } node_111111111111111101001: if (bits & 0x400) { *symbol = 132; return 22; } else { *symbol = 129; return 22; } node_11111111111111110101: if (bits & 0x800) { goto node_111111111111111101011; } else { goto node_111111111111111101010; } node_111111111111111101010: if (bits & 0x400) { *symbol = 134; return 22; } else { *symbol = 133; return 22; } node_111111111111111101011: if (bits & 0x400) { *symbol = 146; return 22; } else { *symbol = 136; return 22; } node_1111111111111111011: if (bits & 0x1000) { goto node_11111111111111110111; } else { goto node_11111111111111110110; } node_11111111111111110110: if (bits & 0x800) { goto node_111111111111111101101; } else { goto node_111111111111111101100; } node_111111111111111101100: if (bits & 0x400) { *symbol = 156; return 22; } else { *symbol = 154; return 22; } node_111111111111111101101: if (bits & 0x400) { *symbol = 163; return 22; } else { *symbol = 160; return 22; } node_11111111111111110111: if (bits & 0x800) { goto node_111111111111111101111; } else { goto node_111111111111111101110; } node_111111111111111101110: if (bits & 0x400) { *symbol = 169; return 22; } else { *symbol = 164; return 22; } node_111111111111111101111: if (bits & 0x400) { *symbol = 173; return 22; } else { *symbol = 170; return 22; } node_11111111111111111: if (bits & 0x4000) { goto node_111111111111111111; } else { goto node_111111111111111110; } node_111111111111111110: if (bits & 0x2000) { goto node_1111111111111111101; } else { goto node_1111111111111111100; } node_1111111111111111100: if (bits & 0x1000) { goto node_11111111111111111001; } else { goto node_11111111111111111000; } node_11111111111111111000: if (bits & 0x800) { goto node_111111111111111110001; } else { goto node_111111111111111110000; } node_111111111111111110000: if (bits & 0x400) { *symbol = 181; return 22; } else { *symbol = 178; return 22; } node_111111111111111110001: if (bits & 0x400) { *symbol = 186; return 22; } else { *symbol = 185; return 22; } node_11111111111111111001: if (bits & 0x800) { goto node_111111111111111110011; } else { goto node_111111111111111110010; } node_111111111111111110010: if (bits & 0x400) { *symbol = 189; return 22; } else { *symbol = 187; return 22; } node_111111111111111110011: if (bits & 0x400) { *symbol = 196; return 22; } else { *symbol = 190; return 22; } node_1111111111111111101: if (bits & 0x1000) { goto node_11111111111111111011; } else { goto node_11111111111111111010; } node_11111111111111111010: if (bits & 0x800) { goto node_111111111111111110101; } else { goto node_111111111111111110100; } node_111111111111111110100: if (bits & 0x400) { *symbol = 228; return 22; } else { *symbol = 198; return 
22; } node_111111111111111110101: if (bits & 0x400) { *symbol = 233; return 22; } else { *symbol = 232; return 22; } node_11111111111111111011: if (bits & 0x800) { goto node_111111111111111110111; } else { goto node_111111111111111110110; } node_111111111111111110110: if (bits & 0x400) { goto node_1111111111111111101101; } else { goto node_1111111111111111101100; } node_1111111111111111101100: if (bits & 0x200) { *symbol = 135; return 23; } else { *symbol = 1; return 23; } node_1111111111111111101101: if (bits & 0x200) { *symbol = 138; return 23; } else { *symbol = 137; return 23; } node_111111111111111110111: if (bits & 0x400) { goto node_1111111111111111101111; } else { goto node_1111111111111111101110; } node_1111111111111111101110: if (bits & 0x200) { *symbol = 140; return 23; } else { *symbol = 139; return 23; } node_1111111111111111101111: if (bits & 0x200) { *symbol = 143; return 23; } else { *symbol = 141; return 23; } node_111111111111111111: if (bits & 0x2000) { goto node_1111111111111111111; } else { goto node_1111111111111111110; } node_1111111111111111110: if (bits & 0x1000) { goto node_11111111111111111101; } else { goto node_11111111111111111100; } node_11111111111111111100: if (bits & 0x800) { goto node_111111111111111111001; } else { goto node_111111111111111111000; } node_111111111111111111000: if (bits & 0x400) { goto node_1111111111111111110001; } else { goto node_1111111111111111110000; } node_1111111111111111110000: if (bits & 0x200) { *symbol = 149; return 23; } else { *symbol = 147; return 23; } node_1111111111111111110001: if (bits & 0x200) { *symbol = 151; return 23; } else { *symbol = 150; return 23; } node_111111111111111111001: if (bits & 0x400) { goto node_1111111111111111110011; } else { goto node_1111111111111111110010; } node_1111111111111111110010: if (bits & 0x200) { *symbol = 155; return 23; } else { *symbol = 152; return 23; } node_1111111111111111110011: if (bits & 0x200) { *symbol = 158; return 23; } else { *symbol = 157; return 23; } node_11111111111111111101: if (bits & 0x800) { goto node_111111111111111111011; } else { goto node_111111111111111111010; } node_111111111111111111010: if (bits & 0x400) { goto node_1111111111111111110101; } else { goto node_1111111111111111110100; } node_1111111111111111110100: if (bits & 0x200) { *symbol = 166; return 23; } else { *symbol = 165; return 23; } node_1111111111111111110101: if (bits & 0x200) { *symbol = 174; return 23; } else { *symbol = 168; return 23; } node_111111111111111111011: if (bits & 0x400) { goto node_1111111111111111110111; } else { goto node_1111111111111111110110; } node_1111111111111111110110: if (bits & 0x200) { *symbol = 180; return 23; } else { *symbol = 175; return 23; } node_1111111111111111110111: if (bits & 0x200) { *symbol = 183; return 23; } else { *symbol = 182; return 23; } node_1111111111111111111: if (bits & 0x1000) { goto node_11111111111111111111; } else { goto node_11111111111111111110; } node_11111111111111111110: if (bits & 0x800) { goto node_111111111111111111101; } else { goto node_111111111111111111100; } node_111111111111111111100: if (bits & 0x400) { goto node_1111111111111111111001; } else { goto node_1111111111111111111000; } node_1111111111111111111000: if (bits & 0x200) { *symbol = 191; return 23; } else { *symbol = 188; return 23; } node_1111111111111111111001: if (bits & 0x200) { *symbol = 231; return 23; } else { *symbol = 197; return 23; } node_111111111111111111101: if (bits & 0x400) { goto node_1111111111111111111011; } else { goto 
node_1111111111111111111010; } node_1111111111111111111010: if (bits & 0x200) { goto node_11111111111111111110101; } else { *symbol = 239; return 23; } node_11111111111111111110101: if (bits & 0x100) { *symbol = 142; return 24; } else { *symbol = 9; return 24; } node_1111111111111111111011: if (bits & 0x200) { goto node_11111111111111111110111; } else { goto node_11111111111111111110110; } node_11111111111111111110110: if (bits & 0x100) { *symbol = 145; return 24; } else { *symbol = 144; return 24; } node_11111111111111111110111: if (bits & 0x100) { *symbol = 159; return 24; } else { *symbol = 148; return 24; } node_11111111111111111111: if (bits & 0x800) { goto node_111111111111111111111; } else { goto node_111111111111111111110; } node_111111111111111111110: if (bits & 0x400) { goto node_1111111111111111111101; } else { goto node_1111111111111111111100; } node_1111111111111111111100: if (bits & 0x200) { goto node_11111111111111111111001; } else { goto node_11111111111111111111000; } node_11111111111111111111000: if (bits & 0x100) { *symbol = 206; return 24; } else { *symbol = 171; return 24; } node_11111111111111111111001: if (bits & 0x100) { *symbol = 225; return 24; } else { *symbol = 215; return 24; } node_1111111111111111111101: if (bits & 0x200) { goto node_11111111111111111111011; } else { goto node_11111111111111111111010; } node_11111111111111111111010: if (bits & 0x100) { *symbol = 237; return 24; } else { *symbol = 236; return 24; } node_11111111111111111111011: if (bits & 0x100) { goto node_111111111111111111110111; } else { goto node_111111111111111111110110; } node_111111111111111111110110: if (bits & 0x80) { *symbol = 207; return 25; } else { *symbol = 199; return 25; } node_111111111111111111110111: if (bits & 0x80) { *symbol = 235; return 25; } else { *symbol = 234; return 25; } node_111111111111111111111: if (bits & 0x400) { goto node_1111111111111111111111; } else { goto node_1111111111111111111110; } node_1111111111111111111110: if (bits & 0x200) { goto node_11111111111111111111101; } else { goto node_11111111111111111111100; } node_11111111111111111111100: if (bits & 0x100) { goto node_111111111111111111111001; } else { goto node_111111111111111111111000; } node_111111111111111111111000: if (bits & 0x80) { goto node_1111111111111111111110001; } else { goto node_1111111111111111111110000; } node_1111111111111111111110000: if (bits & 0x40) { *symbol = 193; return 26; } else { *symbol = 192; return 26; } node_1111111111111111111110001: if (bits & 0x40) { *symbol = 201; return 26; } else { *symbol = 200; return 26; } node_111111111111111111111001: if (bits & 0x80) { goto node_1111111111111111111110011; } else { goto node_1111111111111111111110010; } node_1111111111111111111110010: if (bits & 0x40) { *symbol = 205; return 26; } else { *symbol = 202; return 26; } node_1111111111111111111110011: if (bits & 0x40) { *symbol = 213; return 26; } else { *symbol = 210; return 26; } node_11111111111111111111101: if (bits & 0x100) { goto node_111111111111111111111011; } else { goto node_111111111111111111111010; } node_111111111111111111111010: if (bits & 0x80) { goto node_1111111111111111111110101; } else { goto node_1111111111111111111110100; } node_1111111111111111111110100: if (bits & 0x40) { *symbol = 219; return 26; } else { *symbol = 218; return 26; } node_1111111111111111111110101: if (bits & 0x40) { *symbol = 240; return 26; } else { *symbol = 238; return 26; } node_111111111111111111111011: if (bits & 0x80) { goto node_1111111111111111111110111; } else { goto 
node_1111111111111111111110110; } node_1111111111111111111110110: if (bits & 0x40) { *symbol = 243; return 26; } else { *symbol = 242; return 26; } node_1111111111111111111110111: if (bits & 0x40) { goto node_11111111111111111111101111; } else { *symbol = 255; return 26; } node_11111111111111111111101111: if (bits & 0x20) { *symbol = 204; return 27; } else { *symbol = 203; return 27; } node_1111111111111111111111: if (bits & 0x200) { goto node_11111111111111111111111; } else { goto node_11111111111111111111110; } node_11111111111111111111110: if (bits & 0x100) { goto node_111111111111111111111101; } else { goto node_111111111111111111111100; } node_111111111111111111111100: if (bits & 0x80) { goto node_1111111111111111111111001; } else { goto node_1111111111111111111111000; } node_1111111111111111111111000: if (bits & 0x40) { goto node_11111111111111111111110001; } else { goto node_11111111111111111111110000; } node_11111111111111111111110000: if (bits & 0x20) { *symbol = 212; return 27; } else { *symbol = 211; return 27; } node_11111111111111111111110001: if (bits & 0x20) { *symbol = 221; return 27; } else { *symbol = 214; return 27; } node_1111111111111111111111001: if (bits & 0x40) { goto node_11111111111111111111110011; } else { goto node_11111111111111111111110010; } node_11111111111111111111110010: if (bits & 0x20) { *symbol = 223; return 27; } else { *symbol = 222; return 27; } node_11111111111111111111110011: if (bits & 0x20) { *symbol = 244; return 27; } else { *symbol = 241; return 27; } node_111111111111111111111101: if (bits & 0x80) { goto node_1111111111111111111111011; } else { goto node_1111111111111111111111010; } node_1111111111111111111111010: if (bits & 0x40) { goto node_11111111111111111111110101; } else { goto node_11111111111111111111110100; } node_11111111111111111111110100: if (bits & 0x20) { *symbol = 246; return 27; } else { *symbol = 245; return 27; } node_11111111111111111111110101: if (bits & 0x20) { *symbol = 248; return 27; } else { *symbol = 247; return 27; } node_1111111111111111111111011: if (bits & 0x40) { goto node_11111111111111111111110111; } else { goto node_11111111111111111111110110; } node_11111111111111111111110110: if (bits & 0x20) { *symbol = 251; return 27; } else { *symbol = 250; return 27; } node_11111111111111111111110111: if (bits & 0x20) { *symbol = 253; return 27; } else { *symbol = 252; return 27; } node_11111111111111111111111: if (bits & 0x100) { goto node_111111111111111111111111; } else { goto node_111111111111111111111110; } node_111111111111111111111110: if (bits & 0x80) { goto node_1111111111111111111111101; } else { goto node_1111111111111111111111100; } node_1111111111111111111111100: if (bits & 0x40) { goto node_11111111111111111111111001; } else { goto node_11111111111111111111111000; } node_11111111111111111111111000: if (bits & 0x20) { goto node_111111111111111111111110001; } else { *symbol = 254; return 27; } node_111111111111111111111110001: if (bits & 0x10) { *symbol = 3; return 28; } else { *symbol = 2; return 28; } node_11111111111111111111111001: if (bits & 0x20) { goto node_111111111111111111111110011; } else { goto node_111111111111111111111110010; } node_111111111111111111111110010: if (bits & 0x10) { *symbol = 5; return 28; } else { *symbol = 4; return 28; } node_111111111111111111111110011: if (bits & 0x10) { *symbol = 7; return 28; } else { *symbol = 6; return 28; } node_1111111111111111111111101: if (bits & 0x40) { goto node_11111111111111111111111011; } else { goto node_11111111111111111111111010; } 
node_11111111111111111111111010: if (bits & 0x20) { goto node_111111111111111111111110101; } else { goto node_111111111111111111111110100; } node_111111111111111111111110100: if (bits & 0x10) { *symbol = 11; return 28; } else { *symbol = 8; return 28; } node_111111111111111111111110101: if (bits & 0x10) { *symbol = 14; return 28; } else { *symbol = 12; return 28; } node_11111111111111111111111011: if (bits & 0x20) { goto node_111111111111111111111110111; } else { goto node_111111111111111111111110110; } node_111111111111111111111110110: if (bits & 0x10) { *symbol = 16; return 28; } else { *symbol = 15; return 28; } node_111111111111111111111110111: if (bits & 0x10) { *symbol = 18; return 28; } else { *symbol = 17; return 28; } node_111111111111111111111111: if (bits & 0x80) { goto node_1111111111111111111111111; } else { goto node_1111111111111111111111110; } node_1111111111111111111111110: if (bits & 0x40) { goto node_11111111111111111111111101; } else { goto node_11111111111111111111111100; } node_11111111111111111111111100: if (bits & 0x20) { goto node_111111111111111111111111001; } else { goto node_111111111111111111111111000; } node_111111111111111111111111000: if (bits & 0x10) { *symbol = 20; return 28; } else { *symbol = 19; return 28; } node_111111111111111111111111001: if (bits & 0x10) { *symbol = 23; return 28; } else { *symbol = 21; return 28; } node_11111111111111111111111101: if (bits & 0x20) { goto node_111111111111111111111111011; } else { goto node_111111111111111111111111010; } node_111111111111111111111111010: if (bits & 0x10) { *symbol = 25; return 28; } else { *symbol = 24; return 28; } node_111111111111111111111111011: if (bits & 0x10) { *symbol = 27; return 28; } else { *symbol = 26; return 28; } node_1111111111111111111111111: if (bits & 0x40) { goto node_11111111111111111111111111; } else { goto node_11111111111111111111111110; } node_11111111111111111111111110: if (bits & 0x20) { goto node_111111111111111111111111101; } else { goto node_111111111111111111111111100; } node_111111111111111111111111100: if (bits & 0x10) { *symbol = 29; return 28; } else { *symbol = 28; return 28; } node_111111111111111111111111101: if (bits & 0x10) { *symbol = 31; return 28; } else { *symbol = 30; return 28; } node_11111111111111111111111111: if (bits & 0x20) { goto node_111111111111111111111111111; } else { goto node_111111111111111111111111110; } node_111111111111111111111111110: if (bits & 0x10) { *symbol = 220; return 28; } else { *symbol = 127; return 28; } node_111111111111111111111111111: if (bits & 0x10) { goto node_1111111111111111111111111111; } else { *symbol = 249; return 28; } node_1111111111111111111111111111: if (bits & 0x8) { goto node_11111111111111111111111111111; } else { goto node_11111111111111111111111111110; } node_11111111111111111111111111110: if (bits & 0x4) { *symbol = 13; return 30; } else { *symbol = 10; return 30; } node_11111111111111111111111111111: if (bits & 0x4) { return 0; /* invalid node */ } else { *symbol = 22; return 30; } } struct aws_huffman_symbol_coder *hpack_get_coder(void) { static struct aws_huffman_symbol_coder coder = { .encode = encode_symbol, .decode = decode_symbol, .userdata = NULL, }; return &coder; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/http.c000066400000000000000000000635141456575232400226430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #define AWS_DEFINE_ERROR_INFO_HTTP(CODE, STR) [(CODE)-0x0800] = AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-http") /* clang-format off */ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_UNKNOWN, "Encountered an unknown error."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_HEADER_NOT_FOUND, "The specified header was not found"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_HEADER_FIELD, "Invalid header field, including a forbidden header field."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_HEADER_NAME, "Invalid header name."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_HEADER_VALUE, "Invalid header value."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_METHOD, "Method is invalid."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_PATH, "Path is invalid."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_STATUS_CODE, "Status code is invalid."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_MISSING_BODY_STREAM, "Given the provided headers (ex: Content-Length), a body is expected."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_BODY_STREAM, "A body stream provided, but the message does not allow body (ex: response for HEAD Request and 304 response)"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_CONNECTION_CLOSED, "The connection has closed or is closing."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_SWITCHED_PROTOCOLS, "The connection has switched protocols."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL, "An unsupported protocol was encountered."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_REACTION_REQUIRED, "A necessary function was not invoked from a user callback."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_DATA_NOT_AVAILABLE, "This data is not yet available."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT, "Amount of data streamed out does not match the previously declared length."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_CALLBACK_FAILURE, "A callback has reported failure."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE, "Failed to upgrade HTTP connection to Websocket."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT, "Websocket has sent CLOSE frame, no more data will be sent."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER, "Operation cannot be performed because websocket has been converted to a midchannel handler."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_CONNECTION_MANAGER_INVALID_STATE_FOR_ACQUIRE, "Acquire called after the connection manager's ref count has reached zero"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW, "Release called when the connection manager's vended connection count was zero"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_SERVER_CLOSED, "The http server is closed, no more connections will be accepted"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, "Proxy-based connection establishment failed because the CONNECT call failed"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN, "Connection acquisition failed because connection manager is shutting down"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE, "Http connection channel shut down due to failure to meet throughput minimum"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_PROTOCOL_ERROR, "Protocol rules violated by peer"), 
AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED, "Connection exhausted all possible HTTP-stream IDs. Establish a new connection for new streams."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_GOAWAY_RECEIVED, "Peer sent GOAWAY to initiate connection shutdown. Establish a new connection to retry the HTTP-streams."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_RST_STREAM_RECEIVED, "Peer sent RST_STREAM to terminate HTTP-stream."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_RST_STREAM_SENT, "RST_STREAM has been sent from the local implementation and the HTTP-stream has been terminated."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED, "HTTP-stream must be activated before use."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_STREAM_HAS_COMPLETED, "HTTP-stream has completed, action cannot be performed."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING, "NTLM Proxy strategy was initiated without a challenge token"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE, "Failure in user code while retrieving proxy auth token"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE, "Proxy connection attempt failed but the negotiation could be continued on a new connection"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE, "Internal state failure prevented the connection from switching protocols"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED, "Max concurrent streams reached"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN, "Stream acquisition failed because stream manager is shutting down"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE, "Stream acquisition failed because stream manager failed to acquire a connection"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION, "Stream acquisition failed because the stream manager got an unexpected HTTP version for the connection"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, "Websocket protocol rules violated by peer"), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED, "Manual write failed because manual writes are not enabled."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED, "Manual write failed because manual writes are already completed."), AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT, "The server did not begin responding within the configured time after the request was fully sent."), }; /* clang-format on */ static struct aws_error_info_list s_error_list = { .error_list = s_errors, .count = AWS_ARRAY_SIZE(s_errors), }; static struct aws_log_subject_info s_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_GENERAL, "http", "Misc HTTP logging"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_CONNECTION, "http-connection", "HTTP client or server connection"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_ENCODER, "http-encoder", "HTTP data encoder"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_DECODER, "http-decoder", "HTTP data decoder"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_SERVER, "http-server", "HTTP server socket listening for incoming connections"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_STREAM, "http-stream", "HTTP request-response exchange"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "connection-manager", "HTTP connection manager"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_STREAM_MANAGER, "http2-stream-manager", "HTTP/2 stream manager"), 
DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET, "websocket", "Websocket"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET_SETUP, "websocket-setup", "Websocket setup"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_HTTP_PROXY_NEGOTIATION, "proxy-negotiation", "Negotiating an http connection with a proxy server"), }; static struct aws_log_subject_info_list s_log_subject_list = { .subject_list = s_log_subject_infos, .count = AWS_ARRAY_SIZE(s_log_subject_infos), }; struct aws_enum_value { struct aws_allocator *allocator; int value; }; static void s_destroy_enum_value(void *value) { struct aws_enum_value *enum_value = value; aws_mem_release(enum_value->allocator, enum_value); } /** * Given array of aws_byte_cursors, init hashtable where... * Key is aws_byte_cursor* (pointing into cursor from array) and comparisons are case-insensitive. * Value is the array index cast to a void*. */ static void s_init_str_to_enum_hash_table( struct aws_hash_table *table, struct aws_allocator *alloc, struct aws_byte_cursor *str_array, int start_index, int end_index, bool ignore_case) { int err = aws_hash_table_init( table, alloc, end_index - start_index, ignore_case ? aws_hash_byte_cursor_ptr_ignore_case : aws_hash_byte_cursor_ptr, (aws_hash_callback_eq_fn *)(ignore_case ? aws_byte_cursor_eq_ignore_case : aws_byte_cursor_eq), NULL, s_destroy_enum_value); AWS_FATAL_ASSERT(!err); for (int i = start_index; i < end_index; ++i) { int was_created = 0; struct aws_enum_value *enum_value = aws_mem_calloc(alloc, 1, sizeof(struct aws_enum_value)); AWS_FATAL_ASSERT(enum_value); enum_value->allocator = alloc; enum_value->value = i; AWS_FATAL_ASSERT(str_array[i].ptr && "Missing enum string"); err = aws_hash_table_put(table, &str_array[i], (void *)enum_value, &was_created); AWS_FATAL_ASSERT(!err && was_created); } } /** * Given key, get value from table initialized by s_init_str_to_enum_hash_table(). * Returns -1 if key not found. 
*/ static int s_find_in_str_to_enum_hash_table(const struct aws_hash_table *table, struct aws_byte_cursor *key) { struct aws_hash_element *elem; aws_hash_table_find(table, key, &elem); if (elem) { struct aws_enum_value *enum_value = elem->value; return enum_value->value; } return -1; } /* METHODS */ static struct aws_hash_table s_method_str_to_enum; /* for string -> enum lookup */ static struct aws_byte_cursor s_method_enum_to_str[AWS_HTTP_METHOD_COUNT]; /* for enum -> string lookup */ static void s_methods_init(struct aws_allocator *alloc) { s_method_enum_to_str[AWS_HTTP_METHOD_GET] = aws_http_method_get; s_method_enum_to_str[AWS_HTTP_METHOD_HEAD] = aws_http_method_head; s_method_enum_to_str[AWS_HTTP_METHOD_CONNECT] = aws_http_method_connect; s_init_str_to_enum_hash_table( &s_method_str_to_enum, alloc, s_method_enum_to_str, AWS_HTTP_METHOD_UNKNOWN + 1, AWS_HTTP_METHOD_COUNT, false /* DO NOT ignore case of method */); } static void s_methods_clean_up(void) { aws_hash_table_clean_up(&s_method_str_to_enum); } enum aws_http_method aws_http_str_to_method(struct aws_byte_cursor cursor) { int method = s_find_in_str_to_enum_hash_table(&s_method_str_to_enum, &cursor); if (method >= 0) { return (enum aws_http_method)method; } return AWS_HTTP_METHOD_UNKNOWN; } /* VERSIONS */ static struct aws_byte_cursor s_version_enum_to_str[AWS_HTTP_HEADER_COUNT]; /* for enum -> string lookup */ static void s_versions_init(struct aws_allocator *alloc) { (void)alloc; s_version_enum_to_str[AWS_HTTP_VERSION_UNKNOWN] = aws_byte_cursor_from_c_str("Unknown"); s_version_enum_to_str[AWS_HTTP_VERSION_1_0] = aws_byte_cursor_from_c_str("HTTP/1.0"); s_version_enum_to_str[AWS_HTTP_VERSION_1_1] = aws_byte_cursor_from_c_str("HTTP/1.1"); s_version_enum_to_str[AWS_HTTP_VERSION_2] = aws_byte_cursor_from_c_str("HTTP/2"); } static void s_versions_clean_up(void) {} struct aws_byte_cursor aws_http_version_to_str(enum aws_http_version version) { if ((int)version < AWS_HTTP_VERSION_UNKNOWN || (int)version >= AWS_HTTP_VERSION_COUNT) { version = AWS_HTTP_VERSION_UNKNOWN; } return s_version_enum_to_str[version]; } /* HEADERS */ static struct aws_hash_table s_header_str_to_enum; /* for case-insensitive string -> enum lookup */ static struct aws_hash_table s_lowercase_header_str_to_enum; /* for case-sensitive string -> enum lookup */ static struct aws_byte_cursor s_header_enum_to_str[AWS_HTTP_HEADER_COUNT]; /* for enum -> string lookup */ static void s_headers_init(struct aws_allocator *alloc) { s_header_enum_to_str[AWS_HTTP_HEADER_METHOD] = aws_byte_cursor_from_c_str(":method"); s_header_enum_to_str[AWS_HTTP_HEADER_SCHEME] = aws_byte_cursor_from_c_str(":scheme"); s_header_enum_to_str[AWS_HTTP_HEADER_AUTHORITY] = aws_byte_cursor_from_c_str(":authority"); s_header_enum_to_str[AWS_HTTP_HEADER_PATH] = aws_byte_cursor_from_c_str(":path"); s_header_enum_to_str[AWS_HTTP_HEADER_STATUS] = aws_byte_cursor_from_c_str(":status"); s_header_enum_to_str[AWS_HTTP_HEADER_COOKIE] = aws_byte_cursor_from_c_str("cookie"); s_header_enum_to_str[AWS_HTTP_HEADER_SET_COOKIE] = aws_byte_cursor_from_c_str("set-cookie"); s_header_enum_to_str[AWS_HTTP_HEADER_HOST] = aws_byte_cursor_from_c_str("host"); s_header_enum_to_str[AWS_HTTP_HEADER_CONNECTION] = aws_byte_cursor_from_c_str("connection"); s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_LENGTH] = aws_byte_cursor_from_c_str("content-length"); s_header_enum_to_str[AWS_HTTP_HEADER_EXPECT] = aws_byte_cursor_from_c_str("expect"); s_header_enum_to_str[AWS_HTTP_HEADER_TRANSFER_ENCODING] = 
aws_byte_cursor_from_c_str("transfer-encoding"); s_header_enum_to_str[AWS_HTTP_HEADER_CACHE_CONTROL] = aws_byte_cursor_from_c_str("cache-control"); s_header_enum_to_str[AWS_HTTP_HEADER_MAX_FORWARDS] = aws_byte_cursor_from_c_str("max-forwards"); s_header_enum_to_str[AWS_HTTP_HEADER_PRAGMA] = aws_byte_cursor_from_c_str("pragma"); s_header_enum_to_str[AWS_HTTP_HEADER_RANGE] = aws_byte_cursor_from_c_str("range"); s_header_enum_to_str[AWS_HTTP_HEADER_TE] = aws_byte_cursor_from_c_str("te"); s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_ENCODING] = aws_byte_cursor_from_c_str("content-encoding"); s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_TYPE] = aws_byte_cursor_from_c_str("content-type"); s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_RANGE] = aws_byte_cursor_from_c_str("content-range"); s_header_enum_to_str[AWS_HTTP_HEADER_TRAILER] = aws_byte_cursor_from_c_str("trailer"); s_header_enum_to_str[AWS_HTTP_HEADER_WWW_AUTHENTICATE] = aws_byte_cursor_from_c_str("www-authenticate"); s_header_enum_to_str[AWS_HTTP_HEADER_AUTHORIZATION] = aws_byte_cursor_from_c_str("authorization"); s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_AUTHENTICATE] = aws_byte_cursor_from_c_str("proxy-authenticate"); s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_AUTHORIZATION] = aws_byte_cursor_from_c_str("proxy-authorization"); s_header_enum_to_str[AWS_HTTP_HEADER_AGE] = aws_byte_cursor_from_c_str("age"); s_header_enum_to_str[AWS_HTTP_HEADER_EXPIRES] = aws_byte_cursor_from_c_str("expires"); s_header_enum_to_str[AWS_HTTP_HEADER_DATE] = aws_byte_cursor_from_c_str("date"); s_header_enum_to_str[AWS_HTTP_HEADER_LOCATION] = aws_byte_cursor_from_c_str("location"); s_header_enum_to_str[AWS_HTTP_HEADER_RETRY_AFTER] = aws_byte_cursor_from_c_str("retry-after"); s_header_enum_to_str[AWS_HTTP_HEADER_VARY] = aws_byte_cursor_from_c_str("vary"); s_header_enum_to_str[AWS_HTTP_HEADER_WARNING] = aws_byte_cursor_from_c_str("warning"); s_header_enum_to_str[AWS_HTTP_HEADER_UPGRADE] = aws_byte_cursor_from_c_str("upgrade"); s_header_enum_to_str[AWS_HTTP_HEADER_KEEP_ALIVE] = aws_byte_cursor_from_c_str("keep-alive"); s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_CONNECTION] = aws_byte_cursor_from_c_str("proxy-connection"); s_init_str_to_enum_hash_table( &s_header_str_to_enum, alloc, s_header_enum_to_str, AWS_HTTP_HEADER_UNKNOWN + 1, AWS_HTTP_HEADER_COUNT, true /* ignore case */); s_init_str_to_enum_hash_table( &s_lowercase_header_str_to_enum, alloc, s_header_enum_to_str, AWS_HTTP_HEADER_UNKNOWN + 1, AWS_HTTP_HEADER_COUNT, false /* ignore case */); } static void s_headers_clean_up(void) { aws_hash_table_clean_up(&s_header_str_to_enum); aws_hash_table_clean_up(&s_lowercase_header_str_to_enum); } enum aws_http_header_name aws_http_str_to_header_name(struct aws_byte_cursor cursor) { int header = s_find_in_str_to_enum_hash_table(&s_header_str_to_enum, &cursor); if (header >= 0) { return (enum aws_http_header_name)header; } return AWS_HTTP_HEADER_UNKNOWN; } enum aws_http_header_name aws_http_lowercase_str_to_header_name(struct aws_byte_cursor cursor) { int header = s_find_in_str_to_enum_hash_table(&s_lowercase_header_str_to_enum, &cursor); if (header >= 0) { return (enum aws_http_header_name)header; } return AWS_HTTP_HEADER_UNKNOWN; } /* STATUS */ const char *aws_http_status_text(int status_code) { /** * Data from Internet Assigned Numbers Authority (IANA): * https://www.iana.org/assignments/http-status-codes/http-status-codes.txt */ switch (status_code) { case AWS_HTTP_STATUS_CODE_100_CONTINUE: return "Continue"; case AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS: return 
"Switching Protocols"; case AWS_HTTP_STATUS_CODE_102_PROCESSING: return "Processing"; case AWS_HTTP_STATUS_CODE_103_EARLY_HINTS: return "Early Hints"; case AWS_HTTP_STATUS_CODE_200_OK: return "OK"; case AWS_HTTP_STATUS_CODE_201_CREATED: return "Created"; case AWS_HTTP_STATUS_CODE_202_ACCEPTED: return "Accepted"; case AWS_HTTP_STATUS_CODE_203_NON_AUTHORITATIVE_INFORMATION: return "Non-Authoritative Information"; case AWS_HTTP_STATUS_CODE_204_NO_CONTENT: return "No Content"; case AWS_HTTP_STATUS_CODE_205_RESET_CONTENT: return "Reset Content"; case AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT: return "Partial Content"; case AWS_HTTP_STATUS_CODE_207_MULTI_STATUS: return "Multi-Status"; case AWS_HTTP_STATUS_CODE_208_ALREADY_REPORTED: return "Already Reported"; case AWS_HTTP_STATUS_CODE_226_IM_USED: return "IM Used"; case AWS_HTTP_STATUS_CODE_300_MULTIPLE_CHOICES: return "Multiple Choices"; case AWS_HTTP_STATUS_CODE_301_MOVED_PERMANENTLY: return "Moved Permanently"; case AWS_HTTP_STATUS_CODE_302_FOUND: return "Found"; case AWS_HTTP_STATUS_CODE_303_SEE_OTHER: return "See Other"; case AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED: return "Not Modified"; case AWS_HTTP_STATUS_CODE_305_USE_PROXY: return "Use Proxy"; case AWS_HTTP_STATUS_CODE_307_TEMPORARY_REDIRECT: return "Temporary Redirect"; case AWS_HTTP_STATUS_CODE_308_PERMANENT_REDIRECT: return "Permanent Redirect"; case AWS_HTTP_STATUS_CODE_400_BAD_REQUEST: return "Bad Request"; case AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED: return "Unauthorized"; case AWS_HTTP_STATUS_CODE_402_PAYMENT_REQUIRED: return "Payment Required"; case AWS_HTTP_STATUS_CODE_403_FORBIDDEN: return "Forbidden"; case AWS_HTTP_STATUS_CODE_404_NOT_FOUND: return "Not Found"; case AWS_HTTP_STATUS_CODE_405_METHOD_NOT_ALLOWED: return "Method Not Allowed"; case AWS_HTTP_STATUS_CODE_406_NOT_ACCEPTABLE: return "Not Acceptable"; case AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED: return "Proxy Authentication Required"; case AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT: return "Request Timeout"; case AWS_HTTP_STATUS_CODE_409_CONFLICT: return "Conflict"; case AWS_HTTP_STATUS_CODE_410_GONE: return "Gone"; case AWS_HTTP_STATUS_CODE_411_LENGTH_REQUIRED: return "Length Required"; case AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED: return "Precondition Failed"; case AWS_HTTP_STATUS_CODE_413_REQUEST_ENTITY_TOO_LARGE: return "Payload Too Large"; case AWS_HTTP_STATUS_CODE_414_REQUEST_URI_TOO_LONG: return "URI Too Long"; case AWS_HTTP_STATUS_CODE_415_UNSUPPORTED_MEDIA_TYPE: return "Unsupported Media Type"; case AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE: return "Range Not Satisfiable"; case AWS_HTTP_STATUS_CODE_417_EXPECTATION_FAILED: return "Expectation Failed"; case AWS_HTTP_STATUS_CODE_421_MISDIRECTED_REQUEST: return "Misdirected Request"; case AWS_HTTP_STATUS_CODE_422_UNPROCESSABLE_ENTITY: return "Unprocessable Entity"; case AWS_HTTP_STATUS_CODE_423_LOCKED: return "Locked"; case AWS_HTTP_STATUS_CODE_424_FAILED_DEPENDENCY: return "Failed Dependency"; case AWS_HTTP_STATUS_CODE_425_TOO_EARLY: return "Too Early"; case AWS_HTTP_STATUS_CODE_426_UPGRADE_REQUIRED: return "Upgrade Required"; case AWS_HTTP_STATUS_CODE_428_PRECONDITION_REQUIRED: return "Precondition Required"; case AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS: return "Too Many Requests"; case AWS_HTTP_STATUS_CODE_431_REQUEST_HEADER_FIELDS_TOO_LARGE: return "Request Header Fields Too Large"; case AWS_HTTP_STATUS_CODE_451_UNAVAILABLE_FOR_LEGAL_REASON: return "Unavailable For Legal Reasons"; case 
AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR: return "Internal Server Error"; case AWS_HTTP_STATUS_CODE_501_NOT_IMPLEMENTED: return "Not Implemented"; case AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY: return "Bad Gateway"; case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE: return "Service Unavailable"; case AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT: return "Gateway Timeout"; case AWS_HTTP_STATUS_CODE_505_HTTP_VERSION_NOT_SUPPORTED: return "HTTP Version Not Supported"; case AWS_HTTP_STATUS_CODE_506_VARIANT_ALSO_NEGOTIATES: return "Variant Also Negotiates"; case AWS_HTTP_STATUS_CODE_507_INSUFFICIENT_STORAGE: return "Insufficient Storage"; case AWS_HTTP_STATUS_CODE_508_LOOP_DETECTED: return "Loop Detected"; case AWS_HTTP_STATUS_CODE_510_NOT_EXTENDED: return "Not Extended"; case AWS_HTTP_STATUS_CODE_511_NETWORK_AUTHENTICATION_REQUIRED: return "Network Authentication Required"; default: return ""; } } static bool s_library_initialized = false; void aws_http_library_init(struct aws_allocator *alloc) { if (s_library_initialized) { return; } s_library_initialized = true; aws_io_library_init(alloc); aws_compression_library_init(alloc); aws_register_error_info(&s_error_list); aws_register_log_subject_info_list(&s_log_subject_list); s_methods_init(alloc); s_headers_init(alloc); s_versions_init(alloc); aws_hpack_static_table_init(alloc); } void aws_http_library_clean_up(void) { if (!s_library_initialized) { return; } s_library_initialized = false; aws_thread_join_all_managed(); aws_unregister_error_info(&s_error_list); aws_unregister_log_subject_info_list(&s_log_subject_list); s_methods_clean_up(); s_headers_clean_up(); s_versions_clean_up(); aws_hpack_static_table_clean_up(); aws_compression_library_clean_up(); aws_io_library_clean_up(); } void aws_http_fatal_assert_library_initialized(void) { if (!s_library_initialized) { AWS_LOGF_FATAL( AWS_LS_HTTP_GENERAL, "aws_http_library_init() must be called before using any functionality in aws-c-http."); AWS_FATAL_ASSERT(s_library_initialized); } } const struct aws_byte_cursor aws_http_method_get = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET"); const struct aws_byte_cursor aws_http_method_head = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD"); const struct aws_byte_cursor aws_http_method_post = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST"); const struct aws_byte_cursor aws_http_method_put = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PUT"); const struct aws_byte_cursor aws_http_method_delete = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DELETE"); const struct aws_byte_cursor aws_http_method_connect = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CONNECT"); const struct aws_byte_cursor aws_http_method_options = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("OPTIONS"); const struct aws_byte_cursor aws_http_header_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":method"); const struct aws_byte_cursor aws_http_header_scheme = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":scheme"); const struct aws_byte_cursor aws_http_header_authority = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":authority"); const struct aws_byte_cursor aws_http_header_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":path"); const struct aws_byte_cursor aws_http_header_status = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":status"); const struct aws_byte_cursor aws_http_scheme_http = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http"); const struct aws_byte_cursor aws_http_scheme_https = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https"); 
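/*
 * A minimal usage sketch of the helpers defined in http.c above. The header path
 * <aws/http/http.h>, the <stdio.h> usage, and aws_default_allocator() are assumptions
 * about the wider CRT / aws-c-common API rather than something shown in this file;
 * aws_http_library_init(), aws_http_str_to_method(), aws_http_version_to_str(),
 * aws_http_status_text() and aws_http_library_clean_up() are taken from the definitions
 * above. aws_http_library_init() is guarded by s_library_initialized, so calling it more
 * than once is a no-op.
 */
#include <aws/http/http.h> /* assumed public header exposing the functions defined above */
#include <stdio.h>

int main(void) {
    /* One-time setup: registers error strings, log subjects, and the lookup tables. */
    aws_http_library_init(aws_default_allocator()); /* aws_default_allocator(): assumed aws-c-common helper */

    /* Method lookup is case-sensitive; "GET" matches the static table, "get" would not. */
    enum aws_http_method method = aws_http_str_to_method(aws_byte_cursor_from_c_str("GET"));
    printf("method enum value: %d\n", (int)method);

    /* Version and status helpers return printable text. */
    struct aws_byte_cursor version = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
    printf(
        "version: %.*s, status 200: %s\n",
        (int)version.len,
        (const char *)version.ptr,
        aws_http_status_text(AWS_HTTP_STATUS_CODE_200_OK));

    aws_http_library_clean_up();
    return 0;
}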
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/http2_stream_manager.c000066400000000000000000001716321456575232400257730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif /* Apple toolchains such as xcode and swiftpm define the DEBUG symbol. undef it here so we can actually use the token */ #undef DEBUG #define STREAM_MANAGER_LOGF(level, stream_manager, text, ...) \ AWS_LOGF_##level(AWS_LS_HTTP_STREAM_MANAGER, "id=%p: " text, (void *)(stream_manager), __VA_ARGS__) #define STREAM_MANAGER_LOG(level, stream_manager, text) STREAM_MANAGER_LOGF(level, stream_manager, "%s", text) /* 3 seconds */ static const size_t s_default_ping_timeout_ms = 3000; static void s_stream_manager_start_destroy(struct aws_http2_stream_manager *stream_manager); static void s_aws_http2_stream_manager_build_transaction_synced(struct aws_http2_stream_management_transaction *work); static void s_aws_http2_stream_manager_execute_transaction(struct aws_http2_stream_management_transaction *work); static struct aws_h2_sm_pending_stream_acquisition *s_new_pending_stream_acquisition( struct aws_allocator *allocator, const struct aws_http_make_request_options *options, aws_http2_stream_manager_on_stream_acquired_fn *callback, void *user_data) { struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_sm_pending_stream_acquisition)); /* Copy the options and keep the underlying message alive */ pending_stream_acquisition->options = *options; pending_stream_acquisition->request = options->request; aws_http_message_acquire(pending_stream_acquisition->request); pending_stream_acquisition->callback = callback; pending_stream_acquisition->user_data = user_data; pending_stream_acquisition->allocator = allocator; return pending_stream_acquisition; } static void s_pending_stream_acquisition_destroy( struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition) { if (pending_stream_acquisition == NULL) { return; } if (pending_stream_acquisition->request) { aws_http_message_release(pending_stream_acquisition->request); } aws_mem_release(pending_stream_acquisition->allocator, pending_stream_acquisition); } static void s_lock_synced_data(struct aws_http2_stream_manager *stream_manager) { int err = aws_mutex_lock(&stream_manager->synced_data.lock); AWS_ASSERT(!err && "lock failed"); (void)err; } static void s_unlock_synced_data(struct aws_http2_stream_manager *stream_manager) { int err = aws_mutex_unlock(&stream_manager->synced_data.lock); AWS_ASSERT(!err && "unlock failed"); (void)err; } static void s_sm_log_stats_synced(struct aws_http2_stream_manager *stream_manager) { STREAM_MANAGER_LOGF( TRACE, stream_manager, "Stream manager internal counts status: " "connection acquiring=%zu, streams opening=%zu, pending make request count=%zu, pending acquisition count=%zu, " "holding connections count=%zu", stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING], stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM], stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_MAKE_REQUESTS], stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION], 
stream_manager->synced_data.holding_connections_count); } /* Count acquire and release must all be invoked while holding the lock */ static void s_sm_count_increase_synced( struct aws_http2_stream_manager *stream_manager, enum aws_sm_count_type count_type, size_t num) { stream_manager->synced_data.internal_refcount_stats[count_type] += num; for (size_t i = 0; i < num; i++) { aws_ref_count_acquire(&stream_manager->internal_ref_count); } } static void s_sm_count_decrease_synced( struct aws_http2_stream_manager *stream_manager, enum aws_sm_count_type count_type, size_t num) { stream_manager->synced_data.internal_refcount_stats[count_type] -= num; for (size_t i = 0; i < num; i++) { aws_ref_count_release(&stream_manager->internal_ref_count); } } static void s_aws_stream_management_transaction_init( struct aws_http2_stream_management_transaction *work, struct aws_http2_stream_manager *stream_manager) { AWS_ZERO_STRUCT(*work); aws_linked_list_init(&work->pending_make_requests); work->stream_manager = stream_manager; work->allocator = stream_manager->allocator; aws_ref_count_acquire(&stream_manager->internal_ref_count); } static void s_aws_stream_management_transaction_clean_up(struct aws_http2_stream_management_transaction *work) { (void)work; AWS_ASSERT(aws_linked_list_empty(&work->pending_make_requests)); aws_ref_count_release(&work->stream_manager->internal_ref_count); } static struct aws_h2_sm_connection *s_get_best_sm_connection_from_set(struct aws_random_access_set *set) { /* Use the best-of-two random choices algorithm */ int errored = AWS_ERROR_SUCCESS; struct aws_h2_sm_connection *sm_connection_a = NULL; errored = aws_random_access_set_random_get_ptr(set, (void **)&sm_connection_a); struct aws_h2_sm_connection *sm_connection_b = NULL; errored |= aws_random_access_set_random_get_ptr(set, (void **)&sm_connection_b); struct aws_h2_sm_connection *chosen_connection = sm_connection_a->num_streams_assigned > sm_connection_b->num_streams_assigned ? sm_connection_b : sm_connection_a; return errored == AWS_ERROR_SUCCESS ? chosen_connection : NULL; (void)errored; } /* Helper function for building the transaction: try to assign a connection to a pending stream acquisition */ /* *_synced should only be called with LOCK HELD or from another synced function */ static void s_sm_try_assign_connection_to_pending_stream_acquisition_synced( struct aws_http2_stream_manager *stream_manager, struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition) { AWS_ASSERT(pending_stream_acquisition->sm_connection == NULL); int errored = 0; if (aws_random_access_set_get_size(&stream_manager->synced_data.ideal_available_set)) { /** * Try assigning a connection from the ideal set */ struct aws_h2_sm_connection *chosen_connection = s_get_best_sm_connection_from_set(&stream_manager->synced_data.ideal_available_set); AWS_ASSERT(chosen_connection); pending_stream_acquisition->sm_connection = chosen_connection; chosen_connection->num_streams_assigned++; STREAM_MANAGER_LOGF( DEBUG, stream_manager, "Picking connection:%p for acquisition:%p. 
Streams assigned to the connection=%" PRIu32 "", (void *)chosen_connection->connection, (void *)pending_stream_acquisition, chosen_connection->num_streams_assigned); /* Check if connection is still available or ideal, and move it if it's not */ if (chosen_connection->num_streams_assigned >= chosen_connection->max_concurrent_streams) { /* It becomes not available for new streams any more, remove it from the set, but still alive (streams * created will track the lifetime) */ chosen_connection->state = AWS_H2SMCST_FULL; errored |= aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, chosen_connection); STREAM_MANAGER_LOGF( DEBUG, stream_manager, "connection:%p reaches max concurrent streams limits. " "Connection max limits=%" PRIu32 ". Moving it out of available connections.", (void *)chosen_connection->connection, chosen_connection->max_concurrent_streams); } else if (chosen_connection->num_streams_assigned >= stream_manager->ideal_concurrent_streams_per_connection) { /* It meets the ideal limit, but still available for new streams, move it to the nonidea-available set */ errored |= aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, chosen_connection); bool added = false; errored |= aws_random_access_set_add( &stream_manager->synced_data.nonideal_available_set, chosen_connection, &added); errored |= !added; chosen_connection->state = AWS_H2SMCST_NEARLY_FULL; STREAM_MANAGER_LOGF( DEBUG, stream_manager, "connection:%p reaches ideal concurrent streams limits. Ideal limits=%zu. Moving it to nonlimited set.", (void *)chosen_connection->connection, stream_manager->ideal_concurrent_streams_per_connection); } } else if (stream_manager->synced_data.holding_connections_count == stream_manager->max_connections) { /** * Try assigning to connection from nonideal available set. * * Note that we do not assign to nonideal connections until we're holding all the connections we can ever * possibly get. This way, we don't overfill the first connections we get our hands on. */ if (aws_random_access_set_get_size(&stream_manager->synced_data.nonideal_available_set)) { struct aws_h2_sm_connection *chosen_connection = s_get_best_sm_connection_from_set(&stream_manager->synced_data.nonideal_available_set); AWS_ASSERT(chosen_connection); pending_stream_acquisition->sm_connection = chosen_connection; chosen_connection->num_streams_assigned++; STREAM_MANAGER_LOGF( DEBUG, stream_manager, "Picking connection:%p for acquisition:%p. Streams assigned to the connection=%" PRIu32 "", (void *)chosen_connection->connection, (void *)pending_stream_acquisition, chosen_connection->num_streams_assigned); if (chosen_connection->num_streams_assigned >= chosen_connection->max_concurrent_streams) { /* It becomes not available for new streams any more, remove it from the set, but still alive (streams * created will track the lifetime) */ chosen_connection->state = AWS_H2SMCST_FULL; errored |= aws_random_access_set_remove( &stream_manager->synced_data.nonideal_available_set, chosen_connection); STREAM_MANAGER_LOGF( DEBUG, stream_manager, "connection %p reaches max concurrent streams limits. " "Connection max limits=%" PRIu32 ". 
Moving it out of available connections.", (void *)chosen_connection->connection, chosen_connection->max_concurrent_streams); } } } AWS_ASSERT(errored == 0 && "random access set went wrong"); (void)errored; } /* NOTE: never invoke with lock held */ static void s_finish_pending_stream_acquisitions_list_helper( struct aws_http2_stream_manager *stream_manager, struct aws_linked_list *pending_stream_acquisitions, int error_code) { while (!aws_linked_list_empty(pending_stream_acquisitions)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(pending_stream_acquisitions); struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node); /* Make sure no connection assigned. */ AWS_ASSERT(pending_stream_acquisition->sm_connection == NULL); if (pending_stream_acquisition->callback) { pending_stream_acquisition->callback(NULL, error_code, pending_stream_acquisition->user_data); } STREAM_MANAGER_LOGF( DEBUG, stream_manager, "acquisition:%p failed with error: %d(%s)", (void *)pending_stream_acquisition, error_code, aws_error_str(error_code)); s_pending_stream_acquisition_destroy(pending_stream_acquisition); } } /* This is scheduled to run on a separate event loop to finish pending acquisition asynchronously */ static void s_finish_pending_stream_acquisitions_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct aws_http2_stream_manager *stream_manager = arg; STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager final task runs"); struct aws_http2_stream_management_transaction work; struct aws_linked_list pending_stream_acquisitions; aws_linked_list_init(&pending_stream_acquisitions); s_aws_stream_management_transaction_init(&work, stream_manager); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream_manager); AWS_ASSERT(stream_manager->synced_data.state == AWS_H2SMST_DESTROYING); /* swap list to avoid callback with lock held. 
*/ aws_linked_list_swap_contents( &pending_stream_acquisitions, &stream_manager->synced_data.pending_stream_acquisitions); /* After the callbacks are invoked, we can update the count */ s_sm_count_decrease_synced( stream_manager, AWS_SMCT_PENDING_ACQUISITION, stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]); s_aws_http2_stream_manager_build_transaction_synced(&work); s_unlock_synced_data(stream_manager); } /* END CRITICAL SECTION */ s_finish_pending_stream_acquisitions_list_helper( stream_manager, &pending_stream_acquisitions, AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN); aws_mem_release(stream_manager->allocator, task); s_aws_http2_stream_manager_execute_transaction(&work); } /* Helper function for building the transaction: decide how many new connections we should request */ static void s_check_new_connections_needed_synced(struct aws_http2_stream_management_transaction *work) { struct aws_http2_stream_manager *stream_manager = work->stream_manager; /* The ideal number of new connections needed to fit all the pending stream acquisitions */ size_t ideal_new_connection_count = stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] / stream_manager->ideal_concurrent_streams_per_connection; /* Rounding up */ if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] % stream_manager->ideal_concurrent_streams_per_connection) { ++ideal_new_connection_count; } /* Subtract the connections that are already being acquired asynchronously, so they are not requested twice */ work->new_connections = aws_sub_size_saturating( ideal_new_connection_count, stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING]); /* The real number we can request is the min of how many more we may still hold and how many we need */ size_t new_connections_available = stream_manager->max_connections - stream_manager->synced_data.holding_connections_count - stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING]; work->new_connections = aws_min_size(new_connections_available, work->new_connections); /* Update the number of connections we are acquiring */ s_sm_count_increase_synced(stream_manager, AWS_SMCT_CONNECTIONS_ACQUIRING, work->new_connections); STREAM_MANAGER_LOGF( DEBUG, stream_manager, "number of acquisitions waiting for a connection=%zu. 
connection acquiring=%zu, connection held=%zu, " "max connection=%zu", stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION], stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING], stream_manager->synced_data.holding_connections_count, stream_manager->max_connections); } /** * It can be invoked from: * - User release last refcount of stream manager * - User acquires stream from stream manager * - Connection acquired callback from connection manager * - Stream completed callback from HTTP */ /* *_synced should only be called with LOCK HELD or from another synced function */ static void s_aws_http2_stream_manager_build_transaction_synced(struct aws_http2_stream_management_transaction *work) { struct aws_http2_stream_manager *stream_manager = work->stream_manager; if (stream_manager->synced_data.state == AWS_H2SMST_READY) { /* Steps 1: Pending acquisitions of stream */ while (!aws_linked_list_empty(&stream_manager->synced_data.pending_stream_acquisitions)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream_manager->synced_data.pending_stream_acquisitions); struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node); s_sm_try_assign_connection_to_pending_stream_acquisition_synced(stream_manager, pending_stream_acquisition); if (pending_stream_acquisition->sm_connection == NULL) { /* Cannot find any connection, push it back to the front and break the loop */ aws_linked_list_push_front(&stream_manager->synced_data.pending_stream_acquisitions, node); STREAM_MANAGER_LOGF( DEBUG, stream_manager, "acquisition:%p cannot find any connection to use.", (void *)pending_stream_acquisition); break; } else { /* found connection for the request. 
Move it to pending make requests and update the count */ aws_linked_list_push_back(&work->pending_make_requests, node); s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, 1); s_sm_count_increase_synced(stream_manager, AWS_SMCT_PENDING_MAKE_REQUESTS, 1); } } /* Step 2: Check for new connections needed */ if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]) { s_check_new_connections_needed_synced(work); } } else { /* Stream manager is shutting down */ if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] && !stream_manager->synced_data.finish_pending_stream_acquisitions_task_scheduled) { /* schedule a task to finish the pending acquisitions if there doesn't have one and needed */ stream_manager->finish_pending_stream_acquisitions_task_event_loop = aws_event_loop_group_get_next_loop(stream_manager->bootstrap->event_loop_group); struct aws_task *finish_pending_stream_acquisitions_task = aws_mem_calloc(stream_manager->allocator, 1, sizeof(struct aws_task)); aws_task_init( finish_pending_stream_acquisitions_task, s_finish_pending_stream_acquisitions_task, stream_manager, "sm_finish_pending_stream_acquisitions"); aws_event_loop_schedule_task_now( stream_manager->finish_pending_stream_acquisitions_task_event_loop, finish_pending_stream_acquisitions_task); stream_manager->synced_data.finish_pending_stream_acquisitions_task_scheduled = true; } } s_sm_log_stats_synced(stream_manager); } static void s_on_ping_complete( struct aws_http_connection *http2_connection, uint64_t round_trip_time_ns, int error_code, void *user_data) { (void)http2_connection; struct aws_h2_sm_connection *sm_connection = user_data; if (error_code) { goto done; } if (!sm_connection->connection) { goto done; } AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection))); STREAM_MANAGER_LOGF( TRACE, sm_connection->stream_manager, "PING ACK received for connection: %p. 
Round trip time in ns is: %" PRIu64 ".", (void *)sm_connection->connection, round_trip_time_ns); sm_connection->thread_data.ping_received = true; done: /* Release refcount held for ping complete */ aws_ref_count_release(&sm_connection->ref_count); } static void s_connection_ping_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_h2_sm_connection *sm_connection = arg; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } if (!sm_connection->connection) { /* The connection has been released before timeout happens, just release the refcount */ goto done; } AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection))); if (!sm_connection->thread_data.ping_received) { /* Timeout happened */ STREAM_MANAGER_LOGF( ERROR, sm_connection->stream_manager, "ping timeout detected for connection: %p, closing connection.", (void *)sm_connection->connection); aws_http_connection_close(sm_connection->connection); } else { struct aws_channel *channel = aws_http_connection_get_channel(sm_connection->connection); /* acquire a refcount for next set of tasks to run */ aws_ref_count_acquire(&sm_connection->ref_count); aws_channel_schedule_task_future( channel, &sm_connection->ping_task, sm_connection->thread_data.next_ping_task_time); } done: /* Release refcount for current set of tasks */ aws_ref_count_release(&sm_connection->ref_count); } static void s_connection_ping_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_h2_sm_connection *sm_connection = arg; if (status != AWS_TASK_STATUS_RUN_READY) { aws_ref_count_release(&sm_connection->ref_count); return; } if (!sm_connection->connection) { /* The connection has been released before ping task, just release the refcount */ aws_ref_count_release(&sm_connection->ref_count); return; } AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection))); STREAM_MANAGER_LOGF( TRACE, sm_connection->stream_manager, "Sending PING for connection: %p.", (void *)sm_connection->connection); aws_http2_connection_ping(sm_connection->connection, NULL, s_on_ping_complete, sm_connection); /* Acquire refcount for PING complete to be invoked. 
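 * A rough sketch of the reference-counting cycle behind this PING keep-alive, pieced together from
 * s_connection_ping_task, s_on_ping_complete and s_connection_ping_timeout_task in this file (for
 * orientation only, it adds no new behavior):
 *
 *   ping task          : acquires +1 for the PING completion below; its own reference is carried
 *                        forward for the timeout task it schedules
 *   s_on_ping_complete : records thread_data.ping_received, then releases its +1
 *   timeout task       : if the PING was acked, acquires +1 for the next ping task it schedules and
 *                        releases its own reference; otherwise it closes the connection and releases
 *
 * Every scheduled task or outstanding callback therefore owns exactly one reference on sm_connection.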
*/ aws_ref_count_acquire(&sm_connection->ref_count); sm_connection->thread_data.ping_received = false; /* schedule timeout task */ struct aws_channel *channel = aws_http_connection_get_channel(sm_connection->connection); uint64_t current_time = 0; aws_channel_current_clock_time(channel, &current_time); sm_connection->thread_data.next_ping_task_time = current_time + sm_connection->stream_manager->connection_ping_period_ns; uint64_t timeout_time = current_time + sm_connection->stream_manager->connection_ping_timeout_ns; aws_channel_task_init( &sm_connection->ping_timeout_task, s_connection_ping_timeout_task, sm_connection, "Stream manager connection ping timeout task"); /* keep the refcount for timeout task to run */ aws_channel_schedule_task_future(channel, &sm_connection->ping_timeout_task, timeout_time); } static void s_sm_connection_destroy(void *user_data) { struct aws_h2_sm_connection *sm_connection = user_data; aws_mem_release(sm_connection->allocator, sm_connection); } static struct aws_h2_sm_connection *s_sm_connection_new( struct aws_http2_stream_manager *stream_manager, struct aws_http_connection *connection) { struct aws_h2_sm_connection *sm_connection = aws_mem_calloc(stream_manager->allocator, 1, sizeof(struct aws_h2_sm_connection)); sm_connection->allocator = stream_manager->allocator; /* Cap this connection's max concurrent streams by the remote MAX_CONCURRENT_STREAMS setting */ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]; /* The setting id equals the index plus one. */ aws_http2_connection_get_remote_settings(connection, out_settings); uint32_t remote_max_con_streams = out_settings[AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS - 1].value; sm_connection->max_concurrent_streams = aws_min_u32((uint32_t)stream_manager->max_concurrent_streams_per_connection, remote_max_con_streams); sm_connection->connection = connection; sm_connection->stream_manager = stream_manager; sm_connection->state = AWS_H2SMCST_IDEAL; aws_ref_count_init(&sm_connection->ref_count, sm_connection, s_sm_connection_destroy); if (stream_manager->connection_ping_period_ns) { struct aws_channel *channel = aws_http_connection_get_channel(connection); uint64_t schedule_time = 0; aws_channel_current_clock_time(channel, &schedule_time); schedule_time += stream_manager->connection_ping_period_ns; aws_channel_task_init( &sm_connection->ping_task, s_connection_ping_task, sm_connection, "Stream manager connection ping task"); /* Keep a refcount on sm_connection for the task to run. */ aws_ref_count_acquire(&sm_connection->ref_count); aws_channel_schedule_task_future(channel, &sm_connection->ping_task, schedule_time); } return sm_connection; } static void s_sm_connection_release_connection(struct aws_h2_sm_connection *sm_connection) { AWS_ASSERT(sm_connection->num_streams_assigned == 0); if (sm_connection->connection) { /* Should only be invoked from the connection thread.
*/ AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection))); int error = aws_http_connection_manager_release_connection( sm_connection->stream_manager->connection_manager, sm_connection->connection); AWS_ASSERT(!error); (void)error; sm_connection->connection = NULL; } aws_ref_count_release(&sm_connection->ref_count); } static void s_sm_on_connection_acquired_failed_synced( struct aws_http2_stream_manager *stream_manager, struct aws_linked_list *stream_acquisitions_to_fail) { /* Once we failed to acquire a connection, we fail the stream acquisitions that cannot fit into the remaining * acquiring connections. */ size_t num_can_fit = aws_mul_size_saturating( stream_manager->ideal_concurrent_streams_per_connection, stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING]); size_t num_to_fail = aws_sub_size_saturating( stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION], num_can_fit); /* Get a list to fail instead of fail them with in the lock. */ for (size_t i = 0; i < num_to_fail; i++) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream_manager->synced_data.pending_stream_acquisitions); aws_linked_list_push_back(stream_acquisitions_to_fail, node); } s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, num_to_fail); } static void s_sm_on_connection_acquired(struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_http2_stream_manager *stream_manager = user_data; struct aws_http2_stream_management_transaction work; STREAM_MANAGER_LOGF(TRACE, stream_manager, "connection=%p acquired from connection manager", (void *)connection); int re_error = 0; int stream_fail_error_code = AWS_ERROR_SUCCESS; bool should_release_connection = false; struct aws_linked_list stream_acquisitions_to_fail; aws_linked_list_init(&stream_acquisitions_to_fail); s_aws_stream_management_transaction_init(&work, stream_manager); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream_manager); s_sm_count_decrease_synced(stream_manager, AWS_SMCT_CONNECTIONS_ACQUIRING, 1); if (error_code || !connection) { STREAM_MANAGER_LOGF( ERROR, stream_manager, "connection acquired from connection manager failed, with error: %d(%s)", error_code, aws_error_str(error_code)); s_sm_on_connection_acquired_failed_synced(stream_manager, &stream_acquisitions_to_fail); stream_fail_error_code = AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE; } else if (aws_http_connection_get_version(connection) != AWS_HTTP_VERSION_2) { STREAM_MANAGER_LOGF( ERROR, stream_manager, "Unexpected HTTP version acquired, release the connection=%p acquired immediately", (void *)connection); should_release_connection = true; s_sm_on_connection_acquired_failed_synced(stream_manager, &stream_acquisitions_to_fail); stream_fail_error_code = AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION; } else if (stream_manager->synced_data.state != AWS_H2SMST_READY) { STREAM_MANAGER_LOGF( DEBUG, stream_manager, "shutting down, release the connection=%p acquired immediately", (void *)connection); /* Release the acquired connection */ should_release_connection = true; } else if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] == 0) { STREAM_MANAGER_LOGF( DEBUG, stream_manager, "No pending acquisition, release the connection=%p acquired immediately", (void *)connection); /* Release the acquired connection */ should_release_connection = true; } else { struct 
aws_h2_sm_connection *sm_connection = s_sm_connection_new(stream_manager, connection); bool added = false; re_error |= aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added); re_error |= !added; ++stream_manager->synced_data.holding_connections_count; } s_aws_http2_stream_manager_build_transaction_synced(&work); s_unlock_synced_data(stream_manager); } /* END CRITICAL SECTION */ if (should_release_connection) { STREAM_MANAGER_LOGF(DEBUG, stream_manager, "Releasing connection: %p", (void *)connection); re_error |= aws_http_connection_manager_release_connection(stream_manager->connection_manager, connection); } AWS_ASSERT(!re_error && "connection acquired callback fails with programming errors"); (void)re_error; /* Fail acquisitions if any */ s_finish_pending_stream_acquisitions_list_helper( stream_manager, &stream_acquisitions_to_fail, stream_fail_error_code); s_aws_http2_stream_manager_execute_transaction(&work); } static int s_on_incoming_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data; struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection; struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager; if (pending_stream_acquisition->options.on_response_headers) { return pending_stream_acquisition->options.on_response_headers( stream, header_block, header_array, num_headers, pending_stream_acquisition->options.user_data); } if (stream_manager->close_connection_on_server_error) { /* Check status code if stream completed successfully. */ int status_code = 0; aws_http_stream_get_incoming_response_status(stream, &status_code); AWS_ASSERT(status_code != 0); /* The get status should not fail */ switch (status_code) { case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR: case AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY: case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE: case AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT: /* For those error code if the retry happens, it should not use the same connection. 
*/ if (!sm_connection->thread_data.stopped_new_requests) { STREAM_MANAGER_LOGF( DEBUG, stream_manager, "no longer using connection: %p due to receiving %d server error status code for stream: %p", (void *)sm_connection->connection, status_code, (void *)stream); aws_http_connection_stop_new_requests(sm_connection->connection); sm_connection->thread_data.stopped_new_requests = true; } break; default: break; } } return AWS_OP_SUCCESS; } static int s_on_incoming_header_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data; if (pending_stream_acquisition->options.on_response_header_block_done) { return pending_stream_acquisition->options.on_response_header_block_done( stream, header_block, pending_stream_acquisition->options.user_data); } return AWS_OP_SUCCESS; } static int s_on_incoming_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data; if (pending_stream_acquisition->options.on_response_body) { return pending_stream_acquisition->options.on_response_body( stream, data, pending_stream_acquisition->options.user_data); } return AWS_OP_SUCCESS; } /* Helper invoked when underlying connections is still available and the num stream assigned has been updated */ static void s_update_sm_connection_set_on_stream_finishes_synced( struct aws_h2_sm_connection *sm_connection, struct aws_http2_stream_manager *stream_manager) { int re_error = 0; size_t cur_num = sm_connection->num_streams_assigned; size_t ideal_num = stream_manager->ideal_concurrent_streams_per_connection; size_t max_num = sm_connection->max_concurrent_streams; /** * TODO: When the MAX_CONCURRENT_STREAMS from other side changed after the initial settings. 
We need to: * - figure out where I am * - figure out where I should be * - if they're different, remove from where I am, put where should be */ if (sm_connection->state == AWS_H2SMCST_NEARLY_FULL && cur_num < ideal_num) { /* this connection is back from soft limited to ideal */ bool exist = false; (void)exist; AWS_ASSERT( aws_random_access_set_exist(&stream_manager->synced_data.nonideal_available_set, sm_connection, &exist) == AWS_OP_SUCCESS && exist); re_error |= aws_random_access_set_remove(&stream_manager->synced_data.nonideal_available_set, sm_connection); bool added = false; re_error |= aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added); re_error |= !added; sm_connection->state = AWS_H2SMCST_IDEAL; } else if (sm_connection->state == AWS_H2SMCST_FULL && cur_num < max_num) { /* this connection is back from full */ STREAM_MANAGER_LOGF( DEBUG, stream_manager, "connection:%p back to available, assigned stream=%zu, max concurrent streams=%" PRIu32 "", (void *)sm_connection->connection, cur_num, sm_connection->max_concurrent_streams); bool added = false; if (cur_num >= ideal_num) { sm_connection->state = AWS_H2SMCST_NEARLY_FULL; STREAM_MANAGER_LOGF( TRACE, stream_manager, "connection:%p added to soft limited set", (void *)sm_connection->connection); re_error |= aws_random_access_set_add(&stream_manager->synced_data.nonideal_available_set, sm_connection, &added); } else { sm_connection->state = AWS_H2SMCST_IDEAL; STREAM_MANAGER_LOGF( TRACE, stream_manager, "connection:%p added to ideal set", (void *)sm_connection->connection); re_error |= aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added); } re_error |= !added; } AWS_ASSERT(re_error == AWS_OP_SUCCESS); (void)re_error; } static void s_sm_connection_on_scheduled_stream_finishes( struct aws_h2_sm_connection *sm_connection, struct aws_http2_stream_manager *stream_manager) { /* Reach the max current will still allow new requests, but the new stream will complete with error */ bool connection_available = aws_http_connection_new_requests_allowed(sm_connection->connection); struct aws_http2_stream_management_transaction work; s_aws_stream_management_transaction_init(&work, stream_manager); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream_manager); s_sm_count_decrease_synced(stream_manager, AWS_SMCT_OPEN_STREAM, 1); --sm_connection->num_streams_assigned; if (!connection_available) { /* It might be removed already, but, it's fine */ aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, sm_connection); aws_random_access_set_remove(&stream_manager->synced_data.nonideal_available_set, sm_connection); } else { s_update_sm_connection_set_on_stream_finishes_synced(sm_connection, stream_manager); } s_aws_http2_stream_manager_build_transaction_synced(&work); /* After we build transaction, if the sm_connection still have zero assigned stream, we can kill the * sm_connection */ if (sm_connection->num_streams_assigned == 0) { /* It might be removed already, but, it's fine */ aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, sm_connection); work.sm_connection_to_release = sm_connection; --stream_manager->synced_data.holding_connections_count; /* After we release one connection back, we should check if we need more connections */ if (stream_manager->synced_data.state == AWS_H2SMST_READY && stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]) { 
s_check_new_connections_needed_synced(&work); } } s_unlock_synced_data(stream_manager); } /* END CRITICAL SECTION */ s_aws_http2_stream_manager_execute_transaction(&work); } static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data; struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection; struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager; if (pending_stream_acquisition->options.on_complete) { pending_stream_acquisition->options.on_complete( stream, error_code, pending_stream_acquisition->options.user_data); } s_sm_connection_on_scheduled_stream_finishes(sm_connection, stream_manager); } static void s_on_stream_destroy(void *user_data) { struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data; if (pending_stream_acquisition->options.on_destroy) { pending_stream_acquisition->options.on_destroy(pending_stream_acquisition->options.user_data); } s_pending_stream_acquisition_destroy(pending_stream_acquisition); } /* Scheduled to happen from connection's thread */ static void s_make_request_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = arg; struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection; struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager; int error_code = AWS_ERROR_SUCCESS; STREAM_MANAGER_LOGF( TRACE, stream_manager, "Make request task running for acquisition:%p from connection:%p thread", (void *)pending_stream_acquisition, (void *)sm_connection->connection); bool is_shutting_down = false; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream_manager); is_shutting_down = stream_manager->synced_data.state != AWS_H2SMST_READY; s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_MAKE_REQUESTS, 1); /* The stream has not open yet, but we increase the count here, if anything fails, the count will be decreased */ s_sm_count_increase_synced(stream_manager, AWS_SMCT_OPEN_STREAM, 1); AWS_ASSERT( sm_connection->max_concurrent_streams >= sm_connection->num_streams_assigned && "The max concurrent streams exceed"); s_unlock_synced_data(stream_manager); } /* END CRITICAL SECTION */ /* this is a channel task. If it is canceled, that means the channel shutdown. In that case, that's equivalent * to a closed connection. 
*/ if (status != AWS_TASK_STATUS_RUN_READY) { STREAM_MANAGER_LOGF( ERROR, stream_manager, "acquisition:%p failed as the task is cancelled.", (void *)pending_stream_acquisition); error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; goto error; } if (is_shutting_down) { STREAM_MANAGER_LOGF( ERROR, stream_manager, "acquisition:%p failed as stream manager is shutting down before task runs.", (void *)pending_stream_acquisition); error_code = AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN; goto error; } struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = pending_stream_acquisition->request, .on_response_headers = s_on_incoming_headers, .on_response_header_block_done = s_on_incoming_header_block_done, .on_response_body = s_on_incoming_body, .on_complete = s_on_stream_complete, .on_destroy = s_on_stream_destroy, .user_data = pending_stream_acquisition, .http2_use_manual_data_writes = pending_stream_acquisition->options.http2_use_manual_data_writes, }; /* TODO: we could put the pending acquisition back to the list if the connection is not available for new request. */ struct aws_http_stream *stream = aws_http_connection_make_request(sm_connection->connection, &request_options); if (!stream) { error_code = aws_last_error(); STREAM_MANAGER_LOGF( ERROR, stream_manager, "acquisition:%p failed as HTTP level make request failed with error: %d(%s).", (void *)pending_stream_acquisition, error_code, aws_error_str(error_code)); goto error; } /* Since we're in the connection's thread, this should be safe, there won't be any other callbacks to the user */ if (aws_http_stream_activate(stream)) { /* Activate failed, the on_completed callback will NOT be invoked from HTTP, but we already told user about * the stream. Invoke the user completed callback here */ error_code = aws_last_error(); STREAM_MANAGER_LOGF( ERROR, stream_manager, "acquisition:%p failed as stream activate failed with error: %d(%s).", (void *)pending_stream_acquisition, error_code, aws_error_str(error_code)); goto error; } if (pending_stream_acquisition->callback) { pending_stream_acquisition->callback(stream, 0, pending_stream_acquisition->user_data); } /* Happy case, the complete callback will be invoked, and we clean things up at the callback, but we can release the * request now */ aws_http_message_release(pending_stream_acquisition->request); pending_stream_acquisition->request = NULL; return; error: if (pending_stream_acquisition->callback) { pending_stream_acquisition->callback(NULL, error_code, pending_stream_acquisition->user_data); } s_pending_stream_acquisition_destroy(pending_stream_acquisition); /* task should happen after destroy, as the task can trigger the whole stream manager to be destroyed */ s_sm_connection_on_scheduled_stream_finishes(sm_connection, stream_manager); } /* NEVER invoke with lock held */ static void s_aws_http2_stream_manager_execute_transaction(struct aws_http2_stream_management_transaction *work) { struct aws_http2_stream_manager *stream_manager = work->stream_manager; /* Step1: Release connection */ if (work->sm_connection_to_release) { AWS_ASSERT(work->sm_connection_to_release->num_streams_assigned == 0); STREAM_MANAGER_LOGF( DEBUG, stream_manager, "Release connection:%p back to connection manager as no outstanding streams", (void *)work->sm_connection_to_release->connection); s_sm_connection_release_connection(work->sm_connection_to_release); } /* Step2: Make request. The work should know what connection for the request to be made. 
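 * (For orientation: every caller of this function follows the same shape, sketched here from the call
 * sites in this file rather than being new code:
 *     s_aws_stream_management_transaction_init(&work, stream_manager);
 *     lock synced_data; mutate state; s_aws_http2_stream_manager_build_transaction_synced(&work); unlock;
 *     s_aws_http2_stream_manager_execute_transaction(&work);
 * so every side effect collected under the lock is performed here, outside the lock.)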
*/ while (!aws_linked_list_empty(&work->pending_make_requests)) { /* The completions can also fail as the connection can be unavailable after the decision was made. We just fail * the acquisition */ struct aws_linked_list_node *node = aws_linked_list_pop_front(&work->pending_make_requests); struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node); AWS_ASSERT( pending_stream_acquisition->sm_connection && "Stream manager internal bug: connection is not decided before execute transaction"); STREAM_MANAGER_LOGF( TRACE, stream_manager, "acquisition:%p is scheduled to be made request from connection:%p thread", (void *)pending_stream_acquisition, (void *)pending_stream_acquisition->sm_connection->connection); /** * schedule a task from the connection's event loop to make the request, so that: * - We can activate the stream for the user and then invoke the callback * - The callback will happen asynchronously even if the stream failed to be created * - We can make sure we will not break the settings */ struct aws_channel *channel = aws_http_connection_get_channel(pending_stream_acquisition->sm_connection->connection); aws_channel_task_init( &pending_stream_acquisition->make_request_task, s_make_request_task, pending_stream_acquisition, "Stream manager make request task"); aws_channel_schedule_task_now(channel, &pending_stream_acquisition->make_request_task); } /* Step 3: Acquire connections if needed */ if (work->new_connections) { STREAM_MANAGER_LOGF(DEBUG, stream_manager, "acquiring %zu new connections", work->new_connections); } for (size_t i = 0; i < work->new_connections; ++i) { aws_http_connection_manager_acquire_connection( stream_manager->connection_manager, s_sm_on_connection_acquired, stream_manager); } /* * Step 4: Clean up work. Do this here rather than at the end of every caller.
Destroy the manager if necessary */ s_aws_stream_management_transaction_clean_up(work); } void s_stream_manager_destroy_final(struct aws_http2_stream_manager *stream_manager) { if (!stream_manager) { return; } STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager finishes destroying self"); /* Connection manager has already been cleaned up */ AWS_FATAL_ASSERT(stream_manager->connection_manager == NULL); AWS_FATAL_ASSERT(aws_linked_list_empty(&stream_manager->synced_data.pending_stream_acquisitions)); aws_mutex_clean_up(&stream_manager->synced_data.lock); aws_random_access_set_clean_up(&stream_manager->synced_data.ideal_available_set); aws_random_access_set_clean_up(&stream_manager->synced_data.nonideal_available_set); aws_client_bootstrap_release(stream_manager->bootstrap); if (stream_manager->shutdown_complete_callback) { stream_manager->shutdown_complete_callback(stream_manager->shutdown_complete_user_data); } aws_mem_release(stream_manager->allocator, stream_manager); } void s_stream_manager_on_cm_shutdown_complete(void *user_data) { struct aws_http2_stream_manager *stream_manager = (struct aws_http2_stream_manager *)user_data; STREAM_MANAGER_LOGF( TRACE, stream_manager, "Underlying connection manager (ip=%p) finished shutdown, stream manager can finish destroying now", (void *)stream_manager->connection_manager); stream_manager->connection_manager = NULL; s_stream_manager_destroy_final(stream_manager); } static void s_stream_manager_start_destroy(struct aws_http2_stream_manager *stream_manager) { STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager reaches the condition to destroy, start to destroy"); /* If there is no outstanding streams, the connections set should be empty. */ AWS_ASSERT(aws_random_access_set_get_size(&stream_manager->synced_data.ideal_available_set) == 0); AWS_ASSERT(aws_random_access_set_get_size(&stream_manager->synced_data.nonideal_available_set) == 0); AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING] == 0); AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM] == 0); AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_MAKE_REQUESTS] == 0); AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] == 0); AWS_ASSERT(stream_manager->connection_manager); struct aws_http_connection_manager *cm = stream_manager->connection_manager; stream_manager->connection_manager = NULL; aws_http_connection_manager_release(cm); } void s_stream_manager_on_zero_external_ref(struct aws_http2_stream_manager *stream_manager) { STREAM_MANAGER_LOG( TRACE, stream_manager, "Last refcount released, manager stop accepting new stream request and will start to clean up when not " "outstanding tasks remaining."); struct aws_http2_stream_management_transaction work; s_aws_stream_management_transaction_init(&work, stream_manager); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream_manager); stream_manager->synced_data.state = AWS_H2SMST_DESTROYING; s_aws_http2_stream_manager_build_transaction_synced(&work); /* Release the internal ref count as no external usage anymore */ aws_ref_count_release(&stream_manager->internal_ref_count); s_unlock_synced_data(stream_manager); } /* END CRITICAL SECTION */ s_aws_http2_stream_manager_execute_transaction(&work); } struct aws_http2_stream_manager *aws_http2_stream_manager_new( struct aws_allocator *allocator, const struct aws_http2_stream_manager_options *options) { AWS_PRECONDITION(allocator); /* The other 
options are validated by the aws_http_connection_manager_new */ if (!options->http2_prior_knowledge && !options->tls_connection_options) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - Prior knowledge must be used for cleartext HTTP/2 connections." " Upgrade from HTTP/1.1 is not supported."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http2_stream_manager *stream_manager = aws_mem_calloc(allocator, 1, sizeof(struct aws_http2_stream_manager)); stream_manager->allocator = allocator; aws_linked_list_init(&stream_manager->synced_data.pending_stream_acquisitions); if (aws_mutex_init(&stream_manager->synced_data.lock)) { goto on_error; } if (aws_random_access_set_init( &stream_manager->synced_data.ideal_available_set, allocator, aws_hash_ptr, aws_ptr_eq, NULL /* destroy function */, 2)) { goto on_error; } if (aws_random_access_set_init( &stream_manager->synced_data.nonideal_available_set, allocator, aws_hash_ptr, aws_ptr_eq, NULL /* destroy function */, 2)) { goto on_error; } aws_ref_count_init( &stream_manager->external_ref_count, stream_manager, (aws_simple_completion_callback *)s_stream_manager_on_zero_external_ref); aws_ref_count_init( &stream_manager->internal_ref_count, stream_manager, (aws_simple_completion_callback *)s_stream_manager_start_destroy); if (options->connection_ping_period_ms) { stream_manager->connection_ping_period_ns = aws_timestamp_convert(options->connection_ping_period_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); size_t connection_ping_timeout_ms = options->connection_ping_timeout_ms ? options->connection_ping_timeout_ms : s_default_ping_timeout_ms; stream_manager->connection_ping_timeout_ns = aws_timestamp_convert(connection_ping_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); if (stream_manager->connection_ping_period_ns < stream_manager->connection_ping_timeout_ns) { STREAM_MANAGER_LOGF( WARN, stream_manager, "connection_ping_period_ms: %zu is shorter than connection_ping_timeout_ms: %zu. 
Clamping " "connection_ping_timeout_ms to %zu", options->connection_ping_period_ms, connection_ping_timeout_ms, options->connection_ping_period_ms); stream_manager->connection_ping_timeout_ns = stream_manager->connection_ping_period_ns; } } stream_manager->bootstrap = aws_client_bootstrap_acquire(options->bootstrap); struct aws_http_connection_manager_options cm_options = { .bootstrap = options->bootstrap, .socket_options = options->socket_options, .tls_connection_options = options->tls_connection_options, .http2_prior_knowledge = options->http2_prior_knowledge, .host = options->host, .port = options->port, .enable_read_back_pressure = options->enable_read_back_pressure, .monitoring_options = options->monitoring_options, .proxy_options = options->proxy_options, .proxy_ev_settings = options->proxy_ev_settings, .max_connections = options->max_connections, .shutdown_complete_user_data = stream_manager, .shutdown_complete_callback = s_stream_manager_on_cm_shutdown_complete, .initial_settings_array = options->initial_settings_array, .num_initial_settings = options->num_initial_settings, .max_closed_streams = options->max_closed_streams, .http2_conn_manual_window_management = options->conn_manual_window_management, }; /* aws_http_connection_manager_new needs to be the last thing that can fail */ stream_manager->connection_manager = aws_http_connection_manager_new(allocator, &cm_options); if (!stream_manager->connection_manager) { goto on_error; } /* Nothing can fail after here */ stream_manager->synced_data.state = AWS_H2SMST_READY; stream_manager->shutdown_complete_callback = options->shutdown_complete_callback; stream_manager->shutdown_complete_user_data = options->shutdown_complete_user_data; stream_manager->ideal_concurrent_streams_per_connection = options->ideal_concurrent_streams_per_connection ? options->ideal_concurrent_streams_per_connection : UINT32_MAX; stream_manager->max_concurrent_streams_per_connection = options->max_concurrent_streams_per_connection ?
options->max_concurrent_streams_per_connection : UINT32_MAX; stream_manager->max_connections = options->max_connections; stream_manager->close_connection_on_server_error = options->close_connection_on_server_error; return stream_manager; on_error: s_stream_manager_destroy_final(stream_manager); return NULL; } struct aws_http2_stream_manager *aws_http2_stream_manager_acquire(struct aws_http2_stream_manager *stream_manager) { if (stream_manager) { aws_ref_count_acquire(&stream_manager->external_ref_count); } return stream_manager; } struct aws_http2_stream_manager *aws_http2_stream_manager_release(struct aws_http2_stream_manager *stream_manager) { if (stream_manager) { aws_ref_count_release(&stream_manager->external_ref_count); } return NULL; } void aws_http2_stream_manager_acquire_stream( struct aws_http2_stream_manager *stream_manager, const struct aws_http2_stream_manager_acquire_stream_options *acquire_stream_option) { AWS_PRECONDITION(stream_manager); AWS_PRECONDITION(acquire_stream_option); AWS_PRECONDITION(acquire_stream_option->callback); AWS_PRECONDITION(acquire_stream_option->options); struct aws_http2_stream_management_transaction work; struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = s_new_pending_stream_acquisition( stream_manager->allocator, acquire_stream_option->options, acquire_stream_option->callback, acquire_stream_option->user_data); STREAM_MANAGER_LOGF( TRACE, stream_manager, "Stream Manager creates acquisition:%p for user", (void *)pending_stream_acquisition); s_aws_stream_management_transaction_init(&work, stream_manager); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream_manager); /* it's use after free crime */ AWS_FATAL_ASSERT(stream_manager->synced_data.state != AWS_H2SMST_DESTROYING); aws_linked_list_push_back( &stream_manager->synced_data.pending_stream_acquisitions, &pending_stream_acquisition->node); s_sm_count_increase_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, 1); s_aws_http2_stream_manager_build_transaction_synced(&work); s_unlock_synced_data(stream_manager); } /* END CRITICAL SECTION */ s_aws_http2_stream_manager_execute_transaction(&work); } static size_t s_get_available_streams_num_from_connection_set(const struct aws_random_access_set *set) { size_t all_available_streams_num = 0; size_t ideal_connection_num = aws_random_access_set_get_size(set); for (size_t i = 0; i < ideal_connection_num; i++) { struct aws_h2_sm_connection *sm_connection = NULL; AWS_FATAL_ASSERT(aws_random_access_set_random_get_ptr_index(set, (void **)&sm_connection, i) == AWS_OP_SUCCESS); uint32_t available_streams = sm_connection->max_concurrent_streams - sm_connection->num_streams_assigned; all_available_streams_num += (size_t)available_streams; } return all_available_streams_num; } void aws_http2_stream_manager_fetch_metrics( const struct aws_http2_stream_manager *stream_manager, struct aws_http_manager_metrics *out_metrics) { AWS_PRECONDITION(stream_manager); AWS_PRECONDITION(out_metrics); { /* BEGIN CRITICAL SECTION */ s_lock_synced_data((struct aws_http2_stream_manager *)(void *)stream_manager); size_t all_available_streams_num = 0; all_available_streams_num += s_get_available_streams_num_from_connection_set(&stream_manager->synced_data.ideal_available_set); all_available_streams_num += s_get_available_streams_num_from_connection_set(&stream_manager->synced_data.nonideal_available_set); out_metrics->pending_concurrency_acquires = stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]; 
out_metrics->available_concurrency = all_available_streams_num; out_metrics->leased_concurrency = stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM]; s_unlock_synced_data((struct aws_http2_stream_manager *)(void *)stream_manager); } /* END CRITICAL SECTION */ } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/proxy_connection.c000066400000000000000000001747311456575232400252700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif AWS_STATIC_STRING_FROM_LITERAL(s_host_header_name, "Host"); AWS_STATIC_STRING_FROM_LITERAL(s_proxy_connection_header_name, "Proxy-Connection"); AWS_STATIC_STRING_FROM_LITERAL(s_proxy_connection_header_value, "Keep-Alive"); AWS_STATIC_STRING_FROM_LITERAL(s_options_method, "OPTIONS"); AWS_STATIC_STRING_FROM_LITERAL(s_star_path, "*"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var, "HTTP_PROXY"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var_low, "http_proxy"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var, "HTTPS_PROXY"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var_low, "https_proxy"); #ifndef BYO_CRYPTO AWS_STATIC_STRING_FROM_LITERAL(s_proxy_no_verify_peer_env_var, "AWS_PROXY_NO_VERIFY_PEER"); #endif static struct aws_http_proxy_system_vtable s_default_vtable = { .aws_channel_setup_client_tls = &aws_channel_setup_client_tls, }; static struct aws_http_proxy_system_vtable *s_vtable = &s_default_vtable; void aws_http_proxy_system_set_vtable(struct aws_http_proxy_system_vtable *vtable) { s_vtable = vtable; } void aws_http_proxy_user_data_destroy(struct aws_http_proxy_user_data *user_data) { if (user_data == NULL) { return; } aws_hash_table_clean_up(&user_data->alpn_string_map); /* * For tunneling connections, this is now internal and never surfaced to the user, so it's our responsibility * to clean up the last reference. 
*/ if (user_data->proxy_connection != NULL && user_data->proxy_config->connection_type == AWS_HPCT_HTTP_TUNNEL) { aws_http_connection_release(user_data->proxy_connection); user_data->proxy_connection = NULL; } aws_string_destroy(user_data->original_host); if (user_data->proxy_config) { aws_http_proxy_config_destroy(user_data->proxy_config); } if (user_data->original_tls_options) { aws_tls_connection_options_clean_up(user_data->original_tls_options); aws_mem_release(user_data->allocator, user_data->original_tls_options); } aws_http_proxy_negotiator_release(user_data->proxy_negotiator); aws_client_bootstrap_release(user_data->original_bootstrap); aws_mem_release(user_data->allocator, user_data); } struct aws_http_proxy_user_data *aws_http_proxy_user_data_new( struct aws_allocator *allocator, const struct aws_http_client_connection_options *orig_options, aws_client_bootstrap_on_channel_event_fn *on_channel_setup, aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown) { AWS_FATAL_ASSERT(orig_options->proxy_options != NULL); /* make copy of options, and add defaults for missing optional structs */ struct aws_http_client_connection_options options = *orig_options; struct aws_http1_connection_options default_http1_options; AWS_ZERO_STRUCT(default_http1_options); if (options.http1_options == NULL) { options.http1_options = &default_http1_options; } struct aws_http2_connection_options default_http2_options; AWS_ZERO_STRUCT(default_http2_options); if (options.http2_options == NULL) { options.http2_options = &default_http2_options; } struct aws_http2_setting *setting_array = NULL; struct aws_http_proxy_user_data *user_data = NULL; aws_mem_acquire_many( options.allocator, 2, &user_data, sizeof(struct aws_http_proxy_user_data), &setting_array, options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting)); AWS_ZERO_STRUCT(*user_data); user_data->allocator = allocator; user_data->state = AWS_PBS_SOCKET_CONNECT; user_data->error_code = AWS_ERROR_SUCCESS; user_data->connect_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN; user_data->original_bootstrap = aws_client_bootstrap_acquire(options.bootstrap); if (options.socket_options != NULL) { user_data->original_socket_options = *options.socket_options; } user_data->original_manual_window_management = options.manual_window_management; user_data->original_initial_window_size = options.initial_window_size; user_data->original_host = aws_string_new_from_cursor(allocator, &options.host_name); if (user_data->original_host == NULL) { goto on_error; } user_data->original_port = options.port; user_data->proxy_config = aws_http_proxy_config_new_from_connection_options(allocator, &options); if (user_data->proxy_config == NULL) { goto on_error; } user_data->proxy_negotiator = aws_http_proxy_strategy_create_negotiator(user_data->proxy_config->proxy_strategy, allocator); if (user_data->proxy_negotiator == NULL) { goto on_error; } if (options.tls_options) { /* clone tls options, but redirect user data to what we're creating */ user_data->original_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options)); if (user_data->original_tls_options == NULL || aws_tls_connection_options_copy(user_data->original_tls_options, options.tls_options)) { goto on_error; } user_data->original_tls_options->user_data = user_data; } if (aws_http_alpn_map_init_copy(options.allocator, &user_data->alpn_string_map, options.alpn_string_map)) { goto on_error; } user_data->original_http_on_setup = options.on_setup; user_data->original_http_on_shutdown = 
options.on_shutdown; user_data->original_channel_on_setup = on_channel_setup; user_data->original_channel_on_shutdown = on_channel_shutdown; user_data->requested_event_loop = options.requested_event_loop; user_data->host_resolution_config = options.host_resolution_config; user_data->prior_knowledge_http2 = options.prior_knowledge_http2; /* one and only one setup callback must be valid */ AWS_FATAL_ASSERT((user_data->original_http_on_setup == NULL) != (user_data->original_channel_on_setup == NULL)); /* one and only one shutdown callback must be valid */ AWS_FATAL_ASSERT( (user_data->original_http_on_shutdown == NULL) != (user_data->original_channel_on_shutdown == NULL)); /* callback set must be self-consistent. Technically the second check is redundant given the previous checks */ AWS_FATAL_ASSERT((user_data->original_http_on_setup == NULL) == (user_data->original_http_on_shutdown == NULL)); AWS_FATAL_ASSERT( (user_data->original_channel_on_setup == NULL) == (user_data->original_channel_on_shutdown == NULL)); user_data->original_user_data = options.user_data; user_data->original_http1_options = *options.http1_options; user_data->original_http2_options = *options.http2_options; /* keep a copy of the settings array if it's not NULL */ if (options.http2_options->num_initial_settings > 0) { memcpy( setting_array, options.http2_options->initial_settings_array, options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting)); user_data->original_http2_options.initial_settings_array = setting_array; } return user_data; on_error: AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(STATIC) Proxy connection failed to create user data with error %d(%s)", aws_last_error(), aws_error_str(aws_last_error())); aws_http_proxy_user_data_destroy(user_data); return NULL; } struct aws_http_proxy_user_data *aws_http_proxy_user_data_new_reset_clone( struct aws_allocator *allocator, struct aws_http_proxy_user_data *old_user_data) { AWS_FATAL_ASSERT(old_user_data != NULL); struct aws_http2_setting *setting_array = NULL; struct aws_http_proxy_user_data *user_data = NULL; aws_mem_acquire_many( allocator, 2, &user_data, sizeof(struct aws_http_proxy_user_data), &setting_array, old_user_data->original_http2_options.num_initial_settings * sizeof(struct aws_http2_setting)); AWS_ZERO_STRUCT(*user_data); user_data->allocator = allocator; user_data->state = AWS_PBS_SOCKET_CONNECT; user_data->error_code = AWS_ERROR_SUCCESS; user_data->connect_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN; user_data->original_bootstrap = aws_client_bootstrap_acquire(old_user_data->original_bootstrap); user_data->original_socket_options = old_user_data->original_socket_options; user_data->original_manual_window_management = old_user_data->original_manual_window_management; user_data->original_initial_window_size = old_user_data->original_initial_window_size; user_data->prior_knowledge_http2 = old_user_data->prior_knowledge_http2; user_data->original_host = aws_string_new_from_string(allocator, old_user_data->original_host); if (user_data->original_host == NULL) { goto on_error; } user_data->original_port = old_user_data->original_port; user_data->proxy_config = aws_http_proxy_config_new_clone(allocator, old_user_data->proxy_config); if (user_data->proxy_config == NULL) { goto on_error; } user_data->proxy_negotiator = aws_http_proxy_negotiator_acquire(old_user_data->proxy_negotiator); if (user_data->proxy_negotiator == NULL) { goto on_error; } if (old_user_data->original_tls_options) { /* clone tls options, but redirect user data to what we're 
creating */ user_data->original_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options)); if (user_data->original_tls_options == NULL || aws_tls_connection_options_copy(user_data->original_tls_options, old_user_data->original_tls_options)) { goto on_error; } user_data->original_tls_options->user_data = user_data; } if (aws_http_alpn_map_init_copy(allocator, &user_data->alpn_string_map, &old_user_data->alpn_string_map)) { goto on_error; } user_data->original_http_on_setup = old_user_data->original_http_on_setup; user_data->original_http_on_shutdown = old_user_data->original_http_on_shutdown; user_data->original_channel_on_setup = old_user_data->original_channel_on_setup; user_data->original_channel_on_shutdown = old_user_data->original_channel_on_shutdown; user_data->original_user_data = old_user_data->original_user_data; user_data->original_http1_options = old_user_data->original_http1_options; user_data->original_http2_options = old_user_data->original_http2_options; /* keep a copy of the settings array if it's not NULL */ if (old_user_data->original_http2_options.num_initial_settings > 0) { memcpy( setting_array, old_user_data->original_http2_options.initial_settings_array, old_user_data->original_http2_options.num_initial_settings * sizeof(struct aws_http2_setting)); user_data->original_http2_options.initial_settings_array = setting_array; } return user_data; on_error: AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(STATIC) Proxy connection failed to create user data with error %d(%s)", aws_last_error(), aws_error_str(aws_last_error())); aws_http_proxy_user_data_destroy(user_data); return NULL; } /* * Examines the proxy user data state and determines whether to make an http-interface setup callback * or a raw channel setup callback */ static void s_do_on_setup_callback( struct aws_http_proxy_user_data *proxy_ud, struct aws_http_connection *connection, int error_code) { if (proxy_ud->original_http_on_setup) { proxy_ud->original_http_on_setup(connection, error_code, proxy_ud->original_user_data); proxy_ud->original_http_on_setup = NULL; } if (proxy_ud->original_channel_on_setup) { struct aws_channel *channel = NULL; if (connection != NULL) { channel = aws_http_connection_get_channel(connection); } proxy_ud->original_channel_on_setup( proxy_ud->original_bootstrap, error_code, channel, proxy_ud->original_user_data); proxy_ud->original_channel_on_setup = NULL; } } /* * Examines the proxy user data state and determines whether to make an http-interface shutdown callback * or a raw channel shutdown callback */ static void s_do_on_shutdown_callback(struct aws_http_proxy_user_data *proxy_ud, int error_code) { AWS_FATAL_ASSERT(proxy_ud->proxy_connection); if (proxy_ud->original_http_on_shutdown) { AWS_FATAL_ASSERT(proxy_ud->final_connection); proxy_ud->original_http_on_shutdown(proxy_ud->final_connection, error_code, proxy_ud->original_user_data); proxy_ud->original_http_on_shutdown = NULL; } if (proxy_ud->original_channel_on_shutdown) { struct aws_channel *channel = aws_http_connection_get_channel(proxy_ud->proxy_connection); proxy_ud->original_channel_on_shutdown( proxy_ud->original_bootstrap, error_code, channel, proxy_ud->original_user_data); proxy_ud->original_channel_on_shutdown = NULL; } } /* * Connection callback used ONLY by forwarding http proxy connections. 
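 * (In forwarding mode each outgoing request is later rewritten into absolute-form by
 * aws_http_rewrite_uri_for_proxy_request further down; with illustrative values, a request for path
 * /index.html?q=1 against original host example.com port 8080 would be rewritten to
 * http://example.com:8080/index.html?q=1.)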
After this, * the connection is live and the user is notified */ static void s_aws_http_on_client_connection_http_forwarding_proxy_setup_fn( struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_http_proxy_user_data *proxy_ud = user_data; s_do_on_setup_callback(proxy_ud, connection, error_code); if (error_code != AWS_ERROR_SUCCESS) { aws_http_proxy_user_data_destroy(user_data); } else { /* * The proxy connection and final connection are the same in forwarding proxy connections. This lets * us unconditionally use fatal asserts on these being non-null regardless of proxy configuration. */ proxy_ud->proxy_connection = connection; proxy_ud->final_connection = connection; proxy_ud->state = AWS_PBS_SUCCESS; } } /* * Connection shutdown callback used by both http and https proxy connections. Only invokes * user shutdown if the connection was successfully established. Otherwise, it invokes * the user setup function with an error. */ static void s_aws_http_on_client_connection_http_proxy_shutdown_fn( struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_http_proxy_user_data *proxy_ud = user_data; if (proxy_ud->state == AWS_PBS_SUCCESS) { AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "(%p) Proxy connection (channel %p) shutting down.", (void *)connection, (void *)aws_http_connection_get_channel(connection)); s_do_on_shutdown_callback(proxy_ud, error_code); } else { int ec = error_code; if (ec == AWS_ERROR_SUCCESS) { ec = proxy_ud->error_code; } if (ec == AWS_ERROR_SUCCESS) { ec = AWS_ERROR_UNKNOWN; } AWS_LOGF_WARN( AWS_LS_HTTP_CONNECTION, "(%p) Error %d while connecting to \"%s\" via proxy.", (void *)connection, ec, (char *)proxy_ud->original_host->bytes); s_do_on_setup_callback(proxy_ud, NULL, ec); } aws_http_proxy_user_data_destroy(user_data); } /* * On-any-error entry point that releases all resources involved in establishing the proxy connection. * This must not be invoked any time after a successful setup callback. 
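 * (Rough state flow for the tunneling path, as driven by the functions in this file and listed for
 * orientation only: AWS_PBS_SOCKET_CONNECT -> AWS_PBS_HTTP_CONNECT while the CONNECT request is in
 * flight -> AWS_PBS_TLS_NEGOTIATION when TLS to the origin is configured -> AWS_PBS_SUCCESS; any
 * failure routes through this function and ends in AWS_PBS_FAILURE.)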
*/ static void s_aws_http_proxy_user_data_shutdown(struct aws_http_proxy_user_data *user_data) { user_data->state = AWS_PBS_FAILURE; if (user_data->proxy_connection == NULL) { s_do_on_setup_callback(user_data, NULL, user_data->error_code); aws_http_proxy_user_data_destroy(user_data); return; } if (user_data->connect_stream) { aws_http_stream_release(user_data->connect_stream); user_data->connect_stream = NULL; } if (user_data->connect_request) { aws_http_message_destroy(user_data->connect_request); user_data->connect_request = NULL; } struct aws_http_connection *http_connection = user_data->proxy_connection; user_data->proxy_connection = NULL; aws_channel_shutdown(http_connection->channel_slot->channel, user_data->error_code); aws_http_connection_release(http_connection); } static struct aws_http_message *s_build_h1_proxy_connect_request(struct aws_http_proxy_user_data *user_data) { struct aws_http_message *request = aws_http_message_new_request(user_data->allocator); if (request == NULL) { return NULL; } struct aws_byte_buf path_buffer; AWS_ZERO_STRUCT(path_buffer); if (aws_http_message_set_request_method(request, aws_http_method_connect)) { goto on_error; } if (aws_byte_buf_init(&path_buffer, user_data->allocator, user_data->original_host->len + 10)) { goto on_error; } struct aws_byte_cursor host_cursor = aws_byte_cursor_from_string(user_data->original_host); if (aws_byte_buf_append(&path_buffer, &host_cursor)) { goto on_error; } struct aws_byte_cursor colon_cursor = aws_byte_cursor_from_c_str(":"); if (aws_byte_buf_append(&path_buffer, &colon_cursor)) { goto on_error; } char port_str[20] = "\0"; snprintf(port_str, sizeof(port_str), "%u", user_data->original_port); struct aws_byte_cursor port_cursor = aws_byte_cursor_from_c_str(port_str); if (aws_byte_buf_append(&path_buffer, &port_cursor)) { goto on_error; } struct aws_byte_cursor path_cursor = aws_byte_cursor_from_array(path_buffer.buffer, path_buffer.len); if (aws_http_message_set_request_path(request, path_cursor)) { goto on_error; } struct aws_http_header host_header = { .name = aws_byte_cursor_from_string(s_host_header_name), .value = aws_byte_cursor_from_array(path_buffer.buffer, path_buffer.len), }; if (aws_http_message_add_header(request, host_header)) { goto on_error; } struct aws_http_header keep_alive_header = { .name = aws_byte_cursor_from_string(s_proxy_connection_header_name), .value = aws_byte_cursor_from_string(s_proxy_connection_header_value), }; if (aws_http_message_add_header(request, keep_alive_header)) { goto on_error; } aws_byte_buf_clean_up(&path_buffer); return request; on_error: AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(%p) TLS proxy connection failed to build CONNECT request with error %d(%s)", (void *)user_data->proxy_connection, aws_last_error(), aws_error_str(aws_last_error())); aws_byte_buf_clean_up(&path_buffer); aws_http_message_destroy(request); return NULL; } /* * Builds the CONNECT request issued after proxy connection establishment, during the creation of * tls-enabled proxy connections. 
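 * For illustration (hypothetical values, wire shape derived from s_build_h1_proxy_connect_request
 * above), the HTTP/1.1 CONNECT built for an original host of example.com and port 443 looks roughly
 * like:
 *
 *     CONNECT example.com:443 HTTP/1.1
 *     Host: example.com:443
 *     Proxy-Connection: Keep-Alive
 *
 * The proxy negotiator's connect_request_transform may then append further headers (e.g. a
 * Proxy-Authorization header) before the request is sent.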
*/ static struct aws_http_message *s_build_proxy_connect_request(struct aws_http_proxy_user_data *user_data) { struct aws_http_connection *proxy_connection = user_data->proxy_connection; switch (proxy_connection->http_version) { case AWS_HTTP_VERSION_1_1: return s_build_h1_proxy_connect_request(user_data); default: aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL); return NULL; } } static int s_aws_http_on_incoming_body_tunnel_proxy( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct aws_http_proxy_user_data *context = user_data; aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body = context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_incoming_body_callback; if (on_incoming_body != NULL) { (*on_incoming_body)(context->proxy_negotiator, data); } aws_http_stream_update_window(stream, data->len); return AWS_OP_SUCCESS; } static int s_aws_http_on_response_headers_tunnel_proxy( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; struct aws_http_proxy_user_data *context = user_data; aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers = context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_incoming_headers_callback; if (on_incoming_headers != NULL) { (*on_incoming_headers)(context->proxy_negotiator, header_block, header_array, num_headers); } return AWS_OP_SUCCESS; } /* * Headers done callback for the CONNECT request made during tls proxy connections */ static int s_aws_http_on_incoming_header_block_done_tunnel_proxy( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { struct aws_http_proxy_user_data *context = user_data; if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { int status_code = AWS_HTTP_STATUS_CODE_UNKNOWN; aws_http_stream_get_incoming_response_status(stream, &status_code); context->connect_status_code = (enum aws_http_status_code)status_code; if (context->connect_status_code != AWS_HTTP_STATUS_CODE_200_OK) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(%p) Proxy CONNECT request failed with status code %d", (void *)context->proxy_connection, context->connect_status_code); context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED; } aws_http_proxy_negotiator_connect_status_fn *on_status = context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_status_callback; if (on_status != NULL) { (*on_status)(context->proxy_negotiator, context->connect_status_code); } } return AWS_OP_SUCCESS; } static int s_aws_http_apply_http_connection_to_proxied_channel(struct aws_http_proxy_user_data *context) { AWS_FATAL_ASSERT(context->proxy_connection != NULL); AWS_FATAL_ASSERT(context->original_http_on_setup != NULL); struct aws_channel *channel = aws_http_connection_get_channel(context->proxy_connection); struct aws_http_connection *connection = aws_http_connection_new_channel_handler( context->allocator, channel, false, context->original_tls_options != NULL, context->original_manual_window_management, context->prior_knowledge_http2, context->original_initial_window_size, context->alpn_string_map.p_impl == NULL ? 
NULL : &context->alpn_string_map, &context->original_http1_options, &context->original_http2_options, context->original_user_data); if (connection == NULL) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "static: Failed to create the client connection object, error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "id=%p: " PRInSTR " client connection established.", (void *)connection, AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->http_version))); context->final_connection = connection; return AWS_OP_SUCCESS; } static void s_do_final_proxied_channel_setup(struct aws_http_proxy_user_data *proxy_ud) { if (proxy_ud->original_http_on_setup != NULL) { /* * If we're transitioning to http with http setup/shutdown callbacks, try to apply a new http connection to * the channel */ if (s_aws_http_apply_http_connection_to_proxied_channel(proxy_ud)) { proxy_ud->error_code = aws_last_error(); s_aws_http_proxy_user_data_shutdown(proxy_ud); return; } s_do_on_setup_callback(proxy_ud, proxy_ud->final_connection, AWS_ERROR_SUCCESS); } else { /* * Otherwise invoke setup directly (which will end up being channel setup) */ s_do_on_setup_callback(proxy_ud, proxy_ud->proxy_connection, AWS_ERROR_SUCCESS); } /* Tell user of successful connection. */ proxy_ud->state = AWS_PBS_SUCCESS; } /* * Tls negotiation callback for tls proxy connections */ static void s_on_origin_server_tls_negotation_result( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int error_code, void *user_data) { (void)handler; (void)slot; struct aws_http_proxy_user_data *context = user_data; if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(%p) Proxy connection failed origin server TLS negotiation with error %d(%s)", (void *)context->proxy_connection, error_code, aws_error_str(error_code)); context->error_code = error_code; s_aws_http_proxy_user_data_shutdown(context); return; } s_do_final_proxied_channel_setup(context); } static int s_create_tunneling_connection(struct aws_http_proxy_user_data *user_data); static int s_make_proxy_connect_request(struct aws_http_proxy_user_data *user_data); static void s_zero_callbacks(struct aws_http_proxy_user_data *proxy_ud) { proxy_ud->original_http_on_shutdown = NULL; proxy_ud->original_http_on_setup = NULL; proxy_ud->original_channel_on_shutdown = NULL; proxy_ud->original_channel_on_setup = NULL; } /* * Stream done callback for the CONNECT request made during tls proxy connections */ static void s_aws_http_on_stream_complete_tunnel_proxy( struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_http_proxy_user_data *context = user_data; AWS_FATAL_ASSERT(stream == context->connect_stream); if (context->error_code == AWS_ERROR_SUCCESS && error_code != AWS_ERROR_SUCCESS) { context->error_code = error_code; } if (context->error_code != AWS_ERROR_SUCCESS) { context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED; if (context->connect_status_code == AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED) { enum aws_http_proxy_negotiation_retry_directive retry_directive = aws_http_proxy_negotiator_get_retry_directive(context->proxy_negotiator); if (retry_directive == AWS_HPNRD_NEW_CONNECTION) { struct aws_http_proxy_user_data *new_context = aws_http_proxy_user_data_new_reset_clone(context->allocator, context); if (new_context != NULL && s_create_tunneling_connection(new_context) == AWS_OP_SUCCESS) { /* * We successfully kicked off a new connection. 
By NULLing the callbacks on the old one, we can * shut it down quietly without the user being notified. The new connection will notify the user * based on its success or failure. */ s_zero_callbacks(context); context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE; } } else if (retry_directive == AWS_HPNRD_CURRENT_CONNECTION) { context->error_code = AWS_ERROR_SUCCESS; if (s_make_proxy_connect_request(context) == AWS_OP_SUCCESS) { return; } } } s_aws_http_proxy_user_data_shutdown(context); return; } AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "(%p) Proxy connection made successful CONNECT request to \"%s\" via proxy", (void *)context->proxy_connection, context->original_host->bytes); /* * We're finished with these, let's release */ aws_http_stream_release(stream); context->connect_stream = NULL; aws_http_message_destroy(context->connect_request); context->connect_request = NULL; AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "(%p) Beginning TLS negotiation through proxy", (void *)context->proxy_connection); if (context->original_tls_options != NULL) { /* * Perform TLS negotiation to the origin server through proxy */ context->original_tls_options->on_negotiation_result = s_on_origin_server_tls_negotation_result; context->state = AWS_PBS_TLS_NEGOTIATION; struct aws_channel *channel = aws_http_connection_get_channel(context->proxy_connection); struct aws_channel_slot *last_slot = aws_channel_get_first_slot(channel); while (last_slot->adj_right != NULL) { last_slot = last_slot->adj_right; } if (s_vtable->aws_channel_setup_client_tls(last_slot, context->original_tls_options)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(%p) Proxy connection failed to start TLS negotiation with error %d(%s)", (void *)context->proxy_connection, aws_last_error(), aws_error_str(aws_last_error())); s_aws_http_proxy_user_data_shutdown(context); return; } } else { s_do_final_proxied_channel_setup(context); } } static void s_terminate_tunneling_connect( struct aws_http_message *message, int error_code, void *internal_proxy_user_data) { (void)message; struct aws_http_proxy_user_data *proxy_ud = internal_proxy_user_data; AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(%p) Tunneling proxy connection failed to create request stream for CONNECT request with error %d(%s)", (void *)proxy_ud->proxy_connection, error_code, aws_error_str(error_code)); proxy_ud->error_code = error_code; s_aws_http_proxy_user_data_shutdown(proxy_ud); } static void s_continue_tunneling_connect(struct aws_http_message *message, void *internal_proxy_user_data) { struct aws_http_proxy_user_data *proxy_ud = internal_proxy_user_data; struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = message, .user_data = proxy_ud, .on_response_headers = s_aws_http_on_response_headers_tunnel_proxy, .on_response_header_block_done = s_aws_http_on_incoming_header_block_done_tunnel_proxy, .on_response_body = s_aws_http_on_incoming_body_tunnel_proxy, .on_complete = s_aws_http_on_stream_complete_tunnel_proxy, }; if (proxy_ud->connect_stream != NULL) { aws_http_stream_release(proxy_ud->connect_stream); } proxy_ud->connect_stream = aws_http_connection_make_request(proxy_ud->proxy_connection, &request_options); if (proxy_ud->connect_stream == NULL) { goto on_error; } aws_http_stream_activate(proxy_ud->connect_stream); return; on_error: s_aws_http_proxy_user_data_shutdown(proxy_ud); } /* * Issues a CONNECT request on an http connection */ static int s_make_proxy_connect_request(struct aws_http_proxy_user_data *user_data) { if 
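/*
 * On a 407 the stream-complete handler above consults the negotiator for a retry directive:
 * AWS_HPNRD_NEW_CONNECTION clones the proxy user data and dials the proxy again, while
 * AWS_HPNRD_CURRENT_CONNECTION clears the error and issues another CONNECT on the same
 * connection. A challenge/response negotiator (the NTLM strategy later in this archive does
 * exactly this) opts into the latter with a one-line vtable entry; a minimal sketch:
 *
 *   static enum aws_http_proxy_negotiation_retry_directive s_my_get_retry_directive(
 *       struct aws_http_proxy_negotiator *negotiator) {
 *       (void)negotiator;
 *       return AWS_HPNRD_CURRENT_CONNECTION;
 *   }
 *
 * Leaving the vtable entry NULL yields AWS_HPNRD_STOP, which makes the 407 terminal and shuts
 * the proxy connection down.
 */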
(user_data->connect_request != NULL) { aws_http_message_destroy(user_data->connect_request); user_data->connect_request = NULL; } user_data->connect_request = s_build_proxy_connect_request(user_data); if (user_data->connect_request == NULL) { return AWS_OP_ERR; } (*user_data->proxy_negotiator->strategy_vtable.tunnelling_vtable->connect_request_transform)( user_data->proxy_negotiator, user_data->connect_request, s_terminate_tunneling_connect, s_continue_tunneling_connect, user_data); return AWS_OP_SUCCESS; } /* * Connection setup callback for tunneling proxy connections. */ static void s_aws_http_on_client_connection_http_tunneling_proxy_setup_fn( struct aws_http_connection *connection, int error_code, void *user_data) { struct aws_http_proxy_user_data *proxy_ud = user_data; proxy_ud->error_code = error_code; if (error_code != AWS_ERROR_SUCCESS) { goto on_error; } AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "(%p) Making CONNECT request to proxy", (void *)proxy_ud->proxy_connection); proxy_ud->proxy_connection = connection; proxy_ud->state = AWS_PBS_HTTP_CONNECT; if (s_make_proxy_connect_request(proxy_ud)) { goto on_error; } return; on_error: s_aws_http_proxy_user_data_shutdown(proxy_ud); } /* * Checks for the special case when a request is an OPTIONS request with * * path and no query params */ static bool s_is_star_path_options_method(const struct aws_http_message *request) { struct aws_byte_cursor method_cursor; if (aws_http_message_get_request_method(request, &method_cursor)) { return false; } struct aws_byte_cursor options_cursor = aws_byte_cursor_from_string(s_options_method); if (!aws_byte_cursor_eq_ignore_case(&method_cursor, &options_cursor)) { return false; } struct aws_byte_cursor path_cursor; if (aws_http_message_get_request_path(request, &path_cursor)) { return false; } struct aws_byte_cursor star_cursor = aws_byte_cursor_from_string(s_star_path); if (!aws_byte_cursor_eq_ignore_case(&path_cursor, &star_cursor)) { return false; } return true; } /* * Modifies a requests uri by transforming it to absolute form according to * section 5.3.2 of rfc 7230 * * We do this by parsing the existing uri and then rebuilding it as an * absolute resource path (using the original connection options) */ int aws_http_rewrite_uri_for_proxy_request( struct aws_http_message *request, struct aws_http_proxy_user_data *proxy_user_data) { int result = AWS_OP_ERR; struct aws_uri target_uri; AWS_ZERO_STRUCT(target_uri); struct aws_byte_cursor path_cursor; AWS_ZERO_STRUCT(path_cursor); if (aws_http_message_get_request_path(request, &path_cursor)) { goto done; } /* Pull out the original path/query */ struct aws_uri uri; if (aws_uri_init_parse(&uri, proxy_user_data->allocator, &path_cursor)) { goto done; } const struct aws_byte_cursor *actual_path_cursor = aws_uri_path(&uri); const struct aws_byte_cursor *actual_query_cursor = aws_uri_query_string(&uri); /* now rebuild the uri with scheme, host and port subbed in from the original connection options */ struct aws_uri_builder_options target_uri_builder; AWS_ZERO_STRUCT(target_uri_builder); target_uri_builder.scheme = aws_http_scheme_http; target_uri_builder.path = *actual_path_cursor; target_uri_builder.host_name = aws_byte_cursor_from_string(proxy_user_data->original_host); target_uri_builder.port = proxy_user_data->original_port; target_uri_builder.query_string = *actual_query_cursor; if (aws_uri_init_from_builder_options(&target_uri, proxy_user_data->allocator, &target_uri_builder)) { goto done; } struct aws_byte_cursor full_target_uri = 
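/*
 * Concretely, for a forwarding proxy the origin-form target of each request is rebuilt into
 * absolute-form using the http scheme plus the original connection's host and port. A
 * hypothetical request against host "example.com", port 8080 would be rewritten roughly as:
 *
 *   before:  GET /index.html?a=1 HTTP/1.1
 *   after:   GET http://example.com:8080/index.html?a=1 HTTP/1.1
 *
 * The exact rendering of the port is left to aws_uri_init_from_builder_options; this is an
 * illustrative sketch, not captured output. The star-path special case handled nearby keeps
 * "OPTIONS *" requests from gaining a spurious trailing "/" during this rewrite.
 */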
aws_byte_cursor_from_array(target_uri.uri_str.buffer, target_uri.uri_str.len); /* * By rfc 7230, Section 5.3.4, a star-pathed options request made through a proxy MUST be transformed (at the last * proxy) back into a star-pathed request if the proxy request has an empty path and no query string. This * is behavior we want to support. So from our side, we need to make sure that star-pathed options requests * get translated into options requests with the authority as the uri and an empty path-query. * * Our URI transform always ends with a '/' which is technically not an empty path. To address this, * the easiest thing to do is just detect if this was originally a star-pathed options request * and drop the final '/' from the path. */ if (s_is_star_path_options_method(request)) { if (full_target_uri.len > 0 && *(full_target_uri.ptr + full_target_uri.len - 1) == '/') { full_target_uri.len -= 1; } } /* mutate the request with the new path value */ if (aws_http_message_set_request_path(request, full_target_uri)) { goto done; } result = AWS_OP_SUCCESS; done: aws_uri_clean_up(&target_uri); aws_uri_clean_up(&uri); return result; } /* * Plaintext proxy request transformation function * * Rewrites the target uri to absolute form and injects any desired headers */ static int s_proxy_http_request_transform(struct aws_http_message *request, void *user_data) { struct aws_http_proxy_user_data *proxy_ud = user_data; if (aws_http_rewrite_uri_for_proxy_request(request, proxy_ud)) { return AWS_OP_ERR; } if ((*proxy_ud->proxy_negotiator->strategy_vtable.forwarding_vtable->forward_request_transform)( proxy_ud->proxy_negotiator, request)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Top-level function to route a connection request through a proxy server, with no channel security */ static int s_aws_http_client_connect_via_forwarding_proxy(const struct aws_http_client_connection_options *options) { AWS_FATAL_ASSERT(options->tls_options == NULL); AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "(STATIC) Connecting to \"" PRInSTR "\" via proxy \"" PRInSTR "\"", AWS_BYTE_CURSOR_PRI(options->host_name), AWS_BYTE_CURSOR_PRI(options->proxy_options->host)); /* Create a wrapper user data that contains all proxy-related information, state, and user-facing callbacks */ struct aws_http_proxy_user_data *proxy_user_data = aws_http_proxy_user_data_new(options->allocator, options, NULL, NULL); if (proxy_user_data == NULL) { return AWS_OP_ERR; } AWS_FATAL_ASSERT(options->proxy_options != NULL); /* Fill in a new connection options pointing at the proxy */ struct aws_http_client_connection_options options_copy = *options; options_copy.proxy_options = NULL; options_copy.host_name = options->proxy_options->host; options_copy.port = options->proxy_options->port; options_copy.user_data = proxy_user_data; options_copy.on_setup = s_aws_http_on_client_connection_http_forwarding_proxy_setup_fn; options_copy.on_shutdown = s_aws_http_on_client_connection_http_proxy_shutdown_fn; options_copy.tls_options = options->proxy_options->tls_options; options_copy.requested_event_loop = options->requested_event_loop; options_copy.host_resolution_config = options->host_resolution_config; options_copy.prior_knowledge_http2 = false; /* ToDo, expose the protocol specific config for proxy connection. 
*/ int result = aws_http_client_connect_internal(&options_copy, s_proxy_http_request_transform); if (result == AWS_OP_ERR) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(STATIC) Proxy http connection failed client connect with error %d(%s)", aws_last_error(), aws_error_str(aws_last_error())); aws_http_proxy_user_data_destroy(proxy_user_data); } return result; } static int s_create_tunneling_connection(struct aws_http_proxy_user_data *user_data) { struct aws_http_client_connection_options connect_options; AWS_ZERO_STRUCT(connect_options); connect_options.self_size = sizeof(struct aws_http_client_connection_options); connect_options.allocator = user_data->allocator; connect_options.bootstrap = user_data->original_bootstrap; connect_options.host_name = aws_byte_cursor_from_buf(&user_data->proxy_config->host); connect_options.port = user_data->proxy_config->port; connect_options.socket_options = &user_data->original_socket_options; connect_options.tls_options = user_data->proxy_config->tls_options; connect_options.monitoring_options = NULL; /* ToDo */ connect_options.manual_window_management = user_data->original_manual_window_management; connect_options.initial_window_size = user_data->original_initial_window_size; connect_options.user_data = user_data; connect_options.on_setup = s_aws_http_on_client_connection_http_tunneling_proxy_setup_fn; connect_options.on_shutdown = s_aws_http_on_client_connection_http_proxy_shutdown_fn; connect_options.http1_options = NULL; /* ToDo, expose the protocol specific config for proxy connection. */ connect_options.http2_options = NULL; /* ToDo */ connect_options.requested_event_loop = user_data->requested_event_loop; connect_options.host_resolution_config = user_data->host_resolution_config; int result = aws_http_client_connect(&connect_options); if (result == AWS_OP_ERR) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "(STATIC) Proxy tunnel connection failed client connect with error %d(%s)", aws_last_error(), aws_error_str(aws_last_error())); aws_http_proxy_user_data_destroy(user_data); } return result; } /* * Top-level function to route a connection through a proxy server via a CONNECT request */ static int s_aws_http_client_connect_via_tunneling_proxy( const struct aws_http_client_connection_options *options, aws_client_bootstrap_on_channel_event_fn *on_channel_setup, aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown) { AWS_FATAL_ASSERT(options->proxy_options != NULL); AWS_LOGF_INFO( AWS_LS_HTTP_CONNECTION, "(STATIC) Connecting to \"" PRInSTR "\" through a tunnel via proxy \"" PRInSTR "\"", AWS_BYTE_CURSOR_PRI(options->host_name), AWS_BYTE_CURSOR_PRI(options->proxy_options->host)); /* Create a wrapper user data that contains all proxy-related information, state, and user-facing callbacks */ struct aws_http_proxy_user_data *user_data = aws_http_proxy_user_data_new(options->allocator, options, on_channel_setup, on_channel_shutdown); if (user_data == NULL) { return AWS_OP_ERR; } return s_create_tunneling_connection(user_data); } static enum aws_http_proxy_connection_type s_determine_proxy_connection_type( enum aws_http_proxy_connection_type proxy_connection_type, bool is_tls_connection) { if (proxy_connection_type != AWS_HPCT_HTTP_LEGACY) { return proxy_connection_type; } if (is_tls_connection) { return AWS_HPCT_HTTP_TUNNEL; } else { return AWS_HPCT_HTTP_FORWARD; } } static struct aws_string *s_get_proxy_environment_value( struct aws_allocator *allocator, const struct aws_string *env_name) { struct aws_string *out_string = NULL; if 
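/*
 * AWS_HPCT_HTTP_LEGACY exists for callers that predate explicit proxy connection types:
 * s_determine_proxy_connection_type maps it to AWS_HPCT_HTTP_TUNNEL when the main connection
 * uses TLS and to AWS_HPCT_HTTP_FORWARD otherwise. A caller that wants the choice spelled out
 * can set the type explicitly; a minimal sketch (proxy host and port are placeholders):
 *
 *   struct aws_http_proxy_options proxy_options;
 *   AWS_ZERO_STRUCT(proxy_options);
 *   proxy_options.connection_type = AWS_HPCT_HTTP_TUNNEL;
 *   proxy_options.host = aws_byte_cursor_from_c_str("proxy.example.com");
 *   proxy_options.port = 3128;
 *
 * Leaving connection_type at AWS_HPCT_HTTP_LEGACY defers the decision to the TLS-or-not test
 * above.
 */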
(aws_get_environment_value(allocator, env_name, &out_string) == AWS_OP_SUCCESS && out_string != NULL && out_string->len > 0) { AWS_LOGF_DEBUG( AWS_LS_HTTP_CONNECTION, "%s environment found, %s", aws_string_c_str(env_name), aws_string_c_str(out_string)); return out_string; } aws_string_destroy(out_string); return NULL; } static int s_proxy_uri_init_from_env_variable( struct aws_allocator *allocator, const struct aws_http_client_connection_options *options, struct aws_uri *proxy_uri, bool *found) { struct aws_string *proxy_uri_string = NULL; *found = false; if (options->tls_options) { proxy_uri_string = s_get_proxy_environment_value(allocator, s_https_proxy_env_var_low); if (proxy_uri_string == NULL) { proxy_uri_string = s_get_proxy_environment_value(allocator, s_https_proxy_env_var); } if (proxy_uri_string == NULL) { return AWS_OP_SUCCESS; } } else { proxy_uri_string = s_get_proxy_environment_value(allocator, s_http_proxy_env_var_low); if (proxy_uri_string == NULL) { proxy_uri_string = s_get_proxy_environment_value(allocator, s_http_proxy_env_var); } if (proxy_uri_string == NULL) { return AWS_OP_SUCCESS; } } struct aws_byte_cursor proxy_uri_cursor = aws_byte_cursor_from_string(proxy_uri_string); if (aws_uri_init_parse(proxy_uri, allocator, &proxy_uri_cursor)) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Could not parse found proxy URI."); aws_string_destroy(proxy_uri_string); return AWS_OP_ERR; } *found = true; aws_string_destroy(proxy_uri_string); return AWS_OP_SUCCESS; } static int s_connect_proxy(const struct aws_http_client_connection_options *options) { if (aws_http_options_validate_proxy_configuration(options)) { return AWS_OP_ERR; } enum aws_http_proxy_connection_type proxy_connection_type = s_determine_proxy_connection_type(options->proxy_options->connection_type, options->tls_options != NULL); switch (proxy_connection_type) { case AWS_HPCT_HTTP_FORWARD: return s_aws_http_client_connect_via_forwarding_proxy(options); case AWS_HPCT_HTTP_TUNNEL: return s_aws_http_client_connect_via_tunneling_proxy(options, NULL, NULL); default: return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } } static int s_setup_proxy_tls_env_variable( const struct aws_http_client_connection_options *options, struct aws_tls_connection_options *default_tls_connection_options, struct aws_http_proxy_options *proxy_options, struct aws_uri *proxy_uri) { (void)default_tls_connection_options; (void)proxy_uri; if (options->proxy_ev_settings->tls_options) { proxy_options->tls_options = options->proxy_ev_settings->tls_options; } else { #ifdef BYO_CRYPTO AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "Failed making default TLS context because of BYO_CRYPTO, set up the tls_options for proxy_env_settings to " "make it work."); return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); #else struct aws_tls_ctx *tls_ctx = NULL; struct aws_tls_ctx_options tls_ctx_options; AWS_ZERO_STRUCT(tls_ctx_options); /* create a default tls options */ aws_tls_ctx_options_init_default_client(&tls_ctx_options, options->allocator); struct aws_string *proxy_no_verify_peer_string = NULL; if (aws_get_environment_value( options->allocator, s_proxy_no_verify_peer_env_var, &proxy_no_verify_peer_string) == AWS_OP_SUCCESS && proxy_no_verify_peer_string != NULL) { /* turn off the peer verification, if setup from envrionment variable. Mostly for testing. 
*/ aws_tls_ctx_options_set_verify_peer(&tls_ctx_options, false); aws_string_destroy(proxy_no_verify_peer_string); } tls_ctx = aws_tls_client_ctx_new(options->allocator, &tls_ctx_options); aws_tls_ctx_options_clean_up(&tls_ctx_options); if (!tls_ctx) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Failed to create default TLS context."); return AWS_OP_ERR; } aws_tls_connection_options_init_from_ctx(default_tls_connection_options, tls_ctx); /* tls options hold a ref to the ctx */ aws_tls_ctx_release(tls_ctx); if (aws_tls_connection_options_set_server_name( default_tls_connection_options, options->allocator, &proxy_uri->host_name)) { AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Failed set server name for TLS connection options."); return AWS_OP_ERR; } proxy_options->tls_options = default_tls_connection_options; #endif } return AWS_OP_SUCCESS; } static int s_connect_proxy_via_env_variable(const struct aws_http_client_connection_options *options) { struct aws_http_proxy_options proxy_options; AWS_ZERO_STRUCT(proxy_options); struct aws_uri proxy_uri; AWS_ZERO_STRUCT(proxy_uri); struct aws_tls_connection_options default_tls_connection_options; AWS_ZERO_STRUCT(default_tls_connection_options); bool found = false; bool success = false; if (s_proxy_uri_init_from_env_variable(options->allocator, options, &proxy_uri, &found)) { /* Envrionment is set but failed to parse it */ goto done; } if (found) { proxy_options.host = proxy_uri.host_name; proxy_options.port = proxy_uri.port; proxy_options.connection_type = options->proxy_ev_settings->connection_type; if (proxy_options.connection_type == AWS_HPCT_HTTP_LEGACY) { if (options->tls_options) { /* Use tunneling when main connection use TLS. */ proxy_options.connection_type = AWS_HPCT_HTTP_TUNNEL; } else { /* Use forwarding proxy when main connection use clear text. */ proxy_options.connection_type = AWS_HPCT_HTTP_FORWARD; } } if (aws_byte_cursor_eq_ignore_case(&proxy_uri.scheme, &aws_http_scheme_https)) { if (s_setup_proxy_tls_env_variable(options, &default_tls_connection_options, &proxy_options, &proxy_uri)) { goto done; } } /* Support basic authentication. */ if (proxy_uri.password.len) { /* Has no empty password set */ struct aws_http_proxy_strategy_basic_auth_options config = { .proxy_connection_type = proxy_options.connection_type, .user_name = proxy_uri.user, .password = proxy_uri.password, }; proxy_options.proxy_strategy = aws_http_proxy_strategy_new_basic_auth(options->allocator, &config); } } else { success = true; goto done; } struct aws_http_client_connection_options copied_options = *options; copied_options.proxy_options = &proxy_options; if (s_connect_proxy(&copied_options)) { goto done; } success = true; done: aws_tls_connection_options_clean_up(&default_tls_connection_options); aws_http_proxy_strategy_release(proxy_options.proxy_strategy); aws_uri_clean_up(&proxy_uri); if (success && !found) { /* Successfully, but no envrionment variable found. Connect without proxy */ return aws_http_client_connect_internal(options, NULL); } return success ? 
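/*
 * The environment-variable path above looks for an https-proxy variable when the main
 * connection uses TLS and an http-proxy variable otherwise (lowercase name first, then
 * uppercase), wraps the proxy hop itself in TLS when the proxy URI uses the https scheme, and
 * turns a user:password component into a basic-auth strategy. A hypothetical shell setup this
 * code would pick up, assuming the conventional HTTPS_PROXY/HTTP_PROXY names implied by the
 * identifiers above (all values are placeholders):
 *
 *   export HTTPS_PROXY=https://alice:secret@proxy.example.com:8443
 *   export HTTP_PROXY=http://proxy.example.com:3128
 *
 * This path only runs when the caller opts in through proxy_ev_settings (AWS_HPEV_ENABLE) and
 * supplies no explicit proxy_options, as checked in aws_http_client_connect_via_proxy; with a
 * TLS main connection the legacy connection type then resolves to a CONNECT tunnel, and with a
 * plaintext main connection the request is forwarded in absolute-form instead.
 */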
AWS_OP_SUCCESS : AWS_OP_ERR; } /* * Dispatches a proxy-enabled connection request to the appropriate top-level connection function */ int aws_http_client_connect_via_proxy(const struct aws_http_client_connection_options *options) { if (options->proxy_options == NULL && options->proxy_ev_settings && options->proxy_ev_settings->env_var_type == AWS_HPEV_ENABLE) { return s_connect_proxy_via_env_variable(options); } return s_connect_proxy(options); } static struct aws_http_proxy_config *s_aws_http_proxy_config_new( struct aws_allocator *allocator, const struct aws_http_proxy_options *proxy_options, enum aws_http_proxy_connection_type override_proxy_connection_type) { AWS_FATAL_ASSERT(proxy_options != NULL); struct aws_http_proxy_config *config = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_config)); if (config == NULL) { return NULL; } config->allocator = allocator; config->connection_type = override_proxy_connection_type; if (aws_byte_buf_init_copy_from_cursor(&config->host, allocator, proxy_options->host)) { goto on_error; } if (proxy_options->tls_options) { config->tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options)); if (aws_tls_connection_options_copy(config->tls_options, proxy_options->tls_options)) { goto on_error; } } config->port = proxy_options->port; if (proxy_options->proxy_strategy != NULL) { config->proxy_strategy = aws_http_proxy_strategy_acquire(proxy_options->proxy_strategy); } else if (proxy_options->auth_type == AWS_HPAT_BASIC) { struct aws_http_proxy_strategy_basic_auth_options basic_config; AWS_ZERO_STRUCT(basic_config); basic_config.proxy_connection_type = override_proxy_connection_type; basic_config.user_name = proxy_options->auth_username; basic_config.password = proxy_options->auth_password; config->proxy_strategy = aws_http_proxy_strategy_new_basic_auth(allocator, &basic_config); } if (config->proxy_strategy == NULL) { switch (override_proxy_connection_type) { case AWS_HPCT_HTTP_FORWARD: config->proxy_strategy = aws_http_proxy_strategy_new_forwarding_identity(allocator); break; case AWS_HPCT_HTTP_TUNNEL: config->proxy_strategy = aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator); break; default: break; } if (config->proxy_strategy == NULL) { goto on_error; } } return config; on_error: aws_http_proxy_config_destroy(config); return NULL; } struct aws_http_proxy_config *aws_http_proxy_config_new_from_connection_options( struct aws_allocator *allocator, const struct aws_http_client_connection_options *options) { AWS_FATAL_ASSERT(options != NULL); AWS_FATAL_ASSERT(options->proxy_options != NULL); return s_aws_http_proxy_config_new( allocator, options->proxy_options, s_determine_proxy_connection_type(options->proxy_options->connection_type, options->tls_options != NULL)); } struct aws_http_proxy_config *aws_http_proxy_config_new_from_manager_options( struct aws_allocator *allocator, const struct aws_http_connection_manager_options *options) { AWS_FATAL_ASSERT(options != NULL); AWS_FATAL_ASSERT(options->proxy_options != NULL); return s_aws_http_proxy_config_new( allocator, options->proxy_options, s_determine_proxy_connection_type( options->proxy_options->connection_type, options->tls_connection_options != NULL)); } struct aws_http_proxy_config *aws_http_proxy_config_new_tunneling_from_proxy_options( struct aws_allocator *allocator, const struct aws_http_proxy_options *proxy_options) { return s_aws_http_proxy_config_new(allocator, proxy_options, AWS_HPCT_HTTP_TUNNEL); } struct aws_http_proxy_config 
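/*
 * When explicit proxy options are supplied, s_aws_http_proxy_config_new above picks the
 * negotiation strategy: an explicit proxy_strategy wins, otherwise AWS_HPAT_BASIC builds a
 * basic-auth strategy from auth_username/auth_password, and otherwise an identity strategy
 * matching the connection type is used. A minimal caller-side sketch of the basic-auth
 * shortcut (host, port, and credentials are placeholders):
 *
 *   struct aws_http_proxy_options proxy_options;
 *   AWS_ZERO_STRUCT(proxy_options);
 *   proxy_options.connection_type = AWS_HPCT_HTTP_TUNNEL;
 *   proxy_options.host = aws_byte_cursor_from_c_str("proxy.example.com");
 *   proxy_options.port = 3128;
 *   proxy_options.auth_type = AWS_HPAT_BASIC;
 *   proxy_options.auth_username = aws_byte_cursor_from_c_str("alice");
 *   proxy_options.auth_password = aws_byte_cursor_from_c_str("secret");
 *
 * Hand &proxy_options to the client connection options and connect; the dispatch in
 * aws_http_client_connect_via_proxy above routes it through the appropriate proxy path.
 */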
*aws_http_proxy_config_new_from_proxy_options( struct aws_allocator *allocator, const struct aws_http_proxy_options *proxy_options) { if (proxy_options->connection_type == AWS_HPCT_HTTP_LEGACY) { AWS_LOGF_ERROR(AWS_LS_HTTP_PROXY_NEGOTIATION, "LEGACY type is not supported to create proxy config"); return NULL; } return s_aws_http_proxy_config_new(allocator, proxy_options, proxy_options->connection_type); } struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options_with_tls_info( struct aws_allocator *allocator, const struct aws_http_proxy_options *proxy_options, bool is_tls_connection) { AWS_FATAL_ASSERT(proxy_options != NULL); return s_aws_http_proxy_config_new( allocator, proxy_options, s_determine_proxy_connection_type(proxy_options->connection_type, is_tls_connection)); } struct aws_http_proxy_config *aws_http_proxy_config_new_clone( struct aws_allocator *allocator, const struct aws_http_proxy_config *proxy_config) { AWS_FATAL_ASSERT(proxy_config != NULL); struct aws_http_proxy_config *config = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_config)); if (config == NULL) { return NULL; } config->connection_type = proxy_config->connection_type; if (aws_byte_buf_init_copy_from_cursor(&config->host, allocator, aws_byte_cursor_from_buf(&proxy_config->host))) { goto on_error; } if (proxy_config->tls_options) { config->tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options)); if (aws_tls_connection_options_copy(config->tls_options, proxy_config->tls_options)) { goto on_error; } } config->allocator = allocator; config->port = proxy_config->port; config->proxy_strategy = aws_http_proxy_strategy_acquire(proxy_config->proxy_strategy); return config; on_error: aws_http_proxy_config_destroy(config); return NULL; } void aws_http_proxy_config_destroy(struct aws_http_proxy_config *config) { if (config == NULL) { return; } aws_byte_buf_clean_up(&config->host); if (config->tls_options) { aws_tls_connection_options_clean_up(config->tls_options); aws_mem_release(config->allocator, config->tls_options); } aws_http_proxy_strategy_release(config->proxy_strategy); aws_mem_release(config->allocator, config); } void aws_http_proxy_options_init_from_config( struct aws_http_proxy_options *options, const struct aws_http_proxy_config *config) { AWS_FATAL_ASSERT(options && config); options->connection_type = config->connection_type; options->host = aws_byte_cursor_from_buf(&config->host); options->port = config->port; options->tls_options = config->tls_options; options->proxy_strategy = config->proxy_strategy; } int aws_http_options_validate_proxy_configuration(const struct aws_http_client_connection_options *options) { if (options == NULL || options->proxy_options == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } enum aws_http_proxy_connection_type proxy_type = options->proxy_options->connection_type; if (proxy_type == AWS_HPCT_HTTP_FORWARD && options->tls_options != NULL) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct aws_http_proxy_strategy *proxy_strategy = options->proxy_options->proxy_strategy; if (proxy_strategy != NULL) { if (proxy_strategy->proxy_connection_type != proxy_type) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } } return AWS_OP_SUCCESS; } struct aws_proxied_socket_channel_user_data { struct aws_allocator *allocator; struct aws_client_bootstrap *bootstrap; struct aws_channel *channel; aws_client_bootstrap_on_channel_event_fn *original_setup_callback; aws_client_bootstrap_on_channel_event_fn 
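/*
 * aws_http_proxy_config is the owned mirror of aws_http_proxy_options: it deep-copies the host
 * and TLS options, pins the strategy with a reference, and can be flattened back into options
 * with aws_http_proxy_options_init_from_config. The rehydrated options only borrow the
 * config's storage, so the config must outlive them. A minimal lifecycle sketch, assuming
 * connection_options already carries proxy_options:
 *
 *   struct aws_http_proxy_config *config =
 *       aws_http_proxy_config_new_from_connection_options(allocator, &connection_options);
 *   if (config != NULL) {
 *       struct aws_http_proxy_options rehydrated;
 *       AWS_ZERO_STRUCT(rehydrated);
 *       aws_http_proxy_options_init_from_config(&rehydrated, config);
 *       (use rehydrated while config is alive)
 *       aws_http_proxy_config_destroy(config);
 *   }
 *
 * aws_http_options_validate_proxy_configuration above additionally rejects a forwarding proxy
 * combined with main-connection TLS, and a strategy whose connection type disagrees with the
 * options.
 */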
*original_shutdown_callback; void *original_user_data; }; static void s_proxied_socket_channel_user_data_destroy(struct aws_proxied_socket_channel_user_data *user_data) { if (user_data == NULL) { return; } aws_client_bootstrap_release(user_data->bootstrap); aws_mem_release(user_data->allocator, user_data); } static struct aws_proxied_socket_channel_user_data *s_proxied_socket_channel_user_data_new( struct aws_allocator *allocator, struct aws_socket_channel_bootstrap_options *channel_options) { struct aws_proxied_socket_channel_user_data *user_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_proxied_socket_channel_user_data)); if (user_data == NULL) { return NULL; } user_data->allocator = allocator; user_data->original_setup_callback = channel_options->setup_callback; user_data->original_shutdown_callback = channel_options->shutdown_callback; user_data->original_user_data = channel_options->user_data; user_data->bootstrap = aws_client_bootstrap_acquire(channel_options->bootstrap); return user_data; } static void s_http_proxied_socket_channel_setup( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct aws_proxied_socket_channel_user_data *proxied_user_data = user_data; if (error_code != AWS_ERROR_SUCCESS || channel == NULL) { proxied_user_data->original_setup_callback( proxied_user_data->bootstrap, error_code, NULL, proxied_user_data->original_user_data); s_proxied_socket_channel_user_data_destroy(proxied_user_data); return; } proxied_user_data->channel = channel; proxied_user_data->original_setup_callback( proxied_user_data->bootstrap, AWS_ERROR_SUCCESS, proxied_user_data->channel, proxied_user_data->original_user_data); } static void s_http_proxied_socket_channel_shutdown( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; struct aws_proxied_socket_channel_user_data *proxied_user_data = user_data; proxied_user_data->original_shutdown_callback( proxied_user_data->bootstrap, error_code, proxied_user_data->channel, proxied_user_data->original_user_data); s_proxied_socket_channel_user_data_destroy(proxied_user_data); } int aws_http_proxy_new_socket_channel( struct aws_socket_channel_bootstrap_options *channel_options, const struct aws_http_proxy_options *proxy_options) { AWS_FATAL_ASSERT(channel_options != NULL && channel_options->bootstrap != NULL); AWS_FATAL_ASSERT(proxy_options != NULL); if (proxy_options->connection_type != AWS_HPCT_HTTP_TUNNEL) { AWS_LOGF_ERROR( AWS_LS_HTTP_PROXY_NEGOTIATION, "Creating a raw protocol channel through an http proxy requires a tunneling proxy " "configuration"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (channel_options->tls_options == NULL) { AWS_LOGF_ERROR( AWS_LS_HTTP_PROXY_NEGOTIATION, "Creating a raw protocol channel through an http proxy requires tls to the endpoint"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_allocator *allocator = channel_options->bootstrap->allocator; struct aws_proxied_socket_channel_user_data *user_data = s_proxied_socket_channel_user_data_new(allocator, channel_options); struct aws_http_client_connection_options http_connection_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; http_connection_options.allocator = allocator; http_connection_options.bootstrap = channel_options->bootstrap; http_connection_options.host_name = aws_byte_cursor_from_c_str(channel_options->host_name); http_connection_options.port = channel_options->port; 
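/*
 * aws_http_proxy_new_socket_channel lets a non-HTTP protocol ride a CONNECT tunnel: it insists
 * on AWS_HPCT_HTTP_TUNNEL and on TLS to the final endpoint, wraps the caller's channel
 * callbacks, and drives the regular tunneling setup with NULL http callbacks so channel
 * setup/shutdown are surfaced instead. A hedged caller-side sketch (the host, port, callback
 * names, and user data are placeholders):
 *
 *   struct aws_socket_channel_bootstrap_options channel_options;
 *   AWS_ZERO_STRUCT(channel_options);
 *   channel_options.bootstrap = bootstrap;
 *   channel_options.host_name = "service.example.com";
 *   channel_options.port = 8883;
 *   channel_options.socket_options = &socket_options;
 *   channel_options.tls_options = &endpoint_tls_options;
 *   channel_options.setup_callback = s_my_channel_setup;
 *   channel_options.shutdown_callback = s_my_channel_shutdown;
 *   channel_options.user_data = my_user_data;
 *
 *   if (aws_http_proxy_new_socket_channel(&channel_options, &proxy_options)) {
 *       (handle aws_last_error())
 *   }
 */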
http_connection_options.socket_options = channel_options->socket_options; http_connection_options.tls_options = channel_options->tls_options; http_connection_options.proxy_options = proxy_options; http_connection_options.user_data = user_data; http_connection_options.on_setup = NULL; /* use channel callbacks, not http callbacks */ http_connection_options.on_shutdown = NULL; /* use channel callbacks, not http callbacks */ http_connection_options.requested_event_loop = channel_options->requested_event_loop; http_connection_options.host_resolution_config = channel_options->host_resolution_override_config; if (s_aws_http_client_connect_via_tunneling_proxy( &http_connection_options, s_http_proxied_socket_channel_setup, s_http_proxied_socket_channel_shutdown)) { goto on_error; } return AWS_OP_SUCCESS; on_error: s_proxied_socket_channel_user_data_destroy(user_data); return AWS_OP_ERR; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/proxy_strategy.c000066400000000000000000001747411456575232400247740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable : 4221) #endif /* _MSC_VER */ struct aws_http_proxy_negotiator *aws_http_proxy_negotiator_acquire( struct aws_http_proxy_negotiator *proxy_negotiator) { if (proxy_negotiator != NULL) { aws_ref_count_acquire(&proxy_negotiator->ref_count); } return proxy_negotiator; } void aws_http_proxy_negotiator_release(struct aws_http_proxy_negotiator *proxy_negotiator) { if (proxy_negotiator != NULL) { aws_ref_count_release(&proxy_negotiator->ref_count); } } struct aws_http_proxy_negotiator *aws_http_proxy_strategy_create_negotiator( struct aws_http_proxy_strategy *strategy, struct aws_allocator *allocator) { if (strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } return strategy->vtable->create_negotiator(strategy, allocator); } enum aws_http_proxy_negotiation_retry_directive aws_http_proxy_negotiator_get_retry_directive( struct aws_http_proxy_negotiator *proxy_negotiator) { if (proxy_negotiator != NULL) { if (proxy_negotiator->strategy_vtable.tunnelling_vtable->get_retry_directive != NULL) { return proxy_negotiator->strategy_vtable.tunnelling_vtable->get_retry_directive(proxy_negotiator); } } return AWS_HPNRD_STOP; } struct aws_http_proxy_strategy *aws_http_proxy_strategy_acquire(struct aws_http_proxy_strategy *proxy_strategy) { if (proxy_strategy != NULL) { aws_ref_count_acquire(&proxy_strategy->ref_count); } return proxy_strategy; } void aws_http_proxy_strategy_release(struct aws_http_proxy_strategy *proxy_strategy) { if (proxy_strategy != NULL) { aws_ref_count_release(&proxy_strategy->ref_count); } } /*****************************************************************************************************************/ enum proxy_negotiator_connect_state { AWS_PNCS_READY, AWS_PNCS_IN_PROGRESS, AWS_PNCS_SUCCESS, AWS_PNCS_FAILURE, }; /* Functions for basic auth strategy */ struct aws_http_proxy_strategy_basic_auth { struct aws_allocator *allocator; struct aws_string *user_name; struct aws_string *password; struct aws_http_proxy_strategy strategy_base; }; static void s_destroy_basic_auth_strategy(struct aws_http_proxy_strategy *proxy_strategy) { struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy = proxy_strategy->impl; aws_string_destroy(basic_auth_strategy->user_name); 
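/*
 * Strategies and negotiators are plain ref-counted objects: acquire and release are NULL-safe,
 * aws_http_proxy_strategy_create_negotiator is the per-strategy factory hook, and a negotiator
 * with no get_retry_directive entry reports AWS_HPNRD_STOP. A caller normally only touches the
 * strategy; a minimal ownership sketch using the one-time identity constructor defined later
 * in this file:
 *
 *   struct aws_http_proxy_strategy *strategy =
 *       aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator);
 *   proxy_options.proxy_strategy = strategy;
 *   (connect; the internal proxy config acquires its own reference)
 *   aws_http_proxy_strategy_release(strategy);
 *
 * The one-time identity strategy is also the default for tunnelling proxies when no strategy
 * and no auth_type are supplied, so attaching it explicitly matters only as an ownership
 * illustration.
 */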
aws_string_destroy(basic_auth_strategy->password); aws_mem_release(basic_auth_strategy->allocator, basic_auth_strategy); } struct aws_http_proxy_negotiator_basic_auth { struct aws_allocator *allocator; struct aws_http_proxy_strategy *strategy; enum proxy_negotiator_connect_state connect_state; struct aws_http_proxy_negotiator negotiator_base; }; static void s_destroy_basic_auth_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl; aws_http_proxy_strategy_release(basic_auth_negotiator->strategy); aws_mem_release(basic_auth_negotiator->allocator, basic_auth_negotiator); } AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_name, "Proxy-Authorization"); AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_basic_prefix, "Basic "); /* * Adds a proxy authentication header based on the basic authentication mode, rfc7617 */ static int s_add_basic_proxy_authentication_header( struct aws_allocator *allocator, struct aws_http_message *request, struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator) { struct aws_byte_buf base64_input_value; AWS_ZERO_STRUCT(base64_input_value); struct aws_byte_buf header_value; AWS_ZERO_STRUCT(header_value); int result = AWS_OP_ERR; struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy = basic_auth_negotiator->strategy->impl; if (aws_byte_buf_init( &base64_input_value, allocator, basic_auth_strategy->user_name->len + basic_auth_strategy->password->len + 1)) { goto done; } /* First build a buffer with "username:password" in it */ struct aws_byte_cursor username_cursor = aws_byte_cursor_from_string(basic_auth_strategy->user_name); if (aws_byte_buf_append(&base64_input_value, &username_cursor)) { goto done; } struct aws_byte_cursor colon_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":"); if (aws_byte_buf_append(&base64_input_value, &colon_cursor)) { goto done; } struct aws_byte_cursor password_cursor = aws_byte_cursor_from_string(basic_auth_strategy->password); if (aws_byte_buf_append(&base64_input_value, &password_cursor)) { goto done; } struct aws_byte_cursor base64_source_cursor = aws_byte_cursor_from_array(base64_input_value.buffer, base64_input_value.len); /* Figure out how much room we need in our final header value buffer */ size_t required_size = 0; if (aws_base64_compute_encoded_len(base64_source_cursor.len, &required_size)) { goto done; } required_size += s_proxy_authorization_header_basic_prefix->len + 1; if (aws_byte_buf_init(&header_value, allocator, required_size)) { goto done; } /* Build the final header value by appending the authorization type and the base64 encoding string together */ struct aws_byte_cursor basic_prefix = aws_byte_cursor_from_string(s_proxy_authorization_header_basic_prefix); if (aws_byte_buf_append_dynamic(&header_value, &basic_prefix)) { goto done; } if (aws_base64_encode(&base64_source_cursor, &header_value)) { goto done; } struct aws_http_header header = { .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name), .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len), }; if (aws_http_message_add_header(request, header)) { goto done; } result = AWS_OP_SUCCESS; done: aws_byte_buf_clean_up(&header_value); aws_byte_buf_clean_up(&base64_input_value); return result; } int s_basic_auth_forward_add_header( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message) { struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = 
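/*
 * The helper above implements rfc7617: the header value is the literal prefix "Basic "
 * followed by base64("user_name:password"), emitted under the Proxy-Authorization header
 * name. A worked example with placeholder credentials:
 *
 *   user_name = "alice", password = "secret"
 *   base64("alice:secret") = "YWxpY2U6c2VjcmV0"
 *
 *   Proxy-Authorization: Basic YWxpY2U6c2VjcmV0
 *
 * For a forwarding proxy the header is added to every rewritten request; for a tunnelling
 * proxy it is added only to the CONNECT request.
 */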
proxy_negotiator->impl; return s_add_basic_proxy_authentication_header(basic_auth_negotiator->allocator, message, basic_auth_negotiator); } void s_basic_auth_tunnel_add_header( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message, aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback, aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback, void *internal_proxy_user_data) { struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl; if (basic_auth_negotiator->connect_state != AWS_PNCS_READY) { negotiation_termination_callback(message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, internal_proxy_user_data); return; } basic_auth_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; if (s_add_basic_proxy_authentication_header(basic_auth_negotiator->allocator, message, basic_auth_negotiator)) { negotiation_termination_callback(message, aws_last_error(), internal_proxy_user_data); return; } negotiation_http_request_forward_callback(message, internal_proxy_user_data); } static int s_basic_auth_on_connect_status( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_status_code status_code) { struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl; if (basic_auth_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) { if (AWS_HTTP_STATUS_CODE_200_OK != status_code) { basic_auth_negotiator->connect_state = AWS_PNCS_FAILURE; } else { basic_auth_negotiator->connect_state = AWS_PNCS_SUCCESS; } } return AWS_OP_SUCCESS; } static struct aws_http_proxy_negotiator_forwarding_vtable s_basic_auth_proxy_negotiator_forwarding_vtable = { .forward_request_transform = s_basic_auth_forward_add_header, }; static struct aws_http_proxy_negotiator_tunnelling_vtable s_basic_auth_proxy_negotiator_tunneling_vtable = { .on_status_callback = s_basic_auth_on_connect_status, .connect_request_transform = s_basic_auth_tunnel_add_header, }; static struct aws_http_proxy_negotiator *s_create_basic_auth_negotiator( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator) { if (proxy_strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_basic_auth)); if (basic_auth_negotiator == NULL) { return NULL; } basic_auth_negotiator->allocator = allocator; basic_auth_negotiator->connect_state = AWS_PNCS_READY; basic_auth_negotiator->negotiator_base.impl = basic_auth_negotiator; aws_ref_count_init( &basic_auth_negotiator->negotiator_base.ref_count, &basic_auth_negotiator->negotiator_base, (aws_simple_completion_callback *)s_destroy_basic_auth_negotiator); if (proxy_strategy->proxy_connection_type == AWS_HPCT_HTTP_FORWARD) { basic_auth_negotiator->negotiator_base.strategy_vtable.forwarding_vtable = &s_basic_auth_proxy_negotiator_forwarding_vtable; } else { basic_auth_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable = &s_basic_auth_proxy_negotiator_tunneling_vtable; } basic_auth_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy); return &basic_auth_negotiator->negotiator_base; } static struct aws_http_proxy_strategy_vtable s_basic_auth_proxy_strategy_vtable = { .create_negotiator = s_create_basic_auth_negotiator, }; struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_basic_auth( struct aws_allocator *allocator, struct 
aws_http_proxy_strategy_basic_auth_options *config) { if (config == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (config->proxy_connection_type != AWS_HPCT_HTTP_FORWARD && config->proxy_connection_type != AWS_HPCT_HTTP_TUNNEL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_basic_auth)); if (basic_auth_strategy == NULL) { return NULL; } basic_auth_strategy->strategy_base.impl = basic_auth_strategy; basic_auth_strategy->strategy_base.vtable = &s_basic_auth_proxy_strategy_vtable; basic_auth_strategy->allocator = allocator; basic_auth_strategy->strategy_base.proxy_connection_type = config->proxy_connection_type; aws_ref_count_init( &basic_auth_strategy->strategy_base.ref_count, &basic_auth_strategy->strategy_base, (aws_simple_completion_callback *)s_destroy_basic_auth_strategy); basic_auth_strategy->user_name = aws_string_new_from_cursor(allocator, &config->user_name); if (basic_auth_strategy->user_name == NULL) { goto on_error; } basic_auth_strategy->password = aws_string_new_from_cursor(allocator, &config->password); if (basic_auth_strategy->password == NULL) { goto on_error; } return &basic_auth_strategy->strategy_base; on_error: aws_http_proxy_strategy_release(&basic_auth_strategy->strategy_base); return NULL; } /*****************************************************************************************************************/ struct aws_http_proxy_strategy_one_time_identity { struct aws_allocator *allocator; struct aws_http_proxy_strategy strategy_base; }; struct aws_http_proxy_negotiator_one_time_identity { struct aws_allocator *allocator; enum proxy_negotiator_connect_state connect_state; struct aws_http_proxy_negotiator negotiator_base; }; static void s_destroy_one_time_identity_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_one_time_identity *identity_negotiator = proxy_negotiator->impl; aws_mem_release(identity_negotiator->allocator, identity_negotiator); } void s_one_time_identity_connect_transform( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message, aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback, aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback, void *internal_proxy_user_data) { struct aws_http_proxy_negotiator_one_time_identity *one_time_identity_negotiator = proxy_negotiator->impl; if (one_time_identity_negotiator->connect_state != AWS_PNCS_READY) { negotiation_termination_callback(message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, internal_proxy_user_data); return; } one_time_identity_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; negotiation_http_request_forward_callback(message, internal_proxy_user_data); } static int s_one_time_identity_on_connect_status( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_status_code status_code) { struct aws_http_proxy_negotiator_one_time_identity *one_time_identity_negotiator = proxy_negotiator->impl; if (one_time_identity_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) { if (AWS_HTTP_STATUS_CODE_200_OK != status_code) { one_time_identity_negotiator->connect_state = AWS_PNCS_FAILURE; } else { one_time_identity_negotiator->connect_state = AWS_PNCS_SUCCESS; } } return AWS_OP_SUCCESS; } static struct aws_http_proxy_negotiator_tunnelling_vtable 
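/*
 * aws_http_proxy_strategy_new_basic_auth above is the explicit counterpart to the
 * AWS_HPAT_BASIC shortcut on aws_http_proxy_options: it lets the caller hold and reuse the
 * strategy object directly. A minimal sketch (credentials are placeholders); note that the
 * strategy's proxy_connection_type must match the connection type in the proxy options or
 * validation fails later:
 *
 *   struct aws_http_proxy_strategy_basic_auth_options basic_config = {
 *       .proxy_connection_type = AWS_HPCT_HTTP_TUNNEL,
 *       .user_name = aws_byte_cursor_from_c_str("alice"),
 *       .password = aws_byte_cursor_from_c_str("secret"),
 *   };
 *   struct aws_http_proxy_strategy *strategy =
 *       aws_http_proxy_strategy_new_basic_auth(allocator, &basic_config);
 *   proxy_options.proxy_strategy = strategy;
 *
 * Release the strategy once the connection (or connection manager) holding it has been
 * created; the internal proxy config keeps its own reference.
 */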
s_one_time_identity_proxy_negotiator_tunneling_vtable = { .on_status_callback = s_one_time_identity_on_connect_status, .connect_request_transform = s_one_time_identity_connect_transform, }; static struct aws_http_proxy_negotiator *s_create_one_time_identity_negotiator( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator) { if (proxy_strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_negotiator_one_time_identity *identity_negotiator = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_one_time_identity)); if (identity_negotiator == NULL) { return NULL; } identity_negotiator->allocator = allocator; identity_negotiator->connect_state = AWS_PNCS_READY; identity_negotiator->negotiator_base.impl = identity_negotiator; aws_ref_count_init( &identity_negotiator->negotiator_base.ref_count, &identity_negotiator->negotiator_base, (aws_simple_completion_callback *)s_destroy_one_time_identity_negotiator); identity_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable = &s_one_time_identity_proxy_negotiator_tunneling_vtable; return &identity_negotiator->negotiator_base; } static struct aws_http_proxy_strategy_vtable s_one_time_identity_proxy_strategy_vtable = { .create_negotiator = s_create_one_time_identity_negotiator, }; static void s_destroy_one_time_identity_strategy(struct aws_http_proxy_strategy *proxy_strategy) { struct aws_http_proxy_strategy_one_time_identity *identity_strategy = proxy_strategy->impl; aws_mem_release(identity_strategy->allocator, identity_strategy); } struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_one_time_identity( struct aws_allocator *allocator) { if (allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy_one_time_identity *identity_strategy = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_one_time_identity)); if (identity_strategy == NULL) { return NULL; } identity_strategy->strategy_base.impl = identity_strategy; identity_strategy->strategy_base.vtable = &s_one_time_identity_proxy_strategy_vtable; identity_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL; identity_strategy->allocator = allocator; aws_ref_count_init( &identity_strategy->strategy_base.ref_count, &identity_strategy->strategy_base, (aws_simple_completion_callback *)s_destroy_one_time_identity_strategy); return &identity_strategy->strategy_base; } /******************************************************************************************************************/ struct aws_http_proxy_strategy_forwarding_identity { struct aws_allocator *allocator; struct aws_http_proxy_strategy strategy_base; }; struct aws_http_proxy_negotiator_forwarding_identity { struct aws_allocator *allocator; struct aws_http_proxy_negotiator negotiator_base; }; static void s_destroy_forwarding_identity_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_forwarding_identity *identity_negotiator = proxy_negotiator->impl; aws_mem_release(identity_negotiator->allocator, identity_negotiator); } int s_forwarding_identity_connect_transform( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message) { (void)message; (void)proxy_negotiator; return AWS_OP_SUCCESS; } static struct aws_http_proxy_negotiator_forwarding_vtable s_forwarding_identity_proxy_negotiator_tunneling_vtable = { .forward_request_transform = 
s_forwarding_identity_connect_transform, }; static struct aws_http_proxy_negotiator *s_create_forwarding_identity_negotiator( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator) { if (proxy_strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_negotiator_forwarding_identity *identity_negotiator = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_forwarding_identity)); if (identity_negotiator == NULL) { return NULL; } identity_negotiator->allocator = allocator; identity_negotiator->negotiator_base.impl = identity_negotiator; aws_ref_count_init( &identity_negotiator->negotiator_base.ref_count, &identity_negotiator->negotiator_base, (aws_simple_completion_callback *)s_destroy_forwarding_identity_negotiator); identity_negotiator->negotiator_base.strategy_vtable.forwarding_vtable = &s_forwarding_identity_proxy_negotiator_tunneling_vtable; return &identity_negotiator->negotiator_base; } static struct aws_http_proxy_strategy_vtable s_forwarding_identity_strategy_vtable = { .create_negotiator = s_create_forwarding_identity_negotiator, }; static void s_destroy_forwarding_identity_strategy(struct aws_http_proxy_strategy *proxy_strategy) { struct aws_http_proxy_strategy_forwarding_identity *identity_strategy = proxy_strategy->impl; aws_mem_release(identity_strategy->allocator, identity_strategy); } struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_forwarding_identity(struct aws_allocator *allocator) { if (allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy_forwarding_identity *identity_strategy = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_forwarding_identity)); if (identity_strategy == NULL) { return NULL; } identity_strategy->strategy_base.impl = identity_strategy; identity_strategy->strategy_base.vtable = &s_forwarding_identity_strategy_vtable; identity_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_FORWARD; identity_strategy->allocator = allocator; aws_ref_count_init( &identity_strategy->strategy_base.ref_count, &identity_strategy->strategy_base, (aws_simple_completion_callback *)s_destroy_forwarding_identity_strategy); return &identity_strategy->strategy_base; } /******************************************************************************************************************/ /* kerberos */ AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_kerberos_prefix, "Negotiate "); struct aws_http_proxy_strategy_tunneling_kerberos { struct aws_allocator *allocator; aws_http_proxy_negotiation_get_token_sync_fn *get_token; void *get_token_user_data; struct aws_http_proxy_strategy strategy_base; }; struct aws_http_proxy_negotiator_tunneling_kerberos { struct aws_allocator *allocator; struct aws_http_proxy_strategy *strategy; enum proxy_negotiator_connect_state connect_state; /* * ToDo: make adaptive and add any state needed here * * Likely things include response code (from the vanilla CONNECT) and the appropriate headers in * the response */ struct aws_http_proxy_negotiator negotiator_base; }; /* * Adds a proxy authentication header based on the user kerberos authentication token * This uses a token that is already base64 encoded */ static int s_add_kerberos_proxy_usertoken_authentication_header( struct aws_allocator *allocator, struct aws_http_message *request, struct aws_byte_cursor user_token) { struct aws_byte_buf header_value; AWS_ZERO_STRUCT(header_value); int result = 
AWS_OP_ERR; if (aws_byte_buf_init( &header_value, allocator, s_proxy_authorization_header_kerberos_prefix->len + user_token.len)) { goto done; } /* First append proxy authorization header kerberos prefix */ struct aws_byte_cursor auth_header_cursor = aws_byte_cursor_from_string(s_proxy_authorization_header_kerberos_prefix); if (aws_byte_buf_append(&header_value, &auth_header_cursor)) { goto done; } /* Append token to it */ if (aws_byte_buf_append(&header_value, &user_token)) { goto done; } struct aws_http_header header = { .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name), .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len), }; if (aws_http_message_add_header(request, header)) { goto done; } result = AWS_OP_SUCCESS; done: aws_byte_buf_clean_up(&header_value); return result; } static void s_kerberos_tunnel_transform_connect( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message, aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback, aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback, void *internal_proxy_user_data) { struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl; struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy = kerberos_negotiator->strategy->impl; int result = AWS_OP_ERR; int error_code = AWS_ERROR_SUCCESS; struct aws_string *kerberos_token = NULL; if (kerberos_negotiator->connect_state == AWS_PNCS_FAILURE) { error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED; goto done; } if (kerberos_negotiator->connect_state != AWS_PNCS_READY) { error_code = AWS_ERROR_INVALID_STATE; goto done; } kerberos_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; kerberos_token = kerberos_strategy->get_token(kerberos_strategy->get_token_user_data, &error_code); if (kerberos_token == NULL || error_code != AWS_ERROR_SUCCESS) { goto done; } /*transform the header with proxy authenticate:Negotiate and kerberos token*/ if (s_add_kerberos_proxy_usertoken_authentication_header( kerberos_negotiator->allocator, message, aws_byte_cursor_from_string(kerberos_token))) { error_code = aws_last_error(); goto done; } kerberos_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { if (error_code == AWS_ERROR_SUCCESS) { error_code = AWS_ERROR_UNKNOWN; } negotiation_termination_callback(message, error_code, internal_proxy_user_data); } else { negotiation_http_request_forward_callback(message, internal_proxy_user_data); } aws_string_destroy(kerberos_token); } static int s_kerberos_on_incoming_header_adaptive( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers) { struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl; (void)kerberos_negotiator; (void)header_block; (void)header_array; (void)num_headers; /* TODO: process vanilla CONNECT response headers here to improve usage/application */ return AWS_OP_SUCCESS; } static int s_kerberos_on_connect_status( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_status_code status_code) { struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl; /* TODO: process status code of vanilla CONNECT request here to improve usage/application */ if (kerberos_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) { if 
(AWS_HTTP_STATUS_CODE_200_OK != status_code) { kerberos_negotiator->connect_state = AWS_PNCS_FAILURE; } else { kerberos_negotiator->connect_state = AWS_PNCS_SUCCESS; } } return AWS_OP_SUCCESS; } static int s_kerberos_on_incoming_body( struct aws_http_proxy_negotiator *proxy_negotiator, const struct aws_byte_cursor *data) { struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl; (void)kerberos_negotiator; (void)data; return AWS_OP_SUCCESS; } static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_kerberos_proxy_negotiator_tunneling_vtable = { .on_incoming_body_callback = s_kerberos_on_incoming_body, .on_incoming_headers_callback = s_kerberos_on_incoming_header_adaptive, .on_status_callback = s_kerberos_on_connect_status, .connect_request_transform = s_kerberos_tunnel_transform_connect, }; static void s_destroy_tunneling_kerberos_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl; aws_http_proxy_strategy_release(kerberos_negotiator->strategy); aws_mem_release(kerberos_negotiator->allocator, kerberos_negotiator); } static struct aws_http_proxy_negotiator *s_create_tunneling_kerberos_negotiator( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator) { if (proxy_strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_kerberos)); if (kerberos_negotiator == NULL) { return NULL; } kerberos_negotiator->allocator = allocator; kerberos_negotiator->negotiator_base.impl = kerberos_negotiator; aws_ref_count_init( &kerberos_negotiator->negotiator_base.ref_count, &kerberos_negotiator->negotiator_base, (aws_simple_completion_callback *)s_destroy_tunneling_kerberos_negotiator); kerberos_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable = &s_tunneling_kerberos_proxy_negotiator_tunneling_vtable; kerberos_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy); return &kerberos_negotiator->negotiator_base; } static struct aws_http_proxy_strategy_vtable s_tunneling_kerberos_strategy_vtable = { .create_negotiator = s_create_tunneling_kerberos_negotiator, }; static void s_destroy_tunneling_kerberos_strategy(struct aws_http_proxy_strategy *proxy_strategy) { struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy = proxy_strategy->impl; aws_mem_release(kerberos_strategy->allocator, kerberos_strategy); } struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_kerberos( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_kerberos_options *config) { if (allocator == NULL || config == NULL || config->get_token == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_kerberos)); if (kerberos_strategy == NULL) { return NULL; } kerberos_strategy->strategy_base.impl = kerberos_strategy; kerberos_strategy->strategy_base.vtable = &s_tunneling_kerberos_strategy_vtable; kerberos_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL; kerberos_strategy->allocator = allocator; aws_ref_count_init( &kerberos_strategy->strategy_base.ref_count, &kerberos_strategy->strategy_base, 
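/*
 * The kerberos strategy defers entirely to the application for the Negotiate token: get_token
 * is invoked synchronously during the CONNECT transform and must return an already
 * base64-encoded token as an aws_string, or NULL together with an error code. A hedged sketch
 * with the callback shape inferred from the call in s_kerberos_tunnel_transform_connect above;
 * the source struct and variable names are placeholders:
 *
 *   static struct aws_string *s_my_get_negotiate_token(void *user_data, int *out_error_code) {
 *       struct my_kerberos_source *source = user_data;   (hypothetical application type)
 *       *out_error_code = AWS_ERROR_SUCCESS;
 *       return aws_string_new_from_c_str(source->allocator, source->cached_token_base64);
 *   }
 *
 *   struct aws_http_proxy_strategy_tunneling_kerberos_options kerberos_config = {
 *       .get_token = s_my_get_negotiate_token,
 *       .get_token_user_data = &my_source,
 *   };
 *   struct aws_http_proxy_strategy *strategy =
 *       aws_http_proxy_strategy_new_tunneling_kerberos(allocator, &kerberos_config);
 *
 * The resulting header is "Proxy-Authorization: Negotiate <token>"; the negotiator destroys
 * the returned string after the CONNECT request has been transformed.
 */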
(aws_simple_completion_callback *)s_destroy_tunneling_kerberos_strategy); kerberos_strategy->get_token = config->get_token; kerberos_strategy->get_token_user_data = config->get_token_user_data; return &kerberos_strategy->strategy_base; } /******************************************************************************************************************/ struct aws_http_proxy_strategy_tunneling_ntlm { struct aws_allocator *allocator; aws_http_proxy_negotiation_get_token_sync_fn *get_token; aws_http_proxy_negotiation_get_challenge_token_sync_fn *get_challenge_token; void *get_challenge_token_user_data; struct aws_http_proxy_strategy strategy_base; }; struct aws_http_proxy_negotiator_tunneling_ntlm { struct aws_allocator *allocator; struct aws_http_proxy_strategy *strategy; enum proxy_negotiator_connect_state connect_state; struct aws_string *challenge_token; struct aws_http_proxy_negotiator negotiator_base; }; AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_ntlm_prefix, "NTLM "); /* * Adds a proxy authentication header based on ntlm credential or response provided by user */ static int s_add_ntlm_proxy_usertoken_authentication_header( struct aws_allocator *allocator, struct aws_http_message *request, struct aws_byte_cursor credential_response) { struct aws_byte_buf header_value; AWS_ZERO_STRUCT(header_value); int result = AWS_OP_ERR; if (aws_byte_buf_init( &header_value, allocator, s_proxy_authorization_header_ntlm_prefix->len + credential_response.len)) { goto done; } /* First append proxy authorization header prefix */ struct aws_byte_cursor auth_header_cursor = aws_byte_cursor_from_string(s_proxy_authorization_header_ntlm_prefix); if (aws_byte_buf_append(&header_value, &auth_header_cursor)) { goto done; } /* Append the credential response to it; assumes already encoded properly (base64) */ if (aws_byte_buf_append(&header_value, &credential_response)) { goto done; } struct aws_http_header header = { .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name), .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len), }; if (aws_http_message_add_header(request, header)) { goto done; } result = AWS_OP_SUCCESS; done: aws_byte_buf_clean_up(&header_value); return result; } static void s_ntlm_tunnel_transform_connect( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message, aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback, aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback, void *internal_proxy_user_data) { struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl; struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy = ntlm_negotiator->strategy->impl; int result = AWS_OP_ERR; int error_code = AWS_ERROR_SUCCESS; struct aws_string *challenge_answer_token = NULL; struct aws_byte_cursor challenge_token; AWS_ZERO_STRUCT(challenge_token); if (ntlm_negotiator->connect_state == AWS_PNCS_FAILURE) { error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED; goto done; } if (ntlm_negotiator->connect_state != AWS_PNCS_READY) { error_code = AWS_ERROR_INVALID_STATE; goto done; } if (ntlm_negotiator->challenge_token == NULL) { error_code = AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING; goto done; } ntlm_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; challenge_token = aws_byte_cursor_from_string(ntlm_negotiator->challenge_token); challenge_answer_token = 
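/* The caller-supplied callback receives the Proxy-Authenticate challenge captured from the previous
 * CONNECT response and is expected to synchronously return the answering NTLM token, already
 * base64-encoded (see s_add_ntlm_proxy_usertoken_authentication_header above). */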
ntlm_strategy->get_challenge_token(ntlm_strategy->get_challenge_token_user_data, &challenge_token, &error_code); if (challenge_answer_token == NULL || error_code != AWS_ERROR_SUCCESS) { goto done; } /*transform the header with proxy authenticate:Negotiate and kerberos token*/ if (s_add_ntlm_proxy_usertoken_authentication_header( ntlm_negotiator->allocator, message, aws_byte_cursor_from_string(challenge_answer_token))) { error_code = aws_last_error(); goto done; } ntlm_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { if (error_code == AWS_ERROR_SUCCESS) { error_code = AWS_ERROR_UNKNOWN; } negotiation_termination_callback(message, error_code, internal_proxy_user_data); } else { negotiation_http_request_forward_callback(message, internal_proxy_user_data); } aws_string_destroy(challenge_answer_token); } AWS_STATIC_STRING_FROM_LITERAL(s_ntlm_challenge_token_header, "Proxy-Authenticate"); static int s_ntlm_on_incoming_header_adaptive( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers) { struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl; /* * only extract the challenge before we've started our own CONNECT attempt * * ToDo: we currently overwrite previous challenge tokens since it is unknown if multiple CONNECT requests * cause new challenges to be issued such that old challenges become invalid even if successfully computed */ if (ntlm_negotiator->connect_state == AWS_PNCS_READY) { if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { struct aws_byte_cursor proxy_authenticate_header_name = aws_byte_cursor_from_string(s_ntlm_challenge_token_header); for (size_t i = 0; i < num_headers; ++i) { struct aws_byte_cursor header_name_cursor = header_array[i].name; if (aws_byte_cursor_eq_ignore_case(&proxy_authenticate_header_name, &header_name_cursor)) { aws_string_destroy(ntlm_negotiator->challenge_token); struct aws_byte_cursor challenge_value_cursor = header_array[i].value; ntlm_negotiator->challenge_token = aws_string_new_from_cursor(ntlm_negotiator->allocator, &challenge_value_cursor); break; } } } } return AWS_OP_SUCCESS; } static int s_ntlm_on_connect_status( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_status_code status_code) { struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl; if (ntlm_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) { if (AWS_HTTP_STATUS_CODE_200_OK != status_code) { ntlm_negotiator->connect_state = AWS_PNCS_FAILURE; } else { ntlm_negotiator->connect_state = AWS_PNCS_SUCCESS; } } return AWS_OP_SUCCESS; } static int s_ntlm_on_incoming_body( struct aws_http_proxy_negotiator *proxy_negotiator, const struct aws_byte_cursor *data) { struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl; (void)ntlm_negotiator; (void)data; return AWS_OP_SUCCESS; } static enum aws_http_proxy_negotiation_retry_directive s_ntlm_tunnel_get_retry_directive( struct aws_http_proxy_negotiator *proxy_negotiator) { (void)proxy_negotiator; return AWS_HPNRD_CURRENT_CONNECTION; } static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_ntlm_proxy_negotiator_tunneling_vtable = { .on_incoming_body_callback = s_ntlm_on_incoming_body, .on_incoming_headers_callback = s_ntlm_on_incoming_header_adaptive, .on_status_callback = s_ntlm_on_connect_status, .connect_request_transform = 
s_ntlm_tunnel_transform_connect, .get_retry_directive = s_ntlm_tunnel_get_retry_directive, }; static void s_destroy_tunneling_ntlm_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl; aws_string_destroy(ntlm_negotiator->challenge_token); aws_http_proxy_strategy_release(ntlm_negotiator->strategy); aws_mem_release(ntlm_negotiator->allocator, ntlm_negotiator); } static struct aws_http_proxy_negotiator *s_create_tunneling_ntlm_negotiator( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator) { if (proxy_strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_ntlm)); if (ntlm_negotiator == NULL) { return NULL; } ntlm_negotiator->allocator = allocator; ntlm_negotiator->negotiator_base.impl = ntlm_negotiator; aws_ref_count_init( &ntlm_negotiator->negotiator_base.ref_count, &ntlm_negotiator->negotiator_base, (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_negotiator); ntlm_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable = &s_tunneling_ntlm_proxy_negotiator_tunneling_vtable; ntlm_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy); return &ntlm_negotiator->negotiator_base; } static struct aws_http_proxy_strategy_vtable s_tunneling_ntlm_strategy_vtable = { .create_negotiator = s_create_tunneling_ntlm_negotiator, }; static void s_destroy_tunneling_ntlm_strategy(struct aws_http_proxy_strategy *proxy_strategy) { struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy = proxy_strategy->impl; aws_mem_release(ntlm_strategy->allocator, ntlm_strategy); } struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_ntlm_options *config) { if (allocator == NULL || config == NULL || config->get_challenge_token == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_ntlm)); if (ntlm_strategy == NULL) { return NULL; } ntlm_strategy->strategy_base.impl = ntlm_strategy; ntlm_strategy->strategy_base.vtable = &s_tunneling_ntlm_strategy_vtable; ntlm_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL; ntlm_strategy->allocator = allocator; aws_ref_count_init( &ntlm_strategy->strategy_base.ref_count, &ntlm_strategy->strategy_base, (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_strategy); ntlm_strategy->get_challenge_token = config->get_challenge_token; ntlm_strategy->get_challenge_token_user_data = config->get_challenge_token_user_data; return &ntlm_strategy->strategy_base; } /******************************************************************************************************/ static void s_ntlm_credential_tunnel_transform_connect( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message, aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback, aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback, void *internal_proxy_user_data) { struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator = proxy_negotiator->impl; struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy 
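/* This transform sends the initial NTLM credential token obtained from config->get_token; the
 * companion strategy created by aws_http_proxy_strategy_new_tunneling_ntlm answers the
 * Proxy-Authenticate challenge on a subsequent attempt. The adaptive sequence registers the
 * credential strategy before the challenge-response strategy, so NTLM's multi-leg handshake maps
 * onto successive CONNECT attempts reusing the same connection (AWS_HPNRD_CURRENT_CONNECTION). */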
= ntlm_credential_negotiator->strategy->impl; int result = AWS_OP_ERR; int error_code = AWS_ERROR_SUCCESS; struct aws_string *token = NULL; if (ntlm_credential_negotiator->connect_state == AWS_PNCS_FAILURE) { error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED; goto done; } if (ntlm_credential_negotiator->connect_state != AWS_PNCS_READY) { error_code = AWS_ERROR_INVALID_STATE; goto done; } ntlm_credential_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; token = ntlm_credential_strategy->get_token(ntlm_credential_strategy->get_challenge_token_user_data, &error_code); if (token == NULL || error_code != AWS_ERROR_SUCCESS) { goto done; } /*transform the header with proxy authenticate:Negotiate and kerberos token*/ if (s_add_ntlm_proxy_usertoken_authentication_header( ntlm_credential_negotiator->allocator, message, aws_byte_cursor_from_string(token))) { error_code = aws_last_error(); goto done; } ntlm_credential_negotiator->connect_state = AWS_PNCS_IN_PROGRESS; result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { if (error_code == AWS_ERROR_SUCCESS) { error_code = AWS_ERROR_UNKNOWN; } negotiation_termination_callback(message, error_code, internal_proxy_user_data); } else { negotiation_http_request_forward_callback(message, internal_proxy_user_data); } aws_string_destroy(token); } static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_ntlm_proxy_credential_negotiator_tunneling_vtable = { .on_incoming_body_callback = s_ntlm_on_incoming_body, .on_incoming_headers_callback = s_ntlm_on_incoming_header_adaptive, .on_status_callback = s_ntlm_on_connect_status, .connect_request_transform = s_ntlm_credential_tunnel_transform_connect, }; static void s_destroy_tunneling_ntlm_credential_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator = proxy_negotiator->impl; aws_string_destroy(ntlm_credential_negotiator->challenge_token); aws_http_proxy_strategy_release(ntlm_credential_negotiator->strategy); aws_mem_release(ntlm_credential_negotiator->allocator, ntlm_credential_negotiator); } static struct aws_http_proxy_negotiator *s_create_tunneling_ntlm_credential_negotiator( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator) { if (proxy_strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_ntlm)); if (ntlm_credential_negotiator == NULL) { return NULL; } ntlm_credential_negotiator->allocator = allocator; ntlm_credential_negotiator->negotiator_base.impl = ntlm_credential_negotiator; aws_ref_count_init( &ntlm_credential_negotiator->negotiator_base.ref_count, &ntlm_credential_negotiator->negotiator_base, (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_credential_negotiator); ntlm_credential_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable = &s_tunneling_ntlm_proxy_credential_negotiator_tunneling_vtable; ntlm_credential_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy); return &ntlm_credential_negotiator->negotiator_base; } static struct aws_http_proxy_strategy_vtable s_tunneling_ntlm_credential_strategy_vtable = { .create_negotiator = s_create_tunneling_ntlm_credential_negotiator, }; static void s_destroy_tunneling_ntlm_credential_strategy(struct aws_http_proxy_strategy *proxy_strategy) { struct aws_http_proxy_strategy_tunneling_ntlm 
*ntlm_credential_strategy = proxy_strategy->impl; aws_mem_release(ntlm_credential_strategy->allocator, ntlm_credential_strategy); } struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm_credential( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_ntlm_options *config) { if (allocator == NULL || config == NULL || config->get_token == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_ntlm)); if (ntlm_credential_strategy == NULL) { return NULL; } ntlm_credential_strategy->strategy_base.impl = ntlm_credential_strategy; ntlm_credential_strategy->strategy_base.vtable = &s_tunneling_ntlm_credential_strategy_vtable; ntlm_credential_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL; ntlm_credential_strategy->allocator = allocator; aws_ref_count_init( &ntlm_credential_strategy->strategy_base.ref_count, &ntlm_credential_strategy->strategy_base, (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_credential_strategy); ntlm_credential_strategy->get_token = config->get_token; ntlm_credential_strategy->get_challenge_token_user_data = config->get_challenge_token_user_data; return &ntlm_credential_strategy->strategy_base; } /******************************************************************************************************************/ #define PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES 4 struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_adaptive( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_adaptive_options *config) { if (allocator == NULL || config == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy *strategies[PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES]; uint32_t strategy_count = 0; struct aws_http_proxy_strategy *identity_strategy = NULL; struct aws_http_proxy_strategy *kerberos_strategy = NULL; struct aws_http_proxy_strategy *ntlm_credential_strategy = NULL; struct aws_http_proxy_strategy *ntlm_strategy = NULL; struct aws_http_proxy_strategy *adaptive_sequence_strategy = NULL; identity_strategy = aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator); if (identity_strategy == NULL) { goto done; } strategies[strategy_count++] = identity_strategy; if (config->kerberos_options != NULL) { kerberos_strategy = aws_http_proxy_strategy_new_tunneling_kerberos(allocator, config->kerberos_options); if (kerberos_strategy == NULL) { goto done; } strategies[strategy_count++] = kerberos_strategy; } if (config->ntlm_options != NULL) { ntlm_credential_strategy = aws_http_proxy_strategy_new_tunneling_ntlm_credential(allocator, config->ntlm_options); if (ntlm_credential_strategy == NULL) { goto done; } strategies[strategy_count++] = ntlm_credential_strategy; ntlm_strategy = aws_http_proxy_strategy_new_tunneling_ntlm(allocator, config->ntlm_options); if (ntlm_strategy == NULL) { goto done; } strategies[strategy_count++] = ntlm_strategy; } AWS_FATAL_ASSERT(strategy_count <= PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES); struct aws_http_proxy_strategy_tunneling_sequence_options sequence_config = { .strategies = strategies, .strategy_count = strategy_count, }; adaptive_sequence_strategy = aws_http_proxy_strategy_new_tunneling_sequence(allocator, &sequence_config); if (adaptive_sequence_strategy == NULL) { goto done; } done: aws_http_proxy_strategy_release(identity_strategy); 
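/* The sequence strategy acquires its own reference on every sub-strategy it stores (see
 * aws_http_proxy_strategy_new_tunneling_sequence below), so the locally held references are released
 * here whether or not construction succeeded. A minimal usage sketch, illustrative only:
 * my_get_kerberos_token and my_user_data are hypothetical caller-provided values, and the real option
 * structs may carry more members than shown.
 *
 *   struct aws_http_proxy_strategy_tunneling_kerberos_options kerberos_options = {
 *       .get_token = my_get_kerberos_token,
 *       .get_token_user_data = my_user_data,
 *   };
 *   struct aws_http_proxy_strategy_tunneling_adaptive_options adaptive_options = {
 *       .kerberos_options = &kerberos_options,
 *       .ntlm_options = NULL,
 *   };
 *   struct aws_http_proxy_strategy *strategy =
 *       aws_http_proxy_strategy_new_tunneling_adaptive(allocator, &adaptive_options);
 *   ...
 *   aws_http_proxy_strategy_release(strategy);
 */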
aws_http_proxy_strategy_release(kerberos_strategy); aws_http_proxy_strategy_release(ntlm_credential_strategy); aws_http_proxy_strategy_release(ntlm_strategy); return adaptive_sequence_strategy; } /******************************************************************************************************************/ struct aws_http_proxy_strategy_tunneling_sequence { struct aws_allocator *allocator; struct aws_array_list strategies; struct aws_http_proxy_strategy strategy_base; }; struct aws_http_proxy_negotiator_tunneling_sequence { struct aws_allocator *allocator; struct aws_array_list negotiators; size_t current_negotiator_transform_index; void *original_internal_proxy_user_data; aws_http_proxy_negotiation_terminate_fn *original_negotiation_termination_callback; aws_http_proxy_negotiation_http_request_forward_fn *original_negotiation_http_request_forward_callback; struct aws_http_proxy_negotiator negotiator_base; }; static void s_sequence_tunnel_iteration_termination_callback( struct aws_http_message *message, int error_code, void *user_data) { struct aws_http_proxy_negotiator *proxy_negotiator = user_data; struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; AWS_LOGF_WARN( AWS_LS_HTTP_PROXY_NEGOTIATION, "(id=%p) Proxy negotiation step failed with error %d", (void *)proxy_negotiator, error_code); int connection_error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE; if (sequence_negotiator->current_negotiator_transform_index >= aws_array_list_length(&sequence_negotiator->negotiators)) { connection_error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED; } sequence_negotiator->original_negotiation_termination_callback( message, connection_error_code, sequence_negotiator->original_internal_proxy_user_data); } static void s_sequence_tunnel_iteration_forward_callback(struct aws_http_message *message, void *user_data) { struct aws_http_proxy_negotiator *proxy_negotiator = user_data; struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; sequence_negotiator->original_negotiation_http_request_forward_callback( message, sequence_negotiator->original_internal_proxy_user_data); } static void s_sequence_tunnel_try_next_negotiator( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message) { struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators); if (sequence_negotiator->current_negotiator_transform_index >= negotiator_count) { goto on_error; } struct aws_http_proxy_negotiator *current_negotiator = NULL; if (aws_array_list_get_at( &sequence_negotiator->negotiators, ¤t_negotiator, sequence_negotiator->current_negotiator_transform_index++)) { goto on_error; } current_negotiator->strategy_vtable.tunnelling_vtable->connect_request_transform( current_negotiator, message, s_sequence_tunnel_iteration_termination_callback, s_sequence_tunnel_iteration_forward_callback, proxy_negotiator); return; on_error: sequence_negotiator->original_negotiation_termination_callback( message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, sequence_negotiator->original_internal_proxy_user_data); } static void s_sequence_tunnel_transform_connect( struct aws_http_proxy_negotiator *proxy_negotiator, struct aws_http_message *message, aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback, aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback, void 
*internal_proxy_user_data) { struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; sequence_negotiator->original_internal_proxy_user_data = internal_proxy_user_data; sequence_negotiator->original_negotiation_termination_callback = negotiation_termination_callback; sequence_negotiator->original_negotiation_http_request_forward_callback = negotiation_http_request_forward_callback; s_sequence_tunnel_try_next_negotiator(proxy_negotiator, message); } static int s_sequence_on_incoming_headers( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers) { struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators); for (size_t i = 0; i < negotiator_count; ++i) { struct aws_http_proxy_negotiator *negotiator = NULL; if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) { continue; } aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers = negotiator->strategy_vtable.tunnelling_vtable->on_incoming_headers_callback; if (on_incoming_headers != NULL) { (*on_incoming_headers)(negotiator, header_block, header_array, num_headers); } } return AWS_OP_SUCCESS; } static int s_sequence_on_connect_status( struct aws_http_proxy_negotiator *proxy_negotiator, enum aws_http_status_code status_code) { struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators); for (size_t i = 0; i < negotiator_count; ++i) { struct aws_http_proxy_negotiator *negotiator = NULL; if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) { continue; } aws_http_proxy_negotiator_connect_status_fn *on_status = negotiator->strategy_vtable.tunnelling_vtable->on_status_callback; if (on_status != NULL) { (*on_status)(negotiator, status_code); } } return AWS_OP_SUCCESS; } static int s_sequence_on_incoming_body( struct aws_http_proxy_negotiator *proxy_negotiator, const struct aws_byte_cursor *data) { struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators); for (size_t i = 0; i < negotiator_count; ++i) { struct aws_http_proxy_negotiator *negotiator = NULL; if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) { continue; } aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body = negotiator->strategy_vtable.tunnelling_vtable->on_incoming_body_callback; if (on_incoming_body != NULL) { (*on_incoming_body)(negotiator, data); } } return AWS_OP_SUCCESS; } static enum aws_http_proxy_negotiation_retry_directive s_sequence_get_retry_directive( struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; if (sequence_negotiator->current_negotiator_transform_index < aws_array_list_length(&sequence_negotiator->negotiators)) { struct aws_http_proxy_negotiator *next_negotiator = NULL; aws_array_list_get_at( &sequence_negotiator->negotiators, &next_negotiator, sequence_negotiator->current_negotiator_transform_index); enum aws_http_proxy_negotiation_retry_directive next_negotiator_directive = aws_http_proxy_negotiator_get_retry_directive(next_negotiator); if 
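/* Forward the next untried negotiator's preference: NTLM asks to retry on the connection that issued
 * the challenge (AWS_HPNRD_CURRENT_CONNECTION); any other answer is mapped to
 * AWS_HPNRD_NEW_CONNECTION. Once every negotiator has been tried, AWS_HPNRD_STOP ends the retries. */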
(next_negotiator_directive == AWS_HPNRD_CURRENT_CONNECTION) { return AWS_HPNRD_CURRENT_CONNECTION; } else { return AWS_HPNRD_NEW_CONNECTION; } } return AWS_HPNRD_STOP; } static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_sequence_proxy_negotiator_tunneling_vtable = { .on_incoming_body_callback = s_sequence_on_incoming_body, .on_incoming_headers_callback = s_sequence_on_incoming_headers, .on_status_callback = s_sequence_on_connect_status, .connect_request_transform = s_sequence_tunnel_transform_connect, .get_retry_directive = s_sequence_get_retry_directive, }; static void s_destroy_tunneling_sequence_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) { struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl; size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators); for (size_t i = 0; i < negotiator_count; ++i) { struct aws_http_proxy_negotiator *negotiator = NULL; if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) { continue; } aws_http_proxy_negotiator_release(negotiator); } aws_array_list_clean_up(&sequence_negotiator->negotiators); aws_mem_release(sequence_negotiator->allocator, sequence_negotiator); } static struct aws_http_proxy_negotiator *s_create_tunneling_sequence_negotiator( struct aws_http_proxy_strategy *proxy_strategy, struct aws_allocator *allocator) { if (proxy_strategy == NULL || allocator == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_sequence)); if (sequence_negotiator == NULL) { return NULL; } sequence_negotiator->allocator = allocator; sequence_negotiator->negotiator_base.impl = sequence_negotiator; aws_ref_count_init( &sequence_negotiator->negotiator_base.ref_count, &sequence_negotiator->negotiator_base, (aws_simple_completion_callback *)s_destroy_tunneling_sequence_negotiator); sequence_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable = &s_tunneling_sequence_proxy_negotiator_tunneling_vtable; struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy = proxy_strategy->impl; size_t strategy_count = aws_array_list_length(&sequence_strategy->strategies); if (aws_array_list_init_dynamic( &sequence_negotiator->negotiators, allocator, strategy_count, sizeof(struct aws_http_proxy_negotiator *))) { goto on_error; } for (size_t i = 0; i < strategy_count; ++i) { struct aws_http_proxy_strategy *strategy = NULL; if (aws_array_list_get_at(&sequence_strategy->strategies, &strategy, i)) { goto on_error; } struct aws_http_proxy_negotiator *negotiator = aws_http_proxy_strategy_create_negotiator(strategy, allocator); if (negotiator == NULL) { goto on_error; } if (aws_array_list_push_back(&sequence_negotiator->negotiators, &negotiator)) { aws_http_proxy_negotiator_release(negotiator); goto on_error; } } return &sequence_negotiator->negotiator_base; on_error: aws_http_proxy_negotiator_release(&sequence_negotiator->negotiator_base); return NULL; } static struct aws_http_proxy_strategy_vtable s_tunneling_sequence_strategy_vtable = { .create_negotiator = s_create_tunneling_sequence_negotiator, }; static void s_destroy_tunneling_sequence_strategy(struct aws_http_proxy_strategy *proxy_strategy) { struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy = proxy_strategy->impl; size_t strategy_count = aws_array_list_length(&sequence_strategy->strategies); for 
(size_t i = 0; i < strategy_count; ++i) { struct aws_http_proxy_strategy *strategy = NULL; if (aws_array_list_get_at(&sequence_strategy->strategies, &strategy, i)) { continue; } aws_http_proxy_strategy_release(strategy); } aws_array_list_clean_up(&sequence_strategy->strategies); aws_mem_release(sequence_strategy->allocator, sequence_strategy); } struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_sequence( struct aws_allocator *allocator, struct aws_http_proxy_strategy_tunneling_sequence_options *config) { if (allocator == NULL || config == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_sequence)); if (sequence_strategy == NULL) { return NULL; } sequence_strategy->strategy_base.impl = sequence_strategy; sequence_strategy->strategy_base.vtable = &s_tunneling_sequence_strategy_vtable; sequence_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL; sequence_strategy->allocator = allocator; aws_ref_count_init( &sequence_strategy->strategy_base.ref_count, &sequence_strategy->strategy_base, (aws_simple_completion_callback *)s_destroy_tunneling_sequence_strategy); if (aws_array_list_init_dynamic( &sequence_strategy->strategies, allocator, config->strategy_count, sizeof(struct aws_http_proxy_strategy *))) { goto on_error; } for (size_t i = 0; i < config->strategy_count; ++i) { struct aws_http_proxy_strategy *strategy = config->strategies[i]; if (aws_array_list_push_back(&sequence_strategy->strategies, &strategy)) { goto on_error; } aws_http_proxy_strategy_acquire(strategy); } return &sequence_strategy->strategy_base; on_error: aws_http_proxy_strategy_release(&sequence_strategy->strategy_base); return NULL; } #if defined(_MSC_VER) # pragma warning(pop) #endif /* _MSC_VER */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/random_access_set.c000066400000000000000000000151601456575232400253320ustar00rootroot00000000000000 /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct aws_random_access_set_impl { struct aws_allocator *allocator; struct aws_array_list list; /* Always store the pointer of the element. */ struct aws_hash_table map; /* Map from the element to the index in the array. */ aws_hash_callback_destroy_fn *destroy_element_fn; }; static void s_impl_destroy(struct aws_random_access_set_impl *impl) { if (!impl) { return; } aws_array_list_clean_up(&impl->list); aws_hash_table_clean_up(&impl->map); aws_mem_release(impl->allocator, impl); } static struct aws_random_access_set_impl *s_impl_new( struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_element_fn, size_t initial_item_allocation) { struct aws_random_access_set_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_random_access_set_impl)); impl->allocator = allocator; /* Will always store the pointer of the element. 
*/ if (aws_array_list_init_dynamic(&impl->list, allocator, initial_item_allocation, sizeof(void *))) { s_impl_destroy(impl); return NULL; } if (aws_hash_table_init( &impl->map, allocator, initial_item_allocation, hash_fn, equals_fn, destroy_element_fn, NULL)) { s_impl_destroy(impl); return NULL; } impl->destroy_element_fn = destroy_element_fn; return impl; } int aws_random_access_set_init( struct aws_random_access_set *set, struct aws_allocator *allocator, aws_hash_fn *hash_fn, aws_hash_callback_eq_fn *equals_fn, aws_hash_callback_destroy_fn *destroy_element_fn, size_t initial_item_allocation) { AWS_FATAL_PRECONDITION(set); AWS_FATAL_PRECONDITION(allocator); AWS_FATAL_PRECONDITION(hash_fn); AWS_FATAL_PRECONDITION(equals_fn); struct aws_random_access_set_impl *impl = s_impl_new(allocator, hash_fn, equals_fn, destroy_element_fn, initial_item_allocation); if (!impl) { return AWS_OP_ERR; } set->impl = impl; return AWS_OP_SUCCESS; } void aws_random_access_set_clean_up(struct aws_random_access_set *set) { if (!set) { return; } s_impl_destroy(set->impl); } int aws_random_access_set_add(struct aws_random_access_set *set, const void *element, bool *added) { AWS_PRECONDITION(set); AWS_PRECONDITION(element); AWS_PRECONDITION(added); bool exist = false; if (aws_random_access_set_exist(set, element, &exist) || exist) { *added = false; return AWS_OP_SUCCESS; } /* deep copy the pointer of element to store at the array list */ if (aws_array_list_push_back(&set->impl->list, (void *)&element)) { goto list_push_error; } if (aws_hash_table_put(&set->impl->map, element, (void *)(aws_array_list_length(&set->impl->list) - 1), NULL)) { goto error; } *added = true; return AWS_OP_SUCCESS; error: aws_array_list_pop_back(&set->impl->list); list_push_error: *added = false; return AWS_OP_ERR; } int aws_random_access_set_remove(struct aws_random_access_set *set, const void *element) { AWS_PRECONDITION(set); AWS_PRECONDITION(element); size_t current_length = aws_array_list_length(&set->impl->list); if (current_length == 0) { /* Nothing to remove */ return AWS_OP_SUCCESS; } struct aws_hash_element *find = NULL; /* find and remove the element from table */ if (aws_hash_table_find(&set->impl->map, element, &find)) { return AWS_OP_ERR; } if (!find) { /* It's removed already */ return AWS_OP_SUCCESS; } size_t index_to_remove = (size_t)find->value; if (aws_hash_table_remove_element(&set->impl->map, find)) { return AWS_OP_ERR; } /* If assert code failed, we won't be recovered from the failure */ int assert_re = AWS_OP_SUCCESS; (void)assert_re; /* Nothing else can fail after here. */ if (index_to_remove != current_length - 1) { /* It's not the last element, we need to swap it with the end of the list and remove the last element */ void *last_element = NULL; /* The last element is a pointer of pointer of element. 
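 * Swap-remove keeps the set O(1): the last element's map entry is updated to the index being removed,
 * the two array slots are swapped, and the tail is popped, so the array stays dense and index-based
 * random access keeps working.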
*/ assert_re = aws_array_list_get_at_ptr(&set->impl->list, &last_element, current_length - 1); AWS_ASSERT(assert_re == AWS_OP_SUCCESS); /* Update the last element index in the table */ struct aws_hash_element *element_to_update = NULL; assert_re = aws_hash_table_find(&set->impl->map, *(void **)last_element, &element_to_update); AWS_ASSERT(assert_re == AWS_OP_SUCCESS); AWS_ASSERT(element_to_update != NULL); element_to_update->value = (void *)index_to_remove; /* Swap the last element with the element to remove in the list */ aws_array_list_swap(&set->impl->list, index_to_remove, current_length - 1); } /* Remove the current last element from the list */ assert_re = aws_array_list_pop_back(&set->impl->list); AWS_ASSERT(assert_re == AWS_OP_SUCCESS); if (set->impl->destroy_element_fn) { set->impl->destroy_element_fn((void *)element); } return AWS_OP_SUCCESS; } int aws_random_access_set_random_get_ptr(const struct aws_random_access_set *set, void **out) { AWS_PRECONDITION(set); AWS_PRECONDITION(out != NULL); size_t length = aws_array_list_length(&set->impl->list); if (length == 0) { return aws_raise_error(AWS_ERROR_LIST_EMPTY); } uint64_t random_64_bit_num = 0; aws_device_random_u64(&random_64_bit_num); size_t index = (size_t)random_64_bit_num % length; /* The array list stores the pointer of the element. */ return aws_array_list_get_at(&set->impl->list, (void *)out, index); } size_t aws_random_access_set_get_size(const struct aws_random_access_set *set) { return aws_array_list_length(&set->impl->list); } int aws_random_access_set_exist(const struct aws_random_access_set *set, const void *element, bool *exist) { AWS_PRECONDITION(set); AWS_PRECONDITION(element); AWS_PRECONDITION(exist); struct aws_hash_element *find = NULL; int re = aws_hash_table_find(&set->impl->map, element, &find); *exist = find != NULL; return re; } int aws_random_access_set_random_get_ptr_index(const struct aws_random_access_set *set, void **out, size_t index) { AWS_PRECONDITION(set); AWS_PRECONDITION(out != NULL); return aws_array_list_get_at(&set->impl->list, (void *)out, index); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/request_response.c000066400000000000000000001316471456575232400252750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif enum { /* Initial capacity for the aws_http_message.headers array_list. */ AWS_HTTP_REQUEST_NUM_RESERVED_HEADERS = 16, }; bool aws_http_header_name_eq(struct aws_byte_cursor name_a, struct aws_byte_cursor name_b) { return aws_byte_cursor_eq_ignore_case(&name_a, &name_b); } /** * -- Data Structure Notes -- * Headers are stored in a linear array, rather than a hash-table of arrays. * The linear array was simpler to implement and may be faster due to having fewer allocations. * The API has been designed so we can swap out the implementation later if desired. * * -- String Storage Notes -- * We use a single allocation to hold the name and value of each aws_http_header. * We could optimize storage by using something like a string pool. If we do this, be sure to maintain * the address of existing strings when adding new strings (a dynamic aws_byte_buf would not suffice). 
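 * For reference: each stored aws_http_header's name.ptr is the start of that shared allocation and
 * value.ptr points into the same block immediately after the name, which is why clear/erase release
 * only name.ptr.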
*/ struct aws_http_headers { struct aws_allocator *alloc; struct aws_array_list array_list; /* Contains aws_http_header */ struct aws_atomic_var refcount; }; struct aws_http_headers *aws_http_headers_new(struct aws_allocator *allocator) { AWS_PRECONDITION(allocator); struct aws_http_headers *headers = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_headers)); if (!headers) { goto alloc_failed; } headers->alloc = allocator; aws_atomic_init_int(&headers->refcount, 1); if (aws_array_list_init_dynamic( &headers->array_list, allocator, AWS_HTTP_REQUEST_NUM_RESERVED_HEADERS, sizeof(struct aws_http_header))) { goto array_list_failed; } return headers; array_list_failed: aws_mem_release(headers->alloc, headers); alloc_failed: return NULL; } void aws_http_headers_release(struct aws_http_headers *headers) { AWS_PRECONDITION(!headers || headers->alloc); if (!headers) { return; } size_t prev_refcount = aws_atomic_fetch_sub(&headers->refcount, 1); if (prev_refcount == 1) { aws_http_headers_clear(headers); aws_array_list_clean_up(&headers->array_list); aws_mem_release(headers->alloc, headers); } else { AWS_ASSERT(prev_refcount != 0); } } void aws_http_headers_acquire(struct aws_http_headers *headers) { AWS_PRECONDITION(headers); aws_atomic_fetch_add(&headers->refcount, 1); } static int s_http_headers_add_header_impl( struct aws_http_headers *headers, const struct aws_http_header *header_orig, bool front) { AWS_PRECONDITION(headers); AWS_PRECONDITION(header_orig); AWS_PRECONDITION(aws_byte_cursor_is_valid(&header_orig->name) && aws_byte_cursor_is_valid(&header_orig->value)); struct aws_http_header header_copy = *header_orig; if (header_copy.name.len == 0) { return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME); } /* Whitespace around header values is ignored (RFC-7230 - Section 3.2). * Trim it off here, so anyone querying this value has an easier time. */ header_copy.value = aws_strutil_trim_http_whitespace(header_copy.value); size_t total_len; if (aws_add_size_checked(header_copy.name.len, header_copy.value.len, &total_len)) { return AWS_OP_ERR; } /* Store our own copy of the strings. * We put the name and value into the same allocation. */ uint8_t *strmem = aws_mem_acquire(headers->alloc, total_len); struct aws_byte_buf strbuf = aws_byte_buf_from_empty_array(strmem, total_len); aws_byte_buf_append_and_update(&strbuf, &header_copy.name); aws_byte_buf_append_and_update(&strbuf, &header_copy.value); if (front) { if (aws_array_list_push_front(&headers->array_list, &header_copy)) { goto error; } } else { if (aws_array_list_push_back(&headers->array_list, &header_copy)) { goto error; } } return AWS_OP_SUCCESS; error: aws_mem_release(headers->alloc, strmem); return AWS_OP_ERR; } int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header) { /* Add pseudo headers to the front and not checking any violation until we send the header to the wire */ bool pseudo = aws_strutil_is_http_pseudo_header_name(header->name); bool front = false; if (pseudo && aws_http_headers_count(headers)) { struct aws_http_header last_header; /* TODO: instead if checking the last header, maybe we can add the pseudo headers to the end of the existing * pseudo headers, which needs to insert to the middle of the array list. 
*/ AWS_ZERO_STRUCT(last_header); aws_http_headers_get_index(headers, aws_http_headers_count(headers) - 1, &last_header); front = !aws_strutil_is_http_pseudo_header_name(last_header.name); } return s_http_headers_add_header_impl(headers, header, front); } int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { struct aws_http_header header = {.name = name, .value = value}; return aws_http_headers_add_header(headers, &header); } void aws_http_headers_clear(struct aws_http_headers *headers) { AWS_PRECONDITION(headers); struct aws_http_header *header = NULL; const size_t count = aws_http_headers_count(headers); for (size_t i = 0; i < count; ++i) { aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i); AWS_ASSUME(header); /* Storage for name & value is in the same allocation */ aws_mem_release(headers->alloc, header->name.ptr); } aws_array_list_clear(&headers->array_list); } /* Does not check index */ static void s_http_headers_erase_index(struct aws_http_headers *headers, size_t index) { struct aws_http_header *header = NULL; aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, index); AWS_ASSUME(header); /* Storage for name & value is in the same allocation */ aws_mem_release(headers->alloc, header->name.ptr); aws_array_list_erase(&headers->array_list, index); } int aws_http_headers_erase_index(struct aws_http_headers *headers, size_t index) { AWS_PRECONDITION(headers); if (index >= aws_http_headers_count(headers)) { return aws_raise_error(AWS_ERROR_INVALID_INDEX); } s_http_headers_erase_index(headers, index); return AWS_OP_SUCCESS; } /* Erase entries with name, stop at end_index */ static int s_http_headers_erase( struct aws_http_headers *headers, struct aws_byte_cursor name, size_t start_index, size_t end_index) { bool erased_any = false; struct aws_http_header *header = NULL; /* Iterating in reverse is simpler */ for (size_t n = end_index; n > start_index; --n) { const size_t i = n - 1; aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i); AWS_ASSUME(header); if (aws_http_header_name_eq(header->name, name)) { s_http_headers_erase_index(headers, i); erased_any = true; } } if (!erased_any) { return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND); } return AWS_OP_SUCCESS; } int aws_http_headers_erase(struct aws_http_headers *headers, struct aws_byte_cursor name) { AWS_PRECONDITION(headers); AWS_PRECONDITION(aws_byte_cursor_is_valid(&name)); return s_http_headers_erase(headers, name, 0, aws_http_headers_count(headers)); } int aws_http_headers_erase_value( struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { AWS_PRECONDITION(headers); AWS_PRECONDITION(aws_byte_cursor_is_valid(&name) && aws_byte_cursor_is_valid(&value)); struct aws_http_header *header = NULL; const size_t count = aws_http_headers_count(headers); for (size_t i = 0; i < count; ++i) { aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i); AWS_ASSUME(header); if (aws_http_header_name_eq(header->name, name) && aws_byte_cursor_eq(&header->value, &value)) { s_http_headers_erase_index(headers, i); return AWS_OP_SUCCESS; } } return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND); } int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aws_http_header *array, size_t count) { AWS_PRECONDITION(headers); AWS_PRECONDITION(AWS_MEM_IS_READABLE(array, count)); const size_t orig_count = aws_http_headers_count(headers); for (size_t i = 0; i < count; ++i) { if 
(aws_http_headers_add_header(headers, &array[i])) { goto error; } } return AWS_OP_SUCCESS; error: /* Erase headers from the end until we're back to our previous state */ for (size_t new_count = aws_http_headers_count(headers); new_count > orig_count; --new_count) { s_http_headers_erase_index(headers, new_count - 1); } return AWS_OP_ERR; } int aws_http_headers_set(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { AWS_PRECONDITION(headers); AWS_PRECONDITION(aws_byte_cursor_is_valid(&name) && aws_byte_cursor_is_valid(&value)); const size_t prev_count = aws_http_headers_count(headers); bool pseudo = aws_strutil_is_http_pseudo_header_name(name); const size_t start = pseudo ? 1 : 0; struct aws_http_header header = {.name = name, .value = value}; if (s_http_headers_add_header_impl(headers, &header, pseudo)) { return AWS_OP_ERR; } /* Erase pre-existing headers AFTER add, in case name or value was referencing their memory. */ s_http_headers_erase(headers, name, start, prev_count); return AWS_OP_SUCCESS; } size_t aws_http_headers_count(const struct aws_http_headers *headers) { AWS_PRECONDITION(headers); return aws_array_list_length(&headers->array_list); } int aws_http_headers_get_index( const struct aws_http_headers *headers, size_t index, struct aws_http_header *out_header) { AWS_PRECONDITION(headers); AWS_PRECONDITION(out_header); return aws_array_list_get_at(&headers->array_list, out_header, index); } /* RFC-9110 - 5.3 * A recipient MAY combine multiple field lines within a field section that * have the same field name into one field line, without changing the semantics * of the message, by appending each subsequent field line value to the initial * field line value in order, separated by a comma (",") and optional whitespace * (OWS, defined in Section 5.6.3). For consistency, use comma SP. 
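 * For example, two field lines "Accept-Encoding: gzip" and "Accept-Encoding: br" combine into the
 * single value "gzip, br".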
*/ AWS_HTTP_API struct aws_string *aws_http_headers_get_all(const struct aws_http_headers *headers, struct aws_byte_cursor name) { AWS_PRECONDITION(headers); AWS_PRECONDITION(aws_byte_cursor_is_valid(&name)); struct aws_string *value_str = NULL; const struct aws_byte_cursor separator = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(", "); struct aws_byte_buf value_builder; aws_byte_buf_init(&value_builder, headers->alloc, 0); bool found = false; struct aws_http_header *header = NULL; const size_t count = aws_http_headers_count(headers); for (size_t i = 0; i < count; ++i) { aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i); if (aws_http_header_name_eq(name, header->name)) { if (!found) { found = true; } else { aws_byte_buf_append_dynamic(&value_builder, &separator); } aws_byte_buf_append_dynamic(&value_builder, &header->value); } } if (found) { value_str = aws_string_new_from_buf(headers->alloc, &value_builder); } else { aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND); } aws_byte_buf_clean_up(&value_builder); return value_str; } int aws_http_headers_get( const struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor *out_value) { AWS_PRECONDITION(headers); AWS_PRECONDITION(out_value); AWS_PRECONDITION(aws_byte_cursor_is_valid(&name)); struct aws_http_header *header = NULL; const size_t count = aws_http_headers_count(headers); for (size_t i = 0; i < count; ++i) { aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i); AWS_ASSUME(header); if (aws_http_header_name_eq(header->name, name)) { *out_value = header->value; return AWS_OP_SUCCESS; } } return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND); } bool aws_http_headers_has(const struct aws_http_headers *headers, struct aws_byte_cursor name) { struct aws_byte_cursor out_value; if (aws_http_headers_get(headers, name, &out_value)) { return false; } return true; } int aws_http2_headers_get_request_method( const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_method) { return aws_http_headers_get(h2_headers, aws_http_header_method, out_method); } int aws_http2_headers_get_request_scheme( const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_scheme) { return aws_http_headers_get(h2_headers, aws_http_header_scheme, out_scheme); } int aws_http2_headers_get_request_authority( const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_authority) { return aws_http_headers_get(h2_headers, aws_http_header_authority, out_authority); } int aws_http2_headers_get_request_path(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_path) { return aws_http_headers_get(h2_headers, aws_http_header_path, out_path); } int aws_http2_headers_get_response_status(const struct aws_http_headers *h2_headers, int *out_status_code) { struct aws_byte_cursor status_code_cur; int return_code = aws_http_headers_get(h2_headers, aws_http_header_status, &status_code_cur); if (return_code == AWS_OP_SUCCESS) { uint64_t code_val_u64; if (aws_byte_cursor_utf8_parse_u64(status_code_cur, &code_val_u64)) { return AWS_OP_ERR; } *out_status_code = (int)code_val_u64; } return return_code; } int aws_http2_headers_set_request_method(struct aws_http_headers *h2_headers, struct aws_byte_cursor method) { return aws_http_headers_set(h2_headers, aws_http_header_method, method); } int aws_http2_headers_set_request_scheme(struct aws_http_headers *h2_headers, struct aws_byte_cursor scheme) { return aws_http_headers_set(h2_headers, aws_http_header_scheme, scheme); } int 
aws_http2_headers_set_request_authority(struct aws_http_headers *h2_headers, struct aws_byte_cursor authority) { return aws_http_headers_set(h2_headers, aws_http_header_authority, authority); } int aws_http2_headers_set_request_path(struct aws_http_headers *h2_headers, struct aws_byte_cursor path) { return aws_http_headers_set(h2_headers, aws_http_header_path, path); } int aws_http2_headers_set_response_status(struct aws_http_headers *h2_headers, int status_code) { /* Status code must fit in 3 digits */ if (status_code < 0 || status_code > 999) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } char status_code_str[4] = "000"; snprintf(status_code_str, sizeof(status_code_str), "%03d", status_code); struct aws_byte_cursor status_code_cur = aws_byte_cursor_from_c_str(status_code_str); return aws_http_headers_set(h2_headers, aws_http_header_status, status_code_cur); } struct aws_http_message { struct aws_allocator *allocator; struct aws_http_headers *headers; struct aws_input_stream *body_stream; struct aws_atomic_var refcount; enum aws_http_version http_version; /* Data specific to the request or response subclasses */ union { struct aws_http_message_request_data { struct aws_string *method; struct aws_string *path; } request; struct aws_http_message_response_data { int status; } response; } subclass_data; struct aws_http_message_request_data *request_data; struct aws_http_message_response_data *response_data; }; static int s_set_string_from_cursor( struct aws_string **dst, struct aws_byte_cursor cursor, struct aws_allocator *alloc) { AWS_PRECONDITION(dst); /* If the cursor is empty, set dst to NULL */ struct aws_string *new_str; if (cursor.len) { new_str = aws_string_new_from_cursor(alloc, &cursor); if (!new_str) { return AWS_OP_ERR; } } else { new_str = NULL; } /* Replace existing value */ aws_string_destroy(*dst); *dst = new_str; return AWS_OP_SUCCESS; } static struct aws_http_message *s_message_new_common( struct aws_allocator *allocator, struct aws_http_headers *existing_headers) { /* allocation cannot fail */ struct aws_http_message *message = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_message)); message->allocator = allocator; aws_atomic_init_int(&message->refcount, 1); if (existing_headers) { message->headers = existing_headers; aws_http_headers_acquire(message->headers); } else { message->headers = aws_http_headers_new(allocator); if (!message->headers) { goto error; } } return message; error: aws_http_message_destroy(message); return NULL; } static struct aws_http_message *s_message_new_request_common( struct aws_allocator *allocator, struct aws_http_headers *existing_headers, enum aws_http_version version) { struct aws_http_message *message = s_message_new_common(allocator, existing_headers); if (message) { message->request_data = &message->subclass_data.request; message->http_version = version; } return message; } struct aws_http_message *aws_http_message_new_request_with_headers( struct aws_allocator *allocator, struct aws_http_headers *existing_headers) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(existing_headers); return s_message_new_request_common(allocator, existing_headers, AWS_HTTP_VERSION_1_1); } struct aws_http_message *aws_http_message_new_request(struct aws_allocator *allocator) { AWS_PRECONDITION(allocator); return s_message_new_request_common(allocator, NULL, AWS_HTTP_VERSION_1_1); } struct aws_http_message *aws_http2_message_new_request(struct aws_allocator *allocator) { AWS_PRECONDITION(allocator); return s_message_new_request_common(allocator, 
NULL, AWS_HTTP_VERSION_2); } static struct aws_http_message *s_http_message_new_response_common( struct aws_allocator *allocator, enum aws_http_version version) { AWS_PRECONDITION(allocator); struct aws_http_message *message = s_message_new_common(allocator, NULL); if (message) { message->response_data = &message->subclass_data.response; message->response_data->status = AWS_HTTP_STATUS_CODE_UNKNOWN; message->http_version = version; } return message; } struct aws_http_message *aws_http_message_new_response(struct aws_allocator *allocator) { AWS_PRECONDITION(allocator); return s_http_message_new_response_common(allocator, AWS_HTTP_VERSION_1_1); } struct aws_http_message *aws_http2_message_new_response(struct aws_allocator *allocator) { AWS_PRECONDITION(allocator); return s_http_message_new_response_common(allocator, AWS_HTTP_VERSION_2); } void aws_http_message_destroy(struct aws_http_message *message) { aws_http_message_release(message); } struct aws_http_message *aws_http_message_release(struct aws_http_message *message) { /* Note that release() may also be used by new() functions to clean up if something goes wrong */ AWS_PRECONDITION(!message || message->allocator); if (!message) { return NULL; } size_t prev_refcount = aws_atomic_fetch_sub(&message->refcount, 1); if (prev_refcount == 1) { if (message->request_data) { aws_string_destroy(message->request_data->method); aws_string_destroy(message->request_data->path); } aws_http_headers_release(message->headers); aws_input_stream_release(message->body_stream); aws_mem_release(message->allocator, message); } else { AWS_ASSERT(prev_refcount != 0); } return NULL; } struct aws_http_message *aws_http_message_acquire(struct aws_http_message *message) { if (message != NULL) { aws_atomic_fetch_add(&message->refcount, 1); } return message; } bool aws_http_message_is_request(const struct aws_http_message *message) { AWS_PRECONDITION(message); return message->request_data; } bool aws_http_message_is_response(const struct aws_http_message *message) { AWS_PRECONDITION(message); return message->response_data; } enum aws_http_version aws_http_message_get_protocol_version(const struct aws_http_message *message) { AWS_PRECONDITION(message); return message->http_version; } int aws_http_message_set_request_method(struct aws_http_message *request_message, struct aws_byte_cursor method) { AWS_PRECONDITION(request_message); AWS_PRECONDITION(aws_byte_cursor_is_valid(&method)); AWS_PRECONDITION(request_message->request_data); if (request_message->request_data) { switch (request_message->http_version) { case AWS_HTTP_VERSION_1_1: return s_set_string_from_cursor( &request_message->request_data->method, method, request_message->allocator); case AWS_HTTP_VERSION_2: return aws_http2_headers_set_request_method(request_message->headers, method); default: return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } } return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_http_message_get_request_method( const struct aws_http_message *request_message, struct aws_byte_cursor *out_method) { AWS_PRECONDITION(request_message); AWS_PRECONDITION(out_method); AWS_PRECONDITION(request_message->request_data); int error = AWS_ERROR_HTTP_DATA_NOT_AVAILABLE; if (request_message->request_data) { switch (request_message->http_version) { case AWS_HTTP_VERSION_1_1: if (request_message->request_data->method) { *out_method = aws_byte_cursor_from_string(request_message->request_data->method); return AWS_OP_SUCCESS; } break; case AWS_HTTP_VERSION_2: return 
aws_http2_headers_get_request_method(request_message->headers, out_method); default: error = AWS_ERROR_UNIMPLEMENTED; } } AWS_ZERO_STRUCT(*out_method); return aws_raise_error(error); } int aws_http_message_set_request_path(struct aws_http_message *request_message, struct aws_byte_cursor path) { AWS_PRECONDITION(request_message); AWS_PRECONDITION(aws_byte_cursor_is_valid(&path)); AWS_PRECONDITION(request_message->request_data); if (request_message->request_data) { switch (request_message->http_version) { case AWS_HTTP_VERSION_1_1: return s_set_string_from_cursor(&request_message->request_data->path, path, request_message->allocator); case AWS_HTTP_VERSION_2: return aws_http2_headers_set_request_path(request_message->headers, path); default: return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } } return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_http_message_get_request_path( const struct aws_http_message *request_message, struct aws_byte_cursor *out_path) { AWS_PRECONDITION(request_message); AWS_PRECONDITION(out_path); AWS_PRECONDITION(request_message->request_data); if (request_message->request_data) { switch (request_message->http_version) { case AWS_HTTP_VERSION_1_1: if (request_message->request_data->path) { *out_path = aws_byte_cursor_from_string(request_message->request_data->path); return AWS_OP_SUCCESS; } break; case AWS_HTTP_VERSION_2: return aws_http2_headers_get_request_path(request_message->headers, out_path); default: return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } } AWS_ZERO_STRUCT(*out_path); return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE); } int aws_http_message_get_response_status(const struct aws_http_message *response_message, int *out_status_code) { AWS_PRECONDITION(response_message); AWS_PRECONDITION(out_status_code); AWS_PRECONDITION(response_message->response_data); *out_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN; if (response_message->response_data) { switch (response_message->http_version) { case AWS_HTTP_VERSION_1_1: if (response_message->response_data->status != AWS_HTTP_STATUS_CODE_UNKNOWN) { *out_status_code = response_message->response_data->status; return AWS_OP_SUCCESS; } break; case AWS_HTTP_VERSION_2: return aws_http2_headers_get_response_status(response_message->headers, out_status_code); default: return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } } return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE); } int aws_http_message_set_response_status(struct aws_http_message *response_message, int status_code) { AWS_PRECONDITION(response_message); AWS_PRECONDITION(response_message->response_data); if (response_message->response_data) { /* Status code must be printable with exactly 3 digits */ if (status_code >= 0 && status_code <= 999) { switch (response_message->http_version) { case AWS_HTTP_VERSION_1_1: response_message->response_data->status = status_code; return AWS_OP_SUCCESS; case AWS_HTTP_VERSION_2: return aws_http2_headers_set_response_status(response_message->headers, status_code); default: return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); } } return aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE); } return aws_raise_error(AWS_ERROR_INVALID_STATE); } void aws_http_message_set_body_stream(struct aws_http_message *message, struct aws_input_stream *body_stream) { AWS_PRECONDITION(message); /* release previous stream, if any */ aws_input_stream_release(message->body_stream); message->body_stream = body_stream; if (message->body_stream) { aws_input_stream_acquire(message->body_stream); } } int aws_http1_stream_write_chunk(struct 
aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options) { AWS_PRECONDITION(http1_stream); AWS_PRECONDITION(http1_stream->vtable); AWS_PRECONDITION(options); if (!http1_stream->vtable->http1_write_chunk) { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: HTTP/1 stream only function invoked on other stream, ignoring call.", (void *)http1_stream); return aws_raise_error(AWS_ERROR_INVALID_STATE); } return http1_stream->vtable->http1_write_chunk(http1_stream, options); } int aws_http2_stream_write_data( struct aws_http_stream *http2_stream, const struct aws_http2_stream_write_data_options *options) { AWS_PRECONDITION(http2_stream); AWS_PRECONDITION(http2_stream->vtable); AWS_PRECONDITION(http2_stream->vtable->http2_write_data); AWS_PRECONDITION(options); return http2_stream->vtable->http2_write_data(http2_stream, options); } int aws_http1_stream_add_chunked_trailer( struct aws_http_stream *http1_stream, const struct aws_http_headers *trailing_headers) { AWS_PRECONDITION(http1_stream); AWS_PRECONDITION(http1_stream->vtable); AWS_PRECONDITION(trailing_headers); if (!http1_stream->vtable->http1_add_trailer) { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: HTTP/1 stream only function invoked on other stream, ignoring call.", (void *)http1_stream); return aws_raise_error(AWS_ERROR_INVALID_STATE); } return http1_stream->vtable->http1_add_trailer(http1_stream, trailing_headers); } struct aws_input_stream *aws_http_message_get_body_stream(const struct aws_http_message *message) { AWS_PRECONDITION(message); return message->body_stream; } struct aws_http_headers *aws_http_message_get_headers(const struct aws_http_message *message) { AWS_PRECONDITION(message); return message->headers; } const struct aws_http_headers *aws_http_message_get_const_headers(const struct aws_http_message *message) { AWS_PRECONDITION(message); return message->headers; } int aws_http_message_add_header(struct aws_http_message *message, struct aws_http_header header) { return aws_http_headers_add(message->headers, header.name, header.value); } int aws_http_message_add_header_array( struct aws_http_message *message, const struct aws_http_header *headers, size_t num_headers) { return aws_http_headers_add_array(message->headers, headers, num_headers); } int aws_http_message_erase_header(struct aws_http_message *message, size_t index) { return aws_http_headers_erase_index(message->headers, index); } size_t aws_http_message_get_header_count(const struct aws_http_message *message) { return aws_http_headers_count(message->headers); } int aws_http_message_get_header( const struct aws_http_message *message, struct aws_http_header *out_header, size_t index) { return aws_http_headers_get_index(message->headers, index, out_header); } AWS_FUTURE_T_POINTER_WITH_RELEASE_IMPLEMENTATION( aws_future_http_message, struct aws_http_message, aws_http_message_release) struct aws_http_stream *aws_http_connection_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options) { AWS_PRECONDITION(client_connection); AWS_PRECONDITION(aws_http_connection_is_client(client_connection)); AWS_PRECONDITION(options); if (options->self_size == 0 || !options->request || !aws_http_message_is_request(options->request)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Cannot create client request, options are invalid.", (void *)client_connection); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } /* Connection owns stream, and must outlive stream */ aws_http_connection_acquire(client_connection); 
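/* Descriptive note: the connection reference acquired above is handed to the new stream; it is released when the stream is destroyed (see aws_http_stream_release below), or immediately if make_request fails. */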
struct aws_http_stream *stream = client_connection->vtable->make_request(client_connection, options); if (!stream) { aws_http_connection_release(client_connection); return NULL; } return stream; } struct aws_http_message *aws_http2_message_new_from_http1( struct aws_allocator *alloc, const struct aws_http_message *http1_msg) { struct aws_http_headers *old_headers = aws_http_message_get_headers(http1_msg); struct aws_http_header header_iter; struct aws_byte_buf lower_name_buf; AWS_ZERO_STRUCT(lower_name_buf); struct aws_http_message *message = aws_http_message_is_request(http1_msg) ? aws_http2_message_new_request(alloc) : aws_http2_message_new_response(alloc); if (!message) { return NULL; } struct aws_http_headers *copied_headers = message->headers; AWS_LOGF_TRACE(AWS_LS_HTTP_GENERAL, "Creating HTTP/2 message from HTTP/1 message id: %p", (void *)http1_msg); /* Set pseudo headers from HTTP/1.1 message */ if (aws_http_message_is_request(http1_msg)) { struct aws_byte_cursor method; if (aws_http_message_get_request_method(http1_msg, &method)) { AWS_LOGF_ERROR( AWS_LS_HTTP_GENERAL, "Failed to create HTTP/2 message from HTTP/1 message, ip: %p, due to no method found.", (void *)http1_msg); /* error will happen when the request is invalid */ aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD); goto error; } /* Use add instead of set method to avoid push front to the array list */ if (aws_http_headers_add(copied_headers, aws_http_header_method, method)) { goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_GENERAL, "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ", (int)aws_http_header_method.len, aws_http_header_method.ptr, (int)method.len, method.ptr); /** * we set a default value, "https", for now. * TODO: as we support prior knowledge, we may also want to support http? */ struct aws_byte_cursor scheme_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https"); if (aws_http_headers_add(copied_headers, aws_http_header_scheme, scheme_cursor)) { goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_GENERAL, "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ", (int)aws_http_header_scheme.len, aws_http_header_scheme.ptr, (int)scheme_cursor.len, scheme_cursor.ptr); /** * An intermediary that forwards a request over HTTP/2 MUST construct an ":authority" pseudo-header field * using the authority information from the control data of the original request. 
(RFC 9113 8.3.1) */ struct aws_byte_cursor host_value; AWS_ZERO_STRUCT(host_value); if (aws_http_headers_get(http1_msg->headers, aws_byte_cursor_from_c_str("host"), &host_value) == AWS_OP_SUCCESS) { if (aws_http_headers_add(copied_headers, aws_http_header_authority, host_value)) { goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_GENERAL, "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ", (int)aws_http_header_authority.len, aws_http_header_authority.ptr, (int)host_value.len, host_value.ptr); } /* TODO: If the host header is missing, the target URI could be the other source of the authority * information */ struct aws_byte_cursor path_cursor; if (aws_http_message_get_request_path(http1_msg, &path_cursor)) { AWS_LOGF_ERROR( AWS_LS_HTTP_GENERAL, "Failed to create HTTP/2 message from HTTP/1 message, ip: %p, due to no path found.", (void *)http1_msg); aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH); goto error; } if (aws_http_headers_add(copied_headers, aws_http_header_path, path_cursor)) { goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_GENERAL, "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ", (int)aws_http_header_path.len, aws_http_header_path.ptr, (int)path_cursor.len, path_cursor.ptr); } else { int status = 0; if (aws_http_message_get_response_status(http1_msg, &status)) { AWS_LOGF_ERROR( AWS_LS_HTTP_GENERAL, "Failed to create HTTP/2 response message from HTTP/1 response message, ip: %p, due to no status " "found.", (void *)http1_msg); /* error will happen when the request is invalid */ aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE); goto error; } if (aws_http2_headers_set_response_status(copied_headers, status)) { goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_GENERAL, "Added header to new HTTP/2 header - \"%.*s\": \"%d\" ", (int)aws_http_header_status.len, aws_http_header_status.ptr, status); } if (aws_byte_buf_init(&lower_name_buf, alloc, 256)) { goto error; } for (size_t iter = 0; iter < aws_http_headers_count(old_headers); iter++) { aws_byte_buf_reset(&lower_name_buf, false); bool copy_header = true; /* name should be converted to lower case */ if (aws_http_headers_get_index(old_headers, iter, &header_iter)) { goto error; } /* append lower case name to the buffer */ aws_byte_buf_append_with_lookup(&lower_name_buf, &header_iter.name, aws_lookup_table_to_lower_get()); struct aws_byte_cursor lower_name_cursor = aws_byte_cursor_from_buf(&lower_name_buf); enum aws_http_header_name name_enum = aws_http_lowercase_str_to_header_name(lower_name_cursor); switch (name_enum) { case AWS_HTTP_HEADER_TRANSFER_ENCODING: case AWS_HTTP_HEADER_UPGRADE: case AWS_HTTP_HEADER_KEEP_ALIVE: case AWS_HTTP_HEADER_PROXY_CONNECTION: case AWS_HTTP_HEADER_HOST: /** * An intermediary transforming an HTTP/1.x message to HTTP/2 MUST remove connection-specific header * fields as discussed in Section 7.6.1 of [HTTP].
(RFC 9113 8.2.2) */ AWS_LOGF_TRACE( AWS_LS_HTTP_GENERAL, "Skip connection-specific headers - \"%.*s\" ", (int)lower_name_cursor.len, lower_name_cursor.ptr); copy_header = false; break; default: break; } if (copy_header) { if (aws_http_headers_add(copied_headers, lower_name_cursor, header_iter.value)) { goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_GENERAL, "Added header to new HTTP/2 header - \"%.*s\": \"%.*s\" ", (int)lower_name_cursor.len, lower_name_cursor.ptr, (int)header_iter.value.len, header_iter.value.ptr); } } aws_byte_buf_clean_up(&lower_name_buf); aws_http_message_set_body_stream(message, aws_http_message_get_body_stream(http1_msg)); return message; error: aws_http_message_release(message); aws_byte_buf_clean_up(&lower_name_buf); return NULL; } int aws_http_stream_activate(struct aws_http_stream *stream) { AWS_PRECONDITION(stream); AWS_PRECONDITION(stream->vtable); AWS_PRECONDITION(stream->vtable->activate); /* make sure it's actually a client calling us. This is always a programmer bug, so just assert and die. */ AWS_PRECONDITION(aws_http_connection_is_client(stream->owning_connection)); return stream->vtable->activate(stream); } struct aws_http_stream *aws_http_stream_new_server_request_handler( const struct aws_http_request_handler_options *options) { AWS_PRECONDITION(options); if (options->self_size == 0 || !options->server_connection || !aws_http_connection_is_server(options->server_connection)) { AWS_LOGF_ERROR( AWS_LS_HTTP_CONNECTION, "id=%p: Cannot create server request handler stream, options are invalid.", (void *)options->server_connection); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } return options->server_connection->vtable->new_server_request_handler_stream(options); } int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response) { AWS_PRECONDITION(stream); AWS_PRECONDITION(response); AWS_PRECONDITION(aws_http_message_is_response(response)); return stream->owning_connection->vtable->stream_send_response(stream, response); } struct aws_http_stream *aws_http_stream_acquire(struct aws_http_stream *stream) { AWS_PRECONDITION(stream); size_t prev_refcount = aws_atomic_fetch_add(&stream->refcount, 1); AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Stream refcount acquired, %zu remaining.", (void *)stream, prev_refcount + 1); return stream; } void aws_http_stream_release(struct aws_http_stream *stream) { if (!stream) { return; } size_t prev_refcount = aws_atomic_fetch_sub(&stream->refcount, 1); if (prev_refcount == 1) { AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Final stream refcount released.", (void *)stream); void *user_data = stream->user_data; aws_http_on_stream_destroy_fn *on_destroy_callback = stream->on_destroy; struct aws_http_connection *owning_connection = stream->owning_connection; stream->vtable->destroy(stream); if (on_destroy_callback) { /* inform user that destroy completed.
*/ on_destroy_callback(user_data); } /* Connection needed to outlive stream, but it's free to go now */ aws_http_connection_release(owning_connection); } else { AWS_ASSERT(prev_refcount != 0); AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Stream refcount released, %zu remaining.", (void *)stream, prev_refcount - 1); } } struct aws_http_connection *aws_http_stream_get_connection(const struct aws_http_stream *stream) { AWS_ASSERT(stream); return stream->owning_connection; } int aws_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status) { AWS_ASSERT(stream && stream->client_data); if (stream->client_data->response_status == (int)AWS_HTTP_STATUS_CODE_UNKNOWN) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Status code not yet received.", (void *)stream); return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE); } *out_status = stream->client_data->response_status; return AWS_OP_SUCCESS; } int aws_http_stream_get_incoming_request_method( const struct aws_http_stream *stream, struct aws_byte_cursor *out_method) { AWS_ASSERT(stream && stream->server_data); if (!stream->server_data->request_method_str.ptr) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Request method not yet received.", (void *)stream); return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE); } *out_method = stream->server_data->request_method_str; return AWS_OP_SUCCESS; } int aws_http_stream_get_incoming_request_uri(const struct aws_http_stream *stream, struct aws_byte_cursor *out_uri) { AWS_ASSERT(stream && stream->server_data); if (!stream->server_data->request_path.ptr) { AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Request URI not yet received.", (void *)stream); return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE); } *out_uri = stream->server_data->request_path; return AWS_OP_SUCCESS; } void aws_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size) { stream->vtable->update_window(stream, increment_size); } uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream) { return stream->id; } void aws_http_stream_cancel(struct aws_http_stream *stream, int error_code) { stream->vtable->cancel(stream, error_code); } int aws_http2_stream_reset(struct aws_http_stream *http2_stream, uint32_t http2_error) { AWS_PRECONDITION(http2_stream); AWS_PRECONDITION(http2_stream->vtable); if (!http2_stream->vtable->http2_reset_stream) { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.", (void *)http2_stream); return aws_raise_error(AWS_ERROR_INVALID_STATE); } return http2_stream->vtable->http2_reset_stream(http2_stream, http2_error); } int aws_http2_stream_get_received_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error) { AWS_PRECONDITION(http2_stream); AWS_PRECONDITION(http2_stream->vtable); AWS_PRECONDITION(out_http2_error); if (!http2_stream->vtable->http2_get_received_error_code) { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.", (void *)http2_stream); return aws_raise_error(AWS_ERROR_INVALID_STATE); } return http2_stream->vtable->http2_get_received_error_code(http2_stream, out_http2_error); } int aws_http2_stream_get_sent_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error) { AWS_PRECONDITION(http2_stream); AWS_PRECONDITION(http2_stream->vtable); AWS_PRECONDITION(out_http2_error); if (!http2_stream->vtable->http2_get_sent_error_code) { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: 
HTTP/2 stream only function invoked on other stream, ignoring call.", (void *)http2_stream); return aws_raise_error(AWS_ERROR_INVALID_STATE); } return http2_stream->vtable->http2_get_sent_error_code(http2_stream, out_http2_error); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/statistics.c000066400000000000000000000021601456575232400240440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include int aws_crt_statistics_http1_channel_init(struct aws_crt_statistics_http1_channel *stats) { AWS_ZERO_STRUCT(*stats); stats->category = AWSCRT_STAT_CAT_HTTP1_CHANNEL; return AWS_OP_SUCCESS; } void aws_crt_statistics_http1_channel_cleanup(struct aws_crt_statistics_http1_channel *stats) { (void)stats; } void aws_crt_statistics_http1_channel_reset(struct aws_crt_statistics_http1_channel *stats) { stats->pending_outgoing_stream_ms = 0; stats->pending_incoming_stream_ms = 0; stats->current_outgoing_stream_id = 0; stats->current_incoming_stream_id = 0; } void aws_crt_statistics_http2_channel_init(struct aws_crt_statistics_http2_channel *stats) { AWS_ZERO_STRUCT(*stats); stats->category = AWSCRT_STAT_CAT_HTTP2_CHANNEL; } void aws_crt_statistics_http2_channel_reset(struct aws_crt_statistics_http2_channel *stats) { stats->pending_outgoing_stream_ms = 0; stats->pending_incoming_stream_ms = 0; stats->was_inactive = false; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/strutil.c000066400000000000000000000234431456575232400233670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include static struct aws_byte_cursor s_trim(struct aws_byte_cursor cursor, const bool trim_table[256]) { /* trim leading whitespace */ size_t i; for (i = 0; i < cursor.len; ++i) { const uint8_t c = cursor.ptr[i]; if (!trim_table[c]) { break; } } cursor.ptr += i; cursor.len -= i; /* trim trailing whitespace */ for (; cursor.len; --cursor.len) { const uint8_t c = cursor.ptr[cursor.len - 1]; if (!trim_table[c]) { break; } } return cursor; } static const bool s_http_whitespace_table[256] = { [' '] = true, ['\t'] = true, }; struct aws_byte_cursor aws_strutil_trim_http_whitespace(struct aws_byte_cursor cursor) { return s_trim(cursor, s_http_whitespace_table); } /* RFC7230 section 3.2.6: * token = 1*tchar * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" * / "+" / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" * / DIGIT / ALPHA */ static const bool s_http_token_table[256] = { ['!'] = true, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, ['\''] = true, ['*'] = true, ['+'] = true, ['-'] = true, ['.'] = true, ['^'] = true, ['_'] = true, ['`'] = true, ['|'] = true, ['~'] = true, ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, ['5'] = true, ['6'] = true, ['7'] = true, ['8'] = true, ['9'] = true, ['A'] = true, ['B'] = true, ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true, ['I'] = true, ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true, ['Q'] = true, ['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true, ['X'] = true, ['Y'] = true, ['Z'] = true, ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, ['f'] = true, ['g'] = true, ['h'] = true, ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true, ['q'] = true, ['r'] = true, ['s'] = true, ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true, ['y'] = true, ['z'] = true, }; /* Same as above, but with uppercase characters removed */ static const bool s_http_lowercase_token_table[256] = { ['!'] = true, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, ['\''] = true, ['*'] = true, ['+'] = true, ['-'] = true, ['.'] = true, ['^'] = true, ['_'] = true, ['`'] = true, ['|'] = true, ['~'] = true, ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, ['5'] = true, ['6'] = true, ['7'] = true, ['8'] = true, ['9'] = true, ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, ['f'] = true, ['g'] = true, ['h'] = true, ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true, ['q'] = true, ['r'] = true, ['s'] = true, ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true, ['y'] = true, ['z'] = true, }; static bool s_is_token(struct aws_byte_cursor token, const bool token_table[256]) { if (token.len == 0) { return false; } for (size_t i = 0; i < token.len; ++i) { const uint8_t c = token.ptr[i]; if (token_table[c] == false) { return false; } } return true; } bool aws_strutil_is_http_token(struct aws_byte_cursor token) { return s_is_token(token, s_http_token_table); } bool aws_strutil_is_lowercase_http_token(struct aws_byte_cursor token) { return s_is_token(token, s_http_lowercase_token_table); } /* clang-format off */ /** * Table with true for all octets allowed in field-content, * as defined in RFC7230 section 3.2 and 3.2.6 and RFC5234 appendix-B.1: * * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] * field-vchar = VCHAR / obs-text * VCHAR = %x21-7E ; visible (printing) characters * obs-text = %x80-FF */ static const bool s_http_field_content_table[256] = { /* clang-format off */ /* whitespace */ ['\t'] = true, [' '] = true, /* VCHAR = 0x21-7E */ [0x21] = true, [0x22] = true, [0x23] = true, [0x24] = true, [0x25] = true, [0x26] = true, [0x27] = true, [0x28] = true, [0x29] = true, [0x2A] = true, [0x2B] = true, [0x2C] = true, [0x2D] = true, [0x2E] = true, [0x2F] = true, [0x30] = true, [0x31] = true, [0x32] = true, [0x33] = true, [0x34] = true, [0x35] = true, [0x36] = true, [0x37] = true, [0x38] = true, [0x39] = true, [0x3A] = true, [0x3B] = true, [0x3C] = true, [0x3D] = true, [0x3E] = true, [0x3F] = true, [0x40] = true, [0x41] = true, [0x42] = true, [0x43] = true, [0x44] = true, [0x45] = true, [0x46] = 
true, [0x47] = true, [0x48] = true, [0x49] = true, [0x4A] = true, [0x4B] = true, [0x4C] = true, [0x4D] = true, [0x4E] = true, [0x4F] = true, [0x50] = true, [0x51] = true, [0x52] = true, [0x53] = true, [0x54] = true, [0x55] = true, [0x56] = true, [0x57] = true, [0x58] = true, [0x59] = true, [0x5A] = true, [0x5B] = true, [0x5C] = true, [0x5D] = true, [0x5E] = true, [0x5F] = true, [0x60] = true, [0x61] = true, [0x62] = true, [0x63] = true, [0x64] = true, [0x65] = true, [0x66] = true, [0x67] = true, [0x68] = true, [0x69] = true, [0x6A] = true, [0x6B] = true, [0x6C] = true, [0x6D] = true, [0x6E] = true, [0x6F] = true, [0x70] = true, [0x71] = true, [0x72] = true, [0x73] = true, [0x74] = true, [0x75] = true, [0x76] = true, [0x77] = true, [0x78] = true, [0x79] = true, [0x7A] = true, [0x7B] = true, [0x7C] = true, [0x7D] = true, [0x7E] = true, /* obs-text = %x80-FF */ [0x80] = true, [0x81] = true, [0x82] = true, [0x83] = true, [0x84] = true, [0x85] = true, [0x86] = true, [0x87] = true, [0x88] = true, [0x89] = true, [0x8A] = true, [0x8B] = true, [0x8C] = true, [0x8D] = true, [0x8E] = true, [0x8F] = true, [0x90] = true, [0x91] = true, [0x92] = true, [0x93] = true, [0x94] = true, [0x95] = true, [0x96] = true, [0x97] = true, [0x98] = true, [0x99] = true, [0x9A] = true, [0x9B] = true, [0x9C] = true, [0x9D] = true, [0x9E] = true, [0x9F] = true, [0xA0] = true, [0xA1] = true, [0xA2] = true, [0xA3] = true, [0xA4] = true, [0xA5] = true, [0xA6] = true, [0xA7] = true, [0xA8] = true, [0xA9] = true, [0xAA] = true, [0xAB] = true, [0xAC] = true, [0xAD] = true, [0xAE] = true, [0xAF] = true, [0xB0] = true, [0xB1] = true, [0xB2] = true, [0xB3] = true, [0xB4] = true, [0xB5] = true, [0xB6] = true, [0xB7] = true, [0xB8] = true, [0xB9] = true, [0xBA] = true, [0xBB] = true, [0xBC] = true, [0xBD] = true, [0xBE] = true, [0xBF] = true, [0xC0] = true, [0xC1] = true, [0xC2] = true, [0xC3] = true, [0xC4] = true, [0xC5] = true, [0xC6] = true, [0xC7] = true, [0xC8] = true, [0xC9] = true, [0xCA] = true, [0xCB] = true, [0xCC] = true, [0xCD] = true, [0xCE] = true, [0xCF] = true, [0xD0] = true, [0xD1] = true, [0xD2] = true, [0xD3] = true, [0xD4] = true, [0xD5] = true, [0xD6] = true, [0xD7] = true, [0xD8] = true, [0xD9] = true, [0xDA] = true, [0xDB] = true, [0xDC] = true, [0xDD] = true, [0xDE] = true, [0xDF] = true, [0xE0] = true, [0xE1] = true, [0xE2] = true, [0xE3] = true, [0xE4] = true, [0xE5] = true, [0xE6] = true, [0xE7] = true, [0xE8] = true, [0xE9] = true, [0xEA] = true, [0xEB] = true, [0xEC] = true, [0xED] = true, [0xEE] = true, [0xEF] = true, [0xF0] = true, [0xF1] = true, [0xF2] = true, [0xF3] = true, [0xF4] = true, [0xF5] = true, [0xF6] = true, [0xF7] = true, [0xF8] = true, [0xF9] = true, [0xFA] = true, [0xFB] = true, [0xFC] = true, [0xFD] = true, [0xFE] = true, [0xFF] = true, /* clang-format on */ }; /** * From RFC7230 section 3.2: * field-value = *( field-content / obs-fold ) * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] * * But we're forbidding obs-fold */ bool aws_strutil_is_http_field_value(struct aws_byte_cursor cursor) { if (cursor.len == 0) { return true; } /* first and last char cannot be whitespace */ const uint8_t first_c = cursor.ptr[0]; const uint8_t last_c = cursor.ptr[cursor.len - 1]; if (s_http_whitespace_table[first_c] || s_http_whitespace_table[last_c]) { return false; } /* ensure every char is legal field-content */ size_t i = 0; do { const uint8_t c = cursor.ptr[i++]; if (s_http_field_content_table[c] == false) { return false; } } while (i < cursor.len); return true; } /** * From RFC7230 
section 3.1.2: * reason-phrase = *( HTAB / SP / VCHAR / obs-text ) * VCHAR = %x21-7E ; visible (printing) characters * obs-text = %x80-FF */ bool aws_strutil_is_http_reason_phrase(struct aws_byte_cursor cursor) { for (size_t i = 0; i < cursor.len; ++i) { const uint8_t c = cursor.ptr[i]; /* the field-content table happens to allow the exact same characters as reason-phrase */ if (s_http_field_content_table[c] == false) { return false; } } return true; } bool aws_strutil_is_http_request_target(struct aws_byte_cursor cursor) { if (cursor.len == 0) { return false; } /* TODO: Actually check the complete grammar as defined in RFC7230 5.3 and * RFC3986. Currently this just checks whether the sequence is blatantly illegal */ size_t i = 0; do { const uint8_t c = cursor.ptr[i++]; /* everything <= ' ' is non-visible ascii*/ if (c <= ' ') { return false; } } while (i < cursor.len); return true; } bool aws_strutil_is_http_pseudo_header_name(struct aws_byte_cursor cursor) { if (cursor.len == 0) { return false; } const uint8_t c = cursor.ptr[0]; if (c != ':') { /* short cut */ return false; } return true; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/websocket.c000066400000000000000000002126011456575232400236430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif /* TODO: If something goes wrong during normal shutdown, do I change the error_code? */ struct outgoing_frame { struct aws_websocket_send_frame_options def; struct aws_linked_list_node node; }; struct aws_websocket { struct aws_allocator *alloc; struct aws_ref_count ref_count; struct aws_channel_handler channel_handler; struct aws_channel_slot *channel_slot; size_t initial_window_size; bool manual_window_update; void *user_data; aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin; aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload; aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete; struct aws_channel_task move_synced_data_to_thread_task; struct aws_channel_task shutdown_channel_task; struct aws_channel_task increment_read_window_task; struct aws_channel_task waiting_on_payload_stream_task; struct aws_channel_task close_timeout_task; bool is_server; /* Data that should only be accessed from the websocket's channel thread. */ struct { struct aws_websocket_encoder encoder; /* list of outbound frames that have yet to be encoded and sent to the socket */ struct aws_linked_list outgoing_frame_list; /* current outbound frame being encoded and sent to the socket */ struct outgoing_frame *current_outgoing_frame; /* * list of outbound frames that have been completely written to the io message heading to the socket. * When the socket write completes we can in turn invoke completion callbacks for all of these frames */ struct aws_linked_list write_completion_frames; struct aws_websocket_decoder decoder; struct aws_websocket_incoming_frame *current_incoming_frame; struct aws_websocket_incoming_frame incoming_frame_storage; /* Payload of incoming PING frame. * The PONG frame we send in response must have an identical payload */ struct aws_byte_buf incoming_ping_payload; /* If current incoming frame is CONTINUATION, this is the data type it is a continuation of. 
*/ enum aws_websocket_opcode continuation_of_opcode; /* Amount to increment window after a channel message has been processed. */ size_t incoming_message_window_update; /* Cached slot to right */ struct aws_channel_slot *last_known_right_slot; /* True when no more frames will be read, due to: * - a CLOSE frame was received * - decoder error * - channel shutdown in read-dir */ bool is_reading_stopped; /* True when no more frames will be written, due to: * - a CLOSE frame was sent * - encoder error * - channel shutdown in write-dir */ bool is_writing_stopped; /* During normal shutdown websocket ensures that a CLOSE frame is sent */ bool is_shutting_down_and_waiting_for_close_frame_to_be_written; int channel_shutdown_error_code; bool channel_shutdown_free_scarce_resources_immediately; /* Wait until each aws_io_message is completely written to * the socket before sending the next aws_io_message */ bool is_waiting_for_write_completion; /* If, while writing out data from a payload stream, we experience "read would block", * schedule a task to try again in the near-future. */ bool is_waiting_on_payload_stream_task; /* True if this websocket is being used as a dumb mid-channel handler. * The websocket will no longer respond to its public API or invoke callbacks. */ bool is_midchannel_handler; } thread_data; /* Data that may be touched from any thread (lock must be held). */ struct { struct aws_mutex lock; struct aws_linked_list outgoing_frame_list; /* If non-zero, then increment_read_window_task is scheduled */ size_t window_increment_size; /* Error-code returned by aws_websocket_send_frame() when is_writing_stopped is true */ int send_frame_error_code; /* Use a task to issue a channel shutdown. */ int shutdown_channel_task_error_code; bool is_shutdown_channel_task_scheduled; bool is_move_synced_data_to_thread_task_scheduled; /* Mirrors variable from thread_data */ bool is_midchannel_handler; } synced_data; }; static int s_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); static int s_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); static int s_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size); static int s_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately); static size_t s_handler_initial_window_size(struct aws_channel_handler *handler); static size_t s_handler_message_overhead(struct aws_channel_handler *handler); static void s_handler_destroy(struct aws_channel_handler *handler); static void s_websocket_on_refcount_zero(void *user_data); static int s_encoder_stream_outgoing_payload(struct aws_byte_buf *out_buf, void *user_data); static int s_decoder_on_frame(const struct aws_websocket_frame *frame, void *user_data); static int s_decoder_on_payload(struct aws_byte_cursor data, void *user_data); static int s_decoder_on_user_payload(struct aws_websocket *websocket, struct aws_byte_cursor data); static int s_decoder_on_midchannel_payload(struct aws_websocket *websocket, struct aws_byte_cursor data); static void s_destroy_outgoing_frame(struct aws_websocket *websocket, struct outgoing_frame *frame, int error_code); static void s_complete_frame_list(struct aws_websocket *websocket, struct aws_linked_list *frames, int error_code); static void 
s_complete_incoming_frame(struct aws_websocket *websocket, int error_code, bool *out_callback_result); static void s_finish_shutdown(struct aws_websocket *websocket); static void s_io_message_write_completed( struct aws_channel *channel, struct aws_io_message *message, int err_code, void *user_data); static int s_send_frame( struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options, bool from_public_api); static bool s_midchannel_send_payload(struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data); static void s_midchannel_send_complete(struct aws_websocket *websocket, int error_code, void *user_data); static void s_move_synced_data_to_thread_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static void s_increment_read_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static void s_shutdown_channel_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static void s_waiting_on_payload_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static void s_close_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static void s_schedule_channel_shutdown(struct aws_websocket *websocket, int error_code); static void s_shutdown_due_to_write_err(struct aws_websocket *websocket, int error_code); static void s_shutdown_due_to_read_err(struct aws_websocket *websocket, int error_code); static void s_stop_writing(struct aws_websocket *websocket, int send_frame_error_code); static void s_try_write_outgoing_frames(struct aws_websocket *websocket); static struct aws_channel_handler_vtable s_channel_handler_vtable = { .process_read_message = s_handler_process_read_message, .process_write_message = s_handler_process_write_message, .increment_read_window = s_handler_increment_read_window, .shutdown = s_handler_shutdown, .initial_window_size = s_handler_initial_window_size, .message_overhead = s_handler_message_overhead, .destroy = s_handler_destroy, }; const char *aws_websocket_opcode_str(uint8_t opcode) { switch (opcode) { case AWS_WEBSOCKET_OPCODE_CONTINUATION: return "continuation"; case AWS_WEBSOCKET_OPCODE_TEXT: return "text"; case AWS_WEBSOCKET_OPCODE_BINARY: return "binary"; case AWS_WEBSOCKET_OPCODE_CLOSE: return "close"; case AWS_WEBSOCKET_OPCODE_PING: return "ping"; case AWS_WEBSOCKET_OPCODE_PONG: return "pong"; default: return ""; } } bool aws_websocket_is_data_frame(uint8_t opcode) { /* RFC-6455 Section 5.6: Most significant bit of (4 bit) data frame opcode is 0 */ return !(opcode & 0x08); } static void s_lock_synced_data(struct aws_websocket *websocket) { int err = aws_mutex_lock(&websocket->synced_data.lock); AWS_ASSERT(!err); (void)err; } static void s_unlock_synced_data(struct aws_websocket *websocket) { int err = aws_mutex_unlock(&websocket->synced_data.lock); AWS_ASSERT(!err); (void)err; } struct aws_websocket *aws_websocket_handler_new(const struct aws_websocket_handler_options *options) { struct aws_channel_slot *slot = NULL; struct aws_websocket *websocket = NULL; int err; slot = aws_channel_slot_new(options->channel); if (!slot) { goto error; } err = aws_channel_slot_insert_end(options->channel, slot); if (err) { goto error; } websocket = aws_mem_calloc(options->allocator, 1, sizeof(struct aws_websocket)); if (!websocket) { goto error; } websocket->alloc = options->allocator; aws_ref_count_init(&websocket->ref_count, websocket, s_websocket_on_refcount_zero); websocket->channel_handler.vtable 
= &s_channel_handler_vtable; websocket->channel_handler.alloc = options->allocator; websocket->channel_handler.impl = websocket; websocket->channel_slot = slot; websocket->initial_window_size = options->initial_window_size; websocket->manual_window_update = options->manual_window_update; websocket->user_data = options->user_data; websocket->on_incoming_frame_begin = options->on_incoming_frame_begin; websocket->on_incoming_frame_payload = options->on_incoming_frame_payload; websocket->on_incoming_frame_complete = options->on_incoming_frame_complete; websocket->is_server = options->is_server; aws_channel_task_init( &websocket->move_synced_data_to_thread_task, s_move_synced_data_to_thread_task, websocket, "websocket_move_synced_data_to_thread"); aws_channel_task_init( &websocket->shutdown_channel_task, s_shutdown_channel_task, websocket, "websocket_shutdown_channel"); aws_channel_task_init( &websocket->increment_read_window_task, s_increment_read_window_task, websocket, "websocket_increment_read_window"); aws_channel_task_init( &websocket->waiting_on_payload_stream_task, s_waiting_on_payload_stream_task, websocket, "websocket_waiting_on_payload_stream"); aws_channel_task_init(&websocket->close_timeout_task, s_close_timeout_task, websocket, "websocket_close_timeout"); aws_linked_list_init(&websocket->thread_data.outgoing_frame_list); aws_linked_list_init(&websocket->thread_data.write_completion_frames); aws_byte_buf_init(&websocket->thread_data.incoming_ping_payload, websocket->alloc, 0); aws_websocket_encoder_init(&websocket->thread_data.encoder, s_encoder_stream_outgoing_payload, websocket); aws_websocket_decoder_init( &websocket->thread_data.decoder, options->allocator, s_decoder_on_frame, s_decoder_on_payload, websocket); aws_linked_list_init(&websocket->synced_data.outgoing_frame_list); err = aws_mutex_init(&websocket->synced_data.lock); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "static: Failed to initialize mutex, error %d (%s).", aws_last_error(), aws_error_name(aws_last_error())); goto error; } err = aws_channel_slot_set_handler(slot, &websocket->channel_handler); if (err) { goto error; } /* Ensure websocket (and the rest of the channel) can't be destroyed until aws_websocket_release() is called */ aws_channel_acquire_hold(options->channel); return websocket; error: if (slot) { if (websocket && !slot->handler) { websocket->channel_handler.vtable->destroy(&websocket->channel_handler); } aws_channel_slot_remove(slot); } return NULL; } static void s_handler_destroy(struct aws_channel_handler *handler) { struct aws_websocket *websocket = handler->impl; AWS_ASSERT(!websocket->thread_data.current_outgoing_frame); AWS_ASSERT(!websocket->thread_data.current_incoming_frame); AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Destroying websocket.", (void *)websocket); aws_websocket_decoder_clean_up(&websocket->thread_data.decoder); aws_byte_buf_clean_up(&websocket->thread_data.incoming_ping_payload); aws_mutex_clean_up(&websocket->synced_data.lock); aws_mem_release(websocket->alloc, websocket); } struct aws_websocket *aws_websocket_acquire(struct aws_websocket *websocket) { AWS_PRECONDITION(websocket); AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Acquiring websocket ref-count.", (void *)websocket); aws_ref_count_acquire(&websocket->ref_count); return websocket; } void aws_websocket_release(struct aws_websocket *websocket) { if (!websocket) { return; } AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Releasing websocket ref-count.", (void *)websocket); 
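/* Descriptive note: when the ref-count reaches zero, s_websocket_on_refcount_zero() schedules a channel shutdown (if one isn't already underway) and releases the channel hold taken in aws_websocket_handler_new(). */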
aws_ref_count_release(&websocket->ref_count); } static void s_websocket_on_refcount_zero(void *user_data) { struct aws_websocket *websocket = user_data; AWS_ASSERT(websocket->channel_slot); AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket ref-count is zero, shut down if necessary.", (void *)websocket); /* Channel might already be shut down, but make sure */ s_schedule_channel_shutdown(websocket, AWS_ERROR_SUCCESS); /* Channel won't destroy its slots/handlers until its refcount reaches 0 */ aws_channel_release_hold(websocket->channel_slot->channel); } struct aws_channel *aws_websocket_get_channel(const struct aws_websocket *websocket) { return websocket->channel_slot->channel; } int aws_websocket_convert_to_midchannel_handler(struct aws_websocket *websocket) { if (!aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Cannot convert to midchannel handler on this thread.", (void *)websocket); return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (websocket->thread_data.is_midchannel_handler) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket has already converted to midchannel handler.", (void *)websocket); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER); } if (websocket->thread_data.is_reading_stopped || websocket->thread_data.is_writing_stopped) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Cannot convert websocket to midchannel handler because it is closed or closing.", (void *)websocket); return aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); } if (websocket->thread_data.current_incoming_frame) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Cannot convert to midchannel handler in the middle of an incoming frame.", (void *)websocket); return aws_raise_error(AWS_ERROR_INVALID_STATE); } websocket->thread_data.is_midchannel_handler = true; return AWS_OP_SUCCESS; } static int s_send_frame( struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options, bool from_public_api) { AWS_ASSERT(websocket); AWS_ASSERT(options); /* Check for bad input. Log about non-obvious errors. */ if (options->payload_length > 0 && !options->stream_outgoing_payload) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Invalid frame options, payload streaming function required when payload length is non-zero.", (void *)websocket); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct outgoing_frame *frame = aws_mem_calloc(websocket->alloc, 1, sizeof(struct outgoing_frame)); if (!frame) { return AWS_OP_ERR; } frame->def = *options; /* Enqueue frame, unless no further sending is allowed. 
*/ int send_error = 0; bool should_schedule_task = false; /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); if (websocket->synced_data.is_midchannel_handler && from_public_api) { send_error = AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER; } else if (websocket->synced_data.send_frame_error_code) { send_error = websocket->synced_data.send_frame_error_code; } else { aws_linked_list_push_back(&websocket->synced_data.outgoing_frame_list, &frame->node); if (!websocket->synced_data.is_move_synced_data_to_thread_task_scheduled) { websocket->synced_data.is_move_synced_data_to_thread_task_scheduled = true; should_schedule_task = true; } } s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ if (send_error) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Cannot send frame, error %d (%s).", (void *)websocket, send_error, aws_error_name(send_error)); aws_mem_release(websocket->alloc, frame); return aws_raise_error(send_error); } AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET, "id=%p: Enqueuing outgoing frame with opcode=%" PRIu8 "(%s) length=%" PRIu64 " fin=%s", (void *)websocket, options->opcode, aws_websocket_opcode_str(options->opcode), options->payload_length, options->fin ? "T" : "F"); if (should_schedule_task) { AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Scheduling synced data task.", (void *)websocket); aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->move_synced_data_to_thread_task); } return AWS_OP_SUCCESS; } int aws_websocket_send_frame(struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options) { return s_send_frame(websocket, options, true); } static void s_move_synced_data_to_thread_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_websocket *websocket = arg; struct aws_linked_list tmp_list; aws_linked_list_init(&tmp_list); /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); aws_linked_list_swap_contents(&websocket->synced_data.outgoing_frame_list, &tmp_list); websocket->synced_data.is_move_synced_data_to_thread_task_scheduled = false; s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ if (!aws_linked_list_empty(&tmp_list)) { aws_linked_list_move_all_back(&websocket->thread_data.outgoing_frame_list, &tmp_list); s_try_write_outgoing_frames(websocket); } } static void s_try_write_outgoing_frames(struct aws_websocket *websocket) { AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); int err; /* Check whether we should be writing data */ if (!websocket->thread_data.current_outgoing_frame && aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) { AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: No data to write at this time.", (void *)websocket); return; } if (websocket->thread_data.is_waiting_for_write_completion) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Waiting until outstanding aws_io_message is written to socket before sending more data.", (void *)websocket); return; } if (websocket->thread_data.is_writing_stopped) { AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket is no longer sending data.", (void *)websocket); return; } /* Acquire aws_io_message */ struct aws_io_message *io_msg = aws_channel_slot_acquire_max_message_for_write(websocket->channel_slot); if (!io_msg) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed acquire message from pool, error %d (%s).", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; 
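/* Descriptive note: the error label at the end of this function releases the io_msg (if one was acquired) and shuts the channel down in the write direction. */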
} io_msg->user_data = websocket; io_msg->on_completion = s_io_message_write_completed; /* Loop through frames, writing their data into the io_msg */ bool wrote_close_frame = false; while (!websocket->thread_data.is_writing_stopped) { if (websocket->thread_data.current_outgoing_frame) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Resuming write of frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 ".", (void *)websocket, (void *)websocket->thread_data.current_outgoing_frame, websocket->thread_data.current_outgoing_frame->def.opcode, aws_websocket_opcode_str(websocket->thread_data.current_outgoing_frame->def.opcode), websocket->thread_data.current_outgoing_frame->def.payload_length); } else { /* We're not in the middle of encoding a frame, so pop off the next one to encode. */ if (aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) { AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: No more frames to write.", (void *)websocket); break; } struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->thread_data.outgoing_frame_list); websocket->thread_data.current_outgoing_frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node); struct aws_websocket_frame frame = { .fin = websocket->thread_data.current_outgoing_frame->def.fin, .opcode = websocket->thread_data.current_outgoing_frame->def.opcode, .payload_length = websocket->thread_data.current_outgoing_frame->def.payload_length, }; /* RFC-6455 Section 5.3 Client-to-Server Masking * Clients must mask payload with key derived from an unpredictable source of entropy. */ if (!websocket->is_server) { frame.masked = true; /* TODO: faster source of random (but still seeded by device_random) */ struct aws_byte_buf masking_key_buf = aws_byte_buf_from_empty_array(frame.masking_key, 4); err = aws_device_random_buffer(&masking_key_buf); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to derive masking key, error %d (%s).", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; } } err = aws_websocket_encoder_start_frame(&websocket->thread_data.encoder, &frame); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to start frame encoding, error %d (%s).", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; } AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Start writing frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 ".", (void *)websocket, (void *)websocket->thread_data.current_outgoing_frame, websocket->thread_data.current_outgoing_frame->def.opcode, aws_websocket_opcode_str(websocket->thread_data.current_outgoing_frame->def.opcode), websocket->thread_data.current_outgoing_frame->def.payload_length); } err = aws_websocket_encoder_process(&websocket->thread_data.encoder, &io_msg->message_data); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Frame encoding failed with error %d (%s).", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; } if (aws_websocket_encoder_is_frame_in_progress(&websocket->thread_data.encoder)) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing frame still in progress, but no more data can be written at this time.", (void *)websocket); break; } if (websocket->thread_data.current_outgoing_frame->def.opcode == AWS_WEBSOCKET_OPCODE_CLOSE) { wrote_close_frame = true; } /* * a completely-written frame gets added to the write completion list so that when the socket write completes * we can complete all of the outbound frames that were finished as part of the io 
message */ aws_linked_list_push_back( &websocket->thread_data.write_completion_frames, &websocket->thread_data.current_outgoing_frame->node); websocket->thread_data.current_outgoing_frame = NULL; if (wrote_close_frame) { break; } } /* If payload stream didn't have any bytes available to read right now, then the aws_io_message might be empty. * If this is the case schedule a task to try again in the future. */ if (io_msg->message_data.len == 0) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Reading from payload stream would block, will try again later.", (void *)websocket); if (!websocket->thread_data.is_waiting_on_payload_stream_task) { websocket->thread_data.is_waiting_on_payload_stream_task = true; /* Future Optimization Idea: Minimize work while we wait. Use some kind of backoff for the retry timing, * or have some way for stream to notify when more data is available. */ aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->waiting_on_payload_stream_task); } aws_mem_release(io_msg->allocator, io_msg); return; } /* Prepare to send aws_io_message up the channel. */ /* If CLOSE frame was written, that's the last data we'll write */ if (wrote_close_frame) { s_stop_writing(websocket, AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT); } AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Sending aws_io_message of size %zu in write direction.", (void *)websocket, io_msg->message_data.len); websocket->thread_data.is_waiting_for_write_completion = true; err = aws_channel_slot_send_message(websocket->channel_slot, io_msg, AWS_CHANNEL_DIR_WRITE); if (err) { websocket->thread_data.is_waiting_for_write_completion = false; AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to send message in write direction, error %d (%s).", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Finish shutdown if we were waiting for the CLOSE frame to be written */ if (wrote_close_frame && websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: CLOSE frame sent, finishing handler shutdown sequence.", (void *)websocket); s_finish_shutdown(websocket); } return; error: if (io_msg) { aws_mem_release(io_msg->allocator, io_msg); } s_shutdown_due_to_write_err(websocket, aws_last_error()); } /* Encoder's outgoing_payload callback invokes current frame's callback */ static int s_encoder_stream_outgoing_payload(struct aws_byte_buf *out_buf, void *user_data) { struct aws_websocket *websocket = user_data; AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_ASSERT(websocket->thread_data.current_outgoing_frame); struct outgoing_frame *current_frame = websocket->thread_data.current_outgoing_frame; AWS_ASSERT(current_frame->def.stream_outgoing_payload); bool callback_result = current_frame->def.stream_outgoing_payload(websocket, out_buf, current_frame->def.user_data); if (!callback_result) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing payload callback has reported a failure.", (void *)websocket); return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE); } return AWS_OP_SUCCESS; } static void s_waiting_on_payload_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { /* If channel has shut down, don't need to resume sending payload */ return; } struct aws_websocket *websocket = arg; AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_LOGF_TRACE( 
AWS_LS_HTTP_WEBSOCKET, "id=%p: Done waiting for payload stream, sending more data...", (void *)websocket); websocket->thread_data.is_waiting_on_payload_stream_task = false; s_try_write_outgoing_frames(websocket); } static void s_io_message_write_completed( struct aws_channel *channel, struct aws_io_message *message, int err_code, void *user_data) { (void)channel; (void)message; struct aws_websocket *websocket = user_data; AWS_ASSERT(aws_channel_thread_is_callers_thread(channel)); /* * Invoke the completion callbacks (and then destroy) for all the frames that were completely written as * part of this message completion at the socket layer */ s_complete_frame_list(websocket, &websocket->thread_data.write_completion_frames, err_code); if (err_code == AWS_ERROR_SUCCESS) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: aws_io_message written to socket, sending more data...", (void *)websocket); websocket->thread_data.is_waiting_for_write_completion = false; s_try_write_outgoing_frames(websocket); } else { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: aws_io_message did not finish writing to socket, error %d (%s).", (void *)websocket, err_code, aws_error_name(err_code)); s_shutdown_due_to_write_err(websocket, err_code); } } static int s_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)slot; struct aws_websocket *websocket = handler->impl; AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); /* For each aws_io_message headed in the write direction, send a BINARY frame, * where the frame's payload is the data from this aws_io_message. */ struct aws_websocket_send_frame_options options = { .payload_length = message->message_data.len, .user_data = message, .stream_outgoing_payload = s_midchannel_send_payload, .on_complete = s_midchannel_send_complete, .opcode = AWS_WEBSOCKET_OPCODE_BINARY, .fin = true, }; /* Use copy_mark to track progress as the data is streamed out */ message->copy_mark = 0; int err = s_send_frame(websocket, &options, false); if (err) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* Callback for writing data from downstream aws_io_messages into payload of BINARY frames headed upstream */ static bool s_midchannel_send_payload(struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data) { (void)websocket; struct aws_io_message *io_msg = user_data; /* copy_mark is used to track progress */ size_t src_available = io_msg->message_data.len - io_msg->copy_mark; size_t dst_available = out_buf->capacity - out_buf->len; size_t sending = dst_available < src_available ? dst_available : src_available; bool success = aws_byte_buf_write(out_buf, io_msg->message_data.buffer + io_msg->copy_mark, sending); io_msg->copy_mark += sending; return success; } /* Callback when data from downstream aws_io_messages, finishes being sent as a BINARY frame upstream. 
*/ static void s_midchannel_send_complete(struct aws_websocket *websocket, int error_code, void *user_data) { (void)websocket; struct aws_io_message *io_msg = user_data; if (io_msg->on_completion) { io_msg->on_completion(io_msg->owning_channel, io_msg, error_code, io_msg->user_data); } aws_mem_release(io_msg->allocator, io_msg); } static void s_destroy_outgoing_frame(struct aws_websocket *websocket, struct outgoing_frame *frame, int error_code) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Completed outgoing frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 " with error_code %d (%s).", (void *)websocket, (void *)frame, frame->def.opcode, aws_websocket_opcode_str(frame->def.opcode), frame->def.payload_length, error_code, aws_error_name(error_code)); if (frame->def.on_complete) { frame->def.on_complete(websocket, error_code, frame->def.user_data); } aws_mem_release(websocket->alloc, frame); } static void s_complete_frame_list(struct aws_websocket *websocket, struct aws_linked_list *frames, int error_code) { struct aws_linked_list_node *node = aws_linked_list_begin(frames); while (node != aws_linked_list_end(frames)) { struct outgoing_frame *frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node); node = aws_linked_list_next(node); s_destroy_outgoing_frame(websocket, frame, error_code); } /* we've released everything, so reset the list to empty */ aws_linked_list_init(frames); } static void s_stop_writing(struct aws_websocket *websocket, int send_frame_error_code) { AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_ASSERT(send_frame_error_code != AWS_ERROR_SUCCESS); if (websocket->thread_data.is_writing_stopped) { return; } AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket will send no more data, future attempts to send will get error %d (%s).", (void *)websocket, send_frame_error_code, aws_error_name(send_frame_error_code)); /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); websocket->synced_data.send_frame_error_code = send_frame_error_code; s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ websocket->thread_data.is_writing_stopped = true; } static void s_shutdown_due_to_write_err(struct aws_websocket *websocket, int error_code) { AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); /* No more writing allowed (it's ok to call this redundantly). */ s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED); /* If there's a current outgoing frame, complete it with the specific error code. * Any other pending frames will complete with the generic CONNECTION_CLOSED error. */ if (websocket->thread_data.current_outgoing_frame) { s_destroy_outgoing_frame(websocket, websocket->thread_data.current_outgoing_frame, error_code); websocket->thread_data.current_outgoing_frame = NULL; } /* If we're in the final stages of shutdown, ensure shutdown completes. * Otherwise tell the channel to shutdown (it's ok to shutdown the channel redundantly). 
*/ if (websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) { s_finish_shutdown(websocket); } else { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Closing websocket due to failure during write, error %d (%s).", (void *)websocket, error_code, aws_error_name(error_code)); s_schedule_channel_shutdown(websocket, error_code); } } static void s_shutdown_due_to_read_err(struct aws_websocket *websocket, int error_code) { AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Closing websocket due to failure during read, error %d (%s).", (void *)websocket, error_code, aws_error_name(error_code)); websocket->thread_data.is_reading_stopped = true; /* If there's a current incoming frame, complete it with the specific error code. */ if (websocket->thread_data.current_incoming_frame) { s_complete_incoming_frame(websocket, error_code, NULL); } /* Tell channel to shutdown (it's ok to call this redundantly) */ s_schedule_channel_shutdown(websocket, error_code); } static void s_shutdown_channel_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_websocket *websocket = arg; int error_code; /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); error_code = websocket->synced_data.shutdown_channel_task_error_code; s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ aws_channel_shutdown(websocket->channel_slot->channel, error_code); } /* Tell the channel to shut down. It is safe to call this multiple times. * The call to aws_channel_shutdown() is delayed so that a user invoking aws_websocket_close doesn't * have completion callbacks firing before the function call even returns */ static void s_schedule_channel_shutdown(struct aws_websocket *websocket, int error_code) { bool schedule_shutdown = false; /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); if (!websocket->synced_data.is_shutdown_channel_task_scheduled) { schedule_shutdown = true; websocket->synced_data.is_shutdown_channel_task_scheduled = true; websocket->synced_data.shutdown_channel_task_error_code = error_code; } s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ if (schedule_shutdown) { aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->shutdown_channel_task); } } void aws_websocket_close(struct aws_websocket *websocket, bool free_scarce_resources_immediately) { bool is_midchannel_handler; /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); is_midchannel_handler = websocket->synced_data.is_midchannel_handler; s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ if (is_midchannel_handler) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Ignoring close call, websocket has converted to midchannel handler.", (void *)websocket); return; } /* TODO: aws_channel_shutdown() should let users specify error_code and "immediate" as separate parameters. 
* Currently, any non-zero error_code results in "immediate" shutdown */ int error_code = AWS_ERROR_SUCCESS; if (free_scarce_resources_immediately) { error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED; } s_schedule_channel_shutdown(websocket, error_code); } static int s_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel)); struct aws_websocket *websocket = handler->impl; int err; AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket handler shutting down dir=%s error_code=%d immediate=%d.", (void *)websocket, dir == AWS_CHANNEL_DIR_READ ? "READ" : "WRITE", error_code, free_scarce_resources_immediately); if (dir == AWS_CHANNEL_DIR_READ) { /* Shutdown in the read direction is immediate and simple. */ websocket->thread_data.is_reading_stopped = true; aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } else { websocket->thread_data.channel_shutdown_error_code = error_code; websocket->thread_data.channel_shutdown_free_scarce_resources_immediately = free_scarce_resources_immediately; websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written = true; if (websocket->thread_data.channel_shutdown_free_scarce_resources_immediately || websocket->thread_data.is_writing_stopped) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Finishing handler shutdown immediately, without ensuring a CLOSE frame was sent.", (void *)websocket); s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED); s_finish_shutdown(websocket); } else { /* Attempt to queue a CLOSE frame, then wait for it to send before finishing shutdown. */ struct aws_websocket_send_frame_options close_frame = { .opcode = AWS_WEBSOCKET_OPCODE_CLOSE, .fin = true, }; err = s_send_frame(websocket, &close_frame, false); if (err) { AWS_LOGF_WARN( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to send CLOSE frame, error %d (%s).", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED); s_finish_shutdown(websocket); } else { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing CLOSE frame queued, handler will finish shutdown once it's sent.", (void *)websocket); /* schedule a task to run after 1 sec. If the CLOSE frame still has not been sent by then, we should just cancel * sending it and shut down the channel. 
*/ uint64_t schedule_time = 0; aws_channel_current_clock_time(websocket->channel_slot->channel, &schedule_time); schedule_time += AWS_WEBSOCKET_CLOSE_TIMEOUT; AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: websocket_close_timeout task will be run at timestamp %" PRIu64, (void *)websocket, schedule_time); aws_channel_schedule_task_future( websocket->channel_slot->channel, &websocket->close_timeout_task, schedule_time); } } } return AWS_OP_SUCCESS; } static void s_close_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { /* If channel has shut down, don't need to resume sending payload */ return; } struct aws_websocket *websocket = arg; AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); if (!websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) { /* Not waiting for write to complete, which means the CLOSE frame has sent, just do nothing */ return; } AWS_LOGF_WARN( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to send CLOSE frame, timeout happened, shutdown the channel", (void *)websocket); s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED); s_finish_shutdown(websocket); } static void s_finish_shutdown(struct aws_websocket *websocket) { AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_ASSERT(websocket->thread_data.is_writing_stopped); AWS_ASSERT(websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written); AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Finishing websocket handler shutdown.", (void *)websocket); websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written = false; /* Cancel all incomplete frames */ if (websocket->thread_data.current_incoming_frame) { s_complete_incoming_frame(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED, NULL); } if (websocket->thread_data.current_outgoing_frame) { s_destroy_outgoing_frame( websocket, websocket->thread_data.current_outgoing_frame, AWS_ERROR_HTTP_CONNECTION_CLOSED); websocket->thread_data.current_outgoing_frame = NULL; } /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); while (!aws_linked_list_empty(&websocket->synced_data.outgoing_frame_list)) { /* Move frames from synced_data to thread_data, then cancel them together outside critical section */ struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->synced_data.outgoing_frame_list); aws_linked_list_push_back(&websocket->thread_data.outgoing_frame_list, node); } s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ s_complete_frame_list(websocket, &websocket->thread_data.write_completion_frames, AWS_ERROR_HTTP_CONNECTION_CLOSED); while (!aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->thread_data.outgoing_frame_list); struct outgoing_frame *frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node); s_destroy_outgoing_frame(websocket, frame, AWS_ERROR_HTTP_CONNECTION_CLOSED); } aws_channel_slot_on_handler_shutdown_complete( websocket->channel_slot, AWS_CHANNEL_DIR_WRITE, websocket->thread_data.channel_shutdown_error_code, websocket->thread_data.channel_shutdown_free_scarce_resources_immediately); } static int s_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { AWS_ASSERT(message); AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel)); struct 
aws_websocket *websocket = handler->impl; struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&message->message_data); int err; /* At the end of this function we'll bump the window back up by this amount. * We start off assuming we'll re-open the window by the whole amount, * but this number will go down if we process any payload data that ought to shrink the window */ websocket->thread_data.incoming_message_window_update = message->message_data.len; AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Begin processing incoming message of size %zu.", (void *)websocket, message->message_data.len); while (cursor.len) { if (websocket->thread_data.is_reading_stopped) { goto clean_up; } bool frame_complete; err = aws_websocket_decoder_process(&websocket->thread_data.decoder, &cursor, &frame_complete); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed processing incoming message, error %d (%s). Closing connection.", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; } if (frame_complete) { bool callback_result; s_complete_incoming_frame(websocket, AWS_ERROR_SUCCESS, &callback_result); if (!callback_result) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Incoming frame completion callback has reported a failure. Closing connection", (void *)websocket); aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE); goto error; } } } if (websocket->thread_data.incoming_message_window_update > 0) { err = aws_channel_slot_increment_read_window(slot, websocket->thread_data.incoming_message_window_update); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to increment read window after message processing, error %d (%s). Closing " "connection.", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; } } goto clean_up; error: s_shutdown_due_to_read_err(websocket, aws_last_error()); clean_up: if (cursor.len > 0) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Done processing incoming message, final %zu bytes ignored.", (void *)websocket, cursor.len); } else { AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Done processing incoming message.", (void *)websocket); } aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } static int s_decoder_on_frame(const struct aws_websocket_frame *frame, void *user_data) { struct aws_websocket *websocket = user_data; AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_ASSERT(!websocket->thread_data.current_incoming_frame); AWS_ASSERT(!websocket->thread_data.is_reading_stopped); websocket->thread_data.current_incoming_frame = &websocket->thread_data.incoming_frame_storage; websocket->thread_data.current_incoming_frame->payload_length = frame->payload_length; websocket->thread_data.current_incoming_frame->opcode = frame->opcode; websocket->thread_data.current_incoming_frame->fin = frame->fin; /* If CONTINUATION frames are expected, remember which type of data is being continued. * RFC-6455 Section 5.4 Fragmentation */ if (aws_websocket_is_data_frame(frame->opcode)) { if (frame->opcode != AWS_WEBSOCKET_OPCODE_CONTINUATION) { if (frame->fin) { websocket->thread_data.continuation_of_opcode = 0; } else { websocket->thread_data.continuation_of_opcode = frame->opcode; } } } else if (frame->opcode == AWS_WEBSOCKET_OPCODE_PING) { /* Prepare to store payload of PING so we can echo it back in the PONG */ aws_byte_buf_reset(&websocket->thread_data.incoming_ping_payload, false /*zero_contents*/); /* Note: we are NOT calling aws_byte_buf_reserve(). 
* This works around an attack where a malicious peer CLAIMS they'll send a huge frame, * which would cause OOM if we did the reserve immediately. * If a malicious peer wants to run us out of memory, they'll need to do * it the costly way and actually send a billion bytes. * Or we could impose our own internal limits, but for now this is simpler */ } /* Invoke user cb */ bool callback_result = true; if (websocket->on_incoming_frame_begin && !websocket->thread_data.is_midchannel_handler) { callback_result = websocket->on_incoming_frame_begin( websocket, websocket->thread_data.current_incoming_frame, websocket->user_data); } if (!callback_result) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Incoming frame callback has reported a failure.", (void *)websocket); return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE); } return AWS_OP_SUCCESS; } static int s_decoder_on_payload(struct aws_byte_cursor data, void *user_data) { struct aws_websocket *websocket = user_data; AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_ASSERT(websocket->thread_data.current_incoming_frame); AWS_ASSERT(!websocket->thread_data.is_reading_stopped); /* Store payload of PING so we can echo it back in the PONG */ if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_PING) { aws_byte_buf_append_dynamic(&websocket->thread_data.incoming_ping_payload, &data); } if (websocket->thread_data.is_midchannel_handler) { return s_decoder_on_midchannel_payload(websocket, data); } return s_decoder_on_user_payload(websocket, data); } /* Invoke user cb */ static int s_decoder_on_user_payload(struct aws_websocket *websocket, struct aws_byte_cursor data) { if (websocket->on_incoming_frame_payload) { if (!websocket->on_incoming_frame_payload( websocket, websocket->thread_data.current_incoming_frame, data, websocket->user_data)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Incoming payload callback has reported a failure.", (void *)websocket); return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE); } } /* If this is a "data" frame's payload, let the window shrink */ if (aws_websocket_is_data_frame(websocket->thread_data.current_incoming_frame->opcode) && websocket->manual_window_update) { websocket->thread_data.incoming_message_window_update -= data.len; AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET, "id=%p: The read window is shrinking by %zu due to incoming payload from 'data' frame.", (void *)websocket, data.len); } return AWS_OP_SUCCESS; } /* Pass data to channel handler on the right */ static int s_decoder_on_midchannel_payload(struct aws_websocket *websocket, struct aws_byte_cursor data) { struct aws_io_message *io_msg = NULL; /* Only pass data to next handler if it's from a BINARY frame (or the CONTINUATION of a BINARY frame) */ bool is_binary_data = websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_BINARY || (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_CONTINUATION && websocket->thread_data.continuation_of_opcode == AWS_WEBSOCKET_OPCODE_BINARY); if (!is_binary_data) { return AWS_OP_SUCCESS; } AWS_ASSERT(websocket->channel_slot->adj_right); /* Expected another slot in the read direction */ /* Note that the current implementation of the websocket handler does not buffer data travelling in the "read" direction, * so the downstream read window needs to be large enough to immediately receive incoming data. 
*/ if (aws_channel_slot_downstream_read_window(websocket->channel_slot) < data.len) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Cannot send entire message without exceeding read window.", (void *)websocket); aws_raise_error(AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW); goto error; } io_msg = aws_channel_acquire_message_from_pool( websocket->channel_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, data.len); if (!io_msg) { AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to acquire message.", (void *)websocket); goto error; } if (io_msg->message_data.capacity < data.len) { /* Probably can't happen. Data is coming from an aws_io_message, so we should be able to acquire another just as big */ AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to acquire sufficiently large message.", (void *)websocket); aws_raise_error(AWS_ERROR_UNKNOWN); goto error; } if (!aws_byte_buf_write_from_whole_cursor(&io_msg->message_data, data)) { AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Unexpected error while copying data.", (void *)websocket); aws_raise_error(AWS_ERROR_UNKNOWN); goto error; } int err = aws_channel_slot_send_message(websocket->channel_slot, io_msg, AWS_CHANNEL_DIR_READ); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to send read message, error %d (%s).", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Reduce amount by which websocket will update its read window */ AWS_ASSERT(websocket->thread_data.incoming_message_window_update >= data.len); websocket->thread_data.incoming_message_window_update -= data.len; return AWS_OP_SUCCESS; error: if (io_msg) { aws_mem_release(io_msg->allocator, io_msg); } return AWS_OP_ERR; } /* When the websocket sends a frame automatically (PONG, CLOSE), * this holds the payload. */ struct aws_websocket_autopayload { struct aws_allocator *alloc; struct aws_byte_buf buf; struct aws_byte_cursor advancing_cursor; }; static struct aws_websocket_autopayload *s_autopayload_new( struct aws_allocator *alloc, const struct aws_byte_buf *src) { struct aws_websocket_autopayload *autopayload = aws_mem_calloc(alloc, 1, sizeof(struct aws_websocket_autopayload)); autopayload->alloc = alloc; if (src->len > 0) { aws_byte_buf_init_copy(&autopayload->buf, alloc, src); autopayload->advancing_cursor = aws_byte_cursor_from_buf(&autopayload->buf); } return autopayload; } static void s_autopayload_destroy(struct aws_websocket_autopayload *autopayload) { aws_byte_buf_clean_up(&autopayload->buf); aws_mem_release(autopayload->alloc, autopayload); } static void s_autopayload_send_complete(struct aws_websocket *websocket, int error_code, void *user_data) { (void)websocket; (void)error_code; struct aws_websocket_autopayload *autopayload = user_data; s_autopayload_destroy(autopayload); } static bool s_autopayload_stream_outgoing_payload( struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data) { (void)websocket; struct aws_websocket_autopayload *autopayload = user_data; aws_byte_buf_write_to_capacity(out_buf, &autopayload->advancing_cursor); return true; } static void s_complete_incoming_frame(struct aws_websocket *websocket, int error_code, bool *out_callback_result) { AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); AWS_ASSERT(websocket->thread_data.current_incoming_frame); if (error_code == 0) { /* If this was a CLOSE frame, don't read any more data. 
*/ if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_CLOSE) { AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET, "id=%p: Close frame received, any further data received will be ignored.", (void *)websocket); websocket->thread_data.is_reading_stopped = true; /* TODO: auto-close if there's a channel-handler to the right */ } else if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_PING) { /* Automatically respond to a PING with a PONG */ if (!websocket->thread_data.is_writing_stopped) { /* Optimization idea: avoid allocations/copies each time we send an auto-PONG. * Maybe have a small autopayload pool, instead of allocating one each time. * Maybe encode directly to aws_io_message, instead of copying to a buf, that's copied to a msg later. * Maybe "std::move()" the aws_byte_bufs around instead of copying them. */ struct aws_websocket_autopayload *autopong = s_autopayload_new(websocket->alloc, &websocket->thread_data.incoming_ping_payload); struct aws_websocket_send_frame_options pong_frame = { .opcode = AWS_WEBSOCKET_OPCODE_PONG, .fin = true, .payload_length = autopong->buf.len, .stream_outgoing_payload = s_autopayload_stream_outgoing_payload, .on_complete = s_autopayload_send_complete, .user_data = autopong, }; int send_err = s_send_frame(websocket, &pong_frame, false /*from_public_api*/); /* Failure should be impossible. We already checked that writing is not stopped */ AWS_FATAL_ASSERT(!send_err && "Unexpected failure sending websocket PONG"); } } } /* Invoke user cb */ bool callback_result = true; if (websocket->on_incoming_frame_complete && !websocket->thread_data.is_midchannel_handler) { callback_result = websocket->on_incoming_frame_complete( websocket, websocket->thread_data.current_incoming_frame, error_code, websocket->user_data); } if (out_callback_result) { *out_callback_result = callback_result; } websocket->thread_data.current_incoming_frame = NULL; } static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) { struct aws_websocket *websocket = handler->impl; return websocket->initial_window_size; } static size_t s_handler_message_overhead(struct aws_channel_handler *handler) { (void)handler; return AWS_WEBSOCKET_MAX_FRAME_OVERHEAD; } static int s_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { struct aws_websocket *websocket = handler->impl; AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel)); AWS_ASSERT(websocket->thread_data.is_midchannel_handler); /* NOTE: This is pretty hacky and should change if it ever causes issues. * * Currently, all read messages are processed the moment they're received. * If the downstream read window is open enough to accept this data, we can send it right along. * BUT if the downstream window were too small, we'd need to buffer the data and wait until * the downstream window opened again to finish sending. * * To avoid that complexity, we go to pains here to ensure that the websocket's window exactly * matches the window to the right, allowing us to avoid buffering in the read direction. 
*/ size_t increment = size; if (websocket->thread_data.last_known_right_slot != slot->adj_right) { if (size < slot->window_size) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: The websocket does not support downstream handlers with a smaller window.", (void *)websocket); aws_raise_error(AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW); goto error; } /* New handler to the right, make sure websocket's window matches its window. */ websocket->thread_data.last_known_right_slot = slot->adj_right; increment = size - slot->window_size; } if (increment != 0) { int err = aws_channel_slot_increment_read_window(slot, increment); if (err) { goto error; } } return AWS_OP_SUCCESS; error: websocket->thread_data.is_reading_stopped = true; /* Shutting down channel because I know that no one ever checks these errors */ s_shutdown_due_to_read_err(websocket, aws_last_error()); return AWS_OP_ERR; } static void s_increment_read_window_action(struct aws_websocket *websocket, size_t size) { AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)); int err = aws_channel_slot_increment_read_window(websocket->channel_slot, size); if (err) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to increment read window, error %d (%s). Closing websocket.", (void *)websocket, aws_last_error(), aws_error_name(aws_last_error())); s_schedule_channel_shutdown(websocket, aws_last_error()); } } static void s_increment_read_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_websocket *websocket = arg; size_t size; /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); size = websocket->synced_data.window_increment_size; websocket->synced_data.window_increment_size = 0; s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Running task to increment read window by %zu.", (void *)websocket, size); s_increment_read_window_action(websocket, size); } void aws_websocket_increment_read_window(struct aws_websocket *websocket, size_t size) { if (size == 0) { AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Ignoring window increment of size 0.", (void *)websocket); return; } if (!websocket->manual_window_update) { AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET, "id=%p: Ignoring window increment. Manual window management (aka read backpressure) is not enabled.", (void *)websocket); return; } /* Schedule a task to do the increment. 
* If task is already scheduled, just increase size to be incremented */ bool is_midchannel_handler = false; bool should_schedule_task = false; /* BEGIN CRITICAL SECTION */ s_lock_synced_data(websocket); if (websocket->synced_data.is_midchannel_handler) { is_midchannel_handler = true; } else if (websocket->synced_data.window_increment_size == 0) { should_schedule_task = true; websocket->synced_data.window_increment_size = size; } else { websocket->synced_data.window_increment_size = aws_add_size_saturating(websocket->synced_data.window_increment_size, size); } s_unlock_synced_data(websocket); /* END CRITICAL SECTION */ if (is_midchannel_handler) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Ignoring window increment call, websocket has converted to midchannel handler.", (void *)websocket); } else if (should_schedule_task) { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Scheduling task to increment read window by %zu.", (void *)websocket, size); aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->increment_read_window_task); } else { AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET, "id=%p: Task to increment read window already scheduled, increasing scheduled size by %zu.", (void *)websocket, size); } } int aws_websocket_random_handshake_key(struct aws_byte_buf *dst) { /* RFC-6455 Section 4.1. * Derive random 16-byte value, base64-encoded, for the Sec-WebSocket-Key header */ uint8_t key_random_storage[16] = {0}; struct aws_byte_buf key_random_buf = aws_byte_buf_from_empty_array(key_random_storage, sizeof(key_random_storage)); int err = aws_device_random_buffer(&key_random_buf); if (err) { return AWS_OP_ERR; } struct aws_byte_cursor key_random_cur = aws_byte_cursor_from_buf(&key_random_buf); err = aws_base64_encode(&key_random_cur, dst); if (err) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } struct aws_http_message *aws_http_message_new_websocket_handshake_request( struct aws_allocator *allocator, struct aws_byte_cursor path, struct aws_byte_cursor host) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(aws_byte_cursor_is_valid(&path)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&host)); struct aws_http_message *request = aws_http_message_new_request(allocator); if (!request) { goto error; } int err = aws_http_message_set_request_method(request, aws_http_method_get); if (err) { goto error; } err = aws_http_message_set_request_path(request, path); if (err) { goto error; } uint8_t key_storage[AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH]; struct aws_byte_buf key_buf = aws_byte_buf_from_empty_array(key_storage, sizeof(key_storage)); err = aws_websocket_random_handshake_key(&key_buf); if (err) { goto error; } struct aws_http_header required_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = host, }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Key"), .value = aws_byte_cursor_from_buf(&key_buf), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Version"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("13"), }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(required_headers); ++i) { err = aws_http_message_add_header(request, required_headers[i]); if (err) { goto error; } } return request; error: aws_http_message_destroy(request); return NULL; } 
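/* ------------------------------------------------------------------------------------------------
 * Editor's note (not part of the original source): aws_http_message_new_websocket_handshake_request()
 * above builds the RFC-6455 opening-handshake GET request. The hedged sketch below shows how a
 * caller might combine it with aws_websocket_client_connect() from websocket_bootstrap.c. The host
 * "example.com", the path "/mqtt", the on_setup/on_shutdown parameters, and the
 * AWS_WEBSOCKET_CONNECT_USAGE_EXAMPLE guard are illustrative assumptions only; the guard keeps the
 * sketch out of any real build.
 * ------------------------------------------------------------------------------------------------ */
#ifdef AWS_WEBSOCKET_CONNECT_USAGE_EXAMPLE
static int s_example_websocket_connect(
    struct aws_allocator *alloc,
    struct aws_client_bootstrap *bootstrap,
    const struct aws_socket_options *socket_options,
    aws_websocket_on_connection_setup_fn *on_setup,
    aws_websocket_on_connection_shutdown_fn *on_shutdown,
    void *user_data) {

    /* Build a GET request carrying the required Host, Upgrade, Connection,
     * Sec-WebSocket-Key, and Sec-WebSocket-Version headers. */
    struct aws_http_message *handshake = aws_http_message_new_websocket_handshake_request(
        alloc, aws_byte_cursor_from_c_str("/mqtt"), aws_byte_cursor_from_c_str("example.com"));
    if (!handshake) {
        return AWS_OP_ERR;
    }

    struct aws_websocket_client_connection_options options = {
        .allocator = alloc,
        .bootstrap = bootstrap,
        .socket_options = socket_options,
        .host = aws_byte_cursor_from_c_str("example.com"),
        .handshake_request = handshake,
        .on_connection_setup = on_setup,
        .on_connection_shutdown = on_shutdown,
        .user_data = user_data,
    };

    int err = aws_websocket_client_connect(&options);

    /* The websocket bootstrap acquires its own reference to the handshake request,
     * so the caller releases its reference regardless of success. */
    aws_http_message_release(handshake);
    return err;
}
#endif /* AWS_WEBSOCKET_CONNECT_USAGE_EXAMPLE */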
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/websocket_bootstrap.c000066400000000000000000001070571456575232400257500ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif /** * Allow unit-tests to mock interactions with external systems. */ static const struct aws_websocket_client_bootstrap_system_vtable s_default_system_vtable = { .aws_http_client_connect = aws_http_client_connect, .aws_http_connection_release = aws_http_connection_release, .aws_http_connection_close = aws_http_connection_close, .aws_http_connection_get_channel = aws_http_connection_get_channel, .aws_http_connection_make_request = aws_http_connection_make_request, .aws_http_stream_activate = aws_http_stream_activate, .aws_http_stream_release = aws_http_stream_release, .aws_http_stream_get_connection = aws_http_stream_get_connection, .aws_http_stream_update_window = aws_http_stream_update_window, .aws_http_stream_get_incoming_response_status = aws_http_stream_get_incoming_response_status, .aws_websocket_handler_new = aws_websocket_handler_new, }; static const struct aws_websocket_client_bootstrap_system_vtable *s_system_vtable = &s_default_system_vtable; void aws_websocket_client_bootstrap_set_system_vtable( const struct aws_websocket_client_bootstrap_system_vtable *system_vtable) { s_system_vtable = system_vtable; } /** * The websocket bootstrap brings a websocket connection into this world, and sees it out again. * Spins up an HTTP client, performs the opening handshake (HTTP Upgrade request), * creates the websocket handler, and inserts it into the channel. * The bootstrap is responsible for firing the on_connection_setup and on_connection_shutdown callbacks. */ struct aws_websocket_client_bootstrap { /* Settings copied in from aws_websocket_client_connection_options */ struct aws_allocator *alloc; size_t initial_window_size; bool manual_window_update; void *user_data; /* Setup callback will be set NULL once it's invoked. * This is used to determine whether setup or shutdown should be invoked * from the HTTP-shutdown callback. 
*/ aws_websocket_on_connection_setup_fn *websocket_setup_callback; aws_websocket_on_connection_shutdown_fn *websocket_shutdown_callback; aws_websocket_on_incoming_frame_begin_fn *websocket_frame_begin_callback; aws_websocket_on_incoming_frame_payload_fn *websocket_frame_payload_callback; aws_websocket_on_incoming_frame_complete_fn *websocket_frame_complete_callback; /* Handshake request data */ struct aws_http_message *handshake_request; /* Given the "Sec-WebSocket-Key" from the request, * this is what we expect the response's "Sec-WebSocket-Accept" to be */ struct aws_byte_buf expected_sec_websocket_accept; /* Comma-separated values from the request's "Sec-WebSocket-Protocol" (or NULL if none) */ struct aws_string *expected_sec_websocket_protocols; /* Handshake response data */ int response_status; struct aws_http_headers *response_headers; bool got_full_response_headers; struct aws_byte_buf response_body; bool got_full_response_body; int setup_error_code; struct aws_websocket *websocket; }; static void s_ws_bootstrap_destroy(struct aws_websocket_client_bootstrap *ws_bootstrap); static int s_ws_bootstrap_calculate_sec_websocket_accept( struct aws_byte_cursor sec_websocket_key, struct aws_byte_buf *out_buf, struct aws_allocator *alloc); static void s_ws_bootstrap_cancel_setup_due_to_err( struct aws_websocket_client_bootstrap *ws_bootstrap, struct aws_http_connection *http_connection, int error_code); static void s_ws_bootstrap_on_http_setup(struct aws_http_connection *http_connection, int error_code, void *user_data); static void s_ws_bootstrap_on_http_shutdown( struct aws_http_connection *http_connection, int error_code, void *user_data); static int s_ws_bootstrap_on_handshake_response_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data); static int s_ws_bootstrap_on_handshake_response_header_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data); static int s_ws_bootstrap_on_handshake_response_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data); static void s_ws_bootstrap_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data); int aws_websocket_client_connect(const struct aws_websocket_client_connection_options *options) { aws_http_fatal_assert_library_initialized(); AWS_ASSERT(options); /* Validate options */ struct aws_byte_cursor path; aws_http_message_get_request_path(options->handshake_request, &path); if (!options->allocator || !options->bootstrap || !options->socket_options || !options->host.len || !path.len || !options->on_connection_setup) { AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Missing required websocket connection options."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_byte_cursor method; aws_http_message_get_request_method(options->handshake_request, &method); if (aws_http_str_to_method(method) != AWS_HTTP_METHOD_GET) { AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Websocket request must have method be 'GET'."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!options->handshake_request) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Invalid connection options, missing required request for websocket client handshake."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } const struct aws_http_headers *request_headers = aws_http_message_get_headers(options->handshake_request); struct 
aws_byte_cursor sec_websocket_key; if (aws_http_headers_get(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Key"), &sec_websocket_key)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Websocket handshake request is missing required 'Sec-WebSocket-Key' header"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* Extensions are not currently supported */ if (aws_http_headers_has(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Extensions"))) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: 'Sec-WebSocket-Extensions' are not currently supported"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* Create bootstrap */ struct aws_websocket_client_bootstrap *ws_bootstrap = aws_mem_calloc(options->allocator, 1, sizeof(struct aws_websocket_client_bootstrap)); ws_bootstrap->alloc = options->allocator; ws_bootstrap->initial_window_size = options->initial_window_size; ws_bootstrap->manual_window_update = options->manual_window_management; ws_bootstrap->user_data = options->user_data; ws_bootstrap->websocket_setup_callback = options->on_connection_setup; ws_bootstrap->websocket_shutdown_callback = options->on_connection_shutdown; ws_bootstrap->websocket_frame_begin_callback = options->on_incoming_frame_begin; ws_bootstrap->websocket_frame_payload_callback = options->on_incoming_frame_payload; ws_bootstrap->websocket_frame_complete_callback = options->on_incoming_frame_complete; ws_bootstrap->handshake_request = aws_http_message_acquire(options->handshake_request); ws_bootstrap->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN; ws_bootstrap->response_headers = aws_http_headers_new(ws_bootstrap->alloc); aws_byte_buf_init(&ws_bootstrap->response_body, ws_bootstrap->alloc, 0); if (s_ws_bootstrap_calculate_sec_websocket_accept( sec_websocket_key, &ws_bootstrap->expected_sec_websocket_accept, ws_bootstrap->alloc)) { goto error; } ws_bootstrap->expected_sec_websocket_protocols = aws_http_headers_get_all(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol")); /* Initiate HTTP connection */ struct aws_http_client_connection_options http_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; http_options.allocator = ws_bootstrap->alloc; http_options.bootstrap = options->bootstrap; http_options.host_name = options->host; http_options.socket_options = options->socket_options; http_options.tls_options = options->tls_options; http_options.proxy_options = options->proxy_options; if (options->manual_window_management) { http_options.manual_window_management = true; /* Give HTTP handler enough window to comfortably receive the handshake response. * * If the upgrade is unsuccessful, the HTTP window will shrink as the response body is received. * In this case, we'll keep incrementing the window back to its original size so data keeps arriving. * * If the upgrade is successful, then the websocket handler is installed, and * the HTTP handler will take over its own window management. */ http_options.initial_window_size = 1024; } http_options.user_data = ws_bootstrap; http_options.on_setup = s_ws_bootstrap_on_http_setup; http_options.on_shutdown = s_ws_bootstrap_on_http_shutdown; http_options.requested_event_loop = options->requested_event_loop; http_options.host_resolution_config = options->host_resolution_config; /* Infer port, if not explicitly specified in URI */ http_options.port = options->port; if (!http_options.port) { http_options.port = options->tls_options ? 
443 : 80; } if (s_system_vtable->aws_http_client_connect(&http_options)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Websocket failed to initiate HTTP connection, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Success! (so far) */ AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Websocket setup begun, connecting to " PRInSTR ":%" PRIu32 PRInSTR, (void *)ws_bootstrap, AWS_BYTE_CURSOR_PRI(options->host), options->port, AWS_BYTE_CURSOR_PRI(path)); return AWS_OP_SUCCESS; error: s_ws_bootstrap_destroy(ws_bootstrap); return AWS_OP_ERR; } static void s_ws_bootstrap_destroy(struct aws_websocket_client_bootstrap *ws_bootstrap) { if (!ws_bootstrap) { return; } aws_http_message_release(ws_bootstrap->handshake_request); aws_http_headers_release(ws_bootstrap->response_headers); aws_byte_buf_clean_up(&ws_bootstrap->expected_sec_websocket_accept); aws_string_destroy(ws_bootstrap->expected_sec_websocket_protocols); aws_byte_buf_clean_up(&ws_bootstrap->response_body); aws_mem_release(ws_bootstrap->alloc, ws_bootstrap); } /* Given the handshake request's "Sec-WebSocket-Key" value, * calculate the expected value for the response's "Sec-WebSocket-Accept". * RFC-6455 Section 4.1: * base64-encoded SHA-1 of the concatenation of the |Sec-WebSocket-Key| * (as a string, not base64-decoded) with the string * "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and * trailing whitespace */ static int s_ws_bootstrap_calculate_sec_websocket_accept( struct aws_byte_cursor sec_websocket_key, struct aws_byte_buf *out_buf, struct aws_allocator *alloc) { AWS_ASSERT(out_buf && !out_buf->allocator && out_buf->len == 0); /* expect buf to be uninitialized */ /* note: leading and trailing whitespace was already trimmed by aws_http_headers */ /* optimization: skip concatenating Sec-WebSocket-Key and the magic string. * just run the SHA1 over the first string, and then the 2nd. 
*/ bool success = false; struct aws_byte_cursor magic_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("258EAFA5-E914-47DA-95CA-C5AB0DC85B11"); /* SHA-1 */ struct aws_hash *sha1 = aws_sha1_new(alloc); if (!sha1) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Failed to initiate SHA1, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto cleanup; } if (aws_hash_update(sha1, &sec_websocket_key) || aws_hash_update(sha1, &magic_string)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Failed to update SHA1, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto cleanup; } uint8_t sha1_storage[AWS_SHA1_LEN]; struct aws_byte_buf sha1_buf = aws_byte_buf_from_empty_array(sha1_storage, sizeof(sha1_storage)); if (aws_hash_finalize(sha1, &sha1_buf, 0)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Failed to finalize SHA1, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto cleanup; } /* base64-encoded SHA-1 (clear out_buf, and write to it again) */ size_t base64_encode_sha1_len; if (aws_base64_compute_encoded_len(sha1_buf.len, &base64_encode_sha1_len)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Failed to determine Base64-encoded length, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto cleanup; } aws_byte_buf_init(out_buf, alloc, base64_encode_sha1_len); struct aws_byte_cursor sha1_cursor = aws_byte_cursor_from_buf(&sha1_buf); if (aws_base64_encode(&sha1_cursor, out_buf)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Failed to Base64-encode, error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto cleanup; } success = true; cleanup: if (sha1) { aws_hash_destroy(sha1); } return success ? AWS_OP_SUCCESS : AWS_OP_ERR; } /* Called if something goes wrong after an HTTP connection is established. * The HTTP connection is closed. * We must wait for its shutdown to complete before informing user of the failed websocket setup. */ static void s_ws_bootstrap_cancel_setup_due_to_err( struct aws_websocket_client_bootstrap *ws_bootstrap, struct aws_http_connection *http_connection, int error_code) { AWS_ASSERT(error_code); AWS_ASSERT(http_connection); if (!ws_bootstrap->setup_error_code) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Canceling websocket setup due to error %d (%s).", (void *)ws_bootstrap, error_code, aws_error_name(error_code)); ws_bootstrap->setup_error_code = error_code; s_system_vtable->aws_http_connection_close(http_connection); } } static void s_ws_bootstrap_invoke_setup_callback(struct aws_websocket_client_bootstrap *ws_bootstrap, int error_code) { /* sanity check: websocket XOR error_code is set. both cannot be set. 
both cannot be unset */ AWS_FATAL_ASSERT((error_code != 0) ^ (ws_bootstrap->websocket != NULL)); /* Report things about the response, if we received them */ int *response_status_ptr = NULL; struct aws_http_header *response_header_array = NULL; size_t num_response_headers = 0; struct aws_byte_cursor *response_body_ptr = NULL; struct aws_byte_cursor response_body_cursor = {.len = 0}; if (ws_bootstrap->got_full_response_headers) { response_status_ptr = &ws_bootstrap->response_status; num_response_headers = aws_http_headers_count(ws_bootstrap->response_headers); response_header_array = aws_mem_calloc(ws_bootstrap->alloc, aws_max_size(1, num_response_headers), sizeof(struct aws_http_header)); for (size_t i = 0; i < num_response_headers; ++i) { aws_http_headers_get_index(ws_bootstrap->response_headers, i, &response_header_array[i]); } if (ws_bootstrap->got_full_response_body) { response_body_cursor = aws_byte_cursor_from_buf(&ws_bootstrap->response_body); response_body_ptr = &response_body_cursor; } } struct aws_websocket_on_connection_setup_data setup_data = { .error_code = error_code, .websocket = ws_bootstrap->websocket, .handshake_response_status = response_status_ptr, .handshake_response_header_array = response_header_array, .num_handshake_response_headers = num_response_headers, .handshake_response_body = response_body_ptr, }; ws_bootstrap->websocket_setup_callback(&setup_data, ws_bootstrap->user_data); /* Clear setup callback so that we know that it's been invoked. */ ws_bootstrap->websocket_setup_callback = NULL; if (response_header_array) { aws_mem_release(ws_bootstrap->alloc, response_header_array); } } /* Invoked when HTTP connection has been established (or failed to be established) */ static void s_ws_bootstrap_on_http_setup(struct aws_http_connection *http_connection, int error_code, void *user_data) { struct aws_websocket_client_bootstrap *ws_bootstrap = user_data; /* Setup callback contract is: if error_code is non-zero then connection is NULL. */ AWS_FATAL_ASSERT((error_code != 0) == (http_connection == NULL)); /* If http connection failed, inform the user immediately and clean up the websocket bootstrapper. */ if (error_code) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Websocket setup failed to establish HTTP connection, error %d (%s).", (void *)ws_bootstrap, error_code, aws_error_name(error_code)); s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, error_code); s_ws_bootstrap_destroy(ws_bootstrap); return; } /* Connection exists! * Note that if anything goes wrong with websocket setup from hereon out, we must close the http connection * first and wait for shutdown to complete before informing the user of setup failure. 
*/ /* Send the handshake request */ struct aws_http_make_request_options options = { .self_size = sizeof(options), .request = ws_bootstrap->handshake_request, .user_data = ws_bootstrap, .on_response_headers = s_ws_bootstrap_on_handshake_response_headers, .on_response_header_block_done = s_ws_bootstrap_on_handshake_response_header_block_done, .on_response_body = s_ws_bootstrap_on_handshake_response_body, .on_complete = s_ws_bootstrap_on_stream_complete, }; struct aws_http_stream *handshake_stream = s_system_vtable->aws_http_connection_make_request(http_connection, &options); if (!handshake_stream) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Failed to make websocket upgrade request, error %d (%s).", (void *)ws_bootstrap, aws_last_error(), aws_error_name(aws_last_error())); goto error; } if (s_system_vtable->aws_http_stream_activate(handshake_stream)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Failed to activate websocket upgrade request, error %d (%s).", (void *)ws_bootstrap, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Success! (so far) */ AWS_LOGF_TRACE( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: HTTP connection established, sending websocket upgrade request.", (void *)ws_bootstrap); return; error: s_system_vtable->aws_http_stream_release(handshake_stream); s_ws_bootstrap_cancel_setup_due_to_err(ws_bootstrap, http_connection, aws_last_error()); } /* Invoked when the HTTP connection has shut down. * This is never called if the HTTP connection failed its setup */ static void s_ws_bootstrap_on_http_shutdown( struct aws_http_connection *http_connection, int error_code, void *user_data) { struct aws_websocket_client_bootstrap *ws_bootstrap = user_data; /* Inform user that connection has completely shut down. * If setup callback still hasn't fired, invoke it now and indicate failure. * Otherwise, invoke shutdown callback. */ if (ws_bootstrap->websocket_setup_callback) { AWS_ASSERT(!ws_bootstrap->websocket); /* If there's already a setup_error_code, use that */ if (ws_bootstrap->setup_error_code) { error_code = ws_bootstrap->setup_error_code; } /* Ensure non-zero error_code is passed */ if (!error_code) { error_code = AWS_ERROR_UNKNOWN; } AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Websocket setup failed, error %d (%s).", (void *)ws_bootstrap, error_code, aws_error_name(error_code)); s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, error_code); } else if (ws_bootstrap->websocket_shutdown_callback) { AWS_ASSERT(ws_bootstrap->websocket); AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket client connection shut down with error %d (%s).", (void *)ws_bootstrap->websocket, error_code, aws_error_name(error_code)); ws_bootstrap->websocket_shutdown_callback(ws_bootstrap->websocket, error_code, ws_bootstrap->user_data); } /* Clean up HTTP connection and websocket-bootstrap. * It's still up to the user to release the websocket itself. 
*/ s_system_vtable->aws_http_connection_release(http_connection); s_ws_bootstrap_destroy(ws_bootstrap); } /* Invoked repeatedly as handshake response headers arrive */ static int s_ws_bootstrap_on_handshake_response_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; (void)header_block; struct aws_websocket_client_bootstrap *ws_bootstrap = user_data; /* Deep-copy headers into ws_bootstrap */ aws_http_headers_add_array(ws_bootstrap->response_headers, header_array, num_headers); /* Don't report a partially-received response */ ws_bootstrap->got_full_response_headers = false; return AWS_OP_SUCCESS; } static int s_ws_bootstrap_validate_header( struct aws_websocket_client_bootstrap *ws_bootstrap, const char *name, struct aws_byte_cursor expected_value, bool case_sensitive) { struct aws_byte_cursor actual_value; if (aws_http_headers_get(ws_bootstrap->response_headers, aws_byte_cursor_from_c_str(name), &actual_value)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response lacks required '%s' header", (void *)ws_bootstrap, name); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE); } bool matches = case_sensitive ? aws_byte_cursor_eq(&expected_value, &actual_value) : aws_byte_cursor_eq_ignore_case(&expected_value, &actual_value); if (!matches) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response '%s' header has wrong value. Expected '" PRInSTR "'. Received '" PRInSTR "'", (void *)ws_bootstrap, name, AWS_BYTE_CURSOR_PRI(expected_value), AWS_BYTE_CURSOR_PRI(actual_value)); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE); } return AWS_OP_SUCCESS; } static int s_ws_bootstrap_validate_sec_websocket_protocol(const struct aws_websocket_client_bootstrap *ws_bootstrap) { /* First handle the easy case: * If client requested no protocols, then the response should not pick any */ if (ws_bootstrap->expected_sec_websocket_protocols == NULL) { if (aws_http_headers_has( ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"))) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response has 'Sec-WebSocket-Protocol' header, no protocol was requested", (void *)ws_bootstrap); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE); } else { return AWS_OP_SUCCESS; } } /* Check that server has picked one of the protocols listed in the request */ struct aws_byte_cursor response_protocol; if (aws_http_headers_get( ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"), &response_protocol)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response lacks required 'Sec-WebSocket-Protocol' header", (void *)ws_bootstrap); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE); } struct aws_byte_cursor request_protocols = aws_byte_cursor_from_string(ws_bootstrap->expected_sec_websocket_protocols); struct aws_byte_cursor request_protocol_i; AWS_ZERO_STRUCT(request_protocol_i); while (aws_byte_cursor_next_split(&request_protocols, ',', &request_protocol_i)) { struct aws_byte_cursor request_protocol = aws_strutil_trim_http_whitespace(request_protocol_i); if (aws_byte_cursor_eq(&response_protocol, &request_protocol)) { /* Success! 
*/ AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Server selected Sec-WebSocket-Protocol: " PRInSTR, (void *)ws_bootstrap, AWS_BYTE_CURSOR_PRI(response_protocol)); return AWS_OP_SUCCESS; } } AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response 'Sec-WebSocket-Protocol' header has wrong value. Received '" PRInSTR "'. Expected one of '" PRInSTR "'", (void *)ws_bootstrap, AWS_BYTE_CURSOR_PRI(response_protocol), AWS_BYTE_CURSOR_PRI(request_protocols)); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE); } /* OK, we've got all the headers for the 101 Switching Protocols response. * Validate the handshake response, install the websocket handler into the channel, * and invoke the on_connection_setup callback. */ static int s_ws_bootstrap_validate_response_and_install_websocket_handler( struct aws_websocket_client_bootstrap *ws_bootstrap, struct aws_http_connection *http_connection) { /* RFC-6455 Section 4.1 - The client MUST validate the server's response as follows... */ /* (we already checked step 1, that status code is 101) */ AWS_FATAL_ASSERT(ws_bootstrap->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS); /* 2. If the response lacks an |Upgrade| header field or the |Upgrade| * header field contains a value that is not an ASCII case- * insensitive match for the value "websocket", the client MUST * _Fail the WebSocket Connection_. */ if (s_ws_bootstrap_validate_header( ws_bootstrap, "Upgrade", aws_byte_cursor_from_c_str("websocket"), false /*case_sensitive*/)) { goto error; } /* 3. If the response lacks a |Connection| header field or the * |Connection| header field doesn't contain a token that is an * ASCII case-insensitive match for the value "Upgrade", the client * MUST _Fail the WebSocket Connection_. */ if (s_ws_bootstrap_validate_header( ws_bootstrap, "Connection", aws_byte_cursor_from_c_str("Upgrade"), false /*case_sensitive*/)) { goto error; } /* 4. If the response lacks a |Sec-WebSocket-Accept| header field or * the |Sec-WebSocket-Accept| contains a value other than the * base64-encoded SHA-1 of the concatenation of the |Sec-WebSocket- * Key| (as a string, not base64-decoded) with the string "258EAFA5- * E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and * trailing whitespace, the client MUST _Fail the WebSocket * Connection_. */ if (s_ws_bootstrap_validate_header( ws_bootstrap, "Sec-WebSocket-Accept", aws_byte_cursor_from_buf(&ws_bootstrap->expected_sec_websocket_accept), true /*case_sensitive*/)) { goto error; } /* (step 5 is about validating Sec-WebSocket-Extensions, but we don't support extensions) */ if (aws_http_headers_has(ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Extensions"))) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response has 'Sec-WebSocket-Extensions' header, but client does not support extensions.", (void *)ws_bootstrap); aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE); goto error; } /* 6. If the response includes a |Sec-WebSocket-Protocol| header field * and this header field indicates the use of a subprotocol that was * not present in the client's handshake (the server has indicated a * subprotocol not requested by the client), the client MUST _Fail * the WebSocket Connection_. 
*/ if (s_ws_bootstrap_validate_sec_websocket_protocol(ws_bootstrap)) { goto error; } /* Insert websocket handler into channel */ struct aws_channel *channel = s_system_vtable->aws_http_connection_get_channel(http_connection); AWS_ASSERT(channel); struct aws_websocket_handler_options ws_options = { .allocator = ws_bootstrap->alloc, .channel = channel, .initial_window_size = ws_bootstrap->initial_window_size, .user_data = ws_bootstrap->user_data, .on_incoming_frame_begin = ws_bootstrap->websocket_frame_begin_callback, .on_incoming_frame_payload = ws_bootstrap->websocket_frame_payload_callback, .on_incoming_frame_complete = ws_bootstrap->websocket_frame_complete_callback, .is_server = false, .manual_window_update = ws_bootstrap->manual_window_update, }; ws_bootstrap->websocket = s_system_vtable->aws_websocket_handler_new(&ws_options); if (!ws_bootstrap->websocket) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Failed to create websocket handler, error %d (%s)", (void *)ws_bootstrap, aws_last_error(), aws_error_name(aws_last_error())); goto error; } /* Success! Setup complete! */ AWS_LOGF_TRACE(/* Log for tracing setup id to websocket id. */ AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Setup success, created websocket=%p", (void *)ws_bootstrap, (void *)ws_bootstrap->websocket); AWS_LOGF_DEBUG(/* Debug log about creation of websocket. */ AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket client connection established.", (void *)ws_bootstrap->websocket); s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, 0 /*error_code*/); return AWS_OP_SUCCESS; error: s_ws_bootstrap_cancel_setup_due_to_err(ws_bootstrap, http_connection, aws_last_error()); /* Returning error stops HTTP from processing any further data */ return AWS_OP_ERR; } /** * Invoked each time we reach the end of a block of response headers. * If we got a valid 101 Switching Protocols response, we insert the websocket handler. * Note: * In HTTP, 1xx responses are "interim" responses. So a 101 Switching Protocols * response does not "complete" the stream. Once the connection has switched * protocols, the stream does not end until the whole connection is closed. */ static int s_ws_bootstrap_on_handshake_response_header_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { struct aws_websocket_client_bootstrap *ws_bootstrap = user_data; struct aws_http_connection *http_connection = s_system_vtable->aws_http_stream_get_connection(stream); AWS_ASSERT(http_connection); /* Get status code from stream */ s_system_vtable->aws_http_stream_get_incoming_response_status(stream, &ws_bootstrap->response_status); ws_bootstrap->got_full_response_headers = true; if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) { if (ws_bootstrap->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS) { /* OK, got 101 response, proceed with upgrade! */ return s_ws_bootstrap_validate_response_and_install_websocket_handler(ws_bootstrap, http_connection); } else { /* It would be weird to get any other kind of 1xx response, but anything is possible. * Another response should come eventually. Just ignore the headers from this one... */ AWS_LOGF_DEBUG( AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Server sent interim response with status code %d", (void *)ws_bootstrap, ws_bootstrap->response_status); aws_http_headers_clear(ws_bootstrap->response_headers); ws_bootstrap->got_full_response_headers = false; return AWS_OP_SUCCESS; } } /* Otherwise, we got normal headers (from a non-1xx response), or trailing headers. 
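 * (For example, a server refusing the upgrade might answer "HTTP/1.1 403 Forbidden" with
 * ordinary headers and a body; that response lands here rather than in the 101 path above.)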
* This can only happen if the handshake did not succeed. Keep the connection going. * We'll report failed setup to the user after we've received the complete response */ ws_bootstrap->setup_error_code = AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE; return AWS_OP_SUCCESS; } /** * Invoked as we receive the body of a failed response. * This is never invoked if the handshake succeeds. */ static int s_ws_bootstrap_on_handshake_response_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { struct aws_websocket_client_bootstrap *ws_bootstrap = user_data; aws_byte_buf_append_dynamic(&ws_bootstrap->response_body, data); /* If we're managing the read window... * bump the HTTP window back to its starting size, so that we keep receiving the whole response. */ if (ws_bootstrap->manual_window_update) { s_system_vtable->aws_http_stream_update_window(stream, data->len); } return AWS_OP_SUCCESS; } /** * Invoked when the stream completes. * * If the handshake succeeded and the websocket was installed, * then this is invoked at the end of the websocket connection. * * If the handshake response was not 101, then this is invoked * after we've received the whole response. * * Or this is invoked because the connection failed unexpectedly before the handshake could complete, * (or we killed the connection because the 101 response didn't pass validation). */ static void s_ws_bootstrap_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_websocket_client_bootstrap *ws_bootstrap = user_data; struct aws_http_connection *http_connection = s_system_vtable->aws_http_stream_get_connection(stream); /* Only report the body if we received a complete response */ if (error_code == 0) { ws_bootstrap->got_full_response_body = true; } /* Make sure the connection closes. * We'll deal with finishing setup or shutdown from the http-shutdown callback */ s_system_vtable->aws_http_connection_close(http_connection); /* Done with stream, let it be cleaned up */ s_system_vtable->aws_http_stream_release(stream); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/websocket_decoder.c000066400000000000000000000356611456575232400253410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include typedef int(state_fn)(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data); /* STATE_INIT: Resets things, consumes no data */ static int s_state_init(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { (void)data; AWS_ZERO_STRUCT(decoder->current_frame); decoder->state = AWS_WEBSOCKET_DECODER_STATE_OPCODE_BYTE; return AWS_OP_SUCCESS; } /* STATE_OPCODE_BYTE: Decode first byte of frame, which has all kinds of goodies in it. */ static int s_state_opcode_byte(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { if (data->len == 0) { return AWS_OP_SUCCESS; } uint8_t byte = data->ptr[0]; aws_byte_cursor_advance(data, 1); /* first 4 bits are all bools */ decoder->current_frame.fin = byte & 0x80; decoder->current_frame.rsv[0] = byte & 0x40; decoder->current_frame.rsv[1] = byte & 0x20; decoder->current_frame.rsv[2] = byte & 0x10; /* next 4 bits are opcode */ decoder->current_frame.opcode = byte & 0x0F; /* RFC-6455 Section 5.2 - Opcode * If an unknown opcode is received, the receiving endpoint MUST _Fail the WebSocket Connection_. 
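 * (Worked example, for illustration only: a first byte of 0x81 decodes above as
 *     fin    = 0x81 & 0x80 -> set
 *     rsv1-3 = 0x81 & 0x40, 0x20, 0x10 -> all clear
 *     opcode = 0x81 & 0x0F -> 0x1, i.e. TEXT
 * so it is an unfragmented TEXT frame, which passes the switch below.)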
*/ switch (decoder->current_frame.opcode) { case AWS_WEBSOCKET_OPCODE_CONTINUATION: case AWS_WEBSOCKET_OPCODE_TEXT: case AWS_WEBSOCKET_OPCODE_BINARY: case AWS_WEBSOCKET_OPCODE_CLOSE: case AWS_WEBSOCKET_OPCODE_PING: case AWS_WEBSOCKET_OPCODE_PONG: break; default: AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Received frame with unknown opcode 0x%" PRIx8, (void *)decoder->user_data, decoder->current_frame.opcode); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR); } /* RFC-6455 Section 5.4 Fragmentation * * Data frames with the FIN bit clear are considered fragmented and must be followed by * 1+ CONTINUATION frames, where only the final CONTINUATION frame's FIN bit is set. * * Control frames may be injected in the middle of a fragmented message, * but control frames may not be fragmented themselves. */ if (aws_websocket_is_data_frame(decoder->current_frame.opcode)) { bool is_continuation_frame = AWS_WEBSOCKET_OPCODE_CONTINUATION == decoder->current_frame.opcode; if (decoder->expecting_continuation_data_frame != is_continuation_frame) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Fragmentation error. Received start of new message before end of previous message", (void *)decoder->user_data); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR); } decoder->expecting_continuation_data_frame = !decoder->current_frame.fin; } else { /* Control frames themselves MUST NOT be fragmented. */ if (!decoder->current_frame.fin) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Received fragmented control frame. This is illegal", (void *)decoder->user_data); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR); } } if (decoder->current_frame.opcode == AWS_WEBSOCKET_OPCODE_TEXT) { decoder->processing_text_message = true; } decoder->state = AWS_WEBSOCKET_DECODER_STATE_LENGTH_BYTE; return AWS_OP_SUCCESS; } /* STATE_LENGTH_BYTE: Decode byte containing length, determine if we need to decode extended length. */ static int s_state_length_byte(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { if (data->len == 0) { return AWS_OP_SUCCESS; } uint8_t byte = data->ptr[0]; aws_byte_cursor_advance(data, 1); /* first bit is a bool */ decoder->current_frame.masked = byte & 0x80; /* remaining 7 bits are payload length */ decoder->current_frame.payload_length = byte & 0x7F; if (decoder->current_frame.payload_length >= AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH) { /* If 7bit payload length has a high value, then the next few bytes contain the real payload length */ decoder->state_bytes_processed = 0; decoder->state = AWS_WEBSOCKET_DECODER_STATE_EXTENDED_LENGTH; } else { /* If 7bit payload length has low value, that's the actual payload size, jump past EXTENDED_LENGTH state */ decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK; } return AWS_OP_SUCCESS; } /* STATE_EXTENDED_LENGTH: Decode extended length (state skipped if no extended length). */ static int s_state_extended_length(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { if (data->len == 0) { return AWS_OP_SUCCESS; } /* The 7bit payload value loaded during the previous state indicated that * actual payload length is encoded across the next 2 or 8 bytes. 
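 * (Summary of the RFC-6455 Section 5.2 length scheme, for reference:
 *     7-bit value 0-125 -> that value is the payload length, no extended bytes follow
 *     7-bit value 126   -> the next 2 bytes hold a 16-bit length in network byte order
 *     7-bit value 127   -> the next 8 bytes hold a 64-bit length in network byte order
 * e.g. a 300-byte payload arrives as 126 followed by 0x01 0x2C.)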
*/ uint8_t total_bytes_extended_length; uint64_t min_acceptable_value; uint64_t max_acceptable_value; if (decoder->current_frame.payload_length == AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH) { total_bytes_extended_length = 2; min_acceptable_value = AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE; max_acceptable_value = AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE; } else { AWS_ASSERT(decoder->current_frame.payload_length == AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH); total_bytes_extended_length = 8; min_acceptable_value = AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE; max_acceptable_value = AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE; } /* Copy bytes of extended-length to state_cache, we'll process them later.*/ AWS_ASSERT(total_bytes_extended_length > decoder->state_bytes_processed); size_t remaining_bytes = (size_t)(total_bytes_extended_length - decoder->state_bytes_processed); size_t bytes_to_consume = remaining_bytes <= data->len ? remaining_bytes : data->len; AWS_ASSERT(bytes_to_consume + decoder->state_bytes_processed <= sizeof(decoder->state_cache)); memcpy(decoder->state_cache + decoder->state_bytes_processed, data->ptr, bytes_to_consume); aws_byte_cursor_advance(data, bytes_to_consume); decoder->state_bytes_processed += bytes_to_consume; /* Return, still waiting on more bytes */ if (decoder->state_bytes_processed < total_bytes_extended_length) { return AWS_OP_SUCCESS; } /* All bytes have been copied into state_cache, now read them together as one number, * transforming from network byte order (big endian) to native endianness. */ struct aws_byte_cursor cache_cursor = aws_byte_cursor_from_array(decoder->state_cache, total_bytes_extended_length); if (total_bytes_extended_length == 2) { uint16_t val; aws_byte_cursor_read_be16(&cache_cursor, &val); decoder->current_frame.payload_length = val; } else { aws_byte_cursor_read_be64(&cache_cursor, &decoder->current_frame.payload_length); } if (decoder->current_frame.payload_length < min_acceptable_value || decoder->current_frame.payload_length > max_acceptable_value) { AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to decode payload length", (void *)decoder->user_data); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR); } decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK; return AWS_OP_SUCCESS; } /* MASKING_KEY_CHECK: Determine if we need to decode masking-key. Consumes no data. */ static int s_state_masking_key_check(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { (void)data; /* If mask bit was set, move to next state to process 4 bytes of masking key. * Otherwise skip next step, there is no masking key. */ if (decoder->current_frame.masked) { decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY; decoder->state_bytes_processed = 0; } else { decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK; } return AWS_OP_SUCCESS; } /* MASKING_KEY: Decode masking-key (state skipped if no masking key). */ static int s_state_masking_key(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { if (data->len == 0) { return AWS_OP_SUCCESS; } AWS_ASSERT(4 > decoder->state_bytes_processed); size_t bytes_remaining = 4 - (size_t)decoder->state_bytes_processed; size_t bytes_to_consume = bytes_remaining < data->len ? 
bytes_remaining : data->len; memcpy(decoder->current_frame.masking_key + decoder->state_bytes_processed, data->ptr, bytes_to_consume); aws_byte_cursor_advance(data, bytes_to_consume); decoder->state_bytes_processed += bytes_to_consume; /* If all bytes consumed, proceed to next state */ if (decoder->state_bytes_processed == 4) { decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK; } return AWS_OP_SUCCESS; } /* PAYLOAD_CHECK: Determine if we need to decode a payload. Consumes no data. */ static int s_state_payload_check(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { (void)data; /* Invoke on_frame() callback to inform user of non-payload data. */ int err = decoder->on_frame(&decoder->current_frame, decoder->user_data); if (err) { return AWS_OP_ERR; } /* Choose next state: either we have payload to process or we don't. */ if (decoder->current_frame.payload_length > 0) { decoder->state_bytes_processed = 0; decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD; } else { decoder->state = AWS_WEBSOCKET_DECODER_STATE_FRAME_END; } return AWS_OP_SUCCESS; } /* PAYLOAD: Decode payload until we're done (state skipped if no payload). */ static int s_state_payload(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { if (data->len == 0) { return AWS_OP_SUCCESS; } AWS_ASSERT(decoder->current_frame.payload_length > decoder->state_bytes_processed); uint64_t bytes_remaining = decoder->current_frame.payload_length - decoder->state_bytes_processed; size_t bytes_to_consume = bytes_remaining < data->len ? (size_t)bytes_remaining : data->len; struct aws_byte_cursor payload = aws_byte_cursor_advance(data, bytes_to_consume); /* Unmask data, if necessary. * RFC-6455 Section 5.3 Client-to-Server Masking * Each byte of payload is XOR against a byte of the masking-key */ if (decoder->current_frame.masked) { uint64_t mask_index = decoder->state_bytes_processed; /* Optimization idea: don't do this 1 byte at a time */ uint8_t *current_byte = payload.ptr; uint8_t *end_byte = payload.ptr + payload.len; while (current_byte != end_byte) { *current_byte++ ^= decoder->current_frame.masking_key[mask_index++ % 4]; } } /* TODO: validate payload of CLOSE frame */ /* Validate the UTF-8 for TEXT messages (a TEXT frame and any subsequent CONTINUATION frames) */ if (decoder->processing_text_message && aws_websocket_is_data_frame(decoder->current_frame.opcode)) { if (aws_utf8_decoder_update(decoder->text_message_validator, payload)) { AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Received invalid UTF-8", (void *)decoder->user_data); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR); } } /* Invoke on_payload() callback to inform user of payload data */ int err = decoder->on_payload(payload, decoder->user_data); if (err) { return AWS_OP_ERR; } decoder->state_bytes_processed += payload.len; AWS_ASSERT(decoder->state_bytes_processed <= decoder->current_frame.payload_length); /* If all data consumed, proceed to next state. */ if (decoder->state_bytes_processed == decoder->current_frame.payload_length) { decoder->state = AWS_WEBSOCKET_DECODER_STATE_FRAME_END; } return AWS_OP_SUCCESS; } /* FRAME_END: Perform checks once we reach the end of the frame. 
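 * (For illustration: a text message sent as TEXT(fin=0) "He", CONTINUATION(fin=0) "l",
 * CONTINUATION(fin=1) "lo" is validated incrementally by aws_utf8_decoder_update() during
 * the PAYLOAD state and only finalized here, so a multi-byte UTF-8 sequence split across
 * frames is still accepted, while one left incomplete at the end of the message is rejected.)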
*/ static int s_state_frame_end(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) { (void)data; /* If we're done processing a text message (a TEXT frame and any subsequent CONTINUATION frames), * complete the UTF-8 validation */ if (decoder->processing_text_message && aws_websocket_is_data_frame(decoder->current_frame.opcode) && decoder->current_frame.fin) { if (aws_utf8_decoder_finalize(decoder->text_message_validator)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Received invalid UTF-8 (incomplete encoding)", (void *)decoder->user_data); return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR); } decoder->processing_text_message = false; } /* Done! */ decoder->state = AWS_WEBSOCKET_DECODER_STATE_DONE; return AWS_OP_SUCCESS; } static state_fn *s_state_functions[AWS_WEBSOCKET_DECODER_STATE_DONE] = { s_state_init, s_state_opcode_byte, s_state_length_byte, s_state_extended_length, s_state_masking_key_check, s_state_masking_key, s_state_payload_check, s_state_payload, s_state_frame_end, }; int aws_websocket_decoder_process( struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data, bool *frame_complete) { /* Run state machine until frame is completely decoded, or the state stops changing. * Note that we don't stop looping when data->len reaches zero, because some states consume no data. */ while (decoder->state != AWS_WEBSOCKET_DECODER_STATE_DONE) { enum aws_websocket_decoder_state prev_state = decoder->state; int err = s_state_functions[decoder->state](decoder, data); if (err) { return AWS_OP_ERR; } if (decoder->state == prev_state) { AWS_ASSERT(data->len == 0); /* If no more work to do, all possible data should have been consumed */ break; } } if (decoder->state == AWS_WEBSOCKET_DECODER_STATE_DONE) { decoder->state = AWS_WEBSOCKET_DECODER_STATE_INIT; *frame_complete = true; return AWS_OP_SUCCESS; } *frame_complete = false; return AWS_OP_SUCCESS; } void aws_websocket_decoder_init( struct aws_websocket_decoder *decoder, struct aws_allocator *alloc, aws_websocket_decoder_frame_fn *on_frame, aws_websocket_decoder_payload_fn *on_payload, void *user_data) { AWS_ZERO_STRUCT(*decoder); decoder->user_data = user_data; decoder->on_frame = on_frame; decoder->on_payload = on_payload; decoder->text_message_validator = aws_utf8_decoder_new(alloc, NULL /*options*/); } void aws_websocket_decoder_clean_up(struct aws_websocket_decoder *decoder) { aws_utf8_decoder_destroy(decoder->text_message_validator); AWS_ZERO_STRUCT(*decoder); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/source/websocket_encoder.c000066400000000000000000000342431456575232400253460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include typedef int(state_fn)(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf); /* STATE_INIT: Outputs no data */ static int s_state_init(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { (void)out_buf; if (!encoder->is_frame_in_progress) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } encoder->state = AWS_WEBSOCKET_ENCODER_STATE_OPCODE_BYTE; return AWS_OP_SUCCESS; } /* STATE_OPCODE_BYTE: Outputs 1st byte of frame, which is packed with goodies. 
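 * (For illustration: a final, unfragmented BINARY frame with all RSV bits clear packs as
 *     byte = 0x2 | (1 << 7) = 0x82
 * which is the mirror image of the decoder's opcode-byte state.)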
*/ static int s_state_opcode_byte(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { AWS_ASSERT((encoder->frame.opcode & 0xF0) == 0); /* Should be impossible, the opcode was checked in start_frame() */ /* Right 4 bits are opcode, left 4 bits are fin|rsv1|rsv2|rsv3 */ uint8_t byte = encoder->frame.opcode; byte |= (encoder->frame.fin << 7); byte |= (encoder->frame.rsv[0] << 6); byte |= (encoder->frame.rsv[1] << 5); byte |= (encoder->frame.rsv[2] << 4); /* If buffer has room to write, proceed to next state */ if (aws_byte_buf_write_u8(out_buf, byte)) { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_LENGTH_BYTE; } return AWS_OP_SUCCESS; } /* STATE_LENGTH_BYTE: Output 2nd byte of frame, which indicates payload length */ static int s_state_length_byte(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { /* First bit is masking bool */ uint8_t byte = (uint8_t)(encoder->frame.masked << 7); /* Next 7bits are length, if length is small. * Otherwise next 7bits are a magic number indicating how many bytes will be required to encode actual length */ bool extended_length_required; if (encoder->frame.payload_length < AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE) { byte |= (uint8_t)encoder->frame.payload_length; extended_length_required = false; } else if (encoder->frame.payload_length <= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE) { byte |= AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH; extended_length_required = true; } else { AWS_ASSERT(encoder->frame.payload_length <= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE); byte |= AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH; extended_length_required = true; } /* If buffer has room to write, proceed to next appropriate state */ if (aws_byte_buf_write_u8(out_buf, byte)) { if (extended_length_required) { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_EXTENDED_LENGTH; encoder->state_bytes_processed = 0; } else { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK; } } return AWS_OP_SUCCESS; } /* STATE_EXTENDED_LENGTH: Output extended length (state skipped if not using extended length). 
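 * (For illustration: payload_length = 400 was announced as the 7-bit value 126 by the
 * previous state, so this state emits 0x01 0x90; payload_length = 65536 was announced
 * as 127 and is emitted here as the 8 bytes 00 00 00 00 00 01 00 00.)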
*/ static int s_state_extended_length(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { /* Fill tmp buffer with extended-length in network byte order */ uint8_t network_bytes_array[8] = {0}; struct aws_byte_buf network_bytes_buf = aws_byte_buf_from_empty_array(network_bytes_array, sizeof(network_bytes_array)); if (encoder->frame.payload_length <= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE) { aws_byte_buf_write_be16(&network_bytes_buf, (uint16_t)encoder->frame.payload_length); } else { aws_byte_buf_write_be64(&network_bytes_buf, encoder->frame.payload_length); } /* Use cursor to iterate over tmp buffer */ struct aws_byte_cursor network_bytes_cursor = aws_byte_cursor_from_buf(&network_bytes_buf); /* Advance cursor if some bytes already written */ aws_byte_cursor_advance(&network_bytes_cursor, (size_t)encoder->state_bytes_processed); /* Shorten cursor if it won't all fit in out_buf */ bool all_data_written = true; size_t space_available = out_buf->capacity - out_buf->len; if (network_bytes_cursor.len > space_available) { network_bytes_cursor.len = space_available; all_data_written = false; } aws_byte_buf_write_from_whole_cursor(out_buf, network_bytes_cursor); encoder->state_bytes_processed += network_bytes_cursor.len; /* If all bytes written, advance to next state */ if (all_data_written) { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK; } return AWS_OP_SUCCESS; } /* MASKING_KEY_CHECK: Outputs no data. Gets things ready for (or decides to skip) the STATE_MASKING_KEY */ static int s_state_masking_key_check(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { (void)out_buf; if (encoder->frame.masked) { encoder->state_bytes_processed = 0; encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY; } else { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK; } return AWS_OP_SUCCESS; } /* MASKING_KEY: Output masking-key (state skipped if no masking key). */ static int s_state_masking_key(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { /* Prepare cursor to iterate over masking-key bytes */ struct aws_byte_cursor cursor = aws_byte_cursor_from_array(encoder->frame.masking_key, sizeof(encoder->frame.masking_key)); /* Advance cursor if some bytes already written (moves ptr forward but shortens len so end stays in place) */ aws_byte_cursor_advance(&cursor, (size_t)encoder->state_bytes_processed); /* Shorten cursor if it won't all fit in out_buf */ bool all_data_written = true; size_t space_available = out_buf->capacity - out_buf->len; if (cursor.len > space_available) { cursor.len = space_available; all_data_written = false; } aws_byte_buf_write_from_whole_cursor(out_buf, cursor); encoder->state_bytes_processed += cursor.len; /* If all bytes written, advance to next state */ if (all_data_written) { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK; } return AWS_OP_SUCCESS; } /* MASKING_KEY_CHECK: Outputs no data. Gets things ready for (or decides to skip) STATE_PAYLOAD */ static int s_state_payload_check(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { (void)out_buf; if (encoder->frame.payload_length > 0) { encoder->state_bytes_processed = 0; encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD; } else { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_DONE; } return AWS_OP_SUCCESS; } /* PAYLOAD: Output payload until we're done (state skipped if no payload). 
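 * (Illustrative sketch, not part of this file, assuming <stdint.h> and <stddef.h>:
 * a standalone helper of this shape performs the same transformation as the masking
 * loop inside the PAYLOAD state below, where 'offset' is the count of payload bytes
 * already masked for this frame:
 *
 *     static void s_example_mask_payload(uint8_t *bytes, size_t len, const uint8_t key[4], uint64_t offset) {
 *         for (size_t i = 0; i < len; ++i) {
 *             bytes[i] ^= key[(offset + i) % 4];
 *         }
 *     }
 *
 * Because XOR is its own inverse, the decoder applies the identical operation to unmask.)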
*/ static int s_state_payload(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { /* Bail early if out_buf has no space for writing */ if (out_buf->len >= out_buf->capacity) { return AWS_OP_SUCCESS; } const uint64_t prev_bytes_processed = encoder->state_bytes_processed; const struct aws_byte_buf prev_buf = *out_buf; /* Invoke callback which will write to buffer */ int err = encoder->stream_outgoing_payload(out_buf, encoder->user_data); if (err) { return AWS_OP_ERR; } /* Ensure that user did not commit forbidden acts upon the out_buf */ AWS_FATAL_ASSERT( (out_buf->buffer == prev_buf.buffer) && (out_buf->capacity == prev_buf.capacity) && (out_buf->len >= prev_buf.len)); size_t bytes_written = out_buf->len - prev_buf.len; err = aws_add_u64_checked(encoder->state_bytes_processed, bytes_written, &encoder->state_bytes_processed); if (err) { return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT); } /* Mask data, if necessary. * RFC-6455 Section 5.3 Client-to-Server Masking * Each byte of payload is XOR against a byte of the masking-key */ if (encoder->frame.masked) { uint64_t mask_index = prev_bytes_processed; /* Optimization idea: don't do this 1 byte at a time */ uint8_t *current_byte = out_buf->buffer + prev_buf.len; uint8_t *end_byte = out_buf->buffer + out_buf->len; while (current_byte != end_byte) { *current_byte++ ^= encoder->frame.masking_key[mask_index++ % 4]; } } /* If done writing payload, proceed to next state */ if (encoder->state_bytes_processed == encoder->frame.payload_length) { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_DONE; } else { /* Some more error-checking... */ if (encoder->state_bytes_processed > encoder->frame.payload_length) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing stream has exceeded stated payload length of %" PRIu64, (void *)encoder->user_data, encoder->frame.payload_length); return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT); } } return AWS_OP_SUCCESS; } static state_fn *s_state_functions[AWS_WEBSOCKET_ENCODER_STATE_DONE] = { s_state_init, s_state_opcode_byte, s_state_length_byte, s_state_extended_length, s_state_masking_key_check, s_state_masking_key, s_state_payload_check, s_state_payload, }; int aws_websocket_encoder_process(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) { /* Run state machine until frame is completely decoded, or the state stops changing. * Note that we don't necessarily stop looping when out_buf is full, because not all states need to output data */ while (encoder->state != AWS_WEBSOCKET_ENCODER_STATE_DONE) { const enum aws_websocket_encoder_state prev_state = encoder->state; int err = s_state_functions[encoder->state](encoder, out_buf); if (err) { return AWS_OP_ERR; } if (prev_state == encoder->state) { /* dev-assert: Check that each state is doing as much work as it possibly can. * Except for the PAYLOAD state, where it's up to the user to fill the buffer. 
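 * (Put differently: if a pass through a state makes no progress, either out_buf is full or
 * the user's payload callback chose not to write anything this time; the caller can simply
 * invoke aws_websocket_encoder_process() again later with more room or more payload.)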
*/ AWS_ASSERT((out_buf->len == out_buf->capacity) || (encoder->state == AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD)); break; } } if (encoder->state == AWS_WEBSOCKET_ENCODER_STATE_DONE) { encoder->state = AWS_WEBSOCKET_ENCODER_STATE_INIT; encoder->is_frame_in_progress = false; } return AWS_OP_SUCCESS; } int aws_websocket_encoder_start_frame(struct aws_websocket_encoder *encoder, const struct aws_websocket_frame *frame) { /* Error-check as much as possible before accepting next frame */ if (encoder->is_frame_in_progress) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } /* RFC-6455 Section 5.2 contains all these rules... */ /* Opcode must fit in 4bits */ if (frame->opcode != (frame->opcode & 0x0F)) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing frame has unknown opcode 0x%" PRIx8, (void *)encoder->user_data, frame->opcode); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* High bit of 8byte length must be clear */ if (frame->payload_length > AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing frame's payload length exceeds the max", (void *)encoder->user_data); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* Data frames with the FIN bit clear are considered fragmented and must be followed by * 1+ CONTINUATION frames, where only the final CONTINUATION frame's FIN bit is set. * * Control frames may be injected in the middle of a fragmented message, * but control frames may not be fragmented themselves. */ bool keep_expecting_continuation_data_frame = encoder->expecting_continuation_data_frame; if (aws_websocket_is_data_frame(frame->opcode)) { bool is_continuation_frame = (AWS_WEBSOCKET_OPCODE_CONTINUATION == frame->opcode); if (encoder->expecting_continuation_data_frame != is_continuation_frame) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: Fragmentation error. Outgoing frame starts a new message but previous message has not ended", (void *)encoder->user_data); return aws_raise_error(AWS_ERROR_INVALID_STATE); } keep_expecting_continuation_data_frame = !frame->fin; } else { /* Control frames themselves MUST NOT be fragmented. 
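 * (Fragmentation example, for illustration: TEXT(fin=0), CONTINUATION(fin=0), CONTINUATION(fin=1)
 * is a legal outgoing sequence per RFC-6455 Section 5.4, and a PING(fin=1) may be injected
 * between those frames; a PING with fin=0, however, is rejected right here.)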
*/ if (!frame->fin) { AWS_LOGF_ERROR( AWS_LS_HTTP_WEBSOCKET, "id=%p: It is illegal to send a fragmented control frame", (void *)encoder->user_data); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } /* Frame accepted */ encoder->frame = *frame; encoder->is_frame_in_progress = true; encoder->expecting_continuation_data_frame = keep_expecting_continuation_data_frame; return AWS_OP_SUCCESS; } bool aws_websocket_encoder_is_frame_in_progress(const struct aws_websocket_encoder *encoder) { return encoder->is_frame_in_progress; } void aws_websocket_encoder_init( struct aws_websocket_encoder *encoder, aws_websocket_encoder_payload_fn *stream_outgoing_payload, void *user_data) { AWS_ZERO_STRUCT(*encoder); encoder->user_data = user_data; encoder->stream_outgoing_payload = stream_outgoing_payload; } uint64_t aws_websocket_frame_encoded_size(const struct aws_websocket_frame *frame) { /* This is an internal function, so asserts are sufficient error handling */ AWS_ASSERT(frame); AWS_ASSERT(frame->payload_length <= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE); /* All frames start with at least 2 bytes */ uint64_t total = 2; /* If masked, add 4 bytes for masking-key */ if (frame->masked) { total += 4; } /* If extended payload length, add 2 or 8 bytes */ if (frame->payload_length >= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE) { total += 8; } else if (frame->payload_length >= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE) { total += 2; } /* Plus payload itself */ total += frame->payload_length; return total; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/000077500000000000000000000000001456575232400213515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/CMakeLists.txt000066400000000000000000001121641456575232400241160ustar00rootroot00000000000000include(AwsTestHarness) include(AwsLibFuzzer) enable_testing() file(GLOB TEST_HDRS "*.h") file(GLOB TEST_SRC "*.c") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) # Adds two test cases, where second one ends with "_one_byte_at_a_time" macro(add_one_byte_at_a_time_test_set NAME) add_test_case("${NAME}") add_test_case("${NAME}_one_byte_at_a_time") endmacro() # Each h2_decoder test is run multiple ways. This adds each test case. 
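# (Illustrative note: add_h2_decoder_test_set(h2_decoder_data), used further below, expands to
# add_test_case(h2_decoder_data) and add_test_case(h2_decoder_data_one_byte_at_a_time),
# the same shape as add_one_byte_at_a_time_test_set above.)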
macro(add_h2_decoder_test_set NAME) add_test_case("${NAME}") add_test_case("${NAME}_one_byte_at_a_time") endmacro() add_test_case(headers_add) add_test_case(headers_add_array) add_test_case(headers_set) add_test_case(headers_erase_index) add_test_case(headers_erase) add_test_case(headers_erase_value) add_test_case(headers_clear) add_test_case(headers_get_all) add_test_case(h2_headers_request_pseudos_get_set) add_test_case(h2_headers_response_pseudos_get_set) add_test_case(message_sanity_check) add_test_case(message_request_method) add_test_case(message_request_path) add_test_case(message_response_status) add_test_case(message_refcounts) add_test_case(message_with_existing_headers) add_test_case(h1_test_get_request) add_test_case(h1_test_request_bad_version) add_test_case(h1_test_response_unsupported_version) add_test_case(h1_test_response_1_0) add_test_case(h1_test_get_status_code) add_test_case(h1_test_overflow_scratch_space) add_test_case(h1_test_receive_request_headers) add_test_case(h1_test_receive_response_headers) add_test_case(h1_test_get_transfer_encoding_flags) add_test_case(h1_test_body_unchunked) add_test_case(h1_test_body_chunked) add_test_case(h1_decode_trailers) add_test_case(h1_decode_one_byte_at_a_time) add_test_case(h1_decode_messages_at_random_intervals) add_test_case(h1_decode_bad_requests_and_assert_failure) add_test_case(h1_decode_bad_responses_and_assert_failure) add_test_case(h1_test_extraneous_buffer_data_ensure_not_processed) add_test_case(h1_test_ignore_chunk_extensions) add_test_case(h1_encoder_content_length_put_request_headers) add_test_case(h1_encoder_transfer_encoding_chunked_put_request_headers) add_test_case(h1_encoder_transfer_encoding_chunked_put_request_multiple_te_headers) add_test_case(h1_encoder_transfer_encoding_chunked_put_request_headers_case_insensitivity) add_test_case(h1_encoder_transfer_encoding_chunked_and_content_length_put_request_headers) add_test_case(h1_encoder_transfer_encoding_not_chunked_put_request_headers) add_test_case(h1_encoder_transfer_encoding_set_body_stream_errors) add_test_case(h1_encoder_transfer_encoding_chunked_multiple_put_request_headers) add_test_case(h1_encoder_transfer_encoding_chunked_not_final_encoding_put_request_headers) add_test_case(h1_encoder_transfer_encoding_not_ending_in_chunked_put_request_headers) add_test_case(h1_encoder_rejects_bad_method) add_test_case(h1_encoder_rejects_missing_method) add_test_case(h1_encoder_rejects_bad_path) add_test_case(h1_encoder_rejects_missing_path) add_test_case(h1_encoder_rejects_bad_header_name) add_test_case(h1_encoder_rejects_bad_header_value) add_test_case(h1_client_sanity_check) add_test_case(h1_client_request_send_1liner) add_test_case(h1_client_request_send_headers) add_test_case(h1_client_request_send_body) add_test_case(h1_client_request_send_body_chunked) add_test_case(h1_client_request_send_chunked_trailer) add_test_case(h1_client_request_forbidden_trailer) add_test_case(h1_client_request_send_empty_chunked_trailer) add_test_case(h1_client_request_send_large_body) add_test_case(h1_client_request_send_large_body_chunked) add_test_case(h1_client_request_send_large_head) add_test_case(h1_client_request_content_length_0_ok) add_test_case(h1_client_request_waits_for_chunks) add_test_case(h1_client_request_send_chunk_from_chunk_complete_callback) add_test_case(h1_client_request_write_chunk_as_write_completes_regression) add_test_case(h1_client_request_send_chunked_extensions) add_test_case(h1_client_request_send_large_chunk_extensions) 
add_test_case(h1_client_request_send_chunk_size_0_ok) add_test_case(h1_client_request_send_chunk_size_0_with_extensions_ok) add_test_case(h1_client_request_content_length_too_small_is_error) add_test_case(h1_client_request_content_length_too_large_is_error) add_test_case(h1_client_request_chunk_size_too_small_is_error) add_test_case(h1_client_request_chunk_size_too_large_is_error) add_test_case(h1_client_request_chunks_cancelled_by_channel_shutdown) add_test_case(h1_client_request_send_multiple) add_test_case(h1_client_request_send_multiple_chunked_encoding) add_test_case(h1_client_request_close_header_ends_connection) add_test_case(h1_client_request_close_header_with_pipelining) add_test_case(h1_client_request_close_header_with_chunked_encoding_and_pipelining) add_test_case(h1_client_stream_release_after_complete) add_test_case(h1_client_stream_release_before_complete) add_test_case(h1_client_response_get_1liner) add_test_case(h1_client_response_get_headers) add_test_case(h1_client_response_get_body) add_test_case(h1_client_response_get_no_body_for_head_request) add_test_case(h1_client_response_get_no_body_from_304) add_test_case(h1_client_response_get_100) add_test_case(h1_client_response_get_1_from_multiple_io_messages) add_test_case(h1_client_response_get_multiple_from_1_io_message) add_test_case(h1_client_response_with_bad_data_shuts_down_connection) add_test_case(h1_client_response_with_too_much_data_shuts_down_connection) add_test_case(h1_client_response_arrives_before_request_done_sending_is_ok) add_test_case(h1_client_response_arrives_before_request_chunks_done_sending_is_ok) add_test_case(h1_client_response_without_request_shuts_down_connection) add_test_case(h1_client_response_close_header_ends_connection) add_test_case(h1_client_response_close_header_with_pipelining) add_test_case(h1_client_respects_stream_window) add_test_case(h1_client_connection_window_with_buffer) add_test_case(h1_client_connection_window_with_small_buffer) add_test_case(h1_client_request_cancelled_by_channel_shutdown) add_test_case(h1_client_multiple_requests_cancelled_by_channel_shutdown) add_test_case(h1_client_new_request_fails_if_channel_shut_down) add_test_case(h1_client_error_from_outgoing_body_callback_stops_decoder) add_test_case(h1_client_error_from_incoming_headers_callback_stops_decoder) add_test_case(h1_client_error_from_incoming_headers_done_callback_stops_decoder) add_test_case(h1_client_error_from_incoming_body_callback_stops_decoder) add_test_case(h1_client_close_from_off_thread_makes_not_open) add_test_case(h1_client_close_from_on_thread_makes_not_open) add_test_case(h1_client_unactivated_stream_cleans_up) add_test_case(h1_client_new_request_allowed) add_test_case(h1_client_midchannel_sanity_check) add_test_case(h1_client_midchannel_read) add_test_case(h1_client_midchannel_read_immediately) add_test_case(h1_client_midchannel_read_with_small_downstream_window) add_test_case(h1_client_midchannel_write) add_test_case(h1_client_midchannel_write_continues_after_shutdown_in_read_dir) add_test_case(h1_client_midchannel_requires_switching_protocols) add_test_case(h1_client_switching_protocols_fails_pending_requests) add_test_case(h1_client_switching_protocols_fails_subsequent_requests) add_test_case(h1_client_switching_protocols_requires_downstream_handler) add_test_case(h1_client_connection_close_before_request_finishes) add_test_case(h1_client_stream_cancel) add_test_case(h1_client_response_close_connection_before_request_finishes) add_test_case(h1_client_response_first_byte_timeout_connection) 
add_test_case(h1_client_response_first_byte_timeout_request_override) add_test_case(strutil_trim_http_whitespace) add_test_case(strutil_is_http_token) add_test_case(strutil_is_lowercase_http_token) add_test_case(strutil_is_http_field_value) add_test_case(strutil_is_http_reason_phrase) add_test_case(strutil_is_http_request_target) add_test_case(strutil_is_http_pseudo_header_name) add_net_test_case(tls_download_medium_file_h1) add_net_test_case(tls_download_medium_file_h2) add_test_case(websocket_decoder_sanity_check) add_test_case(websocket_decoder_simplest_frame) add_test_case(websocket_decoder_rsv) add_test_case(websocket_decoder_data_frame) add_test_case(websocket_decoder_stops_at_frame_end) add_test_case(websocket_decoder_masking) add_test_case(websocket_decoder_extended_length_2byte) add_test_case(websocket_decoder_extended_length_8byte) add_test_case(websocket_decoder_1byte_at_a_time) add_test_case(websocket_decoder_fail_on_unknown_opcode) add_test_case(websocket_decoder_fragmented_message) add_test_case(websocket_decoder_fail_on_bad_fragmentation) add_test_case(websocket_decoder_control_frame_cannot_be_fragmented) add_test_case(websocket_decoder_utf8_text) add_test_case(websocket_decoder_fail_on_bad_utf8_text) add_test_case(websocket_decoder_fragmented_utf8_text) add_test_case(websocket_decoder_fail_on_fragmented_bad_utf8_text) add_test_case(websocket_decoder_on_frame_callback_can_fail_decoder) add_test_case(websocket_decoder_on_payload_callback_can_fail_decoder) add_test_case(websocket_encoder_sanity_check) add_test_case(websocket_encoder_simplest_frame) add_test_case(websocket_encoder_rsv) add_test_case(websocket_encoder_data_frame) add_test_case(websocket_encoder_fail_if_payload_exceeds_stated_length) add_test_case(websocket_encoder_masking) add_test_case(websocket_encoder_extended_length) add_test_case(websocket_encoder_1_byte_at_a_time) add_test_case(websocket_encoder_fragmented_message) add_test_case(websocket_encoder_fragmentation_failure_checks) add_test_case(websocket_encoder_payload_callback_can_fail_encoder) add_test_case(websocket_handler_sanity_check) add_test_case(websocket_handler_refcounting) add_test_case(websocket_handler_send_frame) add_test_case(websocket_handler_send_frame_off_thread) add_test_case(websocket_handler_send_multiple_frames) add_test_case(websocket_handler_send_huge_frame) add_test_case(websocket_handler_send_payload_slowly) add_test_case(websocket_handler_send_payload_with_pauses) add_test_case(websocket_handler_sends_nothing_after_close_frame) add_test_case(websocket_handler_send_frames_always_complete) add_test_case(websocket_handler_send_one_io_msg_at_a_time) add_test_case(websocket_handler_delayed_write_completion) add_test_case(websocket_handler_send_halts_if_payload_fn_returns_false) add_test_case(websocket_handler_shutdown_automatically_sends_close_frame) add_test_case(websocket_handler_shutdown_handles_queued_close_frame) # add_test_case(websocket_handler_shutdown_immediately_in_emergency) disabled until channel API exposes immediate shutdown add_test_case(websocket_handler_shutdown_handles_unexpected_write_error) add_test_case(websocket_handler_close_on_thread) add_test_case(websocket_handler_close_off_thread) add_test_case(websocket_handler_read_frame) add_test_case(websocket_handler_read_multiple_frames) add_test_case(websocket_handler_read_frames_split_across_io_messages) add_test_case(websocket_handler_read_frames_complete_on_shutdown) add_test_case(websocket_handler_read_halts_if_begin_fn_returns_false) 
add_test_case(websocket_handler_read_halts_if_payload_fn_returns_false) add_test_case(websocket_handler_read_halts_if_complete_fn_returns_false) add_test_case(websocket_handler_window_manual_increment) add_test_case(websocket_handler_window_manual_increment_off_thread) add_test_case(websocket_handler_sends_pong_automatically) add_test_case(websocket_handler_wont_send_pong_after_close_frame) add_test_case(websocket_midchannel_sanity_check) add_test_case(websocket_midchannel_write_message) add_test_case(websocket_midchannel_write_multiple_messages) add_test_case(websocket_midchannel_write_huge_message) add_test_case(websocket_midchannel_read_message) add_test_case(websocket_midchannel_read_multiple_messages) add_test_case(websocket_boot_sanity_check) add_test_case(websocket_boot_golden_path) add_test_case(websocket_boot_fail_at_http_connect) add_test_case(websocket_boot_fail_at_http_connect_error) add_test_case(websocket_boot_fail_at_new_request) add_test_case(websocket_boot_fail_at_activate_request) add_test_case(websocket_boot_fail_before_response_headers) add_test_case(websocket_boot_fail_before_response_headers_done) add_test_case(websocket_boot_fail_at_new_handler) add_test_case(websocket_boot_report_unexpected_http_shutdown) add_test_case(websocket_boot_fail_from_handshake_rejection) add_test_case(websocket_boot_fail_before_handshake_rejection_body) add_test_case(websocket_boot_fail_before_handshake_rejection_stream_complete) add_test_case(websocket_boot_fail_from_invalid_upgrade_header) add_test_case(websocket_boot_fail_from_missing_upgrade_header) add_test_case(websocket_boot_fail_from_invalid_connection_header) add_test_case(websocket_boot_fail_from_invalid_sec_websocket_accept_header) add_test_case(websocket_boot_fail_from_unsupported_sec_websocket_extensions_in_request) add_test_case(websocket_boot_fail_from_unsupported_sec_websocket_extensions_in_response) add_test_case(websocket_boot_ok_with_sec_websocket_protocol_header) add_test_case(websocket_boot_ok_with_sec_websocket_protocol_split_across_headers) add_test_case(websocket_boot_fail_from_missing_sec_websocket_protocol_header) add_test_case(websocket_boot_fail_from_invalid_sec_websocket_protocol_header) add_test_case(websocket_handshake_key_max_length) add_test_case(websocket_handshake_key_randomness) add_test_case(hpack_encode_integer) add_one_byte_at_a_time_test_set(hpack_decode_integer_5bits) add_one_byte_at_a_time_test_set(hpack_decode_integer_14bits) add_one_byte_at_a_time_test_set(hpack_decode_integer_8bits) add_one_byte_at_a_time_test_set(hpack_decode_integer_21bits) add_one_byte_at_a_time_test_set(hpack_decode_integer_ongoing) add_one_byte_at_a_time_test_set(hpack_decode_integer_too_big) add_one_byte_at_a_time_test_set(hpack_decode_integer_few_in_a_row) add_test_case(hpack_decode_string_blank) add_one_byte_at_a_time_test_set(hpack_decode_string_uncompressed) add_one_byte_at_a_time_test_set(hpack_decode_string_huffman) add_one_byte_at_a_time_test_set(hpack_decode_string_ongoing) add_one_byte_at_a_time_test_set(hpack_decode_string_short_buffer) add_test_case(hpack_static_table_find) add_test_case(hpack_static_table_get) add_test_case(hpack_dynamic_table_find) add_test_case(hpack_dynamic_table_get) add_test_case(hpack_decode_indexed_from_dynamic_table) add_test_case(hpack_dynamic_table_empty_value) add_test_case(hpack_dynamic_table_with_empty_header) add_test_case(hpack_dynamic_table_size_update_from_setting) if(ENABLE_LOCALHOST_INTEGRATION_TESTS) # Tests should be named with localhost_integ_* 
add_net_test_case(localhost_integ_hpack_stress) add_net_test_case(localhost_integ_hpack_compression_stress) add_net_test_case(localhost_integ_h2_upload_stress) add_net_test_case(localhost_integ_h2_download_stress) endif() add_test_case(h2_header_empty_payload) add_one_byte_at_a_time_test_set(h2_header_ex_2_1) add_one_byte_at_a_time_test_set(h2_header_ex_2_2) add_one_byte_at_a_time_test_set(h2_header_ex_2_3) add_one_byte_at_a_time_test_set(h2_header_ex_2_4) add_one_byte_at_a_time_test_set(h2_header_ex_3) add_one_byte_at_a_time_test_set(h2_header_ex_4) add_one_byte_at_a_time_test_set(h2_header_ex_5) add_one_byte_at_a_time_test_set(h2_header_ex_6) add_test_case(h2_encoder_data) add_test_case(h2_encoder_data_stalled) add_test_case(h2_encoder_data_stalled_completely) add_test_case(h2_encoder_headers) add_test_case(h2_encoder_priority) add_test_case(h2_encoder_rst_stream) add_test_case(h2_encoder_settings) add_test_case(h2_encoder_settings_ack) add_test_case(h2_encoder_push_promise) add_test_case(h2_encoder_ping) add_test_case(h2_encoder_goaway) add_test_case(h2_encoder_window_update) add_test_case(h2_decoder_sanity_check) add_h2_decoder_test_set(h2_decoder_data) add_h2_decoder_test_set(h2_decoder_data_padded) add_h2_decoder_test_set(h2_decoder_data_pad_length_zero) add_h2_decoder_test_set(h2_decoder_data_empty) add_h2_decoder_test_set(h2_decoder_data_empty_padded) add_h2_decoder_test_set(h2_decoder_data_ignores_unknown_flags) add_h2_decoder_test_set(h2_decoder_data_payload_max_size_update) add_h2_decoder_test_set(h2_decoder_err_data_payload_exceed_max_size) add_h2_decoder_test_set(h2_decoder_err_data_requires_stream_id) add_h2_decoder_test_set(h2_decoder_err_payload_too_small_for_pad_length) add_h2_decoder_test_set(h2_decoder_stream_id_ignores_reserved_bit) add_h2_decoder_test_set(h2_decoder_headers) add_h2_decoder_test_set(h2_decoder_headers_padded) add_h2_decoder_test_set(h2_decoder_headers_priority) add_h2_decoder_test_set(h2_decoder_headers_ignores_unknown_flags) add_h2_decoder_test_set(h2_decoder_headers_response_informational) add_h2_decoder_test_set(h2_decoder_headers_request) add_h2_decoder_test_set(h2_decoder_headers_cookies) add_h2_decoder_test_set(h2_decoder_headers_trailer) add_h2_decoder_test_set(h2_decoder_headers_empty_trailer) add_h2_decoder_test_set(h2_decoder_err_headers_requires_stream_id) add_h2_decoder_test_set(h2_decoder_err_headers_payload_too_small_for_padding) add_h2_decoder_test_set(h2_decoder_err_headers_payload_too_small_for_priority) add_h2_decoder_test_set(h2_decoder_malformed_headers_blank_name) add_h2_decoder_test_set(h2_decoder_malformed_headers_illegal_name) add_h2_decoder_test_set(h2_decoder_malformed_headers_response_to_server) add_h2_decoder_test_set(h2_decoder_malformed_headers_request_to_client) add_h2_decoder_test_set(h2_decoder_malformed_headers_mixed_pseudoheaders) add_h2_decoder_test_set(h2_decoder_malformed_headers_late_pseudoheaders) add_h2_decoder_test_set(h2_decoder_malformed_headers_trailer_must_end_stream) add_h2_decoder_test_set(h2_decoder_malformed_header_continues_hpack_parsing) add_h2_decoder_test_set(h2_decoder_continuation) add_h2_decoder_test_set(h2_decoder_continuation_ignores_unknown_flags) add_h2_decoder_test_set(h2_decoder_continuation_header_field_spans_frames) add_h2_decoder_test_set(h2_decoder_continuation_many_frames) add_h2_decoder_test_set(h2_decoder_continuation_empty_payloads) add_h2_decoder_test_set(h2_decoder_err_continuation_frame_expected) add_h2_decoder_test_set(h2_decoder_err_continuation_frame_same_stream_expected) 
add_h2_decoder_test_set(h2_decoder_err_partial_header) add_h2_decoder_test_set(h2_decoder_err_bad_hpack_data) add_h2_decoder_test_set(h2_decoder_priority) add_h2_decoder_test_set(h2_decoder_priority_ignores_unknown_flags) add_h2_decoder_test_set(h2_decoder_err_priority_requires_stream_id) add_h2_decoder_test_set(h2_decoder_err_priority_payload_too_small) add_h2_decoder_test_set(h2_decoder_err_priority_payload_too_large) add_h2_decoder_test_set(h2_decoder_rst_stream) add_h2_decoder_test_set(h2_decoder_rst_stream_ignores_unknown_flags) add_h2_decoder_test_set(h2_decoder_err_rst_stream_requires_stream_id) add_h2_decoder_test_set(h2_decoder_err_rst_stream_payload_too_small) add_h2_decoder_test_set(h2_decoder_err_rst_stream_payload_too_large) add_h2_decoder_test_set(h2_decoder_settings) add_h2_decoder_test_set(h2_decoder_settings_empty) add_h2_decoder_test_set(h2_decoder_settings_ack) add_h2_decoder_test_set(h2_decoder_settings_ignores_unknown_ids) add_h2_decoder_test_set(h2_decoder_settings_ignores_unknown_flags) add_h2_decoder_test_set(h2_decoder_err_settings_ack_with_data) add_h2_decoder_test_set(h2_decoder_err_settings_forbids_stream_id) add_h2_decoder_test_set(h2_decoder_err_settings_payload_size) add_h2_decoder_test_set(h2_decoder_err_settings_invalid_values_enable_push) add_h2_decoder_test_set(h2_decoder_err_settings_invalid_values_initial_window_size) add_h2_decoder_test_set(h2_decoder_err_settings_invalid_values_max_frame_size) add_h2_decoder_test_set(h2_decoder_push_promise) add_h2_decoder_test_set(h2_decoder_push_promise_ignores_unknown_flags) add_h2_decoder_test_set(h2_decoder_push_promise_continuation) add_h2_decoder_test_set(h2_decoder_err_push_promise_continuation_expected) add_h2_decoder_test_set(h2_decoder_err_push_promise_requires_stream_id) add_h2_decoder_test_set(h2_decoder_err_push_promise_requires_promised_stream_id) add_h2_decoder_test_set(h2_decoder_err_push_promise_with_enable_push_0) add_h2_decoder_test_set(h2_decoder_malformed_push_promise_must_be_request_1) add_h2_decoder_test_set(h2_decoder_malformed_push_promise_must_be_request_2) add_h2_decoder_test_set(h2_decoder_ping) add_h2_decoder_test_set(h2_decoder_ping_ack) add_h2_decoder_test_set(h2_decoder_err_ping_forbids_stream_id) add_h2_decoder_test_set(h2_decoder_err_ping_payload_too_small) add_h2_decoder_test_set(h2_decoder_err_ping_payload_too_large) add_h2_decoder_test_set(h2_decoder_goaway) add_h2_decoder_test_set(h2_decoder_goaway_empty) add_h2_decoder_test_set(h2_decoder_err_goaway_forbids_stream_id) add_h2_decoder_test_set(h2_decoder_err_goaway_payload_too_small) add_h2_decoder_test_set(h2_decoder_window_update_connection) add_h2_decoder_test_set(h2_decoder_window_update_stream) add_h2_decoder_test_set(h2_decoder_err_window_update_payload_too_small) add_h2_decoder_test_set(h2_decoder_err_window_update_payload_too_large) add_h2_decoder_test_set(h2_decoder_unknown_frame_type_ignored) add_h2_decoder_test_set(h2_decoder_many_frames_in_a_row) add_h2_decoder_test_set(h2_decoder_preface_from_server) add_h2_decoder_test_set(h2_decoder_err_bad_preface_from_server_1) add_h2_decoder_test_set(h2_decoder_err_bad_preface_from_server_2) add_h2_decoder_test_set(h2_decoder_err_bad_preface_from_server_3) add_h2_decoder_test_set(h2_decoder_preface_from_client) add_h2_decoder_test_set(h2_decoder_err_bad_preface_from_client_1) add_h2_decoder_test_set(h2_decoder_err_bad_preface_from_client_2) add_h2_decoder_test_set(h2_decoder_err_bad_preface_from_client_3) add_test_case(h2_client_sanity_check) 
add_test_case(h2_client_stream_create) add_test_case(h2_client_stream_release_after_complete) add_test_case(h2_client_unactivated_stream_cleans_up) add_test_case(h2_client_connection_preface_sent) add_test_case(h2_client_auto_ping_ack) add_test_case(h2_client_auto_ping_ack_higher_priority) # TODO add_test_case(h2_client_auto_ping_ack_higher_priority_not_break_encoding_frame) add_test_case(h2_client_auto_settings_ack) add_test_case(h2_client_stream_complete) add_test_case(h2_client_close) add_test_case(h2_client_connection_init_settings_applied_after_ack_by_peer) add_test_case(h2_client_stream_with_h1_request_message) add_test_case(h2_client_stream_with_cookies_headers) add_test_case(h2_client_stream_err_malformed_header) add_test_case(h2_client_stream_err_state_forbids_frame) add_test_case(h2_client_conn_err_stream_frames_received_for_idle_stream) add_test_case(h2_client_stream_ignores_some_frames_received_soon_after_closing) add_test_case(h2_client_conn_err_stream_frames_received_soon_after_closing) add_test_case(h2_client_stream_err_stream_frames_received_soon_after_rst_stream_received) add_test_case(h2_client_conn_err_stream_frames_received_after_removed_from_cache) add_test_case(h2_client_stream_receive_info_headers) add_test_case(h2_client_stream_err_receive_info_headers_after_main) add_test_case(h2_client_stream_receive_trailing_headers) add_test_case(h2_client_stream_err_receive_trailing_before_main) add_test_case(h2_client_stream_receive_data) add_test_case(h2_client_stream_err_receive_data_before_headers) add_test_case(h2_client_stream_err_receive_data_not_match_content_length) add_test_case(h2_client_stream_send_data) add_test_case(h2_client_stream_send_lots_of_data) add_test_case(h2_client_stream_send_stalled_data) add_test_case(h2_client_stream_send_data_controlled_by_stream_window_size) add_test_case(h2_client_stream_send_data_controlled_by_negative_stream_window_size) add_test_case(h2_client_stream_send_data_controlled_by_connection_window_size) add_test_case(h2_client_stream_send_data_controlled_by_connection_and_stream_window_size) add_test_case(h2_client_stream_send_window_update) add_test_case(h2_client_stream_send_window_update) add_test_case(h2_client_stream_err_received_data_flow_control) add_test_case(h2_client_conn_err_received_data_flow_control) add_test_case(h2_client_conn_err_window_update_exceed_max) add_test_case(h2_client_conn_err_window_update_size_zero) add_test_case(h2_client_conn_err_initial_window_size_settings_cause_window_exceed_max) add_test_case(h2_client_stream_receive_end_stream_before_done_sending) add_test_case(h2_client_stream_receive_end_stream_and_rst_before_done_sending) add_test_case(h2_client_stream_err_input_stream_failure) add_test_case(h2_client_stream_err_receive_rst_stream) add_test_case(h2_client_push_promise_automatically_rejected) add_test_case(h2_client_conn_receive_goaway) add_test_case(h2_client_conn_receive_goaway_debug_data) add_test_case(h2_client_conn_err_invalid_last_stream_id_goaway) # TODO add_test_case(h2_client_send_goaway_with_push_promises) id of 1st should be in GOAWAY 2nd should be ignored add_test_case(h2_client_change_settings_succeed) add_test_case(h2_client_change_settings_failed_no_ack_received) add_test_case(h2_client_manual_window_management_disabled_auto_window_update) add_test_case(h2_client_manual_window_management_user_send_stream_window_update) add_test_case(h2_client_manual_window_management_user_send_stream_window_update_with_padding) 
add_test_case(h2_client_manual_window_management_user_send_stream_window_update_overflow) add_test_case(h2_client_manual_window_management_user_send_conn_window_update) add_test_case(h2_client_manual_window_management_user_send_conn_window_update_with_padding) add_test_case(h2_client_manual_window_management_user_send_connection_window_update_overflow) # Build these when we address window_update() differences in H1 vs H2 # TODO add_test_case(h2_client_manual_updated_window_ignored_when_automatical_on) # TODO add_test_case(h2_client_manual_stream_updated_window_ignored_invalid_state) # TODO add_test_case(h2_client_manual_window_management_window_overflow) #we cannot ensure the increment_size is safe or not, let our peer detect the maximum exceed or not. But we can test the obviously overflows here. add_test_case(h2_client_send_ping_successfully_receive_ack) add_test_case(h2_client_send_ping_no_ack_received) add_test_case(h2_client_conn_err_extraneous_ping_ack_received) add_test_case(h2_client_conn_err_mismatched_ping_ack_received) add_test_case(h2_client_empty_initial_settings) add_test_case(h2_client_conn_failed_initial_settings_completed_not_invoked) add_test_case(h2_client_stream_reset_stream) add_test_case(h2_client_stream_reset_ignored_stream_closed) add_test_case(h2_client_stream_reset_failed_before_activate_called) add_test_case(h2_client_stream_cancel_stream) add_test_case(h2_client_stream_keeps_alive_for_cross_thread_task) add_test_case(h2_client_stream_get_received_reset_error_code) add_test_case(h2_client_stream_get_sent_reset_error_code) add_test_case(h2_client_new_request_allowed) add_test_case(h2_client_send_multiple_goaway) add_test_case(h2_client_get_sent_goaway) add_test_case(h2_client_get_received_goaway) add_test_case(h2_client_request_apis_failed_after_connection_begin_shutdown) add_test_case(h2_client_get_local_settings) add_test_case(h2_client_get_remote_settings) add_test_case(h2_client_error_from_outgoing_body_callback_reset_stream) add_test_case(h2_client_error_from_incoming_headers_callback_reset_stream) add_test_case(h2_client_error_from_incoming_headers_done_callback_reset_stream) add_test_case(h2_client_error_from_incoming_body_callback_reset_stream) add_test_case(h2_client_manual_data_write) add_test_case(h2_client_manual_data_write_not_enabled) add_test_case(h2_client_manual_data_write_with_body) add_test_case(h2_client_manual_data_write_no_data) add_test_case(h2_client_manual_data_write_connection_close) add_test_case(server_new_destroy) add_test_case(server_new_destroy_tcp) add_test_case(connection_setup_shutdown) add_test_case(connection_setup_shutdown_tls) add_test_case(connection_setup_shutdown_proxy_setting_on_ev_not_found) add_test_case(connection_setup_shutdown_pinned_event_loop) add_test_case(connection_h2_prior_knowledge) add_test_case(connection_h2_prior_knowledge_not_work_with_tls) add_test_case(connection_customized_alpn) add_test_case(connection_customized_alpn_error_with_unknown_return_string) # These server tests occasionally fail. Resurrect if/when we get back to work on HTTP server. 
# add_test_case(connection_destroy_server_with_connection_existing) # add_test_case(connection_destroy_server_with_multiple_connections_existing) # add_test_case(connection_server_shutting_down_new_connection_setup_fail) # connection manager tests # unit tests where connections are mocked add_net_test_case(test_connection_manager_setup_shutdown) add_net_test_case(test_connection_manager_acquire_release_mix_synchronous) add_net_test_case(test_connection_manager_connect_callback_failure) add_net_test_case(test_connection_manager_connect_immediate_failure) add_net_test_case(test_connection_manager_proxy_setup_shutdown) add_net_test_case(test_connection_manager_idle_culling_single) add_net_test_case(test_connection_manager_idle_culling_many) add_net_test_case(test_connection_manager_idle_culling_mixture) add_net_test_case(test_connection_manager_idle_culling_refcount) # tests where we establish real connections add_net_test_case(test_connection_manager_single_connection) add_net_test_case(test_connection_manager_proxy_envrionment_empty_string) add_net_test_case(test_connection_manager_single_http2_connection) add_net_test_case(test_connection_manager_single_http2_connection_failed) add_net_test_case(test_connection_manager_single_http2_connection_with_settings) add_net_test_case(test_connection_manager_many_connections) add_net_test_case(test_connection_manager_many_http2_connections) add_net_test_case(test_connection_manager_acquire_release) add_net_test_case(test_connection_manager_close_and_release) add_net_test_case(test_connection_manager_acquire_release_mix) # Integration test that requires proxy envrionment in us-east-1 region. # TODO: test the server name validation properly if(ENABLE_PROXY_INTEGRATION_TESTS) add_net_test_case(connection_manager_proxy_integration_forwarding_proxy_no_auth) add_net_test_case(connection_manager_proxy_integration_forwarding_proxy_no_auth_env) add_net_test_case(connection_manager_proxy_integration_legacy_http_no_auth) add_net_test_case(connection_manager_proxy_integration_legacy_http_no_auth_env) add_net_test_case(connection_manager_proxy_integration_legacy_https_no_auth) add_net_test_case(connection_manager_proxy_integration_legacy_https_no_auth_env) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_http_no_auth_env) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_https_no_auth) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_https_no_auth_env) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_env) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_configured_tls_env) add_net_test_case(connection_manager_proxy_integration_forwarding_proxy_basic_auth) add_net_test_case(connection_manager_proxy_integration_forwarding_proxy_basic_auth_env) add_net_test_case(connection_manager_proxy_integration_legacy_http_basic_auth) add_net_test_case(connection_manager_proxy_integration_legacy_http_basic_auth_env) add_net_test_case(connection_manager_proxy_integration_legacy_https_basic_auth) add_net_test_case(connection_manager_proxy_integration_legacy_https_basic_auth_env) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_http_basic_auth_env) add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_https_basic_auth) 
add_net_test_case(connection_manager_proxy_integration_tunneling_proxy_https_basic_auth_env) add_net_test_case(h1_proxy_h2_host_tunneling_double_tls_no_auth) endif() add_test_case(h1_server_sanity_check) add_test_case(h1_server_receive_1line_request) add_test_case(h1_server_receive_headers) add_test_case(h1_server_receive_body) add_test_case(h1_server_receive_1_request_from_multiple_io_messages) add_test_case(h1_server_receive_multiple_requests_from_1_io_messages) add_test_case(h1_server_receive_bad_request_shut_down_connection) add_test_case(h1_server_receive_close_header_ends_connection) add_test_case(h1_server_receive_close_header_more_requests_illegal) add_test_case(h1_server_send_1line_response) add_test_case(h1_server_send_response_headers) add_test_case(h1_server_send_response_body) add_test_case(h1_server_send_response_to_HEAD_request) add_test_case(h1_server_send_304_response) add_test_case(h1_server_send_multiple_responses_in_order) add_test_case(h1_server_send_multiple_responses_out_of_order) add_test_case(h1_server_send_multiple_responses_out_of_order_only_one_sent) add_test_case(h1_server_send_response_before_request_finished) add_test_case(h1_server_send_response_large_body) add_test_case(h1_server_send_response_large_head) add_test_case(h1_server_send_close_header_ends_connection) add_test_case(h1_server_send_close_header_with_pipelining) add_test_case(h1_server_close_before_message_is_sent) add_test_case(h1_server_error_from_incoming_request_callback_stops_decoder) add_test_case(h1_server_error_from_incoming_headers_callback_stops_decoder) add_test_case(h1_server_error_from_incoming_headers_done_callback_stops_decoder) add_test_case(h1_server_error_from_incoming_request_done_callback_stops_decoder) add_test_case(h1_server_error_from_incoming_body_callback_stops_decoder) add_test_case(h1_server_error_from_outgoing_body_callback_stops_sending) add_test_case(h1_server_close_from_off_thread_makes_not_open) add_test_case(h1_server_close_from_on_thread_makes_not_open) add_test_case(test_http_forwarding_proxy_connection_proxy_target) add_test_case(test_http_forwarding_proxy_connection_channel_failure) add_test_case(test_http_forwarding_proxy_connection_connect_failure) add_test_case(test_http_forwarding_proxy_request_transform) add_test_case(test_http_forwarding_proxy_request_transform_basic_auth) add_test_case(test_http_forwarding_proxy_request_transform_legacy_basic_auth) add_test_case(test_http_proxy_request_transform_kerberos) add_test_case(test_http_proxy_kerberos_token_failure) add_test_case(test_http_proxy_kerberos_connect_failure) add_test_case(test_http_proxy_adaptive_identity_success) add_test_case(test_http_proxy_adaptive_kerberos_success) add_test_case(test_http_proxy_adaptive_ntlm_success) add_test_case(test_http_proxy_adaptive_failure) add_test_case(test_http_forwarding_proxy_uri_rewrite) add_test_case(test_http_forwarding_proxy_uri_rewrite_options_star) add_test_case(test_http_tunnel_proxy_connection_success) add_test_case(test_https_tunnel_proxy_connection_success) add_test_case(test_http_tunnel_proxy_connection_failure_connect) add_test_case(test_https_tunnel_proxy_connection_failure_connect) add_test_case(test_https_tunnel_proxy_connection_failure_tls) add_test_case(test_http_connection_monitor_options_is_valid) add_test_case(test_http_connection_monitor_rw_above) add_test_case(test_http_connection_monitor_r_above) add_test_case(test_http_connection_monitor_w_above) add_test_case(test_http_connection_monitor_write_then_read_above) 
add_test_case(test_http_connection_monitor_below_but_undetectable) add_test_case(test_http_connection_monitor_rw_below) add_test_case(test_http_connection_monitor_below_then_above) add_test_case(test_http_connection_monitor_failure_reset_when_empty) add_test_case(test_http_connection_monitor_bytes_overflow) add_test_case(test_http_connection_monitor_time_overflow) add_test_case(test_http_connection_monitor_shutdown) add_test_case(test_http_stats_trivial) add_test_case(test_http_stats_basic_request) add_test_case(test_http_stats_split_across_gather_boundary) add_test_case(test_http_stats_pipelined) add_test_case(test_http_stats_multiple_requests_with_gap) # Tests that not make real connection but use TLS. So, still need to be marked as net test add_net_test_case(h2_sm_sanity_check) add_net_test_case(h2_sm_mock_connection) add_net_test_case(h2_sm_mock_multiple_connections) add_net_test_case(h2_sm_mock_bad_connection_acquired) add_net_test_case(h2_sm_mock_connections_closed_before_request_made) add_net_test_case(h2_sm_mock_max_concurrent_streams_remote) add_net_test_case(h2_sm_mock_fetch_metric) add_net_test_case(h2_sm_mock_complete_stream) add_net_test_case(h2_sm_mock_ideal_num_streams) add_net_test_case(h2_sm_mock_large_ideal_num_streams) add_net_test_case(h2_sm_mock_goaway) add_net_test_case(h2_sm_connection_ping) # Tests against real world server add_net_test_case(h2_sm_acquire_stream) add_net_test_case(h2_sm_acquire_stream_multiple_connections) add_net_test_case(h2_sm_closing_before_connection_acquired) add_net_test_case(h2_sm_close_connection_on_server_error) # Tests against local server if(ENABLE_LOCALHOST_INTEGRATION_TESTS) # Tests should be named with localhost_integ_* add_net_test_case(localhost_integ_h2_sm_prior_knowledge) add_net_test_case(localhost_integ_h2_sm_acquire_stream_stress) add_net_test_case(localhost_integ_h2_sm_acquire_stream_stress_with_body) add_net_test_case(localhost_integ_h2_sm_connection_monitor_kill_slow_connection) endif() add_test_case(random_access_set_sanitize_test) add_test_case(random_access_set_insert_test) add_test_case(random_access_set_get_random_test) add_test_case(random_access_set_exist_test) add_test_case(random_access_set_remove_test) add_test_case(random_access_set_owns_element_test) set(TEST_BINARY_NAME ${PROJECT_NAME}-tests) generate_test_driver(${TEST_BINARY_NAME}) file(GLOB FUZZ_TESTS "fuzz/*.c") aws_add_fuzz_tests("${FUZZ_TESTS}" "" "") # SSL certificates to use for testing. add_custom_command(TARGET ${TEST_BINARY_NAME} PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/resources $) aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/fuzz/000077500000000000000000000000001456575232400223475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/fuzz/fuzz_h2_decoder_correct.c000066400000000000000000000470651456575232400273240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include static const uint32_t MAX_PAYLOAD_SIZE = 16384; enum header_style { HEADER_STYLE_REQUEST, HEADER_STYLE_RESPONSE, HEADER_STYLE_TRAILER, }; static struct aws_http_headers *s_generate_headers( struct aws_allocator *allocator, struct aws_byte_cursor *input, enum header_style header_style) { struct aws_http_headers *headers = aws_http_headers_new(allocator); /* There are pretty strict requirements about pseudo-headers, no randomness for now */ if (header_style == HEADER_STYLE_REQUEST) { struct aws_http_header method = {.name = aws_http_header_method, .value = aws_http_method_get}; aws_http_headers_add_header(headers, &method); struct aws_http_header scheme = {.name = aws_http_header_scheme, .value = aws_http_scheme_https}; aws_http_headers_add_header(headers, &scheme); struct aws_http_header path = {.name = aws_http_header_path, .value = aws_byte_cursor_from_c_str("/")}; aws_http_headers_add_header(headers, &path); struct aws_http_header authority = { .name = aws_http_header_authority, .value = aws_byte_cursor_from_c_str("example.com"), }; aws_http_headers_add_header(headers, &authority); } else if (header_style == HEADER_STYLE_RESPONSE) { struct aws_http_header status = {.name = aws_http_header_status, .value = aws_byte_cursor_from_c_str("200")}; aws_http_headers_add_header(headers, &status); } struct aws_byte_buf buf; aws_byte_buf_init(&buf, allocator, 1024); while (input->len) { buf.len = 0; struct aws_http_header header; AWS_ZERO_STRUCT(header); uint8_t type = 0; aws_byte_cursor_read_u8(input, &type); switch (type % 3) { case 0: header.compression = AWS_HTTP_HEADER_COMPRESSION_USE_CACHE; break; case 1: header.compression = AWS_HTTP_HEADER_COMPRESSION_NO_CACHE; break; case 2: header.compression = AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE; break; } /* Start name with "x-" so we don't violate some rule for an official header. * Then add some more valid characters. 
*/ struct aws_byte_cursor header_name_prefix = aws_byte_cursor_from_c_str("x-"); aws_byte_buf_append(&buf, &header_name_prefix); uint8_t name_suffix_len = 0; aws_byte_cursor_read_u8(input, &name_suffix_len); for (size_t i = 0; i < name_suffix_len; ++i) { uint8_t c = 0; aws_byte_cursor_read_u8(input, &c); c = 'a' + (c % 26); /* a-z */ aws_byte_buf_write_u8(&buf, c); } header.name = aws_byte_cursor_from_buf(&buf); /* Fill header.value with valid characters */ uint8_t value_len = 0; aws_byte_cursor_read_u8(input, &value_len); for (size_t i = 0; i < value_len; ++i) { uint8_t c = 0; aws_byte_cursor_read_u8(input, &c); c = 'a' + (c % 26); /* a-z */ aws_byte_buf_write_u8(&buf, c); } header.value = aws_byte_cursor_from_buf(&buf); aws_byte_cursor_advance(&header.value, header.name.len); aws_http_headers_add_header(headers, &header); } aws_byte_buf_clean_up(&buf); return headers; } static uint32_t s_generate_stream_id(struct aws_byte_cursor *input) { uint32_t stream_id = 0; aws_byte_cursor_read_be32(input, &stream_id); return aws_min_u32(AWS_H2_STREAM_ID_MAX, aws_max_u32(1, stream_id)); } /* Server-initiated stream-IDs must be even */ static uint32_t s_generate_even_stream_id(struct aws_byte_cursor *input) { uint32_t stream_id = 0; aws_byte_cursor_read_be32(input, &stream_id); stream_id = aws_min_u32(AWS_H2_STREAM_ID_MAX, aws_max_u32(2, stream_id)); if (stream_id % 2 != 0) { stream_id -= 1; } return stream_id; } /* Client-initiated stream-IDs must be odd */ static uint32_t s_generate_odd_stream_id(struct aws_byte_cursor *input) { uint32_t stream_id = 0; aws_byte_cursor_read_be32(input, &stream_id); stream_id = aws_min_u32(AWS_H2_STREAM_ID_MAX, aws_max_u32(1, stream_id)); if (stream_id % 2 == 0) { stream_id += 1; } return stream_id; } static struct aws_h2_frame_priority_settings s_generate_priority(struct aws_byte_cursor *input) { struct aws_h2_frame_priority_settings priority; priority.stream_dependency = s_generate_stream_id(input); uint8_t exclusive = 0; aws_byte_cursor_read_u8(input, &exclusive); priority.stream_dependency_exclusive = (bool)exclusive; aws_byte_cursor_read_u8(input, &priority.weight); return priority; } AWS_EXTERN_C_BEGIN /** * This test generates valid frames from the random input. * It feeds these frames through the encoder and ensures that they're output without error. * Then it feeds the encoder's output to the decoder and ensures that it does not report an error. * It does not currently investigate the outputs to see if they line up with they inputs, * it just checks for errors from the encoder & decoder. 
*/ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { /* Setup allocator and parameters */ struct aws_allocator *allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0); struct aws_byte_cursor input = aws_byte_cursor_from_array(data, size); /* Enable logging */ struct aws_logger logger; struct aws_logger_standard_options log_options = { .level = AWS_LL_TRACE, .file = stdout, }; aws_logger_init_standard(&logger, allocator, &log_options); aws_logger_set(&logger); /* Init HTTP (s2n init is weird, so don't do this under the tracer) */ aws_http_library_init(aws_default_allocator()); /* Create the encoder */ struct aws_h2_frame_encoder encoder; aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/); /* Create the decoder */ uint8_t decoder_is_server = 0; aws_byte_cursor_read_u8(&input, &decoder_is_server); const struct aws_h2_decoder_vtable decoder_vtable = {0}; struct aws_h2_decoder_params decoder_params = { .alloc = allocator, .vtable = &decoder_vtable, .skip_connection_preface = true, .is_server = decoder_is_server, }; struct aws_h2_decoder *decoder = aws_h2_decoder_new(&decoder_params); /* Init the buffer */ struct aws_byte_buf frame_data; aws_byte_buf_init(&frame_data, allocator, AWS_H2_FRAME_PREFIX_SIZE + MAX_PAYLOAD_SIZE); /* * Generate the frame to decode */ uint8_t frame_type = 0; aws_byte_cursor_read_u8(&input, &frame_type); frame_type = frame_type % (AWS_H2_FRAME_TYPE_COUNT); if (decoder_is_server && frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) { /* Client can't send push-promise to server */ frame_type = AWS_H2_FRAME_T_HEADERS; } /* figure out if we should use huffman encoding */ uint8_t huffman_choice = 0; aws_byte_cursor_read_u8(&input, &huffman_choice); aws_hpack_encoder_set_huffman_mode(&encoder.hpack, huffman_choice % 3); switch (frame_type) { case AWS_H2_FRAME_T_DATA: { uint32_t stream_id = s_generate_stream_id(&input); uint8_t flags = 0; aws_byte_cursor_read_u8(&input, &flags); bool body_ends_stream = flags & AWS_H2_FRAME_F_END_STREAM; uint8_t pad_length = 0; aws_byte_cursor_read_u8(&input, &pad_length); /* Allow body to exceed available space. Data encoder should just write what it can fit */ struct aws_input_stream *body = aws_input_stream_new_from_cursor(allocator, &input); bool body_complete; bool body_stalled; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; AWS_FATAL_ASSERT( aws_h2_encode_data_frame( &encoder, stream_id, body, (bool)body_ends_stream, pad_length, &stream_window_size_peer, &connection_window_size_peer, &frame_data, &body_complete, &body_stalled) == AWS_OP_SUCCESS); struct aws_stream_status body_status; aws_input_stream_get_status(body, &body_status); AWS_FATAL_ASSERT(body_complete == body_status.is_end_of_stream); aws_input_stream_release(body); break; } case AWS_H2_FRAME_T_HEADERS: { /* If decoder is server, headers can only arrive on client-initiated streams * If decoder is client, header might arrive on server-initiated or client-initiated streams */ uint32_t stream_id = decoder_is_server ? s_generate_odd_stream_id(&input) : s_generate_stream_id(&input); uint8_t flags = 0; aws_byte_cursor_read_u8(&input, &flags); bool end_stream = flags & AWS_H2_FRAME_F_END_STREAM; bool use_priority = flags & AWS_H2_FRAME_F_PRIORITY; uint8_t pad_length = 0; aws_byte_cursor_read_u8(&input, &pad_length); struct aws_h2_frame_priority_settings priority = s_generate_priority(&input); struct aws_h2_frame_priority_settings *priority_ptr = use_priority ? 
&priority : NULL; /* Server can only receive request-style HEADERS, client can only receive response-style HEADERS. * But either side can receive trailer-style HEADERS */ uint8_t is_normal_header = 0; aws_byte_cursor_read_u8(&input, &is_normal_header); enum header_style header_style; if (is_normal_header) { if (decoder_is_server) { header_style = HEADER_STYLE_REQUEST; } else { header_style = HEADER_STYLE_RESPONSE; } } else { header_style = HEADER_STYLE_TRAILER; end_stream = true; /* Trailer must END_STREAM */ } /* generate headers last since it uses up the rest of input */ struct aws_http_headers *headers = s_generate_headers(allocator, &input, header_style); struct aws_h2_frame *frame = aws_h2_frame_new_headers(allocator, stream_id, headers, end_stream, pad_length, priority_ptr); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); aws_http_headers_release(headers); break; } case AWS_H2_FRAME_T_PRIORITY: { uint32_t stream_id = s_generate_stream_id(&input); struct aws_h2_frame_priority_settings priority = s_generate_priority(&input); struct aws_h2_frame *frame = aws_h2_frame_new_priority(allocator, stream_id, &priority); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); break; } case AWS_H2_FRAME_T_RST_STREAM: { uint32_t stream_id = s_generate_stream_id(&input); uint32_t error_code = 0; aws_byte_cursor_read_be32(&input, &error_code); struct aws_h2_frame *frame = aws_h2_frame_new_rst_stream(allocator, stream_id, error_code); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); break; } case AWS_H2_FRAME_T_SETTINGS: { uint8_t flags = 0; aws_byte_cursor_read_u8(&input, &flags); bool ack = flags & AWS_H2_FRAME_F_ACK; size_t settings_count = 0; struct aws_http2_setting *settings_array = NULL; if (!ack) { settings_count = aws_min_size(input.len / 6, MAX_PAYLOAD_SIZE); if (settings_count > 0) { settings_array = aws_mem_calloc(allocator, settings_count, sizeof(struct aws_http2_setting)); for (size_t i = 0; i < settings_count; ++i) { uint16_t id = 0; uint32_t value = 0; aws_byte_cursor_read_be16(&input, &id); aws_byte_cursor_read_be32(&input, &value); if (id >= AWS_HTTP2_SETTINGS_BEGIN_RANGE && id < AWS_HTTP2_SETTINGS_END_RANGE) { value = aws_max_u32(value, aws_h2_settings_bounds[id][0]); value = aws_min_u32(value, aws_h2_settings_bounds[id][1]); } settings_array[i].id = id; settings_array[i].value = value; } } } struct aws_h2_frame *frame = aws_h2_frame_new_settings(allocator, settings_array, settings_count, ack); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); aws_mem_release(allocator, settings_array); break; } case AWS_H2_FRAME_T_PUSH_PROMISE: { uint32_t stream_id = s_generate_odd_stream_id(&input); uint32_t promised_stream_id = s_generate_even_stream_id(&input); uint8_t pad_length = 0; aws_byte_cursor_read_u8(&input, &pad_length); /* generate headers last since it uses up the rest of input */ struct aws_http_headers *headers = 
s_generate_headers(allocator, &input, HEADER_STYLE_REQUEST); struct aws_h2_frame *frame = aws_h2_frame_new_push_promise(allocator, stream_id, promised_stream_id, headers, pad_length); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); aws_http_headers_release(headers); break; } case AWS_H2_FRAME_T_PING: { uint8_t flags; aws_byte_cursor_read_u8(&input, &flags); bool ack = flags & AWS_H2_FRAME_F_ACK; uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0}; size_t copy_len = aws_min_size(input.len, AWS_HTTP2_PING_DATA_SIZE); if (copy_len > 0) { struct aws_byte_cursor copy = aws_byte_cursor_advance(&input, copy_len); memcpy(opaque_data, copy.ptr, copy.len); } struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, ack, opaque_data); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); break; } case AWS_H2_FRAME_T_GOAWAY: { uint32_t last_stream_id = s_generate_stream_id(&input); uint32_t error_code = 0; aws_byte_cursor_read_be32(&input, &error_code); /* Pass debug_data that might be too large (it will get truncated if necessary) */ struct aws_byte_cursor debug_data = aws_byte_cursor_advance(&input, input.len); struct aws_h2_frame *frame = aws_h2_frame_new_goaway(allocator, last_stream_id, error_code, debug_data); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); break; } case AWS_H2_FRAME_T_WINDOW_UPDATE: { /* WINDOW_UPDATE's stream-id can be zero or non-zero */ uint32_t stream_id = 0; aws_byte_cursor_read_be32(&input, &stream_id); stream_id = aws_min_u32(stream_id, AWS_H2_STREAM_ID_MAX); uint32_t window_size_increment = 0; aws_byte_cursor_read_be32(&input, &window_size_increment); window_size_increment = aws_min_u32(window_size_increment, AWS_H2_WINDOW_UPDATE_MAX); struct aws_h2_frame *frame = aws_h2_frame_new_window_update(allocator, stream_id, window_size_increment); AWS_FATAL_ASSERT(frame); bool frame_complete; AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(frame_complete == true); aws_h2_frame_destroy(frame); break; } case AWS_H2_FRAME_T_CONTINUATION: /* We don't directly create CONTINUATION frames (they occur when HEADERS or PUSH_PROMISE gets too big) */ frame_type = AWS_H2_FRAME_T_UNKNOWN; /* fallthrough */ case AWS_H2_FRAME_T_UNKNOWN: { /* #YOLO roll our own frame */ uint32_t payload_length = aws_min_u32(input.len, MAX_PAYLOAD_SIZE - AWS_H2_FRAME_PREFIX_SIZE); /* Write payload length */ aws_byte_buf_write_be24(&frame_data, payload_length); /* Write type */ aws_byte_buf_write_u8(&frame_data, frame_type); /* Write flags */ uint8_t flags = 0; aws_byte_cursor_read_u8(&input, &flags); aws_byte_buf_write_u8(&frame_data, flags); /* Write stream-id */ uint32_t stream_id = 0; aws_byte_cursor_read_be32(&input, &stream_id); aws_byte_buf_write_be32(&frame_data, stream_id); /* Write payload */ aws_byte_buf_write_from_whole_cursor(&frame_data, aws_byte_cursor_advance(&input, payload_length)); break; } default: { AWS_FATAL_ASSERT(false); } } /* Decode whatever we got */ AWS_FATAL_ASSERT(frame_data.len > 0); struct 
aws_byte_cursor to_decode = aws_byte_cursor_from_buf(&frame_data); struct aws_h2err err = aws_h2_decode(decoder, &to_decode); AWS_FATAL_ASSERT(aws_h2err_success(err)); AWS_FATAL_ASSERT(to_decode.len == 0); /* Clean up */ aws_byte_buf_clean_up(&frame_data); aws_h2_decoder_destroy(decoder); aws_h2_frame_encoder_clean_up(&encoder); aws_logger_set(NULL); aws_logger_clean_up(&logger); atexit(aws_http_library_clean_up); /* Check for leaks */ AWS_FATAL_ASSERT(aws_mem_tracer_count(allocator) == 0); allocator = aws_mem_tracer_destroy(allocator); return 0; } AWS_EXTERN_C_END aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/fuzz/fuzz_h2_decoder_random.c000066400000000000000000000032531456575232400271320ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_EXTERN_C_BEGIN int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { /* Setup allocator and parameters */ struct aws_allocator *allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0); struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(data, size); /* Enable logging */ struct aws_logger logger; struct aws_logger_standard_options log_options = { .level = AWS_LL_TRACE, .file = stdout, }; aws_logger_init_standard(&logger, allocator, &log_options); aws_logger_set(&logger); /* Init HTTP (s2n init is weird, so don't do this under the tracer) */ aws_http_library_init(aws_default_allocator()); /* Create the decoder */ struct aws_h2_decoder_vtable decoder_vtable = {0}; struct aws_h2_decoder_params decoder_params = { .alloc = allocator, .vtable = &decoder_vtable, .skip_connection_preface = true, }; struct aws_h2_decoder *decoder = aws_h2_decoder_new(&decoder_params); /* Decode whatever we got */ aws_h2_decode(decoder, &to_decode); /* Clean up */ aws_h2_decoder_destroy(decoder); aws_logger_set(NULL); aws_logger_clean_up(&logger); atexit(aws_http_library_clean_up); /* Check for leaks */ ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(allocator)); allocator = aws_mem_tracer_destroy(allocator); return 0; } AWS_EXTERN_C_END aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/h2_test_helper.c000066400000000000000000000746111456575232400244350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "h2_test_helper.h" #include #include #include /******************************************************************************* * h2_decoded_frame ******************************************************************************/ static void s_frame_init( struct h2_decoded_frame *frame, struct aws_allocator *alloc, enum aws_h2_frame_type type, uint32_t stream_id) { AWS_ZERO_STRUCT(*frame); frame->type = type; frame->stream_id = stream_id; frame->headers = aws_http_headers_new(alloc); AWS_FATAL_ASSERT(0 == aws_array_list_init_dynamic(&frame->settings, alloc, 16, sizeof(struct aws_http2_setting))); AWS_FATAL_ASSERT(0 == aws_byte_buf_init(&frame->data, alloc, 1024)); } static void s_frame_clean_up(struct h2_decoded_frame *frame) { if (!frame) { return; } aws_http_headers_release(frame->headers); aws_array_list_clean_up(&frame->settings); aws_byte_buf_clean_up(&frame->data); } int h2_decoded_frame_check_finished( const struct h2_decoded_frame *frame, enum aws_h2_frame_type expected_type, uint32_t expected_stream_id) { ASSERT_INT_EQUALS(expected_type, frame->type); ASSERT_UINT_EQUALS(expected_stream_id, frame->stream_id); ASSERT_TRUE(frame->finished); return AWS_OP_SUCCESS; } /******************************************************************************* * h2_decode_tester ******************************************************************************/ size_t h2_decode_tester_frame_count(const struct h2_decode_tester *decode_tester) { return aws_array_list_length(&decode_tester->frames); } struct h2_decoded_frame *h2_decode_tester_get_frame(const struct h2_decode_tester *decode_tester, size_t i) { AWS_FATAL_ASSERT(h2_decode_tester_frame_count(decode_tester) > i); struct h2_decoded_frame *frame = NULL; aws_array_list_get_at_ptr(&decode_tester->frames, (void **)&frame, i); return frame; } struct h2_decoded_frame *h2_decode_tester_latest_frame(const struct h2_decode_tester *decode_tester) { size_t frame_count = h2_decode_tester_frame_count(decode_tester); AWS_FATAL_ASSERT(frame_count != 0); return h2_decode_tester_get_frame(decode_tester, frame_count - 1); } struct h2_decoded_frame *h2_decode_tester_find_frame( const struct h2_decode_tester *decode_tester, enum aws_h2_frame_type type, size_t search_start_idx, size_t *out_idx) { return h2_decode_tester_find_stream_frame(decode_tester, type, UINT32_MAX /*stream_id*/, search_start_idx, out_idx); } struct h2_decoded_frame *h2_decode_tester_find_stream_frame_any_type( const struct h2_decode_tester *decode_tester, uint32_t stream_id, size_t search_start_idx, size_t *out_idx) { return h2_decode_tester_find_stream_frame( decode_tester, AWS_H2_FRAME_TYPE_COUNT /*frame_type*/, stream_id, search_start_idx, out_idx); } struct h2_decoded_frame *h2_decode_tester_find_stream_frame( const struct h2_decode_tester *decode_tester, enum aws_h2_frame_type type, uint32_t stream_id, size_t search_start_idx, size_t *out_idx) { size_t frame_count = h2_decode_tester_frame_count(decode_tester); if (out_idx) { *out_idx = frame_count; } for (size_t i = search_start_idx; i < frame_count; ++i) { struct h2_decoded_frame *frame = h2_decode_tester_get_frame(decode_tester, i); if (frame->type == type || type == AWS_H2_FRAME_TYPE_COUNT) { if (frame->stream_id == stream_id || stream_id == UINT32_MAX) { if (out_idx) { *out_idx = i; } return frame; } } } return NULL; } int h2_decode_tester_check_data_across_frames( const struct h2_decode_tester *decode_tester, uint32_t stream_id, struct aws_byte_cursor expected, bool expect_end_stream) { struct aws_byte_buf data; 
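/* Descriptive note (editor-added): the loop below concatenates the payload of every DATA frame
 * recorded for this stream, in decode order, then compares the result against `expected` and
 * checks whether the last DATA frame carried END_STREAM. */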
ASSERT_SUCCESS(aws_byte_buf_init(&data, decode_tester->alloc, 128)); bool found_end_stream = false; for (size_t frame_i = 0; frame_i < h2_decode_tester_frame_count(decode_tester); ++frame_i) { struct h2_decoded_frame *frame = h2_decode_tester_get_frame(decode_tester, frame_i); if (frame->type == AWS_H2_FRAME_T_DATA && frame->stream_id == stream_id) { struct aws_byte_cursor frame_data = aws_byte_cursor_from_buf(&frame->data); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&data, &frame_data)); found_end_stream = frame->end_stream; } } ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, data.buffer, data.len); ASSERT_UINT_EQUALS(expect_end_stream, found_end_stream); aws_byte_buf_clean_up(&data); return AWS_OP_SUCCESS; } int h2_decode_tester_check_data_str_across_frames( const struct h2_decode_tester *decode_tester, uint32_t stream_id, const char *expected, bool expect_end_stream) { return h2_decode_tester_check_data_across_frames( decode_tester, stream_id, aws_byte_cursor_from_c_str(expected), expect_end_stream); } /* decode-tester begins recording a new frame's data */ static void s_begin_new_frame( struct h2_decode_tester *decode_tester, enum aws_h2_frame_type type, uint32_t stream_id, struct h2_decoded_frame **out_frame) { /* If there's a previous frame, assert that we know it was finished. * If this fails, some on_X_begin(), on_X_i(), on_X_end() loop didn't fire correctly. * It should be impossible for an unrelated callback to fire during these loops */ if (aws_array_list_length(&decode_tester->frames) > 0) { const struct h2_decoded_frame *prev_frame = h2_decode_tester_latest_frame(decode_tester); AWS_FATAL_ASSERT(prev_frame->finished); } /* Create new frame */ struct h2_decoded_frame new_frame; s_frame_init(&new_frame, decode_tester->alloc, type, stream_id); AWS_FATAL_ASSERT(0 == aws_array_list_push_back(&decode_tester->frames, &new_frame)); if (out_frame) { aws_array_list_get_at_ptr( &decode_tester->frames, (void **)out_frame, aws_array_list_length(&decode_tester->frames) - 1); } } /* decode-tester stops recording the latest frame's data */ static void s_end_current_frame( struct h2_decode_tester *decode_tester, enum aws_h2_frame_type type, uint32_t stream_id) { struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); AWS_FATAL_ASSERT(!frame->finished); frame->finished = true; AWS_FATAL_ASSERT(0 == h2_decoded_frame_check_finished(frame, type, stream_id)); } static struct aws_h2err s_decoder_on_headers_begin(uint32_t stream_id, void *userdata) { struct h2_decode_tester *decode_tester = userdata; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_HEADERS, stream_id, NULL /*out_frame*/); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_on_header( bool is_push_promise, uint32_t stream_id, const struct aws_http_header *header, enum aws_http_header_name name_enum, enum aws_http_header_block block_type, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); /* Validate */ if (is_push_promise) { AWS_FATAL_ASSERT(AWS_H2_FRAME_T_PUSH_PROMISE == frame->type); } else { AWS_FATAL_ASSERT(AWS_H2_FRAME_T_HEADERS == frame->type); /* block-type should be same for each header in block */ if (aws_http_headers_count(frame->headers) > 0) { AWS_FATAL_ASSERT(frame->header_block_type == block_type); } } AWS_FATAL_ASSERT(!frame->finished); AWS_FATAL_ASSERT(frame->stream_id == stream_id); AWS_FATAL_ASSERT(aws_http_lowercase_str_to_header_name(header->name) == name_enum); /* Stash header */ 
AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_headers_add_header(frame->headers, header)); frame->header_block_type = block_type; return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_headers_i( uint32_t stream_id, const struct aws_http_header *header, enum aws_http_header_name name_enum, enum aws_http_header_block block_type, void *userdata) { return s_on_header(false /* is_push_promise */, stream_id, header, name_enum, block_type, userdata); } static struct aws_h2err s_on_headers_end( bool is_push_promise, uint32_t stream_id, bool malformed, enum aws_http_header_block block_type, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); /* end() should report same block-type as i() calls */ if (!is_push_promise && aws_http_headers_count(frame->headers) > 0) { AWS_FATAL_ASSERT(frame->header_block_type == block_type); } frame->header_block_type = block_type; frame->headers_malformed = malformed; s_end_current_frame( decode_tester, is_push_promise ? AWS_H2_FRAME_T_PUSH_PROMISE : AWS_H2_FRAME_T_HEADERS, stream_id); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_headers_end( uint32_t stream_id, bool malformed, enum aws_http_header_block block_type, void *userdata) { return s_on_headers_end(false /*is_push_promise*/, stream_id, malformed, block_type, userdata); } static struct aws_h2err s_decoder_on_push_promise_begin( uint32_t stream_id, uint32_t promised_stream_id, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_PUSH_PROMISE, stream_id, &frame /*out_frame*/); frame->promised_stream_id = promised_stream_id; return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_push_promise_i( uint32_t stream_id, const struct aws_http_header *header, enum aws_http_header_name name_enum, void *userdata) { return s_on_header(true /* is_push_promise */, stream_id, header, name_enum, AWS_HTTP_HEADER_BLOCK_MAIN, userdata); } static struct aws_h2err s_decoder_on_push_promise_end(uint32_t stream_id, bool malformed, void *userdata) { return s_on_headers_end(true /*is_push_promise*/, stream_id, malformed, AWS_HTTP_HEADER_BLOCK_MAIN, userdata); } static struct aws_h2err s_decoder_on_data_begin( uint32_t stream_id, uint32_t payload_len, uint32_t total_padding_bytes, bool end_stream, void *userdata) { (void)total_padding_bytes; struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_DATA, stream_id, &frame); frame->data_payload_len = payload_len; frame->data_end_stream = end_stream; return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_data_i(uint32_t stream_id, struct aws_byte_cursor data, void *userdata) { (void)stream_id; struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); /* Validate */ AWS_FATAL_ASSERT(AWS_H2_FRAME_T_DATA == frame->type); AWS_FATAL_ASSERT(!frame->finished); AWS_FATAL_ASSERT(frame->stream_id == stream_id); /* Stash data*/ AWS_FATAL_ASSERT(0 == aws_byte_buf_append_dynamic(&frame->data, &data)); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_data_end(uint32_t stream_id, void *userdata) { struct h2_decode_tester *decode_tester = userdata; s_end_current_frame(decode_tester, AWS_H2_FRAME_T_DATA, stream_id); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); 
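/* Descriptive note (editor-added): the bytes accumulated via on_data_i() must never exceed the
 * payload length that was reported by on_data_begin() for this DATA frame. */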
AWS_FATAL_ASSERT(frame->data.len <= frame->data_payload_len); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_end_stream(uint32_t stream_id, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); /* Validate */ /* on_end_stream should fire IMMEDIATELY after on_data OR after on_headers_end. * This timing lets the user close the stream from a single callback */ AWS_FATAL_ASSERT(frame->finished); AWS_FATAL_ASSERT(frame->type == AWS_H2_FRAME_T_HEADERS || frame->type == AWS_H2_FRAME_T_DATA); AWS_FATAL_ASSERT(frame->stream_id == stream_id); if (frame->type == AWS_H2_FRAME_T_DATA) { AWS_FATAL_ASSERT(frame->data_end_stream); } /* Stash */ frame->end_stream = true; return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_rst_stream(uint32_t stream_id, uint32_t error_code, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_RST_STREAM, stream_id, &frame); /* Stash data*/ frame->error_code = error_code; s_end_current_frame(decode_tester, AWS_H2_FRAME_T_RST_STREAM, stream_id); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_settings( const struct aws_http2_setting *settings_array, size_t num_settings, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0, &frame); /* Stash setting */ for (size_t i = 0; i < num_settings; i++) { AWS_FATAL_ASSERT(0 == aws_array_list_push_back(&frame->settings, &settings_array[i])); } s_end_current_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_settings_ack(void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/, &frame); /* Stash data*/ frame->ack = true; s_end_current_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/, &frame); /* Stash data*/ memcpy(frame->ping_opaque_data, opaque_data, AWS_HTTP2_PING_DATA_SIZE); s_end_current_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_ping_ack(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/, &frame); /* Stash data*/ memcpy(frame->ping_opaque_data, opaque_data, AWS_HTTP2_PING_DATA_SIZE); frame->ack = true; s_end_current_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_goaway( uint32_t last_stream, uint32_t error_code, struct aws_byte_cursor debug_data, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_GOAWAY, 0, &frame); frame->goaway_last_stream_id = last_stream; frame->error_code = error_code; /* Stash data */ AWS_FATAL_ASSERT(0 == aws_byte_buf_append_dynamic(&frame->data, &debug_data)); 
s_end_current_frame(decode_tester, AWS_H2_FRAME_T_GOAWAY, 0); return AWS_H2ERR_SUCCESS; } static struct aws_h2err s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata) { struct h2_decode_tester *decode_tester = userdata; struct h2_decoded_frame *frame; s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, &frame); frame->window_size_increment = window_size_increment; s_end_current_frame(decode_tester, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id); return AWS_H2ERR_SUCCESS; } static struct aws_h2_decoder_vtable s_decoder_vtable = { .on_headers_begin = s_decoder_on_headers_begin, .on_headers_i = s_decoder_on_headers_i, .on_headers_end = s_decoder_on_headers_end, .on_push_promise_begin = s_decoder_on_push_promise_begin, .on_push_promise_i = s_decoder_on_push_promise_i, .on_push_promise_end = s_decoder_on_push_promise_end, .on_data_begin = s_decoder_on_data_begin, .on_data_i = s_decoder_on_data_i, .on_data_end = s_decoder_on_data_end, .on_end_stream = s_decoder_on_end_stream, .on_rst_stream = s_decoder_on_rst_stream, .on_settings = s_decoder_on_settings, .on_settings_ack = s_decoder_on_settings_ack, .on_ping = s_decoder_on_ping, .on_ping_ack = s_decoder_on_ping_ack, .on_goaway = s_decoder_on_goaway, .on_window_update = s_decoder_on_window_update, }; int h2_decode_tester_init(struct h2_decode_tester *decode_tester, const struct h2_decode_tester_options *options) { AWS_ZERO_STRUCT(*decode_tester); decode_tester->alloc = options->alloc; struct aws_h2_decoder_params decoder_params = { .alloc = options->alloc, .vtable = &s_decoder_vtable, .userdata = decode_tester, .is_server = options->is_server, .skip_connection_preface = options->skip_connection_preface, }; decode_tester->decoder = aws_h2_decoder_new(&decoder_params); ASSERT_NOT_NULL(decode_tester->decoder); ASSERT_SUCCESS( aws_array_list_init_dynamic(&decode_tester->frames, options->alloc, 16, sizeof(struct h2_decoded_frame))); return AWS_OP_SUCCESS; } void h2_decode_tester_clean_up(struct h2_decode_tester *decode_tester) { aws_h2_decoder_destroy(decode_tester->decoder); for (size_t i = 0; i < aws_array_list_length(&decode_tester->frames); ++i) { struct h2_decoded_frame *frame; aws_array_list_get_at_ptr(&decode_tester->frames, (void **)&frame, i); s_frame_clean_up(frame); } aws_array_list_clean_up(&decode_tester->frames); AWS_ZERO_STRUCT(*decode_tester); } /******************************************************************************* * h2_fake_peer ******************************************************************************/ int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_options *options) { AWS_ZERO_STRUCT(*peer); peer->alloc = options->alloc; peer->testing_channel = options->testing_channel; peer->is_server = options->is_server; ASSERT_SUCCESS(aws_h2_frame_encoder_init(&peer->encoder, peer->alloc, NULL /*logging_id*/)); struct h2_decode_tester_options decode_options = {.alloc = options->alloc, .is_server = options->is_server}; ASSERT_SUCCESS(h2_decode_tester_init(&peer->decode, &decode_options)); return AWS_OP_SUCCESS; } void h2_fake_peer_clean_up(struct h2_fake_peer *peer) { if (!peer) { return; } aws_h2_frame_encoder_clean_up(&peer->encoder); h2_decode_tester_clean_up(&peer->decode); AWS_ZERO_STRUCT(peer); } int h2_fake_peer_decode_messages_from_testing_channel(struct h2_fake_peer *peer) { struct aws_byte_buf msg_buf; ASSERT_SUCCESS(aws_byte_buf_init(&msg_buf, peer->alloc, 128)); ASSERT_SUCCESS(testing_channel_drain_written_messages(peer->testing_channel, 
&msg_buf)); struct aws_byte_cursor msg_cursor = aws_byte_cursor_from_buf(&msg_buf); ASSERT_H2ERR_SUCCESS(aws_h2_decode(peer->decode.decoder, &msg_cursor)); ASSERT_UINT_EQUALS(0, msg_cursor.len); aws_byte_buf_clean_up(&msg_buf); return AWS_OP_SUCCESS; } int h2_fake_peer_send_frame(struct h2_fake_peer *peer, struct aws_h2_frame *frame) { ASSERT_NOT_NULL(frame); bool frame_complete = false; while (!frame_complete) { struct aws_io_message *msg = aws_channel_acquire_message_from_pool( peer->testing_channel->channel, AWS_IO_MESSAGE_APPLICATION_DATA, g_aws_channel_max_fragment_size); ASSERT_NOT_NULL(msg); ASSERT_SUCCESS(aws_h2_encode_frame(&peer->encoder, frame, &msg->message_data, &frame_complete)); ASSERT_TRUE(msg->message_data.len != 0); ASSERT_SUCCESS(testing_channel_push_read_message(peer->testing_channel, msg)); } aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } int h2_fake_peer_send_data_frame( struct h2_fake_peer *peer, uint32_t stream_id, struct aws_byte_cursor data, bool end_stream) { return h2_fake_peer_send_data_frame_with_padding_length(peer, stream_id, data, end_stream, 0); } int h2_fake_peer_send_data_frame_with_padding_length( struct h2_fake_peer *peer, uint32_t stream_id, struct aws_byte_cursor data, bool end_stream, uint8_t padding_length) { struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(peer->alloc, &data); ASSERT_NOT_NULL(body_stream); struct aws_io_message *msg = aws_channel_acquire_message_from_pool( peer->testing_channel->channel, AWS_IO_MESSAGE_APPLICATION_DATA, g_aws_channel_max_fragment_size); ASSERT_NOT_NULL(msg); bool body_complete; bool body_stalled; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &peer->encoder, stream_id, body_stream, end_stream, padding_length /*pad_length*/, &stream_window_size_peer, &connection_window_size_peer, &msg->message_data, &body_complete, &body_stalled)); ASSERT_TRUE(body_complete); ASSERT_FALSE(body_stalled); ASSERT_TRUE(msg->message_data.len != 0); ASSERT_SUCCESS(testing_channel_push_read_message(peer->testing_channel, msg)); aws_input_stream_release(body_stream); return AWS_OP_SUCCESS; } int h2_fake_peer_send_data_frame_str(struct h2_fake_peer *peer, uint32_t stream_id, const char *data, bool end_stream) { return h2_fake_peer_send_data_frame(peer, stream_id, aws_byte_cursor_from_c_str(data), end_stream); } int h2_fake_peer_send_connection_preface(struct h2_fake_peer *peer, struct aws_h2_frame *settings) { if (!peer->is_server) { /* Client must first send magic string */ ASSERT_SUCCESS(testing_channel_push_read_data(peer->testing_channel, aws_h2_connection_preface_client_string)); } /* Both server and client send SETTINGS as first proper frame */ ASSERT_SUCCESS(h2_fake_peer_send_frame(peer, settings)); return AWS_OP_SUCCESS; } int h2_fake_peer_send_connection_preface_default_settings(struct h2_fake_peer *peer) { /* Empty SETTINGS frame means "everything default" */ struct aws_h2_frame *settings = aws_h2_frame_new_settings(peer->alloc, NULL, 0, false /*ack*/); ASSERT_NOT_NULL(settings); ASSERT_SUCCESS(h2_fake_peer_send_connection_preface(peer, settings)); return AWS_OP_SUCCESS; } /******************************************************************************/ struct aws_input_stream_tester { struct aws_input_stream base; struct aws_allocator *allocator; /* aws_input_stream_byte_cursor provides our actual functionality */ struct aws_input_stream *cursor_stream; size_t max_bytes_per_read; bool 
is_reading_broken; }; static int s_aws_input_stream_tester_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { struct aws_input_stream_tester *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester, base); return aws_input_stream_seek(impl->cursor_stream, offset, basis); } static int s_aws_input_stream_tester_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct aws_input_stream_tester *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester, base); if (impl->is_reading_broken) { return aws_raise_error(AWS_IO_STREAM_READ_FAILED); } /* prevent more than max_bytes_per_read by temporarily limiting the buffer's capacity */ size_t prev_capacity = dest->capacity; size_t max_capacity = aws_add_size_saturating(dest->len, impl->max_bytes_per_read); dest->capacity = aws_min_size(prev_capacity, max_capacity); int err = aws_input_stream_read(impl->cursor_stream, dest); dest->capacity = prev_capacity; return err; } static int s_aws_input_stream_tester_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { struct aws_input_stream_tester *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester, base); return aws_input_stream_get_status(impl->cursor_stream, status); } static int s_aws_input_stream_tester_get_length(struct aws_input_stream *stream, int64_t *out_length) { struct aws_input_stream_tester *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester, base); return aws_input_stream_get_length(impl->cursor_stream, out_length); } static void s_aws_input_stream_tester_destroy(struct aws_input_stream_tester *impl) { aws_input_stream_release(impl->cursor_stream); aws_mem_release(impl->allocator, impl); } static struct aws_input_stream_vtable s_aws_input_stream_tester_vtable = { .seek = s_aws_input_stream_tester_seek, .read = s_aws_input_stream_tester_read, .get_status = s_aws_input_stream_tester_get_status, .get_length = s_aws_input_stream_tester_get_length, }; struct aws_input_stream *aws_input_stream_new_tester(struct aws_allocator *alloc, struct aws_byte_cursor cursor) { struct aws_input_stream_tester *impl = aws_mem_calloc(alloc, 1, sizeof(struct aws_input_stream_tester)); AWS_FATAL_ASSERT(impl); impl->max_bytes_per_read = SIZE_MAX; impl->cursor_stream = aws_input_stream_new_from_cursor(alloc, &cursor); AWS_FATAL_ASSERT(impl->cursor_stream); impl->allocator = alloc; impl->base.vtable = &s_aws_input_stream_tester_vtable; aws_ref_count_init( &impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_stream_tester_destroy); return &impl->base; } void aws_input_stream_tester_set_max_bytes_per_read(struct aws_input_stream *input_stream, size_t max_bytes) { struct aws_input_stream_tester *impl = AWS_CONTAINER_OF(input_stream, struct aws_input_stream_tester, base); impl->max_bytes_per_read = max_bytes; } void aws_input_stream_tester_set_reading_broken(struct aws_input_stream *input_stream, bool is_broken) { struct aws_input_stream_tester *impl = AWS_CONTAINER_OF(input_stream, struct aws_input_stream_tester, base); impl->is_reading_broken = is_broken; } struct aws_input_stream_tester_upload_impl { struct aws_input_stream base; size_t position; size_t length; size_t num_sentence_sent; struct aws_allocator *allocator; }; static int s_aws_input_stream_tester_upload_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { (void)stream; (void)offset; (void)basis; /* Stream should never be seeked; all reads should be sequential. 
*/ aws_raise_error(AWS_ERROR_UNKNOWN); return AWS_OP_ERR; } const struct aws_byte_cursor s_test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("This is CRT HTTP test."); static int s_aws_input_stream_tester_upload_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { (void)stream; (void)dest; struct aws_input_stream_tester_upload_impl *test_input_stream = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester_upload_impl, base); while (dest->len < dest->capacity && test_input_stream->length - test_input_stream->position > 0) { size_t buffer_pos = test_input_stream->position % s_test_string.len; struct aws_byte_cursor source_byte_cursor = { .len = s_test_string.len - buffer_pos, .ptr = s_test_string.ptr + buffer_pos, }; size_t remaining_in_buffer = aws_min_size(dest->capacity - dest->len, test_input_stream->length - test_input_stream->position); if (remaining_in_buffer < source_byte_cursor.len) { source_byte_cursor.len = remaining_in_buffer; } aws_byte_buf_append(dest, &source_byte_cursor); buffer_pos += source_byte_cursor.len; test_input_stream->position += source_byte_cursor.len; } return AWS_OP_SUCCESS; } static int s_aws_input_stream_tester_upload_get_status( struct aws_input_stream *stream, struct aws_stream_status *status) { (void)stream; (void)status; struct aws_input_stream_tester_upload_impl *test_input_stream = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester_upload_impl, base); status->is_end_of_stream = test_input_stream->position == test_input_stream->length; status->is_valid = true; return AWS_OP_SUCCESS; } static int s_aws_input_stream_tester_upload_get_length(struct aws_input_stream *stream, int64_t *out_length) { AWS_ASSERT(stream != NULL); struct aws_input_stream_tester_upload_impl *test_input_stream = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester_upload_impl, base); *out_length = (int64_t)test_input_stream->length; return AWS_OP_SUCCESS; } static void s_aws_input_stream_tester_upload_destroy(struct aws_input_stream_tester_upload_impl *test_input_stream) { aws_mem_release(test_input_stream->allocator, test_input_stream); } static struct aws_input_stream_vtable s_aws_input_stream_tester_upload_vtable = { .seek = s_aws_input_stream_tester_upload_seek, .read = s_aws_input_stream_tester_upload_read, .get_status = s_aws_input_stream_tester_upload_get_status, .get_length = s_aws_input_stream_tester_upload_get_length, }; struct aws_input_stream *aws_input_stream_tester_upload_new(struct aws_allocator *alloc, size_t length) { struct aws_input_stream_tester_upload_impl *test_input_stream = aws_mem_calloc(alloc, 1, sizeof(struct aws_input_stream_tester_upload_impl)); test_input_stream->base.vtable = &s_aws_input_stream_tester_upload_vtable; aws_ref_count_init( &test_input_stream->base.ref_count, test_input_stream, (aws_simple_completion_callback *)s_aws_input_stream_tester_upload_destroy); struct aws_input_stream *input_stream = &test_input_stream->base; test_input_stream->position = 0; test_input_stream->length = length; test_input_stream->allocator = alloc; test_input_stream->num_sentence_sent = length / s_test_string.len; return input_stream; } size_t aws_input_stream_tester_upload_get_num_sentence_sent(struct aws_input_stream *stream) { struct aws_input_stream_tester_upload_impl *test_input_stream = AWS_CONTAINER_OF(stream, struct aws_input_stream_tester_upload_impl, base); return test_input_stream->num_sentence_sent; } 
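/*
 * Illustrative usage sketch (editor-added, not part of the upstream test helper):
 * how the h2_decode_tester defined above is typically driven in a test.
 * Assumptions: the caller already has a cursor of encoded HTTP/2 frame bytes,
 * and the s_example_* name is hypothetical. Only functions and types declared
 * in h2_test_helper.h are used.
 */
static int s_example_decode_tester_usage(struct aws_allocator *alloc, struct aws_byte_cursor encoded_frames) {
    struct h2_decode_tester tester;
    struct h2_decode_tester_options options = {
        .alloc = alloc,
        .is_server = false,
        .skip_connection_preface = true, /* feed raw frames without the connection preface */
    };
    ASSERT_SUCCESS(h2_decode_tester_init(&tester, &options));

    /* Run the bytes through the decoder; the vtable callbacks record each frame into tester.frames */
    ASSERT_H2ERR_SUCCESS(aws_h2_decode(tester.decoder, &encoded_frames));
    ASSERT_UINT_EQUALS(0, encoded_frames.len);

    /* Inspect what was decoded, e.g. locate the first SETTINGS frame */
    struct h2_decoded_frame *settings_frame =
        h2_decode_tester_find_frame(&tester, AWS_H2_FRAME_T_SETTINGS, 0 /*search_start_idx*/, NULL /*out_idx*/);
    ASSERT_NOT_NULL(settings_frame);

    h2_decode_tester_clean_up(&tester);
    return AWS_OP_SUCCESS;
}
/* In channel-based tests this pattern is wrapped by h2_fake_peer, whose
 * h2_fake_peer_decode_messages_from_testing_channel() drains written messages
 * and feeds them through the same decode tester. */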
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/h2_test_helper.h000066400000000000000000000321021456575232400244270ustar00rootroot00000000000000#ifndef AWS_HTTP_H2_TEST_HELPER_H #define AWS_HTTP_H2_TEST_HELPER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include struct aws_input_stream; #define ASSERT_H2ERR_SUCCESS(condition, ...) \ do { \ struct aws_h2err assert_rv = (condition); \ if (!aws_h2err_success(assert_rv)) { \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0( \ "Expected success at %s; got aws_h2err{%s, %s}\n", \ #condition, \ aws_http2_error_code_to_str(assert_rv.h2_code), \ aws_error_name(assert_rv.aws_code)); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_H2ERR_FAILS(condition, ...) \ do { \ struct aws_h2err assert_rv = (condition); \ if (!aws_h2err_failed(assert_rv)) { \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0("Expected failure at %s; got AWS_H2ERR_SUCCESS\n", #condition); \ } \ POSTFAIL_INTERNAL(); \ } \ } while (0) #define ASSERT_H2ERR_ERROR(h2_error, condition, ...) \ do { \ struct aws_h2err assert_rv = (condition); \ if (!aws_h2err_failed(assert_rv)) { \ if (!PRINT_FAIL_INTERNAL0(__VA_ARGS__)) { \ PRINT_FAIL_INTERNAL0( \ "Expected %s failure at %s; got AWS_H2ERR_SUCCESS\n", \ aws_http2_error_code_to_str(h2_error), \ #condition); \ } \ POSTFAIL_INTERNAL(); \ } \ if (assert_rv.h2_code != h2_error) { \ PRINT_FAIL_INTERNAL0( \ "Expected %s failure at %s; got aws_h2err{%s, %s}\n", \ aws_http2_error_code_to_str(h2_error), \ #condition, \ aws_http2_error_code_to_str(assert_rv.h2_code), \ aws_error_name(assert_rv.aws_code)); \ } \ } while (0) /** * Information gathered about a given frame from decoder callbacks. * These aren't 1:1 with literal H2 frames: * - The decoder hides the existence of CONTINUATION frames, * their data continues the preceding HEADERS or PUSH_PROMISE frame. * * - A DATA frame could appear as N on_data callbacks. * * - The on_end_stream callback fires after all other callbacks for that frame, * so we count it as part of the preceding "finished" frame. */ struct h2_decoded_frame { /* If true, we expect no further callbacks regarding this frame */ bool finished; enum aws_h2_frame_type type; /* All frame types have this */ uint32_t stream_id; /* All frame types have this */ /* * Everything else is only found in certain frame types */ bool end_stream; /* HEADERS and DATA might have this */ bool ack; /* PING and SETTINGS might have this */ uint32_t error_code; /* RST_STREAM and GOAWAY have this */ uint32_t promised_stream_id; /* PUSH_PROMISE has this */ uint32_t goaway_last_stream_id; /* GOAWAY has this */ uint8_t ping_opaque_data[AWS_HTTP2_PING_DATA_SIZE]; /* PING has this */ uint32_t window_size_increment; /* WINDOW_UPDATE has this */ struct aws_http_headers *headers; /* HEADERS and PUSH_PROMISE have this */ bool headers_malformed; /* HEADERS and PUSH_PROMISE have this */ enum aws_http_header_block header_block_type; /* HEADERS have this */ struct aws_array_list settings; /* contains aws_http2_setting, SETTINGS has this */ struct aws_byte_buf data; /* DATA and GOAWAY have this */ uint32_t data_payload_len; /* DATA has this */ bool data_end_stream; /* DATA has this */ }; /** * Check that: * - frame finished (ex: if HEADERS frame, then on_headers_end() fired) * - frame was in fact using the expected type and stream_id. 
*/ int h2_decoded_frame_check_finished( const struct h2_decoded_frame *frame, enum aws_h2_frame_type expected_type, uint32_t expected_stream_id); /******************************************************************************/ /** * Translates decoder callbacks into an array-list of h2_decoded_frames. */ struct h2_decode_tester { struct aws_allocator *alloc; struct aws_h2_decoder *decoder; struct aws_array_list frames; /* contains h2_decoded_frame */ }; struct h2_decode_tester_options { struct aws_allocator *alloc; bool is_server; bool skip_connection_preface; }; int h2_decode_tester_init(struct h2_decode_tester *decode_tester, const struct h2_decode_tester_options *options); void h2_decode_tester_clean_up(struct h2_decode_tester *decode_tester); size_t h2_decode_tester_frame_count(const struct h2_decode_tester *decode_tester); struct h2_decoded_frame *h2_decode_tester_get_frame(const struct h2_decode_tester *decode_tester, size_t i); struct h2_decoded_frame *h2_decode_tester_latest_frame(const struct h2_decode_tester *decode_tester); /** * Search for frame of a given type, starting at specified index. * To search for the next frame, pass search_start_idx = prev_idx + 1 */ struct h2_decoded_frame *h2_decode_tester_find_frame( const struct h2_decode_tester *decode_tester, enum aws_h2_frame_type type, size_t search_start_idx, size_t *out_idx); /** * Search for frame of a given stream-id, starting at specified index. * To search for the next frame, pass search_start_idx = prev_idx + 1 */ struct h2_decoded_frame *h2_decode_tester_find_stream_frame_any_type( const struct h2_decode_tester *decode_tester, uint32_t stream_id, size_t search_start_idx, size_t *out_idx); /** * Search for frame of a given type and stream-id, starting at specified index. * To search for the next frame, pass search_start_idx = prev_idx + 1 */ struct h2_decoded_frame *h2_decode_tester_find_stream_frame( const struct h2_decode_tester *decode_tester, enum aws_h2_frame_type type, uint32_t stream_id, size_t search_start_idx, size_t *out_idx); /** * Compare data (which may be split across N frames) against expected */ int h2_decode_tester_check_data_across_frames( const struct h2_decode_tester *decode_tester, uint32_t stream_id, struct aws_byte_cursor expected, bool expect_end_stream); /** * Compare data (which may be split across N frames) against expected */ int h2_decode_tester_check_data_str_across_frames( const struct h2_decode_tester *decode_tester, uint32_t stream_id, const char *expected, bool expect_end_stream); /******************************************************************************/ /** * Fake HTTP/2 peer. * Can decode H2 frames that are written to the testing channel. * Can encode H2 frames and push them into the channel in the read direction. */ struct h2_fake_peer { struct aws_allocator *alloc; struct testing_channel *testing_channel; struct aws_h2_frame_encoder encoder; struct h2_decode_tester decode; bool is_server; }; struct h2_fake_peer_options { struct aws_allocator *alloc; struct testing_channel *testing_channel; bool is_server; }; int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_options *options); void h2_fake_peer_clean_up(struct h2_fake_peer *peer); /** * Pop all written messages off the testing-channel and run them through the peer's decode-tester */ int h2_fake_peer_decode_messages_from_testing_channel(struct h2_fake_peer *peer); /** * Encode frame and push it into the testing-channel in the read-direction. * Takes ownership of frame and destroys after sending. 
*/ int h2_fake_peer_send_frame(struct h2_fake_peer *peer, struct aws_h2_frame *frame); /** * Encode the entire byte cursor into a single DATA frame. * Fails if the cursor is too large for this to work. */ int h2_fake_peer_send_data_frame( struct h2_fake_peer *peer, uint32_t stream_id, struct aws_byte_cursor data, bool end_stream); /** * Encode the entire byte cursor into a single DATA frame. * Fails if the cursor is too large for this to work. */ int h2_fake_peer_send_data_frame_with_padding_length( struct h2_fake_peer *peer, uint32_t stream_id, struct aws_byte_cursor data, bool end_stream, uint8_t padding_length); /** * Encode the entire string into a single DATA frame. * Fails if the string is too large for this to work. */ int h2_fake_peer_send_data_frame_str(struct h2_fake_peer *peer, uint32_t stream_id, const char *data, bool end_stream); /** * Peer sends the connection preface with specified settings. * Takes ownership of frame and destroys after sending */ int h2_fake_peer_send_connection_preface(struct h2_fake_peer *peer, struct aws_h2_frame *settings); /** * Peer sends the connection preface with default settings. */ int h2_fake_peer_send_connection_preface_default_settings(struct h2_fake_peer *peer); /******************************************************************************/ /** * Create input stream that can do weird stuff in tests */ struct aws_input_stream *aws_input_stream_new_tester(struct aws_allocator *alloc, struct aws_byte_cursor cursor); void aws_input_stream_tester_set_max_bytes_per_read(struct aws_input_stream *input_stream, size_t max_bytes); void aws_input_stream_tester_set_reading_broken(struct aws_input_stream *input_stream, bool is_broken); /** * Create input stream that can upload a certain length of stuff */ struct aws_input_stream *aws_input_stream_tester_upload_new(struct aws_allocator *alloc, size_t length); size_t aws_input_stream_tester_upload_get_num_sentence_sent(struct aws_input_stream *stream); #endif /* AWS_HTTP_H2_TEST_HELPER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/proxy_test_helper.c000066400000000000000000000450171456575232400253030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "proxy_test_helper.h" enum { TESTER_TIMEOUT_SEC = 60, /* Give enough time for non-sudo users to enter password */ }; struct testing_channel_bootstrap_wrapper { struct testing_channel *channel; struct aws_http_client_bootstrap *bootstrap; }; static struct testing_channel_bootstrap_wrapper *s_get_current_channel_bootstrap_wrapper(struct proxy_tester *tester) { struct testing_channel_bootstrap_wrapper *wrapper = NULL; size_t count = aws_array_list_length(&tester->testing_channels); aws_array_list_get_at_ptr(&tester->testing_channels, (void **)&wrapper, count - 1); return wrapper; } void proxy_tester_on_client_connection_setup(struct aws_http_connection *connection, int error_code, void *user_data) { struct proxy_tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->client_connection_is_setup = true; if (error_code) { tester->client_connection = NULL; tester->wait_result = error_code; goto done; } tester->client_connection = connection; done: AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } void proxy_tester_on_client_connection_shutdown( struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; struct proxy_tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->client_connection_is_shutdown = true; AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } int proxy_tester_wait(struct proxy_tester *tester, bool (*pred)(void *user_data)) { ASSERT_SUCCESS(aws_mutex_lock(&tester->wait_lock)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&tester->wait_cvar, &tester->wait_lock, pred, tester)); ASSERT_SUCCESS(aws_mutex_unlock(&tester->wait_lock)); return AWS_OP_SUCCESS; } bool proxy_tester_connection_setup_pred(void *user_data) { struct proxy_tester *tester = user_data; return tester->wait_result || tester->client_connection; } bool proxy_tester_connection_complete_pred(void *user_data) { struct proxy_tester *tester = user_data; return tester->client_connection_is_setup; } bool proxy_tester_connection_shutdown_pred(void *user_data) { struct proxy_tester *tester = user_data; return tester->wait_result || tester->client_connection_is_shutdown; } bool proxy_tester_request_complete_pred_fn(void *user_data) { struct proxy_tester *tester = user_data; return tester->request_complete || tester->client_connection_is_shutdown; } int proxy_tester_init(struct proxy_tester *tester, const struct proxy_tester_options *options) { AWS_ZERO_STRUCT(*tester); tester->alloc = options->alloc; aws_http_library_init(options->alloc); ASSERT_SUCCESS(aws_array_list_init_dynamic( &tester->testing_channels, options->alloc, 1, sizeof(struct testing_channel_bootstrap_wrapper))); tester->host = options->host; tester->port = options->port; tester->proxy_options = *options->proxy_options; tester->test_mode = options->test_mode; tester->failure_type = options->failure_type; ASSERT_SUCCESS(aws_byte_buf_init(&tester->connection_host_name, tester->alloc, 128)); ASSERT_SUCCESS(aws_mutex_init(&tester->wait_lock)); ASSERT_SUCCESS(aws_condition_variable_init(&tester->wait_cvar)); ASSERT_SUCCESS( aws_array_list_init_dynamic(&tester->connect_requests, 
tester->alloc, 1, sizeof(struct aws_http_message *))); uint32_t connect_response_count = 1; if (options->desired_connect_response_count > connect_response_count) { connect_response_count = options->desired_connect_response_count; } ASSERT_SUCCESS(aws_array_list_init_dynamic( &tester->desired_connect_responses, tester->alloc, connect_response_count, sizeof(struct aws_string *))); for (size_t i = 0; i < options->desired_connect_response_count; ++i) { struct aws_byte_cursor response_cursor = options->desired_connect_responses[i]; struct aws_string *response = aws_string_new_from_cursor(tester->alloc, &response_cursor); ASSERT_SUCCESS(aws_array_list_push_back(&tester->desired_connect_responses, &response)); } tester->event_loop_group = aws_event_loop_group_new_default(tester->alloc, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->event_loop_group, .max_entries = 8, }; tester->host_resolver = aws_host_resolver_new_default(tester->alloc, &resolver_options); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV4, .connect_timeout_ms = (uint32_t)aws_timestamp_convert(TESTER_TIMEOUT_SEC, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), }; struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = tester->event_loop_group, .host_resolver = tester->host_resolver, }; tester->client_bootstrap = aws_client_bootstrap_new(tester->alloc, &bootstrap_options); ASSERT_NOT_NULL(tester->client_bootstrap); bool use_tls = options->test_mode == PTTM_HTTPS_TUNNEL; if (use_tls) { aws_tls_ctx_options_init_default_client(&tester->tls_ctx_options, tester->alloc); aws_tls_ctx_options_set_alpn_list(&tester->tls_ctx_options, "http/1.1"); tester->tls_ctx_options.verify_peer = false; tester->tls_ctx = aws_tls_client_ctx_new(tester->alloc, &tester->tls_ctx_options); aws_tls_connection_options_init_from_ctx(&tester->tls_connection_options, tester->tls_ctx); aws_tls_connection_options_set_server_name(&tester->tls_connection_options, tester->alloc, &tester->host); } /* Connect */ struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; client_options.allocator = tester->alloc; client_options.bootstrap = tester->client_bootstrap; client_options.host_name = tester->host; client_options.port = tester->port; client_options.socket_options = &socket_options; client_options.tls_options = use_tls ? 
&tester->tls_connection_options : NULL; client_options.user_data = tester; client_options.on_setup = proxy_tester_on_client_connection_setup; client_options.on_shutdown = proxy_tester_on_client_connection_shutdown; if (options->proxy_options) { client_options.proxy_options = options->proxy_options; } aws_http_client_connect(&client_options); /* Wait for server & client connections to finish setup */ ASSERT_SUCCESS(proxy_tester_wait(tester, proxy_tester_connection_setup_pred)); return AWS_OP_SUCCESS; } int proxy_tester_clean_up(struct proxy_tester *tester) { if (tester->client_connection) { aws_http_connection_release(tester->client_connection); } size_t channel_count = aws_array_list_length(&tester->testing_channels); for (size_t i = 0; i < channel_count; ++i) { struct testing_channel_bootstrap_wrapper wrapper; aws_array_list_get_at(&tester->testing_channels, &wrapper, i); struct testing_channel *channel = wrapper.channel; if (channel) { ASSERT_SUCCESS(testing_channel_clean_up(channel)); while (!testing_channel_is_shutdown_completed(channel)) { aws_thread_current_sleep(1000000000); } aws_mem_release(tester->alloc, channel); } } ASSERT_SUCCESS(proxy_tester_wait(tester, proxy_tester_connection_shutdown_pred)); for (size_t i = 0; i < channel_count; ++i) { struct testing_channel_bootstrap_wrapper wrapper; aws_array_list_get_at(&tester->testing_channels, &wrapper, i); if (wrapper.bootstrap != NULL) { if (channel_count == 0 && wrapper.bootstrap->user_data) { aws_http_proxy_user_data_destroy(wrapper.bootstrap->user_data); } if (i + 1 < channel_count) { wrapper.bootstrap->on_shutdown(tester->client_connection, 0, wrapper.bootstrap->user_data); } aws_http_client_bootstrap_destroy(wrapper.bootstrap); } } aws_array_list_clean_up(&tester->testing_channels); aws_client_bootstrap_release(tester->client_bootstrap); aws_host_resolver_release(tester->host_resolver); aws_event_loop_group_release(tester->event_loop_group); if (tester->tls_ctx) { aws_tls_connection_options_clean_up(&tester->tls_connection_options); aws_tls_ctx_release(tester->tls_ctx); aws_tls_ctx_options_clean_up(&tester->tls_ctx_options); } size_t connect_request_count = aws_array_list_length(&tester->connect_requests); for (size_t i = 0; i < connect_request_count; ++i) { struct aws_http_message *request = NULL; aws_array_list_get_at(&tester->connect_requests, &request, i); aws_http_message_release(request); } aws_array_list_clean_up(&tester->connect_requests); size_t connect_response_count = aws_array_list_length(&tester->desired_connect_responses); for (size_t i = 0; i < connect_response_count; ++i) { struct aws_string *response = NULL; aws_array_list_get_at(&tester->desired_connect_responses, &response, i); aws_string_destroy(response); } aws_array_list_clean_up(&tester->desired_connect_responses); aws_http_library_clean_up(); aws_byte_buf_clean_up(&tester->connection_host_name); return AWS_OP_SUCCESS; } static void s_testing_channel_shutdown_callback(int error_code, void *user_data) { struct proxy_tester *tester = user_data; if (tester->wait_result == AWS_ERROR_SUCCESS) { tester->wait_result = error_code; } struct testing_channel_bootstrap_wrapper *wrapper = s_get_current_channel_bootstrap_wrapper(tester); wrapper->bootstrap->on_shutdown(tester->client_connection, tester->wait_result, wrapper->bootstrap->user_data); } int proxy_tester_create_testing_channel_connection( struct proxy_tester *tester, struct aws_http_client_bootstrap *http_bootstrap) { struct testing_channel_bootstrap_wrapper *old_wrapper = 
s_get_current_channel_bootstrap_wrapper(tester); if (old_wrapper != NULL) { old_wrapper->channel->channel_shutdown = NULL; } struct testing_channel *testing_channel = aws_mem_calloc(tester->alloc, 1, sizeof(struct testing_channel)); struct aws_testing_channel_options test_channel_options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(testing_channel, tester->alloc, &test_channel_options)); testing_channel->channel_shutdown = s_testing_channel_shutdown_callback; testing_channel->channel_shutdown_user_data = tester; /* Use small window so that we can observe it opening in tests. * Channel may wait until the window is small before issuing the increment command. */ struct aws_http1_connection_options http1_options; AWS_ZERO_STRUCT(http1_options); struct aws_http_connection *connection = aws_http_connection_new_http1_1_client(tester->alloc, true, 256, &http1_options); ASSERT_NOT_NULL(connection); connection->user_data = http_bootstrap->user_data; connection->client_data = &connection->client_or_server_data.client; connection->proxy_request_transform = http_bootstrap->proxy_request_transform; struct aws_channel_slot *slot = aws_channel_slot_new(testing_channel->channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(testing_channel->channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &connection->channel_handler)); connection->vtable->on_channel_handler_installed(&connection->channel_handler, slot); testing_channel_drain_queued_tasks(testing_channel); tester->client_connection = connection; struct testing_channel_bootstrap_wrapper wrapper; wrapper.channel = testing_channel; wrapper.bootstrap = http_bootstrap; aws_array_list_push_back(&tester->testing_channels, &wrapper); return AWS_OP_SUCCESS; } bool s_line_feed_predicate(uint8_t value) { return value == '\r'; } /* * A very crude, sloppy http request parser that does just enough to test what we want to test */ static int s_record_connect_request(struct aws_byte_buf *request_buffer, struct proxy_tester *tester) { struct aws_byte_cursor request_cursor = aws_byte_cursor_from_buf(request_buffer); struct aws_array_list lines; ASSERT_SUCCESS(aws_array_list_init_dynamic(&lines, tester->alloc, 10, sizeof(struct aws_byte_cursor))); aws_byte_cursor_split_on_char(&request_cursor, '\n', &lines); size_t line_count = aws_array_list_length(&lines); ASSERT_TRUE(line_count > 1); struct aws_http_message *message = aws_http_message_new_request(tester->alloc); struct aws_byte_cursor first_line_cursor; AWS_ZERO_STRUCT(first_line_cursor); aws_array_list_get_at(&lines, &first_line_cursor, 0); first_line_cursor = aws_byte_cursor_trim_pred(&first_line_cursor, s_line_feed_predicate); struct aws_byte_cursor method_cursor; AWS_ZERO_STRUCT(method_cursor); aws_byte_cursor_next_split(&first_line_cursor, ' ', &method_cursor); aws_http_message_set_request_method(message, method_cursor); aws_byte_cursor_advance(&first_line_cursor, method_cursor.len + 1); struct aws_byte_cursor uri_cursor; AWS_ZERO_STRUCT(uri_cursor); aws_byte_cursor_next_split(&first_line_cursor, ' ', &uri_cursor); aws_http_message_set_request_path(message, uri_cursor); for (size_t i = 1; i < line_count; ++i) { struct aws_byte_cursor line_cursor; AWS_ZERO_STRUCT(line_cursor); aws_array_list_get_at(&lines, &line_cursor, i); line_cursor = aws_byte_cursor_trim_pred(&line_cursor, s_line_feed_predicate); if (line_cursor.len == 0) { break; } struct aws_byte_cursor name_cursor; AWS_ZERO_STRUCT(name_cursor); aws_byte_cursor_next_split(&line_cursor, 
':', &name_cursor); aws_byte_cursor_advance(&line_cursor, name_cursor.len + 1); line_cursor = aws_byte_cursor_trim_pred(&line_cursor, aws_isspace); struct aws_http_header header = { .name = name_cursor, .value = line_cursor, }; aws_http_message_add_header(message, header); } /* we don't care about the body */ aws_array_list_push_back(&tester->connect_requests, &message); aws_array_list_clean_up(&lines); return AWS_OP_SUCCESS; } int proxy_tester_verify_connect_request(struct proxy_tester *tester) { struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, tester->alloc, 1024)); struct testing_channel *testing_channel = proxy_tester_get_current_channel(tester); ASSERT_NOT_NULL(testing_channel); ASSERT_SUCCESS(testing_channel_drain_written_messages(testing_channel, &output)); char connect_request_buffer[1024]; snprintf( connect_request_buffer, AWS_ARRAY_SIZE(connect_request_buffer), "CONNECT " PRInSTR ":%u HTTP/1.1", AWS_BYTE_CURSOR_PRI(tester->host), tester->port); struct aws_byte_cursor expected_connect_message_first_line_cursor = aws_byte_cursor_from_c_str(connect_request_buffer); ASSERT_TRUE(output.len >= expected_connect_message_first_line_cursor.len); struct aws_byte_cursor request_prefix = aws_byte_cursor_from_array(output.buffer, output.len); struct aws_byte_cursor first_line_cursor; AWS_ZERO_STRUCT(first_line_cursor); ASSERT_TRUE(aws_byte_cursor_next_split(&request_prefix, '\r', &first_line_cursor)); ASSERT_TRUE(aws_byte_cursor_eq(&first_line_cursor, &expected_connect_message_first_line_cursor)); ASSERT_SUCCESS(s_record_connect_request(&output, tester)); aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } int proxy_tester_send_connect_response(struct proxy_tester *tester) { (void)tester; const char *response_string = NULL; size_t desired_response_count = aws_array_list_length(&tester->desired_connect_responses); if (desired_response_count > 0) { struct aws_string *response = NULL; aws_array_list_get_at(&tester->desired_connect_responses, &response, tester->current_response_index++); response_string = (const char *)response->bytes; } else if (tester->failure_type == PTFT_CONNECT_REQUEST) { response_string = "HTTP/1.0 407 Unauthorized\r\n\r\n"; } else { /* adding close here because it's an edge case we need to exercise. The desired behavior is that it has * absolutely no effect. 
*/ response_string = "HTTP/1.0 200 Connection established\r\nconnection: close\r\n\r\n"; } struct testing_channel *channel = proxy_tester_get_current_channel(tester); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str(channel, response_string)); testing_channel_drain_queued_tasks(channel); return AWS_OP_SUCCESS; } int proxy_tester_verify_connection_attempt_was_to_proxy( struct proxy_tester *tester, struct aws_byte_cursor expected_host, uint32_t expected_port) { ASSERT_BIN_ARRAYS_EQUALS( tester->connection_host_name.buffer, tester->connection_host_name.len, expected_host.ptr, expected_host.len, "Connection host should have been \"" PRInSTR "\", but was \"" PRInSTR "\".", AWS_BYTE_CURSOR_PRI(expected_host), AWS_BYTE_BUF_PRI(tester->connection_host_name)); ASSERT_TRUE(tester->connection_port == expected_port); return AWS_OP_SUCCESS; } struct testing_channel *proxy_tester_get_current_channel(struct proxy_tester *tester) { struct testing_channel_bootstrap_wrapper *wrapper = s_get_current_channel_bootstrap_wrapper(tester); if (wrapper == NULL) { return NULL; } return wrapper->channel; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/proxy_test_helper.h000066400000000000000000000073441456575232400253110ustar00rootroot00000000000000#ifndef AWS_HTTP_PROXY_TEST_HELPER_H #define AWS_HTTP_PROXY_TEST_HELPER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include struct aws_http_client_bootstrap; struct testing_channel; typedef void(aws_http_release_connection_fn)(struct aws_http_connection *connection); enum proxy_tester_test_mode { PTTM_HTTP_FORWARD = 0, PTTM_HTTP_TUNNEL, PTTM_HTTPS_TUNNEL, }; enum proxy_tester_failure_type { PTFT_NONE = 0, PTFT_CONNECT_REQUEST, PTFT_TLS_NEGOTIATION, PTFT_CHANNEL, PTFT_CONNECTION, PTFT_PROXY_STRATEGY, }; struct proxy_tester_options { struct aws_allocator *alloc; struct aws_http_proxy_options *proxy_options; struct aws_byte_cursor host; uint32_t port; enum proxy_tester_test_mode test_mode; enum proxy_tester_failure_type failure_type; uint32_t desired_connect_response_count; struct aws_byte_cursor *desired_connect_responses; }; struct proxy_tester { struct aws_allocator *alloc; struct aws_logger logger; struct aws_event_loop_group *event_loop_group; struct aws_host_resolver *host_resolver; struct aws_client_bootstrap *client_bootstrap; struct aws_tls_ctx *tls_ctx; struct aws_tls_ctx_options tls_ctx_options; struct aws_tls_connection_options tls_connection_options; struct aws_http_proxy_options proxy_options; struct aws_byte_cursor host; uint32_t port; enum proxy_tester_test_mode test_mode; enum proxy_tester_failure_type failure_type; struct aws_http_connection *client_connection; struct aws_array_list testing_channels; bool client_connection_is_setup; bool client_connection_is_shutdown; /* If we need to wait for some async process*/ struct aws_mutex wait_lock; struct aws_condition_variable wait_cvar; int wait_result; bool request_successful; bool request_complete; bool tls_finished; bool tls_successful; struct aws_byte_buf connection_host_name; uint32_t connection_port; struct aws_array_list connect_requests; uint32_t current_response_index; struct aws_array_list desired_connect_responses; }; int proxy_tester_wait(struct proxy_tester *tester, bool (*pred)(void *user_data)); bool proxy_tester_connection_setup_pred(void *user_data); bool proxy_tester_connection_complete_pred(void *user_data); bool 
proxy_tester_connection_shutdown_pred(void *user_data); bool proxy_tester_request_complete_pred_fn(void *user_data); int proxy_tester_init(struct proxy_tester *tester, const struct proxy_tester_options *options); int proxy_tester_clean_up(struct proxy_tester *tester); void proxy_tester_on_client_connection_setup(struct aws_http_connection *connection, int error_code, void *user_data); void proxy_tester_on_client_connection_shutdown( struct aws_http_connection *connection, int error_code, void *user_data); void proxy_tester_on_client_bootstrap_shutdown(void *user_data); int proxy_tester_create_testing_channel_connection( struct proxy_tester *tester, struct aws_http_client_bootstrap *http_bootstrap); int proxy_tester_verify_connect_request(struct proxy_tester *tester); int proxy_tester_send_connect_response(struct proxy_tester *tester); int proxy_tester_verify_connection_attempt_was_to_proxy( struct proxy_tester *tester, struct aws_byte_cursor expected_host, uint32_t expected_port); struct testing_channel *proxy_tester_get_current_channel(struct proxy_tester *tester); #endif /* AWS_HTTP_PROXY_TEST_HELPER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/py_localhost/000077500000000000000000000000001456575232400240515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/py_localhost/README.md000066400000000000000000000041331456575232400253310ustar00rootroot00000000000000# Local server

Local server based on [python-hyper/h2](https://github.com/python-hyper/h2).

## How to run the server

Python 3.5+ required.

- Install the hyper h2 Python module: `python3 -m pip install h2`

### TLS server

- The code is based on the [example](https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py) from the hyper h2 server.
- Have the cert/key ready. The script currently uses `../resources/unittests.crt`; you can either run the script from within this directory, which finds the certificate and key via the relative path, or use your own and change the code accordingly.
- Run the server: `python3 ./server.py`.

#### Echo

- Minor change based on the example: `/echo` sends the request's headers back within the response headers.
- To verify the server runs correctly, you can do `curl -k -v -H "foo:bar" https://localhost:3443/echo` and check the result.

#### Download test

- To test download, when `:path` is `/downloadTest`, the server responds with a repeated string of length `self.download_test_length`, which is currently 2,500,000,000. The body is repeats of the string "This is CRT HTTP test."
- To verify the server runs correctly, you can do `curl -k -v -H "foo:bar" https://localhost:3443/downloadTest` and check the result.

#### Slow Connection Test

- Simulates a slow connection when `:path` is `/slowConnTest`. The speed is controlled by `out_bytes_per_second`; the default is 900 B/s, i.e. the server sends 900 bytes of data and waits one second before sending the next 900 bytes.

#### Upload test

- To test upload, when `:method` is `POST` or `PUT`, the server responds with the number of bytes received in the request body.
- To verify the server runs correctly, you can do `curl -k -X POST -F'data=@upload_test.txt' https://localhost:3443/upload_test` where `upload_test.txt` is the file to upload.

### Non-TLS server

- The code is based on the non-TLS [example](http://python-hyper.org/projects/h2/en/stable/basic-usage.html) from the hyper h2 documentation.
- Run the server: `python3 ./non_tls_server.py`.
- To verify the server runs correctly, you can do `curl -v --http2-prior-knowledge http://localhost:3280` and check the result.
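
### Example Python client (illustrative)

The curl commands above are the simplest way to smoke-test the endpoints. As a rough sketch (not part of the test suite), the `/echo` check can also be driven from Python using the same `h2` package the server is built on. It assumes `server.py` is running locally on port 3443 with its self-signed certificate, so TLS verification is disabled (the equivalent of curl's `-k`).

```python
import socket
import ssl

import h2.connection
import h2.events

SERVER = 'localhost'
PORT = 3443

# The test server uses a self-signed certificate, so skip verification
# (equivalent to curl's -k). Only do this against local test servers.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.set_alpn_protocols(['h2'])

sock = ctx.wrap_socket(socket.create_connection((SERVER, PORT)), server_hostname=SERVER)

conn = h2.connection.H2Connection()
conn.initiate_connection()
sock.sendall(conn.data_to_send())

# Same request as the curl example: GET /echo with an extra "foo: bar" header.
conn.send_headers(
    stream_id=1,
    headers=[
        (':method', 'GET'),
        (':path', '/echo'),
        (':authority', SERVER),
        (':scheme', 'https'),
        ('foo', 'bar'),
    ],
    end_stream=True,
)
sock.sendall(conn.data_to_send())

body = b''
done = False
while not done:
    data = sock.recv(65535)
    if not data:
        break
    for event in conn.receive_data(data):
        if isinstance(event, h2.events.ResponseReceived):
            print(event.headers)  # the echoed "foo: bar" header should appear here
        elif isinstance(event, h2.events.DataReceived):
            body += event.data
            # Re-open the flow-control window so larger bodies keep flowing.
            conn.acknowledge_received_data(event.flow_controlled_length, event.stream_id)
        elif isinstance(event, h2.events.StreamEnded):
            done = True
    sock.sendall(conn.data_to_send())

print(body.decode())

conn.close_connection()
sock.sendall(conn.data_to_send())
sock.close()
```

If the server is up, the printed headers should include the echoed `foo` header, and the body should be the small JSON document built in `handle_request_echo`.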
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/py_localhost/non_tls_server.py000066400000000000000000000024121456575232400274640ustar00rootroot00000000000000# example server from http://python-hyper.org/projects/h2/en/stable/basic-usage.html import json import socket import h2.connection import h2.events import h2.config def send_response(conn, event): stream_id = event.stream_id response_data = b"success" conn.send_headers( stream_id=stream_id, headers=[ (':status', '200'), ('content-length', str(len(response_data))), ], ) conn.send_data( stream_id=stream_id, data=response_data, end_stream=True ) def handle(sock): config = h2.config.H2Configuration(client_side=False) conn = h2.connection.H2Connection(config=config) conn.initiate_connection() sock.sendall(conn.data_to_send()) while True: data = sock.recv(65535) if not data: break events = conn.receive_data(data) for event in events: if isinstance(event, h2.events.RequestReceived): send_response(conn, event) data_to_send = conn.data_to_send() if data_to_send: sock.sendall(data_to_send) sock = socket.socket() sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(('0.0.0.0', 3280)) sock.listen(5) while True: handle(sock.accept()[0]) aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/py_localhost/server.py000066400000000000000000000301201456575232400257250ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Minor change based on the example from hyper h2 server # https://github.com/python-hyper/h2/blob/master/examples/asyncio/asyncio-server.py """ asyncio-server.py ~~~~~~~~~~~~~~~~~ A fully-functional HTTP/2 server using asyncio. Requires Python 3.5+. This example demonstrates handling requests with bodies, as well as handling those without. In particular, it demonstrates the fact that DataReceived may be called multiple times, and that applications must handle that possibility. 
""" import asyncio import io import json import ssl import time import os import collections from typing import List, Tuple from h2.config import H2Configuration from h2.connection import H2Connection from h2.events import ( ConnectionTerminated, DataReceived, RemoteSettingsChanged, RequestReceived, StreamEnded, StreamReset, WindowUpdated ) from h2.errors import ErrorCodes from h2.exceptions import ProtocolError, StreamClosedError from h2.settings import SettingCodes RequestData = collections.namedtuple('RequestData', ['headers', 'data']) class H2Protocol(asyncio.Protocol): def __init__(self): config = H2Configuration(client_side=False, header_encoding='utf-8') self.conn = H2Connection(config=config) self.transport = None self.stream_data = {} self.flow_control_futures = {} self.file_path = None self.num_sentence_received = {} self.raw_headers = None self.download_test_length = 2500000000 self.out_bytes_per_second = 900 def connection_made(self, transport: asyncio.Transport): self.transport = transport self.conn.initiate_connection() self.transport.write(self.conn.data_to_send()) def connection_lost(self, exc): for future in self.flow_control_futures.values(): future.cancel() self.flow_control_futures = {} def data_received(self, data: bytes): try: events = self.conn.receive_data(data) except ProtocolError as e: self.transport.write(self.conn.data_to_send()) self.transport.close() else: self.transport.write(self.conn.data_to_send()) for event in events: if isinstance(event, RequestReceived): self.request_received(event.headers, event.stream_id) elif isinstance(event, DataReceived): self.receive_data(event.data, event.stream_id) elif isinstance(event, StreamEnded): self.stream_complete(event.stream_id) elif isinstance(event, ConnectionTerminated): self.transport.close() elif isinstance(event, StreamReset): self.stream_reset(event.stream_id) elif isinstance(event, WindowUpdated): self.window_updated(event.stream_id, event.delta) elif isinstance(event, RemoteSettingsChanged): if SettingCodes.INITIAL_WINDOW_SIZE in event.changed_settings: self.window_updated(None, 0) self.transport.write(self.conn.data_to_send()) def request_received(self, headers: List[Tuple[str, str]], stream_id: int): self.raw_headers = headers headers = collections.OrderedDict(headers) path = headers[':path'] method = headers[':method'] if method == "PUT" or method == "POST": self.file_path = os.path.join(os.path.curdir, path[1:]) if os.path.exists(self.file_path): os.remove(self.file_path) # Store off the request data. request_data = RequestData(headers, io.BytesIO()) self.stream_data[stream_id] = request_data def handle_request_echo(self, stream_id: int, request_data: RequestData): response_headers = [(':status', '200')] for i in self.raw_headers: # Response headers back and exclude pseudo headers if i[0][0] != ':': response_headers.append(i) body = request_data.data.getvalue().decode('utf-8') data = json.dumps( {"body": body}, indent=4 ).encode("utf8") self.conn.send_headers(stream_id, response_headers) asyncio.ensure_future(self.send_data(data, stream_id)) def stream_complete(self, stream_id: int): """ When a stream is complete, we can send our response. 
""" try: request_data = self.stream_data[stream_id] except KeyError: # Just return, we probably 405'd this already return path = request_data.headers[':path'] method = request_data.headers[':method'] if method == "PUT" or method == "POST": self.conn.send_headers(stream_id, [(':status', '200')]) asyncio.ensure_future(self.send_data( str(self.num_sentence_received[stream_id]).encode(), stream_id)) elif path == '/echo': self.handle_request_echo(stream_id, request_data) elif path == '/downloadTest': length = self.download_test_length self.conn.send_headers( stream_id, [(':status', '200'), ('content-length', str(length))]) asyncio.ensure_future(self.send_repeat_data(length, stream_id)) elif path == '/slowConnTest': length = int(self.download_test_length/1000) self.conn.send_headers( stream_id, [(':status', '200'), ('content-length', str(length))]) asyncio.ensure_future( self.send_slow_repeat_data(length, stream_id)) else: self.conn.send_headers(stream_id, [(':status', '404')]) asyncio.ensure_future(self.send_data(b"Not Found", stream_id)) def receive_data(self, data: bytes, stream_id: int): """ We've received some data on a stream. If that stream is one we're expecting data on, save it off. Otherwise, reset the stream. """ try: stream_data = self.stream_data[stream_id] except KeyError: self.conn.reset_stream( stream_id, error_code=ErrorCodes.PROTOCOL_ERROR ) else: method = stream_data.headers[':method'] if method == "PUT" or method == "POST": if stream_id in self.num_sentence_received: self.num_sentence_received[stream_id] = self.num_sentence_received[stream_id] + \ len(data) else: self.num_sentence_received[stream_id] = len(data) # update window for stream if len(data) > 0: self.conn.increment_flow_control_window(len(data)) self.conn.increment_flow_control_window( len(data), stream_id) else: stream_data.data.write(data) def stream_reset(self, stream_id): """ A stream reset was sent. Stop sending data. """ if stream_id in self.flow_control_futures: future = self.flow_control_futures.pop(stream_id) future.cancel() async def send_data(self, data, stream_id): """ Send data according to the flow control rules. """ while data: while self.conn.local_flow_control_window(stream_id) < 1: try: await self.wait_for_flow_control(stream_id) except asyncio.CancelledError: return chunk_size = min( self.conn.local_flow_control_window(stream_id), len(data), self.conn.max_outbound_frame_size, ) try: self.conn.send_data( stream_id, data[:chunk_size], end_stream=(chunk_size == len(data)) ) except (StreamClosedError, ProtocolError): # The stream got closed and we didn't get told. We're done # here. break self.transport.write(self.conn.data_to_send()) data = data[chunk_size:] async def send_repeat_data(self, length, stream_id): """ Send data with length according to the flow control rules. """ while length > 0: while self.conn.local_flow_control_window(stream_id) < 1: try: await self.wait_for_flow_control(stream_id) except asyncio.CancelledError: return chunk_size = min( self.conn.local_flow_control_window(stream_id), length, self.conn.max_outbound_frame_size, ) repeated = b"This is CRT HTTP test." data = int(chunk_size/len(repeated)) * repeated + \ repeated[:chunk_size % len(repeated)] try: self.conn.send_data( stream_id, data, end_stream=(chunk_size == length) ) except (StreamClosedError, ProtocolError): # The stream got closed and we didn't get told. We're done # here. 
break self.transport.write(self.conn.data_to_send()) length = length - chunk_size async def send_slow_repeat_data(self, length, stream_id): """ Send data with length slowly (less than 1000 bytes per second) """ while length > 0: while self.conn.local_flow_control_window(stream_id) < 1: try: await self.wait_for_flow_control(stream_id) except asyncio.CancelledError: return chunk_size = min( self.conn.local_flow_control_window(stream_id), length, self.conn.max_outbound_frame_size, self.out_bytes_per_second ) repeated = b"This is CRT HTTP test." data = int(chunk_size/len(repeated)) * repeated + \ repeated[:chunk_size % len(repeated)] try: # Sleep for a sec to make the out bytes per second slower than the expected time.sleep(1) self.conn.send_data( stream_id, data, end_stream=(chunk_size == length) ) except (StreamClosedError, ProtocolError): # The stream got closed and we didn't get told. We're done # here. break self.transport.write(self.conn.data_to_send()) length = length - chunk_size async def wait_for_flow_control(self, stream_id): """ Waits for a Future that fires when the flow control window is opened. """ f = asyncio.Future() self.flow_control_futures[stream_id] = f await f def window_updated(self, stream_id, delta): """ A window update frame was received. Unblock some number of flow control Futures. """ if stream_id and stream_id in self.flow_control_futures: f = self.flow_control_futures.pop(stream_id) f.set_result(delta) elif not stream_id: for f in self.flow_control_futures.values(): f.set_result(delta) self.flow_control_futures = {} ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_context.options |= (ssl.OP_NO_COMPRESSION) ssl_context.load_cert_chain( certfile="../resources/unittests.crt", keyfile="../resources/unittests.key") ssl_context.set_alpn_protocols(["h2"]) loop = asyncio.new_event_loop() # Each client connection will create a new protocol instance coro = loop.create_server(H2Protocol, '127.0.0.1', 3443, ssl=ssl_context) server = loop.run_until_complete(coro) # Serve requests until Ctrl+C is pressed print('Serving on {}'.format(server.sockets[0].getsockname())) try: loop.run_forever() except KeyboardInterrupt: pass # Close the server server.close() loop.run_until_complete(server.wait_closed()) loop.close() aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/resources/000077500000000000000000000000001456575232400233635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/resources/unittests.conf000066400000000000000000000010501456575232400262700ustar00rootroot00000000000000[ req ] prompt = no default_md = sha256 distinguished_name = req_distinguished_name x509_extensions = v3_ext [ req_distinguished_name ] countryName = US stateOrProvinceName = Washington localityName = Seattle organizationName = Amazon organizationalUnitName = SDKs commonName = localhost emailAddress = aws-sdk-common-runtime@amazon.com [ v3_ext ] # iOS 13+ and macOS 10.15+ require Subject Alternative Name and ExtendedKeyUsage with serverAuth extendedKeyUsage = serverAuth subjectAltName = @alt_names [ alt_names ] DNS.1 = localhost IP.1 = 127.0.0.1 aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/resources/unittests.crt000077500000000000000000000026131456575232400261440ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIID7DCCAtSgAwIBAgIJALEv6FDrJ/8NMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD VQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEP MA0GA1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhv 
c3QxMDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9u LmNvbTAeFw0yMzA5MTgxNDIyMTZaFw0yNTEyMjAxNDIyMTZaMIGaMQswCQYDVQQG EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0G A1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhvc3Qx MDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNv bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANdqV0j4DkQDJULWEW8b s/znGqK2p9wthY8o4btL7nEhGUsMQyae+UwUBDGn0qUhCgEC3g7e8bg0Q2J+dleF BOnBfsU1obc7H+5oTf5R2gz3L0dgEjwBJM5IpfCgi2OHurU8UsEPe7KZTbhGdPfR 6CWE0yxWkXiH3dQ982dRGHEsPMPhmdksRFH2FEi9ghZiGEpEI55bCQiKQqBoA4gQ D2yFCTtylgQ19CYBg28d1n941xv2Ok+tyz7DvgEttEQr3BBdBf65QyDcyORABztU zhHfXyjrviQCtOj8NZu+wYDqxOxbbyBu5GDVbjhD3iJzh5Drqq8g4rAdT8IsjzSG 6nUCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMwEYIJbG9j YWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQDSURKIog6XLQQDbVpyfAW0 V8exQDzWyjwSF+ZwiTzATPZAiRg5K4UcBa9rB/+I9nkkWeSBBBSYlF5D4QKPEp9a fZLQ5GRU4AQ1FOQyvvbt+bQJx5nEE68ebuVPkZVQdHlQKmrJVuOzFlO+6tZwvyfP YppnMJsQawlRgZqPKAronU/5U2S7Z3CPHzAhWH3TsyJAEuu94UabLE3cXM2243rN HOT7JxKHrCxJotxvsxQEl42wwSZ7tw2cIK6MtavLs3k7OpDol8uge7jWCE32oNQ2 heg5USXHy1qAdw7YXG0WjDq9WN8pz6FwNT51IQSKvn1dcLIVW2uLVv8wn/v69A8z -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/resources/unittests.key000077500000000000000000000032171456575232400261450ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA12pXSPgORAMlQtYRbxuz/Ocaoran3C2Fjyjhu0vucSEZSwxD Jp75TBQEMafSpSEKAQLeDt7xuDRDYn52V4UE6cF+xTWhtzsf7mhN/lHaDPcvR2AS PAEkzkil8KCLY4e6tTxSwQ97splNuEZ099HoJYTTLFaReIfd1D3zZ1EYcSw8w+GZ 2SxEUfYUSL2CFmIYSkQjnlsJCIpCoGgDiBAPbIUJO3KWBDX0JgGDbx3Wf3jXG/Y6 T63LPsO+AS20RCvcEF0F/rlDINzI5EAHO1TOEd9fKOu+JAK06Pw1m77BgOrE7Ftv IG7kYNVuOEPeInOHkOuqryDisB1PwiyPNIbqdQIDAQABAoIBAESQuI+lRQUo6ydG 8+2lp7iL5tJ7yRov8x8KKC9xj8e6fU6B7K3SVA9/H4aeoFGnHoQL4ZpiJBY5rGkh T5Gz6UhuKmejFoI384Xy9UBJ1VnjI81YKvWmd4yhWxAoSbW4chlVxhFlWD4UxcQt yPVIftfSW1T1iQAQXu87eMod6eW7VWlyMKicYkBGB2ohI0hW8chx361z96QcpxhA yBAfnhxuTgKFYSRVfwYSOjHYPOvozmU7Wj0iURT+1MM4iO8YlBDuZEJArs3WAdIe pmCq6snzOAJ6Y9iE0EGti9QGiAo6na/nWAfVlRSMyS/C1GC0oM0MnpRKSLW0tvLV vtJG81ECgYEA7lzGpdlAKwWNKPc2YIbtUNomD/eOr7TzYedYxJ88SG52THjgE3Pu poF3wZFjdtlwx1u4nsxlVe50FBTCN5s2FV4/8YP980zis+HtUC5pWCO3Oy6+DjSj K9st+mGyzYjl3opVqcQZkHj1LPqNxBmvFpDgAtVZfdKSdyuzZpj8s5sCgYEA51rj EFa/ijILp1P5vKn8b3pIfQFSsUsX5NXTy31f/2UwVV491djMyNyhtaRcrXP9CYpq 38o1xvUaxe2hlND/jiBjBHfsC13oUOVz8TrAzxDKAzbGLcOT2trgxMFbR8Ez+jur 1yQbPnoKZrB7SopAkcVqZv4ks0LLu+BLfEFXYy8CgYEApN8xXDgoRVnCqQpN53iM n/c0iqjOXkTIb/jIksAdv3AAjaayP2JaOXul7RL2fJeshYiw684vbb/RNK6jJDlM sH0Pt6t3tZmB2bC1KFfh7+BMdjg/p63LC6PAasa3GanObh67YADPOfoghCsOcgzd 6brt56fRDdHgE2P75ER/zm8CgYEArAxx6bepT3syIWiYww3itYBJofS26zP9++Zs T9rX5hT5IbMo5vwIJqO0+mDVrwQfu9Wc7vnwjhm+pEy4qfPW6Hn7SNppxnY6itZo J4/azOIeaM92B5h3Pv0gxBFK8YyjO8beXurx+79ENuOtfFxd8knOe/Mplcnpurjt SeVJuG8CgYBxEYouOM9UuZlblXQXfudTWWf+x5CEWxyJgKaktHEh3iees1gB7ZPb OewLa8AYVjqbNgS/r/aUFjpBbCov8ICxcy86SuGda10LDFX83sbyMm8XhktfyC3L 54irVW5mNUDcA8s9+DloeTlUlJIr8J/RADC9rpqHLaZzcdvpIMhVsw== -----END RSA PRIVATE KEY----- aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/resources/unittests.p12000066400000000000000000000050651456575232400257570ustar00rootroot000000000000000 10  *H   0 0 *H 00} *H 0 *H  0%P\>)]1ᶸ rOc>G/Eއ}F8& 2*p'u'SrБb3etJ&\dQ&L_j`or%^4B/P! KQ!,~p  G4ĩ.VTZ8SlٞC} )0kX #N`?۔];D#)cxFSKɟNL̹Z _J'xyD*GC эB'9L>& h<=' L42m? 
MԵYgNw>=؃eMKa##ZmAURvQ7zuff1q{{0[S](u;/5;9+qw+]j%e3ܚޗQ&ZhO_V2 ^qYrЅ4PBwXtbl+hPXY8v(x]$w`I*;* *f~uT62gF=20#׋:B@L%'aƒO~RP:D OMӛX9e*ҁiFNQw- nqr4$8Okw]$ZXvAW BD=2`Uh7uEXf>q, m::I^ dJnz?'t'\%p{(wF}<dw8"3g`KŢR{~.8rLbȌtRTɏUJUޡ|@$CIڻ`Cr(?%htq4QjxoerSUrdfbj g"yC$0A *H 2.0*0& *H  00 *H  0_oHY ,ۨUJP>).mNo)U(/tD>+:p2_Tl{d˟W*3$Ml` }]=Pu,qUJ(CgƲeX[DAGl:#ZF.$gk֪_hIQKōo?u J"ŪydAv/|htcxs N S#W 5 rƪqf&H! ̙ *0`4~z fnWvbV^tNBˢa+mzވ{#)X2D]Ά:u_-ZЫXZN~ ~co{X=߀yp`^*7FvH/δ/gVOukf/>!֫T8/[_<+QvZ:nrPE܄.W² OЄ:E;1(j[k.M`MPNEfCq+YSjY)ekLSоy7c/{׌+9F'1fS1%0# *H  1Y1K40#f_010!0 +2f θ bzT+EC!.aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/resources/unittests.readme000066400000000000000000000014651456575232400266120ustar00rootroot00000000000000--- README FOR unittests.* files --- These files are used in unit tests that create TLS connections between a localhost server and client. We use a single self-signed certificate which serves as both the server's certificate and the client's root CA. unittests.key: private key unittests.crt: self-signed certificate unittests.conf: configuration for generating unittests.crt unittests.p12: pkcs#12 file bundling the certificate and private key. Password is "1234" Apple won't trust any certificate whose lifetime is over 825 days. Once it expires unit tests will start failing and it will need to be updated like so: $ openssl req -x509 -new -key unittests.key -config unittests.conf -out unittests.crt -days 824 $ openssl pkcs12 -export -out unittests.p12 -inkey unittests.key -in unittests.crt -password pass:1234 aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/stream_test_helper.c000066400000000000000000000205131456575232400254070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "stream_test_helper.h" #include #include #include #include #include #define UNKNOWN_HEADER_BLOCK ((enum aws_http_header_block) - 1) static int s_on_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; struct client_stream_tester *tester = user_data; ASSERT_FALSE(tester->complete); if (tester->current_header_block == UNKNOWN_HEADER_BLOCK) { tester->current_header_block = header_block; } else { ASSERT_INT_EQUALS(tester->current_header_block, header_block); } /* Response consists of: * - 0+ informational (1xx) header-blocks * - 1 block of main headers arrives * - Optional trailing header-block may come after body */ switch (header_block) { case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL: ASSERT_SUCCESS(aws_http_headers_add_array(tester->current_info_headers, header_array, num_headers)); break; case AWS_HTTP_HEADER_BLOCK_MAIN: ASSERT_SUCCESS(aws_http_headers_add_array(tester->response_headers, header_array, num_headers)); break; case AWS_HTTP_HEADER_BLOCK_TRAILING: ASSERT_SUCCESS(aws_http_headers_add_array(tester->response_trailer, header_array, num_headers)); break; } return AWS_OP_SUCCESS; } static int s_on_header_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { struct client_stream_tester *tester = user_data; ASSERT_FALSE(tester->complete); if (tester->current_header_block != UNKNOWN_HEADER_BLOCK) { ASSERT_INT_EQUALS(tester->current_header_block, header_block); } tester->current_header_block = UNKNOWN_HEADER_BLOCK; /* Response consists of: * - 0+ informational (1xx) header-blocks * - 1 block of main headers arrives * - Optional trailing header-block may come after body */ switch (header_block) { case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL: { ASSERT_FALSE(tester->response_headers_done); ASSERT_FALSE(tester->response_trailer_done); ASSERT_UINT_EQUALS(0, tester->response_body.len); /* Create new entry in info_responses[], copy in headers and status_code */ struct aws_http_message *info_response = aws_http_message_new_response(tester->alloc); ASSERT_NOT_NULL(info_response); tester->info_responses[tester->num_info_responses++] = info_response; int status_code; ASSERT_SUCCESS(aws_http_stream_get_incoming_response_status(stream, &status_code)); ASSERT_SUCCESS(aws_http_message_set_response_status(info_response, status_code)); for (size_t i = 0; i < aws_http_headers_count(tester->current_info_headers); ++i) { struct aws_http_header header; ASSERT_SUCCESS(aws_http_headers_get_index(tester->current_info_headers, i, &header)); ASSERT_SUCCESS(aws_http_message_add_header(info_response, header)); } aws_http_headers_clear(tester->current_info_headers); break; } case AWS_HTTP_HEADER_BLOCK_MAIN: ASSERT_FALSE(tester->response_headers_done); ASSERT_FALSE(tester->response_trailer_done); ASSERT_UINT_EQUALS(0, tester->response_body.len); tester->response_headers_done = true; break; case AWS_HTTP_HEADER_BLOCK_TRAILING: ASSERT_FALSE(tester->response_trailer_done); ASSERT_TRUE(tester->response_headers_done || aws_http_headers_count(tester->response_headers) == 0); tester->response_trailer_done = true; break; } return AWS_OP_SUCCESS; } static int s_on_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct client_stream_tester *tester = user_data; ASSERT_FALSE(tester->complete); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&tester->response_body, data)); return AWS_OP_SUCCESS; } 
static void s_on_metrics( struct aws_http_stream *stream, const struct aws_http_stream_metrics *metrics, void *user_data) { (void)stream; struct client_stream_tester *tester = user_data; tester->metrics = *metrics; AWS_FATAL_ASSERT(metrics->stream_id == stream->id); if (metrics->receive_end_timestamp_ns > 0) { AWS_FATAL_ASSERT( metrics->receiving_duration_ns == metrics->receive_end_timestamp_ns - metrics->receive_start_timestamp_ns); } if (metrics->send_end_timestamp_ns > 0) { AWS_FATAL_ASSERT( metrics->sending_duration_ns == metrics->send_end_timestamp_ns - metrics->send_start_timestamp_ns); } if (metrics->receiving_duration_ns != -1) { AWS_FATAL_ASSERT(metrics->receive_end_timestamp_ns > 0); } if (metrics->sending_duration_ns != -1) { AWS_FATAL_ASSERT(metrics->send_end_timestamp_ns > 0); } } static void s_on_complete(struct aws_http_stream *stream, int error_code, void *user_data) { struct client_stream_tester *tester = user_data; /* Validate things are firing properly */ AWS_FATAL_ASSERT(!tester->complete); if (error_code == AWS_ERROR_SUCCESS) { AWS_FATAL_ASSERT(tester->current_header_block == UNKNOWN_HEADER_BLOCK); AWS_FATAL_ASSERT(aws_http_headers_count(tester->current_info_headers) == 0); /* is cleared when block done */ AWS_FATAL_ASSERT(tester->response_headers_done || aws_http_headers_count(tester->response_headers) == 0); AWS_FATAL_ASSERT(tester->response_trailer_done || aws_http_headers_count(tester->response_trailer) == 0); } tester->complete = true; tester->on_complete_error_code = error_code; tester->on_complete_connection_is_open = aws_http_connection_is_open(aws_http_stream_get_connection(stream)); aws_http_stream_get_incoming_response_status(stream, &tester->response_status); } static void s_on_destroy(void *user_data) { struct client_stream_tester *tester = user_data; /* Validate things are firing properly */ AWS_FATAL_ASSERT(!tester->destroyed); tester->destroyed = true; } int client_stream_tester_init( struct client_stream_tester *tester, struct aws_allocator *alloc, const struct client_stream_tester_options *options) { AWS_ZERO_STRUCT(*tester); tester->alloc = alloc; tester->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN; tester->current_header_block = UNKNOWN_HEADER_BLOCK; tester->current_info_headers = aws_http_headers_new(alloc); ASSERT_NOT_NULL(tester->current_info_headers); tester->response_headers = aws_http_headers_new(alloc); ASSERT_NOT_NULL(tester->response_headers); tester->response_trailer = aws_http_headers_new(alloc); ASSERT_NOT_NULL(tester->response_trailer); ASSERT_SUCCESS(aws_byte_buf_init(&tester->response_body, alloc, 128)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = options->request, .user_data = tester, .on_response_headers = s_on_headers, .on_response_header_block_done = s_on_header_block_done, .on_response_body = s_on_body, .on_metrics = s_on_metrics, .on_complete = s_on_complete, .on_destroy = s_on_destroy, }; tester->stream = aws_http_connection_make_request(options->connection, &request_options); ASSERT_NOT_NULL(tester->stream); ASSERT_SUCCESS(aws_http_stream_activate(tester->stream)); return AWS_OP_SUCCESS; } void client_stream_tester_clean_up(struct client_stream_tester *tester) { for (size_t i = 0; i < tester->num_info_responses; ++i) { aws_http_message_release(tester->info_responses[i]); } aws_http_headers_release(tester->current_info_headers); aws_http_headers_release(tester->response_headers); aws_http_headers_release(tester->response_trailer); 
aws_byte_buf_clean_up(&tester->response_body); aws_http_stream_release(tester->stream); AWS_ZERO_STRUCT(*tester); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/stream_test_helper.h000066400000000000000000000033171456575232400254170ustar00rootroot00000000000000#ifndef AWS_HTTP_STREAM_TEST_HELPER_H #define AWS_HTTP_STREAM_TEST_HELPER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_http_connection; struct aws_http_headers; struct aws_http_message; struct aws_http_stream; struct client_stream_tester { struct aws_allocator *alloc; struct aws_http_stream *stream; int response_status; enum aws_http_header_block current_header_block; /* Array of completed Informational (1xx) responses */ struct aws_http_message *info_responses[4]; size_t num_info_responses; /* As Informational (1xx) headers arrive, they're buffered here. * They copied into a new `info_responses` entry when the block is done */ struct aws_http_headers *current_info_headers; /* Main header-block */ struct aws_http_headers *response_headers; bool response_headers_done; /* Trailing header-block */ struct aws_http_headers *response_trailer; bool response_trailer_done; struct aws_byte_buf response_body; bool complete; int on_complete_error_code; /* Whether connection is open when on_complete fires */ bool on_complete_connection_is_open; struct aws_http_stream_metrics metrics; bool destroyed; }; struct client_stream_tester_options { struct aws_http_message *request; struct aws_http_connection *connection; }; int client_stream_tester_init( struct client_stream_tester *tester, struct aws_allocator *alloc, const struct client_stream_tester_options *options); void client_stream_tester_clean_up(struct client_stream_tester *tester); #endif /* AWS_HTTP_STREAM_TEST_HELPER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_connection.c000066400000000000000000001161721456575232400247230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #ifdef _WIN32 # define LOCAL_SOCK_TEST_FORMAT "\\\\.\\pipe\\testsock-%s" #else # define LOCAL_SOCK_TEST_FORMAT "testsock-%s.sock" #endif enum { TESTER_TIMEOUT_SEC = 60, /* Give enough time for non-sudo users to enter password */ }; /* Options for setting up `tester` singleton */ struct tester_options { struct aws_allocator *alloc; bool tls; char *server_alpn_list; char *client_alpn_list; bool no_connection; /* don't connect server to client */ bool pin_event_loop; bool use_tcp; /* otherwise uses domain sockets */ }; /* Singleton used by tests in this file */ struct tester { struct aws_allocator *alloc; struct aws_event_loop_group *server_event_loop_group; struct aws_event_loop_group *client_event_loop_group; struct aws_host_resolver *host_resolver; struct aws_server_bootstrap *server_bootstrap; struct aws_http_server *server; struct aws_client_bootstrap *client_bootstrap; struct aws_http_client_connection_options client_options; int server_connection_num; int client_connection_num; int wait_server_connection_num; int wait_client_connection_num; struct aws_http_connection *server_connections[10]; struct aws_http_connection *client_connections[10]; struct aws_socket_endpoint endpoint; struct aws_socket_options socket_options; int client_connection_is_shutdown; int server_connection_is_shutdown; int wait_client_connection_is_shutdown; int wait_server_connection_is_shutdown; bool server_is_shutdown; struct aws_http_connection *new_client_connection; bool new_client_shut_down; bool new_client_setup_finished; enum aws_http_version connection_version; /* Tls context */ struct aws_tls_ctx_options server_ctx_options; struct aws_tls_ctx_options client_ctx_options; struct aws_tls_ctx *server_ctx; struct aws_tls_ctx *client_ctx; struct aws_tls_connection_options server_tls_connection_options; struct aws_tls_connection_options client_tls_connection_options; struct aws_byte_buf negotiated_protocol; /* If we need to wait for some async process*/ struct aws_mutex wait_lock; struct aws_condition_variable wait_cvar; /* we need wait result for both server side and client side */ int server_wait_result; int client_wait_result; }; static struct aws_http_stream *s_tester_on_incoming_request(struct aws_http_connection *connection, void *user_data) { (void)connection; (void)user_data; aws_raise_error(AWS_ERROR_UNIMPLEMENTED); return NULL; } static void s_tester_http_server_on_destroy(void *user_data) { struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->server_is_shutdown = true; tester->server = NULL; AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static void s_tester_on_server_connection_shutdown( struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->server_connection_is_shutdown++; AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static void s_tester_on_server_connection_setup( struct aws_http_server *server, struct aws_http_connection *connection, int error_code, void 
*user_data) { (void)server; struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); if (error_code) { tester->server_wait_result = error_code; goto done; } struct aws_http_server_connection_options options = AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT; options.connection_user_data = tester; options.on_incoming_request = s_tester_on_incoming_request; options.on_shutdown = s_tester_on_server_connection_shutdown; int err = aws_http_connection_configure_server(connection, &options); if (err) { tester->server_wait_result = aws_last_error(); goto done; } tester->server_connections[tester->server_connection_num++] = connection; done: AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static void s_tester_on_client_connection_setup( struct aws_http_connection *connection, int error_code, void *user_data) { struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); if (error_code) { tester->client_wait_result = error_code; goto done; } tester->connection_version = aws_http_connection_get_version(connection); tester->client_connections[tester->client_connection_num++] = connection; done: AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static void s_tester_on_client_connection_shutdown( struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->client_connection_is_shutdown++; AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static int s_tester_wait(struct tester *tester, bool (*pred)(void *user_data)) { int local_wait_result; ASSERT_SUCCESS(aws_mutex_lock(&tester->wait_lock)); int err = aws_condition_variable_wait_for_pred( &tester->wait_cvar, &tester->wait_lock, aws_timestamp_convert(TESTER_TIMEOUT_SEC, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL), pred, tester); if (tester->server_wait_result) { local_wait_result = tester->server_wait_result; } else { local_wait_result = tester->client_wait_result; } tester->server_wait_result = 0; tester->client_wait_result = 0; ASSERT_SUCCESS(aws_mutex_unlock(&tester->wait_lock)); ASSERT_SUCCESS(err); if (local_wait_result) { return aws_raise_error(local_wait_result); } return AWS_OP_SUCCESS; } static bool s_tester_connection_setup_pred(void *user_data) { struct tester *tester = user_data; return (tester->server_wait_result || tester->client_wait_result) || (tester->client_connection_num == tester->wait_client_connection_num && tester->server_connection_num == tester->wait_server_connection_num); } static bool s_tester_connection_shutdown_pred(void *user_data) { struct tester *tester = user_data; return (tester->server_wait_result || tester->client_wait_result) || (tester->client_connection_is_shutdown == tester->wait_client_connection_is_shutdown && tester->server_connection_is_shutdown == tester->wait_server_connection_is_shutdown); } static bool s_tester_server_shutdown_pred(void *user_data) { struct tester *tester = user_data; return tester->server_is_shutdown; } static void s_client_connection_options_init_tester( struct aws_http_client_connection_options *client_options, struct tester *tester) { struct aws_client_bootstrap_options bootstrap_options = { 
.event_loop_group = tester->client_event_loop_group, .host_resolver = tester->host_resolver, }; tester->client_bootstrap = aws_client_bootstrap_new(tester->alloc, &bootstrap_options); AWS_FATAL_ASSERT(tester->client_bootstrap != NULL); client_options->allocator = tester->alloc; client_options->bootstrap = tester->client_bootstrap; client_options->host_name = aws_byte_cursor_from_c_str(tester->endpoint.address); client_options->port = tester->endpoint.port; client_options->socket_options = &tester->socket_options; client_options->user_data = tester; client_options->on_setup = s_tester_on_client_connection_setup; client_options->on_shutdown = s_tester_on_client_connection_shutdown; } static int s_tls_client_opt_tester_init( struct tester *tester, const char *alpn_list, struct aws_byte_cursor server_name) { aws_tls_ctx_options_init_default_client(&tester->client_ctx_options, tester->alloc); aws_tls_ctx_options_override_default_trust_store_from_path(&tester->client_ctx_options, NULL, "unittests.crt"); tester->client_ctx = aws_tls_client_ctx_new(tester->alloc, &tester->client_ctx_options); aws_tls_connection_options_init_from_ctx(&tester->client_tls_connection_options, tester->client_ctx); aws_tls_connection_options_set_alpn_list(&tester->client_tls_connection_options, tester->alloc, alpn_list); aws_tls_connection_options_set_server_name(&tester->client_tls_connection_options, tester->alloc, &server_name); return AWS_OP_SUCCESS; } static int s_tls_server_opt_tester_init(struct tester *tester, const char *alpn_list) { #ifdef __APPLE__ struct aws_byte_cursor pwd_cur = aws_byte_cursor_from_c_str("1234"); ASSERT_SUCCESS(aws_tls_ctx_options_init_server_pkcs12_from_path( &tester->server_ctx_options, tester->alloc, "unittests.p12", &pwd_cur)); #else ASSERT_SUCCESS(aws_tls_ctx_options_init_default_server_from_path( &tester->server_ctx_options, tester->alloc, "unittests.crt", "unittests.key")); #endif /* __APPLE__ */ aws_tls_ctx_options_set_alpn_list(&tester->server_ctx_options, alpn_list); tester->server_ctx = aws_tls_server_ctx_new(tester->alloc, &tester->server_ctx_options); ASSERT_NOT_NULL(tester->server_ctx); aws_tls_connection_options_init_from_ctx(&tester->server_tls_connection_options, tester->server_ctx); return AWS_OP_SUCCESS; } static int s_tester_init(struct tester *tester, const struct tester_options *options) { AWS_ZERO_STRUCT(*tester); tester->alloc = options->alloc; aws_http_library_init(options->alloc); ASSERT_SUCCESS(aws_mutex_init(&tester->wait_lock)); ASSERT_SUCCESS(aws_condition_variable_init(&tester->wait_cvar)); /* * The current http testing framework has several issues that hinder testing event loop pinning: * (1) Server shutdown can crash with memory corruption if the server uses an event loop group with more than one * thread * (2) s_tester_wait mixes results from both client and server and once you unlink them out of the same, single- * threaded event loop, the test assumptions start breaking due to different serializations of io events. * * This leads to a self-defeating situation: in order to test event loop pinning we need event loop groups with * many threads, but as soon as we use one, existing tests start breaking. 
* * Event loop pinning is a critical blocker for an upcoming release, so rather than trying to figure out the * underlying race condition within the http testing framework (I suspect it's socket listener related), we * instead add some complexity to the testing framework such that * (1) Existing tests continue to use a single event loop group with one thread * (2) The event loop pinning test uses two event loop groups, the server elg with a single thread and the * client elg with many threads to actually test pinning. */ tester->server_event_loop_group = aws_event_loop_group_new_default(tester->alloc, 1, NULL); if (options->pin_event_loop) { tester->client_event_loop_group = aws_event_loop_group_new_default(tester->alloc, 16, NULL); } else { tester->client_event_loop_group = aws_event_loop_group_acquire(tester->server_event_loop_group); } struct aws_host_resolver_default_options resolver_options = { .el_group = tester->client_event_loop_group, .max_entries = 8, }; tester->host_resolver = aws_host_resolver_new_default(tester->alloc, &resolver_options); tester->server_bootstrap = aws_server_bootstrap_new(tester->alloc, tester->server_event_loop_group); ASSERT_NOT_NULL(tester->server_bootstrap); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .domain = options->use_tcp ? AWS_SOCKET_IPV4 : AWS_SOCKET_LOCAL, .connect_timeout_ms = (uint32_t)aws_timestamp_convert(TESTER_TIMEOUT_SEC, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), }; tester->socket_options = socket_options; struct aws_socket_endpoint endpoint; AWS_ZERO_STRUCT(endpoint); if (options->use_tcp) { snprintf(endpoint.address, sizeof(endpoint.address), "127.0.0.1"); } else { aws_socket_endpoint_init_local_address_for_test(&endpoint); } tester->endpoint = endpoint; /* Create server (listening socket) */ struct aws_http_server_options server_options = AWS_HTTP_SERVER_OPTIONS_INIT; server_options.allocator = tester->alloc; server_options.bootstrap = tester->server_bootstrap; server_options.endpoint = &tester->endpoint; server_options.socket_options = &tester->socket_options; server_options.server_user_data = tester; server_options.on_incoming_connection = s_tester_on_server_connection_setup; server_options.on_destroy_complete = s_tester_http_server_on_destroy; if (options->tls) { ASSERT_SUCCESS(s_tls_server_opt_tester_init( tester, options->server_alpn_list ? options->server_alpn_list : "h2;http/1.1")); server_options.tls_options = &tester->server_tls_connection_options; } tester->server = aws_http_server_new(&server_options); ASSERT_NOT_NULL(tester->server); /* * localhost server binds to any port, so let's get the final listener endpoint whether or not we're making * connections to it. */ if (options->use_tcp) { tester->endpoint = *aws_http_server_get_listener_endpoint(tester->server); } /* If test doesn't need a connection, we're done setting up. */ if (options->no_connection) { return AWS_OP_SUCCESS; } /* Connect */ struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; s_client_connection_options_init_tester(&client_options, tester); if (options->tls) { ASSERT_SUCCESS(s_tls_client_opt_tester_init( tester, options->client_alpn_list ? 
options->client_alpn_list : "h2;http/1.1", aws_byte_cursor_from_c_str("localhost"))); client_options.tls_options = &tester->client_tls_connection_options; } if (options->pin_event_loop) { client_options.requested_event_loop = aws_event_loop_group_get_next_loop(tester->client_event_loop_group); } tester->client_options = client_options; tester->server_connection_num = 0; tester->client_connection_num = 0; ASSERT_SUCCESS(aws_http_client_connect(&tester->client_options)); /* Wait for server & client connections to finish setup */ tester->wait_client_connection_num = 1; tester->wait_server_connection_num = 1; ASSERT_SUCCESS(s_tester_wait(tester, s_tester_connection_setup_pred)); return AWS_OP_SUCCESS; } static int s_tester_clean_up(struct tester *tester) { if (tester->server) { /* server is not shut down by test, let's shut down the server here */ aws_http_server_release(tester->server); /* wait for the server to finish shutdown process */ ASSERT_SUCCESS(s_tester_wait(tester, s_tester_server_shutdown_pred)); } if (tester->server_ctx) { aws_tls_connection_options_clean_up(&tester->server_tls_connection_options); aws_tls_ctx_release(tester->server_ctx); aws_tls_ctx_options_clean_up(&tester->server_ctx_options); } if (tester->client_ctx) { aws_tls_connection_options_clean_up(&tester->client_tls_connection_options); aws_tls_ctx_release(tester->client_ctx); aws_tls_ctx_options_clean_up(&tester->client_ctx_options); } aws_byte_buf_clean_up(&tester->negotiated_protocol); aws_server_bootstrap_release(tester->server_bootstrap); aws_client_bootstrap_release(tester->client_bootstrap); aws_host_resolver_release(tester->host_resolver); aws_event_loop_group_release(tester->client_event_loop_group); aws_event_loop_group_release(tester->server_event_loop_group); aws_http_library_clean_up(); aws_mutex_clean_up(&tester->wait_lock); return AWS_OP_SUCCESS; } static int s_test_server_new_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, .no_connection = true, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(server_new_destroy, s_test_server_new_destroy); static int s_test_server_new_destroy_tcp(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = {.alloc = allocator, .no_connection = true, .use_tcp = true}; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); const struct aws_socket_endpoint *listener_endpoint = aws_http_server_get_listener_endpoint(tester.server); ASSERT_TRUE(listener_endpoint->port > 0); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(server_new_destroy_tcp, s_test_server_new_destroy_tcp); void release_all_client_connections(struct tester *tester) { for (int i = 0; i < tester->client_connection_num; i++) { aws_http_connection_release(tester->client_connections[i]); } /* wait for all the connections to shutdown */ tester->wait_client_connection_is_shutdown = tester->client_connection_num; } void release_all_server_connections(struct tester *tester) { for (int i = 0; i < tester->server_connection_num; i++) { aws_http_connection_release(tester->server_connections[i]); } /* wait for all the connections to shutdown */ tester->wait_server_connection_is_shutdown = tester->server_connection_num; } static int s_test_connection_setup_shutdown(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = 
allocator, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(connection_setup_shutdown, s_test_connection_setup_shutdown); static int s_test_connection_setup_shutdown_tls(struct aws_allocator *allocator, void *ctx) { (void)ctx; #ifdef __APPLE__ /* Something is wrong with APPLE */ return AWS_OP_SUCCESS; #endif struct tester_options options = { .alloc = allocator, .tls = true, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(connection_setup_shutdown_tls, s_test_connection_setup_shutdown_tls); static int s_test_connection_setup_shutdown_proxy_setting_on_ev_not_found(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, .no_connection = true, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; struct proxy_env_var_settings proxy_ev_settings; AWS_ZERO_STRUCT(proxy_ev_settings); proxy_ev_settings.env_var_type = AWS_HPEV_ENABLE; client_options.proxy_ev_settings = &proxy_ev_settings; s_client_connection_options_init_tester(&client_options, &tester); tester.client_options = client_options; tester.server_connection_num = 0; tester.client_connection_num = 0; ASSERT_SUCCESS(aws_http_client_connect(&tester.client_options)); /* Wait for server & client connections to finish setup */ tester.wait_client_connection_num = 1; tester.wait_server_connection_num = 1; ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_setup_pred)); release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( connection_setup_shutdown_proxy_setting_on_ev_not_found, s_test_connection_setup_shutdown_proxy_setting_on_ev_not_found); static int s_test_connection_h2_prior_knowledge(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, .no_connection = true, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); /* Connect */ struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; s_client_connection_options_init_tester(&client_options, &tester); client_options.prior_knowledge_http2 = true; tester.client_options = client_options; tester.server_connection_num = 0; tester.client_connection_num = 0; ASSERT_SUCCESS(aws_http_client_connect(&tester.client_options)); /* Wait for server & client connections to finish setup */ tester.wait_client_connection_num = 1; tester.wait_server_connection_num = 1; ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_setup_pred)); /* Assert that we made an http2 connection */ ASSERT_INT_EQUALS(tester.connection_version, AWS_HTTP_VERSION_2); /* clean up */ release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_wait(&tester, 
s_tester_connection_shutdown_pred)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(connection_h2_prior_knowledge, s_test_connection_h2_prior_knowledge); static int s_test_connection_h2_prior_knowledge_not_work_with_tls(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, .no_connection = true, .tls = true, .server_alpn_list = "http/1.1", }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); /* Connect with prior knowledge */ struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; s_client_connection_options_init_tester(&client_options, &tester); ASSERT_SUCCESS(s_tls_client_opt_tester_init(&tester, "http/1.1", aws_byte_cursor_from_c_str("localhost"))); client_options.tls_options = &tester.client_tls_connection_options; client_options.prior_knowledge_http2 = true; tester.client_options = client_options; tester.server_connection_num = 0; tester.client_connection_num = 0; /* prior knowledge only works with cleartext TCP */ ASSERT_FAILS(aws_http_client_connect(&tester.client_options)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(connection_h2_prior_knowledge_not_work_with_tls, s_test_connection_h2_prior_knowledge_not_work_with_tls); static void s_on_tester_negotiation_result( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int err_code, void *user_data) { (void)slot; (void)err_code; struct tester *tester = (struct tester *)user_data; struct aws_byte_buf src = aws_tls_handler_protocol(handler); aws_byte_buf_init_copy(&tester->negotiated_protocol, tester->alloc, &src); } static int s_test_connection_customized_alpn(struct aws_allocator *allocator, void *ctx) { (void)ctx; char customized_alpn_string[] = "myh2"; enum aws_http_version expected_version = AWS_HTTP_VERSION_2; struct tester_options options = { .alloc = allocator, .no_connection = true, .tls = true, .server_alpn_list = "myh2;myh1.1;h2;http/1.1", }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); /* Connect with ALPN and the customized alpn string map */ struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; s_client_connection_options_init_tester(&client_options, &tester); ASSERT_SUCCESS( s_tls_client_opt_tester_init(&tester, customized_alpn_string, aws_byte_cursor_from_c_str("localhost"))); aws_tls_connection_options_set_callbacks( &tester.client_tls_connection_options, s_on_tester_negotiation_result, NULL, NULL, &tester); client_options.tls_options = &tester.client_tls_connection_options; /* create the alpn map */ struct aws_hash_table alpn_map; AWS_ZERO_STRUCT(alpn_map); ASSERT_SUCCESS(aws_http_alpn_map_init(allocator, &alpn_map)); /* We don't need to clean up the string as the map will own the string */ struct aws_string *alpn_string = aws_string_new_from_c_str(allocator, customized_alpn_string); ASSERT_SUCCESS(aws_hash_table_put(&alpn_map, alpn_string, (void *)(size_t)expected_version, NULL)); client_options.alpn_string_map = &alpn_map; tester.client_options = client_options; tester.server_connection_num = 0; tester.client_connection_num = 0; ASSERT_SUCCESS(aws_http_client_connect(&tester.client_options)); /* We should be safe to free the map */ aws_hash_table_clean_up(&alpn_map); /* Wait for server & client connections to finish setup */ tester.wait_client_connection_num = 1; tester.wait_server_connection_num = 1; 
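    /* For readers following the custom-ALPN setup above, the pattern can be summarized as a small
     * sketch. This is illustrative only; the names `my_map` and `my_alpn` are hypothetical, and the
     * calls simply mirror the ones already made in this test (they are not additional test logic):
     *
     *     struct aws_hash_table my_map;
     *     ASSERT_SUCCESS(aws_http_alpn_map_init(allocator, &my_map));
     *     struct aws_string *my_alpn = aws_string_new_from_c_str(allocator, "myh2");
     *     // the map takes ownership of the string; the value encodes the negotiated HTTP version
     *     ASSERT_SUCCESS(aws_hash_table_put(&my_map, my_alpn, (void *)(size_t)AWS_HTTP_VERSION_2, NULL));
     *     client_options.alpn_string_map = &my_map;
     *     ASSERT_SUCCESS(aws_http_client_connect(&client_options));
     *     // per the comment above, the map may be cleaned up once connect has been called
     *     aws_hash_table_clean_up(&my_map);
     */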
ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_setup_pred)); #ifndef __APPLE__ /* Server side ALPN doesn't work for MacOS */ /* Assert that we have the negotiated protocol and the expected version */ ASSERT_INT_EQUALS(tester.connection_version, expected_version); ASSERT_TRUE(aws_byte_buf_eq_c_str(&tester.negotiated_protocol, customized_alpn_string)); #endif /* clean up */ release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(connection_customized_alpn, s_test_connection_customized_alpn); static int s_test_connection_customized_alpn_error_with_unknown_return_string( struct aws_allocator *allocator, void *ctx) { (void)ctx; char customized_alpn_string[] = "myh2"; struct tester_options options = { .alloc = allocator, .no_connection = true, .tls = true, .server_alpn_list = "myh2;myh1.1;h2;http/1.1", }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); /* Connect with ALPN and the customized alpn string map */ struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; s_client_connection_options_init_tester(&client_options, &tester); ASSERT_SUCCESS( s_tls_client_opt_tester_init(&tester, customized_alpn_string, aws_byte_cursor_from_c_str("localhost"))); aws_tls_connection_options_set_callbacks( &tester.client_tls_connection_options, s_on_tester_negotiation_result, NULL, NULL, &tester); client_options.tls_options = &tester.client_tls_connection_options; /* create the alpn map */ struct aws_hash_table alpn_map; AWS_ZERO_STRUCT(alpn_map); ASSERT_SUCCESS(aws_http_alpn_map_init(allocator, &alpn_map)); /* put an empty ALPN map, you will not found the returned string, and should error out when trying to connect*/ client_options.alpn_string_map = &alpn_map; tester.client_options = client_options; tester.server_connection_num = 0; tester.client_connection_num = 0; ASSERT_SUCCESS(aws_http_client_connect(&tester.client_options)); /* We should be safe to free the map */ aws_hash_table_clean_up(&alpn_map); /* Wait for server & client connections to finish setup */ tester.wait_client_connection_num = 1; tester.wait_server_connection_num = 1; #ifndef __APPLE__ /* Server side ALPN doesn't work for MacOS */ ASSERT_FAILS(s_tester_wait(&tester, s_tester_connection_setup_pred)); /* Assert that we have the negotiated protocol and error returned from callback */ ASSERT_TRUE(aws_byte_buf_eq_c_str(&tester.negotiated_protocol, customized_alpn_string)); ASSERT_INT_EQUALS(aws_last_error(), AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL); #else ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_setup_pred)); #endif /* clean up */ release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( connection_customized_alpn_error_with_unknown_return_string, s_test_connection_customized_alpn_error_with_unknown_return_string); static int s_test_connection_destroy_server_with_connection_existing(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); aws_http_server_release(tester.server); /* wait for all connections to be shut down */ tester.wait_client_connection_is_shutdown = 
tester.client_connection_num; tester.wait_server_connection_is_shutdown = tester.server_connection_num; ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); /* check the server is destroyed */ ASSERT_TRUE(tester.server_is_shutdown); /* release memory */ release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( connection_destroy_server_with_connection_existing, s_test_connection_destroy_server_with_connection_existing); /* multiple connections */ static int s_test_connection_destroy_server_with_multiple_connections_existing( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); /* more connections! */ int more_connection_num = 1; /* set waiting condition */ tester.wait_client_connection_num += more_connection_num; tester.wait_server_connection_num += more_connection_num; /* connect */ for (int i = 0; i < more_connection_num; i++) { ASSERT_SUCCESS(aws_http_client_connect(&tester.client_options)); } /* wait for connections */ ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_setup_pred)); aws_http_server_release(tester.server); /* wait for all connections to be shut down */ tester.wait_client_connection_is_shutdown = tester.client_connection_num; tester.wait_server_connection_is_shutdown = tester.server_connection_num; ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); /* check the server is destroyed */ ASSERT_TRUE(tester.server_is_shutdown); /* release memory */ release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( connection_destroy_server_with_multiple_connections_existing, s_test_connection_destroy_server_with_multiple_connections_existing); static void s_block_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; /* sleep for 2 sec */ struct tester *tester = arg; aws_thread_current_sleep(2000000000); aws_mem_release(tester->alloc, task); } static void s_tester_on_new_client_connection_setup( struct aws_http_connection *connection, int error_code, void *user_data) { struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->new_client_setup_finished = true; if (error_code) { tester->client_wait_result = error_code; goto done; } tester->new_client_connection = connection; done: AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static void s_tester_on_new_client_connection_shutdown( struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->new_client_shut_down = true; AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static bool s_tester_new_client_setup_pred(void *user_data) { struct tester *tester = user_data; return tester->new_client_setup_finished; } static bool s_tester_new_client_shutdown_pred(void *user_data) { struct tester *tester = user_data; return tester->new_client_shut_down; } /* when we shutdown the server, no more new connection will be accepted */ static int 
s_test_connection_server_shutting_down_new_connection_setup_fail( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); /* Connect */ struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_LOCAL, .connect_timeout_ms = (uint32_t)aws_timestamp_convert(TESTER_TIMEOUT_SEC, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), }; /* create a new eventloop for the new connection and block the new connection. Waiting server to begin shutting * down. */ struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 1, NULL); /* get the first eventloop, which will be the eventloop for client to connect */ struct aws_event_loop *current_eventloop = aws_event_loop_group_get_loop_at(event_loop_group, 0); struct aws_task *block_task = aws_mem_acquire(allocator, sizeof(struct aws_task)); aws_task_init(block_task, s_block_task, &tester, "wait_a_bit"); aws_event_loop_schedule_task_now(current_eventloop, block_task); /* get the first eventloop of tester, which will be the eventloop for server listener socket, block the listener * socket */ struct aws_event_loop *server_eventloop = aws_event_loop_group_get_loop_at(tester.server_event_loop_group, 0); struct aws_task *server_block_task = aws_mem_acquire(allocator, sizeof(struct aws_task)); aws_task_init(server_block_task, s_block_task, &tester, "wait_a_bit"); aws_event_loop_schedule_task_now(server_eventloop, server_block_task); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = event_loop_group, .host_resolver = tester.host_resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_http_client_connection_options client_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; client_options.allocator = tester.alloc; client_options.bootstrap = bootstrap; client_options.host_name = aws_byte_cursor_from_c_str(tester.endpoint.address); client_options.port = tester.endpoint.port; client_options.socket_options = &socket_options; client_options.user_data = &tester; client_options.on_setup = s_tester_on_new_client_connection_setup; client_options.on_shutdown = s_tester_on_new_client_connection_shutdown; /* new connection will be blocked for 2 sec */ tester.wait_server_connection_num++; ASSERT_SUCCESS(aws_http_client_connect(&client_options)); /* shutting down the server */ aws_http_server_release(tester.server); /* the server side connection failed with error code, closed */ ASSERT_FAILS(s_tester_wait(&tester, s_tester_connection_setup_pred)); /* wait for the client side connection */ s_tester_wait(&tester, s_tester_new_client_setup_pred); if (tester.new_client_connection && !tester.client_connection_is_shutdown) { /* wait for it to shut down, we do not need to call shut down, the socket will know */ ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_new_client_shutdown_pred)); } if (tester.new_client_connection) { aws_http_connection_release(tester.new_client_connection); } /* wait for the old connections to be shut down */ tester.wait_client_connection_is_shutdown = tester.client_connection_num; tester.wait_server_connection_is_shutdown = tester.server_connection_num; /* assert the new connection fail to set up in user's perspective */ ASSERT_TRUE(tester.client_connection_num == 1); ASSERT_TRUE(tester.server_connection_num == 1); ASSERT_SUCCESS(s_tester_wait(&tester, 
s_tester_connection_shutdown_pred)); /* release memory */ release_all_client_connections(&tester); release_all_server_connections(&tester); aws_client_bootstrap_release(bootstrap); aws_event_loop_group_release(event_loop_group); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( connection_server_shutting_down_new_connection_setup_fail, s_test_connection_server_shutting_down_new_connection_setup_fail); static int s_test_connection_setup_shutdown_pinned_event_loop(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct tester_options options = { .alloc = allocator, .pin_event_loop = true, }; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, &options)); for (int i = 0; i < tester.client_connection_num; i++) { struct aws_http_connection *connection = tester.client_connections[i]; ASSERT_PTR_EQUALS( tester.client_options.requested_event_loop, aws_channel_get_event_loop(connection->channel_slot->channel)); } release_all_client_connections(&tester); release_all_server_connections(&tester); ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_connection_shutdown_pred)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(connection_setup_shutdown_pinned_event_loop, s_test_connection_setup_shutdown_pinned_event_loop); aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_connection_manager.c000066400000000000000000002104371456575232400264140ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var, "HTTP_PROXY"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var_low, "http_proxy"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var, "HTTPS_PROXY"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var_low, "https_proxy"); enum new_connection_result_type { AWS_NCRT_SUCCESS, AWS_NCRT_ERROR_VIA_CALLBACK, AWS_NCRT_ERROR_FROM_CREATE, }; struct mock_connection { enum new_connection_result_type result; bool is_closed_on_release; }; struct cm_tester_options { struct aws_allocator *allocator; struct aws_http_connection_manager_system_vtable *mock_table; struct aws_http_proxy_options *proxy_options; bool use_proxy_env; bool use_tls; struct aws_tls_connection_options *env_configured_tls; size_t max_connections; uint64_t max_connection_idle_in_ms; uint64_t starting_mock_time; bool http2; struct aws_http2_setting *initial_settings_array; size_t num_initial_settings; bool self_lib_init; }; struct cm_tester { struct aws_allocator *allocator; struct aws_event_loop_group *event_loop_group; struct aws_host_resolver *host_resolver; struct aws_client_bootstrap *client_bootstrap; struct aws_http_connection_manager *connection_manager; struct aws_tls_ctx *tls_ctx; struct aws_tls_ctx_options tls_ctx_options; struct aws_tls_connection_options tls_connection_options; struct aws_http_proxy_options *verify_proxy_options; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_array_list connections; size_t connection_errors; size_t connection_releases; size_t wait_for_connection_count; bool is_shutdown_complete; struct aws_http_connection_manager_system_vtable *mock_table; struct aws_atomic_var next_connection_id; struct aws_array_list 
mock_connections; aws_http_on_client_connection_shutdown_fn *release_connection_fn; struct aws_mutex mock_time_lock; uint64_t mock_time; struct proxy_env_var_settings proxy_ev_settings; bool proxy_request_complete; bool proxy_request_successful; bool self_lib_init; }; static struct cm_tester s_tester; static int s_tester_get_mock_time(uint64_t *current_time) { aws_mutex_lock(&s_tester.mock_time_lock); *current_time = s_tester.mock_time; aws_mutex_unlock(&s_tester.mock_time_lock); return AWS_OP_SUCCESS; } static void s_tester_set_mock_time(uint64_t current_time) { aws_mutex_lock(&s_tester.mock_time_lock); s_tester.mock_time = current_time; aws_mutex_unlock(&s_tester.mock_time_lock); } static void s_cm_tester_on_cm_shutdown_complete(void *user_data) { struct cm_tester *tester = user_data; AWS_FATAL_ASSERT(tester == &s_tester); aws_mutex_lock(&tester->lock); tester->is_shutdown_complete = true; aws_condition_variable_notify_one(&tester->signal); aws_mutex_unlock(&tester->lock); } static struct aws_event_loop *s_new_event_loop( struct aws_allocator *alloc, const struct aws_event_loop_options *options, void *new_loop_user_data) { (void)new_loop_user_data; return aws_event_loop_new_default(alloc, options->clock); } static int s_cm_tester_init(struct cm_tester_options *options) { struct cm_tester *tester = &s_tester; AWS_ZERO_STRUCT(*tester); tester->self_lib_init = options->self_lib_init; if (!tester->self_lib_init) { aws_http_library_init(options->allocator); } tester->allocator = options->allocator; ASSERT_SUCCESS(aws_mutex_init(&tester->lock)); ASSERT_SUCCESS(aws_condition_variable_init(&tester->signal)); ASSERT_SUCCESS( aws_array_list_init_dynamic(&tester->connections, tester->allocator, 10, sizeof(struct aws_http_connection *))); aws_mutex_init(&tester->mock_time_lock); s_tester_set_mock_time(options->starting_mock_time); aws_io_clock_fn *clock_fn = &aws_high_res_clock_get_ticks; if (options->mock_table) { clock_fn = options->mock_table->aws_high_res_clock_get_ticks; } tester->event_loop_group = aws_event_loop_group_new(tester->allocator, clock_fn, 1, s_new_event_loop, NULL, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->event_loop_group, .max_entries = 8, }; tester->host_resolver = aws_host_resolver_new_default(tester->allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = tester->event_loop_group, .host_resolver = tester->host_resolver, }; tester->client_bootstrap = aws_client_bootstrap_new(tester->allocator, &bootstrap_options); ASSERT_NOT_NULL(tester->client_bootstrap); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV4, .connect_timeout_ms = (uint32_t)aws_timestamp_convert(10, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), }; aws_tls_ctx_options_init_default_client(&tester->tls_ctx_options, options->allocator); aws_tls_ctx_options_set_verify_peer(&tester->tls_ctx_options, false); if (options->http2) { ASSERT_SUCCESS(aws_tls_ctx_options_set_alpn_list(&tester->tls_ctx_options, "h2")); } tester->tls_ctx = aws_tls_client_ctx_new(options->allocator, &tester->tls_ctx_options); ASSERT_NOT_NULL(tester->tls_ctx); struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str("www.google.com"); aws_tls_connection_options_init_from_ctx(&tester->tls_connection_options, tester->tls_ctx); aws_tls_connection_options_set_server_name(&tester->tls_connection_options, options->allocator, &server_name); tester->verify_proxy_options = options->proxy_options; 
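    /* A minimal sketch of the proxy-from-environment wiring that the next few lines configure.
     * Illustrative only: `my_proxy_ev`, `my_cm_options`, and `my_bootstrap` are hypothetical names,
     * and the remaining manager options (host, port, socket options, max_connections) must still be
     * filled in as this function does below:
     *
     *     struct proxy_env_var_settings my_proxy_ev;
     *     AWS_ZERO_STRUCT(my_proxy_ev);
     *     my_proxy_ev.env_var_type = AWS_HPEV_ENABLE;  // consult HTTP_PROXY / HTTPS_PROXY when connecting
     *
     *     struct aws_http_connection_manager_options my_cm_options = {
     *         .bootstrap = my_bootstrap,
     *         .proxy_ev_settings = &my_proxy_ev,
     *     };
     *     struct aws_http_connection_manager *my_cm =
     *         aws_http_connection_manager_new(allocator, &my_cm_options);
     */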
tester->proxy_ev_settings.env_var_type = options->use_proxy_env ? AWS_HPEV_ENABLE : AWS_HPEV_DISABLE; struct aws_tls_connection_options default_tls_connection_options; AWS_ZERO_STRUCT(default_tls_connection_options); if (options->env_configured_tls) { ASSERT_SUCCESS(aws_tls_connection_options_copy(&default_tls_connection_options, options->env_configured_tls)); tester->proxy_ev_settings.tls_options = &default_tls_connection_options; } struct aws_http_connection_manager_options cm_options = { .bootstrap = tester->client_bootstrap, .initial_window_size = SIZE_MAX, .socket_options = &socket_options, .tls_connection_options = options->use_tls ? &tester->tls_connection_options : NULL, .proxy_options = options->proxy_options, .proxy_ev_settings = &tester->proxy_ev_settings, .host = server_name, .port = options->use_tls ? 443 : 80, .max_connections = options->max_connections, .shutdown_complete_user_data = tester, .shutdown_complete_callback = s_cm_tester_on_cm_shutdown_complete, .max_connection_idle_in_milliseconds = options->max_connection_idle_in_ms, .http2_prior_knowledge = !options->use_tls && options->http2, .initial_settings_array = options->initial_settings_array, .num_initial_settings = options->num_initial_settings, }; if (options->mock_table) { g_aws_http_connection_manager_default_system_vtable_ptr = options->mock_table; } tester->connection_manager = aws_http_connection_manager_new(tester->allocator, &cm_options); ASSERT_NOT_NULL(tester->connection_manager); aws_tls_connection_options_clean_up(&default_tls_connection_options); if (options->mock_table) { aws_http_connection_manager_set_system_vtable(tester->connection_manager, options->mock_table); } tester->mock_table = options->mock_table; aws_atomic_store_int(&tester->next_connection_id, 0); ASSERT_SUCCESS(aws_array_list_init_dynamic( &tester->mock_connections, tester->allocator, 10, sizeof(struct mock_connection *))); return AWS_OP_SUCCESS; } static void s_add_mock_connections(size_t count, enum new_connection_result_type result, bool closed_on_release) { struct cm_tester *tester = &s_tester; for (size_t i = 0; i < count; ++i) { struct mock_connection *mock = aws_mem_acquire(tester->allocator, sizeof(struct mock_connection)); AWS_ZERO_STRUCT(*mock); mock->result = result; mock->is_closed_on_release = closed_on_release; aws_array_list_push_back(&tester->mock_connections, &mock); } } static int s_release_connections(size_t count, bool close_first) { struct cm_tester *tester = &s_tester; struct aws_array_list to_release; AWS_ZERO_STRUCT(to_release); ASSERT_SUCCESS(aws_mutex_lock(&tester->lock)); size_t release_count = aws_array_list_length(&tester->connections); if (release_count > count) { release_count = count; } if (release_count == 0) { goto release; } if (aws_array_list_init_dynamic( &to_release, tester->allocator, release_count, sizeof(struct aws_http_connection *))) { goto release; } for (size_t i = 0; i < release_count; ++i) { struct aws_http_connection *connection = NULL; if (aws_array_list_back(&tester->connections, &connection)) { continue; } aws_array_list_pop_back(&tester->connections); aws_array_list_push_back(&to_release, &connection); } release: ASSERT_SUCCESS(aws_mutex_unlock(&tester->lock)); if (aws_array_list_is_valid(&to_release)) { for (size_t i = 0; i < aws_array_list_length(&to_release); ++i) { struct aws_http_connection *connection = NULL; if (aws_array_list_get_at(&to_release, &connection, i)) { continue; } if (close_first) { if (tester->mock_table) { tester->mock_table->aws_http_connection_close(connection); 
} else { aws_http_connection_close(connection); } } aws_http_connection_manager_release_connection(tester->connection_manager, connection); ASSERT_SUCCESS(aws_mutex_lock(&tester->lock)); ++tester->connection_releases; aws_condition_variable_notify_one(&tester->signal); ASSERT_SUCCESS(aws_mutex_unlock(&tester->lock)); } aws_array_list_clean_up(&to_release); } else { ASSERT_UINT_EQUALS(0, release_count); } return AWS_OP_SUCCESS; } static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) { (void)error_code; (void)user_data; struct cm_tester *tester = &s_tester; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->lock) == AWS_OP_SUCCESS); if (connection == NULL) { ++tester->connection_errors; } else { aws_array_list_push_back(&tester->connections, &connection); } aws_condition_variable_notify_one(&tester->signal); AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->lock) == AWS_OP_SUCCESS); } static void s_acquire_connections(size_t count) { struct cm_tester *tester = &s_tester; for (size_t i = 0; i < count; ++i) { aws_http_connection_manager_acquire_connection(tester->connection_manager, s_on_acquire_connection, tester); } } static bool s_is_connection_reply_count_at_least(void *context) { (void)context; struct cm_tester *tester = &s_tester; return tester->wait_for_connection_count <= aws_array_list_length(&tester->connections) + tester->connection_errors + tester->connection_releases; } static int s_wait_on_connection_reply_count(size_t count) { struct cm_tester *tester = &s_tester; ASSERT_SUCCESS(aws_mutex_lock(&tester->lock)); tester->wait_for_connection_count = count; int signal_error = aws_condition_variable_wait_pred(&tester->signal, &tester->lock, s_is_connection_reply_count_at_least, tester); ASSERT_SUCCESS(aws_mutex_unlock(&tester->lock)); return signal_error; } static bool s_is_shutdown_complete(void *context) { (void)context; struct cm_tester *tester = &s_tester; return tester->is_shutdown_complete; } static int s_wait_on_shutdown_complete(void) { struct cm_tester *tester = &s_tester; ASSERT_SUCCESS(aws_mutex_lock(&tester->lock)); int signal_error = aws_condition_variable_wait_pred(&tester->signal, &tester->lock, s_is_shutdown_complete, tester); ASSERT_SUCCESS(aws_mutex_unlock(&tester->lock)); return signal_error; } static int s_cm_tester_clean_up(void) { struct cm_tester *tester = &s_tester; ASSERT_SUCCESS(s_release_connections(aws_array_list_length(&tester->connections), false)); aws_array_list_clean_up(&tester->connections); for (size_t i = 0; i < aws_array_list_length(&tester->mock_connections); ++i) { struct mock_connection *mock = NULL; if (aws_array_list_get_at(&tester->mock_connections, &mock, i)) { continue; } aws_mem_release(tester->allocator, mock); } aws_array_list_clean_up(&tester->mock_connections); aws_http_connection_manager_release(tester->connection_manager); s_wait_on_shutdown_complete(); aws_client_bootstrap_release(tester->client_bootstrap); aws_host_resolver_release(tester->host_resolver); aws_event_loop_group_release(tester->event_loop_group); aws_tls_ctx_options_clean_up(&tester->tls_ctx_options); aws_tls_connection_options_clean_up(&tester->tls_connection_options); aws_tls_ctx_release(tester->tls_ctx); if (!tester->self_lib_init) { aws_http_library_clean_up(); } aws_mutex_clean_up(&tester->lock); aws_condition_variable_clean_up(&tester->signal); aws_mutex_clean_up(&tester->mock_time_lock); return AWS_OP_SUCCESS; } static int s_test_connection_manager_setup_shutdown(struct aws_allocator *allocator, void *ctx) { (void)ctx; 
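    /* The connection-manager tests that follow all share the same shape; a brief sketch of that
     * lifecycle, using only the helpers defined earlier in this file, looks like:
     *
     *     struct cm_tester_options opts = {.allocator = allocator, .max_connections = 5};
     *     ASSERT_SUCCESS(s_cm_tester_init(&opts));              // bootstrap, TLS context, manager
     *     s_acquire_connections(1);                             // ask the manager for connections
     *     ASSERT_SUCCESS(s_wait_on_connection_reply_count(1));  // wait for acquisitions/errors
     *     ASSERT_SUCCESS(s_release_connections(1, false));      // hand them back (optionally closing first)
     *     ASSERT_SUCCESS(s_cm_tester_clean_up());               // release the manager, wait for shutdown
     */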
struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_setup_shutdown, s_test_connection_manager_setup_shutdown); static int s_test_connection_manager_single_connection(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(1)); ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_single_connection, s_test_connection_manager_single_connection); static int s_test_connection_manager_proxy_envrionment_empty_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Set proxy related envrionment variables to empty string and make sure we just skip proxy */ struct aws_string *empty = aws_string_new_from_c_str(allocator, ""); ASSERT_SUCCESS(aws_set_environment_value(s_http_proxy_env_var, empty)); ASSERT_SUCCESS(aws_set_environment_value(s_http_proxy_env_var_low, empty)); ASSERT_SUCCESS(aws_set_environment_value(s_https_proxy_env_var, empty)); ASSERT_SUCCESS(aws_set_environment_value(s_https_proxy_env_var_low, empty)); struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .use_proxy_env = true, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(1)); ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); aws_string_destroy(empty); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_connection_manager_proxy_envrionment_empty_string, s_test_connection_manager_proxy_envrionment_empty_string); static int s_test_connection_manager_single_http2_connection(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .http2 = true, .use_tls = true, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(1)); ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_single_http2_connection, s_test_connection_manager_single_http2_connection); static int s_test_connection_manager_single_http2_connection_failed(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* google don't support prior_knowledge, so, this will fail to create the connection. Check we are good when acquire * failed. 
*/ struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .http2 = true, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(1)); ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_UINT_EQUALS(1, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_connection_manager_single_http2_connection_failed, s_test_connection_manager_single_http2_connection_failed); static int s_test_connection_manager_single_http2_connection_with_settings(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 0}, }; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .http2 = true, .use_tls = true, .initial_settings_array = settings_array, .num_initial_settings = AWS_ARRAY_SIZE(settings_array), }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(1)); ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_connection_manager_single_http2_connection_with_settings, s_test_connection_manager_single_http2_connection_with_settings); static int s_test_connection_manager_many_connections(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 20, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(20); ASSERT_SUCCESS(s_wait_on_connection_reply_count(20)); ASSERT_SUCCESS(s_release_connections(20, false)); ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_many_connections, s_test_connection_manager_many_connections); static int s_test_connection_manager_many_http2_connections(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 20, .http2 = true, .use_tls = true, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(20); ASSERT_SUCCESS(s_wait_on_connection_reply_count(20)); ASSERT_SUCCESS(s_release_connections(20, false)); ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_many_http2_connections, s_test_connection_manager_many_http2_connections); static int s_test_connection_manager_acquire_release(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 4, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(20); ASSERT_SUCCESS(s_wait_on_connection_reply_count(4)); for (size_t i = 4; i < 20; ++i) { ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_SUCCESS(s_wait_on_connection_reply_count(i + 1)); } ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_acquire_release, s_test_connection_manager_acquire_release); static int s_test_connection_manager_close_and_release(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 4, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); 
s_acquire_connections(20); ASSERT_SUCCESS(s_wait_on_connection_reply_count(4)); for (size_t i = 4; i < 20; ++i) { ASSERT_SUCCESS(s_release_connections(1, i % 1 == 0)); ASSERT_SUCCESS(s_wait_on_connection_reply_count(i + 1)); } ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_close_and_release, s_test_connection_manager_close_and_release); static int s_test_connection_manager_acquire_release_mix(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); for (size_t i = 0; i < 10; ++i) { s_acquire_connections(2); ASSERT_SUCCESS(s_wait_on_connection_reply_count(i + 1)); ASSERT_SUCCESS(s_release_connections(1, i % 1 == 0)); } ASSERT_SUCCESS(s_wait_on_connection_reply_count(15)); for (size_t i = 15; i < 20; ++i) { ASSERT_SUCCESS(s_release_connections(1, i % 1 == 0)); ASSERT_SUCCESS(s_wait_on_connection_reply_count(i + 1)); } ASSERT_UINT_EQUALS(0, s_tester.connection_errors); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_acquire_release_mix, s_test_connection_manager_acquire_release_mix); static int s_aws_http_connection_manager_create_connection_sync_mock( const struct aws_http_client_connection_options *options) { struct cm_tester *tester = &s_tester; size_t next_connection_id = aws_atomic_fetch_add(&tester->next_connection_id, 1); ASSERT_SUCCESS(aws_mutex_lock(&tester->lock)); tester->release_connection_fn = options->on_shutdown; ASSERT_SUCCESS(aws_mutex_unlock(&tester->lock)); /* Verify that any proxy options have been propagated to the connection attempt */ if (tester->verify_proxy_options) { ASSERT_BIN_ARRAYS_EQUALS( tester->verify_proxy_options->host.ptr, tester->verify_proxy_options->host.len, options->proxy_options->host.ptr, options->proxy_options->host.len); ASSERT_TRUE(options->proxy_options->port == tester->verify_proxy_options->port); ASSERT_UINT_EQUALS(options->proxy_options->connection_type, tester->verify_proxy_options->connection_type); } struct mock_connection *connection = NULL; if (next_connection_id < aws_array_list_length(&tester->mock_connections)) { aws_array_list_get_at(&tester->mock_connections, &connection, next_connection_id); } if (connection) { if (connection->result == AWS_NCRT_SUCCESS) { options->on_setup((struct aws_http_connection *)connection, AWS_ERROR_SUCCESS, options->user_data); } else if (connection->result == AWS_NCRT_ERROR_VIA_CALLBACK) { options->on_setup(NULL, AWS_ERROR_HTTP_UNKNOWN, options->user_data); } if (connection->result != AWS_NCRT_ERROR_FROM_CREATE) { return AWS_OP_SUCCESS; } } return aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); } static void s_aws_http_connection_manager_release_connection_sync_mock(struct aws_http_connection *connection) { (void)connection; struct cm_tester *tester = &s_tester; tester->release_connection_fn(connection, AWS_ERROR_SUCCESS, tester->connection_manager); } static void s_aws_http_connection_manager_close_connection_sync_mock(struct aws_http_connection *connection) { (void)connection; } static bool s_aws_http_connection_manager_is_connection_available_sync_mock( const struct aws_http_connection *connection) { (void)connection; struct mock_connection *proxy = (struct mock_connection *)(void *)connection; return !proxy->is_closed_on_release; } static bool s_aws_http_connection_manager_is_callers_thread_sync_mock(struct aws_channel 
*channel) { (void)channel; return true; } static struct aws_channel *s_aws_http_connection_manager_connection_get_channel_sync_mock( struct aws_http_connection *connection) { (void)connection; return (struct aws_channel *)1; } static enum aws_http_version s_aws_http_connection_manager_connection_get_version_sync_mock( const struct aws_http_connection *connection) { (void)connection; return AWS_HTTP_VERSION_1_1; } static struct aws_http_connection_manager_system_vtable s_synchronous_mocks = { .aws_http_client_connect = s_aws_http_connection_manager_create_connection_sync_mock, .aws_http_connection_release = s_aws_http_connection_manager_release_connection_sync_mock, .aws_http_connection_close = s_aws_http_connection_manager_close_connection_sync_mock, .aws_http_connection_new_requests_allowed = s_aws_http_connection_manager_is_connection_available_sync_mock, .aws_high_res_clock_get_ticks = aws_high_res_clock_get_ticks, .aws_http_connection_get_channel = s_aws_http_connection_manager_connection_get_channel_sync_mock, .aws_channel_thread_is_callers_thread = s_aws_http_connection_manager_is_callers_thread_sync_mock, .aws_http_connection_get_version = s_aws_http_connection_manager_connection_get_version_sync_mock, }; static int s_test_connection_manager_acquire_release_mix_synchronous(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .mock_table = &s_synchronous_mocks, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); for (size_t i = 0; i < 20; ++i) { s_add_mock_connections(1, AWS_NCRT_SUCCESS, i % 1 == 0); } for (size_t i = 0; i < 10; ++i) { s_acquire_connections(2); ASSERT_SUCCESS(s_wait_on_connection_reply_count(i + 1)); ASSERT_SUCCESS(s_release_connections(1, false)); } ASSERT_SUCCESS(s_wait_on_connection_reply_count(15)); for (size_t i = 15; i < 20; ++i) { ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_SUCCESS(s_wait_on_connection_reply_count(i + 1)); } ASSERT_TRUE(s_tester.connection_errors == 0); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_connection_manager_acquire_release_mix_synchronous, s_test_connection_manager_acquire_release_mix_synchronous); static int s_test_connection_manager_connect_callback_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .mock_table = &s_synchronous_mocks, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_add_mock_connections(5, AWS_NCRT_ERROR_VIA_CALLBACK, false); s_acquire_connections(5); ASSERT_SUCCESS(s_wait_on_connection_reply_count(5)); ASSERT_TRUE(s_tester.connection_errors == 5); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_connect_callback_failure, s_test_connection_manager_connect_callback_failure); static int s_test_connection_manager_connect_immediate_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .mock_table = &s_synchronous_mocks, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); s_add_mock_connections(5, AWS_NCRT_ERROR_FROM_CREATE, false); s_acquire_connections(5); ASSERT_SUCCESS(s_wait_on_connection_reply_count(5)); ASSERT_TRUE(s_tester.connection_errors == 5); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_connect_immediate_failure, s_test_connection_manager_connect_immediate_failure); static int 
s_test_connection_manager_proxy_setup_shutdown(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_options proxy_options = { .host = aws_byte_cursor_from_c_str("127.0.0.1"), .port = 3280, }; struct cm_tester_options options = { .allocator = allocator, .max_connections = 1, .mock_table = &s_synchronous_mocks, .proxy_options = &proxy_options, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); ASSERT_SUCCESS(s_cm_tester_clean_up()); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_proxy_setup_shutdown, s_test_connection_manager_proxy_setup_shutdown); static struct aws_http_connection_manager_system_vtable s_idle_mocks = { .aws_http_client_connect = s_aws_http_connection_manager_create_connection_sync_mock, .aws_http_connection_release = s_aws_http_connection_manager_release_connection_sync_mock, .aws_http_connection_close = s_aws_http_connection_manager_close_connection_sync_mock, .aws_http_connection_new_requests_allowed = s_aws_http_connection_manager_is_connection_available_sync_mock, .aws_high_res_clock_get_ticks = s_tester_get_mock_time, .aws_http_connection_get_channel = s_aws_http_connection_manager_connection_get_channel_sync_mock, .aws_channel_thread_is_callers_thread = s_aws_http_connection_manager_is_callers_thread_sync_mock, .aws_http_connection_get_version = s_aws_http_connection_manager_connection_get_version_sync_mock, }; static int s_register_acquired_connections(struct aws_array_list *seen_connections) { aws_mutex_lock(&s_tester.lock); size_t acquired_count = aws_array_list_length(&s_tester.connections); for (size_t i = 0; i < acquired_count; ++i) { struct aws_http_connection *connection = NULL; aws_array_list_get_at(&s_tester.connections, &connection, i); aws_array_list_push_back(seen_connections, &connection); } aws_mutex_unlock(&s_tester.lock); return AWS_OP_SUCCESS; } static size_t s_get_acquired_connections_seen_count(struct aws_array_list *seen_connections) { size_t actual_seen_count = 0; aws_mutex_lock(&s_tester.lock); size_t seen_count = aws_array_list_length(seen_connections); size_t acquired_count = aws_array_list_length(&s_tester.connections); for (size_t i = 0; i < acquired_count; ++i) { struct aws_http_connection *acquired_connection = NULL; aws_array_list_get_at(&s_tester.connections, &acquired_connection, i); for (size_t j = 0; j < seen_count; ++j) { struct aws_http_connection *seen_connection = NULL; aws_array_list_get_at(seen_connections, &seen_connection, j); if (seen_connection == acquired_connection) { actual_seen_count++; } } } aws_mutex_unlock(&s_tester.lock); return actual_seen_count; } static int s_test_connection_manager_idle_culling_single(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list seen_connections; AWS_ZERO_STRUCT(seen_connections); ASSERT_SUCCESS(aws_array_list_init_dynamic(&seen_connections, allocator, 10, sizeof(struct aws_http_connection *))); uint64_t now = 0; struct cm_tester_options options = { .allocator = allocator, .max_connections = 1, .mock_table = &s_idle_mocks, .max_connection_idle_in_ms = 1000, .starting_mock_time = now, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); /* add enough fake connections to cover all the acquires */ s_add_mock_connections(2, AWS_NCRT_SUCCESS, false); /* acquire some connections */ s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(1)); /* remember what connections we acquired */ s_register_acquired_connections(&seen_connections); /* release the connections */ s_release_connections(1, false); /* advance fake time 
enough to cause the connections to be culled, also sleep for real to give the cull task * a chance to run in the real event loop */ uint64_t one_sec_in_nanos = aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); s_tester_set_mock_time(now + one_sec_in_nanos); aws_thread_current_sleep(2 * one_sec_in_nanos); /* acquire some connections */ s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(2)); /* make sure the connections acquired were not ones that we expected to cull */ ASSERT_INT_EQUALS(s_get_acquired_connections_seen_count(&seen_connections), 0); /* release everything and clean up */ s_release_connections(1, false); ASSERT_SUCCESS(s_cm_tester_clean_up()); aws_array_list_clean_up(&seen_connections); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_idle_culling_single, s_test_connection_manager_idle_culling_single); static int s_test_connection_manager_idle_culling_many(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list seen_connections; AWS_ZERO_STRUCT(seen_connections); ASSERT_SUCCESS(aws_array_list_init_dynamic(&seen_connections, allocator, 10, sizeof(struct aws_http_connection *))); uint64_t now = 0; struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .mock_table = &s_idle_mocks, .max_connection_idle_in_ms = 1000, .starting_mock_time = now, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); /* add enough fake connections to cover all the acquires */ s_add_mock_connections(10, AWS_NCRT_SUCCESS, false); /* acquire some connections */ s_acquire_connections(5); ASSERT_SUCCESS(s_wait_on_connection_reply_count(5)); /* remember what connections we acquired */ s_register_acquired_connections(&seen_connections); /* release the connections */ s_release_connections(5, false); /* advance fake time enough to cause the connections to be culled, also sleep for real to give the cull task * a chance to run in the real event loop */ uint64_t one_sec_in_nanos = aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); s_tester_set_mock_time(now + one_sec_in_nanos); aws_thread_current_sleep(2 * one_sec_in_nanos); /* acquire some connections */ s_acquire_connections(5); ASSERT_SUCCESS(s_wait_on_connection_reply_count(10)); /* make sure the connections acquired were not ones that we expected to cull */ ASSERT_INT_EQUALS(s_get_acquired_connections_seen_count(&seen_connections), 0); /* release everything and clean up */ s_release_connections(5, false); ASSERT_SUCCESS(s_cm_tester_clean_up()); aws_array_list_clean_up(&seen_connections); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_idle_culling_many, s_test_connection_manager_idle_culling_many); static int s_test_connection_manager_idle_culling_mixture(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_array_list seen_connections; AWS_ZERO_STRUCT(seen_connections); ASSERT_SUCCESS(aws_array_list_init_dynamic(&seen_connections, allocator, 10, sizeof(struct aws_http_connection *))); uint64_t now = 0; struct cm_tester_options options = { .allocator = allocator, .max_connections = 10, .mock_table = &s_idle_mocks, .max_connection_idle_in_ms = 1000, .starting_mock_time = now, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); /* add enough fake connections to cover all the acquires */ s_add_mock_connections(15, AWS_NCRT_SUCCESS, false); /* acquire some connections */ s_acquire_connections(10); ASSERT_SUCCESS(s_wait_on_connection_reply_count(10)); /* remember what connections we acquired */ 
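/*
 * Illustrative note on the mock-time culling mechanism (inferred from the surrounding code): s_idle_mocks
 * routes aws_high_res_clock_get_ticks to s_tester_get_mock_time, so the manager's idle-cull logic measures
 * idleness against the fake clock while the cull task itself still runs on the real event loop. Each culling
 * test therefore does two things to trigger a cull:
 *
 *   s_tester_set_mock_time(now + one_sec_in_nanos);    advance fake idle time past max_connection_idle_in_ms
 *   aws_thread_current_sleep(2 * one_sec_in_nanos);    give the real event loop time to run the cull task
 *
 * Recording the first batch of acquired connections in seen_connections and later checking
 * s_get_acquired_connections_seen_count() verifies how many of them survived: 0 when every idle connection was
 * culled and replaced, or 5 later in this test, where the releases are staggered across two mock timestamps so
 * only half of the pool ages past the idle limit.
 */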
s_register_acquired_connections(&seen_connections); /* * release the connections * Previous tests created situations where the entire block of idle connections end up getting culled. We also * want to create a situation where just some of the connections get culled. */ s_release_connections(5, false); s_tester_set_mock_time(now + 1); s_release_connections(5, false); s_tester_set_mock_time(now); uint64_t one_sec_in_nanos = aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); /* * advance fake time enough to cause half of the connections to be culled, also sleep for real to give the cull task * a chance to run in the real event loop. */ s_tester_set_mock_time(now + one_sec_in_nanos); aws_thread_current_sleep(2 * one_sec_in_nanos); /* acquire some connections */ s_acquire_connections(10); ASSERT_SUCCESS(s_wait_on_connection_reply_count(20)); /* make sure the connections acquired are half old and half new */ ASSERT_INT_EQUALS(s_get_acquired_connections_seen_count(&seen_connections), 5); /* release everything and clean up */ s_release_connections(10, false); ASSERT_SUCCESS(s_cm_tester_clean_up()); aws_array_list_clean_up(&seen_connections); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_idle_culling_mixture, s_test_connection_manager_idle_culling_mixture); /** * Once upon time, if the culling test is running while the connection manager is shutting, the refcount will be messed * up (back from zero to one and trigger the destroy to happen twice) */ static int s_test_connection_manager_idle_culling_refcount(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); for (size_t i = 0; i < 10; i++) { /* To reproduce that more stable, repeat it 10 times. */ struct cm_tester_options options = { .allocator = allocator, .max_connections = 10, .max_connection_idle_in_ms = 10, .self_lib_init = true, }; ASSERT_SUCCESS(s_cm_tester_init(&options)); uint64_t ten_ms_in_nanos = aws_timestamp_convert(10, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); /* Don't ask me how I got the number. :) */ aws_thread_current_sleep(ten_ms_in_nanos - 10000); ASSERT_SUCCESS(s_cm_tester_clean_up()); } aws_http_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_connection_manager_idle_culling_refcount, s_test_connection_manager_idle_culling_refcount); /** * Proxy integration tests. Maybe we should move this to another file. But let's do it later. Someday. 
* AWS_TEST_HTTP_PROXY_HOST - host address of the proxy to use for tests that make open connections to the proxy * AWS_TEST_HTTP_PROXY_PORT - port to use for tests that make open connections to the proxy * AWS_TEST_HTTP_PROXY_URL - whole URL to use for tests that make open connections to the proxy * AWS_TEST_HTTPS_PROXY_HOST - host address of the proxy to use for tests that make tls-protected connections to the * proxy * AWS_TEST_HTTPS_PROXY_PORT - port to use for tests that make tls-protected connections to the proxy * AWS_TEST_HTTPS_PROXY_URL - whole URL to use for tests that make tls-protected connections to the proxy * AWS_TEST_HTTP_PROXY_BASIC_HOST - host address of the proxy to use for tests that make open connections to the proxy * with basic authentication * AWS_TEST_HTTP_PROXY_BASIC_PORT - port to use for tests that make open connections to the proxy with basic * authentication * AWS_TEST_HTTP_PROXY_BASIC_URL - whole URL to use for tests that make open connections to the proxy with basic * authentication * AWS_TEST_BASIC_AUTH_USERNAME - username to use when using basic authentication to the proxy * AWS_TEST_BASIC_AUTH_PASSWORD - password to use when using basic authentication to the proxy * * AWS_TEST_TLS_CERT_PATH - file path to certificate used to initialize the tls context of the mqtt connection * AWS_TEST_TLS_KEY_PATH - file path to the key used to initialize the tls context of the mqtt connection * AWS_TEST_TLS_ROOT_CERT_PATH - file path to the root CA used to initialize the tls context of the mqtt connection */ struct proxy_integration_configurations { struct aws_string *http_proxy_host; struct aws_string *http_proxy_port; struct aws_string *http_proxy_url; struct aws_string *https_proxy_host; struct aws_string *https_proxy_port; struct aws_string *https_proxy_url; struct aws_string *http_proxy_basic_host; struct aws_string *http_proxy_basic_port; struct aws_string *http_proxy_basic_url; struct aws_string *basic_auth_username; struct aws_string *basic_auth_password; struct aws_string *tls_cert_path; struct aws_string *tls_key_path; struct aws_string *tls_root_cert_path; struct aws_string *https_proxy_host_h2; struct aws_string *https_proxy_port_h2; struct aws_string *https_proxy_url_h2; }; enum proxy_test_type { FORWARDING = 0, TUNNELING_HTTP = 1, TUNNELING_HTTPS = 2, TUNNELING_DOUBLE_TLS = 3, LEGACY_HTTP = 4, LEGACY_HTTPS = 5, }; AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_host_env_var, "AWS_TEST_HTTP_PROXY_HOST"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_port_env_var, "AWS_TEST_HTTP_PROXY_PORT"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_url_env_var, "AWS_TEST_HTTP_PROXY_URL"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_host_env_var, "AWS_TEST_HTTPS_PROXY_HOST"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_port_env_var, "AWS_TEST_HTTPS_PROXY_PORT"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_url_env_var, "AWS_TEST_HTTPS_PROXY_URL"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_basic_host_env_var, "AWS_TEST_HTTP_PROXY_BASIC_HOST"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_basic_port_env_var, "AWS_TEST_HTTP_PROXY_BASIC_PORT"); AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_basic_url_env_var, "AWS_TEST_HTTP_PROXY_BASIC_URL"); AWS_STATIC_STRING_FROM_LITERAL(s_basic_auth_username_env_var, "AWS_TEST_BASIC_AUTH_USERNAME"); AWS_STATIC_STRING_FROM_LITERAL(s_basic_auth_password_env_var, "AWS_TEST_BASIC_AUTH_PASSWORD"); AWS_STATIC_STRING_FROM_LITERAL(s_tls_cert_path_env_var, "AWS_TEST_TLS_CERT_PATH"); AWS_STATIC_STRING_FROM_LITERAL(s_tls_key_path_env_var, 
"AWS_TEST_TLS_KEY_PATH"); AWS_STATIC_STRING_FROM_LITERAL(s_tls_root_cert_path_env_var, "AWS_TEST_TLS_ROOT_CERT_PATH"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_host_h2_env_var, "AWS_TEST_HTTPS_H2_PROXY_HOST"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_port_h2_env_var, "AWS_TEST_HTTPS_H2_PROXY_PORT"); AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_url_h2_env_var, "AWS_TEST_HTTPS_H2_PROXY_URL"); static int s_get_proxy_environment_configurations( struct aws_allocator *allocator, struct proxy_integration_configurations *configs) { /* get the envrionment configurations, and fail if any one is not set */ if (aws_get_environment_value(allocator, s_http_proxy_host_env_var, &configs->http_proxy_host) || configs->http_proxy_host == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_http_proxy_port_env_var, &configs->http_proxy_port) || configs->http_proxy_port == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_http_proxy_url_env_var, &configs->http_proxy_url) || configs->http_proxy_url == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_https_proxy_host_env_var, &configs->https_proxy_host) || configs->https_proxy_host == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_https_proxy_port_env_var, &configs->https_proxy_port) || configs->https_proxy_port == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_https_proxy_url_env_var, &configs->https_proxy_url) || configs->https_proxy_url == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_http_proxy_basic_host_env_var, &configs->http_proxy_basic_host) || configs->http_proxy_basic_host == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_http_proxy_basic_port_env_var, &configs->http_proxy_basic_port) || configs->http_proxy_basic_port == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_http_proxy_basic_url_env_var, &configs->http_proxy_basic_url) || configs->http_proxy_basic_url == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_basic_auth_username_env_var, &configs->basic_auth_username) || configs->basic_auth_username == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_basic_auth_password_env_var, &configs->basic_auth_password) || configs->basic_auth_password == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_tls_cert_path_env_var, &configs->tls_cert_path) || configs->tls_cert_path == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_tls_key_path_env_var, &configs->tls_key_path) || configs->tls_key_path == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_tls_root_cert_path_env_var, &configs->tls_root_cert_path) || configs->tls_root_cert_path == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_https_proxy_host_h2_env_var, &configs->https_proxy_host_h2) || configs->https_proxy_host_h2 == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_https_proxy_port_h2_env_var, &configs->https_proxy_port_h2) || configs->https_proxy_port_h2 == NULL) { return AWS_OP_ERR; } if (aws_get_environment_value(allocator, s_https_proxy_url_h2_env_var, &configs->https_proxy_url_h2) || configs->https_proxy_url_h2 == NULL) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_proxy_environment_configurations_clean_up(struct proxy_integration_configurations *configs) { aws_string_destroy(configs->http_proxy_host); 
aws_string_destroy(configs->http_proxy_port); aws_string_destroy(configs->http_proxy_url); aws_string_destroy(configs->https_proxy_host); aws_string_destroy(configs->https_proxy_port); aws_string_destroy(configs->https_proxy_url); aws_string_destroy(configs->http_proxy_basic_host); aws_string_destroy(configs->http_proxy_basic_port); aws_string_destroy(configs->http_proxy_basic_url); aws_string_destroy(configs->basic_auth_username); aws_string_destroy(configs->basic_auth_password); aws_string_destroy(configs->tls_cert_path); aws_string_destroy(configs->tls_key_path); aws_string_destroy(configs->tls_root_cert_path); aws_string_destroy(configs->https_proxy_host_h2); aws_string_destroy(configs->https_proxy_port_h2); aws_string_destroy(configs->https_proxy_url_h2); } static int s_response_status_code = 0; static bool s_is_proxy_request_complete(void *context) { (void)context; struct cm_tester *tester = &s_tester; return tester->proxy_request_complete; } static int s_wait_on_proxy_request_complete(void) { struct cm_tester *tester = &s_tester; ASSERT_SUCCESS(aws_mutex_lock(&tester->lock)); int signal_error = aws_condition_variable_wait_pred(&tester->signal, &tester->lock, s_is_proxy_request_complete, tester); ASSERT_SUCCESS(aws_mutex_unlock(&tester->lock)); return signal_error; } static int s_aws_http_on_incoming_header_block_done_proxy_test( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)header_block; (void)user_data; struct cm_tester *tester = &s_tester; if (aws_http_stream_get_incoming_response_status(stream, &s_response_status_code) == AWS_OP_SUCCESS) { aws_mutex_lock(&tester->lock); tester->proxy_request_successful = s_response_status_code == 200; aws_mutex_unlock(&tester->lock); } return AWS_OP_SUCCESS; } static void s_aws_http_on_stream_complete_proxy_test(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; (void)error_code; (void)user_data; struct cm_tester *tester = &s_tester; aws_mutex_lock(&tester->lock); tester->proxy_request_complete = true; aws_condition_variable_notify_one(&tester->signal); aws_mutex_unlock(&tester->lock); } static struct aws_byte_cursor s_get_proxy_host_for_test( struct proxy_integration_configurations *configs, enum proxy_test_type proxy_test_type, enum aws_http_proxy_authentication_type auth_type, bool h2) { struct aws_string *host_string; if (h2) { host_string = configs->https_proxy_host; } else if (auth_type == AWS_HPAT_BASIC) { host_string = configs->http_proxy_basic_host; } else if (proxy_test_type == TUNNELING_DOUBLE_TLS) { host_string = configs->https_proxy_host; } else { host_string = configs->http_proxy_host; } return aws_byte_cursor_from_string(host_string); } static uint32_t s_get_proxy_port_for_test( struct proxy_integration_configurations *configs, enum proxy_test_type proxy_test_type, enum aws_http_proxy_authentication_type auth_type, bool h2) { struct aws_string *port_string; if (h2) { port_string = configs->https_proxy_port_h2; } else if (auth_type == AWS_HPAT_BASIC) { port_string = configs->http_proxy_basic_port; } else if (proxy_test_type == TUNNELING_DOUBLE_TLS) { port_string = configs->https_proxy_port; } else { port_string = configs->http_proxy_port; } return (uint32_t)atoi(aws_string_c_str(port_string)); } static struct aws_string *s_get_proxy_url_for_test( struct proxy_integration_configurations *configs, enum proxy_test_type proxy_test_type, enum aws_http_proxy_authentication_type auth_type) { if (auth_type == AWS_HPAT_BASIC) { return 
configs->http_proxy_basic_url; } if (proxy_test_type == TUNNELING_DOUBLE_TLS) { return configs->https_proxy_url; } return configs->http_proxy_url; } static int s_get_proxy_connection_type_for_test(enum proxy_test_type proxy_test_type) { if (proxy_test_type == FORWARDING) { return AWS_HPCT_HTTP_FORWARD; } if (proxy_test_type == TUNNELING_DOUBLE_TLS || proxy_test_type == TUNNELING_HTTP || proxy_test_type == TUNNELING_HTTPS) { return AWS_HPCT_HTTP_TUNNEL; } return AWS_HPCT_HTTP_LEGACY; } static bool s_get_use_tls_from_proxy_test_type(enum proxy_test_type test_type) { if (test_type == FORWARDING || test_type == LEGACY_HTTP || test_type == TUNNELING_HTTP) { return false; } return true; } static int s_get_tls_options_from_proxy_test_type( struct aws_allocator *allocator, enum proxy_test_type proxy_test_type, struct aws_tls_connection_options *proxy_tls_options, struct aws_byte_cursor host_name) { if (proxy_test_type == TUNNELING_DOUBLE_TLS) { struct aws_tls_ctx *tls_ctx = NULL; struct aws_tls_ctx_options tls_ctx_options; AWS_ZERO_STRUCT(tls_ctx_options); /* create a default tls options */ aws_tls_ctx_options_init_default_client(&tls_ctx_options, allocator); aws_tls_ctx_options_set_verify_peer(&tls_ctx_options, false); tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); aws_tls_ctx_options_clean_up(&tls_ctx_options); if (!tls_ctx) { return AWS_OP_ERR; } aws_tls_connection_options_init_from_ctx(proxy_tls_options, tls_ctx); /* tls options hold a ref to the ctx */ aws_tls_ctx_release(tls_ctx); if (aws_tls_connection_options_set_server_name(proxy_tls_options, allocator, &host_name)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_proxy_integration_test_helper_general( struct aws_allocator *allocator, enum proxy_test_type proxy_test_type, enum aws_http_proxy_authentication_type auth_type, bool use_env, bool configured_tls, bool h2) { aws_http_library_init(allocator); struct proxy_integration_configurations configs; AWS_ZERO_STRUCT(configs); ASSERT_SUCCESS(s_get_proxy_environment_configurations(allocator, &configs)); /* not creating new strings */ struct aws_tls_connection_options proxy_tls_options; AWS_ZERO_STRUCT(proxy_tls_options); ASSERT_SUCCESS(s_get_tls_options_from_proxy_test_type( allocator, proxy_test_type, &proxy_tls_options, s_get_proxy_host_for_test(&configs, proxy_test_type, auth_type, h2))); struct aws_http_proxy_options proxy_options = { .host = s_get_proxy_host_for_test(&configs, proxy_test_type, auth_type, h2), .port = s_get_proxy_port_for_test(&configs, proxy_test_type, auth_type, h2), .connection_type = s_get_proxy_connection_type_for_test(proxy_test_type), .tls_options = proxy_test_type == TUNNELING_DOUBLE_TLS ? &proxy_tls_options : NULL, .auth_type = auth_type, .auth_username = aws_byte_cursor_from_string(configs.basic_auth_username), .auth_password = aws_byte_cursor_from_string(configs.basic_auth_password), }; if (use_env) { /* set the environment variables */ struct aws_string *proxy_url = s_get_proxy_url_for_test(&configs, proxy_test_type, auth_type); ASSERT_SUCCESS(aws_set_environment_value(s_http_proxy_env_var, proxy_url)); ASSERT_SUCCESS(aws_set_environment_value(s_https_proxy_env_var, proxy_url)); } struct cm_tester_options options = { .allocator = allocator, .max_connections = 5, .use_proxy_env = use_env, .env_configured_tls = configured_tls ? &proxy_tls_options : NULL, .proxy_options = use_env ? 
NULL : &proxy_options, .use_tls = s_get_use_tls_from_proxy_test_type(proxy_test_type), .self_lib_init = true, }; struct aws_http2_setting settings_array[] = { { .id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 0, }, }; if (h2) { options.http2 = true; options.initial_settings_array = settings_array; options.num_initial_settings = AWS_ARRAY_SIZE(settings_array); } ASSERT_SUCCESS(s_cm_tester_init(&options)); s_acquire_connections(1); ASSERT_SUCCESS(s_wait_on_connection_reply_count(1)); /* Have a connection now, need to make a request and verify the request made successfully */ struct aws_http_message *request = aws_http_message_new_request(allocator); aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET")); aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/")); struct aws_http_header host_header = { .name = aws_byte_cursor_from_c_str("Host"), .value = aws_byte_cursor_from_c_str("www.google.com"), }; aws_http_message_add_header(request, host_header); struct aws_http_header accept_header = { .name = aws_byte_cursor_from_c_str("Accept"), .value = aws_byte_cursor_from_c_str("*/*"), }; aws_http_message_add_header(request, accept_header); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .user_data = &s_tester, .on_response_header_block_done = s_aws_http_on_incoming_header_block_done_proxy_test, .on_complete = s_aws_http_on_stream_complete_proxy_test, }; struct aws_http_connection *connection = NULL; ASSERT_SUCCESS(aws_array_list_front(&s_tester.connections, &connection)); struct aws_http_stream *stream = aws_http_connection_make_request(connection, &request_options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); ASSERT_SUCCESS(s_wait_on_proxy_request_complete()); ASSERT_TRUE(s_response_status_code == 200); aws_http_stream_release(stream); aws_http_message_destroy(request); aws_tls_connection_options_clean_up(&proxy_tls_options); s_proxy_environment_configurations_clean_up(&configs); ASSERT_SUCCESS(s_release_connections(1, false)); ASSERT_SUCCESS(s_cm_tester_clean_up()); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } static int s_proxy_integration_test_helper( struct aws_allocator *allocator, enum proxy_test_type proxy_test_type, enum aws_http_proxy_authentication_type auth_type, bool use_env, bool configured_tls) { return s_proxy_integration_test_helper_general( allocator, proxy_test_type, auth_type, use_env, configured_tls, false); } static int s_test_connection_manager_proxy_integration_forwarding_proxy_no_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, FORWARDING, AWS_HPAT_NONE, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_forwarding_proxy_no_auth, s_test_connection_manager_proxy_integration_forwarding_proxy_no_auth); static int s_test_connection_manager_proxy_integration_forwarding_proxy_no_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, FORWARDING, AWS_HPAT_NONE, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_forwarding_proxy_no_auth_env, s_test_connection_manager_proxy_integration_forwarding_proxy_no_auth_env); static int s_test_connection_manager_proxy_integration_legacy_http_no_auth(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTP, AWS_HPAT_NONE, false 
/*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_legacy_http_no_auth, s_test_connection_manager_proxy_integration_legacy_http_no_auth); static int s_test_connection_manager_proxy_integration_legacy_http_no_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTP, AWS_HPAT_NONE, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_legacy_http_no_auth_env, s_test_connection_manager_proxy_integration_legacy_http_no_auth_env); static int s_test_connection_manager_proxy_integration_legacy_https_no_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTPS, AWS_HPAT_NONE, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_legacy_https_no_auth, s_test_connection_manager_proxy_integration_legacy_https_no_auth); static int s_test_connection_manager_proxy_integration_legacy_https_no_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTPS, AWS_HPAT_NONE, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_legacy_https_no_auth_env, s_test_connection_manager_proxy_integration_legacy_https_no_auth_env); static int s_test_connection_manager_proxy_integration_tunneling_proxy_http_no_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTP, AWS_HPAT_NONE, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_http_no_auth, s_test_connection_manager_proxy_integration_tunneling_proxy_http_no_auth); static int s_test_connection_manager_proxy_integration_tunneling_proxy_http_no_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTP, AWS_HPAT_NONE, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_http_no_auth_env, s_test_connection_manager_proxy_integration_tunneling_proxy_http_no_auth_env); static int s_test_connection_manager_proxy_integration_tunneling_proxy_https_no_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTPS, AWS_HPAT_NONE, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_https_no_auth, s_test_connection_manager_proxy_integration_tunneling_proxy_https_no_auth); static int s_test_connection_manager_proxy_integration_tunneling_proxy_https_no_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTPS, AWS_HPAT_NONE, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_https_no_auth_env, s_test_connection_manager_proxy_integration_tunneling_proxy_https_no_auth_env); static int s_test_connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_DOUBLE_TLS, AWS_HPAT_NONE, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth, 
s_test_connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth); static int s_test_connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_DOUBLE_TLS, AWS_HPAT_NONE, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_env, s_test_connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_env); static int s_test_connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_configured_tls_env( struct aws_allocator *allocator, void *ctx) { /* TLS set from settings instead of creating temporary one */ (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_DOUBLE_TLS, AWS_HPAT_NONE, true /*use_env*/, true /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_configured_tls_env, s_test_connection_manager_proxy_integration_tunneling_proxy_double_tls_no_auth_configured_tls_env); static int s_test_connection_manager_proxy_integration_forwarding_proxy_basic_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, FORWARDING, AWS_HPAT_BASIC, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_forwarding_proxy_basic_auth, s_test_connection_manager_proxy_integration_forwarding_proxy_basic_auth); static int s_test_connection_manager_proxy_integration_forwarding_proxy_basic_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, FORWARDING, AWS_HPAT_BASIC, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_forwarding_proxy_basic_auth_env, s_test_connection_manager_proxy_integration_forwarding_proxy_basic_auth_env); static int s_test_connection_manager_proxy_integration_legacy_http_basic_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTP, AWS_HPAT_BASIC, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_legacy_http_basic_auth, s_test_connection_manager_proxy_integration_legacy_http_basic_auth); static int s_test_connection_manager_proxy_integration_legacy_http_basic_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTP, AWS_HPAT_BASIC, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_legacy_http_basic_auth_env, s_test_connection_manager_proxy_integration_legacy_http_basic_auth_env); static int s_test_connection_manager_proxy_integration_legacy_https_basic_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTPS, AWS_HPAT_BASIC, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_legacy_https_basic_auth, s_test_connection_manager_proxy_integration_legacy_https_basic_auth); static int s_test_connection_manager_proxy_integration_legacy_https_basic_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, LEGACY_HTTPS, AWS_HPAT_BASIC, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( 
connection_manager_proxy_integration_legacy_https_basic_auth_env, s_test_connection_manager_proxy_integration_legacy_https_basic_auth_env); static int s_test_connection_manager_proxy_integration_tunneling_proxy_http_basic_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTP, AWS_HPAT_BASIC, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_http_basic_auth, s_test_connection_manager_proxy_integration_tunneling_proxy_http_basic_auth); static int s_test_connection_manager_proxy_integration_tunneling_proxy_http_basic_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTP, AWS_HPAT_BASIC, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_http_basic_auth_env, s_test_connection_manager_proxy_integration_tunneling_proxy_http_basic_auth_env); static int s_test_connection_manager_proxy_integration_tunneling_proxy_https_basic_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTPS, AWS_HPAT_BASIC, false /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_https_basic_auth, s_test_connection_manager_proxy_integration_tunneling_proxy_https_basic_auth); static int s_test_connection_manager_proxy_integration_tunneling_proxy_https_basic_auth_env( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper( allocator, TUNNELING_HTTPS, AWS_HPAT_BASIC, true /*use_env*/, false /*configured_tls*/); } AWS_TEST_CASE( connection_manager_proxy_integration_tunneling_proxy_https_basic_auth_env, s_test_connection_manager_proxy_integration_tunneling_proxy_https_basic_auth_env); static int s_test_h1_proxy_h2_host_tunneling_double_tls_no_auth(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_proxy_integration_test_helper_general( allocator, TUNNELING_DOUBLE_TLS, AWS_HPAT_NONE, false /*use_env*/, false /*configured_tls*/, true /*h2*/); } AWS_TEST_CASE(h1_proxy_h2_host_tunneling_double_tls_no_auth, s_test_h1_proxy_h2_host_tunneling_double_tls_no_auth); aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_connection_monitor.c000066400000000000000000001455261456575232400264770ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include static int s_test_http_connection_monitor_options_is_valid(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_http_connection_monitoring_options options; AWS_ZERO_STRUCT(options); ASSERT_FALSE(aws_http_connection_monitoring_options_is_valid(NULL)); ASSERT_FALSE(aws_http_connection_monitoring_options_is_valid(&options)); options.allowable_throughput_failure_interval_seconds = 5; ASSERT_FALSE(aws_http_connection_monitoring_options_is_valid(&options)); options.allowable_throughput_failure_interval_seconds = 0; options.minimum_throughput_bytes_per_second = 1000; ASSERT_FALSE(aws_http_connection_monitoring_options_is_valid(&options)); options.allowable_throughput_failure_interval_seconds = 2; ASSERT_TRUE(aws_http_connection_monitoring_options_is_valid(&options)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_options_is_valid, s_test_http_connection_monitor_options_is_valid); static void s_testing_channel_shutdown_callback(int error_code, void *user_data) { (void)error_code; (void)user_data; } enum monitor_test_event_type { MTET_EMPTY, MTET_STATS }; struct http_monitor_test_stats_event { uint64_t timestamp; uint64_t expected_throughput; struct aws_crt_statistics_socket socket_stats; struct aws_crt_statistics_http1_channel http_stats; enum monitor_test_event_type event_type; uint32_t expected_consecutive_failure_time_ms; }; struct http_request_info { struct aws_http_message *request; struct aws_http_stream *stream; struct aws_input_stream *body; bool response_completed; }; struct monitor_test_context { struct aws_allocator *allocator; struct testing_channel test_channel; struct aws_http_connection *connection; struct aws_crt_statistics_handler *monitor; struct aws_array_list requests; struct aws_byte_buf large_body_buf; }; static struct monitor_test_context s_test_context; static uint64_t s_clock_value = 0; static int s_mock_clock(uint64_t *timestamp) { *timestamp = s_clock_value; return AWS_OP_SUCCESS; } /* big enough to spill into a second io message when headers/method included */ #define TICK_BODY_SIZE 16384 #define MAX_BODY_SIZE (1024 * 1024) static int s_init_monitor_test(struct aws_allocator *allocator, struct aws_crt_statistics_handler *monitor) { aws_http_library_init(allocator); s_clock_value = 0; AWS_ZERO_STRUCT(s_test_context); s_test_context.allocator = allocator; struct aws_testing_channel_options test_channel_options = {.clock_fn = s_mock_clock}; testing_channel_init(&s_test_context.test_channel, allocator, &test_channel_options); s_test_context.test_channel.channel_shutdown = s_testing_channel_shutdown_callback; s_test_context.test_channel.channel_shutdown_user_data = &s_test_context; struct aws_http1_connection_options http1_options; AWS_ZERO_STRUCT(http1_options); struct aws_http_connection *connection = aws_http_connection_new_http1_1_client(allocator, true, SIZE_MAX, &http1_options); ASSERT_NOT_NULL(connection); connection->next_stream_id = 1; struct aws_channel_slot *slot = aws_channel_slot_new(s_test_context.test_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(s_test_context.test_channel.channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &connection->channel_handler)); connection->vtable->on_channel_handler_installed(&connection->channel_handler, slot); s_test_context.connection = connection; testing_channel_drain_queued_tasks(&s_test_context.test_channel); 
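/*
 * Illustrative note on how these tests drive time and what the monitor is expected to report (inferred from
 * the event tables below rather than from the monitor's documentation): s_mock_clock() simply returns
 * s_clock_value, so each test event sets s_clock_value to the event timestamp before draining channel tasks,
 * making every statistics interval fully deterministic. The expected_throughput values are consistent with
 * per-direction throughput measured against pending-stream time and then summed, e.g. for the "rw below" case:
 *
 *   read:  249 bytes / 0.500 s (pending_incoming_stream_ms = 500) = 498 B/s
 *   write: 125 bytes / 0.250 s (pending_outgoing_stream_ms = 250) = 500 B/s
 *   total                                                         = 998 B/s, matching expected_throughput
 *
 * which falls below minimum_throughput_bytes_per_second = 1000 and so begins accumulating
 * throughput_failure_time_ms; once that exceeds allowable_throughput_failure_interval_seconds the monitor is
 * expected to shut the channel down.
 */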
s_test_context.monitor = monitor; aws_channel_set_statistics_handler(s_test_context.test_channel.channel, s_test_context.monitor); ASSERT_SUCCESS( aws_array_list_init_dynamic(&s_test_context.requests, allocator, 1, sizeof(struct http_request_info))); aws_byte_buf_init(&s_test_context.large_body_buf, allocator, MAX_BODY_SIZE); memset(s_test_context.large_body_buf.buffer, '0', MAX_BODY_SIZE); s_test_context.large_body_buf.len = MAX_BODY_SIZE; return AWS_OP_SUCCESS; } static void s_clean_up_monitor_test(void) { size_t request_count = aws_array_list_length(&s_test_context.requests); for (size_t i = 0; i < request_count; ++i) { struct http_request_info *request_info = NULL; aws_array_list_get_at_ptr(&s_test_context.requests, (void **)&request_info, i); if (request_info) { aws_http_message_destroy(request_info->request); aws_http_stream_release(request_info->stream); aws_input_stream_release(request_info->body); } } aws_http_connection_release(s_test_context.connection); testing_channel_clean_up(&s_test_context.test_channel); aws_array_list_clean_up(&s_test_context.requests); aws_byte_buf_clean_up(&s_test_context.large_body_buf); aws_http_library_clean_up(); } static void s_apply_stats_event_to_testing_channel(struct http_monitor_test_stats_event *event) { (void)event; struct aws_channel *channel = s_test_context.test_channel.channel; struct aws_channel_slot *first_slot = aws_channel_get_first_slot(channel); struct aws_channel_handler *first_handler = first_slot->handler; struct testing_channel_handler *testing_handler = first_handler->impl; testing_handler->stats = event->socket_stats; struct aws_channel_handler *second_handler = first_slot->adj_right->handler; struct aws_http_connection *connection = second_handler->impl; struct aws_crt_statistics_http1_channel *h1_stats = aws_h1_connection_get_statistics(connection); *h1_stats = event->http_stats; } /* Test Pattern 1 (monitor calculations and side affect): Create a testing channel Create and attach a (1000, 1) http connection monitor Loop over test-specific event list: [(t_i, stats_i)] Inject socket and http statistics SetCurrentChannelTime(t_i) cause ProcessStatistics() to be invoked by running channel tasks verify monitor's state is as expected if met the monitoring failure condition verify the channel was shutdown */ static int s_do_http_monitoring_test( struct aws_allocator *allocator, struct aws_http_connection_monitoring_options *monitoring_options, struct http_monitor_test_stats_event *events, size_t event_count) { s_clock_value = 0; s_init_monitor_test( allocator, aws_crt_statistics_handler_new_http_connection_monitor(allocator, monitoring_options)); struct aws_statistics_handler_http_connection_monitor_impl *monitor_impl = s_test_context.monitor->impl; for (size_t i = 0; i < event_count; ++i) { struct http_monitor_test_stats_event *event = events + i; s_clock_value = aws_timestamp_convert(event->timestamp, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); switch (event->event_type) { case MTET_EMPTY: break; case MTET_STATS: s_apply_stats_event_to_testing_channel(event); break; } testing_channel_drain_queued_tasks(&s_test_context.test_channel); ASSERT_TRUE(monitor_impl->throughput_failure_time_ms == event->expected_consecutive_failure_time_ms); ASSERT_TRUE(monitor_impl->last_measured_throughput == event->expected_throughput); if (monitor_impl->throughput_failure_time_ms > aws_timestamp_convert( monitoring_options->allowable_throughput_failure_interval_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL)) { 
ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_test_context.test_channel)); } } s_clean_up_monitor_test(); return AWS_OP_SUCCESS; } static struct aws_http_connection_monitoring_options s_test_options = { .allowable_throughput_failure_interval_seconds = 1, .minimum_throughput_bytes_per_second = 1000, }; /* * A test where the combined read and write throughput stays above the threshold */ static struct http_monitor_test_stats_event s_test_rw_above_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 500, .bytes_written = 500, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 500, .bytes_written = 500, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, }; struct observer_cb_data { bool invoked; size_t nonce; size_t number_of_stats; struct aws_crt_statistics_socket socket_stats; struct aws_crt_statistics_http1_channel http_stats; }; static void s_observer_cb(size_t connection_nonce, const struct aws_array_list *stats, void *user_data) { struct observer_cb_data *cb_data = user_data; cb_data->invoked = true; cb_data->nonce = connection_nonce; cb_data->number_of_stats = aws_array_list_length(stats); for (size_t i = 0; i < cb_data->number_of_stats; ++i) { struct aws_crt_statistics_base *base_ptr = NULL; aws_array_list_get_at(stats, (void **)&base_ptr, i); if (base_ptr->category == AWSCRT_STAT_CAT_SOCKET) { cb_data->socket_stats = *(struct aws_crt_statistics_socket *)base_ptr; } if (base_ptr->category == AWSCRT_STAT_CAT_HTTP1_CHANNEL) { cb_data->http_stats = *(struct aws_crt_statistics_http1_channel *)base_ptr; } } } static int s_test_http_connection_monitor_rw_above(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct observer_cb_data cb_data; AWS_ZERO_STRUCT(cb_data); s_test_options.statistics_observer_fn = s_observer_cb; s_test_options.statistics_observer_user_data = &cb_data; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_rw_above_events, AWS_ARRAY_SIZE(s_test_rw_above_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); ASSERT_TRUE(cb_data.invoked); ASSERT_TRUE(cb_data.nonce > 0); ASSERT_UINT_EQUALS(2U, cb_data.number_of_stats); ASSERT_UINT_EQUALS(s_test_rw_above_events[0].socket_stats.bytes_written, cb_data.socket_stats.bytes_written); ASSERT_UINT_EQUALS(s_test_rw_above_events[0].socket_stats.bytes_read, cb_data.socket_stats.bytes_read); ASSERT_UINT_EQUALS( s_test_rw_above_events[0].http_stats.current_outgoing_stream_id, cb_data.http_stats.current_outgoing_stream_id); ASSERT_UINT_EQUALS( s_test_rw_above_events[0].http_stats.current_incoming_stream_id, cb_data.http_stats.current_incoming_stream_id); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_rw_above, s_test_http_connection_monitor_rw_above); /* * A test where the read throughput stays above the threshold */ static struct 
http_monitor_test_stats_event s_test_r_above_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 1000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 1000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, }; static int s_test_http_connection_monitor_r_above(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_r_above_events, AWS_ARRAY_SIZE(s_test_r_above_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_r_above, s_test_http_connection_monitor_r_above); /* * A test where the write throughput stays above the threshold */ static struct http_monitor_test_stats_event s_test_w_above_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 1000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 1000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, }; static int s_test_http_connection_monitor_w_above(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_w_above_events, AWS_ARRAY_SIZE(s_test_w_above_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_w_above, s_test_http_connection_monitor_w_above); /* * A more realistic test where the write throughput stays above and then the read throughput stays above * A fractional event in the middle contains both read and writes */ static struct http_monitor_test_stats_event s_test_write_then_read_above_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 1000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, { .event_type = MTET_STATS, .timestamp = 2 * 
AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 1000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, { .event_type = MTET_STATS, .timestamp = 3 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 100, .bytes_read = 500, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = 1000, .pending_outgoing_stream_ms = 200, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, { .event_type = MTET_STATS, .timestamp = 4 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 1000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, }; static int s_test_http_connection_monitor_write_then_read_above(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_write_then_read_above_events, AWS_ARRAY_SIZE(s_test_write_then_read_above_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_write_then_read_above, s_test_http_connection_monitor_write_then_read_above); /* * A test where the throughput is below the threshold but the requests do not last long enough to register the * failure. 
*/ static struct http_monitor_test_stats_event s_test_below_but_undetectable_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 100, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 100, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 100, .bytes_written = 100, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 200, }, { .event_type = MTET_STATS, .timestamp = 3 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 100, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 3, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 100, }, }; static int s_test_http_connection_monitor_below_but_undetectable(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_below_but_undetectable_events, AWS_ARRAY_SIZE(s_test_below_but_undetectable_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_http_connection_monitor_below_but_undetectable, s_test_http_connection_monitor_below_but_undetectable); /* * A test where we drop below the threshold with a combination of read and write io */ static struct http_monitor_test_stats_event s_test_below_rw_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 500, .bytes_written = 500, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1000, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 249, .bytes_written = 125, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = 500, .pending_outgoing_stream_ms = 250, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 500, .expected_throughput = 998, }, }; static int s_test_http_connection_monitor_rw_below(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_below_rw_events, AWS_ARRAY_SIZE(s_test_below_rw_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_rw_below, s_test_http_connection_monitor_rw_below); /* * A test where we drop below the threshold then recover */ static struct http_monitor_test_stats_event s_test_below_then_above_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 1500, }, .http_stats = { .category = 
AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1500, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 499, .bytes_written = 250, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = 500, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = AWS_TIMESTAMP_MILLIS, .expected_throughput = 999, }, { .event_type = MTET_STATS, .timestamp = 3 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 2000, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 2000, }, }; static int s_test_http_connection_monitor_below_then_above(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_below_then_above_events, AWS_ARRAY_SIZE(s_test_below_then_above_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_below_then_above, s_test_http_connection_monitor_below_then_above); /* * Test that verifies that the failure time is reset when there's no streams * */ static struct http_monitor_test_stats_event s_test_failure_reset_when_empty_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 1500, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1500, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 499, .bytes_written = 250, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = 500, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = AWS_TIMESTAMP_MILLIS, .expected_throughput = 999, }, { .event_type = MTET_STATS, .timestamp = 3 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 0, }, }; static int s_test_http_connection_monitor_failure_reset_when_empty(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_failure_reset_when_empty_events, AWS_ARRAY_SIZE(s_test_failure_reset_when_empty_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_http_connection_monitor_failure_reset_when_empty, s_test_http_connection_monitor_failure_reset_when_empty); /* * Edge case test when throughput calculations overflow */ static struct http_monitor_test_stats_event s_test_bytes_overflow_events[] = { { .event_type = 
MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = UINT64_MAX / 2 + 10, .bytes_written = UINT64_MAX / 2 + 10, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = UINT64_MAX, }, }; static int s_test_http_connection_monitor_bytes_overflow(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_bytes_overflow_events, AWS_ARRAY_SIZE(s_test_bytes_overflow_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_bytes_overflow, s_test_http_connection_monitor_bytes_overflow); /* * Another edge case test when throughput calculations overflow due to time scaling */ static struct http_monitor_test_stats_event s_test_time_overflow_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = UINT64_MAX / 2 + 10, .bytes_written = UINT64_MAX / 2 - 10, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = UINT64_MAX, }, }; static int s_test_http_connection_monitor_time_overflow(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_time_overflow_events, AWS_ARRAY_SIZE(s_test_time_overflow_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_time_overflow, s_test_http_connection_monitor_time_overflow); /* * Test that verifies the channel shuts down when we exceed the failure time threshold */ static struct http_monitor_test_stats_event s_test_shutdown_events[] = { { .event_type = MTET_STATS, .timestamp = AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_written = 1500, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = 0, .expected_throughput = 1500, }, { .event_type = MTET_STATS, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 499, .bytes_written = 250, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .pending_outgoing_stream_ms = 500, .current_incoming_stream_id = 1, .current_outgoing_stream_id = 1, }, .expected_consecutive_failure_time_ms = AWS_TIMESTAMP_MILLIS, .expected_throughput = 999, }, { .event_type = MTET_STATS, .timestamp = 3 * AWS_TIMESTAMP_MILLIS, .socket_stats = { .category = AWSCRT_STAT_CAT_SOCKET, .bytes_read = 250, }, .http_stats = { .category = AWSCRT_STAT_CAT_HTTP1_CHANNEL, .pending_incoming_stream_ms = AWS_TIMESTAMP_MILLIS, .current_incoming_stream_id = 1, }, .expected_consecutive_failure_time_ms = 2000, .expected_throughput = 250, }, }; static int s_test_http_connection_monitor_shutdown(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_monitoring_test( allocator, &s_test_options, s_test_shutdown_events, 
AWS_ARRAY_SIZE(s_test_shutdown_events)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_connection_monitor_shutdown, s_test_http_connection_monitor_shutdown); /* Pattern 2 (http statistics verification) Create an io testing channel (test_handler <-> http_handler) Create and attach a mock stats handler Loop over h1 connection events [(t_i, http_event)] SetCurrentChannelTime(t_i) ApplyEvent(http_event) where events include instances of verification of mock-captured http stat state */ enum monitor_test_http_stats_event_type { MTHSET_ADD_OUTGOING_STREAM, MTHSET_ADD_RESPONSE_DATA, MTHSET_FLUSH, MTHSET_TICK, MTHSET_VERIFY }; struct test_http_stats_event { uint64_t timestamp; enum monitor_test_http_stats_event_type event_type; const char *response_stream_data; size_t request_body_size; struct aws_crt_statistics_http1_channel expected_stats; }; struct mock_http_connection_monitor_impl { struct aws_http_connection_monitoring_options options; struct aws_crt_statistics_http1_channel last_seen_stats; }; static void s_mock_process_statistics( struct aws_crt_statistics_handler *handler, struct aws_crt_statistics_sample_interval *interval, struct aws_array_list *stats_list, void *context) { (void)interval; (void)context; struct mock_http_connection_monitor_impl *impl = handler->impl; size_t stats_count = aws_array_list_length(stats_list); for (size_t i = 0; i < stats_count; ++i) { struct aws_crt_statistics_base *stats_base = NULL; if (aws_array_list_get_at(stats_list, &stats_base, i)) { continue; } switch (stats_base->category) { case AWSCRT_STAT_CAT_HTTP1_CHANNEL: { struct aws_crt_statistics_http1_channel *http1_stats = (struct aws_crt_statistics_http1_channel *)stats_base; impl->last_seen_stats = *http1_stats; break; } default: break; } } } static void s_mock_destroy(struct aws_crt_statistics_handler *handler) { if (handler == NULL) { return; } aws_mem_release(handler->allocator, handler); } static uint64_t s_mock_get_report_interval_ms(struct aws_crt_statistics_handler *handler) { (void)handler; return 1000; } static struct aws_crt_statistics_handler_vtable s_http_mock_monitor_vtable = { .process_statistics = s_mock_process_statistics, .destroy = s_mock_destroy, .get_report_interval_ms = s_mock_get_report_interval_ms, }; static struct aws_crt_statistics_handler *s_aws_crt_statistics_handler_new_http_mock(struct aws_allocator *allocator) { struct aws_crt_statistics_handler *handler = NULL; struct mock_http_connection_monitor_impl *impl = NULL; if (!aws_mem_acquire_many( allocator, 2, &handler, sizeof(struct aws_crt_statistics_handler), &impl, sizeof(struct mock_http_connection_monitor_impl))) { return NULL; } AWS_ZERO_STRUCT(*handler); AWS_ZERO_STRUCT(*impl); handler->vtable = &s_http_mock_monitor_vtable; handler->allocator = allocator; handler->impl = impl; return handler; } static int s_aws_http_on_incoming_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)data; (void)user_data; return AWS_OP_SUCCESS; } static void s_aws_http_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; (void)error_code; size_t request_index = (size_t)user_data; struct http_request_info *request_info = NULL; aws_array_list_get_at_ptr(&s_test_context.requests, (void **)&request_info, request_index); if (request_info != NULL) { request_info->response_completed = true; } } static void s_add_outgoing_stream(struct test_http_stats_event *event) { (void)event; struct 
http_request_info request_info; AWS_ZERO_STRUCT(request_info); request_info.request = aws_http_message_new_request(s_test_context.allocator); aws_http_message_set_request_method(request_info.request, aws_byte_cursor_from_c_str("GET")); struct aws_http_header host_header = { .name = aws_byte_cursor_from_c_str("host"), .value = aws_byte_cursor_from_c_str("www.derp.com"), }; aws_http_message_add_header(request_info.request, host_header); aws_http_message_set_request_path(request_info.request, aws_byte_cursor_from_c_str("/index.html?queryparam=value")); AWS_FATAL_ASSERT(event->request_body_size <= MAX_BODY_SIZE); if (event->request_body_size > 0) { char cl_buffer[256]; snprintf(cl_buffer, sizeof(cl_buffer), "%zu", event->request_body_size); struct aws_http_header content_length_header = { .name = aws_byte_cursor_from_c_str("content-length"), .value = aws_byte_cursor_from_c_str(cl_buffer), }; aws_http_message_add_header(request_info.request, content_length_header); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&s_test_context.large_body_buf); body_cursor.len = event->request_body_size; request_info.body = aws_input_stream_new_from_cursor(s_test_context.allocator, &body_cursor); aws_http_message_set_body_stream(request_info.request, request_info.body); } struct aws_http_make_request_options request_options; AWS_ZERO_STRUCT(request_options); request_options.request = request_info.request; request_options.on_complete = s_aws_http_on_stream_complete; request_options.on_response_body = s_aws_http_on_incoming_body; request_options.self_size = sizeof(struct aws_http_make_request_options); request_options.user_data = (void *)aws_array_list_length(&s_test_context.requests); request_info.stream = aws_http_connection_make_request(s_test_context.connection, &request_options); aws_http_stream_activate(request_info.stream); aws_array_list_push_back(&s_test_context.requests, &request_info); } static void s_add_response_data(struct test_http_stats_event *event) { testing_channel_push_read_str(&s_test_context.test_channel, event->response_stream_data); } static int s_do_http_statistics_test( struct aws_allocator *allocator, struct test_http_stats_event *events, size_t event_count) { s_clock_value = 0; s_init_monitor_test(allocator, s_aws_crt_statistics_handler_new_http_mock(allocator)); struct mock_http_connection_monitor_impl *monitor_impl = s_test_context.monitor->impl; for (size_t i = 0; i < event_count; ++i) { struct test_http_stats_event *event = events + i; s_clock_value = aws_timestamp_convert(event->timestamp, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); switch (event->event_type) { case MTHSET_FLUSH: testing_channel_drain_queued_tasks(&s_test_context.test_channel); break; case MTHSET_ADD_OUTGOING_STREAM: s_add_outgoing_stream(event); break; case MTHSET_ADD_RESPONSE_DATA: s_add_response_data(event); break; case MTHSET_TICK: testing_channel_run_currently_queued_tasks(&s_test_context.test_channel); break; case MTHSET_VERIFY: ASSERT_TRUE( event->expected_stats.pending_incoming_stream_ms == monitor_impl->last_seen_stats.pending_incoming_stream_ms); ASSERT_TRUE( event->expected_stats.pending_outgoing_stream_ms == monitor_impl->last_seen_stats.pending_outgoing_stream_ms); ASSERT_TRUE( event->expected_stats.current_incoming_stream_id == monitor_impl->last_seen_stats.current_incoming_stream_id); ASSERT_TRUE( event->expected_stats.current_outgoing_stream_id == monitor_impl->last_seen_stats.current_outgoing_stream_id); break; } } size_t request_count = 
aws_array_list_length(&s_test_context.requests); for (size_t i = 0; i < request_count; ++i) { struct http_request_info *request_info = NULL; aws_array_list_get_at_ptr(&s_test_context.requests, (void **)&request_info, i); ASSERT_TRUE(request_info && request_info->response_completed); } s_clean_up_monitor_test(); return AWS_OP_SUCCESS; } static struct test_http_stats_event s_http_stats_test_trivial[] = { { .event_type = MTHSET_VERIFY, .timestamp = 0, .expected_stats = { .pending_outgoing_stream_ms = 0, .pending_incoming_stream_ms = 0, .current_outgoing_stream_id = 0, .current_incoming_stream_id = 0, }, }, }; static int s_test_http_stats_trivial(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_statistics_test(allocator, s_http_stats_test_trivial, AWS_ARRAY_SIZE(s_http_stats_test_trivial)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_stats_trivial, s_test_http_stats_trivial); static struct test_http_stats_event s_http_stats_test_basic_request[] = { { .event_type = MTHSET_ADD_OUTGOING_STREAM, .timestamp = 100, .request_body_size = TICK_BODY_SIZE, }, { .event_type = MTHSET_TICK, .timestamp = 100, }, { .event_type = MTHSET_FLUSH, .timestamp = 200, }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 500, .response_stream_data = "HTTP/1.1 200 OK\r\n", }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 700, .response_stream_data = "Content-Length: 9\r\n\r\nSomething", }, { .event_type = MTHSET_FLUSH, .timestamp = 700, }, { .event_type = MTHSET_FLUSH, .timestamp = AWS_TIMESTAMP_MILLIS, }, { .event_type = MTHSET_VERIFY, .timestamp = AWS_TIMESTAMP_MILLIS, .expected_stats = { .pending_outgoing_stream_ms = 100, /* [100, 200] */ .pending_incoming_stream_ms = 600, /* [100, 700] */ .current_outgoing_stream_id = 0, .current_incoming_stream_id = 0, }, }, }; static int s_test_http_stats_basic_request(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_statistics_test( allocator, s_http_stats_test_basic_request, AWS_ARRAY_SIZE(s_http_stats_test_basic_request)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_stats_basic_request, s_test_http_stats_basic_request); static struct test_http_stats_event s_http_stats_test_split_across_gather_boundary[] = { { .event_type = MTHSET_ADD_OUTGOING_STREAM, .timestamp = AWS_TIMESTAMP_MILLIS - 100, .request_body_size = 2 * TICK_BODY_SIZE, }, { .event_type = MTHSET_TICK, .timestamp = AWS_TIMESTAMP_MILLIS - 100, }, { .event_type = MTHSET_TICK, .timestamp = AWS_TIMESTAMP_MILLIS, }, { .event_type = MTHSET_VERIFY, .timestamp = AWS_TIMESTAMP_MILLIS, .expected_stats = { .pending_outgoing_stream_ms = 100, .pending_incoming_stream_ms = 100, .current_outgoing_stream_id = 1, .current_incoming_stream_id = 1, }, }, { .event_type = MTHSET_FLUSH, .timestamp = AWS_TIMESTAMP_MILLIS + 100, }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = AWS_TIMESTAMP_MILLIS + 500, .response_stream_data = "HTTP/1.1 200 OK\r\n", }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = AWS_TIMESTAMP_MILLIS + 700, .response_stream_data = "Content-Length: 9\r\n\r\nSomething", }, { .event_type = MTHSET_FLUSH, .timestamp = AWS_TIMESTAMP_MILLIS + 700, }, { .event_type = MTHSET_FLUSH, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, }, { .event_type = MTHSET_VERIFY, .timestamp = 2 * AWS_TIMESTAMP_MILLIS, .expected_stats = { .pending_outgoing_stream_ms = 100, .pending_incoming_stream_ms = 700, .current_outgoing_stream_id = 0, .current_incoming_stream_id = 0, }, }, }; 
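/*
 * How to read the "split across gather boundary" event table above: the mock stats handler's
 * report interval is 1000ms (see s_mock_get_report_interval_ms earlier in this file), and the
 * outgoing stream is created 100ms before that boundary with a body of 2 * TICK_BODY_SIZE, so
 * its activity straddles two gather intervals. The first MTHSET_VERIFY therefore expects only
 * the 100ms of pending time accrued before the boundary, with the stream still current
 * (stream ids == 1); the second MTHSET_VERIFY expects the time accrued after the boundary,
 * once the response has completed and the current stream ids have dropped back to 0.
 */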
static int s_test_http_stats_split_across_gather_boundary(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_statistics_test( allocator, s_http_stats_test_split_across_gather_boundary, AWS_ARRAY_SIZE(s_http_stats_test_split_across_gather_boundary)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_stats_split_across_gather_boundary, s_test_http_stats_split_across_gather_boundary); /* * Pipeline 3 requests before beginning response data. * * The request body sizes have a total length of 4 * TICK_BODY_SIZE which means it will take 5 ticks * to completely "write" them. */ static struct test_http_stats_event s_http_stats_test_pipelined[] = { { .event_type = MTHSET_ADD_OUTGOING_STREAM, .timestamp = 100, .request_body_size = 2 * TICK_BODY_SIZE, }, { .event_type = MTHSET_TICK, .timestamp = 100, }, { .event_type = MTHSET_ADD_OUTGOING_STREAM, .timestamp = 200, .request_body_size = 1 * TICK_BODY_SIZE, }, { .event_type = MTHSET_ADD_OUTGOING_STREAM, .timestamp = 300, .request_body_size = 1 * TICK_BODY_SIZE, }, { .event_type = MTHSET_TICK, .timestamp = 400, }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 500, .response_stream_data = "HTTP/1.1 200 OK\r\n", }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 600, .response_stream_data = "Content-Length: 9\r\n\r\nSomething", }, { .event_type = MTHSET_FLUSH, .timestamp = 690, }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 700, .response_stream_data = "HTTP/1.1 200 OK\r\n", }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 800, .response_stream_data = "Content-Length: 9\r\n\r\nSomethingHTTP/1.1 200 OK\r\n", }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 900, .response_stream_data = "Content-Length: 9\r\n\r\nSomething", }, { .event_type = MTHSET_TICK, .timestamp = AWS_TIMESTAMP_MILLIS, }, { .event_type = MTHSET_VERIFY, .timestamp = AWS_TIMESTAMP_MILLIS, .expected_stats = { .pending_outgoing_stream_ms = 590, /* [100, 690] */ .pending_incoming_stream_ms = 800, /* [100, 900] */ .current_outgoing_stream_id = 0, .current_incoming_stream_id = 0, }, }, }; static int s_test_http_stats_pipelined(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_statistics_test(allocator, s_http_stats_test_pipelined, AWS_ARRAY_SIZE(s_http_stats_test_pipelined)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_stats_pipelined, s_test_http_stats_pipelined); static struct test_http_stats_event s_http_stats_test_multiple_requests_with_gap[] = { { .event_type = MTHSET_ADD_OUTGOING_STREAM, .timestamp = 100, .request_body_size = TICK_BODY_SIZE, }, { .event_type = MTHSET_TICK, .timestamp = 100, }, { .event_type = MTHSET_FLUSH, .timestamp = 200, }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 300, .response_stream_data = "HTTP/1.1 200 OK\r\n", }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 400, .response_stream_data = "Content-Length: 9\r\n\r\nSomething", }, { .event_type = MTHSET_FLUSH, .timestamp = 400, }, { .event_type = MTHSET_ADD_OUTGOING_STREAM, .timestamp = 500, .request_body_size = TICK_BODY_SIZE, }, { .event_type = MTHSET_TICK, .timestamp = 500, }, { .event_type = MTHSET_FLUSH, .timestamp = 600, }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 700, .response_stream_data = "HTTP/1.1 200 OK\r\n", }, { .event_type = MTHSET_ADD_RESPONSE_DATA, .timestamp = 800, .response_stream_data = "Content-Length: 9\r\n\r\nSomething", }, { .event_type = MTHSET_FLUSH, .timestamp = 
AWS_TIMESTAMP_MILLIS, }, { .event_type = MTHSET_VERIFY, .timestamp = AWS_TIMESTAMP_MILLIS, .expected_stats = { .pending_outgoing_stream_ms = 200, /* [100, 200] + [500, 600]*/ .pending_incoming_stream_ms = 600, /* [100, 400] + [500, 800] */ .current_outgoing_stream_id = 0, .current_incoming_stream_id = 0, }, }, }; static int s_test_http_stats_multiple_requests_with_gap(struct aws_allocator *allocator, void *ctx) { (void)ctx; int result = s_do_http_statistics_test( allocator, s_http_stats_test_multiple_requests_with_gap, AWS_ARRAY_SIZE(s_http_stats_test_multiple_requests_with_gap)); ASSERT_TRUE(result == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_stats_multiple_requests_with_gap, s_test_http_stats_multiple_requests_with_gap); aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h1_client.c000066400000000000000000005474371456575232400244460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "stream_test_helper.h" #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #define H1_CLIENT_TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) static struct aws_http_message *s_new_default_get_request(struct aws_allocator *allocator) { struct aws_http_message *request = aws_http_message_new_request(allocator); AWS_FATAL_ASSERT(request); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_method(request, aws_http_method_get)); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); return request; } static void s_destroy_stream_on_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; (void)error_code; struct aws_input_stream *data_stream = user_data; aws_input_stream_release(data_stream); } static struct aws_http1_chunk_options s_default_chunk_options(struct aws_input_stream *stream, size_t stream_size) { struct aws_http1_chunk_options options; AWS_ZERO_STRUCT(options); options.chunk_data = stream; options.chunk_data_size = stream_size; options.on_complete = s_destroy_stream_on_complete; options.user_data = stream; return options; } static int s_write_termination_chunk(struct aws_allocator *allocator, struct aws_http_stream *stream) { static const struct aws_byte_cursor empty_str = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""); struct aws_input_stream *termination_marker = aws_input_stream_new_from_cursor(allocator, &empty_str); ASSERT_NOT_NULL(termination_marker); struct aws_http1_chunk_options options = s_default_chunk_options(termination_marker, empty_str.len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); return AWS_OP_SUCCESS; } static struct aws_http_message *s_new_default_chunked_put_request(struct aws_allocator *allocator) { struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); AWS_FATAL_ASSERT(request); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == 
aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); return request; } static struct aws_http_message *s_new_default_head_request(struct aws_allocator *allocator) { struct aws_http_message *request = aws_http_message_new_request(allocator); AWS_FATAL_ASSERT(request); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_method(request, aws_http_method_head)); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); return request; } struct tester { struct aws_allocator *alloc; struct testing_channel testing_channel; struct aws_http_connection *connection; struct aws_logger logger; bool manual_window_management; }; struct tester_options { bool manual_window_management; size_t initial_stream_window_size; size_t read_buffer_capacity; }; static int s_tester_init_ex(struct tester *tester, struct aws_allocator *alloc, const struct tester_options *options) { aws_http_library_init(alloc); AWS_ZERO_STRUCT(*tester); tester->alloc = alloc; struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; ASSERT_SUCCESS(aws_logger_init_standard(&tester->logger, tester->alloc, &logger_options)); aws_logger_set(&tester->logger); struct aws_testing_channel_options test_channel_options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&tester->testing_channel, alloc, &test_channel_options)); struct aws_http1_connection_options http1_options; AWS_ZERO_STRUCT(http1_options); http1_options.read_buffer_capacity = options->read_buffer_capacity; tester->connection = aws_http_connection_new_http1_1_client( alloc, options->manual_window_management, options->initial_stream_window_size, &http1_options); ASSERT_NOT_NULL(tester->connection); struct aws_channel_slot *slot = aws_channel_slot_new(tester->testing_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(tester->testing_channel.channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &tester->connection->channel_handler)); tester->connection->vtable->on_channel_handler_installed(&tester->connection->channel_handler, slot); testing_channel_drain_queued_tasks(&tester->testing_channel); return AWS_OP_SUCCESS; } static int s_tester_init(struct tester *tester, struct aws_allocator *alloc) { struct tester_options options = { .manual_window_management = false, }; return s_tester_init_ex(tester, alloc, &options); } static int s_tester_clean_up(struct tester *tester) { aws_http_connection_release(tester->connection); ASSERT_SUCCESS(testing_channel_clean_up(&tester->testing_channel)); aws_http_library_clean_up(); aws_logger_clean_up(&tester->logger); return AWS_OP_SUCCESS; } /* Check that we can set and tear down the `tester` used by all other tests in this file */ H1_CLIENT_TEST_CASE(h1_client_sanity_check) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Send 1 line request, doesn't care about response */ H1_CLIENT_TEST_CASE(h1_client_request_send_1liner) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = s_new_default_get_request(allocator), }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); 
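/* Nothing is written synchronously: the connection schedules its outgoing-stream task on the
 * channel's event loop. Draining the testing channel's queued tasks below runs that task, so the
 * encoded request bytes land in the testing channel's written-message queue for inspection. */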
testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_message_str(&tester.testing_channel, expected)); /* clean up */ aws_http_message_destroy(opt.request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_headers) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("example.com"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("*/*"), }, }; struct aws_http_message *request = s_new_default_get_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "GET / HTTP/1.1\r\n" "Host: example.com\r\n" "Accept: */*\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_message_str(&tester.testing_channel, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_body) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("16"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); aws_http_message_set_body_stream(request, body_stream); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n" "write more tests"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_body_chunked) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message 
*request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); /* Initialize and send the stream chunks */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body.len); ASSERT_SUCCESS(aws_http_stream_activate(stream)); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } int chunked_test_helper( const struct aws_byte_cursor *body, struct aws_http_headers *trailers, const char *expected, struct tester tester, struct aws_allocator *allocator) { /* send request */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); /* Initialize and send the stream chunks */ ASSERT_SUCCESS(aws_http_stream_activate(stream)); if (body != NULL) { struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body->len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); } ASSERT_SUCCESS(aws_http1_stream_add_chunked_trailer(stream, trailers)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); return AWS_OP_SUCCESS; } int chunked_trailer_succeed( const struct aws_byte_cursor *body, struct aws_http_headers *trailers, struct tester tester, struct aws_allocator *allocator) { /* send request */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); /* Initialize and send the stream chunks */ ASSERT_SUCCESS(aws_http_stream_activate(stream)); if (body != NULL) { struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body->len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); } /* kind of gross, but good enough for now */ int err = aws_http1_stream_add_chunked_trailer(stream, trailers); if (err) { aws_http_message_destroy(request); aws_http_stream_release(stream); return err; 
} ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_chunked_trailer) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_http_headers *trailers = aws_http_headers_new(allocator); const struct aws_http_header trailer = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("trailer"), }; const struct aws_http_header trailer1 = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("another"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test"), }; aws_http_headers_add_header(trailers, &trailer); aws_http_headers_add_header(trailers, &trailer1); /* Initialize and send the stream chunks */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "chunked: trailer\r\n" "another: test\r\n" "\r\n"; ASSERT_SUCCESS(chunked_test_helper(&body, trailers, expected, tester, allocator)); /* clean up */ aws_http_headers_release(trailers); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_empty_chunked_trailer) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_http_headers *trailers = aws_http_headers_new(allocator); /* Initialize and send the stream chunks */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(chunked_test_helper(&body, trailers, expected, tester, allocator)); /* clean up */ aws_http_headers_release(trailers); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_forbidden_trailer) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_http_headers *success = aws_http_headers_new(allocator); aws_http_headers_add_header( success, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("should"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("succeed"), }); struct aws_http_headers *transfer_encoding = aws_http_headers_new(allocator); aws_http_headers_add_header( transfer_encoding, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("gzip, chunked"), }); struct aws_http_headers *content_length = aws_http_headers_new(allocator); aws_http_headers_add_header( content_length, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("3495"), }); struct aws_http_headers *host = aws_http_headers_new(allocator); aws_http_headers_add_header( host, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("www.example.org"), }); struct aws_http_headers *cache_control = aws_http_headers_new(allocator); aws_http_headers_add_header( cache_control, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), .value = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("private"), }); struct aws_http_headers *expect = aws_http_headers_new(allocator); aws_http_headers_add_header( expect, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expect"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("100-continue"), }); struct aws_http_headers *max_forwards = aws_http_headers_new(allocator); aws_http_headers_add_header( max_forwards, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("max-forwards"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("123"), }); struct aws_http_headers *pragma = aws_http_headers_new(allocator); aws_http_headers_add_header( pragma, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("pragma"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("no-cache"), }); struct aws_http_headers *range = aws_http_headers_new(allocator); aws_http_headers_add_header( range, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("range"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=0-1023"), }); struct aws_http_headers *te = aws_http_headers_new(allocator); aws_http_headers_add_header( te, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("te"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("trailers, deflate;q=0.5"), }); struct aws_http_headers *www_authenticate = aws_http_headers_new(allocator); aws_http_headers_add_header( www_authenticate, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("www-authenticate"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "Newauth realm=\"apps\", type=1,title=\"Login to \"apps\"\", Basic realm=\"simple\""), }); struct aws_http_headers *authorization = aws_http_headers_new(allocator); aws_http_headers_add_header( authorization, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("authorization"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("credentials"), }); struct aws_http_headers *proxy_authenticate = aws_http_headers_new(allocator); aws_http_headers_add_header( proxy_authenticate, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("proxy-authenticate"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Basic YWxhZGRpbjpvcGVuc2VzYW1l"), }); struct aws_http_headers *proxy_authorization = aws_http_headers_new(allocator); aws_http_headers_add_header( proxy_authorization, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("proxy-authorization"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("credentials"), }); struct aws_http_headers *set_cookie = aws_http_headers_new(allocator); aws_http_headers_add_header( set_cookie, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("set-cookie"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sessionId=38afes7a8"), }); struct aws_http_headers *cookie = aws_http_headers_new(allocator); aws_http_headers_add_header( cookie, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cookie"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PHPSESSID=298zf09hf012fh2; csrftoken=u32t4o3tb3gg43; _gat=1"), }); struct aws_http_headers *age = aws_http_headers_new(allocator); aws_http_headers_add_header( age, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("age"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("24"), }); struct aws_http_headers *expires = aws_http_headers_new(allocator); aws_http_headers_add_header( expires, &(struct aws_http_header){ .name = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("expires"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Wed, 21 Oct 2015 07:28:00 GMT"), }); struct aws_http_headers *date = aws_http_headers_new(allocator); aws_http_headers_add_header( date, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("date"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Wed, 21 Oct 2015 07:28:00 GMT"), }); struct aws_http_headers *location = aws_http_headers_new(allocator); aws_http_headers_add_header( location, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("location"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/index.html"), }); struct aws_http_headers *retry_after = aws_http_headers_new(allocator); aws_http_headers_add_header( retry_after, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("retry-after"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("120"), }); struct aws_http_headers *vary = aws_http_headers_new(allocator); aws_http_headers_add_header( vary, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("vary"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("User-Agent"), }); struct aws_http_headers *warning = aws_http_headers_new(allocator); aws_http_headers_add_header( warning, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("warning"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("110 anderson/1.3.37 \"Response is stale\""), }); struct aws_http_headers *content_encoding = aws_http_headers_new(allocator); aws_http_headers_add_header( content_encoding, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("gzip"), }); struct aws_http_headers *content_type = aws_http_headers_new(allocator); aws_http_headers_add_header( content_type, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-type"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("text/html"), }); struct aws_http_headers *content_range = aws_http_headers_new(allocator); aws_http_headers_add_header( content_range, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-range"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes 200-1000/67589"), }); struct aws_http_headers *trailer = aws_http_headers_new(allocator); aws_http_headers_add_header( trailer, &(struct aws_http_header){ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("trailer"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), }); ASSERT_SUCCESS(chunked_trailer_succeed(NULL, success, tester, allocator)); ASSERT_ERROR( AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, transfer_encoding, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, content_length, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, host, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, cache_control, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, expect, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, max_forwards, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, pragma, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, range, tester, allocator)); 
ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, te, tester, allocator)); ASSERT_ERROR( AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, www_authenticate, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, authorization, tester, allocator)); ASSERT_ERROR( AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, proxy_authenticate, tester, allocator)); ASSERT_ERROR( AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, proxy_authorization, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, set_cookie, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, cookie, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, age, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, expires, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, date, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, location, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, retry_after, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, vary, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, warning, tester, allocator)); ASSERT_ERROR( AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, content_encoding, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, content_type, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, content_range, tester, allocator)); ASSERT_ERROR(AWS_ERROR_HTTP_INVALID_HEADER_FIELD, chunked_trailer_succeed(NULL, trailer, tester, allocator)); /* clean up */ aws_http_headers_release(success); aws_http_headers_release(transfer_encoding); aws_http_headers_release(content_length); aws_http_headers_release(host); aws_http_headers_release(cache_control); aws_http_headers_release(expect); aws_http_headers_release(max_forwards); aws_http_headers_release(pragma); aws_http_headers_release(range); aws_http_headers_release(te); aws_http_headers_release(www_authenticate); aws_http_headers_release(authorization); aws_http_headers_release(proxy_authenticate); aws_http_headers_release(proxy_authorization); aws_http_headers_release(set_cookie); aws_http_headers_release(cookie); aws_http_headers_release(age); aws_http_headers_release(expires); aws_http_headers_release(date); aws_http_headers_release(location); aws_http_headers_release(retry_after); aws_http_headers_release(vary); aws_http_headers_release(warning); aws_http_headers_release(content_encoding); aws_http_headers_release(content_type); aws_http_headers_release(content_range); aws_http_headers_release(trailer); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_chunked_extensions) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = 
aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); /* Initialize and send the stream chunks */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); /* create a chunk with a single extension */ struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body.len); struct aws_http1_chunk_extension single_extension[] = { { .key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("foo"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bar"), }, }; options.extensions = (struct aws_http1_chunk_extension *)&single_extension; options.num_extensions = AWS_ARRAY_SIZE(single_extension); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); /* create a chunk with a multiple_single extensions */ static const struct aws_byte_cursor multi_ext_body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *multi_ext_body_stream = aws_input_stream_new_from_cursor(allocator, &multi_ext_body); struct aws_http1_chunk_options multi_ext_opts = s_default_chunk_options(multi_ext_body_stream, multi_ext_body.len); struct aws_http1_chunk_extension multi_extension[] = { { .key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("foo"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bar"), }, { .key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("baz"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cux"), }, }; multi_ext_opts.extensions = (struct aws_http1_chunk_extension *)&multi_extension; multi_ext_opts.num_extensions = AWS_ARRAY_SIZE(multi_extension); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &multi_ext_opts)); /* terminate the stream */ ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); /* Run it! */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10;foo=bar\r\n" "write more tests" "\r\n" "10;foo=bar;baz=cux\r\n" "write more tests" "\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } struct chunk_writer_data { size_t num_chunks; const char **payloads; struct aws_http_stream *stream; struct aws_allocator *allocator; long delay_between_writes_ns; }; H1_CLIENT_TEST_CASE(h1_client_request_waits_for_chunks) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request with Transfer-Encoding: chunked and body stream */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); /* activate stream *before* sending any data. */ ASSERT_SUCCESS(aws_http_stream_activate(stream)); char *payloads[] = {"write more tests", "write more tests", ""}; struct chunk_writer_data chunk_data = { .num_chunks = sizeof(payloads) / sizeof(payloads[0]), .payloads = (const char **)&payloads, .stream = stream, .allocator = allocator, .delay_between_writes_ns = 10000, }; /* write and pause, in a loop. This exercises the rescheduling path. 
*/ for (size_t i = 0; i < chunk_data.num_chunks; ++i) { testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_FALSE( aws_task_scheduler_has_tasks(&tester.testing_channel.loop_impl->scheduler, NULL), "Everything should be paused when no chunks are pending"); struct aws_byte_cursor body = aws_byte_cursor_from_c_str(chunk_data.payloads[i]); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(chunk_data.allocator, &body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body.len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_FALSE( aws_task_scheduler_has_tasks(&tester.testing_channel.loop_impl->scheduler, NULL), "Everything should be paused when no chunks are pending"); } /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "\r\n"; /* check result */ ASSERT_SUCCESS(testing_channel_check_written_messages( &tester.testing_channel, allocator, aws_byte_cursor_from_c_str(expected))); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } struct chunk_that_writes_another { struct aws_allocator *allocator; struct aws_input_stream *body_data; }; static void s_on_chunk_complete_write_another_chunk(struct aws_http_stream *stream, int error_code, void *user_data) { AWS_FATAL_ASSERT(0 == error_code); struct chunk_that_writes_another *chunk = user_data; aws_input_stream_release(chunk->body_data); const struct aws_byte_cursor chunk2_body = aws_byte_cursor_from_c_str("chunk 2."); struct aws_input_stream *chunk2_body_stream = aws_input_stream_new_from_cursor(chunk->allocator, &chunk2_body); AWS_FATAL_ASSERT(chunk2_body_stream != NULL); struct aws_http1_chunk_options chunk2_options = s_default_chunk_options(chunk2_body_stream, chunk2_body.len); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http1_stream_write_chunk(stream, &chunk2_options)); } /* Test that it's safe to start a new chunk from the chunk-complete callback */ H1_CLIENT_TEST_CASE(h1_client_request_send_chunk_from_chunk_complete_callback) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request with Transfer-Encoding: chunked and body stream */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); /* activate stream *before* sending any data. 
*/ aws_http_stream_activate(stream); /* write chunk 1 */ const struct aws_byte_cursor chunk1_body = aws_byte_cursor_from_c_str("chunk 1."); struct aws_input_stream *chunk1_body_stream = aws_input_stream_new_from_cursor(allocator, &chunk1_body); ASSERT_NOT_NULL(chunk1_body_stream); struct chunk_that_writes_another chunk1_userdata = { .allocator = allocator, .body_data = chunk1_body_stream, }; struct aws_http1_chunk_options chunk1_options = { .chunk_data = chunk1_body_stream, .chunk_data_size = chunk1_body.len, .on_complete = s_on_chunk_complete_write_another_chunk, .user_data = &chunk1_userdata, }; ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &chunk1_options)); /* run tasks, the 1st chunk should complete, which writes the 2nd chunk, * which should also complete */ testing_channel_drain_queued_tasks(&tester.testing_channel); const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "8\r\n" "chunk 1." "\r\n" "8\r\n" "chunk 2." "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Regression test: Once upon a time there was a bug where the outgoing_stream_task got double-scheduled. * The situation was: * - An aws_io_message had been written to the channel, but not yet completed. * - The encoder was paused, waiting for more chunks. * Then at more-or-less the same time: * - The aws_io_message finished writing, which resulted in the outgoing_stream_task getting rescheduled. * - A new chunk was added, which resulted in the outgoing_stream_task getting rescheduled. * And then the task's linked_list_node got all screwed up and we crashed iterating a list. */ H1_CLIENT_TEST_CASE(h1_client_request_write_chunk_as_write_completes_regression) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* To repro this bug, testing channel must wait to mark aws_io_messages complete */ testing_channel_complete_written_messages_immediately(&tester.testing_channel, false, 0); /* Send request */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); /* Drain tasks. The head of the request gets written to aws_io_message and sent down channel. * The outgoing_stream_task should be paused waiting for more chunks. */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* Write a chunk. When bug occurred, this would reschedule the outgoing_stream_task */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body.len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); /* Have the previously sent aws_io_message complete. 
* When bug occurred, this would ALSO reschedule the outgoing_stream_task */ struct aws_linked_list *written_msgs = testing_channel_get_written_message_queue(&tester.testing_channel); for (struct aws_linked_list_node *node = aws_linked_list_begin(written_msgs); node != aws_linked_list_end(written_msgs); node = aws_linked_list_next(node)) { struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); msg->on_completion(tester.testing_channel.channel, msg, 0, msg->user_data); msg->on_completion = NULL; } /* Run the scheduler. When bug occurred, this would crash */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_content_length_0_ok) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request with Content-Length: 0 and NO body stream */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("0"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 0\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_message_str(&tester.testing_channel, expected)); aws_http_stream_release(stream); /* send Content-Length: 0 request again, but this time with a body stream whose length is 0 */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); aws_http_message_set_body_stream(request, body_stream); stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_SUCCESS(testing_channel_check_written_message_str(&tester.testing_channel, expected)); /* clean up */ aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_chunk_size_0_ok) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Send request with Transfer-Encoding: chunked and an empty body stream. 
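* Writing only the zero-length terminating chunk should produce nothing but "0\r\n\r\n" after the request head.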
*/ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_chunk_size_0_with_extensions_ok) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Send request with Transfer-Encoding: chunked and an empty body stream. */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); static const struct aws_byte_cursor empty_str = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""); struct aws_input_stream *termination_marker = aws_input_stream_new_from_cursor(allocator, &empty_str); struct aws_http1_chunk_options options = s_default_chunk_options(termination_marker, empty_str.len); struct aws_http1_chunk_extension single_extension[] = { { .key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("foo"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bar"), }, { .key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("baz"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cux"), }, }; options.extensions = (struct aws_http1_chunk_extension *)&single_extension; options.num_extensions = AWS_ARRAY_SIZE(single_extension); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "0;foo=bar;baz=cux\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Send a request whose body doesn't fit in a single aws_io_message using content length*/ H1_CLIENT_TEST_CASE(h1_client_request_send_large_body) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request with large body full of random data */ size_t body_len = 1024 * 1024 * 1; /* 1MB */ struct aws_byte_buf body_buf; ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_len)); while (body_buf.len < body_len) { int r = rand(); aws_byte_buf_write_be32(&body_buf, (uint32_t)r); } const struct aws_byte_cursor body = aws_byte_cursor_from_buf(&body_buf); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); char content_length_value[100]; snprintf(content_length_value, sizeof(content_length_value), "%zu", body_len); struct aws_http_header headers[] = { { .name = 
aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str(content_length_value), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/large.txt"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); aws_http_message_set_body_stream(request, body_stream); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); /* check result */ const char *expected_head_fmt = "PUT /large.txt HTTP/1.1\r\n" "Content-Length: %zu\r\n" "\r\n"; char expected_head[1024]; int expected_head_len = snprintf(expected_head, sizeof(expected_head), expected_head_fmt, body_len); struct aws_byte_buf expected_buf; ASSERT_SUCCESS(aws_byte_buf_init(&expected_buf, allocator, body_len + expected_head_len)); ASSERT_TRUE(aws_byte_buf_write(&expected_buf, (uint8_t *)expected_head, expected_head_len)); ASSERT_TRUE(aws_byte_buf_write_from_whole_buffer(&expected_buf, body_buf)); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(testing_channel_check_written_messages( &tester.testing_channel, allocator, aws_byte_cursor_from_buf(&expected_buf))); /* clean up */ aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); aws_byte_buf_clean_up(&body_buf); aws_byte_buf_clean_up(&expected_buf); return AWS_OP_SUCCESS; } static int s_parse_chunked_extensions( const char *extensions, struct aws_http1_chunk_extension *expected_extensions, size_t num_extensions) { size_t i; for (i = 0; i < num_extensions; ++i) { struct aws_http1_chunk_extension *expected_extension = expected_extensions + i; /* parse the key */ char *key_val_delimiter = strchr(extensions, '='); if (NULL == key_val_delimiter) { return false; } *key_val_delimiter = '\0'; struct aws_byte_cursor key = aws_byte_cursor_from_c_str(extensions); ASSERT_BIN_ARRAYS_EQUALS(expected_extension->key.ptr, expected_extension->key.len, key.ptr, key.len); extensions = key_val_delimiter + 1; /* parse the value */ char *val_end_delimiter = strchr(extensions, ';'); if (NULL != val_end_delimiter) { *val_end_delimiter = '\0'; } struct aws_byte_cursor value = aws_byte_cursor_from_c_str(extensions++); ASSERT_BIN_ARRAYS_EQUALS(expected_extension->value.ptr, expected_extension->value.len, value.ptr, value.len); extensions = val_end_delimiter + 1; } if (i == num_extensions) { return AWS_OP_SUCCESS; } else { return AWS_OP_ERR; } } static int s_can_parse_as_chunked_encoding( struct aws_allocator *allocator, struct aws_byte_buf *chunked_http_request_headers_and_body, struct aws_byte_buf *expected_head, struct aws_http1_chunk_extension *expected_extensions, size_t num_extensions, char body_char) { /* Check that the HTTP header matches the expected value */ ASSERT_TRUE(chunked_http_request_headers_and_body->len > expected_head->len); ASSERT_BIN_ARRAYS_EQUALS( expected_head->buffer, expected_head->len, chunked_http_request_headers_and_body->buffer, expected_head->len); /* move the cursor past the head and enter the chunked body */ struct aws_byte_cursor request_cursor = 
aws_byte_cursor_from_buf(chunked_http_request_headers_and_body);
    aws_byte_cursor_advance(&request_cursor, expected_head->len);
    struct aws_byte_cursor crlf_cursor = aws_byte_cursor_from_c_str("\r\n");
    struct aws_byte_cursor match_cursor;
    /* Cap the number of iterations so that, if there is a bug, the test fails fast instead of looping forever. */
    int max_iter = 128;
    int i = 0;
    /* 3MB scratch buffer, large enough to hold a chunk line with massive chunk extensions */
    char *chunk_ascii_hex = aws_mem_calloc(allocator, 3, 1024 * 1024);
    for (i = 0; i < max_iter; ++i) {
        ASSERT_SUCCESS(aws_byte_cursor_find_exact(&request_cursor, &crlf_cursor, &match_cursor));
        memset(chunk_ascii_hex, 0, 3 * 1024 * 1024);
        memcpy(chunk_ascii_hex, (char *)request_cursor.ptr, match_cursor.ptr - request_cursor.ptr);
        char *chunk_ext_start = strchr(chunk_ascii_hex, ';');
        if (NULL != chunk_ext_start) {
            /* write a null character over the first ';' so that strtol parses only the hex chunk size. */
            *chunk_ext_start = '\0';
            if (0 < num_extensions) {
                ++chunk_ext_start;
                ASSERT_SUCCESS(s_parse_chunked_extensions(chunk_ext_start, expected_extensions, num_extensions));
            }
        }
        long chunk_size = strtol((char *)chunk_ascii_hex, 0, 16);
        long total_chunk_size_with_overhead =
            (long)(match_cursor.ptr - request_cursor.ptr /* size of the chunk in ascii hex */
                   + crlf_cursor.len                     /* size of the crlf */
                   + chunk_size                          /* size of the payload */
                   + crlf_cursor.len);                   /* size of the chunk terminating crlf */
        /* 0 length chunk signals end of stream. Check for the termination string and exit with success */
        if (0 == chunk_size) {
            struct aws_byte_cursor terminate_cursor = aws_byte_cursor_from_c_str("0\r\n\r\n");
            ASSERT_TRUE(aws_byte_cursor_eq(&request_cursor, &terminate_cursor));
            break;
        }
        /* The buffer should be filled with the character specified for the whole length of the chunk */
        for (int j = (int)(match_cursor.ptr - request_cursor.ptr + crlf_cursor.len); j < chunk_size; ++j) {
            ASSERT_TRUE(body_char == (char)request_cursor.ptr[j]);
        }
        /* advance to the next chunk */
        aws_byte_cursor_advance(&request_cursor, total_chunk_size_with_overhead);
    }
    aws_mem_release(allocator, chunk_ascii_hex);
    /* Test that we didn't exit the loop due to hitting the max iterations */
    ASSERT_TRUE(i < (max_iter - 1));
    return AWS_OP_SUCCESS;
}
/* Send a request whose body doesn't fit in a single aws_io_message using chunked transfer encoding */
H1_CLIENT_TEST_CASE(h1_client_request_send_large_body_chunked) {
    (void)ctx;
    struct tester tester;
    ASSERT_SUCCESS(s_tester_init(&tester, allocator));
    struct aws_http_header headers[] = {
        {
            .name = aws_byte_cursor_from_c_str("Transfer-Encoding"),
            .value = aws_byte_cursor_from_c_str("chunked"),
        },
    };
    struct aws_http_message *request = aws_http_message_new_request(allocator);
    ASSERT_NOT_NULL(request);
    ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT")));
    ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/large.txt")));
    aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers));
    struct aws_http_make_request_options opt = {
        .self_size = sizeof(opt),
        .request = request,
    };
    struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt);
    ASSERT_NOT_NULL(stream);
    ASSERT_SUCCESS(aws_http_stream_activate(stream));
    /* Initialize and send the stream chunks */
    /* send request with large body full of data */
    size_t body_len = 1024 * 1024 * 1; /* 1MB */
    struct aws_byte_buf body_buf;
    ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_len));
    char body_char = 'z';
    while
(body_buf.len < body_len) { aws_byte_buf_write_u8(&body_buf, body_char); } const struct aws_byte_cursor body = aws_byte_cursor_from_buf(&body_buf); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body.len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); /* this call will trigger a pause/wake internally after a large write */ testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); /* check result */ const char expected_head_fmt[] = "PUT /large.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n"; struct aws_byte_buf expected_head_buf = aws_byte_buf_from_c_str((char *)&expected_head_fmt); testing_channel_drain_queued_tasks(&tester.testing_channel); struct aws_byte_buf written_buf; ASSERT_SUCCESS(aws_byte_buf_init(&written_buf, allocator, body_len * 2)); ASSERT_SUCCESS(testing_channel_drain_written_messages(&tester.testing_channel, &written_buf)); ASSERT_SUCCESS(s_can_parse_as_chunked_encoding(allocator, &written_buf, &expected_head_buf, NULL, 0, body_char)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); aws_byte_buf_clean_up(&body_buf); aws_byte_buf_clean_up(&expected_head_buf); aws_byte_buf_clean_up(&written_buf); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_send_large_chunk_extensions) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Transfer-Encoding"), .value = aws_byte_cursor_from_c_str("chunked"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/large.txt"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); /* Initialize and send the stream chunks */ /* send request with large body full of data */ size_t body_len = 1024 * 1024 * 1; /* 1MB */ struct aws_byte_buf body_buf; ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_len)); char body_char = 'z'; while (body_buf.len < body_len) { aws_byte_buf_write_u8(&body_buf, body_char); } const struct aws_byte_cursor body = aws_byte_cursor_from_buf(&body_buf); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body.len); /* No one should ever be using 1MB extensions. In fact, it is a DDoS vector to your server and you should protect * against it for any sort of production software. That said, the spec doesn't place a size limit on how much the * client can send. For this test, we have a 1MB key and a 1MB value in each pair respectively to test that the * state machine can fill across the key/value larger than the size of a message in the channel. 
*/ struct aws_http1_chunk_extension extensions[] = { { .key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("foo"), .value = aws_byte_cursor_from_buf(&body_buf), }, { .key = aws_byte_cursor_from_buf(&body_buf), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bar"), }, }; options.extensions = (struct aws_http1_chunk_extension *)&extensions; options.num_extensions = AWS_ARRAY_SIZE(extensions); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); /* this call will trigger a pause/wake internally after a large write */ testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); /* check result */ const char expected_head_fmt[] = "PUT /large.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n"; struct aws_byte_buf expected_head_buf = aws_byte_buf_from_c_str((char *)&expected_head_fmt); struct aws_http1_chunk_extension expected_extensions[] = { { .key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("foo"), .value = aws_byte_cursor_from_buf(&body_buf), }, { .key = aws_byte_cursor_from_buf(&body_buf), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bar"), }, }; testing_channel_drain_queued_tasks(&tester.testing_channel); struct aws_byte_buf written_buf; ASSERT_SUCCESS(aws_byte_buf_init(&written_buf, allocator, body_len * 2)); ASSERT_SUCCESS(testing_channel_drain_written_messages(&tester.testing_channel, &written_buf)); ASSERT_SUCCESS(s_can_parse_as_chunked_encoding( allocator, &written_buf, &expected_head_buf, expected_extensions, AWS_ARRAY_SIZE(extensions), body_char)); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); aws_byte_buf_clean_up(&body_buf); aws_byte_buf_clean_up(&expected_head_buf); aws_byte_buf_clean_up(&written_buf); return AWS_OP_SUCCESS; } /* Send a request whose headers don't fit in a single aws_io_message */ H1_CLIENT_TEST_CASE(h1_client_request_send_large_head) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Generate headers while filling in contents of `expected` buffer */ struct aws_http_header headers[1000]; size_t num_headers = AWS_ARRAY_SIZE(headers); AWS_ZERO_STRUCT(headers); struct aws_byte_buf expected; aws_byte_buf_init(&expected, allocator, num_headers * 128); /* approx capacity */ struct aws_byte_cursor request_line = aws_byte_cursor_from_c_str("GET / HTTP/1.1\r\n"); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&expected, request_line)); /* Each header just has a UUID for its name and value */ for (size_t i = 0; i < num_headers; ++i) { struct aws_http_header *header = headers + i; /* Point to where the UUID is going to be written in the `expected` buffer */ header->name = aws_byte_cursor_from_array(expected.buffer + expected.len, AWS_UUID_STR_LEN - 1); header->value = header->name; struct aws_uuid uuid; ASSERT_SUCCESS(aws_uuid_init(&uuid)); ASSERT_SUCCESS(aws_uuid_to_str(&uuid, &expected)); ASSERT_TRUE(aws_byte_buf_write(&expected, (uint8_t *)": ", 2)); ASSERT_SUCCESS(aws_uuid_to_str(&uuid, &expected)); ASSERT_TRUE(aws_byte_buf_write(&expected, (uint8_t *)"\r\n", 2)); } ASSERT_TRUE(aws_byte_buf_write(&expected, (uint8_t *)"\r\n", 2)); struct aws_http_message *request = s_new_default_get_request(allocator); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); /* send request */ struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, }; struct aws_http_stream *stream = 
aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); /* check result */ testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(testing_channel_check_written_messages( &tester.testing_channel, allocator, aws_byte_cursor_from_buf(&expected))); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); aws_byte_buf_clean_up(&expected); return AWS_OP_SUCCESS; } /* Check that if many requests are made (pipelining) they all get sent */ H1_CLIENT_TEST_CASE(h1_client_request_send_multiple) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send requests */ struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = s_new_default_get_request(allocator), }; struct aws_http_stream *streams[3]; size_t num_streams = AWS_ARRAY_SIZE(streams); for (size_t i = 0; i < num_streams; ++i) { streams[i] = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(streams[i]); ASSERT_SUCCESS(aws_http_stream_activate(streams[i])); } testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(opt.request); /* check result */ const char *expected = "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ for (size_t i = 0; i < num_streams; ++i) { aws_http_stream_release(streams[i]); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Check that if many requests are made (pipelining) they all get sent */ H1_CLIENT_TEST_CASE(h1_client_request_send_multiple_chunked_encoding) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send requests */ struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = s_new_default_chunked_put_request(allocator), }; struct aws_http_stream *streams[3]; struct aws_byte_buf index_strs[AWS_ARRAY_SIZE(streams)]; size_t num_streams = AWS_ARRAY_SIZE(streams); for (size_t i = 0; i < num_streams; ++i) { streams[i] = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(streams[i]); ASSERT_SUCCESS(aws_byte_buf_init(&index_strs[i], allocator, 4)); index_strs[i].len = snprintf((char *)index_strs[i].buffer, index_strs[i].capacity, "%03zu", i); ASSERT_SUCCESS(aws_http_stream_activate(streams[i])); } /* All streams will pause and wait for data */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* Write to all the streams */ for (size_t i = 0; i < num_streams; ++i) { static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_byte_cursor index_str_cursor = aws_byte_cursor_from_buf(&index_strs[i]); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_input_stream *index_stream = aws_input_stream_new_from_cursor(allocator, &index_str_cursor); struct aws_http1_chunk_options options_1 = s_default_chunk_options(body_stream, body.len); struct aws_http1_chunk_options options_2 = s_default_chunk_options(index_stream, index_str_cursor.len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(streams[i], &options_1)); ASSERT_SUCCESS(aws_http1_stream_write_chunk(streams[i], &options_2)); 
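/* Each chunk is framed on the wire as a hex size line, CRLF, payload, CRLF; the zero-length chunk written next terminates the stream's body with "0\r\n\r\n". */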
ASSERT_SUCCESS(s_write_termination_chunk(allocator, streams[i])); } testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(opt.request); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "3\r\n" "000" "\r\n" "0\r\n" "\r\n" "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "3\r\n" "001" "\r\n" "0\r\n" "\r\n" "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "3\r\n" "002" "\r\n" "0\r\n" "\r\n"; testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* clean up */ for (size_t i = 0; i < num_streams; ++i) { aws_http_stream_release(streams[i]); aws_byte_buf_clean_up(&index_strs[i]); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } static int s_stream_tester_init( struct client_stream_tester *tester, struct tester *main_tester, struct aws_http_message *request) { struct client_stream_tester_options options = { .request = request, .connection = main_tester->connection, }; return client_stream_tester_init(tester, main_tester->alloc, &options); } H1_CLIENT_TEST_CASE(h1_client_stream_release_after_complete) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, "HTTP/1.1 204 No Content\r\n\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_FALSE(stream_tester.destroyed); aws_http_stream_release(stream_tester.stream); stream_tester.stream = NULL; ASSERT_TRUE(stream_tester.destroyed); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_stream_release_before_complete) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); aws_http_stream_release(stream_tester.stream); stream_tester.stream = NULL; ASSERT_FALSE(stream_tester.destroyed); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, "HTTP/1.1 204 No Content\r\n\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_TRUE(stream_tester.destroyed); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_get_1liner) { (void)ctx; struct tester 
tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, "HTTP/1.1 204 No Content\r\n\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(204, stream_tester.response_status); ASSERT_UINT_EQUALS(0, aws_http_headers_count(stream_tester.response_headers)); ASSERT_UINT_EQUALS(0, stream_tester.response_body.len); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } static int s_check_header(const struct aws_http_headers *headers, size_t i, const char *name_str, const char *value) { size_t headers_num = aws_http_headers_count(headers); ASSERT_TRUE(i < headers_num); struct aws_http_header header; ASSERT_SUCCESS(aws_http_headers_get_index(headers, i, &header)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header.name, name_str)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header.value, value)); return AWS_OP_SUCCESS; } static int s_check_info_response_header( const struct client_stream_tester *stream_tester, size_t response_i, size_t header_i, const char *name_str, const char *value) { ASSERT_TRUE(response_i < stream_tester->num_info_responses); const struct aws_http_headers *headers = aws_http_message_get_const_headers(stream_tester->info_responses[response_i]); return s_check_header(headers, header_i, name_str, value); } H1_CLIENT_TEST_CASE(h1_client_response_get_headers) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 308 Permanent Redirect\r\n" "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "Location: /index.html\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(308, stream_tester.response_status); ASSERT_UINT_EQUALS(2, aws_http_headers_count(stream_tester.response_headers)); ASSERT_SUCCESS(s_check_header(stream_tester.response_headers, 0, "Date", "Fri, 01 Mar 2019 17:18:55 GMT")); ASSERT_SUCCESS(s_check_header(stream_tester.response_headers, 1, "Location", "/index.html")); ASSERT_UINT_EQUALS(0, stream_tester.response_body.len); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_get_body) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = 
s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "Call Momo")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_UINT_EQUALS(1, aws_http_headers_count(stream_tester.response_headers)); ASSERT_SUCCESS(s_check_header(stream_tester.response_headers, 0, "Content-Length", "9")); ASSERT_TRUE(aws_byte_buf_eq_c_str(&stream_tester.response_body, "Call Momo")); ASSERT_TRUE(stream_tester.metrics.receive_end_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.receive_start_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.receive_end_timestamp_ns > stream_tester.metrics.receive_start_timestamp_ns); ASSERT_TRUE( stream_tester.metrics.receiving_duration_ns == stream_tester.metrics.receive_end_timestamp_ns - stream_tester.metrics.receive_start_timestamp_ns); ASSERT_TRUE(stream_tester.metrics.send_start_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.send_end_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.send_end_timestamp_ns > stream_tester.metrics.send_start_timestamp_ns); ASSERT_TRUE( stream_tester.metrics.sending_duration_ns == stream_tester.metrics.send_end_timestamp_ns - stream_tester.metrics.send_start_timestamp_ns); ASSERT_TRUE(stream_tester.metrics.stream_id == stream_tester.stream->id); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } static int s_test_expected_no_body_response(struct aws_allocator *allocator, int status_int, bool head_request) { struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = head_request ? 
s_new_default_head_request(allocator) : s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* form response */ struct aws_byte_cursor status_text = aws_byte_cursor_from_c_str(aws_http_status_text(status_int)); char c_status_text[100]; memcpy(c_status_text, status_text.ptr, status_text.len); c_status_text[status_text.len] = '\0'; char response_text[500]; char *response_headers = "Content-Length: 9\r\n" "\r\n"; snprintf(response_text, sizeof(response_text), "HTTP/1.1 %d %s\r\n%s", status_int, c_status_text, response_headers); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, response_text)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(status_int, stream_tester.response_status); ASSERT_UINT_EQUALS(1, aws_http_headers_count(stream_tester.response_headers)); ASSERT_SUCCESS(s_check_header(stream_tester.response_headers, 0, "Content-Length", "9")); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_get_no_body_for_head_request) { (void)ctx; ASSERT_SUCCESS(s_test_expected_no_body_response(allocator, 200, true)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_get_no_body_from_304) { (void)ctx; ASSERT_SUCCESS(s_test_expected_no_body_response(allocator, 304, false)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_get_100) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 100 Continue\r\n" "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "\r\n" "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "Call Momo")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_UINT_EQUALS(1, stream_tester.num_info_responses); int info_response_status; ASSERT_SUCCESS(aws_http_message_get_response_status(stream_tester.info_responses[0], &info_response_status)); ASSERT_INT_EQUALS(100, info_response_status); ASSERT_SUCCESS(s_check_info_response_header(&stream_tester, 0, 0, "Date", "Fri, 01 Mar 2019 17:18:55 GMT")); ASSERT_UINT_EQUALS(1, aws_http_headers_count(stream_tester.response_headers)); ASSERT_SUCCESS(s_check_header(stream_tester.response_headers, 0, "Content-Length", "9")); ASSERT_TRUE(aws_byte_buf_eq_c_str(&stream_tester.response_body, "Call Momo")); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Check that a response spread 
across multiple aws_io_messages comes through */ H1_CLIENT_TEST_CASE(h1_client_response_get_1_from_multiple_io_messages) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response with each byte in its own aws_io_message */ const char *response_str = "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "Call Momo"; size_t response_str_len = strlen(response_str); for (size_t i = 0; i < response_str_len; ++i) { ASSERT_SUCCESS( testing_channel_push_read_data(&tester.testing_channel, aws_byte_cursor_from_array(response_str + i, 1))); } testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_UINT_EQUALS(1, aws_http_headers_count(stream_tester.response_headers)); ASSERT_SUCCESS(s_check_header(stream_tester.response_headers, 0, "Content-Length", "9")); ASSERT_TRUE(aws_byte_buf_eq_c_str(&stream_tester.response_body, "Call Momo")); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Check that multiple responses in a single aws_io_message all come through */ H1_CLIENT_TEST_CASE(h1_client_response_get_multiple_from_1_io_message) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send requests */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_testers[3]; for (size_t i = 0; i < AWS_ARRAY_SIZE(stream_testers); ++i) { ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[i], &tester, request)); } testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send all responses in a single aws_io_message */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 204 No Content\r\n\r\n" "HTTP/1.1 204 No Content\r\n\r\n" "HTTP/1.1 204 No Content\r\n\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check results */ for (size_t i = 0; i < AWS_ARRAY_SIZE(stream_testers); ++i) { ASSERT_TRUE(stream_testers[i].complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_testers[i].on_complete_error_code); ASSERT_INT_EQUALS(204, stream_testers[i].response_status); ASSERT_UINT_EQUALS(0, aws_http_headers_count(stream_testers[i].response_headers)); ASSERT_UINT_EQUALS(0, stream_testers[i].response_body.len); client_stream_tester_clean_up(&stream_testers[i]); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_with_bad_data_shuts_down_connection) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is 
sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str_ignore_errors(&tester.testing_channel, "Mmmm garbage data\r\n\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test case is: 1 request has been sent. Then 2 responses arrive in 1 io message. * The 1st request should complete just fine, then the connection should shutdown with error */ H1_CLIENT_TEST_CASE(h1_client_response_with_too_much_data_shuts_down_connection) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send 1 request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send 2 responses in a single aws_io_message. */ ASSERT_SUCCESS(testing_channel_push_read_str_ignore_errors( &tester.testing_channel, "HTTP/1.1 204 No Content\r\n\r\n" "HTTP/1.1 204 No Content\r\n\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* 1st response should have come across successfully */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(204, stream_tester.response_status); ASSERT_UINT_EQUALS(0, aws_http_headers_count(stream_tester.response_headers)); ASSERT_UINT_EQUALS(0, stream_tester.response_body.len); client_stream_tester_clean_up(&stream_tester); /* extra data should have caused channel shutdown */ testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_TRUE(testing_channel_get_shutdown_error_code(&tester.testing_channel) != AWS_ERROR_SUCCESS); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } struct slow_body_sender { struct aws_input_stream base; struct aws_stream_status status; struct aws_byte_cursor cursor; size_t delay_ticks; /* Don't send anything the first N ticks */ size_t bytes_per_tick; /* Don't send more than N bytes per tick */ }; static int s_slow_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct slow_body_sender *sender = AWS_CONTAINER_OF(stream, struct slow_body_sender, base); size_t dst_available = dest->capacity - dest->len; size_t writing = 0; if (sender->delay_ticks > 0) { sender->delay_ticks--; } else { writing = sender->cursor.len; if (dst_available < writing) { writing = dst_available; } if ((sender->bytes_per_tick < writing) && (sender->bytes_per_tick > 0)) { writing = sender->bytes_per_tick; } } aws_byte_buf_write(dest, sender->cursor.ptr, writing); aws_byte_cursor_advance(&sender->cursor, writing); if (sender->cursor.len == 0) { sender->status.is_end_of_stream = true; } return AWS_OP_SUCCESS; } static int s_slow_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { struct slow_body_sender *sender = AWS_CONTAINER_OF(stream, struct slow_body_sender, base); *status = sender->status; return AWS_OP_SUCCESS; } static int s_slow_stream_get_length(struct aws_input_stream *stream, 
int64_t *out_length) { struct slow_body_sender *sender = AWS_CONTAINER_OF(stream, struct slow_body_sender, base); *out_length = sender->cursor.len; return AWS_OP_SUCCESS; } static void s_slow_stream_destroy(struct aws_input_stream *stream) { (void)stream; } static struct aws_input_stream_vtable s_slow_stream_vtable = { .seek = NULL, .read = s_slow_stream_read, .get_status = s_slow_stream_get_status, .get_length = s_slow_stream_get_length, }; static void s_slow_body_sender_init(struct slow_body_sender *body_sender) { /* set up request whose body won't send immediately */ struct aws_input_stream empty_stream_base; AWS_ZERO_STRUCT(empty_stream_base); body_sender->base = empty_stream_base; body_sender->status.is_end_of_stream = false; body_sender->status.is_valid = true; struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); body_sender->cursor = body; body_sender->delay_ticks = 5; body_sender->bytes_per_tick = 1; body_sender->base.vtable = &s_slow_stream_vtable; aws_ref_count_init( &body_sender->base.ref_count, &body_sender, (aws_simple_completion_callback *)s_slow_stream_destroy); } /* It should be fine to receive a response before the request has finished sending */ H1_CLIENT_TEST_CASE(h1_client_response_arrives_before_request_done_sending_is_ok) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* set up request whose body won't send immediately */ struct slow_body_sender body_sender; AWS_ZERO_STRUCT(body_sender); s_slow_body_sender_init(&body_sender); struct aws_input_stream *body_stream = &body_sender.base; struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("16"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); aws_http_message_set_body_stream(request, body_stream); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); /* send head of request */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); aws_input_stream_release(body_stream); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, "HTTP/1.1 200 OK\r\n\r\n")); /* tick loop until body finishes sending.*/ while (body_sender.cursor.len > 0) { /* on_complete shouldn't fire until all outgoing data sent AND all incoming data received */ ASSERT_FALSE(stream_tester.complete); testing_channel_run_currently_queued_tasks(&tester.testing_channel); } /* flush any further work so that stream completes */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n" "write more tests"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_UINT_EQUALS(0, aws_http_headers_count(stream_tester.response_headers)); 
ASSERT_UINT_EQUALS(0, stream_tester.response_body.len); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* It should be fine to receive a response before the request has finished sending */ H1_CLIENT_TEST_CASE(h1_client_response_arrives_before_request_chunks_done_sending_is_ok) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* set up request whose body won't send immediately */ struct aws_input_stream empty_stream_base; AWS_ZERO_STRUCT(empty_stream_base); struct slow_body_sender body_sender = { .base = empty_stream_base, .status = { .is_end_of_stream = false, .is_valid = true, }, .cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"), .delay_ticks = 5, .bytes_per_tick = 1, }; body_sender.base.vtable = &s_slow_stream_vtable; aws_ref_count_init( &body_sender.base.ref_count, &body_sender, (aws_simple_completion_callback *)s_slow_stream_destroy); struct aws_input_stream *body_stream = &body_sender.base; struct aws_http_message *request = s_new_default_chunked_put_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); /* send head of request */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, "HTTP/1.1 200 OK\r\n\r\n")); testing_channel_run_currently_queued_tasks(&tester.testing_channel); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body_sender.cursor.len); options.on_complete = NULL; /* The stream_tester takes care of the stream deletion */ ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream_tester.stream, &options)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream_tester.stream)); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); aws_input_stream_release(body_stream); /* tick loop until body finishes sending.*/ while (body_sender.cursor.len > 0) { /* on_complete shouldn't fire until all outgoing data sent AND all incoming data received */ ASSERT_FALSE(stream_tester.complete); testing_channel_run_currently_queued_tasks(&tester.testing_channel); } /* flush any further work so that stream completes */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_UINT_EQUALS(0, aws_http_headers_count(stream_tester.response_headers)); ASSERT_UINT_EQUALS(0, stream_tester.response_body.len); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Response data arrives, but there was no outstanding request */ H1_CLIENT_TEST_CASE(h1_client_response_without_request_shuts_down_connection) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(testing_channel_push_read_str_ignore_errors(&tester.testing_channel, "HTTP/1.1 200 OK\r\n\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); 
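/* A response with no outstanding request is a protocol violation, so the channel is expected to shut down with a non-success error code. */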
ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_TRUE(testing_channel_get_shutdown_error_code(&tester.testing_channel) != AWS_ERROR_SUCCESS); /* clean up */ ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* A response with the "Connection: close" header should result in the connection shutting down * after the stream completes. */ H1_CLIENT_TEST_CASE(h1_client_response_close_header_ends_connection) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 200 OK\r\n" "Connection: close\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Response should come across successfully * but connection should be closing when the stream-complete callback fires */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_FALSE(stream_tester.on_complete_connection_is_open); /* Connection should have shut down cleanly after delivering response */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, testing_channel_get_shutdown_error_code(&tester.testing_channel)); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* A request with the "Connection: close" header should result in the connection shutting down * after the stream completes. 
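* Once the "Connection: close" request has been written, the connection should also stop reporting itself as open, even before the response arrives.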
*/ H1_CLIENT_TEST_CASE(h1_client_request_close_header_ends_connection) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Request has "Connection: close" header */ struct aws_http_message *request = s_new_default_get_request(allocator); struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("example.com"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("close"), }, }; ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); /* Set up response tester, which sends the request as a side-effect */ struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Check that request was sent */ const char *expected = "GET / HTTP/1.1\r\n" "Host: example.com\r\n" "Connection: close\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_message_str(&tester.testing_channel, expected)); /* Connection shouldn't be "open" at this point, but it also shouldn't shut down until response is received */ ASSERT_FALSE(aws_http_connection_is_open(tester.connection)); ASSERT_FALSE(testing_channel_is_shutdown_completed(&tester.testing_channel)); /* Send response */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 200 OK\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Response should come across successfully */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_FALSE(stream_tester.on_complete_connection_is_open); /* Connection should have shut down cleanly after delivering response */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, testing_channel_get_shutdown_error_code(&tester.testing_channel)); /* clean up */ aws_http_message_destroy(request); client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* While pipelining 3 requests, and 2nd response has a "Connection: close" header. * 2 requests should complete successfully and the connection should close. */ H1_CLIENT_TEST_CASE(h1_client_response_close_header_with_pipelining) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Send 3 requests before receiving any responses */ enum { NUM_STREAMS = 3 }; struct aws_http_message *requests[NUM_STREAMS]; struct client_stream_tester stream_testers[NUM_STREAMS]; for (size_t i = 0; i < NUM_STREAMS; ++i) { requests[i] = s_new_default_get_request(allocator); ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[i], &tester, requests[i])); }; testing_channel_drain_queued_tasks(&tester.testing_channel); /* Send "Connection: close" header in 2nd response. * Do not send 3rd response. 
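* The 3rd stream is expected to complete with AWS_ERROR_HTTP_CONNECTION_CLOSED once the connection shuts down.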
*/ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, /* Response 1 */ "HTTP/1.1 200 OK\r\n" "\r\n" /* Response 2 */ "HTTP/1.1 200 OK\r\n" "Connection: close\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); { /* First stream should be successful, and connection should be open when it completes */ const struct client_stream_tester *first = &stream_testers[0]; ASSERT_TRUE(first->complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, first->on_complete_error_code); ASSERT_INT_EQUALS(200, first->response_status); ASSERT_TRUE(first->on_complete_connection_is_open); } { /* Second stream should be successful, BUT connection should NOT be open when it completes */ const struct client_stream_tester *second = &stream_testers[1]; ASSERT_TRUE(second->complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, second->on_complete_error_code); ASSERT_INT_EQUALS(200, second->response_status); ASSERT_FALSE(second->on_complete_connection_is_open); } { /* Third stream should complete with error, since connection should close after 2nd stream completes. */ const struct client_stream_tester *third = &stream_testers[2]; ASSERT_TRUE(third->complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, third->on_complete_error_code); ASSERT_FALSE(third->on_complete_connection_is_open); } /* Connection should have shut down after delivering response. * Not going to check error_code because it's pretty ambiguous what it ought to be in this circumstance */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); /* clean up */ for (size_t i = 0; i < NUM_STREAMS; ++i) { aws_http_message_destroy(requests[i]); client_stream_tester_clean_up(&stream_testers[i]); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* While pipelining 3 requests, and 2nd request has a "Connection: close" header. * 2 requests should complete successfully and the connection should close. */ H1_CLIENT_TEST_CASE(h1_client_request_close_header_with_pipelining) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Queue up 3 requests, where the middle request has a "Connection: close" header */ enum { NUM_STREAMS = 3 }; struct aws_http_message *requests[NUM_STREAMS]; struct client_stream_tester stream_testers[NUM_STREAMS]; for (size_t i = 0; i < NUM_STREAMS; ++i) { requests[i] = s_new_default_get_request(allocator); if (i == 1) { struct aws_http_header close_header = { .name = aws_byte_cursor_from_c_str("Connection"), .value = aws_byte_cursor_from_c_str("close"), }; ASSERT_SUCCESS(aws_http_message_add_header(requests[i], close_header)); } /* Response tester sends requests as a side-effect */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[i], &tester, requests[i])); }; testing_channel_drain_queued_tasks(&tester.testing_channel); /* Check that ONLY first 2 requests were sent */ const char *expected = "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "Connection: close\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* Send 2 responses. 
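* Only 2 responses arrive because the 3rd request was never written; its stream should fail with AWS_ERROR_HTTP_CONNECTION_CLOSED.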
*/ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, /* Response 1 */ "HTTP/1.1 200 OK\r\n" "\r\n" /* Response 2 */ "HTTP/1.1 200 OK\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); { /* First stream should be successful */ const struct client_stream_tester *first = &stream_testers[0]; ASSERT_TRUE(first->complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, first->on_complete_error_code); ASSERT_INT_EQUALS(200, first->response_status); } { /* Second stream should be successful */ const struct client_stream_tester *second = &stream_testers[1]; ASSERT_TRUE(second->complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, second->on_complete_error_code); ASSERT_INT_EQUALS(200, second->response_status); } { /* Third stream should complete with error, since connection should close after 2nd stream completes. */ const struct client_stream_tester *third = &stream_testers[2]; ASSERT_TRUE(third->complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, third->on_complete_error_code); } /* Connection should have shut down after delivering second response. * Not going to check error_code because it's pretty ambiguous what it ought to be in this circumstance */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); /* clean up */ for (size_t i = 0; i < NUM_STREAMS; ++i) { aws_http_message_destroy(requests[i]); client_stream_tester_clean_up(&stream_testers[i]); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* While pipelining 3 requests, and 2nd request has a "Connection: close" header. * 2 requests should complete successfully and the connection should close. */ H1_CLIENT_TEST_CASE(h1_client_request_close_header_with_chunked_encoding_and_pipelining) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Queue up 3 requests, where the middle request has a "Connection: close" header */ enum { NUM_STREAMS = 3 }; struct aws_http_message *requests[NUM_STREAMS]; struct client_stream_tester stream_testers[NUM_STREAMS]; for (size_t i = 0; i < NUM_STREAMS; ++i) { requests[i] = s_new_default_chunked_put_request(allocator); if (i == 1) { struct aws_http_header close_header = { .name = aws_byte_cursor_from_c_str("Connection"), .value = aws_byte_cursor_from_c_str("close"), }; ASSERT_SUCCESS(aws_http_message_add_header(requests[i], close_header)); } /* Response tester sends requests as a side-effect */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[i], &tester, requests[i])); }; testing_channel_drain_queued_tasks(&tester.testing_channel); /* Write to all the streams */ for (size_t i = 0; i < NUM_STREAMS; ++i) { static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body.len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream_testers[i].stream, &options)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream_testers[i].stream)); } testing_channel_drain_queued_tasks(&tester.testing_channel); /* Check that ONLY first 2 requests were sent */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "\r\n" "PUT /plan.txt HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "Connection: close\r\n" "\r\n" "10\r\n" "write more tests" "\r\n" "0\r\n" "\r\n"; 
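/* A note on the expected wire text above: with "Transfer-Encoding: chunked", each chunk is framed as
 * "<length-in-hex>\r\n" + <data> + "\r\n", and the body ends with a zero-length chunk ("0\r\n" followed
 * by a blank line). "write more tests" is 16 bytes, hence the "10" (hex) size line. For reference, the
 * write pattern driven via s_default_chunk_options() above is roughly the following sketch (illustrative
 * only; field names as declared in aws_http1_chunk_options, not an exhaustive setup):
 *
 *     struct aws_http1_chunk_options opts;
 *     AWS_ZERO_STRUCT(opts);
 *     opts.chunk_data = body_stream;        // aws_input_stream producing the chunk payload
 *     opts.chunk_data_size = body.len;      // must match what the stream actually produces
 *     ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &opts));
 *     // ...repeat per chunk, then write a final chunk with chunk_data_size == 0 to end the body.
 */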
ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* Send 2 responses. */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, /* Response 1 */ "HTTP/1.1 200 OK\r\n" "\r\n" /* Response 2 */ "HTTP/1.1 200 OK\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); { /* First stream should be successful */ const struct client_stream_tester *first = &stream_testers[0]; ASSERT_TRUE(first->complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, first->on_complete_error_code); ASSERT_INT_EQUALS(200, first->response_status); } { /* Second stream should be successful */ const struct client_stream_tester *second = &stream_testers[1]; ASSERT_TRUE(second->complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, second->on_complete_error_code); ASSERT_INT_EQUALS(200, second->response_status); } { /* Third stream should complete with error, since connection should close after 2nd stream completes. */ const struct client_stream_tester *third = &stream_testers[2]; ASSERT_TRUE(third->complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, third->on_complete_error_code); } /* Connection should have shut down after delivering second response. * Not going to check error_code because it's pretty ambiguous what it ought to be in this circumstance */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); /* clean up */ for (size_t i = 0; i < NUM_STREAMS; ++i) { aws_http_message_destroy(requests[i]); client_stream_tester_clean_up(&stream_testers[i]); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that the stream window rules are respected. These rules are: * - Each new stream's window starts at initial_stream_window_size. * - Only body data counts against the stream's window. * - The stream will not receive more body data than its window allows. * - Any future streams on the same connection also start with initial_stream_window_size, * they should not be affected if a previous stream had a very small or very large window when it ended. */ H1_CLIENT_TEST_CASE(h1_client_respects_stream_window) { (void)ctx; /* This test only checks that the stream window is respected. 
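* (initial_stream_window_size is set to 5 below and each response carries a 10-byte body, so every
* stream is expected to stall after 5 body bytes until its window is re-opened.)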
* We're not testing the connection window, so just use a giant buffer */ struct tester_options tester_opts = { .manual_window_management = true, .initial_stream_window_size = 5, .read_buffer_capacity = SIZE_MAX, }; struct tester tester; ASSERT_SUCCESS(s_tester_init_ex(&tester, allocator, &tester_opts)); /** * Request/Response 1 */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester1; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester1, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* send response whose body is 2X the initial_stream_window_size */ const char *response_str = "HTTP/1.1 200 OK\r\n" "Content-Length: 10\r\n" "\r\n" "0123456789"; ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, response_str)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* stream window should reach 0 before entire body has been received */ ASSERT_BIN_ARRAYS_EQUALS("01234", 5, stream_tester1.response_body.buffer, stream_tester1.response_body.len); struct aws_h1_window_stats window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_TRUE(window_stats.has_incoming_stream); ASSERT_UINT_EQUALS(0, window_stats.stream_window); /* open window just enough to get the rest of the body */ aws_http_stream_update_window(stream_tester1.stream, 5); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_BIN_ARRAYS_EQUALS("0123456789", 10, stream_tester1.response_body.buffer, stream_tester1.response_body.len); ASSERT_TRUE(stream_tester1.complete); ASSERT_SUCCESS(stream_tester1.on_complete_error_code); client_stream_tester_clean_up(&stream_tester1); /** * Stream 2. * Send same request/response as before. * Everything should work fine, even though the previous stream left off with 0 window. */ struct client_stream_tester stream_tester2; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester2, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, response_str)); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_BIN_ARRAYS_EQUALS("01234", 5, stream_tester2.response_body.buffer, stream_tester2.response_body.len); window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_TRUE(window_stats.has_incoming_stream); ASSERT_UINT_EQUALS(0, window_stats.stream_window); /** * Stream 3. * Stress pipelining by sending the 3rd request and response before stream 2 * has opened its window enough to complete. */ struct client_stream_tester stream_tester3; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester3, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, response_str)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* now open stream 2's window TO THE MAX to complete it. 
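* (The over-sized increment on stream 2 must not leak into stream 3: the assertions below check that
* stream 3 still begins with the initial 5-byte window.)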
*/ aws_http_stream_update_window(stream_tester2.stream, SIZE_MAX); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_BIN_ARRAYS_EQUALS("0123456789", 10, stream_tester2.response_body.buffer, stream_tester2.response_body.len); ASSERT_TRUE(stream_tester2.complete); ASSERT_SUCCESS(stream_tester2.on_complete_error_code); client_stream_tester_clean_up(&stream_tester2); /* even though stream2 completed with a WIDE OPEN window, stream3's window should be at the initial size */ ASSERT_BIN_ARRAYS_EQUALS("01234", 5, stream_tester3.response_body.buffer, stream_tester3.response_body.len); window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_TRUE(window_stats.has_incoming_stream); ASSERT_UINT_EQUALS(0, window_stats.stream_window); /* finish up stream 3 */ aws_http_stream_update_window(stream_tester3.stream, 100); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_BIN_ARRAYS_EQUALS("0123456789", 10, stream_tester3.response_body.buffer, stream_tester3.response_body.len); ASSERT_TRUE(stream_tester3.complete); ASSERT_SUCCESS(stream_tester3.on_complete_error_code); client_stream_tester_clean_up(&stream_tester3); /* clean up */ aws_http_message_destroy(request); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* This tests the specific way that HTTP/1 manages its connection window. */ H1_CLIENT_TEST_CASE(h1_client_connection_window_with_buffer) { (void)ctx; struct tester_options tester_opts = { .manual_window_management = true, .initial_stream_window_size = 0, .read_buffer_capacity = 100, }; struct tester tester; ASSERT_SUCCESS(s_tester_init_ex(&tester, allocator, &tester_opts)); struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* confirm starting stats before any data received. * connection window should match buffer capacity. */ struct aws_h1_window_stats window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_UINT_EQUALS(100, window_stats.buffer_capacity); ASSERT_UINT_EQUALS(0, window_stats.buffer_pending_bytes); ASSERT_UINT_EQUALS(100, window_stats.connection_window); ASSERT_UINT_EQUALS(0, window_stats.recent_window_increments); if (window_stats.has_incoming_stream) { /* It's an implementation detail whether the incoming stream ptr is set at this point, * but if it is, it should use initial-window-size*/ ASSERT_UINT_EQUALS(0, window_stats.stream_window); } /* send 49 byte response 1 byte at a time, so we can see the message queue in action */ const char *response_str = "HTTP/1.1 200 OK\r\n" "Content-Length: 10\r\n" "\r\n" "0123456789"; struct aws_byte_cursor response_cursor = aws_byte_cursor_from_c_str(response_str); while (response_cursor.len > 0) { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(&response_cursor, 1); ASSERT_SUCCESS(testing_channel_push_read_data(&tester.testing_channel, one_byte)); } testing_channel_drain_queued_tasks(&tester.testing_channel); /* The stream should not have received any body, since its initial-window-size is zero. * At time of writing, the connection would not process headers if stream window was zero, * but that might change in the future, so not testing window stats here. */ ASSERT_UINT_EQUALS(0, stream_tester.response_body.len); /* Open the stream window by 1 byte and check stats. 
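* (For reference, the response head pushed above is 39 bytes: "HTTP/1.1 200 OK\r\n" = 17 bytes,
* "Content-Length: 10\r\n" = 20 bytes, and the blank "\r\n" ending the header block = 2 bytes.)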
* 40 bytes should be processed: 39 bytes of headers and metadata + 1 byte of body data */ aws_http_stream_update_window(stream_tester.stream, 1); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_UINT_EQUALS(1, stream_tester.response_body.len); window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_UINT_EQUALS( 40, window_stats.recent_window_increments); /* window incremented to account for processed data */ ASSERT_UINT_EQUALS(100, window_stats.buffer_capacity); ASSERT_UINT_EQUALS(9, window_stats.buffer_pending_bytes); ASSERT_UINT_EQUALS(91, window_stats.connection_window); ASSERT_TRUE(window_stats.has_incoming_stream); ASSERT_UINT_EQUALS(0, window_stats.stream_window); /* Open stream window enough to finish */ aws_http_stream_update_window(stream_tester.stream, 9); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_UINT_EQUALS(10, stream_tester.response_body.len); ASSERT_TRUE(stream_tester.complete); ASSERT_SUCCESS(stream_tester.on_complete_error_code); window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_UINT_EQUALS(9, window_stats.recent_window_increments); /* window incremented to account for processed data */ ASSERT_UINT_EQUALS(100, window_stats.buffer_capacity); ASSERT_UINT_EQUALS(0, window_stats.buffer_pending_bytes); ASSERT_UINT_EQUALS(100, window_stats.connection_window); ASSERT_FALSE(window_stats.has_incoming_stream); /* clean up */ client_stream_tester_clean_up(&stream_tester); aws_http_message_release(request); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test a connection with read_buffer_capacity < initial_window_size */ H1_CLIENT_TEST_CASE(h1_client_connection_window_with_small_buffer) { (void)ctx; struct tester_options tester_opts = { .manual_window_management = true, .initial_stream_window_size = 10, .read_buffer_capacity = 5, }; struct tester tester; ASSERT_SUCCESS(s_tester_init_ex(&tester, allocator, &tester_opts)); struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* can't send response all at once because channel-window is too small. */ const char *response_head_str = "HTTP/1.1 200 OK\r\n" "Content-Length: 20\r\n" "\r\n"; const char *response_body_str = "0123456789" "ABCDEFGHIJ"; /* send response head in little increments, it should flow through to the stream no problem */ struct aws_byte_cursor response_cursor = aws_byte_cursor_from_c_str(response_head_str); while (response_cursor.len > 0) { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(&response_cursor, 1); ASSERT_SUCCESS(testing_channel_push_read_data(&tester.testing_channel, one_byte)); testing_channel_drain_queued_tasks(&tester.testing_channel); } ASSERT_UINT_EQUALS(0, stream_tester.response_body.len); /* send enough body data that stream's window is reduced to zero. 
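* (initial_stream_window_size is 10 for this tester, so the 10 body bytes pushed below exactly
* exhaust the stream window.)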
*/ response_cursor = aws_byte_cursor_from_c_str(response_body_str); for (int i = 0; i < 10; ++i) { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(&response_cursor, 1); ASSERT_SUCCESS(testing_channel_push_read_data(&tester.testing_channel, one_byte)); testing_channel_drain_queued_tasks(&tester.testing_channel); } ASSERT_UINT_EQUALS(10, stream_tester.response_body.len); struct aws_h1_window_stats window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_UINT_EQUALS(0, window_stats.stream_window); ASSERT_UINT_EQUALS(0, window_stats.buffer_pending_bytes); ASSERT_UINT_EQUALS(5, window_stats.buffer_capacity); /* now that stream's window is 0, further data should fill the connection's read-buffer */ for (int i = 0; i < 5; ++i) { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(&response_cursor, 1); ASSERT_SUCCESS(testing_channel_push_read_data(&tester.testing_channel, one_byte)); } testing_channel_drain_queued_tasks(&tester.testing_channel); window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_UINT_EQUALS(5, window_stats.buffer_pending_bytes); /* open the stream's window enough to finish the response, the buffered bytes should be consumed */ aws_http_stream_update_window(stream_tester.stream, SIZE_MAX); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_UINT_EQUALS(15, stream_tester.response_body.len); window_stats = aws_h1_connection_window_stats(tester.connection); ASSERT_UINT_EQUALS(0, window_stats.buffer_pending_bytes); /* send the remainder of the response */ while (response_cursor.len > 0) { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(&response_cursor, 1); ASSERT_SUCCESS(testing_channel_push_read_data(&tester.testing_channel, one_byte)); testing_channel_drain_queued_tasks(&tester.testing_channel); } ASSERT_UINT_EQUALS(20, stream_tester.response_body.len); ASSERT_TRUE(stream_tester.complete); ASSERT_UINT_EQUALS(0, stream_tester.on_complete_error_code); /* clean up */ client_stream_tester_clean_up(&stream_tester); aws_http_message_release(request); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } static void s_on_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; int *completion_error_code = user_data; *completion_error_code = error_code; } static int s_test_content_length_mismatch_is_error( struct aws_allocator *allocator, const char *body, const char *wrong_length) { struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request whose Content-Length does not match body length */ const struct aws_byte_cursor body_cur = aws_byte_cursor_from_c_str(body); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body_cur); struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = aws_byte_cursor_from_c_str(wrong_length), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); aws_http_message_set_body_stream(request, body_stream); int completion_error_code = 0; struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, .on_complete = s_on_complete, .user_data = &completion_error_code, }; struct 
aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT, completion_error_code); /* clean up */ aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_content_length_too_small_is_error) { (void)ctx; return s_test_content_length_mismatch_is_error(allocator, "I am very long", "1"); } H1_CLIENT_TEST_CASE(h1_client_request_content_length_too_large_is_error) { (void)ctx; return s_test_content_length_mismatch_is_error(allocator, "I am very short", "999"); } static int s_test_chunk_length_mismatch_is_error( struct aws_allocator *allocator, const char *body, size_t wrong_length) { struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); const struct aws_byte_cursor body_cur = aws_byte_cursor_from_c_str(body); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body_cur); /* send request */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); int completion_error_code = 0; struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, .on_complete = s_on_complete, .user_data = &completion_error_code, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); /* Initialize with an off by one body length */ struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, wrong_length); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT, completion_error_code); /* clean up */ aws_http_message_destroy(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_chunk_size_too_small_is_error) { (void)ctx; return s_test_chunk_length_mismatch_is_error(allocator, "I am very long", 2); } H1_CLIENT_TEST_CASE(h1_client_request_chunk_size_too_large_is_error) { (void)ctx; return s_test_chunk_length_mismatch_is_error(allocator, "I am very short", 999); } H1_CLIENT_TEST_CASE(h1_client_request_chunks_cancelled_by_channel_shutdown) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* send request */ struct aws_http_message *request = s_new_default_chunked_put_request(allocator); int completion_error_code = 0; struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, .user_data = &completion_error_code, .on_complete = s_on_complete, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); const struct aws_byte_cursor body_cur = aws_byte_cursor_from_c_str("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body_cur); /* This will "pause" the connection loop as there is an empty stream. 
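* (The stream was activated before any chunks were written, so the connection has nothing to send
* and waits for chunk data to arrive.)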
*/ testing_channel_drain_queued_tasks(&tester.testing_channel); /* Now write 2 chunks. The chunk memory should be automatically released when the http stream is destroyed. */ struct aws_http1_chunk_options options = s_default_chunk_options(body_stream, body_cur.len); ASSERT_SUCCESS(aws_http1_stream_write_chunk(stream, &options)); ASSERT_SUCCESS(s_write_termination_chunk(allocator, stream)); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(opt.request); /* shutdown channel before request completes */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&tester.testing_channel); /* even though the channel shut down with error_code 0, * the stream should not get code 0 because it did not complete successfully */ ASSERT_TRUE(completion_error_code != AWS_ERROR_SUCCESS); /* clean up */ aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_request_cancelled_by_channel_shutdown) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); int completion_error_code = 0; /* send request */ struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = s_new_default_get_request(allocator), .user_data = &completion_error_code, .on_complete = s_on_complete, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(opt.request); /* shutdown channel before request completes */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&tester.testing_channel); /* even though the channel shut down with error_code 0, * the stream should not get code 0 because it did not complete successfully */ ASSERT_TRUE(completion_error_code != AWS_ERROR_SUCCESS); /* clean up */ aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_multiple_requests_cancelled_by_channel_shutdown) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_http_stream *streams[3]; int completion_error_codes[3]; memset(completion_error_codes, 0, sizeof(completion_error_codes)); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = s_new_default_get_request(allocator), .on_complete = s_on_complete, }; for (int i = 0; i < 2; ++i) { opt.user_data = &completion_error_codes[i]; streams[i] = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(streams[i]); ASSERT_SUCCESS(aws_http_stream_activate(streams[i])); } /* 2 streams are now in-progress */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* Make 1 more stream that's still locked away in the pending queue */ opt.user_data = &completion_error_codes[2]; streams[2] = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(streams[2]); ASSERT_SUCCESS(aws_http_stream_activate(streams[2])); /* shutdown channel */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(opt.request); /* check results */ for (int i = 0; i < 
3; ++i) { ASSERT_TRUE(completion_error_codes[i] != AWS_ERROR_SUCCESS); aws_http_stream_release(streams[i]); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_new_request_fails_if_channel_shut_down) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); /* wait for shutdown complete */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* send request */ struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = s_new_default_get_request(allocator), }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NULL(stream); ASSERT_INT_EQUALS(aws_last_error(), AWS_ERROR_HTTP_CONNECTION_CLOSED); aws_http_message_destroy(opt.request); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } enum request_callback { REQUEST_CALLBACK_OUTGOING_BODY, REQUEST_CALLBACK_INCOMING_HEADERS, REQUEST_CALLBACK_INCOMING_HEADERS_DONE, REQUEST_CALLBACK_INCOMING_BODY, REQUEST_CALLBACK_COMPLETE, REQUEST_CALLBACK_COUNT, }; static const int ERROR_FROM_CALLBACK_ERROR_CODE = (int)0xBEEFCAFE; struct error_from_callback_tester { struct aws_input_stream base; enum request_callback error_at; int callback_counts[REQUEST_CALLBACK_COUNT]; bool has_errored; struct aws_stream_status status; int on_complete_error_code; struct aws_allocator *alloc; }; static int s_error_from_callback_common( struct error_from_callback_tester *error_tester, enum request_callback current_callback) { error_tester->callback_counts[current_callback]++; /* After error code returned, no more callbacks should fire (except for on_complete) */ AWS_FATAL_ASSERT(!error_tester->has_errored); AWS_FATAL_ASSERT(current_callback <= error_tester->error_at); if (current_callback == error_tester->error_at) { error_tester->has_errored = true; return aws_raise_error(ERROR_FROM_CALLBACK_ERROR_CODE); } return AWS_OP_SUCCESS; } static int s_error_from_outgoing_body_read(struct aws_input_stream *body, struct aws_byte_buf *dest) { (void)dest; struct error_from_callback_tester *error_tester = AWS_CONTAINER_OF(body, struct error_from_callback_tester, base); if (s_error_from_callback_common(error_tester, REQUEST_CALLBACK_OUTGOING_BODY)) { return AWS_OP_ERR; } /* If the common fn was successful, write out some data and end the stream */ ASSERT_TRUE(aws_byte_buf_write(dest, (const uint8_t *)"abcd", 4)); error_tester->status.is_end_of_stream = true; return AWS_OP_SUCCESS; } static int s_error_from_outgoing_body_get_status(struct aws_input_stream *body, struct aws_stream_status *status) { struct error_from_callback_tester *error_tester = AWS_CONTAINER_OF(body, struct error_from_callback_tester, base); *status = error_tester->status; return AWS_OP_SUCCESS; } static void s_error_from_outgoing_body_destroy(struct aws_input_stream *stream) { (void)stream; } static struct aws_input_stream_vtable s_error_from_outgoing_body_vtable = { .seek = NULL, .read = s_error_from_outgoing_body_read, .get_status = s_error_from_outgoing_body_get_status, .get_length = NULL, }; static int s_error_from_incoming_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; (void)header_block; (void)header_array; (void)num_headers; return s_error_from_callback_common(user_data, REQUEST_CALLBACK_INCOMING_HEADERS); } static int 
s_error_from_incoming_headers_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)stream; (void)header_block; return s_error_from_callback_common(user_data, REQUEST_CALLBACK_INCOMING_HEADERS_DONE); } static int s_error_from_incoming_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)data; return s_error_from_callback_common(user_data, REQUEST_CALLBACK_INCOMING_BODY); } static void s_error_tester_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; struct error_from_callback_tester *error_tester = user_data; error_tester->callback_counts[REQUEST_CALLBACK_COMPLETE]++; error_tester->on_complete_error_code = error_code; } static int s_test_error_from_callback(struct aws_allocator *allocator, enum request_callback error_at) { struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_input_stream empty_stream_base; AWS_ZERO_STRUCT(empty_stream_base); struct error_from_callback_tester error_tester = { .base = empty_stream_base, .error_at = error_at, .status = { .is_valid = true, .is_end_of_stream = false, }, }; error_tester.base.vtable = &s_error_from_outgoing_body_vtable; aws_ref_count_init( &error_tester.base.ref_count, &error_tester, (aws_simple_completion_callback *)s_error_from_outgoing_body_destroy); struct aws_input_stream *error_from_outgoing_body_stream = &error_tester.base; /* send request */ struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("4"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_http_method_post)); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); aws_http_message_set_body_stream(request, error_from_outgoing_body_stream); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, .on_response_headers = s_error_from_incoming_headers, .on_response_header_block_done = s_error_from_incoming_headers_done, .on_response_body = s_error_from_incoming_body, .on_complete = s_error_tester_on_stream_complete, .user_data = &error_tester, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(opt.request); aws_input_stream_release(error_from_outgoing_body_stream); /* send response */ ASSERT_SUCCESS(testing_channel_push_read_str_ignore_errors( &tester.testing_channel, "HTTP/1.1 200 OK\r\n" "Transfer-Encoding: chunked\r\n" "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "\r\n" "3\r\n" "two\r\n" "6\r\n" "chunks\r\n" "0\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check that callbacks were invoked before error_at, but not after */ for (int i = 0; i < REQUEST_CALLBACK_COMPLETE; ++i) { if (i <= error_at) { ASSERT_TRUE(error_tester.callback_counts[i] > 0); } else { ASSERT_INT_EQUALS(0, error_tester.callback_counts[i]); } } /* the on_complete callback should always fire though, and should receive the proper error_code */ ASSERT_INT_EQUALS(1, 
error_tester.callback_counts[REQUEST_CALLBACK_COMPLETE]); ASSERT_INT_EQUALS(ERROR_FROM_CALLBACK_ERROR_CODE, error_tester.on_complete_error_code); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_error_from_outgoing_body_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_CALLBACK_OUTGOING_BODY)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_error_from_incoming_headers_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_CALLBACK_INCOMING_HEADERS)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_error_from_incoming_headers_done_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_CALLBACK_INCOMING_HEADERS_DONE)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_error_from_incoming_body_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_CALLBACK_INCOMING_BODY)); return AWS_OP_SUCCESS; } /* After aws_http_connection_close() is called, aws_http_connection_is_open() should return false, * even if both calls were made from outside the event-loop thread. */ H1_CLIENT_TEST_CASE(h1_client_close_from_off_thread_makes_not_open) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); ASSERT_TRUE(aws_http_connection_is_open(tester.connection)); aws_http_connection_close(tester.connection); ASSERT_FALSE(aws_http_connection_is_open(tester.connection)); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_close_from_on_thread_makes_not_open) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); ASSERT_TRUE(aws_http_connection_is_open(tester.connection)); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); aws_http_connection_close(tester.connection); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); ASSERT_FALSE(aws_http_connection_is_open(tester.connection)); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } struct s_callback_invoked { bool destroy_invoked; bool complete_invoked; }; static void s_unactivated_stream_cleans_up_on_destroy(void *data) { struct s_callback_invoked *callback_data = data; callback_data->destroy_invoked = true; } static void s_unactivated_stream_complete(struct aws_http_stream *stream, int error_code, void *data) { (void)stream; (void)error_code; struct s_callback_invoked *callback_data = data; callback_data->complete_invoked = true; } H1_CLIENT_TEST_CASE(h1_client_unactivated_stream_cleans_up) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_TRUE(aws_http_connection_is_open(tester.connection)); struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); struct s_callback_invoked callback_data = {0}; struct aws_http_make_request_options options = { .self_size = sizeof(struct 
aws_http_make_request_options), .request = request, .on_destroy = s_unactivated_stream_cleans_up_on_destroy, .on_complete = s_unactivated_stream_complete, .user_data = &callback_data, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &options); aws_http_message_release(request); ASSERT_NOT_NULL(stream); /* we do not activate; that is the test. */ ASSERT_FALSE(callback_data.destroy_invoked); ASSERT_FALSE(callback_data.complete_invoked); aws_http_stream_release(stream); /* Only the destroy callback was invoked; the complete callback was not */ ASSERT_TRUE(callback_data.destroy_invoked); ASSERT_FALSE(callback_data.complete_invoked); aws_http_connection_close(tester.connection); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } struct protocol_switcher { /* Settings */ struct tester *tester; size_t downstream_handler_window_size; const char *data_after_upgrade_response; bool install_downstream_handler; /* Results */ int upgrade_response_status; bool has_installed_downstream_handler; }; static int s_switch_protocols_on_response_header_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)header_block; struct protocol_switcher *switcher = user_data; aws_http_stream_get_incoming_response_status(stream, &switcher->upgrade_response_status); /* install downstream handler */ if (switcher->install_downstream_handler && (switcher->upgrade_response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS)) { int err = testing_channel_install_downstream_handler( &switcher->tester->testing_channel, switcher->downstream_handler_window_size); if (!err) { switcher->has_installed_downstream_handler = true; } } return AWS_OP_SUCCESS; } /* Send "Connection: Upgrade" request and receive "101 Switching Protocols" response.
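* (This helper exercises the HTTP/1.1 Upgrade mechanism: the client sends "Connection: Upgrade" plus an
* "Upgrade: <protocol>" header, and a "101 Switching Protocols" response means every byte that follows on
* the connection belongs to the new protocol, with the HTTP handler acting as a pass-through from then on.)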
* Optionally, install a downstream handler when response is received */ static int s_switch_protocols(struct protocol_switcher *switcher) { /* send upgrade request */ struct aws_http_header request_headers[] = { { .name = aws_byte_cursor_from_c_str("Connection"), .value = aws_byte_cursor_from_c_str("Upgrade"), }, { .name = aws_byte_cursor_from_c_str("Upgrade"), .value = aws_byte_cursor_from_c_str("MyProtocol"), }, }; struct aws_http_message *request = aws_http_message_new_request(switcher->tester->alloc); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_http_method_get)); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); ASSERT_SUCCESS(aws_http_message_add_header_array(request, request_headers, AWS_ARRAY_SIZE(request_headers))); struct aws_http_make_request_options upgrade_request = { .self_size = sizeof(upgrade_request), .request = request, .user_data = switcher, .on_response_header_block_done = s_switch_protocols_on_response_header_block_done, }; struct aws_http_stream *upgrade_stream = aws_http_connection_make_request(switcher->tester->connection, &upgrade_request); ASSERT_NOT_NULL(upgrade_stream); ASSERT_SUCCESS(aws_http_stream_activate(upgrade_stream)); testing_channel_drain_queued_tasks(&switcher->tester->testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(upgrade_request.request); /* clear all messages written thus far to the testing-channel */ while (!aws_linked_list_empty(testing_channel_get_written_message_queue(&switcher->tester->testing_channel))) { struct aws_linked_list_node *node = aws_linked_list_pop_front(testing_channel_get_written_message_queue(&switcher->tester->testing_channel)); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); aws_mem_release(msg->allocator, msg); } /* send upgrade response (followed by any extra data) */ struct aws_byte_cursor response = aws_byte_cursor_from_c_str("HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: MyProtocol\r\n" "\r\n"); struct aws_byte_cursor extra_data = aws_byte_cursor_from_c_str(switcher->data_after_upgrade_response); struct aws_byte_buf sending_buf; ASSERT_SUCCESS(aws_byte_buf_init(&sending_buf, switcher->tester->alloc, response.len + extra_data.len)); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&sending_buf, response)); if (extra_data.len) { ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&sending_buf, extra_data)); } ASSERT_SUCCESS( testing_channel_push_read_data(&switcher->tester->testing_channel, aws_byte_cursor_from_buf(&sending_buf))); /* wait for response to complete, and check results */ testing_channel_drain_queued_tasks(&switcher->tester->testing_channel); ASSERT_INT_EQUALS(101, switcher->upgrade_response_status); /* if we wanted downstream handler installed, ensure that happened */ if (switcher->install_downstream_handler) { ASSERT_TRUE(switcher->has_installed_downstream_handler); } /* cleanup */ aws_byte_buf_clean_up(&sending_buf); aws_http_stream_release(upgrade_stream); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_new_request_allowed) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* prepare request */ struct aws_http_make_request_options options = { .self_size = sizeof(options), .request = s_new_default_get_request(allocator), }; /* validate the new request is allowed for now */ ASSERT_TRUE(aws_http_connection_new_requests_allowed(tester.connection)); /* switch protocols */ struct 
protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); /* validate that new requests are no longer allowed once protocols have been switched */ ASSERT_FALSE(aws_http_connection_new_requests_allowed(tester.connection)); /* Making a new request should fail */ ASSERT_NULL(aws_http_connection_make_request(tester.connection, &options)); ASSERT_UINT_EQUALS(AWS_ERROR_HTTP_SWITCHED_PROTOCOLS, aws_last_error()); /* close connection */ aws_http_connection_close(tester.connection); /* Making a new request should still fail */ ASSERT_NULL(aws_http_connection_make_request(tester.connection, &options)); ASSERT_UINT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, aws_last_error()); /* clean up */ aws_http_message_destroy(options.request); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_midchannel_sanity_check) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); /* clean up */ ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* confirm data passes through the http-handler untouched in the read direction */ H1_CLIENT_TEST_CASE(h1_client_midchannel_read) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, .downstream_handler_window_size = SIZE_MAX, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); const char *test_str = "inmyprotocolspacesarestrictlyforbidden"; ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, test_str)); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(testing_channel_check_midchannel_read_messages_str(&tester.testing_channel, allocator, test_str)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* confirm that, if new-protocol-data arrives packed into the same aws_io_message as the upgrade response, * that data is properly passed downstream. */ H1_CLIENT_TEST_CASE(h1_client_midchannel_read_immediately) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); const char *test_str = "inmyprotocoleverythingwillbebetter"; struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, .downstream_handler_window_size = SIZE_MAX, .data_after_upgrade_response = test_str, /* Note extra data */ }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); ASSERT_SUCCESS(testing_channel_check_midchannel_read_messages_str(&tester.testing_channel, allocator, test_str)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Have a tiny downstream read-window and increment it in little chunks. */ H1_CLIENT_TEST_CASE(h1_client_midchannel_read_with_small_downstream_window) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, .downstream_handler_window_size = 1 /* Note tiny starting window.
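* A 1-byte starting window forces the HTTP handler to re-slice the passed-through data into many small
* aws_io_messages as the window is opened below; the test asserts that more than one read message was produced.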
*/, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); const char *test_str = "inmyprotocolcapitallettersarethedevil"; ASSERT_SUCCESS(testing_channel_push_read_str(&tester.testing_channel, test_str)); /* open window in tiny increments */ for (size_t i = 0; i < strlen(test_str); ++i) { ASSERT_SUCCESS(testing_channel_increment_read_window(&tester.testing_channel, 1)); testing_channel_drain_queued_tasks(&tester.testing_channel); } /* ensure that the handler actually sent multiple messages */ size_t num_read_messages = 0; struct aws_linked_list *list = testing_channel_get_read_message_queue(&tester.testing_channel); struct aws_linked_list_node *node = aws_linked_list_front(list); while (node != aws_linked_list_end(list)) { num_read_messages++; node = aws_linked_list_next(node); } ASSERT_TRUE(num_read_messages > 1); ASSERT_SUCCESS(testing_channel_check_midchannel_read_messages_str(&tester.testing_channel, allocator, test_str)); /* cleanup */ ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* confirm data passes through the http-handler untouched in the write direction */ H1_CLIENT_TEST_CASE(h1_client_midchannel_write) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, .downstream_handler_window_size = SIZE_MAX, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); const char *test_str = "inmyprotocolthereisnomoney"; testing_channel_push_write_str(&tester.testing_channel, test_str); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, test_str)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that, when HTTP is a midchannel handler, it continues processing write-direction aws_io_messages * in the time between shutdown-in-the-read-direction and shutdown-in-the-write-direction */ static const char *s_write_after_shutdown_in_read_dir_str = "inmyprotocolfrowningisnotallowed"; static void s_downstream_handler_write_on_shutdown( enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately, void *user_data) { (void)error_code; (void)free_scarce_resources_immediately; struct tester *tester = user_data; if (dir == AWS_CHANNEL_DIR_WRITE) { testing_channel_push_write_str(&tester->testing_channel, s_write_after_shutdown_in_read_dir_str); } } H1_CLIENT_TEST_CASE(h1_client_midchannel_write_continues_after_shutdown_in_read_dir) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, .downstream_handler_window_size = SIZE_MAX, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); /* Downstream handler will write data while shutting down in the write direction */ testing_channel_set_downstream_handler_shutdown_callback( &tester.testing_channel, s_downstream_handler_write_on_shutdown, &tester); /* Shut down the channel */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Did the late message get through?
*/ ASSERT_SUCCESS(testing_channel_check_written_messages_str( &tester.testing_channel, tester.alloc, s_write_after_shutdown_in_read_dir_str)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } static void s_on_message_write_complete_save_error_code( struct aws_channel *channel, struct aws_io_message *message, int err_code, void *user_data) { (void)channel; (void)message; int *save = user_data; *save = err_code; } /* Ensure that things fail if a downstream handler is installed without switching protocols. * This test is weird in that failure must occur, but we're not prescriptive about where it occurs. */ H1_CLIENT_TEST_CASE(h1_client_midchannel_requires_switching_protocols) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* The act of installing the downstream handler might fail */ int err = testing_channel_install_downstream_handler(&tester.testing_channel, SIZE_MAX); if (err) { goto installation_failed; } /* Sending the message might fail */ int msg_completion_error_code = 0; struct aws_io_message *msg = aws_channel_acquire_message_from_pool( tester.testing_channel.channel, AWS_IO_MESSAGE_APPLICATION_DATA, SIZE_MAX); ASSERT_NOT_NULL(msg); msg->on_completion = s_on_message_write_complete_save_error_code; msg->user_data = &msg_completion_error_code; err = testing_channel_push_write_message(&tester.testing_channel, msg); if (err) { aws_mem_release(msg->allocator, msg); goto push_message_failed; } /* The message might fail to reach the socket */ testing_channel_drain_queued_tasks(&tester.testing_channel); if (msg_completion_error_code) { goto message_completion_failed; } /* This is bad, we should have failed by now */ ASSERT_TRUE(false); message_completion_failed: push_message_failed: installation_failed: ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_switching_protocols_fails_pending_requests) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* queue a connection upgrade request */ struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Connection"), .value = aws_byte_cursor_from_c_str("Upgrade"), }, { .name = aws_byte_cursor_from_c_str("Upgrade"), .value = aws_byte_cursor_from_c_str("MyProtocol"), }, }; struct aws_http_message *upgrade_request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(upgrade_request); ASSERT_SUCCESS(aws_http_message_set_request_method(upgrade_request, aws_http_method_get)); ASSERT_SUCCESS(aws_http_message_set_request_path(upgrade_request, aws_byte_cursor_from_c_str("/"))); ASSERT_SUCCESS(aws_http_message_add_header_array(upgrade_request, headers, AWS_ARRAY_SIZE(headers))); struct client_stream_tester upgrade_stream; ASSERT_SUCCESS(s_stream_tester_init(&upgrade_stream, &tester, upgrade_request)); /* queue another request behind it */ struct aws_http_message *next_request = s_new_default_get_request(allocator); struct client_stream_tester next_stream; ASSERT_SUCCESS(s_stream_tester_init(&next_stream, &tester, next_request)); /* send upgrade response */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(upgrade_request); aws_http_message_destroy(next_request); ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: MyProtocol\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* confirm that the next request was 
cancelled */ ASSERT_TRUE(next_stream.complete); ASSERT_TRUE(next_stream.on_complete_error_code != AWS_OP_SUCCESS); /* clean up */ client_stream_tester_clean_up(&upgrade_stream); client_stream_tester_clean_up(&next_stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_switching_protocols_fails_subsequent_requests) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Successfully switch protocols */ struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = true, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); /* Attempting to send a request after this should fail. */ struct aws_http_message *request = s_new_default_get_request(allocator); struct client_stream_tester stream_tester; int err = s_stream_tester_init(&stream_tester, &tester, request); if (err) { ASSERT_INT_EQUALS(AWS_ERROR_HTTP_SWITCHED_PROTOCOLS, aws_last_error()); } else { testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_SWITCHED_PROTOCOLS, stream_tester.on_complete_error_code); } /* clean up */ aws_http_message_destroy(request); client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_switching_protocols_requires_downstream_handler) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Successfully switch protocols, but don't install downstream handler. */ struct protocol_switcher switcher = { .tester = &tester, .install_downstream_handler = false, }; ASSERT_SUCCESS(s_switch_protocols(&switcher)); /* If new data arrives and no downstream handler is installed to deal with it, the connection should shut down. 
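* (The shutdown is expected to carry a non-success error code, since bytes for the switched protocol
* have nowhere to go.)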
*/ ASSERT_SUCCESS( testing_channel_push_read_str_ignore_errors(&tester.testing_channel, "herecomesnewprotocoldatachoochoo")); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_TRUE(testing_channel_get_shutdown_error_code(&tester.testing_channel) != AWS_ERROR_SUCCESS); /* clean up */ ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_connection_close_before_request_finishes) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* set up request whose body won't send immediately */ struct slow_body_sender body_sender; AWS_ZERO_STRUCT(body_sender); s_slow_body_sender_init(&body_sender); struct aws_input_stream *body_stream = &body_sender.base; struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("16"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); aws_http_message_set_body_stream(request, body_stream); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); /* send head of request */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); aws_input_stream_release(body_stream); /* send close connection response */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 404 Not Found\r\n" "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "\r\n")); testing_channel_run_currently_queued_tasks(&tester.testing_channel); aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); /* Wait for channel to finish shutdown */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result, should not receive any body */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_stream_cancel) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* set up request whose body won't send immediately */ struct slow_body_sender body_sender; AWS_ZERO_STRUCT(body_sender); s_slow_body_sender_init(&body_sender); struct aws_input_stream *body_stream = &body_sender.base; struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("16"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, 
AWS_ARRAY_SIZE(headers))); aws_http_message_set_body_stream(request, body_stream); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); /* send head of request */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_destroy(request); aws_input_stream_release(body_stream); /* Something absurd */ aws_http_stream_cancel(stream_tester.stream, AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN); /* The second call will take not action */ aws_http_stream_cancel(stream_tester.stream, AWS_ERROR_SUCCESS); /* Wait for channel to finish shutdown */ testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result, should not receive any body */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN, stream_tester.on_complete_error_code); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* When response has `connection: close` any further request body should not be sent. */ H1_CLIENT_TEST_CASE(h1_client_response_close_connection_before_request_finishes) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Okay to set a timeout */ size_t connection_response_first_byte_timeout_ms = 200; tester.connection->client_data->response_first_byte_timeout_ms = connection_response_first_byte_timeout_ms; /* set up request whose body won't send immediately */ struct slow_body_sender body_sender; AWS_ZERO_STRUCT(body_sender); s_slow_body_sender_init(&body_sender); struct aws_input_stream *body_stream = &body_sender.base; struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("16"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/plan.txt"))); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); aws_http_message_set_body_stream(request, body_stream); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); /* send head of request */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_release(request); aws_input_stream_release(body_stream); /* send close connection response */ ASSERT_SUCCESS(testing_channel_push_read_str( &tester.testing_channel, "HTTP/1.1 404 Not Found\r\n" "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "Connection: close\r\n" "\r\n")); testing_channel_drain_queued_tasks(&tester.testing_channel); /* check result, should not receive any body */ const char *expected = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&tester.testing_channel, allocator, expected)); /* Check if the testing channel has shut down. 
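A response carrying "Connection: close" obligates the connection to stop after this exchange, so the remaining request body is never written and the channel is expected to shut down cleanly while the stream still completes with AWS_ERROR_SUCCESS.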
*/ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); /* clean up */ client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_first_byte_timeout_connection) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* with test channel, we don't use bootstrap to propagate the settings. Hack around it by set the setting directly */ size_t connection_response_first_byte_timeout_ms = 200; tester.connection->client_data->response_first_byte_timeout_ms = connection_response_first_byte_timeout_ms; /* send request */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("example.com"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("*/*"), }, }; struct aws_http_message *request = s_new_default_get_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, &tester, request)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Sleep to trigger the timeout */ aws_thread_current_sleep(aws_timestamp_convert( connection_response_first_byte_timeout_ms + 1, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Check if the testing channel has shut down. */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT, stream_tester.on_complete_error_code); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } H1_CLIENT_TEST_CASE(h1_client_response_first_byte_timeout_request_override) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* with test channel, we don't use bootstrap to propagate the settings. 
Hack around it by set the setting directly */ size_t connection_response_first_byte_timeout_ms = 1000; tester.connection->client_data->response_first_byte_timeout_ms = connection_response_first_byte_timeout_ms; /* send request */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("example.com"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("*/*"), }, }; struct aws_http_message *request = s_new_default_get_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); size_t response_first_byte_timeout_ms = 100; int completion_error_code = 0; struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, .response_first_byte_timeout_ms = response_first_byte_timeout_ms, .on_complete = s_on_complete, .user_data = &completion_error_code, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&tester.testing_channel); aws_thread_current_sleep( aws_timestamp_convert(response_first_byte_timeout_ms + 1, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Check if the testing channel has shut down. */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT, completion_error_code); /* clean up */ aws_http_message_release(request); aws_http_stream_release(stream); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h1_decoder.c000066400000000000000000001051441456575232400245560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include static const struct aws_byte_cursor s_typical_request = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Host: amazon.com\r\n" "Accept-Language: fr\r\n" "\r\n"); static const struct aws_byte_cursor s_typical_response = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.1 200 OK\r\n" "Server: some-server\r\n" "Content-Length: 11\r\n" "\r\n" "Hello noob."); static const bool s_request = true; static const bool s_response = false; static struct aws_logger s_logger; static int s_on_header_stub(const struct aws_h1_decoded_header *header, void *user_data) { (void)header; (void)user_data; return AWS_OP_SUCCESS; } static int s_on_body_stub(const struct aws_byte_cursor *data, bool finished, void *user_data) { (void)data; (void)finished; (void)user_data; return AWS_OP_SUCCESS; } static int s_on_response(int code, void *user_data) { int *ptr = (int *)user_data; if (ptr) { *ptr = code; } return AWS_OP_SUCCESS; } static int s_on_response_stub(int code, void *user_data) { (void)code; (void)user_data; return AWS_OP_SUCCESS; } struct request_data { enum aws_http_method method_enum; struct aws_byte_cursor method_str; struct aws_byte_cursor uri; uint8_t buffer[1024]; }; static int s_on_request( enum aws_http_method method_enum, const struct aws_byte_cursor *method_str, const struct aws_byte_cursor *uri, void *user_data) { struct request_data *request_data = (struct request_data *)user_data; AWS_ASSERT(sizeof(request_data->buffer) >= uri->len + method_str->len); if (request_data) { request_data->method_enum = method_enum; memcpy(request_data->buffer, method_str->ptr, method_str->len); request_data->method_str = aws_byte_cursor_from_array(request_data->buffer, method_str->len); uint8_t *uri_dst = request_data->buffer + method_str->len; memcpy(uri_dst, uri->ptr, uri->len); request_data->uri = aws_byte_cursor_from_array(uri_dst, uri->len); } return AWS_OP_SUCCESS; } static int s_on_request_stub( enum aws_http_method method_enum, const struct aws_byte_cursor *method_str, const struct aws_byte_cursor *uri, void *user_data) { (void)method_enum; (void)method_str; (void)uri; (void)user_data; return AWS_OP_SUCCESS; } static int s_on_done(void *user_data) { (void)user_data; return AWS_OP_SUCCESS; } static void s_test_init(struct aws_allocator *allocator) { aws_http_library_init(allocator); struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; aws_logger_init_standard(&s_logger, allocator, &logger_options); aws_logger_set(&s_logger); } static void s_test_clean_up(void) { aws_http_library_clean_up(); aws_logger_clean_up(&s_logger); } static void s_common_decoder_setup( struct aws_allocator *allocator, size_t scratch_space_size, struct aws_h1_decoder_params *params, bool type, void *user_data) { params->alloc = allocator; params->scratch_space_initial_size = scratch_space_size; params->is_decoding_requests = type; params->user_data = user_data; params->vtable.on_header = s_on_header_stub; params->vtable.on_body = s_on_body_stub; params->vtable.on_request = s_on_request_stub; params->vtable.on_response = s_on_response_stub; params->vtable.on_done = s_on_done; } AWS_TEST_CASE(h1_test_get_request, s_h1_test_get_request); static int s_h1_test_get_request(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct request_data request_data; struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD / HTTP/1.1\r\n\r\n"); struct aws_h1_decoder_params params; 
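/* s_common_decoder_setup() (defined above) wires a no-op stub into every vtable slot; a test then overrides only the callback it wants to observe before creating the decoder, e.g.:
 *
 *     params.vtable.on_request = s_on_request;   (captures the method/URI into request_data)
 *
 * which is exactly what this test does below. */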
s_common_decoder_setup(allocator, 1024, ¶ms, s_request, &request_data); params.vtable.on_request = s_on_request; struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_INT_EQUALS(AWS_HTTP_METHOD_HEAD, request_data.method_enum); ASSERT_TRUE(aws_byte_cursor_eq(&request_data.method_str, &aws_http_method_head)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request_data.uri, "/")); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_request_bad_version, s_h1_test_request_bad_version); static int s_h1_test_request_bad_version(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.0\r\n\r\n"); /* Note version is 1.0 */ struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_FAILS(aws_h1_decode(decoder, &msg)); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_response_1_0, s_h1_test_response_1_0); static int s_h1_test_response_1_0(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); int code = 0; struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.0 200 OK\r\n\r\n"); /* Note version is "1.0" */ struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_response, &code); params.vtable.on_response = s_on_response; struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_INT_EQUALS(200, code); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_response_unsupported_version, s_h1_test_response_unsupported_version); static int s_h1_test_response_unsupported_version(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.2 200 OK\r\n\r\n"); /* Note version is "1.0" */ struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_response, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_FAILS(aws_h1_decode(decoder, &msg)); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_get_status_code, s_h1_test_get_status_code); static int s_h1_test_get_status_code(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); int code; struct aws_byte_cursor msg = s_typical_response; struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_response, &code); params.vtable.on_response = s_on_response; struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_INT_EQUALS(200, code); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_overflow_scratch_space, s_h1_test_overflow_scratch_space); static int s_h1_test_overflow_scratch_space(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = s_typical_response; struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 4, ¶ms, s_response, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } struct s_header_params 
{ int index; int max_index; int first_error; const char **header_names; }; static int s_got_header(const struct aws_h1_decoded_header *header, void *user_data) { struct s_header_params *params = (struct s_header_params *)user_data; if (params->index < params->max_index) { if (params->first_error == AWS_OP_SUCCESS) { if (!aws_byte_cursor_eq_c_str(&header->name_data, params->header_names[params->index])) { params->first_error = AWS_OP_ERR; } } params->index++; } else { return aws_raise_error(AWS_ERROR_UNKNOWN); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_receive_request_headers, s_h1_test_receive_request_headers); static int s_h1_test_receive_request_headers(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = s_typical_request; struct aws_h1_decoder_params params; struct s_header_params header_params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, &header_params); const char *header_names[] = {"Host", "Accept-Language"}; header_params.index = 0; header_params.max_index = AWS_ARRAY_SIZE(header_names); header_params.first_error = AWS_OP_SUCCESS; header_params.header_names = header_names; params.vtable.on_header = s_got_header; struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_SUCCESS(header_params.first_error); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_receive_response_headers, s_h1_test_receive_response_headers); static int s_h1_test_receive_response_headers(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = s_typical_response; struct aws_h1_decoder_params params; struct s_header_params header_params; s_common_decoder_setup(allocator, 1024, ¶ms, s_response, &header_params); const char *header_names[] = {"Server", "Content-Length"}; header_params.index = 0; header_params.max_index = AWS_ARRAY_SIZE(header_names); header_params.first_error = AWS_OP_SUCCESS; header_params.header_names = header_names; params.vtable.on_header = s_got_header; struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_SUCCESS(header_params.first_error); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_get_transfer_encoding_flags, s_h1_test_get_transfer_encoding_flags); static int s_h1_test_get_transfer_encoding_flags(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.1 200 OK\r\n" "Server: some-server\r\n" "Transfer-Encoding: compress\r\n" "Transfer-Encoding: gzip, ,deflate\r\n" "Transfer-Encoding: chunked\r\n" "Transfer-Encoding:\r\n" "\r\n" "Hello noob."); struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_response, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); /* Not a valid HTTP1.1 message, but not the job of decoder to return error here. */ /* Instead, the user should know their buffer has been processed without returning any body data, and * report the error in user-space. 
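 * The decoder simply accumulates every transfer coding it recognizes; the assertion below checks that
 * all four codings (chunked, gzip, deflate, and the deprecated compress) ended up in the flag set.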
*/ ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); int flags = aws_h1_decoder_get_encoding_flags(decoder); ASSERT_INT_EQUALS( (AWS_HTTP_TRANSFER_ENCODING_CHUNKED | AWS_HTTP_TRANSFER_ENCODING_GZIP | AWS_HTTP_TRANSFER_ENCODING_DEFLATE | AWS_HTTP_TRANSFER_ENCODING_DEPRECATED_COMPRESS), flags); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } struct s_body_params { struct aws_array_list body_data; }; static int s_on_body(const struct aws_byte_cursor *data, bool finished, void *user_data) { (void)finished; struct s_body_params *params = (struct s_body_params *)user_data; for (int i = 0; i < (int)data->len; ++i) { aws_array_list_push_back(¶ms->body_data, data->ptr + i); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_body_unchunked, s_h1_test_body_unchunked); static int s_h1_test_body_unchunked(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = s_typical_response; struct aws_h1_decoder_params params; struct s_body_params body_params; s_common_decoder_setup(allocator, 1024, ¶ms, s_response, NULL); aws_array_list_init_dynamic(&body_params.body_data, allocator, 256, sizeof(uint8_t)); params.alloc = allocator; params.scratch_space_initial_size = 1024; params.vtable.on_header = s_on_header_stub; params.vtable.on_body = s_on_body; params.is_decoding_requests = false; params.user_data = &body_params; struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_SUCCESS(memcmp(body_params.body_data.data, "Hello noob.", body_params.body_data.length)); aws_h1_decoder_destroy(decoder); aws_array_list_clean_up(&body_params.body_data); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_body_chunked, s_h1_test_body_chunked); static int s_h1_test_body_chunked(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Host: amazon.com\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "D\r\n" "Hello, there \r\n" "1c\r\n" "should be a carriage return \r\n" "9\r\n" "in\r\nhere.\r\n" "0\r\n" "\r\n"); struct aws_h1_decoder_params params; struct s_body_params body_params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, &body_params); aws_array_list_init_dynamic(&body_params.body_data, allocator, 256, sizeof(uint8_t)); params.vtable.on_body = s_on_body; struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_SUCCESS(memcmp( body_params.body_data.data, "Hello, there should be a carriage return in\r\nhere.", body_params.body_data.length)); aws_h1_decoder_destroy(decoder); aws_array_list_clean_up(&body_params.body_data); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_decode_trailers, s_h1_decode_trailers); static int s_h1_decode_trailers(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Host: amazon.com\r\n" "Accept-Language: fr\r\n" "Transfer-Encoding: chunked \r\n" "Trailer: Expires\r\n" "\r\n" "7\r\n" "Mozilla\r\n" "9\r\n" "Developer\r\n" "7\r\n" "Network\r\n" "0\r\n" "Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n" "\r\n"); struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); aws_h1_decoder_destroy(decoder); s_test_clean_up(); 
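/* No assertions beyond the decode call above: this test only verifies that the trailer field
 * ("Expires: ...") following the zero-length chunk is consumed without error. */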
return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_decode_one_byte_at_a_time, s_h1_decode_one_byte_at_a_time); static int s_h1_decode_one_byte_at_a_time(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = s_typical_request; struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); for (size_t i = 0; i < msg.len; ++i) { struct aws_byte_cursor chunk = aws_byte_cursor_advance(&msg, 1); ASSERT_SUCCESS(aws_h1_decode(decoder, &chunk)); } aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } static int s_rand(int lo, int hi) { return rand() % (hi + 1 - lo) + lo; } AWS_TEST_CASE(h1_decode_messages_at_random_intervals, s_h1_decode_messages_at_random_intervals); static int s_h1_decode_messages_at_random_intervals(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); const struct aws_byte_cursor requests[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Host: amazon.com\r\n" "Accept-Language: fr\r\n" "Content-Length: 6\r\n" "\r\n" "123456"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CONNECT server.example.com:80 HTTP/1.1\r\n" "Host: server.example.com:80\r\n" "Proxy-Authorization: basic aGVsbG86d29ybGQ=\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DELETE /file.html HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD /index.html HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("OPTIONS /index.html HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("OPTIONS * HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PATCH /file.txt HTTP/1.1\r\n" "Host: www.example.com\r\n" "Content-Type: application/example\r\n" "If-Match: \"e0023aa4e\"\r\n" "Content-Length: 10\r\n" "\r\n" "0123456789"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Host: foo.com\r\n" "Content-Type: application/x-www-form-urlencoded\r\n" "Content-Length: 13\r\n" "\r\n" "say=Hi&to=Mom"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PUT /new.html HTTP/1.1\r\n" "Host: example.com\r\n" "Content-type: text/html\r\n" "Content-length: 16\r\n" "\r\n" "

<p>New File</p>
"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("TRACE /index.html HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET /home.html HTTP/1.1\r\n" "Host: example.com\r\n" "a-fake-header: oh what is this odd whitespace \r\n" "Content-Length: 1\r\n" "\r\n" "X"), }; /* Just seed something for determinism. */ srand(1); for (size_t iter = 0; iter < AWS_ARRAY_SIZE(requests); ++iter) { struct aws_byte_cursor request = requests[iter]; struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); /* Decode message at randomized input buffer sizes from 0 to 10 bytes. */ while (request.len) { int lo = 1; int hi = 10; if (hi > (int)request.len) { hi = (int)request.len; } int interval = s_rand(lo, hi); struct aws_byte_cursor chunk = aws_byte_cursor_advance(&request, interval); ASSERT_SUCCESS(aws_h1_decode(decoder, &chunk)); } aws_h1_decoder_destroy(decoder); } s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_decode_bad_requests_and_assert_failure, s_h1_decode_bad_requests_and_assert_failure); static int s_h1_decode_bad_requests_and_assert_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); const struct aws_byte_cursor requests[] = { /* Incorrect chunk size. */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "7\r\n" "Mozilla\r\n" "2\r\n" /* Incorrect chunk size here. */ "Developer\r\n" "7\r\n" "Network\r\n" "0\r\n" "\r\n"), /* Chunked should be final encoding */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked, gzip\r\n" "\r\n"), /* Chunked should be final encoding, p2 */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "Transfer-Encoding: gzip\r\n" "\r\n"), /* Invalid hex-int as chunk size. */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "7\r\n" "Mozilla\r\n" "S\r\n" /* Incorrect chunk size here. */ "Developer\r\n" "7\r\n" "Network\r\n" "0\r\n" "\r\n"), /* Chunk size should not have spaces. */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" " 7 \r\n"), /* Chunk size should not start with "0x". */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "0x7\r\n"), /* Invalid chunk size terminator. */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "7\r0asa90\r\n" "0\r\n" "\r\n"), /* Invalid transfer coding. 
*/ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: shrinkydinky, chunked\r\n"), /* My chunk size is too big */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "FFFFFFFFFFFFFFFFF\r\n"), /* My content-Length is too big */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Content-Length: 99999999999999999999\r\n"), /* My content-Length is empty */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Content-Length:\r\n"), /* Has both content-Length and transfer-encoding */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Content-Length: 999\r\n" "Transfer-Encoding: chunked\r\n"), /* Header is missing colon */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Header-Missing-Colon yes it is\r\n" "\r\n"), /* Header with empty name */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" ": header with empty name\r\n" "\r\n"), /* Header name with illegal characters */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "H@st: bad-char-in-name.com\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Host : space-after-name.com\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" " Host: space-before-name.com\r\n"), /* Header value with illegal characters */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Host: carriage-return\r.com\r\n"), /* Forbid line folding */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Host: \r\n" " obsolete-line-folding.com\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Host: \r\n" "\tobsolete-line-folding.com\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST / HTTP/1.1\r\n" "Host: amazon.com\r\n" "X-Line-Folding-Forbidden: one line of value\r\n" " next line of value\r\n"), /* Method is blank */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" / HTTP/1.1\r\n"), /* URI is blank */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET HTTP/1.1\r\n"), /* HTTP version is blank */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / \r\n"), /* Missing spaces */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET /HTTP/1.1\r\n"), /* Missing spaces */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET/HTTP/1.1\r\n"), /* Extra space at end */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1 \r\n"), /* Illegal characters in method */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("G@T / HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("G\rT / HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("G\nT / HTTP/1.1\r\n"), /* Illegal characters in path */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("G@T /\rindex.html HTTP/1.1\r\n"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("G@T /\tindex.html HTTP/1.1\r\n"), /* Go ahead and add more cases here. 
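New entries are picked up automatically: the loop below hands each cursor to a fresh decoder and asserts that aws_h1_decode() fails.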
*/ }; for (size_t iter = 0; iter < AWS_ARRAY_SIZE(requests); ++iter) { struct aws_byte_cursor request = requests[iter]; struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_FAILS( aws_h1_decode(decoder, &request), "Entry [%zu] should have failed, but it passed:\n------\n" PRInSTR "\n------\n", iter, AWS_BYTE_CURSOR_PRI(requests[iter])); aws_h1_decoder_destroy(decoder); } s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_decode_bad_responses_and_assert_failure, s_h1_decode_bad_responses_and_assert_failure); static int s_h1_decode_bad_responses_and_assert_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); const struct aws_byte_cursor responses[] = { /* Response code not 3 digits */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.1 1000 PHRASE\r\n"), /* Response code not 3 digits */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.1 99 PHRASE\r\n"), /* Response code should not be in hex */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.1 0x1 PHRASE\r\n"), /* Response code should not be in hex */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.1 FFF PHRASE\r\n"), /* Phrase should not contain illegal characters */ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/1.1 200 BAD\nPHRASE\r\n"), /* Go ahead and add more cases here. */ }; for (size_t iter = 0; iter < AWS_ARRAY_SIZE(responses); ++iter) { struct aws_byte_cursor response = responses[iter]; struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_response, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_FAILS(aws_h1_decode(decoder, &response)); aws_h1_decoder_destroy(decoder); } s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( h1_test_extraneous_buffer_data_ensure_not_processed, s_h1_test_extraneous_buffer_data_ensure_not_processed); static int s_h1_test_extraneous_buffer_data_ensure_not_processed(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Wow look here. 
That's a lot of extra random stuff!"); struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); ASSERT_INT_EQUALS(0, msg.len); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(h1_test_ignore_chunk_extensions, s_h1_test_ignore_chunk_extensions); static int s_h1_test_ignore_chunk_extensions(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_test_init(allocator); struct aws_byte_cursor msg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET / HTTP/1.1\r\n" "Host: amazon.com\r\n" "Accept-Language: fr\r\n" "Transfer-Encoding: chunked \r\n" "Trailer: Expires\r\n" "\r\n" "7;some-dumb-chunk-extension-name=some-dumb-chunk-extension-value\r\n" "Mozilla\r\n" "9\r\n" "Developer\r\n" "7\r\n" "Network\r\n" "0\r\n" "Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n" "\r\n"); struct aws_h1_decoder_params params; s_common_decoder_setup(allocator, 1024, ¶ms, s_request, NULL); struct aws_h1_decoder *decoder = aws_h1_decoder_new(¶ms); ASSERT_SUCCESS(aws_h1_decode(decoder, &msg)); aws_h1_decoder_destroy(decoder); s_test_clean_up(); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h1_encoder.c000066400000000000000000000532351456575232400245730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #define H1_ENCODER_TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) static const struct aws_http_header s_typical_request_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("amazon.com"), }, }; static struct aws_logger s_logger; static void s_test_init(struct aws_allocator *allocator) { aws_http_library_init(allocator); struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; aws_logger_init_standard(&s_logger, allocator, &logger_options); aws_logger_set(&s_logger); } static void s_test_clean_up(void) { aws_http_library_clean_up(); aws_logger_clean_up(&s_logger); } H1_ENCODER_TEST_CASE(h1_encoder_content_length_put_request_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. 
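With only a Content-Length header present, the encoder message should record the body length and leave the chunked-encoding flag unset (see the assertions on encoder_message below).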
*/ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("16"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); aws_http_message_set_body_stream(request, body_stream); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_FALSE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(body.len, encoder_message.content_length); aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_chunked_put_request_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_TRUE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(0, encoder_message.content_length); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_chunked_put_request_multiple_te_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. 
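Two Transfer-Encoding headers (gzip, then chunked) are equivalent to one comma-separated list; because chunked is the final coding, the chunked-encoding flag should be set and content_length should remain 0.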
*/ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("gzip"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_TRUE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(0, encoder_message.content_length); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_chunked_put_request_headers_case_insensitivity) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("traNsfeR-EncODIng"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_TRUE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(0, encoder_message.content_length); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_not_chunked_put_request_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. 
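Here the only transfer coding is gzip, so the encoder should neither mark the message as chunked nor derive a content length, even though a body stream is attached.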
*/ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("gzip"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); aws_http_message_set_body_stream(request, body_stream); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_FALSE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(0, encoder_message.content_length); aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_set_body_stream_errors) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. */ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); /* Setting the body stream should cause an error */ aws_http_message_set_body_stream(request, body_stream); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_FALSE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(0, encoder_message.content_length); aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_not_ending_in_chunked_put_request_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. 
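The codings are listed as chunked followed by gzip; since chunked is not the final coding, the encoder should not treat the message as chunked (has_chunked_encoding_header is asserted false below).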
*/ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("gzip"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_FALSE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(0, encoder_message.content_length); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_chunked_multiple_put_request_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("gzip, chunked"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list); ASSERT_TRUE(encoder_message.has_chunked_encoding_header); ASSERT_FALSE(encoder_message.has_connection_close_header); ASSERT_UINT_EQUALS(0, encoder_message.content_length); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_chunked_and_content_length_put_request_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. 
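This request carries both Content-Length and Transfer-Encoding: chunked, which makes the framing ambiguous; encoder message initialization is expected to fail (see the AWS_OP_ERR assertion below).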
*/ static const struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("16"), }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; /* Per RFC 2656 (https://tools.ietf.org/html/rfc2616#section-4.4), if both the Content-Length and Transfer-Encoding * header are defined, the client should not send the request. */ ASSERT_INT_EQUALS( AWS_OP_ERR, aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list)); aws_input_stream_release(body_stream); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_transfer_encoding_chunked_not_final_encoding_put_request_headers) { (void)ctx; s_test_init(allocator); struct aws_h1_encoder encoder; aws_h1_encoder_init(&encoder, allocator); /* request to send - we won't actually send it, we want to validate headers are set correctly. */ struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Transfer-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("chunked;gzip"), /* must end with chunked */ }, }; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers)); struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; /* Per RFC 2656 (https://tools.ietf.org/html/rfc2616#section-4.4), if both the Content-Length and Transfer-Encoding * header are defined, the client should not send the request. 
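 * Likewise, when chunked appears but is not the final transfer coding ("chunked;gzip" in this request),
 * the encoder is expected to reject the message, hence the AWS_OP_ERR assertion below.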
*/ ASSERT_INT_EQUALS( AWS_OP_ERR, aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list)); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); aws_h1_encoder_clean_up(&encoder); s_test_clean_up(); return AWS_OP_SUCCESS; } static int s_test_bad_request( struct aws_allocator *allocator, const char *method, const char *path, const struct aws_http_header *header_array, size_t header_count, int expected_error) { s_test_init(allocator); struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); if (method) { ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str(method))); } if (path) { ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str(path))); } if (header_array) { ASSERT_SUCCESS(aws_http_message_add_header_array(request, header_array, header_count)); } struct aws_linked_list chunk_list; aws_linked_list_init(&chunk_list); struct aws_h1_encoder_message encoder_message; ASSERT_ERROR( expected_error, aws_h1_encoder_message_init_from_request(&encoder_message, allocator, request, &chunk_list)); aws_http_message_destroy(request); aws_h1_encoder_message_clean_up(&encoder_message); s_test_clean_up(); return AWS_OP_SUCCESS; } H1_ENCODER_TEST_CASE(h1_encoder_rejects_bad_method) { (void)ctx; return s_test_bad_request( allocator, "G@T" /*method*/, "/" /*path*/, s_typical_request_headers /*header_array*/, AWS_ARRAY_SIZE(s_typical_request_headers) /*header_count*/, AWS_ERROR_HTTP_INVALID_METHOD /*expected_error*/); } H1_ENCODER_TEST_CASE(h1_encoder_rejects_missing_method) { (void)ctx; return s_test_bad_request( allocator, NULL /*method*/, "/" /*path*/, s_typical_request_headers /*header_array*/, AWS_ARRAY_SIZE(s_typical_request_headers) /*header_count*/, AWS_ERROR_HTTP_INVALID_METHOD /*expected_error*/); } H1_ENCODER_TEST_CASE(h1_encoder_rejects_bad_path) { (void)ctx; return s_test_bad_request( allocator, "GET" /*method*/, "/\r\n/index.html" /*path*/, s_typical_request_headers /*header_array*/, AWS_ARRAY_SIZE(s_typical_request_headers) /*header_count*/, AWS_ERROR_HTTP_INVALID_PATH /*expected_error*/); } H1_ENCODER_TEST_CASE(h1_encoder_rejects_missing_path) { (void)ctx; return s_test_bad_request( allocator, "GET" /*method*/, NULL /*path*/, s_typical_request_headers /*header_array*/, AWS_ARRAY_SIZE(s_typical_request_headers) /*header_count*/, AWS_ERROR_HTTP_INVALID_PATH /*expected_error*/); } H1_ENCODER_TEST_CASE(h1_encoder_rejects_bad_header_name) { (void)ctx; const struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("amazon.com"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Line-\r\n-Folds"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bad header name"), }, }; return s_test_bad_request( allocator, "GET" /*method*/, "/" /*path*/, headers /*header_array*/, AWS_ARRAY_SIZE(headers) /*header_count*/, AWS_ERROR_HTTP_INVALID_HEADER_NAME /*expected_error*/); } H1_ENCODER_TEST_CASE(h1_encoder_rejects_bad_header_value) { (void)ctx; const struct aws_http_header headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("amazon.com"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X-Line-Folds-Are-Bad-Mkay"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("item1,\r\n item2"), }, }; return s_test_bad_request( allocator, "GET" /*method*/, "/" /*path*/, headers /*header_array*/, 
AWS_ARRAY_SIZE(headers) /*header_count*/, AWS_ERROR_HTTP_INVALID_HEADER_VALUE /*expected_error*/); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h1_server.c000066400000000000000000002022361456575232400244570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) struct tester_request { struct aws_http_stream *request_handler; /* All cursors in tester_request point into here */ struct aws_byte_buf storage; struct aws_byte_cursor method; struct aws_byte_cursor uri; struct aws_http_header headers[100]; size_t num_headers; bool header_done; size_t on_complete_cb_count; int on_complete_error_code; struct aws_byte_cursor body; struct aws_input_stream *response_body; }; /* Singleton used by tests in this file */ static struct tester { struct aws_allocator *alloc; struct aws_logger logger; struct aws_http_connection *server_connection; struct testing_channel testing_channel; struct tester_request requests[100]; int request_num; bool server_connection_is_shutdown; } s_tester; static int s_tester_on_request_header( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; (void)header_block; struct tester_request *request = user_data; struct aws_byte_buf *storage = &request->storage; const struct aws_http_header *in_header = header_array; struct aws_http_header *my_header = request->headers + request->num_headers; for (size_t i = 0; i < num_headers; ++i) { /* copy-by-value, then update cursors to point into permanent storage */ *my_header = *in_header; my_header->name.ptr = storage->buffer + storage->len; AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_cursor(storage, in_header->name)); my_header->value.ptr = storage->buffer + storage->len; AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_cursor(storage, in_header->value)); in_header++; my_header++; request->num_headers++; } return AWS_OP_SUCCESS; } static int s_tester_on_request_header_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)stream; (void)header_block; struct tester_request *request = user_data; if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { AWS_FATAL_ASSERT(request->header_done == false); request->header_done = true; } struct aws_http_stream *r_handler = request->request_handler; AWS_FATAL_ASSERT(!aws_http_stream_get_incoming_request_method(r_handler, &request->method)); AWS_FATAL_ASSERT(!aws_http_stream_get_incoming_request_uri(r_handler, &request->uri)); return AWS_OP_SUCCESS; } static int s_tester_on_request_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct tester_request *request = user_data; AWS_FATAL_ASSERT(request->header_done == true); /* Copy data into storage, and point body cursor at that */ AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_cursor(&request->storage, *data)); request->body.len += data->len; if (!request->body.ptr) { request->body.ptr = request->storage.buffer + request->storage.len - request->body.len; } return 
AWS_OP_SUCCESS; } static void s_tester_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { struct tester_request *request = user_data; (void)stream; request->on_complete_cb_count++; request->on_complete_error_code = error_code; } /* Create a new request handler */ static struct aws_http_stream *s_tester_on_incoming_request(struct aws_http_connection *connection, void *user_data) { struct aws_http_request_handler_options options = AWS_HTTP_REQUEST_HANDLER_OPTIONS_INIT; struct tester *tester = user_data; int index = tester->request_num; /* initialize the new request */ tester->requests[index].num_headers = 0; tester->requests[index].header_done = false; aws_byte_buf_init(&tester->requests[index].storage, tester->alloc, 1024 * 1024 * 1); options.user_data = &tester->requests[index]; options.server_connection = connection; options.on_request_headers = s_tester_on_request_header; options.on_request_header_block_done = s_tester_on_request_header_block_done; options.on_request_body = s_tester_on_request_body; options.on_complete = s_tester_on_stream_complete; tester->requests[index].request_handler = aws_http_stream_new_server_request_handler(&options); tester->request_num++; return tester->requests[index].request_handler; } static int s_tester_init(struct aws_allocator *alloc) { aws_http_library_init(alloc); AWS_ZERO_STRUCT(s_tester); s_tester.alloc = alloc; s_tester.request_num = 0; struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; ASSERT_SUCCESS(aws_logger_init_standard(&s_tester.logger, s_tester.alloc, &logger_options)); aws_logger_set(&s_tester.logger); struct aws_testing_channel_options test_channel_options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&s_tester.testing_channel, alloc, &test_channel_options)); struct aws_http1_connection_options http1_options; AWS_ZERO_STRUCT(http1_options); s_tester.server_connection = aws_http_connection_new_http1_1_server(alloc, true, SIZE_MAX, &http1_options); ASSERT_NOT_NULL(s_tester.server_connection); struct aws_http_server_connection_options options = AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT; options.connection_user_data = &s_tester; options.on_incoming_request = s_tester_on_incoming_request; ASSERT_SUCCESS(aws_http_connection_configure_server(s_tester.server_connection, &options)); struct aws_channel_slot *slot = aws_channel_slot_new(s_tester.testing_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(s_tester.testing_channel.channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &s_tester.server_connection->channel_handler)); s_tester.server_connection->vtable->on_channel_handler_installed( &s_tester.server_connection->channel_handler, slot); testing_channel_drain_queued_tasks(&s_tester.testing_channel); return AWS_OP_SUCCESS; } static int s_server_request_clean_up(void) { for (int i = 0; i < s_tester.request_num; i++) { aws_http_stream_release(s_tester.requests[i].request_handler); aws_byte_buf_clean_up(&s_tester.requests[i].storage); aws_input_stream_release(s_tester.requests[i].response_body); } return AWS_OP_SUCCESS; } static int s_server_tester_clean_up(void) { s_server_request_clean_up(); aws_http_connection_release(s_tester.server_connection); ASSERT_SUCCESS(testing_channel_clean_up(&s_tester.testing_channel)); aws_http_library_clean_up(); aws_logger_clean_up(&s_tester.logger); return AWS_OP_SUCCESS; } /* For sending an aws_io_message into the channel, in the write or read 
direction */ static int s_send_message_cursor(struct aws_byte_cursor data) { struct aws_io_message *msg = aws_channel_acquire_message_from_pool( s_tester.testing_channel.channel, AWS_IO_MESSAGE_APPLICATION_DATA, data.len); ASSERT_NOT_NULL(msg); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&msg->message_data, data)); ASSERT_SUCCESS(testing_channel_push_read_message(&s_tester.testing_channel, msg)); return AWS_OP_SUCCESS; } static int s_send_message_c_str(const char *str) { return s_send_message_cursor(aws_byte_cursor_from_c_str(str)); } /* Check that we can set and tear down the `tester` used by all other tests in this file */ TEST_CASE(h1_server_sanity_check) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_receive_1line_request) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&s_tester.requests[0].method, "GET")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&s_tester.requests[0].uri, "/")); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } static int s_check_header(struct tester_request *request, size_t i, const char *name_str, const char *value) { ASSERT_TRUE(i < request->num_headers); struct aws_http_header *header = request->headers + i; ASSERT_TRUE(aws_byte_cursor_eq_c_str_ignore_case(&header->name, name_str)); ASSERT_TRUE(aws_byte_cursor_eq_c_str_ignore_case(&header->value, value)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_receive_headers) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "Host: example.com\r\n" "Accept: */*\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request request = s_tester.requests[0]; ASSERT_TRUE(request.num_headers == 2); ASSERT_SUCCESS(s_check_header(&request, 0, "Host", "example.com")); ASSERT_SUCCESS(s_check_header(&request, 1, "Accept", "*/*")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.method, "GET")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.uri, "/")); ASSERT_TRUE(request.body.len == 0); /* clean up */ ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_receive_body) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n" "write more tests"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request request = s_tester.requests[0]; ASSERT_TRUE(request.num_headers == 1); ASSERT_SUCCESS(s_check_header(&request, 0, "Content-Length", "16")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.method, "PUT")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.uri, "/plan.txt")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.body, "write more tests")); /* clean up */ ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_receive_1_request_from_multiple_io_messages) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n" "write 
more tests"; size_t str_len = strlen(incoming_request); for (size_t i = 0; i < str_len; ++i) { s_send_message_cursor(aws_byte_cursor_from_array(incoming_request + i, 1)); } testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request request = s_tester.requests[0]; ASSERT_TRUE(request.num_headers == 1); ASSERT_SUCCESS(s_check_header(&request, 0, "Content-Length", "16")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.method, "PUT")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.uri, "/plan.txt")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.body, "write more tests")); /* clean up */ ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_receive_multiple_requests_from_1_io_messages) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n" "write more tests" "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 2); struct tester_request request = s_tester.requests[0]; ASSERT_TRUE(request.num_headers == 1); ASSERT_SUCCESS(s_check_header(&request, 0, "Content-Length", "16")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.method, "PUT")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.uri, "/plan.txt")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.body, "write more tests")); request = s_tester.requests[1]; ASSERT_TRUE(request.num_headers == 0); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.method, "GET")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request.uri, "/")); /* clean up */ ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_receive_bad_request_shut_down_connection) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "Mmmm garbage data\r\n\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request request = s_tester.requests[0]; ASSERT_TRUE(request.on_complete_cb_count == 1); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, request.on_complete_error_code); /* clean up */ ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } /* Response creation helper function */ static int s_create_response( struct aws_http_message **out_response, int status_code, const struct aws_http_header *header_array, size_t num_headers, struct aws_input_stream *body) { struct aws_http_message *response = aws_http_message_new_response(s_tester.alloc); ASSERT_NOT_NULL(response); ASSERT_SUCCESS(aws_http_message_set_response_status(response, status_code)); if (num_headers) { ASSERT_SUCCESS(aws_http_message_add_header_array(response, header_array, num_headers)); } aws_http_message_set_body_stream(response, body); *out_response = response; return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_1line_response) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request request = s_tester.requests[0]; struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, 204, NULL, 0, NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(request.request_handler, 
response)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); const char *expected = "HTTP/1.1 204 No Content\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); aws_http_message_destroy(response); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_response_headers) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request request = s_tester.requests[0]; /* send response */ struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Date"), .value = aws_byte_cursor_from_c_str("Fri, 01 Mar 2019 17:18:55 GMT"), }, { .name = aws_byte_cursor_from_c_str("Location"), .value = aws_byte_cursor_from_c_str("/index.html"), }, }; struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, 308, headers, AWS_ARRAY_SIZE(headers), NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(request.request_handler, response)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); const char *expected = "HTTP/1.1 308 Permanent Redirect\r\n" "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "Location: /index.html\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); aws_http_message_destroy(response); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_response_body) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request *request = s_tester.requests; /* send response */ struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("write more tests"); request->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request->response_body); struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Date"), .value = aws_byte_cursor_from_c_str("Fri, 01 Mar 2019 17:18:55 GMT"), }, { .name = aws_byte_cursor_from_c_str("Location"), .value = aws_byte_cursor_from_c_str("/index.html"), }, { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("16"), }, }; struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, 308, headers, AWS_ARRAY_SIZE(headers), request->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request->request_handler, response)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); const char *expected = "HTTP/1.1 308 Permanent Redirect\r\n" "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "Location: /index.html\r\n" "Content-Length: 16\r\n" "\r\n" "write more tests"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); aws_http_message_destroy(response); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } static int s_test_send_expected_no_body_response(int status_int, bool head_request) { const char *incoming_request; if (head_request) { incoming_request = "HEAD / HTTP/1.1\r\n" "\r\n"; } else { incoming_request = "GET / HTTP/1.1\r\n" "\r\n"; } 
ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request *request = s_tester.requests; /* send response */ struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Date"), .value = aws_byte_cursor_from_c_str("Fri, 01 Mar 2019 17:18:55 GMT"), }, { .name = aws_byte_cursor_from_c_str("Location"), .value = aws_byte_cursor_from_c_str("/index.html"), }, { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("16"), }, }; struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, status_int, headers, AWS_ARRAY_SIZE(headers), NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(request->request_handler, response)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); char expected[500]; const char *expected_headers = "Date: Fri, 01 Mar 2019 17:18:55 GMT\r\n" "Location: /index.html\r\n" "Content-Length: 16\r\n" "\r\n"; struct aws_byte_cursor status_text = aws_byte_cursor_from_c_str(aws_http_status_text(status_int)); char c_status_text[100]; memcpy(c_status_text, status_text.ptr, status_text.len); c_status_text[status_text.len] = '\0'; snprintf(expected, sizeof(expected), "HTTP/1.1 %d %s\r\n%s", status_int, c_status_text, expected_headers); ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, s_tester.alloc, expected)); aws_http_message_destroy(response); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_response_to_HEAD_request) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); ASSERT_SUCCESS(s_test_send_expected_no_body_response(308, true)); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_304_response) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); ASSERT_SUCCESS(s_test_send_expected_no_body_response(304, false)); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_multiple_responses_in_order) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 3); struct tester_request *request1 = s_tester.requests; struct tester_request *request2 = s_tester.requests + 1; struct tester_request *request3 = s_tester.requests + 2; /* send response */ /* response1 */ struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("response1"); request1->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request1->response_body); struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("9"), }, }; struct aws_http_message *response1; ASSERT_SUCCESS(s_create_response(&response1, 200, headers, AWS_ARRAY_SIZE(headers), request1->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request1->request_handler, response1)); /* response2 */ body_src = aws_byte_cursor_from_c_str("response2"); request2->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request2->response_body); struct aws_http_message *response2; ASSERT_SUCCESS(s_create_response(&response2, 200, headers, AWS_ARRAY_SIZE(headers), request2->response_body)); 
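/* Responses may be submitted per-stream in any order, but the connection is expected to
 * write them to the wire in the order the requests were received (see the `expected`
 * string below, and the out-of-order variants of this test, which submit responses
 * 1, 3, 2 yet still observe 1, 2, 3 on the wire). */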
ASSERT_SUCCESS(aws_http_stream_send_response(request2->request_handler, response2)); /* response3 */ body_src = aws_byte_cursor_from_c_str("response3"); request3->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request3->response_body); struct aws_http_message *response3; ASSERT_SUCCESS(s_create_response(&response3, 200, headers, AWS_ARRAY_SIZE(headers), request3->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request3->request_handler, response3)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the result */ const char *expected = "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "response1" "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "response2" "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "response3"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); aws_http_message_destroy(response1); aws_http_message_destroy(response2); aws_http_message_destroy(response3); ASSERT_SUCCESS(s_server_tester_clean_up()); ASSERT_TRUE(request1->on_complete_cb_count == 1); ASSERT_TRUE(request2->on_complete_cb_count == 1); ASSERT_TRUE(request3->on_complete_cb_count == 1); ASSERT_TRUE(request1->on_complete_error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(request2->on_complete_error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(request3->on_complete_error_code == AWS_ERROR_SUCCESS); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_multiple_responses_out_of_order) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 3); struct tester_request *request1 = s_tester.requests; ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request1->method, "GET")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request1->uri, "/")); struct tester_request *request2 = s_tester.requests + 1; ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request2->method, "GET")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request2->uri, "/")); struct tester_request *request3 = s_tester.requests + 2; ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request3->method, "GET")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request3->uri, "/")); /* send response */ /* response1 */ struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("response1"); request1->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request1->response_body); struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("9"), }, }; struct aws_http_message *response1; ASSERT_SUCCESS(s_create_response(&response1, 200, headers, AWS_ARRAY_SIZE(headers), request1->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request1->request_handler, response1)); /* response3 */ body_src = aws_byte_cursor_from_c_str("response3"); request3->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request3->response_body); struct aws_http_message *response3; ASSERT_SUCCESS(s_create_response(&response3, 200, headers, AWS_ARRAY_SIZE(headers), request3->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request3->request_handler, response3)); /* response2 */ body_src = aws_byte_cursor_from_c_str("response2"); request2->response_body = 
aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request2->response_body); struct aws_http_message *response2; ASSERT_SUCCESS(s_create_response(&response2, 200, headers, AWS_ARRAY_SIZE(headers), request2->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request2->request_handler, response2)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the result */ const char *expected = "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "response1" "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "response2" "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "response3"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); aws_http_message_destroy(response1); aws_http_message_destroy(response2); aws_http_message_destroy(response3); ASSERT_SUCCESS(s_server_tester_clean_up()); ASSERT_TRUE(request1->on_complete_cb_count == 1); ASSERT_TRUE(request2->on_complete_cb_count == 1); ASSERT_TRUE(request3->on_complete_cb_count == 1); ASSERT_TRUE(request1->on_complete_error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(request2->on_complete_error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(request3->on_complete_error_code == AWS_ERROR_SUCCESS); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_multiple_responses_out_of_order_only_one_sent) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n" "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 3); struct tester_request *request1 = s_tester.requests; struct tester_request *request2 = s_tester.requests + 1; struct tester_request *request3 = s_tester.requests + 2; /* send response */ /* response1 */ struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("response1"); request1->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request1->response_body); struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("9"), }, }; struct aws_http_message *response1; ASSERT_SUCCESS(s_create_response(&response1, 200, headers, AWS_ARRAY_SIZE(headers), request1->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request1->request_handler, response1)); /* response3 */ body_src = aws_byte_cursor_from_c_str("response3"); request3->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request3->response_body); struct aws_http_message *response3; ASSERT_SUCCESS(s_create_response(&response3, 200, headers, AWS_ARRAY_SIZE(headers), request3->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request3->request_handler, response3)); /* no response2 */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the result */ const char *expected = "HTTP/1.1 200 OK\r\n" "Content-Length: 9\r\n" "\r\n" "response1"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); aws_http_message_destroy(response1); aws_http_message_destroy(response3); ASSERT_SUCCESS(s_server_tester_clean_up()); ASSERT_TRUE(request1->on_complete_cb_count == 1); ASSERT_TRUE(request2->on_complete_cb_count == 1); ASSERT_TRUE(request3->on_complete_cb_count == 1); ASSERT_TRUE(request1->on_complete_error_code == AWS_ERROR_SUCCESS); /* last two failed, response 2 is 
missing */ ASSERT_TRUE(request2->on_complete_error_code == AWS_ERROR_HTTP_CONNECTION_CLOSED); ASSERT_TRUE(request3->on_complete_error_code == AWS_ERROR_HTTP_CONNECTION_CLOSED); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_response_before_request_finished) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request_part1 = "PUT /plan.txt HTTP/1.1\r\n" "Content-Length: 16\r\n" "\r\n" "write "; ASSERT_SUCCESS(s_send_message_c_str(incoming_request_part1)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Only part 1 is sent and the response is made and sent */ ASSERT_TRUE(s_tester.request_num == 1); struct tester_request *request = s_tester.requests; struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, 200, NULL, 0, NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(request->request_handler, response)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* stream is not completed */ ASSERT_TRUE(request->on_complete_cb_count == 0); /* check the response */ const char *expected = "HTTP/1.1 200 OK\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); const char *incoming_request_part2 = "more tests" "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request_part2)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* finish sending the whole request * the stream should be completed now */ ASSERT_TRUE(request->on_complete_cb_count == 1); ASSERT_TRUE(request->on_complete_error_code == AWS_ERROR_SUCCESS); /* check the request */ ASSERT_SUCCESS(s_check_header(request, 0, "Content-Length", "16")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request->method, "PUT")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request->uri, "/plan.txt")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request->body, "write more tests")); ASSERT_TRUE(s_tester.request_num == 2); request = s_tester.requests + 1; ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request->method, "GET")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request->uri, "/")); /* clean up */ aws_http_message_destroy(response); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } /* Check that expected matches data stretched across multiple messages. * The event-loop is ticked, and messages are dequed, as this function progresses. */ static int s_check_multiple_messages(struct tester *tester, struct aws_byte_cursor expected, size_t *out_num_messages) { size_t num_messages = 0; struct aws_linked_list *msgs = testing_channel_get_written_message_queue(&tester->testing_channel); size_t progress = 0; size_t remaining = expected.len; while (remaining > 0) { /* Tick event loop if there are no messages already */ if (aws_linked_list_empty(msgs)) { testing_channel_run_currently_queued_tasks(&tester->testing_channel); } /* There should be EXACTLY 1 aws_io_message after ticking. */ ASSERT_TRUE(!aws_linked_list_empty(msgs)); struct aws_linked_list_node *node = aws_linked_list_pop_front(msgs); ASSERT_TRUE(aws_linked_list_empty(msgs)); num_messages++; struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); /* */ ASSERT_TRUE(msg->message_data.len <= remaining); size_t comparing = msg->message_data.len < remaining ? 
msg->message_data.len : remaining; struct aws_byte_cursor compare_cur = aws_byte_cursor_from_array(expected.ptr + progress, comparing); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&compare_cur, &msg->message_data)); aws_mem_release(msg->allocator, msg); progress += comparing; remaining -= comparing; } /* Check that no more messages are produced unexpectedly */ testing_channel_drain_queued_tasks(&tester->testing_channel); ASSERT_TRUE(aws_linked_list_empty(msgs)); *out_num_messages = num_messages; return AWS_OP_SUCCESS; } /* Send a response whose body doesn't fit in a single aws_io_message */ TEST_CASE(h1_server_send_response_large_body) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request *request = s_tester.requests; /* send response */ size_t body_len = 1024 * 1024 * 1; /* 1MB */ struct aws_byte_buf body_buf; ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_len)); while (body_buf.len < body_len) { int r = rand(); aws_byte_buf_write_be32(&body_buf, (uint32_t)r); } struct aws_byte_cursor body_src = aws_byte_cursor_from_buf(&body_buf); request->response_body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(request->response_body); char content_length_value[100]; snprintf(content_length_value, sizeof(content_length_value), "%zu", body_len); struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str(content_length_value), }, }; struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, 200, headers, AWS_ARRAY_SIZE(headers), request->response_body)); ASSERT_SUCCESS(aws_http_stream_send_response(request->request_handler, response)); const char *expected_head_fmt = "HTTP/1.1 200 OK\r\n" "Content-Length: %zu\r\n" "\r\n"; char expected_head[1024]; int expected_head_len = snprintf(expected_head, sizeof(expected_head), expected_head_fmt, body_len); struct aws_byte_buf expected_buf; ASSERT_SUCCESS(aws_byte_buf_init(&expected_buf, allocator, body_len + expected_head_len)); ASSERT_TRUE(aws_byte_buf_write(&expected_buf, (uint8_t *)expected_head, expected_head_len)); ASSERT_TRUE(aws_byte_buf_write_from_whole_buffer(&expected_buf, body_buf)); size_t num_io_messages; ASSERT_SUCCESS(s_check_multiple_messages(&s_tester, aws_byte_cursor_from_buf(&expected_buf), &num_io_messages)); ASSERT_TRUE(num_io_messages > 1); ASSERT_SUCCESS(s_server_tester_clean_up()); aws_http_message_destroy(response); aws_byte_buf_clean_up(&body_buf); aws_byte_buf_clean_up(&expected_buf); return AWS_OP_SUCCESS; } /* Send a response whose headers doesn't fit in a single aws_io_message */ TEST_CASE(h1_server_send_response_large_head) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); const char *incoming_request = "GET / HTTP/1.1\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request *request = s_tester.requests; /* send response */ /* Generate headers while filling in contents of `expected` buffer */ struct aws_http_header headers[1000]; size_t num_headers = AWS_ARRAY_SIZE(headers); AWS_ZERO_STRUCT(headers); struct aws_byte_buf expected; aws_byte_buf_init(&expected, allocator, num_headers * 128); /* approx capacity */ struct 
aws_byte_cursor request_line = aws_byte_cursor_from_c_str("HTTP/1.1 200 OK\r\n"); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&expected, request_line)); /* Each header just has a UUID for its name and value */ for (size_t i = 0; i < num_headers; ++i) { struct aws_http_header *header = headers + i; /* Point to where the UUID is going to be written in the `expected` buffer */ header->name = aws_byte_cursor_from_array(expected.buffer + expected.len, AWS_UUID_STR_LEN - 1); header->value = header->name; struct aws_uuid uuid; ASSERT_SUCCESS(aws_uuid_init(&uuid)); ASSERT_SUCCESS(aws_uuid_to_str(&uuid, &expected)); ASSERT_TRUE(aws_byte_buf_write(&expected, (uint8_t *)": ", 2)); ASSERT_SUCCESS(aws_uuid_to_str(&uuid, &expected)); ASSERT_TRUE(aws_byte_buf_write(&expected, (uint8_t *)"\r\n", 2)); } ASSERT_TRUE(aws_byte_buf_write(&expected, (uint8_t *)"\r\n", 2)); /* sending response */ struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, 200, headers, num_headers, NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(request->request_handler, response)); /* check result */ size_t num_io_messages; ASSERT_SUCCESS(s_check_multiple_messages(&s_tester, aws_byte_cursor_from_buf(&expected), &num_io_messages)); ASSERT_TRUE(num_io_messages > 1); ASSERT_SUCCESS(s_server_tester_clean_up()); aws_http_message_destroy(response); aws_byte_buf_clean_up(&expected); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_receive_close_header_ends_connection) { (void)ctx; (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); /* receive request with "Connection: close" header */ const char *incoming_request = "GET / HTTP/1.1\r\n" "Host: example.com\r\n" "Connection: close\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); /* send response */ struct tester_request request = s_tester.requests[0]; struct aws_http_message *response; ASSERT_SUCCESS(s_create_response(&response, 200, NULL, 0, NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(request.request_handler, response)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); const char *expected = "HTTP/1.1 200 OK\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); /* stream should complete successfully */ ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, s_tester.requests[0].on_complete_error_code); /* connection should have shut down cleanly after sending response */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_tester.testing_channel)); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* clean up */ aws_http_message_destroy(response); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } /* It's not legal for a client to send another request after sending one with a "Connection: close" */ TEST_CASE(h1_server_receive_close_header_more_requests_illegal) { (void)ctx; (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); /* Receive 2 requests, where first one has "Connection: close" header */ const char *incoming_request = "GET /first HTTP/1.1\r\n" "Host: example.com\r\n" "Connection: close\r\n" "\r\n" "GET /second HTTP/1.1\r\n" "Host: example.com\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Only the first request should have registered */ ASSERT_TRUE(s_tester.request_num == 1); struct tester_request 
*request = &s_tester.requests[0]; ASSERT_TRUE(aws_byte_cursor_eq_c_str(&request->uri, "/first")); /* Not checking any more state. * It would be valid behavior for connection to shutdown with an error code * OR silently ignore the second request. */ /* clean up */ ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_send_close_header_ends_connection) { (void)ctx; (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); /* receive request */ const char *incoming_request = "GET / HTTP/1.1\r\n" "Host: example.com\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 1); struct tester_request request = s_tester.requests[0]; /* send response with "Connection: close" header */ struct aws_http_message *response; struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Connection"), .value = aws_byte_cursor_from_c_str("close"), }, }; ASSERT_SUCCESS(s_create_response(&response, 200, headers, AWS_ARRAY_SIZE(headers), NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(request.request_handler, response)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); const char *expected = "HTTP/1.1 200 OK\r\n" "Connection: close\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); /* stream should complete successfully */ ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, s_tester.requests[0].on_complete_error_code); /* connection should have shut down cleanly after sending response */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_tester.testing_channel)); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* clean up */ aws_http_message_destroy(response); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } /* When pipelining multiple requests * and one of the responses has a "Connection: close" header * ensure that everything goes correctly */ TEST_CASE(h1_server_send_close_header_with_pipelining) { (void)ctx; (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); /* receive 3 requests at once */ const char *incoming_request = "GET /first HTTP/1.1\r\n" "Host: example.com\r\n" "\r\n" "GET /second HTTP/1.1\r\n" "Host: example.com\r\n" "\r\n" "GET /third HTTP/1.1\r\n" "Host: example.com\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_c_str(incoming_request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(s_tester.request_num == 3); /* Send 3 responses. * Only the middle response has the "Connection: close" header */ struct aws_http_message *responses[3]; struct aws_http_header close_headers[] = { { .name = aws_byte_cursor_from_c_str("Connection"), .value = aws_byte_cursor_from_c_str("close"), }, }; /* Create responses in order: third, second, first. * This lets us check that we can still send a response to the first message * even after queueing the response to the second message with a close header. */ for (int i = 2; i >= 0; --i) { struct aws_http_header *headers = NULL; size_t num_headers = 0; if (i == 1) { headers = close_headers; num_headers = AWS_ARRAY_SIZE(close_headers); } ASSERT_SUCCESS(s_create_response(&responses[i], 200, headers, num_headers, NULL)); ASSERT_SUCCESS(aws_http_stream_send_response(s_tester.requests[i].request_handler, responses[i])); } testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Only the first two responses should be sent. 
* The third should not send because the second had the close header. */ const char *expected = "HTTP/1.1 200 OK\r\n" "\r\n" "HTTP/1.1 200 OK\r\n" "Connection: close\r\n" "\r\n"; ASSERT_SUCCESS(testing_channel_check_written_messages_str(&s_tester.testing_channel, allocator, expected)); /* Only the first two streams should complete successfully */ ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, s_tester.requests[0].on_complete_error_code); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, s_tester.requests[1].on_complete_error_code); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, s_tester.requests[2].on_complete_error_code); /* Connection should have shut down due to sending close header. */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_tester.testing_channel)); /* clean up */ for (size_t i = 0; i < 3; ++i) { aws_http_message_destroy(responses[i]); } ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } /* Test for errors returned from callbacks */ /* The connection is closed before the message is sent */ enum request_handler_callback { REQUEST_HANDLER_CALLBACK_INCOMING_REQUEST, REQUEST_HANDLER_CALLBACK_INCOMING_HEADERS, REQUEST_HANDLER_CALLBACK_INCOMING_HEADERS_DONE, REQUEST_HANDLER_CALLBACK_INCOMING_BODY, REQUEST_HANDLER_CALLBACK_INCOMING_REQUEST_DONE, REQUEST_HANDLER_CALLBACK_OUTGOING_BODY, REQUEST_HANDLER_CALLBACK_COMPLETE, REQUEST_HANDLER_CALLBACK_COUNT, }; static const int ERROR_FROM_CALLBACK_ERROR_CODE = (int)0xBEEFCAFE; struct error_from_callback_tester { struct aws_input_stream base; enum request_handler_callback error_at; int callback_counts[REQUEST_HANDLER_CALLBACK_COUNT]; bool has_errored; struct aws_allocator *alloc; struct aws_logger logger; struct aws_http_connection *server_connection; struct testing_channel testing_channel; struct tester_request requests[100]; struct aws_stream_status outgoing_body_status; int request_num; int on_complete_error_code; }; static int s_error_from_callback_common( struct error_from_callback_tester *error_tester, enum request_handler_callback current_callback) { error_tester->callback_counts[current_callback]++; /* After error code returned, no more callbacks should fire (except for on_complete) */ AWS_FATAL_ASSERT(!error_tester->has_errored); AWS_FATAL_ASSERT(current_callback <= error_tester->error_at); if (current_callback == error_tester->error_at) { error_tester->has_errored = true; return aws_raise_error(ERROR_FROM_CALLBACK_ERROR_CODE); } return AWS_OP_SUCCESS; } static int s_error_from_outgoing_body_read(struct aws_input_stream *body, struct aws_byte_buf *dest) { (void)dest; struct error_from_callback_tester *error_tester = AWS_CONTAINER_OF(body, struct error_from_callback_tester, base); ASSERT_SUCCESS(s_error_from_callback_common(error_tester, REQUEST_HANDLER_CALLBACK_OUTGOING_BODY)); /* If the common fn was successful, write out some data and end the stream */ ASSERT_TRUE(aws_byte_buf_write(dest, (const uint8_t *)"abcd", 4)); error_tester->outgoing_body_status.is_end_of_stream = true; return AWS_OP_SUCCESS; } static int s_error_from_outgoing_body_get_status(struct aws_input_stream *body, struct aws_stream_status *status) { struct error_from_callback_tester *error_tester = AWS_CONTAINER_OF(body, struct error_from_callback_tester, base); *status = error_tester->outgoing_body_status; return AWS_OP_SUCCESS; } static void s_error_from_outgoing_body_destroy(struct aws_input_stream *body) { (void)body; } static struct aws_input_stream_vtable s_error_from_outgoing_body_vtable = { .seek = NULL, .read = s_error_from_outgoing_body_read, 
.get_status = s_error_from_outgoing_body_get_status, .get_length = NULL, }; static int s_error_from_incoming_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; (void)header_block; (void)header_array; (void)num_headers; return s_error_from_callback_common(user_data, REQUEST_HANDLER_CALLBACK_INCOMING_HEADERS); } static int s_error_from_incoming_headers_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)stream; (void)header_block; return s_error_from_callback_common(user_data, REQUEST_HANDLER_CALLBACK_INCOMING_HEADERS_DONE); } static int s_error_from_incoming_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)data; return s_error_from_callback_common(user_data, REQUEST_HANDLER_CALLBACK_INCOMING_BODY); } static int s_error_from_incoming_request_done(struct aws_http_stream *stream, void *user_data) { (void)stream; return s_error_from_callback_common(user_data, REQUEST_HANDLER_CALLBACK_INCOMING_REQUEST_DONE); } static void s_error_tester_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; struct error_from_callback_tester *error_tester = user_data; error_tester->callback_counts[REQUEST_HANDLER_CALLBACK_COMPLETE]++; error_tester->on_complete_error_code = error_code; } static struct aws_http_stream *s_tester_close_on_incoming_request( struct aws_http_connection *connection, void *user_data) { struct aws_http_request_handler_options options = AWS_HTTP_REQUEST_HANDLER_OPTIONS_INIT; struct error_from_callback_tester *tester = user_data; int index = tester->request_num; /* initialize the new request */ tester->requests[index].num_headers = 0; tester->requests[index].header_done = false; aws_byte_buf_init(&tester->requests[index].storage, tester->alloc, 1024 * 1024 * 1); options.server_connection = connection; options.user_data = tester; options.on_request_headers = s_error_from_incoming_headers; options.on_request_header_block_done = s_error_from_incoming_headers_done; options.on_request_body = s_error_from_incoming_body; options.on_request_done = s_error_from_incoming_request_done; options.on_complete = s_error_tester_on_stream_complete; struct aws_http_stream *stream = aws_http_stream_new_server_request_handler(&options); AWS_FATAL_ASSERT(stream); tester->requests[index].request_handler = stream; tester->request_num++; int err = s_error_from_callback_common(tester, REQUEST_HANDLER_CALLBACK_INCOMING_REQUEST); if (err) { return NULL; } return stream; } static int s_error_tester_init(struct aws_allocator *alloc, struct error_from_callback_tester *tester) { aws_http_library_init(alloc); tester->alloc = alloc; s_tester.alloc = alloc; tester->request_num = 0; tester->outgoing_body_status.is_valid = true; struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; ASSERT_SUCCESS(aws_logger_init_standard(&tester->logger, tester->alloc, &logger_options)); aws_logger_set(&tester->logger); struct aws_testing_channel_options test_channel_options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&tester->testing_channel, alloc, &test_channel_options)); struct aws_http1_connection_options http1_options; AWS_ZERO_STRUCT(http1_options); tester->server_connection = aws_http_connection_new_http1_1_server(alloc, true, SIZE_MAX, &http1_options); 
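/* This setup mirrors s_tester_init(), except the connection is configured with
 * s_tester_close_on_incoming_request, whose s_error_from_* callbacks deliberately fail once
 * the callback selected by error_tester->error_at fires. A user-provided callback signals
 * such a failure the same way (sketch only; AWS_ERROR_UNKNOWN is just an arbitrary error code):
 *
 *     static int my_on_request_body(
 *         struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
 *         (void)stream; (void)data; (void)user_data;
 *         return aws_raise_error(AWS_ERROR_UNKNOWN);
 *     }
 *
 * The tests below then assert that, after the injected error, no further callbacks fire
 * except on_complete, which reports the raised error code. */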
ASSERT_NOT_NULL(tester->server_connection); struct aws_http_server_connection_options options = AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT; options.connection_user_data = tester; options.on_incoming_request = s_tester_close_on_incoming_request; ASSERT_SUCCESS(aws_http_connection_configure_server(tester->server_connection, &options)); struct aws_channel_slot *slot = aws_channel_slot_new(tester->testing_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(tester->testing_channel.channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &tester->server_connection->channel_handler)); tester->server_connection->vtable->on_channel_handler_installed(&tester->server_connection->channel_handler, slot); testing_channel_drain_queued_tasks(&tester->testing_channel); return AWS_OP_SUCCESS; } static int s_server_close_request_clean_up(struct error_from_callback_tester *tester) { for (int i = 0; i < tester->request_num; i++) { aws_http_stream_release(tester->requests[i].request_handler); aws_byte_buf_clean_up(&tester->requests[i].storage); } return AWS_OP_SUCCESS; } static int s_server_error_tester_clean_up(struct error_from_callback_tester *tester) { s_server_close_request_clean_up(tester); aws_http_connection_release(tester->server_connection); ASSERT_SUCCESS(testing_channel_clean_up(&tester->testing_channel)); aws_http_library_clean_up(); aws_logger_clean_up(&tester->logger); return AWS_OP_SUCCESS; } static int s_send_message_cursor_close(struct aws_byte_cursor data, struct error_from_callback_tester *tester) { struct aws_io_message *msg = aws_channel_acquire_message_from_pool( tester->testing_channel.channel, AWS_IO_MESSAGE_APPLICATION_DATA, data.len); ASSERT_NOT_NULL(msg); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&msg->message_data, data)); ASSERT_SUCCESS(testing_channel_push_read_message(&tester->testing_channel, msg)); return AWS_OP_SUCCESS; } static int s_test_error_from_callback(struct aws_allocator *allocator, enum request_handler_callback error_at) { struct error_from_callback_tester error_tester; AWS_ZERO_STRUCT(error_tester); error_tester.error_at = error_at; ASSERT_SUCCESS(s_error_tester_init(allocator, &error_tester)); /* send request */ const char *incoming_request = "POST / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "3\r\n" "two\r\n" "6\r\n" "chunks\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_cursor_close(aws_byte_cursor_from_c_str(incoming_request), &error_tester)); testing_channel_drain_queued_tasks(&error_tester.testing_channel); ASSERT_TRUE(error_tester.request_num == 1); struct tester_request *request = error_tester.requests; /* send response */ struct aws_http_header headers[] = { { .name = aws_byte_cursor_from_c_str("Content-Length"), .value = aws_byte_cursor_from_c_str("4"), }, }; error_tester.base.vtable = &s_error_from_outgoing_body_vtable; aws_ref_count_init( &error_tester.base.ref_count, &error_tester, (aws_simple_completion_callback *)s_error_from_outgoing_body_destroy); struct aws_input_stream *error_from_outgoing_body_stream = &error_tester.base; struct aws_http_message *response; ASSERT_SUCCESS( s_create_response(&response, 200, headers, AWS_ARRAY_SIZE(headers), error_from_outgoing_body_stream)); /* send_response() may succeed or fail, depending on when things shut down */ aws_http_stream_send_response(request->request_handler, response); testing_channel_drain_queued_tasks(&error_tester.testing_channel); /* check that callbacks were invoked before error_at, but not after */ for (int i = 0; i < 
REQUEST_HANDLER_CALLBACK_COMPLETE; ++i) { if (i <= error_at) { ASSERT_TRUE(error_tester.callback_counts[i] > 0); } else { ASSERT_INT_EQUALS(0, error_tester.callback_counts[i]); } } /* the on_complete callback should always fire though */ ASSERT_INT_EQUALS(1, error_tester.callback_counts[REQUEST_HANDLER_CALLBACK_COMPLETE]); ASSERT_INT_EQUALS(ERROR_FROM_CALLBACK_ERROR_CODE, error_tester.on_complete_error_code); aws_http_message_destroy(response); aws_input_stream_release(error_from_outgoing_body_stream); ASSERT_SUCCESS(s_server_error_tester_clean_up(&error_tester)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_close_before_message_is_sent) { (void)ctx; struct error_from_callback_tester error_tester; AWS_ZERO_STRUCT(error_tester); ASSERT_SUCCESS(s_error_tester_init(allocator, &error_tester)); /* close the connection */ aws_http_connection_close(error_tester.server_connection); testing_channel_drain_queued_tasks(&error_tester.testing_channel); /* send request */ const char *incoming_request = "POST / HTTP/1.1\r\n" "Transfer-Encoding: chunked\r\n" "\r\n" "3\r\n" "two\r\n" "6\r\n" "chunks\r\n" "0\r\n" "\r\n"; ASSERT_SUCCESS(s_send_message_cursor_close(aws_byte_cursor_from_c_str(incoming_request), &error_tester)); testing_channel_drain_queued_tasks(&error_tester.testing_channel); /* no request handler was made */ ASSERT_TRUE(error_tester.request_num == 0); /* all callbacks were not invoked */ for (int i = 0; i < REQUEST_HANDLER_CALLBACK_COMPLETE; ++i) { ASSERT_INT_EQUALS(0, error_tester.callback_counts[i]); } ASSERT_SUCCESS(s_server_error_tester_clean_up(&error_tester)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_error_from_incoming_request_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_HANDLER_CALLBACK_INCOMING_REQUEST)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_error_from_incoming_headers_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_HANDLER_CALLBACK_INCOMING_HEADERS)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_error_from_incoming_headers_done_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_HANDLER_CALLBACK_INCOMING_HEADERS_DONE)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_error_from_incoming_body_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_HANDLER_CALLBACK_INCOMING_BODY)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_error_from_incoming_request_done_callback_stops_decoder) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_HANDLER_CALLBACK_INCOMING_REQUEST_DONE)); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_error_from_outgoing_body_callback_stops_sending) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, REQUEST_HANDLER_CALLBACK_OUTGOING_BODY)); return AWS_OP_SUCCESS; } /* After aws_http_connection_close() is called, aws_http_connection_is_open() should return false, * even if both calls were made from outside the event-loop thread. 
*/ TEST_CASE(h1_server_close_from_off_thread_makes_not_open) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); testing_channel_set_is_on_users_thread(&s_tester.testing_channel, false); ASSERT_TRUE(aws_http_connection_is_open(s_tester.server_connection)); aws_http_connection_close(s_tester.server_connection); ASSERT_FALSE(aws_http_connection_is_open(s_tester.server_connection)); testing_channel_set_is_on_users_thread(&s_tester.testing_channel, true); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(h1_server_close_from_on_thread_makes_not_open) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); testing_channel_set_is_on_users_thread(&s_tester.testing_channel, false); ASSERT_TRUE(aws_http_connection_is_open(s_tester.server_connection)); testing_channel_set_is_on_users_thread(&s_tester.testing_channel, true); aws_http_connection_close(s_tester.server_connection); testing_channel_set_is_on_users_thread(&s_tester.testing_channel, false); ASSERT_FALSE(aws_http_connection_is_open(s_tester.server_connection)); testing_channel_set_is_on_users_thread(&s_tester.testing_channel, true); ASSERT_SUCCESS(s_server_tester_clean_up()); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h2_client.c000066400000000000000000010223751456575232400244350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "h2_test_helper.h" #include "stream_test_helper.h" #include #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } struct connection_user_data { struct aws_allocator *allocator; int initial_settings_error_code; uint32_t last_stream_id; uint32_t http2_error; struct aws_http2_setting remote_settings_array[10]; struct aws_byte_buf debug_data; size_t num_settings; }; static void s_on_initial_settings_completed( struct aws_http_connection *http2_connection, int error_code, void *user_data) { (void)http2_connection; struct connection_user_data *data = user_data; data->initial_settings_error_code = error_code; } static void s_on_goaway_received( struct aws_http_connection *http2_connection, uint32_t last_stream_id, uint32_t http2_error, const struct aws_byte_cursor debug_data, void *user_data) { (void)http2_connection; struct connection_user_data *data = user_data; data->last_stream_id = last_stream_id; data->http2_error = http2_error; if (data->debug_data.capacity != 0) { /* If multiple goaway received, clean up the previous one */ aws_byte_buf_clean_up(&data->debug_data); } aws_byte_buf_init_copy_from_cursor(&data->debug_data, data->allocator, debug_data); } static void s_on_remote_settings_change( struct aws_http_connection *http2_connection, const struct aws_http2_setting *settings_array, size_t num_settings, void *user_data) { (void)http2_connection; struct connection_user_data *data = user_data; if (num_settings) { memcpy(data->remote_settings_array, settings_array, num_settings * sizeof(struct aws_http2_setting)); } data->num_settings = num_settings; } /* Singleton used by tests in this file */ static struct tester { struct aws_allocator *alloc; struct aws_http_connection *connection; struct testing_channel testing_channel; struct h2_fake_peer peer; struct connection_user_data user_data; bool 
no_conn_manual_win_management; } s_tester; static int s_tester_init(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_http_library_init(alloc); s_tester.alloc = alloc; AWS_ZERO_STRUCT(s_tester.user_data.debug_data); struct aws_testing_channel_options options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&s_tester.testing_channel, alloc, &options)); struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 0}, }; struct aws_http2_connection_options http2_options = { .initial_settings_array = settings_array, .num_initial_settings = AWS_ARRAY_SIZE(settings_array), .on_initial_settings_completed = s_on_initial_settings_completed, .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, .on_goaway_received = s_on_goaway_received, .on_remote_settings_change = s_on_remote_settings_change, .conn_manual_window_management = !s_tester.no_conn_manual_win_management, }; s_tester.connection = aws_http_connection_new_http2_client(alloc, false /* manual window management */, &http2_options); ASSERT_NOT_NULL(s_tester.connection); { s_tester.user_data.allocator = s_tester.alloc; /* set connection user_data (handled by http-bootstrap in real world) */ s_tester.connection->user_data = &s_tester.user_data; /* re-enact marriage vows of http-connection and channel (handled by http-bootstrap in real world) */ struct aws_channel_slot *slot = aws_channel_slot_new(s_tester.testing_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(s_tester.testing_channel.channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &s_tester.connection->channel_handler)); s_tester.connection->vtable->on_channel_handler_installed(&s_tester.connection->channel_handler, slot); } struct h2_fake_peer_options peer_options = { .alloc = alloc, .testing_channel = &s_tester.testing_channel, .is_server = true, }; ASSERT_SUCCESS(h2_fake_peer_init(&s_tester.peer, &peer_options)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); return AWS_OP_SUCCESS; } static int s_tester_clean_up(void) { aws_byte_buf_clean_up(&s_tester.user_data.debug_data); h2_fake_peer_clean_up(&s_tester.peer); aws_http_connection_release(s_tester.connection); ASSERT_SUCCESS(testing_channel_clean_up(&s_tester.testing_channel)); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } /* Test the common setup/teardown used by all tests in this file */ TEST_CASE(h2_client_sanity_check) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); return s_tester_clean_up(); } /* Test that a stream can be created and destroyed. 
*/ TEST_CASE(h2_client_stream_create) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* create request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header headers[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); struct aws_http_make_request_options options = { .self_size = sizeof(options), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); /* shutdown channel so request can be released */ aws_channel_shutdown(s_tester.testing_channel.channel, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_tester.testing_channel)); /* release request */ aws_http_stream_release(stream); aws_http_message_release(request); return s_tester_clean_up(); } static void s_stream_cleans_up_on_destroy(void *data) { bool *destroyed = data; *destroyed = true; } TEST_CASE(h2_client_stream_release_after_complete) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* create request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header headers[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); bool destroyed = false; struct aws_http_make_request_options options = { .self_size = sizeof(options), .request = request, .on_destroy = s_stream_cleans_up_on_destroy, .user_data = &destroyed, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); /* shutdown channel so request can be released */ aws_channel_shutdown(s_tester.testing_channel.channel, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_tester.testing_channel)); /* release request */ ASSERT_FALSE(destroyed); aws_http_stream_release(stream); ASSERT_TRUE(destroyed); aws_http_message_release(request); return s_tester_clean_up(); } struct s_callback_invoked { bool destroy_invoked; bool complete_invoked; }; static void s_unactivated_stream_cleans_up_on_destroy(void *data) { struct s_callback_invoked *callback_data = data; callback_data->destroy_invoked = true; } static void s_unactivated_stream_complete(struct aws_http_stream *stream, int error_code, void *data) { (void)stream; (void)error_code; struct s_callback_invoked *callback_data = data; callback_data->complete_invoked = true; } TEST_CASE(h2_client_unactivated_stream_cleans_up) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* create request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header headers[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); struct s_callback_invoked callback_data = {0}; struct aws_http_make_request_options options = { .self_size = sizeof(options), .request = request, .on_destroy = s_unactivated_stream_cleans_up_on_destroy, 
.on_complete = s_unactivated_stream_complete, .user_data = &callback_data, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &options); ASSERT_NOT_NULL(stream); /* do not activate the stream, that's the test. */ ASSERT_FALSE(callback_data.destroy_invoked); ASSERT_FALSE(callback_data.complete_invoked); /* shutdown channel so request can be released */ aws_channel_shutdown(s_tester.testing_channel.channel, AWS_ERROR_SUCCESS); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_tester.testing_channel)); aws_http_stream_release(stream); ASSERT_TRUE(callback_data.destroy_invoked); ASSERT_FALSE(callback_data.complete_invoked); aws_http_message_release(request); return s_tester_clean_up(); } /* Test that client automatically sends the HTTP/2 Connection Preface (magic string, followed by SETTINGS frame) */ TEST_CASE(h2_client_connection_preface_sent) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* Have the fake peer to run its decoder on what the client has written. * The decoder will raise an error if it doesn't receive the "client connection preface string" first. */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* Now check that client sent SETTINGS frame */ struct h2_decoded_frame *first_written_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 0); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, first_written_frame->type); ASSERT_FALSE(first_written_frame->ack); return s_tester_clean_up(); } static int s_stream_tester_init(struct client_stream_tester *stream_tester, struct aws_http_message *request) { struct client_stream_tester_options options = { .request = request, .connection = s_tester.connection, }; return client_stream_tester_init(stream_tester, s_tester.alloc, &options); } /* Test that client will automatically send the PING ACK frame back, when the PING frame is received */ TEST_CASE(h2_client_auto_ping_ack) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* Connection preface requires that SETTINGS be sent first (RFC-7540 3.5). 
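     * For reference, the client connection preface is the literal octets "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
     * followed by a SETTINGS frame; the server preface is just a (possibly empty) SETTINGS frame, which is
     * what the fake-peer helper below sends on the peer's behalf.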
*/ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7}; struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, false /*ack*/, opaque_data); ASSERT_NOT_NULL(frame); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* Now check that client sent PING ACK frame, it should be the latest frame received by peer * The last frame should be a ping type with ack on, and identical payload */ struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_PING, latest_frame->type); ASSERT_TRUE(latest_frame->ack); ASSERT_BIN_ARRAYS_EQUALS( opaque_data, AWS_HTTP2_PING_DATA_SIZE, latest_frame->ping_opaque_data, AWS_HTTP2_PING_DATA_SIZE); return s_tester_clean_up(); } TEST_CASE(h2_client_auto_ping_ack_higher_priority) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); const char *body_src = "hello"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_from_cursor(allocator, &body_cursor); aws_http_message_set_body_stream(request, request_body); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* Frames for the request are activated. 
Fake peer now sends a PING frame. */
    uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7};
    struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, false /*ack*/, opaque_data);
    ASSERT_NOT_NULL(frame);
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, frame));

    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    /* validate that the PING ACK frame has higher priority than the normal request frames and is received first */
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    struct h2_decoded_frame *fastest_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count);
    ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_PING, fastest_frame->type);
    ASSERT_TRUE(fastest_frame->ack);
    ASSERT_BIN_ARRAYS_EQUALS(
        opaque_data, AWS_HTTP2_PING_DATA_SIZE, fastest_frame->ping_opaque_data, AWS_HTTP2_PING_DATA_SIZE);

    /* clean up */
    aws_http_message_release(request);
    client_stream_tester_clean_up(&stream_tester);
    aws_input_stream_release(request_body);
    return s_tester_clean_up();
}

/* Test that the client automatically sends a SETTINGS ACK */
TEST_CASE(h2_client_auto_settings_ack) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));

    /* Connection preface requires that SETTINGS be sent first (RFC-7540 3.5). */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    /* Have the fake peer run its decoder on what the client has written. */
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));

    /* The SETTINGS ACK frame should have been sent back */
    struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode);
    ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, latest_frame->type);
    ASSERT_TRUE(latest_frame->ack);

    return s_tester_clean_up();
}

static int s_compare_headers(const struct aws_http_headers *expected, const struct aws_http_headers *got) {
    ASSERT_UINT_EQUALS(aws_http_headers_count(expected), aws_http_headers_count(got));
    for (size_t i = 0; i < aws_http_headers_count(expected); ++i) {
        struct aws_http_header expected_field;
        aws_http_headers_get_index(expected, i, &expected_field);
        struct aws_http_header got_field;
        aws_http_headers_get_index(got, i, &got_field);

        ASSERT_TRUE(aws_byte_cursor_eq(&expected_field.name, &got_field.name));
        ASSERT_TRUE(aws_byte_cursor_eq(&expected_field.value, &got_field.value));
        ASSERT_INT_EQUALS(expected_field.compression, got_field.compression);
    }
    return AWS_OP_SUCCESS;
}

/* Test that a simple request/response can be carried to completion.
 * The request consists of a single HEADERS frame and the response consists of a single HEADERS frame.
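 *
 * Roughly, the exchange exercised here looks like this (illustrative, not an exact wire layout):
 *
 *   client -> HEADERS (:method GET, :scheme https, :path /)  [END_HEADERS | END_STREAM]
 *   peer   -> HEADERS (:status 404, date ...)                [END_HEADERS | END_STREAM]
 *
 * so the stream completes without any DATA frames in either direction.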
*/ TEST_CASE(h2_client_stream_complete) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* validate sent request, */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_TRUE(sent_headers_frame->end_stream); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(request), sent_headers_frame->headers)); /* fake peer sends response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers( allocator, aws_http_stream_get_id(stream_tester.stream), response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* validate that client received complete response */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(404, stream_tester.response_status); ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); ASSERT_TRUE(stream_tester.metrics.receive_end_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.receive_start_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.receive_end_timestamp_ns > stream_tester.metrics.receive_start_timestamp_ns); ASSERT_TRUE( stream_tester.metrics.receiving_duration_ns == stream_tester.metrics.receive_end_timestamp_ns - stream_tester.metrics.receive_start_timestamp_ns); ASSERT_TRUE(stream_tester.metrics.send_start_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.send_end_timestamp_ns > 0); ASSERT_TRUE(stream_tester.metrics.send_end_timestamp_ns > stream_tester.metrics.send_start_timestamp_ns); ASSERT_TRUE( stream_tester.metrics.sending_duration_ns == stream_tester.metrics.send_end_timestamp_ns - stream_tester.metrics.send_start_timestamp_ns); ASSERT_TRUE(stream_tester.metrics.stream_id == stream_tester.stream->id); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Calling aws_http_connection_close() should cleanly shut down connection */ TEST_CASE(h2_client_close) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ 
ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* close connection */ aws_http_connection_close(s_tester.connection); /* connection should immediately lose "open" status */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); /* finish shutting down */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* validate that pending streams complete with error */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, stream_tester.on_complete_error_code); /* validate that GOAWAY sent */ struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_NO_ERROR, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Test that client automatically sends the HTTP/2 Connection Preface (magic string, followed by initial SETTINGS frame, * which we disabled the push_promise) And it will not be applied until the SETTINGS ack is received. 
Once SETTINGS ack * received, the initial settings will be applied and callback will be invoked */ TEST_CASE(h2_client_connection_init_settings_applied_after_ack_by_peer) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* validate sent request, */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_TRUE(sent_headers_frame->end_stream); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(request), sent_headers_frame->headers)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends push_promise */ uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends push request (PUSH_PROMISE) */ struct aws_http_header push_request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":authority", "veryblackpage.com"), DEFINE_HEADER(":path", "/style.css"), }; struct aws_http_headers *push_request_headers = aws_http_headers_new(allocator); ASSERT_SUCCESS(aws_http_headers_add_array( push_request_headers, push_request_headers_src, AWS_ARRAY_SIZE(push_request_headers_src))); uint32_t promised_stream_id = 2; struct aws_h2_frame *peer_frame = aws_h2_frame_new_push_promise(allocator, stream_id, promised_stream_id, push_request_headers, 0); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the connection is still open */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* set initial_settings_error_code as AWS_ERROR_UNKNOWN to make sure callback invoked later */ s_tester.user_data.initial_settings_error_code = AWS_ERROR_UNKNOWN; /* fake peer sends setting ack */ struct aws_h2_frame *settings_ack_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, settings_ack_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the callback invoked */ ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, s_tester.user_data.initial_settings_error_code); /* fake peer sends another push_promise again, after setting applied, connection will be closed */ peer_frame = aws_h2_frame_new_push_promise(allocator, stream_id, promised_stream_id + 2, push_request_headers, 0); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the connection completed with error */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* clean up */ 
    aws_http_headers_release(push_request_headers);
    aws_http_message_release(request);
    client_stream_tester_clean_up(&stream_tester);
    return s_tester_clean_up();
}

/* Test that an h2 stream can take an h1 request message and transform it to h2 style before sending it. */
TEST_CASE(h2_client_stream_with_h1_request_message) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));
    /* fake peer sends connection preface */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    /* send an h1 request */
    struct aws_http_message *request = aws_http_message_new_request(allocator);
    ASSERT_NOT_NULL(request);
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_method(request, aws_http_method_post));
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/")));
    struct aws_http_header request_headers_src[] = {
        DEFINE_HEADER("Accept", "*/*"),
        DEFINE_HEADER("Host", "example.com"),
        DEFINE_HEADER("Content-Length", "5"),
        DEFINE_HEADER("Upgrade", "HTTP/2.0"), /* Connection-specific header should be skipped */
    };
    aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src));

    /* body */
    const char *body_src = "hello";
    struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src);
    struct aws_input_stream *request_body = aws_input_stream_new_from_cursor(allocator, &body_cursor);
    aws_http_message_set_body_stream(request, request_body);

    struct client_stream_tester stream_tester;
    ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request));

    /* validate sent request (client should have sent SETTINGS, SETTINGS ACK, HEADERS, DATA (END_STREAM)) */
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    ASSERT_UINT_EQUALS(4, h2_decode_tester_frame_count(&s_tester.peer.decode));

    /* set expected h2 style headers */
    struct aws_http_header expected_headers_src[] = {
        DEFINE_HEADER(":method", "POST"),
        DEFINE_HEADER(":scheme", "https"),
        DEFINE_HEADER(":authority", "example.com"),
        DEFINE_HEADER(":path", "/"),
        DEFINE_HEADER("accept", "*/*"),
        DEFINE_HEADER("content-length", "5"),
    };
    struct aws_http_headers *expected_headers = aws_http_headers_new(allocator);
    ASSERT_SUCCESS(
        aws_http_headers_add_array(expected_headers, expected_headers_src, AWS_ARRAY_SIZE(expected_headers_src)));

    struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 2);
    ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type);
    ASSERT_SUCCESS(s_compare_headers(expected_headers, sent_headers_frame->headers));
    struct h2_decoded_frame *sent_data_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 3);
    ASSERT_INT_EQUALS(AWS_H2_FRAME_T_DATA, sent_data_frame->type);
    ASSERT_TRUE(sent_data_frame->end_stream);
    ASSERT_TRUE(aws_byte_buf_eq_c_str(&sent_data_frame->data, body_src));

    /* clean up */
    aws_http_headers_release(expected_headers);
    aws_http_message_release(request);
    client_stream_tester_clean_up(&stream_tester);
    aws_input_stream_destroy(request_body);
    return s_tester_clean_up();
}

/* Test that h2 stream can split the cookies header correctly */
TEST_CASE(h2_client_stream_with_cookies_headers) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));
    /* fake peer sends connection preface */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    /* send an h1
request */ struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_method(request, aws_http_method_get)); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"))); struct aws_http_header request_headers_src[] = { DEFINE_HEADER("Accept", "*/*"), DEFINE_HEADER("Host", "example.com"), DEFINE_HEADER("cookie", "a=b; c=d; e=f"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* set expected h2 style headers */ struct aws_http_header expected_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":authority", "example.com"), DEFINE_HEADER(":path", "/"), DEFINE_HEADER("accept", "*/*"), DEFINE_HEADER("cookie", "a=b; c=d; e=f"), }; struct aws_http_headers *expected_headers = aws_http_headers_new(allocator); ASSERT_SUCCESS( aws_http_headers_add_array(expected_headers, expected_headers_src, AWS_ARRAY_SIZE(expected_headers_src))); /* validate sent request, */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_TRUE(sent_headers_frame->end_stream); ASSERT_SUCCESS(s_compare_headers(expected_headers, sent_headers_frame->headers)); /* clean up */ aws_http_headers_release(expected_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Receiving malformed headers should result in a "Stream Error", not a "Connection Error". 
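 * A stream error is reported with RST_STREAM for that one stream and leaves the connection usable, while a
 * connection error is reported with GOAWAY and shuts the whole connection down (RFC-7540 5.4).
 * The malformed field used below is ":STATUS": uppercase header field names are forbidden in HTTP/2
 * (RFC-7540 8.1.2).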
*/ TEST_CASE(h2_client_stream_err_malformed_header) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends response with malformed header */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":STATUS", "404"), /* uppercase name forbidden in h2 */ }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers( allocator, aws_http_stream_get_id(stream_tester.stream), response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, rst_stream_frame->error_code); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_err_state_forbids_frame) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "PUT"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); const char *body_src = "hello"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_tester(allocator, body_cursor); /* Prevent END_STREAM from being sent */ aws_input_stream_tester_set_max_bytes_per_read(request_body, 0); aws_http_message_set_body_stream(request, request_body); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* Execute 1 event-loop tick. 
Request is sent, but no end_stream received */ testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_FALSE(sent_headers_frame->end_stream); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(request), sent_headers_frame->headers)); /* fake peer sends response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); /* fake peer sends response headers with end_stream set, which cause the stream to be * AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE */ struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE will reject body frame */ ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, true /*end_stream*/)); /* validate that stream completed with error */ testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_STREAM_CLOSED, rst_stream_frame->error_code); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); aws_input_stream_release(request_body); return s_tester_clean_up(); } TEST_CASE(h2_client_conn_err_stream_frames_received_for_idle_stream) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends response to "idle" (aka doesn't exist yet) stream 99 */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, 99 /*stream_id*/, response_headers, true /* end_stream */, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* validate that connection has closed due to PROTOCOL_ERROR */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, 
testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* validate that client sent GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ aws_http_headers_release(response_headers); return s_tester_clean_up(); } /* Peer may have sent certain frames (WINDOW_UPDATE and RST_STREAM) before realizing * that we have closed the stream. These frames should be ignored. */ TEST_CASE(h2_client_stream_ignores_some_frames_received_soon_after_closing) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends complete response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* fake peer sends WINDOW_UPDATE */ peer_frame = aws_h2_frame_new_window_update(allocator, stream_id, 99); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* fake peer sends RST_STREAM */ peer_frame = aws_h2_frame_new_rst_stream(allocator, stream_id, AWS_HTTP2_ERR_ENHANCE_YOUR_CALM); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* validate that stream completed successfully. 
* the WINDOW_UPDATE and RST_STREAM should be ignored because * they arrived soon after the client had sent END_STREAM */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_receive_info_headers) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends a info-header-block response */ struct aws_http_header info_response_headers_src[] = { DEFINE_HEADER(":status", "100"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:03:49 GMT"), }; struct aws_http_headers *info_response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array( info_response_headers, info_response_headers_src, AWS_ARRAY_SIZE(info_response_headers_src)); struct aws_h2_frame *peer_frame = aws_h2_frame_new_headers(allocator, stream_id, info_response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* check info response */ ASSERT_INT_EQUALS(1, stream_tester.num_info_responses); ASSERT_SUCCESS(aws_http_message_set_response_status(stream_tester.info_responses[0], 100)); struct aws_http_headers *rev_info_headers = aws_http_message_get_headers(stream_tester.info_responses[0]); ASSERT_SUCCESS(s_compare_headers(info_response_headers, rev_info_headers)); /* fake peer sends a main-header-block response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* validate that client received complete response */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(404, stream_tester.response_status); ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); /* clean up */ aws_http_headers_release(response_headers); aws_http_headers_release(info_response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } 
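/* Note (illustrative, not part of the original test suite): outside this testing harness, an application
 * would observe the informational (1xx) block and the main header block through the same headers callback
 * on aws_http_make_request_options. The callback shape and the aws_http_header_block values below are
 * assumed from aws-c-http's public API; treat this as a rough sketch:
 *
 *   static int s_on_response_headers(
 *       struct aws_http_stream *stream,
 *       enum aws_http_header_block header_block,
 *       const struct aws_http_header *header_array,
 *       size_t num_headers,
 *       void *user_data) {
 *
 *       if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
 *           // e.g. the ":status: 100" block the fake peer sends in the test above
 *       } else if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
 *           // the final ":status: 404" response headers
 *       }
 *       return AWS_OP_SUCCESS;
 *   }
 */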
TEST_CASE(h2_client_stream_err_receive_info_headers_after_main) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends a main-header-block response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* fake peer sends a info-header-block response */ struct aws_http_header info_response_headers_src[] = { DEFINE_HEADER(":status", "100"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:03:49 GMT"), }; struct aws_http_headers *info_response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array( info_response_headers, info_response_headers_src, AWS_ARRAY_SIZE(info_response_headers_src)); peer_frame = aws_h2_frame_new_headers(allocator, stream_id, info_response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the stream completed with error */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* validate the connection is not affected */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, rst_stream_frame->error_code); /* clean up */ aws_http_headers_release(response_headers); aws_http_headers_release(info_response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_receive_trailing_headers) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", 
"GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends a main-header-block response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* fake peer sends a trailing-header-block response */ struct aws_http_header response_trailer_src[] = { DEFINE_HEADER("user-agent", "test"), }; struct aws_http_headers *response_trailer = aws_http_headers_new(allocator); aws_http_headers_add_array(response_trailer, response_trailer_src, AWS_ARRAY_SIZE(response_trailer_src)); peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_trailer, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* validate that client received complete response */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(404, stream_tester.response_status); ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); ASSERT_SUCCESS(s_compare_headers(response_trailer, stream_tester.response_trailer)); /* clean up */ aws_http_headers_release(response_headers); aws_http_headers_release(response_trailer); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_err_receive_trailing_before_main) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends a trailing-header-block response */ struct aws_http_header response_trailer_src[] = { DEFINE_HEADER("user-agent", "test"), }; struct aws_http_headers *response_trailer = aws_http_headers_new(allocator); aws_http_headers_add_array(response_trailer, response_trailer_src, AWS_ARRAY_SIZE(response_trailer_src)); struct aws_h2_frame *peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_trailer, true /*end_stream*/, 0, NULL); 
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    /* validate the stream completed with error */
    ASSERT_TRUE(stream_tester.complete);
    ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code);
    /* validate the connection is not affected */
    ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection));

    /* validate that stream sent RST_STREAM */
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    struct h2_decoded_frame *rst_stream_frame =
        h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL);
    ASSERT_INT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type);
    ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, rst_stream_frame->error_code);

    /* clean up */
    aws_http_headers_release(response_trailer);
    aws_http_message_release(request);
    client_stream_tester_clean_up(&stream_tester);
    return s_tester_clean_up();
}

/* Once the peer has sent the END_STREAM flag, it should not send any frames other than WINDOW_UPDATE and RST_STREAM;
 * we treat a violation as a connection error (STREAM_CLOSED) */
TEST_CASE(h2_client_conn_err_stream_frames_received_soon_after_closing) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));
    /* fake peer sends connection preface */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    /* send request */
    struct aws_http_message *request = aws_http2_message_new_request(allocator);
    ASSERT_NOT_NULL(request);
    struct aws_http_header request_headers_src[] = {
        DEFINE_HEADER(":method", "GET"),
        DEFINE_HEADER(":scheme", "https"),
        DEFINE_HEADER(":path", "/"),
    };
    aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src));

    struct client_stream_tester stream_tester;
    ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream);

    /* fake peer sends complete response */
    struct aws_http_header response_headers_src[] = {
        DEFINE_HEADER(":status", "404"),
        DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"),
    };
    struct aws_http_headers *response_headers = aws_http_headers_new(allocator);
    aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src));
    struct aws_h2_frame *peer_frame =
        aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL);
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));

    /* fake peer tries to send a DATA frame */
    ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, "hello", true /*end_stream*/));

    /* validate that connection has closed.
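     * Receiving any frame other than WINDOW_UPDATE, RST_STREAM or PRIORITY on a stream that was closed by an
     * END_STREAM flag is a connection error of type STREAM_CLOSED (RFC-7540 5.1), so the client is expected
     * to send GOAWAY and shut the channel down.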
*/ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* validate that client sent GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_STREAM_CLOSED, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_err_stream_frames_received_soon_after_rst_stream_received) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends RST_STREAM */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_rst_stream(allocator, stream_id, AWS_HTTP2_ERR_ENHANCE_YOUR_CALM); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); struct aws_http_headers *response_headers; /* fake peer try sending complete response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the stream completed with error */ ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_RST_STREAM_RECEIVED, stream_tester.on_complete_error_code); /* We treat this as a stream error. So, validate the connection is still open and a rst stream is sent by * client. 
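     * Per RFC-7540 5.1, an endpoint that receives any frame other than PRIORITY after receiving RST_STREAM
     * treats it as a stream error of type STREAM_CLOSED, which is why the client answers with
     * RST_STREAM(STREAM_CLOSED) here instead of tearing down the connection.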
     */
    ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection));
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    struct h2_decoded_frame *rst_stream_frame =
        h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL);
    ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_STREAM_CLOSED, rst_stream_frame->error_code);

    /* clean up */
    aws_http_headers_release(response_headers);
    aws_http_message_release(request);
    client_stream_tester_clean_up(&stream_tester);
    return s_tester_clean_up();
}

/* Connection error for frames received on a closed stream that we have removed from the cache, which may be
 * because it closed too long ago */
TEST_CASE(h2_client_conn_err_stream_frames_received_after_removed_from_cache) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));
    /* fake peer sends connection preface */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    enum { NUM_STREAMS = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS + 2 };

    /* send request */
    struct aws_http_message *requests[NUM_STREAMS];
    struct aws_http_header request_headers_src[] = {
        DEFINE_HEADER(":method", "GET"),
        DEFINE_HEADER(":scheme", "https"),
        DEFINE_HEADER(":path", "/"),
    };
    struct client_stream_tester stream_tester[NUM_STREAMS];

    /* fill out the cache */
    for (size_t i = 0; i < NUM_STREAMS; i++) {
        requests[i] = aws_http2_message_new_request(allocator);
        aws_http_message_add_header_array(requests[i], request_headers_src, AWS_ARRAY_SIZE(request_headers_src));
        ASSERT_SUCCESS(s_stream_tester_init(&stream_tester[i], requests[i]));
        testing_channel_drain_queued_tasks(&s_tester.testing_channel);
        /* close the streams immediately */
        struct aws_h2_frame *peer_frame = aws_h2_frame_new_rst_stream(
            allocator, aws_http_stream_get_id(stream_tester[i].stream), AWS_HTTP2_ERR_ENHANCE_YOUR_CALM);
        ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));
        testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    }

    uint32_t stream_id = aws_http_stream_get_id(stream_tester[0].stream);
    struct aws_http_headers *response_headers;

    /* fake peer tries sending a complete response */
    struct aws_http_header response_headers_src[] = {
        DEFINE_HEADER(":status", "404"),
        DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"),
    };
    response_headers = aws_http_headers_new(allocator);
    aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src));
    struct aws_h2_frame *peer_frame =
        aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL);
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));

    /* validate the connection completed with error */
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection));
    ASSERT_INT_EQUALS(
        AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel));

    /* client should send GOAWAY */
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    struct h2_decoded_frame *goaway =
        h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL);
    ASSERT_NOT_NULL(goaway);
    ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, goaway->error_code);

    /* clean up */
    aws_http_headers_release(response_headers);
    for (size_t i = 0; i < NUM_STREAMS; i++) {
        aws_http_message_release(requests[i]);
        client_stream_tester_clean_up(&stream_tester[i]);
    }
    return s_tester_clean_up();
}

/* Test receiving a
response with DATA frames */ TEST_CASE(h2_client_stream_receive_data) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* fake peer sends response body */ const char *body_src = "hello"; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, true /*end_stream*/)); /* validate that client received complete response */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&stream_tester.response_body, body_src)); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* A message is malformed if DATA is received before HEADERS */ TEST_CASE(h2_client_stream_err_receive_data_before_headers) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response body BEFORE any response headers */ const char *body_src = "hello"; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, true /*end_stream*/)); /* validate that stream completed with error */ 
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_TRUE(stream_tester.complete);
    ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code);
    /* a stream error should not affect the connection */
    ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection));

    /* validate that stream sent RST_STREAM */
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    struct h2_decoded_frame *rst_stream_frame =
        h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL);
    ASSERT_INT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type);
    ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, rst_stream_frame->error_code);

    /* clean up */
    aws_http_message_release(request);
    client_stream_tester_clean_up(&stream_tester);
    return s_tester_clean_up();
}

/* A message is malformed if the DATA received does not match the content-length header received */
TEST_CASE(h2_client_stream_err_receive_data_not_match_content_length) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));
    /* fake peer sends connection preface */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);

    /* send request */
    struct aws_http_message *request = aws_http2_message_new_request(allocator);
    ASSERT_NOT_NULL(request);
    struct aws_http_header request_headers_src[] = {
        DEFINE_HEADER(":method", "GET"),
        DEFINE_HEADER(":scheme", "https"),
        DEFINE_HEADER(":path", "/"),
    };
    aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src));

    struct client_stream_tester stream_tester;
    ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream);

    /* fake peer sends response headers */
    struct aws_http_header response_headers_src[] = {
        DEFINE_HEADER(":status", "200"),
        DEFINE_HEADER("content-length", "200"),
    };
    struct aws_http_headers *response_headers = aws_http_headers_new(allocator);
    aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src));
    struct aws_h2_frame *response_frame =
        aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL);
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame));

    /* fake peer sends response body */
    const char *body_src = "hello";
    ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, true /*end_stream*/));

    /* validate that stream completed with error */
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_TRUE(stream_tester.complete);
    ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code);
    /* a stream error should not affect the connection */
    ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection));

    /* validate that stream sent RST_STREAM */
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    struct h2_decoded_frame *rst_stream_frame =
        h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL);
    ASSERT_INT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type);
    ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, rst_stream_frame->error_code);

    /* clean up */
    aws_http_headers_release(response_headers);
    aws_http_message_release(request);
    client_stream_tester_clean_up(&stream_tester);
    return s_tester_clean_up();
}

/* Test
sending a request with DATA frames */ TEST_CASE(h2_client_stream_send_data) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), DEFINE_HEADER("content-length", "5"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); const char *body_src = "hello"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_from_cursor(allocator, &body_cursor); aws_http_message_set_body_stream(request, request_body); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* validate sent request (client should have sent SETTINGS, HEADERS, DATA (END_STREAM) */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_UINT_EQUALS(3, h2_decode_tester_frame_count(&s_tester.peer.decode)); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 1); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(request), sent_headers_frame->headers)); ASSERT_FALSE(sent_headers_frame->end_stream); struct h2_decoded_frame *sent_data_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 2); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_DATA, sent_data_frame->type); ASSERT_TRUE(sent_data_frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&sent_data_frame->data, body_src)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* validate that request completed successfully */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); aws_input_stream_release(request_body); return s_tester_clean_up(); } /* Test sending multiple requests, each with large bodies that must be sent across multiple DATA frames. * The connection should not let one stream hog the connection, the streams should take turns sending DATA. 
* Also, the stream should not send more than one aws_io_message full of frames per event-loop-tick */ TEST_CASE(h2_client_stream_send_lots_of_data) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* bodies must be big enough to span multiple H2-frames and multiple aws_io_messages */ size_t body_size = aws_max_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) * 5; /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send multiple requests */ enum { NUM_STREAMS = 3 }; struct aws_http_message *requests[NUM_STREAMS]; struct aws_http_header request_headers_src[NUM_STREAMS][3] = { { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/a.txt"), }, { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/b.txt"), }, { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/c.txt"), }, }; struct aws_byte_buf request_body_bufs[NUM_STREAMS]; struct aws_input_stream *request_bodies[NUM_STREAMS]; struct client_stream_tester stream_testers[NUM_STREAMS]; for (size_t i = 0; i < NUM_STREAMS; ++i) { requests[i] = aws_http2_message_new_request(allocator); aws_http_message_add_header_array(requests[i], request_headers_src[i], AWS_ARRAY_SIZE(request_headers_src[i])); /* fill first body with "aaaa...", second with "bbbb...", etc */ ASSERT_SUCCESS(aws_byte_buf_init(&request_body_bufs[i], allocator, body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&request_body_bufs[i], (uint8_t)('a' + i), body_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&request_body_bufs[i]); request_bodies[i] = aws_input_stream_new_from_cursor(allocator, &body_cursor); ASSERT_NOT_NULL(request_bodies[i]); aws_http_message_set_body_stream(requests[i], request_bodies[i]); ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[i], requests[i])); } /* now loop until all requests are done sending. * 1 aws_io_message should be written with each tick of the event-loop. * determine when (based on event-loop tick count) each request sent its END_STREAM. 
*/ struct aws_linked_list *written_msg_queue = testing_channel_get_written_message_queue(&s_tester.testing_channel); size_t tick_i = 0; size_t end_stream_count = 0; size_t end_stream_tick[NUM_STREAMS]; while (end_stream_count < NUM_STREAMS) { /* check that connection sends exactly 1 aws_io_message per event-loop tick */ testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); size_t written_msg_queue_len = 0; for (struct aws_linked_list_node *node = aws_linked_list_begin(written_msg_queue); node != aws_linked_list_end(written_msg_queue); node = aws_linked_list_next(node)) { written_msg_queue_len++; } ASSERT_UINT_EQUALS(1, written_msg_queue_len); /* decode all new frames and examine them to see if any request has finished */ const size_t prev_frame_count = h2_decode_tester_frame_count(&s_tester.peer.decode); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); const size_t frame_count = h2_decode_tester_frame_count(&s_tester.peer.decode); for (size_t i = prev_frame_count; i < frame_count; ++i) { struct h2_decoded_frame *frame = h2_decode_tester_get_frame(&s_tester.peer.decode, i); if (frame->type == AWS_H2_FRAME_T_DATA) { /* Send a Window update frame back */ struct aws_h2_frame *connection_window_update = aws_h2_frame_new_window_update(allocator, 0, (uint32_t)frame->data.len); ASSERT_NOT_NULL(connection_window_update); h2_fake_peer_send_frame(&s_tester.peer, connection_window_update); struct aws_h2_frame *stream_window_update = aws_h2_frame_new_window_update(allocator, frame->stream_id, (uint32_t)frame->data.len); ASSERT_NOT_NULL(stream_window_update); h2_fake_peer_send_frame(&s_tester.peer, stream_window_update); } if (frame->type == AWS_H2_FRAME_T_DATA && frame->end_stream) { end_stream_tick[end_stream_count++] = tick_i; } } tick_i++; } for (size_t i = 1; i < NUM_STREAMS; ++i) { /* as a simple fairness test, check that each of the requests finished within 1 event-loop tick of the last. 
*/ size_t streams_finished_n_ticks_apart = end_stream_tick[i] - end_stream_tick[i - 1]; ASSERT_TRUE(streams_finished_n_ticks_apart <= 1); /* validate that all data sent successfully */ ASSERT_SUCCESS(h2_decode_tester_check_data_across_frames( &s_tester.peer.decode, aws_http_stream_get_id(stream_testers[i].stream), aws_byte_cursor_from_buf(&request_body_bufs[i]), true /*expect_end_frame*/)); } /* finally, send responses and ensure all streams complete successfully */ struct aws_http_header response_headers_src[] = {DEFINE_HEADER(":status", "200")}; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); for (size_t i = 0; i < NUM_STREAMS; ++i) { struct aws_h2_frame *response_frame = aws_h2_frame_new_headers( allocator, aws_http_stream_get_id(stream_testers[i].stream), response_headers, true /* end_stream */, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); } testing_channel_drain_queued_tasks(&s_tester.testing_channel); for (size_t i = 0; i < NUM_STREAMS; ++i) { ASSERT_TRUE(stream_testers[i].complete); ASSERT_INT_EQUALS(200, stream_testers[i].response_status); } ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); for (size_t i = 0; i < NUM_STREAMS; ++i) { client_stream_tester_clean_up(&stream_testers[i]); aws_http_message_release(requests[i]); aws_input_stream_release(request_bodies[i]); aws_byte_buf_clean_up(&request_body_bufs[i]); } return s_tester_clean_up(); } /* Test sending a request whose aws_input_stream is not providing body data all at once */ TEST_CASE(h2_client_stream_send_stalled_data) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* get request ready * the body_stream will stall and provide no data when we try to read from it */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); const char *body_src = "hello"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_tester(allocator, body_cursor); aws_input_stream_tester_set_max_bytes_per_read(request_body, 0); aws_http_message_set_body_stream(request, request_body); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* Execute 1 event-loop tick. Validate that no DATA frames were written */ testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NULL(h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_DATA, 0 /*search_start_idx*/, NULL)); /* Execute a few more event-loop ticks. 
No more frames should be written */ testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(aws_linked_list_empty(testing_channel_get_written_message_queue(&s_tester.testing_channel))); /* Let aws_input_stream produce just 1 byte. This should result in 1 DATA frame with 1 byte of payload */ aws_input_stream_tester_set_max_bytes_per_read(request_body, 1); testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t data_frame_idx; struct h2_decoded_frame *data_frame = h2_decode_tester_find_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_DATA, 0 /*search_start_idx*/, &data_frame_idx); ASSERT_NOT_NULL(data_frame); ASSERT_UINT_EQUALS(1, data_frame->data.len); ASSERT_FALSE(data_frame->end_stream); ASSERT_NULL(h2_decode_tester_find_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_DATA, data_frame_idx + 1 /*search_start_idx*/, NULL)); /* finish up. Let aws_input_stream produce the rest of its data */ aws_input_stream_tester_set_max_bytes_per_read(request_body, SIZE_MAX); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_SUCCESS( h2_decode_tester_check_data_str_across_frames(&s_tester.peer.decode, stream_id, body_src, true /*end_stream*/)); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); aws_input_stream_release(request_body); return s_tester_clean_up(); } static int s_fake_peer_window_update_check( struct aws_allocator *alloc, uint32_t stream_id, uint32_t window_size_increment, const char *expected_data, size_t expected_data_len, bool end_stream, bool skip_check_data) { struct aws_h2_frame *stream_window_update = aws_h2_frame_new_window_update(alloc, stream_id, window_size_increment); ASSERT_NOT_NULL(stream_window_update); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, stream_window_update)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); if (expected_data) { /* DATA should be received now as the last frame, check the result */ struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_DATA, latest_frame->type); ASSERT_TRUE(latest_frame->end_stream == end_stream); if (!skip_check_data) { ASSERT_BIN_ARRAYS_EQUALS( latest_frame->data.buffer, latest_frame->data.len, expected_data, expected_data_len); } } else { ASSERT_TRUE(aws_linked_list_empty(testing_channel_get_written_message_queue(&s_tester.testing_channel))); } return AWS_OP_SUCCESS; } /* Test sending DATA frames is blocked by stream window size, and will resume when we receive window update */ TEST_CASE(h2_client_stream_send_data_controlled_by_stream_window_size) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ /* fake peer sends setting with 5 initial window size */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = 5}, }; struct aws_h2_frame *settings = aws_h2_frame_new_settings(allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings); ASSERT_SUCCESS(h2_fake_peer_send_connection_preface(&s_tester.peer, settings)); 
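/* [Editorial note] A minimal sketch, not part of this test, of the send-side gating these flow-control tests describe:
 * before writing DATA the client looks at the stream's flow-control window and, per the comments in this file, sends
 * nothing while the available window is below AWS_H2_MIN_WINDOW_SIZE. Helper and variable names are hypothetical, and
 * the code is kept inside a comment so the test translation unit is unchanged:
 *
 *     static size_t s_stream_send_budget(int64_t stream_window, size_t body_bytes_remaining) {
 *         if (stream_window < AWS_H2_MIN_WINDOW_SIZE) {
 *             return 0;
 *         }
 *         return (size_t)stream_window < body_bytes_remaining ? (size_t)stream_window : body_bytes_remaining;
 *     }
 *
 * With the peer's INITIAL_WINDOW_SIZE of 5 advertised above, the budget is 0, so only HEADERS goes out; the
 * WINDOW_UPDATE of 256 sent later in this test is what releases the 10-byte body. */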
testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); const char *body_src = "hello CRT!"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_from_cursor(allocator, &body_cursor); aws_http_message_set_body_stream(request, request_body); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* validate sent request (client should only have sent HEADERS) */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); frames_count += 1; ASSERT_UINT_EQUALS(frames_count, h2_decode_tester_frame_count(&s_tester.peer.decode)); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 1); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(request), sent_headers_frame->headers)); ASSERT_FALSE(sent_headers_frame->end_stream); /* fake peer sends a WINDOW_UPDATE on stream to unblock the DATA frame. We need to release the min window size */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, stream_id, 256, "hello CRT!", 10, true /*end_stream*/, false /*skip_check_data*/)); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* validate that request completed successfully */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); aws_input_stream_release(request_body); return s_tester_clean_up(); } /* Test stream window size becomes negative, and will resume only when it back to positive again. 
*/ TEST_CASE(h2_client_stream_send_data_controlled_by_negative_stream_window_size) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ /* fake peer sends setting with 300 initial window size */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = 300}, }; struct aws_h2_frame *settings = aws_h2_frame_new_settings(allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings); ASSERT_SUCCESS(h2_fake_peer_send_connection_preface(&s_tester.peer, settings)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); char body_src[400]; for (int i = 0; i < 400; i++) { body_src[i] = 'a'; } struct aws_byte_cursor body_cursor = aws_byte_cursor_from_array(body_src, 400); struct aws_input_stream *request_body = aws_input_stream_new_from_cursor(allocator, &body_cursor); aws_http_message_set_body_stream(request, request_body); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* validate sent request (client should have sent HEADERS, part of DATA(first 300 bytes) */ frames_count += 2; ASSERT_UINT_EQUALS(frames_count, h2_decode_tester_frame_count(&s_tester.peer.decode)); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 2); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(request), sent_headers_frame->headers)); ASSERT_FALSE(sent_headers_frame->end_stream); struct h2_decoded_frame *sent_data_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 1); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_DATA, sent_data_frame->type); ASSERT_FALSE(sent_data_frame->end_stream); ASSERT_BIN_ARRAYS_EQUALS(sent_data_frame->data.buffer, sent_data_frame->data.len, body_src, 300); /* fake peer set new INITIAL_WINDOW_SIZE to 0 to make stream window size to be negative,which should be -300 */ settings_array[0].value = 0; settings = aws_h2_frame_new_settings(allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, settings)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check for setting ACK */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); frames_count += 1; ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_UINT_EQUALS(frames_count, h2_decode_tester_frame_count(&s_tester.peer.decode)); struct h2_decoded_frame *setting_ack_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 1); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, setting_ack_frame->type); 
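/* [Editorial note] A sketch, not part of this test, of the arithmetic this test relies on. Per RFC 7540 section
 * 6.9.2, when the peer changes SETTINGS_INITIAL_WINDOW_SIZE, every stream's send window is adjusted by the delta
 * (new value minus old value), which can leave a window negative. The helper name is hypothetical and the code is
 * kept in a comment so the translation unit is unchanged:
 *
 *     static int64_t s_apply_initial_window_delta(int64_t stream_window, uint32_t old_initial, uint32_t new_initial) {
 *         return stream_window + (int64_t)new_initial - (int64_t)old_initial;
 *     }
 *
 * Here the stream started with a 300-byte window, 300 of the 400 body bytes were sent (window 0), and the new
 * INITIAL_WINDOW_SIZE of 0 applies a delta of -300, so the window sits at -300 until the WINDOW_UPDATE frames below
 * bring it back above the minimum. */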
ASSERT_TRUE(setting_ack_frame->ack); /* fake peer sends a WINDOW_UPDATE on stream to try unblocking the DATA frame. But just release (300+min window * size) bytes, it will still be min window size, nothing will be sent */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, stream_id, 300 + AWS_H2_MIN_WINDOW_SIZE, NULL, 0, false /*end_stream*/, false /*skip_check_data*/)); /* Release one more bytes, rest of the data will be sent */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, stream_id, 1, body_src, 100, true /*end_stream*/, false /*skip_check_data*/)); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* validate that request completed successfully */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); aws_input_stream_release(request_body); return s_tester_clean_up(); } /* Test when connection window size becomes zero, no stream can send data */ TEST_CASE(h2_client_stream_send_data_controlled_by_connection_window_size) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* bodies must be big enough to span multiple H2-frames and multiple aws_io_messages */ size_t body_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] - AWS_H2_MIN_WINDOW_SIZE; /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send multiple requests */ enum { NUM_STREAMS = 2 }; struct aws_http_message *requests[NUM_STREAMS]; struct aws_http_header request_headers_src[NUM_STREAMS][3] = { { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/a.txt"), }, { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/b.txt"), }, }; struct aws_byte_buf request_body_bufs[NUM_STREAMS]; struct aws_input_stream *request_bodies[NUM_STREAMS]; struct client_stream_tester stream_testers[NUM_STREAMS]; for (size_t i = 0; i < NUM_STREAMS; ++i) { requests[i] = aws_http2_message_new_request(allocator); aws_http_message_add_header_array(requests[i], request_headers_src[i], AWS_ARRAY_SIZE(request_headers_src[i])); /* fill first body with "aaaa...", second with "bbbb...", etc */ ASSERT_SUCCESS(aws_byte_buf_init(&request_body_bufs[i], allocator, body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&request_body_bufs[i], (uint8_t)('a' + i), body_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&request_body_bufs[i]); request_bodies[i] = aws_input_stream_new_from_cursor(allocator, &body_cursor); ASSERT_NOT_NULL(request_bodies[i]); 
aws_http_message_set_body_stream(requests[i], request_bodies[i]); } /* Send the first request, which will shrink the connection window to the min window size and stop the connection from sending more data */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[0], requests[0])); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* Check that the last frame ends the stream, i.e. all the data was sent. */ struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_DATA, latest_frame->type); ASSERT_TRUE(latest_frame->end_stream); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); /* Send the remaining request; only its DATA frames will be blocked */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[1], requests[1])); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); frames_count += 1; /* Check only the HEADERS frame is received */ ASSERT_UINT_EQUALS(frames_count, h2_decode_tester_frame_count(&s_tester.peer.decode)); struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 1); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(requests[1]), sent_headers_frame->headers)); ASSERT_FALSE(sent_headers_frame->end_stream); /* A WINDOW_UPDATE on the second stream will not help */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, aws_http_stream_get_id(stream_testers[1].stream), 400, NULL, 0, false /*end_stream*/, false /*skip_check_data*/)); char expected[400]; for (int i = 0; i < 400; i++) { expected[i] = 'b'; } /* A connection WINDOW_UPDATE does help: 400 bytes of the second request are sent now */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, 0, 400 - AWS_H2_MIN_WINDOW_SIZE, expected, 400, false /*end_stream*/, false /*skip_check_data*/)); /* Release all the window */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, 0, (uint32_t)body_size, "", 0, true /*end_stream*/, true /*skip_check_data*/)); /* finally, send responses and ensure all streams complete successfully */ struct aws_http_header response_headers_src[] = {DEFINE_HEADER(":status", "200")}; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); for (size_t i = 0; i < NUM_STREAMS; ++i) { struct aws_h2_frame *response_frame = aws_h2_frame_new_headers( allocator, aws_http_stream_get_id(stream_testers[i].stream), response_headers, true /* end_stream */, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); } testing_channel_drain_queued_tasks(&s_tester.testing_channel); for (size_t i = 0; i < NUM_STREAMS; ++i) { ASSERT_TRUE(stream_testers[i].complete); ASSERT_INT_EQUALS(200, stream_testers[i].response_status); } ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); for (size_t i = 0; i < NUM_STREAMS; ++i) { client_stream_tester_clean_up(&stream_testers[i]); aws_http_message_release(requests[i]); aws_input_stream_release(request_bodies[i]); aws_byte_buf_clean_up(&request_body_bufs[i]); } return s_tester_clean_up(); }
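/* [Editorial note] A sketch, not part of these tests, of the two-level accounting the next test exercises: each DATA
 * payload is charged against both the connection window and the stream window, and a sender may write at most the
 * smaller of the two (never less than zero). The helper name is hypothetical and the code is kept in a comment so the
 * translation unit is unchanged:
 *
 *     static uint64_t s_data_budget(int64_t connection_window, int64_t stream_window) {
 *         int64_t budget = connection_window < stream_window ? connection_window : stream_window;
 *         return budget > 0 ? (uint64_t)budget : 0;
 *     }
 *
 * This is why a WINDOW_UPDATE on a single stream achieves nothing while the connection window is exhausted, and a
 * connection-level WINDOW_UPDATE releases only as many bytes as each stream's own window allows. */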
/* Test that when the connection window size is zero and a stream's window size is zero, a WINDOW_UPDATE on the connection and a WINDOW_UPDATE on the stream do not affect each other */ TEST_CASE(h2_client_stream_send_data_controlled_by_connection_and_stream_window_size) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* bodies must be big enough to span multiple H2-frames and multiple aws_io_messages */ size_t body_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send multiple requests */ enum { NUM_STREAMS = 3 }; struct aws_http_message *requests[NUM_STREAMS]; struct aws_http_header request_headers_src[NUM_STREAMS][3] = { { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/a.txt"), }, { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/b.txt"), }, { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/c.txt"), }, }; struct aws_byte_buf request_body_bufs[NUM_STREAMS]; struct aws_input_stream *request_bodies[NUM_STREAMS]; struct client_stream_tester stream_testers[NUM_STREAMS]; for (size_t i = 0; i < NUM_STREAMS; ++i) { requests[i] = aws_http2_message_new_request(allocator); aws_http_message_add_header_array(requests[i], request_headers_src[i], AWS_ARRAY_SIZE(request_headers_src[i])); /* fill first body with "aaaa...", second with "bbbb...", etc */ ASSERT_SUCCESS(aws_byte_buf_init(&request_body_bufs[i], allocator, body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&request_body_bufs[i], (uint8_t)('a' + i), body_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&request_body_bufs[i]); request_bodies[i] = aws_input_stream_new_from_cursor(allocator, &body_cursor); ASSERT_NOT_NULL(request_bodies[i]); aws_http_message_set_body_stream(requests[i], request_bodies[i]); } /* Send the first request, which will take all the connection window */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[0], requests[0])); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* If all the data had been sent, the last frame would end the stream, but that is not the case here: since we stop sending data when the connection window size is smaller than 256 bytes, the end of the stream cannot be received yet. */
struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_DATA, latest_frame->type); ASSERT_FALSE(latest_frame->end_stream); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); /* fake peer sets a new INITIAL_WINDOW_SIZE of 0, so the remaining streams will have a window size of 0 */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = 0}, }; struct aws_h2_frame *settings = aws_h2_frame_new_settings(allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, settings)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Get setting ACK */ frames_count += 1; ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_UINT_EQUALS(frames_count, h2_decode_tester_frame_count(&s_tester.peer.decode)); struct h2_decoded_frame *setting_ack_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 1); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, setting_ack_frame->type); ASSERT_TRUE(setting_ack_frame->ack); /* Send the remaining requests; only their DATA frames will be blocked */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[1], requests[1])); ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[2], requests[2])); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* Check only the HEADERS frames of two streams are received */ frames_count += 2; ASSERT_UINT_EQUALS(frames_count, h2_decode_tester_frame_count(&s_tester.peer.decode)); /* Header for requests[1] */ struct h2_decoded_frame *sent_headers_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 2); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(requests[1]), sent_headers_frame->headers)); ASSERT_FALSE(sent_headers_frame->end_stream); /* Header for requests[2] */ sent_headers_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frames_count - 1); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, sent_headers_frame->type); ASSERT_SUCCESS(s_compare_headers(aws_http_message_get_headers(requests[2]), sent_headers_frame->headers)); ASSERT_FALSE(sent_headers_frame->end_stream); char expected_b[400]; for (int i = 0; i < 400; i++) { expected_b[i] = 'b'; } char expected_c[400]; for (int i = 0; i < 400; i++) { expected_c[i] = 'c'; } /* A WINDOW_UPDATE on requests[1] will not help */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, aws_http_stream_get_id(stream_testers[1].stream), 400, NULL, 0, false /*end_stream*/, false /*skip_check_data*/)); /* A WINDOW_UPDATE on the connection keeps the connection wide open, but only the 400 bytes permitted by the stream window of requests[1] will be sent */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, 0, (uint32_t)body_size * 3, expected_b, 400, false /*end_stream*/, false /*skip_check_data*/)); /* A WINDOW_UPDATE on requests[1] lets requests[1] send data again */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, aws_http_stream_get_id(stream_testers[1].stream), 400, expected_b, 400, false /*end_stream*/, false /*skip_check_data*/)); /* A WINDOW_UPDATE on requests[2] lets requests[2] send data now */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, aws_http_stream_get_id(stream_testers[2].stream), 400, expected_c, 400, false /*end_stream*/,
false /*skip_check_data*/)); /* Release all the window for requests[0] */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, aws_http_stream_get_id(stream_testers[0].stream), (uint32_t)body_size + AWS_H2_MIN_WINDOW_SIZE, "", 0, true /*end_stream*/, true /*skip_check_data*/)); /* Release all the window for requests[1] */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, aws_http_stream_get_id(stream_testers[1].stream), (uint32_t)body_size + AWS_H2_MIN_WINDOW_SIZE, "", 0, true /*end_stream*/, true /*skip_check_data*/)); /* Release all the window for requests[2] */ ASSERT_SUCCESS(s_fake_peer_window_update_check( allocator, aws_http_stream_get_id(stream_testers[2].stream), (uint32_t)body_size + AWS_H2_MIN_WINDOW_SIZE, "", 0, true /*end_stream*/, true /*skip_check_data*/)); /* finally, send responses and ensure all streams complete successfully */ struct aws_http_header response_headers_src[] = {DEFINE_HEADER(":status", "200")}; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); for (size_t i = 0; i < NUM_STREAMS; ++i) { struct aws_h2_frame *response_frame = aws_h2_frame_new_headers( allocator, aws_http_stream_get_id(stream_testers[i].stream), response_headers, true /* end_stream */, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); } testing_channel_drain_queued_tasks(&s_tester.testing_channel); for (size_t i = 0; i < NUM_STREAMS; ++i) { ASSERT_TRUE(stream_testers[i].complete); ASSERT_INT_EQUALS(200, stream_testers[i].response_status); } ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(response_headers); for (size_t i = 0; i < NUM_STREAMS; ++i) { client_stream_tester_clean_up(&stream_testers[i]); aws_http_message_release(requests[i]); aws_input_stream_release(request_bodies[i]); aws_byte_buf_clean_up(&request_body_bufs[i]); } return s_tester_clean_up(); } /* Test receiving a response with DATA frames, the window update frame will be sent */ TEST_CASE(h2_client_stream_send_window_update) { /* Enable automatic window manager management */ s_tester.no_conn_manual_win_management = true; ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* Check the inital window update frame has been sent to maximize the connection window */ size_t initial_window_update_index = 0; struct h2_decoded_frame *initial_connection_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, 0 /*idx*/, &initial_window_update_index); ASSERT_NOT_NULL(initial_connection_window_update_frame); ASSERT_UINT_EQUALS( AWS_H2_WINDOW_UPDATE_MAX - AWS_H2_INIT_WINDOW_SIZE, initial_connection_window_update_frame->window_size_increment); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; 
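/* [Editorial note] Restating what this test asserts: with automatic window management, the connection replenishes
 * whatever a DATA payload consumed, once for the stream window and once for the connection window, and advertises
 * each increment to the peer as a WINDOW_UPDATE. A minimal sketch of that bookkeeping with hypothetical names, kept
 * in a comment so the translation unit is unchanged:
 *
 *     struct s_recv_windows { int64_t connection_window; int64_t stream_window; };
 *
 *     static void s_replenish_after_data(struct s_recv_windows *windows, size_t payload_len) {
 *         windows->connection_window += (int64_t)payload_len;
 *         windows->stream_window += (int64_t)payload_len;
 *     }
 *
 * Hence the two WINDOW_UPDATE frames of 5 asserted below after the peer's 5-byte "hello" DATA frame. */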
ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* fake peer sends 1 DATA frame */ const char *body_src = "hello"; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, false /*end_stream*/)); /* check that 2 WINDOW_UPDATE frames have been sent. * 1 for the connection, and 1 for the stream */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL); ASSERT_NOT_NULL(stream_window_update_frame); ASSERT_UINT_EQUALS(5, stream_window_update_frame->window_size_increment); struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, initial_window_update_index + 1 /*idx*/, NULL); ASSERT_NOT_NULL(connection_window_update_frame); ASSERT_UINT_EQUALS(5, connection_window_update_frame->window_size_increment); /* clean up */ aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Peer sends a frame larger than the window size we had on stream, will result in stream error */ TEST_CASE(h2_client_stream_err_received_data_flow_control) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); size_t window_size = 10; /* change the settings of the initial window size for new stream flow-control window */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, }; ASSERT_SUCCESS(aws_http2_connection_change_settings( s_tester.connection, settings_array, AWS_ARRAY_SIZE(settings_array), NULL /*callback function*/, NULL /*user_data*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; 
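/* [Editorial note] A sketch, not part of this test, of the receive-side rule being exercised: a DATA payload larger
 * than the window the receiver advertised is a flow-control violation, and on a stream it is answered with
 * RST_STREAM(FLOW_CONTROL_ERROR) while the connection stays open. The helper name is hypothetical and the code is
 * kept in a comment so the translation unit is unchanged:
 *
 *     static bool s_data_violates_window(int64_t advertised_window, size_t payload_len) {
 *         return (int64_t)payload_len > advertised_window;
 *     }
 *
 * Below, the client shrinks its per-stream receive window to 10 via SETTINGS_INITIAL_WINDOW_SIZE and the fake peer
 * sends an 11-byte DATA frame, which trips exactly this check. */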
aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); struct aws_byte_buf response_body_bufs; /* fake peer sends a DATA frame larger than the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, window_size + 1)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', window_size + 1)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, true /*end_stream*/)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent RST_STREAM with AWS_HTTP2_ERR_FLOW_CONTROL_ERROR */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR, rst_stream_frame->error_code); /* clean up */ aws_byte_buf_clean_up(&response_body_bufs); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } static int s_manual_window_management_tester_init(struct aws_allocator *alloc, bool conn, bool stream, void *ctx) { (void)ctx; aws_http_library_init(alloc); s_tester.alloc = alloc; struct aws_testing_channel_options options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&s_tester.testing_channel, alloc, &options)); struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 0}, }; struct aws_http2_connection_options http2_options = { .initial_settings_array = settings_array, .num_initial_settings = AWS_ARRAY_SIZE(settings_array), .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, .conn_manual_window_management = conn, }; s_tester.connection = aws_http_connection_new_http2_client(alloc, stream /* manual window management */, &http2_options); ASSERT_NOT_NULL(s_tester.connection); { /* re-enact marriage vows of http-connection and channel (handled by http-bootstrap in real world) */ struct aws_channel_slot *slot = aws_channel_slot_new(s_tester.testing_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(s_tester.testing_channel.channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &s_tester.connection->channel_handler)); 
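/* [Editorial note] The two flags threaded through this helper are what the following tests vary:
 * http2_options.conn_manual_window_management disables automatic WINDOW_UPDATEs for the connection window, and the
 * bool passed to aws_http_connection_new_http2_client (the helper's `stream` argument, commented "manual window
 * management") covers the stream-level windows. When a window is managed manually the tests must reopen it
 * themselves via window-update calls; h2_client_conn_err_received_data_flow_control below, for instance, keeps the
 * stream window open with aws_http_stream_update_window() while deliberately never replenishing the manually
 * managed connection window. */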
s_tester.connection->vtable->on_channel_handler_installed(&s_tester.connection->channel_handler, slot); } struct h2_fake_peer_options peer_options = { .alloc = alloc, .testing_channel = &s_tester.testing_channel, .is_server = true, }; ASSERT_SUCCESS(h2_fake_peer_init(&s_tester.peer, &peer_options)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); return AWS_OP_SUCCESS; } /* Peer sends a flow-controlled frame when the connection window-size is not enough for it will result in connection * flow-control error */ TEST_CASE(h2_client_conn_err_received_data_flow_control) { /* disable the connection automatic window update */ ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); struct aws_byte_buf response_body_bufs; /* The max body size here is limited. 
So we need to send multiple bodies to get the flow-control error */ size_t body_size = aws_max_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - AWS_H2_FRAME_PREFIX_SIZE; /* fake peer sends a DATA frame larger than the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', body_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); for (uint32_t i = 0; i < aws_h2_settings_initial[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / body_size; i++) { ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); /* manually update the stream flow-control window, ensure that stream window is available all the time */ aws_http_stream_update_window(stream_tester.stream, body_size); testing_channel_drain_queued_tasks(&s_tester.testing_channel); } ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* the last one will result in the connection flow control error */ ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, true /*end_stream*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the connection completed with error */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* client should send GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ aws_byte_buf_clean_up(&response_body_bufs); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Receiving invalid WINDOW_UPDATE frame of stream should result in a "Stream Error", invalid WINDOW_UPDATE frame of * connection should result in a "Connection Error". 
*/ static int s_invalid_window_update( struct aws_allocator *allocator, void *ctx, uint32_t window_update_size, enum aws_http2_error_code h2_error_code) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Send the largest update on stream, which will cause the flow-control window of stream exceeding the max */ struct aws_h2_frame *stream_window_update = aws_h2_frame_new_window_update(allocator, aws_http_stream_get_id(stream_tester.stream), window_update_size); ASSERT_NOT_NULL(stream_window_update); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, stream_window_update)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type); ASSERT_INT_EQUALS(h2_error_code, rst_stream_frame->error_code); /* Send the largest update on stream, which will cause the flow-control window of stream exceeding the max */ stream_window_update = aws_h2_frame_new_window_update(allocator, 0, window_update_size); ASSERT_NOT_NULL(stream_window_update); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, stream_window_update)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the connection completed with error */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* client should send GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(h2_error_code, goaway->error_code); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Window update cause window to exceed max size will lead to FLOW_CONTROL_ERROR */ TEST_CASE(h2_client_conn_err_window_update_exceed_max) { return s_invalid_window_update(allocator, ctx, AWS_H2_WINDOW_UPDATE_MAX, AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); } /* Window update with zero update size will lead to PROTOCOL_ERROR */ TEST_CASE(h2_client_conn_err_window_update_size_zero) { return s_invalid_window_update(allocator, ctx, 0, AWS_HTTP2_ERR_PROTOCOL_ERROR); } static int 
s_compare_settings_array( const struct aws_http2_setting *expected, const struct aws_http2_setting *got, int num_settings) { for (int i = 0; i < num_settings; ++i) { struct aws_http2_setting expected_settings = expected[i]; struct aws_http2_setting got_settings = got[i]; ASSERT_INT_EQUALS(expected_settings.id, got_settings.id); ASSERT_INT_EQUALS(expected_settings.value, got_settings.value); } return AWS_OP_SUCCESS; } /* SETTINGS_INITIAL_WINDOW_SIZE cause stream window to exceed the max size is a Connection ERROR... */ TEST_CASE(h2_client_conn_err_initial_window_size_settings_cause_window_exceed_max) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Send a small update on stream */ struct aws_h2_frame *stream_window_update = aws_h2_frame_new_window_update(allocator, aws_http_stream_get_id(stream_tester.stream), 1); ASSERT_NOT_NULL(stream_window_update); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, stream_window_update)); /* Then we set INITIAL_WINDOW_SIZE to largest - 1, which will not lead to any error */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = AWS_H2_WINDOW_UPDATE_MAX - 1}, }; struct aws_h2_frame *settings = aws_h2_frame_new_settings(allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, settings)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate connection is still open and callback invoked */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS(s_tester.user_data.num_settings, AWS_ARRAY_SIZE(settings_array)); ASSERT_SUCCESS(s_compare_settings_array( settings_array, s_tester.user_data.remote_settings_array, AWS_ARRAY_SIZE(settings_array))); s_tester.user_data.num_settings = 0; /* Finally we set INITIAL_WINDOW_SIZE to largest, which cause the stream window size to exceed the max size */ settings_array[0].value = AWS_H2_WINDOW_UPDATE_MAX; settings = aws_h2_frame_new_settings(allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, settings)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate callback is not invoked, num_settings is still 0 */ ASSERT_INT_EQUALS(0, s_tester.user_data.num_settings); /* validate the connection completed with error */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* client should send GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, 
AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* A server MAY finish the response before client done sending, and client just keep sending the rest of request. */ TEST_CASE(h2_client_stream_receive_end_stream_before_done_sending) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* get request ready */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); /* use a stalled body-stream so our test can send the response before the request is completely sent */ const char *body_src = "hello"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_tester(allocator, body_cursor); aws_http_message_set_body_stream(request, request_body); aws_input_stream_tester_set_max_bytes_per_read(request_body, 1); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* execute 1 event-loop tick, 1 byte of the body and header should be written */ testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NOT_NULL( h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_HEADERS, 0 /*search_start_idx*/, NULL)); struct h2_decoded_frame *sent_data_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_DATA, 0 /*search_start_idx*/, NULL); ASSERT_FALSE(sent_data_frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&sent_data_frame->data, "h")); /* fake peer sends complete response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), }; /* stop stalling the input stream */ aws_input_stream_tester_set_max_bytes_per_read(request_body, 5); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* No rst stream sent, we wait until the client finish sending body */ /* validate the client request completes successfully */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(404, stream_tester.response_status); /* Check the rest of the body received by peer */ 
ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rest_data_frame = h2_decode_tester_find_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_DATA, frames_count /*search_start_idx*/, NULL); ASSERT_TRUE(rest_data_frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&rest_data_frame->data, "ello")); /* clean up */ aws_http_headers_release(response_headers); client_stream_tester_clean_up(&stream_tester); aws_http_message_release(request); aws_input_stream_release(request_body); return s_tester_clean_up(); } /* A server MAY request that the client abort transmission of a request without error by sending a * RST_STREAM with an error code of NO_ERROR after sending a complete response. */ TEST_CASE(h2_client_stream_receive_end_stream_and_rst_before_done_sending) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* get request ready */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); /* use a stalled body-stream so our test can send the response before the request is completely sent */ const char *body_src = "hello"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_tester(allocator, body_cursor); aws_http_message_set_body_stream(request, request_body); aws_input_stream_tester_set_max_bytes_per_read(request_body, 0); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* execute 1 event-loop tick, the HEADERS should be written * (don't drain task queue or we'll infinite loop waiting for stalled body) */ testing_channel_run_currently_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NOT_NULL( h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_HEADERS, 0 /*search_start_idx*/, NULL)); /* fake peer sends complete response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* fake peer sends RST_STREAM with error-code NO_ERROR */ response_frame = aws_h2_frame_new_rst_stream(allocator, stream_id, AWS_HTTP2_ERR_NO_ERROR); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* validate the client request completes successfully */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(404, 
stream_tester.response_status); /* Check no data frame received by the peer */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NULL(h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_DATA, 0 /*search_start_idx*/, NULL)); /* clean up */ aws_http_headers_release(response_headers); client_stream_tester_clean_up(&stream_tester); aws_http_message_release(request); aws_input_stream_release(request_body); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_err_input_stream_failure) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* get request ready */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "POST"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); /* use a stalled body-stream so our test can send the response before the request is completely sent */ const char *body_src = "hello"; struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src); struct aws_input_stream *request_body = aws_input_stream_new_tester(allocator, body_cursor); aws_http_message_set_body_stream(request, request_body); aws_input_stream_tester_set_reading_broken(request_body, true /*is_broken*/); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_IO_STREAM_READ_FAILED, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_INT_EQUALS(AWS_HTTP2_ERR_INTERNAL_ERROR, rst_stream_frame->error_code); /* clean up */ client_stream_tester_clean_up(&stream_tester); aws_http_message_release(request); aws_input_stream_release(request_body); return s_tester_clean_up(); } /* A request stream that receives RST_STREAM should terminate */ TEST_CASE(h2_client_stream_err_receive_rst_stream) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = 
aws_http_stream_get_id(stream_tester.stream); /* fake peer sends RST_STREAM */ struct aws_h2_frame *rst_stream = aws_h2_frame_new_rst_stream(allocator, stream_id, AWS_HTTP2_ERR_HTTP_1_1_REQUIRED); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, rst_stream)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_RST_STREAM_RECEIVED, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream did NOT send RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NULL(h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL)); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* We don't fully support PUSH_PROMISE, so we automatically send RST_STREAM to reject any promised streams. * Why, you ask, don't we simply send SETTINGS_ENABLE_PUSH=0 in the initial SETTINGS frame and call it a day? * Because it's theoretically possible for a server to start sending PUSH_PROMISE frames in the initial * response, before sending the ACK to the initial SETTINGS. */ TEST_CASE(h2_client_push_promise_automatically_rejected) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":authority", "veryblackpage.com"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends push request (PUSH_PROMISE) */ struct aws_http_header push_request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":authority", "veryblackpage.com"), DEFINE_HEADER(":path", "/style.css"), }; struct aws_http_headers *push_request_headers = aws_http_headers_new(allocator); ASSERT_SUCCESS(aws_http_headers_add_array( push_request_headers, push_request_headers_src, AWS_ARRAY_SIZE(push_request_headers_src))); uint32_t promised_stream_id = 2; struct aws_h2_frame *peer_frame = aws_h2_frame_new_push_promise(allocator, stream_id, promised_stream_id, push_request_headers, 0); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* fake peer sends push response RIGHT AWAY before there's any possibility of receiving RST_STREAM */ struct aws_http_header push_response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *push_response_headers = aws_http_headers_new(allocator); ASSERT_SUCCESS(aws_http_headers_add_array( push_response_headers, push_response_headers_src, AWS_ARRAY_SIZE(push_response_headers_src))); peer_frame = aws_h2_frame_new_headers(allocator, promised_stream_id, push_response_headers, false 
/*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str( &s_tester.peer, promised_stream_id, "body {background-color: black;}", true /*end_stream*/)); /* fake peer sends response to the initial request */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); peer_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); const char *body_src = ""; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, true /*end_stream*/)); /* validate that stream completed successfully. */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_BIN_ARRAYS_EQUALS( body_src, strlen(body_src), stream_tester.response_body.buffer, stream_tester.response_body.len); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that client automatically sent RST_STREAM to reject the promised stream */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *client_sent_rst_stream = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, promised_stream_id, 0, NULL); ASSERT_NOT_NULL(client_sent_rst_stream); /* clean up */ aws_http_headers_release(push_request_headers); aws_http_headers_release(push_response_headers); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Test client receives the GOAWAY frame, stop creating new stream and complete the streams whose id are higher than the * last stream id included in GOAWAY frame, and callback invoked */ TEST_CASE(h2_client_conn_receive_goaway) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send multiple requests */ enum { NUM_STREAMS = 3 }; struct aws_http_message *requests[NUM_STREAMS]; struct aws_http_header request_headers_src[NUM_STREAMS][3] = { { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/a.txt"), }, { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/b.txt"), }, { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/c.txt"), }, }; struct client_stream_tester stream_testers[NUM_STREAMS]; for (size_t i = 0; i < NUM_STREAMS; ++i) { requests[i] = aws_http2_message_new_request(allocator); aws_http_message_add_header_array(requests[i], request_headers_src[i], AWS_ARRAY_SIZE(request_headers_src[i])); } /* Send the first two requests */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[0], requests[0])); ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[1], requests[1])); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* 
fake peer sends a GOAWAY frame indicating only the first request will be processed */
    uint32_t stream_id = aws_http_stream_get_id(stream_testers[0].stream);
    struct aws_byte_cursor debug_info;
    AWS_ZERO_STRUCT(debug_info);
    struct aws_h2_frame *peer_frame = aws_h2_frame_new_goaway(allocator, stream_id, AWS_HTTP2_ERR_NO_ERROR, debug_info);
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    /* validate the callback was invoked and the information recorded during the callback */
    ASSERT_INT_EQUALS(s_tester.user_data.http2_error, AWS_HTTP2_ERR_NO_ERROR);
    ASSERT_INT_EQUALS(s_tester.user_data.last_stream_id, stream_id);
    /* validate the connection is still open, and the second request finished with GOAWAY_RECEIVED */
    ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection));
    ASSERT_FALSE(stream_testers[0].complete);
    ASSERT_TRUE(stream_testers[1].complete);
    ASSERT_INT_EQUALS(AWS_ERROR_HTTP_GOAWAY_RECEIVED, stream_testers[1].on_complete_error_code);
    /* validate that a new request will not be accepted */
    ASSERT_FAILS(s_stream_tester_init(&stream_testers[2], requests[2]));
    /* Try gracefully shutting down the connection */
    struct aws_http_header response_headers_src[] = {DEFINE_HEADER(":status", "200")};
    struct aws_http_headers *response_headers = aws_http_headers_new(allocator);
    aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src));
    struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(
        allocator, aws_http_stream_get_id(stream_testers[0].stream), response_headers, true /* end_stream */, 0, NULL);
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame));
    /* shutdown channel */
    aws_channel_shutdown(s_tester.testing_channel.channel, AWS_ERROR_SUCCESS);
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_TRUE(testing_channel_is_shutdown_completed(&s_tester.testing_channel));
    /* validate the first request finishes successfully */
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_TRUE(stream_testers[0].complete);
    ASSERT_INT_EQUALS(200, stream_testers[0].response_status);
    ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection));
    /* clean up */
    aws_http_headers_release(response_headers);
    for (size_t i = 0; i < NUM_STREAMS; ++i) {
        client_stream_tester_clean_up(&stream_testers[i]);
        aws_http_message_release(requests[i]);
    }
    return s_tester_clean_up();
}

/* Test client receives the GOAWAY frame with the debug data correctly */
TEST_CASE(h2_client_conn_receive_goaway_debug_data) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));
    /* get connection preface and acks out of the way */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    /* fake peer sends a GOAWAY frame carrying opaque debug data */
    uint32_t stream_id = 1;
    const char debug_string[] = "Error, Core Dump 0XFFFFFFFF";
    struct aws_byte_cursor debug_info = aws_byte_cursor_from_c_str(debug_string);
    struct aws_h2_frame *peer_frame = aws_h2_frame_new_goaway(allocator, stream_id, AWS_HTTP2_ERR_NO_ERROR, debug_info);
    ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    /* validate the callback was invoked and the information recorded during the callback */
    ASSERT_INT_EQUALS(s_tester.user_data.http2_error,
AWS_HTTP2_ERR_NO_ERROR); ASSERT_INT_EQUALS(s_tester.user_data.last_stream_id, stream_id); ASSERT_TRUE(aws_byte_buf_eq_c_str(&s_tester.user_data.debug_data, debug_string)); return s_tester_clean_up(); } /* Test client receives the GOAWAY frame with invalid last stream id and connection error happened, and callback will * not be invoked for the invalid GOAWAY frame */ TEST_CASE(h2_client_conn_err_invalid_last_stream_id_goaway) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* fake peer send multiple GOAWAY frames */ struct aws_byte_cursor debug_info; AWS_ZERO_STRUCT(debug_info); /* First on with last_stream_id as AWS_H2_STREAM_ID_MAX */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_goaway(allocator, AWS_H2_STREAM_ID_MAX, AWS_HTTP2_ERR_NO_ERROR, debug_info); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* validate the callback invoked and the information recorded during callback */ ASSERT_INT_EQUALS(s_tester.user_data.http2_error, AWS_HTTP2_ERR_NO_ERROR); ASSERT_INT_EQUALS(s_tester.user_data.last_stream_id, AWS_H2_STREAM_ID_MAX); int last_stream_id = 1; /* Second one with last_stream_id as 1 and some error */ peer_frame = aws_h2_frame_new_goaway(allocator, last_stream_id, AWS_HTTP2_ERR_FLOW_CONTROL_ERROR, debug_info); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_INT_EQUALS(s_tester.user_data.http2_error, AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); ASSERT_INT_EQUALS(s_tester.user_data.last_stream_id, last_stream_id); /* validate the connection is still open, everything is fine */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* Another GOAWAY with higher last stream id will cause connection closed with an error */ peer_frame = aws_h2_frame_new_goaway(allocator, last_stream_id + 1, AWS_HTTP2_ERR_FLOW_CONTROL_ERROR, debug_info); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the callback is not invoked and the information is still the same as the second one */ ASSERT_INT_EQUALS(s_tester.user_data.http2_error, AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); ASSERT_INT_EQUALS(s_tester.user_data.last_stream_id, last_stream_id); ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* clean up */ return s_tester_clean_up(); } static void s_on_completed(struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; int *callback_error_code = user_data; *callback_error_code = error_code; } /* Test the user API for changing HTTP/2 connection settings */ TEST_CASE(h2_client_change_settings_succeed) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* client sent the preface and first settings */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *first_written_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 0); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, first_written_frame->type); ASSERT_FALSE(first_written_frame->ack); /* We disabled the push_promise at the initial setting, let's use user 
API to enable it. */ /* Use user API to change HTTP/2 connection settings */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 1}, }; int callback_error_code = INT32_MAX; ASSERT_SUCCESS(aws_http2_connection_change_settings( s_tester.connection, settings_array, AWS_ARRAY_SIZE(settings_array), s_on_completed, &callback_error_code)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* check the settings frame is sent */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *second_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 1); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, second_frame->type); ASSERT_FALSE(second_frame->ack); ASSERT_INT_EQUALS(1, second_frame->settings.length); struct aws_http2_setting setting_received; aws_array_list_front(&second_frame->settings, &setting_received); ASSERT_INT_EQUALS(AWS_HTTP2_SETTINGS_ENABLE_PUSH, setting_received.id); ASSERT_INT_EQUALS(1, setting_received.value); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the callback has NOT fired after the first settings ack frame, the user_data has not changed */ ASSERT_INT_EQUALS(INT32_MAX, callback_error_code); peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* Check the callback has fired after the second settings ack frame, the error code we got is NO_ERROR(0) */ ASSERT_INT_EQUALS(0, callback_error_code); /* Check empty settings can be sent */ callback_error_code = INT32_MAX; ASSERT_SUCCESS( aws_http2_connection_change_settings(s_tester.connection, NULL, 0, s_on_completed, &callback_error_code)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* check the empty settings frame is sent */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *second_settings = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, second_settings->type); ASSERT_FALSE(second_settings->ack); ASSERT_INT_EQUALS(0, second_settings->settings.length); peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* Check the callback has fired after the second settings ack frame, the error code we got is NO_ERROR(0) */ ASSERT_INT_EQUALS(0, callback_error_code); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends push request (PUSH_PROMISE) */ struct aws_http_header 
push_request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":authority", "veryblackpage.com"), DEFINE_HEADER(":path", "/style.css"), }; struct aws_http_headers *push_request_headers = aws_http_headers_new(allocator); ASSERT_SUCCESS(aws_http_headers_add_array( push_request_headers, push_request_headers_src, AWS_ARRAY_SIZE(push_request_headers_src))); uint32_t promised_stream_id = 2; peer_frame = aws_h2_frame_new_push_promise(allocator, stream_id, promised_stream_id, push_request_headers, 0); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the connection is still open */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_http_headers_release(push_request_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Test the user API for changing HTTP/2 connection settings and no settings ACK received from peer */ TEST_CASE(h2_client_change_settings_failed_no_ack_received) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* client sent the preface and first settings */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *first_written_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 0); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, first_written_frame->type); ASSERT_FALSE(first_written_frame->ack); /* request changing setting */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 1}, }; int callback_error_code = INT32_MAX; ASSERT_SUCCESS(aws_http2_connection_change_settings( s_tester.connection, settings_array, AWS_ARRAY_SIZE(settings_array), s_on_completed, &callback_error_code)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); /* fake peer sends one settings ack back the initial settings */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the callback has NOT fired after the first settings ack frame, the user_data has not changed */ ASSERT_INT_EQUALS(INT32_MAX, callback_error_code); /* shutdown the connection */ h2_fake_peer_clean_up(&s_tester.peer); aws_http_connection_release(s_tester.connection); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the callback has fired with error, after connection shutdown */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, callback_error_code); ASSERT_SUCCESS(testing_channel_clean_up(&s_tester.testing_channel)); /* clean up */ aws_http_library_clean_up(); return AWS_OP_SUCCESS; } /* Test manual window management for stream successfully disabled the automatically window update */ TEST_CASE(h2_client_manual_window_management_disabled_auto_window_update) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); size_t window_size = 10; /* change the settings of the initial window size for new stream flow-control window */ struct 
aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, }; ASSERT_SUCCESS(aws_http2_connection_change_settings( s_tester.connection, settings_array, AWS_ARRAY_SIZE(settings_array), NULL /*callback function*/, NULL /*user_data*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); struct aws_byte_buf response_body_bufs; /* fake peer sends a DATA frame take all the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, window_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', window_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate no window_update for stream frame sent automatically */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NULL(h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL)); /* validate that stream is still open */ ASSERT_FALSE(stream_tester.complete); /* peer send another flow-controlled frame will result in stream flow control error */ ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, true /*end_stream*/)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent RST_STREAM with AWS_HTTP2_ERR_FLOW_CONTROL_ERROR */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); 
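/* note: with manual stream window management the 10-byte stream window was never replenished, so the second
 * DATA frame overflows the advertised window; per RFC 7540 §6.9.1 the receiver treats that as FLOW_CONTROL_ERROR,
 * which is the code the RST_STREAM checked below must carry */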
struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_stream_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, stream_id, 0, NULL); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR, rst_stream_frame->error_code); /* clean up */ aws_byte_buf_clean_up(&response_body_bufs); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); size_t window_size = 10; /* change the settings of the initial window size for new stream flow-control window */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, }; ASSERT_SUCCESS(aws_http2_connection_change_settings( s_tester.connection, settings_array, AWS_ARRAY_SIZE(settings_array), NULL /*callback function*/, NULL /*user_data*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); struct aws_byte_buf response_body_bufs; /* fake peer sends a DATA frame take all the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, window_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', window_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate no window_update frame for stream sent automatically */ 
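/* with manual stream window management, replenishing the window is left to the application; no WINDOW_UPDATE
 * goes out until something like the following is called (illustrative names, mirroring this test's next step):
 *     aws_http_stream_update_window(stream, bytes_consumed);
 */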
ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NULL(h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL)); /* call API to update the stream window */ aws_http_stream_update_window(stream_tester.stream, window_size); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate stream window_update frame was sent */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL); ASSERT_NOT_NULL(stream_window_update_frame); ASSERT_UINT_EQUALS(window_size, stream_window_update_frame->window_size_increment); /* validate that stream is still open */ ASSERT_FALSE(stream_tester.complete); /* peer send another flow-controlled frame will success */ ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, true /*end_stream*/)); /* validate that stream received complete response */ struct aws_byte_buf expected_body; ASSERT_SUCCESS(aws_byte_buf_init(&expected_body, allocator, 2 * window_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&expected_body, (uint8_t)'a', 2 * window_size)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); ASSERT_TRUE(aws_byte_buf_eq(&stream_tester.response_body, &expected_body)); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_byte_buf_clean_up(&response_body_bufs); aws_byte_buf_clean_up(&expected_body); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_with_padding) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); size_t window_size = 20; size_t padding_length = 10; size_t data_length = window_size - padding_length - 1; /* change the settings of the initial window size for new stream flow-control window */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, }; ASSERT_SUCCESS(aws_http2_connection_change_settings( s_tester.connection, settings_array, AWS_ARRAY_SIZE(settings_array), NULL /*callback function*/, NULL /*user_data*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); 
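/* note on the sizes chosen above: the Pad Length byte and the padding itself count against flow control
 * (RFC 7540 §6.9), so data_length + padding_length + 1 fills the 20-byte stream window exactly; the connection
 * is expected to auto-release the padding_length + 1 bytes, leaving only data_length for the user to update */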
ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); struct aws_byte_buf response_body_bufs; /* fake peer sends a DATA frame take all the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, data_length)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', data_length)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); ASSERT_SUCCESS(h2_fake_peer_send_data_frame_with_padding_length( &s_tester.peer, stream_id, body_cursor, false /*end_stream*/, (uint8_t)padding_length)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate no window_update frame for stream sent automatically */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* padding and padding length should be updated automatically */ size_t end_index = 0; struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, &end_index); ASSERT_NOT_NULL(stream_window_update_frame); ASSERT_UINT_EQUALS( padding_length + 1 /*one byte for padding length*/, stream_window_update_frame->window_size_increment); /* call API to update the stream window */ aws_http_stream_update_window(stream_tester.stream, data_length); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate stream window_update frame from user was sent */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); stream_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, end_index + 1, NULL); ASSERT_NOT_NULL(stream_window_update_frame); ASSERT_UINT_EQUALS(data_length, stream_window_update_frame->window_size_increment); /* validate that stream is still open */ ASSERT_FALSE(stream_tester.complete); /* peer send another flow-controlled frame will success */ ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, true /*end_stream*/)); /* validate that stream received complete response */ struct aws_byte_buf expected_body; ASSERT_SUCCESS(aws_byte_buf_init(&expected_body, allocator, 2 * data_length)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&expected_body, (uint8_t)'a', 2 * data_length)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); 
ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); ASSERT_TRUE(aws_byte_buf_eq(&stream_tester.response_body, &expected_body)); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_byte_buf_clean_up(&response_body_bufs); aws_byte_buf_clean_up(&expected_body); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_overflow) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* call API to update the stream window and cause a overflow */ aws_http_stream_update_window(stream_tester.stream, INT32_MAX); aws_http_stream_update_window(stream_tester.stream, INT32_MAX); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate that stream completed with error */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); ASSERT_TRUE(stream_tester.complete); /* overflow happens */ ASSERT_INT_EQUALS(AWS_ERROR_OVERFLOW_DETECTED, stream_tester.on_complete_error_code); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL); /* But the error code is not the same as user was trying to send */ ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_INTERNAL_ERROR, rst_stream_frame->error_code); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } /* Peer sends a flow-controlled frame when the connection window-size is not enough for it will result in connection * flow-control error */ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; 
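/* note: the connection-level flow-control window starts at 65,535 bytes and cannot be resized via SETTINGS
 * (RFC 7540 §6.9.2); with manual connection window management enabled for this tester it is only replenished by
 * the aws_http2_connection_update_window() calls in the loop below, which is what lets the peer keep sending
 * full-size bodies without tripping a flow-control error */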
ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); struct aws_byte_buf response_body_bufs; /* The max body size here is limited. So we need to send multiple bodies to get the flow-control error */ size_t body_size = aws_max_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - AWS_H2_FRAME_PREFIX_SIZE; /* fake peer sends a DATA frame larger than the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', body_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); /* number of bodies peer will send, just to ensure the connection flow-control window will not be blocked when we * manually update it */ size_t body_number = 2 * aws_h2_settings_initial[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / body_size; for (size_t i = 0; i < body_number; i++) { if (i == body_number - 1) { ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, true /*end_stream*/)); } else { ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); } /* manually update the stream and connection flow-control window. 
*/ aws_http_stream_update_window(stream_tester.stream, body_size); aws_http2_connection_update_window(s_tester.connection, (uint32_t)body_size); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL); ASSERT_NOT_NULL(stream_window_update_frame); ASSERT_UINT_EQUALS(body_size, stream_window_update_frame->window_size_increment); struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, 0 /*idx*/, NULL); ASSERT_NOT_NULL(connection_window_update_frame); ASSERT_UINT_EQUALS(body_size, connection_window_update_frame->window_size_increment); } ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream received complete response */ struct aws_byte_buf expected_body; ASSERT_SUCCESS(aws_byte_buf_init(&expected_body, allocator, body_number * body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&expected_body, (uint8_t)'a', body_number * body_size)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); ASSERT_TRUE(aws_byte_buf_eq(&stream_tester.response_body, &expected_body)); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_byte_buf_clean_up(&response_body_bufs); aws_byte_buf_clean_up(&expected_body); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update_with_padding) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, 
response_frame)); struct aws_byte_buf response_body_bufs; /* The max body size here is limited. So we need to send multiple bodies to get the flow-control error */ size_t padding_size = 10; size_t body_size = aws_max_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - AWS_H2_FRAME_PREFIX_SIZE - padding_size - 1; /* fake peer sends a DATA frame larger than the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', body_size)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); /* number of bodies peer will send, just to ensure the connection flow-control window will not be blocked when we * manually update it */ size_t body_number = 2 * aws_h2_settings_initial[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / body_size; for (size_t i = 0; i < body_number; i++) { if (i == body_number - 1) { ASSERT_SUCCESS(h2_fake_peer_send_data_frame_with_padding_length( &s_tester.peer, stream_id, body_cursor, true /*end_stream*/, (uint8_t)padding_size)); } else { ASSERT_SUCCESS(h2_fake_peer_send_data_frame_with_padding_length( &s_tester.peer, stream_id, body_cursor, false /*end_stream*/, (uint8_t)padding_size)); } testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t out_index = 0; /* The update for padding and padding length should be sent */ struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, 0 /*idx*/, &out_index); ASSERT_NOT_NULL(connection_window_update_frame); ASSERT_UINT_EQUALS( padding_size + 1 /* one byte for padding length */, connection_window_update_frame->window_size_increment); /* manually update the stream and connection flow-control window. 
*/ aws_http_stream_update_window(stream_tester.stream, body_size); aws_http2_connection_update_window(s_tester.connection, (uint32_t)body_size); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); connection_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, out_index + 1 /*idx*/, &out_index); ASSERT_NOT_NULL(connection_window_update_frame); ASSERT_UINT_EQUALS(body_size, connection_window_update_frame->window_size_increment); } ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream received complete response */ struct aws_byte_buf expected_body; ASSERT_SUCCESS(aws_byte_buf_init(&expected_body, allocator, body_number * body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&expected_body, (uint8_t)'a', body_number * body_size)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); ASSERT_INT_EQUALS(200, stream_tester.response_status); ASSERT_SUCCESS(s_compare_headers(response_headers, stream_tester.response_headers)); ASSERT_TRUE(aws_byte_buf_eq(&stream_tester.response_body, &expected_body)); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* clean up */ aws_byte_buf_clean_up(&response_body_bufs); aws_byte_buf_clean_up(&expected_body); aws_http_headers_release(response_headers); aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_manual_window_management_user_send_connection_window_update_overflow) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* update the connection window to cause an overflow */ aws_http2_connection_update_window(s_tester.connection, INT32_MAX); aws_http2_connection_update_window(s_tester.connection, INT32_MAX); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate that connection closed with error */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); /* client should send GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_INTERNAL_ERROR, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ return s_tester_clean_up(); } struct ping_user_data { uint64_t rtt_ns; int error_code; }; static void on_ping_complete( struct aws_http_connection *connection, uint64_t round_trip_time_ns, int error_code, void *user_data) { (void)connection; struct ping_user_data *data = user_data; data->error_code = error_code; data->rtt_ns = round_trip_time_ns; } /* Test the user API for PING successfully get the round trip time */ TEST_CASE(h2_client_send_ping_successfully_receive_ack) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); 
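/* PING payloads are exactly AWS_HTTP2_PING_DATA_SIZE (8) octets (RFC 7540 §6.7); the "12345678" cursor below is
 * sized to match, and passing NULL instead (as the next test does) sends an all-zero payload. Typical application
 * usage mirrors this test (illustrative variable names; NULL means a zeroed payload):
 *     aws_http2_connection_ping(connection, NULL, on_ping_complete, &my_user_data);
 */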
ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_byte_cursor opaque_data = aws_byte_cursor_from_c_str("12345678"); struct ping_user_data data = {.rtt_ns = 0, .error_code = INT32_MAX}; /* client request a PING */ ASSERT_SUCCESS(aws_http2_connection_ping(s_tester.connection, &opaque_data, on_ping_complete, &data)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* check ping frame received */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *ping_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_PING, 0, NULL); ASSERT_BIN_ARRAYS_EQUALS( opaque_data.ptr, AWS_HTTP2_PING_DATA_SIZE, ping_frame->ping_opaque_data, AWS_HTTP2_PING_DATA_SIZE); ASSERT_FALSE(ping_frame->ack); /* fake peer send PING ACK */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_ping(allocator, true /*ACK*/, ping_frame->ping_opaque_data); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* check callback fired, and succeed */ ASSERT_INT_EQUALS(0, data.error_code); ASSERT_FALSE(data.rtt_ns == 0); /* clean up */ return s_tester_clean_up(); } /* Test the user request a PING, but peer never sends PING ACK back */ TEST_CASE(h2_client_send_ping_no_ack_received) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct ping_user_data data = {.rtt_ns = 0, .error_code = INT32_MAX}; /* client request a PING */ ASSERT_SUCCESS(aws_http2_connection_ping(s_tester.connection, NULL, on_ping_complete, &data)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* check ping frame received */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *ping_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_PING, 0, NULL); uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]; AWS_ZERO_ARRAY(opaque_data); /* Zeroed 8 bytes data received */ ASSERT_BIN_ARRAYS_EQUALS( opaque_data, AWS_HTTP2_PING_DATA_SIZE, ping_frame->ping_opaque_data, AWS_HTTP2_PING_DATA_SIZE); ASSERT_FALSE(ping_frame->ack); /* shutdown the connection */ h2_fake_peer_clean_up(&s_tester.peer); aws_http_connection_release(s_tester.connection); ASSERT_SUCCESS(testing_channel_clean_up(&s_tester.testing_channel)); /* Check the callback has fired with error */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, data.error_code); ASSERT_TRUE(data.rtt_ns == 0); /* clean up */ aws_http_library_clean_up(); return AWS_OP_SUCCESS; } /* Test the user request a PING, but peer sends an extra PING ACK */ TEST_CASE(h2_client_conn_err_extraneous_ping_ack_received) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_byte_cursor opaque_data = aws_byte_cursor_from_c_str("12345678"); /* client request a PING */ ASSERT_SUCCESS(aws_http2_connection_ping(s_tester.connection, &opaque_data, NULL, NULL)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); struct aws_h2_frame *peer_frame = aws_h2_frame_new_ping(allocator, true /*ACK*/, opaque_data.ptr); 
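/* The ACK built above matches the one outstanding PING; the extra ACK sent right after it has no corresponding PING, which the client treats as a connection-level protocol error: the connection shuts down and a GOAWAY with PROTOCOL_ERROR goes out, as asserted below. */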
ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* fake peer send an extra PING ACK */ peer_frame = aws_h2_frame_new_ping(allocator, true /*ACK*/, opaque_data.ptr); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the connection completed with error */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* client should send GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ return s_tester_clean_up(); } /* Test the user request a PING, but peer sends the PING ACK with mismatched opaque_data */ TEST_CASE(h2_client_conn_err_mismatched_ping_ack_received) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_byte_cursor opaque_data = aws_byte_cursor_from_c_str("12345678"); /* client request a PING with all zero opaque_data */ ASSERT_SUCCESS(aws_http2_connection_ping(s_tester.connection, NULL, NULL, NULL)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* peer sends PING ACK with numbers in payload */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_ping(allocator, true /*ACK*/, opaque_data.ptr); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the connection completed with error */ ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); ASSERT_INT_EQUALS( AWS_ERROR_HTTP_PROTOCOL_ERROR, testing_channel_get_shutdown_error_code(&s_tester.testing_channel)); /* client should send GOAWAY */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *goaway = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); ASSERT_NOT_NULL(goaway); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, goaway->error_code); ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); /* clean up */ return s_tester_clean_up(); } TEST_CASE(h2_client_empty_initial_settings) { (void)ctx; aws_http_library_init(allocator); s_tester.alloc = allocator; struct aws_testing_channel_options options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&s_tester.testing_channel, allocator, &options)); /* empty initial settings */ struct aws_http2_connection_options http2_options = { .on_initial_settings_completed = s_on_initial_settings_completed, .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, .on_remote_settings_change = s_on_remote_settings_change, }; s_tester.connection = aws_http_connection_new_http2_client(allocator, false /* manual window management */, &http2_options); ASSERT_NOT_NULL(s_tester.connection); { /* set connection user_data (handled by http-bootstrap in real world) */ s_tester.connection->user_data = &s_tester.user_data; /* re-enact marriage vows of http-connection and channel (handled by 
http-bootstrap in real world) */ struct aws_channel_slot *slot = aws_channel_slot_new(s_tester.testing_channel.channel); ASSERT_NOT_NULL(slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(s_tester.testing_channel.channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &s_tester.connection->channel_handler)); s_tester.connection->vtable->on_channel_handler_installed(&s_tester.connection->channel_handler, slot); } struct h2_fake_peer_options peer_options = { .alloc = allocator, .testing_channel = &s_tester.testing_channel, .is_server = true, }; ASSERT_SUCCESS(h2_fake_peer_init(&s_tester.peer, &peer_options)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* shutdown the connection */ h2_fake_peer_clean_up(&s_tester.peer); aws_http_connection_release(s_tester.connection); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the callback has fired with error, after connection shutdown */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, s_tester.user_data.initial_settings_error_code); ASSERT_SUCCESS(testing_channel_clean_up(&s_tester.testing_channel)); /* clean up */ aws_http_library_clean_up(); return AWS_OP_SUCCESS; } TEST_CASE(h2_client_conn_failed_initial_settings_completed_not_invoked) { (void)ctx; aws_http_library_init(allocator); s_tester.alloc = allocator; struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 0}, }; struct aws_http2_connection_options http2_options = { .initial_settings_array = settings_array, .num_initial_settings = AWS_ARRAY_SIZE(settings_array), .on_initial_settings_completed = s_on_initial_settings_completed, .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, .on_remote_settings_change = s_on_remote_settings_change, }; s_tester.connection = aws_http_connection_new_http2_client(allocator, false /* manual window management */, &http2_options); ASSERT_NOT_NULL(s_tester.connection); s_tester.user_data.initial_settings_error_code = INT32_MAX; { /* set connection user_data (handled by http-bootstrap in real world) */ s_tester.connection->user_data = &s_tester.user_data; /* pretend the connection failed, and destroy the handler (handled by http-bootstrap in real world) */ aws_channel_handler_destroy(&s_tester.connection->channel_handler); } /* Check callback has not fired and the error code is still INT32_MAX */ ASSERT_INT_EQUALS(INT32_MAX, s_tester.user_data.initial_settings_error_code); /* clean up */ aws_http_library_clean_up(); return AWS_OP_SUCCESS; } TEST_CASE(h2_client_stream_reset_stream) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t frames_count =
h2_decode_tester_frame_count(&s_tester.peer.decode); /* reset stream with no error */ ASSERT_SUCCESS(aws_http2_stream_reset(stream_tester.stream, AWS_HTTP2_ERR_NO_ERROR)); /* stream can only be reset once, the second reset will not fail but will be ignored */ ASSERT_SUCCESS(aws_http2_stream_reset(stream_tester.stream, AWS_HTTP2_ERR_CANCEL)); /* validate that stream completed with error. */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_RST_STREAM_SENT, stream_tester.on_complete_error_code); /* a stream error should not affect the connection */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* validate that stream sent only the first RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_TRUE(frames_count + 1 == h2_decode_tester_frame_count(&s_tester.peer.decode)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_RST_STREAM, rst_stream_frame->type); ASSERT_INT_EQUALS(AWS_HTTP2_ERR_NO_ERROR, rst_stream_frame->error_code); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_reset_ignored_stream_closed) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* Request to reset stream after error has happened, still get success back, but the error code will be ignored */ ASSERT_SUCCESS(aws_http2_stream_reset(stream_tester.stream, AWS_HTTP2_ERR_CANCEL)); /* Before the async call finishes, an error happens and the stream is closed because of it */ /* fake peer sends response body BEFORE any response headers, which leads to an error and the stream will close */ const char *body_src = "hello"; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, true /*end_stream*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* A possible race condition between the "real error" and the user's request to reset the stream in real life, which will lead to * a possibly different error code in rst_stream. User can call aws_http2_stream_get_sent_reset_error_code to query the * error code we sent to the peer.
*/ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL); /* But the error code is not the same as user was trying to send */ ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, rst_stream_frame->error_code); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_reset_failed_before_activate_called) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); /* reset will fail before activate called */ ASSERT_FAILS(aws_http2_stream_reset(stream, AWS_HTTP2_ERR_NO_ERROR)); /* Once you activate the stream, you are able to reset it */ ASSERT_SUCCESS(aws_http_stream_activate(stream)); ASSERT_SUCCESS(aws_http2_stream_reset(stream, AWS_HTTP2_ERR_ENHANCE_YOUR_CALM)); /* validate rst_stream is sent */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_ENHANCE_YOUR_CALM, rst_stream_frame->error_code); /* clean up */ aws_http_message_release(request); aws_http_stream_release(stream); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_cancel_stream) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, }; struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Cancel the request */ aws_http_stream_cancel(stream_tester.stream, AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN); 
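/* Cancellation is asynchronous: the stream completes locally with the caller-supplied error code, while the RST_STREAM actually sent to the peer carries AWS_HTTP2_ERR_CANCEL; both are asserted below. */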
testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN, stream_tester.on_complete_error_code); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL); /* But the error code is not the same as user was trying to send */ ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_CANCEL, rst_stream_frame->error_code); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_keeps_alive_for_cross_thread_task) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); /* fake peer sends response */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, true /*end_stream*/, 0, NULL); /* User reset the stream */ ASSERT_SUCCESS(aws_http2_stream_reset(stream_tester.stream, AWS_HTTP2_ERR_ENHANCE_YOUR_CALM)); /* Before the async call finishes, the stream completes */ ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); /* And user releases the stream */ aws_http_stream_release(stream_tester.stream); /* Task should finish without error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, stream_tester.on_complete_error_code); /* validate that no RST_STREAM sent */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_NULL(h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL)); /* clean up */ aws_http_message_release(request); aws_http_headers_release(response_headers); /* clean up stream_tester */ for (size_t i = 0; i < stream_tester.num_info_responses; ++i) { aws_http_message_release(stream_tester.info_responses[i]); } aws_http_headers_release(stream_tester.current_info_headers); aws_http_headers_release(stream_tester.response_headers); aws_http_headers_release(stream_tester.response_trailer); aws_byte_buf_clean_up(&stream_tester.response_body); return 
s_tester_clean_up(); } TEST_CASE(h2_client_stream_get_received_reset_error_code) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); uint32_t http2_error; /* Before rst_stream received, get function will fail */ ASSERT_FAILS(aws_http2_stream_get_received_reset_error_code(stream_tester.stream, &http2_error)); /* fake peer sends RST_STREAM */ struct aws_h2_frame *rst_stream = aws_h2_frame_new_rst_stream(allocator, stream_id, AWS_HTTP2_ERR_ENHANCE_YOUR_CALM); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, rst_stream)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_RST_STREAM_RECEIVED, stream_tester.on_complete_error_code); /* After rst_stream received, and stream completed with RST_STREAM_RECEIVED, get function will get the error_code * received in rst_stream */ ASSERT_SUCCESS(aws_http2_stream_get_received_reset_error_code(stream_tester.stream, &http2_error)); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_ENHANCE_YOUR_CALM, http2_error); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_stream_get_sent_reset_error_code) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct client_stream_tester stream_tester; ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); uint32_t http2_error; /* Before rst_stream sent, get function will fail */ ASSERT_FAILS(aws_http2_stream_get_sent_reset_error_code(stream_tester.stream, &http2_error)); /* fake peer sends response body BEFORE any response headers, which leads to an error and the stream will close */ const char *body_src = "hello"; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, true /*end_stream*/));
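/* DATA arriving before any HEADERS is a stream protocol error: the client resets the stream with PROTOCOL_ERROR, and once the stream completes, aws_http2_stream_get_sent_reset_error_code() reports that same code, as the assertions below verify. */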
testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate that stream completed with protocol error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_TRUE(stream_tester.complete); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_PROTOCOL_ERROR, stream_tester.on_complete_error_code); /* Stream completed with error code, it's time to get what we sent */ ASSERT_SUCCESS(aws_http2_stream_get_sent_reset_error_code(stream_tester.stream, &http2_error)); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, http2_error); /* validate that stream sent RST_STREAM */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL); ASSERT_NOT_NULL(rst_stream_frame); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, rst_stream_frame->error_code); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } TEST_CASE(h2_client_new_request_allowed) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* prepare request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header headers[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":authority", "veryblackpage.com"), DEFINE_HEADER(":path", "/"), }; ASSERT_SUCCESS(aws_http_message_add_header_array(request, headers, AWS_ARRAY_SIZE(headers))); struct aws_http_make_request_options options = { .self_size = sizeof(options), .request = request, }; /* validate the new request is allowed for now */ ASSERT_TRUE(aws_http_connection_new_requests_allowed(s_tester.connection)); /* fake peer send a GOAWAY frame */ uint32_t stream_id = 0; struct aws_byte_cursor debug_info; AWS_ZERO_STRUCT(debug_info); struct aws_h2_frame *peer_frame = aws_h2_frame_new_goaway(allocator, stream_id, AWS_HTTP2_ERR_NO_ERROR, debug_info); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* validate the new request is not allowed anymore when goaway received */ ASSERT_FALSE(aws_http_connection_new_requests_allowed(s_tester.connection)); /* Make new request will fail */ ASSERT_NULL(aws_http_connection_make_request(s_tester.connection, &options)); ASSERT_UINT_EQUALS(AWS_ERROR_HTTP_GOAWAY_RECEIVED, aws_last_error()); /* close connection */ aws_http_connection_close(s_tester.connection); /* Make new request will fail */ ASSERT_NULL(aws_http_connection_make_request(s_tester.connection, &options)); ASSERT_UINT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, aws_last_error()); /* clean up */ aws_http_message_release(request); return s_tester_clean_up(); } static void s_default_settings(struct aws_http2_setting settings[AWS_HTTP2_SETTINGS_COUNT]) { for (int i = AWS_HTTP2_SETTINGS_BEGIN_RANGE; i < AWS_HTTP2_SETTINGS_END_RANGE; i++) { /* settings range begin with 1, store them into 0-based array of aws_http2_setting */ settings[i - 1].id = i; settings[i - 1].value = aws_h2_settings_initial[i]; } } static int s_apply_changed_settings( struct aws_http2_setting settings[AWS_HTTP2_SETTINGS_COUNT], struct 
aws_http2_setting *settings_to_change, int number_settings_to_change) { for (int i = 0; i < number_settings_to_change; i++) { struct aws_http2_setting setting = settings_to_change[i]; ASSERT_UINT_EQUALS(settings[setting.id - 1].id, setting.id); settings[setting.id - 1].value = setting.value; } return AWS_OP_SUCCESS; } TEST_CASE(h2_client_send_multiple_goaway) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_byte_buf info_buf = aws_byte_buf_from_c_str("this is a debug info"); struct aws_byte_cursor debug_info = aws_byte_cursor_from_buf(&info_buf); /* First graceful shutdown warning */ aws_http2_connection_send_goaway( s_tester.connection, AWS_HTTP2_ERR_NO_ERROR, true /*allow_more_streams*/, &debug_info /*debug_data*/); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the goaway frame received */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_NO_ERROR, latest_frame->error_code); ASSERT_UINT_EQUALS(AWS_H2_STREAM_ID_MAX, latest_frame->goaway_last_stream_id); ASSERT_TRUE(aws_byte_buf_eq_c_str(&latest_frame->data, "this is a debug info")); /* Real GOAWAY */ aws_http2_connection_send_goaway( s_tester.connection, AWS_HTTP2_ERR_PROTOCOL_ERROR, false /*allow_more_streams*/, &debug_info); /* It is fine to free the buffer right after the call, since we keep it in the connection's memory */ aws_byte_buf_clean_up(&info_buf); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the goaway frame received */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, latest_frame->error_code); ASSERT_UINT_EQUALS(0, latest_frame->goaway_last_stream_id); ASSERT_TRUE(aws_byte_buf_eq_c_str(&latest_frame->data, "this is a debug info")); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); /* Graceful shutdown warning after real GOAWAY will be ignored */ aws_http2_connection_send_goaway( s_tester.connection, AWS_HTTP2_ERR_NO_ERROR, true /*allow_more_streams*/, NULL /*debug_data*/); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the goaway frame received */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); ASSERT_UINT_EQUALS(frames_count, h2_decode_tester_frame_count(&s_tester.peer.decode)); /* clean up */ return s_tester_clean_up(); } TEST_CASE(h2_client_get_sent_goaway) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); uint32_t last_stream_id; uint32_t http2_error; ASSERT_FAILS(aws_http2_connection_get_sent_goaway(s_tester.connection, &http2_error, &last_stream_id)); /* First graceful shutdown warning */ aws_http2_connection_send_goaway( s_tester.connection, AWS_HTTP2_ERR_NO_ERROR, true /*allow_more_streams*/, NULL /*debug_data*/); /* User send goaway asynchronously, you are not able to get the sent goaway right after the call */ 
ASSERT_FAILS(aws_http2_connection_get_sent_goaway(s_tester.connection, &http2_error, &last_stream_id)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(aws_http2_connection_get_sent_goaway(s_tester.connection, &http2_error, &last_stream_id)); ASSERT_UINT_EQUALS(AWS_H2_STREAM_ID_MAX, last_stream_id); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_NO_ERROR, http2_error); /* Second graceful shutdown warning, with non-zero error. Well it's not against the law, just do what user wants */ aws_http2_connection_send_goaway( s_tester.connection, AWS_HTTP2_ERR_ENHANCE_YOUR_CALM, true /*allow_more_streams*/, NULL /*debug_data*/); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(aws_http2_connection_get_sent_goaway(s_tester.connection, &http2_error, &last_stream_id)); ASSERT_UINT_EQUALS(AWS_H2_STREAM_ID_MAX, last_stream_id); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_ENHANCE_YOUR_CALM, http2_error); struct aws_byte_cursor opaque_data = aws_byte_cursor_from_c_str("12345678"); /* peer sending an extra ping ack will lead to a connection error and a goaway will be sent */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_ping(allocator, true /*ACK*/, opaque_data.ptr); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Check the sent goaway */ ASSERT_SUCCESS(aws_http2_connection_get_sent_goaway(s_tester.connection, &http2_error, &last_stream_id)); ASSERT_UINT_EQUALS(0, last_stream_id); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_PROTOCOL_ERROR, http2_error); /* clean up */ return s_tester_clean_up(); } TEST_CASE(h2_client_get_received_goaway) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); uint32_t last_stream_id; uint32_t http2_error; /* you are not able to get the received goaway if no GOAWAY received */ ASSERT_FAILS(aws_http2_connection_get_received_goaway(s_tester.connection, &http2_error, &last_stream_id)); /* fake peer sends goaway */ const char debug_string[] = "Error, Core Dump 0XFFFFFFFF"; struct aws_byte_cursor debug_info = aws_byte_cursor_from_c_str(debug_string); struct aws_h2_frame *peer_frame = aws_h2_frame_new_goaway(allocator, AWS_H2_STREAM_ID_MAX, AWS_HTTP2_ERR_NO_ERROR, debug_info); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Try to get the received goaway */ ASSERT_SUCCESS(aws_http2_connection_get_received_goaway(s_tester.connection, &http2_error, &last_stream_id)); ASSERT_UINT_EQUALS(AWS_H2_STREAM_ID_MAX, last_stream_id); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_NO_ERROR, http2_error); /* clean up */ return s_tester_clean_up(); } TEST_CASE(h2_client_get_local_settings) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); struct aws_http2_setting settings_get[AWS_HTTP2_SETTINGS_COUNT]; struct aws_http2_setting settings_expected[AWS_HTTP2_SETTINGS_COUNT]; s_default_settings(settings_expected); aws_http2_connection_get_local_settings(s_tester.connection, settings_get); /* Although we disabled push_promise in the initial settings, without the settings ACK from the peer, the * settings we are using locally are still the default settings */ ASSERT_SUCCESS(s_compare_settings_array(settings_expected, settings_get, AWS_HTTP2_SETTINGS_COUNT)); /* fake peer sends connection preface */
ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); /* fake peer sends settings ack back for the initial settings. */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Set expected setting */ settings_expected[AWS_HTTP2_SETTINGS_ENABLE_PUSH - 1].value = false; /* Initial settings got ACKed by peer, now we will get the settings with push_promise disabled */ aws_http2_connection_get_local_settings(s_tester.connection, settings_get); ASSERT_SUCCESS(s_compare_settings_array(settings_expected, settings_get, AWS_HTTP2_SETTINGS_COUNT)); /* Request to change the local settings */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 1}, {.id = AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE, .value = 0}, {.id = AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE, .value = 1000}, {.id = AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, .value = 1}, {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = 1}, {.id = AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE, .value = AWS_H2_PAYLOAD_MAX}, {.id = AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, .value = 1}, }; ASSERT_SUCCESS(aws_http2_connection_change_settings( s_tester.connection, settings_array, AWS_ARRAY_SIZE(settings_array), NULL /*call_back*/, NULL /*user_data*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Settings sent, but not ACKed yet, so we will still get the same settings */ aws_http2_connection_get_local_settings(s_tester.connection, settings_get); ASSERT_SUCCESS(s_compare_settings_array(settings_expected, settings_get, AWS_HTTP2_SETTINGS_COUNT)); /* Peer ACKed the settings */ peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Set expected setting */ ASSERT_SUCCESS(s_apply_changed_settings(settings_expected, settings_array, AWS_ARRAY_SIZE(settings_array))); aws_http2_connection_get_local_settings(s_tester.connection, settings_get); ASSERT_SUCCESS(s_compare_settings_array(settings_expected, settings_get, AWS_HTTP2_SETTINGS_COUNT)); /* clean up */ return s_tester_clean_up(); } TEST_CASE(h2_client_get_remote_settings) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); struct aws_http2_setting settings_get[AWS_HTTP2_SETTINGS_COUNT]; struct aws_http2_setting settings_expected[AWS_HTTP2_SETTINGS_COUNT]; s_default_settings(settings_expected); /* Once the connection is set up and no settings have arrived from the peer, remote settings will be the default init settings */ aws_http2_connection_get_remote_settings(s_tester.connection, settings_get); ASSERT_SUCCESS(s_compare_settings_array(settings_expected, settings_get, AWS_HTTP2_SETTINGS_COUNT)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); /* fake peer sends settings and changes the remote settings */ struct aws_http2_setting settings_array[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 1}, {.id = AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE, .value = 0}, {.id = AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE, .value = 1000}, {.id = AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, .value = 1}, {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = 1}, {.id = AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE, .value = AWS_H2_PAYLOAD_MAX}, {.id = AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, .value = 1}, }; struct aws_h2_frame
*settings_frame = aws_h2_frame_new_settings(allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings_frame); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, settings_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Set expected setting */ ASSERT_SUCCESS(s_apply_changed_settings(settings_expected, settings_array, AWS_ARRAY_SIZE(settings_array))); aws_http2_connection_get_remote_settings(s_tester.connection, settings_get); ASSERT_SUCCESS(s_compare_settings_array(settings_expected, settings_get, AWS_HTTP2_SETTINGS_COUNT)); /* clean up */ return s_tester_clean_up(); } /* User apis that want to add stuff into connection.synced_data will fail after connection shutdown starts */ TEST_CASE(h2_client_request_apis_failed_after_connection_begin_shutdown) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); /* close the connection */ aws_http_connection_close(s_tester.connection); /* Send goaway will silently do nothing as the connection already closed */ aws_http2_connection_send_goaway( s_tester.connection, AWS_HTTP2_ERR_NO_ERROR, false /*allow_more_streams*/, NULL /*debug_data*/); /* validate all those user apis to add stuff into synced data will fail */ ASSERT_FAILS(aws_http_stream_activate(stream)); ASSERT_FAILS(aws_http2_connection_change_settings( s_tester.connection, NULL, 0, NULL /*callback function*/, NULL /*user_data*/)); ASSERT_FAILS(aws_http2_connection_ping(s_tester.connection, NULL, NULL /*callback function*/, NULL /*user_data*/)); /* clean up */ aws_http_message_release(request); aws_http_stream_release(stream); return s_tester_clean_up(); } enum request_callback { REQUEST_CALLBACK_OUTGOING_BODY, REQUEST_CALLBACK_INCOMING_HEADERS, REQUEST_CALLBACK_INCOMING_HEADERS_DONE, REQUEST_CALLBACK_INCOMING_BODY, REQUEST_CALLBACK_COMPLETE, REQUEST_CALLBACK_COUNT, }; struct error_from_callback_tester { struct aws_input_stream base; enum request_callback error_at; int callback_counts[REQUEST_CALLBACK_COUNT]; bool has_errored; struct aws_stream_status status; int on_complete_error_code; }; static const int ERROR_FROM_CALLBACK_ERROR_CODE = (int)0xBEEFCAFE; static int s_error_from_callback_common( struct error_from_callback_tester *error_tester, enum request_callback current_callback) { error_tester->callback_counts[current_callback]++; /* After error code returned, no more callbacks should fire (except for on_complete) */ AWS_FATAL_ASSERT(!error_tester->has_errored); AWS_FATAL_ASSERT(current_callback <= error_tester->error_at); if (current_callback == error_tester->error_at) { error_tester->has_errored = true; return aws_raise_error(ERROR_FROM_CALLBACK_ERROR_CODE); } return AWS_OP_SUCCESS; } static int 
s_error_from_outgoing_body_read(struct aws_input_stream *body, struct aws_byte_buf *dest) { (void)dest; struct error_from_callback_tester *error_tester = AWS_CONTAINER_OF(body, struct error_from_callback_tester, base); if (s_error_from_callback_common(error_tester, REQUEST_CALLBACK_OUTGOING_BODY)) { return AWS_OP_ERR; } /* If the common fn was successful, write out some data and end the stream */ ASSERT_TRUE(aws_byte_buf_write(dest, (const uint8_t *)"abcd", 4)); error_tester->status.is_end_of_stream = true; return AWS_OP_SUCCESS; } static int s_error_from_outgoing_body_get_status(struct aws_input_stream *body, struct aws_stream_status *status) { struct error_from_callback_tester *error_tester = AWS_CONTAINER_OF(body, struct error_from_callback_tester, base); *status = error_tester->status; return AWS_OP_SUCCESS; } static void s_error_from_outgoing_body_destroy(void *stream) { /* allocated from stack, nothing to do */ (void)stream; } static struct aws_input_stream_vtable s_error_from_outgoing_body_vtable = { .seek = NULL, .read = s_error_from_outgoing_body_read, .get_status = s_error_from_outgoing_body_get_status, .get_length = NULL, }; static int s_error_from_incoming_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; (void)header_block; (void)header_array; (void)num_headers; return s_error_from_callback_common(user_data, REQUEST_CALLBACK_INCOMING_HEADERS); } static int s_error_from_incoming_headers_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)stream; (void)header_block; return s_error_from_callback_common(user_data, REQUEST_CALLBACK_INCOMING_HEADERS_DONE); } static int s_error_from_incoming_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)data; return s_error_from_callback_common(user_data, REQUEST_CALLBACK_INCOMING_BODY); } static void s_error_tester_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; struct error_from_callback_tester *error_tester = user_data; error_tester->callback_counts[REQUEST_CALLBACK_COMPLETE]++; error_tester->on_complete_error_code = error_code; } static int s_test_error_from_callback(struct aws_allocator *allocator, void *ctx, enum request_callback error_at) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); /* send request */ struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct error_from_callback_tester error_tester = { .error_at = error_at, .status = { .is_valid = true, .is_end_of_stream = false, }, }; error_tester.base.vtable = &s_error_from_outgoing_body_vtable; aws_ref_count_init(&error_tester.base.ref_count, &error_tester, s_error_from_outgoing_body_destroy); aws_http_message_set_body_stream(request, &error_tester.base); struct aws_http_make_request_options opt = { .self_size = sizeof(opt), .request = request, .on_response_headers = 
s_error_from_incoming_headers, .on_response_header_block_done = s_error_from_incoming_headers_done, .on_response_body = s_error_from_incoming_body, .on_complete = s_error_tester_on_stream_complete, .user_data = &error_tester, }; testing_channel_drain_queued_tasks(&s_tester.testing_channel); struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &opt); ASSERT_NOT_NULL(stream); ASSERT_SUCCESS(aws_http_stream_activate(stream)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Ensure the request can be destroyed after request is sent */ aws_http_message_release(opt.request); /* fake peer sends response headers */ struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "200"), DEFINE_HEADER("date", "Fri, 01 Mar 2019 17:18:55 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); uint32_t stream_id = aws_http_stream_get_id(stream); struct aws_h2_frame *response_frame = aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); struct aws_byte_buf response_body_bufs; size_t body_length = 5; /* fake peer sends a DATA frame larger than the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, body_length)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&response_body_bufs, (uint8_t)'a', body_length)); struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&response_body_bufs); ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, true /*end_stream*/)); /* validate that stream completed with error */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* check that callbacks were invoked before error_at, but not after */ for (int i = 0; i < REQUEST_CALLBACK_COMPLETE; ++i) { if (i <= error_at) { ASSERT_TRUE(error_tester.callback_counts[i] > 0); } else { ASSERT_INT_EQUALS(0, error_tester.callback_counts[i]); } } /* validate the RST_STREAM sent and connection is still open */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *rst_stream_frame = h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL); ASSERT_NOT_NULL(rst_stream_frame); ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_INTERNAL_ERROR, rst_stream_frame->error_code); ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); /* the on_complete callback should always fire though, and should receive the proper error_code */ ASSERT_INT_EQUALS(1, error_tester.callback_counts[REQUEST_CALLBACK_COMPLETE]); ASSERT_INT_EQUALS(ERROR_FROM_CALLBACK_ERROR_CODE, error_tester.on_complete_error_code); aws_http_headers_release(response_headers); aws_byte_buf_clean_up(&response_body_bufs); aws_http_stream_release(stream); return s_tester_clean_up(); } TEST_CASE(h2_client_error_from_outgoing_body_callback_reset_stream) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, ctx, REQUEST_CALLBACK_OUTGOING_BODY)); return AWS_OP_SUCCESS; } TEST_CASE(h2_client_error_from_incoming_headers_callback_reset_stream) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, ctx, REQUEST_CALLBACK_INCOMING_HEADERS)); return AWS_OP_SUCCESS; } TEST_CASE(h2_client_error_from_incoming_headers_done_callback_reset_stream) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, ctx, 
REQUEST_CALLBACK_INCOMING_HEADERS_DONE)); return AWS_OP_SUCCESS; } TEST_CASE(h2_client_error_from_incoming_body_callback_reset_stream) { (void)ctx; ASSERT_SUCCESS(s_test_error_from_callback(allocator, ctx, REQUEST_CALLBACK_INCOMING_BODY)); return AWS_OP_SUCCESS; } struct h2_client_manual_data_write_ctx { struct aws_allocator *allocator; struct aws_byte_buf data; int complete_error_code; }; static struct aws_input_stream *s_h2_client_manual_data_write_generate_data( struct h2_client_manual_data_write_ctx *ctx) { struct aws_byte_cursor data = aws_byte_cursor_from_buf(&ctx->data); data.len = aws_max_size(rand() % ctx->data.capacity, 1); return aws_input_stream_new_from_cursor(ctx->allocator, &data); } TEST_CASE(h2_client_manual_data_write) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t frame_count = h2_decode_tester_frame_count(&s_tester.peer.decode); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .http2_use_manual_data_writes = true, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); testing_channel_drain_queued_tasks(&s_tester.testing_channel); uint32_t stream_id = aws_http_stream_get_id(stream); struct aws_byte_buf payload; aws_byte_buf_init(&payload, allocator, 1024); struct h2_client_manual_data_write_ctx test_ctx = { .allocator = allocator, .data = payload, }; size_t total_length = 0; /* Simulate writes coming in over time */ for (int idx = 0; idx < 1000; ++idx) { struct aws_input_stream *data_stream = s_h2_client_manual_data_write_generate_data(&test_ctx); int64_t stream_length = 0; ASSERT_SUCCESS(aws_input_stream_get_length(data_stream, &stream_length)); total_length += (size_t)stream_length; struct aws_http2_stream_write_data_options write = { .data = data_stream, .on_complete = NULL, .user_data = NULL, }; ASSERT_SUCCESS(aws_http2_stream_write_data(stream, &write)); /* fake peer sends WINDOW_UPDATE */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_window_update(allocator, stream_id, (uint32_t)stream_length); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); /* Connection level window update */ peer_frame = aws_h2_frame_new_window_update(allocator, 0, (uint32_t)stream_length); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); if (idx % 10 == 0) { testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); } aws_input_stream_release(data_stream); } struct aws_http2_stream_write_data_options last_write = {.end_stream = true}; ASSERT_SUCCESS(aws_http2_stream_write_data(stream, &last_write)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t frame_count2 = 
h2_decode_tester_frame_count(&s_tester.peer.decode); /* Peer should have received a header frame without end_stream and multiple data frames, and the combined payload length should * be the same as the total length sent. */ struct h2_decoded_frame *header_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frame_count); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_HEADERS, header_frame->type); ASSERT_FALSE(header_frame->end_stream); size_t received_length = 0; for (size_t i = frame_count + 1; i < frame_count2; i++) { struct h2_decoded_frame *data_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, i); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_DATA, data_frame->type); received_length += data_frame->data_payload_len; if (i == frame_count2 - 1) { ASSERT_TRUE(data_frame->end_stream); } else { ASSERT_FALSE(data_frame->end_stream); } } ASSERT_UINT_EQUALS(received_length, total_length); aws_http_message_release(request); aws_http_stream_release(stream); /* close the connection */ aws_http_connection_close(s_tester.connection); aws_byte_buf_clean_up(&test_ctx.data); /* clean up */ return s_tester_clean_up(); } TEST_CASE(h2_client_manual_data_write_not_enabled) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .http2_use_manual_data_writes = false, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); struct aws_byte_buf payload; aws_byte_buf_init(&payload, allocator, 1024); struct h2_client_manual_data_write_ctx test_ctx = { .allocator = allocator, .data = payload, }; /* Try writing the data */ struct aws_input_stream *data_stream = s_h2_client_manual_data_write_generate_data(&test_ctx); int64_t stream_length = 0; ASSERT_SUCCESS(aws_input_stream_get_length(data_stream, &stream_length)); struct aws_http2_stream_write_data_options write_options = { .data = data_stream, }; ASSERT_ERROR(AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED, aws_http2_stream_write_data(stream, &write_options)); aws_input_stream_release(data_stream); aws_http_message_release(request); aws_http_stream_release(stream); /* close the connection */ aws_http_connection_close(s_tester.connection); aws_byte_buf_clean_up(&test_ctx.data); /* clean up */ return s_tester_clean_up(); } TEST_CASE(h2_client_manual_data_write_with_body) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); size_t frame_count = h2_decode_tester_frame_count(&s_tester.peer.decode); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = 
sizeof(request_options),
        .request = request,
        .http2_use_manual_data_writes = true,
    };

    size_t total_length = 0;
    /* set request body */
    const char *body_src = "hello";
    struct aws_byte_cursor body_cursor = aws_byte_cursor_from_c_str(body_src);
    struct aws_input_stream *request_body = aws_input_stream_new_from_cursor(allocator, &body_cursor);
    aws_http_message_set_body_stream(request, request_body);
    int64_t body_length = 0;
    ASSERT_SUCCESS(aws_input_stream_get_length(request_body, &body_length));
    total_length += (size_t)body_length;
    aws_input_stream_release(request_body);

    struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options);
    ASSERT_NOT_NULL(stream);
    aws_http_stream_activate(stream);
    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    uint32_t stream_id = aws_http_stream_get_id(stream);

    struct aws_byte_buf payload;
    aws_byte_buf_init(&payload, allocator, 1024);
    struct h2_client_manual_data_write_ctx test_ctx = {
        .allocator = allocator,
        .data = payload,
    };

    /* Simulate writes coming in over time */
    for (int idx = 0; idx < 1000; ++idx) {
        struct aws_input_stream *data_stream = s_h2_client_manual_data_write_generate_data(&test_ctx);
        int64_t stream_length = 0;
        ASSERT_SUCCESS(aws_input_stream_get_length(data_stream, &stream_length));
        total_length += (size_t)stream_length;
        struct aws_http2_stream_write_data_options write = {
            .data = data_stream,
            .on_complete = NULL,
            .user_data = NULL,
        };
        ASSERT_SUCCESS(aws_http2_stream_write_data(stream, &write));
        /* fake peer sends WINDOW_UPDATE */
        struct aws_h2_frame *peer_frame = aws_h2_frame_new_window_update(allocator, stream_id, (uint32_t)stream_length);
        ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));
        /* Connection level window update */
        peer_frame = aws_h2_frame_new_window_update(allocator, 0, (uint32_t)stream_length);
        ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame));
        if (idx % 10 == 0) {
            testing_channel_drain_queued_tasks(&s_tester.testing_channel);
            ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
        }
        aws_input_stream_release(data_stream);
    }
    struct aws_http2_stream_write_data_options last_write = {.end_stream = true};
    ASSERT_SUCCESS(aws_http2_stream_write_data(stream, &last_write));

    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    size_t frame_count2 = h2_decode_tester_frame_count(&s_tester.peer.decode);

    /* Peer should receive a header frame without end_stream and multiple data frames, and the combined payload
     * length should be the same as the total length sent. */
    struct h2_decoded_frame *header_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frame_count);
    ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_HEADERS, header_frame->type);
    ASSERT_FALSE(header_frame->end_stream);
    size_t received_length = 0;
    for (size_t i = frame_count + 1; i < frame_count2; i++) {
        struct h2_decoded_frame *data_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, i);
        ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_DATA, data_frame->type);
        received_length += data_frame->data_payload_len;
        if (i == frame_count2 - 1) {
            ASSERT_TRUE(data_frame->end_stream);
        } else {
            ASSERT_FALSE(data_frame->end_stream);
        }
    }
    ASSERT_UINT_EQUALS(received_length, total_length);

    aws_http_message_release(request);
    aws_http_stream_release(stream);
    /* close the connection */
    aws_http_connection_close(s_tester.connection);
    aws_byte_buf_clean_up(&test_ctx.data);
    /* clean up */
    return s_tester_clean_up();
}

TEST_CASE(h2_client_manual_data_write_no_data) {
    ASSERT_SUCCESS(s_tester_init(allocator, ctx));

    /* get connection preface and acks out of the way */
    ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer));
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    size_t frame_count = h2_decode_tester_frame_count(&s_tester.peer.decode);

    struct aws_http_message *request = aws_http2_message_new_request(allocator);
    ASSERT_NOT_NULL(request);

    struct aws_http_header request_headers_src[] = {
        DEFINE_HEADER(":method", "GET"),
        DEFINE_HEADER(":scheme", "https"),
        DEFINE_HEADER(":path", "/"),
    };
    aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src));

    struct aws_http_make_request_options request_options = {
        .self_size = sizeof(request_options),
        .request = request,
        .http2_use_manual_data_writes = true,
    };
    struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options);
    ASSERT_NOT_NULL(stream);
    aws_http_stream_activate(stream);

    struct aws_http2_stream_write_data_options last_write = {.end_stream = true};
    ASSERT_SUCCESS(aws_http2_stream_write_data(stream, &last_write));

    testing_channel_drain_queued_tasks(&s_tester.testing_channel);
    ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer));
    size_t frame_count_2 = h2_decode_tester_frame_count(&s_tester.peer.decode);

    /* Peer should receive a header frame without end_stream and an empty data frame with end_stream */
    ASSERT_UINT_EQUALS(frame_count + 2, frame_count_2);
    struct h2_decoded_frame *header_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frame_count);
    ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_HEADERS, header_frame->type);
    ASSERT_FALSE(header_frame->end_stream);
    struct h2_decoded_frame *empty_data_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, frame_count + 1);
    ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_DATA, empty_data_frame->type);
    ASSERT_UINT_EQUALS(0, empty_data_frame->data_payload_len);
    ASSERT_TRUE(empty_data_frame->end_stream);

    aws_http_message_release(request);
    aws_http_stream_release(stream);
    /* close the connection */
    aws_http_connection_close(s_tester.connection);
    /* clean up */
    return s_tester_clean_up();
}

static void s_on_manual_data_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
    (void)stream;
    struct h2_client_manual_data_write_ctx *test_ctx = (struct h2_client_manual_data_write_ctx *)user_data;
    test_ctx->complete_error_code = error_code;
}

/* Close the connection before it finishes writing data */
TEST_CASE(h2_client_manual_data_write_connection_close) {
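    /* Test flow: attempt a write before the stream is activated (must fail), write after activation, close the
     * connection mid-write, then verify that further writes fail and that the stream completes with
     * AWS_ERROR_HTTP_CONNECTION_CLOSED. */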
ASSERT_SUCCESS(s_tester_init(allocator, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/"), }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_byte_buf payload; aws_byte_buf_init(&payload, allocator, 1024); struct h2_client_manual_data_write_ctx test_ctx = { .allocator = allocator, .data = payload, }; struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .http2_use_manual_data_writes = true, .on_complete = s_on_manual_data_stream_complete, .user_data = &test_ctx, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); struct aws_input_stream *data_stream = s_h2_client_manual_data_write_generate_data(&test_ctx); struct aws_http2_stream_write_data_options write = { .data = data_stream, .on_complete = NULL, .user_data = NULL, }; /* Cannot write before activate the stream */ ASSERT_FAILS(aws_http2_stream_write_data(stream, &write)); aws_http_stream_activate(stream); ASSERT_SUCCESS(aws_http2_stream_write_data(stream, &write)); /* close connection */ aws_http_connection_close(s_tester.connection); ASSERT_SUCCESS(aws_http2_stream_write_data(stream, &write)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* Cannot write after stream closed */ ASSERT_FAILS(aws_http2_stream_write_data(stream, &write)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, test_ctx.complete_error_code); aws_http_message_release(request); aws_http_stream_release(stream); /* clean up */ aws_byte_buf_clean_up(&test_ctx.data); aws_input_stream_release(data_stream); return s_tester_clean_up(); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h2_decoder.c000066400000000000000000003662431456575232400245700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "h2_test_helper.h" #include struct fixture { struct aws_allocator *allocator; struct h2_decode_tester decode; /* If true, run decoder over input one byte at a time */ bool one_byte_at_a_time; bool is_server; bool skip_connection_preface; }; static int s_fixture_init(struct fixture *fixture, struct aws_allocator *allocator) { fixture->allocator = allocator; struct h2_decode_tester_options options = { .alloc = allocator, .is_server = fixture->is_server, .skip_connection_preface = fixture->skip_connection_preface, }; ASSERT_SUCCESS(h2_decode_tester_init(&fixture->decode, &options)); return AWS_OP_SUCCESS; } static void s_fixture_clean_up(struct fixture *fixture) { h2_decode_tester_clean_up(&fixture->decode); } static int s_fixture_test_setup(struct aws_allocator *allocator, void *ctx) { aws_http_library_init(allocator); struct fixture *fixture = ctx; ASSERT_SUCCESS(s_fixture_init(fixture, allocator)); return AWS_OP_SUCCESS; } static int s_fixture_test_teardown(struct aws_allocator *allocator, int setup_result, void *ctx) { (void)allocator; if (setup_result) { return AWS_OP_ERR; } struct fixture *fixture = ctx; s_fixture_clean_up(fixture); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } /* declare 1 test using the fixture */ #define TEST_CASE(NAME) \ static struct fixture s_fixture_##NAME; \ AWS_TEST_CASE_FIXTURE(NAME, s_fixture_test_setup, s_test_##NAME, s_fixture_test_teardown, &s_fixture_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) /* declare 2 tests, where: * 1) NAME runs the decoder over input all at once * 2) NAME_one_byte_at_a_time runs the decoder on one byte of input at a time. */ #define H2_DECODER_TEST_CASE_IMPL(NAME, IS_SERVER, SKIP_PREFACE) \ static struct fixture s_fixture_##NAME = { \ .is_server = (IS_SERVER), \ .skip_connection_preface = (SKIP_PREFACE), \ }; \ AWS_TEST_CASE_FIXTURE(NAME, s_fixture_test_setup, s_test_##NAME, s_fixture_test_teardown, &s_fixture_##NAME); \ static struct fixture s_fixture_##NAME##_one_byte_at_a_time = { \ .one_byte_at_a_time = true, \ .is_server = (IS_SERVER), \ .skip_connection_preface = (SKIP_PREFACE), \ }; \ AWS_TEST_CASE_FIXTURE( \ NAME##_one_byte_at_a_time, \ s_fixture_test_setup, \ s_test_##NAME, \ s_fixture_test_teardown, \ &s_fixture_##NAME##_one_byte_at_a_time) \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define H2_DECODER_ON_CLIENT_TEST(NAME) H2_DECODER_TEST_CASE_IMPL(NAME, false /*server*/, true /*skip_preface*/) #define H2_DECODER_ON_SERVER_TEST(NAME) H2_DECODER_TEST_CASE_IMPL(NAME, true /*server*/, true /*skip_preface*/) #define H2_DECODER_ON_CLIENT_PREFACE_TEST(NAME) H2_DECODER_TEST_CASE_IMPL(NAME, false, false) #define H2_DECODER_ON_SERVER_PREFACE_TEST(NAME) H2_DECODER_TEST_CASE_IMPL(NAME, true, false) /* Make sure fixture works */ TEST_CASE(h2_decoder_sanity_check) { (void)allocator; struct fixture *fixture = ctx; ASSERT_NOT_NULL(fixture); return AWS_OP_SUCCESS; } /* Run aws_h2_decode() on input in special ways determined by the fixture */ static struct aws_h2err s_decode_all(struct fixture *fixture, struct aws_byte_cursor input) { if (fixture->one_byte_at_a_time) { /* Decode input one byte at a time */ while (input.len) { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(&input, 1); struct aws_h2err err = aws_h2_decode(fixture->decode.decoder, &one_byte); if (aws_h2err_failed(err)) { return err; } AWS_FATAL_ASSERT(0 == one_byte.len); } } else { /* Decode buffer all at once */ struct aws_h2err err = aws_h2_decode(fixture->decode.decoder, &input); 
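        /* aws_h2_decode() consumes input from the cursor as it decodes; on success it must have consumed every
         * byte, which the fatal assert below verifies. */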
if (aws_h2err_failed(err)) { return err; } AWS_FATAL_ASSERT(0 == input.len); } return AWS_H2ERR_SUCCESS; } /* Test DATA frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_data) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(5, frame->data_payload_len); ASSERT_TRUE(frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&frame->data, "hello")); return AWS_OP_SUCCESS; } /* Test padded DATA frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_data_padded) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 0x02, /* Pad Length (8) - F_PADDED */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ 0x00, 0x00, /* Padding (*) - F_PADDED */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(8, frame->data_payload_len); ASSERT_FALSE(frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&frame->data, "hello")); return AWS_OP_SUCCESS; } /* OK for PADDED frame to have pad length of zero */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_data_pad_length_zero) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x06, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 0x00, /* Pad Length (8) - F_PADDED */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ /* Padding (*) - F_PADDED */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(6, frame->data_payload_len); ASSERT_TRUE(frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&frame->data, "hello")); return AWS_OP_SUCCESS; } /* OK for DATA frame to have no data */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_data_empty) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ /* Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); ASSERT_TRUE(frame->data.len == 0); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* OK for padded DATA frame to have no data */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_data_empty_padded) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x03, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 0x02, /* Pad Length (8) - F_PADDED */ /* Data (*) */ 0x00, 0x00, /* Padding (*) - F_PADDED */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(3, frame->data_payload_len); ASSERT_FALSE(frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&frame->data, "")); return AWS_OP_SUCCESS; } /* Unexpected flags should be ignored. * DATA frames only support END_STREAM and PADDED*/ H2_DECODER_ON_CLIENT_TEST(h2_decoder_data_ignores_unknown_flags) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ 0xFF, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 0x02, /* Pad Length (8) - F_PADDED */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ 0x00, 0x00, /* Padding (*) - F_PADDED */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(8, frame->data_payload_len); ASSERT_TRUE(frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&frame->data, "hello")); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_data_payload_max_size_update) { (void)allocator; struct fixture *fixture = ctx; /* The initial max size is set as 16384. Let's create a data frame with 16500 bytes data, and update the setting to * make it valid */ aws_h2_decoder_set_setting_max_frame_size(fixture->decode.decoder, 16500); /* clang-format off */ uint8_t input[16509] = { 0x00, 0x40, 0x74, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ }; /* clang-format on */ /* set the data and expected to 16500 'a' */ char expected[16500]; for (int i = 9; i < 16509; i++) { input[i] = 'a'; expected[i - 9] = 'a'; } struct aws_byte_cursor expected_cursor = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(16500, frame->data_payload_len); ASSERT_TRUE(frame->end_stream); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&expected_cursor, &frame->data)); return AWS_OP_SUCCESS; } /* The size of a frame payload is limited by the maximum size. An endpoint MUST send an error code of FRAME_SIZE_ERROR * if a frame exceeds the size */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_data_payload_exceed_max_size) { (void)allocator; struct fixture *fixture = ctx; /* The initial max size is set as 16384. Let's create a data frame with 16500 bytes data, which will be invalid in * this case */ /* clang-format off */ uint8_t input[16509] = { 0x00, 0x40, 0x74, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* DATA frames MUST specify a stream-id */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_data_requires_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Error if frame is padded, but not big enough to contain the padding length */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_payload_too_small_for_pad_length) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ /* Pad Length (8) - F_PADDED */ /* Data (*) */ /* Padding (*) - F_PADDED */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* The most-significant-bit of the encoded stream ID is reserved, and should be ignored when decoding */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_stream_id_ignores_reserved_bit) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0xFF, 0xFF, 0xFF, 0xFF, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x7FFFFFFF /*stream_id*/)); ASSERT_UINT_EQUALS(5, frame->data_payload_len); ASSERT_TRUE(frame->end_stream); ASSERT_TRUE(aws_byte_buf_eq_c_str(&frame->data, "hello")); return AWS_OP_SUCCESS; } static int s_check_header( struct h2_decoded_frame *frame, size_t header_idx, const char *name, const char *value, enum aws_http_header_compression compression) { struct aws_http_header header_field; ASSERT_SUCCESS(aws_http_headers_get_index(frame->headers, header_idx, &header_field)); ASSERT_BIN_ARRAYS_EQUALS(name, strlen(name), header_field.name.ptr, header_field.name.len); ASSERT_BIN_ARRAYS_EQUALS(value, strlen(value), header_field.value.ptr, header_field.value.len); ASSERT_INT_EQUALS(compression, header_field.compression); return AWS_OP_SUCCESS; } /* Test a simple HEADERS frame * Note that we're not stressing the HPACK decoder here, that's done in other test files */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_headers) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 11, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ 0x7a, 0x04, 't', 'e', 's', 't' /* "user-agent: test" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "user-agent", "test", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Test a HEADERS frame with padding */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_headers_padded) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x02, /* Pad Length (8) - F_PADDED */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ 0x00, 0x00, /* Padding (*) - F_PADDED */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); return AWS_OP_SUCCESS; } /* 
Test a HEADERS frame with priority information * Note that priority information is ignored for now. * We're not testing that it was reported properly, just that decoder can properly consume it */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_headers_priority) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 10, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PRIORITY, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) - F_PRIORITY*/ 0x09, /* Weight (8) - F_PRIORITY */ 0x48, 0x03, '3', '0', '2' /* ":status: 302" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); return AWS_OP_SUCCESS; } /* Test a HEADERS frame with ALL flags set. * Unexpected flags should be ignored, but HEADERS supports: priority and padding and end-headers and end-stream */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_headers_ignores_unknown_flags) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 13, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ 0xFF, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x02, /* Pad Length (8) - F_PADDED */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) - F_PRIORITY*/ 0x09, /* Weight (8) - F_PRIORITY */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ 0x00, 0x00 /* Padding (*) - F_PADDED */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_headers_response_informational) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x48, 0x03, '1', '0', '0', /* ":status: 100" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); 
ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "100", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_INFORMATIONAL, frame->header_block_type); ASSERT_FALSE(frame->end_stream); return AWS_OP_SUCCESS; } /* Test decoding a request frame (Note: must use decoder on server) */ H2_DECODER_ON_SERVER_TEST(h2_decoder_headers_request) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 21, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x82, /* ":method: GET" - indexed */ 0x86, /* ":scheme: http" - indexed */ 0x41, 10, 'a', 'm', 'a', 'z', 'o', 'n', '.', 'c', 'o', 'm', /* ":authority: amazon.com" - indexed name */ 0x84, /* ":path: /" - indexed */ 0x7a, 0x04, 't', 'e', 's', 't' /* "user-agent: test" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 1 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(5, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "http", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":authority", "amazon.com", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 3, ":path", "/", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 4, "user-agent", "test", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } H2_DECODER_ON_SERVER_TEST(h2_decoder_headers_cookies) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x06, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x82, /* ":method: GET" - indexed */ 0x60, 0x03, 'a', '=', 'b', /* "cache: a=b" - indexed name, uncompressed value */ /* CONTINUATION FRAME*/ 0x00, 0x00, 16, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x7a, 0x04, 't', 'e', 's', 't', /* "user-agent: test" - indexed name, uncompressed value */ 0x60, 0x03, 'c', '=', 'd', /* "cache: c=d" - indexed name, uncompressed value */ 0x60, 0x03, 'e', '=', 'f', /* "cache: e=f" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 
/*stream_id*/));
    ASSERT_FALSE(frame->headers_malformed);
    /* the separate cookie headers are concatenated and moved to be the last header */
    ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers));
    ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_SUCCESS(s_check_header(frame, 1, "user-agent", "test", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_SUCCESS(s_check_header(frame, 2, "cookie", "a=b; c=d; e=f", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type);
    ASSERT_TRUE(frame->end_stream);
    return AWS_OP_SUCCESS;
}

/* A trailing header has no pseudo-headers, and always ends the stream */
H2_DECODER_ON_CLIENT_TEST(h2_decoder_headers_trailer) {
    (void)allocator;
    struct fixture *fixture = ctx;

    /* clang-format off */
    uint8_t input[] = {
        0x00, 0x00, 0x06,           /* Length (24) */
        AWS_H2_FRAME_T_HEADERS,     /* Type (8) */
        AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */
        0x76, 0x54, 0x32, 0x10,     /* Reserved (1) | Stream Identifier (31) */
        /* HEADERS */
        0x7a, 0x04, 't', 'e', 's', 't' /* "user-agent: test" - indexed name, uncompressed value */
    };
    /* clang-format on */

    /* Decode */
    ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input))));

    /* Validate */
    struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode);
    ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/));
    ASSERT_FALSE(frame->headers_malformed);
    ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers));
    ASSERT_SUCCESS(s_check_header(frame, 0, "user-agent", "test", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_TRAILING, frame->header_block_type);
    ASSERT_TRUE(frame->end_stream);
    return AWS_OP_SUCCESS;
}

/* A trailing header can be empty */
H2_DECODER_ON_CLIENT_TEST(h2_decoder_headers_empty_trailer) {
    (void)allocator;
    struct fixture *fixture = ctx;

    /* clang-format off */
    uint8_t input[] = {
        0x00, 0x00, 0x00,           /* Length (24) */
        AWS_H2_FRAME_T_HEADERS,     /* Type (8) */
        AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */
        0x76, 0x54, 0x32, 0x10,     /* Reserved (1) | Stream Identifier (31) */
        /* HEADERS - none */
    };
    /* clang-format on */

    /* Decode */
    ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input))));

    /* Validate */
    struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode);
    ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/));
    ASSERT_FALSE(frame->headers_malformed);
    ASSERT_UINT_EQUALS(0, aws_http_headers_count(frame->headers));
    ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_TRAILING, frame->header_block_type);
    ASSERT_TRUE(frame->end_stream);
    return AWS_OP_SUCCESS;
}

/* HEADERS must specify a valid stream-id */
H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_headers_requires_stream_id) {
    (void)allocator;
    struct fixture *fixture = ctx;

    /* clang-format off */
    uint8_t input[] = {
        0x00, 0x00, 0x05,           /* Length (24) */
        AWS_H2_FRAME_T_HEADERS,     /* Type (8) */
        AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */
        0x00, 0x00, 0x00, 0x00,     /* Reserved (1) | Stream Identifier (31) */
        /* HEADERS */
        0x48, 0x03, '3', '0', '2'   /* ":status: 302" - indexed name, uncompressed value */
    };
    /* clang-format on */

    /* Decode */
    ASSERT_H2ERR_ERROR(
        AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input))));
    return
AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_headers_payload_too_small_for_padding) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PRIORITY | AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x02, /* Pad Length (8) - F_PADDED */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) - F_PRIORITY*/ 0x09, /* Weight (8) - F_PRIORITY */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ 0x00, 0x00 /* Padding (*) - F_PADDED */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_headers_payload_too_small_for_priority) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PRIORITY | AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x02, /* Pad Length (8) - F_PADDED */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) - F_PRIORITY*/ 0x09, /* Weight (8) - F_PRIORITY */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ 0x00, 0x00 /* Padding (*) - F_PADDED */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Message is malformed if a header-name is blank. * A malformed message is a Stream Error, not a Connection Error, so the decoder should continue */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_headers_blank_name) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x09, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ 0x40, 0x00, 0x01, 'a', /* ": a" - literal blank name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Message is malformed if a header-name has illegal characters. 
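 * (The header block below uses ',' as the field name, which is not a legal header-name character.)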
* A malformed message is a Stream Error, not a Connection Error, so the decoder should continue */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_headers_illegal_name) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 10, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ 0x40, 0x01, ',', 0x01, 'a', /* ",: a" - literal name with illegal character */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Message is malformed if server receives a response. * A malformed message is a Stream Error, not a Connection Error, so the decoder should continue */ H2_DECODER_ON_SERVER_TEST(h2_decoder_malformed_headers_response_to_server) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Message is malformed if client cannot receive requests in HEADERS * (though they can get requests in PUSH_PROMISE frames). * A malformed message is a Stream Error, not a Connection Error, so the decoder should continue */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_headers_request_to_client) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x03, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x82, /* ":method: GET" - indexed */ 0x86, /* ":scheme: http" - indexed */ 0x84, /* ":path: /" - indexed */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 1 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Message is malformed if it contains both request and response pseudo-headers. 
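 * (The header block below mixes the request pseudo-header ':method' with the response pseudo-header ':status'.)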
* A malformed message is a Stream Error, not a Connection Error, so the decoder should continue */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_headers_mixed_pseudoheaders) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x06, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x82, /* ":method: GET" - REQUEST PSEUDO-HEADER */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - RESPONSE PSEUDO-HEADER */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 1 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Message is malformed if pseudo-headers come after regular headers. * A malformed message is a Stream Error, not a Connection Error, so the decoder should continue */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_headers_late_pseudoheaders) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 11, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x7a, 0x04, 't', 'e', 's', 't', /* "user-agent: test" - REGULAR HEADER */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - PSEUDO-HEADER after regular header*/ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 1 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Message is malformed if trailing header does not end stream. */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_headers_trailer_must_end_stream) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS - blank*/ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); ASSERT_FALSE(frame->end_stream); return AWS_OP_SUCCESS; } /* Even if a header-block is malformed, we still process its fields, which may mutate the hpack tables. 
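 * Skipping those fields would let our dynamic table drift out of sync with the peer's, so the second frame below
 * can still index entries that were inserted while decoding the malformed first frame.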
*/ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_header_continues_hpack_parsing) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* FRAME 1 - malformed HEADERS */ 0x00, 0x00, 15, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - stored to dynamic table */ 0x40, 0x01, ',', 0x01, 'a', /* ",: a" - INVALID character - stored to dynamic table */ 0x40, 0x01, 'b', 0x01, 'c', /* "b: c" - stored to dynamic table */ /* So at this point dynamic table should look like: * INDEX NAME VALUE * 62 b c * 63 , a * 64 :status 302 */ /* FRAME 2 - valid HEADERS referencing entry from malformed HEADERS */ 0x00, 0x00, 2, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x03, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0xc0, /* ":status: 302" - indexed from dynamic table */ 0xbe, /* "b: c" - indexed from dynamic table */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ ASSERT_UINT_EQUALS(2, h2_decode_tester_frame_count(&fixture->decode)); /* frame 1 should be malformed */ struct h2_decoded_frame *frame = h2_decode_tester_get_frame(&fixture->decode, 0); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 1 /*stream_id*/)); ASSERT_TRUE(frame->headers_malformed); /* frame 2 should be able to index fields stored by previous malformed frame */ frame = h2_decode_tester_get_frame(&fixture->decode, 1); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 3 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "b", "c", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); return AWS_OP_SUCCESS; } /* Test CONTINUATION frame. 
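 * (The CONTINUATION payload is appended to the open header block and reported as part of a single decoded HEADERS
 * frame, so the validation below finds both header-fields on one frame.)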
 * Decoder requires that a HEADERS or PUSH_PROMISE frame be sent first */
H2_DECODER_ON_CLIENT_TEST(h2_decoder_continuation) {
    (void)allocator;
    struct fixture *fixture = ctx;

    /* clang-format off */
    uint8_t input[] = {
        /* HEADERS FRAME*/
        0x00, 0x00, 0x05,           /* Length (24) */
        AWS_H2_FRAME_T_HEADERS,     /* Type (8) */
        AWS_H2_FRAME_F_END_STREAM,  /* Flags (8) */
        0x76, 0x54, 0x32, 0x10,     /* Reserved (1) | Stream Identifier (31) */
        /* PAYLOAD */
        0x48, 0x03, '3', '0', '2',  /* ":status: 302" - indexed name, uncompressed value */

        /* CONTINUATION FRAME*/
        0x00, 0x00, 0x09,           /* Length (24) */
        AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */
        AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */
        0x76, 0x54, 0x32, 0x10,     /* Reserved (1) | Stream Identifier (31) */
        /* PAYLOAD */
        0x58, 0x07, 'p', 'r', 'i', 'v', 'a', 't', 'e', /* "cache-control: private" */
    };
    /* clang-format on */

    /* Decode */
    ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input))));

    /* Validate */
    struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode);
    ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/));
    ASSERT_FALSE(frame->headers_malformed);
    ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers));
    ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type);
    ASSERT_TRUE(frame->end_stream);
    return AWS_OP_SUCCESS;
}

/* Try setting ALL the flags on CONTINUATION frame.
 * Only END_HEADERS should trigger.
 * Continuation doesn't support PRIORITY and PADDING like HEADERS does, so they should just be ignored */
H2_DECODER_ON_CLIENT_TEST(h2_decoder_continuation_ignores_unknown_flags) {
    (void)allocator;
    struct fixture *fixture = ctx;

    /* clang-format off */
    uint8_t input[] = {
        /* HEADERS FRAME*/
        0x00, 0x00, 0x05,           /* Length (24) */
        AWS_H2_FRAME_T_HEADERS,     /* Type (8) */
        0x0,                        /* Flags (8) */
        0x76, 0x54, 0x32, 0x10,     /* Reserved (1) | Stream Identifier (31) */
        /* PAYLOAD */
        0x48, 0x03, '3', '0', '2',  /* ":status: 302" - indexed name, uncompressed value */

        /* CONTINUATION FRAME*/
        0x00, 0x00, 0x09,           /* Length (24) */
        AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */
        0xFF,                       /* Flags (8) */
        0x76, 0x54, 0x32, 0x10,     /* Reserved (1) | Stream Identifier (31) */
        /* PAYLOAD */
        0x58, 0x07, 'p', 'r', 'i', 'v', 'a', 't', 'e', /* "cache-control: private" */
    };
    /* clang-format on */

    /* Decode */
    ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input))));

    /* Validate */
    struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode);
    ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/));
    ASSERT_FALSE(frame->headers_malformed);
    ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers));
    ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE));
    ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type);
    return AWS_OP_SUCCESS;
}

/* Test that we can handle a header-field whose encoding is spread across multiple frames.
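 * (The HPACK decoder must carry its partial decoding state for the split string across the frame boundary.)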
* Throw some padding in to make it extra complicated */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_continuation_header_field_spans_frames) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x06, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x02, /* Pad Length (8) - F_PADDED */ 0x48, 0x03, '3', /* ":status: 302" - beginning 3/5 bytes encoded in this frame. */ 0x00, 0x00, /* Padding (*) - F_PADDED */ /* CONTINUATION FRAME*/ 0x00, 0x00, 0x02, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ '0', '2', /* :status: 302" - last 2/5 bytes encoded in this frame*/ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); ASSERT_FALSE(frame->end_stream); return AWS_OP_SUCCESS; } /* Test having multiple CONTINUATION frames in a row */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_continuation_many_frames) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ /* CONTINUATION FRAME*/ 0x00, 0x00, 0x09, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x58, 0x07, 'p', 'r', 'i', 'v', 'a', 't', 'e', /* "cache-control: private" */ /* CONTINUATION FRAME*/ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x40, 0x02, 'h', 'i', 0x03, 'm', 'o', 'm', /* "hi: mom" */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, "hi", "mom", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Test having HEADERS and 
CONTINUATION frames with empty header-block-fragments */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_continuation_empty_payloads) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ /* CONTINUATION FRAME*/ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ /* CONTINUATION FRAME*/ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_INT_EQUALS(AWS_HTTP_HEADER_BLOCK_MAIN, frame->header_block_type); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; } /* Once a header-block starts, it's illegal for any frame but a CONTINUATION on that same stream to arrive. * This test sends a different frame type next */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_continuation_frame_expected) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ /* HEADERS FRAME*/ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x12, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Once a header-block starts, it's illegal for any frame but a CONTINUATION on that same stream to arrive. 
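 * (Per RFC 7540 section 4.3, this must be treated as a connection error of type PROTOCOL_ERROR.)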
* This test sends a different stream-id next */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_continuation_frame_same_stream_expected) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ /* CONTINUATION FRAME*/ 0x00, 0x00, 0x09, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x12, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x58, 0x07, 'p', 'r', 'i', 'v', 'a', 't', 'e', /* "cache-control: private" */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* It's an error for a header-block to end with a partially decoded header-field */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_partial_header) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x03, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x48, 0x03, '3', /* ":status: 302" - Note that final 2 characters are not encoded */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_COMPRESSION_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Ensure that random HPACK decoding errors are reported as ERROR_COMPRESSION */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_bad_hpack_data) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 32, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* indexed header field, with index bigger than 64bits */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_COMPRESSION_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test PRIORITY frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_priority) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_PRIORITY, /* Type (8) */ 0x00, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PRIORITY */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) */ 0x09, /* Weight (8) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Our implementation currently chooses to ignore PRIORITY frames, so no callbacks should have fired */ ASSERT_UINT_EQUALS(0, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } /* Unknown flags should be ignored. PRIORITY frames don't have any flags. 
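 * The decoder below still parses and ignores the frame exactly as it does when the flags are 0, so no frame is
 * recorded.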
*/ H2_DECODER_ON_CLIENT_TEST(h2_decoder_priority_ignores_unknown_flags) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_PRIORITY, /* Type (8) */ 0xFF, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PRIORITY */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) */ 0x09, /* Weight (8) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Our implementation currently chooses to ignore PRIORITY frames, so no callbacks should have fired */ ASSERT_UINT_EQUALS(0, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_priority_requires_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_PRIORITY, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PRIORITY */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) */ 0x09, /* Weight (8) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test PRIORITY frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_priority_payload_too_small) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_PRIORITY, /* Type (8) */ 0x00, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PRIORITY */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) */ /* Weight (8) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test PRIORITY frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_priority_payload_too_large) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x06, /* Length (24) */ AWS_H2_FRAME_T_PRIORITY, /* Type (8) */ 0x00, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PRIORITY */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) */ 0x09, /* Weight (8) */ 0x00, /* TOO MUCH PAYLOAD*/ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test RST_STREAM frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_rst_stream) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_RST_STREAM, /* Type (8) */ 0x00, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* RST_STREAM */ 0xFF, 0xEE, 0xDD, 0xCC, /* Error Code (32) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_RST_STREAM, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(0xFFEEDDCC, frame->error_code); return AWS_OP_SUCCESS; } /* Unknown flags 
should be ignored. RST_STREAM frame doesn't support any flags */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_rst_stream_ignores_unknown_flags) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_RST_STREAM, /* Type (8) */ 0xFF, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* RST_STREAM */ 0xFF, 0xEE, 0xDD, 0xCC, /* Error Code (32) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_RST_STREAM, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(0xFFEEDDCC, frame->error_code); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_rst_stream_requires_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_RST_STREAM, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* RST_STREAM */ 0xFF, 0xEE, 0xDD, 0xCC, /* Error Code (32) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Payload must be 4 bytes exactly */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_rst_stream_payload_too_small) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x03, /* Length (24) */ AWS_H2_FRAME_T_RST_STREAM, /* Type (8) */ 0x00, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* RST_STREAM */ 0xFF, 0xEE, 0xDD, /* Error Code (32) <-- missing one byte */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Payload must be 4 bytes exactly */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_rst_stream_payload_too_large) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_RST_STREAM, /* Type (8) */ 0x00, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* RST_STREAM */ 0xFF, 0xEE, 0xDD, 0xCC, /* Error Code (32) */ 0x00, /* TOO MUCH PAYLOAD */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test SETTINGS frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_settings) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 12, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ 0x00, 0x02, /* Identifier (16) */ 0x00, 0x00, 0x00, 0x01, /* Value (32) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
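 * Each SETTINGS entry on the wire is 6 octets (a 16-bit identifier followed by a 32-bit value), so the 12-byte payload above decodes to exactly two settings.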
*/ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_FALSE(frame->ack); ASSERT_UINT_EQUALS(2, aws_array_list_length(&frame->settings)); struct aws_http2_setting setting; aws_array_list_get_at(&frame->settings, &setting, 0); ASSERT_UINT_EQUALS(0x0005, setting.id); ASSERT_UINT_EQUALS(0x00FFFFFF, setting.value); aws_array_list_get_at(&frame->settings, &setting, 1); ASSERT_UINT_EQUALS(0x0002, setting.id); ASSERT_UINT_EQUALS(0x00000001, setting.value); return AWS_OP_SUCCESS; } /* Test SETTINGS frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_settings_empty) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_FALSE(frame->ack); ASSERT_UINT_EQUALS(0, aws_array_list_length(&frame->settings)); return AWS_OP_SUCCESS; } /* SETTINGS frame with ACK flag set */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_settings_ack) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ AWS_H2_FRAME_F_ACK, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_TRUE(frame->ack); ASSERT_UINT_EQUALS(0, aws_array_list_length(&frame->settings)); return AWS_OP_SUCCESS; } /* Decoder must ignore settings with unknown IDs */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_settings_ignores_unknown_ids) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 18, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x00, /* Identifier (16) <-- SHOULD IGNORE. 0 is invalid ID */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ 0x00, 0x01, /* Identifier (16) <-- This is OK */ 0x00, 0x00, 0x00, 0x01, /* Value (32) */ 0x00, AWS_HTTP2_SETTINGS_END_RANGE, /* Identifier (16) <-- SHOULD IGNORE */ 0x00, 0x00, 0x00, 0x01, /* Value (32) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
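 * RFC 7540 section 6.5.2 requires a receiver to ignore settings with unknown or unsupported identifiers, so only the middle entry (identifier 0x1) should be reported.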
*/ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_FALSE(frame->ack); ASSERT_UINT_EQUALS(1, aws_array_list_length(&frame->settings)); struct aws_http2_setting setting; aws_array_list_get_at(&frame->settings, &setting, 0); ASSERT_UINT_EQUALS(0x0001, setting.id); ASSERT_UINT_EQUALS(0x00000001, setting.value); return AWS_OP_SUCCESS; } /* Unexpected flags should be ignored. * SETTINGS frames only support ACK */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_settings_ignores_unknown_flags) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0xFF, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_TRUE(frame->ack); ASSERT_UINT_EQUALS(0, aws_array_list_length(&frame->settings)); return AWS_OP_SUCCESS; } /* Error if SETTINGS ACK frame has any individual settings in it */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_settings_ack_with_data) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x06, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ AWS_H2_FRAME_F_ACK, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_settings_forbids_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 12, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ 0x00, 0x02, /* Identifier (16) */ 0x00, 0x00, 0x00, 0x01, /* Value (32) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Error if SETTINGS payload is not a multiple of 6 */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_settings_payload_size) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ 0x00, 0x02, /* Identifier (16) */ /* Value (32) <-- MISSING */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Error if SETTINGS has invalid values */ 
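/* (ENABLE_PUSH must be 0 or 1, INITIAL_WINDOW_SIZE must not exceed 2^31-1, and MAX_FRAME_SIZE must lie between 2^14 and 2^24-1;
 * per RFC 7540 section 6.5.2 the first and third violations are PROTOCOL_ERROR and the second is FLOW_CONTROL_ERROR,
 * which is what the next three tests expect.) */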
H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_settings_invalid_values_enable_push) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 12, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ 0x00, 0x02, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) <-- INVALID value FOR ENABLE_PUSH */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_settings_invalid_values_initial_window_size) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 12, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ 0x00, 0x04, /* Identifier (16) */ 0x80, 0xFF, 0xFF, 0xFF, /* Value (32) <-- INVALID value FOR INITIAL_WINDOW_SIZE */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FLOW_CONTROL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_settings_invalid_values_max_frame_size) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 12, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0xFF, 0xFF, 0xFF, /* Value (32) */ 0x00, 0x05, /* Identifier (16) */ 0x00, 0x00, 0x00, 0x00, /* Value (32) <-- INVALID value FOR MAX_FRAME_SIZE */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_push_promise) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x07, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x80, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", "/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); 
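    /* The single-byte payload entries 0x82, 0x87, and 0x85 are indexed header fields referring to entries 2, 7,
     * and 5 of the HPACK static table (RFC 7541 Appendix A), which is why they decode to the three request
     * pseudo-headers checked above. */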
ASSERT_FALSE(frame->end_stream); return AWS_OP_SUCCESS; } /* Unknown flags should be ignored. * PUSH_PROMISE supports END_HEADERS and PADDED */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_push_promise_ignores_unknown_flags) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 10, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ 0xFF, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x02, /* Pad Length (8) - F_PADDED */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ 0x00, 0x00, /* Padding (*) - F_PADDED */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", "/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_FALSE(frame->end_stream); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_push_promise_continuation) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* PUSH_PROMISE FRAME */ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x02, /* Pad Length (8) - F_PADDED */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ 0x00, 0x00, /* Padding (*) - F_PADDED */ /* CONTINUATION FRAME - empty payload just for kicks */ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ /* CONTINUATION FRAME */ 0x00, 0x00, 0x02, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); ASSERT_FALSE(frame->headers_malformed); ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", 
"/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_FALSE(frame->end_stream); return AWS_OP_SUCCESS; } /* Once a header-block starts, it's illegal for any frame but a CONTINUATION on that same stream to arrive. * This test sends a different frame type next */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_push_promise_continuation_expected) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* PUSH_PROMISE FRAME */ 0x00, 0x00, 0x07, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PAYLOAD */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ /* DATA FRAME <-- ERROR should be CONTINUATION because PUSH_PROMISE lacked END_HEADERS flag */ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_push_promise_requires_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x07, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_push_promise_must_be_request_1) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x09, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - RESPONSE pseudo-header is incorrect */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); ASSERT_TRUE(frame->headers_malformed); return AWS_OP_SUCCESS; } /* Malformed if PUSH_PROMISE missing request pseudo-headers */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_malformed_push_promise_must_be_request_2) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE, /* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* 
Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ /* No headers */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); ASSERT_TRUE(frame->headers_malformed); return AWS_OP_SUCCESS; } /* Promised stream ID must be valid */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_push_promise_requires_promised_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x07, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Promised stream will be invalid, if enable_push is set to 0 */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_push_promise_with_enable_push_0) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x07, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ }; /* clang-format on */ aws_h2_decoder_set_setting_enable_push(fixture->decode.decoder, (uint32_t)0); /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test PING frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_ping) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g' /* Opaque Data (64) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
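 * A PING frame carries exactly 8 octets of opaque data, which the peer must echo back in its ACK (RFC 7540 section 6.7).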
*/ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PING, 0x0 /*stream_id*/)); ASSERT_BIN_ARRAYS_EQUALS("pingpong", AWS_HTTP2_PING_DATA_SIZE, frame->ping_opaque_data, AWS_HTTP2_PING_DATA_SIZE); ASSERT_FALSE(frame->ack); return AWS_OP_SUCCESS; } /* Test PING frame with ALL flags set (ACK is only supported flag) */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_ping_ack) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0xFF, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g' /* Opaque Data (64) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PING, 0x0 /*stream_id*/)); ASSERT_BIN_ARRAYS_EQUALS("pingpong", AWS_HTTP2_PING_DATA_SIZE, frame->ping_opaque_data, AWS_HTTP2_PING_DATA_SIZE); ASSERT_TRUE(frame->ack); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_ping_forbids_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PING */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g' /* Opaque Data (64) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* PING payload MUST be 8 bytes */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_ping_payload_too_small) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING */ /* Opaque Data (64) <-- MISSING */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* PING payload MUST be 8 bytes */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_ping_payload_too_large) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x09, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g', 0x00 /* Opaque Data (64) <-- ERROR: TOO LARGE */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test GOAWAY frame */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_goaway) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 11, /* Length (24) */ AWS_H2_FRAME_T_GOAWAY, /* Type (8) */ 0xFF, /* Flags (8) <-- set all flags, all of which should be ignored */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* GOAWAY */ 0xFF, 0x00, 0x00, 0x01, /* Reserved (1) | Last Stream ID (31) */ 0xFE, 0xED, 0xBE, 0xEF, /* Error Code (32) */ 'b', 'y', 'e' 
/* Additional Debug Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_GOAWAY, 0x0 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->goaway_last_stream_id); ASSERT_UINT_EQUALS(0xFEEDBEEF, frame->error_code); ASSERT_BIN_ARRAYS_EQUALS("bye", 3, frame->data.buffer, frame->data.len); return AWS_OP_SUCCESS; } /* Test GOAWAY frame with no debug data */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_goaway_empty) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_GOAWAY, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* GOAWAY */ 0xFF, 0x00, 0x00, 0x01, /* Reserved (1) | Last Stream ID (31) */ 0xFE, 0xED, 0xBE, 0xEF, /* Error Code (32) */ /* Additional Debug Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_GOAWAY, 0x0 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->goaway_last_stream_id); ASSERT_UINT_EQUALS(0xFEEDBEEF, frame->error_code); ASSERT_BIN_ARRAYS_EQUALS("", 0, frame->data.buffer, frame->data.len); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_goaway_forbids_stream_id) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 11, /* Length (24) */ AWS_H2_FRAME_T_GOAWAY, /* Type (8) */ 0xFF, /* Flags (8) <-- set all flags, all of which should be ignored */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* GOAWAY */ 0xFF, 0x00, 0x00, 0x01, /* Reserved (1) | Last Stream ID (31) */ 0xFE, 0xED, 0xBE, 0xEF, /* Error Code (32) */ 'b', 'y', 'e' /* Additional Debug Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_goaway_payload_too_small) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_GOAWAY, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* GOAWAY */ /* Reserved (1) | Last Stream ID (31) <-- MISSING */ /* Error Code (32) <-- MISSING */ /* Additional Debug Data (*) */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Test WINDOW_UPDATE frame on stream 0 */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_window_update_connection) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_WINDOW_UPDATE,/* Type (8) */ 0xFF, /* Flags (8) <-- set all flags, all of which should be ignored */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* WINDOW_UPDATE */ 0xFF, 0x00, 0x00, 0x01, /* Reserved (1) | Window Size Increment (31) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, 
sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_WINDOW_UPDATE, 0x0 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->window_size_increment); return AWS_OP_SUCCESS; } /* Test WINDOW_UPDATE frame on a specific stream. * This the only frame type whose stream-id can be zero OR non-zero*/ H2_DECODER_ON_CLIENT_TEST(h2_decoder_window_update_stream) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_WINDOW_UPDATE,/* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* WINDOW_UPDATE */ 0xFF, 0x00, 0x00, 0x01, /* Reserved (1) | Window Size Increment (31) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_WINDOW_UPDATE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->window_size_increment); return AWS_OP_SUCCESS; } /* WINDOW_UPDATE payload must always be 4 bytes */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_window_update_payload_too_small) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_WINDOW_UPDATE,/* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* WINDOW_UPDATE */ /* Reserved (1) | Window Size Increment (31) <-- MISSING */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* WINDOW_UPDATE payload must always be 4 bytes */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_err_window_update_payload_too_large) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_WINDOW_UPDATE,/* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* WINDOW_UPDATE */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Window Size Increment (31) */ 0x00, 0x00, 0x00, 0x02, /* ERROR TOO BIG */ }; /* clang-format on */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_FRAME_SIZE_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Frames of unknown type must be ignored */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_unknown_frame_type_ignored) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* UNKNOWN FRAME WITHOUT FLAGS OR STREAM-ID */ 0x00, 0x00, 0x04, /* Length (24) */ 0xFF, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ 0xFF, 0xFF, 0xFF, 0xFF, /* Payload (*) */ /* UNKNOWN FRAME WITH FLAGS AND STREAM-ID */ 0x00, 0x00, 0x04, /* Length (24) */ 0xFF, /* Type (8) */ 0xFF, /* Flags (8) */ 0xFF, 0xFF, 0xFF, 0xFF, /* Reserved (1) | Stream Identifier (31) */ 0xFF, 0xFF, 0xFF, 0xFF, /* Payload (*) */ /* UNKNOWN FRAME WITH NO PAYLOAD */ 0x00, 0x00, 0x00, /* Length (24) */ 0xFF, /* Type (8) */ 0xFF, /* Flags (8) */ 0xFF, 0xFF, 0xFF, 0xFF, /* Reserved (1) | Stream Identifier (31) */ /* Payload (*) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, 
aws_byte_cursor_from_array(input, sizeof(input)))); /* No callbacks should have fired about any of these frames */ ASSERT_UINT_EQUALS(0, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } static int s_get_finished_frame_i( struct fixture *fixture, size_t i, enum aws_h2_frame_type type, uint32_t stream_id, struct h2_decoded_frame **out_frame) { ASSERT_TRUE(i < h2_decode_tester_frame_count(&fixture->decode)); *out_frame = h2_decode_tester_get_frame(&fixture->decode, i); ASSERT_SUCCESS(h2_decoded_frame_check_finished(*out_frame, type, stream_id)); return AWS_OP_SUCCESS; } /* Test processing many different frame types in a row. * (most other tests just operate on 1 frame) */ H2_DECODER_ON_CLIENT_TEST(h2_decoder_many_frames_in_a_row) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* HEADERS FRAME*/ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 0x48, 0x03, '3', '0', '2', /* ":status: 302" - indexed name, uncompressed value */ /* CONTINUATION FRAME*/ 0x00, 0x00, 0x09, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 0x58, 0x07, 'p', 'r', 'i', 'v', 'a', 't', 'e', /* "cache-control: private" */ /* SETTINGS ACK FRAME*/ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ AWS_H2_FRAME_F_ACK, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE FRAME */ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 0x80, 0x00, 0x00, 0x02, /* Reserved (1) | Promised Stream ID (31) */ 0x82, /* ":method: GET" - indexed header field */ /* CONTINUATION FRAME */ 0x00, 0x00, 0x02, /* Length (24) */ AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 0x87, /* ":scheme: https" - indexed header field */ 0x85, /* ":path: /index.html" - indexed header field */ /* PRIORITY FRAME */ 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_PRIORITY, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 0x00, 0x00, 0x00, 0x02, /* Exclusive (1) | Stream Dependency (31) */ 0x09, /* Weight (8) */ /* WINDOW_UPDATE FRAME */ 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_WINDOW_UPDATE,/* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* WINDOW_UPDATE */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Window Size Increment (31) */ /* DATA FRAME */ 0x00, 0x00, 0x01, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 'h', /* Data (*) */ /* UNKNOWN FRAME */ 0x00, 0x00, 0x01, /* Length (24) */ 0xFF, /* Type (8) */ 0xFF, /* Flags (8) */ 0xFF, 0xFF, 0xFF, 0xFF, /* Reserved (1) | Stream Identifier (31) */ /* Payload (*) */ 'z', /* RST_STREAM FRAME */ 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_RST_STREAM, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x02, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 0xFF, 0xEE, 0xDD, 0xCC, /* Error 
Code (32) */ /* GOAWAY FRAME */ 0x00, 0x00, 11, /* Length (24) */ AWS_H2_FRAME_T_GOAWAY, /* Type (8) */ 0xFF, /* Flags (8) <-- set all flags, all of which should be ignored */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Last Stream ID (31) */ 0xFE, 0xED, 0xBE, 0xEF, /* Error Code (32) */ 'b', 'y', 'e', /* Additional Debug Data (*) */ /* PING ACK FRAME */ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ AWS_H2_FRAME_F_ACK, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g', /* Opaque Data (64) */ }; /* clang-format on */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); size_t frame_i = 0; struct h2_decoded_frame *frame; /* Validate HEADERS (and its CONTINUATION) */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_HEADERS, 0x1 /*stream-id*/, &frame)); ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_FALSE(frame->end_stream); /* Validate SETTINGS ACK */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_SETTINGS, 0x0 /*stream-id*/, &frame)); ASSERT_TRUE(frame->ack); /* Validate PUSH_PROMISE (and its CONTINUATION) */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream-id*/, &frame)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", "/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_FALSE(frame->end_stream); /* PRIORITY frame is ignored by decoder */ /* Validate WINDOW_UPDATE */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_WINDOW_UPDATE, 0x0 /*stream-id*/, &frame)); ASSERT_UINT_EQUALS(0x1, frame->window_size_increment); /* Validate DATA */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_DATA, 0x1 /*stream-id*/, &frame)); ASSERT_BIN_ARRAYS_EQUALS("h", 1, frame->data.buffer, frame->data.len); ASSERT_TRUE(frame->end_stream); /* UNKNOWN frame is ignored */ /* Validate RST_STREAM */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_RST_STREAM, 0x2 /*stream-id*/, &frame)); ASSERT_UINT_EQUALS(0xFFEEDDCC, frame->error_code); /* Validate GOAWAY */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_GOAWAY, 0x0 /*stream-id*/, &frame)); ASSERT_UINT_EQUALS(0x1, frame->goaway_last_stream_id); ASSERT_UINT_EQUALS(0xFEEDBEEF, frame->error_code); ASSERT_BIN_ARRAYS_EQUALS("bye", 3, frame->data.buffer, frame->data.len); /* Validate PING */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_PING, 0x0 /*stream-id*/, &frame)); ASSERT_TRUE(frame->ack); /* Ensure no further frames reported */ ASSERT_UINT_EQUALS(frame_i, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } /* Test that client can decode a proper connection preface sent by the server. 
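 * (RFC 7540 section 3.5: the SETTINGS frame that opens the server preface may be empty and must precede any other frame.)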
* A server connection preface is just a settings frame */ H2_DECODER_ON_CLIENT_PREFACE_TEST(h2_decoder_preface_from_server) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* SETTINGS FRAME - empty settings frame is acceptable in preface */ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING FRAME - send another frame to be sure decoder is now functioning normally */ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g' /* Opaque Data (64) */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ ASSERT_UINT_EQUALS(2, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame; ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 0, AWS_H2_FRAME_T_SETTINGS, 0 /*stream-id*/, &frame)); ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 1, AWS_H2_FRAME_T_PING, 0 /*stream-id*/, &frame)); return AWS_OP_SUCCESS; } /* The server must send a SETTINGS frame first. * It's an error to send any other frame type */ H2_DECODER_ON_CLIENT_PREFACE_TEST(h2_decoder_err_bad_preface_from_server_1) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* PING FRAME - but should be SETTINGS */ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g' /* Opaque Data (64) */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* The server must send a SETTINGS frame first. * It's an error if SETTINGS frame is an ACK */ H2_DECODER_ON_CLIENT_PREFACE_TEST(h2_decoder_err_bad_preface_from_server_2) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* SETTINGS FRAME */ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ AWS_H2_FRAME_F_ACK, /* Flags (8) <-- Preface SETTINGS should not have ACK */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* The server mustn't send the "client connection preface string" */ H2_DECODER_ON_CLIENT_PREFACE_TEST(h2_decoder_err_bad_preface_from_server_3) { (void)allocator; struct fixture *fixture = ctx; const struct aws_byte_cursor input = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"); /* Decode */ ASSERT_H2ERR_ERROR(AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, input)); return AWS_OP_SUCCESS; } /* Test that client can decode a proper connection preface sent by the client. 
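 * (A client preface begins with the 24-octet magic string "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" followed by a SETTINGS frame; RFC 7540 section 3.5.)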
*/ H2_DECODER_ON_SERVER_PREFACE_TEST(h2_decoder_preface_from_client) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* Client connection preface string */ 'P','R','I',' ','*',' ','H','T','T','P','/','2','.','0','\r','\n','\r','\n','S','M','\r','\n','\r','\n', /* SETTINGS FRAME - empty settings frame is acceptable in preface */ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING FRAME - send another frame to be sure decoder is now functioning normally */ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g' /* Opaque Data (64) */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ ASSERT_UINT_EQUALS(2, h2_decode_tester_frame_count(&fixture->decode)); struct h2_decoded_frame *frame; ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 0, AWS_H2_FRAME_T_SETTINGS, 0 /*stream-id*/, &frame)); ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 1, AWS_H2_FRAME_T_PING, 0 /*stream-id*/, &frame)); return AWS_OP_SUCCESS; } /* Should fail because we're not sending the "client connection preface string" */ H2_DECODER_ON_SERVER_PREFACE_TEST(h2_decoder_err_bad_preface_from_client_1) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* SETTINGS FRAME - empty settings frame is acceptable in preface */ 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x00, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } /* Should fail because we're sending something different from (and shorter than) "client connection preface string" */ H2_DECODER_ON_SERVER_PREFACE_TEST(h2_decoder_err_bad_preface_from_client_2) { (void)allocator; struct fixture *fixture = ctx; /* This is the shortest valid HTTP query I can come up with */ struct aws_byte_cursor input = aws_byte_cursor_from_c_str("GET / HTTP/1.0\r\n\r\n"); /* Decode */ ASSERT_H2ERR_ERROR(AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, input)); return AWS_OP_SUCCESS; } /* Should fail because we're not sending SETTINGS as the first frame */ H2_DECODER_ON_SERVER_PREFACE_TEST(h2_decoder_err_bad_preface_from_client_3) { (void)allocator; struct fixture *fixture = ctx; /* clang-format off */ uint8_t input[] = { /* Client connection preface string */ 'P','R','I',' ','*',' ','H','T','T','P','/','2','.','0','\r','\n','\r','\n','S','M','\r','\n','\r','\n', /* PING FRAME - but should be SETTINGS */ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* Payload */ 'p', 'i', 'n', 'g', 'p', 'o', 'n', 'g' /* Opaque Data (64) */ }; /* clang-format on */ /* Decode */ ASSERT_H2ERR_ERROR( AWS_HTTP2_ERR_PROTOCOL_ERROR, s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h2_encoder.c000066400000000000000000000420151456575232400245660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "h2_test_helper.h" #include #include #include static int s_fixture_init(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); return AWS_OP_SUCCESS; } static int s_fixture_clean_up(struct aws_allocator *allocator, int setup_res, void *ctx) { (void)allocator; (void)ctx; (void)setup_res; aws_http_library_clean_up(); return AWS_OP_SUCCESS; } #define TEST_CASE(NAME) \ AWS_TEST_CASE_FIXTURE(NAME, s_fixture_init, s_test_##NAME, s_fixture_clean_up, NULL); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_STATIC_HEADER(_key, _value, _behavior) \ { \ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_key), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \ .compression = AWS_HTTP_HEADER_COMPRESSION_##_behavior, \ } /* Run the given frame's encoder and check that it outputs the expected bytes */ static int s_encode_frame( struct aws_allocator *allocator, struct aws_h2_frame *frame, const uint8_t *expected, size_t expected_size) { struct aws_h2_frame_encoder encoder; ASSERT_SUCCESS(aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/)); struct aws_byte_buf buffer; /* Allocate more room than necessary, easier to debug the full output than a failed aws_h2_encode_frame() call */ ASSERT_SUCCESS(aws_byte_buf_init(&buffer, allocator, expected_size * 2)); bool frame_complete; ASSERT_SUCCESS(aws_h2_encode_frame(&encoder, frame, &buffer, &frame_complete)); ASSERT_BIN_ARRAYS_EQUALS(expected, expected_size, buffer.buffer, buffer.len); ASSERT_UINT_EQUALS(true, frame_complete); aws_byte_buf_clean_up(&buffer); aws_h2_frame_encoder_clean_up(&encoder); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_data) { (void)ctx; struct aws_h2_frame_encoder encoder; ASSERT_SUCCESS(aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/)); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 1024)); struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("hello"); struct aws_input_stream *body = aws_input_stream_new_from_cursor(allocator, &body_src); ASSERT_NOT_NULL(body); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 0x02, /* Pad Length (8) - F_PADDED */ 'h', 'e', 'l', 'l', 'o', /* Data (*) */ 0x00, 0x00 /* Padding (*) - F_PADDED */ }; /* clang-format on */ bool body_complete; bool body_stalled; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &encoder, 0x76543210 /*stream_id*/, body, true /*body_ends_stream*/, 2 /*pad_length*/, &stream_window_size_peer, &connection_window_size_peer, &output, &body_complete, &body_stalled)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output.buffer, output.len); ASSERT_TRUE(body_complete); ASSERT_FALSE(body_stalled); aws_byte_buf_clean_up(&output); aws_input_stream_release(body); aws_h2_frame_encoder_clean_up(&encoder); return AWS_OP_SUCCESS; } /* Test that we set body_stalled to true if the aws_input_stream is unable to fill the available space */ TEST_CASE(h2_encoder_data_stalled) { (void)ctx; struct aws_h2_frame_encoder encoder; ASSERT_SUCCESS(aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/)); struct aws_byte_buf 
output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 1024)); struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("hello"); struct aws_input_stream *body = aws_input_stream_new_tester(allocator, body_src); ASSERT_NOT_NULL(body); /* Run encoder where body produces only 1 byte */ aws_input_stream_tester_set_max_bytes_per_read(body, 1); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 0x01, /* Length (24) */ AWS_H2_FRAME_T_DATA, /* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* DATA */ 'h', /* Data (*) */ }; /* clang-format on */ bool body_complete; bool body_stalled; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &encoder, 0x76543210 /*stream_id*/, body, true /*body_ends_stream*/, 0 /*pad_length*/, &stream_window_size_peer, &connection_window_size_peer, &output, &body_complete, &body_stalled)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output.buffer, output.len); ASSERT_FALSE(body_complete); ASSERT_TRUE(body_stalled); aws_byte_buf_clean_up(&output); aws_input_stream_release(body); aws_h2_frame_encoder_clean_up(&encoder); return AWS_OP_SUCCESS; } /* Run encoder where body produces zero bytes. The encoder should not even bother writing a frame. */ TEST_CASE(h2_encoder_data_stalled_completely) { (void)ctx; struct aws_h2_frame_encoder encoder; ASSERT_SUCCESS(aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/)); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 1024)); struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("hello"); struct aws_input_stream *body = aws_input_stream_new_tester(allocator, body_src); ASSERT_NOT_NULL(body); aws_input_stream_tester_set_max_bytes_per_read(body, 0); bool body_complete; bool body_stalled; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &encoder, 0x76543210 /*stream_id*/, body, true /*body_ends_stream*/, 0 /*pad_length*/, &stream_window_size_peer, &connection_window_size_peer, &output, &body_complete, &body_stalled)); ASSERT_FALSE(body_complete); ASSERT_TRUE(body_stalled); ASSERT_UINT_EQUALS(0, output.len); /* clean up */ aws_byte_buf_clean_up(&output); aws_input_stream_release(body); aws_h2_frame_encoder_clean_up(&encoder); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_headers) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); struct aws_http_header h = DEFINE_STATIC_HEADER(":status", "302", USE_CACHE); ASSERT_SUCCESS(aws_http_headers_add_header(headers, &h)); struct aws_h2_frame_priority_settings priority = { .stream_dependency_exclusive = true, .stream_dependency = 0x01234567, .weight = 9, }; struct aws_h2_frame *frame = aws_h2_frame_new_headers( allocator, 0x76543210 /*stream_id*/, headers, true /*end_stream*/, 2 /*pad_length*/, &priority); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 12, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_PRIORITY, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* HEADERS */ 0x02, /* Pad Length (8) - F_PADDED */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) - F_PRIORITY*/ 0x09, /* Weight (8) - 
F_PRIORITY */ 0x48, 0x82, 0x64, 0x02, /* ":status: 302" - indexed name, huffman-compressed value */ 0x00, 0x00 /* Padding (*) - F_PADDED */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_priority) { (void)ctx; struct aws_h2_frame_priority_settings priority = { .stream_dependency_exclusive = true, .stream_dependency = 0x01234567, .weight = 9, }; struct aws_h2_frame *frame = aws_h2_frame_new_priority(allocator, 0x76543210 /*stream_id*/, &priority); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 0x05, /* Length (24) */ AWS_H2_FRAME_T_PRIORITY, /* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* PRIORITY */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) */ 0x09, /* Weight (8) */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_rst_stream) { (void)ctx; struct aws_h2_frame *frame = aws_h2_frame_new_rst_stream(allocator, 0x76543210 /*stream_id*/, 0xFEEDBEEF /*error_code*/); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_RST_STREAM, /* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* RST_STREAM */ 0xFE, 0xED, 0xBE, 0xEF, /* Error Code (32) */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_settings) { (void)ctx; struct aws_http2_setting settings[] = { {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 1}, /* real world value */ {.id = 0x0000, .value = 0x00000000}, /* min value */ {.id = 0xFFFF, .value = 0xFFFFFFFF}, /* max value */ }; struct aws_h2_frame *frame = aws_h2_frame_new_settings(allocator, settings, AWS_ARRAY_SIZE(settings), false /*ack*/); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 18, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ 0x00, 0x02, /* Identifier (16) */ 0x00, 0x00, 0x00, 0x01, /* Value (32) */ 0x00, 0x00, /* Identifier (16) */ 0x00, 0x00, 0x00, 0x00, /* Value (32) */ 0xFF, 0xFF, /* Identifier (16) */ 0xFF, 0xFF, 0xFF, 0xFF, /* Value (32) */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_settings_ack) { (void)ctx; struct aws_h2_frame *frame = aws_h2_frame_new_settings(allocator, NULL /*settings_array*/, 0 /*num_settings*/, true /*ack*/); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 0x00, /* Length (24) */ AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ AWS_H2_FRAME_F_ACK, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* SETTINGS */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_push_promise) { (void)ctx; struct aws_http_header headers_array[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), 
DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), }; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); ASSERT_SUCCESS(aws_http_headers_add_array(headers, headers_array, AWS_ARRAY_SIZE(headers_array))); struct aws_h2_frame *frame = aws_h2_frame_new_push_promise( allocator, 0x00000001 /*stream_id*/, 0x76543210 /*promised_stream_id*/, headers, 2 /*pad_length*/); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 24, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x02, /* Pad Length (8) | F_PADDED */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Promised Stream ID (31) */ /* Header Block Fragment (*) (values from RFC-7541 example C.4.1) */ 0x82, 0x86, 0x84, 0x41, 0x8c, 0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff, 0x00, 0x00, /* Padding (*) | F_PADDED*/ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_ping) { (void)ctx; uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7}; struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, true /*ack*/, opaque_data); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ AWS_H2_FRAME_F_ACK, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* Opaque Data (64) */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_goaway) { (void)ctx; struct aws_h2_frame *frame = aws_h2_frame_new_goaway( allocator, 0x77665544 /*last_stream_id*/, 0xFFEEDDCC /*error_code*/, aws_byte_cursor_from_c_str("goodbye") /*debug_data*/); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 15, /* Length (24) */ AWS_H2_FRAME_T_GOAWAY, /* Type (8) */ 0x0, /* Flags (8) */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* GOAWAY */ 0x77, 0x66, 0x55, 0x44, /* Reserved (1) | Last-Stream-ID (31) */ 0xFF, 0xEE, 0xDD, 0xCC, /* Error Code (32) */ 'g','o','o','d','b','y','e',/* Additional Debug Data (*) */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_window_update) { (void)ctx; struct aws_h2_frame *frame = aws_h2_frame_new_window_update(allocator, 0x76543210 /*stream_id*/, 0x7FFFFFFF /*window_size_increment*/); ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { 0x00, 0x00, 0x04, /* Length (24) */ AWS_H2_FRAME_T_WINDOW_UPDATE,/* Type (8) */ 0x0, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ /* WINDOW_UPDATE */ 0x7F, 0xFF, 0xFF, 0xFF, /* Window Size Increment (31) */ }; /* clang-format on */ ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } 
aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_h2_headers.c000066400000000000000000001043461456575232400245700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include enum { S_BUFFER_SIZE = 128 }; struct header_test_fixture; /* Function type used to init and cleanup a fixture */ typedef int(header_init_fn)(struct header_test_fixture *); /* Function used to tear down header instances */ typedef int(header_clean_up_fn)(void *); /* Header compare function */ static int s_header_block_eq( const struct aws_http_headers *l_header_fields, const struct aws_http_headers *r_header_fields) { const size_t l_size = aws_http_headers_count(l_header_fields); const size_t r_size = aws_http_headers_count(r_header_fields); ASSERT_UINT_EQUALS(l_size, r_size); for (size_t i = 0; i < l_size; ++i) { struct aws_http_header l_field; ASSERT_SUCCESS(aws_http_headers_get_index(l_header_fields, i, &l_field)); struct aws_http_header r_field; ASSERT_SUCCESS(aws_http_headers_get_index(r_header_fields, i, &r_field)); ASSERT_INT_EQUALS(l_field.compression, r_field.compression); ASSERT_TRUE(aws_byte_cursor_eq(&l_field.name, &r_field.name)); ASSERT_TRUE(aws_byte_cursor_eq(&l_field.value, &r_field.value)); } return AWS_OP_SUCCESS; } /* Contains all of the information required to run a header's test case */ struct header_test_fixture { header_init_fn *init; header_clean_up_fn *header_clean_up; header_init_fn *teardown; struct aws_allocator *allocator; bool one_byte_at_a_time; /* T: decode one byte at a time. F: decode whole buffer at once */ struct aws_hpack_encoder encoder; struct aws_hpack_decoder decoder; struct aws_http_headers *headers_to_encode; struct aws_byte_buf expected_encoding_buf; struct aws_http_headers *decoded_headers; }; static int s_header_test_before(struct aws_allocator *allocator, void *ctx) { struct header_test_fixture *fixture = ctx; fixture->allocator = allocator; aws_http_library_init(allocator); aws_hpack_encoder_init(&fixture->encoder, allocator, NULL); aws_hpack_decoder_init(&fixture->decoder, allocator, NULL); fixture->headers_to_encode = aws_http_headers_new(allocator); ASSERT_NOT_NULL(fixture->headers_to_encode); ASSERT_SUCCESS(aws_byte_buf_init(&fixture->expected_encoding_buf, allocator, S_BUFFER_SIZE)); fixture->decoded_headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(fixture->decoded_headers); return AWS_OP_SUCCESS; } static int s_header_test_run(struct aws_allocator *allocator, void *ctx) { struct header_test_fixture *fixture = ctx; /* Init the in_header & buffer */ ASSERT_SUCCESS(fixture->init(fixture)); /* Encode */ /* Create the output buffer */ struct aws_byte_buf output_buffer; ASSERT_SUCCESS(aws_byte_buf_init(&output_buffer, allocator, S_BUFFER_SIZE)); /* Encode the headers */ ASSERT_SUCCESS(aws_hpack_encode_header_block(&fixture->encoder, fixture->headers_to_encode, &output_buffer)); /* Compare the encoded output against the expected header block fragment */ ASSERT_BIN_ARRAYS_EQUALS( fixture->expected_encoding_buf.buffer, fixture->expected_encoding_buf.len, output_buffer.buffer, output_buffer.len); /* Decode */ struct aws_byte_cursor payload = aws_byte_cursor_from_buf(&output_buffer); while (payload.len) { struct aws_hpack_decode_result result; if (fixture->one_byte_at_a_time) { struct aws_byte_cursor one_byte_payload = aws_byte_cursor_advance(&payload, 1); ASSERT_SUCCESS(aws_hpack_decode(&fixture->decoder, &one_byte_payload, &result)); ASSERT_UINT_EQUALS(0, 
one_byte_payload.len); } else { ASSERT_SUCCESS(aws_hpack_decode(&fixture->decoder, &payload, &result)); } if (result.type == AWS_HPACK_DECODE_T_HEADER_FIELD) { ASSERT_SUCCESS(aws_http_headers_add_header(fixture->decoded_headers, &result.data.header_field)); } } /* Compare the headers */ ASSERT_SUCCESS(s_header_block_eq(fixture->headers_to_encode, fixture->decoded_headers)); aws_byte_buf_clean_up(&output_buffer); return AWS_OP_SUCCESS; } static int s_header_test_after(struct aws_allocator *allocator, int setup_res, void *ctx) { (void)allocator; if (!setup_res) { struct header_test_fixture *fixture = ctx; /* Tear down the header & buffer */ if (fixture->teardown) { fixture->teardown(fixture); } /* Tear down the fixture */ aws_http_headers_release(fixture->decoded_headers); aws_byte_buf_clean_up(&fixture->expected_encoding_buf); aws_http_headers_release(fixture->headers_to_encode); aws_hpack_decoder_clean_up(&fixture->decoder); aws_hpack_encoder_clean_up(&fixture->encoder); } aws_http_library_clean_up(); return AWS_OP_SUCCESS; } #define HEADER_TEST(t_name, i, t) \ static struct header_test_fixture s_##t_name##_fixture = { \ .init = (i), \ .teardown = (t), \ }; \ AWS_TEST_CASE_FIXTURE(t_name, s_header_test_before, s_header_test_run, s_header_test_after, &s_##t_name##_fixture) \ static struct header_test_fixture s_##t_name##_one_byte_at_a_time_fixture = { \ .init = (i), \ .teardown = (t), \ .one_byte_at_a_time = true, \ }; \ AWS_TEST_CASE_FIXTURE( \ t_name##_one_byte_at_a_time, \ s_header_test_before, \ s_header_test_run, \ s_header_test_after, \ &s_##t_name##_one_byte_at_a_time_fixture) #define DEFINE_STATIC_HEADER(_key, _value, _behavior) \ { \ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_key), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \ .compression = AWS_HTTP_HEADER_COMPRESSION_##_behavior, \ } /* Test HEADERS frame with empty payload */ static int s_test_empty_payload(struct header_test_fixture *fixture) { (void)fixture; return AWS_OP_SUCCESS; } HEADER_TEST(h2_header_empty_payload, s_test_empty_payload, NULL); /* RFC-7541 - Header Field Representation Examples - C.2.1. Literal Header Field with Indexing */ static int s_test_ex_2_1_init(struct header_test_fixture *fixture) { aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); struct aws_http_header headers[] = { DEFINE_STATIC_HEADER("custom-key", "custom-header", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x40, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x6b, 0x65, 0x79, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, }; aws_byte_buf_write(&fixture->expected_encoding_buf, encoded, sizeof(encoded)); return AWS_OP_SUCCESS; } HEADER_TEST(h2_header_ex_2_1, s_test_ex_2_1_init, NULL); /* RFC-7541 - Header Field Representation Examples - C.2.2. 
Literal Header Field without Indexing */ static int s_test_ex_2_2_init(struct header_test_fixture *fixture) { aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); struct aws_http_header headers[] = { DEFINE_STATIC_HEADER(":path", "/sample/path", NO_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x04, 0x0c, 0x2f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2f, 0x70, 0x61, 0x74, 0x68}; aws_byte_buf_write(&fixture->expected_encoding_buf, encoded, sizeof(encoded)); return AWS_OP_SUCCESS; } HEADER_TEST(h2_header_ex_2_2, s_test_ex_2_2_init, NULL); /* RFC-7541 - Header Field Representation Examples - C.2.3. Literal Header Field Never Indexed */ static int s_test_ex_2_3_init(struct header_test_fixture *fixture) { aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); struct aws_http_header headers[] = { DEFINE_STATIC_HEADER("password", "secret", NO_FORWARD_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x10, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74}; aws_byte_buf_write(&fixture->expected_encoding_buf, encoded, sizeof(encoded)); return AWS_OP_SUCCESS; } HEADER_TEST(h2_header_ex_2_3, s_test_ex_2_3_init, NULL); /* RFC-7541 - Header Field Representation Examples - C.2.4. Indexed Header Field */ static int s_test_ex_2_4_init(struct header_test_fixture *fixture) { aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); struct aws_http_header headers[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x82, }; aws_byte_buf_write(&fixture->expected_encoding_buf, encoded, sizeof(encoded)); return AWS_OP_SUCCESS; } HEADER_TEST(h2_header_ex_2_4, s_test_ex_2_4_init, NULL); struct header_request_response_test_fixture; /* Function type used to init for request and response */ typedef int(header_request_response_init_fn)(struct header_request_response_test_fixture *); /* Contains all of the information required to run a header's test case */ struct header_request_response_test_fixture { header_request_response_init_fn *init; header_clean_up_fn *header_clean_up; header_request_response_init_fn *teardown; struct aws_allocator *allocator; bool one_byte_at_a_time; /* T: decode one byte at a time. 
F: decode whole buffer at once */ struct aws_hpack_encoder encoder; struct aws_hpack_decoder decoder; struct aws_http_headers *headers_to_encode[3]; struct aws_byte_buf expected_encoding_buf[3]; struct aws_http_header last_entry_dynamic_table[3]; size_t dynamic_table_len[3]; struct aws_http_headers *decoded_headers; }; static int s_header_request_response_test_before(struct aws_allocator *allocator, void *ctx) { struct header_request_response_test_fixture *fixture = ctx; fixture->allocator = allocator; aws_http_library_init(allocator); aws_hpack_encoder_init(&fixture->encoder, allocator, NULL); aws_hpack_decoder_init(&fixture->decoder, allocator, NULL); for (int i = 0; i < 3; i++) { fixture->headers_to_encode[i] = aws_http_headers_new(allocator); ASSERT_NOT_NULL(fixture->headers_to_encode[i]); ASSERT_SUCCESS(aws_byte_buf_init(&fixture->expected_encoding_buf[i], allocator, S_BUFFER_SIZE)); } fixture->decoded_headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(fixture->decoded_headers); return AWS_OP_SUCCESS; } static int s_encoder_result_check( struct header_request_response_test_fixture *fixture, struct aws_http_headers *headers_to_encode, struct aws_byte_buf expected_encoding_buf, struct aws_byte_buf *output_buffer) { /* Encode the headers */ ASSERT_SUCCESS(aws_hpack_encode_header_block(&fixture->encoder, headers_to_encode, output_buffer)); /* Compare the encoded output against the expected header block fragment */ ASSERT_BIN_ARRAYS_EQUALS( expected_encoding_buf.buffer, expected_encoding_buf.len, output_buffer->buffer, output_buffer->len); /* Decode */ struct aws_byte_cursor payload = aws_byte_cursor_from_buf(output_buffer); while (payload.len) { struct aws_hpack_decode_result result; if (fixture->one_byte_at_a_time) { struct aws_byte_cursor one_byte_payload = aws_byte_cursor_advance(&payload, 1); ASSERT_SUCCESS(aws_hpack_decode(&fixture->decoder, &one_byte_payload, &result)); ASSERT_UINT_EQUALS(0, one_byte_payload.len); } else { ASSERT_SUCCESS(aws_hpack_decode(&fixture->decoder, &payload, &result)); } if (result.type == AWS_HPACK_DECODE_T_HEADER_FIELD) { ASSERT_SUCCESS(aws_http_headers_add_header(fixture->decoded_headers, &result.data.header_field)); } } /* Compare the headers */ ASSERT_SUCCESS(s_header_block_eq(headers_to_encode, fixture->decoded_headers)); /* Reset state */ aws_byte_buf_reset(output_buffer, false); aws_http_headers_clear(fixture->decoded_headers); return AWS_OP_SUCCESS; } static int s_dynamic_table_last_entry_check( struct header_request_response_test_fixture *fixture, struct aws_http_header *expected_entry, size_t dynamic_table_len) { /* check the decoder's dynamic table */ const struct aws_hpack_context *context = &fixture->decoder.context; /* get the last element in dynamic table, which will be the absolute index plus all the elements in static table */ ASSERT_TRUE(dynamic_table_len == aws_hpack_get_dynamic_table_num_elements(context)); const struct aws_http_header *back = aws_hpack_get_header(context, dynamic_table_len + 61); ASSERT_TRUE(aws_byte_cursor_eq(&back->name, &expected_entry->name)); ASSERT_TRUE(aws_byte_cursor_eq(&back->value, &expected_entry->value)); /* check the encoder's dynamic table */ context = &fixture->encoder.context; ASSERT_TRUE(dynamic_table_len == aws_hpack_get_dynamic_table_num_elements(context)); back = aws_hpack_get_header(context, dynamic_table_len + 61); ASSERT_TRUE(aws_byte_cursor_eq(&back->name, &expected_entry->name)); ASSERT_TRUE(aws_byte_cursor_eq(&back->value, &expected_entry->value)); return AWS_OP_SUCCESS; } static int 
s_header_request_response_test_run(struct aws_allocator *allocator, void *ctx) { struct header_request_response_test_fixture *fixture = ctx; /* Init the in_header & buffer */ ASSERT_SUCCESS(fixture->init(fixture)); /* Encode */ /* Create the output buffer */ struct aws_byte_buf output_buffer; ASSERT_SUCCESS(aws_byte_buf_init(&output_buffer, allocator, S_BUFFER_SIZE)); /* check three results */ for (int i = 0; i < 3; i++) { ASSERT_SUCCESS(s_encoder_result_check( fixture, fixture->headers_to_encode[i], fixture->expected_encoding_buf[i], &output_buffer)); ASSERT_SUCCESS(s_dynamic_table_last_entry_check( fixture, &fixture->last_entry_dynamic_table[i], fixture->dynamic_table_len[i])); } aws_byte_buf_clean_up(&output_buffer); return AWS_OP_SUCCESS; } static int s_header_request_response_test_after(struct aws_allocator *allocator, int setup_res, void *ctx) { (void)allocator; if (!setup_res) { struct header_request_response_test_fixture *fixture = ctx; /* Tear down the header & buffer */ if (fixture->teardown) { fixture->teardown(fixture); } /* Tear down the fixture */ aws_http_headers_release(fixture->decoded_headers); for (int i = 0; i < 3; i++) { aws_byte_buf_clean_up(&fixture->expected_encoding_buf[i]); aws_http_headers_release(fixture->headers_to_encode[i]); } aws_hpack_decoder_clean_up(&fixture->decoder); aws_hpack_encoder_clean_up(&fixture->encoder); } aws_http_library_clean_up(); return AWS_OP_SUCCESS; } #define HEADER_REQUEST_RESPONSE_TEST(t_name, i, t) \ static struct header_request_response_test_fixture s_##t_name##_fixture = { \ .init = (i), \ .teardown = (t), \ }; \ AWS_TEST_CASE_FIXTURE( \ t_name, \ s_header_request_response_test_before, \ s_header_request_response_test_run, \ s_header_request_response_test_after, \ &s_##t_name##_fixture) \ static struct header_request_response_test_fixture s_##t_name##_one_byte_at_a_time_fixture = { \ .init = (i), \ .teardown = (t), \ .one_byte_at_a_time = true, \ }; \ AWS_TEST_CASE_FIXTURE( \ t_name##_one_byte_at_a_time, \ s_header_request_response_test_before, \ s_header_request_response_test_run, \ s_header_request_response_test_after, \ &s_##t_name##_one_byte_at_a_time_fixture) /* RFC-7541 - Request Examples without Huffman Coding - C.3 */ static int s_test_ex_3_init(struct header_request_response_test_fixture *fixture) { aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); int index = 0; /* First Request RFC-7541 C.3.1 */ struct aws_http_header headers_1[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_1, AWS_ARRAY_SIZE(headers_1))); static const uint8_t encoded_1[] = { 0x82, 0x86, 0x84, 0x41, 0x0f, 0x77, 0x77, 0x77, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, }; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_1, sizeof(encoded_1)); struct aws_http_header last_entry_1 = DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_1; fixture->dynamic_table_len[index] = 1; index++; /* Second Request RFC-7541 C.3.2 */ struct aws_http_header headers_2[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), DEFINE_STATIC_HEADER(":authority", "www.example.com", 
USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "no-cache", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_2, AWS_ARRAY_SIZE(headers_2))); static const uint8_t encoded_2[] = { 0x82, 0x86, 0x84, 0xbe, 0x58, 0x08, 0x6e, 0x6f, 0x2d, 0x63, 0x61, 0x63, 0x68, 0x65}; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_2, sizeof(encoded_2)); struct aws_http_header last_entry_2 = DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_2; fixture->dynamic_table_len[index] = 2; index++; /* Third Request RFC-7541 C.3.3 */ struct aws_http_header headers_3[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), DEFINE_STATIC_HEADER(":scheme", "https", USE_CACHE), DEFINE_STATIC_HEADER(":path", "/index.html", USE_CACHE), DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), DEFINE_STATIC_HEADER("custom-key", "custom-value", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_3, AWS_ARRAY_SIZE(headers_3))); static const uint8_t encoded_3[] = { 0x82, 0x87, 0x85, 0xbf, 0x40, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x6b, 0x65, 0x79, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65, }; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_3, sizeof(encoded_3)); struct aws_http_header last_entry_3 = DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_3; fixture->dynamic_table_len[index] = 3; return AWS_OP_SUCCESS; } HEADER_REQUEST_RESPONSE_TEST(h2_header_ex_3, s_test_ex_3_init, NULL); /* RFC-7541 - Request Examples with Huffman Coding - C.4 */ static int s_test_ex_4_init(struct header_request_response_test_fixture *fixture) { aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_ALWAYS); int index = 0; /* First Request RFC-7541 C.4.1 */ struct aws_http_header headers_1[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_1, AWS_ARRAY_SIZE(headers_1))); static const uint8_t encoded_1[] = { 0x82, 0x86, 0x84, 0x41, 0x8c, 0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff}; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_1, sizeof(encoded_1)); struct aws_http_header last_entry_1 = DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_1; fixture->dynamic_table_len[index] = 1; index++; /* Second Request RFC-7541 C.4.2 */ struct aws_http_header headers_2[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "no-cache", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_2, AWS_ARRAY_SIZE(headers_2))); static const uint8_t encoded_2[] = {0x82, 0x86, 0x84, 0xbe, 0x58, 0x86, 0xa8, 0xeb, 0x10, 0x64, 0x9c, 0xbf}; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_2, sizeof(encoded_2)); struct aws_http_header last_entry_2 = DEFINE_STATIC_HEADER(":authority", "www.example.com", 
USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_2; fixture->dynamic_table_len[index] = 2; index++; /* Third Request RFC-7541 C.4.3 */ struct aws_http_header headers_3[] = { DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), DEFINE_STATIC_HEADER(":scheme", "https", USE_CACHE), DEFINE_STATIC_HEADER(":path", "/index.html", USE_CACHE), DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), DEFINE_STATIC_HEADER("custom-key", "custom-value", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_3, AWS_ARRAY_SIZE(headers_3))); static const uint8_t encoded_3[] = { 0x82, 0x87, 0x85, 0xbf, 0x40, 0x88, 0x25, 0xa8, 0x49, 0xe9, 0x5b, 0xa9, 0x7d, 0x7f, 0x89, 0x25, 0xa8, 0x49, 0xe9, 0x5b, 0xb8, 0xe8, 0xb4, 0xbf, }; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_3, sizeof(encoded_3)); struct aws_http_header last_entry_3 = DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_3; fixture->dynamic_table_len[index] = 3; return AWS_OP_SUCCESS; } HEADER_REQUEST_RESPONSE_TEST(h2_header_ex_4, s_test_ex_4_init, NULL); /* RFC-7541 - Response Examples without Huffman Coding - C.5 */ static int s_test_ex_5_init(struct header_request_response_test_fixture *fixture) { /* set the max table size to 256 */ ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&fixture->encoder.context, 256)); ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&fixture->decoder.context, 256)); aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); int index = 0; /* First Response RFC-7541 C.5.1 */ struct aws_http_header headers_1[] = { DEFINE_STATIC_HEADER(":status", "302", USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE), DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_1, AWS_ARRAY_SIZE(headers_1))); static const uint8_t encoded_1[] = { 0x48, 0x03, 0x33, 0x30, 0x32, 0x58, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x61, 0x1d, 0x4d, 0x6f, 0x6e, 0x2c, 0x20, 0x32, 0x31, 0x20, 0x4f, 0x63, 0x74, 0x20, 0x32, 0x30, 0x31, 0x33, 0x20, 0x32, 0x30, 0x3a, 0x31, 0x33, 0x3a, 0x32, 0x31, 0x20, 0x47, 0x4d, 0x54, 0x6e, 0x17, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, }; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_1, sizeof(encoded_1)); struct aws_http_header last_entry_1 = DEFINE_STATIC_HEADER(":status", "302", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_1; fixture->dynamic_table_len[index] = 4; index++; /* Second Response RFC-7541 C.5.2 */ struct aws_http_header headers_2[] = { DEFINE_STATIC_HEADER(":status", "307", USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE), DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_2, AWS_ARRAY_SIZE(headers_2))); static const uint8_t encoded_2[] = {0x48, 0x03, 0x33, 0x30, 0x37, 0xc1, 0xc0, 0xbf}; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_2, sizeof(encoded_2)); struct aws_http_header last_entry_2 = DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE); 
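/* Why the oldest surviving entry is "cache-control: private" (a worked reading of RFC-7541 C.5.2 under
 * the 256-byte table limit set above): after the first response the dynamic table holds 4 entries
 * totaling 222 bytes; adding ":status: 307" (42 bytes) would exceed 256, so the oldest entry
 * (":status: 302", also 42 bytes) is evicted, leaving 4 entries whose oldest is "cache-control: private" -
 * exactly what last_entry_2 and dynamic_table_len = 4 record for the checks below. */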
fixture->last_entry_dynamic_table[index] = last_entry_2; fixture->dynamic_table_len[index] = 4; index++; /* Third Response RFC-7541 C.5.3 */ struct aws_http_header headers_3[] = { DEFINE_STATIC_HEADER(":status", "200", USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:22 GMT", USE_CACHE), DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), DEFINE_STATIC_HEADER("content-encoding", "gzip", USE_CACHE), DEFINE_STATIC_HEADER("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_3, AWS_ARRAY_SIZE(headers_3))); static const uint8_t encoded_3[] = { 0x88, 0xc1, 0x61, 0x1d, 0x4d, 0x6f, 0x6e, 0x2c, 0x20, 0x32, 0x31, 0x20, 0x4f, 0x63, 0x74, 0x20, 0x32, 0x30, 0x31, 0x33, 0x20, 0x32, 0x30, 0x3a, 0x31, 0x33, 0x3a, 0x32, 0x32, 0x20, 0x47, 0x4d, 0x54, 0xc0, 0x5a, 0x04, 0x67, 0x7a, 0x69, 0x70, 0x77, 0x38, 0x66, 0x6f, 0x6f, 0x3d, 0x41, 0x53, 0x44, 0x4a, 0x4b, 0x48, 0x51, 0x4b, 0x42, 0x5a, 0x58, 0x4f, 0x51, 0x57, 0x45, 0x4f, 0x50, 0x49, 0x55, 0x41, 0x58, 0x51, 0x57, 0x45, 0x4f, 0x49, 0x55, 0x3b, 0x20, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, 0x3d, 0x33, 0x36, 0x30, 0x30, 0x3b, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x31, }; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_3, sizeof(encoded_3)); struct aws_http_header last_entry_3 = DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:22 GMT", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_3; fixture->dynamic_table_len[index] = 3; return AWS_OP_SUCCESS; } HEADER_REQUEST_RESPONSE_TEST(h2_header_ex_5, s_test_ex_5_init, NULL); /* RFC-7541 - Response Examples with Huffman Coding - C.6 */ static int s_test_ex_6_init(struct header_request_response_test_fixture *fixture) { /* set the max table size to 256 */ ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&fixture->encoder.context, 256)); ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&fixture->decoder.context, 256)); aws_hpack_encoder_set_huffman_mode(&fixture->encoder, AWS_HPACK_HUFFMAN_ALWAYS); int index = 0; /* First Response RFC-7541 C.6.1 */ struct aws_http_header headers_1[] = { DEFINE_STATIC_HEADER(":status", "302", USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE), DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_1, AWS_ARRAY_SIZE(headers_1))); static const uint8_t encoded_1[] = { 0x48, 0x82, 0x64, 0x02, 0x58, 0x85, 0xae, 0xc3, 0x77, 0x1a, 0x4b, 0x61, 0x96, 0xd0, 0x7a, 0xbe, 0x94, 0x10, 0x54, 0xd4, 0x44, 0xa8, 0x20, 0x05, 0x95, 0x04, 0x0b, 0x81, 0x66, 0xe0, 0x82, 0xa6, 0x2d, 0x1b, 0xff, 0x6e, 0x91, 0x9d, 0x29, 0xad, 0x17, 0x18, 0x63, 0xc7, 0x8f, 0x0b, 0x97, 0xc8, 0xe9, 0xae, 0x82, 0xae, 0x43, 0xd3, }; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_1, sizeof(encoded_1)); struct aws_http_header last_entry_1 = DEFINE_STATIC_HEADER(":status", "302", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_1; fixture->dynamic_table_len[index] = 4; index++; /* Second Response RFC-7541 C.6.2 */ struct aws_http_header headers_2[] = { DEFINE_STATIC_HEADER(":status", "307", USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE), 
DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_2, AWS_ARRAY_SIZE(headers_2))); static const uint8_t encoded_2[] = {0x48, 0x83, 0x64, 0x0e, 0xff, 0xc1, 0xc0, 0xbf}; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_2, sizeof(encoded_2)); struct aws_http_header last_entry_2 = DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_2; fixture->dynamic_table_len[index] = 4; index++; /* Third Response RFC-7541 C.6.3 */ struct aws_http_header headers_3[] = { DEFINE_STATIC_HEADER(":status", "200", USE_CACHE), DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:22 GMT", USE_CACHE), DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), DEFINE_STATIC_HEADER("content-encoding", "gzip", USE_CACHE), DEFINE_STATIC_HEADER("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", USE_CACHE), }; ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode[index], headers_3, AWS_ARRAY_SIZE(headers_3))); static const uint8_t encoded_3[] = { 0x88, 0xc1, 0x61, 0x96, 0xd0, 0x7a, 0xbe, 0x94, 0x10, 0x54, 0xd4, 0x44, 0xa8, 0x20, 0x05, 0x95, 0x04, 0x0b, 0x81, 0x66, 0xe0, 0x84, 0xa6, 0x2d, 0x1b, 0xff, 0xc0, 0x5a, 0x83, 0x9b, 0xd9, 0xab, 0x77, 0xad, 0x94, 0xe7, 0x82, 0x1d, 0xd7, 0xf2, 0xe6, 0xc7, 0xb3, 0x35, 0xdf, 0xdf, 0xcd, 0x5b, 0x39, 0x60, 0xd5, 0xaf, 0x27, 0x08, 0x7f, 0x36, 0x72, 0xc1, 0xab, 0x27, 0x0f, 0xb5, 0x29, 0x1f, 0x95, 0x87, 0x31, 0x60, 0x65, 0xc0, 0x03, 0xed, 0x4e, 0xe5, 0xb1, 0x06, 0x3d, 0x50, 0x07}; aws_byte_buf_write(&fixture->expected_encoding_buf[index], encoded_3, sizeof(encoded_3)); struct aws_http_header last_entry_3 = DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:22 GMT", USE_CACHE); fixture->last_entry_dynamic_table[index] = last_entry_3; fixture->dynamic_table_len[index] = 3; return AWS_OP_SUCCESS; } HEADER_REQUEST_RESPONSE_TEST(h2_header_ex_6, s_test_ex_6_init, NULL); aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_hpack.c000066400000000000000000001022051456575232400236420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include /* #TODO test that buffer is resized if space is insufficient */ AWS_TEST_CASE(hpack_encode_integer, test_hpack_encode_integer) static int test_hpack_encode_integer(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* Test encoding integers Test cases taken from https://httpwg.org/specs/rfc7541.html#integer.representation.examples */ uint8_t zeros[4]; AWS_ZERO_ARRAY(zeros); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 4)); /* Test 10 in 5 bits */ aws_byte_buf_secure_zero(&output); ASSERT_SUCCESS(aws_hpack_encode_integer(10, 0, 5, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | X | 0 | 1 | 0 | 1 | 0 | 10 * +---+---+---+---+---+---+---+---+ */ ASSERT_UINT_EQUALS(1, output.len); ASSERT_UINT_EQUALS(10, output.buffer[0]); ASSERT_BIN_ARRAYS_EQUALS(zeros, 3, &output.buffer[1], 3); /* Test full first byte (6 bits) */ aws_byte_buf_secure_zero(&output); ASSERT_SUCCESS(aws_hpack_encode_integer(63, 0, 6, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | 1 | 1 | 1 | 1 | 1 | 1 | 63 * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 * +---+---+---+---+---+---+---+---+ */ ASSERT_UINT_EQUALS(2, output.len); ASSERT_UINT_EQUALS(63, output.buffer[0]); ASSERT_UINT_EQUALS(0, output.buffer[1]); ASSERT_BIN_ARRAYS_EQUALS(zeros, 2, &output.buffer[2], 2); /* Test 42 in 8 bits */ aws_byte_buf_secure_zero(&output); ASSERT_SUCCESS(aws_hpack_encode_integer(42, 0, 8, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 42 * +---+---+---+---+---+---+---+---+ */ ASSERT_UINT_EQUALS(1, output.len); ASSERT_UINT_EQUALS(42, output.buffer[0]); ASSERT_BIN_ARRAYS_EQUALS(zeros, 3, &output.buffer[1], 3); /* Test 1337 with 5bit prefix */ aws_byte_buf_secure_zero(&output); ASSERT_SUCCESS(aws_hpack_encode_integer(1337, 0, 5, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | X | 1 | 1 | 1 | 1 | 1 | 31 * | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 154 * | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 10 * +---+---+---+---+---+---+---+---+ */ ASSERT_UINT_EQUALS(3, output.len); ASSERT_UINT_EQUALS(UINT8_MAX >> 3, output.buffer[0]); ASSERT_UINT_EQUALS(154, output.buffer[1]); ASSERT_UINT_EQUALS(10, output.buffer[2]); ASSERT_UINT_EQUALS(0, output.buffer[3]); aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } struct decode_fixture { struct aws_hpack_decoder hpack; bool one_byte_at_a_time; }; static int s_decode_fixture_setup(struct aws_allocator *allocator, void *ctx) { struct decode_fixture *fixture = ctx; aws_hpack_decoder_init(&fixture->hpack, allocator, NULL); return AWS_OP_SUCCESS; } static int s_decode_fixture_teardown(struct aws_allocator *allocator, int setup_result, void *ctx) { (void)allocator; if (setup_result) { return AWS_OP_ERR; } struct decode_fixture *fixture = ctx; aws_hpack_decoder_clean_up(&fixture->hpack); return AWS_OP_SUCCESS; } /* Call aws_hpack_decode_integer() either one-byte-at-a-time, or all at once */ static int s_decode_integer( struct decode_fixture *fixture, struct aws_byte_cursor *to_decode, uint8_t prefix_size, uint64_t *integer, bool *complete) { if (fixture->one_byte_at_a_time) { do { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(to_decode, 1); if (aws_hpack_decode_integer(&fixture->hpack, &one_byte, prefix_size, integer, complete)) { return AWS_OP_ERR; } ASSERT_UINT_EQUALS(0, one_byte.len); } while (!*complete && 
to_decode->len); return AWS_OP_SUCCESS; } else { return aws_hpack_decode_integer(&fixture->hpack, to_decode, prefix_size, integer, complete); } } /* Call aws_hpack_decode_string() either one-byte-at-a-time, or all at once */ static int s_decode_string( struct decode_fixture *fixture, struct aws_byte_cursor *to_decode, struct aws_byte_buf *output, bool *complete) { if (fixture->one_byte_at_a_time) { do { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(to_decode, 1); if (aws_hpack_decode_string(&fixture->hpack, &one_byte, output, complete)) { return AWS_OP_ERR; } ASSERT_UINT_EQUALS(0, one_byte.len); } while (!*complete && to_decode->len); return AWS_OP_SUCCESS; } else { return aws_hpack_decode_string(&fixture->hpack, to_decode, output, complete); } } /* declare 2 tests, where the first decodes the input all at once, * and the other decodes the input one byte at a time. */ #define TEST_DECODE_ONE_BYTE_AT_A_TIME(NAME) \ static struct decode_fixture s_##NAME##_fixture = {.one_byte_at_a_time = false}; \ static struct decode_fixture s_##NAME##_one_byte_at_a_time_fixture = {.one_byte_at_a_time = true}; \ AWS_TEST_CASE_FIXTURE(NAME, s_decode_fixture_setup, s_test_##NAME, s_decode_fixture_teardown, &s_##NAME##_fixture) \ AWS_TEST_CASE_FIXTURE( \ NAME##_one_byte_at_a_time, \ s_decode_fixture_setup, \ s_test_##NAME, \ s_decode_fixture_teardown, \ &s_##NAME##_one_byte_at_a_time_fixture) \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) /* RFC-7541 - Integer Representation Examples - C.1.1. Encoding 10 Using a 5-Bit Prefix */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_integer_5bits) { (void)allocator; struct decode_fixture *fixture = ctx; /* Layout: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | X | 0 | 1 | 0 | 1 | 0 | 10 * +---+---+---+---+---+---+---+---+ */ uint8_t test_0[] = {10}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(test_0, AWS_ARRAY_SIZE(test_0)); uint64_t result; bool complete; ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 5, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_UINT_EQUALS(10, result); return AWS_OP_SUCCESS; } /* Encoding 63 across a 6-bit prefix + one byte */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_integer_14bits) { (void)allocator; struct decode_fixture *fixture = ctx; /* Layout: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | 1 | 1 | 1 | 1 | 1 | 1 | 63 * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 * +---+---+---+---+---+---+---+---+ */ uint8_t test_1[] = {63, 0}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(test_1, AWS_ARRAY_SIZE(test_1)); uint64_t result; bool complete; ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 6, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_UINT_EQUALS(63, result); return AWS_OP_SUCCESS; } /* RFC-7541 - Integer Representation Examples - C.1.3. 
Encoding 42 Starting at an Octet Boundary */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_integer_8bits) { (void)allocator; struct decode_fixture *fixture = ctx; /* Layout: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 42 * +---+---+---+---+---+---+---+---+ */ uint8_t test_2[] = {42}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(test_2, AWS_ARRAY_SIZE(test_2)); uint64_t result; bool complete; ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 8, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_UINT_EQUALS(42, result); return AWS_OP_SUCCESS; } /* RFC-7541 - Integer Representation Examples - C.1.2. Encoding 1337 Using a 5-Bit Prefix */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_integer_21bits) { (void)allocator; struct decode_fixture *fixture = ctx; /* Layout: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | X | 1 | 1 | 1 | 1 | 1 | 31 * | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 154 * | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 10 * +---+---+---+---+---+---+---+---+ */ uint8_t test_3[] = {UINT8_MAX >> 3, 154, 10}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(test_3, AWS_ARRAY_SIZE(test_3)); uint64_t result; bool complete; ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 5, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_UINT_EQUALS(1337, result); return AWS_OP_SUCCESS; } TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_integer_ongoing) { (void)allocator; struct decode_fixture *fixture = ctx; /* Test number ending with continue byte * Layout: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | X | 1 | 1 | 1 | 1 | 1 | 31 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * +---+---+---+---+---+---+---+---+ */ uint8_t test_4[] = {UINT8_MAX >> 3, UINT8_MAX}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(test_4, AWS_ARRAY_SIZE(test_4)); uint64_t result; bool complete; ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 5, &result, &complete)); ASSERT_FALSE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); return AWS_OP_SUCCESS; } TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_integer_too_big) { (void)allocator; struct decode_fixture *fixture = ctx; /* Test number too big * Layout: * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | X | X | X | 1 | 1 | 1 | 1 | 1 | 31 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 127 * +---+---+---+---+---+---+---+---+ */ uint8_t test_5[] = { UINT8_MAX >> 3, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX, }; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(test_5, AWS_ARRAY_SIZE(test_5)); uint64_t result; bool complete; ASSERT_FAILS(s_decode_integer(fixture, &to_decode, 5, &result, &complete)); ASSERT_UINT_EQUALS(AWS_ERROR_OVERFLOW_DETECTED, aws_last_error()); return AWS_OP_SUCCESS; } /* Test that decoder properly resets itself between integers. 
* Trying every type of transition: * - from 1 byte to 1 byte * - from 1 byte to multibyte * - from multibyte to multibyte * - from multibyte to 1 byte */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_integer_few_in_a_row) { (void)allocator; struct decode_fixture *fixture = ctx; uint8_t input[] = { /* 10 with 5-bit prefix * +---+---+---+---+---+---+---+---+ * | X | X | X | 0 | 1 | 0 | 1 | 0 | * +---+---+---+---+---+---+---+---+ */ 10, /* 42 with 8-bit prefix * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | * +---+---+---+---+---+---+---+---+ */ 42, /* 63 with 6-bit prefix * +---+---+---+---+---+---+---+---+ * | X | X | 1 | 1 | 1 | 1 | 1 | 1 | * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | * +---+---+---+---+---+---+---+---+ */ 63, 0, /* 1337 with 5-bit prefix * +---+---+---+---+---+---+---+---+ * | X | X | X | 1 | 1 | 1 | 1 | 1 | * | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | * | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | * +---+---+---+---+---+---+---+---+ */ UINT8_MAX >> 3, 154, 10, /* 10 with 5-bit prefix * +---+---+---+---+---+---+---+---+ * | X | X | X | 0 | 1 | 0 | 1 | 0 | * +---+---+---+---+---+---+---+---+ */ 10, }; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(input, AWS_ARRAY_SIZE(input)); uint64_t result; bool complete; ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 5, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(10, result); ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 8, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(42, result); ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 6, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(63, result); ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 5, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(1337, result); ASSERT_SUCCESS(s_decode_integer(fixture, &to_decode, 5, &result, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(10, result); ASSERT_UINT_EQUALS(0, to_decode.len); return AWS_OP_SUCCESS; } TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_string_blank) { struct decode_fixture *fixture = ctx; uint8_t input[] = {0}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(input, AWS_ARRAY_SIZE(input)); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 4)); bool complete; ASSERT_SUCCESS(s_decode_string(fixture, &to_decode, &output, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_BIN_ARRAYS_EQUALS("", 0, output.buffer, output.len); aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } /* Test a string that is NOT Huffman encoded */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_string_uncompressed) { struct decode_fixture *fixture = ctx; uint8_t input[] = {5, 'h', 'e', 'l', 'l', 'o'}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(input, AWS_ARRAY_SIZE(input)); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 5)); bool complete; ASSERT_SUCCESS(s_decode_string(fixture, &to_decode, &output, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_BIN_ARRAYS_EQUALS("hello", 5, output.buffer, output.len); aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_string_huffman) { struct decode_fixture *fixture = ctx; /* This is Huffman-encoded "www.example.com", copied from: * RFC-7541 - Request Examples with Huffman Coding - C.4.1. 
First Request */ uint8_t input[] = {0x8c, 0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(input, AWS_ARRAY_SIZE(input)); const char *expected = "www.example.com"; struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, strlen(expected))); bool complete; ASSERT_SUCCESS(s_decode_string(fixture, &to_decode, &output, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), output.buffer, output.len); aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } /* Test that partial input doesn't register as "complete" */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_string_ongoing) { struct decode_fixture *fixture = ctx; uint8_t input[] = {5, 'h', 'e', 'l'}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(input, AWS_ARRAY_SIZE(input)); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 5)); bool complete; ASSERT_SUCCESS(s_decode_string(fixture, &to_decode, &output, &complete)); ASSERT_FALSE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } /* Test that output buffer is gets resized if it's too small */ TEST_DECODE_ONE_BYTE_AT_A_TIME(hpack_decode_string_short_buffer) { struct decode_fixture *fixture = ctx; uint8_t input[] = {5, 'h', 'e', 'l', 'l', 'o'}; struct aws_byte_cursor to_decode = aws_byte_cursor_from_array(input, AWS_ARRAY_SIZE(input)); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 1)); /* Note buffer is initially too small */ bool complete; ASSERT_SUCCESS(s_decode_string(fixture, &to_decode, &output, &complete)); ASSERT_TRUE(complete); ASSERT_UINT_EQUALS(0, to_decode.len); ASSERT_BIN_ARRAYS_EQUALS("hello", 5, output.buffer, output.len); aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } #define DEFINE_STATIC_HEADER(_name, _header, _value) \ static const struct aws_http_header _name = { \ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_header), \ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \ } AWS_TEST_CASE(hpack_static_table_find, test_hpack_static_table_find) static int test_hpack_static_table_find(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_context context; aws_hpack_context_init(&context, allocator, AWS_LS_HTTP_GENERAL, NULL); ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&context, 0)); bool found_value = false; DEFINE_STATIC_HEADER(s_authority, ":authority", "amazon.com"); DEFINE_STATIC_HEADER(s_get, ":method", "GET"); DEFINE_STATIC_HEADER(s_other_method, ":method", "TEAPOT"); DEFINE_STATIC_HEADER(s_garbage, "colden's favorite ice cream flavor", "cookie dough"); /* Test header without value */ ASSERT_UINT_EQUALS(1, aws_hpack_find_index(&context, &s_authority, false, &found_value)); ASSERT_FALSE(found_value); /* Test header with value */ ASSERT_UINT_EQUALS(2, aws_hpack_find_index(&context, &s_get, true, &found_value)); ASSERT_TRUE(found_value); ASSERT_UINT_EQUALS(2, aws_hpack_find_index(&context, &s_other_method, true, &found_value)); ASSERT_FALSE(found_value); /* Check invalid header */ ASSERT_UINT_EQUALS(0, aws_hpack_find_index(&context, &s_garbage, true, &found_value)); aws_hpack_context_clean_up(&context); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(hpack_static_table_get, test_hpack_static_table_get) static int test_hpack_static_table_get(struct aws_allocator *allocator, 
void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_context context; aws_hpack_context_init(&context, allocator, AWS_LS_HTTP_GENERAL, NULL); ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&context, 0)); const struct aws_http_header *found = NULL; DEFINE_STATIC_HEADER(s_get, ":path", "/index.html"); DEFINE_STATIC_HEADER(s_age, "age", "25"); found = aws_hpack_get_header(&context, 21); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_age.name, &found->name)); ASSERT_NULL(found->value.ptr); ASSERT_UINT_EQUALS(0, found->value.len); found = aws_hpack_get_header(&context, 5); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_get.name, &found->name)); ASSERT_TRUE(aws_byte_cursor_eq(&s_get.value, &found->value)); found = aws_hpack_get_header(&context, 69); ASSERT_NULL(found); aws_hpack_context_clean_up(&context); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(hpack_dynamic_table_find, test_hpack_dynamic_table_find) static int test_hpack_dynamic_table_find(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_context context; aws_hpack_context_init(&context, allocator, AWS_LS_HTTP_GENERAL, NULL); bool found_value = false; DEFINE_STATIC_HEADER(s_herp, "herp", "derp"); DEFINE_STATIC_HEADER(s_herp2, "herp", "something else"); DEFINE_STATIC_HEADER(s_fizz, "fizz", "buzz"); /* Test single header */ ASSERT_SUCCESS(aws_hpack_insert_header(&context, &s_herp)); ASSERT_UINT_EQUALS(62, aws_hpack_find_index(&context, &s_herp, true, &found_value)); ASSERT_TRUE(found_value); ASSERT_UINT_EQUALS(62, aws_hpack_find_index(&context, &s_herp2, true, &found_value)); ASSERT_FALSE(found_value); /* Test 2 headers */ ASSERT_SUCCESS(aws_hpack_insert_header(&context, &s_fizz)); ASSERT_UINT_EQUALS(62, aws_hpack_find_index(&context, &s_fizz, true, &found_value)); ASSERT_TRUE(found_value); ASSERT_UINT_EQUALS(63, aws_hpack_find_index(&context, &s_herp, true, &found_value)); ASSERT_TRUE(found_value); ASSERT_UINT_EQUALS(63, aws_hpack_find_index(&context, &s_herp2, true, &found_value)); ASSERT_FALSE(found_value); /* Test resizing up doesn't break anything */ ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&context, 8 * 1024 * 1024)); /* Check invalid header */ DEFINE_STATIC_HEADER(s_garbage, "colden's mother's maiden name", "nice try mr hacker"); ASSERT_UINT_EQUALS(0, aws_hpack_find_index(&context, &s_garbage, true, &found_value)); /* Test resizing so only the first element stays */ ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&context, aws_hpack_get_header_size(&s_fizz))); ASSERT_UINT_EQUALS(62, aws_hpack_find_index(&context, &s_fizz, true, &found_value)); ASSERT_TRUE(found_value); ASSERT_UINT_EQUALS(0, aws_hpack_find_index(&context, &s_herp, true, &found_value)); ASSERT_FALSE(found_value); aws_hpack_context_clean_up(&context); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(hpack_dynamic_table_get, test_hpack_dynamic_table_get) static int test_hpack_dynamic_table_get(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_context context; aws_hpack_context_init(&context, allocator, AWS_LS_HTTP_GENERAL, NULL); const struct aws_http_header *found = NULL; DEFINE_STATIC_HEADER(s_herp, "herp", "derp"); DEFINE_STATIC_HEADER(s_fizz, "fizz", "buzz"); DEFINE_STATIC_HEADER(s_status, ":status", "418"); /* Make the dynamic table only big enough for 2 headers */ ASSERT_SUCCESS(aws_hpack_resize_dynamic_table( &context, aws_hpack_get_header_size(&s_fizz) + 
aws_hpack_get_header_size(&s_status))); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &s_herp)); found = aws_hpack_get_header(&context, 62); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_herp.name, &found->name)); ASSERT_TRUE(aws_byte_cursor_eq(&s_herp.value, &found->value)); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &s_fizz)); found = aws_hpack_get_header(&context, 62); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_fizz.name, &found->name)); ASSERT_TRUE(aws_byte_cursor_eq(&s_fizz.value, &found->value)); found = aws_hpack_get_header(&context, 63); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_herp.name, &found->name)); ASSERT_TRUE(aws_byte_cursor_eq(&s_herp.value, &found->value)); /* This one will result in the first header being evicted */ ASSERT_SUCCESS(aws_hpack_insert_header(&context, &s_status)); found = aws_hpack_get_header(&context, 62); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_status.name, &found->name)); ASSERT_TRUE(aws_byte_cursor_eq(&s_status.value, &found->value)); found = aws_hpack_get_header(&context, 63); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_fizz.name, &found->name)); ASSERT_TRUE(aws_byte_cursor_eq(&s_fizz.value, &found->value)); found = aws_hpack_get_header(&context, 64); ASSERT_NULL(found); /* Test resizing to evict entries */ ASSERT_SUCCESS(aws_hpack_resize_dynamic_table(&context, aws_hpack_get_header_size(&s_status))); found = aws_hpack_get_header(&context, 62); ASSERT_NOT_NULL(found); ASSERT_TRUE(aws_byte_cursor_eq(&s_status.name, &found->name)); ASSERT_TRUE(aws_byte_cursor_eq(&s_status.value, &found->value)); found = aws_hpack_get_header(&context, 63); ASSERT_NULL(found); aws_hpack_context_clean_up(&context); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } static int s_check_header( const struct aws_http_header *header_field, const char *name, const char *value, enum aws_http_header_compression compression) { ASSERT_BIN_ARRAYS_EQUALS(name, strlen(name), header_field->name.ptr, header_field->name.len); ASSERT_BIN_ARRAYS_EQUALS(value, strlen(value), header_field->value.ptr, header_field->value.len); ASSERT_INT_EQUALS(compression, header_field->compression); return AWS_OP_SUCCESS; } AWS_TEST_CASE(hpack_decode_indexed_from_dynamic_table, test_hpack_decode_indexed_from_dynamic_table) static int test_hpack_decode_indexed_from_dynamic_table(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_decoder decoder; aws_hpack_decoder_init(&decoder, allocator, NULL); /* clang-format off */ uint8_t input[] = { 0x48, 0x03, '3', '0', '2', /* ":status: 302" - stored to dynamic table */ 0x40, 0x01, 'a', 0x01, 'b', /* "a: b" - stored to dynamic table */ /* So at this point dynamic table should look like: * INDEX NAME VALUE * 62 a b * 63 :status 302 */ 0xbf, /* ":status: 302" - indexed from dynamic table */ }; /* clang-format on */ struct aws_hpack_decode_result result; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); /* Three entries in total, decode them all, and check the result */ /* First entry */ ASSERT_SUCCESS(aws_hpack_decode(&decoder, &input_cursor, &result)); ASSERT_TRUE(result.type == AWS_HPACK_DECODE_T_HEADER_FIELD); ASSERT_SUCCESS(s_check_header(&result.data.header_field, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); /* Second entry */ ASSERT_SUCCESS(aws_hpack_decode(&decoder, &input_cursor, &result)); ASSERT_TRUE(result.type == AWS_HPACK_DECODE_T_HEADER_FIELD); 
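/* At this point both literal-with-indexing fields have been inserted, so (as the table sketch above
 * shows) the decoder's dynamic table holds "a: b" at index 62 and ":status: 302" at index 63 -
 * dynamic entries start right after the 61-entry static table, newest first. The final input byte
 * 0xbf decoded below is 0x80 | 63, i.e. an Indexed Header Field pointing at index 63, which is why
 * it must resolve back to ":status: 302". */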
ASSERT_SUCCESS(s_check_header(&result.data.header_field, "a", "b", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); /* Third entry */ ASSERT_SUCCESS(aws_hpack_decode(&decoder, &input_cursor, &result)); ASSERT_TRUE(result.type == AWS_HPACK_DECODE_T_HEADER_FIELD); ASSERT_SUCCESS(s_check_header(&result.data.header_field, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); /* Check the input is fully consumed */ ASSERT_TRUE(input_cursor.len == 0); /* Clean up */ aws_hpack_decoder_clean_up(&decoder); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } /* Test header with empty value */ AWS_TEST_CASE(hpack_dynamic_table_empty_value, test_hpack_dynamic_table_empty_value) static int test_hpack_dynamic_table_empty_value(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_context context; aws_hpack_context_init(&context, allocator, AWS_LS_HTTP_GENERAL, NULL); DEFINE_STATIC_HEADER(header1, ":status", "302"); DEFINE_STATIC_HEADER(empty_value_header, "c", ""); DEFINE_STATIC_HEADER(header2, "a", "b"); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &header1)); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &empty_value_header)); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &header2)); /* So at this point dynamic table should look like: * INDEX NAME VALUE * 62 a b * 63 "c" "" * 64 :status 302 */ bool found_value = false; ASSERT_UINT_EQUALS(64, aws_hpack_find_index(&context, &header1, true, &found_value)); ASSERT_UINT_EQUALS(63, aws_hpack_find_index(&context, &empty_value_header, true, &found_value)); ASSERT_UINT_EQUALS(62, aws_hpack_find_index(&context, &header2, true, &found_value)); /* Clean up */ aws_hpack_context_clean_up(&context); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } /* Test header with empty name and value */ AWS_TEST_CASE(hpack_dynamic_table_with_empty_header, test_hpack_dynamic_table_with_empty_header) static int test_hpack_dynamic_table_with_empty_header(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_context context; aws_hpack_context_init(&context, allocator, AWS_LS_HTTP_GENERAL, NULL); DEFINE_STATIC_HEADER(header1, ":status", "302"); DEFINE_STATIC_HEADER(empty_header, "", ""); DEFINE_STATIC_HEADER(header2, "a", "b"); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &header1)); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &empty_header)); ASSERT_SUCCESS(aws_hpack_insert_header(&context, &header2)); /* So at this point dynamic table should look like: * INDEX NAME VALUE * 62 a b * 63 "" "" * 64 :status 302 */ bool found_value = false; ASSERT_UINT_EQUALS(64, aws_hpack_find_index(&context, &header1, true, &found_value)); ASSERT_UINT_EQUALS(63, aws_hpack_find_index(&context, &empty_header, true, &found_value)); ASSERT_UINT_EQUALS(62, aws_hpack_find_index(&context, &header2, true, &found_value)); /* Clean up */ aws_hpack_context_clean_up(&context); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(hpack_dynamic_table_size_update_from_setting, test_hpack_dynamic_table_size_update_from_setting) static int test_hpack_dynamic_table_size_update_from_setting(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_http_library_init(allocator); struct aws_hpack_encoder encoder; aws_hpack_encoder_init(&encoder, allocator, NULL); /* let's pretend multiple times max size update happened from encoder setting */ aws_hpack_encoder_update_max_table_size(&encoder, 10); aws_hpack_encoder_update_max_table_size(&encoder, 0); 
aws_hpack_encoder_update_max_table_size(&encoder, 1337); /* encode a header block */ struct aws_http_headers *headers = aws_http_headers_new(allocator); /* the 2 entry of static table */ DEFINE_STATIC_HEADER(header, ":method", "GET"); ASSERT_SUCCESS(aws_http_headers_add_header(headers, &header)); struct aws_byte_buf output; ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 5)); ASSERT_SUCCESS(aws_hpack_encode_header_block(&encoder, headers, &output)); /* Check the output result, it should contain two dynamic table size updates, besides the header */ /** * Expected first table size update (0 0 1) for dynamic table size update, rest is the integer with 5-bit Prefix: * size is 0 * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 32 * +---+---+---+---+---+---+---+---+ * * Expected second table size update: * size is 1337 * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 63 * | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 154 * | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 10 * +---+---+---+---+---+---+---+---+ * * Expected header block: (1) for indexed header field, rest is the index, which is 2 * 0 1 2 3 4 5 6 7 * +---+---+---+---+---+---+---+---+ * | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 130 * +---+---+---+---+---+---+---+---+ */ ASSERT_UINT_EQUALS(5, output.len); ASSERT_UINT_EQUALS(32, output.buffer[0]); ASSERT_UINT_EQUALS(63, output.buffer[1]); ASSERT_UINT_EQUALS(154, output.buffer[2]); ASSERT_UINT_EQUALS(10, output.buffer[3]); ASSERT_UINT_EQUALS(130, output.buffer[4]); /* clean up */ aws_byte_buf_clean_up(&output); aws_http_headers_release(headers); aws_hpack_encoder_clean_up(&encoder); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_localhost_integ.c000066400000000000000000000524601456575232400257410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "h2_test_helper.h" static int s_tester_on_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *header_array, size_t num_headers, void *user_data) { (void)stream; (void)header_block; struct aws_http_headers *received_headers = (struct aws_http_headers *)user_data; for (size_t i = 0; i < num_headers; ++i) { ASSERT_SUCCESS(aws_http_headers_add_header(received_headers, &header_array[i])); } return AWS_OP_SUCCESS; } static bool s_check_headers_received( const struct aws_http_headers *received_headers, const struct aws_http_headers *headers_to_check) { for (size_t i = 0; i < aws_http_headers_count(headers_to_check); i++) { struct aws_http_header header; if (aws_http_headers_get_index(headers_to_check, i, &header)) { return false; } struct aws_http_header received_header; if (aws_http_headers_get_index(received_headers, i + 1, &received_header)) { /* Not found */ return false; } if (!aws_byte_cursor_eq(&received_header.value, &header.value) || !aws_byte_cursor_eq(&received_header.name, &header.name)) { return false; } } return true; } struct tester { struct aws_allocator *alloc; struct aws_event_loop_group *event_loop_group; struct aws_host_resolver *host_resolver; struct aws_client_bootstrap *client_bootstrap; struct aws_tls_ctx_options tls_ctx_options; struct aws_tls_ctx *tls_ctx; struct aws_tls_connection_options tls_connection_options; struct aws_http_connection *connection; struct aws_mutex wait_lock; struct aws_condition_variable wait_cvar; bool shutdown_finished; size_t wait_for_stream_completed_count; size_t stream_completed_count; size_t stream_complete_errors; size_t stream_200_count; size_t stream_4xx_count; size_t stream_status_not_200_count; uint64_t num_sen_received; int stream_completed_error_code; bool stream_completed_with_200; size_t download_body_len; size_t content_len; int wait_result; }; static struct tester s_tester; #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } enum { TESTER_TIMEOUT_SEC = 60, /* Give enough time for non-sudo users to enter password */ }; static void s_on_connection_setup(struct aws_http_connection *connection, int error_code, void *user_data) { struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); if (error_code) { tester->wait_result = error_code; goto done; } tester->connection = connection; done: AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static void s_on_connection_shutdown(struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; struct tester *tester = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&tester->wait_lock) == AWS_OP_SUCCESS); tester->shutdown_finished = true; AWS_FATAL_ASSERT(aws_mutex_unlock(&tester->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&tester->wait_cvar); } static bool s_is_connected(void *context) { struct tester *tester = context; return tester->connection != NULL; } static int s_wait_on_connection_connected(struct tester *tester) { ASSERT_SUCCESS(aws_mutex_lock(&tester->wait_lock)); int signal_error = aws_condition_variable_wait_pred(&tester->wait_cvar, &tester->wait_lock, s_is_connected, tester); 
ASSERT_SUCCESS(aws_mutex_unlock(&tester->wait_lock)); return signal_error; } static bool s_is_shutdown(void *context) { struct tester *tester = context; return tester->shutdown_finished; } static int s_wait_on_connection_shutdown(struct tester *tester) { ASSERT_SUCCESS(aws_mutex_lock(&tester->wait_lock)); int signal_error = aws_condition_variable_wait_pred(&tester->wait_cvar, &tester->wait_lock, s_is_shutdown, tester); ASSERT_SUCCESS(aws_mutex_unlock(&tester->wait_lock)); return signal_error; } static bool s_is_stream_completed_count_at_least(void *context) { (void)context; return s_tester.wait_for_stream_completed_count <= s_tester.stream_completed_count; } static int s_wait_on_streams_completed_count(size_t count) { ASSERT_SUCCESS(aws_mutex_lock(&s_tester.wait_lock)); s_tester.wait_for_stream_completed_count = count; int signal_error = aws_condition_variable_wait_pred( &s_tester.wait_cvar, &s_tester.wait_lock, s_is_stream_completed_count_at_least, &s_tester); ASSERT_SUCCESS(aws_mutex_unlock(&s_tester.wait_lock)); return signal_error; } static void s_tester_on_stream_completed(struct aws_http_stream *stream, int error_code, void *user_data) { (void)user_data; (void)stream; AWS_FATAL_ASSERT(aws_mutex_lock(&s_tester.wait_lock) == AWS_OP_SUCCESS); if (error_code) { ++s_tester.stream_complete_errors; s_tester.stream_completed_error_code = error_code; } else { int status = 0; if (aws_http_stream_get_incoming_response_status(stream, &status)) { ++s_tester.stream_complete_errors; s_tester.stream_completed_error_code = aws_last_error(); } else { if (status == 200) { s_tester.stream_completed_with_200 = true; ++s_tester.stream_200_count; } else if (status / 100 == 4) { } else { ++s_tester.stream_status_not_200_count; } } } ++s_tester.stream_completed_count; aws_condition_variable_notify_one(&s_tester.wait_cvar); AWS_FATAL_ASSERT(aws_mutex_unlock(&s_tester.wait_lock) == AWS_OP_SUCCESS); } static struct aws_logger s_logger; static int s_tester_init(struct tester *tester, struct aws_allocator *allocator, struct aws_byte_cursor host_name) { aws_http_library_init(allocator); ASSERT_SUCCESS(aws_mutex_init(&tester->wait_lock)); ASSERT_SUCCESS(aws_condition_variable_init(&tester->wait_cvar)); tester->event_loop_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->event_loop_group, .max_entries = 8, }; tester->host_resolver = aws_host_resolver_new_default(allocator, &resolver_options); /* Create http connection */ struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = tester->event_loop_group, .host_resolver = tester->host_resolver, }; tester->client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); aws_tls_ctx_options_init_default_client(&tester->tls_ctx_options, allocator); aws_tls_ctx_options_set_alpn_list(&tester->tls_ctx_options, "h2"); /* Turn off peer verification as a localhost cert used */ tester->tls_ctx_options.verify_peer = false; tester->tls_ctx = aws_tls_client_ctx_new(allocator, &tester->tls_ctx_options); aws_tls_connection_options_init_from_ctx(&tester->tls_connection_options, tester->tls_ctx); aws_tls_connection_options_set_server_name(&tester->tls_connection_options, allocator, &host_name); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .connect_timeout_ms = (uint32_t)aws_timestamp_convert(TESTER_TIMEOUT_SEC, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), .keep_alive_timeout_sec = 0, .keepalive = false, .keep_alive_interval_sec = 
0, }; struct aws_http_connection_monitoring_options monitor_opt = { .allowable_throughput_failure_interval_seconds = 2, .minimum_throughput_bytes_per_second = 1000, }; struct aws_http_client_connection_options client_options = { .self_size = sizeof(struct aws_http_client_connection_options), .allocator = allocator, .bootstrap = tester->client_bootstrap, .host_name = host_name, .port = 3443, .socket_options = &socket_options, .user_data = tester, .tls_options = &tester->tls_connection_options, .on_setup = s_on_connection_setup, .on_shutdown = s_on_connection_shutdown, .monitoring_options = &monitor_opt, }; ASSERT_SUCCESS(aws_http_client_connect(&client_options)); struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_DEBUG, /* We are stress testing; if this ever fails, the default trace-level log is too much to handle, so use debug level instead */ .file = stderr, }; aws_logger_init_standard(&s_logger, allocator, &logger_options); aws_logger_set(&s_logger); return AWS_OP_SUCCESS; } static int s_tester_clean_up(struct tester *tester) { aws_http_connection_release(tester->connection); ASSERT_SUCCESS(s_wait_on_connection_shutdown(tester)); aws_tls_connection_options_clean_up(&tester->tls_connection_options); aws_tls_ctx_release(tester->tls_ctx); aws_tls_ctx_options_clean_up(&tester->tls_ctx_options); aws_client_bootstrap_release(tester->client_bootstrap); aws_host_resolver_release(tester->host_resolver); aws_event_loop_group_release(tester->event_loop_group); aws_mutex_clean_up(&tester->wait_lock); aws_http_library_clean_up(); aws_logger_clean_up(&s_logger); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_http_localhost_env_var, "AWS_TEST_LOCALHOST_HOST"); static int s_test_hpack_stress_helper(struct aws_allocator *allocator, bool compression) { /* Test that makes tons of streams with all sorts of headers to stress hpack */ struct aws_string *http_localhost_host = NULL; if (aws_get_environment_value(allocator, s_http_localhost_env_var, &http_localhost_host) || http_localhost_host == NULL) { /* The environment variable is not set, default to localhost */ http_localhost_host = aws_string_new_from_c_str(allocator, "localhost"); } struct aws_byte_cursor host_name = aws_byte_cursor_from_string(http_localhost_host); ASSERT_SUCCESS(s_tester_init(&s_tester, allocator, host_name)); /* wait for connection connected */ ASSERT_SUCCESS(s_wait_on_connection_connected(&s_tester)); // localhost/echo is an echo server that sends the headers of your request back in its response.
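/* Each of the num_to_acquire iterations below builds a request carrying num_headers_to_make extra headers whose
 * names and values are picked pseudo-randomly from small pools, sends it on the same HTTP/2 connection, and then
 * verifies the echoed response headers match what was sent. Reusing a limited pool of names and values across many
 * requests keeps exercising the HPACK dynamic tables on both sides of the connection. */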
struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/echo"), { .name = aws_byte_cursor_from_c_str(":authority"), .value = host_name, }, }; size_t num_to_acquire = 2000; size_t num_headers_to_make = 100; /* Use a pool of header names and a pool of values, and pick randomly from both pools to stress hpack */ size_t headers_pool_size = 500; size_t values_pool_size = 66; for (size_t i = 0; i < num_to_acquire; i++) { struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_headers *request_headers = aws_http_message_get_headers(request); struct aws_http_headers *test_headers = aws_http_headers_new(allocator); /* as the request headers include the pseudo-headers, keep a copy of just the real headers to check the result against */ for (size_t j = 0; j < num_headers_to_make; j++) { char test_header_str[256]; uint64_t random_64_bit_num = 0; aws_device_random_u64(&random_64_bit_num); size_t headers = (size_t)random_64_bit_num % headers_pool_size; snprintf(test_header_str, sizeof(test_header_str), "crttest-%zu", headers); char test_value_str[256]; size_t value = (size_t)random_64_bit_num % values_pool_size; snprintf(test_value_str, sizeof(test_value_str), "value-%zu", value); struct aws_http_header request_header = { .compression = compression ? random_64_bit_num % 3 : AWS_HTTP_HEADER_COMPRESSION_USE_CACHE, // Use a random compression type to make sure each one works .name = aws_byte_cursor_from_c_str(test_header_str), .value = aws_byte_cursor_from_c_str(test_value_str), }; ASSERT_SUCCESS(aws_http_headers_add_header(request_headers, &request_header)); ASSERT_SUCCESS(aws_http_headers_add_header(test_headers, &request_header)); } struct aws_http_headers *received_headers = aws_http_headers_new(allocator); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .user_data = received_headers, .on_response_headers = s_tester_on_headers, .on_complete = s_tester_on_stream_completed, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); aws_http_stream_release(stream); /* Wait for the stream to complete */ ASSERT_SUCCESS(s_wait_on_streams_completed_count(1)); --s_tester.stream_completed_count; ASSERT_TRUE(s_tester.stream_completed_with_200); ASSERT_TRUE(s_check_headers_received(received_headers, test_headers)); aws_http_message_release(request); aws_http_headers_release(test_headers); aws_http_headers_release(received_headers); } aws_string_destroy(http_localhost_host); const struct aws_socket_endpoint *remote_endpoint = aws_http_connection_get_remote_endpoint(s_tester.connection); ASSERT_NOT_NULL(remote_endpoint); struct aws_byte_cursor remote_ip = aws_byte_cursor_from_c_str(remote_endpoint->address); /* Localhost IP should always be 127.0.0.1 */ ASSERT_TRUE(aws_byte_cursor_eq_c_str(&remote_ip, "127.0.0.1")); return s_tester_clean_up(&s_tester); } AWS_TEST_CASE(localhost_integ_hpack_stress, test_hpack_stress) static int test_hpack_stress(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_hpack_stress_helper(allocator, false /*compression*/); } AWS_TEST_CASE(localhost_integ_hpack_compression_stress, test_hpack_compression_stress) static int test_hpack_compression_stress(struct aws_allocator *allocator, 
void *ctx) { (void)ctx; return s_test_hpack_stress_helper(allocator, true /*compression*/); } static int s_tester_on_put_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)user_data; struct aws_string *content_length_header_str = aws_string_new_from_cursor(s_tester.alloc, data); s_tester.num_sen_received = (uint64_t)strtoull((const char *)content_length_header_str->bytes, NULL, 10); aws_string_destroy(content_length_header_str); return AWS_OP_SUCCESS; } /* Test uploading 2.5GB of data to the local server */ AWS_TEST_CASE(localhost_integ_h2_upload_stress, s_localhost_integ_h2_upload_stress) static int s_localhost_integ_h2_upload_stress(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_tester.alloc = allocator; size_t length = 2500000000UL; #ifdef AWS_OS_LINUX /* Using the Python hyper h2 server framework, we hit a weird upload performance issue on Linux. Our client against an nginx * server has not hit the same issue, so we assume it comes from the server framework implementation. Use a lower * number on Linux */ length = 250000000UL; #endif struct aws_string *http_localhost_host = NULL; if (aws_get_environment_value(allocator, s_http_localhost_env_var, &http_localhost_host) || http_localhost_host == NULL) { /* The environment variable is not set, default to localhost */ http_localhost_host = aws_string_new_from_c_str(allocator, "localhost"); } struct aws_byte_cursor host_name = aws_byte_cursor_from_string(http_localhost_host); ASSERT_SUCCESS(s_tester_init(&s_tester, allocator, host_name)); /* wait for connection connected */ ASSERT_SUCCESS(s_wait_on_connection_connected(&s_tester)); char content_length_sprintf_buffer[128] = ""; snprintf(content_length_sprintf_buffer, sizeof(content_length_sprintf_buffer), "%zu", length); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "PUT"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/upload_test.txt"), { .name = aws_byte_cursor_from_c_str(":authority"), .value = host_name, }, { .name = aws_byte_cursor_from_c_str("content_length"), .value = aws_byte_cursor_from_c_str(content_length_sprintf_buffer), }, }; struct aws_http_message *request = aws_http2_message_new_request(allocator); aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_input_stream *body_stream = aws_input_stream_tester_upload_new(allocator, length); aws_http_message_set_body_stream(request, body_stream); aws_input_stream_release(body_stream); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .on_complete = s_tester_on_stream_completed, .on_response_body = s_tester_on_put_body, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); aws_http_stream_release(stream); /* Wait for the stream to complete */ ASSERT_SUCCESS(s_wait_on_streams_completed_count(1)); ASSERT_UINT_EQUALS(s_tester.num_sen_received, length); ASSERT_TRUE(s_tester.stream_completed_with_200); aws_http_message_release(request); aws_string_destroy(http_localhost_host); return s_tester_clean_up(&s_tester); } static int s_tester_on_download_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; (void)user_data; s_tester.download_body_len += data->len; return AWS_OP_SUCCESS; } /* Test downloading 2.5GB of data from the local server */ 
AWS_TEST_CASE(localhost_integ_h2_download_stress, s_localhost_integ_h2_download_stress) static int s_localhost_integ_h2_download_stress(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_tester.alloc = allocator; size_t length = 2500000000UL; /* over int max, which is the max for settings */ struct aws_string *http_localhost_host = NULL; if (aws_get_environment_value(allocator, s_http_localhost_env_var, &http_localhost_host) || http_localhost_host == NULL) { /* The environment variable is not set, default to localhost */ http_localhost_host = aws_string_new_from_c_str(allocator, "localhost"); } struct aws_byte_cursor host_name = aws_byte_cursor_from_string(http_localhost_host); ASSERT_SUCCESS(s_tester_init(&s_tester, allocator, host_name)); /* wait for connection connected */ ASSERT_SUCCESS(s_wait_on_connection_connected(&s_tester)); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), DEFINE_HEADER(":scheme", "https"), DEFINE_HEADER(":path", "/downloadTest"), { .name = aws_byte_cursor_from_c_str(":authority"), .value = host_name, }, }; struct aws_http_message *request = aws_http2_message_new_request(allocator); ASSERT_NOT_NULL(request); aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .on_complete = s_tester_on_stream_completed, .on_response_body = s_tester_on_download_body, }; struct aws_http_stream *stream = aws_http_connection_make_request(s_tester.connection, &request_options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); aws_http_stream_release(stream); /* Wait for the stream to complete */ ASSERT_SUCCESS(s_wait_on_streams_completed_count(1)); ASSERT_UINT_EQUALS(s_tester.download_body_len, length); ASSERT_TRUE(s_tester.stream_completed_with_200); aws_http_message_release(request); aws_string_destroy(http_localhost_host); return s_tester_clean_up(&s_tester); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_message.c000066400000000000000000000521521456575232400242050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } TEST_CASE(message_sanity_check) { (void)ctx; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); aws_http_message_destroy(request); struct aws_http_message *response = aws_http_message_new_response(allocator); ASSERT_NOT_NULL(response); aws_http_message_destroy(response); return AWS_OP_SUCCESS; } TEST_CASE(message_request_path) { (void)ctx; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); /* Assert that query fails when there's no data */ struct aws_byte_cursor get; ASSERT_ERROR(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE, aws_http_message_get_request_path(request, &get)); /* Test simple set/get */ char path1[] = "/"; ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str(path1))); ASSERT_SUCCESS(aws_http_message_get_request_path(request, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, path1)); /* Mutilate the original string to be sure request wasn't referencing its memory */ path1[0] = 'z'; struct aws_byte_cursor path1_repro = aws_byte_cursor_from_c_str("/"); ASSERT_TRUE(aws_byte_cursor_eq(&path1_repro, &get)); /* Set a new path */ ASSERT_SUCCESS(aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/index.html"))); ASSERT_SUCCESS(aws_http_message_get_request_path(request, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, "/index.html")); aws_http_message_destroy(request); return AWS_OP_SUCCESS; } TEST_CASE(message_request_method) { (void)ctx; struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); /* Assert that query fails when there's no data */ struct aws_byte_cursor get; ASSERT_ERROR(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE, aws_http_message_get_request_method(request, &get)); /* Test simple set/get */ char method1[] = "GET"; ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str(method1))); ASSERT_SUCCESS(aws_http_message_get_request_method(request, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, method1)); /* Mutilate the original string to be sure request wasn't referencing its memory */ method1[0] = 'B'; ASSERT_TRUE(aws_byte_cursor_eq(&aws_http_method_get, &get)); /* Set a new method */ ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_http_method_post)); ASSERT_SUCCESS(aws_http_message_get_request_method(request, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, "POST")); aws_http_message_destroy(request); return AWS_OP_SUCCESS; } TEST_CASE(message_response_status) { (void)ctx; struct aws_http_message *response = aws_http_message_new_response(allocator); ASSERT_NOT_NULL(response); /* Assert that query fails when there's no data */ int get; ASSERT_ERROR(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE, aws_http_message_get_response_status(response, &get)); /* Test simple set/get */ ASSERT_SUCCESS(aws_http_message_set_response_status(response, AWS_HTTP_STATUS_CODE_200_OK)); ASSERT_SUCCESS(aws_http_message_get_response_status(response, &get)); ASSERT_INT_EQUALS(AWS_HTTP_STATUS_CODE_200_OK, get); /* Set a new status */ ASSERT_SUCCESS(aws_http_message_set_response_status(response, AWS_HTTP_STATUS_CODE_404_NOT_FOUND)); 
ASSERT_SUCCESS(aws_http_message_get_response_status(response, &get)); ASSERT_INT_EQUALS(AWS_HTTP_STATUS_CODE_404_NOT_FOUND, get); aws_http_message_destroy(response); return AWS_OP_SUCCESS; } static struct aws_http_header s_make_header(const char *name, const char *value) { return (struct aws_http_header){ .name = aws_byte_cursor_from_c_str(name), .value = aws_byte_cursor_from_c_str(value), }; } static int s_check_headers_eq(struct aws_http_header a, struct aws_http_header b) { ASSERT_TRUE(aws_byte_cursor_eq(&a.name, &b.name)); ASSERT_TRUE(aws_byte_cursor_eq(&a.value, &b.value)); return AWS_OP_SUCCESS; } static int s_check_header_eq(struct aws_http_header header, const char *name, const char *value) { ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header.name, name)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header.value, value)); return AWS_OP_SUCCESS; } static int s_check_value_eq(struct aws_byte_cursor cursor, const char *value) { ASSERT_TRUE(aws_byte_cursor_eq_c_str(&cursor, value)); return AWS_OP_SUCCESS; } TEST_CASE(headers_add) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); char name_src[] = "Host"; char value_src[] = "example.com"; ASSERT_SUCCESS( aws_http_headers_add(headers, aws_byte_cursor_from_c_str(name_src), aws_byte_cursor_from_c_str(value_src))); ASSERT_UINT_EQUALS(1, aws_http_headers_count(headers)); /* Mutilate source strings to be sure the datastructure isn't referencing their memory */ name_src[0] = 0; value_src[0] = 0; /* get-by-index */ struct aws_http_header get; ASSERT_SUCCESS(aws_http_headers_get_index(headers, 0, &get)); ASSERT_SUCCESS(s_check_header_eq(get, "Host", "example.com")); /* get-by-name (ignore case) */ struct aws_byte_cursor value_get; ASSERT_SUCCESS(aws_http_headers_get(headers, aws_byte_cursor_from_c_str("host"), &value_get)); /* ignore case */ ASSERT_SUCCESS(s_check_value_eq(value_get, "example.com")); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(headers_add_array) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); const struct aws_http_header src_headers[] = { s_make_header("Cookie", "a=1"), s_make_header("COOKIE", "b=2"), }; ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); ASSERT_UINT_EQUALS(AWS_ARRAY_SIZE(src_headers), aws_http_headers_count(headers)); for (size_t i = 0; i < AWS_ARRAY_SIZE(src_headers); ++i) { struct aws_http_header get; ASSERT_SUCCESS(aws_http_headers_get_index(headers, i, &get)); ASSERT_SUCCESS(s_check_headers_eq(src_headers[i], get)); } /* check that get-by-name returns first one it sees */ struct aws_byte_cursor get; ASSERT_SUCCESS(aws_http_headers_get(headers, aws_byte_cursor_from_c_str("COOKIE"), &get)); ASSERT_SUCCESS(s_check_value_eq(get, "a=1")); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(headers_set) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); /* Check that set() can add a new header */ ASSERT_SUCCESS( aws_http_headers_set(headers, aws_byte_cursor_from_c_str("Cookie"), aws_byte_cursor_from_c_str("a=1"))); struct aws_http_header get; ASSERT_SUCCESS(aws_http_headers_get_index(headers, 0, &get)); ASSERT_SUCCESS(s_check_header_eq(get, "Cookie", "a=1")); /* Add more headers with same name, then check that set() replaces them ALL */ const struct aws_http_header src_headers[] = { s_make_header("Cookie", "b=2"), s_make_header("COOKIE", "c=3"), }; 
ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); ASSERT_SUCCESS( aws_http_headers_set(headers, aws_byte_cursor_from_c_str("Cookie"), aws_byte_cursor_from_c_str("d=4"))); ASSERT_UINT_EQUALS(1, aws_http_headers_count(headers)); struct aws_byte_cursor value_get; ASSERT_SUCCESS(aws_http_headers_get(headers, aws_byte_cursor_from_c_str("cookie"), &value_get)); ASSERT_SUCCESS(s_check_value_eq(value_get, "d=4")); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(headers_erase_index) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); const struct aws_http_header src_headers[] = { s_make_header("Cookie", "a=1"), s_make_header("Cookie", "b=2"), }; ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); /* Ensure bad attempts to erase data are detected */ ASSERT_ERROR(AWS_ERROR_INVALID_INDEX, aws_http_headers_erase_index(headers, 99)); /* Erase by index */ ASSERT_SUCCESS(aws_http_headers_erase_index(headers, 0)); ASSERT_UINT_EQUALS(1, aws_http_headers_count(headers)); struct aws_http_header get; ASSERT_SUCCESS(aws_http_headers_get_index(headers, 0, &get)); ASSERT_SUCCESS(s_check_header_eq(get, "Cookie", "b=2")); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(headers_erase) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); const struct aws_http_header src_headers[] = { s_make_header("cookie", "a=1"), s_make_header("CoOkIe", "b=2"), }; ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); /* Ensure bad attempts to erase data are detected */ ASSERT_ERROR(AWS_ERROR_HTTP_HEADER_NOT_FOUND, aws_http_headers_erase(headers, aws_byte_cursor_from_c_str("asdf"))); ASSERT_SUCCESS(aws_http_headers_erase(headers, aws_byte_cursor_from_c_str("COOKIE"))); ASSERT_UINT_EQUALS(0, aws_http_headers_count(headers)); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(headers_erase_value) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); const struct aws_http_header src_headers[] = { s_make_header("Cookie", "a=1"), s_make_header("CoOkIe", "b=2"), s_make_header("COOKIE", "b=2"), }; ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); /* Ensure bad attempts to erase data are detected */ ASSERT_ERROR( AWS_ERROR_HTTP_HEADER_NOT_FOUND, aws_http_headers_erase_value( headers, aws_byte_cursor_from_c_str("cookie"), aws_byte_cursor_from_c_str("asdf"))); /* Pluck out the first instance of b=2 */ ASSERT_SUCCESS( aws_http_headers_erase_value(headers, aws_byte_cursor_from_c_str("cookie"), aws_byte_cursor_from_c_str("b=2"))); ASSERT_UINT_EQUALS(2, aws_http_headers_count(headers)); struct aws_http_header get; ASSERT_SUCCESS(aws_http_headers_get_index(headers, 0, &get)); ASSERT_SUCCESS(s_check_header_eq(get, "Cookie", "a=1")); ASSERT_SUCCESS(aws_http_headers_get_index(headers, 1, &get)); ASSERT_SUCCESS(s_check_header_eq(get, "COOKIE", "b=2")); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(headers_clear) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); const struct aws_http_header src_headers[] = { s_make_header("Host", "example.com"), s_make_header("Cookie", "a=1"), }; ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); 
aws_http_headers_clear(headers); ASSERT_UINT_EQUALS(0, aws_http_headers_count(headers)); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(headers_get_all) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); /* Check when no such headers exist */ aws_http_headers_clear(headers); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com")); ASSERT_NULL(aws_http_headers_get_all(headers, aws_byte_cursor_from_c_str("X-My-List"))); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_HEADER_NOT_FOUND, aws_last_error()); /* Check getting a single value */ aws_http_headers_clear(headers); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com")); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("X-My-List"), aws_byte_cursor_from_c_str("A")); struct aws_string *value = aws_http_headers_get_all(headers, aws_byte_cursor_from_c_str("X-My-List")); ASSERT_NOT_NULL(value); ASSERT_TRUE(aws_string_eq_c_str(value, "A")); aws_string_destroy(value); /* Check getting a blank-string value */ aws_http_headers_clear(headers); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com")); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("X-My-List"), aws_byte_cursor_from_c_str("")); value = aws_http_headers_get_all(headers, aws_byte_cursor_from_c_str("X-My-List")); ASSERT_NOT_NULL(value); ASSERT_TRUE(aws_string_eq_c_str(value, "")); aws_string_destroy(value); /* Check getting multiple values */ aws_http_headers_clear(headers); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com")); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("X-My-List"), aws_byte_cursor_from_c_str("A")); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("X-My-List"), aws_byte_cursor_from_c_str("B")); value = aws_http_headers_get_all(headers, aws_byte_cursor_from_c_str("X-My-List")); ASSERT_NOT_NULL(value); ASSERT_TRUE(aws_string_eq_c_str(value, "A, B")); aws_string_destroy(value); /* Check more edge cases */ aws_http_headers_clear(headers); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com")); /* some fields have single entry, some fields have multiple entries */ aws_http_headers_add(headers, aws_byte_cursor_from_c_str("X-My-List"), aws_byte_cursor_from_c_str("A, B")); /* preserve whitespace within middle of value. 
also name is different case */ aws_http_headers_add(headers, aws_byte_cursor_from_c_str("x-my-list"), aws_byte_cursor_from_c_str("C,D")); aws_http_headers_add( headers, aws_byte_cursor_from_c_str("X-My-List-"), aws_byte_cursor_from_c_str("BAD-EXTRA-DASH")); /* name is different case, and blank value*/ aws_http_headers_add(headers, aws_byte_cursor_from_c_str("X-MY-LIST"), aws_byte_cursor_from_c_str("")); aws_http_headers_add(headers, aws_byte_cursor_from_c_str("x-my-list"), aws_byte_cursor_from_c_str("E")); value = aws_http_headers_get_all(headers, aws_byte_cursor_from_c_str("X-My-List")); ASSERT_NOT_NULL(value); ASSERT_TRUE(aws_string_eq_c_str(value, "A, B, C,D, , E")); aws_string_destroy(value); /* Done */ aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(h2_headers_request_pseudos_get_set) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); const struct aws_http_header src_headers[] = { s_make_header("Host", "example.com"), s_make_header("Cookie", "a=1"), }; ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); ASSERT_SUCCESS(aws_http2_headers_set_request_method(headers, aws_byte_cursor_from_c_str("GET"))); ASSERT_SUCCESS(aws_http2_headers_set_request_scheme(headers, aws_byte_cursor_from_c_str("https"))); ASSERT_SUCCESS(aws_http2_headers_set_request_authority(headers, aws_byte_cursor_from_c_str("www.amazon.com"))); ASSERT_SUCCESS(aws_http2_headers_set_request_path(headers, aws_byte_cursor_from_c_str("/"))); /* pseudo headers should be in the front of headers */ struct aws_byte_cursor get; ASSERT_SUCCESS(aws_http2_headers_get_request_method(headers, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, "GET")); ASSERT_SUCCESS(aws_http2_headers_get_request_scheme(headers, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, "https")); ASSERT_SUCCESS(aws_http2_headers_get_request_authority(headers, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, "www.amazon.com")); ASSERT_SUCCESS(aws_http2_headers_get_request_path(headers, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, "/")); /* normal headers should be the end of the headers list */ struct aws_http_header get_header; ASSERT_SUCCESS(aws_http_headers_get_index(headers, 4, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Host", "example.com")); ASSERT_SUCCESS(aws_http_headers_get_index(headers, 5, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Cookie", "a=1")); /* overwrite method should not change the normal headers */ ASSERT_SUCCESS(aws_http2_headers_set_request_method(headers, aws_byte_cursor_from_c_str("PUT"))); ASSERT_SUCCESS(aws_http_headers_get_index(headers, 4, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Host", "example.com")); ASSERT_SUCCESS(aws_http_headers_get_index(headers, 5, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Cookie", "a=1")); ASSERT_SUCCESS(aws_http2_headers_get_request_method(headers, &get)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&get, "PUT")); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(h2_headers_response_pseudos_get_set) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); const struct aws_http_header src_headers[] = { s_make_header("Host", "example.com"), s_make_header("Cookie", "a=1"), }; ASSERT_SUCCESS(aws_http_headers_add_array(headers, src_headers, AWS_ARRAY_SIZE(src_headers))); ASSERT_SUCCESS(aws_http2_headers_set_response_status(headers, 200)); /* pseudo 
headers should be in the front of headers */ int get; ASSERT_SUCCESS(aws_http2_headers_get_response_status(headers, &get)); ASSERT_INT_EQUALS(get, 200); /* normal headers should be the end of the headers list */ struct aws_http_header get_header; ASSERT_SUCCESS(aws_http_headers_get_index(headers, 1, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Host", "example.com")); ASSERT_SUCCESS(aws_http_headers_get_index(headers, 2, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Cookie", "a=1")); /* overwrite method should not change the normal headers */ ASSERT_SUCCESS(aws_http2_headers_set_response_status(headers, 404)); ASSERT_SUCCESS(aws_http_headers_get_index(headers, 1, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Host", "example.com")); ASSERT_SUCCESS(aws_http_headers_get_index(headers, 2, &get_header)); ASSERT_SUCCESS(s_check_header_eq(get_header, "Cookie", "a=1")); ASSERT_SUCCESS(aws_http2_headers_get_response_status(headers, &get)); ASSERT_INT_EQUALS(get, 404); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(message_refcounts) { (void)ctx; struct aws_http_message *message = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(message); struct aws_http_headers *headers = aws_http_message_get_headers(message); ASSERT_NOT_NULL(headers); /* assert message is still valid after acquire/release */ aws_http_message_acquire(message); aws_http_message_release(message); ASSERT_SUCCESS(aws_http_message_set_request_path(message, aws_byte_cursor_from_c_str("PATCH"))); /* keep headers alive after message is destroyed */ aws_http_headers_acquire(headers); aws_http_message_release(message); ASSERT_FALSE(aws_http_headers_has(headers, aws_byte_cursor_from_c_str("Host"))); ASSERT_SUCCESS( aws_http_headers_add(headers, aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com"))); ASSERT_TRUE(aws_http_headers_has(headers, aws_byte_cursor_from_c_str("Host"))); aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(message_with_existing_headers) { (void)ctx; struct aws_http_headers *headers = aws_http_headers_new(allocator); ASSERT_NOT_NULL(headers); struct aws_http_message *message = aws_http_message_new_request_with_headers(allocator, headers); ASSERT_NOT_NULL(message); ASSERT_PTR_EQUALS(headers, aws_http_message_get_headers(message)); /* assert message has acquired hold on headers */ aws_http_headers_release(headers); /* still valid, right? */ struct aws_http_header new_header = {aws_byte_cursor_from_c_str("Host"), aws_byte_cursor_from_c_str("example.com")}; ASSERT_SUCCESS(aws_http_message_add_header(message, new_header)); /* clean up*/ aws_http_message_release(message); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_proxy.c000066400000000000000000001212631456575232400237420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include "proxy_test_helper.h" static struct proxy_tester tester; static char *s_host_name = "aws.amazon.com"; static uint32_t s_port = 80; static char *s_proxy_host_name = "www.myproxy.hmm"; static uint32_t s_proxy_port = 777; AWS_STATIC_STRING_FROM_LITERAL(s_mock_request_method, "GET"); AWS_STATIC_STRING_FROM_LITERAL(s_mock_request_path, "/"); AWS_STATIC_STRING_FROM_LITERAL(s_mock_request_host, "aws.amazon.com"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_basic_auth_header_name, "Proxy-Authorization"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_basic_auth_header_value, "Basic U29tZVVzZXI6U3VwZXJTZWNyZXQ="); AWS_STATIC_STRING_FROM_LITERAL(s_mock_request_username, "SomeUser"); AWS_STATIC_STRING_FROM_LITERAL(s_mock_request_password, "SuperSecret"); /* * Request utility functions */ struct aws_http_message *s_build_dummy_http_request( struct aws_allocator *allocator, struct aws_byte_cursor method, struct aws_byte_cursor path, struct aws_byte_cursor host) { struct aws_http_message *request = aws_http_message_new_request(allocator); aws_http_message_set_request_method(request, method); aws_http_message_set_request_path(request, path); struct aws_http_header host_header = { .name = aws_byte_cursor_from_c_str("Host"), .value = host, }; aws_http_message_add_header(request, host_header); struct aws_http_header accept_header = { .name = aws_byte_cursor_from_c_str("Accept"), .value = aws_byte_cursor_from_c_str("*/*"), }; aws_http_message_add_header(request, accept_header); return request; } static struct aws_http_message *s_build_http_request(struct aws_allocator *allocator) { return s_build_dummy_http_request( allocator, aws_byte_cursor_from_string(s_mock_request_method), aws_byte_cursor_from_string(s_mock_request_path), aws_byte_cursor_from_string(s_mock_request_host)); } static bool s_is_header_in_request(struct aws_http_message *request, struct aws_byte_cursor header_name) { size_t header_count = aws_http_message_get_header_count(request); for (size_t i = 0; i < header_count; ++i) { struct aws_http_header current_header; ASSERT_SUCCESS(aws_http_message_get_header(request, ¤t_header, i)); if (aws_byte_cursor_eq_ignore_case(¤t_header.name, &header_name)) { return true; } } return false; } static bool s_is_header_and_value_in_request(struct aws_http_message *request, struct aws_http_header *header) { size_t header_count = aws_http_message_get_header_count(request); for (size_t i = 0; i < header_count; ++i) { struct aws_http_header current_header; ASSERT_SUCCESS(aws_http_message_get_header(request, ¤t_header, i)); if (aws_byte_cursor_eq_ignore_case(¤t_header.name, &header->name) && aws_byte_cursor_eq(¤t_header.value, &header->value)) { return true; } } return false; } /* * TLS mock and vtable */ static int s_test_proxy_setup_client_tls( struct aws_channel_slot *right_of_slot, struct aws_tls_connection_options *tls_options) { /* * apply a dummy handler, but don't kick off negotiation, instead invoke success/failure immediately. * The tls handler being in a newly-created state won't affect the proxied tests which don't try and send * data through it. 
*/ AWS_FATAL_ASSERT(right_of_slot != NULL); struct aws_channel *channel = right_of_slot->channel; struct aws_allocator *allocator = right_of_slot->alloc; struct aws_channel_slot *tls_slot = aws_channel_slot_new(channel); if (!tls_slot) { return AWS_OP_ERR; } struct aws_channel_handler *tls_handler = aws_tls_client_handler_new(allocator, tls_options, tls_slot); if (!tls_handler) { aws_mem_release(allocator, tls_slot); return AWS_OP_ERR; } aws_channel_slot_insert_right(right_of_slot, tls_slot); if (aws_channel_slot_set_handler(tls_slot, tls_handler) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (tester.failure_type == PTFT_TLS_NEGOTIATION) { tls_options->on_negotiation_result(NULL, NULL, AWS_ERROR_UNKNOWN, tls_options->user_data); } else { tls_options->on_negotiation_result(NULL, NULL, AWS_ERROR_SUCCESS, tls_options->user_data); } return AWS_OP_SUCCESS; } struct aws_http_proxy_system_vtable s_proxy_table_for_tls = { .aws_channel_setup_client_tls = s_test_proxy_setup_client_tls, }; /* * Channel setup mock and vtable */ static int s_test_aws_proxy_new_socket_channel(struct aws_socket_channel_bootstrap_options *channel_options) { aws_mutex_lock(&tester.wait_lock); /* * Record where we were trying to connect to */ struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(channel_options->host_name); aws_byte_buf_append_dynamic(&tester.connection_host_name, &host_cursor); tester.connection_port = channel_options->port; /* * Conditional failure logic based on how the test was configured to fail */ if (tester.failure_type == PTFT_CHANNEL) { tester.wait_result = AWS_ERROR_UNKNOWN; } else if (tester.failure_type != PTFT_CONNECTION) { ASSERT_SUCCESS(proxy_tester_create_testing_channel_connection(&tester, channel_options->user_data)); } aws_mutex_unlock(&tester.wait_lock); /* * More conditional failure logic based on how the test was configured to fail */ if (tester.failure_type == PTFT_CHANNEL) { return AWS_OP_ERR; } if (tester.failure_type == PTFT_CONNECTION) { channel_options->setup_callback(tester.client_bootstrap, AWS_ERROR_UNKNOWN, NULL, channel_options->user_data); return AWS_OP_SUCCESS; } /* * We're not supposed to fail yet, so let's keep going */ struct aws_http_client_bootstrap *http_bootstrap = channel_options->user_data; http_bootstrap->on_setup(tester.client_connection, AWS_ERROR_SUCCESS, http_bootstrap->user_data); struct testing_channel *channel = proxy_tester_get_current_channel(&tester); if (tester.failure_type == PTFT_PROXY_STRATEGY) { testing_channel_drain_queued_tasks(channel); } else { testing_channel_run_currently_queued_tasks(channel); } if (tester.failure_type == PTFT_NONE || tester.failure_type == PTFT_CONNECT_REQUEST || tester.failure_type == PTFT_TLS_NEGOTIATION) { if (tester.proxy_options.connection_type == AWS_HPCT_HTTP_TUNNEL) { /* For tunnel proxies, send the CONNECT request and response */ ASSERT_SUCCESS(proxy_tester_verify_connect_request(&tester)); ASSERT_SUCCESS(proxy_tester_send_connect_response(&tester)); } } return AWS_OP_SUCCESS; } struct aws_http_connection_system_vtable s_proxy_connection_system_vtable = { .aws_client_bootstrap_new_socket_channel = s_test_aws_proxy_new_socket_channel, }; struct mocked_proxy_test_options { enum proxy_tester_test_mode test_mode; enum proxy_tester_failure_type failure_type; struct aws_http_proxy_strategy *proxy_strategy; enum aws_http_proxy_authentication_type auth_type; struct aws_byte_cursor legacy_basic_username; struct aws_byte_cursor legacy_basic_password; uint32_t mocked_response_count; struct aws_byte_cursor 
*mocked_responses; }; /* * Basic setup common to all mocked proxy tests - set vtables, options, call init, wait for setup completion */ static int s_setup_proxy_test(struct aws_allocator *allocator, struct mocked_proxy_test_options *config) { aws_http_connection_set_system_vtable(&s_proxy_connection_system_vtable); aws_http_proxy_system_set_vtable(&s_proxy_table_for_tls); struct aws_http_proxy_options proxy_options = { .connection_type = (config->test_mode == PTTM_HTTP_FORWARD) ? AWS_HPCT_HTTP_FORWARD : AWS_HPCT_HTTP_TUNNEL, .host = aws_byte_cursor_from_c_str(s_proxy_host_name), .port = s_proxy_port, .proxy_strategy = config->proxy_strategy, .auth_type = config->auth_type, .auth_username = config->legacy_basic_username, .auth_password = config->legacy_basic_password, }; struct proxy_tester_options options = { .alloc = allocator, .proxy_options = &proxy_options, .host = aws_byte_cursor_from_c_str(s_host_name), .port = s_port, .test_mode = config->test_mode, .failure_type = config->failure_type, .desired_connect_response_count = config->mocked_response_count, .desired_connect_responses = config->mocked_responses, }; ASSERT_SUCCESS(proxy_tester_init(&tester, &options)); proxy_tester_wait(&tester, proxy_tester_connection_setup_pred); return AWS_OP_SUCCESS; } /* * For forwarding proxy connections: * If we do pass in proxy options, verify we try and connect to the proxy */ static int s_test_http_forwarding_proxy_connection_proxy_target(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_FORWARD, .failure_type = PTFT_NONE, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_forwarding_proxy_connection_proxy_target, s_test_http_forwarding_proxy_connection_proxy_target); /* * For forwarding proxy connections: * Verify a channel creation failure cleans up properly */ static int s_test_http_forwarding_proxy_connection_channel_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_FORWARD, .failure_type = PTFT_CHANNEL, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.wait_result != AWS_ERROR_SUCCESS); ASSERT_TRUE(tester.client_connection == NULL); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_http_forwarding_proxy_connection_channel_failure, s_test_http_forwarding_proxy_connection_channel_failure); /* * For forwarding proxy connections: * Verify a connection establishment failure cleans up properly */ static int s_test_http_forwarding_proxy_connection_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_FORWARD, .failure_type = PTFT_CONNECTION, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.wait_result != AWS_ERROR_SUCCESS); ASSERT_TRUE(tester.client_connection == NULL); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } 
AWS_TEST_CASE( test_http_forwarding_proxy_connection_connect_failure, s_test_http_forwarding_proxy_connection_connect_failure); /* * For tls-enabled tunneling proxy connections: * Test the happy path by verifying CONNECT request, tls upgrade attempt */ static int s_test_https_tunnel_proxy_connection_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTPS_TUNNEL, .failure_type = PTFT_NONE, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection != NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_SUCCESS); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_https_tunnel_proxy_connection_success, s_test_https_tunnel_proxy_connection_success); /* * For plaintext tunneling proxy connections: * Test the happy path by verifying CONNECT request */ static int s_test_http_tunnel_proxy_connection_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_NONE, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection != NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_SUCCESS); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_tunnel_proxy_connection_success, s_test_http_tunnel_proxy_connection_success); /* * For tls-enabled tunneling proxy connections: * If the CONNECT request fails, verify error propagation and cleanup */ static int s_test_https_tunnel_proxy_connection_failure_connect(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTPS_TUNNEL, .failure_type = PTFT_CONNECT_REQUEST, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection == NULL); ASSERT_TRUE(tester.wait_result != AWS_ERROR_SUCCESS); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_https_tunnel_proxy_connection_failure_connect, s_test_https_tunnel_proxy_connection_failure_connect); /* * For plaintext tunneling proxy connections: * If the CONNECT request fails, verify error propagation and cleanup */ static int s_test_http_tunnel_proxy_connection_failure_connect(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_CONNECT_REQUEST, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection == NULL); ASSERT_TRUE(tester.wait_result != AWS_ERROR_SUCCESS); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_tunnel_proxy_connection_failure_connect, s_test_http_tunnel_proxy_connection_failure_connect); /* * For tls-enabled tunneling proxy connections: * If the TLS upgrade fails, verify error propagation and 
cleanup */ static int s_test_https_tunnel_proxy_connection_failure_tls(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTPS_TUNNEL, .failure_type = PTFT_TLS_NEGOTIATION, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_NULL(tester.client_connection); ASSERT_TRUE(AWS_ERROR_SUCCESS != tester.wait_result); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_https_tunnel_proxy_connection_failure_tls, s_test_https_tunnel_proxy_connection_failure_tls); static int s_verify_transformed_request( struct aws_http_message *untransformed_request, struct aws_http_message *transformed_request, struct aws_allocator *allocator) { /* method shouldn't change */ struct aws_byte_cursor method_cursor; ASSERT_SUCCESS(aws_http_message_get_request_method(transformed_request, &method_cursor)); struct aws_byte_cursor starting_method_cursor; ASSERT_SUCCESS(aws_http_message_get_request_method(untransformed_request, &starting_method_cursor)); ASSERT_TRUE(aws_byte_cursor_eq(&method_cursor, &starting_method_cursor)); /* path should be the full uri */ struct aws_byte_cursor path; ASSERT_SUCCESS(aws_http_message_get_request_path(transformed_request, &path)); struct aws_uri uri; ASSERT_SUCCESS(aws_uri_init_parse(&uri, allocator, &path)); struct aws_byte_cursor expected_scheme = aws_byte_cursor_from_c_str("http"); ASSERT_TRUE(aws_byte_cursor_eq(aws_uri_scheme(&uri), &expected_scheme)); struct aws_byte_cursor expected_host = aws_byte_cursor_from_string(s_mock_request_host); ASSERT_TRUE(aws_byte_cursor_eq(aws_uri_host_name(&uri), &expected_host)); struct aws_byte_cursor expected_query = aws_byte_cursor_from_c_str(""); ASSERT_TRUE(aws_byte_cursor_eq(aws_uri_query_string(&uri), &expected_query)); struct aws_byte_cursor expected_path = aws_byte_cursor_from_c_str("/"); ASSERT_TRUE(aws_byte_cursor_eq(aws_uri_path(&uri), &expected_path)); /* all old headers should still be present */ size_t untransformed_header_count = aws_http_message_get_header_count(untransformed_request); for (size_t i = 0; i < untransformed_header_count; ++i) { struct aws_http_header header; ASSERT_SUCCESS(aws_http_message_get_header(untransformed_request, &header, i)); ASSERT_TRUE(s_is_header_and_value_in_request(transformed_request, &header)); } aws_uri_clean_up(&uri); return AWS_OP_SUCCESS; } static int s_do_http_forwarding_proxy_request_transform_test( struct aws_allocator *allocator, struct mocked_proxy_test_options *test_options, int (*transformed_request_verifier_fn)(struct aws_http_message *)) { ASSERT_SUCCESS(s_setup_proxy_test(allocator, test_options)); struct aws_http_message *untransformed_request = s_build_http_request(allocator); struct aws_http_message *request = s_build_http_request(allocator); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .user_data = &tester, }; struct aws_http_stream *stream = aws_http_connection_make_request(tester.client_connection, &request_options); ASSERT_NOT_NULL(stream); aws_http_stream_activate(stream); struct testing_channel *channel = proxy_tester_get_current_channel(&tester); testing_channel_run_currently_queued_tasks(channel); s_verify_transformed_request(untransformed_request, request, allocator); if (transformed_request_verifier_fn != NULL) { 
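        /* Optional per-test verifier: lets individual cases (e.g. the basic-auth transform tests below)
         * assert on headers that the generic s_verify_transformed_request() check above does not cover. */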
ASSERT_SUCCESS(transformed_request_verifier_fn(request)); } /* double release the stream because the dummy connection doesn't actually process (and release) it */ aws_http_stream_release(stream); aws_http_message_destroy(request); aws_http_message_destroy(untransformed_request); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* * If we do pass in proxy options, verify requests get properly transformed */ static int s_test_http_forwarding_proxy_request_transform(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_FORWARD, .failure_type = PTFT_NONE, .proxy_strategy = NULL, }; ASSERT_SUCCESS(s_do_http_forwarding_proxy_request_transform_test(allocator, &options, NULL)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_forwarding_proxy_request_transform, s_test_http_forwarding_proxy_request_transform); static int s_check_for_basic_auth_header(struct aws_http_message *transformed_request) { /* Check for basic auth header */ struct aws_http_header auth_header; auth_header.name = aws_byte_cursor_from_string(s_expected_basic_auth_header_name); auth_header.value = aws_byte_cursor_from_string(s_expected_basic_auth_header_value); ASSERT_TRUE(s_is_header_and_value_in_request(transformed_request, &auth_header)); return AWS_OP_SUCCESS; } /* * If we do pass in proxy options, verify requests get properly transformed with basic authentication */ static int s_test_http_forwarding_proxy_request_transform_basic_auth(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_strategy_basic_auth_options config = { .proxy_connection_type = AWS_HPCT_HTTP_FORWARD, .user_name = aws_byte_cursor_from_string(s_mock_request_username), .password = aws_byte_cursor_from_string(s_mock_request_password), }; struct aws_http_proxy_strategy *proxy_strategy = aws_http_proxy_strategy_new_basic_auth(allocator, &config); struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_FORWARD, .failure_type = PTFT_NONE, .proxy_strategy = proxy_strategy, }; ASSERT_SUCCESS( s_do_http_forwarding_proxy_request_transform_test(allocator, &options, s_check_for_basic_auth_header)); aws_http_proxy_strategy_release(proxy_strategy); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_http_forwarding_proxy_request_transform_basic_auth, s_test_http_forwarding_proxy_request_transform_basic_auth); static int s_test_http_forwarding_proxy_request_transform_legacy_basic_auth( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_FORWARD, .failure_type = PTFT_NONE, .auth_type = AWS_HPAT_BASIC, .legacy_basic_username = aws_byte_cursor_from_string(s_mock_request_username), .legacy_basic_password = aws_byte_cursor_from_string(s_mock_request_password), }; ASSERT_SUCCESS( s_do_http_forwarding_proxy_request_transform_test(allocator, &options, s_check_for_basic_auth_header)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_http_forwarding_proxy_request_transform_legacy_basic_auth, s_test_http_forwarding_proxy_request_transform_legacy_basic_auth); AWS_STATIC_STRING_FROM_LITERAL(s_mock_kerberos_token_value, "abcdefABCDEF123"); static struct aws_string *s_mock_aws_http_proxy_negotiation_kerberos_get_token_sync_fn( void *user_data, int *out_error_code) { struct aws_allocator *allocator = user_data; *out_error_code = AWS_ERROR_SUCCESS; return aws_string_new_from_string(allocator, s_mock_kerberos_token_value); } AWS_STATIC_STRING_FROM_LITERAL(s_expected_auth_header_name, 
"Proxy-Authorization"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_kerberos_auth_header_value, "Negotiate abcdefABCDEF123"); static int s_verify_kerberos_connect_request(struct aws_http_message *request) { /* Check for auth header */ struct aws_http_header auth_header; auth_header.name = aws_byte_cursor_from_string(s_expected_auth_header_name); auth_header.value = aws_byte_cursor_from_string(s_expected_kerberos_auth_header_value); ASSERT_TRUE(s_is_header_and_value_in_request(request, &auth_header)); return AWS_OP_SUCCESS; } /* * Verify requests get properly transformed with kerberos strategy */ static int s_test_http_proxy_request_transform_kerberos(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_strategy_tunneling_kerberos_options config = { .get_token = s_mock_aws_http_proxy_negotiation_kerberos_get_token_sync_fn, .get_token_user_data = allocator, }; struct aws_http_proxy_strategy *kerberos_strategy = aws_http_proxy_strategy_new_tunneling_kerberos(allocator, &config); struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_NONE, .proxy_strategy = kerberos_strategy, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection != NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_SUCCESS); ASSERT_INT_EQUALS(1, aws_array_list_length(&tester.connect_requests)); struct aws_http_message *connect_request = NULL; aws_array_list_get_at(&tester.connect_requests, &connect_request, 0); ASSERT_SUCCESS(s_verify_kerberos_connect_request(connect_request)); aws_http_proxy_strategy_release(kerberos_strategy); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_proxy_request_transform_kerberos, s_test_http_proxy_request_transform_kerberos); static struct aws_string *s_mock_aws_http_proxy_negotiation_kerberos_get_token_sync_failure_fn( void *user_data, int *out_error_code) { (void)user_data; *out_error_code = AWS_ERROR_UNKNOWN; return NULL; } static int s_test_http_proxy_kerberos_token_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_strategy_tunneling_kerberos_options config = { .get_token = s_mock_aws_http_proxy_negotiation_kerberos_get_token_sync_failure_fn, .get_token_user_data = NULL, }; struct aws_http_proxy_strategy *kerberos_strategy = aws_http_proxy_strategy_new_tunneling_kerberos(allocator, &config); struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_PROXY_STRATEGY, .proxy_strategy = kerberos_strategy, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection == NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_UNKNOWN); aws_http_proxy_strategy_release(kerberos_strategy); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_proxy_kerberos_token_failure, s_test_http_proxy_kerberos_token_failure); static int s_test_http_proxy_kerberos_connect_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_strategy_tunneling_kerberos_options config = { .get_token = s_mock_aws_http_proxy_negotiation_kerberos_get_token_sync_fn, .get_token_user_data = allocator, }; struct 
aws_http_proxy_strategy *kerberos_strategy = aws_http_proxy_strategy_new_tunneling_kerberos(allocator, &config); struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_CONNECT_REQUEST, .proxy_strategy = kerberos_strategy, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection == NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_HTTP_PROXY_CONNECT_FAILED); aws_http_proxy_strategy_release(kerberos_strategy); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_proxy_kerberos_connect_failure, s_test_http_proxy_kerberos_connect_failure); AWS_STATIC_STRING_FROM_LITERAL(s_mock_ntlm_token_value, "NTLM_TOKEN"); AWS_STATIC_STRING_FROM_LITERAL(s_mock_ntlm_challenge_token_value, "NTLM_CHALLENGE_TOKEN"); static struct aws_string *s_mock_aws_http_proxy_negotiation_ntlm_get_challenge_token_sync_fn( void *user_data, const struct aws_byte_cursor *challenge_value, int *out_error_code) { (void)challenge_value; struct aws_allocator *allocator = user_data; *out_error_code = AWS_ERROR_SUCCESS; return aws_string_new_from_string(allocator, s_mock_ntlm_challenge_token_value); } static struct aws_string *s_mock_aws_http_proxy_negotiation_ntlm_get_token_sync_fn( void *user_data, int *out_error_code) { struct aws_allocator *allocator = user_data; *out_error_code = AWS_ERROR_SUCCESS; return aws_string_new_from_string(allocator, s_mock_ntlm_token_value); } static int s_verify_identity_connect_request(struct aws_http_message *request) { ASSERT_FALSE(s_is_header_in_request(request, aws_byte_cursor_from_string(s_expected_auth_header_name))); return AWS_OP_SUCCESS; } static struct aws_http_proxy_strategy *s_create_adaptive_strategy(struct aws_allocator *allocator) { struct aws_http_proxy_strategy_tunneling_kerberos_options kerberos_config = { .get_token = s_mock_aws_http_proxy_negotiation_kerberos_get_token_sync_fn, .get_token_user_data = allocator, }; struct aws_http_proxy_strategy_tunneling_ntlm_options ntlm_config = { .get_token = s_mock_aws_http_proxy_negotiation_ntlm_get_token_sync_fn, .get_challenge_token = s_mock_aws_http_proxy_negotiation_ntlm_get_challenge_token_sync_fn, .get_challenge_token_user_data = allocator, }; struct aws_http_proxy_strategy_tunneling_adaptive_options config = { .ntlm_options = &ntlm_config, .kerberos_options = &kerberos_config, }; return aws_http_proxy_strategy_new_tunneling_adaptive(allocator, &config); } static int s_test_http_proxy_adaptive_identity_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_strategy *adaptive_strategy = s_create_adaptive_strategy(allocator); struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_NONE, .proxy_strategy = adaptive_strategy, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); ASSERT_TRUE(tester.client_connection != NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_SUCCESS); ASSERT_INT_EQUALS(1, aws_array_list_length(&tester.connect_requests)); struct aws_http_message *connect_request = NULL; aws_array_list_get_at(&tester.connect_requests, &connect_request, 0); ASSERT_SUCCESS(s_verify_identity_connect_request(connect_request)); 
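    /* The adaptive strategy's first CONNECT attempt is the "identity" pass: as verified above, it carries
     * no Proxy-Authorization header at all, and it only escalates to kerberos/NTLM if the proxy rejects
     * that attempt. */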
aws_http_proxy_strategy_release(adaptive_strategy); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_proxy_adaptive_identity_success, s_test_http_proxy_adaptive_identity_success); AWS_STATIC_STRING_FROM_LITERAL(s_unauthorized_response, "HTTP/1.0 407 Unauthorized\r\n\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_good_response, "HTTP/1.0 200 Connection established\r\nconnection: close\r\n\r\n"); typedef int (*aws_proxy_test_verify_connect_fn)(struct aws_http_message *); static int s_verify_connect_requests(aws_proxy_test_verify_connect_fn verify_functions[], size_t function_count) { size_t connect_requests = aws_array_list_length(&tester.connect_requests); ASSERT_INT_EQUALS(function_count, connect_requests); for (size_t i = 0; i < connect_requests; ++i) { struct aws_http_message *request = NULL; aws_array_list_get_at(&tester.connect_requests, &request, i); ASSERT_SUCCESS(verify_functions[i](request)); } return AWS_OP_SUCCESS; } static int s_test_http_proxy_adaptive_kerberos_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_strategy *adaptive_strategy = s_create_adaptive_strategy(allocator); struct aws_byte_cursor first_response = aws_byte_cursor_from_string(s_unauthorized_response); struct aws_byte_cursor second_response = aws_byte_cursor_from_string(s_good_response); struct aws_byte_cursor connect_responses[] = { first_response, second_response, }; aws_proxy_test_verify_connect_fn verifiers[] = { s_verify_identity_connect_request, s_verify_kerberos_connect_request, }; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_NONE, .proxy_strategy = adaptive_strategy, .mocked_response_count = 2, .mocked_responses = connect_responses, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_wait(&tester, proxy_tester_connection_setup_pred)); ASSERT_TRUE(tester.client_connection != NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_SUCCESS); s_verify_connect_requests(verifiers, 2); aws_http_proxy_strategy_release(adaptive_strategy); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_proxy_adaptive_kerberos_success, s_test_http_proxy_adaptive_kerberos_success); AWS_STATIC_STRING_FROM_LITERAL(s_expected_ntlm_token_auth_header_value, "NTLM NTLM_TOKEN"); static int s_verify_ntlm_connect_token_request(struct aws_http_message *request) { /* Check for auth header */ struct aws_http_header auth_header; auth_header.name = aws_byte_cursor_from_string(s_expected_auth_header_name); auth_header.value = aws_byte_cursor_from_string(s_expected_ntlm_token_auth_header_value); ASSERT_TRUE(s_is_header_and_value_in_request(request, &auth_header)); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_expected_ntlm_challenge_token_auth_header_value, "NTLM NTLM_CHALLENGE_TOKEN"); static int s_verify_ntlm_connect_challenge_token_request(struct aws_http_message *request) { /* Check for auth header */ struct aws_http_header auth_header; auth_header.name = aws_byte_cursor_from_string(s_expected_auth_header_name); auth_header.value = aws_byte_cursor_from_string(s_expected_ntlm_challenge_token_auth_header_value); ASSERT_TRUE(s_is_header_and_value_in_request(request, &auth_header)); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_ntlm_response, "HTTP/1.0 407 Bad\r\nProxy-Authenticate: TestChallenge\r\n\r\n"); static int s_test_http_proxy_adaptive_ntlm_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; 
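    /* This test walks the adaptive strategy through its full fallback chain. The mocked proxy answers the
     * first three CONNECT attempts with 407 responses carrying "Proxy-Authenticate: TestChallenge", so the
     * strategy tries, in order: identity (no auth header), kerberos ("Negotiate ..."), NTLM token
     * ("NTLM NTLM_TOKEN"), and finally NTLM challenge-token ("NTLM NTLM_CHALLENGE_TOKEN"), which the
     * fourth (200) response accepts. */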
struct aws_http_proxy_strategy *adaptive_strategy = s_create_adaptive_strategy(allocator); struct aws_byte_cursor bad_response = aws_byte_cursor_from_string(s_ntlm_response); struct aws_byte_cursor good_response = aws_byte_cursor_from_string(s_good_response); struct aws_byte_cursor connect_responses[] = { bad_response, bad_response, bad_response, good_response, }; aws_proxy_test_verify_connect_fn verifiers[] = { s_verify_identity_connect_request, s_verify_kerberos_connect_request, s_verify_ntlm_connect_token_request, s_verify_ntlm_connect_challenge_token_request, }; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_NONE, .proxy_strategy = adaptive_strategy, .mocked_response_count = 4, .mocked_responses = connect_responses, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connect_request(&tester)); ASSERT_SUCCESS(proxy_tester_send_connect_response(&tester)); ASSERT_SUCCESS(proxy_tester_wait(&tester, proxy_tester_connection_setup_pred)); ASSERT_TRUE(tester.client_connection != NULL); ASSERT_TRUE(tester.wait_result == AWS_ERROR_SUCCESS); ASSERT_SUCCESS(s_verify_connect_requests(verifiers, 4)); aws_http_proxy_strategy_release(adaptive_strategy); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_proxy_adaptive_ntlm_success, s_test_http_proxy_adaptive_ntlm_success); static int s_test_http_proxy_adaptive_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_proxy_strategy *adaptive_strategy = s_create_adaptive_strategy(allocator); struct aws_byte_cursor bad_response = aws_byte_cursor_from_string(s_ntlm_response); struct aws_byte_cursor connect_responses[] = { bad_response, bad_response, bad_response, bad_response, }; aws_proxy_test_verify_connect_fn verifiers[] = { s_verify_identity_connect_request, s_verify_kerberos_connect_request, s_verify_ntlm_connect_token_request, s_verify_ntlm_connect_challenge_token_request, }; struct mocked_proxy_test_options options = { .test_mode = PTTM_HTTP_TUNNEL, .failure_type = PTFT_NONE, .proxy_strategy = adaptive_strategy, .mocked_response_count = 4, .mocked_responses = connect_responses, }; ASSERT_SUCCESS(s_setup_proxy_test(allocator, &options)); ASSERT_SUCCESS(proxy_tester_verify_connect_request(&tester)); ASSERT_SUCCESS(proxy_tester_send_connect_response(&tester)); ASSERT_SUCCESS(proxy_tester_wait(&tester, proxy_tester_connection_setup_pred)); ASSERT_TRUE(tester.wait_result == AWS_ERROR_HTTP_PROXY_CONNECT_FAILED); ASSERT_SUCCESS(s_verify_connect_requests(verifiers, 4)); aws_http_proxy_strategy_release(adaptive_strategy); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_proxy_adaptive_failure, s_test_http_proxy_adaptive_failure); AWS_STATIC_STRING_FROM_LITERAL(s_rewrite_host, "www.uri.com"); AWS_STATIC_STRING_FROM_LITERAL(s_rewrite_path, "/main/index.html?foo=bar"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_rewritten_path, "http://www.uri.com:80/main/index.html?foo=bar"); static void s_proxy_forwarding_request_rewrite_setup_fn( struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; (void)user_data; } static void s_proxy_forwarding_request_rewrite_shutdown_fn( struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; (void)user_data; } /* * Given some basic request parameters, (method, path, host), builds a simple http request and then applies 
the proxy * transform to it * * Verifies that the transform's final path matches what was expected */ static int s_do_request_rewrite_test( struct aws_allocator *allocator, const struct aws_string *method, const struct aws_string *path, const struct aws_string *host, const struct aws_string *expected_path) { struct aws_http_proxy_options proxy_options = { .host = aws_byte_cursor_from_c_str(s_proxy_host_name), .port = s_proxy_port, }; struct aws_http_client_connection_options connection_options = { .allocator = allocator, .host_name = aws_byte_cursor_from_string(s_rewrite_host), .port = 80, .proxy_options = &proxy_options, .on_setup = s_proxy_forwarding_request_rewrite_setup_fn, .on_shutdown = s_proxy_forwarding_request_rewrite_shutdown_fn, }; struct aws_http_proxy_user_data *user_data = aws_http_proxy_user_data_new(allocator, &connection_options, NULL, NULL); struct aws_http_message *request = s_build_dummy_http_request( allocator, aws_byte_cursor_from_string(method), aws_byte_cursor_from_string(path), aws_byte_cursor_from_string(host)); ASSERT_SUCCESS(aws_http_rewrite_uri_for_proxy_request(request, user_data)); struct aws_byte_cursor expected_rewritten_path = aws_byte_cursor_from_string(expected_path); struct aws_byte_cursor rewritten_path; ASSERT_SUCCESS(aws_http_message_get_request_path(request, &rewritten_path)); ASSERT_TRUE(aws_byte_cursor_eq(&rewritten_path, &expected_rewritten_path)); aws_http_message_destroy(request); aws_http_proxy_user_data_destroy(user_data); return AWS_OP_SUCCESS; } static int s_test_http_forwarding_proxy_uri_rewrite(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_request_rewrite_test( allocator, s_mock_request_method, s_rewrite_path, s_rewrite_host, s_expected_rewritten_path)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_http_forwarding_proxy_uri_rewrite, s_test_http_forwarding_proxy_uri_rewrite); AWS_STATIC_STRING_FROM_LITERAL(s_options_request_method, "OPTIONS"); AWS_STATIC_STRING_FROM_LITERAL(s_options_star_path, "*"); AWS_STATIC_STRING_FROM_LITERAL(s_expected_rewritten_options_path, "http://www.uri.com:80"); static int s_test_http_forwarding_proxy_uri_rewrite_options_star(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_request_rewrite_test( allocator, s_options_request_method, s_options_star_path, s_rewrite_host, s_expected_rewritten_options_path)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_http_forwarding_proxy_uri_rewrite_options_star, s_test_http_forwarding_proxy_uri_rewrite_options_star); aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_random_access_set.c000066400000000000000000000205061456575232400262330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include static uint64_t s_hash_string_ptr(const void *item) { const struct aws_string *str = *(const struct aws_string **)item; return aws_hash_string((void *)str); } static bool s_hash_string_ptr_eq(const void *a, const void *b) { const struct aws_string *str_a = *(const struct aws_string **)a; const struct aws_string *str_b = *(const struct aws_string **)b; return aws_string_eq(str_a, str_b); } static int s_random_access_set_sanitize_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_random_access_set list_with_map; ASSERT_SUCCESS(aws_random_access_set_init(&list_with_map, allocator, s_hash_string_ptr, aws_ptr_eq, NULL, 0)); aws_random_access_set_clean_up(&list_with_map); return AWS_OP_SUCCESS; } AWS_TEST_CASE(random_access_set_sanitize_test, s_random_access_set_sanitize_fn) static int s_random_access_set_insert_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); AWS_STATIC_STRING_FROM_LITERAL(bar, "bar"); AWS_STATIC_STRING_FROM_LITERAL(foobar, "foobar"); struct aws_random_access_set list_with_map; /* With only 1 initial element. */ ASSERT_SUCCESS( aws_random_access_set_init(&list_with_map, allocator, s_hash_string_ptr, s_hash_string_ptr_eq, NULL, 1)); bool added = true; ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foobar, &added)); ASSERT_TRUE(added); ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &bar, &added)); ASSERT_TRUE(added); ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foo, &added)); ASSERT_TRUE(added); /* You cannot have duplicates */ ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foobar, &added)); ASSERT_FALSE(added); /* Check the size */ ASSERT_UINT_EQUALS(aws_random_access_set_get_size(&list_with_map), 3); aws_random_access_set_clean_up(&list_with_map); return AWS_OP_SUCCESS; } AWS_TEST_CASE(random_access_set_insert_test, s_random_access_set_insert_fn) static int s_random_access_set_get_random_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); struct aws_random_access_set list_with_map; /* Insert a pointer of pointer of string to the structure */ ASSERT_SUCCESS( aws_random_access_set_init(&list_with_map, allocator, s_hash_string_ptr, s_hash_string_ptr_eq, NULL, 1)); /* Get the pointer of pointer to the string from the struct */ struct aws_string **left_element = NULL; /* Fail to get any, when there is nothing in it. 
*/ ASSERT_FAILS(aws_random_access_set_random_get_ptr(&list_with_map, (void **)&left_element)); bool added = false; ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foo, &added)); ASSERT_TRUE(added); /* Check the size */ ASSERT_UINT_EQUALS(aws_random_access_set_get_size(&list_with_map), 1); ASSERT_SUCCESS(aws_random_access_set_random_get_ptr(&list_with_map, (void **)&left_element)); ASSERT_TRUE(aws_string_eq(*left_element, foo)); aws_random_access_set_clean_up(&list_with_map); return AWS_OP_SUCCESS; } AWS_TEST_CASE(random_access_set_get_random_test, s_random_access_set_get_random_fn) static int s_random_access_set_exist_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); AWS_STATIC_STRING_FROM_LITERAL(bar, "bar"); struct aws_random_access_set list_with_map; ASSERT_SUCCESS( aws_random_access_set_init(&list_with_map, allocator, s_hash_string_ptr, s_hash_string_ptr_eq, NULL, 1)); bool added = false; ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foo, &added)); ASSERT_TRUE(added); bool exist = false; ASSERT_SUCCESS(aws_random_access_set_exist(&list_with_map, &foo, &exist)); ASSERT_TRUE(exist); ASSERT_SUCCESS(aws_random_access_set_exist(&list_with_map, &bar, &exist)); ASSERT_FALSE(exist); aws_random_access_set_clean_up(&list_with_map); return AWS_OP_SUCCESS; } AWS_TEST_CASE(random_access_set_exist_test, s_random_access_set_exist_fn) static int s_random_access_set_remove_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; AWS_STATIC_STRING_FROM_LITERAL(foo, "foo"); AWS_STATIC_STRING_FROM_LITERAL(bar, "bar"); AWS_STATIC_STRING_FROM_LITERAL(foobar, "foobar"); struct aws_random_access_set list_with_map; /* With only 1 initial element. */ ASSERT_SUCCESS( aws_random_access_set_init(&list_with_map, allocator, aws_hash_string, aws_hash_callback_string_eq, NULL, 1)); bool added = false; ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, bar, &added)); ASSERT_TRUE(added); ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, foobar, &added)); ASSERT_TRUE(added); ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, foo, &added)); ASSERT_TRUE(added); ASSERT_SUCCESS(aws_random_access_set_remove(&list_with_map, foo)); /* Check the size */ ASSERT_UINT_EQUALS(aws_random_access_set_get_size(&list_with_map), 2); /* Should succeed and do nothing */ ASSERT_SUCCESS(aws_random_access_set_remove(&list_with_map, foo)); /* Remove everything besides foobar, so a random get must return foobar */ ASSERT_SUCCESS(aws_random_access_set_remove(&list_with_map, bar)); ASSERT_UINT_EQUALS(aws_random_access_set_get_size(&list_with_map), 1); struct aws_string *left_element = NULL; ASSERT_SUCCESS(aws_random_access_set_random_get_ptr(&list_with_map, (void **)&left_element)); ASSERT_TRUE(aws_string_eq(left_element, foobar)); /* Remove the last element and make sure everything still works */ ASSERT_SUCCESS(aws_random_access_set_remove(&list_with_map, foobar)); ASSERT_UINT_EQUALS(aws_random_access_set_get_size(&list_with_map), 0); ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, foo, &added)); ASSERT_TRUE(added); ASSERT_UINT_EQUALS(aws_random_access_set_get_size(&list_with_map), 1); ASSERT_SUCCESS(aws_random_access_set_random_get_ptr(&list_with_map, (void **)&left_element)); ASSERT_TRUE(aws_string_eq(left_element, foo)); aws_random_access_set_clean_up(&list_with_map); return AWS_OP_SUCCESS; } AWS_TEST_CASE(random_access_set_remove_test, s_random_access_set_remove_fn) static void s_aws_string_destroy_callback(void *key) {
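    /* Elements are stored in the set as `struct aws_string *`, so this destroy callback receives the
     * address of the stored slot (an aws_string **) and must dereference once before freeing the
     * underlying string. The owns_element test below registers this callback so that clean_up can
     * release any strings that were never explicitly removed. */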
struct aws_string *str = *(struct aws_string **)key; aws_string_destroy(str); } static int s_random_access_set_owns_element_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* If we copy the aws string itself, the underlying data will be copied as a pointer, if the data is * less than the size of a pointer, we will be fine. The long string will test against it. */ struct aws_string *foo = aws_string_new_from_c_str(allocator, "foo123456"); struct aws_string *bar = aws_string_new_from_c_str(allocator, "bar7894156132121"); struct aws_string *foobar = aws_string_new_from_c_str(allocator, "foobar970712389709123"); struct aws_random_access_set list_with_map; /* With only 1 initial element. Add clean up for the string */ ASSERT_SUCCESS(aws_random_access_set_init( &list_with_map, allocator, s_hash_string_ptr, s_hash_string_ptr_eq, s_aws_string_destroy_callback, 1)); bool added = false; ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foobar, &added)); ASSERT_TRUE(added); ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &bar, &added)); ASSERT_TRUE(added); ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foo, &added)); ASSERT_TRUE(added); /* You cannot have duplicates */ ASSERT_SUCCESS(aws_random_access_set_add(&list_with_map, &foobar, &added)); ASSERT_FALSE(added); ASSERT_SUCCESS(aws_random_access_set_remove(&list_with_map, &foo)); ASSERT_SUCCESS(aws_random_access_set_remove(&list_with_map, &foobar)); /* Check the size */ ASSERT_UINT_EQUALS(aws_random_access_set_get_size(&list_with_map), 1); aws_random_access_set_clean_up(&list_with_map); return AWS_OP_SUCCESS; } AWS_TEST_CASE(random_access_set_owns_element_test, s_random_access_set_owns_element_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_stream_manager.c000066400000000000000000001726271456575232400255600ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "h2_test_helper.h" #include "stream_test_helper.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } struct sm_tester_options { struct aws_allocator *alloc; struct aws_http_connection_manager_system_vtable *mock_table; bool no_http2; size_t max_connections; size_t ideal_concurrent_streams_per_connection; size_t max_concurrent_streams_per_connection; const struct aws_http_connection_monitoring_options *monitor_opt; struct aws_byte_cursor *uri_cursor; const enum aws_log_level *log_level; bool prior_knowledge; bool close_connection_on_server_error; size_t connection_ping_period_ms; size_t connection_ping_timeout_ms; }; static struct aws_logger s_logger; struct sm_tester { struct aws_allocator *allocator; struct aws_event_loop_group *event_loop_group; struct aws_host_resolver *host_resolver; struct aws_client_bootstrap *client_bootstrap; struct aws_http2_stream_manager *stream_manager; struct aws_http_connection_manager *connection_manager; struct aws_uri endpoint; struct aws_tls_ctx *tls_ctx; struct aws_tls_ctx_options tls_ctx_options; struct aws_tls_connection_options tls_connection_options; struct aws_http_proxy_options *verify_proxy_options; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_array_list streams; size_t wait_for_stream_acquire_count; size_t acquiring_stream_errors; int error_code; size_t wait_for_stream_completed_count; size_t stream_completed_count; struct aws_atomic_var stream_destroyed_count; size_t stream_complete_errors; size_t stream_200_count; size_t stream_status_not_200_count; int stream_completed_error_code; bool is_shutdown_complete; /* Fake HTTP/2 connection */ size_t wait_for_fake_connection_count; size_t bad_connection_to_offer; size_t offer_bad_connection_count; /* Mock will wait for delay_finished to offer connections synced. 
Once flip async finished to true, you should offer * the count connections */ size_t delay_offer_connection_count; bool delay_finished; struct aws_array_list fake_connections; bool release_sm_during_connection_acquiring; uint32_t max_con_stream_remote; /* To invoke the real on_setup */ aws_http_on_client_connection_setup_fn *on_setup; size_t length_sent; }; static struct sm_tester s_tester; struct sm_fake_connection { struct testing_channel testing_channel; struct h2_fake_peer peer; struct aws_http_client_connection_options options; struct aws_http_connection *connection; }; static void s_testing_channel_shutdown(int error_code, void *user_data) { struct sm_fake_connection *fake_connection = (struct sm_fake_connection *)user_data; if (!fake_connection->connection) { /* If there is no connection, which means the fake_connection is a bad connection and we should not invoke on * shutdown as setup failed for them */ return; } if (fake_connection->options.on_shutdown) { /* In real world, this is trigger by the bootstrap */ fake_connection->options.on_shutdown( fake_connection->connection, error_code, fake_connection->options.user_data); } } static struct sm_fake_connection *s_get_fake_connection(size_t i) { AWS_FATAL_ASSERT(aws_array_list_length(&s_tester.fake_connections) > i); struct sm_fake_connection *fake_connection = NULL; aws_array_list_get_at(&s_tester.fake_connections, &fake_connection, i); return fake_connection; } static struct sm_fake_connection *s_sm_fake_connection_new(void) { struct sm_fake_connection *fake_connection = aws_mem_calloc(s_tester.allocator, 1, sizeof(struct sm_fake_connection)); struct aws_testing_channel_options options = {.clock_fn = aws_high_res_clock_get_ticks}; AWS_FATAL_ASSERT( testing_channel_init(&fake_connection->testing_channel, s_tester.allocator, &options) == AWS_OP_SUCCESS); fake_connection->testing_channel.channel_shutdown_user_data = fake_connection; fake_connection->testing_channel.channel_shutdown = s_testing_channel_shutdown; struct h2_fake_peer_options peer_options = { .alloc = s_tester.allocator, .testing_channel = &fake_connection->testing_channel, .is_server = true, }; AWS_FATAL_ASSERT(h2_fake_peer_init(&fake_connection->peer, &peer_options) == AWS_OP_SUCCESS); return fake_connection; } static void s_sm_fake_connection_destroy(struct sm_fake_connection *fake_connection) { AWS_FATAL_ASSERT(testing_channel_clean_up(&fake_connection->testing_channel) == AWS_OP_SUCCESS); aws_mem_release(s_tester.allocator, fake_connection); } static bool s_is_shutdown_complete(void *context) { (void)context; return s_tester.is_shutdown_complete; } static int s_wait_on_shutdown_complete(void) { ASSERT_SUCCESS(aws_mutex_lock(&s_tester.lock)); int signal_error = aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_is_shutdown_complete, NULL); ASSERT_SUCCESS(aws_mutex_unlock(&s_tester.lock)); return signal_error; } static void s_sm_tester_on_sm_shutdown_complete(void *user_data) { struct sm_tester *tester = user_data; AWS_FATAL_ASSERT(tester == &s_tester); aws_mutex_lock(&s_tester.lock); s_tester.is_shutdown_complete = true; aws_mutex_unlock(&s_tester.lock); aws_condition_variable_notify_one(&s_tester.signal); } static int s_tester_init(struct sm_tester_options *options) { struct aws_allocator *alloc = options->alloc; aws_http_library_init(alloc); s_tester.allocator = alloc; struct aws_logger_standard_options logger_options = { .level = options->log_level ? 
*options->log_level : AWS_LOG_LEVEL_TRACE, .file = stderr, }; aws_logger_init_standard(&s_logger, alloc, &logger_options); aws_logger_set(&s_logger); ASSERT_SUCCESS(aws_mutex_init(&s_tester.lock)); ASSERT_SUCCESS(aws_condition_variable_init(&s_tester.signal)); s_tester.event_loop_group = aws_event_loop_group_new_default(alloc, 0, NULL); ASSERT_SUCCESS(aws_array_list_init_dynamic(&s_tester.streams, alloc, 1, sizeof(struct aws_http_stream *))); ASSERT_SUCCESS( aws_array_list_init_dynamic(&s_tester.fake_connections, alloc, 3, sizeof(struct sm_fake_connection *))); struct aws_host_resolver_default_options resolver_options = { .el_group = s_tester.event_loop_group, .max_entries = 8, }; s_tester.host_resolver = aws_host_resolver_new_default(s_tester.allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = s_tester.event_loop_group, .host_resolver = s_tester.host_resolver, }; s_tester.client_bootstrap = aws_client_bootstrap_new(s_tester.allocator, &bootstrap_options); ASSERT_NOT_NULL(s_tester.client_bootstrap); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV4, .connect_timeout_ms = (uint32_t)aws_timestamp_convert(10, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), }; if (options->uri_cursor) { ASSERT_SUCCESS(aws_uri_init_parse(&s_tester.endpoint, alloc, options->uri_cursor)); } else { struct aws_byte_cursor default_host = aws_byte_cursor_from_c_str("https://www.amazon.com"); ASSERT_SUCCESS(aws_uri_init_parse(&s_tester.endpoint, alloc, &default_host)); } bool use_tls = true; uint32_t port = 443; if (!s_tester.endpoint.scheme.len && (s_tester.endpoint.port == 80 || s_tester.endpoint.port == 3280)) { use_tls = false; } else { if (aws_byte_cursor_eq_c_str_ignore_case(&s_tester.endpoint.scheme, "http")) { use_tls = false; } } if (s_tester.endpoint.port) { port = s_tester.endpoint.port; } else if (aws_byte_cursor_eq_c_str_ignore_case(&s_tester.endpoint.scheme, "http")) { port = 80; } if (use_tls) { aws_tls_ctx_options_init_default_client(&s_tester.tls_ctx_options, alloc); if (!options->no_http2) { ASSERT_SUCCESS(aws_tls_ctx_options_set_alpn_list(&s_tester.tls_ctx_options, "h2")); } if (aws_byte_cursor_eq_c_str_ignore_case(&s_tester.endpoint.host_name, "localhost")) { /* Turn off peer verification as a localhost cert used */ s_tester.tls_ctx_options.verify_peer = false; } s_tester.tls_ctx = aws_tls_client_ctx_new(alloc, &s_tester.tls_ctx_options); ASSERT_NOT_NULL(s_tester.tls_ctx); aws_tls_connection_options_init_from_ctx(&s_tester.tls_connection_options, s_tester.tls_ctx); aws_tls_connection_options_set_server_name( &s_tester.tls_connection_options, alloc, &s_tester.endpoint.host_name); } struct aws_http2_stream_manager_options sm_options = { .bootstrap = s_tester.client_bootstrap, .socket_options = &socket_options, .tls_connection_options = use_tls ? 
&s_tester.tls_connection_options : NULL, .host = s_tester.endpoint.host_name, .port = port, .ideal_concurrent_streams_per_connection = options->ideal_concurrent_streams_per_connection, .max_concurrent_streams_per_connection = options->max_concurrent_streams_per_connection, .max_connections = options->max_connections, .shutdown_complete_user_data = &s_tester, .shutdown_complete_callback = s_sm_tester_on_sm_shutdown_complete, .monitoring_options = options->monitor_opt, .close_connection_on_server_error = options->close_connection_on_server_error, .connection_ping_period_ms = options->connection_ping_period_ms, .connection_ping_timeout_ms = options->connection_ping_timeout_ms, .http2_prior_knowledge = options->prior_knowledge, }; s_tester.stream_manager = aws_http2_stream_manager_new(alloc, &sm_options); s_tester.max_con_stream_remote = 100; aws_atomic_init_int(&s_tester.stream_destroyed_count, 0); return AWS_OP_SUCCESS; } static void s_release_all_streams(void) { AWS_FATAL_ASSERT(aws_mutex_lock(&s_tester.lock) == AWS_OP_SUCCESS); size_t release_count = aws_array_list_length(&s_tester.streams); for (size_t i = 0; i < release_count; ++i) { struct aws_http_stream *stream = NULL; if (aws_array_list_back(&s_tester.streams, &stream)) { continue; } aws_http_stream_release(stream); aws_array_list_pop_back(&s_tester.streams); } AWS_FATAL_ASSERT(aws_mutex_unlock(&s_tester.lock) == AWS_OP_SUCCESS); } static int s_fake_connection_get_stream_received(struct sm_fake_connection *fake_connection) { AWS_FATAL_ASSERT(h2_fake_peer_decode_messages_from_testing_channel(&fake_connection->peer) == AWS_OP_SUCCESS); size_t frames_count = h2_decode_tester_frame_count(&fake_connection->peer.decode); int streams_received = 0; for (size_t i = 0; i < frames_count; ++i) { struct h2_decoded_frame *frame = h2_decode_tester_get_frame(&fake_connection->peer.decode, i); if (frame->end_stream) { ++streams_received; } } return streams_received; } /* complete first num_streams_to_complete. If num_streams_to_complete is zero, complete all the streams. 
*/ static void s_fake_connection_complete_streams( struct sm_fake_connection *fake_connection, int num_streams_to_complete) { if (!fake_connection->connection) { return; } testing_channel_drain_queued_tasks(&fake_connection->testing_channel); AWS_FATAL_ASSERT(h2_fake_peer_decode_messages_from_testing_channel(&fake_connection->peer) == AWS_OP_SUCCESS); struct aws_http_header response_headers_src[] = { DEFINE_HEADER(":status", "404"), DEFINE_HEADER("date", "Wed, 01 Apr 2020 23:02:49 GMT"), }; struct aws_http_headers *response_headers = aws_http_headers_new(s_tester.allocator); aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); size_t frames_count = h2_decode_tester_frame_count(&fake_connection->peer.decode); int streams_completed = 0; for (size_t i = 0; i < frames_count; ++i) { struct h2_decoded_frame *frame = h2_decode_tester_get_frame(&fake_connection->peer.decode, i); if (frame->end_stream) { struct aws_h2_frame *response_frame = aws_h2_frame_new_headers( s_tester.allocator, frame->stream_id, response_headers, true /*end_stream*/, 0, NULL); AWS_FATAL_ASSERT(h2_fake_peer_send_frame(&fake_connection->peer, response_frame) == AWS_OP_SUCCESS); if (num_streams_to_complete && ++streams_completed >= num_streams_to_complete) { break; } } } aws_http_headers_release(response_headers); testing_channel_drain_queued_tasks(&fake_connection->testing_channel); } static void s_clean_fake_connections(void) { size_t release_count = aws_array_list_length(&s_tester.fake_connections); for (size_t i = 0; i < release_count; ++i) { struct sm_fake_connection *fake_connection = NULL; if (aws_array_list_back(&s_tester.fake_connections, &fake_connection)) { continue; } aws_array_list_pop_back(&s_tester.fake_connections); s_sm_fake_connection_destroy(fake_connection); } aws_array_list_clean_up(&s_tester.fake_connections); } static void s_drain_all_fake_connection_testing_channel(void) { size_t count = aws_array_list_length(&s_tester.fake_connections); for (size_t i = 0; i < count; ++i) { struct sm_fake_connection *fake_connection = NULL; aws_array_list_get_at(&s_tester.fake_connections, &fake_connection, i); testing_channel_drain_queued_tasks(&fake_connection->testing_channel); } } static void s_release_fake_connections(void) { size_t count = aws_array_list_length(&s_tester.fake_connections); for (size_t i = 0; i < count; ++i) { struct sm_fake_connection *fake_connection = NULL; aws_array_list_get_at(&s_tester.fake_connections, &fake_connection, i); aws_http_connection_release(fake_connection->connection); h2_fake_peer_clean_up(&fake_connection->peer); } s_drain_all_fake_connection_testing_channel(); } static int s_complete_all_fake_connection_streams(void) { size_t count = aws_array_list_length(&s_tester.fake_connections); for (size_t i = 0; i < count; ++i) { struct sm_fake_connection *fake_connection = NULL; ASSERT_SUCCESS(aws_array_list_get_at(&s_tester.fake_connections, &fake_connection, i)); /* complete all the streams from the fake connection */ s_fake_connection_complete_streams(fake_connection, 0 /*all streams*/); testing_channel_drain_queued_tasks(&fake_connection->testing_channel); } return AWS_OP_SUCCESS; } static int s_tester_clean_up(void) { s_release_all_streams(); if (s_tester.stream_manager) { s_release_fake_connections(); aws_http2_stream_manager_release(s_tester.stream_manager); } s_drain_all_fake_connection_testing_channel(); s_wait_on_shutdown_complete(); s_clean_fake_connections(); aws_client_bootstrap_release(s_tester.client_bootstrap); 
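    /* Release the remaining shared infrastructure in roughly reverse order of creation: host resolver,
     * event loop group, TLS options/context, then the HTTP library itself, before destroying the
     * synchronization primitives and the bookkeeping lists. */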
aws_host_resolver_release(s_tester.host_resolver); aws_event_loop_group_release(s_tester.event_loop_group); aws_tls_ctx_options_clean_up(&s_tester.tls_ctx_options); aws_tls_connection_options_clean_up(&s_tester.tls_connection_options); aws_tls_ctx_release(s_tester.tls_ctx); aws_http_library_clean_up(); aws_mutex_clean_up(&s_tester.lock); aws_condition_variable_clean_up(&s_tester.signal); aws_array_list_clean_up(&s_tester.streams); aws_uri_clean_up(&s_tester.endpoint); aws_logger_clean_up(&s_logger); return AWS_OP_SUCCESS; } static void s_sm_tester_on_stream_acquired(struct aws_http_stream *stream, int error_code, void *user_data) { (void)user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&s_tester.lock) == AWS_OP_SUCCESS); if (error_code) { ++s_tester.acquiring_stream_errors; ++s_tester.stream_completed_count; /* As the stream will never be completed through complete callback */ s_tester.error_code = error_code; } else { aws_array_list_push_back(&s_tester.streams, &stream); } aws_condition_variable_notify_one(&s_tester.signal); AWS_FATAL_ASSERT(aws_mutex_unlock(&s_tester.lock) == AWS_OP_SUCCESS); } static bool s_is_stream_acquired_count_at_least(void *context) { (void)context; return s_tester.wait_for_stream_acquire_count <= aws_array_list_length(&s_tester.streams) + s_tester.acquiring_stream_errors; } static int s_wait_on_streams_acquired_count(size_t count) { ASSERT_SUCCESS(aws_mutex_lock(&s_tester.lock)); s_tester.wait_for_stream_acquire_count = count; int signal_error = aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_is_stream_acquired_count_at_least, NULL); ASSERT_SUCCESS(aws_mutex_unlock(&s_tester.lock)); return signal_error; } static bool s_is_stream_completed_count_at_least(void *context) { (void)context; return s_tester.wait_for_stream_completed_count <= s_tester.stream_completed_count; } static int s_wait_on_streams_completed_count(size_t count) { ASSERT_SUCCESS(aws_mutex_lock(&s_tester.lock)); s_tester.wait_for_stream_completed_count = count; int signal_error = aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_is_stream_completed_count_at_least, NULL); ASSERT_SUCCESS(aws_mutex_unlock(&s_tester.lock)); return signal_error; } static void s_sm_tester_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)user_data; (void)stream; AWS_FATAL_ASSERT(aws_mutex_lock(&s_tester.lock) == AWS_OP_SUCCESS); if (error_code) { ++s_tester.stream_complete_errors; s_tester.stream_completed_error_code = error_code; } else { int status = 0; if (aws_http_stream_get_incoming_response_status(stream, &status)) { ++s_tester.stream_complete_errors; s_tester.stream_completed_error_code = aws_last_error(); } else { if (status == 200) { ++s_tester.stream_200_count; } else { ++s_tester.stream_status_not_200_count; } } } ++s_tester.stream_completed_count; aws_condition_variable_notify_one(&s_tester.signal); AWS_FATAL_ASSERT(aws_mutex_unlock(&s_tester.lock) == AWS_OP_SUCCESS); } static void s_sm_tester_on_stream_destroy(void *user_data) { (void)user_data; aws_atomic_fetch_add(&s_tester.stream_destroyed_count, 1); } static int s_sm_stream_acquiring_customize_request( int num_streams, struct aws_http_make_request_options *request_options) { struct aws_http2_stream_manager_acquire_stream_options acquire_stream_option = { .options = request_options, .callback = s_sm_tester_on_stream_acquired, .user_data = &s_tester, }; for (int i = 0; i < num_streams; ++i) { /* TODO: Test the callback will always be fired asynced, as now the CM cannot ensure the 
callback happens * asynchronously, we cannot ensure it as well. */ aws_http2_stream_manager_acquire_stream(s_tester.stream_manager, &acquire_stream_option); } return AWS_OP_SUCCESS; } static struct aws_byte_cursor s_default_empty_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); struct aws_byte_cursor s_normalize_path(struct aws_byte_cursor path) { return path.len == 0 ? s_default_empty_path : path; } static int s_sm_stream_acquiring(int num_streams) { struct aws_http_message *request = aws_http2_message_new_request(s_tester.allocator); ASSERT_NOT_NULL(request); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "GET"), { .name = aws_byte_cursor_from_c_str(":scheme"), .value = *aws_uri_scheme(&s_tester.endpoint), }, { .name = aws_byte_cursor_from_c_str(":path"), .value = s_normalize_path(*aws_uri_path(&s_tester.endpoint)), }, { .name = aws_byte_cursor_from_c_str(":authority"), .value = *aws_uri_host_name(&s_tester.endpoint), }, }; aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .user_data = &s_tester, .on_complete = s_sm_tester_on_stream_complete, .on_destroy = s_sm_tester_on_stream_destroy, }; int return_code = s_sm_stream_acquiring_customize_request(num_streams, &request_options); aws_http_message_release(request); return return_code; } /* Test the common setup/teardown used by all tests in this file */ TEST_CASE(h2_sm_sanity_check) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); return s_tester_clean_up(); } static bool s_is_fake_connection_count(void *context) { (void)context; return s_tester.wait_for_fake_connection_count <= aws_array_list_length(&s_tester.fake_connections); } static int s_wait_on_fake_connection_count(size_t count) { ASSERT_SUCCESS(aws_mutex_lock(&s_tester.lock)); s_tester.wait_for_fake_connection_count = count; int signal_error = aws_condition_variable_wait_pred(&s_tester.signal, &s_tester.lock, s_is_fake_connection_count, NULL); ASSERT_SUCCESS(aws_mutex_unlock(&s_tester.lock)); return signal_error; } static struct sm_fake_connection *s_sm_tester_fake_connection_new_from_options( const struct aws_http_client_connection_options *options) { struct sm_fake_connection *fake_connection = s_sm_fake_connection_new(); fake_connection->options = *options; AWS_FATAL_ASSERT(aws_array_list_push_back(&s_tester.fake_connections, &fake_connection) == AWS_OP_SUCCESS); if (s_tester.offer_bad_connection_count < s_tester.bad_connection_to_offer) { /* Offer a bad connection */ s_tester.offer_bad_connection_count++; return fake_connection; } struct aws_http_connection *connection = aws_http_connection_new_http2_client( options->allocator, options->manual_window_management /* manual window management */, options->http2_options); AWS_FATAL_ASSERT(connection); aws_http_connection_acquire(connection); { /* set connection user_data (handled by http-bootstrap in real world) */ connection->user_data = options->user_data; /* re-enact marriage vows of http-connection and channel (handled by http-bootstrap in real world) */ struct aws_channel_slot *slot = aws_channel_slot_new(fake_connection->testing_channel.channel); AWS_FATAL_ASSERT(slot); AWS_FATAL_ASSERT(aws_channel_slot_insert_end(fake_connection->testing_channel.channel, slot) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_channel_slot_set_handler(slot, 
&connection->channel_handler) == AWS_OP_SUCCESS); connection->vtable->on_channel_handler_installed(&connection->channel_handler, slot); } fake_connection->connection = connection; return fake_connection; } static int s_sm_tester_finish_up_fake_connection_set_up(struct sm_fake_connection *fake_connection) { if (!fake_connection->connection) { fake_connection->options.on_setup(NULL, aws_last_error(), fake_connection->options.user_data); return AWS_OP_SUCCESS; } /* Invoke callback outside lock */ fake_connection->options.on_setup( fake_connection->connection, AWS_ERROR_SUCCESS, fake_connection->options.user_data); testing_channel_drain_queued_tasks(&fake_connection->testing_channel); ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&fake_connection->peer)); struct aws_http2_setting settings_array[] = { { .id = AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, .value = s_tester.max_con_stream_remote, }, }; struct aws_h2_frame *settings_frame = aws_h2_frame_new_settings(s_tester.allocator, settings_array, AWS_ARRAY_SIZE(settings_array), false /*ack*/); ASSERT_NOT_NULL(settings_frame); ASSERT_SUCCESS(h2_fake_peer_send_frame(&fake_connection->peer, settings_frame)); struct aws_h2_frame *settings_ack = aws_h2_frame_new_settings(s_tester.allocator, NULL, 0, true /*ack*/); ASSERT_NOT_NULL(settings_ack); ASSERT_SUCCESS(h2_fake_peer_send_frame(&fake_connection->peer, settings_ack)); return AWS_OP_SUCCESS; } static int s_aws_http_connection_manager_create_connection_sync_mock( const struct aws_http_client_connection_options *options) { AWS_FATAL_ASSERT(aws_mutex_lock(&s_tester.lock) == AWS_OP_SUCCESS); struct sm_fake_connection *fake_connection = s_sm_tester_fake_connection_new_from_options(options); aws_condition_variable_notify_one(&s_tester.signal); AWS_FATAL_ASSERT(aws_mutex_unlock(&s_tester.lock) == AWS_OP_SUCCESS); ASSERT_SUCCESS(s_sm_tester_finish_up_fake_connection_set_up(fake_connection)); return AWS_OP_SUCCESS; } static int s_aws_http_connection_manager_create_connection_delay_mock( const struct aws_http_client_connection_options *options) { if (s_tester.delay_finished) { return s_aws_http_connection_manager_create_connection_sync_mock(options); } AWS_FATAL_ASSERT(aws_mutex_lock(&s_tester.lock) == AWS_OP_SUCCESS); ++s_tester.delay_offer_connection_count; struct sm_fake_connection *fake_connection = s_sm_tester_fake_connection_new_from_options(options); AWS_FATAL_ASSERT(aws_mutex_unlock(&s_tester.lock) == AWS_OP_SUCCESS); /* don't do anything as it's delivered delay */ (void)fake_connection; return AWS_OP_SUCCESS; } static int s_sm_tester_offer_waiting_connections(void) { for (size_t i = 0; i < s_tester.delay_offer_connection_count; i++) { struct sm_fake_connection *fake_connection = s_get_fake_connection(i); ASSERT_SUCCESS(s_sm_tester_finish_up_fake_connection_set_up(fake_connection)); } s_tester.delay_finished = true; /* We are not haveing any threads. 
so, not invoking anything */ return AWS_OP_SUCCESS; } static struct aws_http_connection_manager_system_vtable s_mocks; static void s_override_cm_connect_function(int (*fn)(const struct aws_http_client_connection_options *options)) { s_mocks = *g_aws_http_connection_manager_default_system_vtable_ptr; s_mocks.aws_http_client_connect = fn; s_tester.connection_manager = s_tester.stream_manager->connection_manager; aws_http_connection_manager_set_system_vtable(s_tester.connection_manager, &s_mocks); } TEST_CASE(h2_sm_mock_connection) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); int num_to_acquire = 5; ASSERT_SUCCESS(s_sm_stream_acquiring(num_to_acquire)); /* waiting for one fake connection made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(num_to_acquire)); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); size_t destroyed = aws_atomic_load_int(&s_tester.stream_destroyed_count); ASSERT_INT_EQUALS(0, destroyed); s_release_all_streams(); destroyed = aws_atomic_load_int(&s_tester.stream_destroyed_count); ASSERT_INT_EQUALS(num_to_acquire, destroyed); return s_tester_clean_up(); } TEST_CASE(h2_sm_mock_multiple_connections) { (void)ctx; size_t max_concurrent_streams_per_connection = 3; int num_streams_to_acquire = 9; int num_expected_connection = num_streams_to_acquire / (int)max_concurrent_streams_per_connection; if (num_streams_to_acquire % max_concurrent_streams_per_connection) { ++num_expected_connection; } struct sm_tester_options options = { .max_connections = 5, .max_concurrent_streams_per_connection = max_concurrent_streams_per_connection, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); ASSERT_SUCCESS(s_sm_stream_acquiring(num_streams_to_acquire)); /* waiting for one fake connection made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(num_expected_connection)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(num_streams_to_acquire)); ASSERT_TRUE(aws_array_list_length(&s_tester.fake_connections) == (size_t)num_expected_connection); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } /* Test stream manager got an bad connection and fail the expected number of stream requests. 
*/ TEST_CASE(h2_sm_mock_bad_connection_acquired) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .ideal_concurrent_streams_per_connection = 2, .max_concurrent_streams_per_connection = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_tester.bad_connection_to_offer = 2; size_t good_connections_num = options.max_connections - s_tester.bad_connection_to_offer; size_t streams_acquiring_num = 15; s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_delay_mock); ASSERT_SUCCESS(s_sm_stream_acquiring((int)streams_acquiring_num)); /* The count should be the max connection count now */ ASSERT_UINT_EQUALS(s_tester.delay_offer_connection_count, options.max_connections); /* Offer the waiting connections */ ASSERT_SUCCESS(s_sm_tester_offer_waiting_connections()); /* wait for 3 fake connections to be made, as the first two connections will fail */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(good_connections_num)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(streams_acquiring_num)); /* We fail the streams that cannot fit into the healthy connections, based on the ideal concurrency per connection. */ ASSERT_INT_EQUALS( streams_acquiring_num - options.ideal_concurrent_streams_per_connection * good_connections_num, s_tester.acquiring_stream_errors); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE, s_tester.error_code); ASSERT_TRUE(aws_array_list_length(&s_tester.streams) == 6); /* Acquire more streams, which should succeed as we don't close the connection */ ASSERT_SUCCESS(s_sm_stream_acquiring(4)); /* wait for the new connections to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(options.max_connections + 2)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(streams_acquiring_num + 4)); /* all the new streams succeed */ ASSERT_TRUE(aws_array_list_length(&s_tester.streams) == 10); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } /* Test that when a connection is offered and it dies before the stream is made, the stream fails */ TEST_CASE(h2_sm_mock_connections_closed_before_request_made) { (void)ctx; struct sm_tester_options options = { .max_connections = 1, .max_concurrent_streams_per_connection = 3, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); ASSERT_SUCCESS(s_sm_stream_acquiring(2)); /* wait for one fake connection to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(2)); /* No error happens */ ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); /* Now we close the connection; the stream manager will fail the new stream if the open streams have not completed. */ struct sm_fake_connection *fake_connection = s_get_fake_connection(0); aws_http_connection_close(fake_connection->connection); ASSERT_SUCCESS(s_sm_stream_acquiring(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(3)); /* Assert that the new one failed. 
*/ ASSERT_INT_EQUALS(1, s_tester.acquiring_stream_errors); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, s_tester.error_code); /* Reset errors */ s_tester.acquiring_stream_errors = 0; s_tester.error_code = 0; s_drain_all_fake_connection_testing_channel(); /* Once the connection finishes shutting down, we can still make more requests from a new connection. */ ASSERT_SUCCESS(s_sm_stream_acquiring(2)); /* wait for another fake connection to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(2)); s_drain_all_fake_connection_testing_channel(); /* No error happens */ ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); /* We made 4 streams successfully */ ASSERT_INT_EQUALS(4, aws_array_list_length(&s_tester.streams)); /* Finish all the open streams */ ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } /* Test that the remote max concurrent streams setting is hit */ TEST_CASE(h2_sm_mock_max_concurrent_streams_remote) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); /* Set the remote max to be 2 */ s_tester.max_con_stream_remote = 2; /* Acquire a stream to trigger a connection */ ASSERT_SUCCESS(s_sm_stream_acquiring(1)); /* wait for one fake connection to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(1)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); ASSERT_INT_EQUALS(0, s_tester.stream_complete_errors); /* Fake peer sends settings that only allow 2 concurrent streams */ /* Acquire two more streams */ ASSERT_SUCCESS(s_sm_stream_acquiring(2)); /* We created a new connection */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(2)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(1 + 2)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); ASSERT_INT_EQUALS(2, aws_array_list_length(&s_tester.fake_connections)); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } /* Test that fetching the stream manager metrics reports the expected values */ TEST_CASE(h2_sm_mock_fetch_metric) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); /* Set the remote max to be 2 */ s_tester.max_con_stream_remote = 2; /* Acquire a stream to trigger a connection */ ASSERT_SUCCESS(s_sm_stream_acquiring(1)); /* wait for one fake connection to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(1)); struct aws_http_manager_metrics out_metrics; AWS_ZERO_STRUCT(out_metrics); aws_http2_stream_manager_fetch_metrics(s_tester.stream_manager, &out_metrics); /* We acquired 1 stream and hold one connection; the max concurrent streams per connection is 2. 
*/ ASSERT_UINT_EQUALS(out_metrics.available_concurrency, 1); ASSERT_UINT_EQUALS(out_metrics.pending_concurrency_acquires, 0); ASSERT_UINT_EQUALS(out_metrics.leased_concurrency, 1); ASSERT_SUCCESS(s_sm_stream_acquiring(1)); ASSERT_SUCCESS(s_wait_on_fake_connection_count(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(2)); aws_http2_stream_manager_fetch_metrics(s_tester.stream_manager, &out_metrics); ASSERT_UINT_EQUALS(out_metrics.available_concurrency, 0); ASSERT_UINT_EQUALS(out_metrics.pending_concurrency_acquires, 0); ASSERT_UINT_EQUALS(out_metrics.leased_concurrency, 2); ASSERT_SUCCESS(s_sm_stream_acquiring(10)); ASSERT_SUCCESS(s_wait_on_fake_connection_count(5)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(10)); aws_http2_stream_manager_fetch_metrics(s_tester.stream_manager, &out_metrics); ASSERT_UINT_EQUALS(out_metrics.available_concurrency, 0); ASSERT_UINT_EQUALS(out_metrics.pending_concurrency_acquires, 2); ASSERT_UINT_EQUALS(out_metrics.leased_concurrency, 10); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); /* Still have two more streams that have not been completed */ s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } /* Test that the stream completed will free the connection for more streams */ TEST_CASE(h2_sm_mock_complete_stream) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .ideal_concurrent_streams_per_connection = 2, .max_concurrent_streams_per_connection = 2, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); ASSERT_SUCCESS(s_sm_stream_acquiring(2)); /* waiting for one fake connection made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(2)); ASSERT_INT_EQUALS(1, aws_array_list_length(&s_tester.fake_connections)); /* Fake peer send settings that only allow 2 concurrent streams */ struct sm_fake_connection *fake_connection = s_get_fake_connection(0); ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&fake_connection->peer)); s_fake_connection_complete_streams(fake_connection, 1); /* Acquire a new streams */ ASSERT_SUCCESS(s_sm_stream_acquiring(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(2 + 1)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); /* No error happens */ ASSERT_INT_EQUALS(0, s_tester.stream_complete_errors); /* We have no extra connection made. 
*/ ASSERT_INT_EQUALS(1, aws_array_list_length(&s_tester.fake_connections)); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } /* Test the soft limit from user works as we want */ TEST_CASE(h2_sm_mock_ideal_num_streams) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .ideal_concurrent_streams_per_connection = 3, .max_concurrent_streams_per_connection = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); ASSERT_SUCCESS(s_sm_stream_acquiring(15)); /* We will create 5 connections instead of 3 */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(5)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(15)); ASSERT_INT_EQUALS(5, aws_array_list_length(&s_tester.fake_connections)); s_drain_all_fake_connection_testing_channel(); /* Check all the 5 fake connections received 3 streams each */ for (size_t i = 0; i < aws_array_list_length(&s_tester.fake_connections); ++i) { struct sm_fake_connection *fake_connection = s_get_fake_connection(i); ASSERT_INT_EQUALS( s_fake_connection_get_stream_received(fake_connection), options.ideal_concurrent_streams_per_connection); } /* Acquire 15 more, we can only have 25 (5*5) in total */ ASSERT_SUCCESS(s_sm_stream_acquiring(15)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(10)); s_drain_all_fake_connection_testing_channel(); /* Check all the 5 fake connections received 5 streams each */ for (size_t i = 0; i < aws_array_list_length(&s_tester.fake_connections); ++i) { struct sm_fake_connection *fake_connection = s_get_fake_connection(i); ASSERT_INT_EQUALS( s_fake_connection_get_stream_received(fake_connection), options.max_concurrent_streams_per_connection); } ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); s_drain_all_fake_connection_testing_channel(); /* completed the remain streams */ ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } TEST_CASE(h2_sm_mock_large_ideal_num_streams) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .ideal_concurrent_streams_per_connection = 3, .max_concurrent_streams_per_connection = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); /* Set the remote max to be 2 */ s_tester.max_con_stream_remote = 2; s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); ASSERT_SUCCESS(s_sm_stream_acquiring(6)); /* We will create 3 connections instead of 2 */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(3)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(6)); ASSERT_INT_EQUALS(3, aws_array_list_length(&s_tester.fake_connections)); s_drain_all_fake_connection_testing_channel(); for (size_t i = 0; i < aws_array_list_length(&s_tester.fake_connections); ++i) { struct sm_fake_connection *fake_connection = s_get_fake_connection(i); ASSERT_INT_EQUALS(s_fake_connection_get_stream_received(fake_connection), s_tester.max_con_stream_remote); } /* Acquire 15 more, we can only have 10 (2*5) in total. 
21 acquisitions made in total. */ ASSERT_SUCCESS(s_sm_stream_acquiring(15)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(10 - 6)); s_drain_all_fake_connection_testing_channel(); for (size_t i = 0; i < aws_array_list_length(&s_tester.fake_connections); ++i) { struct sm_fake_connection *fake_connection = s_get_fake_connection(i); ASSERT_INT_EQUALS(s_fake_connection_get_stream_received(fake_connection), s_tester.max_con_stream_remote); } ASSERT_UINT_EQUALS(10, aws_array_list_length(&s_tester.streams)); ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); s_drain_all_fake_connection_testing_channel(); /* Completed 10 streams; 10 more streams were created */ ASSERT_UINT_EQUALS(20, aws_array_list_length(&s_tester.streams)); /* Complete the remaining 10 streams */ ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); s_drain_all_fake_connection_testing_channel(); /* 1 more stream should be made now, bringing the total to all 21 */ ASSERT_UINT_EQUALS(21, aws_array_list_length(&s_tester.streams)); /* Complete all of them again, and we are done */ ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); s_drain_all_fake_connection_testing_channel(); return s_tester_clean_up(); } /* Test that when a GOAWAY is received from the peer, a new connection will be made */ TEST_CASE(h2_sm_mock_goaway) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); ASSERT_SUCCESS(s_sm_stream_acquiring(5)); /* wait for one fake connection to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(1)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(5)); ASSERT_INT_EQUALS(1, aws_array_list_length(&s_tester.fake_connections)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); /* Fake peer sends GOAWAY */ struct sm_fake_connection *fake_connection = s_get_fake_connection(0); ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&fake_connection->peer)); struct aws_byte_cursor debug_info; AWS_ZERO_STRUCT(debug_info); struct aws_http_stream *stream = NULL; aws_array_list_front(&s_tester.streams, &stream); struct aws_h2_frame *peer_frame = aws_h2_frame_new_goaway(allocator, aws_http_stream_get_id(stream), AWS_HTTP2_ERR_NO_ERROR, debug_info); ASSERT_SUCCESS(h2_fake_peer_send_frame(&fake_connection->peer, peer_frame)); testing_channel_drain_queued_tasks(&fake_connection->testing_channel); /* The streams with ids larger than the first stream's id should all complete with an error */ ASSERT_INT_EQUALS(4, s_tester.stream_complete_errors); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_GOAWAY_RECEIVED, s_tester.stream_completed_error_code); /* When we create new streams, the stream manager should create a new connection to use */ ASSERT_SUCCESS(s_sm_stream_acquiring(5)); /* wait for another fake connection to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(2)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(5 + 5)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); /* No more streams completed with an error */ ASSERT_INT_EQUALS(4, s_tester.stream_complete_errors); /* Two connections made */ ASSERT_INT_EQUALS(2, aws_array_list_length(&s_tester.fake_connections)); fake_connection = s_get_fake_connection(1); ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&fake_connection->peer)); 
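/* Note: the new (second) connection exchanges its connection preface with the fake peer here, presumably so that the frames exchanged while completing the remaining streams below decode cleanly; the other mock-connection tests in this file follow the same pattern before completing streams. */ 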
ASSERT_SUCCESS(s_complete_all_fake_connection_streams()); return s_tester_clean_up(); } /* Test that PING works as expected. */ TEST_CASE(h2_sm_connection_ping) { (void)ctx; size_t connection_ping_timeout_ms = AWS_TIMESTAMP_MILLIS; /* 1 sec */ struct sm_tester_options options = { .max_connections = 3, .alloc = allocator, .max_concurrent_streams_per_connection = 2, .connection_ping_period_ms = 2 * AWS_TIMESTAMP_MILLIS, .connection_ping_timeout_ms = connection_ping_timeout_ms, }; ASSERT_SUCCESS(s_tester_init(&options)); s_override_cm_connect_function(s_aws_http_connection_manager_create_connection_sync_mock); ASSERT_SUCCESS(s_sm_stream_acquiring(6)); /* wait for three fake connections to be made */ ASSERT_SUCCESS(s_wait_on_fake_connection_count(3)); s_drain_all_fake_connection_testing_channel(); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(6)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); aws_thread_current_sleep(2 * AWS_TIMESTAMP_NANOS); /* Sleep 2 sec */ /* Check that a PING was received on all the connections */ struct sm_fake_connection *fake_connection_1 = s_get_fake_connection(0); struct sm_fake_connection *fake_connection_2 = s_get_fake_connection(1); struct sm_fake_connection *fake_connection_3 = s_get_fake_connection(2); testing_channel_drain_queued_tasks(&fake_connection_1->testing_channel); testing_channel_drain_queued_tasks(&fake_connection_2->testing_channel); testing_channel_drain_queued_tasks(&fake_connection_3->testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&fake_connection_1->peer)); struct h2_decoded_frame *ping_frame = h2_decode_tester_find_frame(&fake_connection_1->peer.decode, AWS_H2_FRAME_T_PING, 0, NULL); ASSERT_NOT_NULL(ping_frame); /* Fake peer only sends a PING ACK to the first connection immediately */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&fake_connection_1->peer)); struct aws_h2_frame *peer_frame = aws_h2_frame_new_ping(allocator, true /*ACK*/, ping_frame->ping_opaque_data); ASSERT_SUCCESS(h2_fake_peer_send_frame(&fake_connection_1->peer, peer_frame)); testing_channel_drain_queued_tasks(&fake_connection_1->testing_channel); s_fake_connection_complete_streams( fake_connection_1, 0 /*all streams*/); /* Make sure the streams completed successfully */ /* Check that fake connection 2 received a PING */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&fake_connection_2->peer)); ping_frame = h2_decode_tester_find_frame(&fake_connection_2->peer.decode, AWS_H2_FRAME_T_PING, 0, NULL); ASSERT_NOT_NULL(ping_frame); /* Check that fake connection 3 received a PING, but never send a PING ACK for connection 3 */ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&fake_connection_3->peer)); ping_frame = h2_decode_tester_find_frame(&fake_connection_3->peer.decode, AWS_H2_FRAME_T_PING, 0, NULL); ASSERT_NOT_NULL(ping_frame); aws_thread_current_sleep(AWS_TIMESTAMP_NANOS); /* Sleep 1 sec */ testing_channel_drain_queued_tasks(&fake_connection_2->testing_channel); testing_channel_drain_queued_tasks(&fake_connection_3->testing_channel); /* Send the PING ACK for connection 2 after the timeout has happened */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&fake_connection_2->peer)); peer_frame = aws_h2_frame_new_ping(allocator, true /*ACK*/, ping_frame->ping_opaque_data); ASSERT_SUCCESS(h2_fake_peer_send_frame(&fake_connection_2->peer, peer_frame)); testing_channel_drain_queued_tasks(&fake_connection_2->testing_channel); /* The streams on the second and third connections should fail to complete */ 
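/* With the options configured above (connection_ping_period_ms of roughly 2 sec and connection_ping_timeout_ms of roughly 1 sec), connection 3 never receives a PING ACK and connection 2 receives one only after the timeout, so the stream manager is expected to treat both as unhealthy and close them, which is what fails the in-flight streams checked below. */ 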
ASSERT_INT_EQUALS(4, s_tester.stream_complete_errors); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, s_tester.stream_completed_error_code); return s_tester_clean_up(); } /******************************************************************************* * Network tests that make real HTTP/2 connections and requests ******************************************************************************/ /* Test that makes real streams */ TEST_CASE(h2_sm_acquire_stream) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); int num_to_acquire = 5; ASSERT_SUCCESS(s_sm_stream_acquiring(num_to_acquire)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(num_to_acquire)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); ASSERT_INT_EQUALS(num_to_acquire, s_tester.stream_200_count); return s_tester_clean_up(); } /* Test that makes real streams and triggers multiple connections to be created */ TEST_CASE(h2_sm_acquire_stream_multiple_connections) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .alloc = allocator, .max_concurrent_streams_per_connection = 5, }; ASSERT_SUCCESS(s_tester_init(&options)); int num_to_acquire = 20; ASSERT_SUCCESS(s_sm_stream_acquiring(num_to_acquire)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(num_to_acquire)); ASSERT_INT_EQUALS(0, s_tester.acquiring_stream_errors); ASSERT_INT_EQUALS(num_to_acquire, s_tester.stream_200_count); return s_tester_clean_up(); } /* Test that the connection is closed when the server keeps returning error status codes */ TEST_CASE(h2_sm_close_connection_on_server_error) { (void)ctx; /* server that will return a 500 status code all the time. */ struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str("https://postman-echo.com/status/500"); struct sm_tester_options options = { .max_connections = 1, .max_concurrent_streams_per_connection = 10, .alloc = allocator, .uri_cursor = &uri_cursor, .close_connection_on_server_error = true, }; ASSERT_SUCCESS(s_tester_init(&options)); int num_to_acquire = 50; ASSERT_SUCCESS(s_sm_stream_acquiring(num_to_acquire)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(num_to_acquire)); ASSERT_TRUE((int)s_tester.acquiring_stream_errors == 0); ASSERT_TRUE((int)s_tester.stream_200_count == 0); return s_tester_clean_up(); } static void s_sm_tester_on_connection_setup(struct aws_http_connection *connection, int error_code, void *user_data) { if (s_tester.release_sm_during_connection_acquiring) { aws_http2_stream_manager_release(s_tester.stream_manager); s_tester.stream_manager = NULL; } s_tester.on_setup(connection, error_code, user_data); } static int s_aws_http_connection_manager_create_real_connection_sync( const struct aws_http_client_connection_options *options) { struct aws_http_client_connection_options local_options = *options; s_tester.on_setup = options->on_setup; local_options.on_setup = s_sm_tester_on_connection_setup; return aws_http_client_connect(&local_options); } /* Test that if the stream manager is closed before a connection is acquired, all the pending stream acquisitions fail */ TEST_CASE(h2_sm_closing_before_connection_acquired) { (void)ctx; struct sm_tester_options options = { .max_connections = 5, .max_concurrent_streams_per_connection = 2, .alloc = allocator, }; ASSERT_SUCCESS(s_tester_init(&options)); s_tester.release_sm_during_connection_acquiring = true; s_override_cm_connect_function(s_aws_http_connection_manager_create_real_connection_sync); /* only acquire one, as the connection creation happens synchronously; the stream manager's 
refcount will be released as the * first stream is acquired */ ASSERT_SUCCESS(s_sm_stream_acquiring(1)); ASSERT_SUCCESS(s_wait_on_streams_acquired_count(1)); /* all stream acquisitions failed */ ASSERT_INT_EQUALS(1, s_tester.acquiring_stream_errors); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN, s_tester.error_code); return s_tester_clean_up(); } /* Test that our HTTP/2 stream manager works with prior knowledge */ TEST_CASE(localhost_integ_h2_sm_prior_knowledge) { (void)ctx; struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str("http://localhost:3280"); struct sm_tester_options options = { .max_connections = 100, .max_concurrent_streams_per_connection = 100, .alloc = allocator, .uri_cursor = &uri_cursor, .prior_knowledge = true, }; ASSERT_SUCCESS(s_tester_init(&options)); int num_to_acquire = 2; ASSERT_SUCCESS(s_sm_stream_acquiring(num_to_acquire)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(num_to_acquire)); ASSERT_TRUE((int)s_tester.acquiring_stream_errors == 0); ASSERT_TRUE((int)s_tester.stream_200_count == num_to_acquire); return s_tester_clean_up(); } /* Test that makes tons of real streams against localhost */ TEST_CASE(localhost_integ_h2_sm_acquire_stream_stress) { (void)ctx; struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str("https://localhost:3443/echo"); struct aws_http_connection_monitoring_options monitor_opt = { .allowable_throughput_failure_interval_seconds = 2, .minimum_throughput_bytes_per_second = 1000, }; enum aws_log_level log_level = AWS_LOG_LEVEL_DEBUG; struct sm_tester_options options = { .max_connections = 50, .max_concurrent_streams_per_connection = 100, .connection_ping_period_ms = 100 * AWS_TIMESTAMP_MILLIS, .alloc = allocator, .uri_cursor = &uri_cursor, .monitor_opt = &monitor_opt, .log_level = &log_level, }; ASSERT_SUCCESS(s_tester_init(&options)); size_t num_to_acquire = 500 * 100; ASSERT_SUCCESS(s_sm_stream_acquiring((int)num_to_acquire)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(num_to_acquire)); ASSERT_UINT_EQUALS(s_tester.acquiring_stream_errors, 0); ASSERT_UINT_EQUALS(s_tester.stream_200_count, num_to_acquire); return s_tester_clean_up(); } static int s_tester_on_put_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)user_data; (void)stream; struct aws_string *content_length_header_str = aws_string_new_from_cursor(s_tester.allocator, data); size_t num_received = (uint32_t)atoi((const char *)content_length_header_str->bytes); AWS_FATAL_ASSERT(s_tester.length_sent == num_received); aws_string_destroy(content_length_header_str); return AWS_OP_SUCCESS; } static int s_sm_stream_acquiring_with_body(int num_streams) { char content_length_sprintf_buffer[128] = ""; snprintf(content_length_sprintf_buffer, sizeof(content_length_sprintf_buffer), "%zu", s_tester.length_sent); struct aws_http_header request_headers_src[] = { DEFINE_HEADER(":method", "PUT"), { .name = aws_byte_cursor_from_c_str(":scheme"), .value = *aws_uri_scheme(&s_tester.endpoint), }, { .name = aws_byte_cursor_from_c_str(":path"), .value = s_normalize_path(*aws_uri_path(&s_tester.endpoint)), }, { .name = aws_byte_cursor_from_c_str(":authority"), .value = *aws_uri_host_name(&s_tester.endpoint), }, { .name = aws_byte_cursor_from_c_str("content_length"), .value = aws_byte_cursor_from_c_str(content_length_sprintf_buffer), }, }; for (int i = 0; i < num_streams; ++i) { /* TODO: Test that the callback is always fired asynchronously; since the connection manager cannot currently ensure the callback happens * asynchronously, we cannot ensure it here either. 
*/ struct aws_http_message *request = aws_http2_message_new_request(s_tester.allocator); aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); struct aws_input_stream *body_stream = aws_input_stream_tester_upload_new(s_tester.allocator, s_tester.length_sent); aws_http_message_set_body_stream(request, body_stream); aws_input_stream_release(body_stream); struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, .on_response_body = s_tester_on_put_body, .on_complete = s_sm_tester_on_stream_complete, }; struct aws_http2_stream_manager_acquire_stream_options acquire_stream_option = { .options = &request_options, .callback = s_sm_tester_on_stream_acquired, .user_data = &s_tester, }; aws_http2_stream_manager_acquire_stream(s_tester.stream_manager, &acquire_stream_option); aws_http_message_release(request); } return AWS_OP_SUCCESS; } /* Test that makes tons of real streams with body against local host */ TEST_CASE(localhost_integ_h2_sm_acquire_stream_stress_with_body) { (void)ctx; struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str("https://localhost:3443/upload_test"); enum aws_log_level log_level = AWS_LOG_LEVEL_DEBUG; struct sm_tester_options options = { .max_connections = 100, .max_concurrent_streams_per_connection = 100, .connection_ping_period_ms = 100 * AWS_TIMESTAMP_MILLIS, .alloc = allocator, .uri_cursor = &uri_cursor, .log_level = &log_level, }; ASSERT_SUCCESS(s_tester_init(&options)); s_tester.length_sent = 2000; int num_to_acquire = 500 * 100; ASSERT_SUCCESS(s_sm_stream_acquiring_with_body(num_to_acquire)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(num_to_acquire)); ASSERT_UINT_EQUALS(s_tester.acquiring_stream_errors, 0); ASSERT_UINT_EQUALS(s_tester.stream_200_count, num_to_acquire); return s_tester_clean_up(); } /* Test that connection monitor works properly with HTTP/2 stream manager */ TEST_CASE(localhost_integ_h2_sm_connection_monitor_kill_slow_connection) { (void)ctx; struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str("https://localhost:3443/slowConnTest"); struct aws_http_connection_monitoring_options monitor_opt = { .allowable_throughput_failure_interval_seconds = 1, .minimum_throughput_bytes_per_second = 1000, }; struct sm_tester_options options = { .max_connections = 100, .max_concurrent_streams_per_connection = 100, .alloc = allocator, .uri_cursor = &uri_cursor, .monitor_opt = &monitor_opt, }; ASSERT_SUCCESS(s_tester_init(&options)); ASSERT_SUCCESS(s_sm_stream_acquiring(1)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(1)); /* Check the connection closed by connection monitor and the stream should completed with corresponding error */ ASSERT_UINT_EQUALS(s_tester.stream_completed_error_code, AWS_ERROR_HTTP_CONNECTION_CLOSED); return s_tester_clean_up(); } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_strutil.c000066400000000000000000000310571456575232400242700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) AWS_TEST_CASE(strutil_trim_http_whitespace, s_strutil_trim_http_whitespace); static int s_strutil_trim_http_whitespace(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct test { const char *input; const char *expected; }; struct test tests[] = { {"a", "a"}, {" a", "a"}, {"a ", "a"}, {" a ", "a"}, {"", ""}, {" ", ""}, {" ", ""}, {"a", "a"}, {"\t", ""}, {"\ta", "a"}, {"a\t", "a"}, {"\t a \t", "a"}, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(tests); ++i) { struct aws_byte_cursor input = aws_byte_cursor_from_c_str(tests[i].input); struct aws_byte_cursor expected = aws_byte_cursor_from_c_str(tests[i].expected); struct aws_byte_cursor trimmed = aws_strutil_trim_http_whitespace(input); ASSERT_TRUE(aws_byte_cursor_eq(&expected, &trimmed)); } return 0; } AWS_TEST_CASE(strutil_is_http_token, s_strutil_is_http_token); static int s_strutil_is_http_token(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* sanity check */ ASSERT_TRUE(aws_strutil_is_http_token(aws_byte_cursor_from_c_str("A"))); ASSERT_TRUE(aws_strutil_is_http_token(aws_byte_cursor_from_c_str("Host"))); /* must be at least 1 character long*/ ASSERT_FALSE(aws_strutil_is_http_token(aws_byte_cursor_from_c_str(""))); /* all acceptable characters (RFC-7230 3.2.6 - tchar)*/ const char *all_acceptable = "!#$%&'*+-.^_`|~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; const size_t all_acceptable_strlen = strlen(all_acceptable); ASSERT_TRUE(aws_strutil_is_http_token(aws_byte_cursor_from_c_str(all_acceptable))); /* brute force over every character, and be sure it fails if it's not in the acceptable list */ for (size_t i = 0; i < 256; ++i) { uint8_t c = (uint8_t)i; bool is_acceptable = memchr(all_acceptable, c, all_acceptable_strlen) != NULL; ASSERT_UINT_EQUALS(is_acceptable, aws_strutil_is_http_token(aws_byte_cursor_from_array(&c, 1))); } return 0; } AWS_TEST_CASE(strutil_is_lowercase_http_token, s_strutil_is_lowercase_http_token); static int s_strutil_is_lowercase_http_token(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* sanity check */ ASSERT_TRUE(aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str("a"))); ASSERT_TRUE(aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str("host"))); /* must be at least 1 character long*/ ASSERT_FALSE(aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str(""))); /* forbidden characters */ ASSERT_FALSE(aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str("Host"))); ASSERT_FALSE(aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str(":\""))); /* all acceptable characters (RFC-7230 3.2.6 - tchar, but with uppercase removed) */ const char *all_acceptable = "!#$%&'*+-.^_`|~0123456789abcdefghijklmnopqrstuvwxyz"; const size_t all_acceptable_strlen = strlen(all_acceptable); ASSERT_TRUE(aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_c_str(all_acceptable))); /* brute force over every character, and be sure it fails if it's not in the acceptable list */ for (size_t i = 0; i < 256; ++i) { uint8_t c = (uint8_t)i; bool is_acceptable = memchr(all_acceptable, c, all_acceptable_strlen) != NULL; ASSERT_UINT_EQUALS(is_acceptable, aws_strutil_is_lowercase_http_token(aws_byte_cursor_from_array(&c, 1))); } return 0; } AWS_TEST_CASE(strutil_is_http_field_value, s_strutil_is_http_field_value); static int 
s_strutil_is_http_field_value(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* sanity check */ ASSERT_TRUE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("0"))); ASSERT_TRUE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("en"))); /* OK to have empty value */ ASSERT_TRUE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str(""))); /* OK to have whitespace in the middle */ ASSERT_TRUE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("a b"))); ASSERT_TRUE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("a\tb"))); ASSERT_TRUE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("a\t\t \t\t b"))); /* Bad to have whitespace at the start or the end */ ASSERT_FALSE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str(" 999"))); ASSERT_FALSE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("999 "))); ASSERT_FALSE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("\t999"))); ASSERT_FALSE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("999\t"))); /* OK to use UTF-8 */ ASSERT_TRUE(aws_strutil_is_http_field_value( aws_byte_cursor_from_c_str("\xF0\x9F\x91\x81\xF0\x9F\x91\x84\xF0\x9F\x91\x81"))); /* Bad to have line-folds */ ASSERT_FALSE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("item1\r\n item2"))); ASSERT_FALSE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("item1\r item2"))); ASSERT_FALSE(aws_strutil_is_http_field_value(aws_byte_cursor_from_c_str("item1\n item2"))); /* The implementation uses a table of valid characters (for speed reasons). * Lets test every possible byte value and make sure it lines up with what we expect. * We'll put the test byte at index [1] of an otherwise valid string */ char mutable_str[] = {'a', 'b', 'c'}; for (size_t i = 0; i < 256; ++i) { /* Grammar looks like: * field-value = *( field-content / obs-fold ) ; we're forbidding obs-fold so ignore it * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] * field-vchar = VCHAR / obs-text * VCHAR = %x21-7E ; visible (printing) characters * obs-text = %x80-FF */ bool allowed_in_grammar = (/*SP*/ i == ' ') || (/*HTAB*/ i == '\t') || (/*VCHAR*/ i >= 0x21 && i <= 0x7E) || (/*obs-text*/ i >= 0x80 && i <= 0xFF); mutable_str[1] = (char)i; struct aws_byte_cursor cursor = aws_byte_cursor_from_array(mutable_str, AWS_ARRAY_SIZE(mutable_str)); bool passes = aws_strutil_is_http_field_value(cursor); ASSERT_INT_EQUALS(allowed_in_grammar, passes, "failed at character 0x%02X", i); } return 0; } AWS_TEST_CASE(strutil_is_http_reason_phrase, s_strutil_is_http_reason_phrase); static int s_strutil_is_http_reason_phrase(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* sanity check */ ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("OK"))); /* OK to have empty value */ ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str(""))); /* OK to have whitespace in the middle, beginning, or end */ ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("Not Found"))); ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("Not\tFound"))); ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str(" Not Found"))); ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("Not Found "))); ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("\t Not\t\t Found \t"))); ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str(" "))); 
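/* Unlike a field value (tested above, where leading and trailing whitespace is rejected), a reason phrase may consist entirely of whitespace; the grammar quoted below places no restriction on where SP / HTAB may appear. */ 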
ASSERT_TRUE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("\t"))); /* OK to use UTF-8 */ ASSERT_TRUE(aws_strutil_is_http_reason_phrase( aws_byte_cursor_from_c_str("\xF0\x9F\x91\x81\xF0\x9F\x91\x84\xF0\x9F\x91\x81"))); /* Bad to have line-folds or other anything like it*/ ASSERT_FALSE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("Line\r\nFolds"))); ASSERT_FALSE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("Line\rFeed"))); ASSERT_FALSE(aws_strutil_is_http_reason_phrase(aws_byte_cursor_from_c_str("New\nLine"))); /* The implementation uses a table of valid characters (for speed reasons). * Lets test every possible byte value and make sure it lines up with what we expect. * We'll put the test byte at index [1] of an otherwise valid string */ char mutable_str[] = {'a', 'b', 'c'}; for (size_t i = 0; i < 256; ++i) { /* Grammar looks like: * reason-phrase = *( HTAB / SP / VCHAR / obs-text ) * VCHAR = %x21-7E ; visible (printing) characters * obs-text = %x80-FF */ bool allowed_in_grammar = (/*SP*/ i == ' ') || (/*HTAB*/ i == '\t') || (/*VCHAR*/ i >= 0x21 && i <= 0x7E) || (/*obs-text*/ i >= 0x80 && i <= 0xFF); mutable_str[1] = (char)i; struct aws_byte_cursor cursor = aws_byte_cursor_from_array(mutable_str, AWS_ARRAY_SIZE(mutable_str)); bool passes = aws_strutil_is_http_reason_phrase(cursor); ASSERT_INT_EQUALS(allowed_in_grammar, passes, "failed at character 0x%02X", i); } return 0; } AWS_TEST_CASE(strutil_is_http_request_target, s_strutil_is_http_request_target); static int s_strutil_is_http_request_target(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* sanity check */ ASSERT_TRUE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("/"))); /* Bad to have empty value */ ASSERT_FALSE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str(""))); /* Bad to have non-visible ascii */ ASSERT_FALSE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str(" "))); ASSERT_FALSE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("/spaces-are-bad .html"))); ASSERT_FALSE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("/tabs-are-bad\t.html"))); ASSERT_FALSE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("/crlf-is-really-bad\r\n.html"))); ASSERT_FALSE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("/newline-is-bad\n.html"))); ASSERT_FALSE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("/linefeed-is-bad\r.html"))); /* OK origin-form */ ASSERT_TRUE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("/where?q=now"))); /* OK absolute-form */ ASSERT_TRUE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("http://www.amazon.com/index.html"))); /* OK authority-form */ ASSERT_TRUE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("www.example.com:80"))); /* OK asterisk-form */ ASSERT_TRUE(aws_strutil_is_http_request_target(aws_byte_cursor_from_c_str("*"))); /* TODO: Actually check the complete grammar as defined in RFC7230 5.3 and * RFC3986. 
Currently this just checks whether the sequence is blatantly illegal * (ex: contains CR or LF) */ return 0; } AWS_TEST_CASE(strutil_is_http_pseudo_header_name, s_strutil_is_http_pseudo_header_name); static int s_strutil_is_http_pseudo_header_name(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* sanity check */ ASSERT_TRUE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str(":method"))); ASSERT_TRUE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str(":scheme"))); ASSERT_TRUE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str(":authority"))); ASSERT_TRUE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str(":path"))); ASSERT_TRUE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str(":status"))); /* Bad to have empty value */ ASSERT_FALSE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str(""))); /* Bad to have other values */ ASSERT_FALSE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str("connect"))); ASSERT_FALSE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str("Method"))); ASSERT_FALSE(aws_strutil_is_http_pseudo_header_name(aws_byte_cursor_from_c_str("httpCRT"))); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_tls.c000066400000000000000000000222271456575232400233630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* Singleton used by tests in this file */ struct test_ctx { struct aws_allocator *alloc; struct aws_event_loop_group *event_loop_group; struct aws_host_resolver *host_resolver; struct aws_tls_ctx *tls_ctx; struct aws_client_bootstrap *client_bootstrap; struct aws_http_connection *client_connection; struct aws_http_stream *stream; size_t body_size; bool stream_complete; bool client_connection_is_shutdown; struct aws_mutex wait_lock; struct aws_condition_variable wait_cvar; int wait_result; }; static const uint32_t TEST_TIMEOUT_SEC = 4; static void s_on_connection_setup(struct aws_http_connection *connection, int error_code, void *user_data) { struct test_ctx *test = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&test->wait_lock) == AWS_OP_SUCCESS); test->client_connection = connection; test->wait_result = error_code; AWS_FATAL_ASSERT(aws_mutex_unlock(&test->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&test->wait_cvar); } static void s_on_connection_shutdown(struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; struct test_ctx *test = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&test->wait_lock) == AWS_OP_SUCCESS); test->client_connection_is_shutdown = true; test->wait_result = error_code; AWS_FATAL_ASSERT(aws_mutex_unlock(&test->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&test->wait_cvar); } static int s_test_wait(struct test_ctx *test, bool (*pred)(void *user_data)) { ASSERT_SUCCESS(aws_mutex_lock(&test->wait_lock)); int wait_result = aws_condition_variable_wait_pred(&test->wait_cvar, &test->wait_lock, pred, test); ASSERT_SUCCESS(aws_mutex_unlock(&test->wait_lock)); ASSERT_SUCCESS(wait_result); return AWS_OP_SUCCESS; } static bool s_test_connection_setup_pred(void *user_data) { struct test_ctx *test = user_data; return test->wait_result || test->client_connection; } static bool s_test_connection_shutdown_pred(void *user_data) { struct 
test_ctx *test = user_data; return test->wait_result || test->client_connection_is_shutdown; } static int s_on_stream_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct test_ctx *test = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&test->wait_lock) == AWS_OP_SUCCESS); test->body_size += data->len; AWS_FATAL_ASSERT(aws_mutex_unlock(&test->wait_lock) == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; struct test_ctx *test = user_data; AWS_FATAL_ASSERT(aws_mutex_lock(&test->wait_lock) == AWS_OP_SUCCESS); test->wait_result = error_code; test->stream_complete = true; AWS_FATAL_ASSERT(aws_mutex_unlock(&test->wait_lock) == AWS_OP_SUCCESS); aws_condition_variable_notify_one(&test->wait_cvar); } static bool s_stream_wait_pred(void *user_data) { struct test_ctx *test = user_data; return test->wait_result || test->stream_complete; } static int s_test_tls_download_medium_file_general( struct aws_allocator *allocator, struct aws_byte_cursor url, bool h2_required) { aws_http_library_init(allocator); struct aws_uri uri; aws_uri_init_parse(&uri, allocator, &url); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV4, .connect_timeout_ms = (uint32_t)aws_timestamp_convert(TEST_TIMEOUT_SEC, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), }; struct test_ctx test; AWS_ZERO_STRUCT(test); test.alloc = allocator; aws_mutex_init(&test.wait_lock); aws_condition_variable_init(&test.wait_cvar); test.event_loop_group = aws_event_loop_group_new_default(test.alloc, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = test.event_loop_group, .max_entries = 1, }; test.host_resolver = aws_host_resolver_new_default(test.alloc, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = test.event_loop_group, .host_resolver = test.host_resolver, }; ASSERT_NOT_NULL(test.client_bootstrap = aws_client_bootstrap_new(test.alloc, &bootstrap_options)); struct aws_tls_ctx_options tls_ctx_options; aws_tls_ctx_options_init_default_client(&tls_ctx_options, allocator); char *apln = h2_required ? 
"h2" : "http/1.1"; aws_tls_ctx_options_set_alpn_list(&tls_ctx_options, apln); ASSERT_NOT_NULL(test.tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options)); struct aws_tls_connection_options tls_connection_options; aws_tls_connection_options_init_from_ctx(&tls_connection_options, test.tls_ctx); aws_tls_connection_options_set_server_name( &tls_connection_options, allocator, (struct aws_byte_cursor *)aws_uri_host_name(&uri)); struct aws_http_client_connection_options http_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT; http_options.allocator = test.alloc; http_options.bootstrap = test.client_bootstrap; http_options.host_name = *aws_uri_host_name(&uri); http_options.port = 443; http_options.on_setup = s_on_connection_setup; http_options.on_shutdown = s_on_connection_shutdown; http_options.socket_options = &socket_options; http_options.tls_options = &tls_connection_options; http_options.user_data = &test; ASSERT_SUCCESS(aws_http_client_connect(&http_options)); ASSERT_SUCCESS(s_test_wait(&test, s_test_connection_setup_pred)); ASSERT_INT_EQUALS(0, test.wait_result); ASSERT_NOT_NULL(test.client_connection); if (h2_required) { ASSERT_INT_EQUALS(aws_http_connection_get_version(test.client_connection), AWS_HTTP_VERSION_2); } else { ASSERT_INT_EQUALS(aws_http_connection_get_version(test.client_connection), AWS_HTTP_VERSION_1_1); } struct aws_http_message *request = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(request); ASSERT_SUCCESS(aws_http_message_set_request_method(request, aws_http_method_get)); ASSERT_SUCCESS(aws_http_message_set_request_path(request, *aws_uri_path_and_query(&uri))); struct aws_http_header header_host = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = *aws_uri_host_name(&uri), }; ASSERT_SUCCESS(aws_http_message_add_header(request, header_host)); struct aws_http_make_request_options req_options = { .self_size = sizeof(req_options), .request = request, .on_response_body = s_on_stream_body, .on_complete = s_on_stream_complete, .user_data = &test, }; ASSERT_NOT_NULL(test.stream = aws_http_connection_make_request(test.client_connection, &req_options)); aws_http_stream_activate(test.stream); /* wait for the request to complete */ s_test_wait(&test, s_stream_wait_pred); ASSERT_INT_EQUALS(14428801, test.body_size); aws_http_message_destroy(request); aws_http_stream_release(test.stream); test.stream = NULL; aws_http_connection_release(test.client_connection); ASSERT_SUCCESS(s_test_wait(&test, s_test_connection_shutdown_pred)); aws_client_bootstrap_release(test.client_bootstrap); aws_host_resolver_release(test.host_resolver); aws_event_loop_group_release(test.event_loop_group); aws_tls_ctx_options_clean_up(&tls_ctx_options); aws_tls_connection_options_clean_up(&tls_connection_options); aws_tls_ctx_release(test.tls_ctx); aws_uri_clean_up(&uri); aws_http_library_clean_up(); aws_mutex_clean_up(&test.wait_lock); aws_condition_variable_clean_up(&test.wait_cvar); return AWS_OP_SUCCESS; } static int s_test_tls_download_medium_file_h1(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor url = aws_byte_cursor_from_c_str("https://aws-crt-test-stuff.s3.amazonaws.com/http_test_doc.txt"); ASSERT_SUCCESS(s_test_tls_download_medium_file_general(allocator, url, false /*h2_required*/)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_download_medium_file_h1, s_test_tls_download_medium_file_h1); static int s_tls_download_medium_file_h2(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* The cloudfront domain for aws-crt-test-stuff */ 
struct aws_byte_cursor url = aws_byte_cursor_from_c_str("https://d1cz66xoahf9cl.cloudfront.net/http_test_doc.txt"); ASSERT_SUCCESS(s_test_tls_download_medium_file_general(allocator, url, true /*h2_required*/)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_download_medium_file_h2, s_tls_download_medium_file_h2); aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_websocket_bootstrap.c000066400000000000000000001344371456575232400266530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) static int s_mock_http_client_connect(const struct aws_http_client_connection_options *options); static void s_mock_http_connection_release(struct aws_http_connection *connection); static void s_mock_http_connection_close(struct aws_http_connection *connection); static struct aws_channel *s_mock_http_connection_get_channel(struct aws_http_connection *connection); static struct aws_http_stream *s_mock_http_connection_make_request( struct aws_http_connection *client_connection, const struct aws_http_make_request_options *options); static int s_mock_http_stream_activate(struct aws_http_stream *stream); static void s_mock_http_stream_release(struct aws_http_stream *stream); static struct aws_http_connection *s_mock_http_stream_get_connection(const struct aws_http_stream *stream); static void s_mock_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size); static int s_mock_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status); static struct aws_websocket *s_mock_websocket_handler_new(const struct aws_websocket_handler_options *options); static const struct aws_websocket_client_bootstrap_system_vtable s_mock_system_vtable = { .aws_http_client_connect = s_mock_http_client_connect, .aws_http_connection_release = s_mock_http_connection_release, .aws_http_connection_close = s_mock_http_connection_close, .aws_http_connection_get_channel = s_mock_http_connection_get_channel, .aws_http_connection_make_request = s_mock_http_connection_make_request, .aws_http_stream_activate = s_mock_http_stream_activate, .aws_http_stream_release = s_mock_http_stream_release, .aws_http_stream_get_connection = s_mock_http_stream_get_connection, .aws_http_stream_update_window = s_mock_http_stream_update_window, .aws_http_stream_get_incoming_response_status = s_mock_http_stream_get_incoming_response_status, .aws_websocket_handler_new = s_mock_websocket_handler_new, }; /* Hardcoded value for "Sec-WebSocket-Key" header in handshake request. 
*/ static const char *s_sec_websocket_key_value = "dGhlIHNhbXBsZSBub25jZQ=="; struct test_response { int status_code; struct aws_http_header headers[10]; const char *body; }; static const struct test_response s_accepted_response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, }, }; static const struct test_response s_rejected_response = { .status_code = 403, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("43"), }, }, .body = "your request is bad and you should feel bad", }; /* If fail_at_step is set to one of these, that step will explicitly fail. * These represent the steps where an external system could fail. */ enum boot_step { BOOT_STEP_HTTP_CONNECT = 0x4000000, /* Use values that don't overlap with another aws-c-xyz library */ BOOT_STEP_HTTP_CONNECT_COMPLETE, BOOT_STEP_REQUEST_NEW, BOOT_STEP_REQUEST_ACTIVATE, BOOT_STEP_BEFORE_HEADERS, BOOT_STEP_BEFORE_HEADERS_DONE, BOOT_STEP_BEFORE_REJECTION_BODY, BOOT_STEP_BEFORE_REJECTION_STREAM_COMPLETE, BOOT_STEP_WEBSOCKET_NEW, BOOT_STEP_HTTP_SHUTDOWN, }; /* Needs to be a static singleton so that mock functions can access it */ static struct tester { /* Settings */ struct aws_allocator *alloc; enum boot_step fail_at_step; struct aws_http_header *extra_handshake_request_header_array; size_t num_extra_handshake_request_headers; struct aws_http_message *handshake_request; const struct test_response *handshake_response; size_t num_handshake_response_headers; /* State */ bool http_connect_called_successfully; aws_http_on_client_connection_setup_fn *http_connect_setup_callback; aws_http_on_client_connection_shutdown_fn *http_connect_shutdown_callback; void *http_connect_user_data; bool http_connection_release_called; bool http_connection_close_called; bool http_stream_new_called_successfully; aws_http_on_incoming_headers_fn *http_stream_on_response_headers; aws_http_on_incoming_header_block_done_fn *http_stream_on_response_header_block_done; aws_http_on_incoming_body_fn *http_stream_on_response_body; aws_http_on_stream_complete_fn *http_stream_on_complete; void *http_stream_user_data; bool http_stream_on_complete_invoked; bool websocket_new_called_successfully; bool http_stream_release_called; bool http_stream_activate_called_successfully; bool websocket_setup_invoked; int websocket_setup_error_code; bool websocket_setup_had_response_status; bool websocket_setup_had_response_headers; bool websocket_setup_had_response_body; bool websocket_shutdown_invoked; int websocket_shutdown_error_code; /* Track the sum of all calls to aws_http_stream_update_window() */ size_t window_increment_total; } s_tester; static int s_tester_init(struct aws_allocator *alloc) { aws_http_library_init(alloc); aws_websocket_client_bootstrap_set_system_vtable(&s_mock_system_vtable); /* Set default settings for tester (unless the test already configured it) */ s_tester.alloc = alloc; if (!s_tester.handshake_response) { s_tester.handshake_response = &s_accepted_response; } /* Count number of headers being sent */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_tester.handshake_response->headers); ++i) { if 
(s_tester.handshake_response->headers[i].name.len == 0) { break; } s_tester.num_handshake_response_headers = i + 1; } return AWS_OP_SUCCESS; } static int s_tester_clean_up(void) { aws_http_library_clean_up(); return AWS_OP_SUCCESS; } static bool s_headers_eq( const struct aws_http_header *headers_a, size_t num_headers_a, const struct aws_http_header *headers_b, size_t num_headers_b) { if (num_headers_a != num_headers_b) { return false; } for (size_t i = 0; i < num_headers_a; ++i) { struct aws_http_header a = headers_a[i]; struct aws_http_header b = headers_b[i]; if (!aws_byte_cursor_eq_ignore_case(&a.name, &b.name) || !aws_byte_cursor_eq(&a.value, &b.value)) { printf( "Header did not match '" PRInSTR ": " PRInSTR "'\n", AWS_BYTE_CURSOR_PRI(a.name), AWS_BYTE_CURSOR_PRI(a.value)); return false; } } return true; } static bool s_request_eq(const struct aws_http_message *request_a, const struct aws_http_message *request_b) { const size_t num_headers_a = aws_http_message_get_header_count(request_a); const size_t num_headers_b = aws_http_message_get_header_count(request_b); if (num_headers_a != num_headers_b) { return false; } for (size_t a_i = 0; a_i < num_headers_a; ++a_i) { struct aws_http_header a; aws_http_message_get_header(request_a, &a, a_i); bool found_match = false; for (size_t b_i = 0; b_i < num_headers_b; ++b_i) { struct aws_http_header b; aws_http_message_get_header(request_b, &b, b_i); if (aws_byte_cursor_eq_ignore_case(&a.name, &b.name) && aws_byte_cursor_eq_ignore_case(&a.value, &b.value)) { found_match = true; break; } } if (!found_match) { printf( "Failed to find header '" PRInSTR ": " PRInSTR "'\n", AWS_BYTE_CURSOR_PRI(a.name), AWS_BYTE_CURSOR_PRI(a.value)); return false; } } return true; } /* Totally fake and not real objects created by the mocked functions */ static struct aws_http_connection *s_mock_http_connection = (void *)"http connection"; static struct aws_http_stream *s_mock_stream = (void *)"stream"; static struct aws_channel *s_mock_channel = (void *)"channel"; static struct aws_websocket *s_mock_websocket = (void *)"websocket"; static int s_mock_http_client_connect(const struct aws_http_client_connection_options *options) { AWS_FATAL_ASSERT(options); AWS_FATAL_ASSERT(!s_tester.http_connect_called_successfully); if (s_tester.fail_at_step == BOOT_STEP_HTTP_CONNECT) { return aws_raise_error(BOOT_STEP_HTTP_CONNECT); } s_tester.http_connect_called_successfully = true; s_tester.http_connect_setup_callback = options->on_setup; s_tester.http_connect_shutdown_callback = options->on_shutdown; s_tester.http_connect_user_data = options->user_data; return AWS_OP_SUCCESS; } static void s_mock_http_connection_release(struct aws_http_connection *connection) { if (connection == NULL) { return; } AWS_FATAL_ASSERT(connection == s_mock_http_connection); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); s_tester.http_connection_release_called = true; } static void s_mock_http_connection_close(struct aws_http_connection *connection) { AWS_FATAL_ASSERT(connection == s_mock_http_connection); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); s_tester.http_connection_close_called = true; } static struct aws_channel *s_mock_http_connection_get_channel(struct aws_http_connection *connection) { AWS_FATAL_ASSERT(connection == s_mock_http_connection); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); return s_mock_channel; } static struct aws_http_stream *s_mock_http_connection_make_request( struct aws_http_connection *client_connection, const struct 
aws_http_make_request_options *options) { AWS_FATAL_ASSERT(client_connection); AWS_FATAL_ASSERT(options); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); AWS_FATAL_ASSERT(!s_tester.http_stream_new_called_successfully); /* ensure we're only called once */ if (s_tester.fail_at_step == BOOT_STEP_REQUEST_NEW) { aws_raise_error(BOOT_STEP_REQUEST_NEW); return NULL; } /* Check that headers passed into websocket_connect() carry through. */ AWS_FATAL_ASSERT(s_request_eq(s_tester.handshake_request, options->request)); s_tester.http_stream_new_called_successfully = true; s_tester.http_stream_on_response_headers = options->on_response_headers; s_tester.http_stream_on_response_header_block_done = options->on_response_header_block_done; s_tester.http_stream_on_response_body = options->on_response_body; s_tester.http_stream_on_complete = options->on_complete; s_tester.http_stream_user_data = options->user_data; return s_mock_stream; } static int s_mock_http_stream_activate(struct aws_http_stream *stream) { AWS_FATAL_ASSERT(stream == s_mock_stream); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); AWS_FATAL_ASSERT(!s_tester.http_stream_release_called); if (s_tester.fail_at_step == BOOT_STEP_REQUEST_ACTIVATE) { return aws_raise_error(BOOT_STEP_REQUEST_ACTIVATE); } s_tester.http_stream_activate_called_successfully = true; return AWS_OP_SUCCESS; } static void s_mock_http_stream_release(struct aws_http_stream *stream) { if (stream == NULL) { return; } AWS_FATAL_ASSERT(stream == s_mock_stream); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); AWS_FATAL_ASSERT(!s_tester.http_stream_release_called); s_tester.http_stream_release_called = true; } static struct aws_http_connection *s_mock_http_stream_get_connection(const struct aws_http_stream *stream) { AWS_FATAL_ASSERT(stream == s_mock_stream); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); AWS_FATAL_ASSERT(!s_tester.http_stream_release_called); return s_mock_http_connection; } static void s_mock_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size) { AWS_FATAL_ASSERT(stream == s_mock_stream); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); AWS_FATAL_ASSERT(!s_tester.http_stream_release_called); s_tester.window_increment_total += increment_size; } static int s_mock_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status) { AWS_FATAL_ASSERT(stream == s_mock_stream); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); AWS_FATAL_ASSERT(!s_tester.http_stream_release_called); AWS_FATAL_ASSERT(out_status); *out_status = s_tester.handshake_response->status_code; return AWS_OP_SUCCESS; } static struct aws_websocket *s_mock_websocket_handler_new(const struct aws_websocket_handler_options *options) { AWS_FATAL_ASSERT(options); AWS_FATAL_ASSERT(!s_tester.http_connection_release_called); AWS_FATAL_ASSERT(!s_tester.websocket_new_called_successfully); /* ensure we're only called once */ if (s_tester.fail_at_step == BOOT_STEP_WEBSOCKET_NEW) { aws_raise_error(BOOT_STEP_WEBSOCKET_NEW); return NULL; } s_tester.websocket_new_called_successfully = true; return s_mock_websocket; } static void s_on_websocket_setup(const struct aws_websocket_on_connection_setup_data *setup, void *user_data) { /* error-code is set XOR websocket is set. Must be one, but not both. */ AWS_FATAL_ASSERT((setup->error_code != 0) ^ (setup->websocket != NULL)); /* We may not get the full handshake response. * But any parts we do get should match what the mock sent us. 
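 * (A reported status code implies reported headers, and a reported body implies both; the asserts below check that pairing.)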
*/ if (setup->handshake_response_status) { s_tester.websocket_setup_had_response_status = true; AWS_FATAL_ASSERT(*setup->handshake_response_status == s_tester.handshake_response->status_code); /* If we're reporting a status code, we should also be reporting the headers */ AWS_FATAL_ASSERT(setup->handshake_response_header_array != NULL); } if (setup->handshake_response_header_array) { s_tester.websocket_setup_had_response_headers = true; AWS_FATAL_ASSERT(s_headers_eq( s_tester.handshake_response->headers, s_tester.num_handshake_response_headers, setup->handshake_response_header_array, setup->num_handshake_response_headers)); /* If we're reporting headers, we should also be reporting the status code */ AWS_FATAL_ASSERT(setup->handshake_response_status != NULL); } if (setup->handshake_response_body) { s_tester.websocket_setup_had_response_body = true; AWS_FATAL_ASSERT(aws_byte_cursor_eq_c_str(setup->handshake_response_body, s_tester.handshake_response->body)); /* If we're reporting the body, we should also be reporting the headers and status code */ AWS_FATAL_ASSERT(setup->handshake_response_status != NULL); AWS_FATAL_ASSERT(setup->handshake_response_header_array != NULL); } AWS_FATAL_ASSERT(user_data == &s_tester); s_tester.websocket_setup_invoked = true; s_tester.websocket_setup_error_code = setup->error_code; /* Don't need the request anymore */ aws_http_message_destroy(s_tester.handshake_request); s_tester.handshake_request = NULL; } static void s_on_websocket_shutdown(struct aws_websocket *websocket, int error_code, void *user_data) { AWS_FATAL_ASSERT(websocket == s_mock_websocket); AWS_FATAL_ASSERT(user_data == &s_tester); s_tester.websocket_shutdown_invoked = true; s_tester.websocket_shutdown_error_code = error_code; } static void s_complete_http_stream_and_connection(int error_code) { if (s_tester.http_stream_activate_called_successfully && !s_tester.http_stream_on_complete_invoked) { s_tester.http_stream_on_complete(s_mock_stream, error_code, s_tester.http_stream_user_data); } s_tester.http_connect_shutdown_callback(s_mock_http_connection, error_code, s_tester.http_stream_user_data); } /* Calls aws_websocket_client_connect(), and drives the async call to its conclusions. * Reports the reason for the failure via `out_error_code`. 
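 * A return value of AWS_OP_SUCCESS only means the mock-driven sequence itself stayed consistent; the outcome of the connection attempt is what lands in *out_error_code.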
*/ static int s_drive_websocket_connect(int *out_error_code) { ASSERT_NOT_NULL(out_error_code); bool websocket_connect_called_successfully = false; bool http_connect_setup_reported_success = false; /* Build handshake request */ static struct aws_byte_cursor path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); static const struct aws_byte_cursor host = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("server.example.com"); s_tester.handshake_request = aws_http_message_new_websocket_handshake_request(s_tester.alloc, path, host); if (!s_tester.handshake_request) { goto finishing_checks; } struct aws_http_headers *request_headers = aws_http_message_get_headers(s_tester.handshake_request); ASSERT_SUCCESS(aws_http_headers_set( request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Key"), aws_byte_cursor_from_c_str(s_sec_websocket_key_value))); for (size_t i = 0; i < s_tester.num_extra_handshake_request_headers; ++i) { ASSERT_SUCCESS(aws_http_headers_add_header(request_headers, &s_tester.extra_handshake_request_header_array[i])); } /* Call websocket_connect() */ struct aws_websocket_client_connection_options ws_options = { .allocator = s_tester.alloc, .bootstrap = (void *)"client channel bootstrap", .socket_options = (void *)"socket options", .host = host, .handshake_request = s_tester.handshake_request, .user_data = &s_tester, .on_connection_setup = s_on_websocket_setup, .on_connection_shutdown = s_on_websocket_shutdown, }; int err = aws_websocket_client_connect(&ws_options); if (err) { goto finishing_checks; } websocket_connect_called_successfully = true; /* Bootstrap should have started HTTP connection */ ASSERT_TRUE(s_tester.http_connect_called_successfully); /* Invoke HTTP setup callback */ if (s_tester.fail_at_step == BOOT_STEP_HTTP_CONNECT_COMPLETE) { s_tester.http_connect_setup_callback(NULL, BOOT_STEP_HTTP_CONNECT_COMPLETE, s_tester.http_connect_user_data); goto finishing_checks; } http_connect_setup_reported_success = true; s_tester.http_connect_setup_callback(s_mock_http_connection, AWS_ERROR_SUCCESS, s_tester.http_connect_user_data); /* Once websocket has valid HTTP connection, if anything goes wrong, the HTTP connection must be closed in order to * wrap things up. We manually check at every opportunity whether close has been called, and if so invoke the HTTP * shutdown callback */ if (s_tester.http_connection_close_called) { s_tester.http_connect_shutdown_callback( s_mock_http_connection, AWS_ERROR_SUCCESS, s_tester.http_connect_user_data); goto finishing_checks; } /* Bootstrap should have created new stream */ ASSERT_TRUE(s_tester.http_stream_new_called_successfully); ASSERT_TRUE(s_tester.http_stream_activate_called_successfully); /* HTTP connection could fail before any headers arrive */ if (s_tester.fail_at_step == BOOT_STEP_BEFORE_HEADERS) { s_complete_http_stream_and_connection(BOOT_STEP_BEFORE_HEADERS); goto finishing_checks; } /* Headers arrive, HTTP connection ends if callback returns error */ enum aws_http_header_block header_block = s_tester.handshake_response->status_code / 100 == 1 ? 
AWS_HTTP_HEADER_BLOCK_INFORMATIONAL : AWS_HTTP_HEADER_BLOCK_MAIN; if (s_tester.http_stream_on_response_headers( s_mock_stream, header_block, s_tester.handshake_response->headers, s_tester.num_handshake_response_headers, s_tester.http_stream_user_data)) { s_complete_http_stream_and_connection(aws_last_error()); goto finishing_checks; } /* HTTP connection could fail before headers are done */ if (s_tester.fail_at_step == BOOT_STEP_BEFORE_HEADERS_DONE) { s_complete_http_stream_and_connection(BOOT_STEP_BEFORE_HEADERS_DONE); goto finishing_checks; } /* Headers are done, HTTP connection ends if error returned */ if (s_tester.http_stream_on_response_header_block_done( s_mock_stream, header_block, s_tester.http_stream_user_data)) { s_complete_http_stream_and_connection(aws_last_error()); goto finishing_checks; } if (s_tester.http_connection_close_called) { s_complete_http_stream_and_connection(AWS_ERROR_SUCCESS); goto finishing_checks; } if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) { /* If the response is a rejection, it will have a body */ struct aws_byte_cursor body = aws_byte_cursor_from_c_str(s_tester.handshake_response->body); /* HTTP connection could fail before the body is delivered */ if (s_tester.fail_at_step == BOOT_STEP_BEFORE_REJECTION_BODY) { s_complete_http_stream_and_connection(BOOT_STEP_BEFORE_REJECTION_BODY); goto finishing_checks; } /* Response body arrives, HTTP connection ends if error returned */ if (body.len > 0) { /* If we're testing the stream dying before the whole body is delivered, then only deliver a bit of it */ if (s_tester.fail_at_step == BOOT_STEP_BEFORE_REJECTION_STREAM_COMPLETE) { body.len = 1; } if (s_tester.http_stream_on_response_body(s_mock_stream, &body, s_tester.http_stream_user_data)) { s_complete_http_stream_and_connection(aws_last_error()); goto finishing_checks; } if (s_tester.http_connection_close_called) { s_complete_http_stream_and_connection(AWS_ERROR_SUCCESS); goto finishing_checks; } } /* HTTP connection could fail before the stream completes on its own */ if (s_tester.fail_at_step == BOOT_STEP_BEFORE_REJECTION_STREAM_COMPLETE) { s_complete_http_stream_and_connection(BOOT_STEP_BEFORE_REJECTION_STREAM_COMPLETE); goto finishing_checks; } /* HTTP stream completes on its own after delivering rejection */ s_tester.http_stream_on_complete(s_mock_stream, AWS_ERROR_SUCCESS, s_tester.http_stream_user_data); s_tester.http_stream_on_complete_invoked = true; /* Bootstrap should have closed the connection after receiving the completed response */ ASSERT_TRUE(s_tester.http_connection_close_called); s_tester.http_connect_shutdown_callback( s_mock_http_connection, AWS_ERROR_SUCCESS, s_tester.http_stream_user_data); goto finishing_checks; } /* Bootstrap should have created new websocket */ ASSERT_TRUE(s_tester.websocket_new_called_successfully); /* Bootstrap should have notified that setup was successful */ ASSERT_TRUE(s_tester.websocket_setup_invoked); if (s_tester.websocket_setup_error_code) { goto finishing_checks; } /* Invoke HTTP shutdown callback */ if (s_tester.fail_at_step == BOOT_STEP_HTTP_SHUTDOWN) { s_complete_http_stream_and_connection(BOOT_STEP_HTTP_SHUTDOWN); goto finishing_checks; } s_complete_http_stream_and_connection(AWS_ERROR_SUCCESS); finishing_checks: /* Free the request */ if (s_tester.handshake_request) { aws_http_message_destroy(s_tester.handshake_request); s_tester.handshake_request = NULL; } if (!websocket_connect_called_successfully) { /* If we didn't even kick off the async process, aws_last_error() has reason for failure */ 
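/* In this case neither the setup nor the shutdown callback should have fired; the asserts below verify that. */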
*out_error_code = aws_last_error(); ASSERT_FALSE(s_tester.websocket_setup_invoked); ASSERT_FALSE(s_tester.websocket_shutdown_invoked); } else { /* If connection kicked off at all, setup callback must fire. */ ASSERT_TRUE(s_tester.websocket_setup_invoked); if (s_tester.websocket_setup_error_code) { *out_error_code = s_tester.websocket_setup_error_code; /* If setup callback reported failure, shutdown callback must never fire. */ ASSERT_FALSE(s_tester.websocket_shutdown_invoked); } else { *out_error_code = s_tester.websocket_shutdown_error_code; /* If setup callback reports success, shutdown callback must fire. */ ASSERT_TRUE(s_tester.websocket_shutdown_invoked); } } /* If request was created, it must be released eventually. */ if (s_tester.http_stream_new_called_successfully) { ASSERT_TRUE(s_tester.http_stream_release_called); } /* If HTTP connection was established, it must be released eventually. */ if (http_connect_setup_reported_success) { ASSERT_TRUE(s_tester.http_connection_release_called); } return AWS_OP_SUCCESS; } /* Test the infrastructure of this file */ TEST_CASE(websocket_boot_sanity_check) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } /* Test that connection and shutdown proceed as expected if we don't make anything go wrong. */ TEST_CASE(websocket_boot_golden_path) { (void)ctx; ASSERT_SUCCESS(s_tester_init(allocator)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, websocket_connect_error_code); ASSERT_TRUE(s_tester.websocket_setup_had_response_status); ASSERT_TRUE(s_tester.websocket_setup_had_response_headers); ASSERT_FALSE(s_tester.websocket_setup_had_response_body); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } /* Function to be reused by all the "fail at step X" tests. */ static int s_websocket_boot_fail_at_step_test(struct aws_allocator *alloc, void *ctx, enum boot_step fail_at_step) { (void)ctx; s_tester.fail_at_step = fail_at_step; ASSERT_SUCCESS(s_tester_init(alloc)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); ASSERT_INT_EQUALS(fail_at_step, websocket_connect_error_code); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(websocket_boot_fail_at_http_connect) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_HTTP_CONNECT); } TEST_CASE(websocket_boot_fail_at_http_connect_error) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_HTTP_CONNECT_COMPLETE); } TEST_CASE(websocket_boot_fail_at_new_request) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_REQUEST_NEW); } TEST_CASE(websocket_boot_fail_at_activate_request) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_REQUEST_ACTIVATE); } TEST_CASE(websocket_boot_fail_before_response_headers) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_BEFORE_HEADERS); } TEST_CASE(websocket_boot_fail_before_response_headers_done) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_BEFORE_HEADERS_DONE); } TEST_CASE(websocket_boot_fail_at_new_handler) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_WEBSOCKET_NEW); } TEST_CASE(websocket_boot_report_unexpected_http_shutdown) { return s_websocket_boot_fail_at_step_test(allocator, ctx, BOOT_STEP_HTTP_SHUTDOWN); } /* Test receiving a 4xx rejection response from the server. 
* Note that this test doesn't use fail_at_step, because we're not modeling * an "unexpected" HTTP failure. */ TEST_CASE(websocket_boot_fail_from_handshake_rejection) { (void)ctx; s_tester.handshake_response = &s_rejected_response; ASSERT_SUCCESS(s_tester_init(allocator)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE, websocket_connect_error_code); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } /* Test the connection dying early, while processing a 4xx rejection response. * Specifically, after the headers are received but before the body is received. */ TEST_CASE(websocket_boot_fail_before_handshake_rejection_body) { (void)ctx; s_tester.handshake_response = &s_rejected_response; s_tester.fail_at_step = BOOT_STEP_BEFORE_REJECTION_BODY; ASSERT_SUCCESS(s_tester_init(allocator)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); /* It's ambiguous what the error-code should be here. * The connection died early, AND we know from the status code that it was an UPGRADE_FAILURE. * Currently, the bootstrap is programmed to report it as a normal UPGRADE_FAILURE, * but don't report a body, because we didn't receive any */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE, websocket_connect_error_code); ASSERT_TRUE(s_tester.websocket_setup_had_response_status); ASSERT_TRUE(s_tester.websocket_setup_had_response_headers); ASSERT_FALSE(s_tester.websocket_setup_had_response_body); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } /* Test the connection dying early, while processing a 4xx rejection response. * Specifically, after some of the body is received, but before the stream completes. */ TEST_CASE(websocket_boot_fail_before_handshake_rejection_stream_complete) { (void)ctx; s_tester.handshake_response = &s_rejected_response; s_tester.fail_at_step = BOOT_STEP_BEFORE_REJECTION_STREAM_COMPLETE; ASSERT_SUCCESS(s_tester_init(allocator)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); /* It's ambiguous what the error-code should be here. * The connection died early, AND we know from the status code that it was an UPGRADE_FAILURE. * Currently, the bootstrap is programmed to report it as a normal UPGRADE_FAILURE, * but don't report a body, because we can't be 100% sure we got the whole thing. 
*/ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE, websocket_connect_error_code); ASSERT_TRUE(s_tester.websocket_setup_had_response_status); ASSERT_TRUE(s_tester.websocket_setup_had_response_headers); ASSERT_FALSE(s_tester.websocket_setup_had_response_body); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } /* Function to be reused by all tests that pass a bad 101 response */ static int s_websocket_boot_fail_from_bad_101_response( struct aws_allocator *alloc, const struct test_response *bad_response) { ASSERT_INT_EQUALS(101, bad_response->status_code, "This helper function is only for bad 101 responses"); s_tester.handshake_response = bad_response; ASSERT_SUCCESS(s_tester_init(alloc)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE, websocket_connect_error_code); ASSERT_TRUE(s_tester.websocket_setup_had_response_status); ASSERT_TRUE(s_tester.websocket_setup_had_response_headers); ASSERT_FALSE(s_tester.websocket_setup_had_response_body); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(websocket_boot_fail_from_invalid_upgrade_header) { (void)ctx; struct test_response bad_response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HTTP/9000"), /* ought to be "websocket" */ }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, }, }; return s_websocket_boot_fail_from_bad_101_response(allocator, &bad_response); } TEST_CASE(websocket_boot_fail_from_missing_upgrade_header) { (void)ctx; struct test_response bad_response = { .status_code = 101, .headers = { /* Commenting out required header { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, */ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, }, }; return s_websocket_boot_fail_from_bad_101_response(allocator, &bad_response); } TEST_CASE(websocket_boot_fail_from_invalid_connection_header) { (void)ctx; struct test_response bad_response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HeartToHeart"), /* ought to be "Upgrade" */ }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, }, }; return s_websocket_boot_fail_from_bad_101_response(allocator, &bad_response); } TEST_CASE(websocket_boot_fail_from_invalid_sec_websocket_accept_header) { (void)ctx; struct test_response bad_response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), /* ought to be "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="*/ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("S3PPLMBITXAQ9KYGZZHZRBK+XOO="), }, }, }; return s_websocket_boot_fail_from_bad_101_response(allocator, &bad_response); } TEST_CASE(websocket_boot_fail_from_unsupported_sec_websocket_extensions_in_request) { (void)ctx; struct aws_http_header extra_request_headers[] = { /* extensions are not currently supported */ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Extensions"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("permessage-deflate"), }, }; s_tester.extra_handshake_request_header_array = extra_request_headers; s_tester.num_extra_handshake_request_headers = AWS_ARRAY_SIZE(extra_request_headers); ASSERT_SUCCESS(s_tester_init(allocator)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, websocket_connect_error_code); ASSERT_FALSE(s_tester.websocket_setup_invoked); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(websocket_boot_fail_from_unsupported_sec_websocket_extensions_in_response) { (void)ctx; struct test_response bad_response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, { /* extensions are not currently supported */ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Extensions"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("permessage-deflate"), }, }, }; return s_websocket_boot_fail_from_bad_101_response(allocator, &bad_response); } /* If client requests a specific protocol, the server response must say it's being used */ TEST_CASE(websocket_boot_ok_with_sec_websocket_protocol_header) { (void)ctx; struct aws_http_header extra_request_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt"), }, }; s_tester.extra_handshake_request_header_array = extra_request_headers; s_tester.num_extra_handshake_request_headers = AWS_ARRAY_SIZE(extra_request_headers); struct test_response response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt"), }, }, }; s_tester.handshake_response = &response; ASSERT_SUCCESS(s_tester_init(allocator)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); ASSERT_INT_EQUALS(0, websocket_connect_error_code); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } /* The client can request a list of acceptable protocols (may be split across 
headers), and server must pick one */ TEST_CASE(websocket_boot_ok_with_sec_websocket_protocol_split_across_headers) { (void)ctx; struct aws_http_header extra_request_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http/1.1, http/2"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt, mqtt5, mqtt6"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("klingon, esperanto"), }, }; s_tester.extra_handshake_request_header_array = extra_request_headers; s_tester.num_extra_handshake_request_headers = AWS_ARRAY_SIZE(extra_request_headers); struct test_response response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt5"), }, }, }; s_tester.handshake_response = &response; ASSERT_SUCCESS(s_tester_init(allocator)); int websocket_connect_error_code; ASSERT_SUCCESS(s_drive_websocket_connect(&websocket_connect_error_code)); ASSERT_INT_EQUALS(0, websocket_connect_error_code); ASSERT_SUCCESS(s_tester_clean_up()); return AWS_OP_SUCCESS; } TEST_CASE(websocket_boot_fail_from_missing_sec_websocket_protocol_header) { (void)ctx; struct aws_http_header extra_request_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt, mqtt5"), }, }; s_tester.extra_handshake_request_header_array = extra_request_headers; s_tester.num_extra_handshake_request_headers = AWS_ARRAY_SIZE(extra_request_headers); struct test_response bad_response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, /* commenting out required header { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt5"), }, */ }, }; return s_websocket_boot_fail_from_bad_101_response(allocator, &bad_response); } TEST_CASE(websocket_boot_fail_from_invalid_sec_websocket_protocol_header) { (void)ctx; struct aws_http_header extra_request_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt, mqtt5"), }, }; s_tester.extra_handshake_request_header_array = extra_request_headers; s_tester.num_extra_handshake_request_headers = AWS_ARRAY_SIZE(extra_request_headers); struct test_response bad_response = { .status_code = 101, .headers = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"), }, { .name = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Accept"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3pPLMBiTxaQ9kYGzzhZRbK+xOo="), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), /* ought to be "mqtt" or "mqtt5" */ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt, mqtt5"), }, }, }; return s_websocket_boot_fail_from_bad_101_response(allocator, &bad_response); } /* Check that AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH is sufficiently large */ TEST_CASE(websocket_handshake_key_max_length) { (void)allocator; (void)ctx; uint8_t small_buf_storage[AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH]; for (size_t i = 0; i < 100; ++i) { struct aws_byte_buf small_buf = aws_byte_buf_from_empty_array(small_buf_storage, sizeof(small_buf_storage)); ASSERT_SUCCESS(aws_websocket_random_handshake_key(&small_buf)); } return AWS_OP_SUCCESS; } /* Ensure keys are random */ TEST_CASE(websocket_handshake_key_randomness) { (void)ctx; enum { count = 100 }; struct aws_byte_buf keys[count]; for (int i = 0; i < count; ++i) { struct aws_byte_buf *key = &keys[i]; ASSERT_SUCCESS(aws_byte_buf_init(key, allocator, AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH)); ASSERT_SUCCESS(aws_websocket_random_handshake_key(key)); for (int existing_i = 0; existing_i < i; ++existing_i) { struct aws_byte_buf *existing = &keys[existing_i]; ASSERT_FALSE(aws_byte_buf_eq(key, existing)); } } for (int i = 0; i < count; ++i) { aws_byte_buf_clean_up(&keys[i]); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_websocket_decoder.c000066400000000000000000000767071456575232400262500ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #define DECODER_TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) struct decoder_tester { struct aws_allocator *alloc; struct aws_logger logger; struct aws_websocket_decoder decoder; void *specific_test_data; struct aws_websocket_frame frame; size_t on_frame_count; size_t fail_on_nth_frame; struct aws_byte_buf payload; size_t on_payload_count; size_t fail_on_nth_payload; }; static int s_on_frame(const struct aws_websocket_frame *frame, void *user_data) { struct decoder_tester *tester = user_data; tester->frame = *frame; tester->on_frame_count++; if (tester->on_frame_count == tester->fail_on_nth_frame) { return aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); } return AWS_OP_SUCCESS; } static int s_on_payload(struct aws_byte_cursor data, void *user_data) { struct decoder_tester *tester = user_data; ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&tester->payload, &data)); tester->on_payload_count++; if (tester->on_payload_count == tester->fail_on_nth_payload) { return aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); } return AWS_OP_SUCCESS; } /* For resetting the decoder and its results mid-test */ static void s_decoder_tester_reset(struct decoder_tester *tester) { aws_websocket_decoder_clean_up(&tester->decoder); aws_websocket_decoder_init(&tester->decoder, tester->alloc, s_on_frame, s_on_payload, tester); AWS_ZERO_STRUCT(tester->frame); tester->on_frame_count = 0; tester->payload.len = 0; tester->on_payload_count = 0; } static int s_decoder_tester_init(struct decoder_tester *tester, struct aws_allocator *alloc) { aws_http_library_init(alloc); AWS_ZERO_STRUCT(*tester); tester->alloc = alloc; struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; ASSERT_SUCCESS(aws_logger_init_standard(&tester->logger, tester->alloc, &logger_options)); aws_logger_set(&tester->logger); ASSERT_SUCCESS(aws_byte_buf_init(&tester->payload, alloc, 1024)); s_decoder_tester_reset(tester); return AWS_OP_SUCCESS; } static int s_decoder_tester_clean_up(struct decoder_tester *tester) { aws_byte_buf_clean_up(&tester->payload); aws_websocket_decoder_clean_up(&tester->decoder); aws_http_library_clean_up(); aws_logger_clean_up(&tester->logger); return AWS_OP_SUCCESS; } static int s_compare_frame(const struct aws_websocket_frame *expected, const struct aws_websocket_frame *decoded) { uint8_t a[24]; memcpy(a, expected, 24); uint8_t b[24]; memcpy(b, decoded, 24); /* compare each field so it's clear where test failed */ ASSERT_UINT_EQUALS(expected->fin, decoded->fin); ASSERT_UINT_EQUALS(expected->rsv[0], decoded->rsv[0]); ASSERT_UINT_EQUALS(expected->rsv[1], decoded->rsv[1]); ASSERT_UINT_EQUALS(expected->rsv[2], decoded->rsv[2]); ASSERT_UINT_EQUALS(expected->masked, decoded->masked); ASSERT_UINT_EQUALS(expected->opcode, decoded->opcode); ASSERT_UINT_EQUALS(expected->payload_length, decoded->payload_length); ASSERT_UINT_EQUALS(expected->masking_key[0], decoded->masking_key[0]); ASSERT_UINT_EQUALS(expected->masking_key[1], decoded->masking_key[1]); ASSERT_UINT_EQUALS(expected->masking_key[2], decoded->masking_key[2]); ASSERT_UINT_EQUALS(expected->masking_key[3], decoded->masking_key[3]); return AWS_OP_SUCCESS; }; DECODER_TEST_CASE(websocket_decoder_sanity_check) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); 
ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test decoding simplest possible frame, no payload */ DECODER_TEST_CASE(websocket_decoder_simplest_frame) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { 0x89, // fin | rsv1 | rsv2 | rsv3 | 4bit opcode 0x00, // mask | 7bit payload len }; struct aws_websocket_frame expected_frame = { .fin = true, .opcode = 9, }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_TRUE(frame_complete); ASSERT_UINT_EQUALS(1, tester.on_frame_count); ASSERT_UINT_EQUALS(0, tester.on_payload_count); ASSERT_UINT_EQUALS(0, tester.payload.len); ASSERT_UINT_EQUALS(0, input_cursor.len); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test the 3 RSV bools */ DECODER_TEST_CASE(websocket_decoder_rsv) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); /* Test 3 times, each time with one RSV bool set */ for (int rsv = 0; rsv < 3; ++rsv) { uint8_t input[] = { 0x89, // fin | rsv1 | rsv2 | rsv3 | 4bit opcode 0x00, // mask | 7bit payload len }; /* Set the appropriate RSV */ /* the bit arithmetic is setup this way to avoid Conversion warnings from the compiler. */ input[0] |= (1 << (6 - rsv)); struct aws_websocket_frame expected_frame = { .fin = true, .opcode = 9, }; expected_frame.rsv[rsv] = true; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_TRUE(frame_complete); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); } ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test decoding a simple data frame, with a payload */ DECODER_TEST_CASE(websocket_decoder_data_frame) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { 0x82, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x04, /* mask | 7bit payload len */ /* payload */ 0x00, 0x0F, 0xF0, 0xFF, }; const uint8_t expected_payload[] = {0x00, 0x0F, 0xF0, 0xFF}; struct aws_websocket_frame expected_frame = { .fin = true, .opcode = 2, .payload_length = sizeof(expected_payload), }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_TRUE(frame_complete); ASSERT_UINT_EQUALS(1, tester.on_frame_count); ASSERT_UINT_EQUALS(1, tester.on_payload_count); ASSERT_UINT_EQUALS(0, input_cursor.len); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); struct aws_byte_cursor expected_cursor = aws_byte_cursor_from_array(expected_payload, sizeof(expected_payload)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&expected_cursor, &tester.payload)); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test aws_websocket_decoder_process() returns at the end of each frame */ DECODER_TEST_CASE(websocket_decoder_stops_at_frame_end) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t 
input[] = { 0x82, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x04, /* mask | 7bit payload len */ /* payload */ 0x00, 0x0F, 0xF0, 0xFF, /* extra data that should not be processed */ 0x11, 0x22, }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_TRUE(frame_complete); ASSERT_UINT_EQUALS(1, tester.on_frame_count); ASSERT_UINT_EQUALS(1, tester.on_payload_count); ASSERT_UINT_EQUALS(2, input_cursor.len); /* Check that there's data left over */ ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test a single-frame masked text message */ DECODER_TEST_CASE(websocket_decoder_masking) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); /* Test from RFC-6455 Section 5.7 - Examples - A single-frame masked text message */ uint8_t input[] = { 0x81, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x85, /* mask | 7bit payload len */ /* masking key */ 0x37, 0xfa, 0x21, 0x3d, /* payload */ 0x7f, 0x9f, 0x4d, 0x51, 0x58, }; const char *expected_payload = "Hello"; struct aws_websocket_frame expected_frame = { .fin = true, .opcode = 1, .masked = true, .masking_key = {0x37, 0xfa, 0x21, 0x3d}, .payload_length = strlen(expected_payload), }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_TRUE(frame_complete); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&tester.payload, expected_payload)); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test a data frame which uses the 2 byte extended-length encoding */ DECODER_TEST_CASE(websocket_decoder_extended_length_2byte) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); struct length_validity_pair { uint16_t len; bool valid; }; uint8_t input[4] = { 0x82, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x7E, /* mask | 7bit payload len */ /* 2byte extended length...
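 * (A 7-bit payload length of 126 (0x7E) signals that the real length follows in the next 2 bytes, in network byte order.)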
*/ }; struct aws_websocket_frame expected_frame = { .fin = true, .opcode = 2, }; /* lengths greater than 125 should be encoded in 2 bytes */ struct length_validity_pair length_validity_pairs[] = { {0, false}, /* should use 7bit length encoding */ {1, false}, /* should use 7bit length encoding */ {125, false}, /* highest number for 7bit length encoding */ {126, true}, /* lowest number for 2byte extended length */ {127, true}, /* should be encoded in 2byte extended length */ {0x0100, true}, /* just another value for 2byte extended length */ {0xFFFF, true}, /* highest number for 2byte extended length */ }; for (size_t i = 0; i < AWS_ARRAY_SIZE(length_validity_pairs); ++i) { struct length_validity_pair pair_i = length_validity_pairs[i]; s_decoder_tester_reset(&tester); /* write extended-length to input buffer */ uint16_t network_num = aws_hton16(pair_i.len); memcpy(input + 2, &network_num, sizeof(network_num)); /* adapt expected_frame */ expected_frame.payload_length = pair_i.len; /* Process input (only sending non-payload portion of frame) */ bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); if (pair_i.valid) { ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_FALSE(frame_complete); ASSERT_UINT_EQUALS(0, input_cursor.len); ASSERT_UINT_EQUALS(1, tester.on_frame_count); ASSERT_UINT_EQUALS(0, tester.on_payload_count); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); } else { aws_raise_error(-1); /* overwrite last-error */ ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); } } ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } DECODER_TEST_CASE(websocket_decoder_extended_length_8byte) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); struct length_validity_pair { uint64_t len; bool valid; }; uint8_t input[10] = { 0x82, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x7F, /* mask | 7bit payload len */ /* 8byte extended length... 
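 * (A 7-bit payload length of 127 (0x7F) signals that the real length follows in the next 8 bytes, in network byte order, and its most significant bit must be 0.)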
*/ }; struct aws_websocket_frame expected_frame = { .fin = true, .opcode = 2, }; /* 8byte lengths should require at least 2 bytes to encode, and the high-order bit should be 0 */ struct length_validity_pair length_validity_pairs[] = { {125, false}, /* highest number for 7bit length encoding */ {126, false}, /* lowest number for 2byte extended length */ {127, false}, /* should be encoded in 2byte extended length */ {0x0100, false}, /* just another value for 2byte extended length */ {0xFFFF, false}, /* highest number for 2byte extended length */ {0x0000000000010000, true}, /* lowest number for 8byte extended length */ {0x7FFFFFFFFFFFFFFF, true}, /* highest number for 8byte extended length */ {0x123456789ABCDEF0, true}, /* just another value for 8byte extended length */ {0x8000000000000000, false}, /* illegal use high bit in 8byte extended length */ {0xFFFFFFFFFFFFFFFF, false}, /* illegal use high bit in 8byte extended length */ }; for (size_t i = 0; i < AWS_ARRAY_SIZE(length_validity_pairs); ++i) { struct length_validity_pair pair_i = length_validity_pairs[i]; s_decoder_tester_reset(&tester); /* write extended-length to input buffer */ uint64_t network_num = aws_hton64(pair_i.len); memcpy(input + 2, &network_num, sizeof(network_num)); /* adapt expected_frame */ expected_frame.payload_length = pair_i.len; /* Process input (only sending non-payload portion of frame) */ bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); if (pair_i.valid) { ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_FALSE(frame_complete); ASSERT_UINT_EQUALS(0, input_cursor.len); ASSERT_UINT_EQUALS(1, tester.on_frame_count); ASSERT_UINT_EQUALS(0, tester.on_payload_count); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); } else { aws_raise_error(-1); /* overwrite last-error */ ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); } } ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that decoder can handle data that's split at any possible point */ DECODER_TEST_CASE(websocket_decoder_1byte_at_a_time) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); /* Use all optional frame features in this test (8byte extended payload length and masking-key). 
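 * Feeding the input one byte at a time verifies the decoder can pause and resume at any split point.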
* Even though we say the payload is long, we're only going to send a portion of it in this test */ uint8_t input[] = { 0x81, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0xFF, /* mask | 7bit payload len */ /* 8byte extended payload len */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, /* masking key */ 0x37, 0xfa, 0x21, 0x3d, /* payload */ 0x7f, 0x9f, 0x4d, 0x51, 0x58, }; const char *expected_payload = "Hello"; struct aws_websocket_frame expected_frame = { .fin = true, .opcode = 1, .masked = true, .masking_key = {0x37, 0xfa, 0x21, 0x3d}, .payload_length = 0x10000, }; for (size_t i = 0; i < sizeof(input); ++i) { bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input + i, 1); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_FALSE(frame_complete); ASSERT_UINT_EQUALS(0, input_cursor.len); } /* check result */ ASSERT_UINT_EQUALS(1, tester.on_frame_count); ASSERT_UINT_EQUALS(5, tester.on_payload_count); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&tester.payload, expected_payload)); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } DECODER_TEST_CASE(websocket_decoder_fail_on_unknown_opcode) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { 0x07, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x00, /* mask | 7bit payload len */ }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test fragmented messages, which arrive across multiple frames whose FIN bit is cleared */ DECODER_TEST_CASE(websocket_decoder_fragmented_message) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { /* TEXT FRAME */ 0x01, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x03, /* mask | 7bit payload len */ 'h', 'o', 't', /* CONTINUATION FRAME */ 0x00, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x02, /* mask | 7bit payload len */ 'd', 'o', /* PING FRAME - Control frames may be injected in the middle of a fragmented message. 
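 * The decoder should surface the PING as its own complete frame without disturbing the surrounding TEXT message's payload.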
*/ 0x89, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x00, /* mask | 7bit payload len */ /* CONTINUATION FRAME */ 0x80, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ 'g', }; struct aws_websocket_frame expected_frames[] = { { .fin = false, .opcode = 1, .payload_length = 3, }, { .fin = false, .opcode = 0, .payload_length = 2, }, { .fin = true, .opcode = 9, }, { .fin = true, .opcode = 0, .payload_length = 1, }, }; const char *expected_payload = "hotdog"; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); for (size_t i = 0; i < AWS_ARRAY_SIZE(expected_frames); ++i) { bool frame_complete; ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_TRUE(frame_complete); ASSERT_UINT_EQUALS(i + 1, tester.on_frame_count); ASSERT_SUCCESS(s_compare_frame(&expected_frames[i], &tester.frame)); } ASSERT_TRUE(aws_byte_buf_eq_c_str(&tester.payload, expected_payload)); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } DECODER_TEST_CASE(websocket_decoder_fail_on_bad_fragmentation) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { /* TEXT FRAME with FIN=0 */ 0x01, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ 'a', /* TEXT FRAME - but ought to be a CONTINUATION frame */ 0x01, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ 'b', }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Control frames must have FIN bit set */ DECODER_TEST_CASE(websocket_decoder_control_frame_cannot_be_fragmented) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { 0x0A, // fin | rsv1 | rsv2 | rsv3 | 4bit opcode 0x00, // mask | 7bit payload len }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that we can process a TEXT frame with UTF-8 in it */ DECODER_TEST_CASE(websocket_decoder_utf8_text) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { /* TEXT FRAME */ 0x81, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x04, /* mask | 7bit payload len */ /* payload - codepoint U+10348 as 4-byte UTF-8 */ 0xF0, 0x90, 0x8D, 0x88, }; struct aws_websocket_frame expected_frame = { .fin = true, .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .payload_length = 4, }; const char *expected_payload = "\xF0\x90\x8D\x88"; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* check result */ ASSERT_TRUE(frame_complete); ASSERT_SUCCESS(s_compare_frame(&expected_frame, &tester.frame)); 
ASSERT_TRUE(aws_byte_buf_eq_c_str(&tester.payload, expected_payload)); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that a TEXT frame with invalid UTF-8 fails */ DECODER_TEST_CASE(websocket_decoder_fail_on_bad_utf8_text) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); { /* Test validation failing when it hits totally bad byte values */ uint8_t input[] = { /* TEXT FRAME */ 0x81, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload - illegal UTF-8 value */ 0xFF, }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); } s_decoder_tester_reset(&tester); { /* Test validation failing at the end, due to a 4-byte codepoint missing 1 byte */ uint8_t input[] = { /* TEXT FRAME */ 0x81, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x03, /* mask | 7bit payload len */ /* payload - codepoint U+10348 as 4-byte UTF-8, but missing 4th byte */ 0xF0, 0x90, 0x8D, /* 0x88, <-- missing 4th byte */ }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); } ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that UTF-8 can be validated even if it's fragmented across frames */ DECODER_TEST_CASE(websocket_decoder_fragmented_utf8_text) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); /* Split a 4-byte UTF-8 codepoint across a fragmented message. * codepoint U+10348 is UTF-8 bytes: 0xF0, 0x90, 0x8D, 0x88 */ uint8_t input[] = { /* TEXT FRAME */ 0x01, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload */ 0xF0, /* 1/4 UTF-8 bytes */ /* CONTINUATION FRAME */ 0x00, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x02, /* mask | 7bit payload len */ /* payload */ 0x90, /* 2/4 UTF-8 bytes */ 0x8D, /* 3/4 UTF-8 bytes */ /* PING FRAME - Control frames may be injected in the middle of a fragmented message. 
*/ 0x89, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload - PING payload should not interfere with validation */ 0xFF, /* CONTINUATION FRAME */ 0x80, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload */ 0x88, /* 4/4 UTF-8 bytes */ }; struct aws_websocket_frame expected_frames[] = { { .fin = false, .opcode = 1, .payload_length = 1, }, { .fin = false, .opcode = 0, .payload_length = 2, }, { .fin = true, .opcode = 9, .payload_length = 1, }, { .fin = true, .opcode = 0, .payload_length = 1, }, }; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); for (size_t i = 0; i < AWS_ARRAY_SIZE(expected_frames); ++i) { bool frame_complete; ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_TRUE(frame_complete); ASSERT_UINT_EQUALS(i + 1, tester.on_frame_count); ASSERT_SUCCESS(s_compare_frame(&expected_frames[i], &tester.frame)); } ASSERT_UINT_EQUALS(0, input_cursor.len); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that UTF-8 validator works even when text is fragmented across multiple frames */ DECODER_TEST_CASE(websocket_decoder_fail_on_fragmented_bad_utf8_text) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); /* Split a 4-byte UTF-8 codepoint across a fragmented message, but omit the last byte. * codepoint U+10348 is UTF-8 bytes: 0xF0, 0x90, 0x8D, 0x88 */ uint8_t input[] = { /* TEXT FRAME */ 0x01, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload */ 0xF0, /* 1/4 UTF-8 bytes */ /* CONTINUATION FRAME */ 0x00, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload */ 0x90, /* 2/4 UTF-8 bytes */ /* PING FRAME - Control frames may be injected in the middle of a fragmented message.
*/ 0x89, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload - PING payload shouldn't interfere with the TEXT's validation */ 0x8D, /* CONTINUATION FRAME */ 0x80, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ /* payload */ 0x8D, /* 3/4 UTF-8 bytes */ /* 0x88, <-- MISSING 4/4 UTF-8 bytes */ }; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); /* TEXT should pass */ ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_TRUE(frame_complete); /* CONTINUATION should pass */ ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_TRUE(frame_complete); /* PING should pass */ ASSERT_SUCCESS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_TRUE(frame_complete); /* final CONTINUATION should fail because the message ended with an incomplete UTF-8 encoding */ ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR, aws_last_error()); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test that an error from the on_frame callback fails the decoder */ DECODER_TEST_CASE(websocket_decoder_on_frame_callback_can_fail_decoder) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { 0x81, // fin | rsv1 | rsv2 | rsv3 | 4bit opcode 0x01, // mask | 7bit payload len 'a', }; tester.fail_on_nth_frame = 1; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* Check that error returned by callback bubbles up. * UNKNOWN error just happens to be what our test callback throws */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_UNKNOWN, aws_last_error()); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } DECODER_TEST_CASE(websocket_decoder_on_payload_callback_can_fail_decoder) { (void)ctx; struct decoder_tester tester; ASSERT_SUCCESS(s_decoder_tester_init(&tester, allocator)); uint8_t input[] = { 0x81, // fin | rsv1 | rsv2 | rsv3 | 4bit opcode 0x01, // mask | 7bit payload len 'a', }; tester.fail_on_nth_payload = 1; bool frame_complete; struct aws_byte_cursor input_cursor = aws_byte_cursor_from_array(input, sizeof(input)); ASSERT_FAILS(aws_websocket_decoder_process(&tester.decoder, &input_cursor, &frame_complete)); /* Check that error returned by callback bubbles up. * UNKNOWN error just happens to be what our test callback throws */ ASSERT_INT_EQUALS(AWS_ERROR_HTTP_UNKNOWN, aws_last_error()); ASSERT_SUCCESS(s_decoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_websocket_encoder.c000066400000000000000000000511131456575232400262420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #define ENCODER_TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) struct encoder_tester { struct aws_allocator *alloc; struct aws_logger logger; struct aws_websocket_encoder encoder; /* payload to encode */ struct aws_byte_cursor payload; size_t on_payload_count; size_t fail_on_nth_payload; bool payload_length_is_wrong_on_purpose; uint8_t out_buf_storage[1024]; struct aws_byte_buf out_buf; }; static int s_on_payload(struct aws_byte_buf *out_buf, void *user_data) { struct encoder_tester *tester = user_data; tester->on_payload_count++; if (tester->fail_on_nth_payload == tester->on_payload_count) { return aws_raise_error(AWS_ERROR_UNKNOWN); } if (tester->payload.len > 0) { size_t space_available = out_buf->capacity - out_buf->len; size_t bytes_to_write = space_available < tester->payload.len ? space_available : tester->payload.len; if (!aws_byte_buf_write(out_buf, tester->payload.ptr, bytes_to_write)) { return aws_raise_error(AWS_ERROR_UNKNOWN); /* write shouldn't fail, but just in case */ } aws_byte_cursor_advance(&tester->payload, bytes_to_write); } else { if (!tester->payload_length_is_wrong_on_purpose) { return aws_raise_error(AWS_ERROR_UNKNOWN); /* encoder should have stopped asking for more payload */ } } return AWS_OP_SUCCESS; } static void s_encoder_tester_reset(struct encoder_tester *tester) { aws_websocket_encoder_init(&tester->encoder, s_on_payload, tester); tester->out_buf.len = 0; } static int s_encoder_tester_init(struct encoder_tester *tester, struct aws_allocator *alloc) { aws_http_library_init(alloc); AWS_ZERO_STRUCT(*tester); tester->alloc = alloc; struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; ASSERT_SUCCESS(aws_logger_init_standard(&tester->logger, tester->alloc, &logger_options)); aws_logger_set(&tester->logger); tester->out_buf = aws_byte_buf_from_empty_array(tester->out_buf_storage, sizeof(tester->out_buf_storage)); s_encoder_tester_reset(tester); return AWS_OP_SUCCESS; } static int s_encoder_tester_clean_up(struct encoder_tester *tester) { aws_http_library_clean_up(); aws_logger_clean_up(&tester->logger); return AWS_OP_SUCCESS; } static bool aws_byte_buf_eq_array(const struct aws_byte_buf *buf, const void *array, size_t array_len) { return aws_array_eq(buf->buffer, buf->len, array, array_len); } ENCODER_TEST_CASE(websocket_encoder_sanity_check) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test encoding a frame with no payload or mask */ ENCODER_TEST_CASE(websocket_encoder_simplest_frame) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); struct aws_websocket_frame input_frame = { .fin = true, .opcode = 9, }; uint8_t expected_output[] = { 0x89, // fin | rsv1 | rsv2 | rsv3 | 4bit opcode 0x00, // mask | 7bit payload len }; ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_FALSE(aws_websocket_encoder_is_frame_in_progress(&tester.encoder)); ASSERT_TRUE(aws_byte_buf_eq_array(&tester.out_buf, expected_output, sizeof(expected_output))); ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test the 3 RSV bools */ ENCODER_TEST_CASE(websocket_encoder_rsv) { (void)ctx; struct encoder_tester 
tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); for (int rsv = 0; rsv < 3; ++rsv) { struct aws_websocket_frame input_frame = { .fin = true, .opcode = 9, }; input_frame.rsv[rsv] = true; uint8_t expected_output[] = { 0x89, // fin | rsv1 | rsv2 | rsv3 | 4bit opcode 0x00, // mask | 7bit payload len }; expected_output[0] |= (1 << (6 - rsv)); tester.out_buf.len = 0; /* reset output buffer */ ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_FALSE(aws_websocket_encoder_is_frame_in_progress(&tester.encoder)); ASSERT_TRUE(aws_byte_buf_eq_array(&tester.out_buf, expected_output, sizeof(expected_output))); } ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } ENCODER_TEST_CASE(websocket_encoder_data_frame) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); uint8_t input_payload[] = {0x00, 0x0F, 0xF0, 0xFF}; tester.payload = aws_byte_cursor_from_array(input_payload, sizeof(input_payload)); struct aws_websocket_frame input_frame = { .fin = true, .opcode = 2, .payload_length = sizeof(input_payload), }; uint8_t expected_output[] = { 0x82, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x04, /* mask | 7bit payload len */ /* payload */ 0x00, 0x0F, 0xF0, 0xFF, }; ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_FALSE(aws_websocket_encoder_is_frame_in_progress(&tester.encoder)); ASSERT_TRUE(aws_byte_buf_eq_array(&tester.out_buf, expected_output, sizeof(expected_output))); ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } ENCODER_TEST_CASE(websocket_encoder_fail_if_payload_exceeds_stated_length) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); const struct aws_websocket_frame input_frame = { .fin = true, .opcode = 2, .payload_length = 4, }; const uint8_t input_payload[5] = {0}; tester.payload = aws_byte_cursor_from_array(input_payload, sizeof(input_payload)); ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_FAILS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT, aws_last_error()); ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } ENCODER_TEST_CASE(websocket_encoder_masking) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); /* Test from RFC-6455 Section 5.7 - Examples - A single-frame masked text message */ const char *input_payload = "Hello"; tester.payload = aws_byte_cursor_from_c_str(input_payload); struct aws_websocket_frame input_frame = { .fin = true, .opcode = 1, .masked = true, .masking_key = {0x37, 0xfa, 0x21, 0x3d}, .payload_length = strlen(input_payload), }; uint8_t expected_output[] = { 0x81, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x85, /* mask | 7bit payload len */ /* masking key */ 0x37, 0xfa, 0x21, 0x3d, /* payload */ 0x7f, 0x9f, 0x4d, 0x51, 0x58, }; ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_FALSE(aws_websocket_encoder_is_frame_in_progress(&tester.encoder)); ASSERT_TRUE(aws_byte_buf_eq_array(&tester.out_buf, expected_output, sizeof(expected_output))); 
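/* Note on the expected payload bytes above: per RFC 6455 Section 5.3, masking is a byte-wise XOR with the 4-byte key, masked[i] = payload[i] ^ key[i % 4]. For example 'H' (0x48) ^ 0x37 == 0x7f, which matches the first masked payload byte checked above. */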
ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } ENCODER_TEST_CASE(websocket_encoder_extended_length) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); enum length_type { LENGTH_IN_7BITS, LENGTH_IN_2BYTES, LENGTH_IN_8BYTES, LENGTH_ILLEGAL, }; struct actual_length_type_pair { uint64_t len; enum length_type type; }; struct actual_length_type_pair test_pairs[] = { {0, LENGTH_IN_7BITS}, {1, LENGTH_IN_7BITS}, {125, LENGTH_IN_7BITS}, /* highest number for 7bit length encoding */ {126, LENGTH_IN_2BYTES}, {127, LENGTH_IN_2BYTES}, {0x00FF, LENGTH_IN_2BYTES}, {0x0100, LENGTH_IN_2BYTES}, {0xFFFF, LENGTH_IN_2BYTES}, /* highest number for 2byte extended length */ {0x0000000000010000, LENGTH_IN_8BYTES}, {0x7FFFFFFFFFFFFFFF, LENGTH_IN_8BYTES}, {0x123456789ABCDEF0, LENGTH_IN_8BYTES}, {0x8000000000000000, LENGTH_ILLEGAL}, /* illegal to use high bit in 8byte extended length */ {0xFFFFFFFFFFFFFFFF, LENGTH_ILLEGAL}, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(test_pairs); ++i) { struct actual_length_type_pair pair_i = test_pairs[i]; /* Reset encoder for each pair. */ s_encoder_tester_reset(&tester); /* Don't actually encode the payload, we're just testing the non-payload portion of the frame here */ tester.payload.len = 0; tester.payload_length_is_wrong_on_purpose = true; struct aws_websocket_frame input_frame = { .fin = true, .opcode = 2, .payload_length = pair_i.len, }; if (pair_i.type == LENGTH_ILLEGAL) { ASSERT_FAILS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); } else { uint8_t expected_output_array[10]; struct aws_byte_buf expected_output = aws_byte_buf_from_empty_array(expected_output_array, sizeof(expected_output_array)); aws_byte_buf_write_u8(&expected_output, 0x82); /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ switch (pair_i.type) { case LENGTH_IN_7BITS: aws_byte_buf_write_u8(&expected_output, (uint8_t)pair_i.len); /* 7bit length */ break; case LENGTH_IN_2BYTES: aws_byte_buf_write_u8(&expected_output, AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH); aws_byte_buf_write_be16(&expected_output, (uint16_t)pair_i.len); /* extended length */ break; default: aws_byte_buf_write_u8(&expected_output, AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH); aws_byte_buf_write_be64(&expected_output, pair_i.len); /* extended length */ break; } ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_TRUE(aws_byte_buf_eq(&tester.out_buf, &expected_output)); } } ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Ensure the encoder can handle outputing data across split buffers. * Best way I know is to output 1 byte at a time, that covers EVERY possible splitting point. */ ENCODER_TEST_CASE(websocket_encoder_1_byte_at_a_time) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); /* Use all optional frame features in this test (8byte extended payload length and masking-key). 
* Even though we say the payload is long, we're only going to send a portion of it in this test */ const char *input_payload = "Hello"; tester.payload = aws_byte_cursor_from_c_str(input_payload); tester.payload_length_is_wrong_on_purpose = true; const struct aws_websocket_frame input_frame = { .fin = true, .opcode = 1, .masked = true, .masking_key = {0x37, 0xfa, 0x21, 0x3d}, .payload_length = 0x0102030405060708, }; const uint8_t expected_output[] = { 0x81, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0xFF, /* mask | 7bit payload len */ /* 8byte extended payload len */ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, /* masking key */ 0x37, 0xfa, 0x21, 0x3d, /* payload */ 0x7f, 0x9f, 0x4d, 0x51, 0x58, }; ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); for (size_t i = 0; i < sizeof(expected_output); ++i) { uint8_t one_sad_byte; struct aws_byte_buf one_sad_byte_buf = aws_byte_buf_from_empty_array(&one_sad_byte, 1); ASSERT_TRUE(aws_websocket_encoder_is_frame_in_progress(&tester.encoder)); ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &one_sad_byte_buf)); aws_byte_buf_write_from_whole_buffer(&tester.out_buf, one_sad_byte_buf); } ASSERT_TRUE(aws_byte_buf_eq_array(&tester.out_buf, expected_output, sizeof(expected_output))); ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test fragmented messages, which are sent via multiple frames whose FIN bit is cleared */ ENCODER_TEST_CASE(websocket_encoder_fragmented_message) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); struct frame_payload_pair { struct aws_websocket_frame frame; const char *payload; }; const struct frame_payload_pair input_pairs[] = { /* TEXT FRAME */ { { .fin = false, .opcode = 1, .payload_length = 3, }, "hot", }, { /* CONTINUATION FRAME */ { .fin = false, .opcode = 0, .payload_length = 2, }, "do", }, /* PING FRAME - Control frames may be injected in the middle of a fragmented message. 
*/ { { .fin = true, .opcode = 9, }, "", }, /* CONTINUATION FRAME */ { { .fin = true, .opcode = 0, .payload_length = 1, }, "g", }, }; const uint8_t expected_output[] = { /* TEXT FRAME */ 0x01, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x03, /* mask | 7bit payload len */ 'h', 'o', 't', /* CONTINUATION FRAME */ 0x00, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x02, /* mask | 7bit payload len */ 'd', 'o', /* PING FRAME */ 0x89, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x00, /* mask | 7bit payload len */ /* CONTINUATION FRAME */ 0x80, /* fin | rsv1 | rsv2 | rsv3 | 4bit opcode */ 0x01, /* mask | 7bit payload len */ 'g', }; for (size_t i = 0; i < AWS_ARRAY_SIZE(input_pairs); ++i) { const struct frame_payload_pair *pair_i = &input_pairs[i]; tester.payload = aws_byte_cursor_from_c_str(pair_i->payload); ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &pair_i->frame)); ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_FALSE(aws_websocket_encoder_is_frame_in_progress(&tester.encoder)); } ASSERT_TRUE(aws_byte_buf_eq_array(&tester.out_buf, expected_output, sizeof(expected_output))); ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Test illegal sequences of fragmented (FIN bit is clear) frames */ ENCODER_TEST_CASE(websocket_encoder_fragmentation_failure_checks) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); struct aws_websocket_frame fragmented_control_frames[] = { { .fin = false, .opcode = AWS_WEBSOCKET_OPCODE_PING, }, }; struct aws_websocket_frame no_fin_bit_between_messages[] = { { .fin = false, .opcode = AWS_WEBSOCKET_OPCODE_TEXT, }, { .fin = true, .opcode = AWS_WEBSOCKET_OPCODE_TEXT, }, }; struct aws_websocket_frame no_fin_bit_between_messages2[] = { { .fin = false, .opcode = AWS_WEBSOCKET_OPCODE_TEXT, }, { .fin = false, .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, }, { .fin = true, .opcode = AWS_WEBSOCKET_OPCODE_TEXT, }, }; struct aws_websocket_frame continuation_frame_without_preceding_data_frame[] = { { .fin = false, .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, }, }; struct aws_websocket_frame continuation_frame_without_preceding_data_frame2[] = { { .fin = true, .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, }, }; struct test_def { struct aws_websocket_frame *frames; size_t num_frames; int error_code; }; struct test_def test_defs[] = { { .frames = fragmented_control_frames, .num_frames = AWS_ARRAY_SIZE(fragmented_control_frames), .error_code = AWS_ERROR_INVALID_ARGUMENT, }, { .frames = no_fin_bit_between_messages, .num_frames = AWS_ARRAY_SIZE(no_fin_bit_between_messages), .error_code = AWS_ERROR_INVALID_STATE, }, { .frames = no_fin_bit_between_messages2, .num_frames = AWS_ARRAY_SIZE(no_fin_bit_between_messages2), .error_code = AWS_ERROR_INVALID_STATE, }, { .frames = continuation_frame_without_preceding_data_frame, .num_frames = AWS_ARRAY_SIZE(continuation_frame_without_preceding_data_frame), .error_code = AWS_ERROR_INVALID_STATE, }, { .frames = continuation_frame_without_preceding_data_frame2, .num_frames = AWS_ARRAY_SIZE(continuation_frame_without_preceding_data_frame2), .error_code = AWS_ERROR_INVALID_STATE, }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(test_defs); ++i) { struct test_def *test_i = &test_defs[i]; s_encoder_tester_reset(&tester); int err = 0; for (size_t frame_i = 0; frame_i < test_i->num_frames; ++frame_i) { /* We expect the encoder to fail at some point in this test. 
* Currently, fragmentation errors are detected in the frame_start() call */ err = aws_websocket_encoder_start_frame(&tester.encoder, &test_i->frames[frame_i]); if (err) { ASSERT_INT_EQUALS(test_i->error_code, aws_last_error()); /* Error code */ break; } ASSERT_SUCCESS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); ASSERT_FALSE(aws_websocket_encoder_is_frame_in_progress(&tester.encoder)); } /* Assert that test did fail at some point */ ASSERT_INT_EQUALS(AWS_OP_ERR, err); } ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } ENCODER_TEST_CASE(websocket_encoder_payload_callback_can_fail_encoder) { (void)ctx; struct encoder_tester tester; ASSERT_SUCCESS(s_encoder_tester_init(&tester, allocator)); const struct aws_websocket_frame input_frame = { .fin = true, .opcode = 2, .payload_length = 4, }; tester.fail_on_nth_payload = 1; ASSERT_SUCCESS(aws_websocket_encoder_start_frame(&tester.encoder, &input_frame)); ASSERT_FAILS(aws_websocket_encoder_process(&tester.encoder, &tester.out_buf)); /* Check that error returned by callback bubbles up. * UNKNOWN error just happens to be what our test callback throws */ ASSERT_INT_EQUALS(AWS_ERROR_UNKNOWN, aws_last_error()); ASSERT_SUCCESS(s_encoder_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-http/tests/test_websocket_handler.c000066400000000000000000002156351456575232400262530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif /* Use small window so that we can observe it opening in tests. * Channel may wait until the window is small before issuing the increment command. */ static const size_t s_default_initial_window_size = 256; #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) struct written_frame { struct aws_websocket_frame def; struct aws_byte_buf payload; bool is_complete; }; struct incoming_frame { struct aws_websocket_incoming_frame def; struct aws_byte_buf payload; size_t on_payload_count; int on_complete_error_code; bool has_begun; bool is_complete; }; static struct tester_options { bool manual_window_update; } s_tester_options; struct tester { struct aws_allocator *alloc; struct aws_logger logger; void *specific_test_data; struct testing_channel testing_channel; struct aws_websocket *websocket; bool is_midchannel_handler; size_t on_send_complete_count; /* To make the written output of the websocket-handler easier to check, * we translate the written bytes back into `written_frames` using a websocket-decoder. * We're not testing the decoder here, just using it as a tool (decoder tests go in test_websocket_decoder.c). 
*/ struct written_frame written_frames[100]; size_t num_written_frames; size_t num_written_io_messages; struct aws_websocket_decoder written_frame_decoder; /* Frames reported via the websocket's on_incoming_frame callbacks are recorded here */ struct incoming_frame incoming_frames[100]; size_t num_incoming_frames; size_t fail_on_incoming_frame_begin_n; /* If set, return false on Nth incoming_frame_begin callback */ size_t fail_on_incoming_frame_payload_n; /* If set, return false on Nth incoming_frame_payload callback */ size_t fail_on_incoming_frame_complete_n; /* If set, return false on Nth incoming_frame_complete callback */ /* For pushing messages downstream, to be read by websocket handler. * readpush_frame is for tests to define websocket frames to be pushed downstream. * An encoder is used to turn these into proper bits */ struct readpush_frame *readpush_frames; size_t num_readpush_frames; size_t readpush_frame_index; struct aws_websocket_encoder readpush_encoder; /* For pushing messages upstream, to test a websocket that's been converted to midchannel handler. */ size_t num_writepush_messages; struct aws_byte_buf all_writepush_data; /* All data that's been writepushed, concatenated together */ }; /* Helps track the progress of a frame being sent. */ struct send_tester { struct aws_websocket_send_frame_options def; /* some properties are autoconfigured */ struct aws_byte_cursor payload; size_t delay_ticks; /* Don't send anything the first N ticks */ size_t bytes_per_tick; /* Don't send more than N bytes per tick */ size_t send_wrong_payload_amount; /* Everything below this line is auto-configured */ struct tester *owner; struct aws_byte_cursor cursor; /* iterates as payload is written */ size_t on_payload_count; size_t fail_on_nth_payload; /* If set, returns false on Nth callback (1 is first callback)*/ size_t on_complete_count; size_t on_complete_order; /* Order that frame sent, amongst all frames sent this test */ int on_complete_error_code; bool fail_on_complete; /* If true, return false from on_complete callback */ }; struct readpush_frame { struct aws_websocket_frame def; struct aws_byte_cursor payload; /* Everything below this is auto-configured */ struct aws_byte_cursor cursor; /* advances as payload is written */ }; /* Run loop that keeps the websocket-handler chugging. We need this because: * 1) The websocket-handler won't write the next aws_io_message until the preceding one is processed. * 2) The websocket-handler won't finish shutdown until it can write a CLOSE frame. * * Repeat until no more work is being done: * - Drain task queue. * - Decode written aws_io_messages from raw bytes into tester->written_frames[]. * - Mark aws_io_messages completed. */ static int s_drain_written_messages(struct tester *tester) { struct aws_linked_list *io_msgs = testing_channel_get_written_message_queue(&tester->testing_channel); bool still_draining; do { still_draining = false; testing_channel_drain_queued_tasks(&tester->testing_channel); while (!aws_linked_list_empty(io_msgs)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(io_msgs); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); tester->num_written_io_messages++; struct aws_byte_cursor msg_cursor = aws_byte_cursor_from_buf(&msg->message_data); while (msg_cursor.len) { /* Make sure our arbitrarily sized buffer hasn't overflowed. 
*/ ASSERT_TRUE(tester->num_written_frames < AWS_ARRAY_SIZE(tester->written_frames)); bool frame_complete; ASSERT_SUCCESS( aws_websocket_decoder_process(&tester->written_frame_decoder, &msg_cursor, &frame_complete)); if (frame_complete) { tester->written_frames[tester->num_written_frames].is_complete = true; tester->num_written_frames++; } } if (msg->on_completion) { msg->on_completion(tester->testing_channel.channel, msg, 0, msg->user_data); still_draining = true; } aws_mem_release(msg->allocator, msg); } } while (still_draining); return AWS_OP_SUCCESS; } static int s_on_written_frame(const struct aws_websocket_frame *frame, void *user_data) { struct tester *tester = user_data; struct written_frame *written = &tester->written_frames[tester->num_written_frames]; written->def = *frame; if (frame->payload_length) { AWS_FATAL_ASSERT(frame->payload_length <= SIZE_MAX); ASSERT_SUCCESS(aws_byte_buf_init(&written->payload, tester->alloc, (size_t)frame->payload_length)); } return AWS_OP_SUCCESS; } static int s_on_written_frame_payload(struct aws_byte_cursor data, void *user_data) { struct tester *tester = user_data; struct written_frame *written = &tester->written_frames[tester->num_written_frames]; ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&written->payload, data)); return AWS_OP_SUCCESS; } static bool s_on_incoming_frame_begin( struct aws_websocket *websocket, const struct aws_websocket_incoming_frame *frame, void *user_data) { (void)websocket; struct tester *tester = user_data; /* Make sure our arbitrarily-sized testing buffer hasn't overflowed */ AWS_FATAL_ASSERT(tester->num_incoming_frames < AWS_ARRAY_SIZE(tester->incoming_frames)); if (tester->num_incoming_frames > 0) { /* Make sure previous frame was marked complete */ AWS_FATAL_ASSERT(tester->incoming_frames[tester->num_incoming_frames - 1].is_complete); } struct incoming_frame *incoming_frame = &tester->incoming_frames[tester->num_incoming_frames]; AWS_FATAL_ASSERT(!incoming_frame->has_begun); incoming_frame->has_begun = true; incoming_frame->def = *frame; AWS_FATAL_ASSERT(frame->payload_length <= SIZE_MAX); int err = aws_byte_buf_init(&incoming_frame->payload, tester->alloc, (size_t)frame->payload_length); AWS_FATAL_ASSERT(!err); if (tester->fail_on_incoming_frame_begin_n) { AWS_FATAL_ASSERT(tester->num_incoming_frames < tester->fail_on_incoming_frame_begin_n); if ((tester->num_incoming_frames + 1) == tester->fail_on_incoming_frame_begin_n) { return false; } } return true; } static bool s_on_incoming_frame_payload( struct aws_websocket *websocket, const struct aws_websocket_incoming_frame *frame, struct aws_byte_cursor data, void *user_data) { (void)websocket; (void)frame; struct tester *tester = user_data; struct incoming_frame *incoming_frame = &tester->incoming_frames[tester->num_incoming_frames]; AWS_FATAL_ASSERT(incoming_frame->has_begun); AWS_FATAL_ASSERT(!incoming_frame->is_complete); /* buffer was allocated to exact payload length, so write should succeed */ AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_cursor(&incoming_frame->payload, data)); incoming_frame->on_payload_count++; if (tester->fail_on_incoming_frame_payload_n) { AWS_FATAL_ASSERT(incoming_frame->on_payload_count <= tester->fail_on_incoming_frame_payload_n); if (incoming_frame->on_payload_count == tester->fail_on_incoming_frame_payload_n) { return false; } } return true; } static bool s_on_incoming_frame_complete( struct aws_websocket *websocket, const struct aws_websocket_incoming_frame *frame, int error_code, void *user_data) { (void)websocket; (void)frame; 
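/* Record completion of the current incoming frame below; if fail_on_incoming_frame_complete_n is set, the Nth completion callback returns false (see the counter comments in struct tester). */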
struct tester *tester = user_data; struct incoming_frame *incoming_frame = &tester->incoming_frames[tester->num_incoming_frames++]; AWS_FATAL_ASSERT(incoming_frame->has_begun); AWS_FATAL_ASSERT(!incoming_frame->is_complete); incoming_frame->is_complete = true; incoming_frame->on_complete_error_code = error_code; if (error_code == AWS_ERROR_SUCCESS) { AWS_FATAL_ASSERT(incoming_frame->payload.len == incoming_frame->def.payload_length); } if (tester->fail_on_incoming_frame_complete_n) { AWS_FATAL_ASSERT(tester->num_incoming_frames <= tester->fail_on_incoming_frame_complete_n); if (tester->num_incoming_frames == tester->fail_on_incoming_frame_complete_n) { return false; } } return true; } static void s_set_readpush_frames(struct tester *tester, struct readpush_frame *frames, size_t num_frames) { tester->readpush_frames = frames; tester->num_readpush_frames = num_frames; tester->readpush_frame_index = 0; for (size_t i = 0; i < num_frames; ++i) { struct readpush_frame *frame = &frames[i]; frame->cursor = frame->payload; frame->def.payload_length = frame->payload.len; } } static int s_stream_readpush_payload(struct aws_byte_buf *out_buf, void *user_data) { struct tester *tester = user_data; struct readpush_frame *frame = &tester->readpush_frames[tester->readpush_frame_index]; size_t available_bytes = out_buf->capacity - out_buf->len; size_t sending_bytes = available_bytes < frame->cursor.len ? available_bytes : frame->cursor.len; struct aws_byte_cursor sending_cursor = aws_byte_cursor_advance(&frame->cursor, sending_bytes); AWS_FATAL_ASSERT(sending_cursor.len > 0); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(out_buf, sending_cursor)); return AWS_OP_SUCCESS; } /* Options for pushing readpush_frames. Anything set to 0 is treated as "unlimited" */ struct readpush_options { size_t num_frames; /* Stop after pushing this many frames. */ size_t num_bytes; /* Stop after pushing this many total bytes of aws_io_messages */ size_t num_messages; /* Stop after pushing this many aws_io_messages */ size_t message_size; /* Force fragmentation by limiting amount packed into each aws_io_message */ }; /* Encode readpush_frames into aws_io_messages and push those to websocket-handler. */ static int s_do_readpush(struct tester *tester, struct readpush_options options) { const size_t max_frames = options.num_frames ? options.num_frames : SIZE_MAX; const size_t max_bytes = options.num_bytes ? options.num_bytes : SIZE_MAX; const size_t max_messages = options.num_messages ? options.num_messages : SIZE_MAX; const size_t message_size = options.message_size ? options.message_size : (16 * 1024); size_t sum_frames = 0; size_t sum_bytes = 0; size_t sum_messages = 0; bool done = tester->readpush_frame_index >= tester->num_readpush_frames; while (!done) { size_t remaining_bytes = max_bytes - sum_bytes; size_t request_bytes = remaining_bytes < message_size ? 
remaining_bytes : message_size; struct aws_io_message *msg = aws_channel_acquire_message_from_pool( tester->testing_channel.channel, AWS_IO_MESSAGE_APPLICATION_DATA, request_bytes); ASSERT_NOT_NULL(msg); while (!done && (msg->message_data.len < msg->message_data.capacity)) { if (!aws_websocket_encoder_is_frame_in_progress(&tester->readpush_encoder)) { ASSERT_SUCCESS(aws_websocket_encoder_start_frame( &tester->readpush_encoder, &tester->readpush_frames[tester->readpush_frame_index].def)); } ASSERT_SUCCESS(aws_websocket_encoder_process(&tester->readpush_encoder, &msg->message_data)); if (aws_websocket_encoder_is_frame_in_progress(&tester->readpush_encoder)) { /* This function doesn't expect encoder to stop until frame is done or buffer is full */ ASSERT_UINT_EQUALS(msg->message_data.len, msg->message_data.capacity); } else { /* Frame done */ if (++tester->readpush_frame_index >= tester->num_readpush_frames) { done = true; } if (++sum_frames >= max_frames) { done = true; } } } sum_bytes += msg->message_data.len; if (sum_bytes >= max_bytes) { done = true; } if (++sum_messages >= max_messages) { done = true; } ASSERT_SUCCESS(testing_channel_push_read_message(&tester->testing_channel, msg)); } return AWS_OP_SUCCESS; } static int s_do_readpush_all(struct tester *tester) { struct readpush_options options; AWS_ZERO_STRUCT(options); return s_do_readpush(tester, options); } /* Check that a readpush_frame was received by websocket */ static int s_readpush_check(struct tester *tester, size_t frame_i, int expected_error_code) { ASSERT_TRUE(frame_i < tester->num_readpush_frames); struct readpush_frame *pushed = &tester->readpush_frames[frame_i]; struct incoming_frame *received = &tester->incoming_frames[frame_i]; ASSERT_TRUE(received->has_begun); ASSERT_TRUE(received->is_complete); ASSERT_INT_EQUALS(expected_error_code, received->on_complete_error_code); ASSERT_UINT_EQUALS(pushed->def.payload_length, received->def.payload_length); ASSERT_UINT_EQUALS(pushed->def.opcode, received->def.opcode); ASSERT_INT_EQUALS(pushed->def.fin, received->def.fin); if (received->on_complete_error_code == AWS_ERROR_SUCCESS) { ASSERT_UINT_EQUALS(received->def.payload_length, received->payload.len); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&pushed->payload, &received->payload)); } return AWS_OP_SUCCESS; } /* Check that a readpush_frame's payload was passed to the next handler downstream */ static int s_readpush_midchannel_check(struct tester *tester, size_t frame_i) { ASSERT_TRUE(frame_i < tester->num_readpush_frames); struct readpush_frame *pushed = &tester->readpush_frames[frame_i]; struct aws_byte_cursor payload = pushed->payload; struct aws_linked_list *downstream_messages = testing_channel_get_read_message_queue(&tester->testing_channel); while (payload.len > 0) { ASSERT_FALSE(aws_linked_list_empty(downstream_messages)); struct aws_linked_list_node *message_node = aws_linked_list_front(downstream_messages); struct aws_io_message *message = AWS_CONTAINER_OF(message_node, struct aws_io_message, queueing_handle); /* This function might be called multiple times, the copy_mark is used to track where the last check ended */ size_t message_remainder = message->message_data.len - message->copy_mark; size_t compare_bytes = message_remainder < payload.len ? 
message_remainder : payload.len; struct aws_byte_cursor message_chunk = aws_byte_cursor_from_array(message->message_data.buffer + message->copy_mark, compare_bytes); struct aws_byte_cursor payload_chunk = aws_byte_cursor_advance(&payload, compare_bytes); ASSERT_TRUE(aws_byte_cursor_eq(&message_chunk, &payload_chunk)); message->copy_mark += compare_bytes; if (message->copy_mark == message->message_data.len) { aws_linked_list_pop_front(downstream_messages); aws_mem_release(message->allocator, message); } } return AWS_OP_SUCCESS; } static int s_writepush(struct tester *tester, struct aws_byte_cursor data) { if (!tester->all_writepush_data.allocator) { ASSERT_SUCCESS(aws_byte_buf_init(&tester->all_writepush_data, tester->alloc, data.len)); } while (data.len) { /* Ask for slightly more data than we need so that capacity != length. * This is to repro a bug where capacity and length were confused */ size_t size_hint = data.len + 1; struct aws_io_message *msg = aws_channel_acquire_message_from_pool( tester->testing_channel.channel, AWS_IO_MESSAGE_APPLICATION_DATA, size_hint); ASSERT_NOT_NULL(msg); size_t chunk_size = msg->message_data.capacity < data.len ? msg->message_data.capacity : data.len; struct aws_byte_cursor chunk = aws_byte_cursor_advance(&data, chunk_size); ASSERT_NOT_NULL(chunk.ptr); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&msg->message_data, chunk)); ASSERT_SUCCESS(testing_channel_push_write_message(&tester->testing_channel, msg)); /* Update tracking data in tester */ tester->num_writepush_messages++; ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&tester->all_writepush_data, &chunk)); } return AWS_OP_SUCCESS; } /* Scan all written_frames, and ensure that payloads of the binary frames match data */ static int s_writepush_check(struct tester *tester, size_t ignore_n_written_frames) { struct aws_byte_cursor expected_cursor = aws_byte_cursor_from_buf(&tester->all_writepush_data); for (size_t i = ignore_n_written_frames; i < tester->num_written_frames; ++i) { struct written_frame *frame_i = &tester->written_frames[i]; if (aws_websocket_is_data_frame(frame_i->def.opcode)) { ASSERT_UINT_EQUALS(AWS_WEBSOCKET_OPCODE_BINARY, frame_i->def.opcode); struct aws_byte_cursor expected_i = aws_byte_cursor_advance(&expected_cursor, (size_t)frame_i->def.payload_length); ASSERT_TRUE(expected_i.len > 0); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&expected_i, &frame_i->payload)); } } ASSERT_UINT_EQUALS(0, expected_cursor.len); return AWS_OP_SUCCESS; } static int s_tester_init(struct tester *tester, struct aws_allocator *alloc) { aws_http_library_init(alloc); AWS_ZERO_STRUCT(*tester); tester->alloc = alloc; struct aws_logger_standard_options logger_options = { .level = AWS_LOG_LEVEL_TRACE, .file = stderr, }; ASSERT_SUCCESS(aws_logger_init_standard(&tester->logger, tester->alloc, &logger_options)); aws_logger_set(&tester->logger); struct aws_testing_channel_options test_channel_options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&tester->testing_channel, alloc, &test_channel_options)); struct aws_websocket_handler_options ws_options = { .allocator = alloc, .channel = tester->testing_channel.channel, .initial_window_size = s_default_initial_window_size, .user_data = tester, .on_incoming_frame_begin = s_on_incoming_frame_begin, .on_incoming_frame_payload = s_on_incoming_frame_payload, .on_incoming_frame_complete = s_on_incoming_frame_complete, .manual_window_update = s_tester_options.manual_window_update, }; tester->websocket = aws_websocket_handler_new(&ws_options); 
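/* The websocket handler is installed directly on the testing channel here (no real connection upgrade is involved); bytes it writes are later decoded back into tester->written_frames for verification via the decoder initialized below. */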
ASSERT_NOT_NULL(tester->websocket); testing_channel_drain_queued_tasks(&tester->testing_channel); aws_websocket_decoder_init( &tester->written_frame_decoder, alloc, s_on_written_frame, s_on_written_frame_payload, tester); aws_websocket_encoder_init(&tester->readpush_encoder, s_stream_readpush_payload, tester); return AWS_OP_SUCCESS; } static int s_tester_clean_up(struct tester *tester) { aws_websocket_release(tester->websocket); ASSERT_SUCCESS(s_drain_written_messages(tester)); ASSERT_SUCCESS(testing_channel_clean_up(&tester->testing_channel)); for (size_t i = 0; i < AWS_ARRAY_SIZE(tester->written_frames); ++i) { aws_byte_buf_clean_up(&tester->written_frames[i].payload); } for (size_t i = 0; i < AWS_ARRAY_SIZE(tester->incoming_frames); ++i) { aws_byte_buf_clean_up(&tester->incoming_frames[i].payload); } aws_byte_buf_clean_up(&tester->all_writepush_data); aws_websocket_decoder_clean_up(&tester->written_frame_decoder); aws_http_library_clean_up(); aws_logger_clean_up(&tester->logger); return AWS_OP_SUCCESS; } static int s_install_downstream_handler(struct tester *tester, size_t initial_window) { ASSERT_SUCCESS(aws_websocket_convert_to_midchannel_handler(tester->websocket)); tester->is_midchannel_handler = true; ASSERT_SUCCESS(testing_channel_install_downstream_handler(&tester->testing_channel, initial_window)); testing_channel_drain_queued_tasks(&tester->testing_channel); return AWS_OP_SUCCESS; } static bool s_on_stream_outgoing_payload( struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data) { struct send_tester *send_tester = user_data; AWS_FATAL_ASSERT(websocket == send_tester->owner->websocket); /* If user wants frame to break websocket, write an extra byte */ if (send_tester->send_wrong_payload_amount && (send_tester->on_payload_count == 0)) { aws_byte_buf_write_u8(out_buf, 'X'); } send_tester->on_payload_count++; size_t space_available = out_buf->capacity - out_buf->len; size_t bytes_max = send_tester->cursor.len; if (send_tester->delay_ticks > 0) { bytes_max = 0; send_tester->delay_ticks--; } else if (send_tester->bytes_per_tick > 0) { bytes_max = bytes_max < send_tester->bytes_per_tick ? bytes_max : send_tester->bytes_per_tick; } size_t amount_to_send = bytes_max < space_available ? 
bytes_max : space_available; struct aws_byte_cursor send_cursor = aws_byte_cursor_advance(&send_tester->cursor, amount_to_send); if (send_cursor.len) { aws_byte_buf_write_from_whole_cursor(out_buf, send_cursor); } if (send_tester->fail_on_nth_payload) { AWS_FATAL_ASSERT(send_tester->on_payload_count <= send_tester->fail_on_nth_payload); if (send_tester->on_payload_count == send_tester->fail_on_nth_payload) { return false; } } return true; } static void s_on_outgoing_frame_complete(struct aws_websocket *websocket, int error_code, void *user_data) { struct send_tester *send_tester = user_data; AWS_FATAL_ASSERT(websocket == send_tester->owner->websocket); send_tester->on_complete_error_code = error_code; AWS_FATAL_ASSERT(send_tester->on_complete_count == 0); send_tester->on_complete_count++; send_tester->on_complete_order = send_tester->owner->on_send_complete_count; send_tester->owner->on_send_complete_count++; } static int s_send_frame_ex(struct tester *tester, struct send_tester *send_tester, bool assert_on_error) { send_tester->owner = tester; send_tester->cursor = send_tester->payload; send_tester->def.payload_length = send_tester->payload.len; send_tester->def.stream_outgoing_payload = s_on_stream_outgoing_payload; send_tester->def.on_complete = s_on_outgoing_frame_complete; send_tester->def.user_data = send_tester; if (assert_on_error) { ASSERT_SUCCESS(aws_websocket_send_frame(tester->websocket, &send_tester->def)); return AWS_OP_SUCCESS; } else { return aws_websocket_send_frame(tester->websocket, &send_tester->def); } } static int s_send_frame(struct tester *tester, struct send_tester *send_tester) { return s_send_frame_ex(tester, send_tester, true); } static int s_send_frame_no_assert(struct tester *tester, struct send_tester *send_tester) { return s_send_frame_ex(tester, send_tester, false); } static int s_check_written_message(struct send_tester *send, size_t expected_order) { struct tester *tester = send->owner; ASSERT_UINT_EQUALS(1, send->on_complete_count); ASSERT_UINT_EQUALS(expected_order, send->on_complete_order); ASSERT_TRUE(expected_order < tester->num_written_frames); struct written_frame *written = &tester->written_frames[expected_order]; ASSERT_TRUE(written->is_complete); ASSERT_UINT_EQUALS(send->def.opcode, written->def.opcode); ASSERT_UINT_EQUALS(send->def.payload_length, written->def.payload_length); ASSERT_INT_EQUALS(send->def.fin, written->def.fin); /* All payloads sent from client should have been masked (assuming client is being tested here) */ ASSERT_TRUE(written->def.masked); if (written->def.masked) { bool valid_masking_key = false; for (int i = 0; i < 4; i++) { if (written->def.masking_key[i]) { valid_masking_key = true; } } ASSERT_TRUE(valid_masking_key); } /* If payload was masked, decoder already unmasked it for us, so we can directly compare contents here */ ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&send->payload, &written->payload)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_sanity_check) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_refcounting) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* acquire() and then release() a refcount. 
The websocket should not shut down yet */ ASSERT_PTR_EQUALS(tester.websocket, aws_websocket_acquire(tester.websocket)); aws_websocket_release(tester.websocket); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_FALSE(tester.testing_channel.channel_shutdown_completed); /* should be safe to call release() on NULL */ aws_websocket_release(NULL); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } static int s_websocket_handler_send_frame_common(struct aws_allocator *allocator, bool on_thread) { struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); const char *payload = "Shall I come in to cut off your threads?"; struct send_tester send = { .payload = aws_byte_cursor_from_c_str(payload), .def = { .opcode = AWS_WEBSOCKET_OPCODE_PING, .fin = true, }, }; testing_channel_set_is_on_users_thread(&tester.testing_channel, on_thread); ASSERT_SUCCESS(s_send_frame(&tester, &send)); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_SUCCESS(s_check_written_message(&send, 0)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_send_frame) { (void)ctx; return s_websocket_handler_send_frame_common(allocator, true); } TEST_CASE(websocket_handler_send_frame_off_thread) { (void)ctx; return s_websocket_handler_send_frame_common(allocator, false); } TEST_CASE(websocket_handler_send_multiple_frames) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct send_tester sending[] = { { .payload = aws_byte_cursor_from_c_str("Wee Willie Winkie runs through the town."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("Upstairs and downstairs in his nightgown."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("Rapping at the window, crying through the lock."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = false, }, }, { .def = { .opcode = AWS_WEBSOCKET_OPCODE_PING, .fin = true, }, }, { .payload = aws_byte_cursor_from_c_str("Are the children all in bed, for now it's eight o'clock?"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = true, }, }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_send_frame(&tester, &sending[i])); } ASSERT_SUCCESS(s_drain_written_messages(&tester)); for (size_t i = 0; i < AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_check_written_message(&sending[i], i)); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_send_huge_frame) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* transmit giant buffer with random contents */ struct aws_byte_buf giant_buf; ASSERT_SUCCESS(aws_byte_buf_init(&giant_buf, allocator, 100000)); while (aws_byte_buf_write_be32(&giant_buf, (uint32_t)rand())) { } while (aws_byte_buf_write_u8(&giant_buf, (uint8_t)rand())) { } struct send_tester sending[] = { { .payload = aws_byte_cursor_from_c_str("Little frame before big one."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_BINARY, .fin = false, }, }, { .payload = aws_byte_cursor_from_buf(&giant_buf), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("Little frame after big one."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = true, }, }, }; for (size_t i = 0; i < 
AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_send_frame(&tester, &sending[i])); } ASSERT_SUCCESS(s_drain_written_messages(&tester)); for (size_t i = 0; i < AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_check_written_message(&sending[i], i)); } /* Ensure this was actually big enough to be split across aws_io_messages */ ASSERT_TRUE(sending[1].on_payload_count > 1); aws_byte_buf_clean_up(&giant_buf); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_send_payload_slowly) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct send_tester sending[] = { { .payload = aws_byte_cursor_from_c_str("quick A."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("s l o o w w w l l y B."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = false, }, .bytes_per_tick = 1, }, { .payload = aws_byte_cursor_from_c_str("quick C."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = true, }, }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_send_frame(&tester, &sending[i])); } ASSERT_SUCCESS(s_drain_written_messages(&tester)); for (size_t i = 0; i < AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_check_written_message(&sending[i], i)); } /* Ensure this test really did send data over multiple callbacks */ ASSERT_TRUE(sending[1].on_payload_count > 1); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_send_payload_with_pauses) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct send_tester sending = { .payload = aws_byte_cursor_from_c_str("delayed B."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, .delay_ticks = 5, }; ASSERT_SUCCESS(s_send_frame(&tester, &sending)); ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_SUCCESS(s_check_written_message(&sending, 0)); /* Ensure this test really did send data over multiple callbacks */ ASSERT_TRUE(sending.on_payload_count > 1); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_sends_nothing_after_close_frame) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct send_tester sending[] = { { .payload = aws_byte_cursor_from_c_str("Last text frame"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, { .def = { .opcode = AWS_WEBSOCKET_OPCODE_CLOSE, .fin = true, }, }, { .payload = aws_byte_cursor_from_c_str("Should not be sent."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, }; /* Ensure these frames are queued and processed later */ testing_channel_set_is_on_users_thread(&tester.testing_channel, false); for (size_t i = 0; i < AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_send_frame(&tester, &sending[i])); } testing_channel_set_is_on_users_thread(&tester.testing_channel, true); ASSERT_SUCCESS(s_drain_written_messages(&tester)); /* Ensure that only 1st frame and CLOSE frame were written*/ ASSERT_UINT_EQUALS(2, tester.num_written_frames); ASSERT_SUCCESS(s_check_written_message(&sending[0], 0)); ASSERT_SUCCESS(s_check_written_message(&sending[1], 1)); /* Ensure no more frames written during shutdown */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_UINT_EQUALS(2, tester.num_written_frames); /* Ensure 3rd frame completed with error code */ ASSERT_UINT_EQUALS(1, 
sending[2].on_complete_count); ASSERT_TRUE(sending[2].on_complete_error_code != AWS_ERROR_SUCCESS); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Send a frame while the handler is in every conceivable state. * Ensure that the completion callback always fires. */ TEST_CASE(websocket_handler_send_frames_always_complete) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); enum { ON_THREAD_BEFORE_CLOSE, OFF_THREAD_BEFORE_CLOSE, CLOSE, ON_THREAD_AFTER_CLOSE, OFF_THREAD_AFTER_CLOSE, ON_THREAD_DURING_SHUTDOWN, OFF_THREAD_DURING_SHUTDOWN, ON_THREAD_AFTER_SHUTDOWN, OFF_THREAD_AFTER_SHUTDOWN, COUNT, }; struct send_tester sending[COUNT]; memset(sending, 0, sizeof(sending)); for (int i = 0; i < COUNT; ++i) { struct send_tester *send = &sending[i]; send->def.opcode = (i == CLOSE) ? AWS_WEBSOCKET_OPCODE_CLOSE : AWS_WEBSOCKET_OPCODE_PING; send->def.fin = true; } int sending_err[AWS_ARRAY_SIZE(sending)]; /* Start sending frames */ sending_err[ON_THREAD_BEFORE_CLOSE] = s_send_frame_no_assert(&tester, &sending[ON_THREAD_BEFORE_CLOSE]); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); sending_err[OFF_THREAD_BEFORE_CLOSE] = s_send_frame_no_assert(&tester, &sending[OFF_THREAD_BEFORE_CLOSE]); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); /* Send close frame */ sending_err[CLOSE] = s_send_frame_no_assert(&tester, &sending[CLOSE]); sending_err[ON_THREAD_AFTER_CLOSE] = s_send_frame_no_assert(&tester, &sending[ON_THREAD_AFTER_CLOSE]); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); sending_err[OFF_THREAD_AFTER_CLOSE] = s_send_frame_no_assert(&tester, &sending[OFF_THREAD_AFTER_CLOSE]); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); /* Issue channel shutdown */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); sending_err[ON_THREAD_DURING_SHUTDOWN] = s_send_frame_no_assert(&tester, &sending[ON_THREAD_DURING_SHUTDOWN]); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); sending_err[OFF_THREAD_DURING_SHUTDOWN] = s_send_frame_no_assert(&tester, &sending[OFF_THREAD_DURING_SHUTDOWN]); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); /* Wait for shutdown to complete */ ASSERT_SUCCESS(s_drain_written_messages(&tester)); /* Try to send even more frames */ sending_err[ON_THREAD_AFTER_SHUTDOWN] = s_send_frame_no_assert(&tester, &sending[ON_THREAD_AFTER_SHUTDOWN]); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); sending_err[OFF_THREAD_AFTER_SHUTDOWN] = s_send_frame_no_assert(&tester, &sending[OFF_THREAD_AFTER_SHUTDOWN]); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); /* Check that each send() failed immediately, or had its completion callback invoked. 
*/ ASSERT_SUCCESS(s_drain_written_messages(&tester)); for (int i = 0; i < COUNT; ++i) { if (sending_err[i] == AWS_OP_SUCCESS) { ASSERT_UINT_EQUALS(1, sending[i].on_complete_count); } } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_send_one_io_msg_at_a_time) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("bitter butter."); const size_t count = 10000; struct send_tester *sending = aws_mem_acquire(allocator, sizeof(struct send_tester) * count); ASSERT_NOT_NULL(sending); memset(sending, 0, sizeof(struct send_tester) * count); for (size_t i = 0; i < count; ++i) { struct send_tester *send = &sending[i]; send->payload = payload; send->def.opcode = AWS_WEBSOCKET_OPCODE_TEXT; send->def.fin = true; ASSERT_SUCCESS(s_send_frame(&tester, send)); } /* Turn off instant write completion */ testing_channel_complete_written_messages_immediately(&tester.testing_channel, false, AWS_OP_SUCCESS); /* Repeatedly drain event loop and ensure that only 1 aws_io_message is written */ struct aws_linked_list *io_msgs = testing_channel_get_written_message_queue(&tester.testing_channel); size_t total_io_msg_count = 0; while (true) { testing_channel_drain_queued_tasks(&tester.testing_channel); if (aws_linked_list_empty(io_msgs)) { break; } total_io_msg_count++; struct aws_linked_list_node *node = aws_linked_list_pop_front(io_msgs); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); ASSERT_TRUE(aws_linked_list_empty(io_msgs)); /* Only 1 aws_io_message should be in the channel at a time */ if (msg->on_completion) { msg->on_completion(tester.testing_channel.channel, msg, AWS_ERROR_SUCCESS, msg->user_data); } aws_mem_release(msg->allocator, msg); } /* Assert that every frame sent */ ASSERT_UINT_EQUALS(1, sending[count - 1].on_complete_count); /* Assert this test actually involved several aws_io_messages */ ASSERT_TRUE(total_io_msg_count >= 3); ASSERT_SUCCESS(s_tester_clean_up(&tester)); aws_mem_release(allocator, sending); return AWS_OP_SUCCESS; } /* * Verifies that the write completion callbacks for websocket frames are not invoked immediately after relaying * towards the left end (socket) of the channel */ TEST_CASE(websocket_handler_delayed_write_completion) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("bitter butter."); const size_t count = 2; struct send_tester *sending = aws_mem_acquire(allocator, sizeof(struct send_tester) * count); ASSERT_NOT_NULL(sending); memset(sending, 0, sizeof(struct send_tester) * count); for (size_t i = 0; i < count; ++i) { struct send_tester *send = &sending[i]; send->payload = payload; send->def.opcode = AWS_WEBSOCKET_OPCODE_TEXT; send->def.fin = true; ASSERT_SUCCESS(s_send_frame(&tester, send)); } /* Turn off instant write completion and run the channel */ testing_channel_complete_written_messages_immediately(&tester.testing_channel, false, AWS_OP_SUCCESS); testing_channel_drain_queued_tasks(&tester.testing_channel); struct aws_linked_list *io_msgs = testing_channel_get_written_message_queue(&tester.testing_channel); ASSERT_FALSE(aws_linked_list_empty(io_msgs)); struct aws_linked_list_node *node = aws_linked_list_pop_front(io_msgs); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); /* we've relayed the frames, but no frame write completions should have 
been invoked */ for (size_t i = 0; i < count; ++i) { struct send_tester *send = &sending[i]; ASSERT_UINT_EQUALS(0, send->on_complete_count); } /* manually invoke the write completion on the downstream io message */ msg->on_completion(tester.testing_channel.channel, msg, AWS_ERROR_SUCCESS, msg->user_data); aws_mem_release(msg->allocator, msg); /* now all frame write completions should have been invoked exactly once */ for (size_t i = 0; i < count; ++i) { struct send_tester *send = &sending[i]; ASSERT_UINT_EQUALS(1, send->on_complete_count); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); aws_mem_release(allocator, sending); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_send_halts_if_payload_fn_returns_false) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct send_tester sending[] = { { /* Sending should halt after 1st frame sends 1byte of payload */ .payload = aws_byte_cursor_from_c_str("Stop"), .fail_on_nth_payload = 1, .bytes_per_tick = 1, .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, { .payload = aws_byte_cursor_from_c_str("Should never send"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(sending); ++i) { ASSERT_SUCCESS(s_send_frame(&tester, &sending[i])); } ASSERT_SUCCESS(s_drain_written_messages(&tester)); /* Check that frame stopped processing */ ASSERT_UINT_EQUALS(1, sending[0].on_payload_count); ASSERT_UINT_EQUALS(1, sending[0].on_complete_count); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CALLBACK_FAILURE, sending[0].on_complete_error_code); /* The websocket should close when a callback returns false */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); /* Other send should have been cancelled without its payload callback ever being invoked */ ASSERT_UINT_EQUALS(0, sending[1].on_payload_count); ASSERT_UINT_EQUALS(1, sending[1].on_complete_count); ASSERT_INT_EQUALS(AWS_ERROR_HTTP_CONNECTION_CLOSED, sending[1].on_complete_error_code); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_shutdown_automatically_sends_close_frame) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Shutdown channel normally */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); ASSERT_SUCCESS(s_drain_written_messages(&tester)); /* Check that CLOSE frame written */ ASSERT_UINT_EQUALS(AWS_WEBSOCKET_OPCODE_CLOSE, tester.written_frames[0].def.opcode); ASSERT_TRUE(tester.written_frames[0].is_complete); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* Ensure that, if user had queued their own CLOSE frame before shutdown, * The user frame is the only one that gets written. */ TEST_CASE(websocket_handler_shutdown_handles_queued_close_frame) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Try to make it so we issue channel-shutdown while user CLOSE frame is mid-send. 
* We use the "payload delay" feature in the `send_tester` struct */ uint8_t payload_bytes[] = {0x01, 0x02}; struct send_tester send = { .payload = aws_byte_cursor_from_array(payload_bytes, sizeof(payload_bytes)), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CLOSE, .fin = true, }, .delay_ticks = 5, }; ASSERT_SUCCESS(s_send_frame(&tester, &send)); /* Assert that test has one aws_io_message written, containing a partially sent frame */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); ASSERT_TRUE(send.on_payload_count > 0); ASSERT_UINT_EQUALS(0, send.on_complete_count); /* Shutdown channel normally */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); /* Check that user's CLOSE frame was written, and nothing further */ ASSERT_SUCCESS(s_check_written_message(&send, 0)); ASSERT_UINT_EQUALS(1, tester.num_written_frames); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_shutdown_immediately_in_emergency) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Try to make it so we issue channel-shutdown while a frame is mid-send. * We use the "payload delay" feature in the `send_tester` struct */ struct send_tester send = { .payload = aws_byte_cursor_from_c_str("delayed payload"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, .delay_ticks = 15, }; ASSERT_SUCCESS(s_send_frame(&tester, &send)); /* Assert that test is issuing shutdown while frame is partially written */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); ASSERT_TRUE(send.on_payload_count > 0); ASSERT_UINT_EQUALS(0, send.on_complete_count); /* Shutdown channel with error code, which should result in IMMEDIATE style shutdown */ aws_channel_shutdown(tester.testing_channel.channel, AWS_IO_SOCKET_CLOSED); ASSERT_SUCCESS(s_drain_written_messages(&tester)); /* Ensure shutdown is complete at this point*/ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); /* Frame should not have sent completely, no CLOSE frame should have been sent either */ ASSERT_UINT_EQUALS(1, send.on_complete_count); ASSERT_TRUE(send.on_complete_error_code != AWS_ERROR_SUCCESS); ASSERT_UINT_EQUALS(0, tester.num_written_frames); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } /* During normal shutdown, the websocket delays until a CLOSE frame can be sent. * This test checks that, if unexpected errors occur during that waiting period, shutdown doesn't hang forever */ TEST_CASE(websocket_handler_shutdown_handles_unexpected_write_error) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Queue a frame that delays a while, and then breaks the websocket entirely. */ struct send_tester send = { .payload = aws_byte_cursor_from_c_str("bad frame"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, .delay_ticks = 15, .send_wrong_payload_amount = 1, }; ASSERT_SUCCESS(s_send_frame(&tester, &send)); /* Assert that test is issuing shutdown while frame is partially written */ testing_channel_run_currently_queued_tasks(&tester.testing_channel); ASSERT_TRUE(send.on_payload_count > 0); ASSERT_UINT_EQUALS(0, send.on_complete_count); /* Shutdown channel normally, which should cause the websocket to queue a CLOSE frame and wait until it's sent. 
*/ aws_channel_shutdown(tester.testing_channel.channel, AWS_IO_SOCKET_CLOSED); /* Wait for shutdown to complete */ ASSERT_SUCCESS(s_drain_written_messages(&tester)); /* Assert that test did actually experience a write error */ ASSERT_UINT_EQUALS(1, send.on_complete_count); ASSERT_TRUE(send.on_complete_error_code != AWS_ERROR_SUCCESS); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_close_on_thread) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); aws_websocket_close(tester.websocket, false); ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_close_off_thread) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); testing_channel_set_is_on_users_thread(&tester.testing_channel, false); aws_websocket_close(tester.websocket, false); testing_channel_set_is_on_users_thread(&tester.testing_channel, true); ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_read_frame) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("guten morgen"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, }; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); s_do_readpush_all(&tester); testing_channel_drain_queued_tasks(&tester.testing_channel); for (size_t i = 0; i < AWS_ARRAY_SIZE(pushing); ++i) { ASSERT_SUCCESS(s_readpush_check(&tester, i, AWS_ERROR_SUCCESS)); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_read_multiple_frames) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("Uno."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("Dos."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("Tres."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = true, }, }, }; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); ASSERT_SUCCESS(s_do_readpush_all(&tester)); testing_channel_drain_queued_tasks(&tester.testing_channel); for (size_t i = 0; i < AWS_ARRAY_SIZE(pushing); ++i) { ASSERT_SUCCESS(s_readpush_check(&tester, i, AWS_ERROR_SUCCESS)); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_read_frames_split_across_io_messages) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("As dry leaves that before the wild hurricane fly,"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("when they meet with an obstacle,"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = false, }, }, { .payload = aws_byte_cursor_from_c_str("mount to the sky"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = true, }, }, }; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); /* Send 1 byte at a 
time to ensure we can tolerate frames split across multiple aws_io_messages */ struct readpush_options options = {.message_size = 1}; ASSERT_SUCCESS(s_do_readpush(&tester, options)); testing_channel_drain_queued_tasks(&tester.testing_channel); for (size_t i = 0; i < AWS_ARRAY_SIZE(pushing); ++i) { ASSERT_SUCCESS(s_readpush_check(&tester, i, AWS_ERROR_SUCCESS)); } ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_read_frames_complete_on_shutdown) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("This frame will not be completely sent."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, }; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); /* Push most, but not all, of a frame */ struct readpush_options options = { .num_bytes = (size_t)(aws_websocket_frame_encoded_size(&pushing[0].def) - 1), }; s_do_readpush(&tester, options); /* Shut down channel */ aws_channel_shutdown(tester.testing_channel.channel, AWS_ERROR_SUCCESS); s_drain_written_messages(&tester); /* Check that completion callbacks fired */ ASSERT_SUCCESS(s_readpush_check(&tester, 0, AWS_ERROR_HTTP_CONNECTION_CLOSED)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_read_halts_if_begin_fn_returns_false) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("Fail on frame begin."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, { .payload = aws_byte_cursor_from_c_str("This frame should never get read."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, }; tester.fail_on_incoming_frame_begin_n = 1; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); ASSERT_SUCCESS(s_do_readpush_all(&tester)); s_drain_written_messages(&tester); /* First frame should have completed immediately with an error */ ASSERT_SUCCESS(s_readpush_check(&tester, 0, AWS_ERROR_HTTP_CALLBACK_FAILURE)); ASSERT_UINT_EQUALS(0, tester.incoming_frames[0].on_payload_count); /* No further frames should have been read */ ASSERT_UINT_EQUALS(1, tester.num_incoming_frames); /* Callback failure should have caused connection to close */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_read_halts_if_payload_fn_returns_false) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("Fail on payload."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, { .payload = aws_byte_cursor_from_c_str("This frame should never get read."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, }; /* Return false from 1st on_payload callback. 
*/ tester.fail_on_incoming_frame_payload_n = 1; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); ASSERT_SUCCESS(s_do_readpush_all(&tester)); s_drain_written_messages(&tester); /* First frame should complete with error */ ASSERT_SUCCESS(s_readpush_check(&tester, 0, AWS_ERROR_HTTP_CALLBACK_FAILURE)); ASSERT_UINT_EQUALS(1, tester.incoming_frames[0].on_payload_count); /* No further frames should have been read */ ASSERT_UINT_EQUALS(1, tester.num_incoming_frames); /* Callback failure should have caused connection to close */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_read_halts_if_complete_fn_returns_false) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("Fail on completion."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, { .payload = aws_byte_cursor_from_c_str("This frame should never get read."), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }, }; /* Return false when 1st frame's on_complete callback */ tester.fail_on_incoming_frame_complete_n = 1; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); ASSERT_SUCCESS(s_do_readpush_all(&tester)); s_drain_written_messages(&tester); /* First frame should have succeeded */ ASSERT_SUCCESS(s_readpush_check(&tester, 0, AWS_ERROR_SUCCESS)); /* No further frames should have been read */ ASSERT_UINT_EQUALS(1, tester.num_incoming_frames); /* Callback failure should have caused connection to close */ ASSERT_TRUE(testing_channel_is_shutdown_completed(&tester.testing_channel)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } static int s_window_manual_increment_common(struct aws_allocator *allocator, bool on_thread) { struct tester tester; s_tester_options.manual_window_update = true; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Push "data" frame to websocket */ struct readpush_frame pushing = { .payload = aws_byte_cursor_from_c_str("Shrink, then open"), .def = { .opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = true, }, }; s_set_readpush_frames(&tester, &pushing, 1); ASSERT_SUCCESS(s_do_readpush_all(&tester)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Assert that window did not fully re-open*/ uint64_t frame_minus_payload_size = aws_websocket_frame_encoded_size(&pushing.def) - pushing.def.payload_length; ASSERT_UINT_EQUALS(frame_minus_payload_size, testing_channel_last_window_update(&tester.testing_channel)); /* Manually increment window */ testing_channel_set_is_on_users_thread(&tester.testing_channel, on_thread); aws_websocket_increment_read_window(tester.websocket, (size_t)pushing.def.payload_length); /* Assert it re-opened that much */ testing_channel_set_is_on_users_thread(&tester.testing_channel, true); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_UINT_EQUALS(pushing.def.payload_length, testing_channel_last_window_update(&tester.testing_channel)); /* Now push a control frame, and ensure the window automatically re-opens by the whole amount */ struct readpush_frame pushing_control_frame = { .payload = aws_byte_cursor_from_c_str("free data"), .def = {.opcode = AWS_WEBSOCKET_OPCODE_PONG, .fin = true}, }; s_set_readpush_frames(&tester, &pushing_control_frame, 1); ASSERT_SUCCESS(s_do_readpush_all(&tester)); testing_channel_drain_queued_tasks(&tester.testing_channel); 
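/*
 * Illustrative aside (not part of the original test suite): a minimal sketch of how application code
 * might drive the manual read-window behavior exercised by this test. The connection-option name and
 * callback wiring below are assumptions for illustration; only aws_websocket_increment_read_window()
 * is taken directly from the calls used in this file.
 *
 *   // Assumed: the websocket was created with manual window management enabled
 *   // (e.g. a `manual_window_management` flag on the connection options), so the
 *   // read window only re-opens when the application explicitly asks for more data.
 *   static bool s_example_on_incoming_frame_payload(
 *       struct aws_websocket *websocket,
 *       const struct aws_websocket_incoming_frame *frame,
 *       struct aws_byte_cursor data,
 *       void *user_data) {
 *
 *       (void)frame;
 *       (void)user_data;
 *
 *       // Hand `data` off to the application here...
 *
 *       // ...then, once it has been consumed, re-open the window by the number of
 *       // payload bytes received so the peer is allowed to send more.
 *       aws_websocket_increment_read_window(websocket, data.len);
 *
 *       return true; // returning false would begin shutting the websocket down
 *   }
 */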
ASSERT_UINT_EQUALS( aws_websocket_frame_encoded_size(&pushing_control_frame.def), testing_channel_last_window_update(&tester.testing_channel)); /* Done */ ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_window_manual_increment) { (void)ctx; return s_window_manual_increment_common(allocator, true); } TEST_CASE(websocket_handler_window_manual_increment_off_thread) { (void)ctx; return s_window_manual_increment_common(allocator, false); } TEST_CASE(websocket_midchannel_sanity_check) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_midchannel_write_message) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); /* Write data */ struct aws_byte_cursor writing = aws_byte_cursor_from_c_str("My hat it has three corners"); ASSERT_SUCCESS(s_writepush(&tester, writing)); /* Compare results */ ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_SUCCESS(s_writepush_check(&tester, 0)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_midchannel_write_multiple_messages) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); struct aws_byte_cursor writing[] = { aws_byte_cursor_from_c_str("My hat it has three corners."), aws_byte_cursor_from_c_str("Three corners has my hat."), aws_byte_cursor_from_c_str("And had it not three corners, it would not be my hat."), }; /* Write data */ for (size_t i = 0; i < AWS_ARRAY_SIZE(writing); ++i) { ASSERT_SUCCESS(s_writepush(&tester, writing[i])); } /* Compare results */ ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_SUCCESS(s_writepush_check(&tester, 0)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_midchannel_write_huge_message) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); /* Fill big buffer with random data */ struct aws_byte_buf writing; ASSERT_SUCCESS(aws_byte_buf_init(&writing, allocator, 1000000)); while (aws_byte_buf_write_be32(&writing, (uint32_t)rand())) { } while (aws_byte_buf_write_u8(&writing, (uint8_t)rand())) { } /* Send as multiple aws_io_messages that are as full as they can be */ ASSERT_SUCCESS(s_writepush(&tester, aws_byte_cursor_from_buf(&writing))); /* Compare results */ ASSERT_SUCCESS(s_drain_written_messages(&tester)); ASSERT_TRUE(tester.num_written_io_messages > 1); /* Assert that message was huge enough to stress limits */ ASSERT_SUCCESS(s_writepush_check(&tester, 0)); aws_byte_buf_clean_up(&writing); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_sends_pong_automatically) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Read PING with a payload */ struct readpush_frame read_ping_with_payload = { .def = { .opcode = AWS_WEBSOCKET_OPCODE_PING, .fin = true, }, .payload = aws_byte_cursor_from_c_str("echo me pls"), }; s_set_readpush_frames(&tester, &read_ping_with_payload, 1); ASSERT_SUCCESS(s_do_readpush_all(&tester)); /* Check that PONG is automatically 
written, with payload echoing the PING */ s_drain_written_messages(&tester); const struct written_frame *written_frame = &tester.written_frames[0]; ASSERT_UINT_EQUALS(AWS_WEBSOCKET_OPCODE_PONG, written_frame->def.opcode); ASSERT_TRUE(written_frame->is_complete); ASSERT_BIN_ARRAYS_EQUALS( read_ping_with_payload.payload.ptr, read_ping_with_payload.payload.len, written_frame->payload.buffer, written_frame->payload.len); /* Read PING with empty payload */ struct readpush_frame read_ping_with_empty_payload = { .def = { .opcode = AWS_WEBSOCKET_OPCODE_PING, .fin = true, }, }; s_set_readpush_frames(&tester, &read_ping_with_empty_payload, 1); ASSERT_SUCCESS(s_do_readpush_all(&tester)); /* Check that PONG with empty payload is automatically written */ s_drain_written_messages(&tester); written_frame = &tester.written_frames[1]; ASSERT_UINT_EQUALS(AWS_WEBSOCKET_OPCODE_PONG, written_frame->def.opcode); ASSERT_TRUE(written_frame->is_complete); ASSERT_UINT_EQUALS(0, written_frame->payload.len); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_handler_wont_send_pong_after_close_frame) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); /* Send a CLOSE frame */ struct send_tester send_close = { .def = { .opcode = AWS_WEBSOCKET_OPCODE_CLOSE, .fin = true, }, }; ASSERT_SUCCESS(s_send_frame(&tester, &send_close)); /* Now have the websocket read a PING */ struct readpush_frame read_ping = { .def = { .opcode = AWS_WEBSOCKET_OPCODE_PING, .fin = true, }, }; s_set_readpush_frames(&tester, &read_ping, 1); ASSERT_SUCCESS(s_do_readpush_all(&tester)); /* Check that PONG is NOT sent automatically, because a CLOSE was sent before it */ s_drain_written_messages(&tester); ASSERT_TRUE(tester.num_written_frames == 1); ASSERT_INT_EQUALS(AWS_WEBSOCKET_OPCODE_CLOSE, tester.written_frames[0].def.opcode); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_midchannel_read_message) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); struct readpush_frame pushing = { .payload = aws_byte_cursor_from_c_str("Hello hello can you hear me Joe?"), .def = {.opcode = AWS_WEBSOCKET_OPCODE_BINARY, .fin = true}, }; s_set_readpush_frames(&tester, &pushing, 1); ASSERT_SUCCESS(s_do_readpush_all(&tester)); testing_channel_drain_queued_tasks(&tester.testing_channel); ASSERT_SUCCESS(s_readpush_midchannel_check(&tester, 0)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } TEST_CASE(websocket_midchannel_read_multiple_messages) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); /* Read a mix of different frame types, most of which shouldn't get passed along to next handler.
*/ struct readpush_frame pushing[] = { { .payload = aws_byte_cursor_from_c_str("Message 1."), .def = {.opcode = AWS_WEBSOCKET_OPCODE_BINARY, .fin = true}, }, { .payload = aws_byte_cursor_from_c_str("Ignore ping frame"), .def = {.opcode = AWS_WEBSOCKET_OPCODE_PING, .fin = true}, }, { .payload = aws_byte_cursor_from_c_str("Ignore text frame"), .def = {.opcode = AWS_WEBSOCKET_OPCODE_TEXT, .fin = false}, }, { .payload = aws_byte_cursor_from_c_str("Ignore continuation of text frame"), .def = {.opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = true}, }, { .payload = aws_byte_cursor_from_c_str("Message 2 fragment 1/3."), .def = {.opcode = AWS_WEBSOCKET_OPCODE_BINARY, .fin = false}, }, { .payload = aws_byte_cursor_from_c_str("Message 2 fragment 2/3"), .def = {.opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = false}, }, { .payload = aws_byte_cursor_from_c_str("Ignore ping frame"), .def = {.opcode = AWS_WEBSOCKET_OPCODE_PING, .fin = true}, }, { .payload = aws_byte_cursor_from_c_str("Message 2 fragment 3/3."), .def = {.opcode = AWS_WEBSOCKET_OPCODE_CONTINUATION, .fin = true}, }, }; s_set_readpush_frames(&tester, pushing, AWS_ARRAY_SIZE(pushing)); ASSERT_SUCCESS(s_do_readpush_all(&tester)); testing_channel_drain_queued_tasks(&tester.testing_channel); /* Check that only BINARY (and continuation of BINARY) frames passed through */ ASSERT_SUCCESS(s_readpush_midchannel_check(&tester, 0)); ASSERT_SUCCESS(s_readpush_midchannel_check(&tester, 4)); ASSERT_SUCCESS(s_readpush_midchannel_check(&tester, 5)); ASSERT_SUCCESS(s_readpush_midchannel_check(&tester, 7)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/000077500000000000000000000000001456575232400176375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.builder/000077500000000000000000000000001456575232400213435ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.builder/actions/000077500000000000000000000000001456575232400230035ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.builder/actions/pkcs11_test_setup.py000066400000000000000000000103241456575232400267360ustar00rootroot00000000000000""" Prepare for PKCS#11 tests by configuring SoftHSM2, if it is installed. """ import Builder import os import re class Pkcs11TestSetup(Builder.Action): """ Set up this machine for running the PKCS#11 tests. If SoftHSM2 cannot be installed, the tests are skipped. This action should be run in the 'pre_build_steps' or 'build_steps' stage. """ def run(self, env): if not env.project.needs_tests(env): print("Skipping PKCS#11 setup because tests disabled for project") return self.env = env # total hack: don't run PKCS#11 tests when building all C libs with -DBUILD_SHARED_LIBS=ON. # here's what happens: libsofthsm2.so loads the system libcrypto.so and # s2n loads the aws-lc's libcrypto.so and really strange things start happening. # this wouldn't happen in the real world, just in our tests, so just bail out if hasattr(env.args, "cmake_extra"): if any('BUILD_SHARED_LIBS=ON' in arg for arg in env.args.cmake_extra): print( "WARNING: PKCS#11 tests disabled when BUILD_SHARED_LIBS=ON due to weird libcrypto.so behavior") return # try to install softhsm try: softhsm_install_acion = Builder.InstallPackages(['softhsm']) softhsm_install_acion.run(env) except: print("WARNING: softhsm could not be installed. PKCS#11 tests are disabled") return softhsm_lib = self._find_softhsm_lib() if softhsm_lib is None: print("WARNING: libsofthsm2.so not found. 
PKCS#11 tests are disabled") return # set cmake flag so PKCS#11 tests are enabled env.project.config['cmake_args'].append('-DENABLE_PKCS11_TESTS=ON') # put SoftHSM config file and token directory under the build dir. softhsm2_dir = os.path.join(env.build_dir, 'softhsm2') conf_path = os.path.join(softhsm2_dir, 'softhsm2.conf') token_dir = os.path.join(softhsm2_dir, 'tokens') env.shell.mkdir(token_dir) self._setenv('SOFTHSM2_CONF', conf_path) with open(conf_path, 'w') as conf_file: conf_file.write(f"directories.tokendir = {token_dir}\n") # print SoftHSM version self._exec_softhsm2_util('--version') # sanity check SoftHSM is working self._exec_softhsm2_util('--show-slots') # set env vars for tests self._setenv('TEST_PKCS11_LIB', softhsm_lib) self._setenv('TEST_PKCS11_TOKEN_DIR', token_dir) def _find_softhsm_lib(self): """Return path to SoftHSM2 shared lib, or None if not found""" # note: not using `ldconfig --print-cache` to find it because # some installers put it in weird places where ldconfig doesn't look # (like in a subfolder under lib/) for lib_dir in ['lib64', 'lib']: # search lib64 before lib for base_dir in ['/usr/local', '/usr', '/', ]: search_dir = os.path.join(base_dir, lib_dir) for root, dirs, files in os.walk(search_dir): for file_name in files: if 'libsofthsm2.so' in file_name: return os.path.join(root, file_name) return None def _exec_softhsm2_util(self, *args, **kwargs): if not 'check' in kwargs: kwargs['check'] = True result = self.env.shell.exec('softhsm2-util', *args, **kwargs) # older versions of softhsm2-util (2.1.0 is a known offender) # return error code 0 and print the help if invalid args are passed. # This should be an error. # # invalid args can happen because newer versions of softhsm2-util # support more args than older versions, so what works on your # machine might not work on some ancient docker image. 
if 'Usage: softhsm2-util' in result.output: raise Exception('softhsm2-util failed') return result def _setenv(self, var, value): """ Set environment variable now, and ensure the environment variable is set again when tests run """ self.env.shell.setenv(var, value) self.env.project.config['test_env'][var] = value aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.clang-format000066400000000000000000000031611456575232400222130ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.clang-tidy000066400000000000000000000015101456575232400216700ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter,-readability-isolate-declaration,-readability-uppercase-literal-suffix' WarningsAsErrors: '*' HeaderFilterRegex: '.*\.[h|inl]$' FormatStyle: 'file' # Use empty line filter to skip linting code we don't own CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSufix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h,-assert.h' ... aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.gitattributes000066400000000000000000000003131456575232400225270ustar00rootroot00000000000000# Set the default behavior, in case people don't have core.autocrlf set. * text=auto # Declare files that will always have CRLF line endings on checkout. 
tests/resources/testparse_crlf.crt text eol=crlfaws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/000077500000000000000000000000001456575232400211775ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400233625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045131456575232400261760ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? A clear and concise description of the bug. validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. validations: required: false - type: input id: aws-c-io-version attributes: label: aws-c-io version used validations: required: true - type: input id: compiler-version attributes: label: Compiler and version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003251456575232400253520ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-c-io/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400267530ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). 
validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400272310ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400247760ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/workflows/000077500000000000000000000000001456575232400232345ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/workflows/ci.yml000066400000000000000000000165531456575232400243640ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-io LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: linux-compat: runs-on: ubuntu-22.04 # latest strategy: fail-fast: false matrix: image: - manylinux1-x64 - manylinux1-x86 - manylinux2014-x64 - manylinux2014-x86 - fedora-34-x64 - opensuse-leap - rhel8-x64 - al2-x64 steps: - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-byo-crypto: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-al2-x64 build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBYO_CRYPTO=ON linux-compiler-compat: runs-on: ubuntu-22.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - 
clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} clang-sanitizers: runs-on: ubuntu-22.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. 
downstream: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --compiler msvc-16 windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-io\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-io\build\aws-c-io osx: runs-on: macos-13 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} openbsd: runs-on: ubuntu-22.04 # latest steps: - uses: actions/checkout@v4 - name: Build ${{ env.PACKAGE_NAME }} + consumers id: test uses: cross-platform-actions/action@v0.23.0 with: operating_system: openbsd architecture: x86-64 version: '7.4' cpu_count: 4 shell: bash run: | sudo pkg_add py3-urllib3 python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400263400ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # 
List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400300030ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400306730ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/workflows/proof-alarm.yml000066400000000000000000000015271456575232400262030ustar00rootroot00000000000000# Alarm on changes to epoll_event_loop implementation name: ProofAlarm on: [push] jobs: check-for-changes: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: Check run: | TMPFILE=$(mktemp) echo "c624a28de5af7f851a240a1e65a26c01 source/linux/epoll_event_loop.c" > $TMPFILE md5sum --check $TMPFILE # No further steps if successful - name: Echo fail if: failure() run: | echo "The VCC proofs are based on a snapshot of epoll_event_loop.c. This push updates this file so the proofs must be rechecked to ensure they remain valid. Please contact Nathan Chong. You can also update md5sum value by running `md5sum source/linux/epoll_event_loop.c` if the changes are trivial." aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.github/workflows/stale_issue.yml000066400000000000000000000046321456575232400263040ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job permissions: issues: write pull-requests: write steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deservers. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! 
It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-io/.gitignore000066400000000000000000000010371456575232400216300ustar00rootroot00000000000000# IDE Artifacts .metadata .build .vscode .idea *.d Debug Release *~ *# *.iml tags #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app aws-crt-python-0.20.4+dfsg/crt/aws-c-io/CMakeLists.txt000066400000000000000000000153561456575232400224110ustar00rootroot00000000000000 cmake_minimum_required(VERSION 3.1) project(aws-c-io C) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(AwsFindPackage) include(CTest) option(BUILD_RELOCATABLE_BINARIES "Build Relocatable Binaries, this will turn off features that will fail on older kernels than used for the build." OFF) option(BYO_CRYPTO "Don't build a tls implementation or link against a crypto interface. This feature is only for unix builds currently." OFF) file(GLOB AWS_IO_HEADERS "include/aws/io/*.h" ) file(GLOB AWS_IO_UV_HEADERS "include/aws/io/uv/*.h" ) file(GLOB AWS_IO_TESTING_HEADERS "include/aws/testing/*.h" ) file(GLOB AWS_IO_PRIV_HEADERS "include/aws/io/private/*.h" ) file(GLOB AWS_IO_SRC "source/*.c" ) set(USE_S2N OFF) if (WIN32) option(USE_IO_COMPLETION_PORTS "Use I/O Completion Ports to drive event-loops. \ If disabled, a less performant implementation based on select() is used. 
\ Disable this if implementing your own event-loop whose interface does not match the IOCP interface." ON) file(GLOB AWS_IO_OS_HEADERS ) file(GLOB AWS_IO_OS_SRC "source/windows/*.c" ) if (USE_IO_COMPLETION_PORTS) file(GLOB AWS_IO_IOCP_SRC "source/windows/iocp/*.c" ) list(APPEND AWS_IO_OS_SRC ${AWS_IO_IOCP_SRC}) set(EVENT_LOOP_DEFINE "IO_COMPLETION_PORTS") endif () if (MSVC) source_group("Header Files\\aws\\io" FILES ${AWS_IO_HEADERS}) source_group("Header Files\\aws\\io\\private" FILES ${AWS_IO_PRIV_HEADERS}) source_group("Source Files" FILES ${AWS_IO_SRC}) source_group("Source Files\\windows" FILES ${AWS_IO_OS_SRC}) endif () #platform libs come from aws-c-common transitively, so we don't specify them here, but for documentation purposes, #Kernel32 and wsock2 are pulled in automatically. Here we add the lib containing the schannel API. #Also note, you don't get a choice on TLS implementation for Windows. set(PLATFORM_LIBS secur32 crypt32) elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android") option(USE_VSOCK "Build in support for VSOCK sockets" OFF) file(GLOB AWS_IO_OS_HEADERS ) file(GLOB AWS_IO_OS_SRC "source/linux/*.c" "source/posix/*.c" ) set(PLATFORM_LIBS "") set(EVENT_LOOP_DEFINE "EPOLL") set(USE_S2N ON) elseif (APPLE) file(GLOB AWS_IO_OS_HEADERS ) file(GLOB AWS_IO_OS_SRC "source/bsd/*.c" "source/posix/*.c" "source/darwin/*.c" ) find_library(SECURITY_LIB Security) if (NOT SECURITY_LIB) message(FATAL_ERROR "Security framework not found") endif () #No choice on TLS for apple, darwinssl will always be used. list(APPEND PLATFORM_LIBS "-framework Security") set(EVENT_LOOP_DEFINE "KQUEUE") elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetBSD" OR CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") file(GLOB AWS_IO_OS_HEADERS ) file(GLOB AWS_IO_OS_SRC "source/bsd/*.c" "source/posix/*.c" ) set(EVENT_LOOP_DEFINE "KQUEUE") set(USE_S2N ON) endif() if (BYO_CRYPTO) set(USE_S2N OFF) if (APPLE OR WIN32) message(FATAL_ERROR "BYO_CRYPTO is only for use with unix systems. 
It cannot be used on your current platform target") endif() endif() if (USE_S2N) file(GLOB AWS_IO_TLS_SRC "source/s2n/*.c" ) aws_use_package(s2n) endif() file(GLOB IO_HEADERS ${AWS_IO_HEADERS} ${AWS_IO_OS_HEADERS} ${AWS_IO_PRIV_HEADERS} ) file(GLOB IO_SRC ${AWS_IO_SRC} ${AWS_IO_OS_SRC} ${AWS_IO_TLS_SRC} ) add_library(${PROJECT_NAME} ${LIBTYPE} ${IO_HEADERS} ${IO_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_IO") aws_check_headers(${PROJECT_NAME} ${AWS_IO_HEADERS}) aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_compile_definitions(${PROJECT_NAME} PUBLIC "-DAWS_USE_${EVENT_LOOP_DEFINE}") if (BYO_CRYPTO) target_compile_definitions(${PROJECT_NAME} PUBLIC "-DBYO_CRYPTO") endif() if (USE_S2N) target_compile_definitions(${PROJECT_NAME} PRIVATE "-DUSE_S2N") endif() if (BUILD_RELOCATABLE_BINARIES) target_compile_definitions(${PROJECT_NAME} PRIVATE "-DCOMPAT_MODE") endif() if (USE_VSOCK) target_compile_definitions(${PROJECT_NAME} PUBLIC "-DUSE_VSOCK") endif() target_include_directories(${PROJECT_NAME} PUBLIC $ $) aws_use_package(aws-c-common) aws_use_package(aws-c-cal) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) target_link_libraries(${PROJECT_NAME} PRIVATE ${PLATFORM_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) install(FILES ${AWS_IO_HEADERS} DESTINATION "include/aws/io" COMPONENT Development) install(FILES ${AWS_IO_TESTING_HEADERS} DESTINATION "include/aws/testing" COMPONENT Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) if (NOT CMAKE_CROSSCOMPILING) if (BUILD_TESTING) add_subdirectory(tests) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-io/CODE_OF_CONDUCT.md000066400000000000000000000004671456575232400224450ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-io/CONTRIBUTING.md000066400000000000000000000067341456575232400221020ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. 
When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-io/issues), or [recently closed](https://github.com/awslabs/aws-c-io/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-io/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-io/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-io/LICENSE000066400000000000000000000261361456575232400206540ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-c-io/NOTICE000066400000000000000000000003611456575232400205430ustar00rootroot00000000000000AWS C Io Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0. The source/pkcs11/v2.40/pkcs11.h header file is based on Public Domain content from https://github.com/latchset/pkcs11-headers aws-crt-python-0.20.4+dfsg/crt/aws-c-io/PKCS11.md000066400000000000000000000037001456575232400210630ustar00rootroot00000000000000# PKCS#11 tests

To run the PKCS#11 tests, configure cmake with `-DENABLE_PKCS11_TESTS=ON` and set the following environment variables:

```
TEST_PKCS11_LIB = <path to a PKCS#11 shared library>
TEST_PKCS11_TOKEN_DIR = <path to the SoftHSM2 token directory>
```

TEST_PKCS11_LIB is used by the tests to perform pkcs11 operations. TEST_PKCS11_TOKEN_DIR is used by the tests to clear the softhsm tokens before a test begins. This is achieved by cleaning the token directory. NOTE: Any tokens created outside the tests will be cleaned up, along with all the objects/keys on them, as part of the tests.

## The suggested way to set up your machine

1) Install [SoftHSM2](https://www.opendnssec.org/softhsm/) via brew / apt / apt-get / yum:
```
> apt install softhsm
```
Check that it's working:
```
> softhsm2-util --show-slots
```
If this spits out an error message, create a config file:
* Default location: `~/.config/softhsm2/softhsm2.conf`
* This file must specify the token dir; the default value is:
```
directories.tokendir = /usr/local/var/lib/softhsm/tokens/
```
2) Set env vars like so:
```
TEST_PKCS11_LIB = <path to the SoftHSM2 shared library>
TEST_PKCS11_TOKEN_DIR = /usr/local/var/lib/softhsm/tokens/
```
3) [Example to import your keys, not used by tests] Create a token and private key. You can use any values for the labels, pin, key, cert, CA etc. Here are copy-paste friendly commands for using files available in this repo.
```
> softhsm2-util --init-token --free --label my-test-token --pin 0000 --so-pin 0000
```
Note which slot the token ended up in
```
> softhsm2-util --import tests/resources/unittests.p8 --slot <slot from the previous step> --label my-test-key --id BEEFCAFE --pin 0000
```
WARN: All tokens created outside the tests will be cleaned up as part of the tests. Use a separate token directory for running the tests if you would like to keep your tokens intact. aws-crt-python-0.20.4+dfsg/crt/aws-c-io/README.md000066400000000000000000001145731456575232400211270ustar00rootroot00000000000000## AWS-C-IO

This is a module for the AWS SDK for C. It handles all IO and TLS work for application protocols. aws-c-io is an event driven framework for implementing application protocols. It is built on top of cross-platform abstractions that allow you as a developer to think only about the state machine and API for your protocols. A typical use-case would be to write something like Http on top of asynchronous-io with TLS already baked in. All of the platform and security concerns are already handled for you. It is designed to be light-weight, fast, portable, and flexible for multiple domain use-cases such as: embedded, server, client, and mobile.

## License

This library is licensed under the Apache 2.0 License.

## Usage

### Building

CMake 3.1+ is required to build. `<install-path>` must be an absolute path in the following instructions.

#### Linux-Only Dependencies

If you are building on Linux, you will need to build aws-lc and s2n-tls first.
```
git clone git@github.com:awslabs/aws-lc.git
cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
cmake --build aws-lc/build --target install

git clone git@github.com:aws/s2n-tls.git
cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build s2n-tls/build --target install
```

#### Building aws-c-io and Remaining Dependencies

```
git clone git@github.com:awslabs/aws-c-common.git
cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
cmake --build aws-c-common/build --target install

git clone git@github.com:awslabs/aws-c-cal.git
cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-c-cal/build --target install

git clone git@github.com:awslabs/aws-c-io.git
cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-c-io/build --target install
```

### Usage Patterns

This library contains many primitive building blocks that can be configured in a myriad of ways. However, most likely you simply need to use the `aws_event_loop_group` and `aws_channel_bootstrap` APIs.

Typical Client API Usage Pattern:

    /* setup */
    aws_io_library_init(allocator);

    struct aws_event_loop_group el_group;

    if (aws_event_loop_group_init_default(&el_group, allocator)) {
        goto cleanup;
    }

    struct aws_tls_ctx_options tls_options = { ... };
    struct aws_tls_ctx *tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options);

    struct aws_tls_connection_options tls_client_conn_options = { ... };

    struct aws_client_bootstrap client_bootstrap;

    if (aws_client_bootstrap_init(&client_bootstrap, allocator, &el_group)) {
        goto cleanup;
    }

    aws_client_bootstrap_set_tls_ctx(&client_bootstrap, tls_ctx);
    aws_client_bootstrap_set_alpn_callback(&client_bootstrap, your_alpn_callback);

    /* throughout your application's lifetime */
    struct aws_socket_options sock_options = { ... };
    struct aws_socket_endpoint endpoint = { ... };

    if (aws_client_bootstrap_new_tls_socket_channel(&client_bootstrap, &endpoint, &sock_options, &tls_options,
            your_channel_setup_callback, your_channel_shutdown_callback, your_context_data)) {
        goto cleanup;
    }

    /* shutdown */
    aws_client_bootstrap_clean_up(&client_bootstrap);
    aws_tls_client_ctx_destroy(tls_ctx);
    aws_event_loop_group_clean_up(&el_group);
    aws_io_library_clean_up();

Typical Server API Usage Pattern:

    /* setup */
    aws_io_library_init(allocator);

    struct aws_event_loop_group el_group;

    if (aws_event_loop_group_init_default(&el_group, allocator)) {
        goto cleanup;
    }

    struct aws_tls_ctx_options tls_options = { ... };
    struct aws_tls_ctx *tls_ctx = aws_tls_server_ctx_new(allocator, &tls_options);

    struct aws_tls_connection_options tls_server_conn_options = { ... };

    struct aws_socket_options sock_options = { ... };
    struct aws_socket_endpoint endpoint = { ... };
    struct aws_server_bootstrap server_bootstrap;

    if (aws_server_bootstrap_init(&server_bootstrap, allocator, &el_group)) {
        goto cleanup;
    }

    aws_server_bootstrap_set_tls_ctx(&server_bootstrap, tls_ctx);
    aws_server_bootstrap_set_alpn_callback(&server_bootstrap, your_alpn_callback);

    struct aws_socket *listener = aws_server_bootstrap_add_tls_socket_listener(&server_bootstrap, &endpoint, &sock_options, &tls_options,
            your_incoming_channel_callback, your_channel_shutdown_callback, your_context_data);

    if (!listener) {
        goto cleanup;
    }

    /* shutdown */
    aws_server_bootstrap_remove_socket_listener(listener);
    aws_server_bootstrap_clean_up(&server_bootstrap);
    aws_tls_server_ctx_destroy(tls_ctx);
    aws_event_loop_group_clean_up(&el_group);
    aws_io_library_clean_up();

If you are building a protocol on top of sockets without the use of TLS, you can still use this pattern as your starting point. Simply call `aws_client_bootstrap_new_socket_channel` or `aws_server_bootstrap_add_socket_listener`, respectively, instead of the TLS variants.

## Concepts

### Event Loop

Core to Async-IO is the event-loop. We provide an implementation for most platforms out of the box:

Platform | Implementation
--- | ---
Linux | Edge-Triggered Epoll
BSD Variants and Apple Devices | KQueue
Windows | IOCP (IO Completion Ports)

You can also always implement your own.

An Event Loop has a few jobs.

1. Notify subscribers of IO Events
2. Execute and maintain a task scheduler
3. Maintain an opaque data store for consumers

The threading model for a channel (see below) is pinned to the thread of the event-loop. Each event-loop implementation provides an API to move a cross-thread call into the event-loop thread if necessary.

### Channels and Slots

A channel is simply a container that drives the slots. It is responsible for providing an interface between slots and the underlying event-loop, as well as invoking the slots to pass messages as the channel runs. It also provides utilities for making sure slots and their handlers run in the correct thread and for moving execution to that thread if necessary.

![Channels and Slots Diagram](docs/images/channels_slots.png)

In this diagram, a channel is a collection of slots, and it knows how to make them communicate. It also controls the lifetime of slots. When a channel is being shut down, it will issue shutdown_direction messages in the appropriate direction. If it is in the read direction, it will call shutdown_direction on the first slot. Conversely, in the write direction, it will call shutdown_direction on the last slot in the channel. When all slots have successfully shut down, the channel can be safely cleaned up and de-allocated.

### Slots

![Slots Diagram](docs/images/slots.png)

Slots maintain their links to adjacent slots in the channel. As the channel is processed, each slot will read from its left-adjacent slot, send those messages to the handler, and call its right-adjacent slot when it needs to send a message. Conversely, each slot will read from its right-adjacent slot, send those messages to the handler, and send messages to the left-adjacent slot in the channel. Most importantly, slots contain a reference to a handler. Handlers are responsible for doing most of the work (see below). Finally, slots have utilities for manipulating the connections of the slots themselves. Slots can also be added, removed, or replaced dynamically in a channel.

### Channel Handlers

The channel handler is the fundamental unit that protocol developers will implement.
It contains all of your state machinery, framing, and optionally end-user APIs. ![Handler Diagram](docs/images/handler.png) #### Special, pre-defined handlers Out of the box you get a few handlers pre-implemented. 1. Sockets. We've done the heavy lifting of implementing a consistent sockets interface for each platform. Sockets interact directly with the underlying io and are invoked directly by the event-loop for io events. 2. Pipes (or something like them depending on platform), these are particularly useful for testing. 3. TLS. We provide TLS implementations for most platforms. Platform | Implementation --- | --- Linux | Signal-to-noise (s2n) see: https://github.com/aws/s2n-tls BSD Variants | s2n Apple Devices | Security Framework/ Secure Transport. See https://developer.apple.com/documentation/security/secure_transport Windows | Secure Channel. See https://msdn.microsoft.com/en-us/library/windows/desktop/aa380123(v=vs.85).aspx In addition, you can always write your own handler around your favorite implementation and use that. To provide your own TLS implementation, you must build this library with the cmake argument `-DBYO_CRYPTO=ON`. You will no longer need s2n or libcrypto once you do this. Instead, your application provides an implementation of `aws_tls_ctx`, and `aws_channel_handler`. At startup time, you must invoke the functions: `aws_tls_byo_crypto_set_client_setup_options()` and `aws_tls_byo_crypto_set_server_setup_options()`. ### Typical Channel ![Typical Channel Diagram](docs/images/typical_channel.png) A typical channel will contain a socket handler, which receives io events from the event-loop. It will read up to 16 kb and pass the data to the next handler. The next handler is typically feeding a TLS implementation (see the above section on pre-defined handlers). The TLS handler will then pass the data to an application protocol. The application protocol could then expose an API to an application. When the application wants to send data, the whole process runs in reverse. Channels can be much more complex though. For example, there could be nested channels for multiplexing/de-multiplexing, or there could be more handlers to cut down on handler complexity. Note however, that a channel is always pinned to a single thread. It provides utilities for applications and handlers to move a task into that thread, but it is very important that handlers and application users of your handlers never block. ### Channel IO Operation Fairness Since multiple channels run in the same event-loop, we need to make sure channels are not starved by other active channels. To address this, the handlers consuming IO events from the event-loop should determine the appropriate max read and write and context switch before continuing. A context switch is performed, simply by scheduling a task to run at the current timestamp, to continue the IO operation. A reasonable default is 16kb, but a savvy implementation may want to upgrade a few connections to 256kb if they notice a particularly fast connection (e.g. you notice EAGAIN or EWOULDBLOCK is never returned from write() calls). ### Read Back Pressure One of the most challenging aspects of asynchronous io programming, is managing when back-pressure should be applied to the underlying io layer. In the read direction, this is managed via update_window messages. Let's look at the below diagram for an example of how this works. In this example, we have a channel setup with an event-loop which manages io event notifications. 
The first slot contains a socket handler. The socket handler will read directly from the socket. The second slot has a TLS handler. Its only job is to encrypt/decrypt the data passed to it and pass it back to the channel. The third and final slot contains the actual application protocol handler (could be Http, SIP, RTP it doesn't really matter). The application protocol exposes an API to the application. As data is processed, we don't want to endlessly read, allocate, and process data faster than the application can use it. As a result, it has a 20kb window. ![Read Back Pressure Diagram](docs/images/read_backpressure.png) 1. The event-loop notifies the socket handler that it has data available to read. The handler knows it can read up to 20kb so it reads a full 16kb from the socket and passes it to the next slot. Since the socket sees that there is still an open window, it, schedules a task to read again after the other channels have had a chance to process their pending reads. Likewise, the TLS handler decrypts the data and passes it to the slot containing the application protocol. The application protocol processes the 16 kb and hands it off to the application. At this point, the application hasn't notified the channel it is finished with the data (suppose application queues it), so the new window for the slot is 4 kb. 2. The event-loop runs the scheduled read task from (1) after processing the other channels. The socket handler sees it can read 4kb more of data. Even though it can read 16kb at a time, to honor the window, it reads 4kb and passes it on. This time however, the window is 0, so the socket does not schedule another read task. The TLS handler decrypts the data and passes it on The application protocol reads 4kb, passes it to the application and its window is 0kb. The channel now goes idle waiting on the application to finish processing its data. 3. The application notifies the channel (via the API on the application protocol handler) it has processed 20kb of data. This causes the protocol handler to issue an update_window message with an update of 20kb. Slot 2 passes the message on to the TLS handler. It evaluates the message and simply, sends a 20kb window update message to its slot. The socket handler receives the update_window message and schedules a new read task. 4. The event-loop runs the scheduled read task from (3). The socket reads on the io-handle, but it returns EAGAIN or EWOULD_BLOCK. The channel now goes back to an idle state waiting on the event-loop to notify it that the socket is readable. ### Write Back Pressure Write back pressure comes into play when the application can produce data more quickly than it can be sent to the underlying io. To manage this, messages have members to attach a promise fn and context data to. When a handler exposes an API, it has the responsibility to take a fn and data from the user if over write is a possibility. The io-handler will invoke the promise after it has successfully written the last byte to the underlying io. ### Thread Safety In general, the plan for addressing thread-safety is to not share memory across threads. This library is designed around single threaded event-loops which process one or more channels. Anywhere a handler or channel exposes a back-channel API, it is responsible for checking which thread it was invoked from. If it is invoked from the event-loop's thread, then it may proceed as planned. If it is not, it is required to queue a task to do the work. 
When the task is executed, it will be executed in the correct thread. The functions we specify as thread-safe, we do so because those functions are necessary for abiding by the stated threading model. For example, since scheduling a task is the main function for addressing cross-threaded operations, it has to be thread-safe. ## Terminology We use a few terms in the following sections that are not necessarily "C concepts". We assume you know C, but here are some definitions that may be helpful. ### Run-time Polymorphic This means that the API is driven by a virtual-table. This is simply a struct of function pointers. They are invoked via a c extern style API, but ultimately those public functions simply invoke the corresponding function in the v-table. These are reserved for types that: a.) Need to be configurable, changable at runtime b.) Do not have immediate performance concerns caused by an indirect function call. ### Compile-time Polymorphic This means that the API is not necessarily driven by a virtual-table. It is exposed as a c extern style API, but the build system makes a decision about which symbols to compile based on factors such as platform and compile-time flags. These are reserved for types that: a.) Need to be configurable at compile-time based on platform or compile-time options. b.) Have performance concerns caused by an indirect function call. Note: that runtime configurability may still be something we need to expose here. In that case, a compiler flag will be used to denote that we are using a custom implementation for x feature. Then we will expose an implementation that indirectly invokes from a v-table and provides hooks for the application to plug into at runtime. ### Promise, Promise Context There are many phrases for this, callback, baton, event-handler etc... The key idea is that APIs that need to notify a caller when an asynchronous action is completed, should take a callback function and a pointer to an opaque object and invoke it upon completion. This term doesn't refer to the layout of the data. A promise in some instances may be a collection of functions in a structure. It's simply the language we use for the concept. ## API **Note: unless otherwise stated,** * no functions in this API are allowed to block. * nothing is thread-safe unless explicitly stated. ### Event Loop Event Loops are run-time polymorphic. We provide some implementations out of the box and a way to get an implementation without having to call a different function per platform. However, you can also create your own implementation and use it on any API that takes an event-loop as a parameter. From a design perspective, the event-loop is not aware of channels or any of its handlers. It interacts with other entities only via its API. 
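Before diving into the layout and v-table below, here is a minimal, hedged sketch of creating, running, and tearing down the default event loop. It is based on the functions described in this section (`aws_event_loop_new_default`, the `run`/`stop`/`destroy` shims) plus `aws_high_res_clock_get_ticks` from aws-c-common; the exact shim names and signatures should be verified against the installed `aws/io/event_loop.h`, so treat this as illustrative rather than authoritative.

    /* Hedged sketch only: shim names/signatures may differ from the installed headers. */
    #include <aws/common/clock.h>
    #include <aws/io/event_loop.h>

    static int s_run_default_loop(struct aws_allocator *allocator) {
        /* Picks epoll/kqueue/IOCP for the current platform and calls
         * aws_event_loop_init_base() internally. */
        struct aws_event_loop *loop =
            aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks);
        if (!loop) {
            return AWS_OP_ERR;
        }

        /* run() returns immediately; from here on, the loop's own thread
         * dispatches io events and scheduled tasks. */
        if (aws_event_loop_run(loop)) {
            aws_event_loop_destroy(loop);
            return AWS_OP_ERR;
        }

        /* ... subscribe io handles and schedule tasks via the loop's API ... */

        /* Shutdown: stop the loop, then destroy it (destroy invokes the
         * v-table destroy of the platform implementation). */
        aws_event_loop_stop(loop);
        aws_event_loop_destroy(loop);
        return AWS_OP_SUCCESS;
    }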
#### Layout struct aws_event_loop { struct aws_event_loop_vtable vtable; aws_clock clock; struct aws_allocator *allocator; struct aws_common_hash_table local_storage; void *impl_data; }; #### V-Table struct aws_event_loop_vtable { void (*destroy)(struct aws_event_loop *); int (*run) (struct aws_event_loop *); int (*stop) (struct aws_event_loop *, void (*on_stopped) (struct aws_event_loop *, void *), void *promise_user_data); int (*schedule_task) (struct aws_event_loop *, struct aws_task *task, uint64_t run_at); int (*subscribe_to_io_events) (struct aws_event_loop *, struct aws_io_handle *, int events, void(*on_event)(struct aws_event_loop *, struct aws_io_handle *, void *), void *user_data); int (*unsubscribe_from_io_events) (struct aws_event_loop *, struct aws_io_handle *); BOOL (*is_on_callers_thread) (struct aws_event_loop *); }; Every implementation of aws_event_loop must implement this table. Let's look at some details for what each entry does. void (*destroy)(struct aws_event_loop *); This function is invoked when the event-loop is finished processing and is ready to be cleaned up and deallocated. int (*run) (struct aws_event_loop *); This function starts the running of the event-loop and then immediately returns. This could kick off a thread, or setup some resources to run and receive events in a back channel API. For example, you could have an epoll loop that runs in a thread, or you could have an event-loop pumped by a system loop such as glib, or libevent etc... and then publish events to your event-loop implementation. int (*stop) (struct aws_event_loop *, void (*on_stopped) (struct aws_event_loop *, void *), void *promise_user_data); The stop function signals the event-loop to shutdown. This function should not block but it should remove active io handles from the currently monitored or polled set and should begin notifying current subscribers via the on_event callback that the handle was removed._ Once the event-loop has shutdown to a safe state, it should invoke the on_stopped function. int (*schedule_task) (struct aws_event_loop *, struct aws_task *task, uint64_t run_at); This function schedules a task to run in its task scheduler at the time specified by run_at. Each event-loop is responsible for implementing a task scheduler. This function must not block, and must be thread-safe. How this is implemented will depend on platform. For example, one reasonable implementation is if the call comes from the event-loop's thread, to queue it in the task scheduler directly. Otherwise, write to a pipe that the event-loop is listening for events on. Upon noticing the write to the pipe, it can read the task from the pipe and schedule the task. `task` must be copied. `run_at` is using the system `RAW_MONOTONIC` clock (or the closest thing to it for that platform). It is represented as nanos since unix epoch. int (*subscribe_to_io_events) (struct aws_event_loop *, struct aws_io_handle *, int events, void(*on_event)(struct aws_event_loop *, struct aws_io_handle *, int events, void *), void *user_data); A subscriber will call this function to register an io_handle for event monitoring. This function is thread-safe. `events` is a bit field of the events the subscriber wants to receive. A few events will always be registered (regardless of the value passed here), such as `AWS_IO_EVENT_HANDLE_REMOVED`. The event-loop will invoke `on_event` anytime it receives one or more of the registered events. **NOTE: The event-loop is not responsible for manipulating or setting io flags on io_handles. 
It will never call read(), write(), connect(), accept(), close(), etc. on any io handle it does not explicitly own. It is the subscriber's responsibility to know how to respond to the event.**

**NOTE: The event-loop will not maintain any state other than the io handles it is polling. So, for example, in edge-triggered epoll, it does not maintain a read-ready list. It is the subscriber's responsibility to know it has more data to read or write and to schedule its tasks appropriately.**

    int (*unsubscribe_from_io_events) (struct aws_event_loop *, struct aws_io_handle *);

A subscriber will call this function to remove its io handle from the monitored events. For example, it would do this immediately before calling close() on a socket or pipe. `on_event` will still be invoked with `AWS_IO_EVENT_HANDLE_REMOVED` when this occurs.

    BOOL (*is_on_callers_thread) (struct aws_event_loop *);

Returns `TRUE` if the caller is on the same thread as the event-loop. Returns `FALSE` otherwise. This allows users of the event-loop to make a decision about whether it is safe to interact with the loop directly, or if they need to schedule a task to run in the correct thread. This function is thread-safe.

#### API

    int aws_event_loop_init_base (struct aws_allocator *, aws_clock clock, ...);

Initializes common data for all event-loops regardless of implementation. All implementations must call this function before returning from their allocation function.

    struct aws_event_loop *aws_event_loop_new_default (struct aws_allocator *, aws_clock clock, ...);

Allocates and initializes the default event-loop implementation for the current platform. Calls `aws_event_loop_init_base` before returning.

    struct aws_event_loop *aws_event_loop_destroy (struct aws_event_loop *);

Cleans up internal state of the event-loop implementation, and then calls the v-table `destroy` function.

    int aws_event_loop_fetch_local_object ( struct aws_event_loop *, void *key, void **item);

All event-loops contain local storage for all users of the event-loop to store common data into. This function is for fetching one of those objects by key. The key for this store is of type `void *`. This function is NOT thread-safe, and it expects the caller to be calling from the event-loop's thread. If this is not the case, the caller must first schedule a task on the event-loop to enter the correct thread.

    int aws_event_loop_put_local_object ( struct aws_event_loop *, void *key, void *item);

All event-loops contain local storage for all users of the event-loop to store common data into. This function is for putting one of those objects by key. The key for this store is of type `void *`. This function is NOT thread-safe, and it expects the caller to be calling from the event-loop's thread. If this is not the case, the caller must first schedule a task on the event-loop to enter the correct thread.

    int aws_event_loop_remove_local_object ( struct aws_event_loop *, void *key, void **item);

All event loops contain local storage for all users of the event loop to store common data into. This function is for removing one of those objects by key. The key for this store is of type `void *`. This function is NOT thread-safe, and it expects the caller to be calling from the event loop's thread. If this is not the case, the caller must first schedule a task on the event loop to enter the correct thread. If found, and `item` is not NULL, the removed item is moved to `item`. It is the remover's responsibility to free the memory pointed to by `item`.
If it is NULL, the default deallocation strategy for the event loop will be used. int aws_event_loop_current_ticks ( struct aws_event_loop *, uint64_t *ticks); Gets the current tick count/timestamp for the event loop's clock. This function is thread-safe. #### V-Table Shims The remaining exported functions on event loop simply invoke the v-table functions and return. See the v-table section for more details. ### Channels and Slots #### Layout struct aws_channel { struct aws_allocator *alloc; struct aws_event_loop *loop; struct aws_channel_slot *first; }; struct aws_channel_slot { struct aws_allocator *alloc; struct aws_channel *channel; struct aws_channel_slot *adj_left; struct aws_channel_slot *adj_right; struct aws_channel_handler *handler; }; #### API (Channel/Slot interaction) struct aws_channel_slot_ref *aws_channel_slot_new (struct aws_channel *channel); Creates a new slot using the channel's allocator, if it is the first slot in the channel, it will be added as the first slot in the channel. Otherwise, you'll need to use the insert or replace APIs for slots. int aws_channel_slot_set_handler ( struct aws_channel_slot *, struct aws_channel_handler *handler ); Sets the handler on the slot. This should only be called once per slot. int aws_channel_slot_remove (struct aws_channel_slot *slot); Removes a slot from its channel. The slot and its handler will be cleaned up and deallocated. int aws_channel_slot_replace (struct aws_channel_slot *remove, struct aws_channel_slot *new); Replaces `remove` in the channel with `new` and cleans up and deallocates `remove` and its handler. int aws_channel_slot_insert_right (struct aws_channel_slot *slot, struct aws_channel_slot_ref *right); Adds a slot to the right of slot. int aws_channel_slot_insert_left (struct aws_channel_slot *slot, struct aws_channel_slot_ref *left); Adds a slot to the left of slot. int aws_channel_slot_send_message (struct aws_channel_slot *slot, struct aws_io_message *message, enum aws_channel_direction dir); Usually called by a handler, this calls the adjacent slot in the channel based on the `dir` argument. You may want to return any unneeded messages to the channel pool to avoid unnecessary allocations. int aws_channel_slot_increment_read_window (struct aws_channel_slot *slot, size_t window); Usually called by a handler, this function calls the left-adjacent slot. int aws_channel_slot_on_handler_shutdown_complete(struct aws_channel_slot *slot, enum aws_channel_direction dir, int err_code, bool abort_immediately); Usually called by a handler, this function calls the adjacent slot's shutdown based on the `dir` argument. ### API (Channel specific) int aws_channel_init (struct aws_channel *channel, struct aws_allocator *alloc, struct aws_event_loop *el); Initializes a channel for operation. The event loop will be used for driving the channel. int aws_channel_clean_up (struct aws_channel *channel); Cleans up resources for the channel. int aws_channel_shutdown (struct aws_channel *channel, void (*on_shutdown_completed)(struct aws_channel *channel, void *user_data), void *user_data); Starts the shutdown process, invokes on_shutdown_completed once each handler has shutdown. int aws_channel_current_clock_time( struct aws_channel *, uint64_t *ticks); Gets the current ticks from the event loop's clock. int aws_channel_fetch_local_object ( struct aws_channel *, void *key, void **item); Fetches data from the event loop's data store. This data is shared by each channel using that event loop. 
int aws_channel_put_local_object ( struct aws_channel *, void *key, void *item); Puts data into the event loop's data store. This data is shared by each channel using that event loop. int aws_channel_schedule_task (struct aws_channel *, struct aws_task *task, uint64_t run_at); Schedules a task to run on the event loop. This function is thread-safe. BOOL aws_channel_thread_is_callers_thread (struct aws_channel *); Checks if the caller is on the event loop's thread. This function is thread-safe. ### Channel Handlers Channel Handlers are runtime polymorphic. Here's some details on the virtual table (v-table): #### Layout struct aws_channel_handler { struct aws_channel_handler_vtable *vtable; struct aws_allocator *alloc; void *impl; }; #### V-Table struct aws_channel_handler_vtable { int (*data_in) ( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message ); int (*data_out) ( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message ); int (*on_window_update) (struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) int (*on_shutdown_notify) (struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code); int (*shutdown_direction) (struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir); size_t (*initial_window_size) (struct aws_channel_handler *handler); void (*destroy)(struct aws_channel_handler *handler); }; `int data_in ( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message)` Data in is invoked by the slot when an application level message is received in the read direction (from the io). The job of the implementer is to process the data in msg and either notify a user or queue a new message on the slot's read queue. `int data_out (struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message)` Data Out is invoked by the slot when an application level message is received in the write direction (to the io). The job of the implementer is to process the data in msg and either notify a user or queue a new message on the slot's write queue. `int increment_window (struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size)` Increment Window is invoked by the slot when a framework level message is received from a downstream handler. It only applies in the read direction. This gives the handler a chance to make a programmatic decision about what its read window should be. Upon receiving an update_window message, a handler decides what its window should be and likely issues an increment window message to its slot. Shrinking a window has no effect. If a handler makes its window larger than a downstream window, it is responsible for honoring the downstream window and buffering any data it produces that is greater than that window. `int (*shutdown) (struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool abort_immediately);` Shutdown is invoked by the slot when a framework level message is received from an adjacent handler. This notifies the handler that the previous handler in the chain has shutdown and will no longer be sending or receiving messages. The handler should make a decision about what it wants to do in response, and likely begins its shutdown process (if any). 
Once the handler has reached a safe state, it should call `aws_channel_slot_on_handler_shutdown_complete`.

`size_t initial_window_size (struct aws_channel_handler *handler)`

When a handler is added to a slot, the slot will call this function to determine the initial window size and will propagate a window_update message down the channel.

`void destroy(struct aws_channel_handler *handler)`

Clean up any memory or resources owned by this handler, and then deallocate the handler itself.

#### API

All exported functions simply shim into the v-table and return.

### Sockets

We include a cross-platform API for sockets. We support TCP and UDP using IPv4 and IPv6, and Unix Domain sockets. On Windows, we use Named Pipes to support the functionality of Unix Domain sockets. On Windows, this is implemented with winsock2, and on all Unix platforms we use the POSIX API. Upon a connection being established, the new socket (either as the result of a `connect()` or `start_accept()` call) will not be attached to any event loops. It is your responsibility to register it with an event loop to begin receiving notifications.

#### API

    typedef enum aws_socket_domain {
        AWS_SOCKET_IPV4,
        AWS_SOCKET_IPV6,
        AWS_SOCKET_LOCAL,
        AWS_SOCKET_VSOCK,
    } aws_socket_domain;

`AWS_SOCKET_IPV4` means an IPv4 address will be used. `AWS_SOCKET_IPV6` means an IPv6 address will be used. `AWS_SOCKET_LOCAL` means a socket path will be used for either a Unix Domain Socket or a Named Pipe on Windows. `AWS_SOCKET_VSOCK` means a CID address will be used. Note: VSOCK is currently only available on Linux with an appropriate VSOCK kernel driver installed. `-DUSE_VSOCK` needs to be passed during compilation to enable VSOCK support.

    typedef enum aws_socket_type {
        AWS_SOCKET_STREAM,
        AWS_SOCKET_DGRAM
    } aws_socket_type;

`AWS_SOCKET_STREAM` is TCP or a connection-oriented socket. `AWS_SOCKET_DGRAM` is UDP.

    struct aws_socket_creation_args {
        void(*on_incoming_connection)(struct aws_socket *socket, struct aws_socket *new_socket, void *user_data);
        void(*on_connection_established)(struct aws_socket *socket, void *user_data);
        void(*on_error)(struct aws_socket *socket, int err_code, void *user_data);
        void *user_data;
    };

`on_incoming_connection()` will be invoked on a listening socket when new connections arrive. `socket` is the listening socket. `new_socket` is the newly created socket. It is the connection to the remote endpoint. NOTE: You are responsible for calling `aws_socket_clean_up()` and `aws_mem_release()` on `new_socket` when you are finished with it.

`on_connection_established()` will be invoked after a connect call, upon a successful connection to the remote endpoint.

`on_error()` will be invoked on both listening and connecting sockets to indicate any error conditions.

    struct aws_socket_endpoint {
        char address[48];
        char socket_name[108];
        char port[10];
    };

`address` can be either an IPv4, IPv6 or VSOCK CID address. This can be used for UDP or TCP. `socket_name` is only used in LOCAL mode. `port` can be used for TCP or UDP.

    int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, struct aws_socket_options *options, struct aws_event_loop *connection_loop, struct aws_socket_creation_args *creation_args);

Initializes a socket object with socket options, an event loop to use for non-blocking operations, and callbacks to invoke upon completion of asynchronous operations. If you are using UDP or LOCAL, `connection_loop` may be `NULL`.
    void aws_socket_clean_up(struct aws_socket *socket);

Shuts down any pending operations on the socket, and cleans up state. The socket object can be re-initialized after this operation.

    int aws_socket_connect(struct aws_socket *socket, struct aws_socket_endpoint *remote_endpoint);

Connects to a remote endpoint. In UDP, this simply binds the socket to a remote address for use with `aws_socket_write()`, and if the operation is successful, the socket can immediately be used for write operations. In TCP, this function will not block. If the return value is successful, then you must wait on the `on_connection_established()` callback to be invoked before using the socket. For LOCAL (Unix Domain Sockets or Named Pipes), the socket will be immediately ready for use upon a successful return.

    int aws_socket_bind(struct aws_socket *socket, struct aws_socket_endpoint *local_endpoint);

Binds the socket to a local address. In UDP mode, the socket is ready for `aws_socket_read()` operations. In connection-oriented modes, you still must call `aws_socket_listen()` and `aws_socket_start_accept()` before using the socket.

    int aws_socket_listen(struct aws_socket *socket, int backlog_size);

TCP and LOCAL only. Sets up the socket to listen on the address bound to in `aws_socket_bind()`.

    int aws_socket_start_accept(struct aws_socket *socket);

TCP and LOCAL only. The socket will begin accepting new connections. This is an asynchronous operation. New connections will arrive via the `on_incoming_connection()` callback.

    int aws_socket_stop_accept(struct aws_socket *socket);

TCP and LOCAL only. The socket will shut down the listener. It is safe to call `aws_socket_start_accept()` again after this operation.

    int aws_socket_close(struct aws_socket *socket);

Calls `close()` on the socket and unregisters all io operations from the event loop.

    struct aws_io_handle *aws_socket_get_io_handle(struct aws_socket *socket);

Fetches the underlying io handle for use in event loop registrations and channel handlers.

    int aws_socket_set_options(struct aws_socket *socket, struct aws_socket_options *options);

Sets new socket options on the underlying socket. This is mainly useful in the context of accepting a new connection via `on_incoming_connection()`.

    int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read);

Reads from the socket. This call is non-blocking and will return `AWS_IO_SOCKET_READ_WOULD_BLOCK` if no data is available. `amount_read` is the amount of data read into `buffer`.

    int aws_socket_write(struct aws_socket *socket, const struct aws_byte_buf *buffer, size_t *written);

Writes to the socket. This call is non-blocking and will return `AWS_IO_SOCKET_WRITE_WOULD_BLOCK` if no data could be written. `written` is the amount of data read from `buffer` and successfully written to `socket`.
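Putting the calls above together, here is a minimal, hedged sketch of a TCP client flow based solely on the signatures documented in this section. The struct layouts (`aws_socket_creation_args`, the string `port` field) and function signatures follow this description and may not match newer installed headers, so check `aws/io/socket.h` before relying on it; `aws_byte_buf_from_c_str` and `AWS_ZERO_STRUCT` come from aws-c-common, and the address/port values are placeholders.

    /* Hedged sketch only: follows the socket API exactly as documented above. */
    #include <aws/common/byte_buf.h>
    #include <aws/common/zero.h>
    #include <aws/io/socket.h>
    #include <string.h>

    static void s_on_connection_established(struct aws_socket *socket, void *user_data) {
        (void)user_data;
        /* TCP connect completed; the socket is now usable for writes. */
        struct aws_byte_buf payload = aws_byte_buf_from_c_str("hello");
        size_t written = 0;
        aws_socket_write(socket, &payload, &written);
    }

    static void s_on_error(struct aws_socket *socket, int err_code, void *user_data) {
        (void)socket;
        (void)err_code;
        (void)user_data;
        /* Log the error and tear down here. */
    }

    static int s_tcp_connect_example(
        struct aws_allocator *alloc,
        struct aws_event_loop *loop,
        struct aws_socket *socket) {

        struct aws_socket_options options = {
            .type = AWS_SOCKET_STREAM, /* TCP */
            .domain = AWS_SOCKET_IPV4,
        };

        struct aws_socket_creation_args creation_args = {
            .on_connection_established = s_on_connection_established,
            .on_error = s_on_error,
        };

        if (aws_socket_init(socket, alloc, &options, loop, &creation_args)) {
            return AWS_OP_ERR;
        }

        struct aws_socket_endpoint endpoint;
        AWS_ZERO_STRUCT(endpoint);
        strncpy(endpoint.address, "127.0.0.1", sizeof(endpoint.address) - 1);
        strncpy(endpoint.port, "8080", sizeof(endpoint.port) - 1);

        /* Non-blocking for TCP: wait for on_connection_established() before writing. */
        return aws_socket_connect(socket, &endpoint);
    }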
aws-crt-python-0.20.4+dfsg/crt/aws-c-io/builder.json000066400000000000000000000014361456575232400221640ustar00rootroot00000000000000{ "name": "aws-c-io", "upstream": [ { "name": "aws-c-common" }, { "name": "aws-c-cal" }, { "name": "s2n", "targets": ["linux", "android", "openbsd"] } ], "downstream": [ { "name": "aws-c-http" }, { "name": "aws-c-mqtt" }, { "name": "aws-c-event-stream" } ], "targets": { "linux": { "_comment": "set up SoftHSM2 for PKCS#11 tests (see: ./builder/actions/pkcs11_test_setup.py)", "+pre_build_steps": ["pkcs11-test-setup"] } }, "build_env": { "LSAN_OPTIONS": "suppressions={source_dir}/tests/resources/suppressions-lsan.txt:allow_addr2line=1", "ASAN_OPTIONS": "suppressions={source_dir}/tests/resources/suppressions-asan.txt" } } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/cmake/000077500000000000000000000000001456575232400207175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/cmake/aws-c-io-config.cmake000066400000000000000000000012451456575232400246050ustar00rootroot00000000000000include(CMakeFindDependencyMacro) if (UNIX AND NOT APPLE AND NOT BYO_CRYPTO) find_dependency(s2n) endif() find_dependency(aws-c-common) find_dependency(aws-c-cal) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-io/codebuild/000077500000000000000000000000001456575232400215715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/codebuild/linux-integration-tests.yml000066400000000000000000000024311456575232400271340ustar00rootroot00000000000000version: 0.2 #this build spec assumes the manylinux1 image for pypi #additional packages we installed: cmake 3.5, libcrypto 1.1.0j, gcc 4.8.4 phases: install: commands: - add-apt-repository ppa:ubuntu-toolchain-r/test - apt-get update -y - apt-get install gcc-7 cmake ninja-build python3 -y pre_build: commands: - export CC=gcc-7 - export BUILDER_VERSION=$(cat .github/workflows/ci.yml | grep 'BUILDER_VERSION:' | sed 's/\s*BUILDER_VERSION:\s*\(.*\)/\1/') - export BUILDER_SOURCE=$(cat .github/workflows/ci.yml | grep 'BUILDER_SOURCE:' | sed 's/\s*BUILDER_SOURCE:\s*\(.*\)/\1/') - echo "Using builder version='${BUILDER_VERSION}' source='${BUILDER_SOURCE}'" - export BUILDER_HOST=https://d19elf31gohf1l.cloudfront.net build: commands: - echo Build started on `date` - aws s3 cp s3://aws-crt-test-stuff/setup_proxy_test_env.sh /tmp/setup_proxy_test_env.sh - chmod a+xr /tmp/setup_proxy_test_env.sh - python3 -c "from urllib.request import urlretrieve; urlretrieve('$BUILDER_HOST/$BUILDER_SOURCE/$BUILDER_VERSION/builder.pyz', 'builder.pyz')" - python3 builder.pyz build -p aws-c-io --cmake-extra=-DENABLE_PROXY_INTEGRATION_TESTS=ON --coverage post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/000077500000000000000000000000001456575232400205675ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/epoll_event_loop_proof.md000066400000000000000000000133161456575232400256670ustar00rootroot00000000000000# VCC Proof Signoff Verification tool: VCC (code-level proof) Proofs: `tests/vcc/` Implementation: Linux event loop (`source/linux/epoll_event_loop.c`) 
Specification / Properties (`preamble.h`): - *Memory safety*: the implementation only accesses valid memory. - *Thread safety*: threads only update objects that they own. - *Functional correctness*: sequential task and event callback execution in the presence of multiple client threads. The proofs verify that: - The scheduler for task execution is thread-local to the event loop thread (so is sequential since no other threads read or write directly to the scheduler). Tasks move from client threads to the event loop via properly-synchronized ownership transfers, using locking. - Subscribe/notify for event execution is properly-synchronized via epoll. ## Assumptions Generally, we assume well-behaved clients; the correctness of underlying primitives (memory allocation, AWS C Common library, syscalls); and, minor assumptions due to limitations in VCC. More precisely, the proofs assume: - Well-behaved client: all client threads use the event loop API in a manner that adheres to the specification. A badly-behaved client can invalidate the proven properties. For example, a client that reads, writes, or frees a task struct object that is scheduled on the event loop is racy and no longer thread safe. The specification given in `preamble.h` forbids this behavior (the ownership of the task struct changes as a result of the schedule function) but we cannot, in general, enforce this behavior since we do not verify client code. - Thread safety of the allocator functions `aws_mem_{calloc,release}`. This is important in the case where a client uses a custom allocator. - Memory safety and function contracts for the following AWS C Common functions: aws_atomic_compare_exchange_ptr aws_atomic_{init, load, store}_{int, ptr} aws_linked_list_{init, pop_front, swap_contents} aws_mutex_{lock, unlock} aws_raise_error aws_task_init aws_task_scheduler_schedule_{now, future} aws_task_scheduler_{init, run_all, clean_up, cancel_tasks, has_tasks} aws_thread_{clean_up, current_thread_id, decrement_unjoined_count, increment_unjoined_count, init, join, launch, thread_id_equal} and similarly for the AWS C-IO functions: aws_event_loop_{init_base, clean_up_base} aws_open_nonblocking_posix_pipe and similarly for the system calls: close epoll_{ctl, wait, create} eventfd read, write The contracts are given in the `preamble.h` and proof files. The contracts are assumed, not proven. The memory safety of the AWS C Common linked list functions have been proven in CBMC. - Thread safety of the epoll syscalls `epoll_{ctl, wait}`. We additionally assume that the `ctl` (subscribe) and `wait` syscalls induce "happens before" so that the litmus test (See Appendix) is data-race free and therefore properly-synchronizes event subscribe/notify. - Minor assumptions due to limitations of the VCC tool. - In `s_is_on_callers_thread` we assume the loaded value from the atomic var `running_thread_id` is thread-local and either `NULL` or the address of the owner of the event loop. We cannot make this an object invariant because the access is atomic. We manually validate that this assumption is reasonable. - In `s_run` we do not model the ownership transfer of the event loop from the client thread to the freshly-launched event loop thread. We manually validate that this assumption is reasonable. - The Sequentially Consistent Data Race Free (SC-DRF) guarantee required by the C11 standard: if a program is race-free and contains no non-SC atomic operations, then it has only SC semantics [Note 12, N1570]. 
We rely on SC-DRF to justify the use of VCC's SC memory model. We manually validate that the event loop implementation contains no non-SC atomic operations. Validation is required for pre-C11 compilers. ## Simplifications - Omit modeling of hash-table `local_data` in event loop. - The log functions `AWS_LOGF_{...}` are no-ops (hash-defined out). - Allocator functions are hash-defined to malloc/free. - In `s_destroy`, we (re-)take the `epoll_loop` pointer after stop and wait have been called. This has no semantic change to the program but is necessary for the proof. - Workarounds for VCC frontend (no semantic diff, but changes to syntax) // Function pointer declarations // For example, the following typedef int(aws_io_clock_fn)(uint64_t *timestamp); // is replaced with typedef int(* aws_io_clock_fn_ptr)(uint64_t *timestamp); // Array and struct literal initializers // For example, the following int pipe_fds[2] = {0}; // is replaced with int pipe_fds[2]; pipe_fds[0] = 0; pipe_fds[1] = 0; ## Trusted computing base - Soundness of verification tools: VCC, Boogie, Z3 - C Compiler, because the verification is at the C code-level and the properties proved may not be preserved by compilation. ## References [N1570] ISO/IEC. Programming languages – C. International standard 9899:201x, 2011 ## Appendix Assumption on "happens before" induced by `epoll_{ctl/wait}`. Informally, we need "message-passing" to hold so that the shared data passed from T1-to-T2 is guaranteed not-to-race. // Initially *data == 0 (non-atomic location) // T1 *data = 1; epoll_ctl(...); // register event // T2 if (1 == epoll_wait(...)) { // receive event r0 = *data; // guaranteed that r0==1 } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/000077500000000000000000000000001456575232400220535ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/channels_slots.graphml000066400000000000000000000352441456575232400264560ustar00rootroot00000000000000 Channel 4 Slot 1 Slot 2 Slot 3 Slot 4 Back Channel Producer/Consumer Likely An Event Loop or another handler that manages a subchannel. Back Channel Producer/Consumer User API such as an Http Connection aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/handler.graphml000066400000000000000000000255761456575232400250630ustar00rootroot00000000000000 Handler 4 V-Table: data_in ( msg ) data_out ( msg, promise, promise_user_data ) update_window ( window_size ) shutdown_notify ( error ) shutdown_direction ( direction ) initial_window_size () destroy ( ) Data: void *state Back Channel APIs (Optional) These are usually interacting with an event loop or an application layer send_message () The slot will manage passing the message to the appropriate handler Slot update_window(), shutdown_notify(), shutdown_direction() These are functions only for the handler, the channel will propogate to the appropriate handler aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/read_backpressure.graphml000066400000000000000000001030171456575232400271150ustar00rootroot00000000000000 Socket has 20kb read window due to downstream handler. It reads 16kb from the fd TLS Decrypt Data Application Protocol Data Queued Socket has 4kb window due to downstream handler. It reads 4kb TLS Decrypt Data Application Protocol Data Queued Event Loop yields control back to channel Context Switch Application Protocol Clears its internal buffer. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/channels_slots.graphml
[Diagram: "Channel 4" containing Slot 1 through Slot 4; a back-channel producer/consumer on one side (likely an event loop, or another handler that manages a subchannel) and a back-channel producer/consumer on the other side (a user API such as an HTTP connection).]

aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/handler.graphml
[Diagram: "Handler 4" with a V-Table of data_in(msg), data_out(msg, promise, promise_user_data), update_window(window_size), shutdown_notify(error), shutdown_direction(direction), initial_window_size(), destroy(), plus Data: void *state. Back-channel APIs (optional) usually interact with an event loop or an application layer. send_message(): the slot manages passing the message to the appropriate handler. update_window(), shutdown_notify(), shutdown_direction() are handler-only functions; the channel propagates them to the appropriate handler.]

aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/read_backpressure.graphml
[Diagram (read backpressure flow): the socket has a 20 KB read window due to the downstream handler and reads 16 KB from the fd; the TLS handler decrypts the data and the application-protocol data is queued. The socket then has a 4 KB window and reads 4 KB; TLS decrypts, data is queued, and the event loop yields control back to the channel (context switch). The application protocol clears its internal buffer, the TLS handler sees the new window and can take more data, and the socket handler sees the new window and schedules a read task (context switch). The socket now has a 20 KB window but read returns EAGAIN, so the event loop yields and the channel waits until the event loop notifies on an fd state change (terminal: nothing left for the channel to do until the window opens). The event loop observes the fd state change and notifies the subscriber. Edges between nodes: send_message and update_window messages.]

aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/slots.graphml
[Diagram: "Slot 4" with adj_right slot, adj_left slot, and a Handler.]

aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/graphs/typical_channel.graphml
[Diagram: the event loop processes io_handles and notifies of changes registered for listen (this could be a pipe, a socket, or really anything). "Channel 4" contains Slot 1: socket_handler, which interfaces with the event loop and platform-specific I/O APIs; Slot 2: tls_handler, which handles negotiation, then decrypts incoming and encrypts outgoing data, interfacing with the platform-specific TLS implementation; Slot 3: application_protocol_handler, which figures out framing, runs the state machinery, and exposes the API to the user. The user/application receives a completion promise.]
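The handler.graphml labels above describe a handler as a table of function pointers plus opaque state. The C sketch below is purely hypothetical: it mirrors the names in the diagram to make that shape concrete, and it is not the actual aws-c-io handler vtable (the real API lives in the aws-c-io headers).

    /* Hypothetical sketch mirroring the labels in handler.graphml.
     * This is NOT the real aws-c-io vtable; names and signatures are
     * invented here purely to illustrate the "vtable + void *state" shape. */
    #include <stddef.h>

    struct example_message;   /* stand-in for an I/O message */
    struct example_promise;   /* stand-in for a completion promise */
    struct example_handler;

    struct example_handler_vtable {
        int (*data_in)(struct example_handler *handler, struct example_message *msg);
        int (*data_out)(struct example_handler *handler, struct example_message *msg,
                        struct example_promise *promise, void *promise_user_data);
        int (*update_window)(struct example_handler *handler, size_t window_size);
        int (*shutdown_notify)(struct example_handler *handler, int error);
        int (*shutdown_direction)(struct example_handler *handler, int direction);
        size_t (*initial_window_size)(struct example_handler *handler);
        void (*destroy)(struct example_handler *handler);
    };

    struct example_handler {
        const struct example_handler_vtable *vtable;
        void *state;   /* implementation-specific data, e.g. socket or TLS state */
    };

In this shape, each slot in a channel (socket handler, TLS handler, application-protocol handler, as in typical_channel.graphml) would supply its own vtable and state.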
aws-crt-python-0.20.4+dfsg/crt/aws-c-io/docs/images/
[Binary PNG renderings of the diagrams above: channels_slots.png, handler.png, read_backpressure.png, typical_channel.png. Image data omitted.]
7}qPFnfjjݪ E؉sזǛ֓L#(%Y'GJ *_&JyN-_&"{hRVp[]Yq'@쯫d+8"_&J,OU-U5MNbQv;E9>ʱ _j8GV':n6c;XiX{,XӞuux>+i*>ۨ l Izu$6tBvuJVgIl"fVjXA(J-o@|ŻXԖ]315ؗ y:ϳ]3.t Pm:- _װ )sNV&+[ǮY%5,Y!l(#x U%O[VO7O'W:88XEg=i{^sݡ|* ^_)ZPvΛ7jxDܰgq}w mPduVeN-GЧtwAcm!~#\eZlꏉǕL'H!ֶ1qҀxp܆Mu,&n5iّVG'Ƭr õ?kv?l[>^l(+;)fɕ([36ɰά}dcELdȶXߺO V?U27 Ӆ.OާP ďYٱ_`=MzDk,ߪWMa7|f~z!;?cӳ΁`Z>!QV`EZ*~t !ZOY3cُ6=8'[y֯:tB:1`> P  W0օ>m6q:Gw׻X8i.mv6nEdQGPHS/7U

pUzE~zcaX4z.5K;8u"[uS>fq{ <$c=odvn8#6Yϛ a1kD{8M` ⠱`C'"U W> :9*+-z " kD w>qĥ" f݇tQSMqm9ްղڈ 6ktjlam|uS@<5:Z8zƬf-_E4!>>۹ ܈8T67/ؚcһ]g}om3X6T}0\=ӛsߎ qYG~N7 9}ل8hfC'a-MoB =1r#~ QUixMg8}0\-Nt]6}m۶$/]T%Mq#~ͬs_y{m P[ryM[ v}hRO_XXX|z=c~Hց6m}a?IHHؾ}={:t7n@԰LJǷ5qPtqo/Z8hfC6e*ngK6zƥzc4'/(( mwm c "]v۷/33ٳ/_V |ADt{|ҥx5J+V:԰8(n8)Gk#E4sӖrRٲs'Z"!Qs|s$-ա&MJ8(>}3ژ Aote$vhP`yS7]z@lZP4:9#nCwamlZ)k" AP So̴QSg O! 0 ! b: |qPf1?B40 ^mw\v_3+Χ<aXKY2 ݅q.qBU=`4⠙WŘ6jƈI_D4`մeG5Eդe AO!~66}^ :; 9{nzL'8JƈD#'⠙uwamzΈfU^C;hG)L20}{q:Ű6dž-1=ӠvC5mԴׄv\F@`=r&),oGlE43 ^~ҍ/ ewňN=uvCVs袵sB@`ϓtS[uYB}ŰH$38h`8Ԁ}HoދI0 ]qn3:usҶzx⠁-Zؽ0Pc~{Yb.^~-aYW78zڴG46 },2PU_v &McZY#u0Lnqr'⠿?g_1xq'?nnq|DŽ꛽} F.o8ŤBAcSa-=CAo U=Oے:/7b{oΎv6 zz ^q:Sa"Ur?oGjݓ92Ÿ`3MA{n!&0 }Vjq5Z\⠱uSa4>S*ޓ#z/odՊH̎bðNzw/ W {[kA8Tmx G3w]pCM^9q Fbq5tF!>ho'~I33Pߍ[؍=8D';Zw]0,zuā3wK8hc@ PqPӁT."l g\G01kDgsza{Z%6f0 \WrE*S_q8t% lmz @X5cV1NXu}«qT>Teݧ/w*[>\%uh Y5,aUGgk?W>{|]8-}\OTu|ROTodOqY{MZkfסהh} +5s'z>_7)s'rI _aNPyUVN֗/ri|NTWxWUoU|3aX~:*.2ONew11KsvT\X|NR;[U={fnn^XXPll, >edd"**_E@ 2D9rƹ ^^^jEZ֘1c_rttE*r膖pO A@70 D8PQQ% zϝ;ښ. AǏ߻w/TϞ=A()%%?JKKcƍ1 Hg Ⱦӵ7}@jܹAAA2 vsHׯ bA>+33NYr+>mܸriiuFF}@*''8*SnKѢ|RR AAW0蝜D*?SRREկ_F#Gr|A(???qo#% G,ݺu7vEGG"o!#WAAAXX]9t!AEEEuxkkdXPAQoٳgKx{{UD?"$++}S' v@3);; Aݻw/**~ HsΞ={x$ -((7^эA>::ZDyyyAxzxx7vݻw#;WsssLH)Ubccrҭ[7I jRs5wwwe1#@?ni4ӏ?H;%" դ򬭭:Ņ'={~c iQiii\\EtňyjLtvvv6xHb3x:G۷O/8Y@);;;((ښ^333t{{{⃃1 wsW 2Ntttttqqf m㨨(ڑ0 A>*LK,W v/+Cեlss1c`y5=[_IQKķ]Q"֘xHCsg"߻w{T*--]~{LL fW+_r%GOH9-}psI='료3S]\\@Noii@giQ^2AD{itŴCn_{Yҿ⡪h;e'%G(T3zqtt-`<U>̕KoeIzw3Y< Az.B&fCҿ⡪8;yC51cƘ`<Uߍe҂ WKK[[aҢ4t~<@8;9'GW(Tȸ8nݺEGGcLi>8Na}7=,w 3 ~̘11haa1rD[B&Pzs|t8-ðA(C7g _\و%Fɫ?׀}/29;G&$$! V2';`ؐ|.ϕv৿ y?faEða>{ǟӧOge ;ȑ# \SEذK'°!ND~;`g*>s??믿8{$///o  Havڹ ȴҮ[rAX ß&ʒr aL魤f*>XZ|/>|o?}]򕪴411yCmaj]'a d~% ?Η(> %hI}tDaaݻw ܹ}T+Ň +-ðu=/ Mug?ðᚎgz^|+Wܾ}_?otWIcƌƿ`!Hf WZa2zN  '{HZðP,ZotܹswyQii):www_1Q]0 疲N%/9;;8ڵk'} DK߸qGݺu===ccc1n2.]Z+H^i ( ^6S°LfʮÇ,,,;;Tg¤7vN d~->, ]6=}ðH,s\  ufii뛔m ߳3 2 ]8 la>"o5 @UTTiaaMlN} e6 nAQ@<"~ŅxCWiiΝ;mmmiR""ՎrH 3 2 ](]?!uMڷeA^U%>RV 3|QYċ{~ޖJ˘i+-c0l@w;i)3ZgESHUY*ݾ|/*׀xvϧ988῰P k[^i {g8 :)\.i+o˿M/㷹cv0\l^'7̙{N|ua#M^;O;QJ+oR ҮөF=plJm'\9}ݺuKHH(**BX ]){vWZa2NW2-ՙWMM8;li *Xm@*G۱N"j9K-=־ek@ٳgaaaNNNa1U%T=::Mze#;+-ðuoWV|02TJ%S v][UXCI]|oo͍077' Z~=PYSW6 d~-#Qv!=!\t{u'&8~z"֪-S)%5f]Ɗ9ːz6 TJ%۷߆N3iH'Tczx.=־ek@<ɓ''DEEw֍gϞsݹs'`zH]eNdz3 2 ]0I=/)рnm|;uzl,w/P;^įR??™[?NOФa#_MGs+*K;sRu{ԣҝ^ T0}dd1GHHHRR'T}׳3 2 ]nq@I4um|h D ⿞eޙ[޺۪It[0ՕN1};(^LL2inpnk@G=z(ݩLd///KKKbzOOOzcСHh[a2% /H,>Q `PĂ3Wi:mi3uek@<⽽-,,|||bcc3220ؕ1NzkvWZa2)矻65pĿ8ek@Y:k* [=ȳs])V3 Ҳ( l)]S4;+- #AmGlF Ytr ;v3ļg?~_ޙKmCsP~.vttֽ26S=pnAoNu:pk- 9.ЁKIwߊO)N8k'W8˖[SEѥ/ l:S(D-^V'~#=<aX+ğ:ujر&LQETEFFOo^RRffu6LG^fMMn999 1cpջ/Y$11v[;+- ;og6gӫk]K^HX>w3^(qDwĆu:n@R:?&.ѓ[ŭ9e/JT~ʇ2tk!L-ڵIǯ/y}mO)?HMlݶE}JSVMAT;Us.\̨d@T4_tŮ, ÓaXO'JONNnذu=zɓM6͞=V ^SZ~6yzzR9;vj훧za=?ήnݺ>>>O|W6"=a蕖Ea16~jQ]M(Źu',es/oj~[6n^&аvng;b1enҧM&b' ðH,s\6666l?$=zKDBǏϙ3SRR"""F|r~E.O`` ٲe Č;v„ ]'ORʼy5rr-%f͚AɶWu7o)++JӧOs,Y2fzsss/rtt ܸq#}tT-gW&u%s+Ore c%XJR^E-s6/9\QsLnn.l2*X6`3ZDP:l0n;ر*R]wU+++j FR@r\GEEѝK% Q>ᾗQ>7=PLrrr0O!޺$y ,=}N6%&k&PSI(کtW@hέB%s !>}mf etm۶V >|a'OðH,s\V?"555GM;v…rFZhLS.n„ iʚ!גy*BNJ*\fZZ6|k%mNdd$mQ%+b7^pƾ&KGWpr%X'r|0ހJ!\V]HLm!^1]%tczy"5^ ܏t'@ SRh4_:=o8[Xi0EbZ|jbb*`c:n;%R N \<+"ZY.իg?Q"Q2W ZA /kYfʶ00`ʕ6ӽ{)tw;99֭[^]\\(¢~ --MB-"諧fȈ֣2?KZ 13 vbW#*z7bؔB_"e#ʗu{.bkV!چprWCkzv`#j8C֩*y %PcN2낁*(!?aX?}pSp6񽽽=644V-h2\N%)(ʚ&ixɮfB<76͍XSBAGhmm-HׯoW={zdZeeLNqqq[n?9?]pز| ̔u~+RxqL:]+f~ 9bkʞ@FðH,s\rv:c׭[}&M9Bˏ="凣R|/5?Ѕ_]~L [O:uG5Z ^UC3tlj11999 +W!>4[Bqy(_aIũLn@?WK~~>UΘ,,,؊A_]JW!~u~t[pUdכϒz.% Sr):j_~ ,s\ue˖:sӦgh  Ԥ[.l׮]ܨ={}Cmܸ1w۰uV+++Zw۶mQ-f!>++lm۶z sHw@5艉A|@@1=7}GGG~n̏D#yyy˛f: 
WЅUeO]%MyV6ա-ɣ{[ޮ˞cZU9ku5`P[ƍ?p*.-}hl5vq-'j+1 iVSZ< {:j 5U-skXH!!!x7bRףV$l\H:Zh`jTiFQՀYC[2PvU*_q4T-n9 (n2N],x/S4zM[+tؾБ[^3AMhas>ޖ/}ٕ_7={dϊJR>LYM0Eue=ox]UPDC:ЦQ\bzӤVz6M=+2?ׂr!6d.tD'eRZ^m\7IwmB)_6 kg8[ق>*A+_.⎡~]6촗n[Hx ׀x5iӦ2r͝;cҩC>  VvLOA|p{n9tMic;Ouz;§ezxiэu])Rvk딾|RJݫYҎ9Z7yLկX5ځוk<PIZhJ2mJe6mmLUgй+Z^9U0kH (n%V-6RqZ6Tmw6]ܯcEoGP6uX\w .ޡ^f[s׀xT^^CttGa@@| =ԫVx[g6M:/+y#%~E :R۱-iI$EΞ۸ ,3_'&ͩrlTP*i6 o՜[~C/ԊN2suQf.v`knVʶT. ܲ*m[ SVlߣQ 'mnlT+|t㧘nx4Ss.a[l-'n@y&Q#W%֫#gKVWϤd@+}?`ʚe,l Be+iT>-zn׺TUk,35ȩ.JrܐJEu#m#W\@eʰ=Tne˗ͬcT)9®Մek@<A)@< Cѳd4Uf? ̞d~dM9 ,=N6D8J=X =l({H2VWLuhӛgy7FنՍTϐ{ӽ⛭m*jy U X̗ۣ"Y  @<  _pX']|cS_J-TT↨CuUg Qܖ6T÷#V-chvDr6x0 3ú07FV1HlºK9O!0 Vy+6v(~po#  @< a6  @< 4虛 ѓZ y_*i~ғh 6&X!0 ^W}3Ti] 5SƖ]iY3BډVPS}ׂ_;Uv@<Ax帤Jd dU"~BoIDmwܜ6' V$~ ÀxA:_I]61{sv9GwagS2ʕb!Φ 3_?خ n(xZ= Vl!VTLݟ\)e * &AB֌cӮs3/-ϐ@Y.:UZ;_ʱ4v<{GVWA$KJ+RjUZ*&8YOwfF0 dگ3wͦ_iaC cTlClKDnT> &eJ麲X`9Dxv*VZ4XDHJ8X7gUPĩ+[Q0]Y;ehn8TV{N?JT(nSּJk':&䫖PEX!:r(M6/W4lh 񊅨h\԰ؽ"m"2ݖs}A 5= |F;`t8`!s$ "%M K8 &]ZN-_lEe*)-++(rGĝr 橮 )b\" MOH۵U lh 񊅨noj ;  @< k]:XҺ){(Ws:cBldjOgg Izu$6Z(ZJيҕS vXEזK$(:nA<'ˬl`'G;W#BWi!e''Y u'TjhrU UxMOD56, H(ڵh1S'x0 !īG1l2., n`!>)ϯ:WAErneEŁ;bŒY& dkƱc Bzl4D=P]L@|MC|8)C19&%":$Jn$9X`EIPe#س{,Y+Wig)3݇䆕 !faUء$ioW@ְ'[s N%SfLJ#X/-ta͓{.G6\0Ί#m6:z}_ g[LR%ڦ,{/xZ\iC!B|:oKo4oZ͟[RiMd}Â=C `JKVxm#=9@J৒ە=5+j>MҩeE'+"X/W Ap8 i_NgC}Q3xܟ~QQק+fL* '!2Pb4g>60,g;EZ^P2)PRƢd\V ,iݔ=ܞ>j(,WAU;cgD<;TdXz)_[M~]J!^^#e?RQ`!~d6օD WmPHUWx \7 b]tz99uʃ aC1sylq^D`wv*Yl(#B縭0˕HT-gbw*ͮ|H `oNF(LSq1V򃿕^) ֫>+ZY!BAtu Q} x.%|=ARgki֝yntkK!ll8igH/DkzWtVvHAZgb\714IJV@P *^͔Ҥ?PX'dZ7b?VISۧ;|abcUU,fYg?S/e,.Xl4_WRxŘxZ1!%^X!Hzna'57u_ O۵ v<}s6H)άC׳1QKx&ܟ4f&llG;֒ӰSuēXWwظnt>+G&}T3MpS(6FYjB I|_X\(8hS!lP>}J[:KFB<  Rt[kl\͉\g 뉏,`hhG%O== |G2uZ+3}gl%K4bm{Ql?V^~zWj1U߂U,V5Ir*-L7`\<@oi.=2IzOoN_XL;$)8QV#ZyRčTUcd'tm&ԫ݅S!?^O-M"G/G)zOzd4SxkM~R?\%O%8֩ Adh0  @< Àx 0 ! 0  @< Àx 0 ! 0  @< Àx Mg͞ aX;CAH!DitN&x !spt i|p 9 )otCA!̇ғs\}`X&|ӛݙǛm@<AfNa[*Æm:φ#:{='x !} FsÆOOrn4 mRSSA$N/vGl\͉\g` `dYFQLo3ⳲΞ={u@<Aj G&IO:MЯ Æa:ZMgϠ|S'gwԾ} /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/000077500000000000000000000000001456575232400212625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/000077500000000000000000000000001456575232400220545ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/000077500000000000000000000000001456575232400224635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/async_stream.h000066400000000000000000000076311456575232400253330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_IO_ASYNC_STREAM_H #define AWS_IO_ASYNC_STREAM_H /** * THIS IS AN EXPERIMENTAL AND UNSTABLE API * TODO: logging * TODO: modify API to return byte-bufs, instead of filling in the provided byte-buf? * this would avoid a copy in the use-cases we know of, but it's more complex * TODO: vtable acquire()/release()? * TODO: protect against simultaneous reads? * TODO: check results of vtable->read() (i.e. 0 byte reads not allowed)? 
* this would require 1 or 2 additional allocations per read */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_async_input_stream; struct aws_byte_buf; struct aws_future_bool; struct aws_input_stream; struct aws_async_input_stream { const struct aws_async_input_stream_vtable *vtable; struct aws_allocator *alloc; struct aws_ref_count ref_count; void *impl; }; struct aws_async_input_stream_vtable { /** * Destroy the stream, its refcount has reached 0. */ void (*destroy)(struct aws_async_input_stream *stream); /** * Read once into the buffer. * Complete the read when at least 1 byte is read, the buffer is full, or EOF is reached. * Do not resize the buffer (do not use "aws_byte_buf_xyz_dynamic()" functions) * Do not assume that buffer len starts at 0. * You may assume that read() won't be called again until the current one completes. * You may assume that the buffer has some space available. * Return a future, which will contain an error code if something went wrong, * or a result bool indicating whether EOF has been reached. */ struct aws_future_bool *(*read)(struct aws_async_input_stream *stream, struct aws_byte_buf *dest); }; AWS_EXTERN_C_BEGIN /** * Initialize aws_async_input_stream "base class" */ AWS_IO_API void aws_async_input_stream_init_base( struct aws_async_input_stream *stream, struct aws_allocator *alloc, const struct aws_async_input_stream_vtable *vtable, void *impl); /** * Increment reference count. * You may pass in NULL (has no effect). * Returns whatever pointer was passed in. */ AWS_IO_API struct aws_async_input_stream *aws_async_input_stream_acquire(struct aws_async_input_stream *stream); /** * Decrement reference count. * You may pass in NULL (has no effect). * Always returns NULL. */ AWS_IO_API struct aws_async_input_stream *aws_async_input_stream_release(struct aws_async_input_stream *stream); /** * Read once from the async stream into the buffer. * The read completes when at least 1 byte is read, the buffer is full, or EOF is reached. * Depending on implementation, the read could complete at any time. * It may complete synchronously. It may complete on another thread. * Returns a future, which will contain an error code if something went wrong, * or a result bool indicating whether EOF has been reached. * * WARNING: The buffer must have space available. * WARNING: Do not read again until the previous read is complete. */ AWS_IO_API struct aws_future_bool *aws_async_input_stream_read(struct aws_async_input_stream *stream, struct aws_byte_buf *dest); /** * Read repeatedly from the async stream until the buffer is full, or EOF is reached. * Depending on implementation, this could complete at any time. * It may complete synchronously. It may complete on another thread. * Returns a future, which will contain an error code if something went wrong, * or a result bool indicating whether EOF has been reached. * * WARNING: The buffer must have space available. * WARNING: Do not read again until the previous read is complete. */ AWS_IO_API struct aws_future_bool *aws_async_input_stream_read_to_fill( struct aws_async_input_stream *stream, struct aws_byte_buf *dest); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_ASYNC_STREAM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/channel.h000066400000000000000000000462731456575232400242600ustar00rootroot00000000000000#ifndef AWS_IO_CHANNEL_H #define AWS_IO_CHANNEL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
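 *
 * As a quick orientation for this header: work that must run on a channel's event-loop
 * thread is typically wrapped in an aws_channel_task and scheduled with the functions
 * declared below. A minimal sketch (illustrative only; `struct my_state` is an assumed
 * application type that embeds an aws_channel_task member named `task`, and `state` and
 * `channel` are assumed to exist; error handling is omitted):
 *
 *     static void s_my_task_fn(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
 *         (void)task;
 *         if (status != AWS_TASK_STATUS_RUN_READY) {
 *             return; // canceled, e.g. because the channel is shutting down
 *         }
 *         struct my_state *state = arg;
 *         (void)state;
 *         // ... do work on the channel's event-loop thread ...
 *     }
 *
 *     // from any thread:
 *     aws_channel_task_init(&state->task, s_my_task_fn, state, "my_task");
 *     aws_channel_schedule_task_now(channel, &state->task);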
*/ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_channel_direction { AWS_CHANNEL_DIR_READ, AWS_CHANNEL_DIR_WRITE, }; struct aws_channel; struct aws_channel_slot; struct aws_channel_handler; struct aws_event_loop; struct aws_event_loop_local_object; typedef void(aws_channel_on_setup_completed_fn)(struct aws_channel *channel, int error_code, void *user_data); /* Callback called when a channel is completely shutdown. error_code refers to the reason the channel was closed. */ typedef void(aws_channel_on_shutdown_completed_fn)(struct aws_channel *channel, int error_code, void *user_data); struct aws_channel_slot { struct aws_allocator *alloc; struct aws_channel *channel; struct aws_channel_slot *adj_left; struct aws_channel_slot *adj_right; struct aws_channel_handler *handler; size_t window_size; size_t upstream_message_overhead; size_t current_window_update_batch_size; }; struct aws_channel_task; typedef void(aws_channel_task_fn)(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status); struct aws_channel_task { struct aws_task wrapper_task; aws_channel_task_fn *task_fn; void *arg; const char *type_tag; struct aws_linked_list_node node; }; struct aws_channel_handler_vtable { /** * Called by the channel when a message is available for processing in the read direction. It is your * responsibility to call aws_mem_release(message->allocator, message); on message when you are finished with it. * * Also keep in mind that your slot's internal window has been decremented. You'll want to call * aws_channel_slot_increment_read_window() at some point in the future if you want to keep receiving data. */ int (*process_read_message)( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); /** * Called by the channel when a message is available for processing in the write direction. It is your * responsibility to call aws_mem_release(message->allocator, message); on message when you are finished with it. */ int (*process_write_message)( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); /** * Called by the channel when a downstream handler has issued a window increment. You'll want to update your * internal state and likely propagate a window increment message of your own by calling * 'aws_channel_slot_increment_read_window()' */ int (*increment_read_window)(struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size); /** * The channel calls shutdown on all handlers twice, once to shut down reading, and once to shut down writing. * Shutdown always begins with the left-most handler, and proceeds to the right with dir set to * AWS_CHANNEL_DIR_READ. Then shutdown is called on handlers from right to left with dir set to * AWS_CHANNEL_DIR_WRITE. * * The shutdown process does not need to complete immediately and may rely on scheduled tasks. * The handler must call aws_channel_slot_on_handler_shutdown_complete() when it is finished, * which propagates shutdown to the next handler. If 'free_scarce_resources_immediately' is true, * then resources vulnerable to denial-of-service attacks (such as sockets and file handles) * must be closed immediately before the shutdown() call returns. */ int (*shutdown)( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately); /** * Called by the channel when the handler is added to a slot, to get the initial window size. 
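 *
 * For example, a handler that never wants to apply read back pressure might implement
 * this as a trivial function (an illustrative sketch, not a required pattern):
 *
 *     static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) {
 *         (void)handler;
 *         return SIZE_MAX; // effectively "no window limit"
 *     }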
*/ size_t (*initial_window_size)(struct aws_channel_handler *handler); /** Called by the channel anytime a handler is added or removed, provides a hint for downstream * handlers to avoid message fragmentation due to message overhead. */ size_t (*message_overhead)(struct aws_channel_handler *handler); /** * Clean up any resources and deallocate yourself. The shutdown process will already be completed before this * function is called. */ void (*destroy)(struct aws_channel_handler *handler); /** * Directs the channel handler to reset all of the internal statistics it tracks about itself. */ void (*reset_statistics)(struct aws_channel_handler *handler); /** * Adds a pointer to the handler's internal statistics (if they exist) to a list of statistics structures * associated with the channel's handler chain. */ void (*gather_statistics)(struct aws_channel_handler *handler, struct aws_array_list *stats_list); /* * If this handler represents a source of data (like the socket_handler), then this will trigger a read * from the data source. */ void (*trigger_read)(struct aws_channel_handler *handler); }; struct aws_channel_handler { struct aws_channel_handler_vtable *vtable; struct aws_allocator *alloc; struct aws_channel_slot *slot; void *impl; }; /** * Args for creating a new channel. * event_loop to use for IO and tasks. on_setup_completed will be invoked when * the setup process is finished It will be executed in the event loop's thread. * on_shutdown_completed will be executed upon channel shutdown. * * enable_read_back_pressure toggles whether or not back pressure will be applied in the channel. * Leave this option off unless you're using something like reactive-streams, since it is a slight throughput * penalty. * * Unless otherwise * specified all functions for channels and channel slots must be executed within that channel's event-loop's thread. **/ struct aws_channel_options { struct aws_event_loop *event_loop; aws_channel_on_setup_completed_fn *on_setup_completed; aws_channel_on_shutdown_completed_fn *on_shutdown_completed; void *setup_user_data; void *shutdown_user_data; bool enable_read_back_pressure; }; AWS_EXTERN_C_BEGIN extern AWS_IO_API size_t g_aws_channel_max_fragment_size; /** * Initializes channel_task for use. */ AWS_IO_API void aws_channel_task_init( struct aws_channel_task *channel_task, aws_channel_task_fn *task_fn, void *arg, const char *type_tag); /** * Allocates new channel, Unless otherwise specified all functions for channels and channel slots must be executed * within that channel's event-loop's thread. channel_options are copied. */ AWS_IO_API struct aws_channel *aws_channel_new(struct aws_allocator *allocator, const struct aws_channel_options *creation_args); /** * Mark the channel, along with all slots and handlers, for destruction. * Must be called after shutdown has completed. * Can be called from any thread assuming 'aws_channel_shutdown()' has completed. * Note that memory will not be freed until all users which acquired holds on the channel via * aws_channel_acquire_hold(), release them via aws_channel_release_hold(). */ AWS_IO_API void aws_channel_destroy(struct aws_channel *channel); /** * Initiates shutdown of the channel. Shutdown will begin with the left-most slot. Each handler will invoke * 'aws_channel_slot_on_handler_shutdown_complete' once they've finished their shutdown process for the read direction. * Once the right-most slot has shutdown in the read direction, the process will start shutting down starting on the * right-most slot. 
Once the left-most slot has shutdown in the write direction, 'callbacks->shutdown_completed' will be * invoked in the event loop's thread. * * This function can be called from any thread. */ AWS_IO_API int aws_channel_shutdown(struct aws_channel *channel, int error_code); /** * Prevent a channel's memory from being freed. * Any number of users may acquire a hold to prevent a channel and its handlers from being unexpectedly freed. * Any user which acquires a hold must release it via aws_channel_release_hold(). * Memory will be freed once all holds are released and aws_channel_destroy() has been called. */ AWS_IO_API void aws_channel_acquire_hold(struct aws_channel *channel); /** * Release a hold on the channel's memory, allowing it to be freed. * This may be called before or after aws_channel_destroy(). */ AWS_IO_API void aws_channel_release_hold(struct aws_channel *channel); /** * Allocates and initializes a new slot for use with the channel. If this is the first slot in the channel, it will * automatically be added to the channel as the first slot. For all subsequent calls on a given channel, the slot will * need to be added to the channel via. the aws_channel_slot_insert_right(), aws_channel_slot_insert_end(), and * aws_channel_slot_insert_left() APIs. */ AWS_IO_API struct aws_channel_slot *aws_channel_slot_new(struct aws_channel *channel); /** * Fetches the event loop the channel is a part of. */ AWS_IO_API struct aws_event_loop *aws_channel_get_event_loop(struct aws_channel *channel); /** * Fetches the current timestamp from the event-loop's clock, in nanoseconds. */ AWS_IO_API int aws_channel_current_clock_time(struct aws_channel *channel, uint64_t *time_nanos); /** * Retrieves an object by key from the event loop's local storage. */ AWS_IO_API int aws_channel_fetch_local_object( struct aws_channel *channel, const void *key, struct aws_event_loop_local_object *obj); /** * Stores an object by key in the event loop's local storage. */ AWS_IO_API int aws_channel_put_local_object( struct aws_channel *channel, const void *key, const struct aws_event_loop_local_object *obj); /** * Removes an object by key from the event loop's local storage. */ AWS_IO_API int aws_channel_remove_local_object( struct aws_channel *channel, const void *key, struct aws_event_loop_local_object *removed_obj); /** * Acquires a message from the event loop's message pool. size_hint is merely a hint, it may be smaller than you * requested and you are responsible for checking the bounds of it. If the returned message is not large enough, you * must send multiple messages. */ AWS_IO_API struct aws_io_message *aws_channel_acquire_message_from_pool( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint); /** * Schedules a task to run on the event loop as soon as possible. * This is the ideal way to move a task into the correct thread. It's also handy for context switches. * This function is safe to call from any thread. * * If called from the channel's event loop, the task will get directly added to the run-now list. * If called from outside the channel's event loop, the task will go into a cross-thread task queue. * * If tasks must be serialized relative to some source synchronization, you may not want to use this API * because tasks submitted from the event loop thread can "jump ahead" of tasks submitted from external threads * due to this optimization. 
If this is a problem, you can either refactor your submission logic or use * the aws_channel_schedule_task_now_serialized variant which does not perform this optimization. * * The task should not be cleaned up or modified until its function is executed. */ AWS_IO_API void aws_channel_schedule_task_now(struct aws_channel *channel, struct aws_channel_task *task); /** * Schedules a task to run on the event loop as soon as possible. * * This variant always uses the cross thread queue rather than conditionally skipping it when already in * the destination event loop. While not "optimal", this allows us to serialize task execution no matter where * the task was submitted from: if you are submitting tasks from a critical section, the serialized order that you * submit is guaranteed to be the order that they execute on the event loop. * * The task should not be cleaned up or modified until its function is executed. */ AWS_IO_API void aws_channel_schedule_task_now_serialized(struct aws_channel *channel, struct aws_channel_task *task); /** * Schedules a task to run on the event loop at the specified time. * This is the ideal way to move a task into the correct thread. It's also handy for context switches. * Use aws_channel_current_clock_time() to get the current time in nanoseconds. * This function is safe to call from any thread. * * The task should not be cleaned up or modified until its function is executed. */ AWS_IO_API void aws_channel_schedule_task_future( struct aws_channel *channel, struct aws_channel_task *task, uint64_t run_at_nanos); /** * Instrument a channel with a statistics handler. While instrumented with a statistics handler, the channel * will periodically report per-channel-handler-specific statistics about handler performance and state. * * Assigning a statistics handler to a channel is a transfer of ownership -- the channel will clean up * the handler appropriately. Statistics handlers may be changed dynamically (for example, the upgrade * from a vanilla http channel to a websocket channel), but this function may only be called from the * event loop thread that the channel is a part of. * * The first possible hook to set a statistics handler is the channel's creation callback. */ AWS_IO_API int aws_channel_set_statistics_handler(struct aws_channel *channel, struct aws_crt_statistics_handler *handler); /** * Returns true if the caller is on the event loop's thread. If false, you likely need to use * aws_channel_schedule_task(). This function is safe to call from any thread. */ AWS_IO_API bool aws_channel_thread_is_callers_thread(struct aws_channel *channel); /** * Sets the handler for a slot, the slot will also call get_current_window_size() and propagate a window update * upstream. */ AWS_IO_API int aws_channel_slot_set_handler(struct aws_channel_slot *slot, struct aws_channel_handler *handler); /** * Removes slot from the channel and deallocates the slot and its handler. */ AWS_IO_API int aws_channel_slot_remove(struct aws_channel_slot *slot); /** * Replaces remove with new_slot. Deallocates remove and its handler. */ AWS_IO_API int aws_channel_slot_replace(struct aws_channel_slot *remove, struct aws_channel_slot *new_slot); /** * inserts 'to_add' to the position immediately to the right of slot. Note that the first call to * aws_channel_slot_new() adds it to the channel implicitly. */ AWS_IO_API int aws_channel_slot_insert_right(struct aws_channel_slot *slot, struct aws_channel_slot *to_add); /** * Inserts to 'to_add' the end of the channel. 
Note that the first call to * aws_channel_slot_new() adds it to the channel implicitly. */ AWS_IO_API int aws_channel_slot_insert_end(struct aws_channel *channel, struct aws_channel_slot *to_add); /** * inserts 'to_add' to the position immediately to the left of slot. Note that the first call to * aws_channel_slot_new() adds it to the channel implicitly. */ AWS_IO_API int aws_channel_slot_insert_left(struct aws_channel_slot *slot, struct aws_channel_slot *to_add); /** * Sends a message to the adjacent slot in the channel based on dir. Also does window size checking. * * NOTE: if this function returns an error code, it is the caller's responsibility to release message * back to the pool. If this function returns AWS_OP_SUCCESS, the recipient of the message has taken * ownership of the message. So, for example, don't release a message to the pool and then return an error. * If you encounter an error condition in this case, shutdown the channel with the appropriate error code. */ AWS_IO_API int aws_channel_slot_send_message( struct aws_channel_slot *slot, struct aws_io_message *message, enum aws_channel_direction dir); /** * Convenience function that invokes aws_channel_acquire_message_from_pool(), * asking for the largest reasonable DATA message that can be sent in the write direction, * with upstream overhead accounted for. */ AWS_IO_API struct aws_io_message *aws_channel_slot_acquire_max_message_for_write(struct aws_channel_slot *slot); /** * Issues a window update notification upstream (to the left.) */ AWS_IO_API int aws_channel_slot_increment_read_window(struct aws_channel_slot *slot, size_t window); /** * Called by handlers once they have finished their shutdown in the 'dir' direction. Propagates the shutdown process * to the next handler in the channel. */ AWS_IO_API int aws_channel_slot_on_handler_shutdown_complete( struct aws_channel_slot *slot, enum aws_channel_direction dir, int err_code, bool free_scarce_resources_immediately); /** * Initiates shutdown on slot. callbacks->on_shutdown_completed will be called * once the shutdown process is completed. */ AWS_IO_API int aws_channel_slot_shutdown( struct aws_channel_slot *slot, enum aws_channel_direction dir, int err_code, bool free_scarce_resources_immediately); /** * Fetches the downstream read window. This gives you the information necessary to honor the read window. If you call * send_message() and it exceeds this window, the message will be rejected. */ AWS_IO_API size_t aws_channel_slot_downstream_read_window(struct aws_channel_slot *slot); /** Fetches the current overhead of upstream handlers. This provides a hint to avoid fragmentation if you care. */ AWS_IO_API size_t aws_channel_slot_upstream_message_overhead(struct aws_channel_slot *slot); /** * Calls destroy on handler's vtable */ AWS_IO_API void aws_channel_handler_destroy(struct aws_channel_handler *handler); /** * Calls process_read_message on handler's vtable */ AWS_IO_API int aws_channel_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); /** * Calls process_write_message on handler's vtable. */ AWS_IO_API int aws_channel_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message); /** * Calls on_window_update on handler's vtable. 
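 *
 * In practice a handler usually opens the window back up via its own slot once it has
 * consumed data, e.g. (illustrative sketch; `slot` and `bytes_consumed` are assumed to
 * be the handler's slot and the number of bytes just processed):
 *
 *     if (aws_channel_slot_increment_read_window(slot, bytes_consumed)) {
 *         // raise/log aws_last_error() and shut the channel down
 *     }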
*/ AWS_IO_API int aws_channel_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size); /** * calls shutdown_direction on handler's vtable. */ AWS_IO_API int aws_channel_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately); /** * Calls initial_window_size on handler's vtable. */ AWS_IO_API size_t aws_channel_handler_initial_window_size(struct aws_channel_handler *handler); AWS_IO_API struct aws_channel_slot *aws_channel_get_first_slot(struct aws_channel *channel); /** * A way for external processes to force a read by the data-source channel handler. Necessary in certain cases, like * when a server channel finishes setting up its initial handlers, a read may have already been triggered on the * socket (the client's CLIENT_HELLO tls payload, for example) and absent further data/notifications, this data * would never get processed. */ AWS_IO_API int aws_channel_trigger_read(struct aws_channel *channel); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_CHANNEL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/channel_bootstrap.h000066400000000000000000000323531456575232400263470ustar00rootroot00000000000000#ifndef AWS_IO_CHANNEL_BOOTSTRAP_H #define AWS_IO_CHANNEL_BOOTSTRAP_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_client_bootstrap; struct aws_socket; struct aws_socket_options; struct aws_socket_endpoint; /** * Generic event function for channel lifecycle events. * * Callbacks are provided for: * (1) Channel creation * (2) Channel setup - If TLS is being used, this function is called once the socket has connected, the channel has * been initialized, and TLS has been successfully negotiated. A TLS handler has already been added to the channel. If * TLS negotiation fails, this function will be called with the corresponding error code. If TLS is not being used, this * function is called once the socket has connected and the channel has been initialized. * (3) Channel shutdown * * These callbacks are always invoked within the thread of the event-loop that the channel is assigned to. * * This function does NOT always imply "success" -- if error_code is AWS_OP_SUCCESS then everything was successful, * otherwise an error condition occurred. */ typedef void(aws_client_bootstrap_on_channel_event_fn)( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data); /** * If ALPN is being used this function will be invoked by the channel once an ALPN message is received. The returned * channel_handler will be added to, and managed by, the channel. */ typedef struct aws_channel_handler *(aws_channel_on_protocol_negotiated_fn)( struct aws_channel_slot *new_slot, struct aws_byte_buf *protocol, void *user_data); struct aws_tls_connection_options; struct aws_event_loop_group; /** * Called after client bootstrap has been completely cleaned up, after its last refcount is released. */ typedef void aws_client_bootstrap_shutdown_complete_fn(void *user_data); /** * aws_client_bootstrap handles creation and setup of channels that communicate via socket with a specific endpoint. 
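 *
 * Creating one is typically a matter of filling in aws_client_bootstrap_options and
 * calling aws_client_bootstrap_new(), declared below. A minimal sketch (illustrative
 * only; `allocator`, `el_group`, and `resolver` are assumed to have been created already):
 *
 *     struct aws_client_bootstrap_options options = {
 *         .event_loop_group = el_group,
 *         .host_resolver = resolver,
 *     };
 *     struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &options);
 *     if (!bootstrap) {
 *         // inspect aws_last_error()
 *     }
 *     // ... use it to create socket channels ...
 *     aws_client_bootstrap_release(bootstrap);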
*/ struct aws_client_bootstrap { struct aws_allocator *allocator; struct aws_event_loop_group *event_loop_group; struct aws_host_resolver *host_resolver; struct aws_host_resolution_config host_resolver_config; aws_channel_on_protocol_negotiated_fn *on_protocol_negotiated; struct aws_ref_count ref_count; aws_client_bootstrap_shutdown_complete_fn *on_shutdown_complete; void *user_data; }; /** * aws_client_bootstrap creation options. */ struct aws_client_bootstrap_options { /* Required. Must outlive the client bootstrap. */ struct aws_event_loop_group *event_loop_group; /* Required. Must outlive the client bootstrap. */ struct aws_host_resolver *host_resolver; /* Optional. If none is provided then default settings are used. * This object is deep-copied by bootstrap. * */ const struct aws_host_resolution_config *host_resolution_config; /* Optional. If provided, callback is invoked when client bootstrap has completely shut down. */ aws_client_bootstrap_shutdown_complete_fn *on_shutdown_complete; /* Optional. Passed to callbacks */ void *user_data; }; struct aws_server_bootstrap; /** * If TLS is being used, this function is called once the socket has received an incoming connection, the channel has * been initialized, and TLS has been successfully negotiated. A TLS handler has already been added to the channel. If * TLS negotiation fails, this function will be called with the corresponding error code. * * If TLS is not being used, this function is called once the socket has received an incoming connection and the channel * has been initialized. * * This function is always called within the thread of the event-loop that the new channel is assigned to upon success. * * On failure, the channel might not be assigned to an event loop yet, and will thus be invoked on the listener's * event-loop thread. * * This function does NOT mean "success", if error_code is AWS_OP_SUCCESS then everything was successful, otherwise an * error condition occurred. * * If an error occurred, you do not need to shutdown the channel. The `aws_channel_client_shutdown_callback` will be * invoked once the channel has finished shutting down. */ typedef void(aws_server_bootstrap_on_accept_channel_setup_fn)( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data); /** * Once the channel shuts down, this function will be invoked within the thread of * the event-loop that the channel is assigned to. * * Note: this function is only invoked if the channel was successfully setup, * e.g. aws_server_bootstrap_on_accept_channel_setup_fn() was invoked without an error code. */ typedef void(aws_server_bootstrap_on_accept_channel_shutdown_fn)( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data); /** * Once the server listener socket is finished destroying, and all the existing connections are closed, this fuction * will be invoked. */ typedef void( aws_server_bootstrap_on_server_listener_destroy_fn)(struct aws_server_bootstrap *bootstrap, void *user_data); /** * aws_server_bootstrap manages listening sockets, creating and setting up channels to handle each incoming connection. */ struct aws_server_bootstrap { struct aws_allocator *allocator; struct aws_event_loop_group *event_loop_group; aws_channel_on_protocol_negotiated_fn *on_protocol_negotiated; struct aws_ref_count ref_count; }; /** * Socket-based channel creation options. 
* * bootstrap - configs name resolution and which event loop group the connection will be seated into * host_name - host to connect to; if a dns address, will be resolved prior to connecting * port - port to connect to * socket_options - socket properties, including type (tcp vs. udp vs. unix domain) and connect timeout. TLS * connections are currently restricted to tcp (AWS_SOCKET_STREAM) only. * tls_options - (optional) tls context to apply after connection establishment. If NULL, the connection will * not be protected by TLS. * creation_callback - (optional) callback invoked when the channel is first created. This is always right after * the connection was successfully established. *Does NOT* get called if the initial connect failed. * setup_callback - callback invoked once the channel is ready for use and TLS has been negotiated or if an error * is encountered * shutdown_callback - callback invoked once the channel has shutdown. * enable_read_back_pressure - controls whether or not back pressure will be applied in the channel * user_data - arbitrary data to pass back to the various callbacks * requested_event_loop - if set, the connection will be placed on the requested event loop rather than one * chosen internally from the bootstrap's associated event loop group. It is an error to pass in an event loop * that is not associated with the bootstrap's event loop group. * * Immediately after the `shutdown_callback` returns, the channel is cleaned up automatically. All callbacks are invoked * in the thread of the event-loop that the new channel is assigned to. * */ struct aws_socket_channel_bootstrap_options { struct aws_client_bootstrap *bootstrap; const char *host_name; uint32_t port; const struct aws_socket_options *socket_options; const struct aws_tls_connection_options *tls_options; aws_client_bootstrap_on_channel_event_fn *creation_callback; aws_client_bootstrap_on_channel_event_fn *setup_callback; aws_client_bootstrap_on_channel_event_fn *shutdown_callback; bool enable_read_back_pressure; void *user_data; struct aws_event_loop *requested_event_loop; const struct aws_host_resolution_config *host_resolution_override_config; }; /** * Arguments to setup a server socket listener which will also negotiate and configure TLS. * This creates a socket listener bound to `host` and 'port' using socket options `options`, and TLS options * `tls_options`. `incoming_callback` will be invoked once an incoming channel is ready for use and TLS is * finished negotiating, or if an error is encountered. `shutdown_callback` will be invoked once the channel has * shutdown. `destroy_callback` will be invoked after the server socket listener is destroyed, and all associated * connections and channels have finished shutting down. Immediately after the `shutdown_callback` returns, the channel * is cleaned up automatically. All callbacks are invoked in the thread of the event-loop that listener is assigned to. * * Upon shutdown of your application, you'll want to call `aws_server_bootstrap_destroy_socket_listener` with the return * value from this function. * * The socket type in `options` must be AWS_SOCKET_STREAM if tls_options is set. * DTLS is not currently supported for tls. 
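 *
 * A typical plaintext listener setup fills in this struct and passes it to
 * aws_server_bootstrap_new_socket_listener(), declared below. Illustrative sketch only;
 * `server_bootstrap`, `socket_options`, `app_state`, the `s_on_*` callbacks, and the
 * address/port values are application-side assumptions:
 *
 *     struct aws_server_socket_channel_bootstrap_options listener_options = {
 *         .bootstrap = server_bootstrap,
 *         .host_name = "127.0.0.1",
 *         .port = 8080,
 *         .socket_options = &socket_options,
 *         .incoming_callback = s_on_incoming_channel,
 *         .shutdown_callback = s_on_channel_shutdown,
 *         .destroy_callback = s_on_listener_destroyed,
 *         .user_data = app_state,
 *     };
 *     struct aws_socket *listener = aws_server_bootstrap_new_socket_listener(&listener_options);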
*/ struct aws_server_socket_channel_bootstrap_options { struct aws_server_bootstrap *bootstrap; const char *host_name; uint32_t port; const struct aws_socket_options *socket_options; const struct aws_tls_connection_options *tls_options; aws_server_bootstrap_on_accept_channel_setup_fn *incoming_callback; aws_server_bootstrap_on_accept_channel_shutdown_fn *shutdown_callback; aws_server_bootstrap_on_server_listener_destroy_fn *destroy_callback; bool enable_read_back_pressure; void *user_data; }; AWS_EXTERN_C_BEGIN /** * Create the client bootstrap. */ AWS_IO_API struct aws_client_bootstrap *aws_client_bootstrap_new( struct aws_allocator *allocator, const struct aws_client_bootstrap_options *options); /** * Increments a client bootstrap's ref count, allowing the caller to take a reference to it. * * Returns the same client bootstrap passed in. */ AWS_IO_API struct aws_client_bootstrap *aws_client_bootstrap_acquire(struct aws_client_bootstrap *bootstrap); /** * Decrements a client bootstrap's ref count. When the ref count drops to zero, the bootstrap will be destroyed. */ AWS_IO_API void aws_client_bootstrap_release(struct aws_client_bootstrap *bootstrap); /** * When using TLS, if ALPN is used, this callback will be invoked from the channel. The returned handler will be added * to the channel. */ AWS_IO_API int aws_client_bootstrap_set_alpn_callback( struct aws_client_bootstrap *bootstrap, aws_channel_on_protocol_negotiated_fn *on_protocol_negotiated); /** * Sets up a client socket channel. */ AWS_IO_API int aws_client_bootstrap_new_socket_channel(struct aws_socket_channel_bootstrap_options *options); /** * Initializes the server bootstrap with `allocator` and `el_group`. This object manages listeners, server connections, * and channels. */ AWS_IO_API struct aws_server_bootstrap *aws_server_bootstrap_new( struct aws_allocator *allocator, struct aws_event_loop_group *el_group); /** * Increments a server bootstrap's ref count, allowing the caller to take a reference to it. * * Returns the same server bootstrap passed in. */ AWS_IO_API struct aws_server_bootstrap *aws_server_bootstrap_acquire(struct aws_server_bootstrap *bootstrap); /** * Decrements a server bootstrap's ref count. When the ref count drops to zero, the bootstrap will be destroyed. */ AWS_IO_API void aws_server_bootstrap_release(struct aws_server_bootstrap *bootstrap); /** * When using TLS, if ALPN is used, this callback will be invoked from the channel. The returned handler will be added * to the channel. */ AWS_IO_API int aws_server_bootstrap_set_alpn_callback( struct aws_server_bootstrap *bootstrap, aws_channel_on_protocol_negotiated_fn *on_protocol_negotiated); /** * Sets up a server socket listener. If you are planning on using TLS, use * `aws_server_bootstrap_new_tls_socket_listener` instead. This creates a socket listener bound to `local_endpoint` * using socket options `options`. `incoming_callback` will be invoked once an incoming channel is ready for use or if * an error is encountered. `shutdown_callback` will be invoked once the channel has shutdown. `destroy_callback` will * be invoked after the server socket listener is destroyed, and all associated connections and channels have finished * shutting down. Immediately after the `shutdown_callback` returns, the channel is cleaned up automatically. 
All * callbacks are invoked the thread of the event-loop that the listening socket is assigned to * * Upon shutdown of your application, you'll want to call `aws_server_bootstrap_destroy_socket_listener` with the return * value from this function. * * bootstrap_options is copied. */ AWS_IO_API struct aws_socket *aws_server_bootstrap_new_socket_listener( const struct aws_server_socket_channel_bootstrap_options *bootstrap_options); /** * Shuts down 'listener' and cleans up any resources associated with it. Any incoming channels on `listener` will still * be active. `destroy_callback` will be invoked after the server socket listener is destroyed, and all associated * connections and channels have finished shutting down. */ AWS_IO_API void aws_server_bootstrap_destroy_socket_listener( struct aws_server_bootstrap *bootstrap, struct aws_socket *listener); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_CHANNEL_BOOTSTRAP_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/event_loop.h000066400000000000000000000445531456575232400250210ustar00rootroot00000000000000#ifndef AWS_IO_EVENT_LOOP_H #define AWS_IO_EVENT_LOOP_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_io_event_type { AWS_IO_EVENT_TYPE_READABLE = 1, AWS_IO_EVENT_TYPE_WRITABLE = 2, AWS_IO_EVENT_TYPE_REMOTE_HANG_UP = 4, AWS_IO_EVENT_TYPE_CLOSED = 8, AWS_IO_EVENT_TYPE_ERROR = 16, }; struct aws_event_loop; struct aws_task; struct aws_thread_options; #if AWS_USE_IO_COMPLETION_PORTS struct aws_overlapped; typedef void(aws_event_loop_on_completion_fn)( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred); /** * The aws_win32_OVERLAPPED struct is layout-compatible with OVERLAPPED as defined in . It is used * here to avoid pulling in a dependency on which would also bring along a lot of bad macros, such * as redefinitions of GetMessage and GetObject. Note that the OVERLAPPED struct layout in the Windows SDK can * never be altered without breaking binary compatibility for every existing third-party executable, so there * is no need to worry about keeping this definition in sync. */ struct aws_win32_OVERLAPPED { uintptr_t Internal; uintptr_t InternalHigh; union { struct { uint32_t Offset; uint32_t OffsetHigh; } s; void *Pointer; } u; void *hEvent; }; /** * Use aws_overlapped when a handle connected to the event loop needs an OVERLAPPED struct. * OVERLAPPED structs are needed to make OS-level async I/O calls. * When the I/O completes, the assigned aws_event_loop_on_completion_fn is called from the event_loop's thread. * While the I/O is pending, it is not safe to modify or delete aws_overlapped. * Call aws_overlapped_init() before first use. If the aws_overlapped will be used multiple times, call * aws_overlapped_reset() or aws_overlapped_init() between uses. 
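 *
 * For example, a caller that owns a per-operation wrapper struct might prepare it like
 * this before issuing an overlapped Windows I/O call (illustrative sketch; `my_io_op`,
 * `op`, and the completion function are application-side assumptions):
 *
 *     struct my_io_op {
 *         struct aws_overlapped overlapped;
 *         // ... other per-operation state owned by the caller ...
 *     };
 *
 *     static void s_on_op_complete(
 *         struct aws_event_loop *event_loop,
 *         struct aws_overlapped *overlapped,
 *         int status_code,
 *         size_t num_bytes_transferred) {
 *         (void)event_loop;
 *         (void)overlapped;
 *         (void)status_code;
 *         (void)num_bytes_transferred;
 *         // runs on the event-loop thread once the OS completes the operation
 *     }
 *
 *     aws_overlapped_init(&op->overlapped, s_on_op_complete, op);
 *     // pass aws_overlapped_to_windows_overlapped(&op->overlapped) to the Win32 API call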
*/ struct aws_overlapped { struct aws_win32_OVERLAPPED overlapped; aws_event_loop_on_completion_fn *on_completion; void *user_data; }; #else /* !AWS_USE_IO_COMPLETION_PORTS */ typedef void(aws_event_loop_on_event_fn)( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data); #endif /* AWS_USE_IO_COMPLETION_PORTS */ struct aws_event_loop_vtable { void (*destroy)(struct aws_event_loop *event_loop); int (*run)(struct aws_event_loop *event_loop); int (*stop)(struct aws_event_loop *event_loop); int (*wait_for_stop_completion)(struct aws_event_loop *event_loop); void (*schedule_task_now)(struct aws_event_loop *event_loop, struct aws_task *task); void (*schedule_task_future)(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); void (*cancel_task)(struct aws_event_loop *event_loop, struct aws_task *task); #if AWS_USE_IO_COMPLETION_PORTS int (*connect_to_io_completion_port)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); #else int (*subscribe_to_io_events)( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data); #endif int (*unsubscribe_from_io_events)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); void (*free_io_event_resources)(void *user_data); bool (*is_on_callers_thread)(struct aws_event_loop *event_loop); }; struct aws_event_loop { struct aws_event_loop_vtable *vtable; struct aws_allocator *alloc; aws_io_clock_fn *clock; struct aws_hash_table local_data; struct aws_atomic_var current_load_factor; uint64_t latest_tick_start; size_t current_tick_latency_sum; struct aws_atomic_var next_flush_time; void *impl_data; }; struct aws_event_loop_local_object; typedef void(aws_event_loop_on_local_object_removed_fn)(struct aws_event_loop_local_object *); struct aws_event_loop_local_object { const void *key; void *object; aws_event_loop_on_local_object_removed_fn *on_object_removed; }; struct aws_event_loop_options { aws_io_clock_fn *clock; struct aws_thread_options *thread_options; }; typedef struct aws_event_loop *(aws_new_event_loop_fn)( struct aws_allocator *alloc, const struct aws_event_loop_options *options, void *new_loop_user_data); struct aws_event_loop_group { struct aws_allocator *allocator; struct aws_array_list event_loops; struct aws_ref_count ref_count; struct aws_shutdown_callback_options shutdown_options; }; AWS_EXTERN_C_BEGIN #ifdef AWS_USE_IO_COMPLETION_PORTS /** * Prepares aws_overlapped for use, and sets a function to call when the overlapped operation completes. */ AWS_IO_API void aws_overlapped_init( struct aws_overlapped *overlapped, aws_event_loop_on_completion_fn *on_completion, void *user_data); /** * Prepares aws_overlapped for re-use without changing the assigned aws_event_loop_on_completion_fn. * Call aws_overlapped_init(), instead of aws_overlapped_reset(), to change the aws_event_loop_on_completion_fn. */ AWS_IO_API void aws_overlapped_reset(struct aws_overlapped *overlapped); /** * Casts an aws_overlapped pointer for use as a LPOVERLAPPED parameter to Windows API functions */ AWS_IO_API struct _OVERLAPPED *aws_overlapped_to_windows_overlapped(struct aws_overlapped *overlapped); #endif /* AWS_USE_IO_COMPLETION_PORTS */ /** * Creates an instance of the default event loop implementation for the current architecture and operating system. 
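 *
 * Basic lifecycle sketch (illustrative only; `allocator` is assumed to exist, and
 * aws_high_res_clock_get_ticks() from aws-c-common is one suitable aws_io_clock_fn):
 *
 *     struct aws_event_loop *loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks);
 *     if (!loop) {
 *         // inspect aws_last_error()
 *     }
 *     aws_event_loop_run(loop);
 *     // ... schedule tasks / subscribe handles ...
 *     aws_event_loop_destroy(loop); // stops the loop and blocks until it has shut down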
*/ AWS_IO_API struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock); /** * Creates an instance of the default event loop implementation for the current architecture and operating system using * extendable options. */ AWS_IO_API struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options); /** * Invokes the destroy() fn for the event loop implementation. * If the event loop is still in a running state, this function will block waiting on the event loop to shutdown. * If you do not want this function to block, call aws_event_loop_stop() manually first. * If the event loop is shared by multiple threads then destroy must be called by exactly one thread. All other threads * must ensure their API calls to the event loop happen-before the call to destroy. */ AWS_IO_API void aws_event_loop_destroy(struct aws_event_loop *event_loop); /** * Initializes common event-loop data structures. * This is only called from the *new() function of event loop implementations. */ AWS_IO_API int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_allocator *alloc, aws_io_clock_fn *clock); /** * Common cleanup code for all implementations. * This is only called from the *destroy() function of event loop implementations. */ AWS_IO_API void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop); /** * Fetches an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to * by key. This function is not thread safe and should be called inside the event-loop's thread. */ AWS_IO_API int aws_event_loop_fetch_local_object( struct aws_event_loop *event_loop, void *key, struct aws_event_loop_local_object *obj); /** * Puts an item object the event-loop's data store. Key will be taken as the memory address of the memory pointed to by * key. The lifetime of item must live until remove or a put item overrides it. This function is not thread safe and * should be called inside the event-loop's thread. */ AWS_IO_API int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj); /** * Removes an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to * by key. If removed_item is not null, the removed item will be moved to it if it exists. Otherwise, the default * deallocation strategy will be used. This function is not thread safe and should be called inside the event-loop's * thread. */ AWS_IO_API int aws_event_loop_remove_local_object( struct aws_event_loop *event_loop, void *key, struct aws_event_loop_local_object *removed_obj); /** * Triggers the running of the event loop. This function must not block. The event loop is not active until this * function is invoked. This function can be called again on an event loop after calling aws_event_loop_stop() and * aws_event_loop_wait_for_stop_completion(). */ AWS_IO_API int aws_event_loop_run(struct aws_event_loop *event_loop); /** * Triggers the event loop to stop, but does not wait for the loop to stop completely. * This function may be called from outside or inside the event loop thread. It is safe to call multiple times. * This function is called from destroy(). * * If you do not call destroy(), an event loop can be run again by calling stop(), wait_for_stop_completion(), run(). 
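 *
 * That restart sequence looks like this (sketch; error handling omitted):
 *
 *     aws_event_loop_stop(loop);
 *     aws_event_loop_wait_for_stop_completion(loop); // must not be called from the loop's own thread
 *     aws_event_loop_run(loop);                      // the same loop is now running again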
*/ AWS_IO_API int aws_event_loop_stop(struct aws_event_loop *event_loop); /** * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the * event-loop load balancer to take into account load when vending another event-loop to a caller. * * Call this function at the beginning of your event-loop tick: after wake-up, but before processing any IO or tasks. */ AWS_IO_API void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop); /** * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the * event-loop load balancer to take into account load when vending another event-loop to a caller. * * Call this function at the end of your event-loop tick: after processing IO and tasks. */ AWS_IO_API void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop); /** * Returns the current load factor (however that may be calculated). If the event-loop is not invoking * aws_event_loop_register_tick_start() and aws_event_loop_register_tick_end(), this value will always be 0. */ AWS_IO_API size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop); /** * Blocks until the event loop stops completely. * If you want to call aws_event_loop_run() again, you must call this after aws_event_loop_stop(). * It is not safe to call this function from inside the event loop thread. */ AWS_IO_API int aws_event_loop_wait_for_stop_completion(struct aws_event_loop *event_loop); /** * The event loop will schedule the task and run it on the event loop thread as soon as possible. * Note that cancelled tasks may execute outside the event loop thread. * This function may be called from outside or inside the event loop thread. * * The task should not be cleaned up or modified until its function is executed. */ AWS_IO_API void aws_event_loop_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task); /** * The event loop will schedule the task and run it at the specified time. * Use aws_event_loop_current_clock_time() to query the current time in nanoseconds. * Note that cancelled tasks may execute outside the event loop thread. * This function may be called from outside or inside the event loop thread. * * The task should not be cleaned up or modified until its function is executed. */ AWS_IO_API void aws_event_loop_schedule_task_future( struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); /** * Cancels task. * This function must be called from the event loop's thread, and is only guaranteed * to work properly on tasks scheduled from within the event loop's thread. * The task will be executed with the AWS_TASK_STATUS_CANCELED status inside this call. */ AWS_IO_API void aws_event_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); #if AWS_USE_IO_COMPLETION_PORTS /** * Associates an aws_io_handle with the event loop's I/O Completion Port. * * The handle must use aws_overlapped for all async operations requiring an OVERLAPPED struct. * When the operation completes, the aws_overlapped's completion function will run on the event loop thread. * Note that completion functions will not be invoked while the event loop is stopped. Users should wait for all async * operations on connected handles to complete before cleaning up or destroying the event loop. * * A handle may only be connected to one event loop in its lifetime. 
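 *
 * Illustrative sketch only (my_handle, s_on_op_complete, and my_user_data are
 * hypothetical names, not defined by this header): connect the handle once,
 * then use an aws_overlapped for each async operation on it.
 *
 *     if (aws_event_loop_connect_handle_to_io_completion_port(event_loop, &my_handle) == AWS_OP_SUCCESS) {
 *         struct aws_overlapped my_op;
 *         aws_overlapped_init(&my_op, s_on_op_complete, my_user_data);
 *         // pass aws_overlapped_to_windows_overlapped(&my_op) as the LPOVERLAPPED
 *         // argument of the Windows call that starts the async operation
 *     }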
*/ AWS_IO_API int aws_event_loop_connect_handle_to_io_completion_port( struct aws_event_loop *event_loop, struct aws_io_handle *handle); #else /* !AWS_USE_IO_COMPLETION_PORTS */ /** * Subscribes on_event to events on the event-loop for handle. events is a bitwise concatenation of the events that were * received. The definition for these values can be found in aws_io_event_type. Currently, only * AWS_IO_EVENT_TYPE_READABLE and AWS_IO_EVENT_TYPE_WRITABLE are honored. You always are registered for error conditions * and closure. This function may be called from outside or inside the event loop thread. However, the unsubscribe * function must be called inside the event-loop's thread. */ AWS_IO_API int aws_event_loop_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data); #endif /* AWS_USE_IO_COMPLETION_PORTS */ /** * Unsubscribes handle from event-loop notifications. * This function is not thread safe and should be called inside the event-loop's thread. * * NOTE: if you are using io completion ports, this is a risky call. We use it in places, but only when we're certain * there's no pending events. If you want to use it, it's your job to make sure you don't have pending events before * calling it. */ AWS_IO_API int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); /** * Cleans up resources (user_data) associated with the I/O eventing subsystem for a given handle. This should only * ever be necessary in the case where you are cleaning up an event loop during shutdown and its thread has already * been joined. */ AWS_IO_API void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle); /** * Returns true if the event loop's thread is the same thread that called this function, otherwise false. */ AWS_IO_API bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop); /** * Gets the current timestamp for the event loop's clock, in nanoseconds. This function is thread-safe. */ AWS_IO_API int aws_event_loop_current_clock_time(struct aws_event_loop *event_loop, uint64_t *time_nanos); /** * Creates an event loop group, with clock, number of loops to manage, and the function to call for creating a new * event loop. */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new( struct aws_allocator *alloc, aws_io_clock_fn *clock, uint16_t el_count, aws_new_event_loop_fn *new_loop_fn, void *new_loop_user_data, const struct aws_shutdown_callback_options *shutdown_options); /** Creates an event loop group, with clock, number of loops to manage, the function to call for creating a new * event loop, and also pins all loops to hw threads on the same cpu_group (e.g. NUMA nodes). Note: * If el_count exceeds the number of hw threads in the cpu_group it will be ignored on the assumption that if you * care about NUMA, you don't want hyper-threads doing your IO and you especially don't want IO on a different node. */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group( struct aws_allocator *alloc, aws_io_clock_fn *clock, uint16_t el_count, uint16_t cpu_group, aws_new_event_loop_fn *new_loop_fn, void *new_loop_user_data, const struct aws_shutdown_callback_options *shutdown_options); /** * Initializes an event loop group with platform defaults. 
If max_threads == 0, then the * loop count will be the number of available processors on the machine / 2 (to exclude hyper-threads). * Otherwise, max_threads will be the number of event loops in the group. */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new_default( struct aws_allocator *alloc, uint16_t max_threads, const struct aws_shutdown_callback_options *shutdown_options); /** Creates an event loop group, with clock, number of loops to manage, the function to call for creating a new * event loop, and also pins all loops to hw threads on the same cpu_group (e.g. NUMA nodes). Note: * If el_count exceeds the number of hw threads in the cpu_group it will be clamped to the number of hw threads * on the assumption that if you care about NUMA, you don't want hyper-threads doing your IO and you especially * don't want IO on a different node. * * If max_threads == 0, then the * loop count will be the number of available processors in the cpu_group / 2 (to exclude hyper-threads) */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( struct aws_allocator *alloc, uint16_t max_threads, uint16_t cpu_group, const struct aws_shutdown_callback_options *shutdown_options); /** * Increments the reference count on the event loop group, allowing the caller to take a reference to it. * * Returns the same event loop group passed in. */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group); /** * Decrements an event loop group's ref count. When the ref count drops to zero, the event loop group will be * destroyed. */ AWS_IO_API void aws_event_loop_group_release(struct aws_event_loop_group *el_group); AWS_IO_API struct aws_event_loop *aws_event_loop_group_get_loop_at(struct aws_event_loop_group *el_group, size_t index); AWS_IO_API size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group); /** * Fetches the next loop for use. The purpose is to enable load balancing across loops. You should not depend on how * this load balancing is done as it is subject to change in the future. Currently it uses the "best-of-two" algorithm * based on the load factor of each loop. */ AWS_IO_API struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_EVENT_LOOP_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/exports.h000066400000000000000000000015761456575232400243510ustar00rootroot00000000000000#ifndef AWS_IO_EXPORTS_H #define AWS_IO_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_IO_USE_IMPORT_EXPORT # ifdef AWS_IO_EXPORTS # define AWS_IO_API __declspec(dllexport) # else # define AWS_IO_API __declspec(dllimport) # endif /* AWS_IO_EXPORTS */ # else # define AWS_IO_API # endif /* USE_IMPORT_EXPORT */ #else # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_IO_USE_IMPORT_EXPORT) && defined(AWS_IO_EXPORTS) # define AWS_IO_API __attribute__((visibility("default"))) # else # define AWS_IO_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */ #endif /* AWS_IO_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/file_utils.h000066400000000000000000000004731456575232400247770ustar00rootroot00000000000000#ifndef AWS_IO_FILE_UTILS_H #define AWS_IO_FILE_UTILS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* Just shim the code that's in to common, maintain the public interface */ #include #endif /* AWS_IO_FILE_UTILS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/future.h000066400000000000000000001134731456575232400241570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_IO_FUTURE_H #define AWS_IO_FUTURE_H /* // THIS IS AN EXPERIMENTAL AND UNSTABLE API // // An aws_future is used to deliver the result of an asynchronous function. // // When an async function is called, it creates a future and returns it to the caller. // When the async work is finished, it completes the future by setting an error or result value. // The caller waits until the future is done, checks for error, and then gets // the result if everything was OK. Typically, the caller waits by registering // a callback that the future invokes when it's done. // // If result type T has a "destructor" (clean_up(), destroy(), or release() function), // then the future has set_result_by_move() and get_result_by_move() functions // that explicitly transfer ownership to and from the future. // If the future dies, and still "owns" the resource, it calls the destructor. // If T has no destructor, then the future has set_result() and get_result() // functions that simply copy T by value. // // Macros are used to define a type-safe API for each result type T, // similar to C++ templates. This makes the API hard to browse, so functions // are documented in comments below. The result setter/getter functions // are mildly different based on T's destructor type, and are documented later. // // --- API (common to all aws_future) --- // // Create a new future, with refcount of 1. struct aws_future_T *aws_future_T_new(struct aws_allocator *alloc); // Increment the refcount. // You can pass NULL (has no effect). // Returns the same pointer that was passed in. struct aws_future_T *aws_future_T_acquire(struct aws_future_T *future); // Decrement the refcount. // You can pass NULL (has no effect). // Always returns NULL. struct aws_future_T *aws_future_T_release(struct aws_future_T *future); // Set future as done, with an error_code. // If the future is already done this call is ignored. void aws_future_T_set_error(struct aws_future_T *future, int error_code); // Return whether the future is done. bool aws_future_T_is_done(const struct aws_future_T *future); // Get the error-code of a completed future. // If 0 is returned, then the future completed successfully, // you may now get the result. 
// // WARNING: You MUST NOT call this until the future is done. int aws_future_T_get_error(const struct aws_future_T *future); // Register callback to be invoked when the future completes. // // If the future is already done, the callback runs synchronously on the calling thread. // If the future isn't done yet, the callback is registered, and it // will run synchronously on whatever thread completes the future. // // WARNING: You MUST NOT register more than one callback. void aws_future_T_register_callback(struct aws_future_T *future, aws_future_callback_fn *on_done, void *user_data); // If the future isn't done yet, then register the completion callback. // // Returns true if the callback was registered, // or false if the future is already done. // // Use this when you can't risk the callback running synchronously. // For example: If you're calling an async function repeatedly, // and synchronous completion could lead to stack overflow due to recursion. // Or if you are holding a non-recursive mutex, and the callback also // needs the mutex, and an immediate callback would deadlock. // // WARNING: If a callback is registered, you MUST NOT call this again until // the callback has been invoked. bool aws_future_T_register_callback_if_not_done( struct aws_future_T *future, aws_future_callback_fn *on_done, void *user_data); // Register completion callback to run async on an event-loop thread. // // When the future completes, the callback is scheduled to run as an event-loop task. // // Use this when you want the callback to run on the event-loop's thread, // or to ensure the callback runs async even if the future completed synchronously. // // WARNING: You MUST NOT register more than one callback. void aws_future_T_register_event_loop_callback( struct aws_future_T *future, struct aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data); // Register completion callback to run async on an aws_channel's thread. // // When the future completes, the callback is scheduled to run as a channel task. // // Use this when you want the callback to run on the channel's thread, // or to ensure the callback runs async even if the future completed synchronously. // // WARNING: You MUST NOT register more than one callback. void aws_future_T_register_channel_callback( struct aws_future_T *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data); // Wait (up to timeout_ns) for future to complete. // Returns true if future completes in this time. // This blocks the current thread, and is probably only useful for tests and sample programs. bool aws_future_T_wait(struct aws_future_T *future, uint64_t timeout_ns); // // --- Defining new aws_future types --- // TODO UPDATE THESE DOCS // To define new types of aws_future, add the appropriate macro to the appropriate header. // The macros are: // // AWS_DECLARE_FUTURE_T_BY_VALUE(FUTURE, T) // For T stored by value, with no destructor. 
// Use with types like bool, size_t, etc // // AWS_DECLARE_FUTURE_T_BY_VALUE_WITH_CLEAN_UP(FUTURE, T, CLEAN_UP_FN) // For T stored by value, with destructor like: void aws_T_clean_up(T*) // Use with types like `struct aws_byte_buf` // // AWS_DECLARE_FUTURE_T_POINTER_WITH_DESTROY(FUTURE, T, DESTROY_FN) // For T stored by pointer, with destructor like: void aws_T_destroy(T*) // Use with types like `struct aws_string *` // // AWS_DECLARE_FUTURE_T_POINTER_WITH_RELEASE(FUTURE, T, RELEASE_FN) // For T stored by pointer, with destructor like: T* aws_T_release(T*) // Use with types like `struct aws_http_message *` // Note: if T's release() function doesn't return a pointer, use _WITH_DESTROY instead of _WITH_RELEASE. // // This file declares several common types: aws_future, aws_future, etc. // But new future types should be declared in the header where that type's API is declared. // For example: AWS_DECLARE_FUTURE_T_POINTER_WITH_RELEASE(aws_future_http_message, struct aws_http_message) // would go in: aws-c-http/include/aws/http/request_response.h // // The APIs generated by these macros are identical except for the "setter" and "getter" functions. // // --- Design (if you're curious) --- // // This class was developed to give the user more control over how the completion // callback is invoked. In the past, we passed completion callbacks to the async // function. But this could lead to issues when an async function "sometimes" // completed synchronously and "sometimes" completed async. The async function // would need to stress about how to schedule the callback so it was always async, // or more typically just invoke it whenever and leave the caller to figure it out. // // This class is also an experiment with "templates/generics in C". // In order to make the class type-safe, we use macros to define a unique // API for each result type T we need to store in a future. // If we refer to aws_future, we mean a struct named // aws_future_byte_buf, which stores an aws_byte_buf by value. // This could lead to code bloat, but the type-safety seems worth it. // // future is defined in aws-c-io, instead of aws-c-common, so it can // easily integrate with aws_event_loop and aws_channel. // // It's legal to call set_error() or set_result() multiple times. // If the future is already done, it ignores the call. // If result T has a destructor, the new result is immediately freed instead of saved. // This design lets us deal with ambiguity where it's not 100% certain whether a handoff occurred. // For example: if we call from C->Java and an exception is thrown, // it's not clear whether Java got the handoff. In this case, we can safely // call set_error(), completing the future if necessary, // or being ignored if the future was already done. 
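//
// --- Example caller-side usage (illustrative sketch, not part of the notes above) ---
//
// Shown with aws_future_bool, which is declared near the bottom of this header.
// some_async_operation(), s_on_bool_future_done(), and the choice of passing the
// future as its own callback user_data are hypothetical.
//
//     static void s_on_bool_future_done(void *user_data) {
//         struct aws_future_bool *future = user_data;
//         if (aws_future_bool_get_error(future) == AWS_ERROR_SUCCESS) {
//             bool result = aws_future_bool_get_result(future);
//             (void)result; // ... use the result ...
//         }
//         aws_future_bool_release(future);
//     }
//
//     struct aws_future_bool *future = some_async_operation(allocator);
//     aws_future_bool_register_callback(future, s_on_bool_future_done, future);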
*/ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_channel; struct aws_event_loop; struct aws_future_impl; /** Completion callback for aws_future */ typedef void(aws_future_callback_fn)(void *user_data); typedef void(aws_future_impl_result_clean_up_fn)(void *result_addr); typedef void(aws_future_impl_result_destroy_fn)(void *result); typedef void *(aws_future_impl_result_release_fn)(void *result); AWS_EXTERN_C_BEGIN AWS_IO_API struct aws_future_impl *aws_future_impl_new_by_value(struct aws_allocator *alloc, size_t sizeof_result); AWS_IO_API struct aws_future_impl *aws_future_impl_new_by_value_with_clean_up( struct aws_allocator *alloc, size_t sizeof_result, aws_future_impl_result_clean_up_fn *result_clean_up); AWS_IO_API struct aws_future_impl *aws_future_impl_new_pointer(struct aws_allocator *alloc); AWS_IO_API struct aws_future_impl *aws_future_impl_new_pointer_with_destroy( struct aws_allocator *alloc, aws_future_impl_result_destroy_fn *result_destroy); AWS_IO_API struct aws_future_impl *aws_future_impl_new_pointer_with_release( struct aws_allocator *alloc, aws_future_impl_result_release_fn *result_release); AWS_IO_API struct aws_future_impl *aws_future_impl_release(struct aws_future_impl *promise); AWS_IO_API struct aws_future_impl *aws_future_impl_acquire(struct aws_future_impl *promise); AWS_IO_API void aws_future_impl_set_error(struct aws_future_impl *promise, int error_code); AWS_IO_API void aws_future_impl_set_result_by_move(struct aws_future_impl *promise, void *src_address); AWS_IO_API bool aws_future_impl_is_done(const struct aws_future_impl *future); AWS_IO_API void aws_future_impl_register_callback( struct aws_future_impl *future, aws_future_callback_fn *on_done, void *user_data); AWS_IO_API bool aws_future_impl_register_callback_if_not_done( struct aws_future_impl *future, aws_future_callback_fn *on_done, void *user_data); AWS_IO_API void aws_future_impl_register_event_loop_callback( struct aws_future_impl *future, struct aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data); AWS_IO_API void aws_future_impl_register_channel_callback( struct aws_future_impl *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data); AWS_IO_API bool aws_future_impl_wait(const struct aws_future_impl *future, uint64_t timeout_ns); AWS_IO_API int aws_future_impl_get_error(const struct aws_future_impl *future); AWS_IO_API void *aws_future_impl_get_result_address(const struct aws_future_impl *future); AWS_IO_API void aws_future_impl_get_result_by_move(struct aws_future_impl *future, void *dst_address); /* Common beginning to all aws_future declarations */ #define AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) struct FUTURE; /* Common beginning to all aws_future implementations */ #define AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) /* Common end to all aws_future declarations */ #define AWS_FUTURE_T_DECLARATION_END(FUTURE, API) \ API struct FUTURE *FUTURE##_acquire(struct FUTURE *future); \ API struct FUTURE *FUTURE##_release(struct FUTURE *future); \ API void FUTURE##_set_error(struct FUTURE *future, int error_code); \ API bool FUTURE##_is_done(const struct FUTURE *future); \ API int FUTURE##_get_error(const struct FUTURE *future); \ API void FUTURE##_register_callback(struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data); \ API bool FUTURE##_register_callback_if_not_done( \ struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data); \ API void FUTURE##_register_event_loop_callback( \ struct FUTURE *future, struct 
aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data); \ API void FUTURE##_register_channel_callback( \ struct FUTURE *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data); \ API bool FUTURE##_wait(struct FUTURE *future, uint64_t timeout_ns); /* Common end to all aws_future implementations */ #define AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) \ struct FUTURE *FUTURE##_acquire(struct FUTURE *future) { \ return (struct FUTURE *)aws_future_impl_acquire((struct aws_future_impl *)future); \ } \ \ struct FUTURE *FUTURE##_release(struct FUTURE *future) { \ return (struct FUTURE *)aws_future_impl_release((struct aws_future_impl *)future); \ } \ \ void FUTURE##_set_error(struct FUTURE *future, int error_code) { \ aws_future_impl_set_error((struct aws_future_impl *)future, error_code); \ } \ \ bool FUTURE##_is_done(const struct FUTURE *future) { \ return aws_future_impl_is_done((const struct aws_future_impl *)future); \ } \ \ int FUTURE##_get_error(const struct FUTURE *future) { \ return aws_future_impl_get_error((const struct aws_future_impl *)future); \ } \ \ void FUTURE##_register_callback(struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data) { \ aws_future_impl_register_callback((struct aws_future_impl *)future, on_done, user_data); \ } \ \ bool FUTURE##_register_callback_if_not_done( \ struct FUTURE *future, aws_future_callback_fn *on_done, void *user_data) { \ \ return aws_future_impl_register_callback_if_not_done((struct aws_future_impl *)future, on_done, user_data); \ } \ \ void FUTURE##_register_event_loop_callback( \ struct FUTURE *future, struct aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data) { \ \ aws_future_impl_register_event_loop_callback( \ (struct aws_future_impl *)future, event_loop, on_done, user_data); \ } \ \ void FUTURE##_register_channel_callback( \ struct FUTURE *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data) { \ \ aws_future_impl_register_channel_callback((struct aws_future_impl *)future, channel, on_done, user_data); \ } \ \ bool FUTURE##_wait(struct FUTURE *future, uint64_t timeout_ns) { \ return aws_future_impl_wait((struct aws_future_impl *)future, timeout_ns); \ } /** * Declare a future that holds a simple T by value, that needs no destructor. * Use with types like bool, size_t, etc. * * See top of future.h for most API docs. * The result setters and getters are: // Set the result. // // If the future is already done this call is ignored. void aws_future_T_set_result(const struct aws_future_T *future, T result); // Get the result of a completed future. // // WARNING: You MUST NOT call this until the future is done. // WARNING: You MUST NOT call this unless get_error() returned 0. 
T aws_future_T_get_result(const struct aws_future_T *future); */ #define AWS_FUTURE_T_BY_VALUE_DECLARATION(FUTURE, T, API) \ AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ API void FUTURE##_set_result(struct FUTURE *future, T result); \ API T FUTURE##_get_result(const struct FUTURE *future); \ AWS_FUTURE_T_DECLARATION_END(FUTURE, API) #define AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(FUTURE, T) \ AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ return (struct FUTURE *)aws_future_impl_new_by_value(alloc, sizeof(T)); \ } \ \ void FUTURE##_set_result(struct FUTURE *future, T result) { \ aws_future_impl_set_result_by_move((struct aws_future_impl *)future, &result); \ } \ \ T FUTURE##_get_result(const struct FUTURE *future) { \ return *(T *)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ } \ AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) /** * Declares a future that holds T by value, with destructor like: void aws_T_clean_up(T*) * Use with types like aws_byte_buf. * * See top of future.h for most API docs. * The result setters and getters are: // Set the result, transferring ownership. // // The memory at `value_address` is memcpy'd into the future, // and then zeroed out to help prevent accidental reuse. // It is safe to call this multiple times. If the future is already done, // the new result is destroyed instead of saved. void aws_future_T_set_result_by_move(struct aws_future_T *future, T *value_address); // Get the result, transferring ownership. // // WARNING: You MUST NOT call this until the future is done. // WARNING: You MUST NOT call this unless get_error() returned 0. // WARNING: You MUST NOT call this multiple times. T aws_future_T_get_result_by_move(struct aws_future_T *future); // Get the result, without transferring ownership. // // WARNING: You MUST NOT call this until the future is done. // WARNING: You MUST NOT call this unless get_error() returned 0. // WARNING: You MUST NOT call this multiple times. 
T* aws_future_T_peek_result(const struct aws_future_T *future); */ #define AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP_DECLARATION(FUTURE, T, API) \ AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ API void FUTURE##_set_result_by_move(struct FUTURE *future, T *value_address); \ API T *FUTURE##_peek_result(const struct FUTURE *future); \ API T FUTURE##_get_result_by_move(struct FUTURE *future); \ AWS_FUTURE_T_DECLARATION_END(FUTURE, API) #define AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP_IMPLEMENTATION(FUTURE, T, CLEAN_UP_FN) \ AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ \ struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ void (*clean_up_fn)(T *) = CLEAN_UP_FN; /* check clean_up() function signature */ \ return (struct FUTURE *)aws_future_impl_new_by_value_with_clean_up( \ alloc, sizeof(T), (aws_future_impl_result_clean_up_fn)clean_up_fn); \ } \ \ void FUTURE##_set_result_by_move(struct FUTURE *future, T *value_address) { \ aws_future_impl_set_result_by_move((struct aws_future_impl *)future, value_address); \ } \ \ T *FUTURE##_peek_result(const struct FUTURE *future) { \ return aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ } \ \ T FUTURE##_get_result_by_move(struct FUTURE *future) { \ T value; \ aws_future_impl_get_result_by_move((struct aws_future_impl *)future, &value); \ return value; \ } \ \ AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) /** * Declares a future that holds T*, with no destructor. */ #define AWS_FUTURE_T_POINTER_DECLARATION(FUTURE, T, API) \ AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ API void FUTURE##_set_result(struct FUTURE *future, T *result); \ API T *FUTURE##_get_result(const struct FUTURE *future); \ AWS_FUTURE_T_DECLARATION_END(FUTURE, API) #define AWS_FUTURE_T_POINTER_IMPLEMENTATION(FUTURE, T) \ AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ \ struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ return (struct FUTURE *)aws_future_impl_new_pointer(alloc); \ } \ \ void FUTURE##_set_result(struct FUTURE *future, T *result) { \ aws_future_impl_set_result_by_move((struct aws_future_impl *)future, &result); \ } \ \ T *FUTURE##_get_result(const struct FUTURE *future) { \ return *(T **)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ } \ \ AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) /** * Declares a future that holds T*, with destructor like: void aws_T_destroy(T*) * Use with types like aws_string. * * See top of future.h for most API docs. * The result setters and getters are: // Set the result, transferring ownership. // // The value at `pointer_address` is copied into the future, // and then set NULL to prevent accidental reuse. // If the future is already done, this new result is destroyed instead of saved. void aws_future_T_set_result_by_move(struct aws_future_T *future, T **pointer_address); // Get the result, transferring ownership. // // WARNING: You MUST NOT call this until the future is done. // WARNING: You MUST NOT call this unless get_error() returned 0. // WARNING: You MUST NOT call this multiple times. T* aws_future_T_get_result_by_move(struct aws_future_T *future); // Get the result, without transferring ownership. // // WARNING: You MUST NOT call this until the future is done. // WARNING: You MUST NOT call this unless get_error() returned 0. // WARNING: You MUST NOT call this multiple times. 
T* aws_future_T_peek_result(const struct aws_future_T *future); */ #define AWS_FUTURE_T_POINTER_WITH_DESTROY_DECLARATION(FUTURE, T, API) \ AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ API void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address); \ API T *FUTURE##_get_result_by_move(struct FUTURE *future); \ API T *FUTURE##_peek_result(const struct FUTURE *future); \ AWS_FUTURE_T_DECLARATION_END(FUTURE, API) #define AWS_FUTURE_T_POINTER_WITH_DESTROY_IMPLEMENTATION(FUTURE, T, DESTROY_FN) \ AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ \ struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ void (*destroy_fn)(T *) = DESTROY_FN; /* check destroy() function signature */ \ return (struct FUTURE *)aws_future_impl_new_pointer_with_destroy( \ alloc, (aws_future_impl_result_destroy_fn *)destroy_fn); \ } \ \ void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address) { \ aws_future_impl_set_result_by_move((struct aws_future_impl *)future, pointer_address); \ } \ \ T *FUTURE##_get_result_by_move(struct FUTURE *future) { \ T *pointer; \ aws_future_impl_get_result_by_move((struct aws_future_impl *)future, &pointer); \ return pointer; \ } \ \ T *FUTURE##_peek_result(const struct FUTURE *future) { \ return *(T **)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ } \ \ AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) /** * Declares a future that holds T*, with destructor like: T* aws_T_release(T*) * Use with types like aws_http_message * * See top of future.h for most API docs. * The result setters and getters are: // Set the result, transferring ownership. // // The value at `pointer_address` is copied into the future, // and then set NULL to prevent accidental reuse. // If the future is already done, this new result is destroyed instead of saved. void aws_future_T_set_result_by_move(struct aws_future_T *future, T **pointer_address); // Get the result, transferring ownership. // // WARNING: You MUST NOT call this until the future is done. // WARNING: You MUST NOT call this unless get_error() returned 0. // WARNING: You MUST NOT call this multiple times. T* aws_future_T_get_result_by_move(struct aws_future_T *future); // Get the result, without transferring ownership. // // WARNING: You MUST NOT call this until the future is done. // WARNING: You MUST NOT call this unless get_error() returned 0. // WARNING: You MUST NOT call this multiple times. 
T* aws_future_T_peek_result(const struct aws_future_T *future); */ #define AWS_FUTURE_T_POINTER_WITH_RELEASE_DECLARATION(FUTURE, T, API) \ AWS_FUTURE_T_DECLARATION_BEGIN(FUTURE, API) \ API struct FUTURE *FUTURE##_new(struct aws_allocator *alloc); \ API void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address); \ API T *FUTURE##_get_result_by_move(struct FUTURE *future); \ API T *FUTURE##_peek_result(const struct FUTURE *future); \ AWS_FUTURE_T_DECLARATION_END(FUTURE, API) #define AWS_FUTURE_T_POINTER_WITH_RELEASE_IMPLEMENTATION(FUTURE, T, RELEASE_FN) \ AWS_FUTURE_T_IMPLEMENTATION_BEGIN(FUTURE) \ \ struct FUTURE *FUTURE##_new(struct aws_allocator *alloc) { \ T *(*release_fn)(T *) = RELEASE_FN; /* check release() function signature */ \ return (struct FUTURE *)aws_future_impl_new_pointer_with_release( \ alloc, (aws_future_impl_result_release_fn *)release_fn); \ } \ \ void FUTURE##_set_result_by_move(struct FUTURE *future, T **pointer_address) { \ aws_future_impl_set_result_by_move((struct aws_future_impl *)future, pointer_address); \ } \ \ T *FUTURE##_get_result_by_move(struct FUTURE *future) { \ T *pointer; \ aws_future_impl_get_result_by_move((struct aws_future_impl *)future, &pointer); \ return pointer; \ } \ \ T *FUTURE##_peek_result(const struct FUTURE *future) { \ return *(T **)aws_future_impl_get_result_address((const struct aws_future_impl *)future); \ } \ \ AWS_FUTURE_T_IMPLEMENTATION_END(FUTURE) /** * aws_future */ AWS_FUTURE_T_BY_VALUE_DECLARATION(aws_future_size, size_t, AWS_IO_API) /** * aws_future */ AWS_FUTURE_T_BY_VALUE_DECLARATION(aws_future_bool, bool, AWS_IO_API) /** * aws_future */ AWS_FUTURE_T_DECLARATION_BEGIN(aws_future_void, AWS_IO_API) AWS_IO_API struct aws_future_void *aws_future_void_new(struct aws_allocator *alloc); AWS_IO_API void aws_future_void_set_result(struct aws_future_void *future); AWS_FUTURE_T_DECLARATION_END(aws_future_void, AWS_IO_API) AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_FUTURE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/host_resolver.h000066400000000000000000000243501456575232400255360ustar00rootroot00000000000000#ifndef AWS_IO_HOST_RESOLVER_H #define AWS_IO_HOST_RESOLVER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_event_loop_group; enum aws_address_record_type { /* ipv4 address. */ AWS_ADDRESS_RECORD_TYPE_A, /* ipv6 address. */ AWS_ADDRESS_RECORD_TYPE_AAAA }; enum aws_get_host_address_flags { /* get number of ipv4 addresses. */ AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A = 0x00000001, /* get number of ipv6 addresses. */ AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA = 0x00000002 }; struct aws_string; struct aws_host_address { struct aws_allocator *allocator; const struct aws_string *host; const struct aws_string *address; enum aws_address_record_type record_type; uint64_t expiry; /* This next section is strictly for mitigating the impact of sticky hosts that aren't performing well. */ /*for use in DNS-based load balancing.*/ size_t use_count; /* give a hint on when to remove a bad host from service. */ size_t connection_failure_count; /* we don't implement this yet, but we will asap. */ uint8_t weight; }; struct aws_host_resolver; /** * Invoked once an address has been resolved for host. The type in host_addresses is struct aws_host_address (by-value). 
* The caller does not own this memory and you must copy the host address before returning from this function if you * plan to use it later. For convenience, we've provided the aws_host_address_copy() and aws_host_address_clean_up() * functions. */ typedef void(aws_on_host_resolved_result_fn)( struct aws_host_resolver *resolver, const struct aws_string *host_name, int err_code, const struct aws_array_list *host_addresses, void *user_data); /** * Function signature for configuring your own resolver (the default just uses getaddrinfo()). The type in * output_addresses is struct aws_host_address (by-value). We assume this function blocks, hence this absurdly * complicated design. */ typedef int(aws_resolve_host_implementation_fn)( struct aws_allocator *allocator, const struct aws_string *host_name, struct aws_array_list *output_addresses, void *user_data); struct aws_host_resolution_config { aws_resolve_host_implementation_fn *impl; size_t max_ttl; void *impl_data; uint64_t resolve_frequency_ns; /* 0 defaults to 1 second interval */ }; struct aws_host_listener; struct aws_host_listener_options; struct aws_host_resolver_purge_host_options { /* the host to purge the cache for */ const struct aws_string *host; /* Callback to invoke when the purge is complete */ aws_simple_completion_callback *on_host_purge_complete_callback; /* user_data will be passed as it is in the callback. */ void *user_data; }; /** should you absolutely disdain the default implementation, feel free to implement your own. */ struct aws_host_resolver_vtable { /** clean up everything you allocated, but not resolver itself. */ void (*destroy)(struct aws_host_resolver *resolver); /** resolve the host by host_name, the user owns host_name, so it needs to be copied if you persist it, * invoke res with the result. This function should never block. */ int (*resolve_host)( struct aws_host_resolver *resolver, const struct aws_string *host_name, aws_on_host_resolved_result_fn *res, const struct aws_host_resolution_config *config, void *user_data); /** gives your implementation a hint that an address has some failed connections occuring. Do whatever you want (or * nothing) about it. */ int (*record_connection_failure)(struct aws_host_resolver *resolver, const struct aws_host_address *address); /** * @Deprecated Use purge_cache_with_callback instead * wipe out anything you have cached. */ int (*purge_cache)(struct aws_host_resolver *resolver); /** wipe out anything you have cached. */ int (*purge_cache_with_callback)( struct aws_host_resolver *resolver, aws_simple_completion_callback *on_purge_cache_complete_callback, void *user_data); /** wipe out anything cached for a specific host */ int (*purge_host_cache)( struct aws_host_resolver *resolver, const struct aws_host_resolver_purge_host_options *options); /** get number of addresses for a given host. */ size_t (*get_host_address_count)( struct aws_host_resolver *resolver, const struct aws_string *host_name, uint32_t flags); }; struct aws_host_resolver { struct aws_allocator *allocator; void *impl; struct aws_host_resolver_vtable *vtable; struct aws_ref_count ref_count; struct aws_shutdown_callback_options shutdown_options; }; struct aws_host_resolver_default_options { size_t max_entries; struct aws_event_loop_group *el_group; const struct aws_shutdown_callback_options *shutdown_options; aws_io_clock_fn *system_clock_override_fn; }; AWS_EXTERN_C_BEGIN /** * Copies `from` to `to`. 
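 *
 * Illustrative sketch only (variable names are hypothetical): copying one
 * address out of the resolver callback described above, since the callback
 * does not own the addresses it is handed. aws_array_list_get_at_ptr() is
 * assumed to come from aws-c-common.
 *
 *     struct aws_host_address *first = NULL;
 *     struct aws_host_address first_copy;
 *     if (aws_array_list_get_at_ptr(host_addresses, (void **)&first, 0) == AWS_OP_SUCCESS) {
 *         aws_host_address_copy(first, &first_copy);
 *         // ... later, once finished with the copy ...
 *         aws_host_address_clean_up(&first_copy);
 *     }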
*/ AWS_IO_API int aws_host_address_copy(const struct aws_host_address *from, struct aws_host_address *to); /** * Moves `from` to `to`. After this call, from is no longer usable. Though, it could be resused for another * move or copy operation. */ AWS_IO_API void aws_host_address_move(struct aws_host_address *from, struct aws_host_address *to); /** * Cleans up the memory for `address` */ AWS_IO_API void aws_host_address_clean_up(struct aws_host_address *address); /** WARNING! do not call this function directly (getaddrinfo()): it blocks. Provide a pointer to this function for other * resolution functions. */ AWS_IO_API int aws_default_dns_resolve( struct aws_allocator *allocator, const struct aws_string *host_name, struct aws_array_list *output_addresses, void *user_data); /** * Creates a host resolver with the default behavior. Here's the behavior: * * Since there's not a reliable way to do non-blocking DNS without a ton of risky work that would need years of testing * on every Unix system in existence, we work around it by doing a threaded implementation. * * When you request an address, it checks the cache. If the entry isn't in the cache it creates a new one. * Each entry has a potentially short lived back-ground thread based on ttl for the records. Once we've populated the * cache and you keep the resolver active, the resolution callback will be invoked immediately. When it's idle, it will * take a little while in the background thread to fetch more, evaluate TTLs etc... In that case your callback will be * invoked from the background thread. * * -------------------------------------------------------------------------------------------------------------------- * * A few things to note about TTLs and connection failures. * * We attempt to honor your max ttl but will not honor it if dns queries are failing or all of your connections are * marked as failed. Once we are able to query dns again, we will re-evaluate the TTLs. * * Upon notification connection failures, we move them to a separate list. Eventually we retry them when it's likely * that the endpoint is healthy again or we don't really have another choice, but we try to keep them out of your * hot path. * * --------------------------------------------------------------------------------------------------------------------- * * Finally, this entire design attempts to prevent problems where developers have to choose between large TTLs and thus * sticky hosts or short TTLs and good fleet utilization but now higher latencies. In this design, we resolve every * second in the background (only while you're actually using the record), but we do not expire the earlier resolved * addresses until max ttl has passed. * * This for example, should enable you to hit thousands of hosts in the Amazon S3 fleet instead of just one or two. */ AWS_IO_API struct aws_host_resolver *aws_host_resolver_new_default( struct aws_allocator *allocator, const struct aws_host_resolver_default_options *options); /** * Increments the reference count on the host resolver, allowing the caller to take a reference to it. * * Returns the same host resolver passed in. */ AWS_IO_API struct aws_host_resolver *aws_host_resolver_acquire(struct aws_host_resolver *resolver); /** * Decrements a host resolver's ref count. When the ref count drops to zero, the resolver will be destroyed. */ AWS_IO_API void aws_host_resolver_release(struct aws_host_resolver *resolver); /** * calls resolve_host on the vtable. config will be copied. 
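 *
 * Illustrative sketch only (s_on_host_resolved and my_user_data are hypothetical):
 * resolving a host with the default resolution config.
 *
 *     struct aws_host_resolution_config config = aws_host_resolver_init_default_resolution_config();
 *     aws_host_resolver_resolve_host(resolver, host_name, s_on_host_resolved, &config, my_user_data);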
*/ AWS_IO_API int aws_host_resolver_resolve_host( struct aws_host_resolver *resolver, const struct aws_string *host_name, aws_on_host_resolved_result_fn *res, const struct aws_host_resolution_config *config, void *user_data); /** * calls record_connection_failure on the vtable. */ AWS_IO_API int aws_host_resolver_record_connection_failure( struct aws_host_resolver *resolver, const struct aws_host_address *address); /** * @Deprecated Use purge_cache_with_callback instead * calls purge_cache on the vtable. */ AWS_IO_API int aws_host_resolver_purge_cache(struct aws_host_resolver *resolver); /** * Calls aws_host_resolver_purge_cache_with_callback on the vtable which will wipe out everything host resolver has * cached. */ AWS_IO_API int aws_host_resolver_purge_cache_with_callback( struct aws_host_resolver *resolver, aws_simple_completion_callback *on_purge_cache_complete_callback, void *user_data); /** * Removes the cache for a host asynchronously. */ AWS_IO_API int aws_host_resolver_purge_host_cache( struct aws_host_resolver *resolver, const struct aws_host_resolver_purge_host_options *options); /** * get number of addresses for a given host. */ AWS_IO_API size_t aws_host_resolver_get_host_address_count( struct aws_host_resolver *resolver, const struct aws_string *host_name, uint32_t flags); /** * Returns the default host resolution config used internally if none specified. * * @return default host resolution config */ AWS_IO_API struct aws_host_resolution_config aws_host_resolver_init_default_resolution_config(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_HOST_RESOLVER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/io.h000066400000000000000000000226331456575232400232510ustar00rootroot00000000000000#ifndef AWS_IO_IO_H #define AWS_IO_IO_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_IO_PACKAGE_ID 1 struct aws_io_handle { union { int fd; void *handle; } data; void *additional_data; }; enum aws_io_message_type { AWS_IO_MESSAGE_APPLICATION_DATA, }; struct aws_io_message; struct aws_channel; typedef void(aws_channel_on_message_write_completed_fn)( struct aws_channel *channel, struct aws_io_message *message, int err_code, void *user_data); struct aws_io_message { /** * Allocator used for the message and message data. If this is null, the message belongs to a pool or some other * message manager. */ struct aws_allocator *allocator; /** * Buffer containing the data for message */ struct aws_byte_buf message_data; /** * type of the message. This is used for framework control messages. Currently the only type is * AWS_IO_MESSAGE_APPLICATION_DATA */ enum aws_io_message_type message_type; /** * Conveys information about the contents of message_data (e.g. cast the ptr to some type). If 0, it's just opaque * data. */ int message_tag; /** * In order to avoid excess allocations/copies, on a partial read or write, the copy mark is set to indicate how * much of this message has already been processed or copied. */ size_t copy_mark; /** * The channel that the message is bound to. */ struct aws_channel *owning_channel; /** * Invoked by the channel once the entire message has been written to the data sink. 
*/ aws_channel_on_message_write_completed_fn *on_completion; /** * arbitrary user data for the on_completion callback */ void *user_data; /** it's incredibly likely something is going to need to queue this, * go ahead and make sure the list info is part of the original allocation. */ struct aws_linked_list_node queueing_handle; }; typedef int(aws_io_clock_fn)(uint64_t *timestamp); enum aws_io_errors { AWS_IO_CHANNEL_ERROR_ERROR_CANT_ACCEPT_INPUT = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_IO_PACKAGE_ID), AWS_IO_CHANNEL_UNKNOWN_MESSAGE_TYPE, AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW, AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED, AWS_IO_EVENT_LOOP_SHUTDOWN, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE, AWS_IO_TLS_ERROR_NOT_NEGOTIATED, AWS_IO_TLS_ERROR_WRITE_FAILURE, AWS_IO_TLS_ERROR_ALERT_RECEIVED, AWS_IO_TLS_CTX_ERROR, AWS_IO_TLS_VERSION_UNSUPPORTED, AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED, AWS_IO_MISSING_ALPN_MESSAGE, AWS_IO_UNHANDLED_ALPN_PROTOCOL_MESSAGE, AWS_IO_FILE_VALIDATION_FAILURE, AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY, AWS_ERROR_IO_ALREADY_SUBSCRIBED, AWS_ERROR_IO_NOT_SUBSCRIBED, AWS_ERROR_IO_OPERATION_CANCELLED, AWS_IO_READ_WOULD_BLOCK, AWS_IO_BROKEN_PIPE, AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY, AWS_IO_SOCKET_INVALID_OPERATION_FOR_TYPE, AWS_IO_SOCKET_CONNECTION_REFUSED, AWS_IO_SOCKET_TIMEOUT, AWS_IO_SOCKET_NO_ROUTE_TO_HOST, AWS_IO_SOCKET_NETWORK_DOWN, AWS_IO_SOCKET_CLOSED, AWS_IO_SOCKET_NOT_CONNECTED, AWS_IO_SOCKET_INVALID_OPTIONS, AWS_IO_SOCKET_ADDRESS_IN_USE, AWS_IO_SOCKET_INVALID_ADDRESS, AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE, AWS_IO_SOCKET_CONNECT_ABORTED, AWS_IO_DNS_QUERY_FAILED, AWS_IO_DNS_INVALID_NAME, AWS_IO_DNS_NO_ADDRESS_FOR_HOST, AWS_IO_DNS_HOST_REMOVED_FROM_CACHE, AWS_IO_STREAM_INVALID_SEEK_POSITION, AWS_IO_STREAM_READ_FAILED, DEPRECATED_AWS_IO_INVALID_FILE_HANDLE, AWS_IO_SHARED_LIBRARY_LOAD_FAILURE, AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE, AWS_IO_TLS_NEGOTIATION_TIMEOUT, AWS_IO_TLS_ALERT_NOT_GRACEFUL, AWS_IO_MAX_RETRIES_EXCEEDED, AWS_IO_RETRY_PERMISSION_DENIED, AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED, AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED, AWS_ERROR_PKCS11_VERSION_UNSUPPORTED, AWS_ERROR_PKCS11_TOKEN_NOT_FOUND, AWS_ERROR_PKCS11_KEY_NOT_FOUND, AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED, AWS_ERROR_PKCS11_UNKNOWN_CRYPTOKI_RETURN_VALUE, /* PKCS#11 "CKR_" (Cryptoki Return Value) as AWS error-codes */ AWS_ERROR_PKCS11_CKR_CANCEL, AWS_ERROR_PKCS11_CKR_HOST_MEMORY, AWS_ERROR_PKCS11_CKR_SLOT_ID_INVALID, AWS_ERROR_PKCS11_CKR_GENERAL_ERROR, AWS_ERROR_PKCS11_CKR_FUNCTION_FAILED, AWS_ERROR_PKCS11_CKR_ARGUMENTS_BAD, AWS_ERROR_PKCS11_CKR_NO_EVENT, AWS_ERROR_PKCS11_CKR_NEED_TO_CREATE_THREADS, AWS_ERROR_PKCS11_CKR_CANT_LOCK, AWS_ERROR_PKCS11_CKR_ATTRIBUTE_READ_ONLY, AWS_ERROR_PKCS11_CKR_ATTRIBUTE_SENSITIVE, AWS_ERROR_PKCS11_CKR_ATTRIBUTE_TYPE_INVALID, AWS_ERROR_PKCS11_CKR_ATTRIBUTE_VALUE_INVALID, AWS_ERROR_PKCS11_CKR_ACTION_PROHIBITED, AWS_ERROR_PKCS11_CKR_DATA_INVALID, AWS_ERROR_PKCS11_CKR_DATA_LEN_RANGE, AWS_ERROR_PKCS11_CKR_DEVICE_ERROR, AWS_ERROR_PKCS11_CKR_DEVICE_MEMORY, AWS_ERROR_PKCS11_CKR_DEVICE_REMOVED, AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_INVALID, AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_LEN_RANGE, AWS_ERROR_PKCS11_CKR_FUNCTION_CANCELED, AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_PARALLEL, AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_SUPPORTED, AWS_ERROR_PKCS11_CKR_KEY_HANDLE_INVALID, AWS_ERROR_PKCS11_CKR_KEY_SIZE_RANGE, AWS_ERROR_PKCS11_CKR_KEY_TYPE_INCONSISTENT, AWS_ERROR_PKCS11_CKR_KEY_NOT_NEEDED, AWS_ERROR_PKCS11_CKR_KEY_CHANGED, AWS_ERROR_PKCS11_CKR_KEY_NEEDED, AWS_ERROR_PKCS11_CKR_KEY_INDIGESTIBLE, 
AWS_ERROR_PKCS11_CKR_KEY_FUNCTION_NOT_PERMITTED, AWS_ERROR_PKCS11_CKR_KEY_NOT_WRAPPABLE, AWS_ERROR_PKCS11_CKR_KEY_UNEXTRACTABLE, AWS_ERROR_PKCS11_CKR_MECHANISM_INVALID, AWS_ERROR_PKCS11_CKR_MECHANISM_PARAM_INVALID, AWS_ERROR_PKCS11_CKR_OBJECT_HANDLE_INVALID, AWS_ERROR_PKCS11_CKR_OPERATION_ACTIVE, AWS_ERROR_PKCS11_CKR_OPERATION_NOT_INITIALIZED, AWS_ERROR_PKCS11_CKR_PIN_INCORRECT, AWS_ERROR_PKCS11_CKR_PIN_INVALID, AWS_ERROR_PKCS11_CKR_PIN_LEN_RANGE, AWS_ERROR_PKCS11_CKR_PIN_EXPIRED, AWS_ERROR_PKCS11_CKR_PIN_LOCKED, AWS_ERROR_PKCS11_CKR_SESSION_CLOSED, AWS_ERROR_PKCS11_CKR_SESSION_COUNT, AWS_ERROR_PKCS11_CKR_SESSION_HANDLE_INVALID, AWS_ERROR_PKCS11_CKR_SESSION_PARALLEL_NOT_SUPPORTED, AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY, AWS_ERROR_PKCS11_CKR_SESSION_EXISTS, AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY_EXISTS, AWS_ERROR_PKCS11_CKR_SESSION_READ_WRITE_SO_EXISTS, AWS_ERROR_PKCS11_CKR_SIGNATURE_INVALID, AWS_ERROR_PKCS11_CKR_SIGNATURE_LEN_RANGE, AWS_ERROR_PKCS11_CKR_TEMPLATE_INCOMPLETE, AWS_ERROR_PKCS11_CKR_TEMPLATE_INCONSISTENT, AWS_ERROR_PKCS11_CKR_TOKEN_NOT_PRESENT, AWS_ERROR_PKCS11_CKR_TOKEN_NOT_RECOGNIZED, AWS_ERROR_PKCS11_CKR_TOKEN_WRITE_PROTECTED, AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_HANDLE_INVALID, AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_SIZE_RANGE, AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT, AWS_ERROR_PKCS11_CKR_USER_ALREADY_LOGGED_IN, AWS_ERROR_PKCS11_CKR_USER_NOT_LOGGED_IN, AWS_ERROR_PKCS11_CKR_USER_PIN_NOT_INITIALIZED, AWS_ERROR_PKCS11_CKR_USER_TYPE_INVALID, AWS_ERROR_PKCS11_CKR_USER_ANOTHER_ALREADY_LOGGED_IN, AWS_ERROR_PKCS11_CKR_USER_TOO_MANY_TYPES, AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_INVALID, AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_LEN_RANGE, AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_HANDLE_INVALID, AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_SIZE_RANGE, AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_TYPE_INCONSISTENT, AWS_ERROR_PKCS11_CKR_RANDOM_SEED_NOT_SUPPORTED, AWS_ERROR_PKCS11_CKR_RANDOM_NO_RNG, AWS_ERROR_PKCS11_CKR_DOMAIN_PARAMS_INVALID, AWS_ERROR_PKCS11_CKR_CURVE_NOT_SUPPORTED, AWS_ERROR_PKCS11_CKR_BUFFER_TOO_SMALL, AWS_ERROR_PKCS11_CKR_SAVED_STATE_INVALID, AWS_ERROR_PKCS11_CKR_INFORMATION_SENSITIVE, AWS_ERROR_PKCS11_CKR_STATE_UNSAVEABLE, AWS_ERROR_PKCS11_CKR_CRYPTOKI_NOT_INITIALIZED, AWS_ERROR_PKCS11_CKR_CRYPTOKI_ALREADY_INITIALIZED, AWS_ERROR_PKCS11_CKR_MUTEX_BAD, AWS_ERROR_PKCS11_CKR_MUTEX_NOT_LOCKED, AWS_ERROR_PKCS11_CKR_NEW_PIN_MODE, AWS_ERROR_PKCS11_CKR_NEXT_OTP, AWS_ERROR_PKCS11_CKR_EXCEEDED_MAX_ITERATIONS, AWS_ERROR_PKCS11_CKR_FIPS_SELF_TEST_FAILED, AWS_ERROR_PKCS11_CKR_LIBRARY_LOAD_FAILED, AWS_ERROR_PKCS11_CKR_PIN_TOO_WEAK, AWS_ERROR_PKCS11_CKR_PUBLIC_KEY_INVALID, AWS_ERROR_PKCS11_CKR_FUNCTION_REJECTED, AWS_ERROR_IO_PINNED_EVENT_LOOP_MISMATCH, AWS_ERROR_PKCS11_ENCODING_ERROR, AWS_IO_TLS_ERROR_DEFAULT_TRUST_STORE_NOT_FOUND, AWS_IO_STREAM_SEEK_FAILED, AWS_IO_STREAM_GET_LENGTH_FAILED, AWS_IO_STREAM_SEEK_UNSUPPORTED, AWS_IO_STREAM_GET_LENGTH_UNSUPPORTED, AWS_IO_TLS_ERROR_READ_FAILURE, AWS_ERROR_PEM_MALFORMED, AWS_IO_ERROR_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_IO_PACKAGE_ID), AWS_IO_INVALID_FILE_HANDLE = AWS_ERROR_INVALID_FILE_HANDLE, }; AWS_EXTERN_C_BEGIN /** * Initializes internal datastructures used by aws-c-io. * Must be called before using any functionality in aws-c-io. */ AWS_IO_API void aws_io_library_init(struct aws_allocator *allocator); /** * Shuts down the internal datastructures used by aws-c-io. 
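 *
 * Illustrative sketch only: pairing init and clean-up around use of the library.
 * aws_default_allocator() is assumed to come from aws-c-common; any allocator
 * the application prefers can be used instead.
 *
 *     aws_io_library_init(aws_default_allocator());
 *     // ... use aws-c-io ...
 *     aws_io_library_clean_up();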
*/ AWS_IO_API void aws_io_library_clean_up(void); AWS_IO_API void aws_io_fatal_assert_library_initialized(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_IO_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/logging.h000066400000000000000000000016741456575232400242720ustar00rootroot00000000000000#ifndef AWS_IO_LOGGING_H #define AWS_IO_LOGGING_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_log_channel; struct aws_log_formatter; struct aws_log_writer; enum aws_io_log_subject { AWS_LS_IO_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_IO_PACKAGE_ID), AWS_LS_IO_EVENT_LOOP, AWS_LS_IO_SOCKET, AWS_LS_IO_SOCKET_HANDLER, AWS_LS_IO_TLS, AWS_LS_IO_ALPN, AWS_LS_IO_DNS, AWS_LS_IO_PKI, AWS_LS_IO_CHANNEL, AWS_LS_IO_CHANNEL_BOOTSTRAP, AWS_LS_IO_FILE_UTILS, AWS_LS_IO_SHARED_LIBRARY, AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, AWS_LS_IO_STANDARD_RETRY_STRATEGY, AWS_LS_IO_PKCS11, AWS_LS_IO_PEM, AWS_IO_LS_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_IO_PACKAGE_ID) }; AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_LOGGING_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/message_pool.h000066400000000000000000000046311456575232400253150ustar00rootroot00000000000000#ifndef AWS_IO_MESSAGE_POOL_H #define AWS_IO_MESSAGE_POOL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_memory_pool { struct aws_allocator *alloc; struct aws_array_list stack; uint16_t ideal_segment_count; size_t segment_size; void *data_ptr; }; struct aws_message_pool { struct aws_allocator *alloc; struct aws_memory_pool application_data_pool; struct aws_memory_pool small_block_pool; }; struct aws_message_pool_creation_args { size_t application_data_msg_data_size; uint8_t application_data_msg_count; size_t small_block_msg_data_size; uint8_t small_block_msg_count; }; AWS_EXTERN_C_BEGIN AWS_IO_API int aws_memory_pool_init( struct aws_memory_pool *mempool, struct aws_allocator *alloc, uint16_t ideal_segment_count, size_t segment_size); AWS_IO_API void aws_memory_pool_clean_up(struct aws_memory_pool *mempool); /** * Acquires memory from the pool if available, otherwise, it attempts to allocate and returns the result. */ AWS_IO_API void *aws_memory_pool_acquire(struct aws_memory_pool *mempool); /** * Releases memory to the pool if space is available, otherwise frees `to_release` */ AWS_IO_API void aws_memory_pool_release(struct aws_memory_pool *mempool, void *to_release); /** * Initializes message pool using 'msg_pool' as the backing pool, 'args' is copied. */ AWS_IO_API int aws_message_pool_init( struct aws_message_pool *msg_pool, struct aws_allocator *alloc, struct aws_message_pool_creation_args *args); AWS_IO_API void aws_message_pool_clean_up(struct aws_message_pool *msg_pool); /** * Acquires a message from the pool if available, otherwise, it attempts to allocate. If a message is acquired, * note that size_hint is just a hint. the return value's capacity will be set to the actual buffer size. 
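 *
 * Illustrative sketch only (the pool and the 1024-byte size hint are hypothetical):
 *
 *     struct aws_io_message *msg =
 *         aws_message_pool_acquire(msg_pool, AWS_IO_MESSAGE_APPLICATION_DATA, 1024);
 *     if (msg != NULL) {
 *         // msg->message_data.capacity holds the actual buffer size
 *         aws_message_pool_release(msg_pool, msg);
 *     }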
*/ AWS_IO_API struct aws_io_message *aws_message_pool_acquire( struct aws_message_pool *msg_pool, enum aws_io_message_type message_type, size_t size_hint); /** * Releases message to the pool if space is available, otherwise frees `message` * @param message */ AWS_IO_API void aws_message_pool_release(struct aws_message_pool *msg_pool, struct aws_io_message *message); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_MESSAGE_POOL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/pem.h000066400000000000000000000101731456575232400234170ustar00rootroot00000000000000#ifndef AWS_IO_PEM_H #define AWS_IO_PEM_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_EXTERN_C_BEGIN /* * Naming follows OpenSSL convention for PEM types. * Refer to comment after each enum value for the type string it represents. */ enum aws_pem_object_type { AWS_PEM_TYPE_UNKNOWN = 0, AWS_PEM_TYPE_X509_OLD, /* X509 CERTIFICATE */ AWS_PEM_TYPE_X509, /* CERTIFICATE */ AWS_PEM_TYPE_X509_TRUSTED, /* TRUSTED CERTIFICATE */ AWS_PEM_TYPE_X509_REQ_OLD, /* NEW CERTIFICATE REQUEST */ AWS_PEM_TYPE_X509_REQ, /* CERTIFICATE REQUEST */ AWS_PEM_TYPE_X509_CRL, /* X509 CRL */ AWS_PEM_TYPE_EVP_PKEY, /* ANY PRIVATE KEY */ AWS_PEM_TYPE_PUBLIC_PKCS8, /* PUBLIC KEY */ AWS_PEM_TYPE_PRIVATE_RSA_PKCS1, /* RSA PRIVATE KEY */ AWS_PEM_TYPE_PUBLIC_RSA_PKCS1, /* RSA PUBLIC KEY */ AWS_PEM_TYPE_PRIVATE_DSA_PKCS1, /* RSA PRIVATE KEY */ AWS_PEM_TYPE_PUBLIC_DSA_PKCS1, /* RSA PUBLIC KEY */ AWS_PEM_TYPE_PKCS7, /* PKCS7 */ AWS_PEM_TYPE_PKCS7_SIGNED_DATA, /* PKCS #7 SIGNED DATA */ AWS_PEM_TYPE_PRIVATE_PKCS8_ENCRYPTED, /* ENCRYPTED PRIVATE KEY */ AWS_PEM_TYPE_PRIVATE_PKCS8, /* PRIVATE KEY */ AWS_PEM_TYPE_DH_PARAMETERS, /* X9.42 DH PARAMETERS */ AWS_PEM_TYPE_DH_PARAMETERS_X942, /* X9.42 DH PARAMETERS */ AWS_PEM_TYPE_SSL_SESSION_PARAMETERS, /* SSL SESSION PARAMETERS */ AWS_PEM_TYPE_DSA_PARAMETERS, /* DSA PARAMETERS */ AWS_PEM_TYPE_ECDSA_PUBLIC, /* ECDSA PUBLIC KEY */ AWS_PEM_TYPE_EC_PARAMETERS, /* EC PARAMETERS */ AWS_PEM_TYPE_EC_PRIVATE, /* EC PRIVATE KEY */ AWS_PEM_TYPE_PARAMETERS, /* PARAMETERS */ AWS_PEM_TYPE_CMS, /* CMS */ AWS_PEM_TYPE_SM2_PARAMETERS /* SM2 PARAMETERS */ }; /* * Describes PEM object decoded from file. * data points to raw data bytes of object (decoding will do additional base 64 * decoding for each object). * type will be set to object type or to AWS_PEM_TYPE_UNKNOWN if it could not * figure out type. * type_string is the string between -----BEGIN and ----- */ struct aws_pem_object { enum aws_pem_object_type type; struct aws_string *type_string; struct aws_byte_buf data; }; /** * Cleans up elements of pem_objects list 'aws_pem_objects_init_from_file_contents()' * and 'aws_pem_objects_init_from_file_path()'. */ AWS_IO_API void aws_pem_objects_clean_up(struct aws_array_list *pem_objects); /** * Decodes PEM data and reads objects sequentially adding them to pem_objects. * If it comes across an object it cannot read, list of all object read until * that point is returned. * If no objects can be read from PEM or objects could not be base 64 decoded, * AWS_ERROR_PEM_MALFORMED is raised. * out_pem_objects stores aws_pem_object struct by value. * Function will initialize pem_objects list. * This code is slow, and it allocates, so please try * not to call this in the middle of something that needs to be fast or resource sensitive. 
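 *
 * A hedged usage sketch (the PEM text and variable names are illustrative; the list and cursor
 * helpers come from aws-c-common):
 *
 *     struct aws_byte_cursor pem = aws_byte_cursor_from_c_str(pem_text);
 *     struct aws_array_list objects;
 *     if (aws_pem_objects_init_from_file_contents(&objects, allocator, pem) == AWS_OP_SUCCESS) {
 *         for (size_t i = 0; i < aws_array_list_length(&objects); ++i) {
 *             struct aws_pem_object *pem_obj = NULL;
 *             aws_array_list_get_at_ptr(&objects, (void **)&pem_obj, i);
 *             // inspect pem_obj->type and pem_obj->data here
 *         }
 *         aws_pem_objects_clean_up(&objects);
 *     }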
*/ AWS_IO_API int aws_pem_objects_init_from_file_contents( struct aws_array_list *pem_objects, struct aws_allocator *alloc, struct aws_byte_cursor pem_cursor); /** * Decodes PEM data from file and reads objects sequentially adding them to pem_objects. * If it comes across an object it cannot read, list of all object read until * that point is returned. * If no objects can be read from PEM or objects could not be base 64 decoded, * AWS_ERROR_PEM_MALFORMED is raised. * out_pem_objects stores aws_pem_object struct by value. * Function will initialize pem_objects list. * This code is slow, and it allocates, so please try * not to call this in the middle of something that needs to be fast or resource sensitive. */ AWS_IO_API int aws_pem_objects_init_from_file_path( struct aws_array_list *pem_objects, struct aws_allocator *allocator, const char *filename); AWS_EXTERN_C_END #endif /* AWS_IO_PEM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/pipe.h000066400000000000000000000131321456575232400235710ustar00rootroot00000000000000#ifndef AWS_IO_PIPE_H #define AWS_IO_PIPE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_event_loop; struct aws_pipe_read_end { void *impl_data; }; struct aws_pipe_write_end { void *impl_data; }; /** * Callback for when the pipe is readable (edge-triggered), or an error has occurred. * Afer subscribing, the callback is invoked when the pipe has data to read, or the pipe has an error. * The readable callback is invoked again any time the user reads all data, and then more data arrives. * Note that it will not be invoked again if the pipe still has unread data when more data arrives. * `error_code` of AWS_ERROR_SUCCESS indicates a readable event, and otherwise contains the value of the error. * `user_data` corresponds to the `user_data` passed into aws_pipe_subscribe_to_read_events(). * This callback is always invoked on the read-end's event-loop thread. */ typedef void(aws_pipe_on_readable_fn)(struct aws_pipe_read_end *read_end, int error_code, void *user_data); /** * Callback for when the asynchronous aws_pipe_write() operation has either completed or failed. * `write_end` will be NULL if this callback is invoked after the the write-end has been cleaned up, * this does not necessarily mean that the write operation failed. * `error_code` will be AWS_ERROR_SUCCESS if all data was written, or a code corresponding to the error. * `src_buffer` corresponds to the buffer passed into aws_pipe_write() * `user_data` corresponds to the `user_data` passed into aws_pipe_write(). * This callback is always invoked on the write-end's event-loop thread. */ typedef void(aws_pipe_on_write_completed_fn)( struct aws_pipe_write_end *write_end, int error_code, struct aws_byte_cursor src_buffer, void *user_data); AWS_EXTERN_C_BEGIN /** * Opens an OS specific bidirectional pipe. * The read direction is stored in read_end. Write direction is stored in write_end. * Each end must be connected to an event-loop, and further calls to each end must happen on that event-loop's thread. */ AWS_IO_API int aws_pipe_init( struct aws_pipe_read_end *read_end, struct aws_event_loop *read_end_event_loop, struct aws_pipe_write_end *write_end, struct aws_event_loop *write_end_event_loop, struct aws_allocator *allocator); /** * Clean up the read-end of the pipe. * This must be called on the thread of the connected event-loop. 
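 *
 * For context, a rough pipe lifecycle sketch (hedged; the event loops and allocator are assumed to
 * exist already and their creation is not shown):
 *
 *     struct aws_pipe_read_end read_end;
 *     struct aws_pipe_write_end write_end;
 *     aws_pipe_init(&read_end, read_loop, &write_end, write_loop, allocator);
 *     // ... write on the write-end's loop thread, subscribe/read on the read-end's loop thread ...
 *     aws_pipe_clean_up_read_end(&read_end);   // on the read-end's event-loop thread
 *     aws_pipe_clean_up_write_end(&write_end); // on the write-end's event-loop thread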
*/ AWS_IO_API int aws_pipe_clean_up_read_end(struct aws_pipe_read_end *read_end); /** * Clean up the write-end of the pipe. * This must be called on the thread of the connected event-loop. */ AWS_IO_API int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end); /** * Get the event-loop connected to the read-end of the pipe. * This may be called on any thread. */ AWS_IO_API struct aws_event_loop *aws_pipe_get_read_end_event_loop(const struct aws_pipe_read_end *read_end); /** * Get the event-loop connected to the write-end of the pipe. * This may be called on any thread. */ AWS_IO_API struct aws_event_loop *aws_pipe_get_write_end_event_loop(const struct aws_pipe_write_end *write_end); /** * Initiates an asynchrous write from the source buffer to the pipe. * The data referenced by `src_buffer` must remain in memory until the operation completes. * `on_complete` is called on the event-loop thread when the operation has either completed or failed. * The callback's pipe argument will be NULL if the callback is invoked after the pipe has been cleaned up. * This must be called on the thread of the connected event-loop. */ AWS_IO_API int aws_pipe_write( struct aws_pipe_write_end *write_end, struct aws_byte_cursor src_buffer, aws_pipe_on_write_completed_fn *on_completed, void *user_data); /** * Read data from the pipe into the destination buffer. * Attempts to read enough to fill all remaining space in the buffer, from `dst_buffer->len` to `dst_buffer->capacity`. * `dst_buffer->len` is updated to reflect the buffer's new length. * `num_bytes_read` (optional) is set to the total number of bytes read. * This function never blocks. If no bytes could be read without blocking, then AWS_OP_ERR is returned and * aws_last_error() code will be AWS_IO_READ_WOULD_BLOCK. * This must be called on the thread of the connected event-loop. */ AWS_IO_API int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_buffer, size_t *num_bytes_read); /** * Subscribe to be notified when the pipe becomes readable (edge-triggered), or an error occurs. * `on_readable` is invoked on the event-loop's thread when the pipe has data to read, or the pipe has an error. * `on_readable` is invoked again any time the user reads all data, and then more data arrives. * Note that it will not be invoked again if the pipe still has unread data when more data arrives. * This must be called on the thread of the connected event-loop. */ AWS_IO_API int aws_pipe_subscribe_to_readable_events( struct aws_pipe_read_end *read_end, aws_pipe_on_readable_fn *on_readable, void *user_data); /** * Stop receiving notifications about events on the read-end of the pipe. * This must be called on the thread of the connected event-loop. */ AWS_IO_API int aws_pipe_unsubscribe_from_readable_events(struct aws_pipe_read_end *read_end); #if defined(_WIN32) /** * Generate a unique pipe name. * The suggested dst_size is 256. */ AWS_IO_API int aws_pipe_get_unique_name(char *dst, size_t dst_size); #endif AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_PIPE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/pkcs11.h000066400000000000000000000056721456575232400237500ustar00rootroot00000000000000#ifndef AWS_IO_PKCS11_H #define AWS_IO_PKCS11_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; /** * Handle to a loaded PKCS#11 library. 
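 *
 * A handle is normally obtained from aws_pkcs11_lib_new(), declared further below. A hedged sketch
 * (the library path is purely illustrative):
 *
 *     struct aws_pkcs11_lib_options options = {
 *         .filename = aws_byte_cursor_from_c_str("/path/to/vendor-pkcs11.so"),
 *         .initialize_finalize_behavior = AWS_PKCS11_LIB_DEFAULT_BEHAVIOR,
 *     };
 *     struct aws_pkcs11_lib *pkcs11_lib = aws_pkcs11_lib_new(allocator, &options);
 *     // ... use pkcs11_lib ...
 *     aws_pkcs11_lib_release(pkcs11_lib);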
*/ struct aws_pkcs11_lib; /** * Controls how aws_pkcs11_lib calls C_Initialize() and C_Finalize() on the PKCS#11 library. */ enum aws_pkcs11_lib_behavior { /** * Default behavior that accommodates most use cases. * C_Initialize() is called on creation, and "already-initialized" errors are ignored. * C_Finalize() is never called, just in case another part of your * application is still using the PKCS#11 library. */ AWS_PKCS11_LIB_DEFAULT_BEHAVIOR, /** * Skip calling C_Initialize() and C_Finalize(). * Use this if your application has already initialized the PKCS#11 library, * and you do not want C_Initialize() called again. */ AWS_PKCS11_LIB_OMIT_INITIALIZE, /** * C_Initialize() is called on creation and C_Finalize() is called on cleanup. * If C_Initialize() reports that's it's already initialized, this is treated as an error. * Use this if you need perfect cleanup (ex: running valgrind with --leak-check). */ AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE, }; /* The enum above was misspelled, and later got fixed (pcks11 -> pkcs11). * This macro maintain backwards compatibility with the old spelling */ #define aws_pcks11_lib_behavior aws_pkcs11_lib_behavior /** * Options for aws_pkcs11_lib_new() */ struct aws_pkcs11_lib_options { /** * Name of PKCS#11 library file to load (UTF-8). * Zero out if your application is compiled with PKCS#11 symbols linked in. */ struct aws_byte_cursor filename; /** * Behavior for calling C_Initialize() and C_Finalize() on the PKCS#11 library. */ enum aws_pkcs11_lib_behavior initialize_finalize_behavior; }; AWS_EXTERN_C_BEGIN /** * Load and initialize a PKCS#11 library. * See `aws_pkcs11_lib_options` for options. * * If successful a valid pointer is returned. You must call aws_pkcs11_lib_release() when you are done with it. * If unsuccessful, NULL is returned and an error is set. */ AWS_IO_API struct aws_pkcs11_lib *aws_pkcs11_lib_new( struct aws_allocator *allocator, const struct aws_pkcs11_lib_options *options); /** * Acquire a reference to a PKCS#11 library, preventing it from being cleaned up. * You must call aws_pkcs11_lib_release() when you are done with it. * This function returns whatever was passed in. It cannot fail. */ AWS_IO_API struct aws_pkcs11_lib *aws_pkcs11_lib_acquire(struct aws_pkcs11_lib *pkcs11_lib); /** * Release a reference to the PKCS#11 library. * When the last reference is released, the library is cleaned up. */ AWS_IO_API void aws_pkcs11_lib_release(struct aws_pkcs11_lib *pkcs11_lib); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_PKCS11_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/private/000077500000000000000000000000001456575232400241355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/private/pem_utils.h000066400000000000000000000014131456575232400263060ustar00rootroot00000000000000#ifndef AWS_IO_PEM_UTILS_H #define AWS_IO_PEM_UTILS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_EXTERN_C_BEGIN /** * Cleanup Function that parses the full PEM Chain object once and strip the comments out for the pem parser not * handling the comments. The passed in pem will be cleaned up. * * - Garbage characters in-between PEM objects (characters before the first BEGIN or after an END and before the next * BEGIN) are removed * * - AWS_ERROR_INVALID_ARGUMENT will be raised if the file contains no PEM encoded data. 
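 *
 * A hedged sketch (assumes `pem_buf` already holds the raw PEM file contents; loading it is not shown):
 *
 *     if (aws_sanitize_pem(&pem_buf, allocator) != AWS_OP_SUCCESS) {
 *         // no PEM encoded data was found; aws_last_error() describes the failure
 *     }
 *     // on success, pem_buf contains only the PEM objects, with surrounding garbage removed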
*/ AWS_IO_API int aws_sanitize_pem(struct aws_byte_buf *pem, struct aws_allocator *allocator); AWS_EXTERN_C_END #endif /* AWS_IO_PEM_UTILS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/private/pki_utils.h000066400000000000000000000065471456575232400263250ustar00rootroot00000000000000#ifndef AWS_IO_PKI_UTILS_H #define AWS_IO_PKI_UTILS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #ifdef _WIN32 /* It's ok to include external headers because this is a PRIVATE header file * (it is usually a crime to include windows.h from header file) */ # include #endif /* _WIN32 */ #ifdef AWS_OS_APPLE /* It's ok to include external headers because this is a PRIVATE header file */ # include #endif /* AWS_OS_APPLE */ struct aws_string; AWS_EXTERN_C_BEGIN /** * Returns the path to the directory and file, respectively, which holds the * SSL certificate trust store on the system. */ AWS_IO_API const char *aws_determine_default_pki_dir(void); AWS_IO_API const char *aws_determine_default_pki_ca_file(void); #ifdef AWS_OS_APPLE # if !defined(AWS_OS_IOS) /** * Imports a PEM armored PKCS#7 public/private key pair * into identity for use with SecurityFramework. */ int aws_import_public_and_private_keys_to_identity( struct aws_allocator *alloc, CFAllocatorRef cf_alloc, const struct aws_byte_cursor *public_cert_chain, const struct aws_byte_cursor *private_key, CFArrayRef *identity, const struct aws_string *keychain_path); # endif /* AWS_OS_IOS */ /** * Imports a PKCS#12 file into identity for use with * SecurityFramework */ int aws_import_pkcs12_to_identity( CFAllocatorRef cf_alloc, const struct aws_byte_cursor *pkcs12_cursor, const struct aws_byte_cursor *password, CFArrayRef *identity); /** * Loads PRM armored PKCS#7 certificates into certs * for use with custom CA. */ int aws_import_trusted_certificates( struct aws_allocator *alloc, CFAllocatorRef cf_alloc, const struct aws_byte_cursor *certificates_blob, CFArrayRef *certs); /** * Releases identity (the output of the aws_import_* functions). */ void aws_release_identity(CFArrayRef identity); /** * releases the output of aws_import_trusted_certificates. */ void aws_release_certificates(CFArrayRef certs); #endif /* AWS_OS_APPLE */ #ifdef _WIN32 /** * Returns AWS_OP_SUCCESS if we were able to successfully load the certificate and cert_store. * * Returns AWS_OP_ERR otherwise. */ AWS_IO_API int aws_load_cert_from_system_cert_store( const char *cert_path, HCERTSTORE *cert_store, PCCERT_CONTEXT *certs); /** * Imports a PEM armored PKCS#7 blob into an ephemeral certificate store for use * as a custom CA. */ AWS_IO_API int aws_import_trusted_certificates( struct aws_allocator *alloc, const struct aws_byte_cursor *certificates_blob, HCERTSTORE *cert_store); /** * Closes a cert store that was opened by aws_is_system_cert_store, aws_import_trusted_certificates, * or aws_import_key_pair_to_cert_context. */ AWS_IO_API void aws_close_cert_store(HCERTSTORE cert_store); /** * Imports a PEM armored PKCS#7 public/private key pair into certs for use as a certificate with SSPI. 
*/ AWS_IO_API int aws_import_key_pair_to_cert_context( struct aws_allocator *alloc, const struct aws_byte_cursor *public_cert_chain, const struct aws_byte_cursor *private_key, bool is_client_mode, HCERTSTORE *cert_store, PCCERT_CONTEXT *certs, HCRYPTPROV *crypto_provider, HCRYPTKEY *private_key_handle); #endif /* _WIN32 */ AWS_EXTERN_C_END #endif /* AWS_IO_PKI_UTILS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/private/tls_channel_handler_shared.h000066400000000000000000000032061456575232400316240ustar00rootroot00000000000000#ifndef AWS_IO_TLS_CHANNEL_HANDLER_SHARED_H #define AWS_IO_TLS_CHANNEL_HANDLER_SHARED_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include struct aws_tls_connection_options; struct aws_tls_channel_handler_shared { uint32_t tls_timeout_ms; struct aws_channel_handler *handler; struct aws_channel_task timeout_task; struct aws_crt_statistics_tls stats; }; AWS_EXTERN_C_BEGIN AWS_IO_API void aws_tls_channel_handler_shared_init( struct aws_tls_channel_handler_shared *tls_handler_shared, struct aws_channel_handler *handler, struct aws_tls_connection_options *options); AWS_IO_API void aws_tls_channel_handler_shared_clean_up(struct aws_tls_channel_handler_shared *tls_handler_shared); AWS_IO_API void aws_on_drive_tls_negotiation(struct aws_tls_channel_handler_shared *tls_handler_shared); AWS_IO_API void aws_on_tls_negotiation_completed( struct aws_tls_channel_handler_shared *tls_handler_shared, int error_code); /** * Returns true if an aws_byte_buf on aws_tls_ctx_options was set by the user. * Use this to determine whether a buf was set. DO NOT simply check if buf.len > 0. * * Reasoning: * If the user calls a setter function but passes a 0 length file or cursor, buf.len will be zero. * TLS should still respect the fact that the setter was called. * TLS should not use defaults instead just because length is 0. */ AWS_IO_API bool aws_tls_options_buf_is_set(const struct aws_byte_buf *buf); AWS_EXTERN_C_END #endif /* AWS_IO_TLS_CHANNEL_HANDLER_SHARED_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/private/tracing.h000066400000000000000000000011271456575232400257360ustar00rootroot00000000000000#ifndef AWS_IO_TRACING_H #define AWS_IO_TRACING_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include extern __itt_domain *io_tracing_domain; extern __itt_string_handle *tracing_input_stream_read; extern __itt_string_handle *tracing_event_loop_run_tasks; extern __itt_string_handle *tracing_event_loop_event; extern __itt_string_handle *tracing_event_loop_events; AWS_EXTERN_C_BEGIN void aws_io_tracing_init(void); AWS_EXTERN_C_END #endif /* AWS_IO_TRACING_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/retry_strategy.h000066400000000000000000000245351456575232400257340ustar00rootroot00000000000000#ifndef AWS_IO_CLIENT_RETRY_STRATEGY_H #define AWS_IO_CLIENT_RETRY_STRATEGY_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_retry_strategy; struct aws_retry_token; struct aws_event_loop_group; /** * Invoked upon the acquisition, or failure to acquire a retry token. This function will always be invoked if and only * if aws_retry_strategy_acquire_retry_token() returns AWS_OP_SUCCESS. It will never be invoked synchronously from * aws_retry_strategy_acquire_retry_token(). 
Token will always be NULL if error_code is non-zero, and vice-versa. If * token is non-null, it will have a reference count of 1, and you must call aws_retry_token_release() on it later. See * the comments for aws_retry_strategy_on_retry_ready_fn for more info. */ typedef void(aws_retry_strategy_on_retry_token_acquired_fn)( struct aws_retry_strategy *retry_strategy, int error_code, struct aws_retry_token *token, void *user_data); /** * Invoked after a successful call to aws_retry_strategy_schedule_retry(). This function will always be invoked if and * only if aws_retry_strategy_schedule_retry() returns AWS_OP_SUCCESS. It will never be invoked synchronously from * aws_retry_strategy_schedule_retry(). After attempting the operation, either call aws_retry_strategy_schedule_retry() * with an aws_retry_error_type or call aws_retry_token_record_success() and then release the token via. * aws_retry_token_release(). */ typedef void(aws_retry_strategy_on_retry_ready_fn)(struct aws_retry_token *token, int error_code, void *user_data); /** * Optional function to supply your own generate random implementation */ typedef uint64_t(aws_generate_random_fn)(void *user_data); enum aws_retry_error_type { /** This is a connection level error such as a socket timeout, socket connect error, tls negotiation timeout etc... * Typically these should never be applied for non-idempotent request types since in this scenario, it's impossible * to know whether the operation had a side effect on the server. */ AWS_RETRY_ERROR_TYPE_TRANSIENT, /** This is an error where the server explicitly told the client to back off, such as a 429 or 503 Http error. */ AWS_RETRY_ERROR_TYPE_THROTTLING, /** This is a server error that isn't explicitly throttling but is considered by the client * to be something that should be retried. */ AWS_RETRY_ERROR_TYPE_SERVER_ERROR, /** Doesn't count against any budgets. This could be something like a 401 challenge in Http. */ AWS_RETRY_ERROR_TYPE_CLIENT_ERROR, }; struct aws_retry_strategy_vtable { void (*destroy)(struct aws_retry_strategy *retry_strategy); int (*acquire_token)( struct aws_retry_strategy *retry_strategy, const struct aws_byte_cursor *partition_id, aws_retry_strategy_on_retry_token_acquired_fn *on_acquired, void *user_data, uint64_t timeout_ms); int (*schedule_retry)( struct aws_retry_token *token, enum aws_retry_error_type error_type, aws_retry_strategy_on_retry_ready_fn *retry_ready, void *user_data); int (*record_success)(struct aws_retry_token *token); void (*release_token)(struct aws_retry_token *token); }; struct aws_retry_strategy { struct aws_allocator *allocator; struct aws_retry_strategy_vtable *vtable; struct aws_atomic_var ref_count; void *impl; }; struct aws_retry_token { struct aws_allocator *allocator; struct aws_retry_strategy *retry_strategy; struct aws_atomic_var ref_count; void *impl; }; /** * Jitter mode for exponential backoff. * * For a great writeup on these options see: * https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ */ enum aws_exponential_backoff_jitter_mode { /* Uses AWS_EXPONENTIAL_BACKOFF_JITTER_FULL */ AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT, AWS_EXPONENTIAL_BACKOFF_JITTER_NONE, AWS_EXPONENTIAL_BACKOFF_JITTER_FULL, AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED, }; /** * Options for exponential backoff retry strategy. el_group must be set, any other option, if set to 0 will signify * "use defaults" */ struct aws_exponential_backoff_retry_options { /** Event loop group to use for scheduling tasks. 
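 *
 * For example, a minimal configuration sketch (hedged; the event loop group is assumed to have been
 * created elsewhere, e.g. with aws_event_loop_group_new_default(), and unset fields fall back to the
 * defaults described below):
 *
 *     struct aws_exponential_backoff_retry_options backoff_options = {
 *         .el_group = el_group,
 *         .max_retries = 5,
 *         .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_FULL,
 *     };
 *     struct aws_retry_strategy *strategy =
 *         aws_retry_strategy_new_exponential_backoff(allocator, &backoff_options);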
*/ struct aws_event_loop_group *el_group; /** Max retries to allow. The default value is 10 */ size_t max_retries; /** Scaling factor to add for the backoff. Default is 500ms */ uint32_t backoff_scale_factor_ms; /** Max retry backoff in seconds. Default is 20 seconds */ uint32_t max_backoff_secs; /** Jitter mode to use, see comments for aws_exponential_backoff_jitter_mode. * Default is AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT */ enum aws_exponential_backoff_jitter_mode jitter_mode; /** Deprecated. Use generate_random_impl instead * By default this will be set to use aws_device_random. If you want something else, set it here. * */ uint64_t (*generate_random)(void); /* * By default this will be set to use aws_device_random. If you want something else, set it here. */ aws_generate_random_fn *generate_random_impl; /** * Optional user data for the generate random generate_random_impl. */ void *generate_random_user_data; /** * Optional shutdown callback that gets invoked, with appropriate user data, * when the resources used by the retry_strategy are no longer in use. */ const struct aws_shutdown_callback_options *shutdown_options; }; struct aws_standard_retry_options { struct aws_exponential_backoff_retry_options backoff_retry_options; /** capacity for partitions. Defaults to 500 */ size_t initial_bucket_capacity; }; AWS_EXTERN_C_BEGIN /** * Acquire a reference count on retry_strategy. */ AWS_IO_API void aws_retry_strategy_acquire(struct aws_retry_strategy *retry_strategy); /** * Releases a reference count on retry_strategy. */ AWS_IO_API void aws_retry_strategy_release(struct aws_retry_strategy *retry_strategy); /** * Attempts to acquire a retry token for use with retries. On success, on_acquired will be invoked when a token is * available, or an error will be returned if the timeout expires. partition_id identifies operations that should be * grouped together. This allows for more sophisticated strategies such as AIMD and circuit breaker patterns. Pass NULL * to use the global partition. */ AWS_IO_API int aws_retry_strategy_acquire_retry_token( struct aws_retry_strategy *retry_strategy, const struct aws_byte_cursor *partition_id, aws_retry_strategy_on_retry_token_acquired_fn *on_acquired, void *user_data, uint64_t timeout_ms); /** * Schedules a retry based on the backoff and token based strategies. retry_ready is invoked when the retry is either * ready for execution or if it has been canceled due to application shutdown. * * This function can return an error to reject the retry attempt if, for example, a circuit breaker has opened. If this * occurs users should fail their calls back to their callers. * * error_type is used for book keeping. See the comments above for aws_retry_error_type. */ AWS_IO_API int aws_retry_strategy_schedule_retry( struct aws_retry_token *token, enum aws_retry_error_type error_type, aws_retry_strategy_on_retry_ready_fn *retry_ready, void *user_data); /** * Records a successful retry. This is used for making future decisions to open up token buckets, AIMD breakers etc... * some strategies such as exponential backoff will ignore this, but you should always call it after a successful * operation or your system will never recover during an outage. */ AWS_IO_API int aws_retry_token_record_success(struct aws_retry_token *token); /** * Increments reference count for token. This should be called any time you seat the token to a pointer you own. */ AWS_IO_API void aws_retry_token_acquire(struct aws_retry_token *token); /** * Releases the reference count for token. 
This should always be invoked after either calling * aws_retry_strategy_schedule_retry() and failing, or after calling aws_retry_token_record_success(). */ AWS_IO_API void aws_retry_token_release(struct aws_retry_token *token); /** * Creates a retry strategy using exponential backoff. This strategy does not perform any bookkeeping on error types and * success. There is no circuit breaker functionality in here. See the comments above for * aws_exponential_backoff_retry_options. */ AWS_IO_API struct aws_retry_strategy *aws_retry_strategy_new_exponential_backoff( struct aws_allocator *allocator, const struct aws_exponential_backoff_retry_options *config); /** * This is a retry implementation that cuts off traffic if it's * detected that an endpoint partition is having availability * problems. This is necessary to keep from making outages worse * by scheduling work that's unlikely to succeed yet increases * load on an already ailing system. * * We do this by creating a bucket for each partition. A partition * is an arbitrary specifier. It can be anything: a region, a service, * a combination of region and service, a literal dns name.... doesn't matter. * * Each bucket has a budget for maximum allowed retries. Different types of events * carry different weights. Things that indicate an unhealthy partition such as * transient errors (timeouts, unhealthy connection etc...) cost more. * A retry for any other reason (service sending a 5xx response code) cost a bit less. * When a retry is attempted this capacity is leased out to the retry. On success it is * released back to the capacity pool. On failure, it remains leased. * Operations that succeed without a retry slowly restore the capacity pool. * * If a partition runs out of capacity it is assumed unhealthy and retries will be blocked * until capacity returns to the pool. To prevent a partition from staying unhealthy after * an outage has recovered, new requests that succeed without a retry will increase the capacity * slowly ( a new request gets a payback lease of 1, but the lease is never actually deducted from the capacity pool). */ AWS_IO_API struct aws_retry_strategy *aws_retry_strategy_new_standard( struct aws_allocator *allocator, const struct aws_standard_retry_options *config); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_CLIENT_RETRY_STRATEGY_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/shared_library.h000066400000000000000000000022621456575232400256300ustar00rootroot00000000000000#ifndef AWS_COMMON_SHARED_LIBRARY_H #define AWS_COMMON_SHARED_LIBRARY_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_byte_cursor; /* * A simple platform wrapper for dynamically loading and examining shared libraries */ struct aws_shared_library { void *library_handle; }; typedef void (*aws_generic_function)(void); AWS_EXTERN_C_BEGIN /* * Initializes a dynamically-loaded shared library from its file path location */ AWS_IO_API int aws_shared_library_init(struct aws_shared_library *library, const char *library_path); /* * Closes a dynamically-loaded shared library */ AWS_IO_API void aws_shared_library_clean_up(struct aws_shared_library *library); /* * Finds a function symbol within a shared library. function_address may be * safely cast into any other function type as appropriate. 
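 *
 * A hedged lookup sketch (the library path and symbol name are illustrative only):
 *
 *     struct aws_shared_library lib;
 *     if (aws_shared_library_init(&lib, "libexample.so") == AWS_OP_SUCCESS) {
 *         aws_generic_function fn = NULL;
 *         if (aws_shared_library_find_function(&lib, "example_symbol", &fn) == AWS_OP_SUCCESS) {
 *             void (*typed_fn)(int) = (void (*)(int))fn;
 *             // call typed_fn(...) with the signature the symbol is known to have
 *         }
 *         aws_shared_library_clean_up(&lib);
 *     }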
*/ AWS_IO_API int aws_shared_library_find_function( struct aws_shared_library *library, const char *symbol_name, aws_generic_function *function_address); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_COMMON_SHARED_LIBRARY_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/socket.h000066400000000000000000000334101456575232400241250ustar00rootroot00000000000000#ifndef AWS_IO_SOCKET_H #define AWS_IO_SOCKET_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_socket_domain { AWS_SOCKET_IPV4, AWS_SOCKET_IPV6, /* Unix domain sockets (or at least something like them) */ AWS_SOCKET_LOCAL, /* VSOCK used in inter-VM communication */ AWS_SOCKET_VSOCK, }; enum aws_socket_type { /* A streaming socket sends reliable messages over a two-way connection. * This means TCP when used with IPV4/6, and Unix domain sockets, when used with * AWS_SOCKET_LOCAL*/ AWS_SOCKET_STREAM, /* A datagram socket is connectionless and sends unreliable messages. * This means UDP when used with IPV4/6. * LOCAL and VSOCK sockets are not compatible with DGRAM.*/ AWS_SOCKET_DGRAM, }; struct aws_socket_options { enum aws_socket_type type; enum aws_socket_domain domain; uint32_t connect_timeout_ms; /* Keepalive properties are TCP only. * Set keepalive true to periodically transmit messages for detecting a disconnected peer. * If interval or timeout are zero, then default values are used. */ uint16_t keep_alive_interval_sec; uint16_t keep_alive_timeout_sec; /* If set, sets the number of keep alive probes allowed to fail before the connection is considered * lost. If zero OS defaults are used. On Windows, this option is meaningless until Windows 10 1703.*/ uint16_t keep_alive_max_failed_probes; bool keepalive; }; struct aws_socket; struct aws_event_loop; /** * Called in client mode when an outgoing connection has succeeded or an error has occurred. * If the connection was successful error_code will be AWS_ERROR_SUCCESS and the socket has already been assigned * to the event loop specified in aws_socket_connect(). * * If an error occurred error_code will be non-zero. */ typedef void(aws_socket_on_connection_result_fn)(struct aws_socket *socket, int error_code, void *user_data); /** * Called by a listening socket when either an incoming connection has been received or an error occurred. * * In the normal use-case, this function will be called multiple times over the lifetime of a single listening socket. * new_socket is already connected and initialized, and is using the same options and allocator as the listening socket. * A user may want to call aws_socket_set_options() on the new socket if different options are desired. * * new_socket is not yet assigned to an event-loop. The user should call aws_socket_assign_to_event_loop() before * performing IO operations. * * When error_code is AWS_ERROR_SUCCESS, new_socket is the recently accepted connection. * If error_code is non-zero, an error occurred and you should aws_socket_close() the socket. * * Do not call aws_socket_clean_up() from this callback. */ typedef void(aws_socket_on_accept_result_fn)( struct aws_socket *socket, int error_code, struct aws_socket *new_socket, void *user_data); /** * Callback for when the data passed to a call to aws_socket_write() has either completed or failed. * On success, error_code will be AWS_ERROR_SUCCESS. 
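 *
 * A hedged callback sketch (the function name and the `my_connection` user type are illustrative,
 * not part of this library):
 *
 *     static void s_on_write_completed(
 *         struct aws_socket *socket, int error_code, size_t bytes_written, void *user_data) {
 *         (void)socket;
 *         struct my_connection *connection = user_data; // hypothetical application type
 *         if (error_code == AWS_ERROR_SUCCESS) {
 *             connection->bytes_flushed += bytes_written;
 *         }
 *     }
 *
 * which would be passed as the written_fn argument of aws_socket_write().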
*/ typedef void( aws_socket_on_write_completed_fn)(struct aws_socket *socket, int error_code, size_t bytes_written, void *user_data); /** * Callback for when socket is either readable (edge-triggered) or when an error has occurred. If the socket is * readable, error_code will be AWS_ERROR_SUCCESS. */ typedef void(aws_socket_on_readable_fn)(struct aws_socket *socket, int error_code, void *user_data); #ifdef _WIN32 # define AWS_ADDRESS_MAX_LEN 256 #else # include # define AWS_ADDRESS_MAX_LEN sizeof(((struct sockaddr_un *)0)->sun_path) #endif struct aws_socket_endpoint { char address[AWS_ADDRESS_MAX_LEN]; uint32_t port; }; struct aws_socket { struct aws_allocator *allocator; struct aws_socket_endpoint local_endpoint; struct aws_socket_endpoint remote_endpoint; struct aws_socket_options options; struct aws_io_handle io_handle; struct aws_event_loop *event_loop; struct aws_channel_handler *handler; int state; aws_socket_on_readable_fn *readable_fn; void *readable_user_data; aws_socket_on_connection_result_fn *connection_result_fn; aws_socket_on_accept_result_fn *accept_result_fn; void *connect_accept_user_data; void *impl; }; struct aws_byte_buf; struct aws_byte_cursor; /* These are hacks for working around headers and functions we need for IO work but aren't directly includable or linkable. these are purposely not exported. These functions only get called internally. The awkward aws_ prefixes are just in case someone includes this header somewhere they were able to get these definitions included. */ #ifdef _WIN32 typedef void (*aws_ms_fn_ptr)(void); void aws_check_and_init_winsock(void); aws_ms_fn_ptr aws_winsock_get_connectex_fn(void); aws_ms_fn_ptr aws_winsock_get_acceptex_fn(void); #endif AWS_EXTERN_C_BEGIN /** * Initializes a socket object with socket options. options will be copied. */ AWS_IO_API int aws_socket_init( struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options); /** * Shuts down any pending operations on the socket, and cleans up state. The socket object can be re-initialized after * this operation. This function calls aws_socket_close. If you have not already called aws_socket_close() on the * socket, all of the rules for aws_socket_close() apply here. In this case it will not fail if you use the function * improperly, but on some platforms you will certainly leak memory. * * If the socket has already been closed, you can safely, call this from any thread. */ AWS_IO_API void aws_socket_clean_up(struct aws_socket *socket); /** * Connects to a remote endpoint. In UDP, this simply binds the socket to a remote address for use with * `aws_socket_write()`, and if the operation is successful, the socket can immediately be used for write operations. * * In TCP, LOCAL and VSOCK this function will not block. If the return value is successful, then you must wait on the * `on_connection_result()` callback to be invoked before using the socket. * * If an event_loop is provided for UDP sockets, a notification will be sent on * on_connection_result in the event-loop's thread. Upon completion, the socket will already be assigned * an event loop. If NULL is passed for UDP, it will immediately return upon success, but you must call * aws_socket_assign_to_event_loop before use. */ AWS_IO_API int aws_socket_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *event_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data); /** * Binds the socket to a local address. 
In UDP mode, the socket is ready for `aws_socket_read()` operations. In * connection oriented modes, you still must call `aws_socket_listen()` and `aws_socket_start_accept()` before using the * socket. local_endpoint is copied. */ AWS_IO_API int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint); /** * Get the local address which the socket is bound to. * Raises an error if no address is bound. */ AWS_IO_API int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address); /** * TCP, LOCAL and VSOCK only. Sets up the socket to listen on the address bound to in `aws_socket_bind()`. */ AWS_IO_API int aws_socket_listen(struct aws_socket *socket, int backlog_size); /** * TCP, LOCAL and VSOCK only. The socket will begin accepting new connections. This is an asynchronous operation. New * connections or errors will arrive via the `on_accept_result` callback. * * aws_socket_bind() and aws_socket_listen() must be called before calling this function. */ AWS_IO_API int aws_socket_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data); /** * TCP, LOCAL and VSOCK only. The listening socket will stop accepting new connections. * It is safe to call `aws_socket_start_accept()` again after * this operation. This can be called from any thread but be aware, * on some platforms, if you call this from outside of the current event loop's thread, it will block * until the event loop finishes processing the request for unsubscribe in it's own thread. */ AWS_IO_API int aws_socket_stop_accept(struct aws_socket *socket); /** * Calls `close()` on the socket and unregisters all io operations from the event loop. This function must be called * from the event-loop's thread unless this is a listening socket. If it's a listening socket it can be called from any * non-event-loop thread or the event-loop the socket is currently assigned to. If called from outside the event-loop, * this function will block waiting on the socket to close. If this is called from an event-loop thread other than * the one it's assigned to, it presents the possibility of a deadlock, so don't do it. */ AWS_IO_API int aws_socket_close(struct aws_socket *socket); /** * Calls `shutdown()` on the socket based on direction. */ AWS_IO_API int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir); /** * Sets new socket options on the underlying socket. This is mainly useful in context of accepting a new connection via: * `on_incoming_connection()`. options is copied. */ AWS_IO_API int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_options *options); /** * Assigns the socket to the event-loop. The socket will begin receiving read/write/error notifications after this call. * * Note: If you called connect for TCP or Unix Domain Sockets and received a connection_success callback, this has * already happened. You only need to call this function when: * * a.) This socket is a server socket (e.g. a result of a call to start_accept()) * b.) This socket is a UDP socket. */ AWS_IO_API int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop); /** * Gets the event-loop the socket is assigned to. */ AWS_IO_API struct aws_event_loop *aws_socket_get_event_loop(struct aws_socket *socket); /** * Subscribes on_readable to notifications when the socket goes readable (edge-triggered). 
Errors will also be recieved * in the callback. * * Note! This function is technically not thread safe, but we do not enforce which thread you call from. * It's your responsibility to either call this in safely (e.g. just don't call it in parallel from multiple threads) or * schedule a task to call it. If you call it before your first call to read, it will be fine. */ AWS_IO_API int aws_socket_subscribe_to_readable_events( struct aws_socket *socket, aws_socket_on_readable_fn *on_readable, void *user_data); /** * Reads from the socket. This call is non-blocking and will return `AWS_IO_SOCKET_READ_WOULD_BLOCK` if no data is * available. `read` is the amount of data read into `buffer`. * * Attempts to read enough to fill all remaining space in the buffer, from `buffer->len` to `buffer->capacity`. * `buffer->len` is updated to reflect the buffer's new length. * * * Use aws_socket_subscribe_to_readable_events() to receive notifications of when the socket goes readable. * * NOTE! This function must be called from the event-loop used in aws_socket_assign_to_event_loop */ AWS_IO_API int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read); /** * Writes to the socket. This call is non-blocking and will attempt to write as much as it can, but will queue any * remaining portion of the data for write when available. written_fn will be invoked once the entire cursor has been * written, or the write failed or was cancelled. * * NOTE! This function must be called from the event-loop used in aws_socket_assign_to_event_loop * * For client sockets, connect() and aws_socket_assign_to_event_loop() must be called before calling this. * * For incoming sockets from a listener, aws_socket_assign_to_event_loop() must be called first. */ AWS_IO_API int aws_socket_write( struct aws_socket *socket, const struct aws_byte_cursor *cursor, aws_socket_on_write_completed_fn *written_fn, void *user_data); /** * Gets the latest error from the socket. If no error has occurred AWS_OP_SUCCESS will be returned. This function does * not raise any errors to the installed error handlers. */ AWS_IO_API int aws_socket_get_error(struct aws_socket *socket); /** * Returns true if the socket is still open (doesn't mean connected or listening, only that it hasn't had close() * called. */ AWS_IO_API bool aws_socket_is_open(struct aws_socket *socket); /** * Raises AWS_IO_SOCKET_INVALID_ADDRESS and logs an error if connecting to this port is illegal. * For example, port must be in range 1-65535 to connect with IPv4. * These port values would fail eventually in aws_socket_connect(), * but you can use this function to validate earlier. */ AWS_IO_API int aws_socket_validate_port_for_connect(uint32_t port, enum aws_socket_domain domain); /** * Raises AWS_IO_SOCKET_INVALID_ADDRESS and logs an error if binding to this port is illegal. * For example, port must in range 0-65535 to bind with IPv4. * These port values would fail eventually in aws_socket_bind(), * but you can use this function to validate earlier. */ AWS_IO_API int aws_socket_validate_port_for_bind(uint32_t port, enum aws_socket_domain domain); /** * Assigns a random address (UUID) for use with AWS_SOCKET_LOCAL (Unix Domain Sockets). * For use in internal tests only. 
*/ AWS_IO_API void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_SOCKET_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/socket_channel_handler.h000066400000000000000000000021331456575232400273100ustar00rootroot00000000000000#ifndef AWS_IO_SOCKET_CHANNEL_HANDLER_H #define AWS_IO_SOCKET_CHANNEL_HANDLER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_socket; struct aws_channel_handler; struct aws_channel_slot; struct aws_event_loop; AWS_EXTERN_C_BEGIN /** * Socket handlers should be the first slot/handler in a channel. It interacts directly with the channel's event loop * for read and write notifications. max_read_size is the maximum amount of data it will read from the socket * before a context switch (a continuation task will be scheduled). */ AWS_IO_API struct aws_channel_handler *aws_socket_handler_new( struct aws_allocator *allocator, struct aws_socket *socket, struct aws_channel_slot *slot, size_t max_read_size); /* Get aws_socket from socket channel handler */ AWS_IO_API const struct aws_socket *aws_socket_handler_get_socket(const struct aws_channel_handler *handler); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_SOCKET_CHANNEL_HANDLER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/statistics.h000066400000000000000000000036771456575232400250430ustar00rootroot00000000000000#ifndef AWS_IO_STATISTICS_H #define AWS_IO_STATISTICS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL enum aws_crt_io_statistics_category { AWSCRT_STAT_CAT_SOCKET = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_IO_PACKAGE_ID), AWSCRT_STAT_CAT_TLS, }; /** * Socket channel handler statistics record */ struct aws_crt_statistics_socket { aws_crt_statistics_category_t category; uint64_t bytes_read; uint64_t bytes_written; }; /** * Tls channel handler statistics record */ struct aws_crt_statistics_tls { aws_crt_statistics_category_t category; uint64_t handshake_start_ns; uint64_t handshake_end_ns; enum aws_tls_negotiation_status handshake_status; }; AWS_EXTERN_C_BEGIN /** * Initializes socket channel handler statistics */ AWS_IO_API int aws_crt_statistics_socket_init(struct aws_crt_statistics_socket *stats); /** * Cleans up socket channel handler statistics */ AWS_IO_API void aws_crt_statistics_socket_cleanup(struct aws_crt_statistics_socket *stats); /** * Resets socket channel handler statistics for the next gather interval. Calculate-once results are left alone. */ AWS_IO_API void aws_crt_statistics_socket_reset(struct aws_crt_statistics_socket *stats); /** * Initializes tls channel handler statistics */ AWS_IO_API int aws_crt_statistics_tls_init(struct aws_crt_statistics_tls *stats); /** * Cleans up tls channel handler statistics */ AWS_IO_API void aws_crt_statistics_tls_cleanup(struct aws_crt_statistics_tls *stats); /** * Resets tls channel handler statistics for the next gather interval. Calculate-once results are left alone. 
*/ AWS_IO_API void aws_crt_statistics_tls_reset(struct aws_crt_statistics_tls *stats); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_STATISTICS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/stream.h000066400000000000000000000110761456575232400241340ustar00rootroot00000000000000#ifndef AWS_IO_STREAM_H #define AWS_IO_STREAM_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_input_stream; struct aws_byte_buf; /* * For seek calls, where in the stream to seek from. * CUR support can come later * Intentionally mirror libc constants */ enum aws_stream_seek_basis { AWS_SSB_BEGIN = 0, AWS_SSB_END = 2 }; struct aws_stream_status { bool is_end_of_stream; bool is_valid; }; struct aws_input_stream_vtable { int (*seek)(struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis); /** * Stream as much data as will fit into the destination buffer and update its length. * The destination buffer's capacity MUST NOT be changed. * * Return AWS_OP_SUCCESS if the read is successful. * If AWS_OP_ERR is returned, the stream is assumed to be invalid and any data written to the buffer is ignored. * * If no more data is currently available, or the end of the stream has been reached, simply return AWS_OP_SUCCESS * without touching the destination buffer. */ int (*read)(struct aws_input_stream *stream, struct aws_byte_buf *dest); int (*get_status)(struct aws_input_stream *stream, struct aws_stream_status *status); int (*get_length)(struct aws_input_stream *stream, int64_t *out_length); /** * Optional. * If not set, the default aws_ref_count_acquire/release will be used. * Set for high level language binding that has its own refcounting implementation and needs to be kept alive from * C. * If set, ref_count member will not be used. */ void (*acquire)(struct aws_input_stream *stream); void (*release)(struct aws_input_stream *stream); }; /** * Base class for input streams. * Note: when you implement one input stream, the ref_count needs to be initialized to clean up the resource when * reaches to zero. */ struct aws_input_stream { /* point to the impl only set if needed. */ void *impl; const struct aws_input_stream_vtable *vtable; struct aws_ref_count ref_count; }; AWS_EXTERN_C_BEGIN /** * Increments the reference count on the input stream, allowing the caller to take a reference to it. * * Returns the same input stream passed in. */ AWS_IO_API struct aws_input_stream *aws_input_stream_acquire(struct aws_input_stream *stream); /** * Decrements a input stream's ref count. When the ref count drops to zero, the input stream will be destroyed. * * Returns NULL always. */ AWS_IO_API struct aws_input_stream *aws_input_stream_release(struct aws_input_stream *stream); /* * Seek to a position within a stream; analagous to fseek() and its relatives */ AWS_IO_API int aws_input_stream_seek(struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis); /* * Read data from a stream. If data is available, will read up to the (capacity - len) open bytes * in the destination buffer. If AWS_OP_ERR is returned, the destination buffer will be unchanged. 
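 *
 * A hedged read sketch (the source text is illustrative; the cursor and byte-buf helpers come from
 * aws-c-common):
 *
 *     struct aws_byte_cursor src = aws_byte_cursor_from_c_str("example payload");
 *     struct aws_input_stream *stream = aws_input_stream_new_from_cursor(allocator, &src);
 *     struct aws_byte_buf dest;
 *     aws_byte_buf_init(&dest, allocator, 64);
 *     if (aws_input_stream_read(stream, &dest) == AWS_OP_SUCCESS) {
 *         // dest.len now reflects how many bytes were streamed into dest.buffer
 *     }
 *     aws_byte_buf_clean_up(&dest);
 *     aws_input_stream_release(stream);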
*/ AWS_IO_API int aws_input_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest); /* * Queries miscellaneous properties of the stream */ AWS_IO_API int aws_input_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status); /* * Returns the total stream length, if able, regardless of current stream position. Under certain conditions, * a valid stream may return an error instead when there is not a good answer (socket stream, for example). * */ AWS_IO_API int aws_input_stream_get_length(struct aws_input_stream *stream, int64_t *out_length); /* DEPRECATED * Tears down the stream. Equivalent to aws_input_stream_release() */ AWS_IO_API void aws_input_stream_destroy(struct aws_input_stream *stream); /* * Creates a stream that operates on a range of bytes */ AWS_IO_API struct aws_input_stream *aws_input_stream_new_from_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor *cursor); /* * Creates a stream that operates on a (not-yet-opened) file. * Destruction closes the file. */ AWS_IO_API struct aws_input_stream *aws_input_stream_new_from_file( struct aws_allocator *allocator, const char *file_name); /* * Creates an input stream that reads from an already opened file. * Destruction does not close the file. */ AWS_IO_API struct aws_input_stream *aws_input_stream_new_from_open_file(struct aws_allocator *allocator, FILE *file); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_STREAM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/tls_channel_handler.h000066400000000000000000001022571456575232400266320ustar00rootroot00000000000000#ifndef AWS_IO_TLS_CHANNEL_HANDLER_H #define AWS_IO_TLS_CHANNEL_HANDLER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE 0x01 struct aws_channel_slot; struct aws_channel_handler; struct aws_pkcs11_session; struct aws_string; enum aws_tls_versions { AWS_IO_SSLv3, AWS_IO_TLSv1, AWS_IO_TLSv1_1, AWS_IO_TLSv1_2, AWS_IO_TLSv1_3, AWS_IO_TLS_VER_SYS_DEFAULTS = 128, }; enum aws_tls_cipher_pref { AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT = 0, /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2019_06 = 1, /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_SIKE_TLSv1_0_2019_11 = 2, /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_02 = 3, /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_SIKE_TLSv1_0_2020_02 = 4, /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_07 = 5, /* * This TLS cipher preference list contains post-quantum key exchange algorithms that have been submitted to NIST * for potential future standardization. Support for this preference list, or PQ algorithms present in it, may be * removed at any time in the future. PQ algorithms in this preference list will be used in hybrid mode, and always * combined with a classical ECDHE key exchange. */ AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05 = 6, AWS_IO_TLS_CIPHER_PREF_END_RANGE = 0xFFFF }; /** * The hash algorithm of a TLS private key operation. Any custom private key operation handlers are expected to perform * operations on the input TLS data using the correct hash algorithm or fail the operation. */ enum aws_tls_hash_algorithm { AWS_TLS_HASH_UNKNOWN, AWS_TLS_HASH_SHA1, AWS_TLS_HASH_SHA224, AWS_TLS_HASH_SHA256, AWS_TLS_HASH_SHA384, AWS_TLS_HASH_SHA512, }; /** * The signature of a TLS private key operation. 
Any custom private key operation handlers are expected to perform * operations on the input TLS data using the correct signature algorithm or fail the operation. */ enum aws_tls_signature_algorithm { AWS_TLS_SIGNATURE_UNKNOWN, AWS_TLS_SIGNATURE_RSA, AWS_TLS_SIGNATURE_ECDSA, }; /** * The TLS private key operation that needs to be performed by a custom private key operation handler when making * a connection using mutual TLS. */ enum aws_tls_key_operation_type { AWS_TLS_KEY_OPERATION_UNKNOWN, AWS_TLS_KEY_OPERATION_SIGN, AWS_TLS_KEY_OPERATION_DECRYPT, }; struct aws_tls_ctx { struct aws_allocator *alloc; void *impl; struct aws_ref_count ref_count; }; /** * Invoked upon completion of the TLS handshake. If successful error_code will be AWS_OP_SUCCESS, otherwise * the negotiation failed and immediately after this function is invoked, the channel will be shutting down. */ typedef void(aws_tls_on_negotiation_result_fn)( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int error_code, void *user_data); /** * Only used if the TLS handler is the last handler in the channel. This allows you to read any data that * was read and decrypted by the handler. If you have application protocol channel handlers, this function * is not necessary and certainly not recommended. */ typedef void(aws_tls_on_data_read_fn)( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buffer, void *user_data); /** * Invoked when an error occurs in the TLS state machine AFTER the handshake has completed. This function should only * be used in conjunction with the rules of aws_tls_on_data_read_fn. */ typedef void(aws_tls_on_error_fn)( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int err, const char *message, void *user_data); struct aws_tls_connection_options { /** semi-colon delimited list of protocols. Example: * h2;http/1.1 */ struct aws_string *alpn_list; /** * Serves two purposes. If SNI is supported (hint... it is), * this sets the SNI extension. * * For X.509 validation this also sets the name that will be used * for verifying the subj alt name and common name of the peer's certificate. */ struct aws_string *server_name; aws_tls_on_negotiation_result_fn *on_negotiation_result; aws_tls_on_data_read_fn *on_data_read; aws_tls_on_error_fn *on_error; void *user_data; struct aws_tls_ctx *ctx; bool advertise_alpn_message; uint32_t timeout_ms; }; /** * A struct containing all of the data needed for a private key operation when * making a mutual TLS connection. This struct contains the data that needs * to be operated on, like performing a sign operation or a decrypt operation. */ struct aws_tls_key_operation; struct aws_tls_ctx_options { struct aws_allocator *allocator; /** * minimum tls version to use. If you just want us to use the * system defaults, you can set: AWS_IO_TLS_VER_SYS_DEFAULTS. This * has the added benefit of automatically picking up new TLS versions * as your OS or distribution adds support. */ enum aws_tls_versions minimum_tls_version; /** * The Cipher Preference List to use */ enum aws_tls_cipher_pref cipher_pref; /** * A PEM armored PKCS#7 collection of CAs you want to trust as a string. * Only use this if it's a CA not currently installed on your system. */ struct aws_byte_buf ca_file; /** * Only used on Unix systems using an openssl style trust API. * this is typically something like /etc/pki/tls/certs/" */ struct aws_string *ca_path; /** * Sets ctx wide alpn string. This is most useful for servers. 
* This is a semi-colon delimited list. example: * h2;http/1.1 */ struct aws_string *alpn_list; /** * A PEM armored PKCS#7 certificate as a string. * It is supported on every operating system. */ struct aws_byte_buf certificate; #ifdef _WIN32 /** The path to a system * installed certficate/private key pair. Example: * CurrentUser\\MY\\ */ const char *system_certificate_path; #endif /** * A PEM armored PKCS#7 private key as a string. * * On windows, this field should be NULL only if you are * using a system installed certficate. */ struct aws_byte_buf private_key; #ifdef __APPLE__ /** * Apple Only! * * On Apple OS you can also use a pkcs#12 for your certificate * and private key. This is the contents the certificate. */ struct aws_byte_buf pkcs12; /** * Password for the pkcs12 data in pkcs12. */ struct aws_byte_buf pkcs12_password; # if !defined(AWS_OS_IOS) /** * On Apple OS you can also use a custom keychain instead of * the default keychain of the account. */ struct aws_string *keychain_path; # endif #endif /** max tls fragment size. Default is the value of g_aws_channel_max_fragment_size. */ size_t max_fragment_size; /** * default is true for clients and false for servers. * You should not change this default for clients unless * you're testing and don't want to fool around with CA trust stores. * Before you release to production, you'll want to turn this back on * and add your custom CA to the aws_tls_ctx_options. * * If you set this in server mode, it enforces client authentication. */ bool verify_peer; /** * For use when adding BYO_CRYPTO implementations. You can set extra data in here for use with your TLS * implementation. */ void *ctx_options_extension; /** * Set if using custom private key operations. * See aws_custom_key_op_handler for more details * * Note: Custom key operations (and PKCS#11 integration) hasn't been tested with TLS 1.3, so don't use * cipher preferences that allow TLS 1.3. If this is set, we will always use non TLS 1.3 preferences. */ struct aws_custom_key_op_handler *custom_key_op_handler; }; struct aws_tls_negotiated_protocol_message { struct aws_byte_buf protocol; }; typedef struct aws_channel_handler *( *aws_tls_on_protocol_negotiated)(struct aws_channel_slot *new_slot, struct aws_byte_buf *protocol, void *user_data); /** * An enum for the current state of tls negotiation within a tls channel handler */ enum aws_tls_negotiation_status { AWS_TLS_NEGOTIATION_STATUS_NONE, AWS_TLS_NEGOTIATION_STATUS_ONGOING, AWS_TLS_NEGOTIATION_STATUS_SUCCESS, AWS_TLS_NEGOTIATION_STATUS_FAILURE }; #ifdef BYO_CRYPTO /** * Callback for creating a TLS handler. If you're using this you're using BYO_CRYPTO. This function should return * a fully implemented aws_channel_handler instance for TLS. Note: the aws_tls_options passed to your * aws_tls_handler_new_fn contains multiple callbacks. Namely: aws_tls_on_negotiation_result_fn. You are responsible for * invoking this function when TLs session negotiation has completed. */ typedef struct aws_channel_handler *(aws_tls_handler_new_fn)( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot, void *user_data); /** * Invoked when it's time to start TLS negotiation. Note: the aws_tls_options passed to your aws_tls_handler_new_fn * contains multiple callbacks. Namely: aws_tls_on_negotiation_result_fn. You are responsible for invoking this function * when TLS session negotiation has completed. 
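 *
 * A minimal registration sketch for BYO_CRYPTO builds (illustrative only;
 * my_tls_handler_new and my_start_negotiation are hypothetical user-provided
 * callbacks, not part of this header):
 *
 *     struct aws_tls_byo_crypto_setup_options setup_options = {
 *         .new_handler_fn = my_tls_handler_new,
 *         .start_negotiation_fn = my_start_negotiation,
 *         .user_data = NULL,
 *     };
 *     aws_tls_byo_crypto_set_client_setup_options(&setup_options);
 *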
*/ typedef int(aws_tls_client_handler_start_negotiation_fn)(struct aws_channel_handler *handler, void *user_data); struct aws_tls_byo_crypto_setup_options { aws_tls_handler_new_fn *new_handler_fn; /* ignored for server implementations, required for clients. */ aws_tls_client_handler_start_negotiation_fn *start_negotiation_fn; void *user_data; }; #endif /* BYO_CRYPTO */ AWS_EXTERN_C_BEGIN /******************************** tls options init stuff ***********************/ /** * Initializes options with default client options */ AWS_IO_API void aws_tls_ctx_options_init_default_client( struct aws_tls_ctx_options *options, struct aws_allocator *allocator); /** * Cleans up resources allocated by init_* functions */ AWS_IO_API void aws_tls_ctx_options_clean_up(struct aws_tls_ctx_options *options); /** * Initializes options for use with mutual tls in client mode. * cert_path and pkey_path are paths to files on disk. cert_path * and pkey_path are treated as PKCS#7 PEM armored. They are loaded * from disk and stored in buffers internally. * * NOTE: This is unsupported on iOS. */ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_path, const char *pkey_path); /** * Initializes options for use with mutual tls in client mode. * cert and pkey are copied. cert and pkey are treated as PKCS#7 PEM * armored. * * NOTE: This is unsupported on iOS. */ AWS_IO_API int aws_tls_ctx_options_init_client_mtls( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const struct aws_byte_cursor *cert, const struct aws_byte_cursor *pkey); /** * vtable for aws_custom_key_op_handler. */ struct aws_custom_key_op_handler_vtable { /** * Called when the a TLS handshake has an operation it needs the custom key operation handler to perform. * NOTE: You must call aws_tls_key_operation_complete() or aws_tls_key_operation_complete_with_error() * otherwise the TLS handshake will stall the TLS connection indefinitely and leak memory. */ void (*on_key_operation)(struct aws_custom_key_op_handler *key_op_handler, struct aws_tls_key_operation *operation); }; /** * The custom key operation that is used when performing a mutual TLS handshake. This can * be extended to provide custom private key operations, like PKCS11 or similar. */ struct aws_custom_key_op_handler { /** * A void* intended to be populated with a reference to whatever class is extending this class. For example, * if you have extended aws_custom_key_op_handler with a custom struct, you would put a pointer to this struct * to *impl so you can retrieve it back in the vtable functions. */ void *impl; /** * A vtable containing all of the functions the aws_custom_key_op_handler implements. Is intended to be extended. * NOTE: Use "aws_custom_key_op_handler_" to access vtable functions. */ const struct aws_custom_key_op_handler_vtable *vtable; /** * A reference count for handling memory usage. * Use aws_custom_key_op_handler_acquire and aws_custom_key_op_handler_release to increase/decrease count. */ struct aws_ref_count ref_count; }; /** * Increases the reference count for the passed-in aws_custom_key_op_handler and returns it. */ AWS_IO_API struct aws_custom_key_op_handler *aws_custom_key_op_handler_acquire( struct aws_custom_key_op_handler *key_op_handler); /** * Decreases the reference count for the passed-in aws_custom_key_op_handler and returns NULL. 
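 *
 * Typical ownership sketch (my_create_key_op_handler is a hypothetical factory that
 * returns a handler whose initial reference is owned by the caller, cert_file_contents
 * is an aws_byte_cursor over PEM data declared elsewhere, and this assumes the TLS ctx
 * options acquire their own reference during init):
 *
 *     struct aws_custom_key_op_handler *key_handler = my_create_key_op_handler(allocator);
 *     struct aws_tls_ctx_options tls_options;
 *     aws_tls_ctx_options_init_client_mtls_with_custom_key_operations(
 *         &tls_options, allocator, key_handler, &cert_file_contents);
 *     aws_custom_key_op_handler_release(key_handler);
 *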
*/ AWS_IO_API struct aws_custom_key_op_handler *aws_custom_key_op_handler_release( struct aws_custom_key_op_handler *key_op_handler); /** * Calls the on_key_operation vtable function. See aws_custom_key_op_handler_vtable for function details. */ AWS_IO_API void aws_custom_key_op_handler_perform_operation( struct aws_custom_key_op_handler *key_op_handler, struct aws_tls_key_operation *operation); /** * Initializes options for use with mutual TLS in client mode, * where private key operations are handled by custom code. * * Note: cert_file_contents will be copied into a new buffer after this * function is called, so you do not need to keep that data alive * after calling this function. * * @param options aws_tls_ctx_options to be initialized. * @param allocator Allocator to use. * @param custom Options for custom key operations. * @param cert_file_contents The contents of a certificate file. */ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_with_custom_key_operations( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_custom_key_op_handler *custom, const struct aws_byte_cursor *cert_file_contents); /** * This struct exists as a graceful way to pass many arguments when * calling init-with-pkcs11 functions on aws_tls_ctx_options (this also makes * it easy to introduce optional arguments in the future). * Instances of this struct should only exist briefly on the stack. * * Instructions for binding this to high-level languages: * - Python: The members of this struct should be the keyword args to the init-with-pkcs11 functions. * - JavaScript: This should be an options map passed to init-with-pkcs11 functions. * - Java: This should be an options class passed to init-with-pkcs11 functions. * - C++: Same as Java * * Notes on integer types: * PKCS#11 uses `unsigned long` for IDs, handles, etc but we expose them as `uint64_t` in public APIs. * We do this because sizeof(long) is inconsistent across platform/arch/language * (ex: always 64bit in Java, always 32bit in C on Windows, matches CPU in C on Linux and Apple). * By using uint64_t in our public API, we can keep the careful bounds-checking all in one * place, instead of expecting each high-level language binding to get it just right. */ struct aws_tls_ctx_pkcs11_options { /** * The PKCS#11 library to use. * This field is required. */ struct aws_pkcs11_lib *pkcs11_lib; /** * User PIN, for logging into the PKCS#11 token (UTF-8). * Zero out to log into a token with a "protected authentication path". */ struct aws_byte_cursor user_pin; /** * ID of slot containing PKCS#11 token. * If set to NULL, the token will be chosen based on other criteria * (such as token label). */ const uint64_t *slot_id; /** * Label of PKCS#11 token to use. * If zeroed out, the token will be chosen based on other criteria * (such as slot ID). */ struct aws_byte_cursor token_label; /** * Label of private key object on PKCS#11 token (UTF-8). * If zeroed out, the private key will be chosen based on other criteria * (such as being the only available private key on the token). */ struct aws_byte_cursor private_key_object_label; /** * Certificate's file path on disk (UTF-8). * The certificate must be PEM formatted and UTF-8 encoded. * Zero out if passing in certificate by some other means (such as file contents). */ struct aws_byte_cursor cert_file_path; /** * Certificate's file contents (UTF-8). * The certificate must be PEM formatted and UTF-8 encoded. * Zero out if passing in certificate by some other means (such as file path). 
*/ struct aws_byte_cursor cert_file_contents; }; /** * Initializes options for use with mutual TLS in client mode, * where a PKCS#11 library provides access to the private key. * * NOTE: This only works on Unix devices. * * @param options aws_tls_ctx_options to be initialized. * @param allocator Allocator to use. * @param pkcs11_options Options for using PKCS#11 (contents are copied) */ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_with_pkcs11( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const struct aws_tls_ctx_pkcs11_options *pkcs11_options); /** * @Deprecated * * Sets a custom keychain path for storing the cert and pkey with mutual tls in client mode. * * NOTE: This only works on MacOS. */ AWS_IO_API int aws_tls_ctx_options_set_keychain_path( struct aws_tls_ctx_options *options, struct aws_byte_cursor *keychain_path_cursor); /** * Initializes options for use with in server mode. * cert_path and pkey_path are paths to files on disk. cert_path * and pkey_path are treated as PKCS#7 PEM armored. They are loaded * from disk and stored in buffers internally. */ AWS_IO_API int aws_tls_ctx_options_init_default_server_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_path, const char *pkey_path); /** * Initializes options for use with in server mode. * cert and pkey are copied. cert and pkey are treated as PKCS#7 PEM * armored. */ AWS_IO_API int aws_tls_ctx_options_init_default_server( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_byte_cursor *cert, struct aws_byte_cursor *pkey); /** * Initializes options for use with mutual tls in client mode. * cert_reg_path is the path to a system * installed certficate/private key pair. Example: * CurrentUser\\MY\\ * * NOTE: This only works on Windows. */ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_from_system_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_reg_path); /** * Initializes options for use with server mode. * cert_reg_path is the path to a system * installed certficate/private key pair. Example: * CurrentUser\\MY\\ * * NOTE: This only works on Windows. */ AWS_IO_API int aws_tls_ctx_options_init_default_server_from_system_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_reg_path); /** * Initializes options for use with mutual tls in client mode. * pkcs12_path is a path to a file on disk containing a pkcs#12 file. The file is loaded * into an internal buffer. pkcs_pwd is the corresponding password for the pkcs#12 file; it is copied. * * NOTE: This only works on Apple devices. */ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_pkcs12_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *pkcs12_path, const struct aws_byte_cursor *pkcs_pwd); /** * Initializes options for use with mutual tls in client mode. * pkcs12 is a buffer containing a pkcs#12 certificate and private key; it is copied. * pkcs_pwd is the corresponding password for the pkcs#12 buffer; it is copied. * * NOTE: This only works on Apple devices. */ AWS_IO_API int aws_tls_ctx_options_init_client_mtls_pkcs12( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_byte_cursor *pkcs12, struct aws_byte_cursor *pkcs_pwd); /** * Initializes options for use in server mode. * pkcs12_path is a path to a file on disk containing a pkcs#12 file. The file is loaded * into an internal buffer. 
pkcs_pwd is the corresponding password for the pkcs#12 file; it is copied. * * NOTE: This only works on Apple devices. */ AWS_IO_API int aws_tls_ctx_options_init_server_pkcs12_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *pkcs12_path, struct aws_byte_cursor *pkcs_password); /** * Initializes options for use in server mode. * pkcs12 is a buffer containing a pkcs#12 certificate and private key; it is copied. * pkcs_pwd is the corresponding password for the pkcs#12 buffer; it is copied. * * NOTE: This only works on Apple devices. */ AWS_IO_API int aws_tls_ctx_options_init_server_pkcs12( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_byte_cursor *pkcs12, struct aws_byte_cursor *pkcs_password); /** * Sets alpn list in the form . A maximum of 4 protocols are supported. * alpn_list is copied. */ AWS_IO_API int aws_tls_ctx_options_set_alpn_list(struct aws_tls_ctx_options *options, const char *alpn_list); /** * Enables or disables x.509 validation. Disable this only for testing. To enable mutual TLS in server mode, * set verify_peer to true. */ AWS_IO_API void aws_tls_ctx_options_set_verify_peer(struct aws_tls_ctx_options *options, bool verify_peer); /** * Sets preferred TLS Cipher List */ AWS_IO_API void aws_tls_ctx_options_set_tls_cipher_preference( struct aws_tls_ctx_options *options, enum aws_tls_cipher_pref cipher_pref); /** * Sets the minimum TLS version to allow. */ AWS_IO_API void aws_tls_ctx_options_set_minimum_tls_version( struct aws_tls_ctx_options *options, enum aws_tls_versions minimum_tls_version); /** * Override the default trust store. ca_file is a buffer containing a PEM armored chain of trusted CA certificates. * ca_file is copied. */ AWS_IO_API int aws_tls_ctx_options_override_default_trust_store( struct aws_tls_ctx_options *options, const struct aws_byte_cursor *ca_file); /** * Override the default trust store. ca_path is a path to a directory on disk containing trusted certificates. This is * only supported on Unix systems (otherwise this parameter is ignored). ca_file is a path to a file on disk containing * trusted certificates. ca_file is loaded from disk and stored in an internal buffer. */ AWS_IO_API int aws_tls_ctx_options_override_default_trust_store_from_path( struct aws_tls_ctx_options *options, const char *ca_path, const char *ca_file); /** * When implementing BYO_CRYPTO, if you need extra data to pass to your tls implementation, set it here. The lifetime of * extension_data must outlive the options object and be cleaned up after options is cleaned up. */ AWS_IO_API void aws_tls_ctx_options_set_extension_data(struct aws_tls_ctx_options *options, void *extension_data); /** * Initializes default connection options from an instance ot aws_tls_ctx. */ AWS_IO_API void aws_tls_connection_options_init_from_ctx( struct aws_tls_connection_options *conn_options, struct aws_tls_ctx *ctx); /** * Cleans up resources in aws_tls_connection_options. This can be called immediately after initializing * a tls handler, or if using the bootstrap api, immediately after asking for a channel. */ AWS_IO_API void aws_tls_connection_options_clean_up(struct aws_tls_connection_options *connection_options); /** * Copies 'from' to 'to' */ AWS_IO_API int aws_tls_connection_options_copy( struct aws_tls_connection_options *to, const struct aws_tls_connection_options *from); /** * Sets callbacks for use with a tls connection. 
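 *
 * Illustrative per-connection setup sketch (s_on_negotiation_result, s_on_data_read,
 * s_on_error and my_user_data are hypothetical application callbacks/state; error
 * checking omitted):
 *
 *     struct aws_tls_connection_options conn_options;
 *     aws_tls_connection_options_init_from_ctx(&conn_options, tls_ctx);
 *
 *     struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str("example.com");
 *     aws_tls_connection_options_set_server_name(&conn_options, allocator, &server_name);
 *     aws_tls_connection_options_set_alpn_list(&conn_options, allocator, "h2;http/1.1");
 *     aws_tls_connection_options_set_callbacks(
 *         &conn_options, s_on_negotiation_result, s_on_data_read, s_on_error, my_user_data);
 *
 *     ... hand conn_options to the channel/bootstrap, then ...
 *     aws_tls_connection_options_clean_up(&conn_options);
 *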
*/ AWS_IO_API void aws_tls_connection_options_set_callbacks( struct aws_tls_connection_options *conn_options, aws_tls_on_negotiation_result_fn *on_negotiation_result, aws_tls_on_data_read_fn *on_data_read, aws_tls_on_error_fn *on_error, void *user_data); /** * Sets server name to use for the SNI extension (supported everywhere), as well as x.509 validation. If you don't * set this, your x.509 validation will likely fail. */ AWS_IO_API int aws_tls_connection_options_set_server_name( struct aws_tls_connection_options *conn_options, struct aws_allocator *allocator, const struct aws_byte_cursor *server_name); /** * Sets alpn list in the form . A maximum of 4 protocols are supported. * alpn_list is copied. This value is already inherited from aws_tls_ctx, but the aws_tls_ctx is expensive, * and should be used across as many connections as possible. If you want to set this per connection, set it here. */ AWS_IO_API int aws_tls_connection_options_set_alpn_list( struct aws_tls_connection_options *conn_options, struct aws_allocator *allocator, const char *alpn_list); /********************************* TLS context and state management *********************************/ /** * Returns true if alpn is available in the underlying tls implementation. * This function should always be called before setting an alpn list. */ AWS_IO_API bool aws_tls_is_alpn_available(void); /** * Returns true if this Cipher Preference is available in the underlying TLS implementation. * This function should always be called before setting a Cipher Preference */ AWS_IO_API bool aws_tls_is_cipher_pref_supported(enum aws_tls_cipher_pref cipher_pref); /** * Creates a new tls channel handler in client mode. Options will be copied. * You must call aws_tls_client_handler_start_negotiation and wait on the * aws_tls_on_negotiation_result_fn callback before the handler can begin processing * application data. */ AWS_IO_API struct aws_channel_handler *aws_tls_client_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot); /** * Creates a new tls channel handler in server mode. Options will be copied. * You must wait on the aws_tls_on_negotiation_result_fn callback before the handler can begin processing * application data. */ AWS_IO_API struct aws_channel_handler *aws_tls_server_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot); #ifdef BYO_CRYPTO /** * If using BYO_CRYPTO, you need to call this function prior to creating any client channels in the application. */ AWS_IO_API void aws_tls_byo_crypto_set_client_setup_options(const struct aws_tls_byo_crypto_setup_options *options); /** * If using BYO_CRYPTO, you need to call this function prior to creating any server channels in the application. */ AWS_IO_API void aws_tls_byo_crypto_set_server_setup_options(const struct aws_tls_byo_crypto_setup_options *options); #endif /* BYO_CRYPTO */ /** * Creates a channel handler, for client or server mode, that handles alpn. This isn't necessarily required * since you can always call aws_tls_handler_protocol in the aws_tls_on_negotiation_result_fn callback, but * this makes channel bootstrap easier to handle. */ AWS_IO_API struct aws_channel_handler *aws_tls_alpn_handler_new( struct aws_allocator *allocator, aws_tls_on_protocol_negotiated on_protocol_negotiated, void *user_data); /** * Kicks off the negotiation process. This function must be called when in client mode to initiate the * TLS handshake. 
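 *
 * A minimal client-mode sketch (my_conn_options and my_slot are hypothetical, and the
 * surrounding channel/slot wiring is elided):
 *
 *     struct aws_channel_handler *tls_handler =
 *         aws_tls_client_handler_new(allocator, &my_conn_options, my_slot);
 *     aws_channel_slot_set_handler(my_slot, tls_handler);
 *     aws_tls_client_handler_start_negotiation(tls_handler);
 *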
Once the handshake has completed the aws_tls_on_negotiation_result_fn will be invoked. */ AWS_IO_API int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler); #ifndef BYO_CRYPTO /** * Creates a new server ctx. This ctx can be used for the lifetime of the application assuming you want the same * options for every incoming connection. Options will be copied. */ AWS_IO_API struct aws_tls_ctx *aws_tls_server_ctx_new( struct aws_allocator *alloc, const struct aws_tls_ctx_options *options); /** * Creates a new client ctx. This ctx can be used for the lifetime of the application assuming you want the same * options for every outgoing connection. Options will be copied. */ AWS_IO_API struct aws_tls_ctx *aws_tls_client_ctx_new( struct aws_allocator *alloc, const struct aws_tls_ctx_options *options); #endif /* BYO_CRYPTO */ /** * Increments the reference count on the tls context, allowing the caller to take a reference to it. * * Returns the same tls context passed in. */ AWS_IO_API struct aws_tls_ctx *aws_tls_ctx_acquire(struct aws_tls_ctx *ctx); /** * Decrements a tls context's ref count. When the ref count drops to zero, the object will be destroyed. */ AWS_IO_API void aws_tls_ctx_release(struct aws_tls_ctx *ctx); /** * Not necessary if you are installing more handlers into the channel, but if you just want to have TLS for arbitrary * data and use the channel handler directly, this function allows you to write data to the channel and have it * encrypted. */ AWS_IO_API int aws_tls_handler_write( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buf, aws_channel_on_message_write_completed_fn *on_write_completed, void *completion_user_data); /** * Returns a byte buffer by copy of the negotiated protocols. If there is no agreed upon protocol, len will be 0 and * buffer will be NULL. */ AWS_IO_API struct aws_byte_buf aws_tls_handler_protocol(struct aws_channel_handler *handler); /** * Client mode only. This is the server name that was used for SNI and host name validation. */ AWS_IO_API struct aws_byte_buf aws_tls_handler_server_name(struct aws_channel_handler *handler); /**************************** TLS KEY OPERATION *******************************/ /* Note: Currently this assumes the user knows what key is being used for key/cert pairs but s2n supports multiple cert/key pairs. This functionality is not used in the CRT currently, but in the future, we may need to implement this */ /** * Complete a successful TLS private key operation by providing its output. * The output is copied into the TLS connection. * The operation is freed by this call. * * You MUST call this or aws_tls_key_operation_complete_with_error(). * Failure to do so will stall the TLS connection indefinitely and leak memory. */ AWS_IO_API void aws_tls_key_operation_complete(struct aws_tls_key_operation *operation, struct aws_byte_cursor output); /** * Complete an failed TLS private key operation. * The TLS connection will fail. * The operation is freed by this call. * * You MUST call this or aws_tls_key_operation_complete(). * Failure to do so will stall the TLS connection indefinitely and leak memory. */ AWS_IO_API void aws_tls_key_operation_complete_with_error(struct aws_tls_key_operation *operation, int error_code); /** * Returns the input data that needs to be operated on by the custom key operation. 
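 *
 * Sketch of a custom on_key_operation callback built around these accessors
 * (my_sign_with_private_key is a hypothetical routine that initializes and fills the
 * output buffer; real handlers should also check the operation, signature and digest
 * types they support):
 *
 *     static void s_my_on_key_operation(
 *         struct aws_custom_key_op_handler *key_op_handler,
 *         struct aws_tls_key_operation *operation) {
 *
 *         struct aws_byte_cursor input = aws_tls_key_operation_get_input(operation);
 *         struct aws_byte_buf signature_buf;
 *         if (my_sign_with_private_key(key_op_handler->impl, input, &signature_buf)) {
 *             aws_tls_key_operation_complete_with_error(operation, aws_last_error());
 *             return;
 *         }
 *         aws_tls_key_operation_complete(operation, aws_byte_cursor_from_buf(&signature_buf));
 *         aws_byte_buf_clean_up(&signature_buf);
 *     }
 *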
*/ AWS_IO_API struct aws_byte_cursor aws_tls_key_operation_get_input(const struct aws_tls_key_operation *operation); /** * Returns the type of operation that needs to be performed by the custom key operation. * If the implementation cannot perform the operation, * use aws_tls_key_operation_complete_with_error() to preventing stalling the TLS connection. */ AWS_IO_API enum aws_tls_key_operation_type aws_tls_key_operation_get_type(const struct aws_tls_key_operation *operation); /** * Returns the algorithm the operation is expected to be operated with. * If the implementation does not support the signature algorithm, * use aws_tls_key_operation_complete_with_error() to preventing stalling the TLS connection. */ AWS_IO_API enum aws_tls_signature_algorithm aws_tls_key_operation_get_signature_algorithm( const struct aws_tls_key_operation *operation); /** * Returns the algorithm the operation digest is signed with. * If the implementation does not support the digest algorithm, * use aws_tls_key_operation_complete_with_error() to preventing stalling the TLS connection. */ AWS_IO_API enum aws_tls_hash_algorithm aws_tls_key_operation_get_digest_algorithm(const struct aws_tls_key_operation *operation); /********************************* Misc TLS related *********************************/ /* * Injects a tls handler/slot into a channel and begins tls negotiation. * If desired, ALPN must be handled separately * * right_of_slot must be an existing slot in a channel */ AWS_IO_API int aws_channel_setup_client_tls( struct aws_channel_slot *right_of_slot, struct aws_tls_connection_options *tls_options); /** * Given enum, return string like: AWS_TLS_HASH_SHA256 -> "SHA256" */ AWS_IO_API const char *aws_tls_hash_algorithm_str(enum aws_tls_hash_algorithm hash); /** * Given enum, return string like: AWS_TLS_SIGNATURE_RSA -> "RSA" */ AWS_IO_API const char *aws_tls_signature_algorithm_str(enum aws_tls_signature_algorithm signature); /** * Given enum, return string like: AWS_TLS_SIGNATURE_RSA -> "RSA" */ AWS_IO_API const char *aws_tls_key_operation_type_str(enum aws_tls_key_operation_type operation_type); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_TLS_CHANNEL_HANDLER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/io/uri.h000066400000000000000000000003571456575232400234400ustar00rootroot00000000000000#ifndef AWS_IO_URI_H #define AWS_IO_URI_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #endif /* AWS_IO_URI_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/testing/000077500000000000000000000000001456575232400235315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/testing/async_stream_tester.h000066400000000000000000000236521456575232400277700ustar00rootroot00000000000000#ifndef AWS_TESTING_ASYNC_STREAM_TESTER_H #define AWS_TESTING_ASYNC_STREAM_TESTER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #ifndef AWS_UNSTABLE_TESTING_API # error This code is designed for use by AWS owned libraries for the AWS C99 SDK. \ You are welcome to use it, but we make no promises on the stability of this API. \ To enable use of this code, set the AWS_UNSTABLE_TESTING_API compiler flag. #endif /** * Use aws_async_input_stream_tester to test edge cases in systems that take async streams. * You can customize its behavior (e.g. 
fail on 3rd read, always complete async, always complete synchronously, etc) */ enum aws_async_read_completion_strategy { /* the tester has its own thread, and reads always complete from there */ AWS_ASYNC_READ_COMPLETES_ON_ANOTHER_THREAD, /* reads complete before read() even returns */ AWS_ASYNC_READ_COMPLETES_IMMEDIATELY, /* sometimes reads complete immediately, sometimes they complete on another thread */ AWS_ASYNC_READ_COMPLETES_ON_RANDOM_THREAD, }; struct aws_async_input_stream_tester_options { /* the async tester uses the synchronous tester under the hood, * so here are those options */ struct aws_input_stream_tester_options base; enum aws_async_read_completion_strategy completion_strategy; /* if non-zero, a read will take at least this long to complete */ uint64_t read_duration_ns; }; struct aws_async_input_stream_tester { struct aws_async_input_stream base; struct aws_allocator *alloc; struct aws_async_input_stream_tester_options options; struct aws_input_stream *source_stream; struct aws_thread thread; struct { struct aws_mutex lock; struct aws_condition_variable cvar; /* when thread should perform a read, these are set */ struct aws_byte_buf *read_dest; struct aws_future_bool *read_future; /* if true, thread should shut down */ bool do_shutdown; } synced_data; struct aws_atomic_var num_outstanding_reads; }; static inline void s_async_input_stream_tester_do_actual_read( struct aws_async_input_stream_tester *impl, struct aws_byte_buf *dest, struct aws_future_bool *read_future) { int error_code = 0; /* delay, if that's how we're configured */ if (impl->options.read_duration_ns != 0) { aws_thread_current_sleep(impl->options.read_duration_ns); } /* Keep calling read() until we get some data, or hit EOF. * We do this because the synchronous aws_input_stream API allows * 0 byte reads, but the aws_async_input_stream API does not. 
*/ size_t prev_len = dest->len; struct aws_stream_status status = {.is_end_of_stream = false, .is_valid = true}; while ((dest->len == prev_len) && !status.is_end_of_stream) { /* read from stream */ if (aws_input_stream_read(impl->source_stream, dest) != AWS_OP_SUCCESS) { error_code = aws_last_error(); goto done; } /* check if stream is done */ if (aws_input_stream_get_status(impl->source_stream, &status) != AWS_OP_SUCCESS) { error_code = aws_last_error(); goto done; } } done: aws_atomic_fetch_sub(&impl->num_outstanding_reads, 1); if (error_code != 0) { aws_future_bool_set_error(read_future, error_code); } else { aws_future_bool_set_result(read_future, status.is_end_of_stream); } aws_future_bool_release(read_future); } static inline struct aws_future_bool *s_async_input_stream_tester_read( struct aws_async_input_stream *stream, struct aws_byte_buf *dest) { struct aws_async_input_stream_tester *impl = (struct aws_async_input_stream_tester *)stream->impl; size_t prev_outstanding_reads = aws_atomic_fetch_add(&impl->num_outstanding_reads, 1); AWS_FATAL_ASSERT(prev_outstanding_reads == 0 && "Overlapping read() calls are forbidden"); struct aws_future_bool *read_future = aws_future_bool_new(stream->alloc); bool do_on_thread = false; switch (impl->options.completion_strategy) { case AWS_ASYNC_READ_COMPLETES_ON_ANOTHER_THREAD: do_on_thread = true; break; case AWS_ASYNC_READ_COMPLETES_IMMEDIATELY: do_on_thread = false; break; case AWS_ASYNC_READ_COMPLETES_ON_RANDOM_THREAD: do_on_thread = (rand() % 2 == 0); break; } if (do_on_thread) { /* BEGIN CRITICAL SECTION */ aws_mutex_lock(&impl->synced_data.lock); impl->synced_data.read_dest = dest; impl->synced_data.read_future = aws_future_bool_acquire(read_future); AWS_FATAL_ASSERT(aws_condition_variable_notify_all(&impl->synced_data.cvar) == AWS_OP_SUCCESS); aws_mutex_unlock(&impl->synced_data.lock); /* END CRITICAL SECTION */ } else { /* acquire additional refcount on future, since we call release once it's complete */ aws_future_bool_acquire(read_future); s_async_input_stream_tester_do_actual_read(impl, dest, read_future); } return read_future; } static inline void s_async_input_stream_tester_do_actual_destroy(struct aws_async_input_stream_tester *impl) { if (impl->options.completion_strategy != AWS_ASYNC_READ_COMPLETES_IMMEDIATELY) { aws_condition_variable_clean_up(&impl->synced_data.cvar); aws_mutex_clean_up(&impl->synced_data.lock); } aws_input_stream_release(impl->source_stream); aws_mem_release(impl->base.alloc, impl); } /* refcount has reached zero */ static inline void s_async_input_stream_tester_destroy(struct aws_async_input_stream *async_stream) { struct aws_async_input_stream_tester *impl = (struct aws_async_input_stream_tester *)async_stream->impl; if (impl->options.completion_strategy == AWS_ASYNC_READ_COMPLETES_IMMEDIATELY) { s_async_input_stream_tester_do_actual_destroy(impl); } else { /* signal thread to finish cleaning things up */ /* BEGIN CRITICAL SECTION */ aws_mutex_lock(&impl->synced_data.lock); impl->synced_data.do_shutdown = true; AWS_FATAL_ASSERT(aws_condition_variable_notify_all(&impl->synced_data.cvar) == AWS_OP_SUCCESS); aws_mutex_unlock(&impl->synced_data.lock); /* END CRITICAL SECTION */ } } static inline bool s_async_input_stream_tester_thread_pred(void *arg) { struct aws_async_input_stream_tester *impl = (struct aws_async_input_stream_tester *)arg; return impl->synced_data.do_shutdown || (impl->synced_data.read_dest != NULL); } static inline void s_async_input_stream_tester_thread(void *arg) { struct 
aws_async_input_stream_tester *impl = (struct aws_async_input_stream_tester *)arg; bool do_shutdown = false; struct aws_byte_buf *read_dest = NULL; struct aws_future_bool *read_future = NULL; while (!do_shutdown) { /* BEGIN CRITICAL SECTION */ aws_mutex_lock(&impl->synced_data.lock); AWS_FATAL_ASSERT( aws_condition_variable_wait_pred( &impl->synced_data.cvar, &impl->synced_data.lock, s_async_input_stream_tester_thread_pred, impl) == AWS_OP_SUCCESS); /* acquire work */ do_shutdown = impl->synced_data.do_shutdown; read_dest = impl->synced_data.read_dest; impl->synced_data.read_dest = NULL; read_future = impl->synced_data.read_future; impl->synced_data.read_future = NULL; aws_mutex_unlock(&impl->synced_data.lock); /* END CRITICAL SECTION */ if (read_dest != NULL) { s_async_input_stream_tester_do_actual_read(impl, read_dest, read_future); } } /* thread has shut down, finish destruction */ s_async_input_stream_tester_do_actual_destroy(impl); } static inline uint64_t aws_async_input_stream_tester_total_bytes_read( const struct aws_async_input_stream *async_stream) { const struct aws_async_input_stream_tester *async_impl = (const struct aws_async_input_stream_tester *)async_stream->impl; const struct aws_input_stream_tester *synchronous_impl = (const struct aws_input_stream_tester *)async_impl->source_stream->impl; return synchronous_impl->total_bytes_read; } static struct aws_async_input_stream_vtable s_async_input_stream_tester_vtable = { .destroy = s_async_input_stream_tester_destroy, .read = s_async_input_stream_tester_read, }; static inline struct aws_async_input_stream *aws_async_input_stream_new_tester( struct aws_allocator *alloc, const struct aws_async_input_stream_tester_options *options) { struct aws_async_input_stream_tester *impl = (struct aws_async_input_stream_tester *)aws_mem_calloc(alloc, 1, sizeof(struct aws_async_input_stream_tester)); aws_async_input_stream_init_base(&impl->base, alloc, &s_async_input_stream_tester_vtable, impl); impl->options = *options; aws_atomic_init_int(&impl->num_outstanding_reads, 0); impl->source_stream = aws_input_stream_new_tester(alloc, &options->base); AWS_FATAL_ASSERT(impl->source_stream); if (options->completion_strategy != AWS_ASYNC_READ_COMPLETES_IMMEDIATELY) { aws_mutex_init(&impl->synced_data.lock); aws_condition_variable_init(&impl->synced_data.cvar); AWS_FATAL_ASSERT(aws_thread_init(&impl->thread, alloc) == AWS_OP_SUCCESS); struct aws_thread_options thread_options = *aws_default_thread_options(); thread_options.name = aws_byte_cursor_from_c_str("AsyncStream"); thread_options.join_strategy = AWS_TJS_MANAGED; AWS_FATAL_ASSERT( aws_thread_launch(&impl->thread, s_async_input_stream_tester_thread, impl, &thread_options) == AWS_OP_SUCCESS); } return &impl->base; } #endif /* AWS_TESTING_ASYNC_STREAM_TESTER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/testing/io_testing_channel.h000066400000000000000000000633411456575232400275450ustar00rootroot00000000000000#ifndef AWS_TESTING_IO_TESTING_CHANNEL_H #define AWS_TESTING_IO_TESTING_CHANNEL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include struct testing_loop { struct aws_task_scheduler scheduler; bool mock_on_callers_thread; }; static int s_testing_loop_run(struct aws_event_loop *event_loop) { (void)event_loop; return AWS_OP_SUCCESS; } static int s_testing_loop_stop(struct aws_event_loop *event_loop) { (void)event_loop; return AWS_OP_SUCCESS; } static int s_testing_loop_wait_for_stop_completion(struct aws_event_loop *event_loop) { (void)event_loop; return AWS_OP_SUCCESS; } static void s_testing_loop_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { struct testing_loop *testing_loop = event_loop->impl_data; aws_task_scheduler_schedule_now(&testing_loop->scheduler, task); } static void s_testing_loop_schedule_task_future( struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { struct testing_loop *testing_loop = event_loop->impl_data; aws_task_scheduler_schedule_future(&testing_loop->scheduler, task, run_at_nanos); } static void s_testing_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { struct testing_loop *testing_loop = event_loop->impl_data; aws_task_scheduler_cancel_task(&testing_loop->scheduler, task); } static bool s_testing_loop_is_on_callers_thread(struct aws_event_loop *event_loop) { struct testing_loop *testing_loop = event_loop->impl_data; return testing_loop->mock_on_callers_thread; } static void s_testing_loop_destroy(struct aws_event_loop *event_loop) { struct testing_loop *testing_loop = event_loop->impl_data; aws_task_scheduler_clean_up(&testing_loop->scheduler); aws_mem_release(event_loop->alloc, testing_loop); aws_event_loop_clean_up_base(event_loop); aws_mem_release(event_loop->alloc, event_loop); } static struct aws_event_loop_vtable s_testing_loop_vtable = { .destroy = s_testing_loop_destroy, .is_on_callers_thread = s_testing_loop_is_on_callers_thread, .run = s_testing_loop_run, .schedule_task_now = s_testing_loop_schedule_task_now, .schedule_task_future = s_testing_loop_schedule_task_future, .cancel_task = s_testing_loop_cancel_task, .stop = s_testing_loop_stop, .wait_for_stop_completion = s_testing_loop_wait_for_stop_completion, }; static struct aws_event_loop *s_testing_loop_new(struct aws_allocator *allocator, aws_io_clock_fn clock) { struct aws_event_loop *event_loop = aws_mem_acquire(allocator, sizeof(struct aws_event_loop)); aws_event_loop_init_base(event_loop, allocator, clock); struct testing_loop *testing_loop = aws_mem_calloc(allocator, 1, sizeof(struct testing_loop)); aws_task_scheduler_init(&testing_loop->scheduler, allocator); testing_loop->mock_on_callers_thread = true; event_loop->impl_data = testing_loop; event_loop->vtable = &s_testing_loop_vtable; return event_loop; } typedef void(testing_channel_handler_on_shutdown_fn)( enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately, void *user_data); struct testing_channel_handler { struct aws_linked_list messages; size_t latest_window_update; size_t initial_window; bool complete_write_immediately; int complete_write_error_code; testing_channel_handler_on_shutdown_fn *on_shutdown; void *on_shutdown_user_data; struct aws_crt_statistics_socket stats; }; static int s_testing_channel_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)handler; (void)slot; (void)message; struct testing_channel_handler *testing_handler = handler->impl; 
aws_linked_list_push_back(&testing_handler->messages, &message->queueing_handle); return AWS_OP_SUCCESS; } static int s_testing_channel_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)slot; struct testing_channel_handler *testing_handler = handler->impl; aws_linked_list_push_back(&testing_handler->messages, &message->queueing_handle); /* Invoke completion callback if this is the left-most handler */ if (message->on_completion && !slot->adj_left && testing_handler->complete_write_immediately) { message->on_completion(slot->channel, message, testing_handler->complete_write_error_code, message->user_data); message->on_completion = NULL; } return AWS_OP_SUCCESS; } static int s_testing_channel_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)slot; struct testing_channel_handler *testing_handler = handler->impl; testing_handler->latest_window_update = size; return AWS_OP_SUCCESS; } static int s_testing_channel_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { struct testing_channel_handler *testing_handler = handler->impl; /* If user has registered a callback, invoke it */ if (testing_handler->on_shutdown) { testing_handler->on_shutdown( dir, error_code, free_scarce_resources_immediately, testing_handler->on_shutdown_user_data); } if (dir == AWS_CHANNEL_DIR_WRITE) { if (!slot->adj_left) { /* Invoke the on_completion callbacks for any queued messages */ struct aws_linked_list_node *node = aws_linked_list_begin(&testing_handler->messages); while (node != aws_linked_list_end(&testing_handler->messages)) { struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); if (msg->on_completion) { msg->on_completion(slot->channel, msg, AWS_IO_SOCKET_CLOSED, msg->user_data); msg->on_completion = NULL; } node = aws_linked_list_next(node); } } } return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } static size_t s_testing_channel_handler_initial_window_size(struct aws_channel_handler *handler) { struct testing_channel_handler *testing_handler = handler->impl; return testing_handler->initial_window; } static size_t s_testing_channel_handler_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static void s_testing_channel_handler_destroy(struct aws_channel_handler *handler) { struct testing_channel_handler *testing_handler = handler->impl; while (!aws_linked_list_empty(&testing_handler->messages)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&testing_handler->messages); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); aws_mem_release(msg->allocator, msg); } aws_mem_release(handler->alloc, testing_handler); aws_mem_release(handler->alloc, handler); } static void s_testing_channel_handler_reset_statistics(struct aws_channel_handler *handler) { struct testing_channel_handler *testing_handler = handler->impl; aws_crt_statistics_socket_reset(&testing_handler->stats); } static void s_testing_channel_handler_gather_statistics( struct aws_channel_handler *handler, struct aws_array_list *stats) { struct testing_channel_handler *testing_handler = handler->impl; void *stats_base = &testing_handler->stats; aws_array_list_push_back(stats, &stats_base); } 
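/*
 * Illustrative end-to-end flow for the testing channel declared below (a sketch, not a
 * real test; assumes the aws testing ASSERT macros, aws_high_res_clock_get_ticks as the
 * clock, and a hypothetical helper s_install_my_handler that installs the handler under
 * test):
 *
 *     struct aws_testing_channel_options options = {.clock_fn = aws_high_res_clock_get_ticks};
 *     struct testing_channel test_channel;
 *     ASSERT_SUCCESS(testing_channel_init(&test_channel, allocator, &options));
 *     s_install_my_handler(&test_channel);
 *
 *     ASSERT_SUCCESS(testing_channel_push_read_str(&test_channel, "hello"));
 *     testing_channel_drain_queued_tasks(&test_channel);
 *     ASSERT_SUCCESS(testing_channel_check_written_message_str(&test_channel, "expected output"));
 *
 *     ASSERT_SUCCESS(testing_channel_clean_up(&test_channel));
 */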
static struct aws_channel_handler_vtable s_testing_channel_handler_vtable = { .process_read_message = s_testing_channel_handler_process_read_message, .process_write_message = s_testing_channel_handler_process_write_message, .increment_read_window = s_testing_channel_handler_increment_read_window, .shutdown = s_testing_channel_handler_shutdown, .initial_window_size = s_testing_channel_handler_initial_window_size, .message_overhead = s_testing_channel_handler_message_overhead, .destroy = s_testing_channel_handler_destroy, .gather_statistics = s_testing_channel_handler_gather_statistics, .reset_statistics = s_testing_channel_handler_reset_statistics, }; static struct aws_channel_handler *s_new_testing_channel_handler( struct aws_allocator *allocator, size_t initial_window) { struct aws_channel_handler *handler = aws_mem_calloc(allocator, 1, sizeof(struct aws_channel_handler)); struct testing_channel_handler *testing_handler = aws_mem_calloc(allocator, 1, sizeof(struct testing_channel_handler)); aws_linked_list_init(&testing_handler->messages); testing_handler->initial_window = initial_window; testing_handler->latest_window_update = 0; testing_handler->complete_write_immediately = true; testing_handler->complete_write_error_code = AWS_ERROR_SUCCESS; handler->impl = testing_handler; handler->vtable = &s_testing_channel_handler_vtable; handler->alloc = allocator; return handler; } struct testing_channel { struct aws_event_loop *loop; struct testing_loop *loop_impl; struct aws_channel *channel; struct testing_channel_handler *left_handler_impl; struct testing_channel_handler *right_handler_impl; struct aws_channel_slot *left_handler_slot; struct aws_channel_slot *right_handler_slot; void (*channel_shutdown)(int error_code, void *user_data); void *channel_shutdown_user_data; bool channel_setup_completed; bool channel_shutdown_completed; int channel_shutdown_error_code; }; static void s_testing_channel_on_setup_completed(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; (void)error_code; struct testing_channel *testing = user_data; testing->channel_setup_completed = true; } static void s_testing_channel_on_shutdown_completed(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; (void)error_code; struct testing_channel *testing = user_data; testing->channel_shutdown_completed = true; testing->channel_shutdown_error_code = error_code; if (testing->channel_shutdown) { testing->channel_shutdown(error_code, testing->channel_shutdown_user_data); } } /** API for testing, use this for testing purely your channel handlers and nothing else. Because of that, the s_ * convention isn't used on the functions (since they're intended for you to call). */ /** when you want to test the read path of your handler, call this with the message you want it to read. */ static inline int testing_channel_push_read_message(struct testing_channel *testing, struct aws_io_message *message) { return aws_channel_slot_send_message(testing->left_handler_slot, message, AWS_CHANNEL_DIR_READ); } /** when you want to test the write path of your handler, call this with the message you want it to write. 
* A downstream handler must have been installed */ static inline int testing_channel_push_write_message(struct testing_channel *testing, struct aws_io_message *message) { ASSERT_NOT_NULL(testing->right_handler_slot); return aws_channel_slot_send_message(testing->right_handler_slot, message, AWS_CHANNEL_DIR_WRITE); } /** when you want to test the write output of your handler, call this, get the queue and iterate the messages. */ static inline struct aws_linked_list *testing_channel_get_written_message_queue(struct testing_channel *testing) { return &testing->left_handler_impl->messages; } /** Set whether written messages have their on_complete callbacks invoked immediately. * The on_complete callback will be cleared after it is invoked. */ static inline void testing_channel_complete_written_messages_immediately( struct testing_channel *testing, bool complete_immediately, int complete_error_code) { testing->left_handler_impl->complete_write_immediately = complete_immediately; testing->left_handler_impl->complete_write_error_code = complete_error_code; } /** when you want to test the read output of your handler, call this, get the queue and iterate the messages. * A downstream handler must have been installed */ static inline struct aws_linked_list *testing_channel_get_read_message_queue(struct testing_channel *testing) { AWS_ASSERT(testing->right_handler_impl); return &testing->right_handler_impl->messages; } /** When you want to see what the latest window update issues from your channel handler was, call this. */ static inline size_t testing_channel_last_window_update(struct testing_channel *testing) { return testing->left_handler_impl->latest_window_update; } /** When you want the downstream handler to issue a window update */ static inline int testing_channel_increment_read_window(struct testing_channel *testing, size_t size) { ASSERT_NOT_NULL(testing->right_handler_slot); return aws_channel_slot_increment_read_window(testing->right_handler_slot, size); } /** Executes all currently scheduled tasks whose time has come. * Use testing_channel_drain_queued_tasks() to repeatedly run tasks until only future-tasks remain. */ static inline void testing_channel_run_currently_queued_tasks(struct testing_channel *testing) { AWS_ASSERT(aws_channel_thread_is_callers_thread(testing->channel)); uint64_t now = 0; aws_event_loop_current_clock_time(testing->loop, &now); aws_task_scheduler_run_all(&testing->loop_impl->scheduler, now); } /** Repeatedly executes scheduled tasks until only those in the future remain. * This covers the common case where there's a chain reaction of now-tasks scheduling further now-tasks. */ static inline void testing_channel_drain_queued_tasks(struct testing_channel *testing) { AWS_ASSERT(aws_channel_thread_is_callers_thread(testing->channel)); uint64_t now = 0; uint64_t next_task_time = 0; size_t count = 0; while (true) { aws_event_loop_current_clock_time(testing->loop, &now); if (aws_task_scheduler_has_tasks(&testing->loop_impl->scheduler, &next_task_time) && (next_task_time <= now)) { aws_task_scheduler_run_all(&testing->loop_impl->scheduler, now); } else { break; } /* NOTE: This will loop infinitely if there's a task the perpetually re-schedules another task. * Consider capping the number of loops if we want to support that behavior. 
*/ if ((++count % 1000) == 0) { AWS_LOGF_WARN( AWS_LS_IO_CHANNEL, "id=%p: testing_channel_drain_queued_tasks() has looped %zu times.", (void *)testing->channel, count); } } } /** When you want to force the "not on channel thread path" for your handler, set 'on_users_thread' to false. * when you want to undo that, set it back to true. If you set it to false, you'll need to call * 'testing_channel_execute_queued_tasks()' to invoke the tasks that ended up being scheduled. */ static inline void testing_channel_set_is_on_users_thread(struct testing_channel *testing, bool on_users_thread) { testing->loop_impl->mock_on_callers_thread = on_users_thread; } struct aws_testing_channel_options { aws_io_clock_fn *clock_fn; }; static inline int testing_channel_init( struct testing_channel *testing, struct aws_allocator *allocator, struct aws_testing_channel_options *options) { AWS_ZERO_STRUCT(*testing); testing->loop = s_testing_loop_new(allocator, options->clock_fn); testing->loop_impl = testing->loop->impl_data; struct aws_channel_options args = { .on_setup_completed = s_testing_channel_on_setup_completed, .on_shutdown_completed = s_testing_channel_on_shutdown_completed, .setup_user_data = testing, .shutdown_user_data = testing, .event_loop = testing->loop, .enable_read_back_pressure = true, }; testing->channel = aws_channel_new(allocator, &args); /* Wait for channel to finish setup */ testing_channel_drain_queued_tasks(testing); ASSERT_TRUE(testing->channel_setup_completed); testing->left_handler_slot = aws_channel_slot_new(testing->channel); struct aws_channel_handler *handler = s_new_testing_channel_handler(allocator, 16 * 1024); testing->left_handler_impl = handler->impl; ASSERT_SUCCESS(aws_channel_slot_set_handler(testing->left_handler_slot, handler)); return AWS_OP_SUCCESS; } static inline int testing_channel_clean_up(struct testing_channel *testing) { aws_channel_shutdown(testing->channel, AWS_ERROR_SUCCESS); /* Wait for channel to finish shutdown */ testing_channel_drain_queued_tasks(testing); ASSERT_TRUE(testing->channel_shutdown_completed); aws_channel_destroy(testing->channel); /* event_loop can't be destroyed from its own thread */ testing_channel_set_is_on_users_thread(testing, false); aws_event_loop_destroy(testing->loop); return AWS_OP_SUCCESS; } /** When you want to test your handler with a downstream handler installed to the right. 
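 *
 * Sketch (assumes a testing_channel named test_channel that has already been initialized
 * and had the handler under test installed):
 *
 *     ASSERT_SUCCESS(testing_channel_install_downstream_handler(&test_channel, 16 * 1024));
 *     struct aws_linked_list *read_msgs = testing_channel_get_read_message_queue(&test_channel);
 *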
*/ static inline int testing_channel_install_downstream_handler(struct testing_channel *testing, size_t initial_window) { ASSERT_NULL(testing->right_handler_slot); testing->right_handler_slot = aws_channel_slot_new(testing->channel); ASSERT_NOT_NULL(testing->right_handler_slot); ASSERT_SUCCESS(aws_channel_slot_insert_end(testing->channel, testing->right_handler_slot)); struct aws_channel_handler *handler = s_new_testing_channel_handler(testing->left_handler_slot->alloc, initial_window); ASSERT_NOT_NULL(handler); testing->right_handler_impl = handler->impl; ASSERT_SUCCESS(aws_channel_slot_set_handler(testing->right_handler_slot, handler)); return AWS_OP_SUCCESS; } /** Return whether channel is completely shut down */ static inline bool testing_channel_is_shutdown_completed(const struct testing_channel *testing) { return testing->channel_shutdown_completed; } /** Return channel's shutdown error_code */ static inline int testing_channel_get_shutdown_error_code(const struct testing_channel *testing) { AWS_ASSERT(testing->channel_shutdown_completed); return testing->channel_shutdown_error_code; } /** * Set a callback which is invoked during the handler's shutdown, * once in the read direction and again in the write direction. * Use this to inject actions that might occur in the middle of channel shutdown. */ static inline void testing_channel_set_downstream_handler_shutdown_callback( struct testing_channel *testing, testing_channel_handler_on_shutdown_fn *on_shutdown, void *user_data) { AWS_ASSERT(testing->right_handler_impl); testing->right_handler_impl->on_shutdown = on_shutdown; testing->right_handler_impl->on_shutdown_user_data = user_data; } /* Pop first message from queue and compare its contents to expected data. */ static inline int testing_channel_check_written_message( struct testing_channel *channel, struct aws_byte_cursor expected) { struct aws_linked_list *msgs = testing_channel_get_written_message_queue(channel); ASSERT_TRUE(!aws_linked_list_empty(msgs)); struct aws_linked_list_node *node = aws_linked_list_pop_front(msgs); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, msg->message_data.buffer, msg->message_data.len); aws_mem_release(msg->allocator, msg); return AWS_OP_SUCCESS; } /* Pop first message from queue and compare its contents to expected data. 
*/ static inline int testing_channel_check_written_message_str(struct testing_channel *channel, const char *expected) { return testing_channel_check_written_message(channel, aws_byte_cursor_from_c_str(expected)); } /* copies all messages in a list into a buffer, cleans up messages*/ static inline int testing_channel_drain_messages(struct aws_linked_list *msgs, struct aws_byte_buf *buffer) { while (!aws_linked_list_empty(msgs)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(msgs); struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); struct aws_byte_cursor msg_cursor = aws_byte_cursor_from_buf(&msg->message_data); aws_byte_buf_append_dynamic(buffer, &msg_cursor); aws_mem_release(msg->allocator, msg); } return AWS_OP_SUCCESS; } /* Pop all messages from queue and compare their contents to expected data */ static inline int testing_channel_check_messages_ex( struct aws_linked_list *msgs, struct aws_allocator *allocator, struct aws_byte_cursor expected) { struct aws_byte_buf all_msgs; ASSERT_SUCCESS(aws_byte_buf_init(&all_msgs, allocator, 1024)); ASSERT_SUCCESS(testing_channel_drain_messages(msgs, &all_msgs)); ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, all_msgs.buffer, all_msgs.len); aws_byte_buf_clean_up(&all_msgs); return AWS_OP_SUCCESS; } /* Check contents of all messages sent in the write direction. */ static inline int testing_channel_check_written_messages( struct testing_channel *channel, struct aws_allocator *allocator, struct aws_byte_cursor expected) { struct aws_linked_list *msgs = testing_channel_get_written_message_queue(channel); return testing_channel_check_messages_ex(msgs, allocator, expected); } /* Check contents of all messages sent in the write direction. */ static inline int testing_channel_check_written_messages_str( struct testing_channel *channel, struct aws_allocator *allocator, const char *expected) { return testing_channel_check_written_messages(channel, allocator, aws_byte_cursor_from_c_str(expected)); } /* Extract contents of all messages sent in the write direction. 
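 *
 * Sketch (the caller owns the output buffer; test_channel and allocator come from the
 * surrounding test):
 *
 *     struct aws_byte_buf written;
 *     ASSERT_SUCCESS(aws_byte_buf_init(&written, allocator, 1024));
 *     ASSERT_SUCCESS(testing_channel_drain_written_messages(&test_channel, &written));
 *     ... inspect written.buffer / written.len ...
 *     aws_byte_buf_clean_up(&written);
 *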
*/ static inline int testing_channel_drain_written_messages(struct testing_channel *channel, struct aws_byte_buf *output) { struct aws_linked_list *msgs = testing_channel_get_written_message_queue(channel); ASSERT_SUCCESS(testing_channel_drain_messages(msgs, output)); return AWS_OP_SUCCESS; } /* Check contents of all read-messages sent in the read direction by a midchannel http-handler */ static inline int testing_channel_check_midchannel_read_messages( struct testing_channel *channel, struct aws_allocator *allocator, struct aws_byte_cursor expected) { struct aws_linked_list *msgs = testing_channel_get_read_message_queue(channel); return testing_channel_check_messages_ex(msgs, allocator, expected); } /* Check contents of all read-messages sent in the read direction by a midchannel http-handler */ static inline int testing_channel_check_midchannel_read_messages_str( struct testing_channel *channel, struct aws_allocator *allocator, const char *expected) { return testing_channel_check_midchannel_read_messages(channel, allocator, aws_byte_cursor_from_c_str(expected)); } /* For sending an aws_io_message into the channel, in the write or read direction */ static inline int testing_channel_send_data( struct testing_channel *channel, struct aws_byte_cursor data, enum aws_channel_direction dir, bool ignore_send_message_errors) { struct aws_io_message *msg = aws_channel_acquire_message_from_pool(channel->channel, AWS_IO_MESSAGE_APPLICATION_DATA, data.len); ASSERT_NOT_NULL(msg); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&msg->message_data, data)); int err; if (dir == AWS_CHANNEL_DIR_READ) { err = testing_channel_push_read_message(channel, msg); } else { err = testing_channel_push_write_message(channel, msg); } if (err) { /* If an error happens, clean the message here. Else, the recipient of the message will take the ownership */ aws_mem_release(msg->allocator, msg); } if (!ignore_send_message_errors) { ASSERT_SUCCESS(err); } return AWS_OP_SUCCESS; } /** Create an aws_io_message, containing the following data, and pushes it up the channel in the read direction */ static inline int testing_channel_push_read_data(struct testing_channel *channel, struct aws_byte_cursor data) { return testing_channel_send_data(channel, data, AWS_CHANNEL_DIR_READ, false); } /** Create an aws_io_message, containing the following data, and pushes it up the channel in the read direction */ static inline int testing_channel_push_read_str(struct testing_channel *channel, const char *str) { return testing_channel_send_data(channel, aws_byte_cursor_from_c_str(str), AWS_CHANNEL_DIR_READ, false); } /** Create an aws_io_message, containing the following data. * Tries to push it up the channel in the read direction, but don't assert if the message can't be sent. 
* Useful for testing data that arrives during handler shutdown */ static inline int testing_channel_push_read_str_ignore_errors(struct testing_channel *channel, const char *str) { return testing_channel_send_data(channel, aws_byte_cursor_from_c_str(str), AWS_CHANNEL_DIR_READ, true); } /** Create an aws_io_message, containing the following data, and pushes it up the channel in the write direction */ static inline int testing_channel_push_write_data(struct testing_channel *channel, struct aws_byte_cursor data) { return testing_channel_send_data(channel, data, AWS_CHANNEL_DIR_WRITE, false); } /** Create an aws_io_message, containing the following data, and pushes it up the channel in the write direction */ static inline int testing_channel_push_write_str(struct testing_channel *channel, const char *str) { return testing_channel_send_data(channel, aws_byte_cursor_from_c_str(str), AWS_CHANNEL_DIR_WRITE, false); } #endif /* AWS_TESTING_IO_TESTING_CHANNEL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/include/aws/testing/stream_tester.h000066400000000000000000000212511456575232400265640ustar00rootroot00000000000000#ifndef AWS_TESTING_STREAM_TESTER_H #define AWS_TESTING_STREAM_TESTER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #ifndef AWS_UNSTABLE_TESTING_API # error This code is designed for use by AWS owned libraries for the AWS C99 SDK. \ You are welcome to use it, but we make no promises on the stability of this API. \ To enable use of this code, set the AWS_UNSTABLE_TESTING_API compiler flag. #endif /** * Use aws_input_stream tester to test edge cases in systems that take input streams. * You can make it behave in specific weird ways (e.g. fail on 3rd read). * * There are a few ways to set what gets streamed. * - source_bytes: if set, stream these bytes. * - source_stream: if set, wrap this stream (but insert weird behavior like failing on 3rd read). * - autogen_length: autogen streaming content N bytes in length. */ enum aws_autogen_style { AWS_AUTOGEN_LOREM_IPSUM, AWS_AUTOGEN_ALPHABET, AWS_AUTOGEN_NUMBERS, }; struct aws_input_stream_tester_options { /* bytes to be streamed. * the stream copies these to its own internal buffer. * or you can set the autogen_length */ struct aws_byte_cursor source_bytes; /* wrap another stream */ struct aws_input_stream *source_stream; /* if non-zero, autogen streaming content N bytes in length */ size_t autogen_length; /* style of contents (if using autogen) */ enum aws_autogen_style autogen_style; /* if non-zero, read at most N bytes per read() */ size_t max_bytes_per_read; /* if non-zero, read 0 bytes the Nth time read() is called */ size_t read_zero_bytes_on_nth_read; /* If false, EOF is reported by the read() which produces the last few bytes. * If true, EOF isn't reported until there's one more read(), producing zero bytes. 
* This emulates an underlying stream that reports EOF by reading 0 bytes */ bool eof_requires_extra_read; /* if non-zero, fail the Nth time read() is called, raising `fail_with_error_code` */ size_t fail_on_nth_read; /* error-code to raise if failing on purpose */ int fail_with_error_code; }; struct aws_input_stream_tester { struct aws_input_stream base; struct aws_allocator *alloc; struct aws_input_stream_tester_options options; struct aws_byte_buf source_buf; struct aws_input_stream *source_stream; size_t read_count; bool num_bytes_last_read; /* number of bytes read in the most recent successful read() */ uint64_t total_bytes_read; }; static inline int s_input_stream_tester_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { struct aws_input_stream_tester *impl = (struct aws_input_stream_tester *)stream->impl; return aws_input_stream_seek(impl->source_stream, offset, basis); } static inline int s_input_stream_tester_read(struct aws_input_stream *stream, struct aws_byte_buf *original_dest) { struct aws_input_stream_tester *impl = (struct aws_input_stream_tester *)stream->impl; impl->read_count++; /* if we're configured to fail, then do it */ if (impl->read_count == impl->options.fail_on_nth_read) { AWS_FATAL_ASSERT(impl->options.fail_with_error_code != 0); return aws_raise_error(impl->options.fail_with_error_code); } /* cap how much is read, if that's how we're configured */ size_t bytes_to_read = original_dest->capacity - original_dest->len; if (impl->options.max_bytes_per_read != 0) { bytes_to_read = aws_min_size(bytes_to_read, impl->options.max_bytes_per_read); } if (impl->read_count == impl->options.read_zero_bytes_on_nth_read) { bytes_to_read = 0; } /* pass artificially capped buffer to actual stream */ struct aws_byte_buf capped_buf = aws_byte_buf_from_empty_array(original_dest->buffer + original_dest->len, bytes_to_read); if (aws_input_stream_read(impl->source_stream, &capped_buf)) { return AWS_OP_ERR; } size_t bytes_actually_read = capped_buf.len; original_dest->len += bytes_actually_read; impl->num_bytes_last_read = bytes_actually_read; impl->total_bytes_read += bytes_actually_read; return AWS_OP_SUCCESS; } static inline int s_input_stream_tester_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { struct aws_input_stream_tester *impl = (struct aws_input_stream_tester *)stream->impl; if (aws_input_stream_get_status(impl->source_stream, status)) { return AWS_OP_ERR; } /* if we're emulating a stream that requires an additional 0 byte read to realize it's EOF */ if (impl->options.eof_requires_extra_read) { if (impl->num_bytes_last_read > 0) { status->is_end_of_stream = false; } } return AWS_OP_SUCCESS; } static inline int s_input_stream_tester_get_length(struct aws_input_stream *stream, int64_t *out_length) { struct aws_input_stream_tester *impl = (struct aws_input_stream_tester *)stream->impl; return aws_input_stream_get_length(impl->source_stream, out_length); } static struct aws_input_stream_vtable s_input_stream_tester_vtable = { .seek = s_input_stream_tester_seek, .read = s_input_stream_tester_read, .get_status = s_input_stream_tester_get_status, .get_length = s_input_stream_tester_get_length, }; /* init byte-buf and fill it autogenned content */ static inline void s_byte_buf_init_autogenned( struct aws_byte_buf *buf, struct aws_allocator *alloc, size_t length, enum aws_autogen_style style) { aws_byte_buf_init(buf, alloc, length); struct aws_byte_cursor pattern = {0}; switch (style) { case 
AWS_AUTOGEN_LOREM_IPSUM: pattern = aws_byte_cursor_from_c_str( "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore " "et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut " "aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse " "cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa " "qui officia deserunt mollit anim id est laborum. "); break; case AWS_AUTOGEN_ALPHABET: pattern = aws_byte_cursor_from_c_str("abcdefghijklmnopqrstuvwxyz"); break; case AWS_AUTOGEN_NUMBERS: pattern = aws_byte_cursor_from_c_str("1234567890"); break; } struct aws_byte_cursor pattern_cursor = {0}; while (buf->len < buf->capacity) { if (pattern_cursor.len == 0) { pattern_cursor = pattern; } aws_byte_buf_write_to_capacity(buf, &pattern_cursor); } } static inline uint64_t aws_input_stream_tester_total_bytes_read(const struct aws_input_stream *stream) { const struct aws_input_stream_tester *impl = (const struct aws_input_stream_tester *)stream->impl; return impl->total_bytes_read; } static inline void s_input_stream_tester_destroy(void *user_data) { struct aws_input_stream_tester *impl = (struct aws_input_stream_tester *)user_data; aws_input_stream_release(impl->source_stream); aws_byte_buf_clean_up(&impl->source_buf); aws_mem_release(impl->alloc, impl); } static inline struct aws_input_stream *aws_input_stream_new_tester( struct aws_allocator *alloc, const struct aws_input_stream_tester_options *options) { struct aws_input_stream_tester *impl = (struct aws_input_stream_tester *)aws_mem_calloc(alloc, 1, sizeof(struct aws_input_stream_tester)); impl->base.impl = impl; impl->base.vtable = &s_input_stream_tester_vtable; aws_ref_count_init(&impl->base.ref_count, impl, s_input_stream_tester_destroy); impl->alloc = alloc; impl->options = *options; if (options->source_stream != NULL) { AWS_FATAL_ASSERT((options->autogen_length == 0) && (options->source_bytes.len == 0)); impl->source_stream = aws_input_stream_acquire(options->source_stream); } else { if (options->autogen_length > 0) { AWS_FATAL_ASSERT(options->source_bytes.len == 0); s_byte_buf_init_autogenned(&impl->source_buf, alloc, options->autogen_length, options->autogen_style); } else { aws_byte_buf_init_copy_from_cursor(&impl->source_buf, alloc, options->source_bytes); } struct aws_byte_cursor source_buf_cursor = aws_byte_cursor_from_buf(&impl->source_buf); impl->source_stream = aws_input_stream_new_from_cursor(alloc, &source_buf_cursor); AWS_FATAL_ASSERT(impl->source_stream); } return &impl->base; } #endif /* AWS_TESTING_STREAM_TESTER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/000077500000000000000000000000001456575232400211375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/alpn_handler.c000066400000000000000000000066751456575232400237500ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
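 *
 * The ALPN handler sits in a channel slot waiting for the TLS handler to emit
 * an AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE. When it arrives, the user's
 * on_protocol_negotiated callback is asked for a handler that speaks the
 * negotiated protocol, and the ALPN handler replaces its own slot with a new
 * slot holding that handler.
 *
 * Rough creation sketch (the callback and user-data names below are
 * placeholders for illustration, not definitions from this file):
 *
 *   struct aws_channel_handler *alpn = aws_tls_alpn_handler_new(
 *       allocator, s_on_protocol_negotiated, my_user_data);
 *
 * followed by installing the returned handler in a channel slot with
 * aws_channel_slot_set_handler().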
*/ #include #include struct alpn_handler { aws_tls_on_protocol_negotiated on_protocol_negotiated; void *user_data; }; static int s_alpn_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { if (message->message_tag != AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE) { return aws_raise_error(AWS_IO_MISSING_ALPN_MESSAGE); } struct aws_tls_negotiated_protocol_message *protocol_message = (struct aws_tls_negotiated_protocol_message *)message->message_data.buffer; struct aws_channel_slot *new_slot = aws_channel_slot_new(slot->channel); struct alpn_handler *alpn_handler = (struct alpn_handler *)handler->impl; if (!new_slot) { return AWS_OP_ERR; } struct aws_channel_handler *new_handler = alpn_handler->on_protocol_negotiated(new_slot, &protocol_message->protocol, alpn_handler->user_data); if (!new_handler) { aws_mem_release(handler->alloc, (void *)new_slot); return aws_raise_error(AWS_IO_UNHANDLED_ALPN_PROTOCOL_MESSAGE); } aws_channel_slot_replace(slot, new_slot); aws_channel_slot_set_handler(new_slot, new_handler); return AWS_OP_SUCCESS; } static int s_alpn_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool abort_immediately) { (void)handler; return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, abort_immediately); } static size_t s_alpn_get_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return sizeof(struct aws_tls_negotiated_protocol_message); } static void s_alpn_destroy(struct aws_channel_handler *handler) { struct alpn_handler *alpn_handler = (struct alpn_handler *)handler->impl; aws_mem_release(handler->alloc, alpn_handler); aws_mem_release(handler->alloc, handler); } static size_t s_alpn_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static struct aws_channel_handler_vtable s_alpn_handler_vtable = { .initial_window_size = s_alpn_get_initial_window_size, .increment_read_window = NULL, .shutdown = s_alpn_shutdown, .process_write_message = NULL, .process_read_message = s_alpn_process_read_message, .destroy = s_alpn_destroy, .message_overhead = s_alpn_message_overhead, }; struct aws_channel_handler *aws_tls_alpn_handler_new( struct aws_allocator *allocator, aws_tls_on_protocol_negotiated on_protocol_negotiated, void *user_data) { struct aws_channel_handler *channel_handler = (struct aws_channel_handler *)aws_mem_calloc(allocator, 1, sizeof(struct aws_channel_handler)); if (!channel_handler) { return NULL; } struct alpn_handler *alpn_handler = (struct alpn_handler *)aws_mem_calloc(allocator, 1, sizeof(struct alpn_handler)); if (!alpn_handler) { aws_mem_release(allocator, (void *)channel_handler); return NULL; } alpn_handler->on_protocol_negotiated = on_protocol_negotiated; alpn_handler->user_data = user_data; channel_handler->impl = alpn_handler; channel_handler->alloc = allocator; channel_handler->vtable = &s_alpn_handler_vtable; return channel_handler; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/async_stream.c000066400000000000000000000126151456575232400240000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
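 *
 * aws_async_input_stream is a ref-counted, vtable-based stream whose read()
 * completes through an aws_future_bool (the boolean result is true once
 * end-of-stream has been reached). aws_async_input_stream_read_to_fill()
 * keeps calling read() until the destination buffer is full, EOF is hit, or
 * an error occurs; it loops in place instead of chaining completion callbacks
 * so that many small synchronous reads cannot grow the call stack.
 *
 * Illustrative caller sketch (s_on_fill_complete and my_user_data are
 * placeholders; the aws_future_bool helpers belong to the aws-c-io future API):
 *
 *   struct aws_future_bool *future = aws_async_input_stream_read_to_fill(stream, &dest_buf);
 *   aws_future_bool_register_callback(future, s_on_fill_complete, my_user_data);
 *
 * In the callback, check aws_future_bool_get_error() before reading the
 * result, then release the future with aws_future_bool_release().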
*/ #include #include #include #include void aws_async_input_stream_init_base( struct aws_async_input_stream *stream, struct aws_allocator *alloc, const struct aws_async_input_stream_vtable *vtable, void *impl) { AWS_PRECONDITION(stream); AWS_PRECONDITION(alloc); AWS_PRECONDITION(vtable); AWS_PRECONDITION(vtable->read); AWS_PRECONDITION(vtable->destroy); AWS_ZERO_STRUCT(*stream); stream->alloc = alloc; stream->vtable = vtable; stream->impl = impl; aws_ref_count_init(&stream->ref_count, stream, (aws_simple_completion_callback *)vtable->destroy); } struct aws_async_input_stream *aws_async_input_stream_acquire(struct aws_async_input_stream *stream) { if (stream != NULL) { aws_ref_count_acquire(&stream->ref_count); } return stream; } struct aws_async_input_stream *aws_async_input_stream_release(struct aws_async_input_stream *stream) { if (stream) { aws_ref_count_release(&stream->ref_count); } return NULL; } struct aws_future_bool *aws_async_input_stream_read(struct aws_async_input_stream *stream, struct aws_byte_buf *dest) { AWS_PRECONDITION(stream); AWS_PRECONDITION(dest); /* Ensure the buffer has space available */ if (dest->len == dest->capacity) { struct aws_future_bool *future = aws_future_bool_new(stream->alloc); aws_future_bool_set_error(future, AWS_ERROR_SHORT_BUFFER); return future; } struct aws_future_bool *future = stream->vtable->read(stream, dest); AWS_POSTCONDITION(future != NULL); return future; } /* Data to perform the aws_async_input_stream_read_to_fill() job */ struct aws_async_input_stream_fill_job { struct aws_allocator *alloc; struct aws_async_input_stream *stream; struct aws_byte_buf *dest; /* Future for each read() step */ struct aws_future_bool *read_step_future; /* Future to set when this fill job completes */ struct aws_future_bool *on_complete_future; }; static void s_async_stream_fill_job_complete( struct aws_async_input_stream_fill_job *fill_job, bool eof, int error_code) { if (error_code) { aws_future_bool_set_error(fill_job->on_complete_future, error_code); } else { aws_future_bool_set_result(fill_job->on_complete_future, eof); } aws_future_bool_release(fill_job->on_complete_future); aws_async_input_stream_release(fill_job->stream); aws_mem_release(fill_job->alloc, fill_job); } /* Call read() in a loop. * It would be simpler to set a completion callback for each read() call, * but this risks our call stack growing large if there are many small, synchronous, reads. * So be complicated and loop until a read() ) call is actually async, * and only then set the completion callback (which is this same function, where we resume looping). */ static void s_async_stream_fill_job_loop(void *user_data) { struct aws_async_input_stream_fill_job *fill_job = user_data; while (true) { /* Process read_step_future from previous iteration of loop. * It's NULL the first time the job ever enters the loop. * But it's set in subsequent runs of the loop, * and when this is a read_step_future completion callback. */ if (fill_job->read_step_future) { if (aws_future_bool_register_callback_if_not_done( fill_job->read_step_future, s_async_stream_fill_job_loop, fill_job)) { /* not done, we'll resume this loop when callback fires */ return; } /* read_step_future is done */ int error_code = aws_future_bool_get_error(fill_job->read_step_future); bool eof = error_code ? 
false : aws_future_bool_get_result(fill_job->read_step_future); bool reached_capacity = fill_job->dest->len == fill_job->dest->capacity; fill_job->read_step_future = aws_future_bool_release(fill_job->read_step_future); /* release and NULL */ if (error_code || eof || reached_capacity) { /* job complete! */ s_async_stream_fill_job_complete(fill_job, eof, error_code); return; } } /* Kick off a read, which may or may not complete async */ fill_job->read_step_future = aws_async_input_stream_read(fill_job->stream, fill_job->dest); } } struct aws_future_bool *aws_async_input_stream_read_to_fill( struct aws_async_input_stream *stream, struct aws_byte_buf *dest) { AWS_PRECONDITION(stream); AWS_PRECONDITION(dest); struct aws_future_bool *future = aws_future_bool_new(stream->alloc); /* Ensure the buffer has space available */ if (dest->len == dest->capacity) { aws_future_bool_set_error(future, AWS_ERROR_SHORT_BUFFER); return future; } /* Prepare for async job */ struct aws_async_input_stream_fill_job *fill_job = aws_mem_calloc(stream->alloc, 1, sizeof(struct aws_async_input_stream_fill_job)); fill_job->alloc = stream->alloc; fill_job->stream = aws_async_input_stream_acquire(stream); fill_job->dest = dest; fill_job->on_complete_future = aws_future_bool_acquire(future); /* Kick off work */ s_async_stream_fill_job_loop(fill_job); return future; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/bsd/000077500000000000000000000000001456575232400217075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/bsd/kqueue_event_loop.c000066400000000000000000001131211456575232400256030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) || defined(__NetBSD__) # define __BSD_VISIBLE 1 # include #endif #include #include #include #include static void s_destroy(struct aws_event_loop *event_loop); static int s_run(struct aws_event_loop *event_loop); static int s_stop(struct aws_event_loop *event_loop); static int s_wait_for_stop_completion(struct aws_event_loop *event_loop); static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task); static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); static int s_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data); static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); static void s_free_io_event_resources(void *user_data); static bool s_is_event_thread(struct aws_event_loop *event_loop); static void aws_event_loop_thread(void *user_data); int aws_open_nonblocking_posix_pipe(int pipe_fds[2]); enum event_thread_state { EVENT_THREAD_STATE_READY_TO_RUN, EVENT_THREAD_STATE_RUNNING, EVENT_THREAD_STATE_STOPPING, }; enum pipe_fd_index { READ_FD, WRITE_FD, }; struct kqueue_loop { /* thread_created_on is the handle to the event loop thread. */ struct aws_thread thread_created_on; /* thread_joined_to is used by the thread destroying the event loop. */ aws_thread_id_t thread_joined_to; /* running_thread_id is NULL if the event loop thread is stopped or points-to the thread_id of the thread running * the event loop (either thread_created_on or thread_joined_to). 
Atomic because of concurrent writes (e.g., * run/stop) and reads (e.g., is_event_loop_thread). * An aws_thread_id_t variable itself cannot be atomic because it is an opaque type that is platform-dependent. */ struct aws_atomic_var running_thread_id; int kq_fd; /* kqueue file descriptor */ /* Pipe for signaling to event-thread that cross_thread_data has changed. */ int cross_thread_signal_pipe[2]; /* cross_thread_data holds things that must be communicated across threads. * When the event-thread is running, the mutex must be locked while anyone touches anything in cross_thread_data. * If this data is modified outside the thread, the thread is signaled via activity on a pipe. */ struct { struct aws_mutex mutex; bool thread_signaled; /* whether thread has been signaled about changes to cross_thread_data */ struct aws_linked_list tasks_to_schedule; enum event_thread_state state; } cross_thread_data; /* thread_data holds things which, when the event-thread is running, may only be touched by the thread */ struct { struct aws_task_scheduler scheduler; int connected_handle_count; /* These variables duplicate ones in cross_thread_data. We move values out while holding the mutex and operate * on them later */ enum event_thread_state state; } thread_data; struct aws_thread_options thread_options; }; /* Data attached to aws_io_handle while the handle is subscribed to io events */ struct handle_data { struct aws_io_handle *owner; struct aws_event_loop *event_loop; aws_event_loop_on_event_fn *on_event; void *on_event_user_data; int events_subscribed; /* aws_io_event_types this handle should be subscribed to */ int events_this_loop; /* aws_io_event_types received during current loop of the event-thread */ enum { HANDLE_STATE_SUBSCRIBING, HANDLE_STATE_SUBSCRIBED, HANDLE_STATE_UNSUBSCRIBED } state; struct aws_task subscribe_task; struct aws_task cleanup_task; }; enum { DEFAULT_TIMEOUT_SEC = 100, /* Max kevent() timeout per loop of the event-thread */ MAX_EVENTS = 100, /* Max kevents to process per loop of the event-thread */ }; struct aws_event_loop_vtable s_kqueue_vtable = { .destroy = s_destroy, .run = s_run, .stop = s_stop, .wait_for_stop_completion = s_wait_for_stop_completion, .schedule_task_now = s_schedule_task_now, .schedule_task_future = s_schedule_task_future, .subscribe_to_io_events = s_subscribe_to_io_events, .cancel_task = s_cancel_task, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, .free_io_event_resources = s_free_io_event_resources, .is_on_callers_thread = s_is_event_thread, }; struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { AWS_ASSERT(alloc); AWS_ASSERT(clock); AWS_ASSERT(options); AWS_ASSERT(options->clock); bool clean_up_event_loop_mem = false; bool clean_up_event_loop_base = false; bool clean_up_impl_mem = false; bool clean_up_thread = false; bool clean_up_kqueue = false; bool clean_up_signal_pipe = false; bool clean_up_signal_kevent = false; bool clean_up_mutex = false; struct aws_event_loop *event_loop = aws_mem_acquire(alloc, sizeof(struct aws_event_loop)); if (!event_loop) { return NULL; } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered kqueue", (void *)event_loop); clean_up_event_loop_mem = true; int err = aws_event_loop_init_base(event_loop, alloc, options->clock); if (err) { goto clean_up; } clean_up_event_loop_base = true; struct kqueue_loop *impl = aws_mem_calloc(alloc, 1, sizeof(struct kqueue_loop)); if (!impl) { goto clean_up; } if 
(options->thread_options) { impl->thread_options = *options->thread_options; } else { impl->thread_options = *aws_default_thread_options(); } /* intialize thread id to NULL. It will be set when the event loop thread starts. */ aws_atomic_init_ptr(&impl->running_thread_id, NULL); clean_up_impl_mem = true; err = aws_thread_init(&impl->thread_created_on, alloc); if (err) { goto clean_up; } clean_up_thread = true; impl->kq_fd = kqueue(); if (impl->kq_fd == -1) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to open kqueue handle.", (void *)event_loop); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up; } clean_up_kqueue = true; err = aws_open_nonblocking_posix_pipe(impl->cross_thread_signal_pipe); if (err) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to open pipe handle.", (void *)event_loop); goto clean_up; } AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: pipe descriptors read %d, write %d.", (void *)event_loop, impl->cross_thread_signal_pipe[READ_FD], impl->cross_thread_signal_pipe[WRITE_FD]); clean_up_signal_pipe = true; /* Set up kevent to handle activity on the cross_thread_signal_pipe */ struct kevent thread_signal_kevent; EV_SET( &thread_signal_kevent, impl->cross_thread_signal_pipe[READ_FD], EVFILT_READ /*filter*/, EV_ADD | EV_CLEAR /*flags*/, 0 /*fflags*/, 0 /*data*/, NULL /*udata*/); int res = kevent( impl->kq_fd, &thread_signal_kevent /*changelist*/, 1 /*nchanges*/, NULL /*eventlist*/, 0 /*nevents*/, NULL /*timeout*/); if (res == -1) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to create cross-thread signal kevent.", (void *)event_loop); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up; } clean_up_signal_kevent = true; err = aws_mutex_init(&impl->cross_thread_data.mutex); if (err) { goto clean_up; } clean_up_mutex = true; impl->cross_thread_data.thread_signaled = false; aws_linked_list_init(&impl->cross_thread_data.tasks_to_schedule); impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; err = aws_task_scheduler_init(&impl->thread_data.scheduler, alloc); if (err) { goto clean_up; } impl->thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; event_loop->impl_data = impl; event_loop->vtable = &s_kqueue_vtable; /* success */ return event_loop; clean_up: if (clean_up_mutex) { aws_mutex_clean_up(&impl->cross_thread_data.mutex); } if (clean_up_signal_kevent) { thread_signal_kevent.flags = EV_DELETE; kevent( impl->kq_fd, &thread_signal_kevent /*changelist*/, 1 /*nchanges*/, NULL /*eventlist*/, 0 /*nevents*/, NULL /*timeout*/); } if (clean_up_signal_pipe) { close(impl->cross_thread_signal_pipe[READ_FD]); close(impl->cross_thread_signal_pipe[WRITE_FD]); } if (clean_up_kqueue) { close(impl->kq_fd); } if (clean_up_thread) { aws_thread_clean_up(&impl->thread_created_on); } if (clean_up_impl_mem) { aws_mem_release(alloc, impl); } if (clean_up_event_loop_base) { aws_event_loop_clean_up_base(event_loop); } if (clean_up_event_loop_mem) { aws_mem_release(alloc, event_loop); } return NULL; } static void s_destroy(struct aws_event_loop *event_loop) { AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: destroying event_loop", (void *)event_loop); struct kqueue_loop *impl = event_loop->impl_data; /* Stop the event-thread. This might have already happened. It's safe to call multiple times. 
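 * s_stop() only marks cross_thread_data as STOPPING and, if needed, writes to
 * the signal pipe, so repeating it is harmless; s_wait_for_stop_completion()
 * below is what actually joins the event-thread.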
*/ s_stop(event_loop); int err = s_wait_for_stop_completion(event_loop); if (err) { AWS_LOGF_WARN( AWS_LS_IO_EVENT_LOOP, "id=%p: failed to destroy event-thread, resources have been leaked", (void *)event_loop); AWS_ASSERT("Failed to destroy event-thread, resources have been leaked." == NULL); return; } /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ impl->thread_joined_to = aws_thread_current_thread_id(); aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_joined_to); /* Clean up task-related stuff first. It's possible the a cancelled task adds further tasks to this event_loop. * Tasks added in this way will be in cross_thread_data.tasks_to_schedule, so we clean that up last */ aws_task_scheduler_clean_up(&impl->thread_data.scheduler); /* Tasks in scheduler get cancelled*/ while (!aws_linked_list_empty(&impl->cross_thread_data.tasks_to_schedule)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&impl->cross_thread_data.tasks_to_schedule); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); } /* Warn user if aws_io_handle was subscribed, but never unsubscribed. This would cause memory leaks. */ AWS_ASSERT(impl->thread_data.connected_handle_count == 0); /* Clean up everything else */ aws_mutex_clean_up(&impl->cross_thread_data.mutex); struct kevent thread_signal_kevent; EV_SET( &thread_signal_kevent, impl->cross_thread_signal_pipe[READ_FD], EVFILT_READ /*filter*/, EV_DELETE /*flags*/, 0 /*fflags*/, 0 /*data*/, NULL /*udata*/); kevent( impl->kq_fd, &thread_signal_kevent /*changelist*/, 1 /*nchanges*/, NULL /*eventlist*/, 0 /*nevents*/, NULL /*timeout*/); close(impl->cross_thread_signal_pipe[READ_FD]); close(impl->cross_thread_signal_pipe[WRITE_FD]); close(impl->kq_fd); aws_thread_clean_up(&impl->thread_created_on); aws_mem_release(event_loop->alloc, impl); aws_event_loop_clean_up_base(event_loop); aws_mem_release(event_loop->alloc, event_loop); } static int s_run(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: starting event-loop thread.", (void *)event_loop); /* to re-run, call stop() and wait_for_stop_completion() */ AWS_ASSERT(impl->cross_thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); /* Since thread isn't running it's ok to touch thread_data, * and it's ok to touch cross_thread_data without locking the mutex */ impl->cross_thread_data.state = EVENT_THREAD_STATE_RUNNING; aws_thread_increment_unjoined_count(); int err = aws_thread_launch(&impl->thread_created_on, aws_event_loop_thread, (void *)event_loop, &impl->thread_options); if (err) { aws_thread_decrement_unjoined_count(); AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop); goto clean_up; } return AWS_OP_SUCCESS; clean_up: impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; return AWS_OP_ERR; } /* This function can't fail, we're relying on the thread responding to critical messages (ex: stop thread) */ void signal_cross_thread_data_changed(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: signaling event-loop that cross-thread tasks need to be scheduled.", (void *)event_loop); /* Doesn't actually matter what we write, any activity on pipe signals that cross_thread_data has changed, * If the 
pipe is full and the write fails, that's fine, the event-thread will get the signal from some previous * write */ uint32_t write_whatever = 0xC0FFEE; write(impl->cross_thread_signal_pipe[WRITE_FD], &write_whatever, sizeof(write_whatever)); } static int s_stop(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; bool signal_thread = false; { /* Begin critical section */ aws_mutex_lock(&impl->cross_thread_data.mutex); if (impl->cross_thread_data.state == EVENT_THREAD_STATE_RUNNING) { impl->cross_thread_data.state = EVENT_THREAD_STATE_STOPPING; signal_thread = !impl->cross_thread_data.thread_signaled; impl->cross_thread_data.thread_signaled = true; } aws_mutex_unlock(&impl->cross_thread_data.mutex); } /* End critical section */ if (signal_thread) { signal_cross_thread_data_changed(event_loop); } return AWS_OP_SUCCESS; } static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; #ifdef DEBUG_BUILD aws_mutex_lock(&impl->cross_thread_data.mutex); /* call stop() before wait_for_stop_completion() or you'll wait forever */ AWS_ASSERT(impl->cross_thread_data.state != EVENT_THREAD_STATE_RUNNING); aws_mutex_unlock(&impl->cross_thread_data.mutex); #endif int err = aws_thread_join(&impl->thread_created_on); aws_thread_decrement_unjoined_count(); if (err) { return AWS_OP_ERR; } /* Since thread is no longer running it's ok to touch thread_data, * and it's ok to touch cross_thread_data without locking the mutex */ impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; impl->thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; return AWS_OP_SUCCESS; } /* Common functionality for "now" and "future" task scheduling. * If `run_at_nanos` is zero then the task is scheduled as a "now" task. 
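 * The public wrappers are thin: s_schedule_task_now() calls this with
 * run_at_nanos of 0 and s_schedule_task_future() passes the caller's
 * timestamp through unchanged, so a literal run-at time of 0 is treated the
 * same as "now".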
*/ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { AWS_ASSERT(task); struct kqueue_loop *impl = event_loop->impl_data; /* If we're on the event-thread, just schedule it directly */ if (s_is_event_thread(event_loop)) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: scheduling task %p in-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); if (run_at_nanos == 0) { aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); } else { aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, run_at_nanos); } return; } /* Otherwise, add it to cross_thread_data.tasks_to_schedule and signal the event-thread to process it */ AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: scheduling task %p cross-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); task->timestamp = run_at_nanos; bool should_signal_thread = false; /* Begin critical section */ aws_mutex_lock(&impl->cross_thread_data.mutex); aws_linked_list_push_back(&impl->cross_thread_data.tasks_to_schedule, &task->node); /* Signal thread that cross_thread_data has changed (unless it's been signaled already) */ if (!impl->cross_thread_data.thread_signaled) { should_signal_thread = true; impl->cross_thread_data.thread_signaled = true; } aws_mutex_unlock(&impl->cross_thread_data.mutex); /* End critical section */ if (should_signal_thread) { signal_cross_thread_data_changed(event_loop); } } static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { s_schedule_task_common(event_loop, task, 0); /* Zero is used to denote "now" tasks */ } static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { s_schedule_task_common(event_loop, task, run_at_nanos); } static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { struct kqueue_loop *kqueue_loop = event_loop->impl_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: cancelling task %p", (void *)event_loop, (void *)task); aws_task_scheduler_cancel_task(&kqueue_loop->thread_data.scheduler, task); } /* Scheduled task that connects aws_io_handle with the kqueue */ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_task_status status) { (void)task; struct handle_data *handle_data = user_data; struct aws_event_loop *event_loop = handle_data->event_loop; struct kqueue_loop *impl = handle_data->event_loop->impl_data; impl->thread_data.connected_handle_count++; /* if task was cancelled, nothing to do */ if (status == AWS_TASK_STATUS_CANCELED) { return; } /* If handle was unsubscribed before this task could execute, nothing to do */ if (handle_data->state == HANDLE_STATE_UNSUBSCRIBED) { return; } AWS_ASSERT(handle_data->state == HANDLE_STATE_SUBSCRIBING); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: subscribing to events on fd %d", (void *)event_loop, handle_data->owner->data.fd); /* In order to monitor both reads and writes, kqueue requires you to add two separate kevents. * If we're adding two separate kevents, but one of those fails, we need to remove the other kevent. * Therefore we use the EV_RECEIPT flag. This causes kevent() to tell whether each EV_ADD succeeded, * rather than the usual behavior of telling us about recent events. 
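 * With EV_RECEIPT, kevent() echoes one result entry per change: each entry
 * comes back with EV_ERROR set in .flags, and .data holds 0 on success or an
 * errno value on failure, which is what the loop below inspects.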
*/ struct kevent changelist[2]; AWS_ZERO_ARRAY(changelist); int changelist_size = 0; if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { EV_SET( &changelist[changelist_size++], handle_data->owner->data.fd, EVFILT_READ /*filter*/, EV_ADD | EV_RECEIPT | EV_CLEAR /*flags*/, 0 /*fflags*/, 0 /*data*/, handle_data /*udata*/); } if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { EV_SET( &changelist[changelist_size++], handle_data->owner->data.fd, EVFILT_WRITE /*filter*/, EV_ADD | EV_RECEIPT | EV_CLEAR /*flags*/, 0 /*fflags*/, 0 /*data*/, handle_data /*udata*/); } int num_events = kevent( impl->kq_fd, changelist /*changelist*/, changelist_size /*nchanges*/, changelist /*eventlist. It's OK to re-use the same memory for changelist input and eventlist output*/, changelist_size /*nevents*/, NULL /*timeout*/); if (num_events == -1) { goto subscribe_failed; } /* Look through results to see if any failed */ for (int i = 0; i < num_events; ++i) { /* Every result should be flagged as error, that's just how EV_RECEIPT works */ AWS_ASSERT(changelist[i].flags & EV_ERROR); /* If a real error occurred, .data contains the error code */ if (changelist[i].data != 0) { goto subscribe_failed; } } /* Success */ handle_data->state = HANDLE_STATE_SUBSCRIBED; return; subscribe_failed: AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: failed to subscribe to events on fd %d", (void *)event_loop, handle_data->owner->data.fd); /* Remove any related kevents that succeeded */ for (int i = 0; i < num_events; ++i) { if (changelist[i].data == 0) { changelist[i].flags = EV_DELETE; kevent( impl->kq_fd, &changelist[i] /*changelist*/, 1 /*nchanges*/, NULL /*eventlist*/, 0 /*nevents*/, NULL /*timeout*/); } } /* We can't return an error code because this was a scheduled task. * Notify the user of the failed subscription by passing AWS_IO_EVENT_TYPE_ERROR to the callback. */ handle_data->on_event(event_loop, handle_data->owner, AWS_IO_EVENT_TYPE_ERROR, handle_data->on_event_user_data); } static int s_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data) { AWS_ASSERT(event_loop); AWS_ASSERT(handle->data.fd != -1); AWS_ASSERT(handle->additional_data == NULL); AWS_ASSERT(on_event); /* Must subscribe for read, write, or both */ AWS_ASSERT(events & (AWS_IO_EVENT_TYPE_READABLE | AWS_IO_EVENT_TYPE_WRITABLE)); struct handle_data *handle_data = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct handle_data)); if (!handle_data) { return AWS_OP_ERR; } handle_data->owner = handle; handle_data->event_loop = event_loop; handle_data->on_event = on_event; handle_data->on_event_user_data = user_data; handle_data->events_subscribed = events; handle_data->state = HANDLE_STATE_SUBSCRIBING; handle->additional_data = handle_data; /* We schedule a task to perform the actual changes to the kqueue, read on for an explanation why... * * kqueue requires separate registrations for read and write events. * If the user wants to know about both read and write, we need register once for read and once for write. * If the first registration succeeds, but the second registration fails, we need to delete the first registration. * If this all happened outside the event-thread, the successful registration's events could begin processing * in the brief window of time before the registration is deleted. 
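 * Running the registration on the event-thread (via the task initialized
 * below) serializes it with event delivery, so a partially-completed
 * registration can be rolled back before any of its events reach the user's
 * callback.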
*/ aws_task_init(&handle_data->subscribe_task, s_subscribe_task, handle_data, "kqueue_event_loop_subscribe"); s_schedule_task_now(event_loop, &handle_data->subscribe_task); return AWS_OP_SUCCESS; } static void s_free_io_event_resources(void *user_data) { struct handle_data *handle_data = user_data; struct kqueue_loop *impl = handle_data->event_loop->impl_data; impl->thread_data.connected_handle_count--; aws_mem_release(handle_data->event_loop->alloc, handle_data); } static void s_clean_up_handle_data_task(struct aws_task *task, void *user_data, enum aws_task_status status) { (void)task; (void)status; struct handle_data *handle_data = user_data; s_free_io_event_resources(handle_data); } static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on fd %d", (void *)event_loop, handle->data.fd); AWS_ASSERT(handle->additional_data); struct handle_data *handle_data = handle->additional_data; struct kqueue_loop *impl = event_loop->impl_data; AWS_ASSERT(event_loop == handle_data->event_loop); /* If the handle was successfully subscribed to kqueue, then remove it. */ if (handle_data->state == HANDLE_STATE_SUBSCRIBED) { struct kevent changelist[2]; int changelist_size = 0; if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { EV_SET( &changelist[changelist_size++], handle_data->owner->data.fd, EVFILT_READ /*filter*/, EV_DELETE /*flags*/, 0 /*fflags*/, 0 /*data*/, handle_data /*udata*/); } if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { EV_SET( &changelist[changelist_size++], handle_data->owner->data.fd, EVFILT_WRITE /*filter*/, EV_DELETE /*flags*/, 0 /*fflags*/, 0 /*data*/, handle_data /*udata*/); } kevent(impl->kq_fd, changelist, changelist_size, NULL /*eventlist*/, 0 /*nevents*/, NULL /*timeout*/); } /* Schedule a task to clean up the memory. This is done in a task to prevent the following scenario: * - While processing a batch of events, some callback unsubscribes another aws_io_handle. * - One of the other events in this batch belongs to that other aws_io_handle. * - If the handle_data were already deleted, there would be an access invalid memory. */ aws_task_init( &handle_data->cleanup_task, s_clean_up_handle_data_task, handle_data, "kqueue_event_loop_clean_up_handle_data"); aws_event_loop_schedule_task_now(event_loop, &handle_data->cleanup_task); handle_data->state = HANDLE_STATE_UNSUBSCRIBED; handle->additional_data = NULL; return AWS_OP_SUCCESS; } static bool s_is_event_thread(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; aws_thread_id_t *thread_id = aws_atomic_load_ptr(&impl->running_thread_id); return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id()); } /* Called from thread. * Takes tasks from tasks_to_schedule and adds them to the scheduler. 
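 * The list passed in is a local copy that s_process_cross_thread_data()
 * swapped out of cross_thread_data while holding the mutex, so this loop runs
 * on the event-thread without taking any lock.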
*/ static void s_process_tasks_to_schedule(struct aws_event_loop *event_loop, struct aws_linked_list *tasks_to_schedule) { struct kqueue_loop *impl = event_loop->impl_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: processing cross-thread tasks", (void *)event_loop); while (!aws_linked_list_empty(tasks_to_schedule)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(tasks_to_schedule); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: task %p pulled to event-loop, scheduling now.", (void *)event_loop, (void *)task); /* Timestamp 0 is used to denote "now" tasks */ if (task->timestamp == 0) { aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); } else { aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, task->timestamp); } } } static void s_process_cross_thread_data(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: notified of cross-thread data to process", (void *)event_loop); /* If there are tasks to schedule, grab them all out of synced_data.tasks_to_schedule. * We'll process them later, so that we minimize time spent holding the mutex. */ struct aws_linked_list tasks_to_schedule; aws_linked_list_init(&tasks_to_schedule); { /* Begin critical section */ aws_mutex_lock(&impl->cross_thread_data.mutex); impl->cross_thread_data.thread_signaled = false; bool initiate_stop = (impl->cross_thread_data.state == EVENT_THREAD_STATE_STOPPING) && (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING); if (AWS_UNLIKELY(initiate_stop)) { impl->thread_data.state = EVENT_THREAD_STATE_STOPPING; } aws_linked_list_swap_contents(&impl->cross_thread_data.tasks_to_schedule, &tasks_to_schedule); aws_mutex_unlock(&impl->cross_thread_data.mutex); } /* End critical section */ s_process_tasks_to_schedule(event_loop, &tasks_to_schedule); } static int s_aws_event_flags_from_kevent(struct kevent *kevent) { int event_flags = 0; if (kevent->flags & EV_ERROR) { event_flags |= AWS_IO_EVENT_TYPE_ERROR; } else if (kevent->filter == EVFILT_READ) { if (kevent->data != 0) { event_flags |= AWS_IO_EVENT_TYPE_READABLE; } if (kevent->flags & EV_EOF) { event_flags |= AWS_IO_EVENT_TYPE_CLOSED; } } else if (kevent->filter == EVFILT_WRITE) { if (kevent->data != 0) { event_flags |= AWS_IO_EVENT_TYPE_WRITABLE; } if (kevent->flags & EV_EOF) { event_flags |= AWS_IO_EVENT_TYPE_CLOSED; } } return event_flags; } /** * This just calls kevent() * * We broke this out into its own function so that the stacktrace clearly shows * what this thread is doing. We've had a lot of cases where users think this * thread is deadlocked because it's stuck here. We want it to be clear * that it's doing nothing on purpose. It's waiting for events to happen... */ AWS_NO_INLINE static int aws_event_loop_listen_for_io_events(int kq_fd, struct kevent kevents[MAX_EVENTS], struct timespec *timeout) { return kevent(kq_fd, NULL /*changelist*/, 0 /*nchanges*/, kevents /*eventlist*/, MAX_EVENTS /*nevents*/, timeout); } static void s_aws_kqueue_cleanup_aws_lc_thread_local_state(void *user_data) { (void)user_data; aws_cal_thread_clean_up(); } static void aws_event_loop_thread(void *user_data) { struct aws_event_loop *event_loop = user_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); struct kqueue_loop *impl = event_loop->impl_data; /* set thread id to the event-loop's thread. 
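 * Publishing thread_created_on.thread_id here is what makes
 * s_is_event_thread() return true for code running on this loop from now on.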
*/ aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_created_on.thread_id); AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); impl->thread_data.state = EVENT_THREAD_STATE_RUNNING; struct kevent kevents[MAX_EVENTS]; /* A single aws_io_handle could have two separate kevents if subscribed for both read and write. * If both the read and write kevents fire in the same loop of the event-thread, * combine the event-flags and deliver them in a single callback. * This makes the kqueue_event_loop behave more like the other platform implementations. */ struct handle_data *io_handle_events[MAX_EVENTS]; struct timespec timeout = { .tv_sec = DEFAULT_TIMEOUT_SEC, .tv_nsec = 0, }; AWS_LOGF_INFO( AWS_LS_IO_EVENT_LOOP, "id=%p: default timeout %ds, and max events to process per tick %d", (void *)event_loop, DEFAULT_TIMEOUT_SEC, MAX_EVENTS); aws_thread_current_at_exit(s_aws_kqueue_cleanup_aws_lc_thread_local_state, NULL); while (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING) { int num_io_handle_events = 0; bool should_process_cross_thread_data = false; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: waiting for a maximum of %ds %lluns", (void *)event_loop, (int)timeout.tv_sec, (unsigned long long)timeout.tv_nsec); /* Process kqueue events */ int num_kevents = aws_event_loop_listen_for_io_events(impl->kq_fd, kevents, &timeout); aws_event_loop_register_tick_start(event_loop); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, num_kevents); if (num_kevents == -1) { /* Raise an error, in case this is interesting to anyone monitoring, * and continue on with this loop. We can't process events, * but we can still process scheduled tasks */ aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); /* Force the cross_thread_data to be processed. * There might be valuable info in there, like the message to stop the thread. * It's fine to do this even if nothing has changed, it just costs a mutex lock/unlock. */ should_process_cross_thread_data = true; } for (int i = 0; i < num_kevents; ++i) { struct kevent *kevent = &kevents[i]; /* Was this event to signal that cross_thread_data has changed? */ if ((int)kevent->ident == impl->cross_thread_signal_pipe[READ_FD]) { should_process_cross_thread_data = true; /* Drain whatever data was written to the signaling pipe */ uint32_t read_whatever; while (read((int)kevent->ident, &read_whatever, sizeof(read_whatever)) > 0) { } continue; } /* Otherwise this was a normal event on a subscribed handle. Figure out which flags to report. */ int event_flags = s_aws_event_flags_from_kevent(kevent); if (event_flags == 0) { continue; } /* Combine flags, in case multiple kevents correspond to one handle. 
(see notes at top of function) */ struct handle_data *handle_data = kevent->udata; if (handle_data->events_this_loop == 0) { io_handle_events[num_io_handle_events++] = handle_data; } handle_data->events_this_loop |= event_flags; } /* Invoke each handle's event callback (unless the handle has been unsubscribed) */ for (int i = 0; i < num_io_handle_events; ++i) { struct handle_data *handle_data = io_handle_events[i]; if (handle_data->state == HANDLE_STATE_SUBSCRIBED) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: activity on fd %d, invoking handler.", (void *)event_loop, handle_data->owner->data.fd); handle_data->on_event( event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); } handle_data->events_this_loop = 0; } /* Process cross_thread_data */ if (should_process_cross_thread_data) { s_process_cross_thread_data(event_loop); } /* Run scheduled tasks */ uint64_t now_ns = 0; event_loop->clock(&now_ns); /* If clock fails, now_ns will be 0 and tasks scheduled for a specific time will not be run. That's ok, we'll handle them next time around. */ AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: running scheduled tasks.", (void *)event_loop); aws_task_scheduler_run_all(&impl->thread_data.scheduler, now_ns); /* Set timeout for next kevent() call. * If clock fails, or scheduler has no tasks, use default timeout */ bool use_default_timeout = false; int err = event_loop->clock(&now_ns); if (err) { use_default_timeout = true; } uint64_t next_run_time_ns; if (!aws_task_scheduler_has_tasks(&impl->thread_data.scheduler, &next_run_time_ns)) { use_default_timeout = true; } if (use_default_timeout) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: no more scheduled tasks using default timeout.", (void *)event_loop); timeout.tv_sec = DEFAULT_TIMEOUT_SEC; timeout.tv_nsec = 0; } else { /* Convert from timestamp in nanoseconds, to timeout in seconds with nanosecond remainder */ uint64_t timeout_ns = next_run_time_ns > now_ns ? next_run_time_ns - now_ns : 0; uint64_t timeout_remainder_ns = 0; uint64_t timeout_sec = aws_timestamp_convert(timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &timeout_remainder_ns); if (timeout_sec > LONG_MAX) { /* Check for overflow. On Darwin, these values are stored as longs */ timeout_sec = LONG_MAX; timeout_remainder_ns = 0; } AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: detected more scheduled tasks with the next occurring at " "%llu using timeout of %ds %lluns.", (void *)event_loop, (unsigned long long)timeout_ns, (int)timeout_sec, (unsigned long long)timeout_remainder_ns); timeout.tv_sec = (time_t)(timeout_sec); timeout.tv_nsec = (long)(timeout_remainder_ns); } aws_event_loop_register_tick_end(event_loop); } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop); /* reset to NULL. This should be updated again during destroy before tasks are canceled. */ aws_atomic_store_ptr(&impl->running_thread_id, NULL); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/channel.c000066400000000000000000001252371456575232400227250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
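 *
 * A channel is pinned to a single event loop and owns a chain of slots, each
 * holding a handler. Creation is asynchronous: aws_channel_new() schedules a
 * setup task on the loop and reports the outcome through on_setup_completed.
 *
 * Illustrative creation sketch (the `loop`, callback, and user-data names are
 * placeholders for the example, not definitions from this file):
 *
 *   struct aws_channel_options opts = {
 *       .event_loop = loop,
 *       .on_setup_completed = s_on_setup_completed,
 *       .setup_user_data = my_data,
 *       .on_shutdown_completed = s_on_shutdown_completed,
 *       .shutdown_user_data = my_data,
 *   };
 *   struct aws_channel *channel = aws_channel_new(allocator, &opts);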
*/ #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif static size_t s_message_pool_key = 0; /* Address of variable serves as key in hash table */ enum { KB_16 = 16 * 1024, }; size_t g_aws_channel_max_fragment_size = KB_16; #define INITIAL_STATISTIC_LIST_SIZE 5 enum aws_channel_state { AWS_CHANNEL_SETTING_UP, AWS_CHANNEL_ACTIVE, AWS_CHANNEL_SHUTTING_DOWN, AWS_CHANNEL_SHUT_DOWN, }; struct aws_shutdown_notification_task { struct aws_task task; int error_code; struct aws_channel_slot *slot; bool shutdown_immediately; }; struct shutdown_task { struct aws_channel_task task; struct aws_channel *channel; int error_code; bool shutdown_immediately; }; struct aws_channel { struct aws_allocator *alloc; struct aws_event_loop *loop; struct aws_channel_slot *first; struct aws_message_pool *msg_pool; enum aws_channel_state channel_state; struct aws_shutdown_notification_task shutdown_notify_task; aws_channel_on_shutdown_completed_fn *on_shutdown_completed; void *shutdown_user_data; struct aws_atomic_var refcount; struct aws_task deletion_task; struct aws_task statistics_task; struct aws_crt_statistics_handler *statistics_handler; uint64_t statistics_interval_start_time_ms; struct aws_array_list statistic_list; struct { struct aws_linked_list list; } channel_thread_tasks; struct { struct aws_mutex lock; struct aws_linked_list list; struct aws_task scheduling_task; struct shutdown_task shutdown_task; bool is_channel_shut_down; } cross_thread_tasks; size_t window_update_batch_emit_threshold; struct aws_channel_task window_update_task; bool read_back_pressure_enabled; bool window_update_scheduled; }; struct channel_setup_args { struct aws_allocator *alloc; struct aws_channel *channel; aws_channel_on_setup_completed_fn *on_setup_completed; void *user_data; struct aws_task task; }; static void s_on_msg_pool_removed(struct aws_event_loop_local_object *object) { struct aws_message_pool *msg_pool = object->object; AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "static: message pool %p has been purged " "from the event-loop: likely because of shutdown", (void *)msg_pool); struct aws_allocator *alloc = msg_pool->alloc; aws_message_pool_clean_up(msg_pool); aws_mem_release(alloc, msg_pool); aws_mem_release(alloc, object); } static void s_on_channel_setup_complete(struct aws_task *task, void *arg, enum aws_task_status task_status) { (void)task; struct channel_setup_args *setup_args = arg; struct aws_message_pool *message_pool = NULL; struct aws_event_loop_local_object *local_object = NULL; AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL, "id=%p: setup complete, notifying caller.", (void *)setup_args->channel); if (task_status == AWS_TASK_STATUS_RUN_READY) { struct aws_event_loop_local_object stack_obj; AWS_ZERO_STRUCT(stack_obj); local_object = &stack_obj; if (aws_event_loop_fetch_local_object(setup_args->channel->loop, &s_message_pool_key, local_object)) { local_object = aws_mem_calloc(setup_args->alloc, 1, sizeof(struct aws_event_loop_local_object)); if (!local_object) { goto cleanup_setup_args; } message_pool = aws_mem_acquire(setup_args->alloc, sizeof(struct aws_message_pool)); if (!message_pool) { goto cleanup_local_obj; } AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL, "id=%p: no message pool is currently stored in the event-loop " "local storage, adding %p with max message size %zu, " "message count 4, with 4 small blocks of 128 bytes.", (void *)setup_args->channel, (void *)message_pool, g_aws_channel_max_fragment_size); struct 
aws_message_pool_creation_args creation_args = { .application_data_msg_data_size = g_aws_channel_max_fragment_size, .application_data_msg_count = 4, .small_block_msg_count = 4, .small_block_msg_data_size = 128, }; if (aws_message_pool_init(message_pool, setup_args->alloc, &creation_args)) { goto cleanup_msg_pool_mem; } local_object->key = &s_message_pool_key; local_object->object = message_pool; local_object->on_object_removed = s_on_msg_pool_removed; if (aws_event_loop_put_local_object(setup_args->channel->loop, local_object)) { goto cleanup_msg_pool; } } else { message_pool = local_object->object; AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL, "id=%p: message pool %p found in event-loop local storage: using it.", (void *)setup_args->channel, (void *)message_pool); } setup_args->channel->msg_pool = message_pool; setup_args->channel->channel_state = AWS_CHANNEL_ACTIVE; setup_args->on_setup_completed(setup_args->channel, AWS_OP_SUCCESS, setup_args->user_data); aws_channel_release_hold(setup_args->channel); aws_mem_release(setup_args->alloc, setup_args); return; } goto cleanup_setup_args; cleanup_msg_pool: aws_message_pool_clean_up(message_pool); cleanup_msg_pool_mem: aws_mem_release(setup_args->alloc, message_pool); cleanup_local_obj: aws_mem_release(setup_args->alloc, local_object); cleanup_setup_args: setup_args->on_setup_completed(setup_args->channel, AWS_OP_ERR, setup_args->user_data); aws_channel_release_hold(setup_args->channel); aws_mem_release(setup_args->alloc, setup_args); } static void s_schedule_cross_thread_tasks(struct aws_task *task, void *arg, enum aws_task_status status); static void s_destroy_partially_constructed_channel(struct aws_channel *channel) { if (channel == NULL) { return; } aws_array_list_clean_up(&channel->statistic_list); aws_mem_release(channel->alloc, channel); } struct aws_channel *aws_channel_new(struct aws_allocator *alloc, const struct aws_channel_options *creation_args) { AWS_PRECONDITION(creation_args); AWS_PRECONDITION(creation_args->event_loop); AWS_PRECONDITION(creation_args->on_setup_completed); struct aws_channel *channel = aws_mem_calloc(alloc, 1, sizeof(struct aws_channel)); if (!channel) { return NULL; } AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL, "id=%p: Beginning creation and setup of new channel.", (void *)channel); channel->alloc = alloc; channel->loop = creation_args->event_loop; channel->on_shutdown_completed = creation_args->on_shutdown_completed; channel->shutdown_user_data = creation_args->shutdown_user_data; if (aws_array_list_init_dynamic( &channel->statistic_list, alloc, INITIAL_STATISTIC_LIST_SIZE, sizeof(struct aws_crt_statistics_base *))) { goto on_error; } /* Start refcount at 2: * 1 for self-reference, released from aws_channel_destroy() * 1 for the setup task, released when task executes */ aws_atomic_init_int(&channel->refcount, 2); struct channel_setup_args *setup_args = aws_mem_calloc(alloc, 1, sizeof(struct channel_setup_args)); if (!setup_args) { goto on_error; } channel->channel_state = AWS_CHANNEL_SETTING_UP; aws_linked_list_init(&channel->channel_thread_tasks.list); aws_linked_list_init(&channel->cross_thread_tasks.list); channel->cross_thread_tasks.lock = (struct aws_mutex)AWS_MUTEX_INIT; if (creation_args->enable_read_back_pressure) { channel->read_back_pressure_enabled = true; /* we probably only need room for one fragment, but let's avoid potential deadlocks * on things like tls that need extra head-room. 
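 * With the default 16 KB max fragment size this works out to a 32 KB batch
 * threshold.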
*/ channel->window_update_batch_emit_threshold = g_aws_channel_max_fragment_size * 2; } aws_task_init( &channel->cross_thread_tasks.scheduling_task, s_schedule_cross_thread_tasks, channel, "schedule_cross_thread_tasks"); setup_args->alloc = alloc; setup_args->channel = channel; setup_args->on_setup_completed = creation_args->on_setup_completed; setup_args->user_data = creation_args->setup_user_data; aws_task_init(&setup_args->task, s_on_channel_setup_complete, setup_args, "on_channel_setup_complete"); aws_event_loop_schedule_task_now(creation_args->event_loop, &setup_args->task); return channel; on_error: s_destroy_partially_constructed_channel(channel); return NULL; } static void s_cleanup_slot(struct aws_channel_slot *slot) { if (slot) { if (slot->handler) { aws_channel_handler_destroy(slot->handler); } aws_mem_release(slot->alloc, slot); } } void aws_channel_destroy(struct aws_channel *channel) { AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL, "id=%p: destroying channel.", (void *)channel); aws_channel_release_hold(channel); } static void s_final_channel_deletion_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_channel *channel = arg; struct aws_channel_slot *current = channel->first; if (!current || !current->handler) { /* Allow channels with no valid slots to skip shutdown process */ channel->channel_state = AWS_CHANNEL_SHUT_DOWN; } AWS_ASSERT(channel->channel_state == AWS_CHANNEL_SHUT_DOWN); while (current) { struct aws_channel_slot *tmp = current->adj_right; s_cleanup_slot(current); current = tmp; } aws_array_list_clean_up(&channel->statistic_list); aws_channel_set_statistics_handler(channel, NULL); aws_mem_release(channel->alloc, channel); } void aws_channel_acquire_hold(struct aws_channel *channel) { size_t prev_refcount = aws_atomic_fetch_add(&channel->refcount, 1); AWS_ASSERT(prev_refcount != 0); (void)prev_refcount; } void aws_channel_release_hold(struct aws_channel *channel) { size_t prev_refcount = aws_atomic_fetch_sub(&channel->refcount, 1); AWS_ASSERT(prev_refcount != 0); if (prev_refcount == 1) { /* Refcount is now 0, finish cleaning up channel memory. 
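 * If we are already on the channel's event-loop thread we tear down inline; otherwise
 * s_final_channel_deletion_task is scheduled on the channel's loop so that slots and
 * handlers are always destroyed on the channel's thread.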
*/ if (aws_channel_thread_is_callers_thread(channel)) { s_final_channel_deletion_task(NULL, channel, AWS_TASK_STATUS_RUN_READY); } else { aws_task_init(&channel->deletion_task, s_final_channel_deletion_task, channel, "final_channel_deletion"); aws_event_loop_schedule_task_now(channel->loop, &channel->deletion_task); } } } struct channel_shutdown_task_args { struct aws_channel *channel; struct aws_allocator *alloc; int error_code; struct aws_task task; }; static int s_channel_shutdown(struct aws_channel *channel, int error_code, bool shutdown_immediately); static void s_on_shutdown_completion_task(struct aws_task *task, void *arg, enum aws_task_status status); static void s_shutdown_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct shutdown_task *shutdown_task = arg; struct aws_channel *channel = shutdown_task->channel; int error_code = shutdown_task->error_code; bool shutdown_immediately = shutdown_task->shutdown_immediately; if (channel->channel_state < AWS_CHANNEL_SHUTTING_DOWN) { AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL, "id=%p: beginning shutdown process", (void *)channel); struct aws_channel_slot *slot = channel->first; channel->channel_state = AWS_CHANNEL_SHUTTING_DOWN; if (slot) { AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: shutting down slot %p (the first one) in the read direction", (void *)channel, (void *)slot); aws_channel_slot_shutdown(slot, AWS_CHANNEL_DIR_READ, error_code, shutdown_immediately); return; } channel->channel_state = AWS_CHANNEL_SHUT_DOWN; AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL, "id=%p: shutdown completed", (void *)channel); aws_mutex_lock(&channel->cross_thread_tasks.lock); channel->cross_thread_tasks.is_channel_shut_down = true; aws_mutex_unlock(&channel->cross_thread_tasks.lock); if (channel->on_shutdown_completed) { channel->shutdown_notify_task.task.fn = s_on_shutdown_completion_task; channel->shutdown_notify_task.task.arg = channel; channel->shutdown_notify_task.error_code = error_code; aws_event_loop_schedule_task_now(channel->loop, &channel->shutdown_notify_task.task); } } } static int s_channel_shutdown(struct aws_channel *channel, int error_code, bool shutdown_immediately) { bool need_to_schedule = true; aws_mutex_lock(&channel->cross_thread_tasks.lock); if (channel->cross_thread_tasks.shutdown_task.task.task_fn) { need_to_schedule = false; AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL, "id=%p: Channel shutdown is already pending, not scheduling another.", (void *)channel); } else { aws_channel_task_init( &channel->cross_thread_tasks.shutdown_task.task, s_shutdown_task, &channel->cross_thread_tasks.shutdown_task, "channel_shutdown"); channel->cross_thread_tasks.shutdown_task.shutdown_immediately = shutdown_immediately; channel->cross_thread_tasks.shutdown_task.channel = channel; channel->cross_thread_tasks.shutdown_task.error_code = error_code; } aws_mutex_unlock(&channel->cross_thread_tasks.lock); if (need_to_schedule) { AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL, "id=%p: channel shutdown task is scheduled", (void *)channel); aws_channel_schedule_task_now(channel, &channel->cross_thread_tasks.shutdown_task.task); } return AWS_OP_SUCCESS; } int aws_channel_shutdown(struct aws_channel *channel, int error_code) { return s_channel_shutdown(channel, error_code, false); } struct aws_io_message *aws_channel_acquire_message_from_pool( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint) { struct aws_io_message *message = aws_message_pool_acquire(channel->msg_pool, message_type, size_hint); if 
(AWS_LIKELY(message)) { message->owning_channel = channel; AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: acquired message %p of capacity %zu from pool %p. Requested size was %zu", (void *)channel, (void *)message, message->message_data.capacity, (void *)channel->msg_pool, size_hint); } return message; } struct aws_channel_slot *aws_channel_slot_new(struct aws_channel *channel) { struct aws_channel_slot *new_slot = aws_mem_calloc(channel->alloc, 1, sizeof(struct aws_channel_slot)); if (!new_slot) { return NULL; } AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL, "id=%p: creating new slot %p.", (void *)channel, (void *)new_slot); new_slot->alloc = channel->alloc; new_slot->channel = channel; if (!channel->first) { channel->first = new_slot; } return new_slot; } int aws_channel_current_clock_time(struct aws_channel *channel, uint64_t *time_nanos) { return aws_event_loop_current_clock_time(channel->loop, time_nanos); } int aws_channel_fetch_local_object( struct aws_channel *channel, const void *key, struct aws_event_loop_local_object *obj) { return aws_event_loop_fetch_local_object(channel->loop, (void *)key, obj); } int aws_channel_put_local_object( struct aws_channel *channel, const void *key, const struct aws_event_loop_local_object *obj) { (void)key; return aws_event_loop_put_local_object(channel->loop, (struct aws_event_loop_local_object *)obj); } int aws_channel_remove_local_object( struct aws_channel *channel, const void *key, struct aws_event_loop_local_object *removed_obj) { return aws_event_loop_remove_local_object(channel->loop, (void *)key, removed_obj); } static void s_channel_task_run(struct aws_task *task, void *arg, enum aws_task_status status) { struct aws_channel_task *channel_task = AWS_CONTAINER_OF(task, struct aws_channel_task, wrapper_task); struct aws_channel *channel = arg; /* Any task that runs after shutdown completes is considered canceled */ if (channel->channel_state == AWS_CHANNEL_SHUT_DOWN) { status = AWS_TASK_STATUS_CANCELED; } aws_linked_list_remove(&channel_task->node); channel_task->task_fn(channel_task, channel_task->arg, status); } static void s_schedule_cross_thread_tasks(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_channel *channel = arg; struct aws_linked_list cross_thread_task_list; aws_linked_list_init(&cross_thread_task_list); /* Grab contents of cross-thread task list while we have the lock */ aws_mutex_lock(&channel->cross_thread_tasks.lock); aws_linked_list_swap_contents(&channel->cross_thread_tasks.list, &cross_thread_task_list); aws_mutex_unlock(&channel->cross_thread_tasks.lock); /* If the channel has shut down since the cross-thread tasks were scheduled, run tasks immediately as canceled */ if (channel->channel_state == AWS_CHANNEL_SHUT_DOWN) { status = AWS_TASK_STATUS_CANCELED; } while (!aws_linked_list_empty(&cross_thread_task_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&cross_thread_task_list); struct aws_channel_task *channel_task = AWS_CONTAINER_OF(node, struct aws_channel_task, node); if ((channel_task->wrapper_task.timestamp == 0) || (status == AWS_TASK_STATUS_CANCELED)) { /* Run "now" tasks, and canceled tasks, immediately */ channel_task->task_fn(channel_task, channel_task->arg, status); } else { /* "Future" tasks are scheduled with the event-loop. 
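 * They are also pushed onto channel_thread_tasks.list so that s_on_shutdown_completion_task
 * can cancel anything still pending when the channel shuts down.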
*/ aws_linked_list_push_back(&channel->channel_thread_tasks.list, &channel_task->node); aws_event_loop_schedule_task_future( channel->loop, &channel_task->wrapper_task, channel_task->wrapper_task.timestamp); } } } void aws_channel_task_init( struct aws_channel_task *channel_task, aws_channel_task_fn *task_fn, void *arg, const char *type_tag) { AWS_ZERO_STRUCT(*channel_task); channel_task->task_fn = task_fn; channel_task->arg = arg; channel_task->type_tag = type_tag; } static void s_register_pending_task_in_event_loop( struct aws_channel *channel, struct aws_channel_task *channel_task, uint64_t run_at_nanos) { AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: scheduling task with wrapper task id %p.", (void *)channel, (void *)&channel_task->wrapper_task); /* If channel is shut down, run task immediately as canceled */ if (channel->channel_state == AWS_CHANNEL_SHUT_DOWN) { AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL, "id=%p: Running %s channel task immediately as canceled due to shut down channel", (void *)channel, channel_task->type_tag); channel_task->task_fn(channel_task, channel_task->arg, AWS_TASK_STATUS_CANCELED); return; } aws_linked_list_push_back(&channel->channel_thread_tasks.list, &channel_task->node); if (run_at_nanos == 0) { aws_event_loop_schedule_task_now(channel->loop, &channel_task->wrapper_task); } else { aws_event_loop_schedule_task_future( channel->loop, &channel_task->wrapper_task, channel_task->wrapper_task.timestamp); } } static void s_register_pending_task_cross_thread(struct aws_channel *channel, struct aws_channel_task *channel_task) { AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: scheduling task with wrapper task id %p from " "outside the event-loop thread.", (void *)channel, (void *)&channel_task->wrapper_task); /* Outside event-loop thread... */ bool should_cancel_task = false; /* Begin Critical Section */ aws_mutex_lock(&channel->cross_thread_tasks.lock); if (channel->cross_thread_tasks.is_channel_shut_down) { should_cancel_task = true; /* run task outside critical section to avoid deadlock */ } else { bool list_was_empty = aws_linked_list_empty(&channel->cross_thread_tasks.list); aws_linked_list_push_back(&channel->cross_thread_tasks.list, &channel_task->node); if (list_was_empty) { aws_event_loop_schedule_task_now(channel->loop, &channel->cross_thread_tasks.scheduling_task); } } aws_mutex_unlock(&channel->cross_thread_tasks.lock); /* End Critical Section */ if (should_cancel_task) { channel_task->task_fn(channel_task, channel_task->arg, AWS_TASK_STATUS_CANCELED); } } static void s_reset_pending_channel_task( struct aws_channel *channel, struct aws_channel_task *channel_task, uint64_t run_at_nanos) { /* Reset every property on channel task other than user's fn & arg.*/ aws_task_init(&channel_task->wrapper_task, s_channel_task_run, channel, channel_task->type_tag); channel_task->wrapper_task.timestamp = run_at_nanos; aws_linked_list_node_reset(&channel_task->node); } /* Common functionality for scheduling "now" and "future" tasks. 
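 * The task is dispatched inline when called from the channel's event-loop thread, and routed
 * through the locked cross_thread_tasks list otherwise. A hypothetical caller (names below are
 * illustrative only, not part of this file) would typically do:
 *   aws_channel_task_init(&my_task, s_my_task_fn, my_arg, "my_task");
 *   aws_channel_schedule_task_now(channel, &my_task);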
* For "now" tasks, pass 0 for `run_at_nanos` */ static void s_register_pending_task( struct aws_channel *channel, struct aws_channel_task *channel_task, uint64_t run_at_nanos) { s_reset_pending_channel_task(channel, channel_task, run_at_nanos); if (aws_channel_thread_is_callers_thread(channel)) { s_register_pending_task_in_event_loop(channel, channel_task, run_at_nanos); } else { s_register_pending_task_cross_thread(channel, channel_task); } } void aws_channel_schedule_task_now(struct aws_channel *channel, struct aws_channel_task *task) { s_register_pending_task(channel, task, 0); } void aws_channel_schedule_task_now_serialized(struct aws_channel *channel, struct aws_channel_task *task) { s_reset_pending_channel_task(channel, task, 0); s_register_pending_task_cross_thread(channel, task); } void aws_channel_schedule_task_future( struct aws_channel *channel, struct aws_channel_task *task, uint64_t run_at_nanos) { s_register_pending_task(channel, task, run_at_nanos); } bool aws_channel_thread_is_callers_thread(struct aws_channel *channel) { return aws_event_loop_thread_is_callers_thread(channel->loop); } static void s_update_channel_slot_message_overheads(struct aws_channel *channel) { size_t overhead = 0; struct aws_channel_slot *slot_iter = channel->first; while (slot_iter) { slot_iter->upstream_message_overhead = overhead; if (slot_iter->handler) { overhead += slot_iter->handler->vtable->message_overhead(slot_iter->handler); } slot_iter = slot_iter->adj_right; } } int aws_channel_slot_set_handler(struct aws_channel_slot *slot, struct aws_channel_handler *handler) { slot->handler = handler; slot->handler->slot = slot; s_update_channel_slot_message_overheads(slot->channel); return aws_channel_slot_increment_read_window(slot, slot->handler->vtable->initial_window_size(handler)); } int aws_channel_slot_remove(struct aws_channel_slot *slot) { if (slot->adj_right) { slot->adj_right->adj_left = slot->adj_left; if (slot == slot->channel->first) { slot->channel->first = slot->adj_right; } } if (slot->adj_left) { slot->adj_left->adj_right = slot->adj_right; } if (slot == slot->channel->first) { slot->channel->first = NULL; } s_update_channel_slot_message_overheads(slot->channel); s_cleanup_slot(slot); return AWS_OP_SUCCESS; } int aws_channel_slot_replace(struct aws_channel_slot *remove, struct aws_channel_slot *new_slot) { new_slot->adj_left = remove->adj_left; if (remove->adj_left) { remove->adj_left->adj_right = new_slot; } new_slot->adj_right = remove->adj_right; if (remove->adj_right) { remove->adj_right->adj_left = new_slot; } if (remove == remove->channel->first) { remove->channel->first = new_slot; } s_update_channel_slot_message_overheads(remove->channel); s_cleanup_slot(remove); return AWS_OP_SUCCESS; } int aws_channel_slot_insert_right(struct aws_channel_slot *slot, struct aws_channel_slot *to_add) { to_add->adj_right = slot->adj_right; if (slot->adj_right) { slot->adj_right->adj_left = to_add; } slot->adj_right = to_add; to_add->adj_left = slot; return AWS_OP_SUCCESS; } int aws_channel_slot_insert_end(struct aws_channel *channel, struct aws_channel_slot *to_add) { /* It's actually impossible there's not a first if the user went through the aws_channel_slot_new() function. * But also check that a user didn't call insert_end if it's the first slot in the channel since first would already * have been set. 
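 * If either check fails we assert (in debug builds) and raise AWS_ERROR_INVALID_STATE rather
 * than corrupting the slot list.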
*/ if (AWS_LIKELY(channel->first && channel->first != to_add)) { struct aws_channel_slot *cur = channel->first; while (cur->adj_right) { cur = cur->adj_right; } return aws_channel_slot_insert_right(cur, to_add); } AWS_ASSERT(0); return aws_raise_error(AWS_ERROR_INVALID_STATE); } int aws_channel_slot_insert_left(struct aws_channel_slot *slot, struct aws_channel_slot *to_add) { to_add->adj_left = slot->adj_left; if (slot->adj_left) { slot->adj_left->adj_right = to_add; } slot->adj_left = to_add; to_add->adj_right = slot; if (slot == slot->channel->first) { slot->channel->first = to_add; } return AWS_OP_SUCCESS; } int aws_channel_slot_send_message( struct aws_channel_slot *slot, struct aws_io_message *message, enum aws_channel_direction dir) { if (dir == AWS_CHANNEL_DIR_READ) { AWS_ASSERT(slot->adj_right); AWS_ASSERT(slot->adj_right->handler); if (!slot->channel->read_back_pressure_enabled || slot->adj_right->window_size >= message->message_data.len) { AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: sending read message of size %zu, " "from slot %p to slot %p with handler %p.", (void *)slot->channel, message->message_data.len, (void *)slot, (void *)slot->adj_right, (void *)slot->adj_right->handler); slot->adj_right->window_size -= message->message_data.len; return aws_channel_handler_process_read_message(slot->adj_right->handler, slot->adj_right, message); } AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL, "id=%p: sending message of size %zu, " "from slot %p to slot %p with handler %p, but this would exceed the channel's " "read window, this is always a programming error.", (void *)slot->channel, message->message_data.len, (void *)slot, (void *)slot->adj_right, (void *)slot->adj_right->handler); return aws_raise_error(AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW); } AWS_ASSERT(slot->adj_left); AWS_ASSERT(slot->adj_left->handler); AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: sending write message of size %zu, " "from slot %p to slot %p with handler %p.", (void *)slot->channel, message->message_data.len, (void *)slot, (void *)slot->adj_left, (void *)slot->adj_left->handler); return aws_channel_handler_process_write_message(slot->adj_left->handler, slot->adj_left, message); } struct aws_io_message *aws_channel_slot_acquire_max_message_for_write(struct aws_channel_slot *slot) { AWS_PRECONDITION(slot); AWS_PRECONDITION(slot->channel); AWS_PRECONDITION(aws_channel_thread_is_callers_thread(slot->channel)); const size_t overhead = aws_channel_slot_upstream_message_overhead(slot); if (overhead >= g_aws_channel_max_fragment_size) { AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL, "id=%p: Upstream overhead exceeds channel's max message size.", (void *)slot->channel); aws_raise_error(AWS_ERROR_INVALID_STATE); return NULL; } const size_t size_hint = g_aws_channel_max_fragment_size - overhead; return aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, size_hint); } static void s_window_update_task(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; struct aws_channel *channel = arg; channel->window_update_scheduled = false; if (status == AWS_TASK_STATUS_RUN_READY && channel->channel_state < AWS_CHANNEL_SHUTTING_DOWN) { /* get the right-most slot to start the updates. 
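 * Walking from the right-most slot back toward the first one, each slot's batched increment is
 * added to its own window and then reported upstream via aws_channel_handler_increment_read_window();
 * a failure shuts the channel down with the raised error.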
*/ struct aws_channel_slot *slot = channel->first; while (slot->adj_right) { slot = slot->adj_right; } while (slot->adj_left) { struct aws_channel_slot *upstream_slot = slot->adj_left; if (upstream_slot->handler) { slot->window_size = aws_add_size_saturating(slot->window_size, slot->current_window_update_batch_size); size_t update_size = slot->current_window_update_batch_size; slot->current_window_update_batch_size = 0; if (aws_channel_handler_increment_read_window(upstream_slot->handler, upstream_slot, update_size)) { AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL, "channel %p: channel update task failed with status %d", (void *)slot->channel, aws_last_error()); aws_channel_shutdown(channel, aws_last_error()); return; } } slot = slot->adj_left; } } } int aws_channel_slot_increment_read_window(struct aws_channel_slot *slot, size_t window) { if (slot->channel->read_back_pressure_enabled && slot->channel->channel_state < AWS_CHANNEL_SHUTTING_DOWN) { slot->current_window_update_batch_size = aws_add_size_saturating(slot->current_window_update_batch_size, window); if (!slot->channel->window_update_scheduled && slot->window_size <= slot->channel->window_update_batch_emit_threshold) { slot->channel->window_update_scheduled = true; aws_channel_task_init( &slot->channel->window_update_task, s_window_update_task, slot->channel, "window update task"); aws_channel_schedule_task_now(slot->channel, &slot->channel->window_update_task); } } return AWS_OP_SUCCESS; } int aws_channel_slot_shutdown( struct aws_channel_slot *slot, enum aws_channel_direction dir, int err_code, bool free_scarce_resources_immediately) { AWS_ASSERT(slot->handler); AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: shutting down slot %p, with handler %p " "in %s direction with error code %d", (void *)slot->channel, (void *)slot, (void *)slot->handler, (dir == AWS_CHANNEL_DIR_READ) ? 
"read" : "write", err_code); return aws_channel_handler_shutdown(slot->handler, slot, dir, err_code, free_scarce_resources_immediately); } static void s_on_shutdown_completion_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct aws_shutdown_notification_task *shutdown_notify = (struct aws_shutdown_notification_task *)task; struct aws_channel *channel = arg; AWS_ASSERT(channel->channel_state == AWS_CHANNEL_SHUT_DOWN); /* Cancel tasks that have been scheduled with the event loop */ while (!aws_linked_list_empty(&channel->channel_thread_tasks.list)) { struct aws_linked_list_node *node = aws_linked_list_front(&channel->channel_thread_tasks.list); struct aws_channel_task *channel_task = AWS_CONTAINER_OF(node, struct aws_channel_task, node); AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL, "id=%p: during shutdown, canceling task %p", (void *)channel, (void *)&channel_task->wrapper_task); /* The task will remove itself from the list when it's canceled */ aws_event_loop_cancel_task(channel->loop, &channel_task->wrapper_task); } /* Cancel off-thread tasks, which haven't made it to the event-loop thread yet */ aws_mutex_lock(&channel->cross_thread_tasks.lock); bool cancel_cross_thread_tasks = !aws_linked_list_empty(&channel->cross_thread_tasks.list); aws_mutex_unlock(&channel->cross_thread_tasks.lock); if (cancel_cross_thread_tasks) { aws_event_loop_cancel_task(channel->loop, &channel->cross_thread_tasks.scheduling_task); } AWS_ASSERT(aws_linked_list_empty(&channel->channel_thread_tasks.list)); AWS_ASSERT(aws_linked_list_empty(&channel->cross_thread_tasks.list)); channel->on_shutdown_completed(channel, shutdown_notify->error_code, channel->shutdown_user_data); } static void s_run_shutdown_write_direction(struct aws_task *task, void *arg, enum aws_task_status status) { (void)arg; (void)status; struct aws_shutdown_notification_task *shutdown_notify = (struct aws_shutdown_notification_task *)task; task->fn = NULL; task->arg = NULL; struct aws_channel_slot *slot = shutdown_notify->slot; aws_channel_handler_shutdown( slot->handler, slot, AWS_CHANNEL_DIR_WRITE, shutdown_notify->error_code, shutdown_notify->shutdown_immediately); } int aws_channel_slot_on_handler_shutdown_complete( struct aws_channel_slot *slot, enum aws_channel_direction dir, int err_code, bool free_scarce_resources_immediately) { AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL, "id=%p: handler %p shutdown in %s dir completed.", (void *)slot->channel, (void *)slot->handler, (dir == AWS_CHANNEL_DIR_READ) ? "read" : "write"); if (slot->channel->channel_state == AWS_CHANNEL_SHUT_DOWN) { return AWS_OP_SUCCESS; } if (dir == AWS_CHANNEL_DIR_READ) { if (slot->adj_right && slot->adj_right->handler) { return aws_channel_handler_shutdown( slot->adj_right->handler, slot->adj_right, dir, err_code, free_scarce_resources_immediately); } /* break the shutdown sequence so we don't have handlers having to deal with their memory disappearing out from * under them during a shutdown process. 
*/ slot->channel->shutdown_notify_task.slot = slot; slot->channel->shutdown_notify_task.shutdown_immediately = free_scarce_resources_immediately; slot->channel->shutdown_notify_task.error_code = err_code; slot->channel->shutdown_notify_task.task.fn = s_run_shutdown_write_direction; slot->channel->shutdown_notify_task.task.arg = NULL; aws_event_loop_schedule_task_now(slot->channel->loop, &slot->channel->shutdown_notify_task.task); return AWS_OP_SUCCESS; } if (slot->adj_left && slot->adj_left->handler) { return aws_channel_handler_shutdown( slot->adj_left->handler, slot->adj_left, dir, err_code, free_scarce_resources_immediately); } if (slot->channel->first == slot) { slot->channel->channel_state = AWS_CHANNEL_SHUT_DOWN; aws_mutex_lock(&slot->channel->cross_thread_tasks.lock); slot->channel->cross_thread_tasks.is_channel_shut_down = true; aws_mutex_unlock(&slot->channel->cross_thread_tasks.lock); if (slot->channel->on_shutdown_completed) { slot->channel->shutdown_notify_task.task.fn = s_on_shutdown_completion_task; slot->channel->shutdown_notify_task.task.arg = slot->channel; slot->channel->shutdown_notify_task.error_code = err_code; aws_event_loop_schedule_task_now(slot->channel->loop, &slot->channel->shutdown_notify_task.task); } } return AWS_OP_SUCCESS; } size_t aws_channel_slot_downstream_read_window(struct aws_channel_slot *slot) { AWS_ASSERT(slot->adj_right); return slot->channel->read_back_pressure_enabled ? slot->adj_right->window_size : SIZE_MAX; } size_t aws_channel_slot_upstream_message_overhead(struct aws_channel_slot *slot) { return slot->upstream_message_overhead; } void aws_channel_handler_destroy(struct aws_channel_handler *handler) { AWS_ASSERT(handler->vtable && handler->vtable->destroy); handler->vtable->destroy(handler); } int aws_channel_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { AWS_ASSERT(handler->vtable && handler->vtable->process_read_message); return handler->vtable->process_read_message(handler, slot, message); } int aws_channel_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { AWS_ASSERT(handler->vtable && handler->vtable->process_write_message); return handler->vtable->process_write_message(handler, slot, message); } int aws_channel_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { AWS_ASSERT(handler->vtable && handler->vtable->increment_read_window); return handler->vtable->increment_read_window(handler, slot, size); } int aws_channel_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { AWS_ASSERT(handler->vtable && handler->vtable->shutdown); return handler->vtable->shutdown(handler, slot, dir, error_code, free_scarce_resources_immediately); } size_t aws_channel_handler_initial_window_size(struct aws_channel_handler *handler) { AWS_ASSERT(handler->vtable && handler->vtable->initial_window_size); return handler->vtable->initial_window_size(handler); } struct aws_channel_slot *aws_channel_get_first_slot(struct aws_channel *channel) { return channel->first; } static void s_reset_statistics(struct aws_channel *channel) { AWS_FATAL_ASSERT(aws_channel_thread_is_callers_thread(channel)); struct aws_channel_slot *current_slot = channel->first; while (current_slot) { struct aws_channel_handler *handler = 
current_slot->handler; if (handler != NULL && handler->vtable->reset_statistics != NULL) { handler->vtable->reset_statistics(handler); } current_slot = current_slot->adj_right; } } static void s_channel_gather_statistics_task(struct aws_task *task, void *arg, enum aws_task_status status) { if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_channel *channel = arg; if (channel->statistics_handler == NULL) { return; } if (channel->channel_state == AWS_CHANNEL_SHUTTING_DOWN || channel->channel_state == AWS_CHANNEL_SHUT_DOWN) { return; } uint64_t now_ns = 0; if (aws_channel_current_clock_time(channel, &now_ns)) { return; } uint64_t now_ms = aws_timestamp_convert(now_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); struct aws_array_list *statistics_list = &channel->statistic_list; aws_array_list_clear(statistics_list); struct aws_channel_slot *current_slot = channel->first; while (current_slot) { struct aws_channel_handler *handler = current_slot->handler; if (handler != NULL && handler->vtable->gather_statistics != NULL) { handler->vtable->gather_statistics(handler, statistics_list); } current_slot = current_slot->adj_right; } struct aws_crt_statistics_sample_interval sample_interval = { .begin_time_ms = channel->statistics_interval_start_time_ms, .end_time_ms = now_ms}; aws_crt_statistics_handler_process_statistics( channel->statistics_handler, &sample_interval, statistics_list, channel); s_reset_statistics(channel); uint64_t reschedule_interval_ns = aws_timestamp_convert( aws_crt_statistics_handler_get_report_interval_ms(channel->statistics_handler), AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); aws_event_loop_schedule_task_future(channel->loop, task, now_ns + reschedule_interval_ns); channel->statistics_interval_start_time_ms = now_ms; } int aws_channel_set_statistics_handler(struct aws_channel *channel, struct aws_crt_statistics_handler *handler) { AWS_FATAL_ASSERT(aws_channel_thread_is_callers_thread(channel)); if (channel->statistics_handler) { aws_crt_statistics_handler_destroy(channel->statistics_handler); aws_event_loop_cancel_task(channel->loop, &channel->statistics_task); channel->statistics_handler = NULL; } if (handler != NULL) { aws_task_init(&channel->statistics_task, s_channel_gather_statistics_task, channel, "gather_statistics"); uint64_t now_ns = 0; if (aws_channel_current_clock_time(channel, &now_ns)) { return AWS_OP_ERR; } uint64_t report_time_ns = now_ns + aws_timestamp_convert( aws_crt_statistics_handler_get_report_interval_ms(handler), AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); channel->statistics_interval_start_time_ms = aws_timestamp_convert(now_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); s_reset_statistics(channel); aws_event_loop_schedule_task_future(channel->loop, &channel->statistics_task, report_time_ns); } channel->statistics_handler = handler; return AWS_OP_SUCCESS; } struct aws_event_loop *aws_channel_get_event_loop(struct aws_channel *channel) { return channel->loop; } int aws_channel_trigger_read(struct aws_channel *channel) { if (channel == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!aws_channel_thread_is_callers_thread(channel)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct aws_channel_slot *slot = channel->first; if (slot == NULL) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } struct aws_channel_handler *handler = slot->handler; if (handler == NULL) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (handler->vtable->trigger_read != NULL) { 
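/* trigger_read is an optional vtable entry; when a handler does not implement it the call is
 * simply skipped and AWS_OP_SUCCESS is still returned. */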
handler->vtable->trigger_read(handler); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/channel_bootstrap.c000066400000000000000000001643431456575232400250230ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #ifdef _MSC_VER /* non-constant aggregate initializer */ # pragma warning(disable : 4204) /* allow automatic variable to escape scope (it's intentional and we make sure it doesn't actually return before the task is finished).*/ # pragma warning(disable : 4221) #endif static void s_client_bootstrap_destroy_impl(struct aws_client_bootstrap *bootstrap) { AWS_ASSERT(bootstrap); AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: bootstrap destroying", (void *)bootstrap); aws_client_bootstrap_shutdown_complete_fn *on_shutdown_complete = bootstrap->on_shutdown_complete; void *user_data = bootstrap->user_data; aws_event_loop_group_release(bootstrap->event_loop_group); aws_host_resolver_release(bootstrap->host_resolver); aws_mem_release(bootstrap->allocator, bootstrap); if (on_shutdown_complete) { on_shutdown_complete(user_data); } } struct aws_client_bootstrap *aws_client_bootstrap_acquire(struct aws_client_bootstrap *bootstrap) { if (bootstrap != NULL) { AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: acquiring bootstrap reference", (void *)bootstrap); aws_ref_count_acquire(&bootstrap->ref_count); } return bootstrap; } void aws_client_bootstrap_release(struct aws_client_bootstrap *bootstrap) { if (bootstrap != NULL) { AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: releasing bootstrap reference", (void *)bootstrap); aws_ref_count_release(&bootstrap->ref_count); } } struct aws_client_bootstrap *aws_client_bootstrap_new( struct aws_allocator *allocator, const struct aws_client_bootstrap_options *options) { AWS_ASSERT(allocator); AWS_ASSERT(options); AWS_ASSERT(options->event_loop_group); struct aws_client_bootstrap *bootstrap = aws_mem_calloc(allocator, 1, sizeof(struct aws_client_bootstrap)); if (!bootstrap) { return NULL; } AWS_LOGF_INFO( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Initializing client bootstrap with event-loop group %p", (void *)bootstrap, (void *)options->event_loop_group); bootstrap->allocator = allocator; bootstrap->event_loop_group = aws_event_loop_group_acquire(options->event_loop_group); bootstrap->on_protocol_negotiated = NULL; aws_ref_count_init( &bootstrap->ref_count, bootstrap, (aws_simple_completion_callback *)s_client_bootstrap_destroy_impl); bootstrap->host_resolver = aws_host_resolver_acquire(options->host_resolver); bootstrap->on_shutdown_complete = options->on_shutdown_complete; bootstrap->user_data = options->user_data; if (options->host_resolution_config) { bootstrap->host_resolver_config = *options->host_resolution_config; } else { bootstrap->host_resolver_config = aws_host_resolver_init_default_resolution_config(); } return bootstrap; } int aws_client_bootstrap_set_alpn_callback( struct aws_client_bootstrap *bootstrap, aws_channel_on_protocol_negotiated_fn *on_protocol_negotiated) { AWS_ASSERT(on_protocol_negotiated); AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting ALPN callback", (void *)bootstrap); bootstrap->on_protocol_negotiated = on_protocol_negotiated; return AWS_OP_SUCCESS; } struct client_channel_data { struct aws_channel *channel; struct aws_socket *socket; struct aws_tls_connection_options tls_options; aws_channel_on_protocol_negotiated_fn 
*on_protocol_negotiated; aws_tls_on_data_read_fn *user_on_data_read; aws_tls_on_negotiation_result_fn *user_on_negotiation_result; aws_tls_on_error_fn *user_on_error; void *tls_user_data; bool use_tls; }; struct client_connection_args { struct aws_client_bootstrap *bootstrap; aws_client_bootstrap_on_channel_event_fn *creation_callback; aws_client_bootstrap_on_channel_event_fn *setup_callback; aws_client_bootstrap_on_channel_event_fn *shutdown_callback; struct client_channel_data channel_data; struct aws_socket_options outgoing_options; uint32_t outgoing_port; struct aws_string *host_name; void *user_data; uint8_t addresses_count; uint8_t failed_count; bool connection_chosen; bool setup_called; bool enable_read_back_pressure; struct aws_event_loop *requested_event_loop; /* * It is likely that all reference adjustments to the connection args take place in a single event loop * thread and are thus thread-safe. I can imagine some complex future scenarios where that might not hold true * and so it seems reasonable to switch now to a safe pattern. * */ struct aws_ref_count ref_count; }; static struct client_connection_args *s_client_connection_args_acquire(struct client_connection_args *args) { if (args != NULL) { AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL_BOOTSTRAP, "acquiring client connection args, args=%p", (void *)args); aws_ref_count_acquire(&args->ref_count); } return args; } static void s_client_connection_args_destroy(struct client_connection_args *args) { AWS_ASSERT(args); AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL_BOOTSTRAP, "destroying client connection args, args=%p", (void *)args); struct aws_allocator *allocator = args->bootstrap->allocator; aws_client_bootstrap_release(args->bootstrap); if (args->host_name) { aws_string_destroy(args->host_name); } if (args->channel_data.use_tls) { aws_tls_connection_options_clean_up(&args->channel_data.tls_options); } aws_mem_release(allocator, args); } static void s_client_connection_args_release(struct client_connection_args *args) { if (args != NULL) { AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL_BOOTSTRAP, "releasing client connection args, args=%p", (void *)args); aws_ref_count_release(&args->ref_count); } } static struct aws_event_loop *s_get_connection_event_loop(struct client_connection_args *args) { if (args == NULL) { return NULL; } if (args->requested_event_loop != NULL) { return args->requested_event_loop; } return aws_event_loop_group_get_next_loop(args->bootstrap->event_loop_group); } static void s_connect_args_setup_callback_safe( struct client_connection_args *args, int error_code, struct aws_channel *channel) { AWS_FATAL_ASSERT( (args->requested_event_loop == NULL) || aws_event_loop_thread_is_callers_thread(args->requested_event_loop)); /* setup_callback is always called exactly once */ AWS_FATAL_ASSERT(!args->setup_called); AWS_ASSERT((error_code == AWS_OP_SUCCESS) == (channel != NULL)); aws_client_bootstrap_on_channel_event_fn *setup_callback = args->setup_callback; setup_callback(args->bootstrap, error_code, channel, args->user_data); args->setup_called = true; /* if setup_callback is called with an error, we will not call shutdown_callback */ if (error_code) { args->shutdown_callback = NULL; } s_client_connection_args_release(args); } struct aws_connection_args_setup_callback_task { struct aws_allocator *allocator; struct aws_task task; struct client_connection_args *args; int error_code; struct aws_channel *channel; }; static void s_aws_connection_args_setup_callback_task_delete(struct aws_connection_args_setup_callback_task *task) { if (task == NULL) { 
return; } s_client_connection_args_release(task->args); if (task->channel) { aws_channel_release_hold(task->channel); } aws_mem_release(task->allocator, task); } void s_aws_connection_args_setup_callback_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_connection_args_setup_callback_task *callback_task = arg; if (status == AWS_TASK_STATUS_RUN_READY) { s_connect_args_setup_callback_safe(callback_task->args, callback_task->error_code, callback_task->channel); } s_aws_connection_args_setup_callback_task_delete(callback_task); } static struct aws_connection_args_setup_callback_task *s_aws_connection_args_setup_callback_task_new( struct aws_allocator *allocator, struct client_connection_args *args, int error_code, struct aws_channel *channel) { struct aws_connection_args_setup_callback_task *task = aws_mem_calloc(allocator, 1, sizeof(struct aws_connection_args_setup_callback_task)); task->allocator = allocator; task->args = s_client_connection_args_acquire(args); task->error_code = error_code; task->channel = channel; if (channel != NULL) { aws_channel_acquire_hold(channel); } aws_task_init( &task->task, s_aws_connection_args_setup_callback_task_fn, task, "safe connection args setup callback"); return task; } static void s_connection_args_setup_callback( struct client_connection_args *args, int error_code, struct aws_channel *channel) { if (args->requested_event_loop == NULL || aws_event_loop_thread_is_callers_thread(args->requested_event_loop)) { s_connect_args_setup_callback_safe(args, error_code, channel); } else { struct aws_connection_args_setup_callback_task *callback_task = s_aws_connection_args_setup_callback_task_new(args->bootstrap->allocator, args, error_code, channel); aws_event_loop_schedule_task_now(args->requested_event_loop, &callback_task->task); } } static void s_connection_args_creation_callback(struct client_connection_args *args, struct aws_channel *channel) { AWS_FATAL_ASSERT(channel != NULL); if (args->creation_callback) { args->creation_callback(args->bootstrap, AWS_ERROR_SUCCESS, channel, args->user_data); } } static void s_connection_args_shutdown_callback( struct client_connection_args *args, int error_code, struct aws_channel *channel) { if (!args->setup_called) { /* if setup_callback was not called yet, an error occurred, ensure we tell the user *SOMETHING* */ error_code = (error_code) ? 
error_code : AWS_ERROR_UNKNOWN; s_connection_args_setup_callback(args, error_code, NULL); return; } aws_client_bootstrap_on_channel_event_fn *shutdown_callback = args->shutdown_callback; if (shutdown_callback) { shutdown_callback(args->bootstrap, error_code, channel, args->user_data); } } static void s_tls_client_on_negotiation_result( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int err_code, void *user_data) { struct client_connection_args *connection_args = user_data; if (connection_args->channel_data.user_on_negotiation_result) { connection_args->channel_data.user_on_negotiation_result( handler, slot, err_code, connection_args->channel_data.tls_user_data); } AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: tls negotiation result %d on channel %p", (void *)connection_args->bootstrap, err_code, (void *)slot->channel); /* if an error occurred, the user callback will be delivered in shutdown */ if (err_code) { aws_channel_shutdown(slot->channel, err_code); return; } struct aws_channel *channel = connection_args->channel_data.channel; s_connection_args_setup_callback(connection_args, AWS_ERROR_SUCCESS, channel); } /* in the context of a channel bootstrap, we don't care about these, but since we're hooking into these APIs we have to * provide a proxy for the user actually receiving their callbacks. */ static void s_tls_client_on_data_read( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buffer, void *user_data) { struct client_connection_args *connection_args = user_data; if (connection_args->channel_data.user_on_data_read) { connection_args->channel_data.user_on_data_read( handler, slot, buffer, connection_args->channel_data.tls_user_data); } } /* in the context of a channel bootstrap, we don't care about these, but since we're hooking into these APIs we have to * provide a proxy for the user actually receiving their callbacks. */ static void s_tls_client_on_error( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int err, const char *message, void *user_data) { struct client_connection_args *connection_args = user_data; if (connection_args->channel_data.user_on_error) { connection_args->channel_data.user_on_error( handler, slot, err, message, connection_args->channel_data.tls_user_data); } } static inline int s_setup_client_tls(struct client_connection_args *connection_args, struct aws_channel *channel) { struct aws_channel_slot *tls_slot = aws_channel_slot_new(channel); /* as far as cleanup goes, since this stuff is being added to a channel, the caller will free this memory when they clean up the channel. 
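 * Concretely: once a slot has been inserted into the channel, the channel owns it (and its
 * handler); the error paths below only release the slots that were never attached.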
*/ if (!tls_slot) { return AWS_OP_ERR; } struct aws_channel_handler *tls_handler = aws_tls_client_handler_new( connection_args->bootstrap->allocator, &connection_args->channel_data.tls_options, tls_slot); if (!tls_handler) { aws_mem_release(connection_args->bootstrap->allocator, (void *)tls_slot); return AWS_OP_ERR; } aws_channel_slot_insert_end(channel, tls_slot); AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting up client TLS on channel %p with handler %p on slot %p", (void *)connection_args->bootstrap, (void *)channel, (void *)tls_handler, (void *)tls_slot); if (aws_channel_slot_set_handler(tls_slot, tls_handler) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (connection_args->channel_data.on_protocol_negotiated) { struct aws_channel_slot *alpn_slot = aws_channel_slot_new(channel); if (!alpn_slot) { return AWS_OP_ERR; } struct aws_channel_handler *alpn_handler = aws_tls_alpn_handler_new( connection_args->bootstrap->allocator, connection_args->channel_data.on_protocol_negotiated, connection_args->user_data); if (!alpn_handler) { aws_mem_release(connection_args->bootstrap->allocator, (void *)alpn_slot); return AWS_OP_ERR; } AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting up ALPN handler on channel " "%p with handler %p on slot %p", (void *)connection_args->bootstrap, (void *)channel, (void *)alpn_handler, (void *)alpn_slot); aws_channel_slot_insert_right(tls_slot, alpn_slot); if (aws_channel_slot_set_handler(alpn_slot, alpn_handler) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } } if (aws_tls_client_handler_start_negotiation(tls_handler) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_on_client_channel_on_setup_completed(struct aws_channel *channel, int error_code, void *user_data) { struct client_connection_args *connection_args = user_data; int err_code = error_code; if (!err_code) { AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: channel %p setup succeeded: bootstrapping.", (void *)connection_args->bootstrap, (void *)channel); struct aws_channel_slot *socket_slot = aws_channel_slot_new(channel); if (!socket_slot) { err_code = aws_last_error(); goto error; } struct aws_channel_handler *socket_channel_handler = aws_socket_handler_new( connection_args->bootstrap->allocator, connection_args->channel_data.socket, socket_slot, g_aws_channel_max_fragment_size); if (!socket_channel_handler) { err_code = aws_last_error(); aws_channel_slot_remove(socket_slot); socket_slot = NULL; goto error; } AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting up socket handler on channel " "%p with handler %p on slot %p.", (void *)connection_args->bootstrap, (void *)channel, (void *)socket_channel_handler, (void *)socket_slot); if (aws_channel_slot_set_handler(socket_slot, socket_channel_handler)) { err_code = aws_last_error(); goto error; } if (connection_args->channel_data.use_tls) { /* we don't want to notify the user that the channel is ready yet, since tls is still negotiating, wait * for the negotiation callback and handle it then.*/ if (s_setup_client_tls(connection_args, channel)) { err_code = aws_last_error(); goto error; } } else { s_connection_args_setup_callback(connection_args, AWS_OP_SUCCESS, channel); } return; } error: AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: channel %p setup failed with error %d.", (void *)connection_args->bootstrap, (void *)channel, err_code); aws_channel_shutdown(channel, err_code); /* the channel shutdown callback will clean the channel up */ } static void s_on_client_channel_on_shutdown(struct 
aws_channel *channel, int error_code, void *user_data) { struct client_connection_args *connection_args = user_data; AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: channel %p shutdown with error %d.", (void *)connection_args->bootstrap, (void *)channel, error_code); /* note it's not safe to reference the bootstrap after the callback. */ struct aws_allocator *allocator = connection_args->bootstrap->allocator; s_connection_args_shutdown_callback(connection_args, error_code, channel); aws_channel_destroy(channel); aws_socket_clean_up(connection_args->channel_data.socket); aws_mem_release(allocator, connection_args->channel_data.socket); s_client_connection_args_release(connection_args); } static bool s_aws_socket_domain_uses_dns(enum aws_socket_domain domain) { return domain == AWS_SOCKET_IPV4 || domain == AWS_SOCKET_IPV6; } static void s_on_client_connection_established(struct aws_socket *socket, int error_code, void *user_data) { struct client_connection_args *connection_args = user_data; AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: client connection on socket %p completed with error %d.", (void *)connection_args->bootstrap, (void *)socket, error_code); if (error_code) { connection_args->failed_count++; } if (error_code || connection_args->connection_chosen) { if (s_aws_socket_domain_uses_dns(connection_args->outgoing_options.domain) && error_code) { struct aws_host_address host_address; host_address.host = connection_args->host_name; host_address.address = aws_string_new_from_c_str(connection_args->bootstrap->allocator, socket->remote_endpoint.address); host_address.record_type = connection_args->outgoing_options.domain == AWS_SOCKET_IPV6 ? AWS_ADDRESS_RECORD_TYPE_AAAA : AWS_ADDRESS_RECORD_TYPE_A; if (host_address.address) { AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: recording bad address %s.", (void *)connection_args->bootstrap, socket->remote_endpoint.address); aws_host_resolver_record_connection_failure(connection_args->bootstrap->host_resolver, &host_address); aws_string_destroy((void *)host_address.address); } } AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: releasing socket %p either because we already have a " "successful connection or because it errored out.", (void *)connection_args->bootstrap, (void *)socket); aws_socket_close(socket); aws_socket_clean_up(socket); aws_mem_release(connection_args->bootstrap->allocator, socket); /* if this is the last attempted connection and it failed, notify the user */ if (connection_args->failed_count == connection_args->addresses_count) { AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Connection failed with error_code %d.", (void *)connection_args->bootstrap, error_code); /* connection_args will be released after setup_callback */ s_connection_args_setup_callback(connection_args, error_code, NULL); } /* every connection task adds a ref, so every failure or cancel needs to dec one */ s_client_connection_args_release(connection_args); return; } connection_args->connection_chosen = true; connection_args->channel_data.socket = socket; struct aws_channel_options args = { .on_setup_completed = s_on_client_channel_on_setup_completed, .setup_user_data = connection_args, .shutdown_user_data = connection_args, .on_shutdown_completed = s_on_client_channel_on_shutdown, }; args.enable_read_back_pressure = connection_args->enable_read_back_pressure; args.event_loop = aws_socket_get_event_loop(socket); AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Successful connection, creating a new channel using socket %p.", 
(void *)connection_args->bootstrap, (void *)socket); connection_args->channel_data.channel = aws_channel_new(connection_args->bootstrap->allocator, &args); if (!connection_args->channel_data.channel) { aws_socket_clean_up(socket); aws_mem_release(connection_args->bootstrap->allocator, connection_args->channel_data.socket); connection_args->failed_count++; /* if this is the last attempted connection and it failed, notify the user */ if (connection_args->failed_count == connection_args->addresses_count) { s_connection_args_setup_callback(connection_args, error_code, NULL); } } else { s_connection_args_creation_callback(connection_args, connection_args->channel_data.channel); } } struct connection_task_data { struct aws_task task; struct aws_socket_endpoint endpoint; struct aws_socket_options options; struct aws_host_address host_address; struct client_connection_args *args; struct aws_event_loop *connect_loop; }; static void s_attempt_connection(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct connection_task_data *task_data = arg; struct aws_allocator *allocator = task_data->args->bootstrap->allocator; int err_code = 0; if (status != AWS_TASK_STATUS_RUN_READY) { goto task_cancelled; } struct aws_socket *outgoing_socket = aws_mem_acquire(allocator, sizeof(struct aws_socket)); if (aws_socket_init(outgoing_socket, allocator, &task_data->options)) { goto socket_init_failed; } if (aws_socket_connect( outgoing_socket, &task_data->endpoint, task_data->connect_loop, s_on_client_connection_established, task_data->args)) { goto socket_connect_failed; } goto cleanup_task; socket_connect_failed: aws_host_resolver_record_connection_failure(task_data->args->bootstrap->host_resolver, &task_data->host_address); aws_socket_clean_up(outgoing_socket); socket_init_failed: aws_mem_release(allocator, outgoing_socket); task_cancelled: err_code = aws_last_error(); task_data->args->failed_count++; /* if this is the last attempted connection and it failed, notify the user */ if (task_data->args->failed_count == task_data->args->addresses_count) { AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Last attempt failed to create socket with error %d", (void *)task_data->args->bootstrap, err_code); s_connection_args_setup_callback(task_data->args, err_code, NULL); } else { AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Socket connect attempt %d/%d failed with error %d. More attempts ongoing...", (void *)task_data->args->bootstrap, task_data->args->failed_count, task_data->args->addresses_count, err_code); } s_client_connection_args_release(task_data->args); cleanup_task: aws_host_address_clean_up(&task_data->host_address); aws_mem_release(allocator, task_data); } static void s_on_host_resolved( struct aws_host_resolver *resolver, const struct aws_string *host_name, int err_code, const struct aws_array_list *host_addresses, void *user_data) { (void)resolver; (void)host_name; struct client_connection_args *client_connection_args = user_data; struct aws_allocator *allocator = client_connection_args->bootstrap->allocator; if (err_code) { AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: dns resolution failed, or all socket connections to the endpoint failed.", (void *)client_connection_args->bootstrap); s_connection_args_setup_callback(client_connection_args, err_code, NULL); return; } size_t host_addresses_len = aws_array_list_length(host_addresses); AWS_FATAL_ASSERT(host_addresses_len > 0); AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: dns resolution completed. 
Kicking off connections" " on %llu addresses. First one back wins.", (void *)client_connection_args->bootstrap, (unsigned long long)host_addresses_len); /* use this event loop for all outgoing connection attempts (only one will ultimately win). */ struct aws_event_loop *connect_loop = s_get_connection_event_loop(client_connection_args); client_connection_args->addresses_count = (uint8_t)host_addresses_len; /* allocate all the task data first, in case it fails... */ AWS_VARIABLE_LENGTH_ARRAY(struct connection_task_data *, tasks, host_addresses_len); for (size_t i = 0; i < host_addresses_len; ++i) { struct connection_task_data *task_data = tasks[i] = aws_mem_calloc(allocator, 1, sizeof(struct connection_task_data)); bool failed = task_data == NULL; if (!failed) { struct aws_host_address *host_address_ptr = NULL; aws_array_list_get_at_ptr(host_addresses, (void **)&host_address_ptr, i); task_data->endpoint.port = client_connection_args->outgoing_port; AWS_ASSERT(sizeof(task_data->endpoint.address) >= host_address_ptr->address->len + 1); memcpy( task_data->endpoint.address, aws_string_bytes(host_address_ptr->address), host_address_ptr->address->len); task_data->endpoint.address[host_address_ptr->address->len] = 0; task_data->options = client_connection_args->outgoing_options; task_data->options.domain = host_address_ptr->record_type == AWS_ADDRESS_RECORD_TYPE_AAAA ? AWS_SOCKET_IPV6 : AWS_SOCKET_IPV4; failed = aws_host_address_copy(host_address_ptr, &task_data->host_address) != AWS_OP_SUCCESS; task_data->args = client_connection_args; task_data->connect_loop = connect_loop; } if (failed) { for (size_t j = 0; j <= i; ++j) { if (tasks[j]) { aws_host_address_clean_up(&tasks[j]->host_address); aws_mem_release(allocator, tasks[j]); } } int alloc_err_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: failed to allocate connection task data: err=%d", (void *)client_connection_args->bootstrap, alloc_err_code); s_connection_args_setup_callback(client_connection_args, alloc_err_code, NULL); return; } } /* ...then schedule all the tasks, which cannot fail */ for (size_t i = 0; i < host_addresses_len; ++i) { struct connection_task_data *task_data = tasks[i]; /** * Acquire on the connection args to make sure connection args outlive the tasks to attempt connection. * * Once upon a time, the connection attempt tasks were scheduled right after acquiring the connection args, * which lead to a crash that when the attempt connection tasks run and the attempt connection succeed and * closed before the other tasks can acquire on the connection args, the connection args had be destroyed before * acquire and lead to a crash. 
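 * Hence the two-pass loop below: first take a reference for every pending connection attempt,
 * and only then schedule the attempt tasks.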
*/ s_client_connection_args_acquire(task_data->args); } for (size_t i = 0; i < host_addresses_len; ++i) { struct connection_task_data *task_data = tasks[i]; aws_task_init(&task_data->task, s_attempt_connection, task_data, "attempt_connection"); aws_event_loop_schedule_task_now(connect_loop, &task_data->task); } } static bool s_does_event_loop_belong_to_event_loop_group( struct aws_event_loop *loop, struct aws_event_loop_group *elg) { if (loop == NULL || elg == NULL) { return false; } size_t loop_count = aws_event_loop_group_get_loop_count(elg); for (size_t i = 0; i < loop_count; ++i) { struct aws_event_loop *elg_loop = aws_event_loop_group_get_loop_at(elg, i); if (elg_loop == loop) { return true; } } return false; } int aws_client_bootstrap_new_socket_channel(struct aws_socket_channel_bootstrap_options *options) { struct aws_client_bootstrap *bootstrap = options->bootstrap; AWS_FATAL_ASSERT(options->setup_callback); AWS_FATAL_ASSERT(options->shutdown_callback); AWS_FATAL_ASSERT(bootstrap); const struct aws_socket_options *socket_options = options->socket_options; AWS_FATAL_ASSERT(socket_options != NULL); const struct aws_tls_connection_options *tls_options = options->tls_options; AWS_FATAL_ASSERT(tls_options == NULL || socket_options->type == AWS_SOCKET_STREAM); aws_io_fatal_assert_library_initialized(); if (options->requested_event_loop != NULL) { /* If we're asking for a specific event loop, verify it belongs to the bootstrap's event loop group */ if (!(s_does_event_loop_belong_to_event_loop_group( options->requested_event_loop, bootstrap->event_loop_group))) { return aws_raise_error(AWS_ERROR_IO_PINNED_EVENT_LOOP_MISMATCH); } } struct client_connection_args *client_connection_args = aws_mem_calloc(bootstrap->allocator, 1, sizeof(struct client_connection_args)); if (!client_connection_args) { return AWS_OP_ERR; } const char *host_name = options->host_name; uint32_t port = options->port; AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: attempting to initialize a new client channel to %s:%u", (void *)bootstrap, host_name, port); aws_ref_count_init( &client_connection_args->ref_count, client_connection_args, (aws_simple_completion_callback *)s_client_connection_args_destroy); client_connection_args->user_data = options->user_data; client_connection_args->bootstrap = aws_client_bootstrap_acquire(bootstrap); client_connection_args->creation_callback = options->creation_callback; client_connection_args->setup_callback = options->setup_callback; client_connection_args->shutdown_callback = options->shutdown_callback; client_connection_args->outgoing_options = *socket_options; client_connection_args->outgoing_port = port; client_connection_args->enable_read_back_pressure = options->enable_read_back_pressure; client_connection_args->requested_event_loop = options->requested_event_loop; if (tls_options) { if (aws_tls_connection_options_copy(&client_connection_args->channel_data.tls_options, tls_options)) { goto error; } client_connection_args->channel_data.use_tls = true; client_connection_args->channel_data.on_protocol_negotiated = bootstrap->on_protocol_negotiated; client_connection_args->channel_data.tls_user_data = tls_options->user_data; /* in order to honor any callbacks a user may have installed on their tls_connection_options, * we need to wrap them if they were set.*/ if (bootstrap->on_protocol_negotiated) { client_connection_args->channel_data.tls_options.advertise_alpn_message = true; } if (tls_options->on_data_read) { client_connection_args->channel_data.user_on_data_read = 
tls_options->on_data_read; client_connection_args->channel_data.tls_options.on_data_read = s_tls_client_on_data_read; } if (tls_options->on_error) { client_connection_args->channel_data.user_on_error = tls_options->on_error; client_connection_args->channel_data.tls_options.on_error = s_tls_client_on_error; } if (tls_options->on_negotiation_result) { client_connection_args->channel_data.user_on_negotiation_result = tls_options->on_negotiation_result; } client_connection_args->channel_data.tls_options.on_negotiation_result = s_tls_client_on_negotiation_result; client_connection_args->channel_data.tls_options.user_data = client_connection_args; } if (s_aws_socket_domain_uses_dns(socket_options->domain)) { client_connection_args->host_name = aws_string_new_from_c_str(bootstrap->allocator, host_name); if (!client_connection_args->host_name) { goto error; } const struct aws_host_resolution_config *host_resolution_config = &bootstrap->host_resolver_config; if (options->host_resolution_override_config) { host_resolution_config = options->host_resolution_override_config; } if (aws_host_resolver_resolve_host( bootstrap->host_resolver, client_connection_args->host_name, s_on_host_resolved, host_resolution_config, client_connection_args)) { goto error; } } else { /* ensure that the pipe/domain socket name will fit in the endpoint address */ const size_t host_name_len = strlen(host_name); if (host_name_len >= AWS_ADDRESS_MAX_LEN) { aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); goto error; } struct aws_socket_endpoint endpoint; AWS_ZERO_STRUCT(endpoint); memcpy(endpoint.address, host_name, host_name_len); if (socket_options->domain == AWS_SOCKET_VSOCK) { endpoint.port = port; } else { endpoint.port = 0; } struct aws_socket *outgoing_socket = aws_mem_acquire(bootstrap->allocator, sizeof(struct aws_socket)); if (!outgoing_socket) { goto error; } if (aws_socket_init(outgoing_socket, bootstrap->allocator, socket_options)) { aws_mem_release(bootstrap->allocator, outgoing_socket); goto error; } client_connection_args->addresses_count = 1; struct aws_event_loop *connect_loop = s_get_connection_event_loop(client_connection_args); s_client_connection_args_acquire(client_connection_args); if (aws_socket_connect( outgoing_socket, &endpoint, connect_loop, s_on_client_connection_established, client_connection_args)) { aws_socket_clean_up(outgoing_socket); aws_mem_release(client_connection_args->bootstrap->allocator, outgoing_socket); s_client_connection_args_release(client_connection_args); goto error; } } return AWS_OP_SUCCESS; error: if (client_connection_args) { /* tls opt will also be freed when we clean up the connection arg */ s_client_connection_args_release(client_connection_args); } return AWS_OP_ERR; } void s_server_bootstrap_destroy_impl(struct aws_server_bootstrap *bootstrap) { AWS_ASSERT(bootstrap); aws_event_loop_group_release(bootstrap->event_loop_group); aws_mem_release(bootstrap->allocator, bootstrap); } struct aws_server_bootstrap *aws_server_bootstrap_acquire(struct aws_server_bootstrap *bootstrap) { if (bootstrap != NULL) { aws_ref_count_acquire(&bootstrap->ref_count); } return bootstrap; } void aws_server_bootstrap_release(struct aws_server_bootstrap *bootstrap) { /* if destroy is being called, the user intends to not use the bootstrap anymore * so we clean up the thread local state while the event loop thread is * still alive */ AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: releasing server bootstrap reference", (void *)bootstrap); if (bootstrap != NULL) { 
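/* releasing the final reference runs s_server_bootstrap_destroy_impl, which releases the event loop group and frees the bootstrap */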
aws_ref_count_release(&bootstrap->ref_count); } } struct aws_server_bootstrap *aws_server_bootstrap_new( struct aws_allocator *allocator, struct aws_event_loop_group *el_group) { AWS_ASSERT(allocator); AWS_ASSERT(el_group); struct aws_server_bootstrap *bootstrap = aws_mem_calloc(allocator, 1, sizeof(struct aws_server_bootstrap)); if (!bootstrap) { return NULL; } AWS_LOGF_INFO( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Initializing server bootstrap with event-loop group %p", (void *)bootstrap, (void *)el_group); bootstrap->allocator = allocator; bootstrap->event_loop_group = aws_event_loop_group_acquire(el_group); bootstrap->on_protocol_negotiated = NULL; aws_ref_count_init( &bootstrap->ref_count, bootstrap, (aws_simple_completion_callback *)s_server_bootstrap_destroy_impl); return bootstrap; } struct server_connection_args { struct aws_server_bootstrap *bootstrap; struct aws_socket listener; aws_server_bootstrap_on_accept_channel_setup_fn *incoming_callback; aws_server_bootstrap_on_accept_channel_shutdown_fn *shutdown_callback; aws_server_bootstrap_on_server_listener_destroy_fn *destroy_callback; struct aws_tls_connection_options tls_options; aws_channel_on_protocol_negotiated_fn *on_protocol_negotiated; aws_tls_on_data_read_fn *user_on_data_read; aws_tls_on_negotiation_result_fn *user_on_negotiation_result; aws_tls_on_error_fn *user_on_error; struct aws_task listener_destroy_task; void *tls_user_data; void *user_data; bool use_tls; bool enable_read_back_pressure; struct aws_ref_count ref_count; }; struct server_channel_data { struct aws_channel *channel; struct aws_socket *socket; struct server_connection_args *server_connection_args; bool incoming_called; }; static struct server_connection_args *s_server_connection_args_acquire(struct server_connection_args *args) { if (args != NULL) { aws_ref_count_acquire(&args->ref_count); } return args; } static void s_server_connection_args_destroy(struct server_connection_args *args) { if (args == NULL) { return; } /* fire the destroy callback */ if (args->destroy_callback) { args->destroy_callback(args->bootstrap, args->user_data); } struct aws_allocator *allocator = args->bootstrap->allocator; aws_server_bootstrap_release(args->bootstrap); if (args->use_tls) { aws_tls_connection_options_clean_up(&args->tls_options); } aws_mem_release(allocator, args); } static void s_server_connection_args_release(struct server_connection_args *args) { if (args != NULL) { aws_ref_count_release(&args->ref_count); } } static void s_server_incoming_callback( struct server_channel_data *channel_data, int error_code, struct aws_channel *channel) { /* incoming_callback is always called exactly once for each channel */ AWS_ASSERT(!channel_data->incoming_called); struct server_connection_args *args = channel_data->server_connection_args; args->incoming_callback(args->bootstrap, error_code, channel, args->user_data); channel_data->incoming_called = true; } static void s_tls_server_on_negotiation_result( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int err_code, void *user_data) { struct server_channel_data *channel_data = user_data; struct server_connection_args *connection_args = channel_data->server_connection_args; if (connection_args->user_on_negotiation_result) { connection_args->user_on_negotiation_result(handler, slot, err_code, connection_args->tls_user_data); } AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: tls negotiation result %d on channel %p", (void *)connection_args->bootstrap, err_code, (void *)slot->channel); struct aws_channel 
*channel = slot->channel; if (err_code) { /* shut down the channel */ aws_channel_shutdown(channel, err_code); } else { s_server_incoming_callback(channel_data, err_code, channel); } } /* in the context of a channel bootstrap, we don't care about these, but since we're hooking into these APIs we have to * provide a proxy for the user actually receiving their callbacks. */ static void s_tls_server_on_data_read( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buffer, void *user_data) { struct server_connection_args *connection_args = user_data; if (connection_args->user_on_data_read) { connection_args->user_on_data_read(handler, slot, buffer, connection_args->tls_user_data); } } /* in the context of a channel bootstrap, we don't care about these, but since we're hooking into these APIs we have to * provide a proxy for the user actually receiving their callbacks. */ static void s_tls_server_on_error( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int err, const char *message, void *user_data) { struct server_connection_args *connection_args = user_data; if (connection_args->user_on_error) { connection_args->user_on_error(handler, slot, err, message, connection_args->tls_user_data); } } static inline int s_setup_server_tls(struct server_channel_data *channel_data, struct aws_channel *channel) { struct aws_channel_slot *tls_slot = NULL; struct aws_channel_handler *tls_handler = NULL; struct server_connection_args *connection_args = channel_data->server_connection_args; /* as far as cleanup goes here, since we're adding things to a channel, if a slot is ever successfully added to the channel, we leave it there. The caller will clean up the channel and it will clean this memory up as well. */ tls_slot = aws_channel_slot_new(channel); if (!tls_slot) { return AWS_OP_ERR; } /* Shallow-copy tls_options so we can override the user_data, making it specific to this channel */ struct aws_tls_connection_options tls_options = connection_args->tls_options; tls_options.user_data = channel_data; tls_handler = aws_tls_server_handler_new(connection_args->bootstrap->allocator, &tls_options, tls_slot); if (!tls_handler) { aws_mem_release(connection_args->bootstrap->allocator, tls_slot); return AWS_OP_ERR; } AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting up server TLS on channel %p with handler %p on slot %p", (void *)connection_args->bootstrap, (void *)channel, (void *)tls_handler, (void *)tls_slot); aws_channel_slot_insert_end(channel, tls_slot); if (aws_channel_slot_set_handler(tls_slot, tls_handler)) { return AWS_OP_ERR; } if (connection_args->on_protocol_negotiated) { struct aws_channel_slot *alpn_slot = NULL; struct aws_channel_handler *alpn_handler = NULL; alpn_slot = aws_channel_slot_new(channel); if (!alpn_slot) { return AWS_OP_ERR; } alpn_handler = aws_tls_alpn_handler_new( connection_args->bootstrap->allocator, connection_args->on_protocol_negotiated, connection_args->user_data); if (!alpn_handler) { aws_channel_slot_remove(alpn_slot); return AWS_OP_ERR; } AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting up ALPN handler on channel " "%p with handler %p on slot %p", (void *)connection_args->bootstrap, (void *)channel, (void *)alpn_handler, (void *)alpn_slot); aws_channel_slot_insert_right(tls_slot, alpn_slot); if (aws_channel_slot_set_handler(alpn_slot, alpn_handler)) { return AWS_OP_ERR; } } /* * Server-side channels can reach this point in execution and actually have the CLIENT_HELLO payload already * on the socket 
in a signalled state, but there was no socket handler or read handler at the time of signal. * So we need to manually trigger a read here to cover that case, otherwise the negotiation will time out because * we will not receive any more data/notifications (unless we read and react). */ if (aws_channel_trigger_read(channel)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_on_server_channel_on_setup_completed(struct aws_channel *channel, int error_code, void *user_data) { struct server_channel_data *channel_data = user_data; int err_code = error_code; if (err_code) { /* channel fail to set up no destroy callback will fire */ AWS_LOGF_ERROR( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: channel %p setup failed with error %d.", (void *)channel_data->server_connection_args->bootstrap, (void *)channel, err_code); aws_channel_destroy(channel); struct aws_allocator *allocator = channel_data->socket->allocator; aws_socket_clean_up(channel_data->socket); aws_mem_release(allocator, (void *)channel_data->socket); s_server_incoming_callback(channel_data, err_code, NULL); aws_mem_release(channel_data->server_connection_args->bootstrap->allocator, channel_data); /* no shutdown call back will be fired, we release the ref_count of connection arg here */ s_server_connection_args_release(channel_data->server_connection_args); return; } AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: channel %p setup succeeded: bootstrapping.", (void *)channel_data->server_connection_args->bootstrap, (void *)channel); struct aws_channel_slot *socket_slot = aws_channel_slot_new(channel); if (!socket_slot) { err_code = aws_last_error(); goto error; } struct aws_channel_handler *socket_channel_handler = aws_socket_handler_new( channel_data->server_connection_args->bootstrap->allocator, channel_data->socket, socket_slot, g_aws_channel_max_fragment_size); if (!socket_channel_handler) { err_code = aws_last_error(); aws_channel_slot_remove(socket_slot); socket_slot = NULL; goto error; } AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting up socket handler on channel " "%p with handler %p on slot %p.", (void *)channel_data->server_connection_args->bootstrap, (void *)channel, (void *)socket_channel_handler, (void *)socket_slot); if (aws_channel_slot_set_handler(socket_slot, socket_channel_handler)) { err_code = aws_last_error(); goto error; } if (channel_data->server_connection_args->use_tls) { /* incoming callback will be invoked upon the negotiation completion so don't do it * here. */ if (s_setup_server_tls(channel_data, channel)) { err_code = aws_last_error(); goto error; } } else { s_server_incoming_callback(channel_data, AWS_OP_SUCCESS, channel); } return; error: /* shut down the channel */ aws_channel_shutdown(channel, err_code); } static void s_on_server_channel_on_shutdown(struct aws_channel *channel, int error_code, void *user_data) { struct server_channel_data *channel_data = user_data; struct server_connection_args *args = channel_data->server_connection_args; AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: channel %p shutdown with error %d.", (void *)args->bootstrap, (void *)channel, error_code); void *server_shutdown_user_data = args->user_data; struct aws_server_bootstrap *server_bootstrap = args->bootstrap; struct aws_allocator *allocator = server_bootstrap->allocator; if (!channel_data->incoming_called) { error_code = (error_code) ? 
error_code : AWS_ERROR_UNKNOWN; s_server_incoming_callback(channel_data, error_code, NULL); } else { args->shutdown_callback(server_bootstrap, error_code, channel, server_shutdown_user_data); } aws_channel_destroy(channel); aws_socket_clean_up(channel_data->socket); aws_mem_release(allocator, channel_data->socket); s_server_connection_args_release(channel_data->server_connection_args); aws_mem_release(allocator, channel_data); } void s_on_server_connection_result( struct aws_socket *socket, int error_code, struct aws_socket *new_socket, void *user_data) { (void)socket; struct server_connection_args *connection_args = user_data; s_server_connection_args_acquire(connection_args); AWS_LOGF_DEBUG( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: server connection on socket %p completed with error %d.", (void *)connection_args->bootstrap, (void *)socket, error_code); if (!error_code) { AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: creating a new channel for incoming " "connection using socket %p.", (void *)connection_args->bootstrap, (void *)socket); struct server_channel_data *channel_data = aws_mem_calloc(connection_args->bootstrap->allocator, 1, sizeof(struct server_channel_data)); if (!channel_data) { goto error_cleanup; } channel_data->incoming_called = false; channel_data->socket = new_socket; channel_data->server_connection_args = connection_args; struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(connection_args->bootstrap->event_loop_group); struct aws_channel_options channel_args = { .on_setup_completed = s_on_server_channel_on_setup_completed, .setup_user_data = channel_data, .shutdown_user_data = channel_data, .on_shutdown_completed = s_on_server_channel_on_shutdown, }; channel_args.event_loop = event_loop; channel_args.enable_read_back_pressure = channel_data->server_connection_args->enable_read_back_pressure; if (aws_socket_assign_to_event_loop(new_socket, event_loop)) { aws_mem_release(connection_args->bootstrap->allocator, (void *)channel_data); goto error_cleanup; } channel_data->channel = aws_channel_new(connection_args->bootstrap->allocator, &channel_args); if (!channel_data->channel) { aws_mem_release(connection_args->bootstrap->allocator, (void *)channel_data); goto error_cleanup; } } else { /* no channel is created */ connection_args->incoming_callback(connection_args->bootstrap, error_code, NULL, connection_args->user_data); s_server_connection_args_release(connection_args); } return; error_cleanup: /* no channel is created */ connection_args->incoming_callback(connection_args->bootstrap, aws_last_error(), NULL, connection_args->user_data); struct aws_allocator *allocator = new_socket->allocator; aws_socket_clean_up(new_socket); aws_mem_release(allocator, (void *)new_socket); s_server_connection_args_release(connection_args); } static void s_listener_destroy_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; (void)task; struct server_connection_args *server_connection_args = arg; aws_socket_stop_accept(&server_connection_args->listener); aws_socket_clean_up(&server_connection_args->listener); s_server_connection_args_release(server_connection_args); } struct aws_socket *aws_server_bootstrap_new_socket_listener( const struct aws_server_socket_channel_bootstrap_options *bootstrap_options) { AWS_PRECONDITION(bootstrap_options); AWS_PRECONDITION(bootstrap_options->bootstrap); AWS_PRECONDITION(bootstrap_options->incoming_callback); AWS_PRECONDITION(bootstrap_options->shutdown_callback); struct server_connection_args 
*server_connection_args = aws_mem_calloc(bootstrap_options->bootstrap->allocator, 1, sizeof(struct server_connection_args)); if (!server_connection_args) { return NULL; } AWS_LOGF_INFO( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: attempting to initialize a new " "server socket listener for %s:%u", (void *)bootstrap_options->bootstrap, bootstrap_options->host_name, bootstrap_options->port); aws_ref_count_init( &server_connection_args->ref_count, server_connection_args, (aws_simple_completion_callback *)s_server_connection_args_destroy); server_connection_args->user_data = bootstrap_options->user_data; server_connection_args->bootstrap = aws_server_bootstrap_acquire(bootstrap_options->bootstrap); server_connection_args->shutdown_callback = bootstrap_options->shutdown_callback; server_connection_args->incoming_callback = bootstrap_options->incoming_callback; server_connection_args->destroy_callback = bootstrap_options->destroy_callback; server_connection_args->on_protocol_negotiated = bootstrap_options->bootstrap->on_protocol_negotiated; server_connection_args->enable_read_back_pressure = bootstrap_options->enable_read_back_pressure; aws_task_init( &server_connection_args->listener_destroy_task, s_listener_destroy_task, server_connection_args, "listener socket destroy"); if (bootstrap_options->tls_options) { AWS_LOGF_INFO( AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: using tls on listener", (void *)bootstrap_options->tls_options); if (aws_tls_connection_options_copy(&server_connection_args->tls_options, bootstrap_options->tls_options)) { goto cleanup_server_connection_args; } server_connection_args->use_tls = true; server_connection_args->tls_user_data = bootstrap_options->tls_options->user_data; /* in order to honor any callbacks a user may have installed on their tls_connection_options, * we need to wrap them if they were set.*/ if (bootstrap_options->bootstrap->on_protocol_negotiated) { server_connection_args->tls_options.advertise_alpn_message = true; } if (bootstrap_options->tls_options->on_data_read) { server_connection_args->user_on_data_read = bootstrap_options->tls_options->on_data_read; server_connection_args->tls_options.on_data_read = s_tls_server_on_data_read; } if (bootstrap_options->tls_options->on_error) { server_connection_args->user_on_error = bootstrap_options->tls_options->on_error; server_connection_args->tls_options.on_error = s_tls_server_on_error; } if (bootstrap_options->tls_options->on_negotiation_result) { server_connection_args->user_on_negotiation_result = bootstrap_options->tls_options->on_negotiation_result; } server_connection_args->tls_options.on_negotiation_result = s_tls_server_on_negotiation_result; server_connection_args->tls_options.user_data = server_connection_args; } struct aws_event_loop *connection_loop = aws_event_loop_group_get_next_loop(bootstrap_options->bootstrap->event_loop_group); if (aws_socket_init( &server_connection_args->listener, bootstrap_options->bootstrap->allocator, bootstrap_options->socket_options)) { goto cleanup_server_connection_args; } struct aws_socket_endpoint endpoint; AWS_ZERO_STRUCT(endpoint); size_t host_name_len = 0; if (aws_secure_strlen(bootstrap_options->host_name, sizeof(endpoint.address), &host_name_len)) { goto cleanup_server_connection_args; } memcpy(endpoint.address, bootstrap_options->host_name, host_name_len); endpoint.port = bootstrap_options->port; if (aws_socket_bind(&server_connection_args->listener, &endpoint)) { goto cleanup_listener; } if (aws_socket_listen(&server_connection_args->listener, 1024)) { goto 
cleanup_listener; } if (aws_socket_start_accept( &server_connection_args->listener, connection_loop, s_on_server_connection_result, server_connection_args)) { goto cleanup_listener; } return &server_connection_args->listener; cleanup_listener: aws_socket_clean_up(&server_connection_args->listener); cleanup_server_connection_args: s_server_connection_args_release(server_connection_args); return NULL; } void aws_server_bootstrap_destroy_socket_listener(struct aws_server_bootstrap *bootstrap, struct aws_socket *listener) { struct server_connection_args *server_connection_args = AWS_CONTAINER_OF(listener, struct server_connection_args, listener); AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: releasing bootstrap reference", (void *)bootstrap); aws_event_loop_schedule_task_now(listener->event_loop, &server_connection_args->listener_destroy_task); } int aws_server_bootstrap_set_alpn_callback( struct aws_server_bootstrap *bootstrap, aws_channel_on_protocol_negotiated_fn *on_protocol_negotiated) { AWS_ASSERT(on_protocol_negotiated); AWS_LOGF_DEBUG(AWS_LS_IO_CHANNEL_BOOTSTRAP, "id=%p: Setting ALPN callback", (void *)bootstrap); bootstrap->on_protocol_negotiated = on_protocol_negotiated; return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/darwin/000077500000000000000000000000001456575232400224235ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/darwin/darwin_pki_utils.c000066400000000000000000000350651456575232400261470ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include /* SecureTransport is not thread-safe during identity import */ /* https://developer.apple.com/documentation/security/certificate_key_and_trust_services/working_with_concurrency */ static struct aws_mutex s_sec_mutex = AWS_MUTEX_INIT; #if !defined(AWS_OS_IOS) /* * Helper function to import ECC private key in PEM format into `import_keychain`. Return * AWS_OP_SUCCESS if successfully imported a private key or find a duplicate key in the * `import_keychain`, otherwise return AWS_OP_ERR. * `private_key`: UTF-8 key data in PEM format. If the key file contains multiple key sections, * the function will only import the first valid key. * `import_keychain`: The keychain to be imported to. `import_keychain` should not be NULL. */ int aws_import_ecc_key_into_keychain( struct aws_allocator *alloc, CFAllocatorRef cf_alloc, const struct aws_byte_cursor *private_key, SecKeychainRef import_keychain) { // Ensure imported_keychain is not NULL AWS_PRECONDITION(import_keychain != NULL); AWS_PRECONDITION(private_key != NULL); int result = AWS_OP_ERR; struct aws_array_list decoded_key_buffer_list; /* Decode PEM format file to DER format */ if (aws_pem_objects_init_from_file_contents(&decoded_key_buffer_list, alloc, *private_key)) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: Failed to decode PEM private key to DER format."); goto ecc_import_cleanup; } AWS_ASSERT(aws_array_list_is_valid(&decoded_key_buffer_list)); // A PEM file could contains multiple PEM data section. Try importing each PEM section until find the first // succeed key. for (size_t index = 0; index < aws_array_list_length(&decoded_key_buffer_list); index++) { struct aws_pem_object *pem_object_ptr = NULL; /* We only check the first pem section. Currently, we dont support key with multiple pem section. 
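 * (Each iteration tries one decoded PEM object; the loop stops at the first one that SecItemImport accepts or reports as a duplicate of a key already in the keychain.)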
*/ aws_array_list_get_at_ptr(&decoded_key_buffer_list, (void **)&pem_object_ptr, index); AWS_ASSERT(pem_object_ptr); CFDataRef key_data = CFDataCreate(cf_alloc, pem_object_ptr->data.buffer, pem_object_ptr->data.len); if (!key_data) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: error in creating ECC key data system call."); continue; } /* Import ECC key data into keychain. */ SecExternalFormat format = kSecFormatOpenSSL; SecExternalItemType item_type = kSecItemTypePrivateKey; SecItemImportExportKeyParameters import_params; AWS_ZERO_STRUCT(import_params); import_params.version = SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION; import_params.passphrase = CFSTR(""); OSStatus key_status = SecItemImport(key_data, NULL, &format, &item_type, 0, &import_params, import_keychain, NULL); /* Clean up key buffer */ CFRelease(key_data); // As long as we found an imported key, ignore the rest of keys if (key_status == errSecSuccess || key_status == errSecDuplicateItem) { result = AWS_OP_SUCCESS; break; } else { // Log the error code for key importing AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: error importing ECC private key with OSStatus %d", (int)key_status); } } ecc_import_cleanup: // Zero out the array list and release it aws_pem_objects_clean_up(&decoded_key_buffer_list); return result; } int aws_import_public_and_private_keys_to_identity( struct aws_allocator *alloc, CFAllocatorRef cf_alloc, const struct aws_byte_cursor *public_cert_chain, const struct aws_byte_cursor *private_key, CFArrayRef *identity, const struct aws_string *keychain_path) { AWS_PRECONDITION(public_cert_chain != NULL); AWS_PRECONDITION(private_key != NULL); int result = AWS_OP_ERR; CFDataRef cert_data = NULL; CFDataRef key_data = NULL; CFArrayRef cert_import_output = NULL; CFArrayRef key_import_output = NULL; SecExternalFormat format = kSecFormatUnknown; SecExternalItemType item_type = kSecItemTypeCertificate; SecItemImportExportKeyParameters import_params; AWS_ZERO_STRUCT(import_params); import_params.version = SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION; import_params.passphrase = CFSTR(""); struct aws_array_list cert_chain_list; AWS_ZERO_STRUCT(cert_chain_list); CFDataRef root_cert_data = NULL; SecCertificateRef certificate_ref = NULL; SecKeychainRef import_keychain = NULL; cert_data = CFDataCreate(cf_alloc, public_cert_chain->ptr, public_cert_chain->len); if (!cert_data) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: failed creating public cert chain data."); result = aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } key_data = CFDataCreate(cf_alloc, private_key->ptr, private_key->len); if (!key_data) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: failed creating private key data."); result = aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" /* SecKeychain functions are marked as deprecated. 
* Disable compiler warnings for now, but consider removing support for keychain altogether */ if (keychain_path) { OSStatus keychain_status = SecKeychainOpen(aws_string_c_str(keychain_path), &import_keychain); if (keychain_status != errSecSuccess) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: error opening keychain \"%s\" with OSStatus %d", aws_string_c_str(keychain_path), keychain_status); result = aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } keychain_status = SecKeychainUnlock(import_keychain, 0, "", true); if (keychain_status != errSecSuccess) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: error unlocking keychain \"%s\" with OSStatus %d", aws_string_c_str(keychain_path), keychain_status); result = aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } } else { OSStatus keychain_status = SecKeychainCopyDefault(&import_keychain); if (keychain_status != errSecSuccess) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: error opening the default keychain with OSStatus %d", keychain_status); result = aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } } # pragma clang diagnostic pop aws_mutex_lock(&s_sec_mutex); /* import certificate */ OSStatus cert_status = SecItemImport(cert_data, NULL, &format, &item_type, 0, &import_params, import_keychain, &cert_import_output); /* import private key */ format = kSecFormatUnknown; item_type = kSecItemTypePrivateKey; OSStatus key_status = SecItemImport(key_data, NULL, &format, &item_type, 0, &import_params, import_keychain, &key_import_output); if (cert_status != errSecSuccess && cert_status != errSecDuplicateItem) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: error importing certificate with OSStatus %d", (int)cert_status); result = aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto done; } /* * If the key format is unknown, we tried to decode the key into DER format import it. * The PEM file might contains multiple key sections, we will only add the first succeed key into the keychain. */ if (key_status == errSecUnknownFormat) { AWS_LOGF_TRACE(AWS_LS_IO_PKI, "static: error reading private key format, try ECC key format."); if (aws_import_ecc_key_into_keychain(alloc, cf_alloc, private_key, import_keychain)) { result = aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto done; } } else if (key_status != errSecSuccess && key_status != errSecDuplicateItem) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: error importing private key with OSStatus %d", (int)key_status); result = aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto done; } /* if it's already there, just convert this over to a cert and then let the keychain give it back to us. */ if (cert_status == errSecDuplicateItem) { /* The text for this log is also in the README for each CRT and v2 IoT SDK. If changed, please also change * where it is referenced. */ AWS_LOGF_INFO( AWS_LS_IO_PKI, "static: certificate has an existing certificate-key pair that was previously imported into the Keychain. 
" "Using key from Keychain instead of the one provided."); if (aws_pem_objects_init_from_file_contents(&cert_chain_list, alloc, *public_cert_chain)) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: decoding certificate PEM failed."); result = AWS_OP_ERR; goto done; } struct aws_pem_object *root_cert_ptr = NULL; aws_array_list_get_at_ptr(&cert_chain_list, (void **)&root_cert_ptr, 0); AWS_ASSERT(root_cert_ptr); root_cert_data = CFDataCreate(cf_alloc, root_cert_ptr->data.buffer, root_cert_ptr->data.len); if (!root_cert_data) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: failed creating root cert data."); result = aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } certificate_ref = SecCertificateCreateWithData(cf_alloc, root_cert_data); if (!certificate_ref) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: failed to create certificate."); result = aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto done; } } else { certificate_ref = (SecCertificateRef)CFArrayGetValueAtIndex(cert_import_output, 0); /* SecCertificateCreateWithData returns an object with +1 retain, so we need to match that behavior here */ CFRetain(certificate_ref); } /* we got a cert one way or the other, create the identity and return it */ AWS_ASSERT(certificate_ref); SecIdentityRef identity_output; OSStatus status = SecIdentityCreateWithCertificate(import_keychain, certificate_ref, &identity_output); if (status != errSecSuccess) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: error creating identity with OSStatus %d", key_status); result = aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } CFTypeRef certs[] = {identity_output}; *identity = CFArrayCreate(cf_alloc, (const void **)certs, 1L, &kCFTypeArrayCallBacks); result = AWS_OP_SUCCESS; done: aws_mutex_unlock(&s_sec_mutex); if (certificate_ref) { CFRelease(certificate_ref); } if (root_cert_data) { CFRelease(root_cert_data); } if (cert_import_output) { CFRelease(cert_import_output); } if (key_import_output) { CFRelease(key_import_output); } if (import_keychain) { CFRelease(import_keychain); } if (cert_data) { CFRelease(cert_data); } if (key_data) { CFRelease(key_data); } aws_pem_objects_clean_up(&cert_chain_list); return result; } #endif /* !AWS_OS_IOS */ int aws_import_pkcs12_to_identity( CFAllocatorRef cf_alloc, const struct aws_byte_cursor *pkcs12_cursor, const struct aws_byte_cursor *password, CFArrayRef *identity) { CFDataRef pkcs12_data = CFDataCreate(cf_alloc, pkcs12_cursor->ptr, pkcs12_cursor->len); CFArrayRef items = NULL; CFMutableDictionaryRef dictionary = CFDictionaryCreateMutable(cf_alloc, 0, NULL, NULL); CFStringRef password_ref = CFSTR(""); if (password->len) { password_ref = CFStringCreateWithBytes(cf_alloc, password->ptr, password->len, kCFStringEncodingUTF8, false); } CFDictionaryAddValue(dictionary, kSecImportExportPassphrase, password_ref); aws_mutex_lock(&s_sec_mutex); OSStatus status = SecPKCS12Import(pkcs12_data, dictionary, &items); aws_mutex_unlock(&s_sec_mutex); CFRelease(pkcs12_data); if (password_ref) { CFRelease(password_ref); } CFRelease(dictionary); if (status == errSecSuccess) { CFTypeRef item = (CFTypeRef)CFArrayGetValueAtIndex(items, 0); CFTypeRef identity_ref = (CFTypeRef)CFDictionaryGetValue((CFDictionaryRef)item, kSecImportItemIdentity); if (identity_ref) { *identity = CFArrayCreate(cf_alloc, &identity_ref, 1L, &kCFTypeArrayCallBacks); } CFRelease(items); return AWS_OP_SUCCESS; } AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: error importing pkcs#12 certificate OSStatus %d", (int)status); return AWS_OP_ERR; } int aws_import_trusted_certificates( struct 
aws_allocator *alloc, CFAllocatorRef cf_alloc, const struct aws_byte_cursor *certificates_blob, CFArrayRef *certs) { AWS_PRECONDITION(certificates_blob != NULL); struct aws_array_list certificates; if (aws_pem_objects_init_from_file_contents(&certificates, alloc, *certificates_blob)) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: decoding CA PEM failed."); aws_array_list_clean_up(&certificates); return AWS_OP_ERR; } size_t cert_count = aws_array_list_length(&certificates); CFMutableArrayRef temp_cert_array = CFArrayCreateMutable(cf_alloc, cert_count, &kCFTypeArrayCallBacks); int err = AWS_OP_SUCCESS; aws_mutex_lock(&s_sec_mutex); for (size_t i = 0; i < cert_count; ++i) { struct aws_pem_object *pem_object_ptr = NULL; aws_array_list_get_at_ptr(&certificates, (void **)&pem_object_ptr, i); CFDataRef cert_blob = CFDataCreate(cf_alloc, pem_object_ptr->data.buffer, pem_object_ptr->data.len); if (cert_blob) { SecCertificateRef certificate_ref = SecCertificateCreateWithData(cf_alloc, cert_blob); CFArrayAppendValue(temp_cert_array, certificate_ref); CFRelease(certificate_ref); CFRelease(cert_blob); } else { err = AWS_OP_SUCCESS; } } aws_mutex_unlock(&s_sec_mutex); *certs = temp_cert_array; aws_pem_objects_clean_up(&certificates); aws_array_list_clean_up(&certificates); return err; } void aws_release_identity(CFArrayRef identity) { CFRelease(identity); } void aws_release_certificates(CFArrayRef certs) { CFRelease(certs); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/darwin/secure_transport_tls_channel_handler.c000066400000000000000000001267421456575232400322540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-variable" #pragma clang diagnostic ignored "-Wdeprecated-declarations" #pragma clang diagnostic ignored "-Wunused-function" static OSStatus (*s_SSLSetALPNProtocols)(SSLContextRef context, CFArrayRef protocols) = NULL; static OSStatus (*s_SSLCopyALPNProtocols)(SSLContextRef context, CFArrayRef *protocols) = NULL; #define EST_TLS_RECORD_OVERHEAD 53 /* 5 byte header + 32 + 16 bytes for padding */ #define KB_1 1024 #define MAX_RECORD_SIZE (KB_1 * 16) #define EST_HANDSHAKE_SIZE (7 * KB_1) /* We couldn't make SSLSetALPNFunc work, so we have to use the public API which isn't available until High-Sierra */ #if (TARGET_OS_MAC && MAC_OS_X_VERSION_MAX_ALLOWED >= 101302) || \ (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MAX_ALLOWED >= 110000) || \ (TARGET_OS_TV && __TV_OS_VERSION_MAX_ALLOWED >= 110000) || \ (TARGET_OS_WATCH && __WATCH_OS_VERSION_MAX_ALLOWED >= 40000) # define ALPN_AVAILABLE true # define TLS13_AVAILABLE true #else # define ALPN_AVAILABLE false # define TLS13_AVAILABLE false #endif bool aws_tls_is_alpn_available(void) { #if ALPN_AVAILABLE return s_SSLCopyALPNProtocols != NULL; #endif return false; } bool aws_tls_is_cipher_pref_supported(enum aws_tls_cipher_pref cipher_pref) { switch (cipher_pref) { case AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT: return true; case AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2019_06: default: return false; } } void aws_tls_init_static_state(struct aws_allocator *alloc) { (void)alloc; /* keep from breaking users that built on later versions of the mac os sdk but deployed * to an older version. 
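 * The ALPN entry points below are resolved at runtime via dlsym(RTLD_DEFAULT, ...) rather than linked directly, so on
 * OS versions that lack them the lookups simply return NULL and aws_tls_is_alpn_available() reports false.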
*/ s_SSLSetALPNProtocols = (OSStatus(*)(SSLContextRef, CFArrayRef))dlsym(RTLD_DEFAULT, "SSLSetALPNProtocols"); s_SSLCopyALPNProtocols = (OSStatus(*)(SSLContextRef, CFArrayRef *))dlsym(RTLD_DEFAULT, "SSLCopyALPNProtocols"); AWS_LOGF_INFO(AWS_LS_IO_TLS, "static: initializing TLS implementation as Apple SecureTransport."); if (s_SSLSetALPNProtocols) { AWS_LOGF_INFO(AWS_LS_IO_TLS, "static: ALPN support detected."); } else { AWS_LOGF_WARN( AWS_LS_IO_TLS, "static: ALPN isn't supported on your apple device, you can improve support and performance by upgrading."); } } void aws_tls_clean_up_static_state(void) { /* no op */ } struct secure_transport_handler { struct aws_channel_handler handler; struct aws_tls_channel_handler_shared shared_state; SSLContextRef ctx; CFAllocatorRef wrapped_allocator; struct aws_linked_list input_queue; struct aws_channel_slot *parent_slot; struct aws_byte_buf protocol; /* Note: This is just a copy of the expected server name. * The Secure Transport API doesn't seem to expose actual server name. * SSLGetPeerDomainName just returns whatever was passed earlier to SSLSetPeerDomainName */ struct aws_string *server_name; aws_channel_on_message_write_completed_fn *latest_message_on_completion; void *latest_message_completion_user_data; CFArrayRef ca_certs; struct aws_channel_task read_task; aws_tls_on_negotiation_result_fn *on_negotiation_result; aws_tls_on_data_read_fn *on_data_read; aws_tls_on_error_fn *on_error; void *user_data; bool advertise_alpn_message; bool negotiation_finished; bool verify_peer; bool read_task_pending; }; static OSStatus s_read_cb(SSLConnectionRef conn, void *data, size_t *len) { struct secure_transport_handler *handler = (struct secure_transport_handler *)conn; size_t written = 0; struct aws_byte_buf buf = aws_byte_buf_from_array((const uint8_t *)data, *len); buf.len = 0; while (!aws_linked_list_empty(&handler->input_queue) && written < buf.capacity) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&handler->input_queue); struct aws_io_message *message = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); size_t remaining_message_len = message->message_data.len - message->copy_mark; size_t remaining_buf_len = buf.capacity - written; size_t to_write = remaining_message_len < remaining_buf_len ? 
remaining_message_len : remaining_buf_len; struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); aws_byte_cursor_advance(&message_cursor, message->copy_mark); aws_byte_cursor_read(&message_cursor, buf.buffer + written, to_write); written += to_write; message->copy_mark += to_write; if (message->copy_mark == message->message_data.len) { /* note: value is the first member of the allocated struct */ aws_mem_release(message->allocator, message); } else { aws_linked_list_push_front(&handler->input_queue, &message->queueing_handle); } } if (*len == written) { return noErr; } *len = written; return errSSLWouldBlock; } static OSStatus s_write_cb(SSLConnectionRef conn, const void *data, size_t *len) { struct secure_transport_handler *handler = (struct secure_transport_handler *)conn; struct aws_byte_buf buf = aws_byte_buf_from_array((const uint8_t *)data, *len); struct aws_byte_cursor buffer_cursor = aws_byte_cursor_from_buf(&buf); size_t processed = 0; while (processed < buf.len) { const size_t overhead = aws_channel_slot_upstream_message_overhead(handler->parent_slot); const size_t message_size_hint = (buf.len - processed) + overhead; struct aws_io_message *message = aws_channel_acquire_message_from_pool( handler->parent_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, message_size_hint); if (!message || message->message_data.capacity <= overhead) { return errSecMemoryError; } const size_t available_msg_write_capacity = message->message_data.capacity - overhead; const size_t to_write = available_msg_write_capacity >= buffer_cursor.len ? buffer_cursor.len : available_msg_write_capacity; struct aws_byte_cursor chunk = aws_byte_cursor_advance(&buffer_cursor, to_write); if (aws_byte_buf_append(&message->message_data, &chunk)) { aws_mem_release(message->allocator, message); return errSecBufferTooSmall; } processed += message->message_data.len; if (processed == buf.len) { message->on_completion = handler->latest_message_on_completion; message->user_data = handler->latest_message_completion_user_data; handler->latest_message_on_completion = NULL; handler->latest_message_completion_user_data = NULL; } if (aws_channel_slot_send_message(handler->parent_slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); return errSSLClosedNoNotify; } } if (*len == processed) { return noErr; } *len = processed; return errSSLWouldBlock; } static void s_destroy(struct aws_channel_handler *handler) { if (handler) { struct secure_transport_handler *secure_transport_handler = handler->impl; CFRelease(secure_transport_handler->ctx); if (secure_transport_handler->protocol.buffer) { aws_byte_buf_clean_up(&secure_transport_handler->protocol); } aws_tls_channel_handler_shared_clean_up(&secure_transport_handler->shared_state); aws_string_destroy(secure_transport_handler->server_name); aws_mem_release(handler->alloc, secure_transport_handler); } } static CFStringRef s_get_protocol(struct secure_transport_handler *handler) { #if ALPN_AVAILABLE if (s_SSLCopyALPNProtocols) { CFArrayRef protocols = NULL; OSStatus status = s_SSLCopyALPNProtocols(handler->ctx, &protocols); (void)status; if (!protocols) { return NULL; } CFIndex count = CFArrayGetCount(protocols); if (count <= 0) { return NULL; } CFStringRef alpn_value = CFArrayGetValueAtIndex(protocols, 0); CFRetain(alpn_value); CFRelease(protocols); return alpn_value; } return NULL; #else (void)handler; return NULL; #endif } static void s_set_protocols( struct secure_transport_handler *handler, struct aws_allocator *alloc, 
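/* alpn_list is a ';'-delimited protocol string (hypothetical example: "h2;http/1.1"); each entry below is turned into a CFString and handed to SSLSetALPNProtocols */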
struct aws_string *alpn_list) { (void)handler; (void)alloc; (void)alpn_list; #if ALPN_AVAILABLE if (s_SSLSetALPNProtocols) { struct aws_byte_cursor alpn_data = aws_byte_cursor_from_string(alpn_list); struct aws_array_list alpn_list_array; if (aws_array_list_init_dynamic(&alpn_list_array, alloc, 2, sizeof(struct aws_byte_cursor))) { return; } if (aws_byte_cursor_split_on_char(&alpn_data, ';', &alpn_list_array)) { return; } CFMutableArrayRef alpn_array = CFArrayCreateMutable( handler->wrapped_allocator, aws_array_list_length(&alpn_list_array), &kCFTypeArrayCallBacks); if (!alpn_array) { return; } for (size_t i = 0; i < aws_array_list_length(&alpn_list_array); ++i) { struct aws_byte_cursor protocol_cursor; aws_array_list_get_at(&alpn_list_array, &protocol_cursor, i); CFStringRef protocol = CFStringCreateWithBytes( handler->wrapped_allocator, protocol_cursor.ptr, protocol_cursor.len, kCFStringEncodingASCII, false); if (!protocol) { CFRelease(alpn_array); alpn_array = NULL; break; } CFArrayAppendValue(alpn_array, protocol); CFRelease(protocol); } if (alpn_array) { OSStatus status = s_SSLSetALPNProtocols(handler->ctx, alpn_array); (void)status; CFRelease(alpn_array); } aws_array_list_clean_up(&alpn_list_array); } #endif } static void s_invoke_negotiation_callback(struct aws_channel_handler *handler, int err_code) { struct secure_transport_handler *secure_transport_handler = handler->impl; aws_on_tls_negotiation_completed(&secure_transport_handler->shared_state, err_code); if (secure_transport_handler->on_negotiation_result) { secure_transport_handler->on_negotiation_result( handler, secure_transport_handler->parent_slot, err_code, secure_transport_handler->user_data); } } static int s_drive_negotiation(struct aws_channel_handler *handler) { struct secure_transport_handler *secure_transport_handler = handler->impl; aws_on_drive_tls_negotiation(&secure_transport_handler->shared_state); OSStatus status = SSLHandshake(secure_transport_handler->ctx); /* yay!!!! negotiation finished successfully. */ if (status == noErr) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: negotiation succeeded", (void *)handler); secure_transport_handler->negotiation_finished = true; CFStringRef protocol = s_get_protocol(secure_transport_handler); if (protocol) { if (aws_byte_buf_init( &secure_transport_handler->protocol, handler->alloc, (size_t)CFStringGetLength(protocol) + 1)) { CFRelease(protocol); s_invoke_negotiation_callback(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } memset(secure_transport_handler->protocol.buffer, 0, secure_transport_handler->protocol.capacity); CFRange byte_range = CFRangeMake(0, CFStringGetLength(protocol)); CFStringGetBytes( protocol, byte_range, kCFStringEncodingASCII, 0, false, secure_transport_handler->protocol.buffer, secure_transport_handler->protocol.capacity, NULL); secure_transport_handler->protocol.len = secure_transport_handler->protocol.capacity - 1; CFRelease(protocol); AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: negotiated protocol: %s", (void *)handler, secure_transport_handler->protocol.buffer); } if (secure_transport_handler->server_name) { /* Log server name to be consistent with other tls_channel_handler implementations, * but this is just a copy of the EXPECTED server name, * the Secure Transport API doesn't seem to expose actual server name. 
*/ AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Remote Server Name: %s", (void *)handler, aws_string_c_str(secure_transport_handler->server_name)); } if (secure_transport_handler->parent_slot->adj_right && secure_transport_handler->advertise_alpn_message && protocol) { struct aws_io_message *message = aws_channel_acquire_message_from_pool( secure_transport_handler->parent_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, sizeof(struct aws_tls_negotiated_protocol_message)); message->message_tag = AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE; struct aws_tls_negotiated_protocol_message *protocol_message = (struct aws_tls_negotiated_protocol_message *)message->message_data.buffer; protocol_message->protocol = secure_transport_handler->protocol; message->message_data.len = sizeof(struct aws_tls_negotiated_protocol_message); if (aws_channel_slot_send_message(secure_transport_handler->parent_slot, message, AWS_CHANNEL_DIR_READ)) { aws_mem_release(message->allocator, message); aws_channel_shutdown(secure_transport_handler->parent_slot->channel, aws_last_error()); return AWS_OP_SUCCESS; } } s_invoke_negotiation_callback(handler, AWS_ERROR_SUCCESS); } else if (status == errSSLPeerAuthCompleted) { /* this branch gets hit only when verification is disabled, * or a custom CA bundle is being used. */ if (secure_transport_handler->verify_peer) { if (!secure_transport_handler->ca_certs) { s_invoke_negotiation_callback(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } SecTrustRef trust; status = SSLCopyPeerTrust(secure_transport_handler->ctx, &trust); if (status != errSecSuccess) { s_invoke_negotiation_callback(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } SecPolicyRef policy; if (secure_transport_handler->server_name) { CFStringRef server_name = CFStringCreateWithCString( secure_transport_handler->wrapped_allocator, aws_string_c_str(secure_transport_handler->server_name), kCFStringEncodingUTF8); policy = SecPolicyCreateSSL(true, server_name); CFRelease(server_name); } else { policy = SecPolicyCreateBasicX509(); } status = SecTrustSetPolicies(trust, policy); CFRelease(policy); if (status != errSecSuccess) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed to set trust policy %d\n", (void *)handler, (int)status); CFRelease(trust); s_invoke_negotiation_callback(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } status = SecTrustSetAnchorCertificates(trust, secure_transport_handler->ca_certs); if (status != errSecSuccess) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Failed to set anchor certificate with OSStatus %d\n", (void *)handler, (int)status); CFRelease(trust); s_invoke_negotiation_callback(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } /* Use ONLY the custom CA bundle (ignoring system anchors) */ status = SecTrustSetAnchorCertificatesOnly(trust, true); if (status != errSecSuccess) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Failed to ignore system anchors with OSStatus %d\n", (void *)handler, (int)status); CFRelease(trust); s_invoke_negotiation_callback(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } SecTrustResultType trust_eval = 0; status = SecTrustEvaluate(trust, &trust_eval); CFRelease(trust); if (status == errSecSuccess && (trust_eval == kSecTrustResultProceed || trust_eval == kSecTrustResultUnspecified)) { 
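/* custom-CA verification succeeded: re-enter the handshake so negotiation can continue past the errSSLPeerAuthCompleted pause */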
return s_drive_negotiation(handler); } AWS_LOGF_WARN( AWS_LS_IO_TLS, "id=%p: Using custom CA, certificate validation failed with OSStatus %d and Trust Eval %d.", (void *)handler, (int)status, (int)trust_eval); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } return s_drive_negotiation(handler); /* if this is here, everything went wrong. */ } else if (status != errSSLWouldBlock) { secure_transport_handler->negotiation_finished = false; AWS_LOGF_WARN(AWS_LS_IO_TLS, "id=%p: negotiation failed with OSStatus %d.", (void *)handler, (int)status); s_invoke_negotiation_callback(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } return AWS_OP_SUCCESS; } static void s_negotiation_task(struct aws_channel_task *task, void *arg, aws_task_status status) { struct aws_channel_handler *handler = arg; if (status == AWS_TASK_STATUS_RUN_READY) { s_drive_negotiation(handler); } aws_mem_release(handler->alloc, task); } int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler) { struct secure_transport_handler *secure_transport_handler = handler->impl; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p, starting TLS negotiation", (void *)handler); if (aws_channel_thread_is_callers_thread(secure_transport_handler->parent_slot->channel)) { return s_drive_negotiation(handler); } struct aws_channel_task *negotiation_task = aws_mem_acquire(handler->alloc, sizeof(struct aws_task)); if (!negotiation_task) { return AWS_OP_ERR; } aws_channel_task_init( negotiation_task, s_negotiation_task, handler, "secure_transport_channel_handler_start_negotiation"); aws_channel_schedule_task_now(secure_transport_handler->parent_slot->channel, negotiation_task); return AWS_OP_SUCCESS; } static int s_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)slot; struct secure_transport_handler *secure_transport_handler = handler->impl; if (AWS_UNLIKELY(!secure_transport_handler->negotiation_finished)) { return aws_raise_error(AWS_IO_TLS_ERROR_NOT_NEGOTIATED); } secure_transport_handler->latest_message_on_completion = message->on_completion; secure_transport_handler->latest_message_completion_user_data = message->user_data; size_t processed = 0; OSStatus status = SSLWrite(secure_transport_handler->ctx, message->message_data.buffer, message->message_data.len, &processed); AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: bytes written: %llu", (void *)handler, (unsigned long long)processed); if (status != noErr) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: SSLWrite failed with OSStatus error code %d.", (void *)handler, (int)status); return aws_raise_error(AWS_IO_TLS_ERROR_WRITE_FAILURE); } aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } static int s_handle_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool abort_immediately) { struct secure_transport_handler *secure_transport_handler = handler->impl; if (dir == AWS_CHANNEL_DIR_WRITE) { if (!abort_immediately && error_code != AWS_IO_SOCKET_CLOSED) { AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: shutting down write direction.", (void *)handler); SSLClose(secure_transport_handler->ctx); } } else { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: shutting down read direction with error %d. 
Flushing queues.", (void *)handler, error_code); while (!aws_linked_list_empty(&secure_transport_handler->input_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&secure_transport_handler->input_queue); struct aws_io_message *message = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); aws_mem_release(message->allocator, message); } } return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, abort_immediately); } static int s_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct secure_transport_handler *secure_transport_handler = handler->impl; if (message) { aws_linked_list_push_back(&secure_transport_handler->input_queue, &message->queueing_handle); if (!secure_transport_handler->negotiation_finished) { size_t message_len = message->message_data.len; if (!s_drive_negotiation(handler)) { aws_channel_slot_increment_read_window(slot, message_len); } else { aws_channel_shutdown( secure_transport_handler->parent_slot->channel, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } return AWS_OP_SUCCESS; } } size_t downstream_window = SIZE_MAX; /* process as much as we have queued that will fit in the downstream window. */ if (slot->adj_right) { downstream_window = aws_channel_slot_downstream_read_window(slot); } AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: downstream window is %llu", (void *)handler, (unsigned long long)downstream_window); size_t processed = 0; OSStatus status = noErr; while (processed < downstream_window && status == noErr) { struct aws_io_message *outgoing_read_message = aws_channel_acquire_message_from_pool( slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, downstream_window - processed); if (!outgoing_read_message) { /* even though this is a failure, this handler has taken ownership of the message */ aws_channel_shutdown(secure_transport_handler->parent_slot->channel, aws_last_error()); return AWS_OP_SUCCESS; } size_t read = 0; status = SSLRead( secure_transport_handler->ctx, outgoing_read_message->message_data.buffer, outgoing_read_message->message_data.capacity, &read); AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: bytes read %llu", (void *)handler, (unsigned long long)read); if (read <= 0) { aws_mem_release(outgoing_read_message->allocator, outgoing_read_message); if (status != errSSLWouldBlock) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: error reported during SSLRead. 
OSStatus code %d", (void *)handler, (int)status); if (status != errSSLClosedGraceful) { aws_raise_error(AWS_IO_TLS_ERROR_READ_FAILURE); aws_channel_shutdown(secure_transport_handler->parent_slot->channel, AWS_IO_TLS_ERROR_READ_FAILURE); } else { AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: connection shutting down gracefully.", (void *)handler); aws_channel_shutdown(secure_transport_handler->parent_slot->channel, AWS_ERROR_SUCCESS); } } continue; }; processed += read; outgoing_read_message->message_data.len = read; if (secure_transport_handler->on_data_read) { secure_transport_handler->on_data_read( handler, slot, &outgoing_read_message->message_data, secure_transport_handler->user_data); } if (slot->adj_right) { if (aws_channel_slot_send_message(slot, outgoing_read_message, AWS_CHANNEL_DIR_READ)) { aws_mem_release(outgoing_read_message->allocator, outgoing_read_message); aws_channel_shutdown(secure_transport_handler->parent_slot->channel, aws_last_error()); /* incoming message was pushed to the input_queue, so this handler owns it now */ return AWS_OP_SUCCESS; } } else { aws_mem_release(outgoing_read_message->allocator, outgoing_read_message); } } AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p, Remaining window for this event-loop tick: %llu", (void *)handler, (unsigned long long)downstream_window - processed); return AWS_OP_SUCCESS; } static void s_run_read(struct aws_channel_task *task, void *arg, aws_task_status status) { (void)task; if (status == AWS_TASK_STATUS_RUN_READY) { struct aws_channel_handler *handler = arg; struct secure_transport_handler *secure_transport_handler = handler->impl; secure_transport_handler->read_task_pending = false; s_process_read_message(handler, secure_transport_handler->parent_slot, NULL); } } static int s_increment_read_window(struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { struct secure_transport_handler *secure_transport_handler = handler->impl; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: increment read window message received %llu", (void *)handler, (unsigned long long)size); size_t downstream_size = aws_channel_slot_downstream_read_window(slot); size_t current_window_size = slot->window_size; size_t likely_records_count = (size_t)ceil((double)(downstream_size) / (double)(MAX_RECORD_SIZE)); size_t offset_size = aws_mul_size_saturating(likely_records_count, EST_TLS_RECORD_OVERHEAD); size_t total_desired_size = aws_add_size_saturating(offset_size, downstream_size); if (total_desired_size > current_window_size) { size_t window_update_size = total_desired_size - current_window_size; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Propagating read window increment of size %llu", (void *)handler, (unsigned long long)window_update_size); aws_channel_slot_increment_read_window(slot, window_update_size); } if (secure_transport_handler->negotiation_finished && !secure_transport_handler->read_task.node.next) { /* TLS requires full records before it can decrypt anything. As a result we need to check everything we've * buffered instead of just waiting on a read from the socket, or we'll hit a deadlock. * * We have messages in a queue and they need to be run after the socket has popped (even if it didn't have data * to read). Alternatively, s2n reads entire records at a time, so we'll need to grab whatever we can and we * have no idea what's going on inside there. So we need to attempt another read. 
*/ secure_transport_handler->read_task_pending = true; aws_channel_task_init( &secure_transport_handler->read_task, s_run_read, handler, "secure_transport_channel_handler_read_on_window_increment"); aws_channel_schedule_task_now(slot->channel, &secure_transport_handler->read_task); } return AWS_OP_SUCCESS; } static size_t s_message_overhead(struct aws_channel_handler *handler) { (void)handler; return EST_TLS_RECORD_OVERHEAD; } static size_t s_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return EST_HANDSHAKE_SIZE; } static void s_reset_statistics(struct aws_channel_handler *handler) { struct secure_transport_handler *secure_transport_handler = handler->impl; aws_crt_statistics_tls_reset(&secure_transport_handler->shared_state.stats); } static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) { struct secure_transport_handler *secure_transport_handler = handler->impl; void *stats_base = &secure_transport_handler->shared_state.stats; aws_array_list_push_back(stats, &stats_base); } struct aws_byte_buf aws_tls_handler_protocol(struct aws_channel_handler *handler) { struct secure_transport_handler *secure_transport_handler = handler->impl; return secure_transport_handler->protocol; } struct aws_byte_buf aws_tls_handler_server_name(struct aws_channel_handler *handler) { struct secure_transport_handler *secure_transport_handler = handler->impl; const uint8_t *bytes = NULL; size_t len = 0; if (secure_transport_handler->server_name) { bytes = secure_transport_handler->server_name->bytes; len = secure_transport_handler->server_name->len; } return aws_byte_buf_from_array(bytes, len); } static struct aws_channel_handler_vtable s_handler_vtable = { .destroy = s_destroy, .process_read_message = s_process_read_message, .process_write_message = s_process_write_message, .shutdown = s_handle_shutdown, .increment_read_window = s_increment_read_window, .initial_window_size = s_initial_window_size, .message_overhead = s_message_overhead, .reset_statistics = s_reset_statistics, .gather_statistics = s_gather_statistics, }; struct secure_transport_ctx { struct aws_tls_ctx ctx; CFAllocatorRef wrapped_allocator; CFArrayRef certs; CFArrayRef ca_cert; enum aws_tls_versions minimum_version; struct aws_string *alpn_list; bool veriify_peer; }; static struct aws_channel_handler *s_tls_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot, SSLProtocolSide protocol_side) { AWS_ASSERT(options->ctx); struct secure_transport_ctx *secure_transport_ctx = options->ctx->impl; struct secure_transport_handler *secure_transport_handler = (struct secure_transport_handler *)aws_mem_calloc(allocator, 1, sizeof(struct secure_transport_handler)); if (!secure_transport_handler) { return NULL; } secure_transport_handler->handler.alloc = allocator; secure_transport_handler->handler.impl = secure_transport_handler; secure_transport_handler->handler.vtable = &s_handler_vtable; secure_transport_handler->handler.slot = slot; secure_transport_handler->wrapped_allocator = secure_transport_ctx->wrapped_allocator; secure_transport_handler->advertise_alpn_message = options->advertise_alpn_message; secure_transport_handler->on_data_read = options->on_data_read; secure_transport_handler->on_error = options->on_error; secure_transport_handler->on_negotiation_result = options->on_negotiation_result; secure_transport_handler->user_data = options->user_data; aws_tls_channel_handler_shared_init( 
&secure_transport_handler->shared_state, &secure_transport_handler->handler, options); secure_transport_handler->ctx = SSLCreateContext(secure_transport_handler->wrapped_allocator, protocol_side, kSSLStreamType); if (!secure_transport_handler->ctx) { AWS_LOGF_FATAL( AWS_LS_IO_TLS, "id=%p: failed to initialize an SSL Context.", (void *)&secure_transport_handler->handler); aws_raise_error(AWS_IO_TLS_CTX_ERROR); goto cleanup_st_handler; } switch (secure_transport_ctx->minimum_version) { case AWS_IO_SSLv3: SSLSetProtocolVersionMin(secure_transport_handler->ctx, kSSLProtocol3); break; case AWS_IO_TLSv1: SSLSetProtocolVersionMin(secure_transport_handler->ctx, kTLSProtocol1); break; case AWS_IO_TLSv1_1: SSLSetProtocolVersionMin(secure_transport_handler->ctx, kTLSProtocol11); break; case AWS_IO_TLSv1_2: SSLSetProtocolVersionMin(secure_transport_handler->ctx, kTLSProtocol12); break; case AWS_IO_TLSv1_3: #if TLS13_AVAILABLE SSLSetProtocolVersionMin(secure_transport_handler->ctx, kTLSProtocol13); #else AWS_LOGF_FATAL( AWS_LS_IO_TLS, "static: TLS 1.3 is not supported on this device. You may just want to specify " "AWS_IO_TLS_VER_SYS_DEFAULTS and you will automatically " "use the latest version of the protocol when it is available."); /* * "TLS 1.3 is not supported for your target platform, * you can probably get by setting AWS_IO_TLSv1_2 as the minimum and if tls 1.3 is supported it will be * used." */ AWS_ASSERT(0); #endif break; case AWS_IO_TLS_VER_SYS_DEFAULTS: default: /* kSSLProtocolUnknown means use system defaults. */ SSLSetProtocolVersionMin(secure_transport_handler->ctx, kSSLProtocolUnknown); break; } if (SSLSetIOFuncs(secure_transport_handler->ctx, s_read_cb, s_write_cb) != noErr || SSLSetConnection(secure_transport_handler->ctx, secure_transport_handler) != noErr) { AWS_LOGF_FATAL( AWS_LS_IO_TLS, "id=%p: failed to initialize an SSL Context.", (void *)&secure_transport_handler->handler); aws_raise_error(AWS_IO_TLS_CTX_ERROR); goto cleanup_ssl_ctx; } OSStatus status = noErr; secure_transport_handler->verify_peer = secure_transport_ctx->veriify_peer; if (!secure_transport_ctx->veriify_peer && protocol_side == kSSLClientSide) { AWS_LOGF_WARN( AWS_LS_IO_TLS, "id=%p: x.509 validation has been disabled. 
" "If this is not running in a test environment, this is likely a security vulnerability.", (void *)&secure_transport_handler->handler); SSLSetSessionOption(secure_transport_handler->ctx, kSSLSessionOptionBreakOnServerAuth, true); } if (secure_transport_ctx->certs) { status = SSLSetCertificate(secure_transport_handler->ctx, secure_transport_ctx->certs); } secure_transport_handler->ca_certs = NULL; if (secure_transport_ctx->ca_cert) { secure_transport_handler->ca_certs = secure_transport_ctx->ca_cert; if (protocol_side == kSSLServerSide && secure_transport_ctx->veriify_peer) { SSLSetSessionOption(secure_transport_handler->ctx, kSSLSessionOptionBreakOnClientAuth, true); } else if (secure_transport_ctx->veriify_peer) { SSLSetSessionOption(secure_transport_handler->ctx, kSSLSessionOptionBreakOnServerAuth, true); } } (void)status; aws_linked_list_init(&secure_transport_handler->input_queue); secure_transport_handler->parent_slot = slot; secure_transport_handler->latest_message_completion_user_data = NULL; secure_transport_handler->negotiation_finished = false; secure_transport_handler->latest_message_on_completion = NULL; if (options->server_name) { secure_transport_handler->server_name = aws_string_new_from_string(allocator, options->server_name); size_t server_name_len = options->server_name->len; SSLSetPeerDomainName(secure_transport_handler->ctx, aws_string_c_str(options->server_name), server_name_len); } struct aws_string *alpn_list = NULL; if (options->alpn_list) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: setting ALPN list %s", (void *)&secure_transport_handler->handler, aws_string_c_str(options->alpn_list)); alpn_list = options->alpn_list; } else if (secure_transport_ctx->alpn_list) { alpn_list = secure_transport_ctx->alpn_list; } if (alpn_list) { s_set_protocols(secure_transport_handler, allocator, alpn_list); } return &secure_transport_handler->handler; cleanup_ssl_ctx: CFRelease(secure_transport_handler->ctx); cleanup_st_handler: aws_mem_release(allocator, secure_transport_handler); return NULL; } struct aws_channel_handler *aws_tls_client_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { return s_tls_handler_new(allocator, options, slot, kSSLClientSide); } struct aws_channel_handler *aws_tls_server_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { return s_tls_handler_new(allocator, options, slot, kSSLServerSide); } static void s_aws_secure_transport_ctx_destroy(struct secure_transport_ctx *secure_transport_ctx) { if (secure_transport_ctx == NULL) { return; } if (secure_transport_ctx->certs) { aws_release_identity(secure_transport_ctx->certs); } if (secure_transport_ctx->ca_cert) { aws_release_certificates(secure_transport_ctx->ca_cert); } if (secure_transport_ctx->alpn_list) { aws_string_destroy(secure_transport_ctx->alpn_list); } CFRelease(secure_transport_ctx->wrapped_allocator); aws_mem_release(secure_transport_ctx->ctx.alloc, secure_transport_ctx); } static struct aws_tls_ctx *s_tls_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { struct secure_transport_ctx *secure_transport_ctx = aws_mem_calloc(alloc, 1, sizeof(struct secure_transport_ctx)); if (!secure_transport_ctx) { return NULL; } if (!aws_tls_is_cipher_pref_supported(options->cipher_pref)) { aws_raise_error(AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: TLS Cipher Preference is not supported: %d.", 
options->cipher_pref); return NULL; } secure_transport_ctx->wrapped_allocator = aws_wrapped_cf_allocator_new(alloc); secure_transport_ctx->minimum_version = options->minimum_tls_version; if (!secure_transport_ctx->wrapped_allocator) { goto cleanup_secure_transport_ctx; } if (options->alpn_list) { secure_transport_ctx->alpn_list = aws_string_new_from_string(alloc, options->alpn_list); if (!secure_transport_ctx->alpn_list) { goto cleanup_secure_transport_ctx; } } secure_transport_ctx->veriify_peer = options->verify_peer; secure_transport_ctx->ca_cert = NULL; secure_transport_ctx->certs = NULL; secure_transport_ctx->ctx.alloc = alloc; secure_transport_ctx->ctx.impl = secure_transport_ctx; aws_ref_count_init( &secure_transport_ctx->ctx.ref_count, secure_transport_ctx, (aws_simple_completion_callback *)s_aws_secure_transport_ctx_destroy); if (aws_tls_options_buf_is_set(&options->certificate) && aws_tls_options_buf_is_set(&options->private_key)) { #if !defined(AWS_OS_IOS) AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: certificate and key have been set, setting them up now."); if (!aws_text_is_utf8(options->certificate.buffer, options->certificate.len)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import certificate, must be ASCII/UTF-8 encoded"); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto cleanup_wrapped_allocator; } if (!aws_text_is_utf8(options->private_key.buffer, options->private_key.len)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import private key, must be ASCII/UTF-8 encoded"); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto cleanup_wrapped_allocator; } struct aws_byte_cursor cert_chain_cur = aws_byte_cursor_from_buf(&options->certificate); struct aws_byte_cursor private_key_cur = aws_byte_cursor_from_buf(&options->private_key); if (aws_import_public_and_private_keys_to_identity( alloc, secure_transport_ctx->wrapped_allocator, &cert_chain_cur, &private_key_cur, &secure_transport_ctx->certs, options->keychain_path)) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "static: failed to import certificate and private key with error %d.", aws_last_error()); goto cleanup_wrapped_allocator; } #endif } else if (aws_tls_options_buf_is_set(&options->pkcs12)) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: a pkcs$12 certificate and key has been set, setting it up now."); struct aws_byte_cursor pkcs12_blob_cur = aws_byte_cursor_from_buf(&options->pkcs12); struct aws_byte_cursor password_cur = aws_byte_cursor_from_buf(&options->pkcs12_password); if (aws_import_pkcs12_to_identity( secure_transport_ctx->wrapped_allocator, &pkcs12_blob_cur, &password_cur, &secure_transport_ctx->certs)) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "static: failed to import pkcs#12 certificate with error %d.", aws_last_error()); goto cleanup_wrapped_allocator; } } if (aws_tls_options_buf_is_set(&options->ca_file)) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: loading custom CA file."); struct aws_byte_cursor ca_cursor = aws_byte_cursor_from_buf(&options->ca_file); if (aws_import_trusted_certificates( alloc, secure_transport_ctx->wrapped_allocator, &ca_cursor, &secure_transport_ctx->ca_cert)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import custom CA with error %d", aws_last_error()); goto cleanup_wrapped_allocator; } } return &secure_transport_ctx->ctx; cleanup_wrapped_allocator: aws_wrapped_cf_allocator_destroy(secure_transport_ctx->wrapped_allocator); if (secure_transport_ctx->alpn_list) { aws_string_destroy(secure_transport_ctx->alpn_list); } cleanup_secure_transport_ctx: aws_mem_release(alloc, secure_transport_ctx); return 
NULL; } struct aws_tls_ctx *aws_tls_server_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { return s_tls_ctx_new(alloc, options); } struct aws_tls_ctx *aws_tls_client_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { return s_tls_ctx_new(alloc, options); } #pragma clang diagnostic pop aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/event_loop.c000066400000000000000000000461421456575232400234640ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock) { struct aws_event_loop_options options = { .thread_options = NULL, .clock = clock, }; return aws_event_loop_new_default_with_options(alloc, &options); } static void s_event_loop_group_thread_exit(void *user_data) { struct aws_event_loop_group *el_group = user_data; aws_simple_completion_callback *completion_callback = el_group->shutdown_options.shutdown_callback_fn; void *completion_user_data = el_group->shutdown_options.shutdown_callback_user_data; aws_mem_release(el_group->allocator, el_group); if (completion_callback != NULL) { completion_callback(completion_user_data); } } static void s_aws_event_loop_group_shutdown_sync(struct aws_event_loop_group *el_group) { while (aws_array_list_length(&el_group->event_loops) > 0) { struct aws_event_loop *loop = NULL; if (!aws_array_list_back(&el_group->event_loops, &loop)) { aws_event_loop_destroy(loop); } aws_array_list_pop_back(&el_group->event_loops); } aws_array_list_clean_up(&el_group->event_loops); } static void s_event_loop_destroy_async_thread_fn(void *thread_data) { struct aws_event_loop_group *el_group = thread_data; s_aws_event_loop_group_shutdown_sync(el_group); aws_thread_current_at_exit(s_event_loop_group_thread_exit, el_group); } static void s_aws_event_loop_group_shutdown_async(struct aws_event_loop_group *el_group) { /* It's possible that the last refcount was released on an event-loop thread, * so we would deadlock if we waited here for all the event-loop threads to shut down. 
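     * (aws_event_loop_destroy asserts that it is not called from the loop's own thread, so a synchronous
     * wait here could amount to a thread joining itself.)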
* Therefore, we spawn a NEW thread and have it wait for all the event-loop threads to shut down */ struct aws_thread cleanup_thread; AWS_ZERO_STRUCT(cleanup_thread); aws_thread_init(&cleanup_thread, el_group->allocator); struct aws_thread_options thread_options = *aws_default_thread_options(); thread_options.join_strategy = AWS_TJS_MANAGED; thread_options.name = aws_byte_cursor_from_c_str("EvntLoopCleanup"); /* 15 characters is max for Linux */ aws_thread_launch(&cleanup_thread, s_event_loop_destroy_async_thread_fn, el_group, &thread_options); } static struct aws_event_loop_group *s_event_loop_group_new( struct aws_allocator *alloc, aws_io_clock_fn *clock, uint16_t el_count, uint16_t cpu_group, bool pin_threads, aws_new_event_loop_fn *new_loop_fn, void *new_loop_user_data, const struct aws_shutdown_callback_options *shutdown_options) { AWS_ASSERT(new_loop_fn); size_t group_cpu_count = 0; struct aws_cpu_info *usable_cpus = NULL; if (pin_threads) { group_cpu_count = aws_get_cpu_count_for_group(cpu_group); if (!group_cpu_count) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } usable_cpus = aws_mem_calloc(alloc, group_cpu_count, sizeof(struct aws_cpu_info)); if (usable_cpus == NULL) { return NULL; } aws_get_cpu_ids_for_group(cpu_group, usable_cpus, group_cpu_count); } struct aws_event_loop_group *el_group = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop_group)); if (el_group == NULL) { return NULL; } el_group->allocator = alloc; aws_ref_count_init( &el_group->ref_count, el_group, (aws_simple_completion_callback *)s_aws_event_loop_group_shutdown_async); if (aws_array_list_init_dynamic(&el_group->event_loops, alloc, el_count, sizeof(struct aws_event_loop *))) { goto on_error; } for (uint16_t i = 0; i < el_count; ++i) { /* Don't pin to hyper-threads if a user cared enough to specify a NUMA node */ if (!pin_threads || (i < group_cpu_count && !usable_cpus[i].suspected_hyper_thread)) { struct aws_thread_options thread_options = *aws_default_thread_options(); struct aws_event_loop_options options = { .clock = clock, .thread_options = &thread_options, }; if (pin_threads) { thread_options.cpu_id = usable_cpus[i].cpu_id; } /* Thread name should be <= 15 characters */ char thread_name[32] = {0}; int thread_name_len = snprintf(thread_name, sizeof(thread_name), "AwsEventLoop %d", (int)i + 1); if (thread_name_len > AWS_THREAD_NAME_RECOMMENDED_STRLEN) { snprintf(thread_name, sizeof(thread_name), "AwsEventLoop"); } thread_options.name = aws_byte_cursor_from_c_str(thread_name); struct aws_event_loop *loop = new_loop_fn(alloc, &options, new_loop_user_data); if (!loop) { goto on_error; } if (aws_array_list_push_back(&el_group->event_loops, (const void *)&loop)) { aws_event_loop_destroy(loop); goto on_error; } if (aws_event_loop_run(loop)) { goto on_error; } } } if (shutdown_options != NULL) { el_group->shutdown_options = *shutdown_options; } if (pin_threads) { aws_mem_release(alloc, usable_cpus); } return el_group; on_error:; /* cache the error code to prevent any potential side effects */ int cached_error_code = aws_last_error(); aws_mem_release(alloc, usable_cpus); s_aws_event_loop_group_shutdown_sync(el_group); s_event_loop_group_thread_exit(el_group); /* raise the cached error code */ aws_raise_error(cached_error_code); return NULL; } struct aws_event_loop_group *aws_event_loop_group_new( struct aws_allocator *alloc, aws_io_clock_fn *clock, uint16_t el_count, aws_new_event_loop_fn *new_loop_fn, void *new_loop_user_data, const struct aws_shutdown_callback_options *shutdown_options) { 
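    /* Non-pinned variant: delegates to s_event_loop_group_new with cpu_group 0 and pin_threads false. */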
AWS_ASSERT(new_loop_fn); AWS_ASSERT(el_count); return s_event_loop_group_new(alloc, clock, el_count, 0, false, new_loop_fn, new_loop_user_data, shutdown_options); } static struct aws_event_loop *s_default_new_event_loop( struct aws_allocator *allocator, const struct aws_event_loop_options *options, void *user_data) { (void)user_data; return aws_event_loop_new_default_with_options(allocator, options); } struct aws_event_loop_group *aws_event_loop_group_new_default( struct aws_allocator *alloc, uint16_t max_threads, const struct aws_shutdown_callback_options *shutdown_options) { if (!max_threads) { uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); /* cut them in half to avoid using hyper threads for the IO work. */ max_threads = processor_count > 1 ? processor_count / 2 : processor_count; } return aws_event_loop_group_new( alloc, aws_high_res_clock_get_ticks, max_threads, s_default_new_event_loop, NULL, shutdown_options); } struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group( struct aws_allocator *alloc, aws_io_clock_fn *clock, uint16_t el_count, uint16_t cpu_group, aws_new_event_loop_fn *new_loop_fn, void *new_loop_user_data, const struct aws_shutdown_callback_options *shutdown_options) { AWS_ASSERT(new_loop_fn); AWS_ASSERT(el_count); return s_event_loop_group_new( alloc, clock, el_count, cpu_group, true, new_loop_fn, new_loop_user_data, shutdown_options); } struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( struct aws_allocator *alloc, uint16_t max_threads, uint16_t cpu_group, const struct aws_shutdown_callback_options *shutdown_options) { if (!max_threads) { uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); /* cut them in half to avoid using hyper threads for the IO work. */ max_threads = processor_count > 1 ? processor_count / 2 : processor_count; } return aws_event_loop_group_new_pinned_to_cpu_group( alloc, aws_high_res_clock_get_ticks, max_threads, cpu_group, s_default_new_event_loop, NULL, shutdown_options); } struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group) { if (el_group != NULL) { aws_ref_count_acquire(&el_group->ref_count); } return el_group; } void aws_event_loop_group_release(struct aws_event_loop_group *el_group) { if (el_group != NULL) { aws_ref_count_release(&el_group->ref_count); } } size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group) { return aws_array_list_length(&el_group->event_loops); } struct aws_event_loop *aws_event_loop_group_get_loop_at(struct aws_event_loop_group *el_group, size_t index) { struct aws_event_loop *el = NULL; aws_array_list_get_at(&el_group->event_loops, &el, index); return el; } struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group) { size_t loop_count = aws_array_list_length(&el_group->event_loops); AWS_ASSERT(loop_count > 0); if (loop_count == 0) { return NULL; } /* do one call to get 32 random bits because this hits an actual entropy source and it's not cheap */ uint32_t random_32_bit_num = 0; aws_device_random_u32(&random_32_bit_num); /* use the best of two algorithm to select the loop with the lowest load. * If we find device random is too hard on the kernel, we can seed it and use another random * number generator. */ /* it's fine and intentional, the case will throw off the top 16 bits and that's what we want. 
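     * For example, with 4 loops: candidate A = (uint16_t)random_32_bit_num % 4 uses the low 16 bits,
     * candidate B = (uint16_t)(random_32_bit_num >> 16) % 4 uses the high 16 bits, and whichever candidate
     * reports the lower load factor is returned ("power of two random choices").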
*/ uint16_t random_num_a = (uint16_t)random_32_bit_num; random_num_a = random_num_a % loop_count; uint16_t random_num_b = (uint16_t)(random_32_bit_num >> 16); random_num_b = random_num_b % loop_count; struct aws_event_loop *random_loop_a = NULL; struct aws_event_loop *random_loop_b = NULL; aws_array_list_get_at(&el_group->event_loops, &random_loop_a, random_num_a); aws_array_list_get_at(&el_group->event_loops, &random_loop_b, random_num_b); /* there's no logical reason why this should ever be possible. It's just best to die if it happens. */ AWS_FATAL_ASSERT((random_loop_a && random_loop_b) && "random_loop_a or random_loop_b is NULL."); size_t load_a = aws_event_loop_get_load_factor(random_loop_a); size_t load_b = aws_event_loop_get_load_factor(random_loop_b); return load_a < load_b ? random_loop_a : random_loop_b; } static void s_object_removed(void *value) { struct aws_event_loop_local_object *object = (struct aws_event_loop_local_object *)value; if (object->on_object_removed) { object->on_object_removed(object); } } int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_allocator *alloc, aws_io_clock_fn *clock) { AWS_ZERO_STRUCT(*event_loop); event_loop->alloc = alloc; event_loop->clock = clock; aws_atomic_init_int(&event_loop->current_load_factor, 0u); aws_atomic_init_int(&event_loop->next_flush_time, 0u); if (aws_hash_table_init(&event_loop->local_data, alloc, 20, aws_hash_ptr, aws_ptr_eq, NULL, s_object_removed)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop) { aws_hash_table_clean_up(&event_loop->local_data); } void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop) { aws_high_res_clock_get_ticks(&event_loop->latest_tick_start); } void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop) { /* increment the timestamp diff counter (this should always be called from the same thread), the concurrency * work happens during the flush. */ uint64_t end_tick = 0; aws_high_res_clock_get_ticks(&end_tick); size_t elapsed = (size_t)aws_min_u64(end_tick - event_loop->latest_tick_start, SIZE_MAX); event_loop->current_tick_latency_sum = aws_add_size_saturating(event_loop->current_tick_latency_sum, elapsed); event_loop->latest_tick_start = 0; size_t next_flush_time_secs = aws_atomic_load_int(&event_loop->next_flush_time); /* store as seconds because we can't make a 64-bit integer reliably atomic across platforms. */ uint64_t end_tick_secs = aws_timestamp_convert(end_tick, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); /* if a second has passed, flush the load-factor. */ if (end_tick_secs > next_flush_time_secs) { aws_atomic_store_int(&event_loop->current_load_factor, event_loop->current_tick_latency_sum); event_loop->current_tick_latency_sum = 0; /* run again in a second. */ aws_atomic_store_int(&event_loop->next_flush_time, (size_t)(end_tick_secs + 1)); } } size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop) { uint64_t current_time = 0; aws_high_res_clock_get_ticks(¤t_time); uint64_t current_time_secs = aws_timestamp_convert(current_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); size_t next_flush_time_secs = aws_atomic_load_int(&event_loop->next_flush_time); /* safety valve just in case an event-loop had heavy load and then went completely idle. If we haven't * had an update from the event-loop in 10 seconds, just assume idle. 
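     * (Ten seconds is well past the one-second flush interval set in aws_event_loop_register_tick_end.)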
Also, yes this is racy, but it should * be good enough because an active loop will be updating its counter frequently ( more than once per 10 seconds * for sure ), in the case where we hit the technical race condition, we don't care anyways and returning 0 * is the desired behavior. */ if (current_time_secs > next_flush_time_secs + 10) { return 0; } return aws_atomic_load_int(&event_loop->current_load_factor); } void aws_event_loop_destroy(struct aws_event_loop *event_loop) { if (!event_loop) { return; } AWS_ASSERT(event_loop->vtable && event_loop->vtable->destroy); AWS_ASSERT(!aws_event_loop_thread_is_callers_thread(event_loop)); event_loop->vtable->destroy(event_loop); } int aws_event_loop_fetch_local_object( struct aws_event_loop *event_loop, void *key, struct aws_event_loop_local_object *obj) { AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop)); struct aws_hash_element *object = NULL; if (!aws_hash_table_find(&event_loop->local_data, key, &object) && object) { *obj = *(struct aws_event_loop_local_object *)object->value; return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj) { AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop)); struct aws_hash_element *object = NULL; int was_created = 0; if (!aws_hash_table_create(&event_loop->local_data, obj->key, &object, &was_created)) { object->key = obj->key; object->value = obj; return AWS_OP_SUCCESS; } return AWS_OP_ERR; } int aws_event_loop_remove_local_object( struct aws_event_loop *event_loop, void *key, struct aws_event_loop_local_object *removed_obj) { AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop)); struct aws_hash_element existing_object; AWS_ZERO_STRUCT(existing_object); int was_present = 0; struct aws_hash_element *remove_candidate = removed_obj ? 
&existing_object : NULL; if (!aws_hash_table_remove(&event_loop->local_data, key, remove_candidate, &was_present)) { if (remove_candidate && was_present) { *removed_obj = *(struct aws_event_loop_local_object *)existing_object.value; } return AWS_OP_SUCCESS; } return AWS_OP_ERR; } int aws_event_loop_run(struct aws_event_loop *event_loop) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->run); return event_loop->vtable->run(event_loop); } int aws_event_loop_stop(struct aws_event_loop *event_loop) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->stop); return event_loop->vtable->stop(event_loop); } int aws_event_loop_wait_for_stop_completion(struct aws_event_loop *event_loop) { AWS_ASSERT(!aws_event_loop_thread_is_callers_thread(event_loop)); AWS_ASSERT(event_loop->vtable && event_loop->vtable->wait_for_stop_completion); return event_loop->vtable->wait_for_stop_completion(event_loop); } void aws_event_loop_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->schedule_task_now); AWS_ASSERT(task); event_loop->vtable->schedule_task_now(event_loop, task); } void aws_event_loop_schedule_task_future( struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->schedule_task_future); AWS_ASSERT(task); event_loop->vtable->schedule_task_future(event_loop, task, run_at_nanos); } void aws_event_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->cancel_task); AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop)); AWS_ASSERT(task); event_loop->vtable->cancel_task(event_loop, task); } #if AWS_USE_IO_COMPLETION_PORTS int aws_event_loop_connect_handle_to_io_completion_port( struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->connect_to_io_completion_port); return event_loop->vtable->connect_to_io_completion_port(event_loop, handle); } #else /* !AWS_USE_IO_COMPLETION_PORTS */ int aws_event_loop_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->subscribe_to_io_events); return event_loop->vtable->subscribe_to_io_events(event_loop, handle, events, on_event, user_data); } #endif /* AWS_USE_IO_COMPLETION_PORTS */ int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop)); AWS_ASSERT(event_loop->vtable && event_loop->vtable->unsubscribe_from_io_events); return event_loop->vtable->unsubscribe_from_io_events(event_loop, handle); } void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_ASSERT(event_loop && event_loop->vtable->free_io_event_resources); event_loop->vtable->free_io_event_resources(handle->additional_data); } bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop) { AWS_ASSERT(event_loop->vtable && event_loop->vtable->is_on_callers_thread); return event_loop->vtable->is_on_callers_thread(event_loop); } int aws_event_loop_current_clock_time(struct aws_event_loop *event_loop, uint64_t *time_nanos) { AWS_ASSERT(event_loop->clock); return event_loop->clock(time_nanos); } 
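/* Illustrative sketch, not part of the upstream file: one way a caller might use the event-loop group API
 * defined above. The AWS_EVENT_LOOP_GROUP_USAGE_EXAMPLE guard is hypothetical and simply keeps this out of
 * a normal build; aws_default_allocator() comes from aws-c-common. */
#ifdef AWS_EVENT_LOOP_GROUP_USAGE_EXAMPLE
static void s_event_loop_group_usage_example(void) {
    struct aws_allocator *alloc = aws_default_allocator();

    /* max_threads of 0 means "use half the detected processors" (see aws_event_loop_group_new_default). */
    struct aws_event_loop_group *group = aws_event_loop_group_new_default(alloc, 0, NULL /*shutdown_options*/);

    /* Returns the less-loaded of two randomly chosen loops ("two random choices"). */
    struct aws_event_loop *loop = aws_event_loop_group_get_next_loop(group);
    (void)loop; /* a real caller would go on to bind a channel or socket to this loop */

    /* Dropping the last reference starts the asynchronous shutdown path implemented above. */
    aws_event_loop_group_release(group);
}
#endif /* AWS_EVENT_LOOP_GROUP_USAGE_EXAMPLE */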
aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/exponential_backoff_retry_strategy.c000066400000000000000000000377201456575232400304640ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include struct exponential_backoff_strategy { struct aws_retry_strategy base; struct aws_exponential_backoff_retry_options config; struct aws_shutdown_callback_options shutdown_options; }; struct exponential_backoff_retry_token { struct aws_retry_token base; struct aws_atomic_var current_retry_count; struct aws_atomic_var last_backoff; size_t max_retries; uint64_t backoff_scale_factor_ns; uint64_t maximum_backoff_ns; enum aws_exponential_backoff_jitter_mode jitter_mode; /* Let's not make this worse by constantly moving across threads if we can help it */ struct aws_event_loop *bound_loop; uint64_t (*generate_random)(void); aws_generate_random_fn *generate_random_impl; void *generate_random_user_data; struct aws_task retry_task; struct { struct aws_mutex mutex; aws_retry_strategy_on_retry_token_acquired_fn *acquired_fn; aws_retry_strategy_on_retry_ready_fn *retry_ready_fn; void *user_data; } thread_data; }; static void s_exponential_retry_destroy(struct aws_retry_strategy *retry_strategy) { if (retry_strategy) { struct exponential_backoff_strategy *exponential_strategy = retry_strategy->impl; struct aws_event_loop_group *el_group = exponential_strategy->config.el_group; aws_simple_completion_callback *completion_callback = exponential_strategy->shutdown_options.shutdown_callback_fn; void *completion_user_data = exponential_strategy->shutdown_options.shutdown_callback_user_data; aws_mem_release(retry_strategy->allocator, exponential_strategy); if (completion_callback != NULL) { completion_callback(completion_user_data); } aws_ref_count_release(&el_group->ref_count); } } static void s_exponential_retry_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; int error_code = AWS_ERROR_IO_OPERATION_CANCELLED; if (status == AWS_TASK_STATUS_RUN_READY) { error_code = AWS_OP_SUCCESS; } struct exponential_backoff_retry_token *backoff_retry_token = arg; aws_retry_strategy_on_retry_token_acquired_fn *acquired_fn = NULL; aws_retry_strategy_on_retry_ready_fn *retry_ready_fn = NULL; void *user_data = NULL; { /***** BEGIN CRITICAL SECTION *********/ AWS_FATAL_ASSERT( !aws_mutex_lock(&backoff_retry_token->thread_data.mutex) && "Retry token mutex acquisition failed"); acquired_fn = backoff_retry_token->thread_data.acquired_fn; retry_ready_fn = backoff_retry_token->thread_data.retry_ready_fn; user_data = backoff_retry_token->thread_data.user_data; backoff_retry_token->thread_data.user_data = NULL; backoff_retry_token->thread_data.retry_ready_fn = NULL; backoff_retry_token->thread_data.acquired_fn = NULL; AWS_FATAL_ASSERT( !aws_mutex_unlock(&backoff_retry_token->thread_data.mutex) && "Retry token mutex release failed"); } /**** END CRITICAL SECTION ***********/ aws_retry_token_acquire(&backoff_retry_token->base); if (acquired_fn) { AWS_LOGF_DEBUG( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: Vending retry_token %p", (void *)backoff_retry_token->base.retry_strategy, (void *)&backoff_retry_token->base); acquired_fn(backoff_retry_token->base.retry_strategy, error_code, &backoff_retry_token->base, user_data); } else if (retry_ready_fn) { AWS_LOGF_DEBUG( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: Invoking retry_ready for token %p", 
(void *)backoff_retry_token->base.retry_strategy, (void *)&backoff_retry_token->base); retry_ready_fn(&backoff_retry_token->base, error_code, user_data); /* it's acquired before being scheduled for retry */ aws_retry_token_release(&backoff_retry_token->base); } aws_retry_token_release(&backoff_retry_token->base); } static int s_exponential_retry_acquire_token( struct aws_retry_strategy *retry_strategy, const struct aws_byte_cursor *partition_id, aws_retry_strategy_on_retry_token_acquired_fn *on_acquired, void *user_data, uint64_t timeout_ms) { (void)partition_id; /* no resource contention here so no timeouts. */ (void)timeout_ms; struct exponential_backoff_retry_token *backoff_retry_token = aws_mem_calloc(retry_strategy->allocator, 1, sizeof(struct exponential_backoff_retry_token)); if (!backoff_retry_token) { return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: Initializing retry token %p", (void *)retry_strategy, (void *)&backoff_retry_token->base); backoff_retry_token->base.allocator = retry_strategy->allocator; backoff_retry_token->base.retry_strategy = retry_strategy; aws_atomic_init_int(&backoff_retry_token->base.ref_count, 1u); aws_retry_strategy_acquire(retry_strategy); backoff_retry_token->base.impl = backoff_retry_token; struct exponential_backoff_strategy *exponential_backoff_strategy = retry_strategy->impl; backoff_retry_token->bound_loop = aws_event_loop_group_get_next_loop(exponential_backoff_strategy->config.el_group); backoff_retry_token->max_retries = exponential_backoff_strategy->config.max_retries; backoff_retry_token->backoff_scale_factor_ns = aws_timestamp_convert( exponential_backoff_strategy->config.backoff_scale_factor_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); backoff_retry_token->maximum_backoff_ns = aws_timestamp_convert( exponential_backoff_strategy->config.max_backoff_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); backoff_retry_token->jitter_mode = exponential_backoff_strategy->config.jitter_mode; backoff_retry_token->generate_random = exponential_backoff_strategy->config.generate_random; backoff_retry_token->generate_random_impl = exponential_backoff_strategy->config.generate_random_impl; backoff_retry_token->generate_random_user_data = exponential_backoff_strategy->config.generate_random_user_data; aws_atomic_init_int(&backoff_retry_token->current_retry_count, 0); aws_atomic_init_int(&backoff_retry_token->last_backoff, 0); backoff_retry_token->thread_data.acquired_fn = on_acquired; backoff_retry_token->thread_data.user_data = user_data; AWS_FATAL_ASSERT( !aws_mutex_init(&backoff_retry_token->thread_data.mutex) && "Retry strategy mutex initialization failed"); aws_task_init( &backoff_retry_token->retry_task, s_exponential_retry_task, backoff_retry_token, "aws_exponential_backoff_retry_task"); aws_event_loop_schedule_task_now(backoff_retry_token->bound_loop, &backoff_retry_token->retry_task); return AWS_OP_SUCCESS; } static inline uint64_t s_random_in_range(uint64_t from, uint64_t to, struct exponential_backoff_retry_token *token) { uint64_t max = aws_max_u64(from, to); uint64_t min = aws_min_u64(from, to); uint64_t diff = max - min; if (!diff) { return 0; } uint64_t random; if (token->generate_random_impl) { random = token->generate_random_impl(token->generate_random_user_data); } else { random = token->generate_random(); } return min + random % (diff); } typedef uint64_t(compute_backoff_fn)(struct exponential_backoff_retry_token *token); static uint64_t s_compute_no_jitter(struct 
exponential_backoff_retry_token *token) { uint64_t retry_count = aws_min_u64(aws_atomic_load_int(&token->current_retry_count), 63); uint64_t backoff_ns = aws_mul_u64_saturating((uint64_t)1 << retry_count, token->backoff_scale_factor_ns); return aws_min_u64(backoff_ns, token->maximum_backoff_ns); } static uint64_t s_compute_full_jitter(struct exponential_backoff_retry_token *token) { uint64_t non_jittered = s_compute_no_jitter(token); return s_random_in_range(0, non_jittered, token); } static uint64_t s_compute_deccorelated_jitter(struct exponential_backoff_retry_token *token) { size_t last_backoff_val = aws_atomic_load_int(&token->last_backoff); if (!last_backoff_val) { return s_compute_full_jitter(token); } uint64_t backoff_ns = aws_min_u64(token->maximum_backoff_ns, aws_mul_u64_saturating(last_backoff_val, 3)); return s_random_in_range(token->backoff_scale_factor_ns, backoff_ns, token); } static compute_backoff_fn *s_backoff_compute_table[] = { [AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT] = s_compute_full_jitter, [AWS_EXPONENTIAL_BACKOFF_JITTER_NONE] = s_compute_no_jitter, [AWS_EXPONENTIAL_BACKOFF_JITTER_FULL] = s_compute_full_jitter, [AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED] = s_compute_deccorelated_jitter, }; static int s_exponential_retry_schedule_retry( struct aws_retry_token *token, enum aws_retry_error_type error_type, aws_retry_strategy_on_retry_ready_fn *retry_ready, void *user_data) { struct exponential_backoff_retry_token *backoff_retry_token = token->impl; AWS_LOGF_DEBUG( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: Attempting retry on token %p with error type %d", (void *)backoff_retry_token->base.retry_strategy, (void *)token, error_type); uint64_t schedule_at = 0; /* AWS_RETRY_ERROR_TYPE_CLIENT_ERROR does not count against your retry budget since you were responding to an * improperly crafted request. */ if (error_type != AWS_RETRY_ERROR_TYPE_CLIENT_ERROR) { size_t retry_count = aws_atomic_load_int(&backoff_retry_token->current_retry_count); if (retry_count >= backoff_retry_token->max_retries) { AWS_LOGF_WARN( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: token %p has exhausted allowed retries. Retry count %zu max retries %zu", (void *)backoff_retry_token->base.retry_strategy, (void *)token, backoff_retry_token->max_retries, retry_count); return aws_raise_error(AWS_IO_MAX_RETRIES_EXCEEDED); } uint64_t backoff = s_backoff_compute_table[backoff_retry_token->jitter_mode](backoff_retry_token); uint64_t current_time = 0; aws_event_loop_current_clock_time(backoff_retry_token->bound_loop, ¤t_time); schedule_at = backoff + current_time; aws_atomic_init_int(&backoff_retry_token->last_backoff, (size_t)backoff); aws_atomic_fetch_add(&backoff_retry_token->current_retry_count, 1u); AWS_LOGF_DEBUG( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: Computed backoff value of %" PRIu64 "ns on token %p", (void *)backoff_retry_token->base.retry_strategy, backoff, (void *)token); } bool already_scheduled = false; { /***** BEGIN CRITICAL SECTION *********/ AWS_FATAL_ASSERT( !aws_mutex_lock(&backoff_retry_token->thread_data.mutex) && "Retry token mutex acquisition failed"); if (backoff_retry_token->thread_data.user_data) { already_scheduled = true; } else { backoff_retry_token->thread_data.retry_ready_fn = retry_ready; backoff_retry_token->thread_data.user_data = user_data; /* acquire to hold until the task runs. 
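             * (the matching aws_retry_token_release happens in s_exponential_retry_task after the
             * retry_ready callback has been invoked)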
*/ aws_retry_token_acquire(token); aws_task_init( &backoff_retry_token->retry_task, s_exponential_retry_task, backoff_retry_token, "aws_exponential_backoff_retry_task"); } AWS_FATAL_ASSERT( !aws_mutex_unlock(&backoff_retry_token->thread_data.mutex) && "Retry token mutex release failed"); } /**** END CRITICAL SECTION ***********/ if (already_scheduled) { AWS_LOGF_ERROR( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: retry token %p is already scheduled.", (void *)backoff_retry_token->base.retry_strategy, (void *)token); return aws_raise_error(AWS_ERROR_INVALID_STATE); } aws_event_loop_schedule_task_future(backoff_retry_token->bound_loop, &backoff_retry_token->retry_task, schedule_at); return AWS_OP_SUCCESS; } static int s_exponential_backoff_record_success(struct aws_retry_token *token) { /* we don't do book keeping in this mode. */ (void)token; return AWS_OP_SUCCESS; } static void s_exponential_backoff_release_token(struct aws_retry_token *token) { if (token) { aws_retry_strategy_release(token->retry_strategy); struct exponential_backoff_retry_token *backoff_retry_token = token->impl; aws_mutex_clean_up(&backoff_retry_token->thread_data.mutex); aws_mem_release(token->allocator, backoff_retry_token); } } static struct aws_retry_strategy_vtable s_exponential_retry_vtable = { .destroy = s_exponential_retry_destroy, .acquire_token = s_exponential_retry_acquire_token, .schedule_retry = s_exponential_retry_schedule_retry, .record_success = s_exponential_backoff_record_success, .release_token = s_exponential_backoff_release_token, }; static uint64_t s_default_gen_rand(void *user_data) { (void)user_data; uint64_t res = 0; aws_device_random_u64(&res); return res; } struct aws_retry_strategy *aws_retry_strategy_new_exponential_backoff( struct aws_allocator *allocator, const struct aws_exponential_backoff_retry_options *config) { AWS_PRECONDITION(config); AWS_PRECONDITION(config->el_group); AWS_PRECONDITION(config->jitter_mode <= AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED); AWS_PRECONDITION(config->max_retries); if (config->max_retries > 63 || !config->el_group || config->jitter_mode > AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct exponential_backoff_strategy *exponential_backoff_strategy = aws_mem_calloc(allocator, 1, sizeof(struct exponential_backoff_strategy)); if (!exponential_backoff_strategy) { return NULL; } AWS_LOGF_INFO( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "id=%p: Initializing exponential backoff retry strategy with scale factor: %" PRIu32 " jitter mode: %d and max retries %zu", (void *)&exponential_backoff_strategy->base, config->backoff_scale_factor_ms, config->jitter_mode, config->max_retries); exponential_backoff_strategy->base.allocator = allocator; exponential_backoff_strategy->base.impl = exponential_backoff_strategy; exponential_backoff_strategy->base.vtable = &s_exponential_retry_vtable; aws_atomic_init_int(&exponential_backoff_strategy->base.ref_count, 1); exponential_backoff_strategy->config = *config; exponential_backoff_strategy->config.el_group = aws_ref_count_acquire(&exponential_backoff_strategy->config.el_group->ref_count); if (!exponential_backoff_strategy->config.generate_random && !exponential_backoff_strategy->config.generate_random_impl) { exponential_backoff_strategy->config.generate_random_impl = s_default_gen_rand; } if (!exponential_backoff_strategy->config.max_retries) { exponential_backoff_strategy->config.max_retries = 5; } if 
(!exponential_backoff_strategy->config.backoff_scale_factor_ms) { exponential_backoff_strategy->config.backoff_scale_factor_ms = 500; } if (!exponential_backoff_strategy->config.max_backoff_secs) { exponential_backoff_strategy->config.max_backoff_secs = 20; } if (config->shutdown_options) { exponential_backoff_strategy->shutdown_options = *config->shutdown_options; } return &exponential_backoff_strategy->base; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/future.c000066400000000000000000000454311456575232400226240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include enum aws_future_type { AWS_FUTURE_T_BY_VALUE, AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP, AWS_FUTURE_T_POINTER, AWS_FUTURE_T_POINTER_WITH_DESTROY, AWS_FUTURE_T_POINTER_WITH_RELEASE, }; struct aws_future_callback_data { aws_future_callback_fn *fn; void *user_data; union aws_future_callback_union { struct aws_event_loop *event_loop; struct aws_channel *channel; } u; enum aws_future_callback_type { AWS_FUTURE_IMMEDIATE_CALLBACK, AWS_FUTURE_EVENT_LOOP_CALLBACK, AWS_FUTURE_CHANNEL_CALLBACK, } type; }; /* When allocating aws_future on the heap, we make 1 allocation containing: * aws_future_impl followed by T */ struct aws_future_impl { struct aws_allocator *alloc; struct aws_ref_count ref_count; struct aws_mutex lock; struct aws_condition_variable wait_cvar; struct aws_future_callback_data callback; union { aws_future_impl_result_clean_up_fn *clean_up; aws_future_impl_result_destroy_fn *destroy; aws_future_impl_result_release_fn *release; } result_dtor; int error_code; /* sum of bit fields should be 32 */ #define BIT_COUNT_FOR_SIZEOF_RESULT 27 unsigned int sizeof_result : BIT_COUNT_FOR_SIZEOF_RESULT; unsigned int type : 3; /* aws_future_type */ unsigned int is_done : 1; unsigned int owns_result : 1; }; static void s_future_impl_result_dtor(struct aws_future_impl *future, void *result_addr) { switch (future->type) { case AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP: { future->result_dtor.clean_up(result_addr); break; } break; case AWS_FUTURE_T_POINTER_WITH_DESTROY: { void *result = *(void **)result_addr; if (result) { future->result_dtor.destroy(result); } } break; case AWS_FUTURE_T_POINTER_WITH_RELEASE: { void *result = *(void **)result_addr; if (result) { future->result_dtor.release(result); } } break; default: break; } } static void s_future_impl_destroy(void *user_data) { struct aws_future_impl *future = user_data; if (future->owns_result && !future->error_code) { s_future_impl_result_dtor(future, aws_future_impl_get_result_address(future)); } aws_condition_variable_clean_up(&future->wait_cvar); aws_mutex_clean_up(&future->lock); aws_mem_release(future->alloc, future); } static struct aws_future_impl *s_future_impl_new(struct aws_allocator *alloc, size_t sizeof_result) { size_t total_size = sizeof(struct aws_future_impl) + sizeof_result; struct aws_future_impl *future = aws_mem_calloc(alloc, 1, total_size); future->alloc = alloc; /* we store sizeof_result in a bit field, ensure the number will fit */ AWS_ASSERT(sizeof_result <= (UINT_MAX >> (32 - BIT_COUNT_FOR_SIZEOF_RESULT))); future->sizeof_result = (unsigned int)sizeof_result; aws_ref_count_init(&future->ref_count, future, s_future_impl_destroy); aws_mutex_init(&future->lock); aws_condition_variable_init(&future->wait_cvar); return future; } struct aws_future_impl *aws_future_impl_new_by_value(struct aws_allocator *alloc, size_t 
sizeof_result) { struct aws_future_impl *future = s_future_impl_new(alloc, sizeof_result); future->type = AWS_FUTURE_T_BY_VALUE; return future; } struct aws_future_impl *aws_future_impl_new_by_value_with_clean_up( struct aws_allocator *alloc, size_t sizeof_result, aws_future_impl_result_clean_up_fn *result_clean_up) { AWS_ASSERT(result_clean_up); struct aws_future_impl *future = s_future_impl_new(alloc, sizeof_result); future->type = AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP; future->result_dtor.clean_up = result_clean_up; return future; } struct aws_future_impl *aws_future_impl_new_pointer(struct aws_allocator *alloc) { struct aws_future_impl *future = s_future_impl_new(alloc, sizeof(void *)); future->type = AWS_FUTURE_T_POINTER; return future; } struct aws_future_impl *aws_future_impl_new_pointer_with_destroy( struct aws_allocator *alloc, aws_future_impl_result_destroy_fn *result_destroy) { AWS_ASSERT(result_destroy); struct aws_future_impl *future = s_future_impl_new(alloc, sizeof(void *)); future->type = AWS_FUTURE_T_POINTER_WITH_DESTROY; future->result_dtor.destroy = result_destroy; return future; } struct aws_future_impl *aws_future_impl_new_pointer_with_release( struct aws_allocator *alloc, aws_future_impl_result_release_fn *result_release) { AWS_ASSERT(result_release); struct aws_future_impl *future = s_future_impl_new(alloc, sizeof(void *)); future->type = AWS_FUTURE_T_POINTER_WITH_RELEASE; future->result_dtor.release = result_release; return future; } struct aws_future_impl *aws_future_impl_release(struct aws_future_impl *future) { if (future != NULL) { aws_ref_count_release(&future->ref_count); } return NULL; } struct aws_future_impl *aws_future_impl_acquire(struct aws_future_impl *future) { if (future != NULL) { aws_ref_count_acquire(&future->ref_count); } return future; } bool aws_future_impl_is_done(const struct aws_future_impl *future) { AWS_ASSERT(future); /* this function is conceptually const, but we need to hold the lock a moment */ struct aws_mutex *mutable_lock = (struct aws_mutex *)&future->lock; /* BEGIN CRITICAL SECTION */ aws_mutex_lock(mutable_lock); bool is_done = future->is_done != 0; aws_mutex_unlock(mutable_lock); /* END CRITICAL SECTION */ return is_done; } int aws_future_impl_get_error(const struct aws_future_impl *future) { AWS_ASSERT(future != NULL); /* not bothering with lock, none of this can change after future is done */ AWS_FATAL_ASSERT(future->is_done && "Cannot get error before future is done"); return future->error_code; } void *aws_future_impl_get_result_address(const struct aws_future_impl *future) { AWS_ASSERT(future != NULL); /* not bothering with lock, none of this can change after future is done */ AWS_FATAL_ASSERT(future->is_done && "Cannot get result before future is done"); AWS_FATAL_ASSERT(!future->error_code && "Cannot get result from future that failed with an error"); AWS_FATAL_ASSERT(future->owns_result && "Result was already moved from future"); const struct aws_future_impl *address_of_memory_after_this_struct = future + 1; void *result_addr = (void *)address_of_memory_after_this_struct; return result_addr; } void aws_future_impl_get_result_by_move(struct aws_future_impl *future, void *dst_address) { void *result_addr = aws_future_impl_get_result_address(future); memcpy(dst_address, result_addr, future->sizeof_result); memset(result_addr, 0, future->sizeof_result); future->owns_result = false; } /* Data for invoking callback as a task on an event-loop */ struct aws_future_event_loop_callback_job { struct aws_allocator *alloc; struct 
aws_task task; struct aws_event_loop *event_loop; aws_future_callback_fn *callback; void *user_data; }; static void s_future_impl_event_loop_callback_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_future_event_loop_callback_job *job = arg; job->callback(job->user_data); // TODO: aws_event_loop_release(job->event_loop); aws_mem_release(job->alloc, job); } /* Data for invoking callback as a task on an aws_channel */ struct aws_future_channel_callback_job { struct aws_allocator *alloc; struct aws_channel_task task; struct aws_channel *channel; aws_future_callback_fn *callback; void *user_data; }; static void s_future_impl_channel_callback_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_future_channel_callback_job *job = arg; job->callback(job->user_data); aws_channel_release_hold(job->channel); aws_mem_release(job->alloc, job); } static void s_future_impl_invoke_callback(struct aws_future_callback_data *callback, struct aws_allocator *alloc) { AWS_ASSERT(callback->fn); switch (callback->type) { case AWS_FUTURE_IMMEDIATE_CALLBACK: { callback->fn(callback->user_data); } break; case AWS_FUTURE_EVENT_LOOP_CALLBACK: { /* Schedule the callback as a task on the event-loop */ struct aws_future_event_loop_callback_job *job = aws_mem_calloc(alloc, 1, sizeof(struct aws_future_event_loop_callback_job)); job->alloc = alloc; aws_task_init(&job->task, s_future_impl_event_loop_callback_task, job, "aws_future_event_loop_callback"); job->event_loop = callback->u.event_loop; job->callback = callback->fn; job->user_data = callback->user_data; aws_event_loop_schedule_task_now(callback->u.event_loop, &job->task); } break; case AWS_FUTURE_CHANNEL_CALLBACK: { /* Schedule the callback as a task on the channel */ struct aws_future_channel_callback_job *job = aws_mem_calloc(alloc, 1, sizeof(struct aws_future_channel_callback_job)); job->alloc = alloc; aws_channel_task_init(&job->task, s_future_impl_channel_callback_task, job, "aws_future_channel_callback"); job->channel = callback->u.channel; job->callback = callback->fn; job->user_data = callback->user_data; aws_channel_schedule_task_now(callback->u.channel, &job->task); } break; } } static void s_future_impl_set_done(struct aws_future_impl *future, void *src_address, int error_code) { bool is_error = error_code != 0; /* BEGIN CRITICAL SECTION */ aws_mutex_lock(&future->lock); struct aws_future_callback_data callback = future->callback; bool first_time = !future->is_done; if (first_time) { future->is_done = true; AWS_ZERO_STRUCT(future->callback); if (is_error) { future->error_code = error_code; } else { future->owns_result = true; AWS_FATAL_ASSERT(src_address != NULL); memcpy(aws_future_impl_get_result_address(future), src_address, future->sizeof_result); } aws_condition_variable_notify_all(&future->wait_cvar); } aws_mutex_unlock(&future->lock); /* END CRITICAL SECTION */ if (first_time) { /* if callback was registered, invoke it now, outside of critical section to avoid deadlock */ if (callback.fn != NULL) { s_future_impl_invoke_callback(&callback, future->alloc); } } else if (!error_code) { /* future was already done, so just destroy this newer result */ s_future_impl_result_dtor(future, src_address); } } void aws_future_impl_set_error(struct aws_future_impl *future, int error_code) { AWS_ASSERT(future); /* handle recoverable usage error */ AWS_ASSERT(error_code != 0); if (AWS_UNLIKELY(error_code == 0)) { error_code = AWS_ERROR_UNKNOWN; } 
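    /* Complete the future with no result payload; waiters and registered callbacks observe only the error code. */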
s_future_impl_set_done(future, NULL /*src_address*/, error_code); } void aws_future_impl_set_result_by_move(struct aws_future_impl *future, void *src_address) { AWS_ASSERT(future); AWS_ASSERT(src_address); s_future_impl_set_done(future, src_address, 0 /*error_code*/); /* the future takes ownership of the result. * zero out memory at the src_address to reinforce this transfer of ownership. */ memset(src_address, 0, future->sizeof_result); } /* Returns true if callback was registered, or false if callback was ignored * because the the future is already done and invoke_if_already_done==false */ static bool s_future_impl_register_callback( struct aws_future_impl *future, struct aws_future_callback_data *callback, bool invoke_if_already_done) { /* BEGIN CRITICAL SECTION */ aws_mutex_lock(&future->lock); AWS_FATAL_ASSERT(future->callback.fn == NULL && "Future done callback must only be set once"); bool already_done = future->is_done != 0; /* if not done, store callback for later */ if (!already_done) { future->callback = *callback; } aws_mutex_unlock(&future->lock); /* END CRITICAL SECTION */ /* if already done, invoke callback now */ if (already_done && invoke_if_already_done) { s_future_impl_invoke_callback(callback, future->alloc); } return !already_done || invoke_if_already_done; } void aws_future_impl_register_callback( struct aws_future_impl *future, aws_future_callback_fn *on_done, void *user_data) { AWS_ASSERT(future); AWS_ASSERT(on_done); struct aws_future_callback_data callback = { .fn = on_done, .user_data = user_data, .type = AWS_FUTURE_IMMEDIATE_CALLBACK, }; s_future_impl_register_callback(future, &callback, true /*invoke_if_already_done*/); } bool aws_future_impl_register_callback_if_not_done( struct aws_future_impl *future, aws_future_callback_fn *on_done, void *user_data) { AWS_ASSERT(future); AWS_ASSERT(on_done); struct aws_future_callback_data callback = { .fn = on_done, .user_data = user_data, .type = AWS_FUTURE_IMMEDIATE_CALLBACK, }; return s_future_impl_register_callback(future, &callback, false /*invoke_if_already_done*/); } void aws_future_impl_register_event_loop_callback( struct aws_future_impl *future, struct aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data) { AWS_ASSERT(future); AWS_ASSERT(event_loop); AWS_ASSERT(on_done); // TODO: aws_event_loop_acquire(event_loop); struct aws_future_callback_data callback = { .fn = on_done, .user_data = user_data, .type = AWS_FUTURE_EVENT_LOOP_CALLBACK, .u = {.event_loop = event_loop}, }; s_future_impl_register_callback(future, &callback, true /*invoke_if_already_done*/); } void aws_future_impl_register_channel_callback( struct aws_future_impl *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data) { AWS_ASSERT(future); AWS_ASSERT(channel); AWS_ASSERT(on_done); aws_channel_acquire_hold(channel); struct aws_future_callback_data callback = { .fn = on_done, .user_data = user_data, .type = AWS_FUTURE_CHANNEL_CALLBACK, .u = {.channel = channel}, }; s_future_impl_register_callback(future, &callback, true /*invoke_if_already_done*/); } static bool s_future_impl_is_done_pred(void *user_data) { struct aws_future_impl *future = user_data; return future->is_done != 0; } bool aws_future_impl_wait(const struct aws_future_impl *future, uint64_t timeout_ns) { AWS_ASSERT(future); /* this function is conceptually const, but we need to use synchronization primitives */ struct aws_future_impl *mutable_future = (struct aws_future_impl *)future; /* BEGIN CRITICAL SECTION */ 
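    /* Block on the condition variable until s_future_impl_is_done_pred reports completion or timeout_ns elapses. */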
aws_mutex_lock(&mutable_future->lock); bool is_done = aws_condition_variable_wait_for_pred( &mutable_future->wait_cvar, &mutable_future->lock, (int64_t)timeout_ns, s_future_impl_is_done_pred, mutable_future) == AWS_OP_SUCCESS; aws_mutex_unlock(&mutable_future->lock); /* END CRITICAL SECTION */ return is_done; } // AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(aws_future_bool, bool) struct aws_future_bool *aws_future_bool_new(struct aws_allocator *alloc) { return (struct aws_future_bool *)aws_future_impl_new_by_value(alloc, sizeof(_Bool)); } void aws_future_bool_set_result(struct aws_future_bool *future, _Bool result) { aws_future_impl_set_result_by_move((struct aws_future_impl *)future, &result); } _Bool aws_future_bool_get_result(const struct aws_future_bool *future) { return *(_Bool *)aws_future_impl_get_result_address((const struct aws_future_impl *)future); } struct aws_future_bool *aws_future_bool_acquire(struct aws_future_bool *future) { return (struct aws_future_bool *)aws_future_impl_acquire((struct aws_future_impl *)future); } struct aws_future_bool *aws_future_bool_release(struct aws_future_bool *future) { return (struct aws_future_bool *)aws_future_impl_release((struct aws_future_impl *)future); } void aws_future_bool_set_error(struct aws_future_bool *future, int error_code) { aws_future_impl_set_error((struct aws_future_impl *)future, error_code); } _Bool aws_future_bool_is_done(const struct aws_future_bool *future) { return aws_future_impl_is_done((const struct aws_future_impl *)future); } int aws_future_bool_get_error(const struct aws_future_bool *future) { return aws_future_impl_get_error((const struct aws_future_impl *)future); } void aws_future_bool_register_callback( struct aws_future_bool *future, aws_future_callback_fn *on_done, void *user_data) { aws_future_impl_register_callback((struct aws_future_impl *)future, on_done, user_data); } _Bool aws_future_bool_register_callback_if_not_done( struct aws_future_bool *future, aws_future_callback_fn *on_done, void *user_data) { return aws_future_impl_register_callback_if_not_done((struct aws_future_impl *)future, on_done, user_data); } void aws_future_bool_register_event_loop_callback( struct aws_future_bool *future, struct aws_event_loop *event_loop, aws_future_callback_fn *on_done, void *user_data) { aws_future_impl_register_event_loop_callback((struct aws_future_impl *)future, event_loop, on_done, user_data); } void aws_future_bool_register_channel_callback( struct aws_future_bool *future, struct aws_channel *channel, aws_future_callback_fn *on_done, void *user_data) { aws_future_impl_register_channel_callback((struct aws_future_impl *)future, channel, on_done, user_data); } _Bool aws_future_bool_wait(struct aws_future_bool *future, uint64_t timeout_ns) { return aws_future_impl_wait((struct aws_future_impl *)future, timeout_ns); } AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(aws_future_size, size_t) /** * aws_future */ AWS_FUTURE_T_IMPLEMENTATION_BEGIN(aws_future_void) struct aws_future_void *aws_future_void_new(struct aws_allocator *alloc) { /* Use aws_future under the hood, to avoid edge-cases with 0-sized result */ return (struct aws_future_void *)aws_future_bool_new(alloc); } void aws_future_void_set_result(struct aws_future_void *future) { aws_future_bool_set_result((struct aws_future_bool *)future, false); } AWS_FUTURE_T_IMPLEMENTATION_END(aws_future_void) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/host_resolver.c000066400000000000000000001701241456575232400242060ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include const uint64_t NS_PER_SEC = 1000000000; const size_t AWS_DEFAULT_DNS_TTL = 30; int aws_host_address_copy(const struct aws_host_address *from, struct aws_host_address *to) { to->allocator = from->allocator; to->address = aws_string_new_from_string(to->allocator, from->address); to->host = aws_string_new_from_string(to->allocator, from->host); to->record_type = from->record_type; to->use_count = from->use_count; to->connection_failure_count = from->connection_failure_count; to->expiry = from->expiry; to->weight = from->weight; return AWS_OP_SUCCESS; } void aws_host_address_move(struct aws_host_address *from, struct aws_host_address *to) { to->allocator = from->allocator; to->address = from->address; to->host = from->host; to->record_type = from->record_type; to->use_count = from->use_count; to->connection_failure_count = from->connection_failure_count; to->expiry = from->expiry; to->weight = from->weight; AWS_ZERO_STRUCT(*from); } void aws_host_address_clean_up(struct aws_host_address *address) { if (address->address) { aws_string_destroy((void *)address->address); } if (address->host) { aws_string_destroy((void *)address->host); } AWS_ZERO_STRUCT(*address); } int aws_host_resolver_resolve_host( struct aws_host_resolver *resolver, const struct aws_string *host_name, aws_on_host_resolved_result_fn *res, const struct aws_host_resolution_config *config, void *user_data) { AWS_ASSERT(resolver->vtable && resolver->vtable->resolve_host); return resolver->vtable->resolve_host(resolver, host_name, res, config, user_data); } int aws_host_resolver_purge_cache(struct aws_host_resolver *resolver) { AWS_ASSERT(resolver->vtable && resolver->vtable->purge_cache); return resolver->vtable->purge_cache(resolver); } int aws_host_resolver_purge_cache_with_callback( struct aws_host_resolver *resolver, aws_simple_completion_callback *on_purge_cache_complete_callback, void *user_data) { AWS_PRECONDITION(resolver); AWS_PRECONDITION(resolver->vtable); if (!resolver->vtable->purge_cache_with_callback) { AWS_LOGF_ERROR(AWS_LS_IO_DNS, "purge_cache_with_callback function is not supported"); return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return resolver->vtable->purge_cache_with_callback(resolver, on_purge_cache_complete_callback, user_data); } int aws_host_resolver_purge_host_cache( struct aws_host_resolver *resolver, const struct aws_host_resolver_purge_host_options *options) { AWS_PRECONDITION(resolver); AWS_PRECONDITION(resolver->vtable); if (!resolver->vtable->purge_host_cache) { AWS_LOGF_ERROR(AWS_LS_IO_DNS, "purge_host_cache function is not supported"); return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return resolver->vtable->purge_host_cache(resolver, options); } int aws_host_resolver_record_connection_failure( struct aws_host_resolver *resolver, const struct aws_host_address *address) { AWS_ASSERT(resolver->vtable && resolver->vtable->record_connection_failure); return resolver->vtable->record_connection_failure(resolver, address); } /* * Used by both the resolver for its lifetime state as well as individual host entries for theirs. */ enum default_resolver_state { DRS_ACTIVE, DRS_SHUTTING_DOWN, }; struct default_host_resolver { struct aws_allocator *allocator; /* * Mutually exclusion for the whole resolver, includes all member data and all host_entry_table operations. 
Once * an entry is retrieved, this lock MAY be dropped but certain logic may hold both the resolver and the entry lock. * The two locks must be taken in that order. */ struct aws_mutex resolver_lock; /* host_name (aws_string*) -> host_entry* */ struct aws_hash_table host_entry_table; /* Hash table of listener entries per host name. We keep this decoupled from the host entry table to allow for * listeners to be added/removed regardless of whether or not a corresponding host entry exists. * * Any time the listener list in the listener entry becomes empty, we remove the entry from the table. This * includes when a resolver thread moves all of the available listeners to its local list. */ /* host_name (aws_string*) -> host_listener_entry* */ struct aws_hash_table listener_entry_table; enum default_resolver_state state; /* * Tracks the number of launched resolution threads that have not yet invoked their shutdown completion * callback. */ uint32_t pending_host_entry_shutdown_completion_callbacks; /* * Function to use to query current time. Overridable in construction options. */ aws_io_clock_fn *system_clock_fn; struct aws_event_loop_group *event_loop_group; }; struct host_entry { /* immutable post-creation */ struct aws_allocator *allocator; struct aws_host_resolver *resolver; struct aws_thread resolver_thread; const struct aws_string *host_name; int64_t resolve_frequency_ns; struct aws_host_resolution_config resolution_config; /* synchronized data and its lock */ struct aws_mutex entry_lock; struct aws_condition_variable entry_signal; struct aws_cache *aaaa_records; struct aws_cache *a_records; struct aws_cache *failed_connection_aaaa_records; struct aws_cache *failed_connection_a_records; struct aws_linked_list pending_resolution_callbacks; uint32_t resolves_since_last_request; uint64_t last_resolve_request_timestamp_ns; enum default_resolver_state state; struct aws_array_list new_addresses; struct aws_array_list expired_addresses; aws_simple_completion_callback *on_host_purge_complete; void *on_host_purge_complete_user_data; }; /* * A host entry's caches hold things of this type. By using this and not the host_address directly, our * on_remove callbacks for the cache have access to the host_entry. We wouldn't need to do this if those * callbacks supported user data injection, but they don't and too many internal code bases already depend * on the public API. */ struct aws_host_address_cache_entry { struct aws_host_address address; struct host_entry *entry; }; int aws_host_address_cache_entry_copy( const struct aws_host_address_cache_entry *from, struct aws_host_address_cache_entry *to) { if (aws_host_address_copy(&from->address, &to->address)) { return AWS_OP_ERR; } to->entry = from->entry; return AWS_OP_SUCCESS; } static void s_shutdown_host_entry(struct host_entry *entry) { aws_mutex_lock(&entry->entry_lock); entry->state = DRS_SHUTTING_DOWN; /* * intentionally signal under the lock; we can't guarantee the resolver * is still around once the lock is released. 
*/ aws_condition_variable_notify_all(&entry->entry_signal); aws_mutex_unlock(&entry->entry_lock); } struct host_purge_callback_options { struct aws_allocator *allocator; struct aws_ref_count ref_count; aws_simple_completion_callback *on_purge_cache_complete_callback; void *user_data; }; static void s_host_purge_callback_options_destroy(void *user_data) { struct host_purge_callback_options *options = user_data; options->on_purge_cache_complete_callback(options->user_data); aws_mem_release(options->allocator, options); } static struct host_purge_callback_options *s_host_purge_callback_options_new( struct aws_allocator *allocator, aws_simple_completion_callback *on_purge_cache_complete_callback, void *user_data) { struct host_purge_callback_options *purge_callback_options = aws_mem_calloc(allocator, 1, sizeof(struct host_purge_callback_options)); purge_callback_options->allocator = allocator; aws_ref_count_init( &purge_callback_options->ref_count, purge_callback_options, s_host_purge_callback_options_destroy); purge_callback_options->on_purge_cache_complete_callback = on_purge_cache_complete_callback; purge_callback_options->user_data = user_data; return purge_callback_options; } static void s_purge_cache_callback(void *user_data) { struct host_purge_callback_options *purge_callback_options = user_data; aws_ref_count_release(&purge_callback_options->ref_count); } /* * resolver lock must be held before calling this function */ static void s_clear_default_resolver_entry_table_synced(struct default_host_resolver *resolver) { struct aws_hash_table *table = &resolver->host_entry_table; for (struct aws_hash_iter iter = aws_hash_iter_begin(table); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct host_entry *entry = iter.element.value; s_shutdown_host_entry(entry); } aws_hash_table_clear(table); } static int s_resolver_purge_cache(struct aws_host_resolver *resolver) { struct default_host_resolver *default_host_resolver = resolver->impl; aws_mutex_lock(&default_host_resolver->resolver_lock); s_clear_default_resolver_entry_table_synced(default_host_resolver); aws_mutex_unlock(&default_host_resolver->resolver_lock); return AWS_OP_SUCCESS; } static void s_purge_host_cache_callback_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct host_purge_callback_options *options = arg; aws_mem_release(options->allocator, task); aws_ref_count_release(&options->ref_count); } static void s_sechdule_purge_cache_callback_async( struct default_host_resolver *default_host_resolver, struct host_purge_callback_options *purge_callback_options) { struct aws_task *task = aws_mem_calloc(default_host_resolver->allocator, 1, sizeof(struct aws_task)); aws_task_init(task, s_purge_host_cache_callback_task, purge_callback_options, "async_purge_host_callback_task"); struct aws_event_loop *loop = aws_event_loop_group_get_next_loop(default_host_resolver->event_loop_group); AWS_FATAL_ASSERT(loop != NULL); aws_event_loop_schedule_task_now(loop, task); } static int s_resolver_purge_cache_with_callback( struct aws_host_resolver *resolver, aws_simple_completion_callback *on_purge_cache_complete_callback, void *user_data) { if (!on_purge_cache_complete_callback) { return s_resolver_purge_cache(resolver); } struct default_host_resolver *default_host_resolver = resolver->impl; aws_mutex_lock(&default_host_resolver->resolver_lock); struct aws_hash_table *table = &default_host_resolver->host_entry_table; struct host_purge_callback_options *purge_callback_options = 
s_host_purge_callback_options_new( default_host_resolver->allocator, on_purge_cache_complete_callback, user_data); /* purge all cache */ for (struct aws_hash_iter iter = aws_hash_iter_begin(table); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct host_entry *entry = iter.element.value; /* acquire a refernce to wait for the callback to trigger */ aws_ref_count_acquire(&purge_callback_options->ref_count); aws_mutex_lock(&entry->entry_lock); entry->on_host_purge_complete = s_purge_cache_callback; entry->on_host_purge_complete_user_data = purge_callback_options; entry->state = DRS_SHUTTING_DOWN; aws_mutex_unlock(&entry->entry_lock); } aws_hash_table_clear(table); aws_mutex_unlock(&default_host_resolver->resolver_lock); /* release the original reference async */ s_sechdule_purge_cache_callback_async(default_host_resolver, purge_callback_options); return AWS_OP_SUCCESS; } static void s_cleanup_default_resolver(struct aws_host_resolver *resolver) { struct default_host_resolver *default_host_resolver = resolver->impl; aws_event_loop_group_release(default_host_resolver->event_loop_group); aws_hash_table_clean_up(&default_host_resolver->host_entry_table); aws_hash_table_clean_up(&default_host_resolver->listener_entry_table); aws_mutex_clean_up(&default_host_resolver->resolver_lock); aws_simple_completion_callback *shutdown_callback = resolver->shutdown_options.shutdown_callback_fn; void *shutdown_completion_user_data = resolver->shutdown_options.shutdown_callback_user_data; aws_mem_release(resolver->allocator, resolver); /* invoke shutdown completion finally */ if (shutdown_callback != NULL) { shutdown_callback(shutdown_completion_user_data); } } static void resolver_destroy(struct aws_host_resolver *resolver) { struct default_host_resolver *default_host_resolver = resolver->impl; bool cleanup_resolver = false; aws_mutex_lock(&default_host_resolver->resolver_lock); AWS_FATAL_ASSERT(default_host_resolver->state == DRS_ACTIVE); s_clear_default_resolver_entry_table_synced(default_host_resolver); default_host_resolver->state = DRS_SHUTTING_DOWN; if (default_host_resolver->pending_host_entry_shutdown_completion_callbacks == 0) { cleanup_resolver = true; } aws_mutex_unlock(&default_host_resolver->resolver_lock); if (cleanup_resolver) { s_cleanup_default_resolver(resolver); } } struct pending_callback { aws_on_host_resolved_result_fn *callback; void *user_data; struct aws_linked_list_node node; }; static void s_clear_address_list(struct aws_array_list *address_list) { for (size_t i = 0; i < aws_array_list_length(address_list); ++i) { struct aws_host_address *address = NULL; aws_array_list_get_at_ptr(address_list, (void **)&address, i); aws_host_address_clean_up(address); } aws_array_list_clear(address_list); } static void s_clean_up_host_entry(struct host_entry *entry) { if (entry == NULL) { return; } /* * This can happen if the resolver's final reference drops while an unanswered query is pending on an entry. * * You could add an assertion that the resolver is in the shut down state if this condition hits but that * requires additional locking just to make the assert. 
*/ if (!aws_linked_list_empty(&entry->pending_resolution_callbacks)) { aws_raise_error(AWS_IO_DNS_HOST_REMOVED_FROM_CACHE); } while (!aws_linked_list_empty(&entry->pending_resolution_callbacks)) { struct aws_linked_list_node *resolution_callback_node = aws_linked_list_pop_front(&entry->pending_resolution_callbacks); struct pending_callback *pending_callback = AWS_CONTAINER_OF(resolution_callback_node, struct pending_callback, node); pending_callback->callback( entry->resolver, entry->host_name, AWS_IO_DNS_HOST_REMOVED_FROM_CACHE, NULL, pending_callback->user_data); aws_mem_release(entry->allocator, pending_callback); } aws_cache_destroy(entry->aaaa_records); aws_cache_destroy(entry->a_records); aws_cache_destroy(entry->failed_connection_a_records); aws_cache_destroy(entry->failed_connection_aaaa_records); aws_string_destroy((void *)entry->host_name); s_clear_address_list(&entry->new_addresses); aws_array_list_clean_up(&entry->new_addresses); s_clear_address_list(&entry->expired_addresses); aws_array_list_clean_up(&entry->expired_addresses); aws_mem_release(entry->allocator, entry); } static void s_on_host_entry_shutdown_completion(void *user_data) { struct host_entry *entry = user_data; struct aws_host_resolver *resolver = entry->resolver; struct default_host_resolver *default_host_resolver = resolver->impl; s_clean_up_host_entry(entry); bool cleanup_resolver = false; aws_mutex_lock(&default_host_resolver->resolver_lock); --default_host_resolver->pending_host_entry_shutdown_completion_callbacks; if (default_host_resolver->state == DRS_SHUTTING_DOWN && default_host_resolver->pending_host_entry_shutdown_completion_callbacks == 0) { cleanup_resolver = true; } aws_mutex_unlock(&default_host_resolver->resolver_lock); if (cleanup_resolver) { s_cleanup_default_resolver(resolver); } } static int s_copy_address_into_array_list(struct aws_host_address *address, struct aws_array_list *address_list) { /* * This is the worst. * * We have to copy the cache address while we still have a write lock. Otherwise, connection failures * can sneak in and destroy our address by moving the address to/from the various lru caches. * * But there's no nice copy construction into an array list, so we get to * (1) Push a zeroed dummy element onto the array list * (2) Get its pointer * (3) Call aws_host_address_copy onto it. If that fails, pop the dummy element. */ struct aws_host_address dummy; AWS_ZERO_STRUCT(dummy); if (aws_array_list_push_back(address_list, &dummy)) { return AWS_OP_ERR; } struct aws_host_address *dest_copy = NULL; aws_array_list_get_at_ptr(address_list, (void **)&dest_copy, aws_array_list_length(address_list) - 1); AWS_FATAL_ASSERT(dest_copy != NULL); if (aws_host_address_copy(address, dest_copy)) { aws_array_list_pop_back(address_list); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static uint64_t s_get_system_time_for_default_resolver(struct aws_host_resolver *resolver) { struct default_host_resolver *default_resolver = resolver->impl; uint64_t timestamp = 0; (*default_resolver->system_clock_fn)(×tamp); return timestamp; } /* this only ever gets called after resolution has already run. We expect that the entry's lock has been acquired for writing before this function is called and released afterwards. 
*/ static inline void process_records( struct host_entry *host_entry, struct aws_cache *records, struct aws_cache *failed_records) { struct aws_host_resolver *resolver = host_entry->resolver; uint64_t timestamp = s_get_system_time_for_default_resolver(resolver); size_t record_count = aws_cache_get_element_count(records); size_t expired_records = 0; /* since this only ever gets called after resolution has already run, we're in a dns outage * if everything is expired. Leave an element so we can keep trying. */ for (size_t index = 0; index < record_count && expired_records < record_count - 1; ++index) { struct aws_host_address_cache_entry *lru_element_entry = aws_lru_cache_use_lru_element(records); if (lru_element_entry->address.expiry < timestamp) { AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "static: purging expired record %s for %s", lru_element_entry->address.address->bytes, lru_element_entry->address.host->bytes); expired_records++; aws_cache_remove(records, lru_element_entry->address.address); } } record_count = aws_cache_get_element_count(records); AWS_LOGF_TRACE(AWS_LS_IO_DNS, "static: remaining record count for host %d", (int)record_count); /* if we don't have any known good addresses, take the least recently used, but not expired address with a history * of spotty behavior and upgrade it for reuse. If it's expired, leave it and let the resolve fail. Better to fail * than accidentally give a kids' app an IP address to somebody's adult website when the IP address gets rebound to * a different endpoint. The moral of the story here is to not disable SSL verification! */ if (!record_count) { size_t failed_count = aws_cache_get_element_count(failed_records); for (size_t index = 0; index < failed_count; ++index) { struct aws_host_address_cache_entry *lru_element_entry = aws_lru_cache_use_lru_element(failed_records); if (timestamp >= lru_element_entry->address.expiry) { continue; } struct aws_host_address_cache_entry *to_add = aws_mem_calloc(host_entry->allocator, 1, sizeof(struct aws_host_address_cache_entry)); if (to_add == NULL) { continue; } if (aws_host_address_cache_entry_copy(lru_element_entry, to_add) || aws_cache_put(records, to_add->address.address, to_add)) { aws_host_address_clean_up(&to_add->address); aws_mem_release(host_entry->allocator, to_add); continue; } /* * Promoting an address from failed to good should trigger the new address callback */ s_copy_address_into_array_list(&lru_element_entry->address, &host_entry->new_addresses); AWS_LOGF_INFO( AWS_LS_IO_DNS, "static: promoting spotty record %s for %s back to good list", lru_element_entry->address.address->bytes, lru_element_entry->address.host->bytes); aws_cache_remove(failed_records, lru_element_entry->address.address); /* we only want to promote one per process run.*/ break; } } } static int s_resolver_purge_host_cache( struct aws_host_resolver *resolver, const struct aws_host_resolver_purge_host_options *options) { AWS_PRECONDITION(resolver); if (options == NULL) { AWS_LOGF_ERROR(AWS_LS_IO_DNS, "Cannot purge host cache; options structure is NULL."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct default_host_resolver *default_host_resolver = resolver->impl; AWS_LOGF_INFO(AWS_LS_IO_DNS, "id=%p: purging record for %s", (void *)resolver, options->host->bytes); aws_mutex_lock(&default_host_resolver->resolver_lock); struct aws_hash_element *element = NULL; aws_hash_table_find(&default_host_resolver->host_entry_table, options->host, &element); /* Success if entry doesn't exist in cache. 
*/ if (element == NULL) { aws_mutex_unlock(&default_host_resolver->resolver_lock); if (options->on_host_purge_complete_callback != NULL) { /* Schedule completion callback asynchronouly */ struct host_purge_callback_options *purge_callback_options = s_host_purge_callback_options_new( default_host_resolver->allocator, options->on_host_purge_complete_callback, options->user_data); s_sechdule_purge_cache_callback_async(default_host_resolver, purge_callback_options); } return AWS_OP_SUCCESS; } struct host_entry *host_entry = element->value; AWS_FATAL_ASSERT(host_entry); /* Setup the on_host_purge_complete callback. */ aws_mutex_lock(&host_entry->entry_lock); AWS_FATAL_ASSERT(!host_entry->on_host_purge_complete); AWS_FATAL_ASSERT(!host_entry->on_host_purge_complete_user_data); host_entry->on_host_purge_complete = options->on_host_purge_complete_callback; host_entry->on_host_purge_complete_user_data = options->user_data; aws_mutex_unlock(&host_entry->entry_lock); s_shutdown_host_entry(host_entry); aws_hash_table_remove_element(&default_host_resolver->host_entry_table, element); aws_mutex_unlock(&default_host_resolver->resolver_lock); return AWS_OP_SUCCESS; } static int resolver_record_connection_failure( struct aws_host_resolver *resolver, const struct aws_host_address *address) { struct default_host_resolver *default_host_resolver = resolver->impl; AWS_LOGF_INFO( AWS_LS_IO_DNS, "id=%p: recording failure for record %s for %s, moving to bad list", (void *)resolver, address->address->bytes, address->host->bytes); aws_mutex_lock(&default_host_resolver->resolver_lock); struct aws_hash_element *element = NULL; if (aws_hash_table_find(&default_host_resolver->host_entry_table, address->host, &element)) { aws_mutex_unlock(&default_host_resolver->resolver_lock); return AWS_OP_ERR; } struct host_entry *host_entry = NULL; if (element != NULL) { host_entry = element->value; AWS_FATAL_ASSERT(host_entry); } if (host_entry) { struct aws_host_address_cache_entry *cached_address_entry = NULL; aws_mutex_lock(&host_entry->entry_lock); aws_mutex_unlock(&default_host_resolver->resolver_lock); struct aws_cache *address_table = address->record_type == AWS_ADDRESS_RECORD_TYPE_AAAA ? host_entry->aaaa_records : host_entry->a_records; struct aws_cache *failed_table = address->record_type == AWS_ADDRESS_RECORD_TYPE_AAAA ? 
host_entry->failed_connection_aaaa_records : host_entry->failed_connection_a_records; aws_cache_find(address_table, address->address, (void **)&cached_address_entry); struct aws_host_address_cache_entry *address_entry_copy = NULL; if (cached_address_entry) { address_entry_copy = aws_mem_calloc(resolver->allocator, 1, sizeof(struct aws_host_address_cache_entry)); if (!address_entry_copy || aws_host_address_cache_entry_copy(cached_address_entry, address_entry_copy)) { goto error_host_entry_cleanup; } /* * This will trigger an expiration callback since the good caches add the removed address to the * host_entry's expired list, via the cache's on_delete callback */ if (aws_cache_remove(address_table, cached_address_entry->address.address)) { goto error_host_entry_cleanup; } address_entry_copy->address.connection_failure_count += 1; if (aws_cache_put(failed_table, address_entry_copy->address.address, address_entry_copy)) { goto error_host_entry_cleanup; } } else { if (aws_cache_find(failed_table, address->address, (void **)&cached_address_entry)) { goto error_host_entry_cleanup; } if (cached_address_entry) { cached_address_entry->address.connection_failure_count += 1; } } aws_mutex_unlock(&host_entry->entry_lock); return AWS_OP_SUCCESS; error_host_entry_cleanup: if (address_entry_copy) { aws_host_address_clean_up(&address_entry_copy->address); aws_mem_release(resolver->allocator, address_entry_copy); } aws_mutex_unlock(&host_entry->entry_lock); return AWS_OP_ERR; } aws_mutex_unlock(&default_host_resolver->resolver_lock); return AWS_OP_SUCCESS; } /* * A bunch of convenience functions for the host resolver background thread function */ static struct aws_host_address_cache_entry *s_find_cached_address_entry_aux( struct aws_cache *primary_records, struct aws_cache *fallback_records, const struct aws_string *address) { struct aws_host_address_cache_entry *found = NULL; aws_cache_find(primary_records, address, (void **)&found); if (found == NULL) { aws_cache_find(fallback_records, address, (void **)&found); } return found; } /* * Looks in both the good and failed connection record sets for a given host record */ static struct aws_host_address_cache_entry *s_find_cached_address_entry( struct host_entry *entry, const struct aws_string *address, enum aws_address_record_type record_type) { switch (record_type) { case AWS_ADDRESS_RECORD_TYPE_AAAA: return s_find_cached_address_entry_aux(entry->aaaa_records, entry->failed_connection_aaaa_records, address); case AWS_ADDRESS_RECORD_TYPE_A: return s_find_cached_address_entry_aux(entry->a_records, entry->failed_connection_a_records, address); default: return NULL; } } static struct aws_host_address_cache_entry *s_get_lru_address_entry_aux( struct aws_cache *primary_records, struct aws_cache *fallback_records) { struct aws_host_address_cache_entry *address_entry = aws_lru_cache_use_lru_element(primary_records); if (address_entry == NULL) { aws_lru_cache_use_lru_element(fallback_records); } return address_entry; } /* * Looks in both the good and failed connection record sets for the LRU host record */ static struct aws_host_address_cache_entry *s_get_lru_address( struct host_entry *entry, enum aws_address_record_type record_type) { switch (record_type) { case AWS_ADDRESS_RECORD_TYPE_AAAA: return s_get_lru_address_entry_aux(entry->aaaa_records, entry->failed_connection_aaaa_records); case AWS_ADDRESS_RECORD_TYPE_A: return s_get_lru_address_entry_aux(entry->a_records, entry->failed_connection_a_records); default: return NULL; } } static void s_update_address_cache( 
struct host_entry *host_entry, struct aws_array_list *address_list, uint64_t new_expiration) { AWS_PRECONDITION(host_entry); AWS_PRECONDITION(address_list); for (size_t i = 0; i < aws_array_list_length(address_list); ++i) { struct aws_host_address *fresh_resolved_address = NULL; aws_array_list_get_at_ptr(address_list, (void **)&fresh_resolved_address, i); struct aws_host_address_cache_entry *address_to_cache_entry = s_find_cached_address_entry( host_entry, fresh_resolved_address->address, fresh_resolved_address->record_type); if (address_to_cache_entry) { address_to_cache_entry->address.expiry = new_expiration; AWS_LOGF_TRACE( AWS_LS_IO_DNS, "static: updating expiry for %s for host %s to %llu", address_to_cache_entry->address.address->bytes, host_entry->host_name->bytes, (unsigned long long)new_expiration); } else { address_to_cache_entry = aws_mem_calloc(host_entry->allocator, 1, sizeof(struct aws_host_address_cache_entry)); aws_host_address_move(fresh_resolved_address, &address_to_cache_entry->address); address_to_cache_entry->address.expiry = new_expiration; address_to_cache_entry->entry = host_entry; struct aws_cache *address_table = address_to_cache_entry->address.record_type == AWS_ADDRESS_RECORD_TYPE_AAAA ? host_entry->aaaa_records : host_entry->a_records; if (aws_cache_put(address_table, address_to_cache_entry->address.address, address_to_cache_entry)) { AWS_LOGF_ERROR( AWS_LS_IO_DNS, "static: could not add new address to host entry cache for host '%s' in " "s_update_address_cache.", host_entry->host_name->bytes); continue; } AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "static: new address resolved %s for host %s caching", address_to_cache_entry->address.address->bytes, host_entry->host_name->bytes); struct aws_host_address new_address_copy; if (aws_host_address_copy(&address_to_cache_entry->address, &new_address_copy)) { AWS_LOGF_ERROR( AWS_LS_IO_DNS, "static: could not copy address for new-address list for host '%s' in s_update_address_cache.", host_entry->host_name->bytes); continue; } if (aws_array_list_push_back(&host_entry->new_addresses, &new_address_copy)) { aws_host_address_clean_up(&new_address_copy); AWS_LOGF_ERROR( AWS_LS_IO_DNS, "static: could not push address to new-address list for host '%s' in s_update_address_cache.", host_entry->host_name->bytes); continue; } } } } static void s_copy_address_into_callback_set( struct aws_host_address_cache_entry *entry, struct aws_array_list *callback_addresses, const struct aws_string *host_name) { if (entry != NULL) { if (s_copy_address_into_array_list(&entry->address, callback_addresses)) { AWS_LOGF_ERROR( AWS_LS_IO_DNS, "static: failed to vend address %s for host %s to caller", entry->address.address->bytes, host_name->bytes); return; } entry->address.use_count += 1; AWS_LOGF_TRACE( AWS_LS_IO_DNS, "static: vending address %s for host %s to caller", entry->address.address->bytes, host_name->bytes); } } static bool s_host_entry_finished_pred(void *user_data) { struct host_entry *entry = user_data; return entry->state == DRS_SHUTTING_DOWN; } static bool s_host_entry_finished_or_pending_request_pred(void *user_data) { struct host_entry *entry = user_data; return entry->state == DRS_SHUTTING_DOWN || !aws_linked_list_empty(&entry->pending_resolution_callbacks); } static const uint64_t AWS_MINIMUM_WAIT_BETWEEN_DNS_QUERIES_NS = 100000000; /* 100 ms */ static void aws_host_resolver_thread(void *arg) { struct host_entry *host_entry = arg; uint64_t max_no_solicitation_interval = aws_timestamp_convert( aws_max_u64(1, 
host_entry->resolution_config.max_ttl), AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); uint64_t wait_between_resolves_interval = aws_min_u64(max_no_solicitation_interval, host_entry->resolve_frequency_ns); uint64_t shutdown_only_wait_time = AWS_MINIMUM_WAIT_BETWEEN_DNS_QUERIES_NS; uint64_t request_interruptible_wait_time = 0; if (wait_between_resolves_interval > shutdown_only_wait_time) { request_interruptible_wait_time = wait_between_resolves_interval - shutdown_only_wait_time; } struct aws_linked_list listener_list; aws_linked_list_init(&listener_list); struct aws_linked_list listener_destroy_list; aws_linked_list_init(&listener_destroy_list); bool keep_going = true; struct aws_array_list address_list; AWS_ZERO_STRUCT(address_list); struct aws_array_list new_address_list; AWS_ZERO_STRUCT(new_address_list); struct aws_array_list expired_address_list; AWS_ZERO_STRUCT(expired_address_list); if (aws_array_list_init_dynamic(&address_list, host_entry->allocator, 4, sizeof(struct aws_host_address))) { goto done; } if (aws_array_list_init_dynamic(&new_address_list, host_entry->allocator, 4, sizeof(struct aws_host_address))) { goto done; } if (aws_array_list_init_dynamic(&expired_address_list, host_entry->allocator, 4, sizeof(struct aws_host_address))) { goto done; } while (keep_going) { /* resolve and then process each record */ int err_code = AWS_ERROR_SUCCESS; if (host_entry->resolution_config.impl( host_entry->allocator, host_entry->host_name, &address_list, host_entry->resolution_config.impl_data)) { err_code = aws_last_error(); } if (err_code == AWS_ERROR_SUCCESS) { AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "static, resolving host %s successful, returned %d addresses", aws_string_c_str(host_entry->host_name), (int)aws_array_list_length(&address_list)); } else { AWS_LOGF_WARN( AWS_LS_IO_DNS, "static, resolving host %s failed, ec %d (%s)", aws_string_c_str(host_entry->host_name), err_code, aws_error_debug_str(err_code)); } uint64_t timestamp = s_get_system_time_for_default_resolver(host_entry->resolver); uint64_t new_expiry = timestamp + (host_entry->resolution_config.max_ttl * NS_PER_SEC); struct aws_linked_list pending_resolve_copy; aws_linked_list_init(&pending_resolve_copy); /* * Within the lock we * (1) Update the cache with the newly resolved addresses * (2) Process all held addresses looking for expired or promotable ones * (3) Prep for callback invocations */ aws_mutex_lock(&host_entry->entry_lock); if (!err_code) { s_update_address_cache(host_entry, &address_list, new_expiry); } /* * process and clean_up records in the entry. occasionally, failed connect records will be upgraded * for retry. 
*/ process_records(host_entry, host_entry->aaaa_records, host_entry->failed_connection_aaaa_records); process_records(host_entry, host_entry->a_records, host_entry->failed_connection_a_records); aws_linked_list_swap_contents(&pending_resolve_copy, &host_entry->pending_resolution_callbacks); aws_mutex_unlock(&host_entry->entry_lock); /* * Clean up resolved addressed outside of the lock */ s_clear_address_list(&address_list); struct aws_host_address address_array[2]; AWS_ZERO_ARRAY(address_array); /* * Perform the actual subscriber notifications */ while (!aws_linked_list_empty(&pending_resolve_copy)) { struct aws_linked_list_node *resolution_callback_node = aws_linked_list_pop_front(&pending_resolve_copy); struct pending_callback *pending_callback = AWS_CONTAINER_OF(resolution_callback_node, struct pending_callback, node); struct aws_array_list callback_address_list; aws_array_list_init_static(&callback_address_list, address_array, 2, sizeof(struct aws_host_address)); aws_mutex_lock(&host_entry->entry_lock); s_copy_address_into_callback_set( s_get_lru_address(host_entry, AWS_ADDRESS_RECORD_TYPE_AAAA), &callback_address_list, host_entry->host_name); s_copy_address_into_callback_set( s_get_lru_address(host_entry, AWS_ADDRESS_RECORD_TYPE_A), &callback_address_list, host_entry->host_name); aws_mutex_unlock(&host_entry->entry_lock); size_t callback_address_list_size = aws_array_list_length(&callback_address_list); if (callback_address_list_size > 0) { AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "static, invoking resolution callback for host %s with %d addresses", aws_string_c_str(host_entry->host_name), (int)callback_address_list_size); } else { AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "static, invoking resolution callback for host %s with failure", aws_string_c_str(host_entry->host_name)); } if (callback_address_list_size > 0) { pending_callback->callback( host_entry->resolver, host_entry->host_name, AWS_OP_SUCCESS, &callback_address_list, pending_callback->user_data); } else { int error_code = (err_code != AWS_ERROR_SUCCESS) ? err_code : AWS_IO_DNS_QUERY_FAILED; pending_callback->callback( host_entry->resolver, host_entry->host_name, error_code, NULL, pending_callback->user_data); } s_clear_address_list(&callback_address_list); aws_mem_release(host_entry->allocator, pending_callback); } aws_mutex_lock(&host_entry->entry_lock); ++host_entry->resolves_since_last_request; /* * A long resolve frequency matched with a connection failure can induce a state of DNS starvation, where * additional resolution requests go into the queue but since there's no good records and the thread is sleeping * for a long time, nothing happens. * * While we could make the wait predicate also check the queue of requests, there is a worry that a * host that can't be resolved (user error, dns record removal, etc...) could lead to a "spammy" scenario * where the thread generates DNS requests extremely quickly, ie, the sleep becomes almost instant. 
* * We'd like to be able to express the wait here as something a bit more complex: * * "Wait until either (1) shutdown notice, or (2) a small amount of time has passed and there are pending * requests, or (3) the resolution interval has passed" * * While seemingly complicated, we can do this actually just by chaining two waits: * * (1) The first wait is for a short amount of time and only predicates on the shutdown notice * (2) The second wait is for the remaining frequency interval and predicates on either the shutdown notice * or a pending resolve request * * This leaves us with wait behavior where: * (1) Shutdown always fully interrupts and immediately causes the thread function to complete * (2) Absent shutdown, there is always a controllable, non-trivial sleep between resolves * (3) Starvation is avoided as pending requests can wake the resolver thread independent of resolution * frequency */ aws_condition_variable_wait_for_pred( &host_entry->entry_signal, &host_entry->entry_lock, shutdown_only_wait_time, s_host_entry_finished_pred, host_entry); if (request_interruptible_wait_time > 0) { aws_condition_variable_wait_for_pred( &host_entry->entry_signal, &host_entry->entry_lock, request_interruptible_wait_time, s_host_entry_finished_or_pending_request_pred, host_entry); } aws_mutex_unlock(&host_entry->entry_lock); /* * This is a bit awkward that we unlock the entry and then relock both the resolver and the entry, but it * is mandatory that -- in order to maintain the consistent view of the resolver table (entry exist => entry * is alive and can be queried) -- we have the resolver lock as well before making the decision to remove * the entry from the table and terminate the thread. */ struct default_host_resolver *resolver = host_entry->resolver->impl; aws_mutex_lock(&resolver->resolver_lock); aws_mutex_lock(&host_entry->entry_lock); uint64_t now = s_get_system_time_for_default_resolver(host_entry->resolver); /* * The only way we terminate the loop with pending queries is if the resolver itself has no more references * to it and is going away. In that case, the pending queries will be completed (with failure) by the * final clean up of this entry. */ if (aws_linked_list_empty(&host_entry->pending_resolution_callbacks) && host_entry->last_resolve_request_timestamp_ns + max_no_solicitation_interval < now) { host_entry->state = DRS_SHUTTING_DOWN; } keep_going = host_entry->state == DRS_ACTIVE; if (!keep_going) { aws_hash_table_remove(&resolver->host_entry_table, host_entry->host_name, NULL, NULL); } aws_array_list_swap_contents(&host_entry->new_addresses, &new_address_list); aws_array_list_swap_contents(&host_entry->expired_addresses, &expired_address_list); aws_mutex_unlock(&host_entry->entry_lock); aws_mutex_unlock(&resolver->resolver_lock); s_clear_address_list(&new_address_list); s_clear_address_list(&expired_address_list); } AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "static: Either no requests have been made for an address for %s for the duration " "of the ttl, or this thread is being forcibly shutdown. 
Killing thread.", host_entry->host_name->bytes); done: AWS_FATAL_ASSERT(aws_array_list_length(&address_list) == 0); AWS_FATAL_ASSERT(aws_array_list_length(&new_address_list) == 0); AWS_FATAL_ASSERT(aws_array_list_length(&expired_address_list) == 0); aws_array_list_clean_up(&address_list); aws_array_list_clean_up(&new_address_list); aws_array_list_clean_up(&expired_address_list); /* trigger the purge complete callback */ if (host_entry->on_host_purge_complete != NULL) { host_entry->on_host_purge_complete(host_entry->on_host_purge_complete_user_data); } /* please don't fail */ aws_thread_current_at_exit(s_on_host_entry_shutdown_completion, host_entry); } static void on_cache_entry_removed_helper(struct aws_host_address_cache_entry *entry) { AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "static: purging address %s for host %s from " "the cache due to cache eviction or shutdown", entry->address.address->bytes, entry->address.host->bytes); struct aws_allocator *allocator = entry->address.allocator; aws_host_address_clean_up(&entry->address); aws_mem_release(allocator, entry); } static void on_good_address_entry_removed(void *value) { struct aws_host_address_cache_entry *entry = value; if (entry == NULL) { return; } s_copy_address_into_array_list(&entry->address, &entry->entry->expired_addresses); on_cache_entry_removed_helper(entry); } static void on_failed_address_entry_removed(void *value) { struct aws_host_address_cache_entry *entry = value; on_cache_entry_removed_helper(entry); } /* * The resolver lock must be held before calling this function */ static inline int create_and_init_host_entry( struct aws_host_resolver *resolver, const struct aws_string *host_name, aws_on_host_resolved_result_fn *res, const struct aws_host_resolution_config *config, uint64_t timestamp, void *user_data) { struct host_entry *new_host_entry = aws_mem_calloc(resolver->allocator, 1, sizeof(struct host_entry)); if (!new_host_entry) { return AWS_OP_ERR; } new_host_entry->resolver = resolver; new_host_entry->allocator = resolver->allocator; new_host_entry->last_resolve_request_timestamp_ns = timestamp; new_host_entry->resolves_since_last_request = 0; new_host_entry->resolve_frequency_ns = (config->resolve_frequency_ns != 0) ? 
config->resolve_frequency_ns : NS_PER_SEC; new_host_entry->state = DRS_ACTIVE; bool thread_init = false; struct pending_callback *pending_callback = NULL; const struct aws_string *host_string_copy = aws_string_new_from_string(resolver->allocator, host_name); if (AWS_UNLIKELY(!host_string_copy)) { goto setup_host_entry_error; } new_host_entry->host_name = host_string_copy; new_host_entry->a_records = aws_cache_new_lru( new_host_entry->allocator, aws_hash_string, aws_hash_callback_string_eq, NULL, on_good_address_entry_removed, config->max_ttl); if (AWS_UNLIKELY(!new_host_entry->a_records)) { goto setup_host_entry_error; } new_host_entry->aaaa_records = aws_cache_new_lru( new_host_entry->allocator, aws_hash_string, aws_hash_callback_string_eq, NULL, on_good_address_entry_removed, config->max_ttl); if (AWS_UNLIKELY(!new_host_entry->aaaa_records)) { goto setup_host_entry_error; } new_host_entry->failed_connection_a_records = aws_cache_new_lru( new_host_entry->allocator, aws_hash_string, aws_hash_callback_string_eq, NULL, on_failed_address_entry_removed, config->max_ttl); if (AWS_UNLIKELY(!new_host_entry->failed_connection_a_records)) { goto setup_host_entry_error; } new_host_entry->failed_connection_aaaa_records = aws_cache_new_lru( new_host_entry->allocator, aws_hash_string, aws_hash_callback_string_eq, NULL, on_failed_address_entry_removed, config->max_ttl); if (AWS_UNLIKELY(!new_host_entry->failed_connection_aaaa_records)) { goto setup_host_entry_error; } if (aws_array_list_init_dynamic( &new_host_entry->new_addresses, new_host_entry->allocator, 4, sizeof(struct aws_host_address))) { goto setup_host_entry_error; } if (aws_array_list_init_dynamic( &new_host_entry->expired_addresses, new_host_entry->allocator, 4, sizeof(struct aws_host_address))) { goto setup_host_entry_error; } aws_linked_list_init(&new_host_entry->pending_resolution_callbacks); pending_callback = aws_mem_acquire(resolver->allocator, sizeof(struct pending_callback)); if (AWS_UNLIKELY(!pending_callback)) { goto setup_host_entry_error; } /*add the current callback here */ pending_callback->user_data = user_data; pending_callback->callback = res; aws_linked_list_push_back(&new_host_entry->pending_resolution_callbacks, &pending_callback->node); aws_mutex_init(&new_host_entry->entry_lock); new_host_entry->resolution_config = *config; aws_condition_variable_init(&new_host_entry->entry_signal); aws_thread_init(&new_host_entry->resolver_thread, resolver->allocator); thread_init = true; struct default_host_resolver *default_host_resolver = resolver->impl; if (AWS_UNLIKELY( aws_hash_table_put(&default_host_resolver->host_entry_table, host_string_copy, new_host_entry, NULL))) { goto setup_host_entry_error; } struct aws_thread_options thread_options = *aws_default_thread_options(); thread_options.join_strategy = AWS_TJS_MANAGED; thread_options.name = aws_byte_cursor_from_c_str("AwsHostResolver"); /* 15 characters is max for Linux */ if (aws_thread_launch( &new_host_entry->resolver_thread, aws_host_resolver_thread, new_host_entry, &thread_options)) { goto setup_host_entry_error; } ++default_host_resolver->pending_host_entry_shutdown_completion_callbacks; return AWS_OP_SUCCESS; setup_host_entry_error: if (thread_init) { aws_thread_clean_up(&new_host_entry->resolver_thread); } // If we registered a callback, clear it. So that we don’t trigger callback and return an error. 
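/*
 * s_clean_up_host_entry() below completes any callbacks still queued on
 * pending_resolution_callbacks with AWS_IO_DNS_HOST_REMOVED_FROM_CACHE, so the
 * caller's callback node is removed first and this failure is reported only
 * through the AWS_OP_ERR return value.
 */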
if (!aws_linked_list_empty(&new_host_entry->pending_resolution_callbacks)) { aws_linked_list_remove(&pending_callback->node); } s_clean_up_host_entry(new_host_entry); return AWS_OP_ERR; } static int default_resolve_host( struct aws_host_resolver *resolver, const struct aws_string *host_name, aws_on_host_resolved_result_fn *res, const struct aws_host_resolution_config *config, void *user_data) { int result = AWS_OP_SUCCESS; AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "id=%p: Host resolution requested for %s", (void *)resolver, host_name->bytes); uint64_t timestamp = s_get_system_time_for_default_resolver(resolver); struct default_host_resolver *default_host_resolver = resolver->impl; aws_mutex_lock(&default_host_resolver->resolver_lock); struct aws_hash_element *element = NULL; /* we don't care about the error code here, only that the host_entry was found or not. */ aws_hash_table_find(&default_host_resolver->host_entry_table, host_name, &element); struct host_entry *host_entry = NULL; if (element != NULL) { host_entry = element->value; AWS_FATAL_ASSERT(host_entry != NULL); } if (!host_entry) { AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "id=%p: No cached entries found for %s starting new resolver thread.", (void *)resolver, host_name->bytes); result = create_and_init_host_entry(resolver, host_name, res, config, timestamp, user_data); aws_mutex_unlock(&default_host_resolver->resolver_lock); return result; } aws_mutex_lock(&host_entry->entry_lock); /* * We don't need to make any resolver side-affects in the remaining logic and it's impossible for the entry * to disappear underneath us while holding its lock, so its safe to release the resolver lock and let other * things query other entries. */ aws_mutex_unlock(&default_host_resolver->resolver_lock); host_entry->last_resolve_request_timestamp_ns = timestamp; host_entry->resolves_since_last_request = 0; struct aws_host_address_cache_entry *aaaa_entry = aws_lru_cache_use_lru_element(host_entry->aaaa_records); struct aws_host_address *aaaa_record = (aaaa_entry != NULL) ? &aaaa_entry->address : NULL; struct aws_host_address_cache_entry *a_entry = aws_lru_cache_use_lru_element(host_entry->a_records); struct aws_host_address *a_record = (a_entry != NULL) ? &a_entry->address : NULL; struct aws_host_address address_array[2]; AWS_ZERO_ARRAY(address_array); struct aws_array_list callback_address_list; aws_array_list_init_static(&callback_address_list, address_array, 2, sizeof(struct aws_host_address)); if ((aaaa_record || a_record)) { AWS_LOGF_DEBUG( AWS_LS_IO_DNS, "id=%p: cached entries found for %s returning to caller.", (void *)resolver, host_name->bytes); /* these will all need to be copied so that we don't hold the lock during the callback. */ if (aaaa_record) { struct aws_host_address aaaa_record_cpy; aws_host_address_copy(aaaa_record, &aaaa_record_cpy); aws_array_list_push_back(&callback_address_list, &aaaa_record_cpy); AWS_LOGF_TRACE( AWS_LS_IO_DNS, "id=%p: vending address %s for host %s to caller", (void *)resolver, aaaa_record->address->bytes, host_entry->host_name->bytes); } if (a_record) { struct aws_host_address a_record_cpy; aws_host_address_copy(a_record, &a_record_cpy); aws_array_list_push_back(&callback_address_list, &a_record_cpy); AWS_LOGF_TRACE( AWS_LS_IO_DNS, "id=%p: vending address %s for host %s to caller", (void *)resolver, a_record->address->bytes, host_entry->host_name->bytes); } aws_mutex_unlock(&host_entry->entry_lock); /* we don't want to do the callback WHILE we hold the lock someone may reentrantly call us. 
*/ // TODO: Fire the callback asynchronously res(resolver, host_name, AWS_OP_SUCCESS, &callback_address_list, user_data); for (size_t i = 0; i < aws_array_list_length(&callback_address_list); ++i) { struct aws_host_address *address_ptr = NULL; aws_array_list_get_at_ptr(&callback_address_list, (void **)&address_ptr, i); aws_host_address_clean_up(address_ptr); } aws_array_list_clean_up(&callback_address_list); return result; } struct pending_callback *pending_callback = aws_mem_acquire(default_host_resolver->allocator, sizeof(struct pending_callback)); if (pending_callback != NULL) { pending_callback->user_data = user_data; pending_callback->callback = res; aws_linked_list_push_back(&host_entry->pending_resolution_callbacks, &pending_callback->node); /* * intentionally signal under the lock; similar to the shutdown case, we can't guarantee the resolver * is still around once the lock is released. */ aws_condition_variable_notify_all(&host_entry->entry_signal); } else { result = AWS_OP_ERR; } aws_mutex_unlock(&host_entry->entry_lock); return result; } static size_t default_get_host_address_count( struct aws_host_resolver *host_resolver, const struct aws_string *host_name, uint32_t flags) { struct default_host_resolver *default_host_resolver = host_resolver->impl; size_t address_count = 0; aws_mutex_lock(&default_host_resolver->resolver_lock); struct aws_hash_element *element = NULL; aws_hash_table_find(&default_host_resolver->host_entry_table, host_name, &element); if (element != NULL) { struct host_entry *host_entry = element->value; if (host_entry != NULL) { aws_mutex_lock(&host_entry->entry_lock); if ((flags & AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A) != 0) { address_count += aws_cache_get_element_count(host_entry->a_records); } if ((flags & AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA) != 0) { address_count += aws_cache_get_element_count(host_entry->aaaa_records); } aws_mutex_unlock(&host_entry->entry_lock); } } aws_mutex_unlock(&default_host_resolver->resolver_lock); return address_count; } static struct aws_host_resolver_vtable s_vtable = { .purge_cache = s_resolver_purge_cache, .purge_cache_with_callback = s_resolver_purge_cache_with_callback, .resolve_host = default_resolve_host, .record_connection_failure = resolver_record_connection_failure, .get_host_address_count = default_get_host_address_count, .destroy = resolver_destroy, .purge_host_cache = s_resolver_purge_host_cache, }; static void s_aws_host_resolver_destroy(struct aws_host_resolver *resolver) { AWS_ASSERT(resolver->vtable && resolver->vtable->destroy); resolver->vtable->destroy(resolver); } struct aws_host_resolver *aws_host_resolver_new_default( struct aws_allocator *allocator, const struct aws_host_resolver_default_options *options) { AWS_FATAL_ASSERT(options != NULL); AWS_ASSERT(options->el_group); struct aws_host_resolver *resolver = NULL; struct default_host_resolver *default_host_resolver = NULL; if (!aws_mem_acquire_many( allocator, 2, &resolver, sizeof(struct aws_host_resolver), &default_host_resolver, sizeof(struct default_host_resolver))) { return NULL; } AWS_ZERO_STRUCT(*resolver); AWS_ZERO_STRUCT(*default_host_resolver); AWS_LOGF_INFO( AWS_LS_IO_DNS, "id=%p: Initializing default host resolver with %llu max host entries.", (void *)resolver, (unsigned long long)options->max_entries); resolver->vtable = &s_vtable; resolver->allocator = allocator; resolver->impl = default_host_resolver; default_host_resolver->event_loop_group = aws_event_loop_group_acquire(options->el_group); default_host_resolver->allocator = 
allocator; default_host_resolver->pending_host_entry_shutdown_completion_callbacks = 0; default_host_resolver->state = DRS_ACTIVE; aws_mutex_init(&default_host_resolver->resolver_lock); if (aws_hash_table_init( &default_host_resolver->host_entry_table, allocator, options->max_entries, aws_hash_string, aws_hash_callback_string_eq, NULL, NULL)) { goto on_error; } aws_ref_count_init(&resolver->ref_count, resolver, (aws_simple_completion_callback *)s_aws_host_resolver_destroy); if (options->shutdown_options != NULL) { resolver->shutdown_options = *options->shutdown_options; } if (options->system_clock_override_fn != NULL) { default_host_resolver->system_clock_fn = options->system_clock_override_fn; } else { default_host_resolver->system_clock_fn = aws_high_res_clock_get_ticks; } return resolver; on_error: s_cleanup_default_resolver(resolver); return NULL; } struct aws_host_resolver *aws_host_resolver_acquire(struct aws_host_resolver *resolver) { if (resolver != NULL) { aws_ref_count_acquire(&resolver->ref_count); } return resolver; } void aws_host_resolver_release(struct aws_host_resolver *resolver) { if (resolver != NULL) { aws_ref_count_release(&resolver->ref_count); } } size_t aws_host_resolver_get_host_address_count( struct aws_host_resolver *resolver, const struct aws_string *host_name, uint32_t flags) { return resolver->vtable->get_host_address_count(resolver, host_name, flags); } struct aws_host_resolution_config aws_host_resolver_init_default_resolution_config(void) { struct aws_host_resolution_config config = { .impl = aws_default_dns_resolve, .max_ttl = AWS_DEFAULT_DNS_TTL, .impl_data = NULL, .resolve_frequency_ns = NS_PER_SEC, }; return config; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/io.c000066400000000000000000000441521456575232400217200ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #define AWS_DEFINE_ERROR_INFO_IO(CODE, STR) [(CODE)-0x0400] = AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-io") #define AWS_DEFINE_ERROR_PKCS11_CKR(CKR) \ AWS_DEFINE_ERROR_INFO_IO( \ AWS_ERROR_PKCS11_##CKR, "A PKCS#11 (Cryptoki) library function failed with return value " #CKR) /* clang-format off */ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_IO( AWS_IO_CHANNEL_ERROR_ERROR_CANT_ACCEPT_INPUT, "Channel cannot accept input"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_CHANNEL_UNKNOWN_MESSAGE_TYPE, "Channel unknown message type"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW, "A channel handler attempted to propagate a read larger than the upstream window"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED, "An attempt was made to assign an io handle to an event loop, but the handle was already assigned."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_EVENT_LOOP_SHUTDOWN, "Event loop has shutdown and a resource was still using it, the resource has been removed from the loop."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE, "TLS (SSL) negotiation failed"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_ERROR_NOT_NEGOTIATED, "Attempt to read/write, but TLS (SSL) hasn't been negotiated"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_ERROR_WRITE_FAILURE, "Failed to write to TLS handler"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_ERROR_ALERT_RECEIVED, "Fatal TLS Alert was received"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_CTX_ERROR, "Failed to create tls context"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_VERSION_UNSUPPORTED, "A TLS version was specified that is currently not supported. Consider using AWS_IO_TLS_VER_SYS_DEFAULTS, " " and when this lib or the operating system is updated, it will automatically be used."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED, "A TLS Cipher Preference was specified that is currently not supported by the current platform. 
Consider " " using AWS_IO_TLS_CIPHER_SYSTEM_DEFAULT, and when this lib or the operating system is updated, it will " "automatically be used."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_MISSING_ALPN_MESSAGE, "An ALPN message was expected but not received"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_UNHANDLED_ALPN_PROTOCOL_MESSAGE, "An ALPN message was received but a handler was not created by the user"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_FILE_VALIDATION_FAILURE, "A file was read and the input did not match the expected value"), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY, "Attempt to perform operation that must be run inside the event loop thread"), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_IO_ALREADY_SUBSCRIBED, "Already subscribed to receive events"), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_IO_NOT_SUBSCRIBED, "Not subscribed to receive events"), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_IO_OPERATION_CANCELLED, "Operation cancelled before it could complete"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_READ_WOULD_BLOCK, "Read operation would block, try again later"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_BROKEN_PIPE, "Attempt to read or write to io handle that has already been closed."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY, "Socket, unsupported address family."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_INVALID_OPERATION_FOR_TYPE, "Invalid socket operation for socket type."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_CONNECTION_REFUSED, "socket connection refused."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_TIMEOUT, "socket operation timed out."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_NO_ROUTE_TO_HOST, "socket connect failure, no route to host."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_NETWORK_DOWN, "network is down."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_CLOSED, "socket is closed."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_NOT_CONNECTED, "socket not connected."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_INVALID_OPTIONS, "Invalid socket options."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_ADDRESS_IN_USE, "Socket address already in use."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_INVALID_ADDRESS, "Invalid socket address."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE, "Illegal operation for socket state."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SOCKET_CONNECT_ABORTED, "Incoming connection was aborted."), AWS_DEFINE_ERROR_INFO_IO ( AWS_IO_DNS_QUERY_FAILED, "A query to dns failed to resolve."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_DNS_INVALID_NAME, "Host name was invalid for dns resolution."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_DNS_NO_ADDRESS_FOR_HOST, "No address was found for the supplied host name."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_DNS_HOST_REMOVED_FROM_CACHE, "The entries for host name were removed from the local dns cache."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_STREAM_INVALID_SEEK_POSITION, "The seek position was outside of a stream's bounds"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_STREAM_READ_FAILED, "Stream failed to read from the underlying io source"), AWS_DEFINE_ERROR_INFO_IO( DEPRECATED_AWS_IO_INVALID_FILE_HANDLE, "Operation failed because the file handle was invalid"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SHARED_LIBRARY_LOAD_FAILURE, "System call error during attempt to load shared library"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE, "System call error during attempt to find shared library symbol"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_NEGOTIATION_TIMEOUT, "Channel shutdown due to tls negotiation timeout"), AWS_DEFINE_ERROR_INFO_IO( 
AWS_IO_TLS_ALERT_NOT_GRACEFUL, "Channel shutdown due to tls alert. The alert was not for a graceful shutdown."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_MAX_RETRIES_EXCEEDED, "Retry cannot be attempted because the maximum number of retries has been exceeded."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_RETRY_PERMISSION_DENIED, "Retry cannot be attempted because the retry strategy has prevented the operation."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED, "TLS digest was created with an unsupported algorithm"), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED, "TLS signature algorithm is currently unsupported."), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_PKCS11_VERSION_UNSUPPORTED, "The PKCS#11 library uses an unsupported API version."), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_PKCS11_TOKEN_NOT_FOUND, "Could not pick PKCS#11 token matching search criteria (none found, or multiple found)"), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_PKCS11_KEY_NOT_FOUND, "Could not pick PKCS#11 key matching search criteria (none found, or multiple found)"), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED, "PKCS#11 key type not supported"), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_PKCS11_UNKNOWN_CRYPTOKI_RETURN_VALUE, "A PKCS#11 (Cryptoki) library function failed with an unknown return value (CKR_). See log for more details."), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CANCEL), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_HOST_MEMORY), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SLOT_ID_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_GENERAL_ERROR), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_FAILED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ARGUMENTS_BAD), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NO_EVENT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NEED_TO_CREATE_THREADS), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CANT_LOCK), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_READ_ONLY), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_SENSITIVE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_TYPE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ATTRIBUTE_VALUE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ACTION_PROHIBITED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DATA_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DATA_LEN_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DEVICE_ERROR), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DEVICE_MEMORY), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DEVICE_REMOVED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ENCRYPTED_DATA_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_ENCRYPTED_DATA_LEN_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_CANCELED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_NOT_PARALLEL), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_NOT_SUPPORTED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_HANDLE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_SIZE_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_TYPE_INCONSISTENT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_NOT_NEEDED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_CHANGED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_NEEDED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_INDIGESTIBLE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_FUNCTION_NOT_PERMITTED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_NOT_WRAPPABLE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_KEY_UNEXTRACTABLE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MECHANISM_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MECHANISM_PARAM_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_OBJECT_HANDLE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_OPERATION_ACTIVE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_OPERATION_NOT_INITIALIZED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_INCORRECT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_LEN_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_EXPIRED), 
AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_LOCKED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_CLOSED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_COUNT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_HANDLE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_PARALLEL_NOT_SUPPORTED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_READ_ONLY), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_EXISTS), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_READ_ONLY_EXISTS), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SESSION_READ_WRITE_SO_EXISTS), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SIGNATURE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SIGNATURE_LEN_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TEMPLATE_INCOMPLETE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TEMPLATE_INCONSISTENT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TOKEN_NOT_PRESENT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TOKEN_NOT_RECOGNIZED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_TOKEN_WRITE_PROTECTED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_UNWRAPPING_KEY_HANDLE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_UNWRAPPING_KEY_SIZE_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_ALREADY_LOGGED_IN), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_NOT_LOGGED_IN), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_PIN_NOT_INITIALIZED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_TYPE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_ANOTHER_ALREADY_LOGGED_IN), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_USER_TOO_MANY_TYPES), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPED_KEY_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPED_KEY_LEN_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPING_KEY_HANDLE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPING_KEY_SIZE_RANGE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_WRAPPING_KEY_TYPE_INCONSISTENT), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_RANDOM_SEED_NOT_SUPPORTED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_RANDOM_NO_RNG), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_DOMAIN_PARAMS_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CURVE_NOT_SUPPORTED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_BUFFER_TOO_SMALL), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_SAVED_STATE_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_INFORMATION_SENSITIVE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_STATE_UNSAVEABLE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CRYPTOKI_NOT_INITIALIZED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_CRYPTOKI_ALREADY_INITIALIZED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MUTEX_BAD), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_MUTEX_NOT_LOCKED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NEW_PIN_MODE), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_NEXT_OTP), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_EXCEEDED_MAX_ITERATIONS), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FIPS_SELF_TEST_FAILED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_LIBRARY_LOAD_FAILED), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PIN_TOO_WEAK), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_PUBLIC_KEY_INVALID), AWS_DEFINE_ERROR_PKCS11_CKR(CKR_FUNCTION_REJECTED), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_IO_PINNED_EVENT_LOOP_MISMATCH, "A connection was requested on an event loop that is not associated with the client bootstrap's event loop group."), AWS_DEFINE_ERROR_INFO_IO( AWS_ERROR_PKCS11_ENCODING_ERROR, "A PKCS#11 (Cryptoki) library function was unable to ASN.1 (DER) encode a data structure. See log for more details."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_ERROR_DEFAULT_TRUST_STORE_NOT_FOUND, "Default TLS trust store not found on this system." 
" Trusted CA certificates must be installed," " or \"override default trust store\" must be used while creating the TLS context."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_STREAM_SEEK_FAILED, "Stream failed to seek from the underlying I/O source."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_STREAM_GET_LENGTH_FAILED, "Stream failed to get length from the underlying I/O source."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_STREAM_SEEK_UNSUPPORTED, "Seek is not supported in the underlying I/O source."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_STREAM_GET_LENGTH_UNSUPPORTED, "Get length is not supported in the underlying I/O source."), AWS_DEFINE_ERROR_INFO_IO( AWS_IO_TLS_ERROR_READ_FAILURE, "Failure during TLS read."), AWS_DEFINE_ERROR_INFO_IO(AWS_ERROR_PEM_MALFORMED, "Malformed PEM object encountered."), }; /* clang-format on */ static struct aws_error_info_list s_list = { .error_list = s_errors, .count = sizeof(s_errors) / sizeof(struct aws_error_info), }; static struct aws_log_subject_info s_io_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO( AWS_LS_IO_GENERAL, "aws-c-io", "Subject for IO logging that doesn't belong to any particular category"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_EVENT_LOOP, "event-loop", "Subject for Event-loop specific logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_SOCKET, "socket", "Subject for Socket specific logging."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_SOCKET_HANDLER, "socket-handler", "Subject for a socket channel handler."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_TLS, "tls-handler", "Subject for TLS-related logging"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_ALPN, "alpn", "Subject for ALPN-related logging"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_DNS, "dns", "Subject for DNS-related logging"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_PKI, "pki-utils", "Subject for Pki utilities."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_CHANNEL, "channel", "Subject for Channels"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_IO_CHANNEL_BOOTSTRAP, "channel-bootstrap", "Subject for channel bootstrap (client and server modes)"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_FILE_UTILS, "file-utils", "Subject for file operations"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_SHARED_LIBRARY, "shared-library", "Subject for shared library operations"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_IO_EXPONENTIAL_BACKOFF_RETRY_STRATEGY, "exp-backoff-strategy", "Subject for exponential backoff retry strategy"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "standard-retry-strategy", "Subject for standard retry strategy"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_PKCS11, "pkcs11", "Subject for PKCS#11 library operations"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_IO_PEM, "pem", "Subject for pem operations")}; static struct aws_log_subject_info_list s_io_log_subject_list = { .subject_list = s_io_log_subject_infos, .count = AWS_ARRAY_SIZE(s_io_log_subject_infos), }; static bool s_io_library_initialized = false; void aws_tls_init_static_state(struct aws_allocator *alloc); void aws_tls_clean_up_static_state(void); void aws_io_library_init(struct aws_allocator *allocator) { if (!s_io_library_initialized) { s_io_library_initialized = true; aws_common_library_init(allocator); aws_cal_library_init(allocator); aws_register_error_info(&s_list); aws_register_log_subject_info_list(&s_io_log_subject_list); aws_tls_init_static_state(allocator); aws_io_tracing_init(); } } void aws_io_library_clean_up(void) { if (s_io_library_initialized) { s_io_library_initialized = false; aws_thread_join_all_managed(); aws_tls_clean_up_static_state(); aws_unregister_error_info(&s_list); 
aws_unregister_log_subject_info_list(&s_io_log_subject_list); aws_cal_library_clean_up(); aws_common_library_clean_up(); } } void aws_io_fatal_assert_library_initialized(void) { if (!s_io_library_initialized) { AWS_LOGF_FATAL( AWS_LS_IO_GENERAL, "aws_io_library_init() must be called before using any functionality in aws-c-io."); AWS_FATAL_ASSERT(s_io_library_initialized); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/linux/000077500000000000000000000000001456575232400222765ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/linux/epoll_event_loop.c000066400000000000000000000636131456575232400260200ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #if !defined(COMPAT_MODE) && defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8) || __GLIBC__ > 2) # define USE_EFD 1 #else # define USE_EFD 0 #endif #if USE_EFD # include # include #else # include #endif /* This isn't defined on ancient linux distros (breaking the builds). * However, if this is a prebuild, we purposely build on an ancient system, but * we want the kernel calls to still be the same as a modern build since that's likely the target of the application * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag * gets passed as long as it does. */ #ifndef EPOLLRDHUP # define EPOLLRDHUP 0x2000 #endif static void s_destroy(struct aws_event_loop *event_loop); static int s_run(struct aws_event_loop *event_loop); static int s_stop(struct aws_event_loop *event_loop); static int s_wait_for_stop_completion(struct aws_event_loop *event_loop); static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task); static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); static int s_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data); static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); static void s_free_io_event_resources(void *user_data); static bool s_is_on_callers_thread(struct aws_event_loop *event_loop); static void aws_event_loop_thread(void *args); static struct aws_event_loop_vtable s_vtable = { .destroy = s_destroy, .run = s_run, .stop = s_stop, .wait_for_stop_completion = s_wait_for_stop_completion, .schedule_task_now = s_schedule_task_now, .schedule_task_future = s_schedule_task_future, .cancel_task = s_cancel_task, .subscribe_to_io_events = s_subscribe_to_io_events, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, .free_io_event_resources = s_free_io_event_resources, .is_on_callers_thread = s_is_on_callers_thread, }; struct epoll_loop { struct aws_task_scheduler scheduler; struct aws_thread thread_created_on; struct aws_thread_options thread_options; aws_thread_id_t thread_joined_to; struct aws_atomic_var running_thread_id; struct aws_io_handle read_task_handle; struct aws_io_handle write_task_handle; struct aws_mutex task_pre_queue_mutex; struct aws_linked_list task_pre_queue; struct aws_task stop_task; struct aws_atomic_var stop_task_ptr; int epoll_fd; bool should_process_task_pre_queue; bool 
should_continue; }; struct epoll_event_data { struct aws_allocator *alloc; struct aws_io_handle *handle; aws_event_loop_on_event_fn *on_event; void *user_data; struct aws_task cleanup_task; bool is_subscribed; /* false when handle is unsubscribed, but this struct hasn't been cleaned up yet */ }; /* default timeout is 100 seconds */ enum { DEFAULT_TIMEOUT = 100 * 1000, MAX_EVENTS = 100, }; int aws_open_nonblocking_posix_pipe(int pipe_fds[2]); /* Setup edge triggered epoll with a scheduler. */ struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { AWS_PRECONDITION(options); AWS_PRECONDITION(options->clock); struct aws_event_loop *loop = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop)); if (!loop) { return NULL; } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered epoll", (void *)loop); if (aws_event_loop_init_base(loop, alloc, options->clock)) { goto clean_up_loop; } struct epoll_loop *epoll_loop = aws_mem_calloc(alloc, 1, sizeof(struct epoll_loop)); if (!epoll_loop) { goto cleanup_base_loop; } if (options->thread_options) { epoll_loop->thread_options = *options->thread_options; } else { epoll_loop->thread_options = *aws_default_thread_options(); } /* initialize thread id to NULL, it should be updated when the event loop thread starts. */ aws_atomic_init_ptr(&epoll_loop->running_thread_id, NULL); aws_linked_list_init(&epoll_loop->task_pre_queue); epoll_loop->task_pre_queue_mutex = (struct aws_mutex)AWS_MUTEX_INIT; aws_atomic_init_ptr(&epoll_loop->stop_task_ptr, NULL); epoll_loop->epoll_fd = epoll_create(100); if (epoll_loop->epoll_fd < 0) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to open epoll handle.", (void *)loop); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up_epoll; } if (aws_thread_init(&epoll_loop->thread_created_on, alloc)) { goto clean_up_epoll; } #if USE_EFD AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Using eventfd for cross-thread notifications.", (void *)loop); int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); if (fd < 0) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to open eventfd handle.", (void *)loop); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up_thread; } AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: eventfd descriptor %d.", (void *)loop, fd); epoll_loop->write_task_handle = (struct aws_io_handle){.data.fd = fd, .additional_data = NULL}; epoll_loop->read_task_handle = (struct aws_io_handle){.data.fd = fd, .additional_data = NULL}; #else AWS_LOGF_DEBUG( AWS_LS_IO_EVENT_LOOP, "id=%p: Eventfd not available, falling back to pipe for cross-thread notification.", (void *)loop); int pipe_fds[2] = {0}; /* this pipe is for task scheduling. 
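 * A write to pipe_fds[1] (the write_task_handle) wakes the epoll_wait() call; the read end is subscribed
 * by the event-loop thread in aws_event_loop_thread() so queued cross-thread tasks get drained.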
*/ if (aws_open_nonblocking_posix_pipe(pipe_fds)) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to open pipe handle.", (void *)loop); goto clean_up_thread; } AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: pipe descriptors read %d, write %d.", (void *)loop, pipe_fds[0], pipe_fds[1]); epoll_loop->write_task_handle.data.fd = pipe_fds[1]; epoll_loop->read_task_handle.data.fd = pipe_fds[0]; #endif if (aws_task_scheduler_init(&epoll_loop->scheduler, alloc)) { goto clean_up_pipe; } epoll_loop->should_continue = false; loop->impl_data = epoll_loop; loop->vtable = &s_vtable; return loop; clean_up_pipe: #if USE_EFD close(epoll_loop->write_task_handle.data.fd); epoll_loop->write_task_handle.data.fd = -1; epoll_loop->read_task_handle.data.fd = -1; #else close(epoll_loop->read_task_handle.data.fd); close(epoll_loop->write_task_handle.data.fd); #endif clean_up_thread: aws_thread_clean_up(&epoll_loop->thread_created_on); clean_up_epoll: if (epoll_loop->epoll_fd >= 0) { close(epoll_loop->epoll_fd); } aws_mem_release(alloc, epoll_loop); cleanup_base_loop: aws_event_loop_clean_up_base(loop); clean_up_loop: aws_mem_release(alloc, loop); return NULL; } static void s_destroy(struct aws_event_loop *event_loop) { AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)event_loop); struct epoll_loop *epoll_loop = event_loop->impl_data; /* we don't know if stop() has been called by someone else, * just call stop() again and wait for event-loop to finish. */ aws_event_loop_stop(event_loop); s_wait_for_stop_completion(event_loop); /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ epoll_loop->thread_joined_to = aws_thread_current_thread_id(); aws_atomic_store_ptr(&epoll_loop->running_thread_id, &epoll_loop->thread_joined_to); aws_task_scheduler_clean_up(&epoll_loop->scheduler); while (!aws_linked_list_empty(&epoll_loop->task_pre_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&epoll_loop->task_pre_queue); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); } aws_thread_clean_up(&epoll_loop->thread_created_on); #if USE_EFD close(epoll_loop->write_task_handle.data.fd); epoll_loop->write_task_handle.data.fd = -1; epoll_loop->read_task_handle.data.fd = -1; #else close(epoll_loop->read_task_handle.data.fd); close(epoll_loop->write_task_handle.data.fd); #endif close(epoll_loop->epoll_fd); aws_mem_release(event_loop->alloc, epoll_loop); aws_event_loop_clean_up_base(event_loop); aws_mem_release(event_loop->alloc, event_loop); } static int s_run(struct aws_event_loop *event_loop) { struct epoll_loop *epoll_loop = event_loop->impl_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop); epoll_loop->should_continue = true; aws_thread_increment_unjoined_count(); if (aws_thread_launch( &epoll_loop->thread_created_on, &aws_event_loop_thread, event_loop, &epoll_loop->thread_options)) { aws_thread_decrement_unjoined_count(); AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop); epoll_loop->should_continue = false; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; struct aws_event_loop *event_loop = args; struct epoll_loop *epoll_loop = event_loop->impl_data; /* now okay to reschedule stop tasks. 
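 * Clearing stop_task_ptr before looking at the task status lets a later aws_event_loop_stop() win the
 * compare-and-swap in s_stop() and schedule a fresh stop task.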
*/ aws_atomic_store_ptr(&epoll_loop->stop_task_ptr, NULL); if (status == AWS_TASK_STATUS_RUN_READY) { /* * this allows the event loop to invoke the callback once the event loop has completed. */ epoll_loop->should_continue = false; } } static int s_stop(struct aws_event_loop *event_loop) { struct epoll_loop *epoll_loop = event_loop->impl_data; void *expected_ptr = NULL; bool update_succeeded = aws_atomic_compare_exchange_ptr(&epoll_loop->stop_task_ptr, &expected_ptr, &epoll_loop->stop_task); if (!update_succeeded) { /* the stop task is already scheduled. */ return AWS_OP_SUCCESS; } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Stopping event-loop thread.", (void *)event_loop); aws_task_init(&epoll_loop->stop_task, s_stop_task, event_loop, "epoll_event_loop_stop"); s_schedule_task_now(event_loop, &epoll_loop->stop_task); return AWS_OP_SUCCESS; } static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { struct epoll_loop *epoll_loop = event_loop->impl_data; int result = aws_thread_join(&epoll_loop->thread_created_on); aws_thread_decrement_unjoined_count(); return result; } static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { struct epoll_loop *epoll_loop = event_loop->impl_data; /* if event loop and the caller are the same thread, just schedule and be done with it. */ if (s_is_on_callers_thread(event_loop)) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: scheduling task %p in-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); if (run_at_nanos == 0) { /* zero denotes "now" task */ aws_task_scheduler_schedule_now(&epoll_loop->scheduler, task); } else { aws_task_scheduler_schedule_future(&epoll_loop->scheduler, task, run_at_nanos); } return; } AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Scheduling task %p cross-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); task->timestamp = run_at_nanos; aws_mutex_lock(&epoll_loop->task_pre_queue_mutex); uint64_t counter = 1; bool is_first_task = aws_linked_list_empty(&epoll_loop->task_pre_queue); aws_linked_list_push_back(&epoll_loop->task_pre_queue, &task->node); /* if the list was not empty, we already have a pending read on the pipe/eventfd, no need to write again. 
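 * Only the empty-to-non-empty transition triggers the wake-up write below; later producers just append
 * under the mutex until the event-loop thread drains the pre-queue.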
*/ if (is_first_task) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Waking up event-loop thread", (void *)event_loop); /* If the write fails because the buffer is full, we don't actually care because that means there's a pending * read on the pipe/eventfd and thus the event loop will end up checking to see if something has been queued.*/ ssize_t do_not_care = write(epoll_loop->write_task_handle.data.fd, (void *)&counter, sizeof(counter)); (void)do_not_care; } aws_mutex_unlock(&epoll_loop->task_pre_queue_mutex); } static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { s_schedule_task_common(event_loop, task, 0 /* zero denotes "now" task */); } static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { s_schedule_task_common(event_loop, task, run_at_nanos); } static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: cancelling task %p", (void *)event_loop, (void *)task); struct epoll_loop *epoll_loop = event_loop->impl_data; aws_task_scheduler_cancel_task(&epoll_loop->scheduler, task); } static int s_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: subscribing to events on fd %d", (void *)event_loop, handle->data.fd); struct epoll_event_data *epoll_event_data = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct epoll_event_data)); handle->additional_data = epoll_event_data; if (!epoll_event_data) { return AWS_OP_ERR; } struct epoll_loop *epoll_loop = event_loop->impl_data; epoll_event_data->alloc = event_loop->alloc; epoll_event_data->user_data = user_data; epoll_event_data->handle = handle; epoll_event_data->on_event = on_event; epoll_event_data->is_subscribed = true; /*everyone is always registered for edge-triggered, hang up, remote hang up, errors. 
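 * EPOLLIN/EPOLLOUT are OR'd in below only when the caller asked for READABLE/WRITABLE; since registration
 * is edge-triggered, the handler is expected to keep reading/writing until the fd would block, or it will
 * not be notified again for that fd.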
*/ uint32_t event_mask = EPOLLET | EPOLLHUP | EPOLLRDHUP | EPOLLERR; if (events & AWS_IO_EVENT_TYPE_READABLE) { event_mask |= EPOLLIN; } if (events & AWS_IO_EVENT_TYPE_WRITABLE) { event_mask |= EPOLLOUT; } /* this guy is copied by epoll_ctl */ struct epoll_event epoll_event = { .data = {.ptr = epoll_event_data}, .events = event_mask, }; if (epoll_ctl(epoll_loop->epoll_fd, EPOLL_CTL_ADD, handle->data.fd, &epoll_event)) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: failed to subscribe to events on fd %d", (void *)event_loop, handle->data.fd); handle->additional_data = NULL; aws_mem_release(event_loop->alloc, epoll_event_data); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } return AWS_OP_SUCCESS; } static void s_free_io_event_resources(void *user_data) { struct epoll_event_data *event_data = user_data; aws_mem_release(event_data->alloc, (void *)event_data); } static void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct epoll_event_data *event_data = (struct epoll_event_data *)arg; s_free_io_event_resources(event_data); } static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on fd %d", (void *)event_loop, handle->data.fd); struct epoll_loop *epoll_loop = event_loop->impl_data; AWS_ASSERT(handle->additional_data); struct epoll_event_data *additional_handle_data = handle->additional_data; struct epoll_event dummy_event; if (AWS_UNLIKELY(epoll_ctl(epoll_loop->epoll_fd, EPOLL_CTL_DEL, handle->data.fd, &dummy_event /*ignored*/))) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: failed to un-subscribe from events on fd %d", (void *)event_loop, handle->data.fd); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } /* We can't clean up yet, because we have schedule tasks and more events to process, * mark it as unsubscribed and schedule a cleanup task. */ additional_handle_data->is_subscribed = false; aws_task_init( &additional_handle_data->cleanup_task, s_unsubscribe_cleanup_task, additional_handle_data, "epoll_event_loop_unsubscribe_cleanup"); s_schedule_task_now(event_loop, &additional_handle_data->cleanup_task); handle->additional_data = NULL; return AWS_OP_SUCCESS; } static bool s_is_on_callers_thread(struct aws_event_loop *event_loop) { struct epoll_loop *epoll_loop = event_loop->impl_data; aws_thread_id_t *thread_id = aws_atomic_load_ptr(&epoll_loop->running_thread_id); return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id()); } /* We treat the pipe fd with a subscription to io events just like any other managed file descriptor. 
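 * (The read end is subscribed from aws_event_loop_thread() with s_on_tasks_to_schedule as its callback.)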
* This is the event handler for events on that pipe.*/ static void s_on_tasks_to_schedule( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)handle; (void)user_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: notified of cross-thread tasks to schedule", (void *)event_loop); struct epoll_loop *epoll_loop = event_loop->impl_data; if (events & AWS_IO_EVENT_TYPE_READABLE) { epoll_loop->should_process_task_pre_queue = true; } } static void s_process_task_pre_queue(struct aws_event_loop *event_loop) { struct epoll_loop *epoll_loop = event_loop->impl_data; if (!epoll_loop->should_process_task_pre_queue) { return; } AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: processing cross-thread tasks", (void *)event_loop); epoll_loop->should_process_task_pre_queue = false; struct aws_linked_list task_pre_queue; aws_linked_list_init(&task_pre_queue); uint64_t count_ignore = 0; aws_mutex_lock(&epoll_loop->task_pre_queue_mutex); /* several tasks could theoretically have been written (though this should never happen), make sure we drain the * eventfd/pipe. */ while (read(epoll_loop->read_task_handle.data.fd, &count_ignore, sizeof(count_ignore)) > -1) { } aws_linked_list_swap_contents(&epoll_loop->task_pre_queue, &task_pre_queue); aws_mutex_unlock(&epoll_loop->task_pre_queue_mutex); while (!aws_linked_list_empty(&task_pre_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&task_pre_queue); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: task %p pulled to event-loop, scheduling now.", (void *)event_loop, (void *)task); /* Timestamp 0 is used to denote "now" tasks */ if (task->timestamp == 0) { aws_task_scheduler_schedule_now(&epoll_loop->scheduler, task); } else { aws_task_scheduler_schedule_future(&epoll_loop->scheduler, task, task->timestamp); } } } /** * This just calls epoll_wait() * * We broke this out into its own function so that the stacktrace clearly shows * what this thread is doing. We've had a lot of cases where users think this * thread is deadlocked because it's stuck here. We want it to be clear * that it's doing nothing on purpose. It's waiting for events to happen... */ AWS_NO_INLINE static int aws_event_loop_listen_for_io_events(int epoll_fd, struct epoll_event events[MAX_EVENTS], int timeout) { return epoll_wait(epoll_fd, events, MAX_EVENTS, timeout); } static void s_aws_epoll_cleanup_aws_lc_thread_local_state(void *user_data) { (void)user_data; aws_cal_thread_clean_up(); } static void aws_event_loop_thread(void *args) { struct aws_event_loop *event_loop = args; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); struct epoll_loop *epoll_loop = event_loop->impl_data; /* set thread id to the thread of the event loop */ aws_atomic_store_ptr(&epoll_loop->running_thread_id, &epoll_loop->thread_created_on.thread_id); int err = s_subscribe_to_io_events( event_loop, &epoll_loop->read_task_handle, AWS_IO_EVENT_TYPE_READABLE, s_on_tasks_to_schedule, NULL); if (err) { return; } aws_thread_current_at_exit(s_aws_epoll_cleanup_aws_lc_thread_local_state, NULL); int timeout = DEFAULT_TIMEOUT; struct epoll_event events[MAX_EVENTS]; AWS_LOGF_INFO( AWS_LS_IO_EVENT_LOOP, "id=%p: default timeout %d, and max events to process per tick %d", (void *)event_loop, timeout, MAX_EVENTS); /* * until stop is called, * call epoll_wait, if a task is scheduled, or a file descriptor has activity, it will * return. 
* * process all events, * * run all scheduled tasks. * * process queued subscription cleanups. */ while (epoll_loop->should_continue) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: waiting for a maximum of %d ms", (void *)event_loop, timeout); int event_count = aws_event_loop_listen_for_io_events(epoll_loop->epoll_fd, events, timeout); aws_event_loop_register_tick_start(event_loop); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, event_count); __itt_task_begin(io_tracing_domain, __itt_null, __itt_null, tracing_event_loop_events); for (int i = 0; i < event_count; ++i) { struct epoll_event_data *event_data = (struct epoll_event_data *)events[i].data.ptr; int event_mask = 0; if (events[i].events & EPOLLIN) { event_mask |= AWS_IO_EVENT_TYPE_READABLE; } if (events[i].events & EPOLLOUT) { event_mask |= AWS_IO_EVENT_TYPE_WRITABLE; } if (events[i].events & EPOLLRDHUP) { event_mask |= AWS_IO_EVENT_TYPE_REMOTE_HANG_UP; } if (events[i].events & EPOLLHUP) { event_mask |= AWS_IO_EVENT_TYPE_CLOSED; } if (events[i].events & EPOLLERR) { event_mask |= AWS_IO_EVENT_TYPE_ERROR; } if (event_data->is_subscribed) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: activity on fd %d, invoking handler.", (void *)event_loop, event_data->handle->data.fd); __itt_task_begin(io_tracing_domain, __itt_null, __itt_null, tracing_event_loop_event); event_data->on_event(event_loop, event_data->handle, event_mask, event_data->user_data); __itt_task_end(io_tracing_domain); } } __itt_task_end(io_tracing_domain); /* run scheduled tasks */ s_process_task_pre_queue(event_loop); uint64_t now_ns = 0; event_loop->clock(&now_ns); /* if clock fails, now_ns will be 0 and tasks scheduled for a specific time will not be run. That's ok, we'll handle them next time around. */ AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: running scheduled tasks.", (void *)event_loop); __itt_task_begin(io_tracing_domain, __itt_null, __itt_null, tracing_event_loop_run_tasks); aws_task_scheduler_run_all(&epoll_loop->scheduler, now_ns); __itt_task_end(io_tracing_domain); /* set timeout for next epoll_wait() call. * if clock fails, or scheduler has no tasks, use default timeout */ bool use_default_timeout = false; if (event_loop->clock(&now_ns)) { use_default_timeout = true; } uint64_t next_run_time_ns; if (!aws_task_scheduler_has_tasks(&epoll_loop->scheduler, &next_run_time_ns)) { use_default_timeout = true; } if (use_default_timeout) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: no more scheduled tasks using default timeout.", (void *)event_loop); timeout = DEFAULT_TIMEOUT; } else { /* Translate timestamp (in nanoseconds) to timeout (in milliseconds) */ uint64_t timeout_ns = (next_run_time_ns > now_ns) ? (next_run_time_ns - now_ns) : 0; uint64_t timeout_ms64 = aws_timestamp_convert(timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); timeout = timeout_ms64 > INT_MAX ? INT_MAX : (int)timeout_ms64; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: detected more scheduled tasks with the next occurring at " "%llu, using timeout of %d.", (void *)event_loop, (unsigned long long)timeout_ns, timeout); } aws_event_loop_register_tick_end(event_loop); } AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop); s_unsubscribe_from_io_events(event_loop, &epoll_loop->read_task_handle); /* set thread id back to NULL. This should be updated again in destroy, before tasks are canceled. 
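 * While running_thread_id is NULL, s_is_on_callers_thread() returns false, so any late schedule request
 * takes the cross-thread pre-queue path instead of touching the scheduler directly.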
*/ aws_atomic_store_ptr(&epoll_loop->running_thread_id, NULL); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/message_pool.c000066400000000000000000000155661456575232400237750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include int aws_memory_pool_init( struct aws_memory_pool *mempool, struct aws_allocator *alloc, uint16_t ideal_segment_count, size_t segment_size) { mempool->alloc = alloc; mempool->ideal_segment_count = ideal_segment_count; mempool->segment_size = segment_size; mempool->data_ptr = aws_mem_calloc(alloc, ideal_segment_count, sizeof(void *)); if (!mempool->data_ptr) { return AWS_OP_ERR; } aws_array_list_init_static(&mempool->stack, mempool->data_ptr, ideal_segment_count, sizeof(void *)); for (uint16_t i = 0; i < ideal_segment_count; ++i) { void *memory = aws_mem_acquire(alloc, segment_size); if (memory) { aws_array_list_push_back(&mempool->stack, &memory); } else { goto clean_up; } } return AWS_OP_SUCCESS; clean_up: aws_memory_pool_clean_up(mempool); return AWS_OP_ERR; } void aws_memory_pool_clean_up(struct aws_memory_pool *mempool) { void *cur = NULL; while (aws_array_list_length(&mempool->stack) > 0) { /* the only way this fails is not possible since I already checked the length. */ aws_array_list_back(&mempool->stack, &cur); aws_array_list_pop_back(&mempool->stack); aws_mem_release(mempool->alloc, cur); } aws_array_list_clean_up(&mempool->stack); aws_mem_release(mempool->alloc, mempool->data_ptr); } void *aws_memory_pool_acquire(struct aws_memory_pool *mempool) { void *back = NULL; if (aws_array_list_length(&mempool->stack) > 0) { aws_array_list_back(&mempool->stack, &back); aws_array_list_pop_back(&mempool->stack); return back; } void *mem = aws_mem_acquire(mempool->alloc, mempool->segment_size); return mem; } void aws_memory_pool_release(struct aws_memory_pool *mempool, void *to_release) { size_t pool_size = aws_array_list_length(&mempool->stack); if (pool_size >= mempool->ideal_segment_count) { aws_mem_release(mempool->alloc, to_release); return; } aws_array_list_push_back(&mempool->stack, &to_release); } struct message_pool_allocator { struct aws_allocator base_allocator; struct aws_message_pool *msg_pool; }; void *s_message_pool_mem_acquire(struct aws_allocator *allocator, size_t size) { (void)allocator; (void)size; /* no one should ever call this ever. 
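 * Messages are handed out by aws_message_pool_acquire(); this allocator exists only so that releasing an
 * aws_io_message through its allocator routes into s_message_pool_mem_release() below.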
*/ AWS_ASSERT(0); return NULL; } void s_message_pool_mem_release(struct aws_allocator *allocator, void *ptr) { struct message_pool_allocator *msg_pool_alloc = allocator->impl; aws_message_pool_release(msg_pool_alloc->msg_pool, (struct aws_io_message *)ptr); } static size_t MSG_OVERHEAD = sizeof(struct aws_io_message) + sizeof(struct message_pool_allocator); int aws_message_pool_init( struct aws_message_pool *msg_pool, struct aws_allocator *alloc, struct aws_message_pool_creation_args *args) { msg_pool->alloc = alloc; size_t msg_data_size = args->application_data_msg_data_size + MSG_OVERHEAD; if (aws_memory_pool_init( &msg_pool->application_data_pool, alloc, args->application_data_msg_count, msg_data_size)) { return AWS_OP_ERR; } size_t small_blk_data_size = args->small_block_msg_data_size + MSG_OVERHEAD; if (aws_memory_pool_init(&msg_pool->small_block_pool, alloc, args->small_block_msg_count, small_blk_data_size)) { aws_memory_pool_clean_up(&msg_pool->application_data_pool); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_message_pool_clean_up(struct aws_message_pool *msg_pool) { aws_memory_pool_clean_up(&msg_pool->application_data_pool); aws_memory_pool_clean_up(&msg_pool->small_block_pool); AWS_ZERO_STRUCT(*msg_pool); } struct message_wrapper { struct aws_io_message message; struct message_pool_allocator msg_allocator; uint8_t buffer_start[1]; }; struct aws_io_message *aws_message_pool_acquire( struct aws_message_pool *msg_pool, enum aws_io_message_type message_type, size_t size_hint) { struct message_wrapper *message_wrapper = NULL; size_t max_size = 0; switch (message_type) { case AWS_IO_MESSAGE_APPLICATION_DATA: if (size_hint > msg_pool->small_block_pool.segment_size - MSG_OVERHEAD) { message_wrapper = aws_memory_pool_acquire(&msg_pool->application_data_pool); max_size = msg_pool->application_data_pool.segment_size - MSG_OVERHEAD; } else { message_wrapper = aws_memory_pool_acquire(&msg_pool->small_block_pool); max_size = msg_pool->small_block_pool.segment_size - MSG_OVERHEAD; } break; default: AWS_ASSERT(0); aws_raise_error(AWS_IO_CHANNEL_UNKNOWN_MESSAGE_TYPE); return NULL; } if (!message_wrapper) { return NULL; } message_wrapper->message.message_type = message_type; message_wrapper->message.message_tag = 0; message_wrapper->message.user_data = NULL; message_wrapper->message.copy_mark = 0; message_wrapper->message.on_completion = NULL; /* the buffer shares the allocation with the message. It's the bit at the end. */ message_wrapper->message.message_data.buffer = message_wrapper->buffer_start; message_wrapper->message.message_data.len = 0; message_wrapper->message.message_data.capacity = size_hint <= max_size ? 
size_hint : max_size; /* set the allocator ptr */ message_wrapper->msg_allocator.base_allocator.impl = &message_wrapper->msg_allocator; message_wrapper->msg_allocator.base_allocator.mem_acquire = s_message_pool_mem_acquire; message_wrapper->msg_allocator.base_allocator.mem_realloc = NULL; message_wrapper->msg_allocator.base_allocator.mem_release = s_message_pool_mem_release; message_wrapper->msg_allocator.msg_pool = msg_pool; message_wrapper->message.allocator = &message_wrapper->msg_allocator.base_allocator; return &message_wrapper->message; } void aws_message_pool_release(struct aws_message_pool *msg_pool, struct aws_io_message *message) { memset(message->message_data.buffer, 0, message->message_data.len); message->allocator = NULL; struct message_wrapper *wrapper = AWS_CONTAINER_OF(message, struct message_wrapper, message); switch (message->message_type) { case AWS_IO_MESSAGE_APPLICATION_DATA: if (message->message_data.capacity > msg_pool->small_block_pool.segment_size - MSG_OVERHEAD) { aws_memory_pool_release(&msg_pool->application_data_pool, wrapper); } else { aws_memory_pool_release(&msg_pool->small_block_pool, wrapper); } break; default: AWS_ASSERT(0); aws_raise_error(AWS_IO_CHANNEL_UNKNOWN_MESSAGE_TYPE); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pem.c000066400000000000000000000466331456575232400221000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include enum aws_pem_parse_state { BEGIN, ON_DATA, END, }; static const struct aws_byte_cursor begin_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----BEGIN"); static const struct aws_byte_cursor end_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----END"); static const struct aws_byte_cursor dashes = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----"); int aws_sanitize_pem(struct aws_byte_buf *pem, struct aws_allocator *allocator) { if (!pem->len) { /* reject files with no PEM data */ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_byte_buf clean_pem_buf; if (aws_byte_buf_init(&clean_pem_buf, allocator, pem->len)) { return AWS_OP_ERR; } struct aws_byte_cursor pem_cursor = aws_byte_cursor_from_buf(pem); enum aws_pem_parse_state state = BEGIN; for (size_t i = 0; i < pem_cursor.len; i++) { /* parse through the pem once */ char current = *(pem_cursor.ptr + i); switch (state) { case BEGIN: if (current == '-') { struct aws_byte_cursor compare_cursor = pem_cursor; compare_cursor.len = begin_header.len; compare_cursor.ptr += i; if (aws_byte_cursor_eq(&compare_cursor, &begin_header)) { state = ON_DATA; i--; } } break; case ON_DATA: /* start copying everything */ if (current == '-') { struct aws_byte_cursor compare_cursor = pem_cursor; compare_cursor.len = end_header.len; compare_cursor.ptr += i; if (aws_byte_cursor_eq(&compare_cursor, &end_header)) { /* Copy the end header string and start to search for the end part of a pem */ state = END; aws_byte_buf_append(&clean_pem_buf, &end_header); i += (end_header.len - 1); break; } } aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current); break; case END: if (current == '-') { struct aws_byte_cursor compare_cursor = pem_cursor; compare_cursor.len = dashes.len; compare_cursor.ptr += i; if (aws_byte_cursor_eq(&compare_cursor, &dashes)) { /* End part of a pem, copy the last 5 dashes and a new line, then ignore everything before next * begin header */ state = BEGIN; aws_byte_buf_append(&clean_pem_buf, &dashes); i += (dashes.len - 1); 
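/* The -1 offsets the enclosing loop's own i++; the newline appended next puts the END footer on its own
 * line before any following BEGIN block. */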
aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)'\n'); break; } } aws_byte_buf_append_byte_dynamic(&clean_pem_buf, (uint8_t)current); break; default: break; } } if (clean_pem_buf.len == 0) { /* No valid data remains after sanitization. File might have been the wrong format */ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } struct aws_byte_cursor clean_pem_cursor = aws_byte_cursor_from_buf(&clean_pem_buf); aws_byte_buf_reset(pem, true); aws_byte_buf_append_dynamic(pem, &clean_pem_cursor); aws_byte_buf_clean_up(&clean_pem_buf); return AWS_OP_SUCCESS; error: aws_byte_buf_clean_up(&clean_pem_buf); return AWS_OP_ERR; } /* * Possible PEM object types. openssl/pem.h used as a source of truth for * possible types. */ static struct aws_byte_cursor s_pem_type_x509_old_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X509 CERTIFICATE"); static struct aws_byte_cursor s_pem_type_x509_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CERTIFICATE"); static struct aws_byte_cursor s_pem_type_x509_trusted_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("TRUSTED CERTIFICATE"); static struct aws_byte_cursor s_pem_type_x509_req_old_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("NEW CERTIFICATE REQUEST"); static struct aws_byte_cursor s_pem_type_x509_req_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CERTIFICATE REQUEST"); static struct aws_byte_cursor s_pem_type_x509_crl_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X509 CRL"); static struct aws_byte_cursor s_pem_type_evp_pkey_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ANY PRIVATE KEY"); static struct aws_byte_cursor s_pem_type_public_pkcs8_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PUBLIC KEY"); static struct aws_byte_cursor s_pem_type_private_rsa_pkcs1_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PRIVATE KEY"); static struct aws_byte_cursor s_pem_type_public_rsa_pkcs1_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PUBLIC KEY"); static struct aws_byte_cursor s_pem_type_private_dsa_pkcs1_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PRIVATE KEY"); static struct aws_byte_cursor s_pem_type_public_dsa_pkcs1_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RSA PUBLIC KEY"); static struct aws_byte_cursor s_pem_type_pkcs7_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PKCS7"); static struct aws_byte_cursor s_pem_type_pkcs7_signed_data_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PKCS #7 SIGNED DATA"); static struct aws_byte_cursor s_pem_type_private_pkcs8_encrypted_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ENCRYPTED PRIVATE KEY"); static struct aws_byte_cursor s_pem_type_private_pkcs8_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRIVATE KEY"); static struct aws_byte_cursor s_pem_type_dh_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DH PARAMETERS"); static struct aws_byte_cursor s_pem_type_dh_parameters_x942_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X9.42 DH PARAMETERS"); static struct aws_byte_cursor s_pem_type_ssl_session_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SSL SESSION PARAMETERS"); static struct aws_byte_cursor s_pem_type_dsa_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DSA PARAMETERS"); static struct aws_byte_cursor s_pem_type_ecdsa_public_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ECDSA PUBLIC KEY"); static struct aws_byte_cursor s_pem_type_ec_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("EC PARAMETERS"); static struct aws_byte_cursor s_pem_type_ec_private_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("EC PRIVATE KEY"); static struct aws_byte_cursor s_pem_type_parameters_cur = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PARAMETERS"); static struct aws_byte_cursor s_pem_type_cms_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CMS"); static struct aws_byte_cursor s_pem_type_sm2_parameters_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SM2 PARAMETERS"); void aws_pem_objects_clean_up(struct aws_array_list *pem_objects) { for (size_t i = 0; i < aws_array_list_length(pem_objects); ++i) { struct aws_pem_object *pem_obj_ptr = NULL; aws_array_list_get_at_ptr(pem_objects, (void **)&pem_obj_ptr, i); if (pem_obj_ptr != NULL) { aws_byte_buf_clean_up_secure(&pem_obj_ptr->data); aws_string_destroy(pem_obj_ptr->type_string); } } aws_array_list_clear(pem_objects); aws_array_list_clean_up(pem_objects); } enum aws_pem_object_type s_map_type_cur_to_type(struct aws_byte_cursor type_cur) { /* * Putting all those in a hash table might be a bit faster depending on * hashing function cost, but it complicates code considerably for a * potential small gain. PEM parsing is already slow due to multiple * allocations and should not be used in perf critical places. * So choosing dumb and easy approach over something more complicated and we * can reevaluate decision in the future. */ if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_old_cur)) { return AWS_PEM_TYPE_X509_OLD; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_cur)) { return AWS_PEM_TYPE_X509; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_trusted_cur)) { return AWS_PEM_TYPE_X509_TRUSTED; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_req_old_cur)) { return AWS_PEM_TYPE_X509_REQ_OLD; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_req_cur)) { return AWS_PEM_TYPE_X509_REQ; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_x509_crl_cur)) { return AWS_PEM_TYPE_X509_CRL; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_evp_pkey_cur)) { return AWS_PEM_TYPE_EVP_PKEY; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_public_pkcs8_cur)) { return AWS_PEM_TYPE_PUBLIC_PKCS8; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_rsa_pkcs1_cur)) { return AWS_PEM_TYPE_PRIVATE_RSA_PKCS1; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_public_rsa_pkcs1_cur)) { return AWS_PEM_TYPE_PUBLIC_RSA_PKCS1; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_dsa_pkcs1_cur)) { return AWS_PEM_TYPE_PRIVATE_DSA_PKCS1; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_public_dsa_pkcs1_cur)) { return AWS_PEM_TYPE_PUBLIC_DSA_PKCS1; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_pkcs7_cur)) { return AWS_PEM_TYPE_PKCS7; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_pkcs7_signed_data_cur)) { return AWS_PEM_TYPE_PKCS7_SIGNED_DATA; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_pkcs8_encrypted_cur)) { return AWS_PEM_TYPE_PRIVATE_PKCS8_ENCRYPTED; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_private_pkcs8_cur)) { return AWS_PEM_TYPE_PRIVATE_PKCS8; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_dh_parameters_cur)) { return AWS_PEM_TYPE_DH_PARAMETERS; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_dh_parameters_x942_cur)) { return AWS_PEM_TYPE_DH_PARAMETERS_X942; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_ssl_session_parameters_cur)) { return AWS_PEM_TYPE_SSL_SESSION_PARAMETERS; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_dsa_parameters_cur)) { return AWS_PEM_TYPE_DSA_PARAMETERS; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_ecdsa_public_cur)) { return AWS_PEM_TYPE_ECDSA_PUBLIC; } else if (aws_byte_cursor_eq(&type_cur, 
&s_pem_type_ec_parameters_cur)) { return AWS_PEM_TYPE_EC_PARAMETERS; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_ec_private_cur)) { return AWS_PEM_TYPE_EC_PRIVATE; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_parameters_cur)) { return AWS_PEM_TYPE_PARAMETERS; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_cms_cur)) { return AWS_PEM_TYPE_CMS; } else if (aws_byte_cursor_eq(&type_cur, &s_pem_type_sm2_parameters_cur)) { return AWS_PEM_TYPE_SM2_PARAMETERS; } return AWS_PEM_TYPE_UNKNOWN; } static struct aws_byte_cursor s_begin_header_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----BEGIN"); static struct aws_byte_cursor s_end_header_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----END"); static struct aws_byte_cursor s_delim_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----"); int s_extract_header_type_cur(struct aws_byte_cursor cur, struct aws_byte_cursor *out) { if (!aws_byte_cursor_starts_with(&cur, &s_begin_header_cur)) { AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM buffer: invalid begin token"); return aws_raise_error(AWS_ERROR_PEM_MALFORMED); } aws_byte_cursor_advance(&cur, s_begin_header_cur.len); aws_byte_cursor_advance(&cur, 1); // space after begin struct aws_byte_cursor type_cur = aws_byte_cursor_advance(&cur, cur.len - s_delim_cur.len); if (!aws_byte_cursor_eq(&cur, &s_delim_cur)) { AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM buffer: invalid end token"); return aws_raise_error(AWS_ERROR_PEM_MALFORMED); } *out = type_cur; return AWS_OP_SUCCESS; } static int s_convert_pem_to_raw_base64( struct aws_allocator *allocator, struct aws_byte_cursor pem, struct aws_array_list *pem_objects) { struct aws_array_list split_buffers; if (aws_array_list_init_dynamic(&split_buffers, allocator, 16, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } if (aws_byte_cursor_split_on_char(&pem, '\n', &split_buffers)) { aws_array_list_clean_up(&split_buffers); AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM buffer: failed to split on newline"); return aws_raise_error(AWS_ERROR_PEM_MALFORMED); } enum aws_pem_parse_state state = BEGIN; bool on_length_calc = true; size_t current_obj_len = 0; size_t current_obj_start_index = 0; struct aws_byte_buf current_obj_buf; AWS_ZERO_STRUCT(current_obj_buf); struct aws_byte_cursor current_obj_type_cur; AWS_ZERO_STRUCT(current_obj_type_cur); enum aws_pem_object_type current_obj_type = AWS_PEM_TYPE_UNKNOWN; size_t split_count = aws_array_list_length(&split_buffers); size_t i = 0; while (i < split_count) { struct aws_byte_cursor *line_cur_ptr = NULL; int error = aws_array_list_get_at_ptr(&split_buffers, (void **)&line_cur_ptr, i); /* should never fail as we control array size and how we index into list */ AWS_FATAL_ASSERT(error == AWS_OP_SUCCESS); /* Burn off the padding in the buffer first. * Worst case we'll only have to do this once per line in the buffer. */ *line_cur_ptr = aws_byte_cursor_left_trim_pred(line_cur_ptr, aws_isspace); /* And make sure remove any space from right side */ *line_cur_ptr = aws_byte_cursor_right_trim_pred(line_cur_ptr, aws_isspace); switch (state) { case BEGIN: if (aws_byte_cursor_starts_with(line_cur_ptr, &s_begin_header_cur)) { if (s_extract_header_type_cur(*line_cur_ptr, ¤t_obj_type_cur)) { goto on_end_of_loop; } current_obj_type = s_map_type_cur_to_type(current_obj_type_cur); current_obj_start_index = i + 1; state = ON_DATA; } ++i; break; /* this loops through the lines containing data twice. First to figure out the length, a second * time to actually copy the data. */ case ON_DATA: /* Found end tag. 
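 * On the first (length-calculating) pass, hitting the END line rewinds i to current_obj_start_index and
 * sizes current_obj_buf; the second pass then copies the base64 lines into that buffer.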
*/ if (aws_byte_cursor_starts_with(line_cur_ptr, &s_end_header_cur)) { if (on_length_calc) { on_length_calc = false; state = ON_DATA; i = current_obj_start_index; aws_byte_buf_init(¤t_obj_buf, allocator, current_obj_len); } else { struct aws_pem_object pem_object = { .data = current_obj_buf, .type_string = aws_string_new_from_cursor(allocator, ¤t_obj_type_cur), .type = current_obj_type, }; if (aws_array_list_push_back(pem_objects, &pem_object)) { goto on_end_of_loop; } state = BEGIN; on_length_calc = true; current_obj_len = 0; ++i; AWS_ZERO_STRUCT(current_obj_buf); AWS_ZERO_STRUCT(current_obj_type_cur); current_obj_type = AWS_PEM_TYPE_UNKNOWN; } /* actually on a line with data in it. */ } else { if (on_length_calc) { current_obj_len += line_cur_ptr->len; } else { if (aws_byte_buf_append(¤t_obj_buf, line_cur_ptr)) { goto on_end_of_loop; } } ++i; } break; default: AWS_FATAL_ASSERT(false); } } /* * Note: this function only hard error if nothing can be parsed out of file. * Otherwise it succeeds and returns whatever was parsed successfully. */ on_end_of_loop: aws_array_list_clean_up(&split_buffers); aws_byte_buf_clean_up_secure(¤t_obj_buf); if (state == BEGIN && aws_array_list_length(pem_objects) > 0) { return AWS_OP_SUCCESS; } AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Invalid PEM buffer."); aws_pem_objects_clean_up(pem_objects); return aws_raise_error(AWS_ERROR_PEM_MALFORMED); } int aws_pem_objects_init_from_file_contents( struct aws_array_list *pem_objects, struct aws_allocator *allocator, struct aws_byte_cursor pem_cursor) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(pem_objects != NULL); /* Init empty array list, ideally, the PEM should only has one key included. */ if (aws_array_list_init_dynamic(pem_objects, allocator, 1, sizeof(struct aws_pem_object))) { return AWS_OP_ERR; } if (s_convert_pem_to_raw_base64(allocator, pem_cursor, pem_objects)) { goto on_error; } for (size_t i = 0; i < aws_array_list_length(pem_objects); ++i) { struct aws_pem_object *pem_obj_ptr = NULL; aws_array_list_get_at_ptr(pem_objects, (void **)&pem_obj_ptr, i); struct aws_byte_cursor byte_cur = aws_byte_cursor_from_buf(&pem_obj_ptr->data); size_t decoded_len = 0; if (aws_base64_compute_decoded_len(&byte_cur, &decoded_len)) { AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to get length for decoded base64 pem object."); aws_raise_error(AWS_ERROR_PEM_MALFORMED); goto on_error; } struct aws_byte_buf decoded_buffer; aws_byte_buf_init(&decoded_buffer, allocator, decoded_len); if (aws_base64_decode(&byte_cur, &decoded_buffer)) { AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to base 64 decode pem object."); aws_raise_error(AWS_ERROR_PEM_MALFORMED); aws_byte_buf_clean_up_secure(&decoded_buffer); goto on_error; } aws_byte_buf_clean_up_secure(&pem_obj_ptr->data); pem_obj_ptr->data = decoded_buffer; } return AWS_OP_SUCCESS; on_error: aws_pem_objects_clean_up(pem_objects); return AWS_OP_ERR; } int aws_pem_objects_init_from_file_path( struct aws_array_list *pem_objects, struct aws_allocator *allocator, const char *filename) { struct aws_byte_buf raw_file_buffer; if (aws_byte_buf_init_from_file(&raw_file_buffer, allocator, filename)) { AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to read file %s.", filename); return AWS_OP_ERR; } AWS_ASSERT(raw_file_buffer.buffer); struct aws_byte_cursor file_cursor = aws_byte_cursor_from_buf(&raw_file_buffer); if (aws_pem_objects_init_from_file_contents(pem_objects, allocator, file_cursor)) { aws_byte_buf_clean_up_secure(&raw_file_buffer); AWS_LOGF_ERROR(AWS_LS_IO_PEM, "Failed to decode PEM file %s.", filename); return 
AWS_OP_ERR; } aws_byte_buf_clean_up_secure(&raw_file_buffer); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pkcs11/000077500000000000000000000000001456575232400222415ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pkcs11/v2.40/000077500000000000000000000000001456575232400230125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pkcs11/v2.40/.clang-format000066400000000000000000000000241456575232400253610ustar00rootroot00000000000000DisableFormat: true aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pkcs11/v2.40/pkcs11.h000066400000000000000000001711231456575232400242720ustar00rootroot00000000000000/* This file is in the Public Domain. */ /* This file is based on: https://github.com/latchset/pkcs11-headers/blob/main/public-domain/2.40/pkcs11.h */ /* This file has been modified from its original version by Amazon: * - removed final semicolon from #define ULONGDEF, to avoid "extra-semi" warning * - removed final semicolon from #define STRUCTDEF, to avoid "extra-semi" warning */ #ifndef _PD_PKCS11_ #define _PD_PKCS11_ #define CRYPTOKI_VERSION_MAJOR 2 #define CRYPTOKI_VERSION_MINOR 40 #define CRYPTOKI_VERSION_AMENDMENT 0 /* Basic types */ typedef unsigned char CK_BBOOL; typedef unsigned char CK_BYTE; typedef unsigned char CK_CHAR; typedef unsigned char CK_UTF8CHAR; typedef unsigned long int CK_ULONG; typedef CK_BBOOL * CK_BBOOL_PTR; typedef CK_BYTE * CK_BYTE_PTR; typedef CK_CHAR * CK_CHAR_PTR; typedef CK_UTF8CHAR * CK_UTF8CHAR_PTR; typedef CK_ULONG * CK_ULONG_PTR; /* Basic defines */ #define NULL_PTR ((void *)0) typedef void * CK_VOID_PTR; typedef void ** CK_VOID_PTR_PTR; #define CK_EFFECTIVELY_INFINITE 0UL #define CK_UNAVAILABLE_INFORMATION ~0UL #define CK_INVALID_HANDLE 0UL #define CK_TRUE 1 #define CK_FALSE 0 /* CK_ types in alphabetical order */ #define ULONGDEF(__name__) \ typedef CK_ULONG __name__; \ typedef __name__ * __name__ ## _PTR ULONGDEF(CK_ATTRIBUTE_TYPE); ULONGDEF(CK_CERTIFICATE_CATEGORY); ULONGDEF(CK_CERTIFICATE_TYPE); ULONGDEF(CK_EC_KDF_TYPE); ULONGDEF(CK_EXTRACT_PARAMS); ULONGDEF(CK_FLAGS); ULONGDEF(CK_HW_FEATURE_TYPE); ULONGDEF(CK_JAVA_MIDP_SECURITY_DOMAIN); ULONGDEF(CK_KEY_TYPE); ULONGDEF(CK_MAC_GENERAL_PARAMS); ULONGDEF(CK_MECHANISM_TYPE); ULONGDEF(CK_NOTIFICATION); ULONGDEF(CK_OBJECT_CLASS); ULONGDEF(CK_OBJECT_HANDLE); ULONGDEF(CK_OTP_PARAM_TYPE); ULONGDEF(CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE); ULONGDEF(CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE); ULONGDEF(CK_RC2_PARAMS); ULONGDEF(CK_RSA_PKCS_MGF_TYPE); ULONGDEF(CK_RSA_PKCS_OAEP_SOURCE_TYPE); ULONGDEF(CK_RV); ULONGDEF(CK_SESSION_HANDLE); ULONGDEF(CK_SLOT_ID); ULONGDEF(CK_STATE); ULONGDEF(CK_USER_TYPE); ULONGDEF(CK_X9_42_DH_KDF_TYPE); /* domain specific values and constants */ /* CK (certificate) */ #define CK_CERTIFICATE_CATEGORY_UNSPECIFIED 0UL #define CK_CERTIFICATE_CATEGORY_TOKEN_USER 1UL #define CK_CERTIFICATE_CATEGORY_AUTHORITY 2UL #define CK_CERTIFICATE_CATEGORY_OTHER_ENTITY 3UL /* CK (OTP) */ #define CK_OTP_VALUE 0UL #define CK_OTP_PIN 1UL #define CK_OTP_CHALLENGE 2UL #define CK_OTP_TIME 3UL #define CK_OTP_COUNTER 4UL #define CK_OTP_FLAGS 5UL #define CK_OTP_OUTPUT_LENGTH 6UL #define CK_OTP_OUTPUT_FORMAT 7UL /* CK (OTP format) */ #define CK_OTP_FORMAT_DECIMAL 0UL #define CK_OTP_FORMAT_HEXADECIMAL 1UL #define CK_OTP_FORMAT_ALPHANUMERIC 2UL #define CK_OTP_FORMAT_BINARY 3UL /* CK (OTP requirement) */ #define CK_OTP_PARAM_IGNORED 0UL #define CK_OTP_PARAM_OPTIONAL 1UL #define CK_OTP_PARAM_MANDATORY 2UL /* CK (security) */ #define 
CK_SECURITY_DOMAIN_UNSPECIFIED 0UL #define CK_SECURITY_DOMAIN_MANUFACTURER 1UL #define CK_SECURITY_DOMAIN_OPERATOR 2UL #define CK_SECURITY_DOMAIN_THIRD_PARTY 3UL /* CK (SP800 DKM) */ #define CK_SP800_108_DKM_LENGTH_SUM_OF_KEYS 0x00000001UL #define CK_SP800_108_DKM_LENGTH_SUM_OF_SEGMENTS 0x00000002UL /* CKA */ #define CKA_CLASS 0x00000000UL #define CKA_TOKEN 0x00000001UL #define CKA_PRIVATE 0x00000002UL #define CKA_LABEL 0x00000003UL #define CKA_APPLICATION 0x00000010UL #define CKA_VALUE 0x00000011UL #define CKA_OBJECT_ID 0x00000012UL #define CKA_CERTIFICATE_TYPE 0x00000080UL #define CKA_ISSUER 0x00000081UL #define CKA_SERIAL_NUMBER 0x00000082UL #define CKA_AC_ISSUER 0x00000083UL #define CKA_OWNER 0x00000084UL #define CKA_ATTR_TYPES 0x00000085UL #define CKA_TRUSTED 0x00000086UL #define CKA_CERTIFICATE_CATEGORY 0x00000087UL #define CKA_JAVA_MIDP_SECURITY_DOMAIN 0x00000088UL #define CKA_URL 0x00000089UL #define CKA_HASH_OF_SUBJECT_PUBLIC_KEY 0x0000008AUL #define CKA_HASH_OF_ISSUER_PUBLIC_KEY 0x0000008BUL #define CKA_NAME_HASH_ALGORITHM 0x0000008CUL #define CKA_CHECK_VALUE 0x00000090UL #define CKA_KEY_TYPE 0x00000100UL #define CKA_SUBJECT 0x00000101UL #define CKA_ID 0x00000102UL #define CKA_SENSITIVE 0x00000103UL #define CKA_ENCRYPT 0x00000104UL #define CKA_DECRYPT 0x00000105UL #define CKA_WRAP 0x00000106UL #define CKA_UNWRAP 0x00000107UL #define CKA_SIGN 0x00000108UL #define CKA_SIGN_RECOVER 0x00000109UL #define CKA_VERIFY 0x0000010AUL #define CKA_VERIFY_RECOVER 0x0000010BUL #define CKA_DERIVE 0x0000010CUL #define CKA_START_DATE 0x00000110UL #define CKA_END_DATE 0x00000111UL #define CKA_MODULUS 0x00000120UL #define CKA_MODULUS_BITS 0x00000121UL #define CKA_PUBLIC_EXPONENT 0x00000122UL #define CKA_PRIVATE_EXPONENT 0x00000123UL #define CKA_PRIME_1 0x00000124UL #define CKA_PRIME_2 0x00000125UL #define CKA_EXPONENT_1 0x00000126UL #define CKA_EXPONENT_2 0x00000127UL #define CKA_COEFFICIENT 0x00000128UL #define CKA_PUBLIC_KEY_INFO 0x00000129UL #define CKA_PRIME 0x00000130UL #define CKA_SUBPRIME 0x00000131UL #define CKA_BASE 0x00000132UL #define CKA_PRIME_BITS 0x00000133UL #define CKA_SUBPRIME_BITS 0x00000134UL #define CKA_SUB_PRIME_BITS 0x00000134UL #define CKA_VALUE_BITS 0x00000160UL #define CKA_VALUE_LEN 0x00000161UL #define CKA_EXTRACTABLE 0x00000162UL #define CKA_LOCAL 0x00000163UL #define CKA_NEVER_EXTRACTABLE 0x00000164UL #define CKA_ALWAYS_SENSITIVE 0x00000165UL #define CKA_KEY_GEN_MECHANISM 0x00000166UL #define CKA_MODIFIABLE 0x00000170UL #define CKA_COPYABLE 0x00000171UL #define CKA_DESTROYABLE 0x00000172UL #define CKA_EC_PARAMS 0x00000180UL #define CKA_EC_POINT 0x00000181UL #define CKA_ALWAYS_AUTHENTICATE 0x00000202UL #define CKA_WRAP_WITH_TRUSTED 0x00000210UL #define CKA_OTP_FORMAT 0x00000220UL #define CKA_OTP_LENGTH 0x00000221UL #define CKA_OTP_TIME_INTERVAL 0x00000222UL #define CKA_OTP_USER_FRIENDLY_MODE 0x00000223UL #define CKA_OTP_CHALLENGE_REQUIREMENT 0x00000224UL #define CKA_OTP_TIME_REQUIREMENT 0x00000225UL #define CKA_OTP_COUNTER_REQUIREMENT 0x00000226UL #define CKA_OTP_PIN_REQUIREMENT 0x00000227UL #define CKA_OTP_COUNTER 0x0000022EUL #define CKA_OTP_TIME 0x0000022FUL #define CKA_OTP_USER_IDENTIFIER 0x0000022AUL #define CKA_OTP_SERVICE_IDENTIFIER 0x0000022BUL #define CKA_OTP_SERVICE_LOGO 0x0000022CUL #define CKA_OTP_SERVICE_LOGO_TYPE 0x0000022DUL #define CKA_GOSTR3410_PARAMS 0x00000250UL #define CKA_GOSTR3411_PARAMS 0x00000251UL #define CKA_GOST28147_PARAMS 0x00000252UL #define CKA_HW_FEATURE_TYPE 0x00000300UL #define CKA_RESET_ON_INIT 0x00000301UL #define CKA_HAS_RESET 
0x00000302UL #define CKA_PIXEL_X 0x00000400UL #define CKA_PIXEL_Y 0x00000401UL #define CKA_RESOLUTION 0x00000402UL #define CKA_CHAR_ROWS 0x00000403UL #define CKA_CHAR_COLUMNS 0x00000404UL #define CKA_COLOR 0x00000405UL #define CKA_BITS_PER_PIXEL 0x00000406UL #define CKA_CHAR_SETS 0x00000480UL #define CKA_ENCODING_METHODS 0x00000481UL #define CKA_MIME_TYPES 0x00000482UL #define CKA_MECHANISM_TYPE 0x00000500UL #define CKA_REQUIRED_CMS_ATTRIBUTES 0x00000501UL #define CKA_DEFAULT_CMS_ATTRIBUTES 0x00000502UL #define CKA_SUPPORTED_CMS_ATTRIBUTES 0x00000503UL #define CKA_VENDOR_DEFINED 0x80000000UL /* Array attributes */ #define CKA_WRAP_TEMPLATE 0x40000211UL #define CKA_UNWRAP_TEMPLATE 0x40000212UL #define CKA_DERIVE_TEMPLATE 0x40000213UL #define CKA_ALLOWED_MECHANISMS 0x40000600UL /* Deprecated */ #ifdef PKCS11_DEPRECATED #define CKA_ECDSA_PARAMS 0x00000180UL #define CKA_SECONDARY_AUTH 0x00000200UL #define CKA_AUTH_PIN_FLAGS 0x00000201UL #endif /* CKC */ #define CKC_X_509 0x00000000UL #define CKC_X_509_ATTR_CERT 0x00000001UL #define CKC_WTLS 0x00000002UL #define CKC_VENDOR_DEFINED 0x80000000UL /* CKD */ #define CKD_NULL 0x00000001UL #define CKD_SHA1_KDF 0x00000002UL #define CKD_SHA1_KDF_ASN1 0x00000003UL #define CKD_SHA1_KDF_CONCATENATE 0x00000004UL #define CKD_SHA224_KDF 0x00000005UL #define CKD_SHA256_KDF 0x00000006UL #define CKD_SHA384_KDF 0x00000007UL #define CKD_SHA512_KDF 0x00000008UL #define CKD_CPDIVERSIFY_KDF 0x00000009UL /* CFK (array attributes) */ #define CKF_ARRAY_ATTRIBUTE 0x40000000UL /* CKF (capabilities) */ #define CKF_LIBRARY_CANT_CREATE_OS_THREADS 0x00000001UL #define CKF_OS_LOCKING_OK 0x00000002UL /* CKF (mechanism) */ #define CKF_HW 0x00000001UL #define CKF_ENCRYPT 0x00000100UL #define CKF_DECRYPT 0x00000200UL #define CKF_DIGEST 0x00000400UL #define CKF_SIGN 0x00000800UL #define CKF_SIGN_RECOVER 0x00001000UL #define CKF_VERIFY 0x00002000UL #define CKF_VERIFY_RECOVER 0x00004000UL #define CKF_GENERATE 0x00008000UL #define CKF_GENERATE_KEY_PAIR 0x00010000UL #define CKF_WRAP 0x00020000UL #define CKF_UNWRAP 0x00040000UL #define CKF_DERIVE 0x00080000UL #define CKF_EC_F_P 0x00100000UL #define CKF_EC_F_2M 0x00200000UL #define CKF_EC_ECPARAMETERS 0x00400000UL #define CKF_EC_NAMEDCURVE 0x00800000U #define CKF_EC_UNCOMPRESS 0x01000000UL #define CKF_EC_COMPRESS 0x02000000UL #define CKF_EXTENSION 0x80000000UL /* CKF (OTP) */ #define CKF_NEXT_OTP 0x00000001UL #define CKF_EXCLUDE_TIME 0x00000002UL #define CKF_EXCLUDE_COUNTER 0x00000004UL #define CKF_EXCLUDE_CHALLENGE 0x00000008UL #define CKF_EXCLUDE_PIN 0x00000010UL #define CKF_USER_FRIENDLY_OTP 0x00000020UL /* CKF (paramters to functions) */ #define CKF_DONT_BLOCK 1 /* CKF (session) */ #define CKF_RW_SESSION 0x00000002UL #define CKF_SERIAL_SESSION 0x00000004UL /* CFK (slot) */ #define CKF_TOKEN_PRESENT 0x00000001UL #define CKF_REMOVABLE_DEVICE 0x00000002UL #define CKF_HW_SLOT 0x00000004UL /* CKF (token) */ #define CKF_RNG 0x00000001UL #define CKF_WRITE_PROTECTED 0x00000002UL #define CKF_LOGIN_REQUIRED 0x00000004UL #define CKF_USER_PIN_INITIALIZED 0x00000008UL #define CKF_RESTORE_KEY_NOT_NEEDED 0x00000020UL #define CKF_CLOCK_ON_TOKEN 0x00000040UL #define CKF_PROTECTED_AUTHENTICATION_PATH 0x00000100UL #define CKF_DUAL_CRYPTO_OPERATIONS 0x00000200UL #define CKF_TOKEN_INITIALIZED 0x00000400UL #define CKF_SECONDARY_AUTHENTICATION 0x00000800UL #define CKF_USER_PIN_COUNT_LOW 0x00010000UL #define CKF_USER_PIN_FINAL_TRY 0x00020000UL #define CKF_USER_PIN_LOCKED 0x00040000UL #define CKF_USER_PIN_TO_BE_CHANGED 0x00080000UL #define 
CKF_SO_PIN_COUNT_LOW 0x00100000UL #define CKF_SO_PIN_FINAL_TRY 0x00200000UL #define CKF_SO_PIN_LOCKED 0x00400000UL #define CKF_SO_PIN_TO_BE_CHANGED 0x00800000UL #define CKF_ERROR_STATE 0x01000000UL /* CKG (MFG) */ #define CKG_MGF1_SHA1 0x00000001UL #define CKG_MGF1_SHA256 0x00000002UL #define CKG_MGF1_SHA384 0x00000003UL #define CKG_MGF1_SHA512 0x00000004UL #define CKG_MGF1_SHA224 0x00000005UL /* CKH */ #define CKH_MONOTONIC_COUNTER 0x00000001UL #define CKH_CLOCK 0x00000002UL #define CKH_USER_INTERFACE 0x00000003UL #define CKH_VENDOR_DEFINED 0x80000000UL /* CKK */ #define CKK_RSA 0x00000000UL #define CKK_DSA 0x00000001UL #define CKK_DH 0x00000002UL #define CKK_EC 0x00000003UL #define CKK_X9_42_DH 0x00000004UL #define CKK_KEA 0x00000005UL #define CKK_GENERIC_SECRET 0x00000010UL #define CKK_RC2 0x00000011UL #define CKK_RC4 0x00000012UL #define CKK_DES 0x00000013UL #define CKK_DES2 0x00000014UL #define CKK_DES3 0x00000015UL #define CKK_CAST 0x00000016UL #define CKK_CAST3 0x00000017UL #define CKK_CAST128 0x00000018UL #define CKK_RC5 0x00000019UL #define CKK_IDEA 0x0000001AUL #define CKK_SKIPJACK 0x0000001BUL #define CKK_BATON 0x0000001CUL #define CKK_JUNIPER 0x0000001DUL #define CKK_CDMF 0x0000001EUL #define CKK_AES 0x0000001FUL #define CKK_BLOWFISH 0x00000020UL #define CKK_TWOFISH 0x00000021UL #define CKK_SECURID 0x00000022UL #define CKK_HOTP 0x00000023UL #define CKK_ACTI 0x00000024UL #define CKK_CAMELLIA 0x00000025UL #define CKK_ARIA 0x00000026UL #define CKK_MD5_HMAC 0x00000027UL #define CKK_SHA_1_HMAC 0x00000028UL #define CKK_RIPEMD128_HMAC 0x00000029UL #define CKK_RIPEMD160_HMAC 0x0000002AUL #define CKK_SHA256_HMAC 0x0000002BUL #define CKK_SHA384_HMAC 0x0000002CUL #define CKK_SHA512_HMAC 0x0000002DUL #define CKK_SHA224_HMAC 0x0000002EUL #define CKK_SEED 0x0000002FUL #define CKK_GOSTR3410 0x00000030UL #define CKK_GOSTR3411 0x00000031UL #define CKK_GOST28147 0x00000032UL #define CKK_VENDOR_DEFINED 0x80000000UL /* Deprecated */ #ifdef PKCS11_DEPRECATED #define CKK_ECDSA 0x00000003UL #define CKK_CAST5 0x00000018UL #endif /* CKM */ #define CKM_RSA_PKCS_KEY_PAIR_GEN 0x00000000UL #define CKM_RSA_PKCS 0x00000001UL #define CKM_RSA_9796 0x00000002UL #define CKM_RSA_X_509 0x00000003UL #define CKM_MD2_RSA_PKCS 0x00000004UL #define CKM_MD5_RSA_PKCS 0x00000005UL #define CKM_SHA1_RSA_PKCS 0x00000006UL #define CKM_RIPEMD128_RSA_PKCS 0x00000007UL #define CKM_RIPEMD160_RSA_PKCS 0x00000008UL #define CKM_RSA_PKCS_OAEP 0x00000009UL #define CKM_RSA_X9_31_KEY_PAIR_GEN 0x0000000AUL #define CKM_RSA_X9_31 0x0000000BUL #define CKM_SHA1_RSA_X9_31 0x0000000CUL #define CKM_RSA_PKCS_PSS 0x0000000DUL #define CKM_SHA1_RSA_PKCS_PSS 0x0000000EUL #define CKM_DSA_KEY_PAIR_GEN 0x00000010UL #define CKM_DSA 0x00000011UL #define CKM_DSA_SHA1 0x00000012UL #define CKM_DSA_SHA224 0x00000013UL #define CKM_DSA_SHA256 0x00000014UL #define CKM_DSA_SHA384 0x00000015UL #define CKM_DSA_SHA512 0x00000016UL #define CKM_DH_PKCS_KEY_PAIR_GEN 0x00000020UL #define CKM_DH_PKCS_DERIVE 0x00000021UL #define CKM_X9_42_DH_KEY_PAIR_GEN 0x00000030UL #define CKM_X9_42_DH_DERIVE 0x00000031UL #define CKM_X9_42_DH_HYBRID_DERIVE 0x00000032UL #define CKM_X9_42_MQV_DERIVE 0x00000033UL #define CKM_SHA256_RSA_PKCS 0x00000040UL #define CKM_SHA384_RSA_PKCS 0x00000041UL #define CKM_SHA512_RSA_PKCS 0x00000042UL #define CKM_SHA256_RSA_PKCS_PSS 0x00000043UL #define CKM_SHA384_RSA_PKCS_PSS 0x00000044UL #define CKM_SHA512_RSA_PKCS_PSS 0x00000045UL #define CKM_SHA224_RSA_PKCS 0x00000046UL #define CKM_SHA224_RSA_PKCS_PSS 0x00000047UL #define CKM_SHA512_224 0x00000048UL 
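/*
 * Illustrative sketch, not part of the vendored header: the CKF_* mechanism
 * flags above are independent bit masks, so a capability check against a
 * CK_MECHANISM_INFO (defined further down in this file, filled in by
 * C_GetMechanismInfo) is a plain bitwise test. Guarded out so it never
 * compiles; names prefixed "example_" are hypothetical.
 */
#if 0 /* example only */
static CK_BBOOL example_slot_can_sign_rsa_pkcs(CK_FUNCTION_LIST *fn, CK_SLOT_ID slot_id) {
    CK_MECHANISM_INFO info;
    if (fn->C_GetMechanismInfo(slot_id, CKM_RSA_PKCS, &info) != CKR_OK) {
        return CK_FALSE; /* mechanism unsupported or query failed */
    }
    /* CKF_SIGN, CKF_HW, etc. combine as independent bits in info.flags */
    return (info.flags & CKF_SIGN) ? CK_TRUE : CK_FALSE;
}
#endif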
#define CKM_SHA512_224_HMAC 0x00000049UL #define CKM_SHA512_224_HMAC_GENERAL 0x0000004AUL #define CKM_SHA512_224_KEY_DERIVATION 0x0000004BUL #define CKM_SHA512_256 0x0000004CUL #define CKM_SHA512_256_HMAC 0x0000004DUL #define CKM_SHA512_256_HMAC_GENERAL 0x0000004EUL #define CKM_SHA512_256_KEY_DERIVATION 0x0000004FUL #define CKM_SHA512_T 0x00000050UL #define CKM_SHA512_T_HMAC 0x00000051UL #define CKM_SHA512_T_HMAC_GENERAL 0x00000052UL #define CKM_SHA512_T_KEY_DERIVATION 0x00000053UL #define CKM_RC2_KEY_GEN 0x00000100UL #define CKM_RC2_ECB 0x00000101UL #define CKM_RC2_CBC 0x00000102UL #define CKM_RC2_MAC 0x00000103UL #define CKM_RC2_MAC_GENERAL 0x00000104UL #define CKM_RC2_CBC_PAD 0x00000105UL #define CKM_RC4_KEY_GEN 0x00000110UL #define CKM_RC4 0x00000111UL #define CKM_DES_KEY_GEN 0x00000120UL #define CKM_DES_ECB 0x00000121UL #define CKM_DES_CBC 0x00000122UL #define CKM_DES_MAC 0x00000123UL #define CKM_DES_MAC_GENERAL 0x00000124UL #define CKM_DES_CBC_PAD 0x00000125UL #define CKM_DES2_KEY_GEN 0x00000130UL #define CKM_DES3_KEY_GEN 0x00000131UL #define CKM_DES3_ECB 0x00000132UL #define CKM_DES3_CBC 0x00000133UL #define CKM_DES3_MAC 0x00000134UL #define CKM_DES3_MAC_GENERAL 0x00000135UL #define CKM_DES3_CBC_PAD 0x00000136UL #define CKM_DES3_CMAC_GENERAL 0x00000137UL #define CKM_DES3_CMAC 0x00000138UL #define CKM_CDMF_KEY_GEN 0x00000140UL #define CKM_CDMF_ECB 0x00000141UL #define CKM_CDMF_CBC 0x00000142UL #define CKM_CDMF_MAC 0x00000143UL #define CKM_CDMF_MAC_GENERAL 0x00000144UL #define CKM_CDMF_CBC_PAD 0x00000145UL #define CKM_DES_OFB64 0x00000150UL #define CKM_DES_OFB8 0x00000151UL #define CKM_DES_CFB64 0x00000152UL #define CKM_DES_CFB8 0x00000153UL #define CKM_MD2 0x00000200UL #define CKM_MD2_HMAC 0x00000201UL #define CKM_MD2_HMAC_GENERAL 0x00000202UL #define CKM_MD5 0x00000210UL #define CKM_MD5_HMAC 0x00000211UL #define CKM_MD5_HMAC_GENERAL 0x00000212UL #define CKM_SHA_1 0x00000220UL #define CKM_SHA_1_HMAC 0x00000221UL #define CKM_SHA_1_HMAC_GENERAL 0x00000222UL #define CKM_RIPEMD128 0x00000230UL #define CKM_RIPEMD128_HMAC 0x00000231UL #define CKM_RIPEMD128_HMAC_GENERAL 0x00000232UL #define CKM_RIPEMD160 0x00000240UL #define CKM_RIPEMD160_HMAC 0x00000241UL #define CKM_RIPEMD160_HMAC_GENERAL 0x00000242UL #define CKM_SHA256 0x00000250UL #define CKM_SHA256_HMAC 0x00000251UL #define CKM_SHA256_HMAC_GENERAL 0x00000252UL #define CKM_SHA224 0x00000255UL #define CKM_SHA224_HMAC 0x00000256UL #define CKM_SHA224_HMAC_GENERAL 0x00000257UL #define CKM_SHA384 0x00000260UL #define CKM_SHA384_HMAC 0x00000261UL #define CKM_SHA384_HMAC_GENERAL 0x00000262UL #define CKM_SHA512 0x00000270UL #define CKM_SHA512_HMAC 0x00000271UL #define CKM_SHA512_HMAC_GENERAL 0x00000272UL #define CKM_SECURID_KEY_GEN 0x00000280UL #define CKM_SECURID 0x00000282UL #define CKM_HOTP_KEY_GEN 0x00000290UL #define CKM_HOTP 0x00000291UL #define CKM_ACTI 0x000002A0UL #define CKM_ACTI_KEY_GEN 0x000002A1UL #define CKM_CAST_KEY_GEN 0x00000300UL #define CKM_CAST_ECB 0x00000301UL #define CKM_CAST_CBC 0x00000302UL #define CKM_CAST_MAC 0x00000303UL #define CKM_CAST_MAC_GENERAL 0x00000304UL #define CKM_CAST_CBC_PAD 0x00000305UL #define CKM_CAST3_KEY_GEN 0x00000310UL #define CKM_CAST3_ECB 0x00000311UL #define CKM_CAST3_CBC 0x00000312UL #define CKM_CAST3_MAC 0x00000313UL #define CKM_CAST3_MAC_GENERAL 0x00000314UL #define CKM_CAST3_CBC_PAD 0x00000315UL #define CKM_CAST128_KEY_GEN 0x00000320UL #define CKM_CAST5_ECB 0x00000321UL #define CKM_CAST128_ECB 0x00000321UL #define CKM_CAST128_MAC 0x00000323UL #define CKM_CAST128_CBC 0x00000322UL #define 
CKM_CAST128_MAC_GENERAL 0x00000324UL #define CKM_CAST128_CBC_PAD 0x00000325UL #define CKM_RC5_KEY_GEN 0x00000330UL #define CKM_RC5_ECB 0x00000331UL #define CKM_RC5_CBC 0x00000332UL #define CKM_RC5_MAC 0x00000333UL #define CKM_RC5_MAC_GENERAL 0x00000334UL #define CKM_RC5_CBC_PAD 0x00000335UL #define CKM_IDEA_KEY_GEN 0x00000340UL #define CKM_IDEA_ECB 0x00000341UL #define CKM_IDEA_CBC 0x00000342UL #define CKM_IDEA_MAC 0x00000343UL #define CKM_IDEA_MAC_GENERAL 0x00000344UL #define CKM_IDEA_CBC_PAD 0x00000345UL #define CKM_GENERIC_SECRET_KEY_GEN 0x00000350UL #define CKM_CONCATENATE_BASE_AND_KEY 0x00000360UL #define CKM_CONCATENATE_BASE_AND_DATA 0x00000362UL #define CKM_CONCATENATE_DATA_AND_BASE 0x00000363UL #define CKM_XOR_BASE_AND_DATA 0x00000364UL #define CKM_EXTRACT_KEY_FROM_KEY 0x00000365UL #define CKM_SSL3_PRE_MASTER_KEY_GEN 0x00000370UL #define CKM_SSL3_MASTER_KEY_DERIVE 0x00000371UL #define CKM_SSL3_KEY_AND_MAC_DERIVE 0x00000372UL #define CKM_SSL3_MASTER_KEY_DERIVE_DH 0x00000373UL #define CKM_TLS_PRE_MASTER_KEY_GEN 0x00000374UL #define CKM_TLS_MASTER_KEY_DERIVE 0x00000375UL #define CKM_TLS_KEY_AND_MAC_DERIVE 0x00000376UL #define CKM_TLS_MASTER_KEY_DERIVE_DH 0x00000377UL #define CKM_TLS_PRF 0x00000378UL #define CKM_SSL3_MD5_MAC 0x00000380UL #define CKM_SSL3_SHA1_MAC 0x00000381UL #define CKM_MD5_KEY_DERIVATION 0x00000390UL #define CKM_MD2_KEY_DERIVATION 0x00000391UL #define CKM_SHA1_KEY_DERIVATION 0x00000392UL #define CKM_SHA256_KEY_DERIVATION 0x00000393UL #define CKM_SHA384_KEY_DERIVATION 0x00000394UL #define CKM_SHA512_KEY_DERIVATION 0x00000395UL #define CKM_SHA224_KEY_DERIVATION 0x00000396UL #define CKM_PBE_MD2_DES_CBC 0x000003A0UL #define CKM_PBE_MD5_DES_CBC 0x000003A1UL #define CKM_PBE_MD5_CAST_CBC 0x000003A2UL #define CKM_PBE_MD5_CAST3_CBC 0x000003A3UL #define CKM_PBE_MD5_CAST128_CBC 0x000003A4UL #define CKM_PBE_SHA1_CAST128_CBC 0x000003A5UL #define CKM_PBE_SHA1_RC4_128 0x000003A6UL #define CKM_PBE_SHA1_RC4_40 0x000003A7UL #define CKM_PBE_SHA1_DES3_EDE_CBC 0x000003A8UL #define CKM_PBE_SHA1_DES2_EDE_CBC 0x000003A9UL #define CKM_PBE_SHA1_RC2_128_CBC 0x000003AAUL #define CKM_PBE_SHA1_RC2_40_CBC 0x000003ABUL #define CKM_PKCS5_PBKD2 0x000003B0UL #define CKM_PBA_SHA1_WITH_SHA1_HMAC 0x000003C0UL #define CKM_WTLS_PRE_MASTER_KEY_GEN 0x000003D0UL #define CKM_WTLS_MASTER_KEY_DERIVE 0x000003D1UL #define CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC 0x000003D2UL #define CKM_WTLS_PRF 0x000003D3UL #define CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE 0x000003D4UL #define CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE 0x000003D5UL #define CKM_TLS10_MAC_SERVER 0x000003D6UL #define CKM_TLS10_MAC_CLIENT 0x000003D7UL #define CKM_TLS12_MAC 0x000003D8UL #define CKM_TLS12_KDF 0x000003D9UL #define CKM_TLS12_MASTER_KEY_DERIVE 0x000003E0UL #define CKM_TLS12_KEY_AND_MAC_DERIVE 0x000003E1UL #define CKM_TLS12_MASTER_KEY_DERIVE_DH 0x000003E2UL #define CKM_TLS12_KEY_SAFE_DERIVE 0x000003E3UL #define CKM_TLS_MAC 0x000003E4UL #define CKM_TLS_KDF 0x000003E5UL #define CKM_KEY_WRAP_LYNKS 0x00000400UL #define CKM_KEY_WRAP_SET_OAEP 0x00000401UL #define CKM_CMS_SIG 0x00000500UL #define CKM_KIP_DERIVE 0x00000510UL #define CKM_KIP_WRAP 0x00000511UL #define CKM_KIP_MAC 0x00000512UL #define CKM_CAMELLIA_KEY_GEN 0x00000550UL #define CKM_CAMELLIA_ECB 0x00000551UL #define CKM_CAMELLIA_CBC 0x00000552UL #define CKM_CAMELLIA_MAC 0x00000553UL #define CKM_CAMELLIA_MAC_GENERAL 0x00000554UL #define CKM_CAMELLIA_CBC_PAD 0x00000555UL #define CKM_CAMELLIA_ECB_ENCRYPT_DATA 0x00000556UL #define CKM_CAMELLIA_CBC_ENCRYPT_DATA 0x00000557UL #define CKM_CAMELLIA_CTR 0x00000558UL 
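/*
 * Illustrative sketch, not part of the vendored header: every CKM_* code above
 * is used by filling a CK_MECHANISM (defined later in this file) that pairs the
 * code with an optional parameter blob. Parameterless mechanisms such as
 * CKM_SHA256 simply zero the pointer and length. Guarded out; "example_" names
 * are hypothetical.
 */
#if 0 /* example only */
static void example_fill_sha256_mechanism(CK_MECHANISM *mech) {
    mech->mechanism = CKM_SHA256; /* which algorithm to run */
    mech->pParameter = NULL_PTR;  /* no mechanism-specific parameters */
    mech->ulParameterLen = 0;
}
#endif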
#define CKM_ARIA_KEY_GEN 0x00000560UL #define CKM_ARIA_ECB 0x00000561UL #define CKM_ARIA_CBC 0x00000562UL #define CKM_ARIA_MAC 0x00000563UL #define CKM_ARIA_MAC_GENERAL 0x00000564UL #define CKM_ARIA_CBC_PAD 0x00000565UL #define CKM_ARIA_ECB_ENCRYPT_DATA 0x00000566UL #define CKM_ARIA_CBC_ENCRYPT_DATA 0x00000567UL #define CKM_SEED_KEY_GEN 0x00000650UL #define CKM_SEED_ECB 0x00000651UL #define CKM_SEED_CBC 0x00000652UL #define CKM_SEED_MAC 0x00000653UL #define CKM_SEED_MAC_GENERAL 0x00000654UL #define CKM_SEED_CBC_PAD 0x00000655UL #define CKM_SEED_ECB_ENCRYPT_DATA 0x00000656UL #define CKM_SEED_CBC_ENCRYPT_DATA 0x00000657UL #define CKM_SKIPJACK_KEY_GEN 0x00001000UL #define CKM_SKIPJACK_ECB64 0x00001001UL #define CKM_SKIPJACK_CBC64 0x00001002UL #define CKM_SKIPJACK_OFB64 0x00001003UL #define CKM_SKIPJACK_CFB64 0x00001004UL #define CKM_SKIPJACK_CFB32 0x00001005UL #define CKM_SKIPJACK_CFB16 0x00001006UL #define CKM_SKIPJACK_CFB8 0x00001007UL #define CKM_SKIPJACK_WRAP 0x00001008UL #define CKM_SKIPJACK_PRIVATE_WRAP 0x00001009UL #define CKM_SKIPJACK_RELAYX 0x0000100AUL #define CKM_KEA_KEY_PAIR_GEN 0x00001010UL #define CKM_KEA_KEY_DERIVE 0x00001011UL #define CKM_KEA_DERIVE 0x00001012UL #define CKM_FORTEZZA_TIMESTAMP 0x00001020UL #define CKM_BATON_KEY_GEN 0x00001030UL #define CKM_BATON_ECB128 0x00001031UL #define CKM_BATON_ECB96 0x00001032UL #define CKM_BATON_CBC128 0x00001033UL #define CKM_BATON_COUNTER 0x00001034UL #define CKM_BATON_SHUFFLE 0x00001035UL #define CKM_BATON_WRAP 0x00001036UL #define CKM_EC_KEY_PAIR_GEN 0x00001040UL #define CKM_ECDSA 0x00001041UL #define CKM_ECDSA_SHA1 0x00001042UL #define CKM_ECDSA_SHA224 0x00001043UL #define CKM_ECDSA_SHA256 0x00001044UL #define CKM_ECDSA_SHA384 0x00001045UL #define CKM_ECDSA_SHA512 0x00001046UL #define CKM_ECDH1_DERIVE 0x00001050UL #define CKM_ECDH1_COFACTOR_DERIVE 0x00001051UL #define CKM_ECMQV_DERIVE 0x00001052UL #define CKM_ECDH_AES_KEY_WRAP 0x00001053UL #define CKM_RSA_AES_KEY_WRAP 0x00001054UL #define CKM_JUNIPER_KEY_GEN 0x00001060UL #define CKM_JUNIPER_ECB128 0x00001061UL #define CKM_JUNIPER_CBC128 0x00001062UL #define CKM_JUNIPER_COUNTER 0x00001063UL #define CKM_JUNIPER_SHUFFLE 0x00001064UL #define CKM_JUNIPER_WRAP 0x00001065UL #define CKM_FASTHASH 0x00001070UL #define CKM_AES_KEY_GEN 0x00001080UL #define CKM_AES_ECB 0x00001081UL #define CKM_AES_CBC 0x00001082UL #define CKM_AES_MAC 0x00001083UL #define CKM_AES_MAC_GENERAL 0x00001084UL #define CKM_AES_CBC_PAD 0x00001085UL #define CKM_AES_CTR 0x00001086UL #define CKM_AES_GCM 0x00001087UL #define CKM_AES_CCM 0x00001088UL #define CKM_AES_CTS 0x00001089UL #define CKM_AES_CMAC 0x0000108AUL #define CKM_AES_CMAC_GENERAL 0x0000108BUL #define CKM_AES_XCBC_MAC 0x0000108CUL #define CKM_AES_XCBC_MAC_96 0x0000108DUL #define CKM_AES_GMAC 0x0000108EUL #define CKM_BLOWFISH_KEY_GEN 0x00001090UL #define CKM_BLOWFISH_CBC 0x00001091UL #define CKM_TWOFISH_KEY_GEN 0x00001092UL #define CKM_TWOFISH_CBC 0x00001093UL #define CKM_BLOWFISH_CBC_PAD 0x00001094UL #define CKM_TWOFISH_CBC_PAD 0x00001095UL #define CKM_DES_ECB_ENCRYPT_DATA 0x00001100UL #define CKM_DES_CBC_ENCRYPT_DATA 0x00001101UL #define CKM_DES3_ECB_ENCRYPT_DATA 0x00001102UL #define CKM_DES3_CBC_ENCRYPT_DATA 0x00001103UL #define CKM_AES_ECB_ENCRYPT_DATA 0x00001104UL #define CKM_AES_CBC_ENCRYPT_DATA 0x00001105UL #define CKM_GOSTR3410_KEY_PAIR_GEN 0x00001200UL #define CKM_GOSTR3410 0x00001201UL #define CKM_GOSTR3410_WITH_GOSTR3411 0x00001202UL #define CKM_GOSTR3410_KEY_WRAP 0x00001203UL #define CKM_GOSTR3410_DERIVE 0x00001204UL #define CKM_GOSTR3411 0x00001210UL 
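/*
 * Illustrative sketch, not part of the vendored header: the usual two-call
 * pattern for C_Sign (declared near the end of this file) - call once with a
 * NULL output buffer to learn the required signature length, then call again
 * with real storage. CKM_ECDSA here signs a caller-supplied digest. Guarded
 * out; "example_" names are hypothetical and error handling is minimal.
 */
#if 0 /* example only */
static CK_RV example_sign_digest(
    CK_FUNCTION_LIST *fn,
    CK_SESSION_HANDLE session,
    CK_OBJECT_HANDLE private_key,
    CK_BYTE *digest,
    CK_ULONG digest_len,
    CK_BYTE *signature,
    CK_ULONG *signature_len /* in: buffer capacity, out: bytes written */) {

    CK_MECHANISM mech = {CKM_ECDSA, NULL_PTR, 0};
    CK_RV rv = fn->C_SignInit(session, &mech, private_key);
    if (rv != CKR_OK) {
        return rv;
    }
    CK_ULONG needed = 0;
    rv = fn->C_Sign(session, digest, digest_len, NULL_PTR, &needed); /* length query */
    if (rv != CKR_OK) {
        return rv;
    }
    if (needed > *signature_len) {
        return CKR_BUFFER_TOO_SMALL; /* defined further down in this file */
    }
    *signature_len = needed;
    return fn->C_Sign(session, digest, digest_len, signature, signature_len);
}
#endif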
#define CKM_GOSTR3411_HMAC 0x00001211UL #define CKM_GOST28147_KEY_GEN 0x00001220UL #define CKM_GOST28147_ECB 0x00001221UL #define CKM_GOST28147 0x00001222UL #define CKM_GOST28147_MAC 0x00001223UL #define CKM_GOST28147_KEY_WRAP 0x00001224UL #define CKM_DSA_PARAMETER_GEN 0x00002000UL #define CKM_DH_PKCS_PARAMETER_GEN 0x00002001UL #define CKM_X9_42_DH_PARAMETER_GEN 0x00002002UL #define CKM_DSA_PROBABILISTIC_PARAMETER_GEN 0x00002003UL #define CKM_DSA_PROBABLISTIC_PARAMETER_GEN 0x00002003UL #define CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN 0x00002004UL #define CKM_AES_OFB 0x00002104UL #define CKM_AES_CFB64 0x00002105UL #define CKM_AES_CFB8 0x00002106UL #define CKM_AES_CFB128 0x00002107UL #define CKM_AES_CFB1 0x00002108UL #define CKM_AES_KEY_WRAP 0x00002109UL #define CKM_AES_KEY_WRAP_PAD 0x0000210AUL #define CKM_RSA_PKCS_TPM_1_1 0x00004001UL #define CKM_RSA_PKCS_OAEP_TPM_1_1 0x00004002UL #define CKM_VENDOR_DEFINED 0x80000000UL /* Deprecated */ #ifdef PKCS11_DEPRECATED #define CKM_CAST5_KEY_GEN 0x00000320UL #define CKM_CAST5_CBC 0x00000322UL #define CKM_CAST5_MAC 0x00000323UL #define CKM_CAST5_MAC_GENERAL 0x00000324UL #define CKM_CAST5_CBC_PAD 0x00000325UL #define CKM_PBE_MD5_CAST5_CBC 0x000003A4UL #define CKM_PBE_SHA1_CAST5_CBC 0x000003A5UL #define CKM_ECDSA_KEY_PAIR_GEN 0x00001040UL #endif /* CKN */ #define CKN_SURRENDER 0UL #define CKN_OTP_CHANGED 1UL /* CKO */ #define CKO_DATA 0x00000000UL #define CKO_CERTIFICATE 0x00000001UL #define CKO_PUBLIC_KEY 0x00000002UL #define CKO_PRIVATE_KEY 0x00000003UL #define CKO_SECRET_KEY 0x00000004UL #define CKO_HW_FEATURE 0x00000005UL #define CKO_DOMAIN_PARAMETERS 0x00000006UL #define CKO_MECHANISM 0x00000007UL #define CKO_OTP_KEY 0x00000008UL #define CKO_VENDOR_DEFINED 0x80000000UL /* CKP (PBKD2) */ #define CKP_PKCS5_PBKD2_HMAC_SHA1 0x00000001UL #define CKP_PKCS5_PBKD2_HMAC_GOSTR3411 0x00000002UL #define CKP_PKCS5_PBKD2_HMAC_SHA224 0x00000003UL #define CKP_PKCS5_PBKD2_HMAC_SHA256 0x00000004UL #define CKP_PKCS5_PBKD2_HMAC_SHA384 0x00000005UL #define CKP_PKCS5_PBKD2_HMAC_SHA512 0x00000006UL #define CKP_PKCS5_PBKD2_HMAC_SHA512_224 0x00000007UL #define CKP_PKCS5_PBKD2_HMAC_SHA512_256 0x00000008UL /* CKR */ #define CKR_OK 0x00000000UL #define CKR_CANCEL 0x00000001UL #define CKR_HOST_MEMORY 0x00000002UL #define CKR_SLOT_ID_INVALID 0x00000003UL #define CKR_GENERAL_ERROR 0x00000005UL #define CKR_FUNCTION_FAILED 0x00000006UL #define CKR_ARGUMENTS_BAD 0x00000007UL #define CKR_NO_EVENT 0x00000008UL #define CKR_NEED_TO_CREATE_THREADS 0x00000009UL #define CKR_CANT_LOCK 0x0000000AUL #define CKR_ATTRIBUTE_READ_ONLY 0x00000010UL #define CKR_ATTRIBUTE_SENSITIVE 0x00000011UL #define CKR_ATTRIBUTE_TYPE_INVALID 0x00000012UL #define CKR_ATTRIBUTE_VALUE_INVALID 0x00000013UL #define CKR_ACTION_PROHIBITED 0x0000001BUL #define CKR_DATA_INVALID 0x00000020UL #define CKR_DATA_LEN_RANGE 0x00000021UL #define CKR_DEVICE_ERROR 0x00000030UL #define CKR_DEVICE_MEMORY 0x00000031UL #define CKR_DEVICE_REMOVED 0x00000032UL #define CKR_ENCRYPTED_DATA_INVALID 0x00000040UL #define CKR_ENCRYPTED_DATA_LEN_RANGE 0x00000041UL #define CKR_FUNCTION_CANCELED 0x00000050UL #define CKR_FUNCTION_NOT_PARALLEL 0x00000051UL #define CKR_FUNCTION_NOT_SUPPORTED 0x00000054UL #define CKR_KEY_HANDLE_INVALID 0x00000060UL #define CKR_KEY_SIZE_RANGE 0x00000062UL #define CKR_KEY_TYPE_INCONSISTENT 0x00000063UL #define CKR_KEY_NOT_NEEDED 0x00000064UL #define CKR_KEY_CHANGED 0x00000065UL #define CKR_KEY_NEEDED 0x00000066UL #define CKR_KEY_INDIGESTIBLE 0x00000067UL #define CKR_KEY_FUNCTION_NOT_PERMITTED 0x00000068UL #define 
CKR_KEY_NOT_WRAPPABLE 0x00000069UL #define CKR_KEY_UNEXTRACTABLE 0x0000006AUL #define CKR_MECHANISM_INVALID 0x00000070UL #define CKR_MECHANISM_PARAM_INVALID 0x00000071UL #define CKR_OBJECT_HANDLE_INVALID 0x00000082UL #define CKR_OPERATION_ACTIVE 0x00000090UL #define CKR_OPERATION_NOT_INITIALIZED 0x00000091UL #define CKR_PIN_INCORRECT 0x000000A0UL #define CKR_PIN_INVALID 0x000000A1UL #define CKR_PIN_LEN_RANGE 0x000000A2UL #define CKR_PIN_EXPIRED 0x000000A3UL #define CKR_PIN_LOCKED 0x000000A4UL #define CKR_SESSION_CLOSED 0x000000B0UL #define CKR_SESSION_COUNT 0x000000B1UL #define CKR_SESSION_HANDLE_INVALID 0x000000B3UL #define CKR_SESSION_PARALLEL_NOT_SUPPORTED 0x000000B4UL #define CKR_SESSION_READ_ONLY 0x000000B5UL #define CKR_SESSION_EXISTS 0x000000B6UL #define CKR_SESSION_READ_ONLY_EXISTS 0x000000B7UL #define CKR_SESSION_READ_WRITE_SO_EXISTS 0x000000B8UL #define CKR_SIGNATURE_INVALID 0x000000C0UL #define CKR_SIGNATURE_LEN_RANGE 0x000000C1UL #define CKR_TEMPLATE_INCOMPLETE 0x000000D0UL #define CKR_TEMPLATE_INCONSISTENT 0x000000D1UL #define CKR_TOKEN_NOT_PRESENT 0x000000E0UL #define CKR_TOKEN_NOT_RECOGNIZED 0x000000E1UL #define CKR_TOKEN_WRITE_PROTECTED 0x000000E2UL #define CKR_UNWRAPPING_KEY_HANDLE_INVALID 0x000000F0UL #define CKR_UNWRAPPING_KEY_SIZE_RANGE 0x000000F1UL #define CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT 0x000000F2UL #define CKR_USER_ALREADY_LOGGED_IN 0x00000100UL #define CKR_USER_NOT_LOGGED_IN 0x00000101UL #define CKR_USER_PIN_NOT_INITIALIZED 0x00000102UL #define CKR_USER_TYPE_INVALID 0x00000103UL #define CKR_USER_ANOTHER_ALREADY_LOGGED_IN 0x00000104UL #define CKR_USER_TOO_MANY_TYPES 0x00000105UL #define CKR_WRAPPED_KEY_INVALID 0x00000110UL #define CKR_WRAPPED_KEY_LEN_RANGE 0x00000112UL #define CKR_WRAPPING_KEY_HANDLE_INVALID 0x00000113UL #define CKR_WRAPPING_KEY_SIZE_RANGE 0x00000114UL #define CKR_WRAPPING_KEY_TYPE_INCONSISTENT 0x00000115UL #define CKR_RANDOM_SEED_NOT_SUPPORTED 0x00000120UL #define CKR_RANDOM_NO_RNG 0x00000121UL #define CKR_DOMAIN_PARAMS_INVALID 0x00000130UL #define CKR_CURVE_NOT_SUPPORTED 0x00000140UL #define CKR_BUFFER_TOO_SMALL 0x00000150UL #define CKR_SAVED_STATE_INVALID 0x00000160UL #define CKR_INFORMATION_SENSITIVE 0x00000170UL #define CKR_STATE_UNSAVEABLE 0x00000180UL #define CKR_CRYPTOKI_NOT_INITIALIZED 0x00000190UL #define CKR_CRYPTOKI_ALREADY_INITIALIZED 0x00000191UL #define CKR_MUTEX_BAD 0x000001A0UL #define CKR_MUTEX_NOT_LOCKED 0x000001A1UL #define CKR_NEW_PIN_MODE 0x000001B0UL #define CKR_NEXT_OTP 0x000001B1UL #define CKR_EXCEEDED_MAX_ITERATIONS 0x000001B5UL #define CKR_FIPS_SELF_TEST_FAILED 0x000001B6UL #define CKR_LIBRARY_LOAD_FAILED 0x000001B7UL #define CKR_PIN_TOO_WEAK 0x000001B8UL #define CKR_PUBLIC_KEY_INVALID 0x000001B9UL #define CKR_FUNCTION_REJECTED 0x00000200UL #define CKR_VENDOR_DEFINED 0x80000000UL /* CKS */ #define CKS_RO_PUBLIC_SESSION 0UL #define CKS_RO_USER_FUNCTIONS 1UL #define CKS_RW_PUBLIC_SESSION 2UL #define CKS_RW_USER_FUNCTIONS 3UL #define CKS_RW_SO_FUNCTIONS 4UL /* CKU */ #define CKU_SO 0UL #define CKU_USER 1UL #define CKU_CONTEXT_SPECIFIC 2UL /* CKZ (data) */ #define CKZ_DATA_SPECIFIED 0x00000001UL /* CKZ (salt) */ #define CKZ_SALT_SPECIFIED 0x00000001UL /* Sundry structures type definition in alphabetical order */ #define STRUCTDEF(__name__) \ struct __name__; \ typedef struct __name__ __name__; \ typedef struct __name__ * __name__ ## _PTR; \ typedef struct __name__ ** __name__ ## _PTR_PTR STRUCTDEF(CK_ATTRIBUTE); STRUCTDEF(CK_C_INITIALIZE_ARGS); STRUCTDEF(CK_DATE); STRUCTDEF(CK_FUNCTION_LIST); 
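/*
 * Illustrative sketch, not part of the vendored header: every Cryptoki entry
 * point returns a CK_RV, and anything other than CKR_OK is a failure. Callers
 * usually special-case a few of the CKR_* codes above (here
 * CKR_USER_ALREADY_LOGGED_IN is treated as benign) and handle the rest
 * generically. Guarded out; "example_" names are hypothetical.
 */
#if 0 /* example only */
static CK_RV example_login_user(
    CK_FUNCTION_LIST *fn,
    CK_SESSION_HANDLE session,
    CK_UTF8CHAR *pin,
    CK_ULONG pin_len) {

    CK_RV rv = fn->C_Login(session, CKU_USER, pin, pin_len);
    if (rv == CKR_USER_ALREADY_LOGGED_IN) {
        return CKR_OK; /* another session already authenticated this token */
    }
    return rv;
}
#endif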
STRUCTDEF(CK_FUNCTION_LIST_3_0); STRUCTDEF(CK_INFO); STRUCTDEF(CK_MECHANISM); STRUCTDEF(CK_MECHANISM_INFO); STRUCTDEF(CK_SESSION_INFO); STRUCTDEF(CK_SLOT_INFO); STRUCTDEF(CK_TOKEN_INFO); STRUCTDEF(CK_VERSION); /* Function type definitions */ typedef CK_RV (* CK_NOTIFY)(CK_SESSION_HANDLE, CK_NOTIFICATION, void *); typedef CK_RV (* CK_CREATEMUTEX)(void **); typedef CK_RV (* CK_DESTROYMUTEX)(void *); typedef CK_RV (* CK_LOCKMUTEX)(void *); typedef CK_RV (* CK_UNLOCKMUTEX)(void *); /* General Structure definitions */ struct CK_ATTRIBUTE { CK_ATTRIBUTE_TYPE type; void * pValue; CK_ULONG ulValueLen; }; struct CK_C_INITIALIZE_ARGS { CK_CREATEMUTEX CreateMutex; CK_DESTROYMUTEX DestroyMutex; CK_LOCKMUTEX LockMutex; CK_UNLOCKMUTEX UnlockMutex; CK_FLAGS flags; void * pReserved; }; struct CK_DATE{ CK_CHAR year[4]; CK_CHAR month[2]; CK_CHAR day[2]; }; struct CK_VERSION { CK_BYTE major; CK_BYTE minor; }; struct CK_INFO { struct CK_VERSION cryptokiVersion; CK_UTF8CHAR manufacturerID[32]; CK_FLAGS flags; CK_UTF8CHAR libraryDescription[32]; struct CK_VERSION libraryVersion; }; struct CK_MECHANISM { CK_MECHANISM_TYPE mechanism; void * pParameter; CK_ULONG ulParameterLen; }; struct CK_MECHANISM_INFO { CK_ULONG ulMinKeySize; CK_ULONG ulMaxKeySize; CK_FLAGS flags; }; struct CK_SESSION_INFO { CK_SLOT_ID slotID; CK_STATE state; CK_FLAGS flags; CK_ULONG ulDeviceError; }; struct CK_SLOT_INFO { CK_UTF8CHAR slotDescription[64]; CK_UTF8CHAR manufacturerID[32]; CK_FLAGS flags; CK_VERSION hardwareVersion; CK_VERSION firmwareVersion; }; struct CK_TOKEN_INFO { CK_UTF8CHAR label[32]; CK_UTF8CHAR manufacturerID[32]; CK_UTF8CHAR model[16]; CK_CHAR serialNumber[16]; CK_FLAGS flags; CK_ULONG ulMaxSessionCount; CK_ULONG ulSessionCount; CK_ULONG ulMaxRwSessionCount; CK_ULONG ulRwSessionCount; CK_ULONG ulMaxPinLen; CK_ULONG ulMinPinLen; CK_ULONG ulTotalPublicMemory; CK_ULONG ulFreePublicMemory; CK_ULONG ulTotalPrivateMemory; CK_ULONG ulFreePrivateMemory; CK_VERSION hardwareVersion; CK_VERSION firmwareVersion; CK_CHAR utcTime[16]; }; /* Param Structure definitions in alphabetical order */ STRUCTDEF(CK_AES_CBC_ENCRYPT_DATA_PARAMS); STRUCTDEF(CK_AES_CCM_PARAMS); STRUCTDEF(CK_AES_CTR_PARAMS); STRUCTDEF(CK_AES_GCM_PARAMS); STRUCTDEF(CK_ARIA_CBC_ENCRYPT_DATA_PARAMS); STRUCTDEF(CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS); STRUCTDEF(CK_CAMELLIA_CTR_PARAMS); STRUCTDEF(CK_CCM_PARAMS); STRUCTDEF(CK_CMS_SIG_PARAMS); STRUCTDEF(CK_DES_CBC_ENCRYPT_DATA_PARAMS); STRUCTDEF(CK_DSA_PARAMETER_GEN_PARAM); STRUCTDEF(CK_ECDH_AES_KEY_WRAP_PARAMS); STRUCTDEF(CK_ECDH1_DERIVE_PARAMS); STRUCTDEF(CK_ECDH2_DERIVE_PARAMS); STRUCTDEF(CK_ECMQV_DERIVE_PARAMS); STRUCTDEF(CK_GCM_PARAMS); STRUCTDEF(CK_GOSTR3410_DERIVE_PARAMS); STRUCTDEF(CK_GOSTR3410_KEY_WRAP_PARAMS); STRUCTDEF(CK_KEA_DERIVE_PARAMS); STRUCTDEF(CK_KEY_DERIVATION_STRING_DATA); STRUCTDEF(CK_KEY_WRAP_SET_OAEP_PARAMS); STRUCTDEF(CK_KIP_PARAMS); STRUCTDEF(CK_OTP_PARAM); STRUCTDEF(CK_OTP_PARAMS); STRUCTDEF(CK_OTP_SIGNATURE_INFO); STRUCTDEF(CK_PBE_PARAMS); STRUCTDEF(CK_PKCS5_PBKD2_PARAMS); STRUCTDEF(CK_PKCS5_PBKD2_PARAMS2); STRUCTDEF(CK_RC2_CBC_PARAMS); STRUCTDEF(CK_RC2_MAC_GENERAL_PARAMS); STRUCTDEF(CK_RC5_CBC_PARAMS); STRUCTDEF(CK_RC5_MAC_GENERAL_PARAMS); STRUCTDEF(CK_RC5_PARAMS); STRUCTDEF(CK_RSA_AES_KEY_WRAP_PARAMS); STRUCTDEF(CK_RSA_PKCS_OAEP_PARAMS); STRUCTDEF(CK_RSA_PKCS_PSS_PARAMS); STRUCTDEF(CK_SEED_CBC_ENCRYPT_DATA_PARAMS); STRUCTDEF(CK_SKIPJACK_PRIVATE_WRAP_PARAMS); STRUCTDEF(CK_SKIPJACK_RELAYX_PARAMS); STRUCTDEF(CK_X2RATCHET_INITIALIZE_PARAMS); STRUCTDEF(CK_X2RATCHET_RESPOND_PARAMS); 
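/*
 * Illustrative sketch, not part of the vendored header: CK_ATTRIBUTE (defined
 * above) is the building block of every object template; a search template for
 * C_FindObjectsInit (declared near the end of this file) is just an array of
 * them. Guarded out; "example_" names and the label parameter are hypothetical.
 */
#if 0 /* example only */
static CK_RV example_find_private_key(
    CK_FUNCTION_LIST *fn,
    CK_SESSION_HANDLE session,
    CK_UTF8CHAR *label,
    CK_ULONG label_len,
    CK_OBJECT_HANDLE *out_key) {

    CK_OBJECT_CLASS key_class = CKO_PRIVATE_KEY;
    CK_ATTRIBUTE search_template[] = {
        {CKA_CLASS, &key_class, sizeof(key_class)},
        {CKA_LABEL, label, label_len},
    };
    CK_ULONG found_count = 0;
    CK_RV rv = fn->C_FindObjectsInit(session, search_template, 2);
    if (rv != CKR_OK) {
        return rv;
    }
    rv = fn->C_FindObjects(session, out_key, 1, &found_count);
    (void)fn->C_FindObjectsFinal(session); /* always close the search, even on error */
    if (rv != CKR_OK) {
        return rv;
    }
    return (found_count == 1) ? CKR_OK : CKR_GENERAL_ERROR;
}
#endif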
STRUCTDEF(CK_X9_42_DH1_DERIVE_PARAMS); STRUCTDEF(CK_X9_42_DH2_DERIVE_PARAMS); STRUCTDEF(CK_X9_42_MQV_DERIVE_PARAMS); STRUCTDEF(specifiedParams); struct CK_AES_CBC_ENCRYPT_DATA_PARAMS { CK_BYTE iv[16]; CK_BYTE * pData; CK_ULONG length; }; struct CK_AES_CCM_PARAMS { CK_ULONG ulDataLen; CK_BYTE * pNonce; CK_ULONG ulNonceLen; CK_BYTE * pAAD; CK_ULONG ulAADLen; CK_ULONG ulMACLen; }; struct CK_AES_CTR_PARAMS { CK_ULONG ulCounterBits; CK_BYTE cb[16]; }; struct CK_AES_GCM_PARAMS { CK_BYTE * pIv; CK_ULONG ulIvLen; CK_ULONG ulIvBits; CK_BYTE * pAAD; CK_ULONG ulAADLen; CK_ULONG ulTagBits; }; struct CK_ARIA_CBC_ENCRYPT_DATA_PARAMS { CK_BYTE iv[16]; CK_BYTE * pData; CK_ULONG length; }; struct CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS { CK_BYTE iv[16]; CK_BYTE * pData; CK_ULONG length; }; struct CK_CAMELLIA_CTR_PARAMS { CK_ULONG ulCounterBits; CK_BYTE cb[16]; }; struct CK_CCM_PARAMS { CK_ULONG ulDataLen; CK_BYTE * pNonce; CK_ULONG ulNonceLen; CK_BYTE * pAAD; CK_ULONG ulAADLen; CK_ULONG ulMACLen; }; struct CK_CMS_SIG_PARAMS { CK_OBJECT_HANDLE certificateHandle; CK_MECHANISM * pSigningMechanism; CK_MECHANISM * pDigestMechanism; CK_UTF8CHAR * pContentType; CK_BYTE * pRequestedAttributes; CK_ULONG ulRequestedAttributesLen; CK_BYTE * pRequiredAttributes; CK_ULONG ulRequiredAttributesLen; }; struct CK_DES_CBC_ENCRYPT_DATA_PARAMS { CK_BYTE iv[8]; CK_BYTE * pData; CK_ULONG length; }; struct CK_DSA_PARAMETER_GEN_PARAM { CK_MECHANISM_TYPE hash; CK_BYTE * pSeed; CK_ULONG ulSeedLen; CK_ULONG ulIndex; }; struct CK_ECDH_AES_KEY_WRAP_PARAMS { CK_ULONG ulAESKeyBits; CK_EC_KDF_TYPE kdf; CK_ULONG ulSharedDataLen; CK_BYTE * pSharedData; }; struct CK_ECDH1_DERIVE_PARAMS { CK_EC_KDF_TYPE kdf; CK_ULONG ulSharedDataLen; CK_BYTE * pSharedData; CK_ULONG ulPublicDataLen; CK_BYTE * pPublicData; }; struct CK_ECDH2_DERIVE_PARAMS { CK_EC_KDF_TYPE kdf; CK_ULONG ulSharedDataLen; CK_BYTE * pSharedData; CK_ULONG ulPublicDataLen; CK_BYTE * pPublicData; CK_ULONG ulPrivateDataLen; CK_OBJECT_HANDLE hPrivateData; CK_ULONG ulPublicDataLen2; CK_BYTE * pPublicData2; }; struct CK_ECMQV_DERIVE_PARAMS { CK_EC_KDF_TYPE kdf; CK_ULONG ulSharedDataLen; CK_BYTE * pSharedData; CK_ULONG ulPublicDataLen; CK_BYTE * pPublicData; CK_ULONG ulPrivateDataLen; CK_OBJECT_HANDLE hPrivateData; CK_ULONG ulPublicDataLen2; CK_BYTE * pPublicData2; CK_OBJECT_HANDLE publicKey; }; struct CK_GCM_PARAMS { CK_BYTE * pIv; CK_ULONG ulIvLen; CK_ULONG ulIvBits; CK_BYTE * pAAD; CK_ULONG ulAADLen; CK_ULONG ulTagBits; }; struct CK_GOSTR3410_DERIVE_PARAMS { CK_EC_KDF_TYPE kdf; CK_BYTE * pPublicData; CK_ULONG ulPublicDataLen; CK_BYTE * pUKM; CK_ULONG ulUKMLen; }; struct CK_GOSTR3410_KEY_WRAP_PARAMS { CK_BYTE * pWrapOID; CK_ULONG ulWrapOIDLen; CK_BYTE * pUKM; CK_ULONG ulUKMLen; CK_OBJECT_HANDLE hKey; }; struct CK_KEA_DERIVE_PARAMS { CK_BBOOL isSender; CK_ULONG ulRandomLen; CK_BYTE * RandomA; CK_BYTE * RandomB; CK_ULONG ulPublicDataLen; CK_BYTE * PublicData; }; struct CK_KEY_DERIVATION_STRING_DATA { CK_BYTE * pData; CK_ULONG ulLen; }; struct CK_KEY_WRAP_SET_OAEP_PARAMS { CK_BYTE bBC; CK_BYTE * pX; CK_ULONG ulXLen; }; struct CK_KIP_PARAMS { CK_MECHANISM * pMechanism; CK_OBJECT_HANDLE hKey; CK_BYTE * pSeed; CK_ULONG ulSeedLen; }; struct CK_OTP_PARAM { CK_OTP_PARAM_TYPE type; void * pValue; CK_ULONG ulValueLen; }; struct CK_OTP_PARAMS { CK_OTP_PARAM * pParams; CK_ULONG ulCount; }; struct CK_OTP_SIGNATURE_INFO { CK_OTP_PARAM * pParams; CK_ULONG ulCount; }; struct CK_PBE_PARAMS { CK_BYTE * pInitVector; CK_UTF8CHAR * pPassword; CK_ULONG ulPasswordLen; CK_BYTE * pSalt; CK_ULONG ulSaltLen; CK_ULONG 
ulIteration; }; struct CK_PKCS5_PBKD2_PARAMS { CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; void * pSaltSourceData; CK_ULONG ulSaltSourceDataLen; CK_ULONG iterations; CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; void * pPrfData; CK_ULONG ulPrfDataLen; CK_UTF8CHAR * pPassword; CK_ULONG * ulPasswordLen; }; struct CK_PKCS5_PBKD2_PARAMS2 { CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE saltSource; void * pSaltSourceData; CK_ULONG ulSaltSourceDataLen; CK_ULONG iterations; CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE prf; void * pPrfData; CK_ULONG ulPrfDataLen; CK_UTF8CHAR * pPassword; CK_ULONG ulPasswordLen; }; struct CK_RC2_CBC_PARAMS { CK_ULONG ulEffectiveBits; CK_BYTE iv[8]; }; struct CK_RC2_MAC_GENERAL_PARAMS { CK_ULONG ulEffectiveBits; CK_ULONG ulMacLength; }; struct CK_RC5_CBC_PARAMS { CK_ULONG ulWordsize; CK_ULONG ulRounds; CK_BYTE * pIv; CK_ULONG ulIvLen; }; struct CK_RC5_MAC_GENERAL_PARAMS { CK_ULONG ulWordsize; CK_ULONG ulRounds; CK_ULONG ulMacLength; }; struct CK_RC5_PARAMS { CK_ULONG ulWordsize; CK_ULONG ulRounds; }; struct CK_RSA_AES_KEY_WRAP_PARAMS { CK_ULONG ulAESKeyBits; CK_RSA_PKCS_OAEP_PARAMS * pOAEPParams; }; struct CK_RSA_PKCS_OAEP_PARAMS { CK_MECHANISM_TYPE hashAlg; CK_RSA_PKCS_MGF_TYPE mgf; CK_RSA_PKCS_OAEP_SOURCE_TYPE source; void * pSourceData; CK_ULONG ulSourceDataLen; }; struct CK_RSA_PKCS_PSS_PARAMS { CK_MECHANISM_TYPE hashAlg; CK_RSA_PKCS_MGF_TYPE mgf; CK_ULONG sLen; }; struct CK_SEED_CBC_ENCRYPT_DATA_PARAMS { CK_BYTE iv[16]; CK_BYTE * pData; CK_ULONG length; }; struct CK_SKIPJACK_PRIVATE_WRAP_PARAMS { CK_ULONG ulPasswordLen; CK_BYTE * pPassword; CK_ULONG ulPublicDataLen; CK_BYTE * pPublicData; CK_ULONG ulPAndGLen; CK_ULONG ulQLen; CK_ULONG ulRandomLen; CK_BYTE * pRandomA; CK_BYTE * pPrimeP; CK_BYTE * pBaseG; CK_BYTE * pSubprimeQ; }; struct CK_SKIPJACK_RELAYX_PARAMS { CK_ULONG ulOldWrappedXLen; CK_BYTE * pOldWrappedX; CK_ULONG ulOldPasswordLen; CK_BYTE * pOldPassword; CK_ULONG ulOldPublicDataLen; CK_BYTE * pOldPublicData; CK_ULONG ulOldRandomLen; CK_BYTE * pOldRandomA; CK_ULONG ulNewPasswordLen; CK_BYTE * pNewPassword; CK_ULONG ulNewPublicDataLen; CK_BYTE * pNewPublicData; CK_ULONG ulNewRandomLen; CK_BYTE * pNewRandomA; }; struct CK_X9_42_DH1_DERIVE_PARAMS { CK_X9_42_DH_KDF_TYPE kdf; CK_ULONG ulOtherInfoLen; CK_BYTE * pOtherInfo; CK_ULONG ulPublicDataLen; CK_BYTE * pPublicData; }; struct CK_X9_42_DH2_DERIVE_PARAMS { CK_X9_42_DH_KDF_TYPE kdf; CK_ULONG ulOtherInfoLen; CK_BYTE * pOtherInfo; CK_ULONG ulPublicDataLen; CK_BYTE * pPublicData; CK_ULONG ulPrivateDataLen; CK_OBJECT_HANDLE hPrivateData; CK_ULONG ulPublicDataLen2; CK_BYTE * pPublicData2; }; struct CK_X9_42_MQV_DERIVE_PARAMS { CK_X9_42_DH_KDF_TYPE kdf; CK_ULONG ulOtherInfoLen; CK_BYTE * OtherInfo; CK_ULONG ulPublicDataLen; CK_BYTE * PublicData; CK_ULONG ulPrivateDataLen; CK_OBJECT_HANDLE hPrivateData; CK_ULONG ulPublicDataLen2; CK_BYTE * PublicData2; CK_OBJECT_HANDLE publicKey; }; /* TLS related structure definitions */ STRUCTDEF(CK_SSL3_KEY_MAT_OUT); STRUCTDEF(CK_SSL3_KEY_MAT_PARAMS); STRUCTDEF(CK_SSL3_MASTER_KEY_DERIVE_PARAMS); STRUCTDEF(CK_SSL3_RANDOM_DATA); STRUCTDEF(CK_TLS_KDF_PARAMS); STRUCTDEF(CK_TLS_MAC_PARAMS); STRUCTDEF(CK_TLS_PRF_PARAMS); STRUCTDEF(CK_TLS12_KEY_MAT_PARAMS); STRUCTDEF(CK_TLS12_MASTER_KEY_DERIVE_PARAMS); STRUCTDEF(CK_WTLS_KEY_MAT_OUT); STRUCTDEF(CK_WTLS_KEY_MAT_PARAMS); STRUCTDEF(CK_WTLS_MASTER_KEY_DERIVE_PARAMS); STRUCTDEF(CK_WTLS_PRF_PARAMS); STRUCTDEF(CK_WTLS_RANDOM_DATA); struct CK_SSL3_KEY_MAT_OUT { CK_OBJECT_HANDLE hClientMacSecret; CK_OBJECT_HANDLE hServerMacSecret; CK_OBJECT_HANDLE 
hClientKey; CK_OBJECT_HANDLE hServerKey; CK_BYTE * pIVClient; CK_BYTE * pIVServer; }; struct CK_SSL3_RANDOM_DATA { CK_BYTE * pClientRandom; CK_ULONG ulClientRandomLen; CK_BYTE * pServerRandom; CK_ULONG ulServerRandomLen; }; struct CK_SSL3_KEY_MAT_PARAMS { CK_ULONG ulMacSizeInBits; CK_ULONG ulKeySizeInBits; CK_ULONG ulIVSizeInBits; CK_BBOOL bIsExport; CK_SSL3_RANDOM_DATA RandomInfo; CK_SSL3_KEY_MAT_OUT * pReturnedKeyMaterial; }; struct CK_SSL3_MASTER_KEY_DERIVE_PARAMS { CK_SSL3_RANDOM_DATA RandomInfo; CK_VERSION * pVersion; }; struct CK_TLS_KDF_PARAMS { CK_MECHANISM_TYPE prfMechanism; CK_BYTE * pLabel; CK_ULONG ulLabelLength; CK_SSL3_RANDOM_DATA RandomInfo; CK_BYTE * pContextData; CK_ULONG ulContextDataLength; }; struct CK_TLS_MAC_PARAMS { CK_MECHANISM_TYPE prfHashMechanism; CK_ULONG ulMacLength; CK_ULONG ulServerOrClient; }; struct CK_TLS_PRF_PARAMS { CK_BYTE * pSeed; CK_ULONG ulSeedLen; CK_BYTE * pLabel; CK_ULONG ulLabelLen; CK_BYTE * pOutput; CK_ULONG * pulOutputLen; }; struct CK_TLS12_KEY_MAT_PARAMS { CK_ULONG ulMacSizeInBits; CK_ULONG ulKeySizeInBits; CK_ULONG ulIVSizeInBits; CK_BBOOL bIsExport; CK_SSL3_RANDOM_DATA RandomInfo; CK_SSL3_KEY_MAT_OUT * pReturnedKeyMaterial; CK_MECHANISM_TYPE prfHashMechanism; }; struct CK_TLS12_MASTER_KEY_DERIVE_PARAMS { CK_SSL3_RANDOM_DATA RandomInfo; CK_VERSION * pVersion; CK_MECHANISM_TYPE prfHashMechanism; }; struct CK_WTLS_KEY_MAT_OUT { CK_OBJECT_HANDLE hMacSecret; CK_OBJECT_HANDLE hKey; CK_BYTE * pIV; }; struct CK_WTLS_RANDOM_DATA { CK_BYTE * pClientRandom; CK_ULONG ulClientRandomLen; CK_BYTE * pServerRandom; CK_ULONG ulServerRandomLen; }; struct CK_WTLS_KEY_MAT_PARAMS { CK_MECHANISM_TYPE DigestMechanism; CK_ULONG ulMacSizeInBits; CK_ULONG ulKeySizeInBits; CK_ULONG ulIVSizeInBits; CK_ULONG ulSequenceNumber; CK_BBOOL bIsExport; CK_WTLS_RANDOM_DATA RandomInfo; CK_WTLS_KEY_MAT_OUT * pReturnedKeyMaterial; }; struct CK_WTLS_MASTER_KEY_DERIVE_PARAMS { CK_MECHANISM_TYPE DigestMechanism; CK_WTLS_RANDOM_DATA RandomInfo; CK_BYTE * pVersion; }; struct CK_WTLS_PRF_PARAMS { CK_MECHANISM_TYPE DigestMechanism; CK_BYTE * pSeed; CK_ULONG ulSeedLen; CK_BYTE * pLabel; CK_ULONG ulLabelLen; CK_BYTE * pOutput; CK_ULONG * pulOutputLen; }; /* PKCS11 Functions */ extern CK_RV C_Initialize(void *); extern CK_RV C_Finalize(void *); extern CK_RV C_GetInfo(CK_INFO *); extern CK_RV C_GetFunctionList(CK_FUNCTION_LIST **); extern CK_RV C_GetSlotList(CK_BBOOL, CK_SLOT_ID *, CK_ULONG *); extern CK_RV C_GetSlotInfo(CK_SLOT_ID, CK_SLOT_INFO *); extern CK_RV C_GetTokenInfo(CK_SLOT_ID, CK_TOKEN_INFO *); extern CK_RV C_GetMechanismList(CK_SLOT_ID, CK_MECHANISM_TYPE *, CK_ULONG *); extern CK_RV C_GetMechanismInfo(CK_SLOT_ID, CK_MECHANISM_TYPE, CK_MECHANISM_INFO *); extern CK_RV C_InitToken(CK_SLOT_ID, CK_UTF8CHAR *, CK_ULONG, CK_UTF8CHAR *); extern CK_RV C_InitPIN(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG); extern CK_RV C_SetPIN(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG, CK_UTF8CHAR *, CK_ULONG); extern CK_RV C_OpenSession(CK_SLOT_ID, CK_FLAGS, void *, CK_NOTIFY, CK_SESSION_HANDLE *); extern CK_RV C_CloseSession(CK_SESSION_HANDLE); extern CK_RV C_CloseAllSessions(CK_SLOT_ID); extern CK_RV C_GetSessionInfo(CK_SESSION_HANDLE, CK_SESSION_INFO *); extern CK_RV C_GetOperationState(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); extern CK_RV C_SetOperationState(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_OBJECT_HANDLE, CK_OBJECT_HANDLE); extern CK_RV C_Login(CK_SESSION_HANDLE, CK_USER_TYPE, CK_UTF8CHAR *, CK_ULONG); extern CK_RV C_Logout(CK_SESSION_HANDLE); extern CK_RV 
C_CreateObject(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); extern CK_RV C_CopyObject(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); extern CK_RV C_DestroyObject(CK_SESSION_HANDLE, CK_OBJECT_HANDLE); extern CK_RV C_GetObjectSize(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ULONG *); extern CK_RV C_GetAttributeValue(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG); extern CK_RV C_SetAttributeValue(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG); extern CK_RV C_FindObjectsInit(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG); extern CK_RV C_FindObjects(CK_SESSION_HANDLE, CK_OBJECT_HANDLE *, CK_ULONG, CK_ULONG *); extern CK_RV C_FindObjectsFinal(CK_SESSION_HANDLE); extern CK_RV C_EncryptInit(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); extern CK_RV C_Encrypt(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_EncryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_EncryptFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); extern CK_RV C_DecryptInit(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); extern CK_RV C_Decrypt(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_DecryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_DecryptFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); extern CK_RV C_DigestInit(CK_SESSION_HANDLE, CK_MECHANISM *); extern CK_RV C_Digest(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_DigestUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); extern CK_RV C_DigestKey(CK_SESSION_HANDLE, CK_OBJECT_HANDLE); extern CK_RV C_DigestFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); extern CK_RV C_SignInit(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); extern CK_RV C_Sign(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_SignUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); extern CK_RV C_SignFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); extern CK_RV C_SignRecoverInit(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); extern CK_RV C_SignRecover(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_VerifyInit(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); extern CK_RV C_Verify(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG); extern CK_RV C_VerifyUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); extern CK_RV C_VerifyFinal(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); extern CK_RV C_VerifyRecoverInit(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); extern CK_RV C_VerifyRecover(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_DigestEncryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_DecryptDigestUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_SignEncryptUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_DecryptVerifyUpdate(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); extern CK_RV C_GenerateKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); extern CK_RV C_GenerateKeyPair(CK_SESSION_HANDLE, CK_MECHANISM *, CK_ATTRIBUTE *, CK_ULONG, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *, CK_OBJECT_HANDLE *); extern CK_RV C_WrapKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, CK_OBJECT_HANDLE, CK_BYTE *, CK_ULONG *); extern CK_RV C_UnwrapKey(CK_SESSION_HANDLE, CK_MECHANISM 
*, CK_OBJECT_HANDLE, CK_BYTE *, CK_ULONG *, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); extern CK_RV C_DeriveKey(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); extern CK_RV C_SeedRandom(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); extern CK_RV C_GenerateRandom(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); extern CK_RV C_GetFunctionStatus(CK_SESSION_HANDLE); extern CK_RV C_CancelFunction(CK_SESSION_HANDLE); extern CK_RV C_WaitForSlotEvent(CK_FLAGS, CK_SLOT_ID *, void *); typedef CK_RV (* CK_C_Initialize)(void *); typedef CK_RV (* CK_C_Finalize)(void *); typedef CK_RV (* CK_C_GetInfo)(CK_INFO *); typedef CK_RV (* CK_C_GetFunctionList)(CK_FUNCTION_LIST **); typedef CK_RV (* CK_C_GetSlotList)(CK_BBOOL, CK_SLOT_ID *, CK_ULONG *); typedef CK_RV (* CK_C_GetSlotInfo)(CK_SLOT_ID, CK_SLOT_INFO *); typedef CK_RV (* CK_C_GetTokenInfo)(CK_SLOT_ID, CK_TOKEN_INFO *); typedef CK_RV (* CK_C_GetMechanismList)(CK_SLOT_ID, CK_MECHANISM_TYPE *, CK_ULONG *); typedef CK_RV (* CK_C_GetMechanismInfo)(CK_SLOT_ID, CK_MECHANISM_TYPE, CK_MECHANISM_INFO *); typedef CK_RV (* CK_C_InitToken)(CK_SLOT_ID, CK_UTF8CHAR *, CK_ULONG, CK_UTF8CHAR *); typedef CK_RV (* CK_C_InitPIN)(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG); typedef CK_RV (* CK_C_SetPIN)(CK_SESSION_HANDLE, CK_UTF8CHAR *, CK_ULONG, CK_UTF8CHAR *, CK_ULONG); typedef CK_RV (* CK_C_OpenSession)(CK_SLOT_ID, CK_FLAGS, void *, CK_NOTIFY, CK_SESSION_HANDLE *); typedef CK_RV (* CK_C_CloseSession)(CK_SESSION_HANDLE); typedef CK_RV (* CK_C_CloseAllSessions)(CK_SLOT_ID); typedef CK_RV (* CK_C_GetSessionInfo)(CK_SESSION_HANDLE, CK_SESSION_INFO *); typedef CK_RV (* CK_C_GetOperationState)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_SetOperationState)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_OBJECT_HANDLE, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_Login)(CK_SESSION_HANDLE, CK_USER_TYPE, CK_UTF8CHAR *, CK_ULONG); typedef CK_RV (* CK_C_Logout)(CK_SESSION_HANDLE); typedef CK_RV (* CK_C_CreateObject)(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); typedef CK_RV (* CK_C_CopyObject)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); typedef CK_RV (* CK_C_DestroyObject)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_GetObjectSize)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ULONG *); typedef CK_RV (* CK_C_GetAttributeValue)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG); typedef CK_RV (* CK_C_SetAttributeValue)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG); typedef CK_RV (* CK_C_FindObjectsInit)(CK_SESSION_HANDLE, CK_ATTRIBUTE *, CK_ULONG); typedef CK_RV (* CK_C_FindObjects)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE *, CK_ULONG, CK_ULONG *); typedef CK_RV (* CK_C_FindObjectsFinal)(CK_SESSION_HANDLE); typedef CK_RV (* CK_C_EncryptInit)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_Encrypt)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_EncryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_EncryptFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_DecryptInit)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_Decrypt)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_DecryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_DecryptFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); typedef CK_RV 
(* CK_C_DigestInit)(CK_SESSION_HANDLE, CK_MECHANISM *); typedef CK_RV (* CK_C_Digest)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_DigestUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); typedef CK_RV (* CK_C_DigestKey)(CK_SESSION_HANDLE, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_DigestFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_SignInit)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_Sign)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_SignUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); typedef CK_RV (* CK_C_SignFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_SignRecoverInit)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_SignRecover)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_VerifyInit)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_Verify)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG); typedef CK_RV (* CK_C_VerifyUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); typedef CK_RV (* CK_C_VerifyFinal)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); typedef CK_RV (* CK_C_VerifyRecoverInit)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE); typedef CK_RV (* CK_C_VerifyRecover)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_DigestEncryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_DecryptDigestUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_SignEncryptUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_DecryptVerifyUpdate)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_GenerateKey)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); typedef CK_RV (* CK_C_GenerateKeyPair)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_ATTRIBUTE *, CK_ULONG, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *, CK_OBJECT_HANDLE *); typedef CK_RV (* CK_C_WrapKey)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, CK_OBJECT_HANDLE, CK_BYTE *, CK_ULONG *); typedef CK_RV (* CK_C_UnwrapKey)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, CK_BYTE *, CK_ULONG, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); typedef CK_RV (* CK_C_DeriveKey)(CK_SESSION_HANDLE, CK_MECHANISM *, CK_OBJECT_HANDLE, CK_ATTRIBUTE *, CK_ULONG, CK_OBJECT_HANDLE *); typedef CK_RV (* CK_C_SeedRandom)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); typedef CK_RV (* CK_C_GenerateRandom)(CK_SESSION_HANDLE, CK_BYTE *, CK_ULONG); typedef CK_RV (* CK_C_GetFunctionStatus)(CK_SESSION_HANDLE); typedef CK_RV (* CK_C_CancelFunction)(CK_SESSION_HANDLE); typedef CK_RV (* CK_C_WaitForSlotEvent)(CK_FLAGS, CK_SLOT_ID *, void *); struct CK_FUNCTION_LIST { CK_VERSION version; CK_C_Initialize C_Initialize; CK_C_Finalize C_Finalize; CK_C_GetInfo C_GetInfo; CK_C_GetFunctionList C_GetFunctionList; CK_C_GetSlotList C_GetSlotList; CK_C_GetSlotInfo C_GetSlotInfo; CK_C_GetTokenInfo C_GetTokenInfo; CK_C_GetMechanismList C_GetMechanismList; CK_C_GetMechanismInfo C_GetMechanismInfo; CK_C_InitToken C_InitToken; CK_C_InitPIN C_InitPIN; CK_C_SetPIN C_SetPIN; CK_C_OpenSession C_OpenSession; CK_C_CloseSession C_CloseSession; CK_C_CloseAllSessions C_CloseAllSessions; CK_C_GetSessionInfo C_GetSessionInfo; CK_C_GetOperationState C_GetOperationState; CK_C_SetOperationState C_SetOperationState; 
CK_C_Login C_Login; CK_C_Logout C_Logout; CK_C_CreateObject C_CreateObject; CK_C_CopyObject C_CopyObject; CK_C_DestroyObject C_DestroyObject; CK_C_GetObjectSize C_GetObjectSize; CK_C_GetAttributeValue C_GetAttributeValue; CK_C_SetAttributeValue C_SetAttributeValue; CK_C_FindObjectsInit C_FindObjectsInit; CK_C_FindObjects C_FindObjects; CK_C_FindObjectsFinal C_FindObjectsFinal; CK_C_EncryptInit C_EncryptInit; CK_C_Encrypt C_Encrypt; CK_C_EncryptUpdate C_EncryptUpdate; CK_C_EncryptFinal C_EncryptFinal; CK_C_DecryptInit C_DecryptInit; CK_C_Decrypt C_Decrypt; CK_C_DecryptUpdate C_DecryptUpdate; CK_C_DecryptFinal C_DecryptFinal; CK_C_DigestInit C_DigestInit; CK_C_Digest C_Digest; CK_C_DigestUpdate C_DigestUpdate; CK_C_DigestKey C_DigestKey; CK_C_DigestFinal C_DigestFinal; CK_C_SignInit C_SignInit; CK_C_Sign C_Sign; CK_C_SignUpdate C_SignUpdate; CK_C_SignFinal C_SignFinal; CK_C_SignRecoverInit C_SignRecoverInit; CK_C_SignRecover C_SignRecover; CK_C_VerifyInit C_VerifyInit; CK_C_Verify C_Verify; CK_C_VerifyUpdate C_VerifyUpdate; CK_C_VerifyFinal C_VerifyFinal; CK_C_VerifyRecoverInit C_VerifyRecoverInit; CK_C_VerifyRecover C_VerifyRecover; CK_C_DigestEncryptUpdate C_DigestEncryptUpdate; CK_C_DecryptDigestUpdate C_DecryptDigestUpdate; CK_C_SignEncryptUpdate C_SignEncryptUpdate; CK_C_DecryptVerifyUpdate C_DecryptVerifyUpdate; CK_C_GenerateKey C_GenerateKey; CK_C_GenerateKeyPair C_GenerateKeyPair; CK_C_WrapKey C_WrapKey; CK_C_UnwrapKey C_UnwrapKey; CK_C_DeriveKey C_DeriveKey; CK_C_SeedRandom C_SeedRandom; CK_C_GenerateRandom C_GenerateRandom; CK_C_GetFunctionStatus C_GetFunctionStatus; CK_C_CancelFunction C_CancelFunction; CK_C_WaitForSlotEvent C_WaitForSlotEvent; }; #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pkcs11_lib.c000066400000000000000000001623571456575232400232510ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "pkcs11_private.h" #include #include #include #include #include #include /* NOTE 1: even though we currently include the v2.40 headers, they're compatible with any v2.x library. 
* NOTE 2: v3.x is backwards compatible with 2.x, and even claims to be 2.40 if you check its version the 2.x way */ #define AWS_SUPPORTED_CRYPTOKI_VERSION_MAJOR 2 #define AWS_MIN_SUPPORTED_CRYPTOKI_VERSION_MINOR 20 /* clang-format off */ /* * DER encoded DigestInfo value to be prefixed to the hash, used for RSA signing * See https://tools.ietf.org/html/rfc3447#page-43 * (Notes to help understand what's going on here with DER encoding) * 0x30 nn - Sequence of tags, nn bytes, including hash, nn = mm+jj+4 (PKCS11 DigestInfo) * 0x30 mm - Subsequence of tags, mm bytes (ii+4) (PKCS11 * 0x06 ii - OID encoding, ii bytes, see X.680 - this identifies the hash algorithm * 0x05 00 - NULL * 0x04 jj - OCTET, nn = mm + jj + 4 * Digest (nn - mm - 4 bytes) */ static const uint8_t SHA1_PREFIX_TO_RSA_SIG[] = { 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14 }; static const uint8_t SHA256_PREFIX_TO_RSA_SIG[] = { 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 }; static const uint8_t SHA384_PREFIX_TO_RSA_SIG[] = { 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30 }; static const uint8_t SHA512_PREFIX_TO_RSA_SIG[] = { 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40 }; static const uint8_t SHA224_PREFIX_TO_RSA_SIG[] = { 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c }; /* clang-format on */ /* Return c-string for PKCS#11 CKR_* contants. */ const char *aws_pkcs11_ckr_str(CK_RV rv) { /* clang-format off */ switch (rv) { case (CKR_OK): return "CKR_OK"; case (CKR_CANCEL): return "CKR_CANCEL"; case (CKR_HOST_MEMORY): return "CKR_HOST_MEMORY"; case (CKR_SLOT_ID_INVALID): return "CKR_SLOT_ID_INVALID"; case (CKR_GENERAL_ERROR): return "CKR_GENERAL_ERROR"; case (CKR_FUNCTION_FAILED): return "CKR_FUNCTION_FAILED"; case (CKR_ARGUMENTS_BAD): return "CKR_ARGUMENTS_BAD"; case (CKR_NO_EVENT): return "CKR_NO_EVENT"; case (CKR_NEED_TO_CREATE_THREADS): return "CKR_NEED_TO_CREATE_THREADS"; case (CKR_CANT_LOCK): return "CKR_CANT_LOCK"; case (CKR_ATTRIBUTE_READ_ONLY): return "CKR_ATTRIBUTE_READ_ONLY"; case (CKR_ATTRIBUTE_SENSITIVE): return "CKR_ATTRIBUTE_SENSITIVE"; case (CKR_ATTRIBUTE_TYPE_INVALID): return "CKR_ATTRIBUTE_TYPE_INVALID"; case (CKR_ATTRIBUTE_VALUE_INVALID): return "CKR_ATTRIBUTE_VALUE_INVALID"; case (CKR_ACTION_PROHIBITED): return "CKR_ACTION_PROHIBITED"; case (CKR_DATA_INVALID): return "CKR_DATA_INVALID"; case (CKR_DATA_LEN_RANGE): return "CKR_DATA_LEN_RANGE"; case (CKR_DEVICE_ERROR): return "CKR_DEVICE_ERROR"; case (CKR_DEVICE_MEMORY): return "CKR_DEVICE_MEMORY"; case (CKR_DEVICE_REMOVED): return "CKR_DEVICE_REMOVED"; case (CKR_ENCRYPTED_DATA_INVALID): return "CKR_ENCRYPTED_DATA_INVALID"; case (CKR_ENCRYPTED_DATA_LEN_RANGE): return "CKR_ENCRYPTED_DATA_LEN_RANGE"; case (CKR_FUNCTION_CANCELED): return "CKR_FUNCTION_CANCELED"; case (CKR_FUNCTION_NOT_PARALLEL): return "CKR_FUNCTION_NOT_PARALLEL"; case (CKR_FUNCTION_NOT_SUPPORTED): return "CKR_FUNCTION_NOT_SUPPORTED"; case (CKR_KEY_HANDLE_INVALID): return "CKR_KEY_HANDLE_INVALID"; case (CKR_KEY_SIZE_RANGE): return "CKR_KEY_SIZE_RANGE"; case (CKR_KEY_TYPE_INCONSISTENT): return "CKR_KEY_TYPE_INCONSISTENT"; case (CKR_KEY_NOT_NEEDED): return "CKR_KEY_NOT_NEEDED"; case (CKR_KEY_CHANGED): return "CKR_KEY_CHANGED"; case (CKR_KEY_NEEDED): return "CKR_KEY_NEEDED"; case 
(CKR_KEY_INDIGESTIBLE): return "CKR_KEY_INDIGESTIBLE"; case (CKR_KEY_FUNCTION_NOT_PERMITTED): return "CKR_KEY_FUNCTION_NOT_PERMITTED"; case (CKR_KEY_NOT_WRAPPABLE): return "CKR_KEY_NOT_WRAPPABLE"; case (CKR_KEY_UNEXTRACTABLE): return "CKR_KEY_UNEXTRACTABLE"; case (CKR_MECHANISM_INVALID): return "CKR_MECHANISM_INVALID"; case (CKR_MECHANISM_PARAM_INVALID): return "CKR_MECHANISM_PARAM_INVALID"; case (CKR_OBJECT_HANDLE_INVALID): return "CKR_OBJECT_HANDLE_INVALID"; case (CKR_OPERATION_ACTIVE): return "CKR_OPERATION_ACTIVE"; case (CKR_OPERATION_NOT_INITIALIZED): return "CKR_OPERATION_NOT_INITIALIZED"; case (CKR_PIN_INCORRECT): return "CKR_PIN_INCORRECT"; case (CKR_PIN_INVALID): return "CKR_PIN_INVALID"; case (CKR_PIN_LEN_RANGE): return "CKR_PIN_LEN_RANGE"; case (CKR_PIN_EXPIRED): return "CKR_PIN_EXPIRED"; case (CKR_PIN_LOCKED): return "CKR_PIN_LOCKED"; case (CKR_SESSION_CLOSED): return "CKR_SESSION_CLOSED"; case (CKR_SESSION_COUNT): return "CKR_SESSION_COUNT"; case (CKR_SESSION_HANDLE_INVALID): return "CKR_SESSION_HANDLE_INVALID"; case (CKR_SESSION_PARALLEL_NOT_SUPPORTED): return "CKR_SESSION_PARALLEL_NOT_SUPPORTED"; case (CKR_SESSION_READ_ONLY): return "CKR_SESSION_READ_ONLY"; case (CKR_SESSION_EXISTS): return "CKR_SESSION_EXISTS"; case (CKR_SESSION_READ_ONLY_EXISTS): return "CKR_SESSION_READ_ONLY_EXISTS"; case (CKR_SESSION_READ_WRITE_SO_EXISTS): return "CKR_SESSION_READ_WRITE_SO_EXISTS"; case (CKR_SIGNATURE_INVALID): return "CKR_SIGNATURE_INVALID"; case (CKR_SIGNATURE_LEN_RANGE): return "CKR_SIGNATURE_LEN_RANGE"; case (CKR_TEMPLATE_INCOMPLETE): return "CKR_TEMPLATE_INCOMPLETE"; case (CKR_TEMPLATE_INCONSISTENT): return "CKR_TEMPLATE_INCONSISTENT"; case (CKR_TOKEN_NOT_PRESENT): return "CKR_TOKEN_NOT_PRESENT"; case (CKR_TOKEN_NOT_RECOGNIZED): return "CKR_TOKEN_NOT_RECOGNIZED"; case (CKR_TOKEN_WRITE_PROTECTED): return "CKR_TOKEN_WRITE_PROTECTED"; case (CKR_UNWRAPPING_KEY_HANDLE_INVALID): return "CKR_UNWRAPPING_KEY_HANDLE_INVALID"; case (CKR_UNWRAPPING_KEY_SIZE_RANGE): return "CKR_UNWRAPPING_KEY_SIZE_RANGE"; case (CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT): return "CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT"; case (CKR_USER_ALREADY_LOGGED_IN): return "CKR_USER_ALREADY_LOGGED_IN"; case (CKR_USER_NOT_LOGGED_IN): return "CKR_USER_NOT_LOGGED_IN"; case (CKR_USER_PIN_NOT_INITIALIZED): return "CKR_USER_PIN_NOT_INITIALIZED"; case (CKR_USER_TYPE_INVALID): return "CKR_USER_TYPE_INVALID"; case (CKR_USER_ANOTHER_ALREADY_LOGGED_IN): return "CKR_USER_ANOTHER_ALREADY_LOGGED_IN"; case (CKR_USER_TOO_MANY_TYPES): return "CKR_USER_TOO_MANY_TYPES"; case (CKR_WRAPPED_KEY_INVALID): return "CKR_WRAPPED_KEY_INVALID"; case (CKR_WRAPPED_KEY_LEN_RANGE): return "CKR_WRAPPED_KEY_LEN_RANGE"; case (CKR_WRAPPING_KEY_HANDLE_INVALID): return "CKR_WRAPPING_KEY_HANDLE_INVALID"; case (CKR_WRAPPING_KEY_SIZE_RANGE): return "CKR_WRAPPING_KEY_SIZE_RANGE"; case (CKR_WRAPPING_KEY_TYPE_INCONSISTENT): return "CKR_WRAPPING_KEY_TYPE_INCONSISTENT"; case (CKR_RANDOM_SEED_NOT_SUPPORTED): return "CKR_RANDOM_SEED_NOT_SUPPORTED"; case (CKR_RANDOM_NO_RNG): return "CKR_RANDOM_NO_RNG"; case (CKR_DOMAIN_PARAMS_INVALID): return "CKR_DOMAIN_PARAMS_INVALID"; case (CKR_CURVE_NOT_SUPPORTED): return "CKR_CURVE_NOT_SUPPORTED"; case (CKR_BUFFER_TOO_SMALL): return "CKR_BUFFER_TOO_SMALL"; case (CKR_SAVED_STATE_INVALID): return "CKR_SAVED_STATE_INVALID"; case (CKR_INFORMATION_SENSITIVE): return "CKR_INFORMATION_SENSITIVE"; case (CKR_STATE_UNSAVEABLE): return "CKR_STATE_UNSAVEABLE"; case (CKR_CRYPTOKI_NOT_INITIALIZED): return "CKR_CRYPTOKI_NOT_INITIALIZED"; case 
(CKR_CRYPTOKI_ALREADY_INITIALIZED): return "CKR_CRYPTOKI_ALREADY_INITIALIZED"; case (CKR_MUTEX_BAD): return "CKR_MUTEX_BAD"; case (CKR_MUTEX_NOT_LOCKED): return "CKR_MUTEX_NOT_LOCKED"; case (CKR_NEW_PIN_MODE): return "CKR_NEW_PIN_MODE"; case (CKR_NEXT_OTP): return "CKR_NEXT_OTP"; case (CKR_EXCEEDED_MAX_ITERATIONS): return "CKR_EXCEEDED_MAX_ITERATIONS"; case (CKR_FIPS_SELF_TEST_FAILED): return "CKR_FIPS_SELF_TEST_FAILED"; case (CKR_LIBRARY_LOAD_FAILED): return "CKR_LIBRARY_LOAD_FAILED"; case (CKR_PIN_TOO_WEAK): return "CKR_PIN_TOO_WEAK"; case (CKR_PUBLIC_KEY_INVALID): return "CKR_PUBLIC_KEY_INVALID"; case (CKR_FUNCTION_REJECTED): return "CKR_FUNCTION_REJECTED"; default: return ""; } /* clang-format on */ } /* Translate from a CK_RV to an AWS error code */ static int s_ck_to_aws_error(CK_RV rv) { AWS_ASSERT(rv != CKR_OK); /* clang-format off */ switch (rv) { case (CKR_CANCEL): return AWS_ERROR_PKCS11_CKR_CANCEL; case (CKR_HOST_MEMORY): return AWS_ERROR_PKCS11_CKR_HOST_MEMORY; case (CKR_SLOT_ID_INVALID): return AWS_ERROR_PKCS11_CKR_SLOT_ID_INVALID; case (CKR_GENERAL_ERROR): return AWS_ERROR_PKCS11_CKR_GENERAL_ERROR; case (CKR_FUNCTION_FAILED): return AWS_ERROR_PKCS11_CKR_FUNCTION_FAILED; case (CKR_ARGUMENTS_BAD): return AWS_ERROR_PKCS11_CKR_ARGUMENTS_BAD; case (CKR_NO_EVENT): return AWS_ERROR_PKCS11_CKR_NO_EVENT; case (CKR_NEED_TO_CREATE_THREADS): return AWS_ERROR_PKCS11_CKR_NEED_TO_CREATE_THREADS; case (CKR_CANT_LOCK): return AWS_ERROR_PKCS11_CKR_CANT_LOCK; case (CKR_ATTRIBUTE_READ_ONLY): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_READ_ONLY; case (CKR_ATTRIBUTE_SENSITIVE): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_SENSITIVE; case (CKR_ATTRIBUTE_TYPE_INVALID): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_TYPE_INVALID; case (CKR_ATTRIBUTE_VALUE_INVALID): return AWS_ERROR_PKCS11_CKR_ATTRIBUTE_VALUE_INVALID; case (CKR_ACTION_PROHIBITED): return AWS_ERROR_PKCS11_CKR_ACTION_PROHIBITED; case (CKR_DATA_INVALID): return AWS_ERROR_PKCS11_CKR_DATA_INVALID; case (CKR_DATA_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_DATA_LEN_RANGE; case (CKR_DEVICE_ERROR): return AWS_ERROR_PKCS11_CKR_DEVICE_ERROR; case (CKR_DEVICE_MEMORY): return AWS_ERROR_PKCS11_CKR_DEVICE_MEMORY; case (CKR_DEVICE_REMOVED): return AWS_ERROR_PKCS11_CKR_DEVICE_REMOVED; case (CKR_ENCRYPTED_DATA_INVALID): return AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_INVALID; case (CKR_ENCRYPTED_DATA_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_ENCRYPTED_DATA_LEN_RANGE; case (CKR_FUNCTION_CANCELED): return AWS_ERROR_PKCS11_CKR_FUNCTION_CANCELED; case (CKR_FUNCTION_NOT_PARALLEL): return AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_PARALLEL; case (CKR_FUNCTION_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_FUNCTION_NOT_SUPPORTED; case (CKR_KEY_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_KEY_HANDLE_INVALID; case (CKR_KEY_SIZE_RANGE): return AWS_ERROR_PKCS11_CKR_KEY_SIZE_RANGE; case (CKR_KEY_TYPE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_KEY_TYPE_INCONSISTENT; case (CKR_KEY_NOT_NEEDED): return AWS_ERROR_PKCS11_CKR_KEY_NOT_NEEDED; case (CKR_KEY_CHANGED): return AWS_ERROR_PKCS11_CKR_KEY_CHANGED; case (CKR_KEY_NEEDED): return AWS_ERROR_PKCS11_CKR_KEY_NEEDED; case (CKR_KEY_INDIGESTIBLE): return AWS_ERROR_PKCS11_CKR_KEY_INDIGESTIBLE; case (CKR_KEY_FUNCTION_NOT_PERMITTED): return AWS_ERROR_PKCS11_CKR_KEY_FUNCTION_NOT_PERMITTED; case (CKR_KEY_NOT_WRAPPABLE): return AWS_ERROR_PKCS11_CKR_KEY_NOT_WRAPPABLE; case (CKR_KEY_UNEXTRACTABLE): return AWS_ERROR_PKCS11_CKR_KEY_UNEXTRACTABLE; case (CKR_MECHANISM_INVALID): return AWS_ERROR_PKCS11_CKR_MECHANISM_INVALID; case (CKR_MECHANISM_PARAM_INVALID): return 
AWS_ERROR_PKCS11_CKR_MECHANISM_PARAM_INVALID; case (CKR_OBJECT_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_OBJECT_HANDLE_INVALID; case (CKR_OPERATION_ACTIVE): return AWS_ERROR_PKCS11_CKR_OPERATION_ACTIVE; case (CKR_OPERATION_NOT_INITIALIZED): return AWS_ERROR_PKCS11_CKR_OPERATION_NOT_INITIALIZED; case (CKR_PIN_INCORRECT): return AWS_ERROR_PKCS11_CKR_PIN_INCORRECT; case (CKR_PIN_INVALID): return AWS_ERROR_PKCS11_CKR_PIN_INVALID; case (CKR_PIN_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_PIN_LEN_RANGE; case (CKR_PIN_EXPIRED): return AWS_ERROR_PKCS11_CKR_PIN_EXPIRED; case (CKR_PIN_LOCKED): return AWS_ERROR_PKCS11_CKR_PIN_LOCKED; case (CKR_SESSION_CLOSED): return AWS_ERROR_PKCS11_CKR_SESSION_CLOSED; case (CKR_SESSION_COUNT): return AWS_ERROR_PKCS11_CKR_SESSION_COUNT; case (CKR_SESSION_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_SESSION_HANDLE_INVALID; case (CKR_SESSION_PARALLEL_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_SESSION_PARALLEL_NOT_SUPPORTED; case (CKR_SESSION_READ_ONLY): return AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY; case (CKR_SESSION_EXISTS): return AWS_ERROR_PKCS11_CKR_SESSION_EXISTS; case (CKR_SESSION_READ_ONLY_EXISTS): return AWS_ERROR_PKCS11_CKR_SESSION_READ_ONLY_EXISTS; case (CKR_SESSION_READ_WRITE_SO_EXISTS): return AWS_ERROR_PKCS11_CKR_SESSION_READ_WRITE_SO_EXISTS; case (CKR_SIGNATURE_INVALID): return AWS_ERROR_PKCS11_CKR_SIGNATURE_INVALID; case (CKR_SIGNATURE_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_SIGNATURE_LEN_RANGE; case (CKR_TEMPLATE_INCOMPLETE): return AWS_ERROR_PKCS11_CKR_TEMPLATE_INCOMPLETE; case (CKR_TEMPLATE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_TEMPLATE_INCONSISTENT; case (CKR_TOKEN_NOT_PRESENT): return AWS_ERROR_PKCS11_CKR_TOKEN_NOT_PRESENT; case (CKR_TOKEN_NOT_RECOGNIZED): return AWS_ERROR_PKCS11_CKR_TOKEN_NOT_RECOGNIZED; case (CKR_TOKEN_WRITE_PROTECTED): return AWS_ERROR_PKCS11_CKR_TOKEN_WRITE_PROTECTED; case (CKR_UNWRAPPING_KEY_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_HANDLE_INVALID; case (CKR_UNWRAPPING_KEY_SIZE_RANGE): return AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_SIZE_RANGE; case (CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT; case (CKR_USER_ALREADY_LOGGED_IN): return AWS_ERROR_PKCS11_CKR_USER_ALREADY_LOGGED_IN; case (CKR_USER_NOT_LOGGED_IN): return AWS_ERROR_PKCS11_CKR_USER_NOT_LOGGED_IN; case (CKR_USER_PIN_NOT_INITIALIZED): return AWS_ERROR_PKCS11_CKR_USER_PIN_NOT_INITIALIZED; case (CKR_USER_TYPE_INVALID): return AWS_ERROR_PKCS11_CKR_USER_TYPE_INVALID; case (CKR_USER_ANOTHER_ALREADY_LOGGED_IN): return AWS_ERROR_PKCS11_CKR_USER_ANOTHER_ALREADY_LOGGED_IN; case (CKR_USER_TOO_MANY_TYPES): return AWS_ERROR_PKCS11_CKR_USER_TOO_MANY_TYPES; case (CKR_WRAPPED_KEY_INVALID): return AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_INVALID; case (CKR_WRAPPED_KEY_LEN_RANGE): return AWS_ERROR_PKCS11_CKR_WRAPPED_KEY_LEN_RANGE; case (CKR_WRAPPING_KEY_HANDLE_INVALID): return AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_HANDLE_INVALID; case (CKR_WRAPPING_KEY_SIZE_RANGE): return AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_SIZE_RANGE; case (CKR_WRAPPING_KEY_TYPE_INCONSISTENT): return AWS_ERROR_PKCS11_CKR_WRAPPING_KEY_TYPE_INCONSISTENT; case (CKR_RANDOM_SEED_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_RANDOM_SEED_NOT_SUPPORTED; case (CKR_RANDOM_NO_RNG): return AWS_ERROR_PKCS11_CKR_RANDOM_NO_RNG; case (CKR_DOMAIN_PARAMS_INVALID): return AWS_ERROR_PKCS11_CKR_DOMAIN_PARAMS_INVALID; case (CKR_CURVE_NOT_SUPPORTED): return AWS_ERROR_PKCS11_CKR_CURVE_NOT_SUPPORTED; case (CKR_BUFFER_TOO_SMALL): return AWS_ERROR_PKCS11_CKR_BUFFER_TOO_SMALL; case 
(CKR_SAVED_STATE_INVALID): return AWS_ERROR_PKCS11_CKR_SAVED_STATE_INVALID; case (CKR_INFORMATION_SENSITIVE): return AWS_ERROR_PKCS11_CKR_INFORMATION_SENSITIVE; case (CKR_STATE_UNSAVEABLE): return AWS_ERROR_PKCS11_CKR_STATE_UNSAVEABLE; case (CKR_CRYPTOKI_NOT_INITIALIZED): return AWS_ERROR_PKCS11_CKR_CRYPTOKI_NOT_INITIALIZED; case (CKR_CRYPTOKI_ALREADY_INITIALIZED): return AWS_ERROR_PKCS11_CKR_CRYPTOKI_ALREADY_INITIALIZED; case (CKR_MUTEX_BAD): return AWS_ERROR_PKCS11_CKR_MUTEX_BAD; case (CKR_MUTEX_NOT_LOCKED): return AWS_ERROR_PKCS11_CKR_MUTEX_NOT_LOCKED; case (CKR_NEW_PIN_MODE): return AWS_ERROR_PKCS11_CKR_NEW_PIN_MODE; case (CKR_NEXT_OTP): return AWS_ERROR_PKCS11_CKR_NEXT_OTP; case (CKR_EXCEEDED_MAX_ITERATIONS): return AWS_ERROR_PKCS11_CKR_EXCEEDED_MAX_ITERATIONS; case (CKR_FIPS_SELF_TEST_FAILED): return AWS_ERROR_PKCS11_CKR_FIPS_SELF_TEST_FAILED; case (CKR_LIBRARY_LOAD_FAILED): return AWS_ERROR_PKCS11_CKR_LIBRARY_LOAD_FAILED; case (CKR_PIN_TOO_WEAK): return AWS_ERROR_PKCS11_CKR_PIN_TOO_WEAK; case (CKR_PUBLIC_KEY_INVALID): return AWS_ERROR_PKCS11_CKR_PUBLIC_KEY_INVALID; case (CKR_FUNCTION_REJECTED): return AWS_ERROR_PKCS11_CKR_FUNCTION_REJECTED; default: return AWS_ERROR_PKCS11_UNKNOWN_CRYPTOKI_RETURN_VALUE; } /* clang-format on */ } /* Return c-string for PKCS#11 CKK_* contants. */ static const char *s_ckk_str(CK_KEY_TYPE key_type) { /* clang-format off */ switch(key_type) { case (CKK_RSA): return "CKK_RSA"; case (CKK_DSA): return "CKK_DSA"; case (CKK_DH): return "CKK_DH"; case (CKK_EC): return "CKK_EC"; case (CKK_X9_42_DH): return "CKK_X9_42_DH"; case (CKK_KEA): return "CKK_KEA"; case (CKK_GENERIC_SECRET): return "CKK_GENERIC_SECRET"; case (CKK_RC2): return "CKK_RC2"; case (CKK_RC4): return "CKK_RC4"; case (CKK_DES): return "CKK_DES"; case (CKK_DES2): return "CKK_DES2"; case (CKK_DES3): return "CKK_DES3"; case (CKK_CAST): return "CKK_CAST"; case (CKK_CAST3): return "CKK_CAST3"; case (CKK_CAST128): return "CKK_CAST128"; case (CKK_RC5): return "CKK_RC5"; case (CKK_IDEA): return "CKK_IDEA"; case (CKK_SKIPJACK): return "CKK_SKIPJACK"; case (CKK_BATON): return "CKK_BATON"; case (CKK_JUNIPER): return "CKK_JUNIPER"; case (CKK_CDMF): return "CKK_CDMF"; case (CKK_AES): return "CKK_AES"; case (CKK_BLOWFISH): return "CKK_BLOWFISH"; case (CKK_TWOFISH): return "CKK_TWOFISH"; case (CKK_SECURID): return "CKK_SECURID"; case (CKK_HOTP): return "CKK_HOTP"; case (CKK_ACTI): return "CKK_ACTI"; case (CKK_CAMELLIA): return "CKK_CAMELLIA"; case (CKK_ARIA): return "CKK_ARIA"; case (CKK_MD5_HMAC): return "CKK_MD5_HMAC"; case (CKK_SHA_1_HMAC): return "CKK_SHA_1_HMAC"; case (CKK_RIPEMD128_HMAC): return "CKK_RIPEMD128_HMAC"; case (CKK_RIPEMD160_HMAC): return "CKK_RIPEMD160_HMAC"; case (CKK_SHA256_HMAC): return "CKK_SHA256_HMAC"; case (CKK_SHA384_HMAC): return "CKK_SHA384_HMAC"; case (CKK_SHA512_HMAC): return "CKK_SHA512_HMAC"; case (CKK_SHA224_HMAC): return "CKK_SHA224_HMAC"; case (CKK_SEED): return "CKK_SEED"; case (CKK_GOSTR3410): return "CKK_GOSTR3410"; case (CKK_GOSTR3411): return "CKK_GOSTR3411"; case (CKK_GOST28147): return "CKK_GOST28147"; default: return ""; } /* clang-format on */ } /* Log the failure of a PKCS#11 function, and call aws_raise_error() with the appropriate AWS error code */ static int s_raise_ck_error(const struct aws_pkcs11_lib *pkcs11_lib, const char *fn_name, CK_RV rv) { int aws_err = s_ck_to_aws_error(rv); AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p: %s() failed. PKCS#11 error: %s (0x%08lX). 
AWS error: %s", (void *)pkcs11_lib, fn_name, aws_pkcs11_ckr_str(rv), rv, aws_error_name(aws_err)); return aws_raise_error(aws_err); } /* Log the failure of a PKCS#11 session-handle function and call aws_raise_error() with the appropriate error code */ static int s_raise_ck_session_error( const struct aws_pkcs11_lib *pkcs11_lib, const char *fn_name, CK_SESSION_HANDLE session, CK_RV rv) { int aws_err = s_ck_to_aws_error(rv); AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: %s() failed. PKCS#11 error: %s (0x%08lX). AWS error: %s", (void *)pkcs11_lib, session, fn_name, aws_pkcs11_ckr_str(rv), rv, aws_error_name(aws_err)); return aws_raise_error(aws_err); } /* PKCS#11 often pads strings with ' ' */ static bool s_is_padding(uint8_t c) { return c == ' '; } /* Return byte-cursor to string with ' ' padding trimmed off. * PKCS#11 structs commonly stores strings in fixed-width arrays, padded by ' ' instead of null-terminator */ static struct aws_byte_cursor s_trim_padding(const uint8_t *str, size_t len) { const struct aws_byte_cursor src = aws_byte_cursor_from_array(str, len); return aws_byte_cursor_right_trim_pred(&src, s_is_padding); } /* Callback for PKCS#11 library to create a mutex. * Described in PKCS11-base-v2.40 section 3.7 */ static CK_RV s_pkcs11_create_mutex(CK_VOID_PTR_PTR mutex_out) { if (mutex_out == NULL) { return CKR_GENERAL_ERROR; } /* Using the default allocator because there's no way to know which PKCS#11 instance is invoking this callback */ struct aws_allocator *allocator = aws_default_allocator(); struct aws_mutex *mutex = aws_mem_calloc(allocator, 1, sizeof(struct aws_mutex)); if (aws_mutex_init(mutex)) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "PKCS#11 CreateMutex() failed, error %s", aws_error_name(aws_last_error())); aws_mem_release(allocator, mutex); *mutex_out = NULL; return CKR_GENERAL_ERROR; } *mutex_out = mutex; return CKR_OK; } /* Callback for PKCS#11 library to destroy a mutex. * Described in PKCS11-base-v2.40 section 3.7 */ static CK_RV s_pkcs11_destroy_mutex(CK_VOID_PTR mutex_ptr) { if (mutex_ptr == NULL) { return CKR_GENERAL_ERROR; } struct aws_mutex *mutex = mutex_ptr; aws_mutex_clean_up(mutex); aws_mem_release(aws_default_allocator(), mutex); return CKR_OK; } /* Callback for PKCS#11 library to lock a mutex. * Described in PKCS11-base-v2.40 section 3.7 */ static CK_RV s_pkcs11_lock_mutex(CK_VOID_PTR mutex_ptr) { if (mutex_ptr == NULL) { return CKR_GENERAL_ERROR; } struct aws_mutex *mutex = mutex_ptr; if (aws_mutex_lock(mutex)) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "PKCS#11 LockMutex() failed, error %s", aws_error_name(aws_last_error())); return CKR_GENERAL_ERROR; } return CKR_OK; } /* Callback for PKCS#11 library to unlock a mutex. * Described in PKCS11-base-v2.40 section 3.7 */ static CK_RV s_pkcs11_unlock_mutex(CK_VOID_PTR mutex_ptr) { if (mutex_ptr == NULL) { return CKR_GENERAL_ERROR; } struct aws_mutex *mutex = mutex_ptr; if (aws_mutex_unlock(mutex)) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "PKCS#11 LockMutex() failed, error %s", aws_error_name(aws_last_error())); /* NOTE: Cryptoki has a CKR_MUTEX_NOT_LOCKED error code. * But posix doesn't treat this as an error and neither does windows so ¯\_(ツ)_/¯ * If aws_mutex_unlock() failed here, it was something else. 
*/ return CKR_GENERAL_ERROR; } return CKR_OK; } struct aws_pkcs11_lib { struct aws_ref_count ref_count; struct aws_allocator *allocator; struct aws_shared_library shared_lib; CK_FUNCTION_LIST_PTR function_list; /* If true, C_Finalize() should be called when last ref-count is released */ bool finalize_on_cleanup; }; /* Invoked when last ref-count is released. Free all resources. * Note that this is also called if initialization fails half-way through */ static void s_pkcs11_lib_destroy(void *user_data) { struct aws_pkcs11_lib *pkcs11_lib = user_data; AWS_LOGF_DEBUG( AWS_LS_IO_PKCS11, "id=%p: Unloading PKCS#11. C_Finalize:%s", (void *)pkcs11_lib, pkcs11_lib->finalize_on_cleanup ? "yes" : "omit"); if (pkcs11_lib->finalize_on_cleanup) { CK_RV rv = pkcs11_lib->function_list->C_Finalize(NULL); if (rv != CKR_OK) { /* Log about it, but continue cleaning up */ s_raise_ck_error(pkcs11_lib, "C_Finalize", rv); } } aws_shared_library_clean_up(&pkcs11_lib->shared_lib); aws_mem_release(pkcs11_lib->allocator, pkcs11_lib); } struct aws_pkcs11_lib *aws_pkcs11_lib_new( struct aws_allocator *allocator, const struct aws_pkcs11_lib_options *options) { /* Validate options */ switch (options->initialize_finalize_behavior) { case AWS_PKCS11_LIB_DEFAULT_BEHAVIOR: case AWS_PKCS11_LIB_OMIT_INITIALIZE: case AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE: break; default: AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Invalid PKCS#11 behavior arg: %d", options->initialize_finalize_behavior); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } /* Create the struct */ struct aws_pkcs11_lib *pkcs11_lib = aws_mem_calloc(allocator, 1, sizeof(struct aws_pkcs11_lib)); aws_ref_count_init(&pkcs11_lib->ref_count, pkcs11_lib, s_pkcs11_lib_destroy); pkcs11_lib->allocator = allocator; /* Load the library. */ /* need a null-terminated string to call next function, * or NULL if going to search the current application for PKCS#11 symbols. */ struct aws_string *filename_storage = NULL; const char *filename = NULL; if (options->filename.ptr != NULL) { filename_storage = aws_string_new_from_cursor(allocator, &options->filename); filename = aws_string_c_str(filename_storage); } AWS_LOGF_DEBUG( AWS_LS_IO_PKCS11, "Loading PKCS#11. file:'%s' C_Initialize:%s", filename ? filename : "
<MAIN PROGRAM>
", (options->initialize_finalize_behavior == AWS_PKCS11_LIB_OMIT_INITIALIZE) ? "omit" : "yes"); if (aws_shared_library_init(&pkcs11_lib->shared_lib, filename)) { goto error; } /* Find C_GetFunctionList() and call it to get the list of pointers to all the other functions */ CK_C_GetFunctionList get_function_list = NULL; if (aws_shared_library_find_function( &pkcs11_lib->shared_lib, "C_GetFunctionList", (aws_generic_function *)&get_function_list)) { goto error; } CK_RV rv = get_function_list(&pkcs11_lib->function_list); if (rv != CKR_OK) { s_raise_ck_error(pkcs11_lib, "C_GetFunctionList", rv); goto error; } /* Check function list's API version */ CK_VERSION version = pkcs11_lib->function_list->version; if ((version.major != AWS_SUPPORTED_CRYPTOKI_VERSION_MAJOR) || (version.minor < AWS_MIN_SUPPORTED_CRYPTOKI_VERSION_MINOR)) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p: Library implements PKCS#11 version %" PRIu8 ".%" PRIu8 " but %d.%d compatibility is required", (void *)pkcs11_lib, version.major, version.minor, AWS_SUPPORTED_CRYPTOKI_VERSION_MAJOR, AWS_MIN_SUPPORTED_CRYPTOKI_VERSION_MINOR); aws_raise_error(AWS_ERROR_PKCS11_VERSION_UNSUPPORTED); goto error; } /* Call C_Initialize() */ const char *init_logging_str = "omit"; if (options->initialize_finalize_behavior != AWS_PKCS11_LIB_OMIT_INITIALIZE) { CK_C_INITIALIZE_ARGS init_args = { /* encourage lib to use our locks */ .CreateMutex = s_pkcs11_create_mutex, .DestroyMutex = s_pkcs11_destroy_mutex, .LockMutex = s_pkcs11_lock_mutex, .UnlockMutex = s_pkcs11_unlock_mutex, /* but if it needs to use OS locks instead, sure whatever you do you */ .flags = CKF_OS_LOCKING_OK, }; rv = pkcs11_lib->function_list->C_Initialize(&init_args); if (rv != CKR_OK) { /* Ignore already-initialized errors (unless user wants STRICT behavior) */ if (rv != CKR_CRYPTOKI_ALREADY_INITIALIZED || options->initialize_finalize_behavior == AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE) { s_raise_ck_error(pkcs11_lib, "C_Initialize", rv); goto error; } } init_logging_str = aws_pkcs11_ckr_str(rv); if (options->initialize_finalize_behavior == AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE) { pkcs11_lib->finalize_on_cleanup = true; } } /* Get info about the library and log it. * This will be VERY useful for diagnosing user issues. */ CK_INFO info; AWS_ZERO_STRUCT(info); rv = pkcs11_lib->function_list->C_GetInfo(&info); if (rv != CKR_OK) { s_raise_ck_error(pkcs11_lib, "C_GetInfo", rv); goto error; } AWS_LOGF_INFO( AWS_LS_IO_PKCS11, "id=%p: PKCS#11 loaded. file:'%s' cryptokiVersion:%" PRIu8 ".%" PRIu8 " manufacturerID:'" PRInSTR "' flags:0x%08lX libraryDescription:'" PRInSTR "' libraryVersion:%" PRIu8 ".%" PRIu8 " C_Initialize:%s", (void *)pkcs11_lib, filename ? filename : "
", info.cryptokiVersion.major, info.cryptokiVersion.minor, AWS_BYTE_CURSOR_PRI(s_trim_padding(info.manufacturerID, sizeof(info.manufacturerID))), info.flags, AWS_BYTE_CURSOR_PRI(s_trim_padding(info.libraryDescription, sizeof(info.libraryDescription))), info.libraryVersion.major, info.libraryVersion.minor, init_logging_str); /* Success! */ goto clean_up; error: AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p: Failed to initialize PKCS#11 library from '%s'", (void *)pkcs11_lib, filename ? filename : ""); aws_pkcs11_lib_release(pkcs11_lib); pkcs11_lib = NULL; clean_up: aws_string_destroy(filename_storage); return pkcs11_lib; } struct aws_pkcs11_lib *aws_pkcs11_lib_acquire(struct aws_pkcs11_lib *pkcs11_lib) { aws_ref_count_acquire(&pkcs11_lib->ref_count); return pkcs11_lib; } void aws_pkcs11_lib_release(struct aws_pkcs11_lib *pkcs11_lib) { if (pkcs11_lib) { aws_ref_count_release(&pkcs11_lib->ref_count); } } /** * Find the slot that meets all criteria: * - has a token * - if match_slot_id is non-null, then slot IDs must match * - if match_token_label is non-null, then labels must match * The function fails unless it finds exactly one slot meeting all criteria. */ int aws_pkcs11_lib_find_slot_with_token( struct aws_pkcs11_lib *pkcs11_lib, const uint64_t *match_slot_id, const struct aws_string *match_token_label, CK_SLOT_ID *out_slot_id) { CK_SLOT_ID *slot_id_array = NULL; /* array of IDs */ CK_SLOT_ID *candidate = NULL; /* points to ID in slot_id_array */ CK_TOKEN_INFO info; AWS_ZERO_STRUCT(info); bool success = false; /* query number of slots with tokens */ CK_ULONG num_slots = 0; CK_RV rv = pkcs11_lib->function_list->C_GetSlotList(CK_TRUE /*tokenPresent*/, NULL /*pSlotList*/, &num_slots); if (rv != CKR_OK) { s_raise_ck_error(pkcs11_lib, "C_GetSlotList", rv); goto clean_up; } if (num_slots == 0) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "id=%p: No PKCS#11 tokens present in any slot", (void *)pkcs11_lib); aws_raise_error(AWS_ERROR_PKCS11_TOKEN_NOT_FOUND); goto clean_up; } AWS_LOGF_TRACE( AWS_LS_IO_PKCS11, "id=%p: Found %lu slots with tokens. 
Picking one...", (void *)pkcs11_lib, num_slots); /* allocate space for slot IDs */ slot_id_array = aws_mem_calloc(pkcs11_lib->allocator, num_slots, sizeof(CK_SLOT_ID)); /* query all slot IDs */ rv = pkcs11_lib->function_list->C_GetSlotList(CK_TRUE /*tokenPresent*/, slot_id_array, &num_slots); if (rv != CKR_OK) { s_raise_ck_error(pkcs11_lib, "C_GetSlotList", rv); goto clean_up; } for (size_t i = 0; i < num_slots; ++i) { CK_SLOT_ID slot_id_i = slot_id_array[i]; /* if specific slot_id requested, and this isn't it, then skip */ if ((match_slot_id != NULL) && (*match_slot_id != slot_id_i)) { AWS_LOGF_TRACE( AWS_LS_IO_PKCS11, "id=%p: Ignoring PKCS#11 token because slot %lu doesn't match %" PRIu64, (void *)pkcs11_lib, slot_id_i, *match_slot_id); continue; } /* query token info */ CK_TOKEN_INFO token_info_i; AWS_ZERO_STRUCT(token_info_i); rv = pkcs11_lib->function_list->C_GetTokenInfo(slot_id_i, &token_info_i); if (rv != CKR_OK) { s_raise_ck_error(pkcs11_lib, "C_GetTokenInfo", rv); goto clean_up; } /* if specific token label requested, and this isn't it, then skip */ if (match_token_label != NULL) { struct aws_byte_cursor label_i = s_trim_padding(token_info_i.label, sizeof(token_info_i.label)); if (aws_string_eq_byte_cursor(match_token_label, &label_i) == false) { AWS_LOGF_TRACE( AWS_LS_IO_PKCS11, "id=%p: Ignoring PKCS#11 token in slot %lu because label '" PRInSTR "' doesn't match '%s'", (void *)pkcs11_lib, slot_id_i, AWS_BYTE_CURSOR_PRI(label_i), aws_string_c_str(match_token_label)); continue; } } /* this slot is a candidate! */ /* be sure there's only one candidate */ if (candidate != NULL) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p: Failed to choose PKCS#11 token, multiple tokens match search criteria", (void *)pkcs11_lib); aws_raise_error(AWS_ERROR_PKCS11_TOKEN_NOT_FOUND); goto clean_up; } /* the new candidate! */ candidate = &slot_id_array[i]; memcpy(&info, &token_info_i, sizeof(CK_TOKEN_INFO)); } if (candidate == NULL) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p: Failed to find PKCS#11 token which matches search criteria", (void *)pkcs11_lib); aws_raise_error(AWS_ERROR_PKCS11_TOKEN_NOT_FOUND); goto clean_up; } /* success! */ AWS_LOGF_DEBUG( AWS_LS_IO_PKCS11, "id=%p: Selected PKCS#11 token. slot:%lu label:'" PRInSTR "' manufacturerID:'" PRInSTR "' model:'" PRInSTR "' serialNumber:'" PRInSTR "' flags:0x%08lX sessionCount:%lu/%lu rwSessionCount:%lu/%lu" " freePublicMemory:%lu/%lu freePrivateMemory:%lu/%lu" " hardwareVersion:%" PRIu8 ".%" PRIu8 " firmwareVersion:%" PRIu8 ".%" PRIu8, (void *)pkcs11_lib, *candidate, AWS_BYTE_CURSOR_PRI(s_trim_padding(info.label, sizeof(info.label))), AWS_BYTE_CURSOR_PRI(s_trim_padding(info.manufacturerID, sizeof(info.manufacturerID))), AWS_BYTE_CURSOR_PRI(s_trim_padding(info.model, sizeof(info.model))), AWS_BYTE_CURSOR_PRI(s_trim_padding(info.serialNumber, sizeof(info.serialNumber))), info.flags, info.ulSessionCount, info.ulMaxSessionCount, info.ulRwSessionCount, info.ulMaxRwSessionCount, info.ulFreePublicMemory, info.ulTotalPublicMemory, info.ulFreePrivateMemory, info.ulTotalPrivateMemory, info.hardwareVersion.major, info.hardwareVersion.minor, info.firmwareVersion.major, info.firmwareVersion.minor); *out_slot_id = *candidate; success = true; clean_up: aws_mem_release(pkcs11_lib->allocator, slot_id_array); return success ? 
AWS_OP_SUCCESS : AWS_OP_ERR; } CK_FUNCTION_LIST *aws_pkcs11_lib_get_function_list(struct aws_pkcs11_lib *pkcs11_lib) { return pkcs11_lib->function_list; } int aws_pkcs11_lib_open_session( struct aws_pkcs11_lib *pkcs11_lib, CK_SLOT_ID slot_id, CK_SESSION_HANDLE *out_session_handle) { CK_SESSION_HANDLE session_handle = CK_INVALID_HANDLE; CK_RV rv = pkcs11_lib->function_list->C_OpenSession( slot_id, CKF_SERIAL_SESSION /*flags*/, NULL /*pApplication*/, NULL /*notify*/, &session_handle); if (rv != CKR_OK) { return s_raise_ck_error(pkcs11_lib, "C_OpenSession", rv); } /* success! */ AWS_LOGF_DEBUG( AWS_LS_IO_PKCS11, "id=%p session=%lu: Session opened on slot %lu", (void *)pkcs11_lib, session_handle, slot_id); *out_session_handle = session_handle; return AWS_OP_SUCCESS; } void aws_pkcs11_lib_close_session(struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle) { CK_RV rv = pkcs11_lib->function_list->C_CloseSession(session_handle); if (rv == CKR_OK) { AWS_LOGF_DEBUG(AWS_LS_IO_PKCS11, "id=%p session=%lu: Session closed", (void *)pkcs11_lib, session_handle); } else { /* Log the error, but we can't really do anything about it */ AWS_LOGF_WARN( AWS_LS_IO_PKCS11, "id=%p session=%lu: Ignoring C_CloseSession() failure. PKCS#11 error: %s (0x%08lX)", (void *)pkcs11_lib, session_handle, aws_pkcs11_ckr_str(rv), rv); } } int aws_pkcs11_lib_login_user( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, const struct aws_string *optional_user_pin) { CK_UTF8CHAR_PTR pin = NULL; CK_ULONG pin_len = 0; if (optional_user_pin) { if (optional_user_pin->len > ULONG_MAX) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "id=%p session=%lu: PIN is too long", (void *)pkcs11_lib, session_handle); return aws_raise_error(AWS_ERROR_PKCS11_CKR_PIN_INCORRECT); } pin_len = (CK_ULONG)optional_user_pin->len; pin = (CK_UTF8CHAR_PTR)optional_user_pin->bytes; } CK_RV rv = pkcs11_lib->function_list->C_Login(session_handle, CKU_USER, pin, pin_len); /* Ignore if we are already logged in, this could happen if application using device sdk also logs in to pkcs11 */ if (rv != CKR_OK && rv != CKR_USER_ALREADY_LOGGED_IN) { return s_raise_ck_session_error(pkcs11_lib, "C_Login", session_handle, rv); } /* Success! */ if (rv == CKR_USER_ALREADY_LOGGED_IN) { AWS_LOGF_DEBUG( AWS_LS_IO_PKCS11, "id=%p session=%lu: User was already logged in", (void *)pkcs11_lib, session_handle); } else { AWS_LOGF_DEBUG(AWS_LS_IO_PKCS11, "id=%p session=%lu: User logged in", (void *)pkcs11_lib, session_handle); } return AWS_OP_SUCCESS; } /** * Find the object that meets all criteria: * - is private key * - if match_label is non-null, then labels must match * The function fails unless it finds exactly one object meeting all criteria. */ int aws_pkcs11_lib_find_private_key( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, const struct aws_string *match_label, CK_OBJECT_HANDLE *out_key_handle, CK_KEY_TYPE *out_key_type) { /* gets set true after everything succeeds */ bool success = false; /* gets set true after search initialized. 
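 * For reference, PKCS#11 object search is a three-call protocol: C_FindObjectsInit() with an attribute
 * template, C_FindObjects() to fetch matches (the code below asks for up to 2 so that ambiguity can be
 * detected), then C_FindObjectsFinal() to end the search. This flag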
* indicates that C_FindObjectsFinal() must be run before function ends */ bool must_finalize_search = false; /* set up search attributes */ CK_OBJECT_CLASS key_class = CKO_PRIVATE_KEY; CK_ULONG num_attributes = 1; CK_ATTRIBUTE attributes[2] = { { .type = CKA_CLASS, .pValue = &key_class, .ulValueLen = sizeof(key_class), }, }; if (match_label != NULL) { if (match_label->len > ULONG_MAX) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: private key label is too long", (void *)pkcs11_lib, session_handle); aws_raise_error(AWS_ERROR_PKCS11_KEY_NOT_FOUND); goto clean_up; } CK_ATTRIBUTE *attr = &attributes[num_attributes++]; attr->type = CKA_LABEL; attr->pValue = (void *)match_label->bytes; attr->ulValueLen = (CK_ULONG)match_label->len; } /* initialize search */ CK_RV rv = pkcs11_lib->function_list->C_FindObjectsInit(session_handle, attributes, num_attributes); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_FindObjectsInit", session_handle, rv); goto clean_up; } must_finalize_search = true; /* get search results. * note that we're asking for 2 objects max, so we can fail if we find more than one */ CK_OBJECT_HANDLE found_objects[2] = {0}; CK_ULONG num_found = 0; rv = pkcs11_lib->function_list->C_FindObjects(session_handle, found_objects, 2 /*max*/, &num_found); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_FindObjects", session_handle, rv); goto clean_up; } if ((num_found == 0) || (found_objects[0] == CK_INVALID_HANDLE)) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: Failed to find private key on PKCS#11 token which matches search criteria", (void *)pkcs11_lib, session_handle); aws_raise_error(AWS_ERROR_PKCS11_KEY_NOT_FOUND); goto clean_up; } if (num_found > 1) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: Failed to choose private key, multiple objects on PKCS#11 token match search criteria", (void *)pkcs11_lib, session_handle); aws_raise_error(AWS_ERROR_PKCS11_KEY_NOT_FOUND); goto clean_up; } /* key found */ CK_OBJECT_HANDLE key_handle = found_objects[0]; /* query key-type */ CK_KEY_TYPE key_type = 0; CK_ATTRIBUTE key_attributes[] = { { .type = CKA_KEY_TYPE, .pValue = &key_type, .ulValueLen = sizeof(key_type), }, }; rv = pkcs11_lib->function_list->C_GetAttributeValue( session_handle, key_handle, key_attributes, AWS_ARRAY_SIZE(key_attributes)); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_GetAttributeValue", session_handle, rv); goto clean_up; } switch (key_type) { case CKK_RSA: case CKK_EC: break; default: AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: PKCS#11 private key type %s (0x%08lX) is currently unsupported", (void *)pkcs11_lib, session_handle, s_ckk_str(key_type), key_type); aws_raise_error(AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED); goto clean_up; } /* Success! */ AWS_LOGF_TRACE( AWS_LS_IO_PKCS11, "id=%p session=%lu: Found private key. type=%s", (void *)pkcs11_lib, session_handle, s_ckk_str(key_type)); *out_key_handle = key_handle; *out_key_type = key_type; success = true; clean_up: if (must_finalize_search) { rv = pkcs11_lib->function_list->C_FindObjectsFinal(session_handle); /* don't bother reporting error if we were already failing */ if ((rv != CKR_OK) && (success == true)) { s_raise_ck_session_error(pkcs11_lib, "C_FindObjectsFinal", session_handle, rv); success = false; } } return success ? 
AWS_OP_SUCCESS : AWS_OP_ERR; } int aws_pkcs11_lib_decrypt( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, CK_OBJECT_HANDLE key_handle, CK_KEY_TYPE key_type, struct aws_byte_cursor encrypted_data, struct aws_allocator *allocator, struct aws_byte_buf *out_data) { AWS_ASSERT(encrypted_data.len <= ULONG_MAX); /* do real error checking if this becomes a public API */ AWS_ASSERT(out_data->allocator == NULL); CK_MECHANISM mechanism; AWS_ZERO_STRUCT(mechanism); /* Note, CKK_EC is not expected to enter into this code path */ switch (key_type) { case CKK_RSA: mechanism.mechanism = CKM_RSA_PKCS; break; default: aws_raise_error(AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED); goto error; } /* initialize the decryption operation */ CK_RV rv = pkcs11_lib->function_list->C_DecryptInit(session_handle, &mechanism, key_handle); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_DecryptInit", session_handle, rv); goto error; } /* query needed capacity (finalizes decryption operation if it fails) */ CK_ULONG data_len = 0; rv = pkcs11_lib->function_list->C_Decrypt( session_handle, encrypted_data.ptr, (CK_ULONG)encrypted_data.len, NULL /*pData*/, &data_len); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_Decrypt", session_handle, rv); goto error; } aws_byte_buf_init(out_data, allocator, data_len); /* cannot fail */ /* do actual decrypt (finalizes decryption operation, whether it succeeds or fails)*/ rv = pkcs11_lib->function_list->C_Decrypt( session_handle, encrypted_data.ptr, (CK_ULONG)encrypted_data.len, out_data->buffer, &data_len); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_Decrypt", session_handle, rv); goto error; } out_data->len = data_len; return AWS_OP_SUCCESS; error: aws_byte_buf_clean_up(out_data); return AWS_OP_ERR; } /* runs C_Sign(), putting encrypted message into out_signature */ static int s_pkcs11_sign_helper( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, CK_OBJECT_HANDLE key_handle, CK_MECHANISM mechanism, struct aws_byte_cursor input_data, struct aws_allocator *allocator, struct aws_byte_buf *out_signature) { /* initialize signing operation */ CK_RV rv = pkcs11_lib->function_list->C_SignInit(session_handle, &mechanism, key_handle); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_SignInit", session_handle, rv); goto error; } /* query needed capacity (finalizes signing operation if it fails) */ CK_ULONG signature_len = 0; rv = pkcs11_lib->function_list->C_Sign( session_handle, input_data.ptr, (CK_ULONG)input_data.len, NULL /*pSignature*/, &signature_len); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_Sign", session_handle, rv); goto error; } aws_byte_buf_init(out_signature, allocator, signature_len); /* cannot fail */ /* do actual signing (finalizes signing operation, whether it succeeds or fails) */ rv = pkcs11_lib->function_list->C_Sign( session_handle, input_data.ptr, (CK_ULONG)input_data.len, out_signature->buffer, &signature_len); if (rv != CKR_OK) { s_raise_ck_session_error(pkcs11_lib, "C_Sign", session_handle, rv); goto error; } out_signature->len = signature_len; return AWS_OP_SUCCESS; error: aws_byte_buf_clean_up(out_signature); return AWS_OP_ERR; } int aws_get_prefix_to_rsa_sig(enum aws_tls_hash_algorithm digest_alg, struct aws_byte_cursor *out_prefix) { switch (digest_alg) { case AWS_TLS_HASH_SHA1: *out_prefix = aws_byte_cursor_from_array(SHA1_PREFIX_TO_RSA_SIG, sizeof(SHA1_PREFIX_TO_RSA_SIG)); break; case AWS_TLS_HASH_SHA224: *out_prefix = 
aws_byte_cursor_from_array(SHA224_PREFIX_TO_RSA_SIG, sizeof(SHA224_PREFIX_TO_RSA_SIG)); break; case AWS_TLS_HASH_SHA256: *out_prefix = aws_byte_cursor_from_array(SHA256_PREFIX_TO_RSA_SIG, sizeof(SHA256_PREFIX_TO_RSA_SIG)); break; case AWS_TLS_HASH_SHA384: *out_prefix = aws_byte_cursor_from_array(SHA384_PREFIX_TO_RSA_SIG, sizeof(SHA384_PREFIX_TO_RSA_SIG)); break; case AWS_TLS_HASH_SHA512: *out_prefix = aws_byte_cursor_from_array(SHA512_PREFIX_TO_RSA_SIG, sizeof(SHA512_PREFIX_TO_RSA_SIG)); break; default: return aws_raise_error(AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED); } return AWS_OP_SUCCESS; } static int s_pkcs11_sign_rsa( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, CK_OBJECT_HANDLE key_handle, struct aws_byte_cursor digest_data, struct aws_allocator *allocator, enum aws_tls_hash_algorithm digest_alg, enum aws_tls_signature_algorithm signature_alg, struct aws_byte_buf *out_signature) { if (signature_alg != AWS_TLS_SIGNATURE_RSA) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: Signature algorithm '%s' is currently unsupported for PKCS#11 RSA keys. " "Supported algorithms are: RSA", (void *)pkcs11_lib, session_handle, aws_tls_signature_algorithm_str(signature_alg)); return aws_raise_error(AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED); } struct aws_byte_cursor prefix; if (aws_get_prefix_to_rsa_sig(digest_alg, &prefix)) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: Unsupported digest '%s' for PKCS#11 RSA signing. " "Supported digests are: SHA1, SHA256, SHA384 and SHA512. AWS error: %s", (void *)pkcs11_lib, session_handle, aws_tls_hash_algorithm_str(digest_alg), aws_error_name(aws_last_error())); return AWS_OP_ERR; } bool success = false; struct aws_byte_buf prefixed_input; aws_byte_buf_init(&prefixed_input, allocator, digest_data.len + prefix.len); /* cannot fail */ aws_byte_buf_write_from_whole_cursor(&prefixed_input, prefix); aws_byte_buf_write_from_whole_cursor(&prefixed_input, digest_data); /* We could get the original input and not the digest to sign and leverage CKM_SHA*_RSA_PKCS mechanisms * but the original input is too large (all the TLS handshake messages until clientCertVerify) and * we do not want to perform the digest inside the TPM for performance reasons, therefore we only * leverage CKM_RSA_PKCS mechanism and *only* sign the digest using TPM. Only signing requires * additional prefix to the input to complete the digest part for RSA signing. */ CK_MECHANISM mechanism = {.mechanism = CKM_RSA_PKCS}; if (s_pkcs11_sign_helper( pkcs11_lib, session_handle, key_handle, mechanism, aws_byte_cursor_from_buf(&prefixed_input), allocator, out_signature)) { goto error; } success = true; goto clean_up; error: aws_byte_buf_clean_up(out_signature); clean_up: aws_byte_buf_clean_up(&prefixed_input); return success ? 
AWS_OP_SUCCESS : AWS_OP_ERR; } /* * Basic ASN.1 (DER) encoding of header -- sufficient for ECDSA */ static int s_asn1_enc_prefix(struct aws_byte_buf *buffer, uint8_t identifier, size_t length) { if (((identifier & 0x1f) == 0x1f) || (length > 0x7f)) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Unable to encode ASN.1 (DER) header 0x%02x %zu", identifier, length); return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR); } uint8_t head[2]; head[0] = identifier; head[1] = (uint8_t)length; if (!aws_byte_buf_write(buffer, head, sizeof(head))) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "Insufficient buffer to encode ASN.1 (DER) header 0x%02x %zu", identifier, length); return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR); } return AWS_OP_SUCCESS; } /* * Basic ASN.1 (DER) encoding of an unsigned big number -- sufficient for ECDSA. Note that this implementation * may reduce the number of integer bytes down to 1 (removing leading zero bytes), or conversely increase by * one extra byte to ensure the unsigned integer is unambiguously encoded. */ int aws_pkcs11_asn1_enc_ubigint(struct aws_byte_buf *const buffer, struct aws_byte_cursor bigint) { // trim out all leading zero's while (bigint.len > 0 && bigint.ptr[0] == 0) { aws_byte_cursor_advance(&bigint, 1); } // If the most significant bit is a '1', prefix with a zero-byte to prevent misinterpreting number as negative. // If the big integer value was zero, length will be zero, replace with zero-byte using the same approach. bool add_leading_zero = bigint.len == 0 || (bigint.ptr[0] & 0x80) != 0; size_t actual_len = bigint.len + (add_leading_zero ? 1 : 0); // header - indicate integer of given length (including any prefix zero) bool success = s_asn1_enc_prefix(buffer, 0x02, actual_len) == AWS_OP_SUCCESS; if (add_leading_zero) { success = success && aws_byte_buf_write_u8(buffer, 0); } // write rest of number success = success && aws_byte_buf_write_from_whole_cursor(buffer, bigint); if (success) { return AWS_OP_SUCCESS; } else { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "Insufficient buffer to ASN.1 (DER) encode big integer of length %zu", actual_len); return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR); } } static int s_pkcs11_sign_ecdsa( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, CK_OBJECT_HANDLE key_handle, struct aws_byte_cursor digest_data, struct aws_allocator *allocator, enum aws_tls_signature_algorithm signature_alg, struct aws_byte_buf *out_signature) { struct aws_byte_buf part_signature; struct aws_byte_buf r_part; struct aws_byte_buf s_part; AWS_ZERO_STRUCT(part_signature); AWS_ZERO_STRUCT(r_part); AWS_ZERO_STRUCT(s_part); if (signature_alg != AWS_TLS_SIGNATURE_ECDSA) { AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "id=%p session=%lu: Signature algorithm '%s' is currently unsupported for PKCS#11 EC keys. " "Supported algorithms are: ECDSA", (void *)pkcs11_lib, session_handle, aws_tls_signature_algorithm_str(signature_alg)); return aws_raise_error(AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED); } bool success = false; /* ECDSA signing consists of DER-encoding of "r" and "s" parameters. C_Sign returns the two * integers as big numbers in big-endian format, so translation is required. */ CK_MECHANISM mechanism = {.mechanism = CKM_ECDSA}; if (s_pkcs11_sign_helper( pkcs11_lib, session_handle, key_handle, mechanism, digest_data, allocator, &part_signature) != AWS_OP_SUCCESS) { goto error; } /* PKCS11 library returns these parameters as two big unsigned integer numbers of exactly the same length. The * numbers need to be ASN.1/DER encoded (variable length). 
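 * Worked example (assuming a 256-bit curve, so C_Sign() hands back 64 bytes): r = bytes[0..31] and
 * s = bytes[32..63]; each half is emitted as 0x02 <len> [0x00] <big-endian value> by aws_pkcs11_asn1_enc_ubigint(),
 * and the two encodings are then wrapped in a single 0x30 <len> SEQUENCE header via s_asn1_enc_prefix().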
In addition to the header, space is needed to allow for * an occasional extra 0x00 prefix byte to ensure integer is encoded and interpreted as unsigned. */ if (part_signature.len == 0 || (part_signature.len & 1) != 0) { /* This should never happen, we would fail anyway, but making it explicit and fail early */ AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "PKCS11 library returned an invalid length, unable to interpret ECDSA signature to encode correctly."); return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR); goto error; } size_t num_bytes = part_signature.len / 2; aws_byte_buf_init(&r_part, allocator, num_bytes + 4); aws_byte_buf_init(&s_part, allocator, num_bytes + 4); if (aws_pkcs11_asn1_enc_ubigint(&r_part, aws_byte_cursor_from_array(part_signature.buffer, num_bytes)) != AWS_OP_SUCCESS) { goto error; } if (aws_pkcs11_asn1_enc_ubigint( &s_part, aws_byte_cursor_from_array(part_signature.buffer + num_bytes, num_bytes)) != AWS_OP_SUCCESS) { goto error; } size_t pair_len = r_part.len + s_part.len; aws_byte_buf_init(out_signature, allocator, pair_len + 2); // inc header if (s_asn1_enc_prefix(out_signature, 0x30, pair_len) != AWS_OP_SUCCESS) { goto error; } if (!aws_byte_buf_write_from_whole_buffer(out_signature, r_part)) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Insufficient buffer to ASN.1 (DER) encode ECDSA signature R-part."); return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR); goto error; } if (!aws_byte_buf_write_from_whole_buffer(out_signature, s_part)) { AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "Insufficient buffer to ASN.1 (DER) encode ECDSA signature S-part."); return aws_raise_error(AWS_ERROR_PKCS11_ENCODING_ERROR); goto error; } success = true; goto clean_up; error: aws_byte_buf_clean_up(out_signature); clean_up: aws_byte_buf_clean_up(&part_signature); aws_byte_buf_clean_up(&r_part); aws_byte_buf_clean_up(&s_part); return success ? AWS_OP_SUCCESS : AWS_OP_ERR; } int aws_pkcs11_lib_sign( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, CK_OBJECT_HANDLE key_handle, CK_KEY_TYPE key_type, struct aws_byte_cursor digest_data, struct aws_allocator *allocator, enum aws_tls_hash_algorithm digest_alg, enum aws_tls_signature_algorithm signature_alg, struct aws_byte_buf *out_signature) { AWS_ASSERT(digest_data.len <= ULONG_MAX); /* do real error checking if this becomes a public API */ AWS_ASSERT(out_signature->allocator == NULL); switch (key_type) { case CKK_RSA: return s_pkcs11_sign_rsa( pkcs11_lib, session_handle, key_handle, digest_data, allocator, digest_alg, signature_alg, out_signature); case CKK_EC: return s_pkcs11_sign_ecdsa( pkcs11_lib, session_handle, key_handle, digest_data, allocator, // not digest_alg -- need to check this signature_alg, out_signature); default: return aws_raise_error(AWS_ERROR_PKCS11_KEY_TYPE_UNSUPPORTED); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pkcs11_private.h000066400000000000000000000112241456575232400241440ustar00rootroot00000000000000#ifndef AWS_IO_PKCS11_PRIVATE_H #define AWS_IO_PKCS11_PRIVATE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "pkcs11/v2.40/pkcs11.h" /** * pkcs11_private.h * This file declares symbols that are private to aws-c-io but need to be * accessed from multiple .c files. * * NOTE: Not putting this file under `include/private/...` like we usually * do with private headers because it breaks aws-crt-swift. Swift was trying * to compile each file under include/, but the official PKCS#11 header files * are too weird break it. 
*/ struct aws_pkcs11_lib; struct aws_pkcs11_tls_key_handler; struct aws_string; AWS_EXTERN_C_BEGIN /** * Return c-string for PKCS#11 CKR_* constant. * For use in tests only. */ AWS_IO_API const char *aws_pkcs11_ckr_str(CK_RV rv); /** * Return the raw function list. * For use in tests only. */ AWS_IO_API CK_FUNCTION_LIST *aws_pkcs11_lib_get_function_list(struct aws_pkcs11_lib *pkcs11_lib); /** * Find the slot that meets all criteria: * - has a token * - if match_slot_id is non-null, then slot IDs must match * - if match_token_label is non-null, then labels must match * The function fails unless it finds exactly one slot meeting all criteria. */ AWS_IO_API int aws_pkcs11_lib_find_slot_with_token( struct aws_pkcs11_lib *pkcs11_lib, const uint64_t *match_slot_id, const struct aws_string *match_token_label, CK_SLOT_ID *out_slot_id); AWS_IO_API int aws_pkcs11_lib_open_session( struct aws_pkcs11_lib *pkcs11_lib, CK_SLOT_ID slot_id, CK_SESSION_HANDLE *out_session_handle); AWS_IO_API void aws_pkcs11_lib_close_session(struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle); AWS_IO_API int aws_pkcs11_lib_login_user( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, const struct aws_string *optional_user_pin); /** * Find the object that meets all criteria: * - is private key * - if match_label is non-null, then labels must match * The function fails unless it finds exactly one object meeting all criteria. */ AWS_IO_API int aws_pkcs11_lib_find_private_key( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, const struct aws_string *match_label, CK_OBJECT_HANDLE *out_key_handle, CK_KEY_TYPE *out_key_type); /** * Decrypt the encrypted data. * out_data should be passed in uninitialized. * If successful, out_data will be initialized and contain the recovered data. */ AWS_IO_API int aws_pkcs11_lib_decrypt( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, CK_OBJECT_HANDLE key_handle, CK_KEY_TYPE key_type, struct aws_byte_cursor encrypted_data, struct aws_allocator *allocator, struct aws_byte_buf *out_data); /** * Sign a digest with the private key during TLS negotiation. * out_signature should be passed in uninitialized. * If successful, out_signature will be initialized and contain the signature. */ AWS_IO_API int aws_pkcs11_lib_sign( struct aws_pkcs11_lib *pkcs11_lib, CK_SESSION_HANDLE session_handle, CK_OBJECT_HANDLE key_handle, CK_KEY_TYPE key_type, struct aws_byte_cursor digest_data, struct aws_allocator *allocator, enum aws_tls_hash_algorithm digest_alg, enum aws_tls_signature_algorithm signature_alg, struct aws_byte_buf *out_signature); /** * Get the DER encoded DigestInfo value to be prefixed to the hash, used for RSA signing * See https://tools.ietf.org/html/rfc3447#page-43 */ AWS_IO_API int aws_get_prefix_to_rsa_sig(enum aws_tls_hash_algorithm digest_alg, struct aws_byte_cursor *out_prefix); /** * ASN.1 DER encode a big unsigned integer. Note that the source integer may be zero padded. It may also have * most significant bit set. The encoded format is canonical and unambiguous - that is, most significant * bit is never set. */ AWS_IO_API int aws_pkcs11_asn1_enc_ubigint(struct aws_byte_buf *const buffer, struct aws_byte_cursor bigint); /** * Creates a new PKCS11 TLS operation handler with an associated aws_custom_key_op_handler * with a reference count set to 1. * * The PKCS11 TLS operation handler will automatically be destroyed when the reference count reaches zero * on the aws_custom_key_op_handler. 
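 *
 * Hedged usage sketch (variable names and literal values below are illustrative assumptions, not part of the API;
 * a NULL match_slot_id means any slot):
 *   struct aws_byte_cursor pin = aws_byte_cursor_from_c_str("0000");
 *   struct aws_byte_cursor token_label = aws_byte_cursor_from_c_str("my-token");
 *   struct aws_byte_cursor key_label = aws_byte_cursor_from_c_str("my-key");
 *   struct aws_custom_key_op_handler *handler =
 *       aws_pkcs11_tls_op_handler_new(allocator, pkcs11_lib, &pin, &token_label, &key_label, NULL);
 *   if (handler == NULL) { ...inspect aws_last_error()... }
 *   ...hand the handler to the TLS context options, then drop the local reference with...
 *   aws_custom_key_op_handler_release(handler);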
*/ AWS_IO_API struct aws_custom_key_op_handler *aws_pkcs11_tls_op_handler_new( struct aws_allocator *allocator, struct aws_pkcs11_lib *pkcs11_lib, const struct aws_byte_cursor *user_pin, const struct aws_byte_cursor *match_token_label, const struct aws_byte_cursor *match_private_key_label, const uint64_t *match_slot_id); AWS_EXTERN_C_END #endif /* AWS_IO_PKCS11_PRIVATE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/pkcs11_tls_op_handler.c000066400000000000000000000170341456575232400254670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "pkcs11_private.h" #include #include #include struct aws_pkcs11_tls_op_handler { /* The custom key operation handler needed for the callbacks */ struct aws_custom_key_op_handler base; struct aws_allocator *alloc; struct aws_pkcs11_lib *lib; /* Use a single PKCS#11 session for all TLS connections on an aws_tls_ctx. * We do this because PKCS#11 tokens may only support a * limited number of sessions (PKCS11-UG-v2.40 section 2.6.7). * If this one shared session turns out to be a severe bottleneck, * we could look into other setups (ex: put session on its own thread, * 1 session per event-loop, 1 session per connection, etc). * * The lock must be held while performing session operations. * Otherwise, it would not be safe for multiple threads to share a * session (PKCS11-UG-v2.40 section 2.6.7). The lock isn't needed for * setup and teardown though, since we ensure nothing parallel is going * on at these times */ struct aws_mutex session_lock; CK_SESSION_HANDLE session_handle; CK_OBJECT_HANDLE private_key_handle; CK_KEY_TYPE private_key_type; }; static void s_aws_custom_key_op_handler_destroy(struct aws_custom_key_op_handler *key_op_handler) { struct aws_pkcs11_tls_op_handler *handler = (struct aws_pkcs11_tls_op_handler *)key_op_handler->impl; if (handler->session_handle != 0) { aws_pkcs11_lib_close_session(handler->lib, handler->session_handle); } aws_mutex_clean_up(&handler->session_lock); aws_pkcs11_lib_release(handler->lib); aws_mem_release(handler->alloc, handler); } /** * Performs the PKCS11 TLS private key operation. This is called automatically when performing a mutual TLS handshake. 
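 * Flow (a descriptive summary of the implementation below): take the shared session lock, dispatch on
 * aws_tls_key_operation_get_type() (AWS_TLS_KEY_OPERATION_DECRYPT calls aws_pkcs11_lib_decrypt(),
 * AWS_TLS_KEY_OPERATION_SIGN calls aws_pkcs11_lib_sign()), then, after unlocking, finish with
 * aws_tls_key_operation_complete() on success or aws_tls_key_operation_complete_with_error() on failure.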
*/ void s_aws_pkcs11_tls_op_handler_do_operation( struct aws_custom_key_op_handler *handler, struct aws_tls_key_operation *operation) { struct aws_pkcs11_tls_op_handler *pkcs11_handler = (struct aws_pkcs11_tls_op_handler *)handler->impl; struct aws_byte_buf output_buf; /* initialized later */ AWS_ZERO_STRUCT(output_buf); /*********** BEGIN CRITICAL SECTION ***********/ aws_mutex_lock(&pkcs11_handler->session_lock); bool success_while_locked = false; switch (aws_tls_key_operation_get_type(operation)) { case AWS_TLS_KEY_OPERATION_DECRYPT: if (aws_pkcs11_lib_decrypt( pkcs11_handler->lib, pkcs11_handler->session_handle, pkcs11_handler->private_key_handle, pkcs11_handler->private_key_type, aws_tls_key_operation_get_input(operation), pkcs11_handler->alloc, &output_buf)) { goto unlock; } break; case AWS_TLS_KEY_OPERATION_SIGN: if (aws_pkcs11_lib_sign( pkcs11_handler->lib, pkcs11_handler->session_handle, pkcs11_handler->private_key_handle, pkcs11_handler->private_key_type, aws_tls_key_operation_get_input(operation), pkcs11_handler->alloc, aws_tls_key_operation_get_digest_algorithm(operation), aws_tls_key_operation_get_signature_algorithm(operation), &output_buf)) { goto unlock; } break; default: AWS_LOGF_ERROR( AWS_LS_IO_PKCS11, "PKCS11 Handler %p: Unknown TLS key operation with value of %u", (void *)handler, aws_tls_key_operation_get_type(operation)); aws_raise_error(AWS_ERROR_INVALID_STATE); goto unlock; } success_while_locked = true; unlock: aws_mutex_unlock(&pkcs11_handler->session_lock); /*********** END CRITICAL SECTION ***********/ if (success_while_locked) { aws_tls_key_operation_complete(operation, aws_byte_cursor_from_buf(&output_buf)); } else { aws_tls_key_operation_complete_with_error(operation, aws_last_error()); } aws_byte_buf_clean_up(&output_buf); } static struct aws_custom_key_op_handler_vtable s_aws_custom_key_op_handler_vtable = { .on_key_operation = s_aws_pkcs11_tls_op_handler_do_operation, }; struct aws_custom_key_op_handler *aws_pkcs11_tls_op_handler_new( struct aws_allocator *allocator, struct aws_pkcs11_lib *pkcs11_lib, const struct aws_byte_cursor *user_pin, const struct aws_byte_cursor *match_token_label, const struct aws_byte_cursor *match_private_key_label, const uint64_t *match_slot_id) { bool success = false; struct aws_pkcs11_tls_op_handler *pkcs11_handler = aws_mem_calloc(allocator, 1, sizeof(struct aws_pkcs11_tls_op_handler)); // Optional data struct aws_string *pkcs_user_pin = NULL; struct aws_string *pkcs_token_label = NULL; struct aws_string *pkcs_private_key_object_label = NULL; aws_ref_count_init( &pkcs11_handler->base.ref_count, &pkcs11_handler->base, (aws_simple_completion_callback *)s_aws_custom_key_op_handler_destroy); pkcs11_handler->base.impl = (void *)pkcs11_handler; pkcs11_handler->base.vtable = &s_aws_custom_key_op_handler_vtable; pkcs11_handler->alloc = allocator; /* pkcs11_lib is required */ if (pkcs11_lib == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); AWS_LOGF_ERROR(AWS_LS_IO_PKCS11, "PKCS11 Handler %p new: PKCS11 library is null", (void *)pkcs11_handler); goto done; } pkcs11_handler->lib = aws_pkcs11_lib_acquire(pkcs11_lib); /* cannot fail */ aws_mutex_init(&pkcs11_handler->session_lock); /* user_pin is optional */ if (user_pin->ptr != NULL) { pkcs_user_pin = aws_string_new_from_cursor(allocator, user_pin); } /* token_label is optional */ if (match_token_label->ptr != NULL) { pkcs_token_label = aws_string_new_from_cursor(allocator, match_token_label); } /* private_key_object_label is optional */ if (match_private_key_label->ptr != NULL) { 
pkcs_private_key_object_label = aws_string_new_from_cursor(allocator, match_private_key_label); } CK_SLOT_ID slot_id; if (aws_pkcs11_lib_find_slot_with_token(pkcs11_handler->lib, match_slot_id, pkcs_token_label, &slot_id /*out*/)) { goto done; } if (aws_pkcs11_lib_open_session(pkcs11_handler->lib, slot_id, &pkcs11_handler->session_handle)) { goto done; } if (aws_pkcs11_lib_login_user(pkcs11_handler->lib, pkcs11_handler->session_handle, pkcs_user_pin)) { goto done; } if (aws_pkcs11_lib_find_private_key( pkcs11_handler->lib, pkcs11_handler->session_handle, pkcs_private_key_object_label, &pkcs11_handler->private_key_handle /*out*/, &pkcs11_handler->private_key_type /*out*/)) { goto done; } success = true; done: /* CLEANUP */ if (pkcs_user_pin != NULL) { aws_string_destroy_secure(pkcs_user_pin); } if (pkcs_token_label != NULL) { aws_string_destroy(pkcs_token_label); } if (pkcs_private_key_object_label != NULL) { aws_string_destroy(pkcs_private_key_object_label); } if (success) { return &pkcs11_handler->base; } else { aws_custom_key_op_handler_release(&pkcs11_handler->base); return NULL; } } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/posix/000077500000000000000000000000001456575232400223015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/posix/host_resolver.c000066400000000000000000000071111456575232400253430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include int aws_default_dns_resolve( struct aws_allocator *allocator, const struct aws_string *host_name, struct aws_array_list *output_addresses, void *user_data) { (void)user_data; struct addrinfo *result = NULL; struct addrinfo *iter = NULL; /* max string length for ipv6. 
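INET6_ADDRSTRLEN (46 per POSIX) covers the longest textual IPv6 address plus the terminating null, and is more than enough for IPv4 strings as well.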
*/ socklen_t max_len = INET6_ADDRSTRLEN; char address_buffer[max_len]; const char *hostname_cstr = aws_string_c_str(host_name); AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr); /* Android would prefer NO HINTS IF YOU DON'T MIND, SIR */ #if defined(ANDROID) int err_code = getaddrinfo(hostname_cstr, NULL, NULL, &result); #else struct addrinfo hints; AWS_ZERO_STRUCT(hints); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; # if !defined(__OpenBSD__) hints.ai_flags = AI_ALL | AI_V4MAPPED; # endif /* __OpenBSD__ */ int err_code = getaddrinfo(hostname_cstr, NULL, &hints, &result); #endif if (err_code) { AWS_LOGF_ERROR( AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d: %s", err_code, gai_strerror(err_code)); goto clean_up; } for (iter = result; iter != NULL; iter = iter->ai_next) { struct aws_host_address host_address; AWS_ZERO_ARRAY(address_buffer); if (iter->ai_family == AF_INET6) { host_address.record_type = AWS_ADDRESS_RECORD_TYPE_AAAA; inet_ntop(iter->ai_family, &((struct sockaddr_in6 *)iter->ai_addr)->sin6_addr, address_buffer, max_len); } else { host_address.record_type = AWS_ADDRESS_RECORD_TYPE_A; inet_ntop(iter->ai_family, &((struct sockaddr_in *)iter->ai_addr)->sin_addr, address_buffer, max_len); } size_t address_len = strlen(address_buffer); const struct aws_string *address = aws_string_new_from_array(allocator, (const uint8_t *)address_buffer, address_len); if (!address) { goto clean_up; } const struct aws_string *host_cpy = aws_string_new_from_string(allocator, host_name); if (!host_cpy) { aws_string_destroy((void *)address); goto clean_up; } AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolved record: %s", address_buffer); host_address.address = address; host_address.weight = 0; host_address.allocator = allocator; host_address.use_count = 0; host_address.connection_failure_count = 0; host_address.host = host_cpy; if (aws_array_list_push_back(output_addresses, &host_address)) { aws_host_address_clean_up(&host_address); goto clean_up; } } freeaddrinfo(result); return AWS_OP_SUCCESS; clean_up: if (result) { freeaddrinfo(result); } if (err_code) { switch (err_code) { case EAI_FAIL: case EAI_AGAIN: return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); case EAI_MEMORY: return aws_raise_error(AWS_ERROR_OOM); case EAI_NONAME: case EAI_SERVICE: return aws_raise_error(AWS_IO_DNS_INVALID_NAME); default: return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } } return AWS_OP_ERR; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/posix/pipe.c000066400000000000000000000451661456575232400234160ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #ifdef __GLIBC__ # define __USE_GNU #endif /* TODO: move this detection to CMAKE and a config header */ #if !defined(COMPAT_MODE) && defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 9) || __GLIBC__ > 2) # define HAVE_PIPE2 1 #else # define HAVE_PIPE2 0 #endif #include #include #include /* This isn't defined on ancient linux distros (breaking the builds). * However, if this is a prebuild, we purposely build on an ancient system, but * we want the kernel calls to still be the same as a modern build since that's likely the target of the application * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag * gets passed as long as it does. 
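* (02000000 octal is the O_CLOEXEC value used by the asm-generic Linux headers; platforms that use a different value already define the macro, so the #ifndef below leaves them alone.)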
*/ #ifndef O_CLOEXEC # define O_CLOEXEC 02000000 #endif struct read_end_impl { struct aws_allocator *alloc; struct aws_io_handle handle; struct aws_event_loop *event_loop; aws_pipe_on_readable_fn *on_readable_user_callback; void *on_readable_user_data; /* Used in handshake for detecting whether user callback resulted in read-end being cleaned up. * If clean_up() sees that the pointer is set, the bool it points to will get set true. */ bool *did_user_callback_clean_up_read_end; bool is_subscribed; }; struct pipe_write_request { struct aws_byte_cursor original_cursor; struct aws_byte_cursor cursor; /* tracks progress of write */ size_t num_bytes_written; aws_pipe_on_write_completed_fn *user_callback; void *user_data; struct aws_linked_list_node list_node; /* True if the write-end is cleaned up while the user callback is being invoked */ bool did_user_callback_clean_up_write_end; }; struct write_end_impl { struct aws_allocator *alloc; struct aws_io_handle handle; struct aws_event_loop *event_loop; struct aws_linked_list write_list; /* Valid while invoking user callback on a completed write request. */ struct pipe_write_request *currently_invoking_write_callback; bool is_writable; /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around * and re-using it whenever possible */ }; static void s_write_end_on_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data); static int s_translate_posix_error(int err) { AWS_ASSERT(err); switch (err) { case EPIPE: return AWS_IO_BROKEN_PIPE; default: return AWS_ERROR_SYS_CALL_FAILURE; } } static int s_raise_posix_error(int err) { return aws_raise_error(s_translate_posix_error(err)); } AWS_IO_API int aws_open_nonblocking_posix_pipe(int pipe_fds[2]) { int err; #if HAVE_PIPE2 err = pipe2(pipe_fds, O_NONBLOCK | O_CLOEXEC); if (err) { return s_raise_posix_error(err); } return AWS_OP_SUCCESS; #else err = pipe(pipe_fds); if (err) { return s_raise_posix_error(err); } for (int i = 0; i < 2; ++i) { int flags = fcntl(pipe_fds[i], F_GETFL); if (flags == -1) { s_raise_posix_error(err); goto error; } flags |= O_NONBLOCK | O_CLOEXEC; if (fcntl(pipe_fds[i], F_SETFL, flags) == -1) { s_raise_posix_error(err); goto error; } } return AWS_OP_SUCCESS; error: close(pipe_fds[0]); close(pipe_fds[1]); return AWS_OP_ERR; #endif } int aws_pipe_init( struct aws_pipe_read_end *read_end, struct aws_event_loop *read_end_event_loop, struct aws_pipe_write_end *write_end, struct aws_event_loop *write_end_event_loop, struct aws_allocator *allocator) { AWS_ASSERT(read_end); AWS_ASSERT(read_end_event_loop); AWS_ASSERT(write_end); AWS_ASSERT(write_end_event_loop); AWS_ASSERT(allocator); AWS_ZERO_STRUCT(*read_end); AWS_ZERO_STRUCT(*write_end); struct read_end_impl *read_impl = NULL; struct write_end_impl *write_impl = NULL; int err; /* Open pipe */ int pipe_fds[2]; err = aws_open_nonblocking_posix_pipe(pipe_fds); if (err) { return AWS_OP_ERR; } /* Init read-end */ read_impl = aws_mem_calloc(allocator, 1, sizeof(struct read_end_impl)); if (!read_impl) { goto error; } read_impl->alloc = allocator; read_impl->handle.data.fd = pipe_fds[0]; read_impl->event_loop = read_end_event_loop; /* Init write-end */ write_impl = aws_mem_calloc(allocator, 1, sizeof(struct write_end_impl)); if (!write_impl) { goto error; } write_impl->alloc = allocator; write_impl->handle.data.fd = pipe_fds[1]; write_impl->event_loop = write_end_event_loop; write_impl->is_writable = true; /* Assume pipe is writable to start. 
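(A wrong guess is harmless: a write() that comes back EAGAIN flips is_writable to false again until the next WRITABLE event arrives.)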
Even if it's not, things shouldn't break */ aws_linked_list_init(&write_impl->write_list); read_end->impl_data = read_impl; write_end->impl_data = write_impl; err = aws_event_loop_subscribe_to_io_events( write_end_event_loop, &write_impl->handle, AWS_IO_EVENT_TYPE_WRITABLE, s_write_end_on_event, write_end); if (err) { goto error; } return AWS_OP_SUCCESS; error: close(pipe_fds[0]); close(pipe_fds[1]); if (read_impl) { aws_mem_release(allocator, read_impl); } if (write_impl) { aws_mem_release(allocator, write_impl); } read_end->impl_data = NULL; write_end->impl_data = NULL; return AWS_OP_ERR; } int aws_pipe_clean_up_read_end(struct aws_pipe_read_end *read_end) { struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (read_impl->is_subscribed) { int err = aws_pipe_unsubscribe_from_readable_events(read_end); if (err) { return AWS_OP_ERR; } } /* If the event-handler is invoking a user callback, let it know that the read-end was cleaned up */ if (read_impl->did_user_callback_clean_up_read_end) { *read_impl->did_user_callback_clean_up_read_end = true; } close(read_impl->handle.data.fd); aws_mem_release(read_impl->alloc, read_impl); AWS_ZERO_STRUCT(*read_end); return AWS_OP_SUCCESS; } struct aws_event_loop *aws_pipe_get_read_end_event_loop(const struct aws_pipe_read_end *read_end) { const struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { aws_raise_error(AWS_IO_BROKEN_PIPE); return NULL; } return read_impl->event_loop; } struct aws_event_loop *aws_pipe_get_write_end_event_loop(const struct aws_pipe_write_end *write_end) { const struct write_end_impl *write_impl = write_end->impl_data; if (!write_impl) { aws_raise_error(AWS_IO_BROKEN_PIPE); return NULL; } return write_impl->event_loop; } int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_buffer, size_t *num_bytes_read) { AWS_ASSERT(dst_buffer && dst_buffer->buffer); struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (num_bytes_read) { *num_bytes_read = 0; } size_t num_bytes_to_read = dst_buffer->capacity - dst_buffer->len; ssize_t read_val = read(read_impl->handle.data.fd, dst_buffer->buffer + dst_buffer->len, num_bytes_to_read); if (read_val < 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } return s_raise_posix_error(errno_value); } /* Success */ dst_buffer->len += read_val; if (num_bytes_read) { *num_bytes_read = read_val; } return AWS_OP_SUCCESS; } static void s_read_end_on_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)event_loop; (void)handle; /* Note that it should be impossible for this to run after read-end has been unsubscribed or cleaned up */ struct aws_pipe_read_end *read_end = user_data; struct read_end_impl *read_impl = read_end->impl_data; AWS_ASSERT(read_impl); AWS_ASSERT(read_impl->event_loop == event_loop); AWS_ASSERT(&read_impl->handle == handle); AWS_ASSERT(read_impl->is_subscribed); AWS_ASSERT(events != 0); AWS_ASSERT(read_impl->did_user_callback_clean_up_read_end == NULL); /* Set up handshake, so we can be informed if the read-end is cleaned up while invoking a user callback */ bool 
did_user_callback_clean_up_read_end = false; read_impl->did_user_callback_clean_up_read_end = &did_user_callback_clean_up_read_end; /* If readable event received, tell user to try and read, even if "error" events have also occurred. */ if (events & AWS_IO_EVENT_TYPE_READABLE) { read_impl->on_readable_user_callback(read_end, AWS_ERROR_SUCCESS, read_impl->on_readable_user_data); if (did_user_callback_clean_up_read_end) { return; } events &= ~AWS_IO_EVENT_TYPE_READABLE; } if (events) { /* Check that user didn't unsubscribe in the previous callback */ if (read_impl->is_subscribed) { read_impl->on_readable_user_callback(read_end, AWS_IO_BROKEN_PIPE, read_impl->on_readable_user_data); if (did_user_callback_clean_up_read_end) { return; } } } read_impl->did_user_callback_clean_up_read_end = NULL; } int aws_pipe_subscribe_to_readable_events( struct aws_pipe_read_end *read_end, aws_pipe_on_readable_fn *on_readable, void *user_data) { AWS_ASSERT(on_readable); struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (read_impl->is_subscribed) { return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); } read_impl->is_subscribed = true; read_impl->on_readable_user_callback = on_readable; read_impl->on_readable_user_data = user_data; int err = aws_event_loop_subscribe_to_io_events( read_impl->event_loop, &read_impl->handle, AWS_IO_EVENT_TYPE_READABLE, s_read_end_on_event, read_end); if (err) { read_impl->is_subscribed = false; read_impl->on_readable_user_callback = NULL; read_impl->on_readable_user_data = NULL; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_pipe_unsubscribe_from_readable_events(struct aws_pipe_read_end *read_end) { struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (!read_impl->is_subscribed) { return aws_raise_error(AWS_ERROR_IO_NOT_SUBSCRIBED); } int err = aws_event_loop_unsubscribe_from_io_events(read_impl->event_loop, &read_impl->handle); if (err) { return AWS_OP_ERR; } read_impl->is_subscribed = false; read_impl->on_readable_user_callback = NULL; read_impl->on_readable_user_data = NULL; return AWS_OP_SUCCESS; } /* Pop front write request, invoke its callback, and delete it. * Returns whether the callback resulted in the write-end getting cleaned up */ static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end *write_end, int error_code) { struct write_end_impl *write_impl = write_end->impl_data; AWS_ASSERT(!aws_linked_list_empty(&write_impl->write_list)); struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); struct aws_allocator *alloc = write_impl->alloc; /* Let the write-end know that a callback is in process, so the write-end can inform the callback * whether it resulted in clean_up() being called. 
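* aws_pipe_clean_up_write_end() checks currently_invoking_write_callback and, if set, flags the request's did_user_callback_clean_up_write_end, so this function knows the write-end impl has already been freed and must not be touched.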
*/ bool write_end_cleaned_up_during_callback = false; struct pipe_write_request *prev_invoking_request = write_impl->currently_invoking_write_callback; write_impl->currently_invoking_write_callback = request; if (request->user_callback) { request->user_callback(write_end, error_code, request->original_cursor, request->user_data); write_end_cleaned_up_during_callback = request->did_user_callback_clean_up_write_end; } if (!write_end_cleaned_up_during_callback) { write_impl->currently_invoking_write_callback = prev_invoking_request; } aws_mem_release(alloc, request); return write_end_cleaned_up_during_callback; } /* Process write requests as long as the pipe remains writable */ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { struct write_end_impl *write_impl = write_end->impl_data; AWS_ASSERT(write_impl); while (!aws_linked_list_empty(&write_impl->write_list)) { struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); int completed_error_code = AWS_ERROR_SUCCESS; if (request->cursor.len > 0) { ssize_t write_val = write(write_impl->handle.data.fd, request->cursor.ptr, request->cursor.len); if (write_val < 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { /* The pipe is no longer writable. Bail out */ write_impl->is_writable = false; return; } /* A non-recoverable error occurred during this write */ completed_error_code = s_translate_posix_error(errno_value); } else { aws_byte_cursor_advance(&request->cursor, write_val); if (request->cursor.len > 0) { /* There was a partial write, loop again to try and write the rest. */ continue; } } } /* If we got this far in the loop, then the write request is complete. * Note that the callback may result in the pipe being cleaned up. */ bool write_end_cleaned_up = s_write_end_complete_front_write_request(write_end, completed_error_code); if (write_end_cleaned_up) { /* Bail out! Any remaining requests were canceled during clean_up() */ return; } } } /* Handle events on the write-end's file handle */ static void s_write_end_on_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)event_loop; (void)handle; /* Note that it should be impossible for this to run after write-end has been unsubscribed or cleaned up */ struct aws_pipe_write_end *write_end = user_data; struct write_end_impl *write_impl = write_end->impl_data; AWS_ASSERT(write_impl); AWS_ASSERT(write_impl->event_loop == event_loop); AWS_ASSERT(&write_impl->handle == handle); /* Only care about the writable event. 
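Spurious or error-only wake-ups are ignored; queued requests are retried only once the fd is reported writable again.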
*/ if ((events & AWS_IO_EVENT_TYPE_WRITABLE) == 0) { return; } write_impl->is_writable = true; s_write_end_process_requests(write_end); } int aws_pipe_write( struct aws_pipe_write_end *write_end, struct aws_byte_cursor src_buffer, aws_pipe_on_write_completed_fn *on_completed, void *user_data) { AWS_ASSERT(src_buffer.ptr); struct write_end_impl *write_impl = write_end->impl_data; if (!write_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } struct pipe_write_request *request = aws_mem_calloc(write_impl->alloc, 1, sizeof(struct pipe_write_request)); if (!request) { return AWS_OP_ERR; } request->original_cursor = src_buffer; request->cursor = src_buffer; request->user_callback = on_completed; request->user_data = user_data; aws_linked_list_push_back(&write_impl->write_list, &request->list_node); /* If the pipe is writable, process the request (unless pipe is already in the middle of processing, which could * happen if a this aws_pipe_write() call was made by another write's completion callback */ if (write_impl->is_writable && !write_impl->currently_invoking_write_callback) { s_write_end_process_requests(write_end); } return AWS_OP_SUCCESS; } int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) { struct write_end_impl *write_impl = write_end->impl_data; if (!write_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } int err = aws_event_loop_unsubscribe_from_io_events(write_impl->event_loop, &write_impl->handle); if (err) { return AWS_OP_ERR; } close(write_impl->handle.data.fd); /* Zero out write-end before invoking user callbacks so that it won't work anymore with public functions. */ AWS_ZERO_STRUCT(*write_end); /* If a request callback is currently being invoked, let it know that the write-end was cleaned up */ if (write_impl->currently_invoking_write_callback) { write_impl->currently_invoking_write_callback->did_user_callback_clean_up_write_end = true; } /* Force any outstanding write requests to complete with an error status. */ while (!aws_linked_list_empty(&write_impl->write_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); if (request->user_callback) { request->user_callback(NULL, AWS_IO_BROKEN_PIPE, request->original_cursor, request->user_data); } aws_mem_release(write_impl->alloc, request); } aws_mem_release(write_impl->alloc, write_impl); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/posix/shared_library.c000066400000000000000000000041641456575232400254440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include static const char *s_null = ""; static const char *s_unknown_error = ""; int aws_shared_library_init(struct aws_shared_library *library, const char *library_path) { AWS_ZERO_STRUCT(*library); library->library_handle = dlopen(library_path, RTLD_LAZY); if (library->library_handle == NULL) { const char *error = dlerror(); AWS_LOGF_ERROR( AWS_LS_IO_SHARED_LIBRARY, "id=%p: Failed to load shared library at path \"%s\" with error: %s", (void *)library, library_path ? library_path : s_null, error ? 
error : s_unknown_error); return aws_raise_error(AWS_IO_SHARED_LIBRARY_LOAD_FAILURE); } return AWS_OP_SUCCESS; } void aws_shared_library_clean_up(struct aws_shared_library *library) { if (library && library->library_handle) { dlclose(library->library_handle); library->library_handle = NULL; } } int aws_shared_library_find_function( struct aws_shared_library *library, const char *symbol_name, aws_generic_function *function_address) { if (library == NULL || library->library_handle == NULL) { return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); } /* * Suggested work around for (undefined behavior) cast from void * to function pointer * in POSIX.1-2003 standard, at least according to dlsym man page code sample. */ *(void **)(function_address) = dlsym(library->library_handle, symbol_name); if (*function_address == NULL) { const char *error = dlerror(); AWS_LOGF_ERROR( AWS_LS_IO_SHARED_LIBRARY, "id=%p: Failed to find shared library symbol \"%s\" with error: %s", (void *)library, symbol_name ? symbol_name : s_null, error ? error : s_unknown_error); return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/posix/socket.c000066400000000000000000002172101456575232400237400ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * On OsX, suppress NoPipe signals via flags to setsockopt() * On Linux, suppress NoPipe signals via flags to send() */ #if defined(__MACH__) # define NO_SIGNAL_SOCK_OPT SO_NOSIGPIPE # define NO_SIGNAL_SEND 0 # define TCP_KEEPIDLE TCP_KEEPALIVE #else # define NO_SIGNAL_SEND MSG_NOSIGNAL #endif /* This isn't defined on ancient linux distros (breaking the builds). * However, if this is a prebuild, we purposely build on an ancient system, but * we want the kernel calls to still be the same as a modern build since that's likely the target of the application * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag * gets passed as long as it does. */ #ifndef O_CLOEXEC # define O_CLOEXEC 02000000 #endif #ifdef USE_VSOCK # if defined(__linux__) && defined(AF_VSOCK) # include # else # error "USE_VSOCK not supported on current platform" # endif #endif /* other than CONNECTED_READ | CONNECTED_WRITE * a socket is only in one of these states at a time. 
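* The connected states are bit flags, e.g. a fully connected stream socket holds CONNECTED_READ | CONNECTED_WRITE (0x0C).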
*/ enum socket_state { INIT = 0x01, CONNECTING = 0x02, CONNECTED_READ = 0x04, CONNECTED_WRITE = 0x08, BOUND = 0x10, LISTENING = 0x20, TIMEDOUT = 0x40, ERROR = 0x80, CLOSED, }; static int s_convert_domain(enum aws_socket_domain domain) { switch (domain) { case AWS_SOCKET_IPV4: return AF_INET; case AWS_SOCKET_IPV6: return AF_INET6; case AWS_SOCKET_LOCAL: return AF_UNIX; #ifdef USE_VSOCK case AWS_SOCKET_VSOCK: return AF_VSOCK; #endif default: AWS_ASSERT(0); return AF_INET; } } static int s_convert_type(enum aws_socket_type type) { switch (type) { case AWS_SOCKET_STREAM: return SOCK_STREAM; case AWS_SOCKET_DGRAM: return SOCK_DGRAM; default: AWS_ASSERT(0); return SOCK_STREAM; } } static int s_determine_socket_error(int error) { switch (error) { case ECONNREFUSED: return AWS_IO_SOCKET_CONNECTION_REFUSED; case ECONNRESET: return AWS_IO_SOCKET_CLOSED; case ETIMEDOUT: return AWS_IO_SOCKET_TIMEOUT; case EHOSTUNREACH: case ENETUNREACH: return AWS_IO_SOCKET_NO_ROUTE_TO_HOST; case EADDRNOTAVAIL: return AWS_IO_SOCKET_INVALID_ADDRESS; case ENETDOWN: return AWS_IO_SOCKET_NETWORK_DOWN; case ECONNABORTED: return AWS_IO_SOCKET_CONNECT_ABORTED; case EADDRINUSE: return AWS_IO_SOCKET_ADDRESS_IN_USE; case ENOBUFS: case ENOMEM: return AWS_ERROR_OOM; case EAGAIN: return AWS_IO_READ_WOULD_BLOCK; case EMFILE: case ENFILE: return AWS_ERROR_MAX_FDS_EXCEEDED; case ENOENT: case EINVAL: return AWS_ERROR_FILE_INVALID_PATH; case EAFNOSUPPORT: return AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY; case EACCES: return AWS_ERROR_NO_PERMISSION; default: return AWS_IO_SOCKET_NOT_CONNECTED; } } static int s_create_socket(struct aws_socket *sock, const struct aws_socket_options *options) { int fd = socket(s_convert_domain(options->domain), s_convert_type(options->type), 0); int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: initializing with domain %d and type %d", (void *)sock, fd, options->domain, options->type); if (fd != -1) { int flags = fcntl(fd, F_GETFL, 0); flags |= O_NONBLOCK | O_CLOEXEC; int success = fcntl(fd, F_SETFL, flags); (void)success; sock->io_handle.data.fd = fd; sock->io_handle.additional_data = NULL; return aws_socket_set_options(sock, options); } int aws_error = s_determine_socket_error(errno_value); return aws_raise_error(aws_error); } struct posix_socket_connect_args { struct aws_task task; struct aws_allocator *allocator; struct aws_socket *socket; }; struct posix_socket { struct aws_linked_list write_queue; struct aws_linked_list written_queue; struct aws_task written_task; struct posix_socket_connect_args *connect_args; /* Note that only the posix_socket impl part is refcounted. * The public aws_socket can be a stack variable and cleaned up synchronously * (by blocking until the event-loop cleans up the impl part). 
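* internal_refcount keeps the impl alive until any event-loop work that still references it has drained.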
* In hindsight, aws_socket should have been heap-allocated and refcounted, but alas */ struct aws_ref_count internal_refcount; struct aws_allocator *allocator; bool written_task_scheduled; bool currently_subscribed; bool continue_accept; bool *close_happened; }; static void s_socket_destroy_impl(void *user_data) { struct posix_socket *socket_impl = user_data; aws_mem_release(socket_impl->allocator, socket_impl); } static int s_socket_init( struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options, int existing_socket_fd) { AWS_ASSERT(options); AWS_ZERO_STRUCT(*socket); struct posix_socket *posix_socket = aws_mem_calloc(alloc, 1, sizeof(struct posix_socket)); if (!posix_socket) { socket->impl = NULL; return AWS_OP_ERR; } socket->allocator = alloc; socket->io_handle.data.fd = -1; socket->state = INIT; socket->options = *options; if (existing_socket_fd < 0) { int err = s_create_socket(socket, options); if (err) { aws_mem_release(alloc, posix_socket); socket->impl = NULL; return AWS_OP_ERR; } } else { socket->io_handle = (struct aws_io_handle){ .data = {.fd = existing_socket_fd}, .additional_data = NULL, }; aws_socket_set_options(socket, options); } aws_linked_list_init(&posix_socket->write_queue); aws_linked_list_init(&posix_socket->written_queue); posix_socket->currently_subscribed = false; posix_socket->continue_accept = false; aws_ref_count_init(&posix_socket->internal_refcount, posix_socket, s_socket_destroy_impl); posix_socket->allocator = alloc; posix_socket->connect_args = NULL; posix_socket->close_happened = NULL; socket->impl = posix_socket; return AWS_OP_SUCCESS; } int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { AWS_ASSERT(options); return s_socket_init(socket, alloc, options, -1); } void aws_socket_clean_up(struct aws_socket *socket) { if (!socket->impl) { /* protect from double clean */ return; } int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ (void)fd_for_logging; if (aws_socket_is_open(socket)) { AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: is still open, closing...", (void *)socket, fd_for_logging); aws_socket_close(socket); } struct posix_socket *socket_impl = socket->impl; if (aws_ref_count_release(&socket_impl->internal_refcount) != 0) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: is still pending io letting it dangle and cleaning up later.", (void *)socket, fd_for_logging); } AWS_ZERO_STRUCT(*socket); socket->io_handle.data.fd = -1; } /* Update socket->local_endpoint based on the results of getsockname() */ static int s_update_local_endpoint(struct aws_socket *socket) { struct aws_socket_endpoint tmp_endpoint; AWS_ZERO_STRUCT(tmp_endpoint); struct sockaddr_storage address; AWS_ZERO_STRUCT(address); socklen_t address_size = sizeof(address); if (getsockname(socket->io_handle.data.fd, (struct sockaddr *)&address, &address_size) != 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: getsockname() failed with error %d", (void *)socket, socket->io_handle.data.fd, errno_value); int aws_error = s_determine_socket_error(errno_value); return aws_raise_error(aws_error); } if (address.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&address; tmp_endpoint.port = ntohs(s->sin_port); if (inet_ntop(AF_INET, &s->sin_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { int errno_value = errno; /* Always cache errno 
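(the AWS_LOGF_* call just below may itself clobber errno)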
before potential side-effect */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: inet_ntop() failed with error %d", (void *)socket, socket->io_handle.data.fd, errno_value); int aws_error = s_determine_socket_error(errno_value); return aws_raise_error(aws_error); } } else if (address.ss_family == AF_INET6) { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&address; tmp_endpoint.port = ntohs(s->sin6_port); if (inet_ntop(AF_INET6, &s->sin6_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: inet_ntop() failed with error %d", (void *)socket, socket->io_handle.data.fd, errno_value); int aws_error = s_determine_socket_error(errno_value); return aws_raise_error(aws_error); } } else if (address.ss_family == AF_UNIX) { struct sockaddr_un *s = (struct sockaddr_un *)&address; /* Ensure there's a null-terminator. * On some platforms it may be missing when the path gets very long. See: * https://man7.org/linux/man-pages/man7/unix.7.html#BUGS * But let's keep it simple, and not deal with that madness until someone demands it. */ size_t sun_len; if (aws_secure_strlen(s->sun_path, sizeof(tmp_endpoint.address), &sun_len)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: UNIX domain socket name is too long", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); } memcpy(tmp_endpoint.address, s->sun_path, sun_len); #if USE_VSOCK } else if (address.ss_family == AF_VSOCK) { struct sockaddr_vm *s = (struct sockaddr_vm *)&address; tmp_endpoint.port = s->svm_port; snprintf(tmp_endpoint.address, sizeof(tmp_endpoint.address), "%" PRIu32, s->svm_cid); return AWS_OP_SUCCESS; #endif /* USE_VSOCK */ } else { AWS_ASSERT(0); return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); } socket->local_endpoint = tmp_endpoint; return AWS_OP_SUCCESS; } static void s_on_connection_error(struct aws_socket *socket, int error); static int s_on_connection_success(struct aws_socket *socket) { struct aws_event_loop *event_loop = socket->event_loop; struct posix_socket *socket_impl = socket->impl; if (socket_impl->currently_subscribed) { aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); socket_impl->currently_subscribed = false; } socket->event_loop = NULL; int connect_result; socklen_t result_length = sizeof(connect_result); if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: failed to determine connection error %d", (void *)socket, socket->io_handle.data.fd, errno_value); int aws_error = s_determine_socket_error(errno_value); aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); return AWS_OP_ERR; } if (connect_result) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: connection error %d", (void *)socket, socket->io_handle.data.fd, connect_result); int aws_error = s_determine_socket_error(connect_result); aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); return AWS_OP_ERR; } AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection success", (void *)socket, socket->io_handle.data.fd); if (s_update_local_endpoint(socket)) { s_on_connection_error(socket, aws_last_error()); return AWS_OP_ERR; } socket->state = CONNECTED_WRITE | CONNECTED_READ; if (aws_socket_assign_to_event_loop(socket, event_loop)) 
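/* (re-attaches the now-connected fd to the event loop the caller originally requested) */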
{ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: assignment to event loop %p failed with error %d", (void *)socket, socket->io_handle.data.fd, (void *)event_loop, aws_last_error()); s_on_connection_error(socket, aws_last_error()); return AWS_OP_ERR; } socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data); return AWS_OP_SUCCESS; } static void s_on_connection_error(struct aws_socket *socket, int error) { socket->state = ERROR; AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection failure", (void *)socket, socket->io_handle.data.fd); if (socket->connection_result_fn) { socket->connection_result_fn(socket, error, socket->connect_accept_user_data); } else if (socket->accept_result_fn) { socket->accept_result_fn(socket, error, NULL, socket->connect_accept_user_data); } } /* the next two callbacks compete based on which one runs first. if s_socket_connect_event * comes back first, then we set socket_args->socket = NULL and continue on with the connection. * if s_handle_socket_timeout() runs first, is sees socket_args->socket is NULL and just cleans up its memory. * s_handle_socket_timeout() will always run so the memory for socket_connect_args is always cleaned up there. */ static void s_socket_connect_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)event_loop; (void)handle; struct posix_socket_connect_args *socket_args = (struct posix_socket_connect_args *)user_data; AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "fd=%d: connection activity handler triggered ", handle->data.fd); if (socket_args->socket) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: has not timed out yet proceeding with connection.", (void *)socket_args->socket, handle->data.fd); struct posix_socket *socket_impl = socket_args->socket->impl; if (!(events & AWS_IO_EVENT_TYPE_ERROR || events & AWS_IO_EVENT_TYPE_CLOSED) && (events & AWS_IO_EVENT_TYPE_READABLE || events & AWS_IO_EVENT_TYPE_WRITABLE)) { struct aws_socket *socket = socket_args->socket; socket_args->socket = NULL; socket_impl->connect_args = NULL; s_on_connection_success(socket); return; } int aws_error = aws_socket_get_error(socket_args->socket); /* we'll get another notification. 
*/ if (aws_error == AWS_IO_READ_WOULD_BLOCK) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: spurious event, waiting for another notification.", (void *)socket_args->socket, handle->data.fd); return; } struct aws_socket *socket = socket_args->socket; socket_args->socket = NULL; socket_impl->connect_args = NULL; aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); } } static void s_handle_socket_timeout(struct aws_task *task, void *args, aws_task_status status) { (void)task; (void)status; struct posix_socket_connect_args *socket_args = args; AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "task_id=%p: timeout task triggered, evaluating timeouts.", (void *)task); /* successful connection will have nulled out connect_args->socket */ if (socket_args->socket) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: timed out, shutting down.", (void *)socket_args->socket, socket_args->socket->io_handle.data.fd); socket_args->socket->state = TIMEDOUT; int error_code = AWS_IO_SOCKET_TIMEOUT; if (status == AWS_TASK_STATUS_RUN_READY) { aws_event_loop_unsubscribe_from_io_events(socket_args->socket->event_loop, &socket_args->socket->io_handle); } else { error_code = AWS_IO_EVENT_LOOP_SHUTDOWN; aws_event_loop_free_io_event_resources(socket_args->socket->event_loop, &socket_args->socket->io_handle); } socket_args->socket->event_loop = NULL; struct posix_socket *socket_impl = socket_args->socket->impl; socket_impl->currently_subscribed = false; aws_raise_error(error_code); struct aws_socket *socket = socket_args->socket; /*socket close sets socket_args->socket to NULL and * socket_impl->connect_args to NULL. */ aws_socket_close(socket); s_on_connection_error(socket, error_code); } aws_mem_release(socket_args->allocator, socket_args); } /* this is used simply for moving a connect_success callback when the connect finished immediately * (like for unix domain sockets) into the event loop's thread. Also note, in that case there was no * timeout task scheduled, so in this case the socket_args are cleaned up. */ static void s_run_connect_success(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct posix_socket_connect_args *socket_args = arg; if (socket_args->socket) { struct posix_socket *socket_impl = socket_args->socket->impl; if (status == AWS_TASK_STATUS_RUN_READY) { s_on_connection_success(socket_args->socket); } else { aws_raise_error(AWS_IO_SOCKET_CONNECT_ABORTED); socket_args->socket->event_loop = NULL; s_on_connection_error(socket_args->socket, AWS_IO_SOCKET_CONNECT_ABORTED); } socket_impl->connect_args = NULL; } aws_mem_release(socket_args->allocator, socket_args); } static inline int s_convert_pton_error(int pton_code, int errno_value) { if (pton_code == 0) { return AWS_IO_SOCKET_INVALID_ADDRESS; } return s_determine_socket_error(errno_value); } struct socket_address { union sock_addr_types { struct sockaddr_in addr_in; struct sockaddr_in6 addr_in6; struct sockaddr_un un_addr; #ifdef USE_VSOCK struct sockaddr_vm vm_addr; #endif } sock_addr_types; }; #ifdef USE_VSOCK /** Convert a string to a VSOCK CID. Respects the calling convetion of inet_pton: * 0 on error, 1 on success. 
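* e.g. parse_cid("3", &cid) succeeds with cid == 3, while parse_cid("-1", &cid) succeeds with cid == VMADDR_CID_ANY.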
*/ static int parse_cid(const char *cid_str, unsigned int *value) { if (cid_str == NULL || value == NULL) { errno = EINVAL; return 0; } /* strtoll returns 0 as both error and correct value */ errno = 0; /* unsigned long long to handle edge cases in convention explicitly */ long long cid = strtoll(cid_str, NULL, 10); if (errno != 0) { return 0; } /* -1U means any, so it's a valid value, but it needs to be converted to * unsigned int. */ if (cid == -1) { *value = VMADDR_CID_ANY; return 1; } if (cid < 0 || cid > UINT_MAX) { errno = ERANGE; return 0; } /* cast is safe here, edge cases already checked */ *value = (unsigned int)cid; return 1; } #endif int aws_socket_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *event_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data) { AWS_ASSERT(event_loop); AWS_ASSERT(!socket->event_loop); AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: beginning connect.", (void *)socket, socket->io_handle.data.fd); if (socket->event_loop) { return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); } if (socket->options.type != AWS_SOCKET_DGRAM) { AWS_ASSERT(on_connection_result); if (socket->state != INIT) { return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } } else { /* UDP socket */ /* UDP sockets jump to CONNECT_READ if bind is called first */ if (socket->state != CONNECTED_READ && socket->state != INIT) { return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } } size_t address_strlen; if (aws_secure_strlen(remote_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { return AWS_OP_ERR; } if (aws_socket_validate_port_for_connect(remote_endpoint->port, socket->options.domain)) { return AWS_OP_ERR; } struct socket_address address; AWS_ZERO_STRUCT(address); socklen_t sock_size = 0; int pton_err = 1; if (socket->options.domain == AWS_SOCKET_IPV4) { pton_err = inet_pton(AF_INET, remote_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); address.sock_addr_types.addr_in.sin_port = htons((uint16_t)remote_endpoint->port); address.sock_addr_types.addr_in.sin_family = AF_INET; sock_size = sizeof(address.sock_addr_types.addr_in); } else if (socket->options.domain == AWS_SOCKET_IPV6) { pton_err = inet_pton(AF_INET6, remote_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port); address.sock_addr_types.addr_in6.sin6_family = AF_INET6; sock_size = sizeof(address.sock_addr_types.addr_in6); } else if (socket->options.domain == AWS_SOCKET_LOCAL) { address.sock_addr_types.un_addr.sun_family = AF_UNIX; strncpy(address.sock_addr_types.un_addr.sun_path, remote_endpoint->address, AWS_ADDRESS_MAX_LEN); sock_size = sizeof(address.sock_addr_types.un_addr); #ifdef USE_VSOCK } else if (socket->options.domain == AWS_SOCKET_VSOCK) { pton_err = parse_cid(remote_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; address.sock_addr_types.vm_addr.svm_port = remote_endpoint->port; sock_size = sizeof(address.sock_addr_types.vm_addr); #endif } else { AWS_ASSERT(0); return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); } if (pton_err != 1) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: failed to parse address %s:%u.", (void *)socket, socket->io_handle.data.fd, remote_endpoint->address, remote_endpoint->port); return 
aws_raise_error(s_convert_pton_error(pton_err, errno_value)); } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: connecting to endpoint %s:%u.", (void *)socket, socket->io_handle.data.fd, remote_endpoint->address, remote_endpoint->port); socket->state = CONNECTING; socket->remote_endpoint = *remote_endpoint; socket->connect_accept_user_data = user_data; socket->connection_result_fn = on_connection_result; struct posix_socket *socket_impl = socket->impl; socket_impl->connect_args = aws_mem_calloc(socket->allocator, 1, sizeof(struct posix_socket_connect_args)); if (!socket_impl->connect_args) { return AWS_OP_ERR; } socket_impl->connect_args->socket = socket; socket_impl->connect_args->allocator = socket->allocator; socket_impl->connect_args->task.fn = s_handle_socket_timeout; socket_impl->connect_args->task.arg = socket_impl->connect_args; int error_code = connect(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size); socket->event_loop = event_loop; if (!error_code) { AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: connected immediately, not scheduling timeout.", (void *)socket, socket->io_handle.data.fd); socket_impl->connect_args->task.fn = s_run_connect_success; /* the subscription for IO will happen once we setup the connection in the task. Since we already * know the connection succeeded, we don't need to register for events yet. */ aws_event_loop_schedule_task_now(event_loop, &socket_impl->connect_args->task); } if (error_code) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EINPROGRESS || errno_value == EALREADY) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: connection pending waiting on event-loop notification or timeout.", (void *)socket, socket->io_handle.data.fd); /* cache the timeout task; it is possible for the IO subscription to come back virtually immediately * and null out the connect args */ struct aws_task *timeout_task = &socket_impl->connect_args->task; socket_impl->currently_subscribed = true; /* This event is for when the connection finishes. (the fd will flip writable). */ if (aws_event_loop_subscribe_to_io_events( event_loop, &socket->io_handle, AWS_IO_EVENT_TYPE_WRITABLE, s_socket_connect_event, socket_impl->connect_args)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: failed to register with event-loop %p.", (void *)socket, socket->io_handle.data.fd, (void *)event_loop); socket_impl->currently_subscribed = false; socket->event_loop = NULL; goto err_clean_up; } /* schedule a task to run at the connect timeout interval, if this task runs before the connect * happens, we consider that a timeout. 
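* Once scheduled, the timeout task is guaranteed to run (even if only with a canceled status at loop shutdown), and it is what ultimately frees connect_args.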
*/ uint64_t timeout = 0; aws_event_loop_current_clock_time(event_loop, &timeout); timeout += aws_timestamp_convert( socket->options.connect_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: scheduling timeout task for %llu.", (void *)socket, socket->io_handle.data.fd, (unsigned long long)timeout); aws_event_loop_schedule_task_future(event_loop, timeout_task, timeout); } else { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: connect failed with error code %d.", (void *)socket, socket->io_handle.data.fd, errno_value); int aws_error = s_determine_socket_error(errno_value); aws_raise_error(aws_error); socket->event_loop = NULL; socket_impl->currently_subscribed = false; goto err_clean_up; } } return AWS_OP_SUCCESS; err_clean_up: aws_mem_release(socket->allocator, socket_impl->connect_args); socket_impl->connect_args = NULL; return AWS_OP_ERR; } int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { if (socket->state != INIT) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: invalid state for bind operation.", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } size_t address_strlen; if (aws_secure_strlen(local_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { return AWS_OP_ERR; } if (aws_socket_validate_port_for_bind(local_endpoint->port, socket->options.domain)) { return AWS_OP_ERR; } AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: binding to %s:%u.", (void *)socket, socket->io_handle.data.fd, local_endpoint->address, local_endpoint->port); struct socket_address address; AWS_ZERO_STRUCT(address); socklen_t sock_size = 0; int pton_err = 1; if (socket->options.domain == AWS_SOCKET_IPV4) { pton_err = inet_pton(AF_INET, local_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); address.sock_addr_types.addr_in.sin_port = htons((uint16_t)local_endpoint->port); address.sock_addr_types.addr_in.sin_family = AF_INET; sock_size = sizeof(address.sock_addr_types.addr_in); } else if (socket->options.domain == AWS_SOCKET_IPV6) { pton_err = inet_pton(AF_INET6, local_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); address.sock_addr_types.addr_in6.sin6_family = AF_INET6; sock_size = sizeof(address.sock_addr_types.addr_in6); } else if (socket->options.domain == AWS_SOCKET_LOCAL) { address.sock_addr_types.un_addr.sun_family = AF_UNIX; strncpy(address.sock_addr_types.un_addr.sun_path, local_endpoint->address, AWS_ADDRESS_MAX_LEN); sock_size = sizeof(address.sock_addr_types.un_addr); #ifdef USE_VSOCK } else if (socket->options.domain == AWS_SOCKET_VSOCK) { pton_err = parse_cid(local_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; address.sock_addr_types.vm_addr.svm_port = local_endpoint->port; sock_size = sizeof(address.sock_addr_types.vm_addr); #endif } else { AWS_ASSERT(0); return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); } if (pton_err != 1) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: failed to parse address %s:%u.", (void *)socket, socket->io_handle.data.fd, local_endpoint->address, local_endpoint->port); return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); } if (bind(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, 
sock_size) != 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: bind failed with error code %d", (void *)socket, socket->io_handle.data.fd, errno_value); aws_raise_error(s_determine_socket_error(errno_value)); goto error; } if (s_update_local_endpoint(socket)) { goto error; } if (socket->options.type == AWS_SOCKET_STREAM) { socket->state = BOUND; } else { /* e.g. UDP is now readable */ socket->state = CONNECTED_READ; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: successfully bound to %s:%u", (void *)socket, socket->io_handle.data.fd, socket->local_endpoint.address, socket->local_endpoint.port); return AWS_OP_SUCCESS; error: socket->state = ERROR; return AWS_OP_ERR; } int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { if (socket->local_endpoint.address[0] == 0) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: Socket has no local address. Socket must be bound first.", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } *out_address = socket->local_endpoint; return AWS_OP_SUCCESS; } int aws_socket_listen(struct aws_socket *socket, int backlog_size) { if (socket->state != BOUND) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: invalid state for listen operation. You must call bind first.", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } int error_code = listen(socket->io_handle.data.fd, backlog_size); if (!error_code) { AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: successfully listening", (void *)socket, socket->io_handle.data.fd); socket->state = LISTENING; return AWS_OP_SUCCESS; } int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: listen failed with error code %d", (void *)socket, socket->io_handle.data.fd, errno_value); socket->state = ERROR; return aws_raise_error(s_determine_socket_error(errno_value)); } /* this is called by the event loop handler that was installed in start_accept(). It runs once the FD goes readable, * accepts as many as it can and then returns control to the event loop. 
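* EAGAIN/EWOULDBLOCK from accept() just means the backlog is drained; any other failure is reported through the accept_result callback.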
*/ static void s_socket_accept_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)event_loop; struct aws_socket *socket = user_data; struct posix_socket *socket_impl = socket->impl; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: listening event received", (void *)socket, socket->io_handle.data.fd); if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { int in_fd = 0; while (socket_impl->continue_accept && in_fd != -1) { struct sockaddr_storage in_addr; socklen_t in_len = sizeof(struct sockaddr_storage); in_fd = accept(handle->data.fd, (struct sockaddr *)&in_addr, &in_len); if (in_fd == -1) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { break; } int aws_error = aws_socket_get_error(socket); aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); break; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: incoming connection", (void *)socket, socket->io_handle.data.fd); struct aws_socket *new_sock = aws_mem_acquire(socket->allocator, sizeof(struct aws_socket)); if (!new_sock) { close(in_fd); s_on_connection_error(socket, aws_last_error()); continue; } if (s_socket_init(new_sock, socket->allocator, &socket->options, in_fd)) { aws_mem_release(socket->allocator, new_sock); s_on_connection_error(socket, aws_last_error()); continue; } new_sock->local_endpoint = socket->local_endpoint; new_sock->state = CONNECTED_READ | CONNECTED_WRITE; uint32_t port = 0; /* get the info on the incoming socket's address */ if (in_addr.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&in_addr; port = ntohs(s->sin_port); /* this came from the kernel, a.) it won't fail. b.) even if it does * its not fatal. come back and add logging later. */ if (!inet_ntop( AF_INET, &s->sin_addr, new_sock->remote_endpoint.address, sizeof(new_sock->remote_endpoint.address))) { AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d:. Failed to determine remote address.", (void *)socket, socket->io_handle.data.fd); } new_sock->options.domain = AWS_SOCKET_IPV4; } else if (in_addr.ss_family == AF_INET6) { /* this came from the kernel, a.) it won't fail. b.) even if it does * its not fatal. come back and add logging later. */ struct sockaddr_in6 *s = (struct sockaddr_in6 *)&in_addr; port = ntohs(s->sin6_port); if (!inet_ntop( AF_INET6, &s->sin6_addr, new_sock->remote_endpoint.address, sizeof(new_sock->remote_endpoint.address))) { AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d:. 
Failed to determine remote address.", (void *)socket, socket->io_handle.data.fd); } new_sock->options.domain = AWS_SOCKET_IPV6; } else if (in_addr.ss_family == AF_UNIX) { new_sock->remote_endpoint = socket->local_endpoint; new_sock->options.domain = AWS_SOCKET_LOCAL; } new_sock->remote_endpoint.port = port; AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: connected to %s:%d, incoming fd %d", (void *)socket, socket->io_handle.data.fd, new_sock->remote_endpoint.address, new_sock->remote_endpoint.port, in_fd); int flags = fcntl(in_fd, F_GETFL, 0); flags |= O_NONBLOCK | O_CLOEXEC; fcntl(in_fd, F_SETFL, flags); bool close_occurred = false; socket_impl->close_happened = &close_occurred; socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_sock, socket->connect_accept_user_data); if (close_occurred) { return; } socket_impl->close_happened = NULL; } } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: finished processing incoming connections, " "waiting on event-loop notification", (void *)socket, socket->io_handle.data.fd); } int aws_socket_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data) { AWS_ASSERT(on_accept_result); AWS_ASSERT(accept_loop); if (socket->event_loop) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: is already assigned to event-loop %p.", (void *)socket, socket->io_handle.data.fd, (void *)socket->event_loop); return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); } if (socket->state != LISTENING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: invalid state for start_accept operation. You must call listen first.", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } socket->accept_result_fn = on_accept_result; socket->connect_accept_user_data = user_data; socket->event_loop = accept_loop; struct posix_socket *socket_impl = socket->impl; socket_impl->continue_accept = true; socket_impl->currently_subscribed = true; if (aws_event_loop_subscribe_to_io_events( socket->event_loop, &socket->io_handle, AWS_IO_EVENT_TYPE_READABLE, s_socket_accept_event, socket)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: failed to subscribe to event-loop %p.", (void *)socket, socket->io_handle.data.fd, (void *)socket->event_loop); socket_impl->continue_accept = false; socket_impl->currently_subscribed = false; socket->event_loop = NULL; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } struct stop_accept_args { struct aws_task task; struct aws_mutex mutex; struct aws_condition_variable condition_variable; struct aws_socket *socket; int ret_code; bool invoked; }; static bool s_stop_accept_pred(void *arg) { struct stop_accept_args *stop_accept_args = arg; return stop_accept_args->invoked; } static void s_stop_accept_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct stop_accept_args *stop_accept_args = arg; aws_mutex_lock(&stop_accept_args->mutex); stop_accept_args->ret_code = AWS_OP_SUCCESS; if (aws_socket_stop_accept(stop_accept_args->socket)) { stop_accept_args->ret_code = aws_last_error(); } stop_accept_args->invoked = true; aws_condition_variable_notify_one(&stop_accept_args->condition_variable); aws_mutex_unlock(&stop_accept_args->mutex); } int aws_socket_stop_accept(struct aws_socket *socket) { if (socket->state != LISTENING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: is not in a listening state, can't stop_accept.", (void *)socket, socket->io_handle.data.fd); return 
aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections", (void *)socket, socket->io_handle.data.fd); if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { struct stop_accept_args args = { .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .socket = socket, .ret_code = AWS_OP_SUCCESS, .task = {.fn = s_stop_accept_task}, }; AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections from a different thread than " "the socket is running from. Blocking until it shuts down.", (void *)socket, socket->io_handle.data.fd); /* Look.... I know what I'm doing.... trust me, I'm an engineer. * We wait on the completion before 'args' goes out of scope. * NOLINTNEXTLINE */ args.task.arg = &args; aws_mutex_lock(&args.mutex); aws_event_loop_schedule_task_now(socket->event_loop, &args.task); aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_stop_accept_pred, &args); aws_mutex_unlock(&args.mutex); AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: stop accept task finished running.", (void *)socket, socket->io_handle.data.fd); if (args.ret_code) { return aws_raise_error(args.ret_code); } return AWS_OP_SUCCESS; } int ret_val = AWS_OP_SUCCESS; struct posix_socket *socket_impl = socket->impl; if (socket_impl->currently_subscribed) { ret_val = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); socket_impl->currently_subscribed = false; socket_impl->continue_accept = false; socket->event_loop = NULL; } return ret_val; } int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_options *options) { if (socket->options.domain != options->domain || socket->options.type != options->type) { return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: setting socket options to: keep-alive %d, keep idle %d, keep-alive interval %d, keep-alive probe " "count %d.", (void *)socket, socket->io_handle.data.fd, (int)options->keepalive, (int)options->keep_alive_timeout_sec, (int)options->keep_alive_interval_sec, (int)options->keep_alive_max_failed_probes); socket->options = *options; #ifdef NO_SIGNAL_SOCK_OPT int option_value = 1; if (AWS_UNLIKELY(setsockopt( socket->io_handle.data.fd, SOL_SOCKET, NO_SIGNAL_SOCK_OPT, &option_value, sizeof(option_value)))) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d: setsockopt() for NO_SIGNAL_SOCK_OPT failed with errno %d.", (void *)socket, socket->io_handle.data.fd, errno_value); } #endif /* NO_SIGNAL_SOCK_OPT */ int reuse = 1; if (AWS_UNLIKELY(setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(int)))) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d: setsockopt() for SO_REUSEADDR failed with errno %d.", (void *)socket, socket->io_handle.data.fd, errno_value); } if (options->type == AWS_SOCKET_STREAM && options->domain != AWS_SOCKET_LOCAL) { if (socket->options.keepalive) { int keep_alive = 1; if (AWS_UNLIKELY( setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_KEEPALIVE, &keep_alive, sizeof(int)))) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d: setsockopt() for enabling SO_KEEPALIVE failed with errno %d.", (void *)socket, socket->io_handle.data.fd, 
errno_value); } } #if !defined(__OpenBSD__) if (socket->options.keep_alive_interval_sec && socket->options.keep_alive_timeout_sec) { int ival_in_secs = socket->options.keep_alive_interval_sec; if (AWS_UNLIKELY(setsockopt( socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPIDLE, &ival_in_secs, sizeof(ival_in_secs)))) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d: setsockopt() for enabling TCP_KEEPIDLE for TCP failed with errno %d.", (void *)socket, socket->io_handle.data.fd, errno_value); } ival_in_secs = socket->options.keep_alive_timeout_sec; if (AWS_UNLIKELY(setsockopt( socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPINTVL, &ival_in_secs, sizeof(ival_in_secs)))) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d: setsockopt() for enabling TCP_KEEPINTVL for TCP failed with errno %d.", (void *)socket, socket->io_handle.data.fd, errno_value); } } if (socket->options.keep_alive_max_failed_probes) { int max_probes = socket->options.keep_alive_max_failed_probes; if (AWS_UNLIKELY( setsockopt(socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPCNT, &max_probes, sizeof(max_probes)))) { int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p fd=%d: setsockopt() for enabling TCP_KEEPCNT for TCP failed with errno %d.", (void *)socket, socket->io_handle.data.fd, errno_value); } } #endif /* __OpenBSD__ */ } return AWS_OP_SUCCESS; } struct socket_write_request { struct aws_byte_cursor cursor_cpy; aws_socket_on_write_completed_fn *written_fn; void *write_user_data; struct aws_linked_list_node node; size_t original_buffer_len; int error_code; }; struct posix_socket_close_args { struct aws_mutex mutex; struct aws_condition_variable condition_variable; struct aws_socket *socket; bool invoked; int ret_code; }; static bool s_close_predicate(void *arg) { struct posix_socket_close_args *close_args = arg; return close_args->invoked; } static void s_close_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct posix_socket_close_args *close_args = arg; aws_mutex_lock(&close_args->mutex); close_args->ret_code = AWS_OP_SUCCESS; if (aws_socket_close(close_args->socket)) { close_args->ret_code = aws_last_error(); } close_args->invoked = true; aws_condition_variable_notify_one(&close_args->condition_variable); aws_mutex_unlock(&close_args->mutex); } int aws_socket_close(struct aws_socket *socket) { struct posix_socket *socket_impl = socket->impl; AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: closing", (void *)socket, socket->io_handle.data.fd); struct aws_event_loop *event_loop = socket->event_loop; if (socket->event_loop) { /* don't freak out on me, this almost never happens, and never occurs inside a channel * it only gets hit from a listening socket shutting down or from a unit test. */ if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: closing from a different thread than " "the socket is running from. 
Blocking until it closes down.", (void *)socket, socket->io_handle.data.fd); /* the only time we allow this kind of thing is when you're a listener.*/ if (socket->state != LISTENING) { return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } struct posix_socket_close_args args = { .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .socket = socket, .ret_code = AWS_OP_SUCCESS, .invoked = false, }; struct aws_task close_task = { .fn = s_close_task, .arg = &args, }; int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ (void)fd_for_logging; aws_mutex_lock(&args.mutex); aws_event_loop_schedule_task_now(socket->event_loop, &close_task); aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_close_predicate, &args); aws_mutex_unlock(&args.mutex); AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: close task completed.", (void *)socket, fd_for_logging); if (args.ret_code) { return aws_raise_error(args.ret_code); } return AWS_OP_SUCCESS; } if (socket_impl->currently_subscribed) { if (socket->state & LISTENING) { aws_socket_stop_accept(socket); } else { int err_code = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); if (err_code) { return AWS_OP_ERR; } } socket_impl->currently_subscribed = false; socket->event_loop = NULL; } } if (socket_impl->close_happened) { *socket_impl->close_happened = true; } if (socket_impl->connect_args) { socket_impl->connect_args->socket = NULL; socket_impl->connect_args = NULL; } if (aws_socket_is_open(socket)) { close(socket->io_handle.data.fd); socket->io_handle.data.fd = -1; socket->state = CLOSED; /* ensure callbacks for pending writes fire (in order) before this close function returns */ if (socket_impl->written_task_scheduled) { aws_event_loop_cancel_task(event_loop, &socket_impl->written_task); } while (!aws_linked_list_empty(&socket_impl->written_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); aws_mem_release(socket->allocator, write_request); } while (!aws_linked_list_empty(&socket_impl->write_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; write_request->written_fn(socket, AWS_IO_SOCKET_CLOSED, bytes_written, write_request->write_user_data); aws_mem_release(socket->allocator, write_request); } } return AWS_OP_SUCCESS; } int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir) { int how = dir == AWS_CHANNEL_DIR_READ ? 
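/* Maps the channel direction onto shutdown(2)'s "how" argument: on POSIX platforms SHUT_RD == 0 and SHUT_WR == 1, which is what the literals that follow rely on. */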
0 : 1; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: shutting down in direction %d", (void *)socket, socket->io_handle.data.fd, dir); if (shutdown(socket->io_handle.data.fd, how)) { int errno_value = errno; /* Always cache errno before potential side-effect */ int aws_error = s_determine_socket_error(errno_value); return aws_raise_error(aws_error); } if (dir == AWS_CHANNEL_DIR_READ) { socket->state &= ~CONNECTED_READ; } else { socket->state &= ~CONNECTED_WRITE; } return AWS_OP_SUCCESS; } static void s_written_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_socket *socket = arg; struct posix_socket *socket_impl = socket->impl; socket_impl->written_task_scheduled = false; /* this is to handle a race condition when a callback kicks off a cleanup, or the user decides * to close the socket based on something they read (SSL validation failed for example). * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling */ aws_ref_count_acquire(&socket_impl->internal_refcount); /* Notes about weird loop: * 1) Only process the initial contents of queue when this task is run, * ignoring any writes queued during delivery. * If we simply looped until the queue was empty, we could get into a * synchronous loop of completing and writing and completing and writing... * and it would be tough for multiple sockets to share an event-loop fairly. * 2) Check if queue is empty with each iteration. * If user calls close() from the callback, close() will process all * nodes in the written_queue, and the queue will be empty when the * callstack gets back to here. */ if (!aws_linked_list_empty(&socket_impl->written_queue)) { struct aws_linked_list_node *stop_after = aws_linked_list_back(&socket_impl->written_queue); do { struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); aws_mem_release(socket_impl->allocator, write_request); if (node == stop_after) { break; } } while (!aws_linked_list_empty(&socket_impl->written_queue)); } aws_ref_count_release(&socket_impl->internal_refcount); } /* this gets called in two scenarios. * 1st scenario, someone called aws_socket_write() and we want to try writing now, so an error can be returned * immediately if something bad has happened to the socket. In this case, `parent_request` is set. * 2nd scenario, the event loop notified us that the socket went writable. In this case `parent_request` is NULL */ static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) { struct posix_socket *socket_impl = socket->impl; if (parent_request) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: processing write requests, called from aws_socket_write", (void *)socket, socket->io_handle.data.fd); } else { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: processing write requests, invoked by the event-loop", (void *)socket, socket->io_handle.data.fd); } bool purge = false; int aws_error = AWS_OP_SUCCESS; bool parent_request_failed = false; bool pushed_to_written_queue = false; /* if a close call happens in the middle, this queue will have been cleaned out from under us. 
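* For context: each queued socket_write_request keeps a cursor copy that is advanced as send() makes partial progress; a request stays at the front of write_queue until fully written, then moves to written_queue so its completion callback fires later from s_written_task on the event-loop thread.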
*/ while (!aws_linked_list_empty(&socket_impl->write_queue)) { struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue); struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: dequeued write request of size %llu, remaining to write %llu", (void *)socket, socket->io_handle.data.fd, (unsigned long long)write_request->original_buffer_len, (unsigned long long)write_request->cursor_cpy.len); ssize_t written = send( socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, NO_SIGNAL_SEND); int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: send written size %d", (void *)socket, socket->io_handle.data.fd, (int)written); if (written < 0) { if (errno_value == EAGAIN) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); break; } if (errno_value == EPIPE) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: already closed before write", (void *)socket, socket->io_handle.data.fd); aws_error = AWS_IO_SOCKET_CLOSED; aws_raise_error(aws_error); purge = true; break; } purge = true; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: write error with error code %d", (void *)socket, socket->io_handle.data.fd, errno_value); aws_error = s_determine_socket_error(errno_value); aws_raise_error(aws_error); break; } size_t remaining_to_write = write_request->cursor_cpy.len; aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: remaining write request to write %llu", (void *)socket, socket->io_handle.data.fd, (unsigned long long)write_request->cursor_cpy.len); if ((size_t)written == remaining_to_write) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: write request completed", (void *)socket, socket->io_handle.data.fd); aws_linked_list_remove(node); write_request->error_code = AWS_ERROR_SUCCESS; aws_linked_list_push_back(&socket_impl->written_queue, node); pushed_to_written_queue = true; } } if (purge) { while (!aws_linked_list_empty(&socket_impl->write_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); /* If this fn was invoked directly from aws_socket_write(), don't invoke the error callback * as the user will be able to rely on the return value from aws_socket_write() */ if (write_request == parent_request) { parent_request_failed = true; aws_mem_release(socket->allocator, write_request); } else { write_request->error_code = aws_error; aws_linked_list_push_back(&socket_impl->written_queue, node); pushed_to_written_queue = true; } } } if (pushed_to_written_queue && !socket_impl->written_task_scheduled) { socket_impl->written_task_scheduled = true; aws_task_init(&socket_impl->written_task, s_written_task, socket, "socket_written_task"); aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); } /* Only report error if aws_socket_write() invoked this function and its write_request failed */ if (!parent_request_failed) { return AWS_OP_SUCCESS; } aws_raise_error(aws_error); return AWS_OP_ERR; } static void s_on_socket_io_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)event_loop; (void)handle; struct aws_socket *socket = user_data; 
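/* Dispatch order below: remote hang-up/close first, then error, then readable, then writable. The error/readable/writable branches re-check currently_subscribed because a preceding callback may have closed the socket mid-dispatch. */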
struct posix_socket *socket_impl = socket->impl; /* this is to handle a race condition when an error kicks off a cleanup, or the user decides * to close the socket based on something they read (SSL validation failed for example). * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling but currently * subscribed is set to false. */ aws_ref_count_acquire(&socket_impl->internal_refcount); if (events & AWS_IO_EVENT_TYPE_REMOTE_HANG_UP || events & AWS_IO_EVENT_TYPE_CLOSED) { aws_raise_error(AWS_IO_SOCKET_CLOSED); AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: closed remotely", (void *)socket, socket->io_handle.data.fd); if (socket->readable_fn) { socket->readable_fn(socket, AWS_IO_SOCKET_CLOSED, socket->readable_user_data); } goto end_check; } if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_ERROR) { int aws_error = aws_socket_get_error(socket); aws_raise_error(aws_error); AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: error event occurred", (void *)socket, socket->io_handle.data.fd); if (socket->readable_fn) { socket->readable_fn(socket, aws_error, socket->readable_user_data); } goto end_check; } if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) { AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd); if (socket->readable_fn) { socket->readable_fn(socket, AWS_OP_SUCCESS, socket->readable_user_data); } } /* if socket closed in between these branches, the currently_subscribed will be false and socket_impl will not * have been cleaned up, so this next branch is safe. */ if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) { AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd); s_process_socket_write_requests(socket, NULL); } end_check: aws_ref_count_release(&socket_impl->internal_refcount); } int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { if (!socket->event_loop) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: assigning to event loop %p", (void *)socket, socket->io_handle.data.fd, (void *)event_loop); socket->event_loop = event_loop; struct posix_socket *socket_impl = socket->impl; socket_impl->currently_subscribed = true; if (aws_event_loop_subscribe_to_io_events( event_loop, &socket->io_handle, AWS_IO_EVENT_TYPE_WRITABLE | AWS_IO_EVENT_TYPE_READABLE, s_on_socket_io_event, socket)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: assigning to event loop %p failed with error %d", (void *)socket, socket->io_handle.data.fd, (void *)event_loop, aws_last_error()); socket_impl->currently_subscribed = false; socket->event_loop = NULL; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); } struct aws_event_loop *aws_socket_get_event_loop(struct aws_socket *socket) { return socket->event_loop; } int aws_socket_subscribe_to_readable_events( struct aws_socket *socket, aws_socket_on_readable_fn *on_readable, void *user_data) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, " id=%p fd=%d: subscribing to readable events", (void *)socket, socket->io_handle.data.fd); if (!(socket->state & CONNECTED_READ)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: can't subscribe to readable events since the socket is not connected", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); } if (socket->readable_fn) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: can't subscribe to readable 
events since it is already subscribed", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); } AWS_ASSERT(on_readable); socket->readable_user_data = user_data; socket->readable_fn = on_readable; return AWS_OP_SUCCESS; } int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { AWS_ASSERT(amount_read); if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: cannot read from a different thread than event loop %p", (void *)socket, socket->io_handle.data.fd, (void *)socket->event_loop); return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (!(socket->state & CONNECTED_READ)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: cannot read because it is not connected", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); } ssize_t read_val = read(socket->io_handle.data.fd, buffer->buffer + buffer->len, buffer->capacity - buffer->len); int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: read of %d", (void *)socket, socket->io_handle.data.fd, (int)read_val); if (read_val > 0) { *amount_read = (size_t)read_val; buffer->len += *amount_read; return AWS_OP_SUCCESS; } /* read_val of 0 means EOF which we'll treat as AWS_IO_SOCKET_CLOSED */ if (read_val == 0) { AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p fd=%d: zero read, socket is closed", (void *)socket, socket->io_handle.data.fd); *amount_read = 0; if (buffer->capacity - buffer->len > 0) { return aws_raise_error(AWS_IO_SOCKET_CLOSED); } return AWS_OP_SUCCESS; } #if defined(EWOULDBLOCK) if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { #else if (errno_value == EAGAIN) { #endif AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: read would block", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } if (errno_value == EPIPE || errno_value == ECONNRESET) { AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket is closed.", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_CLOSED); } if (errno_value == ETIMEDOUT) { AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket timed out.", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_TIMEOUT); } AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: read failed with error: %s", (void *)socket, socket->io_handle.data.fd, strerror(errno_value)); return aws_raise_error(s_determine_socket_error(errno_value)); } int aws_socket_write( struct aws_socket *socket, const struct aws_byte_cursor *cursor, aws_socket_on_write_completed_fn *written_fn, void *user_data) { if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (!(socket->state & CONNECTED_WRITE)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: cannot write to because it is not connected", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); } AWS_ASSERT(written_fn); struct posix_socket *socket_impl = socket->impl; struct socket_write_request *write_request = aws_mem_calloc(socket->allocator, 1, sizeof(struct socket_write_request)); if (!write_request) { return AWS_OP_ERR; } write_request->original_buffer_len = cursor->len; write_request->written_fn = written_fn; write_request->write_user_data = user_data; write_request->cursor_cpy = *cursor; 
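/* Note: only the cursor (pointer + length) is copied above, not the bytes it refers to, so the caller's buffer must remain valid until written_fn reports completion. */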
aws_linked_list_push_back(&socket_impl->write_queue, &write_request->node); return s_process_socket_write_requests(socket, write_request); } int aws_socket_get_error(struct aws_socket *socket) { int connect_result; socklen_t result_length = sizeof(connect_result); if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { return s_determine_socket_error(errno); } if (connect_result) { return s_determine_socket_error(connect_result); } return AWS_OP_SUCCESS; } bool aws_socket_is_open(struct aws_socket *socket) { return socket->io_handle.data.fd >= 0; } void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { struct aws_uuid uuid; AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); char uuid_str[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/retry_strategy.c000066400000000000000000000055001456575232400243720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include void aws_retry_strategy_acquire(struct aws_retry_strategy *retry_strategy) { size_t old_value = aws_atomic_fetch_add_explicit(&retry_strategy->ref_count, 1, aws_memory_order_relaxed); AWS_ASSERT(old_value > 0 && "aws_retry_strategy refcount had been zero, it's invalid to use it again."); (void)old_value; } void aws_retry_strategy_release(struct aws_retry_strategy *retry_strategy) { if (retry_strategy) { size_t old_value = aws_atomic_fetch_sub_explicit(&retry_strategy->ref_count, 1, aws_memory_order_seq_cst); AWS_ASSERT(old_value > 0 && "aws_retry_strategy refcount has gone negative"); if (old_value == 1) { retry_strategy->vtable->destroy(retry_strategy); } } } int aws_retry_strategy_acquire_retry_token( struct aws_retry_strategy *retry_strategy, const struct aws_byte_cursor *partition_id, aws_retry_strategy_on_retry_token_acquired_fn *on_acquired, void *user_data, uint64_t timeout_ms) { AWS_PRECONDITION(retry_strategy); AWS_PRECONDITION(retry_strategy->vtable->acquire_token); return retry_strategy->vtable->acquire_token(retry_strategy, partition_id, on_acquired, user_data, timeout_ms); } int aws_retry_strategy_schedule_retry( struct aws_retry_token *token, enum aws_retry_error_type error_type, aws_retry_strategy_on_retry_ready_fn *retry_ready, void *user_data) { AWS_PRECONDITION(token); AWS_PRECONDITION(token->retry_strategy); AWS_PRECONDITION(token->retry_strategy->vtable->schedule_retry); return token->retry_strategy->vtable->schedule_retry(token, error_type, retry_ready, user_data); } int aws_retry_token_record_success(struct aws_retry_token *token) { AWS_PRECONDITION(token); AWS_PRECONDITION(token->retry_strategy); AWS_PRECONDITION(token->retry_strategy->vtable->record_success); return token->retry_strategy->vtable->record_success(token); } void aws_retry_token_acquire(struct aws_retry_token *token) { size_t old_value = aws_atomic_fetch_add_explicit(&token->ref_count, 1u, aws_memory_order_relaxed); AWS_ASSERT(old_value > 0 && "aws_retry_token refcount had been zero, it's invalid to use it again."); (void)old_value; } void aws_retry_token_release(struct aws_retry_token *token) { if (token) { AWS_PRECONDITION(token->retry_strategy); 
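/* Mirrors aws_retry_token_acquire(): the strategy's release_token vtable entry runs only when the last reference is dropped (old_value == 1 after the fetch_sub below). */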
AWS_PRECONDITION(token->retry_strategy->vtable->release_token); size_t old_value = aws_atomic_fetch_sub_explicit(&token->ref_count, 1u, aws_memory_order_seq_cst); AWS_ASSERT(old_value > 0 && "aws_retry_token refcount has gone negative"); if (old_value == 1u) { token->retry_strategy->vtable->release_token(token); } } } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/s2n/000077500000000000000000000000001456575232400216415ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/s2n/s2n_tls_channel_handler.c000066400000000000000000001762501456575232400265710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define EST_TLS_RECORD_OVERHEAD 53 /* 5 byte header + 32 + 16 bytes for padding */ #define KB_1 1024 #define MAX_RECORD_SIZE (KB_1 * 16) #define EST_HANDSHAKE_SIZE (7 * KB_1) static const char *s_default_ca_dir = NULL; static const char *s_default_ca_file = NULL; struct s2n_delayed_shutdown_task { struct aws_channel_task task; struct aws_channel_slot *slot; int error; }; struct s2n_handler { struct aws_channel_handler handler; struct aws_tls_channel_handler_shared shared_state; struct s2n_connection *connection; struct s2n_ctx *s2n_ctx; struct aws_channel_slot *slot; struct aws_linked_list input_queue; struct aws_byte_buf protocol; struct aws_byte_buf server_name; aws_channel_on_message_write_completed_fn *latest_message_on_completion; struct aws_channel_task sequential_tasks; void *latest_message_completion_user_data; aws_tls_on_negotiation_result_fn *on_negotiation_result; aws_tls_on_data_read_fn *on_data_read; aws_tls_on_error_fn *on_error; void *user_data; bool advertise_alpn_message; enum { NEGOTIATION_ONGOING, NEGOTIATION_FAILED, NEGOTIATION_SUCCEEDED, } state; struct s2n_delayed_shutdown_task delayed_shutdown_task; }; struct s2n_ctx { struct aws_tls_ctx ctx; struct s2n_config *s2n_config; /* Only used in special circumstances (ex: have cert but no key, because key is in PKCS#11) */ struct s2n_cert_chain_and_key *custom_cert_chain_and_key; /** * Custom key operations to perform when a private key operation is required in the TLS handshake. * Only will be used if non-NULL, otherwise this is ignored and the standard private key operations * are performed instead. * NOTE: PKCS11 also is done via this custom_key_handler. * * See aws_custom_key_op_handler in tls_channel_handler.h for more details. 
*/ struct aws_custom_key_op_handler *custom_key_handler; }; struct aws_tls_key_operation { struct aws_allocator *alloc; struct s2n_async_pkey_op *s2n_op; struct s2n_handler *s2n_handler; enum aws_tls_key_operation_type operation_type; enum aws_tls_signature_algorithm signature_algorithm; enum aws_tls_hash_algorithm digest_algorithm; struct aws_byte_buf input_data; struct aws_channel_task completion_task; int completion_error_code; struct aws_atomic_var complete_count; }; AWS_STATIC_STRING_FROM_LITERAL(s_debian_path, "/etc/ssl/certs"); AWS_STATIC_STRING_FROM_LITERAL(s_rhel_path, "/etc/pki/tls/certs"); AWS_STATIC_STRING_FROM_LITERAL(s_android_path, "/system/etc/security/cacerts"); AWS_STATIC_STRING_FROM_LITERAL(s_free_bsd_path, "/usr/local/share/certs"); AWS_STATIC_STRING_FROM_LITERAL(s_net_bsd_path, "/etc/openssl/certs"); AWS_IO_API const char *aws_determine_default_pki_dir(void) { /* debian variants; OpenBSD (although the directory doesn't exist by default) */ if (aws_path_exists(s_debian_path)) { return aws_string_c_str(s_debian_path); } /* RHEL variants */ if (aws_path_exists(s_rhel_path)) { return aws_string_c_str(s_rhel_path); } /* android */ if (aws_path_exists(s_android_path)) { return aws_string_c_str(s_android_path); } /* FreeBSD */ if (aws_path_exists(s_free_bsd_path)) { return aws_string_c_str(s_free_bsd_path); } /* NetBSD */ if (aws_path_exists(s_net_bsd_path)) { return aws_string_c_str(s_net_bsd_path); } return NULL; } AWS_STATIC_STRING_FROM_LITERAL(s_debian_ca_file_path, "/etc/ssl/certs/ca-certificates.crt"); AWS_STATIC_STRING_FROM_LITERAL(s_old_rhel_ca_file_path, "/etc/pki/tls/certs/ca-bundle.crt"); AWS_STATIC_STRING_FROM_LITERAL(s_open_suse_ca_file_path, "/etc/ssl/ca-bundle.pem"); AWS_STATIC_STRING_FROM_LITERAL(s_open_elec_ca_file_path, "/etc/pki/tls/cacert.pem"); AWS_STATIC_STRING_FROM_LITERAL(s_modern_rhel_ca_file_path, "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"); AWS_STATIC_STRING_FROM_LITERAL(s_openbsd_ca_file_path, "/etc/ssl/cert.pem"); AWS_IO_API const char *aws_determine_default_pki_ca_file(void) { /* debian variants */ if (aws_path_exists(s_debian_ca_file_path)) { return aws_string_c_str(s_debian_ca_file_path); } /* Old RHEL variants */ if (aws_path_exists(s_old_rhel_ca_file_path)) { return aws_string_c_str(s_old_rhel_ca_file_path); } /* Open SUSE */ if (aws_path_exists(s_open_suse_ca_file_path)) { return aws_string_c_str(s_open_suse_ca_file_path); } /* Open ELEC */ if (aws_path_exists(s_open_elec_ca_file_path)) { return aws_string_c_str(s_open_elec_ca_file_path); } /* Modern RHEL variants */ if (aws_path_exists(s_modern_rhel_ca_file_path)) { return aws_string_c_str(s_modern_rhel_ca_file_path); } /* OpenBSD */ if (aws_path_exists(s_openbsd_ca_file_path)) { return aws_string_c_str(s_openbsd_ca_file_path); } return NULL; } /* If s2n is already initialized, then we don't call s2n_init() or s2n_cleanup() ourselves */ static bool s_s2n_initialized_externally = false; void aws_tls_init_static_state(struct aws_allocator *alloc) { (void)alloc; AWS_LOGF_INFO(AWS_LS_IO_TLS, "static: Initializing TLS using s2n."); /* Disable atexit behavior, so that s2n_cleanup() fully cleans things up. * * By default, s2n uses an atexit handler and doesn't fully clean up until the program exits. * This can cause a crash if s2n is compiled into a shared library and * that library is unloaded before the atexit handler runs. 
*/ if (s2n_disable_atexit() != S2N_SUCCESS) { /* If this call fails, then s2n is already initialized * https://github.com/aws/s2n-tls/blob/2ad65c11a96368591fe809cd27fd1e390b2c8ce3/api/s2n.h#L211-L212 */ AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: s2n is already initialized"); s_s2n_initialized_externally = true; } else { s_s2n_initialized_externally = false; } if (!s_s2n_initialized_externally) { setenv("S2N_DONT_MLOCK", "1", 1); if (s2n_init() != S2N_SUCCESS) { fprintf(stderr, "s2n_init() failed: %d (%s)\n", s2n_errno, s2n_strerror(s2n_errno, "EN")); AWS_FATAL_ASSERT(0 && "s2n_init() failed"); } } s_default_ca_dir = aws_determine_default_pki_dir(); s_default_ca_file = aws_determine_default_pki_ca_file(); if (s_default_ca_dir || s_default_ca_file) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "ctx: Based on OS, we detected the default PKI path as %s, and ca file as %s", s_default_ca_dir, s_default_ca_file); } else { AWS_LOGF_WARN( AWS_LS_IO_TLS, "Default TLS trust store not found on this system." " TLS connections will fail unless trusted CA certificates are installed," " or \"override default trust store\" is used while creating the TLS context."); } } void aws_tls_clean_up_static_state(void) { /* only clean up s2n if we were the ones that initialized it */ if (!s_s2n_initialized_externally) { s2n_cleanup(); } } bool aws_tls_is_alpn_available(void) { return true; } bool aws_tls_is_cipher_pref_supported(enum aws_tls_cipher_pref cipher_pref) { switch (cipher_pref) { case AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT: return true; /* PQ Crypto no-ops on android for now */ #ifndef ANDROID case AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05: return true; #endif default: return false; } } static int s_generic_read(struct s2n_handler *handler, struct aws_byte_buf *buf) { size_t written = 0; while (!aws_linked_list_empty(&handler->input_queue) && written < buf->len) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&handler->input_queue); struct aws_io_message *message = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); size_t remaining_message_len = message->message_data.len - message->copy_mark; size_t remaining_buf_len = buf->len - written; size_t to_write = remaining_message_len < remaining_buf_len ? 
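/* i.e. min(remaining_message_len, remaining_buf_len): copy as much of the queued message as fits in the buffer s2n handed us */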
remaining_message_len : remaining_buf_len; struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); aws_byte_cursor_advance(&message_cursor, message->copy_mark); aws_byte_cursor_read(&message_cursor, buf->buffer + written, to_write); written += to_write; message->copy_mark += to_write; if (message->copy_mark == message->message_data.len) { aws_mem_release(message->allocator, message); } else { aws_linked_list_push_front(&handler->input_queue, &message->queueing_handle); } } if (written) { return (int)written; } errno = EAGAIN; return -1; } static int s_s2n_handler_recv(void *io_context, uint8_t *buf, uint32_t len) { struct s2n_handler *handler = (struct s2n_handler *)io_context; struct aws_byte_buf read_buffer = aws_byte_buf_from_array(buf, len); return s_generic_read(handler, &read_buffer); } static int s_generic_send(struct s2n_handler *handler, struct aws_byte_buf *buf) { struct aws_byte_cursor buffer_cursor = aws_byte_cursor_from_buf(buf); size_t processed = 0; while (processed < buf->len) { const size_t overhead = aws_channel_slot_upstream_message_overhead(handler->slot); const size_t message_size_hint = (buf->len - processed) + overhead; struct aws_io_message *message = aws_channel_acquire_message_from_pool( handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, message_size_hint); if (!message || message->message_data.capacity <= overhead) { errno = ENOMEM; return -1; } const size_t available_msg_write_capacity = message->message_data.capacity - overhead; const size_t to_write = available_msg_write_capacity >= buffer_cursor.len ? buffer_cursor.len : available_msg_write_capacity; struct aws_byte_cursor chunk = aws_byte_cursor_advance(&buffer_cursor, to_write); if (aws_byte_buf_append(&message->message_data, &chunk)) { aws_mem_release(message->allocator, message); return -1; } processed += message->message_data.len; if (processed == buf->len) { message->on_completion = handler->latest_message_on_completion; message->user_data = handler->latest_message_completion_user_data; handler->latest_message_on_completion = NULL; handler->latest_message_completion_user_data = NULL; } if (aws_channel_slot_send_message(handler->slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); errno = EPIPE; return -1; } } if (processed) { return (int)processed; } errno = EAGAIN; return -1; } static int s_s2n_handler_send(void *io_context, const uint8_t *buf, uint32_t len) { struct s2n_handler *handler = (struct s2n_handler *)io_context; struct aws_byte_buf send_buf = aws_byte_buf_from_array(buf, len); return s_generic_send(handler, &send_buf); } static void s_s2n_handler_destroy(struct aws_channel_handler *handler) { if (handler) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; aws_tls_channel_handler_shared_clean_up(&s2n_handler->shared_state); if (s2n_handler->connection) { s2n_connection_free(s2n_handler->connection); } if (s2n_handler->s2n_ctx) { aws_tls_ctx_release(&s2n_handler->s2n_ctx->ctx); } aws_mem_release(handler->alloc, (void *)s2n_handler); } } static void s_on_negotiation_result( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int error_code, void *user_data) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; aws_on_tls_negotiation_completed(&s2n_handler->shared_state, error_code); if (s2n_handler->on_negotiation_result) { s2n_handler->on_negotiation_result(handler, slot, error_code, user_data); } } static int s_drive_negotiation(struct aws_channel_handler 
*handler) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; AWS_ASSERT(s2n_handler->state == NEGOTIATION_ONGOING); aws_on_drive_tls_negotiation(&s2n_handler->shared_state); s2n_blocked_status blocked = S2N_NOT_BLOCKED; do { int negotiation_code = s2n_negotiate(s2n_handler->connection, &blocked); int s2n_error = s2n_errno; if (negotiation_code == S2N_ERR_T_OK) { s2n_handler->state = NEGOTIATION_SUCCEEDED; const char *protocol = s2n_get_application_protocol(s2n_handler->connection); if (protocol) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Alpn protocol negotiated as %s", (void *)handler, protocol); s2n_handler->protocol = aws_byte_buf_from_c_str(protocol); } const char *server_name = s2n_get_server_name(s2n_handler->connection); if (server_name) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Remote server name is %s", (void *)handler, server_name); s2n_handler->server_name = aws_byte_buf_from_c_str(server_name); } if (s2n_handler->slot->adj_right && s2n_handler->advertise_alpn_message && protocol) { struct aws_io_message *message = aws_channel_acquire_message_from_pool( s2n_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, sizeof(struct aws_tls_negotiated_protocol_message)); message->message_tag = AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE; struct aws_tls_negotiated_protocol_message *protocol_message = (struct aws_tls_negotiated_protocol_message *)message->message_data.buffer; protocol_message->protocol = s2n_handler->protocol; message->message_data.len = sizeof(struct aws_tls_negotiated_protocol_message); if (aws_channel_slot_send_message(s2n_handler->slot, message, AWS_CHANNEL_DIR_READ)) { aws_mem_release(message->allocator, message); aws_channel_shutdown(s2n_handler->slot->channel, aws_last_error()); return AWS_OP_SUCCESS; } } s_on_negotiation_result(handler, s2n_handler->slot, AWS_OP_SUCCESS, s2n_handler->user_data); break; } if (s2n_error_get_type(s2n_error) != S2N_ERR_T_BLOCKED) { AWS_LOGF_WARN( AWS_LS_IO_TLS, "id=%p: negotiation failed with error %s (%s)", (void *)handler, s2n_strerror(s2n_error, "EN"), s2n_strerror_debug(s2n_error, "EN")); if (s2n_error_get_type(s2n_error) == S2N_ERR_T_ALERT) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Alert code %d", (void *)handler, s2n_connection_get_alert(s2n_handler->connection)); } const char *err_str = s2n_strerror_debug(s2n_error, NULL); (void)err_str; s2n_handler->state = NEGOTIATION_FAILED; aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); s_on_negotiation_result( handler, s2n_handler->slot, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE, s2n_handler->user_data); return AWS_OP_ERR; } } while (blocked == S2N_NOT_BLOCKED); return AWS_OP_SUCCESS; } static void s_negotiation_task(struct aws_channel_task *task, void *arg, aws_task_status status) { task->task_fn = NULL; task->arg = NULL; if (status == AWS_TASK_STATUS_RUN_READY) { struct aws_channel_handler *handler = arg; struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; if (s2n_handler->state == NEGOTIATION_ONGOING) { s_drive_negotiation(handler); } } } int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Kicking off TLS negotiation.", (void *)handler); if (aws_channel_thread_is_callers_thread(s2n_handler->slot->channel)) { if (s2n_handler->state == NEGOTIATION_ONGOING) { s_drive_negotiation(handler); } return AWS_OP_SUCCESS; } aws_channel_task_init( &s2n_handler->sequential_tasks, s_negotiation_task, handler, 
"s2n_channel_handler_negotiation"); aws_channel_schedule_task_now(s2n_handler->slot->channel, &s2n_handler->sequential_tasks); return AWS_OP_SUCCESS; } static int s_s2n_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct s2n_handler *s2n_handler = handler->impl; if (AWS_UNLIKELY(s2n_handler->state == NEGOTIATION_FAILED)) { return aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } if (message) { aws_linked_list_push_back(&s2n_handler->input_queue, &message->queueing_handle); if (s2n_handler->state == NEGOTIATION_ONGOING) { size_t message_len = message->message_data.len; if (!s_drive_negotiation(handler)) { aws_channel_slot_increment_read_window(slot, message_len); } else { aws_channel_shutdown(s2n_handler->slot->channel, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); } return AWS_OP_SUCCESS; } } s2n_blocked_status blocked = S2N_NOT_BLOCKED; size_t downstream_window = SIZE_MAX; if (slot->adj_right) { downstream_window = aws_channel_slot_downstream_read_window(slot); } size_t processed = 0; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Downstream window %llu", (void *)handler, (unsigned long long)downstream_window); while (processed < downstream_window) { struct aws_io_message *outgoing_read_message = aws_channel_acquire_message_from_pool( slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, downstream_window - processed); if (!outgoing_read_message) { return AWS_OP_ERR; } ssize_t read = s2n_recv( s2n_handler->connection, outgoing_read_message->message_data.buffer, outgoing_read_message->message_data.capacity, &blocked); AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Bytes read %lld", (void *)handler, (long long)read); /* weird race where we received an alert from the peer, but s2n doesn't tell us about it..... * if this happens, it's a graceful shutdown, so kick it off here. * * In other words, s2n, upon graceful shutdown, follows the unix EOF idiom. So just shutdown with * SUCCESS. 
*/ if (read == 0) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Alert code %d", (void *)handler, s2n_connection_get_alert(s2n_handler->connection)); aws_mem_release(outgoing_read_message->allocator, outgoing_read_message); aws_channel_shutdown(slot->channel, AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } if (read < 0) { aws_mem_release(outgoing_read_message->allocator, outgoing_read_message); /* the socket blocked so exit from the loop */ if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_BLOCKED) { break; } /* the socket returned a fatal error so shut down */ AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: S2N failed to read with error: %s (%s)", (void *)handler, s2n_strerror(s2n_errno, "EN"), s2n_strerror_debug(s2n_errno, "EN")); aws_channel_shutdown(slot->channel, AWS_IO_TLS_ERROR_READ_FAILURE); return AWS_OP_SUCCESS; }; /* if read > 0 */ processed += read; outgoing_read_message->message_data.len = (size_t)read; if (s2n_handler->on_data_read) { s2n_handler->on_data_read(handler, slot, &outgoing_read_message->message_data, s2n_handler->user_data); } if (slot->adj_right) { aws_channel_slot_send_message(slot, outgoing_read_message, AWS_CHANNEL_DIR_READ); } else { aws_mem_release(outgoing_read_message->allocator, outgoing_read_message); } } AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Remaining window for this event-loop tick: %llu", (void *)handler, (unsigned long long)downstream_window - processed); return AWS_OP_SUCCESS; } static int s_s2n_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)slot; struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; if (AWS_UNLIKELY(s2n_handler->state != NEGOTIATION_SUCCEEDED)) { return aws_raise_error(AWS_IO_TLS_ERROR_NOT_NEGOTIATED); } s2n_handler->latest_message_on_completion = message->on_completion; s2n_handler->latest_message_completion_user_data = message->user_data; s2n_blocked_status blocked; ssize_t write_code = s2n_send(s2n_handler->connection, message->message_data.buffer, (ssize_t)message->message_data.len, &blocked); AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Bytes written: %llu", (void *)handler, (unsigned long long)write_code); ssize_t message_len = (ssize_t)message->message_data.len; if (write_code < message_len) { return aws_raise_error(AWS_IO_TLS_ERROR_WRITE_FAILURE); } aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } static void s_delayed_shutdown_task_fn(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; struct aws_channel_handler *handler = arg; struct s2n_handler *s2n_handler = handler->impl; if (status == AWS_TASK_STATUS_RUN_READY) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Delayed shut down in write direction", (void *)handler); s2n_blocked_status blocked; /* make a best effort, but the channel is going away after this run, so.... 
you only get one shot anyways */ s2n_shutdown(s2n_handler->connection, &blocked); } aws_channel_slot_on_handler_shutdown_complete( s2n_handler->delayed_shutdown_task.slot, AWS_CHANNEL_DIR_WRITE, s2n_handler->delayed_shutdown_task.error, false); } static enum aws_tls_signature_algorithm s_s2n_to_aws_signature_algorithm(s2n_tls_signature_algorithm s2n_alg) { switch (s2n_alg) { case S2N_TLS_SIGNATURE_RSA: return AWS_TLS_SIGNATURE_RSA; case S2N_TLS_SIGNATURE_ECDSA: return AWS_TLS_SIGNATURE_ECDSA; default: return AWS_TLS_SIGNATURE_UNKNOWN; } } static enum aws_tls_hash_algorithm s_s2n_to_aws_hash_algorithm(s2n_tls_hash_algorithm s2n_alg) { switch (s2n_alg) { case (S2N_TLS_HASH_SHA1): return AWS_TLS_HASH_SHA1; case (S2N_TLS_HASH_SHA224): return AWS_TLS_HASH_SHA224; case (S2N_TLS_HASH_SHA256): return AWS_TLS_HASH_SHA256; case (S2N_TLS_HASH_SHA384): return AWS_TLS_HASH_SHA384; case (S2N_TLS_HASH_SHA512): return AWS_TLS_HASH_SHA512; default: return AWS_TLS_HASH_UNKNOWN; } } static void s_tls_key_operation_destroy(struct aws_tls_key_operation *operation) { if (operation->s2n_op) { s2n_async_pkey_op_free(operation->s2n_op); } if (operation->s2n_handler) { aws_channel_release_hold(operation->s2n_handler->slot->channel); } aws_byte_buf_clean_up(&operation->input_data); aws_mem_release(operation->alloc, operation); } /* This task finishes a private key operation on the event-loop thread. * If the operation was successful, TLS negotiation is resumed. * If the operation failed, the channel is shut down */ static void s_tls_key_operation_completion_task( struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; struct aws_tls_key_operation *operation = arg; struct s2n_handler *s2n_handler = operation->s2n_handler; struct aws_channel_handler *handler = &s2n_handler->handler; /* if things started failing since this task was scheduled, just clean up and bail out */ if (status != AWS_TASK_STATUS_RUN_READY || s2n_handler->state != NEGOTIATION_ONGOING) { goto clean_up; } if (operation->completion_error_code == 0) { if (s2n_async_pkey_op_apply(operation->s2n_op, s2n_handler->connection)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed applying s2n async pkey op", (void *)handler); operation->completion_error_code = AWS_ERROR_INVALID_STATE; } } if (operation->completion_error_code == 0) { s_drive_negotiation(handler); } else { aws_channel_shutdown(s2n_handler->slot->channel, operation->completion_error_code); } clean_up: s_tls_key_operation_destroy(operation); } /* Common implementation for aws_tls_key_operation_complete() and aws_tls_key_operation_complete_with_error() * This is called exactly once. Schedules a task to actually finish things up on the event-loop thread. */ static void s_tls_key_operation_complete_common( struct aws_tls_key_operation *operation, int error_code, const struct aws_byte_cursor *output) { AWS_ASSERT((error_code != 0) ^ (output != NULL)); /* error_code XOR output must be set */ /* Ensure this can only be called once and exactly once. */ size_t complete_count = aws_atomic_fetch_add(&operation->complete_count, 1); AWS_FATAL_ASSERT(complete_count == 0 && "TLS key operation marked complete multiple times"); struct s2n_handler *s2n_handler = operation->s2n_handler; struct aws_channel_handler *handler = &s2n_handler->handler; if (output != NULL) { /* Immediately pass output through to s2n_op. 
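* For context, a custom key-op handler (PKCS#11 or any user-supplied aws_custom_key_op_handler) is expected to read the request via aws_tls_key_operation_get_input(), perform the sign/decrypt out of band, and then call exactly one of aws_tls_key_operation_complete() or aws_tls_key_operation_complete_with_error(), possibly from another thread; that is why the remaining work is finished on the event-loop thread via the task scheduled below.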
*/ if (s2n_async_pkey_op_set_output(operation->s2n_op, output->ptr, output->len)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed setting output on s2n async pkey op", (void *)handler); error_code = AWS_ERROR_INVALID_STATE; goto done; } } done: operation->completion_error_code = error_code; /* Schedule a task to finish the operation. * We schedule a task because the user might * have completed the operation asynchronously, * but we need to be on the event-loop thread to * resume TLS negotiation. */ aws_channel_task_init( &operation->completion_task, s_tls_key_operation_completion_task, operation, "tls_key_operation_completion_task"); aws_channel_schedule_task_now(s2n_handler->slot->channel, &operation->completion_task); } void aws_tls_key_operation_complete(struct aws_tls_key_operation *operation, struct aws_byte_cursor output) { if (operation == NULL) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Operation complete: operation is null and therefore cannot be set to complete!"); return; } AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: TLS key operation complete with %zu bytes of output data", (void *)operation->s2n_handler, output.len); s_tls_key_operation_complete_common(operation, 0, &output); } void aws_tls_key_operation_complete_with_error(struct aws_tls_key_operation *operation, int error_code) { if (operation == NULL) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "Operation complete with error: operation is null and therefore cannot be set to complete!"); return; } if (error_code == 0) { error_code = AWS_ERROR_UNKNOWN; AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: TLS key operation completed with error, but no error-code set. Using %s", (void *)operation->s2n_handler, aws_error_name(error_code)); } AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: TLS key operation complete with error %s", (void *)operation->s2n_handler, aws_error_name(error_code)); s_tls_key_operation_complete_common(operation, error_code, NULL); } static struct aws_tls_key_operation *s_tls_key_operation_new( struct aws_channel_handler *handler, struct s2n_async_pkey_op *s2n_op) { struct s2n_handler *s2n_handler = handler->impl; struct aws_tls_key_operation *operation = aws_mem_calloc(handler->alloc, 1, sizeof(struct aws_tls_key_operation)); operation->alloc = handler->alloc; /* Copy input data */ uint32_t input_size = 0; if (s2n_async_pkey_op_get_input_size(s2n_op, &input_size)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed querying s2n async pkey op size", (void *)handler); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } aws_byte_buf_init(&operation->input_data, operation->alloc, input_size); /* cannot fail */ if (s2n_async_pkey_op_get_input(s2n_op, operation->input_data.buffer, input_size)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed querying s2n async pkey input", (void *)handler); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } operation->input_data.len = input_size; /* Get operation type */ s2n_async_pkey_op_type s2n_op_type = 0; if (s2n_async_pkey_op_get_op_type(s2n_op, &s2n_op_type)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed querying s2n async pkey op type", (void *)handler); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } if (s2n_op_type == S2N_ASYNC_SIGN) { operation->operation_type = AWS_TLS_KEY_OPERATION_SIGN; /* Gather additional information if this is a SIGN operation */ s2n_tls_signature_algorithm s2n_sign_alg = 0; if (s2n_connection_get_selected_client_cert_signature_algorithm(s2n_handler->connection, &s2n_sign_alg)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed getting s2n client cert signature algorithm", (void 
*)handler); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } operation->signature_algorithm = s_s2n_to_aws_signature_algorithm(s2n_sign_alg); if (operation->signature_algorithm == AWS_TLS_SIGNATURE_UNKNOWN) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Cannot sign with s2n_tls_signature_algorithm=%d. Algorithm currently unsupported", (void *)handler, s2n_sign_alg); aws_raise_error(AWS_IO_TLS_SIGNATURE_ALGORITHM_UNSUPPORTED); goto error; } s2n_tls_hash_algorithm s2n_digest_alg = 0; if (s2n_connection_get_selected_client_cert_digest_algorithm(s2n_handler->connection, &s2n_digest_alg)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Failed getting s2n client cert digest algorithm", (void *)handler); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } operation->digest_algorithm = s_s2n_to_aws_hash_algorithm(s2n_digest_alg); if (operation->digest_algorithm == AWS_TLS_HASH_UNKNOWN) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Cannot sign digest created with s2n_tls_hash_algorithm=%d. Algorithm currently unsupported", (void *)handler, s2n_digest_alg); aws_raise_error(AWS_IO_TLS_DIGEST_ALGORITHM_UNSUPPORTED); goto error; } } else if (s2n_op_type == S2N_ASYNC_DECRYPT) { operation->operation_type = AWS_TLS_KEY_OPERATION_DECRYPT; } else { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: Unknown s2n async pkey op type:%d", (void *)handler, (int)s2n_op_type); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } /* Keep channel alive until operation completes */ operation->s2n_handler = s2n_handler; aws_channel_acquire_hold(s2n_handler->slot->channel); /* Set this to zero so we can track how many times complete has been called */ aws_atomic_init_int(&operation->complete_count, 0); /* Set this last. We don't want to take ownership of s2n_op until we know setup was 100% successful */ operation->s2n_op = s2n_op; return operation; error: s_tls_key_operation_destroy(operation); return NULL; } struct aws_byte_cursor aws_tls_key_operation_get_input(const struct aws_tls_key_operation *operation) { return aws_byte_cursor_from_buf(&operation->input_data); } enum aws_tls_key_operation_type aws_tls_key_operation_get_type(const struct aws_tls_key_operation *operation) { return operation->operation_type; } enum aws_tls_signature_algorithm aws_tls_key_operation_get_signature_algorithm( const struct aws_tls_key_operation *operation) { return operation->signature_algorithm; } enum aws_tls_hash_algorithm aws_tls_key_operation_get_digest_algorithm(const struct aws_tls_key_operation *operation) { return operation->digest_algorithm; } static int s_s2n_async_pkey_callback(struct s2n_connection *conn, struct s2n_async_pkey_op *s2n_op) { struct s2n_handler *s2n_handler = s2n_connection_get_ctx(conn); struct aws_channel_handler *handler = &s2n_handler->handler; AWS_ASSERT(conn == s2n_handler->connection); (void)conn; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: s2n async pkey callback received", (void *)handler); /* Create the AWS wrapper around s2n_async_pkey_op */ struct aws_tls_key_operation *operation = s_tls_key_operation_new(handler, s2n_op); if (operation == NULL) { s2n_async_pkey_op_free(s2n_op); return S2N_FAILURE; } AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Begin TLS key operation. 
type=%s input_data.len=%zu signature=%s digest=%s", (void *)operation, aws_tls_key_operation_type_str(operation->operation_type), operation->input_data.len, aws_tls_signature_algorithm_str(operation->signature_algorithm), aws_tls_hash_algorithm_str(operation->digest_algorithm)); aws_custom_key_op_handler_perform_operation(s2n_handler->s2n_ctx->custom_key_handler, operation); return S2N_SUCCESS; } static int s_s2n_do_delayed_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int error_code) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; s2n_handler->delayed_shutdown_task.slot = slot; s2n_handler->delayed_shutdown_task.error = error_code; uint64_t shutdown_delay = s2n_connection_get_delay(s2n_handler->connection); uint64_t now = 0; if (aws_channel_current_clock_time(slot->channel, &now)) { return AWS_OP_ERR; } uint64_t shutdown_time = aws_add_u64_saturating(shutdown_delay, now); aws_channel_schedule_task_future(slot->channel, &s2n_handler->delayed_shutdown_task.task, shutdown_time); return AWS_OP_SUCCESS; } static int s_s2n_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool abort_immediately) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; if (dir == AWS_CHANNEL_DIR_WRITE) { if (!abort_immediately && error_code != AWS_IO_SOCKET_CLOSED) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Scheduling delayed write direction shutdown", (void *)handler); if (s_s2n_do_delayed_shutdown(handler, slot, error_code) == AWS_OP_SUCCESS) { return AWS_OP_SUCCESS; } } } else { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Shutting down read direction with error code %d", (void *)handler, error_code); /* If negotiation hasn't succeeded yet, it's certainly not going to succeed now */ if (s2n_handler->state == NEGOTIATION_ONGOING) { s2n_handler->state = NEGOTIATION_FAILED; } while (!aws_linked_list_empty(&s2n_handler->input_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&s2n_handler->input_queue); struct aws_io_message *message = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle); aws_mem_release(message->allocator, message); } } return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, abort_immediately); } static void s_run_read(struct aws_channel_task *task, void *arg, aws_task_status status) { task->task_fn = NULL; task->arg = NULL; if (status == AWS_TASK_STATUS_RUN_READY) { struct aws_channel_handler *handler = (struct aws_channel_handler *)arg; struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; s_s2n_handler_process_read_message(handler, s2n_handler->slot, NULL); } } static int s_s2n_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)size; struct s2n_handler *s2n_handler = handler->impl; size_t downstream_size = aws_channel_slot_downstream_read_window(slot); size_t current_window_size = slot->window_size; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Increment read window message received %llu", (void *)handler, (unsigned long long)size); size_t likely_records_count = (size_t)ceil((double)(downstream_size) / (double)(MAX_RECORD_SIZE)); size_t offset_size = aws_mul_size_saturating(likely_records_count, EST_TLS_RECORD_OVERHEAD); size_t total_desired_size = aws_add_size_saturating(offset_size, downstream_size); if (total_desired_size > current_window_size) { size_t window_update_size = total_desired_size - current_window_size; 
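/* Example: if the downstream window spans 2.5 records' worth of plaintext, likely_records_count rounds
 * up to 3, so the desired window is the downstream size plus 3 records of estimated TLS overhead; only
 * the shortfall versus the slot's current window is propagated upstream below. */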
AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Propagating read window increment of size %llu", (void *)handler, (unsigned long long)window_update_size); aws_channel_slot_increment_read_window(slot, window_update_size); } if (s2n_handler->state == NEGOTIATION_SUCCEEDED && !s2n_handler->sequential_tasks.node.next) { /* TLS requires full records before it can decrypt anything. As a result we need to check everything we've * buffered instead of just waiting on a read from the socket, or we'll hit a deadlock. * * We have messages in a queue and they need to be run after the socket has popped (even if it didn't have data * to read). Alternatively, s2n reads entire records at a time, so we'll need to grab whatever we can and we * have no idea what's going on inside there. So we need to attempt another read.*/ aws_channel_task_init( &s2n_handler->sequential_tasks, s_run_read, handler, "s2n_channel_handler_read_on_window_increment"); aws_channel_schedule_task_now(slot->channel, &s2n_handler->sequential_tasks); } return AWS_OP_SUCCESS; } static size_t s_s2n_handler_message_overhead(struct aws_channel_handler *handler) { (void)handler; return EST_TLS_RECORD_OVERHEAD; } static size_t s_s2n_handler_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return EST_HANDSHAKE_SIZE; } static void s_s2n_handler_reset_statistics(struct aws_channel_handler *handler) { struct s2n_handler *s2n_handler = handler->impl; aws_crt_statistics_tls_reset(&s2n_handler->shared_state.stats); } static void s_s2n_handler_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) { struct s2n_handler *s2n_handler = handler->impl; void *stats_base = &s2n_handler->shared_state.stats; aws_array_list_push_back(stats, &stats_base); } struct aws_byte_buf aws_tls_handler_protocol(struct aws_channel_handler *handler) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; return s2n_handler->protocol; } struct aws_byte_buf aws_tls_handler_server_name(struct aws_channel_handler *handler) { struct s2n_handler *s2n_handler = (struct s2n_handler *)handler->impl; return s2n_handler->server_name; } static struct aws_channel_handler_vtable s_handler_vtable = { .destroy = s_s2n_handler_destroy, .process_read_message = s_s2n_handler_process_read_message, .process_write_message = s_s2n_handler_process_write_message, .shutdown = s_s2n_handler_shutdown, .increment_read_window = s_s2n_handler_increment_read_window, .initial_window_size = s_s2n_handler_initial_window_size, .message_overhead = s_s2n_handler_message_overhead, .reset_statistics = s_s2n_handler_reset_statistics, .gather_statistics = s_s2n_handler_gather_statistics, }; static int s_parse_protocol_preferences( struct aws_string *alpn_list_str, const char protocol_output[4][128], size_t *protocol_count) { size_t max_count = *protocol_count; *protocol_count = 0; struct aws_byte_cursor alpn_list_buffer[4]; AWS_ZERO_ARRAY(alpn_list_buffer); struct aws_array_list alpn_list; struct aws_byte_cursor user_alpn_str = aws_byte_cursor_from_string(alpn_list_str); aws_array_list_init_static(&alpn_list, alpn_list_buffer, 4, sizeof(struct aws_byte_cursor)); if (aws_byte_cursor_split_on_char(&user_alpn_str, ';', &alpn_list)) { aws_raise_error(AWS_IO_TLS_CTX_ERROR); return AWS_OP_ERR; } size_t protocols_list_len = aws_array_list_length(&alpn_list); if (protocols_list_len < 1) { aws_raise_error(AWS_IO_TLS_CTX_ERROR); return AWS_OP_ERR; } for (size_t i = 0; i < protocols_list_len && i < max_count; ++i) { struct aws_byte_cursor cursor; 
AWS_ZERO_STRUCT(cursor); if (aws_array_list_get_at(&alpn_list, (void *)&cursor, (size_t)i)) { aws_raise_error(AWS_IO_TLS_CTX_ERROR); return AWS_OP_ERR; } AWS_FATAL_ASSERT(cursor.ptr && cursor.len > 0); memcpy((void *)protocol_output[i], cursor.ptr, cursor.len); *protocol_count += 1; } return AWS_OP_SUCCESS; } static size_t s_tl_cleanup_key = 0; /* Address of variable serves as key in hash table */ /* * This local object is added to the table of every event loop that has a (s2n) tls connection * added to it at some point in time */ static struct aws_event_loop_local_object s_tl_cleanup_object = { .key = &s_tl_cleanup_key, .object = NULL, .on_object_removed = NULL, }; static void s_aws_cleanup_s2n_thread_local_state(void *user_data) { (void)user_data; s2n_cleanup(); } /* s2n allocates thread-local data structures. We need to clean these up when the event loop's thread exits. */ static int s_s2n_tls_channel_handler_schedule_thread_local_cleanup(struct aws_channel_slot *slot) { struct aws_channel *channel = slot->channel; struct aws_event_loop_local_object existing_marker; AWS_ZERO_STRUCT(existing_marker); /* * Check whether another s2n_tls_channel_handler has already scheduled the cleanup task. */ if (aws_channel_fetch_local_object(channel, &s_tl_cleanup_key, &existing_marker)) { /* Doesn't exist in event loop table: add it and add the at-exit cleanup callback */ if (aws_channel_put_local_object(channel, &s_tl_cleanup_key, &s_tl_cleanup_object)) { return AWS_OP_ERR; } aws_thread_current_at_exit(s_aws_cleanup_s2n_thread_local_state, NULL); } return AWS_OP_SUCCESS; } static struct aws_channel_handler *s_new_tls_handler( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot, s2n_mode mode) { AWS_ASSERT(options->ctx); struct s2n_handler *s2n_handler = aws_mem_calloc(allocator, 1, sizeof(struct s2n_handler)); s2n_handler->handler.impl = s2n_handler; s2n_handler->handler.alloc = allocator; s2n_handler->handler.vtable = &s_handler_vtable; s2n_handler->handler.slot = slot; aws_tls_ctx_acquire(options->ctx); s2n_handler->s2n_ctx = options->ctx->impl; s2n_handler->connection = s2n_connection_new(mode); if (!s2n_handler->connection) { goto cleanup_conn; } aws_tls_channel_handler_shared_init(&s2n_handler->shared_state, &s2n_handler->handler, options); s2n_handler->user_data = options->user_data; s2n_handler->on_data_read = options->on_data_read; s2n_handler->on_error = options->on_error; s2n_handler->on_negotiation_result = options->on_negotiation_result; s2n_handler->advertise_alpn_message = options->advertise_alpn_message; s2n_handler->latest_message_completion_user_data = NULL; s2n_handler->latest_message_on_completion = NULL; s2n_handler->slot = slot; aws_linked_list_init(&s2n_handler->input_queue); s2n_handler->protocol = aws_byte_buf_from_array(NULL, 0); if (options->server_name) { if (s2n_set_server_name(s2n_handler->connection, aws_string_c_str(options->server_name))) { aws_raise_error(AWS_IO_TLS_CTX_ERROR); goto cleanup_conn; } } s2n_handler->state = NEGOTIATION_ONGOING; s2n_connection_set_recv_cb(s2n_handler->connection, s_s2n_handler_recv); s2n_connection_set_recv_ctx(s2n_handler->connection, s2n_handler); s2n_connection_set_send_cb(s2n_handler->connection, s_s2n_handler_send); s2n_connection_set_send_ctx(s2n_handler->connection, s2n_handler); s2n_connection_set_ctx(s2n_handler->connection, s2n_handler); s2n_connection_set_blinding(s2n_handler->connection, S2N_SELF_SERVICE_BLINDING); if (options->alpn_list) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, 
"id=%p: Setting ALPN list %s", (void *)&s2n_handler->handler, aws_string_c_str(options->alpn_list)); const char protocols_cpy[4][128]; AWS_ZERO_ARRAY(protocols_cpy); size_t protocols_size = 4; if (s_parse_protocol_preferences(options->alpn_list, protocols_cpy, &protocols_size)) { aws_raise_error(AWS_IO_TLS_CTX_ERROR); goto cleanup_conn; } const char *protocols[4]; AWS_ZERO_ARRAY(protocols); for (size_t i = 0; i < protocols_size; ++i) { protocols[i] = protocols_cpy[i]; } if (s2n_connection_set_protocol_preferences( s2n_handler->connection, (const char *const *)protocols, (int)protocols_size)) { aws_raise_error(AWS_IO_TLS_CTX_ERROR); goto cleanup_conn; } } if (s2n_connection_set_config(s2n_handler->connection, s2n_handler->s2n_ctx->s2n_config)) { AWS_LOGF_WARN( AWS_LS_IO_TLS, "id=%p: configuration error %s (%s)", (void *)&s2n_handler->handler, s2n_strerror(s2n_errno, "EN"), s2n_strerror_debug(s2n_errno, "EN")); aws_raise_error(AWS_IO_TLS_CTX_ERROR); goto cleanup_conn; } aws_channel_task_init( &s2n_handler->delayed_shutdown_task.task, s_delayed_shutdown_task_fn, &s2n_handler->handler, "s2n_delayed_shutdown"); if (s_s2n_tls_channel_handler_schedule_thread_local_cleanup(slot)) { goto cleanup_conn; } return &s2n_handler->handler; cleanup_conn: s_s2n_handler_destroy(&s2n_handler->handler); return NULL; } struct aws_channel_handler *aws_tls_client_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { return s_new_tls_handler(allocator, options, slot, S2N_CLIENT); } struct aws_channel_handler *aws_tls_server_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { return s_new_tls_handler(allocator, options, slot, S2N_SERVER); } static void s_s2n_ctx_destroy(struct s2n_ctx *s2n_ctx) { if (s2n_ctx != NULL) { if (s2n_ctx->s2n_config) { s2n_config_free(s2n_ctx->s2n_config); } if (s2n_ctx->custom_cert_chain_and_key) { s2n_cert_chain_and_key_free(s2n_ctx->custom_cert_chain_and_key); } s2n_ctx->custom_key_handler = aws_custom_key_op_handler_release(s2n_ctx->custom_key_handler); aws_mem_release(s2n_ctx->ctx.alloc, s2n_ctx); } } static int s2n_wall_clock_time_nanoseconds(void *context, uint64_t *time_in_ns) { (void)context; if (aws_sys_clock_get_ticks(time_in_ns)) { *time_in_ns = 0; return -1; } return 0; } static int s2n_monotonic_clock_time_nanoseconds(void *context, uint64_t *time_in_ns) { (void)context; if (aws_high_res_clock_get_ticks(time_in_ns)) { *time_in_ns = 0; return -1; } return 0; } static void s_log_and_raise_s2n_errno(const char *msg) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "%s: %s (%s)", msg, s2n_strerror(s2n_errno, "EN"), s2n_strerror_debug(s2n_errno, "EN")); aws_raise_error(AWS_IO_TLS_CTX_ERROR); } static struct aws_tls_ctx *s_tls_ctx_new( struct aws_allocator *alloc, const struct aws_tls_ctx_options *options, s2n_mode mode) { struct s2n_ctx *s2n_ctx = aws_mem_calloc(alloc, 1, sizeof(struct s2n_ctx)); if (!s2n_ctx) { return NULL; } if (!aws_tls_is_cipher_pref_supported(options->cipher_pref)) { aws_raise_error(AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: TLS Cipher Preference is not supported: %d.", options->cipher_pref); return NULL; } s2n_ctx->ctx.alloc = alloc; s2n_ctx->ctx.impl = s2n_ctx; aws_ref_count_init(&s2n_ctx->ctx.ref_count, s2n_ctx, (aws_simple_completion_callback *)s_s2n_ctx_destroy); s2n_ctx->s2n_config = s2n_config_new(); if (!s2n_ctx->s2n_config) { s_log_and_raise_s2n_errno("ctx: creation failed"); goto 
cleanup_s2n_config; } int set_clock_result = s2n_config_set_wall_clock(s2n_ctx->s2n_config, s2n_wall_clock_time_nanoseconds, NULL); if (set_clock_result != S2N_ERR_T_OK) { s_log_and_raise_s2n_errno("ctx: failed to set wall clock"); goto cleanup_s2n_config; } set_clock_result = s2n_config_set_monotonic_clock(s2n_ctx->s2n_config, s2n_monotonic_clock_time_nanoseconds, NULL); if (set_clock_result != S2N_ERR_T_OK) { s_log_and_raise_s2n_errno("ctx: failed to set monotonic clock"); goto cleanup_s2n_config; } const char *security_policy = NULL; if (options->custom_key_op_handler != NULL) { /* When custom_key_op_handler is set, don't use security policy that allow TLS 1.3. * This hack is necessary until our PKCS#11 custom_key_op_handler supports RSA PSS */ switch (options->minimum_tls_version) { case AWS_IO_SSLv3: security_policy = "CloudFront-SSL-v-3"; break; case AWS_IO_TLSv1: security_policy = "CloudFront-TLS-1-0-2014"; break; case AWS_IO_TLSv1_1: security_policy = "ELBSecurityPolicy-TLS-1-1-2017-01"; break; case AWS_IO_TLSv1_2: security_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"; break; case AWS_IO_TLSv1_3: AWS_LOGF_ERROR(AWS_LS_IO_TLS, "TLS 1.3 with PKCS#11 is not supported yet."); aws_raise_error(AWS_IO_TLS_VERSION_UNSUPPORTED); goto cleanup_s2n_config; case AWS_IO_TLS_VER_SYS_DEFAULTS: default: security_policy = "ELBSecurityPolicy-TLS-1-1-2017-01"; } } else { /* No custom_key_op_handler is set, use normal security policies */ switch (options->minimum_tls_version) { case AWS_IO_SSLv3: security_policy = "AWS-CRT-SDK-SSLv3.0-2023"; break; case AWS_IO_TLSv1: security_policy = "AWS-CRT-SDK-TLSv1.0-2023"; break; case AWS_IO_TLSv1_1: security_policy = "AWS-CRT-SDK-TLSv1.1-2023"; break; case AWS_IO_TLSv1_2: security_policy = "AWS-CRT-SDK-TLSv1.2-2023"; break; case AWS_IO_TLSv1_3: security_policy = "AWS-CRT-SDK-TLSv1.3-2023"; break; case AWS_IO_TLS_VER_SYS_DEFAULTS: default: security_policy = "AWS-CRT-SDK-TLSv1.0-2023"; } } switch (options->cipher_pref) { case AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT: /* No-Op, if the user configured a minimum_tls_version then a version-specific Cipher Preference was set */ break; case AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05: security_policy = "PQ-TLS-1-0-2021-05-26"; break; default: AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Unrecognized TLS Cipher Preference: %d", options->cipher_pref); aws_raise_error(AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED); goto cleanup_s2n_config; } AWS_ASSERT(security_policy != NULL); if (s2n_config_set_cipher_preferences(s2n_ctx->s2n_config, security_policy)) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "ctx: Failed setting security policy '%s' (newer S2N required?): %s (%s)", security_policy, s2n_strerror(s2n_errno, "EN"), s2n_strerror_debug(s2n_errno, "EN")); aws_raise_error(AWS_IO_TLS_CTX_ERROR); goto cleanup_s2n_config; } if (aws_tls_options_buf_is_set(&options->certificate) && aws_tls_options_buf_is_set(&options->private_key)) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "ctx: Certificate and key have been set, setting them up now."); if (!aws_text_is_utf8(options->certificate.buffer, options->certificate.len)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import certificate, must be ASCII/UTF-8 encoded"); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto cleanup_s2n_config; } if (!aws_text_is_utf8(options->private_key.buffer, options->private_key.len)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import private key, must be ASCII/UTF-8 encoded"); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto cleanup_s2n_config; } /* Ensure that what we pass to s2n is 
zero-terminated */ struct aws_string *certificate_string = aws_string_new_from_buf(alloc, &options->certificate); struct aws_string *private_key_string = aws_string_new_from_buf(alloc, &options->private_key); int err_code = s2n_config_add_cert_chain_and_key( s2n_ctx->s2n_config, (const char *)certificate_string->bytes, (const char *)private_key_string->bytes); aws_string_destroy(certificate_string); aws_string_destroy_secure(private_key_string); if (mode == S2N_CLIENT) { s2n_config_set_client_auth_type(s2n_ctx->s2n_config, S2N_CERT_AUTH_REQUIRED); } if (err_code != S2N_ERR_T_OK) { s_log_and_raise_s2n_errno("ctx: Failed to add certificate and private key"); goto cleanup_s2n_config; } } else if (options->custom_key_op_handler != NULL) { s2n_ctx->custom_key_handler = aws_custom_key_op_handler_acquire(options->custom_key_op_handler); /* set callback so that we can do custom private key operations */ if (s2n_config_set_async_pkey_callback(s2n_ctx->s2n_config, s_s2n_async_pkey_callback)) { s_log_and_raise_s2n_errno("ctx: failed to set private key callback"); goto cleanup_s2n_config; } /* set certificate. * we need to create a custom s2n_cert_chain_and_key that knows the cert but not the key */ s2n_ctx->custom_cert_chain_and_key = s2n_cert_chain_and_key_new(); if (!s2n_ctx->custom_cert_chain_and_key) { s_log_and_raise_s2n_errno("ctx: creation failed"); goto cleanup_s2n_config; } if (s2n_cert_chain_and_key_load_public_pem_bytes( s2n_ctx->custom_cert_chain_and_key, options->certificate.buffer, options->certificate.len)) { s_log_and_raise_s2n_errno("ctx: failed to load certificate"); goto cleanup_s2n_config; } if (s2n_config_add_cert_chain_and_key_to_store(s2n_ctx->s2n_config, s2n_ctx->custom_cert_chain_and_key)) { s_log_and_raise_s2n_errno("ctx: failed to add certificate to store"); goto cleanup_s2n_config; } if (mode == S2N_CLIENT) { s2n_config_set_client_auth_type(s2n_ctx->s2n_config, S2N_CERT_AUTH_REQUIRED); } } if (options->verify_peer) { if (s2n_config_set_check_stapled_ocsp_response(s2n_ctx->s2n_config, 1) == S2N_SUCCESS) { if (s2n_config_set_status_request_type(s2n_ctx->s2n_config, S2N_STATUS_REQUEST_OCSP) != S2N_SUCCESS) { s_log_and_raise_s2n_errno("ctx: ocsp status request cannot be set"); goto cleanup_s2n_config; } } else { if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_USAGE) { AWS_LOGF_INFO(AWS_LS_IO_TLS, "ctx: cannot enable ocsp stapling: %s", s2n_strerror(s2n_errno, "EN")); } else { s_log_and_raise_s2n_errno("ctx: cannot enable ocsp stapling"); goto cleanup_s2n_config; } } if (options->ca_path || aws_tls_options_buf_is_set(&options->ca_file)) { /* The user called an override_default_trust_store() function. 
* Begin by wiping anything that s2n loaded by default */ if (s2n_config_wipe_trust_store(s2n_ctx->s2n_config)) { s_log_and_raise_s2n_errno("ctx: failed to wipe default trust store"); goto cleanup_s2n_config; } if (options->ca_path) { if (s2n_config_set_verification_ca_location( s2n_ctx->s2n_config, NULL, aws_string_c_str(options->ca_path))) { s_log_and_raise_s2n_errno("ctx: configuration error"); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Failed to set ca_path %s\n", aws_string_c_str(options->ca_path)); goto cleanup_s2n_config; } } if (aws_tls_options_buf_is_set(&options->ca_file)) { /* Ensure that what we pass to s2n is zero-terminated */ struct aws_string *ca_file_string = aws_string_new_from_buf(alloc, &options->ca_file); int set_ca_result = s2n_config_add_pem_to_trust_store(s2n_ctx->s2n_config, (const char *)ca_file_string->bytes); aws_string_destroy(ca_file_string); if (set_ca_result) { s_log_and_raise_s2n_errno("ctx: configuration error"); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Failed to set ca_file %s\n", (const char *)options->ca_file.buffer); goto cleanup_s2n_config; } } } else if (s_default_ca_file || s_default_ca_dir) { /* User wants to use the system's default trust store. * * Note that s2n's trust store always starts with libcrypto's default locations. * These paths are configured when libcrypto is built (--openssldir), * but might not be right for the current machine (e.g. if libcrypto * is statically linked into an application that is distributed * to multiple flavors of Linux). Therefore, load the locations that * were found at library startup. */ if (s2n_config_set_verification_ca_location(s2n_ctx->s2n_config, s_default_ca_file, s_default_ca_dir)) { s_log_and_raise_s2n_errno("ctx: configuration error"); AWS_LOGF_ERROR( AWS_LS_IO_TLS, "Failed to set ca_path: %s and ca_file %s\n", s_default_ca_dir, s_default_ca_file); goto cleanup_s2n_config; } } else { /* Cannot find system's trust store */ aws_raise_error(AWS_IO_TLS_ERROR_DEFAULT_TRUST_STORE_NOT_FOUND); AWS_LOGF_ERROR( AWS_LS_IO_TLS, "Default TLS trust store not found on this system." " Install CA certificates, or \"override default trust store\"."); goto cleanup_s2n_config; } if (mode == S2N_SERVER && s2n_config_set_client_auth_type(s2n_ctx->s2n_config, S2N_CERT_AUTH_REQUIRED)) { s_log_and_raise_s2n_errno("ctx: failed to set client auth type"); goto cleanup_s2n_config; } } else if (mode != S2N_SERVER) { AWS_LOGF_WARN( AWS_LS_IO_TLS, "ctx: X.509 validation has been disabled. 
" "If this is not running in a test environment, this is likely a security vulnerability."); if (s2n_config_disable_x509_verification(s2n_ctx->s2n_config)) { s_log_and_raise_s2n_errno("ctx: failed to disable x509 verification"); goto cleanup_s2n_config; } } if (options->alpn_list) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "ctx: Setting ALPN list %s", aws_string_c_str(options->alpn_list)); const char protocols_cpy[4][128]; AWS_ZERO_ARRAY(protocols_cpy); size_t protocols_size = 4; if (s_parse_protocol_preferences(options->alpn_list, protocols_cpy, &protocols_size)) { s_log_and_raise_s2n_errno("ctx: Failed to parse ALPN list"); goto cleanup_s2n_config; } const char *protocols[4]; AWS_ZERO_ARRAY(protocols); for (size_t i = 0; i < protocols_size; ++i) { protocols[i] = protocols_cpy[i]; } if (s2n_config_set_protocol_preferences(s2n_ctx->s2n_config, protocols, (int)protocols_size)) { s_log_and_raise_s2n_errno("ctx: Failed to set protocol preferences"); goto cleanup_s2n_config; } } if (options->max_fragment_size == 512) { s2n_config_send_max_fragment_length(s2n_ctx->s2n_config, S2N_TLS_MAX_FRAG_LEN_512); } else if (options->max_fragment_size == 1024) { s2n_config_send_max_fragment_length(s2n_ctx->s2n_config, S2N_TLS_MAX_FRAG_LEN_1024); } else if (options->max_fragment_size == 2048) { s2n_config_send_max_fragment_length(s2n_ctx->s2n_config, S2N_TLS_MAX_FRAG_LEN_2048); } else if (options->max_fragment_size == 4096) { s2n_config_send_max_fragment_length(s2n_ctx->s2n_config, S2N_TLS_MAX_FRAG_LEN_4096); } return &s2n_ctx->ctx; cleanup_s2n_config: s_s2n_ctx_destroy(s2n_ctx); return NULL; } struct aws_tls_ctx *aws_tls_server_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { aws_io_fatal_assert_library_initialized(); return s_tls_ctx_new(alloc, options, S2N_SERVER); } struct aws_tls_ctx *aws_tls_client_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { aws_io_fatal_assert_library_initialized(); return s_tls_ctx_new(alloc, options, S2N_CLIENT); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/socket_channel_handler.c000066400000000000000000000365631456575232400257750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif struct socket_handler { struct aws_socket *socket; struct aws_channel_slot *slot; size_t max_rw_size; struct aws_channel_task read_task_storage; struct aws_channel_task shutdown_task_storage; struct aws_crt_statistics_socket stats; int shutdown_err_code; bool shutdown_in_progress; }; static int s_socket_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)handler; (void)slot; (void)message; AWS_LOGF_FATAL( AWS_LS_IO_SOCKET_HANDLER, "id=%p: process_read_message called on " "socket handler. This should never happen", (void *)handler); /*since a socket handler will ALWAYS be the first handler in a channel, * this should NEVER happen, if it does it's a programmer error.*/ AWS_ASSERT(0); return aws_raise_error(AWS_IO_CHANNEL_ERROR_ERROR_CANT_ACCEPT_INPUT); } /* invoked by the socket when a write has completed or failed. 
*/ static void s_on_socket_write_complete( struct aws_socket *socket, int error_code, size_t amount_written, void *user_data) { if (user_data) { struct aws_io_message *message = user_data; struct aws_channel *channel = message->owning_channel; AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "static: write of size %llu, completed on channel %p", (unsigned long long)amount_written, (void *)channel); if (message->on_completion) { message->on_completion(channel, message, error_code, message->user_data); } if (socket && socket->handler) { struct socket_handler *socket_handler = socket->handler->impl; socket_handler->stats.bytes_written += amount_written; } aws_mem_release(message->allocator, message); if (error_code) { aws_channel_shutdown(channel, error_code); } } } static int s_socket_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)slot; struct socket_handler *socket_handler = handler->impl; AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: writing message of size %llu", (void *)handler, (unsigned long long)message->message_data.len); if (!aws_socket_is_open(socket_handler->socket)) { return aws_raise_error(AWS_IO_SOCKET_CLOSED); } struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&message->message_data); if (aws_socket_write(socket_handler->socket, &cursor, s_on_socket_write_complete, message)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_read_task(struct aws_channel_task *task, void *arg, aws_task_status status); static void s_on_readable_notification(struct aws_socket *socket, int error_code, void *user_data); /* Ok this next function is VERY important for how back pressure works. Here's what it's supposed to be doing: * * See how much data downstream is willing to accept. * See how much we're actually willing to read per event loop tick (usually 16 kb). * Take the minimum of those two. * Try and read as much as possible up to the calculated max read. * If we didn't read up to the max_read, we go back to waiting on the event loop to tell us we can read more. * If we did read up to the max_read, we stop reading immediately and wait for either for a window update, * or schedule a task to enforce fairness for other sockets in the event loop if we read up to the max * read per event loop tick. */ static void s_do_read(struct socket_handler *socket_handler) { size_t downstream_window = aws_channel_slot_downstream_read_window(socket_handler->slot); size_t max_to_read = downstream_window > socket_handler->max_rw_size ? socket_handler->max_rw_size : downstream_window; AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: invoking read. 
Downstream window %llu, max_to_read %llu", (void *)socket_handler->slot->handler, (unsigned long long)downstream_window, (unsigned long long)max_to_read); if (max_to_read == 0) { return; } size_t total_read = 0; size_t read = 0; while (total_read < max_to_read && !socket_handler->shutdown_in_progress) { size_t iter_max_read = max_to_read - total_read; struct aws_io_message *message = aws_channel_acquire_message_from_pool( socket_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, iter_max_read); if (!message) { break; } if (aws_socket_read(socket_handler->socket, &message->message_data, &read)) { aws_mem_release(message->allocator, message); break; } total_read += read; AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: read %llu from socket", (void *)socket_handler->slot->handler, (unsigned long long)read); if (aws_channel_slot_send_message(socket_handler->slot, message, AWS_CHANNEL_DIR_READ)) { aws_mem_release(message->allocator, message); break; } } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: total read on this tick %llu", (void *)&socket_handler->slot->handler, (unsigned long long)total_read); socket_handler->stats.bytes_read += total_read; /* resubscribe as long as there's no error, just return if we're in a would block scenario. */ if (total_read < max_to_read) { int last_error = aws_last_error(); if (last_error != AWS_IO_READ_WOULD_BLOCK && !socket_handler->shutdown_in_progress) { aws_channel_shutdown(socket_handler->slot->channel, last_error); } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: out of data to read on socket. " "Waiting on event-loop notification.", (void *)socket_handler->slot->handler); return; } /* in this case, everything was fine, but there's still pending reads. We need to schedule a task to do the read * again. */ if (!socket_handler->shutdown_in_progress && total_read == socket_handler->max_rw_size && !socket_handler->read_task_storage.task_fn) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: more data is pending read, but we've exceeded " "the max read on this tick. Scheduling a task to read on next tick.", (void *)socket_handler->slot->handler); aws_channel_task_init( &socket_handler->read_task_storage, s_read_task, socket_handler, "socket_handler_re_read"); aws_channel_schedule_task_now(socket_handler->slot->channel, &socket_handler->read_task_storage); } } /* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. * If an error, start the channel shutdown process. */ static void s_on_readable_notification(struct aws_socket *socket, int error_code, void *user_data) { (void)socket; struct socket_handler *socket_handler = user_data; AWS_LOGF_TRACE(AWS_LS_IO_SOCKET_HANDLER, "id=%p: socket is now readable", (void *)socket_handler->slot->handler); /* read regardless so we can pick up data that was sent prior to the close. For example, peer sends a TLS ALERT * then immediately closes the socket. On some platforms, we'll never see the readable flag. So we want to make * sure we read the ALERT, otherwise, we'll end up telling the user that the channel shutdown because of a socket * closure, when in reality it was a TLS error */ s_do_read(socket_handler); if (error_code && !socket_handler->shutdown_in_progress) { aws_channel_shutdown(socket_handler->slot->channel, error_code); } } /* Either the result of a context switch (for fairness in the event loop), or a window update. 
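* The task clears its fn/arg so the storage can be re-initialized for a later read, and it only invokes
* s_do_read() when the task actually runs (AWS_TASK_STATUS_RUN_READY); a canceled task is a no-op.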
*/ static void s_read_task(struct aws_channel_task *task, void *arg, aws_task_status status) { task->task_fn = NULL; task->arg = NULL; if (status == AWS_TASK_STATUS_RUN_READY) { struct socket_handler *socket_handler = arg; s_do_read(socket_handler); } } static int s_socket_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)size; struct socket_handler *socket_handler = handler->impl; if (!socket_handler->shutdown_in_progress && !socket_handler->read_task_storage.task_fn) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: increment read window message received, scheduling" " task for another read operation.", (void *)handler); aws_channel_task_init( &socket_handler->read_task_storage, s_read_task, socket_handler, "socket_handler_read_on_window_increment"); aws_channel_schedule_task_now(slot->channel, &socket_handler->read_task_storage); } return AWS_OP_SUCCESS; } static void s_close_task(struct aws_channel_task *task, void *arg, aws_task_status status) { (void)task; (void)status; struct aws_channel_handler *handler = arg; struct socket_handler *socket_handler = handler->impl; /* * Run this unconditionally regardless of status, otherwise channel will not * finish shutting down properly */ /* this only happens in write direction. */ /* we also don't care about the free_scarce_resource_immediately * code since we're always the last one in the shutdown sequence. */ aws_channel_slot_on_handler_shutdown_complete( socket_handler->slot, AWS_CHANNEL_DIR_WRITE, socket_handler->shutdown_err_code, false); } static int s_socket_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resource_immediately) { struct socket_handler *socket_handler = (struct socket_handler *)handler->impl; socket_handler->shutdown_in_progress = true; if (dir == AWS_CHANNEL_DIR_READ) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: shutting down read direction with error_code %d", (void *)handler, error_code); if (free_scarce_resource_immediately && aws_socket_is_open(socket_handler->socket)) { if (aws_socket_close(socket_handler->socket)) { return AWS_OP_ERR; } } return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resource_immediately); } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: shutting down write direction with error_code %d", (void *)handler, error_code); if (aws_socket_is_open(socket_handler->socket)) { aws_socket_close(socket_handler->socket); } /* Schedule a task to complete the shutdown, in case a do_read task is currently pending. * It's OK to delay the shutdown, even when free_scarce_resources_immediately is true, * because the socket has been closed: mitigating the risk that the socket is still being abused by * a hostile peer. 
*/ aws_channel_task_init(&socket_handler->shutdown_task_storage, s_close_task, handler, "socket_handler_close"); socket_handler->shutdown_err_code = error_code; aws_channel_schedule_task_now(slot->channel, &socket_handler->shutdown_task_storage); return AWS_OP_SUCCESS; } static size_t s_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static size_t s_socket_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return SIZE_MAX; } static void s_socket_destroy(struct aws_channel_handler *handler) { if (handler != NULL) { struct socket_handler *socket_handler = (struct socket_handler *)handler->impl; if (socket_handler != NULL) { aws_crt_statistics_socket_cleanup(&socket_handler->stats); } aws_mem_release(handler->alloc, handler); } } static void s_reset_statistics(struct aws_channel_handler *handler) { struct socket_handler *socket_handler = (struct socket_handler *)handler->impl; aws_crt_statistics_socket_reset(&socket_handler->stats); } static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats_list) { struct socket_handler *socket_handler = (struct socket_handler *)handler->impl; void *stats_base = &socket_handler->stats; aws_array_list_push_back(stats_list, &stats_base); } static void s_trigger_read(struct aws_channel_handler *handler) { struct socket_handler *socket_handler = (struct socket_handler *)handler->impl; s_do_read(socket_handler); } static struct aws_channel_handler_vtable s_vtable = { .process_read_message = s_socket_process_read_message, .destroy = s_socket_destroy, .process_write_message = s_socket_process_write_message, .initial_window_size = s_socket_initial_window_size, .increment_read_window = s_socket_increment_read_window, .shutdown = s_socket_shutdown, .message_overhead = s_message_overhead, .reset_statistics = s_reset_statistics, .gather_statistics = s_gather_statistics, .trigger_read = s_trigger_read, }; struct aws_channel_handler *aws_socket_handler_new( struct aws_allocator *allocator, struct aws_socket *socket, struct aws_channel_slot *slot, size_t max_read_size) { /* make sure something has assigned this socket to an event loop, in client mode this will already have occurred. 
In server mode, someone should have assigned it before calling us.*/ AWS_ASSERT(aws_socket_get_event_loop(socket)); struct aws_channel_handler *handler = NULL; struct socket_handler *impl = NULL; if (!aws_mem_acquire_many( allocator, 2, &handler, sizeof(struct aws_channel_handler), &impl, sizeof(struct socket_handler))) { return NULL; } impl->socket = socket; impl->slot = slot; impl->max_rw_size = max_read_size; AWS_ZERO_STRUCT(impl->read_task_storage); AWS_ZERO_STRUCT(impl->shutdown_task_storage); impl->shutdown_in_progress = false; if (aws_crt_statistics_socket_init(&impl->stats)) { goto cleanup_handler; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET_HANDLER, "id=%p: Socket handler created with max_read_size of %llu", (void *)handler, (unsigned long long)max_read_size); handler->alloc = allocator; handler->impl = impl; handler->vtable = &s_vtable; handler->slot = slot; if (aws_socket_subscribe_to_readable_events(socket, s_on_readable_notification, impl)) { goto cleanup_handler; } socket->handler = handler; return handler; cleanup_handler: aws_mem_release(allocator, handler); return NULL; } const struct aws_socket *aws_socket_handler_get_socket(const struct aws_channel_handler *handler) { AWS_PRECONDITION(handler); const struct socket_handler *socket_handler = handler->impl; return socket_handler->socket; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/socket_shared.c000066400000000000000000000045561456575232400241330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* common validation for connect() and bind() */ static int s_socket_validate_port_for_domain(uint32_t port, enum aws_socket_domain domain) { switch (domain) { case AWS_SOCKET_IPV4: case AWS_SOCKET_IPV6: if (port > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "Invalid port=%u for %s. Cannot exceed 65535", port, domain == AWS_SOCKET_IPV4 ? "IPv4" : "IPv6"); return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); } break; case AWS_SOCKET_LOCAL: /* port is ignored */ break; case AWS_SOCKET_VSOCK: /* any 32bit port is legal */ break; default: AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "Cannot validate port for unknown domain=%d", domain); return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); } return AWS_OP_SUCCESS; } int aws_socket_validate_port_for_connect(uint32_t port, enum aws_socket_domain domain) { if (s_socket_validate_port_for_domain(port, domain)) { return AWS_OP_ERR; } /* additional validation */ switch (domain) { case AWS_SOCKET_IPV4: case AWS_SOCKET_IPV6: if (port == 0) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "Invalid port=%u for %s connections. Must use 1-65535", port, domain == AWS_SOCKET_IPV4 ? "IPv4" : "IPv6"); return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); } break; case AWS_SOCKET_VSOCK: if (port == (uint32_t)-1) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "Invalid port for VSOCK connections. Cannot use VMADDR_PORT_ANY (-1U)."); return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); } break; default: /* no extra validation */ break; } return AWS_OP_SUCCESS; } int aws_socket_validate_port_for_bind(uint32_t port, enum aws_socket_domain domain) { return s_socket_validate_port_for_domain(port, domain); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/standard_retry_strategy.c000066400000000000000000000503671456575232400262650ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include AWS_STRING_FROM_LITERAL(s_empty_string, ""); static struct aws_byte_cursor s_empty_string_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""); static const size_t s_initial_retry_bucket_capacity = 500u; static const size_t s_standard_retry_cost = 5u; static const size_t s_standard_transient_cost = 10u; static const size_t s_standard_no_retry_cost = 1u; struct retry_bucket { struct aws_allocator *allocator; struct aws_retry_strategy *owner; struct aws_string *partition_id; struct aws_byte_cursor partition_id_cur; struct { size_t current_capacity; struct aws_mutex partition_lock; } synced_data; }; struct retry_bucket_token { struct aws_retry_token retry_token; struct retry_bucket *strategy_bucket; struct aws_retry_token *exp_backoff_token; aws_retry_strategy_on_retry_token_acquired_fn *original_on_acquired; aws_retry_strategy_on_retry_ready_fn *original_on_ready; size_t last_retry_cost; void *original_user_data; }; static bool s_partition_id_equals_byte_cur(const void *seated_cur, const void *cur_ptr) { return aws_byte_cursor_eq_ignore_case(seated_cur, cur_ptr); } static uint64_t s_hash_partition_id(const void *seated_partition_ptr) { return aws_hash_byte_cursor_ptr_ignore_case(seated_partition_ptr); } static void s_destroy_standard_retry_bucket(void *retry_bucket) { struct retry_bucket *standard_retry_bucket = retry_bucket; AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: destroying bucket partition " PRInSTR, (void *)standard_retry_bucket->owner, AWS_BYTE_CURSOR_PRI(standard_retry_bucket->partition_id_cur)); aws_string_destroy(standard_retry_bucket->partition_id); aws_mutex_clean_up(&standard_retry_bucket->synced_data.partition_lock); aws_mem_release(standard_retry_bucket->allocator, standard_retry_bucket); } struct standard_strategy { struct aws_retry_strategy base; struct aws_retry_strategy *exponential_backoff_retry_strategy; size_t max_capacity; struct { struct aws_hash_table token_buckets; struct aws_mutex lock; } synced_data; }; static void s_standard_retry_destroy(struct aws_retry_strategy *retry_strategy) { AWS_LOGF_TRACE(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: destroying self", (void *)retry_strategy); struct standard_strategy *standard_strategy = retry_strategy->impl; aws_retry_strategy_release(standard_strategy->exponential_backoff_retry_strategy); aws_hash_table_clean_up(&standard_strategy->synced_data.token_buckets); aws_mutex_clean_up(&standard_strategy->synced_data.lock); aws_mem_release(retry_strategy->allocator, standard_strategy); } static void s_on_standard_retry_token_acquired( struct aws_retry_strategy *retry_strategy, int error_code, struct aws_retry_token *token, void *user_data) { (void)retry_strategy; (void)token; struct retry_bucket_token *retry_token = user_data; AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: token acquired callback invoked with error %s with token %p and nested token %p", (void *)retry_token->retry_token.retry_strategy, aws_error_str(error_code), (void *)&retry_token->retry_token, (void *)token); AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: invoking on_retry_token_acquired callback", (void *)retry_token->retry_token.retry_strategy); aws_retry_token_acquire(&retry_token->retry_token); if (!error_code) { retry_token->exp_backoff_token = token; retry_token->original_on_acquired( retry_token->strategy_bucket->owner, error_code, &retry_token->retry_token, retry_token->original_user_data); AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: 
on_retry_token_acquired callback completed", (void *)retry_token->retry_token.retry_strategy); } else { retry_token->original_on_acquired( retry_token->strategy_bucket->owner, error_code, NULL, retry_token->original_user_data); AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: on_retry_token_acquired callback completed", (void *)retry_token->retry_token.retry_strategy); } aws_retry_token_release(&retry_token->retry_token); } static int s_standard_retry_acquire_token( struct aws_retry_strategy *retry_strategy, const struct aws_byte_cursor *partition_id, aws_retry_strategy_on_retry_token_acquired_fn *on_acquired, void *user_data, uint64_t timeout_ms) { struct standard_strategy *standard_strategy = retry_strategy->impl; bool bucket_needs_cleanup = false; const struct aws_byte_cursor *partition_id_ptr = !partition_id || partition_id->len == 0 ? &s_empty_string_cur : partition_id; AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: attempting to acquire retry token for partition_id " PRInSTR, (void *)retry_strategy, AWS_BYTE_CURSOR_PRI(*partition_id_ptr)); struct retry_bucket_token *token = aws_mem_calloc(retry_strategy->allocator, 1, sizeof(struct retry_bucket_token)); if (!token) { return AWS_OP_ERR; } token->original_user_data = user_data; token->original_on_acquired = on_acquired; struct aws_hash_element *element_ptr; struct retry_bucket *bucket_ptr; AWS_FATAL_ASSERT(!aws_mutex_lock(&standard_strategy->synced_data.lock) && "Lock acquisition failed."); aws_hash_table_find(&standard_strategy->synced_data.token_buckets, partition_id_ptr, &element_ptr); if (!element_ptr) { AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: bucket for partition_id " PRInSTR " does not exist, attempting to create one", (void *)retry_strategy, AWS_BYTE_CURSOR_PRI(*partition_id_ptr)); bucket_ptr = aws_mem_calloc(standard_strategy->base.allocator, 1, sizeof(struct retry_bucket)); if (!bucket_ptr) { AWS_LOGF_ERROR( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: error when allocating bucket %s", (void *)retry_strategy, aws_error_debug_str(aws_last_error())); goto table_locked; } bucket_needs_cleanup = true; bucket_ptr->allocator = standard_strategy->base.allocator; bucket_ptr->partition_id = partition_id_ptr->len > 0 ? 
aws_string_new_from_cursor(standard_strategy->base.allocator, partition_id) : (struct aws_string *)s_empty_string; if (!bucket_ptr->partition_id) { AWS_LOGF_ERROR( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: error when allocating partition_id %s", (void *)retry_strategy, aws_error_debug_str(aws_last_error())); goto table_locked; } bucket_ptr->partition_id_cur = aws_byte_cursor_from_string(bucket_ptr->partition_id); AWS_FATAL_ASSERT(!aws_mutex_init(&bucket_ptr->synced_data.partition_lock) && "mutex init failed!"); bucket_ptr->owner = retry_strategy; bucket_ptr->synced_data.current_capacity = standard_strategy->max_capacity; AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: bucket %p for partition_id " PRInSTR " created", (void *)retry_strategy, (void *)bucket_ptr, AWS_BYTE_CURSOR_PRI(*partition_id_ptr)); if (aws_hash_table_put( &standard_strategy->synced_data.token_buckets, &bucket_ptr->partition_id_cur, bucket_ptr, NULL)) { AWS_LOGF_ERROR( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: error when putting bucket to token_bucket table %s", (void *)retry_strategy, aws_error_debug_str(aws_last_error())); goto table_locked; } bucket_needs_cleanup = false; } else { bucket_ptr = element_ptr->value; AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: bucket %p for partition_id " PRInSTR " found", (void *)retry_strategy, (void *)bucket_ptr, AWS_BYTE_CURSOR_PRI(*partition_id_ptr)); } AWS_FATAL_ASSERT(!aws_mutex_unlock(&standard_strategy->synced_data.lock) && "Mutex unlock failed"); token->strategy_bucket = bucket_ptr; token->retry_token.retry_strategy = retry_strategy; aws_atomic_init_int(&token->retry_token.ref_count, 1u); aws_retry_strategy_acquire(retry_strategy); token->retry_token.allocator = retry_strategy->allocator; token->retry_token.impl = token; /* don't decrement the capacity counter, but add the retry payback, so making calls that succeed allows for a * gradual recovery of the bucket capacity. Otherwise, we'd never recover from an outage. 
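* A freshly acquired token therefore records only s_standard_no_retry_cost as its last cost, and
* s_standard_retry_strategy_record_success() pays that amount back into the bucket on success.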
*/ token->last_retry_cost = s_standard_no_retry_cost; AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: allocated token %p for partition_id " PRInSTR, (void *)retry_strategy, (void *)&token->retry_token, AWS_BYTE_CURSOR_PRI(*partition_id_ptr)); if (aws_retry_strategy_acquire_retry_token( standard_strategy->exponential_backoff_retry_strategy, partition_id_ptr, s_on_standard_retry_token_acquired, token, timeout_ms)) { AWS_LOGF_ERROR( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: error when acquiring retry token from backing retry strategy %p: %s", (void *)retry_strategy, (void *)standard_strategy->exponential_backoff_retry_strategy, aws_error_debug_str(aws_last_error())); goto table_updated; } return AWS_OP_SUCCESS; table_updated: AWS_FATAL_ASSERT(!aws_mutex_lock(&standard_strategy->synced_data.lock) && "Mutex lock failed"); aws_hash_table_remove(&standard_strategy->synced_data.token_buckets, &bucket_ptr->partition_id_cur, NULL, NULL); bucket_needs_cleanup = false; table_locked: AWS_FATAL_ASSERT(!aws_mutex_unlock(&standard_strategy->synced_data.lock) && "Mutex unlock failed"); if (bucket_needs_cleanup) { s_destroy_standard_retry_bucket(bucket_ptr); } aws_retry_token_release(&token->retry_token); return AWS_OP_ERR; } void s_standard_retry_strategy_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { (void)token; struct aws_retry_token *standard_retry_token = user_data; struct retry_bucket_token *impl = standard_retry_token->impl; AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: invoking on_retry_ready callback with error %s, token %p, and nested token %p", (void *)token->retry_strategy, aws_error_str(error_code), (void *)standard_retry_token, (void *)token); struct aws_retry_strategy *retry_strategy = token->retry_strategy; /* we already hold a reference count here due to the previous acquire before scheduling, so don't worry * about incrementing standard_retry_token here */ impl->original_on_ready(standard_retry_token, error_code, impl->original_user_data); AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: on_retry_ready callback completed", (void *)retry_strategy); /* this is to release the acquire we did before scheduling the retry. Release it now. */ aws_retry_token_release(standard_retry_token); } static int s_standard_retry_strategy_schedule_retry( struct aws_retry_token *token, enum aws_retry_error_type error_type, aws_retry_strategy_on_retry_ready_fn *retry_ready, void *user_data) { if (error_type == AWS_RETRY_ERROR_TYPE_CLIENT_ERROR) { return aws_raise_error(AWS_IO_RETRY_PERMISSION_DENIED); } struct retry_bucket_token *impl = token->impl; size_t capacity_consumed = 0; AWS_FATAL_ASSERT(!aws_mutex_lock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex lock failed"); size_t current_capacity = impl->strategy_bucket->synced_data.current_capacity; if (current_capacity == 0) { AWS_FATAL_ASSERT( !aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed"); AWS_LOGF_INFO( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "token_id=%p: requested to schedule retry but the bucket capacity is empty. Rejecting retry request.", (void *)token); return aws_raise_error(AWS_IO_RETRY_PERMISSION_DENIED); } if (error_type == AWS_RETRY_ERROR_TYPE_TRANSIENT) { capacity_consumed = aws_min_size(current_capacity, s_standard_transient_cost); } else { /* you may be looking for throttling, but if that happened, the service told us to slow down, * but is otherwise healthy. Pay a smaller penalty for those. 
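* Transient errors were charged s_standard_transient_cost in the branch above; every other retryable
* error (throttling included) is charged the cheaper s_standard_retry_cost here, and both charges are
* capped at the bucket's remaining capacity.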
*/ capacity_consumed = aws_min_size(current_capacity, s_standard_retry_cost); } AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "token_id=%p: reducing retry capacity by %zu from %zu and scheduling retry.", (void *)token, capacity_consumed, current_capacity); impl->original_user_data = user_data; impl->original_on_ready = retry_ready; size_t previous_cost = impl->last_retry_cost; impl->last_retry_cost = capacity_consumed; impl->strategy_bucket->synced_data.current_capacity -= capacity_consumed; AWS_FATAL_ASSERT(!aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed"); /* acquire before scheduling to prevent clean up before the callback runs. */ aws_retry_token_acquire(&impl->retry_token); if (aws_retry_strategy_schedule_retry( impl->exp_backoff_token, error_type, s_standard_retry_strategy_on_retry_ready, token)) { /* release for the above acquire */ aws_retry_token_release(&impl->retry_token); AWS_LOGF_ERROR( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "token_id=%p: error occurred while scheduling retry: %s.", (void *)token, aws_error_debug_str(aws_last_error())); /* roll it back. */ AWS_FATAL_ASSERT(!aws_mutex_lock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex lock failed"); impl->last_retry_cost = previous_cost; size_t desired_capacity = impl->strategy_bucket->synced_data.current_capacity + capacity_consumed; struct standard_strategy *strategy_impl = token->retry_strategy->impl; impl->strategy_bucket->synced_data.current_capacity = desired_capacity < strategy_impl->max_capacity ? desired_capacity : strategy_impl->max_capacity; AWS_FATAL_ASSERT( !aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed"); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_standard_retry_strategy_record_success(struct aws_retry_token *token) { struct retry_bucket_token *impl = token->impl; AWS_FATAL_ASSERT(!aws_mutex_lock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex lock failed"); AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "token_id=%p: partition=" PRInSTR ": recording successful operation and adding %zu units of capacity back to the bucket.", (void *)token, AWS_BYTE_CURSOR_PRI(impl->strategy_bucket->partition_id_cur), impl->last_retry_cost); size_t capacity_payback = impl->strategy_bucket->synced_data.current_capacity + impl->last_retry_cost; struct standard_strategy *standard_strategy = token->retry_strategy->impl; impl->strategy_bucket->synced_data.current_capacity = capacity_payback < standard_strategy->max_capacity ? 
capacity_payback : standard_strategy->max_capacity; impl->last_retry_cost = 0; AWS_LOGF_TRACE( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "bucket_id=%p: partition=" PRInSTR " : new capacity is %zu.", (void *)token, AWS_BYTE_CURSOR_PRI(impl->strategy_bucket->partition_id_cur), impl->strategy_bucket->synced_data.current_capacity); AWS_FATAL_ASSERT(!aws_mutex_unlock(&impl->strategy_bucket->synced_data.partition_lock) && "mutex unlock failed"); return AWS_OP_SUCCESS; } static void s_standard_retry_strategy_release_token(struct aws_retry_token *token) { if (token) { AWS_LOGF_TRACE(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: releasing token", (void *)token); struct retry_bucket_token *impl = token->impl; aws_retry_token_release(impl->exp_backoff_token); aws_retry_strategy_release(token->retry_strategy); aws_mem_release(token->allocator, impl); } } static struct aws_retry_strategy_vtable s_standard_retry_vtable = { .schedule_retry = s_standard_retry_strategy_schedule_retry, .acquire_token = s_standard_retry_acquire_token, .release_token = s_standard_retry_strategy_release_token, .destroy = s_standard_retry_destroy, .record_success = s_standard_retry_strategy_record_success, }; struct aws_retry_strategy *aws_retry_strategy_new_standard( struct aws_allocator *allocator, const struct aws_standard_retry_options *config) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(config); AWS_LOGF_INFO(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "static: creating new standard retry strategy"); struct standard_strategy *standard_strategy = aws_mem_calloc(allocator, 1, sizeof(struct standard_strategy)); if (!standard_strategy) { AWS_LOGF_ERROR(AWS_LS_IO_STANDARD_RETRY_STRATEGY, "static: allocation of new standard retry strategy failed"); return NULL; } aws_atomic_init_int(&standard_strategy->base.ref_count, 1); struct aws_exponential_backoff_retry_options config_cpy = config->backoff_retry_options; /* standard default is 3. */ if (!config->backoff_retry_options.max_retries) { config_cpy.max_retries = 3; } AWS_LOGF_INFO( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: creating backing exponential backoff strategy with max_retries of %zu", (void *)&standard_strategy->base, config_cpy.max_retries); standard_strategy->exponential_backoff_retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config_cpy); if (!standard_strategy->exponential_backoff_retry_strategy) { AWS_LOGF_ERROR( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: allocation of new exponential backoff retry strategy failed: %s", (void *)&standard_strategy->base, aws_error_debug_str(aws_last_error())); goto error; } if (aws_hash_table_init( &standard_strategy->synced_data.token_buckets, allocator, 16u, s_hash_partition_id, s_partition_id_equals_byte_cur, NULL, s_destroy_standard_retry_bucket)) { AWS_LOGF_ERROR( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: token bucket table creation failed: %s", (void *)&standard_strategy->base, aws_error_debug_str(aws_last_error())); goto error; } standard_strategy->max_capacity = config->initial_bucket_capacity ? 
config->initial_bucket_capacity : s_initial_retry_bucket_capacity; AWS_LOGF_DEBUG( AWS_LS_IO_STANDARD_RETRY_STRATEGY, "id=%p: maximum bucket capacity set to %zu", (void *)&standard_strategy->base, standard_strategy->max_capacity); AWS_FATAL_ASSERT(!aws_mutex_init(&standard_strategy->synced_data.lock) && "mutex init failed"); standard_strategy->base.allocator = allocator; standard_strategy->base.vtable = &s_standard_retry_vtable; standard_strategy->base.impl = standard_strategy; return &standard_strategy->base; error: if (standard_strategy->exponential_backoff_retry_strategy) { aws_retry_strategy_release(standard_strategy->exponential_backoff_retry_strategy); } aws_mem_release(allocator, standard_strategy); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/statistics.c000066400000000000000000000022331456575232400234750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include int aws_crt_statistics_socket_init(struct aws_crt_statistics_socket *stats) { AWS_ZERO_STRUCT(*stats); stats->category = AWSCRT_STAT_CAT_SOCKET; return AWS_OP_SUCCESS; } void aws_crt_statistics_socket_cleanup(struct aws_crt_statistics_socket *stats) { (void)stats; } void aws_crt_statistics_socket_reset(struct aws_crt_statistics_socket *stats) { stats->bytes_read = 0; stats->bytes_written = 0; } int aws_crt_statistics_tls_init(struct aws_crt_statistics_tls *stats) { AWS_ZERO_STRUCT(*stats); stats->category = AWSCRT_STAT_CAT_TLS; stats->handshake_status = AWS_TLS_NEGOTIATION_STATUS_NONE; return AWS_OP_SUCCESS; } void aws_crt_statistics_tls_cleanup(struct aws_crt_statistics_tls *stats) { (void)stats; } void aws_crt_statistics_tls_reset(struct aws_crt_statistics_tls *stats) { /* * We currently don't have any resettable tls statistics yet, but they may be added in the future. */ (void)stats; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/stream.c000066400000000000000000000275331456575232400226100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include int aws_input_stream_seek(struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { AWS_ASSERT(stream && stream->vtable && stream->vtable->seek); return stream->vtable->seek(stream, offset, basis); } int aws_input_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { AWS_ASSERT(stream && stream->vtable && stream->vtable->read); AWS_ASSERT(dest); AWS_ASSERT(dest->len <= dest->capacity); /* Deal with this edge case here, instead of relying on every implementation to do it right. */ if (dest->capacity == dest->len) { return AWS_OP_SUCCESS; } /* Prevent implementations from accidentally overwriting existing data in the buffer. * Hand them a "safe" buffer that starts where the existing data ends. 
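 * For example (an illustrative sketch, not taken from this file; "alloc" and "stream" are assumed
 * to already exist): a caller that reads twice into the same buffer only ever appends -
 *
 *     struct aws_byte_buf buf;
 *     aws_byte_buf_init(&buf, alloc, 1024);
 *     aws_input_stream_read(stream, &buf);    (first call fills buf.len bytes)
 *     aws_input_stream_read(stream, &buf);    (second call may only write past buf.buffer + buf.len)
 *
 * The wrapper below guarantees that by handing the vtable a zero-length aws_byte_buf that aliases
 * the unused tail of dest, then folding whatever was written back into dest->len.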
*/ const void *safe_buf_start = dest->buffer + dest->len; const size_t safe_buf_capacity = dest->capacity - dest->len; struct aws_byte_buf safe_buf = aws_byte_buf_from_empty_array(safe_buf_start, safe_buf_capacity); __itt_task_begin(io_tracing_domain, __itt_null, __itt_null, tracing_input_stream_read); int read_result = stream->vtable->read(stream, &safe_buf); __itt_task_end(io_tracing_domain); /* Ensure the implementation did not commit forbidden acts upon the buffer */ AWS_FATAL_ASSERT( (safe_buf.buffer == safe_buf_start) && (safe_buf.capacity == safe_buf_capacity) && (safe_buf.len <= safe_buf_capacity)); if (read_result == AWS_OP_SUCCESS) { /* Update the actual buffer */ dest->len += safe_buf.len; } return read_result; } int aws_input_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { AWS_ASSERT(stream && stream->vtable && stream->vtable->get_status); return stream->vtable->get_status(stream, status); } int aws_input_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) { AWS_ASSERT(stream && stream->vtable && stream->vtable->get_length); return stream->vtable->get_length(stream, out_length); } /* * cursor stream implementation */ struct aws_input_stream_byte_cursor_impl { struct aws_input_stream base; struct aws_allocator *allocator; struct aws_byte_cursor original_cursor; struct aws_byte_cursor current_cursor; }; /* * This is an ugly function that, in the absence of better guidance, is designed to handle all possible combinations of * size_t (uint32_t, uint64_t). If size_t ever exceeds 64 bits this function will fail badly. * * Safety and invariant assumptions are sprinkled via comments. The overall strategy is to cast up to 64 bits and * perform all arithmetic there, being careful with signed vs. unsigned to prevent bad operations. 
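 * As a concrete worked example (illustrative only): for a cursor of length 10, AWS_SSB_BEGIN with
 * offset 3 yields final_offset = 3, while AWS_SSB_END only accepts non-positive offsets, so
 * offset -3 yields final_offset = 10 - 3 = 7. AWS_SSB_END with offset 3, or AWS_SSB_BEGIN with
 * offset 11, is rejected with AWS_IO_STREAM_INVALID_SEEK_POSITION.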
* * Assumption #1: size_t resolves to an unsigned integer 64 bits or smaller */ AWS_STATIC_ASSERT(sizeof(size_t) <= 8); static int s_aws_input_stream_byte_cursor_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { struct aws_input_stream_byte_cursor_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base); uint64_t final_offset = 0; switch (basis) { case AWS_SSB_BEGIN: /* * (uint64_t)offset -- safe by virtue of the earlier is-negative check * (uint64_t)impl->original_cursor.len -- safe via assumption 1 */ if (offset < 0 || (uint64_t)offset > (uint64_t)impl->original_cursor.len) { return aws_raise_error(AWS_IO_STREAM_INVALID_SEEK_POSITION); } /* safe because negative offsets were turned into an error */ final_offset = (uint64_t)offset; break; case AWS_SSB_END: /* * -offset -- safe as long offset is not INT64_MIN which was previously checked * (uint64_t)(-offset) -- safe because (-offset) is positive (and < INT64_MAX < UINT64_MAX) */ if (offset > 0 || offset == INT64_MIN || (uint64_t)(-offset) > (uint64_t)impl->original_cursor.len) { return aws_raise_error(AWS_IO_STREAM_INVALID_SEEK_POSITION); } /* cases that would make this unsafe became errors with previous conditional */ final_offset = (uint64_t)impl->original_cursor.len - (uint64_t)(-offset); break; default: return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* true because we already validated against (impl->original_cursor.len) which is <= SIZE_MAX */ AWS_ASSERT(final_offset <= SIZE_MAX); /* safe via previous assert */ size_t final_offset_sz = (size_t)final_offset; /* sanity */ AWS_ASSERT(final_offset_sz <= impl->original_cursor.len); /* reset current_cursor to new position */ impl->current_cursor = impl->original_cursor; impl->current_cursor.ptr += final_offset_sz; impl->current_cursor.len -= final_offset_sz; return AWS_OP_SUCCESS; } static int s_aws_input_stream_byte_cursor_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct aws_input_stream_byte_cursor_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base); size_t actually_read = dest->capacity - dest->len; if (actually_read > impl->current_cursor.len) { actually_read = impl->current_cursor.len; } if (!aws_byte_buf_write(dest, impl->current_cursor.ptr, actually_read)) { return aws_raise_error(AWS_IO_STREAM_READ_FAILED); } aws_byte_cursor_advance(&impl->current_cursor, actually_read); return AWS_OP_SUCCESS; } static int s_aws_input_stream_byte_cursor_get_status( struct aws_input_stream *stream, struct aws_stream_status *status) { struct aws_input_stream_byte_cursor_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base); status->is_end_of_stream = impl->current_cursor.len == 0; status->is_valid = true; return AWS_OP_SUCCESS; } static int s_aws_input_stream_byte_cursor_get_length(struct aws_input_stream *stream, int64_t *out_length) { struct aws_input_stream_byte_cursor_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_byte_cursor_impl, base); #if SIZE_MAX > INT64_MAX size_t length = impl->original_cursor.len; if (length > INT64_MAX) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } #endif *out_length = (int64_t)impl->original_cursor.len; return AWS_OP_SUCCESS; } static void s_aws_input_stream_byte_cursor_destroy(struct aws_input_stream_byte_cursor_impl *impl) { aws_mem_release(impl->allocator, impl); } static struct aws_input_stream_vtable s_aws_input_stream_byte_cursor_vtable = { .seek = 
s_aws_input_stream_byte_cursor_seek, .read = s_aws_input_stream_byte_cursor_read, .get_status = s_aws_input_stream_byte_cursor_get_status, .get_length = s_aws_input_stream_byte_cursor_get_length, }; struct aws_input_stream *aws_input_stream_new_from_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor *cursor) { struct aws_input_stream_byte_cursor_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_input_stream_byte_cursor_impl)); impl->allocator = allocator; impl->original_cursor = *cursor; impl->current_cursor = *cursor; impl->base.vtable = &s_aws_input_stream_byte_cursor_vtable; aws_ref_count_init( &impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_stream_byte_cursor_destroy); return &impl->base; } /* * file-based input stream */ struct aws_input_stream_file_impl { struct aws_input_stream base; struct aws_allocator *allocator; FILE *file; bool close_on_clean_up; }; static int s_aws_input_stream_file_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base); int whence = (basis == AWS_SSB_BEGIN) ? SEEK_SET : SEEK_END; if (aws_fseek(impl->file, offset, whence)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_aws_input_stream_file_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base); size_t max_read = dest->capacity - dest->len; size_t actually_read = fread(dest->buffer + dest->len, 1, max_read, impl->file); if (actually_read == 0) { if (ferror(impl->file)) { return aws_raise_error(AWS_IO_STREAM_READ_FAILED); } } dest->len += actually_read; return AWS_OP_SUCCESS; } static int s_aws_input_stream_file_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base); status->is_end_of_stream = feof(impl->file) != 0; status->is_valid = ferror(impl->file) == 0; return AWS_OP_SUCCESS; } static int s_aws_input_stream_file_get_length(struct aws_input_stream *stream, int64_t *length) { struct aws_input_stream_file_impl *impl = AWS_CONTAINER_OF(stream, struct aws_input_stream_file_impl, base); return aws_file_get_length(impl->file, length); } static void s_aws_input_stream_file_destroy(struct aws_input_stream_file_impl *impl) { if (impl->close_on_clean_up && impl->file) { fclose(impl->file); } aws_mem_release(impl->allocator, impl); } static struct aws_input_stream_vtable s_aws_input_stream_file_vtable = { .seek = s_aws_input_stream_file_seek, .read = s_aws_input_stream_file_read, .get_status = s_aws_input_stream_file_get_status, .get_length = s_aws_input_stream_file_get_length, }; struct aws_input_stream *aws_input_stream_new_from_file(struct aws_allocator *allocator, const char *file_name) { struct aws_input_stream_file_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_input_stream_file_impl)); impl->file = aws_fopen(file_name, "rb"); if (impl->file == NULL) { goto on_error; } impl->close_on_clean_up = true; impl->allocator = allocator; impl->base.vtable = &s_aws_input_stream_file_vtable; aws_ref_count_init(&impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_stream_file_destroy); return &impl->base; on_error: aws_mem_release(allocator, impl); return NULL; } struct aws_input_stream *aws_input_stream_new_from_open_file(struct 
aws_allocator *allocator, FILE *file) { struct aws_input_stream_file_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_input_stream_file_impl)); impl->file = file; impl->close_on_clean_up = false; impl->allocator = allocator; impl->base.vtable = &s_aws_input_stream_file_vtable; aws_ref_count_init(&impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_stream_file_destroy); return &impl->base; } struct aws_input_stream *aws_input_stream_acquire(struct aws_input_stream *stream) { if (stream != NULL) { if (stream->vtable->acquire) { stream->vtable->acquire(stream); } else { aws_ref_count_acquire(&stream->ref_count); } } return stream; } struct aws_input_stream *aws_input_stream_release(struct aws_input_stream *stream) { if (stream != NULL) { if (stream->vtable->release) { stream->vtable->release(stream); } else { aws_ref_count_release(&stream->ref_count); } } return NULL; } void aws_input_stream_destroy(struct aws_input_stream *stream) { aws_input_stream_release(stream); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/tls_channel_handler.c000066400000000000000000000675751456575232400253160ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #define AWS_DEFAULT_TLS_TIMEOUT_MS 10000 #include "./pkcs11_private.h" #include void aws_tls_ctx_options_init_default_client(struct aws_tls_ctx_options *options, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*options); options->allocator = allocator; options->minimum_tls_version = AWS_IO_TLS_VER_SYS_DEFAULTS; options->cipher_pref = AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT; options->verify_peer = true; options->max_fragment_size = g_aws_channel_max_fragment_size; } void aws_tls_ctx_options_clean_up(struct aws_tls_ctx_options *options) { aws_byte_buf_clean_up(&options->ca_file); aws_string_destroy(options->ca_path); aws_byte_buf_clean_up(&options->certificate); aws_byte_buf_clean_up_secure(&options->private_key); #ifdef __APPLE__ aws_byte_buf_clean_up_secure(&options->pkcs12); aws_byte_buf_clean_up_secure(&options->pkcs12_password); # if !defined(AWS_OS_IOS) aws_string_destroy(options->keychain_path); # endif #endif aws_string_destroy(options->alpn_list); aws_custom_key_op_handler_release(options->custom_key_op_handler); AWS_ZERO_STRUCT(*options); } int aws_tls_ctx_options_init_client_mtls( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const struct aws_byte_cursor *cert, const struct aws_byte_cursor *pkey) { #if !defined(AWS_OS_IOS) aws_tls_ctx_options_init_default_client(options, allocator); if (aws_byte_buf_init_copy_from_cursor(&options->certificate, allocator, *cert)) { goto error; } if (aws_sanitize_pem(&options->certificate, allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid certificate. File must contain PEM encoded data"); goto error; } if (aws_byte_buf_init_copy_from_cursor(&options->private_key, allocator, *pkey)) { goto error; } if (aws_sanitize_pem(&options->private_key, allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid private key. 
File must contain PEM encoded data"); goto error; } return AWS_OP_SUCCESS; error: aws_tls_ctx_options_clean_up(options); return AWS_OP_ERR; #else (void)allocator; (void)cert; (void)pkey; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PEM certificates"); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_init_client_mtls_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_path, const char *pkey_path) { #if !defined(AWS_OS_IOS) aws_tls_ctx_options_init_default_client(options, allocator); if (aws_byte_buf_init_from_file(&options->certificate, allocator, cert_path)) { goto error; } if (aws_sanitize_pem(&options->certificate, allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid certificate. File must contain PEM encoded data"); goto error; } if (aws_byte_buf_init_from_file(&options->private_key, allocator, pkey_path)) { goto error; } if (aws_sanitize_pem(&options->private_key, allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid private key. File must contain PEM encoded data"); goto error; } return AWS_OP_SUCCESS; error: aws_tls_ctx_options_clean_up(options); return AWS_OP_ERR; #else (void)allocator; (void)cert_path; (void)pkey_path; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PEM certificates"); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_init_client_mtls_with_custom_key_operations( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_custom_key_op_handler *custom, const struct aws_byte_cursor *cert_file_contents) { #if !USE_S2N (void)options; (void)allocator; (void)custom; (void)cert_file_contents; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR( AWS_LS_IO_TLS, "static: This platform does not currently support TLS with custom private key operations."); return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); #else aws_tls_ctx_options_init_default_client(options, allocator); /* on_key_operation is required */ AWS_ASSERT(custom != NULL); AWS_ASSERT(custom->vtable != NULL); AWS_ASSERT(custom->vtable->on_key_operation != NULL); /* Hold a reference to the custom key operation handler so it cannot be destroyed */ options->custom_key_op_handler = aws_custom_key_op_handler_acquire((struct aws_custom_key_op_handler *)custom); /* Copy the certificate data from the cursor */ AWS_ASSERT(cert_file_contents != NULL); aws_byte_buf_init_copy_from_cursor(&options->certificate, allocator, *cert_file_contents); /* Make sure the certificate is set and valid */ if (aws_sanitize_pem(&options->certificate, allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid certificate. 
File must contain PEM encoded data"); goto error; } return AWS_OP_SUCCESS; error: aws_tls_ctx_options_clean_up(options); return AWS_OP_ERR; #endif /* PLATFORM-SUPPORTS-CUSTOM-KEY-OPERATIONS */ } int aws_tls_ctx_options_init_client_mtls_with_pkcs11( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const struct aws_tls_ctx_pkcs11_options *pkcs11_options) { #if defined(USE_S2N) struct aws_custom_key_op_handler *pkcs11_handler = aws_pkcs11_tls_op_handler_new( allocator, pkcs11_options->pkcs11_lib, &pkcs11_options->user_pin, &pkcs11_options->token_label, &pkcs11_options->private_key_object_label, pkcs11_options->slot_id); struct aws_byte_buf tmp_cert_buf; AWS_ZERO_STRUCT(tmp_cert_buf); bool success = false; int custom_key_result = AWS_OP_ERR; if (pkcs11_handler == NULL) { goto finish; } if ((pkcs11_options->cert_file_contents.ptr != NULL) && (pkcs11_options->cert_file_path.ptr != NULL)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Cannot use certificate AND certificate file path, only one can be set"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto finish; } else if (pkcs11_options->cert_file_contents.ptr != NULL) { custom_key_result = aws_tls_ctx_options_init_client_mtls_with_custom_key_operations( options, allocator, pkcs11_handler, &pkcs11_options->cert_file_contents); success = true; } else { struct aws_string *tmp_string = aws_string_new_from_cursor(allocator, &pkcs11_options->cert_file_path); int op = aws_byte_buf_init_from_file(&tmp_cert_buf, allocator, aws_string_c_str(tmp_string)); aws_string_destroy(tmp_string); if (op != AWS_OP_SUCCESS) { goto finish; } struct aws_byte_cursor tmp_cursor = aws_byte_cursor_from_buf(&tmp_cert_buf); custom_key_result = aws_tls_ctx_options_init_client_mtls_with_custom_key_operations( options, allocator, pkcs11_handler, &tmp_cursor); success = true; } finish: if (pkcs11_handler != NULL) { /** * Calling aws_tls_ctx_options_init_client_mtls_with_custom_key_operations will have this options * hold a reference to the custom key operations, but creating the TLS operations handler using * aws_pkcs11_tls_op_handler_set_certificate_data adds a reference too, so we need to release * this reference so the only thing (currently) holding a reference is the TLS options itself and * not this function. 
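 * Put differently (success path): aws_pkcs11_tls_op_handler_new() hands back the handler holding
 * one reference, the init call above takes a second reference for options->custom_key_op_handler,
 * and the release below drops this function's reference again, so the options end up as the sole
 * owner until aws_tls_ctx_options_clean_up() eventually releases it.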
*/ aws_custom_key_op_handler_release(pkcs11_handler); } if (success == false) { aws_tls_ctx_options_clean_up(options); } aws_byte_buf_clean_up(&tmp_cert_buf); if (success) { return custom_key_result; } else { return AWS_OP_ERR; } #else /* Platform does not support S2N */ (void)allocator; (void)pkcs11_options; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not currently support TLS with PKCS#11."); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif /* PLATFORM-SUPPORTS-PKCS11-TLS */ } int aws_tls_ctx_options_set_keychain_path( struct aws_tls_ctx_options *options, struct aws_byte_cursor *keychain_path_cursor) { #if defined(__APPLE__) && !defined(AWS_OS_IOS) AWS_LOGF_WARN(AWS_LS_IO_TLS, "static: Keychain path is deprecated."); options->keychain_path = aws_string_new_from_cursor(options->allocator, keychain_path_cursor); if (!options->keychain_path) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; #else (void)options; (void)keychain_path_cursor; AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Keychain path can only be set on MacOS."); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_init_client_mtls_from_system_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_reg_path) { #ifdef _WIN32 aws_tls_ctx_options_init_default_client(options, allocator); options->system_certificate_path = cert_reg_path; return AWS_OP_SUCCESS; #else (void)allocator; (void)cert_reg_path; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: System certificate path can only be set on Windows."); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_init_default_server_from_system_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_reg_path) { if (aws_tls_ctx_options_init_client_mtls_from_system_path(options, allocator, cert_reg_path)) { return AWS_OP_ERR; } options->verify_peer = false; return AWS_OP_SUCCESS; } int aws_tls_ctx_options_init_client_mtls_pkcs12_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *pkcs12_path, const struct aws_byte_cursor *pkcs_pwd) { #ifdef __APPLE__ aws_tls_ctx_options_init_default_client(options, allocator); if (aws_byte_buf_init_from_file(&options->pkcs12, allocator, pkcs12_path)) { return AWS_OP_ERR; } if (aws_byte_buf_init_copy_from_cursor(&options->pkcs12_password, allocator, *pkcs_pwd)) { aws_byte_buf_clean_up_secure(&options->pkcs12); return AWS_OP_ERR; } return AWS_OP_SUCCESS; #else (void)allocator; (void)pkcs12_path; (void)pkcs_pwd; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PKCS#12 files."); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_init_client_mtls_pkcs12( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_byte_cursor *pkcs12, struct aws_byte_cursor *pkcs_pwd) { #ifdef __APPLE__ aws_tls_ctx_options_init_default_client(options, allocator); if (aws_byte_buf_init_copy_from_cursor(&options->pkcs12, allocator, *pkcs12)) { return AWS_OP_ERR; } if (aws_byte_buf_init_copy_from_cursor(&options->pkcs12_password, allocator, *pkcs_pwd)) { aws_byte_buf_clean_up_secure(&options->pkcs12); return AWS_OP_ERR; } return AWS_OP_SUCCESS; #else (void)allocator; (void)pkcs12; (void)pkcs_pwd; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: This platform does not support PKCS#12 files."); return 
aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_init_server_pkcs12_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *pkcs12_path, struct aws_byte_cursor *pkcs_password) { if (aws_tls_ctx_options_init_client_mtls_pkcs12_from_path(options, allocator, pkcs12_path, pkcs_password)) { return AWS_OP_ERR; } options->verify_peer = false; return AWS_OP_SUCCESS; } int aws_tls_ctx_options_init_server_pkcs12( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_byte_cursor *pkcs12, struct aws_byte_cursor *pkcs_password) { if (aws_tls_ctx_options_init_client_mtls_pkcs12(options, allocator, pkcs12, pkcs_password)) { return AWS_OP_ERR; } options->verify_peer = false; return AWS_OP_SUCCESS; } int aws_tls_ctx_options_init_default_server_from_path( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, const char *cert_path, const char *pkey_path) { #if !defined(AWS_OS_IOS) if (aws_tls_ctx_options_init_client_mtls_from_path(options, allocator, cert_path, pkey_path)) { return AWS_OP_ERR; } options->verify_peer = false; return AWS_OP_SUCCESS; #else (void)allocator; (void)cert_path; (void)pkey_path; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Cannot create a server on this platform."); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_init_default_server( struct aws_tls_ctx_options *options, struct aws_allocator *allocator, struct aws_byte_cursor *cert, struct aws_byte_cursor *pkey) { #if !defined(AWS_OS_IOS) if (aws_tls_ctx_options_init_client_mtls(options, allocator, cert, pkey)) { return AWS_OP_ERR; } options->verify_peer = false; return AWS_OP_SUCCESS; #else (void)allocator; (void)cert; (void)pkey; AWS_ZERO_STRUCT(*options); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Cannot create a server on this platform."); return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); #endif } int aws_tls_ctx_options_set_alpn_list(struct aws_tls_ctx_options *options, const char *alpn_list) { aws_string_destroy(options->alpn_list); options->alpn_list = aws_string_new_from_c_str(options->allocator, alpn_list); if (!options->alpn_list) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_tls_ctx_options_set_verify_peer(struct aws_tls_ctx_options *options, bool verify_peer) { options->verify_peer = verify_peer; } void aws_tls_ctx_options_set_minimum_tls_version( struct aws_tls_ctx_options *options, enum aws_tls_versions minimum_tls_version) { options->minimum_tls_version = minimum_tls_version; } void aws_tls_ctx_options_set_tls_cipher_preference( struct aws_tls_ctx_options *options, enum aws_tls_cipher_pref cipher_pref) { options->cipher_pref = cipher_pref; } int aws_tls_ctx_options_override_default_trust_store_from_path( struct aws_tls_ctx_options *options, const char *ca_path, const char *ca_file) { /* Note: on success these are not cleaned up, their data is "moved" into the options struct */ struct aws_string *ca_path_tmp = NULL; struct aws_byte_buf ca_file_tmp; AWS_ZERO_STRUCT(ca_file_tmp); if (ca_path) { if (options->ca_path) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: cannot override trust store multiple times"); aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } ca_path_tmp = aws_string_new_from_c_str(options->allocator, ca_path); if (!ca_path_tmp) { goto error; } } if (ca_file) { if (aws_tls_options_buf_is_set(&options->ca_file)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: cannot override trust store multiple times"); 
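/* For illustration (a sketch, not part of this function's logic; the file path is a placeholder):
 * a typical caller overrides the trust store at most once, right after initializing the options -
 *     aws_tls_ctx_options_init_default_client(&tls_options, alloc);
 *     aws_tls_ctx_options_override_default_trust_store_from_path(&tls_options, NULL, "/path/to/ca.pem");
 * A second override attempt on the same options lands in this branch and fails with
 * AWS_ERROR_INVALID_STATE instead of silently replacing the previously loaded CA data. */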
aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } if (aws_byte_buf_init_from_file(&ca_file_tmp, options->allocator, ca_file)) { goto error; } if (aws_sanitize_pem(&ca_file_tmp, options->allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid CA file. File must contain PEM encoded data"); goto error; } } /* Success, set new values. (no need to clean up old values, we checked earlier that they were unallocated) */ if (ca_path) { options->ca_path = ca_path_tmp; } if (ca_file) { options->ca_file = ca_file_tmp; } return AWS_OP_SUCCESS; error: aws_string_destroy_secure(ca_path_tmp); aws_byte_buf_clean_up_secure(&ca_file_tmp); return AWS_OP_ERR; } void aws_tls_ctx_options_set_extension_data(struct aws_tls_ctx_options *options, void *extension_data) { options->ctx_options_extension = extension_data; } int aws_tls_ctx_options_override_default_trust_store( struct aws_tls_ctx_options *options, const struct aws_byte_cursor *ca_file) { if (aws_tls_options_buf_is_set(&options->ca_file)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: cannot override trust store multiple times"); return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (aws_byte_buf_init_copy_from_cursor(&options->ca_file, options->allocator, *ca_file)) { goto error; } if (aws_sanitize_pem(&options->ca_file, options->allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: Invalid CA file. File must contain PEM encoded data"); goto error; } return AWS_OP_SUCCESS; error: aws_byte_buf_clean_up_secure(&options->ca_file); return AWS_OP_ERR; } void aws_tls_connection_options_init_from_ctx( struct aws_tls_connection_options *conn_options, struct aws_tls_ctx *ctx) { AWS_ZERO_STRUCT(*conn_options); /* the assumption here, is that if it was set in the context, we WANT it to be NULL here unless it's different. * so only set verify peer at this point. */ conn_options->ctx = aws_tls_ctx_acquire(ctx); conn_options->timeout_ms = AWS_DEFAULT_TLS_TIMEOUT_MS; } int aws_tls_connection_options_copy( struct aws_tls_connection_options *to, const struct aws_tls_connection_options *from) { /* clean up the options before copy. */ aws_tls_connection_options_clean_up(to); /* copy everything copyable over, then override the rest with deep copies. 
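 * That is: the struct assignment below shallow-copies every plain field (callbacks, user_data,
 * timeout_ms), then ctx has its reference count bumped via aws_tls_ctx_acquire(), and alpn_list
 * and server_name are re-allocated as fresh aws_string copies so "to" never aliases "from"'s
 * heap-owned data.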
*/ *to = *from; to->ctx = aws_tls_ctx_acquire(from->ctx); if (from->alpn_list) { to->alpn_list = aws_string_new_from_string(from->alpn_list->allocator, from->alpn_list); if (!to->alpn_list) { return AWS_OP_ERR; } } if (from->server_name) { to->server_name = aws_string_new_from_string(from->server_name->allocator, from->server_name); if (!to->server_name) { aws_string_destroy(to->server_name); return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } void aws_tls_connection_options_clean_up(struct aws_tls_connection_options *connection_options) { aws_tls_ctx_release(connection_options->ctx); if (connection_options->alpn_list) { aws_string_destroy(connection_options->alpn_list); } if (connection_options->server_name) { aws_string_destroy(connection_options->server_name); } AWS_ZERO_STRUCT(*connection_options); } void aws_tls_connection_options_set_callbacks( struct aws_tls_connection_options *conn_options, aws_tls_on_negotiation_result_fn *on_negotiation_result, aws_tls_on_data_read_fn *on_data_read, aws_tls_on_error_fn *on_error, void *user_data) { conn_options->on_negotiation_result = on_negotiation_result; conn_options->on_data_read = on_data_read; conn_options->on_error = on_error; conn_options->user_data = user_data; } int aws_tls_connection_options_set_server_name( struct aws_tls_connection_options *conn_options, struct aws_allocator *allocator, const struct aws_byte_cursor *server_name) { if (conn_options->server_name != NULL) { aws_string_destroy(conn_options->server_name); conn_options->server_name = NULL; } conn_options->server_name = aws_string_new_from_cursor(allocator, server_name); if (!conn_options->server_name) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_tls_connection_options_set_alpn_list( struct aws_tls_connection_options *conn_options, struct aws_allocator *allocator, const char *alpn_list) { if (conn_options->alpn_list != NULL) { aws_string_destroy(conn_options->alpn_list); conn_options->alpn_list = NULL; } conn_options->alpn_list = aws_string_new_from_c_str(allocator, alpn_list); if (!conn_options->alpn_list) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } #ifdef BYO_CRYPTO struct aws_tls_ctx *aws_tls_server_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { (void)alloc; (void)options; AWS_FATAL_ASSERT( false && "When using BYO_CRYPTO, user is responsible for creating aws_tls_ctx manually. You cannot call this function."); } struct aws_tls_ctx *aws_tls_client_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { (void)alloc; (void)options; AWS_FATAL_ASSERT( false && "When using BYO_CRYPTO, user is responsible for creating aws_tls_ctx manually. 
You cannot call this function."); } static aws_tls_handler_new_fn *s_client_handler_new = NULL; static aws_tls_client_handler_start_negotiation_fn *s_start_negotiation_fn = NULL; static void *s_client_user_data = NULL; static aws_tls_handler_new_fn *s_server_handler_new = NULL; static void *s_server_user_data = NULL; struct aws_channel_handler *aws_tls_client_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { AWS_FATAL_ASSERT( s_client_handler_new && "For BYO_CRYPTO, you must call aws_tls_client_handler_new_set_callback() with a non-null value."); return s_client_handler_new(allocator, options, slot, s_client_user_data); } struct aws_channel_handler *aws_tls_server_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { AWS_FATAL_ASSERT( s_client_handler_new && "For BYO_CRYPTO, you must call aws_tls_server_handler_new_set_callback() with a non-null value."); return s_server_handler_new(allocator, options, slot, s_server_user_data); } void aws_tls_byo_crypto_set_client_setup_options(const struct aws_tls_byo_crypto_setup_options *options) { AWS_FATAL_ASSERT(options); AWS_FATAL_ASSERT(options->new_handler_fn); AWS_FATAL_ASSERT(options->start_negotiation_fn); s_client_handler_new = options->new_handler_fn; s_start_negotiation_fn = options->start_negotiation_fn; s_client_user_data = options->user_data; } void aws_tls_byo_crypto_set_server_setup_options(const struct aws_tls_byo_crypto_setup_options *options) { AWS_FATAL_ASSERT(options); AWS_FATAL_ASSERT(options->new_handler_fn); s_server_handler_new = options->new_handler_fn; s_server_user_data = options->user_data; } int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler) { AWS_FATAL_ASSERT( s_start_negotiation_fn && "For BYO_CRYPTO, you must call aws_tls_client_handler_set_start_negotiation_callback() with a non-null value."); return s_start_negotiation_fn(handler, s_client_user_data); } void aws_tls_init_static_state(struct aws_allocator *alloc) { (void)alloc; } void aws_tls_clean_up_static_state(void) {} #endif /* BYO_CRYPTO */ int aws_channel_setup_client_tls( struct aws_channel_slot *right_of_slot, struct aws_tls_connection_options *tls_options) { AWS_FATAL_ASSERT(right_of_slot != NULL); struct aws_channel *channel = right_of_slot->channel; struct aws_allocator *allocator = right_of_slot->alloc; struct aws_channel_slot *tls_slot = aws_channel_slot_new(channel); /* as far as cleanup goes, since this stuff is being added to a channel, the caller will free this memory when they clean up the channel. 
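 * For illustration (a minimal sketch with assumed variable names, not part of this function): a
 * client typically layers TLS onto an existing channel roughly like this -
 *
 *     struct aws_tls_ctx_options tls_options;
 *     aws_tls_ctx_options_init_default_client(&tls_options, alloc);
 *     struct aws_tls_ctx *tls_ctx = aws_tls_client_ctx_new(alloc, &tls_options);
 *     struct aws_tls_connection_options conn_options;
 *     aws_tls_connection_options_init_from_ctx(&conn_options, tls_ctx);
 *     aws_tls_connection_options_set_server_name(&conn_options, alloc, &server_name_cursor);
 *     aws_channel_setup_client_tls(socket_slot, &conn_options);
 *
 * where socket_slot is the slot immediately to the left of where the TLS handler should sit.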
*/ if (!tls_slot) { return AWS_OP_ERR; } struct aws_channel_handler *tls_handler = aws_tls_client_handler_new(allocator, tls_options, tls_slot); if (!tls_handler) { aws_mem_release(allocator, tls_slot); return AWS_OP_ERR; } /* * From here on out, channel shutdown will handle slot/handler cleanup */ aws_channel_slot_insert_right(right_of_slot, tls_slot); AWS_LOGF_TRACE( AWS_LS_IO_CHANNEL, "id=%p: Setting up client TLS with handler %p on slot %p", (void *)channel, (void *)tls_handler, (void *)tls_slot); if (aws_channel_slot_set_handler(tls_slot, tls_handler) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (aws_tls_client_handler_start_negotiation(tls_handler) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } struct aws_tls_ctx *aws_tls_ctx_acquire(struct aws_tls_ctx *ctx) { if (ctx != NULL) { aws_ref_count_acquire(&ctx->ref_count); } return ctx; } void aws_tls_ctx_release(struct aws_tls_ctx *ctx) { if (ctx != NULL) { aws_ref_count_release(&ctx->ref_count); } } const char *aws_tls_hash_algorithm_str(enum aws_tls_hash_algorithm hash) { /* clang-format off */ switch (hash) { case (AWS_TLS_HASH_SHA1): return "SHA1"; case (AWS_TLS_HASH_SHA224): return "SHA224"; case (AWS_TLS_HASH_SHA256): return "SHA256"; case (AWS_TLS_HASH_SHA384): return "SHA384"; case (AWS_TLS_HASH_SHA512): return "SHA512"; default: return ""; } /* clang-format on */ } const char *aws_tls_signature_algorithm_str(enum aws_tls_signature_algorithm signature) { /* clang-format off */ switch (signature) { case (AWS_TLS_SIGNATURE_RSA): return "RSA"; case (AWS_TLS_SIGNATURE_ECDSA): return "ECDSA"; default: return ""; } /* clang-format on */ } const char *aws_tls_key_operation_type_str(enum aws_tls_key_operation_type operation_type) { /* clang-format off */ switch (operation_type) { case (AWS_TLS_KEY_OPERATION_SIGN): return "SIGN"; case (AWS_TLS_KEY_OPERATION_DECRYPT): return "DECRYPT"; default: return ""; } /* clang-format on */ } #if !USE_S2N void aws_tls_key_operation_complete(struct aws_tls_key_operation *operation, struct aws_byte_cursor output) { (void)operation; (void)output; } void aws_tls_key_operation_complete_with_error(struct aws_tls_key_operation *operation, int error_code) { (void)operation; (void)error_code; } struct aws_byte_cursor aws_tls_key_operation_get_input(const struct aws_tls_key_operation *operation) { (void)operation; return aws_byte_cursor_from_array(NULL, 0); } enum aws_tls_key_operation_type aws_tls_key_operation_get_type(const struct aws_tls_key_operation *operation) { (void)operation; return AWS_TLS_KEY_OPERATION_UNKNOWN; } enum aws_tls_signature_algorithm aws_tls_key_operation_get_signature_algorithm( const struct aws_tls_key_operation *operation) { (void)operation; return AWS_TLS_SIGNATURE_UNKNOWN; } enum aws_tls_hash_algorithm aws_tls_key_operation_get_digest_algorithm(const struct aws_tls_key_operation *operation) { (void)operation; return AWS_TLS_HASH_UNKNOWN; } #endif struct aws_custom_key_op_handler *aws_custom_key_op_handler_acquire(struct aws_custom_key_op_handler *key_op_handler) { if (key_op_handler != NULL) { aws_ref_count_acquire(&key_op_handler->ref_count); } return key_op_handler; } struct aws_custom_key_op_handler *aws_custom_key_op_handler_release(struct aws_custom_key_op_handler *key_op_handler) { if (key_op_handler != NULL) { aws_ref_count_release(&key_op_handler->ref_count); } return NULL; } void aws_custom_key_op_handler_perform_operation( struct aws_custom_key_op_handler *key_op_handler, struct aws_tls_key_operation *operation) { 
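/* A minimal sketch (assumed names, not part of this library) of a handler driven through this
 * dispatch:
 *     static void s_my_on_key_operation(
 *         struct aws_custom_key_op_handler *handler, struct aws_tls_key_operation *operation) {
 *         struct aws_byte_cursor input = aws_tls_key_operation_get_input(operation);
 *         (sign or decrypt "input" with an external key, producing result_cursor)
 *         aws_tls_key_operation_complete(operation, result_cursor);
 *     }
 * wired up via a vtable whose on_key_operation member points at s_my_on_key_operation. On failure
 * the handler should call aws_tls_key_operation_complete_with_error() instead, so the TLS
 * handshake does not stall waiting for a result that will never arrive. */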
key_op_handler->vtable->on_key_operation(key_op_handler, operation); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/tls_channel_handler_shared.c000066400000000000000000000053631456575232400266270ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include static void s_tls_timeout_task_fn(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_tls_channel_handler_shared *tls_handler_shared = arg; if (tls_handler_shared->stats.handshake_status != AWS_TLS_NEGOTIATION_STATUS_ONGOING) { return; } struct aws_channel *channel = tls_handler_shared->handler->slot->channel; aws_channel_shutdown(channel, AWS_IO_TLS_NEGOTIATION_TIMEOUT); } void aws_tls_channel_handler_shared_init( struct aws_tls_channel_handler_shared *tls_handler_shared, struct aws_channel_handler *handler, struct aws_tls_connection_options *options) { tls_handler_shared->handler = handler; tls_handler_shared->tls_timeout_ms = options->timeout_ms; aws_crt_statistics_tls_init(&tls_handler_shared->stats); aws_channel_task_init(&tls_handler_shared->timeout_task, s_tls_timeout_task_fn, tls_handler_shared, "tls_timeout"); } void aws_tls_channel_handler_shared_clean_up(struct aws_tls_channel_handler_shared *tls_handler_shared) { (void)tls_handler_shared; } void aws_on_drive_tls_negotiation(struct aws_tls_channel_handler_shared *tls_handler_shared) { if (tls_handler_shared->stats.handshake_status == AWS_TLS_NEGOTIATION_STATUS_NONE) { tls_handler_shared->stats.handshake_status = AWS_TLS_NEGOTIATION_STATUS_ONGOING; uint64_t now = 0; aws_channel_current_clock_time(tls_handler_shared->handler->slot->channel, &now); tls_handler_shared->stats.handshake_start_ns = now; if (tls_handler_shared->tls_timeout_ms > 0) { uint64_t timeout_ns = now + aws_timestamp_convert( tls_handler_shared->tls_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); aws_channel_schedule_task_future( tls_handler_shared->handler->slot->channel, &tls_handler_shared->timeout_task, timeout_ns); } } } void aws_on_tls_negotiation_completed(struct aws_tls_channel_handler_shared *tls_handler_shared, int error_code) { tls_handler_shared->stats.handshake_status = (error_code == AWS_ERROR_SUCCESS) ? AWS_TLS_NEGOTIATION_STATUS_SUCCESS : AWS_TLS_NEGOTIATION_STATUS_FAILURE; aws_channel_current_clock_time( tls_handler_shared->handler->slot->channel, &tls_handler_shared->stats.handshake_end_ns); } bool aws_tls_options_buf_is_set(const struct aws_byte_buf *buf) { return buf->allocator != NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/tracing.c000066400000000000000000000014431456575232400227340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include __itt_domain *io_tracing_domain; __itt_string_handle *tracing_input_stream_read; __itt_string_handle *tracing_event_loop_run_tasks; __itt_string_handle *tracing_event_loop_event; __itt_string_handle *tracing_event_loop_events; void aws_io_tracing_init(void) { io_tracing_domain = __itt_domain_create("aws.c.io"); tracing_input_stream_read = __itt_string_handle_create("Read:InputStream"); tracing_event_loop_run_tasks = __itt_string_handle_create("RunTasks:EventLoop"); tracing_event_loop_event = __itt_string_handle_create("IOEvent:EventLoop"); tracing_event_loop_events = __itt_string_handle_create("IOEvents:EventLoop"); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/000077500000000000000000000000001456575232400226315ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/host_resolver.c000066400000000000000000000066321456575232400257020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* don't move this below the Windows.h include!!!!*/ #include #include #include #include #include #include int aws_default_dns_resolve( struct aws_allocator *allocator, const struct aws_string *host_name, struct aws_array_list *output_addresses, void *user_data) { (void)user_data; ADDRINFOA *result = NULL; const char *hostname_cstr = aws_string_c_str(host_name); aws_check_and_init_winsock(); ADDRINFOA hints; AWS_ZERO_STRUCT(hints); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_ALL | AI_V4MAPPED; AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr); int res_error = GetAddrInfoA(hostname_cstr, NULL, &hints, &result); if (res_error) { AWS_LOGF_ERROR(AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d", res_error); goto clean_up; } /* max string length for ipv6. 
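 * (The longest textual IPv6 form, e.g. "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255", is 45
 * characters, so INET6_ADDRSTRLEN leaves room for that plus the terminating NUL.)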
*/ char address_buffer[INET6_ADDRSTRLEN]; socklen_t max_ip_addrlen = INET6_ADDRSTRLEN; for (ADDRINFOA *iter = result; iter != NULL; iter = iter->ai_next) { struct aws_host_address host_address; AWS_ZERO_ARRAY(address_buffer); host_address.allocator = allocator; if (iter->ai_family == AF_INET6) { host_address.record_type = AWS_ADDRESS_RECORD_TYPE_AAAA; InetNtopA( iter->ai_family, &((struct sockaddr_in6 *)iter->ai_addr)->sin6_addr, address_buffer, max_ip_addrlen); } else { host_address.record_type = AWS_ADDRESS_RECORD_TYPE_A; InetNtopA( iter->ai_family, &((struct sockaddr_in *)iter->ai_addr)->sin_addr, address_buffer, max_ip_addrlen); } AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolved record: %s", address_buffer); const struct aws_string *address = aws_string_new_from_array(allocator, (const uint8_t *)address_buffer, strlen(address_buffer)); if (!address) { goto clean_up; } host_address.host = aws_string_new_from_string(allocator, host_name); if (!host_address.host) { aws_string_destroy((void *)host_address.host); goto clean_up; } host_address.address = address; host_address.weight = 0; host_address.use_count = 0; host_address.connection_failure_count = 0; if (aws_array_list_push_back(output_addresses, &host_address)) { aws_host_address_clean_up(&host_address); goto clean_up; } } FreeAddrInfoA(result); return AWS_OP_SUCCESS; clean_up: if (result) { FreeAddrInfoA(result); } if (res_error) { switch (res_error) { case WSATRY_AGAIN: case WSANO_DATA: case WSANO_RECOVERY: return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); case WSA_NOT_ENOUGH_MEMORY: return aws_raise_error(AWS_ERROR_OOM); case WSAHOST_NOT_FOUND: case WSATYPE_NOT_FOUND: return aws_raise_error(AWS_IO_DNS_INVALID_NAME); default: return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } } return AWS_OP_ERR; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/iocp/000077500000000000000000000000001456575232400235635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/iocp/iocp_event_loop.c000066400000000000000000000731731456575232400271260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include /* The next set of struct definitions are taken directly from the windows documentation. We can't include the header files directly due to winsock. Also some of the definitions here aren't in the public API but it's the only way to do the thing we need to do. So we just declare it here and use dynamic binding to do the voodoo magic. */ struct FILE_BASIC_INFORMATION { LARGE_INTEGER CreationTime; LARGE_INTEGER LastAccessTime; LARGE_INTEGER LastWriteTime; LARGE_INTEGER ChangeTime; DWORD FileAttributes; }; struct FILE_COMPLETION_INFORMATION { HANDLE Port; PVOID Key; }; struct IO_STATUS_BLOCK { union { NTSTATUS Status; PVOID Pointer; } status_block; ULONG_PTR Information; }; enum FILE_INFORMATION_CLASS { FileReplaceCompletionInformation = 0x3D, }; typedef NTSTATUS(NTAPI NTSetInformationFile)( HANDLE file_handle, struct IO_STATUS_BLOCK *io_status_block, void *file_information, ULONG length, enum FILE_INFORMATION_CLASS file_information_class); NTSetInformationFile *s_set_info_fn = NULL; /* END of windows hackery here. 
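 * (The dynamically bound NtSetInformationFile, together with FileReplaceCompletionInformation, is
 * what later lets a handle's completion-port association be replaced when it is unsubscribed from
 * I/O events; the function pointer is resolved from ntdll.dll once, the first time an event loop
 * is created.)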
*/ typedef enum event_thread_state { EVENT_THREAD_STATE_READY_TO_RUN, EVENT_THREAD_STATE_RUNNING, EVENT_THREAD_STATE_STOPPING, } event_thread_state; struct iocp_loop { HANDLE iocp_handle; struct aws_thread thread_created_on; aws_thread_id_t thread_joined_to; struct aws_atomic_var running_thread_id; /* synced_data holds things that must be communicated across threads. * When the event-thread is running, the mutex must be locked while anyone touches anything in synced_data. * If this data is modified outside the event-thread, the thread is signaled via activity on a pipe. */ struct { struct aws_mutex mutex; bool thread_signaled; /* whether thread has been signaled about changes to synced_data */ struct aws_linked_list tasks_to_schedule; event_thread_state state; } synced_data; /* thread_data holds things which, when the event-thread is running, may only be touched by the thread */ struct { struct aws_task_scheduler scheduler; /* These variables duplicate ones in synced_data. * We move values out while holding the mutex and operate on them later */ event_thread_state state; } thread_data; struct aws_thread_options thread_options; }; enum { DEFAULT_TIMEOUT_MS = 100000, /* Max I/O completion packets to process per loop of the event-thread */ MAX_COMPLETION_PACKETS_PER_LOOP = 100, }; static void s_destroy(struct aws_event_loop *event_loop); static int s_run(struct aws_event_loop *event_loop); static int s_stop(struct aws_event_loop *event_loop); static int s_wait_for_stop_completion(struct aws_event_loop *event_loop); static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task); static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); static int s_connect_to_io_completion_port(struct aws_event_loop *event_loop, struct aws_io_handle *handle); static bool s_is_event_thread(struct aws_event_loop *event_loop); static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); static void s_free_io_event_resources(void *user_data); static void aws_event_loop_thread(void *user_data); void aws_overlapped_init( struct aws_overlapped *overlapped, aws_event_loop_on_completion_fn *on_completion, void *user_data) { AWS_ASSERT(overlapped); AWS_ZERO_STRUCT(overlapped->overlapped); overlapped->on_completion = on_completion; overlapped->user_data = user_data; } void aws_overlapped_reset(struct aws_overlapped *overlapped) { AWS_ASSERT(overlapped); AWS_ZERO_STRUCT(overlapped->overlapped); } struct _OVERLAPPED *aws_overlapped_to_windows_overlapped(struct aws_overlapped *overlapped) { return (struct _OVERLAPPED *)&overlapped->overlapped; } struct aws_event_loop_vtable s_iocp_vtable = { .destroy = s_destroy, .run = s_run, .stop = s_stop, .wait_for_stop_completion = s_wait_for_stop_completion, .schedule_task_now = s_schedule_task_now, .schedule_task_future = s_schedule_task_future, .cancel_task = s_cancel_task, .connect_to_io_completion_port = s_connect_to_io_completion_port, .is_on_callers_thread = s_is_event_thread, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, .free_io_event_resources = s_free_io_event_resources, }; struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { AWS_ASSERT(alloc); AWS_ASSERT(options); AWS_ASSERT(options->clock); if (!s_set_info_fn) { HMODULE ntdll = GetModuleHandleA("ntdll.dll"); if 
(!ntdll) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "static: failed to load ntdll.dll"); AWS_ASSERT(0); exit(-1); } s_set_info_fn = (NTSetInformationFile *)GetProcAddress(ntdll, "NtSetInformationFile"); if (!s_set_info_fn) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "static: failed to load NtSetInformationFile()"); AWS_ASSERT(0); exit(-1); } } int err = 0; struct aws_event_loop *event_loop = NULL; bool clean_up_event_loop_base = false; struct iocp_loop *impl = NULL; bool clean_up_iocp_handle = false; bool clean_up_thread = false; bool clean_up_mutex = false; bool clean_up_scheduler = false; event_loop = aws_mem_acquire(alloc, sizeof(struct aws_event_loop)); if (!event_loop) { return NULL; } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing IO Completion Port", (void *)event_loop); err = aws_event_loop_init_base(event_loop, alloc, options->clock); if (err) { goto clean_up; } clean_up_event_loop_base = true; impl = aws_mem_calloc(alloc, 1, sizeof(struct iocp_loop)); if (!impl) { goto clean_up; } if (options->thread_options) { impl->thread_options = *options->thread_options; } else { impl->thread_options = *aws_default_thread_options(); } /* initialize thread id to NULL. This will be updated once the event loop thread starts. */ aws_atomic_init_ptr(&impl->running_thread_id, NULL); impl->iocp_handle = CreateIoCompletionPort( INVALID_HANDLE_VALUE, /* FileHandle: passing invalid handle creates a new IOCP */ NULL, /* ExistingCompletionPort: should be NULL when file handle is invalid. */ 0, /* CompletionKey: should be 0 when file handle is invalid */ 1); /* NumberOfConcurrentThreads */ if (impl->iocp_handle == NULL) { AWS_LOGF_FATAL( AWS_LS_IO_EVENT_LOOP, "id=%p: CreateIOCompletionPort failed with error %d", (void *)event_loop, (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up; } clean_up_iocp_handle = true; err = aws_thread_init(&impl->thread_created_on, alloc); if (err) { goto clean_up; } clean_up_thread = true; err = aws_mutex_init(&impl->synced_data.mutex); if (err) { goto clean_up; } clean_up_mutex = true; aws_linked_list_init(&impl->synced_data.tasks_to_schedule); err = aws_task_scheduler_init(&impl->thread_data.scheduler, alloc); if (err) { goto clean_up; } clean_up_scheduler = true; event_loop->impl_data = impl; event_loop->vtable = &s_iocp_vtable; return event_loop; clean_up: if (clean_up_scheduler) { aws_task_scheduler_clean_up(&impl->thread_data.scheduler); } if (clean_up_mutex) { aws_mutex_clean_up(&impl->synced_data.mutex); } if (clean_up_thread) { aws_thread_clean_up(&impl->thread_created_on); } if (clean_up_iocp_handle) { CloseHandle(impl->iocp_handle); } if (impl) { aws_mem_release(alloc, impl); } if (clean_up_event_loop_base) { aws_event_loop_clean_up_base(event_loop); } if (event_loop) { aws_mem_release(alloc, event_loop); } return NULL; } /* Should not be called from event-thread */ static void s_destroy(struct aws_event_loop *event_loop) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: destroying event-loop", (void *)event_loop); struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); /* Stop the event-thread. This might have already happened. It's safe to call multiple times. 
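 * Destruction therefore proceeds in order: stop the thread, join it via wait_for_stop_completion,
 * adopt its thread id so canceled tasks still appear to run "on" the event loop, cancel whatever
 * is left in the scheduler, and only then drain synced_data.tasks_to_schedule, because a canceled
 * task may enqueue new cross-thread tasks while it is being canceled.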
*/ aws_event_loop_stop(event_loop); int err = aws_event_loop_wait_for_stop_completion(event_loop); if (err) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to destroy event-thread, resources have been leaked.", (void *)event_loop); AWS_ASSERT(0 && "Failed to destroy event-thread, resources have been leaked."); return; } /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ impl->thread_joined_to = aws_thread_current_thread_id(); aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_joined_to); /* Clean up task-related stuff first. * It's possible the a cancelled task adds further tasks to this event_loop, these new tasks would end up in * synced_data.tasks_to_schedule, so clean that up last */ aws_task_scheduler_clean_up(&impl->thread_data.scheduler); /* cancels remaining tasks in scheduler */ while (!aws_linked_list_empty(&impl->synced_data.tasks_to_schedule)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&impl->synced_data.tasks_to_schedule); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); } /* Clean up everything else */ bool close_iocp_success = CloseHandle(impl->iocp_handle); AWS_ASSERT(close_iocp_success); (void)close_iocp_success; aws_mutex_clean_up(&impl->synced_data.mutex); aws_thread_clean_up(&impl->thread_created_on); aws_mem_release(event_loop->alloc, impl); aws_event_loop_clean_up_base(event_loop); aws_mem_release(event_loop->alloc, event_loop); } /* Called from any thread. /* Signal to the event-loop thread that synced_data has changed. * This should only be called after changing synced_data.thread_signaled from false to true. */ static void s_signal_synced_data_changed(struct aws_event_loop *event_loop) { struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: notified of cross-thread tasks to schedule", (void *)event_loop); /* Enqueue a special completion packet to inform the event-loop that synced_data has changed. * We identify the special packet by using the iocp handle as the completion key. * This wakes the event-loop thread if it was idle. 
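 * (Because lpOverlapped is NULL and the completion key is the port handle itself, the event
 * thread can tell this nudge apart from a real I/O completion on a subscribed handle, and simply
 * re-checks synced_data instead of invoking an aws_overlapped callback.)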
*/ ULONG_PTR completion_key = (ULONG_PTR)impl->iocp_handle; PostQueuedCompletionStatus( impl->iocp_handle, /* CompletionPort */ 0, /* dwNumberOfBytesTransferred */ completion_key, /* dwCompletionKey */ NULL); /* lpOverlapped */ } static int s_run(struct aws_event_loop *event_loop) { struct iocp_loop *impl = event_loop->impl_data; /* Since thread isn't running it's ok to touch thread_data, * and it's ok to touch synced_data without locking the mutex */ /* If asserts hit, you must call stop() and wait_for_stop_completion() before calling run() again */ AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); impl->synced_data.state = EVENT_THREAD_STATE_RUNNING; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop); aws_thread_increment_unjoined_count(); int err = aws_thread_launch(&impl->thread_created_on, aws_event_loop_thread, event_loop, &impl->thread_options); if (err) { aws_thread_decrement_unjoined_count(); AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop); goto clean_up; } return AWS_OP_SUCCESS; clean_up: impl->synced_data.state = EVENT_THREAD_STATE_READY_TO_RUN; return AWS_OP_ERR; } /* Called from any thread */ static int s_stop(struct aws_event_loop *event_loop) { struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); bool signal_thread = false; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Stopping event-loop thread.", (void *)event_loop); { /* Begin critical section */ aws_mutex_lock(&impl->synced_data.mutex); if (impl->synced_data.state == EVENT_THREAD_STATE_RUNNING) { impl->synced_data.state = EVENT_THREAD_STATE_STOPPING; signal_thread = !impl->synced_data.thread_signaled; impl->synced_data.thread_signaled = true; } aws_mutex_unlock(&impl->synced_data.mutex); } /* End critical section */ if (signal_thread) { s_signal_synced_data_changed(event_loop); } return AWS_OP_SUCCESS; } /* Should not be called from event-thread */ static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); #ifdef DEBUG_BUILD aws_mutex_lock(&impl->synced_data.mutex); /* call stop() before wait_for_stop_completion() or you'll wait forever */ AWS_ASSERT(impl->synced_data.state != EVENT_THREAD_STATE_RUNNING); aws_mutex_unlock(&impl->synced_data.mutex); #endif int err = aws_thread_join(&impl->thread_created_on); aws_thread_decrement_unjoined_count(); if (err) { return AWS_OP_ERR; } /* Since thread is no longer running it's ok to touch thread_data, * and it's ok to touch synced_data without locking the mutex */ impl->synced_data.state = EVENT_THREAD_STATE_READY_TO_RUN; impl->thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; return AWS_OP_SUCCESS; } /* Common function used by schedule_task_now() and schedule_task_future(). * When run_at_nanos is 0, it's treated as a "now" task. 
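 * For illustration (a sketch with assumed names, not part of this file): a caller typically
 * reaches this path through the public event-loop API -
 *     aws_task_init(&my_task, s_my_task_fn, my_user_data, "my_task");
 *     aws_event_loop_schedule_task_now(event_loop, &my_task);
 * which funnels in here with run_at_nanos == 0, while aws_event_loop_schedule_task_future()
 * supplies a non-zero absolute timestamp in nanoseconds.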
* Called from any thread */ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); AWS_ASSERT(task); /* If we're on the event-thread, just schedule it directly */ if (s_is_event_thread(event_loop)) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: scheduling task %p in-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); if (run_at_nanos == 0) { aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); } else { aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, run_at_nanos); } return; } AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Scheduling task %p cross-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); /* Otherwise, add it to synced_data.tasks_to_schedule and signal the event-thread to process it */ task->timestamp = run_at_nanos; bool should_signal_thread = false; { /* Begin critical section */ aws_mutex_lock(&impl->synced_data.mutex); aws_linked_list_push_back(&impl->synced_data.tasks_to_schedule, &task->node); /* Signal thread that synced_data has changed (unless it's been signaled already) */ if (!impl->synced_data.thread_signaled) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Waking up event-loop thread", (void *)event_loop); should_signal_thread = true; impl->synced_data.thread_signaled = true; } aws_mutex_unlock(&impl->synced_data.mutex); } /* End critical section */ if (should_signal_thread) { s_signal_synced_data_changed(event_loop); } } /* Called from any thread */ static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { s_schedule_task_common(event_loop, task, 0 /* use zero to denote it's a "now" task */); } /* Called from any thread */ static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { s_schedule_task_common(event_loop, task, run_at_nanos); } static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: cancelling task %p", (void *)event_loop, (void *)task); struct iocp_loop *iocp_loop = event_loop->impl_data; aws_task_scheduler_cancel_task(&iocp_loop->thread_data.scheduler, task); } /* Called from any thread */ static bool s_is_event_thread(struct aws_event_loop *event_loop) { struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); aws_thread_id_t *el_thread_id = aws_atomic_load_ptr(&impl->running_thread_id); return el_thread_id && aws_thread_thread_id_equal(*el_thread_id, aws_thread_current_thread_id()); } /* Called from any thread */ static int s_connect_to_io_completion_port(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); AWS_ASSERT(handle); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: subscribing to events on handle %p", (void *)event_loop, (void *)handle->data.handle); const HANDLE iocp_handle = CreateIoCompletionPort( handle->data.handle, /* FileHandle */ impl->iocp_handle, /* ExistingCompletionPort */ 0, /* CompletionKey */ 1); /* NumberOfConcurrentThreads */ /* iocp_handle should be the event loop's handle if this succeeded */ bool iocp_associated = iocp_handle == impl->iocp_handle; /* clang-format off */ #if defined(AWS_SUPPORT_WIN7) /* * When associating named pipes, it is possible to open the same pipe in the same * process for read and write, causing 
multiple attempts to associate. This will * return ERROR_INVALID_PARAMETER from GetLastError on the second association on Win7, * but the prior association will continue. Detecting this before attempting to * associate requires the DDK API. */ const bool already_associated = GetLastError() == ERROR_INVALID_PARAMETER && /* Both handles should be valid prior to the above call. If they are, * and we got ERROR_INVALID_PARAMETER, the file handle already has an IOCP association */ handle->data.handle != INVALID_HANDLE_VALUE && impl->iocp_handle != INVALID_HANDLE_VALUE; iocp_associated |= already_associated; #endif /* clang-format on */ if (!iocp_associated) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: CreateIoCompletionPort() failed with error %d", (void *)event_loop, (int)GetLastError()); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } /* As an optimization, tell Windows not to bother signaling the handle when async I/O completes. * We're using I/O completion ports, we don't need further mechanisms to know when I/O completes. */ SetFileCompletionNotificationModes(handle->data.handle, FILE_SKIP_SET_EVENT_ON_HANDLE); /* iocp_event_loop has no need to store additional data per aws_io_handle */ handle->additional_data = NULL; return AWS_OP_SUCCESS; } /* Called from event-thread. * Takes tasks from tasks_to_schedule and adds them to the scheduler. */ static void s_process_tasks_to_schedule(struct aws_event_loop *event_loop, struct aws_linked_list *tasks_to_schedule) { struct iocp_loop *impl = event_loop->impl_data; AWS_ASSERT(impl); while (!aws_linked_list_empty(tasks_to_schedule)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(tasks_to_schedule); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); /* We use timestamp of 0 to denote that it's a "now" task */ if (task->timestamp == 0) { aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); } else { aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, task->timestamp); } } } /* Runs on the event-thread. */ static void s_process_synced_data(struct aws_event_loop *event_loop) { struct iocp_loop *impl = event_loop->impl_data; /* If there are tasks to schedule, grab them all out of synced_data.tasks_to_schedule. * We'll process them later, so that we minimize time spent holding the mutex. 
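* The pattern is: take the mutex, swap the shared list into a stack-local list with a single aws_linked_list_swap_contents() call, drop the mutex, and only then walk the local list and hand each task to the scheduler.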
*/ struct aws_linked_list tasks_to_schedule; aws_linked_list_init(&tasks_to_schedule); { /* Begin critical section */ aws_mutex_lock(&impl->synced_data.mutex); impl->synced_data.thread_signaled = false; bool initiate_stop = (impl->synced_data.state == EVENT_THREAD_STATE_STOPPING) && (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING); if (AWS_UNLIKELY(initiate_stop)) { impl->thread_data.state = EVENT_THREAD_STATE_STOPPING; } aws_linked_list_swap_contents(&impl->synced_data.tasks_to_schedule, &tasks_to_schedule); aws_mutex_unlock(&impl->synced_data.mutex); } /* End critical section */ AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: notified of cross-thread tasks to schedule", (void *)event_loop); s_process_tasks_to_schedule(event_loop, &tasks_to_schedule); } static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { (void)event_loop; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on handle %p", (void *)event_loop, (void *)handle->data.handle); struct FILE_COMPLETION_INFORMATION file_completion_info; file_completion_info.Key = NULL; file_completion_info.Port = NULL; struct IO_STATUS_BLOCK status_block; AWS_ZERO_STRUCT(status_block); NTSTATUS status = s_set_info_fn( handle->data.handle, &status_block, &file_completion_info, sizeof(file_completion_info), FileReplaceCompletionInformation); if (!status) { return AWS_OP_SUCCESS; } AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: failed to un-subscribe from events on handle %p", (void *)event_loop, (void *)handle->data.handle); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } static void s_free_io_event_resources(void *user_data) { /* iocp has no additional data stored to handle I/O events */ (void)user_data; } /** * This just calls GetQueuedCompletionStatusEx() * * We broke this out into its own function so that the stacktrace clearly shows * what this thread is doing. We've had a lot of cases where users think this * thread is deadlocked because it's stuck here. We want it to be clear * that it's doing nothing on purpose. It's waiting for events to happen... */ AWS_NO_INLINE static bool aws_event_loop_listen_for_io_events( HANDLE iocp_handle, OVERLAPPED_ENTRY completion_packets[MAX_COMPLETION_PACKETS_PER_LOOP], ULONG *num_entries, DWORD timeout_ms) { return GetQueuedCompletionStatusEx( iocp_handle, /* Completion port */ completion_packets, /* Out: completion port entries */ MAX_COMPLETION_PACKETS_PER_LOOP, /* max number of entries to remove */ num_entries, /* Out: number of entries removed */ timeout_ms, /* Timeout in ms. If timeout reached then FALSE is returned. */ false); /* Alertable */ } /* Called from event-thread */ static void aws_event_loop_thread(void *user_data) { struct aws_event_loop *event_loop = user_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); struct iocp_loop *impl = event_loop->impl_data; /* Set thread id to event loop thread id. 
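* Publishing the thread id through the running_thread_id atomic is what lets s_is_event_thread() answer correctly, which in turn lets s_schedule_task_common() take its mutex-free in-thread path; the pointer is set back to NULL when this thread function exits.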
*/ aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_created_on.thread_id); AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); impl->thread_data.state = EVENT_THREAD_STATE_RUNNING; DWORD timeout_ms = DEFAULT_TIMEOUT_MS; OVERLAPPED_ENTRY completion_packets[MAX_COMPLETION_PACKETS_PER_LOOP]; AWS_ZERO_ARRAY(completion_packets); AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: default timeout %d", (void *)event_loop, (int)timeout_ms); while (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING) { ULONG num_entries = 0; bool should_process_synced_data = false; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: waiting for a maximum of %d ms", (void *)event_loop, timeout_ms); bool has_completion_entries = aws_event_loop_listen_for_io_events( impl->iocp_handle, /* Completion port */ completion_packets, /* Out: completion port entries */ &num_entries, /* Out: number of entries removed */ timeout_ms); /* Timeout in ms. If timeout reached then FALSE is returned. */ aws_event_loop_register_tick_start(event_loop); if (has_completion_entries) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %lu events to process.", (void *)event_loop, (unsigned long)num_entries); for (ULONG i = 0; i < num_entries; ++i) { OVERLAPPED_ENTRY *completion = &completion_packets[i]; /* Is this a special completion packet which signals that synced_data has changed? * (We use iocp_handle's value as the completion key for these special packets) */ if (completion->lpCompletionKey == (ULONG_PTR)impl->iocp_handle) { should_process_synced_data = true; } else { /* Otherwise this was a normal completion on a connected aws_io_handle. * Get our hands on the aws_overlapped which owns this OVERLAPPED, * and invoke its callback */ struct aws_overlapped *overlapped = AWS_CONTAINER_OF(completion->lpOverlapped, struct aws_overlapped, overlapped); if (overlapped->on_completion) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: invoking handler.", (void *)event_loop); overlapped->on_completion( event_loop, overlapped, (int)overlapped->overlapped.Internal, /* Status code for the completed request */ completion->dwNumberOfBytesTransferred); } } } } else { /* If no completion entries were dequeued then the timeout must have triggered */ AWS_ASSERT(GetLastError() == WAIT_TIMEOUT); } /* Process synced_data */ if (should_process_synced_data) { s_process_synced_data(event_loop); } /* Run scheduled tasks */ uint64_t now_ns = 0; event_loop->clock(&now_ns); /* If clock fails, now_ns will be 0 and tasks scheduled for a specific time will not be run. That's ok, we'll handle them next time around. */ AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: running scheduled tasks.", (void *)event_loop); aws_task_scheduler_run_all(&impl->thread_data.scheduler, now_ns); /* Set timeout for next GetQueuedCompletionStatus() call. * If clock fails, or scheduler has no tasks, use default timeout */ bool use_default_timeout = false; int err = event_loop->clock(&now_ns); if (err) { use_default_timeout = true; } uint64_t next_run_time_ns; if (!aws_task_scheduler_has_tasks(&impl->thread_data.scheduler, &next_run_time_ns)) { use_default_timeout = true; } if (use_default_timeout) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: no more scheduled tasks using default timeout.", (void *)event_loop); timeout_ms = DEFAULT_TIMEOUT_MS; } else { /* Translate timestamp (in nanoseconds) to timeout (in milliseconds) */ uint64_t timeout_ns = (next_run_time_ns > now_ns) ? 
(next_run_time_ns - now_ns) : 0; uint64_t timeout_ms64 = aws_timestamp_convert(timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); timeout_ms = timeout_ms64 > MAXDWORD ? MAXDWORD : (DWORD)timeout_ms64; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: detected more scheduled tasks with the next occurring at " "%llu, using timeout of %d.", (void *)event_loop, (unsigned long long)next_run_time_ns, (int)timeout_ms); } aws_event_loop_register_tick_end(event_loop); } AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop); /* set back to NULL. This should be updated again in destroy, right before task cancelation happens. */ aws_atomic_store_ptr(&impl->running_thread_id, NULL); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/iocp/pipe.c000066400000000000000000000712641456575232400246740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include enum read_end_state { /* Pipe is open. */ READ_END_STATE_OPEN, /* Pipe is open, user has subscribed, but async monitoring hasn't started yet. * Pipe moves to SUBSCRIBED state if async monitoring starts successfully * or SUBSCRIBE_ERROR state if it doesn't start successfully. * From any of the SUBSCRIBE* states, the pipe moves to OPEN state if the user unsubscribes. */ READ_END_STATE_SUBSCRIBING, /* Pipe is open, user has subscribed, and user is receiving events delivered by async monitoring. * Async monitoring is paused once the file is known to be readable. * Async monitoring is resumed once the user reads all available bytes. * Pipe moves to SUBSCRIBE_ERROR state if async monitoring reports an error, or fails to restart. * Pipe moves to OPEN state if user unsubscribes. */ READ_END_STATE_SUBSCRIBED, /* Pipe is open, user has subscribed, and an error event has been delivered to the user. * No further error events are delivered to the user, and no more async monitoring occurs.*/ READ_END_STATE_SUBSCRIBE_ERROR, }; /* Reasons to launch async monitoring of the read-end's handle */ enum monitoring_reason { MONITORING_BECAUSE_SUBSCRIBING = 1, MONITORING_BECAUSE_WAITING_FOR_DATA = 2, MONITORING_BECAUSE_ERROR_SUSPECTED = 4, }; /* Async operations live in their own allocations. * This allows the pipe to be cleaned up without waiting for all outstanding operations to complete. */ struct async_operation { union { struct aws_overlapped overlapped; struct aws_task task; } op; struct aws_allocator *alloc; bool is_active; bool is_read_end_cleaned_up; }; struct read_end_impl { struct aws_allocator *alloc; enum read_end_state state; struct aws_io_handle handle; struct aws_event_loop *event_loop; /* Async overlapped operation for monitoring pipe status. * This operation is re-used each time monitoring resumes. * Note that rapidly subscribing/unsubscribing could lead to the monitoring operation from a previous subscribe * still pending while the user is re-subscribing. */ struct async_operation *async_monitoring; /* Async task operation used to deliver error reports. */ struct async_operation *async_error_report; aws_pipe_on_readable_fn *on_readable_user_callback; void *on_readable_user_data; /* Error code that the error-reporting task will report. */ int error_code_to_report; /* Reasons to restart monitoring once current async operation completes.
* Contains read_end_monitoring_request_t flags.*/ uint8_t monitoring_request_reasons; }; enum write_end_state { WRITE_END_STATE_CLOSING, WRITE_END_STATE_OPEN, }; /* Data describing an async write request */ struct pipe_write_request { struct aws_byte_cursor original_cursor; aws_pipe_on_write_completed_fn *user_callback; void *user_data; struct aws_allocator *alloc; struct aws_overlapped overlapped; struct aws_linked_list_node list_node; bool is_write_end_cleaned_up; }; struct write_end_impl { struct aws_allocator *alloc; enum write_end_state state; struct aws_io_handle handle; struct aws_event_loop *event_loop; /* List of currently active pipe_write_requests */ struct aws_linked_list write_list; /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around * and re-using it whenever possible */ }; enum { PIPE_BUFFER_SIZE = 4096, PIPE_UNIQUE_NAME_MAX_TRIES = 10, }; static void s_read_end_on_zero_byte_read_completion( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred); static void s_read_end_report_error_task(struct aws_task *task, void *user_data, enum aws_task_status status); static void s_write_end_on_write_completion( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred); /* Translate Windows errors into aws_pipe errors */ static int s_translate_windows_error(DWORD win_error) { switch (win_error) { case ERROR_BROKEN_PIPE: return AWS_IO_BROKEN_PIPE; case 0xC000014B: /* STATUS_PIPE_BROKEN */ return AWS_IO_BROKEN_PIPE; case 0xC0000120: /* STATUS_CANCELLED */ return AWS_IO_BROKEN_PIPE; default: return AWS_ERROR_SYS_CALL_FAILURE; } } static int s_raise_last_windows_error(void) { DWORD win_error = GetLastError(); int aws_error = s_translate_windows_error(win_error); return aws_raise_error(aws_error); } AWS_THREAD_LOCAL uint32_t tl_unique_name_counter = 0; AWS_IO_API int aws_pipe_get_unique_name(char *dst, size_t dst_size) { /* For local pipes, name should be unique per-machine. * Mix together several sources that should lead to something unique.
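* Concretely, matching the snprintf() format below, the generated name looks like \\.\pipe\aws_pipe_<pid>_<tid>_<counter>_<qpc-high><qpc-low>, where <counter> is a thread-local counter and the last two fields are the halves of the 64-bit QueryPerformanceCounter() value.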
*/ DWORD process_id = GetCurrentProcessId(); DWORD thread_id = GetCurrentThreadId(); uint32_t counter = tl_unique_name_counter++; LARGE_INTEGER timestamp; bool success = QueryPerformanceCounter(&timestamp); AWS_ASSERT(success); (void)success; /* QueryPerformanceCounter() always succeeds on XP and later */ /* snprintf() returns number of characters (not including '\0') which would have been written if dst_size was ignored */ int ideal_strlen = snprintf( dst, dst_size, "\\\\.\\pipe\\aws_pipe_%08x_%08x_%08x_%08x%08x", process_id, thread_id, counter, timestamp.HighPart, timestamp.LowPart); AWS_ASSERT(ideal_strlen > 0); if (dst_size < (size_t)(ideal_strlen + 1)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } int aws_pipe_init( struct aws_pipe_read_end *read_end, struct aws_event_loop *read_end_event_loop, struct aws_pipe_write_end *write_end, struct aws_event_loop *write_end_event_loop, struct aws_allocator *allocator) { AWS_ASSERT(read_end); AWS_ASSERT(read_end_event_loop); AWS_ASSERT(write_end); AWS_ASSERT(write_end_event_loop); AWS_ASSERT(allocator); AWS_ZERO_STRUCT(*write_end); AWS_ZERO_STRUCT(*read_end); struct write_end_impl *write_impl = NULL; struct read_end_impl *read_impl = NULL; /* Init write-end */ write_impl = aws_mem_calloc(allocator, 1, sizeof(struct write_end_impl)); if (!write_impl) { goto clean_up; } write_impl->alloc = allocator; write_impl->state = WRITE_END_STATE_OPEN; write_impl->handle.data.handle = INVALID_HANDLE_VALUE; aws_linked_list_init(&write_impl->write_list); /* Anonymous pipes don't support overlapped I/O so named pipes are used. Names must be unique system-wide. * We generate random names, but collisions are theoretically possible, so try several times before giving up. */ char pipe_name[256]; int tries = 0; while (true) { int err = aws_pipe_get_unique_name(pipe_name, sizeof(pipe_name)); if (err) { goto clean_up; } const DWORD open_mode = PIPE_ACCESS_OUTBOUND | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE; const DWORD pipe_mode = PIPE_TYPE_BYTE | PIPE_WAIT | PIPE_REJECT_REMOTE_CLIENTS; write_impl->handle.data.handle = CreateNamedPipeA( pipe_name, open_mode, pipe_mode, 1, /*nMaxInstances*/ PIPE_BUFFER_SIZE, /*nOutBufferSize*/ PIPE_BUFFER_SIZE, /*nInBufferSize*/ 0, /*nDefaultTimeout: 0 means default*/ NULL); /*lpSecurityAttributes: NULL means default */ if (write_impl->handle.data.handle != INVALID_HANDLE_VALUE) { /* Success, break out of loop */ break; } if (++tries >= PIPE_UNIQUE_NAME_MAX_TRIES) { s_raise_last_windows_error(); goto clean_up; } } int err = aws_event_loop_connect_handle_to_io_completion_port(write_end_event_loop, &write_impl->handle); if (err) { goto clean_up; } write_impl->event_loop = write_end_event_loop; /* Init read-end */ read_impl = aws_mem_calloc(allocator, 1, sizeof(struct read_end_impl)); if (!read_impl) { goto clean_up; } read_impl->alloc = allocator; read_impl->state = READ_END_STATE_OPEN; read_impl->handle.data.handle = INVALID_HANDLE_VALUE; read_impl->handle.data.handle = CreateFileA( pipe_name, /*lpFileName*/ GENERIC_READ, /*dwDesiredAccess*/ 0, /*dwShareMode: 0 prevents access by external processes*/ NULL, /*lpSecurityAttributes: NULL prevents inheritance by child processes*/ OPEN_EXISTING, /*dwCreationDisposition*/ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, /*dwFlagsAndAttributes*/ NULL); /*hTemplateFile: ignored when opening existing file*/ if (read_impl->handle.data.handle == INVALID_HANDLE_VALUE) { s_raise_last_windows_error(); goto clean_up; } err =
aws_event_loop_connect_handle_to_io_completion_port(read_end_event_loop, &read_impl->handle); if (err) { goto clean_up; } read_impl->event_loop = read_end_event_loop; /* Init the read-end's async operations */ read_impl->async_monitoring = aws_mem_calloc(allocator, 1, sizeof(struct async_operation)); if (!read_impl->async_monitoring) { goto clean_up; } read_impl->async_monitoring->alloc = allocator; aws_overlapped_init(&read_impl->async_monitoring->op.overlapped, s_read_end_on_zero_byte_read_completion, read_end); read_impl->async_error_report = aws_mem_calloc(allocator, 1, sizeof(struct async_operation)); if (!read_impl->async_error_report) { goto clean_up; } read_impl->async_error_report->alloc = allocator; aws_task_init( &read_impl->async_error_report->op.task, s_read_end_report_error_task, read_end, "pipe_read_end_report_error"); /* Success */ write_end->impl_data = write_impl; read_end->impl_data = read_impl; return AWS_OP_SUCCESS; clean_up: if (write_impl) { if (write_impl->handle.data.handle != INVALID_HANDLE_VALUE) { CloseHandle(write_impl->handle.data.handle); } aws_mem_release(allocator, write_impl); write_impl = NULL; } if (read_impl) { if (read_impl->handle.data.handle != INVALID_HANDLE_VALUE) { CloseHandle(read_impl->handle.data.handle); } if (read_impl->async_monitoring) { aws_mem_release(allocator, read_impl->async_monitoring); } if (read_impl->async_error_report) { aws_mem_release(allocator, read_impl->async_error_report); } aws_mem_release(allocator, read_impl); read_impl = NULL; } return AWS_OP_ERR; } struct aws_event_loop *aws_pipe_get_read_end_event_loop(const struct aws_pipe_read_end *read_end) { struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { aws_raise_error(AWS_IO_BROKEN_PIPE); return NULL; } return read_impl->event_loop; } struct aws_event_loop *aws_pipe_get_write_end_event_loop(const struct aws_pipe_write_end *write_end) { struct write_end_impl *write_impl = write_end->impl_data; if (!write_impl) { aws_raise_error(AWS_IO_BROKEN_PIPE); return NULL; } return write_impl->event_loop; } int aws_pipe_clean_up_read_end(struct aws_pipe_read_end *read_end) { struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } CloseHandle(read_impl->handle.data.handle); /* If the async operations are inactive they can be deleted now. * Otherwise, inform the operations of the clean-up so they can delete themselves upon completion. */ if (!read_impl->async_monitoring->is_active) { aws_mem_release(read_impl->alloc, read_impl->async_monitoring); } else { read_impl->async_monitoring->is_read_end_cleaned_up = true; } if (!read_impl->async_error_report->is_active) { aws_mem_release(read_impl->alloc, read_impl->async_error_report); } else { read_impl->async_error_report->is_read_end_cleaned_up = true; } aws_mem_release(read_impl->alloc, read_impl); AWS_ZERO_STRUCT(*read_end); return AWS_OP_SUCCESS; } /* Return whether a user is subscribed to receive read events */ static bool s_read_end_is_subscribed(struct aws_pipe_read_end *read_end) { struct read_end_impl *read_impl = read_end->impl_data; switch (read_impl->state) { case READ_END_STATE_SUBSCRIBING: case READ_END_STATE_SUBSCRIBED: case READ_END_STATE_SUBSCRIBE_ERROR: return true; default: return false; } } /* Detect events on the pipe by kicking off an async zero-byte-read. 
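* (The probe requests zero bytes, so it never consumes data from the pipe; its completion is used purely as a readability-or-error signal, which is why monitoring can stay paused while unread data sits in the pipe and is only re-requested once aws_pipe_read() drains it or hits an error.)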
* When the pipe becomes readable or an error occurs, the read will * complete and we will report the event. */ static void s_read_end_request_async_monitoring(struct aws_pipe_read_end *read_end, int request_reason) { struct read_end_impl *read_impl = read_end->impl_data; AWS_ASSERT(read_impl); /* We only do async monitoring while user is subscribed, but not if we've * reported an error and moved into the SUBSCRIBE_ERROR state */ bool async_monitoring_allowed = s_read_end_is_subscribed(read_end) && (read_impl->state != READ_END_STATE_SUBSCRIBE_ERROR); if (!async_monitoring_allowed) { return; } /* We can only have one monitoring operation active at a time. Save off * the reason for the request. When the current operation completes, * if this reason is still valid, we'll re-launch async monitoring */ if (read_impl->async_monitoring->is_active) { read_impl->monitoring_request_reasons |= request_reason; return; } AWS_ASSERT(read_impl->error_code_to_report == 0); read_impl->monitoring_request_reasons = 0; read_impl->state = READ_END_STATE_SUBSCRIBED; /* aws_overlapped must be reset before each use */ aws_overlapped_reset(&read_impl->async_monitoring->op.overlapped); int fake_buffer; bool success = ReadFile( read_impl->handle.data.handle, &fake_buffer, 0, /*nNumberOfBytesToRead*/ NULL, /*lpNumberOfBytesRead: NULL for an overlapped operation*/ aws_overlapped_to_windows_overlapped(&read_impl->async_monitoring->op.overlapped)); if (success || (GetLastError() == ERROR_IO_PENDING)) { /* Success launching zero-byte-read, aka async monitoring operation */ read_impl->async_monitoring->is_active = true; return; } /* User is subscribed for IO events and expects to be notified of errors via the event callback. * We schedule this as a task so the callback doesn't happen before the user expects it. * We also set the state to SUBSCRIBE_ERROR so we don't keep trying to monitor the file. */ read_impl->state = READ_END_STATE_SUBSCRIBE_ERROR; read_impl->error_code_to_report = s_translate_windows_error(GetLastError()); read_impl->async_error_report->is_active = true; aws_event_loop_schedule_task_now(read_impl->event_loop, &read_impl->async_error_report->op.task); } static void s_read_end_report_error_task(struct aws_task *task, void *user_data, enum aws_task_status status) { (void)status; /* Do same work whether or not this is a "cancelled" task */ struct async_operation *async_op = AWS_CONTAINER_OF(task, struct async_operation, op); AWS_ASSERT(async_op->is_active); async_op->is_active = false; /* If the read end has been cleaned up, don't report the error, just free the task's memory. */ if (async_op->is_read_end_cleaned_up) { aws_mem_release(async_op->alloc, async_op); return; } struct aws_pipe_read_end *read_end = user_data; struct read_end_impl *read_impl = read_end->impl_data; AWS_ASSERT(read_impl); /* Only report the error if we're still in the SUBSCRIBE_ERROR state. * If the user unsubscribed since this task was queued, then we'd be in a different state. 
*/ if (read_impl->state == READ_END_STATE_SUBSCRIBE_ERROR) { AWS_ASSERT(read_impl->error_code_to_report != 0); if (read_impl->on_readable_user_callback) { read_impl->on_readable_user_callback( read_end, read_impl->error_code_to_report, read_impl->on_readable_user_data); } } } static void s_read_end_on_zero_byte_read_completion( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)event_loop; (void)num_bytes_transferred; struct async_operation *async_op = AWS_CONTAINER_OF(overlapped, struct async_operation, op); /* If the read-end has been cleaned up, simply free the operation's memory and return. */ if (async_op->is_read_end_cleaned_up) { aws_mem_release(async_op->alloc, async_op); return; } struct aws_pipe_read_end *read_end = overlapped->user_data; struct read_end_impl *read_impl = read_end->impl_data; AWS_ASSERT(read_impl); /* Only report events to user when in the SUBSCRIBED state. * If in the SUBSCRIBING state, this completion is from an operation begun during a previous subscription. */ if (read_impl->state == READ_END_STATE_SUBSCRIBED) { int readable_error_code; if (status_code == 0) { readable_error_code = AWS_ERROR_SUCCESS; /* Clear out the "waiting for data" reason to restart zero-byte-read, since we're about to tell the user * that the pipe is readable. If the user consumes all the data, the "waiting for data" reason will get set * again and async-monitoring will be relaunched at the end of this function. */ read_impl->monitoring_request_reasons &= ~MONITORING_BECAUSE_WAITING_FOR_DATA; } else { readable_error_code = AWS_IO_BROKEN_PIPE; /* Move pipe to SUBSCRIBE_ERROR state to prevent further monitoring */ read_impl->state = READ_END_STATE_SUBSCRIBE_ERROR; } if (read_impl->on_readable_user_callback) { read_impl->on_readable_user_callback(read_end, readable_error_code, read_impl->on_readable_user_data); } } /* Note that the user callback might have invoked aws_pipe_clean_up_read_end(). * If so, clean up the operation's memory. 
* Otherwise, relaunch the monitoring operation if there's a reason to do so */ AWS_ASSERT(async_op->is_active); async_op->is_active = false; if (async_op->is_read_end_cleaned_up) { aws_mem_release(async_op->alloc, async_op); } else if (read_impl->monitoring_request_reasons != 0) { s_read_end_request_async_monitoring(read_end, read_impl->monitoring_request_reasons); } } int aws_pipe_subscribe_to_readable_events( struct aws_pipe_read_end *read_end, aws_pipe_on_readable_fn *on_readable, void *user_data) { struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (read_impl->state != READ_END_STATE_OPEN) { /* Return specific error about why user can't subscribe */ if (s_read_end_is_subscribed(read_end)) { return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); } AWS_ASSERT(0); /* Unexpected state */ return aws_raise_error(AWS_ERROR_UNKNOWN); } if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } read_impl->state = READ_END_STATE_SUBSCRIBING; read_impl->on_readable_user_callback = on_readable; read_impl->on_readable_user_data = user_data; s_read_end_request_async_monitoring(read_end, MONITORING_BECAUSE_SUBSCRIBING); return AWS_OP_SUCCESS; } int aws_pipe_unsubscribe_from_readable_events(struct aws_pipe_read_end *read_end) { struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!s_read_end_is_subscribed(read_end)) { return aws_raise_error(AWS_ERROR_IO_NOT_SUBSCRIBED); } if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } read_impl->state = READ_END_STATE_OPEN; read_impl->on_readable_user_callback = NULL; read_impl->on_readable_user_data = NULL; read_impl->monitoring_request_reasons = 0; read_impl->error_code_to_report = 0; /* If there's a chance the zero-byte-read is pending, cancel it. * s_read_end_on_zero_byte_read_completion() will see status code * ERROR_OPERATION_ABORTED, but won't pass the event to the user * because we're not in the SUBSCRIBED state anymore. */ if (read_impl->async_monitoring->is_active) { CancelIo(read_impl->handle.data.handle); } return AWS_OP_SUCCESS; } int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_buffer, size_t *amount_read) { AWS_ASSERT(dst_buffer && dst_buffer->buffer); struct read_end_impl *read_impl = read_end->impl_data; if (!read_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (amount_read) { *amount_read = 0; } if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } /* Just return success if user requests 0 data */ if (dst_buffer->capacity <= dst_buffer->len) { return AWS_OP_SUCCESS; } /* ReadFile() will be called in synchronous mode and would block indefinitely if it asked for more bytes than are * currently available. Therefore, peek at the available bytes before performing the actual read. */ DWORD bytes_available = 0; bool peek_success = PeekNamedPipe( read_impl->handle.data.handle, NULL, /*lpBuffer: NULL so peek doesn't actually copy data */ 0, /*nBufferSize*/ NULL, /*lpBytesRead*/ &bytes_available, /*lpTotalBytesAvail*/ NULL); /*lpBytesLeftThisMessage: doesn't apply to byte-type pipes*/ /* If operation failed. Request async monitoring so user is informed via aws_pipe_on_readable_fn of handle error. 
*/ if (!peek_success) { s_read_end_request_async_monitoring(read_end, MONITORING_BECAUSE_ERROR_SUSPECTED); return s_raise_last_windows_error(); } /* If no data available. Request async monitoring so user is notified when data becomes available. */ if (bytes_available == 0) { s_read_end_request_async_monitoring(read_end, MONITORING_BECAUSE_WAITING_FOR_DATA); return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } size_t bytes_to_read = dst_buffer->capacity - dst_buffer->len; if (bytes_to_read > bytes_available) { bytes_to_read = bytes_available; } DWORD bytes_read = 0; bool read_success = ReadFile( read_impl->handle.data.handle, dst_buffer->buffer + dst_buffer->len, /*lpBuffer*/ (DWORD)bytes_to_read, /*nNumberOfBytesToRead*/ &bytes_read, /*lpNumberOfBytesRead*/ NULL); /*lpOverlapped: NULL so read is synchronous*/ /* Operation failed. Request async monitoring so user is informed via aws_pipe_on_readable_fn of handle error. */ if (!read_success) { s_read_end_request_async_monitoring(read_end, MONITORING_BECAUSE_ERROR_SUSPECTED); return s_raise_last_windows_error(); } /* Success */ dst_buffer->len += bytes_read; if (amount_read) { *amount_read = bytes_read; } if (bytes_read < bytes_to_read) { /* If we weren't able to read as many bytes as the user requested, that's ok. * Request async monitoring so we can alert the user when more data arrives */ s_read_end_request_async_monitoring(read_end, MONITORING_BECAUSE_WAITING_FOR_DATA); } return AWS_OP_SUCCESS; } int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) { struct write_end_impl *write_impl = write_end->impl_data; if (!write_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } CloseHandle(write_impl->handle.data.handle); /* Inform outstanding writes about the clean up. 
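* Each queued pipe_write_request is only flagged here, not freed: its memory must stay valid so that s_write_end_on_write_completion() can still run, notice is_write_end_cleaned_up, skip touching the freed write-end and its list, and then release the request itself.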
*/ while (!aws_linked_list_empty(&write_impl->write_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); struct pipe_write_request *write_req = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); write_req->is_write_end_cleaned_up = true; } aws_mem_release(write_impl->alloc, write_impl); AWS_ZERO_STRUCT(*write_end); return AWS_OP_SUCCESS; } int aws_pipe_write( struct aws_pipe_write_end *write_end, struct aws_byte_cursor src_buffer, aws_pipe_on_write_completed_fn *on_completed, void *user_data) { struct write_end_impl *write_impl = write_end->impl_data; if (!write_impl) { return aws_raise_error(AWS_IO_BROKEN_PIPE); } if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (src_buffer.len > MAXDWORD) { return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } DWORD num_bytes_to_write = (DWORD)src_buffer.len; struct pipe_write_request *write = aws_mem_acquire(write_impl->alloc, sizeof(struct pipe_write_request)); if (!write) { return AWS_OP_ERR; } AWS_ZERO_STRUCT(*write); write->original_cursor = src_buffer; write->user_callback = on_completed; write->user_data = user_data; write->alloc = write_impl->alloc; aws_overlapped_init(&write->overlapped, s_write_end_on_write_completion, write_end); bool write_success = WriteFile( write_impl->handle.data.handle, /*hFile*/ src_buffer.ptr, /*lpBuffer*/ num_bytes_to_write, /*nNumberOfBytesToWrite*/ NULL, /*lpNumberOfBytesWritten*/ aws_overlapped_to_windows_overlapped(&write->overlapped)); /*lpOverlapped*/ /* Overlapped WriteFile() calls may succeed immediately, or they may queue the work. In either of these cases, IOCP * on the event-loop will alert us when the operation completes and we'll invoke user callbacks then. */ if (!write_success && GetLastError() != ERROR_IO_PENDING) { aws_mem_release(write_impl->alloc, write); return s_raise_last_windows_error(); } aws_linked_list_push_back(&write_impl->write_list, &write->list_node); return AWS_OP_SUCCESS; } void s_write_end_on_write_completion( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)event_loop; (void)num_bytes_transferred; struct pipe_write_request *write_request = AWS_CONTAINER_OF(overlapped, struct pipe_write_request, overlapped); struct aws_pipe_write_end *write_end = write_request->is_write_end_cleaned_up ? NULL : overlapped->user_data; AWS_ASSERT((num_bytes_transferred == write_request->original_cursor.len) || status_code); struct aws_byte_cursor original_cursor = write_request->original_cursor; aws_pipe_on_write_completed_fn *user_callback = write_request->user_callback; void *user_data = write_request->user_data; /* Clean up write-request. * Note that write-end might have been cleaned up before this executes. */ if (!write_request->is_write_end_cleaned_up) { aws_linked_list_remove(&write_request->list_node); } aws_mem_release(write_request->alloc, write_request); /* Report outcome to user */ if (user_callback) { int error_code = AWS_ERROR_SUCCESS; if (status_code != 0) { error_code = s_translate_windows_error(status_code); } /* Note that user may choose to clean up write-end in this callback */ user_callback(write_end, error_code, original_cursor, user_data); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/iocp/socket.c000066400000000000000000003522471456575232400252340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ /* clang is just a naive little idealist and doesn't understand that it can't just go around re-ordering windows header files. keep the bellow includes where they are. Also, sorry about the C++ style comments below, clang-format doesn't work (at least on my version) with the c-style comments.*/ // clang-format off #include #include #include // clang-format on #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif /* due to the windows' team apparently lack of ability to handle header ordering properly we can't include ntstatus.h. Just define this, it's used for Connect and Accept callbacks. it maps directly to nt's STATUS_CANCELLED */ #define IO_OPERATION_CANCELLED 0xC0000120 #define IO_STATUS_CONNECTION_REFUSED 0xC0000236 #define IO_STATUS_TIMEOUT 0x00000102 #define IO_NETWORK_UNREACHABLE 0xC000023C #define IO_HOST_UNREACHABLE 0xC000023D #define IO_CONNECTION_ABORTED 0xC0000241 #define IO_PIPE_BROKEN 0xC000014B #define SOME_ERROR_CODE_THAT_MEANS_INVALID_PATH 0x00000003 #define IO_STATUS_BUFFER_OVERFLOW 0x80000005 #define STATUS_INVALID_ADDRESS_COMPONENT 0xC0000207 #define PIPE_BUFFER_SIZE 512 struct socket_vtable { int (*connection_success)(struct aws_socket *socket); void (*connection_error)(struct aws_socket *socket, int error_code); int (*close)(struct aws_socket *socket); int (*connect)( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data); int (*start_accept)( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data); int (*stop_accept)(struct aws_socket *socket); int (*bind)(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint); int (*listen)(struct aws_socket *socket, int backlog_size); int (*read)(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read); int (*subscribe_to_read)(struct aws_socket *socket, aws_socket_on_readable_fn *on_readable, void *user_data); }; static int s_ipv4_stream_connection_success(struct aws_socket *socket); static int s_ipv6_stream_connection_success(struct aws_socket *socket); static void s_connection_error(struct aws_socket *socket, int error_code); static int s_local_and_udp_connection_success(struct aws_socket *socket); static int s_ipv4_stream_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data); static int s_ipv4_dgram_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data); static int s_ipv6_stream_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data); static int s_ipv6_dgram_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn 
*on_connection_result, void *user_data); static int s_local_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data); static int s_tcp_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data); static int s_local_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data); static int s_stream_stop_accept(struct aws_socket *socket); static int s_dgram_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data); static int s_dgram_stop_accept(struct aws_socket *socket); static int s_tcp_listen(struct aws_socket *socket, int backlog_size); static int s_udp_listen(struct aws_socket *socket, int backlog_size); static int s_local_listen(struct aws_socket *socket, int backlog_size); static int s_tcp_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read); static int s_local_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read); static int s_dgram_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read); static int s_socket_close(struct aws_socket *socket); static int s_local_close(struct aws_socket *socket); static int s_ipv4_stream_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint); static int s_ipv4_dgram_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint); static int s_ipv6_stream_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint); static int s_ipv6_dgram_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint); static int s_local_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint); static int s_stream_subscribe_to_read( struct aws_socket *socket, aws_socket_on_readable_fn *on_readable, void *user_data); static int s_dgram_subscribe_to_read( struct aws_socket *socket, aws_socket_on_readable_fn *on_readable, void *user_data); static int s_determine_socket_error(int error); /* Why build this V-table instead of doing that beautiful posix code I just read? I'm glad you asked...... because winsock is nothing like posix and certainly not as well thought out. There were so many branches to handle three entirely different APIs we decided it was less painful to just have a bunch of function pointers in a table than to want to gouge our eyes out while looking at a ridiculous number of branches. 
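* In practice every public aws_socket_* entry point below simply forwards through vtables[options->domain][options->type]; the one unsupported combination, AWS_SOCKET_LOCAL with AWS_SOCKET_DGRAM, is left zero-initialized and is rejected by s_socket_init() with AWS_IO_SOCKET_INVALID_OPTIONS because its read slot is NULL.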
*/ static struct socket_vtable vtables[3][2] = { [AWS_SOCKET_IPV4] = { [AWS_SOCKET_STREAM] = { .connection_success = s_ipv4_stream_connection_success, .connection_error = s_connection_error, .connect = s_ipv4_stream_connect, .start_accept = s_tcp_start_accept, .stop_accept = s_stream_stop_accept, .bind = s_ipv4_stream_bind, .listen = s_tcp_listen, .read = s_tcp_read, .close = s_socket_close, .subscribe_to_read = s_stream_subscribe_to_read, }, [AWS_SOCKET_DGRAM] = { .connection_success = s_local_and_udp_connection_success, .connection_error = s_connection_error, .connect = s_ipv4_dgram_connect, .start_accept = s_dgram_start_accept, .stop_accept = s_dgram_stop_accept, .bind = s_ipv4_dgram_bind, .listen = s_udp_listen, .read = s_dgram_read, .close = s_socket_close, .subscribe_to_read = s_dgram_subscribe_to_read, }, }, [AWS_SOCKET_IPV6] = { [AWS_SOCKET_STREAM] = { .connection_success = s_ipv6_stream_connection_success, .connection_error = s_connection_error, .connect = s_ipv6_stream_connect, .start_accept = s_tcp_start_accept, .stop_accept = s_stream_stop_accept, .bind = s_ipv6_stream_bind, .listen = s_tcp_listen, .read = s_tcp_read, .close = s_socket_close, .subscribe_to_read = s_stream_subscribe_to_read, }, [AWS_SOCKET_DGRAM] = { .connection_success = s_local_and_udp_connection_success, .connection_error = s_connection_error, .connect = s_ipv6_dgram_connect, .start_accept = s_dgram_start_accept, .stop_accept = s_dgram_stop_accept, .bind = s_ipv6_dgram_bind, .listen = s_udp_listen, .read = s_dgram_read, .close = s_socket_close, .subscribe_to_read = s_dgram_subscribe_to_read, }, }, [AWS_SOCKET_LOCAL] = { [AWS_SOCKET_STREAM] = { .connection_success = s_local_and_udp_connection_success, .connection_error = s_connection_error, .connect = s_local_connect, .start_accept = s_local_start_accept, .stop_accept = s_stream_stop_accept, .bind = s_local_bind, .listen = s_local_listen, .read = s_local_read, .close = s_local_close, .subscribe_to_read = s_stream_subscribe_to_read, }, [AWS_SOCKET_DGRAM] = {0}, }, }; /* When socket is connected, any of the CONNECT_*** flags might be set. Otherwise, only one state flag is active at a time. 
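* CONNECTED_READ and CONNECTED_WRITE are kept as independent bits so that aws_socket_shutdown_dir() can clear a single direction, and so that aws_socket_read() and aws_socket_subscribe_to_readable_events() can test the CONNECTED_READ bit on its own.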
*/ enum socket_state { INIT = 0x01, CONNECTING = 0x02, CONNECTED_READ = 0x04, CONNECTED_WRITE = 0x08, CONNECTED_WAITING_ON_READABLE = 0x10, BOUND = 0x20, LISTENING = 0x40, TIMEDOUT = 0x80, CLOSED = 0x0100, ERRORED = 0x0200, }; static int s_convert_domain(enum aws_socket_domain domain) { switch (domain) { case AWS_SOCKET_IPV4: return AF_INET; case AWS_SOCKET_IPV6: return AF_INET6; case AWS_SOCKET_LOCAL: return AF_UNIX; default: AWS_ASSERT(0); return AF_INET; } } static int s_convert_type(enum aws_socket_type type) { switch (type) { case AWS_SOCKET_STREAM: return SOCK_STREAM; case AWS_SOCKET_DGRAM: return SOCK_DGRAM; default: AWS_ASSERT(0); return SOCK_STREAM; } } #define SOCK_STORAGE_SIZE (sizeof(struct sockaddr_storage) + 16) struct socket_connect_args { struct aws_allocator *allocator; struct aws_socket *socket; struct aws_task timeout_task; }; struct io_operation_data { struct aws_allocator *allocator; struct aws_socket *socket; struct aws_overlapped signal; struct aws_linked_list_node node; struct aws_task sequential_task_storage; bool in_use; }; struct iocp_socket { struct socket_vtable *vtable; struct io_operation_data *read_io_data; struct aws_socket *incoming_socket; uint8_t accept_buffer[SOCK_STORAGE_SIZE * 2]; struct socket_connect_args *connect_args; struct aws_linked_list pending_io_operations; bool stop_accept; }; static int s_create_socket(struct aws_socket *sock, const struct aws_socket_options *options) { SOCKET handle = socket(s_convert_domain(options->domain), s_convert_type(options->type), 0); if (handle == INVALID_SOCKET) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=static: socket() call failed with WSAError %d", wsa_err); return aws_raise_error(s_determine_socket_error(wsa_err)); } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: initializing with domain %d and type %d", (void *)sock, (void *)handle, options->domain, options->type); u_long non_blocking = 1; if (ioctlsocket(handle, FIONBIO, &non_blocking) != 0) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=static: ioctlsocket() call failed with WSAError %d", wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } sock->io_handle.data.handle = (HANDLE)handle; sock->io_handle.additional_data = NULL; if (aws_socket_set_options(sock, options)) { goto error; } return AWS_OP_SUCCESS; error: closesocket(handle); sock->io_handle.data.handle = (HANDLE)INVALID_SOCKET; return AWS_OP_ERR; } static int s_socket_init( struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options, bool create_underlying_socket) { AWS_ASSERT(options->domain <= AWS_SOCKET_LOCAL); AWS_ASSERT(options->type <= AWS_SOCKET_DGRAM); AWS_ZERO_STRUCT(*socket); struct iocp_socket *impl = aws_mem_calloc(alloc, 1, sizeof(struct iocp_socket)); if (!impl) { return AWS_OP_ERR; } impl->vtable = &vtables[options->domain][options->type]; if (!impl->vtable || !impl->vtable->read) { aws_mem_release(alloc, impl); socket->impl = NULL; return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); } impl->read_io_data = aws_mem_calloc(alloc, 1, sizeof(struct io_operation_data)); if (!impl->read_io_data) { aws_mem_release(alloc, impl); socket->impl = NULL; return AWS_OP_ERR; } impl->read_io_data->allocator = alloc; impl->read_io_data->socket = socket; impl->read_io_data->in_use = false; aws_linked_list_init(&impl->pending_io_operations); socket->allocator = alloc; 
socket->io_handle.data.handle = INVALID_HANDLE_VALUE; socket->state = INIT; socket->impl = impl; socket->options = *options; if (options->domain != AWS_SOCKET_LOCAL && create_underlying_socket) { if (s_create_socket(socket, options)) { aws_mem_release(alloc, impl->read_io_data); aws_mem_release(alloc, impl); socket->impl = NULL; return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { AWS_ASSERT(options); aws_check_and_init_winsock(); int err = s_socket_init(socket, alloc, options, true); return err; } void aws_socket_clean_up(struct aws_socket *socket) { if (!socket->impl) { /* protect from double clean */ return; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p, handle=%p: cleaning up socket.", (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket->impl; socket_impl->vtable->close(socket); if (socket_impl->incoming_socket) { aws_socket_clean_up(socket_impl->incoming_socket); aws_mem_release(socket->allocator, socket_impl->incoming_socket); } if (socket_impl->read_io_data) { aws_mem_release(socket->allocator, socket_impl->read_io_data); } aws_mem_release(socket->allocator, socket->impl); AWS_ZERO_STRUCT(*socket); socket->io_handle.data.handle = INVALID_HANDLE_VALUE; } int aws_socket_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *event_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data) { struct iocp_socket *socket_impl = socket->impl; if (socket->options.type != AWS_SOCKET_DGRAM) { AWS_ASSERT(on_connection_result); if (socket->state != INIT) { socket->state = ERRORED; return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } } else { /* UDP socket */ /* UDP sockets jump to CONNECT_READ if bind is called first */ if (socket->state != CONNECTED_READ && socket->state != INIT) { socket->state = ERRORED; return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } } if (aws_socket_validate_port_for_connect(remote_endpoint->port, socket->options.domain)) { return AWS_OP_ERR; } return socket_impl->vtable->connect(socket, remote_endpoint, event_loop, on_connection_result, user_data); } int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { if (socket->state != INIT) { socket->state = ERRORED; return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } if (aws_socket_validate_port_for_bind(local_endpoint->port, socket->options.domain)) { return AWS_OP_ERR; } struct iocp_socket *socket_impl = socket->impl; return socket_impl->vtable->bind(socket, local_endpoint); } int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { if (socket->local_endpoint.address[0] == 0) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p fd=%d: Socket has no local address. 
Socket must be bound first.", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } *out_address = socket->local_endpoint; return AWS_OP_SUCCESS; } /* Update IPV4 or IPV6 socket->local_endpoint based on the results of getsockname() */ static int s_update_local_endpoint_ipv4_ipv6(struct aws_socket *socket) { struct aws_socket_endpoint tmp_endpoint; AWS_ZERO_STRUCT(tmp_endpoint); struct sockaddr_storage address; AWS_ZERO_STRUCT(address); socklen_t address_size = sizeof(address); if (getsockname((SOCKET)socket->io_handle.data.handle, (struct sockaddr *)&address, &address_size) != 0) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: getsockname() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); return aws_raise_error(s_determine_socket_error(wsa_err)); } if (address.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&address; tmp_endpoint.port = ntohs(s->sin_port); if (InetNtopA(AF_INET, &s->sin_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: determining local endpoint failed", (void *)socket, (void *)socket->io_handle.data.handle); return aws_raise_error(s_determine_socket_error(wsa_err)); } } else if (address.ss_family == AF_INET6) { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&address; tmp_endpoint.port = ntohs(s->sin6_port); if (InetNtopA(AF_INET6, &s->sin6_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: determining local endpoint failed", (void *)socket, (void *)socket->io_handle.data.handle); return aws_raise_error(s_determine_socket_error(wsa_err)); } } else { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: unknown ADDRESS_FAMILY %d", (void *)socket, (void *)socket->io_handle.data.handle, address.ss_family); return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); } socket->local_endpoint = tmp_endpoint; return AWS_OP_SUCCESS; } int aws_socket_listen(struct aws_socket *socket, int backlog_size) { struct iocp_socket *socket_impl = socket->impl; return socket_impl->vtable->listen(socket, backlog_size); } int aws_socket_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data) { struct iocp_socket *socket_impl = socket->impl; return socket_impl->vtable->start_accept(socket, accept_loop, on_accept_result, user_data); } int aws_socket_stop_accept(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; return socket_impl->vtable->stop_accept(socket); } int aws_socket_close(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; return socket_impl->vtable->close(socket); } int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir) { int how = dir == AWS_CHANNEL_DIR_READ ? 
0 : 1;

    if (shutdown((SOCKET)socket->io_handle.data.handle, how)) {
        int aws_error = s_determine_socket_error(WSAGetLastError());
        return aws_raise_error(aws_error);
    }

    if (dir == AWS_CHANNEL_DIR_READ) {
        socket->state &= ~CONNECTED_READ;
    } else {
        socket->state &= ~CONNECTED_WRITE;
    }

    return AWS_OP_SUCCESS;
}

int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) {
    struct iocp_socket *socket_impl = socket->impl;
    AWS_ASSERT(socket->readable_fn);

    if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) {
        AWS_LOGF_ERROR(
            AWS_LS_IO_SOCKET,
            "id=%p handle=%p: Read can only be called from the owning event-loop's thread.",
            (void *)socket,
            (void *)socket->io_handle.data.handle);
        return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY);
    }

    if (!(socket->state & CONNECTED_READ)) {
        AWS_LOGF_ERROR(
            AWS_LS_IO_SOCKET,
            "id=%p handle=%p: Attempt to read from an unconnected socket.",
            (void *)socket,
            (void *)socket->io_handle.data.handle);
        return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED);
    }

    return socket_impl->vtable->read(socket, buffer, amount_read);
}

int aws_socket_subscribe_to_readable_events(
    struct aws_socket *socket,
    aws_socket_on_readable_fn *on_readable,
    void *user_data) {
    struct iocp_socket *socket_impl = socket->impl;
    AWS_ASSERT(socket->event_loop);
    AWS_ASSERT(!socket->readable_fn);

    if (!(socket->state & CONNECTED_READ)) {
        AWS_LOGF_ERROR(
            AWS_LS_IO_SOCKET,
            "id=%p handle=%p: Attempt to subscribe to an unconnected socket.",
            (void *)socket,
            (void *)socket->io_handle.data.handle);
        return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED);
    }

    return socket_impl->vtable->subscribe_to_read(socket, on_readable, user_data);
}

static int s_determine_socket_error(int error) {
    switch (error) {
        case WSAECONNREFUSED:
        case IO_STATUS_CONNECTION_REFUSED:
            return AWS_IO_SOCKET_CONNECTION_REFUSED;
        case WSAETIMEDOUT:
        case IO_STATUS_TIMEOUT:
            return AWS_IO_SOCKET_TIMEOUT;
        case IO_PIPE_BROKEN:
            return AWS_IO_SOCKET_CLOSED;
        case STATUS_INVALID_ADDRESS_COMPONENT:
        case WSAEADDRNOTAVAIL:
            return AWS_IO_SOCKET_INVALID_ADDRESS;
        case WSAEADDRINUSE:
            return AWS_IO_SOCKET_ADDRESS_IN_USE;
        case WSAENETUNREACH:
        case IO_NETWORK_UNREACHABLE:
        case IO_HOST_UNREACHABLE:
            return AWS_IO_SOCKET_NO_ROUTE_TO_HOST;
        case WSAENETDOWN:
            return AWS_IO_SOCKET_NETWORK_DOWN;
        case WSAECONNABORTED:
        case IO_CONNECTION_ABORTED:
            return AWS_IO_SOCKET_CONNECT_ABORTED;
        case WSAENOBUFS:
            return AWS_ERROR_OOM;
        case WSAEMFILE:
            return AWS_ERROR_MAX_FDS_EXCEEDED;
        case WSAENAMETOOLONG:
        case WSA_INVALID_PARAMETER:
            return AWS_ERROR_FILE_INVALID_PATH;
        case WSAEAFNOSUPPORT:
            return AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY;
        case WSAEACCES:
            return AWS_ERROR_NO_PERMISSION;
        default:
            return AWS_IO_SOCKET_NOT_CONNECTED;
    }
}

static inline int s_process_tcp_sock_options(struct aws_socket *socket) {
    if (aws_socket_set_options(socket, &socket->options)) {
        aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS);
        return AWS_OP_ERR;
    }

    return AWS_OP_SUCCESS;
}

/* called when an IPV4 TCP socket has successfully connected. 
*/ static int s_ipv4_stream_connection_success(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; if (s_process_tcp_sock_options(socket)) { goto error; } int connect_result = 0; socklen_t result_length = sizeof(connect_result); if (getsockopt( (SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_ERROR, (char *)&connect_result, &result_length) < 0) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: failed to determine connection error %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } if (connect_result) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connection error %d", (void *)socket, (void *)socket->io_handle.data.handle, connect_result); aws_raise_error(s_determine_socket_error(connect_result)); goto error; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connection success", (void *)socket, (void *)socket->io_handle.data.handle); if (s_update_local_endpoint_ipv4_ipv6(socket)) { goto error; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: local endpoint %s:%u", (void *)socket, (void *)socket->io_handle.data.handle, socket->local_endpoint.address, socket->local_endpoint.port); setsockopt((SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0); socket->state = CONNECTED_WRITE | CONNECTED_READ; socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data); return AWS_OP_SUCCESS; error: socket->state = ERRORED; socket_impl->vtable->connection_error(socket, aws_last_error()); return AWS_OP_ERR; } /* called upon a successful TCP over IPv6 connection. */ static int s_ipv6_stream_connection_success(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; if (s_process_tcp_sock_options(socket)) { goto error; } int connect_result = 0; socklen_t result_length = sizeof(connect_result); if (getsockopt( (SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_ERROR, (char *)&connect_result, &result_length) < 0) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: failed to determine connection error %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } if (connect_result) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connection error %d", (void *)socket, (void *)socket->io_handle.data.handle, connect_result); aws_raise_error(s_determine_socket_error(connect_result)); goto error; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connection success", (void *)socket, (void *)socket->io_handle.data.handle); if (s_update_local_endpoint_ipv4_ipv6(socket)) { goto error; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: local endpoint %s:%u", (void *)socket, (void *)socket->io_handle.data.handle, socket->local_endpoint.address, socket->local_endpoint.port); setsockopt((SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0); socket->state = CONNECTED_WRITE | CONNECTED_READ; socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data); return AWS_OP_SUCCESS; error: socket->state = ERRORED; socket_impl->vtable->connection_error(socket, aws_last_error()); return AWS_OP_ERR; } /* Outgoing UDP and Named pipe connections. 
*/ static int s_local_and_udp_connection_success(struct aws_socket *socket) { socket->state = CONNECTED_WRITE | CONNECTED_READ; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connection success", (void *)socket, (void *)socket->io_handle.data.handle); if (socket->connection_result_fn) { socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data); } return AWS_OP_SUCCESS; } static void s_connection_error(struct aws_socket *socket, int error) { socket->state = ERRORED; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connection error with code %d", (void *)socket, (void *)socket->io_handle.data.handle, error); if (socket->connection_result_fn) { socket->connection_result_fn(socket, error, socket->connect_accept_user_data); } else if (socket->accept_result_fn) { socket->accept_result_fn(socket, error, NULL, socket->connect_accept_user_data); } } /* Named Pipes and TCP connection callbacks from the event loop. */ void s_socket_connection_completion( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)num_bytes_transferred; (void)event_loop; struct io_operation_data *operation_data = AWS_CONTAINER_OF(overlapped, struct io_operation_data, signal); struct socket_connect_args *socket_args = overlapped->user_data; AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "static: connect completion triggered on event-loop %p", (void *)event_loop); if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } if (status_code == IO_OPERATION_CANCELLED) { operation_data->in_use = false; return; } if (socket_args->socket) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: connect completion triggered. Socket has not timed out yet: proceeding with connection", (void *)socket_args->socket, (void *)socket_args->socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket_args->socket->impl; struct aws_socket *socket = socket_args->socket; socket->readable_fn = NULL; socket->readable_user_data = NULL; socket_impl->connect_args = NULL; socket_args->socket = NULL; if (!status_code) { socket_impl->vtable->connection_success(socket); } else { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: connect completion triggered with error %d", (void *)socket, (void *)socket->io_handle.data.handle, status_code); int error = s_determine_socket_error(status_code); socket_impl->vtable->connection_error(socket, error); } } if (operation_data->socket) { operation_data->in_use = false; } else { aws_mem_release(operation_data->allocator, operation_data); } } /* outgoing tcp connection. If this task runs before `s_socket_connection_completion()`, then the connection is considered timedout. */ static void s_handle_socket_timeout(struct aws_task *task, void *args, aws_task_status status) { (void)task; (void)status; struct socket_connect_args *socket_args = args; AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "task_id=%p: timeout task triggered, evaluating timeouts.", (void *)task); if (socket_args->socket) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: timed out, shutting down.", (void *)socket_args->socket, (void *)socket_args->socket->io_handle.data.handle); socket_args->socket->state = TIMEDOUT; struct aws_socket *socket = socket_args->socket; int error_code = AWS_IO_SOCKET_TIMEOUT; /* since the task is canceled the event-loop is gone and the iocp will not trigger, so go ahead and tell the socket cleanup stuff that the iocp handle is no longer pending operations. 
*/ if (status == AWS_TASK_STATUS_CANCELED) { struct iocp_socket *iocp_socket = socket->impl; iocp_socket->read_io_data->in_use = false; error_code = AWS_IO_EVENT_LOOP_SHUTDOWN; } aws_raise_error(error_code); /* socket close will set the connection args to NULL etc...*/ aws_socket_close(socket); socket->connection_result_fn(socket, error_code, socket->connect_accept_user_data); } struct aws_allocator *allocator = socket_args->allocator; aws_mem_release(allocator, socket_args); } /* initiate an outbound tcp connection (client mode). */ static inline int s_tcp_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, struct sockaddr *bind_addr, struct sockaddr *socket_addr, size_t sock_size) { struct iocp_socket *socket_impl = socket->impl; socket->remote_endpoint = *remote_endpoint; int reuse = 1; if (setsockopt((SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse, sizeof(int))) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p handle=%p: setsockopt() call for enabling SO_REUSEADDR failed with WSAError %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); return aws_raise_error(s_determine_socket_error(wsa_err)); } struct socket_connect_args *connect_args = aws_mem_calloc(socket->allocator, 1, sizeof(struct socket_connect_args)); if (!connect_args) { socket->state = ERRORED; return AWS_OP_ERR; } connect_args->timeout_task.fn = s_handle_socket_timeout; connect_args->timeout_task.arg = connect_args; LPFN_CONNECTEX connect_fn = NULL; if (aws_socket_assign_to_event_loop(socket, connect_loop)) { socket->state = ERRORED; aws_mem_release(socket->allocator, connect_args); return AWS_OP_ERR; } connect_args->allocator = socket->allocator; connect_args->socket = socket; socket->state = CONNECTING; connect_fn = (LPFN_CONNECTEX)aws_winsock_get_connectex_fn(); socket_impl->read_io_data->in_use = true; aws_overlapped_init(&socket_impl->read_io_data->signal, s_socket_connection_completion, connect_args); int fake_buffer = 0; socket_impl->connect_args = connect_args; BOOL connect_res = false; bind((SOCKET)socket->io_handle.data.handle, bind_addr, (int)sock_size); /* socket may be killed by the connection_completion callback inside of connect_fn, so copy out info * we need (allocator, event loop, timeout, etc), socket isn't safe to touch below connect_fn() */ struct aws_allocator *allocator = socket->allocator; uint32_t connect_timeout_ms = socket->options.connect_timeout_ms; connect_res = connect_fn( (SOCKET)socket->io_handle.data.handle, socket_addr, (int)sock_size, &fake_buffer, 0, NULL, aws_overlapped_to_windows_overlapped(&socket_impl->read_io_data->signal)); uint64_t time_to_run = 0; /* if the connect succeeded immediately, let the timeout task still run, but it can run immediately. This is cleaner because it can just deallocate the memory we just allocated. 
*/
    aws_event_loop_current_clock_time(connect_loop, &time_to_run);

    /* with IO completion ports, the overlapped callback triggers even if the operation succeeded immediately,
       so we can just act like it's pending and the code path is the same. */
    if (!connect_res) {
        int error_code = WSAGetLastError();
        if (error_code != ERROR_IO_PENDING) {
            AWS_LOGF_DEBUG(
                AWS_LS_IO_SOCKET,
                "id=%p handle=%p: connection error %d",
                (void *)socket,
                (void *)socket->io_handle.data.handle,
                error_code);
            socket_impl->connect_args = NULL;
            socket_impl->read_io_data->in_use = false;
            aws_mem_release(allocator, connect_args);
            int aws_err = s_determine_socket_error(error_code);
            return aws_raise_error(aws_err);
        }

        time_to_run += aws_timestamp_convert(connect_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
    } else {
        /* add 500 ms just in case we're under heavy load */
        time_to_run += aws_timestamp_convert(500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
    }

    AWS_LOGF_TRACE(
        AWS_LS_IO_SOCKET,
        "id=%p handle=%p: connection pending, scheduling timeout task",
        (void *)socket,
        (void *)socket->io_handle.data.handle);
    aws_event_loop_schedule_task_future(connect_loop, &connect_args->timeout_task, time_to_run);

    return AWS_OP_SUCCESS;
}

/* This should be called IMMEDIATELY after failure.
 * Otherwise, WSAGetLastError() could get cleared accidentally by a logging call */
static inline int s_convert_pton_error(int pton_err) {
    if (pton_err == 0) {
        return AWS_IO_SOCKET_INVALID_ADDRESS;
    }
    return s_determine_socket_error(WSAGetLastError());
}

/* initiate TCP ipv4 outbound connection. */
static int s_ipv4_stream_connect(
    struct aws_socket *socket,
    const struct aws_socket_endpoint *remote_endpoint,
    struct aws_event_loop *connect_loop,
    aws_socket_on_connection_result_fn *on_connection_result,
    void *user_data) {
    AWS_ASSERT(connect_loop);
    AWS_ASSERT(on_connection_result);

    AWS_LOGF_DEBUG(
        AWS_LS_IO_SOCKET,
        "id=%p handle=%p: beginning connect.",
        (void *)socket,
        (void *)socket->io_handle.data.handle);

    socket->connection_result_fn = on_connection_result;
    socket->connect_accept_user_data = user_data;

    struct sockaddr_in addr_in;
    AWS_ZERO_STRUCT(addr_in);
    int err = inet_pton(AF_INET, remote_endpoint->address, &(addr_in.sin_addr));

    if (err != 1) {
        int aws_err = s_convert_pton_error(err); /* call before logging or WSAError may get cleared */
        AWS_LOGF_ERROR(
            AWS_LS_IO_SOCKET,
            "id=%p handle=%p: failed to parse address %s:%u.",
            (void *)socket,
            (void *)socket->io_handle.data.handle,
            remote_endpoint->address,
            remote_endpoint->port);
        return aws_raise_error(aws_err);
    }

    AWS_LOGF_DEBUG(
        AWS_LS_IO_SOCKET,
        "id=%p handle=%p: connecting to endpoint %s:%u.",
        (void *)socket,
        (void *)socket->io_handle.data.handle,
        remote_endpoint->address,
        remote_endpoint->port);

    addr_in.sin_port = htons((uint16_t)remote_endpoint->port);
    addr_in.sin_family = AF_INET;

    /* ConnectEx() requires the socket to be bound before connecting, so bind to an ephemeral port first. */
    struct sockaddr_in in_bind_addr;
    AWS_ZERO_STRUCT(in_bind_addr);
    in_bind_addr.sin_family = AF_INET;
    in_bind_addr.sin_addr.s_addr = INADDR_ANY;
    in_bind_addr.sin_port = 0;

    return s_tcp_connect(
        socket,
        remote_endpoint,
        connect_loop,
        (struct sockaddr *)&in_bind_addr,
        (struct sockaddr *)&addr_in,
        sizeof(addr_in));
}

/* initiate TCP ipv6 outbound connection. 
*/ static int s_ipv6_stream_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data) { AWS_ASSERT(connect_loop); AWS_ASSERT(on_connection_result); AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: beginning connect.", (void *)socket, (void *)socket->io_handle.data.handle); if (socket->state != INIT) { socket->state = ERRORED; return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } socket->connection_result_fn = on_connection_result; socket->connect_accept_user_data = user_data; struct sockaddr_in6 bind_addr; AWS_ZERO_STRUCT(bind_addr); bind_addr.sin6_family = AF_INET6; bind_addr.sin6_port = 0; struct sockaddr_in6 addr_in6; AWS_ZERO_STRUCT(addr_in6); int pton_err = inet_pton(AF_INET6, remote_endpoint->address, &(addr_in6.sin6_addr)); if (pton_err != 1) { int aws_err = s_convert_pton_error(pton_err); /* call before logging or WSAError may get cleared */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: failed to parse address %s:%u.", (void *)socket, (void *)socket->io_handle.data.handle, remote_endpoint->address, remote_endpoint->port); return aws_raise_error(aws_err); } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connecting to endpoint %s:%u.", (void *)socket, (void *)socket->io_handle.data.handle, remote_endpoint->address, remote_endpoint->port); addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port); addr_in6.sin6_family = AF_INET6; return s_tcp_connect( socket, remote_endpoint, connect_loop, (struct sockaddr *)&bind_addr, (struct sockaddr *)&addr_in6, sizeof(addr_in6)); } /* simply moves the connection_success notification into the event-loop's thread. */ static void s_connection_success_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { (void)task; (void)task_status; struct io_operation_data *io_data = arg; if (!io_data->socket) { aws_mem_release(io_data->allocator, io_data); return; } io_data->sequential_task_storage.fn = NULL; io_data->sequential_task_storage.arg = NULL; io_data->in_use = false; struct aws_socket *socket = io_data->socket; struct iocp_socket *socket_impl = socket->impl; socket_impl->vtable->connection_success(socket); } /* initiate the client end of a named pipe. 
*/ static int s_local_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data) { AWS_ASSERT(connect_loop); AWS_ASSERT(on_connection_result); AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: beginning connect.", (void *)socket, (void *)socket->io_handle.data.handle); socket->connection_result_fn = on_connection_result; socket->connect_accept_user_data = user_data; if (s_process_tcp_sock_options(socket)) { socket->state = ERRORED; return AWS_OP_ERR; } struct iocp_socket *socket_impl = socket->impl; socket->remote_endpoint = *remote_endpoint; socket->io_handle.data.handle = CreateFileA( remote_endpoint->address, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, NULL); if (socket->io_handle.data.handle != INVALID_HANDLE_VALUE) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: Successfully connected to named pipe %s.", (void *)socket, (void *)socket->io_handle.data.handle, remote_endpoint->address); if (aws_socket_assign_to_event_loop(socket, connect_loop)) { goto error; } socket_impl->read_io_data->sequential_task_storage.fn = s_connection_success_task; socket_impl->read_io_data->sequential_task_storage.arg = socket_impl->read_io_data; socket_impl->read_io_data->in_use = true; aws_event_loop_schedule_task_now(connect_loop, &socket_impl->read_io_data->sequential_task_storage); return AWS_OP_SUCCESS; } error:; int win_error = GetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: failed to connect to named pipe %s.", (void *)socket, (void *)socket->io_handle.data.handle, remote_endpoint->address); socket->state = ERRORED; int aws_error = s_determine_socket_error(win_error); aws_raise_error(aws_error); return AWS_OP_ERR; } /* connect generic udp outbound */ static inline int s_dgram_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, struct sockaddr *socket_addr, size_t sock_size) { struct iocp_socket *socket_impl = socket->impl; socket->remote_endpoint = *remote_endpoint; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: connecting to to %s:%u", (void *)socket, (void *)socket->io_handle.data.handle, remote_endpoint->address, remote_endpoint->port); int reuse = 1; if (setsockopt((SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse, sizeof(int))) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p handle=%p: setsockopt() call for enabling SO_REUSEADDR failed with WSAError %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } int connect_err = connect((SOCKET)socket->io_handle.data.handle, socket_addr, (int)sock_size); if (connect_err) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: Failed to connect to %s:%u with error %d.", (void *)socket, (void *)socket->io_handle.data.handle, remote_endpoint->address, remote_endpoint->port, wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } if (s_update_local_endpoint_ipv4_ipv6(socket)) { goto error; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: local endpoint %s:%u", (void *)socket, (void *)socket->io_handle.data.handle, socket->local_endpoint.address, 
socket->local_endpoint.port); if (s_process_tcp_sock_options(socket)) { goto error; } socket->state = CONNECTED_READ | CONNECTED_WRITE; if (connect_loop) { if (aws_socket_assign_to_event_loop(socket, connect_loop)) { goto error; } socket_impl->read_io_data->sequential_task_storage.fn = s_connection_success_task; socket_impl->read_io_data->sequential_task_storage.arg = socket_impl->read_io_data; socket_impl->read_io_data->in_use = true; aws_event_loop_schedule_task_now(connect_loop, &socket_impl->read_io_data->sequential_task_storage); } return AWS_OP_SUCCESS; error: socket->state = ERRORED; return AWS_OP_ERR; } static int s_ipv4_dgram_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data) { (void)user_data; /* we don't actually care if it's null in this case. */ socket->connection_result_fn = on_connection_result; socket->connect_accept_user_data = user_data; struct sockaddr_in addr_in; AWS_ZERO_STRUCT(addr_in); int pton_err = inet_pton(AF_INET, remote_endpoint->address, &(addr_in.sin_addr)); if (pton_err != 1) { int aws_err = s_convert_pton_error(pton_err); /* call right after failure, so that WSAError isn't cleared */ socket->state = ERRORED; return aws_raise_error(aws_err); } addr_in.sin_port = htons((uint16_t)remote_endpoint->port); addr_in.sin_family = AF_INET; return s_dgram_connect(socket, remote_endpoint, connect_loop, (struct sockaddr *)&addr_in, sizeof(addr_in)); } static int s_ipv6_dgram_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, struct aws_event_loop *connect_loop, aws_socket_on_connection_result_fn *on_connection_result, void *user_data) { (void)user_data; /* we don't actually care if it's null in this case. */ socket->connection_result_fn = on_connection_result; socket->connect_accept_user_data = user_data; struct sockaddr_in6 addr_in6; AWS_ZERO_STRUCT(addr_in6); int pton_err = inet_pton(AF_INET6, remote_endpoint->address, &(addr_in6.sin6_addr)); if (pton_err != 1) { int aws_err = s_convert_pton_error(pton_err); /* call right after failure, so that WSAError isn't cleared */ socket->state = ERRORED; return aws_raise_error(aws_err); } addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port); addr_in6.sin6_family = AF_INET6; return s_dgram_connect(socket, remote_endpoint, connect_loop, (struct sockaddr *)&addr_in6, sizeof(addr_in6)); } static inline int s_tcp_bind(struct aws_socket *socket, struct sockaddr *sock_addr, size_t sock_size) { /* set this option to prevent duplicate bind calls. */ int exclusive_use_val = 1; if (setsockopt( (SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, (char *)&exclusive_use_val, sizeof(int))) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p handle=%p: setsockopt() call for enabling SO_EXCLUSIVEADDRUSE failed with WSAError %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } if (bind((SOCKET)socket->io_handle.data.handle, sock_addr, (int)sock_size) != 0) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: error binding. 
error %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } if (s_update_local_endpoint_ipv4_ipv6(socket)) { goto error; } AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: binding to tcp %s:%u", (void *)socket, (void *)socket->io_handle.data.handle, socket->local_endpoint.address, socket->local_endpoint.port); socket->state = BOUND; return AWS_OP_SUCCESS; error: socket->state = ERRORED; return AWS_OP_ERR; } static int s_ipv4_stream_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { struct sockaddr_in addr_in; AWS_ZERO_STRUCT(addr_in); int pton_err = inet_pton(AF_INET, local_endpoint->address, &(addr_in.sin_addr)); if (pton_err != 1) { int aws_err = s_convert_pton_error(pton_err); /* call right after failure, so that WSAError isn't cleared */ socket->state = ERRORED; return aws_raise_error(aws_err); } addr_in.sin_port = htons((uint16_t)local_endpoint->port); addr_in.sin_family = AF_INET; return s_tcp_bind(socket, (struct sockaddr *)&addr_in, sizeof(addr_in)); } static int s_ipv6_stream_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { struct sockaddr_in6 addr_in6; AWS_ZERO_STRUCT(addr_in6); int pton_err = inet_pton(AF_INET6, local_endpoint->address, &(addr_in6.sin6_addr)); if (pton_err != 1) { int aws_err = s_convert_pton_error(pton_err); /* call right after failure, so that WSAError isn't cleared */ socket->state = ERRORED; return aws_raise_error(aws_err); } addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); addr_in6.sin6_family = AF_INET6; return s_tcp_bind(socket, (struct sockaddr *)&addr_in6, sizeof(addr_in6)); } static inline int s_udp_bind(struct aws_socket *socket, struct sockaddr *sock_addr, size_t sock_size) { if (bind((SOCKET)socket->io_handle.data.handle, sock_addr, (int)sock_size) != 0) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: error binding. 
error %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); aws_raise_error(s_determine_socket_error(wsa_err)); goto error; } if (s_update_local_endpoint_ipv4_ipv6(socket)) { goto error; } AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: binding to udp %s:%u", (void *)socket, (void *)socket->io_handle.data.handle, socket->local_endpoint.address, socket->local_endpoint.port); socket->state = CONNECTED_READ; return AWS_OP_SUCCESS; error: socket->state = ERRORED; return AWS_OP_ERR; } static int s_ipv4_dgram_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { struct sockaddr_in addr_in; AWS_ZERO_STRUCT(addr_in); int pton_err = inet_pton(AF_INET, local_endpoint->address, &(addr_in.sin_addr)); if (pton_err != 1) { int aws_err = s_convert_pton_error(pton_err); /* call right after failure, so that WSAError isn't cleared */ socket->state = ERRORED; return aws_raise_error(aws_err); } addr_in.sin_port = htons((uint16_t)local_endpoint->port); addr_in.sin_family = AF_INET; return s_udp_bind(socket, (struct sockaddr *)&addr_in, sizeof(addr_in)); } static int s_ipv6_dgram_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { struct sockaddr_in6 addr_in6; AWS_ZERO_STRUCT(addr_in6); int pton_err = inet_pton(AF_INET6, local_endpoint->address, &(addr_in6.sin6_addr)); if (pton_err != 1) { int aws_err = s_convert_pton_error(pton_err); /* call right after failure, so that WSAError isn't cleared */ socket->state = ERRORED; return aws_raise_error(aws_err); } addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); addr_in6.sin6_family = AF_INET6; return s_udp_bind(socket, (struct sockaddr *)&addr_in6, sizeof(addr_in6)); } static int s_local_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: binding to named pipe %s", (void *)socket, (void *)socket->io_handle.data.handle, local_endpoint->address); socket->local_endpoint = *local_endpoint; socket->io_handle.data.handle = CreateNamedPipeA( local_endpoint->address, PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT | PIPE_ACCEPT_REMOTE_CLIENTS, PIPE_UNLIMITED_INSTANCES, PIPE_BUFFER_SIZE, PIPE_BUFFER_SIZE, 0, NULL); if (socket->io_handle.data.handle != INVALID_HANDLE_VALUE) { socket->state = BOUND; return AWS_OP_SUCCESS; } else { int error_code = GetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: failed to open named pipe %s with error %d", (void *)socket, (void *)socket->io_handle.data.handle, local_endpoint->address, error_code); socket->state = ERRORED; int aws_error = s_determine_socket_error(error_code); return aws_raise_error(aws_error); } } static int s_tcp_listen(struct aws_socket *socket, int backlog_size) { AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: starting listen with backlog %d", (void *)socket, (void *)socket->io_handle.data.handle, backlog_size); int error_code = listen((SOCKET)socket->io_handle.data.handle, backlog_size); if (!error_code) { socket->state = LISTENING; return AWS_OP_SUCCESS; } error_code = GetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: listen failed with error code %d", (void *)socket, (void *)socket->io_handle.data.handle, error_code); int aws_error = s_determine_socket_error(error_code); return aws_raise_error(aws_error); } static int s_udp_listen(struct aws_socket *socket, int backlog_size) { 
(void)socket; (void)backlog_size; return aws_raise_error(AWS_IO_SOCKET_INVALID_OPERATION_FOR_TYPE); } static int s_local_listen(struct aws_socket *socket, int backlog_size) { (void)socket; (void)backlog_size; if (AWS_UNLIKELY(socket->state != BOUND)) { return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } socket->state = LISTENING; return AWS_OP_SUCCESS; } /* triggered by the event loop upon an incoming pipe connection. */ static void s_incoming_pipe_connection_event( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)event_loop; (void)num_bytes_transferred; struct io_operation_data *operation_data = AWS_CONTAINER_OF(overlapped, struct io_operation_data, signal); struct aws_socket *socket = overlapped->user_data; if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } if (status_code == IO_OPERATION_CANCELLED) { operation_data->in_use = false; return; } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: named-pipe listening event received", (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket->impl; if (status_code) { if (status_code == IO_PIPE_BROKEN) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: named-pipe is closed", (void *)socket, (void *)socket->io_handle.data.handle); aws_raise_error(AWS_IO_SOCKET_CLOSED); socket->state = CLOSED; } else { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: named-pipe error %d", (void *)socket, (void *)socket->io_handle.data.handle, (int)GetLastError()); aws_raise_error(s_determine_socket_error(status_code)); socket->state = ERRORED; } socket_impl->vtable->connection_error(socket, aws_last_error()); operation_data->in_use = false; return; } bool continue_accept_loop = !socket_impl->stop_accept; do { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: incoming connection", (void *)socket, (void *)socket->io_handle.data.handle); struct aws_socket *new_socket = aws_mem_acquire(socket->allocator, sizeof(struct aws_socket)); if (!new_socket) { socket->state = ERRORED; operation_data->in_use = false; socket_impl->vtable->connection_error(socket, AWS_ERROR_OOM); return; } if (s_socket_init(new_socket, socket->allocator, &socket->options, false)) { aws_mem_release(socket->allocator, new_socket); socket->state = ERRORED; operation_data->in_use = false; socket_impl->vtable->connection_error(socket, aws_last_error()); return; } new_socket->state = CONNECTED_WRITE | CONNECTED_READ; /* Named pipes don't work like traditional socket APIs. The original handle is used for the incoming connection. so we copy it over and do some trickery with the event loop registrations. 
*/ new_socket->io_handle = socket->io_handle; aws_event_loop_unsubscribe_from_io_events(event_loop, &new_socket->io_handle); new_socket->event_loop = NULL; socket->io_handle.data.handle = CreateNamedPipeA( socket->local_endpoint.address, PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT | PIPE_ACCEPT_REMOTE_CLIENTS, PIPE_UNLIMITED_INSTANCES, PIPE_BUFFER_SIZE, PIPE_BUFFER_SIZE, 0, NULL); if (socket->io_handle.data.handle == INVALID_HANDLE_VALUE) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: error rebinding named pipe with error %d", (void *)socket, (void *)socket->io_handle.data.handle, (int)GetLastError()); socket->state = ERRORED; operation_data->in_use = false; socket_impl->vtable->connection_error(socket, aws_last_error()); return; } aws_overlapped_init(&socket_impl->read_io_data->signal, s_incoming_pipe_connection_event, socket); socket->event_loop = NULL; if (aws_socket_assign_to_event_loop(socket, event_loop)) { socket->state = ERRORED; operation_data->in_use = false; aws_socket_clean_up(new_socket); socket_impl->vtable->connection_error(socket, aws_last_error()); return; } socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_socket, socket->connect_accept_user_data); if (!operation_data->socket) { socket->state = ERRORED; operation_data->in_use = false; aws_mem_release(operation_data->allocator, operation_data); return; } socket_impl->read_io_data->in_use = true; BOOL res = ConnectNamedPipe( socket->io_handle.data.handle, aws_overlapped_to_windows_overlapped(&socket_impl->read_io_data->signal)); continue_accept_loop = false; if (!res) { int error_code = GetLastError(); if (error_code != ERROR_IO_PENDING && error_code != ERROR_PIPE_CONNECTED) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: named-pipe connect failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, error_code); socket->state = ERRORED; socket_impl->read_io_data->in_use = false; int aws_err = s_determine_socket_error(error_code); socket_impl->vtable->connection_error(socket, aws_err); return; } else if (error_code == ERROR_PIPE_CONNECTED) { continue_accept_loop = true; } else { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: no pending connections exiting accept loop.", (void *)socket, (void *)socket->io_handle.data.handle); } } } while (continue_accept_loop && !socket_impl->stop_accept); } static void s_tcp_accept_event( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred); static int s_socket_setup_accept(struct aws_socket *socket, struct aws_event_loop *accept_loop) { struct iocp_socket *socket_impl = socket->impl; socket_impl->incoming_socket = aws_mem_acquire(socket->allocator, sizeof(struct aws_socket)); if (!socket_impl->incoming_socket) { return AWS_OP_ERR; } int err = s_socket_init(socket_impl->incoming_socket, socket->allocator, &socket->options, true); if (err) { socket->state = ERRORED; aws_socket_clean_up(socket_impl->incoming_socket); aws_mem_release(socket->allocator, socket_impl->incoming_socket); socket_impl->incoming_socket = NULL; return AWS_OP_ERR; } socket_impl->incoming_socket->local_endpoint = socket->local_endpoint; socket_impl->incoming_socket->state = INIT; if (accept_loop && aws_socket_assign_to_event_loop(socket, accept_loop)) { socket->state = ERRORED; aws_socket_clean_up(socket_impl->incoming_socket); aws_mem_release(socket->allocator, socket_impl->incoming_socket); socket_impl->incoming_socket = NULL; return AWS_OP_ERR; } 
aws_overlapped_init(&socket_impl->read_io_data->signal, s_tcp_accept_event, socket); LPFN_ACCEPTEX accept_fn = (LPFN_ACCEPTEX)aws_winsock_get_acceptex_fn(); socket_impl->read_io_data->in_use = true; while (true) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: performing non-blocking accept", (void *)socket, (void *)socket->io_handle.data.handle); BOOL res = accept_fn( (SOCKET)socket->io_handle.data.handle, (SOCKET)socket_impl->incoming_socket->io_handle.data.handle, socket_impl->accept_buffer, 0, SOCK_STORAGE_SIZE, SOCK_STORAGE_SIZE, NULL, aws_overlapped_to_windows_overlapped(&socket_impl->read_io_data->signal)); if (!res) { int win_err = WSAGetLastError(); if (win_err == ERROR_IO_PENDING) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: no pending incoming connections, exiting loop.", (void *)socket, (void *)socket->io_handle.data.handle); return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } else if (AWS_UNLIKELY(win_err == WSAECONNRESET)) { continue; } AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: accept failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, (int)win_err); socket->state = ERRORED; socket_impl->read_io_data->in_use = false; aws_mem_release(socket->allocator, socket_impl->incoming_socket); socket_impl->incoming_socket = NULL; int aws_err = s_determine_socket_error(win_err); return aws_raise_error(aws_err); } return AWS_OP_SUCCESS; } } /* invoked by the event loop when a listening socket has incoming connections. This is only used for TCP.*/ static void s_tcp_accept_event( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)event_loop; (void)num_bytes_transferred; struct io_operation_data *operation_data = AWS_CONTAINER_OF(overlapped, struct io_operation_data, signal); struct aws_socket *socket = overlapped->user_data; if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } if (status_code == IO_OPERATION_CANCELLED || status_code == WSAECONNRESET) { operation_data->in_use = false; return; } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: accept event triggered.", (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket->impl; if (!status_code && !socket_impl->stop_accept) { int err = AWS_OP_SUCCESS; do { socket_impl->incoming_socket->state = CONNECTED_WRITE | CONNECTED_READ; uint32_t port = 0; struct sockaddr_storage *in_addr = (struct sockaddr_storage *)socket_impl->accept_buffer; if (in_addr->ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)in_addr; port = ntohs(s->sin_port); /* the kernel created these, a.) they won't fail, b.) if they do it's not fatal. log it later. */ InetNtopA( AF_INET, &s->sin_addr, socket_impl->incoming_socket->remote_endpoint.address, sizeof(socket_impl->incoming_socket->remote_endpoint.address)); socket_impl->incoming_socket->options.domain = AWS_SOCKET_IPV4; } else if (in_addr->ss_family == AF_INET6) { struct sockaddr_in6 *s = (struct sockaddr_in6 *)in_addr; port = ntohs(s->sin6_port); /* the kernel created these, a.) they won't fail, b.) if they do it's not fatal. log it later. 
*/ InetNtopA( AF_INET6, &s->sin6_addr, socket_impl->incoming_socket->remote_endpoint.address, sizeof(socket_impl->incoming_socket->remote_endpoint.address)); socket_impl->incoming_socket->options.domain = AWS_SOCKET_IPV6; } socket_impl->incoming_socket->remote_endpoint.port = port; AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: incoming connection accepted from %s:%u.", (void *)socket, (void *)socket->io_handle.data.handle, socket_impl->incoming_socket->remote_endpoint.address, port); u_long non_blocking = 1; ioctlsocket((SOCKET)socket_impl->incoming_socket->io_handle.data.handle, FIONBIO, &non_blocking); aws_socket_set_options(socket_impl->incoming_socket, &socket->options); struct aws_socket *incoming_socket = socket_impl->incoming_socket; socket_impl->incoming_socket = NULL; socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, incoming_socket, socket->connect_accept_user_data); if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } socket_impl->incoming_socket = NULL; err = s_socket_setup_accept(socket, NULL); if (err) { if (aws_last_error() != AWS_IO_READ_WOULD_BLOCK) { socket->state = ERRORED; socket_impl->vtable->connection_error(socket, aws_last_error()); } return; } } while (!err && !socket_impl->stop_accept); } else if (status_code) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: error occurred %d.", (void *)socket, (void *)socket->io_handle.data.handle, status_code); socket->state = ERRORED; int aws_error = s_determine_socket_error(status_code); aws_raise_error(aws_error); socket_impl->vtable->connection_error(socket, aws_error); operation_data->in_use = false; } } static int s_tcp_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data) { AWS_ASSERT(accept_loop); AWS_ASSERT(on_accept_result); if (AWS_UNLIKELY(socket->state != LISTENING)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: invalid state for start_accept operation. You must call listen first.", (void *)socket, (void *)socket->io_handle.data.handle); return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } if (AWS_UNLIKELY(socket->event_loop && socket->event_loop != accept_loop)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: is already assigned to event-loop %p.", (void *)socket, (void *)socket->io_handle.data.handle, (void *)socket->event_loop); return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); } struct iocp_socket *socket_impl = socket->impl; if (!socket_impl->read_io_data) { socket_impl->read_io_data = aws_mem_calloc(socket->allocator, 1, sizeof(struct io_operation_data)); if (!socket_impl->read_io_data) { socket->state = ERRORED; return AWS_OP_ERR; } socket_impl->read_io_data->allocator = socket->allocator; socket_impl->read_io_data->in_use = false; socket_impl->read_io_data->socket = socket; } socket->accept_result_fn = on_accept_result; socket->connect_accept_user_data = user_data; socket_impl->stop_accept = false; struct aws_event_loop *el_to_use = !socket->event_loop ? 
accept_loop : NULL; int err = s_socket_setup_accept(socket, el_to_use); if (!err || aws_last_error() == AWS_IO_READ_WOULD_BLOCK) { return AWS_OP_SUCCESS; } socket->state = ERRORED; return AWS_OP_ERR; } struct stop_accept_args { struct aws_mutex mutex; struct aws_condition_variable condition_var; struct aws_socket *socket; bool invoked; int ret_code; }; static bool s_stop_accept_predicate(void *arg) { struct stop_accept_args *stop_accept_args = arg; return stop_accept_args->invoked; } static int s_stream_stop_accept(struct aws_socket *socket); static void s_stop_accept_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct stop_accept_args *stop_accept_args = arg; aws_mutex_lock(&stop_accept_args->mutex); stop_accept_args->ret_code = AWS_OP_SUCCESS; if (aws_socket_stop_accept(stop_accept_args->socket)) { stop_accept_args->ret_code = aws_last_error(); } stop_accept_args->invoked = true; aws_condition_variable_notify_one(&stop_accept_args->condition_var); aws_mutex_unlock(&stop_accept_args->mutex); } static int s_stream_stop_accept(struct aws_socket *socket) { AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: shutting down accept.", (void *)socket, (void *)socket->io_handle.data.handle); AWS_ASSERT(socket->event_loop); if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { struct stop_accept_args args = { .mutex = AWS_MUTEX_INIT, .condition_var = AWS_CONDITION_VARIABLE_INIT, .socket = socket, .ret_code = AWS_OP_SUCCESS, }; struct aws_task stop_accept_task = { .fn = s_stop_accept_task, .arg = &args, }; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: accept is shutting down, but it was called outside the " " event-loop thread. Blocking waiting on shutdown", (void *)socket, (void *)socket->io_handle.data.handle); aws_mutex_lock(&args.mutex); aws_event_loop_schedule_task_now(socket->event_loop, &stop_accept_task); aws_condition_variable_wait_pred(&args.condition_var, &args.mutex, s_stop_accept_predicate, &args); aws_mutex_unlock(&args.mutex); AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: accept shutdown completed", (void *)socket, (void *)socket->io_handle.data.handle); if (args.ret_code) { socket->state = ERRORED; return aws_raise_error(args.ret_code); } return AWS_OP_SUCCESS; } struct iocp_socket *socket_impl = socket->impl; socket_impl->stop_accept = true; CancelIo(socket->io_handle.data.handle); if (!socket_impl->read_io_data && socket_impl->incoming_socket) { aws_socket_clean_up(socket_impl->incoming_socket); aws_mem_release(socket->allocator, socket_impl->incoming_socket); socket_impl->incoming_socket = NULL; } return AWS_OP_SUCCESS; } static void s_named_pipe_is_ridiculous_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; struct io_operation_data *io_data = args; if (!io_data->socket) { aws_mem_release(io_data->allocator, io_data); return; } if (status == AWS_TASK_STATUS_RUN_READY) { io_data->sequential_task_storage.fn = NULL; io_data->sequential_task_storage.arg = NULL; s_incoming_pipe_connection_event(io_data->socket->event_loop, &io_data->signal, AWS_OP_SUCCESS, 0); } else { io_data->in_use = false; } } static int s_local_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data) { AWS_ASSERT(accept_loop); AWS_ASSERT(on_accept_result); if (AWS_UNLIKELY(socket->state != LISTENING)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: invalid state for start_accept operation. 
You must call listen first.", (void *)socket, (void *)socket->io_handle.data.handle); return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } if (AWS_UNLIKELY(socket->event_loop && socket->event_loop != accept_loop)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: is already assigned to event-loop %p.", (void *)socket, (void *)socket->io_handle.data.handle, (void *)socket->event_loop); return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); } AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: is starting to accept incoming connections", (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket->impl; if (!socket_impl->read_io_data) { socket_impl->read_io_data = aws_mem_calloc(socket->allocator, 1, sizeof(struct io_operation_data)); if (!socket_impl->read_io_data) { socket->state = ERRORED; return AWS_OP_ERR; } socket_impl->read_io_data->allocator = socket->allocator; socket_impl->read_io_data->in_use = false; socket_impl->read_io_data->socket = socket; } socket->accept_result_fn = on_accept_result; socket->connect_accept_user_data = user_data; socket_impl->stop_accept = false; aws_overlapped_init(&socket_impl->read_io_data->signal, s_incoming_pipe_connection_event, socket); socket_impl->read_io_data->in_use = true; if (!socket->event_loop && aws_socket_assign_to_event_loop(socket, accept_loop)) { socket_impl->read_io_data->in_use = false; socket->state = ERRORED; return AWS_OP_ERR; } BOOL res = ConnectNamedPipe( socket->io_handle.data.handle, aws_overlapped_to_windows_overlapped(&socket_impl->read_io_data->signal)); if (!res) { int error_code = GetLastError(); if (error_code != ERROR_IO_PENDING && error_code != ERROR_PIPE_CONNECTED) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: ConnectNamedPipe() failed with error %d.", (void *)socket, (void *)socket->io_handle.data.handle, error_code); socket->state = ERRORED; socket_impl->read_io_data->in_use = false; int aws_err = s_determine_socket_error(error_code); return aws_raise_error(aws_err); } else if (error_code == ERROR_PIPE_CONNECTED) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: Pipe connected immediately, scheduling task for setup.", (void *)socket, (void *)socket->io_handle.data.handle, error_code); /* There will be no IO-completion event in the case of ERROR_PIPE_CONNECTED, so schedule a task to finish the connection */ socket_impl->read_io_data->sequential_task_storage.fn = s_named_pipe_is_ridiculous_task; socket_impl->read_io_data->sequential_task_storage.arg = socket_impl->read_io_data; aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->read_io_data->sequential_task_storage); } } return AWS_OP_SUCCESS; } static int s_dgram_start_accept( struct aws_socket *socket, struct aws_event_loop *accept_loop, aws_socket_on_accept_result_fn *on_accept_result, void *user_data) { (void)socket; (void)accept_loop; (void)on_accept_result; (void)user_data; return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } static int s_dgram_stop_accept(struct aws_socket *socket) { (void)socket; return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_options *options) { if (socket->options.domain != options->domain || socket->options.type != options->type) { return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); } AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: setting socket options to: keep-alive %d, keep idle %d, keep-alive interval %d, max failed " "probe count 
%d", (void *)socket, (void *)socket->io_handle.data.handle, (int)options->keepalive, (int)options->keep_alive_timeout_sec, (int)options->keep_alive_interval_sec, (int)options->keep_alive_max_failed_probes); socket->options = *options; if (socket->options.domain != AWS_SOCKET_LOCAL && socket->options.type == AWS_SOCKET_STREAM) { if (socket->options.keepalive && !(socket->options.keep_alive_interval_sec && socket->options.keep_alive_timeout_sec)) { int keep_alive = 1; if (setsockopt( (SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_KEEPALIVE, (char *)&keep_alive, sizeof(int))) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p handle=%p: setsockopt() call for enabling keep-alive failed with WSAError %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); } } else if (socket->options.keepalive) { ULONG keep_alive_timeout = (ULONG)aws_timestamp_convert( socket->options.keep_alive_timeout_sec, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); ULONG keep_alive_interval = (ULONG)aws_timestamp_convert( socket->options.keep_alive_interval_sec, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); struct tcp_keepalive keepalive_args = { .onoff = 1, .keepalivetime = keep_alive_timeout, .keepaliveinterval = keep_alive_interval, }; DWORD bytes_returned = 0; if (WSAIoctl( (SOCKET)socket->io_handle.data.handle, SIO_KEEPALIVE_VALS, &keepalive_args, sizeof(keepalive_args), NULL, 0, &bytes_returned, NULL, NULL)) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p handle=%p: WSAIoctl() call for setting keep-alive values failed with WSAError %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); } } /* this is only available in Windows 10 1703 and later. It doesn't, matter if this runs on an older version the call will just fail, no harm done.*/ #ifdef TCP_KEEPCNT if (socket->options.keep_alive_max_failed_probes) { DWORD max_probes = socket->options.keep_alive_max_failed_probes; if (setsockopt( (SOCKET)socket->io_handle.data.handle, IPPROTO_TCP, TCP_KEEPCNT, (char *)&max_probes, sizeof(max_probes))) { int wsa_err = WSAGetLastError(); /* logging may reset error, so cache it */ AWS_LOGF_WARN( AWS_LS_IO_SOCKET, "id=%p handle=%p: setsockopt() call for setting keep-alive probe count value failed with WSAError " "%d. This likely" " isn't a problem. It's more likely you're on an old version of windows. This feature was added in " "Windows 10 1703", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); } } #endif } return AWS_OP_SUCCESS; } struct close_args { struct aws_mutex mutex; struct aws_condition_variable condition_var; struct aws_socket *socket; bool invoked; int ret_code; }; static bool s_close_predicate(void *arg) { struct close_args *close_args = arg; return close_args->invoked; } static int s_socket_close(struct aws_socket *socket); static void s_close_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct close_args *close_args = arg; aws_mutex_lock(&close_args->mutex); close_args->ret_code = AWS_OP_SUCCESS; /* since the task is canceled the event-loop is gone and the iocp will not trigger, so go ahead and tell the socket cleanup stuff that the iocp handle is no longer pending operations. 
*/ if (status == AWS_TASK_STATUS_CANCELED) { struct iocp_socket *iocp_socket = close_args->socket->impl; iocp_socket->read_io_data->in_use = false; } if (aws_socket_close(close_args->socket)) { close_args->ret_code = aws_last_error(); } close_args->invoked = true; aws_condition_variable_notify_one(&close_args->condition_var); aws_mutex_unlock(&close_args->mutex); } static int s_wait_on_close(struct aws_socket *socket) { AWS_ASSERT(socket->event_loop); /* don't freak out on me, this almost never happens, and never occurs inside a channel * it only gets hit from a listening socket shutting down or from a unit test. the only time we allow this kind of thing is when you're a listener.*/ if (socket->state != LISTENING) { return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } void *handle_for_logging = socket->io_handle.data.handle; /* socket's handle gets reset before final log */ (void)handle_for_logging; AWS_LOGF_INFO( AWS_LS_IO_SOCKET, "id=%p handle=%p: closing from a different thread than " "the socket is running from. Blocking until it closes down.", (void *)socket, handle_for_logging); struct close_args args = { .mutex = AWS_MUTEX_INIT, .condition_var = AWS_CONDITION_VARIABLE_INIT, .socket = socket, .ret_code = AWS_OP_SUCCESS, }; struct aws_task close_task = { .fn = s_close_task, .arg = &args, }; aws_mutex_lock(&args.mutex); aws_event_loop_schedule_task_now(socket->event_loop, &close_task); aws_condition_variable_wait_pred(&args.condition_var, &args.mutex, s_close_predicate, &args); aws_mutex_unlock(&args.mutex); AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p handle=%p: close task completed.", (void *)socket, handle_for_logging); if (args.ret_code) { return aws_raise_error(args.ret_code); } return AWS_OP_SUCCESS; } static int s_socket_close(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p handle=%p: closing", (void *)socket, (void *)socket->io_handle.data.handle); if (socket->event_loop) { if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { return s_wait_on_close(socket); } if (socket->state & LISTENING && !socket_impl->stop_accept) { aws_socket_stop_accept(socket); } } if (socket_impl->connect_args) { socket_impl->connect_args->socket = NULL; socket_impl->connect_args = NULL; } if (socket_impl->read_io_data && socket_impl->read_io_data->in_use) { socket_impl->read_io_data->socket = NULL; socket_impl->read_io_data = NULL; } else if (socket_impl->read_io_data) { aws_mem_release(socket->allocator, socket_impl->read_io_data); socket_impl->read_io_data = NULL; } if (socket->io_handle.data.handle != INVALID_HANDLE_VALUE) { shutdown((SOCKET)socket->io_handle.data.handle, SD_BOTH); closesocket((SOCKET)socket->io_handle.data.handle); socket->io_handle.data.handle = INVALID_HANDLE_VALUE; } socket->state = CLOSED; while (!aws_linked_list_empty(&socket_impl->pending_io_operations)) { struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->pending_io_operations); struct io_operation_data *op_data = AWS_CONTAINER_OF(node, struct io_operation_data, node); op_data->socket = NULL; aws_linked_list_pop_front(&socket_impl->pending_io_operations); } socket->event_loop = NULL; return AWS_OP_SUCCESS; } static int s_local_close(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p handle=%p: closing", (void *)socket, (void *)socket->io_handle.data.handle); if (socket->event_loop) { if 
(!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { return s_wait_on_close(socket); } } if (socket_impl->connect_args) { socket_impl->connect_args->socket = NULL; socket_impl->connect_args = NULL; } if (socket_impl->read_io_data && socket_impl->read_io_data->in_use) { socket_impl->read_io_data->socket = NULL; socket_impl->read_io_data = NULL; } else if (socket_impl->read_io_data) { aws_mem_release(socket->allocator, socket_impl->read_io_data); socket_impl->read_io_data = NULL; } if (socket->io_handle.data.handle != INVALID_HANDLE_VALUE) { CloseHandle(socket->io_handle.data.handle); socket->io_handle.data.handle = INVALID_HANDLE_VALUE; } socket->state = CLOSED; while (!aws_linked_list_empty(&socket_impl->pending_io_operations)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->pending_io_operations); struct io_operation_data *op_data = AWS_CONTAINER_OF(node, struct io_operation_data, node); op_data->socket = NULL; } return AWS_OP_SUCCESS; } int aws_socket_half_close(struct aws_socket *socket, enum aws_channel_direction dir) { int how = dir == AWS_CHANNEL_DIR_READ ? 0 : 1; struct iocp_socket *socket_impl = socket->impl; AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: shutting down in direction %d", (void *)socket, (void *)socket->io_handle.data.handle, dir); if (shutdown((SOCKET)socket->io_handle.data.handle, how)) { int error = WSAGetLastError(); int aws_error = s_determine_socket_error(error); aws_raise_error(aws_error); socket_impl->vtable->connection_error(socket, aws_error); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } struct aws_io_handle *aws_socket_get_io_handle(struct aws_socket *socket) { return &socket->io_handle; } int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { if (socket->event_loop) { return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); } socket->event_loop = event_loop; return aws_event_loop_connect_handle_to_io_completion_port(event_loop, &socket->io_handle); } struct aws_event_loop *aws_socket_get_event_loop(struct aws_socket *socket) { return socket->event_loop; } struct read_cb_args { struct aws_socket *socket; aws_socket_on_readable_fn *user_callback; void *user_data; }; /* invoked by the event loop when the socket (TCP or Local) becomes readable. 
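This is the IOCP completion for the zero-byte ReadFile() posted by s_stream_subscribe_to_read(), s_tcp_read() or s_local_read(); the read transfers no data, so num_bytes_transferred is ignored and the completion is treated purely as a readiness signal that gets forwarded to the user via readable_fn.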
*/ static void s_stream_readable_event( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)num_bytes_transferred; (void)event_loop; struct io_operation_data *operation_data = AWS_CONTAINER_OF(overlapped, struct io_operation_data, signal); struct aws_socket *socket = overlapped->user_data; if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } if (status_code == WSA_OPERATION_ABORTED || status_code == IO_OPERATION_CANCELLED) { return; } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket readable event triggered", (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket->impl; socket->state = socket->state & ~CONNECTED_WAITING_ON_READABLE; int err_code = AWS_OP_SUCCESS; if (status_code && status_code != ERROR_IO_PENDING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket status error %d", (void *)socket, (void *)socket->io_handle.data.handle, status_code); err_code = s_determine_socket_error(status_code); if (err_code == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } } socket->readable_fn(socket, err_code, socket->readable_user_data); if (operation_data->socket && socket_impl->read_io_data) { /* recursion and what not.... what if someone calls read from the callback until it says, HEY I'm out of data, then they toggle this flag? So check that they didn't go back into the CONNECTED_WAITING_ON_READABLE before clearing this flag. */ if (!(socket->state & CONNECTED_WAITING_ON_READABLE)) { socket_impl->read_io_data->in_use = false; } } if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } } /* Invoked by the event loop when a UDP socket goes readable. */ static void s_dgram_readable_event( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)num_bytes_transferred; (void)event_loop; struct io_operation_data *operation_data = AWS_CONTAINER_OF(overlapped, struct io_operation_data, signal); struct aws_socket *socket = overlapped->user_data; if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } if (status_code == WSA_OPERATION_ABORTED || status_code == IO_OPERATION_CANCELLED) { return; } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket readable event triggered", (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket->impl; socket->state = socket->state & ~CONNECTED_WAITING_ON_READABLE; int err_code = AWS_OP_SUCCESS; /* IO_STATUS_BUFFER_OVERFLOW we did a peek on a zero buffer size.... this is fine we just wanted to know we're readable. */ if (status_code != ERROR_IO_PENDING && status_code != IO_STATUS_BUFFER_OVERFLOW) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket status error %d", (void *)socket, (void *)socket->io_handle.data.handle, status_code); err_code = s_determine_socket_error(status_code); if (err_code == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } } socket->readable_fn(socket, err_code, socket->readable_user_data); if (operation_data->socket && socket_impl->read_io_data) { /* recursion and what not.... what if someone calls read from the callback until it says, HEY I'm out of data, then they toggle this flag? So check that they didn't go back into the CONNECTED_WAITING_ON_READABLE before clearing this flag. 
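Concretely: readable_fn may call the socket's read/subscribe paths again from inside this callback, which re-posts the peek, sets CONNECTED_WAITING_ON_READABLE and marks read_io_data->in_use = true; clearing in_use unconditionally here would stomp on that freshly re-armed request.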
*/ if (!(socket->state & CONNECTED_WAITING_ON_READABLE)) { socket_impl->read_io_data->in_use = false; } } if (!operation_data->socket) { aws_mem_release(operation_data->allocator, operation_data); return; } } static int s_stream_subscribe_to_read( struct aws_socket *socket, aws_socket_on_readable_fn *on_readable, void *user_data) { socket->readable_fn = on_readable; socket->readable_user_data = user_data; AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: subscribing to readable event", (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *iocp_socket = socket->impl; iocp_socket->read_io_data->in_use = true; aws_overlapped_init(&iocp_socket->read_io_data->signal, s_stream_readable_event, socket); int fake_buffer = 0; socket->state |= CONNECTED_WAITING_ON_READABLE; BOOL success = ReadFile( socket->io_handle.data.handle, &fake_buffer, 0, NULL, aws_overlapped_to_windows_overlapped(&iocp_socket->read_io_data->signal)); if (!success) { int win_err = GetLastError(); if (win_err != ERROR_IO_PENDING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket ReadFile() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, win_err); iocp_socket->read_io_data->in_use = false; socket->state &= ~CONNECTED_WAITING_ON_READABLE; int aws_error = s_determine_socket_error(win_err); if (aws_error == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } return aws_raise_error(aws_error); } } return AWS_OP_SUCCESS; } static int s_dgram_subscribe_to_read( struct aws_socket *socket, aws_socket_on_readable_fn *on_readable, void *user_data) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: subscribing to readable event", (void *)socket, (void *)socket->io_handle.data.handle); socket->readable_fn = on_readable; socket->readable_user_data = user_data; struct iocp_socket *iocp_socket = socket->impl; iocp_socket->read_io_data->in_use = true; aws_overlapped_init(&iocp_socket->read_io_data->signal, s_dgram_readable_event, socket); socket->state |= CONNECTED_WAITING_ON_READABLE; /* the zero byte read trick with ReadFile doesn't actually work for UDP because it actually clears the buffer from the kernel, but if we use WSARecv, we can tell it we just want to peek which won't clear the kernel buffers. Giving a BS buffer with 0 len seems to do the trick. 
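Because the peek buffer is zero bytes, the completion for this operation typically reports IO_STATUS_BUFFER_OVERFLOW rather than plain success; s_dgram_readable_event() above deliberately treats that status as "the socket is readable" and not as an error, and the datagram itself stays queued in the kernel until a real receive happens.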
*/ WSABUF buf = { .len = 0, .buf = NULL, }; DWORD flags = MSG_PEEK; int err = WSARecv( (SOCKET)socket->io_handle.data.handle, &buf, 1, NULL, &flags, aws_overlapped_to_windows_overlapped(&iocp_socket->read_io_data->signal), NULL); if (err) { int wsa_err = WSAGetLastError(); if (wsa_err != ERROR_IO_PENDING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket WSARecv() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); iocp_socket->read_io_data->in_use = false; int aws_error = s_determine_socket_error(wsa_err); if (aws_error == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } return aws_raise_error(aws_error); } } return AWS_OP_SUCCESS; } static int s_local_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: reading from named pipe", (void *)socket, (void *)socket->io_handle.data.handle); DWORD bytes_available = 0; BOOL peek_success = PeekNamedPipe(socket->io_handle.data.handle, NULL, 0, NULL, &bytes_available, NULL); if (!peek_success) { int error_code = GetLastError(); AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: PeekNamedPipe() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, error_code); return aws_raise_error(s_determine_socket_error(error_code)); } if (!bytes_available) { if (!(socket->state & CONNECTED_WAITING_ON_READABLE)) { struct iocp_socket *iocp_socket = socket->impl; socket->state |= CONNECTED_WAITING_ON_READABLE; iocp_socket->read_io_data->in_use = true; aws_overlapped_init(&iocp_socket->read_io_data->signal, s_stream_readable_event, socket); int fake_buffer = 0; BOOL success = ReadFile( socket->io_handle.data.handle, &fake_buffer, 0, NULL, aws_overlapped_to_windows_overlapped(&iocp_socket->read_io_data->signal)); if (!success) { int win_err = GetLastError(); if (win_err != ERROR_IO_PENDING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: ReadFile() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, win_err); iocp_socket->read_io_data->in_use = false; int aws_error = s_determine_socket_error(win_err); if (aws_error == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } return aws_raise_error(aws_error); } } } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: read would block, returning", (void *)socket, (void *)socket->io_handle.data.handle); return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: %u bytes available for read.", (void *)socket, (void *)socket->io_handle.data.handle, bytes_available); DWORD bytes_read = 0; size_t read_capacity = buffer->capacity - buffer->len; DWORD bytes_to_read = (DWORD)(bytes_available > read_capacity ? 
read_capacity : bytes_available); BOOL read_success = ReadFile(socket->io_handle.data.handle, buffer->buffer + buffer->len, bytes_to_read, &bytes_read, NULL); if (!read_success) { int error_code = GetLastError(); int aws_error = s_determine_socket_error(error_code); if (aws_error == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket ReadFile() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, error_code); return aws_raise_error(aws_error); } *amount_read = bytes_read; AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: successfully read %u bytes.", (void *)socket, (void *)socket->io_handle.data.handle, bytes_read); buffer->len += bytes_read; return AWS_OP_SUCCESS; } static int s_tcp_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: reading from socket", (void *)socket, (void *)socket->io_handle.data.handle); int read_val = recv( (SOCKET)socket->io_handle.data.handle, (char *)buffer->buffer + buffer->len, (int)(buffer->capacity - buffer->len), 0); if (read_val > 0) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: read %d bytes from socket", (void *)socket, (void *)socket->io_handle.data.handle, read_val); *amount_read = (size_t)read_val; buffer->len += *amount_read; return AWS_OP_SUCCESS; } if (read_val == 0) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket closed gracefully", (void *)socket, (void *)socket->io_handle.data.handle); socket->state = CLOSED; return aws_raise_error(AWS_IO_SOCKET_CLOSED); } int error = WSAGetLastError(); if (error == WSAEWOULDBLOCK) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: read would block, scheduling 0 byte read and returning", (void *)socket, (void *)socket->io_handle.data.handle); if (!(socket->state & CONNECTED_WAITING_ON_READABLE)) { struct iocp_socket *iocp_socket = socket->impl; socket->state |= CONNECTED_WAITING_ON_READABLE; iocp_socket->read_io_data->in_use = true; aws_overlapped_init(&iocp_socket->read_io_data->signal, s_stream_readable_event, socket); int fake_buffer = 0; BOOL success = ReadFile( socket->io_handle.data.handle, &fake_buffer, 0, NULL, aws_overlapped_to_windows_overlapped(&iocp_socket->read_io_data->signal)); if (!success) { int win_err = GetLastError(); if (win_err != ERROR_IO_PENDING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: ReadFile() for 0 byte read failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, win_err); iocp_socket->read_io_data->in_use = false; int aws_error = s_determine_socket_error(win_err); if (aws_error == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } return aws_raise_error(aws_error); } } } return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } if (error == EPIPE) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket closed", (void *)socket, (void *)socket->io_handle.data.handle); socket->state = CLOSED; return aws_raise_error(AWS_IO_BROKEN_PIPE); } AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: ReadFile() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, error); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } static int s_dgram_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: reading from socket", (void *)socket, (void *)socket->io_handle.data.handle); int read_val = recv( 
(SOCKET)socket->io_handle.data.handle, (char *)buffer->buffer + buffer->len, (int)(buffer->capacity - buffer->len), 0); if (read_val > 0) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: read %d bytes from socket", (void *)socket, (void *)socket->io_handle.data.handle, read_val); *amount_read = (size_t)read_val; buffer->len += *amount_read; return AWS_OP_SUCCESS; } if (read_val == 0) { AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket closed gracefully", (void *)socket, (void *)socket->io_handle.data.handle); socket->state = CLOSED; return aws_raise_error(AWS_IO_SOCKET_CLOSED); } int error = WSAGetLastError(); if (error == WSAEWOULDBLOCK) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: read would block, scheduling 0 byte read and returning", (void *)socket, (void *)socket->io_handle.data.handle); if (!(socket->state & CONNECTED_WAITING_ON_READABLE)) { struct iocp_socket *iocp_socket = socket->impl; socket->state |= CONNECTED_WAITING_ON_READABLE; iocp_socket->read_io_data->in_use = true; aws_overlapped_init(&iocp_socket->read_io_data->signal, s_stream_readable_event, socket); /* the zero byte read trick with ReadFile doesn't actually work for UDP because it actually clears the buffer from the kernel, but if we use WSARecv, we can tell it we just want to peek which won't clear the kernel buffers. Giving it a BS buffer with 0 len seems to do the trick. */ WSABUF buf = { .len = 0, .buf = NULL, }; DWORD flags = MSG_PEEK; int err = WSARecv( (SOCKET)socket->io_handle.data.handle, &buf, 1, NULL, &flags, aws_overlapped_to_windows_overlapped(&iocp_socket->read_io_data->signal), NULL); if (err) { int wsa_err = WSAGetLastError(); if (wsa_err != ERROR_IO_PENDING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: WSARecv() for 0 byte read failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, wsa_err); iocp_socket->read_io_data->in_use = false; int aws_error = s_determine_socket_error(wsa_err); if (aws_error == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } return aws_raise_error(aws_error); } } } return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } if (error == EPIPE) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: socket closed", (void *)socket, (void *)socket->io_handle.data.handle); socket->state = CLOSED; return aws_raise_error(AWS_IO_BROKEN_PIPE); } AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: recv() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, error); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } struct write_cb_args { struct io_operation_data io_data; size_t original_buffer_len; aws_socket_on_write_completed_fn *user_callback; void *user_data; }; /* Invoked for TCP, UDP, and Local when a message has been completely written to the wire.*/ static void s_socket_written_event( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { (void)event_loop; (void)num_bytes_transferred; struct io_operation_data *operation_data = AWS_CONTAINER_OF(overlapped, struct io_operation_data, signal); struct write_cb_args *write_cb_args = overlapped->user_data; struct aws_socket *socket = operation_data->socket; int aws_error_code = status_code ? 
s_determine_socket_error(status_code) : AWS_OP_SUCCESS; if (aws_error_code) { aws_raise_error(aws_error_code); } if (!socket) { void *user_data = write_cb_args->user_data; aws_socket_on_write_completed_fn *callback = write_cb_args->user_callback; callback(NULL, aws_error_code, num_bytes_transferred, user_data); aws_mem_release(operation_data->allocator, write_cb_args); return; } AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: Write Completion callback triggered", (void *)socket, (void *)socket->io_handle.data.handle); if (status_code) { if (aws_error_code == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } } else { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: Write of size %llu completed", (void *)socket, (void *)socket->io_handle.data.handle, (unsigned long long)num_bytes_transferred); AWS_ASSERT(num_bytes_transferred == write_cb_args->original_buffer_len); } aws_linked_list_remove(&operation_data->node); void *user_data = write_cb_args->user_data; aws_socket_on_write_completed_fn *callback = write_cb_args->user_callback; callback(operation_data->socket, aws_error_code, num_bytes_transferred, user_data); aws_mem_release(operation_data->allocator, write_cb_args); } int aws_socket_write( struct aws_socket *socket, const struct aws_byte_cursor *cursor, aws_socket_on_write_completed_fn *written_fn, void *user_data) { if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); } if (!(socket->state & CONNECTED_WRITE)) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: cannot write to because it is not connected", (void *)socket, (void *)socket->io_handle.data.handle); return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); } struct write_cb_args *write_cb_data = aws_mem_calloc(socket->allocator, 1, sizeof(struct write_cb_args)); if (!write_cb_data) { socket->state = ERRORED; return AWS_OP_ERR; } write_cb_data->user_callback = written_fn; write_cb_data->user_data = user_data; write_cb_data->original_buffer_len = cursor->len; write_cb_data->io_data.allocator = socket->allocator; write_cb_data->io_data.in_use = true; write_cb_data->io_data.socket = socket; aws_overlapped_init(&write_cb_data->io_data.signal, s_socket_written_event, write_cb_data); struct iocp_socket *socket_impl = socket->impl; aws_linked_list_push_back(&socket_impl->pending_io_operations, &write_cb_data->io_data.node); AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p handle=%p: queueing write of %llu bytes", (void *)socket, (void *)socket->io_handle.data.handle, (unsigned long long)cursor->len); BOOL res = WriteFile( socket->io_handle.data.handle, cursor->ptr, (DWORD)cursor->len, NULL, aws_overlapped_to_windows_overlapped(&write_cb_data->io_data.signal)); if (!res) { int error_code = GetLastError(); if (error_code != ERROR_IO_PENDING) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "id=%p handle=%p: WriteFile() failed with error %d", (void *)socket, (void *)socket->io_handle.data.handle, error_code); aws_linked_list_remove(&write_cb_data->io_data.node); aws_mem_release(socket->allocator, write_cb_data); int aws_error = s_determine_socket_error(error_code); if (aws_error == AWS_IO_SOCKET_CLOSED) { socket->state = CLOSED; } else { socket->state = ERRORED; } return aws_raise_error(aws_error); } } return AWS_OP_SUCCESS; } int aws_socket_get_error(struct aws_socket *socket) { if (socket->options.domain != AWS_SOCKET_LOCAL) { int connect_result; socklen_t result_length = sizeof(connect_result); if (getsockopt( 
(SOCKET)socket->io_handle.data.handle, SOL_SOCKET, SO_ERROR, (char *)&connect_result, &result_length) < 0) { return s_determine_socket_error(WSAGetLastError()); } if (connect_result) { return s_determine_socket_error(connect_result); } } else { return s_determine_socket_error(WSAGetLastError()); } return AWS_OP_SUCCESS; } bool aws_socket_is_open(struct aws_socket *socket) { return socket->io_handle.data.handle != INVALID_HANDLE_VALUE; } void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { struct aws_uuid uuid; AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); char uuid_str[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); snprintf(endpoint->address, sizeof(endpoint->address), "\\\\.\\pipe\\testsock" PRInSTR, AWS_BYTE_BUF_PRI(uuid_buf)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/secure_channel_tls_handler.c000066400000000000000000002423721456575232400303440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define SECURITY_WIN32 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ # pragma warning(disable : 4306) /* Identifier is type cast to a larger pointer. */ #endif #define KB_1 1024 #define READ_OUT_SIZE (16 * KB_1) #define READ_IN_SIZE READ_OUT_SIZE #define EST_HANDSHAKE_SIZE (7 * KB_1) #define EST_TLS_RECORD_OVERHEAD 53 /* 5 byte header + 32 + 16 bytes for padding */ void aws_tls_init_static_state(struct aws_allocator *alloc) { AWS_LOGF_INFO(AWS_LS_IO_TLS, "static: Initializing TLS using SecureChannel (SSPI)."); (void)alloc; } void aws_tls_clean_up_static_state(void) {} struct secure_channel_ctx { struct aws_tls_ctx ctx; struct aws_string *alpn_list; SCHANNEL_CRED credentials; PCERT_CONTEXT pcerts; HCERTSTORE cert_store; HCERTSTORE custom_trust_store; HCRYPTPROV crypto_provider; HCRYPTKEY private_key; bool verify_peer; bool should_free_pcerts; }; struct secure_channel_handler { struct aws_channel_handler handler; struct aws_tls_channel_handler_shared shared_state; CtxtHandle sec_handle; CredHandle creds; /* * The SSPI API expects an array of len 1 of these where it's the leaf certificate associated with its private * key. */ PCCERT_CONTEXT cert_context[1]; HCERTSTORE cert_store; HCERTSTORE custom_ca_store; SecPkgContext_StreamSizes stream_sizes; unsigned long ctx_req; unsigned long ctx_ret_flags; struct aws_channel_slot *slot; struct aws_byte_buf protocol; struct aws_byte_buf server_name; TimeStamp sspi_timestamp; int (*s_connection_state_fn)(struct aws_channel_handler *handler); /* * Give a little bit of extra head room, for split records. */ uint8_t buffered_read_in_data[READ_IN_SIZE + KB_1]; struct aws_byte_buf buffered_read_in_data_buf; size_t estimated_incomplete_size; size_t read_extra; /* This is to accommodate the extra head room we added above. because we're allowing for splits, we may have more data decrypted than we can fit in this buffer if we don't make them match. 
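In numbers: both buffered_read_in_data and buffered_read_out_data are READ_OUT_SIZE (16 KB) plus KB_1 of slack, and since a decrypted TLS record is not larger than the ciphertext it came from, keeping the two capacities identical guarantees that whatever DecryptMessage() produces from a full input buffer always fits in the output buffer.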
*/ uint8_t buffered_read_out_data[READ_OUT_SIZE + KB_1]; struct aws_byte_buf buffered_read_out_data_buf; struct aws_channel_task sequential_task_storage; aws_tls_on_negotiation_result_fn *on_negotiation_result; aws_tls_on_data_read_fn *on_data_read; aws_tls_on_error_fn *on_error; struct aws_string *alpn_list; void *user_data; bool advertise_alpn_message; bool negotiation_finished; bool verify_peer; }; static size_t s_message_overhead(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; if (AWS_UNLIKELY(!sc_handler->stream_sizes.cbMaximumMessage)) { SECURITY_STATUS status = QueryContextAttributes(&sc_handler->sec_handle, SECPKG_ATTR_STREAM_SIZES, &sc_handler->stream_sizes); if (status != SEC_E_OK) { return EST_TLS_RECORD_OVERHEAD; } } return sc_handler->stream_sizes.cbTrailer + sc_handler->stream_sizes.cbHeader; } bool aws_tls_is_alpn_available(void) { /* if you built on an old version of windows, still no support, but if you did, we still want to check the OS version at runtime before agreeing to attempt alpn. */ #ifdef SECBUFFER_APPLICATION_PROTOCOLS AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "static: This library was built with Windows 8.1 or later, " "probing OS to see what we're actually running on."); /* make sure we're on windows 8.1 or later. */ OSVERSIONINFOEX os_version; DWORDLONG condition_mask = 0; VER_SET_CONDITION(condition_mask, VER_MAJORVERSION, VER_GREATER_EQUAL); VER_SET_CONDITION(condition_mask, VER_MINORVERSION, VER_GREATER_EQUAL); VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMAJOR, VER_GREATER_EQUAL); VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMINOR, VER_GREATER_EQUAL); AWS_ZERO_STRUCT(os_version); os_version.dwMajorVersion = HIBYTE(_WIN32_WINNT_WIN8); os_version.dwMinorVersion = LOBYTE(_WIN32_WINNT_WIN8); os_version.wServicePackMajor = 0; os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); if (VerifyVersionInfo( &os_version, VER_MAJORVERSION | VER_MINORVERSION | VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR, condition_mask)) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: We're running on Windows 8.1 or later. ALPN is available."); return true; } AWS_LOGF_WARN( AWS_LS_IO_TLS, "static: Running on older version of windows, ALPN is not supported. " "Please update your OS to take advantage of modern features."); #else AWS_LOGF_WARN( AWS_LS_IO_TLS, "static: This library was built using a Windows SDK prior to 8.1. " "Please build with a version of windows >= 8.1 to take advantage modern features. ALPN is not supported."); #endif /*SECBUFFER_APPLICATION_PROTOCOLS */ return false; } bool aws_tls_is_cipher_pref_supported(enum aws_tls_cipher_pref cipher_pref) { switch (cipher_pref) { case AWS_IO_TLS_CIPHER_PREF_SYSTEM_DEFAULT: return true; case AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2019_06: default: return false; } } /* technically we could lower this, but lets be forgiving */ #define MAX_HOST_LENGTH 255 /* this only gets called if the user specified a custom ca. 
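The flow below is: pull the peer's leaf certificate out of the security context (SECPKG_ATTR_REMOTE_CERT_CONTEXT), build a chain engine whose exclusive root store is the user-supplied CA store, ask CertGetCertificateChain() for a chain using the usual server-auth usage OIDs, then run CertVerifyCertificateChainPolicy() with the SSL policy against the SNI host name, and finally inspect the chain's TrustStatus (ignoring only CERT_TRUST_IS_NOT_TIME_NESTED). Any failure along the way is treated as an untrusted peer.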
*/ static int s_manually_verify_peer_cert(struct aws_channel_handler *handler) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: manually verifying certifcate chain because a custom CA is configured.", (void *)handler); struct secure_channel_handler *sc_handler = handler->impl; int result = AWS_OP_ERR; CERT_CONTEXT *peer_certificate = NULL; HCERTCHAINENGINE engine = NULL; CERT_CHAIN_CONTEXT *cert_chain_ctx = NULL; /* get the peer's certificate so we can validate it.*/ SECURITY_STATUS status = QueryContextAttributes(&sc_handler->sec_handle, SECPKG_ATTR_REMOTE_CERT_CONTEXT, &peer_certificate); if (status != SEC_E_OK || !peer_certificate) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: failed to load peer's certificate with SECURITY_STATUS %d", (void *)handler, (int)status); return AWS_OP_ERR; } /* this next bit scours the custom trust store to try and load a chain to verify the leaf certificate against. */ CERT_CHAIN_ENGINE_CONFIG engine_config; AWS_ZERO_STRUCT(engine_config); engine_config.cbSize = sizeof(engine_config); engine_config.hExclusiveRoot = sc_handler->custom_ca_store; if (!CertCreateCertificateChainEngine(&engine_config, &engine)) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: failed to load a certificate chain engine with SECURITY_STATUS %d. " "Most likely, the configured CA is corrupted.", (void *)handler, (int)status); goto done; } /* * TODO: Investigate CRL options further on a per-platform basis. Add control APIs if appropriate. */ DWORD get_chain_flags = 0; /* mimic chromium here since we intend for this to be used generally */ const LPCSTR usage_identifiers[] = { szOID_PKIX_KP_SERVER_AUTH, szOID_SERVER_GATED_CRYPTO, szOID_SGC_NETSCAPE, }; CERT_CHAIN_PARA chain_params; AWS_ZERO_STRUCT(chain_params); chain_params.cbSize = sizeof(chain_params); chain_params.RequestedUsage.dwType = USAGE_MATCH_TYPE_OR; chain_params.RequestedUsage.Usage.cUsageIdentifier = AWS_ARRAY_SIZE(usage_identifiers); chain_params.RequestedUsage.Usage.rgpszUsageIdentifier = (LPSTR *)usage_identifiers; if (!CertGetCertificateChain( engine, peer_certificate, NULL, peer_certificate->hCertStore, &chain_params, get_chain_flags, NULL, &cert_chain_ctx)) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: unable to find certificate in chain with SECURITY_STATUS %d.", (void *)handler, (int)status); goto done; } struct aws_byte_buf host = aws_tls_handler_server_name(handler); if (host.len > MAX_HOST_LENGTH) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "id=%p: host name too long (%d).", (void *)handler, (int)host.len); goto done; } wchar_t whost[MAX_HOST_LENGTH + 1]; AWS_ZERO_ARRAY(whost); int converted = MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, (const char *)host.buffer, (int)host.len, whost, AWS_ARRAY_SIZE(whost)); if ((size_t)converted != host.len) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: unable to convert host to wstr, %d -> %d, with last error 0x%x.", (void *)handler, (int)host.len, (int)converted, (int)GetLastError()); goto done; } /* check if the chain was trusted */ LPCSTR policyiod = CERT_CHAIN_POLICY_SSL; SSL_EXTRA_CERT_CHAIN_POLICY_PARA sslpolicy; AWS_ZERO_STRUCT(sslpolicy); sslpolicy.cbSize = sizeof(sslpolicy); sslpolicy.dwAuthType = AUTHTYPE_SERVER; sslpolicy.fdwChecks = 0; sslpolicy.pwszServerName = whost; CERT_CHAIN_POLICY_PARA policypara; AWS_ZERO_STRUCT(policypara); policypara.cbSize = sizeof(policypara); policypara.dwFlags = 0; policypara.pvExtraPolicyPara = &sslpolicy; CERT_CHAIN_POLICY_STATUS policystatus; AWS_ZERO_STRUCT(policystatus); policystatus.cbSize = sizeof(policystatus); if (!CertVerifyCertificateChainPolicy(policyiod, 
cert_chain_ctx, &policypara, &policystatus)) { int error = GetLastError(); AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: CertVerifyCertificateChainPolicy() failed, error 0x%x", (void *)handler, (int)error); goto done; } if (policystatus.dwError) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: certificate verification failed, error 0x%x", (void *)handler, (int)policystatus.dwError); goto done; } /* if the chain was trusted, then we're good to go, if it was not we bail out. */ CERT_SIMPLE_CHAIN *simple_chain = cert_chain_ctx->rgpChain[0]; DWORD trust_mask = ~(DWORD)CERT_TRUST_IS_NOT_TIME_NESTED; trust_mask &= simple_chain->TrustStatus.dwErrorStatus; if (trust_mask != 0) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: peer certificate is un-trusted with SECURITY_STATUS %d.", (void *)handler, (int)trust_mask); goto done; } AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: peer certificate is trusted.", (void *)handler); result = AWS_OP_SUCCESS; done: if (cert_chain_ctx != NULL) { CertFreeCertificateChain(cert_chain_ctx); } if (engine != NULL) { CertFreeCertificateChainEngine(engine); } if (peer_certificate != NULL) { CertFreeCertificateContext(peer_certificate); } return result; } static void s_invoke_negotiation_error(struct aws_channel_handler *handler, int err) { struct secure_channel_handler *sc_handler = handler->impl; aws_on_tls_negotiation_completed(&sc_handler->shared_state, err); if (sc_handler->on_negotiation_result) { sc_handler->on_negotiation_result(handler, sc_handler->slot, err, sc_handler->user_data); } } static void s_on_negotiation_success(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; /* if the user provided an ALPN handler to the channel, we need to let them know what their protocol is. */ if (sc_handler->slot->adj_right && sc_handler->advertise_alpn_message && sc_handler->protocol.len) { struct aws_io_message *message = aws_channel_acquire_message_from_pool( sc_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, sizeof(struct aws_tls_negotiated_protocol_message)); message->message_tag = AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE; struct aws_tls_negotiated_protocol_message *protocol_message = (struct aws_tls_negotiated_protocol_message *)message->message_data.buffer; protocol_message->protocol = sc_handler->protocol; message->message_data.len = sizeof(struct aws_tls_negotiated_protocol_message); if (aws_channel_slot_send_message(sc_handler->slot, message, AWS_CHANNEL_DIR_READ)) { aws_mem_release(message->allocator, message); aws_channel_shutdown(sc_handler->slot->channel, aws_last_error()); } } aws_on_tls_negotiation_completed(&sc_handler->shared_state, AWS_ERROR_SUCCESS); if (sc_handler->on_negotiation_result) { sc_handler->on_negotiation_result(handler, sc_handler->slot, AWS_OP_SUCCESS, sc_handler->user_data); } } static int s_determine_sspi_error(int sspi_status) { switch (sspi_status) { case SEC_E_INSUFFICIENT_MEMORY: return AWS_ERROR_OOM; case SEC_I_CONTEXT_EXPIRED: return AWS_IO_TLS_ALERT_NOT_GRACEFUL; case SEC_E_WRONG_PRINCIPAL: return AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE; /* case SEC_E_INVALID_HANDLE: case SEC_E_INVALID_TOKEN: case SEC_E_LOGON_DENIED: case SEC_E_TARGET_UNKNOWN: case SEC_E_NO_AUTHENTICATING_AUTHORITY: case SEC_E_INTERNAL_ERROR: case SEC_E_NO_CREDENTIALS: case SEC_E_UNSUPPORTED_FUNCTION: case SEC_E_APPLICATION_PROTOCOL_MISMATCH: */ default: return AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE; } } #define CHECK_ALPN_BUFFER_SIZE(s, i, b) \ if (s <= i) { \ aws_array_list_clean_up(&b); \ return aws_raise_error(AWS_ERROR_SHORT_BUFFER); \ } /* 
construct ALPN extension data... apparently this works on big-endian machines? but I don't believe the docs if you're running ARM and you find ALPN isn't working, it's probably because I trusted the documentation and your bug is in here. Note, dotnet's corefx also acts like endianness isn't at play so if this is broken so is everyone's dotnet code. */ static int s_fillin_alpn_data( struct aws_channel_handler *handler, unsigned char *alpn_buffer_data, size_t buffer_size, size_t *written) { *written = 0; struct secure_channel_handler *sc_handler = handler->impl; AWS_LOGF_DEBUG(AWS_LS_IO_TLS, ""); struct aws_array_list alpn_buffers; struct aws_byte_cursor alpn_buffer_array[4]; aws_array_list_init_static(&alpn_buffers, alpn_buffer_array, 4, sizeof(struct aws_byte_cursor)); AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "Setting ALPN extension with string %s.", aws_string_c_str(sc_handler->alpn_list)); struct aws_byte_cursor alpn_str_cur = aws_byte_cursor_from_string(sc_handler->alpn_list); if (aws_byte_cursor_split_on_char(&alpn_str_cur, ';', &alpn_buffers)) { return AWS_OP_ERR; } size_t protocols_count = aws_array_list_length(&alpn_buffers); size_t index = 0; CHECK_ALPN_BUFFER_SIZE(buffer_size, index + sizeof(uint32_t), alpn_buffers) uint32_t *extension_length = (uint32_t *)&alpn_buffer_data[index]; index += sizeof(uint32_t); CHECK_ALPN_BUFFER_SIZE(buffer_size, index + sizeof(uint32_t), alpn_buffers) uint32_t *extension_name = (uint32_t *)&alpn_buffer_data[index]; index += sizeof(uint32_t); CHECK_ALPN_BUFFER_SIZE(buffer_size, index + sizeof(uint32_t), alpn_buffers) uint16_t *protocols_byte_length = (uint16_t *)&alpn_buffer_data[index]; index += sizeof(uint16_t); CHECK_ALPN_BUFFER_SIZE(buffer_size, index + sizeof(uint16_t), alpn_buffers) *extension_length += sizeof(uint32_t) + sizeof(uint16_t); *extension_name = SecApplicationProtocolNegotiationExt_ALPN; /*now add the protocols*/ for (size_t i = 0; i < protocols_count; ++i) { struct aws_byte_cursor *protocol_ptr = NULL; aws_array_list_get_at_ptr(&alpn_buffers, (void **)&protocol_ptr, i); AWS_ASSERT(protocol_ptr); *extension_length += (uint32_t)protocol_ptr->len + 1; *protocols_byte_length += (uint16_t)protocol_ptr->len + 1; CHECK_ALPN_BUFFER_SIZE(buffer_size, index + 1, alpn_buffers) alpn_buffer_data[index++] = (unsigned char)protocol_ptr->len; CHECK_ALPN_BUFFER_SIZE(buffer_size, index + protocol_ptr->len, alpn_buffers) memcpy(alpn_buffer_data + index, protocol_ptr->ptr, protocol_ptr->len); index += protocol_ptr->len; } aws_array_list_clean_up(&alpn_buffers); *written = *extension_length + sizeof(uint32_t); return AWS_OP_SUCCESS; } static int s_process_connection_state(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; return sc_handler->s_connection_state_fn(handler); } static int s_do_application_data_decrypt(struct aws_channel_handler *handler); static int s_do_server_side_negotiation_step_2(struct aws_channel_handler *handler); /** invoked during the first step of the server's negotiation. It receives the client hello, adds its alpn data if available, and if everything is good, sends out the server hello. 
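For reference, the ALPN blob assembled by s_fillin_alpn_data() above and handed to SSPI in a SECBUFFER_APPLICATION_PROTOCOLS buffer looks roughly like this (field names here are ours, purely illustrative; integers are written in host byte order, exactly as the code does):

    uint32_t total_len;          bytes that follow this field
    uint32_t ext_type;           SecApplicationProtocolNegotiationExt_ALPN
    uint16_t protocol_list_len;  combined size of the entries below
    entries[]:                   one per protocol in the ';'-separated alpn_list,
                                 each a 1-byte length followed by the protocol name

which appears to line up with the SEC_APPLICATION_PROTOCOLS layout Schannel expects.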
*/ static int s_do_server_side_negotiation_step_1(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: server starting negotiation", (void *)handler); aws_on_drive_tls_negotiation(&sc_handler->shared_state); unsigned char alpn_buffer_data[128] = {0}; SecBuffer input_bufs[] = { { .pvBuffer = sc_handler->buffered_read_in_data_buf.buffer, .cbBuffer = (unsigned long)sc_handler->buffered_read_in_data_buf.len, .BufferType = SECBUFFER_TOKEN, }, { .pvBuffer = NULL, .cbBuffer = 0, .BufferType = SECBUFFER_EMPTY, }, }; SecBufferDesc input_bufs_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 2, .pBuffers = input_bufs, }; #ifdef SECBUFFER_APPLICATION_PROTOCOLS if (sc_handler->alpn_list && aws_tls_is_alpn_available()) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Setting ALPN to %s", handler, aws_string_c_str(sc_handler->alpn_list)); size_t extension_length = 0; if (s_fillin_alpn_data(handler, alpn_buffer_data, sizeof(alpn_buffer_data), &extension_length)) { return AWS_OP_ERR; } input_bufs[1].pvBuffer = alpn_buffer_data, input_bufs[1].cbBuffer = (unsigned long)extension_length, input_bufs[1].BufferType = SECBUFFER_APPLICATION_PROTOCOLS; } #endif /* SECBUFFER_APPLICATION_PROTOCOLS*/ sc_handler->ctx_req = ASC_REQ_SEQUENCE_DETECT | ASC_REQ_REPLAY_DETECT | ASC_REQ_CONFIDENTIALITY | ASC_REQ_ALLOCATE_MEMORY | ASC_REQ_STREAM; if (sc_handler->verify_peer) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: server configured to use mutual tls, expecting a certficate from client.", (void *)handler); sc_handler->ctx_req |= ASC_REQ_MUTUAL_AUTH; } SecBuffer output_buffer = { .pvBuffer = NULL, .cbBuffer = 0, .BufferType = SECBUFFER_TOKEN, }; SecBufferDesc output_buffer_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 1, .pBuffers = &output_buffer, }; /* process the client hello. */ SECURITY_STATUS status = AcceptSecurityContext( &sc_handler->creds, NULL, &input_bufs_desc, sc_handler->ctx_req, 0, &sc_handler->sec_handle, &output_buffer_desc, &sc_handler->ctx_ret_flags, NULL); if (!(status == SEC_I_CONTINUE_NEEDED || status == SEC_E_OK)) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: error during processing of the ClientHello. SECURITY_STATUS is %d", (void *)handler, (int)status); int error = s_determine_sspi_error(status); aws_raise_error(error); s_invoke_negotiation_error(handler, error); return AWS_OP_ERR; } size_t data_to_write_len = output_buffer.cbBuffer; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Sending ServerHello. Data size %zu", (void *)handler, data_to_write_len); /* send the server hello. */ struct aws_io_message *outgoing_message = aws_channel_acquire_message_from_pool( sc_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, data_to_write_len); if (!outgoing_message) { FreeContextBuffer(output_buffer.pvBuffer); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } AWS_ASSERT(outgoing_message->message_data.capacity >= data_to_write_len); memcpy(outgoing_message->message_data.buffer, output_buffer.pvBuffer, output_buffer.cbBuffer); outgoing_message->message_data.len = output_buffer.cbBuffer; FreeContextBuffer(output_buffer.pvBuffer); if (aws_channel_slot_send_message(sc_handler->slot, outgoing_message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(outgoing_message->allocator, outgoing_message); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } sc_handler->s_connection_state_fn = s_do_server_side_negotiation_step_2; return AWS_OP_SUCCESS; } /* cipher change, key exchange, mutual TLS stuff. 
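Rough shape of the status handling below: SEC_E_INCOMPLETE_MESSAGE means the buffered data is not yet a full handshake record, so we stash the size hint and wait for more input; any SECBUFFER_TOKEN output buffers are handshake records that must be written back to the wire; a SECBUFFER_EXTRA input buffer means part of what we fed in belongs to the next record and is remembered in read_extra so it gets replayed; SEC_E_OK means the handshake is done, at which point we optionally verify the peer against a custom CA, query the negotiated ALPN protocol, and flip the state function over to s_do_application_data_decrypt.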
*/ static int s_do_server_side_negotiation_step_2(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: running step 2 of negotiation (cipher change, key exchange etc...)", (void *)handler); SecBuffer input_buffers[] = { [0] = { .pvBuffer = sc_handler->buffered_read_in_data_buf.buffer, .cbBuffer = (unsigned long)sc_handler->buffered_read_in_data_buf.len, .BufferType = SECBUFFER_TOKEN, }, [1] = { .pvBuffer = NULL, .cbBuffer = 0, .BufferType = SECBUFFER_EMPTY, }, }; SecBufferDesc input_buffers_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 2, .pBuffers = input_buffers, }; SecBuffer output_buffers[3]; AWS_ZERO_ARRAY(output_buffers); output_buffers[0].BufferType = SECBUFFER_TOKEN; output_buffers[1].BufferType = SECBUFFER_ALERT; SecBufferDesc output_buffers_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 3, .pBuffers = output_buffers, }; sc_handler->read_extra = 0; sc_handler->estimated_incomplete_size = 0; SECURITY_STATUS status = AcceptSecurityContext( &sc_handler->creds, &sc_handler->sec_handle, &input_buffers_desc, sc_handler->ctx_req, 0, NULL, &output_buffers_desc, &sc_handler->ctx_ret_flags, &sc_handler->sspi_timestamp); if (status != SEC_E_INCOMPLETE_MESSAGE && status != SEC_I_CONTINUE_NEEDED && status != SEC_E_OK) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Error during negotiation. SECURITY_STATUS is %d", (void *)handler, (int)status); int aws_error = s_determine_sspi_error(status); aws_raise_error(aws_error); s_invoke_negotiation_error(handler, aws_error); return AWS_OP_ERR; } if (status == SEC_E_INCOMPLETE_MESSAGE) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Last processed buffer was incomplete, waiting on more data.", (void *)handler); sc_handler->estimated_incomplete_size = input_buffers[1].cbBuffer; return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); }; /* any output buffers that were filled in with SECBUFFER_TOKEN need to be sent, SECBUFFER_EXTRA means we need to account for extra data and shift everything for the next run. */ if (status == SEC_I_CONTINUE_NEEDED || status == SEC_E_OK) { for (size_t i = 0; i < output_buffers_desc.cBuffers; ++i) { SecBuffer *buf_ptr = &output_buffers[i]; if (buf_ptr->BufferType == SECBUFFER_TOKEN && buf_ptr->cbBuffer) { struct aws_io_message *outgoing_message = aws_channel_acquire_message_from_pool( sc_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, buf_ptr->cbBuffer); if (!outgoing_message) { FreeContextBuffer(buf_ptr->pvBuffer); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } memcpy(outgoing_message->message_data.buffer, buf_ptr->pvBuffer, buf_ptr->cbBuffer); outgoing_message->message_data.len = buf_ptr->cbBuffer; FreeContextBuffer(buf_ptr->pvBuffer); if (aws_channel_slot_send_message(sc_handler->slot, outgoing_message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(outgoing_message->allocator, outgoing_message); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } } } if (input_buffers[1].BufferType == SECBUFFER_EXTRA && input_buffers[1].cbBuffer > 0) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Extra data recieved. Extra size is %lu", (void *)handler, input_buffers[1].cbBuffer); sc_handler->read_extra = input_buffers[1].cbBuffer; } } if (status == SEC_E_OK) { AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: handshake completed", (void *)handler); /* if a custom CA store was configured, we have to do the verification ourselves. 
*/ if (sc_handler->custom_ca_store) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Custom CA was configured, evaluating trust before completing connection", (void *)handler); if (s_manually_verify_peer_cert(handler)) { aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); s_invoke_negotiation_error(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return AWS_OP_ERR; } } sc_handler->negotiation_finished = true; /* force query of the sizes so future calls to encrypt will be loaded. */ s_message_overhead(handler); /* grab the negotiated protocol out of the session. */ #ifdef SECBUFFER_APPLICATION_PROTOCOLS if (sc_handler->alpn_list && aws_tls_is_alpn_available()) { SecPkgContext_ApplicationProtocol alpn_result; status = QueryContextAttributes(&sc_handler->sec_handle, SECPKG_ATTR_APPLICATION_PROTOCOL, &alpn_result); AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: ALPN is configured. Checking for negotiated protocol", handler); if (status == SEC_E_OK && alpn_result.ProtoNegoStatus == SecApplicationProtocolNegotiationStatus_Success) { aws_byte_buf_init(&sc_handler->protocol, handler->alloc, alpn_result.ProtocolIdSize + 1); memset(sc_handler->protocol.buffer, 0, alpn_result.ProtocolIdSize + 1); memcpy(sc_handler->protocol.buffer, alpn_result.ProtocolId, alpn_result.ProtocolIdSize); sc_handler->protocol.len = alpn_result.ProtocolIdSize; AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: negotiated protocol %s", handler, (char *)sc_handler->protocol.buffer); } else { AWS_LOGF_WARN( AWS_LS_IO_TLS, "id=%p: Error retrieving negotiated protocol. SECURITY_STATUS is %d", handler, (int)status); int aws_error = s_determine_sspi_error(status); aws_raise_error(aws_error); } } #endif sc_handler->s_connection_state_fn = s_do_application_data_decrypt; AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: TLS handshake completed successfully.", (void *)handler); s_on_negotiation_success(handler); } return AWS_OP_SUCCESS; } static int s_do_client_side_negotiation_step_2(struct aws_channel_handler *handler); /* send the client hello */ static int s_do_client_side_negotiation_step_1(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: client starting negotiation", (void *)handler); aws_on_drive_tls_negotiation(&sc_handler->shared_state); unsigned char alpn_buffer_data[128] = {0}; SecBuffer input_buf = { .pvBuffer = NULL, .cbBuffer = 0, .BufferType = SECBUFFER_EMPTY, }; SecBufferDesc input_buf_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 1, .pBuffers = &input_buf, }; SecBufferDesc *alpn_sspi_data = NULL; /* add alpn data to the client hello if it's supported. 
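Note the asymmetry with the server path: on the client there is no inbound token yet, so the SECBUFFER_APPLICATION_PROTOCOLS buffer is the only input handed to the first InitializeSecurityContextA() call, whereas s_do_server_side_negotiation_step_1() passes it alongside the ClientHello token.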
*/ #ifdef SECBUFFER_APPLICATION_PROTOCOLS if (sc_handler->alpn_list && aws_tls_is_alpn_available()) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Setting ALPN data as %s", handler, aws_string_c_str(sc_handler->alpn_list)); size_t extension_length = 0; if (s_fillin_alpn_data(handler, alpn_buffer_data, sizeof(alpn_buffer_data), &extension_length)) { s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } input_buf.pvBuffer = alpn_buffer_data, input_buf.cbBuffer = (unsigned long)extension_length, input_buf.BufferType = SECBUFFER_APPLICATION_PROTOCOLS; alpn_sspi_data = &input_buf_desc; } #endif /* SECBUFFER_APPLICATION_PROTOCOLS*/ sc_handler->ctx_req = ISC_REQ_SEQUENCE_DETECT | ISC_REQ_REPLAY_DETECT | ISC_REQ_CONFIDENTIALITY | ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_STREAM; SecBuffer output_buffer = { .pvBuffer = NULL, .cbBuffer = 0, .BufferType = SECBUFFER_EMPTY, }; SecBufferDesc output_buffer_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 1, .pBuffers = &output_buffer, }; char server_name_cstr[256]; AWS_ZERO_ARRAY(server_name_cstr); AWS_ASSERT(sc_handler->server_name.len < 256); memcpy(server_name_cstr, sc_handler->server_name.buffer, sc_handler->server_name.len); SECURITY_STATUS status = InitializeSecurityContextA( &sc_handler->creds, NULL, (SEC_CHAR *)server_name_cstr, sc_handler->ctx_req, 0, 0, alpn_sspi_data, 0, &sc_handler->sec_handle, &output_buffer_desc, &sc_handler->ctx_ret_flags, &sc_handler->sspi_timestamp); if (status != SEC_I_CONTINUE_NEEDED) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Error sending client/receiving server handshake data. SECURITY_STATUS is %d", (void *)handler, (int)status); int aws_error = s_determine_sspi_error(status); aws_raise_error(aws_error); s_invoke_negotiation_error(handler, aws_error); return AWS_OP_ERR; } size_t data_to_write_len = output_buffer.cbBuffer; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Sending client handshake data of size %zu", (void *)handler, data_to_write_len); struct aws_io_message *outgoing_message = aws_channel_acquire_message_from_pool( sc_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, data_to_write_len); if (!outgoing_message) { FreeContextBuffer(output_buffer.pvBuffer); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } AWS_ASSERT(outgoing_message->message_data.capacity >= data_to_write_len); memcpy(outgoing_message->message_data.buffer, output_buffer.pvBuffer, output_buffer.cbBuffer); outgoing_message->message_data.len = output_buffer.cbBuffer; FreeContextBuffer(output_buffer.pvBuffer); if (aws_channel_slot_send_message(sc_handler->slot, outgoing_message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(outgoing_message->allocator, outgoing_message); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } sc_handler->s_connection_state_fn = s_do_client_side_negotiation_step_2; return AWS_OP_SUCCESS; } /* cipher exchange, key exchange etc.... 
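This mirrors s_do_server_side_negotiation_step_2(): the same SEC_E_INCOMPLETE_MESSAGE and SECBUFFER_EXTRA bookkeeping applies, the difference being that the client keeps calling InitializeSecurityContextA() with the already-established sec_handle and the target server name instead of AcceptSecurityContext().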
*/ static int s_do_client_side_negotiation_step_2(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: running step 2 of client-side negotiation (cipher change, key exchange etc...)", (void *)handler); SecBuffer input_buffers[] = { [0] = { .pvBuffer = sc_handler->buffered_read_in_data_buf.buffer, .cbBuffer = (unsigned long)sc_handler->buffered_read_in_data_buf.len, .BufferType = SECBUFFER_TOKEN, }, [1] = { .pvBuffer = NULL, .cbBuffer = 0, .BufferType = SECBUFFER_EMPTY, }, }; SecBufferDesc input_buffers_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 2, .pBuffers = input_buffers, }; SecBuffer output_buffers[3]; AWS_ZERO_ARRAY(output_buffers); output_buffers[0].BufferType = SECBUFFER_TOKEN; output_buffers[1].BufferType = SECBUFFER_ALERT; SecBufferDesc output_buffers_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 3, .pBuffers = output_buffers, }; SECURITY_STATUS status = SEC_E_OK; sc_handler->read_extra = 0; sc_handler->estimated_incomplete_size = 0; char server_name_cstr[256]; AWS_ZERO_ARRAY(server_name_cstr); AWS_FATAL_ASSERT(sc_handler->server_name.len < sizeof(server_name_cstr)); memcpy(server_name_cstr, sc_handler->server_name.buffer, sc_handler->server_name.len); status = InitializeSecurityContextA( &sc_handler->creds, &sc_handler->sec_handle, (SEC_CHAR *)server_name_cstr, sc_handler->ctx_req, 0, 0, &input_buffers_desc, 0, NULL, &output_buffers_desc, &sc_handler->ctx_ret_flags, &sc_handler->sspi_timestamp); if (status != SEC_E_INCOMPLETE_MESSAGE && status != SEC_I_CONTINUE_NEEDED && status != SEC_E_OK) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Error during negotiation. SECURITY_STATUS is %d", (void *)handler, (int)status); int aws_error = s_determine_sspi_error(status); aws_raise_error(aws_error); s_invoke_negotiation_error(handler, aws_error); return AWS_OP_ERR; } if (status == SEC_E_INCOMPLETE_MESSAGE) { sc_handler->estimated_incomplete_size = input_buffers[1].cbBuffer; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Incomplete buffer recieved. Incomplete size is %zu. Waiting for more data.", (void *)handler, sc_handler->estimated_incomplete_size); return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } if (status == SEC_I_CONTINUE_NEEDED || status == SEC_E_OK) { for (size_t i = 0; i < output_buffers_desc.cBuffers; ++i) { SecBuffer *buf_ptr = &output_buffers[i]; if (buf_ptr->BufferType == SECBUFFER_TOKEN && buf_ptr->cbBuffer) { struct aws_io_message *outgoing_message = aws_channel_acquire_message_from_pool( sc_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, buf_ptr->cbBuffer); if (!outgoing_message) { FreeContextBuffer(buf_ptr->pvBuffer); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } memcpy(outgoing_message->message_data.buffer, buf_ptr->pvBuffer, buf_ptr->cbBuffer); outgoing_message->message_data.len = buf_ptr->cbBuffer; FreeContextBuffer(buf_ptr->pvBuffer); if (aws_channel_slot_send_message(sc_handler->slot, outgoing_message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(outgoing_message->allocator, outgoing_message); s_invoke_negotiation_error(handler, aws_last_error()); return AWS_OP_ERR; } } } if (input_buffers[1].BufferType == SECBUFFER_EXTRA && input_buffers[1].cbBuffer > 0) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Extra data recieved. 
Extra data size is %lu.", (void *)handler, input_buffers[1].cbBuffer); sc_handler->read_extra = input_buffers[1].cbBuffer; } } if (status == SEC_E_OK) { AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: handshake completed", handler); /* if a custom CA store was configured, we have to do the verification ourselves. */ if (sc_handler->custom_ca_store) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Custom CA was configured, evaluating trust before completing connection", (void *)handler); if (s_manually_verify_peer_cert(handler)) { aws_raise_error(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); s_invoke_negotiation_error(handler, AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE); return AWS_OP_ERR; } } sc_handler->negotiation_finished = true; /* force the sizes query, so future Encrypt message calls work.*/ s_message_overhead(handler); #ifdef SECBUFFER_APPLICATION_PROTOCOLS if (sc_handler->alpn_list && aws_tls_is_alpn_available()) { AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Retrieving negotiated protocol.", handler); SecPkgContext_ApplicationProtocol alpn_result; status = QueryContextAttributes(&sc_handler->sec_handle, SECPKG_ATTR_APPLICATION_PROTOCOL, &alpn_result); if (status == SEC_E_OK && alpn_result.ProtoNegoStatus == SecApplicationProtocolNegotiationStatus_Success) { aws_byte_buf_init(&sc_handler->protocol, handler->alloc, alpn_result.ProtocolIdSize + 1); memset(sc_handler->protocol.buffer, 0, alpn_result.ProtocolIdSize + 1); memcpy(sc_handler->protocol.buffer, alpn_result.ProtocolId, alpn_result.ProtocolIdSize); sc_handler->protocol.len = alpn_result.ProtocolIdSize; AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Negotiated protocol %s", handler, (char *)sc_handler->protocol.buffer); } else { AWS_LOGF_WARN( AWS_LS_IO_TLS, "id=%p: Error retrieving negotiated protocol. SECURITY_STATUS is %d", handler, (int)status); int aws_error = s_determine_sspi_error(status); aws_raise_error(aws_error); } } #endif AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: TLS handshake completed successfully.", (void *)handler); sc_handler->s_connection_state_fn = s_do_application_data_decrypt; s_on_negotiation_success(handler); } return AWS_OP_SUCCESS; } static int s_do_application_data_decrypt(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; /* I know this is an unncessary initialization, it's initialized here to make linters happy.*/ int error = AWS_OP_ERR; /* when we get an Extra buffer we have to move the pointer and replay the buffer, so we loop until we don't have any extra buffers left over, in the last phase, we then go ahead and send the output. This state function will always say BLOCKED_ON_READ, AWS_IO_TLS_ERROR_READ_FAILURE or SUCCESS. There will never be left over reads.*/ do { error = AWS_OP_ERR; /* 4 buffers are needed, only one is input, the others get zeroed out for the output operation. */ SecBuffer input_buffers[4]; AWS_ZERO_ARRAY(input_buffers); size_t read_len = sc_handler->read_extra ? sc_handler->read_extra : sc_handler->buffered_read_in_data_buf.len; size_t offset = sc_handler->read_extra ? 
sc_handler->buffered_read_in_data_buf.len - sc_handler->read_extra : 0; sc_handler->read_extra = 0; input_buffers[0] = (SecBuffer){ .cbBuffer = (unsigned long)(read_len), .pvBuffer = sc_handler->buffered_read_in_data_buf.buffer + offset, .BufferType = SECBUFFER_DATA, }; SecBufferDesc buffer_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 4, .pBuffers = input_buffers, }; SECURITY_STATUS status = DecryptMessage(&sc_handler->sec_handle, &buffer_desc, 0, NULL); if (status == SEC_E_OK) { error = AWS_OP_SUCCESS; /* if SECBUFFER_DATA is the buffer type of the second buffer, we have decrypted data to process. If SECBUFFER_DATA is the type for the fourth buffer we need to keep track of it so we can shift everything before doing another decrypt operation. We don't care what's in the third buffer for TLS usage.*/ if (input_buffers[1].BufferType == SECBUFFER_DATA) { size_t decrypted_length = input_buffers[1].cbBuffer; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Decrypted message with length %zu.", (void *)handler, decrypted_length); struct aws_byte_cursor to_append = aws_byte_cursor_from_array(input_buffers[1].pvBuffer, decrypted_length); int append_failed = aws_byte_buf_append(&sc_handler->buffered_read_out_data_buf, &to_append); AWS_ASSERT(!append_failed); (void)append_failed; /* if we have extra we have to move the pointer and do another Decrypt operation. */ if (input_buffers[3].BufferType == SECBUFFER_EXTRA) { sc_handler->read_extra = input_buffers[3].cbBuffer; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Extra (incomplete) message received with length %zu.", (void *)handler, sc_handler->read_extra); } else { error = AWS_OP_SUCCESS; /* this means we processed everything in the buffer. */ sc_handler->buffered_read_in_data_buf.len = 0; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Decrypt ended exactly on the end of the record, resetting buffer.", (void *)handler); } } } /* SEC_E_INCOMPLETE_MESSAGE means the message we tried to decrypt isn't a full record and we need to append our next read to it and try again. */ else if (status == SEC_E_INCOMPLETE_MESSAGE) { sc_handler->estimated_incomplete_size = input_buffers[1].cbBuffer; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: (incomplete) message received. Expecting remaining portion of size %zu.", (void *)handler, sc_handler->estimated_incomplete_size); memmove( sc_handler->buffered_read_in_data_buf.buffer, sc_handler->buffered_read_in_data_buf.buffer + offset, read_len); sc_handler->buffered_read_in_data_buf.len = read_len; aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } /* SEC_I_CONTEXT_EXPIRED means that the message sender has shut down the connection. One such case where this can happen is an unaccepted certificate. */ else if (status == SEC_I_CONTEXT_EXPIRED) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Alert received. Message sender has shut down the connection. SECURITY_STATUS is %d.", (void *)handler, (int)status); struct aws_channel_slot *slot = handler->slot; aws_channel_shutdown(slot->channel, AWS_OP_SUCCESS); error = AWS_OP_SUCCESS; } else { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: Error decrypting message. 
SECURITY_STATUS is %d.", (void *)handler, (int)status); aws_raise_error(AWS_IO_TLS_ERROR_READ_FAILURE); } } while (sc_handler->read_extra); return error; } static int s_process_pending_output_messages(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; size_t downstream_window = SIZE_MAX; if (sc_handler->slot->adj_right) { downstream_window = aws_channel_slot_downstream_read_window(sc_handler->slot); } AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Processing incomming messages. Downstream window is %zu", (void *)handler, downstream_window); while (sc_handler->buffered_read_out_data_buf.len && downstream_window) { size_t requested_message_size = sc_handler->buffered_read_out_data_buf.len > downstream_window ? downstream_window : sc_handler->buffered_read_out_data_buf.len; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Requested message size is %zu", (void *)handler, requested_message_size); if (sc_handler->slot->adj_right) { struct aws_io_message *read_out_msg = aws_channel_acquire_message_from_pool( sc_handler->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, requested_message_size); if (!read_out_msg) { return AWS_OP_ERR; } size_t copy_size = read_out_msg->message_data.capacity < requested_message_size ? read_out_msg->message_data.capacity : requested_message_size; memcpy(read_out_msg->message_data.buffer, sc_handler->buffered_read_out_data_buf.buffer, copy_size); read_out_msg->message_data.len = copy_size; memmove( sc_handler->buffered_read_out_data_buf.buffer, sc_handler->buffered_read_out_data_buf.buffer + copy_size, sc_handler->buffered_read_out_data_buf.len - copy_size); sc_handler->buffered_read_out_data_buf.len -= copy_size; if (sc_handler->on_data_read) { sc_handler->on_data_read(handler, sc_handler->slot, &read_out_msg->message_data, sc_handler->user_data); } if (aws_channel_slot_send_message(sc_handler->slot, read_out_msg, AWS_CHANNEL_DIR_READ)) { aws_mem_release(read_out_msg->allocator, read_out_msg); return AWS_OP_ERR; } if (sc_handler->slot->adj_right) { downstream_window = aws_channel_slot_downstream_read_window(sc_handler->slot); } AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Downstream window is %zu", (void *)handler, downstream_window); } else { if (sc_handler->on_data_read) { sc_handler->on_data_read( handler, sc_handler->slot, &sc_handler->buffered_read_out_data_buf, sc_handler->user_data); } sc_handler->buffered_read_out_data_buf.len = 0; } } return AWS_OP_SUCCESS; } static void s_process_pending_output_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_channel_handler *handler = arg; aws_channel_task_init(task, NULL, NULL, "secure_channel_handler_process_pending_output"); if (status == AWS_TASK_STATUS_RUN_READY) { if (s_process_pending_output_messages(handler)) { struct secure_channel_handler *sc_handler = arg; aws_channel_shutdown(sc_handler->slot->channel, aws_last_error()); } } } static int s_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct secure_channel_handler *sc_handler = handler->impl; if (message) { /* note, most of these functions log internally, so the log messages in this function are sparse. */ AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: processing incoming message of size %zu", (void *)handler, message->message_data.len); struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); /* The SSPI interface forces us to manage incomplete records manually. 
So when we had extra after the previous read, it needs to be shifted to the beginning of the current read, then the current read data is appended to it. If we had an incomplete record, we don't need to shift anything but we do need to append the current read data to the end of the incomplete record from the previous read. Keep going until we've processed everything in the message we were just passed. */ int err = AWS_OP_SUCCESS; while (!err && message_cursor.len) { size_t available_buffer_space = sc_handler->buffered_read_in_data_buf.capacity - sc_handler->buffered_read_in_data_buf.len; size_t available_message_len = message_cursor.len; size_t amount_to_move_to_buffer = available_buffer_space > available_message_len ? available_message_len : available_buffer_space; memcpy( sc_handler->buffered_read_in_data_buf.buffer + sc_handler->buffered_read_in_data_buf.len, message_cursor.ptr, amount_to_move_to_buffer); sc_handler->buffered_read_in_data_buf.len += amount_to_move_to_buffer; err = sc_handler->s_connection_state_fn(handler); if (err && aws_last_error() == AWS_IO_READ_WOULD_BLOCK) { if (sc_handler->buffered_read_in_data_buf.len == sc_handler->buffered_read_in_data_buf.capacity) { /* throw this one as a protocol error. */ aws_raise_error(AWS_IO_TLS_ERROR_WRITE_FAILURE); } else { if (sc_handler->buffered_read_out_data_buf.len) { err = s_process_pending_output_messages(handler); if (err) { break; } } /* prevent a deadlock due to downstream handlers wanting more data, but we have an incomplete record, and the amount they're requesting is less than the size of a tls record. */ size_t window_size = slot->window_size; if (!window_size && aws_channel_slot_increment_read_window(slot, sc_handler->estimated_incomplete_size)) { err = AWS_OP_ERR; } else { sc_handler->estimated_incomplete_size = 0; err = AWS_OP_SUCCESS; } } aws_byte_cursor_advance(&message_cursor, amount_to_move_to_buffer); continue; } else if (err) { break; } /* handle any left over extra data from the decrypt operation here. 
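If so, the leftover (still encrypted) tail is moved to the front of buffered_read_in_data_buf and the buffer length is reset to just that tail, so the next chunk of the incoming message is appended right behind it; otherwise the buffer is simply emptied.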
*/ if (sc_handler->read_extra) { size_t move_pos = sc_handler->buffered_read_in_data_buf.len - sc_handler->read_extra; memmove( sc_handler->buffered_read_in_data_buf.buffer, sc_handler->buffered_read_in_data_buf.buffer + move_pos, sc_handler->read_extra); sc_handler->buffered_read_in_data_buf.len = sc_handler->read_extra; sc_handler->read_extra = 0; } else { sc_handler->buffered_read_in_data_buf.len = 0; } if (sc_handler->buffered_read_out_data_buf.len) { err = s_process_pending_output_messages(handler); if (err) { break; } } aws_byte_cursor_advance(&message_cursor, amount_to_move_to_buffer); } if (!err) { aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } aws_channel_shutdown(slot->channel, aws_last_error()); return AWS_OP_ERR; } if (sc_handler->buffered_read_out_data_buf.len) { if (s_process_pending_output_messages(handler)) { return AWS_OP_ERR; } aws_mem_release(message->allocator, message); } return AWS_OP_SUCCESS; } static int s_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct secure_channel_handler *sc_handler = (struct secure_channel_handler *)handler->impl; AWS_ASSERT(sc_handler->negotiation_finished); SECURITY_STATUS status = SEC_E_OK; if (message) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: processing ougoing message of size %zu", (void *)handler, message->message_data.len); struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); while (message_cursor.len) { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: processing message fragment of size %zu", (void *)handler, message_cursor.len); /* message size will be the lesser of either payload + record overhead or the max TLS record size.*/ size_t upstream_overhead = aws_channel_slot_upstream_message_overhead(sc_handler->slot); upstream_overhead += sc_handler->stream_sizes.cbHeader + sc_handler->stream_sizes.cbTrailer; size_t requested_length = message_cursor.len + upstream_overhead; size_t to_write = sc_handler->stream_sizes.cbMaximumMessage < requested_length ? sc_handler->stream_sizes.cbMaximumMessage : requested_length; struct aws_io_message *outgoing_message = aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, to_write); if (!outgoing_message) { return AWS_OP_ERR; } if (outgoing_message->message_data.capacity <= upstream_overhead) { aws_mem_release(outgoing_message->allocator, outgoing_message); return aws_raise_error(AWS_ERROR_INVALID_STATE); } /* what if message is larger than one record? 
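Only the fragment that fits after reserving room for the record header and trailer is copied and encrypted per iteration; the acquired message is capped at stream_sizes.cbMaximumMessage, the cursor is advanced by exactly the bytes consumed, and the enclosing while loop keeps emitting records until the payload is drained. The completion callback is handed only to the outgoing message that carries the final fragment.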
*/ size_t original_message_fragment_to_process = outgoing_message->message_data.capacity - upstream_overhead; memcpy( outgoing_message->message_data.buffer + sc_handler->stream_sizes.cbHeader, message_cursor.ptr, original_message_fragment_to_process); if (original_message_fragment_to_process == message_cursor.len) { outgoing_message->on_completion = message->on_completion; outgoing_message->user_data = message->user_data; } SecBuffer buffers[4] = { [0] = { .BufferType = SECBUFFER_STREAM_HEADER, .pvBuffer = outgoing_message->message_data.buffer, .cbBuffer = sc_handler->stream_sizes.cbHeader, }, [1] = { .BufferType = SECBUFFER_DATA, .pvBuffer = outgoing_message->message_data.buffer + sc_handler->stream_sizes.cbHeader, .cbBuffer = (unsigned long)original_message_fragment_to_process, }, [2] = { .BufferType = SECBUFFER_STREAM_TRAILER, .pvBuffer = outgoing_message->message_data.buffer + sc_handler->stream_sizes.cbHeader + original_message_fragment_to_process, .cbBuffer = sc_handler->stream_sizes.cbTrailer, }, [3] = { .BufferType = SECBUFFER_EMPTY, .pvBuffer = NULL, .cbBuffer = 0, }, }; SecBufferDesc buffer_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 4, .pBuffers = buffers, }; status = EncryptMessage(&sc_handler->sec_handle, 0, &buffer_desc, 0); if (status == SEC_E_OK) { outgoing_message->message_data.len = buffers[0].cbBuffer + buffers[1].cbBuffer + buffers[2].cbBuffer; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p:message fragment encrypted successfully: size is %zu", (void *)handler, outgoing_message->message_data.len); if (aws_channel_slot_send_message(slot, outgoing_message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(outgoing_message->allocator, outgoing_message); return AWS_OP_ERR; } aws_byte_cursor_advance(&message_cursor, original_message_fragment_to_process); } else { AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Error encrypting message. SECURITY_STATUS is %d", (void *)handler, (int)status); return aws_raise_error(AWS_IO_TLS_ERROR_WRITE_FAILURE); } } aws_mem_release(message->allocator, message); } return AWS_OP_SUCCESS; } static int s_increment_read_window(struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)size; struct secure_channel_handler *sc_handler = handler->impl; AWS_LOGF_TRACE(AWS_LS_IO_TLS, "id=%p: Increment read window message received %zu", (void *)handler, size); /* You can't query a context if negotiation isn't completed, since ciphers haven't been negotiated * and it couldn't possibly know the overhead size yet. */ if (sc_handler->negotiation_finished && !sc_handler->stream_sizes.cbMaximumMessage) { SECURITY_STATUS status = QueryContextAttributes(&sc_handler->sec_handle, SECPKG_ATTR_STREAM_SIZES, &sc_handler->stream_sizes); if (status != SEC_E_OK) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "id=%p: QueryContextAttributes failed with error %d", (void *)handler, (int)status); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); aws_channel_shutdown(slot->channel, AWS_ERROR_SYS_CALL_FAILURE); return AWS_OP_ERR; } } size_t total_desired_size = size; size_t downstream_size = aws_channel_slot_downstream_read_window(slot); size_t current_window_size = slot->window_size; /* the only time this branch isn't taken is when a window update is propagated during tls negotiation. * in that case just pass it through. 
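* In the normal case the downstream window counts decrypted plaintext bytes, so the code below estimates how many TLS records that much plaintext is likely to span (using READ_IN_SIZE as the per-record read size) and adds the per-record header and trailer overhead, making the raw window requested upstream large enough to carry the matching ciphertext.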
*/ if (sc_handler->stream_sizes.cbMaximumMessage) { size_t likely_records_count = (size_t)ceil((double)(downstream_size) / (double)(READ_IN_SIZE)); size_t offset_size = aws_mul_size_saturating( likely_records_count, sc_handler->stream_sizes.cbTrailer + sc_handler->stream_sizes.cbHeader); total_desired_size = aws_add_size_saturating(offset_size, downstream_size); } if (total_desired_size > current_window_size) { size_t window_update_size = total_desired_size - current_window_size; AWS_LOGF_TRACE( AWS_LS_IO_TLS, "id=%p: Propagating read window increment of size %zu", (void *)handler, window_update_size); aws_channel_slot_increment_read_window(slot, window_update_size); } if (sc_handler->negotiation_finished && !sc_handler->sequential_task_storage.task_fn) { aws_channel_task_init( &sc_handler->sequential_task_storage, s_process_pending_output_task, handler, "secure_channel_handler_process_pending_output_on_window_increment"); aws_channel_schedule_task_now(slot->channel, &sc_handler->sequential_task_storage); } return AWS_OP_SUCCESS; } static size_t s_initial_window_size(struct aws_channel_handler *handler) { (void)handler; /* set this to just enough for the handshake, once the handshake completes, the downstream handler will tell us the new window size. */ return EST_HANDSHAKE_SIZE; } static int s_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool abort_immediately) { struct secure_channel_handler *sc_handler = handler->impl; if (dir == AWS_CHANNEL_DIR_WRITE) { if (!abort_immediately && error_code != AWS_IO_SOCKET_CLOSED) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Shutting down the write direction", (void *)handler); /* send a TLS alert. */ SECURITY_STATUS status; DWORD shutdown_code = SCHANNEL_SHUTDOWN; SecBuffer shutdown_buffer = { .pvBuffer = &shutdown_code, .cbBuffer = sizeof(shutdown_code), .BufferType = SECBUFFER_TOKEN, }; SecBufferDesc shutdown_buffer_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 1, .pBuffers = &shutdown_buffer, }; /* this updates the SSPI internal state machine. */ status = ApplyControlToken(&sc_handler->sec_handle, &shutdown_buffer_desc); if (status != SEC_E_OK) { aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); return aws_channel_slot_on_handler_shutdown_complete( slot, dir, AWS_ERROR_SYS_CALL_FAILURE, abort_immediately); } SecBuffer output_buffer = { .pvBuffer = NULL, .cbBuffer = 0, .BufferType = SECBUFFER_EMPTY, }; SecBufferDesc output_buffer_desc = { .ulVersion = SECBUFFER_VERSION, .cBuffers = 1, .pBuffers = &output_buffer, }; struct aws_byte_buf server_name = aws_tls_handler_server_name(handler); char server_name_cstr[256]; AWS_ZERO_ARRAY(server_name_cstr); AWS_FATAL_ASSERT(server_name.len < sizeof(server_name_cstr)); memcpy(server_name_cstr, server_name.buffer, server_name.len); /* this acutally gives us an Alert record to send. 
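With SCHANNEL_SHUTDOWN already applied via ApplyControlToken, this additional InitializeSecurityContextA call should place the TLS close_notify alert token in output_buffer; the code below copies it into a channel message and writes it toward the peer on a best-effort basis.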
*/ status = InitializeSecurityContextA( &sc_handler->creds, &sc_handler->sec_handle, (SEC_CHAR *)server_name_cstr, sc_handler->ctx_req, 0, 0, NULL, 0, NULL, &output_buffer_desc, &sc_handler->ctx_ret_flags, NULL); if (status == SEC_E_OK || status == SEC_I_CONTEXT_EXPIRED) { struct aws_io_message *outgoing_message = aws_channel_acquire_message_from_pool( slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, output_buffer.cbBuffer); if (!outgoing_message || outgoing_message->message_data.capacity < output_buffer.cbBuffer) { return aws_channel_slot_on_handler_shutdown_complete(slot, dir, aws_last_error(), true); } memcpy(outgoing_message->message_data.buffer, output_buffer.pvBuffer, output_buffer.cbBuffer); outgoing_message->message_data.len = output_buffer.cbBuffer; /* we don't really care if this succeeds or not, it's just sending the TLS alert. */ if (aws_channel_slot_send_message(slot, outgoing_message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(outgoing_message->allocator, outgoing_message); } } } } return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, abort_immediately); } static void s_do_negotiation_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_channel_handler *handler = arg; struct secure_channel_handler *sc_handler = handler->impl; if (status == AWS_TASK_STATUS_RUN_READY) { int err = sc_handler->s_connection_state_fn(handler); if (err) { aws_channel_shutdown(sc_handler->slot->channel, aws_last_error()); } } } static void s_secure_channel_handler_destroy( struct aws_allocator *allocator, struct secure_channel_handler *sc_handler) { if (sc_handler == NULL) { return; } if (sc_handler->protocol.buffer) { aws_byte_buf_clean_up(&sc_handler->protocol); } if (sc_handler->alpn_list) { aws_string_destroy(sc_handler->alpn_list); } if (sc_handler->server_name.buffer) { aws_byte_buf_clean_up(&sc_handler->server_name); } if (sc_handler->sec_handle.dwLower || sc_handler->sec_handle.dwUpper) { DeleteSecurityContext(&sc_handler->sec_handle); } if (sc_handler->creds.dwLower || sc_handler->creds.dwUpper) { DeleteSecurityContext(&sc_handler->creds); } aws_tls_channel_handler_shared_clean_up(&sc_handler->shared_state); aws_mem_release(allocator, sc_handler); } static void s_handler_destroy(struct aws_channel_handler *handler) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: destroying handler", (void *)handler); struct secure_channel_handler *sc_handler = handler->impl; s_secure_channel_handler_destroy(handler->alloc, sc_handler); } static void s_reset_statistics(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; aws_crt_statistics_tls_reset(&sc_handler->shared_state.stats); } static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) { struct secure_channel_handler *sc_handler = handler->impl; void *stats_base = &sc_handler->shared_state.stats; aws_array_list_push_back(stats, &stats_base); } int aws_tls_client_handler_start_negotiation(struct aws_channel_handler *handler) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "id=%p: Kicking off TLS negotiation", (void *)handler); struct secure_channel_handler *sc_handler = handler->impl; if (aws_channel_thread_is_callers_thread(sc_handler->slot->channel)) { int err = sc_handler->s_connection_state_fn(handler); if (err) { aws_channel_shutdown(sc_handler->slot->channel, aws_last_error()); } return err; } aws_channel_task_init( &sc_handler->sequential_task_storage, s_do_negotiation_task, handler, 
"secure_channel_handler_start_negotation"); aws_channel_schedule_task_now(sc_handler->slot->channel, &sc_handler->sequential_task_storage); return AWS_OP_SUCCESS; } struct aws_byte_buf aws_tls_handler_protocol(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; return sc_handler->protocol; } struct aws_byte_buf aws_tls_handler_server_name(struct aws_channel_handler *handler) { struct secure_channel_handler *sc_handler = handler->impl; return sc_handler->server_name; } static struct aws_channel_handler_vtable s_handler_vtable = { .destroy = s_handler_destroy, .process_read_message = s_process_read_message, .process_write_message = s_process_write_message, .shutdown = s_handler_shutdown, .increment_read_window = s_increment_read_window, .initial_window_size = s_initial_window_size, .message_overhead = s_message_overhead, .reset_statistics = s_reset_statistics, .gather_statistics = s_gather_statistics, }; static struct aws_channel_handler *s_tls_handler_new( struct aws_allocator *alloc, struct aws_tls_connection_options *options, struct aws_channel_slot *slot, bool is_client_mode) { AWS_ASSERT(options->ctx); struct secure_channel_handler *sc_handler = aws_mem_calloc(alloc, 1, sizeof(struct secure_channel_handler)); if (!sc_handler) { return NULL; } sc_handler->handler.alloc = alloc; sc_handler->handler.impl = sc_handler; sc_handler->handler.vtable = &s_handler_vtable; sc_handler->handler.slot = slot; aws_tls_channel_handler_shared_init(&sc_handler->shared_state, &sc_handler->handler, options); struct secure_channel_ctx *sc_ctx = options->ctx->impl; unsigned long credential_use = SECPKG_CRED_INBOUND; if (is_client_mode) { credential_use = SECPKG_CRED_OUTBOUND; } SECURITY_STATUS status = AcquireCredentialsHandleA( NULL, UNISP_NAME, credential_use, NULL, &sc_ctx->credentials, NULL, NULL, &sc_handler->creds, &sc_handler->sspi_timestamp); if (status != SEC_E_OK) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Error on AcquireCredentialsHandle. 
SECURITY_STATUS is %d", (int)status); int aws_error = s_determine_sspi_error(status); aws_raise_error(aws_error); goto on_error; } sc_handler->advertise_alpn_message = options->advertise_alpn_message; sc_handler->on_data_read = options->on_data_read; sc_handler->on_error = options->on_error; sc_handler->on_negotiation_result = options->on_negotiation_result; sc_handler->user_data = options->user_data; if (!options->alpn_list && sc_ctx->alpn_list) { sc_handler->alpn_list = aws_string_new_from_string(alloc, sc_ctx->alpn_list); if (!sc_handler->alpn_list) { goto on_error; } } else if (options->alpn_list) { sc_handler->alpn_list = aws_string_new_from_string(alloc, options->alpn_list); if (!sc_handler->alpn_list) { goto on_error; } } if (options->server_name) { AWS_LOGF_DEBUG( AWS_LS_IO_TLS, "id=%p: Setting SNI to %s", (void *)&sc_handler->handler, aws_string_c_str(options->server_name)); struct aws_byte_cursor server_name_crsr = aws_byte_cursor_from_string(options->server_name); if (aws_byte_buf_init_copy_from_cursor(&sc_handler->server_name, alloc, server_name_crsr)) { goto on_error; } } sc_handler->slot = slot; if (is_client_mode) { sc_handler->s_connection_state_fn = s_do_client_side_negotiation_step_1; } else { sc_handler->s_connection_state_fn = s_do_server_side_negotiation_step_1; } sc_handler->custom_ca_store = sc_ctx->custom_trust_store; sc_handler->buffered_read_in_data_buf = aws_byte_buf_from_array(sc_handler->buffered_read_in_data, sizeof(sc_handler->buffered_read_in_data)); sc_handler->buffered_read_in_data_buf.len = 0; sc_handler->buffered_read_out_data_buf = aws_byte_buf_from_array(sc_handler->buffered_read_out_data, sizeof(sc_handler->buffered_read_out_data)); sc_handler->buffered_read_out_data_buf.len = 0; sc_handler->verify_peer = sc_ctx->verify_peer; return &sc_handler->handler; on_error: s_secure_channel_handler_destroy(alloc, sc_handler); return NULL; } struct aws_channel_handler *aws_tls_client_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { return s_tls_handler_new(allocator, options, slot, true); } struct aws_channel_handler *aws_tls_server_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot) { return s_tls_handler_new(allocator, options, slot, false); } static void s_secure_channel_ctx_destroy(struct secure_channel_ctx *secure_channel_ctx) { if (secure_channel_ctx == NULL) { return; } if (secure_channel_ctx->private_key) { CryptDestroyKey(secure_channel_ctx->private_key); } if (secure_channel_ctx->crypto_provider) { CryptReleaseContext(secure_channel_ctx->crypto_provider, 0); } if (secure_channel_ctx->custom_trust_store) { aws_close_cert_store(secure_channel_ctx->custom_trust_store); } if (secure_channel_ctx->pcerts) { /** * Only free the private certificate context if the private key is NOT * from the certificate context because freeing the private key * using CryptDestroyKey frees the certificate context and then * trying to access it leads to a access violation. 
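* For that reason s_ctx_new leaves should_free_pcerts at true for the system-store and CA-only paths, and flips it to false once aws_import_key_pair_to_cert_context has tied the private key to the certificate context.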
*/ if (secure_channel_ctx->should_free_pcerts == true) { CertFreeCertificateContext(secure_channel_ctx->pcerts); } } if (secure_channel_ctx->cert_store) { aws_close_cert_store(secure_channel_ctx->cert_store); } if (secure_channel_ctx->alpn_list) { aws_string_destroy(secure_channel_ctx->alpn_list); } aws_mem_release(secure_channel_ctx->ctx.alloc, secure_channel_ctx); } struct aws_tls_ctx *s_ctx_new( struct aws_allocator *alloc, const struct aws_tls_ctx_options *options, bool is_client_mode) { if (!aws_tls_is_cipher_pref_supported(options->cipher_pref)) { aws_raise_error(AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED); AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: TLS Cipher Preference is not supported: %d.", options->cipher_pref); return NULL; } struct secure_channel_ctx *secure_channel_ctx = aws_mem_calloc(alloc, 1, sizeof(struct secure_channel_ctx)); if (!secure_channel_ctx) { return NULL; } secure_channel_ctx->ctx.alloc = alloc; secure_channel_ctx->ctx.impl = secure_channel_ctx; aws_ref_count_init( &secure_channel_ctx->ctx.ref_count, secure_channel_ctx, (aws_simple_completion_callback *)s_secure_channel_ctx_destroy); if (options->alpn_list) { secure_channel_ctx->alpn_list = aws_string_new_from_string(alloc, options->alpn_list); if (!secure_channel_ctx->alpn_list) { goto clean_up; } } secure_channel_ctx->verify_peer = options->verify_peer; secure_channel_ctx->credentials.dwVersion = SCHANNEL_CRED_VERSION; secure_channel_ctx->should_free_pcerts = true; secure_channel_ctx->credentials.grbitEnabledProtocols = 0; if (is_client_mode) { switch (options->minimum_tls_version) { case AWS_IO_SSLv3: secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_SSL3_CLIENT; case AWS_IO_TLSv1: secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_0_CLIENT; case AWS_IO_TLSv1_1: secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_1_CLIENT; case AWS_IO_TLSv1_2: #if defined(SP_PROT_TLS1_2_CLIENT) secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_2_CLIENT; #endif case AWS_IO_TLSv1_3: #if defined(SP_PROT_TLS1_3_CLIENT) secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_3_CLIENT; #endif break; case AWS_IO_TLS_VER_SYS_DEFAULTS: secure_channel_ctx->credentials.grbitEnabledProtocols = 0; break; } } else { switch (options->minimum_tls_version) { case AWS_IO_SSLv3: secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_SSL3_SERVER; case AWS_IO_TLSv1: secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_0_SERVER; case AWS_IO_TLSv1_1: secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_1_SERVER; case AWS_IO_TLSv1_2: #if defined(SP_PROT_TLS1_2_SERVER) secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_2_SERVER; #endif case AWS_IO_TLSv1_3: #if defined(SP_PROT_TLS1_3_SERVER) secure_channel_ctx->credentials.grbitEnabledProtocols |= SP_PROT_TLS1_3_SERVER; #endif break; case AWS_IO_TLS_VER_SYS_DEFAULTS: secure_channel_ctx->credentials.grbitEnabledProtocols = 0; break; } } if (options->verify_peer && aws_tls_options_buf_is_set(&options->ca_file)) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: loading custom CA file."); secure_channel_ctx->credentials.dwFlags = SCH_CRED_MANUAL_CRED_VALIDATION; struct aws_byte_cursor ca_blob_cur = aws_byte_cursor_from_buf(&options->ca_file); int error = aws_import_trusted_certificates(alloc, &ca_blob_cur, &secure_channel_ctx->custom_trust_store); if (error) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import custom CA with error %d", aws_last_error()); goto clean_up; } } 
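/* When a custom CA bundle was imported above, SCH_CRED_MANUAL_CRED_VALIDATION tells Schannel to skip its own chain validation; the handler instead evaluates trust against custom_trust_store in s_manually_verify_peer_cert once client-side negotiation reaches SEC_E_OK. The branches below restore automatic validation (or deliberately relax it) when no custom CA is involved. */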
else if (is_client_mode) { secure_channel_ctx->credentials.dwFlags = SCH_CRED_AUTO_CRED_VALIDATION; } if (is_client_mode && !options->verify_peer) { AWS_LOGF_WARN( AWS_LS_IO_TLS, "static: x.509 validation has been disabled. " "If this is not running in a test environment, this is likely a security vulnerability."); secure_channel_ctx->credentials.dwFlags &= ~(SCH_CRED_AUTO_CRED_VALIDATION); secure_channel_ctx->credentials.dwFlags |= SCH_CRED_IGNORE_NO_REVOCATION_CHECK | SCH_CRED_IGNORE_REVOCATION_OFFLINE | SCH_CRED_NO_SERVERNAME_CHECK | SCH_CRED_MANUAL_CRED_VALIDATION; } else if (is_client_mode) { secure_channel_ctx->credentials.dwFlags |= SCH_CRED_REVOCATION_CHECK_CHAIN | SCH_CRED_IGNORE_REVOCATION_OFFLINE; } /* if someone wants to use broken algorithms like rc4/md5/des they'll need to ask for a special control */ secure_channel_ctx->credentials.dwFlags |= SCH_USE_STRONG_CRYPTO; /* if using a system store. */ if (options->system_certificate_path) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: assuming certificate is in a system store, loading now."); if (aws_load_cert_from_system_cert_store( options->system_certificate_path, &secure_channel_ctx->cert_store, &secure_channel_ctx->pcerts)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to load %s", options->system_certificate_path); goto clean_up; } secure_channel_ctx->credentials.paCred = &secure_channel_ctx->pcerts; secure_channel_ctx->credentials.cCreds = 1; /* if using traditional PEM armored PKCS#7 and ASN Encoding public/private key pairs */ } else if (aws_tls_options_buf_is_set(&options->certificate) && aws_tls_options_buf_is_set(&options->private_key)) { AWS_LOGF_DEBUG(AWS_LS_IO_TLS, "static: certificate and key have been set, setting them up now."); if (!aws_text_is_utf8(options->certificate.buffer, options->certificate.len)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import certificate, must be ASCII/UTF-8 encoded"); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto clean_up; } if (!aws_text_is_utf8(options->private_key.buffer, options->private_key.len)) { AWS_LOGF_ERROR(AWS_LS_IO_TLS, "static: failed to import private key, must be ASCII/UTF-8 encoded"); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto clean_up; } struct aws_byte_cursor cert_chain_cur = aws_byte_cursor_from_buf(&options->certificate); struct aws_byte_cursor pk_cur = aws_byte_cursor_from_buf(&options->private_key); int err = aws_import_key_pair_to_cert_context( alloc, &cert_chain_cur, &pk_cur, is_client_mode, &secure_channel_ctx->cert_store, &secure_channel_ctx->pcerts, &secure_channel_ctx->crypto_provider, &secure_channel_ctx->private_key); if (err) { AWS_LOGF_ERROR( AWS_LS_IO_TLS, "static: failed to import certificate and private key with error %d.", aws_last_error()); goto clean_up; } secure_channel_ctx->credentials.paCred = &secure_channel_ctx->pcerts; secure_channel_ctx->credentials.cCreds = 1; secure_channel_ctx->should_free_pcerts = false; } return &secure_channel_ctx->ctx; clean_up: s_secure_channel_ctx_destroy(secure_channel_ctx); return NULL; } struct aws_tls_ctx *aws_tls_server_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { return s_ctx_new(alloc, options, false); } struct aws_tls_ctx *aws_tls_client_ctx_new(struct aws_allocator *alloc, const struct aws_tls_ctx_options *options) { return s_ctx_new(alloc, options, true); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/shared_library.c000066400000000000000000000037621456575232400257770ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ // clang-format off #include #include // clang-format on #include #include static const char *s_null = ""; static const char *s_unknown_error = ""; int aws_shared_library_init(struct aws_shared_library *library, const char *library_path) { AWS_ZERO_STRUCT(*library); library->library_handle = LoadLibrary(library_path); if (library->library_handle == NULL) { DWORD ec = GetLastError(); AWS_LOGF_ERROR( AWS_LS_IO_SHARED_LIBRARY, "id=%p: Failed to load shared library with path \"%s\" with Windows error code: %ul", (void *)library, library_path ? library_path : s_null, ec); return aws_raise_error(AWS_IO_SHARED_LIBRARY_LOAD_FAILURE); } return AWS_OP_SUCCESS; } void aws_shared_library_clean_up(struct aws_shared_library *library) { if (library && library->library_handle) { FreeLibrary((HMODULE)library->library_handle); library->library_handle = NULL; } } int aws_shared_library_find_function( struct aws_shared_library *library, const char *symbol_name, aws_generic_function *function_address) { if (library == NULL || library->library_handle == NULL) { return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); } *function_address = (aws_generic_function)GetProcAddress((HMODULE)library->library_handle, symbol_name); if (*function_address == NULL) { DWORD ec = GetLastError(); AWS_LOGF_ERROR( AWS_LS_IO_SHARED_LIBRARY, "id=%p: Failed to find shared library symbol \"%s\" with error code: %ul", (void *)library, symbol_name ? symbol_name : s_null, ec); return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/windows_pki_utils.c000066400000000000000000000632741456575232400265660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif #define CERT_HASH_STR_LEN 40 #define CERT_HASH_LEN 20 /** * Split system cert path into exactly three segments like: * "CurrentUser\My\a11f8a9b5df5b98ba3508fbca575d09570e0d2c6" * -> ["CurrentUser", "My", "a11f8a9b5df5b98ba3508fbca575d09570e0d2c6"] */ static int s_split_system_cert_path(const char *cert_path, struct aws_byte_cursor out_splits[3]) { struct aws_byte_cursor cert_path_cursor = aws_byte_cursor_from_c_str(cert_path); struct aws_byte_cursor segment; AWS_ZERO_STRUCT(segment); for (size_t i = 0; i < 3; ++i) { if (!aws_byte_cursor_next_split(&cert_path_cursor, '\\', &segment)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: invalid certificate path '%s'. Expected additional '\\' separator.", cert_path); return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } out_splits[i] = segment; } if (aws_byte_cursor_next_split(&cert_path_cursor, '\\', &segment)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: invalid certificate path '%s'. 
Too many '\\' separators found.", cert_path); return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } return AWS_OP_SUCCESS; } int aws_load_cert_from_system_cert_store(const char *cert_path, HCERTSTORE *cert_store, PCCERT_CONTEXT *certs) { AWS_LOGF_INFO(AWS_LS_IO_PKI, "static: loading certificate at windows cert manager path '%s'.", cert_path); struct aws_byte_cursor segments[3]; if (s_split_system_cert_path(cert_path, segments)) { return AWS_OP_ERR; } const struct aws_byte_cursor store_location = segments[0]; const struct aws_byte_cursor store_path_cursor = segments[1]; const struct aws_byte_cursor cert_hash_cursor = segments[2]; DWORD store_val = 0; if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "CurrentUser")) { store_val = CERT_SYSTEM_STORE_CURRENT_USER; } else if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "LocalMachine")) { store_val = CERT_SYSTEM_STORE_LOCAL_MACHINE; } else if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "CurrentService")) { store_val = CERT_SYSTEM_STORE_CURRENT_SERVICE; } else if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "Services")) { store_val = CERT_SYSTEM_STORE_SERVICES; } else if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "Users")) { store_val = CERT_SYSTEM_STORE_USERS; } else if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "CurrentUserGroupPolicy")) { store_val = CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY; } else if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "LocalMachineGroupPolicy")) { store_val = CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY; } else if (aws_byte_cursor_eq_c_str_ignore_case(&store_location, "LocalMachineEnterprise")) { store_val = CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE; } else { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: invalid certificate path '%s'. System store location '" PRInSTR "' not recognized." " Expected something like 'CurrentUser'.", cert_path, AWS_BYTE_CURSOR_PRI(store_location)); return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } AWS_LOGF_DEBUG(AWS_LS_IO_PKI, "static: determined registry value for lookup as %d.", (int)store_val); /* The store_val value has to be only the path segment related to the physical store. Looking at the docs, 128 bytes should be plenty to store that segment. https://docs.microsoft.com/en-us/windows/desktop/SecCrypto/system-store-locations */ char store_path[128] = {0}; if (store_path_cursor.len >= sizeof(store_path)) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: invalid certificate path '%s'. Store name is too long.", cert_path); return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } memcpy(store_path, store_path_cursor.ptr, store_path_cursor.len); if (cert_hash_cursor.len != CERT_HASH_STR_LEN) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: invalid certificate path '%s'. '" PRInSTR "' should have been" " 40 bytes of hex encoded data", cert_path, AWS_BYTE_CURSOR_PRI(cert_hash_cursor)); return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } *cert_store = CertOpenStore( CERT_STORE_PROV_SYSTEM_A, 0, (HCRYPTPROV)NULL, CERT_STORE_OPEN_EXISTING_FLAG | store_val, store_path); if (!*cert_store) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: invalid certificate path '%s'. 
Failed to load cert store with error code %d", cert_path, (int)GetLastError()); return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); } BYTE cert_hash_data[CERT_HASH_LEN]; CRYPT_HASH_BLOB cert_hash = { .pbData = cert_hash_data, .cbData = CERT_HASH_LEN, }; if (!CryptStringToBinaryA( (LPCSTR)cert_hash_cursor.ptr, /* this is null-terminated, it's the last segment of c-str */ CERT_HASH_STR_LEN, CRYPT_STRING_HEX, cert_hash.pbData, &cert_hash.cbData, NULL, NULL)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: invalid certificate path '%s'. '" PRInSTR "' should have been a hex encoded string", cert_path, AWS_BYTE_CURSOR_PRI(cert_hash_cursor)); aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); goto on_error; } *certs = CertFindCertificateInStore( *cert_store, X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, 0, CERT_FIND_HASH, &cert_hash, NULL); if (!*certs) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: invalid certificate path '%s'. " "The referenced certificate was not found in the certificate store, error code %d", cert_path, (int)GetLastError()); aws_raise_error(AWS_ERROR_FILE_INVALID_PATH); goto on_error; } return AWS_OP_SUCCESS; on_error: if (*cert_store != NULL) { aws_close_cert_store(*cert_store); *cert_store = NULL; } return AWS_OP_ERR; } int aws_import_trusted_certificates( struct aws_allocator *alloc, const struct aws_byte_cursor *certificates_blob, HCERTSTORE *cert_store) { struct aws_array_list certificates; *cert_store = NULL; int result = AWS_OP_ERR; if (aws_pem_objects_init_from_file_contents(&certificates, alloc, *certificates_blob)) { goto clean_up; } size_t cert_count = aws_array_list_length(&certificates); if (cert_count == 0) { aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: no certificates found, error %s", aws_error_name(aws_last_error())); goto clean_up; } HCERTSTORE tmp_cert_store = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, (ULONG_PTR)NULL, CERT_STORE_CREATE_NEW_FLAG, NULL); *cert_store = tmp_cert_store; if (!*cert_store) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to create temporary cert store, error code %d", (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up; } AWS_LOGF_INFO(AWS_LS_IO_PKI, "static: loading %d certificates in cert chain for use as a CA", (int)cert_count); for (size_t i = 0; i < cert_count; ++i) { struct aws_pem_object *pem_object_ptr = NULL; aws_array_list_get_at_ptr(&certificates, (void **)&pem_object_ptr, i); CERT_BLOB cert_blob; CERT_CONTEXT *cert_context = NULL; cert_blob.pbData = pem_object_ptr->data.buffer; cert_blob.cbData = (DWORD)pem_object_ptr->data.len; DWORD content_type = 0; BOOL query_res = CryptQueryObject( CERT_QUERY_OBJECT_BLOB, &cert_blob, CERT_QUERY_CONTENT_FLAG_CERT, CERT_QUERY_FORMAT_FLAG_ALL, 0, NULL, &content_type, NULL, NULL, NULL, (const void **)&cert_context); if (!query_res || cert_context == NULL) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to parse certificate blob, error code %d", (int)GetLastError()); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto clean_up; } BOOL add_result = CertAddCertificateContextToStore(*cert_store, cert_context, CERT_STORE_ADD_ALWAYS, NULL); if (!add_result) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to add certificate to store, error code %d", (int)GetLastError()); } CertFreeCertificateContext(cert_context); if (!add_result) { aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up; } } result = AWS_OP_SUCCESS; clean_up: aws_pem_objects_clean_up(&certificates); if (result == AWS_OP_ERR && *cert_store) { 
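/* On any failure above, tear down the partially populated in-memory store and clear the out-param so callers never receive a dangling HCERTSTORE. */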
aws_close_cert_store(*cert_store); *cert_store = NULL; } return result; } void aws_close_cert_store(HCERTSTORE cert_store) { CertCloseStore(cert_store, 0); } static int s_cert_context_import_rsa_private_key( PCCERT_CONTEXT certs, const BYTE *key, DWORD decoded_len, bool is_client_mode, wchar_t uuid_wstr[AWS_UUID_STR_LEN], HCRYPTPROV *out_crypto_provider, HCRYPTKEY *out_private_key_handle) { /* out-params will adopt these resources if the function is successful. * if function fails these resources will be cleaned up before returning */ HCRYPTPROV crypto_prov = 0; HCRYPTKEY h_key = 0; if (is_client_mode) { /* use CRYPT_VERIFYCONTEXT so that keys are ephemeral (not stored to disk, registry, etc) */ if (!CryptAcquireContextW(&crypto_prov, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: error creating a new rsa crypto context for key with errno %d", (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto on_error; } if (!CryptImportKey(crypto_prov, key, decoded_len, 0, 0, &h_key)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to import rsa key into crypto provider, error code %d", GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto on_error; } if (!CertSetCertificateContextProperty(certs, CERT_KEY_PROV_HANDLE_PROP_ID, 0, (void *)crypto_prov)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: error creating a new certificate context for rsa key with errno %d", (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto on_error; } } else { if (!CryptAcquireContextW(&crypto_prov, uuid_wstr, NULL, PROV_RSA_FULL, CRYPT_NEWKEYSET)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: error creating a new rsa crypto context with errno %d", (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto on_error; } if (!CryptImportKey(crypto_prov, key, decoded_len, 0, 0, &h_key)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to import rsa key into crypto provider, error code %d", GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto on_error; } CRYPT_KEY_PROV_INFO key_prov_info; AWS_ZERO_STRUCT(key_prov_info); key_prov_info.pwszContainerName = uuid_wstr; key_prov_info.dwProvType = PROV_RSA_FULL; key_prov_info.dwKeySpec = AT_KEYEXCHANGE; if (!CertSetCertificateContextProperty(certs, CERT_KEY_PROV_INFO_PROP_ID, 0, &key_prov_info)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: error creating a new certificate context for key with errno %d", (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto on_error; } } *out_crypto_provider = crypto_prov; *out_private_key_handle = h_key; return AWS_OP_SUCCESS; on_error: if (h_key != 0) { CryptDestroyKey(h_key); } if (crypto_prov != 0) { CryptReleaseContext(crypto_prov, 0); } return AWS_OP_ERR; } #define ECC_256_MAGIC_NUMBER 0x20 #define ECC_384_MAGIC_NUMBER 0x30 static ULONG s_compute_ecc_key_type_from_private_key_size(size_t private_key_len) { switch (private_key_len) { case ECC_256_MAGIC_NUMBER: return BCRYPT_ECDSA_PRIVATE_P256_MAGIC; case ECC_384_MAGIC_NUMBER: return BCRYPT_ECDSA_PRIVATE_P384_MAGIC; default: return BCRYPT_ECDSA_PRIVATE_P521_MAGIC; } } #ifndef AWS_SUPPORT_WIN7 enum aws_ecc_public_key_compression_type { AWS_EPKCT_COMPRESSED_EVEN = 0x02, AWS_EPKCT_COMPRESSED_ODD = 0x03, AWS_EPKCT_UNCOMPRESSED = 0x04, }; /* TODO ALSO NEEDS TO BE EPHEMERAL */ static int s_cert_context_import_ecc_private_key( PCCERT_CONTEXT cert_context, struct aws_allocator *allocator, const BYTE *key, DWORD decoded_len, wchar_t uuid_wstr[AWS_UUID_STR_LEN]) { (void)decoded_len; 
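/* Illustrative layout sketch (comment only, not build code): the NCrypt import blob assembled below
 * follows the documented BCRYPT_ECCPRIVATE_BLOB layout, i.e. a BCRYPT_ECCKEY_BLOB header followed by
 * the raw key material:
 *
 *     ULONG dwMagic;    -- e.g. BCRYPT_ECDSA_PRIVATE_P256_MAGIC, chosen from the private key length
 *     ULONG cbKey;      -- byte length of each of X, Y and d
 *     BYTE  X[cbKey];   -- public key, taken from the cert's SubjectPublicKeyInfo (uncompressed form)
 *     BYTE  Y[cbKey];
 *     BYTE  d[cbKey];   -- private scalar, taken from CRYPT_ECC_PRIVATE_KEY_INFO.PrivateKey
 *
 * so key_blob_size below works out to sizeof(BCRYPT_ECCKEY_BLOB) + (X||Y) + d. */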
AWS_FATAL_ASSERT(cert_context != NULL); NCRYPT_PROV_HANDLE crypto_prov = 0; NCRYPT_KEY_HANDLE h_key = 0; BCRYPT_ECCKEY_BLOB *key_blob = NULL; int result = AWS_OP_ERR; SECURITY_STATUS status; CRYPT_BIT_BLOB *public_key_blob = &cert_context->pCertInfo->SubjectPublicKeyInfo.PublicKey; DWORD public_key_blob_length = public_key_blob->cbData; if (public_key_blob_length == 0) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: invalid zero-length ecc key data"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } /* * Per rfc5480#section-2.2, the public key section of the encoding consists of a single byte that tells whether or * not the public key is compressed, followed by the raw key data itself. Windows doesn't seem to support importing * compressed keys directly, so for now check and fail if it's a compressed key. * * Given that we're pulling the data from a windows internal structure generated by CryptQueryObject, it is * not known whether it's even possible to see a compressed tag here or if Windows automatically uncompresses a * compressed key for you. The win32 documentation is quite unhelpful here. * * We could test this by generating a certificate that contains a compressed public key and feeding it in. * I cannot find a way to do it that doesn't involve raw hex editing a sub object in the DER encoding of the * certificate. So figuring out the final expectation here is a TODO. */ if (*public_key_blob->pbData != AWS_EPKCT_UNCOMPRESSED) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: compressed ecc public keys not yet supported."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto done; } /* * Now we want everything but the first byte, so dec the length and bump the pointer. I was more comfortable doing * it the manual way rather than with cursors because using cursors would force us to do multiple narrowing casts * back when configuring win32 data. 
*/ public_key_blob_length--; struct aws_byte_cursor public_blob_cursor = { .ptr = public_key_blob->pbData + 1, .len = public_key_blob_length, }; CRYPT_ECC_PRIVATE_KEY_INFO *private_key_info = (CRYPT_ECC_PRIVATE_KEY_INFO *)key; ULONG private_key_length = private_key_info->PrivateKey.cbData; struct aws_byte_cursor private_key_cursor = { .ptr = private_key_info->PrivateKey.pbData, .len = private_key_length, }; DWORD key_blob_size = sizeof(BCRYPT_ECCKEY_BLOB) + public_key_blob_length + private_key_length; key_blob = (BCRYPT_ECCKEY_BLOB *)aws_mem_calloc(allocator, 1, key_blob_size); if (key_blob == NULL) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: could not allocate ecc key blob memory"); goto done; } key_blob->dwMagic = s_compute_ecc_key_type_from_private_key_size(private_key_cursor.len); key_blob->cbKey = private_key_length; struct aws_byte_buf key_blob_buffer = { .buffer = (uint8_t *)key_blob, .len = sizeof(BCRYPT_ECCKEY_BLOB), .capacity = key_blob_size, }; if (aws_byte_buf_append(&key_blob_buffer, &public_blob_cursor)) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: insufficient space to build ecc key blob"); goto done; } if (aws_byte_buf_append(&key_blob_buffer, &private_key_cursor)) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: insufficient space to build ecc key blob"); goto done; } status = NCryptOpenStorageProvider(&crypto_prov, MS_KEY_STORAGE_PROVIDER, 0); if (status != ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: could not open ncrypt key storage provider, error %d", (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } NCryptBuffer ncBuf = {AWS_UUID_STR_LEN * sizeof(wchar_t), NCRYPTBUFFER_PKCS_KEY_NAME, uuid_wstr}; NCryptBufferDesc ncBufDesc; ncBufDesc.ulVersion = 0; ncBufDesc.cBuffers = 1; ncBufDesc.pBuffers = &ncBuf; status = NCryptImportKey( crypto_prov, 0, BCRYPT_ECCPRIVATE_BLOB, &ncBufDesc, &h_key, (BYTE *)key_blob, key_blob_size, NCRYPT_OVERWRITE_KEY_FLAG); if (status != ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to import ecc key with status %d, last error %d", status, (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } CRYPT_KEY_PROV_INFO key_prov_info = {uuid_wstr, MS_KEY_STORAGE_PROVIDER, 0, 0, 0, NULL, 0}; if (!CertSetCertificateContextProperty(cert_context, CERT_KEY_PROV_INFO_PROP_ID, 0, &key_prov_info)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to set cert context key provider, with last error %d", (int)GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto done; } result = AWS_OP_SUCCESS; done: if (h_key != 0) { NCryptFreeObject(h_key); } if (crypto_prov != 0) { NCryptFreeObject(crypto_prov); } if (key_blob != NULL) { aws_mem_release(allocator, key_blob); } return result; } #endif /* AWS_SUPPORT_WIN7 */ enum aws_certificate_type { AWS_CT_X509_UNKNOWN, AWS_CT_X509_RSA, AWS_CT_X509_ECC, }; int aws_import_key_pair_to_cert_context( struct aws_allocator *alloc, const struct aws_byte_cursor *public_cert_chain, const struct aws_byte_cursor *private_key, bool is_client_mode, HCERTSTORE *store, PCCERT_CONTEXT *certs, HCRYPTPROV *crypto_provider, HCRYPTKEY *private_key_handle) { struct aws_array_list certificates, private_keys; AWS_ZERO_STRUCT(certificates); AWS_ZERO_STRUCT(private_keys); *certs = NULL; *store = NULL; *crypto_provider = 0; *private_key_handle = 0; int result = AWS_OP_ERR; BYTE *key = NULL; if (aws_pem_objects_init_from_file_contents(&certificates, alloc, *public_cert_chain)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to decode cert pem to buffer list with error %d", 
(int)aws_last_error()); goto clean_up; } if (aws_pem_objects_init_from_file_contents(&private_keys, alloc, *private_key)) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to decode key pem to buffer list with error %d", (int)aws_last_error()); goto clean_up; } size_t cert_count = aws_array_list_length(&certificates); AWS_LOGF_INFO(AWS_LS_IO_PKI, "static: loading certificate chain with %d certificates.", (int)cert_count); *store = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, (ULONG_PTR)NULL, CERT_STORE_CREATE_NEW_FLAG, NULL); if (!*store) { AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: failed to load in-memory/ephemeral certificate store, error code %d", GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up; } for (size_t i = 0; i < cert_count; ++i) { struct aws_pem_object *pem_object_ptr = NULL; aws_array_list_get_at_ptr(&certificates, (void **)&pem_object_ptr, i); CERT_BLOB cert_blob; cert_blob.pbData = pem_object_ptr->data.buffer; cert_blob.cbData = (DWORD)pem_object_ptr->data.len; DWORD content_type = 0; PCERT_CONTEXT cert_context = NULL; BOOL query_res = CryptQueryObject( CERT_QUERY_OBJECT_BLOB, &cert_blob, CERT_QUERY_CONTENT_FLAG_CERT, CERT_QUERY_FORMAT_FLAG_ALL, 0, NULL, &content_type, NULL, NULL, NULL, (const void **)&cert_context); if (!query_res || cert_context == NULL) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: invalid certificate blob, error code %d.", GetLastError()); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto clean_up; } BOOL add_result = CertAddCertificateContextToStore(*store, cert_context, CERT_STORE_ADD_ALWAYS, NULL); if (!add_result) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: unable to add , error code %d.", GetLastError()); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } if (i != 0 || !add_result) { CertFreeCertificateContext(cert_context); } else { *certs = cert_context; } if (!add_result) { goto clean_up; } } if (*certs == NULL) { aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: no certificates found, error %s", aws_error_name(aws_last_error())); goto clean_up; } struct aws_pem_object *private_key_ptr = NULL; DWORD decoded_len = 0; enum aws_certificate_type cert_type = AWS_CT_X509_UNKNOWN; size_t private_key_count = aws_array_list_length(&private_keys); for (size_t i = 0; i < private_key_count; ++i) { aws_array_list_get_at_ptr(&private_keys, (void **)&private_key_ptr, i); if (CryptDecodeObjectEx( X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, PKCS_RSA_PRIVATE_KEY, private_key_ptr->data.buffer, (DWORD)private_key_ptr->data.len, CRYPT_DECODE_ALLOC_FLAG, 0, &key, &decoded_len)) { cert_type = AWS_CT_X509_RSA; } #ifndef AWS_SUPPORT_WIN7 else if (CryptDecodeObjectEx( X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, X509_ECC_PRIVATE_KEY, private_key_ptr->data.buffer, (DWORD)private_key_ptr->data.len, CRYPT_DECODE_ALLOC_FLAG, NULL, &key, &decoded_len)) { cert_type = AWS_CT_X509_ECC; } #endif /* AWS_SUPPORT_WIN7 */ if (cert_type != AWS_CT_X509_UNKNOWN) { break; } } if (cert_type == AWS_CT_X509_UNKNOWN) { aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); AWS_LOGF_ERROR( AWS_LS_IO_PKI, "static: no acceptable private key found, error %s", aws_error_name(aws_last_error())); goto clean_up; } struct aws_uuid uuid; if (aws_uuid_init(&uuid)) { AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: failed to create a uuid."); goto clean_up; } char uuid_str[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_array(uuid_str, sizeof(uuid_str)); uuid_buf.len = 0; aws_uuid_to_str(&uuid, &uuid_buf); wchar_t uuid_wstr[AWS_UUID_STR_LEN] = {0}; 
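/* The freshly generated UUID string is widened below because the Win32 key-storage APIs take wide-character
 * container names; it is then used as the CSP/KSP key-container name in the RSA and ECC import helpers
 * (CryptAcquireContextW / NCryptImportKey), which keeps repeated imports from colliding in provider storage. */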
size_t converted_chars = 0; mbstowcs_s(&converted_chars, uuid_wstr, AWS_UUID_STR_LEN, uuid_str, sizeof(uuid_str)); (void)converted_chars; switch (cert_type) { case AWS_CT_X509_RSA: result = s_cert_context_import_rsa_private_key( *certs, key, decoded_len, is_client_mode, uuid_wstr, crypto_provider, private_key_handle); break; #ifndef AWS_SUPPORT_WIN7 case AWS_CT_X509_ECC: result = s_cert_context_import_ecc_private_key(*certs, alloc, key, decoded_len, uuid_wstr); break; #endif /* AWS_SUPPORT_WIN7 */ default: AWS_LOGF_ERROR(AWS_LS_IO_PKI, "static: failed to decode private key"); aws_raise_error(AWS_IO_FILE_VALIDATION_FAILURE); goto clean_up; } clean_up: aws_pem_objects_clean_up(&certificates); aws_pem_objects_clean_up(&private_keys); LocalFree(key); if (result == AWS_OP_ERR) { if (*store != NULL) { aws_close_cert_store(*store); *store = NULL; } if (*certs) { CertFreeCertificateContext(*certs); *certs = NULL; } if (*crypto_provider != 0) { CryptReleaseContext(*crypto_provider, 0); *crypto_provider = 0; } if (*private_key_handle != 0) { CryptDestroyKey(*private_key_handle); *private_key_handle = 0; } } return result; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/source/windows/winsock_init.c000066400000000000000000000056401456575232400255020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* clang is just a naive little idealist and doesn't understand that it can't just go around re-ordering windows header files. Also, sorry about the C++ style comments below, clang-format doesn't work (at least on my version) with the c-style comments. */ // clang-format off #include #include #include // clang-format on #include #include #include static LPFN_CONNECTEX s_connect_ex_fn = NULL; static LPFN_ACCEPTEX s_accept_ex_fn = NULL; static bool s_winsock_init = false; void aws_check_and_init_winsock(void) { if (!s_winsock_init) { AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "static: initializing WinSock"); WORD requested_version = MAKEWORD(2, 2); WSADATA wsa_data; if (WSAStartup(requested_version, &wsa_data)) { AWS_LOGF_FATAL( AWS_LS_IO_SOCKET, "static: WinSock initialization failed with error %d", (int)GetLastError()); AWS_ASSERT(0); exit(-1); } SOCKET dummy_socket = socket(AF_INET, SOCK_STREAM, 0); AWS_ASSERT(dummy_socket != INVALID_SOCKET); AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "static: loading WSAID_CONNECTEX function"); GUID connect_ex_guid = WSAID_CONNECTEX; DWORD bytes_written = 0; int rc = WSAIoctl( dummy_socket, SIO_GET_EXTENSION_FUNCTION_POINTER, &connect_ex_guid, sizeof(connect_ex_guid), &s_connect_ex_fn, sizeof(s_connect_ex_fn), &bytes_written, NULL, NULL); if (rc) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "static: failed to load WSAID_CONNECTEX function with error %d", (int)GetLastError()); AWS_ASSERT(0); exit(-1); } AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "static: loading WSAID_ACCEPTEX function"); GUID accept_ex_guid = WSAID_ACCEPTEX; bytes_written = 0; rc = WSAIoctl( dummy_socket, SIO_GET_EXTENSION_FUNCTION_POINTER, &accept_ex_guid, sizeof(accept_ex_guid), &s_accept_ex_fn, sizeof(s_accept_ex_fn), &bytes_written, NULL, NULL); if (rc) { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, "static: failed to load WSAID_ACCEPTEX function with error %d", (int)GetLastError()); AWS_ASSERT(0); exit(-1); } closesocket(dummy_socket); s_winsock_init = true; } } aws_ms_fn_ptr aws_winsock_get_connectex_fn(void) { aws_check_and_init_winsock(); return (aws_ms_fn_ptr)s_connect_ex_fn; } aws_ms_fn_ptr aws_winsock_get_acceptex_fn(void) { aws_check_and_init_winsock(); 
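/* s_accept_ex_fn was resolved in aws_check_and_init_winsock() via WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER),
 * since AcceptEx/ConnectEx are not ordinary ws2_32 exports. Illustrative usage sketch (comment only, not build
 * code), assuming a caller that wants the ConnectEx variant:
 *
 *     LPFN_CONNECTEX connect_ex = (LPFN_CONNECTEX)aws_winsock_get_connectex_fn();
 *     // connect_ex(sock, (SOCKADDR *)&addr, sizeof(addr), NULL, 0, &bytes_sent, &overlapped);
 */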
return (aws_ms_fn_ptr)s_accept_ex_fn; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/000077500000000000000000000000001456575232400210015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/CMakeLists.txt000066400000000000000000000363671456575232400235600ustar00rootroot00000000000000include(AwsTestHarness) enable_testing() # See PKCS11.md for instructions on running these tests option(ENABLE_PKCS11_TESTS "Build and run PKCS#11 tests" OFF) file(GLOB TEST_SRC "*.c") file(GLOB TEST_HDRS "*.h") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) # Each pipe test runs in 2 different configurations macro(add_pipe_test_case name) add_test_case("${name}") add_test_case("${name}_2loops") endmacro() add_test_case(io_library_init) add_test_case(io_library_init_cleanup_init_cleanup) add_pipe_test_case(pipe_open_close) add_pipe_test_case(pipe_read_write) add_pipe_test_case(pipe_read_write_large_buffer) add_pipe_test_case(pipe_readable_event_sent_after_write) add_pipe_test_case(pipe_readable_event_sent_once) add_pipe_test_case(pipe_readable_event_sent_on_subscribe_if_data_present) add_pipe_test_case(pipe_readable_event_sent_on_resubscribe_if_data_present) add_pipe_test_case(pipe_readable_event_sent_again_after_all_data_read) add_pipe_test_case(pipe_error_event_sent_after_write_end_closed) add_pipe_test_case(pipe_error_event_sent_on_subscribe_if_write_end_already_closed) add_pipe_test_case(pipe_writes_are_fifo) add_pipe_test_case(pipe_clean_up_cancels_pending_writes) add_test_case(event_loop_xthread_scheduled_tasks_execute) add_test_case(event_loop_canceled_tasks_run_in_el_thread) if(USE_IO_COMPLETION_PORTS) add_test_case(event_loop_completion_events) else() add_test_case(event_loop_subscribe_unsubscribe) add_test_case(event_loop_writable_event_on_subscribe) add_test_case(event_loop_no_readable_event_before_write) add_test_case(event_loop_readable_event_after_write) add_test_case(event_loop_readable_event_on_subscribe_if_data_present) add_test_case(event_loop_readable_event_on_2nd_time_readable) add_test_case(event_loop_no_events_after_unsubscribe) endif() add_test_case(event_loop_stop_then_restart) add_test_case(event_loop_multiple_stops) add_test_case(event_loop_group_setup_and_shutdown) add_test_case(event_loop_group_setup_and_shutdown_async) add_test_case(numa_aware_event_loop_group_setup_and_shutdown) add_test_case(io_testing_channel) add_test_case(local_socket_communication) add_net_test_case(tcp_socket_communication) add_net_test_case(udp_socket_communication) add_test_case(udp_bind_connect_communication) add_net_test_case(connect_timeout) add_net_test_case(connect_timeout_cancelation) if(USE_VSOCK) add_test_case(vsock_loopback_socket_communication) endif() add_test_case(outgoing_local_sock_errors) add_test_case(outgoing_tcp_sock_error) add_test_case(incoming_tcp_sock_errors) add_test_case(incoming_duplicate_tcp_bind_errors) add_net_test_case(bind_on_zero_port_tcp_ipv4) add_net_test_case(bind_on_zero_port_udp_ipv4) add_test_case(incoming_udp_sock_errors) add_test_case(wrong_thread_read_write_fails) add_net_test_case(cleanup_before_connect_or_timeout_doesnt_explode) add_test_case(cleanup_in_accept_doesnt_explode) add_test_case(cleanup_in_write_cb_doesnt_explode) add_test_case(sock_write_cb_is_async) add_test_case(socket_validate_port) if(WIN32) add_test_case(local_socket_pipe_connected_race) endif() add_test_case(channel_setup) add_test_case(channel_single_slot_cleans_up) add_test_case(channel_slots_clean_up) add_test_case(channel_refcount_delays_clean_up) 
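# Channel task scheduling and shutdown-ordering tests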
add_test_case(channel_tasks_run) add_test_case(channel_tasks_serialized_run) add_test_case(channel_rejects_post_shutdown_tasks) add_test_case(channel_cancels_pending_tasks) add_test_case(channel_duplicate_shutdown) add_net_test_case(channel_connect_some_hosts_timeout) add_net_test_case(test_default_with_ipv6_lookup) add_test_case(test_resolver_ipv6_address_lookup) add_net_test_case(test_default_with_multiple_lookups) add_test_case(test_resolver_ipv4_address_lookup) add_test_case(test_resolver_purge_host_cache) add_test_case(test_resolver_purge_cache) add_net_test_case(test_default_with_ipv4_only_lookup) add_test_case(test_resolver_ttls) add_test_case(test_resolver_connect_failure_recording) add_test_case(test_resolver_ttl_refreshes_on_resolve) add_test_case(test_resolver_low_frequency_starvation) add_test_case(test_pem_single_cert_parse) add_test_case(test_pem_private_key_parse) add_test_case(test_pem_cert_chain_parse) add_test_case(test_pem_cert_parse_from_file) add_test_case(test_pem_cert_parse_from_file_crlf) add_test_case(test_pem_private_key_parse_from_file) add_test_case(test_pem_cert_chain_comments_and_whitespace) add_test_case(test_pem_invalid_parse) add_test_case(test_pem_valid_data_invalid_parse) add_test_case(test_pem_invalid_in_chain_parse) add_test_case(pem_sanitize_comments_around_pem_object_removed) add_test_case(pem_sanitize_empty_file_rejected) add_test_case(pem_sanitize_wrong_format_rejected) add_test_case(socket_handler_echo_and_backpressure) add_test_case(socket_handler_close) add_test_case(socket_pinned_event_loop) add_net_test_case(socket_pinned_event_loop_dns_failure) if(NOT BYO_CRYPTO) if(USE_S2N) add_net_test_case(default_pki_path_exists) endif() # Badssl-based tests (https://badssl.com/dashboard/) - use remote endpoints for now, later transition to # internal hosting using the bad ssl container/server setup and dns redirects per # https://github.com/chromium/badssl.com # # We don't use the interception suite since that's a host configuration issue # We also don't check the domain security policy suite: # 1. s2n does not support revocation checks and we do not currently enable any revocation checks # in windows or osx, although we may add configurable support to those platforms(off-by-default) at a # later date # 2. s2n does not support public key pinning and, given its deprecated and http-centric position, there are no # plans to add support nor investigate osx/windows support as well. 
# Badssl - Certificate Validation endpoint suite # For each failure case, we also include a positive test that verifies success when peer verification is disabled add_net_test_case(tls_client_channel_negotiation_error_expired) add_net_test_case(tls_client_channel_negotiation_error_wrong_host) add_net_test_case(tls_client_channel_negotiation_error_wrong_host_with_ca_override) add_net_test_case(tls_client_channel_negotiation_error_self_signed) add_net_test_case(tls_client_channel_negotiation_error_untrusted_root) add_net_test_case(tls_client_channel_negotiation_error_untrusted_root_due_to_ca_override) add_net_test_case(tls_client_channel_negotiation_no_verify_expired) add_net_test_case(tls_client_channel_negotiation_no_verify_wrong_host) add_net_test_case(tls_client_channel_negotiation_no_verify_self_signed) add_net_test_case(tls_client_channel_negotiation_no_verify_untrusted_root) # Badssl - Broken Crypto endpoint suite # We don't include dh1024 as it succeeds on the windows baseline configuration and there does not seem # to be a way to disable it add_net_test_case(tls_client_channel_negotiation_error_broken_crypto_rc4) add_net_test_case(tls_client_channel_negotiation_error_broken_crypto_rc4_md5) add_net_test_case(tls_client_channel_negotiation_error_broken_crypto_dh480) add_net_test_case(tls_client_channel_negotiation_error_broken_crypto_dh512) add_net_test_case(tls_client_channel_negotiation_error_broken_crypto_null) # Badssl - Legacy crypto suite, includes both negative and positive tests, with override checks where appropriate # Our current baseline/default is platform-specific, whereas badssl expects a baseline of 1.2 # Linux - tls1.1 # Windows - system default (1.0 is the only thing we could reasonable fixate to given win7 support) # Mac - system default # We skip the cbc and 3des checks, as a positive connection result there does not yet represent a security risk # We don't include dh2048 as it succeeds on the windows baseline configuration and there does not seem # to be a way to disable it if(NOT(WIN32 AND NOT CMAKE_SYSTEM_VERSION MATCHES "10\.0\.1.*")) # Skip TLS 1.0 and TLS 1.1 test for windows later than windows server 2022, as they droped old TLS add_net_test_case(tls_client_channel_negotiation_error_legacy_crypto_tls10) add_net_test_case(tls_client_channel_negotiation_override_legacy_crypto_tls10) add_net_test_case(tls_client_channel_negotiation_error_override_legacy_crypto_tls11) add_net_test_case(tls_client_channel_negotiation_success_legacy_crypto_tls11) endif() # Badssl - Secure uncommon suite # We skip 10000san for now as its unclear the point or relevance especially with respect to the OS-based # TLS implementations # We skip 1000san, sha384 and sha512 because the public badssl certificate is expired and we haven't migrated to # internal hosting yet # We also defer the incomplete chain test for now until we can do some further study on how to get it to # properly fail on windows and osx. 
# add_net_test_case(tls_client_channel_negotiation_success_sha384) # add_net_test_case(tls_client_channel_negotiation_success_sha512) add_net_test_case(tls_client_channel_negotiation_success_rsa8192) add_net_test_case(tls_client_channel_negotiation_error_no_subject) add_net_test_case(tls_client_channel_negotiation_success_no_verify_no_subject) add_net_test_case(tls_client_channel_negotiation_error_no_common_name) add_net_test_case(tls_client_channel_negotiation_success_no_verify_no_common_name) add_net_test_case(tls_client_channel_negotiation_success_no_verify_incomplete_chain) # Badssl - Secure common suite, all of these should succeed add_net_test_case(tls_client_channel_negotiation_success_tls12) add_net_test_case(tls_client_channel_negotiation_success_sha256) add_net_test_case(tls_client_channel_negotiation_success_rsa2048) add_net_test_case(tls_client_channel_negotiation_success_ecc256) add_net_test_case(tls_client_channel_negotiation_success_ecc384) # add_net_test_case(tls_client_channel_negotiation_success_extended_validation) test disabled until badssl updates cert (expired 2022.08.10) add_net_test_case(tls_client_channel_negotiation_success_mozilla_modern) # Misc non-badssl tls tests add_net_test_case(test_concurrent_cert_import) add_net_test_case(test_duplicate_cert_import) add_test_case(tls_channel_echo_and_backpressure_test) add_net_test_case(tls_client_channel_negotiation_error_socket_closed) add_net_test_case(tls_client_channel_negotiation_success) add_net_test_case(tls_server_multiple_connections) add_net_test_case(tls_server_hangup_during_negotiation) add_net_test_case(tls_client_channel_no_verify) add_net_test_case(test_tls_negotiation_timeout) add_net_test_case(alpn_successfully_negotiates) add_net_test_case(alpn_no_protocol_message) add_net_test_case(test_ecc_cert_import) add_test_case(alpn_error_creating_handler) add_test_case(tls_destroy_null_context) add_test_case(tls_channel_statistics_test) add_test_case(tls_certificate_chain_test) else() add_test_case(byo_tls_handler_test) endif() add_test_case(future_by_value) add_test_case(future_void) add_test_case(future_callback_fires_immediately) add_test_case(future_callback_fires_on_another_thread) add_test_case(future_register_callback_if_not_done) add_test_case(future_register_event_loop_callback_after_done) add_test_case(future_register_event_loop_callback_before_done) add_test_case(future_register_event_loop_callback_always_scheduled) add_test_case(future_register_channel_callback) add_test_case(future_wait_timeout) add_test_case(future_pointer_with_destroy) add_test_case(future_pointer_with_release) add_test_case(future_get_result_by_move) add_test_case(future_can_die_incomplete) add_test_case(future_by_pointer_accepts_null_result) add_test_case(future_set_multiple_times) add_test_case(future_set_error) add_test_case(test_input_stream_memory_simple) add_test_case(test_input_stream_memory_iterate) add_test_case(test_input_stream_memory_seek_beginning) add_test_case(test_input_stream_memory_seek_end) add_test_case(test_input_stream_memory_seek_multiple_times) add_test_case(test_input_stream_memory_seek_past_end) add_test_case(test_input_stream_memory_seek_before_start) add_test_case(test_input_stream_file_simple) add_test_case(test_input_stream_file_iterate) add_test_case(test_input_stream_file_seek_beginning) add_test_case(test_input_stream_file_seek_end) add_test_case(test_input_stream_memory_length) add_test_case(test_input_stream_file_length) add_test_case(test_input_stream_binary) 
add_test_case(test_input_stream_read_only) add_test_case(async_input_stream_fill_completes_on_thread) add_test_case(async_input_stream_fill_completes_immediately) add_test_case(async_input_stream_fill_completes_randomly) add_test_case(async_input_stream_fill_eof_requires_extra_read) add_test_case(async_input_stream_fill_reports_error) add_test_case(open_channel_statistics_test) add_test_case(shared_library_open_failure) if(BUILD_SHARED_LIBS) add_test_case(shared_library_open_success) add_test_case(shared_library_find_function_failure) add_test_case(shared_library_find_function_success) endif() add_test_case(test_exponential_backoff_retry_too_many_retries_no_jitter) add_test_case(test_exponential_backoff_retry_too_many_retries_full_jitter) add_test_case(test_exponential_backoff_retry_too_many_retries_decorrelated_jitter) add_test_case(test_exponential_backoff_retry_too_many_retries_default_jitter) add_test_case(test_exponential_backoff_retry_client_errors_do_not_count) add_test_case(test_exponential_backoff_retry_no_jitter_time_taken) add_test_case(test_exponential_max_backoff_retry_no_jitter) add_test_case(test_exponential_backoff_retry_invalid_options) add_test_case(test_standard_retry_strategy_setup_shutdown) add_test_case(test_standard_retry_strategy_failure_exhausts_bucket) add_test_case(test_standard_retry_strategy_failure_recovers) # See PKCS11.md for instructions on running these tests if(ENABLE_PKCS11_TESTS) add_test_case(pkcs11_lib_sanity_check) add_test_case(pkcs11_lib_behavior_default) add_test_case(pkcs11_lib_behavior_omit_initialize) add_test_case(pkcs11_lib_behavior_strict_initialize_finalize) add_test_case(pkcs11_find_private_key) add_test_case(pkcs11_find_private_key_for_different_rsa_types) add_test_case(pkcs11_find_private_key_for_ec) add_test_case(pkcs11_find_multiple_private_key) add_test_case(pkcs11_sign_rsa_sha1) add_test_case(pkcs11_sign_rsa_sha224) add_test_case(pkcs11_sign_rsa_sha256) add_test_case(pkcs11_sign_rsa_sha384) add_test_case(pkcs11_sign_rsa_sha512) add_test_case(pkcs11_asn1_bigint) add_test_case(pkcs11_sign_ec_256) add_test_case(pkcs11_rsa_decrypt) add_test_case(pkcs11_find_slot) add_test_case(pkcs11_find_slot_many_tokens) add_test_case(pkcs11_session_tests) add_test_case(pkcs11_login_tests) # TLS with PKCS#11 not currently supported on every platform if(USE_S2N) add_test_case(pkcs11_tls_rsa_negotiation_succeeds) add_test_case(pkcs11_tls_ec_negotiation_succeeds) endif() endif() set(TEST_BINARY_NAME ${PROJECT_NAME}-tests) generate_test_driver(${TEST_BINARY_NAME}) if(USE_S2N) target_compile_definitions(${PROJECT_NAME}-tests PRIVATE "-DUSE_S2N") endif() # SSL certificates to use for testing. add_custom_command(TARGET ${TEST_BINARY_NAME} PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/resources $) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/alpn_handler_test.c000066400000000000000000000311611456575232400246350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include struct alpn_channel_setup_test_args { struct aws_condition_variable condition_variable; struct aws_mutex mutex; int error_code; bool shutdown_finished; bool setup_completed; }; static void s_alpn_channel_setup_test_on_setup_completed(struct aws_channel *channel, int error_code, void *ctx) { (void)channel; struct alpn_channel_setup_test_args *setup_test_args = (struct alpn_channel_setup_test_args *)ctx; aws_mutex_lock(&setup_test_args->mutex); setup_test_args->setup_completed = true; setup_test_args->error_code |= error_code; aws_mutex_unlock(&setup_test_args->mutex); aws_condition_variable_notify_one(&setup_test_args->condition_variable); } static bool s_alpn_test_setup_completed_predicate(void *arg) { struct alpn_channel_setup_test_args *setup_test_args = (struct alpn_channel_setup_test_args *)arg; return setup_test_args->setup_completed; } struct alpn_test_on_negotiation_args { struct aws_allocator *allocator; struct aws_channel_slot *new_slot; struct aws_channel_handler *new_handler; struct aws_byte_buf protocol; }; static int s_alpn_test_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool abort_immediately) { (void)handler; return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, abort_immediately); } static size_t s_alpn_test_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static size_t s_alpn_test_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return SIZE_MAX; } static void s_alpn_test_destroy(struct aws_channel_handler *handler) { aws_mem_release(handler->alloc, (void *)handler); } struct aws_channel_handler_vtable s_alpn_test_vtable = { .destroy = s_alpn_test_destroy, .shutdown = s_alpn_test_shutdown, .initial_window_size = s_alpn_test_initial_window_size, .message_overhead = s_alpn_test_message_overhead, }; static struct aws_channel_handler *s_alpn_tls_successful_negotiation( struct aws_channel_slot *new_slot, struct aws_byte_buf *protocol, void *ctx) { struct alpn_test_on_negotiation_args *negotiation_args = (struct alpn_test_on_negotiation_args *)ctx; struct aws_channel_handler *handler = aws_mem_calloc(negotiation_args->allocator, 1, sizeof(struct aws_channel_handler)); negotiation_args->new_handler = handler; negotiation_args->protocol = *protocol; negotiation_args->new_slot = new_slot; handler->vtable = &s_alpn_test_vtable; handler->alloc = negotiation_args->allocator; return handler; } static bool s_alpn_test_shutdown_predicate(void *arg) { struct alpn_channel_setup_test_args *test_args = (struct alpn_channel_setup_test_args *)arg; return test_args->shutdown_finished; } static void s_on_server_channel_on_shutdown(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; (void)error_code; struct alpn_channel_setup_test_args *test_args = (struct alpn_channel_setup_test_args *)user_data; aws_mutex_lock(&test_args->mutex); test_args->shutdown_finished = true; aws_mutex_unlock(&test_args->mutex); aws_condition_variable_notify_one(&test_args->condition_variable); } static int s_test_alpn_successfully_negotiates(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel 
*channel; struct alpn_channel_setup_test_args test_args = { .error_code = 0, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .setup_completed = false, .shutdown_finished = false, }; struct aws_channel_options args = { .on_setup_completed = s_alpn_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = s_on_server_channel_on_shutdown, .shutdown_user_data = &test_args, .event_loop = event_loop, }; channel = aws_channel_new(allocator, &args); ASSERT_NOT_NULL(channel); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_alpn_test_setup_completed_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); struct aws_channel_slot *slot = aws_channel_slot_new(channel); ASSERT_NOT_NULL(slot); struct alpn_test_on_negotiation_args on_negotiation_args = { .new_slot = NULL, .protocol = {0}, .new_handler = NULL, .allocator = allocator}; struct aws_channel_handler *handler = aws_tls_alpn_handler_new(allocator, s_alpn_tls_successful_negotiation, &on_negotiation_args); ASSERT_NOT_NULL(handler); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, handler)); struct aws_tls_negotiated_protocol_message protocol_message = {.protocol = aws_byte_buf_from_c_str("h2")}; struct aws_io_message message = { .allocator = NULL, .user_data = NULL, .message_tag = AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE, .message_data = aws_byte_buf_from_array( (const uint8_t *)&protocol_message, sizeof(struct aws_tls_negotiated_protocol_message)), .copy_mark = 0, .on_completion = NULL, .message_type = AWS_IO_MESSAGE_APPLICATION_DATA, }; ASSERT_SUCCESS(aws_channel_handler_process_read_message(handler, slot, &message)); ASSERT_BIN_ARRAYS_EQUALS( protocol_message.protocol.buffer, protocol_message.protocol.len, on_negotiation_args.protocol.buffer, on_negotiation_args.protocol.len); aws_channel_shutdown(channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_alpn_test_shutdown_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(alpn_successfully_negotiates, s_test_alpn_successfully_negotiates) static int s_test_alpn_no_protocol_message(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel; struct alpn_channel_setup_test_args test_args = { .error_code = 0, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .shutdown_finished = false, }; struct aws_channel_options args = { .on_setup_completed = s_alpn_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = s_on_server_channel_on_shutdown, .shutdown_user_data = &test_args, .event_loop = event_loop, }; channel = aws_channel_new(allocator, &args); ASSERT_NOT_NULL(channel); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_alpn_test_setup_completed_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); 
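    /* Channel setup has completed; install the ALPN handler on a fresh slot and feed it a message that carries no negotiated-protocol payload. */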
struct aws_channel_slot *slot = aws_channel_slot_new(channel); ASSERT_NOT_NULL(slot); struct alpn_test_on_negotiation_args on_negotiation_args = { .new_slot = NULL, .protocol = {0}, .new_handler = NULL, .allocator = allocator}; struct aws_channel_handler *handler = aws_tls_alpn_handler_new(allocator, s_alpn_tls_successful_negotiation, &on_negotiation_args); ASSERT_NOT_NULL(handler); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, handler)); /*this is just for the test since it's the only slot in the channel */ handler->vtable->shutdown = s_alpn_test_shutdown; struct aws_io_message message = { .allocator = NULL, .user_data = NULL, .message_tag = 0, .copy_mark = 0, .on_completion = NULL, .message_type = AWS_IO_MESSAGE_APPLICATION_DATA, }; ASSERT_ERROR(AWS_IO_MISSING_ALPN_MESSAGE, aws_channel_handler_process_read_message(handler, slot, &message)); aws_channel_shutdown(channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_alpn_test_shutdown_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(alpn_no_protocol_message, s_test_alpn_no_protocol_message) static struct aws_channel_handler *s_alpn_tls_failed_negotiation( struct aws_channel_slot *new_slot, struct aws_byte_buf *protocol, void *ctx) { (void)new_slot; (void)protocol; (void)ctx; return NULL; } static int s_test_alpn_error_creating_handler(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel; struct alpn_channel_setup_test_args test_args = { .error_code = 0, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .shutdown_finished = false, }; struct aws_channel_options args = { .on_setup_completed = s_alpn_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = s_on_server_channel_on_shutdown, .shutdown_user_data = &test_args, .event_loop = event_loop, }; channel = aws_channel_new(allocator, &args); ASSERT_NOT_NULL(channel); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_alpn_test_setup_completed_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); struct aws_channel_slot *slot = aws_channel_slot_new(channel); ASSERT_NOT_NULL(slot); struct aws_tls_negotiated_protocol_message protocol_message = {.protocol = aws_byte_buf_from_c_str("h2")}; struct aws_io_message message = { .allocator = NULL, .user_data = NULL, .message_tag = AWS_TLS_NEGOTIATED_PROTOCOL_MESSAGE, .message_data = aws_byte_buf_from_array( (const uint8_t *)&protocol_message, sizeof(struct aws_tls_negotiated_protocol_message)), .copy_mark = 0, .on_completion = NULL, .message_type = AWS_IO_MESSAGE_APPLICATION_DATA, }; struct alpn_test_on_negotiation_args on_negotiation_args = { .new_slot = NULL, .protocol = {0}, .new_handler = NULL, .allocator = allocator}; struct aws_channel_handler *handler = aws_tls_alpn_handler_new(allocator, s_alpn_tls_failed_negotiation, &on_negotiation_args); ASSERT_NOT_NULL(handler); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, 
handler)); /*this is just for the test since it's the only slot in the channel */ handler->vtable->shutdown = s_alpn_test_shutdown; ASSERT_ERROR( AWS_IO_UNHANDLED_ALPN_PROTOCOL_MESSAGE, aws_channel_handler_process_read_message(handler, slot, &message)); aws_channel_shutdown(channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_alpn_test_shutdown_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(alpn_error_creating_handler, s_test_alpn_error_creating_handler) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/async_stream_test.c000066400000000000000000000154741456575232400247070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #define ONE_SEC_IN_NS ((uint64_t)AWS_TIMESTAMP_NANOS) #define MAX_TIMEOUT_NS (10 * ONE_SEC_IN_NS) /* Common implementation for async_input_stream_fill_completes_on_XYZ() tests */ static int s_test_async_input_stream_read_to_fill( struct aws_allocator *alloc, struct aws_async_input_stream_tester_options *options) { aws_io_library_init(alloc); options->base.source_bytes = aws_byte_cursor_from_c_str("123456789"); struct aws_async_input_stream *async_stream = aws_async_input_stream_new_tester(alloc, options); /* read into slightly short buffer */ struct aws_byte_buf buf; aws_byte_buf_init(&buf, alloc, 5); struct aws_future_bool *read_future = aws_async_input_stream_read_to_fill(async_stream, &buf); ASSERT_TRUE(aws_future_bool_wait(read_future, MAX_TIMEOUT_NS)); ASSERT_INT_EQUALS(0, aws_future_bool_get_error(read_future)); ASSERT_BIN_ARRAYS_EQUALS("12345", 5, buf.buffer, buf.len); bool eof = aws_future_bool_get_result(read_future); ASSERT_FALSE(eof); aws_future_bool_release(read_future); /* read the rest */ buf.len = 0; read_future = aws_async_input_stream_read_to_fill(async_stream, &buf); ASSERT_TRUE(aws_future_bool_wait(read_future, MAX_TIMEOUT_NS)); ASSERT_INT_EQUALS(0, aws_future_bool_get_error(read_future)); ASSERT_BIN_ARRAYS_EQUALS("6789", 4, buf.buffer, buf.len); eof = aws_future_bool_get_result(read_future); ASSERT_TRUE(eof); aws_future_bool_release(read_future); /* cleanup */ aws_byte_buf_clean_up(&buf); aws_async_input_stream_release(async_stream); aws_io_library_clean_up(); return 0; } /* Test aws_async_input_stream_read_to_fill() * Ensure it works when reads always complete on another thread. 
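 * (the tester is created with AWS_ASYNC_READ_COMPLETES_ON_ANOTHER_THREAD, so every read finishes on a background thread)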
*/ AWS_TEST_CASE(async_input_stream_fill_completes_on_thread, s_test_async_input_stream_fill_completes_on_thread) static int s_test_async_input_stream_fill_completes_on_thread(struct aws_allocator *alloc, void *ctx) { (void)ctx; struct aws_async_input_stream_tester_options options = { .completion_strategy = AWS_ASYNC_READ_COMPLETES_ON_ANOTHER_THREAD, .base = {.max_bytes_per_read = 1}, }; return s_test_async_input_stream_read_to_fill(alloc, &options); } /* Test aws_async_input_stream_read_to_fill() * Ensure it works when reads always complete immediately */ AWS_TEST_CASE(async_input_stream_fill_completes_immediately, s_test_async_input_stream_fill_completes_immediately) static int s_test_async_input_stream_fill_completes_immediately(struct aws_allocator *alloc, void *ctx) { (void)ctx; struct aws_async_input_stream_tester_options options = { .completion_strategy = AWS_ASYNC_READ_COMPLETES_IMMEDIATELY, .base = {.max_bytes_per_read = 1}, }; return s_test_async_input_stream_read_to_fill(alloc, &options); } /* Test aws_async_input_stream_read_to_fill() * Ensure it works when it's kinda random which thread completes the read */ AWS_TEST_CASE(async_input_stream_fill_completes_randomly, s_test_async_input_stream_fill_completes_randomly) static int s_test_async_input_stream_fill_completes_randomly(struct aws_allocator *alloc, void *ctx) { (void)ctx; struct aws_async_input_stream_tester_options options = { .completion_strategy = AWS_ASYNC_READ_COMPLETES_ON_RANDOM_THREAD, .base = {.max_bytes_per_read = 1}, }; return s_test_async_input_stream_read_to_fill(alloc, &options); } /* Test aws_async_input_stream_read_to_fill() * Ensure that it works when it takes one more read to realize we're at EOF */ AWS_TEST_CASE(async_input_stream_fill_eof_requires_extra_read, s_test_async_input_stream_fill_eof_requires_extra_read) static int s_test_async_input_stream_fill_eof_requires_extra_read(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_async_input_stream_tester_options options = { .base = { .source_bytes = aws_byte_cursor_from_c_str("123456789"), .eof_requires_extra_read = true, }, }; struct aws_async_input_stream *async_stream = aws_async_input_stream_new_tester(alloc, &options); /* read into buffer of the exact length. 
we shouldn't realize it's at EOF yet */ struct aws_byte_buf buf; aws_byte_buf_init(&buf, alloc, 9); struct aws_future_bool *read_future = aws_async_input_stream_read_to_fill(async_stream, &buf); ASSERT_TRUE(aws_future_bool_wait(read_future, MAX_TIMEOUT_NS)); ASSERT_INT_EQUALS(0, aws_future_bool_get_error(read_future)); ASSERT_BIN_ARRAYS_EQUALS("123456789", 9, buf.buffer, buf.len); bool eof = aws_future_bool_get_result(read_future); ASSERT_FALSE(eof); aws_future_bool_release(read_future); /* read again, get no data, but learn it's at EOF */ buf.len = 0; read_future = aws_async_input_stream_read_to_fill(async_stream, &buf); ASSERT_TRUE(aws_future_bool_wait(read_future, MAX_TIMEOUT_NS)); ASSERT_INT_EQUALS(0, aws_future_bool_get_error(read_future)); ASSERT_UINT_EQUALS(0, buf.len); eof = aws_future_bool_get_result(read_future); ASSERT_TRUE(eof); aws_future_bool_release(read_future); /* cleanup */ aws_byte_buf_clean_up(&buf); aws_async_input_stream_release(async_stream); aws_io_library_clean_up(); return 0; } /* Test aws_async_input_stream_read_to_fill() * Ensure that it reports errors from an underlying read() call */ AWS_TEST_CASE(async_input_stream_fill_reports_error, s_test_async_input_stream_fill_reports_error) static int s_test_async_input_stream_fill_reports_error(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_async_input_stream_tester_options options = { .base = { .source_bytes = aws_byte_cursor_from_c_str("123456789"), .max_bytes_per_read = 1, .fail_on_nth_read = 2, .fail_with_error_code = 999, }, }; struct aws_async_input_stream *async_stream = aws_async_input_stream_new_tester(alloc, &options); /* read into buffer */ struct aws_byte_buf buf; aws_byte_buf_init(&buf, alloc, 512); struct aws_future_bool *read_future = aws_async_input_stream_read_to_fill(async_stream, &buf); ASSERT_TRUE(aws_future_bool_wait(read_future, MAX_TIMEOUT_NS)); ASSERT_INT_EQUALS(999, aws_future_bool_get_error(read_future)); aws_future_bool_release(read_future); /* cleanup */ aws_byte_buf_clean_up(&buf); aws_async_input_stream_release(async_stream); aws_io_library_clean_up(); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/byo_crypto_test.c000066400000000000000000000415241456575232400244030ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifdef _MSC_VER /* allow this file to be empty */ # pragma warning(disable : 4206) #endif /* MSVC_VER */ /* these tests only get built and run with the BYO_CRYPTO compiler define. 
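 * (tests/CMakeLists.txt registers byo_tls_handler_test only when the build is configured with BYO_CRYPTO)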
*/ #ifdef BYO_CRYPTO # include # include # include # include # include # include # include # include # include "statistics_handler_test.h" # include struct byo_crypto_test_args { struct aws_allocator *allocator; struct aws_mutex *mutex; struct aws_condition_variable *condition_variable; struct aws_channel *channel; struct aws_channel_handler *rw_handler; struct aws_channel_slot *rw_slot; struct aws_tls_ctx tls_ctx; struct aws_tls_connection_options tls_options; aws_tls_on_negotiation_result_fn *negotiation_result_fn; void *cb_data; int error_code; bool shutdown_invoked; bool listener_destroyed; bool setup_completed; }; /* common structure for test */ struct byo_crypto_common_tester { struct aws_mutex mutex; struct aws_condition_variable condition_variable; struct aws_event_loop_group *el_group; }; static struct byo_crypto_common_tester c_tester; static int s_byo_crypto_common_tester_init(struct aws_allocator *allocator, struct byo_crypto_common_tester *tester) { AWS_ZERO_STRUCT(*tester); aws_io_library_init(allocator); tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; tester->mutex = mutex; tester->condition_variable = condition_variable; return AWS_OP_SUCCESS; } static int s_byo_crypto_common_tester_clean_up(struct byo_crypto_common_tester *tester) { aws_event_loop_group_release(tester->el_group); aws_mutex_clean_up(&tester->mutex); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } /* common structure for a local server */ struct local_server_tester { struct aws_socket_options socket_options; struct aws_socket_endpoint endpoint; struct aws_server_bootstrap *server_bootstrap; struct aws_socket *listener; }; static bool s_channel_setup_predicate(void *user_data) { struct byo_crypto_test_args *setup_test_args = user_data; return setup_test_args->setup_completed; } static bool s_channel_shutdown_predicate(void *user_data) { struct byo_crypto_test_args *setup_test_args = user_data; return setup_test_args->shutdown_invoked; } static bool s_listener_destroy_predicate(void *user_data) { struct byo_crypto_test_args *setup_test_args = user_data; return setup_test_args->listener_destroyed; } static void s_byo_crypto_test_client_setup_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; struct byo_crypto_test_args *setup_test_args = user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->channel = channel; setup_test_args->setup_completed = true; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_byo_crypto_test_server_setup_callback( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; struct byo_crypto_test_args *setup_test_args = user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->channel = channel; setup_test_args->setup_completed = true; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_byo_crypto_test_client_shutdown_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; struct byo_crypto_test_args *setup_test_args = user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->shutdown_invoked 
= true; setup_test_args->error_code = error_code; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_byo_crypto_test_server_shutdown_callback( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; (void)channel; struct byo_crypto_test_args *setup_test_args = user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->shutdown_invoked = true; setup_test_args->error_code = error_code; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } struct byo_crypto_test_rw_args { struct aws_mutex *mutex; struct aws_condition_variable *condition_variable; struct aws_byte_buf received_message; struct byo_crypto_test_args *test_args; bool invocation_happened; bool shutdown_finished; }; static bool s_byo_crypto_test_predicate(void *user_data) { struct byo_crypto_test_rw_args *rw_args = user_data; return rw_args->invocation_happened; } static struct aws_byte_buf s_byo_crypto_test_handle_read( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *data_read, void *user_data) { (void)handler; (void)slot; struct byo_crypto_test_rw_args *rw_args = user_data; aws_mutex_lock(rw_args->mutex); memcpy(rw_args->received_message.buffer + rw_args->received_message.len, data_read->buffer, data_read->len); rw_args->received_message.len += data_read->len; rw_args->invocation_happened = true; aws_condition_variable_notify_one(rw_args->condition_variable); aws_mutex_unlock(rw_args->mutex); if (rw_args->test_args->negotiation_result_fn) { rw_args->test_args->negotiation_result_fn(handler, slot, AWS_ERROR_SUCCESS, rw_args->test_args->cb_data); rw_args->test_args->negotiation_result_fn = NULL; } return rw_args->received_message; } static struct aws_byte_buf s_byo_crypto_test_handle_write( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *data_read, void *user_data) { (void)handler; (void)slot; (void)data_read; (void)user_data; /*do nothing*/ return (struct aws_byte_buf){0}; } static void s_byo_crypto_test_server_listener_destroy_callback( struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; struct byo_crypto_test_args *setup_test_args = user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->listener_destroyed = true; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static int s_rw_args_init( struct byo_crypto_test_rw_args *args, struct byo_crypto_common_tester *s_c_tester, struct aws_byte_buf received_message, int expected_read) { AWS_ZERO_STRUCT(*args); args->mutex = &s_c_tester->mutex; args->condition_variable = &s_c_tester->condition_variable; args->received_message = received_message; return AWS_OP_SUCCESS; } static int s_byo_crypto_test_args_init( struct byo_crypto_test_args *args, struct byo_crypto_common_tester *s_c_tester, struct aws_channel_handler *rw_handler) { AWS_ZERO_STRUCT(*args); args->mutex = &s_c_tester->mutex; args->condition_variable = &s_c_tester->condition_variable; args->rw_handler = rw_handler; return AWS_OP_SUCCESS; } static int s_local_server_tester_init( struct aws_allocator *allocator, struct local_server_tester *tester, struct byo_crypto_test_args *args, struct byo_crypto_common_tester *s_c_tester, bool enable_back_pressure) { AWS_ZERO_STRUCT(*tester); tester->socket_options.connect_timeout_ms = 
3000; tester->socket_options.type = AWS_SOCKET_STREAM; tester->socket_options.domain = AWS_SOCKET_LOCAL; aws_socket_endpoint_init_local_address_for_test(&tester->endpoint); tester->server_bootstrap = aws_server_bootstrap_new(allocator, s_c_tester->el_group); ASSERT_NOT_NULL(tester->server_bootstrap); aws_atomic_init_int((volatile struct aws_atomic_var *)&args->tls_ctx.ref_count, 1u); args->tls_options.ctx = &args->tls_ctx; struct aws_server_socket_channel_bootstrap_options bootstrap_options = { .bootstrap = tester->server_bootstrap, .enable_read_back_pressure = enable_back_pressure, .port = tester->endpoint.port, .host_name = tester->endpoint.address, .socket_options = &tester->socket_options, .incoming_callback = s_byo_crypto_test_server_setup_callback, .shutdown_callback = s_byo_crypto_test_server_shutdown_callback, .destroy_callback = s_byo_crypto_test_server_listener_destroy_callback, .tls_options = &args->tls_options, .user_data = args, }; tester->listener = aws_server_bootstrap_new_socket_listener(&bootstrap_options); ASSERT_NOT_NULL(tester->listener); return AWS_OP_SUCCESS; } static int s_local_server_tester_clean_up(struct local_server_tester *tester) { aws_server_bootstrap_release(tester->server_bootstrap); return AWS_OP_SUCCESS; } static const char *s_write_tag = "I'm a big teapot"; static int s_start_negotiation_fn(struct aws_channel_handler *handler, void *user_data) { struct aws_byte_buf write_tag = aws_byte_buf_from_c_str(s_write_tag); rw_handler_write(handler, handler->slot, &write_tag); struct byo_crypto_test_args *test_args = user_data; if (test_args->negotiation_result_fn) { test_args->negotiation_result_fn(handler, handler->slot, AWS_ERROR_SUCCESS, test_args->cb_data); test_args->negotiation_result_fn = NULL; } return AWS_OP_SUCCESS; } struct aws_channel_handler *s_tls_handler_new( struct aws_allocator *allocator, struct aws_tls_connection_options *options, struct aws_channel_slot *slot, void *user_data) { (void)allocator; (void)options; (void)slot; struct byo_crypto_test_args *test_args = user_data; test_args->negotiation_result_fn = options->on_negotiation_result; test_args->cb_data = options->user_data; return test_args->rw_handler; } static int s_byo_tls_handler_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_byo_crypto_common_tester_init(allocator, &c_tester); struct aws_byte_buf read_tag = aws_byte_buf_from_c_str("I'm a little teapot."); struct aws_byte_buf write_tag = aws_byte_buf_from_c_str(s_write_tag); uint8_t incoming_received_message[128] = {0}; uint8_t outgoing_received_message[128] = {0}; struct byo_crypto_test_rw_args incoming_rw_args; ASSERT_SUCCESS(s_rw_args_init( &incoming_rw_args, &c_tester, aws_byte_buf_from_empty_array(incoming_received_message, sizeof(incoming_received_message)), (int)write_tag.len)); struct byo_crypto_test_rw_args outgoing_rw_args; ASSERT_SUCCESS(s_rw_args_init( &outgoing_rw_args, &c_tester, aws_byte_buf_from_empty_array(outgoing_received_message, sizeof(outgoing_received_message)), (int)read_tag.len)); /* doesn't matter what these are, I'm turning back pressure off anyways. 
*/ static size_t s_outgoing_initial_read_window = 128; static size_t s_incoming_initial_read_window = 128; struct aws_channel_handler *outgoing_rw_handler = rw_handler_new( allocator, s_byo_crypto_test_handle_read, s_byo_crypto_test_handle_write, true, s_outgoing_initial_read_window, &outgoing_rw_args); ASSERT_NOT_NULL(outgoing_rw_handler); struct aws_channel_handler *incoming_rw_handler = rw_handler_new( allocator, s_byo_crypto_test_handle_read, s_byo_crypto_test_handle_write, true, s_incoming_initial_read_window, &incoming_rw_args); ASSERT_NOT_NULL(outgoing_rw_handler); struct byo_crypto_test_args incoming_args; ASSERT_SUCCESS(s_byo_crypto_test_args_init(&incoming_args, &c_tester, incoming_rw_handler)); incoming_rw_args.test_args = &incoming_args; struct byo_crypto_test_args outgoing_args; ASSERT_SUCCESS(s_byo_crypto_test_args_init(&outgoing_args, &c_tester, outgoing_rw_handler)); outgoing_rw_args.test_args = &outgoing_args; struct aws_tls_byo_crypto_setup_options client_setup_options = { .new_handler_fn = s_tls_handler_new, .start_negotiation_fn = s_start_negotiation_fn, .user_data = &outgoing_args, }; aws_tls_byo_crypto_set_client_setup_options(&client_setup_options); struct aws_tls_byo_crypto_setup_options server_setup_options = { .new_handler_fn = s_tls_handler_new, .user_data = &incoming_args, }; aws_tls_byo_crypto_set_server_setup_options(&server_setup_options); struct local_server_tester local_server_tester; ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &incoming_args, &c_tester, true)); aws_atomic_init_int((volatile struct aws_atomic_var *)&outgoing_args.tls_ctx.ref_count, 1u); outgoing_args.tls_options.ctx = &outgoing_args.tls_ctx; struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = NULL, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = local_server_tester.endpoint.address; channel_options.port = 0; channel_options.socket_options = &local_server_tester.socket_options; channel_options.setup_callback = s_byo_crypto_test_client_setup_callback; channel_options.shutdown_callback = s_byo_crypto_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; channel_options.tls_options = &outgoing_args.tls_options; channel_options.enable_read_back_pressure = false; ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &incoming_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_byo_crypto_test_predicate, &incoming_rw_args)); rw_handler_write(incoming_args.rw_handler, incoming_args.rw_handler->slot, &read_tag); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_byo_crypto_test_predicate, &outgoing_rw_args)); incoming_rw_args.invocation_happened = false; outgoing_rw_args.invocation_happened = false; ASSERT_BIN_ARRAYS_EQUALS( write_tag.buffer, write_tag.len, 
incoming_rw_args.received_message.buffer, incoming_rw_args.received_message.len); ASSERT_BIN_ARRAYS_EQUALS( read_tag.buffer, read_tag.len, outgoing_rw_args.received_message.buffer, outgoing_rw_args.received_message.len); /* only shut down one side, this should cause the other side to shutdown as well.*/ ASSERT_SUCCESS(aws_channel_shutdown(incoming_args.channel, AWS_OP_SUCCESS)); ASSERT_SUCCESS(aws_channel_shutdown(outgoing_args.channel, AWS_OP_SUCCESS)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &incoming_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &outgoing_args)); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &incoming_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_local_server_tester_clean_up(&local_server_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_byo_crypto_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(byo_tls_handler_test, s_byo_tls_handler_test) #endif /* BYO_CRYPTO */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/channel_test.c000066400000000000000000000764571456575232400236370ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include "mock_dns_resolver.h" #include "read_write_test_handler.h" struct channel_setup_test_args { struct aws_mutex mutex; struct aws_condition_variable condition_variable; bool setup_completed; /* protected by mutex */ bool shutdown_completed; /* protected by mutex */ int error_code; /* protected by mutex */ enum aws_task_status task_status; }; static void s_channel_setup_test_on_setup_completed(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; struct channel_setup_test_args *setup_test_args = (struct channel_setup_test_args *)user_data; aws_mutex_lock(&setup_test_args->mutex); setup_test_args->error_code |= error_code; setup_test_args->setup_completed = true; aws_mutex_unlock(&setup_test_args->mutex); aws_condition_variable_notify_one(&setup_test_args->condition_variable); } static bool s_channel_setup_test_setup_completed_predicate(void *arg) { struct channel_setup_test_args *setup_test_args = (struct channel_setup_test_args *)arg; return setup_test_args->setup_completed; } /* Create a new channel and wait until its setup completes */ static int s_channel_setup_create_and_wait( struct aws_allocator *allocator, struct aws_channel_options *args, struct channel_setup_test_args *test_args, struct aws_channel **returned_channel) { ASSERT_NULL(*returned_channel); *returned_channel = aws_channel_new(allocator, args); ASSERT_NOT_NULL(*returned_channel); ASSERT_SUCCESS(aws_mutex_lock(&test_args->mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args->condition_variable, &test_args->mutex, s_channel_setup_test_setup_completed_predicate, test_args)); ASSERT_INT_EQUALS(0, test_args->error_code); ASSERT_SUCCESS(aws_mutex_unlock(&test_args->mutex)); return AWS_OP_SUCCESS; } static int s_test_channel_setup(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = 
aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel_1 = NULL; struct aws_channel *channel_2 = NULL; struct channel_setup_test_args test_args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, }; struct aws_channel_options args = { .on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = NULL, .shutdown_user_data = NULL, .event_loop = event_loop, }; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel_1)); ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel_2)); aws_channel_destroy(channel_1); aws_channel_destroy(channel_2); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_setup, s_test_channel_setup) static int s_test_channel_single_slot_cleans_up(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel = NULL; struct channel_setup_test_args test_args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, }; struct aws_channel_options args = { .on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = NULL, .shutdown_user_data = NULL, .event_loop = event_loop, }; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel)); struct aws_channel_slot *slot; slot = aws_channel_slot_new(channel); ASSERT_NOT_NULL(slot); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_single_slot_cleans_up, s_test_channel_single_slot_cleans_up) static int s_test_channel_slots_clean_up(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel = NULL; struct channel_setup_test_args test_args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, }; struct aws_channel_options args = { .on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = NULL, .shutdown_user_data = NULL, .event_loop = event_loop, }; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel)); struct aws_channel_slot *slot_1, *slot_2, *slot_3, *slot_4, *slot_5; slot_1 = aws_channel_slot_new(channel); slot_2 = aws_channel_slot_new(channel); slot_3 = aws_channel_slot_new(channel); slot_4 = aws_channel_slot_new(channel); slot_5 = aws_channel_slot_new(channel); ASSERT_NOT_NULL(slot_1); ASSERT_NOT_NULL(slot_2); ASSERT_NOT_NULL(slot_3); ASSERT_NOT_NULL(slot_4); ASSERT_NOT_NULL(slot_5); 
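    /* Exercise insert_right/insert_left/remove/replace on the slots, then verify the adjacency pointers that remain. */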
ASSERT_SUCCESS(aws_channel_slot_insert_right(slot_1, slot_2)); ASSERT_SUCCESS(aws_channel_slot_insert_right(slot_2, slot_3)); ASSERT_SUCCESS(aws_channel_slot_insert_left(slot_3, slot_4)); ASSERT_SUCCESS(aws_channel_slot_remove(slot_2)); ASSERT_PTR_EQUALS(slot_1, slot_4->adj_left); ASSERT_PTR_EQUALS(slot_1->adj_right, slot_4); ASSERT_PTR_EQUALS(slot_4->adj_left, slot_1); ASSERT_NULL(slot_1->adj_left); ASSERT_PTR_EQUALS(slot_4, slot_3->adj_left); ASSERT_PTR_EQUALS(slot_4->adj_right, slot_3); ASSERT_PTR_EQUALS(slot_3->adj_left, slot_4); ASSERT_NULL(slot_3->adj_right); ASSERT_SUCCESS(aws_channel_slot_replace(slot_4, slot_5)); ASSERT_PTR_EQUALS(slot_1, slot_5->adj_left); ASSERT_PTR_EQUALS(slot_1->adj_right, slot_5); ASSERT_PTR_EQUALS(slot_5->adj_left, slot_1); ASSERT_PTR_EQUALS(slot_5, slot_3->adj_left); ASSERT_PTR_EQUALS(slot_5->adj_right, slot_3); ASSERT_PTR_EQUALS(slot_3->adj_left, slot_5); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_slots_clean_up, s_test_channel_slots_clean_up) static void s_wait_a_bit_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_atomic_var *task_executed = arg; aws_atomic_store_int(task_executed, true); } static int s_wait_a_bit(struct aws_event_loop *loop) { struct aws_task task; struct aws_atomic_var task_executed = AWS_ATOMIC_INIT_INT(false); aws_task_init(&task, s_wait_a_bit_task, &task_executed, "wait_a_bit"); uint64_t run_at_ns; ASSERT_SUCCESS(aws_event_loop_current_clock_time(loop, &run_at_ns)); run_at_ns += aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_event_loop_schedule_task_future(loop, &task, run_at_ns); while (!aws_atomic_load_int(&task_executed)) { ; /* block until signaled */ } return AWS_OP_SUCCESS; } static bool s_atomic_var_is_set_predicate(void *arg) { struct aws_atomic_var *var = arg; return aws_atomic_load_int(var); } static int s_test_channel_refcount(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); /* Create channel */ struct channel_setup_test_args test_args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, }; struct aws_channel_options args = { .on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = NULL, .shutdown_user_data = NULL, .event_loop = event_loop, }; struct aws_channel *channel = NULL; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel)); /* Add handler to channel */ struct aws_channel_slot *slot = aws_channel_slot_new(channel); ASSERT_NOT_NULL(slot); struct aws_channel_handler *handler = rw_handler_new(allocator, NULL, NULL, false, 10000, NULL); struct aws_atomic_var destroy_called = AWS_ATOMIC_INIT_INT(0); struct aws_mutex destroy_mutex = AWS_MUTEX_INIT; struct aws_condition_variable destroy_condition_variable = AWS_CONDITION_VARIABLE_INIT; rw_handler_enable_wait_on_destroy(handler, &destroy_called, &destroy_condition_variable); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, handler)); /* Shut down channel */ ASSERT_SUCCESS(aws_channel_shutdown(channel, 0)); /* Acquire 2 holds on channel and try to destroy it. 
The holds should prevent memory from being freed yet */ aws_channel_acquire_hold(channel); aws_channel_acquire_hold(channel); aws_channel_destroy(channel); ASSERT_SUCCESS(s_wait_a_bit(event_loop)); ASSERT_FALSE(aws_atomic_load_int(&destroy_called)); /* Release hold 1/2. Handler shouldn't get destroyed. */ aws_channel_release_hold(channel); ASSERT_SUCCESS(s_wait_a_bit(event_loop)); ASSERT_FALSE(aws_atomic_load_int(&destroy_called)); /* Release hold 2/2. The handler and channel should be destroyed. */ aws_channel_release_hold(channel); ASSERT_SUCCESS(aws_mutex_lock(&destroy_mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &destroy_condition_variable, &destroy_mutex, s_atomic_var_is_set_predicate, &destroy_called)); ASSERT_SUCCESS(aws_mutex_unlock(&destroy_mutex)); ASSERT_TRUE(aws_atomic_load_int(&destroy_called)); while (!aws_atomic_load_int(&destroy_called)) { ; /* block until signaled */ } aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_refcount_delays_clean_up, s_test_channel_refcount) static void s_channel_post_shutdown_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; struct channel_setup_test_args *test_args = arg; test_args->task_status = status; } static void s_channel_test_shutdown(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; (void)error_code; struct channel_setup_test_args *test_args = user_data; aws_mutex_lock(&test_args->mutex); test_args->shutdown_completed = true; aws_mutex_unlock(&test_args->mutex); aws_condition_variable_notify_one(&test_args->condition_variable); } static bool s_channel_test_shutdown_predicate(void *arg) { struct channel_setup_test_args *test_args = (struct channel_setup_test_args *)arg; return test_args->shutdown_completed; } enum tasks_run_id { TASK_NOW_OFF_THREAD, TASK_NOW_ON_THREAD, TASK_FUTURE_OFF_THREAD, TASK_FUTURE_ON_THREAD, TASK_COUNT, }; struct tasks_run_data { struct aws_mutex mutex; struct aws_condition_variable condvar; bool did_task_run[TASK_COUNT]; bool did_task_fail[TASK_COUNT]; struct aws_channel_task tasks[TASK_COUNT]; }; static struct tasks_run_data s_tasks_run_data; static void s_tasks_run_fn(struct aws_channel_task *task, void *arg, enum aws_task_status status) { (void)task; intptr_t id = (intptr_t)arg; aws_mutex_lock(&s_tasks_run_data.mutex); s_tasks_run_data.did_task_run[id] = true; s_tasks_run_data.did_task_fail[id] = (status == AWS_TASK_STATUS_CANCELED); aws_condition_variable_notify_one(&s_tasks_run_data.condvar); aws_mutex_unlock(&s_tasks_run_data.mutex); } static void s_schedule_on_thread_tasks_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_channel *channel = arg; aws_channel_schedule_task_now(channel, &s_tasks_run_data.tasks[TASK_NOW_ON_THREAD]); aws_channel_schedule_task_future(channel, &s_tasks_run_data.tasks[TASK_FUTURE_ON_THREAD], 1); } static bool s_tasks_run_done_pred(void *user_data) { (void)user_data; for (int i = 0; i < TASK_COUNT; ++i) { if (!s_tasks_run_data.did_task_run[i]) { return false; } } return true; } static int s_test_channel_tasks_run_aux( struct aws_allocator *allocator, aws_task_fn *on_thread_invoker_fn, void (*submit_now_fn)(struct aws_channel *, struct aws_channel_task *)) { struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel = NULL; struct channel_setup_test_args test_args = { 
.error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, .task_status = 100, }; struct aws_channel_options args = { .on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = s_channel_test_shutdown, .shutdown_user_data = &test_args, .event_loop = event_loop, }; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel)); /* Set up tasks */ AWS_ZERO_STRUCT(s_tasks_run_data); ASSERT_SUCCESS(aws_mutex_init(&s_tasks_run_data.mutex)); ASSERT_SUCCESS(aws_condition_variable_init(&s_tasks_run_data.condvar)); for (int i = 0; i < TASK_COUNT; ++i) { aws_channel_task_init(&s_tasks_run_data.tasks[i], s_tasks_run_fn, (void *)(intptr_t)i, "test_channel_task"); } /* Schedule channel-tasks from outside the channel's thread */ ASSERT_SUCCESS(aws_mutex_lock(&s_tasks_run_data.mutex)); submit_now_fn(channel, &s_tasks_run_data.tasks[TASK_NOW_OFF_THREAD]); aws_channel_schedule_task_future(channel, &s_tasks_run_data.tasks[TASK_FUTURE_OFF_THREAD], 1); /* Schedule task that schedules channel-tasks from on then channel's thread */ struct aws_task scheduler_task; aws_task_init(&scheduler_task, on_thread_invoker_fn, channel, "schedule_on_thread_tasks"); aws_event_loop_schedule_task_now(event_loop, &scheduler_task); /* Wait for all the tasks to finish */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &s_tasks_run_data.condvar, &s_tasks_run_data.mutex, s_tasks_run_done_pred, NULL)); /* Check that none failed */ bool all_succeeded = true; for (int i = 0; i < TASK_COUNT; ++i) { if (s_tasks_run_data.did_task_fail[i]) { all_succeeded = false; } } ASSERT_TRUE(all_succeeded); ASSERT_SUCCESS(aws_mutex_unlock(&s_tasks_run_data.mutex)); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } static int s_test_channel_tasks_run(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS( s_test_channel_tasks_run_aux(allocator, s_schedule_on_thread_tasks_fn, aws_channel_schedule_task_now)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_tasks_run, s_test_channel_tasks_run); static void s_serialized_tasks_run_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_channel *channel = arg; aws_channel_schedule_task_now_serialized(channel, &s_tasks_run_data.tasks[TASK_NOW_ON_THREAD]); aws_channel_schedule_task_future(channel, &s_tasks_run_data.tasks[TASK_FUTURE_ON_THREAD], 1); } static int s_channel_tasks_serialized_run(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS( s_test_channel_tasks_run_aux(allocator, s_serialized_tasks_run_fn, aws_channel_schedule_task_now_serialized)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_tasks_serialized_run, s_channel_tasks_serialized_run); static int s_test_channel_rejects_post_shutdown_tasks(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel = NULL; struct channel_setup_test_args test_args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, .task_status = 100, }; struct aws_channel_options args = { 
.on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = s_channel_test_shutdown, .shutdown_user_data = &test_args, .event_loop = event_loop, }; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel)); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_channel_shutdown(channel, AWS_ERROR_SUCCESS)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_channel_test_shutdown_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); struct aws_channel_task task; aws_channel_task_init(&task, s_channel_post_shutdown_task, &test_args, "channel_post_shutdown"); aws_channel_schedule_task_now(channel, &task); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, test_args.task_status); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_rejects_post_shutdown_tasks, s_test_channel_rejects_post_shutdown_tasks) static int s_test_channel_cancels_pending_tasks(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel = NULL; struct channel_setup_test_args test_args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, .task_status = 100, }; struct aws_channel_options args = { .on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = s_channel_test_shutdown, .shutdown_user_data = &test_args, .event_loop = event_loop, }; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel)); struct aws_channel_task task; aws_channel_task_init(&task, s_channel_post_shutdown_task, &test_args, "channel_post_shutdown_cancellation"); /* schedule WAY in the future. */ aws_channel_schedule_task_future(channel, &task, UINT64_MAX - 1); /* make sure it hasn't been invoked yet. 
*/ ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_INT_EQUALS(100, test_args.task_status); ASSERT_SUCCESS(aws_channel_shutdown(channel, AWS_ERROR_SUCCESS)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_channel_test_shutdown_predicate, &test_args)); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, test_args.task_status); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_cancels_pending_tasks, s_test_channel_cancels_pending_tasks) static int s_test_channel_duplicate_shutdown(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_channel *channel = NULL; struct channel_setup_test_args test_args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .setup_completed = false, .shutdown_completed = false, }; struct aws_channel_options args = { .on_setup_completed = s_channel_setup_test_on_setup_completed, .setup_user_data = &test_args, .on_shutdown_completed = s_channel_test_shutdown, .shutdown_user_data = &test_args, .event_loop = event_loop, }; ASSERT_SUCCESS(s_channel_setup_create_and_wait(allocator, &args, &test_args, &channel)); ASSERT_SUCCESS(aws_channel_shutdown(channel, AWS_ERROR_SUCCESS)); ASSERT_SUCCESS(aws_mutex_lock(&test_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &test_args.condition_variable, &test_args.mutex, s_channel_test_shutdown_predicate, &test_args)); ASSERT_SUCCESS(aws_mutex_unlock(&test_args.mutex)); /* make sure this doesn't explode! 
*/ ASSERT_SUCCESS(aws_channel_shutdown(channel, AWS_ERROR_SUCCESS)); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(channel_duplicate_shutdown, s_test_channel_duplicate_shutdown) struct channel_connect_test_args { struct aws_mutex *mutex; struct aws_condition_variable cv; int error_code; struct aws_channel *channel; bool setup; bool shutdown; }; static void s_test_channel_connect_some_hosts_timeout_setup( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct channel_connect_test_args *test_args = user_data; aws_mutex_lock(test_args->mutex); test_args->setup = true; test_args->channel = channel; test_args->error_code = error_code; aws_condition_variable_notify_one(&test_args->cv); aws_mutex_unlock(test_args->mutex); } static void s_test_channel_connect_some_hosts_timeout_shutdown( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; struct channel_connect_test_args *test_args = user_data; aws_mutex_lock(test_args->mutex); test_args->channel = NULL; test_args->shutdown = true; test_args->error_code = error_code; aws_condition_variable_notify_one(&test_args->cv); aws_mutex_unlock(test_args->mutex); } static bool s_setup_complete_pred(void *user_data) { struct channel_connect_test_args *test_args = user_data; return test_args->setup; } static bool s_shutdown_complete_pred(void *user_data) { struct channel_connect_test_args *test_args = user_data; return test_args->shutdown; } static int s_test_channel_connect_some_hosts_timeout(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_mutex mutex = AWS_MUTEX_INIT; struct channel_connect_test_args callback_data = { .mutex = &mutex, .cv = AWS_CONDITION_VARIABLE_INIT, .error_code = 0, .channel = NULL, .setup = false, .shutdown = false, }; struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 1, NULL); /* resolve our s3 test bucket and an EC2 host with an ACL that blackholes the connection */ const struct aws_string *addr1_ipv4 = NULL; const struct aws_string *addr2_ipv4 = NULL; struct aws_string *bh_host = aws_string_new_from_c_str(allocator, "ec2-54-158-231-48.compute-1.amazonaws.com"); struct aws_string *s3_host = aws_string_new_from_c_str(allocator, "aws-crt-test-stuff.s3.amazonaws.com"); struct aws_host_address *resolved_s3_address = NULL; struct aws_host_address *resolved_bh_address = NULL; /* resolve s3 test bucket */ struct aws_array_list s3_addresses; aws_array_list_init_dynamic(&s3_addresses, allocator, 4, sizeof(struct aws_host_address)); aws_default_dns_resolve(allocator, s3_host, &s3_addresses, NULL); const size_t s3_address_count = aws_array_list_length(&s3_addresses); ASSERT_TRUE(s3_address_count >= 1); /* find the first A record, ignore AAAA records */ for (size_t addr_idx = 0; addr_idx < s3_address_count; ++addr_idx) { aws_array_list_get_at_ptr(&s3_addresses, (void *)&resolved_s3_address, addr_idx); if (resolved_s3_address->record_type == AWS_ADDRESS_RECORD_TYPE_A) { break; } } ASSERT_NOT_NULL(resolved_s3_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, resolved_s3_address->record_type, "Did not find an A record"); addr1_ipv4 = aws_string_new_from_string(allocator, resolved_s3_address->address); /* resolve black hole */ struct aws_array_list bh_addresses; aws_array_list_init_dynamic(&bh_addresses, allocator, 4, sizeof(struct 
aws_host_address)); aws_default_dns_resolve(allocator, bh_host, &bh_addresses, NULL); const size_t bh_address_count = aws_array_list_length(&bh_addresses); ASSERT_TRUE(bh_address_count >= 1); /* find the first A record, ignore AAAA records */ for (size_t addr_idx = 0; addr_idx < bh_address_count; ++addr_idx) { aws_array_list_get_at_ptr(&bh_addresses, (void *)&resolved_bh_address, addr_idx); if (resolved_bh_address->record_type == AWS_ADDRESS_RECORD_TYPE_A) { break; } } ASSERT_NOT_NULL(resolved_bh_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, resolved_bh_address->record_type, "Did not find an A record"); addr2_ipv4 = aws_string_new_from_string(allocator, resolved_bh_address->address); /* create a resolver with 2 addresses: 1 which will always succeed, and 1 which will always timeout */ struct mock_dns_resolver mock_dns_resolver; ASSERT_SUCCESS(mock_dns_resolver_init(&mock_dns_resolver, 2, allocator)); struct aws_host_resolution_config mock_resolver_config = { .max_ttl = 1, .impl = mock_dns_resolve, .impl_data = &mock_dns_resolver, }; struct aws_host_address host_address_1 = { .address = addr1_ipv4, .allocator = allocator, .expiry = 0, /* connections should always succeed, if not, things are worse than this unit test failing */ .host = s3_host, .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_2 = { .address = addr2_ipv4, .allocator = allocator, .expiry = 0, /* same black-holed host from the timeout test, connections are a guaranteed timeout */ .host = bh_host, .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_AAAA, .use_count = 0, .weight = 0, }; struct aws_array_list address_list; ASSERT_SUCCESS(aws_array_list_init_dynamic(&address_list, allocator, 2, sizeof(struct aws_host_address))); ASSERT_SUCCESS(aws_array_list_push_back(&address_list, &host_address_2)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list, &host_address_1)); ASSERT_SUCCESS(mock_dns_resolver_append_address_list(&mock_dns_resolver, &address_list)); struct aws_host_resolver_default_options resolver_options = { .el_group = event_loop_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = event_loop_group, .host_resolver = resolver, .host_resolution_config = &mock_resolver_config, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(bootstrap); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 10000; options.type = AWS_SOCKET_STREAM; struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = bootstrap; channel_options.host_name = aws_string_c_str(s3_host); channel_options.port = 80; channel_options.socket_options = &options; channel_options.setup_callback = s_test_channel_connect_some_hosts_timeout_setup; channel_options.shutdown_callback = s_test_channel_connect_some_hosts_timeout_shutdown; channel_options.user_data = &callback_data; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&callback_data.cv, &mutex, s_setup_complete_pred, &callback_data)); ASSERT_INT_EQUALS(0, callback_data.error_code, aws_error_str(callback_data.error_code)); ASSERT_NOT_NULL(callback_data.channel); 
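/*
 * At this point the setup callback has fired. Even though one of the two mocked records points at the
 * black-holed EC2 host (whose connection attempt never completes), the bootstrap is expected to fall back
 * to the reachable S3 address, which is why the assertions above require error_code == 0 and a non-NULL
 * channel.
 */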
ASSERT_SUCCESS(aws_mutex_unlock(&mutex));

    /* this should cause a disconnect and tear down */
    ASSERT_SUCCESS(aws_mutex_lock(&mutex));
    ASSERT_SUCCESS(aws_channel_shutdown(callback_data.channel, AWS_OP_SUCCESS));
    ASSERT_SUCCESS(
        aws_condition_variable_wait_pred(&callback_data.cv, &mutex, s_shutdown_complete_pred, &callback_data));
    ASSERT_INT_EQUALS(0, callback_data.error_code, aws_error_str(callback_data.error_code));
    ASSERT_SUCCESS(aws_mutex_unlock(&mutex));

    /* clean up */
    aws_client_bootstrap_release(bootstrap);
    aws_host_resolver_release(resolver);
    mock_dns_resolver_clean_up(&mock_dns_resolver);
    aws_event_loop_group_release(event_loop_group);

    for (size_t addr_idx = 0; addr_idx < s3_address_count; ++addr_idx) {
        aws_array_list_get_at_ptr(&s3_addresses, (void *)&resolved_s3_address, addr_idx);
        aws_host_address_clean_up(resolved_s3_address);
    }
    aws_array_list_clean_up(&s3_addresses);

    for (size_t addr_idx = 0; addr_idx < bh_address_count; ++addr_idx) {
        aws_array_list_get_at_ptr(&bh_addresses, (void *)&resolved_bh_address, addr_idx);
        aws_host_address_clean_up(resolved_bh_address);
    }
    aws_array_list_clean_up(&bh_addresses);

    aws_io_library_clean_up();
    return 0;
}
AWS_TEST_CASE(channel_connect_some_hosts_timeout, s_test_channel_connect_some_hosts_timeout);
aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/default_host_resolver_test.c000066400000000000000000001614241456575232400266160ustar00rootroot00000000000000
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

/* NOTE: the header names inside the angle brackets were lost in this copy of the file; the include list
 * below is a best-effort reconstruction based on the APIs these tests use. */
#include <aws/common/clock.h>
#include <aws/common/condition_variable.h>
#include <aws/common/environment.h>
#include <aws/common/string.h>
#include <aws/common/thread.h>
#include <aws/io/event_loop.h>
#include <aws/io/host_resolver.h>
#include <aws/io/logging.h>
#include <aws/testing/aws_test_harness.h>

#include "mock_dns_resolver.h"

static const uint64_t FORCE_RESOLVE_SLEEP_TIME = 1500000000;

struct default_host_callback_data {
    struct aws_host_address aaaa_address;
    struct aws_host_address a_address;
    bool has_aaaa_address;
    bool has_a_address;
    struct aws_condition_variable condition_variable;
    bool invoked;
    struct aws_mutex *mutex;
    aws_thread_id_t callback_thread_id;
};

static bool s_default_host_resolved_predicate(void *arg) {
    struct default_host_callback_data *callback_data = arg;
    return callback_data->invoked;
}

static void s_default_host_purge_callback(void *user_data) {
    struct default_host_callback_data *callback_data = user_data;
    aws_mutex_lock(callback_data->mutex);
    callback_data->invoked = true;
    callback_data->callback_thread_id = aws_thread_current_thread_id();
    aws_mutex_unlock(callback_data->mutex);
    aws_condition_variable_notify_one(&callback_data->condition_variable);
}

static void s_default_host_resolved_test_callback(
    struct aws_host_resolver *resolver,
    const struct aws_string *host_name,
    int err_code,
    const struct aws_array_list *host_addresses,
    void *user_data) {
    (void)resolver;
    (void)host_name;
    (void)err_code;

    struct default_host_callback_data *callback_data = user_data;

    aws_mutex_lock(callback_data->mutex);

    if (host_addresses != NULL) {
        struct aws_host_address *host_address = NULL;
        if (aws_array_list_length(host_addresses) >= 2) {
            aws_array_list_get_at_ptr(host_addresses, (void **)&host_address, 0);
            aws_host_address_copy(host_address, &callback_data->aaaa_address);

            aws_array_list_get_at_ptr(host_addresses, (void **)&host_address, 1);
            aws_host_address_copy(host_address, &callback_data->a_address);

            callback_data->has_aaaa_address = true;
            callback_data->has_a_address = true;
        } else if (aws_array_list_length(host_addresses) == 1) {
            aws_array_list_get_at_ptr(host_addresses, (void **)&host_address, 0);
            if (host_address->record_type == AWS_ADDRESS_RECORD_TYPE_A) {
                aws_host_address_copy(host_address,
&callback_data->a_address); callback_data->has_a_address = true; } else if (host_address->record_type == AWS_ADDRESS_RECORD_TYPE_AAAA) { aws_host_address_copy(host_address, &callback_data->aaaa_address); callback_data->has_aaaa_address = true; } } } callback_data->invoked = true; callback_data->callback_thread_id = aws_thread_current_thread_id(); aws_mutex_unlock(callback_data->mutex); aws_condition_variable_notify_one(&callback_data->condition_variable); } static int s_test_default_with_ipv6_lookup_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "s3.dualstack.us-east-1.amazonaws.com"); ASSERT_NOT_NULL(host_name); struct aws_host_resolution_config config = { .max_ttl = 10, .impl = aws_default_dns_resolve, .impl_data = NULL, }; struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; ASSERT_TRUE(callback_data.has_aaaa_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_AAAA, callback_data.aaaa_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.aaaa_address.host), callback_data.aaaa_address.host->len); ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.aaaa_address.address->len > 1); ASSERT_TRUE(callback_data.a_address.address->len > 1); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_string_destroy((void *)host_name); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(test_default_with_ipv6_lookup, s_test_default_with_ipv6_lookup_fn) /* just FYI, this test assumes that "s3.us-east-1.amazonaws.com" does not return IPv6 addresses. 
*/ static int s_test_default_with_ipv4_only_lookup_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "s3.us-east-1.amazonaws.com"); ASSERT_NOT_NULL(host_name); struct aws_host_resolution_config config = { .max_ttl = 10, .impl = aws_default_dns_resolve, .impl_data = NULL, }; struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; ASSERT_FALSE(callback_data.has_aaaa_address); ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.a_address.address->len > 1); aws_mutex_unlock(&mutex); aws_host_address_clean_up(&callback_data.a_address); aws_string_destroy((void *)host_name); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(test_default_with_ipv4_only_lookup, s_test_default_with_ipv4_only_lookup_fn) /* there are multiple big assumptions in this test. * The first assumption is that ec2.us-east-1.amazonaws.com will never return an IPv6 address. * The second assumption is that the TTLs for these records are one second and that the backend resolver * resolves at the TTL rate. * The third assumption is that this test runs in less than one second after the first background resolve. * The fourth assumption is that ec2.us-east-1.api.aws does not return multiple addresses per A or AAAA record. * If any of these assumptions ever change, this test will likely be broken, but I don't know of a better way to test * this end-to-end. 
*/ static int s_test_default_with_multiple_lookups_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name_1 = aws_string_new_from_c_str(allocator, "ec2.us-east-1.api.aws"); const struct aws_string *host_name_2 = aws_string_new_from_c_str(allocator, "ec2.us-east-1.amazonaws.com"); ASSERT_NOT_NULL(host_name_1); ASSERT_NOT_NULL(host_name_2); struct aws_host_resolution_config config = { .max_ttl = 10, .impl = aws_default_dns_resolve, .impl_data = NULL, }; struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name_1, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); struct aws_host_address host_1_original_ipv6_resolve; aws_host_address_copy(&callback_data.aaaa_address, &host_1_original_ipv6_resolve); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); callback_data.invoked = false; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name_2, s_default_host_resolved_test_callback, &config, &callback_data)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); struct aws_host_address host_2_original_ipv4_resolve; aws_host_address_copy(&callback_data.a_address, &host_2_original_ipv4_resolve); aws_host_address_clean_up(&callback_data.a_address); /* this will invoke in the calling thread since the address is already cached. */ aws_mutex_unlock(&mutex); callback_data.invoked = false; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name_1, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_1_original_ipv6_resolve.address), host_1_original_ipv6_resolve.address->len, aws_string_bytes(callback_data.aaaa_address.address), callback_data.aaaa_address.address->len); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); /* this will invoke in the calling thread since the address is already cached. 
*/ callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name_2, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_2_original_ipv4_resolve.address), host_2_original_ipv4_resolve.address->len, aws_string_bytes(callback_data.a_address.address), callback_data.a_address.address->len); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); aws_host_address_clean_up(&host_1_original_ipv6_resolve); aws_host_address_clean_up(&host_2_original_ipv4_resolve); aws_string_destroy((void *)host_name_1); aws_string_destroy((void *)host_name_2); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(test_default_with_multiple_lookups, s_test_default_with_multiple_lookups_fn) static struct aws_mutex s_time_lock = AWS_MUTEX_INIT; static uint64_t s_current_time = 0; static int s_clock_fn(uint64_t *current_time) { aws_mutex_lock(&s_time_lock); *current_time = s_current_time; aws_mutex_unlock(&s_time_lock); return AWS_OP_SUCCESS; } static void s_set_time(uint64_t current_time) { aws_mutex_lock(&s_time_lock); s_current_time = current_time; aws_mutex_unlock(&s_time_lock); } static int s_test_resolver_ttls_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); s_set_time(0); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, .system_clock_override_fn = s_clock_fn}; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "host_address"); const struct aws_string *addr1_ipv4 = aws_string_new_from_c_str(allocator, "address1ipv4"); const struct aws_string *addr1_ipv6 = aws_string_new_from_c_str(allocator, "address1ipv6"); const struct aws_string *addr2_ipv4 = aws_string_new_from_c_str(allocator, "address2ipv4"); const struct aws_string *addr2_ipv6 = aws_string_new_from_c_str(allocator, "address2ipv6"); struct mock_dns_resolver mock_resolver; ASSERT_SUCCESS(mock_dns_resolver_init(&mock_resolver, 2, allocator)); struct aws_host_resolution_config config = { .max_ttl = 2, .impl = mock_dns_resolve, .impl_data = &mock_resolver, .resolve_frequency_ns = aws_timestamp_convert(500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)}; struct aws_host_address host_address_1_ipv4 = { .address = addr1_ipv4, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_1_ipv6 = { .address = addr1_ipv6, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_AAAA, .use_count = 0, .weight = 0, }; struct aws_array_list address_list_1; ASSERT_SUCCESS(aws_array_list_init_dynamic(&address_list_1, allocator, 2, sizeof(struct aws_host_address))); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_1_ipv6)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, 
&host_address_1_ipv4)); ASSERT_SUCCESS(mock_dns_resolver_append_address_list(&mock_resolver, &address_list_1)); struct aws_host_address host_address_2_ipv4 = { .address = addr2_ipv4, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_2_ipv6 = { .address = addr2_ipv6, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_AAAA, .use_count = 0, .weight = 0, }; struct aws_array_list address_list_2; ASSERT_SUCCESS(aws_array_list_init_dynamic(&address_list_2, allocator, 2, sizeof(struct aws_host_address))); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_2, &host_address_2_ipv6)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_2, &host_address_2_ipv4)); ASSERT_SUCCESS(mock_dns_resolver_append_address_list(&mock_resolver, &address_list_2)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; /* t = 0s */ ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); /* bump us up to near expiration time, but not quite, t = 1.5s */ s_set_time(aws_timestamp_convert(1500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); /* over-sleep; several resolves should run. The second address should have an expiry time based on t = 1.5s */ aws_thread_current_sleep(FORCE_RESOLVE_SLEEP_TIME); callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); /* * We still get address 1 on the second resolve because address 2 was put as MRU when it was resolved on the * second iteration of the resolver loop. 
*/ ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); /* instantly requery, we should get address 2 (which was unfortunately dumped in the back of the lru cache on * resolution */ callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); /* bump us past expiration time, t = 2.001 */ s_set_time(aws_timestamp_convert(2001, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); /* over-sleep to allow the host resolver thread to run at least one more iteration to cull the expired record */ aws_thread_current_sleep(FORCE_RESOLVE_SLEEP_TIME); /* note that normally, the first address would come back, but the TTL is expired (we set it to two seconds). * As a result, we should get the second one again.*/ callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); /* * t = 4, all addresses should be expired */ s_set_time(aws_timestamp_convert(4, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); /* over-sleep so entry two expires. Now everything is expired, but because the last thing we resolved was addr 2, it * should still be there. 
*/ aws_thread_current_sleep(FORCE_RESOLVE_SLEEP_TIME); callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); aws_host_resolver_release(resolver); aws_string_destroy((void *)host_name); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); mock_dns_resolver_clean_up(&mock_resolver); return 0; } AWS_TEST_CASE(test_resolver_ttls, s_test_resolver_ttls_fn) static int s_test_resolver_connect_failure_recording_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "host_address"); const struct aws_string *addr1_ipv4 = aws_string_new_from_c_str(allocator, "address1ipv4"); const struct aws_string *addr1_ipv6 = aws_string_new_from_c_str(allocator, "address1ipv6"); const struct aws_string *addr2_ipv4 = aws_string_new_from_c_str(allocator, "address2ipv4"); const struct aws_string *addr2_ipv6 = aws_string_new_from_c_str(allocator, "address2ipv6"); struct mock_dns_resolver mock_resolver; ASSERT_SUCCESS(mock_dns_resolver_init(&mock_resolver, 1000, allocator)); struct aws_host_resolution_config config = { .max_ttl = 30, .impl = mock_dns_resolve, .impl_data = &mock_resolver, }; struct aws_host_address host_address_1_ipv4 = { .address = addr1_ipv4, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_1_ipv6 = { .address = addr1_ipv6, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_AAAA, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_2_ipv4 = { .address = addr2_ipv4, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_2_ipv6 = { .address = addr2_ipv6, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_AAAA, .use_count = 0, .weight = 0, }; struct aws_array_list address_list_1; ASSERT_SUCCESS(aws_array_list_init_dynamic(&address_list_1, allocator, 2, sizeof(struct aws_host_address))); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_1_ipv6)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_2_ipv6)); 
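/*
 * The mock resolver is being primed with two AAAA records and two A records for the same host (the two
 * remaining push_back calls follow immediately below). A rough trace of what this test then expects,
 * derived from its own assertions and comments:
 *
 *   resolve #1                   -> address 1 (AAAA + A)
 *   resolve #2                   -> address 2 (LRU rotation)
 *   record failure on address 1  -> resolve #3 skips it and returns address 2 again
 *   record failure on address 2  -> resolve #4 falls back to address 1 (all have failed, oldest first)
 *   re-resolve after sleeping    -> still address 1; the failed records are not trusted again yet
 */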
ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_1_ipv4)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_2_ipv4)); ASSERT_SUCCESS(mock_dns_resolver_append_address_list(&mock_resolver, &address_list_1)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); callback_data.invoked = false; /* this should still be cached don't need the mutex here. */ aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); ASSERT_SUCCESS(aws_host_resolver_record_connection_failure(resolver, &host_address_1_ipv6)); ASSERT_SUCCESS(aws_host_resolver_record_connection_failure(resolver, &host_address_1_ipv4)); /* following the LRU policy, address 1 should be what gets returned here, however we marked it as failed, so it * should be skipped and address 2 should be returned. */ aws_mutex_unlock(&mutex); callback_data.invoked = false; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); ASSERT_SUCCESS(aws_host_resolver_record_connection_failure(resolver, &host_address_2_ipv6)); ASSERT_SUCCESS(aws_host_resolver_record_connection_failure(resolver, &host_address_2_ipv4)); callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); /* here address 1 should be returned since it is now the least recently used address and all of them have failed.. 
*/ aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); /* let it re-resolve, and we should still have the other connections marked as connection failures. */ aws_thread_current_sleep(FORCE_RESOLVE_SLEEP_TIME); callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); /* here address 1 should still be the one returned because though we re-resolved, we don't trust the dns entries yet * and we kept them as bad addresses. */ aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); aws_host_resolver_release(resolver); aws_string_destroy((void *)host_name); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); mock_dns_resolver_clean_up(&mock_resolver); return 0; } AWS_TEST_CASE(test_resolver_connect_failure_recording, s_test_resolver_connect_failure_recording_fn) static int s_test_resolver_ttl_refreshes_on_resolve_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "host_address"); const struct aws_string *addr1_ipv4 = aws_string_new_from_c_str(allocator, "address1ipv4"); const struct aws_string *addr1_ipv6 = aws_string_new_from_c_str(allocator, "address1ipv6"); const struct aws_string *addr2_ipv4 = aws_string_new_from_c_str(allocator, "address2ipv4"); const struct aws_string *addr2_ipv6 = aws_string_new_from_c_str(allocator, "address2ipv6"); struct mock_dns_resolver mock_resolver; ASSERT_SUCCESS(mock_dns_resolver_init(&mock_resolver, 1000, allocator)); struct aws_host_resolution_config config = { .max_ttl = 30, .impl = mock_dns_resolve, .impl_data = &mock_resolver, }; struct aws_host_address host_address_1_ipv4 = { .address = addr1_ipv4, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_1_ipv6 = { .address = addr1_ipv6, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_AAAA, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_2_ipv4 = { .address = addr2_ipv4, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, 
"host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_host_address host_address_2_ipv6 = { .address = addr2_ipv6, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_AAAA, .use_count = 0, .weight = 0, }; struct aws_array_list address_list_1; ASSERT_SUCCESS(aws_array_list_init_dynamic(&address_list_1, allocator, 2, sizeof(struct aws_host_address))); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_1_ipv6)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_2_ipv6)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_1_ipv4)); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_2_ipv4)); ASSERT_SUCCESS(mock_dns_resolver_append_address_list(&mock_resolver, &address_list_1)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); uint64_t address_1_expiry = callback_data.aaaa_address.expiry; aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); callback_data.invoked = false; /* this will resolve in the calling thread, so don't take the lock. */ aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv4, callback_data.a_address.address)); uint64_t address_2_expiry = callback_data.aaaa_address.expiry; aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_thread_current_sleep(FORCE_RESOLVE_SLEEP_TIME); /* now we loop back around, we resolved, but the TTLs should not have expired at all (they were actually refreshed). */ callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); /* here address 1 should be returned since it is now the least recently used address.. 
*/ aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); ASSERT_TRUE(address_1_expiry < callback_data.aaaa_address.expiry); ASSERT_TRUE(address_1_expiry < callback_data.a_address.expiry); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); /* let it re-resolve, we should get addr 2 back, but with a later expiry than before.. */ callback_data.invoked = false; aws_mutex_unlock(&mutex); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); /* here address 1 should still be the one returned because though we re-resolved, we don't trust the dns entries yet * and we kept them as bad addresses. */ aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv6, callback_data.aaaa_address.address)); ASSERT_INT_EQUALS(0, aws_string_compare(addr2_ipv4, callback_data.a_address.address)); ASSERT_TRUE(address_2_expiry < callback_data.aaaa_address.expiry); ASSERT_TRUE(address_2_expiry < callback_data.a_address.expiry); aws_host_address_clean_up(&callback_data.aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); aws_host_resolver_release(resolver); aws_string_destroy((void *)host_name); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); mock_dns_resolver_clean_up(&mock_resolver); return 0; } AWS_TEST_CASE(test_resolver_ttl_refreshes_on_resolve, s_test_resolver_ttl_refreshes_on_resolve_fn) static int s_test_resolver_ipv4_address_lookup_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "127.0.0.1"); ASSERT_NOT_NULL(host_name); struct aws_host_resolution_config config = { .max_ttl = 10, .impl = aws_default_dns_resolve, .impl_data = NULL, }; struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.a_address.address->len > 1); ASSERT_FALSE(callback_data.has_aaaa_address); 
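/*
 * The assertions above capture the expected behavior for a literal IPv4 string: resolving "127.0.0.1"
 * is expected to yield exactly one A record whose host field matches the input string, and no AAAA
 * record, since the default resolver can treat the literal address as numeric rather than as a name
 * needing a real DNS lookup.
 */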
aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); aws_string_destroy((void *)host_name); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(test_resolver_ipv4_address_lookup, s_test_resolver_ipv4_address_lookup_fn) static int s_test_resolver_purge_host_cache(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "127.0.0.1"); ASSERT_NOT_NULL(host_name); struct aws_host_resolution_config config = { .max_ttl = 10, .impl = aws_default_dns_resolve, .impl_data = NULL, }; struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.a_address.address->len > 1); ASSERT_FALSE(callback_data.has_aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); size_t address_count = aws_host_resolver_get_host_address_count( resolver, host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); ASSERT_INT_EQUALS(address_count, 1); /* purge the host */ struct aws_host_resolver_purge_host_options purge_host_options = { .host = host_name, .on_host_purge_complete_callback = s_default_host_purge_callback, .user_data = &callback_data, }; ASSERT_SUCCESS(aws_host_resolver_purge_host_cache(resolver, &purge_host_options)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; aws_mutex_unlock(&mutex); address_count = aws_host_resolver_get_host_address_count( resolver, host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); /* If the host is really gone, we shouldn't have any addresses. 
*/ ASSERT_INT_EQUALS(address_count, 0); /* try purging it again */ ASSERT_SUCCESS(aws_host_resolver_purge_host_cache(resolver, &purge_host_options)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; aws_mutex_unlock(&mutex); /* try adding the host again */ ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.a_address.address->len > 1); ASSERT_FALSE(callback_data.has_aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); address_count = aws_host_resolver_get_host_address_count( resolver, host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); ASSERT_INT_EQUALS(address_count, 1); aws_string_destroy((void *)host_name); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(test_resolver_purge_host_cache, s_test_resolver_purge_host_cache) static int s_test_resolver_purge_cache(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "127.0.0.1"); ASSERT_NOT_NULL(host_name); const struct aws_string *host_name_2 = aws_string_new_from_c_str(allocator, "127.0.0.2"); ASSERT_NOT_NULL(host_name_2); struct aws_host_resolution_config config = { .max_ttl = 10, .impl = aws_default_dns_resolve, .impl_data = NULL, }; struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; /* resolve first host */ ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.a_address.address->len > 1); ASSERT_FALSE(callback_data.has_aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); /* resolve second host */ ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name_2, 
s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name_2), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.a_address.address->len > 1); ASSERT_FALSE(callback_data.has_aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); size_t address_count = aws_host_resolver_get_host_address_count( resolver, host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); ASSERT_INT_EQUALS(address_count, 1); address_count = aws_host_resolver_get_host_address_count( resolver, host_name_2, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); ASSERT_INT_EQUALS(address_count, 1); ASSERT_SUCCESS( aws_host_resolver_purge_cache_with_callback(resolver, s_default_host_purge_callback, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; aws_mutex_unlock(&mutex); address_count = aws_host_resolver_get_host_address_count( resolver, host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); /* If the host is really gone, we shouldn't have any addresses. */ ASSERT_INT_EQUALS(address_count, 0); address_count = aws_host_resolver_get_host_address_count( resolver, host_name_2, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); /* If the host is really gone, we shouldn't have any addresses. 
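 * (Checked for host_name_2 as well as host_name, since the single aws_host_resolver_purge_cache_with_callback call
 * should clear every entry in the cache.)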
*/ ASSERT_INT_EQUALS(address_count, 0); /* try purging it again */ ASSERT_SUCCESS( aws_host_resolver_purge_cache_with_callback(resolver, s_default_host_purge_callback, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; aws_mutex_unlock(&mutex); /* try adding the host again */ ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_TRUE(callback_data.has_a_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_A, callback_data.a_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.a_address.host), callback_data.a_address.host->len); ASSERT_TRUE(callback_data.a_address.address->len > 1); ASSERT_FALSE(callback_data.has_aaaa_address); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); address_count = aws_host_resolver_get_host_address_count( resolver, host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A | AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_AAAA); ASSERT_INT_EQUALS(address_count, 1); aws_string_destroy((void *)host_name); aws_string_destroy((void *)host_name_2); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(test_resolver_purge_cache, s_test_resolver_purge_cache) static int s_test_resolver_ipv6_address_lookup_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "::1"); ASSERT_NOT_NULL(host_name); struct aws_host_resolution_config config = { .max_ttl = 10, .impl = aws_default_dns_resolve, .impl_data = NULL, }; struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); callback_data.invoked = false; ASSERT_FALSE(callback_data.has_a_address); ASSERT_TRUE(callback_data.has_aaaa_address); ASSERT_INT_EQUALS(AWS_ADDRESS_RECORD_TYPE_AAAA, callback_data.aaaa_address.record_type); ASSERT_BIN_ARRAYS_EQUALS( aws_string_bytes(host_name), host_name->len, aws_string_bytes(callback_data.aaaa_address.host), callback_data.aaaa_address.host->len); ASSERT_TRUE(callback_data.aaaa_address.address->len > 1); aws_host_address_clean_up(&callback_data.aaaa_address); aws_mutex_unlock(&mutex); aws_string_destroy((void *)host_name); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } 
AWS_TEST_CASE(test_resolver_ipv6_address_lookup, s_test_resolver_ipv6_address_lookup_fn) static int s_test_resolver_low_frequency_starvation_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); const struct aws_string *host_name = aws_string_new_from_c_str(allocator, "host_address"); const struct aws_string *addr1_ipv4 = aws_string_new_from_c_str(allocator, "address1ipv4"); struct mock_dns_resolver mock_resolver; ASSERT_SUCCESS(mock_dns_resolver_init(&mock_resolver, 1000, allocator)); struct aws_host_resolution_config config = { .max_ttl = 30, .impl = mock_dns_resolve, .impl_data = &mock_resolver, .resolve_frequency_ns = aws_timestamp_convert(120, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL), }; struct aws_host_address host_address_1_ipv4 = { .address = addr1_ipv4, .allocator = allocator, .expiry = 0, .host = aws_string_new_from_c_str(allocator, "host_address"), .connection_failure_count = 0, .record_type = AWS_ADDRESS_RECORD_TYPE_A, .use_count = 0, .weight = 0, }; struct aws_array_list address_list_1; ASSERT_SUCCESS(aws_array_list_init_dynamic(&address_list_1, allocator, 2, sizeof(struct aws_host_address))); ASSERT_SUCCESS(aws_array_list_push_back(&address_list_1, &host_address_1_ipv4)); ASSERT_SUCCESS(mock_dns_resolver_append_address_list(&mock_resolver, &address_list_1)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct default_host_callback_data callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_aaaa_address = false, .has_a_address = false, .mutex = &mutex, }; ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.a_address); callback_data.invoked = false; aws_mutex_unlock(&mutex); uint64_t starvation_start = 0; aws_high_res_clock_get_ticks(&starvation_start); ASSERT_SUCCESS(aws_host_resolver_record_connection_failure(resolver, &host_address_1_ipv4)); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_default_host_resolved_test_callback, &config, &callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &callback_data.condition_variable, &mutex, s_default_host_resolved_predicate, &callback_data); uint64_t starvation_end = 0; aws_high_res_clock_get_ticks(&starvation_end); uint64_t starvation_ms = aws_timestamp_convert(starvation_end - starvation_start, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); /* * verify that the time it took to get a resolution was non-trivial (in this case we check half the minimum * between-resolve wait time) and also not huge (resolve frequency is two minutes after all) */ ASSERT_TRUE(starvation_ms > 50); ASSERT_TRUE(starvation_ms < 1000); ASSERT_INT_EQUALS(0, aws_string_compare(addr1_ipv4, callback_data.a_address.address)); aws_host_address_clean_up(&callback_data.a_address); aws_mutex_unlock(&mutex); aws_host_resolver_release(resolver); aws_string_destroy((void *)host_name); 
aws_event_loop_group_release(el_group); aws_io_library_clean_up(); mock_dns_resolver_clean_up(&mock_resolver); return 0; } AWS_TEST_CASE(test_resolver_low_frequency_starvation, s_test_resolver_low_frequency_starvation_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/event_loop_test.c000066400000000000000000001172371456575232400243710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include struct task_args { bool invoked; bool was_in_thread; aws_thread_id_t thread_id; struct aws_event_loop *loop; struct aws_event_loop_group *el_group; enum aws_task_status status; struct aws_mutex mutex; struct aws_condition_variable condition_variable; struct aws_atomic_var thread_complete; }; static void s_test_task(struct aws_task *task, void *user_data, enum aws_task_status status) { (void)task; struct task_args *args = user_data; aws_mutex_lock(&args->mutex); args->thread_id = aws_thread_current_thread_id(); args->invoked = true; args->status = status; args->was_in_thread = aws_event_loop_thread_is_callers_thread(args->loop); aws_mutex_unlock((&args->mutex)); aws_condition_variable_notify_one(&args->condition_variable); } static bool s_task_ran_predicate(void *args) { struct task_args *task_args = args; return task_args->invoked; } /* * Test that a scheduled task from a non-event loop owned thread executes. */ static int s_test_event_loop_xthread_scheduled_tasks_execute(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct task_args task_args = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .invoked = false, .was_in_thread = false, .status = -1, .loop = event_loop, .thread_id = 0, }; struct aws_task task; aws_task_init(&task, s_test_task, &task_args, "xthread_scheduled_tasks_execute"); /* Test "future" tasks */ ASSERT_SUCCESS(aws_mutex_lock(&task_args.mutex)); uint64_t now; ASSERT_SUCCESS(aws_event_loop_current_clock_time(event_loop, &now)); aws_event_loop_schedule_task_future(event_loop, &task, now); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &task_args.condition_variable, &task_args.mutex, s_task_ran_predicate, &task_args)); ASSERT_TRUE(task_args.invoked); aws_mutex_unlock(&task_args.mutex); ASSERT_FALSE(aws_thread_thread_id_equal(task_args.thread_id, aws_thread_current_thread_id())); /* Test "now" tasks */ task_args.invoked = false; ASSERT_SUCCESS(aws_mutex_lock(&task_args.mutex)); aws_event_loop_schedule_task_now(event_loop, &task); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &task_args.condition_variable, &task_args.mutex, s_task_ran_predicate, &task_args)); ASSERT_TRUE(task_args.invoked); aws_mutex_unlock(&task_args.mutex); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_xthread_scheduled_tasks_execute, s_test_event_loop_xthread_scheduled_tasks_execute) static bool s_test_cancel_thread_task_predicate(void *args) { struct task_args *task_args = args; return task_args->invoked; } /* * Test that a scheduled task from a non-event loop owned thread executes. 
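 * In addition, a second task scheduled far in the future should be invoked with AWS_TASK_STATUS_CANCELED when the
 * event loop is destroyed before that task's scheduled time.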
*/ static int s_test_event_loop_canceled_tasks_run_in_el_thread(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct task_args task1_args = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .invoked = false, .was_in_thread = false, .status = -1, .loop = event_loop, .thread_id = 0, }; struct task_args task2_args = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .invoked = false, .was_in_thread = false, .status = -1, .loop = event_loop, .thread_id = 0, }; struct aws_task task1; aws_task_init(&task1, s_test_task, &task1_args, "canceled_tasks_run_in_el_thread1"); struct aws_task task2; aws_task_init(&task2, s_test_task, &task2_args, "canceled_tasks_run_in_el_thread2"); aws_event_loop_schedule_task_now(event_loop, &task1); uint64_t now; ASSERT_SUCCESS(aws_event_loop_current_clock_time(event_loop, &now)); aws_event_loop_schedule_task_future(event_loop, &task2, now + 10000000000); ASSERT_FALSE(aws_event_loop_thread_is_callers_thread(event_loop)); ASSERT_SUCCESS(aws_mutex_lock(&task1_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &task1_args.condition_variable, &task1_args.mutex, s_task_ran_predicate, &task1_args)); ASSERT_TRUE(task1_args.invoked); ASSERT_TRUE(task1_args.was_in_thread); ASSERT_FALSE(aws_thread_thread_id_equal(task1_args.thread_id, aws_thread_current_thread_id())); ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task1_args.status); aws_mutex_unlock(&task1_args.mutex); aws_event_loop_destroy(event_loop); aws_mutex_lock(&task2_args.mutex); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &task2_args.condition_variable, &task2_args.mutex, s_test_cancel_thread_task_predicate, &task2_args)); ASSERT_TRUE(task2_args.invoked); aws_mutex_unlock(&task2_args.mutex); ASSERT_TRUE(task2_args.was_in_thread); ASSERT_TRUE(aws_thread_thread_id_equal(task2_args.thread_id, aws_thread_current_thread_id())); ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, task2_args.status); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_canceled_tasks_run_in_el_thread, s_test_event_loop_canceled_tasks_run_in_el_thread) #if AWS_USE_IO_COMPLETION_PORTS int aws_pipe_get_unique_name(char *dst, size_t dst_size); /* Open read/write handles to a pipe with support for async (overlapped) read and write */ static int s_async_pipe_init(struct aws_io_handle *read_handle, struct aws_io_handle *write_handle) { char pipe_name[256]; ASSERT_SUCCESS(aws_pipe_get_unique_name(pipe_name, sizeof(pipe_name))); write_handle->data.handle = CreateNamedPipeA( pipe_name, /* lpName */ PIPE_ACCESS_OUTBOUND | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE, /* dwOpenMode */ PIPE_TYPE_BYTE | PIPE_WAIT | PIPE_REJECT_REMOTE_CLIENTS, /* dwPipeMode */ 1, /* nMaxInstances */ 2048, /* nOutBufferSize */ 2048, /* nInBufferSize */ 0, /* nDefaultTimeOut */ NULL); /* lpSecurityAttributes */ ASSERT_TRUE(write_handle->data.handle != INVALID_HANDLE_VALUE); read_handle->data.handle = CreateFileA( pipe_name, /* lpFileName */ GENERIC_READ, /* dwDesiredAccess */ 0, /* dwShareMode */ NULL, /* lpSecurityAttributes */ OPEN_EXISTING, /* dwCreationDisposition */ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, /* dwFlagsAndAttributes */ NULL); /* hTemplateFile */ ASSERT_TRUE(read_handle->data.handle != INVALID_HANDLE_VALUE); 
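    /* Both pipe ends are opened with FILE_FLAG_OVERLAPPED, so reads and writes can complete asynchronously through
     * the event loop's I/O completion port. */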
return AWS_OP_SUCCESS; } static void s_async_pipe_clean_up(struct aws_io_handle *read_handle, struct aws_io_handle *write_handle) { CloseHandle(read_handle->data.handle); CloseHandle(write_handle->data.handle); } struct overlapped_completion_data { struct aws_mutex mutex; struct aws_condition_variable condition_variable; bool signaled; struct aws_event_loop *event_loop; struct aws_overlapped *overlapped; int status_code; size_t num_bytes_transferred; }; static int s_overlapped_completion_data_init(struct overlapped_completion_data *data) { AWS_ZERO_STRUCT(*data); ASSERT_SUCCESS(aws_mutex_init(&data->mutex)); ASSERT_SUCCESS(aws_condition_variable_init(&data->condition_variable)); return AWS_OP_SUCCESS; } static void s_overlapped_completion_data_clean_up(struct overlapped_completion_data *data) { aws_condition_variable_clean_up(&data->condition_variable); aws_mutex_clean_up(&data->mutex); } static void s_on_overlapped_operation_complete( struct aws_event_loop *event_loop, struct aws_overlapped *overlapped, int status_code, size_t num_bytes_transferred) { struct overlapped_completion_data *data = overlapped->user_data; aws_mutex_lock(&data->mutex); data->event_loop = event_loop; data->overlapped = overlapped; data->status_code = status_code; data->num_bytes_transferred = num_bytes_transferred; data->signaled = true; aws_condition_variable_notify_one(&data->condition_variable); aws_mutex_unlock(&data->mutex); } static bool s_overlapped_completion_predicate(void *args) { struct overlapped_completion_data *data = args; return data->signaled; } static int s_test_event_loop_completion_events(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Start event-loop */ struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); /* Open a pipe */ struct aws_io_handle read_handle; struct aws_io_handle write_handle; ASSERT_SUCCESS(s_async_pipe_init(&read_handle, &write_handle)); /* Connect to event-loop */ ASSERT_SUCCESS(aws_event_loop_connect_handle_to_io_completion_port(event_loop, &write_handle)); /* Set up an async (overlapped) write that will result in s_on_overlapped_operation_complete() getting run * and filling out `completion_data` */ struct overlapped_completion_data completion_data; s_overlapped_completion_data_init(&completion_data); struct aws_overlapped overlapped; aws_overlapped_init(&overlapped, s_on_overlapped_operation_complete, &completion_data); /* Do async write */ const char msg[] = "Cherry Pie"; bool write_success = WriteFile(write_handle.data.handle, msg, sizeof(msg), NULL, aws_overlapped_to_windows_overlapped(&overlapped)); ASSERT_TRUE(write_success || GetLastError() == ERROR_IO_PENDING); /* Wait for completion callbacks */ ASSERT_SUCCESS(aws_mutex_lock(&completion_data.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &completion_data.condition_variable, &completion_data.mutex, s_overlapped_completion_predicate, &completion_data)); ASSERT_SUCCESS(aws_mutex_unlock(&completion_data.mutex)); /* Assert that the aws_event_loop_on_completion_fn passed the appropriate args */ ASSERT_PTR_EQUALS(event_loop, completion_data.event_loop); ASSERT_PTR_EQUALS(&overlapped, completion_data.overlapped); ASSERT_INT_EQUALS(0, completion_data.status_code); /* Check status code for I/O operation */ ASSERT_INT_EQUALS(sizeof(msg), completion_data.num_bytes_transferred); /* Shut it all down */ s_overlapped_completion_data_clean_up(&completion_data); 
s_async_pipe_clean_up(&read_handle, &write_handle); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_completion_events, s_test_event_loop_completion_events) #else /* !AWS_USE_IO_COMPLETION_PORTS */ # include int aws_open_nonblocking_posix_pipe(int pipe_fds[2]); /* Define simple pipe for testing. */ int simple_pipe_open(struct aws_io_handle *read_handle, struct aws_io_handle *write_handle) { AWS_ZERO_STRUCT(*read_handle); AWS_ZERO_STRUCT(*write_handle); int pipe_fds[2]; ASSERT_SUCCESS(aws_open_nonblocking_posix_pipe(pipe_fds)); read_handle->data.fd = pipe_fds[0]; write_handle->data.fd = pipe_fds[1]; return AWS_OP_SUCCESS; } void simple_pipe_close(struct aws_io_handle *read_handle, struct aws_io_handle *write_handle) { close(read_handle->data.fd); close(write_handle->data.fd); } /* return number of bytes written */ size_t simple_pipe_write(struct aws_io_handle *handle, const uint8_t *src, size_t src_size) { ssize_t write_val = write(handle->data.fd, src, src_size); return (write_val < 0) ? 0 : write_val; } /* return number of bytes read */ size_t simple_pipe_read(struct aws_io_handle *handle, uint8_t *dst, size_t dst_size) { ssize_t read_val = read(handle->data.fd, dst, dst_size); return (read_val < 0) ? 0 : read_val; } struct unsubrace_data { struct aws_event_loop *event_loop; struct aws_io_handle read_handle[2]; struct aws_io_handle write_handle[2]; bool is_writable[2]; bool wrote_to_both_pipes; bool is_unsubscribed; struct aws_task task; struct aws_mutex mutex; struct aws_condition_variable condition_variable; bool done; int result_code; }; void s_unsubrace_error(struct unsubrace_data *data) { aws_mutex_lock(&data->mutex); data->result_code = -1; data->done = true; aws_condition_variable_notify_one(&data->condition_variable); aws_mutex_unlock(&data->mutex); } void s_unsubrace_done(struct unsubrace_data *data) { aws_mutex_lock(&data->mutex); data->done = true; aws_condition_variable_notify_one(&data->condition_variable); aws_mutex_unlock(&data->mutex); } /* Wait until both pipes are writable, then write data to both of them. * This make it likely that both pipes receive events in the same iteration of the event-loop. */ void s_unsubrace_on_writable_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)event_loop; struct unsubrace_data *data = user_data; /* There should be no events after unsubscribe */ if (data->is_unsubscribed) { s_unsubrace_error(data); return; } if (!(events & AWS_IO_EVENT_TYPE_WRITABLE)) { return; } if (data->wrote_to_both_pipes) { return; } bool all_writable = true; for (int i = 0; i < 2; ++i) { if (&data->write_handle[i] == handle) { data->is_writable[i] = true; } if (!data->is_writable[i]) { all_writable = false; } } if (!all_writable) { return; } for (int i = 0; i < 2; ++i) { uint8_t buffer[] = "abc"; size_t bytes_written = simple_pipe_write(&data->write_handle[i], buffer, 3); if (bytes_written == 0) { s_unsubrace_error(data); return; } } data->wrote_to_both_pipes = true; } void s_unsubrace_done_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct unsubrace_data *data = arg; if (status != AWS_TASK_STATUS_RUN_READY) { s_unsubrace_error(data); return; } s_unsubrace_done(data); } /* Both pipes should have a readable event on the way. * The first pipe to get the event closes both pipes. 
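 * (Each pipe's read and write handles are unsubscribed from the event loop before the pipes are closed.)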
* Since both pipes are unsubscribed, the second readable event shouldn't be delivered */ void s_unsubrace_on_readable_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)handle; struct unsubrace_data *data = user_data; int err; if (data->is_unsubscribed) { s_unsubrace_error(data); return; } if (!(events & AWS_IO_EVENT_TYPE_READABLE)) { return; } for (int i = 0; i < 2; ++i) { err = aws_event_loop_unsubscribe_from_io_events(event_loop, &data->read_handle[i]); if (err) { s_unsubrace_error(data); return; } err = aws_event_loop_unsubscribe_from_io_events(event_loop, &data->write_handle[i]); if (err) { s_unsubrace_error(data); return; } simple_pipe_close(&data->read_handle[i], &data->write_handle[i]); } /* Zero out the handles so that further accesses to the closed pipe are extra likely to cause crashes */ AWS_ZERO_ARRAY(data->read_handle); AWS_ZERO_ARRAY(data->write_handle); data->is_unsubscribed = true; /* Have a short delay before ending test. Any events that fire during that delay would be an error. */ uint64_t time_ns; err = aws_event_loop_current_clock_time(data->event_loop, &time_ns); if (err) { s_unsubrace_error(data); return; } time_ns += aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_task_init(&data->task, s_unsubrace_done_task, data, "unsubrace"); aws_event_loop_schedule_task_future(data->event_loop, &data->task, time_ns); } static void s_unsubrace_setup_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct unsubrace_data *data = arg; int err; if (status != AWS_TASK_STATUS_RUN_READY) { s_unsubrace_error(data); return; } for (int i = 0; i < 2; ++i) { err = simple_pipe_open(&data->read_handle[i], &data->write_handle[i]); if (err) { s_unsubrace_error(data); return; } err = aws_event_loop_subscribe_to_io_events( data->event_loop, &data->write_handle[i], AWS_IO_EVENT_TYPE_WRITABLE, s_unsubrace_on_writable_event, data); if (err) { s_unsubrace_error(data); return; } err = aws_event_loop_subscribe_to_io_events( data->event_loop, &data->read_handle[i], AWS_IO_EVENT_TYPE_READABLE, s_unsubrace_on_readable_event, data); if (err) { s_unsubrace_error(data); return; } } } static bool s_unsubrace_predicate(void *arg) { struct unsubrace_data *data = arg; return data->done; } /* Regression test: Ensure that a handle cannot receive an event after it's been unsubscribed. * This was occuring in the case that there were events on two handles in the same event-loop tick, * and the first handle to receive its event unsubscribed the other handle. 
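 * The setup task above subscribes two pipes, and s_unsubrace_on_readable_event unsubscribes and closes both of them
 * as soon as the first readable event is delivered; any event arriving after that point fails the test.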
* Shortname: unsubrace */ static int s_test_event_loop_no_events_after_unsubscribe(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct unsubrace_data data = { .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .event_loop = event_loop, }; aws_task_init(&data.task, s_unsubrace_setup_task, &data, "no_events_after_unsubscribe"); aws_event_loop_schedule_task_now(event_loop, &data.task); ASSERT_SUCCESS(aws_mutex_lock(&data.mutex)); ASSERT_SUCCESS( aws_condition_variable_wait_pred(&data.condition_variable, &data.mutex, s_unsubrace_predicate, &data)); ASSERT_SUCCESS(aws_mutex_unlock(&data.mutex)); ASSERT_SUCCESS(data.result_code); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_no_events_after_unsubscribe, s_test_event_loop_no_events_after_unsubscribe) /* For testing logic that must occur on the event-loop thread. * The main thread should give the tester an array of state functions (last entry should be NULL), * then kick off the tester and then wait for it to be done. * Each function should return one of: * - AWS_OP_SUCCESS: continue to next state function * - AWS_OP_ERRROR: fail the test * - REMAIN_IN_STATE: try this state function again next time */ struct thread_tester; enum { REMAIN_IN_STATE = -2 }; typedef int(thread_tester_state_fn)(struct thread_tester *tester); struct thread_tester { struct aws_allocator *alloc; struct aws_event_loop *event_loop; bool done; int error_code; struct aws_mutex mutex; struct aws_condition_variable condition_variable; thread_tester_state_fn **state_functions; size_t current_state; size_t last_printed_state; /* data for tests */ struct aws_io_handle read_handle; struct aws_io_handle write_handle; int read_handle_event_counts[AWS_IO_EVENT_TYPE_ERROR + 1]; int write_handle_event_counts[AWS_IO_EVENT_TYPE_ERROR + 1]; enum { TIMER_NOT_SET, TIMER_WAITING, TIMER_DONE } timer_state; struct aws_task timer_task; }; static void s_thread_tester_abort(struct thread_tester *tester) { aws_mutex_lock(&tester->mutex); tester->error_code = AWS_OP_ERR; tester->done = true; aws_condition_variable_notify_one(&tester->condition_variable); aws_mutex_unlock(&tester->mutex); } static bool s_print_state_transitions = false; /* Set this true to print state transitions */ static void s_thread_tester_print_state(struct thread_tester *tester, const char *state_name) { if (tester->last_printed_state != tester->current_state) { if (s_print_state_transitions) { printf("entering state[%zu]: %s\n", tester->current_state, state_name); } tester->last_printed_state = tester->current_state; } } # define PRINT_STATE() s_thread_tester_print_state(tester, __func__) static void s_thread_tester_update(struct thread_tester *tester) { thread_tester_state_fn *current_fn; while (true) { current_fn = tester->state_functions[tester->current_state]; if (!current_fn) { /* We've reached the final state, success */ aws_mutex_lock(&tester->mutex); tester->error_code = AWS_OP_SUCCESS; tester->done = true; aws_condition_variable_notify_one(&tester->condition_variable); aws_mutex_unlock(&tester->mutex); return; } int err = current_fn(tester); if (err == AWS_OP_SUCCESS) { /* Go to next state, loop again */ tester->current_state++; } else if (err == REMAIN_IN_STATE) { /* End loop, wait for update function to be invoked again */ return; } else /* AWS_OP_ERR */ { /* End 
loop, end tester, end it all */ s_thread_tester_abort(tester); return; } } } static void s_thread_tester_update_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct thread_tester *tester = arg; if (status != AWS_TASK_STATUS_RUN_READY) { return s_thread_tester_abort(tester); } s_thread_tester_update(tester); } static bool s_thread_tester_pred(void *arg) { struct thread_tester *tester = arg; return tester->done; } static void s_timer_done_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct thread_tester *tester = arg; if (status != AWS_TASK_STATUS_RUN_READY) { return s_thread_tester_abort(tester); } tester->timer_state = TIMER_DONE; s_thread_tester_update(tester); } static int s_thread_tester_run(struct aws_allocator *alloc, thread_tester_state_fn *state_functions[]) { /* Set up tester */ struct thread_tester tester = { .alloc = alloc, .event_loop = aws_event_loop_new_default(alloc, aws_high_res_clock_get_ticks), .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .state_functions = state_functions, .last_printed_state = -1, }; ASSERT_NOT_NULL(tester.event_loop); ASSERT_SUCCESS(aws_event_loop_run(tester.event_loop)); /* Set up data to test with */ ASSERT_SUCCESS(simple_pipe_open(&tester.read_handle, &tester.write_handle)); aws_task_init(&tester.timer_task, s_timer_done_task, &tester, "timer_done"); /* Wait for tester to finish running its state functions on the event-loop thread */ aws_mutex_lock(&tester.mutex); struct aws_task task; aws_task_init(&task, s_thread_tester_update_task, &tester, "thread_tester_update"); aws_event_loop_schedule_task_now(tester.event_loop, &task); aws_condition_variable_wait_pred(&tester.condition_variable, &tester.mutex, s_thread_tester_pred, &tester); aws_mutex_unlock(&tester.mutex); /* Clean up tester*/ aws_event_loop_destroy(tester.event_loop); /* Clean up data */ simple_pipe_close(&tester.read_handle, &tester.write_handle); /* Return tester results */ return tester.error_code; } /* Count how many times each type of event fires on the readable and writable handles */ static void s_io_event_counter( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data) { (void)event_loop; (void)handle; struct thread_tester *tester = user_data; int *event_counts; if (handle == &tester->read_handle) { event_counts = tester->read_handle_event_counts; } else if (handle == &tester->write_handle) { event_counts = tester->write_handle_event_counts; } else { return s_thread_tester_abort(tester); } for (int flag = 1; flag <= AWS_IO_EVENT_TYPE_ERROR; flag <<= 1) { if (events & flag) { event_counts[flag] += 1; } } s_thread_tester_update(tester); } static int s_state_subscribe(struct thread_tester *tester) { PRINT_STATE(); ASSERT_SUCCESS(aws_event_loop_subscribe_to_io_events( tester->event_loop, &tester->read_handle, AWS_IO_EVENT_TYPE_READABLE, s_io_event_counter, tester)); ASSERT_SUCCESS(aws_event_loop_subscribe_to_io_events( tester->event_loop, &tester->write_handle, AWS_IO_EVENT_TYPE_WRITABLE, s_io_event_counter, tester)); return AWS_OP_SUCCESS; } static int s_state_unsubscribe(struct thread_tester *tester) { PRINT_STATE(); ASSERT_SUCCESS(aws_event_loop_unsubscribe_from_io_events(tester->event_loop, &tester->read_handle)); ASSERT_SUCCESS(aws_event_loop_unsubscribe_from_io_events(tester->event_loop, &tester->write_handle)); return AWS_OP_SUCCESS; } /* Remain in state until readable event fires, then reset readable event count and proceed to next state 
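 * (exactly one readable event is expected while in this state; a second one trips the ASSERT_UINT_EQUALS below).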
*/ static int s_state_on_readable(struct thread_tester *tester) { PRINT_STATE(); if (tester->read_handle_event_counts[AWS_IO_EVENT_TYPE_READABLE] == 0) { return REMAIN_IN_STATE; } ASSERT_UINT_EQUALS(1, tester->read_handle_event_counts[AWS_IO_EVENT_TYPE_READABLE]); tester->read_handle_event_counts[AWS_IO_EVENT_TYPE_READABLE] = 0; return AWS_OP_SUCCESS; } /* Remain in state until writable event fires, then reset writable event count and proceed to next state. */ static int s_state_on_writable(struct thread_tester *tester) { PRINT_STATE(); if (tester->write_handle_event_counts[AWS_IO_EVENT_TYPE_WRITABLE] == 0) { return REMAIN_IN_STATE; } ASSERT_UINT_EQUALS(1, tester->write_handle_event_counts[AWS_IO_EVENT_TYPE_WRITABLE]); tester->write_handle_event_counts[AWS_IO_EVENT_TYPE_WRITABLE] = 0; return AWS_OP_SUCCESS; } static int s_state_fail_if_more_readable_events(struct thread_tester *tester) { PRINT_STATE(); ASSERT_INT_EQUALS(0, tester->read_handle_event_counts[AWS_IO_EVENT_TYPE_READABLE]); return AWS_OP_SUCCESS; } static int s_state_fail_if_more_writable_events(struct thread_tester *tester) { PRINT_STATE(); ASSERT_INT_EQUALS(0, tester->write_handle_event_counts[AWS_IO_EVENT_TYPE_WRITABLE]); return AWS_OP_SUCCESS; } /* Write some data to the pipe */ static int s_state_write_data(struct thread_tester *tester) { PRINT_STATE(); const uint8_t data_to_copy[] = "abcdefghijklmnopqrstuvwxyz"; size_t num_bytes_written = simple_pipe_write(&tester->write_handle, data_to_copy, sizeof(data_to_copy)); ASSERT_UINT_EQUALS(sizeof(data_to_copy), num_bytes_written); return AWS_OP_SUCCESS; } /* Read from pipe until no data remains */ static int s_state_read_until_blocked(struct thread_tester *tester) { PRINT_STATE(); uint8_t buffer[512]; while (simple_pipe_read(&tester->read_handle, buffer, sizeof(buffer)) > 0) { } return AWS_OP_SUCCESS; } /* Entering the state starts a timer, and we remain in this state until the time completes */ static int s_state_wait_1sec(struct thread_tester *tester) { PRINT_STATE(); uint64_t time_ns; switch (tester->timer_state) { case TIMER_NOT_SET: time_ns = 0; ASSERT_SUCCESS(aws_event_loop_current_clock_time(tester->event_loop, &time_ns)); time_ns += aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_event_loop_schedule_task_future(tester->event_loop, &tester->timer_task, time_ns); tester->timer_state = TIMER_WAITING; return REMAIN_IN_STATE; case TIMER_WAITING: return REMAIN_IN_STATE; default: ASSERT_INT_EQUALS(TIMER_DONE, tester->timer_state); return AWS_OP_SUCCESS; } } /* Test that subscribe/unubscribe work at all */ static int s_test_event_loop_subscribe_unsubscribe(struct aws_allocator *allocator, void *ctx) { (void)ctx; thread_tester_state_fn *state_functions[] = { s_state_subscribe, s_state_unsubscribe, NULL, }; ASSERT_SUCCESS(s_thread_tester_run(allocator, state_functions)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_subscribe_unsubscribe, s_test_event_loop_subscribe_unsubscribe) static int s_test_event_loop_writable_event_on_subscribe(struct aws_allocator *allocator, void *ctx) { (void)ctx; thread_tester_state_fn *state_functions[] = { s_state_subscribe, s_state_on_writable, s_state_wait_1sec, s_state_fail_if_more_writable_events, s_state_unsubscribe, NULL, }; ASSERT_SUCCESS(s_thread_tester_run(allocator, state_functions)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_writable_event_on_subscribe, s_test_event_loop_writable_event_on_subscribe) static int s_test_event_loop_no_readable_event_before_write(struct aws_allocator *allocator, void 
*ctx) { (void)ctx; thread_tester_state_fn *state_functions[] = { s_state_subscribe, s_state_wait_1sec, s_state_fail_if_more_readable_events, s_state_unsubscribe, NULL, }; ASSERT_SUCCESS(s_thread_tester_run(allocator, state_functions)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_no_readable_event_before_write, s_test_event_loop_no_readable_event_before_write); static int s_test_event_loop_readable_event_on_subscribe_if_data_present(struct aws_allocator *allocator, void *ctx) { (void)ctx; thread_tester_state_fn *state_functions[] = { s_state_write_data, s_state_subscribe, s_state_on_readable, s_state_wait_1sec, s_state_fail_if_more_readable_events, s_state_unsubscribe, NULL, }; ASSERT_SUCCESS(s_thread_tester_run(allocator, state_functions)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( event_loop_readable_event_on_subscribe_if_data_present, s_test_event_loop_readable_event_on_subscribe_if_data_present); static int s_test_event_loop_readable_event_after_write(struct aws_allocator *allocator, void *ctx) { (void)ctx; thread_tester_state_fn *state_functions[] = { s_state_subscribe, s_state_on_writable, s_state_write_data, s_state_on_readable, s_state_wait_1sec, s_state_fail_if_more_readable_events, s_state_unsubscribe, NULL, }; ASSERT_SUCCESS(s_thread_tester_run(allocator, state_functions)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_readable_event_after_write, s_test_event_loop_readable_event_after_write); static int s_test_event_loop_readable_event_on_2nd_time_readable(struct aws_allocator *allocator, void *ctx) { (void)ctx; thread_tester_state_fn *state_functions[] = { s_state_subscribe, s_state_on_writable, s_state_write_data, s_state_on_readable, s_state_read_until_blocked, s_state_write_data, s_state_on_readable, s_state_unsubscribe, NULL, }; ASSERT_SUCCESS(s_thread_tester_run(allocator, state_functions)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_readable_event_on_2nd_time_readable, s_test_event_loop_readable_event_on_2nd_time_readable); #endif /* AWS_USE_IO_COMPLETION_PORTS */ static int s_event_loop_test_stop_then_restart(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct task_args task_args = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .invoked = false, .was_in_thread = false, .status = -1, .loop = event_loop, .thread_id = 0, }; struct aws_task task; aws_task_init(&task, s_test_task, &task_args, "stop_then_restart"); ASSERT_SUCCESS(aws_mutex_lock(&task_args.mutex)); aws_event_loop_schedule_task_now(event_loop, &task); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &task_args.condition_variable, &task_args.mutex, s_task_ran_predicate, &task_args)); ASSERT_TRUE(task_args.invoked); ASSERT_SUCCESS(aws_event_loop_stop(event_loop)); ASSERT_SUCCESS(aws_event_loop_wait_for_stop_completion(event_loop)); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); aws_event_loop_schedule_task_now(event_loop, &task); task_args.invoked = false; ASSERT_SUCCESS(aws_condition_variable_wait_pred( &task_args.condition_variable, &task_args.mutex, s_task_ran_predicate, &task_args)); ASSERT_TRUE(task_args.invoked); aws_event_loop_destroy(event_loop); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_stop_then_restart, s_event_loop_test_stop_then_restart) static int 
s_event_loop_test_multiple_stops(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;
    struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks);
    ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error()));
    ASSERT_SUCCESS(aws_event_loop_run(event_loop));
    for (int i = 0; i < 8; ++i) {
        ASSERT_SUCCESS(aws_event_loop_stop(event_loop));
    }
    aws_event_loop_destroy(event_loop);
    return AWS_OP_SUCCESS;
}
AWS_TEST_CASE(event_loop_multiple_stops, s_event_loop_test_multiple_stops)

static int test_event_loop_group_setup_and_shutdown(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;
    aws_io_library_init(allocator);
    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 0, NULL);
    size_t cpu_count = aws_system_info_processor_count();
    size_t el_count = aws_event_loop_group_get_loop_count(event_loop_group);
    struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(event_loop_group);
    ASSERT_NOT_NULL(event_loop);
    if (cpu_count > 1) {
        ASSERT_INT_EQUALS(cpu_count / 2, el_count);
    }
    aws_event_loop_group_release(event_loop_group);
    aws_io_library_clean_up();
    return AWS_OP_SUCCESS;
}
AWS_TEST_CASE(event_loop_group_setup_and_shutdown, test_event_loop_group_setup_and_shutdown)

static int test_numa_aware_event_loop_group_setup_and_shutdown(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;
    aws_io_library_init(allocator);
    size_t cpus_for_group = aws_get_cpu_count_for_group(0);
    size_t el_count = 1;
    /* pass UINT16_MAX here to check the boundary conditions on numa cpu detection. It should never create more
     * threads than hw cpus available */
    struct aws_event_loop_group *event_loop_group =
        aws_event_loop_group_new_default_pinned_to_cpu_group(allocator, UINT16_MAX, 0, NULL);
    el_count = aws_event_loop_group_get_loop_count(event_loop_group);
    size_t hw_thread_count = 0;
    struct aws_cpu_info *cpu_info = aws_mem_calloc(allocator, cpus_for_group, sizeof(struct aws_cpu_info));
    ASSERT_NOT_NULL(cpu_info);
    aws_get_cpu_ids_for_group(0, cpu_info, cpus_for_group);
    for (size_t i = 0; i < cpus_for_group; ++i) {
        if (!cpu_info[i].suspected_hyper_thread) {
            hw_thread_count++;
        }
    }
    aws_mem_release(allocator, cpu_info);
    ASSERT_INT_EQUALS(hw_thread_count, el_count);
    aws_event_loop_group_release(event_loop_group);
    aws_io_library_clean_up();
    return AWS_OP_SUCCESS;
}
AWS_TEST_CASE(numa_aware_event_loop_group_setup_and_shutdown, test_numa_aware_event_loop_group_setup_and_shutdown)

static void s_async_shutdown_complete_callback(void *user_data) {
    struct task_args *args = user_data;
    aws_mutex_lock(&args->mutex);
    args->thread_id = aws_thread_current_thread_id();
    args->invoked = true;
    aws_mutex_unlock((&args->mutex));
    aws_atomic_store_int(&args->thread_complete, true);
    aws_condition_variable_notify_one(&args->condition_variable);
}

static void s_async_shutdown_task(struct aws_task *task, void *user_data, enum aws_task_status status) {
    (void)task;
    (void)status;
    struct aws_event_loop_group *el_group = user_data;
    aws_event_loop_group_release(el_group);
}

static int test_event_loop_group_setup_and_shutdown_async(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;
    aws_io_library_init(allocator);
    /*
     * Small chicken-and-egg problem here: the task args needs the event loop group and loop, but
     * creating the event loop group needs shutdown options that refer to the task args.
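     * The args are therefore created with loop and el_group set to NULL, and those fields are filled in once the
     * group exists.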
*/ struct task_args task_args = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, .invoked = false, .was_in_thread = false, .status = -1, .loop = NULL, .el_group = NULL, .thread_id = 0, }; aws_atomic_init_int(&task_args.thread_complete, false); struct aws_shutdown_callback_options async_shutdown_options; AWS_ZERO_STRUCT(async_shutdown_options); async_shutdown_options.shutdown_callback_user_data = &task_args; async_shutdown_options.shutdown_callback_fn = s_async_shutdown_complete_callback; struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 0, &async_shutdown_options); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(event_loop_group); task_args.loop = event_loop; task_args.el_group = event_loop_group; struct aws_task task; aws_task_init( &task, s_async_shutdown_task, event_loop_group, "async elg shutdown invoked from an event loop thread"); /* Test "future" tasks */ uint64_t now; ASSERT_SUCCESS(aws_event_loop_current_clock_time(event_loop, &now)); aws_event_loop_schedule_task_future(event_loop, &task, now); ASSERT_SUCCESS(aws_mutex_lock(&task_args.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &task_args.condition_variable, &task_args.mutex, s_task_ran_predicate, &task_args)); ASSERT_TRUE(task_args.invoked); aws_mutex_unlock(&task_args.mutex); while (!aws_atomic_load_int(&task_args.thread_complete)) { aws_thread_current_sleep(15); } aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(event_loop_group_setup_and_shutdown_async, test_event_loop_group_setup_and_shutdown_async) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/exponential_backoff_retry_test.c000066400000000000000000000312021456575232400274300ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include struct exponential_backoff_test_data { size_t retry_count; size_t client_error_count; int failure_error_code; struct aws_mutex mutex; struct aws_condition_variable cvar; }; static void s_too_many_retries_test_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { (void)error_code; struct exponential_backoff_test_data *test_data = user_data; enum aws_retry_error_type error_type = AWS_RETRY_ERROR_TYPE_SERVER_ERROR; aws_mutex_lock(&test_data->mutex); test_data->retry_count += 1; if (test_data->client_error_count) { error_type = AWS_RETRY_ERROR_TYPE_CLIENT_ERROR; test_data->client_error_count--; } aws_mutex_unlock(&test_data->mutex); if (aws_retry_strategy_schedule_retry(token, error_type, s_too_many_retries_test_on_retry_ready, user_data)) { aws_mutex_lock(&test_data->mutex); test_data->failure_error_code = aws_last_error(); aws_mutex_unlock(&test_data->mutex); aws_retry_token_release(token); aws_condition_variable_notify_all(&test_data->cvar); } } static void s_too_many_retries_test_token_acquired( struct aws_retry_strategy *retry_strategy, int error_code, struct aws_retry_token *token, void *user_data) { (void)retry_strategy; (void)error_code; aws_retry_strategy_schedule_retry( token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_too_many_retries_test_on_retry_ready, user_data); } static bool s_retry_has_failed(void *arg) { struct exponential_backoff_test_data *test_data = arg; return test_data->failure_error_code != AWS_OP_SUCCESS; } static int s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode( struct aws_allocator *allocator, enum aws_exponential_backoff_jitter_mode jitter_mode) { aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_exponential_backoff_retry_options config = { .max_retries = 3, .jitter_mode = jitter_mode, .el_group = el_group, }; struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config); ASSERT_NOT_NULL(retry_strategy); struct exponential_backoff_test_data test_data = { .retry_count = 0, .failure_error_code = 0, .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( retry_strategy, NULL, s_too_many_retries_test_token_acquired, &test_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&test_data.cvar, &test_data.mutex, s_retry_has_failed, &test_data)); aws_mutex_unlock(&test_data.mutex); ASSERT_UINT_EQUALS(config.max_retries, test_data.retry_count); ASSERT_UINT_EQUALS(AWS_IO_MAX_RETRIES_EXCEEDED, test_data.failure_error_code); aws_retry_strategy_release(retry_strategy); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } /* Test that no jitter mode exponential back-off fails after max retries are exceeded. */ static int s_test_exponential_backoff_retry_too_many_retries_no_jitter_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode( allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_NONE); } AWS_TEST_CASE( test_exponential_backoff_retry_too_many_retries_no_jitter, s_test_exponential_backoff_retry_too_many_retries_no_jitter_fn) /* Test that full jitter mode exponential back-off fails after max retries are exceeded. 
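 * (Identical scenario to the no-jitter case above; only the jitter_mode passed to the shared helper differs.)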
*/ static int s_test_exponential_backoff_retry_too_many_retries_full_jitter_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode( allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_FULL); } AWS_TEST_CASE( test_exponential_backoff_retry_too_many_retries_full_jitter, s_test_exponential_backoff_retry_too_many_retries_full_jitter_fn) /* Test that decorrelated jitter mode exponential back-off fails after max retries are exceeded. */ static int s_test_exponential_backoff_retry_too_many_retries_decorrelated_jitter_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode( allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED); } AWS_TEST_CASE( test_exponential_backoff_retry_too_many_retries_decorrelated_jitter, s_test_exponential_backoff_retry_too_many_retries_decorrelated_jitter_fn) /* Test that default jitter mode exponential back-off fails after max retries are exceeded. */ static int s_test_exponential_backoff_retry_too_many_retries_default_jitter_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode( allocator, AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT); } AWS_TEST_CASE( test_exponential_backoff_retry_too_many_retries_default_jitter, s_test_exponential_backoff_retry_too_many_retries_default_jitter_fn) /* Test that client failures do not count against the max retry budget. */ static int s_test_exponential_backoff_retry_client_errors_do_not_count_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_exponential_backoff_retry_options config = { .el_group = el_group, .max_retries = 3, }; struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config); ASSERT_NOT_NULL(retry_strategy); struct exponential_backoff_test_data test_data = { .retry_count = 0, .failure_error_code = 0, .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, .client_error_count = 2, }; ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( retry_strategy, NULL, s_too_many_retries_test_token_acquired, &test_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&test_data.cvar, &test_data.mutex, s_retry_has_failed, &test_data)); aws_mutex_unlock(&test_data.mutex); ASSERT_UINT_EQUALS(config.max_retries + 2, test_data.retry_count); ASSERT_UINT_EQUALS(AWS_IO_MAX_RETRIES_EXCEEDED, test_data.failure_error_code); aws_retry_strategy_release(retry_strategy); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_exponential_backoff_retry_client_errors_do_not_count, s_test_exponential_backoff_retry_client_errors_do_not_count_fn) /* Test that in no jitter mode, exponential backoff is actually applied as documented. 
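 * With backoff scale factor S, the three retries should be delayed by roughly 1*S, 2*S and 4*S respectively, so the
 * elapsed time measured with the high-resolution clock should be at least the sum of those three delays.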
*/ static int s_test_exponential_backoff_retry_no_jitter_time_taken_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_exponential_backoff_retry_options config = { .max_retries = 3, .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE, .el_group = el_group, }; struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config); ASSERT_NOT_NULL(retry_strategy); struct exponential_backoff_test_data test_data = { .retry_count = 0, .failure_error_code = 0, .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; uint64_t before_time = 0; ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&before_time)); ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( retry_strategy, NULL, s_too_many_retries_test_token_acquired, &test_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&test_data.cvar, &test_data.mutex, s_retry_has_failed, &test_data)); aws_mutex_unlock(&test_data.mutex); uint64_t after_time = 0; ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&after_time)); uint64_t backoff_scale_factor = aws_timestamp_convert(config.backoff_scale_factor_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); uint64_t expected_interval = (1 * backoff_scale_factor) + (2 * backoff_scale_factor) + (4 * backoff_scale_factor); ASSERT_TRUE(expected_interval <= after_time - before_time); ASSERT_UINT_EQUALS(config.max_retries, test_data.retry_count); ASSERT_UINT_EQUALS(AWS_IO_MAX_RETRIES_EXCEEDED, test_data.failure_error_code); aws_retry_strategy_release(retry_strategy); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_exponential_backoff_retry_no_jitter_time_taken, s_test_exponential_backoff_retry_no_jitter_time_taken_fn) /* Test that in no jitter mode, max exponential backoff is actually applied as documented. 
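 * Here backoff_scale_factor_ms is 1000 and max_backoff_secs is 3, so each delay is clamped to 3 seconds and the
 * expected minimum elapsed time is min(3, 1) + min(3, 2) + min(3, 4) = 1 + 2 + 3 = 6 seconds.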
*/ static int s_test_exponential_max_backoff_retry_no_jitter_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_exponential_backoff_retry_options config = { .max_retries = 3, .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE, .el_group = el_group, .backoff_scale_factor_ms = 1000, .max_backoff_secs = 3, }; struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config); ASSERT_NOT_NULL(retry_strategy); struct exponential_backoff_test_data test_data = { .retry_count = 0, .failure_error_code = 0, .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; uint64_t before_time = 0; ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&before_time)); ASSERT_SUCCESS(aws_mutex_lock(&test_data.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( retry_strategy, NULL, s_too_many_retries_test_token_acquired, &test_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&test_data.cvar, &test_data.mutex, s_retry_has_failed, &test_data)); aws_mutex_unlock(&test_data.mutex); uint64_t after_time = 0; ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&after_time)); uint64_t backoff_scale_factor = aws_timestamp_convert(config.backoff_scale_factor_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); uint64_t max_backoff_scale_factor = aws_timestamp_convert(config.max_backoff_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); uint64_t expected_interval = aws_min_u64(max_backoff_scale_factor, 1 * backoff_scale_factor) + aws_min_u64(max_backoff_scale_factor, 2 * backoff_scale_factor) + aws_min_u64(max_backoff_scale_factor, 4 * backoff_scale_factor); ASSERT_TRUE(expected_interval <= after_time - before_time); ASSERT_UINT_EQUALS(config.max_retries, test_data.retry_count); ASSERT_UINT_EQUALS(AWS_IO_MAX_RETRIES_EXCEEDED, test_data.failure_error_code); aws_retry_strategy_release(retry_strategy); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_exponential_max_backoff_retry_no_jitter, s_test_exponential_max_backoff_retry_no_jitter_fn) /* verify that invalid options cause a failure at creation time. */ static int s_test_exponential_backoff_retry_invalid_options_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_exponential_backoff_retry_options config = { .max_retries = 64, .el_group = el_group, }; struct aws_retry_strategy *retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &config); ASSERT_NULL(retry_strategy); ASSERT_UINT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_exponential_backoff_retry_invalid_options, s_test_exponential_backoff_retry_invalid_options_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/future_test.c000066400000000000000000000717441456575232400235330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include "future_test.h" #define ONE_SEC_IN_NS ((uint64_t)AWS_TIMESTAMP_NANOS) #define MAX_TIMEOUT_NS (10 * ONE_SEC_IN_NS) AWS_FUTURE_T_POINTER_WITH_DESTROY_IMPLEMENTATION(aws_future_destroyme, struct aws_destroyme, aws_destroyme_destroy); AWS_FUTURE_T_POINTER_WITH_RELEASE_IMPLEMENTATION(aws_future_refcountme, struct aws_refcountme, aws_refcountme_release); /* Run through the basics of an AWS_FUTURE_T_BY_VALUE */ static int s_test_future_by_value(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_future_bool *future = aws_future_bool_new(alloc); ASSERT_NOT_NULL(future); ASSERT_FALSE(aws_future_bool_is_done(future)); /* set result */ aws_future_bool_set_result(future, true); ASSERT_TRUE(aws_future_bool_is_done(future)); ASSERT_INT_EQUALS(0, aws_future_bool_get_error(future)); ASSERT_TRUE(aws_future_bool_get_result(future)); future = aws_future_bool_release(future); ASSERT_NULL(future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_by_value, s_test_future_by_value) /* Run through the basics of an aws_future */ static int s_test_future_void(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_future_void *future = aws_future_void_new(alloc); ASSERT_NOT_NULL(future); ASSERT_FALSE(aws_future_void_is_done(future)); /* set valueless result */ aws_future_void_set_result(future); ASSERT_TRUE(aws_future_void_is_done(future)); ASSERT_INT_EQUALS(0, aws_future_void_get_error(future)); future = aws_future_void_release(future); ASSERT_NULL(future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_void, s_test_future_void) struct future_size_callback_recorder { struct aws_future_size *future; /* record all state when this future's callback fires */ struct aws_event_loop *event_loop; /* record whether callback fires on this event-loop's thread */ struct aws_channel *channel; /* record state of the world when callback invoked */ int error_code; size_t result; aws_thread_id_t thread_id; bool is_event_loop_thread; int invoke_count; }; static void s_record_on_future_size_done(void *user_data) { struct future_size_callback_recorder *recorder = user_data; recorder->error_code = aws_future_size_get_error(recorder->future); if (recorder->error_code == 0) { recorder->result = aws_future_size_get_result(recorder->future); } recorder->thread_id = aws_thread_current_thread_id(); recorder->invoke_count++; if (recorder->event_loop) { recorder->is_event_loop_thread = aws_event_loop_thread_is_callers_thread(recorder->event_loop); } } /* Test callback firing immediately upon registration */ static int s_test_future_callback_fires_immediately(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct future_size_callback_recorder recorder; AWS_ZERO_STRUCT(recorder); recorder.future = aws_future_size_new(alloc); aws_future_size_set_result(recorder.future, 123); aws_future_size_register_callback(recorder.future, s_record_on_future_size_done, &recorder); /* callback should have fired immediately, on main thread, since future was already done */ ASSERT_INT_EQUALS(1, recorder.invoke_count); ASSERT_INT_EQUALS(0, recorder.error_code); ASSERT_UINT_EQUALS(123, recorder.result); aws_thread_id_t main_thread_id = aws_thread_current_thread_id(); ASSERT_INT_EQUALS(0, memcmp(&main_thread_id, &recorder.thread_id, sizeof(aws_thread_id_t))); aws_future_size_release(recorder.future); aws_io_library_clean_up(); return 0; } 
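/*
 * Illustrative sketch, not part of the upstream test suite: the consumer-side
 * pattern these future_size tests exercise. A completion callback is registered
 * on the future and, once it fires, the error code is checked before the result
 * is read. Shown as a comment so it adds no symbols to the test binary; the
 * s_example_* names and the literal 42 are made up for illustration.
 *
 *   static void s_example_on_done(void *user_data) {
 *       struct aws_future_size *future = user_data;
 *       if (aws_future_size_get_error(future) == 0) {
 *           size_t result = aws_future_size_get_result(future);
 *           (void)result; // consume the result
 *       }
 *       aws_future_size_release(future); // drop the reference passed as user_data
 *   }
 *
 *   // Producer side: hand the callback its own reference, then complete the future.
 *   struct aws_future_size *future = aws_future_size_new(alloc);
 *   aws_future_size_register_callback(future, s_example_on_done, aws_future_size_acquire(future));
 *   aws_future_size_set_result(future, 42);
 *   aws_future_size_release(future);
 */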
AWS_TEST_CASE(future_callback_fires_immediately, s_test_future_callback_fires_immediately); struct future_size_thread_job { struct aws_allocator *alloc; uint64_t delay_ns; struct aws_future_size *my_future; }; /* Function that runs on thread, and completes future after delay */ static void s_run_thread_job(void *user_data) { struct future_size_thread_job *job = user_data; aws_thread_current_sleep(job->delay_ns); aws_future_size_set_result(job->my_future, 987); aws_future_size_release(job->my_future); aws_mem_release(job->alloc, job); } /* Start thread that will complete future after delay */ static struct aws_future_size *s_start_thread_job(struct aws_allocator *alloc, uint64_t delay_ns) { struct aws_future_size *future = aws_future_size_new(alloc); struct future_size_thread_job *job = aws_mem_calloc(alloc, 1, sizeof(struct future_size_thread_job)); job->alloc = alloc; job->delay_ns = delay_ns; job->my_future = aws_future_size_acquire(future); struct aws_thread thread; AWS_FATAL_ASSERT(aws_thread_init(&thread, alloc) == AWS_OP_SUCCESS); struct aws_thread_options thread_options = *aws_default_thread_options(); thread_options.join_strategy = AWS_TJS_MANAGED; thread_options.name = aws_byte_cursor_from_c_str("FutureSizeJob"); AWS_FATAL_ASSERT(aws_thread_launch(&thread, s_run_thread_job, job, &thread_options) == AWS_OP_SUCCESS); return future; } /* Test callback firing on a different thread than the one that registered it. * This is the first test that looks like real-world use of aws_future */ static int s_test_future_callback_fires_on_another_thread(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); /* Kick off thread, which will set result in 1sec */ struct future_size_callback_recorder recorder = { .future = s_start_thread_job(alloc, ONE_SEC_IN_NS /*delay_ns*/), }; aws_future_size_register_callback(recorder.future, s_record_on_future_size_done, &recorder); /* Wait until other thread joins, at which point the future is complete and the callback has fired */ aws_thread_set_managed_join_timeout_ns(MAX_TIMEOUT_NS); ASSERT_SUCCESS(aws_thread_join_all_managed()); /* callback should have fired on the other thread */ ASSERT_INT_EQUALS(1, recorder.invoke_count); ASSERT_INT_EQUALS(0, recorder.error_code); ASSERT_UINT_EQUALS(987, recorder.result); aws_thread_id_t main_thread_id = aws_thread_current_thread_id(); ASSERT_TRUE(memcmp(&main_thread_id, &recorder.thread_id, sizeof(aws_thread_id_t)) != 0); aws_future_size_release(recorder.future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_callback_fires_on_another_thread, s_test_future_callback_fires_on_another_thread); static int s_test_future_register_callback_if_not_done(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); { /* the callback should not get registered if future is already done */ struct future_size_callback_recorder recorder = { .future = aws_future_size_new(alloc), }; aws_future_size_set_result(recorder.future, 555); ASSERT_FALSE( aws_future_size_register_callback_if_not_done(recorder.future, s_record_on_future_size_done, &recorder)); ASSERT_INT_EQUALS(0, recorder.invoke_count); aws_future_size_release(recorder.future); } { /* the callback should get registered if the future isn't done yet */ struct future_size_callback_recorder recorder = { .future = aws_future_size_new(alloc), }; ASSERT_TRUE( aws_future_size_register_callback_if_not_done(recorder.future, s_record_on_future_size_done, &recorder)); ASSERT_INT_EQUALS(0, recorder.invoke_count); /* now set result, the 
callback should fire */ aws_future_size_set_result(recorder.future, 555); ASSERT_INT_EQUALS(1, recorder.invoke_count); /* after callback fires, you're allowed to call register_callback_if_not_done() again. * (This makes it easy to call an async function repeatedly in a loop, * where you keep looping as long as the futures complete immediately, * but bail out if the callback gets registered) */ ASSERT_FALSE( aws_future_size_register_callback_if_not_done(recorder.future, s_record_on_future_size_done, &recorder)); /* make sure callback didn't fire a 2nd time */ ASSERT_INT_EQUALS(1, recorder.invoke_count); aws_future_size_release(recorder.future); } aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_register_callback_if_not_done, s_test_future_register_callback_if_not_done) /* Test that an event-loop callback still runs if it's registered after the future is already done */ static int s_test_future_register_event_loop_callback_after_done(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct future_size_callback_recorder recorder = { .future = aws_future_size_new(alloc), .event_loop = aws_event_loop_new_default(alloc, aws_high_res_clock_get_ticks), }; ASSERT_SUCCESS(aws_event_loop_run(recorder.event_loop)); /* register callback after result already set */ aws_future_size_set_result(recorder.future, 765); aws_future_size_register_event_loop_callback( recorder.future, recorder.event_loop, s_record_on_future_size_done, &recorder); /* Wait until event loop is destroyed, at which point the future is complete and the callback has fired */ aws_event_loop_destroy(recorder.event_loop); /* callback should have fired on event-loop thread */ ASSERT_INT_EQUALS(1, recorder.invoke_count); ASSERT_INT_EQUALS(0, recorder.error_code); ASSERT_UINT_EQUALS(765, recorder.result); ASSERT_TRUE(recorder.is_event_loop_thread); /* cleanup */ aws_future_size_release(recorder.future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_register_event_loop_callback_after_done, s_test_future_register_event_loop_callback_after_done) /* Test that an event-loop callback still runs if it's registered before the future is done */ static int s_test_future_register_event_loop_callback_before_done(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct future_size_callback_recorder recorder = { .future = aws_future_size_new(alloc), .event_loop = aws_event_loop_new_default(alloc, aws_high_res_clock_get_ticks), }; ASSERT_SUCCESS(aws_event_loop_run(recorder.event_loop)); /* register callback before result is set */ aws_future_size_register_event_loop_callback( recorder.future, recorder.event_loop, s_record_on_future_size_done, &recorder); aws_future_size_set_result(recorder.future, 765); /* Wait until event loop is destroyed, at which point the future is complete and the callback has fired */ aws_event_loop_destroy(recorder.event_loop); /* callback should have fired on event-loop thread */ ASSERT_INT_EQUALS(1, recorder.invoke_count); ASSERT_INT_EQUALS(0, recorder.error_code); ASSERT_UINT_EQUALS(765, recorder.result); ASSERT_TRUE(recorder.is_event_loop_thread); /* cleanup */ aws_future_size_release(recorder.future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_register_event_loop_callback_before_done, s_test_future_register_event_loop_callback_before_done) void s_set_result_from_event_loop_task(struct aws_task *task, void *user_data, enum aws_task_status status) { (void)task; (void)status; struct future_size_callback_recorder *recorder = 
user_data; AWS_FATAL_ASSERT(recorder->invoke_count == 0); /* The future shouldn't be done yet */ aws_future_size_set_result(recorder->future, 1234567); /* The callback should NOT be invoked from the same callstack as set_result(). * The callback should run as its own scheduled task */ AWS_FATAL_ASSERT(recorder->invoke_count == 0); } /* Test that an event-loop callback always runs as its own scheduled task. * Even if set_result() is called from the event-loop thread, the callback * should NOT run in the same callstack as set_result() */ static int s_test_future_register_event_loop_callback_always_scheduled(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct future_size_callback_recorder recorder = { .future = aws_future_size_new(alloc), .event_loop = aws_event_loop_new_default(alloc, aws_high_res_clock_get_ticks), }; ASSERT_SUCCESS(aws_event_loop_run(recorder.event_loop)); /* register callback before result is set */ aws_future_size_register_event_loop_callback( recorder.future, recorder.event_loop, s_record_on_future_size_done, &recorder); struct aws_task set_result_from_event_loop_task; aws_task_init( &set_result_from_event_loop_task, s_set_result_from_event_loop_task, &recorder, "set_result_from_event_loop"); aws_event_loop_schedule_task_now(recorder.event_loop, &set_result_from_event_loop_task); /* Wait until event loop is destroyed, at which point the future is complete and the callback has fired */ aws_event_loop_destroy(recorder.event_loop); /* callback should have fired on event-loop thread */ ASSERT_INT_EQUALS(1, recorder.invoke_count); ASSERT_INT_EQUALS(0, recorder.error_code); ASSERT_UINT_EQUALS(1234567, recorder.result); ASSERT_TRUE(recorder.is_event_loop_thread); /* cleanup */ aws_future_size_release(recorder.future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE( future_register_event_loop_callback_always_scheduled, s_test_future_register_event_loop_callback_always_scheduled) static void s_on_channel_setup(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; struct aws_future_void *setup_future = user_data; if (error_code) { aws_future_void_set_error(setup_future, error_code); } else { aws_future_void_set_result(setup_future); } } /* Test channel callback */ static int s_test_future_register_channel_callback(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); /* Set up event-loop */ struct future_size_callback_recorder recorder = { .future = aws_future_size_new(alloc), .event_loop = aws_event_loop_new_default(alloc, aws_high_res_clock_get_ticks), }; ASSERT_SUCCESS(aws_event_loop_run(recorder.event_loop)); /* Set up channel */ struct aws_future_void *channel_setup_future = aws_future_void_new(alloc); struct aws_channel_options channel_options = { .event_loop = recorder.event_loop, .on_setup_completed = s_on_channel_setup, .setup_user_data = channel_setup_future, }; struct aws_channel *channel = aws_channel_new(alloc, &channel_options); ASSERT_TRUE(aws_future_void_wait(channel_setup_future, MAX_TIMEOUT_NS)); ASSERT_INT_EQUALS(0, aws_future_void_get_error(channel_setup_future)); /* register callback after result already set */ aws_future_size_set_result(recorder.future, 234567); aws_future_size_register_channel_callback(recorder.future, channel, s_record_on_future_size_done, &recorder); /* wait until channel/event-loop are destroyed, * at which point the future is complete and the callback has fired */ aws_channel_release_hold(channel); aws_event_loop_destroy(recorder.event_loop); /* 
callback should have fired on channel/event-loop thread */ ASSERT_INT_EQUALS(1, recorder.invoke_count); ASSERT_INT_EQUALS(0, recorder.error_code); ASSERT_UINT_EQUALS(234567, recorder.result); ASSERT_TRUE(recorder.is_event_loop_thread); /* cleanup */ aws_future_void_release(channel_setup_future); aws_future_size_release(recorder.future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_register_channel_callback, s_test_future_register_channel_callback); static int s_test_future_wait_timeout(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_future_void *future = aws_future_void_new(alloc); uint64_t start_ns; ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&start_ns)); /* The future will never complete, so this should time out and return false */ ASSERT_FALSE(aws_future_void_wait(future, ONE_SEC_IN_NS)); uint64_t end_ns; ASSERT_SUCCESS(aws_high_res_clock_get_ticks(&end_ns)); /* Ensure that the wait actually took some time */ uint64_t duration_ns = end_ns - start_ns; ASSERT_TRUE(duration_ns >= (uint64_t)(0.9 * ONE_SEC_IN_NS)); aws_future_void_release(future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_wait_timeout, s_test_future_wait_timeout) struct aws_destroyme { struct aws_allocator *alloc; bool *set_true_on_death; }; struct aws_destroyme *aws_destroyme_new(struct aws_allocator *alloc, bool *set_true_on_death) { struct aws_destroyme *destroyme = aws_mem_calloc(alloc, 1, sizeof(struct aws_destroyme)); destroyme->alloc = alloc; destroyme->set_true_on_death = set_true_on_death; *destroyme->set_true_on_death = false; return destroyme; } void aws_destroyme_destroy(struct aws_destroyme *destroyme) { AWS_FATAL_ASSERT(destroyme != NULL && "future should not call destroy() on NULL"); AWS_FATAL_ASSERT(*destroyme->set_true_on_death == false && "destroy() called multiple times on same object"); *destroyme->set_true_on_death = true; aws_mem_release(destroyme->alloc, destroyme); } /* Run through the basics of an AWS_FUTURE_T_POINTER_WITH_DESTROY */ static int s_test_future_pointer_with_destroy(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_future_destroyme *future = aws_future_destroyme_new(alloc); ASSERT_FALSE(aws_future_destroyme_is_done(future)); /* set result */ bool original_destroyme_died = false; struct aws_destroyme *original_destroyme = aws_destroyme_new(alloc, &original_destroyme_died); struct aws_destroyme *destroyme_pointer_copy = original_destroyme; aws_future_destroyme_set_result_by_move(future, &original_destroyme); ASSERT_NULL(original_destroyme); /* future should NULL this out while taking ownership of the result */ ASSERT_TRUE(aws_future_destroyme_is_done(future)); ASSERT_FALSE(original_destroyme_died); /* messing with refcount shouldn't trigger destroy */ aws_future_destroyme_acquire(future); aws_future_destroyme_release(future); ASSERT_FALSE(original_destroyme_died); /* get result (without taking ownership) */ struct aws_destroyme *destroyme_from_future = aws_future_destroyme_peek_result(future); ASSERT_NOT_NULL(destroyme_from_future); ASSERT_PTR_EQUALS(destroyme_pointer_copy, destroyme_from_future); ASSERT_FALSE(original_destroyme_died); /* result should be destroyed along with future */ aws_future_destroyme_release(future); ASSERT_TRUE(original_destroyme_died); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_pointer_with_destroy, s_test_future_pointer_with_destroy) struct aws_refcountme { struct aws_allocator *alloc; struct aws_ref_count ref_count; bool 
*set_true_on_death; }; static void s_refcountme_destroy(void *user_data) { struct aws_refcountme *refcountme = user_data; *refcountme->set_true_on_death = true; aws_mem_release(refcountme->alloc, refcountme); } struct aws_refcountme *aws_refcountme_new(struct aws_allocator *alloc, bool *set_true_on_death) { struct aws_refcountme *refcountme = aws_mem_calloc(alloc, 1, sizeof(struct aws_refcountme)); refcountme->alloc = alloc; aws_ref_count_init(&refcountme->ref_count, refcountme, s_refcountme_destroy); refcountme->set_true_on_death = set_true_on_death; *refcountme->set_true_on_death = false; return refcountme; } struct aws_refcountme *aws_refcountme_acquire(struct aws_refcountme *refcountme) { aws_ref_count_acquire(&refcountme->ref_count); return refcountme; } /* Most release() functions accept NULL, but not this one, because we want to * ensure that aws_future won't pass NULL to the release function */ struct aws_refcountme *aws_refcountme_release(struct aws_refcountme *refcountme) { AWS_FATAL_ASSERT(refcountme != NULL && "future should not call release() on NULL"); AWS_FATAL_ASSERT(*refcountme->set_true_on_death == false && "release() called multiple times on same object"); *refcountme->set_true_on_death = true; aws_mem_release(refcountme->alloc, refcountme); return NULL; } /* Run through the basics of an AWS_FUTURE_T_POINTER_WITH_RELEASE */ static int s_test_future_pointer_with_release(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_future_refcountme *future = aws_future_refcountme_new(alloc); ASSERT_FALSE(aws_future_refcountme_is_done(future)); /* set result */ bool original_refcountme_died = false; struct aws_refcountme *original_refcountme = aws_refcountme_new(alloc, &original_refcountme_died); struct aws_refcountme *refcountme_pointer_copy = original_refcountme; aws_future_refcountme_set_result_by_move(future, &original_refcountme); ASSERT_NULL(original_refcountme); /* future should NULL this out while taking ownership of the result */ ASSERT_TRUE(aws_future_refcountme_is_done(future)); ASSERT_FALSE(original_refcountme_died); /* get result (without taking ownership) */ struct aws_refcountme *refcountme_from_future = aws_future_refcountme_peek_result(future); ASSERT_NOT_NULL(refcountme_from_future); ASSERT_PTR_EQUALS(refcountme_pointer_copy, refcountme_from_future); /* result should be destroyed along with future */ aws_future_refcountme_release(future); ASSERT_TRUE(original_refcountme_died); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_pointer_with_release, s_test_future_pointer_with_release) /* Test that get_result_by_move() transfers ownership */ static int s_test_future_get_result_by_move(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); { /* AWS_FUTURE_T_POINTER_WITH_DESTROY */ bool destroyme_died = false; struct aws_destroyme *original_destroyme = aws_destroyme_new(alloc, &destroyme_died); struct aws_future_destroyme *future = aws_future_destroyme_new(alloc); aws_future_destroyme_set_result_by_move(future, &original_destroyme); /* transfer ownership out of future */ struct aws_destroyme *destroyme_from_future = aws_future_destroyme_get_result_by_move(future); ASSERT_FALSE(destroyme_died); /* result should stay alive after future is destroyed */ aws_future_destroyme_release(future); ASSERT_FALSE(destroyme_died); /* clean up */ aws_destroyme_destroy(destroyme_from_future); ASSERT_TRUE(destroyme_died); } { /* AWS_FUTURE_T_POINTER_WITH_RELEASE */ bool refcountme_died = false; struct 
aws_refcountme *original_refcountme = aws_refcountme_new(alloc, &refcountme_died); struct aws_future_refcountme *future = aws_future_refcountme_new(alloc); aws_future_refcountme_set_result_by_move(future, &original_refcountme); /* transfer ownership out of future */ struct aws_refcountme *refcountme_from_future = aws_future_refcountme_get_result_by_move(future); ASSERT_FALSE(refcountme_died); /* result should stay alive after future is destroyed */ aws_future_refcountme_release(future); ASSERT_FALSE(refcountme_died); /* clean up */ aws_refcountme_release(refcountme_from_future); ASSERT_TRUE(refcountme_died); } aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_get_result_by_move, s_test_future_get_result_by_move) /* Check that, if an incomplete future dies, the result's destructor doesn't run again. * We know this works because the destructor for destroyme and refcountme will assert if NULL is passed in */ static int s_test_future_can_die_incomplete(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_future_destroyme *future_destroyme = aws_future_destroyme_new(alloc); aws_future_destroyme_release(future_destroyme); struct aws_future_refcountme *future_refcountme = aws_future_refcountme_new(alloc); aws_future_refcountme_release(future_refcountme); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_can_die_incomplete, s_test_future_can_die_incomplete) /* Check aws_future will accept NULL as a result, and not consider it an error, * and not try to run the result destructor. */ static int s_test_future_by_pointer_accepts_null_result(struct aws_allocator *alloc, void *ctx) { (void)ctx; { struct aws_future_destroyme *future = aws_future_destroyme_new(alloc); struct aws_destroyme *null_destroyme = NULL; aws_future_destroyme_set_result_by_move(future, &null_destroyme); ASSERT_TRUE(aws_future_destroyme_is_done(future)); ASSERT_INT_EQUALS(0, aws_future_destroyme_get_error(future)); ASSERT_NULL(aws_future_destroyme_peek_result(future)); aws_future_destroyme_release(future); } { struct aws_future_refcountme *future = aws_future_refcountme_new(alloc); struct aws_refcountme *null_refcountme = NULL; aws_future_refcountme_set_result_by_move(future, &null_refcountme); ASSERT_TRUE(aws_future_refcountme_is_done(future)); ASSERT_INT_EQUALS(0, aws_future_refcountme_get_error(future)); ASSERT_NULL(aws_future_refcountme_peek_result(future)); aws_future_refcountme_release(future); } aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_by_pointer_accepts_null_result, s_test_future_by_pointer_accepts_null_result) /* Check that, if an aws_future has a result set multiple times, only the 1st result sticks. * Any 2nd or 3rd result will just get cleaned up. */ static int s_test_future_set_multiple_times(struct aws_allocator *alloc, void *ctx) { (void)ctx; struct aws_future_destroyme *future = aws_future_destroyme_new(alloc); bool result1_destroyed = false; struct aws_destroyme *result1 = aws_destroyme_new(alloc, &result1_destroyed); struct aws_destroyme *result1_pointer_copy = result1; bool result2_destroyed = false; struct aws_destroyme *result2 = aws_destroyme_new(alloc, &result2_destroyed); bool result3_destroyed = false; struct aws_destroyme *result3 = aws_destroyme_new(alloc, &result3_destroyed); /* the future now owns result1 */ aws_future_destroyme_set_result_by_move(future, &result1); ASSERT_FALSE(result1_destroyed); /* attempt to set result2. 
* the future should continue treating result1 as the result * result2 will simply be destroyed */ aws_future_destroyme_set_result_by_move(future, &result2); ASSERT_PTR_EQUALS(result1_pointer_copy, aws_future_destroyme_peek_result(future)); ASSERT_FALSE(result1_destroyed); ASSERT_NULL(result2); ASSERT_TRUE(result2_destroyed); /* likewise, result3 should be ignored and destroyed */ aws_future_destroyme_set_result_by_move(future, &result3); ASSERT_PTR_EQUALS(result1_pointer_copy, aws_future_destroyme_peek_result(future)); ASSERT_FALSE(result1_destroyed); ASSERT_NULL(result3); ASSERT_TRUE(result3_destroyed); /* setting an error is ignored, if there's already a result */ aws_future_destroyme_set_error(future, 999); ASSERT_PTR_EQUALS(result1_pointer_copy, aws_future_destroyme_peek_result(future)); ASSERT_FALSE(result1_destroyed); ASSERT_INT_EQUALS(0, aws_future_destroyme_get_error(future)); /* result1 should finally be destroyed when the future is destroyed */ aws_future_destroyme_release(future); ASSERT_TRUE(result1_destroyed); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_set_multiple_times, s_test_future_set_multiple_times) static int s_test_future_set_error(struct aws_allocator *alloc, void *ctx) { (void)ctx; aws_io_library_init(alloc); struct aws_future_destroyme *future = aws_future_destroyme_new(alloc); /* Set error code */ aws_future_destroyme_set_error(future, 999); ASSERT_TRUE(aws_future_destroyme_is_done(future)); ASSERT_INT_EQUALS(999, aws_future_destroyme_get_error(future)); /* Attempts to change the error should be ignored */ aws_future_destroyme_set_error(future, 222); ASSERT_INT_EQUALS(999, aws_future_destroyme_get_error(future)); /* Attempts to set a result instead should be ignored (the new result should just get destroyed) */ bool result_destroyed = false; struct aws_destroyme *result = aws_destroyme_new(alloc, &result_destroyed); aws_future_destroyme_set_result_by_move(future, &result); ASSERT_INT_EQUALS(999, aws_future_destroyme_get_error(future)); ASSERT_NULL(result); ASSERT_TRUE(result_destroyed); aws_future_destroyme_release(future); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(future_set_error, s_test_future_set_error) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/future_test.h000066400000000000000000000017051456575232400235260ustar00rootroot00000000000000 #ifndef AWS_FUTURE_TEST_H #define AWS_FUTURE_TEST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_destroyme *aws_destroyme_new(struct aws_allocator *alloc, bool *set_true_on_death); void aws_destroyme_destroy(struct aws_destroyme *destroyme); /* We get unused-function warnings if this macro is used in a .c file, so put it in a header */ AWS_FUTURE_T_POINTER_WITH_DESTROY_DECLARATION(aws_future_destroyme, struct aws_destroyme, /*private API*/); struct aws_refcountme *aws_refcountme_new(struct aws_allocator *alloc, bool *set_true_on_death); struct aws_refcountme *aws_refcountme_acquire(struct aws_refcountme *refcountme); struct aws_refcountme *aws_refcountme_release(struct aws_refcountme *refcountme); AWS_FUTURE_T_POINTER_WITH_RELEASE_DECLARATION(aws_future_refcountme, struct aws_refcountme, /*private API*/); #endif /* AWS_FUTURE_TEST_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/io_lib_test.c000066400000000000000000000021111456575232400234340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include /* Initialize this library and its dependencies. * This will fail if: * - the error info list is out of sync with the error enums. * - there is a memory leak */ static int s_test_io_library_init(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; aws_io_library_init(allocator); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(io_library_init, s_test_io_library_init) /* Ensure the library can go through the init/cleanup cycle multiple times */ static int s_test_io_library_init_cleanup_init_cleanup(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; aws_io_library_init(allocator); aws_io_library_clean_up(); aws_io_library_init(allocator); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(io_library_init_cleanup_init_cleanup, s_test_io_library_init_cleanup_init_cleanup) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/io_testing_channel_test.c000066400000000000000000000045511456575232400260450ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include static int s_test_io_testing_channel(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_testing_channel_options test_channel_options = {.clock_fn = aws_high_res_clock_get_ticks}; struct testing_channel testing_channel; ASSERT_SUCCESS(testing_channel_init(&testing_channel, allocator, &test_channel_options)); /* Install downstream handler, so the 2 handlers can pass messages to each other */ ASSERT_SUCCESS(testing_channel_install_downstream_handler(&testing_channel, 16 * 1024)); /* Push read message and assert that downstream handler receives it */ struct aws_io_message *read_msg = aws_channel_acquire_message_from_pool(testing_channel.channel, AWS_IO_MESSAGE_APPLICATION_DATA, 64); ASSERT_NOT_NULL(read_msg); ASSERT_SUCCESS(testing_channel_push_read_message(&testing_channel, read_msg)); struct aws_linked_list *read_queue = testing_channel_get_read_message_queue(&testing_channel); ASSERT_NOT_NULL(read_queue); ASSERT_FALSE(aws_linked_list_empty(read_queue)); ASSERT_PTR_EQUALS(&read_msg->queueing_handle, aws_linked_list_front(read_queue)); /* Push write message and assert that upstream handler receives it */ struct aws_io_message *write_msg = aws_channel_acquire_message_from_pool(testing_channel.channel, AWS_IO_MESSAGE_APPLICATION_DATA, 64); ASSERT_NOT_NULL(write_msg); ASSERT_SUCCESS(testing_channel_push_write_message(&testing_channel, write_msg)); struct aws_linked_list *write_queue = testing_channel_get_written_message_queue(&testing_channel); ASSERT_NOT_NULL(write_queue); ASSERT_FALSE(aws_linked_list_empty(write_queue)); ASSERT_PTR_EQUALS(&write_msg->queueing_handle, aws_linked_list_front(write_queue)); testing_channel_drain_queued_tasks(&testing_channel); /* Test window updates */ ASSERT_SUCCESS(testing_channel_increment_read_window(&testing_channel, 12345)); testing_channel_drain_queued_tasks(&testing_channel); ASSERT_UINT_EQUALS(12345, testing_channel_last_window_update(&testing_channel)); /* Clean up */ ASSERT_SUCCESS(testing_channel_clean_up(&testing_channel)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(io_testing_channel, s_test_io_testing_channel) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/mock_dns_resolver.c000066400000000000000000000051321456575232400246640ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "mock_dns_resolver.h" #include int mock_dns_resolver_init(struct mock_dns_resolver *resolver, size_t max_resolves, struct aws_allocator *allocator) { resolver->index = 0; resolver->max_resolves = max_resolves; resolver->resolve_count = 0; return aws_array_list_init_dynamic(&resolver->address_list, allocator, 2, sizeof(struct aws_array_list)); } void mock_dns_resolver_clean_up(struct mock_dns_resolver *resolver) { for (size_t i = 0; i < aws_array_list_length(&resolver->address_list); ++i) { struct aws_array_list *temp = NULL; aws_array_list_get_at_ptr(&resolver->address_list, (void **)&temp, i); for (size_t j = 0; j < aws_array_list_length(temp); ++j) { struct aws_host_address *temp_address = NULL; aws_array_list_get_at_ptr(temp, (void **)&temp_address, j); aws_host_address_clean_up(temp_address); } aws_array_list_clean_up(temp); } aws_array_list_clean_up(&resolver->address_list); } int mock_dns_resolver_append_address_list(struct mock_dns_resolver *resolver, struct aws_array_list *addresses) { return aws_array_list_push_back(&resolver->address_list, addresses); } int mock_dns_resolve( struct aws_allocator *allocator, const struct aws_string *host_name, struct aws_array_list *output_addresses, void *user_data) { (void)allocator; (void)host_name; struct mock_dns_resolver *mock_resolver = user_data; if (mock_resolver->resolve_count == mock_resolver->max_resolves) { return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); } struct aws_array_list *iteration_list = NULL; if (aws_array_list_get_at_ptr(&mock_resolver->address_list, (void **)&iteration_list, mock_resolver->index)) { return aws_raise_error(AWS_ERROR_UNKNOWN); } mock_resolver->index = (mock_resolver->index + 1) % aws_array_list_length(&mock_resolver->address_list); mock_resolver->resolve_count += 1; if (aws_array_list_length(iteration_list) == 0) { return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); } for (size_t i = 0; i < aws_array_list_length(iteration_list); ++i) { struct aws_host_address *temp_address = NULL; aws_array_list_get_at_ptr(iteration_list, (void **)&temp_address, i); struct aws_host_address address_cpy; aws_host_address_copy(temp_address, &address_cpy); aws_array_list_push_back(output_addresses, &address_cpy); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/mock_dns_resolver.h000066400000000000000000000016141456575232400246720ustar00rootroot00000000000000#ifndef AWS_MOCK_DNS_RESOLVER_H #define AWS_MOCK_DNS_RESOLVER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct aws_string; struct mock_dns_resolver { struct aws_array_list address_list; size_t index; size_t max_resolves; size_t resolve_count; }; int mock_dns_resolver_init(struct mock_dns_resolver *resolver, size_t max_resolves, struct aws_allocator *allocator); void mock_dns_resolver_clean_up(struct mock_dns_resolver *resolver); int mock_dns_resolver_append_address_list(struct mock_dns_resolver *resolver, struct aws_array_list *addresses); int mock_dns_resolve( struct aws_allocator *allocator, const struct aws_string *host_name, struct aws_array_list *output_addresses, void *user_data); #endif /* AWS_MOCK_DNS_RESOLVER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/pem_test.c000066400000000000000000003270411456575232400227740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include static int s_check_clean_pem_result( struct aws_byte_cursor dirty_pem, struct aws_byte_cursor expected_clean_pem, struct aws_allocator *allocator) { struct aws_byte_buf pem_buf; ASSERT_SUCCESS(aws_byte_buf_init_copy_from_cursor(&pem_buf, allocator, dirty_pem)); ASSERT_SUCCESS(aws_sanitize_pem(&pem_buf, allocator)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&expected_clean_pem, &pem_buf)); aws_byte_buf_clean_up(&pem_buf); return AWS_OP_SUCCESS; } static int s_test_pem_sanitize_comments_around_pem_object_removed(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* comments around pem object will be removed */ struct aws_byte_cursor dirty_pem = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("# comments\r\n" "-----BEGIN CERTIFICATE-----\n" "CERTIFICATES\n" "-----END CERTIFICATE-----\n" "# another comments\r\n" "-----BEGIN CERTIFICATE-----\n" "CERTIFICATES\n" "-----END CERTIFICATE-----\n" "# final comments\r\n"); struct aws_byte_cursor expected_clean_pem = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("-----BEGIN CERTIFICATE-----\n" "CERTIFICATES\n" "-----END CERTIFICATE-----\n" "-----BEGIN CERTIFICATE-----\n" "CERTIFICATES\n" "-----END CERTIFICATE-----\n"); return s_check_clean_pem_result(dirty_pem, expected_clean_pem, allocator); } AWS_TEST_CASE(pem_sanitize_comments_around_pem_object_removed, s_test_pem_sanitize_comments_around_pem_object_removed); static int s_test_pem_sanitize_empty_file_rejected(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* We don't allow empty files. */ struct aws_byte_buf pem; ASSERT_SUCCESS(aws_byte_buf_init(&pem, allocator, 512)); ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_sanitize_pem(&pem, allocator)); aws_byte_buf_clean_up(&pem); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pem_sanitize_empty_file_rejected, s_test_pem_sanitize_empty_file_rejected) static int s_test_pem_sanitize_wrong_format_rejected(struct aws_allocator *allocator, void *ctx); AWS_TEST_CASE(pem_sanitize_wrong_format_rejected, s_test_pem_sanitize_wrong_format_rejected) static int s_test_pem_sanitize_wrong_format_rejected(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* A file with the wrong format will "sanitize" to an empty PEM file, which we do not accept */ /* This is not a PEM file, it's a DER encoded binary x.509 certificate */ const uint8_t not_a_pem_src[] = { 0x30, 0x82, 0x04, 0xD3, 0x30, 0x82, 0x03, 0xBB, 0xA0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x10, 0x18, 0xDA, 0xD1, 0x9E, 0x26, 0x7D, 0xE8, 0xBB, 0x4A, 0x21, 0x58, 0xCD, 0xCC, 0x6B, 0x3B, 0x4A, 0x30, 0x0D, 0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x81, 0xCA, 0x31, 0x0B, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, 0x0A, 0x13, 0x0E, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x2C, 0x20, 0x49, 0x6E, 0x63, 0x2E, 0x31, 0x1F, 0x30, 0x1D, 0x06, 0x03, 0x55, 0x04, 0x0B, 0x13, 0x16, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x20, 0x54, 0x72, 0x75, 0x73, 0x74, 0x20, 0x4E, 0x65, 0x74, 0x77, 0x6F, 0x72, 0x6B, 0x31, 0x3A, 0x30, 0x38, 0x06, 0x03, 0x55, 0x04, 0x0B, 0x13, 0x31, 0x28, 0x63, 0x29, 0x20, 0x32, 0x30, 0x30, 0x36, 0x20, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x2C, 0x20, 0x49, 0x6E, 0x63, 0x2E, 0x20, 0x2D, 0x20, 0x46, 0x6F, 0x72, 0x20, 0x61, 0x75, 0x74, 0x68, 0x6F, 0x72, 0x69, 0x7A, 0x65, 0x64, 0x20, 0x75, 0x73, 0x65, 0x20, 0x6F, 0x6E, 0x6C, 0x79, 0x31, 0x45, 0x30, 0x43, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x3C, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x20, 0x43, 0x6C, 0x61, 0x73, 0x73, 0x20, 0x33, 0x20, 0x50, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20, 0x50, 0x72, 0x69,
0x6D, 0x61, 0x72, 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6F, 0x72, 0x69, 0x74, 0x79, 0x20, 0x2D, 0x20, 0x47, 0x35, 0x30, 0x1E, 0x17, 0x0D, 0x30, 0x36, 0x31, 0x31, 0x30, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x5A, 0x17, 0x0D, 0x33, 0x36, 0x30, 0x37, 0x31, 0x36, 0x32, 0x33, 0x35, 0x39, 0x35, 0x39, 0x5A, 0x30, 0x81, 0xCA, 0x31, 0x0B, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, 0x0A, 0x13, 0x0E, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x2C, 0x20, 0x49, 0x6E, 0x63, 0x2E, 0x31, 0x1F, 0x30, 0x1D, 0x06, 0x03, 0x55, 0x04, 0x0B, 0x13, 0x16, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x20, 0x54, 0x72, 0x75, 0x73, 0x74, 0x20, 0x4E, 0x65, 0x74, 0x77, 0x6F, 0x72, 0x6B, 0x31, 0x3A, 0x30, 0x38, 0x06, 0x03, 0x55, 0x04, 0x0B, 0x13, 0x31, 0x28, 0x63, 0x29, 0x20, 0x32, 0x30, 0x30, 0x36, 0x20, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x2C, 0x20, 0x49, 0x6E, 0x63, 0x2E, 0x20, 0x2D, 0x20, 0x46, 0x6F, 0x72, 0x20, 0x61, 0x75, 0x74, 0x68, 0x6F, 0x72, 0x69, 0x7A, 0x65, 0x64, 0x20, 0x75, 0x73, 0x65, 0x20, 0x6F, 0x6E, 0x6C, 0x79, 0x31, 0x45, 0x30, 0x43, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x3C, 0x56, 0x65, 0x72, 0x69, 0x53, 0x69, 0x67, 0x6E, 0x20, 0x43, 0x6C, 0x61, 0x73, 0x73, 0x20, 0x33, 0x20, 0x50, 0x75, 0x62, 0x6C, 0x69, 0x63, 0x20, 0x50, 0x72, 0x69, 0x6D, 0x61, 0x72, 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6F, 0x72, 0x69, 0x74, 0x79, 0x20, 0x2D, 0x20, 0x47, 0x35, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0D, 0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0F, 0x00, 0x30, 0x82, 0x01, 0x0A, 0x02, 0x82, 0x01, 0x01, 0x00, 0xAF, 0x24, 0x08, 0x08, 0x29, 0x7A, 0x35, 0x9E, 0x60, 0x0C, 0xAA, 0xE7, 0x4B, 0x3B, 0x4E, 0xDC, 0x7C, 0xBC, 0x3C, 0x45, 0x1C, 0xBB, 0x2B, 0xE0, 0xFE, 0x29, 0x02, 0xF9, 0x57, 0x08, 0xA3, 0x64, 0x85, 0x15, 0x27, 0xF5, 0xF1, 0xAD, 0xC8, 0x31, 0x89, 0x5D, 0x22, 0xE8, 0x2A, 0xAA, 0xA6, 0x42, 0xB3, 0x8F, 0xF8, 0xB9, 0x55, 0xB7, 0xB1, 0xB7, 0x4B, 0xB3, 0xFE, 0x8F, 0x7E, 0x07, 0x57, 0xEC, 0xEF, 0x43, 0xDB, 0x66, 0x62, 0x15, 0x61, 0xCF, 0x60, 0x0D, 0xA4, 0xD8, 0xDE, 0xF8, 0xE0, 0xC3, 0x62, 0x08, 0x3D, 0x54, 0x13, 0xEB, 0x49, 0xCA, 0x59, 0x54, 0x85, 0x26, 0xE5, 0x2B, 0x8F, 0x1B, 0x9F, 0xEB, 0xF5, 0xA1, 0x91, 0xC2, 0x33, 0x49, 0xD8, 0x43, 0x63, 0x6A, 0x52, 0x4B, 0xD2, 0x8F, 0xE8, 0x70, 0x51, 0x4D, 0xD1, 0x89, 0x69, 0x7B, 0xC7, 0x70, 0xF6, 0xB3, 0xDC, 0x12, 0x74, 0xDB, 0x7B, 0x5D, 0x4B, 0x56, 0xD3, 0x96, 0xBF, 0x15, 0x77, 0xA1, 0xB0, 0xF4, 0xA2, 0x25, 0xF2, 0xAF, 0x1C, 0x92, 0x67, 0x18, 0xE5, 0xF4, 0x06, 0x04, 0xEF, 0x90, 0xB9, 0xE4, 0x00, 0xE4, 0xDD, 0x3A, 0xB5, 0x19, 0xFF, 0x02, 0xBA, 0xF4, 0x3C, 0xEE, 0xE0, 0x8B, 0xEB, 0x37, 0x8B, 0xEC, 0xF4, 0xD7, 0xAC, 0xF2, 0xF6, 0xF0, 0x3D, 0xAF, 0xDD, 0x75, 0x91, 0x33, 0x19, 0x1D, 0x1C, 0x40, 0xCB, 0x74, 0x24, 0x19, 0x21, 0x93, 0xD9, 0x14, 0xFE, 0xAC, 0x2A, 0x52, 0xC7, 0x8F, 0xD5, 0x04, 0x49, 0xE4, 0x8D, 0x63, 0x47, 0x88, 0x3C, 0x69, 0x83, 0xCB, 0xFE, 0x47, 0xBD, 0x2B, 0x7E, 0x4F, 0xC5, 0x95, 0xAE, 0x0E, 0x9D, 0xD4, 0xD1, 0x43, 0xC0, 0x67, 0x73, 0xE3, 0x14, 0x08, 0x7E, 0xE5, 0x3F, 0x9F, 0x73, 0xB8, 0x33, 0x0A, 0xCF, 0x5D, 0x3F, 0x34, 0x87, 0x96, 0x8A, 0xEE, 0x53, 0xE8, 0x25, 0x15, 0x02, 0x03, 0x01, 0x00, 0x01, 0xA3, 0x81, 0xB2, 0x30, 0x81, 0xAF, 0x30, 0x0F, 0x06, 0x03, 0x55, 0x1D, 0x13, 0x01, 0x01, 0xFF, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xFF, 0x30, 0x0E, 0x06, 0x03, 0x55, 
0x1D, 0x0F, 0x01, 0x01, 0xFF, 0x04, 0x04, 0x03, 0x02, 0x01, 0x06, 0x30, 0x6D, 0x06, 0x08, 0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x0C, 0x04, 0x61, 0x30, 0x5F, 0xA1, 0x5D, 0xA0, 0x5B, 0x30, 0x59, 0x30, 0x57, 0x30, 0x55, 0x16, 0x09, 0x69, 0x6D, 0x61, 0x67, 0x65, 0x2F, 0x67, 0x69, 0x66, 0x30, 0x21, 0x30, 0x1F, 0x30, 0x07, 0x06, 0x05, 0x2B, 0x0E, 0x03, 0x02, 0x1A, 0x04, 0x14, 0x8F, 0xE5, 0xD3, 0x1A, 0x86, 0xAC, 0x8D, 0x8E, 0x6B, 0xC3, 0xCF, 0x80, 0x6A, 0xD4, 0x48, 0x18, 0x2C, 0x7B, 0x19, 0x2E, 0x30, 0x25, 0x16, 0x23, 0x68, 0x74, 0x74, 0x70, 0x3A, 0x2F, 0x2F, 0x6C, 0x6F, 0x67, 0x6F, 0x2E, 0x76, 0x65, 0x72, 0x69, 0x73, 0x69, 0x67, 0x6E, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x76, 0x73, 0x6C, 0x6F, 0x67, 0x6F, 0x2E, 0x67, 0x69, 0x66, 0x30, 0x1D, 0x06, 0x03, 0x55, 0x1D, 0x0E, 0x04, 0x16, 0x04, 0x14, 0x7F, 0xD3, 0x65, 0xA7, 0xC2, 0xDD, 0xEC, 0xBB, 0xF0, 0x30, 0x09, 0xF3, 0x43, 0x39, 0xFA, 0x02, 0xAF, 0x33, 0x31, 0x33, 0x30, 0x0D, 0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x93, 0x24, 0x4A, 0x30, 0x5F, 0x62, 0xCF, 0xD8, 0x1A, 0x98, 0x2F, 0x3D, 0xEA, 0xDC, 0x99, 0x2D, 0xBD, 0x77, 0xF6, 0xA5, 0x79, 0x22, 0x38, 0xEC, 0xC4, 0xA7, 0xA0, 0x78, 0x12, 0xAD, 0x62, 0x0E, 0x45, 0x70, 0x64, 0xC5, 0xE7, 0x97, 0x66, 0x2D, 0x98, 0x09, 0x7E, 0x5F, 0xAF, 0xD6, 0xCC, 0x28, 0x65, 0xF2, 0x01, 0xAA, 0x08, 0x1A, 0x47, 0xDE, 0xF9, 0xF9, 0x7C, 0x92, 0x5A, 0x08, 0x69, 0x20, 0x0D, 0xD9, 0x3E, 0x6D, 0x6E, 0x3C, 0x0D, 0x6E, 0xD8, 0xE6, 0x06, 0x91, 0x40, 0x18, 0xB9, 0xF8, 0xC1, 0xED, 0xDF, 0xDB, 0x41, 0xAA, 0xE0, 0x96, 0x20, 0xC9, 0xCD, 0x64, 0x15, 0x38, 0x81, 0xC9, 0x94, 0xEE, 0xA2, 0x84, 0x29, 0x0B, 0x13, 0x6F, 0x8E, 0xDB, 0x0C, 0xDD, 0x25, 0x02, 0xDB, 0xA4, 0x8B, 0x19, 0x44, 0xD2, 0x41, 0x7A, 0x05, 0x69, 0x4A, 0x58, 0x4F, 0x60, 0xCA, 0x7E, 0x82, 0x6A, 0x0B, 0x02, 0xAA, 0x25, 0x17, 0x39, 0xB5, 0xDB, 0x7F, 0xE7, 0x84, 0x65, 0x2A, 0x95, 0x8A, 0xBD, 0x86, 0xDE, 0x5E, 0x81, 0x16, 0x83, 0x2D, 0x10, 0xCC, 0xDE, 0xFD, 0xA8, 0x82, 0x2A, 0x6D, 0x28, 0x1F, 0x0D, 0x0B, 0xC4, 0xE5, 0xE7, 0x1A, 0x26, 0x19, 0xE1, 0xF4, 0x11, 0x6F, 0x10, 0xB5, 0x95, 0xFC, 0xE7, 0x42, 0x05, 0x32, 0xDB, 0xCE, 0x9D, 0x51, 0x5E, 0x28, 0xB6, 0x9E, 0x85, 0xD3, 0x5B, 0xEF, 0xA5, 0x7D, 0x45, 0x40, 0x72, 0x8E, 0xB7, 0x0E, 0x6B, 0x0E, 0x06, 0xFB, 0x33, 0x35, 0x48, 0x71, 0xB8, 0x9D, 0x27, 0x8B, 0xC4, 0x65, 0x5F, 0x0D, 0x86, 0x76, 0x9C, 0x44, 0x7A, 0xF6, 0x95, 0x5C, 0xF6, 0x5D, 0x32, 0x08, 0x33, 0xA4, 0x54, 0xB6, 0x18, 0x3F, 0x68, 0x5C, 0xF2, 0x42, 0x4A, 0x85, 0x38, 0x54, 0x83, 0x5F, 0xD1, 0xE8, 0x2C, 0xF2, 0xAC, 0x11, 0xD6, 0xA8, 0xED, 0x63, 0x6A}; struct aws_byte_cursor not_a_pem_cursor = aws_byte_cursor_from_array(not_a_pem_src, sizeof(not_a_pem_src)); struct aws_byte_buf not_a_pem; ASSERT_SUCCESS(aws_byte_buf_init_copy_from_cursor(¬_a_pem, allocator, not_a_pem_cursor)); ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, aws_sanitize_pem(¬_a_pem, allocator)); aws_byte_buf_clean_up(¬_a_pem); return AWS_OP_SUCCESS; } static int s_test_pem_cert_parse_from_file(struct aws_allocator *allocator, void *ctx) { (void)ctx; static const uint8_t s_expected[] = { 0x30, 0x82, 0x03, 0xec, 0x30, 0x82, 0x02, 0xd4, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x84, 0x7d, 0x2e, 0xed, 0x4d, 0xfc, 0x26, 0x87, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x81, 0x9a, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x0a, 0x57, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x74, 0x6f, 0x6e, 0x31, 0x10, 0x30, 
0x0e, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, 0x07, 0x53, 0x65, 0x61, 0x74, 0x74, 0x6c, 0x65, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x41, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x0c, 0x04, 0x53, 0x44, 0x4b, 0x73, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x31, 0x30, 0x30, 0x2e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, 0x21, 0x61, 0x77, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x40, 0x61, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x32, 0x31, 0x30, 0x36, 0x31, 0x36, 0x30, 0x36, 0x31, 0x37, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x32, 0x33, 0x30, 0x39, 0x31, 0x38, 0x30, 0x36, 0x31, 0x37, 0x30, 0x30, 0x5a, 0x30, 0x81, 0x9a, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x0a, 0x57, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x74, 0x6f, 0x6e, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, 0x07, 0x53, 0x65, 0x61, 0x74, 0x74, 0x6c, 0x65, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x41, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x0c, 0x04, 0x53, 0x44, 0x4b, 0x73, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x31, 0x30, 0x30, 0x2e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, 0x21, 0x61, 0x77, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x40, 0x61, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xd7, 0x6a, 0x57, 0x48, 0xf8, 0x0e, 0x44, 0x03, 0x25, 0x42, 0xd6, 0x11, 0x6f, 0x1b, 0xb3, 0xfc, 0xe7, 0x1a, 0xa2, 0xb6, 0xa7, 0xdc, 0x2d, 0x85, 0x8f, 0x28, 0xe1, 0xbb, 0x4b, 0xee, 0x71, 0x21, 0x19, 0x4b, 0x0c, 0x43, 0x26, 0x9e, 0xf9, 0x4c, 0x14, 0x04, 0x31, 0xa7, 0xd2, 0xa5, 0x21, 0x0a, 0x01, 0x02, 0xde, 0x0e, 0xde, 0xf1, 0xb8, 0x34, 0x43, 0x62, 0x7e, 0x76, 0x57, 0x85, 0x04, 0xe9, 0xc1, 0x7e, 0xc5, 0x35, 0xa1, 0xb7, 0x3b, 0x1f, 0xee, 0x68, 0x4d, 0xfe, 0x51, 0xda, 0x0c, 0xf7, 0x2f, 0x47, 0x60, 0x12, 0x3c, 0x01, 0x24, 0xce, 0x48, 0xa5, 0xf0, 0xa0, 0x8b, 0x63, 0x87, 0xba, 0xb5, 0x3c, 0x52, 0xc1, 0x0f, 0x7b, 0xb2, 0x99, 0x4d, 0xb8, 0x46, 0x74, 0xf7, 0xd1, 0xe8, 0x25, 0x84, 0xd3, 0x2c, 0x56, 0x91, 0x78, 0x87, 0xdd, 0xd4, 0x3d, 0xf3, 0x67, 0x51, 0x18, 0x71, 0x2c, 0x3c, 0xc3, 0xe1, 0x99, 0xd9, 0x2c, 0x44, 0x51, 0xf6, 0x14, 0x48, 0xbd, 0x82, 0x16, 0x62, 0x18, 0x4a, 0x44, 0x23, 0x9e, 0x5b, 0x09, 0x08, 0x8a, 0x42, 0xa0, 0x68, 0x03, 0x88, 0x10, 0x0f, 0x6c, 0x85, 0x09, 0x3b, 0x72, 0x96, 0x04, 0x35, 0xf4, 0x26, 0x01, 0x83, 0x6f, 0x1d, 0xd6, 0x7f, 0x78, 0xd7, 0x1b, 0xf6, 0x3a, 0x4f, 0xad, 0xcb, 0x3e, 0xc3, 0xbe, 0x01, 0x2d, 0xb4, 0x44, 0x2b, 0xdc, 0x10, 0x5d, 0x05, 0xfe, 0xb9, 0x43, 0x20, 0xdc, 0xc8, 0xe4, 0x40, 0x07, 0x3b, 0x54, 0xce, 0x11, 0xdf, 0x5f, 0x28, 0xeb, 0xbe, 0x24, 0x02, 0xb4, 0xe8, 0xfc, 0x35, 0x9b, 0xbe, 0xc1, 0x80, 0xea, 0xc4, 0xec, 0x5b, 0x6f, 0x20, 0x6e, 0xe4, 0x60, 0xd5, 0x6e, 0x38, 0x43, 0xde, 0x22, 0x73, 0x87, 0x90, 0xeb, 0xaa, 0xaf, 0x20, 0xe2, 0xb0, 0x1d, 0x4f, 0xc2, 0x2c, 0x8f, 0x34, 
0x86, 0xea, 0x75, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x33, 0x30, 0x31, 0x30, 0x13, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x0c, 0x30, 0x0a, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01, 0x30, 0x1a, 0x06, 0x03, 0x55, 0x1d, 0x11, 0x04, 0x13, 0x30, 0x11, 0x82, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x87, 0x04, 0x7f, 0x00, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x07, 0xb4, 0x9a, 0x48, 0x4e, 0x6d, 0x71, 0x32, 0xf4, 0x35, 0x89, 0xf5, 0xe1, 0xe8, 0x27, 0x5e, 0xe3, 0x51, 0x0d, 0x54, 0xf2, 0xde, 0x1e, 0x2f, 0x9a, 0x0d, 0xdd, 0x02, 0xd4, 0xce, 0x15, 0x93, 0x8b, 0xe6, 0x75, 0x77, 0xc2, 0x41, 0xf6, 0xbf, 0xfc, 0xac, 0x25, 0x96, 0xea, 0x80, 0x38, 0x68, 0xe2, 0xa5, 0x72, 0x9a, 0x31, 0xa2, 0x95, 0x43, 0xa9, 0x90, 0x39, 0x64, 0xe3, 0x6c, 0x29, 0x37, 0x0c, 0x7a, 0xb7, 0x18, 0x97, 0x47, 0x0e, 0x16, 0x79, 0x2f, 0x9a, 0x92, 0x7b, 0x51, 0xac, 0xe4, 0x4c, 0x70, 0xc2, 0xe4, 0xf3, 0x7f, 0x2b, 0x63, 0x53, 0x2c, 0x3b, 0xdb, 0xf1, 0xef, 0x84, 0xda, 0xf3, 0x71, 0x6c, 0x6e, 0xb8, 0x41, 0x48, 0xae, 0xb5, 0x12, 0x1b, 0x20, 0xec, 0xdf, 0xff, 0x9f, 0x2b, 0x2d, 0x66, 0x52, 0x0a, 0x72, 0x17, 0x99, 0xa5, 0x4d, 0x28, 0x29, 0x8a, 0x9c, 0xc8, 0x51, 0xd0, 0xe8, 0x5c, 0x42, 0x66, 0x3e, 0xef, 0x06, 0xda, 0x72, 0xea, 0xa8, 0x5a, 0x5a, 0x02, 0x0f, 0xa2, 0x68, 0x80, 0xa9, 0x9a, 0xa4, 0x30, 0x8c, 0x9e, 0x69, 0x75, 0xa5, 0x5c, 0x34, 0x8b, 0x71, 0x49, 0xe5, 0x3a, 0x1a, 0x74, 0xa9, 0x51, 0x86, 0xee, 0x06, 0xf9, 0x54, 0x37, 0x0c, 0xf6, 0x17, 0x8a, 0x1e, 0xc3, 0x54, 0x6d, 0xa9, 0x52, 0x4a, 0x2f, 0xf8, 0xd0, 0xe3, 0x56, 0xc2, 0x61, 0x14, 0xfd, 0x7c, 0x77, 0x2b, 0x5b, 0x8b, 0x93, 0x2c, 0x6e, 0x76, 0x46, 0xa9, 0x00, 0x34, 0x8d, 0x55, 0x42, 0x6a, 0xe2, 0x6b, 0xa3, 0xd8, 0xe6, 0x5a, 0x5b, 0x65, 0x98, 0xa8, 0xb1, 0x85, 0x01, 0x92, 0x42, 0xf4, 0xd6, 0x73, 0x4d, 0xc6, 0xf6, 0xf1, 0x34, 0x36, 0x16, 0x44, 0xc6, 0x09, 0xc7, 0x94, 0x46, 0x1c, 0x06, 0x94, 0x84, 0xa9, 0x4f, 0x41, 0x0b, 0x46, 0xa6, 0xb4, 0x48, 0x1a, 0x14, 0x45, }; struct aws_array_list output_list; ASSERT_SUCCESS(aws_pem_objects_init_from_file_path(&output_list, allocator, "testparse.crt")); ASSERT_UINT_EQUALS(1, aws_array_list_length(&output_list)); struct aws_pem_object *pem_object = NULL; aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 0); ASSERT_BIN_ARRAYS_EQUALS(s_expected, sizeof(s_expected), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_pem_objects_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_cert_parse_from_file, s_test_pem_cert_parse_from_file) static int s_test_pem_cert_parse_from_file_crlf(struct aws_allocator *allocator, void *ctx) { (void)ctx; static const uint8_t s_expected[] = { 0x30, 0x82, 0x03, 0xec, 0x30, 0x82, 0x02, 0xd4, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x84, 0x7d, 0x2e, 0xed, 0x4d, 0xfc, 0x26, 0x87, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x81, 0x9a, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x0a, 0x57, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x74, 0x6f, 0x6e, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, 0x07, 0x53, 0x65, 0x61, 0x74, 0x74, 0x6c, 0x65, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x41, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x31, 0x0d, 
0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x0c, 0x04, 0x53, 0x44, 0x4b, 0x73, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x31, 0x30, 0x30, 0x2e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, 0x21, 0x61, 0x77, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x40, 0x61, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x32, 0x31, 0x30, 0x36, 0x31, 0x36, 0x30, 0x36, 0x31, 0x37, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x32, 0x33, 0x30, 0x39, 0x31, 0x38, 0x30, 0x36, 0x31, 0x37, 0x30, 0x30, 0x5a, 0x30, 0x81, 0x9a, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x0a, 0x57, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x74, 0x6f, 0x6e, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, 0x07, 0x53, 0x65, 0x61, 0x74, 0x74, 0x6c, 0x65, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x41, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x0c, 0x04, 0x53, 0x44, 0x4b, 0x73, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x31, 0x30, 0x30, 0x2e, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, 0x21, 0x61, 0x77, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2d, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x40, 0x61, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xd7, 0x6a, 0x57, 0x48, 0xf8, 0x0e, 0x44, 0x03, 0x25, 0x42, 0xd6, 0x11, 0x6f, 0x1b, 0xb3, 0xfc, 0xe7, 0x1a, 0xa2, 0xb6, 0xa7, 0xdc, 0x2d, 0x85, 0x8f, 0x28, 0xe1, 0xbb, 0x4b, 0xee, 0x71, 0x21, 0x19, 0x4b, 0x0c, 0x43, 0x26, 0x9e, 0xf9, 0x4c, 0x14, 0x04, 0x31, 0xa7, 0xd2, 0xa5, 0x21, 0x0a, 0x01, 0x02, 0xde, 0x0e, 0xde, 0xf1, 0xb8, 0x34, 0x43, 0x62, 0x7e, 0x76, 0x57, 0x85, 0x04, 0xe9, 0xc1, 0x7e, 0xc5, 0x35, 0xa1, 0xb7, 0x3b, 0x1f, 0xee, 0x68, 0x4d, 0xfe, 0x51, 0xda, 0x0c, 0xf7, 0x2f, 0x47, 0x60, 0x12, 0x3c, 0x01, 0x24, 0xce, 0x48, 0xa5, 0xf0, 0xa0, 0x8b, 0x63, 0x87, 0xba, 0xb5, 0x3c, 0x52, 0xc1, 0x0f, 0x7b, 0xb2, 0x99, 0x4d, 0xb8, 0x46, 0x74, 0xf7, 0xd1, 0xe8, 0x25, 0x84, 0xd3, 0x2c, 0x56, 0x91, 0x78, 0x87, 0xdd, 0xd4, 0x3d, 0xf3, 0x67, 0x51, 0x18, 0x71, 0x2c, 0x3c, 0xc3, 0xe1, 0x99, 0xd9, 0x2c, 0x44, 0x51, 0xf6, 0x14, 0x48, 0xbd, 0x82, 0x16, 0x62, 0x18, 0x4a, 0x44, 0x23, 0x9e, 0x5b, 0x09, 0x08, 0x8a, 0x42, 0xa0, 0x68, 0x03, 0x88, 0x10, 0x0f, 0x6c, 0x85, 0x09, 0x3b, 0x72, 0x96, 0x04, 0x35, 0xf4, 0x26, 0x01, 0x83, 0x6f, 0x1d, 0xd6, 0x7f, 0x78, 0xd7, 0x1b, 0xf6, 0x3a, 0x4f, 0xad, 0xcb, 0x3e, 0xc3, 0xbe, 0x01, 0x2d, 0xb4, 0x44, 0x2b, 0xdc, 0x10, 0x5d, 0x05, 0xfe, 0xb9, 0x43, 0x20, 0xdc, 0xc8, 0xe4, 0x40, 0x07, 0x3b, 0x54, 0xce, 0x11, 0xdf, 0x5f, 0x28, 0xeb, 0xbe, 0x24, 0x02, 0xb4, 0xe8, 0xfc, 0x35, 0x9b, 0xbe, 0xc1, 0x80, 0xea, 0xc4, 0xec, 0x5b, 0x6f, 0x20, 0x6e, 0xe4, 0x60, 0xd5, 0x6e, 0x38, 0x43, 0xde, 0x22, 0x73, 0x87, 0x90, 0xeb, 0xaa, 0xaf, 0x20, 0xe2, 0xb0, 0x1d, 0x4f, 0xc2, 0x2c, 0x8f, 0x34, 0x86, 0xea, 0x75, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x33, 0x30, 0x31, 0x30, 0x13, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x0c, 0x30, 0x0a, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01, 0x30, 
0x1a, 0x06, 0x03, 0x55, 0x1d, 0x11, 0x04, 0x13, 0x30, 0x11, 0x82, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x87, 0x04, 0x7f, 0x00, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x07, 0xb4, 0x9a, 0x48, 0x4e, 0x6d, 0x71, 0x32, 0xf4, 0x35, 0x89, 0xf5, 0xe1, 0xe8, 0x27, 0x5e, 0xe3, 0x51, 0x0d, 0x54, 0xf2, 0xde, 0x1e, 0x2f, 0x9a, 0x0d, 0xdd, 0x02, 0xd4, 0xce, 0x15, 0x93, 0x8b, 0xe6, 0x75, 0x77, 0xc2, 0x41, 0xf6, 0xbf, 0xfc, 0xac, 0x25, 0x96, 0xea, 0x80, 0x38, 0x68, 0xe2, 0xa5, 0x72, 0x9a, 0x31, 0xa2, 0x95, 0x43, 0xa9, 0x90, 0x39, 0x64, 0xe3, 0x6c, 0x29, 0x37, 0x0c, 0x7a, 0xb7, 0x18, 0x97, 0x47, 0x0e, 0x16, 0x79, 0x2f, 0x9a, 0x92, 0x7b, 0x51, 0xac, 0xe4, 0x4c, 0x70, 0xc2, 0xe4, 0xf3, 0x7f, 0x2b, 0x63, 0x53, 0x2c, 0x3b, 0xdb, 0xf1, 0xef, 0x84, 0xda, 0xf3, 0x71, 0x6c, 0x6e, 0xb8, 0x41, 0x48, 0xae, 0xb5, 0x12, 0x1b, 0x20, 0xec, 0xdf, 0xff, 0x9f, 0x2b, 0x2d, 0x66, 0x52, 0x0a, 0x72, 0x17, 0x99, 0xa5, 0x4d, 0x28, 0x29, 0x8a, 0x9c, 0xc8, 0x51, 0xd0, 0xe8, 0x5c, 0x42, 0x66, 0x3e, 0xef, 0x06, 0xda, 0x72, 0xea, 0xa8, 0x5a, 0x5a, 0x02, 0x0f, 0xa2, 0x68, 0x80, 0xa9, 0x9a, 0xa4, 0x30, 0x8c, 0x9e, 0x69, 0x75, 0xa5, 0x5c, 0x34, 0x8b, 0x71, 0x49, 0xe5, 0x3a, 0x1a, 0x74, 0xa9, 0x51, 0x86, 0xee, 0x06, 0xf9, 0x54, 0x37, 0x0c, 0xf6, 0x17, 0x8a, 0x1e, 0xc3, 0x54, 0x6d, 0xa9, 0x52, 0x4a, 0x2f, 0xf8, 0xd0, 0xe3, 0x56, 0xc2, 0x61, 0x14, 0xfd, 0x7c, 0x77, 0x2b, 0x5b, 0x8b, 0x93, 0x2c, 0x6e, 0x76, 0x46, 0xa9, 0x00, 0x34, 0x8d, 0x55, 0x42, 0x6a, 0xe2, 0x6b, 0xa3, 0xd8, 0xe6, 0x5a, 0x5b, 0x65, 0x98, 0xa8, 0xb1, 0x85, 0x01, 0x92, 0x42, 0xf4, 0xd6, 0x73, 0x4d, 0xc6, 0xf6, 0xf1, 0x34, 0x36, 0x16, 0x44, 0xc6, 0x09, 0xc7, 0x94, 0x46, 0x1c, 0x06, 0x94, 0x84, 0xa9, 0x4f, 0x41, 0x0b, 0x46, 0xa6, 0xb4, 0x48, 0x1a, 0x14, 0x45, }; struct aws_array_list output_list; ASSERT_SUCCESS(aws_pem_objects_init_from_file_path(&output_list, allocator, "testparse_crlf.crt")); ASSERT_UINT_EQUALS(1, aws_array_list_length(&output_list)); struct aws_pem_object *pem_object = NULL; aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 0); ASSERT_BIN_ARRAYS_EQUALS(s_expected, sizeof(s_expected), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_pem_objects_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_cert_parse_from_file_crlf, s_test_pem_cert_parse_from_file_crlf) static int s_test_pem_private_key_parse_from_file(struct aws_allocator *allocator, void *ctx) { (void)ctx; static const uint8_t s_expected[] = { 0x30, 0x82, 0x04, 0xa4, 0x02, 0x01, 0x00, 0x02, 0x82, 0x01, 0x01, 0x00, 0xd7, 0x6a, 0x57, 0x48, 0xf8, 0x0e, 0x44, 0x03, 0x25, 0x42, 0xd6, 0x11, 0x6f, 0x1b, 0xb3, 0xfc, 0xe7, 0x1a, 0xa2, 0xb6, 0xa7, 0xdc, 0x2d, 0x85, 0x8f, 0x28, 0xe1, 0xbb, 0x4b, 0xee, 0x71, 0x21, 0x19, 0x4b, 0x0c, 0x43, 0x26, 0x9e, 0xf9, 0x4c, 0x14, 0x04, 0x31, 0xa7, 0xd2, 0xa5, 0x21, 0x0a, 0x01, 0x02, 0xde, 0x0e, 0xde, 0xf1, 0xb8, 0x34, 0x43, 0x62, 0x7e, 0x76, 0x57, 0x85, 0x04, 0xe9, 0xc1, 0x7e, 0xc5, 0x35, 0xa1, 0xb7, 0x3b, 0x1f, 0xee, 0x68, 0x4d, 0xfe, 0x51, 0xda, 0x0c, 0xf7, 0x2f, 0x47, 0x60, 0x12, 0x3c, 0x01, 0x24, 0xce, 0x48, 0xa5, 0xf0, 0xa0, 0x8b, 0x63, 0x87, 0xba, 0xb5, 0x3c, 0x52, 0xc1, 0x0f, 0x7b, 0xb2, 0x99, 0x4d, 0xb8, 0x46, 0x74, 0xf7, 0xd1, 0xe8, 0x25, 0x84, 0xd3, 0x2c, 0x56, 0x91, 0x78, 0x87, 0xdd, 0xd4, 0x3d, 0xf3, 0x67, 0x51, 0x18, 0x71, 0x2c, 0x3c, 0xc3, 0xe1, 0x99, 
0xd9, 0x2c, 0x44, 0x51, 0xf6, 0x14, 0x48, 0xbd, 0x82, 0x16, 0x62, 0x18, 0x4a, 0x44, 0x23, 0x9e, 0x5b, 0x09, 0x08, 0x8a, 0x42, 0xa0, 0x68, 0x03, 0x88, 0x10, 0x0f, 0x6c, 0x85, 0x09, 0x3b, 0x72, 0x96, 0x04, 0x35, 0xf4, 0x26, 0x01, 0x83, 0x6f, 0x1d, 0xd6, 0x7f, 0x78, 0xd7, 0x1b, 0xf6, 0x3a, 0x4f, 0xad, 0xcb, 0x3e, 0xc3, 0xbe, 0x01, 0x2d, 0xb4, 0x44, 0x2b, 0xdc, 0x10, 0x5d, 0x05, 0xfe, 0xb9, 0x43, 0x20, 0xdc, 0xc8, 0xe4, 0x40, 0x07, 0x3b, 0x54, 0xce, 0x11, 0xdf, 0x5f, 0x28, 0xeb, 0xbe, 0x24, 0x02, 0xb4, 0xe8, 0xfc, 0x35, 0x9b, 0xbe, 0xc1, 0x80, 0xea, 0xc4, 0xec, 0x5b, 0x6f, 0x20, 0x6e, 0xe4, 0x60, 0xd5, 0x6e, 0x38, 0x43, 0xde, 0x22, 0x73, 0x87, 0x90, 0xeb, 0xaa, 0xaf, 0x20, 0xe2, 0xb0, 0x1d, 0x4f, 0xc2, 0x2c, 0x8f, 0x34, 0x86, 0xea, 0x75, 0x02, 0x03, 0x01, 0x00, 0x01, 0x02, 0x82, 0x01, 0x00, 0x44, 0x90, 0xb8, 0x8f, 0xa5, 0x45, 0x05, 0x28, 0xeb, 0x27, 0x46, 0xf3, 0xed, 0xa5, 0xa7, 0xb8, 0x8b, 0xe6, 0xd2, 0x7b, 0xc9, 0x1a, 0x2f, 0xf3, 0x1f, 0x0a, 0x28, 0x2f, 0x71, 0x8f, 0xc7, 0xba, 0x7d, 0x4e, 0x81, 0xec, 0xad, 0xd2, 0x54, 0x0f, 0x7f, 0x1f, 0x86, 0x9e, 0xa0, 0x51, 0xa7, 0x1e, 0x84, 0x0b, 0xe1, 0x9a, 0x62, 0x24, 0x16, 0x39, 0xac, 0x69, 0x21, 0x4f, 0x91, 0xb3, 0xe9, 0x48, 0x6e, 0x2a, 0x67, 0xa3, 0x16, 0x82, 0x37, 0xf3, 0x85, 0xf2, 0xf5, 0x40, 0x49, 0xd5, 0x59, 0xe3, 0x23, 0xcd, 0x58, 0x2a, 0xf5, 0xa6, 0x77, 0x8c, 0xa1, 0x5b, 0x10, 0x28, 0x49, 0xb5, 0xb8, 0x72, 0x19, 0x55, 0xc6, 0x11, 0x65, 0x58, 0x3e, 0x14, 0xc5, 0xc4, 0x2d, 0xc8, 0xf5, 0x48, 0x7e, 0xd7, 0xd2, 0x5b, 0x54, 0xf5, 0x89, 0x00, 0x10, 0x5e, 0xef, 0x3b, 0x78, 0xca, 0x1d, 0xe9, 0xe5, 0xbb, 0x55, 0x69, 0x72, 0x30, 0xa8, 0x9c, 0x62, 0x40, 0x46, 0x07, 0x6a, 0x21, 0x23, 0x48, 0x56, 0xf1, 0xc8, 0x71, 0xdf, 0xad, 0x73, 0xf7, 0xa4, 0x1c, 0xa7, 0x18, 0x40, 0xc8, 0x10, 0x1f, 0x9e, 0x1c, 0x6e, 0x4e, 0x02, 0x85, 0x61, 0x24, 0x55, 0x7f, 0x06, 0x12, 0x3a, 0x31, 0xd8, 0x3c, 0xeb, 0xe8, 0xce, 0x65, 0x3b, 0x5a, 0x3d, 0x22, 0x51, 0x14, 0xfe, 0xd4, 0xc3, 0x38, 0x88, 0xef, 0x18, 0x94, 0x10, 0xee, 0x64, 0x42, 0x40, 0xae, 0xcd, 0xd6, 0x01, 0xd2, 0x1e, 0xa6, 0x60, 0xaa, 0xea, 0xc9, 0xf3, 0x38, 0x02, 0x7a, 0x63, 0xd8, 0x84, 0xd0, 0x41, 0xad, 0x8b, 0xd4, 0x06, 0x88, 0x0a, 0x3a, 0x9d, 0xaf, 0xe7, 0x58, 0x07, 0xd5, 0x95, 0x14, 0x8c, 0xc9, 0x2f, 0xc2, 0xd4, 0x60, 0xb4, 0xa0, 0xcd, 0x0c, 0x9e, 0x94, 0x4a, 0x48, 0xb5, 0xb4, 0xb6, 0xf2, 0xd5, 0xbe, 0xd2, 0x46, 0xf3, 0x51, 0x02, 0x81, 0x81, 0x00, 0xee, 0x5c, 0xc6, 0xa5, 0xd9, 0x40, 0x2b, 0x05, 0x8d, 0x28, 0xf7, 0x36, 0x60, 0x86, 0xed, 0x50, 0xda, 0x26, 0x0f, 0xf7, 0x8e, 0xaf, 0xb4, 0xf3, 0x61, 0xe7, 0x58, 0xc4, 0x9f, 0x3c, 0x48, 0x6e, 0x76, 0x4c, 0x78, 0xe0, 0x13, 0x73, 0xee, 0xa6, 0x81, 0x77, 0xc1, 0x91, 0x63, 0x76, 0xd9, 0x70, 0xc7, 0x5b, 0xb8, 0x9e, 0xcc, 0x65, 0x55, 0xee, 0x74, 0x14, 0x14, 0xc2, 0x37, 0x9b, 0x36, 0x15, 0x5e, 0x3f, 0xf1, 0x83, 0xfd, 0xf3, 0x4c, 0xe2, 0xb3, 0xe1, 0xed, 0x50, 0x2e, 0x69, 0x58, 0x23, 0xb7, 0x3b, 0x2e, 0xbe, 0x0e, 0x34, 0xa3, 0x2b, 0xdb, 0x2d, 0xfa, 0x61, 0xb2, 0xcd, 0x88, 0xe5, 0xde, 0x8a, 0x55, 0xa9, 0xc4, 0x19, 0x90, 0x78, 0xf5, 0x2c, 0xfa, 0x8d, 0xc4, 0x19, 0xaf, 0x16, 0x90, 0xe0, 0x02, 0xd5, 0x59, 0x7d, 0xd2, 0x92, 0x77, 0x2b, 0xb3, 0x66, 0x98, 0xfc, 0xb3, 0x9b, 0x02, 0x81, 0x81, 0x00, 0xe7, 0x5a, 0xe3, 0x10, 0x56, 0xbf, 0x8a, 0x32, 0x0b, 0xa7, 0x53, 0xf9, 0xbc, 0xa9, 0xfc, 0x6f, 0x7a, 0x48, 0x7d, 0x01, 0x52, 0xb1, 0x4b, 0x17, 0xe4, 0xd5, 0xd3, 0xcb, 0x7d, 0x5f, 0xff, 0x65, 0x30, 0x55, 0x5e, 0x3d, 0xd5, 0xd8, 0xcc, 0xc8, 0xdc, 0xa1, 0xb5, 0xa4, 0x5c, 0xad, 0x73, 0xfd, 0x09, 0x8a, 0x6a, 0xdf, 0xca, 0x35, 0xc6, 0xf5, 0x1a, 0xc5, 0xed, 0xa1, 0x94, 0xd0, 0xff, 0x8e, 0x20, 0x63, 0x04, 
0x77, 0xec, 0x0b, 0x5d, 0xe8, 0x50, 0xe5, 0x73, 0xf1, 0x3a, 0xc0, 0xcf, 0x10, 0xca, 0x03, 0x36, 0xc6, 0x2d, 0xc3, 0x93, 0xda, 0xda, 0xe0, 0xc4, 0xc1, 0x5b, 0x47, 0xc1, 0x33, 0xfa, 0x3b, 0xab, 0xd7, 0x24, 0x1b, 0x3e, 0x7a, 0x0a, 0x66, 0xb0, 0x7b, 0x4a, 0x8a, 0x40, 0x91, 0xc5, 0x6a, 0x66, 0xfe, 0x24, 0xb3, 0x42, 0xcb, 0xbb, 0xe0, 0x4b, 0x7c, 0x41, 0x57, 0x63, 0x2f, 0x02, 0x81, 0x81, 0x00, 0xa4, 0xdf, 0x31, 0x5c, 0x38, 0x28, 0x45, 0x59, 0xc2, 0xa9, 0x0a, 0x4d, 0xe7, 0x78, 0x8c, 0x9f, 0xf7, 0x34, 0x8a, 0xa8, 0xce, 0x5e, 0x44, 0xc8, 0x6f, 0xf8, 0xc8, 0x92, 0xc0, 0x1d, 0xbf, 0x70, 0x00, 0x8d, 0xa6, 0xb2, 0x3f, 0x62, 0x5a, 0x39, 0x7b, 0xa5, 0xed, 0x12, 0xf6, 0x7c, 0x97, 0xac, 0x85, 0x88, 0xb0, 0xeb, 0xce, 0x2f, 0x6d, 0xbf, 0xd1, 0x34, 0xae, 0xa3, 0x24, 0x39, 0x4c, 0xb0, 0x7d, 0x0f, 0xb7, 0xab, 0x77, 0xb5, 0x99, 0x81, 0xd9, 0xb0, 0xb5, 0x28, 0x57, 0xe1, 0xef, 0xe0, 0x4c, 0x76, 0x38, 0x3f, 0xa7, 0xad, 0xcb, 0x0b, 0xa3, 0xc0, 0x6a, 0xc6, 0xb7, 0x19, 0xa9, 0xce, 0x6e, 0x1e, 0xbb, 0x60, 0x00, 0xcf, 0x39, 0xfa, 0x20, 0x84, 0x2b, 0x0e, 0x72, 0x0c, 0xdd, 0xe9, 0xba, 0xed, 0xe7, 0xa7, 0xd1, 0x0d, 0xd1, 0xe0, 0x13, 0x63, 0xfb, 0xe4, 0x44, 0x7f, 0xce, 0x6f, 0x02, 0x81, 0x81, 0x00, 0xac, 0x0c, 0x71, 0xe9, 0xb7, 0xa9, 0x4f, 0x7b, 0x32, 0x21, 0x68, 0x98, 0xc3, 0x0d, 0xe2, 0xb5, 0x80, 0x49, 0xa1, 0xf4, 0xb6, 0xeb, 0x33, 0xfd, 0xfb, 0xe6, 0x6c, 0x4f, 0xda, 0xd7, 0xe6, 0x14, 0xf9, 0x21, 0xb3, 0x28, 0xe6, 0xfc, 0x08, 0x26, 0xa3, 0xb4, 0xfa, 0x60, 0xd5, 0xaf, 0x04, 0x1f, 0xbb, 0xd5, 0x9c, 0xee, 0xf9, 0xf0, 0x8e, 0x19, 0xbe, 0xa4, 0x4c, 0xb8, 0xa9, 0xf3, 0xd6, 0xe8, 0x79, 0xfb, 0x48, 0xda, 0x69, 0xc6, 0x76, 0x3a, 0x8a, 0xd6, 0x68, 0x27, 0x8f, 0xda, 0xcc, 0xe2, 0x1e, 0x68, 0xcf, 0x76, 0x07, 0x98, 0x77, 0x3e, 0xfd, 0x20, 0xc4, 0x11, 0x4a, 0xf1, 0x8c, 0xa3, 0x3b, 0xc6, 0xde, 0x5e, 0xea, 0xf1, 0xfb, 0xbf, 0x44, 0x36, 0xe3, 0xad, 0x7c, 0x5c, 0x5d, 0xf2, 0x49, 0xce, 0x7b, 0xf3, 0x29, 0x95, 0xc9, 0xe9, 0xba, 0xb8, 0xed, 0x49, 0xe5, 0x49, 0xb8, 0x6f, 0x02, 0x81, 0x80, 0x71, 0x11, 0x8a, 0x2e, 0x38, 0xcf, 0x54, 0xb9, 0x99, 0x5b, 0x95, 0x74, 0x17, 0x7e, 0xe7, 0x53, 0x59, 0x67, 0xfe, 0xc7, 0x90, 0x84, 0x5b, 0x1c, 0x89, 0x80, 0xa6, 0xa4, 0xb4, 0x71, 0x21, 0xde, 0x27, 0x9e, 0xb3, 0x58, 0x01, 0xed, 0x93, 0xdb, 0x39, 0xec, 0x0b, 0x6b, 0xc0, 0x18, 0x56, 0x3a, 0x9b, 0x36, 0x04, 0xbf, 0xaf, 0xf6, 0x94, 0x16, 0x3a, 0x41, 0x6c, 0x2a, 0x2f, 0xf0, 0x80, 0xb1, 0x73, 0x2f, 0x3a, 0x4a, 0xe1, 0x9d, 0x6b, 0x5d, 0x0b, 0x0c, 0x55, 0xfc, 0xde, 0xc6, 0xf2, 0x32, 0x6f, 0x17, 0x86, 0x4b, 0x5f, 0xc8, 0x2d, 0xcb, 0xe7, 0x88, 0xab, 0x55, 0x6e, 0x66, 0x35, 0x40, 0xdc, 0x03, 0xcb, 0x3d, 0xf8, 0x39, 0x68, 0x79, 0x39, 0x54, 0x94, 0x92, 0x2b, 0xf0, 0x9f, 0xd1, 0x00, 0x30, 0xbd, 0xae, 0x9a, 0x87, 0x2d, 0xa6, 0x73, 0x71, 0xdb, 0xe9, 0x20, 0xc8, 0x55, 0xb3, }; struct aws_array_list output_list; ASSERT_SUCCESS(aws_pem_objects_init_from_file_path(&output_list, allocator, "unittests.key")); ASSERT_UINT_EQUALS(1, aws_array_list_length(&output_list)); struct aws_pem_object *pem_object = NULL; aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 0); ASSERT_BIN_ARRAYS_EQUALS(s_expected, sizeof(s_expected), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "RSA PRIVATE KEY"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_PRIVATE_RSA_PKCS1, pem_object->type); aws_pem_objects_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_private_key_parse_from_file, s_test_pem_private_key_parse_from_file) static int s_test_pem_single_cert_parse(struct aws_allocator *allocator, void 
*ctx) { (void)ctx; static const char *s_rsa_1024_sha224_client_crt_pem = "-----BEGIN CERTIFICATE-----\n" "MIICeDCCAeGgAwIBAgIJAObttnPKQhVlMA0GCSqGSIb3DQEBDgUAMF8xCzAJBgNV\n" "BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG\n" "QW1hem9uMQwwCgYDVQQLDANzMm4xEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNzA4\n" "MDEyMjQzMzJaGA8yMTE3MDcwODIyNDMzMlowXzELMAkGA1UEBhMCVVMxCzAJBgNV\n" "BAgMAldBMRAwDgYDVQQHDAdTZWF0dGxlMQ8wDQYDVQQKDAZBbWF6b24xDDAKBgNV\n" "BAsMA3MybjESMBAGA1UEAwwJbG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUAA4GN\n" "ADCBiQKBgQCisRoXXcTh4ejn/sUjGosLlE7GlpLGtvWFEEX6Vl3klVoQdkyabLIH\n" "7bHB2P7uyt9bPzeqvWYjuepDBSQUUeb6Mkqfx237bTy8JhXIfpIhbgksTk7IPzgo\n" "XLPl1oNl7uB9HQaDQ7UPlaKbfp1gNvs6uGOH4vvyhhJGiblNJKnVwwIDAQABozow\n" "ODALBgNVHQ8EBAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwFAYDVR0RBA0wC4IJ\n" "MTI3LjAuMC4xMA0GCSqGSIb3DQEBDgUAA4GBACleH44LSYhzHHaV70VbnLbtbv8T\n" "eaUvzstFW6YvdP1XnZKssZNdvMhoiMuMD5n40/iPbv+grtjxacRQCinLk1SEjpsu\n" "3lw90Ds0Ksd/Pdsv7d0cCiJkjadON+ZQEEJ2FP/G19KZFxC3GLk9sxIUXyUW0TXn\n" "YxwtPz26+xvPRWCS\n" "-----END CERTIFICATE-----"; static const uint8_t s_expected[] = { 0x30, 0x82, 0x02, 0x78, 0x30, 0x82, 0x01, 0xe1, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xe6, 0xed, 0xb6, 0x73, 0xca, 0x42, 0x15, 0x65, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0e, 0x05, 0x00, 0x30, 0x5f, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x57, 0x41, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, 0x07, 0x53, 0x65, 0x61, 0x74, 0x74, 0x6c, 0x65, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x41, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x31, 0x0c, 0x30, 0x0a, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x0c, 0x03, 0x73, 0x32, 0x6e, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x37, 0x30, 0x38, 0x30, 0x31, 0x32, 0x32, 0x34, 0x33, 0x33, 0x32, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x37, 0x30, 0x38, 0x32, 0x32, 0x34, 0x33, 0x33, 0x32, 0x5a, 0x30, 0x5f, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x57, 0x41, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, 0x07, 0x53, 0x65, 0x61, 0x74, 0x74, 0x6c, 0x65, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x06, 0x41, 0x6d, 0x61, 0x7a, 0x6f, 0x6e, 0x31, 0x0c, 0x30, 0x0a, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x0c, 0x03, 0x73, 0x32, 0x6e, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x68, 0x6f, 0x73, 0x74, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xa2, 0xb1, 0x1a, 0x17, 0x5d, 0xc4, 0xe1, 0xe1, 0xe8, 0xe7, 0xfe, 0xc5, 0x23, 0x1a, 0x8b, 0x0b, 0x94, 0x4e, 0xc6, 0x96, 0x92, 0xc6, 0xb6, 0xf5, 0x85, 0x10, 0x45, 0xfa, 0x56, 0x5d, 0xe4, 0x95, 0x5a, 0x10, 0x76, 0x4c, 0x9a, 0x6c, 0xb2, 0x07, 0xed, 0xb1, 0xc1, 0xd8, 0xfe, 0xee, 0xca, 0xdf, 0x5b, 0x3f, 0x37, 0xaa, 0xbd, 0x66, 0x23, 0xb9, 0xea, 0x43, 0x05, 0x24, 0x14, 0x51, 0xe6, 0xfa, 0x32, 0x4a, 0x9f, 0xc7, 0x6d, 0xfb, 0x6d, 0x3c, 0xbc, 0x26, 0x15, 0xc8, 0x7e, 0x92, 0x21, 0x6e, 0x09, 0x2c, 0x4e, 0x4e, 0xc8, 0x3f, 0x38, 0x28, 0x5c, 0xb3, 0xe5, 0xd6, 0x83, 0x65, 0xee, 0xe0, 0x7d, 0x1d, 0x06, 0x83, 0x43, 0xb5, 0x0f, 0x95, 0xa2, 0x9b, 0x7e, 0x9d, 0x60, 0x36, 0xfb, 0x3a, 0xb8, 0x63, 
0x87, 0xe2, 0xfb, 0xf2, 0x86, 0x12, 0x46, 0x89, 0xb9, 0x4d, 0x24, 0xa9, 0xd5, 0xc3, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x3a, 0x30, 0x38, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x04, 0x04, 0x03, 0x02, 0x04, 0x30, 0x30, 0x13, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x0c, 0x30, 0x0a, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01, 0x30, 0x14, 0x06, 0x03, 0x55, 0x1d, 0x11, 0x04, 0x0d, 0x30, 0x0b, 0x82, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0e, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, 0x29, 0x5e, 0x1f, 0x8e, 0x0b, 0x49, 0x88, 0x73, 0x1c, 0x76, 0x95, 0xef, 0x45, 0x5b, 0x9c, 0xb6, 0xed, 0x6e, 0xff, 0x13, 0x79, 0xa5, 0x2f, 0xce, 0xcb, 0x45, 0x5b, 0xa6, 0x2f, 0x74, 0xfd, 0x57, 0x9d, 0x92, 0xac, 0xb1, 0x93, 0x5d, 0xbc, 0xc8, 0x68, 0x88, 0xcb, 0x8c, 0x0f, 0x99, 0xf8, 0xd3, 0xf8, 0x8f, 0x6e, 0xff, 0xa0, 0xae, 0xd8, 0xf1, 0x69, 0xc4, 0x50, 0x0a, 0x29, 0xcb, 0x93, 0x54, 0x84, 0x8e, 0x9b, 0x2e, 0xde, 0x5c, 0x3d, 0xd0, 0x3b, 0x34, 0x2a, 0xc7, 0x7f, 0x3d, 0xdb, 0x2f, 0xed, 0xdd, 0x1c, 0x0a, 0x22, 0x64, 0x8d, 0xa7, 0x4e, 0x37, 0xe6, 0x50, 0x10, 0x42, 0x76, 0x14, 0xff, 0xc6, 0xd7, 0xd2, 0x99, 0x17, 0x10, 0xb7, 0x18, 0xb9, 0x3d, 0xb3, 0x12, 0x14, 0x5f, 0x25, 0x16, 0xd1, 0x35, 0xe7, 0x63, 0x1c, 0x2d, 0x3f, 0x3d, 0xba, 0xfb, 0x1b, 0xcf, 0x45, 0x60, 0x92}; struct aws_byte_cursor pem_data = aws_byte_cursor_from_c_str(s_rsa_1024_sha224_client_crt_pem); struct aws_array_list output_list; ASSERT_SUCCESS(aws_pem_objects_init_from_file_contents(&output_list, allocator, pem_data)); ASSERT_UINT_EQUALS(1, aws_array_list_length(&output_list)); struct aws_pem_object *pem_object = NULL; aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 0); ASSERT_BIN_ARRAYS_EQUALS(s_expected, sizeof(s_expected), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_pem_objects_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_single_cert_parse, s_test_pem_single_cert_parse) static int s_test_pem_cert_chain_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; static const char *s_rsa_2048_pkcs1_crt_pem = "-----BEGIN CERTIFICATE-----\n" "MIICrTCCAZUCAn3VMA0GCSqGSIb3DQEBBQUAMB4xHDAaBgNVBAMME3MyblRlc3RJ\n" "bnRlcm1lZGlhdGUwIBcNMTYwMzMwMTg1NzQzWhgPMjExNjAzMDYxODU3NDNaMBgx\n" "FjAUBgNVBAMMDXMyblRlc3RTZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw\n" "ggEKAoIBAQDRw6AuYXAeRT0YuptCfJjRB/EDJyyGXnv+8TV2H1WJWhMLk8qND27r\n" "79A6EjbVmJaOV9qrokVqpDmXS712Z3BDprJ+1LFMymm3A+AFuK/skeGy0skik+Tg\n" "MmFT5XBVvmsw4uB1S9uUqktHauXgjhFPPsfvk4ewL4LulVEN2TEeI1Odj4CaMxAO\n" "Iuowm8wI2OHVzRHlrRmyJ9hYGuHHQ2TaTGIjr3WpAFuXi9pHGGMYa0uXAVPmgjdE\n" "XZ8t46u/ZKQ9W1uJkZEVKhcijT7G2VBrsBUq0CDiL+TDaGfthnBzUc9zt4fx/S/3\n" "qulC2WbKI3xrasQyjrsHTAJ75Md3rK09AgMBAAEwDQYJKoZIhvcNAQEFBQADggEB\n" "AHHkXNA9BtgAebZC2zriW4hRfeIkJMOwvfKBXHTuY5iCLD1otis6AZljcCKXM6O9\n" "489eHBC4T6mJwVsXhH+/ccEKqNRD2bUfQgOij32PsteV1eOHfHIFqdJmnBVb8tYa\n" "jxUvy7UQvXrPqaHbODrHe+7f7r1YCzerujiP5SSHphY3GQq88KemfFczp/4GnYas\n" "sE50OYe7DQcB4zvnxmAXp51JIN4ooktUU9oKIM5y2cgEWdmJzeqPANYxf0ZIPlTg\n" "ETknKw1Dzf8wlK5mFbbG4LPQh1mkDVcwQV3ogG6kGMRa7neH+6SFkNpAKuPCoje4\n" "NAE+WQ5ve1wk7nIRTQwDAF4=\n" "-----END CERTIFICATE-----\n" "-----BEGIN CERTIFICATE-----\n" "MIIDKTCCAhGgAwIBAgICVxYwDQYJKoZIhvcNAQEFBQAwFjEUMBIGA1UEAwwLczJu\n" "VGVzdFJvb3QwIBcNMTYwMzMwMTg1NzA5WhgPMjExNjAzMDYxODU3MDlaMB4xHDAa\n" 
"BgNVBAMME3MyblRlc3RJbnRlcm1lZGlhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IB\n" "DwAwggEKAoIBAQDM/i3eclxYcvedPCEnVe6A/HYsYPeP1qKBZQhbpuuX061jFZKw\n" "lecb0eau1PORLbcsYK40u3xUzoA5u6Q0ebDuqPbqSJkCazsh66cu9STl8ubbk7oI\n" "8LJjUJFhhy2Jmm9krXhPyRscU+CXOCZ2G1GhBqTI8cgMYhEVHwb3qy1EHg6G3n4W\n" "AjV+cKQcbUytq8DRmVe0bNJxDOX8ivzfAp3lUIwub+JfpxrWIUhb3iVGj5CauI98\n" "bNFHTWwYp7tviIIi21Q+L3nExCyE4yTUP/mebBZ62JnbvsWSs3r3//Am5d8G3WdY\n" "BXsERoDoLBvHnqlO/oo4ppGCRI7GkDroACi/AgMBAAGjdzB1MAwGA1UdEwQFMAMB\n" "Af8wHQYDVR0OBBYEFGqUKVWVlL03sHuOggFACdlHckPBMEYGA1UdIwQ/MD2AFE2X\n" "AbNDryMlBpMNI6Ce927uUFwToRqkGDAWMRQwEgYDVQQDDAtzMm5UZXN0Um9vdIIJ\n" "ANDUkH+UYdz1MA0GCSqGSIb3DQEBBQUAA4IBAQA3O3S9VT0EC1yG4xyNNUZ7+CzF\n" "uFA6uiO38ygcN5Nz1oNPy2eQer7vYmrHtqN6gS/o1Ag5F8bLRCqeuZTsOG80O29H\n" "kNhs5xYprdU82AqcaWwEd0kDrhC5rEvs6fj1J0NKmmhbovYxuDboj0a7If7HEqX0\n" "NizyU3M3JONPZgadchZ+F5DosatF1Bpt/gsQRy383IogQ0/FS+juHCCc4VIUemuk\n" "YY1J8o5XdrGWrPBBiudTWqCobe+N541b+YLWbajT5UKzvSqJmcqpPTniJGc9eZxc\n" "z3cCNd3cKa9bK51stEnQSlA7PQXYs3K+TD3EmSn/G2x6Hmfr7lrpbIhEaD+y\n" "-----END CERTIFICATE-----\n" "-----BEGIN CERTIFICATE-----\n" "MIIDATCCAemgAwIBAgIJANDUkH+UYdz1MA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV\n" "BAMMC3MyblRlc3RSb290MCAXDTE2MDMzMDE4NTYzOVoYDzIxMTYwMzA2MTg1NjM5\n" "WjAWMRQwEgYDVQQDDAtzMm5UZXN0Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEP\n" "ADCCAQoCggEBAMY5532000oaeed7Jmo3ssx1723ZDLpn3WGz6FxpWM0zsKA/YvdD\n" "7J6qXDvfxU6dZlmsCS+bSNAqpARKmKsBEDPTsdLmrN1V1clOxvKm6GvU1eloRTw6\n" "xukEUXJ+uxrQMLYvSJBiCBVGI+UYNCK5c6guNMRYBCGdk5/iayjmK0Nxz1918Cx9\n" "z4va8HPAgYIz0ogOdYB21O9FQGPdH1mYqRzljcSsZ7EFo1P8HJr8oKK76ZeYi2or\n" "pjzMHGnlufHaul508wQPeFAMa1Tku3HyGZRaieRAck6+QcO2NujXxKNyCBlWON23\n" "FQTuBjN/CAl74MZtcAM2hVSmpm9t4cWVN5MCAwEAAaNQME4wHQYDVR0OBBYEFE2X\n" "AbNDryMlBpMNI6Ce927uUFwTMB8GA1UdIwQYMBaAFE2XAbNDryMlBpMNI6Ce927u\n" "UFwTMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAXkVvQdXDmozPix\n" "uZi1o9cw4Si0syqfJ4sSunrzPbbmw/Qxhth5V7XGrnsQVNxamgnbzpjGhiBF6isM\n" "ldj33zQYtke+ojOjFlhEvrPo6eW29RkLBEtJadGs2bkMLztJbf+cbH2u6irzr6S4\n" "3OgVOSuB+zG56ksTnEVmum+C/8tSIAyi3eaoStPcgEU8+3/KMrH7uuenmTOCKdD1\n" "FvSDHXT9qPgTttVQGXbXzJEr5tGE+Py6yib5uoJ0dJZNtjs7HOQEDk5J0wZaX0DC\n" "MShYLiN5qLJAk0qwl+js488BJ18M9dg4TxdBYFkwHSzKXSj9TJN77Bb0RZr8LL9T\n" "r9IyvfU=\n" "-----END CERTIFICATE-----"; static const uint8_t s_expected_intermediate_1[] = { 0x30, 0x82, 0x02, 0xad, 0x30, 0x82, 0x01, 0x95, 0x02, 0x02, 0x7d, 0xd5, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x1e, 0x31, 0x1c, 0x30, 0x1a, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x13, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x36, 0x30, 0x33, 0x33, 0x30, 0x31, 0x38, 0x35, 0x37, 0x34, 0x33, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x36, 0x30, 0x33, 0x30, 0x36, 0x31, 0x38, 0x35, 0x37, 0x34, 0x33, 0x5a, 0x30, 0x18, 0x31, 0x16, 0x30, 0x14, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0d, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xd1, 0xc3, 0xa0, 0x2e, 0x61, 0x70, 0x1e, 0x45, 0x3d, 0x18, 0xba, 0x9b, 0x42, 0x7c, 0x98, 0xd1, 0x07, 0xf1, 0x03, 0x27, 0x2c, 0x86, 0x5e, 0x7b, 0xfe, 0xf1, 0x35, 0x76, 0x1f, 0x55, 0x89, 0x5a, 0x13, 0x0b, 0x93, 0xca, 0x8d, 0x0f, 0x6e, 0xeb, 0xef, 0xd0, 0x3a, 0x12, 0x36, 0xd5, 0x98, 0x96, 0x8e, 0x57, 0xda, 0xab, 0xa2, 0x45, 0x6a, 0xa4, 0x39, 0x97, 
0x4b, 0xbd, 0x76, 0x67, 0x70, 0x43, 0xa6, 0xb2, 0x7e, 0xd4, 0xb1, 0x4c, 0xca, 0x69, 0xb7, 0x03, 0xe0, 0x05, 0xb8, 0xaf, 0xec, 0x91, 0xe1, 0xb2, 0xd2, 0xc9, 0x22, 0x93, 0xe4, 0xe0, 0x32, 0x61, 0x53, 0xe5, 0x70, 0x55, 0xbe, 0x6b, 0x30, 0xe2, 0xe0, 0x75, 0x4b, 0xdb, 0x94, 0xaa, 0x4b, 0x47, 0x6a, 0xe5, 0xe0, 0x8e, 0x11, 0x4f, 0x3e, 0xc7, 0xef, 0x93, 0x87, 0xb0, 0x2f, 0x82, 0xee, 0x95, 0x51, 0x0d, 0xd9, 0x31, 0x1e, 0x23, 0x53, 0x9d, 0x8f, 0x80, 0x9a, 0x33, 0x10, 0x0e, 0x22, 0xea, 0x30, 0x9b, 0xcc, 0x08, 0xd8, 0xe1, 0xd5, 0xcd, 0x11, 0xe5, 0xad, 0x19, 0xb2, 0x27, 0xd8, 0x58, 0x1a, 0xe1, 0xc7, 0x43, 0x64, 0xda, 0x4c, 0x62, 0x23, 0xaf, 0x75, 0xa9, 0x00, 0x5b, 0x97, 0x8b, 0xda, 0x47, 0x18, 0x63, 0x18, 0x6b, 0x4b, 0x97, 0x01, 0x53, 0xe6, 0x82, 0x37, 0x44, 0x5d, 0x9f, 0x2d, 0xe3, 0xab, 0xbf, 0x64, 0xa4, 0x3d, 0x5b, 0x5b, 0x89, 0x91, 0x91, 0x15, 0x2a, 0x17, 0x22, 0x8d, 0x3e, 0xc6, 0xd9, 0x50, 0x6b, 0xb0, 0x15, 0x2a, 0xd0, 0x20, 0xe2, 0x2f, 0xe4, 0xc3, 0x68, 0x67, 0xed, 0x86, 0x70, 0x73, 0x51, 0xcf, 0x73, 0xb7, 0x87, 0xf1, 0xfd, 0x2f, 0xf7, 0xaa, 0xe9, 0x42, 0xd9, 0x66, 0xca, 0x23, 0x7c, 0x6b, 0x6a, 0xc4, 0x32, 0x8e, 0xbb, 0x07, 0x4c, 0x02, 0x7b, 0xe4, 0xc7, 0x77, 0xac, 0xad, 0x3d, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x71, 0xe4, 0x5c, 0xd0, 0x3d, 0x06, 0xd8, 0x00, 0x79, 0xb6, 0x42, 0xdb, 0x3a, 0xe2, 0x5b, 0x88, 0x51, 0x7d, 0xe2, 0x24, 0x24, 0xc3, 0xb0, 0xbd, 0xf2, 0x81, 0x5c, 0x74, 0xee, 0x63, 0x98, 0x82, 0x2c, 0x3d, 0x68, 0xb6, 0x2b, 0x3a, 0x01, 0x99, 0x63, 0x70, 0x22, 0x97, 0x33, 0xa3, 0xbd, 0xe3, 0xcf, 0x5e, 0x1c, 0x10, 0xb8, 0x4f, 0xa9, 0x89, 0xc1, 0x5b, 0x17, 0x84, 0x7f, 0xbf, 0x71, 0xc1, 0x0a, 0xa8, 0xd4, 0x43, 0xd9, 0xb5, 0x1f, 0x42, 0x03, 0xa2, 0x8f, 0x7d, 0x8f, 0xb2, 0xd7, 0x95, 0xd5, 0xe3, 0x87, 0x7c, 0x72, 0x05, 0xa9, 0xd2, 0x66, 0x9c, 0x15, 0x5b, 0xf2, 0xd6, 0x1a, 0x8f, 0x15, 0x2f, 0xcb, 0xb5, 0x10, 0xbd, 0x7a, 0xcf, 0xa9, 0xa1, 0xdb, 0x38, 0x3a, 0xc7, 0x7b, 0xee, 0xdf, 0xee, 0xbd, 0x58, 0x0b, 0x37, 0xab, 0xba, 0x38, 0x8f, 0xe5, 0x24, 0x87, 0xa6, 0x16, 0x37, 0x19, 0x0a, 0xbc, 0xf0, 0xa7, 0xa6, 0x7c, 0x57, 0x33, 0xa7, 0xfe, 0x06, 0x9d, 0x86, 0xac, 0xb0, 0x4e, 0x74, 0x39, 0x87, 0xbb, 0x0d, 0x07, 0x01, 0xe3, 0x3b, 0xe7, 0xc6, 0x60, 0x17, 0xa7, 0x9d, 0x49, 0x20, 0xde, 0x28, 0xa2, 0x4b, 0x54, 0x53, 0xda, 0x0a, 0x20, 0xce, 0x72, 0xd9, 0xc8, 0x04, 0x59, 0xd9, 0x89, 0xcd, 0xea, 0x8f, 0x00, 0xd6, 0x31, 0x7f, 0x46, 0x48, 0x3e, 0x54, 0xe0, 0x11, 0x39, 0x27, 0x2b, 0x0d, 0x43, 0xcd, 0xff, 0x30, 0x94, 0xae, 0x66, 0x15, 0xb6, 0xc6, 0xe0, 0xb3, 0xd0, 0x87, 0x59, 0xa4, 0x0d, 0x57, 0x30, 0x41, 0x5d, 0xe8, 0x80, 0x6e, 0xa4, 0x18, 0xc4, 0x5a, 0xee, 0x77, 0x87, 0xfb, 0xa4, 0x85, 0x90, 0xda, 0x40, 0x2a, 0xe3, 0xc2, 0xa2, 0x37, 0xb8, 0x34, 0x01, 0x3e, 0x59, 0x0e, 0x6f, 0x7b, 0x5c, 0x24, 0xee, 0x72, 0x11, 0x4d, 0x0c, 0x03, 0x00, 0x5e}; static const uint8_t s_expected_intermediate_2[] = { 0x30, 0x82, 0x03, 0x29, 0x30, 0x82, 0x02, 0x11, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x02, 0x57, 0x16, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x36, 0x30, 0x33, 0x33, 0x30, 0x31, 0x38, 0x35, 0x37, 0x30, 0x39, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x36, 0x30, 0x33, 0x30, 0x36, 0x31, 0x38, 0x35, 0x37, 0x30, 0x39, 0x5a, 0x30, 0x1e, 0x31, 0x1c, 0x30, 0x1a, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x13, 0x73, 
0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xcc, 0xfe, 0x2d, 0xde, 0x72, 0x5c, 0x58, 0x72, 0xf7, 0x9d, 0x3c, 0x21, 0x27, 0x55, 0xee, 0x80, 0xfc, 0x76, 0x2c, 0x60, 0xf7, 0x8f, 0xd6, 0xa2, 0x81, 0x65, 0x08, 0x5b, 0xa6, 0xeb, 0x97, 0xd3, 0xad, 0x63, 0x15, 0x92, 0xb0, 0x95, 0xe7, 0x1b, 0xd1, 0xe6, 0xae, 0xd4, 0xf3, 0x91, 0x2d, 0xb7, 0x2c, 0x60, 0xae, 0x34, 0xbb, 0x7c, 0x54, 0xce, 0x80, 0x39, 0xbb, 0xa4, 0x34, 0x79, 0xb0, 0xee, 0xa8, 0xf6, 0xea, 0x48, 0x99, 0x02, 0x6b, 0x3b, 0x21, 0xeb, 0xa7, 0x2e, 0xf5, 0x24, 0xe5, 0xf2, 0xe6, 0xdb, 0x93, 0xba, 0x08, 0xf0, 0xb2, 0x63, 0x50, 0x91, 0x61, 0x87, 0x2d, 0x89, 0x9a, 0x6f, 0x64, 0xad, 0x78, 0x4f, 0xc9, 0x1b, 0x1c, 0x53, 0xe0, 0x97, 0x38, 0x26, 0x76, 0x1b, 0x51, 0xa1, 0x06, 0xa4, 0xc8, 0xf1, 0xc8, 0x0c, 0x62, 0x11, 0x15, 0x1f, 0x06, 0xf7, 0xab, 0x2d, 0x44, 0x1e, 0x0e, 0x86, 0xde, 0x7e, 0x16, 0x02, 0x35, 0x7e, 0x70, 0xa4, 0x1c, 0x6d, 0x4c, 0xad, 0xab, 0xc0, 0xd1, 0x99, 0x57, 0xb4, 0x6c, 0xd2, 0x71, 0x0c, 0xe5, 0xfc, 0x8a, 0xfc, 0xdf, 0x02, 0x9d, 0xe5, 0x50, 0x8c, 0x2e, 0x6f, 0xe2, 0x5f, 0xa7, 0x1a, 0xd6, 0x21, 0x48, 0x5b, 0xde, 0x25, 0x46, 0x8f, 0x90, 0x9a, 0xb8, 0x8f, 0x7c, 0x6c, 0xd1, 0x47, 0x4d, 0x6c, 0x18, 0xa7, 0xbb, 0x6f, 0x88, 0x82, 0x22, 0xdb, 0x54, 0x3e, 0x2f, 0x79, 0xc4, 0xc4, 0x2c, 0x84, 0xe3, 0x24, 0xd4, 0x3f, 0xf9, 0x9e, 0x6c, 0x16, 0x7a, 0xd8, 0x99, 0xdb, 0xbe, 0xc5, 0x92, 0xb3, 0x7a, 0xf7, 0xff, 0xf0, 0x26, 0xe5, 0xdf, 0x06, 0xdd, 0x67, 0x58, 0x05, 0x7b, 0x04, 0x46, 0x80, 0xe8, 0x2c, 0x1b, 0xc7, 0x9e, 0xa9, 0x4e, 0xfe, 0x8a, 0x38, 0xa6, 0x91, 0x82, 0x44, 0x8e, 0xc6, 0x90, 0x3a, 0xe8, 0x00, 0x28, 0xbf, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x77, 0x30, 0x75, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x6a, 0x94, 0x29, 0x55, 0x95, 0x94, 0xbd, 0x37, 0xb0, 0x7b, 0x8e, 0x82, 0x01, 0x40, 0x09, 0xd9, 0x47, 0x72, 0x43, 0xc1, 0x30, 0x46, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x3f, 0x30, 0x3d, 0x80, 0x14, 0x4d, 0x97, 0x01, 0xb3, 0x43, 0xaf, 0x23, 0x25, 0x06, 0x93, 0x0d, 0x23, 0xa0, 0x9e, 0xf7, 0x6e, 0xee, 0x50, 0x5c, 0x13, 0xa1, 0x1a, 0xa4, 0x18, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x82, 0x09, 0x00, 0xd0, 0xd4, 0x90, 0x7f, 0x94, 0x61, 0xdc, 0xf5, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x37, 0x3b, 0x74, 0xbd, 0x55, 0x3d, 0x04, 0x0b, 0x5c, 0x86, 0xe3, 0x1c, 0x8d, 0x35, 0x46, 0x7b, 0xf8, 0x2c, 0xc5, 0xb8, 0x50, 0x3a, 0xba, 0x23, 0xb7, 0xf3, 0x28, 0x1c, 0x37, 0x93, 0x73, 0xd6, 0x83, 0x4f, 0xcb, 0x67, 0x90, 0x7a, 0xbe, 0xef, 0x62, 0x6a, 0xc7, 0xb6, 0xa3, 0x7a, 0x81, 0x2f, 0xe8, 0xd4, 0x08, 0x39, 0x17, 0xc6, 0xcb, 0x44, 0x2a, 0x9e, 0xb9, 0x94, 0xec, 0x38, 0x6f, 0x34, 0x3b, 0x6f, 0x47, 0x90, 0xd8, 0x6c, 0xe7, 0x16, 0x29, 0xad, 0xd5, 0x3c, 0xd8, 0x0a, 0x9c, 0x69, 0x6c, 0x04, 0x77, 0x49, 0x03, 0xae, 0x10, 0xb9, 0xac, 0x4b, 0xec, 0xe9, 0xf8, 0xf5, 0x27, 0x43, 0x4a, 0x9a, 0x68, 0x5b, 0xa2, 0xf6, 0x31, 0xb8, 0x36, 0xe8, 0x8f, 0x46, 0xbb, 0x21, 0xfe, 0xc7, 0x12, 0xa5, 0xf4, 0x36, 0x2c, 0xf2, 0x53, 0x73, 0x37, 0x24, 0xe3, 0x4f, 0x66, 0x06, 0x9d, 0x72, 0x16, 0x7e, 0x17, 0x90, 0xe8, 0xb1, 0xab, 0x45, 0xd4, 0x1a, 0x6d, 
0xfe, 0x0b, 0x10, 0x47, 0x2d, 0xfc, 0xdc, 0x8a, 0x20, 0x43, 0x4f, 0xc5, 0x4b, 0xe8, 0xee, 0x1c, 0x20, 0x9c, 0xe1, 0x52, 0x14, 0x7a, 0x6b, 0xa4, 0x61, 0x8d, 0x49, 0xf2, 0x8e, 0x57, 0x76, 0xb1, 0x96, 0xac, 0xf0, 0x41, 0x8a, 0xe7, 0x53, 0x5a, 0xa0, 0xa8, 0x6d, 0xef, 0x8d, 0xe7, 0x8d, 0x5b, 0xf9, 0x82, 0xd6, 0x6d, 0xa8, 0xd3, 0xe5, 0x42, 0xb3, 0xbd, 0x2a, 0x89, 0x99, 0xca, 0xa9, 0x3d, 0x39, 0xe2, 0x24, 0x67, 0x3d, 0x79, 0x9c, 0x5c, 0xcf, 0x77, 0x02, 0x35, 0xdd, 0xdc, 0x29, 0xaf, 0x5b, 0x2b, 0x9d, 0x6c, 0xb4, 0x49, 0xd0, 0x4a, 0x50, 0x3b, 0x3d, 0x05, 0xd8, 0xb3, 0x72, 0xbe, 0x4c, 0x3d, 0xc4, 0x99, 0x29, 0xff, 0x1b, 0x6c, 0x7a, 0x1e, 0x67, 0xeb, 0xee, 0x5a, 0xe9, 0x6c, 0x88, 0x44, 0x68, 0x3f, 0xb2}; static const uint8_t s_expected_leaf[] = { 0x30, 0x82, 0x03, 0x01, 0x30, 0x82, 0x01, 0xe9, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xd0, 0xd4, 0x90, 0x7f, 0x94, 0x61, 0xdc, 0xf5, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x36, 0x30, 0x33, 0x33, 0x30, 0x31, 0x38, 0x35, 0x36, 0x33, 0x39, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x36, 0x30, 0x33, 0x30, 0x36, 0x31, 0x38, 0x35, 0x36, 0x33, 0x39, 0x5a, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xc6, 0x39, 0xe7, 0x7d, 0xb4, 0xd3, 0x4a, 0x1a, 0x79, 0xe7, 0x7b, 0x26, 0x6a, 0x37, 0xb2, 0xcc, 0x75, 0xef, 0x6d, 0xd9, 0x0c, 0xba, 0x67, 0xdd, 0x61, 0xb3, 0xe8, 0x5c, 0x69, 0x58, 0xcd, 0x33, 0xb0, 0xa0, 0x3f, 0x62, 0xf7, 0x43, 0xec, 0x9e, 0xaa, 0x5c, 0x3b, 0xdf, 0xc5, 0x4e, 0x9d, 0x66, 0x59, 0xac, 0x09, 0x2f, 0x9b, 0x48, 0xd0, 0x2a, 0xa4, 0x04, 0x4a, 0x98, 0xab, 0x01, 0x10, 0x33, 0xd3, 0xb1, 0xd2, 0xe6, 0xac, 0xdd, 0x55, 0xd5, 0xc9, 0x4e, 0xc6, 0xf2, 0xa6, 0xe8, 0x6b, 0xd4, 0xd5, 0xe9, 0x68, 0x45, 0x3c, 0x3a, 0xc6, 0xe9, 0x04, 0x51, 0x72, 0x7e, 0xbb, 0x1a, 0xd0, 0x30, 0xb6, 0x2f, 0x48, 0x90, 0x62, 0x08, 0x15, 0x46, 0x23, 0xe5, 0x18, 0x34, 0x22, 0xb9, 0x73, 0xa8, 0x2e, 0x34, 0xc4, 0x58, 0x04, 0x21, 0x9d, 0x93, 0x9f, 0xe2, 0x6b, 0x28, 0xe6, 0x2b, 0x43, 0x71, 0xcf, 0x5f, 0x75, 0xf0, 0x2c, 0x7d, 0xcf, 0x8b, 0xda, 0xf0, 0x73, 0xc0, 0x81, 0x82, 0x33, 0xd2, 0x88, 0x0e, 0x75, 0x80, 0x76, 0xd4, 0xef, 0x45, 0x40, 0x63, 0xdd, 0x1f, 0x59, 0x98, 0xa9, 0x1c, 0xe5, 0x8d, 0xc4, 0xac, 0x67, 0xb1, 0x05, 0xa3, 0x53, 0xfc, 0x1c, 0x9a, 0xfc, 0xa0, 0xa2, 0xbb, 0xe9, 0x97, 0x98, 0x8b, 0x6a, 0x2b, 0xa6, 0x3c, 0xcc, 0x1c, 0x69, 0xe5, 0xb9, 0xf1, 0xda, 0xba, 0x5e, 0x74, 0xf3, 0x04, 0x0f, 0x78, 0x50, 0x0c, 0x6b, 0x54, 0xe4, 0xbb, 0x71, 0xf2, 0x19, 0x94, 0x5a, 0x89, 0xe4, 0x40, 0x72, 0x4e, 0xbe, 0x41, 0xc3, 0xb6, 0x36, 0xe8, 0xd7, 0xc4, 0xa3, 0x72, 0x08, 0x19, 0x56, 0x38, 0xdd, 0xb7, 0x15, 0x04, 0xee, 0x06, 0x33, 0x7f, 0x08, 0x09, 0x7b, 0xe0, 0xc6, 0x6d, 0x70, 0x03, 0x36, 0x85, 0x54, 0xa6, 0xa6, 0x6f, 0x6d, 0xe1, 0xc5, 0x95, 0x37, 0x93, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x4d, 0x97, 0x01, 0xb3, 0x43, 0xaf, 0x23, 0x25, 0x06, 0x93, 0x0d, 0x23, 0xa0, 0x9e, 0xf7, 0x6e, 0xee, 0x50, 0x5c, 0x13, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0x4d, 0x97, 0x01, 0xb3, 0x43, 
0xaf, 0x23, 0x25, 0x06, 0x93, 0x0d, 0x23, 0xa0, 0x9e, 0xf7, 0x6e, 0xee, 0x50, 0x5c, 0x13, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x05, 0xe4, 0x56, 0xf4, 0x1d, 0x5c, 0x39, 0xa8, 0xcc, 0xf8, 0xb1, 0xb9, 0x98, 0xb5, 0xa3, 0xd7, 0x30, 0xe1, 0x28, 0xb4, 0xb3, 0x2a, 0x9f, 0x27, 0x8b, 0x12, 0xba, 0x7a, 0xf3, 0x3d, 0xb6, 0xe6, 0xc3, 0xf4, 0x31, 0x86, 0xd8, 0x79, 0x57, 0xb5, 0xc6, 0xae, 0x7b, 0x10, 0x54, 0xdc, 0x5a, 0x9a, 0x09, 0xdb, 0xce, 0x98, 0xc6, 0x86, 0x20, 0x45, 0xea, 0x2b, 0x0c, 0x95, 0xd8, 0xf7, 0xdf, 0x34, 0x18, 0xb6, 0x47, 0xbe, 0xa2, 0x33, 0xa3, 0x16, 0x58, 0x44, 0xbe, 0xb3, 0xe8, 0xe9, 0xe5, 0xb6, 0xf5, 0x19, 0x0b, 0x04, 0x4b, 0x49, 0x69, 0xd1, 0xac, 0xd9, 0xb9, 0x0c, 0x2f, 0x3b, 0x49, 0x6d, 0xff, 0x9c, 0x6c, 0x7d, 0xae, 0xea, 0x2a, 0xf3, 0xaf, 0xa4, 0xb8, 0xdc, 0xe8, 0x15, 0x39, 0x2b, 0x81, 0xfb, 0x31, 0xb9, 0xea, 0x4b, 0x13, 0x9c, 0x45, 0x66, 0xba, 0x6f, 0x82, 0xff, 0xcb, 0x52, 0x20, 0x0c, 0xa2, 0xdd, 0xe6, 0xa8, 0x4a, 0xd3, 0xdc, 0x80, 0x45, 0x3c, 0xfb, 0x7f, 0xca, 0x32, 0xb1, 0xfb, 0xba, 0xe7, 0xa7, 0x99, 0x33, 0x82, 0x29, 0xd0, 0xf5, 0x16, 0xf4, 0x83, 0x1d, 0x74, 0xfd, 0xa8, 0xf8, 0x13, 0xb6, 0xd5, 0x50, 0x19, 0x76, 0xd7, 0xcc, 0x91, 0x2b, 0xe6, 0xd1, 0x84, 0xf8, 0xfc, 0xba, 0xca, 0x26, 0xf9, 0xba, 0x82, 0x74, 0x74, 0x96, 0x4d, 0xb6, 0x3b, 0x3b, 0x1c, 0xe4, 0x04, 0x0e, 0x4e, 0x49, 0xd3, 0x06, 0x5a, 0x5f, 0x40, 0xc2, 0x31, 0x28, 0x58, 0x2e, 0x23, 0x79, 0xa8, 0xb2, 0x40, 0x93, 0x4a, 0xb0, 0x97, 0xe8, 0xec, 0xe3, 0xcf, 0x01, 0x27, 0x5f, 0x0c, 0xf5, 0xd8, 0x38, 0x4f, 0x17, 0x41, 0x60, 0x59, 0x30, 0x1d, 0x2c, 0xca, 0x5d, 0x28, 0xfd, 0x4c, 0x93, 0x7b, 0xec, 0x16, 0xf4, 0x45, 0x9a, 0xfc, 0x2c, 0xbf, 0x53, 0xaf, 0xd2, 0x32, 0xbd, 0xf5, }; struct aws_byte_cursor pem_data = aws_byte_cursor_from_c_str(s_rsa_2048_pkcs1_crt_pem); struct aws_array_list output_list; ASSERT_SUCCESS(aws_pem_objects_init_from_file_contents(&output_list, allocator, pem_data)); ASSERT_UINT_EQUALS(3, aws_array_list_length(&output_list)); struct aws_pem_object *pem_object = NULL; aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 0); ASSERT_BIN_ARRAYS_EQUALS( s_expected_intermediate_1, sizeof(s_expected_intermediate_1), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 1); ASSERT_BIN_ARRAYS_EQUALS( s_expected_intermediate_2, sizeof(s_expected_intermediate_2), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 2); ASSERT_BIN_ARRAYS_EQUALS(s_expected_leaf, sizeof(s_expected_leaf), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_pem_objects_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_cert_chain_parse, s_test_pem_cert_chain_parse) static int s_test_pem_private_key_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; static const char *s_private_key_pem = "-----BEGIN RSA PRIVATE KEY-----\n" 
"MIIEpAIBAAKCAQEA12pXSPgORAMlQtYRbxuz/Ocaoran3C2Fjyjhu0vucSEZSwxD\n" "Jp75TBQEMafSpSEKAQLeDt7xuDRDYn52V4UE6cF+xTWhtzsf7mhN/lHaDPcvR2AS\n" "PAEkzkil8KCLY4e6tTxSwQ97splNuEZ099HoJYTTLFaReIfd1D3zZ1EYcSw8w+GZ\n" "2SxEUfYUSL2CFmIYSkQjnlsJCIpCoGgDiBAPbIUJO3KWBDX0JgGDbx3Wf3jXG/Y6\n" "T63LPsO+AS20RCvcEF0F/rlDINzI5EAHO1TOEd9fKOu+JAK06Pw1m77BgOrE7Ftv\n" "IG7kYNVuOEPeInOHkOuqryDisB1PwiyPNIbqdQIDAQABAoIBAESQuI+lRQUo6ydG\n" "8+2lp7iL5tJ7yRov8x8KKC9xj8e6fU6B7K3SVA9/H4aeoFGnHoQL4ZpiJBY5rGkh\n" "T5Gz6UhuKmejFoI384Xy9UBJ1VnjI81YKvWmd4yhWxAoSbW4chlVxhFlWD4UxcQt\n" "yPVIftfSW1T1iQAQXu87eMod6eW7VWlyMKicYkBGB2ohI0hW8chx361z96QcpxhA\n" "yBAfnhxuTgKFYSRVfwYSOjHYPOvozmU7Wj0iURT+1MM4iO8YlBDuZEJArs3WAdIe\n" "pmCq6snzOAJ6Y9iE0EGti9QGiAo6na/nWAfVlRSMyS/C1GC0oM0MnpRKSLW0tvLV\n" "vtJG81ECgYEA7lzGpdlAKwWNKPc2YIbtUNomD/eOr7TzYedYxJ88SG52THjgE3Pu\n" "poF3wZFjdtlwx1u4nsxlVe50FBTCN5s2FV4/8YP980zis+HtUC5pWCO3Oy6+DjSj\n" "K9st+mGyzYjl3opVqcQZkHj1LPqNxBmvFpDgAtVZfdKSdyuzZpj8s5sCgYEA51rj\n" "EFa/ijILp1P5vKn8b3pIfQFSsUsX5NXTy31f/2UwVV491djMyNyhtaRcrXP9CYpq\n" "38o1xvUaxe2hlND/jiBjBHfsC13oUOVz8TrAzxDKAzbGLcOT2trgxMFbR8Ez+jur\n" "1yQbPnoKZrB7SopAkcVqZv4ks0LLu+BLfEFXYy8CgYEApN8xXDgoRVnCqQpN53iM\n" "n/c0iqjOXkTIb/jIksAdv3AAjaayP2JaOXul7RL2fJeshYiw684vbb/RNK6jJDlM\n" "sH0Pt6t3tZmB2bC1KFfh7+BMdjg/p63LC6PAasa3GanObh67YADPOfoghCsOcgzd\n" "6brt56fRDdHgE2P75ER/zm8CgYEArAxx6bepT3syIWiYww3itYBJofS26zP9++Zs\n" "T9rX5hT5IbMo5vwIJqO0+mDVrwQfu9Wc7vnwjhm+pEy4qfPW6Hn7SNppxnY6itZo\n" "J4/azOIeaM92B5h3Pv0gxBFK8YyjO8beXurx+79ENuOtfFxd8knOe/Mplcnpurjt\n" "SeVJuG8CgYBxEYouOM9UuZlblXQXfudTWWf+x5CEWxyJgKaktHEh3iees1gB7ZPb\n" "OewLa8AYVjqbNgS/r/aUFjpBbCov8ICxcy86SuGda10LDFX83sbyMm8XhktfyC3L\n" "54irVW5mNUDcA8s9+DloeTlUlJIr8J/RADC9rpqHLaZzcdvpIMhVsw==\n" "-----END RSA PRIVATE KEY-----"; static const uint8_t s_expected[] = { 0x30, 0x82, 0x04, 0xa4, 0x02, 0x01, 0x00, 0x02, 0x82, 0x01, 0x01, 0x00, 0xd7, 0x6a, 0x57, 0x48, 0xf8, 0x0e, 0x44, 0x03, 0x25, 0x42, 0xd6, 0x11, 0x6f, 0x1b, 0xb3, 0xfc, 0xe7, 0x1a, 0xa2, 0xb6, 0xa7, 0xdc, 0x2d, 0x85, 0x8f, 0x28, 0xe1, 0xbb, 0x4b, 0xee, 0x71, 0x21, 0x19, 0x4b, 0x0c, 0x43, 0x26, 0x9e, 0xf9, 0x4c, 0x14, 0x04, 0x31, 0xa7, 0xd2, 0xa5, 0x21, 0x0a, 0x01, 0x02, 0xde, 0x0e, 0xde, 0xf1, 0xb8, 0x34, 0x43, 0x62, 0x7e, 0x76, 0x57, 0x85, 0x04, 0xe9, 0xc1, 0x7e, 0xc5, 0x35, 0xa1, 0xb7, 0x3b, 0x1f, 0xee, 0x68, 0x4d, 0xfe, 0x51, 0xda, 0x0c, 0xf7, 0x2f, 0x47, 0x60, 0x12, 0x3c, 0x01, 0x24, 0xce, 0x48, 0xa5, 0xf0, 0xa0, 0x8b, 0x63, 0x87, 0xba, 0xb5, 0x3c, 0x52, 0xc1, 0x0f, 0x7b, 0xb2, 0x99, 0x4d, 0xb8, 0x46, 0x74, 0xf7, 0xd1, 0xe8, 0x25, 0x84, 0xd3, 0x2c, 0x56, 0x91, 0x78, 0x87, 0xdd, 0xd4, 0x3d, 0xf3, 0x67, 0x51, 0x18, 0x71, 0x2c, 0x3c, 0xc3, 0xe1, 0x99, 0xd9, 0x2c, 0x44, 0x51, 0xf6, 0x14, 0x48, 0xbd, 0x82, 0x16, 0x62, 0x18, 0x4a, 0x44, 0x23, 0x9e, 0x5b, 0x09, 0x08, 0x8a, 0x42, 0xa0, 0x68, 0x03, 0x88, 0x10, 0x0f, 0x6c, 0x85, 0x09, 0x3b, 0x72, 0x96, 0x04, 0x35, 0xf4, 0x26, 0x01, 0x83, 0x6f, 0x1d, 0xd6, 0x7f, 0x78, 0xd7, 0x1b, 0xf6, 0x3a, 0x4f, 0xad, 0xcb, 0x3e, 0xc3, 0xbe, 0x01, 0x2d, 0xb4, 0x44, 0x2b, 0xdc, 0x10, 0x5d, 0x05, 0xfe, 0xb9, 0x43, 0x20, 0xdc, 0xc8, 0xe4, 0x40, 0x07, 0x3b, 0x54, 0xce, 0x11, 0xdf, 0x5f, 0x28, 0xeb, 0xbe, 0x24, 0x02, 0xb4, 0xe8, 0xfc, 0x35, 0x9b, 0xbe, 0xc1, 0x80, 0xea, 0xc4, 0xec, 0x5b, 0x6f, 0x20, 0x6e, 0xe4, 0x60, 0xd5, 0x6e, 0x38, 0x43, 0xde, 0x22, 0x73, 0x87, 0x90, 0xeb, 0xaa, 0xaf, 0x20, 0xe2, 0xb0, 0x1d, 0x4f, 0xc2, 0x2c, 0x8f, 0x34, 0x86, 0xea, 0x75, 0x02, 0x03, 0x01, 0x00, 0x01, 0x02, 0x82, 0x01, 0x00, 0x44, 0x90, 0xb8, 0x8f, 0xa5, 0x45, 0x05, 0x28, 0xeb, 0x27, 0x46, 0xf3, 0xed, 0xa5, 0xa7, 0xb8, 0x8b, 
0xe6, 0xd2, 0x7b, 0xc9, 0x1a, 0x2f, 0xf3, 0x1f, 0x0a, 0x28, 0x2f, 0x71, 0x8f, 0xc7, 0xba, 0x7d, 0x4e, 0x81, 0xec, 0xad, 0xd2, 0x54, 0x0f, 0x7f, 0x1f, 0x86, 0x9e, 0xa0, 0x51, 0xa7, 0x1e, 0x84, 0x0b, 0xe1, 0x9a, 0x62, 0x24, 0x16, 0x39, 0xac, 0x69, 0x21, 0x4f, 0x91, 0xb3, 0xe9, 0x48, 0x6e, 0x2a, 0x67, 0xa3, 0x16, 0x82, 0x37, 0xf3, 0x85, 0xf2, 0xf5, 0x40, 0x49, 0xd5, 0x59, 0xe3, 0x23, 0xcd, 0x58, 0x2a, 0xf5, 0xa6, 0x77, 0x8c, 0xa1, 0x5b, 0x10, 0x28, 0x49, 0xb5, 0xb8, 0x72, 0x19, 0x55, 0xc6, 0x11, 0x65, 0x58, 0x3e, 0x14, 0xc5, 0xc4, 0x2d, 0xc8, 0xf5, 0x48, 0x7e, 0xd7, 0xd2, 0x5b, 0x54, 0xf5, 0x89, 0x00, 0x10, 0x5e, 0xef, 0x3b, 0x78, 0xca, 0x1d, 0xe9, 0xe5, 0xbb, 0x55, 0x69, 0x72, 0x30, 0xa8, 0x9c, 0x62, 0x40, 0x46, 0x07, 0x6a, 0x21, 0x23, 0x48, 0x56, 0xf1, 0xc8, 0x71, 0xdf, 0xad, 0x73, 0xf7, 0xa4, 0x1c, 0xa7, 0x18, 0x40, 0xc8, 0x10, 0x1f, 0x9e, 0x1c, 0x6e, 0x4e, 0x02, 0x85, 0x61, 0x24, 0x55, 0x7f, 0x06, 0x12, 0x3a, 0x31, 0xd8, 0x3c, 0xeb, 0xe8, 0xce, 0x65, 0x3b, 0x5a, 0x3d, 0x22, 0x51, 0x14, 0xfe, 0xd4, 0xc3, 0x38, 0x88, 0xef, 0x18, 0x94, 0x10, 0xee, 0x64, 0x42, 0x40, 0xae, 0xcd, 0xd6, 0x01, 0xd2, 0x1e, 0xa6, 0x60, 0xaa, 0xea, 0xc9, 0xf3, 0x38, 0x02, 0x7a, 0x63, 0xd8, 0x84, 0xd0, 0x41, 0xad, 0x8b, 0xd4, 0x06, 0x88, 0x0a, 0x3a, 0x9d, 0xaf, 0xe7, 0x58, 0x07, 0xd5, 0x95, 0x14, 0x8c, 0xc9, 0x2f, 0xc2, 0xd4, 0x60, 0xb4, 0xa0, 0xcd, 0x0c, 0x9e, 0x94, 0x4a, 0x48, 0xb5, 0xb4, 0xb6, 0xf2, 0xd5, 0xbe, 0xd2, 0x46, 0xf3, 0x51, 0x02, 0x81, 0x81, 0x00, 0xee, 0x5c, 0xc6, 0xa5, 0xd9, 0x40, 0x2b, 0x05, 0x8d, 0x28, 0xf7, 0x36, 0x60, 0x86, 0xed, 0x50, 0xda, 0x26, 0x0f, 0xf7, 0x8e, 0xaf, 0xb4, 0xf3, 0x61, 0xe7, 0x58, 0xc4, 0x9f, 0x3c, 0x48, 0x6e, 0x76, 0x4c, 0x78, 0xe0, 0x13, 0x73, 0xee, 0xa6, 0x81, 0x77, 0xc1, 0x91, 0x63, 0x76, 0xd9, 0x70, 0xc7, 0x5b, 0xb8, 0x9e, 0xcc, 0x65, 0x55, 0xee, 0x74, 0x14, 0x14, 0xc2, 0x37, 0x9b, 0x36, 0x15, 0x5e, 0x3f, 0xf1, 0x83, 0xfd, 0xf3, 0x4c, 0xe2, 0xb3, 0xe1, 0xed, 0x50, 0x2e, 0x69, 0x58, 0x23, 0xb7, 0x3b, 0x2e, 0xbe, 0x0e, 0x34, 0xa3, 0x2b, 0xdb, 0x2d, 0xfa, 0x61, 0xb2, 0xcd, 0x88, 0xe5, 0xde, 0x8a, 0x55, 0xa9, 0xc4, 0x19, 0x90, 0x78, 0xf5, 0x2c, 0xfa, 0x8d, 0xc4, 0x19, 0xaf, 0x16, 0x90, 0xe0, 0x02, 0xd5, 0x59, 0x7d, 0xd2, 0x92, 0x77, 0x2b, 0xb3, 0x66, 0x98, 0xfc, 0xb3, 0x9b, 0x02, 0x81, 0x81, 0x00, 0xe7, 0x5a, 0xe3, 0x10, 0x56, 0xbf, 0x8a, 0x32, 0x0b, 0xa7, 0x53, 0xf9, 0xbc, 0xa9, 0xfc, 0x6f, 0x7a, 0x48, 0x7d, 0x01, 0x52, 0xb1, 0x4b, 0x17, 0xe4, 0xd5, 0xd3, 0xcb, 0x7d, 0x5f, 0xff, 0x65, 0x30, 0x55, 0x5e, 0x3d, 0xd5, 0xd8, 0xcc, 0xc8, 0xdc, 0xa1, 0xb5, 0xa4, 0x5c, 0xad, 0x73, 0xfd, 0x09, 0x8a, 0x6a, 0xdf, 0xca, 0x35, 0xc6, 0xf5, 0x1a, 0xc5, 0xed, 0xa1, 0x94, 0xd0, 0xff, 0x8e, 0x20, 0x63, 0x04, 0x77, 0xec, 0x0b, 0x5d, 0xe8, 0x50, 0xe5, 0x73, 0xf1, 0x3a, 0xc0, 0xcf, 0x10, 0xca, 0x03, 0x36, 0xc6, 0x2d, 0xc3, 0x93, 0xda, 0xda, 0xe0, 0xc4, 0xc1, 0x5b, 0x47, 0xc1, 0x33, 0xfa, 0x3b, 0xab, 0xd7, 0x24, 0x1b, 0x3e, 0x7a, 0x0a, 0x66, 0xb0, 0x7b, 0x4a, 0x8a, 0x40, 0x91, 0xc5, 0x6a, 0x66, 0xfe, 0x24, 0xb3, 0x42, 0xcb, 0xbb, 0xe0, 0x4b, 0x7c, 0x41, 0x57, 0x63, 0x2f, 0x02, 0x81, 0x81, 0x00, 0xa4, 0xdf, 0x31, 0x5c, 0x38, 0x28, 0x45, 0x59, 0xc2, 0xa9, 0x0a, 0x4d, 0xe7, 0x78, 0x8c, 0x9f, 0xf7, 0x34, 0x8a, 0xa8, 0xce, 0x5e, 0x44, 0xc8, 0x6f, 0xf8, 0xc8, 0x92, 0xc0, 0x1d, 0xbf, 0x70, 0x00, 0x8d, 0xa6, 0xb2, 0x3f, 0x62, 0x5a, 0x39, 0x7b, 0xa5, 0xed, 0x12, 0xf6, 0x7c, 0x97, 0xac, 0x85, 0x88, 0xb0, 0xeb, 0xce, 0x2f, 0x6d, 0xbf, 0xd1, 0x34, 0xae, 0xa3, 0x24, 0x39, 0x4c, 0xb0, 0x7d, 0x0f, 0xb7, 0xab, 0x77, 0xb5, 0x99, 0x81, 0xd9, 0xb0, 0xb5, 0x28, 0x57, 0xe1, 0xef, 0xe0, 0x4c, 0x76, 0x38, 0x3f, 0xa7, 
0xad, 0xcb, 0x0b, 0xa3, 0xc0, 0x6a, 0xc6, 0xb7, 0x19, 0xa9, 0xce, 0x6e, 0x1e, 0xbb, 0x60, 0x00, 0xcf, 0x39, 0xfa, 0x20, 0x84, 0x2b, 0x0e, 0x72, 0x0c, 0xdd, 0xe9, 0xba, 0xed, 0xe7, 0xa7, 0xd1, 0x0d, 0xd1, 0xe0, 0x13, 0x63, 0xfb, 0xe4, 0x44, 0x7f, 0xce, 0x6f, 0x02, 0x81, 0x81, 0x00, 0xac, 0x0c, 0x71, 0xe9, 0xb7, 0xa9, 0x4f, 0x7b, 0x32, 0x21, 0x68, 0x98, 0xc3, 0x0d, 0xe2, 0xb5, 0x80, 0x49, 0xa1, 0xf4, 0xb6, 0xeb, 0x33, 0xfd, 0xfb, 0xe6, 0x6c, 0x4f, 0xda, 0xd7, 0xe6, 0x14, 0xf9, 0x21, 0xb3, 0x28, 0xe6, 0xfc, 0x08, 0x26, 0xa3, 0xb4, 0xfa, 0x60, 0xd5, 0xaf, 0x04, 0x1f, 0xbb, 0xd5, 0x9c, 0xee, 0xf9, 0xf0, 0x8e, 0x19, 0xbe, 0xa4, 0x4c, 0xb8, 0xa9, 0xf3, 0xd6, 0xe8, 0x79, 0xfb, 0x48, 0xda, 0x69, 0xc6, 0x76, 0x3a, 0x8a, 0xd6, 0x68, 0x27, 0x8f, 0xda, 0xcc, 0xe2, 0x1e, 0x68, 0xcf, 0x76, 0x07, 0x98, 0x77, 0x3e, 0xfd, 0x20, 0xc4, 0x11, 0x4a, 0xf1, 0x8c, 0xa3, 0x3b, 0xc6, 0xde, 0x5e, 0xea, 0xf1, 0xfb, 0xbf, 0x44, 0x36, 0xe3, 0xad, 0x7c, 0x5c, 0x5d, 0xf2, 0x49, 0xce, 0x7b, 0xf3, 0x29, 0x95, 0xc9, 0xe9, 0xba, 0xb8, 0xed, 0x49, 0xe5, 0x49, 0xb8, 0x6f, 0x02, 0x81, 0x80, 0x71, 0x11, 0x8a, 0x2e, 0x38, 0xcf, 0x54, 0xb9, 0x99, 0x5b, 0x95, 0x74, 0x17, 0x7e, 0xe7, 0x53, 0x59, 0x67, 0xfe, 0xc7, 0x90, 0x84, 0x5b, 0x1c, 0x89, 0x80, 0xa6, 0xa4, 0xb4, 0x71, 0x21, 0xde, 0x27, 0x9e, 0xb3, 0x58, 0x01, 0xed, 0x93, 0xdb, 0x39, 0xec, 0x0b, 0x6b, 0xc0, 0x18, 0x56, 0x3a, 0x9b, 0x36, 0x04, 0xbf, 0xaf, 0xf6, 0x94, 0x16, 0x3a, 0x41, 0x6c, 0x2a, 0x2f, 0xf0, 0x80, 0xb1, 0x73, 0x2f, 0x3a, 0x4a, 0xe1, 0x9d, 0x6b, 0x5d, 0x0b, 0x0c, 0x55, 0xfc, 0xde, 0xc6, 0xf2, 0x32, 0x6f, 0x17, 0x86, 0x4b, 0x5f, 0xc8, 0x2d, 0xcb, 0xe7, 0x88, 0xab, 0x55, 0x6e, 0x66, 0x35, 0x40, 0xdc, 0x03, 0xcb, 0x3d, 0xf8, 0x39, 0x68, 0x79, 0x39, 0x54, 0x94, 0x92, 0x2b, 0xf0, 0x9f, 0xd1, 0x00, 0x30, 0xbd, 0xae, 0x9a, 0x87, 0x2d, 0xa6, 0x73, 0x71, 0xdb, 0xe9, 0x20, 0xc8, 0x55, 0xb3, }; struct aws_byte_cursor pem_data = aws_byte_cursor_from_c_str(s_private_key_pem); struct aws_array_list output_list; ASSERT_SUCCESS(aws_pem_objects_init_from_file_contents(&output_list, allocator, pem_data)); ASSERT_UINT_EQUALS(1, aws_array_list_length(&output_list)); struct aws_pem_object *pem_object = NULL; aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 0); ASSERT_BIN_ARRAYS_EQUALS(s_expected, sizeof(s_expected), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "RSA PRIVATE KEY"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_PRIVATE_RSA_PKCS1, pem_object->type); aws_pem_objects_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_private_key_parse, s_test_pem_private_key_parse) static int s_test_pem_cert_chain_comments_and_whitespace(struct aws_allocator *allocator, void *ctx) { (void)ctx; static const char *s_pem_data_str = "# -----Comment\n" "// Style\n" "/* from */\n" "''' multiple\n" "!* languages\n" "\n" "-----BEGIN CERTIFICATE-----\n" "MIICrTCCAZUCAn3VMA0GCSqGSIb3DQEBBQUAMB4xHDAaBgNVBAMME3MyblRlc3RJ\n" "bnRlcm1lZGlhdGUwIBcNMTYwMzMwMTg1NzQzWhgPMjExNjAzMDYxODU3NDNaMBgx\n" "FjAUBgNVBAMMDXMyblRlc3RTZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw\n" "ggEKAoIBAQDRw6AuYXAeRT0YuptCfJjRB/EDJyyGXnv+8TV2H1WJWhMLk8qND27r\n" "79A6EjbVmJaOV9qrokVqpDmXS712Z3BDprJ+1LFMymm3A+AFuK/skeGy0skik+Tg\n" "MmFT5XBVvmsw4uB1S9uUqktHauXgjhFPPsfvk4ewL4LulVEN2TEeI1Odj4CaMxAO\n" "Iuowm8wI2OHVzRHlrRmyJ9hYGuHHQ2TaTGIjr3WpAFuXi9pHGGMYa0uXAVPmgjdE\n" "XZ8t46u/ZKQ9W1uJkZEVKhcijT7G2VBrsBUq0CDiL+TDaGfthnBzUc9zt4fx/S/3\n" "qulC2WbKI3xrasQyjrsHTAJ75Md3rK09AgMBAAEwDQYJKoZIhvcNAQEFBQADggEB\n" 
"AHHkXNA9BtgAebZC2zriW4hRfeIkJMOwvfKBXHTuY5iCLD1otis6AZljcCKXM6O9\n" "489eHBC4T6mJwVsXhH+/ccEKqNRD2bUfQgOij32PsteV1eOHfHIFqdJmnBVb8tYa\n" "jxUvy7UQvXrPqaHbODrHe+7f7r1YCzerujiP5SSHphY3GQq88KemfFczp/4GnYas\n" "sE50OYe7DQcB4zvnxmAXp51JIN4ooktUU9oKIM5y2cgEWdmJzeqPANYxf0ZIPlTg\n" "ETknKw1Dzf8wlK5mFbbG4LPQh1mkDVcwQV3ogG6kGMRa7neH+6SFkNpAKuPCoje4\n" "NAE+WQ5ve1wk7nIRTQwDAF4=\n" "-----END CERTIFICATE----- \n" "\n" "\n" " \n" "\n" "\n" " \n" "\n" " \n" "\n" " \n" " -----BEGIN CERTIFICATE-----\n" "MIIDKTCCAhGgAwIBAgICVxYwDQYJKoZIhvcNAQEFBQAwFjEUMBIGA1UEAwwLczJu\n" "VGVzdFJvb3QwIBcNMTYwMzMwMTg1NzA5WhgPMjExNjAzMDYxODU3MDlaMB4xHDAa\n" "BgNVBAMME3MyblRlc3RJbnRlcm1lZGlhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IB\n" "DwAwggEKAoIBAQDM/i3eclxYcvedPCEnVe6A/HYsYPeP1qKBZQhbpuuX061jFZKw\n" "lecb0eau1PORLbcsYK40u3xUzoA5u6Q0ebDuqPbqSJkCazsh66cu9STl8ubbk7oI\n" "8LJjUJFhhy2Jmm9krXhPyRscU+CXOCZ2G1GhBqTI8cgMYhEVHwb3qy1EHg6G3n4W\n" "AjV+cKQcbUytq8DRmVe0bNJxDOX8ivzfAp3lUIwub+JfpxrWIUhb3iVGj5CauI98\n" "bNFHTWwYp7tviIIi21Q+L3nExCyE4yTUP/mebBZ62JnbvsWSs3r3//Am5d8G3WdY\n" "BXsERoDoLBvHnqlO/oo4ppGCRI7GkDroACi/AgMBAAGjdzB1MAwGA1UdEwQFMAMB\n" "Af8wHQYDVR0OBBYEFGqUKVWVlL03sHuOggFACdlHckPBMEYGA1UdIwQ/MD2AFE2X\n" "AbNDryMlBpMNI6Ce927uUFwToRqkGDAWMRQwEgYDVQQDDAtzMm5UZXN0Um9vdIIJ\n" "ANDUkH+UYdz1MA0GCSqGSIb3DQEBBQUAA4IBAQA3O3S9VT0EC1yG4xyNNUZ7+CzF\n" "uFA6uiO38ygcN5Nz1oNPy2eQer7vYmrHtqN6gS/o1Ag5F8bLRCqeuZTsOG80O29H\n" "kNhs5xYprdU82AqcaWwEd0kDrhC5rEvs6fj1J0NKmmhbovYxuDboj0a7If7HEqX0\n" "NizyU3M3JONPZgadchZ+F5DosatF1Bpt/gsQRy383IogQ0/FS+juHCCc4VIUemuk\n" "YY1J8o5XdrGWrPBBiudTWqCobe+N541b+YLWbajT5UKzvSqJmcqpPTniJGc9eZxc\n" "z3cCNd3cKa9bK51stEnQSlA7PQXYs3K+TD3EmSn/G2x6Hmfr7lrpbIhEaD+y\n" "-----END CERTIFICATE-----\n" "-----BEGIN CERTIFICATE-----\n" "MIIDATCCAemgAwIBAgIJANDUkH+UYdz1MA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV\n" "BAMMC3MyblRlc3RSb290MCAXDTE2MDMzMDE4NTYzOVoYDzIxMTYwMzA2MTg1NjM5\n" "WjAWMRQwEgYDVQQDDAtzMm5UZXN0Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEP\n" "ADCCAQoCggEBAMY5532000oaeed7Jmo3ssx1723ZDLpn3WGz6FxpWM0zsKA/YvdD\n" "7J6qXDvfxU6dZlmsCS+bSNAqpARKmKsBEDPTsdLmrN1V1clOxvKm6GvU1eloRTw6\n" "xukEUXJ+uxrQMLYvSJBiCBVGI+UYNCK5c6guNMRYBCGdk5/iayjmK0Nxz1918Cx9\n" "z4va8HPAgYIz0ogOdYB21O9FQGPdH1mYqRzljcSsZ7EFo1P8HJr8oKK76ZeYi2or\n" "pjzMHGnlufHaul508wQPeFAMa1Tku3HyGZRaieRAck6+QcO2NujXxKNyCBlWON23\n" "FQTuBjN/CAl74MZtcAM2hVSmpm9t4cWVN5MCAwEAAaNQME4wHQYDVR0OBBYEFE2X\n" "AbNDryMlBpMNI6Ce927uUFwTMB8GA1UdIwQYMBaAFE2XAbNDryMlBpMNI6Ce927u\n" "UFwTMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAXkVvQdXDmozPix\n" "uZi1o9cw4Si0syqfJ4sSunrzPbbmw/Qxhth5V7XGrnsQVNxamgnbzpjGhiBF6isM\n" "ldj33zQYtke+ojOjFlhEvrPo6eW29RkLBEtJadGs2bkMLztJbf+cbH2u6irzr6S4\n" "3OgVOSuB+zG56ksTnEVmum+C/8tSIAyi3eaoStPcgEU8+3/KMrH7uuenmTOCKdD1\n" "FvSDHXT9qPgTttVQGXbXzJEr5tGE+Py6yib5uoJ0dJZNtjs7HOQEDk5J0wZaX0DC\n" "MShYLiN5qLJAk0qwl+js488BJ18M9dg4TxdBYFkwHSzKXSj9TJN77Bb0RZr8LL9T\n" "r9IyvfU=\n" "-----END CERTIFICATE-----"; static const uint8_t s_expected_intermediate_1[] = { 0x30, 0x82, 0x02, 0xad, 0x30, 0x82, 0x01, 0x95, 0x02, 0x02, 0x7d, 0xd5, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x1e, 0x31, 0x1c, 0x30, 0x1a, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x13, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x36, 0x30, 0x33, 0x33, 0x30, 0x31, 0x38, 0x35, 0x37, 0x34, 0x33, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x36, 0x30, 0x33, 0x30, 0x36, 0x31, 0x38, 0x35, 0x37, 0x34, 0x33, 0x5a, 0x30, 0x18, 0x31, 0x16, 0x30, 0x14, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 
0x0d, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xd1, 0xc3, 0xa0, 0x2e, 0x61, 0x70, 0x1e, 0x45, 0x3d, 0x18, 0xba, 0x9b, 0x42, 0x7c, 0x98, 0xd1, 0x07, 0xf1, 0x03, 0x27, 0x2c, 0x86, 0x5e, 0x7b, 0xfe, 0xf1, 0x35, 0x76, 0x1f, 0x55, 0x89, 0x5a, 0x13, 0x0b, 0x93, 0xca, 0x8d, 0x0f, 0x6e, 0xeb, 0xef, 0xd0, 0x3a, 0x12, 0x36, 0xd5, 0x98, 0x96, 0x8e, 0x57, 0xda, 0xab, 0xa2, 0x45, 0x6a, 0xa4, 0x39, 0x97, 0x4b, 0xbd, 0x76, 0x67, 0x70, 0x43, 0xa6, 0xb2, 0x7e, 0xd4, 0xb1, 0x4c, 0xca, 0x69, 0xb7, 0x03, 0xe0, 0x05, 0xb8, 0xaf, 0xec, 0x91, 0xe1, 0xb2, 0xd2, 0xc9, 0x22, 0x93, 0xe4, 0xe0, 0x32, 0x61, 0x53, 0xe5, 0x70, 0x55, 0xbe, 0x6b, 0x30, 0xe2, 0xe0, 0x75, 0x4b, 0xdb, 0x94, 0xaa, 0x4b, 0x47, 0x6a, 0xe5, 0xe0, 0x8e, 0x11, 0x4f, 0x3e, 0xc7, 0xef, 0x93, 0x87, 0xb0, 0x2f, 0x82, 0xee, 0x95, 0x51, 0x0d, 0xd9, 0x31, 0x1e, 0x23, 0x53, 0x9d, 0x8f, 0x80, 0x9a, 0x33, 0x10, 0x0e, 0x22, 0xea, 0x30, 0x9b, 0xcc, 0x08, 0xd8, 0xe1, 0xd5, 0xcd, 0x11, 0xe5, 0xad, 0x19, 0xb2, 0x27, 0xd8, 0x58, 0x1a, 0xe1, 0xc7, 0x43, 0x64, 0xda, 0x4c, 0x62, 0x23, 0xaf, 0x75, 0xa9, 0x00, 0x5b, 0x97, 0x8b, 0xda, 0x47, 0x18, 0x63, 0x18, 0x6b, 0x4b, 0x97, 0x01, 0x53, 0xe6, 0x82, 0x37, 0x44, 0x5d, 0x9f, 0x2d, 0xe3, 0xab, 0xbf, 0x64, 0xa4, 0x3d, 0x5b, 0x5b, 0x89, 0x91, 0x91, 0x15, 0x2a, 0x17, 0x22, 0x8d, 0x3e, 0xc6, 0xd9, 0x50, 0x6b, 0xb0, 0x15, 0x2a, 0xd0, 0x20, 0xe2, 0x2f, 0xe4, 0xc3, 0x68, 0x67, 0xed, 0x86, 0x70, 0x73, 0x51, 0xcf, 0x73, 0xb7, 0x87, 0xf1, 0xfd, 0x2f, 0xf7, 0xaa, 0xe9, 0x42, 0xd9, 0x66, 0xca, 0x23, 0x7c, 0x6b, 0x6a, 0xc4, 0x32, 0x8e, 0xbb, 0x07, 0x4c, 0x02, 0x7b, 0xe4, 0xc7, 0x77, 0xac, 0xad, 0x3d, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x71, 0xe4, 0x5c, 0xd0, 0x3d, 0x06, 0xd8, 0x00, 0x79, 0xb6, 0x42, 0xdb, 0x3a, 0xe2, 0x5b, 0x88, 0x51, 0x7d, 0xe2, 0x24, 0x24, 0xc3, 0xb0, 0xbd, 0xf2, 0x81, 0x5c, 0x74, 0xee, 0x63, 0x98, 0x82, 0x2c, 0x3d, 0x68, 0xb6, 0x2b, 0x3a, 0x01, 0x99, 0x63, 0x70, 0x22, 0x97, 0x33, 0xa3, 0xbd, 0xe3, 0xcf, 0x5e, 0x1c, 0x10, 0xb8, 0x4f, 0xa9, 0x89, 0xc1, 0x5b, 0x17, 0x84, 0x7f, 0xbf, 0x71, 0xc1, 0x0a, 0xa8, 0xd4, 0x43, 0xd9, 0xb5, 0x1f, 0x42, 0x03, 0xa2, 0x8f, 0x7d, 0x8f, 0xb2, 0xd7, 0x95, 0xd5, 0xe3, 0x87, 0x7c, 0x72, 0x05, 0xa9, 0xd2, 0x66, 0x9c, 0x15, 0x5b, 0xf2, 0xd6, 0x1a, 0x8f, 0x15, 0x2f, 0xcb, 0xb5, 0x10, 0xbd, 0x7a, 0xcf, 0xa9, 0xa1, 0xdb, 0x38, 0x3a, 0xc7, 0x7b, 0xee, 0xdf, 0xee, 0xbd, 0x58, 0x0b, 0x37, 0xab, 0xba, 0x38, 0x8f, 0xe5, 0x24, 0x87, 0xa6, 0x16, 0x37, 0x19, 0x0a, 0xbc, 0xf0, 0xa7, 0xa6, 0x7c, 0x57, 0x33, 0xa7, 0xfe, 0x06, 0x9d, 0x86, 0xac, 0xb0, 0x4e, 0x74, 0x39, 0x87, 0xbb, 0x0d, 0x07, 0x01, 0xe3, 0x3b, 0xe7, 0xc6, 0x60, 0x17, 0xa7, 0x9d, 0x49, 0x20, 0xde, 0x28, 0xa2, 0x4b, 0x54, 0x53, 0xda, 0x0a, 0x20, 0xce, 0x72, 0xd9, 0xc8, 0x04, 0x59, 0xd9, 0x89, 0xcd, 0xea, 0x8f, 0x00, 0xd6, 0x31, 0x7f, 0x46, 0x48, 0x3e, 0x54, 0xe0, 0x11, 0x39, 0x27, 0x2b, 0x0d, 0x43, 0xcd, 0xff, 0x30, 0x94, 0xae, 0x66, 0x15, 0xb6, 0xc6, 0xe0, 0xb3, 0xd0, 0x87, 0x59, 0xa4, 0x0d, 0x57, 0x30, 0x41, 0x5d, 0xe8, 0x80, 0x6e, 0xa4, 0x18, 0xc4, 0x5a, 0xee, 0x77, 0x87, 0xfb, 0xa4, 0x85, 0x90, 0xda, 0x40, 0x2a, 0xe3, 0xc2, 0xa2, 0x37, 0xb8, 0x34, 0x01, 0x3e, 0x59, 0x0e, 0x6f, 0x7b, 0x5c, 0x24, 0xee, 0x72, 0x11, 0x4d, 0x0c, 0x03, 0x00, 0x5e}; static const uint8_t s_expected_intermediate_2[] 
= { 0x30, 0x82, 0x03, 0x29, 0x30, 0x82, 0x02, 0x11, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x02, 0x57, 0x16, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x36, 0x30, 0x33, 0x33, 0x30, 0x31, 0x38, 0x35, 0x37, 0x30, 0x39, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x36, 0x30, 0x33, 0x30, 0x36, 0x31, 0x38, 0x35, 0x37, 0x30, 0x39, 0x5a, 0x30, 0x1e, 0x31, 0x1c, 0x30, 0x1a, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x13, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xcc, 0xfe, 0x2d, 0xde, 0x72, 0x5c, 0x58, 0x72, 0xf7, 0x9d, 0x3c, 0x21, 0x27, 0x55, 0xee, 0x80, 0xfc, 0x76, 0x2c, 0x60, 0xf7, 0x8f, 0xd6, 0xa2, 0x81, 0x65, 0x08, 0x5b, 0xa6, 0xeb, 0x97, 0xd3, 0xad, 0x63, 0x15, 0x92, 0xb0, 0x95, 0xe7, 0x1b, 0xd1, 0xe6, 0xae, 0xd4, 0xf3, 0x91, 0x2d, 0xb7, 0x2c, 0x60, 0xae, 0x34, 0xbb, 0x7c, 0x54, 0xce, 0x80, 0x39, 0xbb, 0xa4, 0x34, 0x79, 0xb0, 0xee, 0xa8, 0xf6, 0xea, 0x48, 0x99, 0x02, 0x6b, 0x3b, 0x21, 0xeb, 0xa7, 0x2e, 0xf5, 0x24, 0xe5, 0xf2, 0xe6, 0xdb, 0x93, 0xba, 0x08, 0xf0, 0xb2, 0x63, 0x50, 0x91, 0x61, 0x87, 0x2d, 0x89, 0x9a, 0x6f, 0x64, 0xad, 0x78, 0x4f, 0xc9, 0x1b, 0x1c, 0x53, 0xe0, 0x97, 0x38, 0x26, 0x76, 0x1b, 0x51, 0xa1, 0x06, 0xa4, 0xc8, 0xf1, 0xc8, 0x0c, 0x62, 0x11, 0x15, 0x1f, 0x06, 0xf7, 0xab, 0x2d, 0x44, 0x1e, 0x0e, 0x86, 0xde, 0x7e, 0x16, 0x02, 0x35, 0x7e, 0x70, 0xa4, 0x1c, 0x6d, 0x4c, 0xad, 0xab, 0xc0, 0xd1, 0x99, 0x57, 0xb4, 0x6c, 0xd2, 0x71, 0x0c, 0xe5, 0xfc, 0x8a, 0xfc, 0xdf, 0x02, 0x9d, 0xe5, 0x50, 0x8c, 0x2e, 0x6f, 0xe2, 0x5f, 0xa7, 0x1a, 0xd6, 0x21, 0x48, 0x5b, 0xde, 0x25, 0x46, 0x8f, 0x90, 0x9a, 0xb8, 0x8f, 0x7c, 0x6c, 0xd1, 0x47, 0x4d, 0x6c, 0x18, 0xa7, 0xbb, 0x6f, 0x88, 0x82, 0x22, 0xdb, 0x54, 0x3e, 0x2f, 0x79, 0xc4, 0xc4, 0x2c, 0x84, 0xe3, 0x24, 0xd4, 0x3f, 0xf9, 0x9e, 0x6c, 0x16, 0x7a, 0xd8, 0x99, 0xdb, 0xbe, 0xc5, 0x92, 0xb3, 0x7a, 0xf7, 0xff, 0xf0, 0x26, 0xe5, 0xdf, 0x06, 0xdd, 0x67, 0x58, 0x05, 0x7b, 0x04, 0x46, 0x80, 0xe8, 0x2c, 0x1b, 0xc7, 0x9e, 0xa9, 0x4e, 0xfe, 0x8a, 0x38, 0xa6, 0x91, 0x82, 0x44, 0x8e, 0xc6, 0x90, 0x3a, 0xe8, 0x00, 0x28, 0xbf, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x77, 0x30, 0x75, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x6a, 0x94, 0x29, 0x55, 0x95, 0x94, 0xbd, 0x37, 0xb0, 0x7b, 0x8e, 0x82, 0x01, 0x40, 0x09, 0xd9, 0x47, 0x72, 0x43, 0xc1, 0x30, 0x46, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x3f, 0x30, 0x3d, 0x80, 0x14, 0x4d, 0x97, 0x01, 0xb3, 0x43, 0xaf, 0x23, 0x25, 0x06, 0x93, 0x0d, 0x23, 0xa0, 0x9e, 0xf7, 0x6e, 0xee, 0x50, 0x5c, 0x13, 0xa1, 0x1a, 0xa4, 0x18, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x82, 0x09, 0x00, 0xd0, 0xd4, 0x90, 0x7f, 0x94, 0x61, 0xdc, 0xf5, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x37, 0x3b, 0x74, 0xbd, 0x55, 0x3d, 0x04, 0x0b, 0x5c, 0x86, 0xe3, 0x1c, 0x8d, 0x35, 0x46, 0x7b, 0xf8, 0x2c, 0xc5, 0xb8, 0x50, 0x3a, 0xba, 0x23, 0xb7, 0xf3, 0x28, 0x1c, 0x37, 0x93, 0x73, 0xd6, 0x83, 0x4f, 
0xcb, 0x67, 0x90, 0x7a, 0xbe, 0xef, 0x62, 0x6a, 0xc7, 0xb6, 0xa3, 0x7a, 0x81, 0x2f, 0xe8, 0xd4, 0x08, 0x39, 0x17, 0xc6, 0xcb, 0x44, 0x2a, 0x9e, 0xb9, 0x94, 0xec, 0x38, 0x6f, 0x34, 0x3b, 0x6f, 0x47, 0x90, 0xd8, 0x6c, 0xe7, 0x16, 0x29, 0xad, 0xd5, 0x3c, 0xd8, 0x0a, 0x9c, 0x69, 0x6c, 0x04, 0x77, 0x49, 0x03, 0xae, 0x10, 0xb9, 0xac, 0x4b, 0xec, 0xe9, 0xf8, 0xf5, 0x27, 0x43, 0x4a, 0x9a, 0x68, 0x5b, 0xa2, 0xf6, 0x31, 0xb8, 0x36, 0xe8, 0x8f, 0x46, 0xbb, 0x21, 0xfe, 0xc7, 0x12, 0xa5, 0xf4, 0x36, 0x2c, 0xf2, 0x53, 0x73, 0x37, 0x24, 0xe3, 0x4f, 0x66, 0x06, 0x9d, 0x72, 0x16, 0x7e, 0x17, 0x90, 0xe8, 0xb1, 0xab, 0x45, 0xd4, 0x1a, 0x6d, 0xfe, 0x0b, 0x10, 0x47, 0x2d, 0xfc, 0xdc, 0x8a, 0x20, 0x43, 0x4f, 0xc5, 0x4b, 0xe8, 0xee, 0x1c, 0x20, 0x9c, 0xe1, 0x52, 0x14, 0x7a, 0x6b, 0xa4, 0x61, 0x8d, 0x49, 0xf2, 0x8e, 0x57, 0x76, 0xb1, 0x96, 0xac, 0xf0, 0x41, 0x8a, 0xe7, 0x53, 0x5a, 0xa0, 0xa8, 0x6d, 0xef, 0x8d, 0xe7, 0x8d, 0x5b, 0xf9, 0x82, 0xd6, 0x6d, 0xa8, 0xd3, 0xe5, 0x42, 0xb3, 0xbd, 0x2a, 0x89, 0x99, 0xca, 0xa9, 0x3d, 0x39, 0xe2, 0x24, 0x67, 0x3d, 0x79, 0x9c, 0x5c, 0xcf, 0x77, 0x02, 0x35, 0xdd, 0xdc, 0x29, 0xaf, 0x5b, 0x2b, 0x9d, 0x6c, 0xb4, 0x49, 0xd0, 0x4a, 0x50, 0x3b, 0x3d, 0x05, 0xd8, 0xb3, 0x72, 0xbe, 0x4c, 0x3d, 0xc4, 0x99, 0x29, 0xff, 0x1b, 0x6c, 0x7a, 0x1e, 0x67, 0xeb, 0xee, 0x5a, 0xe9, 0x6c, 0x88, 0x44, 0x68, 0x3f, 0xb2}; static const uint8_t s_expected_leaf[] = { 0x30, 0x82, 0x03, 0x01, 0x30, 0x82, 0x01, 0xe9, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xd0, 0xd4, 0x90, 0x7f, 0x94, 0x61, 0xdc, 0xf5, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x30, 0x20, 0x17, 0x0d, 0x31, 0x36, 0x30, 0x33, 0x33, 0x30, 0x31, 0x38, 0x35, 0x36, 0x33, 0x39, 0x5a, 0x18, 0x0f, 0x32, 0x31, 0x31, 0x36, 0x30, 0x33, 0x30, 0x36, 0x31, 0x38, 0x35, 0x36, 0x33, 0x39, 0x5a, 0x30, 0x16, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0b, 0x73, 0x32, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xc6, 0x39, 0xe7, 0x7d, 0xb4, 0xd3, 0x4a, 0x1a, 0x79, 0xe7, 0x7b, 0x26, 0x6a, 0x37, 0xb2, 0xcc, 0x75, 0xef, 0x6d, 0xd9, 0x0c, 0xba, 0x67, 0xdd, 0x61, 0xb3, 0xe8, 0x5c, 0x69, 0x58, 0xcd, 0x33, 0xb0, 0xa0, 0x3f, 0x62, 0xf7, 0x43, 0xec, 0x9e, 0xaa, 0x5c, 0x3b, 0xdf, 0xc5, 0x4e, 0x9d, 0x66, 0x59, 0xac, 0x09, 0x2f, 0x9b, 0x48, 0xd0, 0x2a, 0xa4, 0x04, 0x4a, 0x98, 0xab, 0x01, 0x10, 0x33, 0xd3, 0xb1, 0xd2, 0xe6, 0xac, 0xdd, 0x55, 0xd5, 0xc9, 0x4e, 0xc6, 0xf2, 0xa6, 0xe8, 0x6b, 0xd4, 0xd5, 0xe9, 0x68, 0x45, 0x3c, 0x3a, 0xc6, 0xe9, 0x04, 0x51, 0x72, 0x7e, 0xbb, 0x1a, 0xd0, 0x30, 0xb6, 0x2f, 0x48, 0x90, 0x62, 0x08, 0x15, 0x46, 0x23, 0xe5, 0x18, 0x34, 0x22, 0xb9, 0x73, 0xa8, 0x2e, 0x34, 0xc4, 0x58, 0x04, 0x21, 0x9d, 0x93, 0x9f, 0xe2, 0x6b, 0x28, 0xe6, 0x2b, 0x43, 0x71, 0xcf, 0x5f, 0x75, 0xf0, 0x2c, 0x7d, 0xcf, 0x8b, 0xda, 0xf0, 0x73, 0xc0, 0x81, 0x82, 0x33, 0xd2, 0x88, 0x0e, 0x75, 0x80, 0x76, 0xd4, 0xef, 0x45, 0x40, 0x63, 0xdd, 0x1f, 0x59, 0x98, 0xa9, 0x1c, 0xe5, 0x8d, 0xc4, 0xac, 0x67, 0xb1, 0x05, 0xa3, 0x53, 0xfc, 0x1c, 0x9a, 0xfc, 0xa0, 0xa2, 0xbb, 0xe9, 0x97, 0x98, 0x8b, 0x6a, 0x2b, 0xa6, 0x3c, 0xcc, 0x1c, 0x69, 0xe5, 0xb9, 0xf1, 0xda, 0xba, 0x5e, 0x74, 0xf3, 0x04, 0x0f, 0x78, 0x50, 0x0c, 0x6b, 0x54, 0xe4, 0xbb, 0x71, 0xf2, 0x19, 0x94, 0x5a, 
0x89, 0xe4, 0x40, 0x72, 0x4e, 0xbe, 0x41, 0xc3, 0xb6, 0x36, 0xe8, 0xd7, 0xc4, 0xa3, 0x72, 0x08, 0x19, 0x56, 0x38, 0xdd, 0xb7, 0x15, 0x04, 0xee, 0x06, 0x33, 0x7f, 0x08, 0x09, 0x7b, 0xe0, 0xc6, 0x6d, 0x70, 0x03, 0x36, 0x85, 0x54, 0xa6, 0xa6, 0x6f, 0x6d, 0xe1, 0xc5, 0x95, 0x37, 0x93, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x4d, 0x97, 0x01, 0xb3, 0x43, 0xaf, 0x23, 0x25, 0x06, 0x93, 0x0d, 0x23, 0xa0, 0x9e, 0xf7, 0x6e, 0xee, 0x50, 0x5c, 0x13, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0x4d, 0x97, 0x01, 0xb3, 0x43, 0xaf, 0x23, 0x25, 0x06, 0x93, 0x0d, 0x23, 0xa0, 0x9e, 0xf7, 0x6e, 0xee, 0x50, 0x5c, 0x13, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x05, 0xe4, 0x56, 0xf4, 0x1d, 0x5c, 0x39, 0xa8, 0xcc, 0xf8, 0xb1, 0xb9, 0x98, 0xb5, 0xa3, 0xd7, 0x30, 0xe1, 0x28, 0xb4, 0xb3, 0x2a, 0x9f, 0x27, 0x8b, 0x12, 0xba, 0x7a, 0xf3, 0x3d, 0xb6, 0xe6, 0xc3, 0xf4, 0x31, 0x86, 0xd8, 0x79, 0x57, 0xb5, 0xc6, 0xae, 0x7b, 0x10, 0x54, 0xdc, 0x5a, 0x9a, 0x09, 0xdb, 0xce, 0x98, 0xc6, 0x86, 0x20, 0x45, 0xea, 0x2b, 0x0c, 0x95, 0xd8, 0xf7, 0xdf, 0x34, 0x18, 0xb6, 0x47, 0xbe, 0xa2, 0x33, 0xa3, 0x16, 0x58, 0x44, 0xbe, 0xb3, 0xe8, 0xe9, 0xe5, 0xb6, 0xf5, 0x19, 0x0b, 0x04, 0x4b, 0x49, 0x69, 0xd1, 0xac, 0xd9, 0xb9, 0x0c, 0x2f, 0x3b, 0x49, 0x6d, 0xff, 0x9c, 0x6c, 0x7d, 0xae, 0xea, 0x2a, 0xf3, 0xaf, 0xa4, 0xb8, 0xdc, 0xe8, 0x15, 0x39, 0x2b, 0x81, 0xfb, 0x31, 0xb9, 0xea, 0x4b, 0x13, 0x9c, 0x45, 0x66, 0xba, 0x6f, 0x82, 0xff, 0xcb, 0x52, 0x20, 0x0c, 0xa2, 0xdd, 0xe6, 0xa8, 0x4a, 0xd3, 0xdc, 0x80, 0x45, 0x3c, 0xfb, 0x7f, 0xca, 0x32, 0xb1, 0xfb, 0xba, 0xe7, 0xa7, 0x99, 0x33, 0x82, 0x29, 0xd0, 0xf5, 0x16, 0xf4, 0x83, 0x1d, 0x74, 0xfd, 0xa8, 0xf8, 0x13, 0xb6, 0xd5, 0x50, 0x19, 0x76, 0xd7, 0xcc, 0x91, 0x2b, 0xe6, 0xd1, 0x84, 0xf8, 0xfc, 0xba, 0xca, 0x26, 0xf9, 0xba, 0x82, 0x74, 0x74, 0x96, 0x4d, 0xb6, 0x3b, 0x3b, 0x1c, 0xe4, 0x04, 0x0e, 0x4e, 0x49, 0xd3, 0x06, 0x5a, 0x5f, 0x40, 0xc2, 0x31, 0x28, 0x58, 0x2e, 0x23, 0x79, 0xa8, 0xb2, 0x40, 0x93, 0x4a, 0xb0, 0x97, 0xe8, 0xec, 0xe3, 0xcf, 0x01, 0x27, 0x5f, 0x0c, 0xf5, 0xd8, 0x38, 0x4f, 0x17, 0x41, 0x60, 0x59, 0x30, 0x1d, 0x2c, 0xca, 0x5d, 0x28, 0xfd, 0x4c, 0x93, 0x7b, 0xec, 0x16, 0xf4, 0x45, 0x9a, 0xfc, 0x2c, 0xbf, 0x53, 0xaf, 0xd2, 0x32, 0xbd, 0xf5, }; struct aws_byte_cursor pem_data = aws_byte_cursor_from_c_str(s_pem_data_str); struct aws_array_list output_list; ASSERT_SUCCESS(aws_pem_objects_init_from_file_contents(&output_list, allocator, pem_data)); ASSERT_UINT_EQUALS(3, aws_array_list_length(&output_list)); struct aws_pem_object *pem_object = NULL; aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 0); ASSERT_BIN_ARRAYS_EQUALS( s_expected_intermediate_1, sizeof(s_expected_intermediate_1), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_array_list_get_at_ptr(&output_list, (void **)&pem_object, 1); ASSERT_BIN_ARRAYS_EQUALS( s_expected_intermediate_2, sizeof(s_expected_intermediate_2), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_array_list_get_at_ptr(&output_list, (void 
**)&pem_object, 2); ASSERT_BIN_ARRAYS_EQUALS(s_expected_leaf, sizeof(s_expected_leaf), pem_object->data.buffer, pem_object->data.len); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_string(pem_object->type_string), "CERTIFICATE"); ASSERT_INT_EQUALS(AWS_PEM_TYPE_X509, pem_object->type); aws_pem_objects_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_cert_chain_comments_and_whitespace, s_test_pem_cert_chain_comments_and_whitespace) static int s_test_pem_invalid_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Maintainers note: I removed a '-' from the beginning */ static const char *s_invalid_pem = "----BEGIN CERTIFICATE-----\n" "MIICeDCCAeGgAwIBAgIJAObttnPKQhVlMA0GCSqGSIb3DQEBDgUAMF8xCzAJBgNV\n" "BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG\n" "QW1hem9uMQwwCgYDVQQLDANzMm4xEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNzA4\n" "MDEyMjQzMzJaGA8yMTE3MDcwODIyNDMzMlowXzELMAkGA1UEBhMCVVMxCzAJBgNV\n" "BAgMAldBMRAwDgYDVQQHDAdTZWF0dGxlMQ8wDQYDVQQKDAZBbWF6b24xDDAKBgNV\n" "BAsMA3MybjESMBAGA1UEAwwJbG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUAA4GN\n" "ADCBiQKBgQCisRoXXcTh4ejn/sUjGosLlE7GlpLGtvWFEEX6Vl3klVoQdkyabLIH\n" "7bHB2P7uyt9bPzeqvWYjuepDBSQUUeb6Mkqfx237bTy8JhXIfpIhbgksTk7IPzgo\n" "XLPl1oNl7uB9HQaDQ7UPlaKbfp1gNvs6uGOH4vvyhhJGiblNJKnVwwIDAQABozow\n" "ODALBgNVHQ8EBAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwFAYDVR0RBA0wC4IJ\n" "MTI3LjAuMC4xMA0GCSqGSIb3DQEBDgUAA4GBACleH44LSYhzHHaV70VbnLbtbv8T\n" "eaUvzstFW6YvdP1XnZKssZNdvMhoiMuMD5n40/iPbv+grtjxacRQCinLk1SEjpsu\n" "3lw90Ds0Ksd/Pdsv7d0cCiJkjadON+ZQEEJ2FP/G19KZFxC3GLk9sxIUXyUW0TXn\n" "YxwtPz26+xvPRWCS\n" "-----END CERTIFICATE-----"; struct aws_byte_cursor pem_data = aws_byte_cursor_from_c_str(s_invalid_pem); struct aws_array_list output_list; ASSERT_ERROR(AWS_ERROR_PEM_MALFORMED, aws_pem_objects_init_from_file_contents(&output_list, allocator, pem_data)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&output_list)); aws_array_list_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_invalid_parse, s_test_pem_invalid_parse) static int s_test_pem_valid_data_invalid_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Maintainers note: I added a character (the 'q') to the end to make the base64 invalid */ static const char *s_invalid_data = "-----BEGIN CERTIFICATE-----\n" "MIICeDCCAeGgAwIBAgIJAObttnPKQhVlMA0GCSqGSIb3DQEBDgUAMF8xCzAJBgNV\n" "BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG\n" "QW1hem9uMQwwCgYDVQQLDANzMm4xEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNzA4\n" "MDEyMjQzMzJaGA8yMTE3MDcwODIyNDMzMlowXzELMAkGA1UEBhMCVVMxCzAJBgNV\n" "BAgMAldBMRAwDgYDVQQHDAdTZWF0dGxlMQ8wDQYDVQQKDAZBbWF6b24xDDAKBgNV\n" "BAsMA3MybjESMBAGA1UEAwwJbG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUAA4GN\n" "ADCBiQKBgQCisRoXXcTh4ejn/sUjGosLlE7GlpLGtvWFEEX6Vl3klVoQdkyabLIH\n" "7bHB2P7uyt9bPzeqvWYjuepDBSQUUeb6Mkqfx237bTy8JhXIfpIhbgksTk7IPzgo\n" "XLPl1oNl7uB9HQaDQ7UPlaKbfp1gNvs6uGOH4vvyhhJGiblNJKnVwwIDAQABozow\n" "ODALBgNVHQ8EBAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwFAYDVR0RBA0wC4IJ\n" "MTI3LjAuMC4xMA0GCSqGSIb3DQEBDgUAA4GBACleH44LSYhzHHaV70VbnLbtbv8T\n" "eaUvzstFW6YvdP1XnZKssZNdvMhoiMuMD5n40/iPbv+grtjxacRQCinLk1SEjpsu\n" "3lw90Ds0Ksd/Pdsv7d0cCiJkjadON+ZQEEJ2FP/G19KZFxC3GLk9sxIUXyUW0TXn\n" "YxwtPz26+xvPRWCSq\n" "-----END CERTIFICATE-----"; struct aws_byte_cursor pem_data = aws_byte_cursor_from_c_str(s_invalid_data); struct aws_array_list output_list; ASSERT_ERROR(AWS_ERROR_PEM_MALFORMED, aws_pem_objects_init_from_file_contents(&output_list, allocator, pem_data)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&output_list)); 
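    /* The malformed input must leave the output list empty (asserted above), so a plain
     * aws_array_list_clean_up() is sufficient here; aws_pem_objects_clean_up() is only needed
     * once the list holds successfully parsed PEM objects, as in the cert-chain test above. */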
aws_array_list_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_valid_data_invalid_parse, s_test_pem_valid_data_invalid_parse) static int s_test_pem_invalid_in_chain_parse(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Maintainers note: I added a character (the 'f') to the end of the 3rd cert to make the base64 invalid */ static const char *s_invalid_data = "-----BEGIN CERTIFICATE-----\n" "MIICrTCCAZUCAn3VMA0GCSqGSIb3DQEBBQUAMB4xHDAaBgNVBAMME3MyblRlc3RJ\n" "bnRlcm1lZGlhdGUwIBcNMTYwMzMwMTg1NzQzWhgPMjExNjAzMDYxODU3NDNaMBgx\n" "FjAUBgNVBAMMDXMyblRlc3RTZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw\n" "ggEKAoIBAQDRw6AuYXAeRT0YuptCfJjRB/EDJyyGXnv+8TV2H1WJWhMLk8qND27r\n" "79A6EjbVmJaOV9qrokVqpDmXS712Z3BDprJ+1LFMymm3A+AFuK/skeGy0skik+Tg\n" "MmFT5XBVvmsw4uB1S9uUqktHauXgjhFPPsfvk4ewL4LulVEN2TEeI1Odj4CaMxAO\n" "Iuowm8wI2OHVzRHlrRmyJ9hYGuHHQ2TaTGIjr3WpAFuXi9pHGGMYa0uXAVPmgjdE\n" "XZ8t46u/ZKQ9W1uJkZEVKhcijT7G2VBrsBUq0CDiL+TDaGfthnBzUc9zt4fx/S/3\n" "qulC2WbKI3xrasQyjrsHTAJ75Md3rK09AgMBAAEwDQYJKoZIhvcNAQEFBQADggEB\n" "AHHkXNA9BtgAebZC2zriW4hRfeIkJMOwvfKBXHTuY5iCLD1otis6AZljcCKXM6O9\n" "489eHBC4T6mJwVsXhH+/ccEKqNRD2bUfQgOij32PsteV1eOHfHIFqdJmnBVb8tYa\n" "jxUvy7UQvXrPqaHbODrHe+7f7r1YCzerujiP5SSHphY3GQq88KemfFczp/4GnYas\n" "sE50OYe7DQcB4zvnxmAXp51JIN4ooktUU9oKIM5y2cgEWdmJzeqPANYxf0ZIPlTg\n" "ETknKw1Dzf8wlK5mFbbG4LPQh1mkDVcwQV3ogG6kGMRa7neH+6SFkNpAKuPCoje4\n" "NAE+WQ5ve1wk7nIRTQwDAF4=\n" "-----END CERTIFICATE-----\n" "-----BEGIN CERTIFICATE-----\n" "MIIDKTCCAhGgAwIBAgICVxYwDQYJKoZIhvcNAQEFBQAwFjEUMBIGA1UEAwwLczJu\n" "VGVzdFJvb3QwIBcNMTYwMzMwMTg1NzA5WhgPMjExNjAzMDYxODU3MDlaMB4xHDAa\n" "BgNVBAMME3MyblRlc3RJbnRlcm1lZGlhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IB\n" "DwAwggEKAoIBAQDM/i3eclxYcvedPCEnVe6A/HYsYPeP1qKBZQhbpuuX061jFZKw\n" "lecb0eau1PORLbcsYK40u3xUzoA5u6Q0ebDuqPbqSJkCazsh66cu9STl8ubbk7oI\n" "8LJjUJFhhy2Jmm9krXhPyRscU+CXOCZ2G1GhBqTI8cgMYhEVHwb3qy1EHg6G3n4W\n" "AjV+cKQcbUytq8DRmVe0bNJxDOX8ivzfAp3lUIwub+JfpxrWIUhb3iVGj5CauI98\n" "bNFHTWwYp7tviIIi21Q+L3nExCyE4yTUP/mebBZ62JnbvsWSs3r3//Am5d8G3WdY\n" "BXsERoDoLBvHnqlO/oo4ppGCRI7GkDroACi/AgMBAAGjdzB1MAwGA1UdEwQFMAMB\n" "Af8wHQYDVR0OBBYEFGqUKVWVlL03sHuOggFACdlHckPBMEYGA1UdIwQ/MD2AFE2X\n" "AbNDryMlBpMNI6Ce927uUFwToRqkGDAWMRQwEgYDVQQDDAtzMm5UZXN0Um9vdIIJ\n" "ANDUkH+UYdz1MA0GCSqGSIb3DQEBBQUAA4IBAQA3O3S9VT0EC1yG4xyNNUZ7+CzF\n" "uFA6uiO38ygcN5Nz1oNPy2eQer7vYmrHtqN6gS/o1Ag5F8bLRCqeuZTsOG80O29H\n" "kNhs5xYprdU82AqcaWwEd0kDrhC5rEvs6fj1J0NKmmhbovYxuDboj0a7If7HEqX0\n" "NizyU3M3JONPZgadchZ+F5DosatF1Bpt/gsQRy383IogQ0/FS+juHCCc4VIUemuk\n" "YY1J8o5XdrGWrPBBiudTWqCobe+N541b+YLWbajT5UKzvSqJmcqpPTniJGc9eZxc\n" "z3cCNd3cKa9bK51stEnQSlA7PQXYs3K+TD3EmSn/G2x6Hmfr7lrpbIhEaD+y\n" "-----END CERTIFICATE-----\n" "-----BEGIN CERTIFICATE-----\n" "MIIDATCCAemgAwIBAgIJANDUkH+UYdz1MA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV\n" "BAMMC3MyblRlc3RSb290MCAXDTE2MDMzMDE4NTYzOVoYDzIxMTYwMzA2MTg1NjM5\n" "WjAWMRQwEgYDVQQDDAtzMm5UZXN0Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEP\n" "ADCCAQoCggEBAMY5532000oaeed7Jmo3ssx1723ZDLpn3WGz6FxpWM0zsKA/YvdD\n" "7J6qXDvfxU6dZlmsCS+bSNAqpARKmKsBEDPTsdLmrN1V1clOxvKm6GvU1eloRTw6\n" "xukEUXJ+uxrQMLYvSJBiCBVGI+UYNCK5c6guNMRYBCGdk5/iayjmK0Nxz1918Cx9\n" "z4va8HPAgYIz0ogOdYB21O9FQGPdH1mYqRzljcSsZ7EFo1P8HJr8oKK76ZeYi2or\n" "pjzMHGnlufHaul508wQPeFAMa1Tku3HyGZRaieRAck6+QcO2NujXxKNyCBlWON23\n" "FQTuBjN/CAl74MZtcAM2hVSmpm9t4cWVN5MCAwEAAaNQME4wHQYDVR0OBBYEFE2X\n" "AbNDryMlBpMNI6Ce927uUFwTMB8GA1UdIwQYMBaAFE2XAbNDryMlBpMNI6Ce927u\n" "UFwTMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAXkVvQdXDmozPix\n" "uZi1o9cw4Si0syqfJ4sSunrzPbbmw/Qxhth5V7XGrnsQVNxamgnbzpjGhiBF6isM\n" 
"ldj33zQYtke+ojOjFlhEvrPo6eW29RkLBEtJadGs2bkMLztJbf+cbH2u6irzr6S4\n" "3OgVOSuB+zG56ksTnEVmum+C/8tSIAyi3eaoStPcgEU8+3/KMrH7uuenmTOCKdD1\n" "FvSDHXT9qPgTttVQGXbXzJEr5tGE+Py6yib5uoJ0dJZNtjs7HOQEDk5J0wZaX0DC\n" "MShYLiN5qLJAk0qwl+js488BJ18M9dg4TxdBYFkwHSzKXSj9TJN77Bb0RZr8LL9T\n" "r9IyvfUf=\n" "-----END CERTIFICATE-----"; struct aws_byte_cursor pem_data = aws_byte_cursor_from_c_str(s_invalid_data); struct aws_array_list output_list; ASSERT_ERROR(AWS_ERROR_PEM_MALFORMED, aws_pem_objects_init_from_file_contents(&output_list, allocator, pem_data)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&output_list)); aws_array_list_clean_up(&output_list); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_pem_invalid_in_chain_parse, s_test_pem_invalid_in_chain_parse) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/pipe_test.c000066400000000000000000000743231456575232400231520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include enum pipe_loop_setup { SAME_EVENT_LOOP, DIFFERENT_EVENT_LOOPS, }; enum { SMALL_BUFFER_SIZE = 4, GIANT_BUFFER_SIZE = 1024 * 1024 * 32, /* 32MB */ }; /* Used for tracking state in the pipe tests. */ struct pipe_state { /* Begin setup parameters */ enum pipe_loop_setup loop_setup; size_t buffer_size; /* End setup parameters */ struct aws_allocator *alloc; struct aws_pipe_read_end read_end; struct aws_pipe_write_end write_end; struct aws_event_loop *read_loop; struct aws_event_loop *write_loop; /* Since most pipe operations must be performed on the event-loop thread, * the `results` struct is used to signal the main thread that the tests are finished. */ struct { struct aws_mutex mutex; struct aws_condition_variable condvar; bool read_end_closed; bool write_end_closed; int status_code; /* Set to non-zero if something goes wrong on the thread. 
*/ } results; struct { struct aws_byte_buf src; struct aws_byte_buf dst; size_t num_bytes_written; } buffers; struct { int error_code_to_monitor; /* By default, monitors AWS_ERROR_SUCCESS aka normal readable events */ int count; /* count of events that we're monitoring */ int close_read_end_after_n_events; /* if set, close read-end when count reaches N */ } readable_events; void *test_data; /* If a test needs special data */ }; static int s_fixture_before(struct aws_allocator *allocator, void *ctx) { struct pipe_state *state = ctx; state->alloc = allocator; state->read_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(state->read_loop); ASSERT_SUCCESS(aws_event_loop_run(state->read_loop)); if (state->loop_setup == DIFFERENT_EVENT_LOOPS) { state->write_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(state->write_loop); ASSERT_SUCCESS(aws_event_loop_run(state->write_loop)); } else { state->write_loop = state->read_loop; } ASSERT_SUCCESS(aws_pipe_init(&state->read_end, state->read_loop, &state->write_end, state->write_loop, allocator)); ASSERT_SUCCESS(aws_mutex_init(&state->results.mutex)); ASSERT_SUCCESS(aws_condition_variable_init(&state->results.condvar)); if (state->buffer_size > 0) { /* Create full src buffer, containing random content */ ASSERT_SUCCESS(aws_byte_buf_init(&state->buffers.src, allocator, state->buffer_size)); state->buffers.src.len = state->buffer_size; for (size_t i = 0; i < state->buffer_size; ++i) { state->buffers.src.buffer[i] = (uint8_t)(rand() % 256); } /* Create empty dst buffer, with zeroed out content */ ASSERT_SUCCESS(aws_byte_buf_init(&state->buffers.dst, allocator, state->buffer_size)); memset(state->buffers.dst.buffer, 0, state->buffers.dst.capacity); } return AWS_OP_SUCCESS; } /* Assumes the pipe's read-end and write-end are already cleaned up */ static int s_fixture_after(struct aws_allocator *allocator, int setup_res, void *ctx) { (void)allocator; (void)setup_res; struct pipe_state *state = ctx; aws_condition_variable_clean_up(&state->results.condvar); aws_mutex_clean_up(&state->results.mutex); if (state->read_loop) { aws_event_loop_destroy(state->read_loop); } if (state->write_loop != state->read_loop) { aws_event_loop_destroy(state->write_loop); } aws_byte_buf_clean_up(&state->buffers.src); aws_byte_buf_clean_up(&state->buffers.dst); AWS_ZERO_STRUCT(*state); return AWS_OP_SUCCESS; } /* Macro for declaring pipe tests. 
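 * (Each generated case runs inside the s_fixture_before/s_fixture_after fixture above, which
 * creates the event loop(s), the pipe, and the src/dst buffers before the test body runs.)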
* Add pipe tests to CMakeLists.txt like so: add_pipe_test_case(NAME) * * Each pipe test is run in 2 different configurations: * 1) both ends of the pipe use the same event-loop * 2) each end of the pipe is on its own event-loop * * For each test with NAME, write a function with the following signature: * int test_NAME(struct pipe_state *state) {...} */ #define PIPE_TEST_CASE(NAME, BUFFER_SIZE) \ static struct pipe_state NAME##_pipe_state_same_loop = { \ .loop_setup = SAME_EVENT_LOOP, \ .buffer_size = (BUFFER_SIZE), \ }; \ static int test_##NAME##_same_loop(struct aws_allocator *allocator, void *ctx) { \ (void)allocator; \ struct pipe_state *state = ctx; \ return test_##NAME(state); \ } \ AWS_TEST_CASE_FIXTURE( \ NAME, s_fixture_before, test_##NAME##_same_loop, s_fixture_after, &NAME##_pipe_state_same_loop) \ \ static struct pipe_state NAME##_pipe_state_different_loops = { \ .loop_setup = DIFFERENT_EVENT_LOOPS, \ .buffer_size = (BUFFER_SIZE), \ }; \ static int test_##NAME##_different_loops(struct aws_allocator *allocator, void *ctx) { \ (void)allocator; \ struct pipe_state *state = ctx; \ return test_##NAME(state); \ } \ AWS_TEST_CASE_FIXTURE( \ NAME##_2loops, \ s_fixture_before, \ test_##NAME##_different_loops, \ s_fixture_after, \ &NAME##_pipe_state_different_loops) /* Checking if work on thread is done */ static bool s_done_pred(void *user_data) { struct pipe_state *state = user_data; if (state->results.status_code != 0) { return true; } if (state->results.read_end_closed && state->results.write_end_closed) { return true; } return false; } /* Signal that work is done, due to an unexpected error */ static void s_signal_error(struct pipe_state *state) { aws_mutex_lock(&state->results.mutex); state->results.status_code = -1; aws_condition_variable_notify_all(&state->results.condvar); aws_mutex_unlock(&state->results.mutex); } static void s_signal_done_on_read_end_closed(struct pipe_state *state) { /* Signal that work might be done */ aws_mutex_lock(&state->results.mutex); state->results.read_end_closed = true; aws_condition_variable_notify_all(&state->results.condvar); aws_mutex_unlock(&state->results.mutex); } static void s_signal_done_on_write_end_closed(struct pipe_state *state) { /* Signal that work might be done */ aws_mutex_lock(&state->results.mutex); state->results.write_end_closed = true; aws_condition_variable_notify_all(&state->results.condvar); aws_mutex_unlock(&state->results.mutex); } static int s_pipe_state_check_copied_data(struct pipe_state *state) { ASSERT_UINT_EQUALS(state->buffer_size, state->buffers.num_bytes_written); ASSERT_TRUE(aws_byte_buf_eq(&state->buffers.src, &state->buffers.dst)); return AWS_OP_SUCCESS; } /* Use as "simplified" task functions in pipe_state tasks. 
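 * (s_schedule_task() below wraps each pipe_state_task_fn in an aws_task; if the task is
 * cancelled instead of run, the wrapper reports it through s_signal_error().)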
* The boilerplate of task scheduling and error-checking are handled by wrapper functions */ typedef void(pipe_state_task_fn)(struct pipe_state *state); struct pipe_state_task_wrapper { struct aws_task task; struct pipe_state *state; pipe_state_task_fn *wrapped_fn; }; static void s_pipe_state_task_wrapper_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct pipe_state_task_wrapper *wrapper = arg; struct pipe_state *state = wrapper->state; pipe_state_task_fn *wrapped_fn = wrapper->wrapped_fn; aws_mem_release(state->alloc, wrapper); if (status == AWS_TASK_STATUS_RUN_READY) { wrapped_fn(state); } else { s_signal_error(state); } } /* Schedules a pipe_state_task_fn */ static void s_schedule_task( struct pipe_state *state, struct aws_event_loop *loop, pipe_state_task_fn *fn, int delay_secs) { struct pipe_state_task_wrapper *wrapper = aws_mem_acquire(state->alloc, sizeof(struct pipe_state_task_wrapper)); if (!wrapper) { goto error; } aws_task_init(&wrapper->task, s_pipe_state_task_wrapper_fn, wrapper, "pipe_state"); wrapper->wrapped_fn = fn; wrapper->state = state; if (delay_secs == 0) { aws_event_loop_schedule_task_now(loop, &wrapper->task); } else { uint64_t run_at_ns; int err = aws_event_loop_current_clock_time(loop, &run_at_ns); if (err) { goto error; } run_at_ns += aws_timestamp_convert((uint64_t)delay_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_event_loop_schedule_task_future(loop, &wrapper->task, run_at_ns); } return; error: s_signal_error(state); } static void s_schedule_read_end_task(struct pipe_state *state, pipe_state_task_fn *fn) { s_schedule_task(state, state->read_loop, fn, 0); } static void s_schedule_write_end_task(struct pipe_state *state, pipe_state_task_fn *fn) { s_schedule_task(state, state->write_loop, fn, 0); } /* wait for pipe_state to indicate that it's done */ static int s_wait_for_results(struct pipe_state *state) { ASSERT_SUCCESS(aws_mutex_lock(&state->results.mutex)); ASSERT_SUCCESS( aws_condition_variable_wait_pred(&state->results.condvar, &state->results.mutex, s_done_pred, state)); ASSERT_SUCCESS(aws_mutex_unlock(&state->results.mutex)); return state->results.status_code; } static void s_clean_up_read_end_task(struct pipe_state *state) { int err = aws_pipe_clean_up_read_end(&state->read_end); if (err) { goto error; } s_signal_done_on_read_end_closed(state); return; error: s_signal_error(state); } static void s_clean_up_write_end_task(struct pipe_state *state) { int err = aws_pipe_clean_up_write_end(&state->write_end); if (err) { goto error; } s_signal_done_on_write_end_closed(state); return; error: s_signal_error(state); } /* Just test the pipe being opened and closed */ static int test_pipe_open_close(struct pipe_state *state) { s_schedule_read_end_task(state, s_clean_up_read_end_task); s_schedule_write_end_task(state, s_clean_up_write_end_task); ASSERT_SUCCESS(s_wait_for_results(state)); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_open_close, SMALL_BUFFER_SIZE); void s_clean_up_write_end_on_write_completed( struct aws_pipe_write_end *write_end, int error_code, struct aws_byte_cursor src_buffer, void *user_data) { struct pipe_state *state = user_data; if (!error_code) { state->buffers.num_bytes_written += src_buffer.len; } int err = aws_pipe_clean_up_write_end(write_end); if (err) { goto error; } s_signal_done_on_write_end_closed(state); return; error: s_signal_error(state); } /* Write everything in the buffer, clean up write-end when write completes*/ static void s_write_once_task(struct pipe_state *state) { struct 
aws_byte_cursor cursor = aws_byte_cursor_from_buf(&state->buffers.src); int err = aws_pipe_write(&state->write_end, cursor, s_clean_up_write_end_on_write_completed, state); if (err) { goto error; } return; error: s_signal_error(state); } /* Task tries to read as much data as possible. * Task repeatedly reschedules itself until read-buffer is full, then it cleans up the read-end */ static void s_read_everything_task(struct pipe_state *state) { int err = aws_pipe_read(&state->read_end, &state->buffers.dst, NULL); /* AWS_IO_READ_WOULD_BLOCK is an acceptable error, it just means the data's not ready yet */ if (err && (aws_last_error() != AWS_IO_READ_WOULD_BLOCK)) { goto error; } if (state->buffers.dst.len < state->buffers.dst.capacity) { s_schedule_read_end_task(state, s_read_everything_task); } else { err = aws_pipe_clean_up_read_end(&state->read_end); if (err) { goto error; } s_signal_done_on_read_end_closed(state); } return; error: s_signal_error(state); } /* common function used by small-buffer test and large-buffer test */ static int s_test_pipe_read_write(struct pipe_state *state) { s_schedule_read_end_task(state, s_read_everything_task); s_schedule_write_end_task(state, s_write_once_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_SUCCESS(s_pipe_state_check_copied_data(state)); return AWS_OP_SUCCESS; } /* Test that a small buffer can be sent through the pipe */ static int test_pipe_read_write(struct pipe_state *state) { return s_test_pipe_read_write(state); } PIPE_TEST_CASE(pipe_read_write, SMALL_BUFFER_SIZE); /* Test that a large buffer can be sent through the pipe. */ static int test_pipe_read_write_large_buffer(struct pipe_state *state) { return s_test_pipe_read_write(state); } PIPE_TEST_CASE(pipe_read_write_large_buffer, GIANT_BUFFER_SIZE); static void s_on_readable_event(struct aws_pipe_read_end *read_end, int error_code, void *user_data) { struct pipe_state *state = user_data; if (error_code == state->readable_events.error_code_to_monitor) { state->readable_events.count++; if (state->readable_events.count == state->readable_events.close_read_end_after_n_events) { int err = aws_pipe_clean_up_read_end(read_end); if (err) { goto error; } s_signal_done_on_read_end_closed(state); } } return; error: s_signal_error(state); } static void s_subscribe_task(struct pipe_state *state) { int err = aws_pipe_subscribe_to_readable_events(&state->read_end, s_on_readable_event, state); if (err) { goto error; } return; error: s_signal_error(state); } static int test_pipe_readable_event_sent_after_write(struct pipe_state *state) { state->readable_events.error_code_to_monitor = AWS_ERROR_SUCCESS; state->readable_events.close_read_end_after_n_events = 1; s_schedule_read_end_task(state, s_subscribe_task); s_schedule_write_end_task(state, s_write_once_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_INT_EQUALS(1, state->readable_events.count); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_readable_event_sent_after_write, SMALL_BUFFER_SIZE); static void s_sentonce_on_readable_event(struct aws_pipe_read_end *read_end, int events, void *user_data) { struct pipe_state *state = user_data; int prev_events_count = state->readable_events.count; /* invoke usual readable callback so the events are logged */ s_on_readable_event(read_end, events, user_data); if (state->results.status_code) { /* bail out if anything went wrong */ return; } /* when the 1st readable event comes in, schedule task to close read-end after waiting a bit. 
* this lets us observe any further events that might come in */ if ((state->readable_events.count == 1) && (prev_events_count == 0)) { s_schedule_task(state, state->read_loop, s_clean_up_read_end_task, 1 /*delay*/); } } static void s_sentonce_subscribe_task(struct pipe_state *state) { int err = aws_pipe_subscribe_to_readable_events(&state->read_end, s_sentonce_on_readable_event, state); if (err) { goto error; } return; error: s_signal_error(state); } /* Check that readable event is only sent once after a write. * Short name for test is: sentonce */ static int test_pipe_readable_event_sent_once(struct pipe_state *state) { state->readable_events.error_code_to_monitor = AWS_ERROR_SUCCESS; s_schedule_read_end_task(state, s_sentonce_subscribe_task); s_schedule_write_end_task(state, s_write_once_task); ASSERT_SUCCESS(s_wait_for_results(state)); /* Accept 1 or 2 events. Epoll notifies about "readable" when sending "write end closed" event. * That's fine, we just don't want dozens of readable events to have come in. */ ASSERT_TRUE(state->readable_events.count <= 2); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_readable_event_sent_once, SMALL_BUFFER_SIZE); void s_subscribe_on_write_completed( struct aws_pipe_write_end *write_end, int error_code, struct aws_byte_cursor src_buffer, void *user_data) { struct pipe_state *state = user_data; if (!error_code) { state->buffers.num_bytes_written += src_buffer.len; } int err = aws_pipe_clean_up_write_end(write_end); if (err) { goto error; } s_signal_done_on_write_end_closed(state); /* Tell read end to subscribe */ s_schedule_read_end_task(state, s_subscribe_task); return; error: s_signal_error(state); } /* Write all data. When write completes, write-end cleans up and tells the read-end to subscribe */ static void s_write_once_then_subscribe_task(struct pipe_state *state) { struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&state->buffers.src); int err = aws_pipe_write(&state->write_end, cursor, s_subscribe_on_write_completed, state); if (err) { goto error; } return; error: s_signal_error(state); } static int test_pipe_readable_event_sent_on_subscribe_if_data_present(struct pipe_state *state) { state->readable_events.error_code_to_monitor = AWS_ERROR_SUCCESS; state->readable_events.close_read_end_after_n_events = 1; s_schedule_write_end_task(state, s_write_once_then_subscribe_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_INT_EQUALS(1, state->readable_events.count); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_readable_event_sent_on_subscribe_if_data_present, SMALL_BUFFER_SIZE); static void s_resubscribe_on_readable_event(struct aws_pipe_read_end *read_end, int events, void *user_data) { struct pipe_state *state = user_data; int err = 0; int prev_events_count = state->readable_events.count; /* invoke usual readable callback so the events are logged */ s_on_readable_event(read_end, events, user_data); if (state->results.status_code) { /* bail out if anything went wrong */ return; } if ((state->readable_events.count == 1) && (prev_events_count == 0)) { /* unsubscribe and resubscribe */ err = aws_pipe_unsubscribe_from_readable_events(&state->read_end); if (err) { goto error; } err = aws_pipe_subscribe_to_readable_events(&state->read_end, s_on_readable_event, state); if (err) { goto error; } } return; error: s_signal_error(state); } static void s_resubscribe_1_task(struct pipe_state *state) { int err = aws_pipe_subscribe_to_readable_events(&state->read_end, s_resubscribe_on_readable_event, state); if (err) { goto error; } return; error: 
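    /* Subscribing failed; wake the main thread via the results condvar so
     * s_wait_for_results() doesn't block forever. */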
s_signal_error(state); } static void s_resubscribe_write_task(struct pipe_state *state) { struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&state->buffers.src); int err = aws_pipe_write(&state->write_end, cursor, s_clean_up_write_end_on_write_completed, state); if (err) { goto error; } /* schedule task for read-end to perform 1st subscribe */ s_schedule_read_end_task(state, s_resubscribe_1_task); return; error: s_signal_error(state); } static int test_pipe_readable_event_sent_on_resubscribe_if_data_present(struct pipe_state *state) { state->readable_events.error_code_to_monitor = AWS_ERROR_SUCCESS; state->readable_events.close_read_end_after_n_events = 2; s_schedule_write_end_task(state, s_resubscribe_write_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_INT_EQUALS(2, state->readable_events.count); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_readable_event_sent_on_resubscribe_if_data_present, SMALL_BUFFER_SIZE); static void s_readall_on_write_completed( struct aws_pipe_write_end *write_end, int error_code, struct aws_byte_cursor src_buffer, void *user_data) { struct pipe_state *state = user_data; int err = 0; if (error_code) { goto error; } bool is_2nd_write = (state->buffers.num_bytes_written > 0); state->buffers.num_bytes_written += src_buffer.len; /* Clean up after 2nd write */ if (is_2nd_write) { err = aws_pipe_clean_up_write_end(write_end); if (err) { goto error; } s_signal_done_on_write_end_closed(state); } return; error: s_signal_error(state); } static void s_readall_write_task(struct pipe_state *state) { struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&state->buffers.src); int err = aws_pipe_write(&state->write_end, cursor, s_readall_on_write_completed, state); if (err) { goto error; } return; error: s_signal_error(state); } static void s_readall_on_readable(struct aws_pipe_read_end *read_end, int events, void *user_data) { struct pipe_state *state = user_data; int err = 0; int prev_event_count = state->readable_events.count; /* invoke usual readable callback so the events are logged */ s_on_readable_event(read_end, events, user_data); if (state->results.status_code) { /* bail out if anything went wrong */ return; } if ((state->readable_events.count == 1) && (prev_event_count == 0)) { size_t total_bytes_read = 0; /* After the first write, read data until we're told that further reads would block. 
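 * (aws_pipe_read() signals this by failing and raising AWS_IO_READ_WOULD_BLOCK once the
 * pipe has been drained.)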
* This ensures that the next write is sure to trigger a readable event */ while (true) { state->buffers.dst.len = 0; err = aws_pipe_read(read_end, &state->buffers.dst, NULL); total_bytes_read += state->buffers.dst.len; if (err) { if (aws_last_error() == AWS_IO_READ_WOULD_BLOCK) { break; } goto error; } } /* Sanity check that we did in fact read something */ if (total_bytes_read == 0) { goto error; } /* Schedule the 2nd write */ s_schedule_write_end_task(state, s_readall_write_task); } return; error: s_signal_error(state); } static void s_readall_subscribe_task(struct pipe_state *state) { int err = aws_pipe_subscribe_to_readable_events(&state->read_end, s_readall_on_readable, state); if (err) { goto error; } return; error: s_signal_error(state); } /* Check that the 2nd readable event is sent again in the case of: subscribe, write 1, read all, write 2 * Short name for test is: readall */ static int test_pipe_readable_event_sent_again_after_all_data_read(struct pipe_state *state) { state->readable_events.error_code_to_monitor = AWS_ERROR_SUCCESS; state->readable_events.close_read_end_after_n_events = 2; s_schedule_read_end_task(state, s_readall_subscribe_task); s_schedule_write_end_task(state, s_readall_write_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_INT_EQUALS(2, state->readable_events.count); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_readable_event_sent_again_after_all_data_read, SMALL_BUFFER_SIZE); static void s_subscribe_and_schedule_write_end_clean_up_task(struct pipe_state *state) { int err = aws_pipe_subscribe_to_readable_events(&state->read_end, s_on_readable_event, state); if (err) { goto error; } /* schedule write end to clean up */ s_schedule_write_end_task(state, s_clean_up_write_end_task); return; error: s_signal_error(state); } static int test_pipe_error_event_sent_after_write_end_closed(struct pipe_state *state) { state->readable_events.error_code_to_monitor = AWS_IO_BROKEN_PIPE; state->readable_events.close_read_end_after_n_events = 1; s_schedule_read_end_task(state, s_subscribe_and_schedule_write_end_clean_up_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_INT_EQUALS(1, state->readable_events.count); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_error_event_sent_after_write_end_closed, SMALL_BUFFER_SIZE); static void s_clean_up_write_end_then_schedule_subscribe_task(struct pipe_state *state) { int err = aws_pipe_clean_up_write_end(&state->write_end); if (err) { goto error; } s_signal_done_on_write_end_closed(state); s_schedule_read_end_task(state, s_subscribe_task); return; error: s_signal_error(state); } static int test_pipe_error_event_sent_on_subscribe_if_write_end_already_closed(struct pipe_state *state) { state->readable_events.error_code_to_monitor = AWS_IO_BROKEN_PIPE; state->readable_events.close_read_end_after_n_events = 1; s_schedule_write_end_task(state, s_clean_up_write_end_then_schedule_subscribe_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_INT_EQUALS(1, state->readable_events.count); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_error_event_sent_on_subscribe_if_write_end_already_closed, SMALL_BUFFER_SIZE); static void s_close_write_end_after_all_writes_completed( struct aws_pipe_write_end *write_end, int error_code, struct aws_byte_cursor src_buffer, void *user_data) { struct pipe_state *state = user_data; if (error_code) { goto error; } state->buffers.num_bytes_written += src_buffer.len; if (state->buffers.num_bytes_written == state->buffer_size) { int err = aws_pipe_clean_up_write_end(write_end); if (err) { goto error; } 
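        /* All chunks queued by s_write_in_simultaneous_chunks_task() have completed and the
         * write-end is cleaned up; let the main thread know this side is finished. */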
s_signal_done_on_write_end_closed(state); } return; error: s_signal_error(state); } static void s_write_in_simultaneous_chunks_task(struct pipe_state *state) { /* Write the whole buffer via several successive writes */ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&state->buffers.src); const size_t chunk_size = cursor.len / 8; while (cursor.len > 0) { size_t bytes_to_write = (chunk_size < cursor.len) ? chunk_size : cursor.len; struct aws_byte_cursor chunk_cursor = aws_byte_cursor_from_array(cursor.ptr, bytes_to_write); int err = aws_pipe_write(&state->write_end, chunk_cursor, s_close_write_end_after_all_writes_completed, state); if (err) { goto error; } aws_byte_cursor_advance(&cursor, bytes_to_write); } return; error: s_signal_error(state); } static int test_pipe_writes_are_fifo(struct pipe_state *state) { s_schedule_read_end_task(state, s_read_everything_task); s_schedule_write_end_task(state, s_write_in_simultaneous_chunks_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_SUCCESS(s_pipe_state_check_copied_data(state)); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_writes_are_fifo, GIANT_BUFFER_SIZE); static void s_cancelled_on_write_completed( struct aws_pipe_write_end *write_end, int error_code, struct aws_byte_cursor src_buffer, void *user_data) { (void)write_end; struct pipe_state *state = user_data; int *write_status_code = state->test_data; *write_status_code = error_code; if (!error_code) { state->buffers.num_bytes_written += src_buffer.len; } s_schedule_read_end_task(state, s_clean_up_read_end_task); } static void s_write_then_clean_up_task(struct pipe_state *state) { struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&state->buffers.src); int err = aws_pipe_write(&state->write_end, cursor, s_cancelled_on_write_completed, state); if (err) { goto error; } err = aws_pipe_clean_up_write_end(&state->write_end); if (err) { goto error; } s_signal_done_on_write_end_closed(state); return; error: s_signal_error(state); } /* Perform an enormous write that can't possibly complete without a bit of reading. * After kicking off the write operation, close the write-end. * The write operation chould complete with an error status */ static int test_pipe_clean_up_cancels_pending_writes(struct pipe_state *state) { /* capture the status code from the on-write-complete callback */ int write_status_code = 0; state->test_data = &write_status_code; s_schedule_write_end_task(state, s_write_then_clean_up_task); ASSERT_SUCCESS(s_wait_for_results(state)); ASSERT_INT_EQUALS(AWS_IO_BROKEN_PIPE, write_status_code); ASSERT_TRUE(state->buffers.num_bytes_written < state->buffer_size); return AWS_OP_SUCCESS; } PIPE_TEST_CASE(pipe_clean_up_cancels_pending_writes, GIANT_BUFFER_SIZE); aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/pkcs11_test.c000066400000000000000000002271701456575232400233170ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ /** * See PKCS11.md for instructions on running these tests */ #include #include "../source/pkcs11_private.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4996) /* allow strncpy() */ #endif AWS_STATIC_STRING_FROM_LITERAL(TEST_PKCS11_LIB, "TEST_PKCS11_LIB"); AWS_STATIC_STRING_FROM_LITERAL(TEST_PKCS11_TOKEN_DIR, "TEST_PKCS11_TOKEN_DIR"); /* Singleton that stores env-var values */ struct pkcs11_tester { struct aws_allocator *allocator; struct aws_string *shared_lib_path; struct aws_string *token_dir; struct aws_pkcs11_lib *lib; }; static struct pkcs11_tester s_pkcs11_tester; const char *TOKEN_LABEL = "my-token"; const char *TOKEN_LABEL_RSA = "my-rsa-token"; const char *TOKEN_LABEL_EC = "my-ec-token"; const char *SO_PIN = "1111"; const char *USER_PIN = "0000"; const char *DEFAULT_KEY_LABEL = "my-key"; const char *DEFAULT_KEY_ID = "AABBCCDD"; #define TIMEOUT_SEC 10 #define TIMEOUT_MILLIS (AWS_TIMESTAMP_MILLIS * TIMEOUT_SEC) #define TIMEOUT_NANOS ((uint64_t)AWS_TIMESTAMP_NANOS * TIMEOUT_SEC) struct pkcs11_key_creation_params { const char *key_label; const char *key_id; const CK_ULONG key_length; }; /* Wipe out all existing tokens by deleting and recreating the SoftHSM token dir */ static int s_pkcs11_clear_softhsm(void) { ASSERT_SUCCESS(aws_directory_delete(s_pkcs11_tester.token_dir, true /*recursive*/)); ASSERT_SUCCESS(aws_directory_create(s_pkcs11_tester.token_dir)); return AWS_OP_SUCCESS; } static int s_reload_hsm(void) { /* Finalize to make sure that softhsm reads new tokens afresh */ aws_pkcs11_lib_release(s_pkcs11_tester.lib); s_pkcs11_tester.lib = NULL; /* Load library again */ struct aws_pkcs11_lib_options options = { .filename = aws_byte_cursor_from_string(s_pkcs11_tester.shared_lib_path), .initialize_finalize_behavior = AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE, }; s_pkcs11_tester.lib = aws_pkcs11_lib_new(s_pkcs11_tester.allocator, &options); ASSERT_NOT_NULL(s_pkcs11_tester.lib, "Failed to load PKCS#11 lib"); return AWS_OP_SUCCESS; } static int s_pkcs11_clear_softhsm_and_reload(void) { /* Finalize to make sure that softhsm reads new tokens afresh */ aws_pkcs11_lib_release(s_pkcs11_tester.lib); s_pkcs11_tester.lib = NULL; /* Clear token dir */ ASSERT_SUCCESS(s_pkcs11_clear_softhsm()); /* Load library again */ ASSERT_SUCCESS(s_reload_hsm()); return AWS_OP_SUCCESS; } /* Encryption/Decryption only applies to RSA, not ECC */ static int s_pkcs11_rsa_encrypt( struct aws_byte_cursor *message, struct aws_byte_buf *cipher_text, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE public_key) { CK_FUNCTION_LIST *pkcs11_function_list = aws_pkcs11_lib_get_function_list(s_pkcs11_tester.lib); struct aws_allocator *allocator = s_pkcs11_tester.allocator; CK_MECHANISM mechanism = {.mechanism = CKM_RSA_PKCS}; CK_RV rv = pkcs11_function_list->C_EncryptInit(session, &mechanism, public_key); if (rv != CKR_OK) { FAIL("C_EncryptInit fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } CK_ULONG cipher_len = 0; rv = pkcs11_function_list->C_Encrypt(session, message->ptr, (CK_ULONG)message->len, NULL, &cipher_len); if (rv != CKR_OK) { FAIL("C_Encrypt fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } aws_byte_buf_init(cipher_text, allocator, cipher_len); rv = pkcs11_function_list->C_Encrypt( session, message->ptr, (CK_ULONG)message->len, cipher_text->buffer, &cipher_len); if (rv != CKR_OK) { FAIL("C_Encrypt fails: PKCS#11 error: %s (0x%08lX)", 
aws_pkcs11_ckr_str(rv), rv); } cipher_text->len = cipher_len; return AWS_OP_SUCCESS; } static int s_pkcs11_verify_signature( struct aws_byte_cursor *message, struct aws_byte_buf *signature, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE public_key, CK_MECHANISM_TYPE mechanism_type) { CK_FUNCTION_LIST *pkcs11_function_list = aws_pkcs11_lib_get_function_list(s_pkcs11_tester.lib); CK_MECHANISM mechanism = {.mechanism = mechanism_type}; CK_RV rv = pkcs11_function_list->C_VerifyInit(session, &mechanism, public_key); if (rv != CKR_OK) { FAIL("C_VerifyInit fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } rv = pkcs11_function_list->C_Verify( session, message->ptr, (CK_ULONG)message->len, signature->buffer, (CK_ULONG)signature->len); if (rv != CKR_OK) { FAIL("C_Verify fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } return AWS_OP_SUCCESS; } static int s_pkcs11_create_rsa_key( struct pkcs11_key_creation_params *params, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE *created_private_key, CK_OBJECT_HANDLE *created_public_key) { CK_FUNCTION_LIST *pkcs11_function_list = aws_pkcs11_lib_get_function_list(s_pkcs11_tester.lib); /* We only support RSA keys today. */ CK_MECHANISM smech = {CKM_RSA_PKCS_KEY_PAIR_GEN, NULL, 0}; /* Define key template */ static CK_BBOOL truevalue = CK_TRUE; static CK_BBOOL falsevalue = CK_FALSE; /* Set public key. Not sure if setting modulus_bits actually generates key as per that. */ CK_ATTRIBUTE publickey_template[] = { {CKA_VERIFY, &truevalue, sizeof(truevalue)}, {CKA_MODULUS_BITS, (CK_VOID_PTR)¶ms->key_length, sizeof(params->key_length)}, }; /* Set private key. The parameters here are kind of random, does not affect the test, but trying * to mimic what a real key would look like in terms of attributes */ CK_ATTRIBUTE privatekey_template[] = { {CKA_LABEL, (void *)params->key_label, (CK_ULONG)strlen(params->key_label)}, {CKA_ID, (void *)params->key_id, (CK_ULONG)strlen(params->key_id)}, {CKA_SIGN, &truevalue, sizeof(truevalue)}, {CKA_EXTRACTABLE, &falsevalue, sizeof(falsevalue)}, }; CK_OBJECT_HANDLE privatekey, publickey = CK_INVALID_HANDLE; /* Generate Key pair for signing/verifying */ CK_RV rv = pkcs11_function_list->C_GenerateKeyPair( session, &smech, publickey_template, AWS_ARRAY_SIZE(publickey_template), privatekey_template, AWS_ARRAY_SIZE(privatekey_template), &publickey, &privatekey); if (rv != CKR_OK) { FAIL("C_GenerateKeyPair fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } *created_private_key = privatekey; *created_public_key = publickey; return AWS_OP_SUCCESS; } static int s_pkcs11_create_ec_key( struct pkcs11_key_creation_params *params, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE *created_private_key, CK_OBJECT_HANDLE *created_public_key) { CK_FUNCTION_LIST *pkcs11_function_list = aws_pkcs11_lib_get_function_list(s_pkcs11_tester.lib); CK_MECHANISM smech = {CKM_EC_KEY_PAIR_GEN, NULL, 0}; /* Define key template */ static CK_BBOOL truevalue = CK_TRUE; static CK_BBOOL falsevalue = CK_FALSE; /* DER encoded params for curve P-256 */ static CK_BYTE ec_params[] = {0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}; CK_ATTRIBUTE publickey_template[] = { {CKA_EC_PARAMS, ec_params, sizeof(ec_params)}, {CKA_VERIFY, &truevalue, sizeof(truevalue)}, }; /* Set private key. 
The parameters here are kind of random, does not affect the test, but trying * to mimic what a real key would look like in terms of attributes */ CK_ATTRIBUTE privatekey_template[] = { {CKA_LABEL, (void *)params->key_label, (CK_ULONG)strlen(params->key_label)}, {CKA_ID, (void *)params->key_id, (CK_ULONG)strlen(params->key_id)}, {CKA_SIGN, &truevalue, sizeof(truevalue)}, {CKA_EXTRACTABLE, &falsevalue, sizeof(falsevalue)}, }; CK_OBJECT_HANDLE privatekey, publickey = CK_INVALID_HANDLE; /* Generate Key pair for signing/verifying */ CK_RV rv = pkcs11_function_list->C_GenerateKeyPair( session, &smech, publickey_template, AWS_ARRAY_SIZE(publickey_template), privatekey_template, AWS_ARRAY_SIZE(privatekey_template), &publickey, &privatekey); if (rv != CKR_OK) { FAIL("C_GenerateKeyPair fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } *created_private_key = privatekey; *created_public_key = publickey; return AWS_OP_SUCCESS; } /* if tokenInfo is set, finds slot with matching token * if tokenInfo is NULL, finds slot with uninitialized token */ static int s_pkcs11_find_slot(const CK_TOKEN_INFO *tokenInfo, CK_SLOT_ID *out_slot) { CK_FUNCTION_LIST *pkcs11_function_list = aws_pkcs11_lib_get_function_list(s_pkcs11_tester.lib); CK_ULONG ul_slot_count = 0; CK_SLOT_ID slot_id = 0; CK_RV rv = pkcs11_function_list->C_GetSlotList(CK_TRUE, NULL, &ul_slot_count); if (rv != CKR_OK) { FAIL("ERROR: Could not get the number of slots."); } CK_SLOT_ID_PTR p_slot_list = aws_mem_acquire(s_pkcs11_tester.allocator, ul_slot_count * sizeof(CK_SLOT_ID)); if (p_slot_list == NULL) { FAIL("ERROR: Could not allocate memory."); } rv = pkcs11_function_list->C_GetSlotList(CK_FALSE, p_slot_list, &ul_slot_count); if (rv != CKR_OK) { FAIL("ERROR: Could not get the slot list."); } size_t counter = 0; for (CK_ULONG i = 0; i < ul_slot_count; i++) { CK_TOKEN_INFO curr_token_info; rv = pkcs11_function_list->C_GetTokenInfo(p_slot_list[i], &curr_token_info); if (rv != CKR_OK) { FAIL("ERROR: Could not get info about the token in slot %lu.", p_slot_list[i]); } if (tokenInfo) { if (memcmp(curr_token_info.serialNumber, tokenInfo->serialNumber, sizeof(tokenInfo->serialNumber)) == 0 && memcmp(curr_token_info.label, tokenInfo->label, sizeof(tokenInfo->label)) == 0) { slot_id = p_slot_list[i]; counter++; } } else { /* find slots with uninitialized token */ if ((curr_token_info.flags & CKF_TOKEN_INITIALIZED) == 0) { slot_id = p_slot_list[i]; counter++; } } } aws_mem_release(s_pkcs11_tester.allocator, p_slot_list); if (counter == 0) { FAIL("ERROR: Could not find a slot/token using --serial, or --token"); } else if (counter > 1) { FAIL("ERROR: Found multiple matching slots/tokens."); } /* We found just one matching slot */ *out_slot = slot_id; return AWS_OP_SUCCESS; } static int s_pkcs11_find_free_slot(CK_SLOT_ID *out_slot) { return s_pkcs11_find_slot(NULL, out_slot); } /* Creation of slot requires a reload of softhsm, and hence need to re initialize the pkcs11_lib */ static int s_pkcs11_softhsm_create_slot( const char *token_name, const char *so_pin, const char *user_pin, CK_SLOT_ID *created_slot) { CK_FUNCTION_LIST *pkcs11_function_list = aws_pkcs11_lib_get_function_list(s_pkcs11_tester.lib); CK_RV rv; /* API expects ' ' padded string */ CK_UTF8CHAR paddedLabel[32]; memset(paddedLabel, ' ', sizeof(paddedLabel)); memcpy(paddedLabel, token_name, strlen(token_name)); CK_SLOT_ID slot_id = 0; ASSERT_SUCCESS(s_pkcs11_find_free_slot(&slot_id)); rv = pkcs11_function_list->C_InitToken(slot_id, (CK_UTF8CHAR_PTR)so_pin, (CK_ULONG)strlen(so_pin), 
paddedLabel); if (rv != CKR_OK) { FAIL("C_InitToken fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } CK_SESSION_HANDLE session; rv = pkcs11_function_list->C_OpenSession(slot_id, CKF_SERIAL_SESSION | CKF_RW_SESSION, NULL, NULL, &session); if (rv != CKR_OK) { FAIL("C_OpenSession fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } rv = pkcs11_function_list->C_Login(session, CKU_SO, (CK_UTF8CHAR_PTR)so_pin, (CK_ULONG)strlen(so_pin)); if (rv != CKR_OK) { FAIL("C_Login fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } rv = pkcs11_function_list->C_InitPIN(session, (CK_UTF8CHAR_PTR)user_pin, (CK_ULONG)strlen(user_pin)); if (rv != CKR_OK) { FAIL("C_InitPIN fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } CK_TOKEN_INFO tokenInfo; rv = pkcs11_function_list->C_GetTokenInfo(slot_id, &tokenInfo); if (rv != CKR_OK) { FAIL("C_GetTokenInfo fails: PKCS#11 error: %s (0x%08lX)", aws_pkcs11_ckr_str(rv), rv); } /* Reload the library */ ASSERT_SUCCESS(s_reload_hsm()); CK_SLOT_ID new_slot_id = 0; ASSERT_SUCCESS(s_pkcs11_find_slot(&tokenInfo, &new_slot_id)); if (slot_id == new_slot_id) { printf("The token has been initialized on slot %lu\n", new_slot_id); } else { printf("The token has been initialized and is reassigned to slot %lu\n", new_slot_id); } *created_slot = new_slot_id; return AWS_OP_SUCCESS; } /* * Helper functions to interact with softhsm end * */ /* Unload PKCS#11 lib * Clear SoftHSM's token dir so that each test ends fresh */ static void s_pkcs11_tester_clean_up(void) { aws_pkcs11_lib_release(s_pkcs11_tester.lib); s_pkcs11_tester.lib = NULL; s_pkcs11_clear_softhsm(); aws_string_destroy(s_pkcs11_tester.shared_lib_path); aws_string_destroy(s_pkcs11_tester.token_dir); AWS_ZERO_STRUCT(s_pkcs11_tester); aws_io_library_clean_up(); } /* Read env-vars, raise an error if any necessary ones are missing. * Clear SoftHSM's token dir so that each test starts fresh. * DO NOT load PKCS#11 lib. */ static int s_pkcs11_tester_init_without_load(struct aws_allocator *allocator) { aws_io_library_init(allocator); const struct aws_string *env_var = TEST_PKCS11_LIB; aws_get_environment_value(allocator, env_var, &s_pkcs11_tester.shared_lib_path); if (s_pkcs11_tester.shared_lib_path == NULL) { FAIL("Missing required env-var '%s'\n", aws_string_c_str(env_var)); } env_var = TEST_PKCS11_TOKEN_DIR; aws_get_environment_value(allocator, env_var, &s_pkcs11_tester.token_dir); if (s_pkcs11_tester.token_dir == NULL) { FAIL("Missing required env-var '%s'\n", aws_string_c_str(env_var)); } s_pkcs11_tester.allocator = allocator; ASSERT_SUCCESS(s_pkcs11_clear_softhsm()); return AWS_OP_SUCCESS; } /* Read env-vars, raise an error if any necessary ones are missing. * Clear SoftHSM's token dir so that each test starts fresh. * Load PKCS#11 lib. 
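 * (Implemented as s_pkcs11_tester_init_without_load() plus aws_pkcs11_lib_new() with
 * AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE.)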
*/ static int s_pkcs11_tester_init(struct aws_allocator *allocator) { ASSERT_SUCCESS(s_pkcs11_tester_init_without_load(allocator)); struct aws_pkcs11_lib_options options = { .filename = aws_byte_cursor_from_string(s_pkcs11_tester.shared_lib_path), .initialize_finalize_behavior = AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE, }; s_pkcs11_tester.lib = aws_pkcs11_lib_new(s_pkcs11_tester.allocator, &options); ASSERT_NOT_NULL(s_pkcs11_tester.lib, "Failed to load PKCS#11 lib"); return AWS_OP_SUCCESS; } static int s_pkcs11_tester_init_with_session_login( struct aws_allocator *allocator, const char *token_label, CK_SLOT_ID *created_slot, CK_SESSION_HANDLE *session) { /* Reset tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(token_label, SO_PIN, USER_PIN, created_slot)); ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, *created_slot, session /*out*/)); /* Login user */ struct aws_string *user_pin = aws_string_new_from_c_str(allocator, USER_PIN); ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, *session, user_pin)); aws_string_destroy(user_pin); return AWS_OP_SUCCESS; } /* Simplest test: Loads and unloads library, calling C_Initialize() and C_Finalize() */ static int s_test_pkcs11_lib_sanity_check(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_lib_sanity_check, s_test_pkcs11_lib_sanity_check) /* Stress test the DEFAULT_BEHAVIOR for C_Initialize() / C_Finalize() calls */ static int s_test_pkcs11_lib_behavior_default(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_pkcs11_tester_init_without_load(allocator)); struct aws_pkcs11_lib_options options_default_behavior = { .filename = aws_byte_cursor_from_string(s_pkcs11_tester.shared_lib_path), .initialize_finalize_behavior = AWS_PKCS11_LIB_DEFAULT_BEHAVIOR, }; struct aws_pkcs11_lib *lib_1 = aws_pkcs11_lib_new(allocator, &options_default_behavior); ASSERT_NOT_NULL(lib_1, "Failed to load PKCS#11 lib"); /* Loading the lib a 2nd time with DEFAULT_BEHAVIOR should be fine, * since CKR_CRYPTOKI_ALREADY_INITIALIZED should be ignored. */ struct aws_pkcs11_lib *lib_2 = aws_pkcs11_lib_new(allocator, &options_default_behavior); ASSERT_NOT_NULL(lib_2, "Failed to load a 2nd PKCS#11 lib"); /* lib_2 should keep working if lib_1 is freed, since C_Finalize() is not called with DEFAULT_BEHAVIOR. * (call C_GetInfo() to confirm the lib_2 still works) */ aws_pkcs11_lib_release(lib_1); lib_1 = NULL; CK_INFO info; ASSERT_INT_EQUALS(CKR_OK, aws_pkcs11_lib_get_function_list(lib_2)->C_GetInfo(&info)); /* If all libs are unloaded, and another comes online. 
That should be fine */ aws_pkcs11_lib_release(lib_2); lib_2 = NULL; struct aws_pkcs11_lib *lib_3 = aws_pkcs11_lib_new(allocator, &options_default_behavior); ASSERT_NOT_NULL(lib_3, "Failed to load a 3rd PKCS#11 lib"); /* Clean up */ aws_pkcs11_lib_release(lib_3); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_lib_behavior_default, s_test_pkcs11_lib_behavior_default) /* Stress test the OMIT_INITIALIZE behavior, where neither C_Initialize() or C_Finalize() is called */ static int s_test_pkcs11_lib_behavior_omit_initialize(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_pkcs11_tester_init_without_load(allocator)); struct aws_pkcs11_lib_options options_omit_initialize = { .filename = aws_byte_cursor_from_string(s_pkcs11_tester.shared_lib_path), .initialize_finalize_behavior = AWS_PKCS11_LIB_OMIT_INITIALIZE, }; /* Test that we fail gracefully if OMIT_INITIALIZE behavior is used, * but no one else has initialized the underlying PKCS#11 library */ struct aws_pkcs11_lib *pkcs11_lib_should_fail = aws_pkcs11_lib_new(allocator, &options_omit_initialize); ASSERT_NULL(pkcs11_lib_should_fail); ASSERT_INT_EQUALS(AWS_ERROR_PKCS11_CKR_CRYPTOKI_NOT_INITIALIZED, aws_last_error()); /* Test that it's fine to use OMIT_INITIALIZE behavior to have the library loaded multiple times. */ /* First create a lib that DOES call C_Initialize() */ struct aws_pkcs11_lib_options options_initialize_finalize = { .filename = aws_byte_cursor_from_string(s_pkcs11_tester.shared_lib_path), .initialize_finalize_behavior = AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE, }; struct aws_pkcs11_lib *lib_initialize_finalize = aws_pkcs11_lib_new(allocator, &options_initialize_finalize); ASSERT_NOT_NULL(lib_initialize_finalize); /* Now test that it's fine to create a 2nd lib using OMIT_INITIALIZE */ struct aws_pkcs11_lib *lib_2 = aws_pkcs11_lib_new(allocator, &options_omit_initialize); ASSERT_NOT_NULL(lib_2); /* Clean up */ aws_pkcs11_lib_release(lib_2); aws_pkcs11_lib_release(lib_initialize_finalize); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_lib_behavior_omit_initialize, s_test_pkcs11_lib_behavior_omit_initialize) /* Stress test the STRICT_INITIALIZE_FINALIZE behavior */ static int s_test_pkcs11_lib_behavior_strict_initialize_finalize(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_pkcs11_tester_init_without_load(allocator)); /* Creating the 1st lib should succeed */ struct aws_pkcs11_lib_options options_initialize_finalize = { .filename = aws_byte_cursor_from_string(s_pkcs11_tester.shared_lib_path), .initialize_finalize_behavior = AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE, }; struct aws_pkcs11_lib *lib_1 = aws_pkcs11_lib_new(allocator, &options_initialize_finalize); ASSERT_NOT_NULL(lib_1); /* Creating the 2nd lib should fail due to already-initialized errors */ struct aws_pkcs11_lib *lib_2_should_fail = aws_pkcs11_lib_new(allocator, &options_initialize_finalize); ASSERT_NULL(lib_2_should_fail); ASSERT_INT_EQUALS(AWS_ERROR_PKCS11_CKR_CRYPTOKI_ALREADY_INITIALIZED, aws_last_error()); /* It should be safe to release a STRICT lib, then create another */ aws_pkcs11_lib_release(lib_1); lib_1 = NULL; struct aws_pkcs11_lib *lib_2_should_succeed = aws_pkcs11_lib_new(allocator, &options_initialize_finalize); ASSERT_NOT_NULL(lib_2_should_succeed); /* Clean up */ aws_pkcs11_lib_release(lib_2_should_succeed); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_lib_behavior_strict_initialize_finalize, 
s_test_pkcs11_lib_behavior_strict_initialize_finalize) static int s_test_pkcs11_session_tests(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); /* Assert that creating a session for an invalid slot fails. * * NOTE: We omit this part of the test when AddressSanitizer is being used, * because SoftHSM v2.2 triggers it in this scenario. I've tried using a * suppression file to ignore the issue, but the suppression isn't * working and I still don't understand why after 1+ hours of effort. * But this ifdef does the trick so that's what I'm doing. */ #if defined(__has_feature) # if __has_feature(address_sanitizer) # define ADDRESS_SANITIZER_ENABLED 1 # endif #endif #if !ADDRESS_SANITIZER_ENABLED CK_SESSION_HANDLE session = CK_INVALID_HANDLE; /* we haven't created any slots and we are starting from a clean softhsm, so any slot value is invalid. */ CK_SLOT_ID slot = 1; ASSERT_FAILS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, slot, &session /*out*/)); #endif /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ CK_SLOT_ID created_slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(TOKEN_LABEL, SO_PIN, USER_PIN, &created_slot)); CK_SESSION_HANDLE first_session = CK_INVALID_HANDLE; CK_SESSION_HANDLE second_session = CK_INVALID_HANDLE; /* Now, creation of a session on a valid slot will be a success */ ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &first_session /*out*/)); ASSERT_TRUE(first_session != CK_INVALID_HANDLE); /* create one more session */ ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &second_session /*out*/)); ASSERT_TRUE(second_session != CK_INVALID_HANDLE); ASSERT_TRUE(first_session != second_session); /* Close both sessions */ aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, first_session); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, second_session); /* Clean up */ s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_session_tests, s_test_pkcs11_session_tests) static int s_test_pkcs11_login_tests(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ CK_SLOT_ID created_slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(TOKEN_LABEL, SO_PIN, USER_PIN, &created_slot)); /* Try to login with in invalid session, we have not created any session on this token * So, any session value is invalid */ struct aws_string *pin = aws_string_new_from_c_str(allocator, USER_PIN); CK_SESSION_HANDLE invalid_session = 1UL; ASSERT_FAILS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, invalid_session, pin)); /* Now create a valid session */ CK_SESSION_HANDLE session = CK_INVALID_HANDLE; ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session /*out*/)); /* Try an invalid pin on a valid slot */ struct aws_string *invalid_pin = aws_string_new_from_c_str(allocator, "INVALID_PIN"); ASSERT_FAILS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session, invalid_pin)); /* Try a valid pin on a valid slot */ ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session, pin)); /* A re login should succeed, as we are already logged in now */ ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session, pin)); /* Now create one more session */ 
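    /* PKCS#11 login state is per-token and shared by every session the application has open,
     * so the re-login calls below are expected to succeed (the wrapper presumably tolerates
     * CKR_USER_ALREADY_LOGGED_IN rather than treating it as a failure). */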
CK_SESSION_HANDLE session_2 = CK_INVALID_HANDLE; ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_2 /*out*/)); /* A re login should succeed, as we are already logged in another session and * the spec only requires login once on any of the session in an application * */ ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session_2, pin)); /* Close the first session */ aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session); /* A re login should succeed again on the second session, as login is only required once */ ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session_2, pin)); /* Close the second session */ aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session_2); /* Clean up */ aws_string_destroy(pin); aws_string_destroy(invalid_pin); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_login_tests, s_test_pkcs11_login_tests) static int s_test_pkcs11_find_private_key_for_different_rsa_types(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ CK_SLOT_ID created_slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(TOKEN_LABEL_RSA, SO_PIN, USER_PIN, &created_slot)); /* Do not close the session while running a test, objects created by a session are cleaned up * when the session is closed. * http://docs.oasis-open.org/pkcs11/pkcs11-ug/v2.40/cn02/pkcs11-ug-v2.40-cn02.html#_Toc386027485 * */ /* Open a different session to access the created key, and a different one to create */ CK_SESSION_HANDLE session_to_access_key; CK_SESSION_HANDLE session_to_create_key; ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_access_key /*out*/)); ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_create_key /*out*/)); /* Login user */ struct aws_string *user_pin = aws_string_new_from_c_str(allocator, USER_PIN); ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session_to_access_key, user_pin)); CK_OBJECT_HANDLE created_priv_key = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_pub_key = CK_INVALID_HANDLE; char *key_label_1024 = "1024_Key"; char *key_id_1024 = "1024_id"; struct pkcs11_key_creation_params params_1024 = { .key_label = key_label_1024, .key_id = key_id_1024, .key_length = 1024}; ASSERT_SUCCESS(s_pkcs11_create_rsa_key(¶ms_1024, session_to_create_key, &created_priv_key, &created_pub_key)); /* Find key */ CK_OBJECT_HANDLE pkey_handle = CK_INVALID_HANDLE; CK_KEY_TYPE pkey_type; struct aws_string *key_label_str = aws_string_new_from_c_str(allocator, key_label_1024); ASSERT_SUCCESS(aws_pkcs11_lib_find_private_key( s_pkcs11_tester.lib, session_to_access_key, key_label_str, &pkey_handle, &pkey_type)); ASSERT_INT_EQUALS(created_priv_key, pkey_handle); ASSERT_INT_EQUALS(CKK_RSA, pkey_type); /* Create another RSA key */ CK_OBJECT_HANDLE created_key_2048 = CK_INVALID_HANDLE; char *key_label_2048 = "2048_Key"; char *key_id_2048 = "2048_id"; struct pkcs11_key_creation_params params_2048 = { .key_label = key_label_2048, .key_id = key_id_2048, .key_length = 2048}; ASSERT_SUCCESS(s_pkcs11_create_rsa_key(¶ms_2048, session_to_create_key, &created_key_2048, &created_pub_key)); /* Find key */ struct aws_string *key_label_str_2048 = aws_string_new_from_c_str(allocator, key_label_2048); ASSERT_SUCCESS(aws_pkcs11_lib_find_private_key( s_pkcs11_tester.lib, 
session_to_access_key, key_label_str_2048, &pkey_handle, &pkey_type)); ASSERT_INT_EQUALS(created_key_2048, pkey_handle); ASSERT_INT_EQUALS(CKK_RSA, pkey_type); /* Create another RSA key */ CK_OBJECT_HANDLE created_key_4096 = CK_INVALID_HANDLE; char *key_label_4096 = "4096_Key"; char *key_id_4096 = "4096_id"; struct pkcs11_key_creation_params params_4096 = { .key_label = key_label_4096, .key_id = key_id_4096, .key_length = 4096}; ASSERT_SUCCESS(s_pkcs11_create_rsa_key(¶ms_4096, session_to_create_key, &created_key_4096, &created_pub_key)); /* Find key */ struct aws_string *key_label_str_4096 = aws_string_new_from_c_str(allocator, key_label_4096); ASSERT_SUCCESS(aws_pkcs11_lib_find_private_key( s_pkcs11_tester.lib, session_to_access_key, key_label_str_4096, &pkey_handle, &pkey_type)); ASSERT_INT_EQUALS(created_key_4096, pkey_handle); ASSERT_INT_EQUALS(CKK_RSA, pkey_type); /* Clean up */ aws_string_destroy(user_pin); aws_string_destroy(key_label_str); aws_string_destroy(key_label_str_2048); aws_string_destroy(key_label_str_4096); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_find_private_key_for_different_rsa_types, s_test_pkcs11_find_private_key_for_different_rsa_types) static int s_test_pkcs11_find_private_key_for_ec(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ CK_SLOT_ID created_slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(TOKEN_LABEL, SO_PIN, USER_PIN, &created_slot)); /* Do not close the session while running a test, objects created by a session are cleaned up * when the session is closed. * http://docs.oasis-open.org/pkcs11/pkcs11-ug/v2.40/cn02/pkcs11-ug-v2.40-cn02.html#_Toc386027485 * */ /* Open a different session to access the created key, and a different one to create */ CK_SESSION_HANDLE session_to_access_key; CK_SESSION_HANDLE session_to_create_key; ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_access_key /*out*/)); ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_create_key /*out*/)); /* Login user */ struct aws_string *user_pin = aws_string_new_from_c_str(allocator, USER_PIN); ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session_to_access_key, user_pin)); /* Create an EC key */ CK_OBJECT_HANDLE created_pub_key = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_key_ec_256 = CK_INVALID_HANDLE; char *key_label_ec_256 = "EC_256_Key"; char *key_id_ec_256 = "EC_256_id"; struct pkcs11_key_creation_params params_ec_256 = {.key_label = key_label_ec_256, .key_id = key_id_ec_256}; ASSERT_SUCCESS( s_pkcs11_create_ec_key(¶ms_ec_256, session_to_create_key, &created_key_ec_256, &created_pub_key)); /* Find key */ CK_OBJECT_HANDLE pkey_handle = CK_INVALID_HANDLE; CK_KEY_TYPE pkey_type; struct aws_string *key_label_str_ec_256 = aws_string_new_from_c_str(allocator, key_label_ec_256); ASSERT_SUCCESS(aws_pkcs11_lib_find_private_key( s_pkcs11_tester.lib, session_to_access_key, key_label_str_ec_256, &pkey_handle, &pkey_type)); ASSERT_INT_EQUALS(created_key_ec_256, pkey_handle); ASSERT_INT_EQUALS(CKK_EC, pkey_type); /* Clean up */ aws_string_destroy(user_pin); aws_string_destroy(key_label_str_ec_256); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_find_private_key_for_ec, s_test_pkcs11_find_private_key_for_ec) static int 
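/* With two private keys on the token, aws_pkcs11_lib_find_private_key() must fail when no
 * label is supplied (the match is ambiguous) and must return the matching key when called
 * with either label. */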
s_test_pkcs11_find_multiple_private_key(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); const char *key_label_1 = "RSA_KEY"; const char *key_id_1 = "BEEFCAFE"; const char *key_label_2 = "DES_KEY_2"; const char *key_id_2 = "BEEFCAFEDEAD"; /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ CK_SLOT_ID created_slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(TOKEN_LABEL, SO_PIN, USER_PIN, &created_slot)); CK_SESSION_HANDLE session_to_access_key; CK_SESSION_HANDLE session_to_create_key_1; CK_SESSION_HANDLE session_to_create_key_2; /* Open a session to access the created key */ ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_access_key /*out*/)); /* Open sessions to create keys, 1 session is probably enough, but test creation with multiple sessions */ ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_create_key_1 /*out*/)); ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_create_key_2 /*out*/)); /* Login user */ struct aws_string *user_pin = aws_string_new_from_c_str(allocator, USER_PIN); ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session_to_access_key, user_pin)); CK_OBJECT_HANDLE created_key_1 = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_key_2 = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_pub_key = CK_INVALID_HANDLE; struct pkcs11_key_creation_params params_1 = {.key_label = key_label_1, .key_id = key_id_1, .key_length = 1024}; struct pkcs11_key_creation_params params_2 = {.key_label = key_label_2, .key_id = key_id_2, .key_length = 1024}; ASSERT_SUCCESS(s_pkcs11_create_rsa_key(¶ms_1, session_to_create_key_1, &created_key_1, &created_pub_key)); ASSERT_SUCCESS(s_pkcs11_create_rsa_key(¶ms_2, session_to_create_key_2, &created_key_2, &created_pub_key)); /* Since there are 2 keys, a lookup without label should fail */ struct aws_string *key_label_str = aws_string_new_from_c_str(allocator, key_label_1); struct aws_string *key_label_2_str = aws_string_new_from_c_str(allocator, key_label_2); CK_OBJECT_HANDLE pkey_handle = CK_INVALID_HANDLE; CK_KEY_TYPE pkey_type; ASSERT_FAILS( aws_pkcs11_lib_find_private_key(s_pkcs11_tester.lib, session_to_access_key, NULL, &pkey_handle, &pkey_type)); /* a lookup with label for the first key should find the first key */ pkey_handle = CK_INVALID_HANDLE; ASSERT_SUCCESS(aws_pkcs11_lib_find_private_key( s_pkcs11_tester.lib, session_to_access_key, key_label_str, &pkey_handle, &pkey_type)); ASSERT_INT_EQUALS(created_key_1, pkey_handle); ASSERT_INT_EQUALS(CKK_RSA, pkey_type); /* a lookup with label for the second key should find the second key */ pkey_handle = CK_INVALID_HANDLE; ASSERT_SUCCESS(aws_pkcs11_lib_find_private_key( s_pkcs11_tester.lib, session_to_access_key, key_label_2_str, &pkey_handle, &pkey_type)); ASSERT_INT_EQUALS(created_key_2, pkey_handle); ASSERT_INT_EQUALS(CKK_RSA, pkey_type); /* Clean up */ aws_string_destroy(key_label_str); aws_string_destroy(key_label_2_str); aws_string_destroy(user_pin); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session_to_access_key); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session_to_create_key_1); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session_to_create_key_2); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_find_multiple_private_key, s_test_pkcs11_find_multiple_private_key) static int 
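/* Single-key variant of the lookup test: with exactly one private key on the token, the
 * lookup should succeed both with the key's label and with a NULL label (unambiguous match).
 * Non-default token label and PINs are used so those paths get exercised as well. */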
s_test_pkcs11_find_private_key(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); const char *key_label_1 = "RSA_KEY"; const char *key_id_1 = "BEEFCAFE"; const char *label_1 = "label!@#$%^&*-_=+{}[]<>?,./():_1"; const char *so_pin_1 = "qwertyuioplaksjdhfgbn341269504732"; const char *user_pin_1 = "341269504732"; /* Create a new slot, Use values other than defaults for label/pins */ /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ CK_SLOT_ID created_slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(label_1, so_pin_1, user_pin_1, &created_slot)); /* Do not close the session while running a test, objects created by a session are cleaned up * when the session is closed. * http://docs.oasis-open.org/pkcs11/pkcs11-ug/v2.40/cn02/pkcs11-ug-v2.40-cn02.html#_Toc386027485 * */ /* Open a different session to access the created key, and a different one to create */ CK_SESSION_HANDLE session_to_access_key; CK_SESSION_HANDLE session_to_create_key; ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_access_key /*out*/)); ASSERT_SUCCESS(aws_pkcs11_lib_open_session(s_pkcs11_tester.lib, created_slot, &session_to_create_key /*out*/)); /* Login user */ struct aws_string *user_pin = aws_string_new_from_c_str(allocator, user_pin_1); ASSERT_SUCCESS(aws_pkcs11_lib_login_user(s_pkcs11_tester.lib, session_to_access_key, user_pin)); CK_OBJECT_HANDLE created_key = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_pub_key = CK_INVALID_HANDLE; struct pkcs11_key_creation_params params = {.key_label = key_label_1, .key_id = key_id_1, .key_length = 1024}; ASSERT_SUCCESS(s_pkcs11_create_rsa_key(¶ms, session_to_create_key, &created_key, &created_pub_key)); /* Find key */ CK_OBJECT_HANDLE pkey_handle = CK_INVALID_HANDLE; CK_KEY_TYPE pkey_type; struct aws_string *key_label_str = aws_string_new_from_c_str(allocator, key_label_1); ASSERT_SUCCESS(aws_pkcs11_lib_find_private_key( s_pkcs11_tester.lib, session_to_access_key, key_label_str, &pkey_handle, &pkey_type)); ASSERT_TRUE(CK_INVALID_HANDLE != pkey_handle); ASSERT_INT_EQUALS(created_key, pkey_handle); ASSERT_INT_EQUALS(CKK_RSA, pkey_type); /* Since there is only one key, a lookup without label should also return the key */ pkey_handle = CK_INVALID_HANDLE; ASSERT_SUCCESS( aws_pkcs11_lib_find_private_key(s_pkcs11_tester.lib, session_to_access_key, NULL, &pkey_handle, &pkey_type)); ASSERT_INT_EQUALS(created_key, pkey_handle); ASSERT_INT_EQUALS(CKK_RSA, pkey_type); /* Clean up */ aws_string_destroy(key_label_str); aws_string_destroy(user_pin); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session_to_access_key); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session_to_create_key); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_find_private_key, s_test_pkcs11_find_private_key) static int s_test_pkcs11_find_slot(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); /* softhsm does not like ;| as part of label */ const char *const label = "label!@#$%^&*-_=+{}[]<>?,./():_1"; const char *const so_pin = "qwertyuioplaksjdhfgbn341269504732"; const char *const user_pin = "341269504732"; CK_SLOT_ID slot_id = 0; /* * Softhsm always has one uninitialized token which is returned by the GetSlotList() API, * so there is no way to start without any slot at all * */ /* Call 
aws_pkcs11_lib_find_slot_with_token with 1 token, but no matching criteria */ ASSERT_SUCCESS( aws_pkcs11_lib_find_slot_with_token(s_pkcs11_tester.lib, NULL /*match_slot_id*/, NULL, &slot_id /*out*/)); /* Create a new slot, this reloads the softhsm library but the labels/slots remain intact */ CK_SLOT_ID created_slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(label, so_pin, user_pin, &created_slot)); /* Call aws_pkcs11_lib_find_slot_with_token with 2 tokens, but no matching criteria */ slot_id = (CK_SLOT_ID)-1; ASSERT_FAILS( aws_pkcs11_lib_find_slot_with_token(s_pkcs11_tester.lib, NULL /*match_slot_id*/, NULL, &slot_id /*out*/)); ASSERT_INT_EQUALS((CK_SLOT_ID)-1, slot_id); /* Call aws_pkcs11_lib_find_slot_with_token with 2 tokens, but match the slot this time */ uint64_t match_slot_id = created_slot; ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token(s_pkcs11_tester.lib, &match_slot_id, NULL, &slot_id /*out*/)); ASSERT_INT_EQUALS(created_slot, slot_id); /* Call aws_pkcs11_lib_find_slot_with_token with 2 tokens, but match the label this time */ slot_id = 0; struct aws_string *match_label = aws_string_new_from_c_str(allocator, label); ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, NULL /*match_slot_id*/, match_label, &slot_id /*out*/)); ASSERT_INT_EQUALS(created_slot, slot_id); /* clear softhsm and make sure that no tokens match with previous slot/label */ ASSERT_SUCCESS(s_pkcs11_clear_softhsm_and_reload()); /* * Call aws_pkcs11_lib_find_slot_with_token with just the uninitialized token, * and assert that previous label does not match anymore * */ slot_id = (CK_SLOT_ID)-1; ASSERT_FAILS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, NULL /*match_slot_id*/, match_label /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS((CK_SLOT_ID)-1, slot_id); /* Clean up */ aws_string_destroy(match_label); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_find_slot, s_test_pkcs11_find_slot) static int s_test_pkcs11_find_slot_many_tokens(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens and load library */ ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); const char *const label_1 = "label_1"; const char *const label_2 = "label_2"; const char *const so_pin_1 = "ABCD"; const char *const so_pin_2 = "0111"; const char *const user_pin_1 = "ABCD"; const char *const user_pin_2 = "0111"; /* Create 2 new slots. 
* WARNING: SoftHSM may change ALL the slot_ids whenever a new token is added, and the library is reloaded */ CK_SLOT_ID slot_id = (CK_SLOT_ID)-1; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(label_1, so_pin_1, user_pin_1, &slot_id)); ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(label_2, so_pin_2, user_pin_2, &slot_id)); /* Call aws_pkcs11_lib_find_slot_with_token with 3 tokens, match the label 1 this time */ struct aws_string *match_label_1 = aws_string_new_from_c_str(allocator, label_1); CK_SLOT_ID created_slot_1 = (CK_SLOT_ID)-1; ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, NULL /*match_slot_id*/, match_label_1 /*match_token_label*/, &created_slot_1 /*out*/)); /* Call aws_pkcs11_lib_find_slot_with_token with 3 tokens, match the label 2 this time */ struct aws_string *match_label_2 = aws_string_new_from_c_str(allocator, label_2); CK_SLOT_ID created_slot_2 = (CK_SLOT_ID)-1; ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, NULL /*match_slot_id*/, match_label_2 /*match_token_label*/, &created_slot_2 /*out*/)); ASSERT_TRUE(created_slot_2 != created_slot_1); /* Call aws_pkcs11_lib_find_slot_with_token with 3 tokens on softhsm, but no matching criteria */ slot_id = (CK_SLOT_ID)-1; ASSERT_FAILS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, NULL /*match_slot_id*/, NULL /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS((CK_SLOT_ID)-1, slot_id); /* Call aws_pkcs11_lib_find_slot_with_token with 3 tokens, but match the slot 1 this time */ const uint64_t match_slot_id_1 = created_slot_1; ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, &match_slot_id_1 /*match_slot_id*/, NULL /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS(created_slot_1, slot_id); /* Call aws_pkcs11_lib_find_slot_with_token with 3 tokens, but match the slot 2 this time */ const uint64_t match_slot_id_2 = created_slot_2; ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, &match_slot_id_2 /*match_slot_id*/, NULL /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS(created_slot_2, slot_id); /* * Call aws_pkcs11_lib_find_slot_with_token with 3 tokens, * but a mismatch for a slot and label should return error * */ slot_id = (CK_SLOT_ID)-1; ASSERT_FAILS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, &match_slot_id_1 /*match_slot_id*/, match_label_2 /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS((CK_SLOT_ID)-1, slot_id); slot_id = (CK_SLOT_ID)-1; ASSERT_FAILS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, &match_slot_id_2 /*match_slot_id*/, match_label_1 /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS((CK_SLOT_ID)-1, slot_id); /* * Call aws_pkcs11_lib_find_slot_with_token with 3 tokens, * but match for both, slot and label should return success * */ ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, &match_slot_id_1 /*match_slot_id*/, match_label_1 /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS(created_slot_1, slot_id); ASSERT_SUCCESS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, &match_slot_id_2 /*match_slot_id*/, match_label_2 /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS(created_slot_2, slot_id); /* clear softhsm and make sure that no tokens match with previous slot/label */ ASSERT_SUCCESS(s_pkcs11_clear_softhsm_and_reload()); /* * Call aws_pkcs11_lib_find_slot_with_token with just the uninitialized token, * and assert that previous label does not match anymore * */ slot_id = 
(CK_SLOT_ID)-1; ASSERT_FAILS(aws_pkcs11_lib_find_slot_with_token( s_pkcs11_tester.lib, NULL /*match_slot_id*/, match_label_2 /*match_token_label*/, &slot_id /*out*/)); ASSERT_INT_EQUALS((CK_SLOT_ID)-1, slot_id); /* Clean up */ aws_string_destroy(match_label_1); aws_string_destroy(match_label_2); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_find_slot_many_tokens, s_test_pkcs11_find_slot_many_tokens) static int s_test_pkcs11_prepare_rsa_2048_sign( CK_SESSION_HANDLE session, CK_OBJECT_HANDLE *pri_key, CK_OBJECT_HANDLE *pub_key) { struct pkcs11_key_creation_params params = { .key_label = DEFAULT_KEY_LABEL, .key_id = DEFAULT_KEY_ID, .key_length = 2048}; return s_pkcs11_create_rsa_key(¶ms, session, pri_key, pub_key); } static int s_test_pkcs11_prepare_ec_256_sign( CK_SESSION_HANDLE session, CK_OBJECT_HANDLE *pri_key, CK_OBJECT_HANDLE *pub_key) { struct pkcs11_key_creation_params params = { .key_label = DEFAULT_KEY_LABEL, .key_id = DEFAULT_KEY_ID, .key_length = 256}; return s_pkcs11_create_ec_key(¶ms, session, pri_key, pub_key); } /* Encryption/Decryption only applies to RSA, not ECC */ static int s_test_pkcs11_rsa_decrypt(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Reset PKCS#11 tokens, load library */ CK_SLOT_ID created_slot = 0; CK_SESSION_HANDLE session = CK_INVALID_HANDLE; s_pkcs11_tester_init_with_session_login(allocator, TOKEN_LABEL_RSA, &created_slot, &session); CK_OBJECT_HANDLE created_key = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_pub_key = CK_INVALID_HANDLE; ASSERT_SUCCESS(s_test_pkcs11_prepare_rsa_2048_sign(session, &created_key, &created_pub_key)); struct aws_byte_cursor input_cursor = aws_byte_cursor_from_c_str("ABCDEFGHIJKL"); struct aws_byte_buf output_buf; /* initialized later */ AWS_ZERO_STRUCT(output_buf); /* Encrypt our text */ ASSERT_SUCCESS(s_pkcs11_rsa_encrypt(&input_cursor, &output_buf, session, created_pub_key)); struct aws_byte_cursor cipher_text = aws_byte_cursor_from_buf(&output_buf); struct aws_byte_buf output_decrypted; /* initialized later */ AWS_ZERO_STRUCT(output_decrypted); ASSERT_SUCCESS(aws_pkcs11_lib_decrypt( s_pkcs11_tester.lib, session, created_key, CKK_RSA, cipher_text, allocator, &output_decrypted)); ASSERT_BIN_ARRAYS_EQUALS(output_decrypted.buffer, output_decrypted.len, input_cursor.ptr, input_cursor.len); /* Assert that sign fails for invalid / mismatch key type */ /* TODO: Move ASSERT_FAILS to ASSERT_ERROR */ CK_KEY_TYPE unsupported_key_type = CKK_GENERIC_SECRET; aws_byte_buf_clean_up(&output_decrypted); ASSERT_FAILS(aws_pkcs11_lib_decrypt( s_pkcs11_tester.lib, session, created_key, unsupported_key_type, cipher_text, allocator, &output_decrypted)); /* Invalid session handle should fail */ ASSERT_FAILS(aws_pkcs11_lib_decrypt( s_pkcs11_tester.lib, CK_INVALID_HANDLE, created_key, CKK_RSA, cipher_text, allocator, &output_decrypted)); /* Invalid key handle should fail */ ASSERT_FAILS(aws_pkcs11_lib_decrypt( s_pkcs11_tester.lib, session, CK_INVALID_HANDLE, CKK_RSA, cipher_text, allocator, &output_decrypted)); struct aws_byte_cursor empty_message_to_decrypt = aws_byte_cursor_from_c_str(""); ASSERT_FAILS(aws_pkcs11_lib_decrypt( s_pkcs11_tester.lib, session, created_key, CKK_RSA, empty_message_to_decrypt, allocator, &output_decrypted)); /* Clean up */ aws_byte_buf_clean_up(&output_buf); aws_byte_buf_clean_up(&output_decrypted); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_rsa_decrypt, s_test_pkcs11_rsa_decrypt) static int s_test_pkcs11_sign_rsa(struct aws_allocator *allocator, void 
*ctx, enum aws_tls_hash_algorithm digest_alg) { (void)ctx; /* Reset PKCS#11 tokens, load library */ CK_SLOT_ID created_slot = 0; CK_SESSION_HANDLE session = CK_INVALID_HANDLE; s_pkcs11_tester_init_with_session_login(allocator, TOKEN_LABEL_RSA, &created_slot, &session); CK_OBJECT_HANDLE created_key = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_pub_key = CK_INVALID_HANDLE; ASSERT_SUCCESS(s_test_pkcs11_prepare_rsa_2048_sign(session, &created_key, &created_pub_key)); struct aws_byte_cursor message_to_sign = aws_byte_cursor_from_c_str("ABCDEFGHIJKL"); struct aws_byte_buf signature; /* initialized later */ AWS_ZERO_STRUCT(signature); /* Sign a message */ ASSERT_SUCCESS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, session, created_key, CKK_RSA, message_to_sign, allocator, digest_alg, AWS_TLS_SIGNATURE_RSA, &signature)); struct aws_byte_buf prefixed_input; /* There is no good way to validate without this, as we append this prefix internally before signing. */ struct aws_byte_cursor prefix; ASSERT_SUCCESS(aws_get_prefix_to_rsa_sig(digest_alg, &prefix)); aws_byte_buf_init(&prefixed_input, allocator, message_to_sign.len + prefix.len); /* cannot fail */ aws_byte_buf_write(&prefixed_input, prefix.ptr, prefix.len); aws_byte_buf_write_from_whole_cursor(&prefixed_input, message_to_sign); struct aws_byte_cursor input_message_to_verify = aws_byte_cursor_from_buf(&prefixed_input); /* Verify the signature */ ASSERT_SUCCESS( s_pkcs11_verify_signature(&input_message_to_verify, &signature, session, created_pub_key, CKM_RSA_PKCS)); /* Assert that sign fails for invalid key type */ CK_KEY_TYPE unsupported_key_type = CKK_GENERIC_SECRET; aws_byte_buf_clean_up(&signature); ASSERT_FAILS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, session, created_key, unsupported_key_type, message_to_sign, allocator, digest_alg, AWS_TLS_SIGNATURE_RSA, &signature)); /* Invalid session handle should fail */ ASSERT_FAILS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, CK_INVALID_HANDLE, created_key, CKK_RSA, message_to_sign, allocator, digest_alg, AWS_TLS_SIGNATURE_RSA, &signature)); /* Invalid key handle should fail */ ASSERT_FAILS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, session, CK_INVALID_HANDLE, CKK_RSA, message_to_sign, allocator, digest_alg, AWS_TLS_SIGNATURE_RSA, &signature)); /* Clean up */ aws_byte_buf_clean_up(&prefixed_input); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } static int s_test_pkcs11_sign_rsa_sha1(struct aws_allocator *allocator, void *ctx) { return s_test_pkcs11_sign_rsa(allocator, ctx, AWS_TLS_HASH_SHA1); } AWS_TEST_CASE(pkcs11_sign_rsa_sha1, s_test_pkcs11_sign_rsa_sha1) static int s_test_pkcs11_sign_rsa_sha512(struct aws_allocator *allocator, void *ctx) { return s_test_pkcs11_sign_rsa(allocator, ctx, AWS_TLS_HASH_SHA512); } AWS_TEST_CASE(pkcs11_sign_rsa_sha512, s_test_pkcs11_sign_rsa_sha512) static int s_test_pkcs11_sign_rsa_sha384(struct aws_allocator *allocator, void *ctx) { return s_test_pkcs11_sign_rsa(allocator, ctx, AWS_TLS_HASH_SHA384); } AWS_TEST_CASE(pkcs11_sign_rsa_sha384, s_test_pkcs11_sign_rsa_sha384) static int s_test_pkcs11_sign_rsa_sha256(struct aws_allocator *allocator, void *ctx) { return s_test_pkcs11_sign_rsa(allocator, ctx, AWS_TLS_HASH_SHA256); } AWS_TEST_CASE(pkcs11_sign_rsa_sha256, s_test_pkcs11_sign_rsa_sha256) static int s_test_pkcs11_sign_rsa_sha224(struct aws_allocator *allocator, void *ctx) { return s_test_pkcs11_sign_rsa(allocator, ctx, AWS_TLS_HASH_SHA224); } AWS_TEST_CASE(pkcs11_sign_rsa_sha224, 
s_test_pkcs11_sign_rsa_sha224) static int s_verify_bigint( struct aws_allocator *allocator, uint8_t *ptr, size_t len_in, uint8_t *ptr_out, size_t len_out) { struct aws_byte_buf buffer; struct aws_byte_cursor src_array = aws_byte_cursor_from_array(ptr, len_in); aws_byte_buf_init(&buffer, allocator, len_in + 4); ASSERT_SUCCESS(aws_pkcs11_asn1_enc_ubigint(&buffer, src_array)); ASSERT_INT_EQUALS(len_out, buffer.len); for (size_t i = 0; i < len_out; i++) { ASSERT_HEX_EQUALS(ptr_out[i], buffer.buffer[i], "Mismatch at position %zu", i); } aws_byte_buf_clean_up(&buffer); return AWS_OP_SUCCESS; } static int s_test_pkcs11_asn1_bigint(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* * ECDSA relies on this working correct, so test this first to avoid intermittent failures */ uint8_t pos_int_1_in[4] = {0x12, 0x34, 0x56, 0x78}; uint8_t pos_int_1_out[6] = {0x02, 0x04, 0x12, 0x34, 0x56, 0x78}; ASSERT_SUCCESS( s_verify_bigint(allocator, pos_int_1_in, sizeof(pos_int_1_in), pos_int_1_out, sizeof(pos_int_1_out))); uint8_t pos_int_2_in[4] = {0x00, 0x34, 0x56, 0x78}; uint8_t pos_int_2_out[5] = {0x02, 0x03, 0x34, 0x56, 0x78}; ASSERT_SUCCESS( s_verify_bigint(allocator, pos_int_2_in, sizeof(pos_int_2_in), pos_int_2_out, sizeof(pos_int_2_out))); uint8_t pos_int_3_in[4] = {0x00, 0x00, 0x56, 0x78}; uint8_t pos_int_3_out[4] = {0x02, 0x02, 0x56, 0x78}; ASSERT_SUCCESS( s_verify_bigint(allocator, pos_int_3_in, sizeof(pos_int_3_in), pos_int_3_out, sizeof(pos_int_3_out))); uint8_t pos_int_4_in[4] = {0x00, 0x00, 0x00, 0x78}; uint8_t pos_int_4_out[3] = {0x02, 0x01, 0x78}; ASSERT_SUCCESS( s_verify_bigint(allocator, pos_int_4_in, sizeof(pos_int_4_in), pos_int_4_out, sizeof(pos_int_4_out))); uint8_t pos_int_5_in[4] = {0x00, 0x00, 0x00, 0x00}; uint8_t pos_int_5_out[3] = {0x02, 0x01, 0x00}; ASSERT_SUCCESS( s_verify_bigint(allocator, pos_int_5_in, sizeof(pos_int_5_in), pos_int_5_out, sizeof(pos_int_5_out))); uint8_t pos_int_6_in[1] = {0}; // actually we specify 0-length, but not all compilers support empty array uint8_t pos_int_6_out[3] = {0x02, 0x01, 0x00}; ASSERT_SUCCESS(s_verify_bigint(allocator, pos_int_6_in, 0, pos_int_6_out, sizeof(pos_int_6_out))); uint8_t pos_int_7_in[4] = {0x00, 0x84, 0x56, 0x78}; uint8_t pos_int_7_out[6] = {0x02, 0x04, 0x00, 0x84, 0x56, 0x78}; ASSERT_SUCCESS( s_verify_bigint(allocator, pos_int_7_in, sizeof(pos_int_7_in), pos_int_7_out, sizeof(pos_int_7_out))); uint8_t pos_int_8_in[4] = {0x82, 0x34, 0x56, 0x78}; uint8_t pos_int_8_out[7] = {0x02, 0x05, 0x00, 0x82, 0x34, 0x56, 0x78}; ASSERT_SUCCESS( s_verify_bigint(allocator, pos_int_8_in, sizeof(pos_int_8_in), pos_int_8_out, sizeof(pos_int_8_out))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(pkcs11_asn1_bigint, s_test_pkcs11_asn1_bigint) static int s_decode_asn1(struct aws_byte_cursor *src, uint8_t *identifier, struct aws_byte_cursor *split) { ASSERT_TRUE(src->len >= 2, "ASN1 structure too small for header, length=%u", src->len); *identifier = src->ptr[0]; uint8_t small_len = src->ptr[1]; src->ptr += 2; src->len -= 2; ASSERT_TRUE(small_len < 0x80, "ASN1 multi-byte length specified: %u", small_len); ASSERT_TRUE(small_len <= src->len, "ASN1 length too big: %u > %u", small_len, src->len); *split = aws_byte_cursor_from_array(src->ptr, small_len); src->ptr += small_len; src->len -= small_len; return AWS_OP_SUCCESS; } static int s_write_bigint(struct aws_byte_buf *buf, struct aws_byte_cursor *num, size_t len) { if (num->len > len && num->len > 1 && num->ptr[0] == 0x00 && (num->ptr[1] & 0x80) != 0) { // only scenario we allow length to be bigger 
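/* DER prepends a 0x00 pad byte to a positive INTEGER whose first value byte has the high
 * bit set; strip that pad here so the value fits the fixed-length, big-endian r/s layout
 * that raw CKM_ECDSA verification expects. */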
num->ptr++; num->len--; } ASSERT_TRUE(num->len <= len, "ASN1 number is too big: %u > %u", num->len, len); if (num->len < len) { uint8_t fill = num->ptr[0] & 0x80 ? 0xff : 0x00; while (len > num->len) { aws_byte_buf_write(buf, &fill, 1); len--; } } aws_byte_buf_write_from_whole_cursor(buf, *num); return AWS_OP_SUCCESS; } static int s_test_pkcs11_sign_ec( struct aws_allocator *allocator, void *ctx, int sig_len, int (*prepare)(CK_SESSION_HANDLE session, CK_OBJECT_HANDLE *pri_key, CK_OBJECT_HANDLE *pub_key)) { (void)ctx; /* Reset PKCS#11 tokens, load library */ CK_SLOT_ID created_slot = 0; CK_SESSION_HANDLE session = CK_INVALID_HANDLE; s_pkcs11_tester_init_with_session_login(allocator, TOKEN_LABEL_EC, &created_slot, &session); CK_OBJECT_HANDLE created_key = CK_INVALID_HANDLE; CK_OBJECT_HANDLE created_pub_key = CK_INVALID_HANDLE; ASSERT_SUCCESS(prepare(session, &created_key, &created_pub_key)); struct aws_byte_cursor message_to_sign = aws_byte_cursor_from_c_str("ABCDEFGHIJKL"); struct aws_byte_buf signature; /* initialized later */ struct aws_byte_buf sig_verify; /* initialized later */ AWS_ZERO_STRUCT(signature); AWS_ZERO_STRUCT(sig_verify); /* Sign a message */ ASSERT_SUCCESS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, session, created_key, CKK_EC, message_to_sign, allocator, AWS_TLS_HASH_UNKNOWN, // digest handled entirely by S2N AWS_TLS_SIGNATURE_ECDSA, &signature)); /* * Verify we have a structure of 2 ASN1 encoded integers */ uint8_t identifier; struct aws_byte_cursor sig_curs = aws_byte_cursor_from_buf(&signature); struct aws_byte_cursor struct_body; ASSERT_SUCCESS(s_decode_asn1(&sig_curs, &identifier, &struct_body)); ASSERT_HEX_EQUALS(0x30, identifier); // compound structure ASSERT_INT_EQUALS(sig_curs.len, 0); struct aws_byte_cursor r; struct aws_byte_cursor s; ASSERT_SUCCESS(s_decode_asn1(&struct_body, &identifier, &r)); ASSERT_HEX_EQUALS(0x02, identifier); // integer ASSERT_SUCCESS(s_decode_asn1(&struct_body, &identifier, &s)); ASSERT_HEX_EQUALS(0x02, identifier); // integer ASSERT_INT_EQUALS(struct_body.len, 0); // rewrite signature in format PKCS11 expects aws_byte_buf_init(&sig_verify, allocator, sig_len * 2); s_write_bigint(&sig_verify, &r, sig_len); s_write_bigint(&sig_verify, &s, sig_len); struct aws_byte_cursor message_to_verify = aws_byte_cursor_from_c_str("ABCDEFGHIJKL"); /* Verify the signature */ ASSERT_SUCCESS(s_pkcs11_verify_signature(&message_to_verify, &sig_verify, session, created_pub_key, CKM_ECDSA)); aws_byte_buf_clean_up(&signature); aws_byte_buf_clean_up(&sig_verify); /* Assert that sign fails for invalid key type */ CK_KEY_TYPE unsupported_key_type = CKK_GENERIC_SECRET; ASSERT_FAILS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, session, created_key, unsupported_key_type, message_to_sign, allocator, AWS_TLS_HASH_UNKNOWN, AWS_TLS_SIGNATURE_ECDSA, &signature)); /* Invalid session handle should fail */ ASSERT_FAILS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, CK_INVALID_HANDLE, created_key, CKK_EC, message_to_sign, allocator, AWS_TLS_HASH_UNKNOWN, AWS_TLS_SIGNATURE_ECDSA, &signature)); /* Invalid key handle should fail */ ASSERT_FAILS(aws_pkcs11_lib_sign( s_pkcs11_tester.lib, session, CK_INVALID_HANDLE, CKK_EC, message_to_sign, allocator, AWS_TLS_HASH_UNKNOWN, AWS_TLS_SIGNATURE_ECDSA, &signature)); /* Clean up */ aws_byte_buf_clean_up(&signature); aws_pkcs11_lib_close_session(s_pkcs11_tester.lib, session); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } static int s_test_pkcs11_sign_ec_256(struct aws_allocator *allocator, void *ctx) { return 
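/* 32 == byte length of each ECDSA signature component (r and s) for a P-256 key */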
s_test_pkcs11_sign_ec(allocator, ctx, 32, s_test_pkcs11_prepare_ec_256_sign); } AWS_TEST_CASE(pkcs11_sign_ec_256, s_test_pkcs11_sign_ec_256) #ifndef BYO_CRYPTO /* * Helper function to interact with softhsm begin */ static int s_run_cmd(const char *fmt, ...) { char cmd[1024]; va_list args; va_start(args, fmt); vsnprintf(cmd, sizeof(cmd), fmt, args); va_end(args); printf("Executing command: %s\n", cmd); struct aws_run_command_options cmd_opts = {.command = cmd}; struct aws_run_command_result cmd_result; ASSERT_SUCCESS(aws_run_command_result_init(s_pkcs11_tester.allocator, &cmd_result)); ASSERT_SUCCESS(aws_run_command(s_pkcs11_tester.allocator, &cmd_opts, &cmd_result)); int ret_code = cmd_result.ret_code; aws_run_command_result_cleanup(&cmd_result); return ret_code; } struct tls_tester { struct { struct aws_mutex mutex; struct aws_condition_variable cvar; bool server_results_ready; int server_error_code; bool client_results_ready; int client_error_code; } synced; }; static struct tls_tester s_tls_tester; static bool s_are_client_results_ready(void *user_data) { (void)user_data; return s_tls_tester.synced.client_results_ready; } static bool s_are_server_results_ready(void *user_data) { (void)user_data; return s_tls_tester.synced.server_results_ready; } /* callback when client TLS connection established (or failed) */ static void s_on_tls_client_channel_setup( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)user_data; AWS_LOGF_INFO(AWS_LS_IO_PKCS11, "TLS test client setup. error_code=%s", aws_error_name(error_code)); /* if negotiation succeeds: shutdown channel nicely * if negotiation fails: store error code and notify main thread */ if (error_code == 0) { aws_channel_shutdown(channel, 0); } else { aws_mutex_lock(&s_tls_tester.synced.mutex); s_tls_tester.synced.client_error_code = error_code; s_tls_tester.synced.client_results_ready = true; aws_mutex_unlock(&s_tls_tester.synced.mutex); aws_condition_variable_notify_all(&s_tls_tester.synced.cvar); } } /* callback when client TLS connection finishes shutdown (doesn't fire if setup failed) */ static void s_on_tls_client_channel_shutdown( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; (void)user_data; AWS_LOGF_INFO(AWS_LS_IO_PKCS11, "TLS test client shutdown. error_code=%s", aws_error_name(error_code)); /* store error code and notify main thread */ aws_mutex_lock(&s_tls_tester.synced.mutex); s_tls_tester.synced.client_error_code = error_code; s_tls_tester.synced.client_results_ready = true; aws_mutex_unlock(&s_tls_tester.synced.mutex); aws_condition_variable_notify_all(&s_tls_tester.synced.cvar); } /* callback when server TLS connection established (or failed) */ static void s_on_tls_server_channel_setup( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; (void)user_data; AWS_LOGF_INFO(AWS_LS_IO_PKCS11, "TLS test server setup. 
error_code=%s", aws_error_name(error_code)); if (error_code == 0) { /* do nothing, the client will shut down this channel */ return; } else { /* store error code and notify main thread */ aws_mutex_lock(&s_tls_tester.synced.mutex); s_tls_tester.synced.server_error_code = error_code; s_tls_tester.synced.server_results_ready = true; aws_mutex_unlock(&s_tls_tester.synced.mutex); aws_condition_variable_notify_all(&s_tls_tester.synced.cvar); } } /* callback when server TLS connection established (or failed) */ static void s_on_tls_server_channel_shutdown( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; (void)user_data; AWS_LOGF_INFO(AWS_LS_IO_PKCS11, "TLS test server shutdown. error_code=%s", aws_error_name(error_code)); /* store error code and notify main thread */ aws_mutex_lock(&s_tls_tester.synced.mutex); s_tls_tester.synced.server_error_code = error_code; s_tls_tester.synced.server_results_ready = true; aws_mutex_unlock(&s_tls_tester.synced.mutex); aws_condition_variable_notify_all(&s_tls_tester.synced.cvar); } /* Connect a client client and server, where the client is using PKCS#11 for private key operations */ static int s_test_pkcs11_tls_negotiation_succeeds_common( struct aws_allocator *allocator, const char *token_label, const char *p8key_path, const char *cert_path, const char *pkey_path) { ASSERT_SUCCESS(s_pkcs11_tester_init(allocator)); /* Create token for provided key */ CK_SLOT_ID slot = 0; ASSERT_SUCCESS(s_pkcs11_softhsm_create_slot(token_label, SO_PIN, USER_PIN, &slot)); aws_pkcs11_lib_release(s_pkcs11_tester.lib); s_pkcs11_tester.lib = NULL; /* use softhsm2-util to import key */ ASSERT_SUCCESS(s_run_cmd( "softhsm2-util --import %s --module \"%s\" --slot %lu --label %s --id %s --pin %s", p8key_path, aws_string_c_str(s_pkcs11_tester.shared_lib_path), slot, DEFAULT_KEY_LABEL, DEFAULT_KEY_ID, USER_PIN)); ASSERT_SUCCESS(s_reload_hsm()); /* Set up resources that aren't specific to server or client */ ASSERT_SUCCESS(aws_mutex_init(&s_tls_tester.synced.mutex)); ASSERT_SUCCESS(aws_condition_variable_init(&s_tls_tester.synced.cvar)); struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 1, NULL /*shutdown_opts*/); ASSERT_NOT_NULL(event_loop_group); struct aws_host_resolver_default_options resolver_opts = { .el_group = event_loop_group, }; struct aws_host_resolver *host_resolver = aws_host_resolver_new_default(allocator, &resolver_opts); ASSERT_NOT_NULL(host_resolver); /* use randomly named local domain socket */ struct aws_socket_endpoint endpoint = {.address = {0}, .port = 0}; { struct aws_byte_buf addr_buf = aws_byte_buf_from_empty_array(endpoint.address, sizeof(endpoint.address)); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&addr_buf, aws_byte_cursor_from_c_str("testsock-"))); struct aws_uuid addr_uuid; ASSERT_SUCCESS(aws_uuid_init(&addr_uuid)); ASSERT_SUCCESS(aws_uuid_to_str(&addr_uuid, &addr_buf)); ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&addr_buf, aws_byte_cursor_from_c_str(".sock"))); } struct aws_socket_options sock_opts = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_LOCAL, .connect_timeout_ms = TIMEOUT_MILLIS, }; /* Set up a server that does mutual TLS. 
The server will not use PKCS#11 */ struct aws_tls_ctx_options server_tls_opts; ASSERT_SUCCESS( aws_tls_ctx_options_init_default_server_from_path(&server_tls_opts, allocator, cert_path, pkey_path)); /* trust the client's self-signed certificate */ ASSERT_SUCCESS( aws_tls_ctx_options_override_default_trust_store_from_path(&server_tls_opts, NULL /*ca_path*/, cert_path)); aws_tls_ctx_options_set_verify_peer(&server_tls_opts, true); struct aws_tls_ctx *server_tls_ctx = aws_tls_server_ctx_new(allocator, &server_tls_opts); ASSERT_NOT_NULL(server_tls_ctx); aws_tls_ctx_options_clean_up(&server_tls_opts); struct aws_tls_connection_options server_tls_connection_opts; aws_tls_connection_options_init_from_ctx(&server_tls_connection_opts, server_tls_ctx); struct aws_server_bootstrap *server_bootstrap = aws_server_bootstrap_new(allocator, event_loop_group); ASSERT_NOT_NULL(server_bootstrap); struct aws_server_socket_channel_bootstrap_options server_listener_sock_opts = { .bootstrap = server_bootstrap, .host_name = endpoint.address, .port = endpoint.port, .socket_options = &sock_opts, .tls_options = &server_tls_connection_opts, .incoming_callback = s_on_tls_server_channel_setup, .shutdown_callback = s_on_tls_server_channel_shutdown, }; struct aws_socket *server_listener_sock = aws_server_bootstrap_new_socket_listener(&server_listener_sock_opts); ASSERT_NOT_NULL(server_listener_sock); /* Set up a client that does mutual TLS, using PKCS#11 for private key operations */ struct aws_tls_ctx_options client_tls_opts; # if 1 /* Toggle this to run without actually using PKCS#11. Useful for debugging this test. */ struct aws_tls_ctx_pkcs11_options client_pkcs11_tls_opts = { .pkcs11_lib = s_pkcs11_tester.lib, .token_label = aws_byte_cursor_from_c_str(token_label), .user_pin = aws_byte_cursor_from_c_str(USER_PIN), .private_key_object_label = aws_byte_cursor_from_c_str(DEFAULT_KEY_LABEL), .cert_file_path = aws_byte_cursor_from_c_str(cert_path), }; ASSERT_SUCCESS( aws_tls_ctx_options_init_client_mtls_with_pkcs11(&client_tls_opts, allocator, &client_pkcs11_tls_opts)); # else ASSERT_SUCCESS(aws_tls_ctx_options_init_client_mtls_from_path(&client_tls_opts, allocator, cert_path, pkey_path)); # endif /* trust the server's self-signed certificate */ ASSERT_SUCCESS( aws_tls_ctx_options_override_default_trust_store_from_path(&client_tls_opts, NULL /*ca_path*/, cert_path)); struct aws_tls_ctx *client_tls_ctx = aws_tls_client_ctx_new(allocator, &client_tls_opts); ASSERT_NOT_NULL(client_tls_ctx); aws_tls_ctx_options_clean_up(&client_tls_opts); struct aws_client_bootstrap_options client_bootstrap_opts = { .event_loop_group = event_loop_group, .host_resolver = host_resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_opts); ASSERT_NOT_NULL(client_bootstrap); struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str("localhost"); struct aws_tls_connection_options client_tls_connection_opts; aws_tls_connection_options_init_from_ctx(&client_tls_connection_opts, client_tls_ctx); ASSERT_SUCCESS(aws_tls_connection_options_set_server_name(&client_tls_connection_opts, allocator, &server_name)); struct aws_socket_channel_bootstrap_options client_channel_opts = { .bootstrap = client_bootstrap, .host_name = endpoint.address, .port = endpoint.port, .socket_options = &sock_opts, .tls_options = &client_tls_connection_opts, .setup_callback = s_on_tls_client_channel_setup, .shutdown_callback = s_on_tls_client_channel_shutdown, }; /* finally, tell the client to connect */ 
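/* Channel setup is asynchronous: the setup/shutdown callbacks above run on the event-loop
 * thread and record their results, while this thread blocks on the condition variable in
 * the critical section below until both client and server have reported. */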
ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_opts)); /* Wait for connection to go through */ /* CRITICAL SECTION */ { ASSERT_SUCCESS(aws_mutex_lock(&s_tls_tester.synced.mutex)); /* wait for client to successfully create connection and tear it down */ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred( &s_tls_tester.synced.cvar, &s_tls_tester.synced.mutex, (int64_t)TIMEOUT_NANOS, s_are_client_results_ready, NULL /*user_data*/)); ASSERT_INT_EQUALS(0, s_tls_tester.synced.client_error_code); /* ensure the server also had a good experience */ ASSERT_SUCCESS(aws_condition_variable_wait_for_pred( &s_tls_tester.synced.cvar, &s_tls_tester.synced.mutex, (int64_t)TIMEOUT_NANOS, s_are_server_results_ready, NULL /*user_data*/)); ASSERT_INT_EQUALS(0, s_tls_tester.synced.server_error_code); ASSERT_SUCCESS(aws_mutex_unlock(&s_tls_tester.synced.mutex)); } /* CRITICAL SECTION */ /* clean up */ aws_tls_ctx_release(client_tls_ctx); aws_client_bootstrap_release(client_bootstrap); aws_tls_connection_options_clean_up(&client_tls_connection_opts); aws_server_bootstrap_destroy_socket_listener(server_bootstrap, server_listener_sock); aws_tls_connection_options_clean_up(&server_tls_connection_opts); aws_server_bootstrap_release(server_bootstrap); aws_tls_ctx_release(server_tls_ctx); aws_host_resolver_release(host_resolver); aws_event_loop_group_release(event_loop_group); /* wait for event-loop threads to wrap up */ aws_thread_set_managed_join_timeout_ns(TIMEOUT_NANOS * 10); ASSERT_SUCCESS(aws_thread_join_all_managed()); aws_condition_variable_clean_up(&s_tls_tester.synced.cvar); aws_mutex_clean_up(&s_tls_tester.synced.mutex); s_pkcs11_tester_clean_up(); return AWS_OP_SUCCESS; } /* Connect a client and server, where the client is using PKCS#11 RSA certificate for private key operations */ static int s_test_pkcs11_tls_rsa_negotiation_succeeds(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_pkcs11_tls_negotiation_succeeds_common( allocator, TOKEN_LABEL_RSA, "unittests.p8", "unittests.crt", "unittests.key"); } AWS_TEST_CASE(pkcs11_tls_rsa_negotiation_succeeds, s_test_pkcs11_tls_rsa_negotiation_succeeds) /* Connect a client and server, where the client is using PKCS#11 EC certificate for private key operations */ static int s_test_pkcs11_tls_ec_negotiation_succeeds(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_pkcs11_tls_negotiation_succeeds_common( allocator, TOKEN_LABEL_EC, "ec_unittests.p8", "ec_unittests.crt", "ec_unittests.key"); } AWS_TEST_CASE(pkcs11_tls_ec_negotiation_succeeds, s_test_pkcs11_tls_ec_negotiation_succeeds) #endif /* !BYO_CRYPTO */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/read_write_test_handler.c000066400000000000000000000260621456575232400260340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "read_write_test_handler.h" #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) /* non-constant aggregate initializer */ # pragma warning(disable : 4267) /* size_t to int conversion */ #endif struct rw_test_handler_impl { struct aws_atomic_var shutdown_called; bool increment_read_window_called; struct aws_atomic_var *destroy_called; struct aws_condition_variable *destroy_condition_variable; rw_handler_driver_fn *on_read; rw_handler_driver_fn *on_write; bool event_loop_driven; size_t window; struct aws_condition_variable condition_variable; struct aws_mutex mutex; struct aws_atomic_var shutdown_error; void *ctx; }; static int s_rw_handler_process_read( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct rw_test_handler_impl *handler_impl = handler->impl; struct aws_byte_buf next_data = handler_impl->on_read(handler, slot, &message->message_data, handler_impl->ctx); aws_mem_release(message->allocator, message); if (slot->adj_right) { struct aws_io_message *msg = aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, next_data.len); struct aws_byte_cursor next_data_cursor = aws_byte_cursor_from_buf(&next_data); aws_byte_buf_append(&msg->message_data, &next_data_cursor); return aws_channel_slot_send_message(slot, msg, AWS_CHANNEL_DIR_READ); } return AWS_OP_SUCCESS; } static int s_rw_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct rw_test_handler_impl *handler_impl = handler->impl; struct aws_byte_buf next_data = handler_impl->on_write(handler, slot, &message->message_data, handler_impl->ctx); aws_mem_release(message->allocator, message); if (slot->adj_left) { struct aws_io_message *msg = aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, next_data.len); struct aws_byte_cursor next_data_cursor = aws_byte_cursor_from_buf(&next_data); aws_byte_buf_append(&msg->message_data, &next_data_cursor); return aws_channel_slot_send_message(slot, msg, AWS_CHANNEL_DIR_WRITE); } return AWS_OP_SUCCESS; } static int s_rw_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { struct rw_test_handler_impl *handler_impl = handler->impl; handler_impl->increment_read_window_called = true; aws_channel_slot_increment_read_window(slot, size); return AWS_OP_SUCCESS; } static int s_rw_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool abort_immediately) { struct rw_test_handler_impl *handler_impl = handler->impl; aws_atomic_store_int(&handler_impl->shutdown_called, true); aws_atomic_store_int(&handler_impl->shutdown_error, error_code); aws_condition_variable_notify_one(&handler_impl->condition_variable); return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, abort_immediately); } static size_t s_rw_handler_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static size_t s_rw_handler_get_current_window_size(struct aws_channel_handler *handler) { struct rw_test_handler_impl *handler_impl = handler->impl; return handler_impl->window; } static void s_rw_handler_destroy(struct aws_channel_handler *handler) { struct rw_test_handler_impl *handler_impl = handler->impl; if (handler_impl->destroy_called) { aws_atomic_store_int(handler_impl->destroy_called, 1); 
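/* wake the test thread that opted in via rw_handler_enable_wait_on_destroy() */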
aws_condition_variable_notify_one(handler_impl->destroy_condition_variable); } aws_mem_release(handler->alloc, handler_impl); aws_mem_release(handler->alloc, handler); } struct aws_channel_handler_vtable s_rw_test_vtable = { .shutdown = s_rw_handler_shutdown, .increment_read_window = s_rw_handler_increment_read_window, .initial_window_size = s_rw_handler_get_current_window_size, .process_read_message = s_rw_handler_process_read, .process_write_message = s_rw_handler_process_write_message, .destroy = s_rw_handler_destroy, .message_overhead = s_rw_handler_message_overhead, }; struct aws_channel_handler *rw_handler_new( struct aws_allocator *allocator, rw_handler_driver_fn *on_read, rw_handler_driver_fn *on_write, bool event_loop_driven, size_t window, void *ctx) { struct aws_channel_handler *handler = aws_mem_acquire(allocator, sizeof(struct aws_channel_handler)); handler->alloc = allocator; handler->vtable = &s_rw_test_vtable; struct rw_test_handler_impl *handler_impl = aws_mem_acquire(allocator, sizeof(struct rw_test_handler_impl)); AWS_ZERO_STRUCT(*handler_impl); handler_impl->on_read = on_read; handler_impl->on_write = on_write; handler_impl->ctx = ctx; handler_impl->event_loop_driven = event_loop_driven; handler_impl->window = window; handler_impl->condition_variable = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; handler_impl->mutex = (struct aws_mutex)AWS_MUTEX_INIT; handler->impl = handler_impl; return handler; } void rw_handler_enable_wait_on_destroy( struct aws_channel_handler *handler, struct aws_atomic_var *destroy_called, struct aws_condition_variable *condition_variable) { struct rw_test_handler_impl *handler_impl = handler->impl; handler_impl->destroy_called = destroy_called; handler_impl->destroy_condition_variable = condition_variable; } void rw_handler_trigger_read(struct aws_channel_handler *handler, struct aws_channel_slot *slot) { struct rw_test_handler_impl *handler_impl = handler->impl; struct aws_byte_buf next_data = handler_impl->on_read(handler, slot, NULL, handler_impl->ctx); struct aws_io_message *msg = aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, next_data.len); struct aws_byte_cursor next_data_cursor = aws_byte_cursor_from_buf(&next_data); aws_byte_buf_append(&msg->message_data, &next_data_cursor); aws_channel_slot_send_message(slot, msg, AWS_CHANNEL_DIR_READ); } struct rw_handler_write_task_args { struct aws_channel_handler *handler; struct aws_channel_slot *slot; struct aws_byte_buf *buffer; struct aws_channel_task task; }; static void s_rw_handler_write_task(struct aws_channel_task *task, void *arg, enum aws_task_status task_status) { (void)task; (void)task_status; struct rw_handler_write_task_args *write_task_args = arg; struct aws_io_message *msg = aws_channel_acquire_message_from_pool( write_task_args->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, write_task_args->buffer->len); struct aws_byte_cursor write_buffer = aws_byte_cursor_from_buf(write_task_args->buffer); aws_byte_buf_append(&msg->message_data, &write_buffer); aws_channel_slot_send_message(write_task_args->slot, msg, AWS_CHANNEL_DIR_WRITE); aws_mem_release(write_task_args->handler->alloc, write_task_args); } void rw_handler_write(struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buffer) { struct rw_test_handler_impl *handler_impl = handler->impl; if (!handler_impl->event_loop_driven || aws_channel_thread_is_callers_thread(slot->channel)) { struct aws_io_message *msg = 
aws_channel_acquire_message_from_pool(slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, buffer->len); struct aws_byte_cursor write_buffer = aws_byte_cursor_from_buf(buffer); aws_byte_buf_append(&msg->message_data, &write_buffer); aws_channel_slot_send_message(slot, msg, AWS_CHANNEL_DIR_WRITE); } else { struct rw_handler_write_task_args *write_task_args = aws_mem_acquire(handler->alloc, sizeof(struct rw_handler_write_task_args)); write_task_args->handler = handler; write_task_args->buffer = buffer; write_task_args->slot = slot; aws_channel_task_init(&write_task_args->task, s_rw_handler_write_task, write_task_args, "rw_handler_write"); aws_channel_schedule_task_now(slot->channel, &write_task_args->task); } } struct increment_read_window_task_args { size_t window_update; struct aws_channel_handler *handler; struct aws_channel_slot *slot; struct aws_channel_task task; }; static void s_increment_read_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status task_status) { (void)task; (void)task_status; struct increment_read_window_task_args *increment_read_window_task_args = arg; struct rw_test_handler_impl *handler_impl = increment_read_window_task_args->handler->impl; handler_impl->window += increment_read_window_task_args->window_update; aws_channel_slot_increment_read_window( increment_read_window_task_args->slot, increment_read_window_task_args->window_update); aws_mem_release(increment_read_window_task_args->handler->alloc, increment_read_window_task_args); } void rw_handler_trigger_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t window_update) { struct rw_test_handler_impl *handler_impl = handler->impl; if (!handler_impl->event_loop_driven || aws_channel_thread_is_callers_thread(slot->channel)) { handler_impl->window += window_update; aws_channel_slot_increment_read_window(slot, window_update); } else { struct increment_read_window_task_args *increment_read_window_task_args = aws_mem_acquire(handler->alloc, sizeof(struct increment_read_window_task_args)); increment_read_window_task_args->handler = handler; increment_read_window_task_args->window_update = window_update; increment_read_window_task_args->slot = slot; aws_channel_task_init( &increment_read_window_task_args->task, s_increment_read_window_task, increment_read_window_task_args, "increment_read_window_task"); aws_channel_schedule_task_now(slot->channel, &increment_read_window_task_args->task); } } bool rw_handler_shutdown_called(struct aws_channel_handler *handler) { struct rw_test_handler_impl *handler_impl = handler->impl; return aws_atomic_load_int(&handler_impl->shutdown_called); } bool rw_handler_increment_read_window_called(struct aws_channel_handler *handler) { struct rw_test_handler_impl *handler_impl = handler->impl; return handler_impl->increment_read_window_called; } int rw_handler_last_error_code(struct aws_channel_handler *handler) { struct rw_test_handler_impl *handler_impl = handler->impl; return aws_atomic_load_int(&handler_impl->shutdown_error); } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/read_write_test_handler.h000066400000000000000000000032151456575232400260340ustar00rootroot00000000000000#ifndef AWS_READ_WRITE_TEST_HANDLER #define AWS_READ_WRITE_TEST_HANDLER /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include struct aws_atomic_var; struct aws_byte_buf; struct aws_channel_handler; struct aws_channel_slot; struct aws_condition_variable; typedef struct aws_byte_buf(rw_handler_driver_fn)( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *data_read, void *ctx); struct aws_channel_handler *rw_handler_new( struct aws_allocator *allocator, rw_handler_driver_fn *on_read, rw_handler_driver_fn *on_write, bool event_loop_driven, size_t window, void *ctx); void rw_handler_enable_wait_on_destroy( struct aws_channel_handler *handler, struct aws_atomic_var *destroy_called, struct aws_condition_variable *condition_variable); void rw_handler_write(struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *buffer); void rw_handler_trigger_read(struct aws_channel_handler *handler, struct aws_channel_slot *slot); bool rw_handler_shutdown_called(struct aws_channel_handler *handler); bool rw_handler_increment_read_window_called(struct aws_channel_handler *handler); void rw_handler_trigger_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t window_update); void increment_read_window_task(void *arg, enum aws_task_status task_status); int rw_handler_last_error_code(struct aws_channel_handler *handler); #endif /* AWS_READ_WRITE_TEST_HANDLER */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/000077500000000000000000000000001456575232400230135ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/DigiCertGlobalRootCA.crt.pem000066400000000000000000000024721456575232400301750ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG 9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt 43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg 06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/ca_root.cnf000066400000000000000000000101121456575232400251240ustar00rootroot00000000000000# OpenSSL root CA configuration file. # Copy to `/root/ca/openssl.cnf`. [ ca ] # `man ca` default_ca = CA_default [ CA_default ] # Directory and file locations. dir = ./ certs = $dir/certs crl_dir = $dir/crl new_certs_dir = $dir/ database = $dir/index.txt serial = $dir/serial RANDFILE = $dir/private/.rand # The root key and root certificate. 
private_key = $dir/ca_root.key certificate = $dir/ca_root.crt # For certificate revocation lists. crlnumber = $dir/crlnumber crl = $dir/crl/ca.crl.pem crl_extensions = crl_ext default_crl_days = 824 # SHA-1 is deprecated, so use SHA-2 instead. default_md = sha256 name_opt = ca_default cert_opt = ca_default default_days = 824 preserve = no policy = policy_strict [ policy_strict ] # The root CA should only sign intermediate certificates that match. # See the POLICY FORMAT section of `man ca`. countryName = match stateOrProvinceName = match organizationName = match organizationalUnitName = optional commonName = supplied emailAddress = optional [ policy_loose ] # Allow the intermediate CA to sign a more diverse range of certificates. # See the POLICY FORMAT section of the `ca` man page. countryName = optional stateOrProvinceName = optional localityName = optional organizationName = optional organizationalUnitName = optional commonName = supplied emailAddress = optional [ req ] # Options for the `req` tool (`man req`). default_bits = 2048 distinguished_name = req_distinguished_name string_mask = utf8only # SHA-1 is deprecated, so use SHA-2 instead. default_md = sha256 # Extension to add when the -x509 option is used. x509_extensions = v3_ca [ req_distinguished_name ] # See . countryName = Country Name (2 letter code) stateOrProvinceName = State or Province Name localityName = Locality Name 0.organizationName = Organization Name organizationalUnitName = Organizational Unit Name commonName = Common Name emailAddress = Email Address # Optionally, specify some defaults. countryName_default = GB stateOrProvinceName_default = England localityName_default = 0.organizationName_default = Alice Ltd organizationalUnitName_default = emailAddress_default = [ v3_ca ] # Extensions for a typical CA (`man x509v3_config`). subjectKeyIdentifier = hash authorityKeyIdentifier = keyid:always,issuer basicConstraints = critical, CA:true keyUsage = critical, digitalSignature, cRLSign, keyCertSign [ v3_intermediate_ca ] # Extensions for a typical intermediate CA (`man x509v3_config`). subjectKeyIdentifier = hash authorityKeyIdentifier = keyid:always,issuer basicConstraints = critical, CA:true, pathlen:1 keyUsage = critical, digitalSignature, cRLSign, keyCertSign [ usr_cert ] # Extensions for client certificates (`man x509v3_config`). basicConstraints = CA:FALSE nsCertType = client, email nsComment = "OpenSSL Generated Client Certificate" subjectKeyIdentifier = hash authorityKeyIdentifier = keyid,issuer keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment extendedKeyUsage = clientAuth, emailProtection [ server_cert ] # Extensions for server certificates (`man x509v3_config`). basicConstraints = CA:FALSE nsCertType = server nsComment = "OpenSSL Generated Server Certificate" subjectKeyIdentifier = hash authorityKeyIdentifier = keyid,issuer:always keyUsage = critical, digitalSignature, keyEncipherment extendedKeyUsage = serverAuth [ crl_ext ] # Extension for CRLs (`man x509v3_config`). authorityKeyIdentifier=keyid:always [ ocsp ] # Extension for OCSP signing certificates (`man ocsp`). 
basicConstraints = CA:FALSE subjectKeyIdentifier = hash authorityKeyIdentifier = keyid,issuer keyUsage = critical, digitalSignature extendedKeyUsage = critical, OCSPSigning aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/ca_root.crt000066400000000000000000000027101456575232400251530ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIEGDCCAwCgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBnDELMAkGA1UEBhMCVVMx EzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDzANBgNVBAoM BkFtYXpvbjENMAsGA1UECwwEU0RLczEUMBIGA1UEAwwLbG9jYWxob3N0Q0ExMDAu BgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNvbTAe Fw0yMjAxMzAyMTM4MTVaFw0yNDA1MDMyMTM4MTVaMIGcMQswCQYDVQQGEwJVUzET MBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRQwEgYDVQQDDAtsb2NhbGhvc3RDQTEwMC4G CSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6b24uY29tMIIB IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwvqrCMmDpJCYFGYVHJiBKHUA TxebF3AVVuvQZCNFCaAbEksLuhHoqbcf7BMlbtmrOa3YuefnJ52EsvJHB/AAG3si 3Svg7IBSpqyZRGBmDsEcAeNElvWXQNxSzaJWmqHFeX6TC6fj3zsHKH1gWEIM33TT 257tU6B+JpCPU3fcfo6XCApk1W+bVOsdISzmkpRD7tSsV03tokAFgpNTqmW6TgEE B/c8RtCK0PGjEitv3Dq5suWCgvgIqLmg1QQf3V0lE29243SVBWjjPI3dhr7gkPgi 3+jc/VeIKAoWqEs+4nfDCkWBG37GPSbBGHy28+S9+XTEWT19T4ZE88thNM3biQID AQABo2MwYTAdBgNVHQ4EFgQUi6R/kkcXdBIKVyqxYKrzunmcXPkwHwYDVR0jBBgw FoAUi6R/kkcXdBIKVyqxYKrzunmcXPkwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B Af8EBAMCAYYwDQYJKoZIhvcNAQELBQADggEBAGjp4FDCXALzP6H8yn1QE9mAwt0z HrO0DP5C1w0e/ncpJmDyoeG9i5ETImGrY9+xJ7aDREWcDboez6X3Cej+aA36P06M BoxoEMrWheq1hDgzSePHbmtMfYKmTAReKvvsB5a02CxzXGBJWcKV4qPQg7GShNIi 7DIwzxbcg2J2s0r8HBg3/V1DEOPFFFBbYTFfGoqEQ2Lr/6+qQsZJVKiqnn8zN6yb N4HCdNLvqdLcxNNN2S81p3JrTIsd3xOM/OMozaBjOG6IS51nugAOCZD1H8qS2rCG 2FhL6vYCCOqkmZmTerUaPM5DSO0GK9vfV24vl03zc+1/d0U6WWi/VuOOUKQ= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/ec_unittests.crt000066400000000000000000000015631456575232400262430ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIICYDCCAgagAwIBAgIJAMhHikin3AYVMAoGCCqGSM49BAMCMIGaMQswCQYDVQQG EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0G A1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhvc3Qx MDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNv bTAeFw0yMjAyMDgxODA4NTJaFw0yNDA1MTIxODA4NTJaMIGaMQswCQYDVQQGEwJV UzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UE CgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhvc3QxMDAu BgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNvbTBZ MBMGByqGSM49AgEGCCqGSM49AwEHA0IABCOVOKo+/0+07kUwDp6BQ+6vqYcQhkgL Lj1hnKyRgsK9Na62vyb48RR62eGVau8x/u4hRNUzcQFQZT+k3YGQdLijMzAxMBMG A1UdJQQMMAoGCCsGAQUFBwMBMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATAK BggqhkjOPQQDAgNIADBFAiBV/D57WihCxkW6RllW8va1TAkDFoCFQZ0ZBIFuD2OA /AIhANSK917av7vJszNr63czldt33YSNjMikb2fhiZkzqROj -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/ec_unittests.key000066400000000000000000000003431456575232400262360ustar00rootroot00000000000000-----BEGIN EC PRIVATE KEY----- MHcCAQEEILPwDoMGFMzvhek7rPPqeL0Ns2cuNAJ9w+am7/L8BDxqoAoGCCqGSM49 AwEHoUQDQgAEI5U4qj7/T7TuRTAOnoFD7q+phxCGSAsuPWGcrJGCwr01rra/Jvjx FHrZ4ZVq7zH+7iFE1TNxAVBlP6TdgZB0uA== -----END EC PRIVATE KEY----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/ec_unittests.p12000066400000000000000000000021461456575232400260530ustar00rootroot000000000000000b0( *H 00 *H 00 *H 0 *H  0? v#7|[~le_Rb}}SG? m??(*^bes`_4t{9}KZ8rƘ^Q[$5^9jNuto,Di? 
hٹszQs4z",ПW*hd9@4m V90j(l.6|jq.G%\@ѹb`0bEO gA} serial # Generating a key for the new ca_root openssl genrsa -out ca_root.key 2048 # Generate the ca_root certificate openssl req -config ca_root.cnf \ -key ca_root.key \ -new -x509 -days 824 -sha256 -extensions v3_ca \ -out ca_root.crt \ -set_serial 00 \ -subj '/C=US/ST=Washington/L=Seattle/O=Amazon/OU=SDKs/CN=localhostCA/emailAddress=aws-sdk-common-runtime@amazon.com' # Generate a private key for the server openssl genrsa -out server.key 2048 # Generate a certificate signing request for the server openssl req -new -sha256 \ -key server.key \ -out server.csr \ -set_serial 02 \ -subj '/C=US/ST=Washington/L=Seattle/O=Amazon/OU=SDKs/CN=localhost/emailAddress=aws-sdk-common-runtime@amazon.com' # Sign the server signing request with ca_root yes | openssl ca -config ca_root.cnf \ -extensions server_cert \ -days 824 -notext -md sha256 \ -in server.csr \ -out server.crt # Generate a certificate chain containing the ca_root and server certificates cat server.crt ca_root.crt > server_chain.crt # Generate other unittest certificate variations for base in unittests ec_unittests; do openssl req -x509 -new \ -key $base.key \ -config unittests.conf \ -out $base.crt \ -days 824 openssl pkcs8 -topk8 \ -out $base.p8 \ -in $base.key \ -nocrypt openssl pkcs12 -export \ -out $base.p12 \ -inkey $base.key \ -in $base.crt \ -password pass:1234 done # Copy the generated certificates and keys to the resources folder cd .. cp certGeneration/ca_root.crt ./ca_root.crt cp certGeneration/server.crt ./server.crt cp certGeneration/server.key ./server.key cp certGeneration/server_chain.crt ./server_chain.crt cp certGeneration/server.crt ./server.crt for base in unittests ec_unittests; do cp certGeneration/$base.crt ./$base.crt cp certGeneration/$base.p8 ./$base.p8 cp certGeneration/$base.p12 ./$base.p12 done # Clean up the certGeneration folder rm -r certGeneration aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/server.crt000066400000000000000000000034211456575232400250330ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFCzCCA/OgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgZwxCzAJBgNVBAYTAlVT MRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxlMQ8wDQYDVQQK DAZBbWF6b24xDTALBgNVBAsMBFNES3MxFDASBgNVBAMMC2xvY2FsaG9zdENBMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w HhcNMjIwMTMwMjEzODE1WhcNMjQwNTAzMjEzODE1WjCBiDELMAkGA1UEBhMCVVMx EzARBgNVBAgMCldhc2hpbmd0b24xDzANBgNVBAoMBkFtYXpvbjENMAsGA1UECwwE U0RLczESMBAGA1UEAwwJbG9jYWxob3N0MTAwLgYJKoZIhvcNAQkBFiFhd3Mtc2Rr LWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB DwAwggEKAoIBAQDUKerUt2d0v3wYfFzS+JCoU+etqaK0lrBClonLS7RmaYGDqnBw 5pDpRfmBHw5D2kdc/4vHU/8wQtQGk4TsCjj3VyIqfd1xwO319/O9tepDqgawBj0n yFMdMjTZu1eS6rttCT1p6MFzXevwppS70ARPpFkSpvQhLYWNMeJjgAuDkH1Fvv96 2a3/v2m/+yu40Rszhn8tSRHOm8HeKlXVLlzauKIiXHzpcmQCti9hn6DQkJHemhnn hCubouk0YtR9qczxeOw65dWeY4q5gBD6wrmIxYoy/Ms/0O8EjUfnThAhCYbq0twi j39gIUme/ZMhzoOSFZA1Mdt+yYj6U3CNjqcLAgMBAAGjggFnMIIBYzAJBgNVHRME AjAAMBEGCWCGSAGG+EIBAQQEAwIGQDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBH ZW5lcmF0ZWQgU2VydmVyIENlcnRpZmljYXRlMB0GA1UdDgQWBBQjhxwK68xFkbww GhKlGBpi6UwHYTCByQYDVR0jBIHBMIG+gBSLpH+SRxd0EgpXKrFgqvO6eZxc+aGB oqSBnzCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNV BAcMB1NlYXR0bGUxDzANBgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEUMBIG A1UEAwwLbG9jYWxob3N0Q0ExMDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9u LXJ1bnRpbWVAYW1hem9uLmNvbYIBADAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAww CgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggEBAE9GjLnlVc5rmcbQZZI8BgZA 
UfiOSR60Bg2LiIH7xa2InqNmIg7yT+2n8Pd61PXIjCJZqZJHWcRG0vMzoQvmMpeR VCTZdobHev79+A6YRAjm5aIqT6xQap8FPQzdZ1WogenkmeRj/GkwCXhbFmad/oTf K3CdCve3hQzJR5k1hBI2nmF7oWSKFAFDJktVe3w9v/2G1UJ4N02dzOJU8E5Tcx15 bNHP58KXJBK/er5Qs6xZKvG+GIfLX/IuaZm9FYP9WlOsObBlOZEn9dUYPpQwZn7k JIv7OBkYs8pj9Sx/EVW4aG0xn61HSevN/x0GSriRlyOP8WnUXXG1/X5vFoC81+Y= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/server.key000066400000000000000000000032131456575232400250320ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEA1Cnq1LdndL98GHxc0viQqFPnramitJawQpaJy0u0ZmmBg6pw cOaQ6UX5gR8OQ9pHXP+Lx1P/MELUBpOE7Ao491ciKn3dccDt9ffzvbXqQ6oGsAY9 J8hTHTI02btXkuq7bQk9aejBc13r8KaUu9AET6RZEqb0IS2FjTHiY4ALg5B9Rb7/ etmt/79pv/sruNEbM4Z/LUkRzpvB3ipV1S5c2riiIlx86XJkArYvYZ+g0JCR3poZ 54Qrm6LpNGLUfanM8XjsOuXVnmOKuYAQ+sK5iMWKMvzLP9DvBI1H504QIQmG6tLc Io9/YCFJnv2TIc6DkhWQNTHbfsmI+lNwjY6nCwIDAQABAoIBABsD72QAqorCKzIO +97dScY1UOXbGN235l/EPW2GiUUVICm81S57HW6mu1uGsdFQBRBBdfH+rxrdF5Ry ylUoBBLypGYWutDWSTatyPqaJGdTSiC12qmrJ3IrR3GGGmOZwh3jNE+9FIz1I6rz +zJjDXl6quDmI2XqHvxYosQatNyemO8yWI/rDmDRT/5dCu15aA//igmcw4/2aqS4 A7PHMLsZdLP7InLmUi+1O9rUVhnkWXCecscbxPcGyhuVp5BLaKF/SIGGzZRnDdwM VY5rHsVq3W3PgTwinoN6jEYBRY4HkCgw2x7IYxbKQPfxNSOJA9OnYikzcTUnMI+y kH+ohKECgYEA95Wf2z76z2nBlGGmnkHqqGeC2ZFzrfQPKAV1OYodgeiWMYnKW14r kZq/IkaI53np6UtVwy78xEiGm6IIGOQKeQzjehVIsPzTyU2Z/8AakMw88Ognrn3n 0XK832aGt22QZUtL8l6/eJqNFmvB3nJJ5mnuhTENZLVkxPvSNsZm/BECgYEA22AT nUcEn7OqbajIScZnpMnBoPjKhmeKplPymudeZCfIcH2Uet/kGbq4BfTEa2lpWwZP L6655VJSC4sPghUJePv+NTpnRVtx7e5FLaTHWhRP0f5TcGTm+phBH5EVCMcx5Lyb w5VCjD4BqcLL4q2sNPUB5hgArpnFSMxvnG2zPVsCgYBhMdwPtinAQ0Q7fBptBnOY qQo1X2SKZcWNcJcUf6QV27kauX03YUC5aIagbhBQzwuFCo4gkdGpPM+bSYDkRKHX 47dptfFTie2amVxbio7jSUqjSIBLgCIBl4mOKjG40Mpe+bBW9ds0/Xdl1efF7BgW 3H9Gynr3jj6BFlrwJfNIAQKBgCD0ZN3qjjpDbiW9BiF3CZjKqii5VwsrwLtNvxMg EKDxDP5BDy4KNiTbVMuoQl10X1xKLVkXYVueqy6KvFCpQlwHVt++H0de/lVkxsWd jEju5jLAht5KehxJBPdt/v/rlIYtAyU1ZeLVacCLv78Uodrut1NOZcPU27sc4uNX 42YjAoGAEI4O+Y0F+FT/7gYyMytPMGfcxLSkh2ifQZjC7hMuAfVFVj+XzkXHrZ5a v2ClKE4//gIsNQMX//TkO07SE6SK2evHrthVcZaMp+rw4XmjvyKWvIzRCgMp3Eqi LsTCWkh80e4O1MfY2Pc6UqQAhp9lmkP0ZRIeVtJmfwWLewo/efk= -----END RSA PRIVATE KEY----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/server_chain.crt000066400000000000000000000063311456575232400262000ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFCzCCA/OgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgZwxCzAJBgNVBAYTAlVT MRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxlMQ8wDQYDVQQK DAZBbWF6b24xDTALBgNVBAsMBFNES3MxFDASBgNVBAMMC2xvY2FsaG9zdENBMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w HhcNMjIwMTMwMjEzODE1WhcNMjQwNTAzMjEzODE1WjCBiDELMAkGA1UEBhMCVVMx EzARBgNVBAgMCldhc2hpbmd0b24xDzANBgNVBAoMBkFtYXpvbjENMAsGA1UECwwE U0RLczESMBAGA1UEAwwJbG9jYWxob3N0MTAwLgYJKoZIhvcNAQkBFiFhd3Mtc2Rr LWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB DwAwggEKAoIBAQDUKerUt2d0v3wYfFzS+JCoU+etqaK0lrBClonLS7RmaYGDqnBw 5pDpRfmBHw5D2kdc/4vHU/8wQtQGk4TsCjj3VyIqfd1xwO319/O9tepDqgawBj0n yFMdMjTZu1eS6rttCT1p6MFzXevwppS70ARPpFkSpvQhLYWNMeJjgAuDkH1Fvv96 2a3/v2m/+yu40Rszhn8tSRHOm8HeKlXVLlzauKIiXHzpcmQCti9hn6DQkJHemhnn hCubouk0YtR9qczxeOw65dWeY4q5gBD6wrmIxYoy/Ms/0O8EjUfnThAhCYbq0twi j39gIUme/ZMhzoOSFZA1Mdt+yYj6U3CNjqcLAgMBAAGjggFnMIIBYzAJBgNVHRME AjAAMBEGCWCGSAGG+EIBAQQEAwIGQDAzBglghkgBhvhCAQ0EJhYkT3BlblNTTCBH ZW5lcmF0ZWQgU2VydmVyIENlcnRpZmljYXRlMB0GA1UdDgQWBBQjhxwK68xFkbww GhKlGBpi6UwHYTCByQYDVR0jBIHBMIG+gBSLpH+SRxd0EgpXKrFgqvO6eZxc+aGB oqSBnzCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNV 
BAcMB1NlYXR0bGUxDzANBgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEUMBIG A1UEAwwLbG9jYWxob3N0Q0ExMDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9u LXJ1bnRpbWVAYW1hem9uLmNvbYIBADAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAww CgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggEBAE9GjLnlVc5rmcbQZZI8BgZA UfiOSR60Bg2LiIH7xa2InqNmIg7yT+2n8Pd61PXIjCJZqZJHWcRG0vMzoQvmMpeR VCTZdobHev79+A6YRAjm5aIqT6xQap8FPQzdZ1WogenkmeRj/GkwCXhbFmad/oTf K3CdCve3hQzJR5k1hBI2nmF7oWSKFAFDJktVe3w9v/2G1UJ4N02dzOJU8E5Tcx15 bNHP58KXJBK/er5Qs6xZKvG+GIfLX/IuaZm9FYP9WlOsObBlOZEn9dUYPpQwZn7k JIv7OBkYs8pj9Sx/EVW4aG0xn61HSevN/x0GSriRlyOP8WnUXXG1/X5vFoC81+Y= -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIEGDCCAwCgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBnDELMAkGA1UEBhMCVVMx EzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDzANBgNVBAoM BkFtYXpvbjENMAsGA1UECwwEU0RLczEUMBIGA1UEAwwLbG9jYWxob3N0Q0ExMDAu BgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNvbTAe Fw0yMjAxMzAyMTM4MTVaFw0yNDA1MDMyMTM4MTVaMIGcMQswCQYDVQQGEwJVUzET MBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRQwEgYDVQQDDAtsb2NhbGhvc3RDQTEwMC4G CSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6b24uY29tMIIB IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwvqrCMmDpJCYFGYVHJiBKHUA TxebF3AVVuvQZCNFCaAbEksLuhHoqbcf7BMlbtmrOa3YuefnJ52EsvJHB/AAG3si 3Svg7IBSpqyZRGBmDsEcAeNElvWXQNxSzaJWmqHFeX6TC6fj3zsHKH1gWEIM33TT 257tU6B+JpCPU3fcfo6XCApk1W+bVOsdISzmkpRD7tSsV03tokAFgpNTqmW6TgEE B/c8RtCK0PGjEitv3Dq5suWCgvgIqLmg1QQf3V0lE29243SVBWjjPI3dhr7gkPgi 3+jc/VeIKAoWqEs+4nfDCkWBG37GPSbBGHy28+S9+XTEWT19T4ZE88thNM3biQID AQABo2MwYTAdBgNVHQ4EFgQUi6R/kkcXdBIKVyqxYKrzunmcXPkwHwYDVR0jBBgw FoAUi6R/kkcXdBIKVyqxYKrzunmcXPkwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B Af8EBAMCAYYwDQYJKoZIhvcNAQELBQADggEBAGjp4FDCXALzP6H8yn1QE9mAwt0z HrO0DP5C1w0e/ncpJmDyoeG9i5ETImGrY9+xJ7aDREWcDboez6X3Cej+aA36P06M BoxoEMrWheq1hDgzSePHbmtMfYKmTAReKvvsB5a02CxzXGBJWcKV4qPQg7GShNIi 7DIwzxbcg2J2s0r8HBg3/V1DEOPFFFBbYTFfGoqEQ2Lr/6+qQsZJVKiqnn8zN6yb N4HCdNLvqdLcxNNN2S81p3JrTIsd3xOM/OMozaBjOG6IS51nugAOCZD1H8qS2rCG 2FhL6vYCCOqkmZmTerUaPM5DSO0GK9vfV24vl03zc+1/d0U6WWi/VuOOUKQ= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/suppressions-asan.txt000066400000000000000000000001051456575232400272450ustar00rootroot00000000000000interceptor_via_lib:libsofthsm2.so interceptor_via_fun:C_OpenSession aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/suppressions-lsan.txt000066400000000000000000000000221456575232400272560ustar00rootroot00000000000000leak:CRYPTO_zallocaws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/test.csr000066400000000000000000000021121456575232400244770ustar00rootroot00000000000000-----BEGIN CERTIFICATE REQUEST----- MIIC8zCCAdsCAQAwgZYxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UE BwwHU2VhdHRsZTEPMA0GA1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYD VQQDDA1Db21tb25SdW50aW1lMTAwLgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1v bi1ydW50aW1lQGFtYXpvbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK AoIBAQDF2VljJtov9V/7CSRsj55JTPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7 AYu3qfL8EosalH5IMYEizDI2WRH63eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3Qn drDW5v6u7h/VNGpcGVxXm54fV+1FGvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkU bE4RSVzGuh6K1Z8mhotLnGmOrIusc3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3 GrGYxFaO3p7VR8ADMTqc8alg8JSFQMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQr sQ2UY3XA+ZV8UcmHlMV/jnRtainnAgMBAAGgFzAVBgkqhkiG9w0BCQIxCAwGQW1h em9uMA0GCSqGSIb3DQEBCwUAA4IBAQAi8OeGiW+OiS2TadfVfLQKn9qlZn7JfwBv 3TQSJoN7xctKZOxoAXz1z6MVXgylT0sgdpUVIUDbqOFu3hkIUFq34R+kyy0A7rMv 0aEOYJDvs1V+soPxaXKN1aE7nCM24HX9TfKop4m6nJmVWZlaP+AiXU/Z2noaAdvy 
160ojjtrpi49aqA6HMk2aSaXeZK8OVuGiMNLNw9P3Q15cgWZJ7CcFXxDg9PwWf/g /hJpdh9c8VI2lWmZUn5gQcWxKamg/HnNkDld1EJacC+SqLlUZezPSSjguBKw8KkL qCllKIPMJlpD6QpbuFlfy7ApVTI1jPXVZNKuKMhJriRBenqO7mjv -----END CERTIFICATE REQUEST----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert0.pem000066400000000000000000000025021456575232400254320ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFCOeKfdNgIjbIHCy5fwAGTvlijH5MA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjU0NFoXDTIyMDQwNzIxMjU0NFowgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFULcaIj8kan9Ilp1wG+VO1mdnKkkRGc tzLAdxTVKcV0hNarsE14b9V0s5j5bajG2FqRH6P8RReg9VU7VrIrMFUI4KMeTQFP DqtYyBSo/4OHMFdOhxGt/AFmJEUd5MlGloW19DykrP6BqMtFHaOVGtmKHp0wU4dp drfB89uwDzLm68zfZ/iqe9QvS4uSdUqIUB8q0TvW89Mf3Xl5CThKClN/vgd0o90d 1ZCkPfaea0cRBeQ2DbmTuS/mj1ea/uEiWhnfpyAiICajnZhWwuqmqgaHWAHVutPY mXc6MxMavLqjG93aiympvrnbg+YQD3kzmU2aHUqwhoMYhEp5ar18TjU= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert1.pem000066400000000000000000000025021456575232400254330ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFC4rpzcSTOmPsvKTWzsv3U2+WBCqMA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjYxMFoXDTIyMDQwNzIxMjYxMFowgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAD/jemXNXeFGTjxc1oOp/8+VRoktnVlA s0SYPcD11HsdTQXEYjPHS6KV/eDjt1iFdFDeydsm7McTGQ/X6LtnDzrXsnfWKDn6 Qd4iWXs6u5xbRvSAvbSoxAM08goUmuwiWCBsDqKrjZaGJ/myLsjw5ejNrZCsfb4T k3X2Vw+Mwu9BbOnYdicx2YcHKcW9OPSCa0C9UryBFT++q8wWaQv8i3WjT03S8st0 vmTgoG3HCfaFKzcxBaSMwmqeeZq5IAlumxRuAyCYNiRABkRFYYptVfxb7QBDu+HM 4I8F+3cRugk3ucRmPw3blMLUwqNhRF+GQLFvtHMBuPoUO+jK9r7c+OA= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert2.pem000066400000000000000000000025021456575232400254340ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFC4vrR+BSuhD3DzQe1SQk2se67wVMA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN 
BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjcwNVoXDTIyMDQwNzIxMjcwNVowgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAHy1O04ZvBp/dBh9OBiwkwyJFQ3gjHD+ nRzeY3WkfilUuoxivNpX6nIMYJAPKJjWBJpxrMY4yoa/YIgnne8TvmTLWSlN24XC 1ZaHKAAsHcuNdJtq4j67pn2txlUacwr0rekalvUSUxHooCLYpSJ+N7urrv1CjxiL UHbV8lXYyXo6ijgTafI4nFFeCFfiOOvfVfEfqfHDplDVNnCbqyzWnVUYymvzmiBO 1bVjBfOrwER6Y1DmSzcvsYc7weQiL3ux65y4Os0sqHtc9pIFw4ypDBTRnWGri4NZ +k/jh/DZdjHnWiidyPSEexJSCNp7hKsXE6YskNz9NdRqC3cijHNNatc= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert3.pem000066400000000000000000000025021456575232400254350ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFFt8AsZqy38q1AtegPCE2HeQSk2lMA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjcwN1oXDTIyMDQwNzIxMjcwN1owgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJg/CTAvsUy2OQ3UQYL5bxKwCFOTipFB TxjT8PlKSmCuf2SEYrKJIUNYY/bXmMOHdcw1MveAgciASiS6QFgsOVDuccCeFRww Q/7gdOGa9Rd+9WVdC+4r55VFEeZ+j5syeVY7bt17GNt8tRpntCstEge/CeUwqkdd fv0ZaGkD05DOAmlRhjNmqz457TkoC5DclGxnhLqVpYsj8imNR1/H4CPv9lhC1ADK D96lZ8FpEedacplHXfT2YPPAwkRGh/PU4g3BC2GjI0V+/udEtUR1j33Fg+7qIXsj RXoH+Qi3Gy+g2DQEA/uGKgwk+lRyPkvSBjsWzB8dWLFdRKfLnozxJYc= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert4.pem000066400000000000000000000025021456575232400254360ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFBW027FrmfFAXM5lKbmtXfci96TTMA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjcwOVoXDTIyMDQwNzIxMjcwOVowgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 
3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAHA9+PhX9LJ2/iFuJET/su9JyHgu7ogp kn8l2xCdWCGf8OnYmWYoTA4vW7KCqqQIXxoLyAPQYHDSRFUPOiZD6q20gxqtVjc+ 2H3/M2zt6okRYA694jT0Owtpnc54ECDdU1BWdVTqiulomlPF4eWRZzqsdrQhtB/k mhHfomBDbwy01AcYommaBrj9YGs3sWWidYxXwn0a8JaNRH0lmjKmtAH05eVUsuCp WW+6kSX3HLrxc+Y126w1LJkKwPDMZOlk5IKbzaN5X7teca7U4FLxm913gS6LqlWX Bk0CglfjPSI7Obc0Vc/2nXEe31BhW5oPswvDOL3pEyXftool89NYE20= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert5.pem000066400000000000000000000025021456575232400254370ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFB405mAMtdzB8lORR8pjQd3SMXIEMA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjcxMloXDTIyMDQwNzIxMjcxMlowgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBACVbMPL5+jAq5pIaIw9usqE9uyusvqm7 FofQiQJWWNTsIwiY15bgRxSKJOK1EC9L0hmfkAHTl+4UO+MTcr4ZxGgkF7X/yotU 9vzah1jWpufj6EjH6Hph2f8oF+yDjCbKWCKQc3eFFVypjw/Ki/GRtG/hB0ayZSVr sTqDEfFai+p9gms11jJlZWso9a7Q+FohMs4xBquWjCiwD2AoO/ULFah8YGmjzfrw dBwZk66Ml6jVG/NMQp3K6mb1sku0YAWtLPZ78t/8bsQDRbjpmP0JLfv0ZP4UppXf e2D1JNwQUF5kCPYviDABNJxR6SmI1Rj8op1ZnYw84XQ8EILws6QsRVI= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert6.pem000066400000000000000000000025021456575232400254400ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFAz9VV/Jdj/EonoAzyilXDQSjzcoMA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjcxNFoXDTIyMDQwNzIxMjcxNFowgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEAJ3X87+cP0n9bpXg8lpjy6Fp1pr9GX dfiBfwvLQ7vBGm7FSDprK9qmPoV/mz3DQwKiQsHEcD/BVJu/FuwD2LgkV8ePRAas 8xaBT+6WVSk+9kAVkPav4I8ht+BOVJgDZ1UKJPcMeVmLbFDmiWMJBq8Ue0wQGKHS ilEXjllpGHSzsvi+7bZgEgh7MorqPFJaB9FD2S6JWtqqBxcXD1H9CGsYO9mAC2+l 
4rpTBRzV7HYgQamTUZdm4sare66xRXqIXHX7P8XOffd8pfz/62tj58n0Z8bxbypF Z4IEJq2ClqelMEY0sJ307E6oHZt2jTtONwkM6T1clQopLOJOWA4rIQA= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testcert7.pem000066400000000000000000000025021456575232400254410ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtTCCAp0CFC2iBL/5J5IUNx3PPalPfZdAGEnpMA0GCSqGSIb3DQEBCwUAMIGW MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAcMB1NlYXR0bGUxDzAN BgNVBAoMBkFtYXpvbjENMAsGA1UECwwEU0RLczEWMBQGA1UEAwwNQ29tbW9uUnVu dGltZTEwMC4GCSqGSIb3DQEJARYhYXdzLXNkay1jb21tb24tcnVudGltZUBhbWF6 b24uY29tMB4XDTIxMDQwNzIxMjcxNloXDTIyMDQwNzIxMjcxNlowgZYxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UEBwwHU2VhdHRsZTEPMA0GA1UECgwG QW1hem9uMQ0wCwYDVQQLDARTREtzMRYwFAYDVQQDDA1Db21tb25SdW50aW1lMTAw LgYJKoZIhvcNAQkBFiFhd3Mtc2RrLWNvbW1vbi1ydW50aW1lQGFtYXpvbi5jb20w ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF2VljJtov9V/7CSRsj55J TPJr6l0pVJJgU9yidYA9jVgoYPUZyZSOk9Y7AYu3qfL8EosalH5IMYEizDI2WRH6 3eXIpFUKWYjwzRwuTq5jPQhZiTAXRlnOK3QndrDW5v6u7h/VNGpcGVxXm54fV+1F GvGEmwq5795GemAS4Gb2SklpbikWKxvhpRkUbE4RSVzGuh6K1Z8mhotLnGmOrIus c3zsyDlHiWnj9uq0m38PPJIfVnp5OC37INf3GrGYxFaO3p7VR8ADMTqc8alg8JSF QMNzoT/IJvUXMXnXRrbB/JdY3AGgL6PveKQrsQ2UY3XA+ZV8UcmHlMV/jnRtainn AgMBAAEwDQYJKoZIhvcNAQELBQADggEBALkpodruKp4Zbk/j68PpU1EFUiSB5mGM /dYXN13jx8JCN+f3n1V3csqvc7ZJE4H86azXuXbZ2+vqebJ1C1+JletGV+hcnstR OKwQUnGXRW+XUBToBBILlRz759SFp4GcmT/Rm9fxDq38UCV1q6/U4H4TFS/CkQMD nzEBo1HpmC7lP/tbUBSI7sqcw9zoIbIGhBjST2PYF52TCWNw0YIIr2JRmwCgB8Cg AXY6fBOnJfkpirFRuWKkR9fZ0AMGOA+9HmpdO2Mi/1vlJ3owaN67obVX7A7ViZjT RB7wQouCxSK2NQ+Xa6wuy86NHIjNna0ZvRZdU//pkBEIAbIv/ju56XI= -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testkey.pem000066400000000000000000000032131456575232400252050ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAxdlZYybaL/Vf+wkkbI+eSUzya+pdKVSSYFPconWAPY1YKGD1 GcmUjpPWOwGLt6ny/BKLGpR+SDGBIswyNlkR+t3lyKRVClmI8M0cLk6uYz0IWYkw F0ZZzit0J3aw1ub+ru4f1TRqXBlcV5ueH1ftRRrxhJsKue/eRnpgEuBm9kpJaW4p Fisb4aUZFGxOEUlcxroeitWfJoaLS5xpjqyLrHN87Mg5R4lp4/bqtJt/DzySH1Z6 eTgt+yDX9xqxmMRWjt6e1UfAAzE6nPGpYPCUhUDDc6E/yCb1FzF510a2wfyXWNwB oC+j73ikK7ENlGN1wPmVfFHJh5TFf450bWop5wIDAQABAoIBAQCXkowIihtd4n5F tBtTkDOTtH+EbWgx4bjCU30UeTBl94MybBVaTDSjhl75lZWUf37H3uzy0BBu/n4Q Sp+tQ0+jAZyoujzsUOu4j2qnj1/KZ1eeQJu3OQ5UBLZYBXY1LYKwQX3huOZC6Kr1 8yKQoKwF5T5ieOUezcVb+SThUUmQJL8OjzpVhPe/1a/hLVafFkYmT3eRLP3WJ0lq 02vORdfe8Q4Y5u28Tnidzeb7RV9gLp7SmPcuFWAidNPF47m9KyoDm4NNDaNojse8 O1dhWVo6IL9C3/o1xF/qXbzGXqcE3h/DKIUpYWlX/Q/aWBz1MFYUtDPurwbkUfAS R3S0YjoBAoGBAPa4FrALhC9VTrASe0I2tcTi74VpPH0WMV1HxDl5o2JSGNgVc5NH 1g9X9xHOEHpFAePpxQMoE9g/jkdnUMygHZrfqE1FiveHNYwov/VePpz/xvM4dfJQ FcE6b8Ko2nsmsGNFeQBBfsDDghURmvdkQQ2ASbUuBIeRFAXLL/iGv7H/AoGBAM1K o/M5pWNOGsa1buKsf9L4up6DJTZHnkc2cMqWVSnRapE2BSeyDiC5pryge988C15q zPhXcPK8SAzBG1BTq5djJp6xbOnRtpTICxjcbh1GhW56pIvZUYptANKSTQAyr2HB HHouXJnIZu/mfJimMgNksXa5z1mvkHi1OmPGPjgZAoGBAM0yIJR8j4z6i35/cIqK jDVbdVDlTMzQZ3AonzgaRdwhYc5eUQi+p3eNds0VMLkwqbjQFe5hJ9gkTAZxFE+M H/NOb2ENJN5oUw3mWnkN0E+tn99sJxTIrL9QBeHFpCjm7a8y+cdPscdLE+bIBLYb aMt81lDdHCFQdXG4I4a1M7wrAoGBALvsD5ietF7rKGetvguRjJE7Ttm7g1zkwma9 1xIb+2mEYeD7akykGXqrd45D+iCnfmC3s1nB8YrB/9JtEuu9K94wzTGHiQOhDVm0 pTPk1IfH/mzbp7S9An+MuKwJcKM1woXhmIff2LSNVm1Ut8mkmTOyASGFheG7bKyS eRjMaDhBAn95V7ZJ9SSEM61KMrmhcJgmmQAXo1tt4p2bmmMZk2GC6J+UiWF/7M60 rpQWiqSsdIMcwpLfAJRosTMpmLzLEdCriZtCj0k1PS5pltPGWbxRcn1MkMo55vB5 P3mZjNISQhKqtZI1aH+V7h9AZ5B5CB0OY9nKGLHAWi0wKepkARgj -----END RSA PRIVATE KEY----- 
aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testparse.crt000077500000000000000000000026131456575232400255440ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIID7DCCAtSgAwIBAgIJAIR9Lu1N/CaHMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD VQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEP MA0GA1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhv c3QxMDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9u LmNvbTAeFw0yMTA2MTYwNjE3MDBaFw0yMzA5MTgwNjE3MDBaMIGaMQswCQYDVQQG EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0G A1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhvc3Qx MDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNv bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANdqV0j4DkQDJULWEW8b s/znGqK2p9wthY8o4btL7nEhGUsMQyae+UwUBDGn0qUhCgEC3g7e8bg0Q2J+dleF BOnBfsU1obc7H+5oTf5R2gz3L0dgEjwBJM5IpfCgi2OHurU8UsEPe7KZTbhGdPfR 6CWE0yxWkXiH3dQ982dRGHEsPMPhmdksRFH2FEi9ghZiGEpEI55bCQiKQqBoA4gQ D2yFCTtylgQ19CYBg28d1n941xv2Ok+tyz7DvgEttEQr3BBdBf65QyDcyORABztU zhHfXyjrviQCtOj8NZu+wYDqxOxbbyBu5GDVbjhD3iJzh5Drqq8g4rAdT8IsjzSG 6nUCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMwEYIJbG9j YWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAHtJpITm1xMvQ1ifXh6Cde 41ENVPLeHi+aDd0C1M4Vk4vmdXfCQfa//KwlluqAOGjipXKaMaKVQ6mQOWTjbCk3 DHq3GJdHDhZ5L5qSe1Gs5ExwwuTzfytjUyw72/HvhNrzcWxuuEFIrrUSGyDs3/+f Ky1mUgpyF5mlTSgpipzIUdDoXEJmPu8G2nLqqFpaAg+iaICpmqQwjJ5pdaVcNItx SeU6GnSpUYbuBvlUNwz2F4oew1RtqVJKL/jQ41bCYRT9fHcrW4uTLG52RqkANI1V Qmria6PY5lpbZZiosYUBkkL01nNNxvbxNDYWRMYJx5RGHAaUhKlPQQtGprRIGhRF -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/testparse_crlf.crt000066400000000000000000000026421456575232400265510ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIID7DCCAtSgAwIBAgIJAIR9Lu1N/CaHMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD VQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEP MA0GA1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhv c3QxMDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9u LmNvbTAeFw0yMTA2MTYwNjE3MDBaFw0yMzA5MTgwNjE3MDBaMIGaMQswCQYDVQQG EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0G A1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhvc3Qx MDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNv bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANdqV0j4DkQDJULWEW8b s/znGqK2p9wthY8o4btL7nEhGUsMQyae+UwUBDGn0qUhCgEC3g7e8bg0Q2J+dleF BOnBfsU1obc7H+5oTf5R2gz3L0dgEjwBJM5IpfCgi2OHurU8UsEPe7KZTbhGdPfR 6CWE0yxWkXiH3dQ982dRGHEsPMPhmdksRFH2FEi9ghZiGEpEI55bCQiKQqBoA4gQ D2yFCTtylgQ19CYBg28d1n941xv2Ok+tyz7DvgEttEQr3BBdBf65QyDcyORABztU zhHfXyjrviQCtOj8NZu+wYDqxOxbbyBu5GDVbjhD3iJzh5Drqq8g4rAdT8IsjzSG 6nUCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMwEYIJbG9j YWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAHtJpITm1xMvQ1ifXh6Cde 41ENVPLeHi+aDd0C1M4Vk4vmdXfCQfa//KwlluqAOGjipXKaMaKVQ6mQOWTjbCk3 DHq3GJdHDhZ5L5qSe1Gs5ExwwuTzfytjUyw72/HvhNrzcWxuuEFIrrUSGyDs3/+f Ky1mUgpyF5mlTSgpipzIUdDoXEJmPu8G2nLqqFpaAg+iaICpmqQwjJ5pdaVcNItx SeU6GnSpUYbuBvlUNwz2F4oew1RtqVJKL/jQ41bCYRT9fHcrW4uTLG52RqkANI1V Qmria6PY5lpbZZiosYUBkkL01nNNxvbxNDYWRMYJx5RGHAaUhKlPQQtGprRIGhRF -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/unittests.conf000066400000000000000000000010501456575232400257200ustar00rootroot00000000000000[ req ] prompt = no default_md = sha256 distinguished_name = req_distinguished_name x509_extensions = v3_ext [ req_distinguished_name ] countryName = US stateOrProvinceName = Washington localityName = Seattle organizationName = Amazon organizationalUnitName = 
SDKs commonName = localhost emailAddress = aws-sdk-common-runtime@amazon.com [ v3_ext ] # iOS 13+ and macOS 10.15+ require Subject Alternative Name and ExtendedKeyUsage with serverAuth extendedKeyUsage = serverAuth subjectAltName = @alt_names [ alt_names ] DNS.1 = localhost IP.1 = 127.0.0.1 aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/unittests.crt000066400000000000000000000026131456575232400255710ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIID7DCCAtSgAwIBAgIJAPqdC/xmAMEJMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD VQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEP MA0GA1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhv c3QxMDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9u LmNvbTAeFw0yMjAxMzAyMTM4MTVaFw0yNDA1MDMyMTM4MTVaMIGaMQswCQYDVQQG EwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEPMA0G A1UECgwGQW1hem9uMQ0wCwYDVQQLDARTREtzMRIwEAYDVQQDDAlsb2NhbGhvc3Qx MDAuBgkqhkiG9w0BCQEWIWF3cy1zZGstY29tbW9uLXJ1bnRpbWVAYW1hem9uLmNv bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANdqV0j4DkQDJULWEW8b s/znGqK2p9wthY8o4btL7nEhGUsMQyae+UwUBDGn0qUhCgEC3g7e8bg0Q2J+dleF BOnBfsU1obc7H+5oTf5R2gz3L0dgEjwBJM5IpfCgi2OHurU8UsEPe7KZTbhGdPfR 6CWE0yxWkXiH3dQ982dRGHEsPMPhmdksRFH2FEi9ghZiGEpEI55bCQiKQqBoA4gQ D2yFCTtylgQ19CYBg28d1n941xv2Ok+tyz7DvgEttEQr3BBdBf65QyDcyORABztU zhHfXyjrviQCtOj8NZu+wYDqxOxbbyBu5GDVbjhD3iJzh5Drqq8g4rAdT8IsjzSG 6nUCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMwEYIJbG9j YWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAtHA9Fl4fLWmxgHBK2gu/s nWlVEwdFcaYNPKcT8EUKLhjsXbGPLRKheutrJrJ/beauL3gO8IrYwO5k1VHynCW2 osGYKRGTCIH1t9JMR0XA6Mo/w4Y57D/jY8vr3kQQHYiKS0fSOGxKe2RDaiCIWaol x5z+10qGwnqc+6cMlmt4JCnuDBdj5fmCqKdIdqzFSNM5HPKLh5szc2HGb4bmKNOW Ne7t7AxN5LhMSUvmGch0nxjb6I8TK+kyhVqO05tWUhCmC1PkAFVE+mWidjFz1/hu l6Ha095homzjr/cFpbnJlzaIfca2IDGFiF2bv1Q3TDWbSpBG3L9WL34DpdeJGQ4f -----END CERTIFICATE----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/unittests.key000077500000000000000000000032171456575232400255750ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA12pXSPgORAMlQtYRbxuz/Ocaoran3C2Fjyjhu0vucSEZSwxD Jp75TBQEMafSpSEKAQLeDt7xuDRDYn52V4UE6cF+xTWhtzsf7mhN/lHaDPcvR2AS PAEkzkil8KCLY4e6tTxSwQ97splNuEZ099HoJYTTLFaReIfd1D3zZ1EYcSw8w+GZ 2SxEUfYUSL2CFmIYSkQjnlsJCIpCoGgDiBAPbIUJO3KWBDX0JgGDbx3Wf3jXG/Y6 T63LPsO+AS20RCvcEF0F/rlDINzI5EAHO1TOEd9fKOu+JAK06Pw1m77BgOrE7Ftv IG7kYNVuOEPeInOHkOuqryDisB1PwiyPNIbqdQIDAQABAoIBAESQuI+lRQUo6ydG 8+2lp7iL5tJ7yRov8x8KKC9xj8e6fU6B7K3SVA9/H4aeoFGnHoQL4ZpiJBY5rGkh T5Gz6UhuKmejFoI384Xy9UBJ1VnjI81YKvWmd4yhWxAoSbW4chlVxhFlWD4UxcQt yPVIftfSW1T1iQAQXu87eMod6eW7VWlyMKicYkBGB2ohI0hW8chx361z96QcpxhA yBAfnhxuTgKFYSRVfwYSOjHYPOvozmU7Wj0iURT+1MM4iO8YlBDuZEJArs3WAdIe pmCq6snzOAJ6Y9iE0EGti9QGiAo6na/nWAfVlRSMyS/C1GC0oM0MnpRKSLW0tvLV vtJG81ECgYEA7lzGpdlAKwWNKPc2YIbtUNomD/eOr7TzYedYxJ88SG52THjgE3Pu poF3wZFjdtlwx1u4nsxlVe50FBTCN5s2FV4/8YP980zis+HtUC5pWCO3Oy6+DjSj K9st+mGyzYjl3opVqcQZkHj1LPqNxBmvFpDgAtVZfdKSdyuzZpj8s5sCgYEA51rj EFa/ijILp1P5vKn8b3pIfQFSsUsX5NXTy31f/2UwVV491djMyNyhtaRcrXP9CYpq 38o1xvUaxe2hlND/jiBjBHfsC13oUOVz8TrAzxDKAzbGLcOT2trgxMFbR8Ez+jur 1yQbPnoKZrB7SopAkcVqZv4ks0LLu+BLfEFXYy8CgYEApN8xXDgoRVnCqQpN53iM n/c0iqjOXkTIb/jIksAdv3AAjaayP2JaOXul7RL2fJeshYiw684vbb/RNK6jJDlM sH0Pt6t3tZmB2bC1KFfh7+BMdjg/p63LC6PAasa3GanObh67YADPOfoghCsOcgzd 6brt56fRDdHgE2P75ER/zm8CgYEArAxx6bepT3syIWiYww3itYBJofS26zP9++Zs T9rX5hT5IbMo5vwIJqO0+mDVrwQfu9Wc7vnwjhm+pEy4qfPW6Hn7SNppxnY6itZo J4/azOIeaM92B5h3Pv0gxBFK8YyjO8beXurx+79ENuOtfFxd8knOe/Mplcnpurjt SeVJuG8CgYBxEYouOM9UuZlblXQXfudTWWf+x5CEWxyJgKaktHEh3iees1gB7ZPb 
OewLa8AYVjqbNgS/r/aUFjpBbCov8ICxcy86SuGda10LDFX83sbyMm8XhktfyC3L 54irVW5mNUDcA8s9+DloeTlUlJIr8J/RADC9rpqHLaZzcdvpIMhVsw== -----END RSA PRIVATE KEY----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/unittests.p12000066400000000000000000000050651456575232400254070ustar00rootroot000000000000000 10  *H   0 0 *H 00} *H 0 *H  0ou{[PL%FGE8Tm^ UI'3ך]vrha г[0^7O" ӮekS?' 08!NJS&5]$c?ECkTTBVUWQکl_X0 e1_|Z:֑AV<@v*]×ކ2V^'?vorVhg<;/)0?U<꓏qnF U^!GɝC9%FN_VrLLH53LĦDU$T iBSV,"Y=- k<:3ɋ@5}"=[2!&c(IQ^V;>NԕyQ13S`V(k%X= M P\*Kh ~d] 8/^!?ly^dx{]J '+`C^'>`,w_t>XK3*ۢND&|Ie0OJStaKd^!AX(!7 ||AbF JF#/m.quh* LAd鲬ڮ&w};.})xInȵgpB=$i)*g, `%߲)Zj|4-/Cyevė"Vekd&$m&'ʂ_]FypOFkKG<!gT*숁\Sr Z=m(g="KwgFWopٍ:pQid/OZuaQө[ɺ ~|B$'*bb;ߪHw_ulIYe&JEcr_Ny|R>gR|F;B.ܯ&FbRWôvI߻ o]5덝g-r鹢PCB4*ĄQJȼ{Mkr7MRiӷԩ^#N[><2c"w82g[{B['0A *H 2.0*0& *H  00 *H  0mbM{ȸ W03\Q6WM B: U2jV5 ~#zRή;=vVCP{X}_kG9 9W"ݑ̟O9tFQC#\"qӈk=ҹs5ˊL#&DUKpFjx^WMH=&/*^.}nQ&`,"rl&c\ӁϠ12(iMM5̉m??q"!~=rמ^ leYc^4c/nEt%*+S~;Fջ)}ZvQ{z$)]ĊK^K3;@.(n2%#PB՗[lHUֈrx@LA=д&di (h[<9z&Uor }1EfUzq-=Q =P.|Xs86}6<] 0!UTD4ޏ*V4|YvB6.w&ǟhra. *Viϳ[U, omKQ'`1_SgqʀsMj-B!;Ȳ3)c8*Э_KfqyK[b㋈/5?7*jGȑ9Iɣm&kᚆ١$;r-ͥgu:!l{۳њ|Ůi!UʤW&E鳿Y YG`/j')j^[jl 1!\Fo=] ^%oaJg|:dz\qAN9rPizT;-ӛaEc.W5!R)i|CARﱲPO RcVR Js[=nM!DJ&^ϴbU,Jri(s>ㅒ/4.}pP2?&Cn`#(N:+«lZaq+ck21N^Jڸ_+FI2޶f.-Mq+U!.g_|lW wܑMS[JӷmV8ܶY׌1%0# *H  1i9)nėwy5S010!0 +r D|YnN4Y^ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/resources/unittests.p8000066400000000000000000000032501456575232400253260ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDXaldI+A5EAyVC 1hFvG7P85xqitqfcLYWPKOG7S+5xIRlLDEMmnvlMFAQxp9KlIQoBAt4O3vG4NENi fnZXhQTpwX7FNaG3Ox/uaE3+UdoM9y9HYBI8ASTOSKXwoItjh7q1PFLBD3uymU24 RnT30eglhNMsVpF4h93UPfNnURhxLDzD4ZnZLERR9hRIvYIWYhhKRCOeWwkIikKg aAOIEA9shQk7cpYENfQmAYNvHdZ/eNcb9jpPrcs+w74BLbREK9wQXQX+uUMg3Mjk QAc7VM4R318o674kArTo/DWbvsGA6sTsW28gbuRg1W44Q94ic4eQ66qvIOKwHU/C LI80hup1AgMBAAECggEARJC4j6VFBSjrJ0bz7aWnuIvm0nvJGi/zHwooL3GPx7p9 ToHsrdJUD38fhp6gUacehAvhmmIkFjmsaSFPkbPpSG4qZ6MWgjfzhfL1QEnVWeMj zVgq9aZ3jKFbEChJtbhyGVXGEWVYPhTFxC3I9Uh+19JbVPWJABBe7zt4yh3p5btV aXIwqJxiQEYHaiEjSFbxyHHfrXP3pBynGEDIEB+eHG5OAoVhJFV/BhI6Mdg86+jO ZTtaPSJRFP7UwziI7xiUEO5kQkCuzdYB0h6mYKrqyfM4Anpj2ITQQa2L1AaICjqd r+dYB9WVFIzJL8LUYLSgzQyelEpItbS28tW+0kbzUQKBgQDuXMal2UArBY0o9zZg hu1Q2iYP946vtPNh51jEnzxIbnZMeOATc+6mgXfBkWN22XDHW7iezGVV7nQUFMI3 mzYVXj/xg/3zTOKz4e1QLmlYI7c7Lr4ONKMr2y36YbLNiOXeilWpxBmQePUs+o3E Ga8WkOAC1Vl90pJ3K7NmmPyzmwKBgQDnWuMQVr+KMgunU/m8qfxvekh9AVKxSxfk 1dPLfV//ZTBVXj3V2MzI3KG1pFytc/0JimrfyjXG9RrF7aGU0P+OIGMEd+wLXehQ 5XPxOsDPEMoDNsYtw5Pa2uDEwVtHwTP6O6vXJBs+egpmsHtKikCRxWpm/iSzQsu7 4Et8QVdjLwKBgQCk3zFcOChFWcKpCk3neIyf9zSKqM5eRMhv+MiSwB2/cACNprI/ Ylo5e6XtEvZ8l6yFiLDrzi9tv9E0rqMkOUywfQ+3q3e1mYHZsLUoV+Hv4Ex2OD+n rcsLo8BqxrcZqc5uHrtgAM85+iCEKw5yDN3puu3np9EN0eATY/vkRH/ObwKBgQCs DHHpt6lPezIhaJjDDeK1gEmh9LbrM/375mxP2tfmFPkhsyjm/Agmo7T6YNWvBB+7 1Zzu+fCOGb6kTLip89boeftI2mnGdjqK1mgnj9rM4h5oz3YHmHc+/SDEEUrxjKM7 xt5e6vH7v0Q24618XF3ySc578ymVyem6uO1J5Um4bwKBgHERii44z1S5mVuVdBd+ 51NZZ/7HkIRbHImApqS0cSHeJ56zWAHtk9s57AtrwBhWOps2BL+v9pQWOkFsKi/w gLFzLzpK4Z1rXQsMVfzexvIybxeGS1/ILcvniKtVbmY1QNwDyz34OWh5OVSUkivw n9EAML2umoctpnNx2+kgyFWz -----END PRIVATE KEY----- aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/shared_library_test.c000066400000000000000000000047421456575232400252050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
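 *
 * Tests for the aws_shared_library wrapper: opening a nonexistent library must fail,
 * opening this library's own binary must succeed, and symbol lookup is exercised by
 * resolving aws_shared_library_find_function out of the loaded module (and by failing
 * to resolve a name that does not exist).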
*/ #include #include #ifdef _WIN32 /* * We may need to monkey with paths (or copy .dlls) a bit when we create shared library builds */ static const char *s_self_path = ".\\aws-c-io.dll"; #else static const char *s_self_path = "../libaws-c-io.so"; #endif static int s_shared_library_open_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_shared_library library; ASSERT_FAILS(aws_shared_library_init(&library, "not-a-real-library.blah")); return AWS_OP_SUCCESS; } AWS_TEST_CASE(shared_library_open_failure, s_shared_library_open_failure); static int s_shared_library_open_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_shared_library library; ASSERT_SUCCESS(aws_shared_library_init(&library, s_self_path)); aws_shared_library_clean_up(&library); return AWS_OP_SUCCESS; } AWS_TEST_CASE(shared_library_open_success, s_shared_library_open_success); typedef int (*find_symbol_function)(struct aws_shared_library *, const char *, aws_generic_function *); static int s_shared_library_find_function_success(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_shared_library library; ASSERT_SUCCESS(aws_shared_library_init(&library, s_self_path)); aws_generic_function find_symbol = NULL; ASSERT_SUCCESS(aws_shared_library_find_function(&library, "aws_shared_library_find_function", &find_symbol)); find_symbol_function find = (find_symbol_function)find_symbol; ASSERT_TRUE(find != NULL); aws_shared_library_clean_up(&library); return AWS_OP_SUCCESS; } AWS_TEST_CASE(shared_library_find_function_success, s_shared_library_find_function_success); static int s_shared_library_find_function_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_shared_library library; ASSERT_SUCCESS(aws_shared_library_init(&library, s_self_path)); aws_generic_function find_symbol = NULL; ASSERT_FAILS(aws_shared_library_find_function(&library, "not_a_real_function", &find_symbol)); ASSERT_TRUE(find_symbol == NULL); aws_shared_library_clean_up(&library); return AWS_OP_SUCCESS; } AWS_TEST_CASE(shared_library_find_function_failure, s_shared_library_find_function_failure); aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/socket_handler_test.c000066400000000000000000001106211456575232400251720ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
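 *
 * Socket/channel handler tests: each case stands up a local server and client through
 * the server/client bootstrap APIs, while socket_common_tester's mutex and condition
 * variable let the test thread wait on channel setup, shutdown, listener destruction,
 * and read/write progress reported from event-loop threads.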
*/ #include #include #include #include #include #include #include #include #include #include "statistics_handler_test.h" #include struct socket_test_args { struct aws_allocator *allocator; struct aws_mutex *mutex; struct aws_condition_variable *condition_variable; struct aws_channel *channel; struct aws_channel_handler *rw_handler; struct aws_atomic_var rw_slot; /* pointer-to struct aws_channel_slot */ int error_code; bool shutdown_invoked; bool error_invoked; bool creation_callback_invoked; bool listener_destroyed; }; /* common structure for test */ struct socket_common_tester { struct aws_mutex mutex; struct aws_condition_variable condition_variable; struct aws_event_loop_group *el_group; struct aws_atomic_var current_time_ns; struct aws_atomic_var stats_handler; bool setup_called; struct aws_event_loop *requested_callback_event_loop; int setup_error_code; }; static struct socket_common_tester c_tester; static int s_socket_common_tester_init(struct aws_allocator *allocator, struct socket_common_tester *tester) { AWS_ZERO_STRUCT(*tester); aws_io_library_init(allocator); tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; tester->mutex = mutex; tester->condition_variable = condition_variable; aws_atomic_store_int(&tester->current_time_ns, 0); aws_atomic_store_ptr(&tester->stats_handler, NULL); return AWS_OP_SUCCESS; } static int s_socket_common_tester_clean_up(struct socket_common_tester *tester) { aws_event_loop_group_release(tester->el_group); aws_mutex_clean_up(&tester->mutex); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } /* common structure for a local server */ struct local_server_tester { struct aws_socket_options socket_options; struct aws_socket_endpoint endpoint; struct aws_server_bootstrap *server_bootstrap; struct aws_socket *listener; }; static bool s_pinned_channel_setup_predicate(void *user_data) { struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; return setup_test_args->channel != NULL; } static bool s_channel_setup_predicate(void *user_data) { struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; return aws_atomic_load_ptr(&setup_test_args->rw_slot) != NULL; } static bool s_channel_shutdown_predicate(void *user_data) { struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; bool finished = setup_test_args->shutdown_invoked; return finished; } static bool s_listener_destroy_predicate(void *user_data) { struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; bool finished = setup_test_args->listener_destroyed; return finished; } static void s_socket_handler_test_client_setup_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->channel = channel; struct aws_channel_slot *rw_slot = aws_channel_slot_new(channel); aws_channel_slot_insert_end(channel, rw_slot); aws_channel_slot_set_handler(rw_slot, setup_test_args->rw_handler); aws_atomic_store_ptr(&setup_test_args->rw_slot, rw_slot); aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_socket_handler_test_server_setup_callback( struct aws_server_bootstrap *bootstrap, int 
error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->channel = channel; if (setup_test_args->rw_handler != NULL) { struct aws_channel_slot *rw_slot = aws_channel_slot_new(channel); aws_channel_slot_insert_end(channel, rw_slot); aws_channel_slot_set_handler(rw_slot, setup_test_args->rw_handler); aws_atomic_store_ptr(&setup_test_args->rw_slot, rw_slot); } aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_socket_handler_test_client_shutdown_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->shutdown_invoked = true; setup_test_args->error_code = error_code; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_socket_handler_test_server_shutdown_callback( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; (void)channel; struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->shutdown_invoked = true; setup_test_args->error_code = error_code; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } struct socket_test_rw_args { struct aws_mutex *mutex; struct aws_condition_variable *condition_variable; struct aws_byte_buf received_message; size_t amount_read; size_t expected_read; bool invocation_happened; bool shutdown_finished; }; static bool s_socket_test_read_predicate(void *user_data) { struct socket_test_rw_args *rw_args = (struct socket_test_rw_args *)user_data; return rw_args->invocation_happened; } static bool s_socket_test_full_read_predicate(void *user_data) { struct socket_test_rw_args *rw_args = (struct socket_test_rw_args *)user_data; return rw_args->invocation_happened && rw_args->amount_read == rw_args->expected_read; } static struct aws_byte_buf s_socket_test_handle_read( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *data_read, void *user_data) { (void)handler; (void)slot; struct socket_test_rw_args *rw_args = (struct socket_test_rw_args *)user_data; aws_mutex_lock(rw_args->mutex); memcpy(rw_args->received_message.buffer + rw_args->received_message.len, data_read->buffer, data_read->len); rw_args->received_message.len += data_read->len; rw_args->amount_read += data_read->len; rw_args->invocation_happened = true; aws_condition_variable_notify_one(rw_args->condition_variable); aws_mutex_unlock(rw_args->mutex); return rw_args->received_message; } static struct aws_byte_buf s_socket_test_handle_write( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *data_read, void *user_data) { (void)handler; (void)slot; (void)data_read; (void)user_data; /*do nothing*/ return (struct aws_byte_buf){0}; } static void s_socket_handler_test_server_listener_destroy_callback( struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; 
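    /* Listener destruction completes asynchronously on an event-loop thread; record it
     * under the shared mutex and wake the test thread, which waits on this flag through
     * s_listener_destroy_predicate. */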
aws_mutex_lock(setup_test_args->mutex); setup_test_args->listener_destroyed = true; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static int s_rw_args_init( struct socket_test_rw_args *args, struct socket_common_tester *s_c_tester, struct aws_byte_buf received_message, int expected_read) { AWS_ZERO_STRUCT(*args); args->mutex = &s_c_tester->mutex; args->condition_variable = &s_c_tester->condition_variable; args->received_message = received_message; args->expected_read = expected_read; return AWS_OP_SUCCESS; } static int s_socket_test_args_init( struct socket_test_args *args, struct socket_common_tester *s_c_tester, struct aws_channel_handler *rw_handler) { AWS_ZERO_STRUCT(*args); args->mutex = &s_c_tester->mutex; args->condition_variable = &s_c_tester->condition_variable; args->rw_handler = rw_handler; return AWS_OP_SUCCESS; } static int s_local_server_tester_init( struct aws_allocator *allocator, struct local_server_tester *tester, struct socket_test_args *args, struct socket_common_tester *s_c_tester, bool enable_back_pressure) { AWS_ZERO_STRUCT(*tester); tester->socket_options.connect_timeout_ms = 3000; tester->socket_options.type = AWS_SOCKET_STREAM; tester->socket_options.domain = AWS_SOCKET_LOCAL; aws_socket_endpoint_init_local_address_for_test(&tester->endpoint); tester->server_bootstrap = aws_server_bootstrap_new(allocator, s_c_tester->el_group); ASSERT_NOT_NULL(tester->server_bootstrap); struct aws_server_socket_channel_bootstrap_options bootstrap_options = { .bootstrap = tester->server_bootstrap, .enable_read_back_pressure = enable_back_pressure, .port = tester->endpoint.port, .host_name = tester->endpoint.address, .socket_options = &tester->socket_options, .incoming_callback = s_socket_handler_test_server_setup_callback, .shutdown_callback = s_socket_handler_test_server_shutdown_callback, .destroy_callback = s_socket_handler_test_server_listener_destroy_callback, .user_data = args, }; tester->listener = aws_server_bootstrap_new_socket_listener(&bootstrap_options); ASSERT_NOT_NULL(tester->listener); return AWS_OP_SUCCESS; } static int s_local_server_tester_clean_up(struct local_server_tester *tester) { aws_server_bootstrap_release(tester->server_bootstrap); return AWS_OP_SUCCESS; } static int s_socket_pinned_event_loop_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_socket_common_tester_init(allocator, &c_tester); struct aws_channel_handler *client_rw_handler = rw_handler_new(allocator, s_socket_test_handle_write, s_socket_test_handle_write, true, SIZE_MAX, NULL); ASSERT_NOT_NULL(client_rw_handler); struct aws_channel_handler *server_rw_handler = rw_handler_new(allocator, s_socket_test_handle_write, s_socket_test_handle_write, true, SIZE_MAX, NULL); ASSERT_NOT_NULL(server_rw_handler); struct socket_test_args server_args; ASSERT_SUCCESS(s_socket_test_args_init(&server_args, &c_tester, server_rw_handler)); struct socket_test_args client_args; ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler)); struct local_server_tester local_server_tester; ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, true)); struct aws_client_bootstrap_options client_bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = NULL, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_event_loop *pinned_event_loop = 
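    /* Pick one event loop up front so the client channel can be pinned to it via
     * requested_event_loop, and so the test can later assert the channel really landed
     * on that loop. */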
aws_event_loop_group_get_next_loop(c_tester.el_group); struct aws_socket_channel_bootstrap_options client_channel_options; AWS_ZERO_STRUCT(client_channel_options); client_channel_options.bootstrap = client_bootstrap; client_channel_options.host_name = local_server_tester.endpoint.address; client_channel_options.port = 0; client_channel_options.socket_options = &local_server_tester.socket_options; client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback; client_channel_options.shutdown_callback = s_socket_handler_test_client_shutdown_callback; client_channel_options.enable_read_back_pressure = false; client_channel_options.requested_event_loop = pinned_event_loop; client_channel_options.user_data = &client_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_options)); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_pinned_channel_setup_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_pinned_channel_setup_predicate, &client_args)); /* Verify the client channel was placed on the requested event loop */ ASSERT_PTR_EQUALS(pinned_event_loop, aws_channel_get_event_loop(client_args.channel)); ASSERT_SUCCESS(aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS)); ASSERT_SUCCESS(aws_channel_shutdown(client_args.channel, AWS_OP_SUCCESS)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args)); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_local_server_tester_clean_up(&local_server_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_socket_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(socket_pinned_event_loop, s_socket_pinned_event_loop_test) static void s_dns_failure_test_client_setup_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; struct socket_common_tester *socket_tester = (struct socket_common_tester *)user_data; aws_mutex_lock(&socket_tester->mutex); socket_tester->setup_error_code = error_code; socket_tester->setup_called = true; AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(socket_tester->requested_callback_event_loop)); AWS_FATAL_ASSERT(channel == NULL); aws_mutex_unlock(&socket_tester->mutex); aws_condition_variable_notify_one(&socket_tester->condition_variable); } static void s_dns_failure_handler_test_client_shutdown_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)error_code; (void)bootstrap; (void)channel; (void)user_data; // Should never be called AWS_FATAL_ASSERT(false); } static bool s_dns_failure_channel_setup_predicate(void *user_data) { struct socket_common_tester *socket_tester = (struct socket_common_tester *)user_data; return socket_tester->setup_called; } static int 
s_socket_pinned_event_loop_dns_failure_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_socket_common_tester_init(allocator, &c_tester); struct aws_host_resolver_default_options resolver_options = { .el_group = c_tester.el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options client_bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_event_loop *pinned_event_loop = aws_event_loop_group_get_next_loop(c_tester.el_group); c_tester.requested_callback_event_loop = pinned_event_loop; struct aws_socket_options socket_options = { .domain = AWS_SOCKET_IPV4, .type = AWS_SOCKET_STREAM, .connect_timeout_ms = 10000, }; struct aws_socket_channel_bootstrap_options client_channel_options; AWS_ZERO_STRUCT(client_channel_options); client_channel_options.bootstrap = client_bootstrap; client_channel_options.host_name = "notavalid.domain-seriously.uffda"; client_channel_options.port = 443; client_channel_options.socket_options = &socket_options; client_channel_options.setup_callback = s_dns_failure_test_client_setup_callback; client_channel_options.shutdown_callback = s_dns_failure_handler_test_client_shutdown_callback; client_channel_options.enable_read_back_pressure = false; client_channel_options.requested_event_loop = pinned_event_loop; client_channel_options.user_data = &c_tester; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_options)); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_dns_failure_channel_setup_predicate, &c_tester)); /* Verify the setup callback failure was on the requested event loop */ ASSERT_TRUE(c_tester.setup_error_code != 0); aws_mutex_unlock(&c_tester.mutex); aws_client_bootstrap_release(client_bootstrap); aws_host_resolver_release(resolver); ASSERT_SUCCESS(s_socket_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(socket_pinned_event_loop_dns_failure, s_socket_pinned_event_loop_dns_failure_test) static int s_socket_echo_and_backpressure_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_socket_common_tester_init(allocator, &c_tester); struct aws_byte_buf msg_from_server = aws_byte_buf_from_c_str("I'm a little teapot."); struct aws_byte_buf msg_from_client = aws_byte_buf_from_c_str("I'm a big teapot"); uint8_t server_received_message[128] = {0}; uint8_t client_received_message[128] = {0}; struct socket_test_rw_args server_rw_args; ASSERT_SUCCESS(s_rw_args_init( &server_rw_args, &c_tester, aws_byte_buf_from_empty_array(server_received_message, sizeof(server_received_message)), (int)msg_from_client.len)); struct socket_test_rw_args client_rw_args; ASSERT_SUCCESS(s_rw_args_init( &client_rw_args, &c_tester, aws_byte_buf_from_empty_array(client_received_message, sizeof(client_received_message)), (int)msg_from_server.len)); /* make the windows small to make sure back pressure is honored. 
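 * (the client handler starts with a 9-byte read window and the server handler with 8, so neither side can
 * receive its full message until the read windows are incremented further down)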
*/ static size_t s_client_initial_read_window = 9; static size_t s_server_initial_read_window = 8; struct aws_channel_handler *client_rw_handler = rw_handler_new( allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, s_client_initial_read_window, &client_rw_args); ASSERT_NOT_NULL(client_rw_handler); struct aws_channel_handler *server_rw_handler = rw_handler_new( allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, s_server_initial_read_window, &server_rw_args); ASSERT_NOT_NULL(server_rw_handler); struct socket_test_args server_args; ASSERT_SUCCESS(s_socket_test_args_init(&server_args, &c_tester, server_rw_handler)); struct socket_test_args client_args; ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler)); struct local_server_tester local_server_tester; ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, true)); struct aws_client_bootstrap_options client_bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = NULL, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_socket_channel_bootstrap_options client_channel_options; AWS_ZERO_STRUCT(client_channel_options); client_channel_options.bootstrap = client_bootstrap; client_channel_options.host_name = local_server_tester.endpoint.address; client_channel_options.port = 0; client_channel_options.socket_options = &local_server_tester.socket_options; client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback; client_channel_options.shutdown_callback = s_socket_handler_test_client_shutdown_callback; client_channel_options.user_data = &client_args; client_channel_options.enable_read_back_pressure = true; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_options)); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &client_args)); /* send msg from client to server, and wait for some bytes to be received */ rw_handler_write(client_args.rw_handler, aws_atomic_load_ptr(&client_args.rw_slot), &msg_from_client); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &server_rw_args)); /* send msg from server to client, and wait for some bytes to be received */ rw_handler_write(server_args.rw_handler, aws_atomic_load_ptr(&server_args.rw_slot), &msg_from_server); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &client_rw_args)); /* confirm that the initial read window was respected */ server_rw_args.invocation_happened = false; client_rw_args.invocation_happened = false; ASSERT_INT_EQUALS(s_client_initial_read_window, client_rw_args.amount_read); ASSERT_INT_EQUALS(s_server_initial_read_window, server_rw_args.amount_read); /* increment the read window on both sides and confirm they receive the remainder of their message */ rw_handler_trigger_increment_read_window(server_args.rw_handler, aws_atomic_load_ptr(&server_args.rw_slot), 100); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, 
s_socket_test_full_read_predicate, &server_rw_args)); rw_handler_trigger_increment_read_window(client_args.rw_handler, aws_atomic_load_ptr(&client_args.rw_slot), 100); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_socket_test_full_read_predicate, &client_rw_args)); ASSERT_INT_EQUALS(msg_from_server.len, client_rw_args.amount_read); ASSERT_INT_EQUALS(msg_from_client.len, server_rw_args.amount_read); ASSERT_BIN_ARRAYS_EQUALS( msg_from_client.buffer, msg_from_client.len, server_rw_args.received_message.buffer, server_rw_args.received_message.len); ASSERT_BIN_ARRAYS_EQUALS( msg_from_server.buffer, msg_from_server.len, client_rw_args.received_message.buffer, client_rw_args.received_message.len); /* only shut down one side, this should cause the other side to shutdown as well.*/ ASSERT_SUCCESS(aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS)); ASSERT_SUCCESS(aws_channel_shutdown(client_args.channel, AWS_OP_SUCCESS)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args)); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_local_server_tester_clean_up(&local_server_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_socket_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(socket_handler_echo_and_backpressure, s_socket_echo_and_backpressure_test) static int s_socket_close_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_socket_common_tester_init(allocator, &c_tester); uint8_t client_received_message[128]; uint8_t server_received_message[128]; struct socket_test_rw_args server_rw_args; ASSERT_SUCCESS(s_rw_args_init( &server_rw_args, &c_tester, aws_byte_buf_from_empty_array(server_received_message, sizeof(server_received_message)), 0)); struct socket_test_rw_args client_rw_args; ASSERT_SUCCESS(s_rw_args_init( &client_rw_args, &c_tester, aws_byte_buf_from_empty_array(client_received_message, sizeof(client_received_message)), 0)); struct aws_channel_handler *client_rw_handler = rw_handler_new(allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, 10000, &client_rw_args); ASSERT_NOT_NULL(client_rw_handler); struct aws_channel_handler *server_rw_handler = rw_handler_new(allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, 10000, &server_rw_args); ASSERT_NOT_NULL(server_rw_handler); struct socket_test_args server_args; ASSERT_SUCCESS(s_socket_test_args_init(&server_args, &c_tester, server_rw_handler)); struct socket_test_args client_args; ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler)); struct local_server_tester local_server_tester; ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, false)); struct aws_client_bootstrap_options client_bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = NULL, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct 
aws_socket_channel_bootstrap_options client_channel_options; AWS_ZERO_STRUCT(client_channel_options); client_channel_options.bootstrap = client_bootstrap; client_channel_options.host_name = local_server_tester.endpoint.address; client_channel_options.port = 0; client_channel_options.socket_options = &local_server_tester.socket_options; client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback; client_channel_options.shutdown_callback = s_socket_handler_test_client_shutdown_callback; client_channel_options.user_data = &client_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_options)); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &client_args)); aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, server_args.error_code); ASSERT_TRUE( AWS_IO_SOCKET_CLOSED == client_args.error_code || AWS_IO_SOCKET_NOT_CONNECTED == client_args.error_code); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_local_server_tester_clean_up(&local_server_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_socket_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(socket_handler_close, s_socket_close_test) static void s_creation_callback_test_channel_creation_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; struct socket_test_args *setup_test_args = (struct socket_test_args *)user_data; setup_test_args->creation_callback_invoked = true; struct aws_crt_statistics_handler *stats_handler = aws_statistics_handler_new_test(bootstrap->allocator); aws_atomic_store_ptr(&c_tester.stats_handler, stats_handler); aws_channel_set_statistics_handler(channel, stats_handler); } static struct aws_event_loop *s_default_new_event_loop( struct aws_allocator *allocator, const struct aws_event_loop_options *options, void *user_data) { (void)user_data; return aws_event_loop_new_default_with_options(allocator, options); } static int s_statistic_test_clock_fn(uint64_t *timestamp) { *timestamp = aws_atomic_load_int(&c_tester.current_time_ns); return AWS_OP_SUCCESS; } static int s_socket_common_tester_statistics_init( struct aws_allocator *allocator, struct socket_common_tester *tester) { aws_io_library_init(allocator); AWS_ZERO_STRUCT(*tester); tester->el_group = aws_event_loop_group_new(allocator, s_statistic_test_clock_fn, 1, s_default_new_event_loop, NULL, NULL); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; tester->mutex = mutex; tester->condition_variable = condition_variable; 
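/* the statistics test drives time by hand: s_statistic_test_clock_fn reads current_time_ns, which the test advances explicitly */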
aws_atomic_store_int(&tester->current_time_ns, 0); aws_atomic_store_ptr(&tester->stats_handler, NULL); return AWS_OP_SUCCESS; } static bool s_stats_processed_predicate(void *user_data) { struct aws_crt_statistics_handler *stats_handler = user_data; struct aws_statistics_handler_test_impl *stats_impl = stats_handler->impl; return stats_impl->total_bytes_read > 0 && stats_impl->total_bytes_written > 0; } static int s_open_channel_statistics_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; s_socket_common_tester_statistics_init(allocator, &c_tester); struct aws_byte_buf msg_from_server = aws_byte_buf_from_c_str("Some bytes"); struct aws_byte_buf msg_from_client = aws_byte_buf_from_c_str("Fresh pressed Washington apples"); uint8_t client_received_message[128]; uint8_t server_received_message[128]; struct socket_test_rw_args server_rw_args; ASSERT_SUCCESS(s_rw_args_init( &server_rw_args, &c_tester, aws_byte_buf_from_empty_array(server_received_message, sizeof(server_received_message)), 0)); struct socket_test_rw_args client_rw_args; ASSERT_SUCCESS(s_rw_args_init( &client_rw_args, &c_tester, aws_byte_buf_from_empty_array(client_received_message, sizeof(client_received_message)), 0)); struct aws_channel_handler *client_rw_handler = rw_handler_new(allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, 10000, &client_rw_args); ASSERT_NOT_NULL(client_rw_handler); struct aws_channel_handler *server_rw_handler = rw_handler_new(allocator, s_socket_test_handle_read, s_socket_test_handle_write, true, 10000, &server_rw_args); ASSERT_NOT_NULL(server_rw_handler); struct socket_test_args server_args; ASSERT_SUCCESS(s_socket_test_args_init(&server_args, &c_tester, server_rw_handler)); struct socket_test_args client_args; ASSERT_SUCCESS(s_socket_test_args_init(&client_args, &c_tester, client_rw_handler)); struct local_server_tester local_server_tester; ASSERT_SUCCESS(s_local_server_tester_init(allocator, &local_server_tester, &server_args, &c_tester, false)); struct aws_client_bootstrap_options client_bootstrap_options; AWS_ZERO_STRUCT(client_bootstrap_options); client_bootstrap_options.event_loop_group = c_tester.el_group; client_bootstrap_options.host_resolver = NULL; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &client_bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_socket_channel_bootstrap_options client_channel_options; AWS_ZERO_STRUCT(client_channel_options); client_channel_options.bootstrap = client_bootstrap; client_channel_options.host_name = local_server_tester.endpoint.address; client_channel_options.port = 0; client_channel_options.socket_options = &local_server_tester.socket_options; client_channel_options.creation_callback = s_creation_callback_test_channel_creation_callback; client_channel_options.setup_callback = s_socket_handler_test_client_setup_callback; client_channel_options.shutdown_callback = s_socket_handler_test_client_shutdown_callback; client_channel_options.user_data = &client_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&client_channel_options)); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_setup_predicate, &client_args)); ASSERT_TRUE(client_args.creation_callback_invoked); struct aws_channel_slot 
*client_rw_slot = aws_atomic_load_ptr(&client_args.rw_slot); rw_handler_write(client_args.rw_handler, client_rw_slot, &msg_from_client); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &server_rw_args)); struct aws_channel_slot *server_rw_slot = aws_atomic_load_ptr(&server_args.rw_slot); rw_handler_write(server_args.rw_handler, server_rw_slot, &msg_from_server); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_socket_test_read_predicate, &client_rw_args)); uint64_t ms_to_ns = aws_timestamp_convert(1, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); aws_atomic_store_int(&c_tester.current_time_ns, (size_t)ms_to_ns); struct aws_crt_statistics_handler *stats_handler = aws_atomic_load_ptr(&c_tester.stats_handler); struct aws_statistics_handler_test_impl *stats_impl = stats_handler->impl; aws_mutex_lock(&stats_impl->lock); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &stats_impl->signal, &stats_impl->lock, s_stats_processed_predicate, stats_handler)); ASSERT_TRUE(stats_impl->total_bytes_read == msg_from_server.len); ASSERT_TRUE(stats_impl->total_bytes_written == msg_from_client.len); aws_mutex_unlock(&stats_impl->lock); aws_channel_shutdown(server_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &server_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_channel_shutdown_predicate, &client_args)); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_listener_destroy_predicate, &server_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_local_server_tester_clean_up(&local_server_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_socket_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(open_channel_statistics_test, s_open_channel_statistics_test) aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/socket_test.c000066400000000000000000002073131456575232400235020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/

#include
#include
#include
#include
#include
#include
#include
#include

#ifdef _MSC_VER
#    pragma warning(disable : 4996) /* strncpy */
#endif

#if USE_VSOCK
#    include
#endif

struct local_listener_args {
    struct aws_socket *incoming;
    struct aws_mutex *mutex;
    struct aws_condition_variable *condition_variable;
    bool incoming_invoked;
    bool error_invoked;
};

static bool s_incoming_predicate(void *arg) {
    struct local_listener_args *listener_args = (struct local_listener_args *)arg;
    return listener_args->incoming_invoked || listener_args->error_invoked;
}

static void s_local_listener_incoming(
    struct aws_socket *socket,
    int error_code,
    struct aws_socket *new_socket,
    void *user_data) {
    (void)socket;
    struct local_listener_args *listener_args = (struct local_listener_args *)user_data;
    aws_mutex_lock(listener_args->mutex);

    if (!error_code) {
        listener_args->incoming = new_socket;
        listener_args->incoming_invoked = true;
    } else {
        listener_args->error_invoked = true;
    }

    aws_mutex_unlock(listener_args->mutex);
    aws_condition_variable_notify_one(listener_args->condition_variable);
}

struct local_outgoing_args {
    bool connect_invoked;
    bool error_invoked;
    int last_error;
    struct aws_mutex *mutex;
    struct aws_condition_variable *condition_variable;
};

static bool s_connection_completed_predicate(void *arg) {
    struct local_outgoing_args *outgoing_args = (struct local_outgoing_args *)arg;
    return outgoing_args->connect_invoked || outgoing_args->error_invoked;
}

static void s_local_outgoing_connection(struct aws_socket *socket, int error_code, void *user_data) {
    (void)socket;
    struct local_outgoing_args *outgoing_args = (struct local_outgoing_args *)user_data;
    aws_mutex_lock(outgoing_args->mutex);

    if (!error_code) {
        outgoing_args->connect_invoked = true;
    } else {
        outgoing_args->last_error = error_code;
        outgoing_args->error_invoked = true;
    }

    aws_mutex_unlock(outgoing_args->mutex);
    aws_condition_variable_notify_one(outgoing_args->condition_variable);
}

struct socket_io_args {
    struct aws_socket *socket;
    struct aws_byte_cursor *to_write;
    struct aws_byte_buf *to_read;
    struct aws_byte_buf *read_data;
    size_t amount_written;
    size_t amount_read;
    int error_code;
    bool close_completed;
    struct aws_mutex *mutex;
    struct aws_condition_variable condition_variable;
};

static void s_on_written(struct aws_socket *socket, int error_code, size_t amount_written, void *user_data) {
    (void)socket;
    struct socket_io_args *write_args = user_data;
    aws_mutex_lock(write_args->mutex);
    write_args->error_code = error_code;
    write_args->amount_written = amount_written;
    aws_mutex_unlock(write_args->mutex);
    aws_condition_variable_notify_one(&write_args->condition_variable);
}

static bool s_write_completed_predicate(void *arg) {
    struct socket_io_args *io_args = arg;
    return io_args->amount_written;
}

static void s_write_task(struct aws_task *task, void *args, enum aws_task_status status) {
    (void)task;
    (void)status;
    struct socket_io_args *io_args = args;
    aws_socket_write(io_args->socket, io_args->to_write, s_on_written, io_args);
}

static void s_read_task(struct aws_task *task, void *args, enum aws_task_status status) {
    (void)task;
    (void)status;
    struct socket_io_args *io_args = args;
    aws_mutex_lock(io_args->mutex);

    size_t read = 0;
    while (read < io_args->to_read->len) {
        size_t data_len = 0;
        if (aws_socket_read(io_args->socket, io_args->read_data, &data_len)) {
            if (AWS_IO_READ_WOULD_BLOCK == aws_last_error()) {
                continue;
            }
            break;
        }
        read += data_len;
    }

    io_args->amount_read = read;
    aws_mutex_unlock(io_args->mutex);
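    /* wake the test thread waiting on s_read_task_predicate now that the read loop has finished */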
aws_condition_variable_notify_one(&io_args->condition_variable); } static bool s_read_task_predicate(void *arg) { struct socket_io_args *io_args = arg; return io_args->amount_read; } static void s_on_readable(struct aws_socket *socket, int error_code, void *user_data) { (void)socket; (void)user_data; (void)error_code; } static bool s_close_completed_predicate(void *arg) { struct socket_io_args *io_args = (struct socket_io_args *)arg; return io_args->close_completed; } static void s_socket_close_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; (void)status; struct socket_io_args *io_args = args; aws_mutex_lock(io_args->mutex); aws_socket_close(io_args->socket); io_args->close_completed = true; aws_mutex_unlock(io_args->mutex); aws_condition_variable_notify_one(&io_args->condition_variable); } /* we have tests that need to check the error handling path, but it's damn near impossible to predictably make sockets fail, the best idea we have is to do something the OS won't allow for the access permissions (like attempt to listen on a port < 1024), but alas, what if you're running the build as root? This disables those tests if the user runs the build as a root user. */ static bool s_test_running_as_root(struct aws_allocator *alloc) { struct aws_socket_endpoint endpoint = {.address = "127.0.0.1", .port = 80}; struct aws_socket socket; struct aws_socket_options options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV4, .keep_alive_interval_sec = 0, .keep_alive_timeout_sec = 0, .connect_timeout_ms = 0, .keepalive = 0, }; int err = aws_socket_init(&socket, alloc, &options); AWS_FATAL_ASSERT(!err); err = aws_socket_bind(&socket, &endpoint); err |= aws_socket_listen(&socket, 1024); bool is_root = !err; aws_socket_clean_up(&socket); return is_root; } static int s_test_socket_ex( struct aws_allocator *allocator, struct aws_socket_options *options, struct aws_socket_endpoint *local, struct aws_socket_endpoint *endpoint) { struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct local_listener_args listener_args = { .mutex = &mutex, .condition_variable = &condition_variable, .incoming = NULL, .incoming_invoked = false, .error_invoked = false, }; struct aws_socket listener; ASSERT_SUCCESS(aws_socket_init(&listener, allocator, options)); ASSERT_SUCCESS(aws_socket_bind(&listener, endpoint)); struct aws_socket_endpoint bound_endpoint; ASSERT_SUCCESS(aws_socket_get_bound_address(&listener, &bound_endpoint)); ASSERT_INT_EQUALS(endpoint->port, bound_endpoint.port); ASSERT_STR_EQUALS(endpoint->address, bound_endpoint.address); if (options->type == AWS_SOCKET_STREAM) { ASSERT_SUCCESS(aws_socket_listen(&listener, 1024)); ASSERT_SUCCESS(aws_socket_start_accept(&listener, event_loop, s_local_listener_incoming, &listener_args)); } struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false}; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, options)); if (local && (strcmp(local->address, endpoint->address) != 0 || local->port != endpoint->port)) { ASSERT_SUCCESS(aws_socket_bind(&outgoing, local)); } 
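/* connect; for stream sockets, also wait for the listener to accept before checking the connect result */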
ASSERT_SUCCESS(aws_socket_connect(&outgoing, endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); if (listener.options.type == AWS_SOCKET_STREAM) { ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS( aws_condition_variable_wait_pred(&condition_variable, &mutex, s_incoming_predicate, &listener_args)); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); } ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &condition_variable, &mutex, s_connection_completed_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); struct aws_socket *server_sock = &listener; if (options->type == AWS_SOCKET_STREAM) { ASSERT_TRUE(listener_args.incoming_invoked); ASSERT_FALSE(listener_args.error_invoked); server_sock = listener_args.incoming; ASSERT_TRUE(outgoing_args.connect_invoked); ASSERT_FALSE(outgoing_args.error_invoked); ASSERT_INT_EQUALS(options->domain, listener_args.incoming->options.domain); ASSERT_INT_EQUALS(options->type, listener_args.incoming->options.type); } ASSERT_SUCCESS(aws_socket_assign_to_event_loop(server_sock, event_loop)); aws_socket_subscribe_to_readable_events(server_sock, s_on_readable, NULL); aws_socket_subscribe_to_readable_events(&outgoing, s_on_readable, NULL); /* now test the read and write across the connection. */ const char read_data[] = "I'm a little teapot"; char write_data[sizeof(read_data)] = {0}; struct aws_byte_buf read_buffer = aws_byte_buf_from_array((const uint8_t *)read_data, sizeof(read_data)); struct aws_byte_buf write_buffer = aws_byte_buf_from_array((const uint8_t *)write_data, sizeof(write_data)); write_buffer.len = 0; struct aws_byte_cursor read_cursor = aws_byte_cursor_from_buf(&read_buffer); struct socket_io_args io_args = { .socket = &outgoing, .to_write = &read_cursor, .to_read = &read_buffer, .read_data = &write_buffer, .mutex = &mutex, .amount_read = 0, .amount_written = 0, .error_code = 0, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .close_completed = false, }; struct aws_task write_task = { .fn = s_write_task, .arg = &io_args, }; aws_event_loop_schedule_task_now(event_loop, &write_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_write_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, io_args.error_code); io_args.socket = server_sock; struct aws_task read_task = { .fn = s_read_task, .arg = &io_args, }; aws_event_loop_schedule_task_now(event_loop, &read_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_read_task_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, io_args.error_code); ASSERT_BIN_ARRAYS_EQUALS(read_buffer.buffer, read_buffer.len, write_buffer.buffer, write_buffer.len); if (options->type != AWS_SOCKET_DGRAM) { memset((void *)write_data, 0, sizeof(write_data)); write_buffer.len = 0; io_args.error_code = 0; io_args.amount_read = 0; io_args.amount_written = 0; io_args.socket = server_sock; aws_event_loop_schedule_task_now(event_loop, &write_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_write_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, io_args.error_code); io_args.socket = &outgoing; aws_event_loop_schedule_task_now(event_loop, &read_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); 
aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_read_task_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, io_args.error_code); ASSERT_BIN_ARRAYS_EQUALS(read_buffer.buffer, read_buffer.len, write_buffer.buffer, write_buffer.len); } struct aws_task close_task = { .fn = s_socket_close_task, .arg = &io_args, }; if (listener_args.incoming) { io_args.socket = listener_args.incoming; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, &close_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); aws_socket_clean_up(listener_args.incoming); aws_mem_release(allocator, listener_args.incoming); } io_args.socket = &outgoing; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, &close_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); aws_socket_clean_up(&outgoing); io_args.socket = &listener; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, &close_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); aws_socket_clean_up(&listener); aws_event_loop_destroy(event_loop); return 0; } static int s_test_socket( struct aws_allocator *allocator, struct aws_socket_options *options, struct aws_socket_endpoint *endpoint) { return s_test_socket_ex(allocator, options, NULL, endpoint); } static int s_test_local_socket_communication(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_LOCAL; struct aws_socket_endpoint endpoint; AWS_ZERO_STRUCT(endpoint); aws_socket_endpoint_init_local_address_for_test(&endpoint); return s_test_socket(allocator, &options, &endpoint); } AWS_TEST_CASE(local_socket_communication, s_test_local_socket_communication) static int s_test_tcp_socket_communication(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.keepalive = true; options.keep_alive_interval_sec = 1000; options.keep_alive_timeout_sec = 60000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = {.address = "127.0.0.1", .port = 8127}; return s_test_socket(allocator, &options, &endpoint); } AWS_TEST_CASE(tcp_socket_communication, s_test_tcp_socket_communication) #if defined(USE_VSOCK) static int s_test_vsock_loopback_socket_communication(struct aws_allocator *allocator, void *ctx) { /* Without vsock loopback it's difficult to test vsock functionality. * Also note that having this defined does not guarantee that it's available * for use and there's no path to figure out dynamically if it can be used. 
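 * The body below is therefore compiled only when VMADDR_CID_LOCAL is defined; otherwise the test is a no-op.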
*/ # if defined(VMADDR_CID_LOCAL) (void)ctx; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_VSOCK; struct aws_socket_endpoint endpoint = {.address = "1" /* VMADDR_CID_LOCAL */, .port = 8127}; return s_test_socket(allocator, &options, &endpoint); # else return 0; # endif } AWS_TEST_CASE(vsock_loopback_socket_communication, s_test_vsock_loopback_socket_communication) #endif static int s_test_udp_socket_communication(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.type = AWS_SOCKET_DGRAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = {.address = "127.0.0.1", .port = 8126}; return s_test_socket(allocator, &options, &endpoint); } AWS_TEST_CASE(udp_socket_communication, s_test_udp_socket_communication) static int s_test_udp_bind_connect_communication(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.type = AWS_SOCKET_DGRAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint local = {.address = "127.0.0.1", .port = 4242}; struct aws_socket_endpoint endpoint = {.address = "127.0.0.1", .port = 8126}; return s_test_socket_ex(allocator, &options, &local, &endpoint); } AWS_TEST_CASE(udp_bind_connect_communication, s_test_udp_bind_connect_communication) struct test_host_callback_data { struct aws_host_address a_address; struct aws_mutex *mutex; bool has_a_address; struct aws_condition_variable condition_variable; bool invoked; }; static bool s_test_host_resolved_predicate(void *arg) { struct test_host_callback_data *callback_data = arg; return callback_data->invoked; } static void s_test_host_resolver_shutdown_callback(void *user_data) { struct test_host_callback_data *callback_data = user_data; aws_mutex_lock(callback_data->mutex); callback_data->invoked = true; aws_mutex_unlock(callback_data->mutex); aws_condition_variable_notify_one(&callback_data->condition_variable); } static void s_test_host_resolved_test_callback( struct aws_host_resolver *resolver, const struct aws_string *host_name, int err_code, const struct aws_array_list *host_addresses, void *user_data) { (void)resolver; (void)host_name; (void)err_code; struct test_host_callback_data *callback_data = user_data; aws_mutex_lock(callback_data->mutex); struct aws_host_address *host_address = NULL; if (aws_array_list_length(host_addresses) == 1) { aws_array_list_get_at_ptr(host_addresses, (void **)&host_address, 0); aws_host_address_copy(host_address, &callback_data->a_address); callback_data->has_a_address = true; } callback_data->invoked = true; aws_mutex_unlock(callback_data->mutex); aws_condition_variable_notify_one(&callback_data->condition_variable); } static int s_test_connect_timeout(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; 
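/* a 1-second connect timeout against the black-holed port described below keeps the test fast while still hitting AWS_IO_SOCKET_TIMEOUT */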
options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 2, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_host_resolution_config resolution_config = { .impl = aws_default_dns_resolve, .impl_data = NULL, .max_ttl = 1}; struct test_host_callback_data host_callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_a_address = false, .mutex = &mutex, }; /* This ec2 instance sits in a VPC that makes sure port 81 is black-holed (no TCP SYN should be received). */ struct aws_string *host_name = aws_string_new_from_c_str(allocator, "ec2-54-158-231-48.compute-1.amazonaws.com"); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_test_host_resolved_test_callback, &resolution_config, &host_callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &host_callback_data.condition_variable, &mutex, s_test_host_resolved_predicate, &host_callback_data); aws_mutex_unlock(&mutex); aws_host_resolver_release(resolver); ASSERT_TRUE(host_callback_data.has_a_address); struct aws_socket_endpoint endpoint = {.port = 81}; snprintf(endpoint.address, sizeof(endpoint.address), "%s", aws_string_bytes(host_callback_data.a_address.address)); aws_string_destroy((void *)host_name); aws_host_address_clean_up(&host_callback_data.a_address); struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false, }; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); aws_mutex_lock(&mutex); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &condition_variable, &mutex, s_connection_completed_predicate, &outgoing_args)); aws_mutex_unlock(&mutex); ASSERT_INT_EQUALS(AWS_IO_SOCKET_TIMEOUT, outgoing_args.last_error); aws_socket_clean_up(&outgoing); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(connect_timeout, s_test_connect_timeout) static int s_test_connect_timeout_cancelation(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct test_host_callback_data host_callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_a_address = false, .mutex = &mutex, }; struct aws_shutdown_callback_options shutdown_options = { .shutdown_callback_fn = s_test_host_resolver_shutdown_callback, .shutdown_callback_user_data = &host_callback_data, }; shutdown_options.shutdown_callback_fn = s_test_host_resolver_shutdown_callback; struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 2, .shutdown_options = &shutdown_options, }; struct aws_host_resolver 
*resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_host_resolution_config resolution_config = { .impl = aws_default_dns_resolve, .impl_data = NULL, .max_ttl = 1}; /* This ec2 instance sits in a VPC that makes sure port 81 is black-holed (no TCP SYN should be received). */ struct aws_string *host_name = aws_string_new_from_c_str(allocator, "ec2-54-158-231-48.compute-1.amazonaws.com"); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_test_host_resolved_test_callback, &resolution_config, &host_callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &host_callback_data.condition_variable, &mutex, s_test_host_resolved_predicate, &host_callback_data); host_callback_data.invoked = false; aws_mutex_unlock(&mutex); aws_host_resolver_release(resolver); /* wait for shutdown callback */ aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &host_callback_data.condition_variable, &mutex, s_test_host_resolved_predicate, &host_callback_data); aws_mutex_unlock(&mutex); ASSERT_TRUE(host_callback_data.has_a_address); struct aws_socket_endpoint endpoint = {.port = 81}; snprintf(endpoint.address, sizeof(endpoint.address), "%s", aws_string_bytes(host_callback_data.a_address.address)); aws_string_destroy((void *)host_name); aws_host_address_clean_up(&host_callback_data.a_address); struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false, }; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); aws_event_loop_group_release(el_group); aws_thread_join_all_managed(); ASSERT_INT_EQUALS(AWS_IO_EVENT_LOOP_SHUTDOWN, outgoing_args.last_error); aws_socket_clean_up(&outgoing); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(connect_timeout_cancelation, s_test_connect_timeout_cancelation) struct error_test_args { int error_code; struct aws_mutex mutex; struct aws_condition_variable condition_variable; }; static void s_null_sock_connection(struct aws_socket *socket, int error_code, void *user_data) { (void)socket; struct error_test_args *error_args = (struct error_test_args *)user_data; aws_mutex_lock(&error_args->mutex); if (error_code) { error_args->error_code = error_code; } aws_socket_close(socket); aws_condition_variable_notify_one(&error_args->condition_variable); aws_mutex_unlock(&error_args->mutex); } static int s_test_outgoing_local_sock_errors(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_LOCAL; struct aws_socket_endpoint endpoint = {.address = ""}; struct error_test_args args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, }; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_FAILS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_null_sock_connection, &args)); ASSERT_TRUE( aws_last_error() == AWS_IO_SOCKET_CONNECTION_REFUSED || aws_last_error() == 
AWS_ERROR_FILE_INVALID_PATH); aws_socket_clean_up(&outgoing); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(outgoing_local_sock_errors, s_test_outgoing_local_sock_errors) static bool s_outgoing_tcp_error_predicate(void *args) { struct error_test_args *test_args = (struct error_test_args *)args; return test_args->error_code != 0; } static int s_test_outgoing_tcp_sock_error(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 50000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = { .address = "127.0.0.1", .port = 8567, }; struct error_test_args args = { .error_code = 0, .mutex = AWS_MUTEX_INIT, .condition_variable = AWS_CONDITION_VARIABLE_INIT, }; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); /* tcp connect is non-blocking, it should return success, but the error callback will be invoked. */ ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_null_sock_connection, &args)); ASSERT_SUCCESS(aws_mutex_lock(&args.mutex)); ASSERT_SUCCESS( aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_outgoing_tcp_error_predicate, &args)); ASSERT_SUCCESS(aws_mutex_unlock(&args.mutex)); ASSERT_INT_EQUALS(AWS_IO_SOCKET_CONNECTION_REFUSED, args.error_code); aws_socket_clean_up(&outgoing); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(outgoing_tcp_sock_error, s_test_outgoing_tcp_sock_error) static int s_test_incoming_tcp_sock_errors(struct aws_allocator *allocator, void *ctx) { (void)ctx; if (!s_test_running_as_root(allocator)) { struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = { .address = "127.0.0.1", .port = 80, }; struct aws_socket incoming; ASSERT_SUCCESS(aws_socket_init(&incoming, allocator, &options)); ASSERT_ERROR(AWS_ERROR_NO_PERMISSION, aws_socket_bind(&incoming, &endpoint)); aws_socket_clean_up(&incoming); aws_event_loop_destroy(event_loop); } return 0; } AWS_TEST_CASE(incoming_tcp_sock_errors, s_test_incoming_tcp_sock_errors) static int s_test_incoming_duplicate_tcp_bind_errors(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = { .address = "127.0.0.1", .port = 30123, }; struct aws_socket incoming; ASSERT_SUCCESS(aws_socket_init(&incoming, allocator, &options)); 
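/* bind and listen on the port, then verify that a second bind to the same endpoint fails with AWS_IO_SOCKET_ADDRESS_IN_USE */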
ASSERT_SUCCESS(aws_socket_bind(&incoming, &endpoint)); ASSERT_SUCCESS(aws_socket_listen(&incoming, 1024)); struct aws_socket duplicate_bind; ASSERT_SUCCESS(aws_socket_init(&duplicate_bind, allocator, &options)); ASSERT_ERROR(AWS_IO_SOCKET_ADDRESS_IN_USE, aws_socket_bind(&duplicate_bind, &endpoint)); aws_socket_close(&duplicate_bind); aws_socket_clean_up(&duplicate_bind); aws_socket_close(&incoming); aws_socket_clean_up(&incoming); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(incoming_duplicate_tcp_bind_errors, s_test_incoming_duplicate_tcp_bind_errors) /* Ensure that binding to port 0 results in OS assigning a port */ static int s_test_bind_on_zero_port( struct aws_allocator *allocator, enum aws_socket_type sock_type, enum aws_socket_domain sock_domain, const char *address) { struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = sock_type; options.domain = sock_domain; struct aws_socket_endpoint endpoint = { .port = 0 /* important: must be 0 for this test */, }; strncpy(endpoint.address, address, sizeof(endpoint.address)); struct aws_socket incoming; ASSERT_SUCCESS(aws_socket_init(&incoming, allocator, &options)); /* ensure address query fails if socket isn't bound yet */ struct aws_socket_endpoint local_address1; ASSERT_FAILS(aws_socket_get_bound_address(&incoming, &local_address1)); ASSERT_SUCCESS(aws_socket_bind(&incoming, &endpoint)); ASSERT_SUCCESS(aws_socket_get_bound_address(&incoming, &local_address1)); if (sock_type != AWS_SOCKET_DGRAM) { ASSERT_SUCCESS(aws_socket_listen(&incoming, 1024)); } ASSERT_TRUE(local_address1.port > 0); ASSERT_STR_EQUALS(address, local_address1.address); /* ensure that querying again gets the same results */ struct aws_socket_endpoint local_address2; ASSERT_SUCCESS(aws_socket_get_bound_address(&incoming, &local_address2)); ASSERT_INT_EQUALS(local_address1.port, local_address2.port); ASSERT_STR_EQUALS(local_address1.address, local_address2.address); aws_socket_close(&incoming); aws_socket_clean_up(&incoming); aws_event_loop_destroy(event_loop); return 0; } static int s_bind_on_zero_port_tcp_ipv4(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_bind_on_zero_port(allocator, AWS_SOCKET_STREAM, AWS_SOCKET_IPV4, "127.0.0.1"); } AWS_TEST_CASE(bind_on_zero_port_tcp_ipv4, s_bind_on_zero_port_tcp_ipv4) static int s_bind_on_zero_port_udp_ipv4(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_bind_on_zero_port(allocator, AWS_SOCKET_DGRAM, AWS_SOCKET_IPV4, "127.0.0.1"); } AWS_TEST_CASE(bind_on_zero_port_udp_ipv4, s_bind_on_zero_port_udp_ipv4) static int s_test_incoming_udp_sock_errors(struct aws_allocator *allocator, void *ctx) { (void)ctx; if (!s_test_running_as_root(allocator)) { struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = AWS_SOCKET_DGRAM; options.domain = AWS_SOCKET_IPV4; /* hit a endpoint that will not send me a SYN packet. 
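 * (binding to the truncated address "127.0" below should fail with an invalid-address or permission error)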
*/ struct aws_socket_endpoint endpoint = { .address = "127.0", .port = 80, }; struct aws_socket incoming; ASSERT_SUCCESS(aws_socket_init(&incoming, allocator, &options)); ASSERT_FAILS(aws_socket_bind(&incoming, &endpoint)); int error = aws_last_error(); ASSERT_TRUE(AWS_IO_SOCKET_INVALID_ADDRESS == error || AWS_ERROR_NO_PERMISSION == error); aws_socket_clean_up(&incoming); aws_event_loop_destroy(event_loop); } return 0; } AWS_TEST_CASE(incoming_udp_sock_errors, s_test_incoming_udp_sock_errors) static void s_on_null_readable_notification(struct aws_socket *socket, int error_code, void *user_data) { (void)socket; (void)error_code; (void)user_data; } static int s_test_wrong_thread_read_write_fails(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = AWS_SOCKET_DGRAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = { .address = "127.0.0.1", .port = 50000, }; struct aws_socket socket; ASSERT_SUCCESS(aws_socket_init(&socket, allocator, &options)); aws_socket_bind(&socket, &endpoint); aws_socket_assign_to_event_loop(&socket, event_loop); aws_socket_subscribe_to_readable_events(&socket, s_on_null_readable_notification, NULL); size_t amount_read = 0; ASSERT_ERROR(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY, aws_socket_read(&socket, NULL, &amount_read)); ASSERT_ERROR(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY, aws_socket_write(&socket, NULL, NULL, NULL)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct socket_io_args io_args; io_args.socket = &socket; io_args.close_completed = false; io_args.condition_variable = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; io_args.mutex = &mutex; struct aws_task close_task = { .fn = s_socket_close_task, .arg = &io_args, }; aws_event_loop_schedule_task_now(event_loop, &close_task); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); aws_mutex_unlock(&mutex); aws_socket_clean_up(&socket); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(wrong_thread_read_write_fails, s_test_wrong_thread_read_write_fails) static void s_test_destroy_socket_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_socket *socket = arg; aws_socket_clean_up(socket); } static int s_cleanup_before_connect_or_timeout_doesnt_explode(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 1000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 2, }; struct aws_host_resolver *resolver = 
aws_host_resolver_new_default(allocator, &resolver_options); struct aws_host_resolution_config resolution_config = { .impl = aws_default_dns_resolve, .impl_data = NULL, .max_ttl = 1}; struct test_host_callback_data host_callback_data = { .condition_variable = AWS_CONDITION_VARIABLE_INIT, .invoked = false, .has_a_address = false, .mutex = &mutex, }; /* This ec2 instance sits in a VPC that makes sure port 81 is black-holed (no TCP SYN should be received). */ struct aws_string *host_name = aws_string_new_from_c_str(allocator, "ec2-54-158-231-48.compute-1.amazonaws.com"); ASSERT_SUCCESS(aws_host_resolver_resolve_host( resolver, host_name, s_test_host_resolved_test_callback, &resolution_config, &host_callback_data)); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred( &host_callback_data.condition_variable, &mutex, s_test_host_resolved_predicate, &host_callback_data); aws_mutex_unlock(&mutex); aws_host_resolver_release(resolver); ASSERT_TRUE(host_callback_data.has_a_address); struct aws_socket_endpoint endpoint = {.port = 81}; snprintf(endpoint.address, sizeof(endpoint.address), "%s", aws_string_bytes(host_callback_data.a_address.address)); aws_string_destroy((void *)host_name); aws_host_address_clean_up(&host_callback_data.a_address); struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false, }; struct aws_socket outgoing; struct aws_task destroy_task = { .fn = s_test_destroy_socket_task, .arg = &outgoing, }; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); aws_event_loop_schedule_task_now(event_loop, &destroy_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_ERROR( AWS_ERROR_COND_VARIABLE_TIMED_OUT, aws_condition_variable_wait_for( &condition_variable, &mutex, aws_timestamp_convert(options.connect_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL))); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_FALSE(outgoing_args.connect_invoked); ASSERT_FALSE(outgoing_args.error_invoked); aws_event_loop_group_release(el_group); aws_io_library_clean_up(); return 0; } AWS_TEST_CASE(cleanup_before_connect_or_timeout_doesnt_explode, s_cleanup_before_connect_or_timeout_doesnt_explode) static void s_local_listener_incoming_destroy_listener( struct aws_socket *socket, int error_code, struct aws_socket *new_socket, void *user_data) { (void)socket; struct local_listener_args *listener_args = (struct local_listener_args *)user_data; aws_mutex_lock(listener_args->mutex); if (!error_code) { listener_args->incoming = new_socket; listener_args->incoming_invoked = true; } else { listener_args->error_invoked = true; } aws_socket_clean_up(socket); aws_condition_variable_notify_one(listener_args->condition_variable); aws_mutex_unlock(listener_args->mutex); } static int s_cleanup_in_accept_doesnt_explode(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct local_listener_args listener_args = { .mutex = &mutex, .condition_variable = &condition_variable, .incoming = NULL, .incoming_invoked = 
false, .error_invoked = false, }; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.keepalive = true; options.keep_alive_interval_sec = 1000; options.keep_alive_timeout_sec = 60000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = {.address = "127.0.0.1", .port = 8129}; struct aws_socket listener; ASSERT_SUCCESS(aws_socket_init(&listener, allocator, &options)); ASSERT_SUCCESS(aws_socket_bind(&listener, &endpoint)); ASSERT_SUCCESS(aws_socket_listen(&listener, 1024)); ASSERT_SUCCESS( aws_socket_start_accept(&listener, event_loop, s_local_listener_incoming_destroy_listener, &listener_args)); struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false}; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&condition_variable, &mutex, s_incoming_predicate, &listener_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &condition_variable, &mutex, s_connection_completed_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_TRUE(listener_args.incoming_invoked); ASSERT_FALSE(listener_args.error_invoked); ASSERT_TRUE(outgoing_args.connect_invoked); ASSERT_FALSE(outgoing_args.error_invoked); ASSERT_INT_EQUALS(options.domain, listener_args.incoming->options.domain); ASSERT_INT_EQUALS(options.type, listener_args.incoming->options.type); struct socket_io_args io_args = { .socket = &outgoing, .to_write = NULL, .to_read = NULL, .read_data = NULL, .mutex = &mutex, .amount_read = 0, .amount_written = 0, .error_code = 0, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .close_completed = false, }; struct aws_task close_task = { .fn = s_socket_close_task, .arg = &io_args, }; if (listener_args.incoming) { io_args.socket = listener_args.incoming; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, &close_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); aws_socket_clean_up(listener_args.incoming); aws_mem_release(allocator, listener_args.incoming); } io_args.socket = &outgoing; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, &close_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); aws_socket_clean_up(&outgoing); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(cleanup_in_accept_doesnt_explode, s_cleanup_in_accept_doesnt_explode) static void s_on_written_destroy(struct aws_socket *socket, int error_code, size_t amount_written, void *user_data) { (void)socket; struct socket_io_args *write_args = user_data; aws_mutex_lock(write_args->mutex); write_args->error_code = error_code; write_args->amount_written = amount_written; aws_socket_clean_up(socket); aws_condition_variable_notify_one(&write_args->condition_variable); aws_mutex_unlock(write_args->mutex); } static bool s_write_completed_predicate_destroy(void *arg) { struct socket_io_args *io_args = arg; return io_args->amount_written 
|| io_args->error_code; } static void s_write_task_destroy(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; (void)status; struct socket_io_args *io_args = args; aws_socket_write(io_args->socket, io_args->to_write, s_on_written_destroy, io_args); } static int s_cleanup_in_write_cb_doesnt_explode(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct local_listener_args listener_args = { .mutex = &mutex, .condition_variable = &condition_variable, .incoming = NULL, .incoming_invoked = false, .error_invoked = false, }; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.keepalive = true; options.keep_alive_interval_sec = 1000; options.keep_alive_timeout_sec = 60000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_socket_endpoint endpoint = {.address = "127.0.0.1", .port = 8130}; struct aws_socket listener; ASSERT_SUCCESS(aws_socket_init(&listener, allocator, &options)); ASSERT_SUCCESS(aws_socket_bind(&listener, &endpoint)); ASSERT_SUCCESS(aws_socket_listen(&listener, 1024)); ASSERT_SUCCESS(aws_socket_start_accept(&listener, event_loop, s_local_listener_incoming, &listener_args)); struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false}; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&condition_variable, &mutex, s_incoming_predicate, &listener_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &condition_variable, &mutex, s_connection_completed_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_TRUE(listener_args.incoming_invoked); ASSERT_FALSE(listener_args.error_invoked); struct aws_socket *server_sock = listener_args.incoming; ASSERT_TRUE(outgoing_args.connect_invoked); ASSERT_FALSE(outgoing_args.error_invoked); ASSERT_INT_EQUALS(options.domain, listener_args.incoming->options.domain); ASSERT_INT_EQUALS(options.type, listener_args.incoming->options.type); ASSERT_SUCCESS(aws_socket_assign_to_event_loop(server_sock, event_loop)); aws_socket_subscribe_to_readable_events(server_sock, s_on_readable, NULL); aws_socket_subscribe_to_readable_events(&outgoing, s_on_readable, NULL); /* now test the read and write across the connection. 
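Each side performs one write whose completion callback (s_on_written_destroy) cleans the socket up from inside the callback, which is exactly the scenario this test guards against regressing. 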
*/ const char read_data[] = "I'm a little teapot"; char write_data[sizeof(read_data)] = {0}; struct aws_byte_buf read_buffer = aws_byte_buf_from_array((const uint8_t *)read_data, sizeof(read_data)); struct aws_byte_buf write_buffer = aws_byte_buf_from_array((const uint8_t *)write_data, sizeof(write_data)); write_buffer.len = 0; struct aws_byte_cursor read_cursor = aws_byte_cursor_from_buf(&read_buffer); struct socket_io_args io_args = { .socket = &outgoing, .to_write = &read_cursor, .to_read = &read_buffer, .read_data = &write_buffer, .mutex = &mutex, .amount_read = 0, .amount_written = 0, .error_code = 0, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .close_completed = false, }; struct aws_task write_task = { .fn = s_write_task_destroy, .arg = &io_args, }; aws_event_loop_schedule_task_now(event_loop, &write_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred( &io_args.condition_variable, &mutex, s_write_completed_predicate_destroy, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, io_args.error_code); memset((void *)write_data, 0, sizeof(write_data)); write_buffer.len = 0; io_args.error_code = 0; io_args.amount_written = 0; io_args.socket = server_sock; aws_event_loop_schedule_task_now(event_loop, &write_task); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_write_completed_predicate, &io_args); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, io_args.error_code); aws_mem_release(allocator, server_sock); aws_socket_clean_up(&listener); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(cleanup_in_write_cb_doesnt_explode, s_cleanup_in_write_cb_doesnt_explode) /* stuff for the sock_write_cb_is_async test */ enum async_role { ASYNC_ROLE_A_CALLBACK_WRITES_D, ASYNC_ROLE_B_CALLBACK_CLEANS_UP_SOCKET, ASYNC_ROLE_C_IS_LAST_FROM_INITIAL_BATCH_OF_WRITES, ASYNC_ROLE_D_GOT_WRITTEN_VIA_CALLBACK, ASYNC_ROLE_COUNT }; static struct { struct aws_allocator *allocator; struct aws_event_loop *event_loop; struct aws_socket *write_socket; struct aws_socket *read_socket; bool currently_writing; enum async_role next_expected_callback; struct aws_mutex *mutex; struct aws_condition_variable *condition_variable; bool write_tasks_complete; bool read_tasks_complete; } g_async_tester; static bool s_async_tasks_complete_pred(void *arg) { (void)arg; return g_async_tester.write_tasks_complete && g_async_tester.read_tasks_complete; } /* read until socket gets hung up on */ static void s_async_read_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)args; (void)status; uint8_t buf_storage[100]; AWS_ZERO_ARRAY(buf_storage); struct aws_byte_buf buf = aws_byte_buf_from_array(buf_storage, sizeof(buf_storage)); while (true) { size_t amount_read = 0; buf.len = 0; if (aws_socket_read(g_async_tester.read_socket, &buf, &amount_read)) { /* reschedule task to try reading more later */ if (AWS_IO_READ_WOULD_BLOCK == aws_last_error()) { aws_event_loop_schedule_task_now(g_async_tester.event_loop, task); break; } /* other end must have hung up. 
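Any read failure other than AWS_IO_READ_WOULD_BLOCK is treated here as the peer having closed the connection, so we 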
clean up and signal completion */ aws_socket_clean_up(g_async_tester.read_socket); aws_mem_release(g_async_tester.allocator, g_async_tester.read_socket); aws_mutex_lock(g_async_tester.mutex); g_async_tester.read_tasks_complete = true; aws_mutex_unlock(g_async_tester.mutex); aws_condition_variable_notify_all(g_async_tester.condition_variable); break; } } } static void s_async_write_completion(struct aws_socket *socket, int error_code, size_t bytes_written, void *user_data) { enum async_role role = *(enum async_role *)user_data; aws_mem_release(g_async_tester.allocator, user_data); /* ensure callback is not firing synchronously from within aws_socket_write() */ AWS_FATAL_ASSERT(!g_async_tester.currently_writing); /* ensure callbacks arrive in order */ AWS_FATAL_ASSERT(g_async_tester.next_expected_callback == role); g_async_tester.next_expected_callback++; switch (role) { case ASYNC_ROLE_A_CALLBACK_WRITES_D: { AWS_FATAL_ASSERT(0 == error_code); AWS_FATAL_ASSERT(1 == bytes_written); g_async_tester.currently_writing = true; struct aws_byte_cursor data = aws_byte_cursor_from_c_str("D"); enum async_role *d_role = aws_mem_acquire(g_async_tester.allocator, sizeof(enum async_role)); *d_role = ASYNC_ROLE_D_GOT_WRITTEN_VIA_CALLBACK; AWS_FATAL_ASSERT(0 == aws_socket_write(socket, &data, s_async_write_completion, d_role)); g_async_tester.currently_writing = false; break; } case ASYNC_ROLE_B_CALLBACK_CLEANS_UP_SOCKET: AWS_FATAL_ASSERT(0 == error_code); AWS_FATAL_ASSERT(1 == bytes_written); aws_socket_clean_up(socket); break; case ASYNC_ROLE_C_IS_LAST_FROM_INITIAL_BATCH_OF_WRITES: /* C might succeed or fail (since socket killed after B completes), either is valid */ break; case ASYNC_ROLE_D_GOT_WRITTEN_VIA_CALLBACK: /* write tasks complete! */ aws_mutex_lock(g_async_tester.mutex); g_async_tester.write_tasks_complete = true; aws_mutex_unlock(g_async_tester.mutex); aws_condition_variable_notify_all(g_async_tester.condition_variable); break; default: AWS_FATAL_ASSERT(0); } } static void s_async_write_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; (void)args; (void)status; g_async_tester.currently_writing = true; struct aws_byte_cursor data = aws_byte_cursor_from_c_str("A"); enum async_role *role = aws_mem_acquire(g_async_tester.allocator, sizeof(enum async_role)); *role = ASYNC_ROLE_A_CALLBACK_WRITES_D; AWS_FATAL_ASSERT(0 == aws_socket_write(g_async_tester.write_socket, &data, s_async_write_completion, role)); data = aws_byte_cursor_from_c_str("B"); role = aws_mem_acquire(g_async_tester.allocator, sizeof(enum async_role)); *role = ASYNC_ROLE_B_CALLBACK_CLEANS_UP_SOCKET; AWS_FATAL_ASSERT(0 == aws_socket_write(g_async_tester.write_socket, &data, s_async_write_completion, role)); data = aws_byte_cursor_from_c_str("C"); role = aws_mem_acquire(g_async_tester.allocator, sizeof(enum async_role)); *role = ASYNC_ROLE_C_IS_LAST_FROM_INITIAL_BATCH_OF_WRITES; AWS_FATAL_ASSERT(0 == aws_socket_write(g_async_tester.write_socket, &data, s_async_write_completion, role)); g_async_tester.currently_writing = false; } /** * aws_socket_write()'s completion callback MUST fire asynchronously. * Otherwise, we can get multiple write() calls in the same callstack, which * leads to esoteric bugs (https://github.com/aws/aws-iot-device-sdk-cpp-v2/issues/194). 
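The writer task below queues "A", "B", "C" from a single callstack; A's completion callback writes "D" and B's completion callback cleans the socket up, and s_async_write_completion asserts both that no completion fires synchronously from inside aws_socket_write() and that the callbacks arrive strictly in the order A, B, C, D. 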
*/ static int s_sock_write_cb_is_async(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* set up server (read) and client (write) sockets */ struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct local_listener_args listener_args = { .mutex = &mutex, .condition_variable = &condition_variable, .incoming = NULL, .incoming_invoked = false, .error_invoked = false, }; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.keepalive = true; options.keep_alive_interval_sec = 1000; options.keep_alive_timeout_sec = 60000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_LOCAL; struct aws_socket_endpoint endpoint; AWS_ZERO_STRUCT(endpoint); aws_socket_endpoint_init_local_address_for_test(&endpoint); struct aws_socket listener; ASSERT_SUCCESS(aws_socket_init(&listener, allocator, &options)); ASSERT_SUCCESS(aws_socket_bind(&listener, &endpoint)); ASSERT_SUCCESS(aws_socket_listen(&listener, 1024)); ASSERT_SUCCESS(aws_socket_start_accept(&listener, event_loop, s_local_listener_incoming, &listener_args)); struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false}; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&condition_variable, &mutex, s_incoming_predicate, &listener_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &condition_variable, &mutex, s_connection_completed_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_TRUE(listener_args.incoming_invoked); ASSERT_FALSE(listener_args.error_invoked); struct aws_socket *server_sock = listener_args.incoming; ASSERT_TRUE(outgoing_args.connect_invoked); ASSERT_FALSE(outgoing_args.error_invoked); ASSERT_INT_EQUALS(options.domain, listener_args.incoming->options.domain); ASSERT_INT_EQUALS(options.type, listener_args.incoming->options.type); ASSERT_SUCCESS(aws_socket_assign_to_event_loop(server_sock, event_loop)); aws_socket_subscribe_to_readable_events(server_sock, s_on_readable, NULL); aws_socket_subscribe_to_readable_events(&outgoing, s_on_readable, NULL); /* set up g_async_tester */ g_async_tester.allocator = allocator; g_async_tester.event_loop = event_loop; g_async_tester.write_socket = &outgoing; g_async_tester.read_socket = server_sock; g_async_tester.mutex = &mutex; g_async_tester.condition_variable = &condition_variable; /* kick off writer and reader tasks */ struct aws_task writer_task; aws_task_init(&writer_task, s_async_write_task, NULL, "async_test_write_task"); aws_event_loop_schedule_task_now(event_loop, &writer_task); struct aws_task reader_task; aws_task_init(&reader_task, s_async_read_task, NULL, "async_test_read_task"); aws_event_loop_schedule_task_now(event_loop, &reader_task); /* wait for tasks to complete */ aws_mutex_lock(&mutex); aws_condition_variable_wait_pred(&condition_variable, &mutex, s_async_tasks_complete_pred, NULL); aws_mutex_unlock(&mutex); /* cleanup 
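(only the listener is left to clean up here: the outgoing socket was already cleaned up by the ASYNC_ROLE_B write completion, and the accepted socket by the reader task once the peer hung up) 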
*/ aws_socket_clean_up(&listener); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(sock_write_cb_is_async, s_sock_write_cb_is_async) #ifdef _WIN32 static int s_local_socket_pipe_connected_race(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; struct local_listener_args listener_args = { .mutex = &mutex, .condition_variable = &condition_variable, .incoming = NULL, .incoming_invoked = false, .error_invoked = false, }; struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_LOCAL; struct aws_socket_endpoint endpoint; AWS_ZERO_STRUCT(endpoint); aws_socket_endpoint_init_local_address_for_test(&endpoint); struct aws_socket listener; ASSERT_SUCCESS(aws_socket_init(&listener, allocator, &options)); ASSERT_SUCCESS(aws_socket_bind(&listener, &endpoint)); ASSERT_SUCCESS(aws_socket_listen(&listener, 1024)); /* do the connect after the named pipe has been created (in the bind call), but before the connect named pipe call has been made in start accept. This will ensure IOCP does what we think it does. */ struct local_outgoing_args outgoing_args = { .mutex = &mutex, .condition_variable = &condition_variable, .connect_invoked = false, .error_invoked = false}; struct aws_socket outgoing; ASSERT_SUCCESS(aws_socket_init(&outgoing, allocator, &options)); ASSERT_SUCCESS(aws_socket_connect(&outgoing, &endpoint, event_loop, s_local_outgoing_connection, &outgoing_args)); ASSERT_SUCCESS(aws_socket_start_accept(&listener, event_loop, s_local_listener_incoming, &listener_args)); aws_mutex_lock(&mutex); ASSERT_SUCCESS(aws_condition_variable_wait_pred(&condition_variable, &mutex, s_incoming_predicate, &listener_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &condition_variable, &mutex, s_connection_completed_predicate, &outgoing_args)); aws_mutex_unlock(&mutex); struct aws_socket *server_sock = &listener; ASSERT_TRUE(listener_args.incoming_invoked); ASSERT_FALSE(listener_args.error_invoked); server_sock = listener_args.incoming; ASSERT_TRUE(outgoing_args.connect_invoked); ASSERT_FALSE(outgoing_args.error_invoked); ASSERT_INT_EQUALS(options.domain, listener_args.incoming->options.domain); ASSERT_INT_EQUALS(options.type, listener_args.incoming->options.type); struct socket_io_args io_args = { .socket = &outgoing, .to_write = NULL, .to_read = NULL, .read_data = NULL, .mutex = &mutex, .amount_read = 0, .amount_written = 0, .error_code = 0, .condition_variable = AWS_CONDITION_VARIABLE_INIT, .close_completed = false, }; struct aws_task close_task = { .fn = s_socket_close_task, .arg = &io_args, }; if (listener_args.incoming) { io_args.socket = listener_args.incoming; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, &close_task); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); aws_mutex_unlock(&mutex); aws_socket_clean_up(listener_args.incoming); aws_mem_release(allocator, listener_args.incoming); } io_args.socket = &outgoing; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, 
&close_task); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); aws_mutex_unlock(&mutex); aws_socket_clean_up(&outgoing); io_args.socket = &listener; io_args.close_completed = false; aws_event_loop_schedule_task_now(event_loop, &close_task); aws_mutex_lock(&mutex); aws_condition_variable_wait_pred(&io_args.condition_variable, &mutex, s_close_completed_predicate, &io_args); aws_mutex_unlock(&mutex); aws_socket_clean_up(&listener); aws_event_loop_destroy(event_loop); return 0; } AWS_TEST_CASE(local_socket_pipe_connected_race, s_local_socket_pipe_connected_race) #endif static int s_test_socket_validate_port(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* IPv4 - 16bit port, only bind can use 0 */ ASSERT_SUCCESS(aws_socket_validate_port_for_connect(80, AWS_SOCKET_IPV4)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(80, AWS_SOCKET_IPV4)); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_connect(0, AWS_SOCKET_IPV4)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(0, AWS_SOCKET_IPV4)); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_connect(0xFFFFFFFF, AWS_SOCKET_IPV4)); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_bind(0xFFFFFFFF, AWS_SOCKET_IPV4)); /* IPv6 - 16bit port, only bind can use 0 */ ASSERT_SUCCESS(aws_socket_validate_port_for_connect(80, AWS_SOCKET_IPV6)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(80, AWS_SOCKET_IPV6)); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_connect(0, AWS_SOCKET_IPV6)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(0, AWS_SOCKET_IPV6)); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_connect(0xFFFFFFFF, AWS_SOCKET_IPV6)); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_bind(0xFFFFFFFF, AWS_SOCKET_IPV6)); /* VSOCK - 32bit port, only bind can use VMADDR_PORT_ANY (-1U) */ ASSERT_SUCCESS(aws_socket_validate_port_for_connect(80, AWS_SOCKET_VSOCK)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(80, AWS_SOCKET_VSOCK)); ASSERT_SUCCESS(aws_socket_validate_port_for_connect(0, AWS_SOCKET_VSOCK)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(0, AWS_SOCKET_VSOCK)); ASSERT_SUCCESS(aws_socket_validate_port_for_connect(0x7FFFFFFF, AWS_SOCKET_VSOCK)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(0x7FFFFFFF, AWS_SOCKET_VSOCK)); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_connect((uint32_t)-1, AWS_SOCKET_VSOCK)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind((uint32_t)-1, AWS_SOCKET_VSOCK)); /* LOCAL - ignores port */ ASSERT_SUCCESS(aws_socket_validate_port_for_connect(0, AWS_SOCKET_LOCAL)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(0, AWS_SOCKET_LOCAL)); ASSERT_SUCCESS(aws_socket_validate_port_for_connect(80, AWS_SOCKET_LOCAL)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind(80, AWS_SOCKET_LOCAL)); ASSERT_SUCCESS(aws_socket_validate_port_for_connect((uint32_t)-1, AWS_SOCKET_LOCAL)); ASSERT_SUCCESS(aws_socket_validate_port_for_bind((uint32_t)-1, AWS_SOCKET_LOCAL)); /* invalid domain should fail */ ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_connect(80, (enum aws_socket_domain)(-1))); ASSERT_ERROR(AWS_IO_SOCKET_INVALID_ADDRESS, aws_socket_validate_port_for_bind(80, (enum aws_socket_domain)(-1))); return 0; } AWS_TEST_CASE(socket_validate_port, s_test_socket_validate_port) 
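/*
 * A minimal, self-contained sketch (not part of the test suite above) showing how the validation
 * helpers exercised by socket_validate_port might be used by calling code before attempting a
 * connect. The helper name s_sketch_validate_before_connect and the hard-coded IPv4 domain are
 * illustrative assumptions, not upstream API.
 */
#include <aws/common/error.h>
#include <aws/io/socket.h>
#include <stdio.h>

static int s_sketch_validate_before_connect(uint32_t port) {
    /* Port 0 is only meaningful for bind (the OS picks a port); reject it for connect. */
    if (aws_socket_validate_port_for_connect(port, AWS_SOCKET_IPV4)) {
        fprintf(stderr, "invalid port %u: %s\n", (unsigned)port, aws_error_debug_str(aws_last_error()));
        return AWS_OP_ERR;
    }
    return AWS_OP_SUCCESS;
}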
aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/standard_retry_test.c000066400000000000000000000366671456575232400252530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct exponential_backoff_test_data { size_t retry_count; size_t client_error_count; struct aws_event_loop_group *el_group; struct aws_retry_strategy *retry_strategy; int failure_error_code; struct aws_mutex mutex; struct aws_condition_variable cvar; bool el_group_shutdown; }; static struct exponential_backoff_test_data s_fixture_test_data = { .cvar = AWS_CONDITION_VARIABLE_INIT, .mutex = AWS_MUTEX_INIT, }; static void s_el_group_completion_callback(void *arg) { struct exponential_backoff_test_data *test_data = arg; aws_mutex_lock(&test_data->mutex); test_data->el_group_shutdown = true; aws_mutex_unlock(&test_data->mutex); aws_condition_variable_notify_one(&test_data->cvar); } static bool s_el_group_shutdown_predicate(void *arg) { struct exponential_backoff_test_data *test_data = arg; return test_data->el_group_shutdown; } static int s_fixture_setup(struct aws_allocator *allocator, void *ctx) { aws_io_library_init(allocator); struct exponential_backoff_test_data *test_data = ctx; struct aws_shutdown_callback_options shutdown_options = { .shutdown_callback_fn = s_el_group_completion_callback, .shutdown_callback_user_data = ctx, }; test_data->el_group = aws_event_loop_group_new_default(allocator, 1, &shutdown_options); ASSERT_NOT_NULL(test_data->el_group); struct aws_standard_retry_options retry_options = { .initial_bucket_capacity = 15, .backoff_retry_options = { .el_group = test_data->el_group, }, }; test_data->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options); ASSERT_NOT_NULL(test_data->retry_strategy); return AWS_OP_SUCCESS; } static int s_fixture_shutdown(struct aws_allocator *allocator, int setup_error_code, void *ctx) { (void)allocator; if (!setup_error_code) { struct exponential_backoff_test_data *test_data = ctx; aws_mutex_lock(&test_data->mutex); aws_retry_strategy_release(test_data->retry_strategy); aws_event_loop_group_release(test_data->el_group); aws_condition_variable_wait_pred(&test_data->cvar, &test_data->mutex, s_el_group_shutdown_predicate, ctx); aws_mutex_unlock(&test_data->mutex); } aws_io_library_clean_up(); return AWS_OP_SUCCESS; } static int s_test_standard_retry_strategy_setup_shutdown(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_standard_retry_strategy_setup_shutdown, s_fixture_setup, s_test_standard_retry_strategy_setup_shutdown, s_fixture_shutdown, &s_fixture_test_data); struct retry_data { struct aws_retry_token *retry_token; struct aws_retry_strategy *retry_strategy; struct aws_mutex mutex; struct aws_condition_variable cvar; int token_acquisition_error_code; int schedule_retry_error_code; struct aws_retry_token *schedule_token_value; }; static bool s_retry_token_acquisition_completed(void *arg) { struct retry_data *retry_data = arg; return retry_data->retry_token || retry_data->token_acquisition_error_code; } static void s_on_retry_token_acquired( struct aws_retry_strategy *retry_strategy, int error_code, struct aws_retry_token *token, void *user_data) { struct retry_data *retry_data = user_data; aws_mutex_lock(&retry_data->mutex); retry_data->retry_token = token; retry_data->token_acquisition_error_code = error_code; retry_data->retry_strategy = 
retry_strategy; aws_mutex_unlock(&retry_data->mutex); aws_condition_variable_notify_one(&retry_data->cvar); } static bool s_retry_ready_completion_predicate(void *arg) { struct retry_data *retry_data = arg; return retry_data->schedule_retry_error_code || retry_data->schedule_token_value; } static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { struct retry_data *retry_data = user_data; aws_mutex_lock(&retry_data->mutex); retry_data->schedule_retry_error_code = error_code; retry_data->schedule_token_value = token; aws_mutex_unlock(&retry_data->mutex); aws_condition_variable_notify_one(&retry_data->cvar); } static int s_test_standard_retry_strategy_failure_exhausts_bucket(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct exponential_backoff_test_data *test_data = ctx; struct retry_data retry_data = { .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; struct retry_data retry_data_dup_same_partition = { .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; struct aws_byte_cursor partition = aws_byte_cursor_from_c_str("us-east-1:super-badly-named-aws-service"); ASSERT_SUCCESS(aws_mutex_lock(&retry_data.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( test_data->retry_strategy, &partition, s_on_retry_token_acquired, &retry_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_token_acquisition_completed, &retry_data)); ASSERT_PTR_EQUALS(test_data->retry_strategy, retry_data.retry_strategy); ASSERT_NOT_NULL(retry_data.retry_token); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, retry_data.token_acquisition_error_code); aws_mutex_unlock(&retry_data.mutex); /* do a duplicate partition, this should take a different path since the bucket already exists. 
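The second token acquired for the same partition string shares that existing bucket, so retries scheduled on either token draw down the same capacity. 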
*/ ASSERT_SUCCESS(aws_mutex_lock(&retry_data_dup_same_partition.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( test_data->retry_strategy, &partition, s_on_retry_token_acquired, &retry_data_dup_same_partition, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data_dup_same_partition.cvar, &retry_data_dup_same_partition.mutex, s_retry_token_acquisition_completed, &retry_data_dup_same_partition)); ASSERT_PTR_EQUALS(test_data->retry_strategy, retry_data_dup_same_partition.retry_strategy); ASSERT_NOT_NULL(retry_data_dup_same_partition.retry_token); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, retry_data_dup_same_partition.token_acquisition_error_code); aws_mutex_unlock(&retry_data_dup_same_partition.mutex); /* should deduct 10 from capacity */ aws_mutex_lock(&retry_data.mutex); ASSERT_SUCCESS(aws_retry_strategy_schedule_retry( retry_data.retry_token, AWS_RETRY_ERROR_TYPE_TRANSIENT, s_on_retry_ready, &retry_data)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_ready_completion_predicate, &retry_data)); ASSERT_PTR_EQUALS(retry_data.retry_token, retry_data.schedule_token_value); ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, retry_data.schedule_retry_error_code); retry_data.schedule_retry_error_code = 0; retry_data.schedule_token_value = NULL; aws_mutex_unlock(&retry_data.mutex); /* should deduct 5 from capacity from a different token but the same partition */ aws_mutex_lock(&retry_data_dup_same_partition.mutex); ASSERT_SUCCESS(aws_retry_strategy_schedule_retry( retry_data_dup_same_partition.retry_token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &retry_data_dup_same_partition)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data_dup_same_partition.cvar, &retry_data_dup_same_partition.mutex, s_retry_ready_completion_predicate, &retry_data_dup_same_partition)); ASSERT_PTR_EQUALS(retry_data_dup_same_partition.retry_token, retry_data_dup_same_partition.schedule_token_value); ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, retry_data_dup_same_partition.schedule_retry_error_code); retry_data_dup_same_partition.schedule_retry_error_code = 0; retry_data_dup_same_partition.schedule_token_value = NULL; /* this should fail. Partition capacity was 15, we've deducted 15 already, even though 3 retries were permitted. */ ASSERT_ERROR( AWS_IO_RETRY_PERMISSION_DENIED, aws_retry_strategy_schedule_retry( retry_data.retry_token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &retry_data)); /* this should fail too even though it's a separate token, they're using the same bucket. Partition capacity was 15, * we've deducted 15 already, even though 3 retries were permitted. 
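(Between the two tokens, the transient retry cost 10 and the server-error retry cost 5, together exhausting the initial bucket capacity of 15 configured in the fixture.) 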
*/ ASSERT_ERROR( AWS_IO_RETRY_PERMISSION_DENIED, aws_retry_strategy_schedule_retry( retry_data_dup_same_partition.retry_token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &retry_data_dup_same_partition)); aws_retry_token_release(retry_data_dup_same_partition.retry_token); aws_retry_token_release(retry_data.retry_token); ASSERT_SUCCESS(aws_mutex_unlock(&retry_data_dup_same_partition.mutex)); /* verify it doesn't affect other partitions */ struct retry_data separate_partition = { .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; ASSERT_SUCCESS(aws_mutex_lock(&separate_partition.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( test_data->retry_strategy, NULL, s_on_retry_token_acquired, &separate_partition, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &separate_partition.cvar, &separate_partition.mutex, s_retry_token_acquisition_completed, &separate_partition)); ASSERT_PTR_EQUALS(test_data->retry_strategy, separate_partition.retry_strategy); ASSERT_NOT_NULL(separate_partition.retry_token); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, separate_partition.token_acquisition_error_code); ASSERT_SUCCESS(aws_retry_strategy_schedule_retry( separate_partition.retry_token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &separate_partition)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &separate_partition.cvar, &separate_partition.mutex, s_retry_ready_completion_predicate, &separate_partition)); ASSERT_PTR_EQUALS(separate_partition.retry_token, separate_partition.schedule_token_value); ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, separate_partition.schedule_retry_error_code); aws_retry_token_release(separate_partition.retry_token); ASSERT_SUCCESS(aws_mutex_unlock(&separate_partition.mutex)); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_standard_retry_strategy_failure_exhausts_bucket, s_fixture_setup, s_test_standard_retry_strategy_failure_exhausts_bucket, s_fixture_shutdown, &s_fixture_test_data); static int s_test_standard_retry_strategy_failure_recovers(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct exponential_backoff_test_data *test_data = ctx; struct retry_data retry_data = { .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; struct aws_byte_cursor partition = aws_byte_cursor_from_c_str("us-west-2:elastic-something-something-manager-manager"); ASSERT_SUCCESS(aws_mutex_lock(&retry_data.mutex)); ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( test_data->retry_strategy, &partition, s_on_retry_token_acquired, &retry_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_token_acquisition_completed, &retry_data)); ASSERT_PTR_EQUALS(test_data->retry_strategy, retry_data.retry_strategy); ASSERT_NOT_NULL(retry_data.retry_token); ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, retry_data.token_acquisition_error_code); /* should deduct 10 from capacity */ ASSERT_SUCCESS(aws_retry_strategy_schedule_retry( retry_data.retry_token, AWS_RETRY_ERROR_TYPE_TRANSIENT, s_on_retry_ready, &retry_data)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_ready_completion_predicate, &retry_data)); ASSERT_PTR_EQUALS(retry_data.retry_token, retry_data.schedule_token_value); ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, retry_data.schedule_retry_error_code); retry_data.schedule_retry_error_code = 0; retry_data.schedule_token_value = NULL; /* should deduct 5 from capacity */ ASSERT_SUCCESS(aws_retry_strategy_schedule_retry( retry_data.retry_token, 
AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &retry_data)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_ready_completion_predicate, &retry_data)); ASSERT_PTR_EQUALS(retry_data.retry_token, retry_data.schedule_token_value); ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, retry_data.schedule_retry_error_code); retry_data.schedule_retry_error_code = 0; retry_data.schedule_token_value = NULL; /* this should fail. Partition capacity was 15, we've deducted 15 already, even though 3 retries were permitted. */ ASSERT_ERROR( AWS_IO_RETRY_PERMISSION_DENIED, aws_retry_strategy_schedule_retry( retry_data.retry_token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &retry_data)); aws_retry_token_release(retry_data.retry_token); int i = 0; /* pay back 5 of them */ while (i < 5) { retry_data.token_acquisition_error_code = 0; retry_data.schedule_retry_error_code = 0; retry_data.schedule_token_value = NULL; retry_data.retry_token = NULL; /* acquire another token */ ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( test_data->retry_strategy, &partition, s_on_retry_token_acquired, &retry_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_token_acquisition_completed, &retry_data)); ASSERT_SUCCESS(aws_retry_token_record_success(retry_data.retry_token)); aws_retry_token_release(retry_data.retry_token); i++; } retry_data.token_acquisition_error_code = 0; retry_data.schedule_retry_error_code = 0; retry_data.schedule_token_value = NULL; retry_data.retry_token = NULL; /* acquire another token */ ASSERT_SUCCESS(aws_retry_strategy_acquire_retry_token( test_data->retry_strategy, &partition, s_on_retry_token_acquired, &retry_data, 0)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_token_acquisition_completed, &retry_data)); /* should now succeed */ ASSERT_SUCCESS(aws_retry_strategy_schedule_retry( retry_data.retry_token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &retry_data)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &retry_data.cvar, &retry_data.mutex, s_retry_ready_completion_predicate, &retry_data)); /* we only paid 5 back, make sure it fails again. */ ASSERT_ERROR( AWS_IO_RETRY_PERMISSION_DENIED, aws_retry_strategy_schedule_retry( retry_data.retry_token, AWS_RETRY_ERROR_TYPE_SERVER_ERROR, s_on_retry_ready, &retry_data)); aws_retry_token_release(retry_data.retry_token); ASSERT_SUCCESS(aws_mutex_unlock(&retry_data.mutex)); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( test_standard_retry_strategy_failure_recovers, s_fixture_setup, s_test_standard_retry_strategy_failure_recovers, s_fixture_shutdown, &s_fixture_test_data); aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/statistics_handler_test.c000066400000000000000000000066431456575232400261040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "statistics_handler_test.h" #include #include #include #include #include #include static void s_process_statistics( struct aws_crt_statistics_handler *handler, struct aws_crt_statistics_sample_interval *interval, struct aws_array_list *stats_list, void *context) { (void)context; struct aws_statistics_handler_test_impl *impl = handler->impl; aws_mutex_lock(&impl->lock); if (impl->start_time_ns == 0) { impl->start_time_ns = aws_timestamp_convert(interval->begin_time_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); } size_t stats_count = aws_array_list_length(stats_list); for (size_t i = 0; i < stats_count; ++i) { struct aws_crt_statistics_base *stats_base = NULL; if (aws_array_list_get_at(stats_list, &stats_base, i)) { continue; } switch (stats_base->category) { case AWSCRT_STAT_CAT_SOCKET: { struct aws_crt_statistics_socket *socket_stats = (struct aws_crt_statistics_socket *)stats_base; impl->total_bytes_read += socket_stats->bytes_read; impl->total_bytes_written += socket_stats->bytes_written; break; } case AWSCRT_STAT_CAT_TLS: { struct aws_crt_statistics_tls *tls_stats = (struct aws_crt_statistics_tls *)stats_base; impl->tls_status = tls_stats->handshake_status; break; } default: break; } } aws_mutex_unlock(&impl->lock); aws_condition_variable_notify_one(&impl->signal); } static void s_destroy_handler(struct aws_crt_statistics_handler *handler) { struct aws_statistics_handler_test_impl *impl = handler->impl; aws_mutex_clean_up(&impl->lock); aws_condition_variable_clean_up(&impl->signal); /* impl and handler allocated via acquire_many */ aws_mem_release(handler->allocator, handler); } static uint64_t s_get_report_interval_ms(struct aws_crt_statistics_handler *handler) { (void)handler; /* * Making this a very small number means the stat task will be in the very near future and thus a very * short wait. */ return 1; } static struct aws_crt_statistics_handler_vtable s_test_statistics_handler_vtable = { .process_statistics = s_process_statistics, .destroy = s_destroy_handler, .get_report_interval_ms = s_get_report_interval_ms}; struct aws_crt_statistics_handler *aws_statistics_handler_new_test(struct aws_allocator *allocator) { struct aws_crt_statistics_handler *handler = NULL; struct aws_statistics_handler_test_impl *impl = NULL; if (!aws_mem_acquire_many( allocator, 2, &handler, sizeof(struct aws_crt_statistics_handler), &impl, sizeof(struct aws_statistics_handler_test_impl))) { return NULL; } AWS_ZERO_STRUCT(*handler); AWS_ZERO_STRUCT(*impl); aws_mutex_init(&impl->lock); aws_condition_variable_init(&impl->signal); handler->vtable = &s_test_statistics_handler_vtable; handler->allocator = allocator; handler->impl = impl; return handler; } aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/statistics_handler_test.h000066400000000000000000000012621456575232400261010ustar00rootroot00000000000000#ifndef STATISTICS_HANDLER_TEST_H #define STATISTICS_HANDLER_TEST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include struct aws_statistics_handler_test_impl { uint64_t start_time_ns; uint64_t total_bytes_read; uint64_t total_bytes_written; enum aws_tls_negotiation_status tls_status; struct aws_mutex lock; struct aws_condition_variable signal; }; struct aws_crt_statistics_handler *aws_statistics_handler_new_test(struct aws_allocator *allocator); #endif // STATISTICS_HANDLER_TEST_H aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/stream_test.c000066400000000000000000000363631456575232400235120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #ifdef _WIN32 # include #endif AWS_STATIC_STRING_FROM_LITERAL(s_simple_test, "SimpleTest"); /* 0x1A represents the Windows end-of-file character. Having this in the test data set allows us to verify that file * stream reads on binary files do not terminate early on Windows.*/ const uint8_t s_simple_binary_test[] = {'a', 'b', 'c', 'd', 'e', 'f', 0x1A, 'g', 'h', 'i', 'j', 'k'}; static struct aws_input_stream *s_create_memory_stream(struct aws_allocator *allocator) { struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); return aws_input_stream_new_from_cursor(allocator, &test_cursor); } static void s_destroy_memory_stream(struct aws_input_stream *stream) { aws_input_stream_destroy(stream); } static struct aws_input_stream *s_create_file_stream(struct aws_allocator *allocator, const char *file_path) { remove(file_path); FILE *file = aws_fopen(file_path, "w"); fprintf(file, "%s", (char *)s_simple_test->bytes); fclose(file); return aws_input_stream_new_from_file(allocator, file_path); } static struct aws_input_stream *s_create_binary_file_stream(struct aws_allocator *allocator, const char *file_path) { remove(file_path); FILE *file = aws_fopen(file_path, "wb"); fwrite(s_simple_binary_test, sizeof(uint8_t), sizeof(s_simple_binary_test), file); fclose(file); return aws_input_stream_new_from_file(allocator, file_path); } static struct aws_input_stream *s_create_read_only_file_stream(struct aws_allocator *allocator, const char *file_path) { remove(file_path); FILE *file = aws_fopen(file_path, "w"); fprintf(file, "%s", (char *)s_simple_test->bytes); fclose(file); #ifdef _WIN32 if (_chmod(file_path, _S_IREAD)) { return NULL; } #else if (chmod(file_path, S_IRUSR | S_IRGRP | S_IROTH)) { return NULL; } #endif return aws_input_stream_new_from_file(allocator, file_path); } static void s_destroy_file_stream(struct aws_input_stream *stream, const char *file_path) { aws_input_stream_destroy(stream); remove(file_path); } static int s_do_simple_input_stream_test( struct aws_input_stream *stream, struct aws_allocator *allocator, size_t read_buf_size, struct aws_byte_cursor *expected_contents) { struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, read_buf_size); struct aws_byte_buf result_buf; aws_byte_buf_init(&result_buf, allocator, 1024); struct aws_stream_status status; AWS_ZERO_STRUCT(status); ASSERT_TRUE(aws_input_stream_get_status(stream, &status) == 0); ASSERT_TRUE(status.is_end_of_stream == false); while (!status.is_end_of_stream) { const size_t starting_len = read_buf.len; ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); if (starting_len - read_buf.len > 0) { struct aws_byte_cursor dest_cursor = aws_byte_cursor_from_buf(&read_buf); aws_byte_buf_append_dynamic(&result_buf, &dest_cursor); } read_buf.len = 0; ASSERT_TRUE(aws_input_stream_get_status(stream, &status) == 
0); } struct aws_byte_cursor result_cursor = aws_byte_cursor_from_buf(&result_buf); ASSERT_TRUE(aws_byte_cursor_eq(expected_contents, &result_cursor)); aws_byte_buf_clean_up(&read_buf); aws_byte_buf_clean_up(&result_buf); return AWS_OP_SUCCESS; } static int s_test_input_stream_memory_simple(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_input_stream *stream = s_create_memory_stream(allocator); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); ASSERT_TRUE(s_do_simple_input_stream_test(stream, allocator, 100, &test_cursor) == AWS_OP_SUCCESS); s_destroy_memory_stream(stream); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_memory_simple, s_test_input_stream_memory_simple); static int s_test_input_stream_memory_iterate(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_input_stream *stream = s_create_memory_stream(allocator); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); ASSERT_TRUE(s_do_simple_input_stream_test(stream, allocator, 2, &test_cursor) == AWS_OP_SUCCESS); s_destroy_memory_stream(stream); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_memory_iterate, s_test_input_stream_memory_iterate); static int s_test_input_stream_file_simple(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *file_path = "test_input_stream_file_simple.txt"; /* unique name */ struct aws_input_stream *stream = s_create_file_stream(allocator, file_path); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); ASSERT_TRUE(s_do_simple_input_stream_test(stream, allocator, 100, &test_cursor) == AWS_OP_SUCCESS); s_destroy_file_stream(stream, file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_file_simple, s_test_input_stream_file_simple); static int s_test_input_stream_file_iterate(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *file_path = "test_input_stream_file_iterate.txt"; /* unique name */ struct aws_input_stream *stream = s_create_file_stream(allocator, file_path); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); ASSERT_TRUE(s_do_simple_input_stream_test(stream, allocator, 2, &test_cursor) == AWS_OP_SUCCESS); s_destroy_file_stream(stream, file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_file_iterate, s_test_input_stream_file_iterate); static int s_do_input_stream_seek_test( struct aws_input_stream *stream, struct aws_allocator *allocator, int64_t offset, enum aws_stream_seek_basis basis, struct aws_byte_cursor *expected_contents) { struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, 1024); ASSERT_SUCCESS(aws_input_stream_seek(stream, offset, basis)); ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); struct aws_byte_cursor read_buf_cursor = aws_byte_cursor_from_buf(&read_buf); ASSERT_TRUE(aws_byte_cursor_eq(expected_contents, &read_buf_cursor)); aws_byte_buf_clean_up(&read_buf); return AWS_OP_SUCCESS; } #define SEEK_BEGINNING_OFFSET 5 static int s_test_input_stream_memory_seek_beginning(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_input_stream *stream = s_create_memory_stream(allocator); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); aws_byte_cursor_advance(&test_cursor, SEEK_BEGINNING_OFFSET); ASSERT_TRUE( s_do_input_stream_seek_test(stream, allocator, SEEK_BEGINNING_OFFSET, AWS_SSB_BEGIN, &test_cursor) == AWS_OP_SUCCESS); s_destroy_memory_stream(stream); return AWS_OP_SUCCESS; } 
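/*
 * The seek tests that follow use two bases: AWS_SSB_BEGIN (offset measured forward from the start)
 * and AWS_SSB_END (offset measured from the end, so negative values address the tail). A small
 * illustrative sketch of that usage, not one of the registered tests; the payload string and the
 * helper name are arbitrary assumptions.
 */
static int s_sketch_seek_then_read(struct aws_allocator *allocator) {
    struct aws_byte_cursor src = aws_byte_cursor_from_c_str("0123456789");
    struct aws_input_stream *stream = aws_input_stream_new_from_cursor(allocator, &src);

    char byte = 0;
    struct aws_byte_buf one_byte = aws_byte_buf_from_empty_array(&byte, 1);

    /* Land on '7' two different ways: 7 forward from the beginning, or -3 back from the end. */
    int result = aws_input_stream_seek(stream, 7, AWS_SSB_BEGIN);
    if (!result) {
        result = aws_input_stream_read(stream, &one_byte); /* byte == '7' on success */
    }
    if (!result) {
        one_byte.len = 0;
        result = aws_input_stream_seek(stream, -3, AWS_SSB_END);
    }
    if (!result) {
        result = aws_input_stream_read(stream, &one_byte); /* byte == '7' again */
    }

    aws_input_stream_destroy(stream);
    return result;
}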
AWS_TEST_CASE(test_input_stream_memory_seek_beginning, s_test_input_stream_memory_seek_beginning); static int s_test_input_stream_file_seek_beginning(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *file_path = "test_input_stream_file_seek_beginning.txt"; /* unique name */ struct aws_input_stream *stream = s_create_file_stream(allocator, file_path); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); aws_byte_cursor_advance(&test_cursor, SEEK_BEGINNING_OFFSET); ASSERT_TRUE( s_do_input_stream_seek_test(stream, allocator, SEEK_BEGINNING_OFFSET, AWS_SSB_BEGIN, &test_cursor) == AWS_OP_SUCCESS); s_destroy_file_stream(stream, file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_file_seek_beginning, s_test_input_stream_file_seek_beginning); #define SEEK_END_OFFSET (-3) static int s_test_input_stream_memory_seek_end(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_input_stream *stream = s_create_memory_stream(allocator); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); aws_byte_cursor_advance(&test_cursor, (size_t)((int64_t)s_simple_test->len + SEEK_END_OFFSET)); ASSERT_TRUE( s_do_input_stream_seek_test(stream, allocator, SEEK_END_OFFSET, AWS_SSB_END, &test_cursor) == AWS_OP_SUCCESS); s_destroy_memory_stream(stream); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_memory_seek_end, s_test_input_stream_memory_seek_end); static int s_test_input_stream_memory_seek_multiple_times(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *src_cstr = "0123456789"; const struct aws_byte_cursor src_cursor = aws_byte_cursor_from_c_str(src_cstr); struct aws_input_stream *stream = aws_input_stream_new_from_cursor(allocator, &src_cursor); char read_byte = 0; struct aws_byte_buf read_buf = aws_byte_buf_from_empty_array(&read_byte, 1); /* Seek to BEGIN + 2. Read "2" */ ASSERT_SUCCESS(aws_input_stream_seek(stream, 2, AWS_SSB_BEGIN)); read_buf.len = 0; ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); ASSERT_INT_EQUALS('2', read_byte); /* Seek to BEGIN + 4. Read "4" */ ASSERT_SUCCESS(aws_input_stream_seek(stream, 4, AWS_SSB_BEGIN)); read_buf.len = 0; ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); ASSERT_INT_EQUALS('4', read_byte); /* Seek to END - 1. Read "9" */ ASSERT_SUCCESS(aws_input_stream_seek(stream, -1, AWS_SSB_END)); read_buf.len = 0; ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); ASSERT_INT_EQUALS('9', read_byte); /* Seek to END - 5. Read "5" */ ASSERT_SUCCESS(aws_input_stream_seek(stream, -5, AWS_SSB_END)); read_buf.len = 0; ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); ASSERT_INT_EQUALS('5', read_byte); /* Seek to BEGIN + 0. 
Read "0" */ ASSERT_SUCCESS(aws_input_stream_seek(stream, 0, AWS_SSB_BEGIN)); read_buf.len = 0; ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); ASSERT_INT_EQUALS('0', read_byte); aws_input_stream_destroy(stream); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_memory_seek_multiple_times, s_test_input_stream_memory_seek_multiple_times); static int s_test_input_stream_file_seek_end(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *file_path = "test_input_stream_file_seek_end.txt"; /* unique name */ struct aws_input_stream *stream = s_create_file_stream(allocator, file_path); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); aws_byte_cursor_advance(&test_cursor, (size_t)((int64_t)s_simple_test->len + SEEK_END_OFFSET)); ASSERT_TRUE( s_do_input_stream_seek_test(stream, allocator, SEEK_END_OFFSET, AWS_SSB_END, &test_cursor) == AWS_OP_SUCCESS); s_destroy_file_stream(stream, file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_file_seek_end, s_test_input_stream_file_seek_end); static int s_test_input_stream_memory_seek_past_end(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_input_stream *stream = s_create_memory_stream(allocator); ASSERT_TRUE(aws_input_stream_seek(stream, 13, AWS_SSB_BEGIN) == AWS_OP_ERR); ASSERT_TRUE(aws_last_error() == AWS_IO_STREAM_INVALID_SEEK_POSITION); aws_reset_error(); ASSERT_TRUE(aws_input_stream_seek(stream, 1, AWS_SSB_END) == AWS_OP_ERR); ASSERT_TRUE(aws_last_error() == AWS_IO_STREAM_INVALID_SEEK_POSITION); s_destroy_memory_stream(stream); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_memory_seek_past_end, s_test_input_stream_memory_seek_past_end); static int s_test_input_stream_memory_seek_before_start(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_input_stream *stream = s_create_memory_stream(allocator); ASSERT_TRUE(aws_input_stream_seek(stream, -13, AWS_SSB_END) == AWS_OP_ERR); ASSERT_TRUE(aws_last_error() == AWS_IO_STREAM_INVALID_SEEK_POSITION); aws_reset_error(); ASSERT_TRUE(aws_input_stream_seek(stream, -1, AWS_SSB_BEGIN) == AWS_OP_ERR); ASSERT_TRUE(aws_last_error() == AWS_IO_STREAM_INVALID_SEEK_POSITION); s_destroy_memory_stream(stream); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_memory_seek_before_start, s_test_input_stream_memory_seek_before_start); #define LENGTH_SEEK_OFFSET 3 static int s_test_input_stream_memory_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_input_stream *stream = s_create_memory_stream(allocator); int64_t length = 0; ASSERT_TRUE(aws_input_stream_get_length(stream, &length) == AWS_OP_SUCCESS); ASSERT_TRUE(length == (int64_t)s_simple_test->len); /* invariant under seeking */ aws_input_stream_seek(stream, LENGTH_SEEK_OFFSET, AWS_SSB_BEGIN); ASSERT_TRUE(aws_input_stream_get_length(stream, &length) == AWS_OP_SUCCESS); ASSERT_TRUE(length == (int64_t)s_simple_test->len); s_destroy_memory_stream(stream); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_memory_length, s_test_input_stream_memory_length); static int s_test_input_stream_file_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *file_path = "test_input_stream_file_length.txt"; /* unique name */ struct aws_input_stream *stream = s_create_file_stream(allocator, file_path); int64_t length = 0; ASSERT_TRUE(aws_input_stream_get_length(stream, &length) == AWS_OP_SUCCESS); ASSERT_TRUE(length == (int64_t)s_simple_test->len); /* invariant under seeking */ aws_input_stream_seek(stream, 
LENGTH_SEEK_OFFSET, AWS_SSB_BEGIN); ASSERT_TRUE(aws_input_stream_get_length(stream, &length) == AWS_OP_SUCCESS); ASSERT_TRUE(length == (int64_t)s_simple_test->len); s_destroy_file_stream(stream, file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_file_length, s_test_input_stream_file_length); static int s_test_input_stream_binary(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *file_path = "test_input_stream_binary.dat"; /* unique name */ struct aws_input_stream *stream = s_create_binary_file_stream(allocator, file_path); struct aws_byte_cursor test_cursor = { .ptr = (uint8_t *)s_simple_binary_test, .len = sizeof(s_simple_binary_test), }; ASSERT_TRUE(s_do_simple_input_stream_test(stream, allocator, 100, &test_cursor) == AWS_OP_SUCCESS); s_destroy_file_stream(stream, file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_binary, s_test_input_stream_binary); static int s_test_input_stream_read_only(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *file_path = "test_input_stream_read_only.txt"; /* unique name */ struct aws_input_stream *stream = s_create_read_only_file_stream(allocator, file_path); ASSERT_NOT_NULL(stream); struct aws_byte_cursor test_cursor = aws_byte_cursor_from_string(s_simple_test); ASSERT_TRUE(s_do_simple_input_stream_test(stream, allocator, 100, &test_cursor) == AWS_OP_SUCCESS); s_destroy_file_stream(stream, file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_input_stream_read_only, s_test_input_stream_read_only); aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/tls_handler_test.c000066400000000000000000002621221456575232400245100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #ifndef BYO_CRYPTO # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include struct tls_test_args { struct aws_allocator *allocator; struct aws_mutex *mutex; struct aws_condition_variable *condition_variable; struct aws_tls_connection_options *tls_options; struct aws_channel *channel; struct aws_channel_handler *rw_handler; struct aws_channel_slot *rw_slot; struct aws_byte_buf negotiated_protocol; struct aws_byte_buf server_name; int last_error_code; uint32_t tls_levels_negotiated; uint32_t desired_tls_levels; bool listener_destroyed; bool error_invoked; bool expects_error; bool server; bool shutdown_finished; bool setup_callback_invoked; bool creation_callback_invoked; }; /* common structure for tls options */ struct tls_opt_tester { struct aws_tls_ctx_options ctx_options; struct aws_tls_ctx *ctx; struct aws_tls_connection_options opt; }; static int s_tls_server_opt_tester_init( struct aws_allocator *allocator, struct tls_opt_tester *tester, const char *cert_path, const char *pkey_path) { # ifdef __APPLE__ struct aws_byte_cursor pwd_cur = aws_byte_cursor_from_c_str("1234"); ASSERT_SUCCESS( aws_tls_ctx_options_init_server_pkcs12_from_path(&tester->ctx_options, allocator, "unittests.p12", &pwd_cur)); # else ASSERT_SUCCESS( aws_tls_ctx_options_init_default_server_from_path(&tester->ctx_options, allocator, cert_path, pkey_path)); ASSERT_SUCCESS( aws_tls_ctx_options_override_default_trust_store_from_path(&tester->ctx_options, NULL, "ca_root.crt")); # endif /* __APPLE__ */ aws_tls_ctx_options_set_alpn_list(&tester->ctx_options, "h2;http/1.1"); tester->ctx = aws_tls_server_ctx_new(allocator, &tester->ctx_options); ASSERT_NOT_NULL(tester->ctx); aws_tls_connection_options_init_from_ctx(&tester->opt, tester->ctx); return AWS_OP_SUCCESS; } static int s_tls_client_opt_tester_init( struct aws_allocator *allocator, struct tls_opt_tester *tester, struct aws_byte_cursor server_name) { aws_io_library_init(allocator); aws_tls_ctx_options_init_default_client(&tester->ctx_options, allocator); # ifdef __APPLE__ ASSERT_SUCCESS( aws_tls_ctx_options_override_default_trust_store_from_path(&tester->ctx_options, NULL, "unittests.crt")); # else ASSERT_SUCCESS( aws_tls_ctx_options_override_default_trust_store_from_path(&tester->ctx_options, NULL, "ca_root.crt")); # endif /* __APPLE__ */ tester->ctx = aws_tls_client_ctx_new(allocator, &tester->ctx_options); aws_tls_connection_options_init_from_ctx(&tester->opt, tester->ctx); aws_tls_connection_options_set_alpn_list(&tester->opt, allocator, "h2;http/1.1"); aws_tls_connection_options_set_server_name(&tester->opt, allocator, &server_name); return AWS_OP_SUCCESS; } static int s_tls_opt_tester_clean_up(struct tls_opt_tester *tester) { aws_tls_connection_options_clean_up(&tester->opt); aws_tls_ctx_options_clean_up(&tester->ctx_options); aws_tls_ctx_release(tester->ctx); return AWS_OP_SUCCESS; } /* common structure for test */ struct tls_common_tester { struct aws_mutex mutex; struct aws_condition_variable condition_variable; struct aws_event_loop_group *el_group; struct aws_host_resolver *resolver; struct aws_atomic_var current_time_ns; struct aws_atomic_var stats_handler; }; static struct tls_common_tester c_tester; /* common structure for a tls local server */ struct tls_local_server_tester { struct aws_socket_options socket_options; struct tls_opt_tester server_tls_opt_tester; struct aws_socket_endpoint endpoint; struct aws_server_bootstrap *server_bootstrap; struct aws_socket 
*listener; }; static int s_tls_test_arg_init( struct aws_allocator *allocator, struct tls_test_args *test_arg, bool server, struct tls_common_tester *tls_c_tester) { AWS_ZERO_STRUCT(*test_arg); test_arg->mutex = &tls_c_tester->mutex; test_arg->condition_variable = &tls_c_tester->condition_variable; test_arg->allocator = allocator; test_arg->server = server; test_arg->desired_tls_levels = 1; return AWS_OP_SUCCESS; } static int s_tls_common_tester_init(struct aws_allocator *allocator, struct tls_common_tester *tester) { AWS_ZERO_STRUCT(*tester); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; tester->mutex = mutex; tester->condition_variable = condition_variable; aws_atomic_store_int(&tester->current_time_ns, 0); aws_atomic_store_ptr(&tester->stats_handler, NULL); tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->el_group, .max_entries = 1, }; tester->resolver = aws_host_resolver_new_default(allocator, &resolver_options); return AWS_OP_SUCCESS; } static int s_tls_common_tester_clean_up(struct tls_common_tester *tester) { aws_host_resolver_release(tester->resolver); aws_event_loop_group_release(tester->el_group); aws_io_library_clean_up(); aws_condition_variable_clean_up(&tester->condition_variable); aws_mutex_clean_up(&tester->mutex); return AWS_OP_SUCCESS; } static bool s_tls_channel_shutdown_predicate(void *user_data) { struct tls_test_args *setup_test_args = user_data; return setup_test_args->shutdown_finished || setup_test_args->last_error_code == AWS_IO_SOCKET_TIMEOUT || (setup_test_args->expects_error && setup_test_args->error_invoked); } static bool s_tls_listener_destroy_predicate(void *user_data) { struct tls_test_args *setup_test_args = user_data; return setup_test_args->listener_destroyed || setup_test_args->last_error_code == AWS_IO_SOCKET_TIMEOUT; } static bool s_tls_channel_setup_predicate(void *user_data) { struct tls_test_args *setup_test_args = user_data; return (setup_test_args->tls_levels_negotiated == setup_test_args->desired_tls_levels && setup_test_args->setup_callback_invoked) || setup_test_args->error_invoked; } /* * test args mutex must be held before calling this function */ static void s_aws_check_for_user_handler_setup(struct tls_test_args *setup_test_args) { if (setup_test_args->tls_levels_negotiated == setup_test_args->desired_tls_levels && setup_test_args->setup_callback_invoked) { if (setup_test_args->rw_handler) { struct aws_channel *channel = setup_test_args->channel; struct aws_channel_slot *rw_slot = aws_channel_slot_new(channel); aws_channel_slot_insert_end(channel, rw_slot); aws_channel_slot_set_handler(rw_slot, setup_test_args->rw_handler); setup_test_args->rw_slot = rw_slot; } } } static int s_add_tls_handler_to_end_of_channel(struct tls_test_args *setup_test_args) { AWS_FATAL_ASSERT(setup_test_args->desired_tls_levels > 1); AWS_FATAL_ASSERT(!setup_test_args->server); struct aws_channel_slot *last_slot = aws_channel_get_first_slot(setup_test_args->channel); while (last_slot->adj_right) { last_slot = last_slot->adj_right; } return aws_channel_setup_client_tls(last_slot, setup_test_args->tls_options); } static int s_on_channel_setup_next_tls_handler(struct tls_test_args *setup_test_args) { if (setup_test_args->tls_levels_negotiated < setup_test_args->desired_tls_levels) { ASSERT_SUCCESS(s_add_tls_handler_to_end_of_channel(setup_test_args)); } return AWS_OP_SUCCESS; } static int 
s_on_tls_negotiated_next_tls_handler(struct tls_test_args *setup_test_args) { if (!setup_test_args->setup_callback_invoked) { return AWS_OP_SUCCESS; } if (setup_test_args->tls_levels_negotiated < setup_test_args->desired_tls_levels) { ASSERT_SUCCESS(s_add_tls_handler_to_end_of_channel(setup_test_args)); } return AWS_OP_SUCCESS; } static void s_tls_handler_test_client_setup_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct tls_test_args *setup_test_args = user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->setup_callback_invoked = true; if (!error_code) { setup_test_args->channel = channel; s_aws_check_for_user_handler_setup(setup_test_args); s_on_channel_setup_next_tls_handler(setup_test_args); } else { setup_test_args->error_invoked = true; setup_test_args->last_error_code = error_code; } aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_tls_handler_test_server_setup_callback( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct tls_test_args *setup_test_args = (struct tls_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->setup_callback_invoked = true; if (!error_code) { setup_test_args->channel = channel; } else { setup_test_args->error_invoked = true; setup_test_args->last_error_code = error_code; } s_aws_check_for_user_handler_setup(setup_test_args); aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_tls_handler_test_client_shutdown_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; (void)channel; struct tls_test_args *setup_test_args = (struct tls_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->shutdown_finished = true; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_tls_handler_test_server_shutdown_callback( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; (void)channel; struct tls_test_args *setup_test_args = (struct tls_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->shutdown_finished = true; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_tls_handler_test_server_listener_destroy_callback( struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; struct tls_test_args *setup_test_args = (struct tls_test_args *)user_data; aws_mutex_lock(setup_test_args->mutex); setup_test_args->listener_destroyed = true; aws_mutex_unlock(setup_test_args->mutex); aws_condition_variable_notify_one(setup_test_args->condition_variable); } static void s_tls_on_negotiated( struct aws_channel_handler *handler, struct aws_channel_slot *slot, int err_code, void *user_data) { (void)slot; struct tls_test_args *setup_test_args = (struct tls_test_args *)user_data; if (!err_code) { aws_mutex_lock(setup_test_args->mutex); if (aws_tls_is_alpn_available()) { setup_test_args->negotiated_protocol = aws_tls_handler_protocol(handler); } setup_test_args->server_name = aws_tls_handler_server_name(handler); 
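/* Count this completed negotiation; the multi-level (TLS-over-TLS) tests keep appending client TLS handlers via s_add_tls_handler_to_end_of_channel() until desired_tls_levels is reached. */ 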
++setup_test_args->tls_levels_negotiated; s_aws_check_for_user_handler_setup(setup_test_args); s_on_tls_negotiated_next_tls_handler(setup_test_args); aws_mutex_unlock(setup_test_args->mutex); } aws_condition_variable_notify_one(setup_test_args->condition_variable); } static int s_tls_local_server_tester_init( struct aws_allocator *allocator, struct tls_local_server_tester *tester, struct tls_test_args *args, struct tls_common_tester *tls_c_tester, bool enable_back_pressure, const char *cert_path, const char *pkey_path) { AWS_ZERO_STRUCT(*tester); ASSERT_SUCCESS(s_tls_server_opt_tester_init(allocator, &tester->server_tls_opt_tester, cert_path, pkey_path)); aws_tls_connection_options_set_callbacks(&tester->server_tls_opt_tester.opt, s_tls_on_negotiated, NULL, NULL, args); tester->socket_options.connect_timeout_ms = 3000; tester->socket_options.type = AWS_SOCKET_STREAM; tester->socket_options.domain = AWS_SOCKET_LOCAL; aws_socket_endpoint_init_local_address_for_test(&tester->endpoint); tester->server_bootstrap = aws_server_bootstrap_new(allocator, tls_c_tester->el_group); ASSERT_NOT_NULL(tester->server_bootstrap); struct aws_server_socket_channel_bootstrap_options bootstrap_options = { .bootstrap = tester->server_bootstrap, .enable_read_back_pressure = enable_back_pressure, .port = tester->endpoint.port, .host_name = tester->endpoint.address, .socket_options = &tester->socket_options, .incoming_callback = s_tls_handler_test_server_setup_callback, .shutdown_callback = s_tls_handler_test_server_shutdown_callback, .destroy_callback = s_tls_handler_test_server_listener_destroy_callback, .tls_options = &tester->server_tls_opt_tester.opt, .user_data = args, }; tester->listener = aws_server_bootstrap_new_socket_listener(&bootstrap_options); ASSERT_NOT_NULL(tester->listener); return AWS_OP_SUCCESS; } static int s_tls_local_server_tester_clean_up(struct tls_local_server_tester *tester) { ASSERT_SUCCESS(s_tls_opt_tester_clean_up(&tester->server_tls_opt_tester)); aws_server_bootstrap_release(tester->server_bootstrap); return AWS_OP_SUCCESS; } struct tls_test_rw_args { struct aws_mutex *mutex; struct aws_condition_variable *condition_variable; struct aws_byte_buf received_message; int read_invocations; bool invocation_happened; }; static int s_tls_rw_args_init( struct tls_test_rw_args *args, struct tls_common_tester *tls_c_tester, struct aws_byte_buf received_message) { AWS_ZERO_STRUCT(*args); args->mutex = &tls_c_tester->mutex; args->condition_variable = &tls_c_tester->condition_variable; args->received_message = received_message; return AWS_OP_SUCCESS; } static bool s_tls_test_read_predicate(void *user_data) { struct tls_test_rw_args *rw_args = (struct tls_test_rw_args *)user_data; return rw_args->invocation_happened; } static struct aws_byte_buf s_tls_test_handle_read( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *data_read, void *user_data) { (void)handler; (void)slot; struct tls_test_rw_args *rw_args = (struct tls_test_rw_args *)user_data; aws_mutex_lock(rw_args->mutex); aws_byte_buf_write_from_whole_buffer(&rw_args->received_message, *data_read); rw_args->read_invocations += 1; rw_args->invocation_happened = true; aws_mutex_unlock(rw_args->mutex); aws_condition_variable_notify_one(rw_args->condition_variable); return rw_args->received_message; } static struct aws_byte_buf s_tls_test_handle_write( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_byte_buf *data_read, void *user_data) { (void)handler; (void)slot; (void)data_read; 
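/* The write-side test handler is intentionally a no-op; these tests only assert on the bytes captured by the read handler. */ 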
(void)user_data; /*do nothing*/ return (struct aws_byte_buf){0}; } static int s_tls_channel_echo_and_backpressure_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct aws_byte_buf read_tag = aws_byte_buf_from_c_str("I'm a little teapot."); struct aws_byte_buf write_tag = aws_byte_buf_from_c_str("I'm a big teapot"); uint8_t incoming_received_message[128] = {0}; uint8_t outgoing_received_message[128] = {0}; struct tls_test_rw_args incoming_rw_args; ASSERT_SUCCESS(s_tls_rw_args_init( &incoming_rw_args, &c_tester, aws_byte_buf_from_empty_array(incoming_received_message, sizeof(incoming_received_message)))); struct tls_test_rw_args outgoing_rw_args; ASSERT_SUCCESS(s_tls_rw_args_init( &outgoing_rw_args, &c_tester, aws_byte_buf_from_empty_array(outgoing_received_message, sizeof(outgoing_received_message)))); struct tls_test_args outgoing_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &outgoing_args, false, &c_tester)); struct tls_test_args incoming_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &incoming_args, true, &c_tester)); struct tls_local_server_tester local_server_tester; ASSERT_SUCCESS(s_tls_local_server_tester_init( allocator, &local_server_tester, &incoming_args, &c_tester, true, "server.crt", "server.key")); /* make the windows small to make sure back pressure is honored. */ struct aws_channel_handler *outgoing_rw_handler = rw_handler_new( allocator, s_tls_test_handle_read, s_tls_test_handle_write, true, write_tag.len / 2, &outgoing_rw_args); ASSERT_NOT_NULL(outgoing_rw_handler); struct aws_channel_handler *incoming_rw_handler = rw_handler_new( allocator, s_tls_test_handle_read, s_tls_test_handle_write, true, read_tag.len / 2, &incoming_rw_args); ASSERT_NOT_NULL(incoming_rw_handler); incoming_args.rw_handler = incoming_rw_handler; outgoing_args.rw_handler = outgoing_rw_handler; g_aws_channel_max_fragment_size = 4096; struct tls_opt_tester client_tls_opt_tester; struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str("localhost"); ASSERT_SUCCESS(s_tls_client_opt_tester_init(allocator, &client_tls_opt_tester, server_name)); aws_tls_connection_options_set_callbacks( &client_tls_opt_tester.opt, s_tls_on_negotiated, NULL, NULL, &outgoing_args); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = c_tester.resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = local_server_tester.endpoint.address; channel_options.port = 0; channel_options.socket_options = &local_server_tester.socket_options; channel_options.tls_options = &client_tls_opt_tester.opt; channel_options.setup_callback = s_tls_handler_test_client_setup_callback; channel_options.shutdown_callback = s_tls_handler_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; channel_options.enable_read_back_pressure = true; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* put this here to verify ownership semantics are correct. This should NOT cause a segfault. If it does, ya * done messed up. 
*/ aws_tls_connection_options_clean_up(&client_tls_opt_tester.opt); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_FALSE(incoming_args.error_invoked); /* currently it seems ALPN doesn't work in server mode. Just leaving this check out for now. */ # ifndef __APPLE__ struct aws_byte_buf expected_protocol = aws_byte_buf_from_c_str("h2"); /* check ALPN and SNI was properly negotiated */ if (aws_tls_is_alpn_available()) { ASSERT_BIN_ARRAYS_EQUALS( expected_protocol.buffer, expected_protocol.len, incoming_args.negotiated_protocol.buffer, incoming_args.negotiated_protocol.len); } # endif ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_FALSE(outgoing_args.error_invoked); /* currently it seems ALPN doesn't work in server mode. Just leaving this check out for now. */ # ifndef __MACH__ if (aws_tls_is_alpn_available()) { ASSERT_BIN_ARRAYS_EQUALS( expected_protocol.buffer, expected_protocol.len, outgoing_args.negotiated_protocol.buffer, outgoing_args.negotiated_protocol.len); } # endif ASSERT_FALSE(outgoing_args.error_invoked); /* Do the IO operations */ rw_handler_write(outgoing_args.rw_handler, outgoing_args.rw_slot, &write_tag); rw_handler_write(incoming_args.rw_handler, incoming_args.rw_slot, &read_tag); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_test_read_predicate, &incoming_rw_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_test_read_predicate, &outgoing_rw_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); incoming_rw_args.invocation_happened = false; outgoing_rw_args.invocation_happened = false; ASSERT_INT_EQUALS(1, outgoing_rw_args.read_invocations); ASSERT_INT_EQUALS(1, incoming_rw_args.read_invocations); /* Go ahead and verify back-pressure works*/ rw_handler_trigger_increment_read_window(incoming_args.rw_handler, incoming_args.rw_slot, 100); rw_handler_trigger_increment_read_window(outgoing_args.rw_handler, outgoing_args.rw_slot, 100); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_test_read_predicate, &incoming_rw_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_test_read_predicate, &outgoing_rw_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_INT_EQUALS(2, outgoing_rw_args.read_invocations); ASSERT_INT_EQUALS(2, incoming_rw_args.read_invocations); ASSERT_BIN_ARRAYS_EQUALS( write_tag.buffer, write_tag.len, incoming_rw_args.received_message.buffer, incoming_rw_args.received_message.len); ASSERT_BIN_ARRAYS_EQUALS( read_tag.buffer, read_tag.len, outgoing_rw_args.received_message.buffer, outgoing_rw_args.received_message.len); aws_channel_shutdown(incoming_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &incoming_args)); 
ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); /*no shutdown on the client necessary here (it should have been triggered by shutting down the other side). just * wait for the event to fire. */ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &outgoing_args)); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_listener_destroy_predicate, &incoming_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_tls_opt_tester_clean_up(&client_tls_opt_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_tls_local_server_tester_clean_up(&local_server_tester)); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_channel_echo_and_backpressure_test, s_tls_channel_echo_and_backpressure_test_fn) struct default_host_callback_data { struct aws_host_address aaaa_address; struct aws_host_address a_address; bool has_aaaa_address; bool has_a_address; struct aws_condition_variable condition_variable; bool invoked; }; static int s_verify_negotiation_fails_helper( struct aws_allocator *allocator, const struct aws_string *host_name, uint32_t port, struct aws_tls_ctx_options *client_ctx_options) { struct aws_tls_ctx *client_ctx = aws_tls_client_ctx_new(allocator, client_ctx_options); struct aws_tls_connection_options tls_client_conn_options; aws_tls_connection_options_init_from_ctx(&tls_client_conn_options, client_ctx); aws_tls_connection_options_set_callbacks(&tls_client_conn_options, s_tls_on_negotiated, NULL, NULL, NULL); struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_string(host_name); aws_tls_connection_options_set_server_name(&tls_client_conn_options, allocator, &host_name_cur); struct tls_test_args outgoing_args = { .mutex = &c_tester.mutex, .allocator = allocator, .condition_variable = &c_tester.condition_variable, .error_invoked = false, .expects_error = true, .rw_handler = NULL, .server = false, .tls_levels_negotiated = 0, .desired_tls_levels = 1, .shutdown_finished = false, }; tls_client_conn_options.user_data = &outgoing_args; struct aws_socket_options options; AWS_ZERO_STRUCT(options); /* badssl.com is great but has occasional lags, make this timeout longer so we have a higher chance of actually testing something. 
*/ options.connect_timeout_ms = 10000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = c_tester.resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = aws_string_c_str(host_name); channel_options.port = port; channel_options.socket_options = &options; channel_options.tls_options = &tls_client_conn_options; channel_options.setup_callback = s_tls_handler_test_client_setup_callback; channel_options.shutdown_callback = s_tls_handler_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* put this here to verify ownership semantics are correct. This should NOT cause a segfault. If it does, ya * done messed up. */ aws_tls_connection_options_clean_up(&tls_client_conn_options); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_TRUE(outgoing_args.error_invoked); /* we're talking to an external internet endpoint, yeah this sucks... we don't know for sure that this failed for the right reasons, but there's not much we can do about it.*/ if (outgoing_args.last_error_code != AWS_IO_SOCKET_TIMEOUT) { ASSERT_INT_EQUALS(AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE, outgoing_args.last_error_code); } else { fprintf( stderr, "Warning: the connection timed out and we're not completely certain" " that this fails for the right reasons. 
Maybe run the test again?\n"); } aws_client_bootstrap_release(client_bootstrap); aws_tls_ctx_release(client_ctx); return AWS_OP_SUCCESS; } static int s_verify_negotiation_fails( struct aws_allocator *allocator, const struct aws_string *host_name, uint32_t port, void (*context_options_override_fn)(struct aws_tls_ctx_options *)) { aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct aws_tls_ctx_options client_ctx_options; aws_tls_ctx_options_init_default_client(&client_ctx_options, allocator); if (context_options_override_fn) { (*context_options_override_fn)(&client_ctx_options); } ASSERT_SUCCESS(s_verify_negotiation_fails_helper(allocator, host_name, port, &client_ctx_options)); aws_tls_ctx_options_clean_up(&client_ctx_options); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } static int s_verify_negotiation_fails_with_ca_override( struct aws_allocator *allocator, const struct aws_string *host_name, const char *root_ca_path) { aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct aws_tls_ctx_options client_ctx_options; aws_tls_ctx_options_init_default_client(&client_ctx_options, allocator); ASSERT_SUCCESS(aws_tls_ctx_options_override_default_trust_store_from_path(&client_ctx_options, NULL, root_ca_path)); ASSERT_SUCCESS(s_verify_negotiation_fails_helper(allocator, host_name, 443, &client_ctx_options)); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); aws_tls_ctx_options_clean_up(&client_ctx_options); return AWS_OP_SUCCESS; } # if defined(USE_S2N) static int s_default_pki_path_exists_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; ASSERT_TRUE( aws_determine_default_pki_dir() != NULL || aws_determine_default_pki_ca_file() != NULL, "Default TLS trust store not found on this system."); return AWS_OP_SUCCESS; } AWS_TEST_CASE(default_pki_path_exists, s_default_pki_path_exists_fn) # endif /* defined(USE_S2N) */ AWS_STATIC_STRING_FROM_LITERAL(s_expired_host_name, "expired.badssl.com"); static int s_tls_client_channel_negotiation_error_expired_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_expired_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_error_expired, s_tls_client_channel_negotiation_error_expired_fn) AWS_STATIC_STRING_FROM_LITERAL(s_wrong_host_name, "wrong.host.badssl.com"); static int s_tls_client_channel_negotiation_error_wrong_host_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_wrong_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_error_wrong_host, s_tls_client_channel_negotiation_error_wrong_host_fn) static int s_tls_client_channel_negotiation_error_wrong_host_with_ca_override_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails_with_ca_override(allocator, s_wrong_host_name, "DigiCertGlobalRootCA.crt.pem"); } AWS_TEST_CASE( tls_client_channel_negotiation_error_wrong_host_with_ca_override, s_tls_client_channel_negotiation_error_wrong_host_with_ca_override_fn) AWS_STATIC_STRING_FROM_LITERAL(s_self_signed_host_name, "self-signed.badssl.com"); static int s_tls_client_channel_negotiation_error_self_signed_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_self_signed_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_error_self_signed, 
s_tls_client_channel_negotiation_error_self_signed_fn) AWS_STATIC_STRING_FROM_LITERAL(s_untrusted_root_host_name, "untrusted-root.badssl.com"); static int s_tls_client_channel_negotiation_error_untrusted_root_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_untrusted_root_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_untrusted_root, s_tls_client_channel_negotiation_error_untrusted_root_fn); AWS_STATIC_STRING_FROM_LITERAL(s_amazon_host_name, "www.amazon.com"); /* negotiation should fail. www.amazon.com is obviously trusted by the default trust store, * but we've overridden the default trust store */ static int s_tls_client_channel_negotiation_error_untrusted_root_due_to_ca_override_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails_with_ca_override(allocator, s_amazon_host_name, "ca_root.crt"); } AWS_TEST_CASE( tls_client_channel_negotiation_error_untrusted_root_due_to_ca_override, s_tls_client_channel_negotiation_error_untrusted_root_due_to_ca_override_fn) AWS_STATIC_STRING_FROM_LITERAL(s_broken_crypto_rc4_host_name, "rc4.badssl.com"); static int s_tls_client_channel_negotiation_error_broken_crypto_rc4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_broken_crypto_rc4_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_broken_crypto_rc4, s_tls_client_channel_negotiation_error_broken_crypto_rc4_fn) AWS_STATIC_STRING_FROM_LITERAL(s_broken_crypto_rc4_md5_host_name, "rc4-md5.badssl.com"); static int s_tls_client_channel_negotiation_error_broken_crypto_rc4_md5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_broken_crypto_rc4_md5_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_broken_crypto_rc4_md5, s_tls_client_channel_negotiation_error_broken_crypto_rc4_md5_fn) AWS_STATIC_STRING_FROM_LITERAL(s_broken_crypto_dh480_host_name, "dh480.badssl.com"); static int s_tls_client_channel_negotiation_error_broken_crypto_dh480_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_broken_crypto_dh480_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_broken_crypto_dh480, s_tls_client_channel_negotiation_error_broken_crypto_dh480_fn) AWS_STATIC_STRING_FROM_LITERAL(s_broken_crypto_dh512_host_name, "dh512.badssl.com"); static int s_tls_client_channel_negotiation_error_broken_crypto_dh512_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_broken_crypto_dh512_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_broken_crypto_dh512, s_tls_client_channel_negotiation_error_broken_crypto_dh512_fn) AWS_STATIC_STRING_FROM_LITERAL(s_broken_crypto_dh1024_host_name, "dh1024.badssl.com"); static int s_tls_client_channel_negotiation_error_broken_crypto_dh1024_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_broken_crypto_dh1024_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_broken_crypto_dh1024, s_tls_client_channel_negotiation_error_broken_crypto_dh1024_fn) AWS_STATIC_STRING_FROM_LITERAL(s_broken_crypto_null_host_name, "null.badssl.com"); static int s_tls_client_channel_negotiation_error_broken_crypto_null_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return 
s_verify_negotiation_fails(allocator, s_broken_crypto_null_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_broken_crypto_null, s_tls_client_channel_negotiation_error_broken_crypto_null_fn) AWS_STATIC_STRING_FROM_LITERAL(s_legacy_crypto_tls10_host_name, "tls-v1-0.badssl.com"); static void s_raise_tls_version_to_11(struct aws_tls_ctx_options *options) { aws_tls_ctx_options_set_minimum_tls_version(options, AWS_IO_TLSv1_1); } static int s_tls_client_channel_negotiation_error_legacy_crypto_tls10_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_legacy_crypto_tls10_host_name, 1010, &s_raise_tls_version_to_11); } AWS_TEST_CASE( tls_client_channel_negotiation_error_legacy_crypto_tls10, s_tls_client_channel_negotiation_error_legacy_crypto_tls10_fn) AWS_STATIC_STRING_FROM_LITERAL(s_legacy_crypto_tls11_host_name, "tls-v1-1.badssl.com"); static void s_raise_tls_version_to_12(struct aws_tls_ctx_options *options) { aws_tls_ctx_options_set_minimum_tls_version(options, AWS_IO_TLSv1_2); } static int s_tls_client_channel_negotiation_error_override_legacy_crypto_tls11_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_legacy_crypto_tls11_host_name, 1011, &s_raise_tls_version_to_12); } AWS_TEST_CASE( tls_client_channel_negotiation_error_override_legacy_crypto_tls11, s_tls_client_channel_negotiation_error_override_legacy_crypto_tls11_fn) AWS_STATIC_STRING_FROM_LITERAL(s_legacy_crypto_dh2048_host_name, "dh2048.badssl.com"); static int s_tls_client_channel_negotiation_error_legacy_crypto_dh2048_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_legacy_crypto_dh2048_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_legacy_crypto_dh2048, s_tls_client_channel_negotiation_error_legacy_crypto_dh2048_fn) AWS_STATIC_STRING_FROM_LITERAL(s_uncommon_no_subject_host_name, "no-subject.badssl.com"); static int s_tls_client_channel_negotiation_error_no_subject_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_uncommon_no_subject_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_error_no_subject, s_tls_client_channel_negotiation_error_no_subject_fn) AWS_STATIC_STRING_FROM_LITERAL(s_uncommon_no_common_name_host_name, "no-common-name.badssl.com"); static int s_tls_client_channel_negotiation_error_no_common_name_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_negotiation_fails(allocator, s_uncommon_no_common_name_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_error_no_common_name, s_tls_client_channel_negotiation_error_no_common_name_fn) /* Test that, if the channel shuts down unexpectedly during tls negotiation, the user code is still notified. 
* We make this happen by connecting to port 80 on s3 or amazon.com and attempting TLS, * which gets you hung up on after a few seconds */ static int s_tls_client_channel_negotiation_error_socket_closed_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; const char *host_name = "aws-crt-test-stuff.s3.amazonaws.com"; uint32_t port = 80; /* Note: intentionally wrong and not 443 */ aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct tls_opt_tester client_tls_opt_tester; struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str(host_name); ASSERT_SUCCESS(s_tls_client_opt_tester_init(allocator, &client_tls_opt_tester, server_name)); client_tls_opt_tester.opt.timeout_ms = 0; /* disable negotiation timeout for this test */ struct tls_test_args outgoing_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &outgoing_args, false, &c_tester)); struct aws_socket_options options = { .connect_timeout_ms = 10000, .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV4}; struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = c_tester.resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = host_name; channel_options.port = port; channel_options.socket_options = &options; channel_options.tls_options = &client_tls_opt_tester.opt; channel_options.setup_callback = s_tls_handler_test_client_setup_callback; channel_options.shutdown_callback = s_tls_handler_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* Wait for setup to complete */ aws_mutex_lock(&c_tester.mutex); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &outgoing_args)); /* Assert that setup failed, and that it failed for reasons unrelated to the tls-handler. 
*/ ASSERT_INT_EQUALS(0, outgoing_args.tls_levels_negotiated); ASSERT_TRUE(outgoing_args.error_invoked); ASSERT_INT_EQUALS(AWS_IO_SOCKET_CLOSED, outgoing_args.last_error_code); aws_mutex_unlock(&c_tester.mutex); /* Clean up */ aws_client_bootstrap_release(client_bootstrap); s_tls_opt_tester_clean_up(&client_tls_opt_tester); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( tls_client_channel_negotiation_error_socket_closed, s_tls_client_channel_negotiation_error_socket_closed_fn); static int s_verify_good_host( struct aws_allocator *allocator, const struct aws_string *host_name, uint32_t port, void (*override_tls_options_fn)(struct aws_tls_ctx_options *)) { aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct tls_test_args outgoing_args = { .mutex = &c_tester.mutex, .allocator = allocator, .condition_variable = &c_tester.condition_variable, .error_invoked = 0, .rw_handler = NULL, .server = false, .tls_levels_negotiated = 0, .desired_tls_levels = 1, .shutdown_finished = false, }; struct aws_tls_ctx_options client_ctx_options; AWS_ZERO_STRUCT(client_ctx_options); aws_tls_ctx_options_set_verify_peer(&client_ctx_options, true); aws_tls_ctx_options_init_default_client(&client_ctx_options, allocator); aws_tls_ctx_options_set_alpn_list(&client_ctx_options, "http/1.1"); if (override_tls_options_fn) { (*override_tls_options_fn)(&client_ctx_options); } struct aws_tls_ctx *client_ctx = aws_tls_client_ctx_new(allocator, &client_ctx_options); ASSERT_NOT_NULL(client_ctx); struct aws_tls_connection_options tls_client_conn_options; aws_tls_connection_options_init_from_ctx(&tls_client_conn_options, client_ctx); aws_tls_connection_options_set_callbacks(&tls_client_conn_options, s_tls_on_negotiated, NULL, NULL, &outgoing_args); struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_string(host_name); aws_tls_connection_options_set_server_name(&tls_client_conn_options, allocator, &host_name_cur); aws_tls_connection_options_set_alpn_list(&tls_client_conn_options, allocator, "http/1.1"); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 10000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = c_tester.resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_NOT_NULL(client_bootstrap); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = aws_string_c_str(host_name); channel_options.port = port; channel_options.socket_options = &options; channel_options.tls_options = &tls_client_conn_options; channel_options.setup_callback = s_tls_handler_test_client_setup_callback; channel_options.shutdown_callback = s_tls_handler_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* put this here to verify ownership semantics are correct. This should NOT cause a segfault. If it does, ya * done messed up. 
*/ aws_tls_connection_options_clean_up(&tls_client_conn_options); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_FALSE(outgoing_args.error_invoked); struct aws_byte_buf expected_protocol = aws_byte_buf_from_c_str("http/1.1"); /* check ALPN and SNI was properly negotiated */ if (aws_tls_is_alpn_available() && client_ctx_options.verify_peer) { ASSERT_BIN_ARRAYS_EQUALS( expected_protocol.buffer, expected_protocol.len, outgoing_args.negotiated_protocol.buffer, outgoing_args.negotiated_protocol.len); } ASSERT_BIN_ARRAYS_EQUALS( host_name->bytes, host_name->len, outgoing_args.server_name.buffer, outgoing_args.server_name.len); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); aws_channel_shutdown(outgoing_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); aws_client_bootstrap_release(client_bootstrap); aws_tls_ctx_release(client_ctx); aws_tls_ctx_options_clean_up(&client_ctx_options); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } static int s_tls_client_channel_negotiation_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_amazon_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success, s_tls_client_channel_negotiation_success_fn) AWS_STATIC_STRING_FROM_LITERAL(s_badssl_ecc256_host_name, "ecc256.badssl.com"); static int s_tls_client_channel_negotiation_success_ecc256_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_badssl_ecc256_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_ecc256, s_tls_client_channel_negotiation_success_ecc256_fn) AWS_STATIC_STRING_FROM_LITERAL(s_badssl_ecc384_host_name, "ecc384.badssl.com"); static int s_tls_client_channel_negotiation_success_ecc384_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_badssl_ecc384_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_ecc384, s_tls_client_channel_negotiation_success_ecc384_fn) AWS_STATIC_STRING_FROM_LITERAL(s3_host_name, "s3.amazonaws.com"); static void s_disable_verify_peer(struct aws_tls_ctx_options *options) { aws_tls_ctx_options_set_verify_peer(options, false); } /* prove that connections complete even when verify_peer is false */ static int s_tls_client_channel_no_verify_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s3_host_name, 443, &s_disable_verify_peer); } AWS_TEST_CASE(tls_client_channel_no_verify, s_tls_client_channel_no_verify_fn) /* Check all of the bad tls cases with verify_peer off. Now they should succeed. 
*/ static int s_tls_client_channel_negotiation_no_verify_expired_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_expired_host_name, 443, &s_disable_verify_peer); } AWS_TEST_CASE(tls_client_channel_negotiation_no_verify_expired, s_tls_client_channel_negotiation_no_verify_expired_fn) static int s_tls_client_channel_negotiation_no_verify_wrong_host_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_wrong_host_name, 443, &s_disable_verify_peer); } AWS_TEST_CASE( tls_client_channel_negotiation_no_verify_wrong_host, s_tls_client_channel_negotiation_no_verify_wrong_host_fn) static int s_tls_client_channel_negotiation_no_verify_self_signed_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_self_signed_host_name, 443, &s_disable_verify_peer); } AWS_TEST_CASE( tls_client_channel_negotiation_no_verify_self_signed, s_tls_client_channel_negotiation_no_verify_self_signed_fn) static int s_tls_client_channel_negotiation_no_verify_untrusted_root_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_untrusted_root_host_name, 443, &s_disable_verify_peer); } AWS_TEST_CASE( tls_client_channel_negotiation_no_verify_untrusted_root, s_tls_client_channel_negotiation_no_verify_untrusted_root_fn) static void s_lower_tls_version(struct aws_tls_ctx_options *options) { aws_tls_ctx_options_set_minimum_tls_version(options, AWS_IO_TLSv1); } static int s_tls_client_channel_negotiation_override_legacy_crypto_tls10_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_legacy_crypto_tls10_host_name, 1010, &s_lower_tls_version); } AWS_TEST_CASE( tls_client_channel_negotiation_override_legacy_crypto_tls10, s_tls_client_channel_negotiation_override_legacy_crypto_tls10_fn) static int s_tls_client_channel_negotiation_success_legacy_crypto_tls11_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_legacy_crypto_tls11_host_name, 1011, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_success_legacy_crypto_tls11, s_tls_client_channel_negotiation_success_legacy_crypto_tls11_fn) AWS_STATIC_STRING_FROM_LITERAL(s_uncommon_sha384_host_name, "sha384.badssl.com"); static int s_tls_client_channel_negotiation_success_sha384_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_uncommon_sha384_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_sha384, s_tls_client_channel_negotiation_success_sha384_fn) AWS_STATIC_STRING_FROM_LITERAL(s_uncommon_sha512_host_name, "sha512.badssl.com"); static int s_tls_client_channel_negotiation_success_sha512_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_uncommon_sha512_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_sha512, s_tls_client_channel_negotiation_success_sha512_fn) AWS_STATIC_STRING_FROM_LITERAL(s_uncommon_rsa8192_host_name, "rsa8192.badssl.com"); static int s_tls_client_channel_negotiation_success_rsa8192_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_uncommon_rsa8192_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_rsa8192, s_tls_client_channel_negotiation_success_rsa8192_fn) AWS_STATIC_STRING_FROM_LITERAL(s_uncommon_incomplete_chain_host_name, "incomplete-chain.badssl.com"); static 
int s_tls_client_channel_negotiation_success_no_verify_incomplete_chain_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_uncommon_incomplete_chain_host_name, 443, s_disable_verify_peer); } AWS_TEST_CASE( tls_client_channel_negotiation_success_no_verify_incomplete_chain, s_tls_client_channel_negotiation_success_no_verify_incomplete_chain_fn) static int s_tls_client_channel_negotiation_success_no_verify_no_subject_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_uncommon_no_subject_host_name, 443, s_disable_verify_peer); } AWS_TEST_CASE( tls_client_channel_negotiation_success_no_verify_no_subject, s_tls_client_channel_negotiation_success_no_verify_no_subject_fn) static int s_tls_client_channel_negotiation_success_no_verify_no_common_name_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_uncommon_no_common_name_host_name, 443, s_disable_verify_peer); } AWS_TEST_CASE( tls_client_channel_negotiation_success_no_verify_no_common_name, s_tls_client_channel_negotiation_success_no_verify_no_common_name_fn) AWS_STATIC_STRING_FROM_LITERAL(s_common_tls12_host_name, "tls-v1-2.badssl.com"); static int s_tls_client_channel_negotiation_success_tls12_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_common_tls12_host_name, 1012, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_tls12, s_tls_client_channel_negotiation_success_tls12_fn) AWS_STATIC_STRING_FROM_LITERAL(s_common_sha256_host_name, "sha256.badssl.com"); static int s_tls_client_channel_negotiation_success_sha256_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_common_sha256_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_sha256, s_tls_client_channel_negotiation_success_sha256_fn) AWS_STATIC_STRING_FROM_LITERAL(s_common_rsa2048_host_name, "rsa2048.badssl.com"); static int s_tls_client_channel_negotiation_success_rsa2048_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_common_rsa2048_host_name, 443, NULL); } AWS_TEST_CASE(tls_client_channel_negotiation_success_rsa2048, s_tls_client_channel_negotiation_success_rsa2048_fn) AWS_STATIC_STRING_FROM_LITERAL(s_common_extended_validation_host_name, "extended-validation.badssl.com"); static int s_tls_client_channel_negotiation_success_extended_validation_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_common_extended_validation_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_success_extended_validation, s_tls_client_channel_negotiation_success_extended_validation_fn) AWS_STATIC_STRING_FROM_LITERAL(s_common_mozilla_modern_host_name, "mozilla-modern.badssl.com"); static int s_tls_client_channel_negotiation_success_mozilla_modern_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_verify_good_host(allocator, s_common_mozilla_modern_host_name, 443, NULL); } AWS_TEST_CASE( tls_client_channel_negotiation_success_mozilla_modern, s_tls_client_channel_negotiation_success_mozilla_modern_fn) static void s_reset_arg_state(struct tls_test_args *setup_test_args) { setup_test_args->tls_levels_negotiated = 0; setup_test_args->shutdown_finished = false; setup_test_args->creation_callback_invoked = false; setup_test_args->setup_callback_invoked = false; } static int 
s_tls_server_multiple_connections_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct tls_test_args outgoing_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &outgoing_args, false, &c_tester)); struct tls_test_args incoming_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &incoming_args, true, &c_tester)); struct tls_local_server_tester local_server_tester; ASSERT_SUCCESS(s_tls_local_server_tester_init( allocator, &local_server_tester, &incoming_args, &c_tester, false, "server.crt", "server.key")); struct tls_opt_tester client_tls_opt_tester; struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str("localhost"); ASSERT_SUCCESS(s_tls_client_opt_tester_init(allocator, &client_tls_opt_tester, server_name)); aws_tls_connection_options_set_callbacks( &client_tls_opt_tester.opt, s_tls_on_negotiated, NULL, NULL, &outgoing_args); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = c_tester.resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = local_server_tester.endpoint.address; channel_options.port = 0; channel_options.socket_options = &local_server_tester.socket_options; channel_options.tls_options = &client_tls_opt_tester.opt; channel_options.setup_callback = s_tls_handler_test_client_setup_callback; channel_options.shutdown_callback = s_tls_handler_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_FALSE(incoming_args.error_invoked); /* shut down */ aws_channel_shutdown(incoming_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); /* no shutdown on the client necessary here (it should have been triggered by shutting down the other side). just * wait for the event to fire. */ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); /* connect again! 
*/ s_reset_arg_state(&outgoing_args); s_reset_arg_state(&incoming_args); ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_FALSE(incoming_args.error_invoked); /* shut down */ aws_channel_shutdown(incoming_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); /*no shutdown on the client necessary here (it should have been triggered by shutting down the other side). just * wait for the event to fire. */ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_listener_destroy_predicate, &incoming_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_tls_opt_tester_clean_up(&client_tls_opt_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_tls_local_server_tester_clean_up(&local_server_tester)); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_server_multiple_connections, s_tls_server_multiple_connections_fn) struct shutdown_listener_tester { struct aws_socket *listener; struct aws_server_bootstrap *server_bootstrap; struct tls_test_args *outgoing_args; /* client args */ struct aws_socket client_socket; }; static bool s_client_socket_closed_predicate(void *user_data) { struct tls_test_args *args = user_data; return args->shutdown_finished; } static void s_close_client_socket_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct shutdown_listener_tester *tester = arg; /* Free task memory */ aws_mem_release(tester->outgoing_args->allocator, task); /* Close socket and notify */ AWS_FATAL_ASSERT(aws_socket_close(&tester->client_socket) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_mutex_lock(tester->outgoing_args->mutex) == AWS_OP_SUCCESS); tester->outgoing_args->shutdown_finished = true; AWS_FATAL_ASSERT(aws_mutex_unlock(tester->outgoing_args->mutex) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_condition_variable_notify_one(tester->outgoing_args->condition_variable) == AWS_OP_SUCCESS); } static void s_on_client_connected_do_hangup(struct aws_socket *socket, int error_code, void *user_data) { AWS_FATAL_ASSERT(error_code == 0); struct shutdown_listener_tester *tester = user_data; tester->client_socket = *socket; /* wait 1 sec so server side has time to setup the channel, then close the socket */ uint64_t run_at_ns; aws_event_loop_current_clock_time(socket->event_loop, &run_at_ns); run_at_ns += aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); struct aws_task *close_client_socket_task = aws_mem_acquire(tester->outgoing_args->allocator, sizeof(struct aws_task)); 
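/* Schedule the close task on the client socket's event loop at the future timestamp computed above, so the server side has begun TLS negotiation before the abrupt hang-up. */ 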
aws_task_init(close_client_socket_task, s_close_client_socket_task, tester, "wait_close_client_socket"); aws_event_loop_schedule_task_future(socket->event_loop, close_client_socket_task, run_at_ns); } /* Test that server can handle a hangup in the middle of TLS negotiation */ static int s_tls_server_hangup_during_negotiation_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct tls_test_args outgoing_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &outgoing_args, false, &c_tester)); struct tls_test_args incoming_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &incoming_args, true, &c_tester)); struct tls_local_server_tester local_server_tester; ASSERT_SUCCESS(s_tls_local_server_tester_init( allocator, &local_server_tester, &incoming_args, &c_tester, false, "server.crt", "server.key")); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); struct shutdown_listener_tester *shutdown_tester = aws_mem_acquire(allocator, sizeof(struct shutdown_listener_tester)); shutdown_tester->server_bootstrap = local_server_tester.server_bootstrap; shutdown_tester->listener = local_server_tester.listener; shutdown_tester->outgoing_args = &outgoing_args; /* Use a raw aws_socket for the client, instead of a full-blown TLS channel. * This lets us hang up on the server, instead of automatically going through with proper TLS negotiation */ ASSERT_SUCCESS(aws_socket_init(&shutdown_tester->client_socket, allocator, &local_server_tester.socket_options)); /* Upon connecting, immediately close the socket */ ASSERT_SUCCESS(aws_socket_connect( &shutdown_tester->client_socket, &local_server_tester.endpoint, aws_event_loop_group_get_next_loop(c_tester.el_group), s_on_client_connected_do_hangup, shutdown_tester)); /* Wait for client socket to close */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_client_socket_closed_predicate, &outgoing_args)); /* Destroy listener socket and wait for shutdown to complete */ aws_server_bootstrap_destroy_socket_listener(shutdown_tester->server_bootstrap, shutdown_tester->listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_listener_destroy_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); /* clean up */ aws_socket_clean_up(&shutdown_tester->client_socket); aws_mem_release(allocator, shutdown_tester); /* cannot double free the listener */ ASSERT_SUCCESS(s_tls_opt_tester_clean_up(&local_server_tester.server_tls_opt_tester)); aws_server_bootstrap_release(local_server_tester.server_bootstrap); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_server_hangup_during_negotiation, s_tls_server_hangup_during_negotiation_fn) static void s_creation_callback_test_channel_creation_callback( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; struct tls_test_args *setup_test_args = (struct tls_test_args *)user_data; setup_test_args->creation_callback_invoked = true; setup_test_args->channel = channel; struct aws_crt_statistics_handler *stats_handler = aws_statistics_handler_new_test(bootstrap->allocator); aws_atomic_store_ptr(&c_tester.stats_handler, stats_handler); aws_channel_set_statistics_handler(channel, stats_handler); } static struct aws_event_loop *s_default_new_event_loop( struct aws_allocator *allocator, const 
struct aws_event_loop_options *options, void *user_data) { (void)user_data; return aws_event_loop_new_default_with_options(allocator, options); } static int s_statistic_test_clock_fn(uint64_t *timestamp) { *timestamp = aws_atomic_load_int(&c_tester.current_time_ns); return AWS_OP_SUCCESS; } static int s_tls_common_tester_statistics_init(struct aws_allocator *allocator, struct tls_common_tester *tester) { aws_io_library_init(allocator); AWS_ZERO_STRUCT(*tester); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; tester->mutex = mutex; tester->condition_variable = condition_variable; aws_atomic_store_int(&tester->current_time_ns, 0); aws_atomic_store_ptr(&tester->stats_handler, NULL); tester->el_group = aws_event_loop_group_new(allocator, s_statistic_test_clock_fn, 1, s_default_new_event_loop, NULL, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->el_group, .max_entries = 1, }; tester->resolver = aws_host_resolver_new_default(allocator, &resolver_options); return AWS_OP_SUCCESS; } static bool s_stats_processed_predicate(void *user_data) { struct aws_crt_statistics_handler *stats_handler = user_data; struct aws_statistics_handler_test_impl *stats_impl = stats_handler->impl; return stats_impl->total_bytes_read > 0 && stats_impl->total_bytes_written > 0 && stats_impl->tls_status != AWS_TLS_NEGOTIATION_STATUS_NONE; } static int s_tls_channel_statistics_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_statistics_init(allocator, &c_tester)); struct aws_byte_buf read_tag = aws_byte_buf_from_c_str("This is some data."); struct aws_byte_buf write_tag = aws_byte_buf_from_c_str("Created from a blend of heirloom and cider apples"); uint8_t incoming_received_message[128] = {0}; uint8_t outgoing_received_message[128] = {0}; struct tls_test_rw_args incoming_rw_args; ASSERT_SUCCESS(s_tls_rw_args_init( &incoming_rw_args, &c_tester, aws_byte_buf_from_empty_array(incoming_received_message, sizeof(incoming_received_message)))); struct tls_test_rw_args outgoing_rw_args; ASSERT_SUCCESS(s_tls_rw_args_init( &outgoing_rw_args, &c_tester, aws_byte_buf_from_empty_array(outgoing_received_message, sizeof(outgoing_received_message)))); struct tls_test_args outgoing_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &outgoing_args, false, &c_tester)); struct tls_test_args incoming_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &incoming_args, true, &c_tester)); struct tls_local_server_tester local_server_tester; ASSERT_SUCCESS(s_tls_local_server_tester_init( allocator, &local_server_tester, &incoming_args, &c_tester, false, "server.crt", "server.key")); struct aws_channel_handler *outgoing_rw_handler = rw_handler_new(allocator, s_tls_test_handle_read, s_tls_test_handle_write, true, 10000, &outgoing_rw_args); ASSERT_NOT_NULL(outgoing_rw_handler); struct aws_channel_handler *incoming_rw_handler = rw_handler_new(allocator, s_tls_test_handle_read, s_tls_test_handle_write, true, 10000, &incoming_rw_args); ASSERT_NOT_NULL(incoming_rw_handler); incoming_args.rw_handler = incoming_rw_handler; outgoing_args.rw_handler = outgoing_rw_handler; struct tls_opt_tester client_tls_opt_tester; struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str("localhost"); ASSERT_SUCCESS(s_tls_client_opt_tester_init(allocator, &client_tls_opt_tester, server_name)); aws_tls_connection_options_set_callbacks( &client_tls_opt_tester.opt, 
s_tls_on_negotiated, NULL, NULL, &outgoing_args); struct aws_client_bootstrap_options bootstrap_options; AWS_ZERO_STRUCT(bootstrap_options); bootstrap_options.event_loop_group = c_tester.el_group; bootstrap_options.host_resolver = c_tester.resolver; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = local_server_tester.endpoint.address; channel_options.port = 0; channel_options.socket_options = &local_server_tester.socket_options; channel_options.tls_options = &client_tls_opt_tester.opt; channel_options.creation_callback = s_creation_callback_test_channel_creation_callback; channel_options.setup_callback = s_tls_handler_test_client_setup_callback; channel_options.shutdown_callback = s_tls_handler_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* put this here to verify ownership semantics are correct. This should NOT cause a segfault. If it does, ya * done messed up. */ aws_tls_connection_options_clean_up(&client_tls_opt_tester.opt); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &incoming_args)); ASSERT_FALSE(incoming_args.error_invoked); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &outgoing_args)); ASSERT_FALSE(outgoing_args.error_invoked); ASSERT_TRUE(outgoing_args.creation_callback_invoked); /* Do the IO operations */ rw_handler_write(outgoing_args.rw_handler, outgoing_args.rw_slot, &write_tag); rw_handler_write(incoming_args.rw_handler, incoming_args.rw_slot, &read_tag); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_test_read_predicate, &incoming_rw_args)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_test_read_predicate, &outgoing_rw_args)); uint64_t ms_to_ns = aws_timestamp_convert(1, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); aws_atomic_store_int(&c_tester.current_time_ns, (size_t)ms_to_ns); struct aws_crt_statistics_handler *stats_handler = aws_atomic_load_ptr(&c_tester.stats_handler); struct aws_statistics_handler_test_impl *stats_impl = stats_handler->impl; aws_mutex_lock(&stats_impl->lock); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &stats_impl->signal, &stats_impl->lock, s_stats_processed_predicate, stats_handler)); ASSERT_TRUE(stats_impl->total_bytes_read >= read_tag.len); ASSERT_TRUE(stats_impl->total_bytes_written >= write_tag.len); ASSERT_TRUE(stats_impl->tls_status == AWS_TLS_NEGOTIATION_STATUS_SUCCESS); aws_mutex_unlock(&stats_impl->lock); aws_channel_shutdown(incoming_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &incoming_args)); /*no shutdown on the client necessary here (it should have been triggered by shutting down the other side). just * wait for the event to fire. 
*/ ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &outgoing_args)); aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_listener_destroy_predicate, &incoming_args)); aws_mutex_unlock(&c_tester.mutex); /* clean up */ ASSERT_SUCCESS(s_tls_opt_tester_clean_up(&client_tls_opt_tester)); ASSERT_SUCCESS(s_tls_local_server_tester_clean_up(&local_server_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_channel_statistics_test, s_tls_channel_statistics_test) static int s_tls_certificate_chain_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); ASSERT_SUCCESS(s_tls_common_tester_init(allocator, &c_tester)); struct tls_test_args outgoing_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &outgoing_args, false, &c_tester)); struct tls_test_args incoming_args; ASSERT_SUCCESS(s_tls_test_arg_init(allocator, &incoming_args, true, &c_tester)); struct tls_local_server_tester local_server_tester; ASSERT_SUCCESS(s_tls_local_server_tester_init( allocator, &local_server_tester, &incoming_args, &c_tester, false, "server_chain.crt", "server.key")); struct tls_opt_tester client_tls_opt_tester; struct aws_byte_cursor server_name = aws_byte_cursor_from_c_str("localhost"); ASSERT_SUCCESS(s_tls_client_opt_tester_init(allocator, &client_tls_opt_tester, server_name)); aws_tls_connection_options_set_callbacks( &client_tls_opt_tester.opt, s_tls_on_negotiated, NULL, NULL, &outgoing_args); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = c_tester.el_group, .host_resolver = c_tester.resolver, }; struct aws_client_bootstrap *client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client_bootstrap; channel_options.host_name = local_server_tester.endpoint.address; channel_options.port = 0; channel_options.socket_options = &local_server_tester.socket_options; channel_options.tls_options = &client_tls_opt_tester.opt; channel_options.setup_callback = s_tls_handler_test_client_setup_callback; channel_options.shutdown_callback = s_tls_handler_test_client_shutdown_callback; channel_options.user_data = &outgoing_args; /* connect! */ ASSERT_SUCCESS(aws_client_bootstrap_new_socket_channel(&channel_options)); /* wait for both ends to setup */ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_setup_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); ASSERT_FALSE(incoming_args.error_invoked); /* shut down */ aws_channel_shutdown(incoming_args.channel, AWS_OP_SUCCESS); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &incoming_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); /* no shutdown on the client necessary here (it should have been triggered by shutting down the other side). just * wait for the event to fire. 
*/ ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_channel_shutdown_predicate, &outgoing_args)); ASSERT_SUCCESS(aws_mutex_unlock(&c_tester.mutex)); /* clean up */ aws_server_bootstrap_destroy_socket_listener(local_server_tester.server_bootstrap, local_server_tester.listener); ASSERT_SUCCESS(aws_mutex_lock(&c_tester.mutex)); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &c_tester.condition_variable, &c_tester.mutex, s_tls_listener_destroy_predicate, &incoming_args)); aws_mutex_unlock(&c_tester.mutex); ASSERT_SUCCESS(s_tls_opt_tester_clean_up(&client_tls_opt_tester)); aws_client_bootstrap_release(client_bootstrap); ASSERT_SUCCESS(s_tls_local_server_tester_clean_up(&local_server_tester)); ASSERT_SUCCESS(s_tls_common_tester_clean_up(&c_tester)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_certificate_chain_test, s_tls_certificate_chain_test) /////////////////////////////////////////////////////////////// struct channel_stat_test_context { struct aws_allocator *allocator; struct tls_opt_tester *tls_tester; struct aws_mutex lock; struct aws_condition_variable signal; bool setup_completed; bool shutdown_completed; int error_code; }; static void s_channel_setup_stat_test_context_init( struct channel_stat_test_context *context, struct aws_allocator *allocator, struct tls_opt_tester *tls_tester) { AWS_ZERO_STRUCT(*context); aws_mutex_init(&context->lock); aws_condition_variable_init(&context->signal); context->allocator = allocator; context->tls_tester = tls_tester; } static void s_channel_setup_stat_test_context_clean_up(struct channel_stat_test_context *context) { aws_mutex_clean_up(&context->lock); aws_condition_variable_clean_up(&context->signal); } static int s_dummy_process_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)handler; (void)slot; aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } static int s_dummy_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)handler; (void)slot; (void)size; return AWS_OP_SUCCESS; } static int s_dummy_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { (void)handler; return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } static size_t s_dummy_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return 10000; } static size_t s_dummy_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static void s_dummy_destroy(struct aws_channel_handler *handler) { aws_mem_release(handler->alloc, handler); } static struct aws_channel_handler_vtable s_dummy_handler_vtable = { .process_read_message = s_dummy_process_message, .process_write_message = s_dummy_process_message, .increment_read_window = s_dummy_increment_read_window, .shutdown = s_dummy_shutdown, .initial_window_size = s_dummy_initial_window_size, .message_overhead = s_dummy_message_overhead, .destroy = s_dummy_destroy, }; static struct aws_channel_handler *aws_channel_handler_new_dummy(struct aws_allocator *allocator) { struct aws_channel_handler *handler = aws_mem_acquire(allocator, sizeof(struct aws_channel_handler)); handler->alloc = allocator; handler->vtable = &s_dummy_handler_vtable; handler->impl = NULL; return handler; } static bool 
s_setup_completed_predicate(void *arg) { struct channel_stat_test_context *context = (struct channel_stat_test_context *)arg; return context->setup_completed; } static bool s_shutdown_completed_predicate(void *arg) { struct channel_stat_test_context *context = (struct channel_stat_test_context *)arg; return context->shutdown_completed; } static void s_on_shutdown_completed(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; struct channel_stat_test_context *context = (struct channel_stat_test_context *)user_data; aws_mutex_lock(&context->lock); context->shutdown_completed = true; context->error_code = error_code; aws_mutex_unlock(&context->lock); aws_condition_variable_notify_one(&context->signal); } static const int s_tls_timeout_ms = 1000; static void s_on_setup_completed(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; struct channel_stat_test_context *context = (struct channel_stat_test_context *)user_data; /* attach a dummy channel handler */ struct aws_channel_slot *dummy_slot = aws_channel_slot_new(channel); struct aws_channel_handler *dummy_handler = aws_channel_handler_new_dummy(context->allocator); aws_channel_slot_set_handler(dummy_slot, dummy_handler); /* attach a tls channel handler and start negotiation */ aws_channel_setup_client_tls(dummy_slot, &context->tls_tester->opt); aws_mutex_lock(&context->lock); context->error_code = error_code; context->setup_completed = true; aws_mutex_unlock(&context->lock); aws_condition_variable_notify_one(&context->signal); } static int s_test_tls_negotiation_timeout(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_event_loop *event_loop = aws_event_loop_new_default(allocator, aws_high_res_clock_get_ticks); ASSERT_SUCCESS(aws_event_loop_run(event_loop)); struct tls_opt_tester tls_test_context; s_tls_client_opt_tester_init(allocator, &tls_test_context, aws_byte_cursor_from_c_str("derp.com")); tls_test_context.opt.timeout_ms = s_tls_timeout_ms; struct channel_stat_test_context channel_context; s_channel_setup_stat_test_context_init(&channel_context, allocator, &tls_test_context); struct aws_channel_options args = { .on_setup_completed = s_on_setup_completed, .setup_user_data = &channel_context, .on_shutdown_completed = s_on_shutdown_completed, .shutdown_user_data = &channel_context, .event_loop = event_loop, }; /* set up the channel */ ASSERT_SUCCESS(aws_mutex_lock(&channel_context.lock)); struct aws_channel *channel = aws_channel_new(allocator, &args); ASSERT_NOT_NULL(channel); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &channel_context.signal, &channel_context.lock, s_setup_completed_predicate, &channel_context)); aws_mutex_unlock(&channel_context.lock); /* wait for the timeout */ aws_thread_current_sleep(aws_timestamp_convert(s_tls_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); aws_mutex_lock(&channel_context.lock); ASSERT_SUCCESS(aws_condition_variable_wait_pred( &channel_context.signal, &channel_context.lock, s_shutdown_completed_predicate, &channel_context)); ASSERT_TRUE(channel_context.error_code == AWS_IO_TLS_NEGOTIATION_TIMEOUT); aws_mutex_unlock(&channel_context.lock); aws_channel_destroy(channel); aws_event_loop_destroy(event_loop); s_tls_opt_tester_clean_up(&tls_test_context); s_channel_setup_stat_test_context_clean_up(&channel_context); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_tls_negotiation_timeout, s_test_tls_negotiation_timeout) struct import_info { struct aws_allocator 
*allocator; struct aws_byte_buf cert_buf; struct aws_byte_buf key_buf; struct aws_thread thread; struct aws_tls_ctx *tls; }; static void s_import_cert(void *ctx) { (void)ctx; # if !defined(AWS_OS_IOS) struct import_info *import = ctx; struct aws_byte_cursor cert_cur = aws_byte_cursor_from_buf(&import->cert_buf); struct aws_byte_cursor key_cur = aws_byte_cursor_from_buf(&import->key_buf); struct aws_tls_ctx_options tls_options = {0}; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_tls_ctx_options_init_client_mtls(&tls_options, import->allocator, &cert_cur, &key_cur)); /* import happens in here */ import->tls = aws_tls_client_ctx_new(import->allocator, &tls_options); AWS_FATAL_ASSERT(import->tls); aws_tls_ctx_options_clean_up(&tls_options); # endif /* !AWS_OS_IOS */ } # define NUM_PAIRS 2 static int s_test_concurrent_cert_import(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); AWS_VARIABLE_LENGTH_ARRAY(struct import_info, imports, NUM_PAIRS); /* setup, note that all I/O should be before the threads are launched */ for (size_t idx = 0; idx < NUM_PAIRS; ++idx) { struct import_info *import = &imports[idx]; import->allocator = allocator; char filename[1024]; snprintf(filename, sizeof(filename), "testcert%u.pem", (uint32_t)idx); ASSERT_SUCCESS(aws_byte_buf_init_from_file(&import->cert_buf, import->allocator, filename)); snprintf(filename, sizeof(filename), "testkey.pem"); ASSERT_SUCCESS(aws_byte_buf_init_from_file(&import->key_buf, import->allocator, filename)); struct aws_thread *thread = &import->thread; ASSERT_SUCCESS(aws_thread_init(thread, allocator)); } /* run threads */ const struct aws_thread_options *options = aws_default_thread_options(); for (size_t idx = 0; idx < NUM_PAIRS; ++idx) { struct import_info *import = &imports[idx]; struct aws_thread *thread = &import->thread; ASSERT_SUCCESS(aws_thread_launch(thread, s_import_cert, import, options)); } /* join and clean up */ for (size_t idx = 0; idx < NUM_PAIRS; ++idx) { struct import_info *import = &imports[idx]; struct aws_thread *thread = &import->thread; ASSERT_SUCCESS(aws_thread_join(thread)); aws_tls_ctx_release(import->tls); aws_byte_buf_clean_up(&import->cert_buf); aws_byte_buf_clean_up(&import->key_buf); } aws_io_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_concurrent_cert_import, s_test_concurrent_cert_import) static int s_test_duplicate_cert_import(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_io_library_init(allocator); struct aws_byte_buf cert_buf = {0}; struct aws_byte_buf key_buf = {0}; # if !defined(AWS_OS_IOS) ASSERT_SUCCESS(aws_byte_buf_init_from_file(&cert_buf, allocator, "testcert0.pem")); ASSERT_SUCCESS(aws_byte_buf_init_from_file(&key_buf, allocator, "testkey.pem")); struct aws_byte_cursor cert_cur = aws_byte_cursor_from_buf(&cert_buf); struct aws_byte_cursor key_cur = aws_byte_cursor_from_buf(&key_buf); struct aws_tls_ctx_options tls_options = {0}; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_tls_ctx_options_init_client_mtls(&tls_options, allocator, &cert_cur, &key_cur)); /* import happens in here */ struct aws_tls_ctx *tls = aws_tls_client_ctx_new(allocator, &tls_options); AWS_FATAL_ASSERT(tls); aws_tls_ctx_release(tls); /* import the same certs twice */ tls = aws_tls_client_ctx_new(allocator, &tls_options); AWS_FATAL_ASSERT(tls); aws_tls_ctx_release(tls); aws_tls_ctx_options_clean_up(&tls_options); # endif /* !AWS_OS_IOS */ /* clean up */ aws_byte_buf_clean_up(&cert_buf); aws_byte_buf_clean_up(&key_buf); aws_io_library_clean_up(); return AWS_OP_SUCCESS; } 
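/* The import tests above share one pattern: read the PEM files into byte buffers, initialize
 * aws_tls_ctx_options with aws_tls_ctx_options_init_client_mtls(), create the context with
 * aws_tls_client_ctx_new() (the inline comments note that the actual import happens inside that call),
 * then release the context and clean up the options and buffers. */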
AWS_TEST_CASE(test_duplicate_cert_import, s_test_duplicate_cert_import) static int s_tls_destroy_null_context(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_tls_ctx *null_context = NULL; /* Verify that we don't crash. */ aws_tls_ctx_release(null_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE(tls_destroy_null_context, s_tls_destroy_null_context); static int s_test_ecc_cert_import(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; # ifndef AWS_OS_APPLE aws_io_library_init(allocator); struct aws_byte_buf cert_buf; struct aws_byte_buf key_buf; ASSERT_SUCCESS(aws_byte_buf_init_from_file(&cert_buf, allocator, "ecc-cert.pem")); ASSERT_SUCCESS(aws_byte_buf_init_from_file(&key_buf, allocator, "ecc-key.pem")); struct aws_byte_cursor cert_cur = aws_byte_cursor_from_buf(&cert_buf); struct aws_byte_cursor key_cur = aws_byte_cursor_from_buf(&key_buf); struct aws_tls_ctx_options tls_options = {0}; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_tls_ctx_options_init_client_mtls(&tls_options, allocator, &cert_cur, &key_cur)); /* import happens in here */ struct aws_tls_ctx *tls_context = aws_tls_client_ctx_new(allocator, &tls_options); ASSERT_NOT_NULL(tls_context); aws_tls_ctx_release(tls_context); aws_tls_ctx_options_clean_up(&tls_options); aws_byte_buf_clean_up(&cert_buf); aws_byte_buf_clean_up(&key_buf); aws_io_library_clean_up(); # endif /* AWS_OS_APPLE */ return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_ecc_cert_import, s_test_ecc_cert_import) #endif /* BYO_CRYPTO */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/000077500000000000000000000000001456575232400215545ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/Makefile000066400000000000000000000026641456575232400232240ustar00rootroot00000000000000VCC?=vcc VCC_ARGS+=/sm GIT?=git NO_CHANGE_EXPECTED_HASH=754ba168f NO_CHANGE_FILE=source/linux/epoll_event_loop.c # The VCC proofs in this directory are based on a snapshot of # epoll_event_loop.c. This target fails if the source file has changed, in # which case the proof results may no longer be valid. .phony: .no_change .no_change: cd ../.. 
&& $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) $(NO_CHANGE_FILE) .phony: .proofs .proofs: $(VCC) $(VCC_ARGS) preamble.h $(VCC) $(VCC_ARGS) subscribe.c /f:s_subscribe_to_io_events $(VCC) $(VCC_ARGS) unsubscribe.c /f:s_unsubscribe_from_io_events $(VCC) $(VCC_ARGS) schedule.c /f:s_schedule_task_common /f:s_schedule_task_now /f:s_schedule_task_future $(VCC) $(VCC_ARGS) cancel_task.c /f:s_cancel_task $(VCC) $(VCC_ARGS) is_on_callers_thread.c /f:s_is_on_callers_thread $(VCC) $(VCC_ARGS) process_task_pre_queue.c /f:s_process_task_pre_queue $(VCC) $(VCC_ARGS) lifecycle.c /f:s_stop_task /f:s_stop /f:s_wait_for_stop_completion /f:s_run $(VCC) $(VCC_ARGS) main_loop.c /f:s_on_tasks_to_schedule /f:s_main_loop $(VCC) $(VCC_ARGS) new_destroy.c /f:aws_event_loop_new_default $(VCC) $(VCC_ARGS) new_destroy.c /f:aws_event_loop_new_default_with_options /f:s_destroy /p:"-DUSE_EFD=0" $(VCC) $(VCC_ARGS) new_destroy.c /f:aws_event_loop_new_default_with_options /f:s_destroy /p:"-DUSE_EFD=1" $(VCC) $(VCC_ARGS) client.c /f:test_new_destroy /f:test_subscribe_unsubscribe .phony: all all: .no_change .proofs aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/README.md000066400000000000000000000043261456575232400230400ustar00rootroot00000000000000# VCC Proofs of the Linux epoll event loop This directory gives the specification and annotated source-code implementation of the Linux epoll event loop implementation of C-IO. See `docs/epoll_event_loop_proof.md` for an overview of the properties proven, assumptions and simplifications. ## Reading the proofs The majority of the specification is given in the header file `preamble.h`, which specifies a contract (preconditions and postconditions) for each implementation function. The proofs themselves are the source code of the Linux epoll event loop implementation with VCC annotations embedded alongside to guide VCC. We split the event loop functions over the following files: - `cancel_task.c` - `is_on_callers_thread.c` - `lifecycle.c` - `main_loop.c` - `new_destroy.c` - `process_task_pre_queue.c` - `schedule.c` - `subscribe.c` - `unsubscribe.c` Additionally, the file `client.c` shows some simple uses of the event loop API, demonstrating that the specifications can be used together in a meaningful way. ## Running proof regression The following will check all proofs assuming VCC and make are on your path. For obtaining VCC, see the next section on building VCC in a Windows docker container. $ make ## VCC docker container I've tested the following on Windows 10 1809 with Docker Desktop 2.2.03. 1. Make sure that docker is running with Windows containers. Another good sanity check is to ensure `docker run hello-world` works as expected. 2. Build the image (this takes about 30 minutes) docker build -t vcc docker-images/win10-vs2012/ 3. Run an interactive powershell in a container docker run -it vcc powershell 4. Inside the container check VCC works vcc "C:\vcc\vcc\Test\testsuite\examples3\ArrayList.c" Verification of ArrayList#adm succeeded.[1.36] Verification of Length succeeded. [0.01] Verification of CreateArrayList succeeded. [0.05] Verification of MakeEmpty succeeded. [0.03] Verification of Select succeeded. [0.02] Verification of Update succeeded. [0.05] Verification of DisposeArrayList succeeded. [0.03] Verification of Add succeeded. [0.44] Verification of main_test succeeded. [0.67] aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/cancel_task.c000066400000000000000000000017021456575232400241670ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* clang-format off */ #include "preamble.h" static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: cancelling task %p", (void *)event_loop, (void *)task); struct epoll_loop *epoll_loop = event_loop->impl_data; aws_task_scheduler_cancel_task(&epoll_loop->scheduler, task); } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/client.c000066400000000000000000000061211456575232400231760ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* clang-format off */ #include "preamble.h" struct aws_allocator *aws_default_allocator() _(ensures \wrapped(\result)) ; uint64_t next_timestamp(); int clock(uint64_t *timestamp) _(writes timestamp) { *timestamp = next_timestamp(); return AWS_OP_SUCCESS; } void test_new_destroy() { struct aws_allocator *alloc = aws_default_allocator(); _(ghost \claim c_mutex) struct aws_event_loop *event_loop = aws_event_loop_new_default(alloc, clock, _(out c_mutex)); if (!event_loop) return; _(ghost \claim c_event_loop;) _(ghost c_event_loop = \make_claim({event_loop}, event_loop->\closed);) s_destroy(event_loop _(ghost c_event_loop) _(ghost c_mutex)); } void on_event( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data _(ghost \claim(c)) ); void test_subscribe_unsubscribe(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(always c_event_loop, event_loop->\closed) _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(writes event_loop) _(updates &epoll_loop_of(event_loop)->scheduler) { struct aws_io_handle *handle = malloc(sizeof(struct aws_io_handle)); if (!handle) return; handle->data.fd = 1; /* model successful open() */ _(wrap handle) int err = s_subscribe_to_io_events(event_loop, handle, 0, on_event, NULL, _(ghost c_event_loop)); if (err) { return; } _(assert wf_cio_handle(handle)) _(assert \wrapped((struct epoll_event_data *)handle->additional_data)) _(assert ((struct epoll_event_data *)handle->additional_data)->\owner == \me) s_unsubscribe_from_io_events(event_loop, handle _(ghost c_event_loop) _(ghost c_mutex)); } void task_fn(struct aws_task *task, void *arg, enum aws_task_status) { (void)task; (void)arg; } void test_schedule_cancel(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(requires \wrapped(event_loop)) _(always c_event_loop, event_loop->\closed) 
_(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(updates &epoll_loop_of(event_loop)->scheduler) { struct aws_task *task = malloc(sizeof(struct aws_task)); if (!task) return; aws_task_init(task, task_fn, NULL, "test_task"); s_schedule_task_now(event_loop, task _(ghost c_event_loop) _(ghost c_mutex)); s_cancel_task(event_loop, task); } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/docker-images/000077500000000000000000000000001456575232400242665ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/docker-images/win10-vs2012/000077500000000000000000000000001456575232400261575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/docker-images/win10-vs2012/Dockerfile000066400000000000000000000023551456575232400301560ustar00rootroot00000000000000# escape=` FROM mcr.microsoft.com/windows:1809 CMD [ "cmd.exe" ] RUN powershell -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "[System.Net.ServicePointManager]::SecurityProtocol = 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin" && ` choco install -y --timeout 0 git make python2 visualstudio2012professional && ` refreshenv && ` git --version RUN git clone https://github.com/nchong-at-aws/vcc.git && ` git clone https://github.com/z3prover/z3.git RUN cd "C:\Program Files (x86)\Microsoft Visual Studio 11.0\Common7\Tools" && ` .\vsvars32.bat && ` cd "C:\z3" && ` git checkout z3-4.3.0 && ` python scripts\mk_make.py && ` cd "build" && ` nmake && ` cd "C:\vcc" && ` # some vcc build failures are acceptable (msbuild || exit 0) RUN copy /Y "C:\z3\build\z3.exe" "C:\vcc\vcc\Host\bin\Debug\z3.exe" # Add vcc to path RUN powershell -Command "$path = $env:path + ';C:\vcc\vcc\Host\bin\Debug'; Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager\Environment\' -Name Path -Value $path" # sanity check RUN vcc "C:\vcc\vcc\Test\testsuite\examples3\ArrayList.c" aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/is_on_callers_thread.c000066400000000000000000000023471456575232400260710ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ /* clang-format off */ #include "preamble.h" static bool s_is_on_callers_thread(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) ) { _(assert \always_by_claim(c_event_loop, event_loop)) struct epoll_loop *epoll_loop = event_loop->impl_data; _(assert \always_by_claim(c_event_loop, epoll_loop)) _(assert \inv(epoll_loop)) aws_thread_id_t *thread_id = aws_atomic_load_ptr(&epoll_loop->running_thread_id); _(assume \thread_local(thread_id)) _(assume thread_id == NULL || *thread_id == \addr(event_loop->\owner)) return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id()); } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/lifecycle.c000066400000000000000000000106631456575232400236650ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* clang-format off */ #define STOP_TASK_FN_PTR #include "preamble.h" static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; struct aws_event_loop *event_loop = args; struct epoll_loop *epoll_loop = event_loop->impl_data; /* now okay to reschedule stop tasks. */ _(unwrap &epoll_loop->stop_task_ptr) aws_atomic_store_ptr(&epoll_loop->stop_task_ptr, NULL); _(wrap &epoll_loop->stop_task_ptr) if (status == AWS_TASK_STATUS_RUN_READY) { /* * this allows the event loop to invoke the callback once the event loop has completed. */ _(unwrap epoll_loop::status) epoll_loop->should_continue = false; _(wrap epoll_loop::status) } } static int s_stop(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) { _(assert \always_by_claim(c_event_loop, event_loop)) struct epoll_loop *epoll_loop = event_loop->impl_data; void *expected_ptr = NULL; _(unwrap &epoll_loop->stop_task_ptr) bool update_succeeded = aws_atomic_compare_exchange_ptr(&epoll_loop->stop_task_ptr, &expected_ptr, &epoll_loop->stop_task); _(wrap &epoll_loop->stop_task_ptr) if (!update_succeeded) { /* the stop task is already scheduled. 
*/ return AWS_OP_SUCCESS; } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Stopping event-loop thread.", (void *)event_loop); aws_task_init(&epoll_loop->stop_task, s_stop_task, event_loop, "epoll_event_loop_stop"); s_schedule_task_now(event_loop, &epoll_loop->stop_task _(ghost c_event_loop) _(ghost c_mutex)); return AWS_OP_SUCCESS; } int aws_thread_join(struct aws_thread *thread _(ghost struct aws_event_loop *event_loop) _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(requires c_event_loop != c_mutex) _(requires \wrapped0(c_event_loop) && \claims(c_event_loop, event_loop->\closed) && \claims_object(c_event_loop, event_loop)) _(writes c_event_loop, event_loop) _(ensures !c_event_loop->\closed) _(ensures \wrapped0(event_loop) && \nested(epoll_loop_of(event_loop))) _(ensures ownership_of_epoll_loop_objects(epoll_loop_of(event_loop))) _(ensures epoll_loop_of(event_loop)->task_pre_queue_mutex.locked == 0) _(maintains \malloc_root(epoll_loop_of(event_loop))) _(maintains \wrapped0(c_mutex) && \claims_object(c_mutex, &epoll_loop_of(event_loop)->task_pre_queue_mutex)) ; static int s_wait_for_stop_completion(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) { struct epoll_loop *epoll_loop = _(by_claim c_event_loop) event_loop->impl_data; int result = aws_thread_join(&epoll_loop->thread_created_on _(ghost event_loop) _(ghost c_event_loop) _(ghost c_mutex)); aws_thread_decrement_unjoined_count(); return result; } int aws_thread_launch( struct aws_thread *thread, void (*func)(void *arg), void *arg, const struct aws_thread_options *options) _(requires \wrapped0(event_loop_of(arg))) _(requires func->\valid) ; /* Not modeled: thread launch ownership change semantics */ void dummy_main_loop(void *arg); /*< VCC change */ static int s_run(struct aws_event_loop *event_loop _(ghost \claim(c_mutex))) { struct epoll_loop *epoll_loop = event_loop->impl_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop); epoll_loop->should_continue = true; aws_thread_increment_unjoined_count(); if (aws_thread_launch(&epoll_loop->thread_created_on, /*&s_main_loop*/&dummy_main_loop, event_loop, &epoll_loop->thread_options)) { aws_thread_decrement_unjoined_count(); AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop); epoll_loop->should_continue = false; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/main_loop.c000066400000000000000000000156151456575232400237050ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ /* clang-format off */ #include "preamble.h" #define DEFAULT_TIMEOUT 100000 #define MAX_EVENTS 100 static void s_on_tasks_to_schedule( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data _(ghost \claim(c_event_loop)) ) { (void)handle; (void)user_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: notified of cross-thread tasks to schedule", (void *)event_loop); struct epoll_loop *epoll_loop = event_loop->impl_data; if (events & AWS_IO_EVENT_TYPE_READABLE) { epoll_loop->should_process_task_pre_queue = true; } } static void s_main_loop(void *args _(ghost \claim(c_mutex))) { struct aws_event_loop *event_loop = args; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); struct epoll_loop *epoll_loop = event_loop->impl_data; _(ghost \claim c_event_loop;) _(ghost c_event_loop = \make_claim({event_loop}, event_loop->\closed);) /* set thread id to the thread of the event loop */ _(unwrap &epoll_loop->running_thread_id) aws_atomic_store_ptr(&epoll_loop->running_thread_id, &epoll_loop->thread_created_on.thread_id); _(wrap &epoll_loop->running_thread_id) int err = s_subscribe_to_io_events( event_loop, &epoll_loop->read_task_handle, AWS_IO_EVENT_TYPE_READABLE, s_on_tasks_to_schedule, NULL _(ghost c_event_loop)); if (err) { return; } int timeout = DEFAULT_TIMEOUT; struct epoll_event events[MAX_EVENTS]; AWS_LOGF_INFO( AWS_LS_IO_EVENT_LOOP, "id=%p: default timeout %d, and max events to process per tick %d", (void *)event_loop, timeout, MAX_EVENTS); /* * until stop is called, * call epoll_wait, if a task is scheduled, or a file descriptor has activity, it will * return. * * process all events, * * run all scheduled tasks. * * process queued subscription cleanups. */ while (epoll_loop->should_continue) _(invariant \extent_mutable((struct epoll_event[MAX_EVENTS]) events)) _(invariant (&epoll_loop->read_task_handle)->\closed) _(invariant \wrapped(&epoll_loop->scheduler)) _(writes &epoll_loop->scheduler) _(writes &epoll_loop->should_process_task_pre_queue) _(writes \extent((struct epoll_event[MAX_EVENTS]) events)) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: waiting for a maximum of %d ms", (void *)event_loop, timeout); int event_count = epoll_wait(epoll_loop->epoll_fd, events, MAX_EVENTS, timeout); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, event_count); for (int i = 0; i < event_count; ++i) _(writes &epoll_loop->should_process_task_pre_queue) { struct epoll_event_data *event_data = (struct epoll_event_data *)events[i].data.ptr; _(assert \wrapped(event_data)) _(assert \inv(event_data)) int event_mask = 0; if (events[i].events & EPOLLIN) { event_mask |= AWS_IO_EVENT_TYPE_READABLE; } if (events[i].events & EPOLLOUT) { event_mask |= AWS_IO_EVENT_TYPE_WRITABLE; } if (events[i].events & EPOLLRDHUP) { event_mask |= AWS_IO_EVENT_TYPE_REMOTE_HANG_UP; } if (events[i].events & EPOLLHUP) { event_mask |= AWS_IO_EVENT_TYPE_CLOSED; } if (events[i].events & EPOLLERR) { event_mask |= AWS_IO_EVENT_TYPE_ERROR; } if (event_data->is_subscribed) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: activity on fd %d, invoking handler.", (void *)event_loop, event_data->handle->data.fd); event_data->on_event(event_loop, event_data->handle, event_mask, event_data->user_data _(ghost c_event_loop)); } } /* run scheduled tasks */ s_process_task_pre_queue(event_loop _(ghost c_event_loop) _(ghost c_mutex)); uint64_t now_ns = 0; event_loop->clock(&now_ns); /* if clock fails, now_ns will be 0 and tasks 
scheduled for a specific time will not be run. That's ok, we'll handle them next time around. */ AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: running scheduled tasks.", (void *)event_loop); aws_task_scheduler_run_all(&epoll_loop->scheduler, now_ns); /* set timeout for next epoll_wait() call. * if clock fails, or scheduler has no tasks, use default timeout */ bool use_default_timeout = false; if (event_loop->clock(&now_ns)) { use_default_timeout = true; } uint64_t next_run_time_ns; if (!aws_task_scheduler_has_tasks(&epoll_loop->scheduler, &next_run_time_ns)) { use_default_timeout = true; } if (use_default_timeout) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: no more scheduled tasks using default timeout.", (void *)event_loop); timeout = DEFAULT_TIMEOUT; } else { /* Translate timestamp (in nanoseconds) to timeout (in milliseconds) */ uint64_t timeout_ns = (next_run_time_ns > now_ns) ? (next_run_time_ns - now_ns) : 0; uint64_t timeout_ms64 = aws_timestamp_convert(timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); timeout = timeout_ms64 > INT_MAX ? INT_MAX : (int)timeout_ms64; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: detected more scheduled tasks with the next occurring at " "%llu, using timeout of %d.", (void *)event_loop, (unsigned long long)timeout_ns, timeout); } } AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop); s_unsubscribe_from_io_events(event_loop, &epoll_loop->read_task_handle _(ghost c_event_loop) _(ghost c_mutex)); /* set thread id back to NULL. This should be updated again in destroy, before tasks are canceled. */ _(unwrap &epoll_loop->running_thread_id) aws_atomic_store_ptr(&epoll_loop->running_thread_id, NULL); _(wrap &epoll_loop->running_thread_id) } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/new_destroy.c000066400000000000000000000273261456575232400242740ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ /* clang-format off */ #include "preamble.h" /* VCC change: clock fnptr */ int aws_event_loop_init_base(struct aws_event_loop *loop, struct aws_allocator *allocator, aws_io_clock_fn_ptr clock) _(writes &loop->alloc) _(writes &loop->clock) _(maintains \wrapped(allocator)) _(ensures \result == 0 <==> loop->alloc == allocator && loop->clock->\valid) ; /* Cleans up hash-table (but not modeled) */ void aws_event_loop_clean_up_base(struct aws_event_loop *loop) _(writes \extent(loop)) _(ensures \mutable(loop)) ; void close(int fd) _(requires valid_fd(fd)) ; const struct aws_thread_options *aws_default_thread_options(void) _(ensures \wrapped(\result)) ; struct aws_event_loop_vtable s_vtable; #if USE_EFD enum { EFD_SEMAPHORE = 1, #define EFD_SEMAPHORE EFD_SEMAPHORE EFD_CLOEXEC = 02000000, #define EFD_CLOEXEC EFD_CLOEXEC EFD_NONBLOCK = 04000 #define EFD_NONBLOCK EFD_NONBLOCK }; int eventfd(unsigned int initval, int flags); #else int aws_open_nonblocking_posix_pipe(/*int pipe_fds[2]*/int *pipe_fds) _(writes \extent((int[2]) pipe_fds)) _(ensures \extent_mutable((int[2]) pipe_fds)) _(ensures \result == AWS_OP_SUCCESS <==> valid_fd(pipe_fds[0]) && valid_fd(pipe_fds[1])) ; #endif struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn_ptr clock _(out \claim(c_mutex)) ) { /* VCC change: rewrite struct initialization */ #if 0 struct aws_event_loop_options options = { .thread_options = NULL, .clock = clock, }; #else struct aws_event_loop_options options; options.thread_options = NULL; options.clock = clock; _(wrap(&options)) #endif /* VCC change: rewrite return to allow for unwrap */ #if 0 return aws_event_loop_new_default_with_options(alloc, &options); #else struct aws_event_loop *r = aws_event_loop_new_default_with_options(alloc, &options, _(out c_mutex)); _(unwrap(&options)) return r; #endif } struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options _(out \claim(c_mutex)) ) { AWS_PRECONDITION(options); /* VCC change: rewrite clock fnptr validity check */ #if 0 AWS_PRECONDITION(options->clock); #else AWS_PRECONDITION(options->clock->\valid); #endif struct aws_event_loop *loop = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop)); if (!loop) { return NULL; } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered epoll", (void *)loop); if (aws_event_loop_init_base(loop, alloc, options->clock)) { goto clean_up_loop; } struct epoll_loop *epoll_loop = aws_mem_calloc(alloc, 1, sizeof(struct epoll_loop)); if (!epoll_loop) { goto cleanup_base_loop; } if (options->thread_options) { epoll_loop->thread_options = *options->thread_options; } else { epoll_loop->thread_options = *aws_default_thread_options(); } /* initialize thread id to NULL, it should be updated when the event loop thread starts. 
*/ aws_atomic_init_ptr(&epoll_loop->running_thread_id, NULL); aws_linked_list_init(&epoll_loop->task_pre_queue); epoll_loop->task_pre_queue_mutex = (struct aws_mutex)AWS_MUTEX_INIT; aws_atomic_init_ptr(&epoll_loop->stop_task_ptr, NULL); epoll_loop->epoll_fd = epoll_create(100); if (epoll_loop->epoll_fd < 0) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to open epoll handle.", (void *)loop); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up_epoll; } if (aws_thread_init(&epoll_loop->thread_created_on, alloc)) { goto clean_up_epoll; } #if USE_EFD AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Using eventfd for cross-thread notifications.", (void *)loop); int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); if (fd < 0) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to open eventfd handle.", (void *)loop); aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); goto clean_up_thread; } AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: eventfd descriptor %d.", (void *)loop, fd); epoll_loop->write_task_handle = (struct aws_io_handle){.data.fd = fd, .additional_data = NULL}; epoll_loop->read_task_handle = (struct aws_io_handle){.data.fd = fd, .additional_data = NULL}; #else AWS_LOGF_DEBUG( AWS_LS_IO_EVENT_LOOP, "id=%p: Eventfd not available, falling back to pipe for cross-thread notification.", (void *)loop); /* VCC change: array init using {0} */ #if 0 int pipe_fds[2] = {0}; #else int pipe_fds[2]; pipe_fds[0] = 0; pipe_fds[1] = 0; #endif /* this pipe is for task scheduling. */ if (aws_open_nonblocking_posix_pipe(pipe_fds)) { AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to open pipe handle.", (void *)loop); goto clean_up_thread; } AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: pipe descriptors read %d, write %d.", (void *)loop, pipe_fds[0], pipe_fds[1]); epoll_loop->write_task_handle.data.fd = pipe_fds[1]; epoll_loop->read_task_handle.data.fd = pipe_fds[0]; #endif if (aws_task_scheduler_init(&epoll_loop->scheduler, alloc)) { goto clean_up_pipe; } epoll_loop->should_continue = false; loop->impl_data = epoll_loop; loop->vtable = &s_vtable; _(wrap(&epoll_loop->task_pre_queue.head)) _(wrap(&epoll_loop->task_pre_queue.tail)) _(wrap(&epoll_loop->task_pre_queue)) epoll_loop->task_pre_queue_mutex.locked = 0; _(ghost { epoll_loop->task_pre_queue_mutex.protected_obj = &epoll_loop->task_pre_queue; epoll_loop->task_pre_queue_mutex.\owns = {&epoll_loop->task_pre_queue}; _(wrap(&epoll_loop->task_pre_queue_mutex)) c_mutex = \make_claim({&epoll_loop->task_pre_queue_mutex}, epoll_loop->task_pre_queue_mutex.\closed); }) _(wrap(&epoll_loop->write_task_handle)) _(wrap(&epoll_loop->scheduler.timed_queue)) _(wrap(&epoll_loop->scheduler.timed_list.head)) _(wrap(&epoll_loop->scheduler.timed_list.tail)) _(wrap(&epoll_loop->scheduler.timed_list)) _(wrap(&epoll_loop->scheduler.asap_list.head)) _(wrap(&epoll_loop->scheduler.asap_list.tail)) _(wrap(&epoll_loop->scheduler.asap_list)) _(wrap(&epoll_loop->scheduler)) _(wrap(&epoll_loop->thread_created_on)) _(wrap(&epoll_loop->running_thread_id)) _(wrap(&epoll_loop->read_task_handle)) _(wrap(&epoll_loop->stop_task.node)) _(wrap(&epoll_loop->stop_task.priority_queue_node)) _(wrap(&epoll_loop->stop_task)) _(wrap(&epoll_loop->stop_task_ptr)) _(wrap(epoll_loop::scheduler)) _(wrap(epoll_loop::read_handle)) _(wrap(epoll_loop::stop_task)) _(wrap(epoll_loop::queue)) _(wrap(epoll_loop::status)) _(wrap(epoll_loop)) _(wrap(loop)) return loop; clean_up_pipe: #if USE_EFD close(epoll_loop->write_task_handle.data.fd); epoll_loop->write_task_handle.data.fd = -1; 
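    /* read_task_handle shares the same eventfd descriptor as write_task_handle (both were assigned the
     * same fd above), so it is only marked invalid here; the single close() above already released it. */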
epoll_loop->read_task_handle.data.fd = -1; #else close(epoll_loop->read_task_handle.data.fd); close(epoll_loop->write_task_handle.data.fd); #endif clean_up_thread: aws_thread_clean_up(&epoll_loop->thread_created_on); clean_up_epoll: if (epoll_loop->epoll_fd >= 0) { close(epoll_loop->epoll_fd); } aws_mem_release(alloc, epoll_loop); cleanup_base_loop: aws_event_loop_clean_up_base(loop); clean_up_loop: aws_mem_release(alloc, loop); return NULL; } /* Fake-up call to s_stop since this is just a vtable lookup */ #define aws_event_loop_stop(event_loop) \ s_stop(event_loop _(ghost c_event_loop) _(ghost c_mutex)); static void s_destroy(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) { AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)event_loop); struct epoll_loop *epoll_loop = event_loop->impl_data; /* we don't know if stop() has been called by someone else, * just call stop() again and wait for event-loop to finish. */ aws_event_loop_stop(event_loop); s_wait_for_stop_completion(event_loop _(ghost c_event_loop) _(ghost c_mutex)); epoll_loop = event_loop->impl_data; /*< VCC change: refresh epoll_loop reference */ _(unwrap(event_loop)) _(unwrap(epoll_loop)) _(assert epoll_loop->task_pre_queue_mutex.\claim_count == 1) _(ghost \destroy_claim(c_mutex, {&epoll_loop->task_pre_queue_mutex})) _(assert epoll_loop->task_pre_queue_mutex.\claim_count == 0) _(assert \wrapped0(&epoll_loop->task_pre_queue_mutex)) _(assert epoll_loop->task_pre_queue_mutex.locked == 0) _(assert \inv(&epoll_loop->task_pre_queue_mutex)) _(unwrap(&epoll_loop->task_pre_queue_mutex)) _(assert \wrapped(&epoll_loop->task_pre_queue)) /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. 
*/ epoll_loop->thread_joined_to = aws_thread_current_thread_id(); _(unwrap &epoll_loop->running_thread_id) aws_atomic_store_ptr(&epoll_loop->running_thread_id, &epoll_loop->thread_joined_to); _(wrap &epoll_loop->running_thread_id) aws_task_scheduler_clean_up(&epoll_loop->scheduler); while (!aws_linked_list_empty(&epoll_loop->task_pre_queue)) _(invariant 0 <= epoll_loop->task_pre_queue.length) _(invariant \wrapped(&epoll_loop->task_pre_queue)) _(writes &epoll_loop->task_pre_queue) { _(ghost struct aws_task *t) struct aws_linked_list_node *node = aws_linked_list_pop_front(&epoll_loop->task_pre_queue _(out t)); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); } _(unwrap(&epoll_loop->thread_created_on)) aws_thread_clean_up(&epoll_loop->thread_created_on); _(unwrap(&epoll_loop->read_task_handle)) _(unwrap(&epoll_loop->write_task_handle)) #if USE_EFD close(epoll_loop->write_task_handle.data.fd); epoll_loop->write_task_handle.data.fd = -1; epoll_loop->read_task_handle.data.fd = -1; #else close(epoll_loop->read_task_handle.data.fd); close(epoll_loop->write_task_handle.data.fd); #endif close(epoll_loop->epoll_fd); /* successively unwrap epoll for imminent free() */ _(unwrap epoll_loop::scheduler, &epoll_loop->scheduler, &epoll_loop->running_thread_id, epoll_loop::read_handle, epoll_loop::stop_task, &epoll_loop->stop_task, epoll_loop::queue, &epoll_loop->task_pre_queue, epoll_loop::status) _(unwrap &epoll_loop->scheduler.timed_queue, &epoll_loop->scheduler.timed_list, &epoll_loop->scheduler.asap_list, &epoll_loop->stop_task.node, &epoll_loop->stop_task.priority_queue_node, &epoll_loop->stop_task_ptr, &epoll_loop->task_pre_queue.head, &epoll_loop->task_pre_queue.tail) _(unwrap &epoll_loop->scheduler.timed_list.head, &epoll_loop->scheduler.timed_list.tail, &epoll_loop->scheduler.asap_list.head, &epoll_loop->scheduler.asap_list.tail) aws_mem_release(event_loop->alloc, epoll_loop); aws_event_loop_clean_up_base(event_loop); aws_mem_release(event_loop->alloc, event_loop); } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/preamble.h000066400000000000000000000725751456575232400235340ustar00rootroot00000000000000#ifndef PREAMBLE_H #define PREAMBLE_H /* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ /* clang-format off */ #include #include #include #include /* Fake-up sys/types.h */ typedef signed int ssize_t; /* Fake-up stdbool.h */ typedef int bool; const int true = 1; const int false = 0; /* Definitions from epoll.h */ typedef union epoll_data { _(backing_member) void *ptr; int fd; uint32_t u32; uint64_t u64; } epoll_data_t; struct epoll_event { uint32_t events; /* Epoll events */ _(inline) epoll_data_t data; /* User data variable */ }; enum EPOLL_EVENTS { EPOLLIN = 0x001, #define EPOLLIN EPOLLIN EPOLLPRI = 0x002, #define EPOLLPRI EPOLLPRI EPOLLOUT = 0x004, #define EPOLLOUT EPOLLOUT EPOLLRDNORM = 0x040, #define EPOLLRDNORM EPOLLRDNORM EPOLLRDBAND = 0x080, #define EPOLLRDBAND EPOLLRDBAND EPOLLWRNORM = 0x100, #define EPOLLWRNORM EPOLLWRNORM EPOLLWRBAND = 0x200, #define EPOLLWRBAND EPOLLWRBAND EPOLLMSG = 0x400, #define EPOLLMSG EPOLLMSG EPOLLERR = 0x008, #define EPOLLERR EPOLLERR EPOLLHUP = 0x010, #define EPOLLHUP EPOLLHUP EPOLLRDHUP = 0x2000, #define EPOLLRDHUP EPOLLRDHUP EPOLLONESHOT = (1 << 30), #define EPOLLONESHOT EPOLLONESHOT EPOLLET = (1 << 31) #define EPOLLET EPOLLET }; #define EPOLL_CTL_ADD 1 /* Add a file descriptor to the interface. */ #define EPOLL_CTL_DEL 2 /* Remove a file descriptor from the interface. */ #define EPOLL_CTL_MOD 3 /* Change file descriptor epoll_event structure. */ struct abstract_os_t { int unused; _(ghost \bool watched[int]) } abstract_os_data; struct abstract_os_t *abstract_os = &abstract_os_data; int epoll_ctl (int __epfd, int __op, int __fd, struct epoll_event *__event) _(ensures \result == 0 && __op == EPOLL_CTL_ADD ==> abstract_os->watched[__fd]) _(ensures \result == 0 && __op == EPOLL_CTL_DEL ==> !abstract_os->watched[__fd]) ; struct epoll_event_data; int epoll_wait(int __epfd, struct epoll_event *__events, int maxevents, int timeout) _(ensures \result <= maxevents) /*TODO: use explicit triggers for quantifier instantiation*/ _(ensures \forall int i; {:level 2} 0 <= i && i < \result ==> \wrapped((struct epoll_event_data *)__events[i].data.ptr)) _(writes \extent((struct epoll_event[(unsigned)maxevents]) __events)) _(ensures \extent_mutable((struct epoll_event[(unsigned)maxevents]) __events)) ; int epoll_create(int size) _(requires 0 < size) ; /* Definitions from aws/common/macros.h */ #define AWS_CONTAINER_OF(ptr, type, member) ((type *)((uint8_t *)(ptr)-offsetof(type, member))) #define AWS_UNLIKELY(x) x /* Definitions from aws/common/assert.h */ /* Convert into VCC assertions */ #define AWS_ASSERT(x) _(assert x) #define AWS_PRECONDITION(x) _(assert x) /* Definitions from aws/common/logging.h (no-ops for proof) */ #define AWS_LOGF_INFO(...) #define AWS_LOGF_TRACE(...) #define AWS_LOGF_DEBUG(...) #define AWS_LOGF_ERROR(...) #define AWS_LOGF_FATAL(...)
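/*
 * Illustrative aside, not part of the original proof harness: the ghost map
 * `abstract_os->watched[]` above models which file descriptors the kernel's
 * epoll instance is currently tracking, so VCC can reason about registration
 * purely from epoll_ctl's postconditions. The hypothetical helper below (the
 * name `example_toggle_watch` and its body are illustrative only and are
 * never compiled or verified here) sketches how a caller could rely on those
 * postconditions.
 */
#if 0 /* sketch only */
static void example_toggle_watch(int epfd, int fd, struct epoll_event *ev) {
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, ev) == 0) {
        /* From the first postcondition: fd is now registered. */
        _(assert abstract_os->watched[fd])
        if (epoll_ctl(epfd, EPOLL_CTL_DEL, fd, ev) == 0) {
            /* From the second postcondition: fd is no longer registered. */
            _(assert !abstract_os->watched[fd])
        }
    }
}
#endif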
/* Definitions from aws/common/clock.h */ enum aws_timestamp_unit { AWS_TIMESTAMP_SECS = 1, AWS_TIMESTAMP_MILLIS = 1000, AWS_TIMESTAMP_MICROS = 1000000, AWS_TIMESTAMP_NANOS = 1000000000, }; uint64_t aws_timestamp_convert( uint64_t timestamp, enum aws_timestamp_unit convert_from, enum aws_timestamp_unit convert_to, uint64_t *remainder); /* Definitions from aws/common/error.h */ #define AWS_OP_SUCCESS (0) #define AWS_OP_ERR (-1) enum aws_common_error { AWS_ERROR_SUCCESS = 0, AWS_ERROR_OOM, AWS_ERROR_UNKNOWN, AWS_ERROR_SHORT_BUFFER, AWS_ERROR_OVERFLOW_DETECTED, AWS_ERROR_UNSUPPORTED_OPERATION, AWS_ERROR_INVALID_BUFFER_SIZE, AWS_ERROR_INVALID_HEX_STR, AWS_ERROR_INVALID_BASE64_STR, AWS_ERROR_INVALID_INDEX, AWS_ERROR_THREAD_INVALID_SETTINGS, AWS_ERROR_THREAD_INSUFFICIENT_RESOURCE, AWS_ERROR_THREAD_NO_PERMISSIONS, AWS_ERROR_THREAD_NOT_JOINABLE, AWS_ERROR_THREAD_NO_SUCH_THREAD_ID, AWS_ERROR_THREAD_DEADLOCK_DETECTED, AWS_ERROR_MUTEX_NOT_INIT, AWS_ERROR_MUTEX_TIMEOUT, AWS_ERROR_MUTEX_CALLER_NOT_OWNER, AWS_ERROR_MUTEX_FAILED, AWS_ERROR_COND_VARIABLE_INIT_FAILED, AWS_ERROR_COND_VARIABLE_TIMED_OUT, AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN, AWS_ERROR_CLOCK_FAILURE, AWS_ERROR_LIST_EMPTY, AWS_ERROR_DEST_COPY_TOO_SMALL, AWS_ERROR_LIST_EXCEEDS_MAX_SIZE, AWS_ERROR_LIST_STATIC_MODE_CANT_SHRINK, AWS_ERROR_PRIORITY_QUEUE_FULL, AWS_ERROR_PRIORITY_QUEUE_EMPTY, AWS_ERROR_PRIORITY_QUEUE_BAD_NODE, AWS_ERROR_HASHTBL_ITEM_NOT_FOUND, AWS_ERROR_INVALID_DATE_STR, AWS_ERROR_INVALID_ARGUMENT, AWS_ERROR_RANDOM_GEN_FAILED, AWS_ERROR_MALFORMED_INPUT_STRING, AWS_ERROR_UNIMPLEMENTED, AWS_ERROR_INVALID_STATE, AWS_ERROR_ENVIRONMENT_GET, AWS_ERROR_ENVIRONMENT_SET, AWS_ERROR_ENVIRONMENT_UNSET, AWS_ERROR_STREAM_UNSEEKABLE, AWS_ERROR_NO_PERMISSION, AWS_ERROR_FILE_INVALID_PATH, AWS_ERROR_MAX_FDS_EXCEEDED, AWS_ERROR_SYS_CALL_FAILURE, AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED, AWS_ERROR_STRING_MATCH_NOT_FOUND, AWS_ERROR_END_COMMON_RANGE = 0x03FF }; int aws_raise_error(int err) _(ensures \result == AWS_OP_ERR) ; /* Forward declarations */ struct epoll_loop; struct aws_allocator; struct aws_linked_list_node; struct aws_mutex; struct aws_io_handle; /* Definitions from aws/common/allocator.h */ struct aws_allocator { void *(*mem_acquire)(struct aws_allocator *allocator, size_t size); void (*mem_release)(struct aws_allocator *allocator, void *ptr); /* Optional method; if not supported, this pointer must be NULL */ void *(*mem_realloc)(struct aws_allocator *allocator, void *oldptr, size_t oldsize, size_t newsize); /* Optional method; if not supported, this pointer must be NULL */ void *(*mem_calloc)(struct aws_allocator *allocator, size_t num, size_t size); void *impl; _(invariant mem_acquire->\valid) _(invariant mem_release->\valid) _(invariant mem_realloc == NULL || mem_release->\valid) _(invariant mem_calloc == NULL || mem_calloc->\valid) }; #define aws_mem_calloc(a,n,s) malloc(n*s) #define aws_mem_release(a,o) free(o) /* Definitions from aws/common/array_list.h */ struct aws_array_list { struct aws_allocator *alloc; size_t current_size; size_t length; size_t item_size; void *data; }; /* Definitions from aws/common/priority_queue.h */ struct aws_priority_queue_node { size_t current_index; }; /* VCC change: fnptr declaration cio function pointer declaration can't be parsed typedef int(aws_priority_queue_compare_fn)(const void *a, const void *b); replaced with */ typedef int(* aws_priority_queue_compare_fn_ptr)(const void *a, const void *b); struct aws_priority_queue { aws_priority_queue_compare_fn_ptr pred; /*< VCC change: fnptr */ 
_(inline) struct aws_array_list container; _(inline) struct aws_array_list backpointers; _(invariant pred->\valid) }; /* Definitions from aws/common/task_scheduler.h */ struct aws_task; typedef enum aws_task_status { AWS_TASK_STATUS_RUN_READY, AWS_TASK_STATUS_CANCELED, } aws_task_status; /* VCC change: fnptr declaration */ typedef void(* aws_task_fn_ptr)(struct aws_task *task, void *arg, enum aws_task_status) #ifdef UNSUB_TASK_FN_PTR _(requires \malloc_root((struct epoll_event_data *)arg)) _(writes \extent((struct epoll_event_data *)arg)) #elif defined(STOP_TASK_FN_PTR) _(updates epoll_loop_of(event_loop_of(arg))::status) _(updates &epoll_loop_of(event_loop_of(arg))->stop_task_ptr) _(requires \thread_local(event_loop_of(arg))) #endif ; struct aws_task { aws_task_fn_ptr fn; /*< VCC change: fnptr */ void *arg; uint64_t timestamp; struct aws_linked_list_node node; struct aws_priority_queue_node priority_queue_node; const char *type_tag; size_t reserved; _(invariant \mine(&node)) _(invariant \mine(&priority_queue_node)) }; void aws_task_init(struct aws_task *task, aws_task_fn_ptr aws_task_fn, void *arg, const char *type_tag) _(requires \thread_local(task)) _(writes task) _(ensures \wrapped(task)) _(ensures task->fn == aws_task_fn) _(ensures task->arg == arg) _(ensures task->type_tag == type_tag) ; struct aws_task_scheduler { struct aws_allocator *alloc; struct aws_priority_queue timed_queue; /* Tasks scheduled to run at specific times */ struct aws_linked_list timed_list; /* If timed_queue runs out of memory, further timed tests are stored here */ struct aws_linked_list asap_list; /* Tasks scheduled to run as soon as possible */ _(invariant \mine(&timed_queue)) _(invariant \mine(&timed_list)) _(invariant \mine(&asap_list)) }; int aws_task_scheduler_init(struct aws_task_scheduler *scheduler, struct aws_allocator *allocator) _(writes \extent(&scheduler->timed_list)) _(writes \extent(&scheduler->asap_list)) _(ensures \extent_mutable(&scheduler->timed_list)) _(ensures \extent_mutable(&scheduler->asap_list)) _(ensures \result == AWS_OP_SUCCESS <==> scheduler->timed_queue.pred.\valid && (scheduler->timed_list.head.next == &scheduler->timed_list.tail && scheduler->timed_list.length == 0) && (scheduler->asap_list.head.next == &scheduler->asap_list.tail && scheduler->asap_list.length == 0) ) ; void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struct aws_task *task) _(updates scheduler) ; void aws_task_scheduler_schedule_future(struct aws_task_scheduler *scheduler, struct aws_task *task, uint64_t time_to_run) _(updates scheduler) ; void aws_task_scheduler_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time) _(updates scheduler) ; void aws_task_scheduler_clean_up(struct aws_task_scheduler *scheduler) _(updates scheduler) ; void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct aws_task *task) _(updates scheduler) ; bool aws_task_scheduler_has_tasks(const struct aws_task_scheduler *scheduler, uint64_t *next_task_time); /* Definitions from aws/common/atomics.h */ struct aws_atomic_var { void *value; }; /* VCC change: remove volatile annotation */ void aws_atomic_init_int(/*volatile*/ struct aws_atomic_var *var, size_t n) _(writes &(var->value)) ; void aws_atomic_init_ptr(/*volatile*/ struct aws_atomic_var *var, void *p) _(writes &(var->value)) ; void *aws_atomic_load_ptr(/*volatile*/ struct aws_atomic_var *var) ; uint64_t aws_atomic_load_int(/*volatile const*/ struct aws_atomic_var *var) ; void aws_atomic_store_int(/*volatile*/ struct 
aws_atomic_var *var, size_t n) _(writes &(var->value)) ; bool aws_atomic_compare_exchange_ptr(/*volatile*/ struct aws_atomic_var *var, void **expected, void *desired) _(writes &(var->value)) ; void aws_atomic_store_ptr(/*volatile*/ struct aws_atomic_var *var, void *val) _(writes &(var->value)) ; /* Fake-up pthread.h */ typedef uint64_t pthread_t; /* Definitions from aws/common/thread.h */ enum aws_thread_detach_state { AWS_THREAD_NOT_CREATED = 1, AWS_THREAD_JOINABLE, AWS_THREAD_JOIN_COMPLETED, }; typedef pthread_t aws_thread_id_t; struct aws_thread { struct aws_allocator *allocator; enum aws_thread_detach_state detach_state; aws_thread_id_t thread_id; }; struct aws_thread_options { size_t stack_size; int32_t cpu_id; }; _(pure) aws_thread_id_t aws_thread_current_thread_id(void) _(ensures \result == \addr(\me)) ; int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator) _(requires allocator->\valid) _(writes \span(thread)) _(ensures \extent_mutable(thread)) ; void aws_thread_clean_up(struct aws_thread *thread) _(writes \span(thread)) _(ensures \extent_mutable(thread)) ; _(pure) bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2) _(ensures \result == (t1 == t2)) ; /* Pure from point-of-view of the event loop since the following functions lock and mutate private aws-c-common state */ _(pure) void aws_thread_increment_unjoined_count(); _(pure) void aws_thread_decrement_unjoined_count(); /* Definitions from aws/io/io.h */ enum aws_io_event_type { AWS_IO_EVENT_TYPE_READABLE = 1, AWS_IO_EVENT_TYPE_WRITABLE = 2, AWS_IO_EVENT_TYPE_REMOTE_HANG_UP = 4, AWS_IO_EVENT_TYPE_CLOSED = 8, AWS_IO_EVENT_TYPE_ERROR = 16, }; struct aws_io_handle { _(inline) struct { int fd; void *handle; } data; void *additional_data; _(invariant valid_fd(data.fd)) }; /* VCC change: fnptr declaration */ typedef int(* aws_io_clock_fn_ptr)(uint64_t *timestamp) _(writes timestamp) ; /* Definitions from aws/io/event_loop.h */ struct aws_event_loop_vtable; struct aws_event_loop_options { aws_io_clock_fn_ptr clock; /*< VCC change: fnptr */ struct aws_thread_options *thread_options; _(invariant clock->\valid) _(invariant thread_options != NULL <==> \mine(thread_options)) }; _(claimable) struct aws_event_loop { struct aws_event_loop_vtable *vtable; struct aws_allocator *alloc; aws_io_clock_fn_ptr clock; /*< VCC change: fnptr */ /* struct aws_hash_table local_data; */ /*< VCC change: not modeled */ void *impl_data; /*TODO: allocator is shared and closed*/ _(invariant clock->\valid) _(invariant \mine((struct epoll_loop *)impl_data)) }; /* VCC change: fnptr declaration */ typedef void(* aws_event_loop_on_event_fn_ptr)( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data _(ghost \claim(c))) _(requires \wrapped(c) && \claims(c, event_loop->\closed)) _(requires \nested(handle)) _(writes &epoll_loop_of(event_loop)->should_process_task_pre_queue) ; /* Definitions from aws/common/linked_list.h */ struct aws_linked_list_node { struct aws_linked_list_node *next; struct aws_linked_list_node *prev; }; struct aws_linked_list { struct aws_linked_list_node head; struct aws_linked_list_node tail; _(ghost \natural length) _(invariant \mine(&head)) _(invariant \mine(&tail)) _(invariant (0 == length) <==> (head.next == &tail)) }; void aws_linked_list_init(struct aws_linked_list *list) _(requires \thread_local(list)) _(writes \extent(list)) _(ensures \extent_mutable(list)) _(ensures list->head.next == &list->tail) _(ensures list->tail.next == NULL) _(ensures list->tail.prev 
== &list->head) _(ensures list->head.prev == NULL) _(ensures list->length == 0) ; _(pure) bool aws_linked_list_empty(const struct aws_linked_list *list) _(requires \wrapped(list)) _(reads &list->length) _(ensures \result == (list->length == 0)) _(decreases 0) { return list->head.next == &list->tail; } /* Specialized for linked lists containing tasks */ struct aws_linked_list_node *aws_linked_list_pop_front(struct aws_linked_list *list _(out struct aws_task * task)) /* general: */ _(maintains \wrapped(list)) _(requires 0 < list->length) _(ensures (\old(list->length) - 1) == list->length) _(writes list) _(decreases 0) /* specialized: */ _(ensures task == AWS_CONTAINER_OF(\result, struct aws_task, node)) _(ensures \thread_local(task)) _(ensures task->fn->\valid) ; /* We omit `task == AWS_CONTAINER_OF(node, struct aws_task, node)` because VCC's memory model can't prove this when this occurs in `s_schedule_task_common` */ void aws_linked_list_push_back(struct aws_linked_list *list, struct aws_linked_list_node *node _(ghost struct aws_task *task)) /* general: */ _(updates list) /* specialized: */ _(requires task->fn->\valid) ; void aws_linked_list_swap_contents(struct aws_linked_list *a, struct aws_linked_list *b) _(updates a) _(updates b) ; /* Definitions from source/linux/epoll_event_loop.c */ struct epoll_loop { _(group scheduler) _(:scheduler) struct aws_task_scheduler scheduler; struct aws_thread thread_created_on; struct aws_thread_options thread_options; aws_thread_id_t thread_joined_to; struct aws_atomic_var running_thread_id; _(group read_handle) _(:read_handle) struct aws_io_handle read_task_handle; struct aws_io_handle write_task_handle; struct aws_mutex task_pre_queue_mutex; _(group queue) _(:queue) struct aws_linked_list task_pre_queue; _(group stop_task) _(:stop_task) struct aws_task stop_task; _(:stop_task) struct aws_atomic_var stop_task_ptr; int epoll_fd; _(group status) _(:status) bool should_process_task_pre_queue; _(:status) bool should_continue; _(invariant valid_fd(epoll_fd)) /* scheduler */ _(invariant \mine(&thread_created_on)) _(invariant \mine(&running_thread_id)) /* read_handle */ _(invariant \mine(&write_task_handle)) _(invariant \mine(&task_pre_queue_mutex)) /* task_pre_queue */ /* stop_task */ /* stop_task_ptr */ _(invariant task_pre_queue_mutex.protected_obj == &task_pre_queue) _(invariant task_pre_queue_mutex.\claim_count == 1) }; struct epoll_event_data { struct aws_allocator *alloc; struct aws_io_handle *handle; aws_event_loop_on_event_fn_ptr on_event; /*< VCC change: fnptr */ void *user_data; struct aws_task cleanup_task; bool is_subscribed; /* false when handle is unsubscribed, but this struct hasn't beeen cleaned up yet */ _(invariant \mine(handle)) _(invariant ((struct epoll_event_data *)handle->additional_data) == \this) _(invariant on_event->\valid) _(invariant \mine(&cleanup_task)) }; /* VCC mutex contract */ /* VCC change: replace mutex implementation with VCC contract */ #define AWS_MUTEX_INIT { .locked = 0 } _(claimable) _(volatile_owns) struct aws_mutex { volatile int locked; /* 0=>unlocked / 1=>locked */ _(ghost \object protected_obj) _(invariant locked == 0 ==> \mine(protected_obj)) }; void aws_mutex_lock(struct aws_mutex *l _(ghost \claim c)) _(always c, l->\closed) _(ensures \wrapped(l->protected_obj) && \fresh(l->protected_obj)) _(ensures l->locked == 1) ; void aws_mutex_unlock(struct aws_mutex *l _(ghost \claim c)) _(always c, l->\closed) _(requires l->protected_obj != c) _(writes l->protected_obj) _(requires \wrapped(l->protected_obj)) 
_(ensures l->locked == 0) ; /* Useful definitions */ _(def bool valid_fd(int fd) { return 0 <= fd; }) _(def struct aws_event_loop *event_loop_of(void *arg) { return (struct aws_event_loop *)arg; }) _(def struct epoll_loop *epoll_loop_of(struct aws_event_loop *event_loop) { return (struct epoll_loop *)event_loop->impl_data; }) /* * This predicate (which implies the object invariant properties of * epoll_event_data) means the heap looks like this: * * handle --.additional_data--> epoll_event_data --.on_event--> valid fn * ^ / \ * `--------------.handle--' '--.user_data--> (can be NULL) */ _(def \bool wf_cio_handle(struct aws_io_handle *handle) { return \nested(handle) && handle->\closed && \malloc_root((struct epoll_event_data *)handle->additional_data) && \wrapped((struct epoll_event_data *)handle->additional_data) && (handle->\owner == (struct epoll_event_data *)handle->additional_data); }) #define current_thread_owns_event_loop(event_loop) \ \addr(\me) == \addr(event_loop->\owner) #define ownership_of_epoll_loop_objects(loop) \ (\wrapped(loop::scheduler) \ && \wrapped(&loop->scheduler) \ && \wrapped(loop::read_handle) \ && \wrapped(&loop->read_task_handle) \ && \wrapped(loop::stop_task) \ && \wrapped(&loop->stop_task) \ && \wrapped(&loop->stop_task_ptr) \ && \wrapped(loop::queue) \ && \wrapped(loop::status) \ && \fresh(loop::scheduler) \ && \fresh(&loop->scheduler) \ && \fresh(loop::read_handle) \ && \fresh(&loop->read_task_handle) \ && \fresh(loop::stop_task) \ && \fresh(&loop->stop_task) \ && \fresh(&loop->stop_task_ptr) \ && \fresh(loop::queue) \ && \fresh(&loop->task_pre_queue) \ && \fresh(loop::status)) /* Specifications for epoll_loop functions */ static int s_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn_ptr on_event, /*< VCC change: fnptr */ void *user_data _(ghost \claim(c_event_loop)) ) _(always c_event_loop, event_loop->\closed) /*< the event_loop won't be changed or destroyed underneath us */ _(requires \wrapped(handle)) /*< wrapped means closed (the handle is valid) and owned by the current thread */ _(requires on_event->\valid) /*< valid function pointer */ /* user_data may be NULL */ _(ensures \result == AWS_OP_SUCCESS <==> wf_cio_handle(handle)) _(ensures \result == AWS_OP_SUCCESS ==> \fresh((struct epoll_event_data *)handle->additional_data)) _(writes handle) ; static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(maintains \wrapped(event_loop)) /*< current thread owns event loop (i.e., current thread is the event loop thread) */ _(always c_event_loop, event_loop->\closed) /*< required for schedule call */ _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(requires wf_cio_handle(handle)) _(ensures \result == AWS_OP_SUCCESS <==> !\nested(handle)) _(ensures \result != AWS_OP_SUCCESS <==> wf_cio_handle(handle)) _(writes ((struct epoll_event_data *)handle->additional_data)) _(updates &epoll_loop_of(event_loop)->scheduler) ; static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(always c_event_loop, event_loop->\closed) _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(requires \thread_local(task)) _(requires \wrapped(task)) _(requires task->fn->\valid) _(writes task) _(updates 
&epoll_loop_of(event_loop)->scheduler) ; static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(always c_event_loop, event_loop->\closed) _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(requires \thread_local(task)) _(requires \wrapped(task)) _(requires task->fn->\valid) _(writes task) _(updates &epoll_loop_of(event_loop)->scheduler) ; static bool s_is_on_callers_thread(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) ) _(always c_event_loop, event_loop->\closed) _(ensures \result ==> current_thread_owns_event_loop(event_loop)) ; static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) _(requires \wrapped(event_loop)) _(updates &epoll_loop_of(event_loop)->scheduler) ; static void s_process_task_pre_queue(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex))) _(always c_event_loop, event_loop->\closed) _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(requires \thread_local(&epoll_loop_of(event_loop)->read_task_handle)) _(requires (&epoll_loop_of(event_loop)->read_task_handle)->\closed) _(writes &epoll_loop_of(event_loop)->should_process_task_pre_queue) _(updates &epoll_loop_of(event_loop)->scheduler) ; static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status status) _(requires \thread_local(event_loop_of(args))) _(updates epoll_loop_of(event_loop_of(args))::status) _(updates &epoll_loop_of(event_loop_of(args))->stop_task_ptr) ; static int s_stop(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) /* wrapped0 means the claim_count of c_event_loop is 0 (i.e., notionally, all of the claims handed to client threads have been destroyed), so client threads may no longer call any further event loop API calls. 
*/ _(maintains \wrapped0(c_event_loop) && \claims(c_event_loop, event_loop->\closed)) _(maintains \wrapped0(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(requires \wrapped(&epoll_loop_of(event_loop)->stop_task)) _(writes (&epoll_loop_of(event_loop)->stop_task)) _(updates epoll_loop_of(event_loop)::status) _(updates &epoll_loop_of(event_loop)->scheduler) _(updates &epoll_loop_of(event_loop)->stop_task_ptr) ; static int s_wait_for_stop_completion(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(requires c_event_loop != c_mutex) _(requires \wrapped0(c_event_loop) && \claims(c_event_loop, event_loop->\closed) && \claims_object(c_event_loop, event_loop)) _(writes c_event_loop, event_loop) _(ensures !c_event_loop->\closed) _(ensures \wrapped0(event_loop) && \nested(epoll_loop_of(event_loop))) _(ensures ownership_of_epoll_loop_objects(epoll_loop_of(event_loop))) _(ensures epoll_loop_of(event_loop)->task_pre_queue_mutex.locked == 0) _(maintains \malloc_root(epoll_loop_of(event_loop))) _(maintains \wrapped0(c_mutex) && \claims_object(c_mutex, &epoll_loop_of(event_loop)->task_pre_queue_mutex)) ; static int s_run(struct aws_event_loop *event_loop _(ghost \claim(c_mutex))) _(requires \wrapped0(event_loop) && \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(writes &epoll_loop_of(event_loop)->should_continue) ; static void s_on_tasks_to_schedule( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data _(ghost \claim(c_event_loop)) ) _(always c_event_loop, event_loop->\closed) _(requires \nested(handle)) _(writes &epoll_loop_of(event_loop)->should_process_task_pre_queue) ; static void s_main_loop(void *args _(ghost \claim(c_mutex))) _(requires \wrapped(event_loop_of(args))) _(requires \not_shared(event_loop_of(args))) _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop_of(args))->task_pre_queue_mutex))) _(requires \wrapped(&epoll_loop_of(event_loop_of(args))->read_task_handle)) _(updates &epoll_loop_of(event_loop_of(args))->scheduler) _(updates &epoll_loop_of(event_loop_of(args))->running_thread_id) _(writes event_loop_of(args)) _(writes &epoll_loop_of(event_loop_of(args))->read_task_handle) _(writes (struct epoll_event_data *)(epoll_loop_of(event_loop_of(args))->read_task_handle.additional_data)) _(writes &epoll_loop_of(event_loop_of(args))->should_process_task_pre_queue) ; struct aws_event_loop *aws_event_loop_new_default( struct aws_allocator *alloc, aws_io_clock_fn_ptr clock /*< VCC change: fnptr */ _(out \claim(c_mutex)) ) _(requires \wrapped(alloc)) _(requires clock->\valid) _(ensures \result == NULL || (\wrapped0(\result) && \fresh(\result) && \malloc_root(\result) && \fresh(epoll_loop_of(\result)) && \malloc_root(epoll_loop_of(\result)) && ownership_of_epoll_loop_objects(epoll_loop_of(\result)) && \fresh(c_mutex) && \wrapped0(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(\result)->task_pre_queue_mutex)))) ; struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options _(out \claim(c_mutex)) ) _(requires \wrapped(alloc)) _(maintains \wrapped(options)) _(ensures \result == NULL || (\wrapped0(\result) && \fresh(\result) && \malloc_root(\result) && \fresh(epoll_loop_of(\result)) && \malloc_root(epoll_loop_of(\result)) && ownership_of_epoll_loop_objects(epoll_loop_of(\result)) && \fresh(c_mutex) && \wrapped0(c_mutex) 
&& \claims_object(c_mutex, &(epoll_loop_of(\result)->task_pre_queue_mutex)))) ; static void s_destroy(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(requires \malloc_root(event_loop)) _(requires \malloc_root(epoll_loop_of(event_loop))) _(requires c_event_loop != c_mutex) _(requires \wrapped0(c_event_loop) && \claims_object(c_event_loop, event_loop)) _(requires \wrapped0(c_mutex) && \claims_object(c_mutex, &epoll_loop_of(event_loop)->task_pre_queue_mutex)) _(requires \wrapped(&epoll_loop_of(event_loop)->scheduler)) _(requires \wrapped(epoll_loop_of(event_loop)::status)) _(requires \wrapped(&epoll_loop_of(event_loop)->stop_task)) _(writes &epoll_loop_of(event_loop)->scheduler) _(writes epoll_loop_of(event_loop)::status) _(writes &epoll_loop_of(event_loop)->stop_task) _(writes event_loop, c_event_loop, c_mutex) _(updates &epoll_loop_of(event_loop)->stop_task_ptr) ; /* clang-format on */ #endif PREAMBLE_H aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/process_task_pre_queue.c000066400000000000000000000066051456575232400265010ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* clang-format off */ #include "preamble.h" /* `read` not in preamble because we specialize for reading-into a uint64_t var (as used by `s_process_task_pre_queue`) */ ssize_t read(int fd, void *buf, uint64_t count) _(requires valid_fd(fd)) _(requires count == sizeof(uint64_t)) _(writes (uint64_t *)buf) ; static void s_process_task_pre_queue(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex))) { _(assert \always_by_claim(c_event_loop, event_loop)) struct epoll_loop *epoll_loop = event_loop->impl_data; _(assert \inv(epoll_loop)) if (!epoll_loop->should_process_task_pre_queue) { return; } AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: processing cross-thread tasks", (void *)event_loop); epoll_loop->should_process_task_pre_queue = false; struct aws_linked_list task_pre_queue; aws_linked_list_init(&task_pre_queue); _(wrap(&task_pre_queue.head)) _(wrap(&task_pre_queue.tail)) _(wrap(&task_pre_queue)) uint64_t count_ignore = 0; aws_mutex_lock(&epoll_loop->task_pre_queue_mutex _(ghost c_mutex)); /* several tasks could theoretically have been written (though this should never happen), make sure we drain the * eventfd/pipe. 
*/ while (read(epoll_loop->read_task_handle.data.fd, &count_ignore, sizeof(count_ignore)) > -1) _(invariant \thread_local(&epoll_loop->read_task_handle)) _(invariant (&epoll_loop->read_task_handle)->\closed) _(invariant \inv(&epoll_loop->read_task_handle)) _(invariant \wrapped(&epoll_loop->scheduler)) _(writes &count_ignore) { } aws_linked_list_swap_contents(&epoll_loop->task_pre_queue, &task_pre_queue); aws_mutex_unlock(&epoll_loop->task_pre_queue_mutex _(ghost c_mutex)); while (!aws_linked_list_empty(&task_pre_queue)) _(invariant 0 <= task_pre_queue.length) _(invariant \wrapped(&task_pre_queue)) _(invariant \wrapped(&epoll_loop->scheduler)) _(writes &task_pre_queue, &epoll_loop->scheduler) { _(ghost struct aws_task *t) struct aws_linked_list_node *node = aws_linked_list_pop_front(&task_pre_queue _(out t)); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: task %p pulled to event-loop, scheduling now.", (void *)event_loop, (void *)task); /* Timestamp 0 is used to denote "now" tasks */ if (task->timestamp == 0) { aws_task_scheduler_schedule_now(&epoll_loop->scheduler, task); } else { aws_task_scheduler_schedule_future(&epoll_loop->scheduler, task, task->timestamp); } } _(unwrap(&task_pre_queue)) _(unwrap(&task_pre_queue.head)) _(unwrap(&task_pre_queue.tail)) } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/schedule.c000066400000000000000000000113211456575232400235120ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* clang-format off */ #include "preamble.h" /* not in preamble because we specialize for reading-from a uint64_t var (as used by `s_schedule_task_common`) */ ssize_t write(int fd, void *bytes, size_t nbytes) _(requires valid_fd(fd)) _(requires nbytes == sizeof(uint64_t)) _(requires \thread_local((uint64_t *)bytes)) ; static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) _(always c_event_loop, event_loop->\closed) _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex))) _(requires \thread_local(task)) _(requires \wrapped(task)) _(requires task->fn->\valid) _(writes task) _(updates &epoll_loop_of(event_loop)->scheduler) { _(assert \always_by_claim(c_event_loop, event_loop)) _(assert \active_claim(c_event_loop)) struct epoll_loop *epoll_loop = event_loop->impl_data; _(assert \inv(epoll_loop)) _(assert epoll_loop->\closed) _(assert epoll_loop->write_task_handle.\closed) _(assert \inv(&epoll_loop->write_task_handle)) _(assert valid_fd(epoll_loop->write_task_handle.data.fd)) /* if event loop and the caller are the same thread, just schedule and be done with it. 
*/ if (s_is_on_callers_thread(event_loop _(ghost c_event_loop))) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: scheduling task %p in-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); if (run_at_nanos == 0) { /* zero denotes "now" task */ aws_task_scheduler_schedule_now(&epoll_loop->scheduler, task); } else { aws_task_scheduler_schedule_future(&epoll_loop->scheduler, task, run_at_nanos); } return; } AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Scheduling task %p cross-thread for timestamp %llu", (void *)event_loop, (void *)task, (unsigned long long)run_at_nanos); _(unwrap task) task->timestamp = run_at_nanos; _(wrap task) aws_mutex_lock(&epoll_loop->task_pre_queue_mutex _(ghost c_mutex)); _(assert epoll_loop->task_pre_queue.\owner == \me) uint64_t counter = 1; bool is_first_task = aws_linked_list_empty(&epoll_loop->task_pre_queue); aws_linked_list_push_back(&epoll_loop->task_pre_queue, &task->node _(ghost task)); /* if the list was not empty, we already have a pending read on the pipe/eventfd, no need to write again. */ if (is_first_task) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Waking up event-loop thread", (void *)event_loop); /* If the write fails because the buffer is full, we don't actually care because that means there's a pending * read on the pipe/eventfd and thus the event loop will end up checking to see if something has been queued.*/ _(assert \active_claim(c_event_loop)) /*< implies write_task_handle.data.fd is a valid fd ==> event_loop->\closed ==> epoll_loop->\closed ==> \inv(epoll_loop) ==> epoll_loop->write_task_handle.\closed ==> \inv(&epoll_loop->write_task_handle) */ ssize_t do_not_care = write(_(by_claim c_event_loop) epoll_loop->write_task_handle.data.fd, (void *)&counter, sizeof(counter)); (void)do_not_care; } aws_mutex_unlock(&epoll_loop->task_pre_queue_mutex _(ghost c_mutex)); } static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) { s_schedule_task_common(event_loop, task, 0 /* zero denotes "now" task */ _(ghost c_event_loop) _(ghost c_mutex)); } static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) { s_schedule_task_common(event_loop, task, run_at_nanos _(ghost c_event_loop) _(ghost c_mutex)); } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/subscribe.c000066400000000000000000000056461456575232400237140ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ /* clang-format off */ #include "preamble.h" static int s_subscribe_to_io_events( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn_ptr on_event, /*< VCC change: fnptr */ void *user_data _(ghost \claim(c_event_loop)) ) { _(assert \always_by_claim(c_event_loop, event_loop)) AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: subscribing to events on fd %d", (void *)event_loop, handle->data.fd); struct epoll_event_data *epoll_event_data = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct epoll_event_data)); _(unwrap handle) handle->additional_data = epoll_event_data; if (!epoll_event_data) { return AWS_OP_ERR; } struct epoll_loop *epoll_loop = (struct epoll_loop *)event_loop->impl_data; epoll_event_data->alloc = event_loop->alloc; epoll_event_data->user_data = user_data; epoll_event_data->handle = handle; epoll_event_data->on_event = on_event; epoll_event_data->is_subscribed = true; /*everyone is always registered for edge-triggered, hang up, remote hang up, errors. */ uint32_t event_mask = EPOLLET | EPOLLHUP | EPOLLRDHUP | EPOLLERR; if (events & AWS_IO_EVENT_TYPE_READABLE) { event_mask |= EPOLLIN; } if (events & AWS_IO_EVENT_TYPE_WRITABLE) { event_mask |= EPOLLOUT; } /* this guy is copied by epoll_ctl */ /* VCC change: rewrite struct initialization */ #if 0 struct epoll_event epoll_event = { .data = {.ptr = epoll_event_data}, .events = event_mask, }; #else struct epoll_event epoll_event; epoll_event.data.ptr = epoll_event_data; epoll_event.events = event_mask; #endif if (epoll_ctl(_(by_claim c_event_loop) epoll_loop->epoll_fd, EPOLL_CTL_ADD, handle->data.fd, &epoll_event)) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: failed to subscribe to events on fd %d", (void *)event_loop, handle->data.fd); handle->additional_data = NULL; aws_mem_release(event_loop->alloc, epoll_event_data); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } _(wrap(handle)) _(wrap(&epoll_event_data->cleanup_task.node)) _(wrap(&epoll_event_data->cleanup_task.priority_queue_node)) _(wrap(&epoll_event_data->cleanup_task)) _(wrap(epoll_event_data)) return AWS_OP_SUCCESS; } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-io/tests/vcc/unsubscribe.c000066400000000000000000000053031456575232400242450ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ /* clang-format off */ #define UNSUB_TASK_FN_PTR #include "preamble.h" void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status status) _(requires \malloc_root((struct epoll_event_data *)arg)) _(writes \extent((struct epoll_event_data *)arg)) { (void)task; (void)status; struct epoll_event_data *event_data = (struct epoll_event_data *)arg; aws_mem_release(event_data->alloc, (void *)event_data); } static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)) ) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on fd %d", (void *)event_loop, handle->data.fd); struct epoll_loop *epoll_loop = event_loop->impl_data; AWS_ASSERT(handle->additional_data); struct epoll_event_data *additional_handle_data = handle->additional_data; _(assert \wrapped(additional_handle_data)) _(assert \inv(additional_handle_data)) struct epoll_event dummy_event; if (AWS_UNLIKELY(epoll_ctl(epoll_loop->epoll_fd, EPOLL_CTL_DEL, handle->data.fd, &dummy_event /*ignored*/))) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: failed to un-subscribe from events on fd %d", (void *)event_loop, handle->data.fd); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } /* We can't clean up yet, because we have schedule tasks and more events to process, * mark it as unsubscribed and schedule a cleanup task. */ _(unwrap(additional_handle_data)) _(assert handle->\owner != additional_handle_data) additional_handle_data->is_subscribed = false; aws_task_init( &additional_handle_data->cleanup_task, s_unsubscribe_cleanup_task, additional_handle_data, "epoll_event_loop_unsubscribe_cleanup"); s_schedule_task_now(event_loop, &additional_handle_data->cleanup_task _(ghost c_event_loop) _(ghost c_mutex)); _(unwrap(handle)) handle->additional_data = NULL; _(assert handle->\owner == \me) return AWS_OP_SUCCESS; } /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/000077500000000000000000000000001456575232400202155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.clang-format000066400000000000000000000031611456575232400225710ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false 
SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.clang-tidy000066400000000000000000000012701456575232400222510ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers' WarningsAsErrors: '*' HeaderFilterRegex: '.*\.[h|inl]$' FormatStyle: 'file' CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSufix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h,-assert.h' ... aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/000077500000000000000000000000001456575232400215555ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400237405ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045171456575232400265600ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? A clear and concise description of the bug. validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. 
validations: required: false - type: input id: aws-c-mqtt-version attributes: label: aws-c-mqtt version used validations: required: true - type: input id: compiler-version attributes: label: Compiler and version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003271456575232400257320ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-c-mqtt/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400273310ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400276070ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400253540ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/workflows/000077500000000000000000000000001456575232400236125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/workflows/ci.yml000066400000000000000000000150111456575232400247260ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-mqtt LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: linux-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: image: - manylinux1-x64 - manylinux1-x86 - manylinux2014-x64 - manylinux2014-x86 - al2-x64 - fedora-34-x64 - opensuse-leap - rhel8-x64 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DASSERT_LOCK_HELD=ON linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} --cmake-extra=-DASSERT_LOCK_HELD=ON clang-sanitizers: runs-on: ubuntu-20.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DASSERT_LOCK_HELD=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON --cmake-extra=-DASSERT_LOCK_HELD=ON windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" 
python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DASSERT_LOCK_HELD=ON windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 --cmake-extra=-DASSERT_LOCK_HELD=ON windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON --cmake-extra=-DASSERT_LOCK_HELD=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-mqtt\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-mqtt\build\aws-c-mqtt osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DASSERT_LOCK_HELD=ON # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. downstream: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400267160ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400303610ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. 
If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue, feel free to do so. aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/workflows/codecov.yml000066400000000000000000000014671456575232400257660ustar00rootroot00000000000000name: Code coverage check on: push: env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-mqtt AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: codecov-linux: runs-on: ubuntu-22.04 steps: - name: Checkout Sources uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --compiler=gcc-9 --coverage aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400312500ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.github/workflows/stale_issue.yml000066400000000000000000000045321456575232400266610ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say, but this is a very old issue that is probably not getting as much attention as it deserves. We encourage you to check if this is still an issue in the latest release, and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active for longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! It looks like this PR hasn’t been active for longer than a week. Add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one.
# These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/.gitignore000066400000000000000000000057771456575232400222250ustar00rootroot00000000000000 # Created by https://www.gitignore.io/api/macos,linux,clion,windows,visualstudiocode # Edit at https://www.gitignore.io/?templates=macos,linux,clion,windows,visualstudiocode ### CLion ### # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff .idea/**/workspace.xml .idea/**/tasks.xml .idea/**/usage.statistics.xml .idea/**/dictionaries .idea/**/shelf # Generated files .idea/**/contentModel.xml # Sensitive or high-churn files .idea/**/dataSources/ .idea/**/dataSources.ids .idea/**/dataSources.local.xml .idea/**/sqlDataSources.xml .idea/**/dynamic.xml .idea/**/uiDesigner.xml .idea/**/dbnavigator.xml # Gradle .idea/**/gradle.xml .idea/**/libraries # Gradle and Maven with auto-import # When using Gradle or Maven with auto-import, you should exclude module files, # since they will be recreated, and may cause churn. Uncomment if using # auto-import. 
# .idea/modules.xml # .idea/*.iml # .idea/modules # CMake cmake-build-*/ # Mongo Explorer plugin .idea/**/mongoSettings.xml # File-based project format *.iws # IntelliJ out/ # mpeltonen/sbt-idea plugin .idea_modules/ # JIRA plugin atlassian-ide-plugin.xml # Cursive Clojure plugin .idea/replstate.xml # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties # Editor-based Rest Client .idea/httpRequests # Android studio 3.1+ serialized cache file .idea/caches/build_file_checksums.ser ### CLion Patch ### # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 # *.iml # modules.xml # .idea/misc.xml # *.ipr # Sonarlint plugin .idea/sonarlint ### Linux ### *~ # temporary files which can be created if a process still has a handle open of a deleted file .fuse_hidden* # KDE directory preferences .directory # Linux trash folder which might appear on any partition or disk .Trash-* # .nfs files are created when an open file is removed but is still being accessed .nfs* ### macOS ### # General .DS_Store .AppleDouble .LSOverride # Icon must end with two \r Icon # Thumbnails ._* # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd .Spotlight-V100 .TemporaryItems .Trashes .VolumeIcon.icns .com.apple.timemachine.donotpresent # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk ### VisualStudioCode ### .vscode/* ### VisualStudioCode Patch ### # Ignore all local history of files .history ### Windows ### # Windows thumbnail cache files Thumbs.db ehthumbs.db ehthumbs_vista.db # Dump file *.stackdump # Folder config file [Dd]esktop.ini # Recycle Bin used on file shares $RECYCLE.BIN/ # Windows Installer files *.cab *.msi *.msix *.msm *.msp # Windows shortcuts *.lnk # End of https://www.gitignore.io/api/macos,linux,clion,windows,visualstudiocode .idea *.pem *.crt *.key build/ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/CMakeLists.txt000066400000000000000000000072451456575232400227650ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.1) project(aws-c-mqtt C) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() option(ASSERT_LOCK_HELD "Enable ASSERT_SYNCED_DATA_LOCK_HELD for checking thread issue" OFF) if (ASSERT_LOCK_HELD) add_definitions(-DASSERT_LOCK_HELD) endif() if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(CheckCCompilerFlag) include(AwsFindPackage) file(GLOB AWS_MQTT_HEADERS "include/aws/mqtt/*.h" ) file(GLOB AWS_MQTT5_HEADERS "include/aws/mqtt/v5/*.h" ) file(GLOB AWS_MQTT_PRIV_HEADERS "include/aws/mqtt/private/*.h" "include/aws/mqtt/private/v5/*.h" ) file(GLOB 
AWS_MQTT_PRIV_EXPOSED_HEADERS "include/aws/mqtt/private/mqtt_client_test_helper.h" ) file(GLOB AWS_MQTT_SRC "source/*.c" "source/v5/*.c" ) file(GLOB MQTT_HEADERS ${AWS_MQTT_HEADERS} ${AWS_MQTT_PRIV_HEADERS} ) file(GLOB AWS_MQTT5_HEADERS ${AWS_MQTT5_HEADERS} ) file(GLOB MQTT_SRC ${AWS_MQTT_SRC} ) add_library(${PROJECT_NAME} ${MQTT_HEADERS} ${MQTT_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_MQTT") aws_check_headers(${PROJECT_NAME} ${AWS_MQTT_HEADERS} ${AWS_MQTT5_HEADERS}) aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_include_directories(${PROJECT_NAME} PUBLIC $ $) aws_use_package(aws-c-http) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) install(FILES ${AWS_MQTT_HEADERS} DESTINATION "include/aws/mqtt" COMPONENT Development) install(FILES ${AWS_MQTT5_HEADERS} DESTINATION "include/aws/mqtt/v5" COMPONENT Development) install(FILES ${AWS_MQTT_TESTING_HEADERS} DESTINATION "include/aws/testing/mqtt" COMPONENT Development) install(FILES ${AWS_MQTT_PRIV_EXPOSED_HEADERS} DESTINATION "include/aws/mqtt/private" COMPONENT Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) include(CTest) if (BUILD_TESTING) add_subdirectory(tests) if (NOT CMAKE_CROSSCOMPILING ) add_subdirectory(bin/elastipubsub) add_subdirectory(bin/elastipubsub5) add_subdirectory(bin/mqtt5canary) endif() endif () aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/CODE_OF_CONDUCT.md000066400000000000000000000004671456575232400230230ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/CONTRIBUTING.md000066400000000000000000000067441456575232400224610ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-mqtt/issues), or [recently closed](https://github.com/awslabs/aws-c-mqtt/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. 
Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-mqtt/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-mqtt/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/LICENSE000066400000000000000000000261361456575232400212320ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/NOTICE000066400000000000000000000001631456575232400211210ustar00rootroot00000000000000AWS C Mqtt Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0. aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/README.md000066400000000000000000000205751456575232400215050ustar00rootroot00000000000000## AWS C MQTT C99 implementation of the MQTT 3.1.1 and MQTT 5 specifications. ## License This library is licensed under the Apache 2.0 License. ## Usage ### Building CMake 3.1+ is required to build. `` must be an absolute path in the following instructions. #### Linux-Only Dependencies If you are building on Linux, you will need to build aws-lc and s2n-tls first. 
``` git clone git@github.com:awslabs/aws-lc.git cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-lc/build --target install git clone git@github.com:aws/s2n-tls.git cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build s2n-tls/build --target install ``` #### Building aws-c-mqtt and Remaining Dependencies ``` git clone git@github.com:awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-c-cal.git cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-cal/build --target install git clone git@github.com:awslabs/aws-c-io.git cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-io/build --target install git clone git@github.com:awslabs/aws-c-compression.git cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-compression/build --target install git clone git@github.com:awslabs/aws-c-http.git cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-http/build --target install git clone git@github.com:awslabs/aws-c-mqtt.git cmake -S aws-c-mqtt -B aws-c-mqtt/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-mqtt/build --target install ``` ### Overview This library contains an MQTT implementation that is simple and easy to use, but also quite powerful and low on unnecessary copies. Here is a general overview of the API: ### `struct aws_mqtt_client;` `aws_mqtt_client` is meant to be created once per application to pool common resources required for opening MQTT connections. The instance does not need to be allocated, and may be managed by the user. ```c int aws_mqtt_client_init( struct aws_mqtt_client *client, struct aws_allocator *allocator, struct aws_event_loop_group *elg); ``` Initializes an instance of `aws_mqtt_client` with the required parameters. * `client` is effectively the `this` parameter. * `allocator` will be used to initialize the client (note that the client itself is NOT allocated). *This resource must outlive `client`*. * `bootstrap` will be used to initiate new socket connections MQTT. *This resource must outlive `client`*. See [aws-c-io][aws-c-io] for more information about `aws_client_bootstrap`. ```c void aws_mqtt_client_clean_up(struct aws_mqtt_client *client); ``` Cleans up a client and frees all owned resources. **NOTE**: DO NOT CALL THIS FUNCTION UNTIL ALL OUTSTANDING CONNECTIONS ARE CLOSED. ### `struct aws_mqtt_client_connection;` ```c struct aws_mqtt_client_connection *aws_mqtt_client_connection_new( struct aws_mqtt_client *client, struct aws_mqtt_client_connection_callbacks callbacks, const struct aws_byte_cursor *host_name, uint16_t port, struct aws_socket_options *socket_options, struct aws_tls_ctx_options *tls_options); ``` Allocates and initializes a new connection object (does NOT actually connect). You may use the returned object to configure connection parameters, and then call `aws_mqtt_client_connection_connect` to actually open the connection. * `client` is required in order to use an existing DNS resolver, event loop group, and allocator. * `callbacks` provides the connection-level (not operation level) callbacks and the userdata to be given back. * `host_name` lists the end point to connect to. This may be a DNS address or an IP address. 
*This resource may be freed immediately after return.* * `port` the port to connect to on `host_name`. * `socket_options` describes how to open the connection. See [aws-c-io][aws-c-io] for more information about `aws_socket_options`. * `tls_options` provides TLS credentials to connect with. Pass `NULL` to not use TLS (**NOT RECOMMENDED**). See [aws-c-io][aws-c-io] for more information about `aws_tls_ctx_options`. ```c void aws_mqtt_client_connection_destroy(struct aws_mqtt_client_connection *connection); ``` Destroys a connection and frees all outstanding resources. **NOTE**: DO NOT CALL THIS FUNCTION UNTIL THE CONNECTION IS CLOSED. ```c int aws_mqtt_client_connection_set_will( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload); ``` Sets the last will and testament to be distributed by the server upon client disconnection. Must be called before `aws_mqtt_client_connection_connect`. See `aws_mqtt_client_connection_publish` for information on the parameters. `topic` and `payload` must persist past the call to `aws_mqtt_client_connection_connect`. ```c int aws_mqtt_client_connection_set_login( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *username, const struct aws_byte_cursor *password); ``` Sets the username and password to be sent to the server on connection. Must be called before `aws_mqtt_client_connection_connect`. `username` and `password` must persist past the call to `aws_mqtt_client_connection_connect`. ```c int aws_mqtt_client_connection_set_reconnect_timeout( struct aws_mqtt_client_connection *connection, uint64_t min_timeout, uint64_t max_timeout); ``` Sets the minimum and maximum reconnect timeouts. The time between reconnect attempts will start at min and multipy by 2 until max is reached. ```c int aws_mqtt_client_connection_connect( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *client_id, bool clean_session, uint16_t keep_alive_time); ``` Connects to the remote endpoint. The parameters here are set in the MQTT CONNECT packet directly. `client_id` must persist until the `on_connack` connection callback is called. ```c int aws_mqtt_client_connection_disconnect(struct aws_mqtt_client_connection *connection); ``` Closes an open connection. Does not clean up any resources, that's to be done by `aws_mqtt_client_connection_destroy`, probably from the `on_disconnected` connection callback. ```c uint16_t aws_mqtt_client_connection_subscribe_single( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_client_publish_received_fn *on_publish, void *on_publish_ud, aws_mqtt_suback_single_fn *on_suback, void *on_suback_ud); ``` Subscribes to the topic filter given with the given QoS. `on_publish` will be called whenever a packet matching `topic_filter` arrives. `on_suback` will be called when the SUBACK packet has been received. `topic_filter` must persist until `on_suback` is called. The packet_id of the SUBSCRIBE packet will be returned, or 0 on error. ```c uint16_t aws_mqtt_client_connection_unsubscribe( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic_filter, aws_mqtt_op_complete_fn *on_unsuback, void *on_unsuback_ud); ``` Unsubscribes from the topic filter given. `topic_filter` must persist until `on_unsuback` is called. The packet_id of the UNSUBSCRIBE packet will be returned, or 0 on error. 
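For orientation, here is a minimal sketch of wiring the subscribe callbacks together using the `aws_mqtt_client_connection_subscribe_single` call documented above. The callback parameter lists mirror those used by the `elastipubsub` sample under `bin/elastipubsub/`; treat them as illustrative and check `aws/mqtt/client.h` in your version of the library for the authoritative `aws_mqtt_client_publish_received_fn` and `aws_mqtt_suback_single_fn` typedefs.

```c
#include <aws/common/byte_buf.h>
#include <aws/common/error.h>
#include <aws/mqtt/client.h>

#include <stdio.h>

static void s_my_on_publish(
    struct aws_mqtt_client_connection *connection,
    const struct aws_byte_cursor *topic,
    const struct aws_byte_cursor *payload,
    bool dup,
    enum aws_mqtt_qos qos,
    bool retain,
    void *userdata) {

    /* Invoked for every PUBLISH whose topic matches the filter. */
    (void)connection;
    (void)dup;
    (void)qos;
    (void)retain;
    (void)userdata;
    printf(
        "Received '" PRInSTR "' on '" PRInSTR "'\n",
        AWS_BYTE_CURSOR_PRI(*payload),
        AWS_BYTE_CURSOR_PRI(*topic));
}

static void s_my_on_suback(
    struct aws_mqtt_client_connection *connection,
    uint16_t packet_id,
    const struct aws_byte_cursor *topic,
    enum aws_mqtt_qos qos,
    int error_code,
    void *userdata) {

    /* Invoked once the SUBACK comes back; the topic filter may be released after this. */
    (void)connection;
    (void)packet_id;
    (void)topic;
    (void)qos;
    (void)userdata;
    printf("Subscription acknowledged, error_code=%d\n", error_code);
}

/* Must stay valid until s_my_on_suback has run. */
static struct aws_byte_cursor s_topic_filter;

void subscribe_example(struct aws_mqtt_client_connection *connection) {
    s_topic_filter = aws_byte_cursor_from_c_str("sample/topic");

    uint16_t packet_id = aws_mqtt_client_connection_subscribe_single(
        connection,
        &s_topic_filter,
        AWS_MQTT_QOS_AT_LEAST_ONCE,
        s_my_on_publish,
        NULL /* on_publish_ud */,
        s_my_on_suback,
        NULL /* on_suback_ud */);

    if (packet_id == 0) {
        fprintf(stderr, "Subscribe failed: %s\n", aws_error_debug_str(aws_last_error()));
    }
}
```

The persistence rules described throughout this section apply here as well: `s_topic_filter` is kept in static storage so it outlives the call, and it may only be reused or released once `s_my_on_suback` has fired.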
```c uint16_t aws_mqtt_client_connection_publish( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload, aws_mqtt_op_complete_fn *on_complete, void *userdata); ``` Publish a payload to the topic specified. For QoS 0, `on_complete` will be called as soon as the packet is sent over the wire. For QoS 1, as soon as PUBACK comes back. For QoS 2, PUBCOMP. `topic` and `payload` must persist until `on_complete`. ```c int aws_mqtt_client_connection_ping(struct aws_mqtt_client_connection *connection); ``` Sends a PINGREQ packet to the server. [aws-c-io]: https://github.com/awslabs/aws-c-io aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/000077500000000000000000000000001456575232400207655ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/elastipubsub/000077500000000000000000000000001456575232400234675ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/elastipubsub/CMakeLists.txt000066400000000000000000000016561456575232400262370ustar00rootroot00000000000000project(elastipubsub C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB ELASTIPUBSUB_SRC "*.c" ) set(ELASTIPUBSUB_PROJECT_NAME elastipubsub) add_executable(${ELASTIPUBSUB_PROJECT_NAME} ${ELASTIPUBSUB_SRC}) aws_set_common_properties(${ELASTIPUBSUB_PROJECT_NAME}) target_include_directories(${ELASTIPUBSUB_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${ELASTIPUBSUB_PROJECT_NAME} PRIVATE aws-c-mqtt) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " elastiPUBSUB will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${ELASTIPUBSUB_PROJECT_NAME} EXPORT ${ELASTIPUBSUB_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/elastipubsub/main.c000066400000000000000000000702271456575232400245670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */ # pragma warning(disable : 4204) /* Declared initializers */ # pragma warning(disable : 4221) /* Local var in declared initializer */ #endif struct app_ctx { struct aws_allocator *allocator; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_uri uri; uint32_t port; const char *cacert; const char *cert; const char *key; int connect_timeout; int iterations; int connection_count; int message_count; int target_cops; int target_pops; int pending_publish_completions; int publish_successes; int publish_failures; int received_messages; struct aws_tls_connection_options tls_connection_options; struct aws_linked_list pending_connection_list; struct aws_linked_list established_connection_list; const char *log_filename; enum aws_log_level log_level; }; static void s_usage(int exit_code) { fprintf(stderr, "usage: elastipubsub [options] --endpoint\n"); fprintf(stderr, " --endpoint: url to connect to \n"); fprintf(stderr, "\n Options:\n\n"); fprintf(stderr, " --cacert FILE: path to a CA certficate file.\n"); fprintf(stderr, " --cert FILE: path to a PEM encoded certificate to use with mTLS\n"); fprintf(stderr, " --key FILE: Path to a PEM encoded private key that matches cert.\n"); fprintf(stderr, " --cops INT: target control (connect, subscribe) operations per second\n"); fprintf(stderr, " --connect-timeout INT: time in milliseconds to wait for a connection.\n"); fprintf(stderr, " -i, --iterations INT: number of independent iterations to run the test for\n"); fprintf(stderr, " -k, --connections INT: number of independent connections to make.\n"); fprintf(stderr, " -l, --log FILE: dumps logs to FILE instead of stderr.\n"); fprintf(stderr, " -n, --messages INT: number of messages to publish per iteration\n"); fprintf(stderr, " -p, --pops INT: target publish operations per second\n"); fprintf(stderr, " -v, --verbose: ERROR|INFO|DEBUG|TRACE: log level to configure. 
Default is none.\n"); fprintf(stderr, " -h, --help\n"); fprintf(stderr, " Display this message and quit.\n"); exit(exit_code); } static struct aws_cli_option s_long_options[] = { {"cacert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'a'}, {"cert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'c'}, {"cops", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'C'}, {"key", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'e'}, {"connect-timeout", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'f'}, {"iterations", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'i'}, {"connections", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'k'}, {"log", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'l'}, {"messages", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'n'}, {"pops", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'p'}, {"verbose", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'v'}, {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, {"endpoint", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'E'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_parse_options(int argc, char **argv, struct app_ctx *ctx) { bool uri_found = false; while (true) { int option_index = 0; int c = aws_cli_getopt_long(argc, argv, "a:c:C:e:f:i:k:l:n:p:v:h:E", s_long_options, &option_index); if (c == -1) { break; } switch (c) { case 0: /* getopt_long() returns 0 if an option.flag is non-null */ break; case 'a': ctx->cacert = aws_cli_optarg; break; case 'c': ctx->cert = aws_cli_optarg; break; case 'C': ctx->target_cops = atoi(aws_cli_optarg); break; case 'e': ctx->key = aws_cli_optarg; break; case 'f': ctx->connect_timeout = atoi(aws_cli_optarg); break; case 'i': ctx->iterations = atoi(aws_cli_optarg); break; case 'k': ctx->connection_count = atoi(aws_cli_optarg); break; case 'l': ctx->log_filename = aws_cli_optarg; break; case 'n': ctx->message_count = atoi(aws_cli_optarg); break; case 'p': ctx->target_pops = atoi(aws_cli_optarg); break; case 'v': if (!strcmp(aws_cli_optarg, "TRACE")) { ctx->log_level = AWS_LL_TRACE; } else if (!strcmp(aws_cli_optarg, "INFO")) { ctx->log_level = AWS_LL_INFO; } else if (!strcmp(aws_cli_optarg, "DEBUG")) { ctx->log_level = AWS_LL_DEBUG; } else if (!strcmp(aws_cli_optarg, "ERROR")) { ctx->log_level = AWS_LL_ERROR; } else { fprintf(stderr, "unsupported log level %s.\n", aws_cli_optarg); s_usage(1); } break; case 'h': s_usage(0); break; case 0x02: { struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str(aws_cli_positional_arg); if (aws_uri_init_parse(&ctx->uri, ctx->allocator, &uri_cursor)) { fprintf( stderr, "Failed to parse uri %s with error %s\n", (char *)uri_cursor.ptr, aws_error_debug_str(aws_last_error())); s_usage(1); } uri_found = true; break; } default: fprintf(stderr, "Unknown option\n"); s_usage(1); } } if (!uri_found) { fprintf(stderr, "A URI for the request must be supplied.\n"); s_usage(1); } } struct connection_user_data { struct aws_linked_list_node node; struct app_ctx *app_ctx; struct aws_mqtt_client *client; struct aws_mqtt_client_connection *connection; uint32_t id; uint32_t subscription_id; }; static void s_final_destroy_connection_data(struct connection_user_data *user_data) { aws_mutex_lock(&user_data->app_ctx->lock); aws_linked_list_remove(&user_data->node); aws_mutex_unlock(&user_data->app_ctx->lock); aws_condition_variable_notify_one(&user_data->app_ctx->signal); if (user_data->connection != NULL) { aws_mqtt_client_connection_release(user_data->connection); user_data->connection = NULL; } if (user_data->client != NULL) { 
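        /* Ref-counted release of the aws_mqtt_client that s_establish_connections()
         * created for this slot; the client is destroyed only once every remaining
         * holder has released it. */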
aws_mqtt_client_release(user_data->client); } } static void s_on_connection_complete( struct aws_mqtt_client_connection *connection, int error_code, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; (void)session_present; struct connection_user_data *user_data = userdata; if (error_code != AWS_ERROR_SUCCESS || return_code != AWS_MQTT_CONNECT_ACCEPTED) { s_final_destroy_connection_data(user_data); return; } aws_mutex_lock(&user_data->app_ctx->lock); aws_linked_list_remove(&user_data->node); aws_linked_list_push_back(&user_data->app_ctx->established_connection_list, &user_data->node); aws_mutex_unlock(&user_data->app_ctx->lock); aws_condition_variable_notify_one(&user_data->app_ctx->signal); } static bool s_all_connections_complete(void *context) { struct app_ctx *app_ctx = context; return aws_linked_list_empty(&app_ctx->pending_connection_list); } #define MIN_SLEEP_TIME_MILLIS 10 static void s_throttle_operations(int target_ops, uint64_t start_time, int32_t operation_index) { if (target_ops == 0) { return; } uint64_t now = 0; aws_high_res_clock_get_ticks(&now); AWS_FATAL_ASSERT(now >= start_time); uint64_t elapsed_nanos = now - start_time; uint64_t nanos_per_second = aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); uint64_t target_ops_u64 = (uint64_t)target_ops; uint64_t operation_count_u64 = (uint64_t)operation_index; uint64_t desired_elapsed_nanos = 0; uint64_t product = 0; if (aws_mul_u64_checked(operation_count_u64, nanos_per_second, &product)) { double product_d = (double)operation_count_u64 * (double)nanos_per_second; desired_elapsed_nanos = (uint64_t)(product_d / (double)target_ops_u64); } else { desired_elapsed_nanos = product / target_ops_u64; } if (desired_elapsed_nanos > elapsed_nanos) { uint64_t nano_difference = desired_elapsed_nanos - elapsed_nanos; uint64_t millis = aws_timestamp_convert(nano_difference, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); if (millis > MIN_SLEEP_TIME_MILLIS) { aws_thread_current_sleep(nano_difference); } } } static void s_establish_connections( struct app_ctx *app_ctx, struct connection_user_data *connections, struct aws_client_bootstrap *bootstrap, struct aws_tls_connection_options *tls_connection_options) { struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .connect_timeout_ms = (uint32_t)app_ctx->connect_timeout, .keep_alive_timeout_sec = 0, .keepalive = false, .keep_alive_interval_sec = 0, }; aws_mutex_lock(&app_ctx->lock); for (int j = 0; j < app_ctx->connection_count; j++) { struct connection_user_data *connection_data = connections + j; aws_linked_list_push_back(&app_ctx->pending_connection_list, &connection_data->node); connection_data->app_ctx = app_ctx; connection_data->client = aws_mqtt_client_new(app_ctx->allocator, bootstrap); AWS_FATAL_ASSERT(connection_data->client != NULL); connection_data->connection = aws_mqtt_client_connection_new(connection_data->client); AWS_FATAL_ASSERT(connection_data->connection != NULL); connection_data->id = (uint32_t)j; } aws_mutex_unlock(&app_ctx->lock); uint64_t start_time = 0; aws_high_res_clock_get_ticks(&start_time); for (int j = 0; j < app_ctx->connection_count; j++) { struct connection_user_data *connection_data = connections + j; char client_id[32]; snprintf(client_id, AWS_ARRAY_SIZE(client_id), "stress%d", j); struct aws_mqtt_connection_options connection_options = { .host_name = app_ctx->uri.host_name, .port = app_ctx->port, .socket_options = &socket_options, .tls_options = 
tls_connection_options, .client_id = aws_byte_cursor_from_c_str(client_id), .keep_alive_time_secs = 0, .ping_timeout_ms = 0, .protocol_operation_timeout_ms = (uint32_t)aws_timestamp_convert(10, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), .on_connection_complete = s_on_connection_complete, .user_data = connection_data, .clean_session = true, }; if (aws_mqtt_client_connection_connect(connection_data->connection, &connection_options)) { s_final_destroy_connection_data(connection_data); } if (j > 0 && j % 100 == 0) { printf("Connection count: %d\n", j); } s_throttle_operations(app_ctx->target_cops, start_time, j + 1); } aws_mutex_lock(&app_ctx->lock); aws_condition_variable_wait_pred(&app_ctx->signal, &app_ctx->lock, s_all_connections_complete, app_ctx); AWS_FATAL_ASSERT(!aws_linked_list_empty(&app_ctx->established_connection_list)); aws_mutex_unlock(&app_ctx->lock); } void s_on_publish( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; (void)topic; (void)payload; (void)dup; (void)qos; (void)retain; struct app_ctx *app_ctx = userdata; aws_mutex_lock(&app_ctx->lock); ++app_ctx->received_messages; aws_mutex_unlock(&app_ctx->lock); } void s_on_disconnect_complete(struct aws_mqtt_client_connection *connection, void *userdata) { (void)connection; s_final_destroy_connection_data(userdata); } void s_on_subscribe_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)topic; (void)qos; struct connection_user_data *connection_data = userdata; if (error_code != AWS_ERROR_SUCCESS) { if (aws_mqtt_client_connection_disconnect( connection_data->connection, s_on_disconnect_complete, connection_data)) { s_final_destroy_connection_data(connection_data); } return; } aws_mutex_lock(&connection_data->app_ctx->lock); aws_linked_list_remove(&connection_data->node); aws_linked_list_push_back(&connection_data->app_ctx->established_connection_list, &connection_data->node); aws_mutex_unlock(&connection_data->app_ctx->lock); aws_condition_variable_notify_one(&connection_data->app_ctx->signal); } void s_on_subscribe_removed(void *userdata) { (void)userdata; } static void s_establish_subscriptions(struct app_ctx *app_ctx) { struct aws_linked_list connections; aws_linked_list_init(&connections); aws_mutex_lock(&app_ctx->lock); aws_linked_list_swap_contents(&app_ctx->established_connection_list, &connections); aws_mutex_unlock(&app_ctx->lock); uint64_t start_time = 0; aws_high_res_clock_get_ticks(&start_time); uint32_t subscription_index = 1; while (!aws_linked_list_empty(&connections)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connections); struct connection_user_data *connection_data = AWS_CONTAINER_OF(node, struct connection_user_data, node); aws_mutex_lock(&app_ctx->lock); aws_linked_list_push_back(&app_ctx->pending_connection_list, &connection_data->node); aws_mutex_unlock(&app_ctx->lock); connection_data->subscription_id = subscription_index; char buffer[32]; snprintf(buffer, AWS_ARRAY_SIZE(buffer), "topic%d", subscription_index++); struct aws_byte_cursor topic_cursor = aws_byte_cursor_from_c_str(buffer); uint16_t id = aws_mqtt_client_connection_subscribe( connection_data->connection, &topic_cursor, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish, app_ctx, s_on_subscribe_removed, 
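            /* s_on_subscribe_complete runs when the SUBACK arrives: on success it moves
             * this connection back onto the established list, on failure it starts the
             * disconnect/cleanup path. */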
s_on_subscribe_complete, connection_data); if (id == 0) { if (aws_mqtt_client_connection_disconnect( connection_data->connection, s_on_disconnect_complete, connection_data)) { s_final_destroy_connection_data(connection_data); } } if (subscription_index > 0 && subscription_index % 100 == 0) { printf("Subscribe count: %d\n", subscription_index); } s_throttle_operations(app_ctx->target_cops, start_time, subscription_index - 1); } aws_mutex_lock(&app_ctx->lock); aws_condition_variable_wait_pred(&app_ctx->signal, &app_ctx->lock, s_all_connections_complete, app_ctx); AWS_FATAL_ASSERT(!aws_linked_list_empty(&app_ctx->established_connection_list)); aws_mutex_unlock(&app_ctx->lock); } static void s_teardown_connections(struct app_ctx *app_ctx) { struct aws_linked_list connections; aws_linked_list_init(&connections); aws_mutex_lock(&app_ctx->lock); aws_linked_list_swap_contents(&app_ctx->established_connection_list, &connections); aws_mutex_unlock(&app_ctx->lock); while (!aws_linked_list_empty(&connections)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connections); struct connection_user_data *connection_data = AWS_CONTAINER_OF(node, struct connection_user_data, node); aws_mutex_lock(&app_ctx->lock); aws_linked_list_push_back(&app_ctx->pending_connection_list, &connection_data->node); aws_mutex_unlock(&app_ctx->lock); if (aws_mqtt_client_connection_disconnect( connection_data->connection, s_on_disconnect_complete, connection_data)) { s_final_destroy_connection_data(connection_data); } } aws_mutex_lock(&app_ctx->lock); aws_condition_variable_wait_pred(&app_ctx->signal, &app_ctx->lock, s_all_connections_complete, app_ctx); aws_mutex_unlock(&app_ctx->lock); } static void s_build_web(struct app_ctx *app_ctx, struct aws_array_list *connections, struct aws_array_list *topics) { aws_array_list_init_dynamic( connections, app_ctx->allocator, app_ctx->connection_count, sizeof(struct connection_user_data *)); aws_array_list_init_dynamic(topics, app_ctx->allocator, app_ctx->connection_count, sizeof(uint32_t)); aws_mutex_lock(&app_ctx->lock); struct aws_linked_list_node *node = aws_linked_list_begin(&app_ctx->established_connection_list); while (node != aws_linked_list_end(&app_ctx->established_connection_list)) { struct connection_user_data *connection_data = AWS_CONTAINER_OF(node, struct connection_user_data, node); uint32_t topic_id = connection_data->subscription_id; aws_array_list_push_back(connections, &connection_data); aws_array_list_push_back(topics, &topic_id); node = aws_linked_list_next(node); } aws_mutex_unlock(&app_ctx->lock); } static void s_on_publish_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata) { (void)connection; (void)packet_id; struct app_ctx *app_ctx = userdata; aws_mutex_lock(&app_ctx->lock); --app_ctx->pending_publish_completions; if (error_code == AWS_ERROR_SUCCESS) { ++app_ctx->publish_successes; } else { ++app_ctx->publish_failures; } aws_mutex_unlock(&app_ctx->lock); aws_condition_variable_notify_one(&app_ctx->signal); } static bool s_all_publishes_complete(void *userdata) { struct app_ctx *app_ctx = userdata; return app_ctx->pending_publish_completions == 0; } static void s_publish(struct app_ctx *app_ctx) { struct aws_array_list valid_topics; AWS_ZERO_STRUCT(valid_topics); struct aws_array_list valid_connections; AWS_ZERO_STRUCT(valid_connections); s_build_web(app_ctx, &valid_connections, &valid_topics); printf("Using %d connections\n", (int)aws_array_list_length(&valid_connections)); size_t 
connection_count = aws_array_list_length(&valid_connections); AWS_FATAL_ASSERT(connection_count > 0); size_t topic_count = aws_array_list_length(&valid_topics); AWS_FATAL_ASSERT(topic_count > 0); aws_mutex_lock(&app_ctx->lock); app_ctx->pending_publish_completions = app_ctx->message_count; aws_mutex_unlock(&app_ctx->lock); struct aws_byte_cursor payload_cursor = aws_byte_cursor_from_c_str("MESSAGE PAYLOAD"); char topic_buffer[32]; uint64_t now = 0; aws_high_res_clock_get_ticks(&now); int failed_publishes = 0; for (int i = 0; i < app_ctx->message_count; ++i) { size_t random_connection_index = rand() % connection_count; struct connection_user_data *connection_ud = NULL; aws_array_list_get_at(&valid_connections, &connection_ud, random_connection_index); size_t random_topic_index = rand() % topic_count; uint32_t topic_id = 0; aws_array_list_get_at(&valid_topics, &topic_id, random_topic_index); snprintf(topic_buffer, AWS_ARRAY_SIZE(topic_buffer), "topic%d", topic_id); struct aws_byte_cursor topic_cursor = aws_byte_cursor_from_c_str(topic_buffer); uint16_t id = aws_mqtt_client_connection_publish( connection_ud->connection, &topic_cursor, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_cursor, s_on_publish_complete, app_ctx); if (id == 0) { ++failed_publishes; } if (i > 0 && i % 1000 == 0) { printf("Publish count: %d\n", i); } s_throttle_operations(app_ctx->target_pops, now, i + 1); } aws_mutex_lock(&app_ctx->lock); app_ctx->pending_publish_completions -= failed_publishes; app_ctx->publish_failures += failed_publishes; aws_condition_variable_wait_pred(&app_ctx->signal, &app_ctx->lock, s_all_publishes_complete, app_ctx); aws_mutex_unlock(&app_ctx->lock); aws_array_list_clean_up(&valid_topics); aws_array_list_clean_up(&valid_connections); } static void s_reset_app_ctx(struct app_ctx *app_ctx) { AWS_FATAL_ASSERT(aws_linked_list_empty(&app_ctx->pending_connection_list)); aws_linked_list_init(&app_ctx->pending_connection_list); AWS_FATAL_ASSERT(aws_linked_list_empty(&app_ctx->established_connection_list)); aws_linked_list_init(&app_ctx->established_connection_list); app_ctx->pending_publish_completions = 0; app_ctx->publish_successes = 0; app_ctx->publish_failures = 0; app_ctx->received_messages = 0; } int main(int argc, char **argv) { struct aws_allocator *allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8); aws_mqtt_library_init(allocator); struct app_ctx app_ctx; AWS_ZERO_STRUCT(app_ctx); app_ctx.allocator = allocator; app_ctx.signal = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; app_ctx.connect_timeout = 3000; app_ctx.iterations = 1; app_ctx.message_count = 10; app_ctx.connection_count = 1; aws_mutex_init(&app_ctx.lock); app_ctx.port = 8883; app_ctx.target_cops = 250; app_ctx.target_pops = 10000; aws_linked_list_init(&app_ctx.pending_connection_list); aws_linked_list_init(&app_ctx.established_connection_list); s_parse_options(argc, argv, &app_ctx); if (app_ctx.uri.port) { app_ctx.port = app_ctx.uri.port; } struct aws_logger logger; AWS_ZERO_STRUCT(logger); struct aws_logger_standard_options options = { .level = app_ctx.log_level, }; if (app_ctx.log_level) { if (app_ctx.log_filename) { options.filename = app_ctx.log_filename; } else { options.file = stderr; } if (aws_logger_init_standard(&logger, allocator, &options)) { fprintf(stderr, "Failed to initialize logger with error %s\n", aws_error_debug_str(aws_last_error())); exit(1); } aws_logger_set(&logger); } for (int i = 0; i < app_ctx.iterations; ++i) { printf("Iteration %d starting\n", i + 1); 
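        /* Each iteration builds a fresh TLS context, event loop group, host resolver,
         * and client bootstrap, runs the connect/subscribe/publish/teardown cycle, and
         * releases everything again, so the outstanding-bytes figure printed at the end
         * of the iteration reflects only what that pass leaked. */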
s_reset_app_ctx(&app_ctx); struct aws_tls_ctx *tls_ctx = NULL; struct aws_tls_ctx_options tls_ctx_options; AWS_ZERO_STRUCT(tls_ctx_options); struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); if (app_ctx.cert && app_ctx.key) { if (aws_tls_ctx_options_init_client_mtls_from_path( &tls_ctx_options, allocator, app_ctx.cert, app_ctx.key)) { fprintf( stderr, "Failed to load %s and %s with error %s.", app_ctx.cert, app_ctx.key, aws_error_debug_str(aws_last_error())); exit(1); } } else { aws_tls_ctx_options_init_default_client(&tls_ctx_options, allocator); } if (app_ctx.cacert) { if (aws_tls_ctx_options_override_default_trust_store_from_path(&tls_ctx_options, NULL, app_ctx.cacert)) { fprintf( stderr, "Failed to load %s with error %s", app_ctx.cacert, aws_error_debug_str(aws_last_error())); exit(1); } } if (aws_tls_ctx_options_set_alpn_list(&tls_ctx_options, "x-amzn-mqtt-ca")) { fprintf(stderr, "Failed to set alpn list with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); if (!tls_ctx) { fprintf(stderr, "Failed to initialize TLS context with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } aws_tls_connection_options_init_from_ctx(&tls_connection_options, tls_ctx); if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &app_ctx.uri.host_name)) { fprintf(stderr, "Failed to set servername with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 2, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct connection_user_data *connections = aws_mem_calloc(allocator, app_ctx.connection_count, sizeof(struct connection_user_data)); AWS_FATAL_ASSERT(connections != NULL); s_establish_connections(&app_ctx, connections, bootstrap, &tls_connection_options); s_establish_subscriptions(&app_ctx); s_publish(&app_ctx); s_teardown_connections(&app_ctx); aws_mem_release(allocator, connections); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); if (tls_ctx) { aws_tls_connection_options_clean_up(&tls_connection_options); aws_tls_ctx_release(tls_ctx); aws_tls_ctx_options_clean_up(&tls_ctx_options); } aws_thread_join_all_managed(); const size_t outstanding_bytes = aws_mem_tracer_bytes(allocator); printf("Iteration %d summary:\n", i + 1); printf(" Successful Publishes: %d\n", app_ctx.publish_successes); printf(" Failed Publishes: %d\n", app_ctx.publish_failures); printf(" Outstanding bytes: %zu\n\n", outstanding_bytes); } if (app_ctx.log_level) { aws_logger_set(NULL); aws_logger_clean_up(&logger); } aws_uri_clean_up(&app_ctx.uri); aws_mqtt_library_clean_up(); const size_t leaked_bytes = aws_mem_tracer_bytes(allocator); if (leaked_bytes) { struct aws_logger memory_logger; AWS_ZERO_STRUCT(memory_logger); aws_logger_init_noalloc(&memory_logger, aws_default_allocator(), &options); aws_logger_set(&memory_logger); aws_mqtt_library_init(aws_default_allocator()); printf("Writing memory leaks to log.\n"); aws_mem_tracer_dump(allocator); 
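        /* The dump above goes through the no-alloc logger installed just before it,
         * which is backed by the default allocator, so reporting the leaks does not
         * itself allocate from the tracked allocator being inspected. */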
aws_logger_set(NULL); aws_logger_clean_up(&memory_logger); aws_mqtt_library_clean_up(); } else { printf("Finished, with no memory leaks\n"); } aws_mem_tracer_destroy(allocator); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/elastipubsub5/000077500000000000000000000000001456575232400235545ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/elastipubsub5/CMakeLists.txt000066400000000000000000000017321456575232400263170ustar00rootroot00000000000000project(elastipubsub5 C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB ELASTIPUBSUB_SRC "*.c" ) set(ELASTIPUBSUB_MQTT5_PROJECT_NAME elastipubsub5) add_executable(${ELASTIPUBSUB_MQTT5_PROJECT_NAME} ${ELASTIPUBSUB_SRC}) aws_set_common_properties(${ELASTIPUBSUB_MQTT5_PROJECT_NAME}) target_include_directories(${ELASTIPUBSUB_MQTT5_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${ELASTIPUBSUB_MQTT5_PROJECT_NAME} PRIVATE aws-c-mqtt) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " elastiPUBSUB will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${ELASTIPUBSUB_MQTT5_PROJECT_NAME} EXPORT ${ELASTIPUBSUB_MQTT5_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/elastipubsub5/main.c000066400000000000000000000643541456575232400246600ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */ # pragma warning(disable : 4204) /* Declared initializers */ # pragma warning(disable : 4221) /* Local var in declared initializer */ #endif #ifdef WIN32 // Windows does not need specific imports #else # include #endif struct app_ctx { struct aws_allocator *allocator; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_uri uri; uint32_t port; const char *cacert; const char *cert; const char *key; int connect_timeout; bool use_websockets; struct aws_tls_connection_options tls_connection_options; const char *log_filename; enum aws_log_level log_level; }; static void s_usage(int exit_code) { fprintf(stderr, "usage: elastipubsub5 [options] endpoint\n"); fprintf(stderr, " endpoint: url to connect to\n"); fprintf(stderr, "\n Options:\n\n"); fprintf(stderr, " --cacert FILE: path to a CA certficate file.\n"); fprintf(stderr, " --cert FILE: path to a PEM encoded certificate to use with mTLS\n"); fprintf(stderr, " --key FILE: Path to a PEM encoded private key that matches cert.\n"); fprintf(stderr, " --connect-timeout INT: time in milliseconds to wait for a connection.\n"); fprintf(stderr, " -l, --log FILE: dumps logs to FILE instead of stderr.\n"); fprintf(stderr, " -v, --verbose: ERROR|INFO|DEBUG|TRACE: log level to configure. 
Default is none.\n"); fprintf(stderr, " -w, --websockets: use mqtt-over-websockets rather than direct mqtt\n"); fprintf(stderr, " -h, --help\n"); fprintf(stderr, " Display this message and quit.\n"); exit(exit_code); } static struct aws_cli_option s_long_options[] = { {"cacert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'a'}, {"cert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'c'}, {"key", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'e'}, {"connect-timeout", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'f'}, {"log", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'l'}, {"verbose", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'v'}, {"websockets", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'w'}, {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_parse_options(int argc, char **argv, struct app_ctx *ctx) { bool uri_found = false; while (true) { int option_index = 0; int c = aws_cli_getopt_long(argc, argv, "a:c:e:f:l:v:wh", s_long_options, &option_index); if (c == -1) { break; } switch (c) { case 0: /* getopt_long() returns 0 if an option.flag is non-null */ break; case 'a': ctx->cacert = aws_cli_optarg; break; case 'c': ctx->cert = aws_cli_optarg; break; case 'e': ctx->key = aws_cli_optarg; break; case 'f': ctx->connect_timeout = atoi(aws_cli_optarg); break; case 'l': ctx->log_filename = aws_cli_optarg; break; case 'v': if (!strcmp(aws_cli_optarg, "TRACE")) { ctx->log_level = AWS_LL_TRACE; } else if (!strcmp(aws_cli_optarg, "INFO")) { ctx->log_level = AWS_LL_INFO; } else if (!strcmp(aws_cli_optarg, "DEBUG")) { ctx->log_level = AWS_LL_DEBUG; } else if (!strcmp(aws_cli_optarg, "ERROR")) { ctx->log_level = AWS_LL_ERROR; } else { fprintf(stderr, "unsupported log level %s.\n", aws_cli_optarg); s_usage(1); } break; case 'h': s_usage(0); break; case 'w': ctx->use_websockets = true; break; case 0x02: { struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str(aws_cli_positional_arg); if (aws_uri_init_parse(&ctx->uri, ctx->allocator, &uri_cursor)) { fprintf( stderr, "Failed to parse uri %s with error %s\n", (char *)uri_cursor.ptr, aws_error_debug_str(aws_last_error())); s_usage(1); } uri_found = true; break; } default: fprintf(stderr, "Unknown option\n"); s_usage(1); } } if (!uri_found) { fprintf(stderr, "A URI for the request must be supplied.\n"); s_usage(1); } } static void s_on_subscribe_complete_fn( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { (void)error_code; (void)complete_ctx; printf("SUBACK received!\n"); for (size_t i = 0; i < suback->reason_code_count; ++i) { printf("Subscription %d: %s\n", (int)i, aws_mqtt5_suback_reason_code_to_c_string(suback->reason_codes[i])); } fflush(stdout); } static void s_on_unsubscribe_complete_fn( const struct aws_mqtt5_packet_unsuback_view *unsuback, int error_code, void *complete_ctx) { (void)error_code; (void)complete_ctx; printf("UNSUBACK received!\n"); for (size_t i = 0; i < unsuback->reason_code_count; ++i) { printf("Topic Filter %d: %s\n", (int)i, aws_mqtt5_unsuback_reason_code_to_c_string(unsuback->reason_codes[i])); } fflush(stdout); } static void s_on_publish_complete_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)complete_ctx; switch (error_code) { case AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY: printf("PUBLISH FAILED due to disconnect and offline queue policy"); break; case AWS_ERROR_MQTT_TIMEOUT: 
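            /* the operation timed out waiting for an acknowledgement from the server */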
printf("PUBLISH FAILED due to MQTT Timeout"); break; case AWS_ERROR_SUCCESS: printf("PUBLISH SUCCESS\n"); break; default: break; } if (packet_type == AWS_MQTT5_PT_PUBACK) { const struct aws_mqtt5_packet_puback_view *puback = packet; printf("PUBACK received!\n"); printf("PUBACK id:%d %s\n", puback->packet_id, aws_mqtt5_puback_reason_code_to_c_string(puback->reason_code)); } else { printf("PUBLISH Complete with no PUBACK\n"); } fflush(stdout); } static void s_on_publish_received(const struct aws_mqtt5_packet_publish_view *publish, void *user_data) { (void)publish; (void)user_data; printf("PUBLISH received!\n"); printf( "Publish received to topic:'" PRInSTR "' payload '" PRInSTR "'\n", AWS_BYTE_CURSOR_PRI(publish->topic), AWS_BYTE_CURSOR_PRI(publish->payload)); } static void s_lifecycle_event_callback(const struct aws_mqtt5_client_lifecycle_event *event) { switch (event->event_type) { case AWS_MQTT5_CLET_STOPPED: printf("Lifecycle event: Stopped!\n"); break; case AWS_MQTT5_CLET_ATTEMPTING_CONNECT: printf("Lifecycle event: Attempting Connect!\n"); break; case AWS_MQTT5_CLET_CONNECTION_FAILURE: printf("Lifecycle event: Connection Failure!\n"); printf(" Error Code: %d(%s)\n", event->error_code, aws_error_debug_str(event->error_code)); break; case AWS_MQTT5_CLET_CONNECTION_SUCCESS: printf("Lifecycle event: Connection Success!\n"); break; case AWS_MQTT5_CLET_DISCONNECTION: printf("Lifecycle event: Disconnect!\n"); printf(" Error Code: %d(%s)\n", event->error_code, aws_error_debug_str(event->error_code)); break; } fflush(stdout); } static bool s_skip_whitespace(uint8_t value) { return value == '\n' || value == '\r' || value == '\t' || value == ' '; } static void s_split_command_line(struct aws_byte_cursor cursor, struct aws_array_list *words) { struct aws_byte_cursor split_cursor; AWS_ZERO_STRUCT(split_cursor); while (aws_byte_cursor_next_split(&cursor, ' ', &split_cursor)) { struct aws_byte_cursor word_cursor = aws_byte_cursor_trim_pred(&split_cursor, &s_skip_whitespace); if (word_cursor.len > 0) { aws_array_list_push_back(words, &word_cursor); } } } static void s_handle_subscribe( struct aws_mqtt5_client *client, struct aws_allocator *allocator, struct aws_array_list *arguments) { struct aws_mqtt5_subscribe_completion_options subscribe_completion_options = { .completion_callback = &s_on_subscribe_complete_fn, .completion_user_data = NULL, }; size_t argument_count = aws_array_list_length(arguments) - 1; if (argument_count < 2) { printf("invalid subscribe options:\n"); printf(" subscribe topic1 topic2 ....\n"); return; } struct aws_byte_cursor qos_cursor; AWS_ZERO_STRUCT(qos_cursor); aws_array_list_get_at(arguments, &qos_cursor, 1); struct aws_string *qos_string = aws_string_new_from_cursor(allocator, &qos_cursor); int qos_value = atoi((const char *)qos_string->bytes); enum aws_mqtt5_qos qos = qos_value; size_t topic_count = aws_array_list_length(arguments) - 2; struct aws_array_list subscriptions; aws_array_list_init_dynamic(&subscriptions, allocator, topic_count, sizeof(struct aws_mqtt5_subscription_view)); printf("Subscribing to:\n"); for (size_t i = 0; i < topic_count; ++i) { size_t topic_index = i + 2; struct aws_byte_cursor topic_filter_cursor; aws_array_list_get_at(arguments, &topic_filter_cursor, topic_index); struct aws_mqtt5_subscription_view subscription = { .topic_filter = topic_filter_cursor, .qos = qos, .no_local = false, .retain_as_published = false, .retain_handling_type = AWS_MQTT5_RHT_DONT_SEND, }; printf(" %d:" PRInSTR "\n", (int)i, AWS_BYTE_CURSOR_PRI(topic_filter_cursor)); 
aws_array_list_push_back(&subscriptions, &subscription); } struct aws_mqtt5_packet_subscribe_view packet_subscribe_view = { .subscription_count = aws_array_list_length(&subscriptions), .subscriptions = subscriptions.data, }; aws_mqtt5_client_subscribe(client, &packet_subscribe_view, &subscribe_completion_options); aws_array_list_clean_up(&subscriptions); aws_string_destroy(qos_string); } static void s_handle_unsubscribe(struct aws_mqtt5_client *client, struct aws_array_list *arguments) { struct aws_mqtt5_unsubscribe_completion_options unsubscribe_completion_options = { .completion_callback = &s_on_unsubscribe_complete_fn, .completion_user_data = NULL, }; size_t argument_count = aws_array_list_length(arguments) - 1; if (argument_count < 1) { printf("invalid unsubscribe options:\n"); printf(" unsubscribe topic1 topic2 ....\n"); return; } size_t topic_count = aws_array_list_length(arguments) - 1; printf("Unsubscribing to:\n"); for (size_t i = 0; i < topic_count; ++i) { size_t topic_index = i + 1; struct aws_byte_cursor topic_filter_cursor; aws_array_list_get_at(arguments, &topic_filter_cursor, topic_index); printf(" %d:" PRInSTR "\n", (int)i, AWS_BYTE_CURSOR_PRI(topic_filter_cursor)); } struct aws_mqtt5_packet_unsubscribe_view packet_unsubscribe_view = { .topic_filter_count = topic_count, .topic_filters = ((struct aws_byte_cursor *)arguments->data) + 1, }; aws_mqtt5_client_unsubscribe(client, &packet_unsubscribe_view, &unsubscribe_completion_options); } static void s_handle_publish( struct aws_mqtt5_client *client, struct aws_allocator *allocator, struct aws_array_list *arguments, struct aws_byte_cursor *full_argument) { struct aws_mqtt5_publish_completion_options publish_completion_options = { .completion_callback = &s_on_publish_complete_fn, .completion_user_data = NULL, }; size_t argument_count = aws_array_list_length(arguments) - 1; if (argument_count < 2) { printf("invalid publish call:\n"); printf(" publish topic \n"); return; } /* QoS */ struct aws_byte_cursor qos_cursor; AWS_ZERO_STRUCT(qos_cursor); aws_array_list_get_at(arguments, &qos_cursor, 1); struct aws_string *qos_string = aws_string_new_from_cursor(allocator, &qos_cursor); int qos_value = atoi((const char *)qos_string->bytes); enum aws_mqtt5_qos qos = qos_value; /* TOPIC */ struct aws_byte_cursor topic_cursor; AWS_ZERO_STRUCT(topic_cursor); aws_array_list_get_at(arguments, &topic_cursor, 2); /* PAYLOAD */ struct aws_byte_cursor payload_cursor; AWS_ZERO_STRUCT(payload_cursor); /* account for empty payload */ if (argument_count > 2) { aws_array_list_get_at(arguments, &payload_cursor, 3); payload_cursor.len = (size_t)(full_argument->ptr + full_argument->len - payload_cursor.ptr); } printf( "Publishing to Topic:'" PRInSTR "' Payload:'" PRInSTR "'\n", AWS_BYTE_CURSOR_PRI(topic_cursor), AWS_BYTE_CURSOR_PRI(payload_cursor)); struct aws_mqtt5_packet_publish_view packet_publish_view = { .qos = qos, .topic = topic_cursor, .retain = false, .duplicate = false, .payload = payload_cursor, }; aws_mqtt5_client_publish(client, &packet_publish_view, &publish_completion_options); aws_string_destroy(qos_string); } static void s_on_disconnect_completion(int error_code, void *user_data) { (void)user_data; printf("DISCONNECT complete with error code %d(%s)!", error_code, aws_error_debug_str(error_code)); fflush(stdout); } static void s_handle_stop( struct aws_mqtt5_client *client, struct aws_allocator *allocator, struct aws_array_list *arguments) { size_t argument_count = aws_array_list_length(arguments) - 1; switch (argument_count) { case 0: 
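            /* no reason code supplied: shut the channel down without sending a DISCONNECT packet */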
printf("Stopping client by shutting down channel!\n"); aws_mqtt5_client_stop(client, NULL, NULL); break; case 1: { struct aws_byte_cursor reason_code_cursor; AWS_ZERO_STRUCT(reason_code_cursor); aws_array_list_get_at(arguments, &reason_code_cursor, 1); struct aws_string *reason_code_string = aws_string_new_from_cursor(allocator, &reason_code_cursor); int reason_code_value = atoi((const char *)reason_code_string->bytes); enum aws_mqtt5_disconnect_reason_code reason_code = reason_code_value; aws_string_destroy(reason_code_string); struct aws_mqtt5_packet_disconnect_view disconnect_options = { .reason_code = reason_code, }; struct aws_mqtt5_disconnect_completion_options completion_options = { .completion_callback = s_on_disconnect_completion, .completion_user_data = client, }; printf( "Stopping client cleanly by sending DISCONNECT packet with reason code %d(%s)!\n", reason_code_value, aws_mqtt5_disconnect_reason_code_to_c_string(reason_code, NULL)); aws_mqtt5_client_stop(client, &disconnect_options, &completion_options); break; } default: printf("invalid stop options:\n"); printf(" stop [optional int: reason_code]\n"); break; } } static bool s_handle_input(struct aws_mqtt5_client *client, struct aws_allocator *allocator, const char *input_line) { struct aws_byte_cursor quit_cursor = aws_byte_cursor_from_c_str("quit"); struct aws_byte_cursor start_cursor = aws_byte_cursor_from_c_str("start"); struct aws_byte_cursor stop_cursor = aws_byte_cursor_from_c_str("stop"); struct aws_byte_cursor subscribe_cursor = aws_byte_cursor_from_c_str("subscribe"); struct aws_byte_cursor unsubscribe_cursor = aws_byte_cursor_from_c_str("unsubscribe"); struct aws_byte_cursor publish_cursor = aws_byte_cursor_from_c_str("publish"); struct aws_array_list words; aws_array_list_init_dynamic(&words, allocator, 10, sizeof(struct aws_byte_cursor)); struct aws_byte_cursor line_cursor = aws_byte_cursor_from_c_str(input_line); line_cursor = aws_byte_cursor_trim_pred(&line_cursor, &s_skip_whitespace); bool done = false; s_split_command_line(line_cursor, &words); if (aws_array_list_length(&words) == 0) { printf("Empty command line\n"); goto done; } struct aws_byte_cursor command_cursor; AWS_ZERO_STRUCT(command_cursor); aws_array_list_get_at(&words, &command_cursor, 0); if (aws_byte_cursor_eq_ignore_case(&command_cursor, &quit_cursor)) { printf("Quitting!\n"); done = true; } else if (aws_byte_cursor_eq_ignore_case(&command_cursor, &start_cursor)) { printf("Starting client!\n"); aws_mqtt5_client_start(client); } else if (aws_byte_cursor_eq_ignore_case(&command_cursor, &stop_cursor)) { s_handle_stop(client, allocator, &words); } else if (aws_byte_cursor_eq_ignore_case(&command_cursor, &subscribe_cursor)) { s_handle_subscribe(client, allocator, &words); } else if (aws_byte_cursor_eq_ignore_case(&command_cursor, &unsubscribe_cursor)) { s_handle_unsubscribe(client, &words); } else if (aws_byte_cursor_eq_ignore_case(&command_cursor, &publish_cursor)) { s_handle_publish(client, allocator, &words, &line_cursor); } else { printf("Unknown command: " PRInSTR "\n", AWS_BYTE_CURSOR_PRI(command_cursor)); } done: aws_array_list_clean_up(&words); return done; } static void s_aws_mqtt5_transform_websocket_handshake_fn( struct aws_http_message *request, void *user_data, aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn, void *complete_ctx) { (void)user_data; (*complete_fn)(request, AWS_ERROR_SUCCESS, complete_ctx); } AWS_STATIC_STRING_FROM_LITERAL(s_client_id, "HelloWorld"); int main(int argc, char **argv) { struct aws_allocator 
*allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 15); aws_mqtt_library_init(allocator); struct app_ctx app_ctx; AWS_ZERO_STRUCT(app_ctx); app_ctx.allocator = allocator; app_ctx.signal = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; app_ctx.connect_timeout = 3000; aws_mutex_init(&app_ctx.lock); app_ctx.port = 1883; s_parse_options(argc, argv, &app_ctx); if (app_ctx.uri.port) { app_ctx.port = app_ctx.uri.port; } struct aws_logger logger; AWS_ZERO_STRUCT(logger); struct aws_logger_standard_options options = { .level = app_ctx.log_level, }; if (app_ctx.log_level) { if (app_ctx.log_filename) { options.filename = app_ctx.log_filename; } else { options.file = stderr; } if (aws_logger_init_standard(&logger, allocator, &options)) { fprintf(stderr, "Failed to initialize logger with error %s\n", aws_error_debug_str(aws_last_error())); exit(1); } aws_logger_set(&logger); } bool use_tls = false; struct aws_tls_ctx *tls_ctx = NULL; struct aws_tls_ctx_options tls_ctx_options; AWS_ZERO_STRUCT(tls_ctx_options); struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); if (app_ctx.cert && app_ctx.key) { if (aws_tls_ctx_options_init_client_mtls_from_path(&tls_ctx_options, allocator, app_ctx.cert, app_ctx.key)) { fprintf( stderr, "Failed to load %s and %s with error %s.", app_ctx.cert, app_ctx.key, aws_error_debug_str(aws_last_error())); exit(1); } if (app_ctx.cacert) { if (aws_tls_ctx_options_override_default_trust_store_from_path(&tls_ctx_options, NULL, app_ctx.cacert)) { fprintf( stderr, "Failed to load %s with error %s", app_ctx.cacert, aws_error_debug_str(aws_last_error())); exit(1); } } if (aws_tls_ctx_options_set_alpn_list(&tls_ctx_options, "x-amzn-mqtt-ca")) { fprintf(stderr, "Failed to set alpn list with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); if (!tls_ctx) { fprintf(stderr, "Failed to initialize TLS context with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } aws_tls_connection_options_init_from_ctx(&tls_connection_options, tls_ctx); if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &app_ctx.uri.host_name)) { fprintf(stderr, "Failed to set servername with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } use_tls = true; } struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 2, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .connect_timeout_ms = (uint32_t)app_ctx.connect_timeout, .keep_alive_timeout_sec = 0, .keepalive = false, .keep_alive_interval_sec = 0, }; uint16_t receive_maximum = 9; uint32_t maximum_packet_size = 128 * 1024; struct aws_mqtt5_packet_connect_view connect_options = { .keep_alive_interval_seconds = 30, .client_id = aws_byte_cursor_from_string(s_client_id), .clean_start = true, .maximum_packet_size_bytes = &maximum_packet_size, .receive_maximum = &receive_maximum, }; aws_mqtt5_transform_websocket_handshake_fn *websocket_handshake_transform = NULL; void 
*websocket_handshake_transform_user_data = NULL; if (app_ctx.use_websockets) { websocket_handshake_transform = &s_aws_mqtt5_transform_websocket_handshake_fn; } struct aws_mqtt5_client_options client_options = { .host_name = app_ctx.uri.host_name, .port = app_ctx.port, .bootstrap = bootstrap, .socket_options = &socket_options, .tls_options = (use_tls) ? &tls_connection_options : NULL, .connect_options = &connect_options, .session_behavior = AWS_MQTT5_CSBT_CLEAN, .lifecycle_event_handler = s_lifecycle_event_callback, .lifecycle_event_handler_user_data = NULL, .retry_jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE, .min_reconnect_delay_ms = 1000, .max_reconnect_delay_ms = 120000, .min_connected_time_to_reset_reconnect_delay_ms = 30000, .ping_timeout_ms = 10000, .websocket_handshake_transform = websocket_handshake_transform, .websocket_handshake_transform_user_data = websocket_handshake_transform_user_data, .publish_received_handler = s_on_publish_received, }; struct aws_mqtt5_client *client = aws_mqtt5_client_new(allocator, &client_options); aws_mqtt5_client_start(client); bool done = false; while (!done) { printf("Enter command:\n"); char input_buffer[4096]; #ifdef WIN32 char *line = gets_s(input_buffer, AWS_ARRAY_SIZE(input_buffer)); #else char *line = fgets(input_buffer, AWS_ARRAY_SIZE(input_buffer), stdin); #endif done = s_handle_input(client, allocator, line); } aws_mqtt5_client_release(client); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); if (tls_ctx) { aws_tls_connection_options_clean_up(&tls_connection_options); aws_tls_ctx_release(tls_ctx); aws_tls_ctx_options_clean_up(&tls_ctx_options); } aws_thread_join_all_managed(); const size_t outstanding_bytes = aws_mem_tracer_bytes(allocator); printf("Summary:\n"); printf(" Outstanding bytes: %zu\n\n", outstanding_bytes); if (app_ctx.log_level) { aws_logger_set(NULL); aws_logger_clean_up(&logger); } aws_uri_clean_up(&app_ctx.uri); aws_mqtt_library_clean_up(); const size_t leaked_bytes = aws_mem_tracer_bytes(allocator); if (leaked_bytes) { struct aws_logger memory_logger; AWS_ZERO_STRUCT(memory_logger); aws_logger_init_noalloc(&memory_logger, aws_default_allocator(), &options); aws_logger_set(&memory_logger); aws_mqtt_library_init(aws_default_allocator()); printf("Writing memory leaks to log.\n"); aws_mem_tracer_dump(allocator); aws_logger_set(NULL); aws_logger_clean_up(&memory_logger); aws_mqtt_library_clean_up(); } else { printf("Finished, with no memory leaks\n"); } aws_mem_tracer_destroy(allocator); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/mqtt5canary/000077500000000000000000000000001456575232400232355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/mqtt5canary/CMakeLists.txt000066400000000000000000000016421456575232400260000ustar00rootroot00000000000000project(mqtt5canary C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB MQTT5CANARY_SRC "*.c" ) set(MQTT5CANARY_PROJECT_NAME mqtt5canary) add_executable(${MQTT5CANARY_PROJECT_NAME} ${MQTT5CANARY_SRC}) aws_set_common_properties(${MQTT5CANARY_PROJECT_NAME}) target_include_directories(${MQTT5CANARY_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${MQTT5CANARY_PROJECT_NAME} PRIVATE aws-c-mqtt) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " mqtt5canary will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${MQTT5CANARY_PROJECT_NAME} EXPORT 
${MQTT5CANARY_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/bin/mqtt5canary/main.c000066400000000000000000001213311456575232400243260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */ # pragma warning(disable : 4204) /* Declared initializers */ # pragma warning(disable : 4221) /* Local var in declared initializer */ #endif #define AWS_MQTT5_CANARY_CLIENT_CREATION_SLEEP_TIME 10000000 #define AWS_MQTT5_CANARY_OPERATION_ARRAY_SIZE 10000 #define AWS_MQTT5_CANARY_TOPIC_ARRAY_SIZE 256 #define AWS_MQTT5_CANARY_CLIENT_MAX 50 #define AWS_MQTT5_CANARY_PAYLOAD_SIZE_MAX UINT16_MAX struct app_ctx { struct aws_allocator *allocator; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_uri uri; uint32_t port; const char *cacert; const char *cert; const char *key; int connect_timeout; bool use_websockets; struct aws_tls_connection_options tls_connection_options; const char *log_filename; enum aws_log_level log_level; }; enum aws_mqtt5_canary_operations { AWS_MQTT5_CANARY_OPERATION_NULL = 0, AWS_MQTT5_CANARY_OPERATION_START = 1, AWS_MQTT5_CANARY_OPERATION_STOP = 2, AWS_MQTT5_CANARY_OPERATION_DESTROY = 3, AWS_MQTT5_CANARY_OPERATION_SUBSCRIBE = 4, AWS_MQTT5_CANARY_OPERATION_UNSUBSCRIBE = 5, AWS_MQTT5_CANARY_OPERATION_UNSUBSCRIBE_BAD = 6, AWS_MQTT5_CANARY_OPERATION_PUBLISH_QOS0 = 7, AWS_MQTT5_CANARY_OPERATION_PUBLISH_QOS1 = 8, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SUBSCRIBED_TOPIC_QOS0 = 9, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SUBSCRIBED_TOPIC_QOS1 = 10, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SHARED_TOPIC_QOS0 = 11, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SHARED_TOPIC_QOS1 = 12, AWS_MQTT5_CANARY_OPERATION_COUNT = 13, }; struct aws_mqtt5_canary_tester_options { uint16_t elg_max_threads; uint16_t client_count; size_t tps; uint64_t tps_sleep_time; size_t distributions_total; enum aws_mqtt5_canary_operations *operations; size_t test_run_seconds; }; static void s_usage(int exit_code) { fprintf(stderr, "usage: elastipubsub5 [options] endpoint\n"); fprintf(stderr, " endpoint: url to connect to\n"); fprintf(stderr, "\n Options:\n\n"); fprintf(stderr, " --cacert FILE: path to a CA certficate file.\n"); fprintf(stderr, " --cert FILE: path to a PEM encoded certificate to use with mTLS\n"); fprintf(stderr, " --key FILE: Path to a PEM encoded private key that matches cert.\n"); fprintf(stderr, " --connect-timeout INT: time in milliseconds to wait for a connection.\n"); fprintf(stderr, " -l, --log FILE: dumps logs to FILE instead of stderr.\n"); fprintf(stderr, " -v, --verbose: ERROR|INFO|DEBUG|TRACE: log level to configure. 
Default is none.\n"); fprintf(stderr, " -w, --websockets: use mqtt-over-websockets rather than direct mqtt\n"); fprintf(stderr, " -p, --port: Port to use when making MQTT connections\n"); fprintf(stderr, " -t, --threads: number of eventloop group threads to use\n"); fprintf(stderr, " -C, --clients: number of mqtt5 clients to use\n"); fprintf(stderr, " -T, --tps: operations to run per second\n"); fprintf(stderr, " -s, --seconds: seconds to run canary test (set as 0 to run forever)\n"); fprintf(stderr, " -h, --help\n"); fprintf(stderr, " Display this message and quit.\n"); exit(exit_code); } static struct aws_cli_option s_long_options[] = { {"cacert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'a'}, {"cert", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'c'}, {"key", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'e'}, {"connect-timeout", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'f'}, {"log", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'l'}, {"verbose", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'v'}, {"websockets", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'w'}, {"port", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'p'}, {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, {"threads", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 't'}, {"clients", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'C'}, {"tps", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'T'}, {"seconds", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 's'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_parse_options( int argc, char **argv, struct app_ctx *ctx, struct aws_mqtt5_canary_tester_options *tester_options) { bool uri_found = false; while (true) { int option_index = 0; int c = aws_cli_getopt_long(argc, argv, "a:c:e:f:l:v:wht:p:C:T:s:", s_long_options, &option_index); if (c == -1) { break; } switch (c) { case 0: /* getopt_long() returns 0 if an option.flag is non-null */ break; case 'a': ctx->cacert = aws_cli_optarg; break; case 'c': ctx->cert = aws_cli_optarg; break; case 'e': ctx->key = aws_cli_optarg; break; case 'f': ctx->connect_timeout = atoi(aws_cli_optarg); break; case 'l': ctx->log_filename = aws_cli_optarg; break; case 'v': if (!strcmp(aws_cli_optarg, "TRACE")) { ctx->log_level = AWS_LL_TRACE; } else if (!strcmp(aws_cli_optarg, "INFO")) { ctx->log_level = AWS_LL_INFO; } else if (!strcmp(aws_cli_optarg, "DEBUG")) { ctx->log_level = AWS_LL_DEBUG; } else if (!strcmp(aws_cli_optarg, "ERROR")) { ctx->log_level = AWS_LL_ERROR; } else { fprintf(stderr, "unsupported log level %s.\n", aws_cli_optarg); s_usage(1); } break; case 'h': s_usage(0); break; case 'w': ctx->use_websockets = true; break; case 'p': ctx->port = (uint32_t)atoi(aws_cli_optarg); break; case 't': tester_options->elg_max_threads = (uint16_t)atoi(aws_cli_optarg); break; case 'C': tester_options->client_count = (uint16_t)atoi(aws_cli_optarg); if (tester_options->client_count > AWS_MQTT5_CANARY_CLIENT_MAX) { tester_options->client_count = AWS_MQTT5_CANARY_CLIENT_MAX; } break; case 'T': tester_options->tps = atoi(aws_cli_optarg); break; case 's': tester_options->test_run_seconds = atoi(aws_cli_optarg); break; case 0x02: { struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str(aws_cli_positional_arg); if (aws_uri_init_parse(&ctx->uri, ctx->allocator, &uri_cursor)) { fprintf( stderr, "Failed to parse uri %s with error %s\n", (char *)uri_cursor.ptr, aws_error_debug_str(aws_last_error())); s_usage(1); } uri_found = true; break; } default: fprintf(stderr, "Unknown option\n"); s_usage(1); } } if (!uri_found) 
{ fprintf(stderr, "A URI for the request must be supplied.\n"); s_usage(1); } } /********************************************************** * MQTT5 CANARY OPTIONS **********************************************************/ static void s_aws_mqtt5_canary_update_tps_sleep_time(struct aws_mqtt5_canary_tester_options *tester_options) { tester_options->tps_sleep_time = (aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL) / tester_options->tps); } static void s_aws_mqtt5_canary_init_tester_options(struct aws_mqtt5_canary_tester_options *tester_options) { /* number of eventloop group threads to use */ tester_options->elg_max_threads = 3; /* number of mqtt5 clients to use */ tester_options->client_count = 10; /* operations per second to run */ tester_options->tps = 50; /* How long to run the test before exiting */ tester_options->test_run_seconds = 25200; } struct aws_mqtt5_canary_test_client { struct aws_mqtt5_client *client; const struct aws_mqtt5_negotiated_settings *settings; struct aws_byte_cursor shared_topic; struct aws_byte_cursor client_id; size_t subscription_count; bool is_connected; }; /********************************************************** * OPERATION DISTRIBUTION **********************************************************/ typedef int(aws_mqtt5_canary_operation_fn)(struct aws_mqtt5_canary_test_client *test_client); struct aws_mqtt5_canary_operations_function_table { aws_mqtt5_canary_operation_fn *operation_by_operation_type[AWS_MQTT5_CANARY_OPERATION_COUNT]; }; static void s_aws_mqtt5_canary_add_operation_to_array( struct aws_mqtt5_canary_tester_options *tester_options, enum aws_mqtt5_canary_operations operation_type, size_t probability) { for (size_t i = 0; i < probability; ++i) { tester_options->operations[tester_options->distributions_total] = operation_type; tester_options->distributions_total += 1; } } /* Add operations and their weighted probability to the list of possible operations */ static void s_aws_mqtt5_canary_init_weighted_operations(struct aws_mqtt5_canary_tester_options *tester_options) { s_aws_mqtt5_canary_add_operation_to_array(tester_options, AWS_MQTT5_CANARY_OPERATION_STOP, 1); s_aws_mqtt5_canary_add_operation_to_array(tester_options, AWS_MQTT5_CANARY_OPERATION_SUBSCRIBE, 200); s_aws_mqtt5_canary_add_operation_to_array(tester_options, AWS_MQTT5_CANARY_OPERATION_UNSUBSCRIBE, 200); s_aws_mqtt5_canary_add_operation_to_array(tester_options, AWS_MQTT5_CANARY_OPERATION_UNSUBSCRIBE_BAD, 100); s_aws_mqtt5_canary_add_operation_to_array(tester_options, AWS_MQTT5_CANARY_OPERATION_PUBLISH_QOS0, 300); s_aws_mqtt5_canary_add_operation_to_array(tester_options, AWS_MQTT5_CANARY_OPERATION_PUBLISH_QOS1, 150); s_aws_mqtt5_canary_add_operation_to_array( tester_options, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SUBSCRIBED_TOPIC_QOS0, 100); s_aws_mqtt5_canary_add_operation_to_array( tester_options, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SUBSCRIBED_TOPIC_QOS1, 50); s_aws_mqtt5_canary_add_operation_to_array( tester_options, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SHARED_TOPIC_QOS0, 50); s_aws_mqtt5_canary_add_operation_to_array( tester_options, AWS_MQTT5_CANARY_OPERATION_PUBLISH_TO_SHARED_TOPIC_QOS1, 50); } static enum aws_mqtt5_canary_operations s_aws_mqtt5_canary_get_random_operation( struct aws_mqtt5_canary_tester_options *tester_options) { size_t random_index = rand() % tester_options->distributions_total; return tester_options->operations[random_index]; } /********************************************************** * PACKET CALLBACKS 
**********************************************************/ static void s_on_publish_received(const struct aws_mqtt5_packet_publish_view *publish, void *user_data) { (void)publish; struct aws_mqtt5_canary_test_client *test_client = user_data; AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish Received on topic " PRInSTR, AWS_BYTE_CURSOR_PRI(test_client->client_id), AWS_BYTE_CURSOR_PRI(publish->topic)); } /********************************************************** * LIFECYCLE EVENTS **********************************************************/ static void s_handle_lifecycle_event_connection_success( struct aws_mqtt5_canary_test_client *test_client, const struct aws_mqtt5_negotiated_settings *settings) { AWS_ASSERT(test_client != NULL); test_client->is_connected = true; test_client->settings = settings; test_client->client_id = aws_byte_cursor_from_buf(&settings->client_id_storage); AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Lifecycle Event: Connection Success", AWS_BYTE_CURSOR_PRI(test_client->client_id)); } static void s_handle_lifecycle_event_disconnection(struct aws_mqtt5_canary_test_client *test_client) { AWS_ASSERT(test_client != NULL); test_client->is_connected = false; AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Lifecycle Event: Disconnect", AWS_BYTE_CURSOR_PRI(test_client->client_id)); } static void s_handle_lifecycle_event_stopped(struct aws_mqtt5_canary_test_client *test_client) { AWS_ASSERT(test_client != NULL); AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Lifecycle Event: Stopped", AWS_BYTE_CURSOR_PRI(test_client->client_id)); } static void s_lifecycle_event_callback(const struct aws_mqtt5_client_lifecycle_event *event) { switch (event->event_type) { case AWS_MQTT5_CLET_STOPPED: s_handle_lifecycle_event_stopped(event->user_data); break; case AWS_MQTT5_CLET_ATTEMPTING_CONNECT: AWS_LOGF_INFO(AWS_LS_MQTT5_CANARY, "Lifecycle event: Attempting Connect!"); break; case AWS_MQTT5_CLET_CONNECTION_FAILURE: AWS_LOGF_INFO(AWS_LS_MQTT5_CANARY, "Lifecycle event: Connection Failure!"); AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, " Error Code: %d(%s)", event->error_code, aws_error_debug_str(event->error_code)); break; case AWS_MQTT5_CLET_CONNECTION_SUCCESS: s_handle_lifecycle_event_connection_success(event->user_data, event->settings); break; case AWS_MQTT5_CLET_DISCONNECTION: s_handle_lifecycle_event_disconnection(event->user_data); AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, " Error Code: %d(%s)", event->error_code, aws_error_debug_str(event->error_code)); break; } } static void s_aws_mqtt5_transform_websocket_handshake_fn( struct aws_http_message *request, void *user_data, aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn, void *complete_ctx) { (void)user_data; (*complete_fn)(request, AWS_ERROR_SUCCESS, complete_ctx); } /********************************************************** * OPERATION CALLBACKS **********************************************************/ static void s_aws_mqtt5_canary_operation_subscribe_completion( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { (void)suback; if (error_code != AWS_MQTT5_UARC_SUCCESS) { struct aws_mqtt5_canary_test_client *test_client = (struct aws_mqtt5_canary_test_client *)(complete_ctx); if (test_client != NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Subscribe completed with error code: %i", AWS_BYTE_CURSOR_PRI(test_client->client_id), error_code); } } } static void s_aws_mqtt5_canary_operation_unsubscribe_completion( const struct aws_mqtt5_packet_unsuback_view 
*unsuback, int error_code, void *complete_ctx) { (void)unsuback; if (error_code != AWS_MQTT5_UARC_SUCCESS) { struct aws_mqtt5_canary_test_client *test_client = (struct aws_mqtt5_canary_test_client *)(complete_ctx); if (test_client != NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Unsubscribe completed with error code: %i", AWS_BYTE_CURSOR_PRI(test_client->client_id), error_code); } } } static void s_aws_mqtt5_canary_operation_publish_completion( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)packet; (void)packet_type; if (error_code != AWS_MQTT5_PARC_SUCCESS) { struct aws_mqtt5_canary_test_client *test_client = (struct aws_mqtt5_canary_test_client *)(complete_ctx); if (test_client != NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish completed with error code: %i", AWS_BYTE_CURSOR_PRI(test_client->client_id), error_code); } } } /********************************************************** * OPERATION FUNCTIONS **********************************************************/ static int s_aws_mqtt5_canary_operation_start(struct aws_mqtt5_canary_test_client *test_client) { if (test_client->is_connected) { return AWS_OP_SUCCESS; } aws_mqtt5_client_start(test_client->client); struct aws_byte_cursor client_id; if (test_client->client_id.len) { client_id.ptr = test_client->client_id.ptr; client_id.len = test_client->client_id.len; } else { client_id = aws_byte_cursor_from_c_str("Client ID not set"); } AWS_LOGF_INFO(AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Start", AWS_BYTE_CURSOR_PRI(client_id)); return AWS_OP_SUCCESS; } static int s_aws_mqtt5_canary_operation_stop(struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return AWS_OP_SUCCESS; } aws_mqtt5_client_stop(test_client->client, NULL, NULL); test_client->subscription_count = 0; AWS_LOGF_INFO(AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Stop", AWS_BYTE_CURSOR_PRI(test_client->client_id)); return AWS_OP_SUCCESS; } static int s_aws_mqtt5_canary_operation_subscribe(struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } char topic_array[AWS_MQTT5_CANARY_TOPIC_ARRAY_SIZE] = ""; snprintf( topic_array, sizeof topic_array, PRInSTR "_%zu", AWS_BYTE_CURSOR_PRI(test_client->client_id), test_client->subscription_count); struct aws_mqtt5_subscription_view subscriptions[] = { { .topic_filter = aws_byte_cursor_from_c_str(topic_array), .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .no_local = false, .retain_as_published = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, }, { .topic_filter = { .ptr = test_client->shared_topic.ptr, .len = test_client->shared_topic.len, }, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .no_local = false, .retain_as_published = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, }, }; struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = subscriptions, .subscription_count = AWS_ARRAY_SIZE(subscriptions), }; struct aws_mqtt5_subscribe_completion_options subscribe_view_options = { .completion_callback = &s_aws_mqtt5_canary_operation_subscribe_completion, .completion_user_data = test_client, }; test_client->subscription_count++; AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Subscribe to topic: " PRInSTR, AWS_BYTE_CURSOR_PRI(test_client->client_id), AWS_BYTE_CURSOR_PRI(subscriptions->topic_filter)); return aws_mqtt5_client_subscribe(test_client->client, &subscribe_view, &subscribe_view_options); } static int 
s_aws_mqtt5_canary_operation_unsubscribe_bad(struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } char topic_array[AWS_MQTT5_CANARY_TOPIC_ARRAY_SIZE] = ""; snprintf( topic_array, sizeof topic_array, PRInSTR "_non_existing_topic", AWS_BYTE_CURSOR_PRI(test_client->client_id)); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str(topic_array); struct aws_byte_cursor unsubscribes[] = { { .ptr = topic.ptr, .len = topic.len, }, }; struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = unsubscribes, .topic_filter_count = AWS_ARRAY_SIZE(unsubscribes), }; AWS_LOGF_INFO(AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Unsubscribe Bad", AWS_BYTE_CURSOR_PRI(test_client->client_id)); return aws_mqtt5_client_unsubscribe(test_client->client, &unsubscribe_view, NULL); } static int s_aws_mqtt5_canary_operation_unsubscribe(struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } if (test_client->subscription_count <= 0) { return s_aws_mqtt5_canary_operation_unsubscribe_bad(test_client); } test_client->subscription_count--; char topic_array[AWS_MQTT5_CANARY_TOPIC_ARRAY_SIZE] = ""; snprintf( topic_array, sizeof topic_array, PRInSTR "_%zu", AWS_BYTE_CURSOR_PRI(test_client->client_id), test_client->subscription_count); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str(topic_array); struct aws_byte_cursor unsubscribes[] = { { .ptr = topic.ptr, .len = topic.len, }, }; struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = unsubscribes, .topic_filter_count = AWS_ARRAY_SIZE(unsubscribes), }; struct aws_mqtt5_unsubscribe_completion_options unsubscribe_view_options = { .completion_callback = &s_aws_mqtt5_canary_operation_unsubscribe_completion, .completion_user_data = test_client}; AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Unsubscribe from topic: " PRInSTR, AWS_BYTE_CURSOR_PRI(test_client->client_id), AWS_BYTE_CURSOR_PRI(topic)); return aws_mqtt5_client_unsubscribe(test_client->client, &unsubscribe_view, &unsubscribe_view_options); } static int s_aws_mqtt5_canary_operation_publish( struct aws_mqtt5_canary_test_client *test_client, struct aws_byte_cursor topic_filter, enum aws_mqtt5_qos qos) { struct aws_byte_cursor property_cursor = aws_byte_cursor_from_c_str("property"); struct aws_mqtt5_user_property user_properties[] = { { .name = { .ptr = property_cursor.ptr, .len = property_cursor.len, }, .value = { .ptr = property_cursor.ptr, .len = property_cursor.len, }, }, { .name = { .ptr = property_cursor.ptr, .len = property_cursor.len, }, .value = { .ptr = property_cursor.ptr, .len = property_cursor.len, }, }, }; uint16_t payload_size = (rand() % UINT16_MAX) + 1; uint8_t payload_data[AWS_MQTT5_CANARY_PAYLOAD_SIZE_MAX]; struct aws_mqtt5_packet_publish_view packet_publish_view = { .qos = qos, .topic = topic_filter, .retain = false, .duplicate = false, .payload = { .ptr = payload_data, .len = payload_size, }, .user_properties = user_properties, .user_property_count = AWS_ARRAY_SIZE(user_properties), }; struct aws_mqtt5_publish_completion_options packet_publish_view_options = { .completion_callback = &s_aws_mqtt5_canary_operation_publish_completion, .completion_user_data = test_client}; return aws_mqtt5_client_publish(test_client->client, &packet_publish_view, &packet_publish_view_options); } static int s_aws_mqtt5_canary_operation_publish_qos0(struct aws_mqtt5_canary_test_client 
*test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } struct aws_byte_cursor topic_cursor; AWS_ZERO_STRUCT(topic_cursor); topic_cursor = aws_byte_cursor_from_c_str("topic1"); AWS_LOGF_INFO(AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish qos0", AWS_BYTE_CURSOR_PRI(test_client->client_id)); return s_aws_mqtt5_canary_operation_publish(test_client, topic_cursor, AWS_MQTT5_QOS_AT_MOST_ONCE); } static int s_aws_mqtt5_canary_operation_publish_qos1(struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } struct aws_byte_cursor topic_cursor; AWS_ZERO_STRUCT(topic_cursor); topic_cursor = aws_byte_cursor_from_c_str("topic1"); AWS_LOGF_INFO(AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish qos1", AWS_BYTE_CURSOR_PRI(test_client->client_id)); return s_aws_mqtt5_canary_operation_publish(test_client, topic_cursor, AWS_MQTT5_QOS_AT_LEAST_ONCE); } static int s_aws_mqtt5_canary_operation_publish_to_subscribed_topic_qos0( struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } if (test_client->subscription_count < 1) { return s_aws_mqtt5_canary_operation_publish_qos0(test_client); } char topic_array[AWS_MQTT5_CANARY_TOPIC_ARRAY_SIZE] = ""; snprintf( topic_array, sizeof topic_array, PRInSTR "_%zu", AWS_BYTE_CURSOR_PRI(test_client->client_id), test_client->subscription_count - 1); struct aws_byte_cursor topic_cursor = aws_byte_cursor_from_c_str(topic_array); AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish qos 0 to subscribed topic: " PRInSTR, AWS_BYTE_CURSOR_PRI(test_client->client_id), AWS_BYTE_CURSOR_PRI(topic_cursor)); return s_aws_mqtt5_canary_operation_publish(test_client, topic_cursor, AWS_MQTT5_QOS_AT_MOST_ONCE); } static int s_aws_mqtt5_canary_operation_publish_to_subscribed_topic_qos1( struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } if (test_client->subscription_count < 1) { return s_aws_mqtt5_canary_operation_publish_qos1(test_client); } char topic_array[AWS_MQTT5_CANARY_TOPIC_ARRAY_SIZE] = ""; snprintf( topic_array, sizeof topic_array, PRInSTR "_%zu", AWS_BYTE_CURSOR_PRI(test_client->client_id), test_client->subscription_count - 1); struct aws_byte_cursor topic_cursor = aws_byte_cursor_from_c_str(topic_array); AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish qos 1 to subscribed topic: " PRInSTR, AWS_BYTE_CURSOR_PRI(test_client->client_id), AWS_BYTE_CURSOR_PRI(topic_cursor)); return s_aws_mqtt5_canary_operation_publish(test_client, topic_cursor, AWS_MQTT5_QOS_AT_LEAST_ONCE); } static int s_aws_mqtt5_canary_operation_publish_to_shared_topic_qos0(struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish qos 0 to shared topic: " PRInSTR, AWS_BYTE_CURSOR_PRI(test_client->client_id), AWS_BYTE_CURSOR_PRI(test_client->shared_topic)); return s_aws_mqtt5_canary_operation_publish(test_client, test_client->shared_topic, AWS_MQTT5_QOS_AT_MOST_ONCE); } static int s_aws_mqtt5_canary_operation_publish_to_shared_topic_qos1(struct aws_mqtt5_canary_test_client *test_client) { if (!test_client->is_connected) { return s_aws_mqtt5_canary_operation_start(test_client); } AWS_LOGF_INFO( AWS_LS_MQTT5_CANARY, "ID:" PRInSTR " Publish qos 1 to shared 
topic: " PRInSTR, AWS_BYTE_CURSOR_PRI(test_client->client_id), AWS_BYTE_CURSOR_PRI(test_client->shared_topic)); return s_aws_mqtt5_canary_operation_publish(test_client, test_client->shared_topic, AWS_MQTT5_QOS_AT_LEAST_ONCE); } static struct aws_mqtt5_canary_operations_function_table s_aws_mqtt5_canary_operation_table = { .operation_by_operation_type = { NULL, /* null */ &s_aws_mqtt5_canary_operation_start, /* start */ &s_aws_mqtt5_canary_operation_stop, /* stop */ NULL, /* destroy */ &s_aws_mqtt5_canary_operation_subscribe, /* subscribe */ &s_aws_mqtt5_canary_operation_unsubscribe, /* unsubscribe */ &s_aws_mqtt5_canary_operation_unsubscribe_bad, /* unsubscribe_bad */ &s_aws_mqtt5_canary_operation_publish_qos0, /* publish_qos0 */ &s_aws_mqtt5_canary_operation_publish_qos1, /* publish_qos1 */ &s_aws_mqtt5_canary_operation_publish_to_subscribed_topic_qos0, /* publish_to_subscribed_topic_qos0 */ &s_aws_mqtt5_canary_operation_publish_to_subscribed_topic_qos1, /* publish_to_subscribed_topic_qos1 */ &s_aws_mqtt5_canary_operation_publish_to_shared_topic_qos0, /* publish_to_shared_topic_qos0 */ &s_aws_mqtt5_canary_operation_publish_to_shared_topic_qos1, /* publish_to_shared_topic_qos1 */ }, }; /********************************************************** * MAIN **********************************************************/ int main(int argc, char **argv) { struct aws_allocator *allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 15); aws_mqtt_library_init(allocator); struct app_ctx app_ctx; AWS_ZERO_STRUCT(app_ctx); app_ctx.allocator = allocator; app_ctx.signal = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; app_ctx.connect_timeout = 3000; aws_mutex_init(&app_ctx.lock); if (app_ctx.port == 0) { app_ctx.port = 1883; } struct aws_mqtt5_canary_tester_options tester_options; AWS_ZERO_STRUCT(tester_options); s_aws_mqtt5_canary_init_tester_options(&tester_options); enum aws_mqtt5_canary_operations operations[AWS_MQTT5_CANARY_OPERATION_ARRAY_SIZE]; AWS_ZERO_STRUCT(operations); tester_options.operations = operations; s_parse_options(argc, argv, &app_ctx, &tester_options); if (app_ctx.uri.port) { app_ctx.port = app_ctx.uri.port; } s_aws_mqtt5_canary_update_tps_sleep_time(&tester_options); s_aws_mqtt5_canary_init_weighted_operations(&tester_options); /********************************************************** * LOGGING **********************************************************/ struct aws_logger logger; AWS_ZERO_STRUCT(logger); struct aws_logger_standard_options options = { .level = app_ctx.log_level, }; if (app_ctx.log_level) { if (app_ctx.log_filename) { options.filename = app_ctx.log_filename; } else { options.file = stderr; } if (aws_logger_init_standard(&logger, allocator, &options)) { fprintf(stderr, "Faled to initialize logger with error %s", aws_error_debug_str(aws_last_error())); exit(1); } aws_logger_set(&logger); } else { options.file = stderr; } /********************************************************** * TLS **********************************************************/ bool use_tls = false; struct aws_tls_ctx *tls_ctx = NULL; struct aws_tls_ctx_options tls_ctx_options; AWS_ZERO_STRUCT(tls_ctx_options); struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); if (app_ctx.cert && app_ctx.key) { if (aws_tls_ctx_options_init_client_mtls_from_path(&tls_ctx_options, allocator, app_ctx.cert, app_ctx.key)) { fprintf( stderr, "Failed to load %s and %s with error %s.", app_ctx.cert, app_ctx.key, 
aws_error_debug_str(aws_last_error())); exit(1); } if (app_ctx.cacert) { if (aws_tls_ctx_options_override_default_trust_store_from_path(&tls_ctx_options, NULL, app_ctx.cacert)) { fprintf( stderr, "Failed to load %s with error %s", app_ctx.cacert, aws_error_debug_str(aws_last_error())); exit(1); } } if (aws_tls_ctx_options_set_alpn_list(&tls_ctx_options, "x-amzn-mqtt-ca")) { fprintf(stderr, "Failed to set alpn list with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); if (!tls_ctx) { fprintf(stderr, "Failed to initialize TLS context with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } aws_tls_connection_options_init_from_ctx(&tls_connection_options, tls_ctx); if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &app_ctx.uri.host_name)) { fprintf(stderr, "Failed to set servername with error %s.", aws_error_debug_str(aws_last_error())); exit(1); } use_tls = true; } /********************************************************** * EVENT LOOP GROUP **********************************************************/ struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, tester_options.elg_max_threads, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_socket_options socket_options = { .type = AWS_SOCKET_STREAM, .connect_timeout_ms = (uint32_t)app_ctx.connect_timeout, .keep_alive_timeout_sec = 0, .keepalive = false, .keep_alive_interval_sec = 0, }; uint16_t receive_maximum = 9; uint32_t maximum_packet_size = 128 * 1024; aws_mqtt5_transform_websocket_handshake_fn *websocket_handshake_transform = NULL; void *websocket_handshake_transform_user_data = NULL; if (app_ctx.use_websockets) { websocket_handshake_transform = &s_aws_mqtt5_transform_websocket_handshake_fn; } /********************************************************** * MQTT5 CLIENT CREATION **********************************************************/ struct aws_mqtt5_packet_connect_view connect_options = { .keep_alive_interval_seconds = 30, .clean_start = true, .maximum_packet_size_bytes = &maximum_packet_size, .receive_maximum = &receive_maximum, }; struct aws_mqtt5_client_options client_options = { .host_name = app_ctx.uri.host_name, .port = app_ctx.port, .bootstrap = bootstrap, .socket_options = &socket_options, .tls_options = (use_tls) ? 
&tls_connection_options : NULL, .connect_options = &connect_options, .session_behavior = AWS_MQTT5_CSBT_CLEAN, .lifecycle_event_handler = s_lifecycle_event_callback, .retry_jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE, .min_reconnect_delay_ms = 1000, .max_reconnect_delay_ms = 120000, .min_connected_time_to_reset_reconnect_delay_ms = 30000, .ping_timeout_ms = 10000, .websocket_handshake_transform = websocket_handshake_transform, .websocket_handshake_transform_user_data = websocket_handshake_transform_user_data, .publish_received_handler = s_on_publish_received, .ack_timeout_seconds = 300, /* 5 minute timeout */ }; struct aws_mqtt5_canary_test_client clients[AWS_MQTT5_CANARY_CLIENT_MAX]; AWS_ZERO_STRUCT(clients); uint64_t start_time = 0; aws_high_res_clock_get_ticks(&start_time); char shared_topic_array[AWS_MQTT5_CANARY_TOPIC_ARRAY_SIZE] = ""; snprintf(shared_topic_array, sizeof shared_topic_array, "%" PRIu64 "_shared_topic", start_time); struct aws_byte_cursor shared_topic = aws_byte_cursor_from_c_str(shared_topic_array); for (size_t i = 0; i < tester_options.client_count; ++i) { client_options.lifecycle_event_handler_user_data = &clients[i]; client_options.publish_received_handler_user_data = &clients[i]; clients[i].shared_topic = shared_topic; clients[i].client = aws_mqtt5_client_new(allocator, &client_options); aws_mqtt5_canary_operation_fn *operation_fn = s_aws_mqtt5_canary_operation_table.operation_by_operation_type[AWS_MQTT5_CANARY_OPERATION_START]; (*operation_fn)(&clients[i]); aws_thread_current_sleep(AWS_MQTT5_CANARY_CLIENT_CREATION_SLEEP_TIME); } fprintf(stderr, "Clients created\n"); /********************************************************** * TESTING **********************************************************/ bool done = false; size_t operations_executed = 0; uint64_t time_test_finish = 0; aws_high_res_clock_get_ticks(&time_test_finish); time_test_finish += aws_timestamp_convert(tester_options.test_run_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); if (tester_options.test_run_seconds > 0) { printf("Running test for %zu seconds\n", tester_options.test_run_seconds); } else { printf("Running test forever\n"); } while (!done) { uint64_t now = 0; aws_high_res_clock_get_ticks(&now); operations_executed++; enum aws_mqtt5_canary_operations next_operation = s_aws_mqtt5_canary_get_random_operation(&tester_options); aws_mqtt5_canary_operation_fn *operation_fn = s_aws_mqtt5_canary_operation_table.operation_by_operation_type[next_operation]; (*operation_fn)(&clients[rand() % tester_options.client_count]); if (now > time_test_finish && tester_options.test_run_seconds > 0) { done = true; } aws_thread_current_sleep(tester_options.tps_sleep_time); } /********************************************************** * CLEAN UP **********************************************************/ for (size_t i = 0; i < tester_options.client_count; ++i) { struct aws_mqtt5_client *client = clients[i].client; aws_mqtt5_client_release(client); } aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(el_group); if (tls_ctx) { aws_tls_connection_options_clean_up(&tls_connection_options); aws_tls_ctx_release(tls_ctx); aws_tls_ctx_options_clean_up(&tls_ctx_options); } aws_thread_join_all_managed(); const size_t outstanding_bytes = aws_mem_tracer_bytes(allocator); printf("Summary:\n"); printf(" Outstanding bytes: %zu\n", outstanding_bytes); if (app_ctx.log_level) { aws_logger_set(NULL); aws_logger_clean_up(&logger); } aws_uri_clean_up(&app_ctx.uri); 
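    /* Per-run summary: total operations attempted and the average operations-per-second over the
     * configured duration. This point is only reached for bounded runs (test_run_seconds > 0),
     * since the operation loop above never exits when the canary is asked to run forever. */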
aws_mqtt_library_clean_up(); printf(" Operations executed: %zu\n", operations_executed); printf(" Operating TPS average over test: %zu\n\n", operations_executed / tester_options.test_run_seconds); const size_t leaked_bytes = aws_mem_tracer_bytes(allocator); if (leaked_bytes) { struct aws_logger memory_logger; AWS_ZERO_STRUCT(memory_logger); aws_logger_init_noalloc(&memory_logger, aws_default_allocator(), &options); aws_logger_set(&memory_logger); aws_mqtt_library_init(aws_default_allocator()); printf("Writing memory leaks to log.\n"); aws_mem_tracer_dump(allocator); aws_logger_set(NULL); aws_logger_clean_up(&memory_logger); aws_mqtt_library_clean_up(); } else { printf("Finished, with no memory leaks\n"); } aws_mem_tracer_destroy(allocator); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/builder.json000066400000000000000000000003151456575232400225350ustar00rootroot00000000000000{ "name": "aws-c-mqtt", "upstream": [ { "name": "aws-c-http" }, { "name": "aws-c-io" } ], "downstream": [ { "name": "aws-c-iot" } ], "cmake_args": [ ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/cmake/000077500000000000000000000000001456575232400212755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/cmake/aws-c-mqtt-config.cmake000066400000000000000000000010701456575232400255350ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-http) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if(BUILD_SHARED_LIBS) if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/codebuild/000077500000000000000000000000001456575232400221475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/codebuild/.gitignore000066400000000000000000000000141456575232400241320ustar00rootroot00000000000000__pycache__ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/codebuild/CanaryWrapper.py000066400000000000000000000431631456575232400253060ustar00rootroot00000000000000# Python wrapper script for collecting Canary metrics, setting-up/tearing-down alarms, reporting metrics to Cloudwatch, # checking the alarms to ensure everything is correct at the end of the run, and pushing the log to S3 if successful. 
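# Illustrative sketch only -- the actual CloudWatch reporting lives in the DataSnapshot class
# (CanaryWrapper_Classes.py, not shown here). Assuming boto3 is the underlying client, a single
# snapshot report for one registered metric amounts to roughly the following; the namespace,
# metric name, and value are placeholders drawn from the defaults used later in this script:
#
#     import boto3
#
#     cloudwatch = boto3.client("cloudwatch", region_name="us-east-1")
#     cloudwatch.put_metric_data(
#         Namespace="mqtt5_canary",            # fixed namespace unless --git_hash_as_namespace is set
#         MetricData=[{
#             "MetricName": "total_cpu_usage", # registered below via data_snapshot.register_metric
#             "Value": 12.5,                   # sample value produced by the metric function
#             "Unit": "Percent",
#         }],
#     )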
# Needs to be installed prior to running # Part of standard packages in Python 3.4+ import argparse import time import datetime # Dependencies in project folder from CanaryWrapper_Classes import * from CanaryWrapper_MetricFunctions import * # Code for command line argument parsing # ================================================================================ command_parser = argparse.ArgumentParser("CanaryWrapper") command_parser.add_argument("--canary_executable", type=str, required=True, help="The path to the canary executable (or program - like 'python3')") command_parser.add_argument("--canary_arguments", type=str, default="", help="The arguments to pass/launch the canary executable with") command_parser.add_argument("--git_hash", type=str, required=True, help="The Git commit hash that we are running the canary with") command_parser.add_argument("--git_repo_name", type=str, required=True, help="The name of the Git repository") command_parser.add_argument("--git_hash_as_namespace", type=bool, default=False, help="(OPTIONAL, default=False) If true, the git hash will be used as the name of the Cloudwatch namespace") command_parser.add_argument("--output_log_filepath", type=str, default="output.log", help="(OPTIONAL, default=output.log) The file to output log info to. Set to 'None' to disable") command_parser.add_argument("--output_to_console", type=bool, default=True, help="(OPTIONAL, default=True) If true, info will be output to the console") command_parser.add_argument("--cloudwatch_region", type=str, default="us-east-1", help="(OPTIONAL, default=us-east-1) The AWS region for Cloudwatch") command_parser.add_argument("--s3_bucket_name", type=str, default="canary-wrapper-folder", help="(OPTIONAL, default=canary-wrapper-folder) The name of the S3 bucket where success logs will be stored") command_parser.add_argument("--snapshot_wait_time", type=int, default=600, help="(OPTIONAL, default=600) The number of seconds between gathering and sending snapshot reports") command_parser.add_argument("--ticket_category", type=str, default="AWS", help="(OPTIONAL, default=AWS) The category to register the ticket under") command_parser.add_argument("--ticket_type", type=str, default="SDKs and Tools", help="(OPTIONAL, default='SDKs and Tools') The type to register the ticket under") command_parser.add_argument("--ticket_item", type=str, default="IoT SDK for CPP", help="(OPTIONAL, default='IoT SDK for CPP') The item to register the ticket under") command_parser.add_argument("--ticket_group", type=str, default="AWS IoT Device SDK", help="(OPTIONAL, default='AWS IoT Device SDK') The group to register the ticket under") command_parser.add_argument("--dependencies", type=str, default="", help="(OPTIONAL, default='') Any dependencies and their commit hashes. \ Current expected format is '(name or path);(hash);(next name or path);(hash);(etc...)'.") command_parser.add_argument("--lambda_name", type=str, default="iot-send-email-lambda", help="(OPTIONAL, default='CanarySendEmailLambda') The name of the Lambda used to send emails") command_parser.add_argument("--codebuild_log_path", type=str, default="", help="The CODEBUILD_LOG_PATH environment variable. 
Leave blank to ignore") command_parser_arguments = command_parser.parse_args() if (command_parser_arguments.output_log_filepath == "None"): command_parser_arguments.output_log_filepath = None if (command_parser_arguments.snapshot_wait_time <= 0): command_parser_arguments.snapshot_wait_time = 60 # Deal with possibly empty values in semi-critical commands/arguments if (command_parser_arguments.canary_executable == ""): print ("ERROR - required canary_executable is empty!", flush=True) exit (1) # cannot run without a canary executable if (command_parser_arguments.git_hash == ""): print ("ERROR - required git_hash is empty!", flush=True) exit (1) # cannot run without git hash if (command_parser_arguments.git_repo_name == ""): print ("ERROR - required git_repo_name is empty!", flush=True) exit (1) # cannot run without git repo name if (command_parser_arguments.git_hash_as_namespace is not True and command_parser_arguments.git_hash_as_namespace is not False): command_parser_arguments.git_hash_as_namespace = False if (command_parser_arguments.output_log_filepath == ""): command_parser_arguments.output_log_filepath = None if (command_parser_arguments.output_to_console != True and command_parser_arguments.output_to_console != False): command_parser_arguments.output_to_console = True if (command_parser_arguments.cloudwatch_region == ""): command_parser_arguments.cloudwatch_region = "us-east-1" if (command_parser_arguments.s3_bucket_name == ""): command_parser_arguments.s3_bucket_name = "canary-wrapper-folder" if (command_parser_arguments.ticket_category == ""): command_parser_arguments.ticket_category = "AWS" if (command_parser_arguments.ticket_type == ""): command_parser_arguments.ticket_type = "SDKs and Tools" if (command_parser_arguments.ticket_item == ""): command_parser_arguments.ticket_item = "IoT SDK for CPP" if (command_parser_arguments.ticket_group == ""): command_parser_arguments.ticket_group = "AWS IoT Device SDK" # ================================================================================ datetime_now = datetime.datetime.now() datetime_string = datetime_now.strftime("%d-%m-%Y/%H-%M-%S") print("Datetime string is: " + datetime_string, flush=True) # Make the snapshot class data_snapshot = DataSnapshot( git_hash=command_parser_arguments.git_hash, git_repo_name=command_parser_arguments.git_repo_name, datetime_string=datetime_string, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", output_log_filepath="output.txt", output_to_console=command_parser_arguments.output_to_console, cloudwatch_region="us-east-1", cloudwatch_make_dashboard=False, cloudwatch_teardown_alarms_on_complete=True, cloudwatch_teardown_dashboard_on_complete=True, s3_bucket_name=command_parser_arguments.s3_bucket_name, s3_bucket_upload_on_complete=True, lambda_name=command_parser_arguments.lambda_name, metric_frequency=command_parser_arguments.snapshot_wait_time) # Make sure nothing failed if (data_snapshot.abort_due_to_internal_error == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) exit(0) # Register metrics data_snapshot.register_metric( new_metric_name="total_cpu_usage", new_metric_function=get_metric_total_cpu_usage, new_metric_unit="Percent", new_metric_alarm_threshold=70, new_metric_reports_to_skip=1, new_metric_alarm_severity=5, is_percent=True) data_snapshot.register_metric( new_metric_name="total_memory_usage_value", 
new_metric_function=get_metric_total_memory_usage_value, new_metric_unit="Bytes") data_snapshot.register_metric( new_metric_name="total_memory_usage_percent", new_metric_function=get_metric_total_memory_usage_percent, new_metric_unit="Percent", new_metric_alarm_threshold=70, new_metric_reports_to_skip=0, new_metric_alarm_severity=5, is_percent=True) # Print diagnosis information data_snapshot.output_diagnosis_information(command_parser_arguments.dependencies) # Make the snapshot (metrics) monitor snapshot_monitor = SnapshotMonitor( wrapper_data_snapshot=data_snapshot, wrapper_metrics_wait_time=command_parser_arguments.snapshot_wait_time) # Make sure nothing failed if (snapshot_monitor.had_internal_error == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) exit(0) # Make the application monitor application_monitor = ApplicationMonitor( wrapper_application_path=command_parser_arguments.canary_executable, wrapper_application_arguments=command_parser_arguments.canary_arguments, wrapper_application_restart_on_finish=False, data_snapshot=data_snapshot # pass the data_snapshot for printing to the log ) # Make sure nothing failed if (application_monitor.error_has_occurred == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) exit(0) # For tracking if we stopped due to a metric alarm stopped_due_to_metric_alarm = False execution_sleep_time = 30 def execution_loop(): while True: snapshot_monitor.monitor_loop_function( time_passed=execution_sleep_time, psutil_process=application_monitor.application_process_psutil) application_monitor.monitor_loop_function( time_passed=execution_sleep_time) # Did a metric go into alarm? if (snapshot_monitor.has_cut_ticket == True): # Set that we had an 'internal error' so we go down the right code path snapshot_monitor.had_internal_error = True break # If an error has occurred or otherwise this thread needs to stop, then break the loop if (application_monitor.error_has_occurred == True or snapshot_monitor.had_internal_error == True): break time.sleep(execution_sleep_time) def application_thread(): start_email_body = "MQTT5 Short Running Canary Wrapper has started for " start_email_body += "\"" + command_parser_arguments.git_repo_name + "\" commit \"" + command_parser_arguments.git_hash + "\"" start_email_body += "\nThe wrapper will run for the length the MQTT5 Canary application is set to run for, which is determined by " start_email_body += "the arguments set. The arguments used for this run are listed below:" start_email_body += "\n Arguments: " + command_parser_arguments.canary_arguments snapshot_monitor.send_email(email_body=start_email_body, email_subject_text_append="Started") # Start the application going snapshot_monitor.start_monitoring() application_monitor.start_monitoring() # Allow the snapshot monitor to cut tickets snapshot_monitor.can_cut_ticket = True # Start the execution loop execution_loop() # Make sure everything is stopped snapshot_monitor.stop_monitoring() application_monitor.stop_monitoring() # Track whether this counts as an error (and therefore we should cleanup accordingly) or not wrapper_error_occurred = False # Finished Email send_finished_email = True finished_email_body = "MQTT5 Short Running Canary Wrapper has stopped." 
finished_email_body += "\n\n" try: # Find out why we stopped if (snapshot_monitor.had_internal_error == True): if (snapshot_monitor.has_cut_ticket == True): # We do not need to cut a ticket here - it's cut by the snapshot monitor! print ("ERROR - Snapshot monitor stopped due to metric in alarm!", flush=True) finished_email_body += "Failure due to required metrics being in alarm! A new ticket should have been cut!" finished_email_body += "\nMetrics in Alarm: " + str(snapshot_monitor.cloudwatch_current_alarms_triggered) wrapper_error_occurred = True else: print ("ERROR - Snapshot monitor stopped due to internal error!", flush=True) cut_ticket_using_cloudwatch( git_repo_name=command_parser_arguments.git_repo_name, git_hash=command_parser_arguments.git_hash, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1", ticket_description="Snapshot monitor stopped due to internal error! Reason info: " + snapshot_monitor.internal_error_reason, ticket_reason="Snapshot monitor stopped due to internal error", ticket_allow_duplicates=True, ticket_category=command_parser_arguments.ticket_category, ticket_item=command_parser_arguments.ticket_item, ticket_group=command_parser_arguments.ticket_group, ticket_type=command_parser_arguments.ticket_type, ticket_severity=4) wrapper_error_occurred = True finished_email_body += "Failure due to Snapshot monitor stopping due to an internal error." finished_email_body += " Reason given for error: " + snapshot_monitor.internal_error_reason elif (application_monitor.error_has_occurred == True): if (application_monitor.error_due_to_credentials == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again", flush=True) wrapper_error_occurred = True send_finished_email = False else: # Is the error something in the canary failed? if (application_monitor.error_code != 0): cut_ticket_using_cloudwatch( git_repo_name=command_parser_arguments.git_repo_name, git_hash=command_parser_arguments.git_hash, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1", ticket_description="The Short Running Canary exited with a non-zero exit code! This likely means something in the canary failed.", ticket_reason="The Short Running Canary exited with a non-zero exit code", ticket_allow_duplicates=True, ticket_category=command_parser_arguments.ticket_category, ticket_item=command_parser_arguments.ticket_item, ticket_group=command_parser_arguments.ticket_group, ticket_type=command_parser_arguments.ticket_type, ticket_severity=4) wrapper_error_occurred = True finished_email_body += "Failure due to MQTT5 application exiting with a non-zero exit code! This means something in the Canary application itself failed" else: print ("INFO - Stopping application. No error has occurred, application has stopped normally", flush=True) application_monitor.print_stdout() finished_email_body += "Short Running Canary finished successfully and run without errors!" 
wrapper_error_occurred = False else: print ("ERROR - Short Running Canary stopped due to unknown reason!", flush=True) cut_ticket_using_cloudwatch( git_repo_name=command_parser_arguments.git_repo_name, git_hash=command_parser_arguments.git_hash, git_hash_as_namespace=command_parser_arguments.git_hash_as_namespace, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1", ticket_description="The Short Running Canary stopped for an unknown reason!", ticket_reason="The Short Running Canary stopped for unknown reason", ticket_allow_duplicates=True, ticket_category=command_parser_arguments.ticket_category, ticket_item=command_parser_arguments.ticket_item, ticket_group=command_parser_arguments.ticket_group, ticket_type=command_parser_arguments.ticket_type, ticket_severity=4) wrapper_error_occurred = True finished_email_body += "Failure due to unknown reason! This shouldn't happen and means something has gone wrong!" except Exception as e: print ("ERROR: Could not (possibly) cut ticket due to exception!") print (f"Exception: {repr(e)}", flush=True) # Clean everything up and stop snapshot_monitor.cleanup_monitor(error_occurred=wrapper_error_occurred) application_monitor.cleanup_monitor(error_occurred=wrapper_error_occurred) print ("Short Running Canary finished!", flush=True) finished_email_body += "\n\nYou can find the log file for this run at the following S3 location: " finished_email_body += "https://s3.console.aws.amazon.com/s3/object/" finished_email_body += command_parser_arguments.s3_bucket_name finished_email_body += "?region=" + command_parser_arguments.cloudwatch_region finished_email_body += "&prefix=" + command_parser_arguments.git_repo_name + "/" + datetime_string + "/" if (wrapper_error_occurred == True): finished_email_body += "Failed_Logs/" finished_email_body += command_parser_arguments.git_hash + ".log" if (command_parser_arguments.codebuild_log_path != ""): print ("\n Codebuild log path: " + command_parser_arguments.codebuild_log_path + "\n") # Send the finish email if (send_finished_email == True): if (wrapper_error_occurred == True): snapshot_monitor.send_email(email_body=finished_email_body, email_subject_text_append="Had an error") else: snapshot_monitor.send_email(email_body=finished_email_body, email_subject_text_append="Finished") exit (application_monitor.error_code) # Start the application! application_thread() aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/codebuild/CanaryWrapper_24_7.py000066400000000000000000000500321456575232400260320ustar00rootroot00000000000000# Python wrapper script for collecting Canary metrics, setting up alarms, reporting metrics to Cloudwatch, # checking the alarms to ensure everything is correct at the end of the run, and checking for new # builds in S3, downloading them, and launching them if they exist (24/7 operation) # # Will only stop running if the Canary application itself has an issue - in which case it Canary application will # need to be fixed and then the wrapper script restarted # Needs to be installed prior to running # Part of standard packages in Python 3.4+ import argparse import time # Dependencies in project folder from CanaryWrapper_Classes import * from CanaryWrapper_MetricFunctions import * # TODO - Using subprocess may not work on Windows for starting/stopping the application thread. # Canary will likely be running on Linux, so it's probably okay, but need to confirm/check at some point.... 
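#
# Illustrative invocation (placeholder values; only the two required arguments are shown, the
# rest fall back to the defaults documented in the argument parser below):
#   python3 CanaryWrapper_24_7.py \
#       --canary_executable <local-path-the-wrapper-manages-and-replaces-with-new-S3-builds> \
#       --s3_bucket_application <key-of-the-canary-build-to-monitor-within-the-S3-bucket>
#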
# ================================================================================ # Code for command line argument parsing command_parser = argparse.ArgumentParser("CanaryWrapper_24_7") command_parser.add_argument("--canary_executable", type=str, required=True, help="The path to the canary executable") command_parser.add_argument("--canary_arguments", type=str, default="", help="The arguments to pass/launch the canary executable with") command_parser.add_argument("--s3_bucket_name", type=str, default="canary-wrapper-folder", help="(OPTIONAL, default=canary-wrapper-folder) The name of the S3 bucket where success logs will be stored") command_parser.add_argument("--s3_bucket_application", type=str, required=True, help="(OPTIONAL, default=canary-wrapper-folder) The S3 URL to monitor for changes MINUS the bucket name") command_parser.add_argument("--s3_bucket_application_in_zip", type=str, required=False, default="", help="(OPTIONAL, default="") The file path in the zip folder where the application is stored. Will be ignored if set to empty string") command_parser.add_argument("--lambda_name", type=str, default="iot-send-email-lambda", help="(OPTIONAL, default='CanarySendEmailLambda') The name of the Lambda used to send emails") command_parser_arguments = command_parser.parse_args() # ================================================================================ # Global variables that both threads use to communicate. # NOTE - These should likely be replaced with futures or similar for better thread safety. # However, these variables are only either read or written to from a single thread, no # thread should read and write to these variables. # The local file path (and extension) of the Canary application that the wrapper will manage # (This will also be the filename and directory used when a new file is detected in S3) # [THIS IS READ ONLY] canary_local_application_path = command_parser_arguments.canary_executable if (canary_local_application_path == ""): print ("ERROR - required canary_executable is empty!") exit (1) # cannot run without a canary executable # This is the arguments passed to the local file path when starting # [THIS IS READ ONLY] canary_local_application_arguments = command_parser_arguments.canary_arguments # The "Git Hash" to use for metrics and dimensions # [THIS IS READ ONLY] canary_local_git_hash_stub = "Canary" # The "Git Repo" name to use for metrics and dimensions. Is hard-coded since this is a 24/7 canary that should only run for MQTT # [THIS IS READ ONLY] canary_local_git_repo_stub = "MQTT5_24_7" # The Fixed Namespace name for the Canary # [THIS IS READ ONLY] canary_local_git_fixed_namespace = "MQTT5_24_7_Canary" # The S3 bucket name to monitor for the application # [THIS IS READ ONLY] canary_s3_bucket_name = command_parser_arguments.s3_bucket_name if (canary_s3_bucket_name == ""): canary_s3_bucket_name = "canary-wrapper-folder" # The file in the S3 bucket to monitor (The application filepath and file. 
Example: "canary/canary_application.exe") # [THIS IS READ ONLY] canary_s3_bucket_application_path = command_parser_arguments.s3_bucket_application if (canary_s3_bucket_application_path == ""): print ("ERROR - required s3_bucket_application is empty!") exit (1) # cannot run without a s3_bucket_application to monitor # The location of the file in the S3 zip, if the S3 file being monitored is a zip # (THIS IS READ ONLY) canary_s3_bucket_application_path_zip = command_parser_arguments.s3_bucket_application_in_zip if (canary_s3_bucket_application_path_zip == ""): canary_s3_bucket_application_path_zip = None # The name of the email lambda. If an empty string is set, it defaults to 'iot-send-email-lambda' if (command_parser_arguments.lambda_name == ""): command_parser_arguments.lambda_name = "iot-send-email-lambda" # The region the canary is running in # (THIS IS READ ONLY) canary_region_stub = "us-east-1" # How long (in seconds) to wait before gathering metrics and pushing them to Cloudwatch canary_metrics_wait_time = 600 # 10 minutes # How long (in seconds) to run the Application thread loop. Should be shorter or equal to the Canary Metrics time canary_application_loop_wait_time = 300 # 5 minutes # For testing - set both to 30 seconds # canary_metrics_wait_time = 30 # canary_application_loop_wait_time = 30 # ================================================================================ # Make the snapshot class data_snapshot = DataSnapshot( git_hash=canary_local_git_hash_stub, git_repo_name=canary_local_git_repo_stub, git_hash_as_namespace=False, datetime_string=None, git_fixed_namespace_text=canary_local_git_fixed_namespace, output_log_filepath="output.txt", output_to_console=True, cloudwatch_region=canary_region_stub, cloudwatch_make_dashboard=True, cloudwatch_teardown_alarms_on_complete=True, cloudwatch_teardown_dashboard_on_complete=False, s3_bucket_name=canary_s3_bucket_name, s3_bucket_upload_on_complete=True, lambda_name=command_parser_arguments.lambda_name, metric_frequency=canary_metrics_wait_time) # Make sure nothing failed if (data_snapshot.abort_due_to_internal_error == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again") exit(0) # Register metrics data_snapshot.register_metric( new_metric_name="total_cpu_usage", new_metric_function=get_metric_total_cpu_usage, new_metric_unit="Percent", new_metric_alarm_threshold=70, new_metric_reports_to_skip=1, new_metric_alarm_severity=5, is_percent=True) data_snapshot.register_metric( new_metric_name="total_memory_usage_value", new_metric_function=get_metric_total_memory_usage_value, new_metric_unit="Bytes") data_snapshot.register_metric( new_metric_name="total_memory_usage_percent", new_metric_function=get_metric_total_memory_usage_percent, new_metric_unit="Percent", new_metric_alarm_threshold=70, new_metric_reports_to_skip=0, new_metric_alarm_severity=5, is_percent=True) data_snapshot.register_dashboard_widget("Process CPU Usage - Percentage", ["total_cpu_usage"], 60) data_snapshot.register_dashboard_widget("Process Memory Usage - Percentage", ["total_memory_usage_percent"], 60) # Print diagnosis information data_snapshot.output_diagnosis_information("24/7 Canary cannot show dependencies!") # Make the S3 class s3_monitor = S3Monitor( s3_bucket_name=canary_s3_bucket_name, s3_file_name=canary_s3_bucket_application_path, s3_file_name_in_zip=canary_s3_bucket_application_path_zip, canary_local_application_path=canary_local_application_path, 
data_snapshot=data_snapshot) if (s3_monitor.had_internal_error == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again") exit(0) # Make the snapshot (metrics) monitor snapshot_monitor = SnapshotMonitor( wrapper_data_snapshot=data_snapshot, wrapper_metrics_wait_time=canary_metrics_wait_time) # Make sure nothing failed if (snapshot_monitor.had_internal_error == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again") exit(0) # Make the application monitor application_monitor = ApplicationMonitor( wrapper_application_path=canary_local_application_path, wrapper_application_arguments=canary_local_application_arguments, wrapper_application_restart_on_finish=True, data_snapshot=data_snapshot) # Make sure nothing failed if (application_monitor.error_has_occurred == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again") exit(0) # For tracking if we stopped due to a metric alarm stopped_due_to_metric_alarm = False def execution_loop(): while True: s3_monitor.monitor_loop_function(time_passed=canary_application_loop_wait_time) # Is there an error? if (s3_monitor.had_internal_error == True): print ("[Debug] S3 monitor had an internal error!") break # Is there a new file? if (s3_monitor.s3_file_needs_replacing == True): # Stop the application print ("[Debug] Stopping application monitor...") application_monitor.stop_monitoring() print ("[Debug] Getting S3 file...") s3_monitor.replace_current_file_for_new_file() # Start the application print ("[Debug] Starting application monitor...") application_monitor.start_monitoring() # Allow the snapshot monitor to cut a ticket snapshot_monitor.can_cut_ticket = True snapshot_monitor.monitor_loop_function( time_passed=canary_application_loop_wait_time, psutil_process=application_monitor.application_process_psutil) application_monitor.monitor_loop_function( time_passed=canary_application_loop_wait_time) # Did a metric go into alarm? if (snapshot_monitor.has_cut_ticket == True): # Do not allow it to cut anymore tickets until it gets a new build snapshot_monitor.can_cut_ticket = False # If an error has occurred or otherwise this thread needs to stop, then break the loop if (application_monitor.error_has_occurred == True or snapshot_monitor.had_internal_error == True): if (application_monitor.error_has_occurred == True): print ("[Debug] Application monitor error occurred!") else: print ("[Debug] Snapshot monitor internal error ocurred!") break time.sleep(canary_application_loop_wait_time) def application_thread(): # Start the application going snapshot_monitor.start_monitoring() application_monitor.start_monitoring() # Allow the snapshot monitor to cut tickets snapshot_monitor.can_cut_ticket = True start_email_body = "MQTT5 24/7 Canary Wrapper has started. This will run and continue to test new MQTT5 application builds as" start_email_body += " they pass CodeBuild and are uploaded to S3." 
snapshot_monitor.send_email(email_body=start_email_body, email_subject_text_append="Started") # Start the execution loop execution_loop() # Make sure everything is stopped snapshot_monitor.stop_monitoring() application_monitor.stop_monitoring() # Track whether this counts as an error (and therefore we should cleanup accordingly) or not wrapper_error_occurred = False send_finished_email = True finished_email_body = "MQTT5 24/7 Canary Wrapper has stopped." finished_email_body += "\n\n" try: # Find out why we stopped # S3 Monitor if (s3_monitor.had_internal_error == True): if (s3_monitor.error_due_to_credentials == False): print ("ERROR - S3 monitor stopped due to internal error!") cut_ticket_using_cloudwatch( git_repo_name=canary_local_git_repo_stub, git_hash=canary_local_git_hash_stub, git_hash_as_namespace=False, git_fixed_namespace_text=canary_local_git_fixed_namespace, cloudwatch_region=canary_region_stub, ticket_description="Snapshot monitor stopped due to internal error! Reason info: " + s3_monitor.internal_error_reason, ticket_reason="S3 monitor stopped due to internal error", ticket_allow_duplicates=True, ticket_category="AWS", ticket_type="SDKs and Tools", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_severity=4) finished_email_body += "Failure due to S3 monitor stopping due to an internal error." finished_email_body += " Reason given for error: " + s3_monitor.internal_error_reason wrapper_error_occurred = True # Snapshot Monitor elif (snapshot_monitor.had_internal_error == True): if (snapshot_monitor.has_cut_ticket == True): # We do not need to cut a ticket here - it's cut by the snapshot monitor! print ("ERROR - Snapshot monitor stopped due to metric in alarm!") finished_email_body += "Failure due to required metrics being in alarm! A new ticket should have been cut!" finished_email_body += "\nMetrics in Alarm: " + str(snapshot_monitor.cloudwatch_current_alarms_triggered) finished_email_body += "\nNOTE - this shouldn't occur in the 24/7 Canary! If it does, then the wrapper needs adjusting." wrapper_error_occurred = True else: print ("ERROR - Snapshot monitor stopped due to internal error!") cut_ticket_using_cloudwatch( git_repo_name=canary_local_git_repo_stub, git_hash=canary_local_git_hash_stub, git_hash_as_namespace=False, git_fixed_namespace_text=canary_local_git_fixed_namespace, cloudwatch_region=canary_region_stub, ticket_description="Snapshot monitor stopped due to internal error! Reason info: " + snapshot_monitor.internal_error_reason, ticket_reason="Snapshot monitor stopped due to internal error", ticket_allow_duplicates=True, ticket_category="AWS", ticket_type="SDKs and Tools", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_severity=4) wrapper_error_occurred = True finished_email_body += "Failure due to Snapshot monitor stopping due to an internal error." finished_email_body += " Reason given for error: " + snapshot_monitor.internal_error_reason # Application Monitor elif (application_monitor.error_has_occurred == True): if (application_monitor.error_due_to_credentials == True): print ("INFO - Stopping application due to error caused by credentials") print ("Please fix your credentials and then restart this application again") wrapper_error_occurred = True send_finished_email = False else: # Is the error something in the canary failed? 
if (application_monitor.error_code != 0): cut_ticket_using_cloudwatch( git_repo_name=canary_local_git_repo_stub, git_hash=canary_local_git_hash_stub, git_hash_as_namespace=False, git_fixed_namespace_text=canary_local_git_fixed_namespace, cloudwatch_region=canary_region_stub, ticket_description="The 24/7 Canary exited with a non-zero exit code! This likely means something in the canary failed.", ticket_reason="The 24/7 Canary exited with a non-zero exit code", ticket_allow_duplicates=True, ticket_category="AWS", ticket_type="SDKs and Tools", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_severity=3) wrapper_error_occurred = True finished_email_body += "Failure due to MQTT5 application exiting with a non-zero exit code!" finished_email_body += " This means something in the Canary application itself failed" else: cut_ticket_using_cloudwatch( git_repo_name=canary_local_git_repo_stub, git_hash=canary_local_git_hash_stub, git_hash_as_namespace=False, git_fixed_namespace_text=canary_local_git_fixed_namespace, cloudwatch_region=canary_region_stub, ticket_description="The 24/7 Canary exited with a zero exit code but did not restart!", ticket_reason="The 24/7 Canary exited with a zero exit code but did not restart", ticket_allow_duplicates=True, ticket_category="AWS", ticket_type="SDKs and Tools", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_severity=3) wrapper_error_occurred = True finished_email_body += "Failure due to MQTT5 application stopping and not automatically restarting!" finished_email_body += " This shouldn't occur and means something is wrong with the Canary wrapper!" # Other else: print ("ERROR - 24/7 Canary stopped due to unknown reason!") cut_ticket_using_cloudwatch( git_repo_name=canary_local_git_repo_stub, git_hash=canary_local_git_hash_stub, git_hash_as_namespace=False, git_fixed_namespace_text=canary_local_git_fixed_namespace, cloudwatch_region=canary_region_stub, ticket_description="The 24/7 Canary stopped for an unknown reason!", ticket_reason="The 24/7 Canary stopped for unknown reason", ticket_allow_duplicates=True, ticket_category="AWS", ticket_type="SDKs and Tools", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_severity=3) wrapper_error_occurred = True finished_email_body += "Failure due to unknown reason! This shouldn't happen and means something has gone wrong!" 
except Exception as e: print ("ERROR: Could not (possibly) cut ticket due to exception!") print (f"Exception: {repr(e)}", flush=True) # Clean everything up and stop snapshot_monitor.cleanup_monitor(error_occurred=wrapper_error_occurred) application_monitor.cleanup_monitor(error_occurred=wrapper_error_occurred) print ("24/7 Canary finished!") finished_email_body += "\n\nYou can find the log file for this run at the following S3 location: " finished_email_body += "https://s3.console.aws.amazon.com/s3/object/" finished_email_body += command_parser_arguments.s3_bucket_name finished_email_body += "?region=" + canary_region_stub finished_email_body += "&prefix=" + canary_local_git_repo_stub + "/" if (wrapper_error_occurred == True): finished_email_body += "Failed_Logs/" finished_email_body += canary_local_git_hash_stub + ".log" # Send the finish email if (send_finished_email == True): if (wrapper_error_occurred == True): snapshot_monitor.send_email(email_body=finished_email_body, email_subject_text_append="Had an error") else: snapshot_monitor.send_email(email_body=finished_email_body, email_subject_text_append="Finished") exit (-1) # Start the application! application_thread() aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/codebuild/CanaryWrapper_Classes.py000066400000000000000000001733161456575232400267670ustar00rootroot00000000000000# Contains all of the classes that are shared across both the Canary Wrapper and the Persistent Canary Wrapper scripts # If a class can/is reused, then it should be in this file. # Needs to be installed prior to running import boto3 import psutil # Part of standard packages in Python 3.4+ import time import os import json import subprocess import zipfile import datetime # ================================================================================ # Class that holds metric data and has a few utility functions for getting that data in a format we can use for Cloudwatch class DataSnapshot_Metric(): def __init__(self, metric_name, metric_function, metric_dimensions=[], metric_unit="None", metric_alarm_threshold=None, metric_alarm_severity=6, git_hash="", git_repo_name="", reports_to_skip=0, is_percent=False): self.metric_name = metric_name self.metric_function = metric_function self.metric_dimensions = metric_dimensions self.metric_unit = metric_unit self.metric_alarm_threshold = metric_alarm_threshold self.metric_alarm_name = self.metric_name + "-" + git_repo_name + "-" + git_hash self.metric_alarm_description = 'Alarm for metric "' + self.metric_name + '" - git hash: ' + git_hash self.metric_value = None self.reports_to_skip = reports_to_skip self.metric_alarm_severity = metric_alarm_severity self.is_percent = is_percent # Gets the latest metric value from the metric_function callback def get_metric_value(self, psutil_process : psutil.Process): if not self.metric_function is None: self.metric_value = self.metric_function(psutil_process) return self.metric_value # Returns the data needed to send to Cloudwatch when posting metrics def get_metric_cloudwatch_dictionary(self): if (self.reports_to_skip > 0): self.reports_to_skip -= 1 return None # skips sending to Cloudwatch if (self.metric_value == None): return None # skips sending to Cloudwatch return { "MetricName": self.metric_name, "Dimensions": self.metric_dimensions, "Value": self.metric_value, "Unit": self.metric_unit } class DataSnapshot_Dashboard_Widget(): def __init__(self, widget_name, metric_namespace, metric_dimension, cloudwatch_region="us-east-1", widget_period=60) -> None: self.metric_list = [] 
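        # Names of the metrics this widget displays; get_widget_dictionary() below expands each
        # name into a dashboard metrics entry of the form
        # [metric namespace, metric name, dimension name, dimension value].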
self.region = cloudwatch_region self.widget_name = widget_name self.metric_namespace = metric_namespace self.metric_dimension = metric_dimension self.widget_period = widget_period def add_metric_to_widget(self, new_metric_name): try: self.metric_list.append(new_metric_name) except Exception as e: print ("[DataSnapshot_Dashboard] ERROR - could not add metric to dashboard widget due to exception!") print (f"[DataSnapshot_Dashboard] Exception: {repr(e)}") def remove_metric_from_widget(self, existing_metric_name): try: self.metric_list.remove(existing_metric_name) except Exception as e: print ("[DataSnapshot_Dashboard] ERROR - could not remove metric from dashboard widget due to exception!") print (f"[DataSnapshot_Dashboard] Exception: {repr(e)}") def get_widget_dictionary(self): metric_list_json = [] for metric_name in self.metric_list: metric_list_json.append([self.metric_namespace, metric_name, self.metric_dimension, metric_name]) return { "type":"metric", "properties" : { "metrics" : metric_list_json, "region": self.region, "title": self.widget_name, "period": self.widget_period, }, "width": 14, "height": 10 } # ================================================================================ # Class that keeps track of the metrics registered, sets up Cloudwatch and S3, and sends periodic reports # Is the backbone of the reporting operation class DataSnapshot(): def __init__(self, git_hash=None, git_repo_name=None, git_hash_as_namespace=False, git_fixed_namespace_text="mqtt5_canary", datetime_string=None, output_log_filepath=None, output_to_console=True, cloudwatch_region="us-east-1", cloudwatch_make_dashboard=False, cloudwatch_teardown_alarms_on_complete=True, cloudwatch_teardown_dashboard_on_complete=True, s3_bucket_name="canary-wrapper-bucket", s3_bucket_upload_on_complete=True, lambda_name="CanarySendEmailLambda", metric_frequency=None): # Setting initial values # ================== self.first_metric_call = True self.metrics = [] self.metrics_numbers = [] self.metric_report_number = 0 self.metric_report_non_zero_count = 4 # Needed so we can initialize Cloudwatch alarms, etc, outside of the init function # but before we start sending data. # This boolean tracks whether we have done the post-initialization prior to sending the first report. self.perform_final_initialization = True # Watched by the thread creating the snapshot. Will cause the thread(s) to abort and return an error. 
self.abort_due_to_internal_error = False self.abort_due_to_internal_error_reason = "" self.abort_due_to_internal_error_due_to_credentials = False self.git_hash = None self.git_repo_name = None self.git_hash_as_namespace = git_hash_as_namespace self.git_fixed_namespace_text = git_fixed_namespace_text self.git_metric_namespace = None self.cloudwatch_region = cloudwatch_region self.cloudwatch_client = None self.cloudwatch_make_dashboard = cloudwatch_make_dashboard self.cloudwatch_teardown_alarms_on_complete = cloudwatch_teardown_alarms_on_complete self.cloudwatch_teardown_dashboard_on_complete = cloudwatch_teardown_dashboard_on_complete self.cloudwatch_dashboard_name = "" self.cloudwatch_dashboard_widgets = [] self.s3_bucket_name = s3_bucket_name self.s3_client = None self.s3_bucket_upload_on_complete = s3_bucket_upload_on_complete self.output_to_file_filepath = output_log_filepath self.output_to_file = False self.output_file = None self.output_to_console = output_to_console self.lambda_client = None self.lambda_name = lambda_name self.datetime_string = datetime_string self.metric_frequency = metric_frequency # ================== # Check for valid credentials # ================== try: tmp_sts_client = boto3.client('sts') tmp_sts_client.get_caller_identity() except Exception as e: print ("[DataSnapshot] ERROR - AWS credentials are NOT valid!") print (f"[DataSnapshot] ERROR - Exception: {repr(e)}") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "AWS credentials are NOT valid!" self.abort_due_to_internal_error_due_to_credentials = True return # ================== # Git related stuff # ================== if (git_hash == None or git_repo_name == None): print("[DataSnapshot] ERROR - a Git hash and repository name are REQUIRED for the canary wrapper to run!") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "No Git hash and repository passed!" return self.git_hash = git_hash self.git_repo_name = git_repo_name if (self.git_hash_as_namespace == False): self.git_metric_namespace = self.git_fixed_namespace_text else: if (self.datetime_string == None): git_namespace_prepend_text = self.git_repo_name + "-" + self.git_hash else: git_namespace_prepend_text = self.git_repo_name + "/" + self.datetime_string + "-" + self.git_hash self.git_metric_namespace = git_namespace_prepend_text # ================== # Cloudwatch related stuff # ================== try: self.cloudwatch_client = boto3.client('cloudwatch', self.cloudwatch_region) self.cloudwatch_dashboard_name = self.git_metric_namespace except Exception as e: self.print_message("[DataSnapshot] ERROR - could not make Cloudwatch client due to exception!") self.print_message(f"[DataSnapshot] Exception: {repr(e)}") self.cloudwatch_client = None self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not make Cloudwatch client!" return # ================== # S3 related stuff # ================== try: self.s3_client = boto3.client("s3") except Exception as e: self.print_message("[DataSnapshot] ERROR - could not make S3 client due to exception!") self.print_message(f"[DataSnapshot] Exception: {repr(e)}") self.s3_client = None self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not make S3 client!" 
return # ================== # Lambda related stuff # ================== try: self.lambda_client = boto3.client("lambda", self.cloudwatch_region) except Exception as e: self.print_message("[DataSnapshot] ERROR - could not make Lambda client due to exception!") self.print_message(f"[DataSnapshot] Exception: {repr(e)}") self.lambda_client = None self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not make Lambda client!" return # ================== # File output (logs) related stuff # ================== if (not output_log_filepath is None): self.output_to_file = True self.output_file = open(self.output_to_file_filepath, "w") else: self.output_to_file = False self.output_file = None # ================== self.print_message("[DataSnapshot] Data snapshot created!") # Cleans the class - closing any files, removing alarms, and sending data to S3. # Should be called at the end when you are totally finished shadowing metrics def cleanup(self, error_occurred=False): if (self.s3_bucket_upload_on_complete == True): self.export_result_to_s3_bucket(copy_output_log=True, log_is_error=error_occurred) self._cleanup_cloudwatch_alarms() if (self.cloudwatch_make_dashboard == True): self._cleanup_cloudwatch_dashboard() self.print_message("[DataSnapshot] Data snapshot cleaned!") if (self.output_file is not None): self.output_file.close() self.output_file = None # Utility function for printing messages def print_message(self, message): if self.output_to_file == True: try: if (self.output_file is None): self.output_file = open(self.output_to_file_filepath, "w") self.output_file.write(message + "\n") except Exception as ex: print (f"[DataSnapshot] ERROR - Exception trying to print to file") print (f"[DataSnapshot] ERROR - Exception: {repr(ex)}") if (self.output_file is not None): self.output_file.close() self.output_file = None self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not print data to output file!" if self.output_to_console == True: print(message, flush=True) # Utility function - adds the metric alarms to Cloudwatch. 
We do run this right before the first # collection of metrics so we can register metrics before we initialize Cloudwatch def _init_cloudwatch_pre_first_run(self): for metric in self.metrics: if (not metric.metric_alarm_threshold is None): self._add_cloudwatch_metric_alarm(metric) if (self.cloudwatch_make_dashboard == True): self._init_cloudwatch_pre_first_run_dashboard() # Utility function - adds the Cloudwatch Dashboard for the currently running data snapshot def _init_cloudwatch_pre_first_run_dashboard(self): try: # Remove the old dashboard if it exists before adding a new one self._cleanup_cloudwatch_dashboard() new_dashboard_widgets_array = [] for widget in self.cloudwatch_dashboard_widgets: new_dashboard_widgets_array.append(widget.get_widget_dictionary()) new_dashboard_body = { "start": "-PT1H", "widgets": new_dashboard_widgets_array, } new_dashboard_body_json = json.dumps(new_dashboard_body) self.cloudwatch_client.put_dashboard( DashboardName=self.cloudwatch_dashboard_name, DashboardBody= new_dashboard_body_json) self.print_message("[DataSnapshot] Added Cloudwatch dashboard successfully") except Exception as e: self.print_message(f"[DataSnapshot] ERROR - Cloudwatch client could not make dashboard due to exception") self.print_message(f"[DataSnapshot] ERROR - Exception: {repr(e)}") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = f"Cloudwatch client could not make dashboard due to exception" return # Utility function - The function that adds each individual metric alarm. def _add_cloudwatch_metric_alarm(self, metric): if self.cloudwatch_client is None: self.print_message("[DataSnapshot] ERROR - Cloudwatch client not setup. Cannot register alarm") return try: self.cloudwatch_client.put_metric_alarm( AlarmName=metric.metric_alarm_name, AlarmDescription=metric.metric_alarm_description, MetricName=metric.metric_name, Namespace=self.git_metric_namespace, Statistic="Maximum", Dimensions=metric.metric_dimensions, Period=60, # How long (in seconds) is an evaluation period? EvaluationPeriods=120, # How many periods does it need to be invalid for? DatapointsToAlarm=1, # How many data points need to be invalid? 
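                # NOTE: with EvaluationPeriods=120 and DatapointsToAlarm=1 above, a single
                # breaching 60-second datapoint anywhere in the most recent 120 evaluation
                # periods is enough to move this alarm into the ALARM state.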
Threshold=metric.metric_alarm_threshold, ComparisonOperator="GreaterThanOrEqualToThreshold", ) except Exception as e: self.print_message(f"[DataSnapshot] ERROR - could not register alarm for metric {metric.metric_name} due to exception") self.print_message(f"[DataSnapshot] ERROR - Exception {repr(e)}") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = f"Cloudwatch client could not make alarm due to exception" # Utility function - removes all the Cloudwatch alarms for the metrics def _cleanup_cloudwatch_alarms(self): if (self.cloudwatch_teardown_alarms_on_complete == True): try: for metric in self.metrics: if (not metric.metric_alarm_threshold is None): self.cloudwatch_client.delete_alarms(AlarmNames=[metric.metric_alarm_name]) except Exception as e: self.print_message(f"[DataSnapshot] ERROR - could not delete alarms due to exception") self.print_message(f"[DataSnapshot] ERROR - Exception {repr(e)}") # Utility function - removes all Cloudwatch dashboards created def _cleanup_cloudwatch_dashboard(self): if (self.cloudwatch_teardown_dashboard_on_complete == True): try: self.cloudwatch_client.delete_dashboards(DashboardNames=[self.cloudwatch_dashboard_name]) self.print_message("[DataSnapshot] Cloudwatch Dashboards deleted successfully!") except Exception as e: self.print_message(f"[DataSnapshot] ERROR - dashboard cleaning function failed due to exception") self.print_message(f"[DataSnapshot] ERROR - Exception {repr(e)}") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Cloudwatch dashboard cleaning function failed due to exception" return # Returns the results of the metric alarms. Will return a list containing tuples with the following structure: # [Boolean (False = the alarm is in the ALARM state), String (Name of the alarm that is in the ALARM state), int (severity of alarm)] # Currently this function will only return a list of failed alarms, so if the returned list is empty, then it means all # alarms did not get to the ALARM state in Cloudwatch for the registered metrics def get_cloudwatch_alarm_results(self): return self._check_cloudwatch_alarm_states() # Utility function - collects the metric alarm results and returns them in a list. 
def _check_cloudwatch_alarm_states(self): return_result_list = [] tmp = None for metric in self.metrics: tmp = self._check_cloudwatch_alarm_state_metric(metric) if (tmp[1] != None): # Do not cut a ticket for the "Alive_Alarm" that we use to check if the Canary is running if ("Alive_Alarm" in tmp[1] == False): if (tmp[0] != True): return_result_list.append(tmp) return return_result_list # Utility function - checks each individual alarm and returns a tuple with the following format: # [Boolean (False if the alarm is in the ALARM state, otherwise it is true), String (name of the alarm), Int (severity of alarm)] def _check_cloudwatch_alarm_state_metric(self, metric): try: alarms_response = self.cloudwatch_client.describe_alarms_for_metric( MetricName=metric.metric_name, Namespace=self.git_metric_namespace, Dimensions=metric.metric_dimensions) return_result = [True, None, metric.metric_alarm_severity] for metric_alarm_dict in alarms_response["MetricAlarms"]: if metric_alarm_dict["StateValue"] == "ALARM": return_result[0] = False return_result[1] = metric_alarm_dict["AlarmName"] break return return_result except Exception as e: self.print_message(f"[DataSnapshot] ERROR - checking cloudwatch alarm failed due to exception") self.print_message(f"[DataSnapshot] ERROR - Exception {repr(e)}") return None # Exports a file with the same name as the commit Git hash to an S3 bucket in a folder with the Git repo name. # By default, this file will only contain the Git hash. # If copy_output_log is true, then the output log will be copied into this file, which may be useful for debugging. def export_result_to_s3_bucket(self, copy_output_log=False, log_is_error=False): if (self.s3_client is None): self.print_message("[DataSnapshot] ERROR - No S3 client initialized! Cannot send log to S3") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "S3 client not initialized and therefore cannot send log to S3" return s3_file = open(self.git_hash + ".log", "w") s3_file.write(self.git_hash) # Might be useful for debugging? if (copy_output_log == True and self.output_to_file == True): # Are we still writing? 
If so, then we need to close the file first so everything is written to it is_output_file_open_previously = False if (self.output_file != None): self.output_file.close() is_output_file_open_previously = True self.output_file = open(self.output_to_file_filepath, "r") s3_file.write("\n\nOUTPUT LOG\n") s3_file.write("==========================================================================================\n") output_file_lines = self.output_file.readlines() for line in output_file_lines: s3_file.write(line) self.output_file.close() # If we were writing to the output previously, then we need to open in RW mode so we can continue to write to it if (is_output_file_open_previously == True): self.output_to_file = open(self.output_to_file_filepath, "a") s3_file.close() # Upload to S3 try: if (log_is_error == False): if (self.datetime_string == None): self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/" + self.git_hash + ".log") else: self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/" + self.datetime_string + "/" + self.git_hash + ".log") else: if (self.datetime_string == None): self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/Failed_Logs/" + self.git_hash + ".log") else: self.s3_client.upload_file(self.git_hash + ".log", self.s3_bucket_name, self.git_repo_name + "/Failed_Logs/" + self.datetime_string + "/" + self.git_hash + ".log") self.print_message("[DataSnapshot] Uploaded to S3!") except Exception as e: self.print_message(f"[DataSnapshot] ERROR - could not upload to S3 due to exception") self.print_message(f"[DataSnapshot] ERROR - Exception {repr(e)}") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "S3 client had exception and therefore could not upload log!" os.remove(self.git_hash + ".log") return # Delete the file when finished os.remove(self.git_hash + ".log") # Sends an email via a special lambda. The payload has to contain a message and a subject # * (REQUIRED) message is the message you want to send in the body of the email # * (REQUIRED) subject is the subject that the email will be sent with def lambda_send_email(self, message, subject): payload = {"Message":message, "Subject":subject} payload_string = json.dumps(payload) try: self.lambda_client.invoke( FunctionName=self.lambda_name, InvocationType="Event", ClientContext="MQTT Wrapper Script", Payload=payload_string ) except Exception as e: self.print_message(f"[DataSnapshot] ERROR - could not send email via Lambda due to exception") self.print_message(f"[DataSnapshot] ERROR - Exception {repr(e)}") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Lambda email function had an exception!" return # Registers a metric to be polled by the Snapshot. # * (REQUIRED) new_metric_name is the name of the metric. Cloudwatch will use this name # * (REQUIRED) new_metric_function is expected to be a pointer to a Python function and will not work if you pass a value/object # * (OPTIONAL) new_metric_unit is the metric unit. There is a list of possible metric unit types on the Boto3 documentation for Cloudwatch # * (OPTIONAL) new_metric_alarm_threshold is the value that the metric has to exceed in order to be registered as an alarm # * (OPTIONAL) new_reports_to_skip is the number of reports this metric will return nothing, but will get it's value. 
# * Useful for CPU calculations that require deltas # * (OPTIONAL) new_metric_alarm_severity is the severity of the ticket if this alarm is triggered. A severity of 6+ means no ticket. # * (OPTIONAL) is_percent whether or not to display the metric as a percent when printing it (default=false) def register_metric(self, new_metric_name, new_metric_function, new_metric_unit="None", new_metric_alarm_threshold=None, new_metric_reports_to_skip=0, new_metric_alarm_severity=6, is_percent=False): new_metric_dimensions = [] if (self.git_hash_as_namespace == False): git_namespace_prepend_text = self.git_repo_name + "-" + self.git_hash new_metric_dimensions.append( {"Name": git_namespace_prepend_text, "Value": new_metric_name}) else: new_metric_dimensions.append( {"Name": "System_Metrics", "Value": new_metric_name}) new_metric = DataSnapshot_Metric( metric_name=new_metric_name, metric_function=new_metric_function, metric_dimensions=new_metric_dimensions, metric_unit=new_metric_unit, metric_alarm_threshold=new_metric_alarm_threshold, metric_alarm_severity=new_metric_alarm_severity, git_hash=self.git_hash, git_repo_name=self.git_repo_name, reports_to_skip=new_metric_reports_to_skip, is_percent=is_percent ) self.metrics.append(new_metric) # append an empty list so we can track it's metrics over time self.metrics_numbers.append([]) def register_dashboard_widget(self, new_widget_name, metrics_to_add=[], new_widget_period=60): # We need to know what metric dimension to get the metric(s) from metric_dimension_string = "" if (self.git_hash_as_namespace == False): metric_dimension_string = self.git_repo_name + "-" + self.git_hash else: metric_dimension_string = "System_Metrics" widget = self._find_cloudwatch_widget(name=new_widget_name) if (widget == None): widget = DataSnapshot_Dashboard_Widget( widget_name=new_widget_name, metric_namespace=self.git_metric_namespace, metric_dimension=metric_dimension_string, cloudwatch_region=self.cloudwatch_region, widget_period=new_widget_period) self.cloudwatch_dashboard_widgets.append(widget) for metric in metrics_to_add: self.register_metric_to_dashboard_widget(widget_name=new_widget_name, metric_name=metric) def register_metric_to_dashboard_widget(self, widget_name, metric_name, widget=None): if widget is None: widget = self._find_cloudwatch_widget(name=widget_name) if widget is None: print ("[DataSnapshot] ERROR - could not find widget with name: " + widget_name, flush=True) return # Adjust metric name so it has the git hash, repo, etc metric_name_formatted = metric_name widget.add_metric_to_widget(new_metric_name=metric_name_formatted) return def remove_metric_from_dashboard_widget(self, widget_name, metric_name, widget=None): if widget is None: widget = self._find_cloudwatch_widget(name=widget_name) if widget is None: print ("[DataSnapshot] ERROR - could not find widget with name: " + widget_name, flush=True) return widget.remove_metric_from_widget(existing_metric_name=metric_name) return def _find_cloudwatch_widget(self, name): result = None for widget in self.cloudwatch_dashboard_widgets: if widget.widget_name == name: return widget return result # Prints the metrics to the console def export_metrics_console(self): datetime_now = datetime.datetime.now() datetime_string = datetime_now.strftime("%d-%m-%Y/%H:%M:%S") self.print_message("\n[DataSnapshot] Metric report: " + str(self.metric_report_number) + " (" + datetime_string + ")") for metric in self.metrics: if (metric.is_percent == True): self.print_message(" " + metric.metric_name + " - value: " + 
str(metric.metric_value) + "%") else: self.print_message(" " + metric.metric_name + " - value: " + str(metric.metric_value)) self.print_message("") # Sends all registered metrics to Cloudwatch. # Does NOT need to be called in a loop. Call post_metrics in a loop to send all the metrics as expected. # This is just the Cloudwatch part of that loop. def export_metrics_cloudwatch(self): if (self.cloudwatch_client == None): self.print_message("[DataSnapshot] Error - cannot export Cloudwatch metrics! Cloudwatch was not initialized.") self.abort_due_to_internal_error = True self.abort_due_to_internal_error_reason = "Could not export Cloudwatch metrics due to no Cloudwatch client initialized!" return self.print_message("[DataSnapshot] Preparing to send to Cloudwatch...") metrics_data = [] metric_data_tmp = None for metric in self.metrics: metric_data_tmp = metric.get_metric_cloudwatch_dictionary() if (not metric_data_tmp is None): metrics_data.append(metric_data_tmp) if (len(metrics_data) == 0): self.print_message("[DataSnapshot] INFO - no metric data to send. Skipping...") return try: self.cloudwatch_client.put_metric_data( Namespace=self.git_metric_namespace, MetricData=metrics_data) self.print_message("[DataSnapshot] Metrics sent to Cloudwatch.") except Exception as e: self.print_message(f"[DataSnapshot] Error - something went wrong posting Cloudwatch metrics") self.print_message(f"[DataSnapshot] ERROR - Exception {repr(e)}") self.print_message("[DataSnapshot] Not going to crash - just going to try again later") return # Call this at a set interval to post the metrics to Cloudwatch, etc. # This is the function you want to call repeatedly after you have everything set up. def post_metrics(self, psutil_process : psutil.Process): if (self.perform_final_initialization == True): self.perform_final_initialization = False self._init_cloudwatch_pre_first_run() # Update the metric values internally for i in range(0, len(self.metrics)): metric_value = self.metrics[i].get_metric_value(psutil_process) self.metrics_numbers[i].insert(0, metric_value) # Only keep the last metric_report_non_zero_count results if (len(self.metrics_numbers[i]) > self.metric_report_non_zero_count): amount_to_delete = len(self.metrics_numbers[i]) - self.metric_report_non_zero_count del self.metrics_numbers[i][-amount_to_delete:] # If we have metric_report_non_zero_count amount of metrics, make sure there is at least one # non-zero.
If it is all zero, then print a log so we can easily find it if (len(self.metrics_numbers[i]) == self.metric_report_non_zero_count): non_zero_found = False for j in range(0, len(self.metrics_numbers[i])): if (self.metrics_numbers[i][j] != 0.0 and self.metrics_numbers[i][j] != None): non_zero_found = True break if (non_zero_found == False): self.print_message("\n[DataSnapshot] METRIC ZERO ERROR!") self.print_message(f"[DataSnapshot] Metric index {i} has been zero for last {self.metric_report_non_zero_count} reports!") self.print_message("\n") self.metric_report_number += 1 self.export_metrics_console() self.export_metrics_cloudwatch() def output_diagnosis_information(self, dependencies_list): # Print general diagnosis information self.print_message("\n========== Canary Wrapper diagnosis information ==========") self.print_message("\nRunning Canary for repository: " + self.git_repo_name) self.print_message("\t Commit hash: " + self.git_hash) if not dependencies_list == "": self.print_message("\nDependencies:") dependencies_list = dependencies_list.split(";") dependencies_list_found_hash = False for i in range(0, len(dependencies_list)): # There's probably a better way to do this... if (dependencies_list_found_hash == True): dependencies_list_found_hash = False continue self.print_message("* " + dependencies_list[i]) if (i+1 < len(dependencies_list)): self.print_message("\t Commit hash: " + dependencies_list[i+1]) dependencies_list_found_hash = True else: self.print_message("\t Commit hash: Unknown") if (self.metric_frequency != None): self.print_message("\nMetric Snapshot Frequency: " + str(self.metric_frequency) + " seconds") self.print_message("\nMetrics:") for metric in self.metrics: self.print_message("* " + metric.metric_name) if metric.metric_alarm_threshold is not None: self.print_message("\t Alarm Threshold: " + str(metric.metric_alarm_threshold)) self.print_message("\t Alarm Severity: " + str(metric.metric_alarm_severity)) else: self.print_message("\t No alarm set for metric.") self.print_message("\n") self.print_message("==========================================================") self.print_message("\n") # ================================================================================ class SnapshotMonitor(): def __init__(self, wrapper_data_snapshot, wrapper_metrics_wait_time) -> None: self.data_snapshot = wrapper_data_snapshot self.had_internal_error = False self.error_due_to_credentials = False self.internal_error_reason = "" self.error_due_to_alarm = False self.can_cut_ticket = False self.has_cut_ticket = False # A list of all the alarms triggered in the last check, cached for later # NOTE - this is only the alarm names! Not the severity. This just makes it easier to process self.cloudwatch_current_alarms_triggered = [] # Check for errors if (self.data_snapshot.abort_due_to_internal_error == True): self.had_internal_error = True self.internal_error_reason = "Could not initialize DataSnapshot. Likely credentials are not setup!" 
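# --------------------------------------------------------------------------------
# Illustrative sketch (NOT part of the original wrapper): how a caller might use the
# DataSnapshot metric API documented above. It assumes a DataSnapshot instance named
# `data_snapshot` has already been constructed (its constructor appears earlier in
# this file) and that CanaryWrapper_MetricFunctions.py is importable from the same
# directory; both are assumptions made for this example only.
import time
import psutil
from CanaryWrapper_MetricFunctions import get_metric_total_cpu_usage, get_metric_total_memory_usage_percent

def example_register_and_post_metrics(data_snapshot, seconds_between_reports=60):
    process = psutil.Process()  # process whose metrics will be sampled
    # Pass the metric function itself - DataSnapshot calls it on every report
    data_snapshot.register_metric(
        new_metric_name="total_cpu_usage",
        new_metric_function=get_metric_total_cpu_usage,
        new_metric_unit="Percent",
        new_metric_alarm_threshold=70,
        new_metric_reports_to_skip=1,  # CPU usage needs a delta, so the first report is skipped
        new_metric_alarm_severity=5,
        is_percent=True)
    data_snapshot.register_metric(
        new_metric_name="total_memory_usage_percent",
        new_metric_function=get_metric_total_memory_usage_percent,
        new_metric_unit="Percent",
        is_percent=True)
    data_snapshot.register_dashboard_widget(
        "System Usage", metrics_to_add=["total_cpu_usage", "total_memory_usage_percent"])
    while True:
        data_snapshot.post_metrics(process)  # prints to console and exports to Cloudwatch
        time.sleep(seconds_between_reports)
# --------------------------------------------------------------------------------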
if (self.data_snapshot.abort_due_to_internal_error_due_to_credentials == True): self.error_due_to_credentials = True self.data_snapshot.cleanup() return # How long to wait before posting a metric self.metric_post_timer = 0 self.metric_post_timer_time = wrapper_metrics_wait_time def register_metric(self, new_metric_name, new_metric_function, new_metric_unit="None", new_metric_alarm_threshold=None, new_metric_reports_to_skip=0, new_metric_alarm_severity=6): try: self.data_snapshot.register_metric( new_metric_name=new_metric_name, new_metric_function=new_metric_function, new_metric_unit=new_metric_unit, new_metric_alarm_threshold=new_metric_alarm_threshold, new_metric_reports_to_skip=new_metric_reports_to_skip, new_metric_alarm_severity=new_metric_alarm_severity) except Exception as e: self.print_message(f"[SnaptshotMonitor] ERROR - could not register metric in data snapshot due to exception") self.print_message(f"[SnaptshotMonitor] ERROR - Exception {repr(e)}") self.had_internal_error = True self.internal_error_reason = "Could not register metric in data snapshot due to exception" return def register_dashboard_widget(self, new_widget_name, metrics_to_add=[], widget_period=60): self.data_snapshot.register_dashboard_widget(new_widget_name=new_widget_name, metrics_to_add=metrics_to_add, new_widget_period=widget_period) def output_diagnosis_information(self, dependencies=""): self.data_snapshot.output_diagnosis_information(dependencies_list=dependencies) def check_alarms_for_new_alarms(self, triggered_alarms): if len(triggered_alarms) > 0: self.data_snapshot.print_message( "WARNING - One or more alarms are in state of ALARM") old_alarms_still_active = [] new_alarms = [] new_alarms_highest_severity = 6 new_alarm_found = True new_alarm_ticket_description = "Canary has metrics in ALARM state!\n\nMetrics in alarm:\n" for triggered_alarm in triggered_alarms: new_alarm_found = True # Is this a new alarm? for old_alarm_name in self.cloudwatch_current_alarms_triggered: if (old_alarm_name == triggered_alarm[1]): new_alarm_found = False old_alarms_still_active.append(triggered_alarm[1]) new_alarm_ticket_description += "* (STILL IN ALARM) " + triggered_alarm[1] + "\n" new_alarm_ticket_description += "\tSeverity: " + str(triggered_alarm[2]) new_alarm_ticket_description += "\n" break # If it is a new alarm, then add it to our list so we can cut a new ticket if (new_alarm_found == True): self.data_snapshot.print_message(' (NEW) Alarm with name "' + triggered_alarm[1] + '" is in the ALARM state!') new_alarms.append(triggered_alarm[1]) if (triggered_alarm[2] < new_alarms_highest_severity): new_alarms_highest_severity = triggered_alarm[2] new_alarm_ticket_description += "* " + triggered_alarm[1] + "\n" new_alarm_ticket_description += "\tSeverity: " + str(triggered_alarm[2]) new_alarm_ticket_description += "\n" if len(new_alarms) > 0: if (self.can_cut_ticket == True): cut_ticket_using_cloudwatch( git_repo_name=self.data_snapshot.git_repo_name, git_hash=self.data_snapshot.git_hash, git_hash_as_namespace=False, git_fixed_namespace_text=self.data_snapshot.git_fixed_namespace_text, cloudwatch_region="us-east-1", ticket_description="New metric(s) went into alarm for the Canary! 
Metrics in alarm: " + str(new_alarms), ticket_reason="New metric(s) went into alarm", ticket_allow_duplicates=True, ticket_category="AWS", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_type="SDKs and Tools", ticket_severity=4) self.has_cut_ticket = True # Cache the new alarms and the old alarms self.cloudwatch_current_alarms_triggered = old_alarms_still_active + new_alarms else: self.cloudwatch_current_alarms_triggered.clear() def monitor_loop_function(self, psutil_process : psutil.Process, time_passed=30): # Check for internal errors if (self.data_snapshot.abort_due_to_internal_error == True): self.had_internal_error = True self.internal_error_reason = "Data Snapshot internal error: " + self.data_snapshot.abort_due_to_internal_error_reason return try: # Poll the metric alarms if (self.had_internal_error == False): # Get a report of all the alarms that might have been set to an alarm state triggered_alarms = self.data_snapshot.get_cloudwatch_alarm_results() self.check_alarms_for_new_alarms(triggered_alarms) except Exception as e: self.print_message("[SnaptshotMonitor] ERROR - exception occurred checking metric alarms!") self.print_message(f"[SnaptshotMonitor] ERROR - Exception {repr(e)}") self.print_message("[SnaptshotMonitor] Not going to crash - just going to try again later") return if (self.metric_post_timer <= 0): if (self.had_internal_error == False): try: self.data_snapshot.post_metrics(psutil_process) except Exception as e: self.print_message("[SnaptshotMonitor] ERROR - exception occurred posting metrics!") self.print_message(f"[SnaptshotMonitor] ERROR - Exception {repr(e)}") self.print_message("[SnaptshotMonitor] Not going to crash - just going to try again later") # reset the timer self.metric_post_timer += self.metric_post_timer_time return # reset the timer self.metric_post_timer += self.metric_post_timer_time # Gather and post the metrics self.metric_post_timer -= time_passed def send_email(self, email_body, email_subject_text_append=None): if (email_subject_text_append != None): self.data_snapshot.lambda_send_email(email_body, "Canary: " + self.data_snapshot.git_repo_name + ":" + self.data_snapshot.git_hash + " - " + email_subject_text_append) else: self.data_snapshot.lambda_send_email(email_body, "Canary: " + self.data_snapshot.git_repo_name + ":" + self.data_snapshot.git_hash) def stop_monitoring(self): # Stub - just added for consistency pass def start_monitoring(self): # Stub - just added for consistency pass def restart_monitoring(self): # Stub - just added for consistency pass def cleanup_monitor(self, error_occurred=False): self.data_snapshot.cleanup(error_occurred=error_occurred) def print_message(self, message): if (self.data_snapshot != None): self.data_snapshot.print_message(message) else: print(message, flush=True) # ================================================================================ class ApplicationMonitor(): def __init__(self, wrapper_application_path, wrapper_application_arguments, wrapper_application_restart_on_finish=True, data_snapshot=None) -> None: self.application_process = None self.application_process_psutil = None self.error_has_occurred = False self.error_due_to_credentials = False self.error_reason = "" self.error_code = 0 self.wrapper_application_path = wrapper_application_path self.wrapper_application_arguments = wrapper_application_arguments self.wrapper_application_restart_on_finish = wrapper_application_restart_on_finish self.data_snapshot=data_snapshot self.still_running_wait_number = 0 
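# --------------------------------------------------------------------------------
# Illustrative sketch (NOT part of the original wrapper): one way a main script could
# drive a SnapshotMonitor on an interval. `snapshot_monitor` (a SnapshotMonitor built
# around a valid DataSnapshot) and `canary_process` (a psutil.Process for the watched
# application) are placeholder names assumed for this example only.
import time

def example_snapshot_monitor_loop(snapshot_monitor, canary_process, loop_wait_seconds=30):
    while True:
        snapshot_monitor.monitor_loop_function(psutil_process=canary_process, time_passed=loop_wait_seconds)
        if snapshot_monitor.had_internal_error:
            # One possible reaction to an internal error: report it by email and stop
            snapshot_monitor.send_email(
                email_body="Canary stopped: " + snapshot_monitor.internal_error_reason,
                email_subject_text_append="Internal Error")
            break
        time.sleep(loop_wait_seconds)
# --------------------------------------------------------------------------------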
self.stdout_file_path = "Canary_Stdout_File.txt" def start_monitoring(self): self.print_message("[ApplicationMonitor] Starting to monitor application...") if (self.application_process == None): try: canary_command = self.wrapper_application_path + " " + self.wrapper_application_arguments self.application_process = subprocess.Popen(canary_command + " | tee " + self.stdout_file_path, shell=True) self.application_process_psutil = psutil.Process(self.application_process.pid) self.print_message ("[ApplicationMonitor] Application started...") except Exception as e: self.print_message ("[ApplicationMonitor] ERROR - Could not launch Canary/Application due to exception!") self.print_message(f"[ApplicationMonitor] ERROR - Exception {repr(e)}") self.error_has_occurred = True self.error_reason = "Could not launch Canary/Application due to exception" self.error_code = 1 return else: self.print_message("[ApplicationMonitor] ERROR - Monitor already has an application process! Cannot monitor two applications with one monitor class!") def restart_monitoring(self): self.print_message ("[ApplicationMonitor] Restarting monitor application...") if (self.application_process != None): try: self.stop_monitoring() self.start_monitoring() self.print_message("\n[ApplicationMonitor] Restarted monitor application!") self.print_message("================================================================================") except Exception as e: self.print_message(f"[ApplicationMonitor] ERROR - Could not restart Canary/Application due to exception") self.print_message(f"[ApplicationMonitor] ERROR - Exception {repr(e)}") self.error_has_occurred = True self.error_reason = "Could not restart Canary/Application due to exception" self.error_code = 1 return else: self.print_message("[ApplicationMonitor] ERROR - Application process restart called but process is/was not running!") self.error_has_occurred = True self.error_reason = "Could not restart Canary/Application due to application process not being started initially" self.error_code = 1 return def stop_monitoring(self): self.print_message ("[ApplicationMonitor] Stopping monitor application...") if (not self.application_process == None): self.application_process.terminate() self.application_process.wait() self.print_message ("[ApplicationMonitor] Stopped monitor application!") self.application_process = None self.print_stdout() else: self.print_message ("[ApplicationMonitor] ERROR - cannot stop monitor application because no process is found!") def print_stdout(self): # Print the STDOUT file if (os.path.isfile(self.stdout_file_path)): self.print_message("Just finished Application STDOUT: ") try: with open(self.stdout_file_path, "r") as stdout_file: self.print_message(stdout_file.read()) os.remove(self.stdout_file_path) except Exception as e: self.print_message(f"[ApplicationMonitor] ERROR - Could not print Canary/Application stdout to exception") self.print_message(f"[ApplicationMonitor] ERROR - Exception {repr(e)}") def monitor_loop_function(self, time_passed=30): if (self.application_process != None): application_process_return_code = None try: application_process_return_code = self.application_process.poll() except Exception as e: self.print_message("[ApplicationMonitor] ERROR - exception occurred while trying to poll application status!") self.print_message(f"[ApplicationMonitor] ERROR - Exception {repr(e)}") self.error_has_occurred = True self.error_reason = "Exception when polling application status" self.error_code = 1 return # If it is not none, then the application 
finished if (application_process_return_code != None): self.print_message("[ApplicationMonitor] Monitor application has stopped! Processing result...") if (application_process_return_code != 0): self.print_message("[ApplicationMonitor] ERROR - Something Crashed in Canary/Application!") self.print_message("[ApplicationMonitor] Error code: " + str(application_process_return_code)) self.error_has_occurred = True self.error_reason = "Canary application crashed!" self.error_code = application_process_return_code else: # Should we restart? if (self.wrapper_application_restart_on_finish == True): self.print_message("[ApplicationMonitor] NOTE - Canary finished running and is restarting...") self.restart_monitoring() else: self.print_message("[ApplicationMonitor] Monitor application has stopped and monitor is not supposed to restart... Finishing...") self.error_has_occurred = True self.error_reason = "Canary Application Finished" self.error_code = 0 else: # Only print that we are still running the monitor application every 4 times to reduce log spam. self.still_running_wait_number += 1 if self.still_running_wait_number >= 4: self.print_message("[ApplicationMonitor] Monitor application is still running...") self.still_running_wait_number = 0 def cleanup_monitor(self, error_occurred=False): pass def print_message(self, message): if (self.data_snapshot != None): self.data_snapshot.print_message(message) else: print(message, flush=True) # ================================================================================ class S3Monitor(): def __init__(self, s3_bucket_name, s3_file_name, s3_file_name_in_zip, canary_local_application_path, data_snapshot) -> None: self.s3_client = None self.s3_current_object_version_id = None self.s3_current_object_last_modified = None self.s3_bucket_name = s3_bucket_name self.s3_file_name = s3_file_name self.s3_file_name_only_path, self.s3_file_name_only_extension = os.path.splitext(s3_file_name) self.data_snapshot = data_snapshot self.canary_local_application_path = canary_local_application_path self.s3_file_name_in_zip = s3_file_name_in_zip self.s3_file_name_in_zip_only_path = None self.s3_file_name_in_zip_only_extension = None if (self.s3_file_name_in_zip != None): self.s3_file_name_in_zip_only_path, self.s3_file_name_in_zip_only_extension = os.path.splitext(s3_file_name_in_zip) self.s3_file_needs_replacing = False self.had_internal_error = False self.error_due_to_credentials = False self.internal_error_reason = "" # Check for valid credentials # ================== try: tmp_sts_client = boto3.client('sts') tmp_sts_client.get_caller_identity() except Exception as e: self.print_message("[S3Monitor] ERROR - (S3 Check) AWS credentials are NOT valid!") self.print_message(f"[S3Monitor] ERROR - Exception {repr(e)}") self.had_internal_error = True self.error_due_to_credentials = True self.internal_error_reason = "AWS credentials are NOT valid!"
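# --------------------------------------------------------------------------------
# Illustrative sketch (NOT part of the original wrapper): a minimal loop that combines
# an ApplicationMonitor with an S3Monitor, roughly in the spirit of the entry-point
# scripts that consume these classes (the real entry-point script is not shown here).
# `application_monitor` and `s3_monitor` are placeholder names assumed for the example.
import time

def example_application_monitor_loop(application_monitor, s3_monitor, loop_wait_seconds=30):
    application_monitor.start_monitoring()
    while not application_monitor.error_has_occurred:
        # See whether a newer canary build has been uploaded to S3
        s3_monitor.monitor_loop_function(time_passed=loop_wait_seconds)
        if s3_monitor.s3_file_needs_replacing:
            # Swap in the new executable and relaunch the canary
            application_monitor.stop_monitoring()
            s3_monitor.replace_current_file_for_new_file()
            application_monitor.start_monitoring()
        # Check whether the canary is still running (restart it or record an error)
        application_monitor.monitor_loop_function(time_passed=loop_wait_seconds)
        time.sleep(loop_wait_seconds)
    print("Canary stopped: " + application_monitor.error_reason, flush=True)
# --------------------------------------------------------------------------------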
return # ================== try: self.s3_client = boto3.client("s3") except Exception as e: self.print_message("[S3Monitor] ERROR - (S3 Check) Could not make S3 client") self.print_message(f"[S3Monitor] ERROR - Exception {repr(e)}") self.had_internal_error = True self.internal_error_reason = "Could not make S3 client for S3 Monitor" return def check_for_file_change(self): try: version_check_response = self.s3_client.list_object_versions( Bucket=self.s3_bucket_name, Prefix=self.s3_file_name_only_path) if "Versions" in version_check_response: for version in version_check_response["Versions"]: if (version["IsLatest"] == True): if (version["VersionId"] != self.s3_current_object_version_id or version["LastModified"] != self.s3_current_object_last_modified): self.print_message("[S3Monitor] Found new version of Canary/Application in S3!") self.print_message("[S3Monitor] Changing running Canary/Application to new one...") # Will be checked by thread to trigger replacing the file self.s3_file_needs_replacing = True self.s3_current_object_version_id = version["VersionId"] self.s3_current_object_last_modified = version["LastModified"] return except Exception as e: self.print_message(f"[S3Monitor] ERROR - Could not check for new version of file in S3 due to exception") self.print_message(f"[S3Monitor] ERROR - Exception {repr(e)}") self.print_message("[S3Monitor] Going to try again later - will not crash Canary") def replace_current_file_for_new_file(self): try: self.print_message("[S3Monitor] Making directory...") if not os.path.exists("tmp"): os.makedirs("tmp") except Exception as e: self.print_message ("[S3Monitor] ERROR - could not make tmp directory to place S3 file into!") self.print_message(f"[S3Monitor] ERROR - Exception {repr(e)}") self.had_internal_error = True self.internal_error_reason = "Could not make TMP folder for S3 file download" return # Download the file new_file_path = "tmp/new_file" + self.s3_file_name_only_extension try: self.print_message("[S3Monitor] Downloading file...") s3_resource = boto3.resource("s3") s3_resource.meta.client.download_file(self.s3_bucket_name, self.s3_file_name, new_file_path) except Exception as e: self.print_message("[S3Monitor] ERROR - could not download latest S3 file into TMP folder!") self.print_message(f"[S3Monitor] ERROR - Exception {repr(e)}") self.had_internal_error = True self.internal_error_reason = "Could not download latest S3 file into TMP folder" return # Is it a zip file? if (self.s3_file_name_in_zip != None): self.print_message("[S3Monitor] New file is zip file. Unzipping...") # Unzip it! with zipfile.ZipFile(new_file_path, 'r') as zip_file: zip_file.extractall("tmp/new_file_zip") new_file_path = "tmp/new_file_zip/" + self.s3_file_name_in_zip_only_path + self.s3_file_name_in_zip_only_extension try: # is there a file already present there? 
if os.path.exists(self.canary_local_application_path) == True: os.remove(self.canary_local_application_path) self.print_message("[S3Monitor] Moving file...") os.replace(new_file_path, self.canary_local_application_path) self.print_message("[S3Monitor] Getting execution rights...") os.system("chmod u+x " + self.canary_local_application_path) except Exception as e: self.print_message("[S3Monitor] ERROR - could not move file into local application path due to exception!") self.print_message(f"[S3Monitor] ERROR - Exception {repr(e)}") self.had_internal_error = True self.internal_error_reason = "Could not move file into local application path" return self.print_message("[S3Monitor] New file downloaded and moved into correct location!") self.s3_file_needs_replacing = False def stop_monitoring(self): # Stub - just added for consistency pass def start_monitoring(self): # Stub - just added for consistency pass def restart_monitoring(self): # Stub - just added for consistency pass def cleanup_monitor(self): # Stub - just added for consistency pass def monitor_loop_function(self, time_passed=30): self.check_for_file_change() def print_message(self, message): if (self.data_snapshot != None): self.data_snapshot.print_message(message) else: print(message, flush=True) # ================================================================================ # Cuts a ticket to SIM using a temporary Cloudwatch metric that is quickly created, triggered, and destroyed. # Can be called in any thread - creates its own Cloudwatch client and any data it needs is passed in. # # See (https://w.amazon.com/bin/view/CloudWatchAlarms/Internal/CloudWatchAlarmsSIMTicketing) for more details # on how the alarm is sent using Cloudwatch. def cut_ticket_using_cloudwatch( ticket_description="Description here!", ticket_reason="Reason here!", ticket_severity=5, ticket_category="AWS", ticket_type="SDKs and Tools", ticket_item="IoT SDK for CPP", ticket_group="AWS IoT Device SDK", ticket_allow_duplicates=False, git_repo_name="REPO NAME", git_hash="HASH", git_hash_as_namespace=False, git_fixed_namespace_text="mqtt5_canary", cloudwatch_region="us-east-1"): git_metric_namespace = "" if (git_hash_as_namespace == False): git_metric_namespace = git_fixed_namespace_text else: git_namespace_prepend_text = git_repo_name + "-" + git_hash git_metric_namespace = git_namespace_prepend_text try: cloudwatch_client = boto3.client('cloudwatch', cloudwatch_region) ticket_alarm_name = git_repo_name + "-" + git_hash + "-AUTO-TICKET" except Exception as e: print (f"ERROR - could not create Cloudwatch client to make ticket metric alarm due to exception", flush=True) print(f"ERROR - Exception {repr(e)}", flush=True) return new_metric_dimensions = [] if (git_hash_as_namespace == False): git_namespace_prepend_text = git_repo_name + "-" + git_hash new_metric_dimensions.append( {"Name": git_namespace_prepend_text, "Value": ticket_alarm_name}) else: new_metric_dimensions.append( {"Name": "System_Metrics", "Value": ticket_alarm_name}) ticket_arn = f"arn:aws:cloudwatch::cwa-internal:ticket:{ticket_severity}:{ticket_category}:{ticket_type}:{ticket_item}:{ticket_group}:" if (ticket_allow_duplicates == True): # use "DO-NOT-DEDUPE" so we can run the same commit again and it will cut another ticket. 
ticket_arn += "DO-NOT-DEDUPE" # In the ticket ARN, all spaces need to be replaced with + ticket_arn = ticket_arn.replace(" ", "+") ticket_alarm_description = f"AUTO CUT CANARY WRAPPER TICKET\n\nREASON: {ticket_reason}\n\nDESCRIPTION: {ticket_description}\n\n" # Register a metric alarm so it can auto-cut a ticket for us try: cloudwatch_client.put_metric_alarm( AlarmName=ticket_alarm_name, AlarmDescription=ticket_alarm_description, MetricName=ticket_alarm_name, Namespace=git_metric_namespace, Statistic="Maximum", Dimensions=new_metric_dimensions, Period=60, # How long (in seconds) is an evaluation period? EvaluationPeriods=1, # How many periods does it need to be invalid for? DatapointsToAlarm=1, # How many data points need to be invalid? Threshold=1, ComparisonOperator="GreaterThanOrEqualToThreshold", # The data above does not really matter - it just needs to be valid input data. # This is the part that tells Cloudwatch to cut the ticket AlarmActions=[ticket_arn] ) except Exception as e: print (f"ERROR - could not create ticket metric alarm due to exception", flush=True) print(f"ERROR - Exception {repr(e)}", flush=True) return # Trigger the alarm so it cuts the ticket try: cloudwatch_client.set_alarm_state( AlarmName=ticket_alarm_name, StateValue="ALARM", StateReason="AUTO TICKET CUT") except Exception as e: print (f"ERROR - could not cut ticket due to exception", flush=True) print(f"ERROR - Exception {repr(e)}", flush=True) return print("Waiting for ticket metric to trigger...", flush=True) # Wait a little bit (2 seconds)... time.sleep(2) # Remove the metric print("Removing ticket metric...", flush=True) cloudwatch_client.delete_alarms(AlarmNames=[ticket_alarm_name]) print ("Finished cutting ticket via Cloudwatch!", flush=True) return # A helper function that gets the majority of the ticket information from the arguments result from argparser. def cut_ticket_using_cloudwatch_from_args( ticket_description="", ticket_reason="", ticket_severity=6, arguments=None): # Do not cut a ticket for a severity of 6+ if (ticket_severity >= 6): return cut_ticket_using_cloudwatch( ticket_description=ticket_description, ticket_reason=ticket_reason, ticket_severity=ticket_severity, ticket_category=arguments.ticket_category, ticket_type=arguments.ticket_type, ticket_item=arguments.ticket_item, ticket_group=arguments.ticket_group, ticket_allow_duplicates=False, git_repo_name=arguments.git_repo_name, git_hash=arguments.git_hash, git_hash_as_namespace=arguments.git_hash_as_namespace) # ================================================================================ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/codebuild/CanaryWrapper_MetricFunctions.py000066400000000000000000000033711456575232400304770ustar00rootroot00000000000000# Contains all of the metric reporting functions for the Canary Wrappers # Needs to be installed prior to running import psutil cache_cpu_psutil_process = None def get_metric_total_cpu_usage(psutil_process : psutil.Process): global cache_cpu_psutil_process try: if (psutil_process == None): print ("ERROR - No psutil.process passed! Cannot gather metric!", flush=True) return None # We always need to skip the first CPU poll if (cache_cpu_psutil_process != psutil_process): psutil.cpu_percent(interval=None) cache_cpu_psutil_process = psutil_process return None return psutil.cpu_percent(interval=None) except Exception as e: print ("ERROR - exception occurred gathering metrics!") print (f"Exception: {repr(e)}", flush=True) return None # Note: This value is in BYTES. 
def get_metric_total_memory_usage_value(psutil_process : psutil.Process): try: if (psutil_process == None): print ("ERROR - No psutil.process passed! Cannot gather metric!", flush=True) return None return psutil.virtual_memory()[3] except Exception as e: print ("ERROR - exception occurred gathering metrics!") print (f"Exception: {repr(e)}", flush=True) return None def get_metric_total_memory_usage_percent(psutil_process : psutil.Process): try: if (psutil_process == None): print ("ERROR - No psutil.process passed! Cannot gather metric!", flush=True) return None return psutil.virtual_memory()[2] except Exception as e: print ("ERROR - exception occurred gathering metrics!") print (f"Exception: {repr(e)}", flush=True) return None aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/codebuild/mqtt-canary-test.yml000066400000000000000000000053351456575232400261150ustar00rootroot00000000000000version: 0.2 env: shell: bash variables: CANARY_DURATION: 25200 CANARY_THREADS: 3 CANARY_TPS: 50 CANARY_CLIENT_COUNT: 10 CANARY_LOG_FILE: 'canary_log.txt' CANARY_LOG_LEVEL: 'ERROR' BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: 'aws-c-mqtt' CANARY_TEST_EXE_PATH: build/install/bin/mqtt5canary CANARY_SERVER_ARN: Mqtt5MosquittoSever CANARY_BUILD_S3_DST: mqtt5-canary/s3 phases: install: commands: # install c++ dev libraries for codebuild environment. - sudo yum update -y - sudo yum groupinstall "Development Tools" -y # Install necessary lib for canary wrapper - sudo yum install gcc python3-dev -y - sudo yum install pip -y - python3 -m pip install psutil - python3 -m pip install boto3 # Install Cmake3 - wget https://cmake.org/files/v3.18/cmake-3.18.0.tar.gz - tar -xvzf cmake-3.18.0.tar.gz - cd cmake-3.18.0 - ./bootstrap - make - sudo make install - cd .. build: commands: - export CANNARY_TEST_EXE=$CODEBUILD_SRC_DIR/$CANARY_TEST_EXE_PATH - echo $CANNARY_TEST_EXE - export ENDPOINT=$(aws secretsmanager get-secret-value --secret-id "$CANARY_SERVER_ARN" --query "SecretString" | cut -f2 -d":" | sed -e 's/[\\\"\}]//g') - export S3_DST=$(aws secretsmanager get-secret-value --secret-id "$CANARY_BUILD_S3_DST" --query "SecretString" | cut -f2,3 -d":" | sed -e 's/[\\\"\}]//g') - export GIT_HASH=$(git rev-parse HEAD) # Build library and test - python3 -c "from urllib.request import urlretrieve; urlretrieve('$BUILDER_HOST/$BUILDER_SOURCE/$BUILDER_VERSION/builder.pyz?run=$CODEBUILD_BUILD_ID', 'builder.pyz')" - python3 builder.pyz build -p aws-c-mqtt # Canary related: # ========== - echo run canary test through wrapper # Start canary - python3 codebuild/CanaryWrapper.py --canary_executable $CANNARY_TEST_EXE --canary_arguments "-s ${CANARY_DURATION} -t ${CANARY_THREADS} -T ${CANARY_TPS} -C ${CANARY_CLIENT_COUNT} -l ${CANARY_LOG_FILE} -v ${CANARY_LOG_LEVEL} endpoint ${ENDPOINT}" --git_hash ${GIT_HASH} --git_repo_name $PACKAGE_NAME --codebuild_log_path $CODEBUILD_LOG_PATH - aws s3 cp ./${CANARY_LOG_FILE} ${S3_DST}log/${GIT_HASH}/ # Upload built canary test build result to s3 bucket - zip -r latestBuild.zip build/install - aws s3 cp ./latestBuild.zip ${S3_DST}build/latest # Upload latest source to S3 bucket - find * -type f ! 
-perm +r -exec zip latestSnapshot.zip {} + - aws s3 cp ./latestSnapshot.zip ${S3_DST}source/latest # ========== post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/format-check.sh000077500000000000000000000007771456575232400231320ustar00rootroot00000000000000#!/usr/bin/env bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find bin source include tests -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/000077500000000000000000000000001456575232400216405ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/000077500000000000000000000000001456575232400224325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/000077500000000000000000000000001456575232400234175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/client.h000066400000000000000000000700071456575232400250520ustar00rootroot00000000000000#ifndef AWS_MQTT_CLIENT_H #define AWS_MQTT_CLIENT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL /* forward declares */ struct aws_client_bootstrap; struct aws_http_header; struct aws_http_message; struct aws_http_proxy_options; struct aws_mqtt5_client; struct aws_socket_options; struct aws_tls_connection_options; /** * Empty struct that is passed when on_connection_closed is called. * Currently holds nothing but will allow expanding in the future should it be needed. */ struct on_connection_closed_data; struct aws_mqtt_client { struct aws_allocator *allocator; struct aws_client_bootstrap *bootstrap; struct aws_ref_count ref_count; }; struct aws_mqtt_client_connection; /** * Callback called when a request roundtrip is complete (QoS0 immediately, QoS1 on PUBACK, QoS2 on PUBCOMP). Either * succeed or not */ typedef void(aws_mqtt_op_complete_fn)( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata); /** * Called when a connection attempt is completed, either in success or error. * * If error code is AWS_ERROR_SUCCESS, then a CONNACK has been received from the server and return_code and * session_present contain the values received. If error_code is not AWS_ERROR_SUCCESS, it refers to the internal error * that occurred during connection, and return_code and session_present are invalid. */ typedef void(aws_mqtt_client_on_connection_complete_fn)( struct aws_mqtt_client_connection *connection, int error_code, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata); /* Called when a connection attempt succeed (with a successful CONNACK) * * The callback is derived from aws_mqtt_client_on_connection_complete_fn. * It gets triggered when connection succeed (with a successful CONNACK) */ typedef void(aws_mqtt_client_on_connection_success_fn)( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata); /* Called if the connection attempt failed. * * The callback is derived from aws_mqtt_client_on_connection_complete_fn. 
* It gets triggered when connection failed. */ typedef void(aws_mqtt_client_on_connection_failure_fn)( struct aws_mqtt_client_connection *connection, int error_code, void *userdata); /* Called if the connection to the server is lost. */ typedef void(aws_mqtt_client_on_connection_interrupted_fn)( struct aws_mqtt_client_connection *connection, int error_code, void *userdata); /** * Called if the connection to the server is closed by user request * Note: Currently the "data" argument is always NULL, but this may change in the future if additional data is needed to * be sent. */ typedef void(aws_mqtt_client_on_connection_closed_fn)( struct aws_mqtt_client_connection *connection, struct on_connection_closed_data *data, void *userdata); /** * Called when a connection to the server is resumed * (if clean_session is true, calling aws_mqtt_resubscribe_existing_topics is suggested) */ typedef void(aws_mqtt_client_on_connection_resumed_fn)( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata); /** * Called when a multi-topic subscription request is complete. * Note: If any topic_suback's qos value is AWS_MQTT_QOS_FAILURE, * then that topic subscription was rejected by the broker. */ typedef void(aws_mqtt_suback_multi_fn)( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_array_list *topic_subacks, /* contains aws_mqtt_topic_subscription pointers */ int error_code, void *userdata); /** * Called when a single-topic subscription request is complete. * Note: If the qos value is AWS_MQTT_QOS_FAILURE, * then the subscription was rejected by the broker. */ typedef void(aws_mqtt_suback_fn)( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, int error_code, void *userdata); /** * Called when a publish message is received. * * \param[in] connection The connection object * \param[in] topic The information channel to which the payload data was published. * \param[in] payload The payload data. * \param[in] dup DUP flag. If true, this might be re-delivery of an earlier attempt to send the message. * \param[in] qos Quality of Service used to deliver the message. * \param[in] retain Retain flag. If true, the message was sent as a result of a new subscription being * made by the client. */ typedef void(aws_mqtt_client_publish_received_fn)( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata); /** Called when a connection is closed, right before any resources are deleted */ typedef void(aws_mqtt_client_on_disconnect_fn)(struct aws_mqtt_client_connection *connection, void *userdata); /** * Signature of callback invoked on a connection destruction. */ typedef void(aws_mqtt_client_on_connection_termination_fn)(void *userdata); /** * Function to invoke when the websocket handshake request transformation completes. * This function MUST be invoked or the application will soft-lock. * * `request` and `complete_ctx` must be the same pointers provided to the `aws_mqtt_transform_websocket_handshake_fn`. * `error_code` should should be AWS_ERROR_SUCCESS if transformation was successful, * otherwise pass a different AWS_ERROR_X value. 
*/ typedef void(aws_mqtt_transform_websocket_handshake_complete_fn)( struct aws_http_message *request, int error_code, void *complete_ctx); /** * Function that may transform the websocket handshake request. * Called each time a websocket connection is attempted. * * The default request uses path "/mqtt". All required headers are present, * plus the optional header "Sec-WebSocket-Protocol: mqtt". * * The user MUST invoke the `complete_fn` when transformation is complete or the application will soft-lock. * When invoking the `complete_fn`, pass along the `request` and `complete_ctx` provided here and an error code. * The error code should be AWS_ERROR_SUCCESS if transformation was successful, * otherwise pass a different AWS_ERROR_X value. */ typedef void(aws_mqtt_transform_websocket_handshake_fn)( struct aws_http_message *request, void *user_data, aws_mqtt_transform_websocket_handshake_complete_fn *complete_fn, void *complete_ctx); /** * Function that may accept or reject a websocket handshake response. * Called each time a valid websocket connection is established. * * All required headers have been checked already (ex: "Sec-Websocket-Accept"), * * Return AWS_OP_SUCCESS to accept the connection or AWS_OP_ERR to stop the connection attempt. */ typedef int aws_mqtt_validate_websocket_handshake_fn( struct aws_mqtt_client_connection *connection, const struct aws_http_header *header_array, size_t num_headers, void *userdata); /** Passed to subscribe() and suback callbacks */ struct aws_mqtt_topic_subscription { struct aws_byte_cursor topic; enum aws_mqtt_qos qos; aws_mqtt_client_publish_received_fn *on_publish; aws_mqtt_userdata_cleanup_fn *on_cleanup; void *on_publish_ud; }; /** * host_name The server name to connect to. This resource may be freed immediately on return. * port The port on the server to connect to * client_id The clientid to place in the CONNECT packet. * socket_options The socket options to pass to the aws_client_bootstrap functions. * This is copied into the connection * tls_options TLS settings to use when opening a connection. * This is copied into the connection * Pass NULL to connect without TLS (NOT RECOMMENDED) * clean_session True to discard all server session data and start fresh * keep_alive_time_secs The keep alive value to place in the CONNECT PACKET, a PING will automatically * be sent at this interval as well. If you specify 0, defaults will be used * and a ping will be sent once per 20 minutes. * This duration must be longer than ping_timeout_ms. * ping_timeout_ms Network connection is re-established if a ping response is not received * within this amount of time (milliseconds). If you specify 0, a default value of 3 seconds * is used. Alternatively, tcp keep-alive may be away to accomplish this in a more efficient * (low-power) scenario, but keep-alive options may not work the same way on every platform * and OS version. This duration must be shorter than keep_alive_time_secs. * protocol_operation_timeout_ms * Timeout when waiting for the response to some operation requires response by protocol. * Set to zero to disable timeout. Otherwise, the operation will fail with error * AWS_ERROR_MQTT_TIMEOUT if no response is received within this amount of time after * the packet is written to the socket. The timer is reset if the connection is interrupted. * It applied to PUBLISH (QoS>0) and UNSUBSCRIBE now. * Note: While the MQTT 3 specification states that a broker MUST respond, * some brokers are known to ignore publish packets in exceptional circumstances * (e.g. 
AWS IoT Core will not respond if the publish quota is exceeded). * on_connection_complete The callback to fire when the connection attempt completes * user_data Passed to the userdata param of on_connection_complete */ struct aws_mqtt_connection_options { struct aws_byte_cursor host_name; uint32_t port; struct aws_socket_options *socket_options; struct aws_tls_connection_options *tls_options; struct aws_byte_cursor client_id; uint16_t keep_alive_time_secs; uint32_t ping_timeout_ms; uint32_t protocol_operation_timeout_ms; aws_mqtt_client_on_connection_complete_fn *on_connection_complete; void *user_data; bool clean_session; }; /** * Contains some simple statistics about the current state of the connection's queue of operations */ struct aws_mqtt_connection_operation_statistics { /** * total number of operations submitted to the connection that have not yet been completed. Unacked operations * are a subset of this. */ uint64_t incomplete_operation_count; /** * total packet size of operations submitted to the connection that have not yet been completed. Unacked operations * are a subset of this. */ uint64_t incomplete_operation_size; /** * total number of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. */ uint64_t unacked_operation_count; /** * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. */ uint64_t unacked_operation_size; }; AWS_EXTERN_C_BEGIN /** * Creates an instance of aws_mqtt_client. * * \param[in] allocator The allocator the client will use for all future allocations * \param[in] bootstrap The client bootstrap to use to initiate new socket connections * * \returns a new instance of an aws_mqtt_client if successful, NULL otherwise */ AWS_MQTT_API struct aws_mqtt_client *aws_mqtt_client_new(struct aws_allocator *allocator, struct aws_client_bootstrap *bootstrap); /** * Increments the ref count to an mqtt client, allowing the caller to take a reference to it * * \param[in] client The client to increment the ref count on * * \returns the mqtt client */ AWS_MQTT_API struct aws_mqtt_client *aws_mqtt_client_acquire(struct aws_mqtt_client *client); /** * Decrements the ref count on an mqtt client. If the ref count drops to zero, the client is cleaned up. * * \param[in] client The client to release a ref count on */ AWS_MQTT_API void aws_mqtt_client_release(struct aws_mqtt_client *client); /** * Spawns a new connection object. * * \param[in] client The client to spawn the connection from * * \returns a new mqtt connection on success, NULL otherwise */ AWS_MQTT_API struct aws_mqtt_client_connection *aws_mqtt_client_connection_new(struct aws_mqtt_client *client); /** * Creates a new MQTT311 connection object that uses an MQTT5 client under the hood * * \param[in] client The mqtt5 client to create the connection from * * \returns a new mqtt (311) connection on success, NULL otherwise */ AWS_MQTT_API struct aws_mqtt_client_connection *aws_mqtt_client_connection_new_from_mqtt5_client(struct aws_mqtt5_client *client); /** * Increments the ref count to an mqtt client connection, allowing the caller to take a reference to it * * \param[in] connection The connection object * * \returns the mqtt connection */ AWS_MQTT_API struct aws_mqtt_client_connection *aws_mqtt_client_connection_acquire(struct aws_mqtt_client_connection *connection); /** * Decrements the ref count on an mqtt connection. 
If the ref count drops to zero, the connection is cleaned up. * Note: cannot call this with lock held, since it will start the destroy process and cause a dead lock. * * \param[in] connection The connection object */ AWS_MQTT_API void aws_mqtt_client_connection_release(struct aws_mqtt_client_connection *connection); /** * Sets the will message to send with the CONNECT packet. * * \param[in] connection The connection object * \param[in] topic The topic to publish the will on * \param[in] qos The QoS to publish the will with * \param[in] retain The retain flag to publish the will with * \param[in] payload The data if the will message */ AWS_MQTT_API int aws_mqtt_client_connection_set_will( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload); /** * Sets the username and/or password to send with the CONNECT packet. * * \param[in] connection The connection object * \param[in] username The username to connect with * \param[in] password [optional] The password to connect with */ AWS_MQTT_API int aws_mqtt_client_connection_set_login( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *username, const struct aws_byte_cursor *password); /** * Use MQTT over websockets when connecting. * Requires the MQTT_WITH_WEBSOCKETS build option. * * In this scenario, an HTTP connection is established, which is then upgraded to a websocket connection, * which is then used to send MQTT data. * * \param[in] connection The connection object. * \param[in] transformer [optional] Function that may transform the websocket handshake request. * See `aws_mqtt_transform_websocket_handshake_fn` for more info. * \param[in] transformer_ud [optional] Userdata for request_transformer. * \param[in] validator [optional] Function that may reject the websocket handshake response. * \param[in] validator_ud [optional] Userdata for response_validator. */ AWS_MQTT_API int aws_mqtt_client_connection_use_websockets( struct aws_mqtt_client_connection *connection, aws_mqtt_transform_websocket_handshake_fn *transformer, void *transformer_ud, aws_mqtt_validate_websocket_handshake_fn *validator, void *validator_ud); /** * Set http proxy options for the connection. */ AWS_MQTT_API int aws_mqtt_client_connection_set_http_proxy_options( struct aws_mqtt_client_connection *connection, struct aws_http_proxy_options *proxy_options); /** * Set host resolution ooptions for the connection. */ AWS_MQTT_API int aws_mqtt_client_connection_set_host_resolution_options( struct aws_mqtt_client_connection *connection, const struct aws_host_resolution_config *host_resolution_config); /** * Sets the minimum and maximum reconnect timeouts. * * The time between reconnect attempts will start at min and multiply by 2 until max is reached. 
* * \param[in] connection The connection object * \param[in] min_timeout The timeout to start with * \param[in] max_timeout The highest allowable wait time between reconnect attempts */ AWS_MQTT_API int aws_mqtt_client_connection_set_reconnect_timeout( struct aws_mqtt_client_connection *connection, uint64_t min_timeout, uint64_t max_timeout); /** * Sets the callbacks to call when a connection succeeds or fails * * \param[in] connection The connection object * \param[in] on_connection_success The function to call when a connection is successful or gets resumed * \param[in] on_connection_success_ud Userdata for on_connection_success * \param[in] on_connection_failure The function to call when a connection fails * \param[in] on_connection_failure_ud Userdata for on_connection_failure */ AWS_MQTT_API int aws_mqtt_client_connection_set_connection_result_handlers( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_success_fn *on_connection_success, void *on_connection_success_ud, aws_mqtt_client_on_connection_failure_fn *on_connection_failure, void *on_connection_failure_ud); /** * Sets the callbacks to call when a connection is interrupted and resumed. * * \param[in] connection The connection object * \param[in] on_interrupted The function to call when a connection is lost * \param[in] on_interrupted_ud Userdata for on_interrupted * \param[in] on_resumed The function to call when a connection is resumed (if clean_session is true, calling aws_mqtt_resubscribe_existing_topics is suggested) * \param[in] on_resumed_ud Userdata for on_resumed */ AWS_MQTT_API int aws_mqtt_client_connection_set_connection_interruption_handlers( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_interrupted_fn *on_interrupted, void *on_interrupted_ud, aws_mqtt_client_on_connection_resumed_fn *on_resumed, void *on_resumed_ud); /** * Sets the callback to call when the connection is closed normally by user request. * This is different than the connection interrupted or lost, this only covers successful * closure. * * \param[in] connection The connection object * \param[in] on_closed The function to call when a connection is closed * \param[in] on_closed_ud Userdata for on_closed */ AWS_MQTT_API int aws_mqtt_client_connection_set_connection_closed_handler( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_closed_fn *on_closed, void *on_closed_ud); /** * Sets the callback to call whenever ANY publish packet is received. Only safe to set when connection is not connected. * * \param[in] connection The connection object * \param[in] on_any_publish The function to call when a publish is received (pass NULL to unset) * \param[in] on_any_publish_ud Userdata for on_any_publish */ AWS_MQTT_API int aws_mqtt_client_connection_set_on_any_publish_handler( struct aws_mqtt_client_connection *connection, aws_mqtt_client_publish_received_fn *on_any_publish, void *on_any_publish_ud); /** * Sets the callback to call on a connection destruction. * * \param[in] connection The connection object. * \param[in] on_termination The function to call when a connection is destroyed. * \param[in] on_termination_ud Userdata for on_termination. */ AWS_MQTT_API int aws_mqtt_client_connection_set_connection_termination_handler( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_termination_fn *on_termination, void *on_termination_ud); /** * Opens the actual connection defined by aws_mqtt_client_connection_new. 
* Once the connection is opened, on_connack will be called. Only called when connection is disconnected. * * \param[in] connection The connection object * \param[in] connection_options Configuration information for the connection attempt * * \returns AWS_OP_SUCCESS if the connection has been successfully initiated, * otherwise AWS_OP_ERR and aws_last_error() will be set. */ AWS_MQTT_API int aws_mqtt_client_connection_connect( struct aws_mqtt_client_connection *connection, const struct aws_mqtt_connection_options *connection_options); /** * DEPRECATED * Opens the actual connection defined by aws_mqtt_client_connection_new. * Once the connection is opened, on_connack will be called. * * Must be called on a connection that has previously been open, * as the parameters passed during the last connection will be reused. * * \param[in] connection The connection object * \param[in] on_connection_complete The callback to fire when the connection attempt completes * \param[in] userdata (nullable) Passed to the userdata param of on_connection_complete * * \returns AWS_OP_SUCCESS if the connection has been successfully initiated, * otherwise AWS_OP_ERR and aws_last_error() will be set. */ AWS_MQTT_API int aws_mqtt_client_connection_reconnect( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_complete_fn *on_connection_complete, void *userdata); /** * Closes the connection asynchronously, calls the on_disconnect callback. * All uncompleted requests (publish/subscribe/unsubscribe) will be cancelled, regardless to the status of * clean_session. DISCONNECT packet will be sent, which deletes the will message from server. * * \param[in] connection The connection to close * \param[in] on_disconnect (nullable) Callback function to invoke when the connection is completely disconnected. * \param[in] userdata (nullable) passed to on_disconnect * * \returns AWS_OP_SUCCESS if the connection is open and is being shutdown, * otherwise AWS_OP_ERR and aws_last_error() is set. */ AWS_MQTT_API int aws_mqtt_client_connection_disconnect( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_disconnect_fn *on_disconnect, void *userdata); /** * Subscribe to topic filters. on_publish will be called when a PUBLISH matching each topic_filter is received. * * \param[in] connection The connection to subscribe on * \param[in] topic_filters An array_list of aws_mqtt_topic_subscription (NOT pointers) describing the requests. * \param[in] on_suback (nullable) Called when a SUBACK has been received from the server and the subscription * is complete. Broker may fail one of the topics, check the qos in * aws_mqtt_topic_subscription from the callback * \param[in] on_suback_ud (nullable) Passed to on_suback * * \returns The packet id of the subscribe packet if successfully sent, otherwise 0. */ AWS_MQTT_API uint16_t aws_mqtt_client_connection_subscribe_multiple( struct aws_mqtt_client_connection *connection, const struct aws_array_list *topic_filters, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud); /** * Subscribe to a single topic filter. on_publish will be called when a PUBLISH matching topic_filter is received. * * \param[in] connection The connection to subscribe on * \param[in] topic_filter The topic filter to subscribe on. This resource must persist until on_suback. 
* \param[in] qos The maximum QoS of messages to receive * \param[in] on_publish (nullable) Called when a PUBLISH packet matching topic_filter is received * \param[in] on_publish_ud (nullable) Passed to on_publish * \param[in] on_ud_cleanup (nullable) Called when a subscription is removed, on_publish_ud is passed. * \param[in] on_suback (nullable) Called when a SUBACK has been received from the server and the subscription is * complete * \param[in] on_suback_ud (nullable) Passed to on_suback * * \returns The packet id of the subscribe packet if successfully sent, otherwise 0. */ AWS_MQTT_API uint16_t aws_mqtt_client_connection_subscribe( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_client_publish_received_fn *on_publish, void *on_publish_ud, aws_mqtt_userdata_cleanup_fn *on_ud_cleanup, aws_mqtt_suback_fn *on_suback, void *on_suback_ud); /** * Resubscribe to all topics currently subscribed to. This is to help when resuming a connection with a clean session. * * \param[in] connection The connection to subscribe on * \param[in] on_suback (nullable) Called when a SUBACK has been received from the server and the subscription is * complete * \param[in] on_suback_ud (nullable) Passed to on_suback * * \returns The packet id of the subscribe packet if successfully sent, otherwise 0 (and aws_last_error() will be set). */ AWS_MQTT_API uint16_t aws_mqtt_resubscribe_existing_topics( struct aws_mqtt_client_connection *connection, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud); /** * Unsubscribe to a topic filter. * * \param[in] connection The connection to unsubscribe on * \param[in] topic_filter The topic filter to unsubscribe on. This resource must persist until on_unsuback. * \param[in] on_unsuback (nullable) Called when a UNSUBACK has been received from the server and the subscription * is removed * \param[in] on_unsuback_ud (nullable) Passed to on_unsuback * * \returns The packet id of the unsubscribe packet if successfully sent, otherwise 0. */ AWS_MQTT_API uint16_t aws_mqtt_client_connection_unsubscribe( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic_filter, aws_mqtt_op_complete_fn *on_unsuback, void *on_unsuback_ud); /** * Send a PUBLISH packet over connection. * * \param[in] connection The connection to publish on * \param[in] topic The topic to publish on * \param[in] qos The requested QoS of the packet * \param[in] retain True to have the server save the packet, and send to all new subscriptions matching topic * \param[in] payload The data to send as the payload of the publish * \param[in] on_complete (nullable) For QoS 0, called as soon as the packet is sent * For QoS 1, called when PUBACK is received * For QoS 2, called when PUBCOMP is received * \param[in] user_data (nullable) Passed to on_complete * * \returns The packet id of the publish packet if successfully sent, otherwise 0. */ AWS_MQTT_API uint16_t aws_mqtt_client_connection_publish( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload, aws_mqtt_op_complete_fn *on_complete, void *userdata); /** * Queries the connection's internal statistics for incomplete/unacked operations. 
* \param connection connection to get statistics for * \param stats set of incomplete/unacked operation statistics * \returns AWS_OP_SUCCESS if getting the operation statistics were successful, AWS_OP_ERR otherwise */ AWS_MQTT_API int aws_mqtt_client_connection_get_stats( struct aws_mqtt_client_connection *connection, struct aws_mqtt_connection_operation_statistics *stats); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_MQTT_CLIENT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/exports.h000066400000000000000000000017251456575232400253010ustar00rootroot00000000000000#ifndef AWS_MQTT_EXPORTS_H #define AWS_MQTT_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_MQTT_USE_IMPORT_EXPORT # ifdef AWS_MQTT_EXPORTS # define AWS_MQTT_API __declspec(dllexport) # else # define AWS_MQTT_API __declspec(dllimport) # endif /* AWS_MQTT_EXPORTS */ # else # define AWS_MQTT_API # endif /* USE_IMPORT_EXPORT */ #else /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_MQTT_USE_IMPORT_EXPORT) && defined(AWS_MQTT_EXPORTS) # define AWS_MQTT_API __attribute__((visibility("default"))) # else # define AWS_MQTT_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */ #endif /* AWS_MQTT_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/mqtt.h000066400000000000000000000104551456575232400245620ustar00rootroot00000000000000#ifndef AWS_MQTT_MQTT_H #define AWS_MQTT_MQTT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_MQTT_PACKAGE_ID 5 /* Quality of Service associated with a publish action or subscription [MQTT-4.3]. */ enum aws_mqtt_qos { AWS_MQTT_QOS_AT_MOST_ONCE = 0x0, AWS_MQTT_QOS_AT_LEAST_ONCE = 0x1, AWS_MQTT_QOS_EXACTLY_ONCE = 0x2, /* reserved = 3 */ AWS_MQTT_QOS_FAILURE = 0x80, /* Only used in SUBACK packets */ }; /* Result of a connect request [MQTT-3.2.2.3]. 
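 *
 * These values are surfaced to user code through the connection callbacks
 * declared in client.h (for example the on_connection_complete callback). A
 * typical consumer simply checks for acceptance; handle_rejected_connect below
 * is a hypothetical application function:
 *
 *     if (return_code != AWS_MQTT_CONNECT_ACCEPTED) {
 *         handle_rejected_connect(return_code);
 *     }
 *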
*/ enum aws_mqtt_connect_return_code { AWS_MQTT_CONNECT_ACCEPTED, AWS_MQTT_CONNECT_UNACCEPTABLE_PROTOCOL_VERSION, AWS_MQTT_CONNECT_IDENTIFIER_REJECTED, AWS_MQTT_CONNECT_SERVER_UNAVAILABLE, AWS_MQTT_CONNECT_BAD_USERNAME_OR_PASSWORD, AWS_MQTT_CONNECT_NOT_AUTHORIZED, /* reserved = 6 - 255 */ }; enum aws_mqtt_error { AWS_ERROR_MQTT_INVALID_RESERVED_BITS = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_MQTT_PACKAGE_ID), AWS_ERROR_MQTT_BUFFER_TOO_BIG, AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH, AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME, AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_LEVEL, AWS_ERROR_MQTT_INVALID_CREDENTIALS, AWS_ERROR_MQTT_INVALID_QOS, AWS_ERROR_MQTT_INVALID_PACKET_TYPE, AWS_ERROR_MQTT_INVALID_TOPIC, AWS_ERROR_MQTT_TIMEOUT, AWS_ERROR_MQTT_PROTOCOL_ERROR, AWS_ERROR_MQTT_NOT_CONNECTED, AWS_ERROR_MQTT_ALREADY_CONNECTED, AWS_ERROR_MQTT_BUILT_WITHOUT_WEBSOCKETS, AWS_ERROR_MQTT_UNEXPECTED_HANGUP, AWS_ERROR_MQTT_CONNECTION_SHUTDOWN, AWS_ERROR_MQTT_CONNECTION_DESTROYED, AWS_ERROR_MQTT_CONNECTION_DISCONNECTING, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION, AWS_ERROR_MQTT_QUEUE_FULL, AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION, AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION, AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION, AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION, AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION, AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION, AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION, AWS_ERROR_MQTT5_PACKET_VALIDATION, AWS_ERROR_MQTT5_ENCODE_FAILURE, AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR, AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, AWS_ERROR_MQTT5_CONNACK_TIMEOUT, AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, AWS_ERROR_MQTT5_DISCONNECT_RECEIVED, AWS_ERROR_MQTT5_CLIENT_TERMINATED, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, AWS_ERROR_MQTT5_ENCODE_SIZE_UNSUPPORTED_PACKET_TYPE, AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE, AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS, AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS, AWS_ERROR_MQTT5_INVALID_UTF8_STRING, AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT, AWS_ERROR_MQTT_CONNECTION_RESUBSCRIBE_NO_TOPICS, AWS_ERROR_MQTT_CONNECTION_SUBSCRIBE_FAILURE, AWS_ERROR_END_MQTT_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_MQTT_PACKAGE_ID), }; enum aws_mqtt_log_subject { AWS_LS_MQTT_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_MQTT_PACKAGE_ID), AWS_LS_MQTT_CLIENT, AWS_LS_MQTT_TOPIC_TREE, AWS_LS_MQTT5_GENERAL, AWS_LS_MQTT5_CLIENT, AWS_LS_MQTT5_CANARY, AWS_LS_MQTT5_TO_MQTT3_ADAPTER, }; /** Function called on cleanup of a userdata. */ typedef void(aws_mqtt_userdata_cleanup_fn)(void *userdata); AWS_EXTERN_C_BEGIN AWS_MQTT_API bool aws_mqtt_is_valid_topic(const struct aws_byte_cursor *topic); AWS_MQTT_API bool aws_mqtt_is_valid_topic_filter(const struct aws_byte_cursor *topic_filter); /** * Validate utf-8 string under mqtt specs * * @param text * @return AWS_OP_SUCCESS if the text is validate, otherwise AWS_OP_ERR */ AWS_MQTT_API int aws_mqtt_validate_utf8_text(struct aws_byte_cursor text); /** * Initializes internal datastructures used by aws-c-mqtt. * Must be called before using any functionality in aws-c-mqtt. */ AWS_MQTT_API void aws_mqtt_library_init(struct aws_allocator *allocator); /** * Shuts down the internal datastructures used by aws-c-mqtt. 
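 * Pairs with aws_mqtt_library_init(); call it once all aws-c-mqtt objects have
 * been released. A minimal lifetime sketch (allocator choice is illustrative):
 *
 *     aws_mqtt_library_init(aws_default_allocator());
 *     ... create clients/connections, run the application ...
 *     aws_mqtt_library_clean_up();
 *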
*/ AWS_MQTT_API void aws_mqtt_library_clean_up(void); AWS_MQTT_API void aws_mqtt_fatal_assert_library_initialized(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_MQTT_MQTT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/000077500000000000000000000000001456575232400250715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/client_impl.h000066400000000000000000000405201456575232400275420ustar00rootroot00000000000000#ifndef AWS_MQTT_PRIVATE_CLIENT_IMPL_H #define AWS_MQTT_PRIVATE_CLIENT_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include struct aws_mqtt_client_connection_311_impl; #define MQTT_CLIENT_CALL_CALLBACK(client_ptr, callback) \ do { \ if ((client_ptr)->callback) { \ (client_ptr)->callback((&client_ptr->base), (client_ptr)->callback##_ud); \ } \ } while (false) #define MQTT_CLIENT_CALL_CALLBACK_ARGS(client_ptr, callback, ...) \ do { \ if ((client_ptr)->callback) { \ (client_ptr)->callback((&client_ptr->base), __VA_ARGS__, (client_ptr)->callback##_ud); \ } \ } while (false) #if ASSERT_LOCK_HELD # define ASSERT_SYNCED_DATA_LOCK_HELD(object) \ { \ int cached_error = aws_last_error(); \ AWS_ASSERT(aws_mutex_try_lock(&(object)->synced_data.lock) == AWS_OP_ERR); \ aws_raise_error(cached_error); \ } #else # define ASSERT_SYNCED_DATA_LOCK_HELD(object) #endif enum aws_mqtt_client_connection_state { AWS_MQTT_CLIENT_STATE_CONNECTING, AWS_MQTT_CLIENT_STATE_CONNECTED, AWS_MQTT_CLIENT_STATE_RECONNECTING, AWS_MQTT_CLIENT_STATE_DISCONNECTING, AWS_MQTT_CLIENT_STATE_DISCONNECTED, }; enum aws_mqtt_client_request_state { AWS_MQTT_CLIENT_REQUEST_ONGOING, AWS_MQTT_CLIENT_REQUEST_COMPLETE, AWS_MQTT_CLIENT_REQUEST_ERROR, }; /** * Contains some simple statistics about the current state of the connection's queue of operations */ struct aws_mqtt_connection_operation_statistics_impl { /** * total number of operations submitted to the connection that have not yet been completed. Unacked operations * are a subset of this. */ struct aws_atomic_var incomplete_operation_count_atomic; /** * total packet size of operations submitted to the connection that have not yet been completed. Unacked operations * are a subset of this. */ struct aws_atomic_var incomplete_operation_size_atomic; /** * total number of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. */ struct aws_atomic_var unacked_operation_count_atomic; /** * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. */ struct aws_atomic_var unacked_operation_size_atomic; }; /** * Called after the timeout if a matching ack packet hasn't arrived, with is_first_attempt set as false. * Or called when the request packet attempt to send firstly, with is_first_attempt set as true. * Return AWS_MQTT_CLIENT_REQUEST_ONGOING to check on the task later. * Return AWS_MQTT_CLIENT_REQUEST_COMPLETE to consider request complete. * Return AWS_MQTT_CLIENT_REQUEST_ERROR cancel the task and report an error to the caller. */ typedef enum aws_mqtt_client_request_state( aws_mqtt_send_request_fn)(uint16_t packet_id, bool is_first_attempt, void *userdata); /** * Called when the operation statistics change. 
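 *
 * A handler might simply re-query the public statistics; this is a sketch, and
 * the function name s_on_operation_statistics is hypothetical:
 *
 *     static void s_on_operation_statistics(
 *         struct aws_mqtt_client_connection_311_impl *connection, void *userdata) {
 *         struct aws_mqtt_connection_operation_statistics stats;
 *         aws_mqtt_client_connection_get_stats(&connection->base, &stats);
 *         ... log or record the new values ...
 *     }
 *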
*/ typedef void( aws_mqtt_on_operation_statistics_fn)(struct aws_mqtt_client_connection_311_impl *connection, void *userdata); /* Flags that indicate the way in which way an operation is currently affecting the statistics of the connection */ enum aws_mqtt_operation_statistic_state_flags { /* The operation is not affecting the connection's statistics at all */ AWS_MQTT_OSS_NONE = 0, /* The operation is affecting the connection's "incomplete operation" statistics */ AWS_MQTT_OSS_INCOMPLETE = 1 << 0, /* The operation is affecting the connection's "unacked operation" statistics */ AWS_MQTT_OSS_UNACKED = 1 << 1, }; struct aws_mqtt_request { struct aws_linked_list_node list_node; struct aws_allocator *allocator; struct aws_mqtt_client_connection_311_impl *connection; struct aws_channel_task outgoing_task; /* * The request send time. Currently used to push off keepalive packet. */ uint64_t request_send_timestamp; /* How this operation is currently affecting the statistics of the connection */ enum aws_mqtt_operation_statistic_state_flags statistic_state_flags; /* The encoded size of the packet - used for operation statistics tracking */ uint64_t packet_size; uint16_t packet_id; bool retryable; bool initiated; aws_mqtt_send_request_fn *send_request; void *send_request_ud; aws_mqtt_op_complete_fn *on_complete; void *on_complete_ud; }; struct aws_mqtt_reconnect_task { struct aws_task task; struct aws_atomic_var connection_ptr; struct aws_allocator *allocator; }; /* The lifetime of this struct is from subscribe -> suback */ struct subscribe_task_arg { struct aws_mqtt_client_connection_311_impl *connection; /* list of pointer of subscribe_task_topics */ struct aws_array_list topics; /* Packet to populate */ struct aws_mqtt_packet_subscribe subscribe; /* true if transaction was committed to the topic tree, false requires a retry */ bool tree_updated; struct { aws_mqtt_suback_multi_fn *multi; aws_mqtt_suback_fn *single; } on_suback; void *on_suback_ud; }; /* The lifetime of this struct is the same as the lifetime of the subscription */ struct subscribe_task_topic { struct aws_mqtt_client_connection_311_impl *connection; struct aws_mqtt_topic_subscription request; struct aws_string *filter; struct aws_ref_count ref_count; }; struct aws_mqtt_client_connection_311_impl { struct aws_allocator *allocator; struct aws_mqtt_client_connection base; struct aws_ref_count ref_count; struct aws_mqtt_client *client; /* Channel handler information */ struct aws_channel_handler handler; struct aws_channel_slot *slot; /* The host information, changed by user when state is AWS_MQTT_CLIENT_STATE_DISCONNECTED */ struct aws_string *host_name; uint32_t port; struct aws_tls_connection_options tls_options; struct aws_socket_options socket_options; struct aws_http_proxy_config *http_proxy_config; struct aws_event_loop *loop; struct aws_host_resolution_config host_resolution_config; /* Connect parameters */ struct aws_byte_buf client_id; bool clean_session; uint16_t keep_alive_time_secs; uint64_t keep_alive_time_ns; uint64_t ping_timeout_ns; uint64_t operation_timeout_ns; struct aws_string *username; struct aws_string *password; struct { struct aws_byte_buf topic; enum aws_mqtt_qos qos; bool retain; struct aws_byte_buf payload; } will; struct { uint64_t current_sec; /* seconds */ uint64_t min_sec; /* seconds */ uint64_t max_sec; /* seconds */ /* * Invariant: this is always zero except when the current MQTT channel has received a successful connack * and is not yet shutdown. 
During that interval, it is the timestamp the connack was received. */ uint64_t channel_successful_connack_timestamp_ns; } reconnect_timeouts; /* User connection callbacks */ aws_mqtt_client_on_connection_complete_fn *on_connection_complete; void *on_connection_complete_ud; aws_mqtt_client_on_connection_success_fn *on_connection_success; void *on_connection_success_ud; aws_mqtt_client_on_connection_failure_fn *on_connection_failure; void *on_connection_failure_ud; aws_mqtt_client_on_connection_interrupted_fn *on_interrupted; void *on_interrupted_ud; aws_mqtt_client_on_connection_resumed_fn *on_resumed; void *on_resumed_ud; aws_mqtt_client_on_connection_closed_fn *on_closed; void *on_closed_ud; aws_mqtt_client_publish_received_fn *on_any_publish; void *on_any_publish_ud; aws_mqtt_client_on_disconnect_fn *on_disconnect; void *on_disconnect_ud; aws_mqtt_client_on_connection_termination_fn *on_termination; void *on_termination_ud; aws_mqtt_on_operation_statistics_fn *on_any_operation_statistics; void *on_any_operation_statistics_ud; /* Connection tasks. */ struct aws_mqtt_reconnect_task *reconnect_task; struct aws_channel_task ping_task; /** * Number of times this connection has successfully CONNACK-ed, used * to ensure on_connection_completed is sent on the first completed * CONNECT/CONNACK cycle */ size_t connection_count; bool use_tls; /* Only used by main thread */ /* Only the event-loop thread may touch this data */ struct { struct aws_mqtt311_decoder decoder; bool waiting_on_ping_response; /* Keeps track of all open subscriptions */ /* TODO: The subscriptions are liveing with the connection object. So if the connection disconnect from one * endpoint and connect with another endpoint, the subscription tree will still be the same as before. */ struct aws_mqtt_topic_tree subscriptions; /** * List of all requests waiting for response. */ struct aws_linked_list ongoing_requests_list; } thread_data; /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */ struct { /* Note: never fire user callback with lock hold. */ struct aws_mutex lock; /* The state of the connection */ enum aws_mqtt_client_connection_state state; /** * Memory pool for all aws_mqtt_request. */ struct aws_memory_pool requests_pool; /** * Store all requests that is not completed including the pending requests. * * hash table from uint16_t (packet_id) to aws_mqtt_outstanding_request */ struct aws_hash_table outstanding_requests_table; /** * List of all requests that cannot be scheduled until the connection comes online. */ struct aws_linked_list pending_requests_list; /** * Remember the last packet ID assigned. * Helps us find the next free ID faster. */ uint16_t packet_id; } synced_data; struct { aws_mqtt_transform_websocket_handshake_fn *handshake_transformer; void *handshake_transformer_ud; aws_mqtt_validate_websocket_handshake_fn *handshake_validator; void *handshake_validator_ud; bool enabled; struct aws_http_message *handshake_request; } websocket; /** * The time that the next ping task should execute at. Note that this does not mean that * this IS when the ping task will execute, but rather that this is when the next ping * SHOULD execute. There may be an already scheduled PING task that will elapse sooner * than this time that has to be rescheduled. 
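 *
 * Conceptually (a sketch, not necessarily the exact implementation), every
 * outbound packet pushes the deadline out by the keep-alive interval:
 *
 *     next_ping_time = now + keep_alive_time_ns;
 *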
*/ uint64_t next_ping_time; /** * Statistics tracking operational state */ struct aws_mqtt_connection_operation_statistics_impl operation_statistics_impl; }; struct aws_channel_handler_vtable *aws_mqtt_get_client_channel_vtable(void); /* Helper for getting a message object for a packet */ struct aws_io_message *mqtt_get_message_for_packet( struct aws_mqtt_client_connection_311_impl *connection, struct aws_mqtt_fixed_header *header); void mqtt_connection_lock_synced_data(struct aws_mqtt_client_connection_311_impl *connection); void mqtt_connection_unlock_synced_data(struct aws_mqtt_client_connection_311_impl *connection); /* Note: needs to be called with lock held. */ void mqtt_connection_set_state( struct aws_mqtt_client_connection_311_impl *connection, enum aws_mqtt_client_connection_state state); /** * This function registers a new outstanding request and returns the message identifier to use (or 0 on error). * send_request will be called from request_timeout_task if everything succeed. Not called with error. * on_complete will be called once the request completed, either either in success or error. * noRetry is true for the packets will never be retried or offline queued. */ AWS_MQTT_API uint16_t mqtt_create_request( struct aws_mqtt_client_connection_311_impl *connection, aws_mqtt_send_request_fn *send_request, void *send_request_ud, aws_mqtt_op_complete_fn *on_complete, void *on_complete_ud, bool noRetry, uint64_t packet_size); /* Call when an ack packet comes back from the server. */ AWS_MQTT_API void mqtt_request_complete( struct aws_mqtt_client_connection_311_impl *connection, int error_code, uint16_t packet_id); /* Call to close the connection with an error code */ AWS_MQTT_API void mqtt_disconnect_impl(struct aws_mqtt_client_connection_311_impl *connection, int error_code); /* Creates the task used to reestablish a broken connection */ AWS_MQTT_API void aws_create_reconnect_task(struct aws_mqtt_client_connection_311_impl *connection); /** * Sets the callback to call whenever the operation statistics change. * * \param[in] connection The connection object * \param[in] on_operation_statistics The function to call when the operation statistics change (pass NULL to unset) * \param[in] on_operation_statistics_ud Userdata for on_operation_statistics */ AWS_MQTT_API int aws_mqtt_client_connection_set_on_operation_statistics_handler( struct aws_mqtt_client_connection_311_impl *connection, aws_mqtt_on_operation_statistics_fn *on_operation_statistics, void *on_operation_statistics_ud); /* * Sends a PINGREQ packet to the server to keep the connection alive. This is not exported and should not ever * be called directly. This function is driven by the timeout values passed to aws_mqtt_client_connect(). * If a PINGRESP is not received within a reasonable period of time, the connection will be closed. * * \params[in] connection The connection to ping on * * \returns AWS_OP_SUCCESS if the connection is open and the PINGREQ is sent or queued to send, * otherwise AWS_OP_ERR and aws_last_error() is set. */ int aws_mqtt_client_connection_ping(struct aws_mqtt_client_connection_311_impl *connection); /** * Changes the operation statistics for the passed-in aws_mqtt_request. Used for tracking * whether operations have been completed or not. * * NOTE: This function will get lock the synced data! 
Do NOT call with the synced data already * held or the function will deadlock trying to get the lock * * @param connection The connection whose operations are being tracked * @param request The request to change the state of * @param new_state_flags The new state to use */ void aws_mqtt_connection_statistics_change_operation_statistic_state( struct aws_mqtt_client_connection_311_impl *connection, struct aws_mqtt_request *request, enum aws_mqtt_operation_statistic_state_flags new_state_flags); AWS_MQTT_API const struct aws_mqtt_client_connection_packet_handlers *aws_mqtt311_get_default_packet_handlers(void); #endif /* AWS_MQTT_PRIVATE_CLIENT_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/client_impl_shared.h000066400000000000000000000102051456575232400310650ustar00rootroot00000000000000#ifndef AWS_MQTT_PRIVATE_CLIENT_IMPL_SHARED_H #define AWS_MQTT_PRIVATE_CLIENT_IMPL_SHARED_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_mqtt_client_connection; struct aws_mqtt_client_connection_vtable { struct aws_mqtt_client_connection *(*acquire_fn)(void *impl); void (*release_fn)(void *impl); int (*set_will_fn)( void *impl, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload); int (*set_login_fn)(void *impl, const struct aws_byte_cursor *username, const struct aws_byte_cursor *password); int (*use_websockets_fn)( void *impl, aws_mqtt_transform_websocket_handshake_fn *transformer, void *transformer_ud, aws_mqtt_validate_websocket_handshake_fn *validator, void *validator_ud); int (*set_http_proxy_options_fn)(void *impl, struct aws_http_proxy_options *proxy_options); int (*set_host_resolution_options_fn)(void *impl, const struct aws_host_resolution_config *host_resolution_config); int (*set_reconnect_timeout_fn)(void *impl, uint64_t min_timeout, uint64_t max_timeout); int (*set_connection_interruption_handlers_fn)( void *impl, aws_mqtt_client_on_connection_interrupted_fn *on_interrupted, void *on_interrupted_ud, aws_mqtt_client_on_connection_resumed_fn *on_resumed, void *on_resumed_ud); int (*set_connection_result_handlers)( void *impl, aws_mqtt_client_on_connection_success_fn *on_connection_success, void *on_connection_success_ud, aws_mqtt_client_on_connection_failure_fn *on_connection_failure, void *on_connection_failure_ud); int (*set_connection_closed_handler_fn)( void *impl, aws_mqtt_client_on_connection_closed_fn *on_closed, void *on_closed_ud); int (*set_on_any_publish_handler_fn)( void *impl, aws_mqtt_client_publish_received_fn *on_any_publish, void *on_any_publish_ud); int (*set_connection_termination_handler_fn)( void *impl, aws_mqtt_client_on_connection_termination_fn *on_termination, void *on_termination_ud); int (*connect_fn)(void *impl, const struct aws_mqtt_connection_options *connection_options); int (*reconnect_fn)(void *impl, aws_mqtt_client_on_connection_complete_fn *on_connection_complete, void *userdata); int (*disconnect_fn)(void *impl, aws_mqtt_client_on_disconnect_fn *on_disconnect, void *userdata); uint16_t (*subscribe_multiple_fn)( void *impl, const struct aws_array_list *topic_filters, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud); uint16_t (*subscribe_fn)( void *impl, const struct aws_byte_cursor *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_client_publish_received_fn *on_publish, void *on_publish_ud, aws_mqtt_userdata_cleanup_fn *on_ud_cleanup, aws_mqtt_suback_fn *on_suback, void 
*on_suback_ud); uint16_t (*resubscribe_existing_topics_fn)(void *impl, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud); uint16_t (*unsubscribe_fn)( void *impl, const struct aws_byte_cursor *topic_filter, aws_mqtt_op_complete_fn *on_unsuback, void *on_unsuback_ud); uint16_t (*publish_fn)( void *impl, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload, aws_mqtt_op_complete_fn *on_complete, void *userdata); int (*get_stats_fn)(void *impl, struct aws_mqtt_connection_operation_statistics *stats); }; struct aws_mqtt_client_connection { struct aws_mqtt_client_connection_vtable *vtable; void *impl; }; AWS_MQTT_API uint64_t aws_mqtt_hash_uint16_t(const void *item); AWS_MQTT_API bool aws_mqtt_compare_uint16_t_eq(const void *a, const void *b); AWS_MQTT_API bool aws_mqtt_byte_cursor_hash_equality(const void *a, const void *b); #endif /* AWS_MQTT_PRIVATE_CLIENT_IMPL_SHARED_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/fixed_header.h000066400000000000000000000034541456575232400276570ustar00rootroot00000000000000#ifndef AWS_MQTT_PRIVATE_FIXED_HEADER_H #define AWS_MQTT_PRIVATE_FIXED_HEADER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* Represents the types of the MQTT control packets [MQTT-2.2.1]. */ enum aws_mqtt_packet_type { /* reserved = 0, */ AWS_MQTT_PACKET_CONNECT = 1, AWS_MQTT_PACKET_CONNACK, AWS_MQTT_PACKET_PUBLISH, AWS_MQTT_PACKET_PUBACK, AWS_MQTT_PACKET_PUBREC, AWS_MQTT_PACKET_PUBREL, AWS_MQTT_PACKET_PUBCOMP, AWS_MQTT_PACKET_SUBSCRIBE, AWS_MQTT_PACKET_SUBACK, AWS_MQTT_PACKET_UNSUBSCRIBE, AWS_MQTT_PACKET_UNSUBACK, AWS_MQTT_PACKET_PINGREQ, AWS_MQTT_PACKET_PINGRESP, AWS_MQTT_PACKET_DISCONNECT, /* reserved = 15, */ }; /** * Represents the fixed header [MQTT-2.2]. */ struct aws_mqtt_fixed_header { enum aws_mqtt_packet_type packet_type; size_t remaining_length; uint8_t flags; }; /** * Get the type of packet from the first byte of the buffer [MQTT-2.2.1]. */ AWS_MQTT_API enum aws_mqtt_packet_type aws_mqtt_get_packet_type(const uint8_t *buffer); /** * Get traits describing a packet described by header [MQTT-2.2.2]. */ AWS_MQTT_API bool aws_mqtt_packet_has_flags(const struct aws_mqtt_fixed_header *header); /** * Write a fixed header to a byte stream. */ AWS_MQTT_API int aws_mqtt_fixed_header_encode(struct aws_byte_buf *buf, const struct aws_mqtt_fixed_header *header); /** * Read a fixed header from a byte stream. */ AWS_MQTT_API int aws_mqtt_fixed_header_decode(struct aws_byte_cursor *cur, struct aws_mqtt_fixed_header *header); AWS_MQTT_API int aws_mqtt311_decode_remaining_length(struct aws_byte_cursor *cur, size_t *remaining_length_out); #endif /* AWS_MQTT_PRIVATE_FIXED_HEADER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/mqtt311_decoder.h000066400000000000000000000111551456575232400301440ustar00rootroot00000000000000#ifndef AWS_MQTT_PRIVATE_MQTT311_DECODER_H #define AWS_MQTT_PRIVATE_MQTT311_DECODER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* * Per-packet-type callback signature. message_cursor contains the entire packet's data. */ typedef int(packet_handler_fn)(struct aws_byte_cursor message_cursor, void *user_data); /* * Wrapper for a set of packet handlers for each possible MQTT packet type. 
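 * For example, a client-side table might be populated with designated
 * initializers along these lines (the s_handle_* functions are hypothetical
 * implementations of packet_handler_fn):
 *
 *     struct aws_mqtt_client_connection_packet_handlers handlers = {
 *         .handlers_by_packet_type = {
 *             [AWS_MQTT_PACKET_CONNACK] = s_handle_connack,
 *             [AWS_MQTT_PACKET_PUBLISH] = s_handle_publish,
 *             [AWS_MQTT_PACKET_PINGRESP] = s_handle_pingresp,
 *         },
 *     };
 *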
Some values are invalid in 311 (15), and * some values are only valid from the perspective of the server or client. */ struct aws_mqtt_client_connection_packet_handlers { packet_handler_fn *handlers_by_packet_type[16]; }; /* * Internal state of the 311 decoder/framing logic. * * When a packet is fragmented across multiple io buffers, state moves circularly: * first byte -> remaining length -> body -> first byte etc... * * When a packet is completely contained inside a single io buffer, the entire packet is processed within * the READ_FIRST_BYTE state. */ enum aws_mqtt_311_decoder_state_type { /* * The decoder is expecting the first byte of the fixed header of an MQTT control packet */ AWS_MDST_READ_FIRST_BYTE, /* * The decoder is expecting the vli-encoded total remaining length field of the fixed header on an MQTT control * packet. */ AWS_MDST_READ_REMAINING_LENGTH, /* * The decoder is expecting the "rest" of the MQTT packet's data based on the remaining length value that has * already been read. */ AWS_MDST_READ_BODY, /* * Terminal state for when a protocol error has been encountered by the decoder. The only way to leave this * state is to reset the decoder via the aws_mqtt311_decoder_reset_for_new_connection() API. */ AWS_MDST_PROTOCOL_ERROR, }; /* * Configuration options for the decoder. When used by the actual implementation, handler_user_data is the * connection object and the packet handlers are channel functions that hook into reactionary behavior and user * callbacks. */ struct aws_mqtt311_decoder_options { const struct aws_mqtt_client_connection_packet_handlers *packet_handlers; void *handler_user_data; }; /* * Simple MQTT311 decoder structure. Actual decoding is deferred to per-packet functions that expect the whole * packet in a buffer. The primary function of this sub-system is correctly framing a stream of bytes into the * constituent packets. */ struct aws_mqtt311_decoder { struct aws_mqtt311_decoder_options config; enum aws_mqtt_311_decoder_state_type state; /* * If zero, not valid. If non-zero, represents the number of bytes that need to be read to finish the packet. * This includes the total encoding size of the fixed header. */ size_t total_packet_length; /* scratch buffer to hold individual packets when they fragment across incoming data frame boundaries */ struct aws_byte_buf packet_buffer; }; AWS_EXTERN_C_BEGIN /** * Initialize function for the MQTT311 decoder * * @param decoder decoder to initialize * @param allocator memory allocator to use * @param options additional decoder configuration options */ AWS_MQTT_API void aws_mqtt311_decoder_init( struct aws_mqtt311_decoder *decoder, struct aws_allocator *allocator, const struct aws_mqtt311_decoder_options *options); /** * Clean up function for an MQTT311 decoder * * @param decoder decoder to release resources for */ AWS_MQTT_API void aws_mqtt311_decoder_clean_up(struct aws_mqtt311_decoder *decoder); /** * Callback function to decode the incoming data stream of an MQTT311 connection. Handles packet framing and * correct decoder/handler function dispatch. * * @param decoder decoder to decode with * @param data raw plaintext bytes of a connection operating on the MQTT311 protocol * @return success/failure, failure represents a protocol error and implies the connection must be shut down */ AWS_MQTT_API int aws_mqtt311_decoder_on_bytes_received( struct aws_mqtt311_decoder *decoder, struct aws_byte_cursor data); /** * Resets a decoder's state to its initial values. 
If using a decoder across multiple network connections (within * the same client), you must invoke this when setting up a new connection, before any MQTT protocol bytes are * processed. Breaks the decoder out of any previous protocol error terminal state. * * @param decoder decoder to reset */ AWS_MQTT_API void aws_mqtt311_decoder_reset_for_new_connection(struct aws_mqtt311_decoder *decoder); AWS_EXTERN_C_END #endif /* AWS_MQTT_PRIVATE_MQTT311_DECODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/mqtt_client_test_helper.h000066400000000000000000000021021456575232400321560ustar00rootroot00000000000000#ifndef AWS_MQTT_CLIENT_TEST_HELPER_H #define AWS_MQTT_CLIENT_TEST_HELPER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct aws_allocator; struct aws_byte_cursor; struct aws_mqtt_client_connection_311_impl; struct aws_string; AWS_EXTERN_C_BEGIN /** This is for testing applications sending MQTT payloads. Don't ever include this file outside of a unit test. */ /** result buffer will be initialized and payload will be written into it */ AWS_MQTT_API int aws_mqtt_client_get_payload_for_outstanding_publish_packet( struct aws_mqtt_client_connection *connection, uint16_t packet_id, struct aws_allocator *allocator, struct aws_byte_buf *result); AWS_MQTT_API int aws_mqtt_client_get_topic_for_outstanding_publish_packet( struct aws_mqtt_client_connection *connection, uint16_t packet_id, struct aws_allocator *allocator, struct aws_string **result); AWS_EXTERN_C_END #endif // AWS_C_IOT_MQTT_CLIENT_TEST_HELPER_H aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/mqtt_subscription_set.h000066400000000000000000000177541456575232400317240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_MQTT_MQTT3_TO_MQTT5_ADAPTER_SUBSCRIPTION_SET_H #define AWS_MQTT_MQTT3_TO_MQTT5_ADAPTER_SUBSCRIPTION_SET_H #include "aws/mqtt/mqtt.h" #include "aws/mqtt/client.h" #include "aws/mqtt/v5/mqtt5_types.h" #include /** * (Transient) configuration options about a single persistent MQTT topic filter subscription */ struct aws_mqtt_subscription_set_subscription_options { struct aws_byte_cursor topic_filter; enum aws_mqtt5_qos qos; bool no_local; bool retain_as_published; enum aws_mqtt5_retain_handling_type retain_handling_type; /* Callback invoked when this subscription matches an incoming publish */ aws_mqtt_client_publish_received_fn *on_publish_received; /* Callback invoked when this subscription is removed from the set */ aws_mqtt_userdata_cleanup_fn *on_cleanup; void *callback_user_data; }; /** * Persistent structure to track a single MQTT topic filter subscription */ struct aws_mqtt_subscription_set_subscription_record { struct aws_allocator *allocator; struct aws_byte_buf topic_filter; struct aws_mqtt_subscription_set_subscription_options subscription_view; }; /** * (Transient) configuration options about an incoming publish message */ struct aws_mqtt_subscription_set_publish_received_options { struct aws_mqtt_client_connection *connection; struct aws_byte_cursor topic; enum aws_mqtt_qos qos; bool retain; bool dup; struct aws_byte_cursor payload; }; /** * A node in the topic trie maintained by the subscription set. Each node represents a single "path segment" in a * topic filter "path." Segments can be empty. 
* * Some examples (topic filter -> path segments): * * "hello/world" -> [ "hello", "world" ] * "a/b/" -> [ "a", "b", "" ] * "/b/" -> [ "", "b", "" ] * "a/#/c" -> [ "a", "#", "c" ] * * On incoming publish, we walk the tree invoking callbacks based on topic vs. topic filter matching, segment by * segment. * */ struct aws_mqtt_subscription_set_topic_tree_node { struct aws_allocator *allocator; struct aws_byte_cursor topic_segment_cursor; /* segment can be empty */ struct aws_byte_buf topic_segment; struct aws_mqtt_subscription_set_topic_tree_node *parent; struct aws_hash_table children; /* (embedded topic_segment -> containing node) */ /* * A node starts with a ref count of one and is incremented every time a new, overlapping path is added * to the subscription set. When the ref count goes to zero, that means there are not subscriptions using the * segment (or path suffix) represented by this node and therefor it can be deleted without any additional * analysis. * * Replacing an existing path does not change the ref count. */ size_t ref_count; bool is_subscription; aws_mqtt_client_publish_received_fn *on_publish_received; aws_mqtt_userdata_cleanup_fn *on_cleanup; void *callback_user_data; }; /** * Utility type to track a client's subscriptions. * * The topic tree supports per-subscription callbacks as used by the MQTT311 implementation. * * The subscriptions table supports resubscribe APIs for both MQTT311 and MQTT5 by tracking the subscription * details on a per-topic-filter basis. */ struct aws_mqtt_subscription_set { struct aws_allocator *allocator; /* a permanent ref */ struct aws_mqtt_subscription_set_topic_tree_node *root; /* embedded topic_filter_cursor -> persistent subscription */ struct aws_hash_table subscriptions; }; AWS_EXTERN_C_BEGIN /** * Creates a new subscription set * * @param allocator allocator to use * @return a new subscription set or NULL */ AWS_MQTT_API struct aws_mqtt_subscription_set *aws_mqtt_subscription_set_new(struct aws_allocator *allocator); /** * Destroys a subscription set * * @param subscription_set subscription set to destroy */ AWS_MQTT_API void aws_mqtt_subscription_set_destroy(struct aws_mqtt_subscription_set *subscription_set); /** * Checks if a topic filter exists in the subscription set's hash table of subscriptions * * @param subscription_set subscription set to check * @param topic_filter topic filter to check for existence in the set * @return true if the topic filter exists in the table of subscriptions, false otherwise */ AWS_MQTT_API bool aws_mqtt_subscription_set_is_subscribed( const struct aws_mqtt_subscription_set *subscription_set, struct aws_byte_cursor topic_filter); /** * Checks if a topic filter exists as a subscription (has a publish received handler) in the set's topic tree * * @param subscription_set subscription set to check * @param topic_filter topic filter to check for existence in the set's topic tree * @return true if the set's topic tree contains a publish received callback for the topic filter, false otherwise */ AWS_MQTT_API bool aws_mqtt_subscription_set_is_in_topic_tree( const struct aws_mqtt_subscription_set *subscription_set, struct aws_byte_cursor topic_filter); /** * Adds a subscription to the subscription set. If a subscription already exists with a matching topic filter, it * will be overwritten. 
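 *
 * Illustrative usage (the topic filter, callback, and user data are
 * placeholders, and error handling is omitted):
 *
 *     struct aws_mqtt_subscription_set_subscription_options options = {
 *         .topic_filter = aws_byte_cursor_from_c_str("sensors/+/temperature"),
 *         .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE,
 *         .on_publish_received = s_on_publish_received,
 *         .callback_user_data = my_user_data,
 *     };
 *     aws_mqtt_subscription_set_add_subscription(subscription_set, &options);
 *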
* * @param subscription_set subscription set to add a subscription to * @param subscription_options options for the new subscription */ AWS_MQTT_API void aws_mqtt_subscription_set_add_subscription( struct aws_mqtt_subscription_set *subscription_set, const struct aws_mqtt_subscription_set_subscription_options *subscription_options); /** * Removes a subscription from the subscription set * * @param subscription_set subscription set to remove a subscription from * @param topic_filter topic filter to remove subscription information for */ AWS_MQTT_API void aws_mqtt_subscription_set_remove_subscription( struct aws_mqtt_subscription_set *subscription_set, struct aws_byte_cursor topic_filter); /** * Given a publish message, invokes all publish received handlers for matching subscriptions in the subscription set * * @param subscription_set subscription set to invoke publish received callbacks for * @param publish_options received publish message properties */ AWS_MQTT_API void aws_mqtt_subscription_set_on_publish_received( const struct aws_mqtt_subscription_set *subscription_set, const struct aws_mqtt_subscription_set_publish_received_options *publish_options); /** * Queries the properties of all subscriptions tracked by this subscription set. Used to implement re-subscribe * behavior. * * @param subscription_set subscription set to query the subscriptions on * @param subscriptions uninitialized array list to hold the subscriptions. * * The caller must invoke the cleanup function for array lists on the result. The list elements are of type * 'struct aws_mqtt_subscription_set_subscription_options' and the topic filter cursor points to the subscription set's * internal record. This means that the result must be used and cleaned up in local scope. */ AWS_MQTT_API void aws_mqtt_subscription_set_get_subscriptions( struct aws_mqtt_subscription_set *subscription_set, struct aws_array_list *subscriptions); /** * Creates a new subscription record. A subscription record tracks all information about a single MQTT topic filter * subscription * * @param allocator memory allocator to use * @param subscription all relevant information about the subscription * @return a new persistent subscription record */ AWS_MQTT_API struct aws_mqtt_subscription_set_subscription_record *aws_mqtt_subscription_set_subscription_record_new( struct aws_allocator *allocator, const struct aws_mqtt_subscription_set_subscription_options *subscription); /** * Destroys a subscription record * * @param record subscription record to destroy */ AWS_MQTT_API void aws_mqtt_subscription_set_subscription_record_destroy( struct aws_mqtt_subscription_set_subscription_record *record); AWS_EXTERN_C_END #endif /* AWS_MQTT_MQTT3_TO_MQTT5_ADAPTER_SUBSCRIPTION_SET_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/packets.h000066400000000000000000000254731456575232400267070ustar00rootroot00000000000000#ifndef AWS_MQTT_PRIVATE_PACKETS_H #define AWS_MQTT_PRIVATE_PACKETS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include /* * General MQTT Control Packet Format [MQTT-2]: * 1. Fixed header, present in all packets * 2. Variable header, present in some packets * 3. Payload, preset in some packets */ /* Struct used internally for representing subscriptions */ struct aws_mqtt_subscription { /* Topic filter to subscribe to [MQTT-4.7]. */ struct aws_byte_cursor topic_filter; /* Maximum QoS of messages to receive [MQTT-4.3]. 
*/ enum aws_mqtt_qos qos; }; /** * Used to represent the following MQTT packets: * - PUBACK * - PUBREC * - PUBREL * - PUBCOMP * - UNSUBACK */ struct aws_mqtt_packet_ack { /* Fixed header */ struct aws_mqtt_fixed_header fixed_header; /* Variable header */ uint16_t packet_identifier; }; /** * Represents the MQTT SUBACK packet */ struct aws_mqtt_packet_suback { /* Fixed header */ struct aws_mqtt_fixed_header fixed_header; /* Variable header */ uint16_t packet_identifier; /* Payload */ /* List of uint8_t return code */ struct aws_array_list return_codes; }; /* Represents the MQTT CONNECT packet */ struct aws_mqtt_packet_connect { /* Fixed header */ struct aws_mqtt_fixed_header fixed_header; /* Variable header */ bool clean_session; bool has_will; bool will_retain; bool has_password; bool has_username; uint16_t keep_alive_timeout; enum aws_mqtt_qos will_qos; struct aws_byte_cursor client_identifier; /* Payload */ struct aws_byte_cursor will_topic; struct aws_byte_cursor will_message; struct aws_byte_cursor username; struct aws_byte_cursor password; }; /* Represents the MQTT CONNACK packet */ struct aws_mqtt_packet_connack { /* Fixed header */ struct aws_mqtt_fixed_header fixed_header; /* Variable header */ bool session_present; uint8_t connect_return_code; }; /* Represents the MQTT PUBLISH packet */ struct aws_mqtt_packet_publish { struct aws_mqtt_fixed_header fixed_header; /* Variable header */ uint16_t packet_identifier; struct aws_byte_cursor topic_name; /* Payload */ struct aws_byte_cursor payload; }; /* Represents the MQTT SUBSCRIBE packet */ struct aws_mqtt_packet_subscribe { /* Fixed header */ struct aws_mqtt_fixed_header fixed_header; /* Variable header */ uint16_t packet_identifier; /* Payload */ /* List of aws_mqtt_subscription */ struct aws_array_list topic_filters; }; /* Represents the MQTT UNSUBSCRIBE packet */ struct aws_mqtt_packet_unsubscribe { /* Fixed header */ struct aws_mqtt_fixed_header fixed_header; /* Variable header */ uint16_t packet_identifier; /* Payload */ /* List of aws_byte_cursors */ struct aws_array_list topic_filters; }; /** * Used to represent the following MQTT packets: * - PINGREQ * - PINGRESP * - DISCONNECT */ struct aws_mqtt_packet_connection { /* Fixed header */ struct aws_mqtt_fixed_header fixed_header; }; #ifdef __cplusplus extern "C" { #endif /*****************************************************************************/ /* Ack */ AWS_MQTT_API int aws_mqtt_packet_ack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_ack *packet); AWS_MQTT_API int aws_mqtt_packet_ack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_ack *packet); /*****************************************************************************/ /* Connect */ AWS_MQTT_API int aws_mqtt_packet_connect_init( struct aws_mqtt_packet_connect *packet, struct aws_byte_cursor client_identifier, bool clean_session, uint16_t keep_alive); AWS_MQTT_API int aws_mqtt_packet_connect_add_will( struct aws_mqtt_packet_connect *packet, struct aws_byte_cursor topic, enum aws_mqtt_qos qos, bool retain, struct aws_byte_cursor payload); AWS_MQTT_API int aws_mqtt_packet_connect_add_credentials( struct aws_mqtt_packet_connect *packet, struct aws_byte_cursor username, struct aws_byte_cursor password); AWS_MQTT_API int aws_mqtt_packet_connect_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connect *packet); AWS_MQTT_API int aws_mqtt_packet_connect_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connect *packet); 
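/*
 * Illustrative CONNECT encoding flow (not part of this header's API surface;
 * the allocator, client id, credentials, and keep-alive value are placeholders,
 * and error checks are omitted):
 *
 *     struct aws_mqtt_packet_connect connect;
 *     aws_mqtt_packet_connect_init(
 *         &connect, aws_byte_cursor_from_c_str("my-client-id"), true, 1200);
 *     aws_mqtt_packet_connect_add_credentials(
 *         &connect, aws_byte_cursor_from_c_str("user"), aws_byte_cursor_from_c_str("pass"));
 *
 *     struct aws_byte_buf buf;
 *     aws_byte_buf_init(&buf, allocator, 1024);
 *     aws_mqtt_packet_connect_encode(&buf, &connect);
 */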
/*****************************************************************************/ /* Connack */ AWS_MQTT_API int aws_mqtt_packet_connack_init( struct aws_mqtt_packet_connack *packet, bool session_present, enum aws_mqtt_connect_return_code return_code); AWS_MQTT_API int aws_mqtt_packet_connack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connack *packet); AWS_MQTT_API int aws_mqtt_packet_connack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connack *packet); /*****************************************************************************/ /* Publish */ AWS_MQTT_API int aws_mqtt_packet_publish_init( struct aws_mqtt_packet_publish *packet, bool retain, enum aws_mqtt_qos qos, bool dup, struct aws_byte_cursor topic_name, uint16_t packet_identifier, struct aws_byte_cursor payload); AWS_MQTT_API int aws_mqtt_packet_publish_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet); AWS_MQTT_API int aws_mqtt_packet_publish_encode_headers(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet); AWS_MQTT_API int aws_mqtt_packet_publish_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_publish *packet); AWS_MQTT_API void aws_mqtt_packet_publish_set_dup(struct aws_mqtt_packet_publish *packet); AWS_MQTT_API bool aws_mqtt_packet_publish_get_dup(const struct aws_mqtt_packet_publish *packet); AWS_MQTT_API enum aws_mqtt_qos aws_mqtt_packet_publish_get_qos(const struct aws_mqtt_packet_publish *packet); AWS_MQTT_API bool aws_mqtt_packet_publish_get_retain(const struct aws_mqtt_packet_publish *packet); /*****************************************************************************/ /* Puback */ AWS_MQTT_API int aws_mqtt_packet_puback_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier); /*****************************************************************************/ /* Pubrec */ AWS_MQTT_API int aws_mqtt_packet_pubrec_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier); /*****************************************************************************/ /* Pubrel */ AWS_MQTT_API int aws_mqtt_packet_pubrel_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier); /*****************************************************************************/ /* Pubcomp */ AWS_MQTT_API int aws_mqtt_packet_pubcomp_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier); /*****************************************************************************/ /* Subscribe */ AWS_MQTT_API int aws_mqtt_packet_subscribe_init( struct aws_mqtt_packet_subscribe *packet, struct aws_allocator *allocator, uint16_t packet_identifier); AWS_MQTT_API void aws_mqtt_packet_subscribe_clean_up(struct aws_mqtt_packet_subscribe *packet); AWS_MQTT_API int aws_mqtt_packet_subscribe_add_topic( struct aws_mqtt_packet_subscribe *packet, struct aws_byte_cursor topic_filter, enum aws_mqtt_qos qos); AWS_MQTT_API int aws_mqtt_packet_subscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_subscribe *packet); AWS_MQTT_API int aws_mqtt_packet_subscribe_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_subscribe *packet); /*****************************************************************************/ /* Suback */ AWS_MQTT_API int aws_mqtt_packet_suback_init( struct aws_mqtt_packet_suback *packet, struct aws_allocator *allocator, uint16_t packet_identifier); AWS_MQTT_API void aws_mqtt_packet_suback_clean_up(struct aws_mqtt_packet_suback *packet); AWS_MQTT_API int aws_mqtt_packet_suback_add_return_code(struct 
aws_mqtt_packet_suback *packet, uint8_t return_code); AWS_MQTT_API int aws_mqtt_packet_suback_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_suback *packet); AWS_MQTT_API int aws_mqtt_packet_suback_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_suback *packet); /*****************************************************************************/ /* Unsubscribe */ AWS_MQTT_API int aws_mqtt_packet_unsubscribe_init( struct aws_mqtt_packet_unsubscribe *packet, struct aws_allocator *allocator, uint16_t packet_identifier); AWS_MQTT_API void aws_mqtt_packet_unsubscribe_clean_up(struct aws_mqtt_packet_unsubscribe *packet); AWS_MQTT_API int aws_mqtt_packet_unsubscribe_add_topic( struct aws_mqtt_packet_unsubscribe *packet, struct aws_byte_cursor topic_filter); AWS_MQTT_API int aws_mqtt_packet_unsubscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_unsubscribe *packet); AWS_MQTT_API int aws_mqtt_packet_unsubscribe_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_unsubscribe *packet); /*****************************************************************************/ /* Unsuback */ AWS_MQTT_API int aws_mqtt_packet_unsuback_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier); /*****************************************************************************/ /* Ping request/response, disconnect */ AWS_MQTT_API int aws_mqtt_packet_pingreq_init(struct aws_mqtt_packet_connection *packet); AWS_MQTT_API int aws_mqtt_packet_pingresp_init(struct aws_mqtt_packet_connection *packet); AWS_MQTT_API int aws_mqtt_packet_disconnect_init(struct aws_mqtt_packet_connection *packet); AWS_MQTT_API int aws_mqtt_packet_connection_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connection *packet); AWS_MQTT_API int aws_mqtt_packet_connection_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connection *packet); #ifdef __cplusplus } #endif #endif /* AWS_MQTT_PRIVATE_PACKETS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/shared_constants.h000066400000000000000000000007431456575232400306100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_MQTT_SHARED_CONSTANTS_H #define AWS_MQTT_SHARED_CONSTANTS_H #include AWS_EXTERN_C_BEGIN AWS_MQTT_API extern const struct aws_byte_cursor *g_websocket_handshake_default_path; AWS_MQTT_API extern const struct aws_http_header *g_websocket_handshake_default_protocol_header; AWS_EXTERN_C_END #endif /* AWS_MQTT_SHARED_CONSTANTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/topic_tree.h000066400000000000000000000152501456575232400274020ustar00rootroot00000000000000#ifndef AWS_MQTT_PRIVATE_TOPIC_TREE_H #define AWS_MQTT_PRIVATE_TOPIC_TREE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /** Type of function called when a publish received matches a subscription */ typedef void(aws_mqtt_publish_received_fn)( const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *user_data); /** * Function called per subscription when iterating through subscriptions. * Return true to continue iteration, or false to stop. */ typedef bool( aws_mqtt_topic_tree_iterator_fn)(const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *user_data); struct aws_mqtt_topic_node { /* This node's part of the topic filter. 
If in another node's subtopics, this is the key. */ struct aws_byte_cursor topic; /** * aws_byte_cursor -> aws_mqtt_topic_node * '#' and '+' are special values in here */ struct aws_hash_table subtopics; /* The entire topic filter. If !owns_topic_filter, this topic_filter belongs to someone else. */ const struct aws_string *topic_filter; bool owns_topic_filter; /* The following will only be populated if the node IS a subscription */ /* Max QoS to deliver. */ enum aws_mqtt_qos qos; /* Callback to call on message received */ aws_mqtt_publish_received_fn *callback; aws_mqtt_userdata_cleanup_fn *cleanup; void *userdata; }; struct aws_mqtt_topic_tree { struct aws_mqtt_topic_node *root; struct aws_allocator *allocator; }; /** * The size of transaction instances. * When you initialize an aws_array_list for use as a transaction, pass this as the item size. */ extern AWS_MQTT_API size_t aws_mqtt_topic_tree_action_size; /** * Initialize a topic tree with an allocator to later use. * Note that calling init allocates root. */ AWS_MQTT_API int aws_mqtt_topic_tree_init(struct aws_mqtt_topic_tree *tree, struct aws_allocator *allocator); /** * Cleanup and deallocate an entire topic tree. */ AWS_MQTT_API void aws_mqtt_topic_tree_clean_up(struct aws_mqtt_topic_tree *tree); /** * Iterates through all registered subscriptions, and calls iterator. * * Iterator may return false to stop iterating, or true to continue. */ AWS_MQTT_API void aws_mqtt_topic_tree_iterate( const struct aws_mqtt_topic_tree *tree, aws_mqtt_topic_tree_iterator_fn *iterator, void *user_data); /** * Gets the total number of subscriptions in the tree. */ AWS_MQTT_API size_t aws_mqtt_topic_tree_get_sub_count(const struct aws_mqtt_topic_tree *tree); /** * Insert a new topic filter into the subscription tree (subscribe). * * \param[in] tree The tree to insert into. * \param[in] transaction The transaction to add the insert action to. * Must be initialized with aws_mqtt_topic_tree_action_size as item size. * \param[in] topic_filter The topic filter to subscribe on. May contain wildcards. * \param[in] callback The callback to call on a publish with a matching topic. * \param[in] connection The connection object to pass to the callback. This is a void* to support client and server * connections in the future. * \param[in] userdata The userdata to pass to callback. * * \returns AWS_OP_SUCCESS on successful insertion, AWS_OP_ERR with aws_last_error() populated on failure. * If AWS_OP_ERR is returned, aws_mqtt_topic_tree_transaction_rollback should be called to prevent leaks. */ AWS_MQTT_API int aws_mqtt_topic_tree_transaction_insert( struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction, const struct aws_string *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_publish_received_fn *callback, aws_mqtt_userdata_cleanup_fn *cleanup, void *userdata); /** * Remove a topic filter from the subscription tree (unsubscribe). * * \param[in] tree The tree to remove from. * \param[in] transaction The transaction to add the insert action to. * Must be initialized with aws_mqtt_topic_tree_action_size as item size. * \param[in] topic_filter The filter to remove (must be exactly the same as the topic_filter passed to insert). * \param[out] old_userdata The userdata assigned to this subscription will be assigned if not NULL. * \NOTE once the transaction is committed, old_userdata may be destroyed, * if a cleanup callback was set on insert. * * \returns AWS_OP_SUCCESS on successful removal, AWS_OP_ERR with aws_last_error() populated on failure. 
* If AWS_OP_ERR is returned, aws_mqtt_topic_tree_transaction_rollback should be called to prevent leaks. */ AWS_MQTT_API int aws_mqtt_topic_tree_transaction_remove( struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction, const struct aws_byte_cursor *topic_filter, void **old_userdata); AWS_MQTT_API void aws_mqtt_topic_tree_transaction_commit( struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction); AWS_MQTT_API void aws_mqtt_topic_tree_transaction_roll_back( struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction); /** * Insert a new topic filter into the subscription tree (subscribe). * * \param[in] tree The tree to insert into. * \param[in] topic_filter The topic filter to subscribe on. May contain wildcards. * \param[in] callback The callback to call on a publish with a matching topic. * \param[in] connection The connection object to pass to the callback. This is a void* to support client and server * connections in the future. * \param[in] userdata The userdata to pass to callback. * * \returns AWS_OP_SUCCESS on successful insertion, AWS_OP_ERR with aws_last_error() populated on failure. */ AWS_MQTT_API int aws_mqtt_topic_tree_insert( struct aws_mqtt_topic_tree *tree, const struct aws_string *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_publish_received_fn *callback, aws_mqtt_userdata_cleanup_fn *cleanup, void *userdata); AWS_MQTT_API int aws_mqtt_topic_tree_remove(struct aws_mqtt_topic_tree *tree, const struct aws_byte_cursor *topic_filter); /** * Dispatches a publish packet to all subscriptions matching the publish topic. * * \param[in] tree The tree to publish on. * \param[in] pub The publish packet to dispatch. The topic MUST NOT contain wildcards. */ void AWS_MQTT_API aws_mqtt_topic_tree_publish(const struct aws_mqtt_topic_tree *tree, struct aws_mqtt_packet_publish *pub); #endif /* AWS_MQTT_PRIVATE_TOPIC_TREE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/000077500000000000000000000000001456575232400254235ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_callbacks.h000066400000000000000000000051421456575232400306470ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_MQTT_MQTT5_CALLBACKS_H #define AWS_MQTT_MQTT5_CALLBACKS_H #include #include #include struct aws_mqtt5_callback_set; /* * An internal type for managing chains of callbacks attached to an mqtt5 client. Supports chains for * lifecycle event handling and incoming publish packet handling. * * Assumed to be owned and used only by an MQTT5 client. */ struct aws_mqtt5_callback_set_manager { struct aws_mqtt5_client *client; struct aws_linked_list callback_set_entries; uint64_t next_callback_set_entry_id; }; AWS_EXTERN_C_BEGIN /* * Initializes a callback set manager */ AWS_MQTT_API void aws_mqtt5_callback_set_manager_init( struct aws_mqtt5_callback_set_manager *manager, struct aws_mqtt5_client *client); /* * Cleans up a callback set manager. * * aws_mqtt5_callback_set_manager_init must have been previously called or this will crash. */ AWS_MQTT_API void aws_mqtt5_callback_set_manager_clean_up(struct aws_mqtt5_callback_set_manager *manager); /* * Adds a callback set to the front of the handler chain. Returns an integer id that can be used to selectively * remove the callback set from the manager. * * May only be called on the client's event loop thread. 
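 *
 * Typical flow (a sketch; the manager is assumed to have been initialized with
 * aws_mqtt5_callback_set_manager_init and the callback_set populated elsewhere):
 *
 *     uint64_t id = aws_mqtt5_callback_set_manager_push_front(&manager, &callback_set);
 *     ... later, on the same event loop thread ...
 *     aws_mqtt5_callback_set_manager_remove(&manager, id);
 *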
*/ AWS_MQTT_API uint64_t aws_mqtt5_callback_set_manager_push_front( struct aws_mqtt5_callback_set_manager *manager, struct aws_mqtt5_callback_set *callback_set); /* * Removes a callback set from the handler chain. * * May only be called on the client's event loop thread. */ AWS_MQTT_API void aws_mqtt5_callback_set_manager_remove(struct aws_mqtt5_callback_set_manager *manager, uint64_t callback_set_id); /* * Walks the handler chain for an MQTT5 client's incoming publish messages. The chain's callbacks will be invoked * until either the end is reached or one of the callbacks returns true. * * May only be called on the client's event loop thread. */ AWS_MQTT_API void aws_mqtt5_callback_set_manager_on_publish_received( struct aws_mqtt5_callback_set_manager *manager, const struct aws_mqtt5_packet_publish_view *publish_view); /* * Walks the handler chain for an MQTT5 client's lifecycle events. * * May only be called on the client's event loop thread. */ AWS_MQTT_API void aws_mqtt5_callback_set_manager_on_lifecycle_event( struct aws_mqtt5_callback_set_manager *manager, const struct aws_mqtt5_client_lifecycle_event *lifecycle_event); AWS_EXTERN_C_END #endif /* AWS_MQTT_MQTT5_CALLBACKS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_client_impl.h000066400000000000000000000710171456575232400312330ustar00rootroot00000000000000#ifndef AWS_MQTT_MQTT5_CLIENT_IMPL_H #define AWS_MQTT_MQTT5_CLIENT_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include struct aws_event_loop; struct aws_http_message; struct aws_http_proxy_options; struct aws_mqtt5_client_options_storage; struct aws_mqtt5_operation; struct aws_websocket_client_connection_options; /** * The various states that the client can be in. A client has both a current state and a desired state. * Desired state is only allowed to be one of {STOPPED, CONNECTED, TERMINATED}. The client transitions states * based on either * (1) changes in desired state, or * (2) external events. * * Most states are interruptible (in the sense of a change in desired state causing an immediate change in state) but * CONNECTING and CHANNEL_SHUTDOWN cannot be interrupted due to waiting for an asynchronous callback (that has no * cancel) to complete. */ enum aws_mqtt5_client_state { /* * The client is not connected and not waiting for anything to happen. * * Next States: * CONNECTING - if the user invokes Start() on the client * TERMINATED - if the user releases the last ref count on the client */ AWS_MCS_STOPPED, /* * The client is attempting to connect to a remote endpoint, and is waiting for channel setup to complete. This * state is not interruptible by any means other than channel setup completion. * * Next States: * MQTT_CONNECT - if the channel completes setup with no error and desired state is still CONNECTED * CHANNEL_SHUTDOWN - if the channel completes setup with no error, but desired state is not CONNECTED * PENDING_RECONNECT - if the channel fails to complete setup and desired state is still CONNECTED * STOPPED - if the channel fails to complete setup and desired state is not CONNECTED */ AWS_MCS_CONNECTING, /* * The client is sending a CONNECT packet and waiting on a CONNACK packet. 
* * Next States: * CONNECTED - if a successful CONNACK is received and desired state is still CONNECTED * CHANNEL_SHUTDOWN - On send/encode errors, read/decode errors, unsuccessful CONNACK, timeout to receive * CONNACK, desired state is no longer CONNECTED * PENDING_RECONNECT - unexpected channel shutdown completion and desired state still CONNECTED * STOPPED - unexpected channel shutdown completion and desired state no longer CONNECTED */ AWS_MCS_MQTT_CONNECT, /* * The client is ready to perform user-requested mqtt operations. * * Next States: * CHANNEL_SHUTDOWN - On send/encode errors, read/decode errors, DISCONNECT packet received, desired state * no longer CONNECTED, PINGRESP timeout * PENDING_RECONNECT - unexpected channel shutdown completion and desired state still CONNECTED * STOPPED - unexpected channel shutdown completion and desired state no longer CONNECTED */ AWS_MCS_CONNECTED, /* * The client is attempt to shut down a connection cleanly by finishing the current operation and then * transmitting an outbound DISCONNECT. * * Next States: * CHANNEL_SHUTDOWN - on successful (or unsuccessful) send of the DISCONNECT * PENDING_RECONNECT - unexpected channel shutdown completion and desired state still CONNECTED * STOPPED - unexpected channel shutdown completion and desired state no longer CONNECTED */ AWS_MCS_CLEAN_DISCONNECT, /* * The client is waiting for the io channel to completely shut down. This state is not interruptible. * * Next States: * PENDING_RECONNECT - the io channel has shut down and desired state is still CONNECTED * STOPPED - the io channel has shut down and desired state is not CONNECTED */ AWS_MCS_CHANNEL_SHUTDOWN, /* * The client is waiting for the reconnect timer to expire before attempting to connect again. * * Next States: * CONNECTING - the reconnect timer has expired and desired state is still CONNECTED * STOPPED - desired state is no longer CONNECTED */ AWS_MCS_PENDING_RECONNECT, /* * The client is performing final shutdown and release of all resources. This state is only realized for * a non-observable instant of time (transition out of STOPPED). */ AWS_MCS_TERMINATED, }; /** * Table of overridable external functions to allow mocking and monitoring of the client. 
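*
* A test-only sketch of how a mock might be installed (illustrative; s_mock_get_time and fixture are
* hypothetical test helpers, not part of this API): copy the default table, override the entries of
* interest, then install the copy via the private setter declared later in this header:
*
*   struct aws_mqtt5_client_vtable mock_vtable = *aws_mqtt5_client_get_default_vtable();
*   mock_vtable.get_current_time_fn = s_mock_get_time;   // hypothetical uint64_t (*)(void) test clock
*   mock_vtable.vtable_user_data = &fixture;             // hypothetical test fixture
*   aws_mqtt5_client_set_vtable(client, &mock_vtable);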
*/ struct aws_mqtt5_client_vtable { /* aws_high_res_clock_get_ticks */ uint64_t (*get_current_time_fn)(void); /* aws_channel_shutdown */ int (*channel_shutdown_fn)(struct aws_channel *channel, int error_code); /* aws_websocket_client_connect */ int (*websocket_connect_fn)(const struct aws_websocket_client_connection_options *options); /* aws_client_bootstrap_new_socket_channel */ int (*client_bootstrap_new_socket_channel_fn)(struct aws_socket_channel_bootstrap_options *options); /* aws_http_proxy_new_socket_channel */ int (*http_proxy_new_socket_channel_fn)( struct aws_socket_channel_bootstrap_options *channel_options, const struct aws_http_proxy_options *proxy_options); /* This doesn't replace anything, it's just for test verification of state changes */ void (*on_client_state_change_callback_fn)( struct aws_mqtt5_client *client, enum aws_mqtt5_client_state old_state, enum aws_mqtt5_client_state new_state, void *vtable_user_data); /* This doesn't replace anything, it's just for test verification of statistic changes */ void (*on_client_statistics_changed_callback_fn)( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, void *vtable_user_data); /* aws_channel_acquire_message_from_pool */ struct aws_io_message *(*aws_channel_acquire_message_from_pool_fn)( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint, void *user_data); /* aws_channel_slot_send_message */ int (*aws_channel_slot_send_message_fn)( struct aws_channel_slot *slot, struct aws_io_message *message, enum aws_channel_direction dir, void *user_data); void *vtable_user_data; }; /* * In order to make it easier to guarantee the lifecycle events are properly paired and emitted, we track * a separate state (from aws_mqtt5_client_state) and emit lifecycle events based on it. * * For example, if our lifecycle event is state CONNECTING, than anything going wrong becomes a CONNECTION_FAILED event * whereas if we were in CONNECTED, it must be a DISCONNECTED event. By setting the state to NONE after emitting * a CONNECTION_FAILED or DISCONNECTED event, then emission spots further down the execution pipeline will not * accidentally emit an additional event. This also allows us to emit immediately when an event happens, if * appropriate, without having to persist additional event data (like packet views) until some singular point. * * For example: * * If I'm in CONNECTING and the channel shuts down, I want to emit a CONNECTION_FAILED event with the error code. * If I'm in CONNECTING and I receive a failed CONNACK, I want to emit a CONNECTION_FAILED event immediately with * the CONNACK view in it and then invoke channel shutdown (and channel shutdown completing later should not emit an * event). * If I'm in CONNECTED and the channel shuts down, I want to emit a DISCONNECTED event with the error code. * If I'm in CONNECTED and get a DISCONNECT packet from the server, I want to emit a DISCONNECTED event with * the DISCONNECT packet in it, invoke channel shutdown, and then I *don't* want to emit a DISCONNECTED event * when the channel finishes shutting down. 
*/ enum aws_mqtt5_lifecycle_state { AWS_MQTT5_LS_NONE, AWS_MQTT5_LS_CONNECTING, AWS_MQTT5_LS_CONNECTED, }; /* * Operation-related state notes * * operation flow: * (qos 0 publish, disconnect, connect) * user (via cross thread task) -> * queued_operations -> (on front of queue) * current_operation -> (on completely encoded and passed to next handler) * write_completion_operations -> (on socket write complete) * release * * (qos 1+ publish, sub/unsub) * user (via cross thread task) -> * queued_operations -> (on front of queue) * current_operation (allocate packet id if necessary) -> (on completely encoded and passed to next handler) * unacked_operations && unacked_operations_table -> (on ack received) * release * * QoS 1+ requires both a table and a list holding the same operations in order to support fast lookups by * mqtt packet id and in-order re-queueing in the case of a disconnection (required by spec) * * On Qos 1 PUBLISH completely received (and final callback invoked): * Add PUBACK at head of queued_operations * * On disconnect (on transition to PENDING_RECONNECT or STOPPED): * If current_operation, move current_operation to head of queued_operations * Fail all operations in the pending write completion list * Fail, remove, and release operations in queued_operations where * (1) They fail the offline queue policy OR * (2) They are a PUBACK, PINGREQ, or DISCONNECT * Fail, remove, and release unacked_operations if: * (1) They fail the offline queue policy AND * (2) operation is not Qos 1+ publish * * On reconnect (post CONNACK): * if rejoined_session: * Move-and-append all non-qos1+-publishes in unacked_operations to the front of queued_operations * Move-and-append remaining operations (qos1+ publishes) to the front of queued_operations * else: * Fail, remove, and release unacked_operations that fail the offline queue policy * Move and append unacked operations to front of queued_operations * * Clear unacked_operations_table */ struct aws_mqtt5_client_operational_state { /* back pointer to the client */ struct aws_mqtt5_client *client; /* * One more than the most recently used packet id. This is the best starting point for a forward search through * the id space for a free id. */ aws_mqtt5_packet_id_t next_mqtt_packet_id; struct aws_linked_list queued_operations; struct aws_mqtt5_operation *current_operation; struct aws_hash_table unacked_operations_table; struct aws_linked_list unacked_operations; struct aws_linked_list write_completion_operations; /* * heap of operation pointers where the timeout is the sort value. Elements are added/removed from this * data structure in exact synchronization with unacked_operations_table. */ struct aws_priority_queue operations_by_ack_timeout; /* * Is there an io message in transit (to the socket) that has not invoked its write completion callback yet? * The client implementation only allows one in-transit message at a time, and so if this is true, we don't * send additional ones/ */ bool pending_write_completion; }; /* * State related to flow-control rules for the mqtt5 client * * Includes: * (1) Mqtt5 ReceiveMaximum support * (2) AWS IoT Core limit support: * (a) Publish TPS rate limit * (b) Total outbound throughput limit */ struct aws_mqtt5_client_flow_control_state { /* * Mechanically follows the mqtt5 suggested implementation: * * Starts at the server's receive maximum. * 1. Decrement every time we send a QoS1+ publish * 2. 
Increment every time we receive a PUBACK * * Qos1+ publishes (and all operations behind them in the queue) are blocked while this value is zero. * * Qos 2 support will require additional work here to match the spec. */ uint32_t unacked_publish_token_count; /* * Optional throttle (extended validation) that prevents the client from exceeding Iot Core's default throughput * limit */ struct aws_rate_limiter_token_bucket throughput_throttle; /* * Optional throttle (extended validation) that prevents the client from exceeding Iot Core's default publish * rate limit. */ struct aws_rate_limiter_token_bucket publish_throttle; }; /** * Contains some simple statistics about the current state of the client's queue of operations */ struct aws_mqtt5_client_operation_statistics_impl { /* * total number of operations submitted to the client that have not yet been completed. Unacked operations * are a subset of this. */ struct aws_atomic_var incomplete_operation_count_atomic; /* * total packet size of operations submitted to the client that have not yet been completed. Unacked operations * are a subset of this. */ struct aws_atomic_var incomplete_operation_size_atomic; /* * total number of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. */ struct aws_atomic_var unacked_operation_count_atomic; /* * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. */ struct aws_atomic_var unacked_operation_size_atomic; }; struct aws_mqtt5_client { struct aws_allocator *allocator; struct aws_ref_count ref_count; const struct aws_mqtt5_client_vtable *vtable; /* * Client configuration */ struct aws_mqtt5_client_options_storage *config; /* * The recurrent task that runs all client logic outside of external event callbacks. Bound to the client's * event loop. */ struct aws_task service_task; /* * Tracks when the client's service task is next schedule to run. Is zero if the task is not scheduled to run or * we are in the middle of a service (so technically not scheduled too). */ uint64_t next_service_task_run_time; /* * True if the client's service task is running. Used to skip service task reevaluation due to state changes * while running the service task. Reevaluation will occur at the very end of the service. */ bool in_service; /* * The final mqtt5 settings negotiated between defaults, CONNECT, and CONNACK. Only valid while in * CONNECTED or CLEAN_DISCONNECT states. */ struct aws_mqtt5_negotiated_settings negotiated_settings; /* * Event loop all the client's connections and any related tasks will be pinned to, ensuring serialization and * concurrency safety. */ struct aws_event_loop *loop; /* Channel handler information */ struct aws_channel_handler handler; struct aws_channel_slot *slot; /* * What state is the client working towards? */ enum aws_mqtt5_client_state desired_state; /* * What is the client's current state? */ enum aws_mqtt5_client_state current_state; /* * The client's lifecycle state. Used to correctly emit lifecycle events in spite of the complicated * async execution pathways that are possible. 
*/ enum aws_mqtt5_lifecycle_state lifecycle_state; /* * The client's MQTT packet encoder */ struct aws_mqtt5_encoder encoder; /* * The client's MQTT packet decoder */ struct aws_mqtt5_decoder decoder; /* * Cache of inbound topic aliases */ struct aws_mqtt5_inbound_topic_alias_resolver inbound_topic_alias_resolver; /* * Cache of outbound topic aliases */ struct aws_mqtt5_outbound_topic_alias_resolver *outbound_topic_alias_resolver; /* * Temporary state-related data. * * clean_disconnect_error_code - the CLEAN_DISCONNECT state takes time to complete and we want to be able * to pass an error code from a prior event to the channel shutdown. This holds the "override" error code * that we'd like to shut down the channel with while CLEAN_DISCONNECT is processed. * * handshake exists on websocket-configured clients between the transform completion timepoint and the * websocket setup callback. */ int clean_disconnect_error_code; struct aws_http_message *handshake; /* * Wraps all state related to pending and in-progress MQTT operations within the client. */ struct aws_mqtt5_client_operational_state operational_state; /* Statistics tracking operational state */ struct aws_mqtt5_client_operation_statistics_impl operation_statistics_impl; /* * Wraps all state related to outbound flow control. */ struct aws_mqtt5_client_flow_control_state flow_control_state; /* * Manages notification listener chains for lifecycle events and incoming publishes */ struct aws_mqtt5_callback_set_manager callback_manager; /* * When should the next PINGREQ be sent? */ uint64_t next_ping_time; /* * When should we shut down the channel due to failure to receive a PINGRESP? Only non-zero when an outstanding * PINGREQ has not been answered. */ uint64_t next_ping_timeout_time; /* * When should the client next attempt to reconnect? Only used by PENDING_RECONNECT state. */ uint64_t next_reconnect_time_ns; /* * How many consecutive reconnect failures have we experienced? */ uint64_t reconnect_count; /* * How much should we wait before our next reconnect attempt? */ uint64_t current_reconnect_delay_ms; /* * When should the client reset current_reconnect_delay_interval_ms to the minimum value? Only relevant to the * CONNECTED state. */ uint64_t next_reconnect_delay_reset_time_ns; /* * When should we shut down the channel due to failure to receive a CONNACK? Only relevant during the MQTT_CONNECT * state. */ uint64_t next_mqtt_connect_packet_timeout_time; /* * Starts false and set to true as soon as a successful connection is established. If the session resumption * behavior is AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS then this must be true before the client sends CONNECT packets * with clean start set to false. */ bool has_connected_successfully; /* * A flag that allows in-thread observers (currently the mqtt3_to_5 adapter) to signal that the connection * should be torn down and re-established. Only relevant to the CONNECTING state which is not interruptible: * * If the mqtt5 client is in the CONNECTING state (ie waiting for bootstrap to complete) and the 3-adapter * is asked to connect, then we *MUST* discard the in-progress connection attempt in order to guarantee the * connection we establish uses all of the configuration parameters that are passed during the mqtt3 API's connect * call (host, port, tls options, socket options, etc...). Since we can't interrupt the CONNECTING state, we * instead set a flag that tells the mqtt5 client to tear down the connection as soon as the initial bootstrap * completes. 
The reconnect will establish the requested connection using the parameters passed to * the mqtt3 API. * * Rather than try and catch every escape path from CONNECTING, we lazily reset this flag to false when we * enter the CONNECTING state. On a similar note, we only check this flag as we transition to MQTT_CONNECT. * * This flag is ultimately only needed when the 3 adapter and 5 client are used out-of-sync. If you use the * 3 adapter exclusively after 5 client creation, it never comes into play. * * Even the adapter shouldn't manipulate this directly. Instead, use the aws_mqtt5_client_reset_connection private * API to tear down an in-progress or established connection in response to a connect() request on the adapter. */ bool should_reset_connection; }; AWS_EXTERN_C_BEGIN /* * A number of private APIs which are either set up for mocking parts of the client or testing subsystems within it by * exposing what would normally be static functions internal to the implementation. */ /* * Override the vtable used by the client; useful for mocking certain scenarios. */ AWS_MQTT_API void aws_mqtt5_client_set_vtable( struct aws_mqtt5_client *client, const struct aws_mqtt5_client_vtable *vtable); /* * Gets the default vtable used by the client. In order to mock something, we start with the default and then * mutate it selectively to achieve the scenario we're interested in. */ AWS_MQTT_API const struct aws_mqtt5_client_vtable *aws_mqtt5_client_get_default_vtable(void); /* * Sets the packet id, if necessary, on an operation based on the current pending acks table. The caller is * responsible for adding the operation to the unacked table when the packet has been encoding in an io message. * * There is an argument that the operation should go into the table only on socket write completion, but that breaks * allocation unless an additional, independent table is added, which I'd prefer not to do presently. Also, socket * write completion callbacks can be a bit delayed which could lead to a situation where the response from a local * server could arrive before the write completion runs which would be a disaster. */ AWS_MQTT_API int aws_mqtt5_operation_bind_packet_id( struct aws_mqtt5_operation *operation, struct aws_mqtt5_client_operational_state *client_operational_state); /* * Initialize and clean up of the client operational state. Exposed (privately) to enabled tests to reuse the * init/cleanup used by the client itself. 
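*
* Typical test-side pairing (a sketch; allocator and client are assumed to come from the test fixture):
*
*   struct aws_mqtt5_client_operational_state operational_state;
*   if (aws_mqtt5_client_operational_state_init(&operational_state, allocator, client)) {
*       // handle AWS_OP_ERR
*   }
*   // ... exercise queueing/ack logic ...
*   aws_mqtt5_client_operational_state_clean_up(&operational_state);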
*/ AWS_MQTT_API int aws_mqtt5_client_operational_state_init( struct aws_mqtt5_client_operational_state *client_operational_state, struct aws_allocator *allocator, struct aws_mqtt5_client *client); AWS_MQTT_API void aws_mqtt5_client_operational_state_clean_up( struct aws_mqtt5_client_operational_state *client_operational_state); /* * Resets the client's operational state based on a disconnection (from above comment): * * If current_operation * move current_operation to head of queued_operations * Fail all operations in the pending write completion list * Fail, remove, and release operations in queued_operations where they fail the offline queue policy * Iterate unacked_operations: * If qos1+ publish * set dup flag * else * unset/release packet id * Fail, remove, and release unacked_operations if: * (1) They fail the offline queue policy AND * (2) the operation is not Qos 1+ publish */ AWS_MQTT_API void aws_mqtt5_client_on_disconnection_update_operational_state(struct aws_mqtt5_client *client); /* * Updates the client's operational state based on a successfully established connection event: * * if rejoined_session: * Move-and-append all non-qos1+-publishes in unacked_operations to the front of queued_operations * Move-and-append remaining operations (qos1+ publishes) to the front of queued_operations * else: * Fail, remove, and release unacked_operations that fail the offline queue policy * Move and append unacked operations to front of queued_operations */ AWS_MQTT_API void aws_mqtt5_client_on_connection_update_operational_state(struct aws_mqtt5_client *client); /* * Processes the pending operation queue based on the current state of the associated client */ AWS_MQTT_API int aws_mqtt5_client_service_operational_state( struct aws_mqtt5_client_operational_state *client_operational_state); /* * Updates the client's operational state based on the receipt of an ACK packet from the server. In general this * means looking up the original operation in the pending ack table, completing it, removing it from both the * pending ack table and list, and then destroying it. */ AWS_MQTT_API void aws_mqtt5_client_operational_state_handle_ack( struct aws_mqtt5_client_operational_state *client_operational_state, aws_mqtt5_packet_id_t packet_id, enum aws_mqtt5_packet_type packet_type, const void *packet_view, int error_code); /* * Helper function that returns whether or not the current value of the negotiated settings can be used. Primarily * a client state check (received CONNACK, not yet disconnected) */ AWS_MQTT_API bool aws_mqtt5_client_are_negotiated_settings_valid(const struct aws_mqtt5_client *client); /* * Initializes the client's flow control state. This state governs the rates and delays between processing * operations and sending packets. */ AWS_MQTT_API void aws_mqtt5_client_flow_control_state_init(struct aws_mqtt5_client *client); /* * Resets the client's flow control state to a known baseline. Invoked right after entering the connected state. */ AWS_MQTT_API void aws_mqtt5_client_flow_control_state_reset(struct aws_mqtt5_client *client); /* * Updates the client's flow control state based on the receipt of a PUBACK for a Qos1 publish. */ AWS_MQTT_API void aws_mqtt5_client_flow_control_state_on_puback(struct aws_mqtt5_client *client); /* * Updates the client's flow control state based on successfully encoding an operation into a channel message. 
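*
* Illustrative ordering of the flow-control hooks relative to one another (a sketch of how the client is
* expected to drive them, not additional API; client, operation, and now come from the surrounding
* service logic):
*
*   aws_mqtt5_client_flow_control_state_reset(client);                              // on entering CONNECTED
*   uint64_t service_time =
*       aws_mqtt5_client_flow_control_state_get_next_operation_service_time(client, operation, now);
*   aws_mqtt5_client_flow_control_state_on_outbound_operation(client, operation);   // operation encoded
*   aws_mqtt5_client_flow_control_state_on_puback(client);                          // QoS1 publish acked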
*/ AWS_MQTT_API void aws_mqtt5_client_flow_control_state_on_outbound_operation( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation); /* * Given the next operation in the queue, examines the flow control state to determine when is the earliest time * it should be processed. */ AWS_MQTT_API uint64_t aws_mqtt5_client_flow_control_state_get_next_operation_service_time( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, uint64_t now); /* * Updates the client's operation statistics based on a change in the state of an operation. */ AWS_MQTT_API void aws_mqtt5_client_statistics_change_operation_statistic_state( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, enum aws_mqtt5_operation_statistic_state_flags new_state_flags); /** * Converts a client state type to a readable description. * * @param state client state * @return short string describing the client state */ AWS_MQTT_API const char *aws_mqtt5_client_state_to_c_string(enum aws_mqtt5_client_state state); /** * An internal API used by the MQTT3 adapter to force any existing-or-in-progress connection to * be torn down and re-established. Necessary because the MQTT3 interface allows overrides on a large number * of configuration parameters through the connect() call. We must honor those parameters and the safest thing * to do is to just throw away the current connection (if it exists) and make a new one. In the case that an MQTT5 * client is being driven entirely by the MQTT3 adapter, this case never actually happens. * * @param client client to reset an existing or in-progress connection for * @return true if a connection reset was triggered, false if there was nothing to do */ AWS_MQTT_API bool aws_mqtt5_client_reset_connection(struct aws_mqtt5_client *client); /** * Event-loop-internal API used to switch the client's desired state. Used by both start() and stop() cross-thread * tasks as well as by the 3-to-5 adapter to make changes synchronously (when in the event loop). * * @param client mqtt5 client to update desired state for * @param desired_state new desired state * @param disconnect_op optional description of a DISCONNECT packet to send as part of a stop command */ AWS_MQTT_API void aws_mqtt5_client_change_desired_state( struct aws_mqtt5_client *client, enum aws_mqtt5_client_state desired_state, struct aws_mqtt5_operation_disconnect *disconnect_op); /** * Event-loop-internal API to add an operation to the client's queue. Used by the 3-to-5 adapter to synchnrously * inject the MQTT5 operation once the adapter operation has reached the event loop. * * @param client MQTT5 client to submit an operation to * @param operation MQTT5 operation to submit * @param is_terminated flag that indicates whether the submitter is shutting down or not. Needed to differentiate * between adapter submissions and MQTT5 client API submissions and correctly handle ref count adjustments. */ AWS_MQTT_API void aws_mqtt5_client_submit_operation_internal( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, bool is_terminated); AWS_EXTERN_C_END #endif /* AWS_MQTT_MQTT5_CLIENT_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_decoder.h000066400000000000000000000267161456575232400303470ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #ifndef AWS_MQTT_MQTT5_DECODER_H #define AWS_MQTT_MQTT5_DECODER_H #include #include #include #include #include struct aws_mqtt5_client; struct aws_mqtt5_decoder; struct aws_mqtt5_inbound_topic_alias_resolver; /** * Overall decoder state. We read the packet type and the remaining length, and then buffer the * entire packet before decoding. */ enum aws_mqtt5_decoder_state { AWS_MQTT5_DS_READ_PACKET_TYPE, AWS_MQTT5_DS_READ_REMAINING_LENGTH, AWS_MQTT5_DS_READ_PACKET, AWS_MQTT5_DS_FATAL_ERROR, }; /* * Basic return value for a number of different decoding operations. Error is always fatal and implies the * connection needs to be torn down. */ enum aws_mqtt5_decode_result_type { AWS_MQTT5_DRT_MORE_DATA, AWS_MQTT5_DRT_SUCCESS, AWS_MQTT5_DRT_ERROR, }; /* * Callbacks the decoder should invoke. We don't invoke functions directly on the client because * we want to test the decoder's correctness in isolation. */ typedef int(aws_mqtt5_on_packet_received_fn)( enum aws_mqtt5_packet_type type, void *packet_view, void *decoder_callback_user_data); typedef int(aws_mqtt5_on_publish_payload_data_fn)( struct aws_mqtt5_packet_publish_view *publish_view, struct aws_byte_cursor payload, void *decoder_callback_user_data); /** * per-packet-type decoding function signature */ typedef int(aws_mqtt5_decoding_fn)(struct aws_mqtt5_decoder *decoder); /** * table of decoding functions. Tests use an augmented version that includes decoders for packet types normally * only decoded by an mqtt server. */ struct aws_mqtt5_decoder_function_table { aws_mqtt5_decoding_fn *decoders_by_packet_type[16]; }; /** * Basic decoder configuration. */ struct aws_mqtt5_decoder_options { void *callback_user_data; aws_mqtt5_on_packet_received_fn *on_packet_received; const struct aws_mqtt5_decoder_function_table *decoder_table; }; struct aws_mqtt5_decoder { struct aws_allocator *allocator; struct aws_mqtt5_decoder_options options; enum aws_mqtt5_decoder_state state; /* * decode scratch space: packets may get fully buffered here before decode * Exceptions: * when the incoming io message buffer contains the entire packet, we decode directly from it instead */ struct aws_byte_buf scratch_space; /* * packet type and flags */ uint8_t packet_first_byte; uint32_t remaining_length; /* * Packet decoders work from this cursor. It may point to scratch_space (for packets that were delivered * in more than one fragment) or to an io message buffer that contains the entire packet. */ struct aws_byte_cursor packet_cursor; struct aws_mqtt5_inbound_topic_alias_resolver *topic_alias_resolver; }; AWS_EXTERN_C_BEGIN /** * One-time initialization for an mqtt5 decoder * * @param decoder decoder to initialize * @param allocator allocator to use for memory allocation * @param options configuration options * @return success/failure */ AWS_MQTT_API int aws_mqtt5_decoder_init( struct aws_mqtt5_decoder *decoder, struct aws_allocator *allocator, struct aws_mqtt5_decoder_options *options); /** * Cleans up an mqtt5 decoder * * @param decoder decoder to clean up */ AWS_MQTT_API void aws_mqtt5_decoder_clean_up(struct aws_mqtt5_decoder *decoder); /** * Resets the state of an mqtt5 decoder. 
Used whenever a new connection is established * * @param decoder decoder to reset state for */ AWS_MQTT_API void aws_mqtt5_decoder_reset(struct aws_mqtt5_decoder *decoder); /** * Basic entry point for all incoming mqtt5 data once the basic connection has been established * * @param decoder decoder to decode data with * @param data the data to decode * @return success/failure - failure implies a need to shut down the connection */ AWS_MQTT_API int aws_mqtt5_decoder_on_data_received(struct aws_mqtt5_decoder *decoder, struct aws_byte_cursor data); /** * Sets the optional inbound alias resolver that the decoder should use during the lifetime of a connection * * @param decoder decoder to apply inbound topic alias resolution to * @param resolver inbound topic alias resolver */ AWS_MQTT_API void aws_mqtt5_decoder_set_inbound_topic_alias_resolver( struct aws_mqtt5_decoder *decoder, struct aws_mqtt5_inbound_topic_alias_resolver *resolver); /** * Default decoding table; tests use an augmented version with decoders for packets that only the server needs to * decode. */ AWS_MQTT_API extern const struct aws_mqtt5_decoder_function_table *g_aws_mqtt5_default_decoder_table; AWS_EXTERN_C_END /* Decode helpers */ AWS_EXTERN_C_BEGIN /** * Decodes, if possible, a variable length integer from a cursor. If the decode is successful, the cursor is advanced * past the variable length integer encoding. This can be used both for streaming and non-streaming decode operations. * * @param cursor data to decode from * @param dest where to put a successfully decoded variable length integer * @return the result of attempting the decode: {success, error, not enough data} Does not set aws_last_error. */ AWS_MQTT_API enum aws_mqtt5_decode_result_type aws_mqtt5_decode_vli(struct aws_byte_cursor *cursor, uint32_t *dest); /** * Decodes an MQTT5 user property from a cursor * * @param packet_cursor data to decode from * @param properties property set to add the decoded property to * @return success/failure - failures implies connection termination */ AWS_MQTT_API int aws_mqtt5_decode_user_property( struct aws_byte_cursor *packet_cursor, struct aws_mqtt5_user_property_set *properties); AWS_EXTERN_C_END /* Decode helper macros operating on a cursor */ /* * u8 and u16 decode are a little different in order to support encoded values that are widened to larger storage. * To make that safe, we decode to a local and then assign the local to the final spot. There should be no * complaints as long as the implicit conversion is the same size or wider. * * Some u8 examples include qos (one byte encode -> int-based enum) and various reason codes * Some u16 examples include cursor lengths decoded directly into a cursor's len field (u16 -> size_t) */ #define AWS_MQTT5_DECODE_U8(cursor_ptr, u8_ptr, error_label) \ { \ uint8_t decoded_value = 0; \ if (!aws_byte_cursor_read_u8((cursor_ptr), (&decoded_value))) { \ goto error_label; \ } \ *u8_ptr = decoded_value; \ } #define AWS_MQTT5_DECODE_U8_OPTIONAL(cursor_ptr, u8_ptr, u8_ptr_ptr, error_label) \ AWS_MQTT5_DECODE_U8(cursor_ptr, u8_ptr, error_label); \ *(u8_ptr_ptr) = (u8_ptr); #define AWS_MQTT5_DECODE_U16(cursor_ptr, u16_ptr, error_label) \ { \ uint16_t decoded_value = 0; \ if (!aws_byte_cursor_read_be16((cursor_ptr), (&decoded_value))) { \ goto error_label; \ } \ *u16_ptr = decoded_value; \ } /* * In addition to decoding a length prefix, this also verifies that the length prefix does not exceed the source * cursor length. 
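*
* Usage sketch for this family of helpers (illustrative; the enclosing decoder function is assumed to
* declare the error label and to raise or propagate an appropriate error there):
*
*   struct aws_byte_cursor topic;
*   AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &topic, error);
*   ...
* error:
*   return aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);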
*/ #define AWS_MQTT5_DECODE_U16_PREFIX(cursor_ptr, u16_ptr, error_label) \ AWS_MQTT5_DECODE_U16((cursor_ptr), (u16_ptr), error_label); \ if (cursor_ptr->len < *(u16_ptr)) { \ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); \ goto error_label; \ } #define AWS_MQTT5_DECODE_U16_OPTIONAL(cursor_ptr, u16_ptr, u16_ptr_ptr, error_label) \ AWS_MQTT5_DECODE_U16((cursor_ptr), u16_ptr, error_label); \ *(u16_ptr_ptr) = (u16_ptr); #define AWS_MQTT5_DECODE_U32(cursor_ptr, u32_ptr, error_label) \ if (!aws_byte_cursor_read_be32((cursor_ptr), (u32_ptr))) { \ goto error_label; \ } #define AWS_MQTT5_DECODE_U32_OPTIONAL(cursor_ptr, u32_ptr, u32_ptr_ptr, error_label) \ AWS_MQTT5_DECODE_U32((cursor_ptr), u32_ptr, error_label); \ *(u32_ptr_ptr) = (u32_ptr); #define AWS_MQTT5_DECODE_VLI(cursor_ptr, u32_ptr, error_label) \ if (AWS_MQTT5_DRT_SUCCESS != aws_mqtt5_decode_vli((cursor_ptr), (u32_ptr))) { \ goto error_label; \ } /* decodes both the length prefix and the following cursor field */ #define AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(cursor_ptr, dest_cursor_ptr, error_label) \ { \ uint16_t prefix_length = 0; \ AWS_MQTT5_DECODE_U16_PREFIX((cursor_ptr), &prefix_length, error_label) \ \ *(dest_cursor_ptr) = aws_byte_cursor_advance((cursor_ptr), prefix_length); \ } #define AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( \ cursor_ptr, dest_cursor_ptr, dest_cursor_ptr_ptr, error_label) \ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR((cursor_ptr), (dest_cursor_ptr), error_label) \ *(dest_cursor_ptr_ptr) = (dest_cursor_ptr); #endif /* AWS_MQTT_MQTT5_DECODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_encoder.h000066400000000000000000000374761456575232400303660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_MQTT_MQTT5_ENCODER_H #define AWS_MQTT_MQTT5_ENCODER_H #include #include #include #include struct aws_mqtt5_client; struct aws_mqtt5_encoder; struct aws_mqtt5_outbound_topic_alias_resolver; /** * We encode packets by looking at all of the packet's values/properties and building a sequence of encoding steps. * Each encoding step is a simple, primitive operation of which there are two types: * (1) encode an integer in some fashion (fixed width or variable length) * (2) encode a raw sequence of bytes (either a cursor or a stream) * * Once the encoding step sequence is constructed, we do the actual encoding by iterating the sequence, performing * the steps. This is interruptible/resumable, so we can perform encodings that span multiple buffers easily. */ enum aws_mqtt5_encoding_step_type { /* encode a single byte */ AWS_MQTT5_EST_U8, /* encode a 16 bit unsigned integer in network order */ AWS_MQTT5_EST_U16, /* encode a 32 bit unsigned integer in network order */ AWS_MQTT5_EST_U32, /* * encode a 32 bit unsigned integer using MQTT variable length encoding. It is assumed that the 32 bit value has * already been checked against the maximum allowed value for variable length encoding. */ AWS_MQTT5_EST_VLI, /* * encode an array of bytes as referenced by a cursor. Most of the time this step is paired with either a prefix * specifying the number of bytes or a preceding variable length integer from which the data length can be * computed. */ AWS_MQTT5_EST_CURSOR, /* encode a stream of bytes. The same context that applies to cursor encoding above also applies here. */ AWS_MQTT5_EST_STREAM, }; /** * Elemental unit of packet encoding. 
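*
* Once a packet's steps have been appended, the list is drained in a resumable loop along these lines
* (an illustrative sketch; packet_type, packet_view, and message_buffer come from the caller):
*
*   aws_mqtt5_encoder_append_packet_encoding(&encoder, packet_type, packet_view);
*   enum aws_mqtt5_encoding_result result = AWS_MQTT5_ER_OUT_OF_ROOM;
*   while (result == AWS_MQTT5_ER_OUT_OF_ROOM) {
*       result = aws_mqtt5_encoder_encode_to_buffer(&encoder, &message_buffer);
*       // flush message_buffer downstream and acquire a fresh one if steps remain
*   }
*   // result is AWS_MQTT5_ER_FINISHED on success, AWS_MQTT5_ER_ERROR on a fatal encoding failure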
*/ struct aws_mqtt5_encoding_step { enum aws_mqtt5_encoding_step_type type; union { uint8_t value_u8; uint16_t value_u16; uint32_t value_u32; struct aws_byte_cursor value_cursor; struct aws_input_stream *value_stream; } value; }; /** * signature of a function that can takes a view assumed to be a specific packet type and appends the encoding * steps necessary to encode that packet into the encoder */ typedef int(aws_mqtt5_encode_begin_packet_type_fn)(struct aws_mqtt5_encoder *encoder, const void *view); /** * Per-packet-type table of encoding functions */ struct aws_mqtt5_encoder_function_table { aws_mqtt5_encode_begin_packet_type_fn *encoders_by_packet_type[16]; }; /** * Configuration options for an mqtt5 encoder. Everything is optional at this time. */ struct aws_mqtt5_encoder_options { struct aws_mqtt5_client *client; const struct aws_mqtt5_encoder_function_table *encoders; }; /** * An encoder is just a list of steps and a current location for the encoding process within that list. */ struct aws_mqtt5_encoder { struct aws_mqtt5_encoder_options config; struct aws_array_list encoding_steps; size_t current_encoding_step_index; struct aws_mqtt5_outbound_topic_alias_resolver *topic_alias_resolver; }; /** * Encoding proceeds until either * (1) a fatal error is reached * (2) the steps are done * (3) no room is left in the buffer */ enum aws_mqtt5_encoding_result { /* * A fatal error state was reached during encoding. This forces a connection shut down with no DISCONNECT. * An error can arise from several sources: * (1) Bug in the encoder (length calculations, step calculations) * (2) Bug in the view validation logic that is assumed to have caught any illegal/forbidden situations like * values-too-big, etc... * (3) System error when reading from a stream that is more than just a memory buffer * * Regardless of the origin, the connection is in an unusable state once this happens. * * If the encode function returns this value, aws last error will have an error value in it */ AWS_MQTT5_ER_ERROR, /* All encoding steps in the encoder have been completed. The encoder is ready for a new packet. */ AWS_MQTT5_ER_FINISHED, /* * The buffer has been filled as closely to full as possible and there are still encoding steps remaining that * have not been completed. It is technically possible to hit a permanent out-of-room state if the buffer size * is less than 4. Don't do that. */ AWS_MQTT5_ER_OUT_OF_ROOM, }; AWS_EXTERN_C_BEGIN /** * Initializes an mqtt5 encoder * * @param encoder encoder to initialize * @param allocator allocator to use for all memory allocation * @param options encoder configuration options to use * @return */ AWS_MQTT_API int aws_mqtt5_encoder_init( struct aws_mqtt5_encoder *encoder, struct aws_allocator *allocator, struct aws_mqtt5_encoder_options *options); /** * Cleans up an mqtt5 encoder * * @param encoder encoder to free up all resources for */ AWS_MQTT_API void aws_mqtt5_encoder_clean_up(struct aws_mqtt5_encoder *encoder); /** * Resets the state on an mqtt5 encoder. Ok to call after a failure to a packet _begin_packet() function. 
Not ok to * call after a failed call to aws_mqtt5_encoder_encode_to_buffer() * * @param encoder encoder to reset * @return */ AWS_MQTT_API void aws_mqtt5_encoder_reset(struct aws_mqtt5_encoder *encoder); /** * Adds all of the primitive encoding steps necessary to encode an MQTT5 packet * * @param encoder encoder to add encoding steps to * @param packet_type type of packet to encode * @param packet_view view into the corresponding packet type * @return success/failure */ AWS_MQTT_API int aws_mqtt5_encoder_append_packet_encoding( struct aws_mqtt5_encoder *encoder, enum aws_mqtt5_packet_type packet_type, const void *packet_view); /* * We intend that the client implementation only submits one packet at a time to the encoder, corresponding to the * current operation of the client. This is an important property to maintain to allow us to correlate socket * completions with packets/operations sent. It's the client's responsibility though; the encoder is dumb. * * The client will greedily use as much of an iomsg's buffer as it can if there are multiple operations (packets) * queued and there is sufficient room. */ /** * Asks the encoder to encode as much as it possibly can into the supplied buffer. * * @param encoder encoder to do the encoding * @param buffer where to encode into * @return result of the encoding process. aws last error will be set appropriately. */ AWS_MQTT_API enum aws_mqtt5_encoding_result aws_mqtt5_encoder_encode_to_buffer( struct aws_mqtt5_encoder *encoder, struct aws_byte_buf *buffer); /** * Sets the outbound alias resolver that the encoder should use during the lifetime of a connection * * @param encoder encoder to apply outbound topic alias resolution to * @param resolver outbound topic alias resolver */ AWS_MQTT_API void aws_mqtt5_encoder_set_outbound_topic_alias_resolver( struct aws_mqtt5_encoder *encoder, struct aws_mqtt5_outbound_topic_alias_resolver *resolver); /** * Default encoder table. Tests copy it and augment with additional functions in order to do round-trip encode-decode * tests for packets that are only encoded on the server. */ AWS_MQTT_API extern const struct aws_mqtt5_encoder_function_table *g_aws_mqtt5_encoder_default_function_table; AWS_EXTERN_C_END /****************************************************************************************************************** * Encoding helper functions and macros - placed in header so that test-only encoding has access ******************************************************************************************************************/ AWS_EXTERN_C_BEGIN /** * Utility function to calculate the encoded packet size of a given packet view. Used to validate operations * against the server's maximum packet size. * * @param packet_type type of packet the view represents * @param packet_view packet view * @param packet_size output parameter, set if the size was successfully calculated * @return success/failure */ AWS_MQTT_API int aws_mqtt5_packet_view_get_encoded_size( enum aws_mqtt5_packet_type packet_type, const void *packet_view, size_t *packet_size); /** * Encodes a variable length integer to a buffer. 
Assumes the buffer has been checked for sufficient room (this * is not a streaming/resumable operation) * * @param buf buffer to encode to * @param value value to encode * @return success/failure */ AWS_MQTT_API int aws_mqtt5_encode_variable_length_integer(struct aws_byte_buf *buf, uint32_t value); /** * Computes how many bytes are necessary to encode a value as a variable length integer * @param value value to encode * @param encode_size output parameter for the encoding size * @return success/failure where failure is exclusively value-is-illegal-and-too-large-to-encode */ AWS_MQTT_API int aws_mqtt5_get_variable_length_encode_size(size_t value, size_t *encode_size); AWS_MQTT_API void aws_mqtt5_encoder_push_step_u8(struct aws_mqtt5_encoder *encoder, uint8_t value); AWS_MQTT_API void aws_mqtt5_encoder_push_step_u16(struct aws_mqtt5_encoder *encoder, uint16_t value); AWS_MQTT_API void aws_mqtt5_encoder_push_step_u32(struct aws_mqtt5_encoder *encoder, uint32_t value); AWS_MQTT_API int aws_mqtt5_encoder_push_step_vli(struct aws_mqtt5_encoder *encoder, uint32_t value); AWS_MQTT_API void aws_mqtt5_encoder_push_step_cursor(struct aws_mqtt5_encoder *encoder, struct aws_byte_cursor value); AWS_MQTT_API size_t aws_mqtt5_compute_user_property_encode_length( const struct aws_mqtt5_user_property *properties, size_t user_property_count); AWS_MQTT_API void aws_mqtt5_add_user_property_encoding_steps( struct aws_mqtt5_encoder *encoder, const struct aws_mqtt5_user_property *user_properties, size_t user_property_count); AWS_EXTERN_C_END /* macros to simplify encoding step list construction */ #define ADD_ENCODE_STEP_U8(encoder, value) aws_mqtt5_encoder_push_step_u8(encoder, (uint8_t)(value)) #define ADD_ENCODE_STEP_U16(encoder, value) aws_mqtt5_encoder_push_step_u16(encoder, (uint16_t)(value)) #define ADD_ENCODE_STEP_U32(encoder, value) aws_mqtt5_encoder_push_step_u32(encoder, (uint32_t)(value)) #define ADD_ENCODE_STEP_CURSOR(encoder, cursor) aws_mqtt5_encoder_push_step_cursor(encoder, (cursor)) #define ADD_ENCODE_STEP_VLI(encoder, value) \ if (aws_mqtt5_encoder_push_step_vli(encoder, (value))) { \ return AWS_OP_ERR; \ } #define ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, cursor) \ { \ aws_mqtt5_encoder_push_step_u16(encoder, (uint16_t)((cursor).len)); \ aws_mqtt5_encoder_push_step_cursor(encoder, (cursor)); \ } #define ADD_ENCODE_STEP_OPTIONAL_LENGTH_PREFIXED_CURSOR(encoder, cursor_ptr) \ if (cursor_ptr != NULL) { \ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, *cursor_ptr); \ } /* Property-oriented macros for encode steps. Properties have an additional prefix byte saying what their type is. 
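*
* Conceptually (illustrative pseudo-expansion; property_type_byte and optional_u16_ptr are placeholders):
*
*   ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY(encoder, property_type_byte, optional_u16_ptr);
*   // expands to nothing when optional_u16_ptr == NULL; otherwise pushes
*   //   U8 step:  property_type_byte
*   //   U16 step: *optional_u16_ptr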
*/ #define ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY(encoder, property_value, value_ptr) \ if ((value_ptr) != NULL) { \ ADD_ENCODE_STEP_U8(encoder, property_value); \ ADD_ENCODE_STEP_U8(encoder, *(value_ptr)); \ } #define ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY(encoder, property_value, value_ptr) \ if ((value_ptr) != NULL) { \ ADD_ENCODE_STEP_U8(encoder, property_value); \ ADD_ENCODE_STEP_U16(encoder, *(value_ptr)); \ } #define ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(encoder, property_value, value_ptr) \ if ((value_ptr) != NULL) { \ ADD_ENCODE_STEP_U8(encoder, property_value); \ ADD_ENCODE_STEP_U32(encoder, *(value_ptr)); \ } #define ADD_ENCODE_STEP_OPTIONAL_VLI_PROPERTY(encoder, property_value, value_ptr) \ if ((value_ptr) != NULL) { \ ADD_ENCODE_STEP_U8(encoder, property_value); \ ADD_ENCODE_STEP_VLI(encoder, *(value_ptr)); \ } #define ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, property_type, cursor_ptr) \ if ((cursor_ptr) != NULL) { \ ADD_ENCODE_STEP_U8(encoder, property_type); \ ADD_ENCODE_STEP_U16(encoder, (cursor_ptr)->len); \ ADD_ENCODE_STEP_CURSOR(encoder, *(cursor_ptr)); \ } /* * Macros to simplify packet size calculations, which are significantly complicated by mqtt5's many optional * properties. */ #define ADD_OPTIONAL_U8_PROPERTY_LENGTH(property_ptr, length) \ if ((property_ptr) != NULL) { \ (length) += 2; \ } #define ADD_OPTIONAL_U16_PROPERTY_LENGTH(property_ptr, length) \ if ((property_ptr) != NULL) { \ (length) += 3; \ } #define ADD_OPTIONAL_U32_PROPERTY_LENGTH(property_ptr, length) \ if ((property_ptr) != NULL) { \ (length) += 5; \ } #define ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(property_ptr, length) \ if ((property_ptr) != NULL) { \ (length) += 3 + ((property_ptr)->len); \ } #endif /* AWS_MQTT_MQTT5_ENCODER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_options_storage.h000066400000000000000000000276241456575232400321600ustar00rootroot00000000000000#ifndef AWS_MQTT_MQTT5_OPERATION_H #define AWS_MQTT_MQTT5_OPERATION_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include struct aws_client_bootstrap; struct aws_mqtt5_client; struct aws_mqtt5_client_options; struct aws_mqtt5_operation; struct aws_string; /* Basic vtable for all mqtt operations. 
Implementations are per-packet type */ struct aws_mqtt5_operation_vtable { void (*aws_mqtt5_operation_completion_fn)( struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *completion_view); void ( *aws_mqtt5_operation_set_packet_id_fn)(struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id); aws_mqtt5_packet_id_t *(*aws_mqtt5_operation_get_packet_id_address_fn)(const struct aws_mqtt5_operation *operation); int (*aws_mqtt5_operation_validate_vs_connection_settings_fn)( const void *operation_packet_view, const struct aws_mqtt5_client *client); uint32_t (*aws_mqtt5_operation_get_ack_timeout_override_fn)(const struct aws_mqtt5_operation *operation); }; /* Flags that indicate the way in which an operation is currently affecting the statistics of the client */ enum aws_mqtt5_operation_statistic_state_flags { /* The operation is not affecting the client's statistics at all */ AWS_MQTT5_OSS_NONE = 0, /* The operation is affecting the client's "incomplete operation" statistics */ AWS_MQTT5_OSS_INCOMPLETE = 1 << 0, /* The operation is affecting the client's "unacked operation" statistics */ AWS_MQTT5_OSS_UNACKED = 1 << 1, }; /** * This is the base structure for all mqtt5 operations. It includes the type, a ref count, timeout timepoint, * and list management. */ struct aws_mqtt5_operation { const struct aws_mqtt5_operation_vtable *vtable; struct aws_ref_count ref_count; uint64_t ack_timeout_timepoint_ns; struct aws_priority_queue_node priority_queue_node; struct aws_linked_list_node node; enum aws_mqtt5_packet_type packet_type; const void *packet_view; /* How this operation is currently affecting the statistics of the client */ enum aws_mqtt5_operation_statistic_state_flags statistic_state_flags; /* Size of the MQTT packet this operation represents */ size_t packet_size; void *impl; }; struct aws_mqtt5_operation_connect { struct aws_mqtt5_operation base; struct aws_allocator *allocator; struct aws_mqtt5_packet_connect_storage options_storage; }; struct aws_mqtt5_operation_publish { struct aws_mqtt5_operation base; struct aws_allocator *allocator; struct aws_mqtt5_packet_publish_storage options_storage; struct aws_mqtt5_publish_completion_options completion_options; }; struct aws_mqtt5_operation_puback { struct aws_mqtt5_operation base; struct aws_allocator *allocator; struct aws_mqtt5_packet_puback_storage options_storage; }; struct aws_mqtt5_operation_disconnect { struct aws_mqtt5_operation base; struct aws_allocator *allocator; struct aws_mqtt5_packet_disconnect_storage options_storage; struct aws_mqtt5_disconnect_completion_options external_completion_options; struct aws_mqtt5_disconnect_completion_options internal_completion_options; }; struct aws_mqtt5_operation_subscribe { struct aws_mqtt5_operation base; struct aws_allocator *allocator; struct aws_mqtt5_packet_subscribe_storage options_storage; struct aws_mqtt5_subscribe_completion_options completion_options; }; struct aws_mqtt5_operation_unsubscribe { struct aws_mqtt5_operation base; struct aws_allocator *allocator; struct aws_mqtt5_packet_unsubscribe_storage options_storage; struct aws_mqtt5_unsubscribe_completion_options completion_options; }; struct aws_mqtt5_operation_pingreq { struct aws_mqtt5_operation base; struct aws_allocator *allocator; }; struct aws_mqtt5_client_options_storage { struct aws_allocator *allocator; struct aws_string *host_name; uint32_t port; struct aws_client_bootstrap *bootstrap; struct aws_socket_options socket_options; struct 
aws_tls_connection_options tls_options; struct aws_tls_connection_options *tls_options_ptr; struct aws_http_proxy_options http_proxy_options; struct aws_http_proxy_config *http_proxy_config; aws_mqtt5_transform_websocket_handshake_fn *websocket_handshake_transform; void *websocket_handshake_transform_user_data; aws_mqtt5_publish_received_fn *publish_received_handler; void *publish_received_handler_user_data; enum aws_mqtt5_client_session_behavior_type session_behavior; enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_and_flow_control_options; enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior; enum aws_exponential_backoff_jitter_mode retry_jitter_mode; uint64_t min_reconnect_delay_ms; uint64_t max_reconnect_delay_ms; uint64_t min_connected_time_to_reset_reconnect_delay_ms; uint32_t ack_timeout_seconds; uint32_t ping_timeout_ms; uint32_t connack_timeout_ms; struct aws_mqtt5_client_topic_alias_options topic_aliasing_options; struct aws_mqtt5_packet_connect_storage *connect; aws_mqtt5_client_connection_event_callback_fn *lifecycle_event_handler; void *lifecycle_event_handler_user_data; aws_mqtt5_client_termination_completion_fn *client_termination_handler; void *client_termination_handler_user_data; struct aws_host_resolution_config host_resolution_override; }; AWS_EXTERN_C_BEGIN /* Operation base */ AWS_MQTT_API struct aws_mqtt5_operation *aws_mqtt5_operation_acquire(struct aws_mqtt5_operation *operation); AWS_MQTT_API struct aws_mqtt5_operation *aws_mqtt5_operation_release(struct aws_mqtt5_operation *operation); AWS_MQTT_API void aws_mqtt5_operation_complete( struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *associated_view); AWS_MQTT_API void aws_mqtt5_operation_set_packet_id( struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id); AWS_MQTT_API aws_mqtt5_packet_id_t aws_mqtt5_operation_get_packet_id(const struct aws_mqtt5_operation *operation); AWS_MQTT_API aws_mqtt5_packet_id_t *aws_mqtt5_operation_get_packet_id_address( const struct aws_mqtt5_operation *operation); AWS_MQTT_API int aws_mqtt5_operation_validate_vs_connection_settings( const struct aws_mqtt5_operation *operation, const struct aws_mqtt5_client *client); AWS_MQTT_API uint32_t aws_mqtt5_operation_get_ack_timeout_override(const struct aws_mqtt5_operation *operation); /* Connect */ AWS_MQTT_API struct aws_mqtt5_operation_connect *aws_mqtt5_operation_connect_new( struct aws_allocator *allocator, const struct aws_mqtt5_packet_connect_view *connect_options); AWS_MQTT_API int aws_mqtt5_packet_connect_view_validate(const struct aws_mqtt5_packet_connect_view *connect_view); AWS_MQTT_API void aws_mqtt5_packet_connect_view_log( const struct aws_mqtt5_packet_connect_view *connect_view, enum aws_log_level level); /* Connack */ AWS_MQTT_API void aws_mqtt5_packet_connack_view_log( const struct aws_mqtt5_packet_connack_view *connack_view, enum aws_log_level level); /* Disconnect */ AWS_MQTT_API struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_new( struct aws_allocator *allocator, const struct aws_mqtt5_packet_disconnect_view *disconnect_options, const struct aws_mqtt5_disconnect_completion_options *external_completion_options, const struct aws_mqtt5_disconnect_completion_options *internal_completion_options); AWS_MQTT_API struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_acquire( struct aws_mqtt5_operation_disconnect *disconnect_op); AWS_MQTT_API struct 
aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_release( struct aws_mqtt5_operation_disconnect *disconnect_op); AWS_MQTT_API int aws_mqtt5_packet_disconnect_view_validate( const struct aws_mqtt5_packet_disconnect_view *disconnect_view); AWS_MQTT_API void aws_mqtt5_packet_disconnect_view_log( const struct aws_mqtt5_packet_disconnect_view *disconnect_view, enum aws_log_level level); /* Publish */ AWS_MQTT_API struct aws_mqtt5_operation_publish *aws_mqtt5_operation_publish_new( struct aws_allocator *allocator, const struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_publish_view *publish_options, const struct aws_mqtt5_publish_completion_options *completion_options); AWS_MQTT_API int aws_mqtt5_packet_publish_view_validate(const struct aws_mqtt5_packet_publish_view *publish_view); AWS_MQTT_API void aws_mqtt5_packet_publish_view_log( const struct aws_mqtt5_packet_publish_view *publish_view, enum aws_log_level level); /* Puback */ AWS_MQTT_API struct aws_mqtt5_operation_puback *aws_mqtt5_operation_puback_new( struct aws_allocator *allocator, const struct aws_mqtt5_packet_puback_view *puback_options); AWS_MQTT_API void aws_mqtt5_packet_puback_view_log( const struct aws_mqtt5_packet_puback_view *puback_view, enum aws_log_level level); /* Subscribe */ AWS_MQTT_API struct aws_mqtt5_operation_subscribe *aws_mqtt5_operation_subscribe_new( struct aws_allocator *allocator, const struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_subscribe_view *subscribe_options, const struct aws_mqtt5_subscribe_completion_options *completion_options); AWS_MQTT_API int aws_mqtt5_packet_subscribe_view_validate(const struct aws_mqtt5_packet_subscribe_view *subscribe_view); AWS_MQTT_API void aws_mqtt5_packet_subscribe_view_log( const struct aws_mqtt5_packet_subscribe_view *subscribe_view, enum aws_log_level level); /* Suback */ AWS_MQTT_API void aws_mqtt5_packet_suback_view_log( const struct aws_mqtt5_packet_suback_view *suback_view, enum aws_log_level level); /* Unsubscribe */ AWS_MQTT_API struct aws_mqtt5_operation_unsubscribe *aws_mqtt5_operation_unsubscribe_new( struct aws_allocator *allocator, const struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options, const struct aws_mqtt5_unsubscribe_completion_options *completion_options); AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_view_validate( const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view); AWS_MQTT_API void aws_mqtt5_packet_unsubscribe_view_log( const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view, enum aws_log_level level); /* Unsuback */ AWS_MQTT_API void aws_mqtt5_packet_unsuback_view_log( const struct aws_mqtt5_packet_unsuback_view *unsuback_view, enum aws_log_level level); /* PINGREQ */ AWS_MQTT_API struct aws_mqtt5_operation_pingreq *aws_mqtt5_operation_pingreq_new(struct aws_allocator *allocator); /* client */ AWS_MQTT_API struct aws_mqtt5_client_options_storage *aws_mqtt5_client_options_storage_new( struct aws_allocator *allocator, const struct aws_mqtt5_client_options *options); AWS_MQTT_API void aws_mqtt5_client_options_storage_destroy(struct aws_mqtt5_client_options_storage *options_storage); AWS_MQTT_API int aws_mqtt5_client_options_validate(const struct aws_mqtt5_client_options *client_options); AWS_MQTT_API void aws_mqtt5_client_options_storage_log( const struct aws_mqtt5_client_options_storage *options_storage, enum aws_log_level level); AWS_MQTT_API bool aws_mqtt5_client_keep_alive_options_are_valid( uint16_t keep_alive_interval_seconds, 
uint32_t ping_timeout_ms); AWS_EXTERN_C_END #endif /* AWS_MQTT_MQTT5_OPERATION_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_to_mqtt3_adapter_impl.h000066400000000000000000000272131456575232400332260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_MQTT_MQTT5_TO_MQTT3_ADAPTER_IMPL_H #define AWS_MQTT_MQTT5_TO_MQTT3_ADAPTER_IMPL_H #include #include #include #include #include #include #include struct aws_mqtt_subscription_set; struct aws_mqtt5_to_mqtt3_adapter_publish_options { struct aws_mqtt_client_connection_5_impl *adapter; const struct aws_byte_cursor topic; enum aws_mqtt_qos qos; bool retain; const struct aws_byte_cursor payload; aws_mqtt_op_complete_fn *on_complete; void *on_complete_userdata; }; /* * A subscribe with no subscriptions represents a re-subscribe of all internally tracked topics. While this * is a bit hacky, the alternative is to copy-and-paste almost the entire multi-subscribe adapter operation and * supporting logic, which is approximately 300 lines. */ struct aws_mqtt5_to_mqtt3_adapter_subscribe_options { struct aws_mqtt_client_connection_5_impl *adapter; struct aws_mqtt_topic_subscription *subscriptions; size_t subscription_count; aws_mqtt_suback_fn *on_suback; void *on_suback_user_data; aws_mqtt_suback_multi_fn *on_multi_suback; void *on_multi_suback_user_data; }; struct aws_mqtt5_to_mqtt3_adapter_unsubscribe_options { struct aws_mqtt_client_connection_5_impl *adapter; struct aws_byte_cursor topic_filter; aws_mqtt_op_complete_fn *on_unsuback; void *on_unsuback_user_data; }; enum aws_mqtt5_to_mqtt3_adapter_operation_type { AWS_MQTT5TO3_AOT_PUBLISH, AWS_MQTT5TO3_AOT_SUBSCRIBE, AWS_MQTT5TO3_AOT_UNSUBSCRIBE, }; struct aws_mqtt5_to_mqtt3_adapter_operation_vtable { void (*fail_fn)(void *impl, int error_code); }; struct aws_mqtt5_to_mqtt3_adapter_operation_base { struct aws_allocator *allocator; struct aws_ref_count ref_count; const struct aws_mqtt5_to_mqtt3_adapter_operation_vtable *vtable; void *impl; /* * Holds an internal reference to the adapter while traveling to the event loop. Reference gets released * after intake on the event loop. * * We avoid calling back into a deleted adapter by zeroing out the * mqtt5 operation callbacks for everything we've submitted before final mqtt5 client release. */ struct aws_mqtt_client_connection_5_impl *adapter; bool holding_adapter_ref; struct aws_task submission_task; enum aws_mqtt5_to_mqtt3_adapter_operation_type type; uint16_t id; }; struct aws_mqtt5_to_mqtt3_adapter_operation_publish { struct aws_mqtt5_to_mqtt3_adapter_operation_base base; /* * holds a reference to the MQTT5 client publish operation until the operation completes or our adapter * goes away. * * In the case where we're going away, we zero out the MQTT5 operation callbacks to prevent crash-triggering * notifications. */ struct aws_mqtt5_operation_publish *publish_op; aws_mqtt_op_complete_fn *on_publish_complete; void *on_publish_complete_user_data; }; struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe { struct aws_mqtt5_to_mqtt3_adapter_operation_base base; /* * holds a reference to the MQTT5 client subscribe operation until the operation completes or our adapter * goes away. * * In the case where we're going away, we zero out the MQTT5 operation callbacks to prevent crash-triggering * notifications. 
*/ struct aws_mqtt5_operation_subscribe *subscribe_op; /* aws_array_list */ struct aws_array_list subscriptions; aws_mqtt_suback_fn *on_suback; void *on_suback_user_data; aws_mqtt_suback_multi_fn *on_multi_suback; void *on_multi_suback_user_data; }; struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe { struct aws_mqtt5_to_mqtt3_adapter_operation_base base; /* * holds a reference to the MQTT5 client unsubscribe operation until the operation completes or our adapter * goes away. * * In the case where we're going away, we zero out the MQTT5 operation callbacks to prevent crash-triggering * notifications. */ struct aws_mqtt5_operation_unsubscribe *unsubscribe_op; struct aws_byte_buf topic_filter; aws_mqtt_op_complete_fn *on_unsuback; void *on_unsuback_user_data; }; /* Sequencing (PUBLISH example): Mqtt311 public API call Create cross thread task Create adapter op -> Create and attach mqtt5 op allocate id and add operation to adapter table Add adapter op's internal ref to adapter submit cross thread task to event loop return id or 0 Adapter Op reaches event loop task function: (from this point, all callbacks must be safe-guarded) terminated = true Safe handler: If adapter not terminated: terminated = false Synchronously enqueue operation to mqtt5 client if terminated: remove adapter op from table destroy adapter op Release adapter op's internal ref to adapter On publish completion: Safe handler: If not terminated: invoke mqtt311 callback Remove adapter op from table Destroy adapter op On final destroy (zero internal refs): Iterate all incomplete adapter operations and cancel them: zero callbacks and remove from queue if in queue and unbound Destroy all adapter ops Clear table */ struct aws_mqtt5_to_mqtt3_adapter_operation_table { struct aws_mutex lock; struct aws_hash_table operations; uint16_t next_id; }; /* * The adapter maintains a notion of state based on how its 311 API has been used. This state guides how it handles * external lifecycle events. * * Operational (sourced from the adapter) events are always relayed unless the adapter has been terminated. */ enum aws_mqtt_adapter_state { /* * The 311 API has had connect() called but that connect has not yet resolved. * * If it resolves successfully we will move to the STAY_CONNECTED state which will relay lifecycle callbacks * transparently. * * If it resolves unsuccessfully, we will move to the STAY_DISCONNECTED state where we will ignore lifecycle * events because, from the 311 API's perspective, nothing should be getting emitted. */ AWS_MQTT_AS_FIRST_CONNECT, /* * A call to the 311 connect API has resolved successfully. Relay all lifecycle events until told otherwise. */ AWS_MQTT_AS_STAY_CONNECTED, /* * We have not observed a successful initial connection attempt via the 311 API (or disconnect has been * invoked afterwards). Ignore all lifecycle events. */ AWS_MQTT_AS_STAY_DISCONNECTED, }; struct aws_mqtt_client_connection_5_impl { struct aws_allocator *allocator; struct aws_mqtt_client_connection base; struct aws_mqtt5_client *client; struct aws_mqtt5_listener *listener; struct aws_event_loop *loop; /* * The current adapter state based on the sequence of connect(), disconnect(), and connection completion events. * This affects how the adapter reacts to incoming mqtt5 events. Under certain conditions, we may change * this state value based on unexpected events (stopping the mqtt5 client underneath the adapter, for example) */ enum aws_mqtt_adapter_state adapter_state; /* * Tracks all references from external sources (ie users). 
Incremented and decremented by the public * acquire/release APIs of the 311 connection. * * When this value drops to zero, the terminated flag is set and no further callbacks will be invoked. This * also starts the asynchronous destruction process for the adapter. */ struct aws_ref_count external_refs; /* * Tracks all references to the adapter from internal sources (temporary async processes that need the * adapter to stay alive for an interval of time, like sending tasks across thread boundaries). * * Starts with a single reference that is held until the adapter's listener has fully detached from the mqtt5 * client. * * Once the internal ref count drops to zero, the adapter may be destroyed synchronously. */ struct aws_ref_count internal_refs; struct aws_mqtt5_to_mqtt3_adapter_operation_table operational_state; struct aws_mqtt_subscription_set *subscriptions; /* All fields after here are internal to the adapter event loop thread */ /* 311 interface callbacks */ aws_mqtt_client_on_connection_interrupted_fn *on_interrupted; void *on_interrupted_user_data; aws_mqtt_client_on_connection_resumed_fn *on_resumed; void *on_resumed_user_data; aws_mqtt_client_on_connection_closed_fn *on_closed; void *on_closed_user_data; aws_mqtt_client_on_connection_success_fn *on_connection_success; void *on_connection_success_user_data; aws_mqtt_client_on_connection_failure_fn *on_connection_failure; void *on_connection_failure_user_data; aws_mqtt_client_publish_received_fn *on_any_publish; void *on_any_publish_user_data; aws_mqtt_transform_websocket_handshake_fn *websocket_handshake_transformer; void *websocket_handshake_transformer_user_data; aws_mqtt5_transform_websocket_handshake_complete_fn *mqtt5_websocket_handshake_completion_function; void *mqtt5_websocket_handshake_completion_user_data; /* (mutually exclusive) 311 interface one-time transient callbacks */ aws_mqtt_client_on_disconnect_fn *on_disconnect; void *on_disconnect_user_data; aws_mqtt_client_on_connection_complete_fn *on_connection_complete; void *on_connection_complete_user_data; aws_mqtt_client_on_connection_termination_fn *on_termination; void *on_termination_user_data; }; AWS_EXTERN_C_BEGIN AWS_MQTT_API void aws_mqtt5_to_mqtt3_adapter_operation_table_init( struct aws_mqtt5_to_mqtt3_adapter_operation_table *table, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_to_mqtt3_adapter_operation_table_clean_up( struct aws_mqtt5_to_mqtt3_adapter_operation_table *table); AWS_MQTT_API int aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation( struct aws_mqtt5_to_mqtt3_adapter_operation_table *table, struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation); AWS_MQTT_API void aws_mqtt5_to_mqtt3_adapter_operation_table_remove_operation( struct aws_mqtt5_to_mqtt3_adapter_operation_table *table, uint16_t operation_id); AWS_MQTT_API struct aws_mqtt5_to_mqtt3_adapter_operation_publish *aws_mqtt5_to_mqtt3_adapter_operation_new_publish( struct aws_allocator *allocator, const struct aws_mqtt5_to_mqtt3_adapter_publish_options *options); AWS_MQTT_API struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *aws_mqtt5_to_mqtt3_adapter_operation_new_subscribe( struct aws_allocator *allocator, const struct aws_mqtt5_to_mqtt3_adapter_subscribe_options *options, struct aws_mqtt_client_connection_5_impl *adapter); AWS_MQTT_API struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe * aws_mqtt5_to_mqtt3_adapter_operation_new_unsubscribe( struct aws_allocator *allocator, const struct aws_mqtt5_to_mqtt3_adapter_unsubscribe_options *options); AWS_MQTT_API 
struct aws_mqtt5_to_mqtt3_adapter_operation_base *aws_mqtt5_to_mqtt3_adapter_operation_release( struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation); AWS_MQTT_API struct aws_mqtt5_to_mqtt3_adapter_operation_base *aws_mqtt5_to_mqtt3_adapter_operation_acquire( struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation); AWS_EXTERN_C_END #endif /* AWS_MQTT_MQTT5_TO_MQTT3_ADAPTER_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_topic_alias.h000066400000000000000000000044431456575232400312220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_MQTT_MQTT5_TOPIC_ALIAS_H #define AWS_MQTT_MQTT5_TOPIC_ALIAS_H #include #include #include /* outbound resolvers are polymorphic; implementations are completely internal */ struct aws_mqtt5_outbound_topic_alias_resolver; /* there are only two possibilities for inbound resolution: on or off */ struct aws_mqtt5_inbound_topic_alias_resolver { struct aws_allocator *allocator; struct aws_array_list topic_aliases; }; AWS_EXTERN_C_BEGIN AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_init( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_inbound_topic_alias_resolver_clean_up( struct aws_mqtt5_inbound_topic_alias_resolver *resolver); AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_reset( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, uint16_t cache_size); AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_resolve_alias( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, uint16_t alias, struct aws_byte_cursor *topic_out); AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_register_alias( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, uint16_t alias, struct aws_byte_cursor topic); AWS_MQTT_API struct aws_mqtt5_outbound_topic_alias_resolver *aws_mqtt5_outbound_topic_alias_resolver_new( struct aws_allocator *allocator, enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_alias_behavior); AWS_MQTT_API void aws_mqtt5_outbound_topic_alias_resolver_destroy( struct aws_mqtt5_outbound_topic_alias_resolver *resolver); AWS_MQTT_API int aws_mqtt5_outbound_topic_alias_resolver_reset( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, uint16_t topic_alias_maximum); AWS_MQTT_API int aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, const struct aws_mqtt5_packet_publish_view *publish_view, uint16_t *topic_alias_out, struct aws_byte_cursor *topic_out); AWS_EXTERN_C_END #endif /* AWS_MQTT_MQTT5_TOPIC_ALIAS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_utils.h000066400000000000000000000365401456575232400300760ustar00rootroot00000000000000#ifndef AWS_MQTT_MQTT5_UTILS_H #define AWS_MQTT_MQTT5_UTILS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include struct aws_byte_buf; struct aws_mqtt5_negotiated_settings; #define AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER 268435455 #define AWS_MQTT5_MAXIMUM_PACKET_SIZE (5 + AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) #define AWS_MQTT5_RECEIVE_MAXIMUM 65535 #define AWS_MQTT5_PINGREQ_ENCODED_SIZE 2 /* property type codes */ #define AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR ((uint8_t)1) #define AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL ((uint8_t)2) #define AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE ((uint8_t)3) #define AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC ((uint8_t)8) #define AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA ((uint8_t)9) #define AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER ((uint8_t)11) #define AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL ((uint8_t)17) #define AWS_MQTT5_PROPERTY_TYPE_ASSIGNED_CLIENT_IDENTIFIER ((uint8_t)18) #define AWS_MQTT5_PROPERTY_TYPE_SERVER_KEEP_ALIVE ((uint8_t)19) #define AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD ((uint8_t)21) #define AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA ((uint8_t)22) #define AWS_MQTT5_PROPERTY_TYPE_REQUEST_PROBLEM_INFORMATION ((uint8_t)23) #define AWS_MQTT5_PROPERTY_TYPE_WILL_DELAY_INTERVAL ((uint8_t)24) #define AWS_MQTT5_PROPERTY_TYPE_REQUEST_RESPONSE_INFORMATION ((uint8_t)25) #define AWS_MQTT5_PROPERTY_TYPE_RESPONSE_INFORMATION ((uint8_t)26) #define AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE ((uint8_t)28) #define AWS_MQTT5_PROPERTY_TYPE_REASON_STRING ((uint8_t)31) #define AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM ((uint8_t)33) #define AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM ((uint8_t)34) #define AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS ((uint8_t)35) #define AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_QOS ((uint8_t)36) #define AWS_MQTT5_PROPERTY_TYPE_RETAIN_AVAILABLE ((uint8_t)37) #define AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY ((uint8_t)38) #define AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE ((uint8_t)39) #define AWS_MQTT5_PROPERTY_TYPE_WILDCARD_SUBSCRIPTIONS_AVAILABLE ((uint8_t)40) #define AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIERS_AVAILABLE ((uint8_t)41) #define AWS_MQTT5_PROPERTY_TYPE_SHARED_SUBSCRIPTIONS_AVAILABLE ((uint8_t)42) /* decode/encode bit masks and positions */ #define AWS_MQTT5_CONNECT_FLAGS_WILL_BIT (1U << 2) #define AWS_MQTT5_CONNECT_FLAGS_CLEAN_START_BIT (1U << 1) #define AWS_MQTT5_CONNECT_FLAGS_USER_NAME_BIT (1U << 7) #define AWS_MQTT5_CONNECT_FLAGS_PASSWORD_BIT (1U << 6) #define AWS_MQTT5_CONNECT_FLAGS_WILL_RETAIN_BIT (1U << 5) #define AWS_MQTT5_CONNECT_FLAGS_WILL_QOS_BIT_POSITION 3 #define AWS_MQTT5_CONNECT_FLAGS_WILL_QOS_BIT_MASK 0x03 #define AWS_MQTT5_SUBSCRIBE_FLAGS_NO_LOCAL (1U << 2) #define AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_AS_PUBLISHED (1U << 3) #define AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_HANDLING_TYPE_BIT_POSITION 4 #define AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_HANDLING_TYPE_BIT_MASK 0x03 #define AWS_MQTT5_SUBSCRIBE_FLAGS_QOS_BIT_POSITION 0 #define AWS_MQTT5_SUBSCRIBE_FLAGS_QOS_BIT_MASK 0x03 /* Static AWS IoT Core Limit/Quota Values */ #define AWS_IOT_CORE_MAXIMUM_TOPIC_LENGTH 256 #define AWS_IOT_CORE_MAXIMUM_TOPIC_SEGMENTS 8 /* Dynamic IoT Core Limits */ #define AWS_IOT_CORE_PUBLISH_PER_SECOND_LIMIT 100 #define AWS_IOT_CORE_THROUGHPUT_LIMIT (512 * 1024) /* Client configuration defaults when parameter left zero */ #define AWS_MQTT5_DEFAULT_SOCKET_CONNECT_TIMEOUT_MS 10000 #define AWS_MQTT5_CLIENT_DEFAULT_MIN_RECONNECT_DELAY_MS 1000 #define AWS_MQTT5_CLIENT_DEFAULT_MAX_RECONNECT_DELAY_MS 120000 #define AWS_MQTT5_CLIENT_DEFAULT_MIN_CONNECTED_TIME_TO_RESET_RECONNECT_DELAY_MS 30000 #define 
AWS_MQTT5_CLIENT_DEFAULT_PING_TIMEOUT_MS 30000 #define AWS_MQTT5_CLIENT_DEFAULT_CONNACK_TIMEOUT_MS 20000 #define AWS_MQTT5_CLIENT_DEFAULT_OPERATION_TIMEOUNT_SECONDS 60 #define AWS_MQTT5_CLIENT_DEFAULT_INBOUND_TOPIC_ALIAS_CACHE_SIZE 25 #define AWS_MQTT5_CLIENT_DEFAULT_OUTBOUND_TOPIC_ALIAS_CACHE_SIZE 25 AWS_EXTERN_C_BEGIN /** * CONNECT packet MQTT5 prefix which includes "MQTT" encoded as a utf-8 string followed by the protocol number (5) * * {0x00, 0x04, "MQTT", 0x05} */ AWS_MQTT_API extern struct aws_byte_cursor g_aws_mqtt5_connect_protocol_cursor; /** * Simple helper function to compute the first byte of an MQTT packet encoding as a function of 4 bit flags * and the packet type. * * @param packet_type type of MQTT packet * @param flags 4-bit wide flags, specific to each packet type, 0-valued for most * @return the expected/required first byte of a packet of that type with flags set */ AWS_MQTT_API uint8_t aws_mqtt5_compute_fixed_header_byte1(enum aws_mqtt5_packet_type packet_type, uint8_t flags); AWS_MQTT_API void aws_mqtt5_negotiated_settings_log( struct aws_mqtt5_negotiated_settings *negotiated_settings, enum aws_log_level level); /** * Assigns and stores a client id for use on CONNECT * * @param negotiated_settings settings to apply client id to * @param client_id client id to set */ AWS_MQTT_API int aws_mqtt5_negotiated_settings_apply_client_id( struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_byte_cursor *client_id); /** * Resets negotiated_settings to defaults reconciled with client set properties. * Called on init of mqtt5 Client and just prior to a CONNECT. * * @param negotiated_settings struct containing settings to be set * @param packet_connect_view Read-only snapshot of a CONNECT packet * @return void */ AWS_MQTT_API void aws_mqtt5_negotiated_settings_reset( struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_mqtt5_packet_connect_view *packet_connect_view); /** * Checks properties received from Server CONNACK and reconcile with negotiated_settings * * @param negotiated_settings struct containing settings to be set * @param connack_data Read-only snapshot of a CONNACK packet * @return void */ AWS_MQTT_API void aws_mqtt5_negotiated_settings_apply_connack( struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_mqtt5_packet_connack_view *connack_data); /** * Converts a disconnect reason code into the Reason Code Name, as it appears in the mqtt5 spec. * * @param reason_code a disconnect reason code * @return name associated with the reason code */ AWS_MQTT_API const char *aws_mqtt5_disconnect_reason_code_to_c_string( enum aws_mqtt5_disconnect_reason_code reason_code, bool *is_valid); /** * Converts a connect reason code into the Reason Code Name, as it appears in the mqtt5 spec. * * @param reason_code a connect reason code * @return name associated with the reason code */ AWS_MQTT_API const char *aws_mqtt5_connect_reason_code_to_c_string(enum aws_mqtt5_connect_reason_code reason_code); /** * Converts a publish reason code into the Reason Code Name, as it appears in the mqtt5 spec. * * @param reason_code a publish reason code * @return name associated with the reason code */ AWS_MQTT_API const char *aws_mqtt5_puback_reason_code_to_c_string(enum aws_mqtt5_puback_reason_code reason_code); /** * Converts a subscribe reason code into the Reason Code Name, as it appears in the mqtt5 spec. 
* * @param reason_code a subscribe reason code * @return name associated with the reason code */ AWS_MQTT_API const char *aws_mqtt5_suback_reason_code_to_c_string(enum aws_mqtt5_suback_reason_code reason_code); /** * Converts a unsubscribe reason code into the Reason Code Name, as it appears in the mqtt5 spec. * * @param reason_code an unsubscribe reason code * @return name associated with the reason code */ AWS_MQTT_API const char *aws_mqtt5_unsuback_reason_code_to_c_string(enum aws_mqtt5_unsuback_reason_code reason_code); /** * Converts a session behavior type value to a readable description. * * @param session_behavior type of session behavior * @return short string describing the session behavior */ AWS_MQTT_API const char *aws_mqtt5_client_session_behavior_type_to_c_string( enum aws_mqtt5_client_session_behavior_type session_behavior); /** * Converts a session behavior type value to a final non-default value. * * @param session_behavior type of session behavior * @return session behavior value where default has been mapped to its intended meaning */ AWS_MQTT_API enum aws_mqtt5_client_session_behavior_type aws_mqtt5_client_session_behavior_type_to_non_default( enum aws_mqtt5_client_session_behavior_type session_behavior); /** * Converts an outbound topic aliasing behavior type value to a readable description. * * @param outbound_aliasing_behavior type of outbound topic aliasing behavior * @return short string describing the outbound topic aliasing behavior */ AWS_MQTT_API const char *aws_mqtt5_outbound_topic_alias_behavior_type_to_c_string( enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior); /** * Checks an outbound aliasing behavior type value for validity * * @param outbound_aliasing_behavior value to check * @return true if this is a valid value, false otherwise */ AWS_MQTT_API bool aws_mqtt5_outbound_topic_alias_behavior_type_validate( enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior); /** * Converts an outbound topic aliasing behavior type value to a final non-default value. * * @param outbound_aliasing_behavior type of outbound topic aliasing behavior * @return outbound topic aliasing value where default has been mapped to its intended meaning */ AWS_MQTT_API enum aws_mqtt5_client_outbound_topic_alias_behavior_type aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default( enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior); /** * Converts an inbound topic aliasing behavior type value to a readable description. * * @param inbound_aliasing_behavior type of inbound topic aliasing behavior * @return short string describing the inbound topic aliasing behavior */ AWS_MQTT_API const char *aws_mqtt5_inbound_topic_alias_behavior_type_to_c_string( enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior); /** * Checks an inbound aliasing behavior type value for validity * * @param inbound_aliasing_behavior value to check * @return true if this is a valid value, false otherwise */ AWS_MQTT_API bool aws_mqtt5_inbound_topic_alias_behavior_type_validate( enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior); /** * Converts an inbound topic aliasing behavior type value to a final non-default value. 
* * @param inbound_aliasing_behavior type of inbound topic aliasing behavior * @return inbound topic aliasing value where default has been mapped to its intended meaning */ AWS_MQTT_API enum aws_mqtt5_client_inbound_topic_alias_behavior_type aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default( enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior); /** * Converts an extended validation and flow control options value to a readable description. * * @param extended_validation_behavior type of extended validation and flow control * @return short string describing the extended validation and flow control behavior */ AWS_MQTT_API const char *aws_mqtt5_extended_validation_and_flow_control_options_to_c_string( enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_behavior); /** * Converts an offline queue behavior type value to a readable description. * * @param offline_queue_behavior type of offline queue behavior * @return short string describing the offline queue behavior */ AWS_MQTT_API const char *aws_mqtt5_client_operation_queue_behavior_type_to_c_string( enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior); /** * Converts an offline queue behavior type value to a final non-default value. * * @param offline_queue_behavior type of offline queue behavior * @return offline queue behavior value where default has been mapped to its intended meaning */ AWS_MQTT_API enum aws_mqtt5_client_operation_queue_behavior_type aws_mqtt5_client_operation_queue_behavior_type_to_non_default( enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior); /** * Converts a lifecycle event type value to a readable description. * * @param lifecycle_event type of lifecycle event * @return short string describing the lifecycle event type */ AWS_MQTT_API const char *aws_mqtt5_client_lifecycle_event_type_to_c_string( enum aws_mqtt5_client_lifecycle_event_type lifecycle_event); /** * Converts a payload format indicator value to a readable description. * * @param format_indicator type of payload format indicator * @return short string describing the payload format indicator */ AWS_MQTT_API const char *aws_mqtt5_payload_format_indicator_to_c_string( enum aws_mqtt5_payload_format_indicator format_indicator); /** * Converts a retain handling type value to a readable description. * * @param retain_handling_type type of retain handling * @return short string describing the retain handling type */ AWS_MQTT_API const char *aws_mqtt5_retain_handling_type_to_c_string( enum aws_mqtt5_retain_handling_type retain_handling_type); /** * Converts a packet type value to a readable description. * * @param packet_type type of packet * @return short string describing the packet type */ AWS_MQTT_API const char *aws_mqtt5_packet_type_to_c_string(enum aws_mqtt5_packet_type packet_type); /** * Computes a uniformly-distributed random number in the specified range. Not intended for cryptographic purposes. * * @param from one end of the range to sample from * @param to other end of the range to sample from * @return a random number from the supplied range, with roughly a uniform distribution */ AWS_MQTT_API uint64_t aws_mqtt5_client_random_in_range(uint64_t from, uint64_t to); /** * Utility function to skip the "$aws/rules//" prefix of a topic. Technically this works for topic * filters too. 
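 *
 * A lookup sketch (illustrative only, not part of the original header): the rule name "my_rule" below is a
 * hypothetical example value, and aws_byte_cursor_from_c_str comes from aws-c-common.
 *
 *   struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("$aws/rules/my_rule/sensor/temperature");
 *   struct aws_byte_cursor suffix = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(topic);
 *   // suffix now refers to "sensor/temperature"; a topic without a leading rules prefix comes back unchanged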
* * @param topic_cursor topic to get the non-rules suffix for * @return remaining part of the topic after the leading AWS IoT Rules prefix has been skipped, if present */ AWS_MQTT_API struct aws_byte_cursor aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix( struct aws_byte_cursor topic_cursor); /** * Computes the number of topic segments in a topic or topic filter * @param topic_cursor topic or topic filter * @return number of topic segments in the topic or topic filter */ AWS_MQTT_API size_t aws_mqtt5_topic_get_segment_count(struct aws_byte_cursor topic_cursor); /** * Checks a topic filter for validity against AWS IoT Core rules * @param topic_filter_cursor topic filter to check * @return true if valid, false otherwise */ AWS_MQTT_API bool aws_mqtt_is_valid_topic_filter_for_iot_core(struct aws_byte_cursor topic_filter_cursor); /** * Checks a topic for validity against AWS IoT Core rules * @param topic_cursor topic to check * @return true if valid, false otherwise */ AWS_MQTT_API bool aws_mqtt_is_valid_topic_for_iot_core(struct aws_byte_cursor topic_cursor); /** * Checks if a topic filter matches a shared subscription according to the mqtt5 spec * @param topic_cursor topic to check * @return true if this matches the definition of a shared subscription, false otherwise */ AWS_MQTT_API bool aws_mqtt_is_topic_filter_shared_subscription(struct aws_byte_cursor topic_cursor); AWS_EXTERN_C_END #endif /* AWS_MQTT_MQTT5_UTILS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/private/v5/rate_limiters.h000066400000000000000000000075521456575232400304500ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_RATE_LIMITERS_H #define AWS_RATE_LIMITERS_H #include #include struct aws_rate_limiter_token_bucket_options { /* Clock function override. If left null, the high resolution clock will be used */ aws_io_clock_fn *clock_fn; /* How many tokens regenerate per second? */ uint64_t tokens_per_second; /* Initial amount of tokens the limiter will start with */ uint64_t initial_token_count; /* * Maximum amount of tokens the limiter can hold. Regenerated tokens that exceed this maximum are * discarded */ uint64_t maximum_token_count; }; /** * A token-bucket based rate limiter. * * Has an unusually complex implementation due to implementer-desired constraints: * * (1) Model regeneration as an integral rate per second. This is for ease-of-use. A regeneration interval would * be a much simpler implementation, but not as intuitive (or accurate for non-integral rates). * (2) Integer math only. Not comfortable falling back on doubles and not having a good understanding of the * accuracy issues, over time, that doing so would create. * (3) Minimize as much as possible the dangers of multiplication saturation and integer division round-down. * (4) No integer division round-off "error" accumulation allowed. Arguments could be made that it might be small * enough to never make a difference but I'd rather not even have the argument at all. * (5) A perfectly accurate how-long-must-I-wait query. Not just a safe over-estimate. 
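 *
 * A minimal usage sketch (illustrative only, not part of the original header; the token counts below are
 * arbitrary and the clock override is left NULL so the default high resolution clock is used):
 *
 *   struct aws_rate_limiter_token_bucket limiter;
 *   struct aws_rate_limiter_token_bucket_options options = {
 *       .tokens_per_second = 100,
 *       .initial_token_count = 100,
 *       .maximum_token_count = 100,
 *   };
 *
 *   if (aws_rate_limiter_token_bucket_init(&limiter, &options) != AWS_OP_SUCCESS) {
 *       // handle the failure
 *   }
 *
 *   if (aws_rate_limiter_token_bucket_can_take_tokens(&limiter, 1)) {
 *       aws_rate_limiter_token_bucket_take_tokens(&limiter, 1);  // spend a token and do the work
 *   } else {
 *       uint64_t wait_ns = aws_rate_limiter_token_bucket_compute_wait_for_tokens(&limiter, 1);
 *       // reschedule the work wait_ns nanoseconds from now
 *   }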
*/ struct aws_rate_limiter_token_bucket { uint64_t last_service_time; uint64_t current_token_count; uint64_t fractional_nanos; uint64_t fractional_nano_tokens; struct aws_rate_limiter_token_bucket_options config; }; AWS_EXTERN_C_BEGIN /** * Initializes a token-bucket-based rate limiter * * @param limiter rate limiter to intiialize * @param options configuration values for the token bucket rate limiter * @return AWS_OP_SUCCESS/AWS_OP_ERR */ AWS_MQTT_API int aws_rate_limiter_token_bucket_init( struct aws_rate_limiter_token_bucket *limiter, const struct aws_rate_limiter_token_bucket_options *options); /** * Resets a token-bucket-based rate limiter * * @param limiter rate limiter to reset */ AWS_MQTT_API void aws_rate_limiter_token_bucket_reset(struct aws_rate_limiter_token_bucket *limiter); /** * Queries if the token bucket has a number of tokens currently available * * @param limiter token bucket rate limiter to query, non-const because token count is lazily updated * @param token_count how many tokens to check for * @return true if that many tokens are available, false otherwise */ AWS_MQTT_API bool aws_rate_limiter_token_bucket_can_take_tokens( struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count); /** * Takes a number of tokens from the token bucket rate limiter * * @param limiter token bucket rate limiter to take from * @param token_count how many tokens to take * @return AWS_OP_SUCCESS if there were that many tokens available, AWS_OP_ERR otherwise */ AWS_MQTT_API int aws_rate_limiter_token_bucket_take_tokens( struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count); /** * Queries a token-bucket-based rate limiter for how long, in nanoseconds, until a specified amount of tokens will * be available. * * @param limiter token-bucket-based rate limiter to query * @param token_count how many tokens need to be avilable * @return how long the caller must wait, in nanoseconds, before that many tokens are available */ AWS_MQTT_API uint64_t aws_rate_limiter_token_bucket_compute_wait_for_tokens( struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count); AWS_EXTERN_C_END #endif /* AWS_RATE_LIMITERS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/v5/000077500000000000000000000000001456575232400237515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_client.h000066400000000000000000000671531456575232400265460ustar00rootroot00000000000000#ifndef AWS_MQTT_MQTT5_CLIENT_H #define AWS_MQTT_MQTT5_CLIENT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; struct aws_client_bootstrap; struct aws_host_resolution_config; struct aws_http_message; struct aws_mqtt5_client; struct aws_mqtt5_client_lifecycle_event; struct aws_tls_connection_options; struct aws_socket_options; /* public client-related enums */ /** * Controls how the mqtt client should behave with respect to mqtt sessions. */ enum aws_mqtt5_client_session_behavior_type { /** * Maps to AWS_MQTT5_CSBT_CLEAN */ AWS_MQTT5_CSBT_DEFAULT, /** * Always join a new, clean session */ AWS_MQTT5_CSBT_CLEAN, /** * Always attempt to rejoin an existing session after an initial connection success. */ AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS, /** * Always attempt to rejoin an existing session. 
Since the client does not support durable session persistence, * this option is not guaranteed to be spec compliant because any unacknowledged qos1 publishes (which are * part of the client session state) will not be present on the initial connection. Until we support * durable session resumption, this option is technically spec-breaking, but useful. */ AWS_MQTT5_CSBT_REJOIN_ALWAYS, }; /** * Outbound topic aliasing behavior is controlled by this type. * * Topic alias behavior is described in https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901113 * * If the server allows topic aliasing, this setting controls how topic aliases are used on PUBLISH packets sent * from the client to the server. * * If topic aliasing is not supported by the server, this setting has no effect and any attempts to directly * manipulate the topic alias id in outbound publishes will be ignored. */ enum aws_mqtt5_client_outbound_topic_alias_behavior_type { /** * Maps to AWS_MQTT5_COTABT_DISABLED This keeps the client from being broken (by default) if the broker * topic aliasing implementation has a problem. */ AWS_MQTT5_COTABT_DEFAULT, /** * Outbound aliasing is the user's responsibility. Client will cache and use * previously-established aliases if they fall within the negotiated limits of the connection. * * The user must still always submit a full topic in their publishes because disconnections disrupt * topic alias mappings unpredictably. The client will properly use the alias when the current connection * has seen the alias binding already. */ AWS_MQTT5_COTABT_MANUAL, /** * Client ignores any user-specified topic aliasing and acts on the outbound alias set as an LRU cache. */ AWS_MQTT5_COTABT_LRU, /** * Completely disable outbound topic aliasing. */ AWS_MQTT5_COTABT_DISABLED }; /** * Inbound topic aliasing behavior is controlled by this type. * * Topic alias behavior is described in https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901113 * * This setting controls whether or not the client will send a positive topic alias maximum to the server * in its CONNECT packets. * * If topic aliasing is not supported by the server, this setting has no net effect. */ enum aws_mqtt5_client_inbound_topic_alias_behavior_type { /** * Maps to AWS_MQTT5_CITABT_DISABLED */ AWS_MQTT5_CITABT_DEFAULT, /** * Allow the server to send PUBLISH packets to the client that use topic aliasing */ AWS_MQTT5_CITABT_ENABLED, /** * Forbid the server from sending PUBLISH packets to the client that use topic aliasing */ AWS_MQTT5_CITABT_DISABLED }; /** * Configuration struct for all client topic aliasing behavior. If this is left null, then all default options * (as it zeroed) will be used. */ struct aws_mqtt5_client_topic_alias_options { /** * Controls what kind of outbound topic aliasing behavior the client should attempt to use. */ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_topic_alias_behavior; /** * If outbound topic aliasing is set to LRU, this controls the maximum size of the cache. If outbound topic * aliasing is set to LRU and this is zero, a sensible default is used (25). If outbound topic aliasing is not * set to LRU, then this setting has no effect. * * The final size of the cache is determined by the minimum of this setting and the value of the * topic_alias_maximum property of the received CONNACK. If the received CONNACK does not have an explicit * positive value for that field, outbound topic aliasing is disabled for the duration of that connection. 
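 *
 * A configuration sketch for the enclosing struct (illustrative only, not part of the original header):
 *
 *   struct aws_mqtt5_client_topic_alias_options topic_alias_options = {
 *       .outbound_topic_alias_behavior = AWS_MQTT5_COTABT_LRU,
 *       .outbound_alias_cache_max_size = 0,  // zero selects the sensible default (25)
 *       .inbound_topic_alias_behavior = AWS_MQTT5_CITABT_ENABLED,
 *       .inbound_alias_cache_size = 0,       // zero selects the sensible default (25)
 *   };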
*/ uint16_t outbound_alias_cache_max_size; /** * Controls what kind of inbound topic aliasing behavior the client should use. * * Even if inbound topic aliasing is enabled, it is up to the server to choose whether or not to use it. */ enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_topic_alias_behavior; /** * If inbound topic aliasing is enabled, this will control the size of the inbound alias cache. If inbound * aliases are enabled and this is zero, then a sensible default will be used (25). If inbound aliases are * disabled, this setting has no effect. * * Behaviorally, this value overrides anything present in the topic_alias_maximum field of * the CONNECT packet options. */ uint16_t inbound_alias_cache_size; }; /** * Extended validation and flow control options * * Potentially a point of expansion in the future. We could add custom controls letting people override * the Aws IOT Core limits based on their account properties. We could, with IoT Core support, add dynamic * limit recognition via user properties as well. */ enum aws_mqtt5_extended_validation_and_flow_control_options { /** * Do not do any additional validation or flow control outside of the MQTT5 spec */ AWS_MQTT5_EVAFCO_NONE, /** * Apply additional client-side operational flow control that respects the * default AWS IoT Core limits. * * Applies the following flow control: * (1) Outbound throughput throttled to 512KB/s * (2) Outbound publish TPS throttled to 100 */ AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS, }; /** * Controls how disconnects affect the queued and in-progress operations submitted to the client. Also controls * how operations are handled while the client is not connected. In particular, if the client is not connected, * then any operation that would be failed on disconnect (according to these rules) will be rejected. */ enum aws_mqtt5_client_operation_queue_behavior_type { /* * Maps to AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT */ AWS_MQTT5_COQBT_DEFAULT, /* * Requeues QoS 1+ publishes on disconnect; unacked publishes go to the front, unprocessed publishes stay * in place. All other operations (QoS 0 publishes, subscribe, unsubscribe) are failed. */ AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT, /* * Qos 0 publishes that are not complete at the time of disconnection are failed. Unacked QoS 1+ publishes are * requeued at the head of the line for immediate retransmission on a session resumption. All other operations * are requeued in original order behind any retransmissions. */ AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT, /* * All operations that are not complete at the time of disconnection are failed, except those operations that * the mqtt 5 spec requires to be retransmitted (unacked qos1+ publishes). */ AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT, }; /** * Type of a client lifecycle event */ enum aws_mqtt5_client_lifecycle_event_type { /** * Emitted when the client begins an attempt to connect to the remote endpoint. * * Mandatory event fields: client, user_data */ AWS_MQTT5_CLET_ATTEMPTING_CONNECT, /** * Emitted after the client connects to the remote endpoint and receives a successful CONNACK. * Every ATTEMPTING_CONNECT will be followed by exactly one CONNECTION_SUCCESS or one CONNECTION_FAILURE. * * Mandatory event fields: client, user_data, connack_data, settings */ AWS_MQTT5_CLET_CONNECTION_SUCCESS, /** * Emitted at any point during the connection process when it has conclusively failed. 
* Every ATTEMPTING_CONNECT will be followed by exactly one CONNECTION_SUCCESS or one CONNECTION_FAILURE. * * Mandatory event fields: client, user_data, error_code * Conditional event fields: connack_data */ AWS_MQTT5_CLET_CONNECTION_FAILURE, /** * Lifecycle event containing information about a disconnect. Every CONNECTION_SUCCESS will eventually be * followed by one and only one DISCONNECTION. * * Mandatory event fields: client, user_data, error_code * Conditional event fields: disconnect_data */ AWS_MQTT5_CLET_DISCONNECTION, /** * Lifecycle event notifying the user that the client has entered the STOPPED state. Entering this state will * cause the client to wipe all MQTT session state. * * Mandatory event fields: client, user_data */ AWS_MQTT5_CLET_STOPPED, }; /* client-related callback function signatures */ /** * Signature of the continuation function to be called after user-code transforms a websocket handshake request */ typedef void(aws_mqtt5_transform_websocket_handshake_complete_fn)( struct aws_http_message *request, int error_code, void *complete_ctx); /** * Signature of the websocket handshake request transformation function. After transformation, the completion * function must be invoked to send the request. */ typedef void(aws_mqtt5_transform_websocket_handshake_fn)( struct aws_http_message *request, void *user_data, aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn, void *complete_ctx); /** * Callback signature for mqtt5 client lifecycle events. */ typedef void(aws_mqtt5_client_connection_event_callback_fn)(const struct aws_mqtt5_client_lifecycle_event *event); /** * Signature of callback to invoke on Publish success/failure. */ typedef void(aws_mqtt5_publish_completion_fn)( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx); /** * Signature of callback to invoke on Subscribe success/failure. */ typedef void(aws_mqtt5_subscribe_completion_fn)( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx); /** * Signature of callback to invoke on Unsubscribe success/failure. */ typedef void(aws_mqtt5_unsubscribe_completion_fn)( const struct aws_mqtt5_packet_unsuback_view *unsuback, int error_code, void *complete_ctx); /** * Signature of callback to invoke on Publish received */ typedef void(aws_mqtt5_publish_received_fn)(const struct aws_mqtt5_packet_publish_view *publish, void *user_data); /** * Signature of a listener publish received callback that returns an indicator whether or not the publish * was handled by the listener. 
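 *
 * A handler sketch (illustrative only, not part of the original header): the topic value is hypothetical,
 * the publish view's topic field name is assumed from aws-c-mqtt's packet view types, and the
 * aws_byte_cursor helpers come from aws-c-common.
 *
 *   static bool s_on_listener_publish_received(const struct aws_mqtt5_packet_publish_view *publish, void *user_data) {
 *       (void)user_data;
 *       struct aws_byte_cursor interesting_topic = aws_byte_cursor_from_c_str("device/status");
 *       if (aws_byte_cursor_eq(&publish->topic, &interesting_topic)) {
 *           // process the publish here
 *           return true;   // report the publish as handled by this listener
 *       }
 *       return false;      // not handled; other handlers may still see it
 *   }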
*/ typedef bool( aws_mqtt5_listener_publish_received_fn)(const struct aws_mqtt5_packet_publish_view *publish, void *user_data); /** * Signature of callback to invoke when a DISCONNECT is fully written to the socket (or fails to be) */ typedef void(aws_mqtt5_disconnect_completion_fn)(int error_code, void *complete_ctx); /** * Signature of callback invoked when a client has completely destroyed itself */ typedef void(aws_mqtt5_client_termination_completion_fn)(void *complete_ctx); /* operation completion options structures */ /** * Completion options for the Publish operation */ struct aws_mqtt5_publish_completion_options { aws_mqtt5_publish_completion_fn *completion_callback; void *completion_user_data; uint32_t ack_timeout_seconds_override; }; /** * Completion options for the Subscribe operation */ struct aws_mqtt5_subscribe_completion_options { aws_mqtt5_subscribe_completion_fn *completion_callback; void *completion_user_data; uint32_t ack_timeout_seconds_override; }; /** * Completion options for the Unsubscribe operation */ struct aws_mqtt5_unsubscribe_completion_options { aws_mqtt5_unsubscribe_completion_fn *completion_callback; void *completion_user_data; uint32_t ack_timeout_seconds_override; }; /** * Completion options for the a DISCONNECT operation */ struct aws_mqtt5_disconnect_completion_options { aws_mqtt5_disconnect_completion_fn *completion_callback; void *completion_user_data; }; /** * Mqtt behavior settings that are dynamically negotiated as part of the CONNECT/CONNACK exchange. */ struct aws_mqtt5_negotiated_settings { /** * The maximum QoS used between the server and client. */ enum aws_mqtt5_qos maximum_qos; /** * the amount of time in seconds the server will retain the session after a disconnect. */ uint32_t session_expiry_interval; /** * the number of QoS 1 and QoS2 publications the server is willing to process concurrently. */ uint16_t receive_maximum_from_server; /** * the maximum packet size the server is willing to accept. */ uint32_t maximum_packet_size_to_server; /** * the highest value that the server will accept as a Topic Alias sent by the client. */ uint16_t topic_alias_maximum_to_server; /** * the highest value that the client will accept as a Topic Alias sent by the server. */ uint16_t topic_alias_maximum_to_client; /** * the amount of time in seconds before the server will disconnect the client for inactivity. */ uint16_t server_keep_alive; /** * whether the server supports retained messages. */ bool retain_available; /** * whether the server supports wildcard subscriptions. */ bool wildcard_subscriptions_available; /** * whether the server supports subscription identifiers */ bool subscription_identifiers_available; /** * whether the server supports shared subscriptions */ bool shared_subscriptions_available; /** * whether the client has rejoined an existing session. */ bool rejoined_session; struct aws_byte_buf client_id_storage; }; /** * Contains some simple statistics about the current state of the client's queue of operations */ struct aws_mqtt5_client_operation_statistics { /* * total number of operations submitted to the client that have not yet been completed. Unacked operations * are a subset of this. */ uint64_t incomplete_operation_count; /* * total packet size of operations submitted to the client that have not yet been completed. Unacked operations * are a subset of this. */ uint64_t incomplete_operation_size; /* * total number of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. 
*/ uint64_t unacked_operation_count; /* * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before * they can be completed. */ uint64_t unacked_operation_size; }; /** * Details about a client lifecycle event. */ struct aws_mqtt5_client_lifecycle_event { /** * Type of event this is. */ enum aws_mqtt5_client_lifecycle_event_type event_type; /** * Client this event corresponds to. Necessary (can't be replaced with user data) because the client * doesn't exist at the time the event callback user data is configured. */ struct aws_mqtt5_client *client; /** * Aws-c-* error code associated with the event */ int error_code; /** * User data associated with the client's lifecycle event handler. Set with client configuration. */ void *user_data; /** * If this event was caused by receiving a CONNACK, this will be a view of that packet. */ const struct aws_mqtt5_packet_connack_view *connack_data; /** * If this is a successful connection establishment, this will contain the negotiated mqtt5 behavioral settings */ const struct aws_mqtt5_negotiated_settings *settings; /** * If this event was caused by receiving a DISCONNECT, this will be a view of that packet. */ const struct aws_mqtt5_packet_disconnect_view *disconnect_data; }; /** * Basic mqtt5 client configuration struct. * * Contains desired connection properties * Configuration that represents properties of the mqtt5 CONNECT packet go in the connect view (connect_options) */ struct aws_mqtt5_client_options { /** * Host to establish mqtt connections to */ struct aws_byte_cursor host_name; /** * Port to establish mqtt connections to */ uint32_t port; /** * Client bootstrap to use whenever this client establishes a connection */ struct aws_client_bootstrap *bootstrap; /** * Socket options to use whenever this client establishes a connection */ const struct aws_socket_options *socket_options; /** * (Optional) Tls options to use whenever this client establishes a connection */ const struct aws_tls_connection_options *tls_options; /** * (Optional) Http proxy options to use whenever this client establishes a connection */ const struct aws_http_proxy_options *http_proxy_options; /** * (Optional) Websocket handshake transformation function and user data. Websockets are used if the * transformation function is non-null. */ aws_mqtt5_transform_websocket_handshake_fn *websocket_handshake_transform; void *websocket_handshake_transform_user_data; /** * All CONNECT-related options, includes the will configuration, if desired */ const struct aws_mqtt5_packet_connect_view *connect_options; /** * Controls session rejoin behavior */ enum aws_mqtt5_client_session_behavior_type session_behavior; /** * Controls if any additional AWS-specific validation or flow control should be performed by the client. */ enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_and_flow_control_options; /** * Controls how the client treats queued/in-progress operations when the connection drops for any reason. */ enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior; /** * Controls the exponential backoff behavior when the client is waiting to reconnect. * * See: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ */ enum aws_exponential_backoff_jitter_mode retry_jitter_mode; /** * Minimum amount of time in ms to wait before attempting to reconnect. If this is zero, a default of 1000 ms will * be used. 
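 *
 * A reconnect-tuning sketch (illustrative only, not part of the original header): options is assumed to be a
 * struct aws_mqtt5_client_options being filled out, and AWS_EXPONENTIAL_BACKOFF_JITTER_FULL is assumed to come
 * from aws-c-io's retry strategy header.
 *
 *   options.retry_jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_FULL;
 *   options.min_reconnect_delay_ms = 1000;                           // 1 second floor
 *   options.max_reconnect_delay_ms = 120000;                         // 2 minute ceiling
 *   options.min_connected_time_to_reset_reconnect_delay_ms = 30000;  // reset backoff after 30s of good connection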
*/ uint64_t min_reconnect_delay_ms; /** * Maximum amount of time in ms to wait before attempting to reconnect. If this is zero, a default of 120000 ms * will be used. */ uint64_t max_reconnect_delay_ms; /** * Amount of time that must elapse with a good connection before the reconnect delay is reset to the minimum. If * this zero, a default of 30000 ms will be used. */ uint64_t min_connected_time_to_reset_reconnect_delay_ms; /** * Time interval to wait after sending a PINGREQ for a PINGRESP to arrive. If one does not arrive, the connection * will be shut down. If this is zero, a default of 30000 ms will be used. */ uint32_t ping_timeout_ms; /** * Time interval to wait after sending a CONNECT request for a CONNACK to arrive. If one does not arrive, the * connection will be shut down. If this zero, a default of 20000 ms will be used. */ uint32_t connack_timeout_ms; /** * Time interval to wait for an ack after sending a SUBSCRIBE, UNSUBSCRIBE, or PUBLISH with QoS 1+ before * failing the packet, notifying the client of failure, and removing it. If this is zero, a default of 60 seconds * will be used. */ uint32_t ack_timeout_seconds; /** * Controls how the client uses mqtt5 topic aliasing. If NULL, zero-based defaults will be used. */ const struct aws_mqtt5_client_topic_alias_options *topic_aliasing_options; /** * Callback for received publish packets */ aws_mqtt5_publish_received_fn *publish_received_handler; void *publish_received_handler_user_data; /** * Callback and user data for all client lifecycle events. * Life cycle events include: * ConnectionSuccess * ConnectionFailure, * Disconnect * (client) Stopped * * Disconnect lifecycle events are 1-1 with -- strictly after -- ConnectionSuccess events. */ aws_mqtt5_client_connection_event_callback_fn *lifecycle_event_handler; void *lifecycle_event_handler_user_data; /** * Callback for when the client has completely destroyed itself */ aws_mqtt5_client_termination_completion_fn *client_termination_handler; void *client_termination_handler_user_data; /** * Options to override aspects of DNS resolution. If unspecified, use a default that matches the regular * configuration but changes the refresh frequency to a value that prevents DNS pinging. */ struct aws_host_resolution_config *host_resolution_override; }; AWS_EXTERN_C_BEGIN /** * Creates a new mqtt5 client using the supplied configuration * * @param allocator allocator to use with all memory operations related to this client's creation and operation * @param options mqtt5 client configuration * @return a new mqtt5 client or NULL */ AWS_MQTT_API struct aws_mqtt5_client *aws_mqtt5_client_new( struct aws_allocator *allocator, const struct aws_mqtt5_client_options *options); /** * Acquires a reference to an mqtt5 client * * @param client client to acquire a reference to. May be NULL. * @return what was passed in as the client (a client or NULL) */ AWS_MQTT_API struct aws_mqtt5_client *aws_mqtt5_client_acquire(struct aws_mqtt5_client *client); /** * Release a reference to an mqtt5 client. When the client ref count drops to zero, the client will automatically * trigger a stop and once the stop completes, the client will delete itself. * * @param client client to release a reference to. May be NULL. * @return NULL */ AWS_MQTT_API struct aws_mqtt5_client *aws_mqtt5_client_release(struct aws_mqtt5_client *client); /** * Asynchronous notify to the mqtt5 client that you want it to attempt to connect to the configured endpoint. 
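 *
 * A start/stop sketch (illustrative only, not part of the original header): client is assumed to have been
 * created with aws_mqtt5_client_new.
 *
 *   if (aws_mqtt5_client_start(client) != AWS_OP_SUCCESS) {
 *       // synchronous failure to kick off the asynchronous start process
 *   }
 *
 *   // ... later, to shut down; here the optional DISCONNECT view and completion options are both left NULL:
 *   aws_mqtt5_client_stop(client, NULL, NULL);
 *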
* The client will attempt to stay connected using the properties of the reconnect-related parameters * in the mqtt5 client configuration. * * @param client mqtt5 client to start * @return success/failure in the synchronous logic that kicks off the start process */ AWS_MQTT_API int aws_mqtt5_client_start(struct aws_mqtt5_client *client); /** * Asynchronous notify to the mqtt5 client that you want it to transition to the stopped state. When the client * reaches the stopped state, all session state is erased. * * @param client mqtt5 client to stop * @param disconnect_options (optional) properties of a DISCONNECT packet to send as part of the shutdown process * @return success/failure in the synchronous logic that kicks off the stop process */ AWS_MQTT_API int aws_mqtt5_client_stop( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_disconnect_view *disconnect_options, const struct aws_mqtt5_disconnect_completion_options *completion_options); /** * Queues a Publish operation in an mqtt5 client * * @param client mqtt5 client to queue a Publish for * @param publish_options configuration options for the Publish operation * @param completion_options completion callback configuration. Successful QoS 0 publishes invoke the callback when * the data has been written to the socket. Successful QoS1+ publishes invoke the callback when the corresponding ack * is received. Unsuccessful publishes invoke the callback at the point in time a failure condition is reached. * @return success/failure in the synchronous logic that kicks off the publish operation */ AWS_MQTT_API int aws_mqtt5_client_publish( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_publish_view *publish_options, const struct aws_mqtt5_publish_completion_options *completion_options); /** * Queues a Subscribe operation in an mqtt5 client * * @param client mqtt5 client to queue a Subscribe for * @param subscribe_options configuration options for the Subscribe operation * @param completion_options Completion callback configuration. Invoked when the corresponding SUBACK is received or * a failure condition is reached. An error code implies complete failure of the subscribe, while a success code * implies the user must still check all of the SUBACK's reason codes for per-subscription feedback. * @return success/failure in the synchronous logic that kicks off the Subscribe operation */ AWS_MQTT_API int aws_mqtt5_client_subscribe( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_subscribe_view *subscribe_options, const struct aws_mqtt5_subscribe_completion_options *completion_options); /** * Queues an Unsubscribe operation in an mqtt5 client * * @param client mqtt5 client to queue an Unsubscribe for * @param unsubscribe_options configuration options for the Unsubscribe operation * @param completion_options Completion callback configuration. Invoked when the corresponding UNSUBACK is received or * a failure condition is reached. An error code implies complete failure of the unsubscribe, while a success code * implies the user must still check all of the UNSUBACK's reason codes for per-topic-filter feedback. * @return success/failure in the synchronous logic that kicks off the Unsubscribe operation */ AWS_MQTT_API int aws_mqtt5_client_unsubscribe( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options, const struct aws_mqtt5_unsubscribe_completion_options *completion_options); /** * Queries the client's internal statistics for incomplete operations. 
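 *
 * A polling sketch (illustrative only, not part of the original header; client is assumed to be an existing
 * mqtt5 client):
 *
 *   struct aws_mqtt5_client_operation_statistics stats;
 *   aws_mqtt5_client_get_stats(client, &stats);
 *   if (stats.unacked_operation_count > 0) {
 *       // some operations are still waiting on acknowledgements from the server
 *   }
 *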
* @param client client to get statistics for * @param stats set of incomplete operation statistics */ AWS_MQTT_API void aws_mqtt5_client_get_stats(struct aws_mqtt5_client *client, struct aws_mqtt5_client_operation_statistics *stats); /* Misc related type APIs */ /** * Initializes the Client ID byte buf in negotiated settings * * @param allocator allocator to use for memory allocation * @param negotiated_settings settings to apply client id to * @param client_id client id to set */ AWS_MQTT_API int aws_mqtt5_negotiated_settings_init( struct aws_allocator *allocator, struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_byte_cursor *client_id); /** * Makes an owning copy of a negotiated settings structure. * * @param source settings to copy from * @param dest settings to copy into. Must be in a zeroed or initialized state because it gets clean up * called on it as the first step of the copy process. * @return success/failure * * Used in downstream. */ AWS_MQTT_API int aws_mqtt5_negotiated_settings_copy( const struct aws_mqtt5_negotiated_settings *source, struct aws_mqtt5_negotiated_settings *dest); /** * Clean up owned memory in negotiated_settings * * @param negotiated_settings settings to clean up */ AWS_MQTT_API void aws_mqtt5_negotiated_settings_clean_up(struct aws_mqtt5_negotiated_settings *negotiated_settings); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_MQTT_MQTT5_CLIENT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_listener.h000066400000000000000000000051621456575232400271050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_MQTT_MQTT5_LISTENER_H #define AWS_MQTT_MQTT5_LISTENER_H #include #include AWS_PUSH_SANE_WARNING_LEVEL /* * Callback signature for when an mqtt5 listener has completely destroyed itself. */ typedef void(aws_mqtt5_listener_termination_completion_fn)(void *complete_ctx); /** * A record that tracks MQTT5 client callbacks which can be dynamically injected via a listener. */ struct aws_mqtt5_callback_set { aws_mqtt5_listener_publish_received_fn *listener_publish_received_handler; void *listener_publish_received_handler_user_data; aws_mqtt5_client_connection_event_callback_fn *lifecycle_event_handler; void *lifecycle_event_handler_user_data; }; /** * Configuration options for MQTT5 listener objects. */ struct aws_mqtt5_listener_config { /** * MQTT5 client to listen to events on */ struct aws_mqtt5_client *client; /** * Callbacks to invoke when events occur on the MQTT5 client */ struct aws_mqtt5_callback_set listener_callbacks; /** * Listener destruction is asynchronous and thus requires a termination callback and associated user data * to notify the user that the listener has been fully destroyed and no further events will be received. */ aws_mqtt5_listener_termination_completion_fn *termination_callback; void *termination_callback_user_data; }; AWS_EXTERN_C_BEGIN /** * Creates a new MQTT5 listener object. For as long as the listener lives, incoming publishes and lifecycle events * will be forwarded to the callbacks configured on the listener. * * @param allocator allocator to use * @param config listener configuration * @return a new aws_mqtt5_listener object */ AWS_MQTT_API struct aws_mqtt5_listener *aws_mqtt5_listener_new( struct aws_allocator *allocator, struct aws_mqtt5_listener_config *config); /** * Adds a reference to an mqtt5 listener. 
* * @param listener listener to add a reference to * @return the listener object */ AWS_MQTT_API struct aws_mqtt5_listener *aws_mqtt5_listener_acquire(struct aws_mqtt5_listener *listener); /** * Removes a reference to an mqtt5 listener. When the reference count drops to zero, the listener's asynchronous * destruction will be started. * * @param listener listener to remove a reference from * @return NULL */ AWS_MQTT_API struct aws_mqtt5_listener *aws_mqtt5_listener_release(struct aws_mqtt5_listener *listener); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_MQTT_MQTT5_LISTENER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_packet_storage.h000066400000000000000000000231051456575232400302500ustar00rootroot00000000000000#ifndef AWS_MQTT_MQTT5_PACKET_STORAGE_H #define AWS_MQTT_MQTT5_PACKET_STORAGE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_mqtt5_user_property_set { struct aws_array_list properties; }; struct aws_mqtt5_packet_connect_storage { struct aws_allocator *allocator; struct aws_mqtt5_packet_connect_view storage_view; struct aws_byte_cursor username; struct aws_byte_cursor password; uint32_t session_expiry_interval_seconds; uint8_t request_response_information; uint8_t request_problem_information; uint16_t receive_maximum; uint16_t topic_alias_maximum; uint32_t maximum_packet_size_bytes; struct aws_mqtt5_packet_publish_storage *will; uint32_t will_delay_interval_seconds; struct aws_mqtt5_user_property_set user_properties; struct aws_byte_cursor authentication_method; struct aws_byte_cursor authentication_data; struct aws_byte_buf storage; }; struct aws_mqtt5_packet_connack_storage { struct aws_allocator *allocator; struct aws_mqtt5_packet_connack_view storage_view; uint32_t session_expiry_interval; uint16_t receive_maximum; enum aws_mqtt5_qos maximum_qos; bool retain_available; uint32_t maximum_packet_size; struct aws_byte_cursor assigned_client_identifier; uint16_t topic_alias_maximum; struct aws_byte_cursor reason_string; bool wildcard_subscriptions_available; bool subscription_identifiers_available; bool shared_subscriptions_available; uint16_t server_keep_alive; struct aws_byte_cursor response_information; struct aws_byte_cursor server_reference; struct aws_byte_cursor authentication_method; struct aws_byte_cursor authentication_data; struct aws_mqtt5_user_property_set user_properties; struct aws_byte_buf storage; }; struct aws_mqtt5_packet_suback_storage { struct aws_allocator *allocator; struct aws_mqtt5_packet_suback_view storage_view; struct aws_byte_cursor reason_string; struct aws_mqtt5_user_property_set user_properties; struct aws_array_list reason_codes; struct aws_byte_buf storage; }; struct aws_mqtt5_packet_unsuback_storage { struct aws_allocator *allocator; struct aws_mqtt5_packet_unsuback_view storage_view; struct aws_byte_cursor reason_string; struct aws_mqtt5_user_property_set user_properties; struct aws_array_list reason_codes; struct aws_byte_buf storage; }; struct aws_mqtt5_packet_publish_storage { struct aws_mqtt5_packet_publish_view storage_view; enum aws_mqtt5_payload_format_indicator payload_format; uint32_t message_expiry_interval_seconds; uint16_t topic_alias; struct aws_byte_cursor response_topic; struct aws_byte_cursor correlation_data; struct aws_byte_cursor content_type; struct aws_mqtt5_user_property_set user_properties; struct aws_array_list subscription_identifiers; struct aws_byte_buf 
storage; }; struct aws_mqtt5_packet_puback_storage { struct aws_mqtt5_packet_puback_view storage_view; struct aws_byte_cursor reason_string; struct aws_mqtt5_user_property_set user_properties; struct aws_byte_buf storage; }; struct aws_mqtt5_packet_disconnect_storage { struct aws_mqtt5_packet_disconnect_view storage_view; uint32_t session_expiry_interval_seconds; struct aws_byte_cursor reason_string; struct aws_mqtt5_user_property_set user_properties; struct aws_byte_cursor server_reference; struct aws_byte_buf storage; }; struct aws_mqtt5_packet_subscribe_storage { struct aws_mqtt5_packet_subscribe_view storage_view; uint32_t subscription_identifier; struct aws_array_list subscriptions; struct aws_mqtt5_user_property_set user_properties; struct aws_byte_buf storage; }; struct aws_mqtt5_packet_unsubscribe_storage { struct aws_mqtt5_packet_unsubscribe_view storage_view; struct aws_array_list topic_filters; struct aws_mqtt5_user_property_set user_properties; struct aws_byte_buf storage; }; AWS_EXTERN_C_BEGIN /* User properties */ AWS_MQTT_API int aws_mqtt5_user_property_set_init_with_storage( struct aws_mqtt5_user_property_set *property_set, struct aws_allocator *allocator, struct aws_byte_buf *storage_buffer, size_t property_count, const struct aws_mqtt5_user_property *properties); AWS_MQTT_API void aws_mqtt5_user_property_set_clean_up(struct aws_mqtt5_user_property_set *property_set); AWS_MQTT_API size_t aws_mqtt5_user_property_set_size(const struct aws_mqtt5_user_property_set *property_set); /* Connect */ AWS_MQTT_API int aws_mqtt5_packet_connect_storage_init( struct aws_mqtt5_packet_connect_storage *connect_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_connect_view *connect_options); AWS_MQTT_API int aws_mqtt5_packet_connect_storage_init_from_external_storage( struct aws_mqtt5_packet_connect_storage *connect_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_connect_storage_clean_up(struct aws_mqtt5_packet_connect_storage *connect_storage); /* Connack */ AWS_MQTT_API int aws_mqtt5_packet_connack_storage_init( struct aws_mqtt5_packet_connack_storage *connack_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_connack_view *connack_options); AWS_MQTT_API int aws_mqtt5_packet_connack_storage_init_from_external_storage( struct aws_mqtt5_packet_connack_storage *connack_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_connack_storage_clean_up(struct aws_mqtt5_packet_connack_storage *connack_storage); /* Disconnect */ AWS_MQTT_API int aws_mqtt5_packet_disconnect_storage_init( struct aws_mqtt5_packet_disconnect_storage *disconnect_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_disconnect_view *disconnect_options); AWS_MQTT_API int aws_mqtt5_packet_disconnect_storage_init_from_external_storage( struct aws_mqtt5_packet_disconnect_storage *disconnect_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_disconnect_storage_clean_up( struct aws_mqtt5_packet_disconnect_storage *disconnect_storage); /* Publish */ AWS_MQTT_API int aws_mqtt5_packet_publish_storage_init( struct aws_mqtt5_packet_publish_storage *publish_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_publish_view *publish_options); AWS_MQTT_API int aws_mqtt5_packet_publish_storage_init_from_external_storage( struct aws_mqtt5_packet_publish_storage *publish_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_publish_storage_clean_up(struct 
aws_mqtt5_packet_publish_storage *publish_storage); /* Puback */ AWS_MQTT_API int aws_mqtt5_packet_puback_storage_init( struct aws_mqtt5_packet_puback_storage *puback_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_puback_view *puback_view); AWS_MQTT_API int aws_mqtt5_packet_puback_storage_init_from_external_storage( struct aws_mqtt5_packet_puback_storage *puback_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_puback_storage_clean_up(struct aws_mqtt5_packet_puback_storage *puback_storage); /* Subscribe */ AWS_MQTT_API int aws_mqtt5_packet_subscribe_storage_init( struct aws_mqtt5_packet_subscribe_storage *subscribe_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_subscribe_view *subscribe_options); AWS_MQTT_API int aws_mqtt5_packet_subscribe_storage_init_from_external_storage( struct aws_mqtt5_packet_subscribe_storage *subscribe_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_subscribe_storage_clean_up( struct aws_mqtt5_packet_subscribe_storage *subscribe_storage); /* Suback */ AWS_MQTT_API int aws_mqtt5_packet_suback_storage_init( struct aws_mqtt5_packet_suback_storage *suback_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_suback_view *suback_view); AWS_MQTT_API int aws_mqtt5_packet_suback_storage_init_from_external_storage( struct aws_mqtt5_packet_suback_storage *suback_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_suback_storage_clean_up(struct aws_mqtt5_packet_suback_storage *suback_storage); /* Unsubscribe */ AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_storage_init( struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options); AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_storage_init_from_external_storage( struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_unsubscribe_storage_clean_up( struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage); /* Unsuback */ AWS_MQTT_API int aws_mqtt5_packet_unsuback_storage_init( struct aws_mqtt5_packet_unsuback_storage *unsuback_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_unsuback_view *unsuback_view); AWS_MQTT_API int aws_mqtt5_packet_unsuback_storage_init_from_external_storage( struct aws_mqtt5_packet_unsuback_storage *unsuback_storage, struct aws_allocator *allocator); AWS_MQTT_API void aws_mqtt5_packet_unsuback_storage_clean_up( struct aws_mqtt5_packet_unsuback_storage *unsuback_storage); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_MQTT_MQTT5_PACKET_STORAGE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_types.h000066400000000000000000000365451456575232400264350ustar00rootroot00000000000000#ifndef AWS_MQTT_MQTT5_TYPES_H #define AWS_MQTT_MQTT5_TYPES_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL /** * Some artificial (non-MQTT spec specified) limits that we place on input packets (publish, subscribe, unsubscibe) * which lets us safely do the various packet size calculations with a bare minimum of checked arithmetic. * * I don't see any conceivable use cases why you'd want more than this, but they are relaxable to some degree. 
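 *
 * (Rough arithmetic for intuition: a single user property encodes as an identifier byte plus two
 * length-prefixed UTF-8 strings of at most 65,535 bytes each, roughly 131 KB worst case, so 1024 of
 * them contribute on the order of 128 MiB, comfortably below the 268,435,455-byte maximum that a
 * variable length integer can express for a packet's remaining length.)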
* * TODO: Add some static assert calculations that verify that we can't possibly overflow against the maximum value * of a variable length integer for relevant packet size encodings that are absolute worst-case against these limits. */ #define AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES 1024 #define AWS_MQTT5_CLIENT_MAXIMUM_SUBSCRIPTIONS_PER_SUBSCRIBE 1024 #define AWS_MQTT5_CLIENT_MAXIMUM_TOPIC_FILTERS_PER_UNSUBSCRIBE 1024 /** * Over-the-wire packet id as defined in the mqtt spec. Allocated at the point in time when the packet is * is next to go down the channel and about to be encoded into an io message buffer. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901026 */ typedef uint16_t aws_mqtt5_packet_id_t; /** * MQTT Message delivery quality of service. * Enum values match mqtt spec encoding values. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901234 */ enum aws_mqtt5_qos { /** https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901235 */ AWS_MQTT5_QOS_AT_MOST_ONCE = 0x0, /** https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901236 */ AWS_MQTT5_QOS_AT_LEAST_ONCE = 0x1, /** https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901237 */ AWS_MQTT5_QOS_EXACTLY_ONCE = 0x2, }; /** * Server return code for CONNECT attempts. * Enum values match mqtt spec encoding values. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901079 */ enum aws_mqtt5_connect_reason_code { AWS_MQTT5_CRC_SUCCESS = 0, AWS_MQTT5_CRC_UNSPECIFIED_ERROR = 128, AWS_MQTT5_CRC_MALFORMED_PACKET = 129, AWS_MQTT5_CRC_PROTOCOL_ERROR = 130, AWS_MQTT5_CRC_IMPLEMENTATION_SPECIFIC_ERROR = 131, AWS_MQTT5_CRC_UNSUPPORTED_PROTOCOL_VERSION = 132, AWS_MQTT5_CRC_CLIENT_IDENTIFIER_NOT_VALID = 133, AWS_MQTT5_CRC_BAD_USERNAME_OR_PASSWORD = 134, AWS_MQTT5_CRC_NOT_AUTHORIZED = 135, AWS_MQTT5_CRC_SERVER_UNAVAILABLE = 136, AWS_MQTT5_CRC_SERVER_BUSY = 137, AWS_MQTT5_CRC_BANNED = 138, AWS_MQTT5_CRC_BAD_AUTHENTICATION_METHOD = 140, AWS_MQTT5_CRC_TOPIC_NAME_INVALID = 144, AWS_MQTT5_CRC_PACKET_TOO_LARGE = 149, AWS_MQTT5_CRC_QUOTA_EXCEEDED = 151, AWS_MQTT5_CRC_PAYLOAD_FORMAT_INVALID = 153, AWS_MQTT5_CRC_RETAIN_NOT_SUPPORTED = 154, AWS_MQTT5_CRC_QOS_NOT_SUPPORTED = 155, AWS_MQTT5_CRC_USE_ANOTHER_SERVER = 156, AWS_MQTT5_CRC_SERVER_MOVED = 157, AWS_MQTT5_CRC_CONNECTION_RATE_EXCEEDED = 159, }; /** * Reason code inside DISCONNECT packets. * Enum values match mqtt spec encoding values. 
* * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208 */ enum aws_mqtt5_disconnect_reason_code { AWS_MQTT5_DRC_NORMAL_DISCONNECTION = 0, AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE = 4, AWS_MQTT5_DRC_UNSPECIFIED_ERROR = 128, AWS_MQTT5_DRC_MALFORMED_PACKET = 129, AWS_MQTT5_DRC_PROTOCOL_ERROR = 130, AWS_MQTT5_DRC_IMPLEMENTATION_SPECIFIC_ERROR = 131, AWS_MQTT5_DRC_NOT_AUTHORIZED = 135, AWS_MQTT5_DRC_SERVER_BUSY = 137, AWS_MQTT5_DRC_SERVER_SHUTTING_DOWN = 139, AWS_MQTT5_DRC_KEEP_ALIVE_TIMEOUT = 141, AWS_MQTT5_DRC_SESSION_TAKEN_OVER = 142, AWS_MQTT5_DRC_TOPIC_FILTER_INVALID = 143, AWS_MQTT5_DRC_TOPIC_NAME_INVALID = 144, AWS_MQTT5_DRC_RECEIVE_MAXIMUM_EXCEEDED = 147, AWS_MQTT5_DRC_TOPIC_ALIAS_INVALID = 148, AWS_MQTT5_DRC_PACKET_TOO_LARGE = 149, AWS_MQTT5_DRC_MESSAGE_RATE_TOO_HIGH = 150, AWS_MQTT5_DRC_QUOTA_EXCEEDED = 151, AWS_MQTT5_DRC_ADMINISTRATIVE_ACTION = 152, AWS_MQTT5_DRC_PAYLOAD_FORMAT_INVALID = 153, AWS_MQTT5_DRC_RETAIN_NOT_SUPPORTED = 154, AWS_MQTT5_DRC_QOS_NOT_SUPPORTED = 155, AWS_MQTT5_DRC_USE_ANOTHER_SERVER = 156, AWS_MQTT5_DRC_SERVER_MOVED = 157, AWS_MQTT5_DRC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED = 158, AWS_MQTT5_DRC_CONNECTION_RATE_EXCEEDED = 159, AWS_MQTT5_DRC_MAXIMUM_CONNECT_TIME = 160, AWS_MQTT5_DRC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED = 161, AWS_MQTT5_DRC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED = 162, }; /** * Reason code inside PUBACK packets. * Enum values match mqtt spec encoding values. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124 */ enum aws_mqtt5_puback_reason_code { AWS_MQTT5_PARC_SUCCESS = 0, AWS_MQTT5_PARC_NO_MATCHING_SUBSCRIBERS = 16, AWS_MQTT5_PARC_UNSPECIFIED_ERROR = 128, AWS_MQTT5_PARC_IMPLEMENTATION_SPECIFIC_ERROR = 131, AWS_MQTT5_PARC_NOT_AUTHORIZED = 135, AWS_MQTT5_PARC_TOPIC_NAME_INVALID = 144, AWS_MQTT5_PARC_PACKET_IDENTIFIER_IN_USE = 145, AWS_MQTT5_PARC_QUOTA_EXCEEDED = 151, AWS_MQTT5_PARC_PAYLOAD_FORMAT_INVALID = 153, }; /** * Reason code inside SUBACK packet payloads. * Enum values match mqtt spec encoding values. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901178 */ enum aws_mqtt5_suback_reason_code { AWS_MQTT5_SARC_GRANTED_QOS_0 = 0, AWS_MQTT5_SARC_GRANTED_QOS_1 = 1, AWS_MQTT5_SARC_GRANTED_QOS_2 = 2, AWS_MQTT5_SARC_UNSPECIFIED_ERROR = 128, AWS_MQTT5_SARC_IMPLEMENTATION_SPECIFIC_ERROR = 131, AWS_MQTT5_SARC_NOT_AUTHORIZED = 135, AWS_MQTT5_SARC_TOPIC_FILTER_INVALID = 143, AWS_MQTT5_SARC_PACKET_IDENTIFIER_IN_USE = 145, AWS_MQTT5_SARC_QUOTA_EXCEEDED = 151, AWS_MQTT5_SARC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED = 158, AWS_MQTT5_SARC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED = 161, AWS_MQTT5_SARC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED = 162, }; /** * Reason code inside UNSUBACK packet payloads. * Enum values match mqtt spec encoding values. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901194 */ enum aws_mqtt5_unsuback_reason_code { AWS_MQTT5_UARC_SUCCESS = 0, AWS_MQTT5_UARC_NO_SUBSCRIPTION_EXISTED = 17, AWS_MQTT5_UARC_UNSPECIFIED_ERROR = 128, AWS_MQTT5_UARC_IMPLEMENTATION_SPECIFIC_ERROR = 131, AWS_MQTT5_UARC_NOT_AUTHORIZED = 135, AWS_MQTT5_UARC_TOPIC_FILTER_INVALID = 143, AWS_MQTT5_UARC_PACKET_IDENTIFIER_IN_USE = 145, }; /** * Type of mqtt packet. * Enum values match mqtt spec encoding values. 
* * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901022 */ enum aws_mqtt5_packet_type { /* internal indicator that the associated packet is null */ AWS_MQTT5_PT_NONE = -1, AWS_MQTT5_PT_RESERVED = 0, AWS_MQTT5_PT_CONNECT = 1, AWS_MQTT5_PT_CONNACK = 2, AWS_MQTT5_PT_PUBLISH = 3, AWS_MQTT5_PT_PUBACK = 4, AWS_MQTT5_PT_PUBREC = 5, AWS_MQTT5_PT_PUBREL = 6, AWS_MQTT5_PT_PUBCOMP = 7, AWS_MQTT5_PT_SUBSCRIBE = 8, AWS_MQTT5_PT_SUBACK = 9, AWS_MQTT5_PT_UNSUBSCRIBE = 10, AWS_MQTT5_PT_UNSUBACK = 11, AWS_MQTT5_PT_PINGREQ = 12, AWS_MQTT5_PT_PINGRESP = 13, AWS_MQTT5_PT_DISCONNECT = 14, AWS_MQTT5_PT_AUTH = 15, }; /** * Non-persistent representation of an mqtt5 user property. */ struct aws_mqtt5_user_property { struct aws_byte_cursor name; struct aws_byte_cursor value; }; /** * Optional property describing a message's payload format. * Enum values match mqtt spec encoding values. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901063 */ enum aws_mqtt5_payload_format_indicator { AWS_MQTT5_PFI_BYTES = 0, AWS_MQTT5_PFI_UTF8 = 1, }; /** * Configures how retained messages should be handled when subscribing with a topic filter that matches topics with * associated retained messages. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169 */ enum aws_mqtt5_retain_handling_type { /** * Server should send all retained messages on topics that match the subscription's filter. */ AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE = 0x00, /** * Server should send all retained messages on topics that match the subscription's filter, where this is the * first (relative to connection) subscription filter that matches the topic with a retained message. */ AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE_IF_NEW = 0x01, /** * Subscribe must not trigger any retained message publishes from the server. */ AWS_MQTT5_RHT_DONT_SEND = 0x02, }; /** * Configures a single subscription within a Subscribe operation */ struct aws_mqtt5_subscription_view { /** * Topic filter to subscribe to */ struct aws_byte_cursor topic_filter; /** * Maximum QOS that the subscriber will accept messages for. Negotiated QoS may be different. */ enum aws_mqtt5_qos qos; /** * Should the server not send publishes to a client when that client was the one who sent the publish? */ bool no_local; /** * Should messages sent due to this subscription keep the retain flag preserved on the message? */ bool retain_as_published; /** * Should retained messages on matching topics be sent in reaction to this subscription? 
*/ enum aws_mqtt5_retain_handling_type retain_handling_type; }; /** * Read-only snapshot of a DISCONNECT packet * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901205 */ struct aws_mqtt5_packet_disconnect_view { enum aws_mqtt5_disconnect_reason_code reason_code; const uint32_t *session_expiry_interval_seconds; const struct aws_byte_cursor *reason_string; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; const struct aws_byte_cursor *server_reference; }; /** * Read-only snapshot of a SUBSCRIBE packet * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901161 */ struct aws_mqtt5_packet_subscribe_view { aws_mqtt5_packet_id_t packet_id; size_t subscription_count; const struct aws_mqtt5_subscription_view *subscriptions; const uint32_t *subscription_identifier; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; }; /** * Read-only snapshot of an UNSUBSCRIBE packet * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901179 */ struct aws_mqtt5_packet_unsubscribe_view { aws_mqtt5_packet_id_t packet_id; size_t topic_filter_count; const struct aws_byte_cursor *topic_filters; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; }; /** * Read-only snapshot of a PUBLISH packet. Used both in configuration of a publish operation and callback * data in message receipt. * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901100 */ struct aws_mqtt5_packet_publish_view { struct aws_byte_cursor payload; /* packet_id is only set for QoS 1 and QoS 2 */ aws_mqtt5_packet_id_t packet_id; enum aws_mqtt5_qos qos; /* * Used to set the duplicate flag on QoS 1+ re-delivery attempts. * Set to false on all first attempts or QoS 0. Set to true on any re-delivery. */ bool duplicate; bool retain; struct aws_byte_cursor topic; const enum aws_mqtt5_payload_format_indicator *payload_format; const uint32_t *message_expiry_interval_seconds; const uint16_t *topic_alias; const struct aws_byte_cursor *response_topic; const struct aws_byte_cursor *correlation_data; /* These are ignored when building publish operations */ size_t subscription_identifier_count; const uint32_t *subscription_identifiers; const struct aws_byte_cursor *content_type; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; }; /** * Read-only snapshot of a CONNECT packet * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901033 */ struct aws_mqtt5_packet_connect_view { uint16_t keep_alive_interval_seconds; struct aws_byte_cursor client_id; const struct aws_byte_cursor *username; const struct aws_byte_cursor *password; bool clean_start; const uint32_t *session_expiry_interval_seconds; const uint8_t *request_response_information; const uint8_t *request_problem_information; const uint16_t *receive_maximum; const uint16_t *topic_alias_maximum; const uint32_t *maximum_packet_size_bytes; const uint32_t *will_delay_interval_seconds; const struct aws_mqtt5_packet_publish_view *will; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; /* Do not bind these. We don't support AUTH packets yet. For decode/encade testing purposes only. */ const struct aws_byte_cursor *authentication_method; const struct aws_byte_cursor *authentication_data; }; /** * Read-only snapshot of a CONNACK packet. 
* * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901074 */ struct aws_mqtt5_packet_connack_view { bool session_present; enum aws_mqtt5_connect_reason_code reason_code; const uint32_t *session_expiry_interval; const uint16_t *receive_maximum; const enum aws_mqtt5_qos *maximum_qos; const bool *retain_available; const uint32_t *maximum_packet_size; const struct aws_byte_cursor *assigned_client_identifier; const uint16_t *topic_alias_maximum; const struct aws_byte_cursor *reason_string; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; const bool *wildcard_subscriptions_available; const bool *subscription_identifiers_available; const bool *shared_subscriptions_available; const uint16_t *server_keep_alive; const struct aws_byte_cursor *response_information; const struct aws_byte_cursor *server_reference; const struct aws_byte_cursor *authentication_method; const struct aws_byte_cursor *authentication_data; }; /** * Read-only snapshot of a PUBACK packet * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901121 */ struct aws_mqtt5_packet_puback_view { aws_mqtt5_packet_id_t packet_id; enum aws_mqtt5_puback_reason_code reason_code; const struct aws_byte_cursor *reason_string; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; }; /** * Read-only snapshot of a SUBACK packet * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901171 */ struct aws_mqtt5_packet_suback_view { aws_mqtt5_packet_id_t packet_id; const struct aws_byte_cursor *reason_string; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; size_t reason_code_count; const enum aws_mqtt5_suback_reason_code *reason_codes; }; /** * Read-only snapshot of an UNSUBACK packet * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901187 */ struct aws_mqtt5_packet_unsuback_view { aws_mqtt5_packet_id_t packet_id; const struct aws_byte_cursor *reason_string; size_t user_property_count; const struct aws_mqtt5_user_property *user_properties; size_t reason_code_count; const enum aws_mqtt5_unsuback_reason_code *reason_codes; }; AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_MQTT_MQTT5_TYPES_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/000077500000000000000000000000001456575232400215155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/client.c000066400000000000000000004046031456575232400231460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) # pragma warning(disable : 4996) /* allow strncpy() */ #endif /* 3 seconds */ static const uint64_t s_default_ping_timeout_ns = 3000000000; /* 20 minutes - This is the default (and max) for AWS IoT as of 2020.02.18 */ static const uint16_t s_default_keep_alive_sec = 1200; #define DEFAULT_MQTT311_OPERATION_TABLE_SIZE 100 static int s_mqtt_client_connect( struct aws_mqtt_client_connection_311_impl *connection, aws_mqtt_client_on_connection_complete_fn *on_connection_complete, void *userdata); /******************************************************************************* * Helper functions ******************************************************************************/ void mqtt_connection_lock_synced_data(struct aws_mqtt_client_connection_311_impl *connection) { int err = aws_mutex_lock(&connection->synced_data.lock); AWS_ASSERT(!err); (void)err; } void mqtt_connection_unlock_synced_data(struct aws_mqtt_client_connection_311_impl *connection) { ASSERT_SYNCED_DATA_LOCK_HELD(connection); int err = aws_mutex_unlock(&connection->synced_data.lock); AWS_ASSERT(!err); (void)err; } static void s_aws_mqtt_schedule_reconnect_task(struct aws_mqtt_client_connection_311_impl *connection) { uint64_t next_attempt_ns = 0; aws_high_res_clock_get_ticks(&next_attempt_ns); next_attempt_ns += aws_timestamp_convert( connection->reconnect_timeouts.current_sec, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_event_loop_schedule_task_future(connection->loop, &connection->reconnect_task->task, next_attempt_ns); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Scheduling reconnect, for %" PRIu64 " on event-loop %p", (void *)connection, next_attempt_ns, (void *)connection->loop); } static void s_aws_mqtt_client_destroy(struct aws_mqtt_client *client) { AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "client=%p: Cleaning up MQTT client", (void *)client); aws_client_bootstrap_release(client->bootstrap); aws_mem_release(client->allocator, client); } void mqtt_connection_set_state( struct aws_mqtt_client_connection_311_impl *connection, enum aws_mqtt_client_connection_state state) { ASSERT_SYNCED_DATA_LOCK_HELD(connection); if (connection->synced_data.state == state) { AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: MQTT connection already in state %d", (void *)connection, state); return; } connection->synced_data.state = state; } struct request_timeout_wrapper; /* used for timeout task */ struct request_timeout_task_arg { uint16_t packet_id; struct aws_mqtt_client_connection_311_impl *connection; struct request_timeout_wrapper *task_arg_wrapper; }; /* * We want the timeout task to be able to destroy the forward reference from the operation's task arg structure * to the timeout task. But the operation task arg structures don't have any data structure in common. So to allow * the timeout to refer back to a zero-able forward pointer, we wrap a pointer to the timeout task and embed it * in every operation's task arg that needs to create a timeout. 
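 *
 * Illustrative sketch only (the embedding operation arg shown here is hypothetical; request_timeout_wrapper
 * and request_timeout_task_arg are the real types defined in this file):
 *
 *     struct some_operation_task_arg {
 *         struct request_timeout_wrapper timeout_wrapper; // forward pointer to the timeout task arg
 *         ...
 *     };
 *
 * The timeout task arg keeps a back pointer (task_arg_wrapper) to that embedded wrapper, and whichever
 * side finishes first zeroes the other side's pointer so neither ever touches memory the other has freed.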
*/ struct request_timeout_wrapper { struct request_timeout_task_arg *timeout_task_arg; }; static void s_request_timeout(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; struct request_timeout_task_arg *timeout_task_arg = arg; struct aws_mqtt_client_connection_311_impl *connection = timeout_task_arg->connection; if (status == AWS_TASK_STATUS_RUN_READY) { if (timeout_task_arg->task_arg_wrapper != NULL) { mqtt_request_complete(connection, AWS_ERROR_MQTT_TIMEOUT, timeout_task_arg->packet_id); } } /* * Whether cancelled or run, if we have a back pointer to the operation's task arg, we must zero it out * so that when it completes it does not try to cancel us, because we will already be freed. * * If we don't have a back pointer to the operation's task arg, that means it already ran and completed. */ if (timeout_task_arg->task_arg_wrapper != NULL) { timeout_task_arg->task_arg_wrapper->timeout_task_arg = NULL; timeout_task_arg->task_arg_wrapper = NULL; } aws_mem_release(connection->allocator, timeout_task_arg); } static struct request_timeout_task_arg *s_schedule_timeout_task( struct aws_mqtt_client_connection_311_impl *connection, uint16_t packet_id) { /* schedule a timeout task to run, in case server consider the publish is not received */ struct aws_channel_task *request_timeout_task = NULL; struct request_timeout_task_arg *timeout_task_arg = NULL; if (!aws_mem_acquire_many( connection->allocator, 2, &timeout_task_arg, sizeof(struct request_timeout_task_arg), &request_timeout_task, sizeof(struct aws_channel_task))) { return NULL; } aws_channel_task_init(request_timeout_task, s_request_timeout, timeout_task_arg, "mqtt_request_timeout"); AWS_ZERO_STRUCT(*timeout_task_arg); timeout_task_arg->connection = connection; timeout_task_arg->packet_id = packet_id; uint64_t timestamp = 0; if (aws_channel_current_clock_time(connection->slot->channel, ×tamp)) { aws_mem_release(connection->allocator, timeout_task_arg); return NULL; } timestamp = aws_add_u64_saturating(timestamp, connection->operation_timeout_ns); aws_channel_schedule_task_future(connection->slot->channel, request_timeout_task, timestamp); return timeout_task_arg; } static void s_init_statistics(struct aws_mqtt_connection_operation_statistics_impl *stats) { aws_atomic_store_int(&stats->incomplete_operation_count_atomic, 0); aws_atomic_store_int(&stats->incomplete_operation_size_atomic, 0); aws_atomic_store_int(&stats->unacked_operation_count_atomic, 0); aws_atomic_store_int(&stats->unacked_operation_size_atomic, 0); } static bool s_is_topic_shared_topic(struct aws_byte_cursor *input) { char *input_str = (char *)input->ptr; if (strncmp("$share/", input_str, strlen("$share/")) == 0) { return true; } return false; } static struct aws_string *s_get_normal_topic_from_shared_topic(struct aws_string *input) { const char *input_char_str = aws_string_c_str(input); size_t input_char_length = strlen(input_char_str); size_t split_position = 7; // Start at '$share/' since we know it has to exist while (split_position < input_char_length) { split_position += 1; if (input_char_str[split_position] == '/') { break; } } // If we got all the way to the end, OR there is not at least a single character // after the second /, then it's invalid input. 
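    // (Illustrative example: "$share/group1/device/+/data" is valid; split_position stops at the '/'
    // after "group1" and the function returns "device/+/data", while "$share/group1/" fails the
    // check below.)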
if (split_position + 1 >= input_char_length) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "Cannot parse shared subscription topic: Topic is not formatted correctly"); return NULL; } const size_t split_delta = input_char_length - split_position; if (split_delta > 0) { // Annoyingly, we cannot just use 'char result_char[split_delta];' because // MSVC doesn't support it. char *result_char = aws_mem_calloc(input->allocator, split_delta, sizeof(char)); strncpy(result_char, input_char_str + split_position + 1, split_delta); struct aws_string *result_string = aws_string_new_from_c_str(input->allocator, (const char *)result_char); aws_mem_release(input->allocator, result_char); return result_string; } AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "Cannot parse shared subscription topic: Topic is not formatted correctly"); return NULL; } /******************************************************************************* * Client Init ******************************************************************************/ struct aws_mqtt_client *aws_mqtt_client_new(struct aws_allocator *allocator, struct aws_client_bootstrap *bootstrap) { aws_mqtt_fatal_assert_library_initialized(); struct aws_mqtt_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_client)); if (client == NULL) { return NULL; } AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "client=%p: Initalizing MQTT client", (void *)client); client->allocator = allocator; client->bootstrap = aws_client_bootstrap_acquire(bootstrap); aws_ref_count_init(&client->ref_count, client, (aws_simple_completion_callback *)s_aws_mqtt_client_destroy); return client; } struct aws_mqtt_client *aws_mqtt_client_acquire(struct aws_mqtt_client *client) { if (client != NULL) { aws_ref_count_acquire(&client->ref_count); } return client; } void aws_mqtt_client_release(struct aws_mqtt_client *client) { if (client != NULL) { aws_ref_count_release(&client->ref_count); } } #define AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS 10 /* At this point, the channel for the MQTT connection has completed its shutdown */ static void s_mqtt_client_shutdown( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Channel has been shutdown with error code %d", (void *)connection, error_code); enum aws_mqtt_client_connection_state prev_state; struct aws_linked_list cancelling_requests; aws_linked_list_init(&cancelling_requests); bool disconnected_state = false; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* * On a channel that represents a valid connection (successful connack received), * channel_successful_connack_timestamp_ns will be the time the connack was received. Otherwise it will be * zero. * * Use that fact to determine whether or not we should reset the current reconnect backoff delay. 
* * We reset the reconnect backoff if either of: * 1) the user called disconnect() * 2) a successful connection had lasted longer than our minimum reset time (10s at the moment) */ uint64_t now = 0; aws_high_res_clock_get_ticks(&now); uint64_t time_diff = now - connection->reconnect_timeouts.channel_successful_connack_timestamp_ns; bool was_user_disconnect = connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING; bool was_sufficiently_long_connection = (connection->reconnect_timeouts.channel_successful_connack_timestamp_ns != 0) && (time_diff >= aws_timestamp_convert( AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); if (was_user_disconnect || was_sufficiently_long_connection) { connection->reconnect_timeouts.current_sec = connection->reconnect_timeouts.min_sec; } connection->reconnect_timeouts.channel_successful_connack_timestamp_ns = 0; /* Move all the ongoing requests to the pending requests list, because the response they are waiting for will * never arrives. Sad. But, we will retry. */ if (connection->clean_session) { /* For a clean session, the Session lasts as long as the Network Connection. Thus, discard the previous * session */ AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Discard ongoing requests and pending requests when a clean session connection lost.", (void *)connection); aws_linked_list_move_all_back(&cancelling_requests, &connection->thread_data.ongoing_requests_list); aws_linked_list_move_all_back(&cancelling_requests, &connection->synced_data.pending_requests_list); } else { aws_linked_list_move_all_back( &connection->synced_data.pending_requests_list, &connection->thread_data.ongoing_requests_list); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: All subscribe/unsubscribe and publish QoS>0 have been move to pending list", (void *)connection); } prev_state = connection->synced_data.state; switch (connection->synced_data.state) { case AWS_MQTT_CLIENT_STATE_CONNECTED: /* unexpected hangup from broker, try to reconnect */ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_RECONNECTING); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: connection was unexpected interrupted, switch state to RECONNECTING.", (void *)connection); break; case AWS_MQTT_CLIENT_STATE_DISCONNECTING: /* disconnect requested by user */ /* Successfully shutdown, if cleansession is set, ongoing and pending requests will be cleared */ disconnected_state = true; AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: disconnect finished, switch state to DISCONNECTED.", (void *)connection); break; case AWS_MQTT_CLIENT_STATE_CONNECTING: /* failed to connect */ disconnected_state = true; break; case AWS_MQTT_CLIENT_STATE_RECONNECTING: /* reconnect failed, schedule the next attempt later, no need to change the state. 
*/ break; default: /* AWS_MQTT_CLIENT_STATE_DISCONNECTED */ break; } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: current state is %d", (void *)connection, (int)connection->synced_data.state); /* Always clear slot, as that's what's been shutdown */ if (connection->slot) { aws_channel_slot_remove(connection->slot); AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: slot is removed successfully", (void *)connection); connection->slot = NULL; } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (!aws_linked_list_empty(&cancelling_requests)) { struct aws_linked_list_node *current = aws_linked_list_front(&cancelling_requests); const struct aws_linked_list_node *end = aws_linked_list_end(&cancelling_requests); while (current != end) { struct aws_mqtt_request *request = AWS_CONTAINER_OF(current, struct aws_mqtt_request, list_node); if (request->on_complete) { request->on_complete( &connection->base, request->packet_id, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION, request->on_complete_ud); } current = current->next; } { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); while (!aws_linked_list_empty(&cancelling_requests)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&cancelling_requests); struct aws_mqtt_request *request = AWS_CONTAINER_OF(node, struct aws_mqtt_request, list_node); aws_hash_table_remove( &connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL); aws_memory_pool_release(&connection->synced_data.requests_pool, request); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ } /* If there's no error code and this wasn't user-requested, set the error code to something useful */ if (error_code == AWS_ERROR_SUCCESS) { if (prev_state != AWS_MQTT_CLIENT_STATE_DISCONNECTING && prev_state != AWS_MQTT_CLIENT_STATE_DISCONNECTED) { error_code = AWS_ERROR_MQTT_UNEXPECTED_HANGUP; } } switch (prev_state) { case AWS_MQTT_CLIENT_STATE_RECONNECTING: { /* If reconnect attempt failed, schedule the next attempt */ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Reconnect failed, retrying", (void *)connection); s_aws_mqtt_schedule_reconnect_task(connection); break; } case AWS_MQTT_CLIENT_STATE_CONNECTED: { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Connection interrupted, calling callback and attempting reconnect", (void *)connection); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_interrupted, error_code); /* In case user called disconnect from the on_interrupted callback */ bool stop_reconnect; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); stop_reconnect = connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING; if (stop_reconnect) { disconnected_state = true; AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: disconnect finished, switch state to DISCONNECTED.", (void *)connection); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (!stop_reconnect) { s_aws_mqtt_schedule_reconnect_task(connection); } break; } default: break; } if (disconnected_state) { { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTED); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ switch (prev_state) { case AWS_MQTT_CLIENT_STATE_CONNECTED: AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Caller requested disconnect from on_interrupted callback, aborting reconnect", (void *)connection); MQTT_CLIENT_CALL_CALLBACK(connection, 
on_disconnect); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_closed, NULL); break; case AWS_MQTT_CLIENT_STATE_DISCONNECTING: AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Disconnect completed, clearing request queue and calling callback", (void *)connection); MQTT_CLIENT_CALL_CALLBACK(connection, on_disconnect); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_closed, NULL); break; case AWS_MQTT_CLIENT_STATE_CONNECTING: AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Initial connection attempt failed, calling callback", (void *)connection); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_connection_complete, error_code, 0, false); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_connection_failure, error_code); break; default: break; } /* The connection can die now. Release the refcount */ aws_mqtt_client_connection_release(&connection->base); } } /******************************************************************************* * Connection New ******************************************************************************/ /* The assumption here is that a connection always outlives its channels, and the channel this task was scheduled on * always outlives this task, so all we need to do is check the connection state. If we are in a state that waits * for a CONNACK, kill it off. In the case that the connection died between scheduling this task and it being executed * the status will always be CANCELED because this task will be canceled when the owning channel goes away. */ static void s_connack_received_timeout(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { struct aws_mqtt_client_connection_311_impl *connection = arg; if (status == AWS_TASK_STATUS_RUN_READY) { bool time_out = false; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); time_out = (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_CONNECTING || connection->synced_data.state == AWS_MQTT_CLIENT_STATE_RECONNECTING); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (time_out) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: mqtt CONNACK response timeout detected", (void *)connection); aws_channel_shutdown(connection->slot->channel, AWS_ERROR_MQTT_TIMEOUT); } } aws_mem_release(connection->allocator, channel_task); } /** * Channel has been initialized callback. Sets up channel handler and sends out CONNECT packet. * The on_connack callback is called with the CONNACK packet is received from the server. */ static void s_mqtt_client_init( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct aws_io_message *message = NULL; /* Setup callback contract is: if error_code is non-zero then channel is NULL. */ AWS_FATAL_ASSERT((error_code != 0) == (channel == NULL)); struct aws_mqtt_client_connection_311_impl *connection = user_data; if (error_code != AWS_OP_SUCCESS) { /* client shutdown already handles this case, so just call that. */ s_mqtt_client_shutdown(bootstrap, error_code, channel, user_data); return; } AWS_FATAL_ASSERT(aws_channel_get_event_loop(channel) == connection->loop); /* user requested disconnect before the channel has been set up. Stop installing the slot and sending CONNECT. */ bool failed_create_slot = false; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING) { /* It only happens when the user request disconnect during reconnecting, we don't need to fire any callback. 
* The on_disconnect will be invoked as channel finish shutting down. */ mqtt_connection_unlock_synced_data(connection); aws_channel_shutdown(channel, AWS_ERROR_SUCCESS); return; } /* Create the slot */ connection->slot = aws_channel_slot_new(channel); if (!connection->slot) { failed_create_slot = true; } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ /* install the slot and handler */ if (failed_create_slot) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to create new slot, something has gone horribly wrong, error %d (%s).", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); goto handle_error; } if (aws_channel_slot_insert_end(channel, connection->slot)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to insert slot into channel %p, error %d (%s).", (void *)connection, (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto handle_error; } if (aws_channel_slot_set_handler(connection->slot, &connection->handler)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to set MQTT handler into slot on channel %p, error %d (%s).", (void *)connection, (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto handle_error; } aws_mqtt311_decoder_reset_for_new_connection(&connection->thread_data.decoder); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Connection successfully opened, sending CONNECT packet", (void *)connection); struct aws_channel_task *connack_task = aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_channel_task)); if (!connack_task) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to allocate timeout task.", (void *)connection); goto handle_error; } aws_channel_task_init(connack_task, s_connack_received_timeout, connection, "mqtt_connack_timeout"); uint64_t now = 0; if (aws_channel_current_clock_time(channel, &now)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "static: Failed to setting MQTT handler into slot on channel %p, error %d (%s).", (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto handle_error; } now += connection->ping_timeout_ns; aws_channel_schedule_task_future(channel, connack_task, now); struct aws_byte_cursor client_id_cursor = aws_byte_cursor_from_buf(&connection->client_id); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: MQTT Connection initializing CONNECT packet for client-id '" PRInSTR "'", (void *)connection, AWS_BYTE_CURSOR_PRI(client_id_cursor)); /* Send the connect packet */ struct aws_mqtt_packet_connect connect; aws_mqtt_packet_connect_init( &connect, client_id_cursor, connection->clean_session, connection->keep_alive_time_secs); if (connection->will.topic.buffer) { /* Add will if present */ struct aws_byte_cursor topic_cur = aws_byte_cursor_from_buf(&connection->will.topic); struct aws_byte_cursor payload_cur = aws_byte_cursor_from_buf(&connection->will.payload); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Adding will to connection on " PRInSTR " with payload " PRInSTR, (void *)connection, AWS_BYTE_CURSOR_PRI(topic_cur), AWS_BYTE_CURSOR_PRI(payload_cur)); aws_mqtt_packet_connect_add_will( &connect, topic_cur, connection->will.qos, connection->will.retain, payload_cur); } if (connection->username) { struct aws_byte_cursor username_cur = aws_byte_cursor_from_string(connection->username); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Adding username " PRInSTR " to connection", (void *)connection, AWS_BYTE_CURSOR_PRI(username_cur)); struct aws_byte_cursor password_cur = { .ptr = NULL, .len = 0, }; if (connection->password) { password_cur = 
aws_byte_cursor_from_string(connection->password); } aws_mqtt_packet_connect_add_credentials(&connect, username_cur, password_cur); } message = mqtt_get_message_for_packet(connection, &connect.fixed_header); if (!message) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to get message from pool", (void *)connection); goto handle_error; } if (aws_mqtt_packet_connect_encode(&message->message_data, &connect)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to encode CONNECT packet", (void *)connection); goto handle_error; } if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to send encoded CONNECT packet upstream", (void *)connection); goto handle_error; } return; handle_error: MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_connection_complete, aws_last_error(), 0, false); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_connection_failure, aws_last_error()); aws_channel_shutdown(channel, aws_last_error()); if (message) { aws_mem_release(message->allocator, message); } } static void s_attempt_reconnect(struct aws_task *task, void *userdata, enum aws_task_status status) { (void)task; struct aws_mqtt_reconnect_task *reconnect = userdata; struct aws_mqtt_client_connection_311_impl *connection = aws_atomic_load_ptr(&reconnect->connection_ptr); /* If the task is not cancelled and a connection has not succeeded, attempt reconnect */ if (status == AWS_TASK_STATUS_RUN_READY && connection) { mqtt_connection_lock_synced_data(connection); /** * Check the state and if we are disconnecting (AWS_MQTT_CLIENT_STATE_DISCONNECTING) then we want to skip it * and abort the reconnect task (or rather, just do not try to reconnect) */ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Skipping reconnect: Client is trying to disconnect", (void *)connection); /** * There is the nasty world where the disconnect task/function is called right when we are "reconnecting" as * our state but we have not reconnected. When this happens, the disconnect function doesn't do anything * beyond setting the state to AWS_MQTT_CLIENT_STATE_DISCONNECTING (aws_mqtt_client_connection_disconnect), * meaning the disconnect callback will NOT be called nor will we release memory. * For this reason, we have to do the callback and release of the connection here otherwise the code * will DEADLOCK forever and that is bad. */ bool perform_full_destroy = false; if (!connection->slot) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Reconnect task called but client is disconnecting and has no slot. 
Finishing disconnect", (void *)connection); mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTED); perform_full_destroy = true; } aws_mem_release(reconnect->allocator, reconnect); connection->reconnect_task = NULL; /* Unlock the synced data, then potentially call the disconnect callback and release the connection */ mqtt_connection_unlock_synced_data(connection); if (perform_full_destroy) { MQTT_CLIENT_CALL_CALLBACK(connection, on_disconnect); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_closed, NULL); aws_mqtt_client_connection_release(&connection->base); } return; } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Attempting reconnect, if it fails next attempt will be in %" PRIu64 " seconds", (void *)connection, connection->reconnect_timeouts.current_sec); /* Check before multiplying to avoid potential overflow */ if (connection->reconnect_timeouts.current_sec > connection->reconnect_timeouts.max_sec / 2) { connection->reconnect_timeouts.current_sec = connection->reconnect_timeouts.max_sec; } else { connection->reconnect_timeouts.current_sec *= 2; } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Attempting reconnect, if it fails next attempt will be in %" PRIu64 " seconds", (void *)connection, connection->reconnect_timeouts.current_sec); mqtt_connection_unlock_synced_data(connection); if (s_mqtt_client_connect( connection, connection->on_connection_complete, connection->on_connection_complete_ud)) { /* If reconnect attempt failed, schedule the next attempt */ s_aws_mqtt_schedule_reconnect_task(connection); } else { /* Ideally, it would be nice to move this inside the lock, but I'm unsure of the correctness */ connection->reconnect_task->task.timestamp = 0; } } else { aws_mem_release(reconnect->allocator, reconnect); } } void aws_create_reconnect_task(struct aws_mqtt_client_connection_311_impl *connection) { if (connection->reconnect_task == NULL) { connection->reconnect_task = aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_mqtt_reconnect_task)); AWS_FATAL_ASSERT(connection->reconnect_task != NULL); aws_atomic_init_ptr(&connection->reconnect_task->connection_ptr, connection); connection->reconnect_task->allocator = connection->allocator; aws_task_init( &connection->reconnect_task->task, s_attempt_reconnect, connection->reconnect_task, "mqtt_reconnect"); } } static void s_mqtt_client_connection_destroy_final(struct aws_mqtt_client_connection *base_connection) { struct aws_mqtt_client_connection_311_impl *connection = base_connection->impl; AWS_PRECONDITION(!connection || connection->allocator); if (!connection) { return; } /* If the slot is not NULL, the connection is still connected, which should be prevented from calling this function */ AWS_ASSERT(!connection->slot); AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Destroying connection", (void *)connection); aws_mqtt_client_on_connection_termination_fn *termination_handler = NULL; void *termination_handler_user_data = NULL; if (connection->on_termination != NULL) { termination_handler = connection->on_termination; termination_handler_user_data = connection->on_termination_ud; } /* If the reconnect_task isn't freed, free it */ if (connection->reconnect_task) { aws_mem_release(connection->reconnect_task->allocator, connection->reconnect_task); } aws_string_destroy(connection->host_name); /* Clear the credentials */ if (connection->username) { aws_string_destroy_secure(connection->username); } if (connection->password) { aws_string_destroy_secure(connection->password); } /* Clean up the will */ 
aws_byte_buf_clean_up(&connection->will.topic); aws_byte_buf_clean_up(&connection->will.payload); /* Clear the client_id */ aws_byte_buf_clean_up(&connection->client_id); /* Free all of the active subscriptions */ aws_mqtt_topic_tree_clean_up(&connection->thread_data.subscriptions); aws_mqtt311_decoder_clean_up(&connection->thread_data.decoder); aws_hash_table_clean_up(&connection->synced_data.outstanding_requests_table); /* clean up the pending_requests if it's not empty */ while (!aws_linked_list_empty(&connection->synced_data.pending_requests_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_requests_list); struct aws_mqtt_request *request = AWS_CONTAINER_OF(node, struct aws_mqtt_request, list_node); /* Fire the callback and clean up the memory, as the connection get destroyed. */ if (request->on_complete) { request->on_complete( &connection->base, request->packet_id, AWS_ERROR_MQTT_CONNECTION_DESTROYED, request->on_complete_ud); } aws_memory_pool_release(&connection->synced_data.requests_pool, request); } aws_memory_pool_clean_up(&connection->synced_data.requests_pool); aws_mutex_clean_up(&connection->synced_data.lock); aws_tls_connection_options_clean_up(&connection->tls_options); /* Clean up the websocket proxy options */ if (connection->http_proxy_config) { aws_http_proxy_config_destroy(connection->http_proxy_config); connection->http_proxy_config = NULL; } aws_mqtt_client_release(connection->client); /* Frees all allocated memory */ aws_mem_release(connection->allocator, connection); if (termination_handler != NULL) { (*termination_handler)(termination_handler_user_data); } } static void s_on_final_disconnect(struct aws_mqtt_client_connection *connection, void *userdata) { (void)userdata; s_mqtt_client_connection_destroy_final(connection); } static void s_mqtt_client_connection_start_destroy(struct aws_mqtt_client_connection_311_impl *connection) { bool call_destroy_final = false; AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Last refcount on connection has been released, start destroying the connection.", (void *)connection); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTED) { /* * We don't call the on_disconnect callback until we've transitioned to the DISCONNECTED state. So it's * safe to change it now while we hold the lock since we know we're not DISCONNECTED yet. 
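     *
     * [Illustrative note, not part of the original source.] The resulting teardown sequence is,
     * roughly:
     *
     *     last aws_mqtt_client_connection_release()           (refcount hits zero)
     *       -> s_mqtt_client_connection_start_destroy()
     *            swaps on_disconnect to s_on_final_disconnect under the lock (this block)
     *            and calls mqtt_disconnect_impl(connection, AWS_ERROR_SUCCESS)
     *       -> the channel shuts down and the state reaches DISCONNECTED
     *       -> s_on_final_disconnect()
     *            -> s_mqtt_client_connection_destroy_final(), which frees the connection and
     *               then fires the on_termination callback
     *
     * If the connection is already DISCONNECTED when the last reference drops, destroy_final is
     * instead called directly via the call_destroy_final path below.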
*/ connection->on_disconnect = s_on_final_disconnect; if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTING) { mqtt_disconnect_impl(connection, AWS_ERROR_SUCCESS); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: final refcount has been released, switch state to DISCONNECTING.", (void *)connection); mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTING); } } else { call_destroy_final = true; } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (call_destroy_final) { s_mqtt_client_connection_destroy_final(&connection->base); } } /******************************************************************************* * Connection Configuration ******************************************************************************/ /* To configure the connection, ensure the state is DISCONNECTED or CONNECTED */ static int s_check_connection_state_for_configuration(struct aws_mqtt_client_connection_311_impl *connection) { int result = AWS_OP_SUCCESS; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTED && connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Connection is currently pending connect/disconnect. Unable to make configuration changes until " "pending operation completes.", (void *)connection); result = AWS_OP_ERR; } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ return result; } static int s_aws_mqtt_client_connection_311_set_will( void *impl, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); AWS_PRECONDITION(topic); if (s_check_connection_state_for_configuration(connection)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (!aws_mqtt_is_valid_topic(topic)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Will topic is invalid", (void *)connection); return aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); } if (qos > AWS_MQTT_QOS_EXACTLY_ONCE) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Will qos is invalid", (void *)connection); return aws_raise_error(AWS_ERROR_MQTT_INVALID_QOS); } int result = AWS_OP_ERR; AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Setting last will with topic \"" PRInSTR "\"", (void *)connection, AWS_BYTE_CURSOR_PRI(*topic)); struct aws_byte_buf local_topic_buf; struct aws_byte_buf local_payload_buf; AWS_ZERO_STRUCT(local_topic_buf); AWS_ZERO_STRUCT(local_payload_buf); struct aws_byte_buf topic_buf = aws_byte_buf_from_array(topic->ptr, topic->len); if (aws_byte_buf_init_copy(&local_topic_buf, connection->allocator, &topic_buf)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy will topic", (void *)connection); goto cleanup; } connection->will.qos = qos; connection->will.retain = retain; struct aws_byte_buf payload_buf = aws_byte_buf_from_array(payload->ptr, payload->len); if (aws_byte_buf_init_copy(&local_payload_buf, connection->allocator, &payload_buf)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy will body", (void *)connection); goto cleanup; } if (connection->will.topic.len) { AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Will has been set before, resetting it.", (void *)connection); } /* Succeed. 
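     *
     * [Illustrative note, not part of the original source.] set_will uses a copy-then-swap
     * idiom: the topic and payload are first copied into local bufs, and only after every copy
     * has succeeded are the locals swapped into connection->will. The shared cleanup path below
     * then frees whatever is left in the locals - the previous will on success, the partially
     * built copies on failure - so the connection never holds a half-updated will. A minimal
     * model of the idiom (field names hypothetical):
     *
     *     struct aws_byte_buf staged;
     *     if (aws_byte_buf_init_copy(&staged, connection->allocator, &incoming_buf)) {
     *         return AWS_OP_ERR;                       // connection state untouched
     *     }
     *     struct aws_byte_buf old = connection->value; // hypothetical field
     *     connection->value = staged;                  // commit
     *     aws_byte_buf_clean_up(&old);                 // release the previous value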
*/ result = AWS_OP_SUCCESS; /* swap the local buffer with connection */ struct aws_byte_buf temp = local_topic_buf; local_topic_buf = connection->will.topic; connection->will.topic = temp; temp = local_payload_buf; local_payload_buf = connection->will.payload; connection->will.payload = temp; cleanup: aws_byte_buf_clean_up(&local_topic_buf); aws_byte_buf_clean_up(&local_payload_buf); return result; } static int s_aws_mqtt_client_connection_311_set_login( void *impl, const struct aws_byte_cursor *username, const struct aws_byte_cursor *password) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); AWS_PRECONDITION(username); if (s_check_connection_state_for_configuration(connection)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (username != NULL && aws_mqtt_validate_utf8_text(*username) == AWS_OP_ERR) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Invalid utf8 or forbidden codepoints in username", (void *)connection); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } int result = AWS_OP_ERR; AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting username and password", (void *)connection); struct aws_string *username_string = NULL; struct aws_string *password_string = NULL; username_string = aws_string_new_from_array(connection->allocator, username->ptr, username->len); if (!username_string) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy username", (void *)connection); goto cleanup; } if (password) { password_string = aws_string_new_from_array(connection->allocator, password->ptr, password->len); if (!password_string) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy password", (void *)connection); goto cleanup; } } if (connection->username) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Login information has been set before, resetting it.", (void *)connection); } /* Succeed. 
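     *
     * [Illustrative note, not part of the original source.] The same copy-then-swap idiom is
     * used here with aws_string copies, and aws_string_destroy_secure() in the cleanup path
     * zeroes the old credentials before freeing them. From user code the credentials are set
     * before connecting, e.g. (assuming the public wrapper is
     * aws_mqtt_client_connection_set_login; cursor names are hypothetical):
     *
     *     struct aws_byte_cursor user = aws_byte_cursor_from_c_str("my-user");
     *     struct aws_byte_cursor pass = aws_byte_cursor_from_c_str("my-pass");
     *     if (aws_mqtt_client_connection_set_login(connection, &user, &pass)) {
     *         // invalid state (connect/disconnect in progress) or allocation failure
     *     }
     *
     * The stored strings are later written into the CONNECT packet via
     * aws_mqtt_packet_connect_add_credentials() once the channel comes up.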
*/ result = AWS_OP_SUCCESS; /* swap the local string with connection */ struct aws_string *temp = username_string; username_string = connection->username; connection->username = temp; temp = password_string; password_string = connection->password; connection->password = temp; cleanup: aws_string_destroy_secure(username_string); aws_string_destroy_secure(password_string); return result; } static int s_aws_mqtt_client_connection_311_set_reconnect_timeout( void *impl, uint64_t min_timeout, uint64_t max_timeout) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (s_check_connection_state_for_configuration(connection)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Setting reconnect timeouts min: %" PRIu64 " max: %" PRIu64, (void *)connection, min_timeout, max_timeout); connection->reconnect_timeouts.min_sec = min_timeout; connection->reconnect_timeouts.max_sec = max_timeout; connection->reconnect_timeouts.current_sec = min_timeout; return AWS_OP_SUCCESS; } static int s_aws_mqtt_client_connection_311_set_connection_result_handlers( void *impl, aws_mqtt_client_on_connection_success_fn *on_connection_success, void *on_connection_success_ud, aws_mqtt_client_on_connection_failure_fn *on_connection_failure, void *on_connection_failure_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (s_check_connection_state_for_configuration(connection)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting connection success and failure handlers", (void *)connection); connection->on_connection_success = on_connection_success; connection->on_connection_success_ud = on_connection_success_ud; connection->on_connection_failure = on_connection_failure; connection->on_connection_failure_ud = on_connection_failure_ud; return AWS_OP_SUCCESS; } static int s_aws_mqtt_client_connection_311_set_connection_interruption_handlers( void *impl, aws_mqtt_client_on_connection_interrupted_fn *on_interrupted, void *on_interrupted_ud, aws_mqtt_client_on_connection_resumed_fn *on_resumed, void *on_resumed_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (s_check_connection_state_for_configuration(connection)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Setting connection interrupted and resumed handlers", (void *)connection); connection->on_interrupted = on_interrupted; connection->on_interrupted_ud = on_interrupted_ud; connection->on_resumed = on_resumed; connection->on_resumed_ud = on_resumed_ud; return AWS_OP_SUCCESS; } static int s_aws_mqtt_client_connection_311_set_connection_closed_handler( void *impl, aws_mqtt_client_on_connection_closed_fn *on_closed, void *on_closed_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (s_check_connection_state_for_configuration(connection)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting connection closed handler", (void *)connection); connection->on_closed = on_closed; connection->on_closed_ud = on_closed_ud; return AWS_OP_SUCCESS; } static int s_aws_mqtt_client_connection_311_set_on_any_publish_handler( void *impl, aws_mqtt_client_publish_received_fn *on_any_publish, void *on_any_publish_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); { /* 
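     * [Illustrative note, not part of the original source.] The reconnect timeout setter above
     * seeds the capped-doubling backoff used by s_attempt_reconnect: current_sec starts at the
     * minimum and doubles between attempts until it reaches the maximum. A typical
     * configuration from user code (assuming the public wrapper is
     * aws_mqtt_client_connection_set_reconnect_timeout, values in seconds):
     *
     *     // retry after 1s, then 2s, 4s, ... capped at 60s between attempts
     *     aws_mqtt_client_connection_set_reconnect_timeout(connection, 1, 60);
     *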
BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_CONNECTED) { mqtt_connection_unlock_synced_data(connection); AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Connection is connected, publishes may arrive anytime. Unable to set publish handler until " "offline.", (void *)connection); return aws_raise_error(AWS_ERROR_INVALID_STATE); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting on_any_publish handler", (void *)connection); connection->on_any_publish = on_any_publish; connection->on_any_publish_ud = on_any_publish_ud; return AWS_OP_SUCCESS; } static int s_aws_mqtt_client_connection_311_set_connection_termination_handler( void *impl, aws_mqtt_client_on_connection_termination_fn *on_termination, void *on_termination_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (s_check_connection_state_for_configuration(connection)) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting connection termination handler", (void *)connection); connection->on_termination = on_termination; connection->on_termination_ud = on_termination_ud; return AWS_OP_SUCCESS; } /******************************************************************************* * Websockets ******************************************************************************/ static int s_aws_mqtt_client_connection_311_use_websockets( void *impl, aws_mqtt_transform_websocket_handshake_fn *transformer, void *transformer_ud, aws_mqtt_validate_websocket_handshake_fn *validator, void *validator_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; connection->websocket.handshake_transformer = transformer; connection->websocket.handshake_transformer_ud = transformer_ud; connection->websocket.handshake_validator = validator; connection->websocket.handshake_validator_ud = validator_ud; connection->websocket.enabled = true; AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Using websockets", (void *)connection); return AWS_OP_SUCCESS; } static int s_aws_mqtt_client_connection_311_set_http_proxy_options( void *impl, struct aws_http_proxy_options *proxy_options) { struct aws_mqtt_client_connection_311_impl *connection = impl; /* If there is existing proxy options, nuke em */ if (connection->http_proxy_config) { aws_http_proxy_config_destroy(connection->http_proxy_config); connection->http_proxy_config = NULL; } connection->http_proxy_config = aws_http_proxy_config_new_tunneling_from_proxy_options(connection->allocator, proxy_options); return connection->http_proxy_config != NULL ? AWS_OP_SUCCESS : AWS_OP_ERR; } static int s_aws_mqtt_client_connection_311_set_host_resolution_options( void *impl, const struct aws_host_resolution_config *host_resolution_config) { struct aws_mqtt_client_connection_311_impl *connection = impl; connection->host_resolution_config = *host_resolution_config; return AWS_OP_SUCCESS; } static void s_on_websocket_shutdown(struct aws_websocket *websocket, int error_code, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; struct aws_channel *channel = connection->slot ? 
connection->slot->channel : NULL; s_mqtt_client_shutdown(connection->client->bootstrap, error_code, channel, connection); if (websocket) { aws_websocket_release(websocket); } } static void s_on_websocket_setup(const struct aws_websocket_on_connection_setup_data *setup, void *user_data) { /* Setup callback contract is: if error_code is non-zero then websocket is NULL. */ AWS_FATAL_ASSERT((setup->error_code != 0) == (setup->websocket == NULL)); struct aws_mqtt_client_connection_311_impl *connection = user_data; struct aws_channel *channel = NULL; if (connection->websocket.handshake_request) { aws_http_message_release(connection->websocket.handshake_request); connection->websocket.handshake_request = NULL; } if (setup->websocket) { channel = aws_websocket_get_channel(setup->websocket); AWS_FATAL_ASSERT(channel); AWS_FATAL_ASSERT(aws_channel_get_event_loop(channel) == connection->loop); /* Websocket must be "converted" before the MQTT handler can be installed next to it. */ if (aws_websocket_convert_to_midchannel_handler(setup->websocket)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed converting websocket, error %d (%s)", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); aws_channel_shutdown(channel, aws_last_error()); return; } /* If validation callback is set, let the user accept/reject the handshake */ if (connection->websocket.handshake_validator) { AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Validating websocket handshake response.", (void *)connection); if (connection->websocket.handshake_validator( &connection->base, setup->handshake_response_header_array, setup->num_handshake_response_headers, connection->websocket.handshake_validator_ud)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failure reported by websocket handshake validator callback, error %d (%s)", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); aws_channel_shutdown(channel, aws_last_error()); return; } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Done validating websocket handshake response.", (void *)connection); } } /* Call into the channel-setup callback, the rest of the logic is the same. */ s_mqtt_client_init(connection->client->bootstrap, setup->error_code, channel, connection); } static aws_mqtt_transform_websocket_handshake_complete_fn s_websocket_handshake_transform_complete; /* fwd declare */ static int s_websocket_connect(struct aws_mqtt_client_connection_311_impl *connection) { AWS_ASSERT(connection->websocket.enabled); /* Build websocket handshake request */ connection->websocket.handshake_request = aws_http_message_new_websocket_handshake_request( connection->allocator, *g_websocket_handshake_default_path, aws_byte_cursor_from_string(connection->host_name)); if (!connection->websocket.handshake_request) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to generate websocket handshake request", (void *)connection); goto error; } if (aws_http_message_add_header( connection->websocket.handshake_request, *g_websocket_handshake_default_protocol_header)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to generate websocket handshake request", (void *)connection); goto error; } /* If user registered a transform callback, call it and wait for transform_complete() to be called. * If no callback registered, call the transform_complete() function ourselves. 
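     *
     * [Illustrative note, not part of the original source.] A user-supplied transformer usually
     * mutates the handshake request (adding signing or authorizer headers, possibly
     * asynchronously) and must then hand it back by invoking the supplied completion function
     * exactly once, mirroring the call made just below. A minimal sketch with a hypothetical
     * header:
     *
     *     static void s_my_handshake_transformer(
     *         struct aws_http_message *request,
     *         void *user_data,
     *         aws_mqtt_transform_websocket_handshake_complete_fn *complete_fn,
     *         void *complete_ctx) {
     *
     *         (void)user_data;
     *         struct aws_http_header extra = {
     *             .name = aws_byte_cursor_from_c_str("x-amz-customauthorizer-name"),
     *             .value = aws_byte_cursor_from_c_str("my-authorizer"),
     *         };
     *         aws_http_message_add_header(request, extra);
     *         complete_fn(request, AWS_ERROR_SUCCESS, complete_ctx);
     *     }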
*/ if (connection->websocket.handshake_transformer) { AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Transforming websocket handshake request.", (void *)connection); connection->websocket.handshake_transformer( connection->websocket.handshake_request, connection->websocket.handshake_transformer_ud, s_websocket_handshake_transform_complete, connection); } else { s_websocket_handshake_transform_complete( connection->websocket.handshake_request, AWS_ERROR_SUCCESS, connection); } return AWS_OP_SUCCESS; error: aws_http_message_release(connection->websocket.handshake_request); connection->websocket.handshake_request = NULL; return AWS_OP_ERR; } static void s_websocket_handshake_transform_complete( struct aws_http_message *handshake_request, int error_code, void *complete_ctx) { struct aws_mqtt_client_connection_311_impl *connection = complete_ctx; if (error_code) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failure reported by websocket handshake transform callback.", (void *)connection); goto error; } if (connection->websocket.handshake_transformer) { AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Done transforming websocket handshake request.", (void *)connection); } /* Call websocket connect() */ struct aws_websocket_client_connection_options websocket_options = { .allocator = connection->allocator, .bootstrap = connection->client->bootstrap, .socket_options = &connection->socket_options, .tls_options = connection->tls_options.ctx ? &connection->tls_options : NULL, .host = aws_byte_cursor_from_string(connection->host_name), .port = connection->port, .handshake_request = handshake_request, .initial_window_size = 0, /* Prevent websocket data from arriving before the MQTT handler is installed */ .user_data = connection, .on_connection_setup = s_on_websocket_setup, .on_connection_shutdown = s_on_websocket_shutdown, .requested_event_loop = connection->loop, .host_resolution_config = &connection->host_resolution_config, }; struct aws_http_proxy_options proxy_options; AWS_ZERO_STRUCT(proxy_options); if (connection->http_proxy_config != NULL) { aws_http_proxy_options_init_from_config(&proxy_options, connection->http_proxy_config); websocket_options.proxy_options = &proxy_options; } if (aws_websocket_client_connect(&websocket_options)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to initiate websocket connection.", (void *)connection); error_code = aws_last_error(); goto error; } /* Success */ return; error:; /* Proceed to next step, telling it that we failed. */ struct aws_websocket_on_connection_setup_data websocket_setup = {.error_code = error_code}; s_on_websocket_setup(&websocket_setup, connection); } /******************************************************************************* * Connect ******************************************************************************/ static int s_aws_mqtt_client_connection_311_connect( void *impl, const struct aws_mqtt_connection_options *connection_options) { struct aws_mqtt_client_connection_311_impl *connection = impl; if (connection_options == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (aws_mqtt_validate_utf8_text(connection_options->client_id) == AWS_OP_ERR) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Invalid utf8 or forbidden codepoints in client id", (void *)connection); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* TODO: Do we need to support resuming the connection if user connect to the same connection & endpoint and the * clean_session is false? 
* If not, the broker will resume the connection in this case, and we pretend we are making a new connection, which * may cause some confusing behavior. This is basically what we have now. NOTE: The topic_tree is living with the * connection right now, which is really confusing. * If yes, an edge case will be: User disconnected from the connection with clean_session * being false, then connect to another endpoint with the same connection object, we probably need to clear all * those states from last connection and create a new "connection". Problem is what if user finish the second * connection and reconnect to the first endpoint. There is no way for us to resume the connection in this case. */ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Opening connection", (void *)connection); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTED) { mqtt_connection_unlock_synced_data(connection); return aws_raise_error(AWS_ERROR_MQTT_ALREADY_CONNECTED); } mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_CONNECTING); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Begin connecting process, switch state to CONNECTING.", (void *)connection); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (connection->host_name) { aws_string_destroy(connection->host_name); } connection->host_name = aws_string_new_from_array( connection->allocator, connection_options->host_name.ptr, connection_options->host_name.len); connection->port = connection_options->port; connection->socket_options = *connection_options->socket_options; connection->clean_session = connection_options->clean_session; connection->keep_alive_time_secs = connection_options->keep_alive_time_secs; connection->connection_count = 0; if (!connection->keep_alive_time_secs) { connection->keep_alive_time_secs = s_default_keep_alive_sec; } connection->keep_alive_time_ns = aws_timestamp_convert(connection->keep_alive_time_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); if (!connection_options->protocol_operation_timeout_ms) { connection->operation_timeout_ns = UINT64_MAX; } else { connection->operation_timeout_ns = aws_timestamp_convert( (uint64_t)connection_options->protocol_operation_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); } if (!connection_options->ping_timeout_ms) { connection->ping_timeout_ns = s_default_ping_timeout_ns; } else { connection->ping_timeout_ns = aws_timestamp_convert( (uint64_t)connection_options->ping_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); } /* Keep alive time should always be greater than the timeouts. 
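     * [Illustrative note, not part of the original source.] Both sides of the comparison are in
     * nanoseconds after aws_timestamp_convert(): with, say, a 30 second keep-alive and a
     * 3000 ms ping timeout the check below compares 30 * 10^9 ns against 3 * 10^9 ns and
     * passes, while a 2 second keep-alive with the same ping timeout would trip the fatal
     * assert. (Illustrative numbers, not the defaults defined in this file.)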
*/ if (AWS_UNLIKELY(connection->keep_alive_time_ns <= connection->ping_timeout_ns)) { AWS_LOGF_FATAL( AWS_LS_MQTT_CLIENT, "id=%p: Illegal configuration, Connection keep alive %" PRIu64 "ns must be greater than the request timeouts %" PRIu64 "ns.", (void *)connection, connection->keep_alive_time_ns, connection->ping_timeout_ns); AWS_FATAL_ASSERT(connection->keep_alive_time_ns > connection->ping_timeout_ns); } AWS_LOGF_INFO( AWS_LS_MQTT_CLIENT, "id=%p: using ping timeout of %" PRIu64 " ns", (void *)connection, connection->ping_timeout_ns); /* Cheat and set the tls_options host_name to our copy if they're the same */ if (connection_options->tls_options) { connection->use_tls = true; if (aws_tls_connection_options_copy(&connection->tls_options, connection_options->tls_options)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy TLS Connection Options into connection", (void *)connection); return AWS_OP_ERR; } if (!connection_options->tls_options->server_name) { struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_string(connection->host_name); if (aws_tls_connection_options_set_server_name( &connection->tls_options, connection->allocator, &host_name_cur)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to set TLS Connection Options server name", (void *)connection); goto error; } } } else { AWS_ZERO_STRUCT(connection->tls_options); } /* Clean up old client_id */ if (connection->client_id.buffer) { aws_byte_buf_clean_up(&connection->client_id); } /* Only set connection->client_id if a new one was provided */ struct aws_byte_buf client_id_buf = aws_byte_buf_from_array(connection_options->client_id.ptr, connection_options->client_id.len); if (aws_byte_buf_init_copy(&connection->client_id, connection->allocator, &client_id_buf)) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy client_id into connection", (void *)connection); goto error; } struct aws_linked_list cancelling_requests; aws_linked_list_init(&cancelling_requests); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->clean_session) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: a clean session connection requested, all the previous requests will fail", (void *)connection); aws_linked_list_swap_contents(&connection->synced_data.pending_requests_list, &cancelling_requests); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (!aws_linked_list_empty(&cancelling_requests)) { struct aws_linked_list_node *current = aws_linked_list_front(&cancelling_requests); const struct aws_linked_list_node *end = aws_linked_list_end(&cancelling_requests); /* invoke all the complete callback for requests from previous session */ while (current != end) { struct aws_mqtt_request *request = AWS_CONTAINER_OF(current, struct aws_mqtt_request, list_node); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Establishing a new clean session connection, discard the previous request %" PRIu16, (void *)connection, request->packet_id); if (request->on_complete) { request->on_complete( &connection->base, request->packet_id, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION, request->on_complete_ud); } current = current->next; } /* free the resource */ { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); while (!aws_linked_list_empty(&cancelling_requests)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&cancelling_requests); struct aws_mqtt_request *request = AWS_CONTAINER_OF(node, struct aws_mqtt_request, list_node); aws_hash_table_remove( 
&connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL); aws_memory_pool_release(&connection->synced_data.requests_pool, request); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ } /* Begin the connecting process, acquire the connection to keep it alive until we disconnected */ aws_mqtt_client_connection_acquire(&connection->base); if (s_mqtt_client_connect(connection, connection_options->on_connection_complete, connection_options->user_data)) { /* * An error calling s_mqtt_client_connect should (must) be mutually exclusive with s_mqtt_client_shutdown(). * So it should be safe and correct to call release now to undo the pinning we did a few lines above. */ aws_mqtt_client_connection_release(&connection->base); /* client_id has been updated with something but it will get cleaned up when the connection gets cleaned up * so we don't need to worry about it here*/ if (connection->clean_session) { AWS_LOGF_WARN( AWS_LS_MQTT_CLIENT, "id=%p: The previous session has been cleaned up and losted!", (void *)connection); } goto error; } return AWS_OP_SUCCESS; error: aws_tls_connection_options_clean_up(&connection->tls_options); AWS_ZERO_STRUCT(connection->tls_options); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTED); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ return AWS_OP_ERR; } static int s_mqtt_client_connect( struct aws_mqtt_client_connection_311_impl *connection, aws_mqtt_client_on_connection_complete_fn *on_connection_complete, void *userdata) { connection->on_connection_complete = on_connection_complete; connection->on_connection_complete_ud = userdata; int result = 0; if (connection->websocket.enabled) { result = s_websocket_connect(connection); } else { struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = connection->client->bootstrap; channel_options.host_name = aws_string_c_str(connection->host_name); channel_options.port = connection->port; channel_options.socket_options = &connection->socket_options; channel_options.tls_options = connection->use_tls ? 
&connection->tls_options : NULL; channel_options.setup_callback = &s_mqtt_client_init; channel_options.shutdown_callback = &s_mqtt_client_shutdown; channel_options.user_data = connection; channel_options.requested_event_loop = connection->loop; channel_options.host_resolution_override_config = &connection->host_resolution_config; if (connection->http_proxy_config == NULL) { result = aws_client_bootstrap_new_socket_channel(&channel_options); } else { struct aws_http_proxy_options proxy_options; AWS_ZERO_STRUCT(proxy_options); aws_http_proxy_options_init_from_config(&proxy_options, connection->http_proxy_config); result = aws_http_proxy_new_socket_channel(&channel_options, &proxy_options); } } if (result) { /* Connection attempt failed */ AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to begin connection routine, error %d (%s).", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /******************************************************************************* * Reconnect DEPRECATED ******************************************************************************/ static int s_aws_mqtt_client_connection_311_reconnect( void *impl, aws_mqtt_client_on_connection_complete_fn *on_connection_complete, void *userdata) { (void)impl; (void)on_connection_complete; (void)userdata; /* DEPRECATED, connection will reconnect automatically now. */ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "aws_mqtt_client_connection_reconnect has been DEPRECATED."); return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } /******************************************************************************* * Disconnect ******************************************************************************/ static int s_aws_mqtt_client_connection_311_disconnect( void *impl, aws_mqtt_client_on_disconnect_fn *on_disconnect, void *userdata) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: user called disconnect.", (void *)connection); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED && connection->synced_data.state != AWS_MQTT_CLIENT_STATE_RECONNECTING) { mqtt_connection_unlock_synced_data(connection); AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Connection is not open, and may not be closed", (void *)connection); aws_raise_error(AWS_ERROR_MQTT_NOT_CONNECTED); return AWS_OP_ERR; } mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTING); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: User requests disconnecting, switch state to DISCONNECTING.", (void *)connection); connection->on_disconnect = on_disconnect; connection->on_disconnect_ud = userdata; mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Closing connection", (void *)connection); mqtt_disconnect_impl(connection, AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } /******************************************************************************* * Subscribe ******************************************************************************/ static void s_on_publish_client_wrapper( const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { struct subscribe_task_topic *task_topic = userdata; /* Call out to the user callback */ if (task_topic->request.on_publish) { task_topic->request.on_publish( &task_topic->connection->base, 
topic, payload, dup, qos, retain, task_topic->request.on_publish_ud); } } static void s_task_topic_release(void *userdata) { struct subscribe_task_topic *task_topic = userdata; if (task_topic != NULL) { aws_ref_count_release(&task_topic->ref_count); } } static void s_task_topic_clean_up(void *userdata) { struct subscribe_task_topic *task_topic = userdata; if (task_topic->request.on_cleanup) { task_topic->request.on_cleanup(task_topic->request.on_publish_ud); } aws_string_destroy(task_topic->filter); aws_mem_release(task_topic->connection->allocator, task_topic); } static enum aws_mqtt_client_request_state s_subscribe_send(uint16_t packet_id, bool is_first_attempt, void *userdata) { (void)is_first_attempt; struct subscribe_task_arg *task_arg = userdata; bool initing_packet = task_arg->subscribe.fixed_header.packet_type == 0; struct aws_io_message *message = NULL; AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Attempting send of subscribe %" PRIu16 " (%s)", (void *)task_arg->connection, packet_id, is_first_attempt ? "first attempt" : "resend"); if (initing_packet) { /* Init the subscribe packet */ if (aws_mqtt_packet_subscribe_init(&task_arg->subscribe, task_arg->connection->allocator, packet_id)) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } } const size_t num_topics = aws_array_list_length(&task_arg->topics); if (num_topics <= 0) { aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); return AWS_MQTT_CLIENT_REQUEST_ERROR; } AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, num_topics * aws_mqtt_topic_tree_action_size); struct aws_array_list transaction; aws_array_list_init_static(&transaction, transaction_buf, num_topics, aws_mqtt_topic_tree_action_size); for (size_t i = 0; i < num_topics; ++i) { struct subscribe_task_topic *topic = NULL; aws_array_list_get_at(&task_arg->topics, &topic, i); AWS_ASSUME(topic); /* We know we're within bounds */ if (initing_packet) { if (aws_mqtt_packet_subscribe_add_topic(&task_arg->subscribe, topic->request.topic, topic->request.qos)) { goto handle_error; } } if (!task_arg->tree_updated) { struct aws_byte_cursor filter_cursor = aws_byte_cursor_from_string(topic->filter); if (s_is_topic_shared_topic(&filter_cursor)) { struct aws_string *normal_topic = s_get_normal_topic_from_shared_topic(topic->filter); if (normal_topic == NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Topic is shared subscription topic but topic could not be parsed from " "shared subscription topic.", (void *)task_arg->connection); goto handle_error; } if (aws_mqtt_topic_tree_transaction_insert( &task_arg->connection->thread_data.subscriptions, &transaction, normal_topic, topic->request.qos, s_on_publish_client_wrapper, s_task_topic_release, topic)) { aws_string_destroy(normal_topic); goto handle_error; } aws_string_destroy(normal_topic); } else { if (aws_mqtt_topic_tree_transaction_insert( &task_arg->connection->thread_data.subscriptions, &transaction, topic->filter, topic->request.qos, s_on_publish_client_wrapper, s_task_topic_release, topic)) { goto handle_error; } } /* If insert succeed, acquire the refcount */ aws_ref_count_acquire(&topic->ref_count); } } message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->subscribe.fixed_header); if (!message) { goto handle_error; } if (aws_mqtt_packet_subscribe_encode(&message->message_data, &task_arg->subscribe)) { goto handle_error; } /* This is not necessarily a fatal error; if the subscribe fails, it'll just retry. Still need to clean up though. 
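     *
     * [Illustrative note, not part of the original source.] The subscription state itself is
     * protected by the transaction pattern above: each
     * aws_mqtt_topic_tree_transaction_insert() is staged into the stack-allocated transaction
     * array list, and the tree is only mutated for good when
     * aws_mqtt_topic_tree_transaction_commit() runs after the packet has been handed to the
     * channel; on any failure the handle_error path calls
     * aws_mqtt_topic_tree_transaction_roll_back() instead, so a failed SUBSCRIBE leaves no
     * stray entries behind. In outline:
     *
     *     stage:    ..._transaction_insert(tree, &transaction, filter, qos, callbacks...);
     *     success:  ..._transaction_commit(tree, &transaction);
     *     failure:  ..._transaction_roll_back(tree, &transaction);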
*/ if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); } if (!task_arg->tree_updated) { aws_mqtt_topic_tree_transaction_commit(&task_arg->connection->thread_data.subscriptions, &transaction); task_arg->tree_updated = true; } aws_array_list_clean_up(&transaction); return AWS_MQTT_CLIENT_REQUEST_ONGOING; handle_error: if (message) { aws_mem_release(message->allocator, message); } if (!task_arg->tree_updated) { aws_mqtt_topic_tree_transaction_roll_back(&task_arg->connection->thread_data.subscriptions, &transaction); } aws_array_list_clean_up(&transaction); return AWS_MQTT_CLIENT_REQUEST_ERROR; } static void s_subscribe_complete( struct aws_mqtt_client_connection *connection_base, uint16_t packet_id, int error_code, void *userdata) { struct aws_mqtt_client_connection_311_impl *connection = connection_base->impl; struct subscribe_task_arg *task_arg = userdata; struct subscribe_task_topic *topic = NULL; aws_array_list_get_at(&task_arg->topics, &topic, 0); AWS_ASSUME(topic); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Subscribe %" PRIu16 " completed with error_code %d", (void *)connection, packet_id, error_code); size_t list_len = aws_array_list_length(&task_arg->topics); if (task_arg->on_suback.multi) { /* create a list of aws_mqtt_topic_subscription pointers from topics for the callback */ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, cb_list_buf, list_len * sizeof(void *)); struct aws_array_list cb_list; aws_array_list_init_static(&cb_list, cb_list_buf, list_len, sizeof(void *)); int err = 0; for (size_t i = 0; i < list_len; i++) { err |= aws_array_list_get_at(&task_arg->topics, &topic, i); struct aws_mqtt_topic_subscription *subscription = &topic->request; err |= aws_array_list_push_back(&cb_list, &subscription); } AWS_ASSUME(!err); task_arg->on_suback.multi(&connection->base, packet_id, &cb_list, error_code, task_arg->on_suback_ud); aws_array_list_clean_up(&cb_list); } else if (task_arg->on_suback.single) { task_arg->on_suback.single( &connection->base, packet_id, &topic->request.topic, topic->request.qos, error_code, task_arg->on_suback_ud); } for (size_t i = 0; i < list_len; i++) { aws_array_list_get_at(&task_arg->topics, &topic, i); s_task_topic_release(topic); } aws_array_list_clean_up(&task_arg->topics); aws_mqtt_packet_subscribe_clean_up(&task_arg->subscribe); aws_mem_release(task_arg->connection->allocator, task_arg); } static uint16_t s_aws_mqtt_client_connection_311_subscribe_multiple( void *impl, const struct aws_array_list *topic_filters, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (topic_filters == NULL || aws_array_list_length(topic_filters) == 0) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return 0; } struct subscribe_task_arg *task_arg = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_arg)); if (!task_arg) { return 0; } task_arg->connection = connection; task_arg->on_suback.multi = on_suback; task_arg->on_suback_ud = on_suback_ud; const size_t num_topics = aws_array_list_length(topic_filters); if (aws_array_list_init_dynamic(&task_arg->topics, connection->allocator, num_topics, sizeof(void *))) { goto handle_error; } AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting multi-topic subscribe", (void *)connection); /* Calculate the size of the subscribe packet * The fixed header is 2 bytes and the packet ID is 2 bytes. 
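     * [Illustrative note, not part of the original source.] Worked example: subscribing to
     * "a/b" (3 bytes) and "sensors/+/temp" (14 bytes) gives
     * 4 + (3 + 3) + (3 + 14) = 27 bytes, i.e. the fixed 4 bytes plus, per filter, 1 QoS byte,
     * 2 length bytes and the filter itself; the total is then passed to mqtt_create_request()
     * together with the send/complete callbacks.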
* Note: The size of the topic filter(s) are calculated in the loop below */ uint64_t subscribe_packet_size = 4; for (size_t i = 0; i < num_topics; ++i) { struct aws_mqtt_topic_subscription *request = NULL; aws_array_list_get_at_ptr(topic_filters, (void **)&request, i); if (!aws_mqtt_is_valid_topic_filter(&request->topic)) { aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); goto handle_error; } struct subscribe_task_topic *task_topic = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_topic)); if (!task_topic) { goto handle_error; } aws_ref_count_init(&task_topic->ref_count, task_topic, (aws_simple_completion_callback *)s_task_topic_clean_up); task_topic->connection = connection; task_topic->request = *request; task_topic->filter = aws_string_new_from_array( connection->allocator, task_topic->request.topic.ptr, task_topic->request.topic.len); if (!task_topic->filter) { aws_mem_release(connection->allocator, task_topic); goto handle_error; } /* Update request topic cursor to refer to owned string */ task_topic->request.topic = aws_byte_cursor_from_string(task_topic->filter); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Adding topic \"" PRInSTR "\"", (void *)connection, AWS_BYTE_CURSOR_PRI(task_topic->request.topic)); /* Push into the list */ aws_array_list_push_back(&task_arg->topics, &task_topic); /* Subscribe topic filter is: always 3 bytes (1 for QoS, 2 for Length MSB/LSB) + the size of the topic filter */ subscribe_packet_size += 3 + task_topic->request.topic.len; } uint16_t packet_id = mqtt_create_request( task_arg->connection, &s_subscribe_send, task_arg, &s_subscribe_complete, task_arg, false, /* noRetry */ subscribe_packet_size); if (packet_id == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to kick off multi-topic subscribe, with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto handle_error; } AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Sending multi-topic subscribe %" PRIu16, (void *)connection, packet_id); return packet_id; handle_error: if (task_arg) { if (task_arg->topics.data) { const size_t num_added_topics = aws_array_list_length(&task_arg->topics); for (size_t i = 0; i < num_added_topics; ++i) { struct subscribe_task_topic *task_topic = NULL; aws_array_list_get_at(&task_arg->topics, (void **)&task_topic, i); AWS_ASSUME(task_topic); aws_string_destroy(task_topic->filter); aws_mem_release(connection->allocator, task_topic); } aws_array_list_clean_up(&task_arg->topics); } aws_mem_release(connection->allocator, task_arg); } return 0; } /******************************************************************************* * Subscribe Single ******************************************************************************/ static void s_subscribe_single_complete( struct aws_mqtt_client_connection *connection_base, uint16_t packet_id, int error_code, void *userdata) { struct aws_mqtt_client_connection_311_impl *connection = connection_base->impl; struct subscribe_task_arg *task_arg = userdata; AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Subscribe %" PRIu16 " completed with error code %d", (void *)connection, packet_id, error_code); AWS_ASSERT(aws_array_list_length(&task_arg->topics) == 1); struct subscribe_task_topic *topic = NULL; aws_array_list_get_at(&task_arg->topics, &topic, 0); AWS_ASSUME(topic); /* There needs to be exactly 1 topic in this list */ if (task_arg->on_suback.single) { AWS_ASSUME(aws_string_is_valid(topic->filter)); aws_mqtt_suback_fn *suback = task_arg->on_suback.single; suback( &connection->base, packet_id, 
&topic->request.topic, topic->request.qos, error_code, task_arg->on_suback_ud); } s_task_topic_release(topic); aws_array_list_clean_up(&task_arg->topics); aws_mqtt_packet_subscribe_clean_up(&task_arg->subscribe); aws_mem_release(task_arg->connection->allocator, task_arg); } static uint16_t s_aws_mqtt_client_connection_311_subscribe( void *impl, const struct aws_byte_cursor *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_client_publish_received_fn *on_publish, void *on_publish_ud, aws_mqtt_userdata_cleanup_fn *on_ud_cleanup, aws_mqtt_suback_fn *on_suback, void *on_suback_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (!aws_mqtt_is_valid_topic_filter(topic_filter)) { aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); return 0; } /* Because we know we're only going to have 1 topic, we can cheat and allocate the array_list in the same block as * the task argument. */ void *task_topic_storage = NULL; struct subscribe_task_topic *task_topic = NULL; struct subscribe_task_arg *task_arg = aws_mem_acquire_many( connection->allocator, 2, &task_arg, sizeof(struct subscribe_task_arg), &task_topic_storage, sizeof(struct subscribe_task_topic *)); if (!task_arg) { goto handle_error; } AWS_ZERO_STRUCT(*task_arg); task_arg->connection = connection; task_arg->on_suback.single = on_suback; task_arg->on_suback_ud = on_suback_ud; /* It stores the pointer */ aws_array_list_init_static(&task_arg->topics, task_topic_storage, 1, sizeof(void *)); /* Allocate the topic and push into the list */ task_topic = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_topic)); if (!task_topic) { goto handle_error; } aws_ref_count_init(&task_topic->ref_count, task_topic, (aws_simple_completion_callback *)s_task_topic_clean_up); aws_array_list_push_back(&task_arg->topics, &task_topic); task_topic->filter = aws_string_new_from_array(connection->allocator, topic_filter->ptr, topic_filter->len); if (!task_topic->filter) { goto handle_error; } task_topic->connection = connection; task_topic->request.topic = aws_byte_cursor_from_string(task_topic->filter); task_topic->request.qos = qos; task_topic->request.on_publish = on_publish; task_topic->request.on_cleanup = on_ud_cleanup; task_topic->request.on_publish_ud = on_publish_ud; /* Calculate the size of the (single) subscribe packet * The fixed header is 2 bytes, * the topic filter is always at least 3 bytes (1 for QoS, 2 for Length MSB/LSB) * - plus the size of the topic filter * and finally the packet ID is 2 bytes */ uint64_t subscribe_packet_size = 7 + topic_filter->len; uint16_t packet_id = mqtt_create_request( task_arg->connection, &s_subscribe_send, task_arg, &s_subscribe_single_complete, task_arg, false, /* noRetry */ subscribe_packet_size); if (packet_id == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to start subscribe on topic " PRInSTR " with error %s", (void *)connection, AWS_BYTE_CURSOR_PRI(task_topic->request.topic), aws_error_debug_str(aws_last_error())); goto handle_error; } AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Starting subscribe %" PRIu16 " on topic " PRInSTR, (void *)connection, packet_id, AWS_BYTE_CURSOR_PRI(task_topic->request.topic)); return packet_id; handle_error: if (task_topic) { if (task_topic->filter) { aws_string_destroy(task_topic->filter); } aws_mem_release(connection->allocator, task_topic); } if (task_arg) { aws_mem_release(connection->allocator, task_arg); } return 0; } /******************************************************************************* * Resubscribe 
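 *
 * [Illustrative note, not part of the original source.] Resubscribe replays every filter
 * currently held in the connection's topic tree as a single SUBSCRIBE, which lets a client
 * restore its server-side subscriptions after reconnecting without a persisted session. From
 * user code (assuming the public wrapper is aws_mqtt_resubscribe_existing_topics and
 * s_on_resub_suback is a user-defined aws_mqtt_suback_multi_fn):
 *
 *     uint16_t packet_id = aws_mqtt_resubscribe_existing_topics(connection, s_on_resub_suback, NULL);
 *     if (packet_id == 0) {
 *         // the request could not be created; aws_last_error() has details
 *     }
 *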
******************************************************************************/ static bool s_reconnect_resub_iterator(const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *user_data) { struct subscribe_task_arg *task_arg = user_data; struct subscribe_task_topic *task_topic = aws_mem_calloc(task_arg->connection->allocator, 1, sizeof(struct subscribe_task_topic)); struct aws_mqtt_topic_subscription sub; AWS_ZERO_STRUCT(sub); sub.topic = *topic; sub.qos = qos; task_topic->request = sub; task_topic->connection = task_arg->connection; aws_array_list_push_back(&task_arg->topics, &task_topic); aws_ref_count_init(&task_topic->ref_count, task_topic, (aws_simple_completion_callback *)s_task_topic_clean_up); return true; } static bool s_reconnect_resub_operation_statistics_iterator( const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *user_data) { (void)qos; uint64_t *packet_size = user_data; /* Always 3 bytes (1 for QoS, 2 for length MSB and LSB respectively) */ *packet_size += 3; /* The size of the topic filter */ *packet_size += topic->len; return true; } static enum aws_mqtt_client_request_state s_resubscribe_send( uint16_t packet_id, bool is_first_attempt, void *userdata) { struct subscribe_task_arg *task_arg = userdata; bool initing_packet = task_arg->subscribe.fixed_header.packet_type == 0; struct aws_io_message *message = NULL; const size_t sub_count = aws_mqtt_topic_tree_get_sub_count(&task_arg->connection->thread_data.subscriptions); /* Init the topics list even if there are no topics because the s_resubscribe_complete callback will always run. */ if (aws_array_list_init_dynamic(&task_arg->topics, task_arg->connection->allocator, sub_count, sizeof(void *))) { goto handle_error; } if (sub_count == 0) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Not subscribed to any topics. Resubscribe is unnecessary, no packet will be sent.", (void *)task_arg->connection); return AWS_MQTT_CLIENT_REQUEST_COMPLETE; } aws_mqtt_topic_tree_iterate(&task_arg->connection->thread_data.subscriptions, s_reconnect_resub_iterator, task_arg); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Attempting send of resubscribe %" PRIu16 " (%s)", (void *)task_arg->connection, packet_id, is_first_attempt ? "first attempt" : "resend"); if (initing_packet) { /* Init the subscribe packet */ if (aws_mqtt_packet_subscribe_init(&task_arg->subscribe, task_arg->connection->allocator, packet_id)) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } const size_t num_topics = aws_array_list_length(&task_arg->topics); if (num_topics <= 0) { aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); return AWS_MQTT_CLIENT_REQUEST_ERROR; } for (size_t i = 0; i < num_topics; ++i) { struct subscribe_task_topic *topic = NULL; aws_array_list_get_at(&task_arg->topics, &topic, i); AWS_ASSUME(topic); /* We know we're within bounds */ if (aws_mqtt_packet_subscribe_add_topic(&task_arg->subscribe, topic->request.topic, topic->request.qos)) { goto handle_error; } } } message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->subscribe.fixed_header); if (!message) { goto handle_error; } if (aws_mqtt_packet_subscribe_encode(&message->message_data, &task_arg->subscribe)) { goto handle_error; } /* This is not necessarily a fatal error; if the send fails, it'll just retry. Still need to clean up though. 
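     *
     * [Illustrative note, not part of the original source.] The topics being re-sent were
     * gathered above with aws_mqtt_topic_tree_iterate(), which walks every stored filter and
     * hands each one to a callback together with its QoS; the same mechanism is reused with a
     * second, trivial iterator just to pre-compute the packet size. A minimal iterator of the
     * same shape (hypothetical, it merely counts subscriptions):
     *
     *     static bool s_count_subscriptions(const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *ud) {
     *         (void)topic;
     *         (void)qos;
     *         *(size_t *)ud += 1;
     *         return true; // the iterators in this file always return true to keep walking
     *     }
     *
     *     size_t count = 0;
     *     aws_mqtt_topic_tree_iterate(&connection->thread_data.subscriptions, s_count_subscriptions, &count);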
*/ if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); } return AWS_MQTT_CLIENT_REQUEST_ONGOING; handle_error: if (message) { aws_mem_release(message->allocator, message); } return AWS_MQTT_CLIENT_REQUEST_ERROR; } static void s_resubscribe_complete( struct aws_mqtt_client_connection *connection_base, uint16_t packet_id, int error_code, void *userdata) { struct aws_mqtt_client_connection_311_impl *connection = connection_base->impl; struct subscribe_task_arg *task_arg = userdata; const size_t list_len = aws_array_list_length(&task_arg->topics); if (list_len <= 0) { goto clean_up; } struct subscribe_task_topic *topic = NULL; aws_array_list_get_at(&task_arg->topics, &topic, 0); AWS_ASSUME(topic); AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Subscribe %" PRIu16 " completed with error_code %d", (void *)connection, packet_id, error_code); if (task_arg->on_suback.multi) { /* create a list of aws_mqtt_topic_subscription pointers from topics for the callback */ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, cb_list_buf, list_len * sizeof(void *)); struct aws_array_list cb_list; aws_array_list_init_static(&cb_list, cb_list_buf, list_len, sizeof(void *)); int err = 0; for (size_t i = 0; i < list_len; i++) { err |= aws_array_list_get_at(&task_arg->topics, &topic, i); struct aws_mqtt_topic_subscription *subscription = &topic->request; err |= aws_array_list_push_back(&cb_list, &subscription); } AWS_ASSUME(!err); task_arg->on_suback.multi(&connection->base, packet_id, &cb_list, error_code, task_arg->on_suback_ud); aws_array_list_clean_up(&cb_list); } else if (task_arg->on_suback.single) { task_arg->on_suback.single( &connection->base, packet_id, &topic->request.topic, topic->request.qos, error_code, task_arg->on_suback_ud); } clean_up: /* We need to cleanup the subscribe_task_topics, since they are not inserted into the topic tree by resubscribe. We * take the ownership to clean it up */ for (size_t i = 0; i < list_len; i++) { aws_array_list_get_at(&task_arg->topics, &topic, i); s_task_topic_release(topic); } aws_array_list_clean_up(&task_arg->topics); aws_mqtt_packet_subscribe_clean_up(&task_arg->subscribe); aws_mem_release(task_arg->connection->allocator, task_arg); } static uint16_t s_aws_mqtt_311_resubscribe_existing_topics( void *impl, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; struct subscribe_task_arg *task_arg = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_arg)); if (!task_arg) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: failed to allocate storage for resubscribe arguments", (void *)connection); return 0; } AWS_ZERO_STRUCT(*task_arg); task_arg->connection = connection; task_arg->on_suback.multi = on_suback; task_arg->on_suback_ud = on_suback_ud; /* Calculate the size of the packet. 
* The fixed header is 2 bytes and the packet ID is 2 bytes * plus the size of each topic in the topic tree */ uint64_t resubscribe_packet_size = 4; /* Get the length of each subscription we are going to resubscribe with */ aws_mqtt_topic_tree_iterate( &connection->thread_data.subscriptions, s_reconnect_resub_operation_statistics_iterator, &resubscribe_packet_size); uint16_t packet_id = mqtt_create_request( task_arg->connection, &s_resubscribe_send, task_arg, &s_resubscribe_complete, task_arg, false, /* noRetry */ resubscribe_packet_size); if (packet_id == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to send multi-topic resubscribe with error %s", (void *)connection, aws_error_name(aws_last_error())); goto handle_error; } AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Sending multi-topic resubscribe %" PRIu16, (void *)connection, packet_id); return packet_id; handle_error: aws_mem_release(connection->allocator, task_arg); return 0; } /******************************************************************************* * Unsubscribe ******************************************************************************/ struct unsubscribe_task_arg { struct aws_mqtt_client_connection_311_impl *connection; struct aws_string *filter_string; struct aws_byte_cursor filter; /* Packet to populate */ struct aws_mqtt_packet_unsubscribe unsubscribe; /* true if transaction was committed to the topic tree, false requires a retry */ bool tree_updated; aws_mqtt_op_complete_fn *on_unsuback; void *on_unsuback_ud; struct request_timeout_wrapper timeout_wrapper; }; static enum aws_mqtt_client_request_state s_unsubscribe_send( uint16_t packet_id, bool is_first_attempt, void *userdata) { (void)is_first_attempt; struct unsubscribe_task_arg *task_arg = userdata; struct aws_io_message *message = NULL; AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Attempting send of unsubscribe %" PRIu16 " %s", (void *)task_arg->connection, packet_id, is_first_attempt ? 
"first attempt" : "resend"); static const size_t num_topics = 1; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, num_topics * aws_mqtt_topic_tree_action_size); struct aws_array_list transaction; aws_array_list_init_static(&transaction, transaction_buf, num_topics, aws_mqtt_topic_tree_action_size); if (!task_arg->tree_updated) { struct subscribe_task_topic *topic; if (s_is_topic_shared_topic(&task_arg->filter)) { struct aws_string *shared_topic = aws_string_new_from_cursor(task_arg->connection->allocator, &task_arg->filter); struct aws_string *normal_topic = s_get_normal_topic_from_shared_topic(shared_topic); if (normal_topic == NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Topic is shared subscription topic but topic could not be parsed from " "shared subscription topic.", (void *)task_arg->connection); aws_string_destroy(shared_topic); goto handle_error; } struct aws_byte_cursor normal_topic_cursor = aws_byte_cursor_from_string(normal_topic); if (aws_mqtt_topic_tree_transaction_remove( &task_arg->connection->thread_data.subscriptions, &transaction, &normal_topic_cursor, (void **)&topic)) { aws_string_destroy(shared_topic); aws_string_destroy(normal_topic); goto handle_error; } aws_string_destroy(shared_topic); aws_string_destroy(normal_topic); } else { if (aws_mqtt_topic_tree_transaction_remove( &task_arg->connection->thread_data.subscriptions, &transaction, &task_arg->filter, (void **)&topic)) { goto handle_error; } } } if (task_arg->unsubscribe.fixed_header.packet_type == 0) { /* If unsubscribe packet is uninitialized, init it */ if (aws_mqtt_packet_unsubscribe_init(&task_arg->unsubscribe, task_arg->connection->allocator, packet_id)) { goto handle_error; } if (aws_mqtt_packet_unsubscribe_add_topic(&task_arg->unsubscribe, task_arg->filter)) { goto handle_error; } } message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->unsubscribe.fixed_header); if (!message) { goto handle_error; } if (aws_mqtt_packet_unsubscribe_encode(&message->message_data, &task_arg->unsubscribe)) { goto handle_error; } if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { goto handle_error; } /* TODO: timing should start from the message written into the socket, which is aws_io_message->on_completion * invoked, but there are bugs in the websocket handler (and maybe also the h1 handler?) where we don't properly * fire the on_completion callbacks. */ struct request_timeout_task_arg *timeout_task_arg = s_schedule_timeout_task(task_arg->connection, packet_id); if (!timeout_task_arg) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } /* * Set up mutual references between the operation task args and the timeout task args. Whoever runs first * "wins", does its logic, and then breaks the connection between the two. 
*/ task_arg->timeout_wrapper.timeout_task_arg = timeout_task_arg; timeout_task_arg->task_arg_wrapper = &task_arg->timeout_wrapper; if (!task_arg->tree_updated) { aws_mqtt_topic_tree_transaction_commit(&task_arg->connection->thread_data.subscriptions, &transaction); task_arg->tree_updated = true; } aws_array_list_clean_up(&transaction); return AWS_MQTT_CLIENT_REQUEST_ONGOING; handle_error: if (message) { aws_mem_release(message->allocator, message); } if (!task_arg->tree_updated) { aws_mqtt_topic_tree_transaction_roll_back(&task_arg->connection->thread_data.subscriptions, &transaction); } aws_array_list_clean_up(&transaction); return AWS_MQTT_CLIENT_REQUEST_ERROR; } static void s_unsubscribe_complete( struct aws_mqtt_client_connection *connection_base, uint16_t packet_id, int error_code, void *userdata) { struct aws_mqtt_client_connection_311_impl *connection = connection_base->impl; struct unsubscribe_task_arg *task_arg = userdata; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Unsubscribe %" PRIu16 " complete", (void *)connection, packet_id); /* * If we have a forward pointer to a timeout task, then that means the timeout task has not run yet. So we should * follow it and zero out the back pointer to us, because we're going away now. The timeout task will run later * and be harmless (even vs. future operations with the same packet id) because it only cancels if it has a back * pointer. */ if (task_arg->timeout_wrapper.timeout_task_arg) { task_arg->timeout_wrapper.timeout_task_arg->task_arg_wrapper = NULL; task_arg->timeout_wrapper.timeout_task_arg = NULL; } if (task_arg->on_unsuback) { task_arg->on_unsuback(&connection->base, packet_id, error_code, task_arg->on_unsuback_ud); } aws_string_destroy(task_arg->filter_string); aws_mqtt_packet_unsubscribe_clean_up(&task_arg->unsubscribe); aws_mem_release(task_arg->connection->allocator, task_arg); } static uint16_t s_aws_mqtt_client_connection_311_unsubscribe( void *impl, const struct aws_byte_cursor *topic_filter, aws_mqtt_op_complete_fn *on_unsuback, void *on_unsuback_ud) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (!aws_mqtt_is_valid_topic_filter(topic_filter)) { aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); return 0; } struct unsubscribe_task_arg *task_arg = aws_mem_calloc(connection->allocator, 1, sizeof(struct unsubscribe_task_arg)); if (!task_arg) { return 0; } task_arg->connection = connection; task_arg->filter_string = aws_string_new_from_array(connection->allocator, topic_filter->ptr, topic_filter->len); task_arg->filter = aws_byte_cursor_from_string(task_arg->filter_string); task_arg->on_unsuback = on_unsuback; task_arg->on_unsuback_ud = on_unsuback_ud; /* Calculate the size of the unsubscribe packet. 
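* (This is only an estimate fed into the operation statistics; the encoder computes the real remaining length
* when the packet is written.)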
* The fixed header is always 2 bytes, the packet ID is always 2 bytes * plus the size of the topic filter */ uint64_t unsubscribe_packet_size = 4 + task_arg->filter.len; uint16_t packet_id = mqtt_create_request( connection, &s_unsubscribe_send, task_arg, s_unsubscribe_complete, task_arg, false, /* noRetry */ unsubscribe_packet_size); if (packet_id == 0) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Failed to start unsubscribe, with error %s", (void *)connection, aws_error_debug_str(aws_last_error())); goto handle_error; } AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting unsubscribe %" PRIu16, (void *)connection, packet_id); return packet_id; handle_error: aws_string_destroy(task_arg->filter_string); aws_mem_release(connection->allocator, task_arg); return 0; } /******************************************************************************* * Publish ******************************************************************************/ struct publish_task_arg { struct aws_mqtt_client_connection_311_impl *connection; struct aws_string *topic_string; struct aws_byte_cursor topic; enum aws_mqtt_qos qos; bool retain; struct aws_byte_cursor payload; struct aws_byte_buf payload_buf; /* Packet to populate */ struct aws_mqtt_packet_publish publish; aws_mqtt_op_complete_fn *on_complete; void *userdata; struct request_timeout_wrapper timeout_wrapper; }; /* should only be called by tests */ static int s_get_stuff_from_outstanding_requests_table( struct aws_mqtt_client_connection_311_impl *connection, uint16_t packet_id, struct aws_allocator *allocator, struct aws_byte_buf *result_buf, struct aws_string **result_string) { int err = AWS_OP_SUCCESS; aws_mutex_lock(&connection->synced_data.lock); struct aws_hash_element *elem = NULL; aws_hash_table_find(&connection->synced_data.outstanding_requests_table, &packet_id, &elem); if (elem) { struct aws_mqtt_request *request = elem->value; struct publish_task_arg *pub = (struct publish_task_arg *)request->send_request_ud; if (result_buf != NULL) { if (aws_byte_buf_init_copy(result_buf, allocator, &pub->payload_buf)) { err = AWS_OP_ERR; } } else if (result_string != NULL) { *result_string = aws_string_new_from_string(allocator, pub->topic_string); if (*result_string == NULL) { err = AWS_OP_ERR; } } } else { /* So lovely that this error is defined, but hashtable never actually raises it */ err = aws_raise_error(AWS_ERROR_HASHTBL_ITEM_NOT_FOUND); } aws_mutex_unlock(&connection->synced_data.lock); return err; } /* should only be called by tests */ int aws_mqtt_client_get_payload_for_outstanding_publish_packet( struct aws_mqtt_client_connection *connection_base, uint16_t packet_id, struct aws_allocator *allocator, struct aws_byte_buf *result) { AWS_ZERO_STRUCT(*result); return s_get_stuff_from_outstanding_requests_table(connection_base->impl, packet_id, allocator, result, NULL); } /* should only be called by tests */ int aws_mqtt_client_get_topic_for_outstanding_publish_packet( struct aws_mqtt_client_connection *connection_base, uint16_t packet_id, struct aws_allocator *allocator, struct aws_string **result) { *result = NULL; return s_get_stuff_from_outstanding_requests_table(connection_base->impl, packet_id, allocator, NULL, result); } static enum aws_mqtt_client_request_state s_publish_send(uint16_t packet_id, bool is_first_attempt, void *userdata) { struct publish_task_arg *task_arg = userdata; struct aws_mqtt_client_connection_311_impl *connection = task_arg->connection; AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Attempting send of publish %" PRIu16 " %s", (void 
*)task_arg->connection, packet_id, is_first_attempt ? "first attempt" : "resend"); bool is_qos_0 = task_arg->qos == AWS_MQTT_QOS_AT_MOST_ONCE; if (is_qos_0) { packet_id = 0; } if (is_first_attempt) { if (aws_mqtt_packet_publish_init( &task_arg->publish, task_arg->retain, task_arg->qos, !is_first_attempt, task_arg->topic, packet_id, task_arg->payload)) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } } else { aws_mqtt_packet_publish_set_dup(&task_arg->publish); } struct aws_io_message *message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->publish.fixed_header); if (!message) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } /* Encode the headers, and everything but the payload */ if (aws_mqtt_packet_publish_encode_headers(&message->message_data, &task_arg->publish)) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } struct aws_byte_cursor payload_cur = task_arg->payload; { write_payload_chunk: (void)NULL; const size_t left_in_message = message->message_data.capacity - message->message_data.len; const size_t to_write = payload_cur.len < left_in_message ? payload_cur.len : left_in_message; if (to_write) { /* Write this chunk */ struct aws_byte_cursor to_write_cur = aws_byte_cursor_advance(&payload_cur, to_write); AWS_ASSERT(to_write_cur.ptr); /* to_write is guaranteed to be inside the bounds of payload_cur */ if (!aws_byte_buf_write_from_whole_cursor(&message->message_data, to_write_cur)) { aws_mem_release(message->allocator, message); return AWS_MQTT_CLIENT_REQUEST_ERROR; } } if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); /* If it's QoS 0, tell the user that the message has not been sent; otherwise, the message will be resent once * the connection is back */ return is_qos_0 ? AWS_MQTT_CLIENT_REQUEST_ERROR : AWS_MQTT_CLIENT_REQUEST_ONGOING; } /* If there's still payload left, get a new message and start again. */ if (payload_cur.len) { message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->publish.fixed_header); goto write_payload_chunk; } } if (!is_qos_0 && connection->operation_timeout_ns != UINT64_MAX) { /* TODO: timing should start from the message written into the socket, which is aws_io_message->on_completion * invoked, but there are bugs in the websocket handler (and maybe also the h1 handler?) where we don't properly * fire the on_completion callbacks. */ struct request_timeout_task_arg *timeout_task_arg = s_schedule_timeout_task(connection, packet_id); if (!timeout_task_arg) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } /* * Set up mutual references between the operation task args and the timeout task args. Whoever runs first * "wins", does its logic, and then breaks the connection between the two. */ task_arg->timeout_wrapper.timeout_task_arg = timeout_task_arg; timeout_task_arg->task_arg_wrapper = &task_arg->timeout_wrapper; } /* If QoS == 0, there will be no ack, so consider the request done now. */ return is_qos_0 ? 
AWS_MQTT_CLIENT_REQUEST_COMPLETE : AWS_MQTT_CLIENT_REQUEST_ONGOING; } static void s_publish_complete( struct aws_mqtt_client_connection *connection_base, uint16_t packet_id, int error_code, void *userdata) { struct aws_mqtt_client_connection_311_impl *connection = connection_base->impl; struct publish_task_arg *task_arg = userdata; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Publish %" PRIu16 " complete", (void *)connection, packet_id); if (task_arg->on_complete) { task_arg->on_complete(&connection->base, packet_id, error_code, task_arg->userdata); } /* * If we have a forward pointer to a timeout task, then that means the timeout task has not run yet. So we should * follow it and zero out the back pointer to us, because we're going away now. The timeout task will run later * and be harmless (even vs. future operations with the same packet id) because it only cancels if it has a back * pointer. */ if (task_arg->timeout_wrapper.timeout_task_arg != NULL) { task_arg->timeout_wrapper.timeout_task_arg->task_arg_wrapper = NULL; task_arg->timeout_wrapper.timeout_task_arg = NULL; } aws_byte_buf_clean_up(&task_arg->payload_buf); aws_string_destroy(task_arg->topic_string); aws_mem_release(connection->allocator, task_arg); } static uint16_t s_aws_mqtt_client_connection_311_publish( void *impl, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload, aws_mqtt_op_complete_fn *on_complete, void *userdata) { struct aws_mqtt_client_connection_311_impl *connection = impl; AWS_PRECONDITION(connection); if (!aws_mqtt_is_valid_topic(topic)) { aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); return 0; } if (qos > AWS_MQTT_QOS_EXACTLY_ONCE) { aws_raise_error(AWS_ERROR_MQTT_INVALID_QOS); return 0; } struct publish_task_arg *arg = aws_mem_calloc(connection->allocator, 1, sizeof(struct publish_task_arg)); if (!arg) { return 0; } arg->connection = connection; arg->topic_string = aws_string_new_from_array(connection->allocator, topic->ptr, topic->len); arg->topic = aws_byte_cursor_from_string(arg->topic_string); arg->qos = qos; arg->retain = retain; struct aws_byte_cursor payload_cursor; AWS_ZERO_STRUCT(payload_cursor); if (payload != NULL) { payload_cursor = *payload; } if (aws_byte_buf_init_copy_from_cursor(&arg->payload_buf, connection->allocator, payload_cursor)) { goto handle_error; } arg->payload = aws_byte_cursor_from_buf(&arg->payload_buf); arg->on_complete = on_complete; arg->userdata = userdata; /* Calculate the size of the publish packet. 
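* For example, a publish to topic "a/b" with a 10 byte payload is counted as 4 + 3 + 10 = 17 bytes; like the
* subscribe/unsubscribe estimates, this value only feeds the operation statistics.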
* The fixed header size is 2 bytes, the packet ID is 2 bytes, * plus the size of both the topic name and payload */ uint64_t publish_packet_size = 4 + arg->topic.len + arg->payload.len; bool retry = qos == AWS_MQTT_QOS_AT_MOST_ONCE; uint16_t packet_id = mqtt_create_request(connection, &s_publish_send, arg, &s_publish_complete, arg, retry, publish_packet_size); if (packet_id == 0) { /* bummer, we failed to make a new request */ AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed starting publish to topic " PRInSTR ",error %d (%s)", (void *)connection, AWS_BYTE_CURSOR_PRI(*topic), aws_last_error(), aws_error_name(aws_last_error())); goto handle_error; } AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Starting publish %" PRIu16 " to topic " PRInSTR, (void *)connection, packet_id, AWS_BYTE_CURSOR_PRI(*topic)); return packet_id; handle_error: /* we know arg is valid, topic_string may or may not be valid */ if (arg->topic_string) { aws_string_destroy(arg->topic_string); } aws_byte_buf_clean_up(&arg->payload_buf); aws_mem_release(connection->allocator, arg); return 0; } /******************************************************************************* * Ping ******************************************************************************/ static void s_pingresp_received_timeout(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { struct aws_mqtt_client_connection_311_impl *connection = arg; if (status == AWS_TASK_STATUS_RUN_READY) { /* Check that a pingresp has been received since pingreq was sent */ if (connection->thread_data.waiting_on_ping_response) { connection->thread_data.waiting_on_ping_response = false; /* It's been too long since the last ping, close the connection */ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: ping timeout detected", (void *)connection); aws_channel_shutdown(connection->slot->channel, AWS_ERROR_MQTT_TIMEOUT); } } aws_mem_release(connection->allocator, channel_task); } static enum aws_mqtt_client_request_state s_pingreq_send(uint16_t packet_id, bool is_first_attempt, void *userdata) { (void)packet_id; (void)is_first_attempt; AWS_PRECONDITION(is_first_attempt); struct aws_mqtt_client_connection_311_impl *connection = userdata; AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: pingreq send", (void *)connection); struct aws_mqtt_packet_connection pingreq; aws_mqtt_packet_pingreq_init(&pingreq); struct aws_io_message *message = mqtt_get_message_for_packet(connection, &pingreq.fixed_header); if (!message) { return AWS_MQTT_CLIENT_REQUEST_ERROR; } if (aws_mqtt_packet_connection_encode(&message->message_data, &pingreq)) { aws_mem_release(message->allocator, message); return AWS_MQTT_CLIENT_REQUEST_ERROR; } if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); return AWS_MQTT_CLIENT_REQUEST_ERROR; } /* Mark down that now is when the last pingreq was sent */ connection->thread_data.waiting_on_ping_response = true; struct aws_channel_task *ping_timeout_task = aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_channel_task)); if (!ping_timeout_task) { /* allocation failed, no log, just return error. 
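* (The PINGREQ itself has already been written to the channel at this point; what is lost is only the timeout
* watchdog that would detect a missing PINGRESP.)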
*/ goto error; } aws_channel_task_init(ping_timeout_task, s_pingresp_received_timeout, connection, "mqtt_pingresp_timeout"); uint64_t now = 0; if (aws_channel_current_clock_time(connection->slot->channel, &now)) { goto error; } now += connection->ping_timeout_ns; aws_channel_schedule_task_future(connection->slot->channel, ping_timeout_task, now); return AWS_MQTT_CLIENT_REQUEST_COMPLETE; error: return AWS_MQTT_CLIENT_REQUEST_ERROR; } int aws_mqtt_client_connection_ping(struct aws_mqtt_client_connection_311_impl *connection) { AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting ping", (void *)connection); uint16_t packet_id = mqtt_create_request(connection, &s_pingreq_send, connection, NULL, NULL, true, /* noRetry */ 0); AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting ping with packet id %" PRIu16, (void *)connection, packet_id); return (packet_id > 0) ? AWS_OP_SUCCESS : AWS_OP_ERR; } /******************************************************************************* * Operation Statistics ******************************************************************************/ void aws_mqtt_connection_statistics_change_operation_statistic_state( struct aws_mqtt_client_connection_311_impl *connection, struct aws_mqtt_request *request, enum aws_mqtt_operation_statistic_state_flags new_state_flags) { // Error checking if (!connection) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "Invalid MQTT311 connection used when trying to change operation statistic state"); return; } if (!request) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "Invalid MQTT311 request used when trying to change operation statistic state"); return; } uint64_t packet_size = request->packet_size; /** * If the packet size is zero, just skip it: we only track packets whose size we have intentionally * calculated, and those sizes are always non-zero (zero-size entries are ACKs, PINGs, etc.) */ if (packet_size <= 0) { return; } enum aws_mqtt_operation_statistic_state_flags old_state_flags = request->statistic_state_flags; if (new_state_flags == old_state_flags) { return; } struct aws_mqtt_connection_operation_statistics_impl *stats = &connection->operation_statistics_impl; if ((old_state_flags & AWS_MQTT_OSS_INCOMPLETE) != (new_state_flags & AWS_MQTT_OSS_INCOMPLETE)) { if ((new_state_flags & AWS_MQTT_OSS_INCOMPLETE) != 0) { aws_atomic_fetch_add(&stats->incomplete_operation_count_atomic, 1); aws_atomic_fetch_add(&stats->incomplete_operation_size_atomic, (size_t)packet_size); } else { aws_atomic_fetch_sub(&stats->incomplete_operation_count_atomic, 1); aws_atomic_fetch_sub(&stats->incomplete_operation_size_atomic, (size_t)packet_size); } } if ((old_state_flags & AWS_MQTT_OSS_UNACKED) != (new_state_flags & AWS_MQTT_OSS_UNACKED)) { if ((new_state_flags & AWS_MQTT_OSS_UNACKED) != 0) { aws_atomic_fetch_add(&stats->unacked_operation_count_atomic, 1); aws_atomic_fetch_add(&stats->unacked_operation_size_atomic, (size_t)packet_size); } else { aws_atomic_fetch_sub(&stats->unacked_operation_count_atomic, 1); aws_atomic_fetch_sub(&stats->unacked_operation_size_atomic, (size_t)packet_size); } } request->statistic_state_flags = new_state_flags; // If the callback is defined, then call it if (connection && connection->on_any_operation_statistics && connection->on_any_operation_statistics_ud) { (*connection->on_any_operation_statistics)(connection, connection->on_any_operation_statistics_ud); } } static int s_aws_mqtt_client_connection_311_get_stats( void *impl, struct aws_mqtt_connection_operation_statistics *stats) { struct aws_mqtt_client_connection_311_impl 
*connection = impl; // Error checking if (!connection) { AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "Invalid MQTT311 connection used when trying to get operation statistics"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (!stats) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Invalid MQTT311 connection statistics struct used when trying to get operation statistics", (void *)connection); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } stats->incomplete_operation_count = (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.incomplete_operation_count_atomic); stats->incomplete_operation_size = (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.incomplete_operation_size_atomic); stats->unacked_operation_count = (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.unacked_operation_count_atomic); stats->unacked_operation_size = (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.unacked_operation_size_atomic); return AWS_OP_SUCCESS; } int aws_mqtt_client_connection_set_on_operation_statistics_handler( struct aws_mqtt_client_connection_311_impl *connection, aws_mqtt_on_operation_statistics_fn *on_operation_statistics, void *on_operation_statistics_ud) { AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting on_operation_statistics handler", (void *)connection); connection->on_any_operation_statistics = on_operation_statistics; connection->on_any_operation_statistics_ud = on_operation_statistics_ud; return AWS_OP_SUCCESS; } static struct aws_mqtt_client_connection *s_aws_mqtt_client_connection_311_acquire(void *impl) { struct aws_mqtt_client_connection_311_impl *connection = impl; aws_ref_count_acquire(&connection->ref_count); return &connection->base; } static void s_aws_mqtt_client_connection_311_release(void *impl) { struct aws_mqtt_client_connection_311_impl *connection = impl; aws_ref_count_release(&connection->ref_count); } static struct aws_mqtt_client_connection_vtable s_aws_mqtt_client_connection_311_vtable = { .acquire_fn = s_aws_mqtt_client_connection_311_acquire, .release_fn = s_aws_mqtt_client_connection_311_release, .set_will_fn = s_aws_mqtt_client_connection_311_set_will, .set_login_fn = s_aws_mqtt_client_connection_311_set_login, .use_websockets_fn = s_aws_mqtt_client_connection_311_use_websockets, .set_http_proxy_options_fn = s_aws_mqtt_client_connection_311_set_http_proxy_options, .set_host_resolution_options_fn = s_aws_mqtt_client_connection_311_set_host_resolution_options, .set_reconnect_timeout_fn = s_aws_mqtt_client_connection_311_set_reconnect_timeout, .set_connection_result_handlers = s_aws_mqtt_client_connection_311_set_connection_result_handlers, .set_connection_interruption_handlers_fn = s_aws_mqtt_client_connection_311_set_connection_interruption_handlers, .set_connection_closed_handler_fn = s_aws_mqtt_client_connection_311_set_connection_closed_handler, .set_on_any_publish_handler_fn = s_aws_mqtt_client_connection_311_set_on_any_publish_handler, .set_connection_termination_handler_fn = s_aws_mqtt_client_connection_311_set_connection_termination_handler, .connect_fn = s_aws_mqtt_client_connection_311_connect, .reconnect_fn = s_aws_mqtt_client_connection_311_reconnect, .disconnect_fn = s_aws_mqtt_client_connection_311_disconnect, .subscribe_multiple_fn = s_aws_mqtt_client_connection_311_subscribe_multiple, .subscribe_fn = s_aws_mqtt_client_connection_311_subscribe, .resubscribe_existing_topics_fn = s_aws_mqtt_311_resubscribe_existing_topics, .unsubscribe_fn = 
s_aws_mqtt_client_connection_311_unsubscribe, .publish_fn = s_aws_mqtt_client_connection_311_publish, .get_stats_fn = s_aws_mqtt_client_connection_311_get_stats, }; static struct aws_mqtt_client_connection_vtable *s_aws_mqtt_client_connection_311_vtable_ptr = &s_aws_mqtt_client_connection_311_vtable; struct aws_mqtt_client_connection *aws_mqtt_client_connection_new(struct aws_mqtt_client *client) { AWS_PRECONDITION(client); struct aws_mqtt_client_connection_311_impl *connection = aws_mem_calloc(client->allocator, 1, sizeof(struct aws_mqtt_client_connection_311_impl)); if (!connection) { return NULL; } AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Creating new mqtt 311 connection", (void *)connection); /* Initialize the client */ connection->allocator = client->allocator; connection->base.vtable = s_aws_mqtt_client_connection_311_vtable_ptr; connection->base.impl = connection; aws_ref_count_init( &connection->ref_count, connection, (aws_simple_completion_callback *)s_mqtt_client_connection_start_destroy); connection->client = aws_mqtt_client_acquire(client); AWS_ZERO_STRUCT(connection->synced_data); connection->synced_data.state = AWS_MQTT_CLIENT_STATE_DISCONNECTED; connection->reconnect_timeouts.min_sec = 1; connection->reconnect_timeouts.current_sec = 1; connection->reconnect_timeouts.max_sec = 128; aws_linked_list_init(&connection->synced_data.pending_requests_list); aws_linked_list_init(&connection->thread_data.ongoing_requests_list); s_init_statistics(&connection->operation_statistics_impl); if (aws_mutex_init(&connection->synced_data.lock)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to initialize mutex, error %d (%s)", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); goto failed_init_mutex; } struct aws_mqtt311_decoder_options config = { .packet_handlers = aws_mqtt311_get_default_packet_handlers(), .handler_user_data = connection, }; aws_mqtt311_decoder_init(&connection->thread_data.decoder, client->allocator, &config); if (aws_mqtt_topic_tree_init(&connection->thread_data.subscriptions, connection->allocator)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to initialize subscriptions topic_tree, error %d (%s)", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); goto failed_init_subscriptions; } if (aws_memory_pool_init( &connection->synced_data.requests_pool, connection->allocator, 32, sizeof(struct aws_mqtt_request))) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to initialize request pool, error %d (%s)", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); goto failed_init_requests_pool; } if (aws_hash_table_init( &connection->synced_data.outstanding_requests_table, connection->allocator, DEFAULT_MQTT311_OPERATION_TABLE_SIZE, aws_mqtt_hash_uint16_t, aws_mqtt_compare_uint16_t_eq, NULL, NULL)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Failed to initialize outstanding requests table, error %d (%s)", (void *)connection, aws_last_error(), aws_error_name(aws_last_error())); goto failed_init_outstanding_requests_table; } connection->loop = aws_event_loop_group_get_next_loop(client->bootstrap->event_loop_group); connection->host_resolution_config = aws_host_resolver_init_default_resolution_config(); connection->host_resolution_config.resolve_frequency_ns = aws_timestamp_convert(connection->reconnect_timeouts.max_sec, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); /* Initialize the handler */ connection->handler.alloc = connection->allocator; connection->handler.vtable = 
aws_mqtt_get_client_channel_vtable(); connection->handler.impl = connection; return &connection->base; failed_init_outstanding_requests_table: aws_memory_pool_clean_up(&connection->synced_data.requests_pool); failed_init_requests_pool: aws_mqtt_topic_tree_clean_up(&connection->thread_data.subscriptions); failed_init_subscriptions: aws_mutex_clean_up(&connection->synced_data.lock); failed_init_mutex: aws_mem_release(client->allocator, connection); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/client_channel_handler.c000066400000000000000000001270751456575232400263400ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4204) #endif /******************************************************************************* * Static Helper functions ******************************************************************************/ /* Caches the socket write time for ping scheduling purposes */ static void s_update_next_ping_time(struct aws_mqtt_client_connection_311_impl *connection) { if (connection->slot != NULL && connection->slot->channel != NULL) { aws_channel_current_clock_time(connection->slot->channel, &connection->next_ping_time); aws_add_u64_checked(connection->next_ping_time, connection->keep_alive_time_ns, &connection->next_ping_time); } } /* Caches the request send time. The `request_send_timestamp` will be used to push off ping request on request complete. */ static void s_update_request_send_time(struct aws_mqtt_request *request) { if (request->connection != NULL && request->connection->slot != NULL && request->connection->slot->channel != NULL) { aws_channel_current_clock_time(request->connection->slot->channel, &request->request_send_timestamp); } } /* push off next ping time on ack received to last_request_send_timestamp_ns + keep_alive_time_ns * The function must be called in critical section. */ static void s_pushoff_next_ping_time( struct aws_mqtt_client_connection_311_impl *connection, uint64_t last_request_send_timestamp_ns) { ASSERT_SYNCED_DATA_LOCK_HELD(connection); aws_add_u64_checked( last_request_send_timestamp_ns, connection->keep_alive_time_ns, &last_request_send_timestamp_ns); if (last_request_send_timestamp_ns > connection->next_ping_time) { connection->next_ping_time = last_request_send_timestamp_ns; } } /******************************************************************************* * Packet State Machine ******************************************************************************/ static int s_packet_handler_default(struct aws_byte_cursor message_cursor, void *user_data) { (void)message_cursor; struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Unhandled packet type received", (void *)connection); return aws_raise_error(AWS_ERROR_MQTT_INVALID_PACKET_TYPE); } static void s_on_time_to_ping(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status); static void s_schedule_ping(struct aws_mqtt_client_connection_311_impl *connection) { aws_channel_task_init(&connection->ping_task, s_on_time_to_ping, connection, "mqtt_ping"); uint64_t now = 0; aws_channel_current_clock_time(connection->slot->channel, &now); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Scheduling PING task. 
current timestamp is %" PRIu64, (void *)connection, now); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: The next PING task will be run at timestamp %" PRIu64, (void *)connection, connection->next_ping_time); aws_channel_schedule_task_future(connection->slot->channel, &connection->ping_task, connection->next_ping_time); } static void s_on_time_to_ping(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; if (status == AWS_TASK_STATUS_RUN_READY) { struct aws_mqtt_client_connection_311_impl *connection = arg; uint64_t now = 0; aws_channel_current_clock_time(connection->slot->channel, &now); if (now >= connection->next_ping_time) { s_update_next_ping_time(connection); AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Sending PING", (void *)connection); aws_mqtt_client_connection_ping(connection); } else { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Skipped sending PING because scheduled ping time %" PRIu64 " has not elapsed yet. Current time is %" PRIu64 ". Rescheduling ping to run at the scheduled ping time...", (void *)connection, connection->next_ping_time, now); } s_schedule_ping(connection); } } static int s_validate_received_packet_type( struct aws_mqtt_client_connection_311_impl *connection, enum aws_mqtt_packet_type packet_type) { { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* [MQTT-3.2.0-1] The first packet sent from the Server to the Client MUST be a CONNACK Packet */ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_CONNECTING && packet_type != AWS_MQTT_PACKET_CONNACK) { mqtt_connection_unlock_synced_data(connection); AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: First message received from the server was not a CONNACK. Terminating connection.", (void *)connection); return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (AWS_UNLIKELY(packet_type > AWS_MQTT_PACKET_DISCONNECT || packet_type < AWS_MQTT_PACKET_CONNECT)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Invalid packet type received %d. 
Terminating connection.", (void *)connection, packet_type); return aws_raise_error(AWS_ERROR_MQTT_INVALID_PACKET_TYPE); } /* Handle the packet */ return AWS_OP_SUCCESS; } static int s_packet_handler_connack(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: CONNACK received", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_CONNACK)) { return AWS_OP_ERR; } struct aws_mqtt_packet_connack connack; if (aws_mqtt_packet_connack_decode(&message_cursor, &connack)) { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: error %d parsing CONNACK packet", (void *)connection, aws_last_error()); return AWS_OP_ERR; } bool was_reconnecting; struct aws_linked_list requests; aws_linked_list_init(&requests); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* User requested disconnect, don't do anything */ if (connection->synced_data.state >= AWS_MQTT_CLIENT_STATE_DISCONNECTING) { mqtt_connection_unlock_synced_data(connection); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: User has requested disconnect, dropping connection", (void *)connection); return AWS_OP_SUCCESS; } was_reconnecting = connection->synced_data.state == AWS_MQTT_CLIENT_STATE_RECONNECTING; if (connack.connect_return_code == AWS_MQTT_CONNECT_ACCEPTED) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: connection was accepted, switch state from %d to CONNECTED.", (void *)connection, (int)connection->synced_data.state); /* Don't change the state if it's not ACCEPTED by broker */ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_CONNECTED); aws_linked_list_swap_contents(&connection->synced_data.pending_requests_list, &requests); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ connection->connection_count++; uint64_t now = 0; aws_high_res_clock_get_ticks(&now); if (connack.connect_return_code == AWS_MQTT_CONNECT_ACCEPTED) { /* * This was a successful MQTT connection establishment, record the time so that channel shutdown * can make a good decision about reconnect backoff reset. */ connection->reconnect_timeouts.channel_successful_connack_timestamp_ns = now; /* If successfully connected, schedule all pending tasks */ AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: connection was accepted processing offline requests.", (void *)connection); if (!aws_linked_list_empty(&requests)) { struct aws_linked_list_node *current = aws_linked_list_front(&requests); const struct aws_linked_list_node *end = aws_linked_list_end(&requests); do { struct aws_mqtt_request *request = AWS_CONTAINER_OF(current, struct aws_mqtt_request, list_node); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: processing offline request %" PRIu16, (void *)connection, request->packet_id); aws_channel_schedule_task_now(connection->slot->channel, &request->outgoing_task); current = current->next; } while (current != end); } } else { AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: invalid connect return code %d, disconnecting", (void *)connection, connack.connect_return_code); /* If error code returned, disconnect, on_completed will be invoked from shutdown process */ aws_channel_shutdown(connection->slot->channel, AWS_ERROR_MQTT_PROTOCOL_ERROR); return AWS_OP_SUCCESS; } /* It is possible for a connection to complete, and a hangup to occur before the * CONNECT/CONNACK cycle completes. 
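* (A hangup like that leaves connection_count at 1 when the first CONNACK finally arrives, which is why the
* resumed branch below also requires connection_count > 1.)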
In that case, we must deliver on_connection_complete * on the first successful CONNACK or user code will never think it's connected */ if (was_reconnecting && connection->connection_count > 1) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: connection is a resumed connection, invoking on_resumed callback", (void *)connection); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_resumed, connack.connect_return_code, connack.session_present); } else { aws_create_reconnect_task(connection); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: connection is a new connection, invoking on_connection_complete callback", (void *)connection); MQTT_CLIENT_CALL_CALLBACK_ARGS( connection, on_connection_complete, AWS_OP_SUCCESS, connack.connect_return_code, connack.session_present); } /* * The on_connection_success would get triggered on the successful CONNACK. It invoked with both the first connect * attempt and reconnection attempt as Mqtt5 does not have on_resume callback for reconnection. */ AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: received a successful CONNACK, invoking on_connection_success callback", (void *)connection); MQTT_CLIENT_CALL_CALLBACK_ARGS( connection, on_connection_success, connack.connect_return_code, connack.session_present); AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: connection callback completed", (void *)connection); s_update_next_ping_time(connection); s_schedule_ping(connection); return AWS_OP_SUCCESS; } static int s_packet_handler_publish(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: PUBLISH received", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_PUBLISH)) { return AWS_OP_ERR; } /* TODO: need to handle the QoS 2 message to avoid processing the message a second time */ struct aws_mqtt_packet_publish publish; if (aws_mqtt_packet_publish_decode(&message_cursor, &publish)) { return AWS_OP_ERR; } aws_mqtt_topic_tree_publish(&connection->thread_data.subscriptions, &publish); bool dup = aws_mqtt_packet_publish_get_dup(&publish); enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(&publish); bool retain = aws_mqtt_packet_publish_get_retain(&publish); MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_any_publish, &publish.topic_name, &publish.payload, dup, qos, retain); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: publish received with msg id=%" PRIu16 " dup=%d qos=%d retain=%d payload-size=%zu topic=" PRInSTR, (void *)connection, publish.packet_identifier, dup, qos, retain, publish.payload.len, AWS_BYTE_CURSOR_PRI(publish.topic_name)); struct aws_mqtt_packet_ack puback; AWS_ZERO_STRUCT(puback); /* Switch on QoS flags (bits 1 & 2) */ switch (qos) { case AWS_MQTT_QOS_AT_MOST_ONCE: AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: received publish QOS is 0, not sending puback", (void *)connection); /* No more communication necessary */ break; case AWS_MQTT_QOS_AT_LEAST_ONCE: AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: received publish QOS is 1, sending puback", (void *)connection); aws_mqtt_packet_puback_init(&puback, publish.packet_identifier); break; case AWS_MQTT_QOS_EXACTLY_ONCE: AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: received publish QOS is 2, sending pubrec", (void *)connection); aws_mqtt_packet_pubrec_init(&puback, publish.packet_identifier); break; default: /* Impossible to hit this branch. 
QoS value is checked when decoding */ AWS_FATAL_ASSERT(0); break; } if (puback.packet_identifier) { struct aws_io_message *message = mqtt_get_message_for_packet(connection, &puback.fixed_header); if (!message) { return AWS_OP_ERR; } if (aws_mqtt_packet_ack_encode(&message->message_data, &puback)) { aws_mem_release(message->allocator, message); return AWS_OP_ERR; } if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_packet_handler_puback(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: received a PUBACK", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_PUBACK)) { return AWS_OP_ERR; } struct aws_mqtt_packet_ack ack; if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) { return AWS_OP_ERR; } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: received ack for message id %" PRIu16, (void *)connection, ack.packet_identifier); mqtt_request_complete(connection, AWS_ERROR_SUCCESS, ack.packet_identifier); return AWS_OP_SUCCESS; } static int s_packet_handler_suback(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: received a SUBACK", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_SUBACK)) { return AWS_OP_ERR; } struct aws_mqtt_packet_suback suback; if (aws_mqtt_packet_suback_init(&suback, connection->allocator, 0 /* fake packet_id */)) { return AWS_OP_ERR; } if (aws_mqtt_packet_suback_decode(&message_cursor, &suback)) { goto error; } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: received suback for message id %" PRIu16, (void *)connection, suback.packet_identifier); struct aws_mqtt_request *request = NULL; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); struct aws_hash_element *elem = NULL; aws_hash_table_find(&connection->synced_data.outstanding_requests_table, &suback.packet_identifier, &elem); if (elem != NULL) { request = elem->value; } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (request == NULL) { /* no corresponding request found */ goto done; } struct subscribe_task_arg *task_arg = request->on_complete_ud; size_t request_topics_len = aws_array_list_length(&task_arg->topics); size_t suback_return_code_len = aws_array_list_length(&suback.return_codes); if (request_topics_len != suback_return_code_len) { goto error; } size_t num_filters = aws_array_list_length(&suback.return_codes); for (size_t i = 0; i < num_filters; ++i) { uint8_t return_code = 0; struct subscribe_task_topic *topic = NULL; aws_array_list_get_at(&suback.return_codes, (void *)&return_code, i); aws_array_list_get_at(&task_arg->topics, &topic, i); topic->request.qos = return_code; } done: mqtt_request_complete(connection, AWS_ERROR_SUCCESS, suback.packet_identifier); aws_mqtt_packet_suback_clean_up(&suback); return AWS_OP_SUCCESS; error: aws_mqtt_packet_suback_clean_up(&suback); return AWS_OP_ERR; } static int s_packet_handler_unsuback(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: received a UNSUBACK", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_UNSUBACK)) { return 
AWS_OP_ERR; } struct aws_mqtt_packet_ack ack; if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) { return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: received ack for message id %" PRIu16, (void *)connection, ack.packet_identifier); mqtt_request_complete(connection, AWS_ERROR_SUCCESS, ack.packet_identifier); return AWS_OP_SUCCESS; } static int s_packet_handler_pubrec(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: received a PUBREC", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_PUBREC)) { return AWS_OP_ERR; } struct aws_mqtt_packet_ack ack; if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) { return AWS_OP_ERR; } /* TODO: When sending PUBLISH with QoS 2, we should be storing the data until this packet is received, at which * point we may discard it. */ /* Send PUBREL */ aws_mqtt_packet_pubrel_init(&ack, ack.packet_identifier); struct aws_io_message *message = mqtt_get_message_for_packet(connection, &ack.fixed_header); if (!message) { return AWS_OP_ERR; } if (aws_mqtt_packet_ack_encode(&message->message_data, &ack)) { goto on_error; } if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { goto on_error; } return AWS_OP_SUCCESS; on_error: if (message) { aws_mem_release(message->allocator, message); } return AWS_OP_ERR; } static int s_packet_handler_pubrel(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: received a PUBREL", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_PUBREL)) { return AWS_OP_ERR; } struct aws_mqtt_packet_ack ack; if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) { return AWS_OP_ERR; } /* Send PUBCOMP */ aws_mqtt_packet_pubcomp_init(&ack, ack.packet_identifier); struct aws_io_message *message = mqtt_get_message_for_packet(connection, &ack.fixed_header); if (!message) { return AWS_OP_ERR; } if (aws_mqtt_packet_ack_encode(&message->message_data, &ack)) { goto on_error; } if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { goto on_error; } return AWS_OP_SUCCESS; on_error: if (message) { aws_mem_release(message->allocator, message); } return AWS_OP_ERR; } static int s_packet_handler_pubcomp(struct aws_byte_cursor message_cursor, void *user_data) { struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: received a PUBCOMP", (void *)connection); if (s_validate_received_packet_type(connection, AWS_MQTT_PACKET_PUBCOMP)) { return AWS_OP_ERR; } struct aws_mqtt_packet_ack ack; if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) { return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: received ack for message id %" PRIu16, (void *)connection, ack.packet_identifier); mqtt_request_complete(connection, AWS_ERROR_SUCCESS, ack.packet_identifier); return AWS_OP_SUCCESS; } static int s_packet_handler_pingresp(struct aws_byte_cursor message_cursor, void *user_data) { (void)message_cursor; struct aws_mqtt_client_connection_311_impl *connection = user_data; AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: PINGRESP received", (void *)connection); connection->thread_data.waiting_on_ping_response = false; return AWS_OP_SUCCESS; } /* Bake up a big ol' function table just like Gramma used to make */ static struct 
aws_mqtt_client_connection_packet_handlers s_default_packet_handlers = { .handlers_by_packet_type = { [AWS_MQTT_PACKET_CONNECT] = &s_packet_handler_default, [AWS_MQTT_PACKET_CONNACK] = &s_packet_handler_connack, [AWS_MQTT_PACKET_PUBLISH] = &s_packet_handler_publish, [AWS_MQTT_PACKET_PUBACK] = &s_packet_handler_puback, [AWS_MQTT_PACKET_PUBREC] = &s_packet_handler_pubrec, [AWS_MQTT_PACKET_PUBREL] = &s_packet_handler_pubrel, [AWS_MQTT_PACKET_PUBCOMP] = &s_packet_handler_pubcomp, [AWS_MQTT_PACKET_SUBSCRIBE] = &s_packet_handler_default, [AWS_MQTT_PACKET_SUBACK] = &s_packet_handler_suback, [AWS_MQTT_PACKET_UNSUBSCRIBE] = &s_packet_handler_default, [AWS_MQTT_PACKET_UNSUBACK] = &s_packet_handler_unsuback, [AWS_MQTT_PACKET_PINGREQ] = &s_packet_handler_default, [AWS_MQTT_PACKET_PINGRESP] = &s_packet_handler_pingresp, [AWS_MQTT_PACKET_DISCONNECT] = &s_packet_handler_default, }}; const struct aws_mqtt_client_connection_packet_handlers *aws_mqtt311_get_default_packet_handlers(void) { return &s_default_packet_handlers; } /******************************************************************************* * Channel Handler ******************************************************************************/ /** * Handles incoming messages from the server. */ static int s_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct aws_mqtt_client_connection_311_impl *connection = handler->impl; if (message->message_type != AWS_IO_MESSAGE_APPLICATION_DATA || message->message_data.len < 1) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: processing read message of size %zu", (void *)connection, message->message_data.len); /* This cursor will be updated as we read through the message. */ struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); int result = aws_mqtt311_decoder_on_bytes_received(&connection->thread_data.decoder, message_cursor); if (result == AWS_OP_SUCCESS) { /* Do cleanup */ size_t message_data_length = message->message_data.len; aws_mem_release(message->allocator, message); aws_channel_slot_increment_read_window(slot, message_data_length); } else { aws_channel_shutdown(connection->slot->channel, aws_last_error()); } return result; } static int s_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { struct aws_mqtt_client_connection_311_impl *connection = handler->impl; if (dir == AWS_CHANNEL_DIR_WRITE) { /* On closing write direction, send out disconnect packet before closing connection. 
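* This is only attempted on a clean shutdown (error_code == AWS_OP_SUCCESS) and when scarce resources do not
* need to be freed immediately; an errored channel is torn down without the courtesy DISCONNECT.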
*/ if (!free_scarce_resources_immediately) { if (error_code == AWS_OP_SUCCESS) { AWS_LOGF_INFO( AWS_LS_MQTT_CLIENT, "id=%p: sending disconnect message as part of graceful shutdown.", (void *)connection); /* On clean shutdown, send the disconnect message */ struct aws_mqtt_packet_connection disconnect; aws_mqtt_packet_disconnect_init(&disconnect); struct aws_io_message *message = mqtt_get_message_for_packet(connection, &disconnect.fixed_header); if (!message) { goto done; } if (aws_mqtt_packet_connection_encode(&message->message_data, &disconnect)) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: failed to encode courteous disconnect io message", (void *)connection); aws_mem_release(message->allocator, message); goto done; } if (aws_channel_slot_send_message(slot, message, AWS_CHANNEL_DIR_WRITE)) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: failed to send courteous disconnect io message", (void *)connection); aws_mem_release(message->allocator, message); goto done; } } } } done: return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } static size_t s_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return SIZE_MAX; } static void s_destroy(struct aws_channel_handler *handler) { struct aws_mqtt_client_connection_311_impl *connection = handler->impl; (void)connection; } static size_t s_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } struct aws_channel_handler_vtable *aws_mqtt_get_client_channel_vtable(void) { static struct aws_channel_handler_vtable s_vtable = { .process_read_message = &s_process_read_message, .process_write_message = NULL, .increment_read_window = NULL, .shutdown = &s_shutdown, .initial_window_size = &s_initial_window_size, .message_overhead = &s_message_overhead, .destroy = &s_destroy, }; return &s_vtable; } /******************************************************************************* * Helpers ******************************************************************************/ struct aws_io_message *mqtt_get_message_for_packet( struct aws_mqtt_client_connection_311_impl *connection, struct aws_mqtt_fixed_header *header) { const size_t required_length = 3 + header->remaining_length; struct aws_io_message *message = aws_channel_acquire_message_from_pool( connection->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, required_length); AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Acquiring memory from pool of required_length %zu", (void *)connection, required_length); return message; } /******************************************************************************* * Requests ******************************************************************************/ /* Send the request */ static void s_request_outgoing_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) { struct aws_mqtt_request *request = arg; struct aws_mqtt_client_connection_311_impl *connection = request->connection; if (status == AWS_TASK_STATUS_CANCELED) { /* Connection lost before the request ever get send, check the request needs to be retried or not */ if (request->retryable) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "static: task id %p, was canceled due to the channel shutting down. Request for packet id " "%" PRIu16 ". will be retried", (void *)task, request->packet_id); /* put it into the offline queue. 
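* (i.e. synced_data.pending_requests_list; the CONNACK handler re-schedules every request on that list once a
* new connection is accepted).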
*/ { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* Set the status as incomplete */ aws_mqtt_connection_statistics_change_operation_statistic_state( connection, request, AWS_MQTT_OSS_INCOMPLETE); aws_linked_list_push_back(&connection->synced_data.pending_requests_list, &request->list_node); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ } else { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "static: task id %p, was canceled due to the channel shutting down. Request for packet id " "%" PRIu16 ". will NOT be retried, will be cancelled", (void *)task, request->packet_id); /* Fire the callback and clean up the memory, as the connection get destroyed. */ if (request->on_complete) { request->on_complete( &connection->base, request->packet_id, AWS_ERROR_MQTT_NOT_CONNECTED, request->on_complete_ud); } { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* Cancel the request in the operation statistics */ aws_mqtt_connection_statistics_change_operation_statistic_state(connection, request, AWS_MQTT_OSS_NONE); aws_hash_table_remove( &connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL); aws_memory_pool_release(&connection->synced_data.requests_pool, request); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ } return; } /* Send the request */ enum aws_mqtt_client_request_state state = request->send_request(request->packet_id, !request->initiated, request->send_request_ud); /* Update the request send time.*/ s_update_request_send_time(request); request->initiated = true; int error_code = AWS_ERROR_SUCCESS; switch (state) { case AWS_MQTT_CLIENT_REQUEST_ERROR: error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: sending request %" PRIu16 " failed with error %d.", (void *)request->connection, request->packet_id, error_code); /* fall-thru */ case AWS_MQTT_CLIENT_REQUEST_COMPLETE: AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: sending request %" PRIu16 " complete, invoking on_complete callback.", (void *)request->connection, request->packet_id); /* If the send_request function reports the request is complete, * remove from the hash table and call the callback. 
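* The error case falls through to this same cleanup, so COMPLETE and ERROR differ only in the error_code
* handed to on_complete.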
*/ if (request->on_complete) { request->on_complete(&connection->base, request->packet_id, error_code, request->on_complete_ud); } { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* Set the request as complete in the operation statistics */ aws_mqtt_connection_statistics_change_operation_statistic_state( request->connection, request, AWS_MQTT_OSS_NONE); aws_hash_table_remove( &connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL); aws_memory_pool_release(&connection->synced_data.requests_pool, request); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ break; case AWS_MQTT_CLIENT_REQUEST_ONGOING: AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: request %" PRIu16 " sent, but waiting on an acknowledgement from peer.", (void *)request->connection, request->packet_id); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* Set the request as incomplete and un-acked in the operation statistics */ aws_mqtt_connection_statistics_change_operation_statistic_state( request->connection, request, AWS_MQTT_OSS_INCOMPLETE | AWS_MQTT_OSS_UNACKED); mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ /* Put the request into the ongoing list */ aws_linked_list_push_back(&connection->thread_data.ongoing_requests_list, &request->list_node); break; } } uint16_t mqtt_create_request( struct aws_mqtt_client_connection_311_impl *connection, aws_mqtt_send_request_fn *send_request, void *send_request_ud, aws_mqtt_op_complete_fn *on_complete, void *on_complete_ud, bool noRetry, uint64_t packet_size) { AWS_ASSERT(connection); AWS_ASSERT(send_request); struct aws_mqtt_request *next_request = NULL; bool should_schedule_task = false; struct aws_channel *channel = NULL; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING) { mqtt_connection_unlock_synced_data(connection); /* User requested disconnecting, ensure no new requests are made until the channel finished shutting * down. */ AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Disconnect requested, stop creating any new request until disconnect process finishes.", (void *)connection); aws_raise_error(AWS_ERROR_MQTT_CONNECTION_DISCONNECTING); return 0; } if (noRetry && connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED) { mqtt_connection_unlock_synced_data(connection); /* Not offline queueing QoS 0 publish or PINGREQ. Fail the call. */ AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: Not currently connected. No offline queueing for QoS 0 publish or pingreq.", (void *)connection); aws_raise_error(AWS_ERROR_MQTT_NOT_CONNECTED); return 0; } /** * Find a free packet ID. * QoS 0 PUBLISH packets don't actually need an ID on the wire, * but we assign them internally anyway just so everything has a unique ID. * * Yes, this is an O(N) search. * We remember the last ID we assigned, so it's O(1) in the common case. * But it's theoretically possible to reach O(N) where N is just above 64000 * if the user is letting a ton of un-ack'd messages queue up */ uint16_t search_start = connection->synced_data.packet_id; struct aws_hash_element *elem = NULL; while (true) { /* Increment ID, watch out for overflow, ID cannot be 0 */ if (connection->synced_data.packet_id == UINT16_MAX) { connection->synced_data.packet_id = 1; } else { connection->synced_data.packet_id++; } /* Is there already an outstanding request using this ID? 
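* The search wraps around the 16-bit space (e.g. starting at 65534 the candidates are 65535, 1, 2, ...) and
* stops either at a free ID or back at the starting ID, in which case AWS_ERROR_MQTT_QUEUE_FULL is raised.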
*/ aws_hash_table_find( &connection->synced_data.outstanding_requests_table, &connection->synced_data.packet_id, &elem); if (elem == NULL) { /* Found a free ID! Break out of loop */ break; } else if (connection->synced_data.packet_id == search_start) { /* Every ID is taken */ mqtt_connection_unlock_synced_data(connection); AWS_LOGF_ERROR( AWS_LS_MQTT_CLIENT, "id=%p: Queue is full. No more packet IDs are available at this time.", (void *)connection); aws_raise_error(AWS_ERROR_MQTT_QUEUE_FULL); return 0; } } next_request = aws_memory_pool_acquire(&connection->synced_data.requests_pool); if (!next_request) { mqtt_connection_unlock_synced_data(connection); return 0; } memset(next_request, 0, sizeof(struct aws_mqtt_request)); next_request->packet_id = connection->synced_data.packet_id; if (aws_hash_table_put( &connection->synced_data.outstanding_requests_table, &next_request->packet_id, next_request, NULL)) { /* failed to put the next request into the table */ aws_memory_pool_release(&connection->synced_data.requests_pool, next_request); mqtt_connection_unlock_synced_data(connection); return 0; } /* Store the request by packet_id */ next_request->allocator = connection->allocator; next_request->connection = connection; next_request->initiated = false; next_request->retryable = !noRetry; next_request->send_request = send_request; next_request->send_request_ud = send_request_ud; next_request->on_complete = on_complete; next_request->on_complete_ud = on_complete_ud; next_request->packet_size = packet_size; aws_channel_task_init( &next_request->outgoing_task, s_request_outgoing_task, next_request, "mqtt_outgoing_request_task"); if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED) { aws_linked_list_push_back(&connection->synced_data.pending_requests_list, &next_request->list_node); } else { AWS_ASSERT(connection->slot); AWS_ASSERT(connection->slot->channel); should_schedule_task = true; channel = connection->slot->channel; /* keep the channel alive until the task is scheduled */ aws_channel_acquire_hold(channel); } if (next_request && next_request->packet_size > 0) { /* Set the status as incomplete */ aws_mqtt_connection_statistics_change_operation_statistic_state( next_request->connection, next_request, AWS_MQTT_OSS_INCOMPLETE); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (should_schedule_task) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: Currently not in the event-loop thread, scheduling a task to send message id %" PRIu16 ".", (void *)connection, next_request->packet_id); aws_channel_schedule_task_now(channel, &next_request->outgoing_task); /* release the refcount we hold with the protection of lock */ aws_channel_release_hold(channel); } return next_request->packet_id; } void mqtt_request_complete(struct aws_mqtt_client_connection_311_impl *connection, int error_code, uint16_t packet_id) { AWS_LOGF_TRACE( AWS_LS_MQTT_CLIENT, "id=%p: message id %" PRIu16 " completed with error code %d, removing from outstanding requests list.", (void *)connection, packet_id, error_code); bool found_request = false; aws_mqtt_op_complete_fn *on_complete = NULL; void *on_complete_ud = NULL; { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); struct aws_hash_element *elem = NULL; aws_hash_table_find(&connection->synced_data.outstanding_requests_table, &packet_id, &elem); if (elem != NULL) { found_request = true; struct aws_mqtt_request *request = elem->value; on_complete = request->on_complete; on_complete_ud = request->on_complete_ud; 
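/* The callback and its userdata are captured above and invoked only after the lock is released, so user code
 * never runs while synced_data.lock is held. */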
/* Set the status as complete */ aws_mqtt_connection_statistics_change_operation_statistic_state( request->connection, request, AWS_MQTT_OSS_NONE); if (error_code == AWS_OP_SUCCESS) { s_pushoff_next_ping_time(connection, request->request_send_timestamp); } /* clean up request resources */ aws_hash_table_remove_element(&connection->synced_data.outstanding_requests_table, elem); /* remove the request from the list, which is thread_data.ongoing_requests_list */ aws_linked_list_remove(&request->list_node); aws_memory_pool_release(&connection->synced_data.requests_pool, request); } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (!found_request) { AWS_LOGF_DEBUG( AWS_LS_MQTT_CLIENT, "id=%p: received completion for message id %" PRIu16 " but no outstanding request exists. Assuming this is an ack of a resend when the first request has " "already completed.", (void *)connection, packet_id); return; } /* Invoke the complete callback. */ if (on_complete) { on_complete(&connection->base, packet_id, error_code, on_complete_ud); } } struct mqtt_shutdown_task { int error_code; struct aws_channel_task task; }; static void s_mqtt_disconnect_task(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)status; struct mqtt_shutdown_task *task = AWS_CONTAINER_OF(channel_task, struct mqtt_shutdown_task, task); struct aws_mqtt_client_connection_311_impl *connection = arg; AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Doing disconnect", (void *)connection); { /* BEGIN CRITICAL SECTION */ mqtt_connection_lock_synced_data(connection); /* If there is an outstanding reconnect task, cancel it */ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING && connection->reconnect_task) { aws_atomic_store_ptr(&connection->reconnect_task->connection_ptr, NULL); /* If the reconnect_task isn't scheduled, free it */ if (connection->reconnect_task && !connection->reconnect_task->task.timestamp) { aws_mem_release(connection->reconnect_task->allocator, connection->reconnect_task); } connection->reconnect_task = NULL; } mqtt_connection_unlock_synced_data(connection); } /* END CRITICAL SECTION */ if (connection->slot && connection->slot->channel) { aws_channel_shutdown(connection->slot->channel, task->error_code); } aws_mem_release(connection->allocator, task); } void mqtt_disconnect_impl(struct aws_mqtt_client_connection_311_impl *connection, int error_code) { if (connection->slot) { struct mqtt_shutdown_task *shutdown_task = aws_mem_calloc(connection->allocator, 1, sizeof(struct mqtt_shutdown_task)); shutdown_task->error_code = error_code; aws_channel_task_init(&shutdown_task->task, s_mqtt_disconnect_task, connection, "mqtt_disconnect"); aws_channel_schedule_task_now(connection->slot->channel, &shutdown_task->task); } else { AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Client currently has no slot to disconnect", (void *)connection); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/client_impl_shared.c000066400000000000000000000171601456575232400255130ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include struct aws_mqtt_client_connection *aws_mqtt_client_connection_acquire(struct aws_mqtt_client_connection *connection) { if (connection != NULL) { return (*connection->vtable->acquire_fn)(connection->impl); } return NULL; } void aws_mqtt_client_connection_release(struct aws_mqtt_client_connection *connection) { if (connection != NULL) { (*connection->vtable->release_fn)(connection->impl); } } int aws_mqtt_client_connection_set_will( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload) { return (*connection->vtable->set_will_fn)(connection->impl, topic, qos, retain, payload); } int aws_mqtt_client_connection_set_login( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *username, const struct aws_byte_cursor *password) { return (*connection->vtable->set_login_fn)(connection->impl, username, password); } int aws_mqtt_client_connection_use_websockets( struct aws_mqtt_client_connection *connection, aws_mqtt_transform_websocket_handshake_fn *transformer, void *transformer_ud, aws_mqtt_validate_websocket_handshake_fn *validator, void *validator_ud) { return (*connection->vtable->use_websockets_fn)( connection->impl, transformer, transformer_ud, validator, validator_ud); } int aws_mqtt_client_connection_set_http_proxy_options( struct aws_mqtt_client_connection *connection, struct aws_http_proxy_options *proxy_options) { return (*connection->vtable->set_http_proxy_options_fn)(connection->impl, proxy_options); } int aws_mqtt_client_connection_set_host_resolution_options( struct aws_mqtt_client_connection *connection, const struct aws_host_resolution_config *host_resolution_config) { return (*connection->vtable->set_host_resolution_options_fn)(connection->impl, host_resolution_config); } int aws_mqtt_client_connection_set_reconnect_timeout( struct aws_mqtt_client_connection *connection, uint64_t min_timeout, uint64_t max_timeout) { return (*connection->vtable->set_reconnect_timeout_fn)(connection->impl, min_timeout, max_timeout); } int aws_mqtt_client_connection_set_connection_result_handlers( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_success_fn *on_connection_success, void *on_connection_success_ud, aws_mqtt_client_on_connection_failure_fn *on_connection_failure, void *on_connection_failure_ud) { return (*connection->vtable->set_connection_result_handlers)( connection->impl, on_connection_success, on_connection_success_ud, on_connection_failure, on_connection_failure_ud); } int aws_mqtt_client_connection_set_connection_interruption_handlers( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_interrupted_fn *on_interrupted, void *on_interrupted_ud, aws_mqtt_client_on_connection_resumed_fn *on_resumed, void *on_resumed_ud) { return (*connection->vtable->set_connection_interruption_handlers_fn)( connection->impl, on_interrupted, on_interrupted_ud, on_resumed, on_resumed_ud); } int aws_mqtt_client_connection_set_connection_closed_handler( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_closed_fn *on_closed, void *on_closed_ud) { return (*connection->vtable->set_connection_closed_handler_fn)(connection->impl, on_closed, on_closed_ud); } int aws_mqtt_client_connection_set_on_any_publish_handler( struct aws_mqtt_client_connection *connection, aws_mqtt_client_publish_received_fn *on_any_publish, void *on_any_publish_ud) { return 
(*connection->vtable->set_on_any_publish_handler_fn)(connection->impl, on_any_publish, on_any_publish_ud); } int aws_mqtt_client_connection_set_connection_termination_handler( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_termination_fn *on_termination, void *on_termination_ud) { return (*connection->vtable->set_connection_termination_handler_fn)( connection->impl, on_termination, on_termination_ud); } int aws_mqtt_client_connection_connect( struct aws_mqtt_client_connection *connection, const struct aws_mqtt_connection_options *connection_options) { return (*connection->vtable->connect_fn)(connection->impl, connection_options); } int aws_mqtt_client_connection_reconnect( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_connection_complete_fn *on_connection_complete, void *userdata) { return (*connection->vtable->reconnect_fn)(connection->impl, on_connection_complete, userdata); } int aws_mqtt_client_connection_disconnect( struct aws_mqtt_client_connection *connection, aws_mqtt_client_on_disconnect_fn *on_disconnect, void *userdata) { return (*connection->vtable->disconnect_fn)(connection->impl, on_disconnect, userdata); } uint16_t aws_mqtt_client_connection_subscribe_multiple( struct aws_mqtt_client_connection *connection, const struct aws_array_list *topic_filters, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud) { return (*connection->vtable->subscribe_multiple_fn)(connection->impl, topic_filters, on_suback, on_suback_ud); } uint16_t aws_mqtt_client_connection_subscribe( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_client_publish_received_fn *on_publish, void *on_publish_ud, aws_mqtt_userdata_cleanup_fn *on_ud_cleanup, aws_mqtt_suback_fn *on_suback, void *on_suback_ud) { return (*connection->vtable->subscribe_fn)( connection->impl, topic_filter, qos, on_publish, on_publish_ud, on_ud_cleanup, on_suback, on_suback_ud); } uint16_t aws_mqtt_resubscribe_existing_topics( struct aws_mqtt_client_connection *connection, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_ud) { return (*connection->vtable->resubscribe_existing_topics_fn)(connection->impl, on_suback, on_suback_ud); } uint16_t aws_mqtt_client_connection_unsubscribe( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic_filter, aws_mqtt_op_complete_fn *on_unsuback, void *on_unsuback_ud) { return (*connection->vtable->unsubscribe_fn)(connection->impl, topic_filter, on_unsuback, on_unsuback_ud); } uint16_t aws_mqtt_client_connection_publish( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload, aws_mqtt_op_complete_fn *on_complete, void *userdata) { return (*connection->vtable->publish_fn)(connection->impl, topic, qos, retain, payload, on_complete, userdata); } int aws_mqtt_client_connection_get_stats( struct aws_mqtt_client_connection *connection, struct aws_mqtt_connection_operation_statistics *stats) { return (*connection->vtable->get_stats_fn)(connection->impl, stats); } uint64_t aws_mqtt_hash_uint16_t(const void *item) { return *(uint16_t *)item; } bool aws_mqtt_compare_uint16_t_eq(const void *a, const void *b) { return *(uint16_t *)a == *(uint16_t *)b; } bool aws_mqtt_byte_cursor_hash_equality(const void *a, const void *b) { const struct aws_byte_cursor *a_cursor = a; const struct aws_byte_cursor *b_cursor = b; return aws_byte_cursor_eq(a_cursor, b_cursor); } 
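The hashing and equality helpers at the end of client_impl_shared.c (aws_mqtt_hash_uint16_t, aws_mqtt_compare_uint16_t_eq, aws_mqtt_byte_cursor_hash_equality) exist so that packet ids and topic filters can key aws_hash_table instances elsewhere in the library, for example the 311 client's outstanding_requests_table and the subscription set's topic tree. The following sketch is an editor-added illustration, not part of the upstream sources: it assumes <aws/common/hash_table.h> provides the hash-table API used throughout these files and that the private header "aws/mqtt/private/client_impl_shared.h" declares the two uint16_t helpers; the function name s_example_packet_id_table and the stand-in value are hypothetical.

/* Editor-added sketch (see assumptions above); mirrors how client_channel_handler.c keys its
 * outstanding-requests table by packet id. */
#include <aws/common/hash_table.h>

#include "aws/mqtt/private/client_impl_shared.h" /* assumed to declare aws_mqtt_hash_uint16_t et al. */

static int s_example_packet_id_table(struct aws_allocator *allocator) {
    struct aws_hash_table requests;

    /* Keys are pointers to uint16_t packet ids; the helpers hash/compare the pointed-to value. */
    if (aws_hash_table_init(
            &requests, allocator, 16, aws_mqtt_hash_uint16_t, aws_mqtt_compare_uint16_t_eq, NULL, NULL)) {
        return AWS_OP_ERR;
    }

    uint16_t packet_id = 42;
    int value = 0; /* stand-in for a struct aws_mqtt_request */
    if (aws_hash_table_put(&requests, &packet_id, &value, NULL)) {
        aws_hash_table_clean_up(&requests);
        return AWS_OP_ERR;
    }

    /* Look the entry up by id, as mqtt_request_complete() does, then remove it. */
    struct aws_hash_element *elem = NULL;
    aws_hash_table_find(&requests, &packet_id, &elem);
    if (elem != NULL) {
        aws_hash_table_remove(&requests, &packet_id, NULL, NULL);
    }

    aws_hash_table_clean_up(&requests);
    return AWS_OP_SUCCESS;
}

Note that the real code stores the key inside the value (request->packet_id), which keeps the pointed-to id alive for exactly as long as the entry remains in the table.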
aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/fixed_header.c000066400000000000000000000106061456575232400242730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /** * Implements encoding & decoding of the remaining_length field across 1-4 bytes [MQTT-2.2.3]. * * Any number less than or equal to 127 (7 bit max) can be written into a single byte, where any number larger than 128 * may be written into multiple bytes, using the most significant bit (128) as a continuation flag. */ static int s_encode_remaining_length(struct aws_byte_buf *buf, size_t remaining_length) { AWS_PRECONDITION(buf); AWS_PRECONDITION(remaining_length < UINT32_MAX); do { uint8_t encoded_byte = remaining_length % 128; remaining_length /= 128; if (remaining_length) { encoded_byte |= 128; } if (!aws_byte_buf_write_u8(buf, encoded_byte)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } while (remaining_length); return AWS_OP_SUCCESS; } int aws_mqtt311_decode_remaining_length(struct aws_byte_cursor *cur, size_t *remaining_length_out) { AWS_PRECONDITION(cur); /* Read remaining_length */ size_t multiplier = 1; size_t remaining_length = 0; while (true) { uint8_t encoded_byte; if (!aws_byte_cursor_read_u8(cur, &encoded_byte)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } remaining_length += (encoded_byte & 127) * multiplier; multiplier *= 128; if (!(encoded_byte & 128)) { break; } if (multiplier > 128 * 128 * 128) { /* If high order bit is set on last byte, value is malformed */ return aws_raise_error(AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH); } } *remaining_length_out = remaining_length; return AWS_OP_SUCCESS; } enum aws_mqtt_packet_type aws_mqtt_get_packet_type(const uint8_t *buffer) { return *buffer >> 4; } bool aws_mqtt_packet_has_flags(const struct aws_mqtt_fixed_header *header) { /* Parse attributes based on packet type */ switch (header->packet_type) { case AWS_MQTT_PACKET_SUBSCRIBE: case AWS_MQTT_PACKET_UNSUBSCRIBE: case AWS_MQTT_PACKET_PUBLISH: case AWS_MQTT_PACKET_PUBREL: return true; break; case AWS_MQTT_PACKET_CONNECT: case AWS_MQTT_PACKET_CONNACK: case AWS_MQTT_PACKET_PUBACK: case AWS_MQTT_PACKET_PUBREC: case AWS_MQTT_PACKET_PUBCOMP: case AWS_MQTT_PACKET_SUBACK: case AWS_MQTT_PACKET_UNSUBACK: case AWS_MQTT_PACKET_PINGREQ: case AWS_MQTT_PACKET_PINGRESP: case AWS_MQTT_PACKET_DISCONNECT: return false; default: return false; } } int aws_mqtt_fixed_header_encode(struct aws_byte_buf *buf, const struct aws_mqtt_fixed_header *header) { AWS_PRECONDITION(buf); AWS_PRECONDITION(header); /* Check that flags are 0 if they must not be present */ if (!aws_mqtt_packet_has_flags(header) && header->flags != 0) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS); } /* Write packet type and flags */ uint8_t byte_1 = (uint8_t)((header->packet_type << 4) | (header->flags & 0xF)); if (!aws_byte_buf_write_u8(buf, byte_1)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write remaining length */ if (s_encode_remaining_length(buf, header->remaining_length)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_mqtt_fixed_header_decode(struct aws_byte_cursor *cur, struct aws_mqtt_fixed_header *header) { AWS_PRECONDITION(cur); AWS_PRECONDITION(header); /* Read packet type and flags */ uint8_t byte_1 = 0; if (!aws_byte_cursor_read_u8(cur, &byte_1)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } header->packet_type = aws_mqtt_get_packet_type(&byte_1); header->flags = byte_1 & 0xF; /* Read remaining length 
*/ if (aws_mqtt311_decode_remaining_length(cur, &header->remaining_length)) { return AWS_OP_ERR; } if (cur->len < header->remaining_length) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Check that flags are 0 if they must not be present */ if (!aws_mqtt_packet_has_flags(header) && header->flags != 0) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/mqtt.c000066400000000000000000000340301456575232400226460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /******************************************************************************* * Topic Validation ******************************************************************************/ static bool s_is_valid_topic(const struct aws_byte_cursor *topic, bool is_filter) { if (topic == NULL) { return false; } /* [MQTT-4.7.3-1] Check existance and length */ if (!topic->ptr || !topic->len) { return false; } if (aws_mqtt_validate_utf8_text(*topic) == AWS_OP_ERR) { return false; } /* [MQTT-4.7.3-2] Check for the null character */ if (memchr(topic->ptr, 0, topic->len)) { return false; } /* [MQTT-4.7.3-3] Topic must not be too long */ if (topic->len > 65535) { return false; } bool saw_hash = false; struct aws_byte_cursor topic_part; AWS_ZERO_STRUCT(topic_part); while (aws_byte_cursor_next_split(topic, '/', &topic_part)) { if (saw_hash) { /* [MQTT-4.7.1-2] If last part was a '#' and there's still another part, it's an invalid topic */ return false; } if (topic_part.len == 0) { /* 0 length parts are fine */ continue; } /* Check single level wildcard */ if (memchr(topic_part.ptr, '+', topic_part.len)) { if (!is_filter) { /* [MQTT-4.7.1-3] + only allowed on filters */ return false; } if (topic_part.len > 1) { /* topic part must be 1 character long */ return false; } } /* Check multi level wildcard */ if (memchr(topic_part.ptr, '#', topic_part.len)) { if (!is_filter) { /* [MQTT-4.7.1-2] # only allowed on filters */ return false; } if (topic_part.len > 1) { /* topic part must be 1 character long */ return false; } saw_hash = true; } } return true; } bool aws_mqtt_is_valid_topic(const struct aws_byte_cursor *topic) { return s_is_valid_topic(topic, false); } bool aws_mqtt_is_valid_topic_filter(const struct aws_byte_cursor *topic_filter) { return s_is_valid_topic(topic_filter, true); } /******************************************************************************* * Library Init ******************************************************************************/ #define AWS_DEFINE_ERROR_INFO_MQTT(C, ES) AWS_DEFINE_ERROR_INFO(C, ES, "libaws-c-mqtt") /* clang-format off */ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_INVALID_RESERVED_BITS, "Bits marked as reserved in the MQTT spec were incorrectly set."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_BUFFER_TOO_BIG, "[MQTT-1.5.3] Encoded UTF-8 buffers may be no bigger than 65535 bytes."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH, "[MQTT-2.2.3] Encoded remaining length field is malformed."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME, "[MQTT-3.1.2-1] Protocol name specified is unsupported."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_LEVEL, "[MQTT-3.1.2-2] Protocol level specified is unsupported."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_INVALID_CREDENTIALS, "[MQTT-3.1.2-21] 
Connect packet may not include password when no username is present."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_INVALID_QOS, "Both bits in a QoS field must not be set."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_INVALID_PACKET_TYPE, "Packet type in packet fixed header is invalid."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_INVALID_TOPIC, "Topic or filter is invalid."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_TIMEOUT, "Time limit between request and response has been exceeded."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_PROTOCOL_ERROR, "Protocol error occurred."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_NOT_CONNECTED, "The requested operation is invalid as the connection is not open."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_ALREADY_CONNECTED, "The requested operation is invalid as the connection is already open."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_BUILT_WITHOUT_WEBSOCKETS, "Library built without MQTT_WITH_WEBSOCKETS option."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_UNEXPECTED_HANGUP, "The connection was closed unexpectedly."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_CONNECTION_SHUTDOWN, "MQTT operation interrupted by connection shutdown."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_CONNECTION_DESTROYED, "Connection has started destroying process, all uncompleted requests will fail."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_CONNECTION_DISCONNECTING, "Connection is disconnecting, it's not safe to do this operation until the connection finishes shutdown."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION, "Old requests from the previous session are cancelled, and offline request will not be accept."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_QUEUE_FULL, "MQTT request queue is full."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION, "Invalid mqtt5 client options value."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION, "Invalid mqtt5 connect packet options value."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION, "Invalid mqtt5 disconnect packet options value."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION, "Invalid mqtt5 publish packet options value."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION, "Invalid mqtt5 subscribe packet options value."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION, "Invalid mqtt5 unsubscribe packet options value."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION, "Invalid mqtt5 user property value."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_PACKET_VALIDATION, "General mqtt5 packet validation error"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_ENCODE_FAILURE, "Error occurred while encoding an outgoing mqtt5 packet"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR, "Mqtt5 decoder received an invalid packet that broke mqtt5 protocol rules"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, "Remote endpoint rejected the CONNECT attempt by returning an unsuccessful CONNACK"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_CONNACK_TIMEOUT, "Remote endpoint did not respond to a CONNECT request before timeout exceeded"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT, "Remote endpoint did not respond to a PINGREQ before timeout exceeded"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_USER_REQUESTED_STOP, "Mqtt5 client connection interrupted by user request."), 
AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_DISCONNECT_RECEIVED, "Mqtt5 client connection interrupted by server DISCONNECT."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_CLIENT_TERMINATED, "Mqtt5 client terminated by user request."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, "Mqtt5 operation failed due to a disconnection event in conjunction with the client's offline queue retention policy."), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_ENCODE_SIZE_UNSUPPORTED_PACKET_TYPE, "Unsupported packet type for encode size calculation"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE, "Error while processing mqtt5 operational state"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS, "Incoming publish contained an invalid (too large or unknown) topic alias"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS, "Outgoing publish contained an invalid (too large or unknown) topic alias"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT5_INVALID_UTF8_STRING, "Outbound packet contains invalid utf-8 data in a field that must be utf-8"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT, "Mqtt5 connection was reset by the Mqtt3 adapter in order to guarantee correct connection configuration"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_CONNECTION_RESUBSCRIBE_NO_TOPICS, "Resubscribe was called when there were no subscriptions"), AWS_DEFINE_ERROR_INFO_MQTT( AWS_ERROR_MQTT_CONNECTION_SUBSCRIBE_FAILURE, "MQTT subscribe operation failed"), }; /* clang-format on */ #undef AWS_DEFINE_ERROR_INFO_MQTT static struct aws_error_info_list s_error_list = { .error_list = s_errors, .count = AWS_ARRAY_SIZE(s_errors), }; /* clang-format off */ static struct aws_log_subject_info s_logging_subjects[] = { DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT_GENERAL, "mqtt", "Misc MQTT logging"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT_CLIENT, "mqtt-client", "MQTT client and connections"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT_TOPIC_TREE, "mqtt-topic-tree", "MQTT subscription tree"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT5_GENERAL, "mqtt5-general", "Misc MQTT5 logging"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT5_CLIENT, "mqtt5-client", "MQTT5 client and connections"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT5_CANARY, "mqtt5-canary", "MQTT5 canary logging"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "mqtt5-to-mqtt3-adapter", "MQTT5-To-MQTT3 adapter logging"), }; /* clang-format on */ static struct aws_log_subject_info_list s_logging_subjects_list = { .subject_list = s_logging_subjects, .count = AWS_ARRAY_SIZE(s_logging_subjects), }; static bool s_mqtt_library_initialized = false; void aws_mqtt_library_init(struct aws_allocator *allocator) { (void)allocator; if (!s_mqtt_library_initialized) { s_mqtt_library_initialized = true; aws_io_library_init(allocator); aws_http_library_init(allocator); aws_register_error_info(&s_error_list); aws_register_log_subject_info_list(&s_logging_subjects_list); } } void aws_mqtt_library_clean_up(void) { if (s_mqtt_library_initialized) { s_mqtt_library_initialized = false; aws_thread_join_all_managed(); aws_unregister_error_info(&s_error_list); aws_unregister_log_subject_info_list(&s_logging_subjects_list); aws_http_library_clean_up(); aws_io_library_clean_up(); } } void aws_mqtt_fatal_assert_library_initialized(void) { if (!s_mqtt_library_initialized) { AWS_LOGF_FATAL( AWS_LS_MQTT_GENERAL, "aws_mqtt_library_init() must be called before using any functionality in aws-c-mqtt."); 
AWS_FATAL_ASSERT(s_mqtt_library_initialized); } } /* UTF-8 encoded string validation respect to [MQTT-1.5.3-2]. */ static int aws_mqtt_utf8_decoder(uint32_t codepoint, void *user_data) { (void)user_data; /* U+0000 - A UTF-8 Encoded String MUST NOT include an encoding of the null character U+0000. [MQTT-1.5.4-2] * U+0001..U+001F control characters are not valid */ if (AWS_UNLIKELY(codepoint <= 0x001F)) { return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING); } /* U+007F..U+009F control characters are not valid */ if (AWS_UNLIKELY((codepoint >= 0x007F) && (codepoint <= 0x009F))) { return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING); } /* Unicode non-characters are not valid: https://www.unicode.org/faq/private_use.html#nonchar1 */ if (AWS_UNLIKELY((codepoint & 0x00FFFF) >= 0x00FFFE)) { return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING); } if (AWS_UNLIKELY(codepoint >= 0xFDD0 && codepoint <= 0xFDEF)) { return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING); } return AWS_OP_SUCCESS; } static struct aws_utf8_decoder_options s_aws_mqtt_utf8_decoder_options = { .on_codepoint = aws_mqtt_utf8_decoder, }; int aws_mqtt_validate_utf8_text(struct aws_byte_cursor text) { return aws_decode_utf8(text, &s_aws_mqtt_utf8_decoder_options); } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/mqtt311_decoder.c000066400000000000000000000201551456575232400245630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include static void s_aws_mqtt311_decoder_reset(struct aws_mqtt311_decoder *decoder) { decoder->state = AWS_MDST_READ_FIRST_BYTE; decoder->total_packet_length = 0; aws_byte_buf_reset(&decoder->packet_buffer, false); } void aws_mqtt311_decoder_init( struct aws_mqtt311_decoder *decoder, struct aws_allocator *allocator, const struct aws_mqtt311_decoder_options *options) { aws_byte_buf_init(&decoder->packet_buffer, allocator, 5); decoder->config = *options; s_aws_mqtt311_decoder_reset(decoder); } void aws_mqtt311_decoder_clean_up(struct aws_mqtt311_decoder *decoder) { aws_byte_buf_clean_up(&decoder->packet_buffer); } static void s_aws_mqtt311_decoder_reset_for_new_packet(struct aws_mqtt311_decoder *decoder) { if (decoder->state != AWS_MDST_PROTOCOL_ERROR) { s_aws_mqtt311_decoder_reset(decoder); } } enum aws_mqtt311_decoding_directive { AWS_MDD_CONTINUE, AWS_MDD_OUT_OF_DATA, AWS_MDD_PROTOCOL_ERROR }; static enum aws_mqtt311_decoding_directive aws_result_to_mqtt311_decoding_directive(int result) { return (result == AWS_OP_SUCCESS) ? AWS_MDD_CONTINUE : AWS_MDD_PROTOCOL_ERROR; } static int s_aws_mqtt311_decoder_safe_packet_handle( struct aws_mqtt311_decoder *decoder, enum aws_mqtt_packet_type packet_type, struct aws_byte_cursor packet_cursor) { packet_handler_fn *handler = decoder->config.packet_handlers->handlers_by_packet_type[packet_type]; if (handler != NULL) { return handler(packet_cursor, decoder->config.handler_user_data); } else { return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR); } } static enum aws_mqtt311_decoding_directive s_handle_decoder_read_first_byte( struct aws_mqtt311_decoder *decoder, struct aws_byte_cursor *data) { AWS_FATAL_ASSERT(decoder->packet_buffer.len == 0); if (data->len == 0) { return AWS_MDD_OUT_OF_DATA; } /* * Do a greedy check to see if the whole MQTT packet is contained within the received data. If it is, decode it * directly from the incoming data cursor without buffering it first. 
Otherwise, the packet is fragmented * across multiple received data calls, and so we must use the packet buffer and copy everything first via the * full decoder state machine. * * A corollary of this is that the decoder is only ever in the AWS_MDST_READ_REMAINING_LENGTH or AWS_MDST_READ_BODY * states if the current MQTT packet was received in a fragmented manner. */ struct aws_byte_cursor temp_cursor = *data; struct aws_mqtt_fixed_header packet_header; AWS_ZERO_STRUCT(packet_header); if (!aws_mqtt_fixed_header_decode(&temp_cursor, &packet_header) && temp_cursor.len >= packet_header.remaining_length) { /* figure out the cursor that spans the full packet */ size_t fixed_header_length = temp_cursor.ptr - data->ptr; struct aws_byte_cursor whole_packet_cursor = *data; whole_packet_cursor.len = fixed_header_length + packet_header.remaining_length; /* advance the external, mutable data cursor to the start of the next packet */ aws_byte_cursor_advance(data, whole_packet_cursor.len); /* * if this fails, the decoder goes into an error state. If it succeeds we'll loop again into the same state * because we'll be back at the beginning of the next packet (if it exists). */ enum aws_mqtt_packet_type packet_type = aws_mqtt_get_packet_type(whole_packet_cursor.ptr); return aws_result_to_mqtt311_decoding_directive( s_aws_mqtt311_decoder_safe_packet_handle(decoder, packet_type, whole_packet_cursor)); } /* * The packet is fragmented, spanning more than this io message. So we buffer it and use the * simple state machine to decode. */ uint8_t byte = *data->ptr; aws_byte_cursor_advance(data, 1); aws_byte_buf_append_byte_dynamic(&decoder->packet_buffer, byte); decoder->state = AWS_MDST_READ_REMAINING_LENGTH; return AWS_MDD_CONTINUE; } static enum aws_mqtt311_decoding_directive s_handle_decoder_read_remaining_length( struct aws_mqtt311_decoder *decoder, struct aws_byte_cursor *data) { AWS_FATAL_ASSERT(decoder->total_packet_length == 0); if (data->len == 0) { return AWS_MDD_OUT_OF_DATA; } uint8_t byte = *data->ptr; aws_byte_cursor_advance(data, 1); aws_byte_buf_append_byte_dynamic(&decoder->packet_buffer, byte); struct aws_byte_cursor vli_cursor = aws_byte_cursor_from_buf(&decoder->packet_buffer); aws_byte_cursor_advance(&vli_cursor, 1); size_t remaining_length = 0; if (aws_mqtt311_decode_remaining_length(&vli_cursor, &remaining_length) == AWS_OP_ERR) { /* anything other than a short buffer error (not enough data yet) is a terminal error */ if (aws_last_error() == AWS_ERROR_SHORT_BUFFER) { return AWS_MDD_CONTINUE; } else { return AWS_MDD_PROTOCOL_ERROR; } } /* * If we successfully decoded a variable-length integer, we now know exactly how many bytes we need to receive in * order to have the full packet. 
*/ decoder->total_packet_length = remaining_length + decoder->packet_buffer.len; AWS_FATAL_ASSERT(decoder->total_packet_length > 0); decoder->state = AWS_MDST_READ_BODY; return AWS_MDD_CONTINUE; } static enum aws_mqtt311_decoding_directive s_handle_decoder_read_body( struct aws_mqtt311_decoder *decoder, struct aws_byte_cursor *data) { AWS_FATAL_ASSERT(decoder->total_packet_length > 0); size_t buffer_length = decoder->packet_buffer.len; size_t amount_to_read = aws_min_size(decoder->total_packet_length - buffer_length, data->len); struct aws_byte_cursor copy_cursor = aws_byte_cursor_advance(data, amount_to_read); aws_byte_buf_append_dynamic(&decoder->packet_buffer, &copy_cursor); if (decoder->packet_buffer.len == decoder->total_packet_length) { /* We have the full packet in the scratch buffer, invoke the correct handler to decode and process it */ struct aws_byte_cursor packet_data = aws_byte_cursor_from_buf(&decoder->packet_buffer); enum aws_mqtt_packet_type packet_type = aws_mqtt_get_packet_type(packet_data.ptr); if (s_aws_mqtt311_decoder_safe_packet_handle(decoder, packet_type, packet_data) == AWS_OP_ERR) { return AWS_MDD_PROTOCOL_ERROR; } s_aws_mqtt311_decoder_reset_for_new_packet(decoder); return AWS_MDD_CONTINUE; } return AWS_MDD_OUT_OF_DATA; } int aws_mqtt311_decoder_on_bytes_received(struct aws_mqtt311_decoder *decoder, struct aws_byte_cursor data) { struct aws_byte_cursor data_cursor = data; enum aws_mqtt311_decoding_directive decode_directive = AWS_MDD_CONTINUE; while (decode_directive == AWS_MDD_CONTINUE) { switch (decoder->state) { case AWS_MDST_READ_FIRST_BYTE: decode_directive = s_handle_decoder_read_first_byte(decoder, &data_cursor); break; case AWS_MDST_READ_REMAINING_LENGTH: decode_directive = s_handle_decoder_read_remaining_length(decoder, &data_cursor); break; case AWS_MDST_READ_BODY: decode_directive = s_handle_decoder_read_body(decoder, &data_cursor); break; default: decode_directive = AWS_MDD_PROTOCOL_ERROR; break; } /* * Protocol error is a terminal failure state until aws_mqtt311_decoder_reset_for_new_connection() is called. */ if (decode_directive == AWS_MDD_PROTOCOL_ERROR) { decoder->state = AWS_MDST_PROTOCOL_ERROR; if (aws_last_error() == AWS_ERROR_SUCCESS) { aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR); } return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } void aws_mqtt311_decoder_reset_for_new_connection(struct aws_mqtt311_decoder *decoder) { s_aws_mqtt311_decoder_reset(decoder); } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/mqtt_subscription_set.c000066400000000000000000000400371456575232400263310ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0.
*/ #include "aws/mqtt/private/mqtt_subscription_set.h" #include "aws/mqtt/private/client_impl_shared.h" #define SUBSCRIPTION_SET_DEFAULT_BRANCH_FACTOR 10 #define SUBSCRIPTION_SET_DEFAULT_ENTRY_COUNT 50 struct aws_mqtt_subscription_set_subscription_record *aws_mqtt_subscription_set_subscription_record_new( struct aws_allocator *allocator, const struct aws_mqtt_subscription_set_subscription_options *subscription) { struct aws_mqtt_subscription_set_subscription_record *record = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_subscription_set_subscription_record)); record->allocator = allocator; aws_byte_buf_init_copy_from_cursor(&record->topic_filter, allocator, subscription->topic_filter); record->subscription_view = *subscription; record->subscription_view.topic_filter = aws_byte_cursor_from_buf(&record->topic_filter); return record; } void aws_mqtt_subscription_set_subscription_record_destroy( struct aws_mqtt_subscription_set_subscription_record *record) { if (record == NULL) { return; } aws_byte_buf_clean_up(&record->topic_filter); aws_mem_release(record->allocator, record); } static void s_aws_mqtt_subscription_set_subscription_record_hash_destroy(void *element) { struct aws_mqtt_subscription_set_subscription_record *record = element; aws_mqtt_subscription_set_subscription_record_destroy(record); } static struct aws_mqtt_subscription_set_topic_tree_node *s_aws_mqtt_subscription_set_node_new( struct aws_allocator *allocator, struct aws_mqtt_subscription_set_topic_tree_node *parent) { struct aws_mqtt_subscription_set_topic_tree_node *node = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_subscription_set_topic_tree_node)); node->allocator = allocator; aws_hash_table_init( &node->children, allocator, SUBSCRIPTION_SET_DEFAULT_BRANCH_FACTOR, aws_hash_byte_cursor_ptr, aws_mqtt_byte_cursor_hash_equality, NULL, NULL); node->ref_count = 1; node->parent = parent; return node; } struct aws_mqtt_subscription_set *aws_mqtt_subscription_set_new(struct aws_allocator *allocator) { struct aws_mqtt_subscription_set *subscription_set = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_subscription_set)); subscription_set->allocator = allocator; subscription_set->root = s_aws_mqtt_subscription_set_node_new(allocator, NULL); aws_hash_table_init( &subscription_set->subscriptions, allocator, SUBSCRIPTION_SET_DEFAULT_ENTRY_COUNT, aws_hash_byte_cursor_ptr, aws_mqtt_byte_cursor_hash_equality, NULL, s_aws_mqtt_subscription_set_subscription_record_hash_destroy); return subscription_set; } static int s_subscription_set_node_destroy_hash_foreach_wrap(void *context, struct aws_hash_element *elem); static void s_aws_mqtt_subscription_set_node_destroy_node(struct aws_mqtt_subscription_set_topic_tree_node *node) { aws_hash_table_foreach(&node->children, s_subscription_set_node_destroy_hash_foreach_wrap, NULL); aws_hash_table_clean_up(&node->children); if (node->on_cleanup && node->callback_user_data) { node->on_cleanup(node->callback_user_data); } aws_byte_buf_clean_up(&node->topic_segment); aws_mem_release(node->allocator, node); } static void s_aws_mqtt_subscription_set_node_destroy_tree(struct aws_mqtt_subscription_set_topic_tree_node *tree) { if (tree == NULL) { return; } if (tree->parent != NULL) { aws_hash_table_remove(&tree->parent->children, &tree->topic_segment, NULL, NULL); } s_aws_mqtt_subscription_set_node_destroy_node(tree); } static int s_subscription_set_node_destroy_hash_foreach_wrap(void *context, struct aws_hash_element *elem) { (void)context; 
s_aws_mqtt_subscription_set_node_destroy_node(elem->value); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE | AWS_COMMON_HASH_TABLE_ITER_DELETE; } void aws_mqtt_subscription_set_destroy(struct aws_mqtt_subscription_set *subscription_set) { if (subscription_set == NULL) { return; } s_aws_mqtt_subscription_set_node_destroy_tree(subscription_set->root); aws_hash_table_clean_up(&subscription_set->subscriptions); aws_mem_release(subscription_set->allocator, subscription_set); } static struct aws_mqtt_subscription_set_topic_tree_node *s_aws_mqtt_subscription_set_get_existing_subscription_node( const struct aws_mqtt_subscription_set *subscription_set, struct aws_byte_cursor topic_filter) { struct aws_mqtt_subscription_set_topic_tree_node *current_node = subscription_set->root; struct aws_byte_cursor topic_segment; AWS_ZERO_STRUCT(topic_segment); while (aws_byte_cursor_next_split(&topic_filter, '/', &topic_segment)) { struct aws_hash_element *hash_element = NULL; aws_hash_table_find(¤t_node->children, &topic_segment, &hash_element); if (hash_element == NULL) { return NULL; } else { current_node = hash_element->value; } } if (!current_node->is_subscription) { return NULL; } return current_node; } bool aws_mqtt_subscription_set_is_subscribed( const struct aws_mqtt_subscription_set *subscription_set, struct aws_byte_cursor topic_filter) { struct aws_hash_element *element = NULL; aws_hash_table_find(&subscription_set->subscriptions, &topic_filter, &element); return element && (element->value != NULL); } bool aws_mqtt_subscription_set_is_in_topic_tree( const struct aws_mqtt_subscription_set *subscription_set, struct aws_byte_cursor topic_filter) { struct aws_mqtt_subscription_set_topic_tree_node *existing_node = s_aws_mqtt_subscription_set_get_existing_subscription_node(subscription_set, topic_filter); return existing_node != NULL; } /* * Walks the existing tree creating nodes as necessary to reach the subscription leaf implied by the topic filter. * Returns the node representing the final level of the topic filter. Each existing node has its ref count increased by * one. Newly-created nodes start with a ref count of one. Given that the topic filter has been validated, the only * possible error is a memory allocation error which is a crash anyways. * * If the leaf node already exists and has a cleanup callback, it will be invoked and both the callback and its user * data will be cleared . The returned node will always have is_subscription set to true. 
*/ static struct aws_mqtt_subscription_set_topic_tree_node * s_aws_mqtt_subscription_set_create_or_reference_topic_filter_path( struct aws_mqtt_subscription_set_topic_tree_node *root, struct aws_byte_cursor topic_filter) { struct aws_mqtt_subscription_set_topic_tree_node *current_node = root; ++root->ref_count; /* * Invariants: * (1) No failure allowed (allocation failure = crash) * (2) The ref count of current_node is always correct *before* the loop condition is evaluated */ struct aws_byte_cursor topic_segment; AWS_ZERO_STRUCT(topic_segment); while (aws_byte_cursor_next_split(&topic_filter, '/', &topic_segment)) { struct aws_hash_element *hash_element = NULL; aws_hash_table_find(¤t_node->children, &topic_segment, &hash_element); if (hash_element == NULL) { struct aws_mqtt_subscription_set_topic_tree_node *new_node = s_aws_mqtt_subscription_set_node_new(current_node->allocator, current_node); aws_byte_buf_init_copy_from_cursor(&new_node->topic_segment, new_node->allocator, topic_segment); new_node->topic_segment_cursor = aws_byte_cursor_from_buf(&new_node->topic_segment); aws_hash_table_put(¤t_node->children, &new_node->topic_segment_cursor, new_node, NULL); current_node = new_node; } else { current_node = hash_element->value; ++current_node->ref_count; } } return current_node; } void aws_mqtt_subscription_set_add_subscription( struct aws_mqtt_subscription_set *subscription_set, const struct aws_mqtt_subscription_set_subscription_options *subscription_options) { AWS_FATAL_ASSERT(aws_mqtt_is_valid_topic_filter(&subscription_options->topic_filter)); aws_hash_table_remove(&subscription_set->subscriptions, &subscription_options->topic_filter, NULL, NULL); struct aws_mqtt_subscription_set_subscription_record *record = aws_mqtt_subscription_set_subscription_record_new(subscription_set->allocator, subscription_options); aws_hash_table_put(&subscription_set->subscriptions, &record->topic_filter, record, NULL); struct aws_mqtt_subscription_set_topic_tree_node *subscription_node = s_aws_mqtt_subscription_set_get_existing_subscription_node( subscription_set, subscription_options->topic_filter); if (subscription_node == NULL) { subscription_node = s_aws_mqtt_subscription_set_create_or_reference_topic_filter_path( subscription_set->root, subscription_options->topic_filter); } if (subscription_node->on_cleanup) { (*subscription_node->on_cleanup)(subscription_node->callback_user_data); subscription_node->on_cleanup = NULL; } subscription_node->is_subscription = true; subscription_node->on_publish_received = subscription_options->on_publish_received; subscription_node->on_cleanup = subscription_options->on_cleanup; subscription_node->callback_user_data = subscription_options->callback_user_data; } void aws_mqtt_subscription_set_remove_subscription( struct aws_mqtt_subscription_set *subscription_set, struct aws_byte_cursor topic_filter) { aws_hash_table_remove(&subscription_set->subscriptions, &topic_filter, NULL, NULL); if (!aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, topic_filter)) { return; } struct aws_mqtt_subscription_set_topic_tree_node *current_node = subscription_set->root; struct aws_byte_cursor topic_segment; AWS_ZERO_STRUCT(topic_segment); while (aws_byte_cursor_next_split(&topic_filter, '/', &topic_segment)) { --current_node->ref_count; if (current_node->ref_count == 0) { s_aws_mqtt_subscription_set_node_destroy_tree(current_node); return; } struct aws_hash_element *hash_element = NULL; aws_hash_table_find(¤t_node->children, &topic_segment, &hash_element); /* We previously 
validated the full path; this must exist */ current_node = hash_element->value; } --current_node->ref_count; if (current_node->ref_count == 0) { s_aws_mqtt_subscription_set_node_destroy_tree(current_node); return; } if (current_node->on_cleanup) { (*current_node->on_cleanup)(current_node->callback_user_data); current_node->on_cleanup = NULL; } current_node->on_publish_received = NULL; current_node->is_subscription = false; } struct aws_mqtt_subscription_set_path_continuation { struct aws_byte_cursor current_fragment; struct aws_mqtt_subscription_set_topic_tree_node *current_node; }; static void s_add_subscription_set_path_continuation( struct aws_array_list *paths, struct aws_byte_cursor fragment, struct aws_mqtt_subscription_set_topic_tree_node *node) { if (node == NULL) { return; } struct aws_mqtt_subscription_set_path_continuation path = { .current_fragment = fragment, .current_node = node, }; aws_array_list_push_back(paths, &path); } #define SUBSCRIPTION_SET_PATH_FRAGMENT_DEFAULT 10 AWS_STATIC_STRING_FROM_LITERAL(s_single_level_wildcard, "+"); AWS_STATIC_STRING_FROM_LITERAL(s_multi_level_wildcard, "#"); static struct aws_mqtt_subscription_set_topic_tree_node *s_aws_mqtt_subscription_set_node_find_child( struct aws_mqtt_subscription_set_topic_tree_node *node, struct aws_byte_cursor fragment) { struct aws_hash_element *element = NULL; aws_hash_table_find(&node->children, &fragment, &element); if (element == NULL) { return NULL; } return element->value; } static void s_invoke_on_publish_received( struct aws_mqtt_subscription_set_topic_tree_node *node, const struct aws_mqtt_subscription_set_publish_received_options *publish_options) { if (node == NULL || !node->is_subscription || node->on_publish_received == NULL) { return; } (*node->on_publish_received)( publish_options->connection, &publish_options->topic, &publish_options->payload, publish_options->dup, publish_options->qos, publish_options->retain, node->callback_user_data); } void aws_mqtt_subscription_set_on_publish_received( const struct aws_mqtt_subscription_set *subscription_set, const struct aws_mqtt_subscription_set_publish_received_options *publish_options) { struct aws_byte_cursor slw_cursor = aws_byte_cursor_from_string(s_single_level_wildcard); struct aws_byte_cursor mlw_cursor = aws_byte_cursor_from_string(s_multi_level_wildcard); struct aws_array_list tree_paths; aws_array_list_init_dynamic( &tree_paths, subscription_set->allocator, SUBSCRIPTION_SET_PATH_FRAGMENT_DEFAULT, sizeof(struct aws_mqtt_subscription_set_path_continuation)); struct aws_byte_cursor empty_cursor; AWS_ZERO_STRUCT(empty_cursor); s_add_subscription_set_path_continuation(&tree_paths, empty_cursor, subscription_set->root); while (aws_array_list_length(&tree_paths) > 0) { struct aws_mqtt_subscription_set_path_continuation path_continuation; AWS_ZERO_STRUCT(path_continuation); size_t path_count = aws_array_list_length(&tree_paths); aws_array_list_get_at(&tree_paths, &path_continuation, path_count - 1); aws_array_list_pop_back(&tree_paths); /* * Invoke multi-level wildcard check before checking split result; this allows a subscription like * 'a/b/#' to match an incoming 'a/b' */ struct aws_mqtt_subscription_set_topic_tree_node *mlw_node = s_aws_mqtt_subscription_set_node_find_child(path_continuation.current_node, mlw_cursor); s_invoke_on_publish_received(mlw_node, publish_options); struct aws_byte_cursor next_fragment = path_continuation.current_fragment; if (!aws_byte_cursor_next_split(&publish_options->topic, '/', &next_fragment)) { 
s_invoke_on_publish_received(path_continuation.current_node, publish_options); continue; } struct aws_mqtt_subscription_set_topic_tree_node *slw_node = s_aws_mqtt_subscription_set_node_find_child(path_continuation.current_node, slw_cursor); s_add_subscription_set_path_continuation(&tree_paths, next_fragment, slw_node); struct aws_mqtt_subscription_set_topic_tree_node *fragment_node = s_aws_mqtt_subscription_set_node_find_child(path_continuation.current_node, next_fragment); s_add_subscription_set_path_continuation(&tree_paths, next_fragment, fragment_node); } aws_array_list_clean_up(&tree_paths); } static int s_subscription_set_subscriptions_hash_get_wrap(void *context, struct aws_hash_element *elem) { struct aws_array_list *subscriptions = context; struct aws_mqtt_subscription_set_subscription_record *record = elem->value; aws_array_list_push_back(subscriptions, &record->subscription_view); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } void aws_mqtt_subscription_set_get_subscriptions( struct aws_mqtt_subscription_set *subscription_set, struct aws_array_list *subscriptions) { AWS_ZERO_STRUCT(*subscriptions); size_t subscription_count = aws_hash_table_get_entry_count(&subscription_set->subscriptions); aws_array_list_init_dynamic( subscriptions, subscription_set->allocator, subscription_count, sizeof(struct aws_mqtt_subscription_set_subscription_options)); aws_hash_table_foreach( &subscription_set->subscriptions, s_subscription_set_subscriptions_hash_get_wrap, subscriptions); } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/packets.c000066400000000000000000001064451456575232400233250ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include enum { S_PROTOCOL_LEVEL = 4 }; enum { S_BIT_1_FLAGS = 0x2 }; static struct aws_byte_cursor s_protocol_name = { .ptr = (uint8_t *)"MQTT", .len = 4, }; static size_t s_sizeof_encoded_buffer(struct aws_byte_cursor *buf) { return sizeof(uint16_t) + buf->len; } static int s_encode_buffer(struct aws_byte_buf *buf, const struct aws_byte_cursor cur) { AWS_PRECONDITION(buf); AWS_PRECONDITION(aws_byte_cursor_is_valid(&cur)); /* Make sure the buffer isn't too big */ if (cur.len > UINT16_MAX) { return aws_raise_error(AWS_ERROR_MQTT_BUFFER_TOO_BIG); } /* Write the length */ if (!aws_byte_buf_write_be16(buf, (uint16_t)cur.len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write the data */ if (!aws_byte_buf_write(buf, cur.ptr, cur.len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } static int s_decode_buffer(struct aws_byte_cursor *cur, struct aws_byte_cursor *buf) { AWS_PRECONDITION(cur); AWS_PRECONDITION(buf); /* Read the length */ uint16_t len; if (!aws_byte_cursor_read_be16(cur, &len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Store the data */ *buf = aws_byte_cursor_advance(cur, len); return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Ack without payload */ static void s_ack_init(struct aws_mqtt_packet_ack *packet, enum aws_mqtt_packet_type type, uint16_t packet_identifier) { AWS_PRECONDITION(packet); AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = type; packet->fixed_header.remaining_length = sizeof(uint16_t); packet->packet_identifier = packet_identifier; } int aws_mqtt_packet_ack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_ack *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); 
/*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Write packet identifier */ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } int aws_mqtt_packet_ack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_ack *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } /* Validate flags */ if (packet->fixed_header.flags != (aws_mqtt_packet_has_flags(&packet->fixed_header) ? S_BIT_1_FLAGS : 0U)) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS); } /*************************************************************************/ /* Variable Header */ /* Read packet identifier */ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Connect */ int aws_mqtt_packet_connect_init( struct aws_mqtt_packet_connect *packet, struct aws_byte_cursor client_identifier, bool clean_session, uint16_t keep_alive) { AWS_PRECONDITION(packet); AWS_PRECONDITION(client_identifier.len > 0); AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = AWS_MQTT_PACKET_CONNECT; /* [MQTT-3.1.1] */ packet->fixed_header.remaining_length = 10 + s_sizeof_encoded_buffer(&client_identifier); packet->client_identifier = client_identifier; packet->clean_session = clean_session; packet->keep_alive_timeout = keep_alive; return AWS_OP_SUCCESS; } int aws_mqtt_packet_connect_add_credentials( struct aws_mqtt_packet_connect *packet, struct aws_byte_cursor username, struct aws_byte_cursor password) { AWS_PRECONDITION(packet); AWS_PRECONDITION(username.len > 0); if (!packet->has_username) { /* If not already username, add size of length field */ packet->fixed_header.remaining_length += 2; } /* Add change in size to remaining_length */ packet->fixed_header.remaining_length += username.len - packet->username.len; packet->has_username = true; packet->username = username; if (password.len > 0) { if (!packet->has_password) { /* If not already password, add size of length field */ packet->fixed_header.remaining_length += 2; } /* Add change in size to remaining_length */ packet->fixed_header.remaining_length += password.len - packet->password.len; packet->has_password = true; packet->password = password; } return AWS_OP_SUCCESS; } int aws_mqtt_packet_connect_add_will( struct aws_mqtt_packet_connect *packet, struct aws_byte_cursor topic, enum aws_mqtt_qos qos, bool retain, struct aws_byte_cursor payload) { packet->has_will = true; packet->will_topic = topic; packet->will_qos = qos; packet->will_retain = retain; packet->will_message = payload; packet->fixed_header.remaining_length += s_sizeof_encoded_buffer(&topic) + s_sizeof_encoded_buffer(&payload); return AWS_OP_SUCCESS; } int aws_mqtt_packet_connect_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connect *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); /* Do validation */ if (packet->has_password && !packet->has_username) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_CREDENTIALS); } 
/*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Write protocol name */ if (s_encode_buffer(buf, s_protocol_name)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write protocol level */ if (!aws_byte_buf_write_u8(buf, S_PROTOCOL_LEVEL)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write connect flags [MQTT-3.1.2.3] */ uint8_t connect_flags = (uint8_t)( packet->clean_session << 1 | packet->has_will << 2 | packet->will_qos << 3 | packet->will_retain << 5 | packet->has_password << 6 | packet->has_username << 7); if (!aws_byte_buf_write_u8(buf, connect_flags)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write keep alive */ if (!aws_byte_buf_write_be16(buf, packet->keep_alive_timeout)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /*************************************************************************/ /* Payload */ /* Client identifier is required, write it */ if (s_encode_buffer(buf, packet->client_identifier)) { return AWS_OP_ERR; } /* Write will */ if (packet->has_will) { if (s_encode_buffer(buf, packet->will_topic)) { return AWS_OP_ERR; } if (s_encode_buffer(buf, packet->will_message)) { return AWS_OP_ERR; } } /* Write username */ if (packet->has_username) { if (s_encode_buffer(buf, packet->username)) { return AWS_OP_ERR; } } /* Write password */ if (packet->has_password) { if (s_encode_buffer(buf, packet->password)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } int aws_mqtt_packet_connect_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connect *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Check protocol name */ struct aws_byte_cursor protocol_name = { .ptr = NULL, .len = 0, }; if (s_decode_buffer(cur, &protocol_name)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } AWS_ASSERT(protocol_name.ptr && protocol_name.len); if (protocol_name.len != s_protocol_name.len) { return aws_raise_error(AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME); } if (memcmp(protocol_name.ptr, s_protocol_name.ptr, s_protocol_name.len) != 0) { return aws_raise_error(AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME); } /* Check protocol level */ struct aws_byte_cursor protocol_level = aws_byte_cursor_advance(cur, 1); if (protocol_level.len == 0) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (*protocol_level.ptr != S_PROTOCOL_LEVEL) { return aws_raise_error(AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_LEVEL); } /* Read connect flags [MQTT-3.1.2.3] */ uint8_t connect_flags = 0; if (!aws_byte_cursor_read_u8(cur, &connect_flags)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } packet->clean_session = (connect_flags >> 1) & 0x1; packet->has_will = (connect_flags >> 2) & 0x1; packet->will_qos = (connect_flags >> 3) & 0x3; packet->will_retain = (connect_flags >> 5) & 0x1; packet->has_password = (connect_flags >> 6) & 0x1; packet->has_username = (connect_flags >> 7) & 0x1; /* Read keep alive */ if (!aws_byte_cursor_read_be16(cur, &packet->keep_alive_timeout)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } 
/*************************************************************************/ /* Payload */ /* Client identifier is required, Read it */ if (s_decode_buffer(cur, &packet->client_identifier)) { return AWS_OP_ERR; } /* Read will */ if (packet->has_will) { if (s_decode_buffer(cur, &packet->will_topic)) { return AWS_OP_ERR; } if (s_decode_buffer(cur, &packet->will_message)) { return AWS_OP_ERR; } } /* Read username */ if (packet->has_username) { if (s_decode_buffer(cur, &packet->username)) { return AWS_OP_ERR; } } /* Read password */ if (packet->has_password) { if (s_decode_buffer(cur, &packet->password)) { return AWS_OP_ERR; } } /* Do validation */ if (packet->has_password && !packet->has_username) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_CREDENTIALS); } return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Connack */ int aws_mqtt_packet_connack_init( struct aws_mqtt_packet_connack *packet, bool session_present, enum aws_mqtt_connect_return_code return_code) { AWS_PRECONDITION(packet); AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = AWS_MQTT_PACKET_CONNACK; packet->fixed_header.remaining_length = 1 + sizeof(packet->connect_return_code); packet->session_present = session_present; packet->connect_return_code = (uint8_t)return_code; return AWS_OP_SUCCESS; } int aws_mqtt_packet_connack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connack *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Read connack flags */ uint8_t connack_flags = packet->session_present & 0x1; if (!aws_byte_buf_write_u8(buf, connack_flags)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Read return code */ if (!aws_byte_buf_write_u8(buf, packet->connect_return_code)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } int aws_mqtt_packet_connack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connack *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Read connack flags */ uint8_t connack_flags = 0; if (!aws_byte_cursor_read_u8(cur, &connack_flags)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } packet->session_present = connack_flags & 0x1; /* Read return code */ if (!aws_byte_cursor_read_u8(cur, &packet->connect_return_code)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Publish */ int aws_mqtt_packet_publish_init( struct aws_mqtt_packet_publish *packet, bool retain, enum aws_mqtt_qos qos, bool dup, struct aws_byte_cursor topic_name, uint16_t packet_identifier, struct aws_byte_cursor payload) { AWS_PRECONDITION(packet); AWS_FATAL_PRECONDITION(topic_name.len > 0); /* [MQTT-4.7.3-1] */ AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = AWS_MQTT_PACKET_PUBLISH; packet->fixed_header.remaining_length = s_sizeof_encoded_buffer(&topic_name) + payload.len; if (qos > 0) { packet->fixed_header.remaining_length 
+= sizeof(packet->packet_identifier); } /* [MQTT-2.2.2] */ uint8_t publish_flags = (uint8_t)((retain & 0x1) | (qos & 0x3) << 1 | (dup & 0x1) << 3); packet->fixed_header.flags = publish_flags; packet->topic_name = topic_name; packet->packet_identifier = packet_identifier; packet->payload = payload; return AWS_OP_SUCCESS; } int aws_mqtt_packet_publish_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet) { if (aws_mqtt_packet_publish_encode_headers(buf, packet)) { return AWS_OP_ERR; } /*************************************************************************/ /* Payload */ if (!aws_byte_buf_write(buf, packet->payload.ptr, packet->payload.len)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } int aws_mqtt_packet_publish_encode_headers(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Write topic name */ if (s_encode_buffer(buf, packet->topic_name)) { return AWS_OP_ERR; } enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(packet); if (qos > 0) { /* Write packet identifier */ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } return AWS_OP_SUCCESS; } int aws_mqtt_packet_publish_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_publish *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Read topic name */ if (s_decode_buffer(cur, &packet->topic_name)) { return AWS_OP_ERR; } size_t payload_size = packet->fixed_header.remaining_length - s_sizeof_encoded_buffer(&packet->topic_name); /* Read QoS */ enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(packet); if (qos > 2) { return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR); } /* Read packet identifier */ if (qos > 0) { if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } payload_size -= sizeof(packet->packet_identifier); } else { packet->packet_identifier = 0; } /*************************************************************************/ /* Payload */ packet->payload = aws_byte_cursor_advance(cur, payload_size); if (packet->payload.len != payload_size) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } bool aws_mqtt_packet_publish_get_dup(const struct aws_mqtt_packet_publish *packet) { return packet->fixed_header.flags & (1 << 3); /* bit 3 */ } void aws_mqtt_packet_publish_set_dup(struct aws_mqtt_packet_publish *packet) { packet->fixed_header.flags |= 0x08; } enum aws_mqtt_qos aws_mqtt_packet_publish_get_qos(const struct aws_mqtt_packet_publish *packet) { return (packet->fixed_header.flags >> 1) & 0x3; /* bits 2,1 */ } bool aws_mqtt_packet_publish_get_retain(const struct aws_mqtt_packet_publish *packet) { return packet->fixed_header.flags & 0x1; /* bit 0 */ } /*****************************************************************************/ /* Puback */ int aws_mqtt_packet_puback_init(struct 
aws_mqtt_packet_ack *packet, uint16_t packet_identifier) { s_ack_init(packet, AWS_MQTT_PACKET_PUBACK, packet_identifier); return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Pubrec */ int aws_mqtt_packet_pubrec_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) { s_ack_init(packet, AWS_MQTT_PACKET_PUBREC, packet_identifier); return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Pubrel */ int aws_mqtt_packet_pubrel_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) { s_ack_init(packet, AWS_MQTT_PACKET_PUBREL, packet_identifier); packet->fixed_header.flags = S_BIT_1_FLAGS; return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Pubcomp */ int aws_mqtt_packet_pubcomp_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) { s_ack_init(packet, AWS_MQTT_PACKET_PUBCOMP, packet_identifier); return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Subscribe */ int aws_mqtt_packet_subscribe_init( struct aws_mqtt_packet_subscribe *packet, struct aws_allocator *allocator, uint16_t packet_identifier) { AWS_PRECONDITION(packet); AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = AWS_MQTT_PACKET_SUBSCRIBE; packet->fixed_header.flags = S_BIT_1_FLAGS; packet->fixed_header.remaining_length = sizeof(uint16_t); packet->packet_identifier = packet_identifier; if (aws_array_list_init_dynamic(&packet->topic_filters, allocator, 1, sizeof(struct aws_mqtt_subscription))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt_packet_subscribe_clean_up(struct aws_mqtt_packet_subscribe *packet) { AWS_PRECONDITION(packet); aws_array_list_clean_up(&packet->topic_filters); AWS_ZERO_STRUCT(*packet); } int aws_mqtt_packet_subscribe_add_topic( struct aws_mqtt_packet_subscribe *packet, struct aws_byte_cursor topic_filter, enum aws_mqtt_qos qos) { AWS_PRECONDITION(packet); /* Add to the array list */ struct aws_mqtt_subscription subscription; subscription.topic_filter = topic_filter; subscription.qos = qos; if (aws_array_list_push_back(&packet->topic_filters, &subscription)) { return AWS_OP_ERR; } /* Add to the remaining length */ packet->fixed_header.remaining_length += s_sizeof_encoded_buffer(&topic_filter) + 1; return AWS_OP_SUCCESS; } int aws_mqtt_packet_subscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_subscribe *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Write packet identifier */ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write topic filters */ const size_t num_filters = aws_array_list_length(&packet->topic_filters); for (size_t i = 0; i < num_filters; ++i) { struct aws_mqtt_subscription *subscription; if (aws_array_list_get_at_ptr(&packet->topic_filters, (void **)&subscription, i)) { return AWS_OP_ERR; } s_encode_buffer(buf, subscription->topic_filter); uint8_t eos_byte = subscription->qos & 0x3; if (!aws_byte_buf_write_u8(buf, eos_byte)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } return AWS_OP_SUCCESS; } int aws_mqtt_packet_subscribe_decode(struct 
aws_byte_cursor *cur, struct aws_mqtt_packet_subscribe *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } if (packet->fixed_header.remaining_length < sizeof(uint16_t)) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH); } /*************************************************************************/ /* Variable Header */ /* Read packet identifier */ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Read topic filters */ size_t remaining_length = packet->fixed_header.remaining_length - sizeof(uint16_t); while (remaining_length) { struct aws_mqtt_subscription subscription = { .topic_filter = {.ptr = NULL, .len = 0}, .qos = 0, }; if (s_decode_buffer(cur, &subscription.topic_filter)) { return AWS_OP_ERR; } uint8_t eos_byte = 0; if (!aws_byte_cursor_read_u8(cur, &eos_byte)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if ((eos_byte >> 2) != 0) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS); } if (eos_byte == 0x3) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_QOS); } subscription.qos = eos_byte & 0x3; aws_array_list_push_back(&packet->topic_filters, &subscription); remaining_length -= s_sizeof_encoded_buffer(&subscription.topic_filter) + 1; } return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Suback */ int aws_mqtt_packet_suback_init( struct aws_mqtt_packet_suback *packet, struct aws_allocator *allocator, uint16_t packet_identifier) { AWS_PRECONDITION(packet); AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = AWS_MQTT_PACKET_SUBACK; packet->fixed_header.remaining_length = sizeof(uint16_t); packet->packet_identifier = packet_identifier; if (aws_array_list_init_dynamic(&packet->return_codes, allocator, 1, sizeof(uint8_t))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt_packet_suback_clean_up(struct aws_mqtt_packet_suback *packet) { AWS_PRECONDITION(packet); aws_array_list_clean_up(&packet->return_codes); AWS_ZERO_STRUCT(*packet); } static bool s_return_code_check(uint8_t return_code) { if (return_code != AWS_MQTT_QOS_FAILURE && return_code != AWS_MQTT_QOS_AT_MOST_ONCE && return_code != AWS_MQTT_QOS_AT_LEAST_ONCE && return_code != AWS_MQTT_QOS_EXACTLY_ONCE) { return false; } return true; } int aws_mqtt_packet_suback_add_return_code(struct aws_mqtt_packet_suback *packet, uint8_t return_code) { AWS_PRECONDITION(packet); if (!(s_return_code_check(return_code))) { return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR); } /* Add to the array list */ if (aws_array_list_push_back(&packet->return_codes, &return_code)) { return AWS_OP_ERR; } /* Add to the remaining length, each return code takes one byte */ packet->fixed_header.remaining_length += 1; return AWS_OP_SUCCESS; } int aws_mqtt_packet_suback_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_suback *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; } /*************************************************************************/ /* Variable Header */ /* Write packet identifier */ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) { return 
aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /*************************************************************************/ /* Payload */ /* Write return codes */ const size_t num_filters = aws_array_list_length(&packet->return_codes); for (size_t i = 0; i < num_filters; ++i) { uint8_t return_code = 0; if (aws_array_list_get_at(&packet->return_codes, (void *)&return_code, i)) { return AWS_OP_ERR; } if (!aws_byte_buf_write_u8(buf, return_code)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } return AWS_OP_SUCCESS; } int aws_mqtt_packet_suback_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_suback *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } /* Validate flags */ if (packet->fixed_header.flags != (aws_mqtt_packet_has_flags(&packet->fixed_header) ? S_BIT_1_FLAGS : 0U)) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS); } /*************************************************************************/ /* Variable Header */ /* Read packet identifier */ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /*************************************************************************/ /* Payload */ /* Read return codes */ size_t remaining_length = packet->fixed_header.remaining_length - sizeof(uint16_t); while (remaining_length) { uint8_t return_code = 0; if (!aws_byte_cursor_read_u8(cur, &return_code)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } if (!(s_return_code_check(return_code))) { return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR); } aws_array_list_push_back(&packet->return_codes, &return_code); remaining_length -= 1; } return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Unsubscribe */ int aws_mqtt_packet_unsubscribe_init( struct aws_mqtt_packet_unsubscribe *packet, struct aws_allocator *allocator, uint16_t packet_identifier) { AWS_PRECONDITION(packet); AWS_PRECONDITION(allocator); AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = AWS_MQTT_PACKET_UNSUBSCRIBE; packet->fixed_header.flags = S_BIT_1_FLAGS; packet->fixed_header.remaining_length = sizeof(uint16_t); packet->packet_identifier = packet_identifier; if (aws_array_list_init_dynamic(&packet->topic_filters, allocator, 1, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt_packet_unsubscribe_clean_up(struct aws_mqtt_packet_unsubscribe *packet) { AWS_PRECONDITION(packet); aws_array_list_clean_up(&packet->topic_filters); AWS_ZERO_STRUCT(*packet); } int aws_mqtt_packet_unsubscribe_add_topic( struct aws_mqtt_packet_unsubscribe *packet, struct aws_byte_cursor topic_filter) { AWS_PRECONDITION(packet); /* Add to the array list */ if (aws_array_list_push_back(&packet->topic_filters, &topic_filter)) { return AWS_OP_ERR; } /* Add to the remaining length */ packet->fixed_header.remaining_length += s_sizeof_encoded_buffer(&topic_filter); return AWS_OP_SUCCESS; } int aws_mqtt_packet_unsubscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_unsubscribe *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; }
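    /* Variable header: the packet identifier; payload: the list of length-prefixed topic filters (MQTT 3.1.1 sections 3.10.2-3.10.3). */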
/*************************************************************************/ /* Variable Header */ /* Write packet identifier */ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write topic filters */ const size_t num_filters = aws_array_list_length(&packet->topic_filters); for (size_t i = 0; i < num_filters; ++i) { struct aws_byte_cursor topic_filter = {.ptr = NULL, .len = 0}; if (aws_array_list_get_at(&packet->topic_filters, (void *)&topic_filter, i)) { return AWS_OP_ERR; } s_encode_buffer(buf, topic_filter); } return AWS_OP_SUCCESS; } int aws_mqtt_packet_unsubscribe_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_unsubscribe *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } if (packet->fixed_header.remaining_length < sizeof(uint16_t)) { return aws_raise_error(AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH); } /*************************************************************************/ /* Variable Header */ /* Read packet identifier */ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Read topic filters */ size_t remaining_length = packet->fixed_header.remaining_length - sizeof(uint16_t); while (remaining_length) { struct aws_byte_cursor topic_filter; AWS_ZERO_STRUCT(topic_filter); if (s_decode_buffer(cur, &topic_filter)) { return AWS_OP_ERR; } aws_array_list_push_back(&packet->topic_filters, &topic_filter); remaining_length -= s_sizeof_encoded_buffer(&topic_filter); } return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Unsuback */ int aws_mqtt_packet_unsuback_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) { s_ack_init(packet, AWS_MQTT_PACKET_UNSUBACK, packet_identifier); return AWS_OP_SUCCESS; } /*****************************************************************************/ /* Ping request/response */ static void s_connection_init(struct aws_mqtt_packet_connection *packet, enum aws_mqtt_packet_type type) { AWS_PRECONDITION(packet); AWS_ZERO_STRUCT(*packet); packet->fixed_header.packet_type = type; } int aws_mqtt_packet_pingreq_init(struct aws_mqtt_packet_connection *packet) { s_connection_init(packet, AWS_MQTT_PACKET_PINGREQ); return AWS_OP_SUCCESS; } int aws_mqtt_packet_pingresp_init(struct aws_mqtt_packet_connection *packet) { s_connection_init(packet, AWS_MQTT_PACKET_PINGRESP); return AWS_OP_SUCCESS; } int aws_mqtt_packet_disconnect_init(struct aws_mqtt_packet_connection *packet) { s_connection_init(packet, AWS_MQTT_PACKET_DISCONNECT); return AWS_OP_SUCCESS; } int aws_mqtt_packet_connection_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connection *packet) { AWS_PRECONDITION(buf); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_mqtt_packet_connection_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connection *packet) { AWS_PRECONDITION(cur); AWS_PRECONDITION(packet); /*************************************************************************/ /* Fixed Header */ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } 
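/*
 * Usage sketch (illustrative only; assumes the usual aws-c-common helpers
 * aws_byte_buf_init / aws_byte_cursor_from_buf / aws_byte_buf_clean_up and the
 * aws_mqtt_packet_ack_encode declaration from the private packets header; error
 * checks omitted for brevity): encode a PUBACK for packet id 42 into a buffer,
 * then decode it back and verify the identifier survived the round trip.
 *
 *     struct aws_allocator *alloc = aws_default_allocator();
 *
 *     struct aws_mqtt_packet_ack puback;
 *     aws_mqtt_packet_puback_init(&puback, 42);
 *
 *     struct aws_byte_buf buf;
 *     aws_byte_buf_init(&buf, alloc, 4); // 2-byte fixed header + 2-byte packet id
 *     aws_mqtt_packet_ack_encode(&buf, &puback);
 *
 *     struct aws_byte_cursor cur = aws_byte_cursor_from_buf(&buf);
 *     struct aws_mqtt_packet_ack decoded;
 *     aws_mqtt_packet_ack_decode(&cur, &decoded);
 *     AWS_ASSERT(decoded.packet_identifier == 42);
 *
 *     aws_byte_buf_clean_up(&buf);
 */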
aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/shared_constants.c000066400000000000000000000016731456575232400252320ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* * These defaults were chosen because they're commmon in other MQTT libraries. * The user can modify the request in their transform callback if they need to. */ static const struct aws_byte_cursor s_websocket_handshake_default_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/mqtt"); const struct aws_byte_cursor *g_websocket_handshake_default_path = &s_websocket_handshake_default_path; static const struct aws_http_header s_websocket_handshake_default_protocol_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt"), }; const struct aws_http_header *g_websocket_handshake_default_protocol_header = &s_websocket_handshake_default_protocol_header; aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/topic_tree.c000066400000000000000000001034761456575232400240310ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #ifdef _MSC_VER /* disables warning non const declared initializers for Microsoft compilers */ # pragma warning(disable : 4204) #endif /* _MSC_VER */ AWS_STATIC_STRING_FROM_LITERAL(s_single_level_wildcard, "+"); AWS_STATIC_STRING_FROM_LITERAL(s_multi_level_wildcard, "#"); /******************************************************************************* * Transactions ******************************************************************************/ struct topic_tree_action { enum { AWS_MQTT_TOPIC_TREE_ADD, AWS_MQTT_TOPIC_TREE_UPDATE, AWS_MQTT_TOPIC_TREE_REMOVE, } mode; /* All Nodes */ struct aws_mqtt_topic_node *node_to_update; /* ADD/UPDATE */ struct aws_byte_cursor topic; const struct aws_string *topic_filter; enum aws_mqtt_qos qos; aws_mqtt_publish_received_fn *callback; aws_mqtt_userdata_cleanup_fn *cleanup; void *userdata; /* ADD */ struct aws_mqtt_topic_node *last_found; struct aws_mqtt_topic_node *first_created; /* REMOVE */ struct aws_array_list to_remove; /* topic_tree_node* */ }; size_t aws_mqtt_topic_tree_action_size = sizeof(struct topic_tree_action); static struct topic_tree_action *s_topic_tree_action_create(struct aws_array_list *transaction) { struct topic_tree_action *action = NULL; /* Push an empty action into the transaction and get a pointer to it. 
*/ struct topic_tree_action empty_action; AWS_ZERO_STRUCT(empty_action); if (aws_array_list_push_back(transaction, &empty_action)) { AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "Failed to insert action into transaction, array_list_push_back failed"); goto push_back_failed; } if (aws_array_list_get_at_ptr(transaction, (void **)&action, aws_array_list_length(transaction) - 1)) { AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "Failed to retrieve most recent action from transaction"); goto get_at_failed; } AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "action=%p: Created action", (void *)action); return action; get_at_failed: aws_array_list_pop_back(transaction); push_back_failed: return NULL; } static void s_topic_tree_action_destroy(struct topic_tree_action *action) { AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "action=%p: Destroying action", (void *)action); if (action->mode == AWS_MQTT_TOPIC_TREE_REMOVE) { aws_array_list_clean_up(&action->to_remove); } AWS_ZERO_STRUCT(*action); } static int s_topic_tree_action_to_remove( struct topic_tree_action *action, struct aws_allocator *allocator, size_t size_hint) { if (action->mode != AWS_MQTT_TOPIC_TREE_REMOVE) { if (aws_array_list_init_dynamic(&action->to_remove, allocator, size_hint, sizeof(void *))) { AWS_LOGF_ERROR( AWS_LS_MQTT_TOPIC_TREE, "action=%p: Failed to initialize to_remove list in action", (void *)action); return AWS_OP_ERR; } action->mode = AWS_MQTT_TOPIC_TREE_REMOVE; } return AWS_OP_SUCCESS; } static bool byte_cursor_eq(const void *a, const void *b) { const struct aws_byte_cursor *cur_a = a; const struct aws_byte_cursor *cur_b = b; return aws_byte_cursor_eq(cur_a, cur_b); } /******************************************************************************* * Init ******************************************************************************/ static struct aws_mqtt_topic_node *s_topic_node_new( struct aws_allocator *allocator, const struct aws_byte_cursor *topic_filter, const struct aws_string *full_topic) { AWS_PRECONDITION(!topic_filter || full_topic); struct aws_mqtt_topic_node *node = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_topic_node)); if (!node) { AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "Failed to allocate new topic node"); return NULL; } if (topic_filter) { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "node=%p: Creating new node with topic filter " PRInSTR, (void *)node, AWS_BYTE_CURSOR_PRI(*topic_filter)); } if (topic_filter) { node->topic = *topic_filter; node->topic_filter = full_topic; } /* Init the sub topics map */ if (aws_hash_table_init(&node->subtopics, allocator, 0, aws_hash_byte_cursor_ptr, byte_cursor_eq, NULL, NULL)) { AWS_LOGF_ERROR( AWS_LS_MQTT_TOPIC_TREE, "node=%p: Failed to initialize subtopics table in topic node", (void *)node); aws_mem_release(allocator, node); return NULL; } return node; } static int s_topic_node_destroy_hash_foreach_wrap(void *context, struct aws_hash_element *elem); static void s_topic_node_destroy(struct aws_mqtt_topic_node *node, struct aws_allocator *allocator) { AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "node=%p: Destroying topic tree node", (void *)node); /* Traverse all children and remove */ aws_hash_table_foreach(&node->subtopics, s_topic_node_destroy_hash_foreach_wrap, allocator); if (node->cleanup && node->userdata) { node->cleanup(node->userdata); } if (node->owns_topic_filter) { aws_string_destroy((void *)node->topic_filter); } aws_hash_table_clean_up(&node->subtopics); aws_mem_release(allocator, node); } static int s_topic_node_destroy_hash_foreach_wrap(void *context, struct aws_hash_element *elem) 
{ s_topic_node_destroy(elem->value, context); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE | AWS_COMMON_HASH_TABLE_ITER_DELETE; } int aws_mqtt_topic_tree_init(struct aws_mqtt_topic_tree *tree, struct aws_allocator *allocator) { AWS_PRECONDITION(tree); AWS_PRECONDITION(allocator); AWS_LOGF_DEBUG(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Creating new topic tree", (void *)tree); tree->root = s_topic_node_new(allocator, NULL, NULL); if (!tree->root) { /* Error raised by s_topic_node_new */ return AWS_OP_ERR; } tree->allocator = allocator; return AWS_OP_SUCCESS; } /******************************************************************************* * Clean Up ******************************************************************************/ void aws_mqtt_topic_tree_clean_up(struct aws_mqtt_topic_tree *tree) { AWS_PRECONDITION(tree); AWS_LOGF_DEBUG(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Cleaning up topic tree", (void *)tree); if (tree->allocator && tree->root) { s_topic_node_destroy(tree->root, tree->allocator); AWS_ZERO_STRUCT(*tree); } } /******************************************************************************* * Iterate ******************************************************************************/ bool s_topic_node_is_subscription(const struct aws_mqtt_topic_node *node) { return node->callback; } struct topic_tree_iterate_context { bool should_continue; aws_mqtt_topic_tree_iterator_fn *iterator; void *user_data; }; static int s_topic_tree_iterate_do_recurse(void *context, struct aws_hash_element *current_elem) { struct topic_tree_iterate_context *ctx = context; struct aws_mqtt_topic_node *current = current_elem->value; if (s_topic_node_is_subscription(current)) { const struct aws_byte_cursor topic_filter = aws_byte_cursor_from_string(current->topic_filter); ctx->should_continue = ctx->iterator(&topic_filter, current->qos, ctx->user_data); } if (ctx->should_continue) { aws_hash_table_foreach(¤t->subtopics, s_topic_tree_iterate_do_recurse, context); } /* One of the children could have updated should_continue, so check again */ if (ctx->should_continue) { return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } /* If false returned, return immediately. */ return 0; } void aws_mqtt_topic_tree_iterate( const struct aws_mqtt_topic_tree *tree, aws_mqtt_topic_tree_iterator_fn *iterator, void *user_data) { AWS_PRECONDITION(tree); AWS_PRECONDITION(tree->root); AWS_PRECONDITION(iterator); struct topic_tree_iterate_context itr; itr.should_continue = true; itr.iterator = iterator; itr.user_data = user_data; aws_hash_table_foreach(&tree->root->subtopics, s_topic_tree_iterate_do_recurse, &itr); } bool s_topic_tree_sub_count_iterator(const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *user_data) { (void)topic; (void)qos; size_t *sub_count = user_data; *sub_count += 1; return true; } size_t aws_mqtt_topic_tree_get_sub_count(const struct aws_mqtt_topic_tree *tree) { AWS_PRECONDITION(tree); AWS_PRECONDITION(tree->root); size_t sub_count = 0; aws_mqtt_topic_tree_iterate(tree, s_topic_tree_sub_count_iterator, &sub_count); return sub_count; } /******************************************************************************* * Action Commit ******************************************************************************/ /* Searches subtree until a topic_filter with a different pointer value is found. 
*/ static int s_topic_node_string_finder(void *userdata, struct aws_hash_element *elem) { const struct aws_string **topic_filter = userdata; struct aws_mqtt_topic_node *node = elem->value; /* We've found this node again, search its children */ if (*topic_filter == node->topic_filter) { if (0 == aws_hash_table_get_entry_count(&node->subtopics)) { /* If no children, then there must be siblings, so we can use those */ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } aws_hash_table_foreach(&node->subtopics, s_topic_node_string_finder, userdata); if (*topic_filter == node->topic_filter) { /* If the topic filter still hasn't changed, continue iterating */ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, " Found matching topic string, using %s", node->topic_filter->bytes); return 0; } AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, " Found matching topic string, using %s", node->topic_filter->bytes); *topic_filter = node->topic_filter; return 0; } static void s_topic_tree_action_commit(struct topic_tree_action *action, struct aws_mqtt_topic_tree *tree) { (void)tree; AWS_PRECONDITION(action->node_to_update); switch (action->mode) { case AWS_MQTT_TOPIC_TREE_ADD: case AWS_MQTT_TOPIC_TREE_UPDATE: { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p action=%p: Committing %s topic tree action", (void *)tree, (void *)action, (action->mode == AWS_MQTT_TOPIC_TREE_ADD) ? "add" : "update"); /* Destroy old userdata */ if (action->node_to_update->cleanup && action->node_to_update->userdata) { /* If there was userdata assigned to this node, pass it out. */ action->node_to_update->cleanup(action->node_to_update->userdata); } /* Update data */ action->node_to_update->callback = action->callback; action->node_to_update->cleanup = action->cleanup; action->node_to_update->userdata = action->userdata; action->node_to_update->qos = action->qos; if (action->topic.ptr) { action->node_to_update->topic = action->topic; } if (action->topic_filter) { if (action->node_to_update->owns_topic_filter && action->node_to_update->topic_filter) { /* The topic filter is already there, destroy the new filter to keep all the byte cursors valid */ aws_string_destroy((void *)action->topic_filter); } else { action->node_to_update->topic_filter = action->topic_filter; action->node_to_update->owns_topic_filter = true; } } break; } case AWS_MQTT_TOPIC_TREE_REMOVE: { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p action=%p: Committing remove topic tree action", (void *)tree, (void *)action); struct aws_mqtt_topic_node *current = action->node_to_update; const size_t sub_parts_len = aws_array_list_length(&action->to_remove) - 1; if (current) { /* If found the node, traverse up and remove each with no sub-topics. * Then update all nodes that were using current's topic_filter for topic. */ /* "unsubscribe" current. */ if (current->cleanup && current->userdata) { AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "node=%p: Cleaning up node's userdata", (void *)current); /* If there was userdata assigned to this node, pass it out. */ current->cleanup(current->userdata); } current->callback = NULL; current->cleanup = NULL; current->userdata = NULL; /* Set to true if current needs to be cleaned up. */ bool destroy_current = false; /* How many nodes are left after the great purge. */ size_t nodes_left = sub_parts_len; /* Remove all subscription-less and child-less nodes.
*/ for (size_t i = sub_parts_len; i > 0; --i) { struct aws_mqtt_topic_node *node = NULL; aws_array_list_get_at(&action->to_remove, &node, i); AWS_ASSUME(node); /* Must be in bounds */ if (!s_topic_node_is_subscription(node) && 0 == aws_hash_table_get_entry_count(&node->subtopics)) { /* No subscription and no children, this node needs to go. */ struct aws_mqtt_topic_node *grandma = NULL; aws_array_list_get_at(&action->to_remove, &grandma, i - 1); AWS_ASSUME(grandma); /* Must be in bounds */ AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p node=%p: Removing child node %p with topic \"" PRInSTR "\"", (void *)tree, (void *)grandma, (void *)node, AWS_BYTE_CURSOR_PRI(node->topic)); aws_hash_table_remove(&grandma->subtopics, &node->topic, NULL, NULL); /* Make sure the following loop doesn't hit this node. */ --nodes_left; if (i != sub_parts_len) { /* Clean up and delete */ s_topic_node_destroy(node, tree->allocator); } else { // We do not delete the current node immediately as we would like to use // it to update the topic filter of the remaining nodes destroy_current = true; } } else { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Node %p with topic \"" PRInSTR "\" has children or is a subscription, leaving in place", (void *)tree, (void *)node, AWS_BYTE_CURSOR_PRI(node->topic)); /* Once we've found one node with children, the rest are guaranteed to. */ break; } } /* If at least one node is destroyed and there is node(s) remaining in the branch, * go fixup the topic filter reference . */ if (nodes_left > 0 && destroy_current) { /* If a new viable topic filter is found once, it can be used for all parents. */ const struct aws_string *new_topic_filter = NULL; const struct aws_string *const old_topic_filter = current->topic_filter; /* How much of new_topic_filter should be lopped off the beginning. */ struct aws_mqtt_topic_node *parent = NULL; aws_array_list_get_at(&action->to_remove, &parent, nodes_left); AWS_ASSUME(parent); size_t topic_offset = parent->topic.ptr - aws_string_bytes(parent->topic_filter) + parent->topic.len + 1; /* Loop through all remaining nodes to update the topic filters */ for (size_t i = nodes_left; i > 0; --i) { aws_array_list_get_at(&action->to_remove, &parent, i); AWS_ASSUME(parent); /* Must be in bounds */ /* Remove this topic and following / from offset. */ topic_offset -= (parent->topic.len + 1); if (parent->topic_filter == old_topic_filter) { /* Uh oh, Mom's using my topic string again! Steal it and replace it with a new one, Indiana * Jones style. */ AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Found node %p reusing topic filter part, replacing with next child", (void *)tree, (void *)parent); if (!new_topic_filter) { /* Set new_tf to old_tf so it's easier to check against the existing node. * Basically, it's an INOUT param. */ new_topic_filter = old_topic_filter; /* Search all subtopics until we find one that isn't current. */ aws_hash_table_foreach( &parent->subtopics, s_topic_node_string_finder, (void *)&new_topic_filter); /* This would only happen if there is only one topic in subtopics (current's) and * it has no children (in which case it should have been removed above as `destroy_current` is set to true). */ AWS_ASSERT(new_topic_filter != old_topic_filter); /* Now that the new string has been found, the old one can be destroyed. */ aws_string_destroy((void *)current->topic_filter); current->owns_topic_filter = false; } /* Update the pointers. 
*/ parent->topic_filter = new_topic_filter; parent->topic.ptr = (uint8_t *)aws_string_bytes(new_topic_filter) + topic_offset; } } } /* Now that the strings are updated, remove current. */ if (destroy_current) { s_topic_node_destroy(current, tree->allocator); } current = NULL; } break; } } s_topic_tree_action_destroy(action); } /******************************************************************************* * Action Roll Back ******************************************************************************/ static void s_topic_tree_action_roll_back(struct topic_tree_action *action, struct aws_mqtt_topic_tree *tree) { AWS_PRECONDITION(action); switch (action->mode) { case AWS_MQTT_TOPIC_TREE_ADD: { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p action=%p: Rolling back add transaction action", (void *)tree, (void *)action); /* Remove the first new node from its parent's map */ aws_hash_table_remove(&action->last_found->subtopics, &action->first_created->topic, NULL, NULL); /* Recursively destroy all other created nodes */ s_topic_node_destroy(action->first_created, tree->allocator); if (action->topic_filter) { aws_string_destroy((void *)action->topic_filter); } break; } case AWS_MQTT_TOPIC_TREE_REMOVE: case AWS_MQTT_TOPIC_TREE_UPDATE: { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p action=%p: Rolling back remove/update transaction, no changes made", (void *)tree, (void *)action); /* Aborting a remove or update doesn't require any actions. */ break; } } s_topic_tree_action_destroy(action); } /******************************************************************************* * Insert ******************************************************************************/ int aws_mqtt_topic_tree_transaction_insert( struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction, const struct aws_string *topic_filter_ori, enum aws_mqtt_qos qos, aws_mqtt_publish_received_fn *callback, aws_mqtt_userdata_cleanup_fn *cleanup, void *userdata) { AWS_PRECONDITION(tree); AWS_PRECONDITION(transaction); AWS_PRECONDITION(topic_filter_ori); AWS_PRECONDITION(callback); /* let the topic tree take ownership of the new string and leave the caller's string alone.
*/ struct aws_string *topic_filter = aws_string_new_from_string(tree->allocator, topic_filter_ori); AWS_LOGF_DEBUG( AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Inserting topic filter %s into topic tree", (void *)tree, topic_filter->bytes); struct aws_mqtt_topic_node *current = tree->root; struct topic_tree_action *action = s_topic_tree_action_create(transaction); if (!action) { return AWS_OP_ERR; } /* Default to update unless a node was added */ action->mode = AWS_MQTT_TOPIC_TREE_UPDATE; action->qos = qos; action->callback = callback; action->cleanup = cleanup; action->userdata = userdata; struct aws_byte_cursor topic_filter_cur = aws_byte_cursor_from_string(topic_filter); struct aws_byte_cursor sub_part; AWS_ZERO_STRUCT(sub_part); struct aws_byte_cursor last_part; AWS_ZERO_STRUCT(last_part); while (aws_byte_cursor_next_split(&topic_filter_cur, '/', &sub_part)) { last_part = sub_part; /* Add or find mid-node */ struct aws_hash_element *elem = NULL; int was_created = 0; aws_hash_table_create(¤t->subtopics, &sub_part, &elem, &was_created); if (was_created) { if (action->mode == AWS_MQTT_TOPIC_TREE_UPDATE) { /* Store the last found node */ action->last_found = current; } /* Node does not exist, add new one */ current = s_topic_node_new(tree->allocator, &sub_part, topic_filter); if (!current) { /* Don't do handle_error logic, the action needs to persist to be rolled back */ return AWS_OP_ERR; } /* Stash in the hash map */ elem->key = ¤t->topic; elem->value = current; if (action->mode == AWS_MQTT_TOPIC_TREE_UPDATE) { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Topic part \"" PRInSTR "\" is new, it and all children will be added as new nodes", (void *)tree, AWS_BYTE_CURSOR_PRI(sub_part)); /* Store the node we just made, and make sure we don't store again */ action->mode = AWS_MQTT_TOPIC_TREE_ADD; action->first_created = current; } } else { AWS_ASSERT(action->mode == AWS_MQTT_TOPIC_TREE_UPDATE); /* Can't have found an existing node while adding */ /* If the node exists, just traverse it */ current = elem->value; } } action->node_to_update = current; /* Node found (or created), add the topic filter and callbacks */ if (current->owns_topic_filter) { AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p node=%p: Updating existing node that already owns its topic_filter, throwing out parameter", (void *)tree, (void *)current); /* If the topic filter was already here, this is already a subscription. Free the new topic_filter so all existing byte_cursors remain valid. */ aws_string_destroy(topic_filter); } else { /* Node already existed (or was created) but wasn't subscription. 
*/ action->topic = last_part; action->topic_filter = topic_filter; } return AWS_OP_SUCCESS; } /******************************************************************************* * Remove ******************************************************************************/ int aws_mqtt_topic_tree_transaction_remove( struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction, const struct aws_byte_cursor *topic_filter, void **old_userdata) { AWS_PRECONDITION(tree); AWS_PRECONDITION(transaction); AWS_PRECONDITION(topic_filter); AWS_LOGF_DEBUG( AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Removing topic filter \"" PRInSTR "\" from topic tree", (void *)tree, AWS_BYTE_CURSOR_PRI(*topic_filter)); /* Initialize output parameter to a safe default */ if (old_userdata) { *old_userdata = NULL; } /* Default to error because that's what handle_error will do in all cases except node not found */ int result = AWS_OP_ERR; struct topic_tree_action *action = s_topic_tree_action_create(transaction); if (!action) { return AWS_OP_ERR; } struct aws_array_list sub_topic_parts; AWS_ZERO_STRUCT(sub_topic_parts); if (aws_array_list_init_dynamic(&sub_topic_parts, tree->allocator, 1, sizeof(struct aws_byte_cursor))) { AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to initialize topic parts array", (void *)tree); goto handle_error; } if (aws_byte_cursor_split_on_char(topic_filter, '/', &sub_topic_parts)) { AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to split topic filter", (void *)tree); goto handle_error; } const size_t sub_parts_len = aws_array_list_length(&sub_topic_parts); if (!sub_parts_len) { AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to get topic parts length", (void *)tree); goto handle_error; } s_topic_tree_action_to_remove(action, tree->allocator, sub_parts_len); struct aws_mqtt_topic_node *current = tree->root; if (aws_array_list_push_back(&action->to_remove, ¤t)) { AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to insert root node into to_remove list", (void *)tree); goto handle_error; } for (size_t i = 0; i < sub_parts_len; ++i) { /* Get the current topic part */ struct aws_byte_cursor *sub_part = NULL; aws_array_list_get_at_ptr(&sub_topic_parts, (void **)&sub_part, i); /* Find mid-node */ struct aws_hash_element *elem = NULL; aws_hash_table_find(¤t->subtopics, sub_part, &elem); if (elem) { /* If the node exists, just traverse it */ current = elem->value; if (aws_array_list_push_back(&action->to_remove, ¤t)) { AWS_LOGF_ERROR( AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to insert topic node into to_remove list", (void *)tree); goto handle_error; } } else { /* If not, abandon ship */ goto handle_not_found; } } action->node_to_update = current; aws_array_list_clean_up(&sub_topic_parts); if (old_userdata) { *old_userdata = current->userdata; } return AWS_OP_SUCCESS; handle_not_found: result = AWS_OP_SUCCESS; handle_error: aws_array_list_clean_up(&sub_topic_parts); s_topic_tree_action_destroy(action); aws_array_list_pop_back(transaction); return result; } /******************************************************************************* * Commit ******************************************************************************/ void aws_mqtt_topic_tree_transaction_commit(struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction) { const size_t num_actions = aws_array_list_length(transaction); for (size_t i = 0; i < num_actions; ++i) { struct topic_tree_action *action = NULL; aws_array_list_get_at_ptr(transaction, (void **)&action, i); AWS_ASSUME(action); /* Within bounds */ 
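        /* Commit actions in the order they were queued; roll back (below) walks the transaction in reverse. */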
s_topic_tree_action_commit(action, tree); } aws_array_list_clear(transaction); } /******************************************************************************* * Roll Back ******************************************************************************/ void aws_mqtt_topic_tree_transaction_roll_back(struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction) { const size_t num_actions = aws_array_list_length(transaction); for (size_t i = 1; i <= num_actions; ++i) { struct topic_tree_action *action = NULL; aws_array_list_get_at_ptr(transaction, (void **)&action, num_actions - i); AWS_ASSUME(action); /* Within bounds */ s_topic_tree_action_roll_back(action, tree); } aws_array_list_clear(transaction); } int aws_mqtt_topic_tree_insert( struct aws_mqtt_topic_tree *tree, const struct aws_string *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_publish_received_fn *callback, aws_mqtt_userdata_cleanup_fn *cleanup, void *userdata) { AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, aws_mqtt_topic_tree_action_size); struct aws_array_list transaction; aws_array_list_init_static(&transaction, transaction_buf, 1, aws_mqtt_topic_tree_action_size); if (aws_mqtt_topic_tree_transaction_insert(tree, &transaction, topic_filter, qos, callback, cleanup, userdata)) { aws_mqtt_topic_tree_transaction_roll_back(tree, &transaction); return AWS_OP_ERR; } aws_mqtt_topic_tree_transaction_commit(tree, &transaction); return AWS_OP_SUCCESS; } int aws_mqtt_topic_tree_remove(struct aws_mqtt_topic_tree *tree, const struct aws_byte_cursor *topic_filter) { AWS_PRECONDITION(tree); AWS_PRECONDITION(topic_filter); AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, aws_mqtt_topic_tree_action_size); struct aws_array_list transaction; aws_array_list_init_static(&transaction, transaction_buf, 1, aws_mqtt_topic_tree_action_size); if (aws_mqtt_topic_tree_transaction_remove(tree, &transaction, topic_filter, NULL)) { aws_mqtt_topic_tree_transaction_roll_back(tree, &transaction); return AWS_OP_ERR; } aws_mqtt_topic_tree_transaction_commit(tree, &transaction); return AWS_OP_SUCCESS; } /******************************************************************************* * Publish ******************************************************************************/ static void s_topic_tree_publish_do_recurse( const struct aws_byte_cursor *current_sub_part, const struct aws_mqtt_topic_node *current, const struct aws_mqtt_packet_publish *pub) { struct aws_byte_cursor hash_cur = aws_byte_cursor_from_string(s_multi_level_wildcard); struct aws_byte_cursor plus_cur = aws_byte_cursor_from_string(s_single_level_wildcard); struct aws_hash_element *elem = NULL; struct aws_byte_cursor sub_part = *current_sub_part; if (!aws_byte_cursor_next_split(&pub->topic_name, '/', &sub_part)) { /* If this is the last node and is a sub, call it */ if (s_topic_node_is_subscription(current)) { bool dup = aws_mqtt_packet_publish_get_dup(pub); enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(pub); bool retain = aws_mqtt_packet_publish_get_retain(pub); current->callback(&pub->topic_name, &pub->payload, dup, qos, retain, current->userdata); } return; } /* Check multi-level wildcard */ aws_hash_table_find(¤t->subtopics, &hash_cur, &elem); if (elem) { /* Match! 
*/ struct aws_mqtt_topic_node *multi_wildcard = elem->value; /* Must be a subscription and have no children */ AWS_ASSERT(s_topic_node_is_subscription(multi_wildcard)); AWS_ASSERT(0 == aws_hash_table_get_entry_count(&multi_wildcard->subtopics)); bool dup = aws_mqtt_packet_publish_get_dup(pub); enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(pub); bool retain = aws_mqtt_packet_publish_get_retain(pub); multi_wildcard->callback(&pub->topic_name, &pub->payload, dup, qos, retain, multi_wildcard->userdata); } /* Check single level wildcard */ aws_hash_table_find(¤t->subtopics, &plus_cur, &elem); if (elem) { /* Recurse sub topics */ s_topic_tree_publish_do_recurse(&sub_part, elem->value, pub); } /* Check actual topic name */ aws_hash_table_find(¤t->subtopics, &sub_part, &elem); if (elem) { /* Found the actual topic, recurse to it */ s_topic_tree_publish_do_recurse(&sub_part, elem->value, pub); } } void aws_mqtt_topic_tree_publish(const struct aws_mqtt_topic_tree *tree, struct aws_mqtt_packet_publish *pub) { AWS_PRECONDITION(tree); AWS_PRECONDITION(pub); AWS_LOGF_TRACE( AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Publishing on topic " PRInSTR, (void *)tree, AWS_BYTE_CURSOR_PRI(pub->topic_name)); struct aws_byte_cursor sub_part; AWS_ZERO_STRUCT(sub_part); s_topic_tree_publish_do_recurse(&sub_part, tree->root, pub); } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/000077500000000000000000000000001456575232400220475ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_callbacks.c000066400000000000000000000137711456575232400252750ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct aws_mqtt5_callback_set_entry { struct aws_allocator *allocator; struct aws_linked_list_node node; uint64_t id; struct aws_mqtt5_callback_set callbacks; }; void aws_mqtt5_callback_set_manager_init( struct aws_mqtt5_callback_set_manager *manager, struct aws_mqtt5_client *client) { manager->client = client; /* no need to ref count, it's assumed to be owned by the client */ manager->next_callback_set_entry_id = 1; aws_linked_list_init(&manager->callback_set_entries); } void aws_mqtt5_callback_set_manager_clean_up(struct aws_mqtt5_callback_set_manager *manager) { struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries); while (node != aws_linked_list_end(&manager->callback_set_entries)) { struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node); node = aws_linked_list_next(node); aws_linked_list_remove(&entry->node); aws_mem_release(entry->allocator, entry); } } static struct aws_mqtt5_callback_set_entry *s_new_callback_set_entry( struct aws_mqtt5_callback_set_manager *manager, struct aws_mqtt5_callback_set *callback_set) { struct aws_mqtt5_callback_set_entry *entry = aws_mem_calloc(manager->client->allocator, 1, sizeof(struct aws_mqtt5_callback_set_entry)); entry->allocator = manager->client->allocator; entry->id = manager->next_callback_set_entry_id++; entry->callbacks = *callback_set; AWS_LOGF_INFO( AWS_LS_MQTT5_GENERAL, "id=%p: callback manager created new entry :%" PRIu64, (void *)manager->client, entry->id); return entry; } uint64_t aws_mqtt5_callback_set_manager_push_front( struct aws_mqtt5_callback_set_manager *manager, struct aws_mqtt5_callback_set *callback_set) { AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop)); struct 
aws_mqtt5_callback_set_entry *entry = s_new_callback_set_entry(manager, callback_set); aws_linked_list_push_front(&manager->callback_set_entries, &entry->node); return entry->id; } void aws_mqtt5_callback_set_manager_remove(struct aws_mqtt5_callback_set_manager *manager, uint64_t callback_set_id) { AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop)); struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries); while (node != aws_linked_list_end(&manager->callback_set_entries)) { struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node); node = aws_linked_list_next(node); if (entry->id == callback_set_id) { aws_linked_list_remove(&entry->node); AWS_LOGF_INFO( AWS_LS_MQTT5_GENERAL, "id=%p: callback manager removed entry id=%" PRIu64, (void *)manager->client, entry->id); aws_mem_release(entry->allocator, entry); return; } } AWS_LOGF_INFO( AWS_LS_MQTT5_GENERAL, "id=%p: callback manager failed to remove entry id=%" PRIu64 ", callback set id not found.", (void *)manager->client, callback_set_id); } void aws_mqtt5_callback_set_manager_on_publish_received( struct aws_mqtt5_callback_set_manager *manager, const struct aws_mqtt5_packet_publish_view *publish_view) { AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop)); struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries); while (node != aws_linked_list_end(&manager->callback_set_entries)) { struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node); node = aws_linked_list_next(node); struct aws_mqtt5_callback_set *callback_set = &entry->callbacks; if (callback_set->listener_publish_received_handler != NULL) { bool handled = (*callback_set->listener_publish_received_handler)( publish_view, callback_set->listener_publish_received_handler_user_data); if (handled) { return; } } } if (manager->client->config->publish_received_handler != NULL) { (*manager->client->config->publish_received_handler)( publish_view, manager->client->config->publish_received_handler_user_data); } } void aws_mqtt5_callback_set_manager_on_lifecycle_event( struct aws_mqtt5_callback_set_manager *manager, const struct aws_mqtt5_client_lifecycle_event *lifecycle_event) { AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop)); struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries); while (node != aws_linked_list_end(&manager->callback_set_entries)) { struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node); node = aws_linked_list_next(node); struct aws_mqtt5_callback_set *callback_set = &entry->callbacks; if (callback_set->lifecycle_event_handler != NULL) { struct aws_mqtt5_client_lifecycle_event listener_copy = *lifecycle_event; listener_copy.user_data = callback_set->lifecycle_event_handler_user_data; (*callback_set->lifecycle_event_handler)(&listener_copy); } } struct aws_mqtt5_client_lifecycle_event client_copy = *lifecycle_event; client_copy.user_data = manager->client->config->lifecycle_event_handler_user_data; if (manager->client->config->lifecycle_event_handler != NULL) { (*manager->client->config->lifecycle_event_handler)(&client_copy); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_client.c000066400000000000000000003733731456575232400246430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif #define AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH 4096 #define AWS_MQTT5_DEFAULT_CONNACK_PACKET_TIMEOUT_MS 10000 #define DEFAULT_MQTT5_OPERATION_TABLE_SIZE 200 const char *aws_mqtt5_client_state_to_c_string(enum aws_mqtt5_client_state state) { switch (state) { case AWS_MCS_STOPPED: return "STOPPED"; case AWS_MCS_CONNECTING: return "CONNECTING"; case AWS_MCS_MQTT_CONNECT: return "MQTT_CONNECT"; case AWS_MCS_CONNECTED: return "CONNECTED"; case AWS_MCS_CLEAN_DISCONNECT: return "CLEAN_DISCONNECT"; case AWS_MCS_CHANNEL_SHUTDOWN: return "CHANNEL_SHUTDOWN"; case AWS_MCS_PENDING_RECONNECT: return "PENDING_RECONNECT"; case AWS_MCS_TERMINATED: return "TERMINATED"; default: return "UNKNOWN"; } } static bool s_aws_mqtt5_operation_is_retainable(struct aws_mqtt5_operation *operation) { switch (operation->packet_type) { case AWS_MQTT5_PT_PUBLISH: case AWS_MQTT5_PT_SUBSCRIBE: case AWS_MQTT5_PT_UNSUBSCRIBE: return true; default: return false; } } static void s_init_statistics(struct aws_mqtt5_client_operation_statistics_impl *stats) { aws_atomic_store_int(&stats->incomplete_operation_count_atomic, 0); aws_atomic_store_int(&stats->incomplete_operation_size_atomic, 0); aws_atomic_store_int(&stats->unacked_operation_count_atomic, 0); aws_atomic_store_int(&stats->unacked_operation_size_atomic, 0); } static bool s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy( struct aws_mqtt5_operation *operation, enum aws_mqtt5_client_operation_queue_behavior_type queue_behavior) { switch (aws_mqtt5_client_operation_queue_behavior_type_to_non_default(queue_behavior)) { case AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT: return false; case AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT: if (!s_aws_mqtt5_operation_is_retainable(operation)) { return false; } if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) { const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view; if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) { return false; } } return true; case AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT: if (!s_aws_mqtt5_operation_is_retainable(operation)) { return false; } if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) { const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view; if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { return true; } } return false; default: return false; } } typedef bool(mqtt5_operation_filter)(struct aws_mqtt5_operation *operation, void *filter_context); static void s_filter_operation_list( struct aws_linked_list *source_operations, mqtt5_operation_filter *filter_fn, struct aws_linked_list *filtered_operations, void *filter_context) { struct aws_linked_list_node *node = aws_linked_list_begin(source_operations); while (node != aws_linked_list_end(source_operations)) { struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node); node = aws_linked_list_next(node); if (filter_fn(operation, filter_context)) { aws_linked_list_remove(&operation->node); aws_linked_list_push_back(filtered_operations, &operation->node); } } } typedef void(mqtt5_operation_applicator)(struct aws_mqtt5_operation *operation, void *applicator_context); static void s_apply_to_operation_list( struct aws_linked_list *operations, 
mqtt5_operation_applicator *applicator_fn, void *applicator_context) { struct aws_linked_list_node *node = aws_linked_list_begin(operations); while (node != aws_linked_list_end(operations)) { struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node); node = aws_linked_list_next(node); applicator_fn(operation, applicator_context); } } static int s_aws_mqtt5_client_change_desired_state( struct aws_mqtt5_client *client, enum aws_mqtt5_client_state desired_state, struct aws_mqtt5_operation_disconnect *disconnect_operation); static uint64_t s_aws_mqtt5_client_compute_operational_state_service_time( const struct aws_mqtt5_client_operational_state *client_operational_state, uint64_t now); static int s_submit_operation(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation); static void s_complete_operation( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *view) { if (client != NULL) { aws_mqtt5_client_statistics_change_operation_statistic_state(client, operation, AWS_MQTT5_OSS_NONE); if (aws_priority_queue_node_is_in_queue(&operation->priority_queue_node)) { struct aws_mqtt5_operation *queued_operation = NULL; aws_priority_queue_remove( &client->operational_state.operations_by_ack_timeout, &queued_operation, &operation->priority_queue_node); } } aws_mqtt5_operation_complete(operation, error_code, packet_type, view); aws_mqtt5_operation_release(operation); } static void s_complete_operation_list( struct aws_mqtt5_client *client, struct aws_linked_list *operation_list, int error_code) { struct aws_linked_list_node *node = aws_linked_list_begin(operation_list); while (node != aws_linked_list_end(operation_list)) { struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node); node = aws_linked_list_next(node); s_complete_operation(client, operation, error_code, AWS_MQTT5_PT_NONE, NULL); } /* we've released everything, so reset the list to empty */ aws_linked_list_init(operation_list); } static void s_check_timeouts(struct aws_mqtt5_client *client, uint64_t now) { struct aws_priority_queue *timeout_queue = &client->operational_state.operations_by_ack_timeout; bool done = aws_priority_queue_size(timeout_queue) == 0; while (!done) { struct aws_mqtt5_operation **next_operation_by_timeout_ptr = NULL; aws_priority_queue_top(timeout_queue, (void **)&next_operation_by_timeout_ptr); AWS_FATAL_ASSERT(next_operation_by_timeout_ptr != NULL); struct aws_mqtt5_operation *next_operation_by_timeout = *next_operation_by_timeout_ptr; AWS_FATAL_ASSERT(next_operation_by_timeout != NULL); /* If the top of the heap hasn't timed out then nothing has */ if (next_operation_by_timeout->ack_timeout_timepoint_ns > now) { break; } /* Ack timeout for this operation has been reached */ aws_priority_queue_pop(timeout_queue, &next_operation_by_timeout); aws_mqtt5_packet_id_t packet_id = aws_mqtt5_operation_get_packet_id(next_operation_by_timeout); AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: %s packet with id:%d has timed out", (void *)client, aws_mqtt5_packet_type_to_c_string(next_operation_by_timeout->packet_type), (int)packet_id); struct aws_hash_element *elem = NULL; aws_hash_table_find(&client->operational_state.unacked_operations_table, &packet_id, &elem); if (elem == NULL || elem->value == NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: timeout for unknown operation with id %d", (void *)client, (int)packet_id); return; }
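/* The timed-out operation is tracked in both the unacked operation list and the unacked operation table; remove it from both below before completing it with AWS_ERROR_MQTT_TIMEOUT so no stale entry remains. */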
aws_linked_list_remove(&next_operation_by_timeout->node); aws_hash_table_remove(&client->operational_state.unacked_operations_table, &packet_id, NULL, NULL); s_complete_operation(client, next_operation_by_timeout, AWS_ERROR_MQTT_TIMEOUT, AWS_MQTT5_PT_NONE, NULL); done = aws_priority_queue_size(timeout_queue) == 0; } } static void s_mqtt5_client_final_destroy(struct aws_mqtt5_client *client) { if (client == NULL) { return; } aws_mqtt5_client_termination_completion_fn *client_termination_handler = NULL; void *client_termination_handler_user_data = NULL; if (client->config != NULL) { client_termination_handler = client->config->client_termination_handler; client_termination_handler_user_data = client->config->client_termination_handler_user_data; } aws_mqtt5_callback_set_manager_clean_up(&client->callback_manager); aws_mqtt5_client_operational_state_clean_up(&client->operational_state); aws_mqtt5_client_options_storage_destroy((struct aws_mqtt5_client_options_storage *)client->config); aws_mqtt5_negotiated_settings_clean_up(&client->negotiated_settings); aws_http_message_release(client->handshake); aws_mqtt5_encoder_clean_up(&client->encoder); aws_mqtt5_decoder_clean_up(&client->decoder); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&client->inbound_topic_alias_resolver); aws_mqtt5_outbound_topic_alias_resolver_destroy(client->outbound_topic_alias_resolver); aws_mem_release(client->allocator, client); if (client_termination_handler != NULL) { (*client_termination_handler)(client_termination_handler_user_data); } } static void s_on_mqtt5_client_zero_ref_count(void *user_data) { struct aws_mqtt5_client *client = user_data; s_aws_mqtt5_client_change_desired_state(client, AWS_MCS_TERMINATED, NULL); } static void s_aws_mqtt5_client_emit_stopped_lifecycle_event(struct aws_mqtt5_client *client) { AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: emitting stopped lifecycle event", (void *)client); struct aws_mqtt5_client_lifecycle_event event; AWS_ZERO_STRUCT(event); event.event_type = AWS_MQTT5_CLET_STOPPED; event.client = client; aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event); } static void s_aws_mqtt5_client_emit_connecting_lifecycle_event(struct aws_mqtt5_client *client) { AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: emitting connecting lifecycle event", (void *)client); client->lifecycle_state = AWS_MQTT5_LS_CONNECTING; struct aws_mqtt5_client_lifecycle_event event; AWS_ZERO_STRUCT(event); event.event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT; event.client = client; aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event); } static void s_aws_mqtt5_client_emit_connection_success_lifecycle_event( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_connack_view *connack_view) { AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: emitting connection success lifecycle event", (void *)client); client->lifecycle_state = AWS_MQTT5_LS_CONNECTED; struct aws_mqtt5_client_lifecycle_event event; AWS_ZERO_STRUCT(event); event.event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS; event.client = client; event.settings = &client->negotiated_settings; event.connack_data = connack_view; aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event); } /* * Emits either a CONNECTION_FAILED or DISCONNECT event based on the current life cycle state. Once a "final" * event is emitted by the client, it must attempt to reconnect before another one will be emitted, since the * lifecycle state check will early out until then. 
It is expected that this function may get called unnecessarily * often during various channel shutdown or disconnection/failure flows. This will not affect overall correctness. */ static void s_aws_mqtt5_client_emit_final_lifecycle_event( struct aws_mqtt5_client *client, int error_code, const struct aws_mqtt5_packet_connack_view *connack_view, const struct aws_mqtt5_packet_disconnect_view *disconnect_view) { if (client->lifecycle_state == AWS_MQTT5_LS_NONE) { /* we already emitted a final event earlier */ return; } struct aws_mqtt5_client_lifecycle_event event; AWS_ZERO_STRUCT(event); if (client->lifecycle_state == AWS_MQTT5_LS_CONNECTING) { AWS_FATAL_ASSERT(disconnect_view == NULL); event.event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE; AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: emitting connection failure lifecycle event with error code %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); } else { AWS_FATAL_ASSERT(client->lifecycle_state == AWS_MQTT5_LS_CONNECTED); AWS_FATAL_ASSERT(connack_view == NULL); event.event_type = AWS_MQTT5_CLET_DISCONNECTION; AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: emitting disconnection lifecycle event with error code %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); } event.error_code = error_code; event.connack_data = connack_view; event.disconnect_data = disconnect_view; client->lifecycle_state = AWS_MQTT5_LS_NONE; aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event); } /* * next_service_time == 0 means to not service the client, i.e. a state that only cares about external events * * This includes connecting and channel shutdown. Terminated is also included, but it's a state that only exists * instantaneously before final destruction. */ static uint64_t s_compute_next_service_time_client_stopped(struct aws_mqtt5_client *client, uint64_t now) { /* have we been told to connect or terminate? */ if (client->desired_state != AWS_MCS_STOPPED) { return now; } return 0; } static uint64_t s_compute_next_service_time_client_connecting(struct aws_mqtt5_client *client, uint64_t now) { (void)client; (void)now; return 0; } static uint64_t s_compute_next_service_time_client_mqtt_connect(struct aws_mqtt5_client *client, uint64_t now) { /* This state is interruptable by a stop/terminate */ if (client->desired_state != AWS_MCS_CONNECTED) { return now; } uint64_t operation_processing_time = s_aws_mqtt5_client_compute_operational_state_service_time(&client->operational_state, now); if (operation_processing_time == 0) { return client->next_mqtt_connect_packet_timeout_time; } return aws_min_u64(client->next_mqtt_connect_packet_timeout_time, operation_processing_time); } /* * Returns the minimum of two numbers, ignoring zero. Zero is returned only if both are zero. Useful when we're * computing (next service) timepoints and zero means "no timepoint" */ static uint64_t s_min_non_zero_u64(uint64_t a, uint64_t b) { if (a == 0) { return b; } if (b == 0) { return a; } return aws_min_u64(a, b); } /* * If there are unacked operations, returns the earliest point in time that one could timeout. 
*/ static uint64_t s_get_unacked_operation_timeout_for_next_service_time(struct aws_mqtt5_client *client) { if (aws_priority_queue_size(&client->operational_state.operations_by_ack_timeout) > 0) { struct aws_mqtt5_operation **operation = NULL; aws_priority_queue_top(&client->operational_state.operations_by_ack_timeout, (void **)&operation); return (*operation)->ack_timeout_timepoint_ns; } return 0; } static uint64_t s_compute_next_service_time_client_connected(struct aws_mqtt5_client *client, uint64_t now) { /* ping and ping timeout */ uint64_t next_service_time = client->next_ping_time; if (client->next_ping_timeout_time != 0) { next_service_time = aws_min_u64(next_service_time, client->next_ping_timeout_time); } next_service_time = s_min_non_zero_u64(next_service_time, s_get_unacked_operation_timeout_for_next_service_time(client)); if (client->desired_state != AWS_MCS_CONNECTED) { next_service_time = now; } uint64_t operation_processing_time = s_aws_mqtt5_client_compute_operational_state_service_time(&client->operational_state, now); next_service_time = s_min_non_zero_u64(operation_processing_time, next_service_time); /* reset reconnect delay interval */ next_service_time = s_min_non_zero_u64(client->next_reconnect_delay_reset_time_ns, next_service_time); return next_service_time; } static uint64_t s_compute_next_service_time_client_clean_disconnect(struct aws_mqtt5_client *client, uint64_t now) { uint64_t ack_timeout_time = s_get_unacked_operation_timeout_for_next_service_time(client); uint64_t operation_processing_time = s_aws_mqtt5_client_compute_operational_state_service_time(&client->operational_state, now); return s_min_non_zero_u64(ack_timeout_time, operation_processing_time); } static uint64_t s_compute_next_service_time_client_channel_shutdown(struct aws_mqtt5_client *client, uint64_t now) { (void)client; (void)now; return 0; } static uint64_t s_compute_next_service_time_client_pending_reconnect(struct aws_mqtt5_client *client, uint64_t now) { if (client->desired_state != AWS_MCS_CONNECTED) { return now; } return client->next_reconnect_time_ns; } static uint64_t s_compute_next_service_time_client_terminated(struct aws_mqtt5_client *client, uint64_t now) { (void)client; (void)now; return 0; } static uint64_t s_compute_next_service_time_by_current_state(struct aws_mqtt5_client *client, uint64_t now) { switch (client->current_state) { case AWS_MCS_STOPPED: return s_compute_next_service_time_client_stopped(client, now); case AWS_MCS_CONNECTING: return s_compute_next_service_time_client_connecting(client, now); case AWS_MCS_MQTT_CONNECT: return s_compute_next_service_time_client_mqtt_connect(client, now); case AWS_MCS_CONNECTED: return s_compute_next_service_time_client_connected(client, now); case AWS_MCS_CLEAN_DISCONNECT: return s_compute_next_service_time_client_clean_disconnect(client, now); case AWS_MCS_CHANNEL_SHUTDOWN: return s_compute_next_service_time_client_channel_shutdown(client, now); case AWS_MCS_PENDING_RECONNECT: return s_compute_next_service_time_client_pending_reconnect(client, now); case AWS_MCS_TERMINATED: return s_compute_next_service_time_client_terminated(client, now); } return 0; } static void s_reevaluate_service_task(struct aws_mqtt5_client *client) { /* * This causes the client to only reevaluate service schedule time at the end of the service call or in * a callback from an external event. 
*/ if (client->in_service) { return; } uint64_t now = (*client->vtable->get_current_time_fn)(); uint64_t next_service_time = s_compute_next_service_time_by_current_state(client, now); /* * This catches both the case when there's an existing service schedule and we either want to not * perform it (next_service_time == 0) or need to run service at a different time than the current scheduled time. */ if (next_service_time != client->next_service_task_run_time && client->next_service_task_run_time > 0) { aws_event_loop_cancel_task(client->loop, &client->service_task); client->next_service_task_run_time = 0; AWS_LOGF_TRACE(AWS_LS_MQTT5_CLIENT, "id=%p: cancelling previously scheduled service task", (void *)client); } if (next_service_time > 0 && (next_service_time < client->next_service_task_run_time || client->next_service_task_run_time == 0)) { aws_event_loop_schedule_task_future(client->loop, &client->service_task, next_service_time); AWS_LOGF_TRACE( AWS_LS_MQTT5_CLIENT, "id=%p: scheduled service task for time %" PRIu64, (void *)client, next_service_time); } client->next_service_task_run_time = next_service_time; } static void s_enqueue_operation_back(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: enqueuing %s operation to back", (void *)client, aws_mqtt5_packet_type_to_c_string(operation->packet_type)); aws_linked_list_push_back(&client->operational_state.queued_operations, &operation->node); s_reevaluate_service_task(client); } static void s_enqueue_operation_front(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: enqueuing %s operation to front", (void *)client, aws_mqtt5_packet_type_to_c_string(operation->packet_type)); aws_linked_list_push_front(&client->operational_state.queued_operations, &operation->node); s_reevaluate_service_task(client); } static void s_aws_mqtt5_client_operational_state_reset( struct aws_mqtt5_client_operational_state *client_operational_state, int completion_error_code, bool is_final) { struct aws_mqtt5_client *client = client_operational_state->client; s_complete_operation_list(client, &client_operational_state->queued_operations, completion_error_code); s_complete_operation_list(client, &client_operational_state->write_completion_operations, completion_error_code); s_complete_operation_list(client, &client_operational_state->unacked_operations, completion_error_code); if (is_final) { aws_priority_queue_clean_up(&client_operational_state->operations_by_ack_timeout); aws_hash_table_clean_up(&client_operational_state->unacked_operations_table); } else { aws_priority_queue_clear(&client->operational_state.operations_by_ack_timeout); aws_hash_table_clear(&client_operational_state->unacked_operations_table); } } static void s_change_current_state(struct aws_mqtt5_client *client, enum aws_mqtt5_client_state next_state); static void s_change_current_state_to_stopped(struct aws_mqtt5_client *client) { client->current_state = AWS_MCS_STOPPED; s_aws_mqtt5_client_operational_state_reset(&client->operational_state, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, false); /* Stop works as a complete session wipe, and so the next time we connect, we want it to be clean */ client->has_connected_successfully = false; s_aws_mqtt5_client_emit_stopped_lifecycle_event(client); } static void s_aws_mqtt5_client_shutdown_channel(struct aws_mqtt5_client *client, int error_code) { if (error_code == AWS_ERROR_SUCCESS) { error_code = AWS_ERROR_UNKNOWN; } 
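/* Channel shutdown is always surfaced as a failure: a zero error code was normalized to AWS_ERROR_UNKNOWN above, so the final lifecycle event emitted next always carries a non-success reason. */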
s_aws_mqtt5_client_emit_final_lifecycle_event(client, error_code, NULL, NULL); if (client->current_state != AWS_MCS_MQTT_CONNECT && client->current_state != AWS_MCS_CONNECTED && client->current_state != AWS_MCS_CLEAN_DISCONNECT) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: client channel shutdown invoked from unexpected state %d(%s)", (void *)client, (int)client->current_state, aws_mqtt5_client_state_to_c_string(client->current_state)); return; } if (client->slot == NULL || client->slot->channel == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: client channel shutdown invoked without a channel", (void *)client); return; } s_change_current_state(client, AWS_MCS_CHANNEL_SHUTDOWN); (*client->vtable->channel_shutdown_fn)(client->slot->channel, error_code); } static void s_aws_mqtt5_client_shutdown_channel_with_disconnect( struct aws_mqtt5_client *client, int error_code, struct aws_mqtt5_operation_disconnect *disconnect_op) { if (client->current_state != AWS_MCS_CONNECTED && client->current_state != AWS_MCS_MQTT_CONNECT) { s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } aws_linked_list_push_front(&client->operational_state.queued_operations, &disconnect_op->base.node); aws_mqtt5_operation_disconnect_acquire(disconnect_op); client->clean_disconnect_error_code = error_code; s_change_current_state(client, AWS_MCS_CLEAN_DISCONNECT); } static void s_on_disconnect_operation_complete(int error_code, void *user_data) { struct aws_mqtt5_client *client = user_data; s_aws_mqtt5_client_shutdown_channel( client, (error_code != AWS_ERROR_SUCCESS) ? error_code : client->clean_disconnect_error_code); } static void s_aws_mqtt5_client_shutdown_channel_clean( struct aws_mqtt5_client *client, int error_code, enum aws_mqtt5_disconnect_reason_code reason_code) { struct aws_mqtt5_packet_disconnect_view disconnect_options = { .reason_code = reason_code, }; struct aws_mqtt5_disconnect_completion_options internal_completion_options = { .completion_callback = s_on_disconnect_operation_complete, .completion_user_data = client, }; struct aws_mqtt5_operation_disconnect *disconnect_op = aws_mqtt5_operation_disconnect_new(client->allocator, &disconnect_options, NULL, &internal_completion_options); if (disconnect_op == NULL) { s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } s_aws_mqtt5_client_shutdown_channel_with_disconnect(client, error_code, disconnect_op); aws_mqtt5_operation_disconnect_release(disconnect_op); } static void s_mqtt5_client_shutdown_final(int error_code, struct aws_mqtt5_client *client) { AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(client->loop)); s_aws_mqtt5_client_emit_final_lifecycle_event(client, error_code, NULL, NULL); AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: channel tore down with error code %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); if (client->slot) { aws_channel_slot_remove(client->slot); AWS_LOGF_TRACE(AWS_LS_MQTT5_CLIENT, "id=%p: slot removed successfully", (void *)client); client->slot = NULL; } aws_mqtt5_client_on_disconnection_update_operational_state(client); if (client->desired_state == AWS_MCS_CONNECTED) { s_change_current_state(client, AWS_MCS_PENDING_RECONNECT); } else { s_change_current_state(client, AWS_MCS_STOPPED); } } static void s_mqtt5_client_shutdown( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)channel; struct aws_mqtt5_client *client = user_data; if (error_code == AWS_ERROR_SUCCESS) { error_code = 
AWS_ERROR_MQTT_UNEXPECTED_HANGUP; } AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(client->loop)); s_mqtt5_client_shutdown_final(error_code, client); } static void s_mqtt5_client_setup( struct aws_client_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; /* Setup callback contract is: if error_code is non-zero then channel is NULL. */ AWS_FATAL_ASSERT((error_code != 0) == (channel == NULL)); struct aws_mqtt5_client *client = user_data; if (error_code != AWS_OP_SUCCESS) { /* client shutdown already handles this case, so just call that. */ s_mqtt5_client_shutdown(bootstrap, error_code, channel, user_data); return; } AWS_FATAL_ASSERT(client->current_state == AWS_MCS_CONNECTING); AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(client->loop)); if (client->desired_state != AWS_MCS_CONNECTED) { aws_raise_error(AWS_ERROR_MQTT5_USER_REQUESTED_STOP); goto error; } client->slot = aws_channel_slot_new(channel); /* allocs or crashes */ if (aws_channel_slot_insert_end(channel, client->slot)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: Failed to insert slot into channel %p, error %d (%s).", (void *)client, (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto error; } if (aws_channel_slot_set_handler(client->slot, &client->handler)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: Failed to set MQTT handler into slot on channel %p, error %d (%s).", (void *)client, (void *)channel, aws_last_error(), aws_error_name(aws_last_error())); goto error; } s_change_current_state(client, AWS_MCS_MQTT_CONNECT); return; error: s_change_current_state(client, AWS_MCS_CHANNEL_SHUTDOWN); (*client->vtable->channel_shutdown_fn)(channel, aws_last_error()); } static void s_on_websocket_shutdown(struct aws_websocket *websocket, int error_code, void *user_data) { struct aws_mqtt5_client *client = user_data; struct aws_channel *channel = client->slot ? client->slot->channel : NULL; s_mqtt5_client_shutdown(client->config->bootstrap, error_code, channel, client); if (websocket) { aws_websocket_release(websocket); } } static void s_on_websocket_setup(const struct aws_websocket_on_connection_setup_data *setup, void *user_data) { struct aws_mqtt5_client *client = user_data; client->handshake = aws_http_message_release(client->handshake); /* Setup callback contract is: if error_code is non-zero then websocket is NULL. */ AWS_FATAL_ASSERT((setup->error_code != 0) == (setup->websocket == NULL)); struct aws_channel *channel = NULL; if (setup->websocket) { channel = aws_websocket_get_channel(setup->websocket); AWS_ASSERT(channel); /* Websocket must be "converted" before the MQTT handler can be installed next to it. */ if (aws_websocket_convert_to_midchannel_handler(setup->websocket)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: Failed converting websocket, error %d (%s)", (void *)client, aws_last_error(), aws_error_name(aws_last_error())); (*client->vtable->channel_shutdown_fn)(channel, aws_last_error()); return; } } /* Call into the channel-setup callback, the rest of the logic is the same. 
*/ s_mqtt5_client_setup(client->config->bootstrap, setup->error_code, channel, client); } struct aws_mqtt5_websocket_transform_complete_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt5_client *client; int error_code; struct aws_http_message *handshake; }; void s_websocket_transform_complete_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt5_websocket_transform_complete_task *websocket_transform_complete_task = arg; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } struct aws_mqtt5_client *client = websocket_transform_complete_task->client; aws_http_message_release(client->handshake); client->handshake = aws_http_message_acquire(websocket_transform_complete_task->handshake); int error_code = websocket_transform_complete_task->error_code; if (error_code == 0 && client->desired_state == AWS_MCS_CONNECTED) { struct aws_websocket_client_connection_options websocket_options = { .allocator = client->allocator, .bootstrap = client->config->bootstrap, .socket_options = &client->config->socket_options, .tls_options = client->config->tls_options_ptr, .host = aws_byte_cursor_from_string(client->config->host_name), .port = client->config->port, .handshake_request = websocket_transform_complete_task->handshake, .initial_window_size = 0, /* Prevent websocket data from arriving before the MQTT handler is installed */ .user_data = client, .on_connection_setup = s_on_websocket_setup, .on_connection_shutdown = s_on_websocket_shutdown, .requested_event_loop = client->loop, .host_resolution_config = &client->config->host_resolution_override}; if (client->config->http_proxy_config != NULL) { websocket_options.proxy_options = &client->config->http_proxy_options; } if (client->vtable->websocket_connect_fn(&websocket_options)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: Failed to initiate websocket connection.", (void *)client); error_code = aws_last_error(); goto error; } goto done; } else { if (error_code == AWS_ERROR_SUCCESS) { AWS_ASSERT(client->desired_state != AWS_MCS_CONNECTED); error_code = AWS_ERROR_MQTT5_USER_REQUESTED_STOP; } } error:; struct aws_websocket_on_connection_setup_data websocket_setup = {.error_code = error_code}; s_on_websocket_setup(&websocket_setup, client); done: aws_http_message_release(websocket_transform_complete_task->handshake); aws_mqtt5_client_release(websocket_transform_complete_task->client); aws_mem_release(websocket_transform_complete_task->allocator, websocket_transform_complete_task); } static void s_websocket_handshake_transform_complete( struct aws_http_message *handshake_request, int error_code, void *complete_ctx) { struct aws_mqtt5_client *client = complete_ctx; struct aws_mqtt5_websocket_transform_complete_task *task = aws_mem_calloc(client->allocator, 1, sizeof(struct aws_mqtt5_websocket_transform_complete_task)); aws_task_init( &task->task, s_websocket_transform_complete_task_fn, (void *)task, "WebsocketHandshakeTransformComplete"); task->allocator = client->allocator; task->client = aws_mqtt5_client_acquire(client); task->error_code = error_code; task->handshake = handshake_request; aws_event_loop_schedule_task_now(client->loop, &task->task); } static int s_websocket_connect(struct aws_mqtt5_client *client) { AWS_ASSERT(client); AWS_ASSERT(client->config->websocket_handshake_transform); /* Build websocket handshake request */ struct aws_http_message *handshake = aws_http_message_new_websocket_handshake_request( client->allocator, *g_websocket_handshake_default_path, 
aws_byte_cursor_from_string(client->config->host_name)); if (handshake == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: Failed to generate websocket handshake request", (void *)client); return AWS_OP_ERR; } if (aws_http_message_add_header(handshake, *g_websocket_handshake_default_protocol_header)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: Failed to add default header to websocket handshake request", (void *)client); goto on_error; } AWS_LOGF_TRACE(AWS_LS_MQTT5_CLIENT, "id=%p: Transforming websocket handshake request.", (void *)client); /* * There is no need to inc the client's ref count here since this state (AWS_MCS_CONNECTING) is uninterruptible by * the async destruction process. Only a completion of the chain of connection establishment callbacks can cause * this state to be left by the client. */ client->config->websocket_handshake_transform( handshake, client->config->websocket_handshake_transform_user_data, s_websocket_handshake_transform_complete, client); return AWS_OP_SUCCESS; on_error: aws_http_message_release(handshake); return AWS_OP_ERR; } static void s_change_current_state_to_connecting(struct aws_mqtt5_client *client) { AWS_ASSERT(client->current_state == AWS_MCS_STOPPED || client->current_state == AWS_MCS_PENDING_RECONNECT); client->current_state = AWS_MCS_CONNECTING; client->clean_disconnect_error_code = AWS_ERROR_SUCCESS; client->should_reset_connection = false; s_aws_mqtt5_client_emit_connecting_lifecycle_event(client); int result = 0; if (client->config->websocket_handshake_transform != NULL) { result = s_websocket_connect(client); } else { struct aws_socket_channel_bootstrap_options channel_options; AWS_ZERO_STRUCT(channel_options); channel_options.bootstrap = client->config->bootstrap; channel_options.host_name = aws_string_c_str(client->config->host_name); channel_options.port = client->config->port; channel_options.socket_options = &client->config->socket_options; channel_options.tls_options = client->config->tls_options_ptr; channel_options.setup_callback = &s_mqtt5_client_setup; channel_options.shutdown_callback = &s_mqtt5_client_shutdown; channel_options.user_data = client; channel_options.requested_event_loop = client->loop; channel_options.host_resolution_override_config = &client->config->host_resolution_override; if (client->config->http_proxy_config == NULL) { result = (*client->vtable->client_bootstrap_new_socket_channel_fn)(&channel_options); } else { result = (*client->vtable->http_proxy_new_socket_channel_fn)( &channel_options, &client->config->http_proxy_options); } } if (result) { int error_code = aws_last_error(); AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: failed to kick off connection with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); s_aws_mqtt5_client_emit_final_lifecycle_event(client, aws_last_error(), NULL, NULL); s_change_current_state(client, AWS_MCS_PENDING_RECONNECT); } } static int s_aws_mqtt5_client_set_current_operation( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) { if (aws_mqtt5_operation_bind_packet_id(operation, &client->operational_state)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to bind mqtt packet id for current operation, with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } if (aws_mqtt5_encoder_append_packet_encoding(&client->encoder, operation->packet_type, operation->packet_view)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: 
failed to append packet encoding sequence for current operation with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } client->operational_state.current_operation = operation; return AWS_OP_SUCCESS; } static void s_reset_ping(struct aws_mqtt5_client *client) { uint64_t now = (*client->vtable->get_current_time_fn)(); uint16_t keep_alive_seconds = client->negotiated_settings.server_keep_alive; uint64_t keep_alive_interval_nanos = aws_timestamp_convert(keep_alive_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); client->next_ping_time = aws_add_u64_saturating(now, keep_alive_interval_nanos); AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: next PINGREQ scheduled for time %" PRIu64, (void *)client, client->next_ping_time); } static void s_aws_mqtt5_on_socket_write_completion_mqtt_connect(struct aws_mqtt5_client *client, int error_code) { if (error_code != AWS_ERROR_SUCCESS) { s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } s_reevaluate_service_task(client); } static void s_aws_mqtt5_on_socket_write_completion_connected(struct aws_mqtt5_client *client, int error_code) { if (error_code != AWS_ERROR_SUCCESS) { s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } s_reevaluate_service_task(client); } static void s_aws_mqtt5_on_socket_write_completion( struct aws_channel *channel, struct aws_io_message *message, int error_code, void *user_data) { (void)channel; (void)message; struct aws_mqtt5_client *client = user_data; client->operational_state.pending_write_completion = false; if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: socket write completion invoked with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); } switch (client->current_state) { case AWS_MCS_MQTT_CONNECT: s_aws_mqtt5_on_socket_write_completion_mqtt_connect(client, error_code); break; case AWS_MCS_CONNECTED: s_aws_mqtt5_on_socket_write_completion_connected(client, error_code); break; case AWS_MCS_CLEAN_DISCONNECT: /* the CONNECTED callback works just fine for CLEAN_DISCONNECT */ s_aws_mqtt5_on_socket_write_completion_connected(client, error_code); break; default: break; } s_complete_operation_list(client, &client->operational_state.write_completion_operations, error_code); } static bool s_should_resume_session(const struct aws_mqtt5_client *client) { enum aws_mqtt5_client_session_behavior_type session_behavior = aws_mqtt5_client_session_behavior_type_to_non_default(client->config->session_behavior); return (session_behavior == AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS && client->has_connected_successfully) || (session_behavior == AWS_MQTT5_CSBT_REJOIN_ALWAYS); } static void s_change_current_state_to_mqtt_connect(struct aws_mqtt5_client *client) { AWS_FATAL_ASSERT(client->current_state == AWS_MCS_CONNECTING); AWS_FATAL_ASSERT(client->operational_state.current_operation == NULL); client->current_state = AWS_MCS_MQTT_CONNECT; if (client->should_reset_connection) { s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT); return; } client->operational_state.pending_write_completion = false; aws_mqtt5_encoder_reset(&client->encoder); aws_mqtt5_decoder_reset(&client->decoder); bool resume_session = s_should_resume_session(client); struct aws_mqtt5_packet_connect_view connect_view = client->config->connect->storage_view; connect_view.clean_start = !resume_session; if (aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default( 
client->config->topic_aliasing_options.inbound_topic_alias_behavior) == AWS_MQTT5_CITABT_ENABLED) { connect_view.topic_alias_maximum = &client->config->topic_aliasing_options.inbound_alias_cache_size; } aws_mqtt5_negotiated_settings_reset(&client->negotiated_settings, &connect_view); connect_view.client_id = aws_byte_cursor_from_buf(&client->negotiated_settings.client_id_storage); struct aws_mqtt5_operation_connect *connect_op = aws_mqtt5_operation_connect_new(client->allocator, &connect_view); if (connect_op == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to create CONNECT operation with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } s_enqueue_operation_front(client, &connect_op->base); uint32_t timeout_ms = client->config->connack_timeout_ms; if (timeout_ms == 0) { timeout_ms = AWS_MQTT5_DEFAULT_CONNACK_PACKET_TIMEOUT_MS; } uint64_t now = (*client->vtable->get_current_time_fn)(); client->next_mqtt_connect_packet_timeout_time = now + aws_timestamp_convert(timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: setting CONNECT timeout to %" PRIu64, (void *)client, client->next_mqtt_connect_packet_timeout_time); } static void s_reset_reconnection_delay_time(struct aws_mqtt5_client *client) { uint64_t now = (*client->vtable->get_current_time_fn)(); uint64_t reset_reconnection_delay_time_nanos = aws_timestamp_convert( client->config->min_connected_time_to_reset_reconnect_delay_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); client->next_reconnect_delay_reset_time_ns = aws_add_u64_saturating(now, reset_reconnection_delay_time_nanos); AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: reconnection delay reset time set to %" PRIu64, (void *)client, client->next_reconnect_delay_reset_time_ns); } static void s_change_current_state_to_connected(struct aws_mqtt5_client *client) { AWS_FATAL_ASSERT(client->current_state == AWS_MCS_MQTT_CONNECT); client->current_state = AWS_MCS_CONNECTED; aws_mqtt5_client_on_connection_update_operational_state(client); client->has_connected_successfully = true; client->next_ping_timeout_time = 0; s_reset_ping(client); s_reset_reconnection_delay_time(client); } static void s_change_current_state_to_clean_disconnect(struct aws_mqtt5_client *client) { (void)client; AWS_FATAL_ASSERT(client->current_state == AWS_MCS_MQTT_CONNECT || client->current_state == AWS_MCS_CONNECTED); client->current_state = AWS_MCS_CLEAN_DISCONNECT; } static void s_change_current_state_to_channel_shutdown(struct aws_mqtt5_client *client) { enum aws_mqtt5_client_state current_state = client->current_state; AWS_FATAL_ASSERT( current_state == AWS_MCS_MQTT_CONNECT || current_state == AWS_MCS_CONNECTING || current_state == AWS_MCS_CONNECTED || current_state == AWS_MCS_CLEAN_DISCONNECT); client->current_state = AWS_MCS_CHANNEL_SHUTDOWN; /* * Critical requirement: The caller must invoke the channel shutdown function themselves (with the desired error * code) *after* changing state. * * The caller is the only one with the error context and we want to be safe and avoid the possibility of a * synchronous channel shutdown (mocks) leading to a situation where we get the shutdown callback before we've * transitioned into the CHANNEL_SHUTDOWN state. * * We could relax this if a synchronous channel shutdown is literally impossible even with mocked channels. 
*/ } /* TODO: refactor and reunify with internals of retry strategy to expose these as usable functions in aws-c-io */ static uint64_t s_aws_mqtt5_compute_reconnect_backoff_no_jitter(struct aws_mqtt5_client *client) { uint64_t retry_count = aws_min_u64(client->reconnect_count, 63); return aws_mul_u64_saturating((uint64_t)1 << retry_count, client->config->min_reconnect_delay_ms); } static uint64_t s_aws_mqtt5_compute_reconnect_backoff_full_jitter(struct aws_mqtt5_client *client) { uint64_t non_jittered = s_aws_mqtt5_compute_reconnect_backoff_no_jitter(client); return aws_mqtt5_client_random_in_range(0, non_jittered); } static uint64_t s_compute_deccorelated_jitter(struct aws_mqtt5_client *client) { uint64_t last_backoff_val = client->current_reconnect_delay_ms; if (!last_backoff_val) { return s_aws_mqtt5_compute_reconnect_backoff_full_jitter(client); } return aws_mqtt5_client_random_in_range( client->config->min_reconnect_delay_ms, aws_mul_u64_saturating(last_backoff_val, 3)); } static void s_update_reconnect_delay_for_pending_reconnect(struct aws_mqtt5_client *client) { uint64_t delay_ms = 0; switch (client->config->retry_jitter_mode) { case AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED: delay_ms = s_compute_deccorelated_jitter(client); break; case AWS_EXPONENTIAL_BACKOFF_JITTER_NONE: delay_ms = s_aws_mqtt5_compute_reconnect_backoff_no_jitter(client); break; case AWS_EXPONENTIAL_BACKOFF_JITTER_FULL: case AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT: default: delay_ms = s_aws_mqtt5_compute_reconnect_backoff_full_jitter(client); break; } delay_ms = aws_min_u64(delay_ms, client->config->max_reconnect_delay_ms); uint64_t now = (*client->vtable->get_current_time_fn)(); client->next_reconnect_time_ns = aws_add_u64_saturating(now, aws_timestamp_convert(delay_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: next connection attempt in %" PRIu64 " milliseconds", (void *)client, delay_ms); client->reconnect_count++; } static void s_change_current_state_to_pending_reconnect(struct aws_mqtt5_client *client) { client->current_state = AWS_MCS_PENDING_RECONNECT; s_update_reconnect_delay_for_pending_reconnect(client); } static void s_change_current_state_to_terminated(struct aws_mqtt5_client *client) { client->current_state = AWS_MCS_TERMINATED; s_mqtt5_client_final_destroy(client); } static void s_change_current_state(struct aws_mqtt5_client *client, enum aws_mqtt5_client_state next_state) { AWS_ASSERT(next_state != client->current_state); if (next_state == client->current_state) { return; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: switching current state from %s to %s", (void *)client, aws_mqtt5_client_state_to_c_string(client->current_state), aws_mqtt5_client_state_to_c_string(next_state)); if (client->vtable->on_client_state_change_callback_fn != NULL) { (*client->vtable->on_client_state_change_callback_fn)( client, client->current_state, next_state, client->vtable->vtable_user_data); } switch (next_state) { case AWS_MCS_STOPPED: s_change_current_state_to_stopped(client); break; case AWS_MCS_CONNECTING: s_change_current_state_to_connecting(client); break; case AWS_MCS_MQTT_CONNECT: s_change_current_state_to_mqtt_connect(client); break; case AWS_MCS_CONNECTED: s_change_current_state_to_connected(client); break; case AWS_MCS_CLEAN_DISCONNECT: s_change_current_state_to_clean_disconnect(client); break; case AWS_MCS_CHANNEL_SHUTDOWN: s_change_current_state_to_channel_shutdown(client); break; case AWS_MCS_PENDING_RECONNECT: 
s_change_current_state_to_pending_reconnect(client); break; case AWS_MCS_TERMINATED: s_change_current_state_to_terminated(client); return; } s_reevaluate_service_task(client); } static bool s_service_state_stopped(struct aws_mqtt5_client *client) { enum aws_mqtt5_client_state desired_state = client->desired_state; if (desired_state == AWS_MCS_CONNECTED) { s_change_current_state(client, AWS_MCS_CONNECTING); } else if (desired_state == AWS_MCS_TERMINATED) { s_change_current_state(client, AWS_MCS_TERMINATED); return true; } return false; } static void s_service_state_connecting(struct aws_mqtt5_client *client) { (void)client; } static void s_service_state_mqtt_connect(struct aws_mqtt5_client *client, uint64_t now) { enum aws_mqtt5_client_state desired_state = client->desired_state; if (desired_state != AWS_MCS_CONNECTED) { s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, NULL, NULL); s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP); return; } if (now >= client->next_mqtt_connect_packet_timeout_time) { s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_CONNACK_TIMEOUT, NULL, NULL); AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: shutting down channel due to CONNACK timeout", (void *)client); s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_CONNACK_TIMEOUT); return; } if (aws_mqtt5_client_service_operational_state(&client->operational_state)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to service outgoing CONNECT packet to channel with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } } static int s_aws_mqtt5_client_queue_ping(struct aws_mqtt5_client *client) { s_reset_ping(client); AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: queuing PINGREQ", (void *)client); struct aws_mqtt5_operation_pingreq *pingreq_op = aws_mqtt5_operation_pingreq_new(client->allocator); s_enqueue_operation_front(client, &pingreq_op->base); return AWS_OP_SUCCESS; } static void s_service_state_connected(struct aws_mqtt5_client *client, uint64_t now) { enum aws_mqtt5_client_state desired_state = client->desired_state; if (desired_state != AWS_MCS_CONNECTED) { s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, NULL, NULL); AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: channel shutdown due to user Stop request", (void *)client); s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP); return; } if (now >= client->next_ping_timeout_time && client->next_ping_timeout_time != 0) { s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT, NULL, NULL); AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: channel shutdown due to PINGRESP timeout", (void *)client); s_aws_mqtt5_client_shutdown_channel_clean( client, AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT, AWS_MQTT5_DRC_KEEP_ALIVE_TIMEOUT); return; } if (now >= client->next_ping_time) { if (s_aws_mqtt5_client_queue_ping(client)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to queue PINGREQ with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } } if (now >= client->next_reconnect_delay_reset_time_ns && client->next_reconnect_delay_reset_time_ns != 0) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: connected sufficiently long that 
reconnect backoff delay has been reset back to " "minimum value", (void *)client); client->reconnect_count = 0; client->current_reconnect_delay_ms = 0; client->next_reconnect_delay_reset_time_ns = 0; } s_check_timeouts(client, now); if (aws_mqtt5_client_service_operational_state(&client->operational_state)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to service CONNECTED operation queue with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } } static void s_service_state_clean_disconnect(struct aws_mqtt5_client *client, uint64_t now) { if (aws_mqtt5_client_service_operational_state(&client->operational_state)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to service CLEAN_DISCONNECT operation queue with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); s_aws_mqtt5_client_shutdown_channel(client, error_code); return; } s_check_timeouts(client, now); } static void s_service_state_channel_shutdown(struct aws_mqtt5_client *client) { (void)client; } static void s_service_state_pending_reconnect(struct aws_mqtt5_client *client, uint64_t now) { if (client->desired_state != AWS_MCS_CONNECTED) { s_change_current_state(client, AWS_MCS_STOPPED); return; } if (now >= client->next_reconnect_time_ns) { s_change_current_state(client, AWS_MCS_CONNECTING); return; } } static void s_mqtt5_service_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_mqtt5_client *client = arg; client->next_service_task_run_time = 0; client->in_service = true; uint64_t now = (*client->vtable->get_current_time_fn)(); bool terminated = false; switch (client->current_state) { case AWS_MCS_STOPPED: terminated = s_service_state_stopped(client); break; case AWS_MCS_CONNECTING: s_service_state_connecting(client); break; case AWS_MCS_MQTT_CONNECT: s_service_state_mqtt_connect(client, now); break; case AWS_MCS_CONNECTED: s_service_state_connected(client, now); break; case AWS_MCS_CLEAN_DISCONNECT: s_service_state_clean_disconnect(client, now); break; case AWS_MCS_CHANNEL_SHUTDOWN: s_service_state_channel_shutdown(client); break; case AWS_MCS_PENDING_RECONNECT: s_service_state_pending_reconnect(client, now); break; default: break; } /* * We can only enter the terminated state from stopped. If we do so, the client memory is now freed and we * will crash if we access anything anymore. 
*/ if (terminated) { return; } /* we're not scheduled anymore, reschedule as needed */ client->in_service = false; s_reevaluate_service_task(client); } static bool s_should_client_disconnect_cleanly(struct aws_mqtt5_client *client) { enum aws_mqtt5_client_state current_state = client->current_state; return current_state == AWS_MCS_CONNECTED; } static int s_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct aws_mqtt5_client *client = handler->impl; if (message->message_type != AWS_IO_MESSAGE_APPLICATION_DATA) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: unexpected io message data", (void *)client); return aws_raise_error(AWS_ERROR_INVALID_STATE); } AWS_LOGF_TRACE( AWS_LS_MQTT5_CLIENT, "id=%p: processing read message of size %zu", (void *)client, message->message_data.len); struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); int result = aws_mqtt5_decoder_on_data_received(&client->decoder, message_cursor); if (result != AWS_OP_SUCCESS) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: decode failure with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); if (error_code == AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR && s_should_client_disconnect_cleanly(client)) { s_aws_mqtt5_client_shutdown_channel_clean(client, error_code, AWS_MQTT5_DRC_PROTOCOL_ERROR); } else { s_aws_mqtt5_client_shutdown_channel(client, error_code); } goto done; } aws_channel_slot_increment_read_window(slot, message->message_data.len); done: aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } static int s_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { (void)handler; return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } static size_t s_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return SIZE_MAX; } static void s_destroy(struct aws_channel_handler *handler) { (void)handler; } static size_t s_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static struct aws_channel_handler_vtable s_mqtt5_channel_handler_vtable = { .process_read_message = &s_process_read_message, .process_write_message = NULL, .increment_read_window = NULL, .shutdown = &s_shutdown, .initial_window_size = &s_initial_window_size, .message_overhead = &s_message_overhead, .destroy = &s_destroy, }; static bool s_aws_is_successful_reason_code(int value) { return value < 128; } static void s_aws_mqtt5_client_on_connack( struct aws_mqtt5_client *client, struct aws_mqtt5_packet_connack_view *connack_view) { AWS_FATAL_ASSERT(client->current_state == AWS_MCS_MQTT_CONNECT); bool is_successful = s_aws_is_successful_reason_code((int)connack_view->reason_code); if (!is_successful) { s_aws_mqtt5_client_emit_final_lifecycle_event( client, AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, connack_view, NULL); enum aws_mqtt5_connect_reason_code reason_code = connack_view->reason_code; AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: connection refused (via failed CONNACK) by remote host with reason code %d(%s)", (void *)client, (int)reason_code, aws_mqtt5_connect_reason_code_to_c_string(reason_code)); s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED); return; } 
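/* CONNACK was accepted: fold the server's CONNACK properties into the negotiated settings, then validate any reported session resumption before transitioning to CONNECTED below. */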
aws_mqtt5_negotiated_settings_apply_connack(&client->negotiated_settings, connack_view); /* Check if a session is being rejoined and perform associated rejoin connect logic here */ if (client->negotiated_settings.rejoined_session) { /* Disconnect if the server is attempting to connect the client to an unexpected session */ if (!s_should_resume_session(client)) { s_aws_mqtt5_client_emit_final_lifecycle_event( client, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION, connack_view, NULL); s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION); return; } else if (!client->has_connected_successfully) { /* * We were configured with REJOIN_ALWAYS and this is the first connection. This is technically not safe * and so let's log a warning for future diagnostics should it cause the user problems. */ AWS_LOGF_WARN( AWS_LS_MQTT5_CLIENT, "id=%p: initial connection rejoined existing session. This may cause packet id collisions.", (void *)client); } } s_change_current_state(client, AWS_MCS_CONNECTED); s_aws_mqtt5_client_emit_connection_success_lifecycle_event(client, connack_view); } static void s_aws_mqtt5_client_log_received_packet( struct aws_mqtt5_client *client, enum aws_mqtt5_packet_type type, void *packet_view) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: Received %s packet", (void *)client, aws_mqtt5_packet_type_to_c_string(type)); switch (type) { case AWS_MQTT5_PT_CONNACK: aws_mqtt5_packet_connack_view_log(packet_view, AWS_LL_DEBUG); break; case AWS_MQTT5_PT_PUBLISH: aws_mqtt5_packet_publish_view_log(packet_view, AWS_LL_DEBUG); break; case AWS_MQTT5_PT_PUBACK: aws_mqtt5_packet_puback_view_log(packet_view, AWS_LL_DEBUG); break; case AWS_MQTT5_PT_SUBACK: aws_mqtt5_packet_suback_view_log(packet_view, AWS_LL_DEBUG); break; case AWS_MQTT5_PT_UNSUBACK: aws_mqtt5_packet_unsuback_view_log(packet_view, AWS_LL_DEBUG); break; case AWS_MQTT5_PT_PINGRESP: break; /* nothing to log */ case AWS_MQTT5_PT_DISCONNECT: aws_mqtt5_packet_disconnect_view_log(packet_view, AWS_LL_DEBUG); break; default: break; } } static void s_aws_mqtt5_client_mqtt_connect_on_packet_received( struct aws_mqtt5_client *client, enum aws_mqtt5_packet_type type, void *packet_view) { if (type == AWS_MQTT5_PT_CONNACK) { s_aws_mqtt5_client_on_connack(client, (struct aws_mqtt5_packet_connack_view *)packet_view); } else { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: Invalid packet type received while in MQTT_CONNECT state", (void *)client); s_aws_mqtt5_client_shutdown_channel_clean( client, AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR, AWS_MQTT5_DRC_PROTOCOL_ERROR); } } typedef bool(aws_linked_list_node_predicate_fn)(struct aws_linked_list_node *); /* * This predicate finds the first (if any) operation in the queue that is not a PUBACK or a PINGREQ. */ static bool s_is_ping_or_puback(struct aws_linked_list_node *operation_node) { struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(operation_node, struct aws_mqtt5_operation, node); return operation->packet_type == AWS_MQTT5_PT_PUBACK || operation->packet_type == AWS_MQTT5_PT_PINGREQ; } /* * Helper function to insert a node (operation) into a list (operation queue) in the correct spot. Currently, this * is only used to enqueue PUBACKs after existing PUBACKs and PINGREQs. This ensure that PUBACKs go out in the order * the corresponding PUBLISH was received, regardless of whether or not there was an intervening service call. 
*/ static void s_insert_node_before_predicate_failure( struct aws_linked_list *list, struct aws_linked_list_node *node, aws_linked_list_node_predicate_fn predicate) { struct aws_linked_list_node *current_node = NULL; for (current_node = aws_linked_list_begin(list); current_node != aws_linked_list_end(list); current_node = aws_linked_list_next(current_node)) { if (!predicate(current_node)) { break; } } AWS_FATAL_ASSERT(current_node != NULL); aws_linked_list_insert_before(current_node, node); } static int s_aws_mqtt5_client_queue_puback(struct aws_mqtt5_client *client, uint16_t packet_id) { AWS_PRECONDITION(client != NULL); const struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = packet_id, .reason_code = AWS_MQTT5_PARC_SUCCESS, }; struct aws_mqtt5_operation_puback *puback_op = aws_mqtt5_operation_puback_new(client->allocator, &puback_view); if (puback_op == NULL) { return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: enqueuing PUBACK operation to first position in queue that is not a PUBACK or PINGREQ", (void *)client); /* * Put the PUBACK ahead of all user-submitted operations (PUBLISH, SUBSCRIBE, UNSUBSCRIBE, DISCONNECT), but behind * all pre-existing "internal" operations (PINGREQ, PUBACK). * * Qos 2 support will need to extend the predicate to include Qos 2 publish packets. */ s_insert_node_before_predicate_failure( &client->operational_state.queued_operations, &puback_op->base.node, s_is_ping_or_puback); s_reevaluate_service_task(client); return AWS_OP_SUCCESS; } static void s_aws_mqtt5_client_connected_on_packet_received( struct aws_mqtt5_client *client, enum aws_mqtt5_packet_type type, void *packet_view) { switch (type) { case AWS_MQTT5_PT_PINGRESP: AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: resetting PINGREQ timer", (void *)client); client->next_ping_timeout_time = 0; break; case AWS_MQTT5_PT_DISCONNECT: s_aws_mqtt5_client_emit_final_lifecycle_event( client, AWS_ERROR_MQTT5_DISCONNECT_RECEIVED, NULL, packet_view); AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: shutting down channel due to DISCONNECT", (void *)client); s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_DISCONNECT_RECEIVED); break; case AWS_MQTT5_PT_SUBACK: { uint16_t packet_id = ((const struct aws_mqtt5_packet_suback_view *)packet_view)->packet_id; aws_mqtt5_client_operational_state_handle_ack( &client->operational_state, packet_id, AWS_MQTT5_PT_SUBACK, packet_view, AWS_ERROR_SUCCESS); break; } case AWS_MQTT5_PT_UNSUBACK: { uint16_t packet_id = ((const struct aws_mqtt5_packet_unsuback_view *)packet_view)->packet_id; aws_mqtt5_client_operational_state_handle_ack( &client->operational_state, packet_id, AWS_MQTT5_PT_UNSUBACK, packet_view, AWS_ERROR_SUCCESS); break; } case AWS_MQTT5_PT_PUBLISH: { const struct aws_mqtt5_packet_publish_view *publish_view = packet_view; aws_mqtt5_callback_set_manager_on_publish_received(&client->callback_manager, publish_view); /* Send a puback if QoS 1+ */ if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { int result = s_aws_mqtt5_client_queue_puback(client, publish_view->packet_id); if (result != AWS_OP_SUCCESS) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: decode failure with error %d(%s)", (void *)client, error_code, aws_error_debug_str(error_code)); s_aws_mqtt5_client_shutdown_channel(client, error_code); } } break; } case AWS_MQTT5_PT_PUBACK: { uint16_t packet_id = ((const struct aws_mqtt5_packet_puback_view *)packet_view)->packet_id; aws_mqtt5_client_operational_state_handle_ack( &client->operational_state, 
packet_id, AWS_MQTT5_PT_PUBACK, packet_view, AWS_ERROR_SUCCESS); break; } default: break; } } static int s_aws_mqtt5_client_on_packet_received( enum aws_mqtt5_packet_type type, void *packet_view, void *decoder_callback_user_data) { struct aws_mqtt5_client *client = decoder_callback_user_data; s_aws_mqtt5_client_log_received_packet(client, type, packet_view); switch (client->current_state) { case AWS_MCS_MQTT_CONNECT: s_aws_mqtt5_client_mqtt_connect_on_packet_received(client, type, packet_view); break; case AWS_MCS_CONNECTED: case AWS_MCS_CLEAN_DISCONNECT: s_aws_mqtt5_client_connected_on_packet_received(client, type, packet_view); break; default: break; } s_reevaluate_service_task(client); return AWS_OP_SUCCESS; } static uint64_t s_aws_high_res_clock_get_ticks_proxy(void) { uint64_t current_time = 0; AWS_FATAL_ASSERT(aws_high_res_clock_get_ticks(&current_time) == AWS_OP_SUCCESS); return current_time; } struct aws_io_message *s_aws_channel_acquire_message_from_pool_default( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint, void *user_data) { (void)user_data; return aws_channel_acquire_message_from_pool(channel, message_type, size_hint); } static int s_aws_channel_slot_send_message_default( struct aws_channel_slot *slot, struct aws_io_message *message, enum aws_channel_direction dir, void *user_data) { (void)user_data; return aws_channel_slot_send_message(slot, message, dir); } static struct aws_mqtt5_client_vtable s_default_client_vtable = { .get_current_time_fn = s_aws_high_res_clock_get_ticks_proxy, .channel_shutdown_fn = aws_channel_shutdown, .websocket_connect_fn = aws_websocket_client_connect, .client_bootstrap_new_socket_channel_fn = aws_client_bootstrap_new_socket_channel, .http_proxy_new_socket_channel_fn = aws_http_proxy_new_socket_channel, .on_client_state_change_callback_fn = NULL, .aws_channel_acquire_message_from_pool_fn = s_aws_channel_acquire_message_from_pool_default, .aws_channel_slot_send_message_fn = s_aws_channel_slot_send_message_default, .vtable_user_data = NULL, }; void aws_mqtt5_client_set_vtable(struct aws_mqtt5_client *client, const struct aws_mqtt5_client_vtable *vtable) { client->vtable = vtable; } const struct aws_mqtt5_client_vtable *aws_mqtt5_client_get_default_vtable(void) { return &s_default_client_vtable; } struct aws_mqtt5_client *aws_mqtt5_client_new( struct aws_allocator *allocator, const struct aws_mqtt5_client_options *options) { AWS_FATAL_ASSERT(allocator != NULL); AWS_FATAL_ASSERT(options != NULL); struct aws_mqtt5_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_client)); if (client == NULL) { return NULL; } aws_task_init(&client->service_task, s_mqtt5_service_task_fn, client, "Mqtt5Service"); client->allocator = allocator; client->vtable = &s_default_client_vtable; aws_ref_count_init(&client->ref_count, client, s_on_mqtt5_client_zero_ref_count); aws_mqtt5_callback_set_manager_init(&client->callback_manager, client); if (aws_mqtt5_client_operational_state_init(&client->operational_state, allocator, client)) { goto on_error; } client->config = aws_mqtt5_client_options_storage_new(allocator, options); if (client->config == NULL) { goto on_error; } aws_mqtt5_client_flow_control_state_init(client); /* all client activity will take place on this event loop, serializing things like reconnect, ping, etc...
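 *
 * Editorial note (not part of the original source): public entry points that may run on other threads
 * (start/stop/submit) therefore never touch client state directly; they wrap the request in a task and
 * schedule it onto this loop, following the pattern used further below:
 *
 *     aws_event_loop_schedule_task_now(client->loop, &task->task);
 *
 * so desired-state changes and operation submission are always applied on the loop thread.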
*/ client->loop = aws_event_loop_group_get_next_loop(client->config->bootstrap->event_loop_group); if (client->loop == NULL) { goto on_error; } client->desired_state = AWS_MCS_STOPPED; client->current_state = AWS_MCS_STOPPED; client->lifecycle_state = AWS_MQTT5_LS_NONE; struct aws_mqtt5_decoder_options decoder_options = { .callback_user_data = client, .on_packet_received = s_aws_mqtt5_client_on_packet_received, }; if (aws_mqtt5_decoder_init(&client->decoder, allocator, &decoder_options)) { goto on_error; } struct aws_mqtt5_encoder_options encoder_options = { .client = client, }; if (aws_mqtt5_encoder_init(&client->encoder, allocator, &encoder_options)) { goto on_error; } if (aws_mqtt5_inbound_topic_alias_resolver_init(&client->inbound_topic_alias_resolver, allocator)) { goto on_error; } client->outbound_topic_alias_resolver = aws_mqtt5_outbound_topic_alias_resolver_new( allocator, client->config->topic_aliasing_options.outbound_topic_alias_behavior); if (client->outbound_topic_alias_resolver == NULL) { goto on_error; } if (aws_mqtt5_negotiated_settings_init( allocator, &client->negotiated_settings, &options->connect_options->client_id)) { goto on_error; } client->current_reconnect_delay_ms = 0; client->handler.alloc = client->allocator; client->handler.vtable = &s_mqtt5_channel_handler_vtable; client->handler.impl = client; aws_mqtt5_client_options_storage_log(client->config, AWS_LL_DEBUG); s_init_statistics(&client->operation_statistics_impl); return client; on_error: /* release isn't usable here since we may not even have an event loop */ s_mqtt5_client_final_destroy(client); return NULL; } struct aws_mqtt5_client *aws_mqtt5_client_acquire(struct aws_mqtt5_client *client) { if (client != NULL) { aws_ref_count_acquire(&client->ref_count); } return client; } struct aws_mqtt5_client *aws_mqtt5_client_release(struct aws_mqtt5_client *client) { if (client != NULL) { aws_ref_count_release(&client->ref_count); } return NULL; } struct aws_mqtt_change_desired_state_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt5_client *client; enum aws_mqtt5_client_state desired_state; struct aws_mqtt5_operation_disconnect *disconnect_operation; }; void aws_mqtt5_client_change_desired_state( struct aws_mqtt5_client *client, enum aws_mqtt5_client_state desired_state, struct aws_mqtt5_operation_disconnect *disconnect_op) { AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(client->loop)); if (client->desired_state != desired_state) { AWS_LOGF_INFO( AWS_LS_MQTT5_CLIENT, "id=%p: changing desired client state from %s to %s", (void *)client, aws_mqtt5_client_state_to_c_string(client->desired_state), aws_mqtt5_client_state_to_c_string(desired_state)); client->desired_state = desired_state; if (desired_state == AWS_MCS_STOPPED && disconnect_op != NULL) { s_aws_mqtt5_client_shutdown_channel_with_disconnect( client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, disconnect_op); } s_reevaluate_service_task(client); } } static void s_change_state_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_change_desired_state_task *change_state_task = arg; struct aws_mqtt5_client *client = change_state_task->client; enum aws_mqtt5_client_state desired_state = change_state_task->desired_state; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } aws_mqtt5_client_change_desired_state(client, desired_state, change_state_task->disconnect_operation); done: aws_mqtt5_operation_disconnect_release(change_state_task->disconnect_operation); if (desired_state != 
AWS_MCS_TERMINATED) { aws_mqtt5_client_release(client); } aws_mem_release(change_state_task->allocator, change_state_task); } static struct aws_mqtt_change_desired_state_task *s_aws_mqtt_change_desired_state_task_new( struct aws_allocator *allocator, struct aws_mqtt5_client *client, enum aws_mqtt5_client_state desired_state, struct aws_mqtt5_operation_disconnect *disconnect_operation) { struct aws_mqtt_change_desired_state_task *change_state_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_change_desired_state_task)); if (change_state_task == NULL) { return NULL; } aws_task_init(&change_state_task->task, s_change_state_task_fn, (void *)change_state_task, "ChangeStateTask"); change_state_task->allocator = client->allocator; change_state_task->client = (desired_state == AWS_MCS_TERMINATED) ? client : aws_mqtt5_client_acquire(client); change_state_task->desired_state = desired_state; change_state_task->disconnect_operation = aws_mqtt5_operation_disconnect_acquire(disconnect_operation); return change_state_task; } static bool s_is_valid_desired_state(enum aws_mqtt5_client_state desired_state) { switch (desired_state) { case AWS_MCS_STOPPED: case AWS_MCS_CONNECTED: case AWS_MCS_TERMINATED: return true; default: return false; } } static int s_aws_mqtt5_client_change_desired_state( struct aws_mqtt5_client *client, enum aws_mqtt5_client_state desired_state, struct aws_mqtt5_operation_disconnect *disconnect_operation) { AWS_FATAL_ASSERT(client != NULL); AWS_FATAL_ASSERT(client->loop != NULL); AWS_FATAL_ASSERT(disconnect_operation == NULL || desired_state == AWS_MCS_STOPPED); if (!s_is_valid_desired_state(desired_state)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: invalid desired state argument %d(%s)", (void *)client, (int)desired_state, aws_mqtt5_client_state_to_c_string(desired_state)); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_mqtt_change_desired_state_task *task = s_aws_mqtt_change_desired_state_task_new(client->allocator, client, desired_state, disconnect_operation); if (task == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: failed to create change desired state task", (void *)client); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(client->loop, &task->task); return AWS_OP_SUCCESS; } int aws_mqtt5_client_start(struct aws_mqtt5_client *client) { return s_aws_mqtt5_client_change_desired_state(client, AWS_MCS_CONNECTED, NULL); } int aws_mqtt5_client_stop( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_disconnect_view *options, const struct aws_mqtt5_disconnect_completion_options *completion_options) { AWS_FATAL_ASSERT(client != NULL); struct aws_mqtt5_operation_disconnect *disconnect_op = NULL; if (options != NULL) { struct aws_mqtt5_disconnect_completion_options internal_completion_options = { .completion_callback = s_on_disconnect_operation_complete, .completion_user_data = client, }; disconnect_op = aws_mqtt5_operation_disconnect_new( client->allocator, options, completion_options, &internal_completion_options); if (disconnect_op == NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to create requested DISCONNECT operation", (void *)client); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: Stopping client via DISCONNECT operation (%p)", (void *)client, (void *)disconnect_op); aws_mqtt5_packet_disconnect_view_log(disconnect_op->base.packet_view, AWS_LL_DEBUG); } else { AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: Stopping client immediately", (void *)client); } int result = 
s_aws_mqtt5_client_change_desired_state(client, AWS_MCS_STOPPED, disconnect_op); aws_mqtt5_operation_disconnect_release(disconnect_op); return result; } struct aws_mqtt5_submit_operation_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt5_client *client; struct aws_mqtt5_operation *operation; }; void aws_mqtt5_client_submit_operation_internal( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, bool is_terminated) { /* * Take a ref to the operation that represents the client taking ownership * If we subsequently reject it (task cancel or offline queue policy), then the operation completion * will undo this ref acquisition. */ aws_mqtt5_operation_acquire(operation); if (is_terminated) { s_complete_operation(NULL, operation, AWS_ERROR_MQTT5_CLIENT_TERMINATED, AWS_MQTT5_PT_NONE, NULL); return; } /* * If we're offline and this operation doesn't meet the requirements of the offline queue retention policy, * fail it immediately. */ if (client->current_state != AWS_MCS_CONNECTED) { if (!s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy( operation, client->config->offline_queue_behavior)) { s_complete_operation( NULL, operation, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, AWS_MQTT5_PT_NONE, NULL); return; } } /* newly-submitted operations must have a 0 packet id */ aws_mqtt5_operation_set_packet_id(operation, 0); s_enqueue_operation_back(client, operation); aws_mqtt5_client_statistics_change_operation_statistic_state(client, operation, AWS_MQTT5_OSS_INCOMPLETE); } static void s_mqtt5_submit_operation_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt5_submit_operation_task *submit_operation_task = arg; struct aws_mqtt5_client *client = submit_operation_task->client; struct aws_mqtt5_operation *operation = submit_operation_task->operation; aws_mqtt5_client_submit_operation_internal(client, operation, status != AWS_TASK_STATUS_RUN_READY); aws_mqtt5_operation_release(submit_operation_task->operation); aws_mqtt5_client_release(submit_operation_task->client); aws_mem_release(submit_operation_task->allocator, submit_operation_task); } static int s_submit_operation(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) { struct aws_mqtt5_submit_operation_task *submit_task = aws_mem_calloc(client->allocator, 1, sizeof(struct aws_mqtt5_submit_operation_task)); if (submit_task == NULL) { return AWS_OP_ERR; } aws_task_init(&submit_task->task, s_mqtt5_submit_operation_task_fn, submit_task, "Mqtt5SubmitOperation"); submit_task->allocator = client->allocator; submit_task->client = aws_mqtt5_client_acquire(client); submit_task->operation = operation; aws_event_loop_schedule_task_now(client->loop, &submit_task->task); return AWS_OP_SUCCESS; } int aws_mqtt5_client_publish( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_publish_view *publish_options, const struct aws_mqtt5_publish_completion_options *completion_options) { AWS_PRECONDITION(client != NULL); AWS_PRECONDITION(publish_options != NULL); struct aws_mqtt5_operation_publish *publish_op = aws_mqtt5_operation_publish_new(client->allocator, client, publish_options, completion_options); if (publish_op == NULL) { return AWS_OP_ERR; } AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: Submitting PUBLISH operation (%p)", (void *)client, (void *)publish_op); aws_mqtt5_packet_publish_view_log(publish_op->base.packet_view, AWS_LL_DEBUG); if (s_submit_operation(client, &publish_op->base)) { goto error; } return 
AWS_OP_SUCCESS; error: aws_mqtt5_operation_release(&publish_op->base); return AWS_OP_ERR; } int aws_mqtt5_client_subscribe( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_subscribe_view *subscribe_options, const struct aws_mqtt5_subscribe_completion_options *completion_options) { AWS_PRECONDITION(client != NULL); AWS_PRECONDITION(subscribe_options != NULL); struct aws_mqtt5_operation_subscribe *subscribe_op = aws_mqtt5_operation_subscribe_new(client->allocator, client, subscribe_options, completion_options); if (subscribe_op == NULL) { return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: Submitting SUBSCRIBE operation (%p)", (void *)client, (void *)subscribe_op); aws_mqtt5_packet_subscribe_view_log(subscribe_op->base.packet_view, AWS_LL_DEBUG); if (s_submit_operation(client, &subscribe_op->base)) { goto error; } return AWS_OP_SUCCESS; error: aws_mqtt5_operation_release(&subscribe_op->base); return AWS_OP_ERR; } int aws_mqtt5_client_unsubscribe( struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options, const struct aws_mqtt5_unsubscribe_completion_options *completion_options) { AWS_PRECONDITION(client != NULL); AWS_PRECONDITION(unsubscribe_options != NULL); struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = aws_mqtt5_operation_unsubscribe_new(client->allocator, client, unsubscribe_options, completion_options); if (unsubscribe_op == NULL) { return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: Submitting UNSUBSCRIBE operation (%p)", (void *)client, (void *)unsubscribe_op); aws_mqtt5_packet_unsubscribe_view_log(unsubscribe_op->base.packet_view, AWS_LL_DEBUG); if (s_submit_operation(client, &unsubscribe_op->base)) { goto error; } return AWS_OP_SUCCESS; error: aws_mqtt5_operation_release(&unsubscribe_op->base); return AWS_OP_ERR; } static bool s_needs_packet_id(const struct aws_mqtt5_operation *operation) { switch (operation->packet_type) { case AWS_MQTT5_PT_SUBSCRIBE: case AWS_MQTT5_PT_UNSUBSCRIBE: return aws_mqtt5_operation_get_packet_id(operation) == 0; case AWS_MQTT5_PT_PUBLISH: { const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view; if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) { return false; } return aws_mqtt5_operation_get_packet_id(operation) == 0; } default: return false; } } static uint16_t s_next_packet_id(uint16_t current_id) { if (++current_id == 0) { current_id = 1; } return current_id; } int aws_mqtt5_operation_bind_packet_id( struct aws_mqtt5_operation *operation, struct aws_mqtt5_client_operational_state *client_operational_state) { if (!s_needs_packet_id(operation)) { return AWS_OP_SUCCESS; } uint16_t current_id = client_operational_state->next_mqtt_packet_id; struct aws_hash_element *elem = NULL; for (uint16_t i = 0; i < UINT16_MAX; ++i) { aws_hash_table_find(&client_operational_state->unacked_operations_table, ¤t_id, &elem); if (elem == NULL) { aws_mqtt5_operation_set_packet_id(operation, current_id); client_operational_state->next_mqtt_packet_id = s_next_packet_id(current_id); return AWS_OP_SUCCESS; } current_id = s_next_packet_id(current_id); } aws_raise_error(AWS_ERROR_INVALID_STATE); return AWS_OP_ERR; } /* * Priority queue comparison function for ack timeout processing */ static int s_compare_operation_timeouts(const void *a, const void *b) { const struct aws_mqtt5_operation **operation_a_ptr = (void *)a; const struct aws_mqtt5_operation *operation_a = *operation_a_ptr; const struct aws_mqtt5_operation **operation_b_ptr = (void *)b; const 
struct aws_mqtt5_operation *operation_b = *operation_b_ptr; if (operation_a->ack_timeout_timepoint_ns < operation_b->ack_timeout_timepoint_ns) { return -1; } else if (operation_a->ack_timeout_timepoint_ns > operation_b->ack_timeout_timepoint_ns) { return 1; } else { return 0; } } int aws_mqtt5_client_operational_state_init( struct aws_mqtt5_client_operational_state *client_operational_state, struct aws_allocator *allocator, struct aws_mqtt5_client *client) { aws_linked_list_init(&client_operational_state->queued_operations); aws_linked_list_init(&client_operational_state->write_completion_operations); aws_linked_list_init(&client_operational_state->unacked_operations); if (aws_hash_table_init( &client_operational_state->unacked_operations_table, allocator, DEFAULT_MQTT5_OPERATION_TABLE_SIZE, aws_mqtt_hash_uint16_t, aws_mqtt_compare_uint16_t_eq, NULL, NULL)) { return AWS_OP_ERR; } if (aws_priority_queue_init_dynamic( &client_operational_state->operations_by_ack_timeout, allocator, 100, sizeof(struct aws_mqtt5_operation *), s_compare_operation_timeouts)) { return AWS_OP_ERR; } client_operational_state->next_mqtt_packet_id = 1; client_operational_state->current_operation = NULL; client_operational_state->client = client; return AWS_OP_SUCCESS; } void aws_mqtt5_client_operational_state_clean_up(struct aws_mqtt5_client_operational_state *client_operational_state) { AWS_ASSERT(client_operational_state->current_operation == NULL); s_aws_mqtt5_client_operational_state_reset(client_operational_state, AWS_ERROR_MQTT5_CLIENT_TERMINATED, true); } static bool s_filter_queued_operations_for_offline(struct aws_mqtt5_operation *operation, void *context) { struct aws_mqtt5_client *client = context; enum aws_mqtt5_client_operation_queue_behavior_type queue_behavior = client->config->offline_queue_behavior; return !s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy(operation, queue_behavior); } static void s_process_unacked_operations_for_disconnect(struct aws_mqtt5_operation *operation, void *context) { (void)context; if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) { struct aws_mqtt5_packet_publish_view *publish_view = (struct aws_mqtt5_packet_publish_view *)operation->packet_view; if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { publish_view->duplicate = true; return; } } aws_mqtt5_operation_set_packet_id(operation, 0); } static bool s_filter_unacked_operations_for_offline(struct aws_mqtt5_operation *operation, void *context) { struct aws_mqtt5_client *client = context; enum aws_mqtt5_client_operation_queue_behavior_type queue_behavior = client->config->offline_queue_behavior; if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) { const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view; if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { return false; } } return !s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy(operation, queue_behavior); } /* * Resets the client's operational state based on a disconnection (from above comment): * * If current_operation * move current_operation to head of queued_operations * Fail all operations in the pending write completion list * Fail, remove, and release operations in queued_operations where they fail the offline queue policy * Iterate unacked_operations: * If qos1+ publish * set dup flag * else * unset/release packet id * Fail, remove, and release unacked_operations if: * (1) They fail the offline queue policy AND * (2) the operation is not Qos 1+ publish * * Clears the unacked_operations table */ void 
aws_mqtt5_client_on_disconnection_update_operational_state(struct aws_mqtt5_client *client) { struct aws_mqtt5_client_operational_state *client_operational_state = &client->operational_state; /* move current operation to the head of the queue */ if (client_operational_state->current_operation != NULL) { aws_linked_list_push_front( &client_operational_state->queued_operations, &client_operational_state->current_operation->node); client_operational_state->current_operation = NULL; } /* fail everything in pending write completion */ s_complete_operation_list( client, &client_operational_state->write_completion_operations, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY); struct aws_linked_list operations_to_fail; AWS_ZERO_STRUCT(operations_to_fail); aws_linked_list_init(&operations_to_fail); /* fail everything in the pending queue that doesn't meet the offline queue behavior retention requirements */ s_filter_operation_list( &client_operational_state->queued_operations, s_filter_queued_operations_for_offline, &operations_to_fail, client); s_complete_operation_list( client, &operations_to_fail, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY); /* Mark unacked qos1+ publishes as duplicate and release packet ids for non qos1+ publish */ s_apply_to_operation_list( &client_operational_state->unacked_operations, s_process_unacked_operations_for_disconnect, NULL); /* * fail everything in the pending queue that * (1) isn't a qos1+ publish AND * (2) doesn't meet the offline queue behavior retention requirements */ s_filter_operation_list( &client_operational_state->unacked_operations, s_filter_unacked_operations_for_offline, &operations_to_fail, client); s_complete_operation_list( client, &operations_to_fail, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY); aws_hash_table_clear(&client->operational_state.unacked_operations_table); aws_priority_queue_clear(&client->operational_state.operations_by_ack_timeout); /* * Prevents inbound resolution on the highly unlikely, illegal server behavior of sending a PUBLISH before * a CONNACK on next connection establishment. 
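 *
 * Editorial note (not part of the original source): the resolver is re-attached by
 * aws_mqtt5_client_on_connection_update_operational_state() once the next CONNACK has been processed and the
 * negotiated topic_alias_maximum_to_client is known, so only a PUBLISH carrying a topic alias before that
 * point is rejected.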
*/ aws_mqtt5_decoder_set_inbound_topic_alias_resolver(&client->decoder, NULL); } static void s_set_operation_list_statistic_state( struct aws_mqtt5_client *client, struct aws_linked_list *operation_list, enum aws_mqtt5_operation_statistic_state_flags new_state_flags) { struct aws_linked_list_node *node = aws_linked_list_begin(operation_list); while (node != aws_linked_list_end(operation_list)) { struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node); node = aws_linked_list_next(node); aws_mqtt5_client_statistics_change_operation_statistic_state(client, operation, new_state_flags); } } static bool s_filter_unacked_operations_for_session_rejoin(struct aws_mqtt5_operation *operation, void *context) { (void)context; if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) { const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view; if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { return false; } } return true; } /* * Updates the client's operational state based on a successfully established connection event: * * if rejoined_session: * Move-and-append all non-qos1+-publishes in unacked_operations to the front of queued_operations * Move-and-append remaining operations (qos1+ publishes) to the front of queued_operations * else: * Fail, remove, and release unacked_operations that fail the offline queue policy * Move and append unacked operations to front of queued_operations */ void aws_mqtt5_client_on_connection_update_operational_state(struct aws_mqtt5_client *client) { struct aws_mqtt5_client_operational_state *client_operational_state = &client->operational_state; if (client->negotiated_settings.rejoined_session) { struct aws_linked_list requeued_operations; AWS_ZERO_STRUCT(requeued_operations); aws_linked_list_init(&requeued_operations); /* * qos1+ publishes must go out first, so split the unacked operation list into two sets: qos1+ publishes and * everything else. */ s_filter_operation_list( &client_operational_state->unacked_operations, s_filter_unacked_operations_for_session_rejoin, &requeued_operations, client); /* * Put non-qos1+ publishes on the front of the pending queue */ aws_linked_list_move_all_front(&client->operational_state.queued_operations, &requeued_operations); /* * Put qos1+ publishes on the front of the pending queue */ aws_linked_list_move_all_front( &client->operational_state.queued_operations, &client_operational_state->unacked_operations); } else { struct aws_linked_list failed_operations; AWS_ZERO_STRUCT(failed_operations); aws_linked_list_init(&failed_operations); s_filter_operation_list( &client_operational_state->unacked_operations, s_filter_queued_operations_for_offline, &failed_operations, client); /* * fail operations that we aren't going to requeue. In this particular case it's only qos1+ publishes * that we didn't fail because we didn't know if we were going to rejoin a sesison or not. 
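 *
 * Editorial example (not part of the original source): a qos1 PUBLISH that was unacked at disconnect is kept
 * through the offline period regardless of policy, because a rejoined session would let it be resent with the
 * duplicate flag set. If the CONNACK then reports no existing session and the configured offline queue policy
 * does not retain such publishes, the operation is failed here with
 * AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY instead of being requeued.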
*/ s_complete_operation_list( client, &failed_operations, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY); /* requeue operations that we are going to perform again */ aws_linked_list_move_all_front( &client->operational_state.queued_operations, &client->operational_state.unacked_operations); } /* set everything remaining to incomplete */ s_set_operation_list_statistic_state( client, &client->operational_state.queued_operations, AWS_MQTT5_OSS_INCOMPLETE); aws_mqtt5_client_flow_control_state_reset(client); uint16_t inbound_alias_maximum = client->negotiated_settings.topic_alias_maximum_to_client; if (aws_mqtt5_inbound_topic_alias_resolver_reset(&client->inbound_topic_alias_resolver, inbound_alias_maximum)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: client unable to reset inbound alias resolver", (void *)client_operational_state->client); goto on_error; } if (inbound_alias_maximum > 0) { aws_mqtt5_decoder_set_inbound_topic_alias_resolver(&client->decoder, &client->inbound_topic_alias_resolver); } else { aws_mqtt5_decoder_set_inbound_topic_alias_resolver(&client->decoder, NULL); } uint16_t outbound_alias_maximum = client->negotiated_settings.topic_alias_maximum_to_server; if (aws_mqtt5_outbound_topic_alias_resolver_reset(client->outbound_topic_alias_resolver, outbound_alias_maximum)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: client unable to reset outbound alias resolver", (void *)client_operational_state->client); goto on_error; } aws_mqtt5_encoder_set_outbound_topic_alias_resolver(&client->encoder, client->outbound_topic_alias_resolver); return; on_error: s_aws_mqtt5_client_shutdown_channel(client, aws_last_error()); } static bool s_aws_mqtt5_client_has_pending_operational_work( const struct aws_mqtt5_client_operational_state *client_operational_state, enum aws_mqtt5_client_state client_state) { if (aws_linked_list_empty(&client_operational_state->queued_operations)) { return false; } struct aws_linked_list_node *next_operation_node = aws_linked_list_front(&client_operational_state->queued_operations); struct aws_mqtt5_operation *next_operation = AWS_CONTAINER_OF(next_operation_node, struct aws_mqtt5_operation, node); switch (client_state) { case AWS_MCS_MQTT_CONNECT: /* Only allowed to send a CONNECT packet in this state */ return next_operation->packet_type == AWS_MQTT5_PT_CONNECT; case AWS_MCS_CLEAN_DISCONNECT: /* Except for finishing the current operation, only allowed to send a DISCONNECT packet in this state */ return next_operation->packet_type == AWS_MQTT5_PT_DISCONNECT; case AWS_MCS_CONNECTED: return true; default: return false; } } static uint64_t s_aws_mqtt5_client_compute_next_operation_flow_control_service_time( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, uint64_t now) { (void)operation; switch (client->current_state) { case AWS_MCS_MQTT_CONNECT: case AWS_MCS_CLEAN_DISCONNECT: return now; case AWS_MCS_CONNECTED: return aws_mqtt5_client_flow_control_state_get_next_operation_service_time(client, operation, now); default: /* no outbound traffic is allowed outside of the above states */ return 0; } } /* * We don't presently know if IoT Core's throughput limit is on the plaintext or encrypted data stream. Assume * it's on the encrypted stream for now and make a reasonable guess at the additional cost TLS imposes on data size: * * This calculation is intended to be a reasonable default but will not be accurate in all cases * * Estimate the # of ethernet frames (max 1444 bytes) and add in potential TLS framing and padding values per. 
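 *
 * Editorial example (not part of the original source), using the constants defined below and assuming an io
 * message length of 4096 bytes (the actual value is AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH, defined elsewhere):
 *
 *     available payload per frame = 1500 - (72 + 64)   = 1364 bytes
 *     frames per io message       = ceil(4096 / 1364)  = 4
 *     throughput tokens charged   = 4096 + 4 * 64      = 4352
 *
 * i.e. each full io message is charged roughly 6% more than its plaintext size to cover estimated TLS framing.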
* * TODO: query IoT Core to determine if this calculation is needed after all * TODO: may eventually want to expose the ethernet frame size here as a configurable option for networks that have a * lower MTU * * References: * https://tools.ietf.org/id/draft-mattsson-uta-tls-overhead-01.xml#rfc.section.3 * */ #define ETHERNET_FRAME_MAX_PAYLOAD_SIZE 1500 #define TCP_SIZE_OVERESTIMATE 72 #define TLS_FRAMING_AND_PADDING_OVERESTIMATE 64 #define AVAILABLE_ETHERNET_FRAME_SIZE \ (ETHERNET_FRAME_MAX_PAYLOAD_SIZE - (TCP_SIZE_OVERESTIMATE + TLS_FRAMING_AND_PADDING_OVERESTIMATE)) #define ETHERNET_FRAMES_PER_IO_MESSAGE_ESTIMATE \ ((AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH + AVAILABLE_ETHERNET_FRAME_SIZE - 1) / AVAILABLE_ETHERNET_FRAME_SIZE) #define THROUGHPUT_TOKENS_PER_IO_MESSAGE_OVERESTIMATE \ (AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH + \ ETHERNET_FRAMES_PER_IO_MESSAGE_ESTIMATE * TLS_FRAMING_AND_PADDING_OVERESTIMATE) static uint64_t s_compute_throughput_throttle_wait(const struct aws_mqtt5_client *client, uint64_t now) { /* flow control only applies during CONNECTED/CLEAN_DISCONNECT */ if (!aws_mqtt5_client_are_negotiated_settings_valid(client)) { return now; } uint64_t throughput_wait = 0; if (client->config->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) { throughput_wait = aws_rate_limiter_token_bucket_compute_wait_for_tokens( (struct aws_rate_limiter_token_bucket *)&client->flow_control_state.throughput_throttle, THROUGHPUT_TOKENS_PER_IO_MESSAGE_OVERESTIMATE); } return aws_add_u64_saturating(now, throughput_wait); } static uint64_t s_aws_mqtt5_client_compute_operational_state_service_time( const struct aws_mqtt5_client_operational_state *client_operational_state, uint64_t now) { /* If an io message is in transit down the channel, then wait for it to complete */ if (client_operational_state->pending_write_completion) { return 0; } /* Throughput flow control check */ uint64_t next_throttled_time = s_compute_throughput_throttle_wait(client_operational_state->client, now); if (next_throttled_time > now) { return next_throttled_time; } /* If we're in the middle of something, keep going */ if (client_operational_state->current_operation != NULL) { return now; } /* If nothing is queued, there's nothing to do */ enum aws_mqtt5_client_state client_state = client_operational_state->client->current_state; if (!s_aws_mqtt5_client_has_pending_operational_work(client_operational_state, client_state)) { return 0; } AWS_FATAL_ASSERT(!aws_linked_list_empty(&client_operational_state->queued_operations)); struct aws_linked_list_node *next_operation_node = aws_linked_list_front(&client_operational_state->queued_operations); struct aws_mqtt5_operation *next_operation = AWS_CONTAINER_OF(next_operation_node, struct aws_mqtt5_operation, node); AWS_FATAL_ASSERT(next_operation != NULL); /* * Check the head of the pending operation queue against flow control and client state restrictions */ return s_aws_mqtt5_client_compute_next_operation_flow_control_service_time( client_operational_state->client, next_operation, now); } static bool s_aws_mqtt5_client_should_service_operational_state( const struct aws_mqtt5_client_operational_state *client_operational_state, uint64_t now) { return now == s_aws_mqtt5_client_compute_operational_state_service_time(client_operational_state, now); } static bool s_operation_requires_ack(const struct aws_mqtt5_operation *operation) { switch (operation->packet_type) { case AWS_MQTT5_PT_SUBSCRIBE: case AWS_MQTT5_PT_UNSUBSCRIBE: return true; case AWS_MQTT5_PT_PUBLISH: { const struct 
aws_mqtt5_packet_publish_view *publish_view = operation->packet_view; return publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE; } default: return false; } } static void s_on_pingreq_send(struct aws_mqtt5_client *client) { uint64_t now = client->vtable->get_current_time_fn(); uint64_t ping_timeout_nanos = aws_timestamp_convert(client->config->ping_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); client->next_ping_timeout_time = aws_add_u64_saturating(now, ping_timeout_nanos); } static int s_apply_throughput_flow_control(struct aws_mqtt5_client *client) { /* flow control only applies during CONNECTED/CLEAN_DISCONNECT */ if (!aws_mqtt5_client_are_negotiated_settings_valid(client)) { return AWS_OP_SUCCESS; } if (client->config->extended_validation_and_flow_control_options == AWS_MQTT5_EVAFCO_NONE) { return AWS_OP_SUCCESS; } return aws_rate_limiter_token_bucket_take_tokens( (struct aws_rate_limiter_token_bucket *)&client->flow_control_state.throughput_throttle, THROUGHPUT_TOKENS_PER_IO_MESSAGE_OVERESTIMATE); } static int s_apply_publish_tps_flow_control(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) { if (client->config->extended_validation_and_flow_control_options == AWS_MQTT5_EVAFCO_NONE) { return AWS_OP_SUCCESS; } if (operation->packet_type != AWS_MQTT5_PT_PUBLISH) { return AWS_OP_SUCCESS; } return aws_rate_limiter_token_bucket_take_tokens( (struct aws_rate_limiter_token_bucket *)&client->flow_control_state.publish_throttle, 1); } int aws_mqtt5_client_service_operational_state(struct aws_mqtt5_client_operational_state *client_operational_state) { struct aws_mqtt5_client *client = client_operational_state->client; struct aws_channel_slot *slot = client->slot; const struct aws_mqtt5_client_vtable *vtable = client->vtable; uint64_t now = (*vtable->get_current_time_fn)(); /* Should we write data? */ bool should_service = s_aws_mqtt5_client_should_service_operational_state(client_operational_state, now); if (!should_service) { return AWS_OP_SUCCESS; } if (s_apply_throughput_flow_control(client)) { return AWS_OP_SUCCESS; } /* If we're going to write data, we need something to write to */ struct aws_io_message *io_message = (*vtable->aws_channel_acquire_message_from_pool_fn)( slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH, vtable->vtable_user_data); if (io_message == NULL) { return AWS_OP_ERR; } int operational_error_code = AWS_ERROR_SUCCESS; do { /* if no current operation, pull one in and setup encode */ if (client_operational_state->current_operation == NULL) { /* * Loop through queued operations, discarding ones that fail validation, until we run out or find * a good one. Failing validation against negotiated settings is expected to be a rare event. */ struct aws_mqtt5_operation *next_operation = NULL; while (!aws_linked_list_empty(&client_operational_state->queued_operations)) { struct aws_linked_list_node *next_operation_node = aws_linked_list_front(&client_operational_state->queued_operations); struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(next_operation_node, struct aws_mqtt5_operation, node); /* If this is a publish and we're throttled, just quit out of the loop. */ if (s_apply_publish_tps_flow_control(client, operation)) { break; } /* Wait until flow control has passed before actually dequeuing the operation. 
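 *
 * Editorial note (not part of the original source): the publish-per-second check above only peeks at the head
 * of the queue. If the token bucket is empty we break out without popping, leaving the operation queued, and
 * the service task is rescheduled for the time at which
 * aws_mqtt5_client_flow_control_state_get_next_operation_service_time() says enough tokens will be available.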
*/ aws_linked_list_pop_front(&client_operational_state->queued_operations); if (!aws_mqtt5_operation_validate_vs_connection_settings(operation, client)) { next_operation = operation; break; } enum aws_mqtt5_packet_type packet_type = operation->packet_type; int validation_error_code = aws_last_error(); s_complete_operation(client, operation, validation_error_code, AWS_MQTT5_PT_NONE, NULL); /* A DISCONNECT packet failing dynamic validation should shut down the whole channel */ if (packet_type == AWS_MQTT5_PT_DISCONNECT) { operational_error_code = AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE; break; } } if (next_operation != NULL && s_aws_mqtt5_client_set_current_operation(client, next_operation)) { operational_error_code = AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE; break; } } struct aws_mqtt5_operation *current_operation = client_operational_state->current_operation; if (current_operation == NULL) { break; } /* write current operation to message, handle errors */ enum aws_mqtt5_encoding_result encoding_result = aws_mqtt5_encoder_encode_to_buffer(&client->encoder, &io_message->message_data); if (encoding_result == AWS_MQTT5_ER_ERROR) { operational_error_code = AWS_ERROR_MQTT5_ENCODE_FAILURE; break; } /* if encoding finished: * push to write completion or unacked * clear current * else (message full) * break */ if (encoding_result == AWS_MQTT5_ER_FINISHED) { aws_mqtt5_client_flow_control_state_on_outbound_operation(client, current_operation); if (s_operation_requires_ack(current_operation)) { /* track the operation in the unacked data structures by packet id */ AWS_FATAL_ASSERT(aws_mqtt5_operation_get_packet_id(current_operation) != 0); if (aws_hash_table_put( &client_operational_state->unacked_operations_table, aws_mqtt5_operation_get_packet_id_address(current_operation), current_operation, NULL)) { operational_error_code = aws_last_error(); break; } uint32_t ack_timeout_seconds = aws_mqtt5_operation_get_ack_timeout_override(current_operation); if (ack_timeout_seconds == 0) { ack_timeout_seconds = client->config->ack_timeout_seconds; } if (ack_timeout_seconds > 0) { current_operation->ack_timeout_timepoint_ns = now + aws_timestamp_convert(ack_timeout_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); } else { current_operation->ack_timeout_timepoint_ns = UINT64_MAX; } if (aws_priority_queue_push_ref( &client_operational_state->operations_by_ack_timeout, (void *)&current_operation, &current_operation->priority_queue_node)) { operational_error_code = aws_last_error(); break; } aws_linked_list_push_back(&client_operational_state->unacked_operations, &current_operation->node); aws_mqtt5_client_statistics_change_operation_statistic_state( client, current_operation, AWS_MQTT5_OSS_INCOMPLETE | AWS_MQTT5_OSS_UNACKED); } else { /* no ack is necessary, just add to socket write completion list */ aws_linked_list_push_back( &client_operational_state->write_completion_operations, &current_operation->node); /* * We special-case setting the ping timeout here. Other possible places are not appropriate: * * (1) Socket write completion - this leads to a race condition where our domain socket tests can * sporadically fail because the PINGRESP is processed before the write completion callback is * invoked. * * (2) Enqueue the ping - if the current operation is a large payload over a poor connection, it may * be an arbitrarily long time before the current operation completes and the ping even has a chance * to go out, meaning we will trigger a ping time out before it's even sent.
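 *
 * Editorial example (not part of the original source): with a 30 second ping timeout, arming the timer here
 * gives next_ping_timeout_time = now + 30s as soon as the PINGREQ has been fully encoded into the outgoing io
 * message, whereas (1) would arm it only after the write completion fires and (2) would arm it while the
 * PINGREQ could still be waiting behind a half-written multi-megabyte PUBLISH.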
* * Given a reasonable io message size, this is the best place to set the timeout. */ if (current_operation->packet_type == AWS_MQTT5_PT_PINGREQ) { s_on_pingreq_send(client); } } client->operational_state.current_operation = NULL; } else { AWS_FATAL_ASSERT(encoding_result == AWS_MQTT5_ER_OUT_OF_ROOM); break; } now = (*vtable->get_current_time_fn)(); should_service = s_aws_mqtt5_client_should_service_operational_state(client_operational_state, now); } while (should_service); if (operational_error_code != AWS_ERROR_SUCCESS) { aws_mem_release(io_message->allocator, io_message); return aws_raise_error(operational_error_code); } /* It's possible for there to be no data if we serviced operations that failed validation */ if (io_message->message_data.len == 0) { aws_mem_release(io_message->allocator, io_message); return AWS_OP_SUCCESS; } /* send io_message down channel in write direction, handle errors */ io_message->on_completion = s_aws_mqtt5_on_socket_write_completion; io_message->user_data = client_operational_state->client; client_operational_state->pending_write_completion = true; if ((*vtable->aws_channel_slot_send_message_fn)( slot, io_message, AWS_CHANNEL_DIR_WRITE, vtable->vtable_user_data)) { client_operational_state->pending_write_completion = false; aws_mem_release(io_message->allocator, io_message); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_client_operational_state_handle_ack( struct aws_mqtt5_client_operational_state *client_operational_state, aws_mqtt5_packet_id_t packet_id, enum aws_mqtt5_packet_type packet_type, const void *packet_view, int error_code) { if (packet_type == AWS_MQTT5_PT_PUBACK) { aws_mqtt5_client_flow_control_state_on_puback(client_operational_state->client); } struct aws_hash_element *elem = NULL; aws_hash_table_find(&client_operational_state->unacked_operations_table, &packet_id, &elem); if (elem == NULL || elem->value == NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: received an ACK for an unknown operation with id %d", (void *)client_operational_state->client, (int)packet_id); return; } else { AWS_LOGF_TRACE( AWS_LS_MQTT5_CLIENT, "id=%p: Processing ACK with id %d", (void *)client_operational_state->client, (int)packet_id); } struct aws_mqtt5_operation *operation = elem->value; aws_linked_list_remove(&operation->node); aws_hash_table_remove(&client_operational_state->unacked_operations_table, &packet_id, NULL, NULL); s_complete_operation(client_operational_state->client, operation, error_code, packet_type, packet_view); } bool aws_mqtt5_client_are_negotiated_settings_valid(const struct aws_mqtt5_client *client) { return client->current_state == AWS_MCS_CONNECTED || client->current_state == AWS_MCS_CLEAN_DISCONNECT; } void aws_mqtt5_client_flow_control_state_init(struct aws_mqtt5_client *client) { struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state; struct aws_rate_limiter_token_bucket_options publish_throttle_config = { .tokens_per_second = AWS_IOT_CORE_PUBLISH_PER_SECOND_LIMIT, .maximum_token_count = AWS_IOT_CORE_PUBLISH_PER_SECOND_LIMIT, .initial_token_count = 0, }; aws_rate_limiter_token_bucket_init(&flow_control->publish_throttle, &publish_throttle_config); struct aws_rate_limiter_token_bucket_options throughput_throttle_config = { .tokens_per_second = AWS_IOT_CORE_THROUGHPUT_LIMIT, .maximum_token_count = AWS_IOT_CORE_THROUGHPUT_LIMIT, .initial_token_count = 0, }; aws_rate_limiter_token_bucket_init(&flow_control->throughput_throttle, &throughput_throttle_config); } void 
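/*
 * Editorial note (not part of the original source): the limits managed by this flow control state interact as
 * follows. The publish and throughput token buckets model AWS IoT Core service limits and are only consulted
 * when extended validation/flow control is enabled, while unacked_publish_token_count enforces the MQTT5
 * receive maximum negotiated in the CONNACK on every connection. For example, with a receive maximum of 10,
 * an 11th outbound qos1 PUBLISH stays queued until a PUBACK arrives and
 * aws_mqtt5_client_flow_control_state_on_puback() wakes the service task again.
 */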
aws_mqtt5_client_flow_control_state_reset(struct aws_mqtt5_client *client) { struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state; AWS_FATAL_ASSERT(aws_mqtt5_client_are_negotiated_settings_valid(client)); flow_control->unacked_publish_token_count = client->negotiated_settings.receive_maximum_from_server; aws_rate_limiter_token_bucket_reset(&client->flow_control_state.publish_throttle); aws_rate_limiter_token_bucket_reset(&client->flow_control_state.throughput_throttle); } void aws_mqtt5_client_flow_control_state_on_puback(struct aws_mqtt5_client *client) { struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state; bool was_zero = flow_control->unacked_publish_token_count == 0; flow_control->unacked_publish_token_count = aws_min_u32( client->negotiated_settings.receive_maximum_from_server, flow_control->unacked_publish_token_count + 1); if (was_zero) { s_reevaluate_service_task(client); } } void aws_mqtt5_client_flow_control_state_on_outbound_operation( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) { if (operation->packet_type != AWS_MQTT5_PT_PUBLISH) { return; } const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view; if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) { return; } struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state; AWS_FATAL_ASSERT(flow_control->unacked_publish_token_count > 0); --flow_control->unacked_publish_token_count; } uint64_t aws_mqtt5_client_flow_control_state_get_next_operation_service_time( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *next_operation, uint64_t now) { if (next_operation->packet_type != AWS_MQTT5_PT_PUBLISH) { return now; } /* publish tps check */ if (client->config->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) { uint64_t publish_wait = aws_rate_limiter_token_bucket_compute_wait_for_tokens(&client->flow_control_state.publish_throttle, 1); if (publish_wait > 0) { return now + publish_wait; } } /* receive maximum check */ const struct aws_mqtt5_packet_publish_view *publish_view = next_operation->packet_view; if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) { return now; } if (client->flow_control_state.unacked_publish_token_count > 0) { return now; } return 0; } void aws_mqtt5_client_statistics_change_operation_statistic_state( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, enum aws_mqtt5_operation_statistic_state_flags new_state_flags) { enum aws_mqtt5_packet_type packet_type = operation->packet_type; if (packet_type != AWS_MQTT5_PT_PUBLISH && packet_type != AWS_MQTT5_PT_SUBSCRIBE && packet_type != AWS_MQTT5_PT_UNSUBSCRIBE) { return; } if (operation->packet_size == 0) { if (aws_mqtt5_packet_view_get_encoded_size(packet_type, operation->packet_view, &operation->packet_size)) { return; } } AWS_FATAL_ASSERT(operation->packet_size > 0); uint64_t packet_size = (uint64_t)operation->packet_size; enum aws_mqtt5_operation_statistic_state_flags old_state_flags = operation->statistic_state_flags; if (new_state_flags == old_state_flags) { return; } struct aws_mqtt5_client_operation_statistics_impl *stats = &client->operation_statistics_impl; if ((old_state_flags & AWS_MQTT5_OSS_INCOMPLETE) != (new_state_flags & AWS_MQTT5_OSS_INCOMPLETE)) { if ((new_state_flags & AWS_MQTT5_OSS_INCOMPLETE) != 0) { aws_atomic_fetch_add(&stats->incomplete_operation_count_atomic, 1); aws_atomic_fetch_add(&stats->incomplete_operation_size_atomic, 
(size_t)packet_size); } else { aws_atomic_fetch_sub(&stats->incomplete_operation_count_atomic, 1); aws_atomic_fetch_sub(&stats->incomplete_operation_size_atomic, (size_t)packet_size); } } if ((old_state_flags & AWS_MQTT5_OSS_UNACKED) != (new_state_flags & AWS_MQTT5_OSS_UNACKED)) { if ((new_state_flags & AWS_MQTT5_OSS_UNACKED) != 0) { aws_atomic_fetch_add(&stats->unacked_operation_count_atomic, 1); aws_atomic_fetch_add(&stats->unacked_operation_size_atomic, (size_t)packet_size); } else { aws_atomic_fetch_sub(&stats->unacked_operation_count_atomic, 1); aws_atomic_fetch_sub(&stats->unacked_operation_size_atomic, (size_t)packet_size); } } operation->statistic_state_flags = new_state_flags; if (client->vtable != NULL && client->vtable->on_client_statistics_changed_callback_fn != NULL) { (*client->vtable->on_client_statistics_changed_callback_fn)( client, operation, client->vtable->vtable_user_data); } } void aws_mqtt5_client_get_stats(struct aws_mqtt5_client *client, struct aws_mqtt5_client_operation_statistics *stats) { stats->incomplete_operation_count = (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.incomplete_operation_count_atomic); stats->incomplete_operation_size = (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.incomplete_operation_size_atomic); stats->unacked_operation_count = (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.unacked_operation_count_atomic); stats->unacked_operation_size = (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.unacked_operation_size_atomic); } bool aws_mqtt5_client_reset_connection(struct aws_mqtt5_client *client) { AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(client->loop)); client->current_reconnect_delay_ms = client->config->min_reconnect_delay_ms; switch (client->current_state) { case AWS_MCS_MQTT_CONNECT: case AWS_MCS_CONNECTED: s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT); return true; case AWS_MCS_CONNECTING: client->should_reset_connection = true; return true; default: break; } return false; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_decoder.c000066400000000000000000001212361456575232400247570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #define AWS_MQTT5_DECODER_BUFFER_START_SIZE 2048 #define PUBLISH_PACKET_FIXED_HEADER_DUPLICATE_FLAG 8 #define PUBLISH_PACKET_FIXED_HEADER_RETAIN_FLAG 1 #define PUBLISH_PACKET_FIXED_HEADER_QOS_FLAG 3 static void s_reset_decoder_for_new_packet(struct aws_mqtt5_decoder *decoder) { aws_byte_buf_reset(&decoder->scratch_space, false); decoder->packet_first_byte = 0; decoder->remaining_length = 0; AWS_ZERO_STRUCT(decoder->packet_cursor); } static void s_enter_state(struct aws_mqtt5_decoder *decoder, enum aws_mqtt5_decoder_state state) { decoder->state = state; if (state == AWS_MQTT5_DS_READ_PACKET_TYPE) { s_reset_decoder_for_new_packet(decoder); } else { aws_byte_buf_reset(&decoder->scratch_space, false); } } static bool s_is_decodable_packet_type(struct aws_mqtt5_decoder *decoder, enum aws_mqtt5_packet_type type) { return (uint32_t)type < AWS_ARRAY_SIZE(decoder->options.decoder_table->decoders_by_packet_type) && decoder->options.decoder_table->decoders_by_packet_type[type] != NULL; } /* * Every mqtt packet has a first byte that, amongst other things, determines the packet type */ static int s_aws_mqtt5_decoder_read_packet_type_on_data( struct aws_mqtt5_decoder *decoder, struct aws_byte_cursor *data) { if (data->len == 0) { return AWS_MQTT5_DRT_MORE_DATA; } uint8_t byte = *data->ptr; aws_byte_cursor_advance(data, 1); aws_byte_buf_append_byte_dynamic(&decoder->scratch_space, byte); enum aws_mqtt5_packet_type packet_type = (byte >> 4); if (!s_is_decodable_packet_type(decoder, packet_type)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: unsupported or illegal packet type value: %d", decoder->options.callback_user_data, (int)packet_type); return AWS_MQTT5_DRT_ERROR; } decoder->packet_first_byte = byte; s_enter_state(decoder, AWS_MQTT5_DS_READ_REMAINING_LENGTH); return AWS_MQTT5_DRT_SUCCESS; } /* * non-streaming variable length integer decode. 
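 * For example (editorial, not part of the original source), the two byte sequence 0xC1 0x02 decodes to
 * (0xC1 & 0x7F) + (0x02 << 7) = 65 + 256 = 321, the high bit of 0xC1 marking a continuation byte.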
cursor is updated only if the value was successfully read */ enum aws_mqtt5_decode_result_type aws_mqtt5_decode_vli(struct aws_byte_cursor *cursor, uint32_t *dest) { uint32_t value = 0; bool more_data = false; size_t bytes_used = 0; uint32_t shift = 0; struct aws_byte_cursor cursor_copy = *cursor; for (; bytes_used < 4; ++bytes_used) { uint8_t byte = 0; if (!aws_byte_cursor_read_u8(&cursor_copy, &byte)) { return AWS_MQTT5_DRT_MORE_DATA; } value |= ((byte & 0x7F) << shift); shift += 7; more_data = (byte & 0x80) != 0; if (!more_data) { break; } } if (more_data) { /* A variable length integer with the 4th byte high bit set is not valid */ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "(static) aws_mqtt5_decoder - illegal variable length integer encoding"); return AWS_MQTT5_DRT_ERROR; } aws_byte_cursor_advance(cursor, bytes_used + 1); *dest = value; return AWS_MQTT5_DRT_SUCCESS; } /* "streaming" variable length integer decode */ static enum aws_mqtt5_decode_result_type s_aws_mqtt5_decoder_read_vli_on_data( struct aws_mqtt5_decoder *decoder, uint32_t *vli_dest, struct aws_byte_cursor *data) { enum aws_mqtt5_decode_result_type decode_vli_result = AWS_MQTT5_DRT_MORE_DATA; /* try to decode the vli integer one byte at a time */ while (data->len > 0 && decode_vli_result == AWS_MQTT5_DRT_MORE_DATA) { /* append a single byte to the scratch buffer */ struct aws_byte_cursor byte_cursor = aws_byte_cursor_advance(data, 1); aws_byte_buf_append_dynamic(&decoder->scratch_space, &byte_cursor); /* now try and decode a vli integer based on the range implied by the offset into the buffer */ struct aws_byte_cursor vli_cursor = { .ptr = decoder->scratch_space.buffer, .len = decoder->scratch_space.len, }; decode_vli_result = aws_mqtt5_decode_vli(&vli_cursor, vli_dest); } return decode_vli_result; } /* attempts to read the variable length integer that is always the second piece of data in an mqtt packet */ static enum aws_mqtt5_decode_result_type s_aws_mqtt5_decoder_read_remaining_length_on_data( struct aws_mqtt5_decoder *decoder, struct aws_byte_cursor *data) { enum aws_mqtt5_decode_result_type result = s_aws_mqtt5_decoder_read_vli_on_data(decoder, &decoder->remaining_length, data); if (result != AWS_MQTT5_DRT_SUCCESS) { return result; } s_enter_state(decoder, AWS_MQTT5_DS_READ_PACKET); return AWS_MQTT5_DRT_SUCCESS; } /* non-streaming decode of a user property; failure implies connection termination */ int aws_mqtt5_decode_user_property( struct aws_byte_cursor *packet_cursor, struct aws_mqtt5_user_property_set *properties) { struct aws_mqtt5_user_property property; AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(packet_cursor, &property.name, error); AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(packet_cursor, &property.value, error); if (aws_array_list_push_back(&properties->properties, &property)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; error: return AWS_OP_ERR; } /* decode function for all CONNACK properties */ static int s_read_connack_property( struct aws_mqtt5_packet_connack_storage *storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_connack_view *storage_view = &storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &storage->session_expiry_interval, &storage_view->session_expiry_interval, done); break; case AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM: AWS_MQTT5_DECODE_U16_OPTIONAL( packet_cursor, 
&storage->receive_maximum, &storage_view->receive_maximum, done); break; case AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_QOS: AWS_MQTT5_DECODE_U8_OPTIONAL(packet_cursor, &storage->maximum_qos, &storage_view->maximum_qos, done); break; case AWS_MQTT5_PROPERTY_TYPE_RETAIN_AVAILABLE: AWS_MQTT5_DECODE_U8_OPTIONAL( packet_cursor, &storage->retain_available, &storage_view->retain_available, done); break; case AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &storage->maximum_packet_size, &storage_view->maximum_packet_size, done); break; case AWS_MQTT5_PROPERTY_TYPE_ASSIGNED_CLIENT_IDENTIFIER: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->assigned_client_identifier, &storage_view->assigned_client_identifier, done); break; case AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM: AWS_MQTT5_DECODE_U16_OPTIONAL( packet_cursor, &storage->topic_alias_maximum, &storage_view->topic_alias_maximum, done); break; case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->reason_string, &storage_view->reason_string, done); break; case AWS_MQTT5_PROPERTY_TYPE_WILDCARD_SUBSCRIPTIONS_AVAILABLE: AWS_MQTT5_DECODE_U8_OPTIONAL( packet_cursor, &storage->wildcard_subscriptions_available, &storage_view->wildcard_subscriptions_available, done); break; case AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIERS_AVAILABLE: AWS_MQTT5_DECODE_U8_OPTIONAL( packet_cursor, &storage->subscription_identifiers_available, &storage_view->subscription_identifiers_available, done); break; case AWS_MQTT5_PROPERTY_TYPE_SHARED_SUBSCRIPTIONS_AVAILABLE: AWS_MQTT5_DECODE_U8_OPTIONAL( packet_cursor, &storage->shared_subscriptions_available, &storage_view->shared_subscriptions_available, done); break; case AWS_MQTT5_PROPERTY_TYPE_SERVER_KEEP_ALIVE: AWS_MQTT5_DECODE_U16_OPTIONAL( packet_cursor, &storage->server_keep_alive, &storage_view->server_keep_alive, done); break; case AWS_MQTT5_PROPERTY_TYPE_RESPONSE_INFORMATION: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->response_information, &storage_view->response_information, done); break; case AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->server_reference, &storage_view->server_reference, done); break; case AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->authentication_method, &storage_view->authentication_method, done); break; case AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->authentication_data, &storage_view->authentication_data, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read CONNACK property decode failure"); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* decodes a CONNACK packet whose data must be in the scratch buffer */ static int s_aws_mqtt5_decoder_decode_connack(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_connack_storage storage; if (aws_mqtt5_packet_connack_storage_init_from_external_storage(&storage, decoder->allocator)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; uint8_t first_byte = decoder->packet_first_byte; /* CONNACK flags must be zero by 
protocol */ if ((first_byte & 0x0F) != 0) { goto done; } struct aws_byte_cursor packet_cursor = decoder->packet_cursor; uint32_t remaining_length = decoder->remaining_length; if (remaining_length != (uint32_t)packet_cursor.len) { goto done; } uint8_t connect_flags = 0; AWS_MQTT5_DECODE_U8(&packet_cursor, &connect_flags, done); /* everything but the 0-bit must be 0 */ if ((connect_flags & 0xFE) != 0) { goto done; } struct aws_mqtt5_packet_connack_view *storage_view = &storage.storage_view; storage_view->session_present = (connect_flags & 0x01) != 0; uint8_t reason_code = 0; AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done); storage_view->reason_code = reason_code; uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); if (property_length != (uint32_t)packet_cursor.len) { goto done; } while (packet_cursor.len > 0) { if (s_read_connack_property(&storage, &packet_cursor)) { goto done; } } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties); storage_view->user_properties = storage.user_properties.properties.data; result = AWS_OP_SUCCESS; done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_CONNACK, &storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: CONNACK decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_connack_storage_clean_up(&storage); return result; } /* decode function for all PUBLISH properties */ static int s_read_publish_property( struct aws_mqtt5_packet_publish_storage *storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_publish_view *storage_view = &storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR: AWS_MQTT5_DECODE_U8_OPTIONAL(packet_cursor, &storage->payload_format, &storage_view->payload_format, done); break; case AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &storage->message_expiry_interval_seconds, &storage_view->message_expiry_interval_seconds, done); break; case AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS: AWS_MQTT5_DECODE_U16_OPTIONAL(packet_cursor, &storage->topic_alias, &storage_view->topic_alias, done); break; case AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->response_topic, &storage_view->response_topic, done); break; case AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->correlation_data, &storage_view->correlation_data, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) { goto done; } break; case AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER: { uint32_t subscription_identifier = 0; AWS_MQTT5_DECODE_VLI(packet_cursor, &subscription_identifier, done); aws_array_list_push_back(&storage->subscription_identifiers, &subscription_identifier); break; } case AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->content_type, &storage_view->content_type, done); break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { 
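/* Unknown or malformed PUBLISH properties all land on this failure path; as with user-property decoding above, the raised decode protocol error is treated as fatal for the connection. */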
AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read PUBLISH property decode failure"); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* decodes a PUBLISH packet whose data must be in the scratch buffer */ static int s_aws_mqtt5_decoder_decode_publish(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_publish_storage storage; if (aws_mqtt5_packet_publish_storage_init_from_external_storage(&storage, decoder->allocator)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; struct aws_mqtt5_packet_publish_view *storage_view = &storage.storage_view; /* * Fixed Header * byte 1: * bits 4-7: MQTT Control Packet Type * bit 3: DUP flag * bit 1-2: QoS level * bit 0: RETAIN * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes) */ uint8_t first_byte = decoder->packet_first_byte; if ((first_byte & PUBLISH_PACKET_FIXED_HEADER_DUPLICATE_FLAG) != 0) { storage_view->duplicate = true; } if ((first_byte & PUBLISH_PACKET_FIXED_HEADER_RETAIN_FLAG) != 0) { storage_view->retain = true; } storage_view->qos = (enum aws_mqtt5_qos)((first_byte >> 1) & PUBLISH_PACKET_FIXED_HEADER_QOS_FLAG); struct aws_byte_cursor packet_cursor = decoder->packet_cursor; uint32_t remaining_length = decoder->remaining_length; if (remaining_length != (uint32_t)packet_cursor.len) { goto done; } /* * Topic Name * Packet Identifier (only present for > QoS 0) * Properties * - Property Length * - Properties * Payload */ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &storage_view->topic, done); if (storage_view->qos > 0) { AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done); } uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); if (property_length > (uint32_t)packet_cursor.len) { goto done; } struct aws_byte_cursor properties_cursor = aws_byte_cursor_advance(&packet_cursor, property_length); while (properties_cursor.len > 0) { if (s_read_publish_property(&storage, &properties_cursor)) { goto done; } } storage_view->subscription_identifier_count = aws_array_list_length(&storage.subscription_identifiers); storage_view->subscription_identifiers = storage.subscription_identifiers.data; storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties); storage_view->user_properties = storage.user_properties.properties.data; storage_view->payload = packet_cursor; if (storage_view->topic_alias != NULL) { if (decoder->topic_alias_resolver == NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: PUBLISH packet contained topic alias when not allowed", decoder->options.callback_user_data); goto done; } uint16_t topic_alias_id = *storage_view->topic_alias; if (topic_alias_id == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: PUBLISH packet contained illegal topic alias", decoder->options.callback_user_data); goto done; } if (storage_view->topic.len > 0) { if (aws_mqtt5_inbound_topic_alias_resolver_register_alias( decoder->topic_alias_resolver, topic_alias_id, storage_view->topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: unable to register topic alias", decoder->options.callback_user_data); goto done; } } else { if (aws_mqtt5_inbound_topic_alias_resolver_resolve_alias( decoder->topic_alias_resolver, topic_alias_id, &storage_view->topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: PUBLISH packet contained unknown topic alias", decoder->options.callback_user_data); goto done; } } } result = AWS_OP_SUCCESS; done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = 
(*decoder->options.on_packet_received)( AWS_MQTT5_PT_PUBLISH, &storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: PUBLISH decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_publish_storage_clean_up(&storage); return result; } /* decode function for all PUBACK properties */ static int s_read_puback_property( struct aws_mqtt5_packet_puback_storage *storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_puback_view *storage_view = &storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->reason_string, &storage_view->reason_string, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read PUBACK property decode failure"); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* decodes a PUBACK packet whose data must be in the scratch buffer */ static int s_aws_mqtt5_decoder_decode_puback(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_puback_storage storage; if (aws_mqtt5_packet_puback_storage_init_from_external_storage(&storage, decoder->allocator)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; uint8_t first_byte = decoder->packet_first_byte; /* PUBACK flags must be zero by protocol */ if ((first_byte & 0x0F) != 0) { goto done; } struct aws_byte_cursor packet_cursor = decoder->packet_cursor; uint32_t remaining_length = decoder->remaining_length; if (remaining_length != (uint32_t)packet_cursor.len) { goto done; } struct aws_mqtt5_packet_puback_view *storage_view = &storage.storage_view; AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done); /* Packet can end immediately following packet id with default success reason code */ uint8_t reason_code = 0; if (packet_cursor.len > 0) { AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done); /* Packet can end immediately following reason code */ if (packet_cursor.len > 0) { uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); if (property_length != (uint32_t)packet_cursor.len) { goto done; } while (packet_cursor.len > 0) { if (s_read_puback_property(&storage, &packet_cursor)) { goto done; } } } } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties); storage_view->user_properties = storage.user_properties.properties.data; storage_view->reason_code = reason_code; result = AWS_OP_SUCCESS; done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_PUBACK, &storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) aws_mqtt5_decoder - PUBACK decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_puback_storage_clean_up(&storage); return result; } /* decode function for all SUBACK properties */ static int s_read_suback_property( struct aws_mqtt5_packet_suback_storage *storage, struct aws_byte_cursor 
*packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_suback_view *storage_view = &storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->reason_string, &storage_view->reason_string, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read SUBACK property decode failure"); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* decodes a SUBACK packet whose data must be in the scratch buffer */ static int s_aws_mqtt5_decoder_decode_suback(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_suback_storage storage; if (aws_mqtt5_packet_suback_storage_init_from_external_storage(&storage, decoder->allocator)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; struct aws_mqtt5_packet_suback_view *storage_view = &storage.storage_view; struct aws_byte_cursor packet_cursor = decoder->packet_cursor; AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done); uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); struct aws_byte_cursor properties_cursor = aws_byte_cursor_advance(&packet_cursor, property_length); while (properties_cursor.len > 0) { if (s_read_suback_property(&storage, &properties_cursor)) { goto done; } } aws_array_list_init_dynamic( &storage.reason_codes, decoder->allocator, packet_cursor.len, sizeof(enum aws_mqtt5_suback_reason_code)); while (packet_cursor.len > 0) { uint8_t reason_code; AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done); enum aws_mqtt5_suback_reason_code reason_code_enum = reason_code; aws_array_list_push_back(&storage.reason_codes, &reason_code_enum); } storage_view->reason_code_count = aws_array_list_length(&storage.reason_codes); storage_view->reason_codes = storage.reason_codes.data; storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties); storage_view->user_properties = storage.user_properties.properties.data; result = AWS_OP_SUCCESS; done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_SUBACK, &storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) aws_mqtt5_decoder - SUBACK decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_suback_storage_clean_up(&storage); return result; } /* decode function for all UNSUBACK properties */ static int s_read_unsuback_property( struct aws_mqtt5_packet_unsuback_storage *storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_unsuback_view *storage_view = &storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->reason_string, &storage_view->reason_string, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) { goto done; } break; 
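/* Reason String and User Property are the only properties the MQTT5 spec allows in an UNSUBACK; any other identifier falls through to the protocol-error default below. */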
default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read UNSUBACK property decode failure"); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* decodes an UNSUBACK packet whose data must be in the scratch buffer */ static int s_aws_mqtt5_decoder_decode_unsuback(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_unsuback_storage storage; /* * Fixed Header * byte 1: MQTT5 Control Packet - Reserved 0 * byte 2 - x: VLI Remaining Length * * Variable Header * byte 1-2: Packet Identifier * byte 3 - x: VLI Property Length * * Properties * byte 1: Identifier * bytes 2 - x: Property content * * Payload * 1 byte per reason code in order of unsub requests */ if (aws_mqtt5_packet_unsuback_storage_init_from_external_storage(&storage, decoder->allocator)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; struct aws_byte_cursor packet_cursor = decoder->packet_cursor; struct aws_mqtt5_packet_unsuback_view *storage_view = &storage.storage_view; AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done); uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); struct aws_byte_cursor properties_cursor = aws_byte_cursor_advance(&packet_cursor, property_length); while (properties_cursor.len > 0) { if (s_read_unsuback_property(&storage, &properties_cursor)) { goto done; } } aws_array_list_init_dynamic( &storage.reason_codes, decoder->allocator, packet_cursor.len, sizeof(enum aws_mqtt5_unsuback_reason_code)); while (packet_cursor.len > 0) { uint8_t reason_code; AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done); enum aws_mqtt5_unsuback_reason_code reason_code_enum = reason_code; aws_array_list_push_back(&storage.reason_codes, &reason_code_enum); } storage_view->reason_code_count = aws_array_list_length(&storage.reason_codes); storage_view->reason_codes = storage.reason_codes.data; storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties); storage_view->user_properties = storage.user_properties.properties.data; result = AWS_OP_SUCCESS; done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_UNSUBACK, &storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) aws_mqtt5_decoder - UNSUBACK decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_unsuback_storage_clean_up(&storage); return result; } /* decode function for all DISCONNECT properties */ static int s_read_disconnect_property( struct aws_mqtt5_packet_disconnect_storage *storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_disconnect_view *storage_view = &storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &storage->session_expiry_interval_seconds, &storage_view->session_expiry_interval_seconds, done); break; case AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->server_reference, &storage_view->server_reference, done); break; case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->reason_string,
&storage_view->reason_string, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result == AWS_OP_ERR) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read DISCONNECT property decode failure"); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* decodes a DISCONNECT packet whose data must be in the scratch buffer */ static int s_aws_mqtt5_decoder_decode_disconnect(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_disconnect_storage storage; if (aws_mqtt5_packet_disconnect_storage_init_from_external_storage(&storage, decoder->allocator)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; uint8_t first_byte = decoder->packet_first_byte; /* DISCONNECT flags must be zero by protocol */ if ((first_byte & 0x0F) != 0) { goto done; } struct aws_byte_cursor packet_cursor = decoder->packet_cursor; uint32_t remaining_length = decoder->remaining_length; if (remaining_length != (uint32_t)packet_cursor.len) { goto done; } struct aws_mqtt5_packet_disconnect_view *storage_view = &storage.storage_view; if (remaining_length > 0) { uint8_t reason_code = 0; AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done); storage_view->reason_code = reason_code; if (packet_cursor.len == 0) { result = AWS_OP_SUCCESS; goto done; } uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); if (property_length != (uint32_t)packet_cursor.len) { goto done; } while (packet_cursor.len > 0) { if (s_read_disconnect_property(&storage, &packet_cursor)) { goto done; } } } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties); storage_view->user_properties = storage.user_properties.properties.data; result = AWS_OP_SUCCESS; done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_DISCONNECT, &storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: DISCONNECT decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_disconnect_storage_clean_up(&storage); return result; } static int s_aws_mqtt5_decoder_decode_pingresp(struct aws_mqtt5_decoder *decoder) { if (decoder->packet_cursor.len != 0) { goto error; } uint8_t expected_first_byte = aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PINGRESP, 0); if (decoder->packet_first_byte != expected_first_byte || decoder->remaining_length != 0) { goto error; } int result = AWS_OP_SUCCESS; if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)(AWS_MQTT5_PT_PINGRESP, NULL, decoder->options.callback_user_data); } return result; error: AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: PINGRESP decode failure", decoder->options.callback_user_data); return aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } static int s_aws_mqtt5_decoder_decode_packet(struct aws_mqtt5_decoder *decoder) { enum aws_mqtt5_packet_type packet_type = (enum aws_mqtt5_packet_type)(decoder->packet_first_byte >> 4); aws_mqtt5_decoding_fn *decoder_fn = decoder->options.decoder_table->decoders_by_packet_type[packet_type]; if (decoder_fn == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Decoder decode packet function missing for enum: %d", packet_type); return 
aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return (*decoder_fn)(decoder); } /* * (Streaming) Given a packet type and a variable length integer specifying the packet length, this state either * (1) decodes directly from the cursor if possible * (2) reads the packet into the scratch buffer and then decodes it once it is completely present * */ static enum aws_mqtt5_decode_result_type s_aws_mqtt5_decoder_read_packet_on_data( struct aws_mqtt5_decoder *decoder, struct aws_byte_cursor *data) { /* Are we able to decode directly from the channel message data buffer? */ if (decoder->scratch_space.len == 0 && decoder->remaining_length <= data->len) { /* The cursor contains the entire packet, so decode directly from the backing io message buffer */ decoder->packet_cursor = aws_byte_cursor_advance(data, decoder->remaining_length); } else { /* If the packet is fragmented across multiple io messages, then we buffer it internally */ size_t unread_length = decoder->remaining_length - decoder->scratch_space.len; size_t copy_length = aws_min_size(unread_length, data->len); struct aws_byte_cursor copy_cursor = aws_byte_cursor_advance(data, copy_length); if (aws_byte_buf_append_dynamic(&decoder->scratch_space, &copy_cursor)) { return AWS_MQTT5_DRT_ERROR; } if (copy_length < unread_length) { return AWS_MQTT5_DRT_MORE_DATA; } decoder->packet_cursor = aws_byte_cursor_from_buf(&decoder->scratch_space); } if (s_aws_mqtt5_decoder_decode_packet(decoder)) { return AWS_MQTT5_DRT_ERROR; } s_enter_state(decoder, AWS_MQTT5_DS_READ_PACKET_TYPE); return AWS_MQTT5_DRT_SUCCESS; } /* top-level entry function for all new data received from the remote mqtt endpoint */ int aws_mqtt5_decoder_on_data_received(struct aws_mqtt5_decoder *decoder, struct aws_byte_cursor data) { enum aws_mqtt5_decode_result_type result = AWS_MQTT5_DRT_SUCCESS; while (result == AWS_MQTT5_DRT_SUCCESS) { switch (decoder->state) { case AWS_MQTT5_DS_READ_PACKET_TYPE: result = s_aws_mqtt5_decoder_read_packet_type_on_data(decoder, &data); break; case AWS_MQTT5_DS_READ_REMAINING_LENGTH: result = s_aws_mqtt5_decoder_read_remaining_length_on_data(decoder, &data); break; case AWS_MQTT5_DS_READ_PACKET: result = s_aws_mqtt5_decoder_read_packet_on_data(decoder, &data); break; default: result = AWS_MQTT5_DRT_ERROR; break; } } if (result == AWS_MQTT5_DRT_ERROR) { aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); decoder->state = AWS_MQTT5_DS_FATAL_ERROR; return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static struct aws_mqtt5_decoder_function_table s_aws_mqtt5_decoder_default_function_table = { .decoders_by_packet_type = { NULL, /* RESERVED = 0 */ NULL, /* CONNECT */ &s_aws_mqtt5_decoder_decode_connack, /* CONNACK */ &s_aws_mqtt5_decoder_decode_publish, /* PUBLISH */ &s_aws_mqtt5_decoder_decode_puback, /* PUBACK */ NULL, /* PUBREC */ NULL, /* PUBREL */ NULL, /* PUBCOMP */ NULL, /* SUBSCRIBE */ &s_aws_mqtt5_decoder_decode_suback, /* SUBACK */ NULL, /* UNSUBSCRIBE */ &s_aws_mqtt5_decoder_decode_unsuback, /* UNSUBACK */ NULL, /* PINGREQ */ &s_aws_mqtt5_decoder_decode_pingresp, /* PINGRESP */ &s_aws_mqtt5_decoder_decode_disconnect, /* DISCONNECT */ NULL /* AUTH */ }, }; const struct aws_mqtt5_decoder_function_table *g_aws_mqtt5_default_decoder_table = &s_aws_mqtt5_decoder_default_function_table; int aws_mqtt5_decoder_init( struct aws_mqtt5_decoder *decoder, struct aws_allocator *allocator, struct aws_mqtt5_decoder_options *options) { AWS_ZERO_STRUCT(*decoder); decoder->options = *options; if (decoder->options.decoder_table == NULL) {
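/* No table supplied by the caller: fall back to the default client decoder table defined above, which provides decoders for CONNACK, PUBLISH, PUBACK, SUBACK, UNSUBACK, PINGRESP, and DISCONNECT packets. */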
decoder->options.decoder_table = g_aws_mqtt5_default_decoder_table; } decoder->allocator = allocator; decoder->state = AWS_MQTT5_DS_READ_PACKET_TYPE; if (aws_byte_buf_init(&decoder->scratch_space, allocator, AWS_MQTT5_DECODER_BUFFER_START_SIZE)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_decoder_reset(struct aws_mqtt5_decoder *decoder) { s_reset_decoder_for_new_packet(decoder); decoder->state = AWS_MQTT5_DS_READ_PACKET_TYPE; } void aws_mqtt5_decoder_clean_up(struct aws_mqtt5_decoder *decoder) { aws_byte_buf_clean_up(&decoder->scratch_space); } void aws_mqtt5_decoder_set_inbound_topic_alias_resolver( struct aws_mqtt5_decoder *decoder, struct aws_mqtt5_inbound_topic_alias_resolver *resolver) { decoder->topic_alias_resolver = resolver; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_encoder.c000066400000000000000000001426271456575232400250000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #define INITIAL_ENCODING_STEP_COUNT 64 #define SUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS 2 #define UNSUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS 2 int aws_mqtt5_encode_variable_length_integer(struct aws_byte_buf *buf, uint32_t value) { AWS_PRECONDITION(buf); if (value > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } do { uint8_t encoded_byte = value % 128; value /= 128; if (value) { encoded_byte |= 128; } if (!aws_byte_buf_write_u8(buf, encoded_byte)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } while (value); return AWS_OP_SUCCESS; } int aws_mqtt5_get_variable_length_encode_size(size_t value, size_t *encode_size) { if (value > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (value < 128) { *encode_size = 1; } else if (value < 16384) { *encode_size = 2; } else if (value < 2097152) { *encode_size = 3; } else { *encode_size = 4; } return AWS_OP_SUCCESS; } /* helper functions that add a single type of encoding step to the list of steps in an encoder */ void aws_mqtt5_encoder_push_step_u8(struct aws_mqtt5_encoder *encoder, uint8_t value) { struct aws_mqtt5_encoding_step step; AWS_ZERO_STRUCT(step); step.type = AWS_MQTT5_EST_U8; step.value.value_u8 = value; aws_array_list_push_back(&encoder->encoding_steps, &step); } void aws_mqtt5_encoder_push_step_u16(struct aws_mqtt5_encoder *encoder, uint16_t value) { struct aws_mqtt5_encoding_step step; AWS_ZERO_STRUCT(step); step.type = AWS_MQTT5_EST_U16; step.value.value_u16 = value; aws_array_list_push_back(&encoder->encoding_steps, &step); } void aws_mqtt5_encoder_push_step_u32(struct aws_mqtt5_encoder *encoder, uint32_t value) { struct aws_mqtt5_encoding_step step; AWS_ZERO_STRUCT(step); step.type = AWS_MQTT5_EST_U32; step.value.value_u32 = value; aws_array_list_push_back(&encoder->encoding_steps, &step); } int aws_mqtt5_encoder_push_step_vli(struct aws_mqtt5_encoder *encoder, uint32_t value) { if (value > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) { return aws_raise_error(AWS_ERROR_MQTT5_ENCODE_FAILURE); } struct aws_mqtt5_encoding_step step; AWS_ZERO_STRUCT(step); step.type = AWS_MQTT5_EST_VLI; step.value.value_u32 = value; aws_array_list_push_back(&encoder->encoding_steps, &step); return AWS_OP_SUCCESS; } void aws_mqtt5_encoder_push_step_cursor(struct aws_mqtt5_encoder *encoder, struct aws_byte_cursor value) { struct aws_mqtt5_encoding_step step; AWS_ZERO_STRUCT(step); 
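/* The cursor is stored by value only; the bytes it references are not copied, so the referenced memory must remain valid until the encoder has finished emitting the packet. */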
step.type = AWS_MQTT5_EST_CURSOR; step.value.value_cursor = value; aws_array_list_push_back(&encoder->encoding_steps, &step); } /* * All size calculations are done with size_t. We assume that view validation will catch and fail all packets * that violate length constraints either from the MQTT5 spec or additional constraints that we impose on packets * to ensure that the size calculations do not need to perform checked arithmetic. The only place where we need * to use checked arithmetic is a PUBLISH packet when combining the payload size and "sizeof everything else" * * The additional beyond-spec constraints we apply to view validation ensure our results actually fit in 32 bits. */ size_t aws_mqtt5_compute_user_property_encode_length( const struct aws_mqtt5_user_property *properties, size_t user_property_count) { /* * for each user property, in addition to the raw name-value bytes, we also have 5 bytes of prefix required: * 1 byte for the property type * 2 bytes for the name length * 2 bytes for the value length */ size_t length = 5 * user_property_count; for (size_t i = 0; i < user_property_count; ++i) { const struct aws_mqtt5_user_property *property = &properties[i]; length += property->name.len; length += property->value.len; } return length; } void aws_mqtt5_add_user_property_encoding_steps( struct aws_mqtt5_encoder *encoder, const struct aws_mqtt5_user_property *user_properties, size_t user_property_count) { for (size_t i = 0; i < user_property_count; ++i) { const struct aws_mqtt5_user_property *property = &user_properties[i]; /* https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901054 */ ADD_ENCODE_STEP_U8(encoder, AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY); ADD_ENCODE_STEP_U16(encoder, (uint16_t)property->name.len); ADD_ENCODE_STEP_CURSOR(encoder, property->name); ADD_ENCODE_STEP_U16(encoder, (uint16_t)property->value.len); ADD_ENCODE_STEP_CURSOR(encoder, property->value); } } static int s_aws_mqtt5_encoder_begin_pingreq(struct aws_mqtt5_encoder *encoder, const void *view) { (void)view; AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: setting up encode for a PINGREQ packet", (void *)encoder->config.client); /* A ping is just a fixed header with a 0-valued remaining length which we encode as a 0 u8 rather than a 0 vli */ ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PINGREQ, 0)); ADD_ENCODE_STEP_U8(encoder, 0); return AWS_OP_SUCCESS; } static int s_compute_disconnect_variable_length_fields( const struct aws_mqtt5_packet_disconnect_view *disconnect_view, size_t *total_remaining_length, size_t *property_length) { size_t local_property_length = aws_mqtt5_compute_user_property_encode_length( disconnect_view->user_properties, disconnect_view->user_property_count); ADD_OPTIONAL_U32_PROPERTY_LENGTH(disconnect_view->session_expiry_interval_seconds, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(disconnect_view->server_reference, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(disconnect_view->reason_string, local_property_length); *property_length = local_property_length; size_t property_length_encoding_length = 0; if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &property_length_encoding_length)) { return AWS_OP_ERR; } /* reason code is the only other thing to worry about */ *total_remaining_length = 1 + *property_length + property_length_encoding_length; return AWS_OP_SUCCESS; } static int s_aws_mqtt5_encoder_begin_disconnect(struct aws_mqtt5_encoder *encoder, const void *view) { const struct 
aws_mqtt5_packet_disconnect_view *disconnect_view = view; size_t total_remaining_length = 0; size_t property_length = 0; if (s_compute_disconnect_variable_length_fields(disconnect_view, &total_remaining_length, &property_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to compute variable length values for DISCONNECT packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length; uint32_t property_length_u32 = (uint32_t)property_length; AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: setting up encode for a DISCONNECT packet with remaining length %" PRIu32, (void *)encoder->config.client, total_remaining_length_u32); ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_DISCONNECT, 0)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32); ADD_ENCODE_STEP_U8(encoder, (uint8_t)disconnect_view->reason_code); ADD_ENCODE_STEP_VLI(encoder, property_length_u32); if (property_length > 0) { ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL, disconnect_view->session_expiry_interval_seconds); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, disconnect_view->reason_string); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE, disconnect_view->server_reference); aws_mqtt5_add_user_property_encoding_steps( encoder, disconnect_view->user_properties, disconnect_view->user_property_count); } return AWS_OP_SUCCESS; } static int s_compute_connect_variable_length_fields( const struct aws_mqtt5_packet_connect_view *connect_view, size_t *total_remaining_length, size_t *connect_property_length, size_t *will_property_length) { size_t connect_property_section_length = aws_mqtt5_compute_user_property_encode_length(connect_view->user_properties, connect_view->user_property_count); ADD_OPTIONAL_U32_PROPERTY_LENGTH(connect_view->session_expiry_interval_seconds, connect_property_section_length); ADD_OPTIONAL_U16_PROPERTY_LENGTH(connect_view->receive_maximum, connect_property_section_length); ADD_OPTIONAL_U32_PROPERTY_LENGTH(connect_view->maximum_packet_size_bytes, connect_property_section_length); ADD_OPTIONAL_U16_PROPERTY_LENGTH(connect_view->topic_alias_maximum, connect_property_section_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(connect_view->request_response_information, connect_property_section_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(connect_view->request_problem_information, connect_property_section_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connect_view->authentication_method, connect_property_section_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connect_view->authentication_data, connect_property_section_length); *connect_property_length = (uint32_t)connect_property_section_length; /* variable header length = * 10 bytes (6 for mqtt string, 1 for protocol version, 1 for flags, 2 for keep alive) * + # bytes(variable_length_encoding(connect_property_section_length)) * + connect_property_section_length */ size_t variable_header_length = 0; if (aws_mqtt5_get_variable_length_encode_size(connect_property_section_length, &variable_header_length)) { return AWS_OP_ERR; } variable_header_length += 10 + connect_property_section_length; size_t payload_length = 2 + connect_view->client_id.len; *will_property_length = 0; if (connect_view->will != NULL) { const struct 
aws_mqtt5_packet_publish_view *publish_view = connect_view->will; *will_property_length = aws_mqtt5_compute_user_property_encode_length( publish_view->user_properties, publish_view->user_property_count); ADD_OPTIONAL_U32_PROPERTY_LENGTH(connect_view->will_delay_interval_seconds, *will_property_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(publish_view->payload_format, *will_property_length); ADD_OPTIONAL_U32_PROPERTY_LENGTH(publish_view->message_expiry_interval_seconds, *will_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->content_type, *will_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->response_topic, *will_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->correlation_data, *will_property_length); size_t will_properties_length_encode_size = 0; if (aws_mqtt5_get_variable_length_encode_size( (uint32_t)*will_property_length, &will_properties_length_encode_size)) { return AWS_OP_ERR; } payload_length += *will_property_length; payload_length += will_properties_length_encode_size; payload_length += 2 + publish_view->topic.len; payload_length += 2 + publish_view->payload.len; } /* Can't use the optional property macros because these don't have a leading property type byte */ if (connect_view->username != NULL) { payload_length += connect_view->username->len + 2; } if (connect_view->password != NULL) { payload_length += connect_view->password->len + 2; } *total_remaining_length = payload_length + variable_header_length; return AWS_OP_SUCCESS; } static uint8_t s_aws_mqtt5_connect_compute_connect_flags(const struct aws_mqtt5_packet_connect_view *connect_view) { uint8_t flags = 0; if (connect_view->clean_start) { flags |= 1 << 1; } const struct aws_mqtt5_packet_publish_view *will = connect_view->will; if (will != NULL) { flags |= 1 << 2; flags |= ((uint8_t)will->qos) << 3; if (will->retain) { flags |= 1 << 5; } } if (connect_view->password != NULL) { flags |= 1 << 6; } if (connect_view->username != NULL) { flags |= 1 << 7; } return flags; } static int s_aws_mqtt5_encoder_begin_connect(struct aws_mqtt5_encoder *encoder, const void *view) { const struct aws_mqtt5_packet_connect_view *connect_view = view; const struct aws_mqtt5_packet_publish_view *will = connect_view->will; size_t total_remaining_length = 0; size_t connect_property_length = 0; size_t will_property_length = 0; if (s_compute_connect_variable_length_fields( connect_view, &total_remaining_length, &connect_property_length, &will_property_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to compute variable length values for CONNECT packet with error %d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: setting up encode for a CONNECT packet with remaining length %zu", (void *)encoder->config.client, total_remaining_length); uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length; uint32_t connect_property_length_u32 = (uint32_t)connect_property_length; uint32_t will_property_length_u32 = (uint32_t)will_property_length; ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_CONNECT, 0)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32); ADD_ENCODE_STEP_CURSOR(encoder, g_aws_mqtt5_connect_protocol_cursor); ADD_ENCODE_STEP_U8(encoder, s_aws_mqtt5_connect_compute_connect_flags(connect_view)); ADD_ENCODE_STEP_U16(encoder, connect_view->keep_alive_interval_seconds); ADD_ENCODE_STEP_VLI(encoder, 
connect_property_length_u32); ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL, connect_view->session_expiry_interval_seconds); ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM, connect_view->receive_maximum); ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE, connect_view->maximum_packet_size_bytes); ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM, connect_view->topic_alias_maximum); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REQUEST_RESPONSE_INFORMATION, connect_view->request_response_information); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REQUEST_PROBLEM_INFORMATION, connect_view->request_problem_information); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD, connect_view->authentication_method); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA, connect_view->authentication_data); aws_mqtt5_add_user_property_encoding_steps( encoder, connect_view->user_properties, connect_view->user_property_count); ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, connect_view->client_id); if (will != NULL) { ADD_ENCODE_STEP_VLI(encoder, will_property_length_u32); ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_WILL_DELAY_INTERVAL, connect_view->will_delay_interval_seconds); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR, will->payload_format); ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL, will->message_expiry_interval_seconds); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE, will->content_type); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC, will->response_topic); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA, will->correlation_data); aws_mqtt5_add_user_property_encoding_steps(encoder, will->user_properties, will->user_property_count); ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, will->topic); ADD_ENCODE_STEP_U16(encoder, (uint16_t)will->payload.len); ADD_ENCODE_STEP_CURSOR(encoder, will->payload); } ADD_ENCODE_STEP_OPTIONAL_LENGTH_PREFIXED_CURSOR(encoder, connect_view->username); ADD_ENCODE_STEP_OPTIONAL_LENGTH_PREFIXED_CURSOR(encoder, connect_view->password); return AWS_OP_SUCCESS; } static uint8_t s_aws_mqtt5_subscribe_compute_subscription_flags( const struct aws_mqtt5_subscription_view *subscription_view) { uint8_t flags = (uint8_t)subscription_view->qos; if (subscription_view->no_local) { flags |= 1 << 2; } if (subscription_view->retain_as_published) { flags |= 1 << 3; } flags |= ((uint8_t)subscription_view->retain_handling_type) << 4; return flags; } static void aws_mqtt5_add_subscribe_topic_filter_encoding_steps( struct aws_mqtt5_encoder *encoder, const struct aws_mqtt5_subscription_view *subscriptions, size_t subscription_count) { /* https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169 */ for (size_t i = 0; i < subscription_count; ++i) { const struct aws_mqtt5_subscription_view *subscription = &subscriptions[i]; ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, subscription->topic_filter); ADD_ENCODE_STEP_U8(encoder, s_aws_mqtt5_subscribe_compute_subscription_flags(subscription)); } } static void 
aws_mqtt5_add_unsubscribe_topic_filter_encoding_steps( struct aws_mqtt5_encoder *encoder, const struct aws_byte_cursor *topics, size_t unsubscription_count) { /* https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901185 */ for (size_t i = 0; i < unsubscription_count; ++i) { const struct aws_byte_cursor topic_filter = topics[i]; ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, topic_filter); } } static int s_compute_subscribe_variable_length_fields( const struct aws_mqtt5_packet_subscribe_view *subscribe_view, size_t *total_remaining_length, size_t *subscribe_properties_length) { size_t subscribe_variable_header_property_length = aws_mqtt5_compute_user_property_encode_length( subscribe_view->user_properties, subscribe_view->user_property_count); /* * Add the length of 1 byte for the identifier of a Subscription Identifier property * and the VLI of the subscription_identifier itself */ if (subscribe_view->subscription_identifier != 0) { size_t subscription_identifier_length = 0; aws_mqtt5_get_variable_length_encode_size( *subscribe_view->subscription_identifier, &subscription_identifier_length); subscribe_variable_header_property_length += subscription_identifier_length + 1; } *subscribe_properties_length = subscribe_variable_header_property_length; /* variable header total length = * 2 bytes for Packet Identifier * + # bytes (variable_length_encoding(subscribe_variable_header_property_length)) * + subscribe_variable_header_property_length */ size_t variable_header_length = 0; if (aws_mqtt5_get_variable_length_encode_size(subscribe_variable_header_property_length, &variable_header_length)) { return AWS_OP_ERR; } variable_header_length += 2 + subscribe_variable_header_property_length; size_t payload_length = 0; /* * for each subscription view, in addition to the raw name-value bytes, we also have 2 bytes of * prefix and one byte suffix required. 
* 2 bytes for the Topic Filter length * 1 byte for the Subscription Options Flags */ for (size_t i = 0; i < subscribe_view->subscription_count; ++i) { const struct aws_mqtt5_subscription_view *subscription = &subscribe_view->subscriptions[i]; payload_length += subscription->topic_filter.len; } payload_length += (3 * subscribe_view->subscription_count); *total_remaining_length = variable_header_length + payload_length; return AWS_OP_SUCCESS; } static int s_aws_mqtt5_encoder_begin_subscribe(struct aws_mqtt5_encoder *encoder, const void *view) { const struct aws_mqtt5_packet_subscribe_view *subscription_view = view; size_t total_remaining_length = 0; size_t subscribe_properties_length = 0; if (s_compute_subscribe_variable_length_fields( subscription_view, &total_remaining_length, &subscribe_properties_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for SUBSCRIBE packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for a SUBSCRIBE packet with remaining length %zu", (void *)encoder->config.client, total_remaining_length); uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length; uint32_t subscribe_property_length_u32 = (uint32_t)subscribe_properties_length; /* * Fixed Header * byte 1: * bits 7-4 MQTT Control Packet Type * bits 3-0 Reserved, must be set to 0, 0, 1, 0 * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes) */ ADD_ENCODE_STEP_U8( encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_SUBSCRIBE, SUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32); /* * Variable Header * byte 1-2: Packet Identifier * byte 3-x: Property Length as Variable Byte Integer (1-4 bytes) */ ADD_ENCODE_STEP_U16(encoder, (uint16_t)subscription_view->packet_id); ADD_ENCODE_STEP_VLI(encoder, subscribe_property_length_u32); /* * Subscribe Properties * (optional) Subscription Identifier * (optional) User Properties */ if (subscription_view->subscription_identifier != 0) { ADD_ENCODE_STEP_U8(encoder, AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER); ADD_ENCODE_STEP_VLI(encoder, *subscription_view->subscription_identifier); } aws_mqtt5_add_user_property_encoding_steps( encoder, subscription_view->user_properties, subscription_view->user_property_count); /* * Payload * n Topic Filters * byte 1-2: Length * byte 3..N: UTF-8 encoded Topic Filter * byte N+1: * bits 7-6 Reserved * bits 5-4 Retain Handling * bit 3 Retain as Published * bit 2 No Local * bits 1-0 Maximum QoS */ aws_mqtt5_add_subscribe_topic_filter_encoding_steps( encoder, subscription_view->subscriptions, subscription_view->subscription_count); return AWS_OP_SUCCESS; } static int s_compute_unsubscribe_variable_length_fields( const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view, size_t *total_remaining_length, size_t *unsubscribe_properties_length) { size_t unsubscribe_variable_header_property_length = aws_mqtt5_compute_user_property_encode_length( unsubscribe_view->user_properties, unsubscribe_view->user_property_count); *unsubscribe_properties_length = unsubscribe_variable_header_property_length; /* variable header total length = * 2 bytes for Packet Identifier * + # bytes (variable_length_encoding(subscribe_variable_header_property_length)) * + unsubscribe_variable_header_property_length */ size_t 
variable_header_length = 0; if (aws_mqtt5_get_variable_length_encode_size( unsubscribe_variable_header_property_length, &variable_header_length)) { return AWS_OP_ERR; } variable_header_length += 2 + unsubscribe_variable_header_property_length; size_t payload_length = 0; /* * for each unsubscribe topic filter * 2 bytes for the Topic Filter length * n bytes for Topic Filter */ for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) { const struct aws_byte_cursor topic_filter = unsubscribe_view->topic_filters[i]; payload_length += topic_filter.len; } payload_length += (2 * unsubscribe_view->topic_filter_count); *total_remaining_length = variable_header_length + payload_length; return AWS_OP_SUCCESS; } static int s_aws_mqtt5_encoder_begin_unsubscribe(struct aws_mqtt5_encoder *encoder, const void *view) { const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view = view; size_t total_remaining_length = 0; size_t unsubscribe_properties_length = 0; if (s_compute_unsubscribe_variable_length_fields( unsubscribe_view, &total_remaining_length, &unsubscribe_properties_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for UNSUBSCRIBE packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for a UNSUBSCRIBE packet with remaining length %zu", (void *)encoder->config.client, total_remaining_length); uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length; uint32_t unsubscribe_property_length_u32 = (uint32_t)unsubscribe_properties_length; /* * Fixed Header * byte 1: * bits 7-4 MQTT Control Packet type (10) * bits 3-0 Reserved, must be set to 0, 0, 1, 0 * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes) */ ADD_ENCODE_STEP_U8( encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_UNSUBSCRIBE, UNSUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32); /* * Variable Header * byte 1-2: Packet Identifier * byte 3-x: Properties length as Variable Byte Integer (1-4 bytes) */ ADD_ENCODE_STEP_U16(encoder, (uint16_t)unsubscribe_view->packet_id); ADD_ENCODE_STEP_VLI(encoder, unsubscribe_property_length_u32); /* * (optional) User Properties */ aws_mqtt5_add_user_property_encoding_steps( encoder, unsubscribe_view->user_properties, unsubscribe_view->user_property_count); /* * Payload * n Topic Filters * byte 1-2: Length * byte 3..N: UTF-8 encoded Topic Filter */ aws_mqtt5_add_unsubscribe_topic_filter_encoding_steps( encoder, unsubscribe_view->topic_filters, unsubscribe_view->topic_filter_count); return AWS_OP_SUCCESS; } static int s_compute_publish_variable_length_fields( const struct aws_mqtt5_packet_publish_view *publish_view, size_t *total_remaining_length, size_t *publish_properties_length) { size_t publish_property_section_length = aws_mqtt5_compute_user_property_encode_length(publish_view->user_properties, publish_view->user_property_count); ADD_OPTIONAL_U8_PROPERTY_LENGTH(publish_view->payload_format, publish_property_section_length); ADD_OPTIONAL_U32_PROPERTY_LENGTH(publish_view->message_expiry_interval_seconds, publish_property_section_length); ADD_OPTIONAL_U16_PROPERTY_LENGTH(publish_view->topic_alias, publish_property_section_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->response_topic, publish_property_section_length); 
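/* Accounting note: each optional property contributes 1 identifier byte plus its encoded value (4 bytes for a u32, 2 for a u16, 1 for a u8, and a 2-byte length prefix plus the data for a cursor). For example, a 10-byte response topic adds 1 + 2 + 10 = 13 bytes to the property section. */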
ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->correlation_data, publish_property_section_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->content_type, publish_property_section_length); for (size_t i = 0; i < publish_view->subscription_identifier_count; ++i) { size_t encoding_size = 0; if (aws_mqtt5_get_variable_length_encode_size(publish_view->subscription_identifiers[i], &encoding_size)) { return AWS_OP_ERR; } publish_property_section_length += 1 + encoding_size; } *publish_properties_length = (uint32_t)publish_property_section_length; /* * Remaining Length: * Variable Header * - Topic Name * - Packet Identifier * - Property Length as VLI x * - All Properties x * Payload */ size_t remaining_length = 0; /* Property Length VLI size */ if (aws_mqtt5_get_variable_length_encode_size(publish_property_section_length, &remaining_length)) { return AWS_OP_ERR; } /* Topic name */ remaining_length += 2 + publish_view->topic.len; /* Optional packet id */ if (publish_view->packet_id != 0) { remaining_length += 2; } /* Properties */ remaining_length += publish_property_section_length; /* Payload */ remaining_length += publish_view->payload.len; *total_remaining_length = remaining_length; return AWS_OP_SUCCESS; } static int s_aws_mqtt5_encoder_begin_publish(struct aws_mqtt5_encoder *encoder, const void *view) { /* We do a shallow copy of the stored view in order to temporarily side affect it for topic aliasing */ struct aws_mqtt5_packet_publish_view local_publish_view = *((const struct aws_mqtt5_packet_publish_view *)view); uint16_t outbound_topic_alias = 0; struct aws_byte_cursor outbound_topic; if (encoder->topic_alias_resolver != NULL) { AWS_ZERO_STRUCT(outbound_topic); if (aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( encoder->topic_alias_resolver, &local_publish_view, &outbound_topic_alias, &outbound_topic)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to perform outbound topic alias resolution on PUBLISH packet with " "error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } local_publish_view.topic = outbound_topic; if (outbound_topic_alias != 0) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - PUBLISH packet using topic alias value %" PRIu16, (void *)encoder->config.client, outbound_topic_alias); if (outbound_topic.len == 0) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - PUBLISH packet dropping topic field for previously established alias", (void *)encoder->config.client); } local_publish_view.topic_alias = &outbound_topic_alias; } else { AWS_FATAL_ASSERT(local_publish_view.topic.len > 0); AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - PUBLISH packet not using a topic alias", (void *)encoder->config.client); local_publish_view.topic_alias = NULL; } } /* * We're going to encode the local mutated view copy, not the stored view. This lets the original packet stay * unchanged for the entire time it is owned by the client. Otherwise, events that disrupt the alias cache * (like disconnections) would make correct aliasing impossible (because we'd have mutated and potentially lost * topic information). 
*/ const struct aws_mqtt5_packet_publish_view *publish_view = &local_publish_view; size_t total_remaining_length = 0; size_t publish_properties_length = 0; if (s_compute_publish_variable_length_fields(publish_view, &total_remaining_length, &publish_properties_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for PUBLISH packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for a PUBLISH packet with remaining length %zu", (void *)encoder->config.client, total_remaining_length); uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length; uint32_t publish_property_length_u32 = (uint32_t)publish_properties_length; /* * Fixed Header * byte 1: * bits 4-7: MQTT Control Packet Type * bit 3: DUP flag * bit 1-2: QoS level * bit 0: RETAIN * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes) */ uint8_t flags = 0; if (publish_view->duplicate) { flags |= 1 << 3; } flags |= ((uint8_t)publish_view->qos) << 1; if (publish_view->retain) { flags |= 1; } ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PUBLISH, flags)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32); /* * Variable Header * UTF-8 Encoded Topic Name * 2 byte Packet Identifier * 1-4 byte Property Length as Variable Byte Integer * n bytes Properties */ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, publish_view->topic); if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { ADD_ENCODE_STEP_U16(encoder, (uint16_t)publish_view->packet_id); } ADD_ENCODE_STEP_VLI(encoder, publish_property_length_u32); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR, publish_view->payload_format); ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL, publish_view->message_expiry_interval_seconds); ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS, publish_view->topic_alias); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC, publish_view->response_topic); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA, publish_view->correlation_data); for (size_t i = 0; i < publish_view->subscription_identifier_count; ++i) { ADD_ENCODE_STEP_OPTIONAL_VLI_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER, &publish_view->subscription_identifiers[i]); } ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE, publish_view->content_type); aws_mqtt5_add_user_property_encoding_steps( encoder, publish_view->user_properties, publish_view->user_property_count); /* * Payload * Content and format of data is application specific */ if (publish_view->payload.len > 0) { ADD_ENCODE_STEP_CURSOR(encoder, publish_view->payload); } return AWS_OP_SUCCESS; } static int s_compute_puback_variable_length_fields( const struct aws_mqtt5_packet_puback_view *puback_view, size_t *total_remaining_length, size_t *puback_properties_length) { size_t local_property_length = aws_mqtt5_compute_user_property_encode_length(puback_view->user_properties, puback_view->user_property_count); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(puback_view->reason_string, local_property_length); *puback_properties_length = (uint32_t)local_property_length; /* variable 
header total length = * 2 bytes for Packet Identifier * + 1 byte for PUBACK reason code if it exists * + subscribe_variable_header_property_length * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124 * If there are no properties and Reason Code is success, PUBACK ends with the packet id * * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124 * If there are no properties and Reason Code is not success, PUBACK ends with the reason code */ if (local_property_length == 0) { if (puback_view->reason_code == AWS_MQTT5_PARC_SUCCESS) { *total_remaining_length = 2; } else { *total_remaining_length = 3; } return AWS_OP_SUCCESS; } size_t variable_property_length_size = 0; if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &variable_property_length_size)) { return AWS_OP_ERR; } /* vli of property length + packet id + reason code + properties length */ *total_remaining_length = variable_property_length_size + 3 + local_property_length; return AWS_OP_SUCCESS; } static int s_aws_mqtt5_encoder_begin_puback(struct aws_mqtt5_encoder *encoder, const void *view) { const struct aws_mqtt5_packet_puback_view *puback_view = view; size_t total_remaining_length = 0; size_t puback_properties_length = 0; if (s_compute_puback_variable_length_fields(puback_view, &total_remaining_length, &puback_properties_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for PUBACK packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for a PUBACK packet with remaining length %zu", (void *)encoder->config.client, total_remaining_length); uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length; uint32_t puback_property_length_u32 = (uint32_t)puback_properties_length; /* * Fixed Header * byte 1: * bits 7-4 MQTT Control Packet Type * bits 3-0 Reserved, bust be set to 0, 0, 0, 0 * byte 2-x: Remaining Length as a Variable Byte Integer (1-4 bytes) */ ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PUBACK, 0)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32); /* * Variable Header * byte 1-2: Packet Identifier * byte 3: PUBACK Reason Code * byte 4-x: Property Length * Properties */ ADD_ENCODE_STEP_U16(encoder, (uint16_t)puback_view->packet_id); /* * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124 * If Reason Code is success and there are no properties, PUBACK ends with the packet id */ if (total_remaining_length == 2) { return AWS_OP_SUCCESS; } ADD_ENCODE_STEP_U8(encoder, puback_view->reason_code); /* * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901126 * If remaining length < 4 there is no property length */ if (total_remaining_length < 4) { return AWS_OP_SUCCESS; } ADD_ENCODE_STEP_VLI(encoder, puback_property_length_u32); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, puback_view->reason_string); aws_mqtt5_add_user_property_encoding_steps(encoder, puback_view->user_properties, puback_view->user_property_count); return AWS_OP_SUCCESS; } static enum aws_mqtt5_encoding_result s_execute_encode_step( struct aws_mqtt5_encoder *encoder, struct aws_mqtt5_encoding_step *step, struct aws_byte_buf *buffer) { size_t buffer_room = buffer->capacity - buffer->len; switch (step->type) 
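    /*
     * Step execution semantics for the switch below: fixed-width steps (U8/U16/U32) and VLI steps are
     * all-or-nothing - if the buffer lacks sufficient room (VLI conservatively requires 4 free bytes) they
     * return AWS_MQTT5_ER_OUT_OF_ROOM without writing anything.  CURSOR and STREAM steps are resumable: they
     * write as much as fits and only return AWS_MQTT5_ER_FINISHED once the cursor is exhausted or the stream
     * reports end-of-stream.  aws_mqtt5_encoder_encode_to_buffer() advances to the next step only on
     * AWS_MQTT5_ER_FINISHED, so a partially-written step is retried against the next buffer.
     */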
{ case AWS_MQTT5_EST_U8: if (buffer_room < 1) { return AWS_MQTT5_ER_OUT_OF_ROOM; } aws_byte_buf_write_u8(buffer, step->value.value_u8); return AWS_MQTT5_ER_FINISHED; case AWS_MQTT5_EST_U16: if (buffer_room < 2) { return AWS_MQTT5_ER_OUT_OF_ROOM; } aws_byte_buf_write_be16(buffer, step->value.value_u16); return AWS_MQTT5_ER_FINISHED; case AWS_MQTT5_EST_U32: if (buffer_room < 4) { return AWS_MQTT5_ER_OUT_OF_ROOM; } aws_byte_buf_write_be32(buffer, step->value.value_u32); return AWS_MQTT5_ER_FINISHED; case AWS_MQTT5_EST_VLI: /* being lazy here and just assuming the worst case */ if (buffer_room < 4) { return AWS_MQTT5_ER_OUT_OF_ROOM; } /* This can't fail. We've already validated the vli value when we made the step */ aws_mqtt5_encode_variable_length_integer(buffer, step->value.value_u32); return AWS_MQTT5_ER_FINISHED; case AWS_MQTT5_EST_CURSOR: if (buffer_room < 1) { return AWS_MQTT5_ER_OUT_OF_ROOM; } aws_byte_buf_write_to_capacity(buffer, &step->value.value_cursor); return (step->value.value_cursor.len == 0) ? AWS_MQTT5_ER_FINISHED : AWS_MQTT5_ER_OUT_OF_ROOM; case AWS_MQTT5_EST_STREAM: while (buffer->len < buffer->capacity) { if (aws_input_stream_read(step->value.value_stream, buffer)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to read from stream with error %d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_MQTT5_ER_ERROR; } struct aws_stream_status status; if (aws_input_stream_get_status(step->value.value_stream, &status)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: failed to query stream status with error %d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_MQTT5_ER_ERROR; } if (status.is_end_of_stream) { return AWS_MQTT5_ER_FINISHED; } } if (buffer->len == buffer->capacity) { return AWS_MQTT5_ER_OUT_OF_ROOM; } /* fall through intentional */ } /* shouldn't be reachable */ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: encoder reached an unreachable state", (void *)encoder->config.client); aws_raise_error(AWS_ERROR_INVALID_STATE); return AWS_MQTT5_ER_ERROR; } enum aws_mqtt5_encoding_result aws_mqtt5_encoder_encode_to_buffer( struct aws_mqtt5_encoder *encoder, struct aws_byte_buf *buffer) { enum aws_mqtt5_encoding_result result = AWS_MQTT5_ER_FINISHED; size_t step_count = aws_array_list_length(&encoder->encoding_steps); while (result == AWS_MQTT5_ER_FINISHED && encoder->current_encoding_step_index < step_count) { struct aws_mqtt5_encoding_step *step = NULL; aws_array_list_get_at_ptr(&encoder->encoding_steps, (void **)&step, encoder->current_encoding_step_index); result = s_execute_encode_step(encoder, step, buffer); if (result == AWS_MQTT5_ER_FINISHED) { encoder->current_encoding_step_index++; } } if (result == AWS_MQTT5_ER_FINISHED) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_CLIENT, "id=%p: finished encoding current operation", (void *)encoder->config.client); aws_mqtt5_encoder_reset(encoder); } return result; } static struct aws_mqtt5_encoder_function_table s_aws_mqtt5_encoder_default_function_table = { .encoders_by_packet_type = { NULL, /* RESERVED = 0 */ &s_aws_mqtt5_encoder_begin_connect, /* CONNECT */ NULL, /* CONNACK */ &s_aws_mqtt5_encoder_begin_publish, /* PUBLISH */ &s_aws_mqtt5_encoder_begin_puback, /* PUBACK */ NULL, /* PUBREC */ NULL, /* PUBREL */ NULL, /* PUBCOMP */ &s_aws_mqtt5_encoder_begin_subscribe, /* SUBSCRIBE */ NULL, /* SUBACK */ &s_aws_mqtt5_encoder_begin_unsubscribe, /* UNSUBSCRIBE */ NULL, /* UNSUBACK */ 
&s_aws_mqtt5_encoder_begin_pingreq, /* PINGREQ */ NULL, /* PINGRESP */ &s_aws_mqtt5_encoder_begin_disconnect, /* DISCONNECT */ NULL /* AUTH */ }, }; const struct aws_mqtt5_encoder_function_table *g_aws_mqtt5_encoder_default_function_table = &s_aws_mqtt5_encoder_default_function_table; int aws_mqtt5_encoder_init( struct aws_mqtt5_encoder *encoder, struct aws_allocator *allocator, struct aws_mqtt5_encoder_options *options) { AWS_ZERO_STRUCT(*encoder); encoder->config = *options; if (encoder->config.encoders == NULL) { encoder->config.encoders = &s_aws_mqtt5_encoder_default_function_table; } if (aws_array_list_init_dynamic( &encoder->encoding_steps, allocator, INITIAL_ENCODING_STEP_COUNT, sizeof(struct aws_mqtt5_encoding_step))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_encoder_clean_up(struct aws_mqtt5_encoder *encoder) { aws_array_list_clean_up(&encoder->encoding_steps); } void aws_mqtt5_encoder_reset(struct aws_mqtt5_encoder *encoder) { aws_array_list_clear(&encoder->encoding_steps); encoder->current_encoding_step_index = 0; } int aws_mqtt5_encoder_append_packet_encoding( struct aws_mqtt5_encoder *encoder, enum aws_mqtt5_packet_type packet_type, const void *packet_view) { aws_mqtt5_encode_begin_packet_type_fn *encoding_fn = encoder->config.encoders->encoders_by_packet_type[packet_type]; if (encoding_fn == NULL) { return aws_raise_error(AWS_ERROR_MQTT5_ENCODE_FAILURE); } return (*encoding_fn)(encoder, packet_view); } static int s_compute_packet_size(size_t total_remaining_length, size_t *packet_size) { /* 1 (packet type + flags) + vli_length(total_remaining_length) + total_remaining_length */ size_t encode_size = 0; if (aws_mqtt5_get_variable_length_encode_size(total_remaining_length, &encode_size)) { return AWS_OP_ERR; } size_t prefix = (size_t)1 + encode_size; if (aws_add_size_checked(prefix, total_remaining_length, packet_size)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_mqtt5_packet_view_get_encoded_size( enum aws_mqtt5_packet_type packet_type, const void *packet_view, size_t *packet_size) { size_t total_remaining_length = 0; size_t properties_length = 0; if (packet_type == AWS_MQTT5_PT_PINGREQ) { *packet_size = AWS_MQTT5_PINGREQ_ENCODED_SIZE; return AWS_OP_SUCCESS; } switch (packet_type) { case AWS_MQTT5_PT_PUBLISH: if (s_compute_publish_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) { return AWS_OP_ERR; } break; case AWS_MQTT5_PT_SUBSCRIBE: if (s_compute_subscribe_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) { return AWS_OP_ERR; } break; case AWS_MQTT5_PT_UNSUBSCRIBE: if (s_compute_unsubscribe_variable_length_fields( packet_view, &total_remaining_length, &properties_length)) { return AWS_OP_ERR; } break; case AWS_MQTT5_PT_DISCONNECT: if (s_compute_disconnect_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) { return AWS_OP_ERR; } break; case AWS_MQTT5_PT_PUBACK: if (s_compute_puback_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) { return AWS_OP_ERR; } break; default: return aws_raise_error(AWS_ERROR_MQTT5_ENCODE_SIZE_UNSUPPORTED_PACKET_TYPE); } return s_compute_packet_size(total_remaining_length, packet_size); } void aws_mqtt5_encoder_set_outbound_topic_alias_resolver( struct aws_mqtt5_encoder *encoder, struct aws_mqtt5_outbound_topic_alias_resolver *resolver) { encoder->topic_alias_resolver = resolver; } 
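The encoder above is two-phase by design: aws_mqtt5_encoder_append_packet_encoding() runs the packet-type-specific "begin" function, which computes the variable-length fields and pushes a flat sequence of encoding steps, and aws_mqtt5_encoder_encode_to_buffer() then drains those steps into whatever buffer space the caller provides, returning AWS_MQTT5_ER_OUT_OF_ROOM whenever the caller needs to flush and call again. Two worked examples of the size math implemented above: a PUBACK with a success reason code and no properties has a remaining length of 2 (just the packet id), so s_compute_packet_size() yields 1 + 1 + 2 = 4 bytes on the wire; and a non-duplicate QoS 1 PUBLISH with RETAIN set gets flags 0x03, making its first byte 0x30 | 0x03 = 0x33 under the (packet type << 4) | flags layout described in the fixed-header comments. The sketch below is not part of the library; the private header path and the pre-built, already-validated PUBACK view are assumptions.

#include <aws/mqtt/private/v5/mqtt5_encoder.h> /* assumed location of the declarations above */

static int s_encode_puback_example(
    struct aws_allocator *allocator,
    struct aws_mqtt5_client *client,
    const struct aws_mqtt5_packet_puback_view *puback_view) {

    struct aws_mqtt5_encoder encoder;
    struct aws_mqtt5_encoder_options encoder_options = {
        .client = client,
        .encoders = NULL, /* NULL selects the default function table defined above */
    };

    if (aws_mqtt5_encoder_init(&encoder, allocator, &encoder_options)) {
        return AWS_OP_ERR;
    }

    /* Phase 1: queue the per-packet encoding steps (the view is assumed to have passed validation) */
    if (aws_mqtt5_encoder_append_packet_encoding(&encoder, AWS_MQTT5_PT_PUBACK, puback_view)) {
        aws_mqtt5_encoder_clean_up(&encoder);
        return AWS_OP_ERR;
    }

    /* Phase 2: drain the steps through a deliberately small scratch buffer */
    struct aws_byte_buf chunk;
    if (aws_byte_buf_init(&chunk, allocator, 64)) {
        aws_mqtt5_encoder_clean_up(&encoder);
        return AWS_OP_ERR;
    }

    enum aws_mqtt5_encoding_result result;
    do {
        result = aws_mqtt5_encoder_encode_to_buffer(&encoder, &chunk);
        /* a real caller would flush the bytes in chunk to the channel here before reusing it */
        chunk.len = 0;
    } while (result == AWS_MQTT5_ER_OUT_OF_ROOM);

    aws_byte_buf_clean_up(&chunk);
    aws_mqtt5_encoder_clean_up(&encoder);

    return (result == AWS_MQTT5_ER_FINISHED) ? AWS_OP_SUCCESS : AWS_OP_ERR;
}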
aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_listener.c000066400000000000000000000075111456575232400251760ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include struct aws_mqtt5_listener { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct aws_mqtt5_listener_config config; uint64_t callback_set_id; struct aws_task initialize_task; struct aws_task terminate_task; }; static void s_mqtt5_listener_destroy(struct aws_mqtt5_listener *listener) { aws_mqtt5_client_release(listener->config.client); aws_mqtt5_listener_termination_completion_fn *termination_callback = listener->config.termination_callback; void *temination_callback_user_data = listener->config.termination_callback_user_data; aws_mem_release(listener->allocator, listener); if (termination_callback != NULL) { (*termination_callback)(temination_callback_user_data); } } static void s_mqtt5_listener_initialize_task_fn(struct aws_task *task, void *arg, enum aws_task_status task_status) { (void)task; struct aws_mqtt5_listener *listener = arg; if (task_status == AWS_TASK_STATUS_RUN_READY) { listener->callback_set_id = aws_mqtt5_callback_set_manager_push_front( &listener->config.client->callback_manager, &listener->config.listener_callbacks); AWS_LOGF_INFO( AWS_LS_MQTT5_GENERAL, "id=%p: Mqtt5 Listener initialized, listener id=%p", (void *)listener->config.client, (void *)listener); aws_mqtt5_listener_release(listener); } else { s_mqtt5_listener_destroy(listener); } } static void s_mqtt5_listener_terminate_task_fn(struct aws_task *task, void *arg, enum aws_task_status task_status) { (void)task; struct aws_mqtt5_listener *listener = arg; if (task_status == AWS_TASK_STATUS_RUN_READY) { aws_mqtt5_callback_set_manager_remove(&listener->config.client->callback_manager, listener->callback_set_id); } AWS_LOGF_INFO( AWS_LS_MQTT5_GENERAL, "id=%p: Mqtt5 Listener terminated, listener id=%p", (void *)listener->config.client, (void *)listener); s_mqtt5_listener_destroy(listener); } static void s_aws_mqtt5_listener_on_zero_ref_count(void *context) { struct aws_mqtt5_listener *listener = context; aws_event_loop_schedule_task_now(listener->config.client->loop, &listener->terminate_task); } struct aws_mqtt5_listener *aws_mqtt5_listener_new( struct aws_allocator *allocator, struct aws_mqtt5_listener_config *config) { if (config->client == NULL) { return NULL; } struct aws_mqtt5_listener *listener = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_listener)); listener->allocator = allocator; listener->config = *config; aws_mqtt5_client_acquire(config->client); aws_ref_count_init(&listener->ref_count, listener, s_aws_mqtt5_listener_on_zero_ref_count); aws_task_init(&listener->initialize_task, s_mqtt5_listener_initialize_task_fn, listener, "Mqtt5ListenerInitialize"); aws_task_init(&listener->terminate_task, s_mqtt5_listener_terminate_task_fn, listener, "Mqtt5ListenerTerminate"); aws_mqtt5_listener_acquire(listener); aws_event_loop_schedule_task_now(config->client->loop, &listener->initialize_task); return listener; } struct aws_mqtt5_listener *aws_mqtt5_listener_acquire(struct aws_mqtt5_listener *listener) { if (listener != NULL) { aws_ref_count_acquire(&listener->ref_count); } return listener; } struct aws_mqtt5_listener *aws_mqtt5_listener_release(struct aws_mqtt5_listener *listener) { if (listener != NULL) { aws_ref_count_release(&listener->ref_count); } return NULL; } 
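The listener above funnels both setup and teardown through tasks on the owning client's event loop: aws_mqtt5_listener_new() acquires the client, takes an extra internal reference, and schedules an initialize task that installs the callback set at the front of the client's callback manager, while the final release schedules a terminate task that removes the callback set before the listener is destroyed and the termination callback fires. In practice that means aws_mqtt5_listener_release() only drops the caller's reference, callbacks can keep arriving until the terminate task has run, and the termination callback is the one reliable signal that delivery has stopped. The sketch below is a minimal usage pattern, not part of the library; the header path is an assumption, and config.listener_callbacks is left zeroed for brevity where a real caller would supply its packet and lifecycle callbacks.

#include <aws/mqtt/private/v5/mqtt5_listener.h> /* assumed location of the declarations above */

static void s_on_listener_terminated(void *user_data) {
    /* Invoked after the listener's memory has been released; a safe point to free user_data or signal shutdown */
    (void)user_data;
}

static struct aws_mqtt5_listener *s_attach_listener_example(
    struct aws_allocator *allocator,
    struct aws_mqtt5_client *client) {

    struct aws_mqtt5_listener_config config;
    AWS_ZERO_STRUCT(config);
    config.client = client; /* the listener holds its own reference on the client until it is destroyed */
    config.termination_callback = s_on_listener_terminated;
    config.termination_callback_user_data = NULL;

    /* Returns immediately; callback installation completes asynchronously on the client's event loop */
    return aws_mqtt5_listener_new(allocator, &config);
}

/* Later, from any thread: aws_mqtt5_listener_release(listener); */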
aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_options_storage.c000066400000000000000000004414041456575232400265730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include /********************************************************************************************************************* * Property set ********************************************************************************************************************/ int aws_mqtt5_user_property_set_init( struct aws_mqtt5_user_property_set *property_set, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*property_set); if (aws_array_list_init_dynamic(&property_set->properties, allocator, 0, sizeof(struct aws_mqtt5_user_property))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_mqtt5_user_property_set_init_with_storage( struct aws_mqtt5_user_property_set *property_set, struct aws_allocator *allocator, struct aws_byte_buf *storage, size_t property_count, const struct aws_mqtt5_user_property *properties) { AWS_ZERO_STRUCT(*property_set); if (aws_array_list_init_dynamic( &property_set->properties, allocator, property_count, sizeof(struct aws_mqtt5_user_property))) { goto error; } for (size_t i = 0; i < property_count; ++i) { const struct aws_mqtt5_user_property *property = &properties[i]; struct aws_mqtt5_user_property property_clone = *property; if (aws_byte_buf_append_and_update(storage, &property_clone.name)) { goto error; } if (aws_byte_buf_append_and_update(storage, &property_clone.value)) { goto error; } if (aws_array_list_push_back(&property_set->properties, &property_clone)) { goto error; } } return AWS_OP_SUCCESS; error: aws_mqtt5_user_property_set_clean_up(property_set); return AWS_OP_ERR; } void aws_mqtt5_user_property_set_clean_up(struct aws_mqtt5_user_property_set *property_set) { aws_array_list_clean_up(&property_set->properties); } size_t aws_mqtt5_user_property_set_size(const struct aws_mqtt5_user_property_set *property_set) { return aws_array_list_length(&property_set->properties); } static void s_aws_mqtt5_user_property_set_log( struct aws_logger *log_handle, const struct aws_mqtt5_user_property *properties, size_t property_count, void *log_context, enum aws_log_level level, const char *log_prefix) { if (property_count == 0) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: %s with %zu user properties:", log_context, log_prefix, property_count); for (size_t i = 0; i < property_count; ++i) { const struct aws_mqtt5_user_property *property = &properties[i]; AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: %s user property %zu with name \"" PRInSTR "\", value \"" PRInSTR "\"", log_context, log_prefix, i, AWS_BYTE_CURSOR_PRI(property->name), AWS_BYTE_CURSOR_PRI(property->value)); } } static size_t s_aws_mqtt5_user_property_set_compute_storage_size( const struct aws_mqtt5_user_property *properties, size_t property_count) { size_t storage_size = 0; for (size_t i = 0; i < property_count; ++i) { const struct aws_mqtt5_user_property *property = &properties[i]; storage_size += property->name.len; storage_size += property->value.len; } return storage_size; } static int s_aws_mqtt5_user_property_set_validate( const struct aws_mqtt5_user_property *properties, size_t property_count, const char *log_prefix, void *log_context) { if (properties == NULL) { if (property_count == 0) { return AWS_OP_SUCCESS; } 
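    /* a NULL property array is only acceptable together with a zero count; anything else is a caller error */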
else { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: %s - Invalid user property configuration, null properties, non-zero property count", log_context, log_prefix); return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION); } } if (property_count > AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: %s - user property limit (%d) exceeded (%zu)", log_context, log_prefix, (int)AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES, property_count); return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION); } for (size_t i = 0; i < property_count; ++i) { const struct aws_mqtt5_user_property *property = &properties[i]; if (property->name.len > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: %s - user property #%zu name too long (%zu)", log_context, log_prefix, i, property->name.len); return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION); } if (aws_mqtt_validate_utf8_text(property->name)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: %s - user property #%zu name not valid UTF8", log_context, log_prefix, i); return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION); } if (property->value.len > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: %s - user property #%zu value too long (%zu)", log_context, log_prefix, i, property->value.len); return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION); } if (aws_mqtt_validate_utf8_text(property->value)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: %s - user property #%zu value not valid UTF8", log_context, log_prefix, i); return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION); } } return AWS_OP_SUCCESS; } /********************************************************************************************************************* * Operation base ********************************************************************************************************************/ struct aws_mqtt5_operation *aws_mqtt5_operation_acquire(struct aws_mqtt5_operation *operation) { if (operation == NULL) { return NULL; } aws_ref_count_acquire(&operation->ref_count); return operation; } struct aws_mqtt5_operation *aws_mqtt5_operation_release(struct aws_mqtt5_operation *operation) { if (operation != NULL) { aws_ref_count_release(&operation->ref_count); } return NULL; } void aws_mqtt5_operation_complete( struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *associated_view) { AWS_FATAL_ASSERT(operation->vtable != NULL); if (operation->vtable->aws_mqtt5_operation_completion_fn != NULL) { (*operation->vtable->aws_mqtt5_operation_completion_fn)(operation, error_code, packet_type, associated_view); } } void aws_mqtt5_operation_set_packet_id(struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id) { AWS_FATAL_ASSERT(operation->vtable != NULL); if (operation->vtable->aws_mqtt5_operation_set_packet_id_fn != NULL) { (*operation->vtable->aws_mqtt5_operation_set_packet_id_fn)(operation, packet_id); } } aws_mqtt5_packet_id_t aws_mqtt5_operation_get_packet_id(const struct aws_mqtt5_operation *operation) { AWS_FATAL_ASSERT(operation->vtable != NULL); if (operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn != NULL) { aws_mqtt5_packet_id_t *packet_id_ptr = (*operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn)(operation); if (packet_id_ptr != NULL) { return *packet_id_ptr; } } return 0; } aws_mqtt5_packet_id_t *aws_mqtt5_operation_get_packet_id_address(const struct aws_mqtt5_operation *operation) { 
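    /*
     * Returns the address of the operation's packet id slot via the vtable, or NULL for operation types that
     * never carry a packet id (e.g. CONNECT and DISCONNECT, whose vtables below leave the accessor NULL).
     */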
AWS_FATAL_ASSERT(operation->vtable != NULL); if (operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn != NULL) { return (*operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn)(operation); } return NULL; } int aws_mqtt5_operation_validate_vs_connection_settings( const struct aws_mqtt5_operation *operation, const struct aws_mqtt5_client *client) { AWS_FATAL_ASSERT(operation->vtable != NULL); AWS_FATAL_ASSERT(client->loop == NULL || aws_event_loop_thread_is_callers_thread(client->loop)); /* If we have valid negotiated settings, check against them as well */ if (aws_mqtt5_client_are_negotiated_settings_valid(client)) { const struct aws_mqtt5_negotiated_settings *settings = &client->negotiated_settings; size_t packet_size_in_bytes = 0; if (aws_mqtt5_packet_view_get_encoded_size( operation->packet_type, operation->packet_view, &packet_size_in_bytes)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: error %d (%s) computing %s packet size", (void *)client, error_code, aws_error_debug_str(error_code), aws_mqtt5_packet_type_to_c_string(operation->packet_type)); return aws_raise_error(AWS_ERROR_MQTT5_PACKET_VALIDATION); } if (packet_size_in_bytes > settings->maximum_packet_size_to_server) { AWS_LOGF_ERROR( AWS_LS_MQTT5_CLIENT, "id=%p: encoded %s packet size (%zu) exceeds server's maximum " "packet size (%" PRIu32 ")", (void *)client, aws_mqtt5_packet_type_to_c_string(operation->packet_type), packet_size_in_bytes, settings->maximum_packet_size_to_server); return aws_raise_error(AWS_ERROR_MQTT5_PACKET_VALIDATION); } } if (operation->vtable->aws_mqtt5_operation_validate_vs_connection_settings_fn != NULL) { return (*operation->vtable->aws_mqtt5_operation_validate_vs_connection_settings_fn)( operation->packet_view, client); } return AWS_OP_SUCCESS; } uint32_t aws_mqtt5_operation_get_ack_timeout_override(const struct aws_mqtt5_operation *operation) { if (operation->vtable->aws_mqtt5_operation_get_ack_timeout_override_fn != NULL) { return (*operation->vtable->aws_mqtt5_operation_get_ack_timeout_override_fn)(operation); } return 0; } static struct aws_mqtt5_operation_vtable s_empty_operation_vtable = { .aws_mqtt5_operation_completion_fn = NULL, .aws_mqtt5_operation_set_packet_id_fn = NULL, .aws_mqtt5_operation_get_packet_id_address_fn = NULL, .aws_mqtt5_operation_validate_vs_connection_settings_fn = NULL, .aws_mqtt5_operation_get_ack_timeout_override_fn = NULL, }; /********************************************************************************************************************* * Connect ********************************************************************************************************************/ int aws_mqtt5_packet_connect_view_validate(const struct aws_mqtt5_packet_connect_view *connect_options) { if (connect_options == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "Null CONNECT options"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (connect_options->client_id.len > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - client id too long", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } if (aws_mqtt_validate_utf8_text(connect_options->client_id)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - client id not valid UTF-8", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } if (connect_options->username != NULL) { if (connect_options->username->len > UINT16_MAX) { 
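            /*
             * MQTT 5 length-prefixes UTF-8 strings and binary data with two bytes, so 65535 bytes is a hard
             * protocol limit; the same UINT16_MAX check is applied to every string/binary field validated in
             * this file.
             */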
AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - username too long", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } if (aws_mqtt_validate_utf8_text(*connect_options->username)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - username not valid UTF-8", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } } if (connect_options->password != NULL) { if (connect_options->password->len > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - password too long", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } } if (connect_options->receive_maximum != NULL) { if (*connect_options->receive_maximum == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - receive maximum property of CONNECT packet may not be zero.", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } } if (connect_options->maximum_packet_size_bytes != NULL) { if (*connect_options->maximum_packet_size_bytes == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - maximum packet size property of CONNECT packet may not be " "zero.", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } } if (connect_options->will != NULL) { const struct aws_mqtt5_packet_publish_view *will_options = connect_options->will; if (aws_mqtt5_packet_publish_view_validate(will_options)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet Will message failed validation", (void *)connect_options); return AWS_OP_ERR; } if (will_options->payload.len > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - will payload larger than %d", (void *)connect_options, (int)UINT16_MAX); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } } if (connect_options->request_problem_information != NULL) { if (*connect_options->request_problem_information > 1) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet request problem information has invalid value", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } } if (connect_options->request_response_information != NULL) { if (*connect_options->request_response_information > 1) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet request response information has invalid value", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); } } if (s_aws_mqtt5_user_property_set_validate( connect_options->user_properties, connect_options->user_property_count, "aws_mqtt5_packet_connect_view", (void *)connect_options)) { return AWS_OP_ERR; } if (connect_options->authentication_method != NULL || connect_options->authentication_data != NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet has unsupported authentication fields set.", (void *)connect_options); return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION); // TODO: UTF-8 validation for authentication_method once supported. 
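        /*
         * Enhanced authentication is rejected consistently: besides refusing these CONNECT fields, the default
         * encoder function table in mqtt5_encoder.c above registers no encoder for AUTH packets.
         */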
} return AWS_OP_SUCCESS; } void aws_mqtt5_packet_connect_view_log( const struct aws_mqtt5_packet_connect_view *connect_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view keep alive interval set to %" PRIu16, (void *)connect_view, connect_view->keep_alive_interval_seconds); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view client id set to \"" PRInSTR "\"", (void *)connect_view, AWS_BYTE_CURSOR_PRI(connect_view->client_id)); if (connect_view->username != NULL) { /* Intentionally do not log username since it too can contain sensitive information */ AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view username set", (void *)connect_view); } if (connect_view->password != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view password set", (void *)connect_view); } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view clean start set to %d", (void *)connect_view, (int)(connect_view->clean_start ? 1 : 0)); if (connect_view->session_expiry_interval_seconds != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view session expiry interval set to %" PRIu32, (void *)connect_view, *connect_view->session_expiry_interval_seconds); } if (connect_view->request_response_information != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view request response information set to %d", (void *)connect_view, (int)*connect_view->request_response_information); } if (connect_view->request_problem_information) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view request problem information set to %d", (void *)connect_view, (int)*connect_view->request_problem_information); } if (connect_view->receive_maximum != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view receive maximum set to %" PRIu16, (void *)connect_view, *connect_view->receive_maximum); } if (connect_view->topic_alias_maximum != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view topic alias maximum set to %" PRIu16, (void *)connect_view, *connect_view->topic_alias_maximum); } if (connect_view->maximum_packet_size_bytes != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view maximum packet size set to %" PRIu32, (void *)connect_view, *connect_view->maximum_packet_size_bytes); } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view set will to (%p)", (void *)connect_view, (void *)connect_view->will); if (connect_view->will != NULL) { aws_mqtt5_packet_publish_view_log(connect_view->will, level); } if (connect_view->will_delay_interval_seconds != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view will delay interval set to %" PRIu32, (void *)connect_view, *connect_view->will_delay_interval_seconds); } s_aws_mqtt5_user_property_set_log( log_handle, connect_view->user_properties, connect_view->user_property_count, (void *)connect_view, level, "aws_mqtt5_packet_connect_view"); if (connect_view->authentication_method != NULL) { AWS_LOGUF( log_handle, level, 
AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view authentication method set", (void *)connect_view); } if (connect_view->password != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view authentication data set", (void *)connect_view); } } void aws_mqtt5_packet_connect_storage_clean_up(struct aws_mqtt5_packet_connect_storage *storage) { if (storage == NULL) { return; } if (storage->will != NULL) { aws_mqtt5_packet_publish_storage_clean_up(storage->will); aws_mem_release(storage->allocator, storage->will); } aws_mqtt5_user_property_set_clean_up(&storage->user_properties); aws_byte_buf_clean_up_secure(&storage->storage); } static size_t s_aws_mqtt5_packet_connect_compute_storage_size(const struct aws_mqtt5_packet_connect_view *view) { if (view == NULL) { return 0; } size_t storage_size = 0; storage_size += view->client_id.len; if (view->username != NULL) { storage_size += view->username->len; } if (view->password != NULL) { storage_size += view->password->len; } storage_size += s_aws_mqtt5_user_property_set_compute_storage_size(view->user_properties, view->user_property_count); if (view->authentication_method != NULL) { storage_size += view->authentication_method->len; } if (view->authentication_data != NULL) { storage_size += view->authentication_data->len; } return storage_size; } int aws_mqtt5_packet_connect_storage_init( struct aws_mqtt5_packet_connect_storage *storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_connect_view *view) { AWS_ZERO_STRUCT(*storage); struct aws_mqtt5_packet_connect_view *storage_view = &storage->storage_view; size_t storage_capacity = s_aws_mqtt5_packet_connect_compute_storage_size(view); if (aws_byte_buf_init(&storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } storage->allocator = allocator; storage_view->keep_alive_interval_seconds = view->keep_alive_interval_seconds; storage_view->client_id = view->client_id; if (aws_byte_buf_append_and_update(&storage->storage, &storage_view->client_id)) { return AWS_OP_ERR; } if (view->username != NULL) { storage->username = *view->username; if (aws_byte_buf_append_and_update(&storage->storage, &storage->username)) { return AWS_OP_ERR; } storage_view->username = &storage->username; } if (view->password != NULL) { storage->password = *view->password; if (aws_byte_buf_append_and_update(&storage->storage, &storage->password)) { return AWS_OP_ERR; } storage_view->password = &storage->password; } storage_view->clean_start = view->clean_start; if (view->session_expiry_interval_seconds != NULL) { storage->session_expiry_interval_seconds = *view->session_expiry_interval_seconds; storage_view->session_expiry_interval_seconds = &storage->session_expiry_interval_seconds; } if (view->request_response_information != NULL) { storage->request_response_information = *view->request_response_information; storage_view->request_response_information = &storage->request_response_information; } if (view->request_problem_information != NULL) { storage->request_problem_information = *view->request_problem_information; storage_view->request_problem_information = &storage->request_problem_information; } if (view->receive_maximum != NULL) { storage->receive_maximum = *view->receive_maximum; storage_view->receive_maximum = &storage->receive_maximum; } if (view->topic_alias_maximum != NULL) { storage->topic_alias_maximum = *view->topic_alias_maximum; storage_view->topic_alias_maximum = &storage->topic_alias_maximum; } if (view->maximum_packet_size_bytes != 
NULL) { storage->maximum_packet_size_bytes = *view->maximum_packet_size_bytes; storage_view->maximum_packet_size_bytes = &storage->maximum_packet_size_bytes; } if (view->will != NULL) { storage->will = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_packet_publish_storage)); if (storage->will == NULL) { return AWS_OP_ERR; } if (aws_mqtt5_packet_publish_storage_init(storage->will, allocator, view->will)) { return AWS_OP_ERR; } storage_view->will = &storage->will->storage_view; } if (view->will_delay_interval_seconds != 0) { storage->will_delay_interval_seconds = *view->will_delay_interval_seconds; storage_view->will_delay_interval_seconds = &storage->will_delay_interval_seconds; } if (aws_mqtt5_user_property_set_init_with_storage( &storage->user_properties, allocator, &storage->storage, view->user_property_count, view->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage->user_properties); storage_view->user_properties = storage->user_properties.properties.data; if (view->authentication_method != NULL) { storage->authentication_method = *view->authentication_method; if (aws_byte_buf_append_and_update(&storage->storage, &storage->authentication_method)) { return AWS_OP_ERR; } storage_view->authentication_method = &storage->authentication_method; } if (view->authentication_data != NULL) { storage->authentication_data = *view->authentication_data; if (aws_byte_buf_append_and_update(&storage->storage, &storage->authentication_data)) { return AWS_OP_ERR; } storage_view->authentication_data = &storage->authentication_data; } return AWS_OP_SUCCESS; } int aws_mqtt5_packet_connect_storage_init_from_external_storage( struct aws_mqtt5_packet_connect_storage *connect_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*connect_storage); if (aws_mqtt5_user_property_set_init(&connect_storage->user_properties, allocator)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_destroy_operation_connect(void *object) { if (object == NULL) { return; } struct aws_mqtt5_operation_connect *connect_op = object; aws_mqtt5_packet_connect_storage_clean_up(&connect_op->options_storage); aws_mem_release(connect_op->allocator, connect_op); } struct aws_mqtt5_operation_connect *aws_mqtt5_operation_connect_new( struct aws_allocator *allocator, const struct aws_mqtt5_packet_connect_view *connect_options) { AWS_PRECONDITION(allocator != NULL); AWS_PRECONDITION(connect_options != NULL); if (aws_mqtt5_packet_connect_view_validate(connect_options)) { return NULL; } struct aws_mqtt5_operation_connect *connect_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_connect)); if (connect_op == NULL) { return NULL; } connect_op->allocator = allocator; connect_op->base.vtable = &s_empty_operation_vtable; connect_op->base.packet_type = AWS_MQTT5_PT_CONNECT; aws_ref_count_init(&connect_op->base.ref_count, connect_op, s_destroy_operation_connect); aws_priority_queue_node_init(&connect_op->base.priority_queue_node); connect_op->base.impl = connect_op; if (aws_mqtt5_packet_connect_storage_init(&connect_op->options_storage, allocator, connect_options)) { goto error; } connect_op->base.packet_view = &connect_op->options_storage.storage_view; return connect_op; error: aws_mqtt5_operation_release(&connect_op->base); return NULL; } /********************************************************************************************************************* * Connack 
********************************************************************************************************************/ static size_t s_aws_mqtt5_packet_connack_compute_storage_size(const struct aws_mqtt5_packet_connack_view *view) { if (view == NULL) { return 0; } size_t storage_size = 0; if (view->assigned_client_identifier != NULL) { storage_size += view->assigned_client_identifier->len; } if (view->reason_string != NULL) { storage_size += view->reason_string->len; } if (view->response_information != NULL) { storage_size += view->response_information->len; } if (view->server_reference != NULL) { storage_size += view->server_reference->len; } if (view->authentication_method != NULL) { storage_size += view->authentication_method->len; } if (view->authentication_data != NULL) { storage_size += view->authentication_data->len; } storage_size += s_aws_mqtt5_user_property_set_compute_storage_size(view->user_properties, view->user_property_count); return storage_size; } int aws_mqtt5_packet_connack_storage_init( struct aws_mqtt5_packet_connack_storage *connack_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_connack_view *connack_view) { AWS_ZERO_STRUCT(*connack_storage); size_t storage_capacity = s_aws_mqtt5_packet_connack_compute_storage_size(connack_view); if (aws_byte_buf_init(&connack_storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } struct aws_mqtt5_packet_connack_view *stored_view = &connack_storage->storage_view; connack_storage->allocator = allocator; stored_view->session_present = connack_view->session_present; stored_view->reason_code = connack_view->reason_code; if (connack_view->session_expiry_interval != NULL) { connack_storage->session_expiry_interval = *connack_view->session_expiry_interval; stored_view->session_expiry_interval = &connack_storage->session_expiry_interval; } if (connack_view->receive_maximum != NULL) { connack_storage->receive_maximum = *connack_view->receive_maximum; stored_view->receive_maximum = &connack_storage->receive_maximum; } if (connack_view->maximum_qos != NULL) { connack_storage->maximum_qos = *connack_view->maximum_qos; stored_view->maximum_qos = &connack_storage->maximum_qos; } if (connack_view->retain_available != NULL) { connack_storage->retain_available = *connack_view->retain_available; stored_view->retain_available = &connack_storage->retain_available; } if (connack_view->maximum_packet_size != NULL) { connack_storage->maximum_packet_size = *connack_view->maximum_packet_size; stored_view->maximum_packet_size = &connack_storage->maximum_packet_size; } if (connack_view->assigned_client_identifier != NULL) { connack_storage->assigned_client_identifier = *connack_view->assigned_client_identifier; if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->assigned_client_identifier)) { return AWS_OP_ERR; } stored_view->assigned_client_identifier = &connack_storage->assigned_client_identifier; } if (connack_view->topic_alias_maximum != NULL) { connack_storage->topic_alias_maximum = *connack_view->topic_alias_maximum; stored_view->topic_alias_maximum = &connack_storage->topic_alias_maximum; } if (connack_view->reason_string != NULL) { connack_storage->reason_string = *connack_view->reason_string; if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->reason_string)) { return AWS_OP_ERR; } stored_view->reason_string = &connack_storage->reason_string; } if (connack_view->wildcard_subscriptions_available != NULL) { connack_storage->wildcard_subscriptions_available = 
*connack_view->wildcard_subscriptions_available; stored_view->wildcard_subscriptions_available = &connack_storage->wildcard_subscriptions_available; } if (connack_view->subscription_identifiers_available != NULL) { connack_storage->subscription_identifiers_available = *connack_view->subscription_identifiers_available; stored_view->subscription_identifiers_available = &connack_storage->subscription_identifiers_available; } if (connack_view->shared_subscriptions_available != NULL) { connack_storage->shared_subscriptions_available = *connack_view->shared_subscriptions_available; stored_view->shared_subscriptions_available = &connack_storage->shared_subscriptions_available; } if (connack_view->server_keep_alive != NULL) { connack_storage->server_keep_alive = *connack_view->server_keep_alive; stored_view->server_keep_alive = &connack_storage->server_keep_alive; } if (connack_view->response_information != NULL) { connack_storage->response_information = *connack_view->response_information; if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->response_information)) { return AWS_OP_ERR; } stored_view->response_information = &connack_storage->response_information; } if (connack_view->server_reference != NULL) { connack_storage->server_reference = *connack_view->server_reference; if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->server_reference)) { return AWS_OP_ERR; } stored_view->server_reference = &connack_storage->server_reference; } if (connack_view->authentication_method != NULL) { connack_storage->authentication_method = *connack_view->authentication_method; if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->authentication_method)) { return AWS_OP_ERR; } stored_view->authentication_method = &connack_storage->authentication_method; } if (connack_view->authentication_data != NULL) { connack_storage->authentication_data = *connack_view->authentication_data; if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->authentication_data)) { return AWS_OP_ERR; } stored_view->authentication_data = &connack_storage->authentication_data; } if (aws_mqtt5_user_property_set_init_with_storage( &connack_storage->user_properties, allocator, &connack_storage->storage, connack_view->user_property_count, connack_view->user_properties)) { return AWS_OP_ERR; } stored_view->user_property_count = aws_mqtt5_user_property_set_size(&connack_storage->user_properties); stored_view->user_properties = connack_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_connack_storage_init_from_external_storage( struct aws_mqtt5_packet_connack_storage *connack_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*connack_storage); if (aws_mqtt5_user_property_set_init(&connack_storage->user_properties, allocator)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_connack_storage_clean_up(struct aws_mqtt5_packet_connack_storage *connack_storage) { if (connack_storage == NULL) { return; } aws_mqtt5_user_property_set_clean_up(&connack_storage->user_properties); aws_byte_buf_clean_up(&connack_storage->storage); } void aws_mqtt5_packet_connack_view_log( const struct aws_mqtt5_packet_connack_view *connack_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view reason code set to 
%d (%s)", (void *)connack_view, (int)connack_view->reason_code, aws_mqtt5_connect_reason_code_to_c_string(connack_view->reason_code)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view session present set to %d", (void *)connack_view, (int)connack_view->session_present); if (connack_view->session_expiry_interval != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view session expiry interval set to %" PRIu32, (void *)connack_view, *connack_view->session_expiry_interval); } if (connack_view->receive_maximum != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view receive maximum set to %" PRIu16, (void *)connack_view, *connack_view->receive_maximum); } if (connack_view->maximum_qos != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view maximum qos set to %d", (void *)connack_view, (int)(*connack_view->maximum_qos)); } if (connack_view->retain_available != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view retain available set to %d", (void *)connack_view, (int)(*connack_view->retain_available)); } if (connack_view->maximum_packet_size != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view maximum packet size set to %" PRIu32, (void *)connack_view, *connack_view->maximum_packet_size); } if (connack_view->assigned_client_identifier != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view assigned client identifier set to \"" PRInSTR "\"", (void *)connack_view, AWS_BYTE_CURSOR_PRI(*connack_view->assigned_client_identifier)); } if (connack_view->topic_alias_maximum != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view topic alias maximum set to %" PRIu16, (void *)connack_view, *connack_view->topic_alias_maximum); } if (connack_view->reason_string != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view reason string set to \"" PRInSTR "\"", (void *)connack_view, AWS_BYTE_CURSOR_PRI(*connack_view->reason_string)); } if (connack_view->wildcard_subscriptions_available != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view wildcard subscriptions available set to %d", (void *)connack_view, (int)(*connack_view->wildcard_subscriptions_available)); } if (connack_view->subscription_identifiers_available != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view subscription identifiers available set to %d", (void *)connack_view, (int)(*connack_view->subscription_identifiers_available)); } if (connack_view->shared_subscriptions_available != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view shared subscriptions available set to %d", (void *)connack_view, (int)(*connack_view->shared_subscriptions_available)); } if (connack_view->server_keep_alive != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view server keep alive set to %" PRIu16, (void *)connack_view, *connack_view->server_keep_alive); } if (connack_view->response_information != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view response information set to \"" PRInSTR "\"", (void *)connack_view, 
AWS_BYTE_CURSOR_PRI(*connack_view->response_information)); } if (connack_view->server_reference != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view server reference set to \"" PRInSTR "\"", (void *)connack_view, AWS_BYTE_CURSOR_PRI(*connack_view->server_reference)); } if (connack_view->authentication_method != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view authentication method set", (void *)connack_view); } if (connack_view->authentication_data != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connack_view authentication data set", (void *)connack_view); } s_aws_mqtt5_user_property_set_log( log_handle, connack_view->user_properties, connack_view->user_property_count, (void *)connack_view, level, "aws_mqtt5_packet_connack_view"); } /********************************************************************************************************************* * Disconnect ********************************************************************************************************************/ int aws_mqtt5_packet_disconnect_view_validate(const struct aws_mqtt5_packet_disconnect_view *disconnect_view) { if (disconnect_view == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null DISCONNECT packet options"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } bool is_valid_reason_code = true; aws_mqtt5_disconnect_reason_code_to_c_string(disconnect_view->reason_code, &is_valid_reason_code); if (!is_valid_reason_code) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view - invalid DISCONNECT reason code:%d", (void *)disconnect_view, (int)disconnect_view->reason_code); return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION); } if (disconnect_view->reason_string != NULL) { if (disconnect_view->reason_string->len > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view - reason string too long", (void *)disconnect_view); return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION); } if (aws_mqtt_validate_utf8_text(*disconnect_view->reason_string)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view - reason string not valid UTF-8", (void *)disconnect_view); return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION); } } if (disconnect_view->server_reference != NULL) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view - sending a server reference with a client-sourced DISCONNECT is " "not allowed", (void *)disconnect_view); return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION); } if (s_aws_mqtt5_user_property_set_validate( disconnect_view->user_properties, disconnect_view->user_property_count, "aws_mqtt5_packet_disconnect_view", (void *)disconnect_view)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_packet_disconnect_view_validate_vs_connection_settings( const void *packet_view, const struct aws_mqtt5_client *client) { const struct aws_mqtt5_packet_disconnect_view *disconnect_view = packet_view; if (disconnect_view->session_expiry_interval_seconds != NULL) { /* * By spec (https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901211), you * cannot set a non-zero value here if you sent a 0-value or no value in the CONNECT (presumably allows * the server to skip tracking session state, and we can't undo that now) */ const uint32_t *session_expiry_ptr = 
client->config->connect->storage_view.session_expiry_interval_seconds; if (*disconnect_view->session_expiry_interval_seconds > 0 && (session_expiry_ptr == NULL || *session_expiry_ptr == 0)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view - cannot specify a positive session expiry after " "committing " "to 0-valued session expiry in CONNECT", (void *)disconnect_view); return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION); } } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_disconnect_view_log( const struct aws_mqtt5_packet_disconnect_view *disconnect_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view reason code set to %d (%s)", (void *)disconnect_view, (int)disconnect_view->reason_code, aws_mqtt5_disconnect_reason_code_to_c_string(disconnect_view->reason_code, NULL)); if (disconnect_view->session_expiry_interval_seconds != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view session expiry interval set to %" PRIu32, (void *)disconnect_view, *disconnect_view->session_expiry_interval_seconds); } if (disconnect_view->reason_string != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view reason string set to \"" PRInSTR "\"", (void *)disconnect_view, AWS_BYTE_CURSOR_PRI(*disconnect_view->reason_string)); } if (disconnect_view->server_reference != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_disconnect_view server reference set to \"" PRInSTR "\"", (void *)disconnect_view, AWS_BYTE_CURSOR_PRI(*disconnect_view->server_reference)); } s_aws_mqtt5_user_property_set_log( log_handle, disconnect_view->user_properties, disconnect_view->user_property_count, (void *)disconnect_view, level, "aws_mqtt5_packet_disconnect_view"); } void aws_mqtt5_packet_disconnect_storage_clean_up(struct aws_mqtt5_packet_disconnect_storage *disconnect_storage) { if (disconnect_storage == NULL) { return; } aws_mqtt5_user_property_set_clean_up(&disconnect_storage->user_properties); aws_byte_buf_clean_up(&disconnect_storage->storage); } static size_t s_aws_mqtt5_packet_disconnect_compute_storage_size( const struct aws_mqtt5_packet_disconnect_view *disconnect_view) { size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size( disconnect_view->user_properties, disconnect_view->user_property_count); if (disconnect_view->reason_string != NULL) { storage_size += disconnect_view->reason_string->len; } if (disconnect_view->server_reference != NULL) { storage_size += disconnect_view->server_reference->len; } return storage_size; } int aws_mqtt5_packet_disconnect_storage_init( struct aws_mqtt5_packet_disconnect_storage *disconnect_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_disconnect_view *disconnect_options) { AWS_ZERO_STRUCT(*disconnect_storage); size_t storage_capacity = s_aws_mqtt5_packet_disconnect_compute_storage_size(disconnect_options); if (aws_byte_buf_init(&disconnect_storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } struct aws_mqtt5_packet_disconnect_view *storage_view = &disconnect_storage->storage_view; storage_view->reason_code = disconnect_options->reason_code; if (disconnect_options->session_expiry_interval_seconds != NULL) { disconnect_storage->session_expiry_interval_seconds = 
*disconnect_options->session_expiry_interval_seconds; storage_view->session_expiry_interval_seconds = &disconnect_storage->session_expiry_interval_seconds; } if (disconnect_options->reason_string != NULL) { disconnect_storage->reason_string = *disconnect_options->reason_string; if (aws_byte_buf_append_and_update(&disconnect_storage->storage, &disconnect_storage->reason_string)) { return AWS_OP_ERR; } storage_view->reason_string = &disconnect_storage->reason_string; } if (disconnect_options->server_reference != NULL) { disconnect_storage->server_reference = *disconnect_options->server_reference; if (aws_byte_buf_append_and_update(&disconnect_storage->storage, &disconnect_storage->server_reference)) { return AWS_OP_ERR; } storage_view->server_reference = &disconnect_storage->server_reference; } if (aws_mqtt5_user_property_set_init_with_storage( &disconnect_storage->user_properties, allocator, &disconnect_storage->storage, disconnect_options->user_property_count, disconnect_options->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&disconnect_storage->user_properties); storage_view->user_properties = disconnect_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_disconnect_storage_init_from_external_storage( struct aws_mqtt5_packet_disconnect_storage *disconnect_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*disconnect_storage); if (aws_mqtt5_user_property_set_init(&disconnect_storage->user_properties, allocator)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_destroy_operation_disconnect(void *object) { if (object == NULL) { return; } struct aws_mqtt5_operation_disconnect *disconnect_op = object; aws_mqtt5_packet_disconnect_storage_clean_up(&disconnect_op->options_storage); aws_mem_release(disconnect_op->allocator, disconnect_op); } static void s_aws_mqtt5_disconnect_operation_completion( struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *completion_view) { (void)completion_view; (void)packet_type; struct aws_mqtt5_operation_disconnect *disconnect_op = operation->impl; if (disconnect_op->internal_completion_options.completion_callback != NULL) { (*disconnect_op->internal_completion_options.completion_callback)( error_code, disconnect_op->internal_completion_options.completion_user_data); } if (disconnect_op->external_completion_options.completion_callback != NULL) { (*disconnect_op->external_completion_options.completion_callback)( error_code, disconnect_op->external_completion_options.completion_user_data); } } static struct aws_mqtt5_operation_vtable s_disconnect_operation_vtable = { .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_disconnect_operation_completion, .aws_mqtt5_operation_set_packet_id_fn = NULL, .aws_mqtt5_operation_get_packet_id_address_fn = NULL, .aws_mqtt5_operation_validate_vs_connection_settings_fn = s_aws_mqtt5_packet_disconnect_view_validate_vs_connection_settings, .aws_mqtt5_operation_get_ack_timeout_override_fn = NULL, }; struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_new( struct aws_allocator *allocator, const struct aws_mqtt5_packet_disconnect_view *disconnect_options, const struct aws_mqtt5_disconnect_completion_options *external_completion_options, const struct aws_mqtt5_disconnect_completion_options *internal_completion_options) { AWS_PRECONDITION(allocator != NULL); if (aws_mqtt5_packet_disconnect_view_validate(disconnect_options)) { return NULL; } struct 
aws_mqtt5_operation_disconnect *disconnect_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_disconnect)); if (disconnect_op == NULL) { return NULL; } disconnect_op->allocator = allocator; disconnect_op->base.vtable = &s_disconnect_operation_vtable; disconnect_op->base.packet_type = AWS_MQTT5_PT_DISCONNECT; aws_ref_count_init(&disconnect_op->base.ref_count, disconnect_op, s_destroy_operation_disconnect); aws_priority_queue_node_init(&disconnect_op->base.priority_queue_node); disconnect_op->base.impl = disconnect_op; if (aws_mqtt5_packet_disconnect_storage_init(&disconnect_op->options_storage, allocator, disconnect_options)) { goto error; } disconnect_op->base.packet_view = &disconnect_op->options_storage.storage_view; if (external_completion_options != NULL) { disconnect_op->external_completion_options = *external_completion_options; } if (internal_completion_options != NULL) { disconnect_op->internal_completion_options = *internal_completion_options; } return disconnect_op; error: aws_mqtt5_operation_release(&disconnect_op->base); return NULL; } /********************************************************************************************************************* * Publish ********************************************************************************************************************/ int aws_mqtt5_packet_publish_view_validate(const struct aws_mqtt5_packet_publish_view *publish_view) { if (publish_view == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null PUBLISH packet options"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (publish_view->qos < AWS_MQTT5_QOS_AT_MOST_ONCE || publish_view->qos > AWS_MQTT5_QOS_EXACTLY_ONCE) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - unsupported QoS value in PUBLISH packet options: %d", (void *)publish_view, (int)publish_view->qos); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) { if (publish_view->duplicate) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - duplicate flag must be set to 0 for QoS 0 messages", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (publish_view->packet_id != 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - Packet ID must not be set for QoS 0 messages", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } } /* 0-length topic is never valid, even with user-controlled outbound aliasing */ if (publish_view->topic.len == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - missing topic", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } else if (aws_mqtt_validate_utf8_text(publish_view->topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - topic not valid UTF-8", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } else if (!aws_mqtt_is_valid_topic(&publish_view->topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - invalid topic: \"" PRInSTR "\"", (void *)publish_view, AWS_BYTE_CURSOR_PRI(publish_view->topic)); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (publish_view->topic_alias != NULL) { if (*publish_view->topic_alias == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - topic alias may not be zero", 
(void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } } if (publish_view->payload_format != NULL) { if (*publish_view->payload_format < AWS_MQTT5_PFI_BYTES || *publish_view->payload_format > AWS_MQTT5_PFI_UTF8) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - invalid payload format value: %d", (void *)publish_view, (int)*publish_view->payload_format); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } // Make sure the payload data is UTF-8 if the payload_format set to UTF8 if (*publish_view->payload_format == AWS_MQTT5_PFI_UTF8) { if (aws_mqtt_validate_utf8_text(publish_view->payload)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - payload value is not valid UTF-8 while payload format " "set to UTF-8", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } } } if (publish_view->response_topic != NULL) { if (publish_view->response_topic->len >= UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - response topic too long", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (aws_mqtt_validate_utf8_text(*publish_view->response_topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - response topic not valid UTF-8", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (!aws_mqtt_is_valid_topic(publish_view->response_topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - response topic must be a valid mqtt topic", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } } if (publish_view->correlation_data != NULL) { if (publish_view->correlation_data->len >= UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - correlation data too long", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } } /* * validate is done from a client perspective and clients should never generate subscription identifier in a * publish message */ if (publish_view->subscription_identifier_count != 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "Client-initiated PUBLISH packets may not contain subscription identifiers"); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (publish_view->content_type != NULL) { if (publish_view->content_type->len >= UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - content type too long", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (aws_mqtt_validate_utf8_text(*publish_view->content_type)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - content type not valid UTF-8", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } } if (s_aws_mqtt5_user_property_set_validate( publish_view->user_properties, publish_view->user_property_count, "aws_mqtt5_packet_publish_view", (void *)publish_view)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_packet_publish_view_validate_vs_connection_settings( const void *packet_view, const struct aws_mqtt5_client *client) { const struct aws_mqtt5_packet_publish_view *publish_view = packet_view; /* If we have valid negotiated settings, check against them as well */ if 
(aws_mqtt5_client_are_negotiated_settings_valid(client)) { const struct aws_mqtt5_negotiated_settings *settings = &client->negotiated_settings; if (publish_view->qos > settings->maximum_qos) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - QoS value %d exceeds negotiated maximum qos %d", (void *)publish_view, (int)publish_view->qos, (int)settings->maximum_qos); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } if (publish_view->retain && settings->retain_available == false) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - server does not support Retain", (void *)publish_view); return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_publish_view_log( const struct aws_mqtt5_packet_publish_view *publish_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view packet id set to %d", (void *)publish_view, (int)publish_view->packet_id); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view payload set containing %zu bytes", (void *)publish_view, publish_view->payload.len); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view qos set to %d", (void *)publish_view, (int)publish_view->qos); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view retain set to %d", (void *)publish_view, (int)publish_view->retain); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view topic set to \"" PRInSTR "\"", (void *)publish_view, AWS_BYTE_CURSOR_PRI(publish_view->topic)); if (publish_view->payload_format != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view payload format indicator set to %d (%s)", (void *)publish_view, (int)*publish_view->payload_format, aws_mqtt5_payload_format_indicator_to_c_string(*publish_view->payload_format)); } if (publish_view->message_expiry_interval_seconds != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view message expiry interval set to %" PRIu32, (void *)publish_view, *publish_view->message_expiry_interval_seconds); } if (publish_view->topic_alias != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view topic alias set to %" PRIu16, (void *)publish_view, *publish_view->topic_alias); } if (publish_view->response_topic != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view response topic set to \"" PRInSTR "\"", (void *)publish_view, AWS_BYTE_CURSOR_PRI(*publish_view->response_topic)); } if (publish_view->correlation_data != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - set correlation data", (void *)publish_view); } if (publish_view->content_type != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view content type set to \"" PRInSTR "\"", (void *)publish_view, AWS_BYTE_CURSOR_PRI(*publish_view->content_type)); } for (size_t i = 0; i < publish_view->subscription_identifier_count; ++i) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view subscription identifier %d: %" PRIu32, (void 
*)publish_view, (int)i, publish_view->subscription_identifiers[i]); } s_aws_mqtt5_user_property_set_log( log_handle, publish_view->user_properties, publish_view->user_property_count, (void *)publish_view, level, "aws_mqtt5_packet_publish_view"); } static size_t s_aws_mqtt5_packet_publish_compute_storage_size( const struct aws_mqtt5_packet_publish_view *publish_view) { size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size( publish_view->user_properties, publish_view->user_property_count); storage_size += publish_view->topic.len; storage_size += publish_view->payload.len; if (publish_view->response_topic != NULL) { storage_size += publish_view->response_topic->len; } if (publish_view->correlation_data != NULL) { storage_size += publish_view->correlation_data->len; } if (publish_view->content_type != NULL) { storage_size += publish_view->content_type->len; } return storage_size; } int aws_mqtt5_packet_publish_storage_init( struct aws_mqtt5_packet_publish_storage *publish_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_publish_view *publish_options) { AWS_ZERO_STRUCT(*publish_storage); size_t storage_capacity = s_aws_mqtt5_packet_publish_compute_storage_size(publish_options); if (aws_byte_buf_init(&publish_storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } if (aws_array_list_init_dynamic(&publish_storage->subscription_identifiers, allocator, 0, sizeof(uint32_t))) { return AWS_OP_ERR; } struct aws_mqtt5_packet_publish_view *storage_view = &publish_storage->storage_view; storage_view->packet_id = publish_options->packet_id; storage_view->payload = publish_options->payload; if (aws_byte_buf_append_and_update(&publish_storage->storage, &storage_view->payload)) { return AWS_OP_ERR; } storage_view->qos = publish_options->qos; storage_view->retain = publish_options->retain; storage_view->duplicate = publish_options->duplicate; storage_view->topic = publish_options->topic; if (aws_byte_buf_append_and_update(&publish_storage->storage, &storage_view->topic)) { return AWS_OP_ERR; } if (publish_options->payload_format != NULL) { publish_storage->payload_format = *publish_options->payload_format; storage_view->payload_format = &publish_storage->payload_format; } if (publish_options->message_expiry_interval_seconds != NULL) { publish_storage->message_expiry_interval_seconds = *publish_options->message_expiry_interval_seconds; storage_view->message_expiry_interval_seconds = &publish_storage->message_expiry_interval_seconds; } if (publish_options->topic_alias != NULL) { publish_storage->topic_alias = *publish_options->topic_alias; storage_view->topic_alias = &publish_storage->topic_alias; } if (publish_options->response_topic != NULL) { publish_storage->response_topic = *publish_options->response_topic; if (aws_byte_buf_append_and_update(&publish_storage->storage, &publish_storage->response_topic)) { return AWS_OP_ERR; } storage_view->response_topic = &publish_storage->response_topic; } if (publish_options->correlation_data != NULL) { publish_storage->correlation_data = *publish_options->correlation_data; if (aws_byte_buf_append_and_update(&publish_storage->storage, &publish_storage->correlation_data)) { return AWS_OP_ERR; } storage_view->correlation_data = &publish_storage->correlation_data; } for (size_t i = 0; i < publish_options->subscription_identifier_count; ++i) { aws_array_list_push_back( &publish_storage->subscription_identifiers, &publish_options->subscription_identifiers[i]); } storage_view->subscription_identifier_count = 
aws_array_list_length(&publish_storage->subscription_identifiers); storage_view->subscription_identifiers = publish_storage->subscription_identifiers.data; if (publish_options->content_type != NULL) { publish_storage->content_type = *publish_options->content_type; if (aws_byte_buf_append_and_update(&publish_storage->storage, &publish_storage->content_type)) { return AWS_OP_ERR; } storage_view->content_type = &publish_storage->content_type; } if (aws_mqtt5_user_property_set_init_with_storage( &publish_storage->user_properties, allocator, &publish_storage->storage, publish_options->user_property_count, publish_options->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&publish_storage->user_properties); storage_view->user_properties = publish_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_publish_storage_init_from_external_storage( struct aws_mqtt5_packet_publish_storage *publish_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*publish_storage); if (aws_mqtt5_user_property_set_init(&publish_storage->user_properties, allocator)) { return AWS_OP_ERR; } if (aws_array_list_init_dynamic(&publish_storage->subscription_identifiers, allocator, 0, sizeof(uint32_t))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_publish_storage_clean_up(struct aws_mqtt5_packet_publish_storage *publish_storage) { aws_mqtt5_user_property_set_clean_up(&publish_storage->user_properties); aws_array_list_clean_up(&publish_storage->subscription_identifiers); aws_byte_buf_clean_up(&publish_storage->storage); } static void s_aws_mqtt5_operation_publish_complete( struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *completion_view) { struct aws_mqtt5_operation_publish *publish_op = operation->impl; if (publish_op->completion_options.completion_callback != NULL) { (*publish_op->completion_options.completion_callback)( packet_type, completion_view, error_code, publish_op->completion_options.completion_user_data); } } static void s_aws_mqtt5_operation_publish_set_packet_id( struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id) { struct aws_mqtt5_operation_publish *publish_op = operation->impl; publish_op->options_storage.storage_view.packet_id = packet_id; } static aws_mqtt5_packet_id_t *s_aws_mqtt5_operation_publish_get_packet_id_address( const struct aws_mqtt5_operation *operation) { struct aws_mqtt5_operation_publish *publish_op = operation->impl; return &publish_op->options_storage.storage_view.packet_id; } static uint32_t s_aws_mqtt5_operation_publish_get_ack_timeout_override(const struct aws_mqtt5_operation *operation) { struct aws_mqtt5_operation_publish *publish_op = operation->impl; return publish_op->completion_options.ack_timeout_seconds_override; } static struct aws_mqtt5_operation_vtable s_publish_operation_vtable = { .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_operation_publish_complete, .aws_mqtt5_operation_set_packet_id_fn = s_aws_mqtt5_operation_publish_set_packet_id, .aws_mqtt5_operation_get_packet_id_address_fn = s_aws_mqtt5_operation_publish_get_packet_id_address, .aws_mqtt5_operation_validate_vs_connection_settings_fn = s_aws_mqtt5_packet_publish_view_validate_vs_connection_settings, .aws_mqtt5_operation_get_ack_timeout_override_fn = s_aws_mqtt5_operation_publish_get_ack_timeout_override}; static void s_destroy_operation_publish(void *object) { if (object == NULL) { return; } struct 
aws_mqtt5_operation_publish *publish_op = object; aws_mqtt5_packet_publish_storage_clean_up(&publish_op->options_storage); aws_mem_release(publish_op->allocator, publish_op); } struct aws_mqtt5_operation_publish *aws_mqtt5_operation_publish_new( struct aws_allocator *allocator, const struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_publish_view *publish_options, const struct aws_mqtt5_publish_completion_options *completion_options) { (void)client; AWS_PRECONDITION(allocator != NULL); AWS_PRECONDITION(publish_options != NULL); if (aws_mqtt5_packet_publish_view_validate(publish_options)) { return NULL; } if (publish_options->packet_id != 0) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view packet id must be zero", (void *)publish_options); aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); return NULL; } struct aws_mqtt5_operation_publish *publish_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_publish)); if (publish_op == NULL) { return NULL; } publish_op->allocator = allocator; publish_op->base.vtable = &s_publish_operation_vtable; publish_op->base.packet_type = AWS_MQTT5_PT_PUBLISH; aws_ref_count_init(&publish_op->base.ref_count, publish_op, s_destroy_operation_publish); aws_priority_queue_node_init(&publish_op->base.priority_queue_node); publish_op->base.impl = publish_op; if (aws_mqtt5_packet_publish_storage_init(&publish_op->options_storage, allocator, publish_options)) { goto error; } publish_op->base.packet_view = &publish_op->options_storage.storage_view; if (completion_options != NULL) { publish_op->completion_options = *completion_options; } return publish_op; error: aws_mqtt5_operation_release(&publish_op->base); return NULL; } /********************************************************************************************************************* * Puback ********************************************************************************************************************/ static size_t s_aws_mqtt5_packet_puback_compute_storage_size(const struct aws_mqtt5_packet_puback_view *puback_view) { size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size( puback_view->user_properties, puback_view->user_property_count); if (puback_view->reason_string != NULL) { storage_size += puback_view->reason_string->len; } return storage_size; } AWS_MQTT_API int aws_mqtt5_packet_puback_storage_init( struct aws_mqtt5_packet_puback_storage *puback_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_puback_view *puback_view) { AWS_ZERO_STRUCT(*puback_storage); size_t storage_capacity = s_aws_mqtt5_packet_puback_compute_storage_size(puback_view); if (aws_byte_buf_init(&puback_storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } struct aws_mqtt5_packet_puback_view *storage_view = &puback_storage->storage_view; storage_view->packet_id = puback_view->packet_id; storage_view->reason_code = puback_view->reason_code; if (puback_view->reason_string != NULL) { puback_storage->reason_string = *puback_view->reason_string; if (aws_byte_buf_append_and_update(&puback_storage->storage, &puback_storage->reason_string)) { return AWS_OP_ERR; } storage_view->reason_string = &puback_storage->reason_string; } if (aws_mqtt5_user_property_set_init_with_storage( &puback_storage->user_properties, allocator, &puback_storage->storage, puback_view->user_property_count, puback_view->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = 
aws_mqtt5_user_property_set_size(&puback_storage->user_properties); storage_view->user_properties = puback_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_puback_storage_init_from_external_storage( struct aws_mqtt5_packet_puback_storage *puback_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*puback_storage); if (aws_mqtt5_user_property_set_init(&puback_storage->user_properties, allocator)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_puback_storage_clean_up(struct aws_mqtt5_packet_puback_storage *puback_storage) { if (puback_storage == NULL) { return; } aws_mqtt5_user_property_set_clean_up(&puback_storage->user_properties); aws_byte_buf_clean_up(&puback_storage->storage); } void aws_mqtt5_packet_puback_view_log( const struct aws_mqtt5_packet_puback_view *puback_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_puback_view packet id set to %d", (void *)puback_view, (int)puback_view->packet_id); enum aws_mqtt5_puback_reason_code reason_code = puback_view->reason_code; AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: puback %d reason code: %s", (void *)puback_view, (int)reason_code, aws_mqtt5_puback_reason_code_to_c_string(reason_code)); if (puback_view->reason_string != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_puback_view reason string set to \"" PRInSTR "\"", (void *)puback_view, AWS_BYTE_CURSOR_PRI(*puback_view->reason_string)); } s_aws_mqtt5_user_property_set_log( log_handle, puback_view->user_properties, puback_view->user_property_count, (void *)puback_view, level, "aws_mqtt5_packet_puback_view"); } static void s_destroy_operation_puback(void *object) { if (object == NULL) { return; } struct aws_mqtt5_operation_puback *puback_op = object; aws_mqtt5_packet_puback_storage_clean_up(&puback_op->options_storage); aws_mem_release(puback_op->allocator, puback_op); } struct aws_mqtt5_operation_puback *aws_mqtt5_operation_puback_new( struct aws_allocator *allocator, const struct aws_mqtt5_packet_puback_view *puback_options) { AWS_PRECONDITION(allocator != NULL); AWS_PRECONDITION(puback_options != NULL); struct aws_mqtt5_operation_puback *puback_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_puback)); if (puback_op == NULL) { return NULL; } puback_op->allocator = allocator; puback_op->base.vtable = &s_empty_operation_vtable; puback_op->base.packet_type = AWS_MQTT5_PT_PUBACK; aws_ref_count_init(&puback_op->base.ref_count, puback_op, s_destroy_operation_puback); aws_priority_queue_node_init(&puback_op->base.priority_queue_node); puback_op->base.impl = puback_op; if (aws_mqtt5_packet_puback_storage_init(&puback_op->options_storage, allocator, puback_options)) { goto error; } puback_op->base.packet_view = &puback_op->options_storage.storage_view; return puback_op; error: aws_mqtt5_operation_release(&puback_op->base); return NULL; } /********************************************************************************************************************* * Unsubscribe ********************************************************************************************************************/ int aws_mqtt5_packet_unsubscribe_view_validate(const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view) { if (unsubscribe_view == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null 
UNSUBSCRIBE packet options"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (unsubscribe_view->topic_filter_count == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsubscribe_view - must contain at least one topic", (void *)unsubscribe_view); return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION); } if (unsubscribe_view->topic_filter_count > AWS_MQTT5_CLIENT_MAXIMUM_TOPIC_FILTERS_PER_UNSUBSCRIBE) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsubscribe_view - contains too many topics (%zu)", (void *)unsubscribe_view, unsubscribe_view->topic_filter_count); return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION); } for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) { const struct aws_byte_cursor *topic_filter = &unsubscribe_view->topic_filters[i]; if (aws_mqtt_validate_utf8_text(*topic_filter)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsubscribe_view - topic filter not valid UTF-8: \"" PRInSTR "\"", (void *)unsubscribe_view, AWS_BYTE_CURSOR_PRI(*topic_filter)); return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION); } if (!aws_mqtt_is_valid_topic_filter(topic_filter)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsubscribe_view - invalid topic filter: \"" PRInSTR "\"", (void *)unsubscribe_view, AWS_BYTE_CURSOR_PRI(*topic_filter)); return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION); } } if (s_aws_mqtt5_user_property_set_validate( unsubscribe_view->user_properties, unsubscribe_view->user_property_count, "aws_mqtt5_packet_unsubscribe_view", (void *)unsubscribe_view)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_unsubscribe_view_log( const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } size_t topic_count = unsubscribe_view->topic_filter_count; for (size_t i = 0; i < topic_count; ++i) { const struct aws_byte_cursor *topic_cursor = &unsubscribe_view->topic_filters[i]; AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsubscribe_view topic #%zu: \"" PRInSTR "\"", (void *)unsubscribe_view, i, AWS_BYTE_CURSOR_PRI(*topic_cursor)); } s_aws_mqtt5_user_property_set_log( log_handle, unsubscribe_view->user_properties, unsubscribe_view->user_property_count, (void *)unsubscribe_view, level, "aws_mqtt5_packet_unsubscribe_view"); } void aws_mqtt5_packet_unsubscribe_storage_clean_up(struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage) { if (unsubscribe_storage == NULL) { return; } aws_array_list_clean_up(&unsubscribe_storage->topic_filters); aws_mqtt5_user_property_set_clean_up(&unsubscribe_storage->user_properties); aws_byte_buf_clean_up(&unsubscribe_storage->storage); } static int s_aws_mqtt5_packet_unsubscribe_build_topic_list( struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage, struct aws_allocator *allocator, size_t topic_count, const struct aws_byte_cursor *topics) { if (aws_array_list_init_dynamic( &unsubscribe_storage->topic_filters, allocator, topic_count, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } for (size_t i = 0; i < topic_count; ++i) { const struct aws_byte_cursor *topic_cursor_ptr = &topics[i]; struct aws_byte_cursor topic_cursor = *topic_cursor_ptr; if (aws_byte_buf_append_and_update(&unsubscribe_storage->storage, &topic_cursor)) { return AWS_OP_ERR; } if 
(aws_array_list_push_back(&unsubscribe_storage->topic_filters, &topic_cursor)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static size_t s_aws_mqtt5_packet_unsubscribe_compute_storage_size( const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view) { size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size( unsubscribe_view->user_properties, unsubscribe_view->user_property_count); for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) { const struct aws_byte_cursor *topic = &unsubscribe_view->topic_filters[i]; storage_size += topic->len; } return storage_size; } int aws_mqtt5_packet_unsubscribe_storage_init( struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options) { AWS_ZERO_STRUCT(*unsubscribe_storage); size_t storage_capacity = s_aws_mqtt5_packet_unsubscribe_compute_storage_size(unsubscribe_options); if (aws_byte_buf_init(&unsubscribe_storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } struct aws_mqtt5_packet_unsubscribe_view *storage_view = &unsubscribe_storage->storage_view; if (s_aws_mqtt5_packet_unsubscribe_build_topic_list( unsubscribe_storage, allocator, unsubscribe_options->topic_filter_count, unsubscribe_options->topic_filters)) { return AWS_OP_ERR; } storage_view->topic_filter_count = aws_array_list_length(&unsubscribe_storage->topic_filters); storage_view->topic_filters = unsubscribe_storage->topic_filters.data; if (aws_mqtt5_user_property_set_init_with_storage( &unsubscribe_storage->user_properties, allocator, &unsubscribe_storage->storage, unsubscribe_options->user_property_count, unsubscribe_options->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&unsubscribe_storage->user_properties); storage_view->user_properties = unsubscribe_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_unsubscribe_storage_init_from_external_storage( struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*unsubscribe_storage); if (aws_mqtt5_user_property_set_init(&unsubscribe_storage->user_properties, allocator)) { return AWS_OP_ERR; } if (aws_array_list_init_dynamic( &unsubscribe_storage->topic_filters, allocator, 0, sizeof(struct aws_byte_cursor))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_aws_mqtt5_operation_unsubscribe_complete( struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *completion_view) { struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = operation->impl; (void)packet_type; if (unsubscribe_op->completion_options.completion_callback != NULL) { (*unsubscribe_op->completion_options.completion_callback)( completion_view, error_code, unsubscribe_op->completion_options.completion_user_data); } } static void s_aws_mqtt5_operation_unsubscribe_set_packet_id( struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id) { struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = operation->impl; unsubscribe_op->options_storage.storage_view.packet_id = packet_id; } static aws_mqtt5_packet_id_t *s_aws_mqtt5_operation_unsubscribe_get_packet_id_address( const struct aws_mqtt5_operation *operation) { struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = operation->impl; return &unsubscribe_op->options_storage.storage_view.packet_id; } static uint32_t 
s_aws_mqtt5_operation_unsubscribe_get_ack_timeout_override( const struct aws_mqtt5_operation *operation) { struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = operation->impl; return unsubscribe_op->completion_options.ack_timeout_seconds_override; } static struct aws_mqtt5_operation_vtable s_unsubscribe_operation_vtable = { .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_operation_unsubscribe_complete, .aws_mqtt5_operation_set_packet_id_fn = s_aws_mqtt5_operation_unsubscribe_set_packet_id, .aws_mqtt5_operation_get_packet_id_address_fn = s_aws_mqtt5_operation_unsubscribe_get_packet_id_address, .aws_mqtt5_operation_validate_vs_connection_settings_fn = NULL, .aws_mqtt5_operation_get_ack_timeout_override_fn = s_aws_mqtt5_operation_unsubscribe_get_ack_timeout_override, }; static void s_destroy_operation_unsubscribe(void *object) { if (object == NULL) { return; } struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = object; aws_mqtt5_packet_unsubscribe_storage_clean_up(&unsubscribe_op->options_storage); aws_mem_release(unsubscribe_op->allocator, unsubscribe_op); } struct aws_mqtt5_operation_unsubscribe *aws_mqtt5_operation_unsubscribe_new( struct aws_allocator *allocator, const struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options, const struct aws_mqtt5_unsubscribe_completion_options *completion_options) { (void)client; AWS_PRECONDITION(allocator != NULL); AWS_PRECONDITION(unsubscribe_options != NULL); if (aws_mqtt5_packet_unsubscribe_view_validate(unsubscribe_options)) { return NULL; } if (unsubscribe_options->packet_id != 0) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsubscribe_view packet id must be zero", (void *)unsubscribe_options); aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION); return NULL; } struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_unsubscribe)); if (unsubscribe_op == NULL) { return NULL; } unsubscribe_op->allocator = allocator; unsubscribe_op->base.vtable = &s_unsubscribe_operation_vtable; unsubscribe_op->base.packet_type = AWS_MQTT5_PT_UNSUBSCRIBE; aws_ref_count_init(&unsubscribe_op->base.ref_count, unsubscribe_op, s_destroy_operation_unsubscribe); aws_priority_queue_node_init(&unsubscribe_op->base.priority_queue_node); unsubscribe_op->base.impl = unsubscribe_op; if (aws_mqtt5_packet_unsubscribe_storage_init(&unsubscribe_op->options_storage, allocator, unsubscribe_options)) { goto error; } unsubscribe_op->base.packet_view = &unsubscribe_op->options_storage.storage_view; if (completion_options != NULL) { unsubscribe_op->completion_options = *completion_options; } return unsubscribe_op; error: aws_mqtt5_operation_release(&unsubscribe_op->base); return NULL; } /********************************************************************************************************************* * Subscribe ********************************************************************************************************************/ static int s_aws_mqtt5_validate_subscription( const struct aws_mqtt5_subscription_view *subscription, void *log_context) { if (aws_mqtt_validate_utf8_text(subscription->topic_filter)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - topic filter \"" PRInSTR "\" not valid UTF-8 in subscription", log_context, AWS_BYTE_CURSOR_PRI(subscription->topic_filter)); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } if 
(!aws_mqtt_is_valid_topic_filter(&subscription->topic_filter)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - invalid topic filter \"" PRInSTR "\" in subscription", log_context, AWS_BYTE_CURSOR_PRI(subscription->topic_filter)); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } if (subscription->topic_filter.len > UINT16_MAX) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - subscription contains too-long topic filter", log_context); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } if (subscription->qos < AWS_MQTT5_QOS_AT_MOST_ONCE || subscription->qos > AWS_MQTT5_QOS_AT_LEAST_ONCE) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - unsupported QoS value: %d", log_context, (int)subscription->qos); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } if (subscription->retain_handling_type < AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE || subscription->retain_handling_type > AWS_MQTT5_RHT_DONT_SEND) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - unsupported retain handling value: %d", log_context, (int)subscription->retain_handling_type); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } /* mqtt5 forbids no_local to be set to 1 if the topic filter represents a shared subscription */ if (subscription->no_local) { if (aws_mqtt_is_topic_filter_shared_subscription(subscription->topic_filter)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - no_local cannot be 1 if the topic filter is a shared " "subscription", log_context); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } } return AWS_OP_SUCCESS; } int aws_mqtt5_packet_subscribe_view_validate(const struct aws_mqtt5_packet_subscribe_view *subscribe_view) { if (subscribe_view == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null SUBSCRIBE packet options"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (subscribe_view->subscription_count == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - must contain at least one subscription", (void *)subscribe_view); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } if (subscribe_view->subscription_count > AWS_MQTT5_CLIENT_MAXIMUM_SUBSCRIPTIONS_PER_SUBSCRIBE) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - too many subscriptions", (void *)subscribe_view); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } for (size_t i = 0; i < subscribe_view->subscription_count; ++i) { const struct aws_mqtt5_subscription_view *subscription = &subscribe_view->subscriptions[i]; if (s_aws_mqtt5_validate_subscription(subscription, (void *)subscribe_view)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - invalid subscription", (void *)subscribe_view); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } } if (subscribe_view->subscription_identifier != NULL) { if (*subscribe_view->subscription_identifier > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view - subscription identifier (%" PRIu32 ") too large", (void *)subscribe_view, *subscribe_view->subscription_identifier); return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); } } if (s_aws_mqtt5_user_property_set_validate( subscribe_view->user_properties,
subscribe_view->user_property_count, "aws_mqtt5_packet_subscribe_view", (void *)subscribe_view)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_subscribe_view_log( const struct aws_mqtt5_packet_subscribe_view *subscribe_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } size_t subscription_count = subscribe_view->subscription_count; for (size_t i = 0; i < subscription_count; ++i) { const struct aws_mqtt5_subscription_view *view = &subscribe_view->subscriptions[i]; AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view subscription #%zu, topic filter \"" PRInSTR "\", qos %d, no local %d, retain as " "published %d, retain handling %d (%s)", (void *)subscribe_view, i, AWS_BYTE_CURSOR_PRI(view->topic_filter), (int)view->qos, (int)view->no_local, (int)view->retain_as_published, (int)view->retain_handling_type, aws_mqtt5_retain_handling_type_to_c_string(view->retain_handling_type)); } if (subscribe_view->subscription_identifier != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view subscription identifier set to %" PRIu32, (void *)subscribe_view, *subscribe_view->subscription_identifier); } s_aws_mqtt5_user_property_set_log( log_handle, subscribe_view->user_properties, subscribe_view->user_property_count, (void *)subscribe_view, level, "aws_mqtt5_packet_subscribe_view"); } void aws_mqtt5_packet_subscribe_storage_clean_up(struct aws_mqtt5_packet_subscribe_storage *subscribe_storage) { if (subscribe_storage == NULL) { return; } aws_array_list_clean_up(&subscribe_storage->subscriptions); aws_mqtt5_user_property_set_clean_up(&subscribe_storage->user_properties); aws_byte_buf_clean_up(&subscribe_storage->storage); } static int s_aws_mqtt5_packet_subscribe_storage_init_subscriptions( struct aws_mqtt5_packet_subscribe_storage *subscribe_storage, struct aws_allocator *allocator, size_t subscription_count, const struct aws_mqtt5_subscription_view *subscriptions) { if (aws_array_list_init_dynamic( &subscribe_storage->subscriptions, allocator, subscription_count, sizeof(struct aws_mqtt5_subscription_view))) { return AWS_OP_ERR; } for (size_t i = 0; i < subscription_count; ++i) { const struct aws_mqtt5_subscription_view *source = &subscriptions[i]; struct aws_mqtt5_subscription_view copy = *source; if (aws_byte_buf_append_and_update(&subscribe_storage->storage, &copy.topic_filter)) { return AWS_OP_ERR; } if (aws_array_list_push_back(&subscribe_storage->subscriptions, &copy)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static size_t s_aws_mqtt5_packet_subscribe_compute_storage_size( const struct aws_mqtt5_packet_subscribe_view *subscribe_view) { size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size( subscribe_view->user_properties, subscribe_view->user_property_count); for (size_t i = 0; i < subscribe_view->subscription_count; ++i) { const struct aws_mqtt5_subscription_view *subscription = &subscribe_view->subscriptions[i]; storage_size += subscription->topic_filter.len; } return storage_size; } int aws_mqtt5_packet_subscribe_storage_init( struct aws_mqtt5_packet_subscribe_storage *subscribe_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_subscribe_view *subscribe_options) { AWS_ZERO_STRUCT(*subscribe_storage); size_t storage_capacity = s_aws_mqtt5_packet_subscribe_compute_storage_size(subscribe_options); if (aws_byte_buf_init(&subscribe_storage->storage,
allocator, storage_capacity)) { return AWS_OP_ERR; } struct aws_mqtt5_packet_subscribe_view *storage_view = &subscribe_storage->storage_view; storage_view->packet_id = subscribe_options->packet_id; if (subscribe_options->subscription_identifier != NULL) { subscribe_storage->subscription_identifier = *subscribe_options->subscription_identifier; storage_view->subscription_identifier = &subscribe_storage->subscription_identifier; } if (s_aws_mqtt5_packet_subscribe_storage_init_subscriptions( subscribe_storage, allocator, subscribe_options->subscription_count, subscribe_options->subscriptions)) { return AWS_OP_ERR; } storage_view->subscription_count = aws_array_list_length(&subscribe_storage->subscriptions); storage_view->subscriptions = subscribe_storage->subscriptions.data; if (aws_mqtt5_user_property_set_init_with_storage( &subscribe_storage->user_properties, allocator, &subscribe_storage->storage, subscribe_options->user_property_count, subscribe_options->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&subscribe_storage->user_properties); storage_view->user_properties = subscribe_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_subscribe_storage_init_from_external_storage( struct aws_mqtt5_packet_subscribe_storage *subscribe_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*subscribe_storage); if (aws_mqtt5_user_property_set_init(&subscribe_storage->user_properties, allocator)) { return AWS_OP_ERR; } if (aws_array_list_init_dynamic( &subscribe_storage->subscriptions, allocator, 0, sizeof(struct aws_mqtt5_subscription_view))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_aws_mqtt5_operation_subscribe_complete( struct aws_mqtt5_operation *operation, int error_code, enum aws_mqtt5_packet_type packet_type, const void *completion_view) { (void)packet_type; struct aws_mqtt5_operation_subscribe *subscribe_op = operation->impl; if (subscribe_op->completion_options.completion_callback != NULL) { (*subscribe_op->completion_options.completion_callback)( completion_view, error_code, subscribe_op->completion_options.completion_user_data); } } static void s_aws_mqtt5_operation_subscribe_set_packet_id( struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id) { struct aws_mqtt5_operation_subscribe *subscribe_op = operation->impl; subscribe_op->options_storage.storage_view.packet_id = packet_id; } static aws_mqtt5_packet_id_t *s_aws_mqtt5_operation_subscribe_get_packet_id_address( const struct aws_mqtt5_operation *operation) { struct aws_mqtt5_operation_subscribe *subscribe_op = operation->impl; return &subscribe_op->options_storage.storage_view.packet_id; } static uint32_t s_aws_mqtt5_operation_subscribe_get_ack_timeout_override(const struct aws_mqtt5_operation *operation) { struct aws_mqtt5_operation_subscribe *subscribe_op = operation->impl; return subscribe_op->completion_options.ack_timeout_seconds_override; } static struct aws_mqtt5_operation_vtable s_subscribe_operation_vtable = { .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_operation_subscribe_complete, .aws_mqtt5_operation_set_packet_id_fn = s_aws_mqtt5_operation_subscribe_set_packet_id, .aws_mqtt5_operation_get_packet_id_address_fn = s_aws_mqtt5_operation_subscribe_get_packet_id_address, .aws_mqtt5_operation_validate_vs_connection_settings_fn = NULL, .aws_mqtt5_operation_get_ack_timeout_override_fn = s_aws_mqtt5_operation_subscribe_get_ack_timeout_override, }; static void 
s_destroy_operation_subscribe(void *object) { if (object == NULL) { return; } struct aws_mqtt5_operation_subscribe *subscribe_op = object; aws_mqtt5_packet_subscribe_storage_clean_up(&subscribe_op->options_storage); aws_mem_release(subscribe_op->allocator, subscribe_op); } struct aws_mqtt5_operation_subscribe *aws_mqtt5_operation_subscribe_new( struct aws_allocator *allocator, const struct aws_mqtt5_client *client, const struct aws_mqtt5_packet_subscribe_view *subscribe_options, const struct aws_mqtt5_subscribe_completion_options *completion_options) { (void)client; AWS_PRECONDITION(allocator != NULL); AWS_PRECONDITION(subscribe_options != NULL); if (aws_mqtt5_packet_subscribe_view_validate(subscribe_options)) { return NULL; } if (subscribe_options->packet_id != 0) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_subscribe_view packet id must be zero", (void *)subscribe_options); aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION); return NULL; } struct aws_mqtt5_operation_subscribe *subscribe_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_subscribe)); if (subscribe_op == NULL) { return NULL; } subscribe_op->allocator = allocator; subscribe_op->base.vtable = &s_subscribe_operation_vtable; subscribe_op->base.packet_type = AWS_MQTT5_PT_SUBSCRIBE; aws_ref_count_init(&subscribe_op->base.ref_count, subscribe_op, s_destroy_operation_subscribe); aws_priority_queue_node_init(&subscribe_op->base.priority_queue_node); subscribe_op->base.impl = subscribe_op; if (aws_mqtt5_packet_subscribe_storage_init(&subscribe_op->options_storage, allocator, subscribe_options)) { goto error; } subscribe_op->base.packet_view = &subscribe_op->options_storage.storage_view; if (completion_options != NULL) { subscribe_op->completion_options = *completion_options; } return subscribe_op; error: aws_mqtt5_operation_release(&subscribe_op->base); return NULL; } /********************************************************************************************************************* * Suback ********************************************************************************************************************/ static size_t s_aws_mqtt5_packet_suback_compute_storage_size(const struct aws_mqtt5_packet_suback_view *suback_view) { size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size( suback_view->user_properties, suback_view->user_property_count); if (suback_view->reason_string != NULL) { storage_size += suback_view->reason_string->len; } return storage_size; } static int s_aws_mqtt5_packet_suback_storage_init_reason_codes( struct aws_mqtt5_packet_suback_storage *suback_storage, struct aws_allocator *allocator, size_t reason_code_count, const enum aws_mqtt5_suback_reason_code *reason_codes) { if (aws_array_list_init_dynamic( &suback_storage->reason_codes, allocator, reason_code_count, sizeof(enum aws_mqtt5_suback_reason_code))) { return AWS_OP_ERR; } for (size_t i = 0; i < reason_code_count; ++i) { aws_array_list_push_back(&suback_storage->reason_codes, &reason_codes[i]); } return AWS_OP_SUCCESS; } AWS_MQTT_API int aws_mqtt5_packet_suback_storage_init( struct aws_mqtt5_packet_suback_storage *suback_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_suback_view *suback_view) { AWS_ZERO_STRUCT(*suback_storage); size_t storage_capacity = s_aws_mqtt5_packet_suback_compute_storage_size(suback_view); if (aws_byte_buf_init(&suback_storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } struct aws_mqtt5_packet_suback_view *storage_view = 
&suback_storage->storage_view; storage_view->packet_id = suback_view->packet_id; if (suback_view->reason_string != NULL) { suback_storage->reason_string = *suback_view->reason_string; if (aws_byte_buf_append_and_update(&suback_storage->storage, &suback_storage->reason_string)) { return AWS_OP_ERR; } storage_view->reason_string = &suback_storage->reason_string; } if (s_aws_mqtt5_packet_suback_storage_init_reason_codes( suback_storage, allocator, suback_view->reason_code_count, suback_view->reason_codes)) { return AWS_OP_ERR; } storage_view->reason_code_count = aws_array_list_length(&suback_storage->reason_codes); storage_view->reason_codes = suback_storage->reason_codes.data; if (aws_mqtt5_user_property_set_init_with_storage( &suback_storage->user_properties, allocator, &suback_storage->storage, suback_view->user_property_count, suback_view->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&suback_storage->user_properties); storage_view->user_properties = suback_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_suback_storage_init_from_external_storage( struct aws_mqtt5_packet_suback_storage *suback_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*suback_storage); if (aws_mqtt5_user_property_set_init(&suback_storage->user_properties, allocator)) { return AWS_OP_ERR; } if (aws_array_list_init_dynamic( &suback_storage->reason_codes, allocator, 0, sizeof(enum aws_mqtt5_suback_reason_code))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_suback_storage_clean_up(struct aws_mqtt5_packet_suback_storage *suback_storage) { if (suback_storage == NULL) { return; } aws_mqtt5_user_property_set_clean_up(&suback_storage->user_properties); aws_array_list_clean_up(&suback_storage->reason_codes); aws_byte_buf_clean_up(&suback_storage->storage); } void aws_mqtt5_packet_suback_view_log( const struct aws_mqtt5_packet_suback_view *suback_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_suback_view packet id set to %d", (void *)suback_view, (int)suback_view->packet_id); for (size_t i = 0; i < suback_view->reason_code_count; ++i) { enum aws_mqtt5_suback_reason_code reason_code = suback_view->reason_codes[i]; AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_suback_view topic #%zu, reason code %d (%s)", (void *)suback_view, i, (int)reason_code, aws_mqtt5_suback_reason_code_to_c_string(reason_code)); } s_aws_mqtt5_user_property_set_log( log_handle, suback_view->user_properties, suback_view->user_property_count, (void *)suback_view, level, "aws_mqtt5_packet_suback_view"); } /********************************************************************************************************************* * Unsuback ********************************************************************************************************************/ static size_t s_aws_mqtt5_packet_unsuback_compute_storage_size( const struct aws_mqtt5_packet_unsuback_view *unsuback_view) { size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size( unsuback_view->user_properties, unsuback_view->user_property_count); if (unsuback_view->reason_string != NULL) { storage_size += unsuback_view->reason_string->len; } return storage_size; } static int s_aws_mqtt5_packet_unsuback_storage_init_reason_codes( struct 
aws_mqtt5_packet_unsuback_storage *unsuback_storage, struct aws_allocator *allocator, size_t reason_code_count, const enum aws_mqtt5_unsuback_reason_code *reason_codes) { if (aws_array_list_init_dynamic( &unsuback_storage->reason_codes, allocator, reason_code_count, sizeof(enum aws_mqtt5_unsuback_reason_code))) { return AWS_OP_ERR; } for (size_t i = 0; i < reason_code_count; ++i) { aws_array_list_push_back(&unsuback_storage->reason_codes, &reason_codes[i]); } return AWS_OP_SUCCESS; } AWS_MQTT_API int aws_mqtt5_packet_unsuback_storage_init( struct aws_mqtt5_packet_unsuback_storage *unsuback_storage, struct aws_allocator *allocator, const struct aws_mqtt5_packet_unsuback_view *unsuback_view) { AWS_ZERO_STRUCT(*unsuback_storage); size_t storage_capacity = s_aws_mqtt5_packet_unsuback_compute_storage_size(unsuback_view); if (aws_byte_buf_init(&unsuback_storage->storage, allocator, storage_capacity)) { return AWS_OP_ERR; } struct aws_mqtt5_packet_unsuback_view *storage_view = &unsuback_storage->storage_view; storage_view->packet_id = unsuback_view->packet_id; if (unsuback_view->reason_string != NULL) { unsuback_storage->reason_string = *unsuback_view->reason_string; if (aws_byte_buf_append_and_update(&unsuback_storage->storage, &unsuback_storage->reason_string)) { return AWS_OP_ERR; } storage_view->reason_string = &unsuback_storage->reason_string; } if (s_aws_mqtt5_packet_unsuback_storage_init_reason_codes( unsuback_storage, allocator, unsuback_view->reason_code_count, unsuback_view->reason_codes)) { return AWS_OP_ERR; } storage_view->reason_code_count = aws_array_list_length(&unsuback_storage->reason_codes); storage_view->reason_codes = unsuback_storage->reason_codes.data; if (aws_mqtt5_user_property_set_init_with_storage( &unsuback_storage->user_properties, allocator, &unsuback_storage->storage, unsuback_view->user_property_count, unsuback_view->user_properties)) { return AWS_OP_ERR; } storage_view->user_property_count = aws_mqtt5_user_property_set_size(&unsuback_storage->user_properties); storage_view->user_properties = unsuback_storage->user_properties.properties.data; return AWS_OP_SUCCESS; } int aws_mqtt5_packet_unsuback_storage_init_from_external_storage( struct aws_mqtt5_packet_unsuback_storage *unsuback_storage, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*unsuback_storage); if (aws_mqtt5_user_property_set_init(&unsuback_storage->user_properties, allocator)) { return AWS_OP_ERR; } if (aws_array_list_init_dynamic( &unsuback_storage->reason_codes, allocator, 0, sizeof(enum aws_mqtt5_unsuback_reason_code))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } void aws_mqtt5_packet_unsuback_storage_clean_up(struct aws_mqtt5_packet_unsuback_storage *unsuback_storage) { if (unsuback_storage == NULL) { return; } aws_mqtt5_user_property_set_clean_up(&unsuback_storage->user_properties); aws_array_list_clean_up(&unsuback_storage->reason_codes); aws_byte_buf_clean_up(&unsuback_storage->storage); } void aws_mqtt5_packet_unsuback_view_log( const struct aws_mqtt5_packet_unsuback_view *unsuback_view, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsuback_view packet id set to %d", (void *)unsuback_view, (int)unsuback_view->packet_id); for (size_t i = 0; i < unsuback_view->reason_code_count; ++i) { enum aws_mqtt5_unsuback_reason_code reason_code = unsuback_view->reason_codes[i]; AWS_LOGUF( log_handle, level, 
AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_unsuback_view topic #%zu, reason code %d (%s)", (void *)unsuback_view, i, (int)reason_code, aws_mqtt5_unsuback_reason_code_to_c_string(reason_code)); } s_aws_mqtt5_user_property_set_log( log_handle, unsuback_view->user_properties, unsuback_view->user_property_count, (void *)unsuback_view, level, "aws_mqtt5_packet_unsuback_view"); } /********************************************************************************************************************* * PINGREQ ********************************************************************************************************************/ static void s_destroy_operation_pingreq(void *object) { if (object == NULL) { return; } struct aws_mqtt5_operation_pingreq *pingreq_op = object; aws_mem_release(pingreq_op->allocator, pingreq_op); } struct aws_mqtt5_operation_pingreq *aws_mqtt5_operation_pingreq_new(struct aws_allocator *allocator) { struct aws_mqtt5_operation_pingreq *pingreq_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_pingreq)); if (pingreq_op == NULL) { return NULL; } pingreq_op->allocator = allocator; pingreq_op->base.vtable = &s_empty_operation_vtable; pingreq_op->base.packet_type = AWS_MQTT5_PT_PINGREQ; aws_ref_count_init(&pingreq_op->base.ref_count, pingreq_op, s_destroy_operation_pingreq); aws_priority_queue_node_init(&pingreq_op->base.priority_queue_node); pingreq_op->base.impl = pingreq_op; return pingreq_op; } bool aws_mqtt5_client_keep_alive_options_are_valid(uint16_t keep_alive_interval_seconds, uint32_t ping_timeout_ms) { /* The client will not behave properly if ping timeout is not significantly shorter than the keep alive interval */ if (keep_alive_interval_seconds > 0) { uint64_t keep_alive_ms = aws_timestamp_convert(keep_alive_interval_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); uint64_t one_second_ms = aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL); if (ping_timeout_ms == 0) { ping_timeout_ms = AWS_MQTT5_CLIENT_DEFAULT_PING_TIMEOUT_MS; } if ((uint64_t)ping_timeout_ms + one_second_ms > keep_alive_ms) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "keep alive interval is too small relative to ping timeout interval"); return false; } } return true; } /********************************************************************************************************************* * Client storage options ********************************************************************************************************************/ int aws_mqtt5_client_options_validate(const struct aws_mqtt5_client_options *options) { if (options == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null mqtt5 client configuration options"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (options->host_name.len == 0) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "host name not set in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (options->bootstrap == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "client bootstrap not set in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } /* forbid no-timeout until someone convinces me otherwise */ if (options->socket_options != NULL) { if (options->socket_options->type == AWS_SOCKET_DGRAM || options->socket_options->connect_timeout_ms == 0) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid socket options in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } } if 
(aws_socket_validate_port_for_connect( options->port, options->socket_options ? options->socket_options->domain : AWS_SOCKET_IPV4)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid port in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (options->http_proxy_options != NULL) { if (options->http_proxy_options->host.len == 0) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "proxy host name not set in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (aws_socket_validate_port_for_connect(options->http_proxy_options->port, AWS_SOCKET_IPV4)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid proxy port in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } } /* can't think of why you'd ever want an MQTT client without lifecycle event notifications */ if (options->lifecycle_event_handler == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "lifecycle event handler not set in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (options->publish_received_handler == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "publish received not set in mqtt5 client configuration"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (aws_mqtt5_packet_connect_view_validate(options->connect_options)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid CONNECT options in mqtt5 client configuration"); /* connect validation failure will have already rasied the appropriate error */ return AWS_OP_ERR; } /* The client will not behave properly if ping timeout is not significantly shorter than the keep alive interval */ if (!aws_mqtt5_client_keep_alive_options_are_valid( options->connect_options->keep_alive_interval_seconds, options->ping_timeout_ms)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "keep alive interval is too small relative to ping timeout interval"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (options->topic_aliasing_options != NULL) { if (!aws_mqtt5_outbound_topic_alias_behavior_type_validate( options->topic_aliasing_options->outbound_topic_alias_behavior)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid outbound topic alias behavior type value"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (!aws_mqtt5_inbound_topic_alias_behavior_type_validate( options->topic_aliasing_options->inbound_topic_alias_behavior)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid inbound topic alias behavior type value"); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } } return AWS_OP_SUCCESS; } static void s_log_tls_connection_options( struct aws_logger *log_handle, const struct aws_mqtt5_client_options_storage *options_storage, const struct aws_tls_connection_options *tls_options, enum aws_log_level level, const char *log_text) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage %s tls options set:", (void *)options_storage, log_text); if (tls_options->advertise_alpn_message && tls_options->alpn_list) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage %s tls options alpn protocol list set to \"%s\"", (void *)options_storage, log_text, aws_string_c_str(tls_options->alpn_list)); } else { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage %s tls options alpn not used", (void *)options_storage, log_text); } if (tls_options->server_name) { 
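/* an explicit server name on the TLS options is logged as the SNI value; the else branch below records that SNI is unset */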
AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage %s tls options SNI value set to \"%s\"", (void *)options_storage, log_text, aws_string_c_str(tls_options->server_name)); } else { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage %s tls options SNI not used", (void *)options_storage, log_text); } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage %s tls options tls context set to (%p)", (void *)options_storage, log_text, (void *)(tls_options->ctx)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage %s tls options handshake timeout set to %" PRIu32, (void *)options_storage, log_text, tls_options->timeout_ms); } static void s_log_topic_aliasing_options( struct aws_logger *log_handle, const struct aws_mqtt5_client_options_storage *options_storage, const struct aws_mqtt5_client_topic_alias_options *topic_aliasing_options, enum aws_log_level level) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage outbound topic aliasing behavior set to %d (%s)", (void *)options_storage, (int)topic_aliasing_options->outbound_topic_alias_behavior, aws_mqtt5_outbound_topic_alias_behavior_type_to_c_string( topic_aliasing_options->outbound_topic_alias_behavior)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage maximum outbound topic alias cache size set to %" PRIu16, (void *)options_storage, topic_aliasing_options->outbound_alias_cache_max_size); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage inbound topic aliasing behavior set to %d (%s)", (void *)options_storage, (int)topic_aliasing_options->inbound_topic_alias_behavior, aws_mqtt5_inbound_topic_alias_behavior_type_to_c_string(topic_aliasing_options->inbound_topic_alias_behavior)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage inbound topic alias cache size set to %" PRIu16, (void *)options_storage, topic_aliasing_options->inbound_alias_cache_size); } void aws_mqtt5_client_options_storage_log( const struct aws_mqtt5_client_options_storage *options_storage, enum aws_log_level level) { struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level); if (log_handle == NULL) { return; } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage host name set to %s", (void *)options_storage, aws_string_c_str(options_storage->host_name)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage port set to %" PRIu32, (void *)options_storage, options_storage->port); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage client bootstrap set to (%p)", (void *)options_storage, (void *)options_storage->bootstrap); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage socket options set to: type = %d, domain = %d, connect_timeout_ms = " "%" PRIu32, (void *)options_storage, (int)options_storage->socket_options.type, (int)options_storage->socket_options.domain, options_storage->socket_options.connect_timeout_ms); if (options_storage->socket_options.keepalive) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage socket keepalive options set to: keep_alive_interval_sec = " "%" PRIu16 ", " "keep_alive_timeout_sec = %" 
PRIu16 ", keep_alive_max_failed_probes = %" PRIu16, (void *)options_storage, options_storage->socket_options.keep_alive_interval_sec, options_storage->socket_options.keep_alive_timeout_sec, options_storage->socket_options.keep_alive_max_failed_probes); } if (options_storage->tls_options_ptr != NULL) { s_log_tls_connection_options(log_handle, options_storage, options_storage->tls_options_ptr, level, ""); } if (options_storage->http_proxy_config != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage using http proxy:", (void *)options_storage); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage http proxy host name set to " PRInSTR, (void *)options_storage, AWS_BYTE_CURSOR_PRI(options_storage->http_proxy_options.host)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage http proxy port set to %" PRIu32, (void *)options_storage, options_storage->http_proxy_options.port); if (options_storage->http_proxy_options.tls_options != NULL) { s_log_tls_connection_options( log_handle, options_storage, options_storage->tls_options_ptr, level, "http proxy"); } /* ToDo: add (and use) an API to proxy strategy that returns a debug string (Basic, Adaptive, etc...) */ if (options_storage->http_proxy_options.proxy_strategy != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage http proxy strategy set to (%p)", (void *)options_storage, (void *)options_storage->http_proxy_options.proxy_strategy); } } if (options_storage->websocket_handshake_transform != NULL) { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage enabling websockets", (void *)options_storage); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage websocket handshake transform user data set to (%p)", (void *)options_storage, options_storage->websocket_handshake_transform_user_data); } else { AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: mqtt5_client_options_storage disabling websockets", (void *)options_storage); } AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage session behavior set to %d (%s)", (void *)options_storage, (int)options_storage->session_behavior, aws_mqtt5_client_session_behavior_type_to_c_string(options_storage->session_behavior)); s_log_topic_aliasing_options(log_handle, options_storage, &options_storage->topic_aliasing_options, level); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage extended validation and flow control options set to %d (%s)", (void *)options_storage, (int)options_storage->extended_validation_and_flow_control_options, aws_mqtt5_extended_validation_and_flow_control_options_to_c_string( options_storage->extended_validation_and_flow_control_options)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage operation queue behavior set to %d (%s)", (void *)options_storage, (int)options_storage->offline_queue_behavior, aws_mqtt5_client_operation_queue_behavior_type_to_c_string(options_storage->offline_queue_behavior)); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage reconnect jitter mode set to %d", (void *)options_storage, (int)options_storage->retry_jitter_mode); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: mqtt5_client_options_storage reconnect delay min set to %" 
PRIu64 " ms, max set to %" PRIu64 " ms", (void *)options_storage, options_storage->min_reconnect_delay_ms, options_storage->max_reconnect_delay_ms); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage minimum necessary connection time in order to reset the reconnect " "delay " "set " "to %" PRIu64 " ms", (void *)options_storage, options_storage->min_connected_time_to_reset_reconnect_delay_ms); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage ping timeout interval set to %" PRIu32 " ms", (void *)options_storage, options_storage->ping_timeout_ms); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage connack timeout interval set to %" PRIu32 " ms", (void *)options_storage, options_storage->connack_timeout_ms); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage connect options:", (void *)options_storage); aws_mqtt5_packet_connect_view_log(&options_storage->connect->storage_view, level); AWS_LOGUF( log_handle, level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_client_options_storage lifecycle event handler user data set to (%p)", (void *)options_storage, options_storage->lifecycle_event_handler_user_data); } void aws_mqtt5_client_options_storage_destroy(struct aws_mqtt5_client_options_storage *options_storage) { if (options_storage == NULL) { return; } aws_string_destroy(options_storage->host_name); aws_client_bootstrap_release(options_storage->bootstrap); aws_tls_connection_options_clean_up(&options_storage->tls_options); aws_http_proxy_config_destroy(options_storage->http_proxy_config); aws_mqtt5_packet_connect_storage_clean_up(options_storage->connect); aws_mem_release(options_storage->connect->allocator, options_storage->connect); aws_mem_release(options_storage->allocator, options_storage); } static void s_apply_zero_valued_defaults_to_client_options_storage( struct aws_mqtt5_client_options_storage *options_storage) { if (options_storage->min_reconnect_delay_ms == 0) { options_storage->min_reconnect_delay_ms = AWS_MQTT5_CLIENT_DEFAULT_MIN_RECONNECT_DELAY_MS; } if (options_storage->max_reconnect_delay_ms == 0) { options_storage->max_reconnect_delay_ms = AWS_MQTT5_CLIENT_DEFAULT_MAX_RECONNECT_DELAY_MS; } if (options_storage->min_connected_time_to_reset_reconnect_delay_ms == 0) { options_storage->min_connected_time_to_reset_reconnect_delay_ms = AWS_MQTT5_CLIENT_DEFAULT_MIN_CONNECTED_TIME_TO_RESET_RECONNECT_DELAY_MS; } if (options_storage->ping_timeout_ms == 0) { options_storage->ping_timeout_ms = AWS_MQTT5_CLIENT_DEFAULT_PING_TIMEOUT_MS; } if (options_storage->connack_timeout_ms == 0) { options_storage->connack_timeout_ms = AWS_MQTT5_CLIENT_DEFAULT_CONNACK_TIMEOUT_MS; } if (options_storage->ack_timeout_seconds == 0) { options_storage->ack_timeout_seconds = AWS_MQTT5_CLIENT_DEFAULT_OPERATION_TIMEOUNT_SECONDS; } if (options_storage->topic_aliasing_options.inbound_alias_cache_size == 0) { options_storage->topic_aliasing_options.inbound_alias_cache_size = AWS_MQTT5_CLIENT_DEFAULT_INBOUND_TOPIC_ALIAS_CACHE_SIZE; } if (options_storage->topic_aliasing_options.outbound_alias_cache_max_size == 0) { options_storage->topic_aliasing_options.outbound_alias_cache_max_size = AWS_MQTT5_CLIENT_DEFAULT_OUTBOUND_TOPIC_ALIAS_CACHE_SIZE; } } struct aws_mqtt5_client_options_storage *aws_mqtt5_client_options_storage_new( struct aws_allocator *allocator, const struct aws_mqtt5_client_options *options) { AWS_PRECONDITION(allocator != NULL); 
AWS_PRECONDITION(options != NULL); if (aws_mqtt5_client_options_validate(options)) { return NULL; } struct aws_mqtt5_client_options_storage *options_storage = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_client_options_storage)); if (options_storage == NULL) { return NULL; } options_storage->allocator = allocator; options_storage->host_name = aws_string_new_from_cursor(allocator, &options->host_name); if (options_storage->host_name == NULL) { goto error; } options_storage->port = options->port; options_storage->bootstrap = aws_client_bootstrap_acquire(options->bootstrap); if (options->socket_options != NULL) { options_storage->socket_options = *options->socket_options; } else { options_storage->socket_options.type = AWS_SOCKET_STREAM; options_storage->socket_options.connect_timeout_ms = AWS_MQTT5_DEFAULT_SOCKET_CONNECT_TIMEOUT_MS; } if (options->tls_options != NULL) { if (aws_tls_connection_options_copy(&options_storage->tls_options, options->tls_options)) { goto error; } options_storage->tls_options_ptr = &options_storage->tls_options; if (!options_storage->tls_options.server_name) { struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_string(options_storage->host_name); if (aws_tls_connection_options_set_server_name(&options_storage->tls_options, allocator, &host_name_cur)) { AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "Failed to set TLS Connection Options server name"); goto error; } } } if (options->http_proxy_options != NULL) { options_storage->http_proxy_config = aws_http_proxy_config_new_from_proxy_options(allocator, options->http_proxy_options); if (options_storage->http_proxy_config == NULL) { goto error; } aws_http_proxy_options_init_from_config( &options_storage->http_proxy_options, options_storage->http_proxy_config); } options_storage->websocket_handshake_transform = options->websocket_handshake_transform; options_storage->websocket_handshake_transform_user_data = options->websocket_handshake_transform_user_data; options_storage->publish_received_handler = options->publish_received_handler; options_storage->publish_received_handler_user_data = options->publish_received_handler_user_data; options_storage->session_behavior = options->session_behavior; options_storage->extended_validation_and_flow_control_options = options->extended_validation_and_flow_control_options; options_storage->offline_queue_behavior = options->offline_queue_behavior; options_storage->retry_jitter_mode = options->retry_jitter_mode; options_storage->min_reconnect_delay_ms = options->min_reconnect_delay_ms; options_storage->max_reconnect_delay_ms = options->max_reconnect_delay_ms; options_storage->min_connected_time_to_reset_reconnect_delay_ms = options->min_connected_time_to_reset_reconnect_delay_ms; options_storage->ping_timeout_ms = options->ping_timeout_ms; options_storage->connack_timeout_ms = options->connack_timeout_ms; options_storage->ack_timeout_seconds = options->ack_timeout_seconds; if (options->topic_aliasing_options != NULL) { options_storage->topic_aliasing_options = *options->topic_aliasing_options; } options_storage->connect = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_packet_connect_storage)); if (aws_mqtt5_packet_connect_storage_init(options_storage->connect, allocator, options->connect_options)) { goto error; } options_storage->lifecycle_event_handler = options->lifecycle_event_handler; options_storage->lifecycle_event_handler_user_data = options->lifecycle_event_handler_user_data; options_storage->client_termination_handler = options->client_termination_handler; 
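/* the termination handler's user data pointer is copied next, before zero-valued defaults are applied */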
options_storage->client_termination_handler_user_data = options->client_termination_handler_user_data; s_apply_zero_valued_defaults_to_client_options_storage(options_storage); /* must do this after zero-valued defaults are applied so that max reconnect is accurate */ if (options->host_resolution_override) { options_storage->host_resolution_override = *options->host_resolution_override; } else { options_storage->host_resolution_override = aws_host_resolver_init_default_resolution_config(); options_storage->host_resolution_override.resolve_frequency_ns = aws_timestamp_convert( options_storage->max_reconnect_delay_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); } return options_storage; error: aws_mqtt5_client_options_storage_destroy(options_storage); return NULL; } struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_acquire( struct aws_mqtt5_operation_disconnect *disconnect_op) { if (disconnect_op != NULL) { aws_mqtt5_operation_acquire(&disconnect_op->base); } return disconnect_op; } struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_release( struct aws_mqtt5_operation_disconnect *disconnect_op) { if (disconnect_op != NULL) { aws_mqtt5_operation_release(&disconnect_op->base); } return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_to_mqtt3_adapter.c000066400000000000000000003434331456575232400266310ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include /* * A best-effort-but-not-100%-accurate translation from mqtt5 error codes to mqtt311 error codes. */ static int s_translate_mqtt5_error_code_to_mqtt311(int error_code) { switch (error_code) { case AWS_ERROR_MQTT5_ENCODE_FAILURE: case AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR: return AWS_ERROR_MQTT_PROTOCOL_ERROR; case AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED: return AWS_ERROR_MQTT_PROTOCOL_ERROR; /* a decidedly strange choice by the 311 implementation */ case AWS_ERROR_MQTT5_CONNACK_TIMEOUT: case AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT: return AWS_ERROR_MQTT_TIMEOUT; case AWS_ERROR_MQTT5_USER_REQUESTED_STOP: case AWS_ERROR_MQTT5_CLIENT_TERMINATED: return AWS_IO_SOCKET_CLOSED; case AWS_ERROR_MQTT5_DISCONNECT_RECEIVED: return AWS_ERROR_MQTT_UNEXPECTED_HANGUP; case AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY: return AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION; case AWS_ERROR_MQTT5_ENCODE_SIZE_UNSUPPORTED_PACKET_TYPE: return AWS_ERROR_MQTT_INVALID_PACKET_TYPE; case AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE: return AWS_ERROR_MQTT_PROTOCOL_ERROR; case AWS_ERROR_MQTT5_INVALID_UTF8_STRING: return AWS_ERROR_MQTT_INVALID_TOPIC; default: return error_code; } } struct aws_mqtt_adapter_final_destroy_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection *connection; }; static void s_mqtt_adapter_final_destroy_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; struct aws_mqtt_adapter_final_destroy_task *destroy_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = destroy_task->connection->impl; AWS_LOGF_DEBUG(AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: Final destruction of mqtt3-to-5 adapter", (void *)adapter); aws_mqtt_client_on_connection_termination_fn *termination_handler = NULL; void *termination_handler_user_data = NULL; if (adapter->on_termination != NULL) { termination_handler = adapter->on_termination; 
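/* capture the user data too; the termination callback is invoked only after the adapter memory has been released below */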
termination_handler_user_data = adapter->on_termination_user_data; } if (adapter->client->config->websocket_handshake_transform_user_data == adapter) { /* * If the mqtt5 client is pointing to us for websocket transform, then erase that. The callback * is invoked from our pinned event loop so this is safe. * * TODO: It is possible that multiple adapters may have sequentially side-affected the websocket handshake. * For now, in that case, subsequent connection attempts will probably not succeed. */ adapter->client->config->websocket_handshake_transform = NULL; adapter->client->config->websocket_handshake_transform_user_data = NULL; } aws_mqtt_subscription_set_destroy(adapter->subscriptions); aws_mqtt5_to_mqtt3_adapter_operation_table_clean_up(&adapter->operational_state); adapter->client = aws_mqtt5_client_release(adapter->client); aws_mem_release(adapter->allocator, adapter); aws_mem_release(destroy_task->allocator, destroy_task); /* trigger the termination callback */ if (termination_handler) { termination_handler(termination_handler_user_data); } } static struct aws_mqtt_adapter_final_destroy_task *s_aws_mqtt_adapter_final_destroy_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter) { struct aws_mqtt_adapter_final_destroy_task *destroy_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_adapter_final_destroy_task)); aws_task_init( &destroy_task->task, s_mqtt_adapter_final_destroy_task_fn, (void *)destroy_task, "MqttAdapterFinalDestroy"); destroy_task->allocator = adapter->allocator; destroy_task->connection = &adapter->base; /* Do not acquire, we're at zero external and internal ref counts */ return destroy_task; } static void s_aws_mqtt_adapter_final_destroy(struct aws_mqtt_client_connection_5_impl *adapter) { struct aws_mqtt_adapter_final_destroy_task *task = s_aws_mqtt_adapter_final_destroy_task_new(adapter->allocator, adapter); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create adapter final destroy task, last_error: %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); } struct aws_mqtt_adapter_disconnect_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; aws_mqtt_client_on_disconnect_fn *on_disconnect; void *on_disconnect_user_data; }; static void s_adapter_disconnect_task_fn(struct aws_task *task, void *arg, enum aws_task_status status); static struct aws_mqtt_adapter_disconnect_task *s_aws_mqtt_adapter_disconnect_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, aws_mqtt_client_on_disconnect_fn *on_disconnect, void *on_disconnect_user_data) { struct aws_mqtt_adapter_disconnect_task *disconnect_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_adapter_disconnect_task)); aws_task_init( &disconnect_task->task, s_adapter_disconnect_task_fn, (void *)disconnect_task, "AdapterDisconnectTask"); disconnect_task->allocator = adapter->allocator; disconnect_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); disconnect_task->on_disconnect = on_disconnect; disconnect_task->on_disconnect_user_data = on_disconnect_user_data; return disconnect_task; } static int s_aws_mqtt_client_connection_5_disconnect( void *impl, aws_mqtt_client_on_disconnect_fn *on_disconnect, void *on_disconnect_user_data) { struct 
aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_adapter_disconnect_task *task = s_aws_mqtt_adapter_disconnect_task_new(adapter->allocator, adapter, on_disconnect, on_disconnect_user_data); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create adapter disconnect task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_adapter_connect_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; struct aws_byte_buf host_name; uint32_t port; struct aws_socket_options socket_options; struct aws_tls_connection_options *tls_options_ptr; struct aws_tls_connection_options tls_options; struct aws_byte_buf client_id; uint16_t keep_alive_time_secs; uint32_t ping_timeout_ms; uint32_t protocol_operation_timeout_ms; aws_mqtt_client_on_connection_complete_fn *on_connection_complete; void *on_connection_complete_user_data; bool clean_session; }; static void s_aws_mqtt_adapter_connect_task_destroy(struct aws_mqtt_adapter_connect_task *task) { if (task == NULL) { return; } aws_byte_buf_clean_up(&task->host_name); aws_byte_buf_clean_up(&task->client_id); if (task->tls_options_ptr) { aws_tls_connection_options_clean_up(task->tls_options_ptr); } aws_mem_release(task->allocator, task); } static void s_adapter_connect_task_fn(struct aws_task *task, void *arg, enum aws_task_status status); static struct aws_mqtt_adapter_connect_task *s_aws_mqtt_adapter_connect_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, const struct aws_mqtt_connection_options *connection_options) { struct aws_mqtt_adapter_connect_task *connect_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_adapter_connect_task)); aws_task_init(&connect_task->task, s_adapter_connect_task_fn, (void *)connect_task, "AdapterConnectTask"); connect_task->allocator = adapter->allocator; aws_byte_buf_init_copy_from_cursor(&connect_task->host_name, allocator, connection_options->host_name); connect_task->port = connection_options->port; connect_task->socket_options = *connection_options->socket_options; if (connection_options->tls_options) { if (aws_tls_connection_options_copy(&connect_task->tls_options, connection_options->tls_options)) { goto error; } connect_task->tls_options_ptr = &connect_task->tls_options; /* Cheat and set the tls_options host_name to our copy if they're the same */ if (!connect_task->tls_options.server_name) { struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_buf(&connect_task->host_name); if (aws_tls_connection_options_set_server_name( &connect_task->tls_options, connect_task->allocator, &host_name_cur)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - Failed to set TLS Connection Options server name", (void *)adapter); goto error; } } } connect_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); aws_byte_buf_init_copy_from_cursor(&connect_task->client_id, allocator, connection_options->client_id); connect_task->keep_alive_time_secs = connection_options->keep_alive_time_secs; connect_task->ping_timeout_ms = connection_options->ping_timeout_ms; connect_task->protocol_operation_timeout_ms = connection_options->protocol_operation_timeout_ms; connect_task->on_connection_complete = 
connection_options->on_connection_complete; connect_task->on_connection_complete_user_data = connection_options->user_data; connect_task->clean_session = connection_options->clean_session; return connect_task; error: s_aws_mqtt_adapter_connect_task_destroy(connect_task); return NULL; } static int s_validate_adapter_connection_options( const struct aws_mqtt_connection_options *connection_options, struct aws_mqtt_client_connection_5_impl *adapter) { if (connection_options == NULL) { return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } if (connection_options->host_name.len == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - host name not set in MQTT client configuration", (void *)adapter); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } /* forbid no-timeout until someone convinces me otherwise */ if (connection_options->socket_options != NULL) { if (connection_options->socket_options->type == AWS_SOCKET_DGRAM || connection_options->socket_options->connect_timeout_ms == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - invalid socket options in MQTT client configuration", (void *)adapter); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } } /* The client will not behave properly if ping timeout is not significantly shorter than the keep alive interval */ if (!aws_mqtt5_client_keep_alive_options_are_valid( connection_options->keep_alive_time_secs, connection_options->ping_timeout_ms)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - keep alive interval is too small relative to ping timeout interval", (void *)adapter); return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION); } return AWS_OP_SUCCESS; } static int s_aws_mqtt_client_connection_5_connect( void *impl, const struct aws_mqtt_connection_options *connection_options) { struct aws_mqtt_client_connection_5_impl *adapter = impl; /* The client will not behave properly if ping timeout is not significantly shorter than the keep alive interval */ if (s_validate_adapter_connection_options(connection_options, adapter)) { return AWS_OP_ERR; } struct aws_mqtt_adapter_connect_task *task = s_aws_mqtt_adapter_connect_task_new(adapter->allocator, adapter, connection_options); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - failed to create adapter connect task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } static void s_aws_mqtt5_to_mqtt3_adapter_lifecycle_handler(const struct aws_mqtt5_client_lifecycle_event *event) { struct aws_mqtt_client_connection_5_impl *adapter = event->user_data; switch (event->event_type) { case AWS_MQTT5_CLET_CONNECTION_SUCCESS: AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - received on connection success event from mqtt5 client, adapter in state " "(%d)", (void *)adapter, (int)adapter->adapter_state); if (adapter->adapter_state != AWS_MQTT_AS_STAY_DISCONNECTED) { if (adapter->on_connection_success != NULL) { (*adapter->on_connection_success)( &adapter->base, 0, event->settings->rejoined_session, adapter->on_connection_success_user_data); } if (adapter->adapter_state == AWS_MQTT_AS_FIRST_CONNECT) { /* * If the 311 view is that this is an initial connection attempt, then invoke the completion * callback and move to the 
stay-connected state. */ if (adapter->on_connection_complete != NULL) { (*adapter->on_connection_complete)( &adapter->base, event->error_code, 0, event->settings->rejoined_session, adapter->on_connection_complete_user_data); adapter->on_connection_complete = NULL; adapter->on_connection_complete_user_data = NULL; } adapter->adapter_state = AWS_MQTT_AS_STAY_CONNECTED; } else if (adapter->adapter_state == AWS_MQTT_AS_STAY_CONNECTED) { /* * If the 311 view is that we're in the stay-connected state (ie we've successfully done or * simulated an initial connection), then invoke the connection resumption callback. */ if (adapter->on_resumed != NULL) { (*adapter->on_resumed)( &adapter->base, 0, event->settings->rejoined_session, adapter->on_resumed_user_data); } } } break; case AWS_MQTT5_CLET_CONNECTION_FAILURE: AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - received on connection failure event from mqtt5 client, adapter in state " "(%d)", (void *)adapter, (int)adapter->adapter_state); /* * The MQTT311 interface only cares about connection failures when it's the initial connection attempt * after a call to connect(). Since an adapter connect() can sever an existing connection (with an * error code of AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT) we only react to connection failures * if * (1) the error code is not AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT and * (2) we're in the FIRST_CONNECT state * * Only if both of these are true should we invoke the connection completion callback with a failure and * put the adapter into the "disconnected" state, simulating the way the 311 client stops after an * initial connection failure. */ if (event->error_code != AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT) { if (adapter->adapter_state != AWS_MQTT_AS_STAY_DISCONNECTED) { int mqtt311_error_code = s_translate_mqtt5_error_code_to_mqtt311(event->error_code); if (adapter->on_connection_failure != NULL) { (*adapter->on_connection_failure)( &adapter->base, mqtt311_error_code, adapter->on_connection_failure_user_data); } if (adapter->adapter_state == AWS_MQTT_AS_FIRST_CONNECT) { if (adapter->on_connection_complete != NULL) { (*adapter->on_connection_complete)( &adapter->base, mqtt311_error_code, 0, false, adapter->on_connection_complete_user_data); adapter->on_connection_complete = NULL; adapter->on_connection_complete_user_data = NULL; } adapter->adapter_state = AWS_MQTT_AS_STAY_DISCONNECTED; } } } break; case AWS_MQTT5_CLET_DISCONNECTION: AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - received on disconnection event from mqtt5 client, adapter in state (%d), " "error code (%d)", (void *)adapter, (int)adapter->adapter_state, event->error_code); /* * If the 311 view is that we're in the stay-connected state (ie we've successfully done or simulated * an initial connection), then invoke the connection interrupted callback. 
*/ if (adapter->on_interrupted != NULL && adapter->adapter_state == AWS_MQTT_AS_STAY_CONNECTED && event->error_code != AWS_ERROR_MQTT_CONNECTION_RESET_FOR_ADAPTER_CONNECT) { (*adapter->on_interrupted)( &adapter->base, s_translate_mqtt5_error_code_to_mqtt311(event->error_code), adapter->on_interrupted_user_data); } break; case AWS_MQTT5_CLET_STOPPED: AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - received on stopped event from mqtt5 client, adapter in state (%d)", (void *)adapter, (int)adapter->adapter_state); /* If an MQTT311-view user is waiting on a disconnect callback, invoke it */ if (adapter->on_disconnect) { (*adapter->on_disconnect)(&adapter->base, adapter->on_disconnect_user_data); adapter->on_disconnect = NULL; adapter->on_disconnect_user_data = NULL; } if (adapter->on_closed) { (*adapter->on_closed)(&adapter->base, NULL, adapter->on_closed_user_data); } /* * Judgement call: If the mqtt5 client is stopped behind our back, it seems better to transition to the * disconnected state (which only requires a connect() to restart) then stay in the STAY_CONNECTED state * which currently requires a disconnect() and then a connect() to restore connectivity. * * ToDo: what if we disabled mqtt5 client start/stop somehow while the adapter is attached, preventing * the potential to backstab each other? Unfortunately neither start() nor stop() have an error reporting * mechanism. */ adapter->adapter_state = AWS_MQTT_AS_STAY_DISCONNECTED; break; default: break; } } static void s_aws_mqtt5_to_mqtt3_adapter_disconnect_handler( struct aws_mqtt_client_connection_5_impl *adapter, struct aws_mqtt_adapter_disconnect_task *disconnect_task) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - performing disconnect safe callback, adapter in state (%d)", (void *)adapter, (int)adapter->adapter_state); /* * If we're already disconnected (from the 311 perspective only), then invoke the callback and return */ if (adapter->adapter_state == AWS_MQTT_AS_STAY_DISCONNECTED) { if (disconnect_task->on_disconnect) { (*disconnect_task->on_disconnect)(&adapter->base, disconnect_task->on_disconnect_user_data); } return; } /* * If we had a pending first connect, then notify failure */ if (adapter->adapter_state == AWS_MQTT_AS_FIRST_CONNECT) { if (adapter->on_connection_complete != NULL) { (*adapter->on_connection_complete)( &adapter->base, AWS_ERROR_MQTT_CONNECTION_SHUTDOWN, 0, false, adapter->on_connection_complete_user_data); adapter->on_connection_complete = NULL; adapter->on_connection_complete_user_data = NULL; } } adapter->adapter_state = AWS_MQTT_AS_STAY_DISCONNECTED; bool invoke_callbacks = true; if (adapter->client->desired_state != AWS_MCS_STOPPED) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - disconnect forwarding stop request to mqtt5 client", (void *)adapter); aws_mqtt5_client_change_desired_state(adapter->client, AWS_MCS_STOPPED, NULL); adapter->on_disconnect = disconnect_task->on_disconnect; adapter->on_disconnect_user_data = disconnect_task->on_disconnect_user_data; invoke_callbacks = false; } if (invoke_callbacks) { if (disconnect_task->on_disconnect != NULL) { (*disconnect_task->on_disconnect)(&adapter->base, disconnect_task->on_disconnect_user_data); } if (adapter->on_closed) { (*adapter->on_closed)(&adapter->base, NULL, adapter->on_closed_user_data); } } } static void s_adapter_disconnect_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_adapter_disconnect_task 
*disconnect_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = disconnect_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } s_aws_mqtt5_to_mqtt3_adapter_disconnect_handler(adapter, disconnect_task); done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(disconnect_task->allocator, disconnect_task); } static void s_aws_mqtt5_to_mqtt3_adapter_update_config_on_connect( struct aws_mqtt_client_connection_5_impl *adapter, struct aws_mqtt_adapter_connect_task *connect_task) { struct aws_mqtt5_client_options_storage *config = adapter->client->config; aws_string_destroy(config->host_name); config->host_name = aws_string_new_from_buf(adapter->allocator, &connect_task->host_name); config->port = connect_task->port; config->socket_options = connect_task->socket_options; if (config->tls_options_ptr) { aws_tls_connection_options_clean_up(&config->tls_options); config->tls_options_ptr = NULL; } if (connect_task->tls_options_ptr) { aws_tls_connection_options_copy(&config->tls_options, connect_task->tls_options_ptr); config->tls_options_ptr = &config->tls_options; } aws_byte_buf_clean_up(&adapter->client->negotiated_settings.client_id_storage); aws_byte_buf_init_copy_from_cursor( &adapter->client->negotiated_settings.client_id_storage, adapter->allocator, aws_byte_cursor_from_buf(&connect_task->client_id)); config->connect->storage_view.keep_alive_interval_seconds = connect_task->keep_alive_time_secs; config->ping_timeout_ms = connect_task->ping_timeout_ms; /* Override timeout, rounding up as necessary */ config->ack_timeout_seconds = (uint32_t)aws_timestamp_convert( connect_task->protocol_operation_timeout_ms + AWS_TIMESTAMP_MILLIS - 1, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, NULL); if (connect_task->clean_session) { config->session_behavior = AWS_MQTT5_CSBT_CLEAN; config->connect->storage_view.session_expiry_interval_seconds = NULL; } else { config->session_behavior = AWS_MQTT5_CSBT_REJOIN_ALWAYS; /* This is a judgement call to translate session expiry to the maximum possible allowed by AWS IoT Core */ config->connect->session_expiry_interval_seconds = 7 * 24 * 60 * 60; config->connect->storage_view.session_expiry_interval_seconds = &config->connect->session_expiry_interval_seconds; } } static void s_aws_mqtt5_to_mqtt3_adapter_connect_handler( struct aws_mqtt_client_connection_5_impl *adapter, struct aws_mqtt_adapter_connect_task *connect_task) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - performing connect safe callback, adapter in state (%d)", (void *)adapter, (int)adapter->adapter_state); if (adapter->adapter_state != AWS_MQTT_AS_STAY_DISCONNECTED) { if (connect_task->on_connection_complete) { (*connect_task->on_connection_complete)( &adapter->base, AWS_ERROR_MQTT_ALREADY_CONNECTED, 0, false, connect_task->on_connection_complete_user_data); } return; } if (adapter->on_disconnect) { (*adapter->on_disconnect)(&adapter->base, adapter->on_disconnect_user_data); adapter->on_disconnect = NULL; adapter->on_disconnect_user_data = NULL; } adapter->adapter_state = AWS_MQTT_AS_FIRST_CONNECT; AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - resetting mqtt5 client connection and requesting start", (void *)adapter); /* Update mqtt5 config */ s_aws_mqtt5_to_mqtt3_adapter_update_config_on_connect(adapter, connect_task); aws_mqtt5_client_reset_connection(adapter->client); aws_mqtt5_client_change_desired_state(adapter->client, AWS_MCS_CONNECTED, NULL); adapter->on_connection_complete = 
connect_task->on_connection_complete; adapter->on_connection_complete_user_data = connect_task->on_connection_complete_user_data; } static void s_adapter_connect_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_adapter_connect_task *connect_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = connect_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } s_aws_mqtt5_to_mqtt3_adapter_connect_handler(adapter, connect_task); done: aws_ref_count_release(&adapter->internal_refs); s_aws_mqtt_adapter_connect_task_destroy(connect_task); } static bool s_aws_mqtt5_listener_publish_received_adapter( const struct aws_mqtt5_packet_publish_view *publish, void *user_data) { struct aws_mqtt_client_connection_5_impl *adapter = user_data; struct aws_mqtt_client_connection *connection = &adapter->base; struct aws_mqtt_subscription_set_publish_received_options incoming_publish_options = { .connection = connection, .topic = publish->topic, .qos = (enum aws_mqtt_qos)publish->qos, .retain = publish->retain, .dup = publish->duplicate, .payload = publish->payload, }; aws_mqtt_subscription_set_on_publish_received(adapter->subscriptions, &incoming_publish_options); if (adapter->on_any_publish) { (*adapter->on_any_publish)( connection, &publish->topic, &publish->payload, publish->duplicate, (enum aws_mqtt_qos)publish->qos, publish->retain, adapter->on_any_publish_user_data); } return false; } struct aws_mqtt_set_interruption_handlers_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; aws_mqtt_client_on_connection_interrupted_fn *on_interrupted; void *on_interrupted_user_data; aws_mqtt_client_on_connection_resumed_fn *on_resumed; void *on_resumed_user_data; }; static void s_set_interruption_handlers_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_interruption_handlers_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } adapter->on_interrupted = set_task->on_interrupted; adapter->on_interrupted_user_data = set_task->on_interrupted_user_data; adapter->on_resumed = set_task->on_resumed; adapter->on_resumed_user_data = set_task->on_resumed_user_data; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_interruption_handlers_task *s_aws_mqtt_set_interruption_handlers_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, aws_mqtt_client_on_connection_interrupted_fn *on_interrupted, void *on_interrupted_user_data, aws_mqtt_client_on_connection_resumed_fn *on_resumed, void *on_resumed_user_data) { struct aws_mqtt_set_interruption_handlers_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_interruption_handlers_task)); aws_task_init( &set_task->task, s_set_interruption_handlers_task_fn, (void *)set_task, "SetInterruptionHandlersTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->on_interrupted = on_interrupted; set_task->on_interrupted_user_data = on_interrupted_user_data; set_task->on_resumed = on_resumed; set_task->on_resumed_user_data = on_resumed_user_data; return set_task; } static int s_aws_mqtt_client_connection_5_set_interruption_handlers( void *impl, 
aws_mqtt_client_on_connection_interrupted_fn *on_interrupted, void *on_interrupted_user_data, aws_mqtt_client_on_connection_resumed_fn *on_resumed, void *on_resumed_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_interruption_handlers_task *task = s_aws_mqtt_set_interruption_handlers_task_new( adapter->allocator, adapter, on_interrupted, on_interrupted_user_data, on_resumed, on_resumed_user_data); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set interruption handlers task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_connection_result_handlers_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; aws_mqtt_client_on_connection_success_fn *on_connection_success; void *on_connection_success_user_data; aws_mqtt_client_on_connection_failure_fn *on_connection_failure; void *on_connection_failure_user_data; }; static void s_set_connection_result_handlers_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_connection_result_handlers_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } adapter->on_connection_success = set_task->on_connection_success; adapter->on_connection_success_user_data = set_task->on_connection_success_user_data; adapter->on_connection_failure = set_task->on_connection_failure; adapter->on_connection_failure_user_data = set_task->on_connection_failure_user_data; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_connection_result_handlers_task *s_aws_mqtt_set_connection_result_handlers_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, aws_mqtt_client_on_connection_success_fn *on_connection_success, void *on_connection_success_user_data, aws_mqtt_client_on_connection_failure_fn *on_connection_failure, void *on_connection_failure_user_data) { struct aws_mqtt_set_connection_result_handlers_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_connection_result_handlers_task)); aws_task_init( &set_task->task, s_set_connection_result_handlers_task_fn, (void *)set_task, "SetConnectionResultHandlersTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->on_connection_success = on_connection_success; set_task->on_connection_success_user_data = on_connection_success_user_data; set_task->on_connection_failure = on_connection_failure; set_task->on_connection_failure_user_data = on_connection_failure_user_data; return set_task; } static int s_aws_mqtt_client_connection_5_set_connection_result_handlers( void *impl, aws_mqtt_client_on_connection_success_fn *on_connection_success, void *on_connection_success_user_data, aws_mqtt_client_on_connection_failure_fn *on_connection_failure, void *on_connection_failure_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_connection_result_handlers_task *task = s_aws_mqtt_set_connection_result_handlers_task_new( adapter->allocator, adapter, on_connection_success, 
on_connection_success_user_data, on_connection_failure, on_connection_failure_user_data); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set connection result handlers task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_on_closed_handler_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; aws_mqtt_client_on_connection_closed_fn *on_closed; void *on_closed_user_data; }; static void s_set_on_closed_handler_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_on_closed_handler_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } adapter->on_closed = set_task->on_closed; adapter->on_closed_user_data = set_task->on_closed_user_data; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_on_closed_handler_task *s_aws_mqtt_set_on_closed_handler_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, aws_mqtt_client_on_connection_closed_fn *on_closed, void *on_closed_user_data) { struct aws_mqtt_set_on_closed_handler_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_on_closed_handler_task)); aws_task_init(&set_task->task, s_set_on_closed_handler_task_fn, (void *)set_task, "SetOnClosedHandlerTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->on_closed = on_closed; set_task->on_closed_user_data = on_closed_user_data; return set_task; } static int s_aws_mqtt_client_connection_5_set_on_closed_handler( void *impl, aws_mqtt_client_on_connection_closed_fn *on_closed, void *on_closed_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_on_closed_handler_task *task = s_aws_mqtt_set_on_closed_handler_task_new(adapter->allocator, adapter, on_closed, on_closed_user_data); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set on closed handler task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_on_termination_handlers_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; aws_mqtt_client_on_connection_termination_fn *on_termination_callback; void *on_termination_ud; }; static void s_set_on_termination_handlers_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_on_termination_handlers_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } adapter->on_termination = set_task->on_termination_callback; adapter->on_termination_user_data = set_task->on_termination_ud; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_on_termination_handlers_task 
*s_aws_mqtt_set_on_termination_handler_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, aws_mqtt_client_on_connection_termination_fn *on_termination, void *on_termination_user_data) { struct aws_mqtt_set_on_termination_handlers_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_on_termination_handlers_task)); aws_task_init(&set_task->task, s_set_on_termination_handlers_task_fn, (void *)set_task, "SetOnClosedHandlerTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->on_termination_callback = on_termination; set_task->on_termination_ud = on_termination_user_data; return set_task; } static int s_aws_mqtt_client_connection_5_set_termination_handler( void *impl, aws_mqtt_client_on_connection_termination_fn *on_termination, void *on_termination_ud) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_on_termination_handlers_task *task = s_aws_mqtt_set_on_termination_handler_task_new(adapter->allocator, adapter, on_termination, on_termination_ud); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set on closed handler task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_on_any_publish_handler_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; aws_mqtt_client_publish_received_fn *on_any_publish; void *on_any_publish_user_data; }; static void s_set_on_any_publish_handler_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_on_any_publish_handler_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } adapter->on_any_publish = set_task->on_any_publish; adapter->on_any_publish_user_data = set_task->on_any_publish_user_data; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_on_any_publish_handler_task *s_aws_mqtt_set_on_any_publish_handler_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, aws_mqtt_client_publish_received_fn *on_any_publish, void *on_any_publish_user_data) { struct aws_mqtt_set_on_any_publish_handler_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_on_any_publish_handler_task)); aws_task_init( &set_task->task, s_set_on_any_publish_handler_task_fn, (void *)set_task, "SetOnAnyPublishHandlerTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->on_any_publish = on_any_publish; set_task->on_any_publish_user_data = on_any_publish_user_data; return set_task; } static int s_aws_mqtt_client_connection_5_set_on_any_publish_handler( void *impl, aws_mqtt_client_publish_received_fn *on_any_publish, void *on_any_publish_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_on_any_publish_handler_task *task = s_aws_mqtt_set_on_any_publish_handler_task_new( adapter->allocator, adapter, on_any_publish, on_any_publish_user_data); if (task == NULL) { int 
error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set on any publish task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_reconnect_timeout_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; uint64_t min_timeout; uint64_t max_timeout; }; static void s_set_reconnect_timeout_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_reconnect_timeout_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } /* we're in the mqtt5 client's event loop; it's safe to access internal state */ adapter->client->config->min_reconnect_delay_ms = set_task->min_timeout; adapter->client->config->max_reconnect_delay_ms = set_task->max_timeout; adapter->client->current_reconnect_delay_ms = set_task->min_timeout; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_reconnect_timeout_task *s_aws_mqtt_set_reconnect_timeout_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, uint64_t min_timeout, uint64_t max_timeout) { struct aws_mqtt_set_reconnect_timeout_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_reconnect_timeout_task)); aws_task_init(&set_task->task, s_set_reconnect_timeout_task_fn, (void *)set_task, "SetReconnectTimeoutTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->min_timeout = aws_min_u64(min_timeout, max_timeout); set_task->max_timeout = aws_max_u64(min_timeout, max_timeout); return set_task; } static int s_aws_mqtt_client_connection_5_set_reconnect_timeout( void *impl, uint64_t min_timeout, uint64_t max_timeout) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_reconnect_timeout_task *task = s_aws_mqtt_set_reconnect_timeout_task_new(adapter->allocator, adapter, min_timeout, max_timeout); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set reconnect timeout task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_http_proxy_options_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; struct aws_http_proxy_config *proxy_config; }; static void s_set_http_proxy_options_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_http_proxy_options_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } /* we're in the mqtt5 client's event loop; it's safe to access internal state */ aws_http_proxy_config_destroy(adapter->client->config->http_proxy_config); /* move the proxy config from the set task to the client's config */ adapter->client->config->http_proxy_config = set_task->proxy_config; if (adapter->client->config->http_proxy_config != NULL) { 
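/* refresh the cached proxy options view so it references the newly installed proxy config */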
aws_http_proxy_options_init_from_config( &adapter->client->config->http_proxy_options, adapter->client->config->http_proxy_config); } /* don't clean up the proxy config if it was successfully assigned to the mqtt5 client */ set_task->proxy_config = NULL; done: aws_ref_count_release(&adapter->internal_refs); /* If the task was canceled we need to clean this up because it didn't get assigned to the mqtt5 client */ aws_http_proxy_config_destroy(set_task->proxy_config); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_http_proxy_options_task *s_aws_mqtt_set_http_proxy_options_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, struct aws_http_proxy_options *proxy_options) { struct aws_http_proxy_config *proxy_config = aws_http_proxy_config_new_tunneling_from_proxy_options(allocator, proxy_options); if (proxy_config == NULL) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_mqtt_set_http_proxy_options_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_http_proxy_options_task)); aws_task_init(&set_task->task, s_set_http_proxy_options_task_fn, (void *)set_task, "SetHttpProxyOptionsTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->proxy_config = proxy_config; return set_task; } static int s_aws_mqtt_client_connection_5_set_http_proxy_options( void *impl, struct aws_http_proxy_options *proxy_options) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_http_proxy_options_task *task = s_aws_mqtt_set_http_proxy_options_task_new(adapter->allocator, adapter, proxy_options); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set http proxy options task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_use_websockets_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; aws_mqtt_transform_websocket_handshake_fn *transformer; void *transformer_user_data; }; static void s_aws_mqtt5_adapter_websocket_handshake_completion_fn( struct aws_http_message *request, int error_code, void *complete_ctx) { struct aws_mqtt_client_connection_5_impl *adapter = complete_ctx; (*adapter->mqtt5_websocket_handshake_completion_function)( request, s_translate_mqtt5_error_code_to_mqtt311(error_code), adapter->mqtt5_websocket_handshake_completion_user_data); aws_ref_count_release(&adapter->internal_refs); } static void s_aws_mqtt5_adapter_transform_websocket_handshake_fn( struct aws_http_message *request, void *user_data, aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn, void *complete_ctx) { struct aws_mqtt_client_connection_5_impl *adapter = user_data; if (adapter->websocket_handshake_transformer == NULL) { (*complete_fn)(request, AWS_ERROR_SUCCESS, complete_ctx); } else { aws_ref_count_acquire(&adapter->internal_refs); adapter->mqtt5_websocket_handshake_completion_function = complete_fn; adapter->mqtt5_websocket_handshake_completion_user_data = complete_ctx; (*adapter->websocket_handshake_transformer)( request, adapter->websocket_handshake_transformer_user_data, s_aws_mqtt5_adapter_websocket_handshake_completion_fn, adapter); } } static void 
s_set_use_websockets_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_use_websockets_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } adapter->websocket_handshake_transformer = set_task->transformer; adapter->websocket_handshake_transformer_user_data = set_task->transformer_user_data; /* we're in the mqtt5 client's event loop; it's safe to access its internal state */ adapter->client->config->websocket_handshake_transform = s_aws_mqtt5_adapter_transform_websocket_handshake_fn; adapter->client->config->websocket_handshake_transform_user_data = adapter; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_use_websockets_task *s_aws_mqtt_set_use_websockets_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, aws_mqtt_transform_websocket_handshake_fn *transformer, void *transformer_user_data) { struct aws_mqtt_set_use_websockets_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_use_websockets_task)); aws_task_init(&set_task->task, s_set_use_websockets_task_fn, (void *)set_task, "SetUseWebsocketsTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->transformer = transformer; set_task->transformer_user_data = transformer_user_data; return set_task; } static int s_aws_mqtt_client_connection_5_use_websockets( void *impl, aws_mqtt_transform_websocket_handshake_fn *transformer, void *transformer_user_data, aws_mqtt_validate_websocket_handshake_fn *validator, void *validator_user_data) { /* mqtt5 doesn't use these */ (void)validator; (void)validator_user_data; struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_use_websockets_task *task = s_aws_mqtt_set_use_websockets_task_new(adapter->allocator, adapter, transformer, transformer_user_data); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set use websockets task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_host_resolution_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; struct aws_host_resolution_config host_resolution_config; }; static void s_set_host_resolution_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_host_resolution_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } /* we're in the mqtt5 client's event loop; it's safe to access internal state */ adapter->client->config->host_resolution_override = set_task->host_resolution_config; done: aws_ref_count_release(&adapter->internal_refs); aws_mem_release(set_task->allocator, set_task); } static struct aws_mqtt_set_host_resolution_task *s_aws_mqtt_set_host_resolution_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, const struct aws_host_resolution_config *host_resolution_config) { struct aws_mqtt_set_host_resolution_task *set_task = aws_mem_calloc(allocator, 1, 
sizeof(struct aws_mqtt_set_host_resolution_task)); aws_task_init(&set_task->task, s_set_host_resolution_task_fn, (void *)set_task, "SetHostResolutionTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); set_task->host_resolution_config = *host_resolution_config; return set_task; } static int s_aws_mqtt_client_connection_5_set_host_resolution_options( void *impl, const struct aws_host_resolution_config *host_resolution_config) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_host_resolution_task *task = s_aws_mqtt_set_host_resolution_task_new(adapter->allocator, adapter, host_resolution_config); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set reconnect timeout task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_will_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; struct aws_byte_buf topic_buffer; enum aws_mqtt_qos qos; bool retain; struct aws_byte_buf payload_buffer; }; static void s_aws_mqtt_set_will_task_destroy(struct aws_mqtt_set_will_task *task) { if (task == NULL) { return; } aws_byte_buf_clean_up(&task->topic_buffer); aws_byte_buf_clean_up(&task->payload_buffer); aws_mem_release(task->allocator, task); } static void s_set_will_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_will_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } /* we're in the mqtt5 client's event loop; it's safe to access internal state */ struct aws_mqtt5_packet_connect_storage *connect = adapter->client->config->connect; /* clean up the old will if necessary */ if (connect->will != NULL) { aws_mqtt5_packet_publish_storage_clean_up(connect->will); aws_mem_release(connect->allocator, connect->will); connect->will = NULL; } struct aws_mqtt5_packet_publish_view will = { .topic = aws_byte_cursor_from_buf(&set_task->topic_buffer), .qos = (enum aws_mqtt5_qos)set_task->qos, .retain = set_task->retain, .payload = aws_byte_cursor_from_buf(&set_task->payload_buffer), }; /* make a new will */ connect->will = aws_mem_calloc(connect->allocator, 1, sizeof(struct aws_mqtt5_packet_publish_storage)); aws_mqtt5_packet_publish_storage_init(connect->will, connect->allocator, &will); /* manually update the storage view's will reference */ connect->storage_view.will = &connect->will->storage_view; done: aws_ref_count_release(&adapter->internal_refs); s_aws_mqtt_set_will_task_destroy(set_task); } static struct aws_mqtt_set_will_task *s_aws_mqtt_set_will_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload) { if (topic == NULL) { return NULL; } struct aws_mqtt_set_will_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_will_task)); aws_task_init(&set_task->task, s_set_will_task_fn, (void *)set_task, "SetWillTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); 
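    /*
     * Sketch of the copy-then-cross-threads pattern used by this task (simplified; "set_task" fields as
     * in the surrounding code): the caller's topic/payload cursors may not outlive this call, so the task
     * copies them into byte buffers it owns before being scheduled onto the mqtt5 client's event loop:
     *
     *     aws_byte_buf_init_copy_from_cursor(&set_task->topic_buffer, allocator, *topic);
     *     (later, on the event loop)
     *     struct aws_byte_cursor topic = aws_byte_cursor_from_buf(&set_task->topic_buffer);
     */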
set_task->qos = qos; set_task->retain = retain; aws_byte_buf_init_copy_from_cursor(&set_task->topic_buffer, allocator, *topic); if (payload != NULL) { aws_byte_buf_init_copy_from_cursor(&set_task->payload_buffer, allocator, *payload); } return set_task; } static int s_aws_mqtt_client_connection_5_set_will( void *impl, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload) { struct aws_mqtt_client_connection_5_impl *adapter = impl; /* check qos */ if (qos < 0 || qos > AWS_MQTT_QOS_EXACTLY_ONCE) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, invalid qos for will", (void *)adapter); return aws_raise_error(AWS_ERROR_MQTT_INVALID_QOS); } /* check topic */ if (!aws_mqtt_is_valid_topic(topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, invalid topic for will", (void *)adapter); return aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); } struct aws_mqtt_set_will_task *task = s_aws_mqtt_set_will_task_new(adapter->allocator, adapter, topic, qos, retain, payload); if (task == NULL) { AWS_LOGF_ERROR(AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set will task", (void *)adapter); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } struct aws_mqtt_set_login_task { struct aws_task task; struct aws_allocator *allocator; struct aws_mqtt_client_connection_5_impl *adapter; struct aws_byte_buf username_buffer; struct aws_byte_buf password_buffer; }; static void s_aws_mqtt_set_login_task_destroy(struct aws_mqtt_set_login_task *task) { if (task == NULL) { return; } aws_byte_buf_clean_up_secure(&task->username_buffer); aws_byte_buf_clean_up_secure(&task->password_buffer); aws_mem_release(task->allocator, task); } static void s_set_login_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt_set_login_task *set_task = arg; struct aws_mqtt_client_connection_5_impl *adapter = set_task->adapter; if (status != AWS_TASK_STATUS_RUN_READY) { goto done; } struct aws_byte_cursor username_cursor = aws_byte_cursor_from_buf(&set_task->username_buffer); struct aws_byte_cursor password_cursor = aws_byte_cursor_from_buf(&set_task->password_buffer); /* we're in the mqtt5 client's event loop; it's safe to access internal state */ struct aws_mqtt5_packet_connect_storage *old_connect = adapter->client->config->connect; /* * Packet storage stores binary data in a single buffer. The safest way to replace some binary data is * to make a new storage from the old storage, deleting the old storage after construction is complete. 
*/ struct aws_mqtt5_packet_connect_view new_connect_view = old_connect->storage_view; if (set_task->username_buffer.len > 0) { new_connect_view.username = &username_cursor; } else { new_connect_view.username = NULL; } if (set_task->password_buffer.len > 0) { new_connect_view.password = &password_cursor; } else { new_connect_view.password = NULL; } if (aws_mqtt5_packet_connect_view_validate(&new_connect_view)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter - invalid CONNECT username or password", (void *)adapter); goto done; } struct aws_mqtt5_packet_connect_storage *new_connect = aws_mem_calloc(adapter->allocator, 1, sizeof(struct aws_mqtt5_packet_connect_storage)); aws_mqtt5_packet_connect_storage_init(new_connect, adapter->allocator, &new_connect_view); adapter->client->config->connect = new_connect; aws_mqtt5_packet_connect_storage_clean_up(old_connect); aws_mem_release(old_connect->allocator, old_connect); done: aws_ref_count_release(&adapter->internal_refs); s_aws_mqtt_set_login_task_destroy(set_task); } static struct aws_mqtt_set_login_task *s_aws_mqtt_set_login_task_new( struct aws_allocator *allocator, struct aws_mqtt_client_connection_5_impl *adapter, const struct aws_byte_cursor *username, const struct aws_byte_cursor *password) { struct aws_mqtt_set_login_task *set_task = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_set_login_task)); aws_task_init(&set_task->task, s_set_login_task_fn, (void *)set_task, "SetLoginTask"); set_task->allocator = adapter->allocator; set_task->adapter = (struct aws_mqtt_client_connection_5_impl *)aws_ref_count_acquire(&adapter->internal_refs); if (username != NULL) { aws_byte_buf_init_copy_from_cursor(&set_task->username_buffer, allocator, *username); } if (password != NULL) { aws_byte_buf_init_copy_from_cursor(&set_task->password_buffer, allocator, *password); } return set_task; } static int s_aws_mqtt_client_connection_5_set_login( void *impl, const struct aws_byte_cursor *username, const struct aws_byte_cursor *password) { struct aws_mqtt_client_connection_5_impl *adapter = impl; struct aws_mqtt_set_login_task *task = s_aws_mqtt_set_login_task_new(adapter->allocator, adapter, username, password); if (task == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: failed to create set login task, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } aws_event_loop_schedule_task_now(adapter->loop, &task->task); return AWS_OP_SUCCESS; } static void s_aws_mqtt5_to_mqtt3_adapter_on_zero_internal_refs(void *context) { struct aws_mqtt_client_connection_5_impl *adapter = context; s_aws_mqtt_adapter_final_destroy(adapter); } static void s_aws_mqtt5_to_mqtt3_adapter_on_listener_detached(void *context) { struct aws_mqtt_client_connection_5_impl *adapter = context; /* * Release the single internal reference that we started with. Only ephemeral references for cross-thread * tasks might remain, and they will disappear quickly. 
*/
    aws_ref_count_release(&adapter->internal_refs);
}

static struct aws_mqtt_client_connection *s_aws_mqtt_client_connection_5_acquire(void *impl) {
    struct aws_mqtt_client_connection_5_impl *adapter = impl;

    aws_ref_count_acquire(&adapter->external_refs);

    return &adapter->base;
}

static void s_aws_mqtt5_to_mqtt3_adapter_on_zero_external_refs(void *impl) {
    struct aws_mqtt_client_connection_5_impl *adapter = impl;

    /*
     * When the adapter's external ref count goes to zero, here's what we want to do:
     *
     *  (1) Release the client listener, starting its asynchronous shutdown process (since we're the only user
     *      of it)
     *  (2) Wait for the client listener to notify us that asynchronous shutdown is over.  At this point we
     *      are guaranteed that no more callbacks from the mqtt5 client will reach us.
     *  (3) Release the single internal ref we started with when the adapter was created.
     *  (4) On last internal ref, we can safely release the mqtt5 client and synchronously clean up all other
     *      resources
     *
     *  Step (1) is done here.
     *  Steps (2) and (3) are accomplished by s_aws_mqtt5_to_mqtt3_adapter_on_listener_detached
     *  Step (4) is completed by s_aws_mqtt5_to_mqtt3_adapter_on_zero_internal_refs
     */
    aws_mqtt5_listener_release(adapter->listener);
}

static void s_aws_mqtt_client_connection_5_release(void *impl) {
    struct aws_mqtt_client_connection_5_impl *adapter = impl;

    aws_ref_count_release(&adapter->external_refs);
}

/*
 * When submitting an operation (across threads), we not only need to keep the adapter alive, we also need to keep
 * the operation alive since it's technically already being tracked within the adapter's operational state.
 *
 * Note: we may not truly need the operation ref but it's safer to keep it.
 */
static void s_aws_mqtt5_to_mqtt3_adapter_operation_acquire_cross_thread_refs(
    struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation) {
    if (!operation->holding_adapter_ref) {
        operation->holding_adapter_ref = true;
        aws_ref_count_acquire(&operation->adapter->internal_refs);
    }

    aws_mqtt5_to_mqtt3_adapter_operation_acquire(operation);
}

/*
 * Once an operation has been received on the adapter's event loop, whether rejected or accepted, we must release the
 * transient references to the operation and adapter
 */
static void s_aws_mqtt5_to_mqtt3_adapter_operation_release_cross_thread_refs(
    struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation) {
    if (operation->holding_adapter_ref) {
        operation->holding_adapter_ref = false;
        aws_ref_count_release(&operation->adapter->internal_refs);
    }

    aws_mqtt5_to_mqtt3_adapter_operation_release(operation);
}

static void s_adapter_publish_operation_destroy(void *context) {
    struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation = context;
    if (operation == NULL) {
        return;
    }

    struct aws_mqtt5_to_mqtt3_adapter_operation_publish *publish_op = operation->impl;

    struct aws_mqtt_client_connection_5_impl *adapter_to_release = NULL;
    if (publish_op->base.holding_adapter_ref) {
        adapter_to_release = publish_op->base.adapter;
    }

    /* We're going away before our MQTT5 operation, make sure it doesn't try to call us back when it completes */
    publish_op->publish_op->completion_options.completion_callback = NULL;
    publish_op->publish_op->completion_options.completion_user_data = NULL;

    aws_mqtt5_operation_release(&publish_op->publish_op->base);

    aws_mem_release(operation->allocator, operation);

    if (adapter_to_release != NULL) {
        aws_ref_count_release(&adapter_to_release->internal_refs);
    }
}

static void s_aws_mqtt5_to_mqtt3_adapter_publish_completion_fn(
    enum aws_mqtt5_packet_type packet_type,
    const
void *packet, int error_code, void *complete_ctx) { (void)packet_type; (void)packet; struct aws_mqtt5_to_mqtt3_adapter_operation_publish *publish_op = complete_ctx; if (publish_op->on_publish_complete != NULL) { (*publish_op->on_publish_complete)( &publish_op->base.adapter->base, publish_op->base.id, error_code, publish_op->on_publish_complete_user_data); } aws_mqtt5_to_mqtt3_adapter_operation_table_remove_operation( &publish_op->base.adapter->operational_state, publish_op->base.id); } static void s_fail_publish(void *impl, int error_code) { struct aws_mqtt5_to_mqtt3_adapter_operation_publish *publish_op = impl; if (publish_op->on_publish_complete != NULL) { (*publish_op->on_publish_complete)( &publish_op->base.adapter->base, publish_op->base.id, error_code, publish_op->on_publish_complete_user_data); } } static struct aws_mqtt5_to_mqtt3_adapter_operation_vtable s_publish_vtable = { .fail_fn = s_fail_publish, }; struct aws_mqtt5_to_mqtt3_adapter_operation_publish *aws_mqtt5_to_mqtt3_adapter_operation_new_publish( struct aws_allocator *allocator, const struct aws_mqtt5_to_mqtt3_adapter_publish_options *options) { struct aws_mqtt5_to_mqtt3_adapter_operation_publish *publish_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_to_mqtt3_adapter_operation_publish)); publish_op->base.allocator = allocator; aws_ref_count_init(&publish_op->base.ref_count, publish_op, s_adapter_publish_operation_destroy); publish_op->base.impl = publish_op; publish_op->base.vtable = &s_publish_vtable; publish_op->base.type = AWS_MQTT5TO3_AOT_PUBLISH; publish_op->base.adapter = options->adapter; publish_op->base.holding_adapter_ref = false; struct aws_mqtt5_packet_publish_view publish_view = { .topic = options->topic, .qos = (enum aws_mqtt5_qos)options->qos, .payload = options->payload, .retain = options->retain, }; struct aws_mqtt5_publish_completion_options publish_completion_options = { .completion_callback = s_aws_mqtt5_to_mqtt3_adapter_publish_completion_fn, .completion_user_data = publish_op, }; publish_op->publish_op = aws_mqtt5_operation_publish_new( allocator, options->adapter->client, &publish_view, &publish_completion_options); if (publish_op->publish_op == NULL) { goto error; } publish_op->on_publish_complete = options->on_complete; publish_op->on_publish_complete_user_data = options->on_complete_userdata; return publish_op; error: aws_mqtt5_to_mqtt3_adapter_operation_release(&publish_op->base); return NULL; } void s_adapter_publish_submission_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt5_to_mqtt3_adapter_operation_publish *operation = arg; struct aws_mqtt_client_connection_5_impl *adapter = operation->base.adapter; aws_mqtt5_client_submit_operation_internal( adapter->client, &operation->publish_op->base, status != AWS_TASK_STATUS_RUN_READY); /* * The operation has been submitted in-thread. 
We can release the transient refs (operation, adapter) needed to * keep things alive during the handover */ s_aws_mqtt5_to_mqtt3_adapter_operation_release_cross_thread_refs(&operation->base); } static uint16_t s_aws_mqtt_client_connection_5_publish( void *impl, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, bool retain, const struct aws_byte_cursor *payload, aws_mqtt_op_complete_fn *on_complete, void *userdata) { struct aws_mqtt_client_connection_5_impl *adapter = impl; AWS_LOGF_DEBUG(AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, invoking publish API", (void *)adapter); /* check qos */ if (qos < 0 || qos > AWS_MQTT_QOS_EXACTLY_ONCE) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, invalid qos for publish", (void *)adapter); aws_raise_error(AWS_ERROR_MQTT_INVALID_QOS); return 0; } if (!aws_mqtt_is_valid_topic(topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, invalid topic for publish", (void *)adapter); aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); return 0; } struct aws_byte_cursor topic_cursor = *topic; struct aws_byte_cursor payload_cursor; AWS_ZERO_STRUCT(payload_cursor); if (payload != NULL) { payload_cursor = *payload; } struct aws_mqtt5_to_mqtt3_adapter_publish_options publish_options = { .adapter = adapter, .topic = topic_cursor, .qos = qos, .retain = retain, .payload = payload_cursor, .on_complete = on_complete, .on_complete_userdata = userdata, }; struct aws_mqtt5_to_mqtt3_adapter_operation_publish *operation = aws_mqtt5_to_mqtt3_adapter_operation_new_publish(adapter->allocator, &publish_options); if (operation == NULL) { return 0; } if (aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(&adapter->operational_state, &operation->base)) { goto error; } uint16_t synthetic_id = operation->base.id; /* * While in-transit to the adapter event loop, we take refs to both the operation and the adapter so that we * are guaranteed they are still alive when the cross-thread submission task is run. 
*/ s_aws_mqtt5_to_mqtt3_adapter_operation_acquire_cross_thread_refs(&operation->base); aws_task_init( &operation->base.submission_task, s_adapter_publish_submission_fn, operation, "Mqtt5ToMqtt3AdapterPublishSubmission"); aws_event_loop_schedule_task_now(adapter->loop, &operation->base.submission_task); return synthetic_id; error: aws_mqtt5_to_mqtt3_adapter_operation_release(&operation->base); return 0; } static void s_adapter_subscribe_operation_destroy(void *context) { struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation = context; if (operation == NULL) { return; } struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *subscribe_op = operation->impl; size_t subscription_count = aws_array_list_length(&subscribe_op->subscriptions); for (size_t i = 0; i < subscription_count; ++i) { struct aws_mqtt_subscription_set_subscription_record *record = NULL; aws_array_list_get_at(&subscribe_op->subscriptions, &record, i); aws_mqtt_subscription_set_subscription_record_destroy(record); } aws_array_list_clean_up(&subscribe_op->subscriptions); struct aws_mqtt_client_connection_5_impl *adapter_to_release = NULL; if (subscribe_op->base.holding_adapter_ref) { adapter_to_release = subscribe_op->base.adapter; } /* We're going away before our MQTT5 operation, make sure it doesn't try to call us back when it completes */ if (subscribe_op->subscribe_op != NULL) { subscribe_op->subscribe_op->completion_options.completion_callback = NULL; subscribe_op->subscribe_op->completion_options.completion_user_data = NULL; aws_mqtt5_operation_release(&subscribe_op->subscribe_op->base); } aws_mem_release(operation->allocator, operation); if (adapter_to_release != NULL) { aws_ref_count_release(&adapter_to_release->internal_refs); } } static enum aws_mqtt_qos s_convert_mqtt5_suback_reason_code_to_mqtt3_granted_qos( enum aws_mqtt5_suback_reason_code reason_code) { switch (reason_code) { case AWS_MQTT5_SARC_GRANTED_QOS_0: case AWS_MQTT5_SARC_GRANTED_QOS_1: case AWS_MQTT5_SARC_GRANTED_QOS_2: return (enum aws_mqtt_qos)reason_code; default: return AWS_MQTT_QOS_FAILURE; } } static void s_aws_mqtt5_to_mqtt3_adapter_subscribe_completion_helper( struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *subscribe_op, const struct aws_mqtt5_packet_suback_view *suback, int error_code) { struct aws_mqtt_client_connection_5_impl *adapter = subscribe_op->base.adapter; struct aws_mqtt_subscription_set_subscription_record *record = NULL; if (subscribe_op->on_suback != NULL) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, completing single-topic subscribe", (void *)adapter); struct aws_byte_cursor topic_filter; AWS_ZERO_STRUCT(topic_filter); enum aws_mqtt_qos granted_qos = AWS_MQTT_QOS_AT_MOST_ONCE; size_t subscription_count = aws_array_list_length(&subscribe_op->subscriptions); if (subscription_count > 0) { aws_array_list_get_at(&subscribe_op->subscriptions, &record, 0); topic_filter = record->subscription_view.topic_filter; } if (suback != NULL) { if (suback->reason_code_count > 0) { granted_qos = s_convert_mqtt5_suback_reason_code_to_mqtt3_granted_qos(suback->reason_codes[0]); } } else { granted_qos = AWS_MQTT_QOS_FAILURE; } (*subscribe_op->on_suback)( &adapter->base, subscribe_op->base.id, &topic_filter, granted_qos, error_code, subscribe_op->on_suback_user_data); } if (subscribe_op->on_multi_suback != NULL) { AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, completing multi-topic subscribe", (void *)adapter); if (suback == NULL) { (*subscribe_op->on_multi_suback)( &adapter->base, 
subscribe_op->base.id, NULL, error_code, subscribe_op->on_multi_suback_user_data); } else { AWS_VARIABLE_LENGTH_ARRAY( struct aws_mqtt_topic_subscription, multi_sub_subscription_buf, suback->reason_code_count); AWS_VARIABLE_LENGTH_ARRAY( struct aws_mqtt_topic_subscription *, multi_sub_subscription_ptr_buf, suback->reason_code_count); struct aws_mqtt_topic_subscription *subscription_ptr = (struct aws_mqtt_topic_subscription *)multi_sub_subscription_buf; struct aws_array_list multi_sub_list; aws_array_list_init_static( &multi_sub_list, multi_sub_subscription_ptr_buf, suback->reason_code_count, sizeof(struct aws_mqtt_topic_subscription *)); size_t subscription_count = aws_array_list_length(&subscribe_op->subscriptions); for (size_t i = 0; i < suback->reason_code_count; ++i) { struct aws_mqtt_topic_subscription *subscription = subscription_ptr + i; AWS_ZERO_STRUCT(*subscription); subscription->qos = s_convert_mqtt5_suback_reason_code_to_mqtt3_granted_qos(suback->reason_codes[i]); if (i < subscription_count) { aws_array_list_get_at(&subscribe_op->subscriptions, &record, i); subscription->topic = record->subscription_view.topic_filter; subscription->on_publish = record->subscription_view.on_publish_received; subscription->on_publish_ud = record->subscription_view.callback_user_data; subscription->on_cleanup = record->subscription_view.on_cleanup; } aws_array_list_push_back(&multi_sub_list, &subscription); } (*subscribe_op->on_multi_suback)( &adapter->base, subscribe_op->base.id, &multi_sub_list, error_code, subscribe_op->on_multi_suback_user_data); } } } static void s_aws_mqtt5_to_mqtt3_adapter_subscribe_completion_fn( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *subscribe_op = complete_ctx; struct aws_mqtt_client_connection_5_impl *adapter = subscribe_op->base.adapter; s_aws_mqtt5_to_mqtt3_adapter_subscribe_completion_helper(subscribe_op, suback, error_code); aws_mqtt5_to_mqtt3_adapter_operation_table_remove_operation(&adapter->operational_state, subscribe_op->base.id); } static int s_validate_adapter_subscribe_options( size_t subscription_count, struct aws_mqtt_topic_subscription *subscriptions, struct aws_mqtt_client_connection_5_impl *adapter) { for (size_t i = 0; i < subscription_count; ++i) { struct aws_mqtt_topic_subscription *subscription = subscriptions + i; /* check qos */ if (subscription->qos < 0 || subscription->qos > AWS_MQTT_QOS_EXACTLY_ONCE) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, invalid qos for subscribe", (void *)adapter); return aws_raise_error(AWS_ERROR_MQTT_INVALID_QOS); } /* check topic */ if (!aws_mqtt_is_valid_topic_filter(&subscription->topic)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, invalid topic filter for subscribe", (void *)adapter); return aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); } } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_to_mqtt3_adapter_build_subscribe( struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *subscribe_op, size_t subscription_count, struct aws_mqtt_topic_subscription *subscriptions) { struct aws_allocator *allocator = subscribe_op->base.allocator; /* make persistent adapter sub array */ aws_array_list_init_dynamic( &subscribe_op->subscriptions, allocator, subscription_count, sizeof(struct aws_mqtt_subscription_set_subscription_record *)); for (size_t i = 0; i < subscription_count; ++i) { struct aws_mqtt_topic_subscription *subscription_options = &subscriptions[i]; 
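        /*
         * Sketch of the dual bookkeeping done per requested subscription (descriptive only): each mqtt311
         * aws_mqtt_topic_subscription is recorded twice:
         *
         *     1. as a persistent subscription-set record (topic filter, qos, publish/cleanup callbacks)
         *        kept in subscribe_op->subscriptions so incoming PUBLISHes can be routed later, and
         *     2. as a transient aws_mqtt5_subscription_view (topic filter and qos only), built further
         *        below on the stack, used solely to construct the MQTT5 SUBSCRIBE operation.
         *
         * The direct qos cast relies on the mqtt311 and mqtt5 qos enums sharing the values 0..2.
         */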
struct aws_mqtt_subscription_set_subscription_options subscription_record_options = { .topic_filter = subscription_options->topic, .qos = (enum aws_mqtt5_qos)subscription_options->qos, .on_publish_received = subscription_options->on_publish, .callback_user_data = subscription_options->on_publish_ud, .on_cleanup = subscription_options->on_cleanup, }; struct aws_mqtt_subscription_set_subscription_record *record = aws_mqtt_subscription_set_subscription_record_new(allocator, &subscription_record_options); aws_array_list_push_back(&subscribe_op->subscriptions, &record); } /* make temp mqtt5 subscription view array */ AWS_VARIABLE_LENGTH_ARRAY(struct aws_mqtt5_subscription_view, mqtt5_subscription_buffer, subscription_count); struct aws_mqtt5_subscription_view *subscription_ptr = mqtt5_subscription_buffer; for (size_t i = 0; i < subscription_count; ++i) { struct aws_mqtt5_subscription_view *subscription = subscription_ptr + i; AWS_ZERO_STRUCT(*subscription); subscription->topic_filter = subscriptions[i].topic; subscription->qos = (enum aws_mqtt5_qos)subscriptions[i].qos; } struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = subscription_ptr, .subscription_count = subscription_count, }; struct aws_mqtt5_subscribe_completion_options subscribe_completion_options = { .completion_callback = s_aws_mqtt5_to_mqtt3_adapter_subscribe_completion_fn, .completion_user_data = subscribe_op, }; subscribe_op->subscribe_op = aws_mqtt5_operation_subscribe_new( allocator, subscribe_op->base.adapter->client, &subscribe_view, &subscribe_completion_options); if (subscribe_op->subscribe_op == NULL) { /* subscribe options validation will have been raised as the error */ return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_fail_subscribe(void *impl, int error_code) { struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *subscribe_op = impl; s_aws_mqtt5_to_mqtt3_adapter_subscribe_completion_helper(subscribe_op, NULL, error_code); } static struct aws_mqtt5_to_mqtt3_adapter_operation_vtable s_subscribe_vtable = { .fail_fn = s_fail_subscribe, }; struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *aws_mqtt5_to_mqtt3_adapter_operation_new_subscribe( struct aws_allocator *allocator, const struct aws_mqtt5_to_mqtt3_adapter_subscribe_options *options, struct aws_mqtt_client_connection_5_impl *adapter) { if (s_validate_adapter_subscribe_options(options->subscription_count, options->subscriptions, adapter)) { return NULL; } struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *subscribe_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe)); subscribe_op->base.allocator = allocator; aws_ref_count_init(&subscribe_op->base.ref_count, subscribe_op, s_adapter_subscribe_operation_destroy); subscribe_op->base.impl = subscribe_op; subscribe_op->base.vtable = &s_subscribe_vtable; subscribe_op->base.type = AWS_MQTT5TO3_AOT_SUBSCRIBE; subscribe_op->base.adapter = options->adapter; subscribe_op->base.holding_adapter_ref = false; /* * If we're a regular subscribe, build the mqtt5 operation now. Otherwise, we have to wait until * we're on the event loop thread and it's safe to query the subscription set. 
*/ if (options->subscription_count > 0) { if (s_aws_mqtt5_to_mqtt3_adapter_build_subscribe( subscribe_op, options->subscription_count, options->subscriptions)) { goto error; } } subscribe_op->on_suback = options->on_suback; subscribe_op->on_suback_user_data = options->on_suback_user_data; subscribe_op->on_multi_suback = options->on_multi_suback; subscribe_op->on_multi_suback_user_data = options->on_multi_suback_user_data; return subscribe_op; error: aws_mqtt5_to_mqtt3_adapter_operation_release(&subscribe_op->base); return NULL; } static int s_aws_mqtt5_to_mqtt3_adapter_build_resubscribe( struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *subscribe_op, struct aws_array_list *full_subscriptions) { size_t subscription_count = aws_array_list_length(full_subscriptions); AWS_VARIABLE_LENGTH_ARRAY(struct aws_mqtt_topic_subscription, multi_sub_subscriptions, subscription_count); for (size_t i = 0; i < subscription_count; ++i) { struct aws_mqtt_subscription_set_subscription_options *existing_subscription = NULL; aws_array_list_get_at_ptr(full_subscriptions, (void **)&existing_subscription, i); multi_sub_subscriptions[i].topic = existing_subscription->topic_filter; multi_sub_subscriptions[i].qos = (enum aws_mqtt_qos)existing_subscription->qos; multi_sub_subscriptions[i].on_publish = existing_subscription->on_publish_received; multi_sub_subscriptions[i].on_cleanup = existing_subscription->on_cleanup; multi_sub_subscriptions[i].on_publish_ud = existing_subscription->callback_user_data; } return s_aws_mqtt5_to_mqtt3_adapter_build_subscribe(subscribe_op, subscription_count, multi_sub_subscriptions); } void s_adapter_subscribe_submission_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *operation = arg; struct aws_mqtt_client_connection_5_impl *adapter = operation->base.adapter; struct aws_array_list full_subscriptions; AWS_ZERO_STRUCT(full_subscriptions); /* If we're a re-subscribe, it's now safe to build the subscription set and MQTT5 subscribe op */ if (operation->subscribe_op == NULL) { aws_mqtt_subscription_set_get_subscriptions(adapter->subscriptions, &full_subscriptions); size_t subscription_count = aws_array_list_length(&full_subscriptions); if (subscription_count == 0 || s_aws_mqtt5_to_mqtt3_adapter_build_resubscribe(operation, &full_subscriptions)) { /* There's either nothing to do (no subscriptions) or we failed to build the op (should never happen) */ int error_code = aws_last_error(); if (subscription_count == 0) { error_code = AWS_ERROR_MQTT_CONNECTION_RESUBSCRIBE_NO_TOPICS; } if (operation->on_multi_suback) { (*operation->on_multi_suback)( &adapter->base, operation->base.id, NULL, error_code, operation->on_multi_suback_user_data); } /* * Remove the persistent ref represented by being seated in the incomplete operations table. * The other (transient) ref gets released at the end of the function. 
*/ aws_mqtt5_to_mqtt3_adapter_operation_table_remove_operation( &adapter->operational_state, operation->base.id); goto complete; } } size_t subscription_count = aws_array_list_length(&operation->subscriptions); for (size_t i = 0; i < subscription_count; ++i) { struct aws_mqtt_subscription_set_subscription_record *record = NULL; aws_array_list_get_at(&operation->subscriptions, &record, i); aws_mqtt_subscription_set_add_subscription(adapter->subscriptions, &record->subscription_view); } aws_mqtt5_client_submit_operation_internal( adapter->client, &operation->subscribe_op->base, status != AWS_TASK_STATUS_RUN_READY); complete: aws_array_list_clean_up(&full_subscriptions); /* * The operation has been submitted in-thread. We can release the transient refs (operation, adapter) needed to * keep things alive during the handover */ s_aws_mqtt5_to_mqtt3_adapter_operation_release_cross_thread_refs(&operation->base); } static uint16_t s_aws_mqtt_client_connection_5_subscribe( void *impl, const struct aws_byte_cursor *topic_filter, enum aws_mqtt_qos qos, aws_mqtt_client_publish_received_fn *on_publish, void *on_publish_ud, aws_mqtt_userdata_cleanup_fn *on_ud_cleanup, aws_mqtt_suback_fn *on_suback, void *on_suback_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, single-topic subscribe API invoked", (void *)adapter); struct aws_mqtt_topic_subscription subscription = { .topic = *topic_filter, .qos = qos, .on_publish = on_publish, .on_cleanup = on_ud_cleanup, .on_publish_ud = on_publish_ud, }; struct aws_mqtt5_to_mqtt3_adapter_subscribe_options subscribe_options = { .adapter = adapter, .subscriptions = &subscription, .subscription_count = 1, .on_suback = on_suback, .on_suback_user_data = on_suback_user_data, }; struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *operation = aws_mqtt5_to_mqtt3_adapter_operation_new_subscribe(adapter->allocator, &subscribe_options, adapter); if (operation == NULL) { return 0; } if (aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(&adapter->operational_state, &operation->base)) { goto error; } uint16_t synthetic_id = operation->base.id; /* * While in-transit to the adapter event loop, we take refs to both the operation and the adapter so that we * are guaranteed they are still alive when the cross-thread submission task is run. 
*/ s_aws_mqtt5_to_mqtt3_adapter_operation_acquire_cross_thread_refs(&operation->base); aws_task_init( &operation->base.submission_task, s_adapter_subscribe_submission_fn, operation, "Mqtt5ToMqtt3AdapterSubscribeSubmission"); aws_event_loop_schedule_task_now(adapter->loop, &operation->base.submission_task); return synthetic_id; error: ; int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, single-topic subscribe failed synchronously, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); aws_mqtt5_to_mqtt3_adapter_operation_release(&operation->base); return 0; } static uint16_t s_aws_mqtt_client_connection_5_subscribe_multiple( void *impl, const struct aws_array_list *topic_filters, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, multi-topic subscribe API invoked", (void *)adapter); if (topic_filters == NULL || aws_array_list_length(topic_filters) == 0) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter multi-topic subscribe empty", (void *)adapter); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return 0; } struct aws_mqtt_topic_subscription *subscriptions = topic_filters->data; struct aws_mqtt5_to_mqtt3_adapter_subscribe_options subscribe_options = { .adapter = adapter, .subscriptions = subscriptions, .subscription_count = aws_array_list_length(topic_filters), .on_multi_suback = on_suback, .on_multi_suback_user_data = on_suback_user_data, }; struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *operation = aws_mqtt5_to_mqtt3_adapter_operation_new_subscribe(adapter->allocator, &subscribe_options, adapter); if (operation == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, multi-topic subscribe operation creation failed, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return 0; } if (aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(&adapter->operational_state, &operation->base)) { goto error; } uint16_t synthetic_id = operation->base.id; /* * While in-transit to the adapter event loop, we take refs to both the operation and the adapter so that we * are guaranteed they are still alive when the cross-thread submission task is run. 
*/ s_aws_mqtt5_to_mqtt3_adapter_operation_acquire_cross_thread_refs(&operation->base); aws_task_init( &operation->base.submission_task, s_adapter_subscribe_submission_fn, operation, "Mqtt5ToMqtt3AdapterSubscribeMultipleSubmission"); aws_event_loop_schedule_task_now(adapter->loop, &operation->base.submission_task); return synthetic_id; error: ; int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, multi-topic subscribe failed, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); aws_mqtt5_to_mqtt3_adapter_operation_release(&operation->base); return 0; } static void s_adapter_unsubscribe_operation_destroy(void *context) { struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation = context; if (operation == NULL) { return; } struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe *unsubscribe_op = operation->impl; aws_byte_buf_clean_up(&unsubscribe_op->topic_filter); struct aws_mqtt_client_connection_5_impl *adapter_to_release = NULL; if (unsubscribe_op->base.holding_adapter_ref) { adapter_to_release = unsubscribe_op->base.adapter; } /* We're going away before our MQTT5 operation, make sure it doesn't try to call us back when it completes */ unsubscribe_op->unsubscribe_op->completion_options.completion_callback = NULL; unsubscribe_op->unsubscribe_op->completion_options.completion_user_data = NULL; aws_mqtt5_operation_release(&unsubscribe_op->unsubscribe_op->base); aws_mem_release(operation->allocator, operation); if (adapter_to_release != NULL) { aws_ref_count_release(&adapter_to_release->internal_refs); } } static void s_aws_mqtt5_to_mqtt3_adapter_unsubscribe_completion_fn( const struct aws_mqtt5_packet_unsuback_view *unsuback, int error_code, void *complete_ctx) { (void)unsuback; struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe *unsubscribe_op = complete_ctx; if (unsubscribe_op->on_unsuback != NULL) { (*unsubscribe_op->on_unsuback)( &unsubscribe_op->base.adapter->base, unsubscribe_op->base.id, error_code, unsubscribe_op->on_unsuback_user_data); } aws_mqtt5_to_mqtt3_adapter_operation_table_remove_operation( &unsubscribe_op->base.adapter->operational_state, unsubscribe_op->base.id); } static void s_fail_unsubscribe(void *impl, int error_code) { struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe *unsubscribe_op = impl; if (unsubscribe_op->on_unsuback != NULL) { (*unsubscribe_op->on_unsuback)( &unsubscribe_op->base.adapter->base, unsubscribe_op->base.id, error_code, unsubscribe_op->on_unsuback_user_data); } } static struct aws_mqtt5_to_mqtt3_adapter_operation_vtable s_unsubscribe_vtable = { .fail_fn = s_fail_unsubscribe, }; struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe *aws_mqtt5_to_mqtt3_adapter_operation_new_unsubscribe( struct aws_allocator *allocator, const struct aws_mqtt5_to_mqtt3_adapter_unsubscribe_options *options) { struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe *unsubscribe_op = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe)); unsubscribe_op->base.allocator = allocator; aws_ref_count_init(&unsubscribe_op->base.ref_count, unsubscribe_op, s_adapter_unsubscribe_operation_destroy); unsubscribe_op->base.impl = unsubscribe_op; unsubscribe_op->base.vtable = &s_unsubscribe_vtable; unsubscribe_op->base.type = AWS_MQTT5TO3_AOT_UNSUBSCRIBE; unsubscribe_op->base.adapter = options->adapter; unsubscribe_op->base.holding_adapter_ref = false; struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = 
&options->topic_filter, .topic_filter_count = 1, }; struct aws_mqtt5_unsubscribe_completion_options unsubscribe_completion_options = { .completion_callback = s_aws_mqtt5_to_mqtt3_adapter_unsubscribe_completion_fn, .completion_user_data = unsubscribe_op, }; unsubscribe_op->unsubscribe_op = aws_mqtt5_operation_unsubscribe_new( allocator, options->adapter->client, &unsubscribe_view, &unsubscribe_completion_options); if (unsubscribe_op->unsubscribe_op == NULL) { goto error; } unsubscribe_op->on_unsuback = options->on_unsuback; unsubscribe_op->on_unsuback_user_data = options->on_unsuback_user_data; aws_byte_buf_init_copy_from_cursor(&unsubscribe_op->topic_filter, allocator, options->topic_filter); return unsubscribe_op; error: aws_mqtt5_to_mqtt3_adapter_operation_release(&unsubscribe_op->base); return NULL; } void s_adapter_unsubscribe_submission_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe *operation = arg; struct aws_mqtt_client_connection_5_impl *adapter = operation->base.adapter; aws_mqtt_subscription_set_remove_subscription( adapter->subscriptions, aws_byte_cursor_from_buf(&operation->topic_filter)); aws_mqtt5_client_submit_operation_internal( adapter->client, &operation->unsubscribe_op->base, status != AWS_TASK_STATUS_RUN_READY); /* * The operation has been submitted in-thread. We can release the transient refs (operation, adapter) needed to * keep things alive during the handover */ s_aws_mqtt5_to_mqtt3_adapter_operation_release_cross_thread_refs(&operation->base); } static uint16_t s_aws_mqtt_client_connection_5_unsubscribe( void *impl, const struct aws_byte_cursor *topic_filter, aws_mqtt_op_complete_fn *on_unsuback, void *on_unsuback_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; AWS_LOGF_DEBUG(AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, unsubscribe called", (void *)adapter); if (!aws_mqtt_is_valid_topic_filter(topic_filter)) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, unsubscribe failed, invalid topic filter", (void *)adapter); aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC); return 0; } struct aws_mqtt5_to_mqtt3_adapter_unsubscribe_options unsubscribe_options = { .adapter = adapter, .topic_filter = *topic_filter, .on_unsuback = on_unsuback, .on_unsuback_user_data = on_unsuback_user_data, }; struct aws_mqtt5_to_mqtt3_adapter_operation_unsubscribe *operation = aws_mqtt5_to_mqtt3_adapter_operation_new_unsubscribe(adapter->allocator, &unsubscribe_options); if (operation == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, unsubscribe operation creation failed, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return 0; } if (aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(&adapter->operational_state, &operation->base)) { goto error; } uint16_t synthetic_id = operation->base.id; /* * While in-transit to the adapter event loop, we take refs to both the operation and the adapter so that we * are guaranteed they are still alive when the cross-thread submission task is run. 
*/ s_aws_mqtt5_to_mqtt3_adapter_operation_acquire_cross_thread_refs(&operation->base); aws_task_init( &operation->base.submission_task, s_adapter_unsubscribe_submission_fn, operation, "Mqtt5ToMqtt3AdapterUnsubscribeSubmission"); aws_event_loop_schedule_task_now(adapter->loop, &operation->base.submission_task); return synthetic_id; error: ; int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, unsubscribe failed, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); aws_mqtt5_to_mqtt3_adapter_operation_release(&operation->base); return 0; } static int s_aws_mqtt_client_connection_5_reconnect( void *impl, aws_mqtt_client_on_connection_complete_fn *on_connection_complete, void *userdata) { (void)impl; (void)on_connection_complete; (void)userdata; /* DEPRECATED, connection will reconnect automatically now. */ AWS_LOGF_ERROR(AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "aws_mqtt_client_connection_reconnect has been DEPRECATED."); return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } static int s_aws_mqtt_client_connection_5_get_stats( void *impl, struct aws_mqtt_connection_operation_statistics *stats) { struct aws_mqtt_client_connection_5_impl *adapter = impl; // Error checking if (!adapter) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "Invalid MQTT3-to-5 adapter used when trying to get operation statistics"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } AWS_LOGF_DEBUG(AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, get_stats invoked", (void *)adapter); if (!stats) { AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: Invalid MQTT311 statistics struct used when trying to get operation statistics", (void *)adapter); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_mqtt5_client_operation_statistics mqtt5_stats; AWS_ZERO_STRUCT(mqtt5_stats); aws_mqtt5_client_get_stats(adapter->client, &mqtt5_stats); stats->incomplete_operation_count = mqtt5_stats.incomplete_operation_count; stats->incomplete_operation_size = mqtt5_stats.incomplete_operation_size; stats->unacked_operation_count = mqtt5_stats.unacked_operation_count; stats->unacked_operation_size = mqtt5_stats.unacked_operation_size; return AWS_OP_SUCCESS; } static uint16_t s_aws_mqtt_5_resubscribe_existing_topics( void *impl, aws_mqtt_suback_multi_fn *on_suback, void *on_suback_user_data) { struct aws_mqtt_client_connection_5_impl *adapter = impl; AWS_LOGF_DEBUG( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, resubscribe_existing_topics invoked", (void *)adapter); struct aws_mqtt5_to_mqtt3_adapter_subscribe_options subscribe_options = { .adapter = adapter, .subscriptions = NULL, .subscription_count = 0, .on_multi_suback = on_suback, .on_multi_suback_user_data = on_suback_user_data, }; struct aws_mqtt5_to_mqtt3_adapter_operation_subscribe *operation = aws_mqtt5_to_mqtt3_adapter_operation_new_subscribe(adapter->allocator, &subscribe_options, adapter); if (operation == NULL) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, resubscribe_existing_topics failed on operation creation, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); return 0; } if (aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(&adapter->operational_state, &operation->base)) { goto error; } uint16_t synthetic_id = operation->base.id; /* * While in-transit to the adapter event loop, we take refs to both the operation and the adapter so that we * are 
guaranteed they are still alive when the cross-thread submission task is run. */ s_aws_mqtt5_to_mqtt3_adapter_operation_acquire_cross_thread_refs(&operation->base); aws_task_init( &operation->base.submission_task, s_adapter_subscribe_submission_fn, operation, "Mqtt5ToMqtt3AdapterSubscribeResubscribe"); aws_event_loop_schedule_task_now(adapter->loop, &operation->base.submission_task); return synthetic_id; error: ; int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_TO_MQTT3_ADAPTER, "id=%p: mqtt3-to-5-adapter, resubscribe_existing_topics failed, error code %d(%s)", (void *)adapter, error_code, aws_error_debug_str(error_code)); aws_mqtt5_to_mqtt3_adapter_operation_release(&operation->base); return 0; } static struct aws_mqtt_client_connection_vtable s_aws_mqtt_client_connection_5_vtable = { .acquire_fn = s_aws_mqtt_client_connection_5_acquire, .release_fn = s_aws_mqtt_client_connection_5_release, .set_will_fn = s_aws_mqtt_client_connection_5_set_will, .set_login_fn = s_aws_mqtt_client_connection_5_set_login, .use_websockets_fn = s_aws_mqtt_client_connection_5_use_websockets, .set_http_proxy_options_fn = s_aws_mqtt_client_connection_5_set_http_proxy_options, .set_host_resolution_options_fn = s_aws_mqtt_client_connection_5_set_host_resolution_options, .set_reconnect_timeout_fn = s_aws_mqtt_client_connection_5_set_reconnect_timeout, .set_connection_result_handlers = s_aws_mqtt_client_connection_5_set_connection_result_handlers, .set_connection_interruption_handlers_fn = s_aws_mqtt_client_connection_5_set_interruption_handlers, .set_connection_closed_handler_fn = s_aws_mqtt_client_connection_5_set_on_closed_handler, .set_connection_termination_handler_fn = s_aws_mqtt_client_connection_5_set_termination_handler, .set_on_any_publish_handler_fn = s_aws_mqtt_client_connection_5_set_on_any_publish_handler, .connect_fn = s_aws_mqtt_client_connection_5_connect, .reconnect_fn = s_aws_mqtt_client_connection_5_reconnect, .disconnect_fn = s_aws_mqtt_client_connection_5_disconnect, .subscribe_multiple_fn = s_aws_mqtt_client_connection_5_subscribe_multiple, .subscribe_fn = s_aws_mqtt_client_connection_5_subscribe, .resubscribe_existing_topics_fn = s_aws_mqtt_5_resubscribe_existing_topics, .unsubscribe_fn = s_aws_mqtt_client_connection_5_unsubscribe, .publish_fn = s_aws_mqtt_client_connection_5_publish, .get_stats_fn = s_aws_mqtt_client_connection_5_get_stats, }; static struct aws_mqtt_client_connection_vtable *s_aws_mqtt_client_connection_5_vtable_ptr = &s_aws_mqtt_client_connection_5_vtable; struct aws_mqtt_client_connection *aws_mqtt_client_connection_new_from_mqtt5_client(struct aws_mqtt5_client *client) { struct aws_allocator *allocator = client->allocator; struct aws_mqtt_client_connection_5_impl *adapter = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_client_connection_5_impl)); adapter->allocator = allocator; adapter->base.vtable = s_aws_mqtt_client_connection_5_vtable_ptr; adapter->base.impl = adapter; adapter->client = aws_mqtt5_client_acquire(client); adapter->loop = client->loop; adapter->adapter_state = AWS_MQTT_AS_STAY_DISCONNECTED; aws_ref_count_init(&adapter->external_refs, adapter, s_aws_mqtt5_to_mqtt3_adapter_on_zero_external_refs); aws_ref_count_init(&adapter->internal_refs, adapter, s_aws_mqtt5_to_mqtt3_adapter_on_zero_internal_refs); aws_mqtt5_to_mqtt3_adapter_operation_table_init(&adapter->operational_state, allocator); adapter->subscriptions = aws_mqtt_subscription_set_new(allocator); struct aws_mqtt5_listener_config listener_config = { .client = client, 
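        /*
         * Sketch of how the adapter attaches to the mqtt5 client (descriptive only): rather than registering
         * callbacks on the client directly, it creates an aws_mqtt5_listener whose
         *
         *     listener_publish_received_handler -> forwards incoming PUBLISHes into the adapter
         *     lifecycle_event_handler           -> feeds connection lifecycle events back to the adapter
         *     termination_callback              -> releases the adapter's starting internal ref once the
         *                                          listener has fully detached (see the shutdown notes above)
         *
         * all with the adapter itself as user data.
         */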
.listener_callbacks = { .listener_publish_received_handler = s_aws_mqtt5_listener_publish_received_adapter, .listener_publish_received_handler_user_data = adapter, .lifecycle_event_handler = s_aws_mqtt5_to_mqtt3_adapter_lifecycle_handler, .lifecycle_event_handler_user_data = adapter, }, .termination_callback = s_aws_mqtt5_to_mqtt3_adapter_on_listener_detached, .termination_callback_user_data = adapter, }; adapter->listener = aws_mqtt5_listener_new(allocator, &listener_config); return &adapter->base; } #define DEFAULT_MQTT_ADAPTER_OPERATION_TABLE_SIZE 100 void aws_mqtt5_to_mqtt3_adapter_operation_table_init( struct aws_mqtt5_to_mqtt3_adapter_operation_table *table, struct aws_allocator *allocator) { aws_mutex_init(&table->lock); aws_hash_table_init( &table->operations, allocator, DEFAULT_MQTT_ADAPTER_OPERATION_TABLE_SIZE, aws_mqtt_hash_uint16_t, aws_mqtt_compare_uint16_t_eq, NULL, NULL); table->next_id = 1; } static int s_adapter_operation_fail(void *context, struct aws_hash_element *operation_element) { (void)context; struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation = operation_element->value; (*operation->vtable->fail_fn)(operation->impl, AWS_ERROR_MQTT_CONNECTION_DESTROYED); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } static int s_adapter_operation_clean_up(void *context, struct aws_hash_element *operation_element) { (void)context; struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation = operation_element->value; aws_mqtt5_to_mqtt3_adapter_operation_release(operation); return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; } void aws_mqtt5_to_mqtt3_adapter_operation_table_clean_up(struct aws_mqtt5_to_mqtt3_adapter_operation_table *table) { aws_hash_table_foreach(&table->operations, s_adapter_operation_fail, table); aws_hash_table_foreach(&table->operations, s_adapter_operation_clean_up, table); aws_hash_table_clean_up(&table->operations); aws_mutex_clean_up(&table->lock); } static uint16_t s_next_adapter_id(uint16_t current_id) { if (++current_id == 0) { current_id = 1; } return current_id; } int aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation( struct aws_mqtt5_to_mqtt3_adapter_operation_table *table, struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation) { operation->id = 0; aws_mutex_lock(&table->lock); uint16_t current_id = table->next_id; struct aws_hash_element *elem = NULL; for (uint16_t i = 0; i < UINT16_MAX; ++i) { aws_hash_table_find(&table->operations, ¤t_id, &elem); if (elem == NULL) { operation->id = current_id; table->next_id = s_next_adapter_id(current_id); if (aws_hash_table_put(&table->operations, &operation->id, operation, NULL)) { operation->id = 0; } goto done; } current_id = s_next_adapter_id(current_id); } done: aws_mutex_unlock(&table->lock); return (operation->id != 0) ? 
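        /*
         * Illustrative walk-through of the id allocation above (example values only): ids are uint16_t,
         * 0 is reserved as "invalid", and allocation probes forward from next_id, wrapping past UINT16_MAX
         * back to 1, e.g.
         *
         *     next_id = 65535          ->  assign 65535, next_id becomes 1
         *     next_id = 1 (in use)     ->  probe 2, 3, ... until a free slot is found
         *
         * Only if all 65535 ids are simultaneously in use does the add fail with AWS_ERROR_MQTT_QUEUE_FULL.
         */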
AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_MQTT_QUEUE_FULL); } void aws_mqtt5_to_mqtt3_adapter_operation_table_remove_operation( struct aws_mqtt5_to_mqtt3_adapter_operation_table *table, uint16_t operation_id) { struct aws_hash_element existing_element; AWS_ZERO_STRUCT(existing_element); aws_mutex_lock(&table->lock); aws_hash_table_remove(&table->operations, &operation_id, &existing_element, NULL); aws_mutex_unlock(&table->lock); struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation = existing_element.value; if (operation != NULL) { aws_mqtt5_to_mqtt3_adapter_operation_release(operation); } } struct aws_mqtt5_to_mqtt3_adapter_operation_base *aws_mqtt5_to_mqtt3_adapter_operation_release( struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation) { if (operation != NULL) { aws_ref_count_release(&operation->ref_count); } return NULL; } struct aws_mqtt5_to_mqtt3_adapter_operation_base *aws_mqtt5_to_mqtt3_adapter_operation_acquire( struct aws_mqtt5_to_mqtt3_adapter_operation_base *operation) { if (operation != NULL) { aws_ref_count_acquire(&operation->ref_count); } return operation; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_topic_alias.c000066400000000000000000000513501456575232400256400ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include int aws_mqtt5_inbound_topic_alias_resolver_init( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*resolver); resolver->allocator = allocator; if (aws_array_list_init_dynamic(&resolver->topic_aliases, allocator, 0, sizeof(struct aws_string *))) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_release_aliases(struct aws_mqtt5_inbound_topic_alias_resolver *resolver) { if (!aws_array_list_is_valid(&resolver->topic_aliases)) { return; } size_t cache_size = aws_array_list_length(&resolver->topic_aliases); for (size_t i = 0; i < cache_size; ++i) { struct aws_string *topic = NULL; aws_array_list_get_at(&resolver->topic_aliases, &topic, i); aws_string_destroy(topic); } } void aws_mqtt5_inbound_topic_alias_resolver_clean_up(struct aws_mqtt5_inbound_topic_alias_resolver *resolver) { s_release_aliases(resolver); aws_array_list_clean_up(&resolver->topic_aliases); } int aws_mqtt5_inbound_topic_alias_resolver_reset( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, uint16_t cache_size) { aws_mqtt5_inbound_topic_alias_resolver_clean_up(resolver); AWS_ZERO_STRUCT(resolver->topic_aliases); if (aws_array_list_init_dynamic( &resolver->topic_aliases, resolver->allocator, cache_size, sizeof(struct aws_string *))) { return AWS_OP_ERR; } for (size_t i = 0; i < cache_size; ++i) { struct aws_string *topic = NULL; aws_array_list_push_back(&resolver->topic_aliases, &topic); } return AWS_OP_SUCCESS; } int aws_mqtt5_inbound_topic_alias_resolver_resolve_alias( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, uint16_t alias, struct aws_byte_cursor *topic_out) { size_t cache_size = aws_array_list_length(&resolver->topic_aliases); if (alias > cache_size || alias == 0) { return aws_raise_error(AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS); } size_t alias_index = alias - 1; struct aws_string *topic = NULL; aws_array_list_get_at(&resolver->topic_aliases, &topic, alias_index); if (topic == NULL) { return aws_raise_error(AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS); } *topic_out = aws_byte_cursor_from_string(topic); return AWS_OP_SUCCESS; } int 
aws_mqtt5_inbound_topic_alias_resolver_register_alias( struct aws_mqtt5_inbound_topic_alias_resolver *resolver, uint16_t alias, struct aws_byte_cursor topic) { size_t cache_size = aws_array_list_length(&resolver->topic_aliases); if (alias > cache_size || alias == 0) { return aws_raise_error(AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS); } struct aws_string *new_entry = aws_string_new_from_cursor(resolver->allocator, &topic); if (new_entry == NULL) { return AWS_OP_ERR; } size_t alias_index = alias - 1; struct aws_string *existing_entry = NULL; aws_array_list_get_at(&resolver->topic_aliases, &existing_entry, alias_index); aws_string_destroy(existing_entry); aws_array_list_set_at(&resolver->topic_aliases, &new_entry, alias_index); return AWS_OP_SUCCESS; } /****************************************************************************************************************/ struct aws_mqtt5_outbound_topic_alias_resolver_vtable { void (*destroy_fn)(struct aws_mqtt5_outbound_topic_alias_resolver *); int (*reset_fn)(struct aws_mqtt5_outbound_topic_alias_resolver *, uint16_t); int (*resolve_outbound_publish_fn)( struct aws_mqtt5_outbound_topic_alias_resolver *, const struct aws_mqtt5_packet_publish_view *, uint16_t *, struct aws_byte_cursor *); }; struct aws_mqtt5_outbound_topic_alias_resolver { struct aws_allocator *allocator; struct aws_mqtt5_outbound_topic_alias_resolver_vtable *vtable; void *impl; }; static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_disabled_new( struct aws_allocator *allocator); static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_lru_new( struct aws_allocator *allocator); static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_manual_new( struct aws_allocator *allocator); struct aws_mqtt5_outbound_topic_alias_resolver *aws_mqtt5_outbound_topic_alias_resolver_new( struct aws_allocator *allocator, enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_alias_behavior) { switch (aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default(outbound_alias_behavior)) { case AWS_MQTT5_COTABT_MANUAL: return s_aws_mqtt5_outbound_topic_alias_resolver_manual_new(allocator); case AWS_MQTT5_COTABT_LRU: return s_aws_mqtt5_outbound_topic_alias_resolver_lru_new(allocator); case AWS_MQTT5_COTABT_DISABLED: return s_aws_mqtt5_outbound_topic_alias_resolver_disabled_new(allocator); default: return NULL; } } void aws_mqtt5_outbound_topic_alias_resolver_destroy(struct aws_mqtt5_outbound_topic_alias_resolver *resolver) { if (resolver == NULL) { return; } (*resolver->vtable->destroy_fn)(resolver); } int aws_mqtt5_outbound_topic_alias_resolver_reset( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, uint16_t topic_alias_maximum) { if (resolver == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return (*resolver->vtable->reset_fn)(resolver, topic_alias_maximum); } int aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, const struct aws_mqtt5_packet_publish_view *publish_view, uint16_t *topic_alias_out, struct aws_byte_cursor *topic_out) { if (resolver == NULL) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return (*resolver->vtable->resolve_outbound_publish_fn)(resolver, publish_view, topic_alias_out, topic_out); } /* * Disabled resolver */ static void s_aws_mqtt5_outbound_topic_alias_resolver_disabled_destroy( struct aws_mqtt5_outbound_topic_alias_resolver *resolver) 
{ if (resolver == NULL) { return; } aws_mem_release(resolver->allocator, resolver); } static int s_aws_mqtt5_outbound_topic_alias_resolver_disabled_reset( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, uint16_t topic_alias_maximum) { (void)resolver; (void)topic_alias_maximum; return AWS_OP_SUCCESS; } static int s_aws_mqtt5_outbound_topic_alias_resolver_disabled_resolve_outbound_publish_fn( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, const struct aws_mqtt5_packet_publish_view *publish_view, uint16_t *topic_alias_out, struct aws_byte_cursor *topic_out) { (void)resolver; if (publish_view->topic.len == 0) { return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION); } *topic_alias_out = 0; *topic_out = publish_view->topic; return AWS_OP_SUCCESS; } static struct aws_mqtt5_outbound_topic_alias_resolver_vtable s_aws_mqtt5_outbound_topic_alias_resolver_disabled_vtable = { .destroy_fn = s_aws_mqtt5_outbound_topic_alias_resolver_disabled_destroy, .reset_fn = s_aws_mqtt5_outbound_topic_alias_resolver_disabled_reset, .resolve_outbound_publish_fn = s_aws_mqtt5_outbound_topic_alias_resolver_disabled_resolve_outbound_publish_fn, }; static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_disabled_new( struct aws_allocator *allocator) { struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_outbound_topic_alias_resolver)); resolver->allocator = allocator; resolver->vtable = &s_aws_mqtt5_outbound_topic_alias_resolver_disabled_vtable; return resolver; } /* * Manual resolver * * Manual resolution implies the user is controlling the topic alias assignments, but we still want to validate their * actions. In particular, we track the currently valid set of aliases (based on previous outbound publishes) * and only use an alias when the submitted publish is an exact match for the current assignment. 
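 * Illustrative example (editorial addition; the topic names and alias value below are hypothetical): if the user publishes to "sensors/temp" with topic alias 2 while slot 2 is empty, the full topic is sent alongside the alias and "sensors/temp" is recorded for slot 2. A later publish to "sensors/temp" with alias 2 matches the recorded assignment, so the outgoing topic is zeroed and only the alias is sent. A publish to "sensors/humidity" with alias 2 does not match, so the full topic is sent again and slot 2 is overwritten with the new topic.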
*/ struct aws_mqtt5_outbound_topic_alias_resolver_manual { struct aws_mqtt5_outbound_topic_alias_resolver base; struct aws_array_list aliases; }; static void s_cleanup_manual_aliases(struct aws_mqtt5_outbound_topic_alias_resolver_manual *manual_resolver) { for (size_t i = 0; i < aws_array_list_length(&manual_resolver->aliases); ++i) { struct aws_string *alias = NULL; aws_array_list_get_at(&manual_resolver->aliases, &alias, i); aws_string_destroy(alias); } aws_array_list_clean_up(&manual_resolver->aliases); AWS_ZERO_STRUCT(manual_resolver->aliases); } static void s_aws_mqtt5_outbound_topic_alias_resolver_manual_destroy( struct aws_mqtt5_outbound_topic_alias_resolver *resolver) { if (resolver == NULL) { return; } struct aws_mqtt5_outbound_topic_alias_resolver_manual *manual_resolver = resolver->impl; s_cleanup_manual_aliases(manual_resolver); aws_mem_release(resolver->allocator, manual_resolver); } static int s_aws_mqtt5_outbound_topic_alias_resolver_manual_reset( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, uint16_t topic_alias_maximum) { struct aws_mqtt5_outbound_topic_alias_resolver_manual *manual_resolver = resolver->impl; s_cleanup_manual_aliases(manual_resolver); aws_array_list_init_dynamic( &manual_resolver->aliases, resolver->allocator, topic_alias_maximum, sizeof(struct aws_string *)); for (size_t i = 0; i < topic_alias_maximum; ++i) { struct aws_string *invalid_alias = NULL; aws_array_list_push_back(&manual_resolver->aliases, &invalid_alias); } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_outbound_topic_alias_resolver_manual_resolve_outbound_publish_fn( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, const struct aws_mqtt5_packet_publish_view *publish_view, uint16_t *topic_alias_out, struct aws_byte_cursor *topic_out) { if (publish_view->topic_alias == NULL) { /* not using a topic alias, nothing to do */ *topic_alias_out = 0; *topic_out = publish_view->topic; return AWS_OP_SUCCESS; } uint16_t user_alias = *publish_view->topic_alias; if (user_alias == 0) { /* should have been caught by publish validation */ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS); } struct aws_mqtt5_outbound_topic_alias_resolver_manual *manual_resolver = resolver->impl; uint16_t user_alias_index = user_alias - 1; if (user_alias_index >= aws_array_list_length(&manual_resolver->aliases)) { /* should have been caught by dynamic publish validation */ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS); } struct aws_string *current_assignment = NULL; aws_array_list_get_at(&manual_resolver->aliases, &current_assignment, user_alias_index); *topic_alias_out = user_alias; bool can_use_alias = false; if (current_assignment != NULL) { struct aws_byte_cursor assignment_cursor = aws_byte_cursor_from_string(current_assignment); if (aws_byte_cursor_eq(&assignment_cursor, &publish_view->topic)) { can_use_alias = true; } } if (can_use_alias) { AWS_ZERO_STRUCT(*topic_out); } else { *topic_out = publish_view->topic; } /* mark this alias as seen */ if (!can_use_alias) { aws_string_destroy(current_assignment); current_assignment = aws_string_new_from_cursor(resolver->allocator, &publish_view->topic); aws_array_list_set_at(&manual_resolver->aliases, &current_assignment, user_alias_index); } return AWS_OP_SUCCESS; } static struct aws_mqtt5_outbound_topic_alias_resolver_vtable s_aws_mqtt5_outbound_topic_alias_resolver_manual_vtable = { .destroy_fn = s_aws_mqtt5_outbound_topic_alias_resolver_manual_destroy, .reset_fn = s_aws_mqtt5_outbound_topic_alias_resolver_manual_reset,
.resolve_outbound_publish_fn = s_aws_mqtt5_outbound_topic_alias_resolver_manual_resolve_outbound_publish_fn, }; static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_manual_new( struct aws_allocator *allocator) { struct aws_mqtt5_outbound_topic_alias_resolver_manual *resolver = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_outbound_topic_alias_resolver_manual)); resolver->base.allocator = allocator; resolver->base.vtable = &s_aws_mqtt5_outbound_topic_alias_resolver_manual_vtable; resolver->base.impl = resolver; aws_array_list_init_dynamic(&resolver->aliases, allocator, 0, sizeof(struct aws_string *)); return &resolver->base; } /* * LRU resolver * * This resolver uses an LRU cache to automatically create topic alias assignments for the user. With a reasonable * cache size, this should perform well for the majority of MQTT workloads. For workloads it does not perform well * with, the user should control the assignment (or disable entirely). Even for workloads where the LRU cache fails * to reuse an assignment every single time, the overall cost is 3 extra bytes per publish. As a rough estimate, this * means that LRU topic aliasing is "worth it" if an existing alias can be used at least once every * (AverageTopicLength / 3) publishes. */ struct aws_mqtt5_outbound_topic_alias_resolver_lru { struct aws_mqtt5_outbound_topic_alias_resolver base; struct aws_cache *lru_cache; size_t max_aliases; }; static void s_aws_mqtt5_outbound_topic_alias_resolver_lru_destroy( struct aws_mqtt5_outbound_topic_alias_resolver *resolver) { if (resolver == NULL) { return; } struct aws_mqtt5_outbound_topic_alias_resolver_lru *lru_resolver = resolver->impl; if (lru_resolver->lru_cache != NULL) { aws_cache_destroy(lru_resolver->lru_cache); } aws_mem_release(resolver->allocator, lru_resolver); } struct aws_topic_alias_assignment { struct aws_byte_cursor topic_cursor; struct aws_byte_buf topic; uint16_t alias; struct aws_allocator *allocator; }; static void s_aws_topic_alias_assignment_destroy(struct aws_topic_alias_assignment *alias_assignment) { if (alias_assignment == NULL) { return; } aws_byte_buf_clean_up(&alias_assignment->topic); aws_mem_release(alias_assignment->allocator, alias_assignment); } static struct aws_topic_alias_assignment *s_aws_topic_alias_assignment_new( struct aws_allocator *allocator, struct aws_byte_cursor topic, uint16_t alias) { struct aws_topic_alias_assignment *assignment = aws_mem_calloc(allocator, 1, sizeof(struct aws_topic_alias_assignment)); assignment->allocator = allocator; assignment->alias = alias; if (aws_byte_buf_init_copy_from_cursor(&assignment->topic, allocator, topic)) { goto on_error; } assignment->topic_cursor = aws_byte_cursor_from_buf(&assignment->topic); return assignment; on_error: s_aws_topic_alias_assignment_destroy(assignment); return NULL; } static void s_destroy_assignment_value(void *value) { s_aws_topic_alias_assignment_destroy(value); } static int s_aws_mqtt5_outbound_topic_alias_resolver_lru_reset( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, uint16_t topic_alias_maximum) { struct aws_mqtt5_outbound_topic_alias_resolver_lru *lru_resolver = resolver->impl; if (lru_resolver->lru_cache != NULL) { aws_cache_destroy(lru_resolver->lru_cache); lru_resolver->lru_cache = NULL; } if (topic_alias_maximum > 0) { lru_resolver->lru_cache = aws_cache_new_lru( lru_resolver->base.allocator, aws_hash_byte_cursor_ptr, aws_mqtt_byte_cursor_hash_equality, NULL, s_destroy_assignment_value, topic_alias_maximum); } 
lru_resolver->max_aliases = topic_alias_maximum; return AWS_OP_SUCCESS; } static int s_aws_mqtt5_outbound_topic_alias_resolver_lru_resolve_outbound_publish_fn( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, const struct aws_mqtt5_packet_publish_view *publish_view, uint16_t *topic_alias_out, struct aws_byte_cursor *topic_out) { /* No cache => no aliasing done */ struct aws_mqtt5_outbound_topic_alias_resolver_lru *lru_resolver = resolver->impl; if (lru_resolver->lru_cache == NULL || lru_resolver->max_aliases == 0) { *topic_alias_out = 0; *topic_out = publish_view->topic; return AWS_OP_SUCCESS; } /* Look for the topic in the cache */ struct aws_byte_cursor topic = publish_view->topic; void *existing_element = NULL; if (aws_cache_find(lru_resolver->lru_cache, &topic, &existing_element)) { return AWS_OP_ERR; } struct aws_topic_alias_assignment *existing_assignment = existing_element; if (existing_assignment != NULL) { /* * Topic exists, so use the assignment. The LRU cache find implementation has already promoted the element * to MRU. */ *topic_alias_out = existing_assignment->alias; AWS_ZERO_STRUCT(*topic_out); return AWS_OP_SUCCESS; } /* Topic doesn't exist in the cache. */ uint16_t new_alias_id = 0; size_t assignment_count = aws_cache_get_element_count(lru_resolver->lru_cache); if (assignment_count == lru_resolver->max_aliases) { /* * The cache is full. Get the LRU element to figure out what id we're going to reuse. There's no way to get * the LRU element without promoting it. So we get the element, save the discovered alias id, then remove * the element. */ void *lru_element = aws_lru_cache_use_lru_element(lru_resolver->lru_cache); struct aws_topic_alias_assignment *replaced_assignment = lru_element; new_alias_id = replaced_assignment->alias; struct aws_byte_cursor replaced_topic = replaced_assignment->topic_cursor; /* * This is a little uncomfortable but valid. The cursor we're passing in will get invalidated (and the backing * memory deleted) as part of the removal process but it is only used to find the element to remove. Once * destruction begins it is no longer accessed. */ aws_cache_remove(lru_resolver->lru_cache, &replaced_topic); } else { /* * The cache never shrinks and the first N adds are the N valid topic aliases. Since the cache isn't full, * we know the next alias that hasn't been used. This invariant only holds given that we will tear down * the connection (invalidating the cache) on errors from this function (ie, continuing on from a put * error would break the invariant and create duplicated ids). */ new_alias_id = (uint16_t)(assignment_count + 1); } /* * We have a topic alias to use. Add our new assignment. 
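 * Editorial note: this is consistent with the MQTT5 topic-alias rules as we understand them; the PUBLISH that establishes a new mapping carries both the alias and the full topic name, and only later publishes that reuse an existing mapping may send the alias with an empty topic. That is why the cache-hit path above zeroes the outgoing topic while the fresh-assignment path below leaves it populated.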
*/ struct aws_topic_alias_assignment *new_assignment = s_aws_topic_alias_assignment_new(resolver->allocator, topic, new_alias_id); if (new_assignment == NULL) { return AWS_OP_ERR; } /* the LRU cache put implementation automatically makes the newly added element MRU */ if (aws_cache_put(lru_resolver->lru_cache, &new_assignment->topic_cursor, new_assignment)) { s_aws_topic_alias_assignment_destroy(new_assignment); return AWS_OP_ERR; } *topic_alias_out = new_assignment->alias; *topic_out = topic; /* this is a new assignment so topic must go out too */ return AWS_OP_SUCCESS; } static struct aws_mqtt5_outbound_topic_alias_resolver_vtable s_aws_mqtt5_outbound_topic_alias_resolver_lru_vtable = { .destroy_fn = s_aws_mqtt5_outbound_topic_alias_resolver_lru_destroy, .reset_fn = s_aws_mqtt5_outbound_topic_alias_resolver_lru_reset, .resolve_outbound_publish_fn = s_aws_mqtt5_outbound_topic_alias_resolver_lru_resolve_outbound_publish_fn, }; static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_lru_new( struct aws_allocator *allocator) { struct aws_mqtt5_outbound_topic_alias_resolver_lru *resolver = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_outbound_topic_alias_resolver_lru)); resolver->base.allocator = allocator; resolver->base.vtable = &s_aws_mqtt5_outbound_topic_alias_resolver_lru_vtable; resolver->base.impl = resolver; return &resolver->base; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_types.c000066400000000000000000000322401456575232400245120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /* disconnect and shared reason codes */ static const char *s_normal_disconnection = "Normal Disconnection"; static const char *s_disconnect_with_will_message = "Disconnect With Will Message"; static const char *s_unspecified_error = "Unspecified Error"; static const char *s_malformed_packet = "Malformed Packet"; static const char *s_protocol_error = "Protocol Error"; static const char *s_implementation_specific_error = "Implementation Specific Error"; static const char *s_not_authorized = "Not Authorized"; static const char *s_server_busy = "Server Busy"; static const char *s_server_shutting_down = "Server Shutting Down"; static const char *s_keep_alive_timeout = "Keep Alive Timeout"; static const char *s_session_taken_over = "Session Taken Over"; static const char *s_topic_filter_invalid = "Topic Filter Invalid"; static const char *s_topic_name_invalid = "Topic Name Invalid"; static const char *s_receive_maximum_exceeded = "Receive Maximum Exceeded"; static const char *s_topic_alias_invalid = "Topic Alias Invalid"; static const char *s_packet_too_large = "Packet Too Large"; static const char *s_message_rate_too_high = "Message Rate Too High"; static const char *s_quota_exceeded = "Quota Exceeded"; static const char *s_administrative_action = "Administrative Action"; static const char *s_payload_format_invalid = "Payload Format Invalid"; static const char *s_retain_not_supported = "Retain Not Supported"; static const char *s_qos_not_supported = "QoS Not Supported"; static const char *s_use_another_server = "Use Another Server"; static const char *s_server_moved = "Server Moved"; static const char *s_shared_subscriptions_not_supported = "Shared Subscriptions Not Supported"; static const char *s_connection_rate_exceeded = "Connection Rate Exceeded"; static const char *s_maximum_connect_time = "Maximum Connect Time"; static const char 
*s_subscription_identifiers_not_supported = "Subscription Identifiers Not Supported"; static const char *s_wildcard_subscriptions_not_supported = "Wildcard Subscriptions Not Supported"; static const char *s_success = "Success"; static const char *s_unsupported_protocol_version = "Unsupported Protocol Version"; static const char *s_client_identifier_not_valid = "Client Identifier Not Valid"; static const char *s_bad_username_or_password = "Bad Username Or Password"; static const char *s_server_unavailable = "Server Unavailable"; static const char *s_banned = "Banned"; static const char *s_bad_authentication_method = "Bad Authentication Method"; static const char *s_unknown_reason = "Unknown Reason"; static const char *s_no_subscription_existed = "No Subscription Existed"; static const char *s_packet_identifier_in_use = "Packet Identifier In Use"; static const char *s_granted_qos_0 = "Granted QoS 0"; static const char *s_granted_qos_1 = "Granted QoS 1"; static const char *s_granted_qos_2 = "Granted QoS 2"; static const char *s_no_matching_subscribers = "No Matching Subscribers"; const char *aws_mqtt5_connect_reason_code_to_c_string(enum aws_mqtt5_connect_reason_code reason_code) { switch (reason_code) { case AWS_MQTT5_CRC_SUCCESS: return s_success; case AWS_MQTT5_CRC_UNSPECIFIED_ERROR: return s_unspecified_error; case AWS_MQTT5_CRC_MALFORMED_PACKET: return s_malformed_packet; case AWS_MQTT5_CRC_PROTOCOL_ERROR: return s_protocol_error; case AWS_MQTT5_CRC_IMPLEMENTATION_SPECIFIC_ERROR: return s_implementation_specific_error; case AWS_MQTT5_CRC_UNSUPPORTED_PROTOCOL_VERSION: return s_unsupported_protocol_version; case AWS_MQTT5_CRC_CLIENT_IDENTIFIER_NOT_VALID: return s_client_identifier_not_valid; case AWS_MQTT5_CRC_BAD_USERNAME_OR_PASSWORD: return s_bad_username_or_password; case AWS_MQTT5_CRC_NOT_AUTHORIZED: return s_not_authorized; case AWS_MQTT5_CRC_SERVER_UNAVAILABLE: return s_server_unavailable; case AWS_MQTT5_CRC_SERVER_BUSY: return s_server_busy; case AWS_MQTT5_CRC_BANNED: return s_banned; case AWS_MQTT5_CRC_BAD_AUTHENTICATION_METHOD: return s_bad_authentication_method; case AWS_MQTT5_CRC_TOPIC_NAME_INVALID: return s_topic_name_invalid; case AWS_MQTT5_CRC_PACKET_TOO_LARGE: return s_packet_too_large; case AWS_MQTT5_CRC_QUOTA_EXCEEDED: return s_quota_exceeded; case AWS_MQTT5_CRC_PAYLOAD_FORMAT_INVALID: return s_payload_format_invalid; case AWS_MQTT5_CRC_RETAIN_NOT_SUPPORTED: return s_retain_not_supported; case AWS_MQTT5_CRC_QOS_NOT_SUPPORTED: return s_qos_not_supported; case AWS_MQTT5_CRC_USE_ANOTHER_SERVER: return s_use_another_server; case AWS_MQTT5_CRC_SERVER_MOVED: return s_server_moved; case AWS_MQTT5_CRC_CONNECTION_RATE_EXCEEDED: return s_connection_rate_exceeded; } return s_unknown_reason; } const char *aws_mqtt5_disconnect_reason_code_to_c_string( enum aws_mqtt5_disconnect_reason_code reason_code, bool *is_valid) { if (is_valid != NULL) { *is_valid = true; } switch (reason_code) { case AWS_MQTT5_DRC_NORMAL_DISCONNECTION: return s_normal_disconnection; case AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE: return s_disconnect_with_will_message; case AWS_MQTT5_DRC_UNSPECIFIED_ERROR: return s_unspecified_error; case AWS_MQTT5_DRC_MALFORMED_PACKET: return s_malformed_packet; case AWS_MQTT5_DRC_PROTOCOL_ERROR: return s_protocol_error; case AWS_MQTT5_DRC_IMPLEMENTATION_SPECIFIC_ERROR: return s_implementation_specific_error; case AWS_MQTT5_DRC_NOT_AUTHORIZED: return s_not_authorized; case AWS_MQTT5_DRC_SERVER_BUSY: return s_server_busy; case AWS_MQTT5_DRC_SERVER_SHUTTING_DOWN: return 
s_server_shutting_down; case AWS_MQTT5_DRC_KEEP_ALIVE_TIMEOUT: return s_keep_alive_timeout; case AWS_MQTT5_DRC_SESSION_TAKEN_OVER: return s_session_taken_over; case AWS_MQTT5_DRC_TOPIC_FILTER_INVALID: return s_topic_filter_invalid; case AWS_MQTT5_DRC_TOPIC_NAME_INVALID: return s_topic_name_invalid; case AWS_MQTT5_DRC_RECEIVE_MAXIMUM_EXCEEDED: return s_receive_maximum_exceeded; case AWS_MQTT5_DRC_TOPIC_ALIAS_INVALID: return s_topic_alias_invalid; case AWS_MQTT5_DRC_PACKET_TOO_LARGE: return s_packet_too_large; case AWS_MQTT5_DRC_MESSAGE_RATE_TOO_HIGH: return s_message_rate_too_high; case AWS_MQTT5_DRC_QUOTA_EXCEEDED: return s_quota_exceeded; case AWS_MQTT5_DRC_ADMINISTRATIVE_ACTION: return s_administrative_action; case AWS_MQTT5_DRC_PAYLOAD_FORMAT_INVALID: return s_payload_format_invalid; case AWS_MQTT5_DRC_RETAIN_NOT_SUPPORTED: return s_retain_not_supported; case AWS_MQTT5_DRC_QOS_NOT_SUPPORTED: return s_qos_not_supported; case AWS_MQTT5_DRC_USE_ANOTHER_SERVER: return s_use_another_server; case AWS_MQTT5_DRC_SERVER_MOVED: return s_server_moved; case AWS_MQTT5_DRC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED: return s_shared_subscriptions_not_supported; case AWS_MQTT5_DRC_CONNECTION_RATE_EXCEEDED: return s_connection_rate_exceeded; case AWS_MQTT5_DRC_MAXIMUM_CONNECT_TIME: return s_maximum_connect_time; case AWS_MQTT5_DRC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED: return s_subscription_identifiers_not_supported; case AWS_MQTT5_DRC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED: return s_wildcard_subscriptions_not_supported; } if (is_valid != NULL) { *is_valid = false; } return s_unknown_reason; } const char *aws_mqtt5_puback_reason_code_to_c_string(enum aws_mqtt5_puback_reason_code reason_code) { switch (reason_code) { case AWS_MQTT5_PARC_SUCCESS: return s_success; case AWS_MQTT5_PARC_NO_MATCHING_SUBSCRIBERS: return s_no_matching_subscribers; case AWS_MQTT5_PARC_UNSPECIFIED_ERROR: return s_unspecified_error; case AWS_MQTT5_PARC_IMPLEMENTATION_SPECIFIC_ERROR: return s_implementation_specific_error; case AWS_MQTT5_PARC_NOT_AUTHORIZED: return s_not_authorized; case AWS_MQTT5_PARC_TOPIC_NAME_INVALID: return s_topic_name_invalid; case AWS_MQTT5_PARC_PACKET_IDENTIFIER_IN_USE: return s_packet_identifier_in_use; case AWS_MQTT5_PARC_QUOTA_EXCEEDED: return s_quota_exceeded; case AWS_MQTT5_PARC_PAYLOAD_FORMAT_INVALID: return s_payload_format_invalid; } return s_unknown_reason; } const char *aws_mqtt5_suback_reason_code_to_c_string(enum aws_mqtt5_suback_reason_code reason_code) { switch (reason_code) { case AWS_MQTT5_SARC_GRANTED_QOS_0: return s_granted_qos_0; case AWS_MQTT5_SARC_GRANTED_QOS_1: return s_granted_qos_1; case AWS_MQTT5_SARC_GRANTED_QOS_2: return s_granted_qos_2; case AWS_MQTT5_SARC_UNSPECIFIED_ERROR: return s_unspecified_error; case AWS_MQTT5_SARC_IMPLEMENTATION_SPECIFIC_ERROR: return s_implementation_specific_error; case AWS_MQTT5_SARC_NOT_AUTHORIZED: return s_not_authorized; case AWS_MQTT5_SARC_TOPIC_FILTER_INVALID: return s_topic_filter_invalid; case AWS_MQTT5_SARC_PACKET_IDENTIFIER_IN_USE: return s_packet_identifier_in_use; case AWS_MQTT5_SARC_QUOTA_EXCEEDED: return s_quota_exceeded; case AWS_MQTT5_SARC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED: return s_shared_subscriptions_not_supported; case AWS_MQTT5_SARC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED: return s_subscription_identifiers_not_supported; case AWS_MQTT5_SARC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED: return s_wildcard_subscriptions_not_supported; } return s_unknown_reason; } const char *aws_mqtt5_unsuback_reason_code_to_c_string(enum 
aws_mqtt5_unsuback_reason_code reason_code) { switch (reason_code) { case AWS_MQTT5_UARC_SUCCESS: return s_success; case AWS_MQTT5_UARC_NO_SUBSCRIPTION_EXISTED: return s_no_subscription_existed; case AWS_MQTT5_UARC_UNSPECIFIED_ERROR: return s_unspecified_error; case AWS_MQTT5_UARC_IMPLEMENTATION_SPECIFIC_ERROR: return s_implementation_specific_error; case AWS_MQTT5_UARC_NOT_AUTHORIZED: return s_not_authorized; case AWS_MQTT5_UARC_TOPIC_FILTER_INVALID: return s_topic_filter_invalid; case AWS_MQTT5_UARC_PACKET_IDENTIFIER_IN_USE: return s_packet_identifier_in_use; } return s_unknown_reason; } const char *aws_mqtt5_payload_format_indicator_to_c_string(enum aws_mqtt5_payload_format_indicator format_indicator) { switch (format_indicator) { case AWS_MQTT5_PFI_BYTES: return "Bytes"; case AWS_MQTT5_PFI_UTF8: return "Utf-8"; } return "Unknown Payload Format"; } const char *aws_mqtt5_retain_handling_type_to_c_string(enum aws_mqtt5_retain_handling_type retain_handling_type) { switch (retain_handling_type) { case AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE: return "Send retained on any subscribe"; case AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE_IF_NEW: return "Send retained on subscribe if not already subscribed"; case AWS_MQTT5_RHT_DONT_SEND: return "Dont send retained at all"; } return "Unknown Retain Handling Type"; } const char *aws_mqtt5_packet_type_to_c_string(enum aws_mqtt5_packet_type packet_type) { switch (packet_type) { case AWS_MQTT5_PT_RESERVED: return "RESERVED(INVALID)"; case AWS_MQTT5_PT_CONNECT: return "CONNECT"; case AWS_MQTT5_PT_CONNACK: return "CONNACK"; case AWS_MQTT5_PT_PUBLISH: return "PUBLISH"; case AWS_MQTT5_PT_PUBACK: return "PUBACK"; case AWS_MQTT5_PT_PUBREC: return "PUBREC"; case AWS_MQTT5_PT_PUBREL: return "PUBREL"; case AWS_MQTT5_PT_PUBCOMP: return "PUBCOMP"; case AWS_MQTT5_PT_SUBSCRIBE: return "SUBSCRIBE"; case AWS_MQTT5_PT_SUBACK: return "SUBACK"; case AWS_MQTT5_PT_UNSUBSCRIBE: return "UNSUBSCRIBE"; case AWS_MQTT5_PT_UNSUBACK: return "UNSUBACK"; case AWS_MQTT5_PT_PINGREQ: return "PINGREQ"; case AWS_MQTT5_PT_PINGRESP: return "PINGRESP"; case AWS_MQTT5_PT_DISCONNECT: return "DISCONNECT"; case AWS_MQTT5_PT_AUTH: return "AUTH"; default: return "UNKNOWN"; } } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/mqtt5_utils.c000066400000000000000000000516551456575232400245210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include uint8_t aws_mqtt5_compute_fixed_header_byte1(enum aws_mqtt5_packet_type packet_type, uint8_t flags) { return flags | ((uint8_t)packet_type << 4); } /* encodes a utf8-string (2 byte length + "MQTT") + the version value (5) */ static uint8_t s_connect_variable_length_header_prefix[7] = {0x00, 0x04, 0x4D, 0x51, 0x54, 0x54, 0x05}; struct aws_byte_cursor g_aws_mqtt5_connect_protocol_cursor = { .ptr = &s_connect_variable_length_header_prefix[0], .len = AWS_ARRAY_SIZE(s_connect_variable_length_header_prefix), }; void aws_mqtt5_negotiated_settings_log( struct aws_mqtt5_negotiated_settings *negotiated_settings, enum aws_log_level level) { struct aws_logger *temp_logger = aws_logger_get(); if (temp_logger == NULL || temp_logger->vtable->get_log_level(temp_logger, AWS_LS_MQTT5_GENERAL) < level) { return; } AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings maximum qos set to %d", (void *)negotiated_settings, negotiated_settings->maximum_qos); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings session expiry interval set to %" PRIu32, (void *)negotiated_settings, negotiated_settings->session_expiry_interval); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings receive maximum from server set to %" PRIu16, (void *)negotiated_settings, negotiated_settings->receive_maximum_from_server); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings maximum packet size to server set to %" PRIu32, (void *)negotiated_settings, negotiated_settings->maximum_packet_size_to_server); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings topic alias maximum to server set to %" PRIu16, (void *)negotiated_settings, negotiated_settings->topic_alias_maximum_to_server); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings topic alias maximum to client set to %" PRIu16, (void *)negotiated_settings, negotiated_settings->topic_alias_maximum_to_client); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings server keep alive set to %" PRIu16, (void *)negotiated_settings, negotiated_settings->server_keep_alive); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings retain available set to %s", (void *)negotiated_settings, negotiated_settings->retain_available ? "true" : "false"); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings wildcard subscriptions available set to %s", (void *)negotiated_settings, negotiated_settings->wildcard_subscriptions_available ? "true" : "false"); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings subscription identifiers available set to %s", (void *)negotiated_settings, negotiated_settings->subscription_identifiers_available ? "true" : "false"); AWS_LOGF( level, AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_negotiated_settings shared subscriptions available set to %s", (void *)negotiated_settings, negotiated_settings->shared_subscriptions_available ?
"true" : "false"); } int aws_mqtt5_negotiated_settings_init( struct aws_allocator *allocator, struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_byte_cursor *client_id) { if (aws_byte_buf_init(&negotiated_settings->client_id_storage, allocator, client_id->len)) { return AWS_OP_ERR; } if (aws_byte_buf_append_dynamic(&negotiated_settings->client_id_storage, client_id)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_mqtt5_negotiated_settings_copy( const struct aws_mqtt5_negotiated_settings *source, struct aws_mqtt5_negotiated_settings *dest) { aws_mqtt5_negotiated_settings_clean_up(dest); *dest = *source; AWS_ZERO_STRUCT(dest->client_id_storage); if (source->client_id_storage.allocator != NULL) { return aws_byte_buf_init_copy_from_cursor( &dest->client_id_storage, source->client_id_storage.allocator, aws_byte_cursor_from_buf(&source->client_id_storage)); } return AWS_OP_SUCCESS; } int aws_mqtt5_negotiated_settings_apply_client_id( struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_byte_cursor *client_id) { if (negotiated_settings->client_id_storage.len == 0) { if (aws_byte_buf_append_dynamic(&negotiated_settings->client_id_storage, client_id)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } void aws_mqtt5_negotiated_settings_clean_up(struct aws_mqtt5_negotiated_settings *negotiated_settings) { aws_byte_buf_clean_up(&negotiated_settings->client_id_storage); } /** Assign defaults values to negotiated_settings */ void aws_mqtt5_negotiated_settings_reset( struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_mqtt5_packet_connect_view *packet_connect_view) { AWS_PRECONDITION(negotiated_settings != NULL); AWS_PRECONDITION(packet_connect_view != NULL); /* Properties that may be sent in CONNECT to Server. These should only be sent if Client changes them from their default values. */ negotiated_settings->server_keep_alive = packet_connect_view->keep_alive_interval_seconds; negotiated_settings->session_expiry_interval = 0; negotiated_settings->receive_maximum_from_server = AWS_MQTT5_RECEIVE_MAXIMUM; negotiated_settings->maximum_packet_size_to_server = AWS_MQTT5_MAXIMUM_PACKET_SIZE; negotiated_settings->topic_alias_maximum_to_client = 0; // Default for Client is QoS 1. Server default is 2. // This should only be changed if server returns a 0 in the CONNACK negotiated_settings->maximum_qos = AWS_MQTT5_QOS_AT_LEAST_ONCE; negotiated_settings->topic_alias_maximum_to_server = 0; // Default is true for following settings but can be changed by Server on CONNACK negotiated_settings->retain_available = true; negotiated_settings->wildcard_subscriptions_available = true; negotiated_settings->subscription_identifiers_available = true; negotiated_settings->shared_subscriptions_available = true; negotiated_settings->rejoined_session = false; /** * Apply user set properties to negotiated_settings * NULL pointers indicate user has not set a property and it should remain the default value. 
*/ if (packet_connect_view->session_expiry_interval_seconds != NULL) { negotiated_settings->session_expiry_interval = *packet_connect_view->session_expiry_interval_seconds; } if (packet_connect_view->topic_alias_maximum != NULL) { negotiated_settings->topic_alias_maximum_to_client = *packet_connect_view->topic_alias_maximum; } } void aws_mqtt5_negotiated_settings_apply_connack( struct aws_mqtt5_negotiated_settings *negotiated_settings, const struct aws_mqtt5_packet_connack_view *connack_data) { AWS_PRECONDITION(negotiated_settings != NULL); AWS_PRECONDITION(connack_data != NULL); /** * Reconcile CONNACK set properties with current negotiated_settings values * NULL pointers indicate Server has not set a property */ if (connack_data->session_expiry_interval != NULL) { negotiated_settings->session_expiry_interval = *connack_data->session_expiry_interval; } if (connack_data->receive_maximum != NULL) { negotiated_settings->receive_maximum_from_server = *connack_data->receive_maximum; } // NULL = Maximum QoS of 2. if (connack_data->maximum_qos != NULL) { if (*connack_data->maximum_qos < negotiated_settings->maximum_qos) { negotiated_settings->maximum_qos = *connack_data->maximum_qos; } } if (connack_data->retain_available != NULL) { negotiated_settings->retain_available = *connack_data->retain_available; } if (connack_data->maximum_packet_size != NULL) { negotiated_settings->maximum_packet_size_to_server = *connack_data->maximum_packet_size; } // If a value is not sent by Server, the Client must not send any Topic Aliases to the Server. if (connack_data->topic_alias_maximum != NULL) { negotiated_settings->topic_alias_maximum_to_server = *connack_data->topic_alias_maximum; } if (connack_data->wildcard_subscriptions_available != NULL) { negotiated_settings->wildcard_subscriptions_available = *connack_data->wildcard_subscriptions_available; } if (connack_data->subscription_identifiers_available != NULL) { negotiated_settings->subscription_identifiers_available = *connack_data->subscription_identifiers_available; } if (connack_data->shared_subscriptions_available != NULL) { negotiated_settings->shared_subscriptions_available = *connack_data->shared_subscriptions_available; } if (connack_data->server_keep_alive != NULL) { negotiated_settings->server_keep_alive = *connack_data->server_keep_alive; } if (connack_data->assigned_client_identifier != NULL) { aws_mqtt5_negotiated_settings_apply_client_id(negotiated_settings, connack_data->assigned_client_identifier); } negotiated_settings->rejoined_session = connack_data->session_present; } const char *aws_mqtt5_client_session_behavior_type_to_c_string( enum aws_mqtt5_client_session_behavior_type session_behavior) { switch (aws_mqtt5_client_session_behavior_type_to_non_default(session_behavior)) { case AWS_MQTT5_CSBT_CLEAN: return "Clean session always"; case AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS: return "Attempt to resume a session after initial connection success"; case AWS_MQTT5_CSBT_REJOIN_ALWAYS: return "Always attempt to resume a session"; default: return "Unknown session behavior"; } } enum aws_mqtt5_client_session_behavior_type aws_mqtt5_client_session_behavior_type_to_non_default( enum aws_mqtt5_client_session_behavior_type session_behavior) { if (session_behavior == AWS_MQTT5_CSBT_DEFAULT) { return AWS_MQTT5_CSBT_CLEAN; } return session_behavior; } const char *aws_mqtt5_outbound_topic_alias_behavior_type_to_c_string( enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior) { switch 
(aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default(outbound_aliasing_behavior)) { case AWS_MQTT5_COTABT_MANUAL: return "User-controlled outbound topic aliasing behavior"; case AWS_MQTT5_COTABT_LRU: return "LRU caching outbound topic aliasing behavior"; case AWS_MQTT5_COTABT_DISABLED: return "Outbound topic aliasing disabled"; default: return "Unknown outbound topic aliasing behavior"; } } bool aws_mqtt5_outbound_topic_alias_behavior_type_validate( enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior) { return outbound_aliasing_behavior >= AWS_MQTT5_COTABT_DEFAULT && outbound_aliasing_behavior <= AWS_MQTT5_COTABT_DISABLED; } enum aws_mqtt5_client_outbound_topic_alias_behavior_type aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default( enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior) { if (outbound_aliasing_behavior == AWS_MQTT5_COTABT_DEFAULT) { return AWS_MQTT5_COTABT_DISABLED; } return outbound_aliasing_behavior; } const char *aws_mqtt5_inbound_topic_alias_behavior_type_to_c_string( enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior) { switch (aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default(inbound_aliasing_behavior)) { case AWS_MQTT5_CITABT_ENABLED: return "Inbound topic aliasing behavior enabled"; case AWS_MQTT5_CITABT_DISABLED: return "Inbound topic aliasing behavior disabled"; default: return "Unknown inbound topic aliasing behavior"; } } bool aws_mqtt5_inbound_topic_alias_behavior_type_validate( enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior) { return inbound_aliasing_behavior >= AWS_MQTT5_CITABT_DEFAULT && inbound_aliasing_behavior <= AWS_MQTT5_CITABT_DISABLED; } enum aws_mqtt5_client_inbound_topic_alias_behavior_type aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default( enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior) { if (inbound_aliasing_behavior == AWS_MQTT5_CITABT_DEFAULT) { return AWS_MQTT5_CITABT_DISABLED; } return inbound_aliasing_behavior; } const char *aws_mqtt5_extended_validation_and_flow_control_options_to_c_string( enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_behavior) { switch (extended_validation_behavior) { case AWS_MQTT5_EVAFCO_NONE: return "No additional flow control or packet validation"; case AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS: return "AWS IoT Core flow control and packet validation"; default: return "Unknown extended validation behavior"; } } const char *aws_mqtt5_client_operation_queue_behavior_type_to_c_string( enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior) { switch (aws_mqtt5_client_operation_queue_behavior_type_to_non_default(offline_queue_behavior)) { case AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT: return "Fail all incomplete operations except QoS 1 publishes"; case AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT: return "Fail incomplete QoS 0 publishes"; case AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT: return "Fail all incomplete operations"; default: return "Unknown operation queue behavior type"; } } enum aws_mqtt5_client_operation_queue_behavior_type aws_mqtt5_client_operation_queue_behavior_type_to_non_default( enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior) { if (offline_queue_behavior == AWS_MQTT5_COQBT_DEFAULT) { return AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT; } return offline_queue_behavior; } const char 
*aws_mqtt5_client_lifecycle_event_type_to_c_string( enum aws_mqtt5_client_lifecycle_event_type lifecycle_event) { switch (lifecycle_event) { case AWS_MQTT5_CLET_ATTEMPTING_CONNECT: return "Connection establishment attempt"; case AWS_MQTT5_CLET_CONNECTION_SUCCESS: return "Connection establishment success"; case AWS_MQTT5_CLET_CONNECTION_FAILURE: return "Connection establishment failure"; case AWS_MQTT5_CLET_DISCONNECTION: return "Disconnection"; case AWS_MQTT5_CLET_STOPPED: return "Client stopped"; } return "Unknown lifecycle event"; } uint64_t aws_mqtt5_client_random_in_range(uint64_t from, uint64_t to) { uint64_t max = aws_max_u64(from, to); uint64_t min = aws_min_u64(from, to); /* Note: this contains several changes to the corresponding function in aws-c-io. Don't throw them away. * * 1. random range is now inclusive/closed: [from, to] rather than half-open [from, to) * 2. as a corollary, diff == 0 => return min, not 0 */ uint64_t diff = max - min; if (!diff) { return min; } uint64_t random_value = 0; if (aws_device_random_u64(&random_value)) { return min; } if (diff == UINT64_MAX) { return random_value; } return min + random_value % (diff + 1); /* + 1 is safe due to previous check */ } static uint8_t s_aws_iot_core_rules_prefix[] = "$aws/rules/"; static struct aws_byte_cursor s_aws_mqtt5_topic_skip_aws_iot_rules_prefix(struct aws_byte_cursor topic_cursor) { size_t prefix_length = AWS_ARRAY_SIZE(s_aws_iot_core_rules_prefix) - 1; /* skip 0-terminator */ struct aws_byte_cursor rules_prefix = { .ptr = s_aws_iot_core_rules_prefix, .len = prefix_length, }; if (topic_cursor.len < rules_prefix.len) { return topic_cursor; } struct aws_byte_cursor topic_cursor_copy = topic_cursor; struct aws_byte_cursor topic_prefix = topic_cursor; topic_prefix.len = rules_prefix.len; if (!aws_byte_cursor_eq_ignore_case(&rules_prefix, &topic_prefix)) { return topic_cursor; } aws_byte_cursor_advance(&topic_cursor_copy, prefix_length); if (topic_cursor_copy.len == 0) { return topic_cursor; } struct aws_byte_cursor rule_name_segment_cursor; AWS_ZERO_STRUCT(rule_name_segment_cursor); if (!aws_byte_cursor_next_split(&topic_cursor_copy, '/', &rule_name_segment_cursor)) { return topic_cursor; } if (topic_cursor_copy.len < rule_name_segment_cursor.len + 1) { return topic_cursor; } aws_byte_cursor_advance(&topic_cursor_copy, rule_name_segment_cursor.len + 1); return topic_cursor_copy; } static uint8_t s_shared_subscription_prefix[] = "$share"; static bool s_is_not_hash_or_plus(uint8_t byte) { return byte != '+' && byte != '#'; } static struct aws_byte_cursor s_aws_mqtt5_topic_skip_shared_prefix(struct aws_byte_cursor topic_cursor) { /* shared subscription filters must have an initial segment of "$share" */ struct aws_byte_cursor first_segment_cursor; AWS_ZERO_STRUCT(first_segment_cursor); if (!aws_byte_cursor_next_split(&topic_cursor, '/', &first_segment_cursor)) { return topic_cursor; } struct aws_byte_cursor share_prefix_cursor = { .ptr = s_shared_subscription_prefix, .len = AWS_ARRAY_SIZE(s_shared_subscription_prefix) - 1, /* skip null terminator */ }; if (!aws_byte_cursor_eq_ignore_case(&share_prefix_cursor, &first_segment_cursor)) { return topic_cursor; } /* * The next segment must be non-empty and cannot include '#', '/', or '+'. 
In this case we know it already * does not include '/' */ struct aws_byte_cursor second_segment_cursor = first_segment_cursor; if (!aws_byte_cursor_next_split(&topic_cursor, '/', &second_segment_cursor)) { return topic_cursor; } if (second_segment_cursor.len == 0 || !aws_byte_cursor_satisfies_pred(&second_segment_cursor, s_is_not_hash_or_plus)) { return topic_cursor; } /* * Everything afterwards must form a normal, valid topic filter. */ struct aws_byte_cursor remaining_cursor = topic_cursor; size_t remaining_length = topic_cursor.ptr + topic_cursor.len - (second_segment_cursor.len + second_segment_cursor.ptr); if (remaining_length == 0) { return topic_cursor; } aws_byte_cursor_advance(&remaining_cursor, topic_cursor.len - remaining_length + 1); return remaining_cursor; } struct aws_byte_cursor aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(struct aws_byte_cursor topic_cursor) { struct aws_byte_cursor skip_shared = s_aws_mqtt5_topic_skip_shared_prefix(topic_cursor); struct aws_byte_cursor skip_rules = s_aws_mqtt5_topic_skip_aws_iot_rules_prefix(skip_shared); return skip_rules; } size_t aws_mqtt5_topic_get_segment_count(struct aws_byte_cursor topic_cursor) { size_t segment_count = 0; struct aws_byte_cursor segment_cursor; AWS_ZERO_STRUCT(segment_cursor); while (aws_byte_cursor_next_split(&topic_cursor, '/', &segment_cursor)) { ++segment_count; } return segment_count; } bool aws_mqtt_is_valid_topic_filter_for_iot_core(struct aws_byte_cursor topic_filter_cursor) { struct aws_byte_cursor post_rule_suffix = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(topic_filter_cursor); return aws_mqtt5_topic_get_segment_count(post_rule_suffix) <= AWS_IOT_CORE_MAXIMUM_TOPIC_SEGMENTS; } bool aws_mqtt_is_valid_topic_for_iot_core(struct aws_byte_cursor topic_cursor) { struct aws_byte_cursor post_rule_suffix = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(topic_cursor); if (aws_mqtt5_topic_get_segment_count(post_rule_suffix) > AWS_IOT_CORE_MAXIMUM_TOPIC_SEGMENTS) { return false; } return post_rule_suffix.len <= AWS_IOT_CORE_MAXIMUM_TOPIC_LENGTH; } /* $share/{ShareName}/{filter} */ bool aws_mqtt_is_topic_filter_shared_subscription(struct aws_byte_cursor topic_cursor) { struct aws_byte_cursor remaining_cursor = s_aws_mqtt5_topic_skip_shared_prefix(topic_cursor); if (remaining_cursor.len == topic_cursor.len) { return false; } if (!aws_mqtt_is_valid_topic_filter(&remaining_cursor)) { return false; } return true; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/source/v5/rate_limiters.c000066400000000000000000000206411456575232400250610ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include static int s_rate_limit_time_fn(const struct aws_rate_limiter_token_bucket_options *options, uint64_t *current_time) { if (options->clock_fn != NULL) { return (*options->clock_fn)(current_time); } return aws_high_res_clock_get_ticks(current_time); } int aws_rate_limiter_token_bucket_init( struct aws_rate_limiter_token_bucket *limiter, const struct aws_rate_limiter_token_bucket_options *options) { AWS_ZERO_STRUCT(*limiter); if (options->tokens_per_second == 0 || options->maximum_token_count == 0) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } limiter->config = *options; aws_rate_limiter_token_bucket_reset(limiter); return AWS_OP_SUCCESS; } void aws_rate_limiter_token_bucket_reset(struct aws_rate_limiter_token_bucket *limiter) { limiter->current_token_count = aws_min_u64(limiter->config.initial_token_count, limiter->config.maximum_token_count); limiter->fractional_nanos = 0; limiter->fractional_nano_tokens = 0; uint64_t now = 0; AWS_FATAL_ASSERT(s_rate_limit_time_fn(&limiter->config, &now) == AWS_OP_SUCCESS); limiter->last_service_time = now; } static void s_regenerate_tokens(struct aws_rate_limiter_token_bucket *limiter) { uint64_t now = 0; AWS_FATAL_ASSERT(s_rate_limit_time_fn(&limiter->config, &now) == AWS_OP_SUCCESS); if (now <= limiter->last_service_time) { return; } uint64_t nanos_elapsed = now - limiter->last_service_time; /* * We break the regeneration calculation into two distinct steps: * (1) Perform regeneration based on whole seconds elapsed (nice and easy just multiply times the regen rate) * (2) Perform regeneration based on the remaining fraction of a second elapsed * * We do this to minimize the chances of multiplication saturation before the divide necessary to normalize to * nanos. * * In particular, by doing this, we won't see saturation unless a regeneration rate in the multi-billions is used * or elapsed_seconds is in the billions. This is similar reasoning to what we do in aws_timestamp_convert_u64. * * Additionally, we use a (sub-second) fractional counter/accumulator (fractional_nanos, fractional_nano_tokens) * in order to prevent error accumulation due to integer division rounding. */ /* break elapsed time into seconds and remainder nanos */ uint64_t remainder_nanos = 0; uint64_t elapsed_seconds = aws_timestamp_convert(nanos_elapsed, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder_nanos); /* apply seconds-based regeneration */ uint64_t tokens_regenerated = aws_mul_u64_saturating(elapsed_seconds, limiter->config.tokens_per_second); /* apply fractional remainder regeneration */ limiter->fractional_nanos += remainder_nanos; /* fractional overflow check */ if (limiter->fractional_nanos < AWS_TIMESTAMP_NANOS) { /* * no overflow, just do the division to figure out how many tokens are represented by the updated * fractional nanos */ uint64_t new_fractional_tokens = aws_mul_u64_saturating(limiter->fractional_nanos, limiter->config.tokens_per_second) / AWS_TIMESTAMP_NANOS; /* * update token count by how much fractional tokens changed */ tokens_regenerated += new_fractional_tokens - limiter->fractional_nano_tokens; limiter->fractional_nano_tokens = new_fractional_tokens; } else { /* * overflow. In this case, update token count by the remaining tokens left to regenerate to make the * original fractional nano amount equal to one second. This is the key part (a pseudo-reset) that lets us * avoid error accumulation due to integer division rounding over time. 
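 * Worked example (editorial addition; the numbers are purely illustrative): with tokens_per_second = 5, suppose fractional_nanos was 800,000,000 with fractional_nano_tokens = 4 already credited, and another 400,000,000 fractional nanos arrive. fractional_nanos becomes 1,200,000,000 and overflows one second, so we first credit 5 - 4 = 1 token to complete that second, subtract a full second to leave fractional_nanos = 200,000,000, recompute fractional_nano_tokens = 200,000,000 * 5 / 1,000,000,000 = 1, and credit that token too. The 2 tokens credited equal the 6 tokens earned over the full 1.2 fractional seconds minus the 4 credited previously.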
*/ tokens_regenerated += limiter->config.tokens_per_second - limiter->fractional_nano_tokens; /* * subtract off a second from the fractional part. Guaranteed to be less than a second afterwards. */ limiter->fractional_nanos -= AWS_TIMESTAMP_NANOS; /* * Calculate the new fractional nano token amount, and add them in. */ limiter->fractional_nano_tokens = aws_mul_u64_saturating(limiter->fractional_nanos, limiter->config.tokens_per_second) / AWS_TIMESTAMP_NANOS; tokens_regenerated += limiter->fractional_nano_tokens; } limiter->current_token_count = aws_add_u64_saturating(tokens_regenerated, limiter->current_token_count); if (limiter->current_token_count > limiter->config.maximum_token_count) { limiter->current_token_count = limiter->config.maximum_token_count; } limiter->last_service_time = now; } bool aws_rate_limiter_token_bucket_can_take_tokens( struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count) { s_regenerate_tokens(limiter); return limiter->current_token_count >= token_count; } int aws_rate_limiter_token_bucket_take_tokens(struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count) { s_regenerate_tokens(limiter); if (limiter->current_token_count < token_count) { /* TODO: correct error once seated in aws-c-common */ return aws_raise_error(AWS_ERROR_INVALID_STATE); } limiter->current_token_count -= token_count; return AWS_OP_SUCCESS; } uint64_t aws_rate_limiter_token_bucket_compute_wait_for_tokens( struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count) { s_regenerate_tokens(limiter); if (limiter->current_token_count >= token_count) { return 0; } uint64_t token_rate = limiter->config.tokens_per_second; AWS_FATAL_ASSERT(limiter->fractional_nanos < AWS_TIMESTAMP_NANOS); AWS_FATAL_ASSERT(limiter->fractional_nano_tokens <= token_rate); uint64_t expected_wait = 0; uint64_t deficit = token_count - limiter->current_token_count; uint64_t remaining_fractional_tokens = token_rate - limiter->fractional_nano_tokens; if (deficit < remaining_fractional_tokens) { /* * case 1: * The token deficit is less than what will be regenerated by waiting for the fractional nanos accumulator * to reach one second's worth of time. * * In this case, base the calculation off of just a wait from fractional nanos. */ uint64_t target_fractional_tokens = aws_add_u64_saturating(deficit, limiter->fractional_nano_tokens); uint64_t remainder_wait_unnormalized = aws_mul_u64_saturating(target_fractional_tokens, AWS_TIMESTAMP_NANOS); expected_wait = remainder_wait_unnormalized / token_rate - limiter->fractional_nanos; /* If the fractional wait is itself, fractional, then add one more nano second to push us over the edge */ if (remainder_wait_unnormalized % token_rate) { ++expected_wait; } } else { /* * case 2: * The token deficit requires regeneration for a time interval at least as large as what is needed * to overflow the fractional nanos accumulator. 
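 * Worked example (editorial addition; the numbers are purely illustrative): with token_rate = 10, fractional_nanos = 400,000,000, fractional_nano_tokens = 4, an empty bucket, and a request for 27 tokens, the deficit is 27 and remaining_fractional_tokens is 10 - 4 = 6, so this branch applies. The wait is 600,000,000 ns to finish the current fractional second (6 tokens), plus 2,000,000,000 ns for 20 more tokens, plus 100,000,000 ns for the final token: 2,700,000,000 ns in total, matching the arithmetic below.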
*/ /* First account for making the fractional nano accumulator exactly one second */ expected_wait = AWS_TIMESTAMP_NANOS - limiter->fractional_nanos; deficit -= remaining_fractional_tokens; /* * Now, for the remaining tokens, split into tokens from whole seconds worth of regeneration as well * as a remainder requiring a fractional regeneration */ uint64_t expected_wait_seconds = deficit / token_rate; uint64_t deficit_remainder = deficit % token_rate; /* * Account for seconds worth of waiting */ expected_wait += aws_mul_u64_saturating(expected_wait_seconds, AWS_TIMESTAMP_NANOS); /* * And finally, calculate the fractional wait to give us the last few tokens */ uint64_t remainder_wait_unnormalized = aws_mul_u64_saturating(deficit_remainder, AWS_TIMESTAMP_NANOS); expected_wait += remainder_wait_unnormalized / token_rate; /* If the fractional wait is itself, fractional, then add one more nano second to push us over the edge */ if (remainder_wait_unnormalized % token_rate) { ++expected_wait; } } return expected_wait; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/000077500000000000000000000000001456575232400213575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/CMakeLists.txt000066400000000000000000000631521456575232400241260ustar00rootroot00000000000000include(CTest) include(AwsTestHarness) include(AwsLibFuzzer) enable_testing() file(GLOB TEST_HDRS "v3/*.h v5/*.h") set(TEST_SRC v3/*.c v5/*.c *.c) file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) add_test_case(mqtt_packet_puback) add_test_case(mqtt_packet_pubrec) add_test_case(mqtt_packet_pubrel) add_test_case(mqtt_packet_pubcomp) add_test_case(mqtt_packet_suback) add_test_case(mqtt_packet_unsuback) add_test_case(mqtt_packet_connect) add_test_case(mqtt_packet_connect_will) add_test_case(mqtt_packet_connect_empty_payload_will) add_test_case(mqtt_packet_connect_password) add_test_case(mqtt_packet_connect_all) add_test_case(mqtt_packet_connack) add_test_case(mqtt_packet_publish_qos0_dup) add_test_case(mqtt_packet_publish_qos2_retain) add_test_case(mqtt_packet_publish_empty_payload) add_test_case(mqtt_packet_subscribe) add_test_case(mqtt_packet_unsubscribe) add_test_case(mqtt_packet_pingreq) add_test_case(mqtt_packet_pingresp) add_test_case(mqtt_packet_disconnect) add_test_case(mqtt_packet_connack_decode_failure_reserved) add_test_case(mqtt_packet_ack_decode_failure_reserved) add_test_case(mqtt_packet_pingresp_decode_failure_reserved) add_test_case(mqtt_frame_and_decode_publish) add_test_case(mqtt_frame_and_decode_suback) add_test_case(mqtt_frame_and_decode_unsuback) add_test_case(mqtt_frame_and_decode_puback) add_test_case(mqtt_frame_and_decode_pingresp) add_test_case(mqtt_frame_and_decode_connack) add_test_case(mqtt_frame_and_decode_bad_remaining_length) add_test_case(mqtt_frame_and_decode_unsupported_packet_type) add_test_case(mqtt_frame_and_decode_bad_flags_for_packet_type) add_test_case(mqtt_topic_tree_match) add_test_case(mqtt_topic_tree_unsubscribe) add_test_case(mqtt_topic_tree_duplicate_transactions) add_test_case(mqtt_topic_tree_transactions) add_test_case(mqtt_topic_validation) add_test_case(mqtt_topic_filter_validation) add_test_case(mqtt_connect_disconnect) add_test_case(mqtt_connect_set_will_login) add_test_case(mqtt_connection_interrupted) add_test_case(mqtt_connection_any_publish) add_test_case(mqtt_connection_timeout) add_test_case(mqtt_connection_connack_timeout) add_test_case(mqtt_connection_failure_callback) add_test_case(mqtt_connection_success_callback) add_test_case(mqtt_connect_subscribe) 
add_test_case(mqtt_connect_subscribe_fail_from_broker) add_test_case(mqtt_connect_subscribe_multi) add_test_case(mqtt_connect_subscribe_incoming_dup) add_test_case(mqtt_connect_unsubscribe) add_test_case(mqtt_connect_resubscribe) add_test_case(mqtt_connect_publish) add_test_case(mqtt_connect_publish_payload) add_test_case(mqtt_connection_offline_publish) add_test_case(mqtt_connection_disconnect_while_reconnecting) add_test_case(mqtt_connection_closes_while_making_requests) add_test_case(mqtt_connection_resend_packets) # It's too complicate to implement... Let's save it for the future. # add_test_case(mqtt_connection_not_retry_publish_QoS_0) add_test_case(mqtt_connection_consistent_retry_policy) add_test_case(mqtt_connection_not_resend_packets_on_healthy_connection) add_test_case(mqtt_connection_destory_pending_requests) add_test_case(mqtt_clean_session_not_retry) add_test_case(mqtt_clean_session_discard_previous) add_test_case(mqtt_clean_session_keep_next_session) add_test_case(mqtt_connection_publish_QoS1_timeout) add_test_case(mqtt_connection_publish_QoS1_timeout_with_ping) add_test_case(mqtt_connection_unsub_timeout) add_test_case(mqtt_connection_publish_QoS1_timeout_connection_lost_reset_time) add_test_case(mqtt_connection_ping_norm) add_test_case(mqtt_connection_ping_no) add_test_case(mqtt_connection_ping_noack) add_test_case(mqtt_connection_ping_basic_scenario) add_test_case(mqtt_connection_ping_double_scenario) add_test_case(mqtt_connection_close_callback_simple) add_test_case(mqtt_connection_close_callback_interrupted) add_test_case(mqtt_connection_close_callback_multi) add_test_case(mqtt_connection_reconnection_backoff_stable) add_test_case(mqtt_connection_reconnection_backoff_unstable) add_test_case(mqtt_connection_reconnection_backoff_reset) add_test_case(mqtt_connection_reconnection_backoff_reset_after_disconnection) add_test_case(mqtt_validation_failure_publish_qos) add_test_case(mqtt_validation_failure_invalid_will_qos) add_test_case(mqtt_validation_failure_subscribe_empty) add_test_case(mqtt_validation_failure_unsubscribe_null) add_test_case(mqtt_validation_failure_connect_invalid_client_id_utf8) add_test_case(mqtt_validation_failure_invalid_will_topic_utf8) add_test_case(mqtt_validation_failure_invalid_username_utf8) # Operation statistics tests add_test_case(mqtt_operation_statistics_simple_publish) add_test_case(mqtt_operation_statistics_offline_publish) add_test_case(mqtt_operation_statistics_disconnect_publish) add_test_case(mqtt_operation_statistics_reconnect_publish) add_test_case(mqtt_operation_statistics_simple_subscribe) add_test_case(mqtt_operation_statistics_simple_unsubscribe) add_test_case(mqtt_operation_statistics_simple_resubscribe) add_test_case(mqtt_operation_statistics_simple_callback) # Connection termination tests add_test_case(mqtt_connection_termination_callback_simple) # MQTT5 tests # topic utilities add_test_case(mqtt5_topic_skip_rules_prefix) add_test_case(mqtt5_topic_get_segment_count) add_test_case(mqtt5_shared_subscription_validation) # utf8 utility add_test_case(mqtt_utf8_encoded_string_test) # topic aliasing add_test_case(mqtt5_inbound_topic_alias_register_failure) add_test_case(mqtt5_inbound_topic_alias_resolve_success) add_test_case(mqtt5_inbound_topic_alias_resolve_failure) add_test_case(mqtt5_inbound_topic_alias_reset) add_test_case(mqtt5_outbound_topic_alias_disabled_resolve_success) add_test_case(mqtt5_outbound_topic_alias_disabled_resolve_failure) add_test_case(mqtt5_outbound_topic_alias_manual_resolve_failure_zero_alias) 
add_test_case(mqtt5_outbound_topic_alias_manual_resolve_failure_too_big_alias) add_test_case(mqtt5_outbound_topic_alias_manual_resolve_success) add_test_case(mqtt5_outbound_topic_alias_manual_reset) add_test_case(mqtt5_outbound_topic_alias_lru_zero_size) # lru topic sequence tests # cache size of 2 # a, b, c, refer to distinct topics # the 'r' suffice refers to expected alias reuse add_test_case(mqtt5_outbound_topic_alias_lru_a_ar) add_test_case(mqtt5_outbound_topic_alias_lru_b_a_br) add_test_case(mqtt5_outbound_topic_alias_lru_a_b_ar_br) add_test_case(mqtt5_outbound_topic_alias_lru_a_b_c_br_cr_br_cr_a) add_test_case(mqtt5_outbound_topic_alias_lru_a_b_c_a_cr_b) add_test_case(mqtt5_outbound_topic_alias_lru_a_b_reset_a_b) # mqtt operation/storage/view creation/relationship tests add_test_case(mqtt5_publish_operation_new_set_no_optional) add_test_case(mqtt5_publish_operation_new_set_all) add_test_case(mqtt5_publish_operation_new_failure_packet_id) add_test_case(mqtt5_subscribe_operation_new_set_no_optional) add_test_case(mqtt5_subscribe_operation_new_set_all) add_test_case(mqtt5_unsubscribe_operation_new_set_all) add_test_case(mqtt5_connect_storage_new_set_no_optional) add_test_case(mqtt5_connect_storage_new_set_all) add_test_case(mqtt5_connack_storage_new_set_no_optional) add_test_case(mqtt5_connack_storage_new_set_all) add_test_case(mqtt5_disconnect_storage_new_set_no_optional) add_test_case(mqtt5_disconnect_storage_new_set_all) add_test_case(mqtt5_suback_storage_new_set_no_optional) add_test_case(mqtt5_suback_storage_new_set_all) add_test_case(mqtt5_unsuback_storage_new_set_no_optional) add_test_case(mqtt5_unsuback_storage_new_set_all) add_test_case(mqtt5_puback_storage_new_set_all) add_test_case(mqtt5_publish_storage_new_set_all) # operation/view validation failure tests add_test_case(mqtt5_operation_disconnect_validation_failure_server_reference) add_test_case(mqtt5_operation_disconnect_validation_failure_bad_reason_code) add_test_case(mqtt5_operation_disconnect_validation_failure_reason_string_too_long) add_test_case(mqtt5_operation_disconnect_validation_failure_reason_string_invalid_utf8) add_test_case(mqtt5_operation_disconnect_validation_failure_user_properties_name_too_long) add_test_case(mqtt5_operation_disconnect_validation_failure_user_properties_name_invalid_utf8) add_test_case(mqtt5_operation_disconnect_validation_failure_user_properties_value_too_long) add_test_case(mqtt5_operation_disconnect_validation_failure_user_properties_value_invalid_utf8) add_test_case(mqtt5_operation_disconnect_validation_failure_user_properties_too_many) add_test_case(mqtt5_operation_connect_validation_failure_client_id_too_long) add_test_case(mqtt5_operation_connect_validation_failure_client_id_invalid_utf8) add_test_case(mqtt5_operation_connect_validation_failure_username_too_long) add_test_case(mqtt5_operation_connect_validation_failure_username_invalid_utf8) add_test_case(mqtt5_operation_connect_validation_failure_password_too_long) add_test_case(mqtt5_operation_connect_validation_failure_receive_maximum_zero) add_test_case(mqtt5_operation_connect_validation_failure_maximum_packet_size_zero) add_test_case(mqtt5_operation_connect_validation_failure_will_invalid) add_test_case(mqtt5_operation_connect_validation_failure_will_payload_too_long) add_test_case(mqtt5_operation_connect_validation_failure_auth_method_unsupported) add_test_case(mqtt5_operation_connect_validation_failure_auth_data_unsupported) add_test_case(mqtt5_operation_connect_validation_failure_request_problem_information_invalid) 
add_test_case(mqtt5_operation_connect_validation_failure_request_response_information_invalid) add_test_case(mqtt5_operation_connect_validation_failure_user_properties_name_too_long) add_test_case(mqtt5_operation_connect_validation_failure_user_properties_name_invalid_utf8) add_test_case(mqtt5_operation_connect_validation_failure_user_properties_value_too_long) add_test_case(mqtt5_operation_connect_validation_failure_user_properties_value_invalid_utf8) add_test_case(mqtt5_operation_connect_validation_failure_user_properties_too_many) add_test_case(mqtt5_operation_subscribe_validation_failure_no_subscriptions) add_test_case(mqtt5_operation_subscribe_validation_failure_too_many_subscriptions) add_test_case(mqtt5_operation_subscribe_validation_failure_invalid_subscription_identifier) add_test_case(mqtt5_operation_subscribe_validation_failure_invalid_topic_filter) add_test_case(mqtt5_operation_subscribe_validation_failure_invalid_utf8_topic_filter) add_test_case(mqtt5_operation_subscribe_validation_failure_invalid_qos) add_test_case(mqtt5_operation_subscribe_validation_failure_invalid_retain_type) add_test_case(mqtt5_operation_subscribe_validation_failure_invalid_no_local) add_test_case(mqtt5_operation_subscribe_validation_failure_user_properties_name_too_long) add_test_case(mqtt5_operation_subscribe_validation_failure_user_properties_name_invalid_utf8) add_test_case(mqtt5_operation_subscribe_validation_failure_user_properties_value_too_long) add_test_case(mqtt5_operation_subscribe_validation_failure_user_properties_value_invalid_utf8) add_test_case(mqtt5_operation_subscribe_validation_failure_user_properties_too_many) add_test_case(mqtt5_operation_unsubscribe_validation_failure_no_topic_filters) add_test_case(mqtt5_operation_unsubscribe_validation_failure_too_many_topic_filters) add_test_case(mqtt5_operation_unsubscribe_validation_failure_invalid_topic_filter) add_test_case(mqtt5_operation_unsubscribe_validation_failure_invalid_utf8_topic_filter) add_test_case(mqtt5_operation_unsubscribe_validation_failure_user_properties_name_too_long) add_test_case(mqtt5_operation_unsubscribe_validation_failure_user_properties_name_invalid_utf8) add_test_case(mqtt5_operation_unsubscribe_validation_failure_user_properties_value_too_long) add_test_case(mqtt5_operation_unsubscribe_validation_failure_user_properties_value_invalid_utf8) add_test_case(mqtt5_operation_unsubscribe_validation_failure_user_properties_too_many) add_test_case(mqtt5_operation_publish_validation_failure_invalid_topic) add_test_case(mqtt5_operation_publish_validation_failure_invalid_utf8_topic) add_test_case(mqtt5_operation_publish_validation_failure_no_topic) add_test_case(mqtt5_operation_publish_validation_failure_invalid_payload_format) add_test_case(mqtt5_operation_publish_validation_failure_invalid_utf8_payload) add_test_case(mqtt5_operation_publish_validation_failure_response_topic_too_long) add_test_case(mqtt5_operation_publish_validation_failure_invalid_response_topic) add_test_case(mqtt5_operation_publish_validation_failure_invalid_utf8_response_topic) add_test_case(mqtt5_operation_publish_validation_failure_correlation_data_too_long) add_test_case(mqtt5_operation_publish_validation_failure_content_type_too_long) add_test_case(mqtt5_operation_publish_validation_failure_invalid_utf8_content_type) add_test_case(mqtt5_operation_publish_validation_failure_subscription_identifier_exists) add_test_case(mqtt5_operation_publish_validation_failure_topic_alias_zero) 
add_test_case(mqtt5_operation_publish_validation_failure_user_properties_name_too_long) add_test_case(mqtt5_operation_publish_validation_failure_user_properties_name_invalid_utf8) add_test_case(mqtt5_operation_publish_validation_failure_user_properties_value_too_long) add_test_case(mqtt5_operation_publish_validation_failure_user_properties_value_invalid_utf8) add_test_case(mqtt5_operation_publish_validation_failure_user_properties_too_many) add_test_case(mqtt5_operation_publish_validation_failure_qos0_duplicate_true) add_test_case(mqtt5_operation_publish_validation_failure_qos0_with_packet_id) add_test_case(mqtt5_client_options_validation_failure_no_host) add_test_case(mqtt5_client_options_validation_failure_no_bootstrap) add_test_case(mqtt5_client_options_validation_failure_no_publish_received) add_test_case(mqtt5_client_options_validation_failure_invalid_socket_options) add_test_case(mqtt5_client_options_validation_failure_invalid_connect) add_test_case(mqtt5_client_options_validation_failure_invalid_keep_alive) add_test_case(mqtt5_client_options_validation_failure_invalid_port) add_test_case(mqtt5_operation_subscribe_connection_settings_validation_failure_exceeds_maximum_packet_size) add_test_case(mqtt5_operation_unsubscribe_connection_settings_validation_failure_exceeds_maximum_packet_size) add_test_case(mqtt5_operation_publish_connection_settings_validation_failure_exceeds_maximum_packet_size) add_test_case(mqtt5_operation_publish_connection_settings_validation_failure_exceeds_maximum_qos) add_test_case(mqtt5_operation_publish_connection_settings_validation_failure_invalid_retain) add_test_case(mqtt5_operation_disconnect_connection_settings_validation_failure_exceeds_maximum_packet_size) add_test_case(mqtt5_operation_disconnect_connection_settings_validation_failure_promote_zero_session_expiry) add_test_case(mqtt5_client_options_defaults_set) add_test_case(mqtt5_operation_bind_packet_id_empty_table) add_test_case(mqtt5_operation_bind_packet_id_multiple_with_existing) add_test_case(mqtt5_operation_bind_packet_id_multiple_with_wrap_around) add_test_case(mqtt5_operation_bind_packet_id_full_table) add_test_case(mqtt5_operation_bind_packet_id_not_valid) add_test_case(mqtt5_operation_bind_packet_id_already_bound) add_test_case(mqtt5_operation_processing_nothing_empty_queue) add_test_case(mqtt5_operation_processing_nothing_mqtt_connect) add_test_case(mqtt5_operation_processing_nothing_clean_disconnect) add_test_case(mqtt5_operation_processing_nothing_pending_write_completion_mqtt_connect) add_test_case(mqtt5_operation_processing_nothing_pending_write_completion_connected) add_test_case(mqtt5_operation_processing_nothing_pending_write_completion_clean_disconnect) add_test_case(mqtt5_operation_processing_failure_message_allocation) add_test_case(mqtt5_operation_processing_failure_message_send) add_test_case(mqtt5_operation_processing_something_mqtt_connect) add_test_case(mqtt5_operation_processing_something_clean_disconnect) add_test_case(mqtt5_operation_processing_something_connected_multi) add_test_case(mqtt5_operation_processing_something_connected_overflow) add_test_case(mqtt5_operation_processing_disconnect_fail_all) add_test_case(mqtt5_operation_processing_disconnect_fail_qos0) add_test_case(mqtt5_operation_processing_disconnect_fail_non_qos1) add_test_case(mqtt5_operation_processing_reconnect_rejoin_session_fail_all) add_test_case(mqtt5_operation_processing_reconnect_rejoin_session_fail_qos0) # intentionally skip the non_qos1 rejoin session case, there's no meaningful test given the 
logic add_test_case(mqtt5_operation_processing_reconnect_no_session_fail_all) add_test_case(mqtt5_operation_processing_reconnect_no_session_fail_qos0) add_test_case(mqtt5_operation_processing_reconnect_no_session_fail_non_qos1) add_test_case(mqtt5_negotiated_settings_reset_test) add_test_case(mqtt5_negotiated_settings_apply_connack_test) add_test_case(mqtt5_negotiated_settings_server_override_test) # vli encode/decode add_test_case(mqtt5_vli_size) add_test_case(mqtt5_vli_success_round_trip) add_test_case(mqtt5_vli_encode_failures) add_test_case(mqtt5_vli_decode_failures) # packet encode/decode cycle tests add_test_case(mqtt5_packet_disconnect_round_trip) add_test_case(mqtt5_packet_pingreq_round_trip) add_test_case(mqtt5_packet_pingresp_round_trip) add_test_case(mqtt5_packet_connect_round_trip) add_test_case(mqtt5_packet_connack_round_trip) add_test_case(mqtt5_packet_subscribe_round_trip) add_test_case(mqtt5_packet_suback_round_trip) add_test_case(mqtt5_packet_unsubscribe_round_trip) add_test_case(mqtt5_packet_unsuback_round_trip) add_test_case(mqtt5_packet_publish_round_trip) add_test_case(mqtt5_packet_puback_round_trip) add_test_case(mqtt5_packet_encode_connect_no_will) add_test_case(mqtt5_packet_encode_connect_no_username) add_test_case(mqtt5_packet_encode_connect_no_password) add_test_case(mqtt5_packet_encode_connect_will_property_order) add_test_case(mqtt5_first_byte_reserved_header_check_subscribe) add_test_case(mqtt5_first_byte_reserved_header_check_unsubscribe) add_test_case(mqtt5_first_byte_reserved_header_check_disconnect) add_test_case(mqtt5_client_direct_connect_success) add_test_case(mqtt5_client_direct_connect_sync_channel_failure) add_test_case(mqtt5_client_direct_connect_async_channel_failure) add_test_case(mqtt5_client_websocket_connect_sync_channel_failure) add_test_case(mqtt5_client_websocket_connect_async_channel_failure) add_test_case(mqtt5_client_websocket_connect_handshake_failure) add_test_case(mqtt5_client_direct_connect_connack_refusal) add_test_case(mqtt5_client_direct_connect_connack_timeout) add_test_case(mqtt5_client_direct_connect_from_server_disconnect) add_test_case(mqtt5_client_subscribe_success) add_test_case(mqtt5_client_unsubscribe_success) add_test_case(mqtt5_client_sub_pub_unsub_qos0) add_test_case(mqtt5_client_sub_pub_unsub_qos1) add_test_case(mqtt5_client_ping_sequence) add_test_case(mqtt5_client_ping_timeout) add_test_case(mqtt5_client_reconnect_failure_backoff) add_test_case(mqtt5_client_reconnect_backoff_insufficient_reset) add_test_case(mqtt5_client_reconnect_backoff_sufficient_reset) add_test_case(mqtt5_client_subscribe_fail_packet_too_big) add_test_case(mqtt5_client_disconnect_fail_packet_too_big) add_test_case(mqtt5_client_flow_control_receive_maximum) add_test_case(mqtt5_client_publish_timeout) add_test_case(mqtt5_client_dynamic_operation_timeout) add_test_case(mqtt5_client_dynamic_operation_timeout_default) add_test_case(mqtt5_client_flow_control_iot_core_throughput) add_test_case(mqtt5_client_flow_control_iot_core_publish_tps) add_test_case(mqtt5_client_session_resumption_clean_start) add_test_case(mqtt5_client_session_resumption_post_success) add_test_case(mqtt5_client_session_resumption_always) add_test_case(mqtt5_client_receive_qos1_return_puback_test) add_test_case(mqtt5_client_receive_nonexisting_session_state) add_test_case(mqtt5_client_receive_assigned_client_id) add_test_case(mqtt5_client_no_session_after_client_stop) add_test_case(mqtt5_client_restore_session_on_ping_timeout_reconnect) 
add_test_case(mqtt5_client_discard_session_on_server_clean_start) add_test_case(mqtt5_client_statistics_subscribe) add_test_case(mqtt5_client_statistics_unsubscribe) add_test_case(mqtt5_client_statistics_publish_qos0) add_test_case(mqtt5_client_statistics_publish_qos1) add_test_case(mqtt5_client_statistics_publish_qos1_requeue) add_test_case(mqtt5_client_puback_ordering) add_test_case(mqtt5_client_listeners) add_test_case(mqtt5_client_offline_operation_submission_fail_all) add_test_case(mqtt5_client_offline_operation_submission_fail_qos0) add_test_case(mqtt5_client_offline_operation_submission_fail_non_qos1) add_test_case(mqtt5_client_offline_operation_submission_then_connect) add_test_case(mqtt5_client_inbound_alias_success) add_test_case(mqtt5_client_inbound_alias_failure_disabled) add_test_case(mqtt5_client_inbound_alias_failure_zero_id) add_test_case(mqtt5_client_inbound_alias_failure_too_large_id) add_test_case(mqtt5_client_inbound_alias_failure_unbound_id) add_test_case(mqtt5_client_outbound_alias_manual_failure_empty_topic) # a, b, c, r imply notation as the outbound resolver unit tests above add_test_case(mqtt5_client_outbound_alias_manual_success_a_b_ar_br) add_test_case(mqtt5_client_outbound_alias_lru_success_a_b_c_br_cr_a) add_test_case(rate_limiter_token_bucket_init_invalid) add_test_case(rate_limiter_token_bucket_regeneration_integral) add_test_case(rate_limiter_token_bucket_regeneration_fractional) add_test_case(rate_limiter_token_bucket_fractional_iteration) add_test_case(rate_limiter_token_bucket_large_fractional_iteration) add_test_case(rate_limiter_token_bucket_real_iteration) add_test_case(rate_limiter_token_bucket_reset) # mqtt5 to 3 adapter tests add_test_case(mqtt5to3_adapter_create_destroy) add_test_case(mqtt5to3_adapter_create_destroy_delayed) add_test_case(mqtt5to3_adapter_set_will) add_test_case(mqtt5to3_adapter_set_login) add_test_case(mqtt5to3_adapter_set_reconnect_timeout) add_test_case(mqtt5to3_adapter_connect_success) add_test_case(mqtt5to3_adapter_connect_success_disconnect_success) add_test_case(mqtt5to3_adapter_connect_success_disconnect_success_thrice) add_test_case(mqtt5to3_adapter_connect_success_connect_failure) add_test_case(mqtt5to3_adapter_connect_success_sloppy_shutdown) add_test_case(mqtt5to3_adapter_connect_bad_connectivity) add_test_case(mqtt5to3_adapter_connect_bad_connectivity_with_mqtt5_restart) add_test_case(mqtt5to3_adapter_connect_failure_connect_success_via_mqtt5) add_test_case(mqtt5to3_adapter_connect_failure_bad_config_success_good_config) add_test_case(mqtt5to3_adapter_connect_reconnect_failures) add_test_case(mqtt5to3_adapter_connect_success_disconnect_connect) add_test_case(mqtt5to3_adapter_connect_success_stop_mqtt5_disconnect_success) add_test_case(mqtt5to3_adapter_disconnect_success) add_test_case(mqtt5to3_adapter_connect_success_disconnect_success_disconnect_success) add_test_case(mqtt5to3_adapter_operation_allocation_simple) add_test_case(mqtt5to3_adapter_operation_allocation_wraparound) add_test_case(mqtt5to3_adapter_operation_allocation_exhaustion) add_test_case(mqtt5to3_adapter_publish_failure_invalid) add_test_case(mqtt5to3_adapter_publish_failure_offline_queue_policy) add_test_case(mqtt5to3_adapter_publish_success_qos0) add_test_case(mqtt5to3_adapter_publish_success_qos1) add_test_case(mqtt5to3_adapter_publish_no_ack) add_test_case(mqtt5to3_adapter_publish_interrupted) add_test_case(mqtt5to3_adapter_subscribe_single_success) add_test_case(mqtt5to3_adapter_subscribe_multi_success) 
add_test_case(mqtt5to3_adapter_subscribe_single_failure) add_test_case(mqtt5to3_adapter_subscribe_single_invalid) add_test_case(mqtt5to3_adapter_subscribe_multi_failure) add_test_case(mqtt5to3_adapter_subscribe_multi_invalid) add_test_case(mqtt5to3_adapter_subscribe_single_publish) add_test_case(mqtt5to3_adapter_subscribe_multi_overlapping_publish) add_test_case(mqtt5to3_adapter_unsubscribe_success) add_test_case(mqtt5to3_adapter_unsubscribe_failure) add_test_case(mqtt5to3_adapter_unsubscribe_invalid) add_test_case(mqtt5to3_adapter_unsubscribe_overlapped) add_test_case(mqtt5to3_adapter_get_stats) add_test_case(mqtt5to3_adapter_resubscribe_nothing) add_test_case(mqtt5to3_adapter_resubscribe_something) add_test_case(mqtt5to3_adapter_subscribe_single_null_suback) add_test_case(mqtt5to3_adapter_subscribe_multi_null_suback) add_test_case(mqtt5to3_adapter_operation_callbacks_after_shutdown) add_test_case(mqtt_subscription_set_add_empty_not_subbed) add_test_case(mqtt_subscription_set_add_single_path) add_test_case(mqtt_subscription_set_add_overlapped_branching_paths) add_test_case(mqtt_subscription_set_remove_overlapping_path) add_test_case(mqtt_subscription_set_remove_branching_path) add_test_case(mqtt_subscription_set_remove_invalid) add_test_case(mqtt_subscription_set_remove_empty_segments) add_test_case(mqtt_subscription_set_add_remove_repeated) add_test_case(mqtt_subscription_set_publish_single_path) add_test_case(mqtt_subscription_set_publish_multi_path) add_test_case(mqtt_subscription_set_publish_single_level_wildcards) add_test_case(mqtt_subscription_set_publish_multi_level_wildcards) add_test_case(mqtt_subscription_set_get_subscriptions) generate_test_driver(${PROJECT_NAME}-tests) set(TEST_PAHO_CLIENT_BINARY_NAME ${PROJECT_NAME}-paho-client) add_executable(${TEST_PAHO_CLIENT_BINARY_NAME} "v3-client/paho_client_test.c") target_link_libraries(${TEST_PAHO_CLIENT_BINARY_NAME} PRIVATE ${PROJECT_NAME}) aws_set_common_properties(${TEST_PAHO_CLIENT_BINARY_NAME}) aws_add_sanitizers(${TEST_PAHO_CLIENT_BINARY_NAME} ${${PROJECT_NAME}_SANITIZERS}) target_compile_definitions(${TEST_PAHO_CLIENT_BINARY_NAME} PRIVATE AWS_UNSTABLE_TESTING_API=1) target_include_directories(${TEST_PAHO_CLIENT_BINARY_NAME} PRIVATE ${CMAKE_CURRENT_LIST_DIR}) set(TEST_IOT_CLIENT_BINARY_NAME ${PROJECT_NAME}-iot-client) add_executable(${TEST_IOT_CLIENT_BINARY_NAME} "v3-client/aws_iot_client_test.c") target_link_libraries(${TEST_IOT_CLIENT_BINARY_NAME} PRIVATE ${PROJECT_NAME}) aws_set_common_properties(${TEST_IOT_CLIENT_BINARY_NAME}) aws_add_sanitizers(${TEST_IOT_CLIENT_BINARY_NAME} ${${PROJECT_NAME}_SANITIZERS}) target_compile_definitions(${TEST_IOT_CLIENT_BINARY_NAME} PRIVATE AWS_UNSTABLE_TESTING_API=1) target_include_directories(${TEST_IOT_CLIENT_BINARY_NAME} PRIVATE ${CMAKE_CURRENT_LIST_DIR}) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/shared_utils_tests.c000066400000000000000000000126231456575232400254370ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
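 *
 * Exercises aws_mqtt_validate_utf8_text() against the MQTT-specific UTF-8 rules: well-formed 1- to
 * 4-byte sequences (including the BOM) must be accepted, while U+0000, the control ranges
 * U+0001 - U+001F and U+007F - U+009F, the noncharacters U+FDD0 - U+FDEF, and codepoints ending in
 * U+FFFE/U+FFFF must be rejected.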
*/ #include #include #include struct utf8_example { const char *name; struct aws_byte_cursor text; }; static struct utf8_example s_valid_mqtt_utf8_examples[] = { { .name = "1 letter", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("a"), }, { .name = "Several ascii letters", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ascii word"), }, { .name = "empty string", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), }, { .name = "2 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2\xA3"), }, { .name = "3 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xE2\x82\xAC"), }, { .name = "4 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF0\x90\x8D\x88"), }, { .name = "A variety of different length codepoints", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "\xF0\x90\x8D\x88\xE2\x82\xAC\xC2\xA3\x24\xC2\xA3\xE2\x82\xAC\xF0\x90\x8D\x88"), }, { .name = "UTF8 BOM", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBB\xBF"), }, { .name = "UTF8 BOM plus extra", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBB\xBF\x24\xC2\xA3"), }, { .name = "First possible 3 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xE0\xA0\x80"), }, { .name = "First possible 4 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF0\x90\x80\x80"), }, { .name = "Last possible 2 byte codepoint", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xDF\xBF"), }, { .name = "Last valid codepoint before prohibited range U+D800 - U+DFFF", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xED\x9F\xBF"), }, { .name = "Next valid codepoint after prohibited range U+D800 - U+DFFF", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEE\x80\x80"), }, { .name = "Boundary condition", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBF\xBD"), }, { .name = "Boundary condition", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF4\x90\x80\x80"), }, }; static struct utf8_example s_illegal_mqtt_utf8_examples[] = { { .name = "non character U+0000", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x00"), }, { .name = "Codepoint in prohibited range U+0001 - U+001F (in the middle)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x04"), }, { .name = "Codepoint in prohibited range U+0001 - U+001F (boundary)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x1F"), }, { .name = "Codepoint in prohibited range U+007F - U+009F (min: U+7F)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x7F"), }, { .name = "Codepoint in prohibited range U+007F - U+009F (in the middle u+8F)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2\x8F"), }, { .name = "Codepoint in prohibited range U+007F - U+009F (boundary U+9F)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xC2\x9F"), }, { .name = "non character end with U+FFFF", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xBF\xBF"), }, { .name = "non character end with U+FFFE", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xF7\xBF\xBF\xBE"), }, { .name = "non character in U+FDD0 - U+FDEF (lower bound)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xB7\x90"), }, { .name = "non character in U+FDD0 - U+FDEF (in middle)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xB7\xA1"), }, { .name = "non character in U+FDD0 - U+FDEF (upper bound)", .text = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\xEF\xB7\xAF"), }}; static int s_mqtt_utf8_encoded_string_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Check the valid test cases */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_mqtt_utf8_examples); ++i) { struct utf8_example 
example = s_valid_mqtt_utf8_examples[i]; printf("valid example [%zu]: %s\n", i, example.name); ASSERT_SUCCESS(aws_mqtt_validate_utf8_text(example.text)); } /* Glue all the valid test cases together, they ought to pass */ struct aws_byte_buf all_good_text; aws_byte_buf_init(&all_good_text, allocator, 1024); for (size_t i = 0; i < AWS_ARRAY_SIZE(s_valid_mqtt_utf8_examples); ++i) { aws_byte_buf_append_dynamic(&all_good_text, &s_valid_mqtt_utf8_examples[i].text); } ASSERT_SUCCESS(aws_mqtt_validate_utf8_text(aws_byte_cursor_from_buf(&all_good_text))); aws_byte_buf_clean_up(&all_good_text); /* Check the illegal test cases */ for (size_t i = 0; i < AWS_ARRAY_SIZE(s_illegal_mqtt_utf8_examples); ++i) { struct utf8_example example = s_illegal_mqtt_utf8_examples[i]; printf("illegal example [%zu]: %s\n", i, example.name); ASSERT_FAILS(aws_mqtt_validate_utf8_text(example.text)); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_utf8_encoded_string_test, s_mqtt_utf8_encoded_string_test)aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3-client/000077500000000000000000000000001456575232400231635ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3-client/aws_iot_client_test.c000066400000000000000000000332001456575232400273670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef WIN32 # include # define sleep Sleep #else # include #endif const char s_client_id_prefix[] = "sdk-c-v2-"; AWS_STATIC_STRING_FROM_LITERAL(s_subscribe_topic, "sdk/test/c"); enum { PUBLISHES = 20 }; enum { PAYLOAD_LEN = 200 }; static uint8_t s_payload[PAYLOAD_LEN]; static uint8_t s_will_payload[] = "The client has gone offline!"; enum { WILL_PAYLOAD_LEN = sizeof(s_will_payload) }; struct test_context { struct aws_allocator *allocator; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_logger logger; struct aws_tls_ctx *tls_ctx; struct aws_event_loop_group *el_group; struct aws_host_resolver *resolver; struct aws_client_bootstrap *bootstrap; struct aws_mqtt_client *client; struct aws_mqtt_client_connection *connection; struct aws_tls_connection_options tls_connection_options; size_t pubacks_gotten; size_t packets_gotten; bool received_on_connection_complete; bool received_on_suback; bool received_on_disconnect; }; static void s_on_puback( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)error_code; AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); struct test_context *tester = userdata; aws_mutex_lock(&tester->lock); ++tester->pubacks_gotten; aws_mutex_unlock(&tester->lock); } static void s_on_packet_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; (void)topic; (void)dup; (void)qos; (void)retain; AWS_FATAL_ASSERT(payload->len == PAYLOAD_LEN); AWS_FATAL_ASSERT(0 == memcmp(payload->ptr, s_payload, PAYLOAD_LEN)); bool notify = false; struct test_context *tester = userdata; aws_mutex_lock(&tester->lock); ++tester->packets_gotten; if (tester->packets_gotten == PUBLISHES) { notify = true; } aws_mutex_unlock(&tester->lock); if (notify) { aws_condition_variable_notify_one(&tester->signal); } 
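    /* Note: the condition variable is deliberately signaled outside the lock; the main thread
     * re-checks packets_gotten under the lock via s_all_packets_received_cond before proceeding. */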
} static bool s_all_packets_received_cond(void *userdata) { struct test_context *tester = userdata; return tester->packets_gotten == PUBLISHES; } static void s_mqtt_on_connection_complete( struct aws_mqtt_client_connection *connection, int error_code, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; (void)error_code; (void)return_code; (void)session_present; AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); AWS_FATAL_ASSERT(return_code == AWS_MQTT_CONNECT_ACCEPTED); AWS_FATAL_ASSERT(session_present == false); struct test_context *tester = userdata; aws_mutex_lock(&tester->lock); tester->received_on_connection_complete = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_mqtt_on_suback( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)topic; (void)qos; (void)error_code; AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); AWS_FATAL_ASSERT(qos != AWS_MQTT_QOS_FAILURE); struct test_context *tester = userdata; aws_mutex_lock(&tester->lock); tester->received_on_suback = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_on_connection_interrupted(struct aws_mqtt_client_connection *connection, int error_code, void *userdata) { (void)connection; (void)userdata; printf("CONNECTION INTERRUPTED error_code=%d\n", error_code); } static void s_on_resubscribed( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_array_list *topic_subacks, /* contains aws_mqtt_topic_subscription pointers */ int error_code, void *userdata) { (void)connection; (void)packet_id; (void)userdata; AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); size_t num_topics = aws_array_list_length(topic_subacks); printf("RESUBSCRIBE_COMPLETE. 
error_code=%d num_topics=%zu\n", error_code, num_topics); for (size_t i = 0; i < num_topics; ++i) { struct aws_mqtt_topic_subscription sub_i; aws_array_list_get_at(topic_subacks, &sub_i, i); printf(" topic=" PRInSTR " qos=%d\n", AWS_BYTE_CURSOR_PRI(sub_i.topic), sub_i.qos); AWS_FATAL_ASSERT(sub_i.qos != AWS_MQTT_QOS_FAILURE); } } static void s_on_connection_resumed( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; (void)userdata; printf("CONNECTION RESUMED return_code=%d session_present=%d\n", return_code, session_present); if (!session_present) { printf("RESUBSCRIBING..."); uint16_t packet_id = aws_mqtt_resubscribe_existing_topics(connection, s_on_resubscribed, NULL); AWS_FATAL_ASSERT(packet_id); } } static void s_mqtt_on_disconnect(struct aws_mqtt_client_connection *connection, void *userdata) { (void)connection; struct test_context *tester = userdata; aws_mutex_lock(&tester->lock); tester->received_on_disconnect = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_wait_on_tester_predicate(struct test_context *tester, bool (*predicate)(void *)) { aws_mutex_lock(&tester->lock); aws_condition_variable_wait_pred(&tester->signal, &tester->lock, predicate, tester); aws_mutex_unlock(&tester->lock); } static bool s_received_on_disconnect_pred(void *user_data) { struct test_context *tester = user_data; return tester->received_on_disconnect; } static bool s_received_on_suback_pred(void *user_data) { struct test_context *tester = user_data; return tester->received_on_suback; } static bool s_connection_complete_pred(void *user_data) { struct test_context *tester = user_data; return tester->received_on_connection_complete; } int s_initialize_test( struct test_context *tester, struct aws_allocator *allocator, const char *cert, const char *private_key, const char *root_ca, const char *endpoint) { aws_mqtt_library_init(allocator); struct aws_logger_standard_options logger_options = { .level = AWS_LL_TRACE, .file = stdout, }; aws_logger_init_standard(&tester->logger, allocator, &logger_options); aws_logger_set(&tester->logger); tester->allocator = allocator; aws_mutex_init(&tester->lock); aws_condition_variable_init(&tester->signal); tester->el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->el_group, .max_entries = 8, }; tester->resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = tester->el_group, .host_resolver = tester->resolver, }; tester->bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_tls_ctx_options tls_ctx_opt; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_tls_ctx_options_init_client_mtls_from_path(&tls_ctx_opt, allocator, cert, private_key)); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_tls_ctx_options_set_alpn_list(&tls_ctx_opt, "x-amzn-mqtt-ca")); AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_tls_ctx_options_override_default_trust_store_from_path(&tls_ctx_opt, NULL, root_ca)); tester->tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_opt); AWS_FATAL_ASSERT(tester->tls_ctx != NULL); aws_tls_ctx_options_clean_up(&tls_ctx_opt); aws_tls_connection_options_init_from_ctx(&tester->tls_connection_options, tester->tls_ctx); tester->client = aws_mqtt_client_new(allocator, tester->bootstrap); tester->connection = 
aws_mqtt_client_connection_new(tester->client); struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.connect_timeout_ms = 3000; socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV6; struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_c_str(endpoint); /* Generate a random clientid */ char client_id[128]; struct aws_byte_buf client_id_buf = aws_byte_buf_from_empty_array(client_id, AWS_ARRAY_SIZE(client_id)); aws_byte_buf_write(&client_id_buf, (const uint8_t *)s_client_id_prefix, AWS_ARRAY_SIZE(s_client_id_prefix)); struct aws_uuid uuid; aws_uuid_init(&uuid); aws_uuid_to_str(&uuid, &client_id_buf); struct aws_byte_cursor client_id_cur = aws_byte_cursor_from_buf(&client_id_buf); struct aws_mqtt_connection_options conn_options = { .host_name = host_name_cur, .port = 8883, .socket_options = &socket_options, .tls_options = &tester->tls_connection_options, .client_id = client_id_cur, .keep_alive_time_secs = 0, .ping_timeout_ms = 0, .on_connection_complete = s_mqtt_on_connection_complete, .user_data = tester, .clean_session = true, }; AWS_FATAL_ASSERT( AWS_OP_SUCCESS == aws_mqtt_client_connection_set_connection_interruption_handlers( tester->connection, s_on_connection_interrupted, NULL, s_on_connection_resumed, NULL)); struct aws_byte_cursor subscribe_topic_cur = aws_byte_cursor_from_string(s_subscribe_topic); struct aws_byte_cursor will_cur = aws_byte_cursor_from_array(s_will_payload, WILL_PAYLOAD_LEN); aws_mqtt_client_connection_set_will(tester->connection, &subscribe_topic_cur, 1, false, &will_cur); aws_mqtt_client_connection_connect(tester->connection, &conn_options); return AWS_OP_SUCCESS; } static void s_cleanup_test(struct test_context *tester) { aws_tls_connection_options_clean_up(&tester->tls_connection_options); aws_mqtt_client_connection_release(tester->connection); aws_mqtt_client_release(tester->client); aws_tls_ctx_release(tester->tls_ctx); aws_client_bootstrap_release(tester->bootstrap); aws_host_resolver_release(tester->resolver); aws_event_loop_group_release(tester->el_group); aws_thread_join_all_managed(); aws_logger_clean_up(&tester->logger); aws_mutex_clean_up(&tester->lock); aws_condition_variable_clean_up(&tester->signal); aws_mqtt_library_clean_up(); } int main(int argc, char **argv) { if (argc < 5) { printf( "4 args required, only %d passed. 
Usage:\n" "aws-c-mqtt-iot-client [endpoint] [certificate] [private_key] [root_ca]\n", argc - 1); return 1; } const char *endpoint = argv[1]; const char *cert = argv[2]; const char *private_key = argv[3]; const char *root_ca = argv[4]; struct aws_allocator *allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0); struct test_context tester; AWS_ZERO_STRUCT(tester); AWS_FATAL_ASSERT(s_initialize_test(&tester, allocator, cert, private_key, root_ca, endpoint) == AWS_OP_SUCCESS); struct aws_byte_cursor subscribe_topic_cur = aws_byte_cursor_from_string(s_subscribe_topic); s_wait_on_tester_predicate(&tester, s_connection_complete_pred); aws_mqtt_client_connection_subscribe( tester.connection, &subscribe_topic_cur, AWS_MQTT_QOS_AT_LEAST_ONCE, &s_on_packet_received, &tester, NULL, s_mqtt_on_suback, &tester); s_wait_on_tester_predicate(&tester, s_received_on_suback_pred); /* Populate the payload */ struct aws_byte_buf payload_buf = aws_byte_buf_from_empty_array(s_payload, PAYLOAD_LEN); aws_device_random_buffer(&payload_buf); struct aws_byte_cursor payload_cur = aws_byte_cursor_from_buf(&payload_buf); for (int i = 0; i < PUBLISHES; ++i) { aws_mqtt_client_connection_publish( tester.connection, &subscribe_topic_cur, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_cur, &s_on_puback, &tester); /* Keep the service endpoint from throttling the connection */ if (i != 0 && i % 100 == 0) { sleep(1); } } s_wait_on_tester_predicate(&tester, s_all_packets_received_cond); AWS_FATAL_ASSERT(PUBLISHES == tester.packets_gotten); aws_mqtt_client_connection_disconnect(tester.connection, s_mqtt_on_disconnect, &tester); s_wait_on_tester_predicate(&tester, s_received_on_disconnect_pred); s_cleanup_test(&tester); AWS_FATAL_ASSERT(0 == aws_mem_tracer_count(allocator)); allocator = aws_mem_tracer_destroy(allocator); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3-client/paho_client_test.c000066400000000000000000000276271456575232400266710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
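 *
 * Test flow, as implemented in main() below: connect with a clean session, publish a retained QoS2
 * message and wait for its ack, disconnect, then reconnect, subscribe to the same topic and expect
 * the retained message to arrive (via both the subscription handler and the on_any_publish handler),
 * unsubscribe, disconnect again, and finally verify that the tracing allocator reports no leaked
 * allocations.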
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef WIN32 # include # define sleep Sleep #else # include #endif /* Note: to successfully run this test, a broker on localhost:1883 is required, eg: mosquitto */ static struct aws_byte_cursor s_client_id = { .ptr = (uint8_t *)"MyClientId1", .len = 11, }; AWS_STATIC_STRING_FROM_LITERAL(s_subscribe_topic, "a/b"); AWS_STATIC_STRING_FROM_LITERAL(s_hostname, "localhost"); enum { PAYLOAD_LEN = 20000 }; static uint8_t s_payload[PAYLOAD_LEN]; struct test_context { struct aws_allocator *allocator; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_event_loop_group *el_group; struct aws_host_resolver *resolver; struct aws_client_bootstrap *bootstrap; struct aws_mqtt_client *client; struct aws_mqtt_client_connection *connection; bool retained_packet_received; bool on_any_publish_fired; bool connection_complete; bool received_pub_ack; bool received_unsub_ack; bool on_disconnect_received; }; static void s_on_packet_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *user_data) { (void)connection; (void)topic; (void)dup; (void)qos; (void)retain; struct aws_byte_cursor expected_payload = { .ptr = s_payload, .len = PAYLOAD_LEN, }; (void)expected_payload; AWS_FATAL_ASSERT(aws_byte_cursor_eq(payload, &expected_payload)); printf("2 started\n"); struct test_context *tester = user_data; aws_mutex_lock(&tester->lock); tester->retained_packet_received = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_on_any_packet_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *user_data) { (void)connection; (void)topic; (void)dup; (void)qos; (void)retain; struct aws_byte_cursor expected_payload = { .ptr = s_payload, .len = PAYLOAD_LEN, }; (void)expected_payload; AWS_FATAL_ASSERT(aws_byte_cursor_eq(payload, &expected_payload)); struct test_context *tester = user_data; aws_mutex_lock(&tester->lock); tester->on_any_publish_fired = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_mqtt_on_puback( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)error_code; AWS_FATAL_ASSERT(error_code == AWS_OP_SUCCESS); struct test_context *tester = userdata; printf("2 started\n"); aws_mutex_lock(&tester->lock); tester->received_pub_ack = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_mqtt_on_connection_complete( struct aws_mqtt_client_connection *connection, int error_code, enum aws_mqtt_connect_return_code return_code, bool session_present, void *user_data) { (void)connection; (void)error_code; (void)return_code; (void)session_present; AWS_FATAL_ASSERT(error_code == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(return_code == AWS_MQTT_CONNECT_ACCEPTED); AWS_FATAL_ASSERT(session_present == false); struct test_context *tester = user_data; printf("1 started\n"); aws_mutex_lock(&tester->lock); tester->connection_complete = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_mqtt_on_interrupted(struct 
aws_mqtt_client_connection *connection, int error_code, void *userdata) { (void)connection; (void)error_code; (void)userdata; printf("Connection offline\n"); } static void s_mqtt_on_resumed( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; (void)return_code; (void)session_present; (void)userdata; printf("Connection resumed\n"); } static void s_mqtt_on_unsuback( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)error_code; AWS_FATAL_ASSERT(error_code == AWS_OP_SUCCESS); struct test_context *tester = userdata; printf("2 started\n"); aws_mutex_lock(&tester->lock); tester->received_unsub_ack = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static void s_mqtt_on_disconnect(struct aws_mqtt_client_connection *connection, void *user_data) { (void)connection; struct test_context *tester = user_data; printf("3 started\n"); aws_mutex_lock(&tester->lock); tester->on_disconnect_received = true; aws_mutex_unlock(&tester->lock); aws_condition_variable_notify_one(&tester->signal); } static bool s_is_connection_complete_pred(void *user_data) { struct test_context *tester = user_data; return tester->connection_complete; } static bool s_received_pub_ack_pred(void *user_data) { struct test_context *tester = user_data; return tester->received_pub_ack; } static bool s_on_disconnect_received(void *user_data) { struct test_context *tester = user_data; return tester->on_disconnect_received; } static bool s_retained_and_any_publish_pred(void *user_data) { struct test_context *tester = user_data; return tester->retained_packet_received && tester->on_any_publish_fired; } static bool s_received_unsub_ack_pred(void *user_data) { struct test_context *tester = user_data; return tester->received_unsub_ack; } static void s_wait_on_tester_predicate(struct test_context *tester, bool (*predicate)(void *)) { aws_mutex_lock(&tester->lock); aws_condition_variable_wait_pred(&tester->signal, &tester->lock, predicate, tester); aws_mutex_unlock(&tester->lock); } static int s_initialize_test( struct test_context *tester, struct aws_allocator *allocator, struct aws_mqtt_connection_options *conn_options) { aws_mqtt_library_init(allocator); aws_mutex_init(&tester->lock); aws_condition_variable_init(&tester->signal); tester->el_group = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->el_group, .max_entries = 8, }; tester->resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = tester->el_group, .host_resolver = tester->resolver, }; tester->bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); tester->client = aws_mqtt_client_new(allocator, tester->bootstrap); tester->connection = aws_mqtt_client_connection_new(tester->client); aws_mqtt_client_connection_set_connection_interruption_handlers( tester->connection, s_mqtt_on_interrupted, NULL, s_mqtt_on_resumed, NULL); aws_mqtt_client_connection_set_on_any_publish_handler(tester->connection, s_on_any_packet_received, tester); AWS_FATAL_ASSERT(aws_mqtt_client_connection_connect(tester->connection, conn_options) == AWS_OP_SUCCESS); return AWS_OP_SUCCESS; } static void s_cleanup_test(struct test_context *tester) { 
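    /* Tear down in reverse order of creation: connection, client, bootstrap, resolver, then the event
     * loop group.  Joining managed threads afterwards ensures all release callbacks have finished
     * before the mutex/condition variable are destroyed and the MQTT library is cleaned up. */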
aws_mqtt_client_connection_release(tester->connection); aws_mqtt_client_release(tester->client); aws_client_bootstrap_release(tester->bootstrap); aws_host_resolver_release(tester->resolver); aws_event_loop_group_release(tester->el_group); aws_thread_join_all_managed(); aws_mutex_clean_up(&tester->lock); aws_condition_variable_clean_up(&tester->signal); aws_mqtt_library_clean_up(); } int main(int argc, char **argv) { (void)argc; (void)argv; struct aws_allocator *allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0); struct test_context tester; AWS_ZERO_STRUCT(tester); struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_string(s_hostname); struct aws_socket_options options; AWS_ZERO_STRUCT(options); options.connect_timeout_ms = 3000; options.type = AWS_SOCKET_STREAM; options.domain = AWS_SOCKET_IPV4; struct aws_mqtt_connection_options conn_options = { .host_name = host_name_cur, .port = 1883, .socket_options = &options, .tls_options = NULL, .client_id = s_client_id, .keep_alive_time_secs = 0, .ping_timeout_ms = 0, .on_connection_complete = s_mqtt_on_connection_complete, .user_data = &tester, .clean_session = true, }; AWS_FATAL_ASSERT(AWS_OP_SUCCESS == s_initialize_test(&tester, allocator, &conn_options)); /* Wait for connack */ s_wait_on_tester_predicate(&tester, s_is_connection_complete_pred); printf("1 done\n"); struct aws_byte_cursor subscribe_topic_cur = aws_byte_cursor_from_string(s_subscribe_topic); /* Populate the payload */ struct aws_byte_cursor payload_cur = aws_byte_cursor_from_array(s_payload, PAYLOAD_LEN); aws_mqtt_client_connection_publish( tester.connection, &subscribe_topic_cur, AWS_MQTT_QOS_EXACTLY_ONCE, true, &payload_cur, &s_mqtt_on_puback, &tester); /* Wait for puback */ s_wait_on_tester_predicate(&tester, s_received_pub_ack_pred); printf("2 done\n"); aws_mqtt_client_connection_disconnect(tester.connection, s_mqtt_on_disconnect, &tester); /* Wait for disconnack */ s_wait_on_tester_predicate(&tester, s_on_disconnect_received); printf("3 done\n"); aws_mutex_lock(&tester.lock); tester.connection_complete = false; tester.on_disconnect_received = false; aws_mutex_unlock(&tester.lock); AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mqtt_client_connection_connect(tester.connection, &conn_options)); /* Wait for connack */ s_wait_on_tester_predicate(&tester, s_is_connection_complete_pred); printf("1 done\n"); /* Subscribe (no on_suback, the on_message received will trigger the cv) */ aws_mqtt_client_connection_subscribe( tester.connection, &subscribe_topic_cur, AWS_MQTT_QOS_EXACTLY_ONCE, &s_on_packet_received, &tester, NULL, NULL, NULL); /* Wait for PUBLISH */ s_wait_on_tester_predicate(&tester, s_retained_and_any_publish_pred); printf("2 done\n"); struct aws_byte_cursor topic_filter = aws_byte_cursor_from_array(aws_string_bytes(s_subscribe_topic), s_subscribe_topic->len); aws_mqtt_client_connection_unsubscribe(tester.connection, &topic_filter, &s_mqtt_on_unsuback, &tester); /* Wait for UNSUBACK */ s_wait_on_tester_predicate(&tester, s_received_unsub_ack_pred); printf("3 done\n"); sleep(4); aws_mqtt_client_connection_disconnect(tester.connection, s_mqtt_on_disconnect, &tester); s_wait_on_tester_predicate(&tester, s_on_disconnect_received); s_cleanup_test(&tester); AWS_FATAL_ASSERT(0 == aws_mem_tracer_count(allocator)); allocator = aws_mem_tracer_destroy(allocator); return AWS_OP_SUCCESS; } 
aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/000077500000000000000000000000001456575232400217075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/connection_state_test.c000066400000000000000000005467101456575232400264660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "mqtt_mock_server_handler.h" #include #include #include #include #include #include #include #include static const int TEST_LOG_SUBJECT = 60000; static const int ONE_SEC = 1000000000; // The value is extract from aws-c-mqtt/source/client.c static const int AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS = 10; static const uint64_t RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS = 500000000; #define DEFAULT_MIN_RECONNECT_DELAY_SECONDS 1 #define DEFAULT_TEST_PING_TIMEOUT_MS 1000 #define DEFAULT_TEST_KEEP_ALIVE_S 2 struct received_publish_packet { struct aws_byte_buf topic; struct aws_byte_buf payload; bool dup; enum aws_mqtt_qos qos; bool retain; }; struct mqtt_connection_state_test { struct aws_allocator *allocator; struct aws_channel *server_channel; struct aws_channel_handler *mock_server; struct aws_client_bootstrap *client_bootstrap; struct aws_server_bootstrap *server_bootstrap; struct aws_event_loop_group *el_group; struct aws_host_resolver *host_resolver; struct aws_socket_endpoint endpoint; struct aws_socket *listener; struct aws_mqtt_client *mqtt_client; struct aws_mqtt_client_connection *mqtt_connection; struct aws_socket_options socket_options; bool session_present; bool connection_completed; bool connection_success; bool connection_failure; bool client_disconnect_completed; bool server_disconnect_completed; bool connection_interrupted; bool connection_resumed; bool subscribe_completed; bool listener_destroyed; bool connection_terminated; int interruption_error; int subscribe_complete_error; int op_complete_error; enum aws_mqtt_connect_return_code mqtt_return_code; int error; struct aws_condition_variable cvar; struct aws_mutex lock; /* any published messages from mock server, that you may not subscribe to. (Which should not happen in real life) */ struct aws_array_list any_published_messages; /* list of struct received_publish_packet */ size_t any_publishes_received; size_t expected_any_publishes; /* the published messages from mock server, that you did subscribe to. 
*/ struct aws_array_list published_messages; /* list of struct received_publish_packet */ size_t publishes_received; size_t expected_publishes; /* The returned QoS from mock server */ struct aws_array_list qos_returned; /* list of uint_8 */ size_t ops_completed; size_t expected_ops_completed; size_t connection_close_calls; /* All of the times on_connection_closed has been called */ size_t connection_termination_calls; /* How many times on_connection_termination has been called, should be 1 */ }; static struct mqtt_connection_state_test test_data = {0}; static void s_on_any_publish_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata); static void s_on_incoming_channel_setup_fn( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct mqtt_connection_state_test *state_test_data = user_data; state_test_data->error = error_code; if (!error_code) { aws_mutex_lock(&state_test_data->lock); state_test_data->server_disconnect_completed = false; aws_mutex_unlock(&state_test_data->lock); AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "server channel setup completed"); state_test_data->server_channel = channel; struct aws_channel_slot *test_handler_slot = aws_channel_slot_new(channel); aws_channel_slot_insert_end(channel, test_handler_slot); mqtt_mock_server_handler_update_slot(state_test_data->mock_server, test_handler_slot); aws_channel_slot_set_handler(test_handler_slot, state_test_data->mock_server); } } static void s_on_incoming_channel_shutdown_fn( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; (void)channel; struct mqtt_connection_state_test *state_test_data = user_data; aws_mutex_lock(&state_test_data->lock); state_test_data->server_disconnect_completed = true; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "server channel shutdown completed"); aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_on_listener_destroy(struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; struct mqtt_connection_state_test *state_test_data = user_data; aws_mutex_lock(&state_test_data->lock); state_test_data->listener_destroyed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_listener_destroyed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->listener_destroyed; } static void s_wait_on_listener_cleanup(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_listener_destroyed, state_test_data); aws_mutex_unlock(&state_test_data->lock); } static void s_on_connection_interrupted(struct aws_mqtt_client_connection *connection, int error_code, void *userdata) { (void)connection; (void)error_code; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->connection_interrupted = true; state_test_data->interruption_error = error_code; aws_mutex_unlock(&state_test_data->lock); AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "connection interrupted"); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_connection_interrupted(void *arg) { struct 
mqtt_connection_state_test *state_test_data = arg; return state_test_data->connection_interrupted; } static void s_wait_for_interrupt_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_connection_interrupted, state_test_data); state_test_data->connection_interrupted = false; aws_mutex_unlock(&state_test_data->lock); } static void s_on_connection_resumed( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; (void)return_code; (void)session_present; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "reconnect completed"); struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->connection_resumed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_connection_resumed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->connection_resumed; } static void s_wait_for_reconnect_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_connection_resumed, state_test_data); state_test_data->connection_resumed = false; aws_mutex_unlock(&state_test_data->lock); } static void s_on_connection_success( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->session_present = session_present; state_test_data->mqtt_return_code = return_code; state_test_data->connection_success = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_on_connection_failure(struct aws_mqtt_client_connection *connection, int error_code, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->error = error_code; state_test_data->connection_failure = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_connection_succeed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->connection_success; } static bool s_is_connection_failed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->connection_failure; } static void s_wait_for_connection_to_succeed(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_connection_succeed, state_test_data); state_test_data->connection_success = false; aws_mutex_unlock(&state_test_data->lock); } static void s_wait_for_connection_to_fail(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_connection_failed, state_test_data); state_test_data->connection_failure = false; aws_mutex_unlock(&state_test_data->lock); } static bool s_is_termination_completed(void *arg) { struct 
mqtt_connection_state_test *state_test_data = arg; return state_test_data->connection_terminated; } static void s_wait_for_termination_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_termination_completed, state_test_data); state_test_data->connection_terminated = false; aws_mutex_unlock(&state_test_data->lock); } static void s_on_connection_termination_fn(void *userdata) { struct mqtt_connection_state_test *state_test_data = (struct mqtt_connection_state_test *)userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->connection_termination_calls += 1; state_test_data->connection_terminated = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } /** sets up a unix domain socket server and socket options. Creates an mqtt connection configured to use * the domain socket. */ static int s_setup_mqtt_server_fn(struct aws_allocator *allocator, void *ctx) { aws_mqtt_library_init(allocator); struct mqtt_connection_state_test *state_test_data = ctx; AWS_ZERO_STRUCT(*state_test_data); state_test_data->allocator = allocator; state_test_data->el_group = aws_event_loop_group_new_default(allocator, 1, NULL); state_test_data->mock_server = new_mqtt_mock_server(allocator); ASSERT_NOT_NULL(state_test_data->mock_server); state_test_data->server_bootstrap = aws_server_bootstrap_new(allocator, state_test_data->el_group); ASSERT_NOT_NULL(state_test_data->server_bootstrap); struct aws_socket_options socket_options = { .connect_timeout_ms = 100, .domain = AWS_SOCKET_LOCAL, }; state_test_data->socket_options = socket_options; ASSERT_SUCCESS(aws_condition_variable_init(&state_test_data->cvar)); ASSERT_SUCCESS(aws_mutex_init(&state_test_data->lock)); aws_socket_endpoint_init_local_address_for_test(&state_test_data->endpoint); struct aws_server_socket_channel_bootstrap_options server_bootstrap_options = { .bootstrap = state_test_data->server_bootstrap, .host_name = state_test_data->endpoint.address, .port = state_test_data->endpoint.port, .socket_options = &state_test_data->socket_options, .incoming_callback = s_on_incoming_channel_setup_fn, .shutdown_callback = s_on_incoming_channel_shutdown_fn, .destroy_callback = s_on_listener_destroy, .user_data = state_test_data, }; state_test_data->listener = aws_server_bootstrap_new_socket_listener(&server_bootstrap_options); ASSERT_NOT_NULL(state_test_data->listener); struct aws_host_resolver_default_options resolver_options = { .el_group = state_test_data->el_group, .max_entries = 1, }; state_test_data->host_resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = state_test_data->el_group, .user_data = state_test_data, .host_resolver = state_test_data->host_resolver, }; state_test_data->client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); state_test_data->mqtt_client = aws_mqtt_client_new(allocator, state_test_data->client_bootstrap); state_test_data->mqtt_connection = aws_mqtt_client_connection_new(state_test_data->mqtt_client); ASSERT_NOT_NULL(state_test_data->mqtt_connection); ASSERT_SUCCESS(aws_mqtt_client_connection_set_connection_interruption_handlers( state_test_data->mqtt_connection, s_on_connection_interrupted, state_test_data, s_on_connection_resumed, state_test_data)); ASSERT_SUCCESS(aws_mqtt_client_connection_set_connection_result_handlers( 
state_test_data->mqtt_connection, s_on_connection_success, state_test_data, s_on_connection_failure, state_test_data)); ASSERT_SUCCESS(aws_mqtt_client_connection_set_on_any_publish_handler( state_test_data->mqtt_connection, s_on_any_publish_received, state_test_data)); ASSERT_SUCCESS(aws_array_list_init_dynamic( &state_test_data->published_messages, allocator, 4, sizeof(struct received_publish_packet))); ASSERT_SUCCESS(aws_array_list_init_dynamic( &state_test_data->any_published_messages, allocator, 4, sizeof(struct received_publish_packet))); ASSERT_SUCCESS(aws_array_list_init_dynamic(&state_test_data->qos_returned, allocator, 2, sizeof(uint8_t))); ASSERT_SUCCESS(aws_mqtt_client_connection_set_connection_termination_handler( state_test_data->mqtt_connection, s_on_connection_termination_fn, state_test_data)); return AWS_OP_SUCCESS; } static void s_received_publish_packet_list_clean_up(struct aws_array_list *list) { for (size_t i = 0; i < aws_array_list_length(list); ++i) { struct received_publish_packet *val_ptr = NULL; aws_array_list_get_at_ptr(list, (void **)&val_ptr, i); aws_byte_buf_clean_up(&val_ptr->payload); aws_byte_buf_clean_up(&val_ptr->topic); } aws_array_list_clean_up(list); } static int s_clean_up_mqtt_server_fn(struct aws_allocator *allocator, int setup_result, void *ctx) { (void)allocator; if (!setup_result) { struct mqtt_connection_state_test *state_test_data = ctx; s_received_publish_packet_list_clean_up(&state_test_data->published_messages); s_received_publish_packet_list_clean_up(&state_test_data->any_published_messages); aws_array_list_clean_up(&state_test_data->qos_returned); aws_mqtt_client_connection_release(state_test_data->mqtt_connection); s_wait_for_termination_to_complete(state_test_data); ASSERT_UINT_EQUALS(1, state_test_data->connection_termination_calls); aws_mqtt_client_release(state_test_data->mqtt_client); aws_client_bootstrap_release(state_test_data->client_bootstrap); aws_host_resolver_release(state_test_data->host_resolver); aws_server_bootstrap_destroy_socket_listener(state_test_data->server_bootstrap, state_test_data->listener); s_wait_on_listener_cleanup(state_test_data); aws_server_bootstrap_release(state_test_data->server_bootstrap); aws_event_loop_group_release(state_test_data->el_group); destroy_mqtt_mock_server(state_test_data->mock_server); } aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static void s_on_connection_complete_fn( struct aws_mqtt_client_connection *connection, int error_code, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->session_present = session_present; state_test_data->mqtt_return_code = return_code; state_test_data->error = error_code; state_test_data->connection_completed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_connection_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->connection_completed; } static void s_wait_for_connection_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_connection_completed, state_test_data); state_test_data->connection_completed = false; aws_mutex_unlock(&state_test_data->lock); } void s_on_disconnect_fn(struct aws_mqtt_client_connection 
*connection, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "disconnect completed"); aws_mutex_lock(&state_test_data->lock); state_test_data->client_disconnect_completed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_disconnect_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->client_disconnect_completed && state_test_data->server_disconnect_completed; } static void s_wait_for_disconnect_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_disconnect_completed, state_test_data); state_test_data->client_disconnect_completed = false; state_test_data->server_disconnect_completed = false; aws_mutex_unlock(&state_test_data->lock); } static void s_on_any_publish_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; struct aws_byte_buf payload_cp; aws_byte_buf_init_copy_from_cursor(&payload_cp, state_test_data->allocator, *payload); struct aws_byte_buf topic_cp; aws_byte_buf_init_copy_from_cursor(&topic_cp, state_test_data->allocator, *topic); struct received_publish_packet received_packet = { .payload = payload_cp, .topic = topic_cp, .dup = dup, .qos = qos, .retain = retain, }; aws_mutex_lock(&state_test_data->lock); aws_array_list_push_back(&state_test_data->any_published_messages, &received_packet); state_test_data->any_publishes_received++; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_any_publish_received(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->any_publishes_received == state_test_data->expected_any_publishes; } static void s_wait_for_any_publish(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_any_publish_received, state_test_data); state_test_data->any_publishes_received = 0; state_test_data->expected_any_publishes = 0; aws_mutex_unlock(&state_test_data->lock); } static void s_on_publish_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; (void)topic; struct mqtt_connection_state_test *state_test_data = userdata; struct aws_byte_buf payload_cp; aws_byte_buf_init_copy_from_cursor(&payload_cp, state_test_data->allocator, *payload); struct aws_byte_buf topic_cp; aws_byte_buf_init_copy_from_cursor(&topic_cp, state_test_data->allocator, *topic); struct received_publish_packet received_packet = { .payload = payload_cp, .topic = topic_cp, .dup = dup, .qos = qos, .retain = retain, }; aws_mutex_lock(&state_test_data->lock); aws_array_list_push_back(&state_test_data->published_messages, &received_packet); state_test_data->publishes_received++; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_publish_received(void *arg) { struct 
mqtt_connection_state_test *state_test_data = arg; return state_test_data->publishes_received == state_test_data->expected_publishes; } static void s_wait_for_publish(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_publish_received, state_test_data); state_test_data->publishes_received = 0; state_test_data->expected_publishes = 0; aws_mutex_unlock(&state_test_data->lock); } static void s_on_suback( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)topic; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); if (!error_code) { aws_array_list_push_back(&state_test_data->qos_returned, &qos); } state_test_data->subscribe_completed = true; state_test_data->subscribe_complete_error = error_code; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_subscribe_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->subscribe_completed; } static void s_wait_for_subscribe_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_is_subscribe_completed, state_test_data); state_test_data->subscribe_completed = false; aws_mutex_unlock(&state_test_data->lock); } static void s_on_multi_suback( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_array_list *topic_subacks, /* contains aws_mqtt_topic_subscription pointers */ int error_code, void *userdata) { (void)connection; (void)packet_id; (void)topic_subacks; (void)error_code; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->subscribe_completed = true; if (!error_code) { size_t length = aws_array_list_length(topic_subacks); for (size_t i = 0; i < length; ++i) { struct aws_mqtt_topic_subscription *subscription = NULL; aws_array_list_get_at(topic_subacks, &subscription, i); aws_array_list_push_back(&state_test_data->qos_returned, &subscription->qos); } } aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_on_op_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata) { (void)connection; (void)packet_id; struct mqtt_connection_state_test *state_test_data = userdata; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "pub op completed"); aws_mutex_lock(&state_test_data->lock); state_test_data->ops_completed++; state_test_data->op_complete_error = error_code; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_ops_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->ops_completed == state_test_data->expected_ops_completed; } static void s_wait_for_ops_completed(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_for_pred( &state_test_data->cvar, &state_test_data->lock, 10000000000, s_is_ops_completed, state_test_data); aws_mutex_unlock(&state_test_data->lock); } /* * Makes an Mqtt connect call, then a 
 * disconnect. Then verifies a CONNECT and DISCONNECT were sent. */
static int s_test_mqtt_connect_disconnect_fn(struct aws_allocator *allocator, void *ctx) {
    (void)allocator;
    struct mqtt_connection_state_test *state_test_data = ctx;

    struct aws_mqtt_connection_options connection_options = {
        .user_data = state_test_data,
        .clean_session = false,
        .client_id = aws_byte_cursor_from_c_str("client1234"),
        .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address),
        .socket_options = &state_test_data->socket_options,
        .on_connection_complete = s_on_connection_complete_fn,
    };

    ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options));
    s_wait_for_connection_to_complete(state_test_data);

    ASSERT_SUCCESS(
        aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data));
    s_wait_for_disconnect_to_complete(state_test_data);

    /* Decode all received packets by mock server */
    ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server));
    ASSERT_UINT_EQUALS(2, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server));
    struct mqtt_decoded_packet *received_packet =
        mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0);
    ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type);
    ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session);
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id));
    received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1);
    ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type);

    return AWS_OP_SUCCESS;
}

AWS_TEST_CASE_FIXTURE(
    mqtt_connect_disconnect,
    s_setup_mqtt_server_fn,
    s_test_mqtt_connect_disconnect_fn,
    s_clean_up_mqtt_server_fn,
    &test_data)

/*
 * Makes an Mqtt connect call and sets will and login information for the connection. Validates that this
 * information is correctly included in the CONNECT packet.
*/ static int s_test_mqtt_connect_set_will_login_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_byte_cursor will_payload = aws_byte_cursor_from_c_str("this is a will."); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("test_topic"); struct aws_byte_cursor username = aws_byte_cursor_from_c_str("user name"); struct aws_byte_cursor password = aws_byte_cursor_from_c_str("password"); enum aws_mqtt_qos will_qos = AWS_MQTT_QOS_AT_LEAST_ONCE; ASSERT_SUCCESS(aws_mqtt_client_connection_set_will( state_test_data->mqtt_connection, &topic, will_qos, true /*retain*/, &will_payload)); ASSERT_SUCCESS(aws_mqtt_client_connection_set_login(state_test_data->mqtt_connection, &username, &password)); struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(2, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); /* CONNECT packet */ struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); /* validate the received will */ ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->will_message, &will_payload)); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->will_topic, &topic)); ASSERT_UINT_EQUALS(will_qos, received_packet->will_qos); ASSERT_TRUE(true == received_packet->will_retain); /* validate the received login information */ ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->username, &username)); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->password, &password)); /* DISCONNECT packet */ received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); /* Connect to the mock server again. 
     * If set will & login are not called again before the next connect, the previously configured
     * will & login information will still be there and will be sent to the server again */
    ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options));
    s_wait_for_connection_to_complete(state_test_data);

    ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server));
    /* The second CONNECT packet */
    received_packet = mqtt_mock_server_get_latest_decoded_packet(state_test_data->mock_server);
    ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type);
    ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session);
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id));
    /* validate the received will */
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->will_message, &will_payload));
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->will_topic, &topic));
    ASSERT_UINT_EQUALS(will_qos, received_packet->will_qos);
    ASSERT_TRUE(true == received_packet->will_retain);
    /* validate the received login information */
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->username, &username));
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->password, &password));

    /* disconnect */
    ASSERT_SUCCESS(
        aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data));
    s_wait_for_disconnect_to_complete(state_test_data);

    /* Set a new will & login before the next connect; the next CONNECT packet will contain the new information */
    struct aws_byte_cursor new_will_payload = aws_byte_cursor_from_c_str("this is a new will.");
    struct aws_byte_cursor new_topic = aws_byte_cursor_from_c_str("test_topic_New");
    struct aws_byte_cursor new_username = aws_byte_cursor_from_c_str("new user name");
    struct aws_byte_cursor new_password = aws_byte_cursor_from_c_str("new password");
    enum aws_mqtt_qos new_will_qos = AWS_MQTT_QOS_AT_MOST_ONCE;
    ASSERT_SUCCESS(aws_mqtt_client_connection_set_will(
        state_test_data->mqtt_connection, &new_topic, new_will_qos, true /*retain*/, &new_will_payload));
    ASSERT_SUCCESS(
        aws_mqtt_client_connection_set_login(state_test_data->mqtt_connection, &new_username, &new_password));

    /* connect again */
    ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options));
    s_wait_for_connection_to_complete(state_test_data);

    ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server));
    /* The third CONNECT packet */
    received_packet = mqtt_mock_server_get_latest_decoded_packet(state_test_data->mock_server);
    ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type);
    ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session);
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id));
    /* validate the received will */
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->will_message, &new_will_payload));
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->will_topic, &new_topic));
    ASSERT_UINT_EQUALS(new_will_qos, received_packet->will_qos);
    ASSERT_TRUE(true == received_packet->will_retain);
    /* validate the received login information */
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->username, &new_username));
    ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->password, &new_password));

    /* disconnect.
FINISHED */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_set_will_login, s_setup_mqtt_server_fn, s_test_mqtt_connect_set_will_login_fn, s_clean_up_mqtt_server_fn, &test_data) #define MIN_RECONNECT_DELAY_SECONDS 5 #define MAX_RECONNECT_DELAY_SECONDS 120 /* * Makes a CONNECT, then the server hangs up, tests that the client reconnects on its own, then sends a DISCONNECT. * Also checks that the minimum reconnect time delay is honored. */ static int s_test_mqtt_connection_interrupted_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; aws_mqtt_client_connection_set_reconnect_timeout( state_test_data->mqtt_connection, MIN_RECONNECT_DELAY_SECONDS, MAX_RECONNECT_DELAY_SECONDS); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* shut it down and make sure the client automatically reconnects.*/ uint64_t now = 0; aws_high_res_clock_get_ticks(&now); uint64_t start_shutdown = now; aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_reconnect_to_complete(state_test_data); aws_high_res_clock_get_ticks(&now); uint64_t reconnect_complete = now; uint64_t elapsed_time = reconnect_complete - start_shutdown; ASSERT_TRUE( aws_timestamp_convert(elapsed_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL) >= MIN_RECONNECT_DELAY_SECONDS); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(3, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_interrupted, s_setup_mqtt_server_fn, s_test_mqtt_connection_interrupted_fn, s_clean_up_mqtt_server_fn, &test_data) /* Makes a CONNECT, with a 1 second keep alive ping interval, the mock is 
configured to not reply to the PING, * this should cause a timeout, then the PING responses are turned back on, and the client should automatically * reconnect. Then send a DISCONNECT. */ static int s_test_mqtt_connection_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, }; mqtt_mock_server_set_max_ping_resp(state_test_data->mock_server, 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* this should take about 1.1 seconds for the timeout and reconnect.*/ s_wait_for_reconnect_to_complete(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); ASSERT_INT_EQUALS(AWS_ERROR_MQTT_TIMEOUT, state_test_data->interruption_error); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_timeout, s_setup_mqtt_server_fn, s_test_mqtt_connection_timeout_fn, s_clean_up_mqtt_server_fn, &test_data) /* Test set on_any_publish handler. User can set on_any_publish handler to be called whenever any publish packet is * received */ static int s_test_mqtt_connection_any_publish_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor topic_1 = aws_byte_cursor_from_c_str("/test/topic1"); struct aws_byte_cursor topic_2 = aws_byte_cursor_from_c_str("/test/topic2"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* NOTE: mock server sends to client with no subscription at all, which should not happen in the real world! 
*/ state_test_data->expected_any_publishes = 2; struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &topic_1, &payload_1, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &topic_2, &payload_2, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); s_wait_for_any_publish(state_test_data); mqtt_mock_server_wait_for_pubacks(state_test_data->mock_server, 2); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); /* CONNECT two PUBACK DISCONNECT */ ASSERT_UINT_EQUALS(4, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 3); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); /* Check the received publish packet from the client side */ ASSERT_UINT_EQUALS(2, aws_array_list_length(&state_test_data->any_published_messages)); struct received_publish_packet *publish_msg = NULL; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->any_published_messages, (void **)&publish_msg, 0)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&topic_1, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_1, &publish_msg->payload)); ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->any_published_messages, (void **)&publish_msg, 1)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&topic_2, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_2, &publish_msg->payload)); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_any_publish, s_setup_mqtt_server_fn, s_test_mqtt_connection_any_publish_fn, s_clean_up_mqtt_server_fn, &test_data) /* Makes a CONNECT, channel is successfully setup, but the server never sends a connack, make sure we timeout. 
*/ static int s_test_mqtt_connection_connack_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, }; mqtt_mock_server_set_max_connack(state_test_data->mock_server, 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); ASSERT_INT_EQUALS(AWS_ERROR_MQTT_TIMEOUT, state_test_data->error); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_connack_timeout, s_setup_mqtt_server_fn, s_test_mqtt_connection_connack_timeout_fn, s_clean_up_mqtt_server_fn, &test_data) /* Use the connack timeout to test the connection failure callback */ static int s_test_mqtt_connection_failure_callback_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, }; mqtt_mock_server_set_max_connack(state_test_data->mock_server, 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_fail(state_test_data); ASSERT_INT_EQUALS(AWS_ERROR_MQTT_TIMEOUT, state_test_data->error); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_failure_callback, s_setup_mqtt_server_fn, s_test_mqtt_connection_failure_callback_fn, s_clean_up_mqtt_server_fn, &test_data) /* Quick test the connection succeed callback */ static int s_test_mqtt_connection_success_callback_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_succeed(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(1, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); 
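    /* mqtt_mock_server_decode_packets() parses every packet the mock server received into
     * mqtt_decoded_packet entries, so the checks around this point verify the CONNECT the client
     * actually wrote to the wire (packet type, clean_session flag, client identifier) rather than
     * only the client-side callback state. */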
ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); // Disconnect and finish ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_success_callback, s_setup_mqtt_server_fn, s_test_mqtt_connection_success_callback_fn, s_clean_up_mqtt_server_fn, &test_data) /* Subscribe to a topic prior to connection, make a CONNECT, have the server send PUBLISH messages, * make sure they're received, then send a DISCONNECT. */ static int s_test_mqtt_subscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor sub_topic = aws_byte_cursor_from_c_str("/test/topic"); uint16_t packet_id = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data); ASSERT_TRUE(packet_id > 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); s_wait_for_subscribe_to_complete(state_test_data); state_test_data->expected_publishes = 2; state_test_data->expected_any_publishes = 2; struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic, &payload_1, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, true /*retain*/)); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic, &payload_2, true /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); s_wait_for_publish(state_test_data); s_wait_for_any_publish(state_test_data); mqtt_mock_server_wait_for_pubacks(state_test_data->mock_server, 2); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(5, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_SUBSCRIBE, received_packet->type); ASSERT_UINT_EQUALS(1, aws_array_list_length(&received_packet->sub_topic_filters)); struct aws_mqtt_subscription val; ASSERT_SUCCESS(aws_array_list_front(&received_packet->sub_topic_filters, &val)); 
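    /* The decoded SUBSCRIBE packet stores its topic entries in sub_topic_filters as
     * aws_mqtt_subscription values (topic filter plus requested QoS); the packet identifier is also
     * checked against the id returned by aws_mqtt_client_connection_subscribe() so the request and
     * the decoded packet can be matched up. */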
ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &sub_topic)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_UINT_EQUALS(packet_id, received_packet->packet_identifier); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 3); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 4); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); /* Check PUBLISH packets received via subscription callback */ ASSERT_UINT_EQUALS(2, aws_array_list_length(&state_test_data->published_messages)); struct received_publish_packet *publish_msg = NULL; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void **)&publish_msg, 0)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_1, &publish_msg->payload)); ASSERT_FALSE(publish_msg->dup); ASSERT_TRUE(publish_msg->retain); ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void **)&publish_msg, 1)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_2, &publish_msg->payload)); ASSERT_TRUE(publish_msg->dup); ASSERT_FALSE(publish_msg->retain); /* Check PUBLISH packets received via on_any_publish callback */ ASSERT_UINT_EQUALS(2, aws_array_list_length(&state_test_data->any_published_messages)); ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->any_published_messages, (void **)&publish_msg, 0)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_1, &publish_msg->payload)); ASSERT_FALSE(publish_msg->dup); ASSERT_TRUE(publish_msg->retain); ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->any_published_messages, (void **)&publish_msg, 1)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_2, &publish_msg->payload)); ASSERT_TRUE(publish_msg->dup); ASSERT_FALSE(publish_msg->retain); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_subscribe, s_setup_mqtt_server_fn, s_test_mqtt_subscribe_fn, s_clean_up_mqtt_server_fn, &test_data) static int s_test_mqtt_subscribe_incoming_dup_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor subscribed_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor any_topic = aws_byte_cursor_from_c_str("/a/b/c"); uint16_t packet_id = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &subscribed_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data); ASSERT_TRUE(packet_id > 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); 
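    /* The subscribe was issued before connect, so its SUBACK is only awaited once the connection completes.
     * Below, the mock server re-sends the same PUBLISH packet id with the dup flag set on the retransmissions:
     * messages on the subscribed topic should reach both the per-subscription callback and on_any_publish,
     * while messages on the unsubscribed topic should only reach on_any_publish. */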
s_wait_for_connection_to_complete(state_test_data); s_wait_for_subscribe_to_complete(state_test_data); state_test_data->expected_publishes = 4; state_test_data->expected_any_publishes = 8; struct aws_byte_cursor subscribed_payload = aws_byte_cursor_from_c_str("Subscribed"); for (size_t i = 0; i < 4; ++i) { ASSERT_SUCCESS(mqtt_mock_server_send_publish_by_id( state_test_data->mock_server, 1111, &subscribed_topic, &subscribed_payload, i > 0 /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, true /*retain*/)); } struct aws_byte_cursor any_payload = aws_byte_cursor_from_c_str("Not subscribed. On-any only."); for (size_t i = 0; i < 4; ++i) { ASSERT_SUCCESS(mqtt_mock_server_send_publish_by_id( state_test_data->mock_server, 1234, &any_topic, &any_payload, i > 0 /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); } s_wait_for_publish(state_test_data); s_wait_for_any_publish(state_test_data); mqtt_mock_server_wait_for_pubacks(state_test_data->mock_server, 8); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(11, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_SUBSCRIBE, received_packet->type); ASSERT_UINT_EQUALS(1, aws_array_list_length(&received_packet->sub_topic_filters)); struct aws_mqtt_subscription val; ASSERT_SUCCESS(aws_array_list_front(&received_packet->sub_topic_filters, &val)); ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &subscribed_topic)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_UINT_EQUALS(packet_id, received_packet->packet_identifier); for (size_t i = 0; i < 8; ++i) { received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2 + i); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); } received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 10); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); /* Check PUBLISH packets received via subscription callback */ ASSERT_UINT_EQUALS(4, aws_array_list_length(&state_test_data->published_messages)); for (size_t i = 0; i < 4; ++i) { struct received_publish_packet *publish_msg = NULL; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void **)&publish_msg, i)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&subscribed_topic, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&subscribed_payload, &publish_msg->payload)); ASSERT_INT_EQUALS((i != 0) ? 1 : 0, publish_msg->dup ? 
1 : 0); ASSERT_TRUE(publish_msg->retain); } /* Check PUBLISH packets received via on_any_publish callback */ ASSERT_UINT_EQUALS(8, aws_array_list_length(&state_test_data->any_published_messages)); for (size_t i = 0; i < 4; ++i) { struct received_publish_packet *publish_msg = NULL; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->any_published_messages, (void **)&publish_msg, i)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&subscribed_topic, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&subscribed_payload, &publish_msg->payload)); ASSERT_INT_EQUALS((i > 0) ? 1 : 0, publish_msg->dup ? 1 : 0); ASSERT_TRUE(publish_msg->retain); } for (size_t i = 4; i < 8; ++i) { struct received_publish_packet *publish_msg = NULL; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->any_published_messages, (void **)&publish_msg, i)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&any_topic, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&any_payload, &publish_msg->payload)); ASSERT_INT_EQUALS((i > 4) ? 1 : 0, publish_msg->dup ? 1 : 0); ASSERT_FALSE(publish_msg->retain); } return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_subscribe_incoming_dup, s_setup_mqtt_server_fn, s_test_mqtt_subscribe_incoming_dup_fn, s_clean_up_mqtt_server_fn, &test_data) /* Subscribe to a topic and broker returns a SUBACK with failure return code, the subscribe should fail */ static int s_test_mqtt_connect_subscribe_fail_from_broker_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Disable the auto ACK packets sent by the server, we will send failure SUBACK */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); struct aws_byte_cursor sub_topic = aws_byte_cursor_from_c_str("/test/topic"); uint16_t packet_id = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data); ASSERT_TRUE(packet_id > 0); ASSERT_SUCCESS(mqtt_mock_server_send_single_suback(state_test_data->mock_server, packet_id, AWS_MQTT_QOS_FAILURE)); s_wait_for_subscribe_to_complete(state_test_data); /* Check the subscribe returned QoS is failure */ size_t length = aws_array_list_length(&state_test_data->qos_returned); ASSERT_UINT_EQUALS(1, length); uint8_t qos = 0; ASSERT_SUCCESS(aws_array_list_get_at(&state_test_data->qos_returned, &qos, 0)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_FAILURE, qos); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_subscribe_fail_from_broker, s_setup_mqtt_server_fn, s_test_mqtt_connect_subscribe_fail_from_broker_fn, s_clean_up_mqtt_server_fn, &test_data) /* Subscribe to multiple topics prior to connection, make a CONNECT, have the server send PUBLISH messages, * make sure they're received, then send a 
DISCONNECT. */ static int s_test_mqtt_subscribe_multi_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor sub_topic_1 = aws_byte_cursor_from_c_str("/test/topic1"); struct aws_byte_cursor sub_topic_2 = aws_byte_cursor_from_c_str("/test/topic2"); struct aws_mqtt_topic_subscription sub1 = { .topic = sub_topic_1, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .on_publish = s_on_publish_received, .on_cleanup = NULL, .on_publish_ud = state_test_data, }; struct aws_mqtt_topic_subscription sub2 = { .topic = sub_topic_2, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .on_publish = s_on_publish_received, .on_cleanup = NULL, .on_publish_ud = state_test_data, }; struct aws_array_list topic_filters; size_t list_len = 2; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, static_buf, list_len * sizeof(struct aws_mqtt_topic_subscription)); aws_array_list_init_static(&topic_filters, static_buf, list_len, sizeof(struct aws_mqtt_topic_subscription)); aws_array_list_push_back(&topic_filters, &sub1); aws_array_list_push_back(&topic_filters, &sub2); uint16_t packet_id = aws_mqtt_client_connection_subscribe_multiple( state_test_data->mqtt_connection, &topic_filters, s_on_multi_suback, state_test_data); ASSERT_TRUE(packet_id > 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); s_wait_for_subscribe_to_complete(state_test_data); /* Check the subscribe returned QoS is expected */ size_t length = aws_array_list_length(&state_test_data->qos_returned); ASSERT_UINT_EQUALS(2, length); uint8_t qos = 0; ASSERT_SUCCESS(aws_array_list_get_at(&state_test_data->qos_returned, &qos, 0)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_EXACTLY_ONCE, qos); ASSERT_SUCCESS(aws_array_list_get_at(&state_test_data->qos_returned, &qos, 1)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_EXACTLY_ONCE, qos); state_test_data->expected_publishes = 2; struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic_1, &payload_1, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic_2, &payload_2, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); s_wait_for_publish(state_test_data); /* Let's do another publish on a topic that is not subscribed by client. 
* This can happen if the Server automatically assigned a subscription to the Client */ state_test_data->expected_any_publishes = 3; struct aws_byte_cursor payload_3 = aws_byte_cursor_from_c_str("Test Message 3"); struct aws_byte_cursor topic_3 = aws_byte_cursor_from_c_str("/test/topic3"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &topic_3, &payload_3, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); s_wait_for_any_publish(state_test_data); mqtt_mock_server_wait_for_pubacks(state_test_data->mock_server, 3); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(6, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_SUBSCRIBE, received_packet->type); ASSERT_UINT_EQUALS(2, aws_array_list_length(&received_packet->sub_topic_filters)); struct aws_mqtt_subscription val; ASSERT_SUCCESS(aws_array_list_front(&received_packet->sub_topic_filters, &val)); ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &sub_topic_1)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_SUCCESS(aws_array_list_back(&received_packet->sub_topic_filters, &val)); ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &sub_topic_2)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_UINT_EQUALS(packet_id, received_packet->packet_identifier); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 3); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 4); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 5); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); /* Only two packets should be recorded by the published_messages, but all the three packets will be recorded by * any_published_messages */ ASSERT_UINT_EQUALS(2, aws_array_list_length(&state_test_data->published_messages)); ASSERT_UINT_EQUALS(3, aws_array_list_length(&state_test_data->any_published_messages)); struct received_publish_packet *publish_msg = NULL; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void **)&publish_msg, 0)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic_1, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_1, &publish_msg->payload)); ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void **)&publish_msg, 1)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic_2, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_2, &publish_msg->payload)); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_subscribe_multi, 
s_setup_mqtt_server_fn, s_test_mqtt_subscribe_multi_fn, s_clean_up_mqtt_server_fn, &test_data) /* Subscribe to multiple topics prior to connection, make a CONNECT, have the server send PUBLISH messages, unsubscribe * to a topic, have the server send PUBLISH messages again, make sure the unsubscribed topic callback will not be fired */ static int s_test_mqtt_unsubscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor sub_topic_1 = aws_byte_cursor_from_c_str("/test/topic1"); struct aws_byte_cursor sub_topic_2 = aws_byte_cursor_from_c_str("/test/topic2"); struct aws_mqtt_topic_subscription sub1 = { .topic = sub_topic_1, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .on_publish = s_on_publish_received, .on_cleanup = NULL, .on_publish_ud = state_test_data, }; struct aws_mqtt_topic_subscription sub2 = { .topic = sub_topic_2, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .on_publish = s_on_publish_received, .on_cleanup = NULL, .on_publish_ud = state_test_data, }; struct aws_array_list topic_filters; size_t list_len = 2; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, static_buf, list_len * sizeof(struct aws_mqtt_topic_subscription)); aws_array_list_init_static(&topic_filters, static_buf, list_len, sizeof(struct aws_mqtt_topic_subscription)); aws_array_list_push_back(&topic_filters, &sub1); aws_array_list_push_back(&topic_filters, &sub2); uint16_t sub_packet_id = aws_mqtt_client_connection_subscribe_multiple( state_test_data->mqtt_connection, &topic_filters, s_on_multi_suback, state_test_data); ASSERT_TRUE(sub_packet_id > 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); s_wait_for_subscribe_to_complete(state_test_data); state_test_data->expected_any_publishes = 2; struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic_1, &payload_1, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic_2, &payload_2, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); s_wait_for_any_publish(state_test_data); mqtt_mock_server_wait_for_pubacks(state_test_data->mock_server, 2); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* unsubscribe to the first topic */ uint16_t unsub_packet_id = aws_mqtt_client_connection_unsubscribe( state_test_data->mqtt_connection, &sub_topic_1, s_on_op_complete, state_test_data); ASSERT_TRUE(unsub_packet_id > 0); /* Even when the UNSUBACK has not received, the client will not invoke the on_pub callback for that topic */ ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic_1, &payload_1, false /*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); ASSERT_SUCCESS(mqtt_mock_server_send_publish( state_test_data->mock_server, &sub_topic_2, &payload_2, false 
/*dup*/, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/)); state_test_data->expected_any_publishes = 2; s_wait_for_any_publish(state_test_data); mqtt_mock_server_wait_for_pubacks(state_test_data->mock_server, 2); s_wait_for_ops_completed(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_SUBSCRIBE, received_packet->type); ASSERT_UINT_EQUALS(2, aws_array_list_length(&received_packet->sub_topic_filters)); struct aws_mqtt_subscription val; ASSERT_SUCCESS(aws_array_list_front(&received_packet->sub_topic_filters, &val)); ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &sub_topic_1)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_SUCCESS(aws_array_list_back(&received_packet->sub_topic_filters, &val)); ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &sub_topic_2)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_UINT_EQUALS(sub_packet_id, received_packet->packet_identifier); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 3); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 4); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_UNSUBSCRIBE, received_packet->type); ASSERT_UINT_EQUALS(1, aws_array_list_length(&received_packet->unsub_topic_filters)); struct aws_byte_cursor val_cur; ASSERT_SUCCESS(aws_array_list_front(&received_packet->unsub_topic_filters, &val_cur)); ASSERT_TRUE(aws_byte_cursor_eq(&val_cur, &sub_topic_1)); ASSERT_UINT_EQUALS(unsub_packet_id, received_packet->packet_identifier); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 5); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 6); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBACK, received_packet->type); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 7); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); /* Only three packets should be recorded by the published_messages, but all the four packets will be recorded by * any_published_messages */ ASSERT_UINT_EQUALS(3, aws_array_list_length(&state_test_data->published_messages)); ASSERT_UINT_EQUALS(4, aws_array_list_length(&state_test_data->any_published_messages)); struct received_publish_packet *publish_msg = NULL; ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void **)&publish_msg, 0)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic_1, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_1, &publish_msg->payload)); ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void 
**)&publish_msg, 1)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic_2, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_2, &publish_msg->payload)); ASSERT_SUCCESS(aws_array_list_get_at_ptr(&state_test_data->published_messages, (void **)&publish_msg, 2)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&sub_topic_2, &publish_msg->topic)); ASSERT_TRUE(aws_byte_cursor_eq_byte_buf(&payload_2, &publish_msg->payload)); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_unsubscribe, s_setup_mqtt_server_fn, s_test_mqtt_unsubscribe_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Subscribe to multiple topics prior to connection, make a CONNECT, have the server send PUBLISH messages, unsubscribe * to a topic, disconnect with the broker, make a connection with clean_session true, then call resubscribe, client will * successfully resubscribe to the old topics. */ static int s_test_mqtt_resubscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor sub_topic_1 = aws_byte_cursor_from_c_str("/test/topic1"); struct aws_byte_cursor sub_topic_2 = aws_byte_cursor_from_c_str("/test/topic2"); struct aws_byte_cursor sub_topic_3 = aws_byte_cursor_from_c_str("/test/topic3"); struct aws_mqtt_topic_subscription sub1 = { .topic = sub_topic_1, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .on_publish = s_on_publish_received, .on_cleanup = NULL, .on_publish_ud = state_test_data, }; struct aws_mqtt_topic_subscription sub2 = { .topic = sub_topic_2, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .on_publish = s_on_publish_received, .on_cleanup = NULL, .on_publish_ud = state_test_data, }; struct aws_mqtt_topic_subscription sub3 = { .topic = sub_topic_3, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .on_publish = s_on_publish_received, .on_cleanup = NULL, .on_publish_ud = state_test_data, }; struct aws_array_list topic_filters; size_t list_len = 3; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, static_buf, list_len * sizeof(struct aws_mqtt_topic_subscription)); aws_array_list_init_static(&topic_filters, static_buf, list_len, sizeof(struct aws_mqtt_topic_subscription)); aws_array_list_push_back(&topic_filters, &sub1); aws_array_list_push_back(&topic_filters, &sub2); aws_array_list_push_back(&topic_filters, &sub3); uint16_t sub_packet_id = aws_mqtt_client_connection_subscribe_multiple( state_test_data->mqtt_connection, &topic_filters, s_on_multi_suback, state_test_data); ASSERT_TRUE(sub_packet_id > 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); s_wait_for_subscribe_to_complete(state_test_data); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* unsubscribe to the first topic */ uint16_t unsub_packet_id = aws_mqtt_client_connection_unsubscribe( state_test_data->mqtt_connection, &sub_topic_1, s_on_op_complete, state_test_data); ASSERT_TRUE(unsub_packet_id > 0); s_wait_for_ops_completed(state_test_data); /* client still subscribes to topic_2 & topic_3 */ ASSERT_SUCCESS( 
aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* reconnection to the same server */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Get all the packets out of the way */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); size_t packets_count = mqtt_mock_server_decoded_packets_count(state_test_data->mock_server); struct mqtt_decoded_packet *t_received_packet = mqtt_mock_server_get_latest_decoded_packet(state_test_data->mock_server); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, t_received_packet->type); /* resubscribes to topic_2 & topic_3 */ uint16_t resub_packet_id = aws_mqtt_resubscribe_existing_topics(state_test_data->mqtt_connection, s_on_multi_suback, state_test_data); ASSERT_TRUE(resub_packet_id > 0); s_wait_for_subscribe_to_complete(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, packets_count); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_SUBSCRIBE, received_packet->type); ASSERT_UINT_EQUALS(2, aws_array_list_length(&received_packet->sub_topic_filters)); struct aws_mqtt_subscription val; ASSERT_SUCCESS(aws_array_list_front(&received_packet->sub_topic_filters, &val)); ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &sub_topic_3)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_SUCCESS(aws_array_list_back(&received_packet->sub_topic_filters, &val)); ASSERT_TRUE(aws_byte_cursor_eq(&val.topic_filter, &sub_topic_2)); ASSERT_UINT_EQUALS(AWS_MQTT_QOS_AT_LEAST_ONCE, val.qos); ASSERT_UINT_EQUALS(resub_packet_id, received_packet->packet_identifier); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_resubscribe, s_setup_mqtt_server_fn, s_test_mqtt_resubscribe_fn, s_clean_up_mqtt_server_fn, &test_data) /* Make a CONNECT, PUBLISH to a topic, make sure server received, then send a DISCONNECT. 
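* The test also publishes with a NULL payload cursor to cover the zero-length-payload case.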
*/ static int s_test_mqtt_publish_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 3; aws_mutex_unlock(&state_test_data->lock); uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); uint16_t packet_id_2 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_2, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_2 > 0); /* Null payload case */ uint16_t packet_id_3 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, NULL, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_3 > 0); s_wait_for_ops_completed(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(5, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBLISH, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->topic_name, &pub_topic)); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->publish_payload, &payload_1)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBLISH, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->topic_name, &pub_topic)); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->publish_payload, &payload_2)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 3); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBLISH, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->topic_name, &pub_topic)); ASSERT_INT_EQUALS(0, received_packet->publish_payload.len); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 4); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); 
return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_publish, s_setup_mqtt_server_fn, s_test_mqtt_publish_fn, s_clean_up_mqtt_server_fn, &test_data) /* Make a CONNECT, PUBLISH to a topic, free the payload before the publish completes to make sure it's safe */ static int s_test_mqtt_publish_payload_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_buf buf_payload; struct aws_byte_cursor ori_payload = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(aws_byte_buf_init_copy_from_cursor(&buf_payload, allocator, ori_payload)); struct aws_byte_cursor payload_cursor = aws_byte_cursor_from_buf(&buf_payload); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint16_t packet_id = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_cursor, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id > 0); /* Clean up the payload buffer right away; the user does not need to keep it valid until the publish completes */ aws_byte_buf_clean_up(&buf_payload); s_wait_for_ops_completed(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_UINT_EQUALS(3, mqtt_mock_server_decoded_packets_count(state_test_data->mock_server)); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBLISH, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->topic_name, &pub_topic)); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->publish_payload, &ori_payload)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 2); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connect_publish_payload, s_setup_mqtt_server_fn, s_test_mqtt_publish_payload_fn, s_clean_up_mqtt_server_fn, &test_data) /** * CONNECT, force the server to hang up after a successful connection and block all CONNACKS, send PUBLISH messages, * then let the server send CONNACKS; make sure that when the client reconnects automatically, it sends the PUBLISH messages * that were sent while offline. Then send a DISCONNECT.
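* The mock server's CONNACK limit is dropped to zero to keep the client offline, then raised again so the reconnect can succeed.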
*/ static int s_test_mqtt_connection_offline_publish_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); mqtt_mock_server_set_max_connack(state_test_data->mock_server, 0); /* shut it down and make sure the client automatically reconnects.*/ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_interrupt_to_complete(state_test_data); state_test_data->server_disconnect_completed = false; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 2; aws_mutex_unlock(&state_test_data->lock); ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data) > 0); ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_2, s_on_op_complete, state_test_data) > 0); aws_mutex_lock(&state_test_data->lock); ASSERT_FALSE(state_test_data->connection_resumed); aws_mutex_unlock(&state_test_data->lock); mqtt_mock_server_set_max_connack(state_test_data->mock_server, SIZE_MAX); s_wait_for_ops_completed(state_test_data); aws_mutex_lock(&state_test_data->lock); ASSERT_TRUE(state_test_data->connection_resumed); aws_mutex_unlock(&state_test_data->lock); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Decode all received packets by mock server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); size_t packets_count = mqtt_mock_server_decoded_packets_count(state_test_data->mock_server); ASSERT_TRUE(packets_count >= 5 && packets_count <= 6); struct mqtt_decoded_packet *received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 0); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, 1); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); /* if message count is 6 there was an extra connect message due to the automatic reconnect behavior and timing. 
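* (The extra CONNECT only shifts the index at which the queued PUBLISH packets appear; the checks below account for it.)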
*/ size_t index = 2; if (packets_count == 6) { received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, index++); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_CONNECT, received_packet->type); ASSERT_UINT_EQUALS(connection_options.clean_session, received_packet->clean_session); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->client_identifier, &connection_options.client_id)); } received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, index++); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBLISH, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->topic_name, &pub_topic)); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->publish_payload, &payload_1)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, index++); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_PUBLISH, received_packet->type); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->topic_name, &pub_topic)); ASSERT_TRUE(aws_byte_cursor_eq(&received_packet->publish_payload, &payload_2)); received_packet = mqtt_mock_server_get_decoded_packet_by_index(state_test_data->mock_server, index++); ASSERT_UINT_EQUALS(AWS_MQTT_PACKET_DISCONNECT, received_packet->type); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_offline_publish, s_setup_mqtt_server_fn, s_test_mqtt_connection_offline_publish_fn, s_clean_up_mqtt_server_fn, &test_data) /** * CONNECT, force the server to hang up after a successful connection and block all CONNACKS, DISCONNECT while client is * reconnecting. Resource and pending requests are cleaned up correctly */ static int s_test_mqtt_connection_disconnect_while_reconnecting(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); mqtt_mock_server_set_max_connack(state_test_data->mock_server, 0); /* shut it down and the client automatically reconnects.*/ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_interrupt_to_complete(state_test_data); struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 2; aws_mutex_unlock(&state_test_data->lock); ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data) > 0); ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_2, s_on_op_complete, state_test_data) > 0); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); 
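/* The disconnect is requested while the client is still reconnecting; the connection release below must clean up the two pending QoS 1 publishes. */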
s_wait_for_disconnect_to_complete(state_test_data); aws_mqtt_client_connection_release(state_test_data->mqtt_connection); state_test_data->mqtt_connection = NULL; s_wait_for_ops_completed(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_disconnect_while_reconnecting, s_setup_mqtt_server_fn, s_test_mqtt_connection_disconnect_while_reconnecting, s_clean_up_mqtt_server_fn, &test_data) /** * Regression test: Once upon a time there was a bug caused by race condition on the state of connection. * The scenario is the server/broker closes the connection, while the client is making requests. * The race condition between the eventloop thread closes the connection and the main thread makes request could cause a * bug. * Solution: put a lock for the state of connection, protect it from accessing by multiple threads at the same time. */ static int s_test_mqtt_connection_closes_while_making_requests_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* shutdown the channel for some error */ aws_channel_shutdown(state_test_data->server_channel, AWS_ERROR_INVALID_STATE); /* While the shutdown is still in process, making a publish request */ /* It may not 100% trigger the crash, the crash only happens when the s_mqtt_client_shutdown and mqtt_create_request * happen at the same time. Like the slot is removed by shutdown, and the create request still think the slot is * there and try to access it. Crash happens. It's not possible to trigger the crash 100% without changing the * implementation. 
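* Even so, issuing the publish below while the shutdown is in flight exercises the state check that the fix protects with a lock.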
*/ uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); s_wait_for_reconnect_to_complete(state_test_data); s_wait_for_ops_completed(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_closes_while_making_requests, s_setup_mqtt_server_fn, s_test_mqtt_connection_closes_while_making_requests_fn, s_clean_up_mqtt_server_fn, &test_data) /* Helper to make sure packets are received/resent in the expected order and that the duplicate flag is appropriately set */ static int s_check_resend_packets( struct aws_channel_handler *handler, size_t search_start_idx, bool duplicate_publish_expected, uint16_t *packet_ids, size_t packet_id_count) { ASSERT_SUCCESS(mqtt_mock_server_decode_packets(handler)); if (packet_id_count == 0) { return AWS_OP_SUCCESS; } size_t previous_index = 0; struct mqtt_decoded_packet *previous_packet = mqtt_mock_server_find_decoded_packet_by_id(handler, search_start_idx, packet_ids[0], &previous_index); if (previous_packet->type == AWS_MQTT_PACKET_PUBLISH) { ASSERT_INT_EQUALS(duplicate_publish_expected, previous_packet->duplicate); } for (size_t i = 1; i < packet_id_count; ++i) { size_t current_index = 0; struct mqtt_decoded_packet *current_packet = mqtt_mock_server_find_decoded_packet_by_id(handler, search_start_idx, packet_ids[i], &current_index); if (current_packet->type == AWS_MQTT_PACKET_PUBLISH) { ASSERT_INT_EQUALS(duplicate_publish_expected, current_packet->duplicate); } ASSERT_TRUE(current_index > previous_index); previous_packet = current_packet; previous_index = current_index; } return AWS_OP_SUCCESS; } /** * Test that when the responses have not come back from the server and the connection is lost, the sent packets will be retried * in the same order in which they were originally sent.
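* A re-sent PUBLISH is expected to carry the DUP flag; s_check_resend_packets() verifies both the flag and the relative ordering of the packets.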
*/ static int s_test_mqtt_connection_resend_packets_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, }; struct aws_byte_cursor sub_topic = aws_byte_cursor_from_c_str("/test/topic/sub/#"); struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); struct aws_byte_cursor payload_2 = aws_byte_cursor_from_c_str("Test Message 2"); struct aws_byte_cursor payload_3 = aws_byte_cursor_from_c_str("Test Message 3"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Disable the auto ACK packets sent by the server, which blocks the requests to complete */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); uint16_t packet_ids[5]; packet_ids[0] = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, NULL, NULL); ASSERT_TRUE(packet_ids[0] > 0); packet_ids[1] = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data); ASSERT_TRUE(packet_ids[1] > 0); packet_ids[2] = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_2, NULL, NULL); ASSERT_TRUE(packet_ids[2] > 0); packet_ids[3] = aws_mqtt_client_connection_unsubscribe(state_test_data->mqtt_connection, &sub_topic, NULL, NULL); ASSERT_TRUE(packet_ids[3] > 0); packet_ids[4] = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_3, NULL, NULL); ASSERT_TRUE(packet_ids[4] > 0); /* Wait for 1 sec. ensure all the publishes have been received by the server */ aws_thread_current_sleep(ONE_SEC); ASSERT_SUCCESS( s_check_resend_packets(state_test_data->mock_server, 0, false, packet_ids, AWS_ARRAY_SIZE(packet_ids))); size_t packet_count = mqtt_mock_server_decoded_packets_count(state_test_data->mock_server); /* shutdown the channel for some error */ aws_channel_shutdown(state_test_data->server_channel, AWS_ERROR_INVALID_STATE); s_wait_for_reconnect_to_complete(state_test_data); /* Wait again, and ensure the publishes have been resent */ aws_thread_current_sleep(ONE_SEC); ASSERT_SUCCESS(s_check_resend_packets( state_test_data->mock_server, packet_count, true, packet_ids, AWS_ARRAY_SIZE(packet_ids))); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_resend_packets, s_setup_mqtt_server_fn, s_test_mqtt_connection_resend_packets_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that connection lost before the publish QoS 0 ever sent, publish QoS 0 will not be retried. 
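* QoS 0 ("at most once") carries no delivery guarantee, so a publish that never made it onto the wire is simply dropped rather than re-queued.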
*/ static int s_test_mqtt_connection_not_retry_publish_QoS_0_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = 16960, /* basically stop automatically sending PINGREQ */ }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* kill the connection */ aws_channel_shutdown(state_test_data->server_channel, AWS_ERROR_INVALID_STATE); /* TODO: only one eventloop thread in the el group. Most likely the test will fail, the outgoing task will not be * cancelled because of connection lost */ /* make a publish with QoS 0 immediate. */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_MOST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* publish should complete after the shutdown */ s_wait_for_ops_completed(state_test_data); /* wait for reconnect */ s_wait_for_reconnect_to_complete(state_test_data); /* Check all received packets, no publish packets ever received by the server. Because the connection lost before it * ever get sent. */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); ASSERT_NULL( mqtt_mock_server_find_decoded_packet_by_type(state_test_data->mock_server, 0, AWS_MQTT_PACKET_PUBLISH, NULL)); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_not_retry_publish_QoS_0, s_setup_mqtt_server_fn, s_test_mqtt_connection_not_retry_publish_QoS_0_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that the retry policy is consistent, which means either the request has been sent or not, when the connection * lost/resume, the request will be retried. 
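* In other words, an unacknowledged QoS 1 PUBLISH or SUBSCRIBE is re-queued on every reconnect until its ACK arrives (or the operation times out).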
*/ static int s_test_mqtt_connection_consistent_retry_policy_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .protocol_operation_timeout_ms = 3000, .keep_alive_time_secs = 16960, /* basically stop automatically sending PINGREQ */ }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); struct aws_byte_cursor sub_topic = aws_byte_cursor_from_c_str("/test/topic"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_channel_handler *handler = state_test_data->mock_server; /* Disable the auto ACK packets sent by the server, which blocks the requests to complete */ mqtt_mock_server_disable_auto_ack(handler); /* kill the connection */ aws_channel_shutdown(state_test_data->server_channel, AWS_ERROR_INVALID_STATE); /* There is a hidden race condition between channel shutdown from eventloop and publish/subscribe from main thread, * but either way, it should work as they are retried in the end. */ /* make a publish with QoS 1 immediate. */ uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* make another subscribe */ uint16_t packet_id_2 = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, NULL, NULL, NULL, s_on_suback, state_test_data); ASSERT_TRUE(packet_id_2 > 0); /* wait for reconnect */ s_wait_for_reconnect_to_complete(state_test_data); /* Wait for 1 sec. ensure all the requests have been received by the server */ aws_thread_current_sleep(ONE_SEC); /* Check all received packets, subscribe and publish has been received */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(handler)); size_t packet_count = mqtt_mock_server_decoded_packets_count(handler); /* the latest packet should be subscribe */ ASSERT_NOT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_SUBSCRIBE, NULL)); ASSERT_NOT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_PUBLISH, NULL)); /* Re-enable the auto ack to finish the requests */ mqtt_mock_server_enable_auto_ack(handler); /* Kill the connection again, the requests will be retried since the response has not been received yet. 
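* With auto-ack re-enabled above, the retried SUBSCRIBE and PUBLISH can complete this time.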
*/ aws_channel_shutdown(state_test_data->server_channel, AWS_ERROR_INVALID_STATE); s_wait_for_reconnect_to_complete(state_test_data); /* the subscribe should be able to complete now */ s_wait_for_subscribe_to_complete(state_test_data); /* Check all received packets; the subscribe and publish have been resent */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(handler)); ASSERT_TRUE(mqtt_mock_server_decoded_packets_count(handler) > packet_count); /* the resent SUBSCRIBE and PUBLISH should appear after the previously counted packets */ ASSERT_NOT_NULL( mqtt_mock_server_find_decoded_packet_by_type(handler, packet_count, AWS_MQTT_PACKET_SUBSCRIBE, NULL)); ASSERT_NOT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, packet_count, AWS_MQTT_PACKET_PUBLISH, NULL)); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_consistent_retry_policy, s_setup_mqtt_server_fn, s_test_mqtt_connection_consistent_retry_policy_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that a request will not be retried on a healthy connection, even if its response has not been received for a while. */ static int s_test_mqtt_connection_not_resend_packets_on_healthy_connection_fn( struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); struct aws_byte_cursor sub_topic = aws_byte_cursor_from_c_str("/test/topic"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_channel_handler *handler = state_test_data->mock_server; /* Disable the auto ACK packets sent by the server, which blocks the requests from completing */ mqtt_mock_server_disable_auto_ack(handler); /* make a publish with QoS 1 */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* make another subscribe */ uint16_t packet_id_2 = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, NULL, NULL, NULL, s_on_suback, state_test_data); ASSERT_TRUE(packet_id_2 > 0); /* Wait for 3 seconds to
ensure that no duplicate requests are sent */ aws_thread_current_sleep((uint64_t)ONE_SEC * 3); /* Check all received packets; only one publish and one subscribe should have been received */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); size_t pre_index = SIZE_MAX; ASSERT_NOT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_PUBLISH, &pre_index)); if (pre_index + 1 < mqtt_mock_server_decoded_packets_count(handler)) { /* If it's not the last packet, search again, and the result should be NULL. */ ASSERT_NULL( mqtt_mock_server_find_decoded_packet_by_type(handler, pre_index + 1, AWS_MQTT_PACKET_PUBLISH, NULL)); } ASSERT_NOT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_SUBSCRIBE, &pre_index)); if (pre_index + 1 < mqtt_mock_server_decoded_packets_count(handler)) { ASSERT_NULL( mqtt_mock_server_find_decoded_packet_by_type(handler, pre_index + 1, AWS_MQTT_PACKET_SUBSCRIBE, NULL)); } ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_not_resend_packets_on_healthy_connection, s_setup_mqtt_server_fn, s_test_mqtt_connection_not_resend_packets_on_healthy_connection_fn, s_clean_up_mqtt_server_fn, &test_data) /* Make requests while offline, then destroy the connection before it ever goes online; the resources should be cleaned up * properly */ static int s_test_mqtt_connection_destory_pending_requests_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data) > 0); ASSERT_TRUE( aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data) > 0); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_destory_pending_requests, s_setup_mqtt_server_fn, s_test_mqtt_connection_destory_pending_requests_fn, s_clean_up_mqtt_server_fn, &test_data) /* Make a clean session connection; none of the requests will be retried when the connection is lost */ static int s_test_mqtt_clean_session_not_retry_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, /* make a clean_session connection */ .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = DEFAULT_TEST_KEEP_ALIVE_S, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_channel_handler *handler = state_test_data->mock_server; mqtt_mock_server_disable_auto_ack(handler); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor
payload = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data) > 0); ASSERT_TRUE( aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data) > 0); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Shutdown the connection */ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_interrupt_to_complete(state_test_data); /* Once the connection is lost, the requests will fail */ ASSERT_UINT_EQUALS(state_test_data->op_complete_error, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION); ASSERT_UINT_EQUALS(state_test_data->subscribe_complete_error, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION); /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_clean_session_not_retry, s_setup_mqtt_server_fn, s_test_mqtt_clean_session_not_retry_fn, s_clean_up_mqtt_server_fn, &test_data) /* Make a clean session connection; the previous session will be discarded when a new connection with clean_session true * is created */ static int s_test_mqtt_clean_session_discard_previous_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, /* make a clean_session connection */ .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = 16960, /* basically stop automatically sending PINGREQ */ }; struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Test Message 1"); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Requests made now will be considered part of the previous session.
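* Because they are queued before aws_mqtt_client_connection_connect() is called with clean_session set, they are cancelled with AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION instead of being sent.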
*/ ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data) > 0); ASSERT_TRUE( aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data) > 0); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_channel_handler *handler = state_test_data->mock_server; s_wait_for_ops_completed(state_test_data); s_wait_for_subscribe_to_complete(state_test_data); ASSERT_UINT_EQUALS(state_test_data->op_complete_error, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION); ASSERT_UINT_EQUALS(state_test_data->subscribe_complete_error, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION); /* Check no request is received by the server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(handler)); ASSERT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_PUBLISH, NULL)); ASSERT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_SUBSCRIBE, NULL)); /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_clean_session_discard_previous, s_setup_mqtt_server_fn, s_test_mqtt_clean_session_discard_previous_fn, s_clean_up_mqtt_server_fn, &test_data) /* Make a clean session connection, the requests after the connect function will be considered as the next session */ static int s_test_mqtt_clean_session_keep_next_session_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, /* make a clean_session connection */ .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .keep_alive_time_secs = 16960, /* basically stop automatically sending PINGREQ */ }; struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Test Message 1"); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); /* Requests made after the connect function will be considered as the next session, and will be sent eventually */ ASSERT_TRUE( aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data) > 0); ASSERT_TRUE( aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish_received, state_test_data, NULL, s_on_suback, state_test_data) > 0); s_wait_for_connection_to_complete(state_test_data); struct aws_channel_handler *handler = state_test_data->mock_server; s_wait_for_ops_completed(state_test_data); s_wait_for_subscribe_to_complete(state_test_data); 
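/* These operations were queued after connect() was called, so they belong to the next session and complete successfully. */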
ASSERT_UINT_EQUALS(state_test_data->op_complete_error, AWS_ERROR_SUCCESS); ASSERT_UINT_EQUALS(state_test_data->subscribe_complete_error, AWS_ERROR_SUCCESS); /* Check the requests were received by the server */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(handler)); ASSERT_NOT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_PUBLISH, NULL)); ASSERT_NOT_NULL(mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_SUBSCRIBE, NULL)); /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_clean_session_keep_next_session, s_setup_mqtt_server_fn, s_test_mqtt_clean_session_keep_next_session_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that the connection is healthy, the user sets a timeout for requests, the timeout elapses, and the publish fails. */ static int s_test_mqtt_connection_publish_QoS1_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .protocol_operation_timeout_ms = 3000, .keep_alive_time_secs = 16960, /* basically stop automatically sending PINGREQ */ }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Disable the auto ACK packets sent by the server, which blocks the requests from completing */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* make a QoS 1 publish immediately.
*/ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* publish should complete after the shutdown */ s_wait_for_ops_completed(state_test_data); /* Check the publish has been completed with timeout error */ ASSERT_UINT_EQUALS(state_test_data->op_complete_error, AWS_ERROR_MQTT_TIMEOUT); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_publish_QoS1_timeout, s_setup_mqtt_server_fn, s_test_mqtt_connection_publish_QoS1_timeout_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that connection is healthy, user set the timeout for request, and timeout happens we still send ping reqs */ static int s_test_mqtt_connection_publish_QoS1_timeout_with_ping_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = 99, .protocol_operation_timeout_ms = 20, .keep_alive_time_secs = 3, // connection->keep_alive_time_ns, pushoff the timestamp by that amount }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Disable the auto ACK packets sent by the server, which blocks the requests to complete */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* make a publish with QoS 1 immediate. 
*/ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 10; aws_mutex_unlock(&state_test_data->lock); for (int i = 0; i < 10; i++) { uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); // sleep 1 second aws_thread_current_sleep(1000000000); } // Wait for 3 seconds aws_thread_current_sleep(3000000000); // make sure the server is still receiving pings while the publishes remain unacked, in other words ping push-off is not happening ASSERT_TRUE(mqtt_mock_server_get_ping_count(state_test_data->mock_server) > 1); aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); /* the publishes should complete after the shutdown */ s_wait_for_ops_completed(state_test_data); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_publish_QoS1_timeout_with_ping, s_setup_mqtt_server_fn, s_test_mqtt_connection_publish_QoS1_timeout_with_ping_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that the connection is healthy, the user sets a timeout for requests, the timeout elapses, and the unsubscribe fails. */ static int s_test_mqtt_connection_unsub_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .protocol_operation_timeout_ms = 3000, .keep_alive_time_secs = 16960, /* basically stop automatically sending PINGREQ */ }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Disable the auto ACK packets sent by the server, which blocks the requests from completing */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* unsubscribe from the first topic */ uint16_t unsub_packet_id = aws_mqtt_client_connection_unsubscribe( state_test_data->mqtt_connection, &pub_topic, s_on_op_complete, state_test_data); ASSERT_TRUE(unsub_packet_id > 0); /* the unsubscribe should complete once the timeout elapses */ s_wait_for_ops_completed(state_test_data); /* Check the unsubscribe has been completed with a timeout error */ ASSERT_UINT_EQUALS(state_test_data->op_complete_error, AWS_ERROR_MQTT_TIMEOUT); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_unsub_timeout, s_setup_mqtt_server_fn, s_test_mqtt_connection_unsub_timeout_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that the connection is healthy, the user sets a timeout for requests, and a lost connection resets the timeout.
*/ static int s_test_mqtt_connection_publish_QoS1_timeout_connection_lost_reset_time_fn( struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .ping_timeout_ms = DEFAULT_TEST_PING_TIMEOUT_MS, .protocol_operation_timeout_ms = 3000, .keep_alive_time_secs = 16960, /* basically stop automatically sending PINGREQ */ }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Disable the auto ACK packets sent by the server, which blocks the requests to complete */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* make a publish with QoS 1 immediate. */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* sleep for 2 sec, close the connection */ aws_thread_current_sleep((uint64_t)ONE_SEC * 2); /* Kill the connection, the requests will be retried and the timeout will be reset. */ aws_channel_shutdown(state_test_data->server_channel, AWS_ERROR_INVALID_STATE); s_wait_for_reconnect_to_complete(state_test_data); /* sleep for 2 sec again, in total the response has not received for more than 4 sec, timeout should happen if the * lost of connection not reset the timeout */ aws_thread_current_sleep((uint64_t)ONE_SEC * 2); /* send a puback */ ASSERT_SUCCESS(mqtt_mock_server_send_puback(state_test_data->mock_server, packet_id_1)); /* publish should complete after the shutdown */ s_wait_for_ops_completed(state_test_data); /* Check the publish has been completed successfully since the lost of the connection reset the timeout */ ASSERT_UINT_EQUALS(state_test_data->op_complete_error, AWS_ERROR_SUCCESS); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_publish_QoS1_timeout_connection_lost_reset_time, s_setup_mqtt_server_fn, s_test_mqtt_connection_publish_QoS1_timeout_connection_lost_reset_time_fn, s_clean_up_mqtt_server_fn, &test_data) /* Function called for testing the on_connection_closed callback */ static void s_on_connection_closed_fn( struct aws_mqtt_client_connection *connection, struct on_connection_closed_data *data, void *userdata) { (void)connection; (void)data; struct mqtt_connection_state_test *state_test_data = (struct mqtt_connection_state_test *)userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->connection_close_calls += 1; aws_mutex_unlock(&state_test_data->lock); } /** * Test that the connection close callback is fired only once and when the connection was closed */ static int 
s_test_mqtt_connection_close_callback_simple_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; aws_mqtt_client_connection_set_connection_closed_handler( state_test_data->mqtt_connection, s_on_connection_closed_fn, state_test_data); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* sleep for 2 sec, just to make sure the connection is stable */ aws_thread_current_sleep((uint64_t)ONE_SEC * 2); /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Make sure the callback was called and the value is what we expect */ ASSERT_UINT_EQUALS(1, state_test_data->connection_close_calls); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_close_callback_simple, s_setup_mqtt_server_fn, s_test_mqtt_connection_close_callback_simple_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that the connection close callback is NOT fired during an interrupt */ static int s_test_mqtt_connection_close_callback_interrupted_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; aws_mqtt_client_connection_set_connection_closed_handler( state_test_data->mqtt_connection, s_on_connection_closed_fn, state_test_data); ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Kill the connection */ aws_channel_shutdown(state_test_data->server_channel, AWS_ERROR_INVALID_STATE); s_wait_for_reconnect_to_complete(state_test_data); /* sleep for 2 sec, just to make sure the connection is stable */ aws_thread_current_sleep((uint64_t)ONE_SEC * 2); /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* Make sure the callback was called only ONCE and the value is what we expect */ ASSERT_UINT_EQUALS(1, state_test_data->connection_close_calls); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_close_callback_interrupted, s_setup_mqtt_server_fn, s_test_mqtt_connection_close_callback_interrupted_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that the connection close callback is called every time a disconnect happens, if it happens multiple times */ static int s_test_mqtt_connection_close_callback_multi_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options 
connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; aws_mqtt_client_connection_set_connection_closed_handler( state_test_data->mqtt_connection, s_on_connection_closed_fn, state_test_data); int disconnect_amount = 10; for (int i = 0; i < disconnect_amount; i++) { ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); } /* Make sure the callback was called disconnect_amount times */ ASSERT_UINT_EQUALS(disconnect_amount, state_test_data->connection_close_calls); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_close_callback_multi, s_setup_mqtt_server_fn, s_test_mqtt_connection_close_callback_multi_fn, s_clean_up_mqtt_server_fn, &test_data) static int s_test_mqtt_connection_reconnection_backoff_stable(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); uint64_t time_before = 0; uint64_t time_after = 0; for (int i = 0; i < 3; i++) { /* sleep for AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS to make sure our connection is successful */ aws_thread_current_sleep( (uint64_t)ONE_SEC * AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS + RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS); aws_high_res_clock_get_ticks(&time_before); /* shut it down and make sure the client automatically reconnects.*/ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_reconnect_to_complete(state_test_data); aws_high_res_clock_get_ticks(&time_after); uint64_t reconnection_backoff_time = time_after - time_before; uint64_t remainder = 0; ASSERT_TRUE( aws_timestamp_convert(reconnection_backoff_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder) == DEFAULT_MIN_RECONNECT_DELAY_SECONDS); ASSERT_TRUE(remainder <= RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS); } /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_reconnection_backoff_stable, s_setup_mqtt_server_fn, s_test_mqtt_connection_reconnection_backoff_stable, s_clean_up_mqtt_server_fn, &test_data) static int s_test_mqtt_connection_reconnection_backoff_unstable(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, 
.client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); uint64_t time_before = 0; uint64_t time_after = 0; uint64_t expected_reconnect_backoff = 1; for (int i = 0; i < 3; i++) { aws_high_res_clock_get_ticks(&time_before); /* shut it down and make sure the client automatically reconnects.*/ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_reconnect_to_complete(state_test_data); aws_high_res_clock_get_ticks(&time_after); uint64_t reconnection_backoff = time_after - time_before; uint64_t remainder = 0; ASSERT_TRUE( aws_timestamp_convert(reconnection_backoff, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder) == expected_reconnect_backoff); ASSERT_TRUE(remainder <= RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS); // Increase the exponential backoff expected_reconnect_backoff = aws_min_u64(expected_reconnect_backoff * 2, 10); } /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_reconnection_backoff_unstable, s_setup_mqtt_server_fn, s_test_mqtt_connection_reconnection_backoff_unstable, s_clean_up_mqtt_server_fn, &test_data) static int s_test_mqtt_connection_reconnection_backoff_reset(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); uint64_t time_before = 0; uint64_t time_after = 0; uint64_t expected_reconnect_backoff = 1; uint64_t reconnection_backoff = 0; for (int i = 0; i < 3; i++) { aws_high_res_clock_get_ticks(&time_before); /* shut it down and make sure the client automatically reconnects.*/ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_reconnect_to_complete(state_test_data); aws_high_res_clock_get_ticks(&time_after); reconnection_backoff = time_after - time_before; ASSERT_TRUE( aws_timestamp_convert(reconnection_backoff, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL) >= expected_reconnect_backoff); expected_reconnect_backoff = aws_min_u64(expected_reconnect_backoff * 2, 10); } /* sleep for AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS to make sure our connection is successful */ aws_thread_current_sleep( (uint64_t)ONE_SEC * AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS + RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS); aws_high_res_clock_get_ticks(&time_before); /* shut it down and make sure the client automatically reconnects.*/ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_reconnect_to_complete(state_test_data); aws_high_res_clock_get_ticks(&time_after); 
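/*
 * Editorial note (not in the original test): during the loop above the expected
 * reconnect backoff doubled each iteration (1s, 2s, 4s, capped at 10s by
 * aws_min_u64). After the connection stayed up for
 * AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS, the next reconnect delay should have
 * dropped back to DEFAULT_MIN_RECONNECT_DELAY_SECONDS, which the assertions below
 * verify within RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS.
 */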
reconnection_backoff = time_after - time_before; uint64_t remainder = 0; ASSERT_TRUE( aws_timestamp_convert(reconnection_backoff, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder) == DEFAULT_MIN_RECONNECT_DELAY_SECONDS); ASSERT_TRUE(remainder <= RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS); /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_reconnection_backoff_reset, s_setup_mqtt_server_fn, s_test_mqtt_connection_reconnection_backoff_reset, s_clean_up_mqtt_server_fn, &test_data) static int s_test_mqtt_connection_reconnection_backoff_reset_after_disconnection( struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); uint64_t time_before = 0; uint64_t time_after = 0; uint64_t expected_reconnect_backoff = 1; uint64_t reconnection_backoff = 0; for (int i = 0; i < 3; i++) { aws_high_res_clock_get_ticks(&time_before); /* shut it down and make sure the client automatically reconnects.*/ aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_reconnect_to_complete(state_test_data); aws_high_res_clock_get_ticks(&time_after); reconnection_backoff = time_after - time_before; ASSERT_TRUE( aws_timestamp_convert(reconnection_backoff, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL) >= expected_reconnect_backoff); expected_reconnect_backoff = aws_min_u64(expected_reconnect_backoff * 2, 10); } /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); /* connect again */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); aws_high_res_clock_get_ticks(&time_before); aws_channel_shutdown(state_test_data->server_channel, AWS_OP_SUCCESS); s_wait_for_reconnect_to_complete(state_test_data); aws_high_res_clock_get_ticks(&time_after); reconnection_backoff = time_after - time_before; uint64_t remainder = 0; ASSERT_TRUE( aws_timestamp_convert(reconnection_backoff, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder) == DEFAULT_MIN_RECONNECT_DELAY_SECONDS); ASSERT_TRUE(remainder <= RECONNECT_BACKOFF_DELAY_ERROR_MARGIN_NANO_SECONDS); /* Disconnect */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_reconnection_backoff_reset_after_disconnection, s_setup_mqtt_server_fn, s_test_mqtt_connection_reconnection_backoff_reset_after_disconnection, s_clean_up_mqtt_server_fn, &test_data) /** * Makes a CONNECT, with 1 second keep alive ping interval, does nothing for roughly 4 seconds, ensures 4 
pings are sent */ static int s_test_mqtt_connection_ping_norm_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = 1, .ping_timeout_ms = 100, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); /* Wait for 4.5 seconds (to account for slight drift/jitter) */ aws_thread_current_sleep(4500000000); /* Ensure the server got 4 PING packets */ ASSERT_INT_EQUALS(4, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_ping_norm, s_setup_mqtt_server_fn, s_test_mqtt_connection_ping_norm_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Makes a CONNECT, with 1 second keep alive ping interval. Publish QOS1 message for 4.5 seconds and then ensure NO * pings were sent. (The ping time will be push off on ack ) */ static int s_test_mqtt_connection_ping_no_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = 1, .ping_timeout_ms = 100, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); uint64_t begin_timestamp = 0; uint64_t elapsed_time = 0; uint64_t now = 0; aws_high_res_clock_get_ticks(&begin_timestamp); uint64_t test_duration = (uint64_t)4 * AWS_TIMESTAMP_NANOS; // Make sure we publish for 4 seconds; while (elapsed_time < test_duration) { /* Publish qos1*/ uint16_t packet_id = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id > 0); aws_thread_current_sleep(500000000); /* Sleep 0.5 seconds to avoid spamming*/ aws_high_res_clock_get_ticks(&now); elapsed_time = now - begin_timestamp; } aws_thread_current_sleep(250000000); /* Sleep 0.25 seconds to consider jitter*/ /* Ensure the server got 0 PING packets */ ASSERT_INT_EQUALS(0, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_ping_no, s_setup_mqtt_server_fn, s_test_mqtt_connection_ping_no_fn, s_clean_up_mqtt_server_fn, &test_data) 
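/*
 * Editorial sketch (not part of the original tests, and guarded out of the build):
 * the ping tests above and below sleep for hard-coded nanosecond literals such as
 * 4500000000 (4.5 seconds). The hypothetical helper below shows how whole-second
 * durations map to the nanosecond values passed to aws_thread_current_sleep(),
 * using aws_timestamp_convert(); the name s_test_seconds_to_nanos is an editorial
 * assumption, not an existing symbol in this file.
 */
#if 0
static uint64_t s_test_seconds_to_nanos(uint64_t seconds) {
    /* AWS_TIMESTAMP_NANOS is nanoseconds-per-second; no remainder is needed for whole seconds */
    return aws_timestamp_convert(seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
}
#endif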
/** * Makes a CONNECT, with 1 second keep alive ping interval, publish a qos0 messages for 4.5 seconds. * We should send a total of 4 pings */ static int s_test_mqtt_connection_ping_noack_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = 1, .ping_timeout_ms = 100, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); uint64_t begin_timestamp = 0; uint64_t elapsed_time = 0; uint64_t now = 0; aws_high_res_clock_get_ticks(&begin_timestamp); uint64_t test_duration = (uint64_t)4 * AWS_TIMESTAMP_NANOS; // Make sure we publish for 4 seconds; while (elapsed_time < test_duration) { /* Publish qos0*/ uint16_t packet_id = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_MOST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id > 0); aws_thread_current_sleep(500000000); /* Sleep 0.5 seconds to avoid spamming*/ aws_high_res_clock_get_ticks(&now); elapsed_time = now - begin_timestamp; } aws_thread_current_sleep(250000000); /* Sleep 0.25 seconds to consider jitter*/ /* Ensure the server got 4 PING packets */ ASSERT_INT_EQUALS(4, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_ping_noack, s_setup_mqtt_server_fn, s_test_mqtt_connection_ping_noack_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test to make sure the PING timing is correct if a publish/packet is sent near the end of the keep alive time. * Note: Because of socket write jitter and scheduling jitter, the times have a 0.25 (quarter of a second) delta range. * * To test this, this test has a keep alive at 4 seconds and makes a publish after 3 seconds. This resets the ping * task and will reschedule it for 4 seconds from the publish (the PING will be scheduled for 3 seconds after the 4 * second task is invoked). This test then waits a second, makes sure a PING has NOT been sent (with no ping reschedule, * it would have) and then waits 3 seconds to ensure and checks that a PING has been sent. Finally, it waits 4 seconds * to ensure a second PING was sent at the correct time. 
*/ static int s_test_mqtt_connection_ping_basic_scenario_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = 4, .ping_timeout_ms = 100, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* PING will be in 4 seconds */ aws_thread_current_sleep(3000000000); /* Wait 3 seconds */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Make a publish */ struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); s_wait_for_ops_completed(state_test_data); /* Publish packet written at 3 seconds */ aws_thread_current_sleep(1250000000); /* Wait 1.25 second (the extra 0.25 is to account for jitter) */ /* PING task has executed and been rescheduled at 3 seconds (1 second passed) */ /* Ensure the server has gotten 0 PING packets so far */ ASSERT_INT_EQUALS(0, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); aws_thread_current_sleep( 3000000000); /* Wait 3 seconds more (no jitter needed because we already added 0.25 in the prior sleep) */ /* PING task (from publish) has been executed */ /* Ensure the server has gotten only 1 PING packet */ ASSERT_INT_EQUALS(1, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); aws_thread_current_sleep(4000000000); /* Wait 4 seconds (since we didn't publish or anything, it should go back to normal keep alive time) */ /* Ensure the server has gotten 2 PING packets */ ASSERT_INT_EQUALS(2, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); /* Disconnect and finish! */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_ping_basic_scenario, s_setup_mqtt_server_fn, s_test_mqtt_connection_ping_basic_scenario_fn, s_clean_up_mqtt_server_fn, &test_data) /** * The test is the same as above (s_test_mqtt_connection_ping_basic_scenario_fn) but after the first publish, it waits * 1 second and makes another publish, before waiting 4 seconds from that point and ensures only a single PING was sent. 
*/ static int s_test_mqtt_connection_ping_double_scenario_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, .keep_alive_time_secs = 4, .ping_timeout_ms = 100, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); /* PING will be in 4 seconds */ aws_thread_current_sleep(3000000000); /* Wait 3 seconds */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Make a publish */ struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message 1"); uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); s_wait_for_ops_completed(state_test_data); /* Publish packet written at 3 seconds */ aws_thread_current_sleep(1250000000); /* Wait 1.25 second (the extra 0.25 is to account for jitter) */ /* PING task has executed and been rescheduled at 3 seconds (1 second passed) */ /* Ensure the server has gotten 0 PING packets so far */ ASSERT_INT_EQUALS(0, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); aws_thread_current_sleep(750000000); /* wait 0.75 seconds */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 2; aws_mutex_unlock(&state_test_data->lock); /* Make as second publish */ uint16_t packet_id_2 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_2 > 0); s_wait_for_ops_completed(state_test_data); /* Publish packet written at 2 seconds (relative to PING that was scheduled above) */ aws_thread_current_sleep(4250000000); /* Wait 4.25 (the extra 0.25 is to account for jitter) seconds */ /** * Note: The extra 2 seconds are to account for the time it takes to publish on the socket. Despite best efforts, * I cannot get it to trigger right away in the test suite... * If you read the logs though, the scheduled PINGs should be 4 seconds, 3 seconds, 2 seconds, 4 seconds */ /* Ensure the server has gotten only 1 PING packet */ ASSERT_INT_EQUALS(1, mqtt_mock_server_get_ping_count(state_test_data->mock_server)); /** * At this point a new PING task is scheduled for 4 seconds, but we do not care anymore for the * purposes of this test. */ /* Disconnect and finish! */ ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_ping_double_scenario, s_setup_mqtt_server_fn, s_test_mqtt_connection_ping_double_scenario_fn, s_clean_up_mqtt_server_fn, &test_data) /** * Test that the connection termination callback is fired for the connection that was not actually connected ever. 
* \note Other tests use on_connection_termination callback as well, so one simple dedicated case is enough. */ static int s_test_mqtt_connection_termination_callback_simple_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; ASSERT_SUCCESS(aws_mqtt_client_connection_set_connection_termination_handler( state_test_data->mqtt_connection, s_on_connection_termination_fn, state_test_data)); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_connection_termination_callback_simple, s_setup_mqtt_server_fn, s_test_mqtt_connection_termination_callback_simple_fn, s_clean_up_mqtt_server_fn, &test_data) /* * Verifies that calling publish with a bad qos results in a validation failure */ static int s_test_mqtt_validation_failure_publish_qos_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("a/b"); ASSERT_INT_EQUALS( 0, aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &topic, (enum aws_mqtt_qos)3, true, NULL, s_on_op_complete, state_test_data)); int error_code = aws_last_error(); ASSERT_INT_EQUALS(AWS_ERROR_MQTT_INVALID_QOS, error_code); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_validation_failure_publish_qos, s_setup_mqtt_server_fn, s_test_mqtt_validation_failure_publish_qos_fn, s_clean_up_mqtt_server_fn, &test_data) /* * Verifies that calling subscribe_multiple with no topics causes a validation failure */ static int s_test_mqtt_validation_failure_subscribe_empty_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); struct aws_array_list topic_filters; size_t list_len = 2; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, static_buf, list_len * sizeof(struct aws_mqtt_topic_subscription)); aws_array_list_init_static(&topic_filters, static_buf, list_len, sizeof(struct aws_mqtt_topic_subscription)); ASSERT_INT_EQUALS( 0, aws_mqtt_client_connection_subscribe_multiple( state_test_data->mqtt_connection, &topic_filters, s_on_multi_suback, state_test_data)); int error_code = aws_last_error(); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, error_code); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, 
s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_validation_failure_subscribe_empty, s_setup_mqtt_server_fn, s_test_mqtt_validation_failure_subscribe_empty_fn, s_clean_up_mqtt_server_fn, &test_data) /* * Verifies that calling unsubscribe with a null topic causes a validation failure (not a crash) */ static int s_test_mqtt_validation_failure_unsubscribe_null_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_wait_for_connection_to_complete(state_test_data); ASSERT_INT_EQUALS( 0, aws_mqtt_client_connection_unsubscribe( state_test_data->mqtt_connection, NULL, s_on_op_complete, state_test_data)); int error_code = aws_last_error(); ASSERT_INT_EQUALS(AWS_ERROR_MQTT_INVALID_TOPIC, error_code); ASSERT_SUCCESS( aws_mqtt_client_connection_disconnect(state_test_data->mqtt_connection, s_on_disconnect_fn, state_test_data)); s_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_validation_failure_unsubscribe_null, s_setup_mqtt_server_fn, s_test_mqtt_validation_failure_unsubscribe_null_fn, s_clean_up_mqtt_server_fn, &test_data) static struct aws_byte_cursor s_bad_client_id_utf8 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x41\xED\xA0\x80\x41"); static struct aws_byte_cursor s_bad_username_utf8 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x41\x00\x41"); static struct aws_byte_cursor s_bad_will_topic_utf8 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\x41\xED\xBF\xBF"); static int s_test_mqtt_validation_failure_connect_invalid_client_id_utf8_fn( struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = s_bad_client_id_utf8, .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_on_connection_complete_fn, }; ASSERT_FAILS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); int error_code = aws_last_error(); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, error_code); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_validation_failure_connect_invalid_client_id_utf8, s_setup_mqtt_server_fn, s_test_mqtt_validation_failure_connect_invalid_client_id_utf8_fn, s_clean_up_mqtt_server_fn, &test_data) static int s_test_mqtt_validation_failure_invalid_will_topic_utf8_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_byte_cursor will_topic_cursor = s_bad_will_topic_utf8; ASSERT_FAILS(aws_mqtt_client_connection_set_will( state_test_data->mqtt_connection, &will_topic_cursor, AWS_MQTT_QOS_AT_MOST_ONCE, false, &will_topic_cursor)); int error_code = aws_last_error(); ASSERT_INT_EQUALS(AWS_ERROR_MQTT_INVALID_TOPIC, error_code); return AWS_OP_SUCCESS; } 
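/*
 * Editorial note on the byte strings above (not in the original tests):
 * "\x41\xED\xA0\x80\x41" and "\x41\xED\xBF\xBF" contain the three-byte sequences
 * ED A0 80 and ED BF BF, which decode to the surrogate code points U+D800 and
 * U+DFFF; surrogates are not legal in UTF-8. "\x41\x00\x41" embeds U+0000, which
 * the MQTT specification disallows inside UTF-8 encoded strings. All three are
 * therefore expected to fail validation.
 */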
AWS_TEST_CASE_FIXTURE( mqtt_validation_failure_invalid_will_topic_utf8, s_setup_mqtt_server_fn, s_test_mqtt_validation_failure_invalid_will_topic_utf8_fn, s_clean_up_mqtt_server_fn, &test_data) static int s_mqtt_validation_failure_invalid_will_qos_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_byte_cursor will_topic_cursor = aws_byte_cursor_from_c_str("a/b"); ASSERT_FAILS(aws_mqtt_client_connection_set_will( state_test_data->mqtt_connection, &will_topic_cursor, 12, false, &will_topic_cursor)); int error_code = aws_last_error(); ASSERT_INT_EQUALS(AWS_ERROR_MQTT_INVALID_QOS, error_code); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_validation_failure_invalid_will_qos, s_setup_mqtt_server_fn, s_mqtt_validation_failure_invalid_will_qos_fn, s_clean_up_mqtt_server_fn, &test_data) static int s_test_mqtt_validation_failure_invalid_username_utf8_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_byte_cursor login_cursor = s_bad_username_utf8; ASSERT_FAILS(aws_mqtt_client_connection_set_login(state_test_data->mqtt_connection, &login_cursor, NULL)); int error_code = aws_last_error(); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, error_code); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_validation_failure_invalid_username_utf8, s_setup_mqtt_server_fn, s_test_mqtt_validation_failure_invalid_username_utf8_fn, s_clean_up_mqtt_server_fn, &test_data) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/mqtt_mock_server_handler.c000066400000000000000000000753271456575232400271520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "mqtt_mock_server_handler.h" #include #include #include #include /* 10sec */ #define CVAR_TIMEOUT (10 * (int64_t)1000000000) struct mqtt_mock_server_handler { struct aws_channel_handler handler; struct aws_channel_slot *slot; /* partial incoming packet, finish decoding when the rest arrives */ struct aws_byte_buf pending_packet; /* Lock must be held when accessing "synced" data */ struct { struct aws_mutex lock; struct aws_condition_variable cvar; size_t ping_resp_avail; size_t pubacks_received; size_t ping_received; size_t connacks_avail; bool auto_ack; /* last ID used when sending PUBLISH (QoS1+) to client */ uint16_t last_packet_id; /* contains aws_byte_buf with raw bytes of each packet received. 
*/ struct aws_array_list raw_packets; /* progress decoding from raw_packets to decoded_packets*/ size_t decoded_index; } synced; /* contains mqtt_decoded_packet* for each packet received * only accessed from main thread by test code */ struct aws_array_list decoded_packets; }; struct mqtt_decoded_packet *s_mqtt_decoded_packet_create(struct aws_allocator *alloc) { struct mqtt_decoded_packet *packet = aws_mem_calloc(alloc, 1, sizeof(struct mqtt_decoded_packet)); packet->alloc = alloc; aws_array_list_init_dynamic(&packet->sub_topic_filters, alloc, 1, sizeof(struct aws_mqtt_subscription)); aws_array_list_init_dynamic(&packet->unsub_topic_filters, alloc, 1, sizeof(struct aws_byte_cursor)); return packet; } static void s_mqtt_decoded_packet_destroy(struct mqtt_decoded_packet *packet) { aws_array_list_clean_up(&packet->sub_topic_filters); aws_array_list_clean_up(&packet->unsub_topic_filters); aws_mem_release(packet->alloc, packet); } static int s_mqtt_mock_server_handler_process_packet( struct mqtt_mock_server_handler *server, struct aws_byte_cursor *message_cur) { struct aws_byte_buf received_message; aws_byte_buf_init_copy_from_cursor(&received_message, server->handler.alloc, *message_cur); aws_mutex_lock(&server->synced.lock); aws_array_list_push_back(&server->synced.raw_packets, &received_message); aws_mutex_unlock(&server->synced.lock); struct aws_byte_cursor message_cur_cpy = *message_cur; int err = 0; enum aws_mqtt_packet_type packet_type = aws_mqtt_get_packet_type(message_cur_cpy.ptr); switch (packet_type) { case AWS_MQTT_PACKET_CONNECT: { size_t connacks_available = 0; aws_mutex_lock(&server->synced.lock); AWS_LOGF_DEBUG( MOCK_LOG_SUBJECT, "server, CONNECT received, %llu available connacks.", (long long unsigned)server->synced.connacks_avail); connacks_available = server->synced.connacks_avail > 0 ? server->synced.connacks_avail-- : 0; aws_mutex_unlock(&server->synced.lock); if (connacks_available) { struct aws_io_message *connack_msg = aws_channel_acquire_message_from_pool(server->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, 256); struct aws_mqtt_packet_connack conn_ack; err |= aws_mqtt_packet_connack_init(&conn_ack, false, AWS_MQTT_CONNECT_ACCEPTED); err |= aws_mqtt_packet_connack_encode(&connack_msg->message_data, &conn_ack); if (aws_channel_slot_send_message(server->slot, connack_msg, AWS_CHANNEL_DIR_WRITE)) { err |= 1; AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "Failed to send connack with error %d", aws_last_error()); } } break; } case AWS_MQTT_PACKET_DISCONNECT: AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "server, DISCONNECT received"); err |= aws_channel_shutdown(server->slot->channel, AWS_OP_SUCCESS); break; case AWS_MQTT_PACKET_PINGREQ: { AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "server, PINGREQ received"); size_t ping_resp_available = 0; aws_mutex_lock(&server->synced.lock); ping_resp_available = server->synced.ping_resp_avail > 0 ? 
server->synced.ping_resp_avail-- : 0; server->synced.ping_received += 1; aws_mutex_unlock(&server->synced.lock); if (ping_resp_available) { struct aws_io_message *ping_resp = aws_channel_acquire_message_from_pool(server->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, 256); struct aws_mqtt_packet_connection packet; err |= aws_mqtt_packet_pingresp_init(&packet); err |= aws_mqtt_packet_connection_encode(&ping_resp->message_data, &packet); err |= aws_channel_slot_send_message(server->slot, ping_resp, AWS_CHANNEL_DIR_WRITE); } break; } case AWS_MQTT_PACKET_SUBSCRIBE: { AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "server, SUBSCRIBE received"); struct aws_mqtt_packet_subscribe subscribe_packet; err |= aws_mqtt_packet_subscribe_init(&subscribe_packet, server->handler.alloc, 0); err |= aws_mqtt_packet_subscribe_decode(message_cur, &subscribe_packet); aws_mutex_lock(&server->synced.lock); bool auto_ack = server->synced.auto_ack; aws_mutex_unlock(&server->synced.lock); if (auto_ack) { struct aws_io_message *suback_msg = aws_channel_acquire_message_from_pool(server->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, 256); struct aws_mqtt_packet_suback suback; err |= aws_mqtt_packet_suback_init(&suback, server->handler.alloc, subscribe_packet.packet_identifier); const size_t num_filters = aws_array_list_length(&subscribe_packet.topic_filters); for (size_t i = 0; i < num_filters; ++i) { err |= aws_mqtt_packet_suback_add_return_code(&suback, AWS_MQTT_QOS_EXACTLY_ONCE); } err |= aws_mqtt_packet_suback_encode(&suback_msg->message_data, &suback); err |= aws_channel_slot_send_message(server->slot, suback_msg, AWS_CHANNEL_DIR_WRITE); aws_mqtt_packet_suback_clean_up(&suback); } aws_mqtt_packet_subscribe_clean_up(&subscribe_packet); break; } case AWS_MQTT_PACKET_UNSUBSCRIBE: { AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "server, UNSUBSCRIBE received"); struct aws_mqtt_packet_unsubscribe unsubscribe_packet; err |= aws_mqtt_packet_unsubscribe_init(&unsubscribe_packet, server->handler.alloc, 0); err |= aws_mqtt_packet_unsubscribe_decode(message_cur, &unsubscribe_packet); aws_mutex_lock(&server->synced.lock); bool auto_ack = server->synced.auto_ack; aws_mutex_unlock(&server->synced.lock); if (auto_ack) { struct aws_io_message *unsuback_msg = aws_channel_acquire_message_from_pool(server->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, 256); struct aws_mqtt_packet_ack unsuback; err |= aws_mqtt_packet_unsuback_init(&unsuback, unsubscribe_packet.packet_identifier); err |= aws_mqtt_packet_ack_encode(&unsuback_msg->message_data, &unsuback); err |= aws_channel_slot_send_message(server->slot, unsuback_msg, AWS_CHANNEL_DIR_WRITE); } aws_mqtt_packet_unsubscribe_clean_up(&unsubscribe_packet); break; } case AWS_MQTT_PACKET_PUBLISH: { AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "server, PUBLISH received"); struct aws_mqtt_packet_publish publish_packet; err |= aws_mqtt_packet_publish_decode(message_cur, &publish_packet); aws_mutex_lock(&server->synced.lock); bool auto_ack = server->synced.auto_ack; aws_mutex_unlock(&server->synced.lock); uint8_t qos = (publish_packet.fixed_header.flags >> 1) & 0x3; // Do not send puback if qos0 if (auto_ack && qos != 0) { struct aws_io_message *puback_msg = aws_channel_acquire_message_from_pool(server->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, 256); struct aws_mqtt_packet_ack puback; err |= aws_mqtt_packet_puback_init(&puback, publish_packet.packet_identifier); err |= aws_mqtt_packet_ack_encode(&puback_msg->message_data, &puback); err |= aws_channel_slot_send_message(server->slot, puback_msg, AWS_CHANNEL_DIR_WRITE); } 
break; } case AWS_MQTT_PACKET_PUBACK: AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "server, PUBACK received"); aws_mutex_lock(&server->synced.lock); server->synced.pubacks_received++; aws_mutex_unlock(&server->synced.lock); err |= aws_condition_variable_notify_one(&server->synced.cvar); break; default: break; } if (err) { AWS_LOGF_DEBUG(MOCK_LOG_SUBJECT, "server, process packet failed, the package type is %d", packet_type); /* crash */ AWS_FATAL_ASSERT(!err); } return AWS_OP_SUCCESS; } static int s_mqtt_mock_server_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct mqtt_mock_server_handler *server = handler->impl; struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); if (server->pending_packet.len) { size_t to_read = server->pending_packet.capacity - server->pending_packet.len; bool packet_complete = true; if (to_read > message_cursor.len) { to_read = message_cursor.len; packet_complete = false; } struct aws_byte_cursor chunk = aws_byte_cursor_advance(&message_cursor, to_read); aws_byte_buf_write_from_whole_cursor(&server->pending_packet, chunk); if (!packet_complete) { goto cleanup; } struct aws_byte_cursor packet_data = aws_byte_cursor_from_buf(&server->pending_packet); s_mqtt_mock_server_handler_process_packet(server, &packet_data); } while (message_cursor.len) { struct aws_byte_cursor header_decode = message_cursor; struct aws_mqtt_fixed_header packet_header; AWS_ZERO_STRUCT(packet_header); int result = aws_mqtt_fixed_header_decode(&header_decode, &packet_header); const size_t fixed_header_size = message_cursor.len - header_decode.len; if (result) { if (aws_last_error() == AWS_ERROR_SHORT_BUFFER) { AWS_FATAL_ASSERT( packet_header.remaining_length > 0 && "need to handle getting partial packet, but not enough to know total length"); aws_byte_buf_init( &server->pending_packet, server->handler.alloc, fixed_header_size + packet_header.remaining_length); aws_byte_buf_write_from_whole_cursor(&server->pending_packet, message_cursor); aws_reset_error(); goto cleanup; } } struct aws_byte_cursor packet_data = aws_byte_cursor_advance(&message_cursor, fixed_header_size + packet_header.remaining_length); s_mqtt_mock_server_handler_process_packet(server, &packet_data); } cleanup: aws_mem_release(message->allocator, message); aws_channel_slot_increment_read_window(slot, message->message_data.len); return AWS_OP_SUCCESS; } struct mqtt_mock_server_send_args { struct aws_channel_task task; struct mqtt_mock_server_handler *server; struct aws_byte_buf data; }; static void s_mqtt_send_in_thread(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; struct mqtt_mock_server_send_args *send_args = arg; if (status == AWS_TASK_STATUS_RUN_READY) { struct aws_io_message *msg = aws_channel_acquire_message_from_pool( send_args->server->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, send_args->data.len); AWS_FATAL_ASSERT(aws_byte_buf_write_from_whole_buffer(&msg->message_data, send_args->data)); AWS_FATAL_ASSERT(0 == aws_channel_slot_send_message(send_args->server->slot, msg, AWS_CHANNEL_DIR_WRITE)); } aws_byte_buf_clean_up(&send_args->data); aws_mem_release(send_args->server->handler.alloc, send_args); } static struct mqtt_mock_server_send_args *s_mqtt_send_args_create(struct mqtt_mock_server_handler *server) { struct mqtt_mock_server_send_args *args = aws_mem_calloc(server->handler.alloc, 1, sizeof(struct mqtt_mock_server_send_args)); 
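/*
 * Editorial comment: the args hold a channel task plus a byte buffer for the
 * encoded packet. Callers fill args->data and schedule the task with
 * aws_channel_schedule_task_now(), so s_mqtt_send_in_thread performs the actual
 * write on the channel's event-loop thread and then releases the args.
 */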
aws_channel_task_init(&args->task, s_mqtt_send_in_thread, args, "mqtt_mock_server_send_in_thread"); args->server = server; aws_byte_buf_init(&args->data, server->handler.alloc, 1024); return args; } int mqtt_mock_server_send_publish_by_id( struct aws_channel_handler *handler, uint16_t packet_id, struct aws_byte_cursor *topic, struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain) { struct mqtt_mock_server_handler *server = handler->impl; struct mqtt_mock_server_send_args *args = s_mqtt_send_args_create(server); struct aws_mqtt_packet_publish publish; ASSERT_SUCCESS(aws_mqtt_packet_publish_init(&publish, retain, qos, dup, *topic, packet_id, *payload)); ASSERT_SUCCESS(aws_mqtt_packet_publish_encode(&args->data, &publish)); aws_channel_schedule_task_now(server->slot->channel, &args->task); return AWS_OP_SUCCESS; } int mqtt_mock_server_send_publish( struct aws_channel_handler *handler, struct aws_byte_cursor *topic, struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain) { struct mqtt_mock_server_handler *server = handler->impl; aws_mutex_lock(&server->synced.lock); uint16_t id = qos == 0 ? 0 : ++server->synced.last_packet_id; aws_mutex_unlock(&server->synced.lock); return mqtt_mock_server_send_publish_by_id(handler, id, topic, payload, dup, qos, retain); } int mqtt_mock_server_send_single_suback( struct aws_channel_handler *handler, uint16_t packet_id, enum aws_mqtt_qos return_code) { struct mqtt_mock_server_handler *server = handler->impl; struct mqtt_mock_server_send_args *args = s_mqtt_send_args_create(server); struct aws_mqtt_packet_suback suback; ASSERT_SUCCESS(aws_mqtt_packet_suback_init(&suback, server->handler.alloc, packet_id)); ASSERT_SUCCESS(aws_mqtt_packet_suback_add_return_code(&suback, return_code)); ASSERT_SUCCESS(aws_mqtt_packet_suback_encode(&args->data, &suback)); aws_mqtt_packet_suback_clean_up(&suback); aws_channel_schedule_task_now(server->slot->channel, &args->task); return AWS_OP_SUCCESS; } static int s_mqtt_mock_server_handler_process_write_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { (void)handler; (void)slot; (void)message; return AWS_OP_SUCCESS; } static int s_mqtt_mock_server_handler_increment_read_window( struct aws_channel_handler *handler, struct aws_channel_slot *slot, size_t size) { (void)handler; aws_channel_slot_increment_read_window(slot, size); return AWS_OP_SUCCESS; } void mqtt_mock_server_handler_update_slot(struct aws_channel_handler *handler, struct aws_channel_slot *slot) { struct mqtt_mock_server_handler *server = handler->impl; server->slot = slot; } static int s_mqtt_mock_server_handler_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { (void)handler; return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } static size_t s_mqtt_mock_server_handler_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return SIZE_MAX; } static size_t s_mqtt_mock_server_handler_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static void s_mqtt_mock_server_handler_destroy(struct aws_channel_handler *handler) { (void)handler; } static struct aws_channel_handler_vtable s_mqtt_mock_server_handler_vtable = { .process_read_message = s_mqtt_mock_server_handler_process_read_message, .process_write_message = 
s_mqtt_mock_server_handler_process_write_message, .increment_read_window = s_mqtt_mock_server_handler_increment_read_window, .shutdown = s_mqtt_mock_server_handler_shutdown, .initial_window_size = s_mqtt_mock_server_handler_initial_window_size, .message_overhead = s_mqtt_mock_server_handler_message_overhead, .destroy = s_mqtt_mock_server_handler_destroy, }; struct aws_channel_handler *new_mqtt_mock_server(struct aws_allocator *allocator) { struct mqtt_mock_server_handler *server = aws_mem_calloc(allocator, 1, sizeof(struct mqtt_mock_server_handler)); aws_array_list_init_dynamic(&server->decoded_packets, allocator, 4, sizeof(struct mqtt_decoded_packet *)); aws_array_list_init_dynamic(&server->synced.raw_packets, allocator, 4, sizeof(struct aws_byte_buf)); server->handler.impl = server; server->handler.vtable = &s_mqtt_mock_server_handler_vtable; server->handler.alloc = allocator; server->synced.ping_resp_avail = SIZE_MAX; server->synced.connacks_avail = SIZE_MAX; server->synced.auto_ack = true; aws_mutex_init(&server->synced.lock); aws_condition_variable_init(&server->synced.cvar); return &server->handler; } void destroy_mqtt_mock_server(struct aws_channel_handler *handler) { struct mqtt_mock_server_handler *server = handler->impl; for (size_t i = 0; i < aws_array_list_length(&server->decoded_packets); ++i) { struct mqtt_decoded_packet *packet = NULL; aws_array_list_get_at(&server->decoded_packets, &packet, i); s_mqtt_decoded_packet_destroy(packet); } aws_array_list_clean_up(&server->decoded_packets); for (size_t i = 0; i < aws_array_list_length(&server->synced.raw_packets); ++i) { struct aws_byte_buf *byte_buf_ptr = NULL; aws_array_list_get_at_ptr(&server->synced.raw_packets, (void **)&byte_buf_ptr, i); aws_byte_buf_clean_up(byte_buf_ptr); } aws_array_list_clean_up(&server->synced.raw_packets); aws_mutex_clean_up(&server->synced.lock); aws_condition_variable_clean_up(&server->synced.cvar); aws_mem_release(handler->alloc, server); } void mqtt_mock_server_set_max_ping_resp(struct aws_channel_handler *handler, size_t max_ping) { struct mqtt_mock_server_handler *server = handler->impl; aws_mutex_lock(&server->synced.lock); server->synced.ping_resp_avail = max_ping; aws_mutex_unlock(&server->synced.lock); } void mqtt_mock_server_set_max_connack(struct aws_channel_handler *handler, size_t connack_avail) { struct mqtt_mock_server_handler *server = handler->impl; aws_mutex_lock(&server->synced.lock); server->synced.connacks_avail = connack_avail; aws_mutex_unlock(&server->synced.lock); } void mqtt_mock_server_disable_auto_ack(struct aws_channel_handler *handler) { struct mqtt_mock_server_handler *server = handler->impl; aws_mutex_lock(&server->synced.lock); server->synced.auto_ack = false; aws_mutex_unlock(&server->synced.lock); } void mqtt_mock_server_enable_auto_ack(struct aws_channel_handler *handler) { struct mqtt_mock_server_handler *server = handler->impl; aws_mutex_lock(&server->synced.lock); server->synced.auto_ack = true; aws_mutex_unlock(&server->synced.lock); } struct mqtt_mock_server_ack_args { struct aws_channel_task task; struct aws_mqtt_packet_ack ack; struct mqtt_mock_server_handler *server; }; static void s_send_ack_in_thread(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) { (void)channel_task; (void)status; struct mqtt_mock_server_ack_args *ack_args = arg; struct aws_io_message *msg = aws_channel_acquire_message_from_pool(ack_args->server->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, 256); AWS_FATAL_ASSERT(0 == 
aws_mqtt_packet_ack_encode(&msg->message_data, &ack_args->ack)); AWS_FATAL_ASSERT(0 == aws_channel_slot_send_message(ack_args->server->slot, msg, AWS_CHANNEL_DIR_WRITE)); aws_mem_release(ack_args->server->handler.alloc, ack_args); } static int s_send_ack(struct aws_channel_handler *handler, uint16_t packet_id, enum aws_mqtt_packet_type type) { struct mqtt_mock_server_handler *server = handler->impl; struct mqtt_mock_server_ack_args *args = aws_mem_calloc(server->handler.alloc, 1, sizeof(struct mqtt_mock_server_ack_args)); args->server = server; aws_channel_task_init(&args->task, s_send_ack_in_thread, args, "send ack in thread"); switch (type) { case AWS_MQTT_PACKET_PUBACK: ASSERT_SUCCESS(aws_mqtt_packet_puback_init(&args->ack, packet_id)); break; case AWS_MQTT_PACKET_UNSUBACK: ASSERT_SUCCESS(aws_mqtt_packet_unsuback_init(&args->ack, packet_id)); break; default: AWS_FATAL_ASSERT(0); break; } aws_channel_schedule_task_now(server->slot->channel, &args->task); return AWS_OP_SUCCESS; } int mqtt_mock_server_send_unsuback(struct aws_channel_handler *handler, uint16_t packet_id) { return s_send_ack(handler, packet_id, AWS_MQTT_PACKET_UNSUBACK); } int mqtt_mock_server_send_puback(struct aws_channel_handler *handler, uint16_t packet_id) { return s_send_ack(handler, packet_id, AWS_MQTT_PACKET_PUBACK); } struct puback_waiter { struct mqtt_mock_server_handler *server; size_t wait_for_count; }; static bool s_is_pubacks_complete(void *arg) { struct puback_waiter *waiter = arg; return waiter->server->synced.pubacks_received >= waiter->wait_for_count; } void mqtt_mock_server_wait_for_pubacks(struct aws_channel_handler *handler, size_t puback_count) { struct mqtt_mock_server_handler *server = handler->impl; struct puback_waiter waiter; waiter.server = server; waiter.wait_for_count = puback_count; aws_mutex_lock(&server->synced.lock); AWS_FATAL_ASSERT( 0 == aws_condition_variable_wait_for_pred( &server->synced.cvar, &server->synced.lock, CVAR_TIMEOUT, s_is_pubacks_complete, &waiter)); aws_mutex_unlock(&server->synced.lock); } size_t mqtt_mock_server_decoded_packets_count(struct aws_channel_handler *handler) { struct mqtt_mock_server_handler *server = handler->impl; size_t count = aws_array_list_length(&server->decoded_packets); return count; } struct mqtt_decoded_packet *mqtt_mock_server_get_decoded_packet_by_index( struct aws_channel_handler *handler, size_t i) { struct mqtt_mock_server_handler *server = handler->impl; AWS_FATAL_ASSERT(mqtt_mock_server_decoded_packets_count(handler) > i); struct mqtt_decoded_packet *packet = NULL; aws_array_list_get_at(&server->decoded_packets, &packet, i); return packet; } struct mqtt_decoded_packet *mqtt_mock_server_get_latest_decoded_packet(struct aws_channel_handler *handler) { size_t packet_count = mqtt_mock_server_decoded_packets_count(handler); AWS_FATAL_ASSERT(packet_count > 0); return mqtt_mock_server_get_decoded_packet_by_index(handler, packet_count - 1); } struct mqtt_decoded_packet *mqtt_mock_server_find_decoded_packet_by_id( struct aws_channel_handler *handler, size_t search_start_idx, uint16_t packet_id, size_t *out_idx) { struct mqtt_mock_server_handler *server = handler->impl; size_t len = aws_array_list_length(&server->decoded_packets); AWS_FATAL_ASSERT(search_start_idx < len); for (size_t i = search_start_idx; i < len; i++) { struct mqtt_decoded_packet *packet = NULL; aws_array_list_get_at(&server->decoded_packets, &packet, i); if (packet->packet_identifier == packet_id) { if (out_idx) { *out_idx = i; } return packet; } } if (out_idx) { *out_idx = 
SIZE_MAX; } return NULL; } struct mqtt_decoded_packet *mqtt_mock_server_find_decoded_packet_by_type( struct aws_channel_handler *handler, size_t search_start_idx, enum aws_mqtt_packet_type type, size_t *out_idx) { struct mqtt_mock_server_handler *server = handler->impl; size_t len = aws_array_list_length(&server->decoded_packets); AWS_FATAL_ASSERT(search_start_idx < len); for (size_t i = search_start_idx; i < len; i++) { struct mqtt_decoded_packet *packet = NULL; aws_array_list_get_at(&server->decoded_packets, &packet, i); if (packet->type == type) { if (out_idx) { *out_idx = i; } return packet; } } if (out_idx) { *out_idx = SIZE_MAX; } return NULL; } int mqtt_mock_server_decode_packets(struct aws_channel_handler *handler) { struct mqtt_mock_server_handler *server = handler->impl; struct aws_allocator *alloc = handler->alloc; /* NOTE: if there's an error in this function we may not unlock, but don't care because * this is only called from main test thread which will fail if this errors */ aws_mutex_lock(&server->synced.lock); struct aws_array_list raw_packets = server->synced.raw_packets; size_t length = aws_array_list_length(&raw_packets); for (size_t index = server->synced.decoded_index; index < length; index++) { struct aws_byte_buf raw_packet = {0}; aws_array_list_get_at(&raw_packets, &raw_packet, index); struct aws_byte_cursor message_cur = aws_byte_cursor_from_buf(&raw_packet); struct mqtt_decoded_packet *packet = s_mqtt_decoded_packet_create(alloc); packet->index = index; packet->type = aws_mqtt_get_packet_type(message_cur.ptr); switch (packet->type) { case AWS_MQTT_PACKET_CONNECT: { struct aws_mqtt_packet_connect connect_packet; AWS_ZERO_STRUCT(connect_packet); ASSERT_SUCCESS(aws_mqtt_packet_connect_decode(&message_cur, &connect_packet)); packet->clean_session = connect_packet.clean_session; packet->has_will = connect_packet.has_will; packet->will_retain = connect_packet.will_retain; packet->has_password = connect_packet.has_password; packet->has_username = connect_packet.has_username; packet->keep_alive_timeout = connect_packet.keep_alive_timeout; packet->will_qos = connect_packet.will_qos; packet->client_identifier = connect_packet.client_identifier; if (packet->has_will) { packet->will_topic = connect_packet.will_topic; packet->will_message = connect_packet.will_message; } if (packet->has_username) { packet->username = connect_packet.username; } if (packet->has_password) { packet->password = connect_packet.password; } break; } case AWS_MQTT_PACKET_SUBSCRIBE: { struct aws_mqtt_packet_subscribe subscribe_packet; AWS_ZERO_STRUCT(subscribe_packet); ASSERT_SUCCESS(aws_mqtt_packet_subscribe_init(&subscribe_packet, alloc, 0)); ASSERT_SUCCESS(aws_mqtt_packet_subscribe_decode(&message_cur, &subscribe_packet)); packet->packet_identifier = subscribe_packet.packet_identifier; /* copy the array one by one for simplicity */ for (size_t i = 0; i < aws_array_list_length(&subscribe_packet.topic_filters); i++) { struct aws_mqtt_subscription val; aws_array_list_get_at(&subscribe_packet.topic_filters, &val, i); aws_array_list_push_back(&packet->sub_topic_filters, &val); } aws_mqtt_packet_subscribe_clean_up(&subscribe_packet); break; } case AWS_MQTT_PACKET_UNSUBSCRIBE: { struct aws_mqtt_packet_unsubscribe unsubscribe_packet; AWS_ZERO_STRUCT(unsubscribe_packet); ASSERT_SUCCESS(aws_mqtt_packet_unsubscribe_init(&unsubscribe_packet, alloc, 0)); ASSERT_SUCCESS(aws_mqtt_packet_unsubscribe_decode(&message_cur, &unsubscribe_packet)); packet->packet_identifier = unsubscribe_packet.packet_identifier; /* copy 
the array one by one for simplicity */ for (size_t i = 0; i < aws_array_list_length(&unsubscribe_packet.topic_filters); i++) { struct aws_byte_cursor val; aws_array_list_get_at(&unsubscribe_packet.topic_filters, &val, i); aws_array_list_push_back(&packet->unsub_topic_filters, &val); } aws_mqtt_packet_unsubscribe_clean_up(&unsubscribe_packet); break; } case AWS_MQTT_PACKET_PUBLISH: { struct aws_mqtt_packet_publish publish_packet; ASSERT_SUCCESS(aws_mqtt_packet_publish_decode(&message_cur, &publish_packet)); packet->packet_identifier = publish_packet.packet_identifier; packet->topic_name = publish_packet.topic_name; packet->publish_payload = publish_packet.payload; packet->duplicate = aws_mqtt_packet_publish_get_dup(&publish_packet); break; } case AWS_MQTT_PACKET_PUBACK: { struct aws_mqtt_packet_ack puback; ASSERT_SUCCESS(aws_mqtt_packet_ack_decode(&message_cur, &puback)); packet->packet_identifier = puback.packet_identifier; break; } case AWS_MQTT_PACKET_DISCONNECT: case AWS_MQTT_PACKET_PINGREQ: /* Nothing to decode, just record that type of packet has received */ break; default: AWS_FATAL_ASSERT(0 && "mock unsupported packet type decoded"); } aws_array_list_push_back(&server->decoded_packets, &packet); } server->synced.decoded_index = length; aws_mutex_unlock(&server->synced.lock); return AWS_OP_SUCCESS; } size_t mqtt_mock_server_get_ping_count(struct aws_channel_handler *handler) { struct mqtt_mock_server_handler *server = handler->impl; aws_mutex_lock(&server->synced.lock); size_t count = server->synced.ping_received; aws_mutex_unlock(&server->synced.lock); return count; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/mqtt_mock_server_handler.h000066400000000000000000000125741456575232400271520ustar00rootroot00000000000000#ifndef MQTT_MOCK_SERVER_HANDLER_H #define MQTT_MOCK_SERVER_HANDLER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include struct aws_channel_handler; struct aws_channel_slot; static const int MOCK_LOG_SUBJECT = 60000; struct mqtt_decoded_packet { enum aws_mqtt_packet_type type; struct aws_allocator *alloc; /* CONNECT */ bool clean_session; bool has_will; bool will_retain; bool has_password; bool has_username; uint16_t keep_alive_timeout; enum aws_mqtt_qos will_qos; struct aws_byte_cursor client_identifier; /* These cursors live with the received_message */ struct aws_byte_cursor will_topic; struct aws_byte_cursor will_message; struct aws_byte_cursor username; struct aws_byte_cursor password; /* PUBLISH SUBSCRIBE UNSUBSCRIBE */ uint16_t packet_identifier; struct aws_byte_cursor topic_name; /* PUBLISH topic */ struct aws_byte_cursor publish_payload; /* PUBLISH payload */ struct aws_array_list sub_topic_filters; /* list of aws_mqtt_subscription for SUBSCRIBE */ struct aws_array_list unsub_topic_filters; /* list of aws_byte_cursor for UNSUBSCRIBE */ bool duplicate; /* PUBLISH only */ /* index of the received packet, indicating when it was received by the server */ size_t index; }; struct aws_channel_handler *new_mqtt_mock_server(struct aws_allocator *allocator); void destroy_mqtt_mock_server(struct aws_channel_handler *handler); void mqtt_mock_server_handler_update_slot(struct aws_channel_handler *handler, struct aws_channel_slot *slot); /** * Mock server sends a publish packet back to the client */ int mqtt_mock_server_send_publish( struct aws_channel_handler *handler, struct aws_byte_cursor *topic, struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain); /** * Mock server sends a publish packet back to the client with a user-controlled packet id */ int mqtt_mock_server_send_publish_by_id( struct aws_channel_handler *handler, uint16_t packet_id, struct aws_byte_cursor *topic, struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain); /** * Set the max number of PINGRESP packets that the mock server will send back to the client */ void mqtt_mock_server_set_max_ping_resp(struct aws_channel_handler *handler, size_t max_ping); /** * Set the max number of CONNACK packets that the mock server will send back to the client */ void mqtt_mock_server_set_max_connack(struct aws_channel_handler *handler, size_t connack_avail); /** * Disable the automatic response (suback/unsuback/puback) to the client */ void mqtt_mock_server_disable_auto_ack(struct aws_channel_handler *handler); /** * Enable the automatic response (suback/unsuback/puback) to the client */ void mqtt_mock_server_enable_auto_ack(struct aws_channel_handler *handler); /** * Send a response back to the client for the given packet ID */ int mqtt_mock_server_send_unsuback(struct aws_channel_handler *handler, uint16_t packet_id); int mqtt_mock_server_send_puback(struct aws_channel_handler *handler, uint16_t packet_id); int mqtt_mock_server_send_single_suback( struct aws_channel_handler *handler, uint16_t packet_id, enum aws_mqtt_qos return_code); /** * Wait for puback_count PUBACK packets from the client */ void mqtt_mock_server_wait_for_pubacks(struct aws_channel_handler *handler, size_t puback_count); /** * Getters for decoded packets; call mqtt_mock_server_decode_packets first.
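 *
 * A minimal usage sketch (illustrative only; the functions are the ones declared in this header, but the
 * surrounding test shape is assumed):
 *
 *     mqtt_mock_server_decode_packets(handler);
 *     size_t count = mqtt_mock_server_decoded_packets_count(handler);
 *     struct mqtt_decoded_packet *latest = mqtt_mock_server_get_latest_decoded_packet(handler);
 *     struct mqtt_decoded_packet *sub =
 *         mqtt_mock_server_find_decoded_packet_by_type(handler, 0, AWS_MQTT_PACKET_SUBSCRIBE, NULL);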
*/ size_t mqtt_mock_server_decoded_packets_count(struct aws_channel_handler *handler); /** * Get the decoded packet by index */ struct mqtt_decoded_packet *mqtt_mock_server_get_decoded_packet_by_index(struct aws_channel_handler *handler, size_t i); /** * Get the most recently received decoded packet */ struct mqtt_decoded_packet *mqtt_mock_server_get_latest_decoded_packet(struct aws_channel_handler *handler); /** * Get a decoded packet by packet_id, starting from search_start_idx (inclusive). Note: there may be multiple packets with the same ID; this returns the earliest received one with that packet_id. If out_idx is not NULL, the index of the found packet is stored there; if no packet is found, it is set to SIZE_MAX and the return value is NULL. */ struct mqtt_decoded_packet *mqtt_mock_server_find_decoded_packet_by_id( struct aws_channel_handler *handler, size_t search_start_idx, uint16_t packet_id, size_t *out_idx); /** * Get a decoded packet by type, starting from search_start_idx (inclusive). Note: there may be multiple packets with the same type; this returns the earliest received one of that type. If out_idx is not NULL, the index of the found packet is stored there; if no packet is found, it is set to SIZE_MAX and the return value is NULL. */ struct mqtt_decoded_packet *mqtt_mock_server_find_decoded_packet_by_type( struct aws_channel_handler *handler, size_t search_start_idx, enum aws_mqtt_packet_type type, size_t *out_idx); /** * Run through all the received messages and decode them. */ int mqtt_mock_server_decode_packets(struct aws_channel_handler *handler); /** * Returns the number of PINGs the server has received */ size_t mqtt_mock_server_get_ping_count(struct aws_channel_handler *handler); #endif /* MQTT_MOCK_SERVER_HANDLER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/operation_statistics_test.c000066400000000000000000001660561456575232400274020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "mqtt_mock_server_handler.h" #include #include #include #include #include #include #include static const int TEST_LOG_SUBJECT = 60000; static const int ONE_SEC = 1000000000; struct received_publish_packet { struct aws_byte_buf topic; struct aws_byte_buf payload; bool dup; enum aws_mqtt_qos qos; bool retain; }; struct mqtt_connection_state_test { struct aws_allocator *allocator; struct aws_channel *server_channel; struct aws_channel_handler *mock_server; struct aws_client_bootstrap *client_bootstrap; struct aws_server_bootstrap *server_bootstrap; struct aws_event_loop_group *el_group; struct aws_host_resolver *host_resolver; struct aws_socket_endpoint endpoint; struct aws_socket *listener; struct aws_mqtt_client *mqtt_client; struct aws_mqtt_client_connection *mqtt_connection; struct aws_socket_options socket_options; bool session_present; bool connection_completed; bool client_disconnect_completed; bool server_disconnect_completed; bool connection_interrupted; bool connection_resumed; bool subscribe_completed; bool listener_destroyed; int interruption_error; int subscribe_complete_error; int op_complete_error; enum aws_mqtt_connect_return_code mqtt_return_code; int error; struct aws_condition_variable cvar; struct aws_mutex lock; /* any published messages from the mock server that you may not have subscribed to.
(Which should not happen in real life) */ struct aws_array_list any_published_messages; /* list of struct received_publish_packet */ size_t any_publishes_received; size_t expected_any_publishes; /* the published messages from mock server, that you did subscribe to. */ struct aws_array_list published_messages; /* list of struct received_publish_packet */ size_t publishes_received; size_t expected_publishes; /* The returned QoS from mock server */ struct aws_array_list qos_returned; /* list of uint_8 */ size_t ops_completed; size_t expected_ops_completed; }; static struct mqtt_connection_state_test test_data = {0}; /* ========== HELPER FUNCTIONS FOR THE TEST ========== */ static void s_operation_statistics_on_any_publish_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata); static void s_operation_statistics_on_incoming_channel_setup_fn( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct mqtt_connection_state_test *state_test_data = user_data; state_test_data->error = error_code; if (!error_code) { aws_mutex_lock(&state_test_data->lock); state_test_data->server_disconnect_completed = false; aws_mutex_unlock(&state_test_data->lock); AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "server channel setup completed"); state_test_data->server_channel = channel; struct aws_channel_slot *test_handler_slot = aws_channel_slot_new(channel); aws_channel_slot_insert_end(channel, test_handler_slot); mqtt_mock_server_handler_update_slot(state_test_data->mock_server, test_handler_slot); aws_channel_slot_set_handler(test_handler_slot, state_test_data->mock_server); } } static void s_operation_statistics_on_incoming_channel_shutdown_fn( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; (void)channel; struct mqtt_connection_state_test *state_test_data = user_data; aws_mutex_lock(&state_test_data->lock); state_test_data->server_disconnect_completed = true; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "server channel shutdown completed"); aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_operation_statistics_on_listener_destroy(struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; struct mqtt_connection_state_test *state_test_data = user_data; aws_mutex_lock(&state_test_data->lock); state_test_data->listener_destroyed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_operation_statistics_is_listener_destroyed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->listener_destroyed; } static void s_operation_statistics_wait_on_listener_cleanup(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_operation_statistics_is_listener_destroyed, state_test_data); aws_mutex_unlock(&state_test_data->lock); } static void s_operation_statistics_on_connection_interrupted( struct aws_mqtt_client_connection *connection, int error_code, void *userdata) { (void)connection; (void)error_code; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); 
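/* The completion flags and error codes in mqtt_connection_state_test are only written under this lock; the s_operation_statistics_wait_for_* helpers read them under the same lock while blocking on the condition variable that is notified below. */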
state_test_data->connection_interrupted = true; state_test_data->interruption_error = error_code; aws_mutex_unlock(&state_test_data->lock); AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "connection interrupted"); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_operation_statistics_on_connection_resumed( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; (void)return_code; (void)session_present; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "reconnect completed"); struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->connection_resumed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } /** sets up a unix domain socket server and socket options. Creates an mqtt connection configured to use * the domain socket. */ static int s_operation_statistics_setup_mqtt_server_fn(struct aws_allocator *allocator, void *ctx) { aws_mqtt_library_init(allocator); struct mqtt_connection_state_test *state_test_data = ctx; AWS_ZERO_STRUCT(*state_test_data); state_test_data->allocator = allocator; state_test_data->el_group = aws_event_loop_group_new_default(allocator, 1, NULL); state_test_data->mock_server = new_mqtt_mock_server(allocator); ASSERT_NOT_NULL(state_test_data->mock_server); state_test_data->server_bootstrap = aws_server_bootstrap_new(allocator, state_test_data->el_group); ASSERT_NOT_NULL(state_test_data->server_bootstrap); struct aws_socket_options socket_options = { .connect_timeout_ms = 100, .domain = AWS_SOCKET_LOCAL, }; state_test_data->socket_options = socket_options; ASSERT_SUCCESS(aws_condition_variable_init(&state_test_data->cvar)); ASSERT_SUCCESS(aws_mutex_init(&state_test_data->lock)); aws_socket_endpoint_init_local_address_for_test(&state_test_data->endpoint); struct aws_server_socket_channel_bootstrap_options server_bootstrap_options = { .bootstrap = state_test_data->server_bootstrap, .host_name = state_test_data->endpoint.address, .port = state_test_data->endpoint.port, .socket_options = &state_test_data->socket_options, .incoming_callback = s_operation_statistics_on_incoming_channel_setup_fn, .shutdown_callback = s_operation_statistics_on_incoming_channel_shutdown_fn, .destroy_callback = s_operation_statistics_on_listener_destroy, .user_data = state_test_data, }; state_test_data->listener = aws_server_bootstrap_new_socket_listener(&server_bootstrap_options); ASSERT_NOT_NULL(state_test_data->listener); struct aws_host_resolver_default_options resolver_options = { .el_group = state_test_data->el_group, .max_entries = 1, }; state_test_data->host_resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = state_test_data->el_group, .user_data = state_test_data, .host_resolver = state_test_data->host_resolver, }; state_test_data->client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); state_test_data->mqtt_client = aws_mqtt_client_new(allocator, state_test_data->client_bootstrap); state_test_data->mqtt_connection = aws_mqtt_client_connection_new(state_test_data->mqtt_client); ASSERT_NOT_NULL(state_test_data->mqtt_connection); ASSERT_SUCCESS(aws_mqtt_client_connection_set_connection_interruption_handlers( state_test_data->mqtt_connection, s_operation_statistics_on_connection_interrupted, state_test_data, s_operation_statistics_on_connection_resumed, 
state_test_data)); ASSERT_SUCCESS(aws_mqtt_client_connection_set_on_any_publish_handler( state_test_data->mqtt_connection, s_operation_statistics_on_any_publish_received, state_test_data)); ASSERT_SUCCESS(aws_array_list_init_dynamic( &state_test_data->published_messages, allocator, 4, sizeof(struct received_publish_packet))); ASSERT_SUCCESS(aws_array_list_init_dynamic( &state_test_data->any_published_messages, allocator, 4, sizeof(struct received_publish_packet))); ASSERT_SUCCESS(aws_array_list_init_dynamic(&state_test_data->qos_returned, allocator, 2, sizeof(uint8_t))); return AWS_OP_SUCCESS; } static void s_received_publish_packet_list_clean_up(struct aws_array_list *list) { for (size_t i = 0; i < aws_array_list_length(list); ++i) { struct received_publish_packet *val_ptr = NULL; aws_array_list_get_at_ptr(list, (void **)&val_ptr, i); aws_byte_buf_clean_up(&val_ptr->payload); aws_byte_buf_clean_up(&val_ptr->topic); } aws_array_list_clean_up(list); } static int s_operation_statistics_clean_up_mqtt_server_fn( struct aws_allocator *allocator, int setup_result, void *ctx) { (void)allocator; if (!setup_result) { struct mqtt_connection_state_test *state_test_data = ctx; s_received_publish_packet_list_clean_up(&state_test_data->published_messages); s_received_publish_packet_list_clean_up(&state_test_data->any_published_messages); aws_array_list_clean_up(&state_test_data->qos_returned); aws_mqtt_client_connection_release(state_test_data->mqtt_connection); aws_mqtt_client_release(state_test_data->mqtt_client); aws_client_bootstrap_release(state_test_data->client_bootstrap); aws_host_resolver_release(state_test_data->host_resolver); aws_server_bootstrap_destroy_socket_listener(state_test_data->server_bootstrap, state_test_data->listener); s_operation_statistics_wait_on_listener_cleanup(state_test_data); aws_server_bootstrap_release(state_test_data->server_bootstrap); aws_event_loop_group_release(state_test_data->el_group); destroy_mqtt_mock_server(state_test_data->mock_server); } aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static void s_operation_statistics_on_connection_complete_fn( struct aws_mqtt_client_connection *connection, int error_code, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); state_test_data->session_present = session_present; state_test_data->mqtt_return_code = return_code; state_test_data->error = error_code; state_test_data->connection_completed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_operation_statistics_is_connection_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->connection_completed; } static void s_operation_statistics_wait_for_connection_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_operation_statistics_is_connection_completed, state_test_data); state_test_data->connection_completed = false; aws_mutex_unlock(&state_test_data->lock); } void s_operation_statistics_on_disconnect_fn(struct aws_mqtt_client_connection *connection, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "disconnect completed"); aws_mutex_lock(&state_test_data->lock); 
state_test_data->client_disconnect_completed = true; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_operation_statistics_is_disconnect_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->client_disconnect_completed && state_test_data->server_disconnect_completed; } static void s_operation_statistics_wait_for_disconnect_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_operation_statistics_is_disconnect_completed, state_test_data); state_test_data->client_disconnect_completed = false; state_test_data->server_disconnect_completed = false; aws_mutex_unlock(&state_test_data->lock); } static void s_operation_statistics_on_any_publish_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; struct mqtt_connection_state_test *state_test_data = userdata; struct aws_byte_buf payload_cp; aws_byte_buf_init_copy_from_cursor(&payload_cp, state_test_data->allocator, *payload); struct aws_byte_buf topic_cp; aws_byte_buf_init_copy_from_cursor(&topic_cp, state_test_data->allocator, *topic); struct received_publish_packet received_packet = { .payload = payload_cp, .topic = topic_cp, .dup = dup, .qos = qos, .retain = retain, }; aws_mutex_lock(&state_test_data->lock); aws_array_list_push_back(&state_test_data->any_published_messages, &received_packet); state_test_data->any_publishes_received++; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_operation_statistics_on_publish_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; (void)topic; struct mqtt_connection_state_test *state_test_data = userdata; struct aws_byte_buf payload_cp; aws_byte_buf_init_copy_from_cursor(&payload_cp, state_test_data->allocator, *payload); struct aws_byte_buf topic_cp; aws_byte_buf_init_copy_from_cursor(&topic_cp, state_test_data->allocator, *topic); struct received_publish_packet received_packet = { .payload = payload_cp, .topic = topic_cp, .dup = dup, .qos = qos, .retain = retain, }; aws_mutex_lock(&state_test_data->lock); aws_array_list_push_back(&state_test_data->published_messages, &received_packet); state_test_data->publishes_received++; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_operation_statistics_on_suback( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)topic; struct mqtt_connection_state_test *state_test_data = userdata; aws_mutex_lock(&state_test_data->lock); if (!error_code) { aws_array_list_push_back(&state_test_data->qos_returned, &qos); } state_test_data->subscribe_completed = true; state_test_data->subscribe_complete_error = error_code; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static void s_on_op_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, 
void *userdata) { (void)connection; (void)packet_id; struct mqtt_connection_state_test *state_test_data = userdata; AWS_LOGF_DEBUG(TEST_LOG_SUBJECT, "pub op completed"); aws_mutex_lock(&state_test_data->lock); state_test_data->ops_completed++; state_test_data->op_complete_error = error_code; aws_mutex_unlock(&state_test_data->lock); aws_condition_variable_notify_one(&state_test_data->cvar); } static bool s_is_ops_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->ops_completed == state_test_data->expected_ops_completed; } static void s_wait_for_ops_completed(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_for_pred( &state_test_data->cvar, &state_test_data->lock, 10000000000, s_is_ops_completed, state_test_data); aws_mutex_unlock(&state_test_data->lock); } static bool s_operation_statistics_is_subscribe_completed(void *arg) { struct mqtt_connection_state_test *state_test_data = arg; return state_test_data->subscribe_completed; } static void s_operation_statistics_wait_for_subscribe_to_complete(struct mqtt_connection_state_test *state_test_data) { aws_mutex_lock(&state_test_data->lock); aws_condition_variable_wait_pred( &state_test_data->cvar, &state_test_data->lock, s_operation_statistics_is_subscribe_completed, state_test_data); state_test_data->subscribe_completed = false; aws_mutex_unlock(&state_test_data->lock); } /* ========== PUBLISH TESTS ========== */ /** * Make a connection, tell the server NOT to send Acks, publish and immediately check the statistics to make sure * it is incomplete, then wait a little bit and check that it was properly marked as UnAcked, then send the PubAck * confirm statistics are zero, and then disconnect */ static int s_test_mqtt_operation_statistics_simple_publish(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message"); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* Stop ACKS so we make sure the operation statistics has time to allow us to identify we sent a packet */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* We want to wait for 1 operation to complete */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Publish a packet */ uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* Wait a little bit to allow the code to put the packet into the socket from the queue, allowing it * to be unacked. 
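* (As a worked example of the size assertion below: the topic "/test/topic" is 11 bytes and the payload "Test Message" is 12 bytes, so the expected size is 11 + 12 + 2 + 2 = 27 bytes.)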
If we check right away, we may or may not see it in the un-acked statistics */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Make sure the sizes are correct and there is only one operation waiting * (The size of the topic, the size of the payload, 2 for the header, 2 for the packet ID) */ uint64_t expected_packet_size = pub_topic.len + payload_1.len + 4; struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(1, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(1, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); /* Send the PubAck and wait for the client to receive it */ mqtt_mock_server_send_puback(state_test_data->mock_server, packet_id_1); s_wait_for_ops_completed(state_test_data); /* Make sure the operation values are back to zero now that the publish went out */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_simple_publish, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_simple_publish, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) /** * Make five publishes offline, confirm they are in the incomplete statistics, make a connection to a server * that does not send ACKs, send ConnAck, confirm five publishes are in unacked statistics, send PubAcks, * confirm operation statistics are zero, and then disconnect */ static int s_test_mqtt_operation_statistics_offline_publish(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic_1"); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Test Message"); /* Stop ACKS so we make sure the operation statistics has time to allow us to identify we sent a packet */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); uint16_t pub_packet_id_1 = 0; uint16_t pub_packet_id_2 = 0; uint16_t pub_packet_id_3 = 0; uint16_t pub_packet_id_4 = 0; uint16_t pub_packet_id_5 = 0; /* Publish the five packets */ for (int i = 0; i < 5; i++) { uint16_t packet = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data); 
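/* For a QoS 1 publish the call returns the packet ID it assigned (0 indicates failure, which the assert below rules out); each ID is saved so the matching PUBACK can be sent once the connection is up. */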
ASSERT_TRUE(packet > 0); if (i == 0) { pub_packet_id_1 = packet; } else if (i == 1) { pub_packet_id_2 = packet; } else if (i == 2) { pub_packet_id_3 = packet; } else if (i == 3) { pub_packet_id_4 = packet; } else { pub_packet_id_5 = packet; } } /* Wait a little bit to make sure the client has processed them (and NOT put them in the un-acked statistics) */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Make sure the sizes are correct and there is five operations waiting * Each packet size = (The size of the topic, the size of the payload, 2 for the header, 2 for the packet ID) */ uint64_t expected_packet_size = (pub_topic.len + payload.len + 4) * 5; struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(5, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); // The UnAcked operations should be zero, because we are NOT connected ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* We want to wait for 5 operations to complete */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 5; aws_mutex_unlock(&state_test_data->lock); /* Send the PubAck for each packet and wait for the client to receive it */ mqtt_mock_server_send_puback(state_test_data->mock_server, pub_packet_id_1); mqtt_mock_server_send_puback(state_test_data->mock_server, pub_packet_id_2); mqtt_mock_server_send_puback(state_test_data->mock_server, pub_packet_id_3); mqtt_mock_server_send_puback(state_test_data->mock_server, pub_packet_id_4); mqtt_mock_server_send_puback(state_test_data->mock_server, pub_packet_id_5); s_wait_for_ops_completed(state_test_data); /* Make sure the operation values are back to zero now that the publish went out */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_offline_publish, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_offline_publish, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) /** * Make five publishes offline and confirm the operation statistics properly tracks them only in the incomplete * operations, before connecting and confirming post-connect they are also in unacked in the operation statistics. 
* Then disconnect and confirm the operation statistics are still correct post-disconnect */ static int s_test_mqtt_operation_statistics_disconnect_publish(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic_1"); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Test Message"); /* Stop ACKS so we make sure the operation statistics has time to allow us to identify we sent a packet */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* Publish the five packets */ for (int i = 0; i < 5; i++) { uint16_t packet = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data); ASSERT_TRUE(packet > 0); } /* Wait a little bit to make sure the client has processed them (and NOT put them in the un-acked statistics) */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Make sure the sizes are correct and there is five operations waiting * Each packet size = (The size of the topic, the size of the payload, 2 for the header, 2 for the packet ID) */ uint64_t expected_packet_size = (pub_topic.len + payload.len + 4) * 5; struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(5, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); // The UnAcked operations should be zero, because we are NOT connected ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* Wait a little bit to make sure the client has had a chance to put the publishes out */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Confirm the UnAcked operations are now correct as well */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(5, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(5, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); /* Wait a little bit just to make sure the client has fully processed the shutdown */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Confirm the operation statistics are still correctly tracking post-disconnect */ 
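/* (With clean_session set to false, the five unacknowledged QoS 1 publishes are kept by the client, so both statistics below are still expected to report 5 packets totalling 5 * (13 + 12 + 4) = 145 bytes.) */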
ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(5, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(5, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_disconnect_publish, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_disconnect_publish, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) /** * Makes a publish offline, checks operation statistics, connects to non-ACK sending server, checks operation * statistics, makes another publish while online, checks operation statistics, disconnects, makes another publish, and * finally checks operation statistics one last time. */ static int s_test_mqtt_operation_statistics_reconnect_publish(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic_1"); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Test Message"); /* Stop ACKS so we make sure the operation statistics has time to allow us to identify we sent a packet */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* First publish! 
*/ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint64_t pub_packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data); ASSERT_TRUE(pub_packet_id_1 > 0); s_wait_for_ops_completed(state_test_data); /* Make sure the sizes are correct and there is only one operation waiting * (The size of the topic, the size of the payload, 2 for the header, 2 for the packet ID) */ uint64_t expected_packet_size = (pub_topic.len + payload.len + 4); struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(1, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); // The UnAcked operations should be zero, because we are NOT connected ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* Wait a second to give the MQTT311 client time to move the offline publish to unacked */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Confirm the UnAcked operations are now correct as well */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(1, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(1, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); /* Second publish! */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint64_t pub_packet_id_2 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data); ASSERT_TRUE(pub_packet_id_2 > 0); s_wait_for_ops_completed(state_test_data); /* Confirm both publishes are correct across all statistics */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(2, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size * 2, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(2, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size * 2, operation_statistics.unacked_operation_size); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); /* Third publish!
*/ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); uint64_t pub_packet_id_3 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_on_op_complete, state_test_data); ASSERT_TRUE(pub_packet_id_3 > 0); s_wait_for_ops_completed(state_test_data); /* Confirm all three publishes are in the incomplete statistics, but only two are in unacked */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(3, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size * 3, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(2, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size * 2, operation_statistics.unacked_operation_size); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_reconnect_publish, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_reconnect_publish, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) /* ========== SUBSCRIBE TESTS ========== */ /** * Make a connection, tell the server NOT to send Acks, subscribe and check the statistics to make sure * it is incomplete, then wait a little bit and check that it was properly marked as UnAcked, then send the SubAck * confirm statistics are zero, and then disconnect */ static int s_test_mqtt_operation_statistics_simple_subscribe(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor sub_topic = aws_byte_cursor_from_c_str("/test/topic"); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* We want to wait for 1 operation */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Stop ACKS so we make sure the operation statistics has time to allow us to identify we sent a packet */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); // Send a subscribe packet uint16_t packet_id_1 = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_operation_statistics_on_publish_received, state_test_data, NULL, s_operation_statistics_on_suback, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* Wait a little bit to allow the code to put the packet into the socket from the queue, allowing it * to be unacked. 
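* (As a worked example of the size check below: the topic "/test/topic" is 11 bytes, plus 3 for the QoS byte and the topic-length MSB/LSB, plus 2 for the fixed header and 2 for the packet ID, i.e. 11 + 7 = 18 bytes.)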
If we check right away, we may or may not see it in the un-acked statistics */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Make sure the sizes are correct and there is only one operation waiting * (The size of the topic + 3 (QoS, MSB, LSB), 2 for the header, 2 for the packet ID) */ uint64_t expected_packet_size = sub_topic.len + 7; struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(1, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(1, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); /* Send the SubAck and wait for the client to get the ACK */ mqtt_mock_server_send_single_suback(state_test_data->mock_server, packet_id_1, AWS_MQTT_QOS_AT_LEAST_ONCE); s_wait_for_ops_completed(state_test_data); /* Make sure the operation statistics are empty */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_simple_subscribe, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_simple_subscribe, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) /* ========== UNSUBSCRIBE TESTS ========== */ /** * Make a connection, tell the server NOT to send Acks, unsubscribe and check the statistics to make sure * it is incomplete, then wait a little bit and check that it was properly marked as UnAcked, then send the UnsubAck, * confirm statistics are zero, and then disconnect */ static int s_test_mqtt_operation_statistics_simple_unsubscribe(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor unsub_topic = aws_byte_cursor_from_c_str("/test/topic"); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* We want to wait for 1 operation */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Stop ACKS so we make sure the operation statistics has time to allow us to identify we sent a packet */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* Send an unsubscribe packet */ uint16_t packet_id_1 =
aws_mqtt_client_connection_unsubscribe( state_test_data->mqtt_connection, &unsub_topic, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* Wait a little bit to allow the code to put the packet into the socket from the queue, allowing it * to be unacked. If we check right away, we may or may not see it in the un-acked statistics */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Make sure the sizes are correct and there is only one operation waiting * (The size of the topic, 2 for the header, 2 for the packet ID) */ uint64_t expected_packet_size = unsub_topic.len + 4; struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(1, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(1, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); /* Send the UnsubAck and wait for the client to get the ACK */ mqtt_mock_server_send_unsuback(state_test_data->mock_server, packet_id_1); s_wait_for_ops_completed(state_test_data); /* Make sure the operation statistics are empty */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_simple_unsubscribe, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_simple_unsubscribe, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) /* ========== RESUBSCRIBE TESTS ========== */ /** * Subscribe to multiple topics prior to connection, make a CONNECT, unsubscribe to a topic, disconnect with the broker, * make a connection with clean_session set to true, then call resubscribe (without the server being able to send ACKs) * and confirm the operation statistics size is correct, then resubscribe and finally disconnect. 
*/ static int s_test_mqtt_operation_statistics_simple_resubscribe(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = false, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor sub_topic_1 = aws_byte_cursor_from_c_str("/test/topic1"); struct aws_byte_cursor sub_topic_2 = aws_byte_cursor_from_c_str("/test/topic2"); struct aws_byte_cursor sub_topic_3 = aws_byte_cursor_from_c_str("/test/topic3"); /* We want to wait for 3 operations */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 3; aws_mutex_unlock(&state_test_data->lock); /* Subscribe to the three topics */ uint16_t sub_packet_id_1 = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic_1, AWS_MQTT_QOS_AT_LEAST_ONCE, s_operation_statistics_on_publish_received, state_test_data, NULL, s_operation_statistics_on_suback, state_test_data); ASSERT_TRUE(sub_packet_id_1 > 0); uint16_t sub_packet_id_2 = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic_2, AWS_MQTT_QOS_AT_LEAST_ONCE, s_operation_statistics_on_publish_received, state_test_data, NULL, s_operation_statistics_on_suback, state_test_data); ASSERT_TRUE(sub_packet_id_2 > 0); uint16_t sub_packet_id_3 = aws_mqtt_client_connection_subscribe( state_test_data->mqtt_connection, &sub_topic_3, AWS_MQTT_QOS_AT_LEAST_ONCE, s_operation_statistics_on_publish_received, state_test_data, NULL, s_operation_statistics_on_suback, state_test_data); ASSERT_TRUE(sub_packet_id_3 > 0); /* Wait a little bit to allow the code to put the packet into the socket from the queue, allowing it * to be unacked. 
If we check right away, we may or may not see it in the un-acked statistics */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Confirm the 3 subscribes are both pending and unacked, and confirm their byte size * The size = each subscribe: 4 (fixed header + packet ID) + topic filter + 3 (QoS, MSB and LSB length) */ uint64_t expected_packet_size = 12; // fixed packet headers and IDs expected_packet_size += sub_topic_1.len + 3; expected_packet_size += sub_topic_2.len + 3; expected_packet_size += sub_topic_3.len + 3; /* Check the size (Note: UnAcked will be ZERO because we are not connected) */ struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(3, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* Wait for the subscribes to complete */ s_wait_for_ops_completed(state_test_data); /* We want to wait for 1 operation */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* unsubscribe to the first topic */ uint16_t unsub_packet_id = aws_mqtt_client_connection_unsubscribe( state_test_data->mqtt_connection, &sub_topic_1, s_on_op_complete, state_test_data); ASSERT_TRUE(unsub_packet_id > 0); s_wait_for_ops_completed(state_test_data); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); /* Note: The client is still subscribed to both topic_2 and topic_3 */ /* Reconnect to the same server */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* Get all the packets out of the way */ ASSERT_SUCCESS(mqtt_mock_server_decode_packets(state_test_data->mock_server)); // Stop ACKs - we want to determine the size mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); /* Resubscribes to topic_2 & topic_3 (Note: we do not need a callback for the purpose of this test) */ uint16_t resub_packet_id = aws_mqtt_resubscribe_existing_topics(state_test_data->mqtt_connection, NULL, state_test_data); ASSERT_TRUE(resub_packet_id > 0); /* Wait a little bit to allow the code to put the packet into the socket from the queue, allowing it * to be unacked. 
If we check right away, we may or may not see it in the un-acked statistics */ aws_thread_current_sleep((uint64_t)ONE_SEC); // Make sure the resubscribe packet size is correct and there is only one resubscribe waiting // The size = 4 (fixed header + packet ID) + [for each topic](topic filter size + 3 (QoS, MSB and LSB length)) expected_packet_size = 4; expected_packet_size += sub_topic_2.len + 3; expected_packet_size += sub_topic_3.len + 3; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(1, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(1, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); /* Send the resubscribe */ mqtt_mock_server_send_single_suback(state_test_data->mock_server, resub_packet_id, AWS_MQTT_QOS_AT_LEAST_ONCE); s_operation_statistics_wait_for_subscribe_to_complete(state_test_data); /* Enable ACKs again, and then disconnect */ mqtt_mock_server_enable_auto_ack(state_test_data->mock_server); ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_simple_resubscribe, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_simple_resubscribe, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) /* ========== OTHER TESTS ========== */ static void s_test_operation_statistics_simple_callback( struct aws_mqtt_client_connection_311_impl *connection, void *userdata) { struct aws_atomic_var *statistics_count = (struct aws_atomic_var *)userdata; aws_atomic_fetch_add(statistics_count, 1); // Confirm we can get the operation statistics from the callback struct aws_mqtt_connection_operation_statistics operation_statistics; aws_mqtt_client_connection_get_stats(&connection->base, &operation_statistics); (void)operation_statistics; } /** * Tests the operation statistics callback to make sure it is being called as expected. This is a very simple * test that just ensures the callback is being called multiple times AND that you can access the operation * statistics from within the callback. 
*/ static int s_test_mqtt_operation_statistics_simple_callback(struct aws_allocator *allocator, void *ctx) { (void)allocator; struct mqtt_connection_state_test *state_test_data = ctx; struct aws_mqtt_connection_options connection_options = { .user_data = state_test_data, .clean_session = true, .client_id = aws_byte_cursor_from_c_str("client1234"), .host_name = aws_byte_cursor_from_c_str(state_test_data->endpoint.address), .socket_options = &state_test_data->socket_options, .on_connection_complete = s_operation_statistics_on_connection_complete_fn, }; struct aws_byte_cursor pub_topic = aws_byte_cursor_from_c_str("/test/topic"); struct aws_byte_cursor payload_1 = aws_byte_cursor_from_c_str("Test Message"); /* Connect */ ASSERT_SUCCESS(aws_mqtt_client_connection_connect(state_test_data->mqtt_connection, &connection_options)); s_operation_statistics_wait_for_connection_to_complete(state_test_data); /* Set the operation statistics callback */ struct aws_atomic_var statistics_count; aws_atomic_store_int(&statistics_count, 0); aws_mqtt_client_connection_set_on_operation_statistics_handler( state_test_data->mqtt_connection->impl, s_test_operation_statistics_simple_callback, &statistics_count); // /* Stop ACKS so we make sure the operation statistics has time to allow us to identify we sent a packet */ mqtt_mock_server_disable_auto_ack(state_test_data->mock_server); // /* We want to wait for 1 operation to complete */ aws_mutex_lock(&state_test_data->lock); state_test_data->expected_ops_completed = 1; aws_mutex_unlock(&state_test_data->lock); /* Publish a packet */ uint16_t packet_id_1 = aws_mqtt_client_connection_publish( state_test_data->mqtt_connection, &pub_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload_1, s_on_op_complete, state_test_data); ASSERT_TRUE(packet_id_1 > 0); /* Wait a little bit to allow the code to put the packet into the socket from the queue, allowing it * to be unacked. 
If we check right away, we may or may not see it in the un-acked statistics */ aws_thread_current_sleep((uint64_t)ONE_SEC); /* Make sure the sizes are correct and there is only one operation waiting * (The size of the topic, the size of the payload, 2 for the header, 2 for the packet ID) */ uint64_t expected_packet_size = pub_topic.len + payload_1.len + 4; struct aws_mqtt_connection_operation_statistics operation_statistics; ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(1, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(1, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(expected_packet_size, operation_statistics.unacked_operation_size); /* Assert the callback was called twice (first for putting in incomplete, second for putting in unacked) */ ASSERT_INT_EQUALS(2, aws_atomic_load_int(&statistics_count)); /* Send the PubAck and wait for the client to receive it */ mqtt_mock_server_send_puback(state_test_data->mock_server, packet_id_1); s_wait_for_ops_completed(state_test_data); // /* Assert the callback was called */ aws_thread_current_sleep((uint64_t)ONE_SEC); ASSERT_INT_EQUALS(3, aws_atomic_load_int(&statistics_count)); /* Make sure the operation values are back to zero now that the publish went out */ ASSERT_SUCCESS(aws_mqtt_client_connection_get_stats(state_test_data->mqtt_connection, &operation_statistics)); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.incomplete_operation_size); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_count); ASSERT_INT_EQUALS(0, operation_statistics.unacked_operation_size); /* Disconnect */ ASSERT_SUCCESS(aws_mqtt_client_connection_disconnect( state_test_data->mqtt_connection, s_operation_statistics_on_disconnect_fn, state_test_data)); s_operation_statistics_wait_for_disconnect_to_complete(state_test_data); return AWS_OP_SUCCESS; } AWS_TEST_CASE_FIXTURE( mqtt_operation_statistics_simple_callback, s_operation_statistics_setup_mqtt_server_fn, s_test_mqtt_operation_statistics_simple_callback, s_operation_statistics_clean_up_mqtt_server_fn, &test_data) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/packet_encoding_test.c000066400000000000000000001074141456575232400262360ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif enum { S_BUFFER_SIZE = 128 }; struct packet_test_fixture; /* Function type used to init and cleanup a fixture */ typedef int(packet_init_fn)(struct packet_test_fixture *); /* Function used to encode a packet (this should be set to a function from packets.h) */ typedef int(packet_encode_fn)(struct aws_byte_buf *, void *); /* Function used to decode a packet (this should be set to a function from packets.h) */ typedef int(packet_decode_fn)(struct aws_byte_cursor *, void *); /* Function used to check if two packets are equal */ typedef bool(packet_eq_fn)(void *, void *, size_t); /* Helper for comparing the fixed headers of packets */ static bool s_fixed_header_eq(struct aws_mqtt_fixed_header *l, struct aws_mqtt_fixed_header *r) { return l->packet_type == r->packet_type && l->flags == r->flags && l->remaining_length == r->remaining_length; } /* Default packet compare function, checks headers then memcmps the rest */ static bool s_packet_eq_default(void *a, void *b, size_t size) { static const size_t HEADER_SIZE = sizeof(struct aws_mqtt_fixed_header); return s_fixed_header_eq(a, b) && memcmp((uint8_t *)a + HEADER_SIZE, (uint8_t *)b + HEADER_SIZE, size - HEADER_SIZE) == 0; } /* Contains all of the information required to run a packet's test case */ struct packet_test_fixture { enum aws_mqtt_packet_type type; size_t size; packet_init_fn *init; packet_encode_fn *encode; packet_decode_fn *decode; packet_init_fn *teardown; packet_eq_fn *equal; struct aws_allocator *allocator; void *in_packet; void *out_packet; struct aws_byte_buf buffer; }; static int s_packet_test_before(struct aws_allocator *allocator, void *ctx) { struct packet_test_fixture *fixture = ctx; fixture->allocator = allocator; /* Setup the fixture */ fixture->in_packet = aws_mem_acquire(allocator, fixture->size); ASSERT_NOT_NULL(fixture->in_packet); memset(fixture->in_packet, 0, fixture->size); fixture->out_packet = aws_mem_acquire(allocator, fixture->size); ASSERT_NOT_NULL(fixture->out_packet); memset(fixture->out_packet, 0, fixture->size); return AWS_OP_SUCCESS; } static int s_packet_test_run(struct aws_allocator *allocator, void *ctx) { struct packet_test_fixture *fixture = ctx; aws_byte_buf_init(&fixture->buffer, allocator, S_BUFFER_SIZE); /* Init the in_packet & buffer */ ASSERT_SUCCESS(fixture->init(fixture)); /* Encode */ /* Create the output buffer */ struct aws_byte_buf output_buffer; ASSERT_SUCCESS(aws_byte_buf_init(&output_buffer, allocator, S_BUFFER_SIZE)); /* Encode the packet */ ASSERT_SUCCESS(fixture->encode(&output_buffer, fixture->in_packet)); /* Compare the buffers */ ASSERT_BIN_ARRAYS_EQUALS(fixture->buffer.buffer, fixture->buffer.len, output_buffer.buffer, output_buffer.len); aws_byte_buf_clean_up(&output_buffer); /* Decode */ /* Decode the buffer */ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&fixture->buffer); ASSERT_SUCCESS(fixture->decode(&cursor, fixture->out_packet)); /* Compare the packets */ if (fixture->equal) { ASSERT_TRUE(fixture->equal(fixture->out_packet, fixture->in_packet, fixture->size)); } else { ASSERT_TRUE(s_packet_eq_default(fixture->out_packet, fixture->in_packet, fixture->size)); } return AWS_OP_SUCCESS; } static int s_packet_test_after(struct aws_allocator *allocator, int setup_result, void *ctx) { (void)setup_result; struct packet_test_fixture *fixture = ctx; /* Tear down the packet & buffer */ if (fixture->teardown) { 
fixture->teardown(fixture); } /* Tear down the fixture */ aws_mem_release(allocator, fixture->in_packet); aws_mem_release(allocator, fixture->out_packet); aws_byte_buf_clean_up(&fixture->buffer); return AWS_OP_SUCCESS; } #define PACKET_TEST_NAME(e_type, t_name, s_name, i, t, e) \ static struct packet_test_fixture mqtt_packet_##t_name##_fixture = { \ .type = AWS_MQTT_PACKET_##e_type, \ .size = sizeof(struct aws_mqtt_packet_##s_name), \ .init = (i), \ .encode = (packet_encode_fn *)&aws_mqtt_packet_##s_name##_encode, \ .decode = (packet_decode_fn *)&aws_mqtt_packet_##s_name##_decode, \ .teardown = (t), \ .equal = (e), \ }; \ AWS_TEST_CASE_FIXTURE( \ mqtt_packet_##t_name, \ s_packet_test_before, \ s_packet_test_run, \ s_packet_test_after, \ &mqtt_packet_##t_name##_fixture) #define PACKET_TEST(e_type, s_name, i, t, e) PACKET_TEST_NAME(e_type, s_name, s_name, i, t, e) static uint8_t s_client_id[] = "Test Client ID"; enum { CLIENT_ID_LEN = sizeof(s_client_id) }; static uint8_t s_topic_name[] = "test/topic"; enum { TOPIC_NAME_LEN = sizeof(s_topic_name) }; static uint8_t s_payload[] = "This s_payload contains data. It is some good ol' fashioned data."; enum { PAYLOAD_LEN = sizeof(s_payload) }; static uint8_t s_username[] = "admin"; enum { USERNAME_LEN = sizeof(s_username) }; static uint8_t s_password[] = "12345"; enum { PASSWORD_LEN = sizeof(s_password) }; /*****************************************************************************/ /* Ack */ static int s_test_ack_init(struct packet_test_fixture *fixture) { /* Init buffer */ uint8_t packet_id = (uint8_t)(fixture->type + 7); /* clang-format off */ uint8_t header[] = { (uint8_t)(fixture->type << 4), /* Packet type */ 2, /* Remaining length */ 0, packet_id, /* Packet identifier */ }; /* Init packet */ switch (fixture->type) { case AWS_MQTT_PACKET_PUBACK: ASSERT_SUCCESS(aws_mqtt_packet_puback_init(fixture->in_packet, packet_id)); break; case AWS_MQTT_PACKET_PUBREC: ASSERT_SUCCESS(aws_mqtt_packet_pubrec_init(fixture->in_packet, packet_id)); break; case AWS_MQTT_PACKET_PUBREL: ASSERT_SUCCESS(aws_mqtt_packet_pubrel_init(fixture->in_packet, packet_id)); /* if pubrel, bit 1 in flags must be set */ header[0] |= 0x2; break; case AWS_MQTT_PACKET_PUBCOMP: ASSERT_SUCCESS(aws_mqtt_packet_pubcomp_init(fixture->in_packet, packet_id)); break; case AWS_MQTT_PACKET_UNSUBACK: ASSERT_SUCCESS(aws_mqtt_packet_unsuback_init(fixture->in_packet, packet_id)); break; default: AWS_ASSUME(false); break; } aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); return AWS_OP_SUCCESS; } #define PACKET_TEST_ACK(e_type, name) \ PACKET_TEST_NAME(e_type, name, ack, &s_test_ack_init, NULL, NULL) PACKET_TEST_ACK(PUBACK, puback) PACKET_TEST_ACK(PUBREC, pubrec) PACKET_TEST_ACK(PUBREL, pubrel) PACKET_TEST_ACK(PUBCOMP, pubcomp) PACKET_TEST_ACK(UNSUBACK, unsuback) #undef PACKET_TEST_ACK /*****************************************************************************/ /* Connect */ static int s_test_connect_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_connect_init( fixture->in_packet, aws_byte_cursor_from_array(s_client_id, CLIENT_ID_LEN), false, 0)); /* Init buffer */ /* clang-format off */ uint8_t header[] = { AWS_MQTT_PACKET_CONNECT << 4, /* Packet type */ 10 + 2 + CLIENT_ID_LEN, /* Remaining length */ 0, 4, 'M', 'Q', 'T', 'T', /* Protocol name */ 4, /* Protocol level */ 0, /* Connect Flags */ 0, 0, /* Keep alive */ }; /* clang-format on */ aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); 
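/* Worked example of the header bytes written just above, assuming CLIENT_ID_LEN keeps its value of
 * sizeof("Test Client ID") == 15 (the array length, so the trailing NUL is counted):
 *   byte 0       0x10                       CONNECT (1 << 4), flags 0
 *   byte 1       10 + 2 + 15 = 27           remaining length: 10-byte variable header + 2-byte client id length prefix + 15-byte client id
 *   bytes 2-7    0x00 0x04 'M' 'Q' 'T' 'T'  protocol name as a length-prefixed string
 *   byte 8       0x04                       protocol level 4 (MQTT 3.1.1)
 *   byte 9       0x00                       connect flags (no will, no credentials, clean session off)
 *   bytes 10-11  0x00 0x00                  keep alive of 0 seconds
 * The next two writes append the same 2-byte length prefix (0x00, CLIENT_ID_LEN) followed by the client id bytes. */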
aws_byte_buf_write_u8(&fixture->buffer, 0); aws_byte_buf_write_u8(&fixture->buffer, CLIENT_ID_LEN); aws_byte_buf_write(&fixture->buffer, s_client_id, CLIENT_ID_LEN); return AWS_OP_SUCCESS; } static bool s_test_connect_eq(void *a, void *b, size_t size) { (void)size; struct aws_mqtt_packet_connect *l = a; struct aws_mqtt_packet_connect *r = b; return s_fixed_header_eq(&l->fixed_header, &r->fixed_header) && l->clean_session == r->clean_session && l->has_will == r->has_will && l->will_qos == r->will_qos && l->will_retain == r->will_retain && l->has_password == r->has_password && l->has_username == r->has_username && l->keep_alive_timeout == r->keep_alive_timeout && aws_byte_cursor_eq(&l->client_identifier, &r->client_identifier) && aws_byte_cursor_eq(&l->will_topic, &r->will_topic) && aws_byte_cursor_eq(&l->username, &r->username) && aws_byte_cursor_eq(&l->password, &r->password); } PACKET_TEST(CONNECT, connect, &s_test_connect_init, NULL, &s_test_connect_eq) static int s_test_connect_will_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_connect_init( fixture->in_packet, aws_byte_cursor_from_array(s_client_id, CLIENT_ID_LEN), false, 0)); ASSERT_SUCCESS(aws_mqtt_packet_connect_add_will( fixture->in_packet, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN), AWS_MQTT_QOS_EXACTLY_ONCE, true /*retain*/, aws_byte_cursor_from_array(s_payload, PAYLOAD_LEN))); /* Init buffer */ /* clang-format off */ uint8_t header[] = { AWS_MQTT_PACKET_CONNECT << 4, /* Packet type */ 10 + (2 + CLIENT_ID_LEN) + (2 + TOPIC_NAME_LEN) + (2 + PAYLOAD_LEN), /* Remaining length */ 0, 4, 'M', 'Q', 'T', 'T', /* Protocol name */ 4, /* Protocol level */ /* Connect Flags: */ (1 << 2) /* Will flag, bit 2 */ | (AWS_MQTT_QOS_EXACTLY_ONCE << 3)/* Will QoS, bits 4-3 */ | (1 << 5), /* Will Retain, bit 5 */ 0, 0, /* Keep alive */ }; /* clang-format on */ aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); /* client identifier */ aws_byte_buf_write_be16(&fixture->buffer, CLIENT_ID_LEN); aws_byte_buf_write(&fixture->buffer, s_client_id, CLIENT_ID_LEN); /* will topic */ aws_byte_buf_write_be16(&fixture->buffer, TOPIC_NAME_LEN); aws_byte_buf_write(&fixture->buffer, s_topic_name, TOPIC_NAME_LEN); /* will payload */ aws_byte_buf_write_be16(&fixture->buffer, PAYLOAD_LEN); aws_byte_buf_write(&fixture->buffer, s_payload, PAYLOAD_LEN); return AWS_OP_SUCCESS; } PACKET_TEST_NAME(CONNECT, connect_will, connect, &s_test_connect_will_init, NULL, &s_test_connect_eq) static uint8_t s_empty_payload[] = ""; enum { EMPTY_PAYLOAD_LEN = 0 }; static int s_test_connect_empty_payload_will_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_connect_init( fixture->in_packet, aws_byte_cursor_from_array(s_client_id, CLIENT_ID_LEN), false, 0)); ASSERT_SUCCESS(aws_mqtt_packet_connect_add_will( fixture->in_packet, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN), AWS_MQTT_QOS_EXACTLY_ONCE, true /*retain*/, aws_byte_cursor_from_array(s_empty_payload, EMPTY_PAYLOAD_LEN))); /* Init buffer */ /* clang-format off */ uint8_t header[] = { AWS_MQTT_PACKET_CONNECT << 4, /* Packet type */ 10 + (2 + CLIENT_ID_LEN) + (2 + TOPIC_NAME_LEN) + (2 + EMPTY_PAYLOAD_LEN), /* Remaining length */ 0, 4, 'M', 'Q', 'T', 'T', /* Protocol name */ 4, /* Protocol level */ /* Connect Flags: */ (1 << 2) /* Will flag, bit 2 */ | (AWS_MQTT_QOS_EXACTLY_ONCE << 3)/* Will QoS, bits 4-3 */ | (1 << 5), /* Will Retain, bit 5 */ 0, 0, /* Keep alive */ }; /* clang-format on */ 
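/* For reference, the connect flags byte defined above works out to 0x34, assuming AWS_MQTT_QOS_EXACTLY_ONCE
 * carries the protocol value 2:
 *   will flag   (1 << 2) = 0x04
 *   will QoS    (2 << 3) = 0x10
 *   will retain (1 << 5) = 0x20
 * and the remaining length grows by one length-prefixed string each for the will topic and the will payload,
 * i.e. (2 + TOPIC_NAME_LEN) + (2 + PAYLOAD_LEN) on top of the 10-byte variable header and the
 * (2 + CLIENT_ID_LEN) client identifier. */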
aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); /* client identifier */ aws_byte_buf_write_be16(&fixture->buffer, CLIENT_ID_LEN); aws_byte_buf_write(&fixture->buffer, s_client_id, CLIENT_ID_LEN); /* will topic */ aws_byte_buf_write_be16(&fixture->buffer, TOPIC_NAME_LEN); aws_byte_buf_write(&fixture->buffer, s_topic_name, TOPIC_NAME_LEN); /* will payload */ aws_byte_buf_write_be16(&fixture->buffer, EMPTY_PAYLOAD_LEN); aws_byte_buf_write(&fixture->buffer, s_empty_payload, EMPTY_PAYLOAD_LEN); return AWS_OP_SUCCESS; } PACKET_TEST_NAME( CONNECT, connect_empty_payload_will, connect, &s_test_connect_empty_payload_will_init, NULL, &s_test_connect_eq) static int s_test_connect_password_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_connect_init( fixture->in_packet, aws_byte_cursor_from_array(s_client_id, CLIENT_ID_LEN), false, 0xBEEF)); ASSERT_SUCCESS(aws_mqtt_packet_connect_add_credentials( fixture->in_packet, aws_byte_cursor_from_array(s_username, USERNAME_LEN), aws_byte_cursor_from_array(s_password, PASSWORD_LEN))); /* Init buffer */ /* clang-format off */ uint8_t header[] = { AWS_MQTT_PACKET_CONNECT << 4, /* Packet type */ 10 + (2 + CLIENT_ID_LEN) + (2 + USERNAME_LEN) + (2 + PASSWORD_LEN), /* Remaining length */ 0, 4, 'M', 'Q', 'T', 'T', /* Protocol name */ 4, /* Protocol level */ (1 << 7) | (1 << 6), /* Connect Flags: username bit 7, password bit 6 */ 0xBE, 0xEF, /* Keep alive */ }; /* clang-format on */ aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); /* client identifier */ aws_byte_buf_write_be16(&fixture->buffer, CLIENT_ID_LEN); aws_byte_buf_write(&fixture->buffer, s_client_id, CLIENT_ID_LEN); /* username */ aws_byte_buf_write_be16(&fixture->buffer, USERNAME_LEN); aws_byte_buf_write(&fixture->buffer, s_username, USERNAME_LEN); /* password */ aws_byte_buf_write_be16(&fixture->buffer, PASSWORD_LEN); aws_byte_buf_write(&fixture->buffer, s_password, PASSWORD_LEN); return AWS_OP_SUCCESS; } PACKET_TEST_NAME(CONNECT, connect_password, connect, &s_test_connect_password_init, NULL, &s_test_connect_eq) static int s_test_connect_all_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_connect_init( fixture->in_packet, aws_byte_cursor_from_array(s_client_id, CLIENT_ID_LEN), false, 0)); ASSERT_SUCCESS(aws_mqtt_packet_connect_add_will( fixture->in_packet, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN), AWS_MQTT_QOS_EXACTLY_ONCE, true /*retain*/, aws_byte_cursor_from_array(s_payload, PAYLOAD_LEN))); ASSERT_SUCCESS(aws_mqtt_packet_connect_add_credentials( fixture->in_packet, aws_byte_cursor_from_array(s_username, USERNAME_LEN), aws_byte_cursor_from_array(s_password, PASSWORD_LEN))); /* Init buffer */ /* clang-format off */ uint8_t header[] = { AWS_MQTT_PACKET_CONNECT << 4, /* Packet type */ 10 + (2 + CLIENT_ID_LEN) + (2 + TOPIC_NAME_LEN) + (2 + PAYLOAD_LEN) + (2 + USERNAME_LEN) + (2 + PASSWORD_LEN), /* Remaining length */ 0, 4, 'M', 'Q', 'T', 'T', /* Protocol name */ 4, /* Protocol level */ /* Connect Flags: */ (1 << 2) /* Will flag, bit 2 */ | (AWS_MQTT_QOS_EXACTLY_ONCE << 3)/* Will QoS, bits 4-3 */ | (1 << 5) /* Will Retain, bit 5 */ | (1 << 7) | (1 << 6), /* username bit 7, password bit 6 */ 0, 0, /* Keep alive */ }; /* clang-format on */ aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); /* client identifier */ aws_byte_buf_write_be16(&fixture->buffer, CLIENT_ID_LEN); aws_byte_buf_write(&fixture->buffer, s_client_id, CLIENT_ID_LEN); /* will topic */ 
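/* For this "all options" packet the connect flags combine every feature exercised above:
 *   0x04 (will) | 0x10 (will QoS 2) | 0x20 (will retain) | 0x40 (password) | 0x80 (username) = 0xF4.
 * As in the credentials-only case, a non-zero keep alive such as 0xBEEF is simply written big-endian
 * (0xBE, 0xEF), i.e. 48879 seconds. The payload section written below follows the order required by
 * MQTT 3.1.1: client id, will topic, will payload, username, password, each as a 2-byte length-prefixed field. */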
aws_byte_buf_write_be16(&fixture->buffer, TOPIC_NAME_LEN); aws_byte_buf_write(&fixture->buffer, s_topic_name, TOPIC_NAME_LEN); /* will payload */ aws_byte_buf_write_be16(&fixture->buffer, PAYLOAD_LEN); aws_byte_buf_write(&fixture->buffer, s_payload, PAYLOAD_LEN); /* username */ aws_byte_buf_write_be16(&fixture->buffer, USERNAME_LEN); aws_byte_buf_write(&fixture->buffer, s_username, USERNAME_LEN); /* password */ aws_byte_buf_write_be16(&fixture->buffer, PASSWORD_LEN); aws_byte_buf_write(&fixture->buffer, s_password, PASSWORD_LEN); return AWS_OP_SUCCESS; } PACKET_TEST_NAME(CONNECT, connect_all, connect, &s_test_connect_all_init, NULL, &s_test_connect_eq) /*****************************************************************************/ /* Connack */ static int s_test_connack_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_connack_init(fixture->in_packet, true, AWS_MQTT_CONNECT_ACCEPTED)); /* Init buffer */ /* clang-format off */ uint8_t header[] = { AWS_MQTT_PACKET_CONNACK << 4, /* Packet type */ 2, /* Remaining length */ 1, /* Acknowledge flags */ AWS_MQTT_CONNECT_ACCEPTED, /* Return code */ }; /* clang-format on */ aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); return AWS_OP_SUCCESS; } PACKET_TEST(CONNACK, connack, &s_test_connack_init, NULL, NULL) /*****************************************************************************/ /* Publish */ static int s_test_publish_qos0_dup_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_publish_init( fixture->in_packet, false /* retain */, AWS_MQTT_QOS_AT_MOST_ONCE, true /* dup */, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN), 0, aws_byte_cursor_from_array(s_payload, PAYLOAD_LEN))); /* Init buffer */ /* clang-format off */ aws_byte_buf_write_u8( &fixture->buffer, (AWS_MQTT_PACKET_PUBLISH << 4) /* Packet type bits 7-4 */ | (1 << 3) /* DUP bit 3 */ | (AWS_MQTT_QOS_AT_MOST_ONCE << 1) /* QoS bits 2-1 */ | 0 /* RETAIN bit 0 */); aws_byte_buf_write_u8( &fixture->buffer, 2 + TOPIC_NAME_LEN + PAYLOAD_LEN); /* Remaining length */ aws_byte_buf_write_u8( &fixture->buffer, 0); /* Topic name len byte 1 */ aws_byte_buf_write_u8( &fixture->buffer, TOPIC_NAME_LEN); /* Topic name len byte 2 */ aws_byte_buf_write( &fixture->buffer, s_topic_name, TOPIC_NAME_LEN); /* Topic name */ aws_byte_buf_write( &fixture->buffer, s_payload, PAYLOAD_LEN); /* payload */ /* clang-format on */ return AWS_OP_SUCCESS; } static int s_test_publish_qos2_retain_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_publish_init( fixture->in_packet, true /* retain */, AWS_MQTT_QOS_EXACTLY_ONCE, false /* dup */, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN), 7, aws_byte_cursor_from_array(s_payload, PAYLOAD_LEN))); /* Init buffer */ /* clang-format off */ aws_byte_buf_write_u8( &fixture->buffer, (AWS_MQTT_PACKET_PUBLISH << 4) /* Packet type bits 7-4 */ | (0 << 3) /* DUP bit 3 */ | (AWS_MQTT_QOS_EXACTLY_ONCE << 1) /* QoS bits 2-1 */ | 1 /* RETAIN bit 0 */); aws_byte_buf_write_u8( &fixture->buffer, 4 + TOPIC_NAME_LEN + PAYLOAD_LEN); /* Remaining length */ aws_byte_buf_write_u8( &fixture->buffer, 0); /* Topic name len byte 1 */ aws_byte_buf_write_u8( &fixture->buffer, TOPIC_NAME_LEN); /* Topic name len byte 2 */ aws_byte_buf_write( &fixture->buffer, s_topic_name, TOPIC_NAME_LEN); /* Topic name */ aws_byte_buf_write_u8( &fixture->buffer, 0); /* Packet id byte 1 */ aws_byte_buf_write_u8( &fixture->buffer, 7); /* Packet id byte 2 */ 
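/* The first byte of each PUBLISH above can be verified by hand, assuming AWS_MQTT_PACKET_PUBLISH carries
 * the protocol value 3:
 *   QoS 0 + DUP:    (3 << 4) | (1 << 3) | (0 << 1) | 0 = 0x38
 *   QoS 2 + RETAIN: (3 << 4) | (0 << 3) | (2 << 1) | 1 = 0x35
 * The QoS 0 variant carries no packet identifier, so its remaining length is 2 + TOPIC_NAME_LEN + PAYLOAD_LEN,
 * while this QoS 2 variant adds 2 more bytes for the packet id, hence 4 + TOPIC_NAME_LEN + PAYLOAD_LEN. */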
aws_byte_buf_write( &fixture->buffer, s_payload, PAYLOAD_LEN); /* payload */ /* clang-format on */ return AWS_OP_SUCCESS; } static int s_test_publish_empty_payload_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_publish_init( fixture->in_packet, false /* retain */, AWS_MQTT_QOS_AT_MOST_ONCE, true /* dup */, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN), 0, aws_byte_cursor_from_array(s_empty_payload, EMPTY_PAYLOAD_LEN))); /* Init buffer */ /* clang-format off */ aws_byte_buf_write_u8( &fixture->buffer, (AWS_MQTT_PACKET_PUBLISH << 4) /* Packet type bits 7-4 */ | (1 << 3) /* DUP bit 3 */ | (AWS_MQTT_QOS_AT_MOST_ONCE << 1) /* QoS bits 2-1 */ | 0 /* RETAIN bit 0 */); aws_byte_buf_write_u8( &fixture->buffer, 2 + TOPIC_NAME_LEN + EMPTY_PAYLOAD_LEN); /* Remaining length */ aws_byte_buf_write_u8( &fixture->buffer, 0); /* Topic name len byte 1 */ aws_byte_buf_write_u8( &fixture->buffer, TOPIC_NAME_LEN); /* Topic name len byte 2 */ aws_byte_buf_write( &fixture->buffer, s_topic_name, TOPIC_NAME_LEN); /* Topic name */ aws_byte_buf_write( &fixture->buffer, s_empty_payload, EMPTY_PAYLOAD_LEN); /* payload */ /* clang-format on */ return AWS_OP_SUCCESS; } static bool s_test_publish_eq(void *a, void *b, size_t size) { (void)size; struct aws_mqtt_packet_publish *l = a; struct aws_mqtt_packet_publish *r = b; return s_fixed_header_eq(&l->fixed_header, &r->fixed_header) && l->packet_identifier == r->packet_identifier && aws_byte_cursor_eq(&l->topic_name, &r->topic_name) && aws_byte_cursor_eq(&l->payload, &r->payload); } PACKET_TEST_NAME(PUBLISH, publish_qos0_dup, publish, &s_test_publish_qos0_dup_init, NULL, &s_test_publish_eq) PACKET_TEST_NAME(PUBLISH, publish_qos2_retain, publish, &s_test_publish_qos2_retain_init, NULL, &s_test_publish_eq) PACKET_TEST_NAME(PUBLISH, publish_empty_payload, publish, &s_test_publish_empty_payload_init, NULL, &s_test_publish_eq) /*****************************************************************************/ /* Subscribe */ static int s_test_subscribe_init(struct packet_test_fixture *fixture) { /* Init packets */ ASSERT_SUCCESS(aws_mqtt_packet_subscribe_init(fixture->in_packet, fixture->allocator, 7)); ASSERT_SUCCESS(aws_mqtt_packet_subscribe_init(fixture->out_packet, fixture->allocator, 0)); ASSERT_SUCCESS(aws_mqtt_packet_subscribe_add_topic( fixture->in_packet, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN), AWS_MQTT_QOS_EXACTLY_ONCE)); /* Init buffer */ /* clang-format off */ aws_byte_buf_write_u8( &fixture->buffer, (AWS_MQTT_PACKET_SUBSCRIBE << 4) | 0x2); /* Packet type & flags */ aws_byte_buf_write_u8( &fixture->buffer, 4 + TOPIC_NAME_LEN + 1); /* Remaining length */ aws_byte_buf_write_u8( &fixture->buffer, 0); aws_byte_buf_write_u8( &fixture->buffer, 7); aws_byte_buf_write_u8( &fixture->buffer, 0); aws_byte_buf_write_u8( &fixture->buffer, TOPIC_NAME_LEN); aws_byte_buf_write( &fixture->buffer, s_topic_name, TOPIC_NAME_LEN); aws_byte_buf_write_u8( &fixture->buffer, AWS_MQTT_QOS_EXACTLY_ONCE); /* clang-format on */ return AWS_OP_SUCCESS; } static int s_test_subscribe_clean_up(struct packet_test_fixture *fixture) { aws_mqtt_packet_subscribe_clean_up(fixture->in_packet); aws_mqtt_packet_subscribe_clean_up(fixture->out_packet); return AWS_OP_SUCCESS; } static bool s_test_subscribe_eq(void *a, void *b, size_t size) { (void)size; struct aws_mqtt_packet_subscribe *l = a; struct aws_mqtt_packet_subscribe *r = b; if (!s_fixed_header_eq(&l->fixed_header, &r->fixed_header) || l->packet_identifier != r->packet_identifier) { 
return false; } const size_t length = aws_array_list_length(&l->topic_filters); if (length != aws_array_list_length(&r->topic_filters)) { return false; } for (size_t i = 0; i < length; ++i) { struct aws_mqtt_subscription *lt = NULL; aws_array_list_get_at_ptr(&l->topic_filters, (void **)&lt, i); struct aws_mqtt_subscription *rt = NULL; aws_array_list_get_at_ptr(&r->topic_filters, (void **)&rt, i); AWS_ASSUME(lt && rt); if (lt->qos != rt->qos) { return false; } if (!aws_byte_cursor_eq(&lt->topic_filter, &rt->topic_filter)) { return false; } } return true; } PACKET_TEST(SUBSCRIBE, subscribe, &s_test_subscribe_init, &s_test_subscribe_clean_up, &s_test_subscribe_eq) /*****************************************************************************/ /* Suback */ static int s_test_suback_init(struct packet_test_fixture *fixture) { /* Init packets */ ASSERT_SUCCESS(aws_mqtt_packet_suback_init(fixture->in_packet, fixture->allocator, 7)); ASSERT_SUCCESS(aws_mqtt_packet_suback_init(fixture->out_packet, fixture->allocator, 0)); ASSERT_SUCCESS(aws_mqtt_packet_suback_add_return_code(fixture->in_packet, AWS_MQTT_QOS_EXACTLY_ONCE)); ASSERT_SUCCESS(aws_mqtt_packet_suback_add_return_code(fixture->in_packet, AWS_MQTT_QOS_FAILURE)); /* Init buffer */ /* clang-format off */ aws_byte_buf_write_u8( &fixture->buffer, (AWS_MQTT_PACKET_SUBACK << 4) | 0x0); /* Packet type & flags */ aws_byte_buf_write_u8( &fixture->buffer, 2/* variable header */ + 2/* payload */); /* Remaining length */ aws_byte_buf_write_u8( &fixture->buffer, 0); aws_byte_buf_write_u8( &fixture->buffer, 7); aws_byte_buf_write_u8( &fixture->buffer, AWS_MQTT_QOS_EXACTLY_ONCE); /* Payload */ aws_byte_buf_write_u8( &fixture->buffer, AWS_MQTT_QOS_FAILURE); /* Payload */ /* clang-format on */ return AWS_OP_SUCCESS; } static int s_test_suback_clean_up(struct packet_test_fixture *fixture) { aws_mqtt_packet_suback_clean_up(fixture->in_packet); aws_mqtt_packet_suback_clean_up(fixture->out_packet); return AWS_OP_SUCCESS; } static bool s_test_suback_eq(void *a, void *b, size_t size) { (void)size; struct aws_mqtt_packet_suback *l = a; struct aws_mqtt_packet_suback *r = b; if (!s_fixed_header_eq(&l->fixed_header, &r->fixed_header) || l->packet_identifier != r->packet_identifier) { return false; } const size_t length = aws_array_list_length(&l->return_codes); if (length != aws_array_list_length(&r->return_codes)) { return false; } for (size_t i = 0; i < length; ++i) { uint8_t lt = 0; aws_array_list_get_at(&l->return_codes, (void *)&lt, i); uint8_t rt = 0; aws_array_list_get_at(&r->return_codes, (void *)&rt, i); AWS_ASSUME(lt && rt); } return true; } PACKET_TEST(SUBACK, suback, &s_test_suback_init, &s_test_suback_clean_up, &s_test_suback_eq) /*****************************************************************************/ /* Unsubscribe */ static int s_test_unsubscribe_init(struct packet_test_fixture *fixture) { /* Init packet */ ASSERT_SUCCESS(aws_mqtt_packet_unsubscribe_init(fixture->in_packet, fixture->allocator, 7)); ASSERT_SUCCESS(aws_mqtt_packet_unsubscribe_init(fixture->out_packet, fixture->allocator, 0)); ASSERT_SUCCESS(aws_mqtt_packet_unsubscribe_add_topic( fixture->in_packet, aws_byte_cursor_from_array(s_topic_name, TOPIC_NAME_LEN))); /* Init buffer */ /* clang-format off */ aws_byte_buf_write_u8( &fixture->buffer, (AWS_MQTT_PACKET_UNSUBSCRIBE << 4) | 0x2); /* Packet type & flags */ aws_byte_buf_write_u8( &fixture->buffer, 4 + TOPIC_NAME_LEN); /* Remaining length */ aws_byte_buf_write_u8( &fixture->buffer, 0); aws_byte_buf_write_u8( &fixture->buffer, 7);
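/* Expected UNSUBSCRIBE layout, for reference: the fixed header flags must be 0x2 (the only legal value for
 * UNSUBSCRIBE in MQTT 3.1.1), and the remaining length of 4 + TOPIC_NAME_LEN covers the 2-byte packet
 * identifier just written plus the 2-byte length prefix and topic filter written below. */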
aws_byte_buf_write_u8( &fixture->buffer, 0); aws_byte_buf_write_u8( &fixture->buffer, TOPIC_NAME_LEN); aws_byte_buf_write( &fixture->buffer, s_topic_name, TOPIC_NAME_LEN); /* clang-format on */ return AWS_OP_SUCCESS; } static int s_test_unsubscribe_clean_up(struct packet_test_fixture *fixture) { aws_mqtt_packet_unsubscribe_clean_up(fixture->in_packet); aws_mqtt_packet_unsubscribe_clean_up(fixture->out_packet); return AWS_OP_SUCCESS; } static bool s_test_unsubscribe_eq(void *a, void *b, size_t size) { (void)size; struct aws_mqtt_packet_unsubscribe *l = a; struct aws_mqtt_packet_unsubscribe *r = b; if (!s_fixed_header_eq(&l->fixed_header, &r->fixed_header) || l->packet_identifier != r->packet_identifier) { return false; } const size_t length = aws_array_list_length(&l->topic_filters); if (length != aws_array_list_length(&r->topic_filters)) { return false; } for (size_t i = 0; i < length; ++i) { struct aws_byte_cursor *lt = NULL; aws_array_list_get_at_ptr(&l->topic_filters, (void **)&lt, i); struct aws_byte_cursor *rt = NULL; aws_array_list_get_at_ptr(&r->topic_filters, (void **)&rt, i); AWS_ASSUME(lt && rt); if (!aws_byte_cursor_eq(lt, rt)) { return false; } } return true; } PACKET_TEST(UNSUBSCRIBE, unsubscribe, &s_test_unsubscribe_init, &s_test_unsubscribe_clean_up, &s_test_unsubscribe_eq) /*****************************************************************************/ /* Connection */ static int s_test_connection_init(struct packet_test_fixture *fixture) { /* Init packet */ switch (fixture->type) { case AWS_MQTT_PACKET_PINGREQ: ASSERT_SUCCESS(aws_mqtt_packet_pingreq_init(fixture->in_packet)); break; case AWS_MQTT_PACKET_PINGRESP: ASSERT_SUCCESS(aws_mqtt_packet_pingresp_init(fixture->in_packet)); break; case AWS_MQTT_PACKET_DISCONNECT: ASSERT_SUCCESS(aws_mqtt_packet_disconnect_init(fixture->in_packet)); break; default: AWS_FATAL_ASSERT(false); break; } /* Init buffer */ /* clang-format off */ uint8_t header[] = { (uint8_t)(fixture->type << 4), /* Packet type */ 0, /* Remaining length */ }; /* clang-format on */ aws_byte_buf_write(&fixture->buffer, header, sizeof(header)); return AWS_OP_SUCCESS; } #define PACKET_TEST_CONNETION(e_type, name) \ PACKET_TEST_NAME(e_type, name, connection, &s_test_connection_init, NULL, NULL) PACKET_TEST_CONNETION(PINGREQ, pingreq) PACKET_TEST_CONNETION(PINGRESP, pingresp) PACKET_TEST_CONNETION(DISCONNECT, disconnect) #undef PACKET_TEST_CONNETION static int s_mqtt_packet_connack_decode_failure_reserved_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf encoded_packet; aws_byte_buf_init(&encoded_packet, allocator, 1024); struct aws_mqtt_packet_connack connack; ASSERT_SUCCESS(aws_mqtt_packet_connack_init(&connack, true, AWS_MQTT_CONNECT_SERVER_UNAVAILABLE)); ASSERT_SUCCESS(aws_mqtt_packet_connack_encode(&encoded_packet, &connack)); struct aws_byte_cursor decode_cursor = aws_byte_cursor_from_buf(&encoded_packet); struct aws_mqtt_packet_connack decoded_connack; ASSERT_SUCCESS(aws_mqtt_packet_connack_decode(&decode_cursor, &decoded_connack)); /* mess up the fixed header reserved bits */ encoded_packet.buffer[0] |= 0x01; decode_cursor = aws_byte_cursor_from_buf(&encoded_packet); ASSERT_FAILS(aws_mqtt_packet_connack_decode(&decode_cursor, &decoded_connack)); aws_byte_buf_clean_up(&encoded_packet); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_packet_connack_decode_failure_reserved, s_mqtt_packet_connack_decode_failure_reserved_fn) static int s_mqtt_packet_ack_decode_failure_reserved_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx;
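/* This test repeats the pattern of the CONNACK case above: byte 0 of every MQTT 3.1.1 packet is
 * (packet_type << 4) | flags, and for PUBACK the low flag bits are reserved and must be 0, so ORing 0x0F
 * into byte 0 has to make the decoder reject the packet. A minimal sketch of the shared shape, using
 * hypothetical names rather than the real encode/decode functions, would be:
 *
 *   encode_valid_packet(&buf);
 *   buf.buffer[0] |= reserved_bits;        // corrupt only the reserved flag nibble
 *   cursor = aws_byte_cursor_from_buf(&buf);
 *   ASSERT_FAILS(decode(&cursor, &out));   // decoder must notice the illegal flags
 */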
struct aws_byte_buf encoded_packet; aws_byte_buf_init(&encoded_packet, allocator, 1024); struct aws_mqtt_packet_ack puback; ASSERT_SUCCESS(aws_mqtt_packet_puback_init(&puback, 5)); ASSERT_SUCCESS(aws_mqtt_packet_ack_encode(&encoded_packet, &puback)); struct aws_byte_cursor decode_cursor = aws_byte_cursor_from_buf(&encoded_packet); struct aws_mqtt_packet_ack decoded_ack; ASSERT_SUCCESS(aws_mqtt_packet_ack_decode(&decode_cursor, &decoded_ack)); /* mess up the fixed header reserved bits */ encoded_packet.buffer[0] |= 0x0F; decode_cursor = aws_byte_cursor_from_buf(&encoded_packet); ASSERT_FAILS(aws_mqtt_packet_ack_decode(&decode_cursor, &decoded_ack)); aws_byte_buf_clean_up(&encoded_packet); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_packet_ack_decode_failure_reserved, s_mqtt_packet_ack_decode_failure_reserved_fn) static int s_mqtt_packet_pingresp_decode_failure_reserved_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf encoded_packet; aws_byte_buf_init(&encoded_packet, allocator, 1024); struct aws_mqtt_packet_connection pingresp; ASSERT_SUCCESS(aws_mqtt_packet_pingresp_init(&pingresp)); ASSERT_SUCCESS(aws_mqtt_packet_connection_encode(&encoded_packet, &pingresp)); struct aws_byte_cursor decode_cursor = aws_byte_cursor_from_buf(&encoded_packet); struct aws_mqtt_packet_connection decoded_pingresp; ASSERT_SUCCESS(aws_mqtt_packet_connection_decode(&decode_cursor, &decoded_pingresp)); /* mess up the fixed header reserved bits */ encoded_packet.buffer[0] |= 0x08; decode_cursor = aws_byte_cursor_from_buf(&encoded_packet); ASSERT_FAILS(aws_mqtt_packet_connection_decode(&decode_cursor, &decoded_pingresp)); aws_byte_buf_clean_up(&encoded_packet); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_packet_pingresp_decode_failure_reserved, s_mqtt_packet_pingresp_decode_failure_reserved_fn) #ifdef _MSC_VER # pragma warning(pop) #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/packet_framing_tests.c000066400000000000000000000603311456575232400262520ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include struct mqtt_311_decoding_test_context { struct aws_allocator *allocator; struct aws_mqtt311_decoder decoder; void *expected_packet; size_t packet_count[16]; }; static int s_compare_fixed_header( struct aws_mqtt_fixed_header *expected_header, struct aws_mqtt_fixed_header *actual_header) { ASSERT_INT_EQUALS(expected_header->packet_type, actual_header->packet_type); ASSERT_INT_EQUALS(expected_header->remaining_length, actual_header->remaining_length); ASSERT_INT_EQUALS(expected_header->flags, actual_header->flags); return AWS_OP_SUCCESS; } static int s_decoding_test_handle_publish(struct aws_byte_cursor message_cursor, void *user_data) { struct mqtt_311_decoding_test_context *context = user_data; (void)context; struct aws_mqtt_packet_publish publish; if (aws_mqtt_packet_publish_decode(&message_cursor, &publish)) { return AWS_OP_ERR; } struct aws_mqtt_packet_publish *expected_publish = context->expected_packet; ASSERT_SUCCESS(s_compare_fixed_header(&expected_publish->fixed_header, &publish.fixed_header)); ASSERT_INT_EQUALS(expected_publish->packet_identifier, publish.packet_identifier); ASSERT_BIN_ARRAYS_EQUALS( expected_publish->topic_name.ptr, expected_publish->topic_name.len, publish.topic_name.ptr, publish.topic_name.len); ASSERT_BIN_ARRAYS_EQUALS( expected_publish->payload.ptr, expected_publish->payload.len, publish.payload.ptr, publish.payload.len); ++context->packet_count[AWS_MQTT_PACKET_PUBLISH]; return AWS_OP_SUCCESS; } static int s_decoding_test_handle_suback(struct aws_byte_cursor message_cursor, void *user_data) { struct mqtt_311_decoding_test_context *context = user_data; (void)context; struct aws_mqtt_packet_suback suback; if (aws_mqtt_packet_suback_init(&suback, context->allocator, 0 /* fake packet_id */)) { return AWS_OP_ERR; } int result = AWS_OP_ERR; if (aws_mqtt_packet_suback_decode(&message_cursor, &suback)) { goto done; } struct aws_mqtt_packet_suback *expected_suback = context->expected_packet; ASSERT_INT_EQUALS(expected_suback->packet_identifier, suback.packet_identifier); size_t expected_ack_count = aws_array_list_length(&expected_suback->return_codes); size_t actual_ack_count = aws_array_list_length(&suback.return_codes); ASSERT_INT_EQUALS(expected_ack_count, actual_ack_count); for (size_t i = 0; i < expected_ack_count; ++i) { uint8_t expected_return_code = 0; aws_array_list_get_at(&expected_suback->return_codes, &expected_return_code, i); uint8_t actual_return_code = 0; aws_array_list_get_at(&suback.return_codes, &actual_return_code, i); ASSERT_INT_EQUALS(expected_return_code, actual_return_code); } ++context->packet_count[AWS_MQTT_PACKET_SUBACK]; result = AWS_OP_SUCCESS; done: aws_mqtt_packet_suback_clean_up(&suback); return result; } static int s_decoding_test_handle_unsuback(struct aws_byte_cursor message_cursor, void *user_data) { struct mqtt_311_decoding_test_context *context = user_data; (void)context; struct aws_mqtt_packet_ack unsuback; if (aws_mqtt_packet_unsuback_init(&unsuback, 0 /* fake packet_id */)) { return AWS_OP_ERR; } if (aws_mqtt_packet_ack_decode(&message_cursor, &unsuback)) { return AWS_OP_ERR; } struct aws_mqtt_packet_ack *expected_unsuback = context->expected_packet; ASSERT_INT_EQUALS(expected_unsuback->packet_identifier, unsuback.packet_identifier); ++context->packet_count[AWS_MQTT_PACKET_UNSUBACK]; return AWS_OP_SUCCESS; } static int s_decoding_test_handle_puback(struct aws_byte_cursor message_cursor, void *user_data) { struct mqtt_311_decoding_test_context *context = user_data; (void)context; struct 
aws_mqtt_packet_ack puback; if (aws_mqtt_packet_puback_init(&puback, 0 /* fake packet_id */)) { return AWS_OP_ERR; } if (aws_mqtt_packet_ack_decode(&message_cursor, &puback)) { return AWS_OP_ERR; } struct aws_mqtt_packet_ack *expected_puback = context->expected_packet; ASSERT_INT_EQUALS(expected_puback->packet_identifier, puback.packet_identifier); ++context->packet_count[AWS_MQTT_PACKET_PUBACK]; return AWS_OP_SUCCESS; } static int s_decoding_test_handle_pingresp(struct aws_byte_cursor message_cursor, void *user_data) { struct mqtt_311_decoding_test_context *context = user_data; (void)context; struct aws_mqtt_packet_connection pingresp; if (aws_mqtt_packet_pingresp_init(&pingresp)) { return AWS_OP_ERR; } if (aws_mqtt_packet_connection_decode(&message_cursor, &pingresp)) { return AWS_OP_ERR; } ++context->packet_count[AWS_MQTT_PACKET_PINGRESP]; return AWS_OP_SUCCESS; } static int s_decoding_test_handle_connack(struct aws_byte_cursor message_cursor, void *user_data) { struct mqtt_311_decoding_test_context *context = user_data; (void)context; struct aws_mqtt_packet_connack connack; if (aws_mqtt_packet_connack_init(&connack, false, 0)) { return AWS_OP_ERR; } if (aws_mqtt_packet_connack_decode(&message_cursor, &connack)) { return AWS_OP_ERR; } struct aws_mqtt_packet_connack *expected_connack = context->expected_packet; ASSERT_INT_EQUALS(expected_connack->session_present, connack.session_present); ASSERT_INT_EQUALS(expected_connack->connect_return_code, connack.connect_return_code); ++context->packet_count[AWS_MQTT_PACKET_CONNACK]; return AWS_OP_SUCCESS; } static struct aws_mqtt_client_connection_packet_handlers s_decoding_test_packet_handlers = { .handlers_by_packet_type = { [AWS_MQTT_PACKET_PUBLISH] = &s_decoding_test_handle_publish, [AWS_MQTT_PACKET_SUBACK] = &s_decoding_test_handle_suback, [AWS_MQTT_PACKET_UNSUBACK] = &s_decoding_test_handle_unsuback, [AWS_MQTT_PACKET_PUBACK] = &s_decoding_test_handle_puback, [AWS_MQTT_PACKET_PINGRESP] = &s_decoding_test_handle_pingresp, [AWS_MQTT_PACKET_CONNACK] = &s_decoding_test_handle_connack, }}; static void s_init_decoding_test_context( struct mqtt_311_decoding_test_context *context, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*context); context->allocator = allocator; struct aws_mqtt311_decoder_options config = { .packet_handlers = &s_decoding_test_packet_handlers, .handler_user_data = context, }; aws_mqtt311_decoder_init(&context->decoder, allocator, &config); } static void s_clean_up_decoding_test_context(struct mqtt_311_decoding_test_context *context) { aws_mqtt311_decoder_clean_up(&context->decoder); } #define TEST_ADJACENT_PACKET_COUNT 4 static int s_mqtt_frame_and_decode_publish_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); /* * For completeness, run the test with payload sizes that lead to a remaining length VLI encoding of 1, 2, 3, and * 4 bytes. 
*/ size_t payload_sizes[] = {35, 1234, 1 << 16, 1 << 21}; for (size_t i = 0; i < AWS_ARRAY_SIZE(payload_sizes); ++i) { struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; size_t publish_payload_size = payload_sizes[i]; /* Intentionally don't initialize so we have lots of garbage */ uint8_t *raw_payload = aws_mem_acquire(allocator, publish_payload_size); struct aws_mqtt_packet_publish publish_packet; ASSERT_SUCCESS(aws_mqtt_packet_publish_init( &publish_packet, true, AWS_MQTT_QOS_AT_LEAST_ONCE, false, aws_byte_cursor_from_c_str("Hello/World"), 12, aws_byte_cursor_from_array(raw_payload, publish_payload_size))); test_context.expected_packet = &publish_packet; struct aws_byte_buf encoded_buffer; aws_byte_buf_init(&encoded_buffer, allocator, (publish_payload_size + 100) * TEST_ADJACENT_PACKET_COUNT); for (size_t j = 0; j < TEST_ADJACENT_PACKET_COUNT; ++j) { ASSERT_SUCCESS(aws_mqtt_packet_publish_encode(&encoded_buffer, &publish_packet)); } size_t fragment_lengths[] = {1, 2, 3, 5, 7, 11, 23, 37, 67, 131}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_buf(&encoded_buffer); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } ASSERT_INT_EQUALS(4 * AWS_ARRAY_SIZE(fragment_lengths), test_context.packet_count[AWS_MQTT_PACKET_PUBLISH]); aws_byte_buf_clean_up(&encoded_buffer); aws_mem_release(allocator, raw_payload); s_clean_up_decoding_test_context(&test_context); } aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_publish, s_mqtt_frame_and_decode_publish_fn) static int s_mqtt_frame_and_decode_suback_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; struct aws_mqtt_packet_suback suback_packet; ASSERT_SUCCESS(aws_mqtt_packet_suback_init(&suback_packet, allocator, 1234)); uint8_t sample_return_codes[] = {0x00, 0x01, 0x02, 0x80, 0x01}; for (size_t i = 0; i < AWS_ARRAY_SIZE(sample_return_codes); ++i) { aws_mqtt_packet_suback_add_return_code(&suback_packet, sample_return_codes[i]); } test_context.expected_packet = &suback_packet; struct aws_byte_buf encoded_buffer; aws_byte_buf_init(&encoded_buffer, allocator, 100 * TEST_ADJACENT_PACKET_COUNT); for (size_t j = 0; j < TEST_ADJACENT_PACKET_COUNT; ++j) { ASSERT_SUCCESS(aws_mqtt_packet_suback_encode(&encoded_buffer, &suback_packet)); } size_t fragment_lengths[] = {1, 2, 3, 5, 7, 11, 23, 37, 67, 143}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_buf(&encoded_buffer); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } ASSERT_INT_EQUALS(4 * AWS_ARRAY_SIZE(fragment_lengths), 
test_context.packet_count[AWS_MQTT_PACKET_SUBACK]); aws_mqtt_packet_suback_clean_up(&suback_packet); aws_byte_buf_clean_up(&encoded_buffer); s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_suback, s_mqtt_frame_and_decode_suback_fn) static int s_mqtt_frame_and_decode_unsuback_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; struct aws_mqtt_packet_ack unsuback_packet; ASSERT_SUCCESS(aws_mqtt_packet_unsuback_init(&unsuback_packet, 1234)); test_context.expected_packet = &unsuback_packet; struct aws_byte_buf encoded_buffer; aws_byte_buf_init(&encoded_buffer, allocator, 100 * TEST_ADJACENT_PACKET_COUNT); for (size_t j = 0; j < TEST_ADJACENT_PACKET_COUNT; ++j) { ASSERT_SUCCESS(aws_mqtt_packet_ack_encode(&encoded_buffer, &unsuback_packet)); } size_t fragment_lengths[] = {1, 2, 3, 5, 7, 11, 23}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_buf(&encoded_buffer); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } ASSERT_INT_EQUALS(4 * AWS_ARRAY_SIZE(fragment_lengths), test_context.packet_count[AWS_MQTT_PACKET_UNSUBACK]); aws_byte_buf_clean_up(&encoded_buffer); s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_unsuback, s_mqtt_frame_and_decode_unsuback_fn) static int s_mqtt_frame_and_decode_puback_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; struct aws_mqtt_packet_ack puback_packet; ASSERT_SUCCESS(aws_mqtt_packet_puback_init(&puback_packet, 1234)); test_context.expected_packet = &puback_packet; struct aws_byte_buf encoded_buffer; aws_byte_buf_init(&encoded_buffer, allocator, 100 * TEST_ADJACENT_PACKET_COUNT); for (size_t j = 0; j < TEST_ADJACENT_PACKET_COUNT; ++j) { ASSERT_SUCCESS(aws_mqtt_packet_ack_encode(&encoded_buffer, &puback_packet)); } size_t fragment_lengths[] = {1, 2, 3, 5, 7, 11, 23}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_buf(&encoded_buffer); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } ASSERT_INT_EQUALS(4 * AWS_ARRAY_SIZE(fragment_lengths), test_context.packet_count[AWS_MQTT_PACKET_PUBACK]); aws_byte_buf_clean_up(&encoded_buffer); s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_puback, s_mqtt_frame_and_decode_puback_fn) static int s_mqtt_frame_and_decode_pingresp_fn(struct aws_allocator *allocator, 
void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; struct aws_mqtt_packet_connection pingresp_packet; ASSERT_SUCCESS(aws_mqtt_packet_pingresp_init(&pingresp_packet)); test_context.expected_packet = &pingresp_packet; struct aws_byte_buf encoded_buffer; aws_byte_buf_init(&encoded_buffer, allocator, 100 * TEST_ADJACENT_PACKET_COUNT); for (size_t j = 0; j < TEST_ADJACENT_PACKET_COUNT; ++j) { ASSERT_SUCCESS(aws_mqtt_packet_connection_encode(&encoded_buffer, &pingresp_packet)); } size_t fragment_lengths[] = {1, 2, 3, 5, 7, 11}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_buf(&encoded_buffer); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } ASSERT_INT_EQUALS(4 * AWS_ARRAY_SIZE(fragment_lengths), test_context.packet_count[AWS_MQTT_PACKET_PINGRESP]); aws_byte_buf_clean_up(&encoded_buffer); s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_pingresp, s_mqtt_frame_and_decode_pingresp_fn) static int s_mqtt_frame_and_decode_connack_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; struct aws_mqtt_packet_connack connack_packet; ASSERT_SUCCESS(aws_mqtt_packet_connack_init(&connack_packet, true, AWS_MQTT_CONNECT_NOT_AUTHORIZED)); test_context.expected_packet = &connack_packet; struct aws_byte_buf encoded_buffer; aws_byte_buf_init(&encoded_buffer, allocator, 100 * TEST_ADJACENT_PACKET_COUNT); for (size_t j = 0; j < TEST_ADJACENT_PACKET_COUNT; ++j) { ASSERT_SUCCESS(aws_mqtt_packet_connack_encode(&encoded_buffer, &connack_packet)); } size_t fragment_lengths[] = {1, 2, 3, 5, 7, 11, 23}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_buf(&encoded_buffer); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } ASSERT_INT_EQUALS(4 * AWS_ARRAY_SIZE(fragment_lengths), test_context.packet_count[AWS_MQTT_PACKET_CONNACK]); aws_byte_buf_clean_up(&encoded_buffer); s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_connack, s_mqtt_frame_and_decode_connack_fn) static int s_mqtt_frame_and_decode_bad_remaining_length_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; /* QoS 0 Publish "Packet" data where the remaining length vli-encoding is illegal */ uint8_t bad_packet_data[] = 
{0x30, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00}; size_t fragment_lengths[] = {1, 2, 3, 5, 13}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { aws_mqtt311_decoder_reset_for_new_connection(decoder); size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_array(bad_packet_data, AWS_ARRAY_SIZE(bad_packet_data)); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); /* If this or a previous call contains the final 0x80 of the invalid vli encoding, then decode must fail */ bool should_fail = (fragment_cursor.ptr + fragment_cursor.len) - bad_packet_data > 4; if (should_fail) { ASSERT_FAILS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } else { ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } } s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_bad_remaining_length, s_mqtt_frame_and_decode_bad_remaining_length_fn) static int s_mqtt_frame_and_decode_unsupported_packet_type_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; /* Pingreq packet, no handler installed */ uint8_t pingreq_packet_data[] = {192, 0}; size_t fragment_lengths[] = {1, 2}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { aws_mqtt311_decoder_reset_for_new_connection(decoder); size_t fragment_length = fragment_lengths[j]; struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_array(pingreq_packet_data, AWS_ARRAY_SIZE(pingreq_packet_data)); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); /* If this is the final call, it should fail-but-not-crash as there's no handler */ bool should_fail = packet_cursor.len == 0; if (should_fail) { ASSERT_FAILS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } else { ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } } s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_unsupported_packet_type, s_mqtt_frame_and_decode_unsupported_packet_type_fn) static int s_mqtt_frame_and_decode_bad_flags_for_packet_type_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt_311_decoding_test_context test_context; s_init_decoding_test_context(&test_context, allocator); struct aws_mqtt311_decoder *decoder = &test_context.decoder; /* start with a valid suback */ struct aws_mqtt_packet_suback suback_packet; ASSERT_SUCCESS(aws_mqtt_packet_suback_init(&suback_packet, allocator, 1234)); uint8_t sample_return_codes[] = {0x00, 0x01, 0x02, 0x80, 0x01}; for (size_t i = 0; i < AWS_ARRAY_SIZE(sample_return_codes); ++i) { aws_mqtt_packet_suback_add_return_code(&suback_packet, sample_return_codes[i]); } /* encode it */ struct aws_byte_buf encoded_buffer; aws_byte_buf_init(&encoded_buffer, allocator, 100 * TEST_ADJACENT_PACKET_COUNT); ASSERT_SUCCESS(aws_mqtt_packet_suback_encode(&encoded_buffer, &suback_packet)); /* suback flags should 
be zero; mess that up */ encoded_buffer.buffer[0] |= 0x05; size_t fragment_lengths[] = {1, 2, 3, 5, 7, 11, 23}; for (size_t j = 0; j < AWS_ARRAY_SIZE(fragment_lengths); ++j) { size_t fragment_length = fragment_lengths[j]; aws_mqtt311_decoder_reset_for_new_connection(decoder); struct aws_byte_cursor packet_cursor = aws_byte_cursor_from_buf(&encoded_buffer); while (packet_cursor.len > 0) { size_t advance = aws_min_size(packet_cursor.len, fragment_length); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&packet_cursor, advance); /* If this is the final call, it should fail-but-not-crash as the full decode should fail */ bool should_fail = packet_cursor.len == 0; if (should_fail) { ASSERT_FAILS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } else { ASSERT_SUCCESS(aws_mqtt311_decoder_on_bytes_received(decoder, fragment_cursor)); } } } aws_mqtt_packet_suback_clean_up(&suback_packet); aws_byte_buf_clean_up(&encoded_buffer); s_clean_up_decoding_test_context(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_frame_and_decode_bad_flags_for_packet_type, s_mqtt_frame_and_decode_bad_flags_for_packet_type_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v3/topic_tree_test.c000066400000000000000000000405431456575232400252550ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include static struct aws_byte_cursor s_empty_cursor = { .ptr = NULL, .len = 0, }; static int times_called = 0; static void on_publish( const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *user_data) { (void)topic; (void)payload; (void)dup; (void)qos; (void)retain; (void)user_data; times_called++; } static void s_string_clean_up(void *userdata) { struct aws_string *string = userdata; aws_string_destroy(string); } /* Subscribes to multiple topics and returns the number that matched with the pub_topic */ static int s_check_multi_topic_match( struct aws_allocator *allocator, const char **sub_filters, size_t sub_filters_len, const char *pub_topic) { times_called = 0; struct aws_mqtt_topic_tree tree; aws_mqtt_topic_tree_init(&tree, allocator); for (size_t i = 0; i < sub_filters_len; ++i) { struct aws_string *topic_filter = aws_string_new_from_c_str(allocator, sub_filters[i]); aws_mqtt_topic_tree_insert( &tree, topic_filter, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_string_clean_up, topic_filter); } struct aws_byte_cursor filter_cursor = aws_byte_cursor_from_array(pub_topic, strlen(pub_topic)); struct aws_mqtt_packet_publish publish; aws_mqtt_packet_publish_init(&publish, false, AWS_MQTT_QOS_AT_MOST_ONCE, false, filter_cursor, 1, s_empty_cursor); aws_mqtt_topic_tree_publish(&tree, &publish); aws_mqtt_topic_tree_clean_up(&tree); return times_called; } static bool s_check_topic_match(struct aws_allocator *allocator, const char *sub_filter, const char *pub_topic) { int matches = s_check_multi_topic_match(allocator, &sub_filter, 1, pub_topic); return matches == 1; } AWS_TEST_CASE(mqtt_topic_tree_match, s_mqtt_topic_tree_match_fn) static int s_mqtt_topic_tree_match_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Check single-level filters */ ASSERT_TRUE(s_check_topic_match(allocator, "a", "a")); ASSERT_FALSE(s_check_topic_match(allocator, "b", "B")); /* Check multi-level filters */ ASSERT_TRUE(s_check_topic_match(allocator, "a/b", "a/b")); 
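    /* Matching is done level by level on '/'-separated segments and is case-sensitive; a '+' in a filter matches
     * exactly one level and '#' matches the remainder of the topic, which is what the single- and multi-level
     * wildcard assertions below exercise (e.g. "sport/tennis/+" matches "sport/tennis/player1" but not
     * "sport/tennis/player1/ranking"). */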
ASSERT_FALSE(s_check_topic_match(allocator, "a/b", "a/B")); ASSERT_FALSE(s_check_topic_match(allocator, "a/b", "a/b/c")); ASSERT_FALSE(s_check_topic_match(allocator, "a/b/c", "a/b")); ASSERT_TRUE(s_check_topic_match(allocator, "a/b/c", "a/b/c")); ASSERT_FALSE(s_check_topic_match(allocator, "a/b/c", "a/B/c")); /* Check single-level wildcard filters */ ASSERT_TRUE(s_check_topic_match(allocator, "sport/tennis/+", "sport/tennis/player1")); ASSERT_TRUE(s_check_topic_match(allocator, "sport/tennis/+", "sport/tennis/player2")); ASSERT_FALSE(s_check_topic_match(allocator, "sport/tennis/+", "sport/tennis/player1/ranking")); ASSERT_TRUE(s_check_topic_match(allocator, "sport/+", "sport/")); ASSERT_FALSE(s_check_topic_match(allocator, "sport/+", "sport")); ASSERT_TRUE(s_check_topic_match(allocator, "+/+", "/finance")); ASSERT_TRUE(s_check_topic_match(allocator, "/+", "/finance")); ASSERT_FALSE(s_check_topic_match(allocator, "+", "/finance")); ASSERT_TRUE(s_check_topic_match(allocator, "///", "///")); ASSERT_FALSE(s_check_topic_match(allocator, "///", "//")); const char *sub_topics[] = {"a/b/c", "a/+/c", "a/#"}; ASSERT_INT_EQUALS(s_check_multi_topic_match(allocator, sub_topics, AWS_ARRAY_SIZE(sub_topics), "a/b/c"), 3); ASSERT_INT_EQUALS(s_check_multi_topic_match(allocator, sub_topics, AWS_ARRAY_SIZE(sub_topics), "a/Z/c"), 2); ASSERT_INT_EQUALS(s_check_multi_topic_match(allocator, sub_topics, AWS_ARRAY_SIZE(sub_topics), "a/b/Z"), 1); ASSERT_INT_EQUALS(s_check_multi_topic_match(allocator, sub_topics, AWS_ARRAY_SIZE(sub_topics), "Z/b/c"), 0); return AWS_OP_SUCCESS; } static struct aws_byte_cursor s_topic_a_a = { .ptr = (uint8_t *)"a/a", .len = 3, }; static struct aws_byte_cursor s_topic_a_a_a = { .ptr = (uint8_t *)"a/a/a", .len = 5, }; static struct aws_byte_cursor s_topic_a_a_b = { .ptr = (uint8_t *)"a/a/b", .len = 5, }; AWS_TEST_CASE(mqtt_topic_tree_unsubscribe, s_mqtt_topic_tree_unsubscribe_fn) static int s_mqtt_topic_tree_unsubscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt_topic_tree tree; ASSERT_SUCCESS(aws_mqtt_topic_tree_init(&tree, allocator)); struct aws_string *topic_a_a = aws_string_new_from_array(allocator, s_topic_a_a.ptr, s_topic_a_a.len); struct aws_string *topic_a_a_a = aws_string_new_from_array(allocator, s_topic_a_a_a.ptr, s_topic_a_a_a.len); struct aws_string *topic_a_a_b = aws_string_new_from_array(allocator, s_topic_a_a_b.ptr, s_topic_a_a_b.len); AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, aws_mqtt_topic_tree_action_size * 3); struct aws_array_list transaction; aws_array_list_init_static(&transaction, transaction_buf, 3, aws_mqtt_topic_tree_action_size); ASSERT_SUCCESS(aws_mqtt_topic_tree_insert( &tree, topic_a_a_a, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_string_clean_up, topic_a_a_a)); /* At this moment, the topic_a_a was not inserted. Though the remove returns a success, the topic_a_a_a should still * remained in the tree. * The test is inspired by the ticket: https://github.com/awslabs/aws-crt-nodejs/issues/405. There was a crash when * we unsubscribe from an unsubscribed parent topic. * We fixed the issue in https://github.com/awslabs/aws-c-mqtt/pull/297 */ ASSERT_SUCCESS(aws_mqtt_topic_tree_remove(&tree, &s_topic_a_a)); ASSERT_SUCCESS(aws_mqtt_topic_tree_remove(&tree, &s_topic_a_a_a)); /* Re-create, it was nuked by remove. */ topic_a_a_a = aws_string_new_from_array(allocator, s_topic_a_a_a.ptr, s_topic_a_a_a.len); /* Ensure that the intermediate 'a' node was removed as well. 
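     * Removing the only subscription under a branch is expected to prune the now-empty intermediate nodes, so the
     * root's subtopic table should be back to zero entries.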
*/ ASSERT_UINT_EQUALS(0, aws_hash_table_get_entry_count(&tree.root->subtopics)); /* Put it back so we can test removal of a partial tree. */ /* Bonus points: test transactions here */ ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a_a, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_string_clean_up, topic_a_a_a)); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_string_clean_up, topic_a_a)); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a_b, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_string_clean_up, topic_a_a_b)); aws_mqtt_topic_tree_transaction_commit(&tree, &transaction); /* Should remove the last /a, but not the first 2. */ void *userdata = (void *)0xBADCAFE; ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_remove(&tree, &transaction, &s_topic_a_a_a, &userdata)); /* Ensure userdata was set back to the right user_data correctly. */ ASSERT_PTR_EQUALS(topic_a_a_a, userdata); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_remove(&tree, &transaction, &s_topic_a_a, NULL)); aws_mqtt_topic_tree_transaction_commit(&tree, &transaction); struct aws_mqtt_packet_publish publish; aws_mqtt_packet_publish_init(&publish, false, AWS_MQTT_QOS_AT_MOST_ONCE, false, s_topic_a_a_a, 1, s_topic_a_a_a); times_called = 0; aws_mqtt_topic_tree_publish(&tree, &publish); ASSERT_INT_EQUALS(times_called, 0); publish.topic_name = s_topic_a_a_b; times_called = 0; aws_mqtt_topic_tree_publish(&tree, &publish); ASSERT_INT_EQUALS(times_called, 1); const struct aws_byte_cursor not_in_tree = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("not/in/tree"); aws_mqtt_topic_tree_remove(&tree, ¬_in_tree); aws_mqtt_topic_tree_clean_up(&tree); return AWS_OP_SUCCESS; } struct s_duplicate_test_ud { struct aws_string *string; bool cleaned; }; static void s_userdata_cleanup(void *userdata) { struct s_duplicate_test_ud *ud = userdata; aws_string_destroy(ud->string); ud->cleaned = true; } AWS_TEST_CASE(mqtt_topic_tree_duplicate_transactions, s_mqtt_topic_tree_duplicate_transactions_fn) static int s_mqtt_topic_tree_duplicate_transactions_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt_topic_tree tree; ASSERT_SUCCESS(aws_mqtt_topic_tree_init(&tree, allocator)); struct aws_string *topic_a_a = aws_string_new_from_array(allocator, s_topic_a_a.ptr, s_topic_a_a.len); struct aws_string *topic_a_a_a = aws_string_new_from_array(allocator, s_topic_a_a_a.ptr, s_topic_a_a_a.len); struct aws_string *topic_a_a_a_copy = aws_string_new_from_array(allocator, s_topic_a_a_a.ptr, s_topic_a_a_a.len); struct aws_string *topic_a_a_b = aws_string_new_from_array(allocator, s_topic_a_a_b.ptr, s_topic_a_a_b.len); size_t number_topics = 4; AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, aws_mqtt_topic_tree_action_size * number_topics); struct aws_array_list transaction; aws_array_list_init_static(&transaction, transaction_buf, number_topics, aws_mqtt_topic_tree_action_size); /* Ensure that the intermediate 'a' node was removed as well. 
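     * (At this point nothing has been inserted yet, so the root's subtopic table is expected to be empty.)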
*/ ASSERT_UINT_EQUALS(0, aws_hash_table_get_entry_count(&tree.root->subtopics)); struct s_duplicate_test_ud ud_a_a = {.string = topic_a_a, .cleaned = false}; struct s_duplicate_test_ud ud_a_a_a = {.string = topic_a_a_a, .cleaned = false}; struct s_duplicate_test_ud ud_a_a_a_copy = {.string = topic_a_a_a_copy, .cleaned = false}; struct s_duplicate_test_ud ud_a_a_b = {.string = topic_a_a_b, .cleaned = false}; /* insert duplicate strings, and the old userdata will be cleaned up */ ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a_a, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_userdata_cleanup, &ud_a_a_a)); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a_a_copy, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_userdata_cleanup, &ud_a_a_a_copy)); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_userdata_cleanup, &ud_a_a)); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a_b, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_userdata_cleanup, &ud_a_a_b)); aws_mqtt_topic_tree_transaction_commit(&tree, &transaction); /* The copy replaced the original node, and the old string has been cleaned up, but the new string will live with * the topic tree. */ ASSERT_TRUE(ud_a_a_a.cleaned); ASSERT_FALSE(ud_a_a_a_copy.cleaned); ASSERT_FALSE(ud_a_a.cleaned); ASSERT_FALSE(ud_a_a_b.cleaned); /* the result will be the same as we just intert three nodes */ ASSERT_UINT_EQUALS(1, aws_hash_table_get_entry_count(&tree.root->subtopics)); struct aws_mqtt_packet_publish publish; aws_mqtt_packet_publish_init(&publish, false, AWS_MQTT_QOS_AT_MOST_ONCE, false, s_topic_a_a, 1, s_topic_a_a); times_called = 0; aws_mqtt_topic_tree_publish(&tree, &publish); ASSERT_INT_EQUALS(times_called, 1); aws_mqtt_topic_tree_clean_up(&tree); ASSERT_TRUE(ud_a_a_a_copy.cleaned); ASSERT_TRUE(ud_a_a.cleaned); ASSERT_TRUE(ud_a_a_b.cleaned); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_topic_tree_transactions, s_mqtt_topic_tree_transactions_fn) static int s_mqtt_topic_tree_transactions_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt_topic_tree tree; ASSERT_SUCCESS(aws_mqtt_topic_tree_init(&tree, allocator)); struct aws_string *topic_a_a = aws_string_new_from_array(allocator, s_topic_a_a.ptr, s_topic_a_a.len); AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, aws_mqtt_topic_tree_action_size * 3); struct aws_array_list transaction; aws_array_list_init_static(&transaction, transaction_buf, 3, aws_mqtt_topic_tree_action_size); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_insert( &tree, &transaction, topic_a_a, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_string_clean_up, topic_a_a)); /* The userdata will not be cleaned up by roll back, since the transaction has not been commit yet. */ aws_mqtt_topic_tree_transaction_roll_back(&tree, &transaction); /* Ensure that the intermediate 'a' node was removed as well. 
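     * A rolled-back transactional insert should leave no nodes behind, so the root's subtopic table is expected to
     * be empty again.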
*/ ASSERT_UINT_EQUALS(0, aws_hash_table_get_entry_count(&tree.root->subtopics)); /* Insert(commit), remove, roll back the removal */ ASSERT_SUCCESS(aws_mqtt_topic_tree_insert( &tree, topic_a_a, AWS_MQTT_QOS_AT_MOST_ONCE, &on_publish, s_string_clean_up, topic_a_a)); ASSERT_SUCCESS(aws_mqtt_topic_tree_transaction_remove(&tree, &transaction, &s_topic_a_a, NULL)); aws_mqtt_topic_tree_transaction_roll_back(&tree, &transaction); struct aws_mqtt_packet_publish publish; aws_mqtt_packet_publish_init(&publish, false, AWS_MQTT_QOS_AT_MOST_ONCE, false, s_topic_a_a, 1, s_topic_a_a); times_called = 0; aws_mqtt_topic_tree_publish(&tree, &publish); ASSERT_INT_EQUALS(times_called, 1); aws_mqtt_topic_tree_clean_up(&tree); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_topic_validation, s_mqtt_topic_validation_fn) static int s_mqtt_topic_validation_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #define ASSERT_TOPIC_VALIDITY(expected, topic) \ do { \ struct aws_byte_cursor topic_cursor; \ topic_cursor.ptr = (uint8_t *)(topic); \ topic_cursor.len = strlen(topic); \ ASSERT_##expected(aws_mqtt_is_valid_topic(&topic_cursor)); \ } while (false) ASSERT_TOPIC_VALIDITY(TRUE, "/"); ASSERT_TOPIC_VALIDITY(TRUE, "a/"); ASSERT_TOPIC_VALIDITY(TRUE, "/b"); ASSERT_TOPIC_VALIDITY(TRUE, "a/b/c"); ASSERT_TOPIC_VALIDITY(FALSE, "#"); ASSERT_TOPIC_VALIDITY(FALSE, "sport/tennis/#"); ASSERT_TOPIC_VALIDITY(FALSE, "sport/tennis#"); ASSERT_TOPIC_VALIDITY(FALSE, "sport/tennis/#/ranking"); ASSERT_TOPIC_VALIDITY(FALSE, ""); ASSERT_TOPIC_VALIDITY(FALSE, "+"); ASSERT_TOPIC_VALIDITY(FALSE, "+/tennis/#"); ASSERT_TOPIC_VALIDITY(FALSE, "sport/+/player1"); ASSERT_TOPIC_VALIDITY(FALSE, "sport+"); ASSERT_TOPIC_VALIDITY(FALSE, "\x41/\xED\xBF\xBF/\x41"); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_topic_filter_validation, s_mqtt_topic_filter_validation_fn) static int s_mqtt_topic_filter_validation_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; #define ASSERT_TOPIC_FILTER_VALIDITY(expected, topic_filter) \ do { \ struct aws_byte_cursor topic_filter_cursor; \ topic_filter_cursor.ptr = (uint8_t *)(topic_filter); \ topic_filter_cursor.len = strlen(topic_filter); \ ASSERT_##expected(aws_mqtt_is_valid_topic_filter(&topic_filter_cursor)); \ } while (false) ASSERT_TOPIC_FILTER_VALIDITY(TRUE, "#"); ASSERT_TOPIC_FILTER_VALIDITY(TRUE, "sport/tennis/#"); ASSERT_TOPIC_FILTER_VALIDITY(FALSE, "sport/tennis#"); ASSERT_TOPIC_FILTER_VALIDITY(FALSE, "sport/tennis/#/ranking"); ASSERT_TOPIC_FILTER_VALIDITY(FALSE, ""); ASSERT_TOPIC_FILTER_VALIDITY(TRUE, "+/"); ASSERT_TOPIC_FILTER_VALIDITY(TRUE, "+"); ASSERT_TOPIC_FILTER_VALIDITY(TRUE, "+/tennis/#"); ASSERT_TOPIC_FILTER_VALIDITY(TRUE, "sport/+/player1"); ASSERT_TOPIC_FILTER_VALIDITY(FALSE, "sport+"); ASSERT_TOPIC_FILTER_VALIDITY(FALSE, "\x41/\xED\xA0\x80/\x41"); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/000077500000000000000000000000001456575232400217115ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_client_tests.c000066400000000000000000007323331456575232400257220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "mqtt5_testing_utils.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #define TEST_IO_MESSAGE_LENGTH 4096 static bool s_is_within_percentage_of(uint64_t expected_time, uint64_t actual_time, double percentage) { double actual_percent = 1.0 - (double)actual_time / (double)expected_time; return fabs(actual_percent) <= percentage; } int aws_mqtt5_mock_server_send_packet( struct aws_mqtt5_server_mock_connection_context *connection, enum aws_mqtt5_packet_type packet_type, void *packet) { aws_mqtt5_encoder_append_packet_encoding(&connection->encoder, packet_type, packet); struct aws_io_message *message = aws_channel_acquire_message_from_pool( connection->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, TEST_IO_MESSAGE_LENGTH); if (message == NULL) { return AWS_OP_ERR; } enum aws_mqtt5_encoding_result result = aws_mqtt5_encoder_encode_to_buffer(&connection->encoder, &message->message_data); AWS_FATAL_ASSERT(result == AWS_MQTT5_ER_FINISHED); if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) { aws_mem_release(message->allocator, message); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } int aws_mqtt5_mock_server_handle_connect_always_succeed( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } static int s_aws_mqtt5_mock_server_handle_pingreq_always_respond( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PINGRESP, NULL); } static int s_aws_mqtt5_mock_server_handle_disconnect( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)connection; (void)user_data; return AWS_OP_SUCCESS; } void s_lifecycle_event_callback(const struct aws_mqtt5_client_lifecycle_event *event) { (void)event; } void s_publish_received_callback(const struct aws_mqtt5_packet_publish_view *publish, void *user_data) { (void)publish; (void)user_data; } AWS_STRING_FROM_LITERAL(g_default_client_id, "HelloWorld"); void aws_mqtt5_client_test_init_default_options(struct mqtt5_client_test_options *test_options) { struct aws_mqtt5_client_topic_alias_options local_topic_aliasing_options = { .outbound_topic_alias_behavior = AWS_MQTT5_COTABT_DISABLED, }; test_options->topic_aliasing_options = local_topic_aliasing_options; struct aws_mqtt5_packet_connect_view local_connect_options = { .keep_alive_interval_seconds = 30, .client_id = aws_byte_cursor_from_string(g_default_client_id), .clean_start = true, }; test_options->connect_options = local_connect_options; struct aws_mqtt5_client_options local_client_options = { .connect_options = &test_options->connect_options, .session_behavior = AWS_MQTT5_CSBT_CLEAN, .lifecycle_event_handler = s_lifecycle_event_callback, .lifecycle_event_handler_user_data = NULL, .max_reconnect_delay_ms = 120000, .min_connected_time_to_reset_reconnect_delay_ms = 30000, .min_reconnect_delay_ms = 1000, .ping_timeout_ms = 10000, .publish_received_handler = s_publish_received_callback, .ack_timeout_seconds = 0, .topic_aliasing_options = &test_options->topic_aliasing_options, }; test_options->client_options = 
local_client_options; struct aws_mqtt5_mock_server_vtable local_server_function_table = { .packet_handlers = { NULL, /* RESERVED = 0 */ &aws_mqtt5_mock_server_handle_connect_always_succeed, /* CONNECT */ NULL, /* CONNACK */ NULL, /* PUBLISH */ NULL, /* PUBACK */ NULL, /* PUBREC */ NULL, /* PUBREL */ NULL, /* PUBCOMP */ NULL, /* SUBSCRIBE */ NULL, /* SUBACK */ NULL, /* UNSUBSCRIBE */ NULL, /* UNSUBACK */ &s_aws_mqtt5_mock_server_handle_pingreq_always_respond, /* PINGREQ */ NULL, /* PINGRESP */ &s_aws_mqtt5_mock_server_handle_disconnect, /* DISCONNECT */ NULL /* AUTH */ }}; test_options->server_function_table = local_server_function_table; } static int s_aws_mqtt5_client_test_init_default_connect_storage( struct aws_mqtt5_packet_connect_storage *storage, struct aws_allocator *allocator) { struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 30, .client_id = aws_byte_cursor_from_string(g_default_client_id), .clean_start = true, }; return aws_mqtt5_packet_connect_storage_init(storage, allocator, &connect_view); } static int s_aws_mqtt5_client_test_init_default_disconnect_storage( struct aws_mqtt5_packet_disconnect_storage *storage, struct aws_allocator *allocator) { struct aws_mqtt5_packet_disconnect_view disconnect_view = { .reason_code = AWS_MQTT5_DRC_NORMAL_DISCONNECTION, }; return aws_mqtt5_packet_disconnect_storage_init(storage, allocator, &disconnect_view); } static bool s_last_life_cycle_event_is( struct aws_mqtt5_client_mock_test_fixture *test_fixture, enum aws_mqtt5_client_lifecycle_event_type event_type) { size_t event_count = aws_array_list_length(&test_fixture->lifecycle_events); if (event_count == 0) { return false; } struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &record, event_count - 1); return record->event.event_type == event_type; } static bool s_last_mock_server_packet_received_is( struct aws_mqtt5_client_mock_test_fixture *test_fixture, enum aws_mqtt5_packet_type packet_type) { size_t packet_count = aws_array_list_length(&test_fixture->server_received_packets); if (packet_count == 0) { return false; } struct aws_mqtt5_mock_server_packet_record *packet = NULL; aws_array_list_get_at_ptr(&test_fixture->server_received_packets, (void **)&packet, packet_count - 1); return packet_type == packet->packet_type; } static bool s_last_mock_server_packet_received_is_disconnect(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; return s_last_mock_server_packet_received_is(test_fixture, AWS_MQTT5_PT_DISCONNECT); } static void s_wait_for_mock_server_to_receive_disconnect_packet( struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_last_mock_server_packet_received_is_disconnect, test_context); aws_mutex_unlock(&test_context->lock); } static bool s_last_lifecycle_event_is_connected(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; return s_last_life_cycle_event_is(test_fixture, AWS_MQTT5_CLET_CONNECTION_SUCCESS); } void aws_wait_for_connected_lifecycle_event(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_last_lifecycle_event_is_connected, test_context); aws_mutex_unlock(&test_context->lock); } static bool s_last_lifecycle_event_is_stopped(void *arg) { struct aws_mqtt5_client_mock_test_fixture 
*test_fixture = arg; return s_last_life_cycle_event_is(test_fixture, AWS_MQTT5_CLET_STOPPED); } void aws_wait_for_stopped_lifecycle_event(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_last_lifecycle_event_is_stopped, test_context); aws_mutex_unlock(&test_context->lock); } static bool s_has_lifecycle_event( struct aws_mqtt5_client_mock_test_fixture *test_fixture, enum aws_mqtt5_client_lifecycle_event_type event_type) { size_t record_count = aws_array_list_length(&test_fixture->lifecycle_events); for (size_t i = 0; i < record_count; ++i) { struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &record, i); if (record->event.event_type == event_type) { return true; } } return false; } static bool s_has_connection_failure_event(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; return s_has_lifecycle_event(test_fixture, AWS_MQTT5_CLET_CONNECTION_FAILURE); } static void s_wait_for_connection_failure_lifecycle_event(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_has_connection_failure_event, test_context); aws_mutex_unlock(&test_context->lock); } static bool s_has_disconnect_event(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; return s_has_lifecycle_event(test_fixture, AWS_MQTT5_CLET_DISCONNECTION); } static void s_wait_for_disconnection_lifecycle_event(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred(&test_context->signal, &test_context->lock, s_has_disconnect_event, test_context); aws_mutex_unlock(&test_context->lock); } static bool s_disconnect_completion_invoked(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; return test_fixture->disconnect_completion_callback_invoked; } static void s_on_disconnect_completion(int error_code, void *user_data) { (void)error_code; struct aws_mqtt5_client_mock_test_fixture *test_fixture = user_data; aws_mutex_lock(&test_fixture->lock); test_fixture->disconnect_completion_callback_invoked = true; aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } static void s_wait_for_disconnect_completion(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_disconnect_completion_invoked, test_context); aws_mutex_unlock(&test_context->lock); } int aws_verify_client_state_sequence( struct aws_mqtt5_client_mock_test_fixture *test_context, enum aws_mqtt5_client_state *expected_states, size_t expected_states_count) { aws_mutex_lock(&test_context->lock); size_t actual_states_count = aws_array_list_length(&test_context->client_states); ASSERT_TRUE(actual_states_count >= expected_states_count); for (size_t i = 0; i < expected_states_count; ++i) { enum aws_mqtt5_client_state state = AWS_MCS_STOPPED; aws_array_list_get_at(&test_context->client_states, &state, i); ASSERT_INT_EQUALS(expected_states[i], state); } aws_mutex_unlock(&test_context->lock); return AWS_OP_SUCCESS; } static int s_verify_simple_lifecycle_event_sequence( struct aws_mqtt5_client_mock_test_fixture *test_context, struct aws_mqtt5_client_lifecycle_event *expected_events, size_t 
expected_events_count) { aws_mutex_lock(&test_context->lock); size_t actual_events_count = aws_array_list_length(&test_context->lifecycle_events); ASSERT_TRUE(actual_events_count >= expected_events_count); for (size_t i = 0; i < expected_events_count; ++i) { struct aws_mqtt5_lifecycle_event_record *lifecycle_event = NULL; aws_array_list_get_at(&test_context->lifecycle_events, &lifecycle_event, i); struct aws_mqtt5_client_lifecycle_event *expected_event = &expected_events[i]; ASSERT_INT_EQUALS(expected_event->event_type, lifecycle_event->event.event_type); ASSERT_INT_EQUALS(expected_event->error_code, lifecycle_event->event.error_code); } aws_mutex_unlock(&test_context->lock); return AWS_OP_SUCCESS; } int aws_verify_received_packet_sequence( struct aws_mqtt5_client_mock_test_fixture *test_context, struct aws_mqtt5_mock_server_packet_record *expected_packets, size_t expected_packets_count) { aws_mutex_lock(&test_context->lock); size_t actual_packets_count = aws_array_list_length(&test_context->server_received_packets); ASSERT_TRUE(actual_packets_count >= expected_packets_count); for (size_t i = 0; i < expected_packets_count; ++i) { struct aws_mqtt5_mock_server_packet_record *actual_packet = NULL; aws_array_list_get_at_ptr(&test_context->server_received_packets, (void **)&actual_packet, i); struct aws_mqtt5_mock_server_packet_record *expected_packet = &expected_packets[i]; ASSERT_INT_EQUALS(expected_packet->packet_type, actual_packet->packet_type); /* a NULL storage means we don't care about verifying it on a field-by-field basis */ if (expected_packet->packet_storage != NULL) { ASSERT_TRUE(aws_mqtt5_client_test_are_packets_equal( expected_packet->packet_type, expected_packet->packet_storage, actual_packet->packet_storage)); } } aws_mutex_unlock(&test_context->lock); return AWS_OP_SUCCESS; } /* * Basic successful connect/disconnect test. We check expected lifecycle events, internal client state changes, * and server received packets. 
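 * The expected flow, verified below, is: lifecycle events ATTEMPTING_CONNECT -> CONNECTION_SUCCESS -> DISCONNECTION
 * (user requested) -> STOPPED, client states CONNECTING -> MQTT_CONNECT -> CONNECTED -> CLEAN_DISCONNECT ->
 * CHANNEL_SHUTDOWN -> STOPPED, and the mock server receiving a CONNECT followed by a DISCONNECT.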
*/ static int s_mqtt5_client_direct_connect_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_disconnect_view disconnect_options = { .reason_code = AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE, }; struct aws_mqtt5_disconnect_completion_options completion_options = { .completion_callback = s_on_disconnect_completion, .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_stop(client, &disconnect_options, &completion_options)); aws_wait_for_stopped_lifecycle_event(&test_context); s_wait_for_disconnect_completion(&test_context); s_wait_for_mock_server_to_receive_disconnect_packet(&test_context); struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS, }, { .event_type = AWS_MQTT5_CLET_DISCONNECTION, .error_code = AWS_ERROR_MQTT5_USER_REQUESTED_STOP, }, { .event_type = AWS_MQTT5_CLET_STOPPED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CONNECTED, AWS_MCS_CLEAN_DISCONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_STOPPED, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); struct aws_mqtt5_packet_connect_storage expected_connect_storage; ASSERT_SUCCESS(s_aws_mqtt5_client_test_init_default_connect_storage(&expected_connect_storage, allocator)); struct aws_mqtt5_packet_disconnect_storage expected_disconnect_storage; ASSERT_SUCCESS(s_aws_mqtt5_client_test_init_default_disconnect_storage(&expected_disconnect_storage, allocator)); expected_disconnect_storage.storage_view.reason_code = AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE; struct aws_mqtt5_mock_server_packet_record expected_packets[] = { { .packet_type = AWS_MQTT5_PT_CONNECT, .packet_storage = &expected_connect_storage, }, { .packet_type = AWS_MQTT5_PT_DISCONNECT, .packet_storage = &expected_disconnect_storage, }, }; ASSERT_SUCCESS( aws_verify_received_packet_sequence(&test_context, expected_packets, AWS_ARRAY_SIZE(expected_packets))); aws_mqtt5_packet_connect_storage_clean_up(&expected_connect_storage); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_direct_connect_success, s_mqtt5_client_direct_connect_success_fn) /* * Connection failure test infrastructure. Supplied callbacks are used to modify the way in which the connection * establishment fails. 
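 * One optional callback tweaks the test fixture configuration (for example, switching the client over to
 * websockets); the other optional callback swaps entries in the client vtable so that socket or websocket channel
 * setup fails either synchronously or asynchronously. Each variant should surface an ATTEMPTING_CONNECT lifecycle
 * event followed by a CONNECTION_FAILURE.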
*/ static int s_mqtt5_client_simple_failure_test_fn( struct aws_allocator *allocator, void (*change_client_test_config_fn)(struct aws_mqtt5_client_mqtt5_mock_test_fixture_options *config), void (*change_client_vtable_fn)(struct aws_mqtt5_client_vtable *)) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; if (change_client_test_config_fn != NULL) { (*change_client_test_config_fn)(&test_fixture_options); } struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; struct aws_mqtt5_client_vtable vtable = *client->vtable; if (change_client_vtable_fn != NULL) { (*change_client_vtable_fn)(&vtable); aws_mqtt5_client_set_vtable(client, &vtable); } ASSERT_SUCCESS(aws_mqtt5_client_start(client)); s_wait_for_connection_failure_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_INVALID_STATE, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_PENDING_RECONNECT, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static int s_mqtt5_client_test_synchronous_socket_channel_failure_fn( struct aws_socket_channel_bootstrap_options *options) { (void)options; return aws_raise_error(AWS_ERROR_INVALID_STATE); } static void s_change_client_vtable_synchronous_direct_failure(struct aws_mqtt5_client_vtable *vtable) { vtable->client_bootstrap_new_socket_channel_fn = s_mqtt5_client_test_synchronous_socket_channel_failure_fn; } /* Connection failure test where direct MQTT channel establishment fails synchronously */ static int s_mqtt5_client_direct_connect_sync_channel_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS( s_mqtt5_client_simple_failure_test_fn(allocator, NULL, s_change_client_vtable_synchronous_direct_failure)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_direct_connect_sync_channel_failure, s_mqtt5_client_direct_connect_sync_channel_failure_fn) struct socket_channel_failure_wrapper { struct aws_socket_channel_bootstrap_options bootstrap_options; struct aws_task task; }; static struct socket_channel_failure_wrapper s_socket_channel_failure_wrapper; void s_socket_channel_async_failure_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct socket_channel_failure_wrapper *wrapper = arg; struct aws_socket_channel_bootstrap_options *options = &wrapper->bootstrap_options; (*wrapper->bootstrap_options.setup_callback)(options->bootstrap, AWS_ERROR_INVALID_STATE, NULL, options->user_data); } static int 
s_mqtt5_client_test_asynchronous_socket_channel_failure_fn( struct aws_socket_channel_bootstrap_options *options) { aws_task_init( &s_socket_channel_failure_wrapper.task, s_socket_channel_async_failure_task_fn, &s_socket_channel_failure_wrapper, "asynchronous_socket_channel_failure"); s_socket_channel_failure_wrapper.bootstrap_options = *options; struct aws_mqtt5_client *client = options->user_data; aws_event_loop_schedule_task_now(client->loop, &s_socket_channel_failure_wrapper.task); return AWS_OP_SUCCESS; } static void s_change_client_vtable_asynchronous_direct_failure(struct aws_mqtt5_client_vtable *vtable) { vtable->client_bootstrap_new_socket_channel_fn = s_mqtt5_client_test_asynchronous_socket_channel_failure_fn; } /* Connection failure test where direct MQTT channel establishment fails asynchronously */ static int s_mqtt5_client_direct_connect_async_channel_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS( s_mqtt5_client_simple_failure_test_fn(allocator, NULL, s_change_client_vtable_asynchronous_direct_failure)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_direct_connect_async_channel_failure, s_mqtt5_client_direct_connect_async_channel_failure_fn) static int s_mqtt5_client_test_synchronous_websocket_failure_fn( const struct aws_websocket_client_connection_options *options) { (void)options; return aws_raise_error(AWS_ERROR_INVALID_STATE); } static void s_change_client_vtable_synchronous_websocket_failure(struct aws_mqtt5_client_vtable *vtable) { vtable->websocket_connect_fn = s_mqtt5_client_test_synchronous_websocket_failure_fn; } static void s_mqtt5_client_test_websocket_successful_transform( struct aws_http_message *request, void *user_data, aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn, void *complete_ctx) { (void)user_data; (*complete_fn)(request, AWS_ERROR_SUCCESS, complete_ctx); } static void s_change_client_options_to_websockets(struct aws_mqtt5_client_mqtt5_mock_test_fixture_options *config) { config->client_options->websocket_handshake_transform = s_mqtt5_client_test_websocket_successful_transform; } /* Connection failure test where websocket MQTT channel establishment fails synchronously */ static int s_mqtt5_client_websocket_connect_sync_channel_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_mqtt5_client_simple_failure_test_fn( allocator, s_change_client_options_to_websockets, s_change_client_vtable_synchronous_websocket_failure)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_websocket_connect_sync_channel_failure, s_mqtt5_client_websocket_connect_sync_channel_failure_fn) struct websocket_channel_failure_wrapper { struct aws_websocket_client_connection_options websocket_options; struct aws_task task; }; static struct websocket_channel_failure_wrapper s_websocket_channel_failure_wrapper; void s_websocket_channel_async_failure_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct websocket_channel_failure_wrapper *wrapper = arg; struct aws_websocket_client_connection_options *options = &wrapper->websocket_options; struct aws_websocket_on_connection_setup_data websocket_setup = {.error_code = AWS_ERROR_INVALID_STATE}; (*wrapper->websocket_options.on_connection_setup)(&websocket_setup, options->user_data); } static int s_mqtt5_client_test_asynchronous_websocket_failure_fn( const struct aws_websocket_client_connection_options *options) { aws_task_init( 
&s_websocket_channel_failure_wrapper.task, s_websocket_channel_async_failure_task_fn, &s_websocket_channel_failure_wrapper, "asynchronous_websocket_channel_failure"); s_websocket_channel_failure_wrapper.websocket_options = *options; struct aws_mqtt5_client *client = options->user_data; aws_event_loop_schedule_task_now(client->loop, &s_websocket_channel_failure_wrapper.task); return AWS_OP_SUCCESS; } static void s_change_client_vtable_asynchronous_websocket_failure(struct aws_mqtt5_client_vtable *vtable) { vtable->websocket_connect_fn = s_mqtt5_client_test_asynchronous_websocket_failure_fn; } /* Connection failure test where websocket MQTT channel establishment fails asynchronously */ static int s_mqtt5_client_websocket_connect_async_channel_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_mqtt5_client_simple_failure_test_fn( allocator, s_change_client_options_to_websockets, s_change_client_vtable_asynchronous_websocket_failure)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_websocket_connect_async_channel_failure, s_mqtt5_client_websocket_connect_async_channel_failure_fn) static void s_mqtt5_client_test_websocket_failed_transform( struct aws_http_message *request, void *user_data, aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn, void *complete_ctx) { (void)user_data; (*complete_fn)(request, AWS_ERROR_INVALID_STATE, complete_ctx); } static void s_change_client_options_to_failed_websocket_transform( struct aws_mqtt5_client_mqtt5_mock_test_fixture_options *config) { config->client_options->websocket_handshake_transform = s_mqtt5_client_test_websocket_failed_transform; } /* Connection failure test where websocket MQTT channel establishment fails due to handshake transform failure */ static int s_mqtt5_client_websocket_connect_handshake_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS( s_mqtt5_client_simple_failure_test_fn(allocator, s_change_client_options_to_failed_websocket_transform, NULL)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_websocket_connect_handshake_failure, s_mqtt5_client_websocket_connect_handshake_failure_fn) int aws_mqtt5_mock_server_handle_connect_always_fail( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); connack_view.reason_code = AWS_MQTT5_CRC_BANNED; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } /* Connection failure test where overall connection fails due to a CONNACK error code */ static int s_mqtt5_client_direct_connect_connack_refusal_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = aws_mqtt5_mock_server_handle_connect_always_fail; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); 
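    /* The mock server's CONNECT handler replies with a CONNACK carrying AWS_MQTT5_CRC_BANNED, so the client is
     * expected to report a CONNECTION_FAILURE with AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED and shut the channel
     * down. */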
s_wait_for_connection_failure_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_direct_connect_connack_refusal, s_mqtt5_client_direct_connect_connack_refusal_fn) /* Connection failure test where overall connection fails because there's no response to the CONNECT packet */ static int s_mqtt5_client_direct_connect_connack_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* fast CONNACK timeout and don't response to the CONNECT packet */ test_options.client_options.connack_timeout_ms = 2000; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = NULL; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); s_wait_for_connection_failure_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_TIMEOUT, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_direct_connect_connack_timeout, s_mqtt5_client_direct_connect_connack_timeout_fn) struct aws_mqtt5_server_disconnect_test_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; bool disconnect_sent; bool connack_sent; }; static void s_server_disconnect_service_fn( struct aws_mqtt5_server_mock_connection_context *mock_server, void *user_data) { struct aws_mqtt5_server_disconnect_test_context *test_context = user_data; if (test_context->disconnect_sent || !test_context->connack_sent) { return; } test_context->disconnect_sent = true; struct aws_mqtt5_packet_disconnect_view disconnect = { .reason_code = 
AWS_MQTT5_DRC_PACKET_TOO_LARGE, }; aws_mqtt5_mock_server_send_packet(mock_server, AWS_MQTT5_PT_DISCONNECT, &disconnect); } static int s_aws_mqtt5_server_disconnect_on_connect( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { /* * We intercept the CONNECT in order to correctly set the connack_sent test state. Otherwise we risk sometimes * sending the DISCONNECT before the CONNACK */ int result = aws_mqtt5_mock_server_handle_connect_always_succeed(packet, connection, user_data); struct aws_mqtt5_server_disconnect_test_context *test_context = user_data; test_context->connack_sent = true; return result; } /* Connection test where we succeed and then the server sends a DISCONNECT */ static int s_mqtt5_client_direct_connect_from_server_disconnect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* mock server sends a DISCONNECT packet back to the client after a successful CONNECTION establishment */ test_options.server_function_table.service_task_fn = s_server_disconnect_service_fn; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_server_disconnect_on_connect; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); s_wait_for_disconnection_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS, }, { .event_type = AWS_MQTT5_CLET_DISCONNECTION, .error_code = AWS_ERROR_MQTT5_DISCONNECT_RECEIVED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CONNECTED, AWS_MCS_CHANNEL_SHUTDOWN, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_direct_connect_from_server_disconnect, s_mqtt5_client_direct_connect_from_server_disconnect_fn) struct aws_mqtt5_client_test_wait_for_n_context { size_t required_event_count; struct aws_mqtt5_client_mock_test_fixture *test_fixture; }; static bool s_received_at_least_n_pingreqs(void *arg) { struct aws_mqtt5_client_test_wait_for_n_context *ping_context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = ping_context->test_fixture; size_t ping_count = 0; size_t packet_count = aws_array_list_length(&test_fixture->server_received_packets); for (size_t i = 0; i < packet_count; ++i) { struct aws_mqtt5_mock_server_packet_record *record = 
NULL; aws_array_list_get_at_ptr(&test_fixture->server_received_packets, (void **)&record, i); if (record->packet_type == AWS_MQTT5_PT_PINGREQ) { ping_count++; } } return ping_count >= ping_context->required_event_count; } static void s_wait_for_n_pingreqs(struct aws_mqtt5_client_test_wait_for_n_context *ping_context) { struct aws_mqtt5_client_mock_test_fixture *test_context = ping_context->test_fixture; aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_received_at_least_n_pingreqs, ping_context); aws_mutex_unlock(&test_context->lock); } #define TEST_PING_INTERVAL_MS 2000 static int s_verify_ping_sequence_timing(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); uint64_t last_packet_time = 0; size_t packet_count = aws_array_list_length(&test_context->server_received_packets); for (size_t i = 0; i < packet_count; ++i) { struct aws_mqtt5_mock_server_packet_record *record = NULL; aws_array_list_get_at_ptr(&test_context->server_received_packets, (void **)&record, i); if (i == 0) { ASSERT_INT_EQUALS(record->packet_type, AWS_MQTT5_PT_CONNECT); last_packet_time = record->timestamp; } else { if (record->packet_type == AWS_MQTT5_PT_PINGREQ) { uint64_t time_delta_ns = record->timestamp - last_packet_time; uint64_t time_delta_millis = aws_timestamp_convert(time_delta_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); ASSERT_TRUE(s_is_within_percentage_of(TEST_PING_INTERVAL_MS, time_delta_millis, .3)); last_packet_time = record->timestamp; } } } aws_mutex_unlock(&test_context->lock); return AWS_OP_SUCCESS; } static int s_aws_mqtt5_client_test_init_ping_test_connect_storage( struct aws_mqtt5_packet_connect_storage *storage, struct aws_allocator *allocator) { struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = (uint16_t)aws_timestamp_convert(TEST_PING_INTERVAL_MS, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, NULL), .client_id = aws_byte_cursor_from_string(g_default_client_id), .clean_start = true, }; return aws_mqtt5_packet_connect_storage_init(storage, allocator, &connect_view); } /* * Test that the client sends pings at regular intervals to the server * * This is a low-keep-alive variant of the basic success test that waits for N pingreqs to be received by the server * and validates the approximate time intervals between them. 
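 *
 * Rough expected timeline, assuming the constants chosen below: the CONNECT uses a keep alive of
 * TEST_PING_INTERVAL_MS (2000 ms), so each PINGREQ should arrive roughly 2000 ms after the previous outbound packet,
 * and waiting for 5 PINGREQs keeps the connected portion of the test to roughly 10 seconds.
 * s_verify_ping_sequence_timing() checks each observed gap against TEST_PING_INTERVAL_MS with a 30% tolerance.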
*/ static int s_mqtt5_client_ping_sequence_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* fast keep alive in order keep tests reasonably short */ uint16_t keep_alive_seconds = (uint16_t)aws_timestamp_convert(TEST_PING_INTERVAL_MS, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, NULL); test_options.connect_options.keep_alive_interval_seconds = keep_alive_seconds; /* faster ping timeout */ test_options.client_options.ping_timeout_ms = 750; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_test_wait_for_n_context ping_context = { .required_event_count = 5, .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &ping_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); s_wait_for_n_pingreqs(&ping_context); /* * There's a really unpleasant race condition where we can stop the client so fast (based on the mock * server receiving PINGREQs that the mock server's socket gets closed underneath it as it is trying to * write the PINGRESP back to the client, which in turn triggers channel shutdown where no further data * is read from the socket, so we never see the DISCONNECT that the client actually sent. * * We're not able to wait on the PINGRESP because we have no insight into when it's received. So for now, * we'll insert an artificial sleep before stopping the client. We should try and come up with a more * elegant solution. 
*/ aws_thread_current_sleep(aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); struct aws_mqtt5_packet_disconnect_view disconnect_view = { .reason_code = AWS_MQTT5_DRC_NORMAL_DISCONNECTION, }; ASSERT_SUCCESS(aws_mqtt5_client_stop(client, &disconnect_view, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); s_wait_for_mock_server_to_receive_disconnect_packet(&test_context); struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS, }, { .event_type = AWS_MQTT5_CLET_DISCONNECTION, .error_code = AWS_ERROR_MQTT5_USER_REQUESTED_STOP, }, { .event_type = AWS_MQTT5_CLET_STOPPED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CONNECTED, AWS_MCS_CLEAN_DISCONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_STOPPED, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); struct aws_mqtt5_packet_connect_storage expected_connect_storage; ASSERT_SUCCESS(s_aws_mqtt5_client_test_init_ping_test_connect_storage(&expected_connect_storage, allocator)); struct aws_mqtt5_packet_disconnect_storage expected_disconnect_storage; ASSERT_SUCCESS(s_aws_mqtt5_client_test_init_default_disconnect_storage(&expected_disconnect_storage, allocator)); struct aws_mqtt5_mock_server_packet_record expected_packets[] = { { .packet_type = AWS_MQTT5_PT_CONNECT, .packet_storage = &expected_connect_storage, }, { .packet_type = AWS_MQTT5_PT_PINGREQ, }, { .packet_type = AWS_MQTT5_PT_PINGREQ, }, { .packet_type = AWS_MQTT5_PT_PINGREQ, }, { .packet_type = AWS_MQTT5_PT_PINGREQ, }, { .packet_type = AWS_MQTT5_PT_PINGREQ, }, { .packet_type = AWS_MQTT5_PT_DISCONNECT, .packet_storage = &expected_disconnect_storage, }, }; ASSERT_SUCCESS( aws_verify_received_packet_sequence(&test_context, expected_packets, AWS_ARRAY_SIZE(expected_packets))); ASSERT_SUCCESS(s_verify_ping_sequence_timing(&test_context)); aws_mqtt5_packet_connect_storage_clean_up(&expected_connect_storage); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_ping_sequence, s_mqtt5_client_ping_sequence_fn) /* * Test that sending other data to the server pushes out the client ping timer * * This is a low-keep-alive variant of the basic success test that writes UNSUBSCRIBEs to the server at fast intervals. * Verify the server doesn't receive any PINGREQs until the right amount of time after we stop sending the CONNECTs to * it. 
* * TODO: we can't write this test until we have proper operation handling during CONNECTED state */ static int s_mqtt5_client_ping_write_pushout_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_ping_write_pushout, s_mqtt5_client_ping_write_pushout_fn) #define TIMEOUT_TEST_PING_INTERVAL_MS ((uint64_t)10000) static int s_verify_ping_timeout_interval(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); uint64_t connected_time = 0; uint64_t disconnected_time = 0; size_t event_count = aws_array_list_length(&test_context->lifecycle_events); for (size_t i = 0; i < event_count; ++i) { struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_context->lifecycle_events, &record, i); if (connected_time == 0 && record->event.event_type == AWS_MQTT5_CLET_CONNECTION_SUCCESS) { connected_time = record->timestamp; } if (disconnected_time == 0 && record->event.event_type == AWS_MQTT5_CLET_DISCONNECTION) { disconnected_time = record->timestamp; } } aws_mutex_unlock(&test_context->lock); ASSERT_TRUE(connected_time > 0 && disconnected_time > 0 && disconnected_time > connected_time); uint64_t connected_interval_ms = aws_timestamp_convert(disconnected_time - connected_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); uint64_t expected_connected_time_ms = TIMEOUT_TEST_PING_INTERVAL_MS + (uint64_t)test_context->client->config->ping_timeout_ms; ASSERT_TRUE(s_is_within_percentage_of(expected_connected_time_ms, connected_interval_ms, .3)); return AWS_OP_SUCCESS; } /* * Test that not receiving a PINGRESP causes a disconnection * * This is a low-keep-alive variant of the basic success test where the mock server does not respond to a PINGREQ. 
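 *
 * Rough expected timeline, assuming the constants chosen below: the client should stay connected for approximately
 * one keep alive interval plus the ping timeout, i.e. TIMEOUT_TEST_PING_INTERVAL_MS (10000 ms) + ping_timeout_ms
 * (5000 ms) ~= 15 seconds, before emitting a DISCONNECTION lifecycle event with
 * AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT; s_verify_ping_timeout_interval() checks that interval with a 30% tolerance.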
*/ static int s_mqtt5_client_ping_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* fast keep alive in order keep tests reasonably short */ uint16_t keep_alive_seconds = (uint16_t)aws_timestamp_convert(TIMEOUT_TEST_PING_INTERVAL_MS, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, NULL); test_options.connect_options.keep_alive_interval_seconds = keep_alive_seconds; /* don't respond to PINGREQs */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PINGREQ] = NULL; /* faster ping timeout */ test_options.client_options.ping_timeout_ms = 5000; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); s_wait_for_disconnection_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS, }, { .event_type = AWS_MQTT5_CLET_DISCONNECTION, .error_code = AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT, }, { .event_type = AWS_MQTT5_CLET_STOPPED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); ASSERT_SUCCESS(s_verify_ping_timeout_interval(&test_context)); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CONNECTED, AWS_MCS_CLEAN_DISCONNECT, AWS_MCS_CHANNEL_SHUTDOWN, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_ping_timeout, s_mqtt5_client_ping_timeout_fn) struct aws_lifecycle_event_wait_context { enum aws_mqtt5_client_lifecycle_event_type type; size_t count; struct aws_mqtt5_client_mock_test_fixture *test_fixture; }; static bool s_received_at_least_n_events(void *arg) { struct aws_lifecycle_event_wait_context *context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = context->test_fixture; size_t actual_count = 0; size_t event_count = aws_array_list_length(&test_fixture->lifecycle_events); for (size_t i = 0; i < event_count; ++i) { struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &record, i); if (record->event.event_type == context->type) { actual_count++; } } return actual_count >= context->count; } void aws_mqtt5_wait_for_n_lifecycle_events( struct aws_mqtt5_client_mock_test_fixture *test_context, enum aws_mqtt5_client_lifecycle_event_type type, size_t count) { struct aws_lifecycle_event_wait_context context = { .type = type, .count = count, .test_fixture = test_context, }; aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_received_at_least_n_events, &context); 
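    /* aws_condition_variable_wait_pred evaluates s_received_at_least_n_events with the lock held and returns, still
     * holding the lock, once the predicate is satisfied, so the lock is released before returning to the caller. */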
aws_mutex_unlock(&test_context->lock); } int aws_verify_reconnection_exponential_backoff_timestamps(struct aws_mqtt5_client_mock_test_fixture *test_fixture) { aws_mutex_lock(&test_fixture->lock); size_t event_count = aws_array_list_length(&test_fixture->lifecycle_events); uint64_t last_timestamp = 0; uint64_t expected_backoff = RECONNECT_TEST_MIN_BACKOFF; for (size_t i = 0; i < event_count; ++i) { struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &record, i); if (record->event.event_type == AWS_MQTT5_CLET_CONNECTION_FAILURE) { if (last_timestamp == 0) { last_timestamp = record->timestamp; } else { uint64_t time_diff = aws_timestamp_convert( record->timestamp - last_timestamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); if (!s_is_within_percentage_of(expected_backoff, time_diff, .3)) { return AWS_OP_ERR; } expected_backoff = aws_min_u64(expected_backoff * 2, RECONNECT_TEST_MAX_BACKOFF); last_timestamp = record->timestamp; } } } aws_mutex_unlock(&test_fixture->lock); return AWS_OP_SUCCESS; } /* * Always-fail variant that waits for 6 connection failures and then checks the timestamps between them against * what we'd expect with exponential backoff and no jitter */ static int s_mqtt5_client_reconnect_failure_backoff_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* backoff delay sequence: 500, 1000, 2000, 4000, 5000, ... */ test_options.client_options.retry_jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE; test_options.client_options.min_reconnect_delay_ms = RECONNECT_TEST_MIN_BACKOFF; test_options.client_options.max_reconnect_delay_ms = RECONNECT_TEST_MAX_BACKOFF; test_options.client_options.min_connected_time_to_reset_reconnect_delay_ms = RECONNECT_TEST_BACKOFF_RESET_DELAY; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = aws_mqtt5_mock_server_handle_connect_always_fail; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_mqtt5_wait_for_n_lifecycle_events(&test_context, AWS_MQTT5_CLET_CONNECTION_FAILURE, 6); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* 6 (connecting, connection failure) pairs */ struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = 
AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); ASSERT_SUCCESS(aws_verify_reconnection_exponential_backoff_timestamps(&test_context)); /* 6 (connecting, mqtt_connect, channel_shutdown, pending_reconnect) tuples (minus the final pending_reconnect) */ enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_reconnect_failure_backoff, s_mqtt5_client_reconnect_failure_backoff_fn) int aws_mqtt5_mock_server_handle_connect_succeed_on_nth( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; struct aws_mqtt5_mock_server_reconnect_state *context = user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); if (context->connection_attempts == context->required_connection_count_threshold) { connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; aws_high_res_clock_get_ticks(&context->connect_timestamp); } else { connack_view.reason_code = AWS_MQTT5_CRC_NOT_AUTHORIZED; } ++context->connection_attempts; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } static void s_aws_mqtt5_mock_server_disconnect_after_n_ms( struct aws_mqtt5_server_mock_connection_context *mock_server, void *user_data) { struct aws_mqtt5_mock_server_reconnect_state *context = user_data; if (context->connect_timestamp == 0) { return; } uint64_t now = 0; aws_high_res_clock_get_ticks(&now); if (now < context->connect_timestamp) { return; } uint64_t elapsed_ms = aws_timestamp_convert(now - context->connect_timestamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); if (elapsed_ms > context->successful_connection_disconnect_delay_ms) { struct aws_mqtt5_packet_disconnect_view disconnect = { .reason_code = AWS_MQTT5_DRC_PACKET_TOO_LARGE, }; aws_mqtt5_mock_server_send_packet(mock_server, AWS_MQTT5_PT_DISCONNECT, &disconnect); context->connect_timestamp = 0; } } static int s_verify_reconnection_after_success_used_backoff( struct aws_mqtt5_client_mock_test_fixture *test_fixture, uint64_t expected_reconnect_delay_ms) { aws_mutex_lock(&test_fixture->lock); size_t event_count = aws_array_list_length(&test_fixture->lifecycle_events); uint64_t disconnect_after_success_timestamp = 0; uint64_t reconnect_failure_after_disconnect_timestamp = 0; for (size_t i = 0; i < event_count; ++i) { struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &record, i); if 
(record->event.event_type == AWS_MQTT5_CLET_DISCONNECTION) { ASSERT_INT_EQUALS(0, disconnect_after_success_timestamp); disconnect_after_success_timestamp = record->timestamp; } else if (record->event.event_type == AWS_MQTT5_CLET_CONNECTION_FAILURE) { if (reconnect_failure_after_disconnect_timestamp == 0 && disconnect_after_success_timestamp > 0) { reconnect_failure_after_disconnect_timestamp = record->timestamp; } } } aws_mutex_unlock(&test_fixture->lock); ASSERT_TRUE(disconnect_after_success_timestamp > 0 && reconnect_failure_after_disconnect_timestamp > 0); uint64_t post_success_reconnect_time_ms = aws_timestamp_convert( reconnect_failure_after_disconnect_timestamp - disconnect_after_success_timestamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL); if (!s_is_within_percentage_of(expected_reconnect_delay_ms, post_success_reconnect_time_ms, .3)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } /* * Fail-until-max-backoff variant, followed by a success that then quickly disconnects. Verify the next reconnect * attempt still uses the maximum backoff because we weren't connected long enough to reset it. */ static int s_mqtt5_client_reconnect_backoff_insufficient_reset_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_mock_server_reconnect_state mock_server_state = { .required_connection_count_threshold = 6, /* quick disconnect should not reset reconnect delay */ .successful_connection_disconnect_delay_ms = RECONNECT_TEST_BACKOFF_RESET_DELAY / 5, }; /* backoff delay sequence: 500, 1000, 2000, 4000, 5000, ... */ test_options.client_options.retry_jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE; test_options.client_options.min_reconnect_delay_ms = RECONNECT_TEST_MIN_BACKOFF; test_options.client_options.max_reconnect_delay_ms = RECONNECT_TEST_MAX_BACKOFF; test_options.client_options.min_connected_time_to_reset_reconnect_delay_ms = RECONNECT_TEST_BACKOFF_RESET_DELAY; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = aws_mqtt5_mock_server_handle_connect_succeed_on_nth; test_options.server_function_table.service_task_fn = s_aws_mqtt5_mock_server_disconnect_after_n_ms; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &mock_server_state, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_mqtt5_wait_for_n_lifecycle_events(&test_context, AWS_MQTT5_CLET_CONNECTION_FAILURE, 7); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* 6 (connecting, connection failure) pairs, followed by a successful connection, then a disconnect and reconnect */ struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = 
AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS, }, { .event_type = AWS_MQTT5_CLET_DISCONNECTION, .error_code = AWS_ERROR_MQTT5_DISCONNECT_RECEIVED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); ASSERT_SUCCESS(s_verify_reconnection_after_success_used_backoff(&test_context, RECONNECT_TEST_MAX_BACKOFF)); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_reconnect_backoff_insufficient_reset, s_mqtt5_client_reconnect_backoff_insufficient_reset_fn) /* * Fail-until-max-backoff variant, followed by a success that disconnects after enough time has passed that the backoff * should be reset. Verify that the next reconnect is back to using the minimum backoff value. */ static int s_mqtt5_client_reconnect_backoff_sufficient_reset_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_mock_server_reconnect_state mock_server_state = { .required_connection_count_threshold = 6, /* slow disconnect should reset reconnect delay */ .successful_connection_disconnect_delay_ms = RECONNECT_TEST_BACKOFF_RESET_DELAY * 2, }; /* backoff delay sequence: 500, 1000, 2000, 4000, 5000, ... 
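     *
     * Illustrative expectation with jitter disabled: the gap between the k-th and (k+1)-th connection failure should
     * be roughly min(RECONNECT_TEST_MIN_BACKOFF * 2^(k-1), RECONNECT_TEST_MAX_BACKOFF), until a connection stays up
     * for at least RECONNECT_TEST_BACKOFF_RESET_DELAY, at which point the delay drops back to the minimum - which is
     * what s_verify_reconnection_after_success_used_backoff() checks against RECONNECT_TEST_MIN_BACKOFF below.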
*/ test_options.client_options.retry_jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE; test_options.client_options.min_reconnect_delay_ms = RECONNECT_TEST_MIN_BACKOFF; test_options.client_options.max_reconnect_delay_ms = RECONNECT_TEST_MAX_BACKOFF; test_options.client_options.min_connected_time_to_reset_reconnect_delay_ms = RECONNECT_TEST_BACKOFF_RESET_DELAY; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = aws_mqtt5_mock_server_handle_connect_succeed_on_nth; test_options.server_function_table.service_task_fn = s_aws_mqtt5_mock_server_disconnect_after_n_ms; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &mock_server_state, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_mqtt5_wait_for_n_lifecycle_events(&test_context, AWS_MQTT5_CLET_CONNECTION_FAILURE, 7); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* 6 (connecting, connection failure) pairs, followed by a successful connection, then a disconnect and reconnect */ struct aws_mqtt5_client_lifecycle_event expected_events[] = { { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS, }, { .event_type = AWS_MQTT5_CLET_DISCONNECTION, .error_code = AWS_ERROR_MQTT5_DISCONNECT_RECEIVED, }, { .event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT, }, { .event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, }, }; ASSERT_SUCCESS( s_verify_simple_lifecycle_event_sequence(&test_context, expected_events, AWS_ARRAY_SIZE(expected_events))); ASSERT_SUCCESS(s_verify_reconnection_after_success_used_backoff(&test_context, RECONNECT_TEST_MIN_BACKOFF)); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_reconnect_backoff_sufficient_reset, s_mqtt5_client_reconnect_backoff_sufficient_reset_fn) static const char s_topic_filter1[] = "some/topic/but/letsmakeit/longer/soIcanfailpacketsizetests/+"; static struct aws_mqtt5_subscription_view s_subscriptions[] = { { .topic_filter = { .ptr = (uint8_t *)s_topic_filter1, .len = 
AWS_ARRAY_SIZE(s_topic_filter1) - 1, }, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .no_local = false, .retain_as_published = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, }, }; static enum aws_mqtt5_suback_reason_code s_suback_reason_codes[] = { AWS_MQTT5_SARC_GRANTED_QOS_1, }; void s_aws_mqtt5_subscribe_complete_fn( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { (void)suback; (void)error_code; struct aws_mqtt5_client_mock_test_fixture *test_context = complete_ctx; aws_mutex_lock(&test_context->lock); test_context->subscribe_complete = true; aws_mutex_unlock(&test_context->lock); aws_condition_variable_notify_all(&test_context->signal); } static bool s_received_suback(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_context = arg; return test_context->subscribe_complete; } static void s_wait_for_suback_received(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred(&test_context->signal, &test_context->lock, s_received_suback, test_context); aws_mutex_unlock(&test_context->lock); } static int s_aws_mqtt5_server_send_suback_on_subscribe( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_subscribe_view *subscribe_view = packet; struct aws_mqtt5_packet_suback_view suback_view = { .packet_id = subscribe_view->packet_id, .reason_code_count = AWS_ARRAY_SIZE(s_suback_reason_codes), .reason_codes = s_suback_reason_codes, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_SUBACK, &suback_view); } /* Connection test where we succeed, send a SUBSCRIBE, and wait for a SUBACK */ static int s_mqtt5_client_subscribe_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_aws_mqtt5_server_send_suback_on_subscribe; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), }; struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = s_aws_mqtt5_subscribe_complete_fn, .completion_user_data = &test_context, }; aws_mqtt5_client_subscribe(client, &subscribe_view, &completion_options); s_wait_for_suback_received(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_subscribe_success, s_mqtt5_client_subscribe_success_fn) 
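/*
 * The next two tests exercise client-side packet size validation. The mock server's CONNACK advertises
 * maximum_packet_size = 50 bytes, so operations that cannot fit are expected to fail locally: the completion
 * callbacks assert AWS_ERROR_MQTT5_PACKET_VALIDATION rather than any server-side rejection. As a rough sketch of
 * why: s_topic_filter1 is already longer than 50 bytes on its own, and the DISCONNECT test's reason string is
 * similarly oversized.
 */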
static int s_aws_mqtt5_mock_server_handle_connect_succeed_maximum_packet_size( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); uint32_t maximum_packet_size = 50; connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; connack_view.maximum_packet_size = &maximum_packet_size; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } void s_aws_mqtt5_subscribe_complete_packet_size_too_small_fn( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { AWS_FATAL_ASSERT(suback == NULL); AWS_FATAL_ASSERT(AWS_ERROR_MQTT5_PACKET_VALIDATION == error_code); struct aws_mqtt5_client_mock_test_fixture *test_context = complete_ctx; aws_mutex_lock(&test_context->lock); test_context->subscribe_complete = true; aws_mutex_unlock(&test_context->lock); aws_condition_variable_notify_all(&test_context->signal); } static int s_mqtt5_client_subscribe_fail_packet_too_big_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_succeed_maximum_packet_size; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), }; struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = s_aws_mqtt5_subscribe_complete_packet_size_too_small_fn, .completion_user_data = &test_context, }; aws_mqtt5_client_subscribe(client, &subscribe_view, &completion_options); s_wait_for_suback_received(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_subscribe_fail_packet_too_big, s_mqtt5_client_subscribe_fail_packet_too_big_fn) static void s_aws_mqtt5_disconnect_failure_completion_fn(int error_code, void *complete_ctx) { AWS_FATAL_ASSERT(error_code == AWS_ERROR_MQTT5_PACKET_VALIDATION); s_on_disconnect_completion(error_code, complete_ctx); } static int s_mqtt5_client_disconnect_fail_packet_too_big_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_succeed_maximum_packet_size; struct 
aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_byte_cursor long_reason_string_cursor = aws_byte_cursor_from_c_str( "Not valid because it includes the 0-terminator but we don't check utf-8 so who cares"); struct aws_mqtt5_packet_disconnect_view disconnect_view = { .reason_string = &long_reason_string_cursor, }; struct aws_mqtt5_disconnect_completion_options completion_options = { .completion_callback = s_aws_mqtt5_disconnect_failure_completion_fn, .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_stop(client, &disconnect_view, &completion_options)); s_wait_for_disconnect_completion(&test_context); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_disconnect_fail_packet_too_big, s_mqtt5_client_disconnect_fail_packet_too_big_fn) static uint8_t s_topic[] = "Hello/world"; #define RECEIVE_MAXIMUM_PUBLISH_COUNT 30 #define TEST_RECEIVE_MAXIMUM 3 static int s_aws_mqtt5_mock_server_handle_connect_succeed_receive_maximum( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); uint16_t receive_maximum = TEST_RECEIVE_MAXIMUM; connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; connack_view.receive_maximum = &receive_maximum; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } struct send_puback_task { struct aws_allocator *allocator; struct aws_task task; struct aws_mqtt5_server_mock_connection_context *connection; uint16_t packet_id; }; void send_puback_fn(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; struct send_puback_task *puback_response_task = arg; if (status == AWS_TASK_STATUS_CANCELED) { goto done; } struct aws_mqtt5_client_mock_test_fixture *test_fixture = puback_response_task->connection->test_fixture; aws_mutex_lock(&test_fixture->lock); --test_fixture->server_current_inflight_publishes; aws_mutex_unlock(&test_fixture->lock); struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = puback_response_task->packet_id, }; aws_mqtt5_mock_server_send_packet(puback_response_task->connection, AWS_MQTT5_PT_PUBACK, &puback_view); done: aws_mem_release(puback_response_task->allocator, puback_response_task); } static int s_aws_mqtt5_mock_server_handle_publish_delayed_puback( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_client_mock_test_fixture *test_fixture = connection->test_fixture; aws_mutex_lock(&test_fixture->lock); ++test_fixture->server_current_inflight_publishes; test_fixture->server_maximum_inflight_publishes = aws_max_u32(test_fixture->server_current_inflight_publishes, 
test_fixture->server_maximum_inflight_publishes); aws_mutex_unlock(&test_fixture->lock); struct aws_mqtt5_packet_publish_view *publish_view = packet; struct send_puback_task *puback_response_task = aws_mem_calloc(connection->allocator, 1, sizeof(struct send_puback_task)); puback_response_task->allocator = connection->allocator; puback_response_task->connection = connection; puback_response_task->packet_id = publish_view->packet_id; aws_task_init(&puback_response_task->task, send_puback_fn, puback_response_task, "delayed_puback_response"); struct aws_event_loop *event_loop = aws_channel_get_event_loop(connection->slot->channel); uint64_t now = 0; aws_event_loop_current_clock_time(event_loop, &now); uint64_t min_delay = aws_timestamp_convert(250, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); uint64_t max_delay = aws_timestamp_convert(500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); uint64_t delay_nanos = aws_mqtt5_client_random_in_range(min_delay, max_delay); uint64_t puback_time = aws_add_u64_saturating(now, delay_nanos); aws_event_loop_schedule_task_future(event_loop, &puback_response_task->task, puback_time); return AWS_OP_SUCCESS; } static void s_receive_maximum_publish_completion_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { if (packet_type != AWS_MQTT5_PT_PUBACK) { return; } const struct aws_mqtt5_packet_puback_view *puback = packet; struct aws_mqtt5_client_mock_test_fixture *test_context = complete_ctx; uint64_t now = 0; aws_high_res_clock_get_ticks(&now); aws_mutex_lock(&test_context->lock); ++test_context->total_pubacks_received; if (error_code == AWS_ERROR_SUCCESS && puback->reason_code < 128) { ++test_context->successful_pubacks_received; } aws_mutex_unlock(&test_context->lock); aws_condition_variable_notify_all(&test_context->signal); } static bool s_received_n_successful_publishes(void *arg) { struct aws_mqtt5_client_test_wait_for_n_context *context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = context->test_fixture; return test_fixture->successful_pubacks_received >= context->required_event_count; } static void s_wait_for_n_successful_publishes(struct aws_mqtt5_client_test_wait_for_n_context *context) { struct aws_mqtt5_client_mock_test_fixture *test_context = context->test_fixture; aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_received_n_successful_publishes, context); aws_mutex_unlock(&test_context->lock); } static int s_mqtt5_client_flow_control_receive_maximum_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* send delayed pubacks */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_mock_server_handle_publish_delayed_puback; /* establish a low receive maximum */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_succeed_receive_maximum; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; 
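    /*
     * Mechanics of this flow-control check: the CONNACK advertises receive_maximum = TEST_RECEIVE_MAXIMUM (3), each
     * inbound QoS1 PUBLISH gets its PUBACK delayed by a random 250-500 ms, and the mock server tracks how many
     * publishes are un-acked at any moment. After all RECEIVE_MAXIMUM_PUBLISH_COUNT (30) publishes complete, the
     * test asserts that this high-water mark never exceeded the advertised receive maximum.
     */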
ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); /* send a bunch of publishes */ for (size_t i = 0; i < RECEIVE_MAXIMUM_PUBLISH_COUNT; ++i) { struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_receive_maximum_publish_completion_fn, .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish_view, &completion_options)); } /* wait for all publishes to succeed */ struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = RECEIVE_MAXIMUM_PUBLISH_COUNT, }; s_wait_for_n_successful_publishes(&wait_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* * verify that the maximum number of in-progress qos1 publishes on the server was never more than what the * server said its maximum was */ aws_mutex_lock(&test_context.lock); uint32_t max_inflight_publishes = test_context.server_maximum_inflight_publishes; aws_mutex_unlock(&test_context.lock); ASSERT_TRUE(max_inflight_publishes <= TEST_RECEIVE_MAXIMUM); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_flow_control_receive_maximum, s_mqtt5_client_flow_control_receive_maximum_fn) static void s_publish_timeout_publish_completion_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)packet; (void)packet_type; struct aws_mqtt5_client_mock_test_fixture *test_context = complete_ctx; aws_mutex_lock(&test_context->lock); if (error_code == AWS_ERROR_MQTT_TIMEOUT) { ++test_context->timeouts_received; } aws_mutex_unlock(&test_context->lock); aws_condition_variable_notify_all(&test_context->signal); } static bool s_received_n_publish_timeouts(void *arg) { struct aws_mqtt5_client_test_wait_for_n_context *context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = context->test_fixture; return test_fixture->timeouts_received >= context->required_event_count; } static void s_wait_for_n_publish_timeouts(struct aws_mqtt5_client_test_wait_for_n_context *context) { struct aws_mqtt5_client_mock_test_fixture *test_context = context->test_fixture; aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_received_n_publish_timeouts, context); aws_mutex_unlock(&test_context->lock); } static bool s_sent_n_timeout_publish_packets(void *arg) { struct aws_mqtt5_client_test_wait_for_n_context *context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = context->test_fixture; return test_fixture->publishes_received >= context->required_event_count; } static int s_aws_mqtt5_mock_server_handle_timeout_publish( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_client_mock_test_fixture *test_fixture = connection->test_fixture; aws_mutex_lock(&test_fixture->lock); ++connection->test_fixture->publishes_received; aws_mutex_unlock(&test_fixture->lock); return AWS_OP_SUCCESS; } static 
void s_wait_for_n_successful_server_timeout_publishes(struct aws_mqtt5_client_test_wait_for_n_context *context) { struct aws_mqtt5_client_mock_test_fixture *test_context = context->test_fixture; aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_sent_n_timeout_publish_packets, context); aws_mutex_unlock(&test_context->lock); } /* * Test that not receiving a PUBACK causes the PUBLISH waiting for the PUBACK to timeout */ static int s_mqtt5_client_publish_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_mock_server_handle_timeout_publish; /* fast publish timeout */ test_options.client_options.ack_timeout_seconds = 5; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = &s_publish_timeout_publish_completion_fn, .completion_user_data = &test_context, }; struct aws_mqtt5_packet_publish_view packet_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, }; struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = (size_t)aws_mqtt5_client_random_in_range(3, 20), }; /* Send semi-random number of publishes that will not be acked */ for (size_t publish_count = 0; publish_count < wait_context.required_event_count; ++publish_count) { ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &packet_publish_view, &completion_options)); } s_wait_for_n_successful_server_timeout_publishes(&wait_context); s_wait_for_n_publish_timeouts(&wait_context); ASSERT_INT_EQUALS(wait_context.required_event_count, test_context.timeouts_received); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_publish_timeout, s_mqtt5_client_publish_timeout_fn) int aws_mqtt5_mock_server_handle_publish_puback( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_packet_publish_view *publish_view = packet; if (publish_view->qos != AWS_MQTT5_QOS_AT_LEAST_ONCE) { return AWS_OP_SUCCESS; } struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = publish_view->packet_id, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBACK, &puback_view); } #define IOT_CORE_THROUGHPUT_PACKETS 21 static uint8_t s_large_packet_payload[127 * 1024]; static int s_do_iot_core_throughput_test(struct aws_allocator *allocator, bool use_iot_core_limits) { struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* send pubacks */ 
test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback; if (use_iot_core_limits) { test_options.client_options.extended_validation_and_flow_control_options = AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS; } struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); /* send a bunch of large publishes */ aws_secure_zero(s_large_packet_payload, AWS_ARRAY_SIZE(s_large_packet_payload)); for (size_t i = 0; i < IOT_CORE_THROUGHPUT_PACKETS; ++i) { struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, .payload = aws_byte_cursor_from_array(s_large_packet_payload, AWS_ARRAY_SIZE(s_large_packet_payload)), }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_receive_maximum_publish_completion_fn, /* can reuse receive_maximum callback */ .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish_view, &completion_options)); } /* wait for all publishes to succeed */ struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = IOT_CORE_THROUGHPUT_PACKETS, }; s_wait_for_n_successful_publishes(&wait_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); return AWS_OP_SUCCESS; } static int s_mqtt5_client_flow_control_iot_core_throughput_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); uint64_t start_time = 0; aws_high_res_clock_get_ticks(&start_time); ASSERT_SUCCESS(s_do_iot_core_throughput_test(allocator, true)); uint64_t end_time = 0; aws_high_res_clock_get_ticks(&end_time); uint64_t test_time = end_time - start_time; /* * We expect the throttled version to take around 5 seconds, since we're sending 21 almost-max size (127k) packets * against a limit of 512KB/s. Since the packets are submitted immediately on CONNACK, the rate limiter * token bucket is starting at zero and so will give us immediate throttling. 
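     *
     * Back-of-the-envelope numbers behind that expectation (approximate, ignoring MQTT framing overhead):
     *     21 publishes * ~127 KB of payload each ~= 2667 KB queued at CONNACK time
     *     2667 KB / 512 KB-per-second limit      ~= 5.2 seconds of forced pacing
     * which is consistent with the > 5 second lower bound asserted below.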
*/ ASSERT_TRUE(test_time > 5 * (uint64_t)AWS_TIMESTAMP_NANOS); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_flow_control_iot_core_throughput, s_mqtt5_client_flow_control_iot_core_throughput_fn) #define IOT_CORE_PUBLISH_TPS_PACKETS 650 static int s_do_iot_core_publish_tps_test(struct aws_allocator *allocator, bool use_iot_core_limits) { struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* send pubacks */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback; if (use_iot_core_limits) { test_options.client_options.extended_validation_and_flow_control_options = AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS; } struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); /* send a bunch of tiny publishes */ for (size_t i = 0; i < IOT_CORE_PUBLISH_TPS_PACKETS; ++i) { struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_receive_maximum_publish_completion_fn, /* can reuse receive_maximum callback */ .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish_view, &completion_options)); } /* wait for all publishes to succeed */ struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = IOT_CORE_PUBLISH_TPS_PACKETS, }; s_wait_for_n_successful_publishes(&wait_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); return AWS_OP_SUCCESS; } static int s_mqtt5_client_flow_control_iot_core_publish_tps_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); uint64_t start_time1 = 0; aws_high_res_clock_get_ticks(&start_time1); ASSERT_SUCCESS(s_do_iot_core_publish_tps_test(allocator, false)); uint64_t end_time1 = 0; aws_high_res_clock_get_ticks(&end_time1); uint64_t test_time1 = end_time1 - start_time1; uint64_t start_time2 = 0; aws_high_res_clock_get_ticks(&start_time2); ASSERT_SUCCESS(s_do_iot_core_publish_tps_test(allocator, true)); uint64_t end_time2 = 0; aws_high_res_clock_get_ticks(&end_time2); uint64_t test_time2 = end_time2 - start_time2; /* We expect the unthrottled test to complete quickly */ ASSERT_TRUE(test_time1 < AWS_TIMESTAMP_NANOS); /* * We expect the throttled version to take over 6 seconds, since we're sending over 650 tiny publish packets * against a limit of 100TPS. Since the packets are submitted immediately on CONNACK, the rate limiter * token bucket is starting at zero and so will give us immediate throttling. 
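     *
     * Back-of-the-envelope numbers behind that expectation: 650 publishes / 100 publishes-per-second ~= 6.5 seconds
     * of forced pacing, which comfortably clears the > 6 second lower bound asserted below.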
*/ ASSERT_TRUE(test_time2 > 6 * (uint64_t)AWS_TIMESTAMP_NANOS); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_flow_control_iot_core_publish_tps, s_mqtt5_client_flow_control_iot_core_publish_tps_fn) static int s_aws_mqtt5_mock_server_handle_connect_honor_session_after_success( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connect_view *connect_packet = packet; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; /* Only resume a connection if the client has already connected to the server before */ if (connection->test_fixture->client->has_connected_successfully) { connack_view.session_present = !connect_packet->clean_start; } return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } static int s_aws_mqtt5_mock_server_handle_connect_honor_session_unconditional( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connect_view *connect_packet = packet; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; connack_view.session_present = !connect_packet->clean_start; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } struct aws_mqtt5_wait_for_n_lifecycle_events_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; enum aws_mqtt5_client_lifecycle_event_type event_type; size_t expected_event_count; }; static bool s_received_n_lifecycle_events(void *arg) { struct aws_mqtt5_wait_for_n_lifecycle_events_context *context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = context->test_fixture; size_t matching_events = 0; size_t event_count = aws_array_list_length(&test_fixture->lifecycle_events); for (size_t i = 0; i < event_count; ++i) { struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &record, i); if (record->event.event_type == context->event_type) { ++matching_events; } } return matching_events >= context->expected_event_count; } static void s_wait_for_n_lifecycle_events( struct aws_mqtt5_client_mock_test_fixture *test_fixture, enum aws_mqtt5_client_lifecycle_event_type event_type, size_t expected_event_count) { struct aws_mqtt5_wait_for_n_lifecycle_events_context wait_context = { .test_fixture = test_fixture, .event_type = event_type, .expected_event_count = expected_event_count, }; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred( &test_fixture->signal, &test_fixture->lock, s_received_n_lifecycle_events, &wait_context); aws_mutex_unlock(&test_fixture->lock); } static bool s_compute_expected_rejoined_session( enum aws_mqtt5_client_session_behavior_type session_behavior, size_t connect_index) { switch (session_behavior) { case AWS_MQTT5_CSBT_CLEAN: return false; case AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS: return connect_index > 0; case AWS_MQTT5_CSBT_REJOIN_ALWAYS: default: return true; } } static int s_aws_mqtt5_client_test_init_resume_session_connect_storage( struct aws_mqtt5_packet_connect_storage *storage, struct aws_allocator *allocator) { struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 30, .client_id = aws_byte_cursor_from_string(g_default_client_id), .clean_start = false, }; return 
aws_mqtt5_packet_connect_storage_init(storage, allocator, &connect_view); } #define SESSION_RESUMPTION_CONNECT_COUNT 5 static int s_do_mqtt5_client_session_resumption_test( struct aws_allocator *allocator, enum aws_mqtt5_client_session_behavior_type session_behavior) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.client_options.session_behavior = session_behavior; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_honor_session_unconditional; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; for (size_t i = 0; i < SESSION_RESUMPTION_CONNECT_COUNT; ++i) { ASSERT_SUCCESS(aws_mqtt5_client_start(client)); s_wait_for_n_lifecycle_events(&test_context, AWS_MQTT5_CLET_CONNECTION_SUCCESS, i + 1); /* not technically truly safe to query depending on memory model. Remove if it becomes a problem. */ bool expected_rejoined_session = s_compute_expected_rejoined_session(session_behavior, i); ASSERT_INT_EQUALS(expected_rejoined_session, client->negotiated_settings.rejoined_session); /* can't use stop as that wipes session state */ aws_channel_shutdown(test_context.server_channel, AWS_ERROR_UNKNOWN); s_wait_for_n_lifecycle_events(&test_context, AWS_MQTT5_CLET_DISCONNECTION, i + 1); } struct aws_mqtt5_packet_connect_storage clean_start_connect_storage; ASSERT_SUCCESS(s_aws_mqtt5_client_test_init_default_connect_storage(&clean_start_connect_storage, allocator)); struct aws_mqtt5_packet_connect_storage resume_session_connect_storage; ASSERT_SUCCESS( s_aws_mqtt5_client_test_init_resume_session_connect_storage(&resume_session_connect_storage, allocator)); struct aws_mqtt5_mock_server_packet_record expected_packets[SESSION_RESUMPTION_CONNECT_COUNT]; for (size_t i = 0; i < SESSION_RESUMPTION_CONNECT_COUNT; ++i) { expected_packets[i].packet_type = AWS_MQTT5_PT_CONNECT; if (s_compute_expected_rejoined_session(session_behavior, i)) { expected_packets[i].packet_storage = &resume_session_connect_storage; } else { expected_packets[i].packet_storage = &clean_start_connect_storage; } } ASSERT_SUCCESS( aws_verify_received_packet_sequence(&test_context, expected_packets, AWS_ARRAY_SIZE(expected_packets))); aws_mqtt5_packet_connect_storage_clean_up(&clean_start_connect_storage); aws_mqtt5_packet_connect_storage_clean_up(&resume_session_connect_storage); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static int s_mqtt5_client_session_resumption_clean_start_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5_client_session_resumption_test(allocator, AWS_MQTT5_CSBT_CLEAN)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_session_resumption_clean_start, s_mqtt5_client_session_resumption_clean_start_fn) static int s_mqtt5_client_session_resumption_post_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5_client_session_resumption_test(allocator, AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS)); return AWS_OP_SUCCESS; } 
AWS_TEST_CASE(mqtt5_client_session_resumption_post_success, s_mqtt5_client_session_resumption_post_success_fn) static int s_mqtt5_client_session_resumption_always_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5_client_session_resumption_test(allocator, AWS_MQTT5_CSBT_REJOIN_ALWAYS)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_session_resumption_always, s_mqtt5_client_session_resumption_always_fn) static uint8_t s_sub_pub_unsub_topic_filter[] = "hello/+"; static struct aws_mqtt5_subscription_view s_sub_pub_unsub_subscriptions[] = { { .topic_filter = { .ptr = (uint8_t *)s_sub_pub_unsub_topic_filter, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_topic_filter) - 1, }, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .no_local = false, .retain_as_published = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, }, }; static struct aws_byte_cursor s_sub_pub_unsub_topic_filters[] = { { .ptr = s_sub_pub_unsub_topic_filter, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_topic_filter) - 1, }, }; struct aws_mqtt5_sub_pub_unsub_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; bool subscribe_complete; bool publish_complete; bool publish_received; bool unsubscribe_complete; size_t publishes_received; size_t subscribe_failures; size_t publish_failures; size_t unsubscribe_failures; struct aws_mqtt5_packet_publish_storage publish_storage; }; static void s_sub_pub_unsub_context_clean_up(struct aws_mqtt5_sub_pub_unsub_context *context) { aws_mqtt5_packet_publish_storage_clean_up(&context->publish_storage); } void s_sub_pub_unsub_subscribe_complete_fn( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { AWS_FATAL_ASSERT(suback != NULL); AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); struct aws_mqtt5_sub_pub_unsub_context *test_context = complete_ctx; struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); test_context->subscribe_complete = true; if (error_code != AWS_ERROR_SUCCESS) { ++test_context->subscribe_failures; } aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } static bool s_sub_pub_unsub_received_suback(void *arg) { struct aws_mqtt5_sub_pub_unsub_context *test_context = arg; return test_context->subscribe_complete && test_context->subscribe_failures == 0; } static void s_sub_pub_unsub_wait_for_suback_received(struct aws_mqtt5_sub_pub_unsub_context *test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred( &test_fixture->signal, &test_fixture->lock, s_sub_pub_unsub_received_suback, test_context); aws_mutex_unlock(&test_fixture->lock); } static int s_mqtt5_client_sub_pub_unsub_subscribe( struct aws_mqtt5_sub_pub_unsub_context *full_test_context, struct aws_mqtt5_packet_subscribe_storage *expected_subscribe_storage) { struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_sub_pub_unsub_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_sub_pub_unsub_subscriptions), }; struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = s_sub_pub_unsub_subscribe_complete_fn, .completion_user_data = full_test_context, }; struct aws_mqtt5_client *client = full_test_context->test_fixture->client; ASSERT_SUCCESS(aws_mqtt5_client_subscribe(client, &subscribe_view, &completion_options)); s_sub_pub_unsub_wait_for_suback_received(full_test_context); 
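    /* Snapshot the SUBSCRIBE view we just sent so the packet-sequence verification at the end of the test
     * can compare it against what the mock server recorded. */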
ASSERT_SUCCESS(aws_mqtt5_packet_subscribe_storage_init( expected_subscribe_storage, full_test_context->test_fixture->allocator, &subscribe_view)); return AWS_OP_SUCCESS; } void s_sub_pub_unsub_unsubscribe_complete_fn( const struct aws_mqtt5_packet_unsuback_view *unsuback, int error_code, void *complete_ctx) { AWS_FATAL_ASSERT(unsuback != NULL); AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); struct aws_mqtt5_sub_pub_unsub_context *test_context = complete_ctx; struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); test_context->unsubscribe_complete = true; if (error_code != AWS_ERROR_SUCCESS) { ++test_context->unsubscribe_failures; } aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } static bool s_sub_pub_unsub_received_unsuback(void *arg) { struct aws_mqtt5_sub_pub_unsub_context *test_context = arg; return test_context->unsubscribe_complete && test_context->unsubscribe_failures == 0; } static void s_sub_pub_unsub_wait_for_unsuback_received(struct aws_mqtt5_sub_pub_unsub_context *test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred( &test_fixture->signal, &test_fixture->lock, s_sub_pub_unsub_received_unsuback, test_context); aws_mutex_unlock(&test_fixture->lock); } static int s_mqtt5_client_sub_pub_unsub_unsubscribe( struct aws_mqtt5_sub_pub_unsub_context *full_test_context, struct aws_mqtt5_packet_unsubscribe_storage *expected_unsubscribe_storage) { struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = s_sub_pub_unsub_topic_filters, .topic_filter_count = AWS_ARRAY_SIZE(s_sub_pub_unsub_topic_filters), }; struct aws_mqtt5_unsubscribe_completion_options completion_options = { .completion_callback = s_sub_pub_unsub_unsubscribe_complete_fn, .completion_user_data = full_test_context, }; struct aws_mqtt5_client *client = full_test_context->test_fixture->client; ASSERT_SUCCESS(aws_mqtt5_client_unsubscribe(client, &unsubscribe_view, &completion_options)); s_sub_pub_unsub_wait_for_unsuback_received(full_test_context); ASSERT_SUCCESS(aws_mqtt5_packet_unsubscribe_storage_init( expected_unsubscribe_storage, full_test_context->test_fixture->allocator, &unsubscribe_view)); return AWS_OP_SUCCESS; } void s_sub_pub_unsub_publish_received_fn(const struct aws_mqtt5_packet_publish_view *publish, void *complete_ctx) { AWS_FATAL_ASSERT(publish != NULL); struct aws_mqtt5_sub_pub_unsub_context *test_context = complete_ctx; struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); test_context->publish_received = true; aws_mqtt5_packet_publish_storage_init(&test_context->publish_storage, test_fixture->allocator, publish); aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } static bool s_sub_pub_unsub_received_publish(void *arg) { struct aws_mqtt5_sub_pub_unsub_context *test_context = arg; return test_context->publish_received && test_context->publish_failures == 0; } static void s_sub_pub_unsub_wait_for_publish_received(struct aws_mqtt5_sub_pub_unsub_context *test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred( &test_fixture->signal, &test_fixture->lock, s_sub_pub_unsub_received_publish, test_context); 
aws_mutex_unlock(&test_fixture->lock); } int aws_mqtt5_mock_server_handle_unsubscribe_unsuback_success( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view = packet; AWS_VARIABLE_LENGTH_ARRAY( enum aws_mqtt5_unsuback_reason_code, mqtt5_unsuback_codes, unsubscribe_view->topic_filter_count); for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) { enum aws_mqtt5_unsuback_reason_code *reason_code_ptr = &mqtt5_unsuback_codes[i]; *reason_code_ptr = AWS_MQTT5_UARC_SUCCESS; } struct aws_mqtt5_packet_unsuback_view unsuback_view = { .packet_id = unsubscribe_view->packet_id, .reason_code_count = AWS_ARRAY_SIZE(mqtt5_unsuback_codes), .reason_codes = mqtt5_unsuback_codes, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_UNSUBACK, &unsuback_view); } #define FORWARDED_PUBLISH_PACKET_ID 32768 int aws_mqtt5_mock_server_handle_publish_puback_and_forward( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_publish_view *publish_view = packet; /* send a PUBACK? */ if (publish_view->qos == AWS_MQTT5_QOS_AT_LEAST_ONCE) { struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = publish_view->packet_id, .reason_code = AWS_MQTT5_PARC_SUCCESS, }; if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBACK, &puback_view)) { return AWS_OP_ERR; } } /* assume we're subscribed, reflect the publish back to the test client */ struct aws_mqtt5_packet_publish_view reflect_publish_view = *publish_view; if (publish_view->qos == AWS_MQTT5_QOS_AT_LEAST_ONCE) { reflect_publish_view.packet_id = FORWARDED_PUBLISH_PACKET_ID; } return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &reflect_publish_view); } void s_sub_pub_unsub_publish_complete_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)packet; (void)packet_type; AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); struct aws_mqtt5_sub_pub_unsub_context *test_context = complete_ctx; struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); test_context->publish_complete = true; if (error_code != AWS_ERROR_SUCCESS) { ++test_context->publish_failures; } aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } static bool s_sub_pub_unsub_publish_complete(void *arg) { struct aws_mqtt5_sub_pub_unsub_context *test_context = arg; return test_context->publish_complete; } static void s_sub_pub_unsub_wait_for_publish_complete(struct aws_mqtt5_sub_pub_unsub_context *test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred( &test_fixture->signal, &test_fixture->lock, s_sub_pub_unsub_publish_complete, test_context); aws_mutex_unlock(&test_fixture->lock); } static uint8_t s_sub_pub_unsub_publish_topic[] = "hello/world"; static uint8_t s_sub_pub_unsub_publish_payload[] = "PublishPayload"; static int s_mqtt5_client_sub_pub_unsub_publish( struct aws_mqtt5_sub_pub_unsub_context *full_test_context, enum aws_mqtt5_qos qos, struct aws_mqtt5_packet_publish_storage *expected_publish_storage, struct aws_mqtt5_packet_puback_storage *expected_puback_storage) { struct aws_mqtt5_packet_publish_view publish_view = { .qos = qos, .topic = { 
.ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, .payload = { .ptr = s_sub_pub_unsub_publish_payload, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_payload) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_sub_pub_unsub_publish_complete_fn, .completion_user_data = full_test_context, }; struct aws_mqtt5_client *client = full_test_context->test_fixture->client; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &publish_view, &completion_options)); if (qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { s_sub_pub_unsub_wait_for_publish_complete(full_test_context); } struct aws_allocator *allocator = full_test_context->test_fixture->allocator; ASSERT_SUCCESS(aws_mqtt5_packet_publish_storage_init(expected_publish_storage, allocator, &publish_view)); struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = FORWARDED_PUBLISH_PACKET_ID, .reason_code = AWS_MQTT5_PARC_SUCCESS, }; ASSERT_SUCCESS(aws_mqtt5_packet_puback_storage_init(expected_puback_storage, allocator, &puback_view)); return AWS_OP_SUCCESS; } static int s_do_sub_pub_unsub_test(struct aws_allocator *allocator, enum aws_mqtt5_qos qos) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; test_options.client_options.publish_received_handler = s_sub_pub_unsub_publish_received_fn; test_options.client_options.publish_received_handler_user_data = &full_test_context; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_aws_mqtt5_server_send_suback_on_subscribe; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback_and_forward; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = aws_mqtt5_mock_server_handle_unsubscribe_unsuback_success; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_subscribe_storage expected_subscribe_storage; AWS_ZERO_STRUCT(expected_subscribe_storage); struct aws_mqtt5_packet_publish_storage expected_publish_storage; AWS_ZERO_STRUCT(expected_publish_storage); struct aws_mqtt5_packet_puback_storage expected_puback_storage; AWS_ZERO_STRUCT(expected_puback_storage); struct aws_mqtt5_packet_unsubscribe_storage expected_unsubscribe_storage; AWS_ZERO_STRUCT(expected_unsubscribe_storage); ASSERT_SUCCESS(s_mqtt5_client_sub_pub_unsub_subscribe(&full_test_context, &expected_subscribe_storage)); ASSERT_SUCCESS(s_mqtt5_client_sub_pub_unsub_publish( &full_test_context, qos, &expected_publish_storage, &expected_puback_storage)); s_sub_pub_unsub_wait_for_publish_received(&full_test_context); ASSERT_SUCCESS(s_mqtt5_client_sub_pub_unsub_unsubscribe(&full_test_context, &expected_unsubscribe_storage)); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* verify packets 
that server received: connect,subscribe, publish, puback(if qos1), unsubscribe */ struct aws_array_list expected_packets; aws_array_list_init_dynamic(&expected_packets, allocator, 5, sizeof(struct aws_mqtt5_mock_server_packet_record)); struct aws_mqtt5_mock_server_packet_record connect_record = { .packet_type = AWS_MQTT5_PT_CONNECT, .packet_storage = NULL, }; aws_array_list_push_back(&expected_packets, &connect_record); struct aws_mqtt5_mock_server_packet_record subscribe_record = { .packet_type = AWS_MQTT5_PT_SUBSCRIBE, .packet_storage = &expected_subscribe_storage, }; aws_array_list_push_back(&expected_packets, &subscribe_record); struct aws_mqtt5_mock_server_packet_record publish_record = { .packet_type = AWS_MQTT5_PT_PUBLISH, .packet_storage = &expected_publish_storage, }; aws_array_list_push_back(&expected_packets, &publish_record); if (qos == AWS_MQTT5_QOS_AT_LEAST_ONCE) { struct aws_mqtt5_mock_server_packet_record puback_record = { .packet_type = AWS_MQTT5_PT_PUBACK, .packet_storage = &expected_puback_storage, }; aws_array_list_push_back(&expected_packets, &puback_record); } struct aws_mqtt5_mock_server_packet_record unsubscribe_record = { .packet_type = AWS_MQTT5_PT_UNSUBSCRIBE, .packet_storage = &expected_unsubscribe_storage, }; aws_array_list_push_back(&expected_packets, &unsubscribe_record); ASSERT_SUCCESS(aws_verify_received_packet_sequence( &test_context, expected_packets.data, aws_array_list_length(&expected_packets))); /* verify client received the publish that we sent */ const struct aws_mqtt5_packet_publish_view *received_publish = &full_test_context.publish_storage.storage_view; ASSERT_TRUE((received_publish->packet_id != 0) == (qos == AWS_MQTT5_QOS_AT_LEAST_ONCE)); ASSERT_INT_EQUALS((uint32_t)qos, (uint32_t)received_publish->qos); ASSERT_BIN_ARRAYS_EQUALS( s_sub_pub_unsub_publish_topic, AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, received_publish->topic.ptr, received_publish->topic.len); ASSERT_BIN_ARRAYS_EQUALS( s_sub_pub_unsub_publish_payload, AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_payload) - 1, received_publish->payload.ptr, received_publish->payload.len); aws_mqtt5_packet_subscribe_storage_clean_up(&expected_subscribe_storage); aws_mqtt5_packet_publish_storage_clean_up(&expected_publish_storage); aws_mqtt5_packet_puback_storage_clean_up(&expected_puback_storage); aws_mqtt5_packet_unsubscribe_storage_clean_up(&expected_unsubscribe_storage); aws_array_list_clean_up(&expected_packets); s_sub_pub_unsub_context_clean_up(&full_test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static int s_mqtt5_client_sub_pub_unsub_qos0_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_sub_pub_unsub_test(allocator, AWS_MQTT5_QOS_AT_MOST_ONCE)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_sub_pub_unsub_qos0, s_mqtt5_client_sub_pub_unsub_qos0_fn) static int s_mqtt5_client_sub_pub_unsub_qos1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_sub_pub_unsub_test(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_sub_pub_unsub_qos1, s_mqtt5_client_sub_pub_unsub_qos1_fn) static enum aws_mqtt5_unsuback_reason_code s_unsubscribe_success_reason_codes[] = { AWS_MQTT5_UARC_NO_SUBSCRIPTION_EXISTED, }; static int s_aws_mqtt5_server_send_not_subscribe_unsuback_on_unsubscribe( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct 
aws_mqtt5_packet_unsubscribe_view *unsubscribe_view = packet; struct aws_mqtt5_packet_unsuback_view unsuback_view = { .packet_id = unsubscribe_view->packet_id, .reason_code_count = AWS_ARRAY_SIZE(s_unsubscribe_success_reason_codes), .reason_codes = s_unsubscribe_success_reason_codes, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_UNSUBACK, &unsuback_view); } static int s_mqtt5_client_unsubscribe_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = s_aws_mqtt5_server_send_not_subscribe_unsuback_on_unsubscribe; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = s_sub_pub_unsub_topic_filters, .topic_filter_count = AWS_ARRAY_SIZE(s_sub_pub_unsub_topic_filters), }; struct aws_mqtt5_unsubscribe_completion_options completion_options = { .completion_callback = s_sub_pub_unsub_unsubscribe_complete_fn, .completion_user_data = &full_test_context, }; aws_mqtt5_client_unsubscribe(client, &unsubscribe_view, &completion_options); s_sub_pub_unsub_wait_for_unsuback_received(&full_test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_unsubscribe_success, s_mqtt5_client_unsubscribe_success_fn) static aws_mqtt5_packet_id_t s_puback_packet_id = 183; struct aws_mqtt5_server_send_qos1_publish_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; bool publish_sent; bool connack_sent; bool connack_checked; }; static int s_aws_mqtt5_mock_server_handle_puback( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_packet_puback_view *puback_view = packet; ASSERT_INT_EQUALS(puback_view->packet_id, s_puback_packet_id); ASSERT_TRUE(puback_view->reason_code == AWS_MQTT5_PARC_SUCCESS); struct aws_mqtt5_client_mock_test_fixture *test_fixture = connection->test_fixture; struct aws_mqtt5_server_send_qos1_publish_context *publish_context = connection->test_fixture->mock_server_user_data; aws_mutex_lock(&test_fixture->lock); publish_context->connack_checked = true; aws_mutex_unlock(&test_fixture->lock); return AWS_OP_SUCCESS; } static void s_aws_mqtt5_mock_server_send_qos1_publish( struct aws_mqtt5_server_mock_connection_context *mock_server, void *user_data) { struct aws_mqtt5_server_send_qos1_publish_context *test_context = user_data; if (test_context->publish_sent || !test_context->connack_sent) { return; } test_context->publish_sent = true; struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = 
AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, .packet_id = s_puback_packet_id, }; aws_mqtt5_mock_server_send_packet(mock_server, AWS_MQTT5_PT_PUBLISH, &qos1_publish_view); } static int s_aws_mqtt5_server_send_qos1_publish_on_connect( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { int result = aws_mqtt5_mock_server_handle_connect_always_succeed(packet, connection, user_data); struct aws_mqtt5_server_send_qos1_publish_context *test_context = user_data; test_context->connack_sent = true; return result; } static bool s_publish_qos1_puback(void *arg) { struct aws_mqtt5_server_send_qos1_publish_context *test_context = arg; return test_context->connack_checked; } static void s_publish_qos1_wait_for_puback(struct aws_mqtt5_server_send_qos1_publish_context *test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred(&test_fixture->signal, &test_fixture->lock, s_publish_qos1_puback, test_context); aws_mutex_unlock(&test_fixture->lock); } /* When client receives a QoS1 PUBLISH it must send a valid PUBACK with packet id */ static int mqtt5_client_receive_qos1_return_puback_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* mock server sends a PUBLISH packet to the client */ test_options.server_function_table.service_task_fn = s_aws_mqtt5_mock_server_send_qos1_publish; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBACK] = s_aws_mqtt5_mock_server_handle_puback; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_server_send_qos1_publish_on_connect; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_send_qos1_publish_context publish_context = { .test_fixture = &test_context, .publish_sent = false, .connack_sent = false, .connack_checked = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &publish_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); s_publish_qos1_wait_for_puback(&publish_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_receive_qos1_return_puback_test, mqtt5_client_receive_qos1_return_puback_test_fn) static int s_aws_mqtt5_mock_server_handle_connect_session_present( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; connack_view.session_present = true; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } /* When client receives a CONNACK with existing session state when one isn't present it should 
disconnect */ static int mqtt5_client_receive_nonexisting_session_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* mock server returns a CONNACK indicating a session is being resumed */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_session_present; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); s_wait_for_connection_failure_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_receive_nonexisting_session_state, mqtt5_client_receive_nonexisting_session_state_fn) static const char *s_receive_assigned_client_id_client_id = "Assigned_Client_ID"; struct aws_mqtt5_server_receive_assigned_client_id_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; bool assigned_client_id_checked; }; static int s_aws_mqtt5_mock_server_handle_connect_assigned_client_id( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_packet_connect_view *connect_view = packet; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; struct aws_byte_cursor assigned_client_id = aws_byte_cursor_from_c_str(s_receive_assigned_client_id_client_id); /* Server behavior sets the Assigned Client Identifier on a CONNECT packet with an empty Client ID */ if (connect_view->client_id.len == 0) { connack_view.assigned_client_identifier = &assigned_client_id; } else { ASSERT_BIN_ARRAYS_EQUALS( assigned_client_id.ptr, assigned_client_id.len, connect_view->client_id.ptr, connect_view->client_id.len); struct aws_mqtt5_server_receive_assigned_client_id_context *test_context = user_data; test_context->assigned_client_id_checked = true; } return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } /* * When client connects with a zero length Client ID, server provides one. * The client should then use the assigned Client ID on reconnection attempts. 
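 * (In MQTT5 terms this is the Assigned Client Identifier property on the CONNACK, which the mock server
 * handler above sets whenever the CONNECT carries an empty Client ID.)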
*/ static int mqtt5_client_receive_assigned_client_id_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* Empty the Client ID for connect */ test_options.connect_options.client_id.len = 0; /* mock server checks for a client ID and if it's missing sends an assigned one */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_assigned_client_id; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_receive_assigned_client_id_context assigned_id_context = { .test_fixture = &test_context, .assigned_client_id_checked = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &assigned_id_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_byte_cursor assigned_client_id = aws_byte_cursor_from_c_str(s_receive_assigned_client_id_client_id); struct aws_byte_cursor negotiated_settings_client_id = aws_byte_cursor_from_buf(&client->negotiated_settings.client_id_storage); /* Test that Assigned Client ID is stored */ ASSERT_BIN_ARRAYS_EQUALS( assigned_client_id.ptr, assigned_client_id.len, negotiated_settings_client_id.ptr, negotiated_settings_client_id.len); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* Check for Assigned Client ID on reconnect */ ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); ASSERT_TRUE(assigned_id_context.assigned_client_id_checked); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_receive_assigned_client_id, mqtt5_client_receive_assigned_client_id_fn); #define TEST_PUBLISH_COUNT 10 static int s_aws_mqtt5_mock_server_handle_publish_no_puback_on_first_connect( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_client_mock_test_fixture *test_fixture = connection->test_fixture; struct aws_mqtt5_packet_publish_view *publish_view = packet; aws_mutex_lock(&test_fixture->lock); ++connection->test_fixture->publishes_received; aws_mutex_unlock(&test_fixture->lock); /* Only send the PUBACK on the second attempt after a reconnect and restored session */ if (publish_view->duplicate) { struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = publish_view->packet_id, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBACK, &puback_view); } return AWS_OP_SUCCESS; } static void s_receive_stored_session_publish_completion_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { if (packet_type != AWS_MQTT5_PT_PUBACK) { return; } const struct aws_mqtt5_packet_puback_view *puback = packet; struct aws_mqtt5_client_mock_test_fixture *test_context = complete_ctx; aws_mutex_lock(&test_context->lock); if 
(error_code == AWS_ERROR_SUCCESS && puback->reason_code < 128) { ++test_context->successful_pubacks_received; } aws_mutex_unlock(&test_context->lock); aws_condition_variable_notify_all(&test_context->signal); } static bool s_received_n_unacked_publishes(void *arg) { struct aws_mqtt5_client_test_wait_for_n_context *context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = context->test_fixture; return test_fixture->publishes_received >= context->required_event_count; } static void s_wait_for_n_unacked_publishes(struct aws_mqtt5_client_test_wait_for_n_context *context) { struct aws_mqtt5_client_mock_test_fixture *test_context = context->test_fixture; aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_received_n_unacked_publishes, context); aws_mutex_unlock(&test_context->lock); } static int mqtt5_client_no_session_after_client_stop_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* Set to rejoin */ test_options.client_options.session_behavior = AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS; /* mock server will not send PUBACKS on initial connect */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_mock_server_handle_publish_no_puback_on_first_connect; /* Simulate reconnecting to an existing connection */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_honor_session_after_success; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); for (size_t i = 0; i < TEST_PUBLISH_COUNT; ++i) { struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_receive_stored_session_publish_completion_fn, .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish_view, &completion_options)); } /* Wait for publishes to have gone out from client */ struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = TEST_PUBLISH_COUNT, }; s_wait_for_n_unacked_publishes(&wait_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); aws_mutex_lock(&test_context.lock); size_t event_count = aws_array_list_length(&test_context.lifecycle_events); struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_context.lifecycle_events, &record, event_count - 1); aws_mutex_unlock(&test_context.lock); ASSERT_FALSE(record->connack_storage.storage_view.session_present); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); 
aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_no_session_after_client_stop, mqtt5_client_no_session_after_client_stop_fn); static int mqtt5_client_restore_session_on_ping_timeout_reconnect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* Set to rejoin */ test_options.client_options.session_behavior = AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS; /* faster ping timeout */ test_options.client_options.ping_timeout_ms = 3000; test_options.connect_options.keep_alive_interval_seconds = 5; /* don't respond to PINGREQs */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PINGREQ] = NULL; /* mock server will not send PUBACKS on initial connect */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_mock_server_handle_publish_no_puback_on_first_connect; /* Simulate reconnecting to an existing connection */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_honor_session_after_success; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); for (size_t i = 0; i < TEST_PUBLISH_COUNT; ++i) { struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_receive_stored_session_publish_completion_fn, .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish_view, &completion_options)); } /* Wait for publishes to have gone out from client */ struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = TEST_PUBLISH_COUNT, }; s_wait_for_n_unacked_publishes(&wait_context); /* disconnect due to failed ping */ s_wait_for_disconnection_lifecycle_event(&test_context); /* Reconnect from a disconnect automatically */ aws_wait_for_connected_lifecycle_event(&test_context); s_wait_for_n_successful_publishes(&wait_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CONNECTED, AWS_MCS_CLEAN_DISCONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CONNECTED, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_STOPPED, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_restore_session_on_ping_timeout_reconnect, mqtt5_client_restore_session_on_ping_timeout_reconnect_fn); /* If the server returns a Clean Session, client must 
discard any existing Session and start a new Session */ static int mqtt5_client_discard_session_on_server_clean_start_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* Set to rejoin */ test_options.client_options.session_behavior = AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS; /* mock server will not send PUBACKS on initial connect */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_mock_server_handle_publish_no_puback_on_first_connect; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); for (size_t i = 0; i < TEST_PUBLISH_COUNT; ++i) { struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_receive_stored_session_publish_completion_fn, .completion_user_data = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish_view, &completion_options)); } /* Wait for QoS1 publishes to have gone out from client */ struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = TEST_PUBLISH_COUNT, }; s_wait_for_n_unacked_publishes(&wait_context); /* Disconnect with unacked publishes */ ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* Reconnect with a Client Stored Session */ ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); /* Provide time for Client to process any queued operations */ aws_thread_current_sleep(1000000000); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); /* Check that no publishes were resent after the initial batch on first connect */ ASSERT_INT_EQUALS(test_context.publishes_received, TEST_PUBLISH_COUNT); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_discard_session_on_server_clean_start, mqtt5_client_discard_session_on_server_clean_start_fn); static int s_verify_zero_statistics(struct aws_mqtt5_client_operation_statistics *stats) { ASSERT_INT_EQUALS(stats->incomplete_operation_size, 0); ASSERT_INT_EQUALS(stats->incomplete_operation_count, 0); ASSERT_INT_EQUALS(stats->unacked_operation_size, 0); ASSERT_INT_EQUALS(stats->unacked_operation_count, 0); return AWS_OP_SUCCESS; } static int s_verify_statistics_equal( struct aws_mqtt5_client_operation_statistics *expected_stats, struct aws_mqtt5_client_operation_statistics *actual_stats) { ASSERT_INT_EQUALS(expected_stats->incomplete_operation_size, actual_stats->incomplete_operation_size); ASSERT_INT_EQUALS(expected_stats->incomplete_operation_count, actual_stats->incomplete_operation_count); ASSERT_INT_EQUALS(expected_stats->unacked_operation_size, 
actual_stats->unacked_operation_size); ASSERT_INT_EQUALS(expected_stats->unacked_operation_count, actual_stats->unacked_operation_count); return AWS_OP_SUCCESS; } static int s_verify_client_statistics( struct aws_mqtt5_client_mock_test_fixture *test_context, struct aws_mqtt5_client_operation_statistics *expected_stats, size_t expected_stats_count) { struct aws_array_list *actual_stats = &test_context->client_statistics; size_t actual_stats_count = aws_array_list_length(actual_stats); /* we expect the last stats to be zero, the expected stats represent the stats before that */ ASSERT_INT_EQUALS(actual_stats_count, expected_stats_count + 1); struct aws_mqtt5_client_operation_statistics *current_stats = NULL; aws_array_list_get_at_ptr(actual_stats, (void **)&current_stats, actual_stats_count - 1); ASSERT_SUCCESS(s_verify_zero_statistics(current_stats)); for (size_t i = 0; i < expected_stats_count; ++i) { aws_array_list_get_at_ptr(actual_stats, (void **)&current_stats, i); ASSERT_SUCCESS(s_verify_statistics_equal(&expected_stats[i], current_stats)); } return AWS_OP_SUCCESS; } static struct aws_mqtt5_client_operation_statistics s_subscribe_test_statistics[] = { { .incomplete_operation_size = 68, .incomplete_operation_count = 1, .unacked_operation_size = 0, .unacked_operation_count = 0, }, { .incomplete_operation_size = 68, .incomplete_operation_count = 1, .unacked_operation_size = 68, .unacked_operation_count = 1, }, }; static int s_mqtt5_client_statistics_subscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_aws_mqtt5_server_send_suback_on_subscribe; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_server_disconnect_test_context disconnect_context = { .test_fixture = &test_context, .disconnect_sent = false, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &disconnect_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), }; struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = s_aws_mqtt5_subscribe_complete_fn, .completion_user_data = &test_context, }; aws_mqtt5_client_subscribe(client, &subscribe_view, &completion_options); s_wait_for_suback_received(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(s_verify_client_statistics( &test_context, s_subscribe_test_statistics, AWS_ARRAY_SIZE(s_subscribe_test_statistics))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_statistics_subscribe, s_mqtt5_client_statistics_subscribe_fn) static struct aws_mqtt5_client_operation_statistics s_unsubscribe_test_statistics[] = { { .incomplete_operation_size = 14, .incomplete_operation_count = 1, .unacked_operation_size = 0, 
.unacked_operation_count = 0, }, { .incomplete_operation_size = 14, .incomplete_operation_count = 1, .unacked_operation_size = 14, .unacked_operation_count = 1, }, }; static int s_mqtt5_client_statistics_unsubscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = s_aws_mqtt5_server_send_not_subscribe_unsuback_on_unsubscribe; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = s_sub_pub_unsub_topic_filters, .topic_filter_count = AWS_ARRAY_SIZE(s_sub_pub_unsub_topic_filters), }; struct aws_mqtt5_unsubscribe_completion_options completion_options = { .completion_callback = s_sub_pub_unsub_unsubscribe_complete_fn, .completion_user_data = &full_test_context, }; aws_mqtt5_client_unsubscribe(client, &unsubscribe_view, &completion_options); s_sub_pub_unsub_wait_for_unsuback_received(&full_test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(s_verify_client_statistics( &test_context, s_unsubscribe_test_statistics, AWS_ARRAY_SIZE(s_unsubscribe_test_statistics))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_statistics_unsubscribe, s_mqtt5_client_statistics_unsubscribe_fn) static struct aws_mqtt5_client_operation_statistics s_publish_qos1_test_statistics[] = { { .incomplete_operation_size = 30, .incomplete_operation_count = 1, .unacked_operation_size = 0, .unacked_operation_count = 0, }, { .incomplete_operation_size = 30, .incomplete_operation_count = 1, .unacked_operation_size = 30, .unacked_operation_count = 1, }, }; static int s_do_mqtt5_client_statistics_publish_test( struct aws_allocator *allocator, enum aws_mqtt5_qos qos, struct aws_mqtt5_client_operation_statistics *expected_stats, size_t expected_stats_count) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback_and_forward; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; 
ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_publish_view publish_view = { .qos = qos, .topic = { .ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, .payload = { .ptr = s_sub_pub_unsub_publish_payload, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_payload) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_sub_pub_unsub_publish_complete_fn, .completion_user_data = &full_test_context, }; aws_mqtt5_client_publish(client, &publish_view, &completion_options); s_sub_pub_unsub_wait_for_publish_complete(&full_test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(s_verify_client_statistics(&test_context, expected_stats, expected_stats_count)); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static int s_mqtt5_client_statistics_publish_qos1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5_client_statistics_publish_test( allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, s_publish_qos1_test_statistics, AWS_ARRAY_SIZE(s_publish_qos1_test_statistics))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_statistics_publish_qos1, s_mqtt5_client_statistics_publish_qos1_fn) static struct aws_mqtt5_client_operation_statistics s_publish_qos0_test_statistics[] = { { .incomplete_operation_size = 30, .incomplete_operation_count = 1, .unacked_operation_size = 0, .unacked_operation_count = 0, }, }; static int s_mqtt5_client_statistics_publish_qos0_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5_client_statistics_publish_test( allocator, AWS_MQTT5_QOS_AT_MOST_ONCE, s_publish_qos0_test_statistics, AWS_ARRAY_SIZE(s_publish_qos0_test_statistics))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_statistics_publish_qos0, s_mqtt5_client_statistics_publish_qos0_fn) static struct aws_mqtt5_client_operation_statistics s_publish_qos1_requeue_test_statistics[] = { { .incomplete_operation_size = 30, .incomplete_operation_count = 1, .unacked_operation_size = 0, .unacked_operation_count = 0, }, { .incomplete_operation_size = 30, .incomplete_operation_count = 1, .unacked_operation_size = 30, .unacked_operation_count = 1, }, { .incomplete_operation_size = 30, .incomplete_operation_count = 1, .unacked_operation_size = 0, .unacked_operation_count = 0, }, { .incomplete_operation_size = 30, .incomplete_operation_count = 1, .unacked_operation_size = 30, .unacked_operation_count = 1, }, }; static int s_aws_mqtt5_server_disconnect_on_first_publish_puback_after( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; struct aws_mqtt5_sub_pub_unsub_context *test_context = user_data; ++test_context->publishes_received; if (test_context->publishes_received == 1) { return AWS_OP_ERR; } struct aws_mqtt5_packet_publish_view *publish_view = packet; /* send a PUBACK? 
*/ if (publish_view->qos == AWS_MQTT5_QOS_AT_LEAST_ONCE) { struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = publish_view->packet_id, .reason_code = AWS_MQTT5_PARC_SUCCESS, }; if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBACK, &puback_view)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_mqtt5_client_statistics_publish_qos1_requeue_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.client_options.session_behavior = AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_server_disconnect_on_first_publish_puback_after; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_honor_session_after_success; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, .payload = { .ptr = s_sub_pub_unsub_publish_payload, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_payload) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_sub_pub_unsub_publish_complete_fn, .completion_user_data = &full_test_context, }; aws_mqtt5_client_publish(client, &publish_view, &completion_options); s_sub_pub_unsub_wait_for_publish_complete(&full_test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(s_verify_client_statistics( &test_context, s_publish_qos1_requeue_test_statistics, AWS_ARRAY_SIZE(s_publish_qos1_requeue_test_statistics))); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_statistics_publish_qos1_requeue, s_mqtt5_client_statistics_publish_qos1_requeue_fn) #define PUBACK_ORDERING_PUBLISH_COUNT 5 static int s_aws_mqtt5_server_send_multiple_publishes( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; /* in order: send PUBLISH packet ids 1, 2, 3, 4, 5 */ for (size_t i = 0; i < PUBACK_ORDERING_PUBLISH_COUNT; ++i) { struct aws_mqtt5_packet_publish_view publish_view = { .packet_id = (uint16_t)i + 1, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, .payload = { .ptr = s_sub_pub_unsub_publish_payload, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_payload) - 1, }, }; if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &publish_view)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static bool 
s_server_received_all_pubacks(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; size_t pubacks_received = 0; size_t packet_count = aws_array_list_length(&test_fixture->server_received_packets); for (size_t i = 0; i < packet_count; ++i) { struct aws_mqtt5_mock_server_packet_record *packet = NULL; aws_array_list_get_at_ptr(&test_fixture->server_received_packets, (void **)&packet, i); if (packet->packet_type == AWS_MQTT5_PT_PUBACK) { ++pubacks_received; } } return pubacks_received == PUBACK_ORDERING_PUBLISH_COUNT; } static void s_wait_for_mock_server_pubacks(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred( &test_context->signal, &test_context->lock, s_server_received_all_pubacks, test_context); aws_mutex_unlock(&test_context->lock); } static int s_verify_mock_puback_order(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); uint16_t expected_packet_id = 1; size_t packet_count = aws_array_list_length(&test_context->server_received_packets); /* in order: received PUBACK packet ids 1, 2, 3, 4, 5 */ for (size_t i = 0; i < packet_count; ++i) { struct aws_mqtt5_mock_server_packet_record *packet = NULL; aws_array_list_get_at_ptr(&test_context->server_received_packets, (void **)&packet, i); if (packet->packet_type == AWS_MQTT5_PT_PUBACK) { struct aws_mqtt5_packet_puback_view *puback_view = &((struct aws_mqtt5_packet_puback_storage *)(packet->packet_storage))->storage_view; ASSERT_INT_EQUALS(expected_packet_id, puback_view->packet_id); ++expected_packet_id; } } ASSERT_INT_EQUALS(PUBACK_ORDERING_PUBLISH_COUNT + 1, expected_packet_id); aws_mutex_unlock(&test_context->lock); return AWS_OP_SUCCESS; } static int s_mqtt5_client_puback_ordering_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_server_send_multiple_publishes; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .topic = { .ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, .payload = { .ptr = s_sub_pub_unsub_publish_payload, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_payload) - 1, }, }; aws_mqtt5_client_publish(client, &publish_view, NULL); s_wait_for_mock_server_pubacks(&test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(s_verify_mock_puback_order(&test_context)); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_puback_ordering, 
s_mqtt5_client_puback_ordering_fn) enum aws_mqtt5_listener_test_publish_received_callback_type { AWS_MQTT5_LTPRCT_DEFAULT, AWS_MQTT5_LTPRCT_LISTENER, }; struct aws_mqtt5_listeners_test_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; struct aws_array_list publish_received_callback_types; struct aws_mutex lock; struct aws_condition_variable signal; }; static void s_aws_mqtt5_listeners_test_context_init( struct aws_mqtt5_listeners_test_context *listener_test_context, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*listener_test_context); aws_array_list_init_dynamic( &listener_test_context->publish_received_callback_types, allocator, 0, sizeof(enum aws_mqtt5_listener_test_publish_received_callback_type)); aws_mutex_init(&listener_test_context->lock); aws_condition_variable_init(&listener_test_context->signal); } static void s_aws_mqtt5_listeners_test_context_clean_up( struct aws_mqtt5_listeners_test_context *listener_test_context) { aws_condition_variable_clean_up(&listener_test_context->signal); aws_mutex_clean_up(&listener_test_context->lock); aws_array_list_clean_up(&listener_test_context->publish_received_callback_types); } static int s_aws_mqtt5_mock_server_reflect_publish( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_packet_publish_view *publish_view = packet; struct aws_mqtt5_packet_publish_view reflected_view = *publish_view; if (reflected_view.qos != AWS_MQTT5_QOS_AT_MOST_ONCE) { reflected_view.packet_id = 1; } if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &reflected_view)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_aws_mqtt5_listeners_test_publish_received_default_handler( const struct aws_mqtt5_packet_publish_view *publish_view, void *user_data) { (void)publish_view; struct aws_mqtt5_listeners_test_context *context = user_data; aws_mutex_lock(&context->lock); enum aws_mqtt5_listener_test_publish_received_callback_type callback_type = AWS_MQTT5_LTPRCT_DEFAULT; aws_array_list_push_back(&context->publish_received_callback_types, &callback_type); aws_mutex_unlock(&context->lock); aws_condition_variable_notify_all(&context->signal); } static bool s_aws_mqtt5_listeners_test_publish_received_listener_handler( const struct aws_mqtt5_packet_publish_view *publish_view, void *user_data) { struct aws_mqtt5_listeners_test_context *context = user_data; aws_mutex_lock(&context->lock); enum aws_mqtt5_listener_test_publish_received_callback_type callback_type = AWS_MQTT5_LTPRCT_LISTENER; aws_array_list_push_back(&context->publish_received_callback_types, &callback_type); aws_mutex_unlock(&context->lock); aws_condition_variable_notify_all(&context->signal); return publish_view->qos == AWS_MQTT5_QOS_AT_LEAST_ONCE; } struct aws_mqtt5_listeners_test_wait_context { size_t callback_count; struct aws_mqtt5_listeners_test_context *test_fixture; }; static bool s_aws_mqtt5_listeners_test_wait_on_callback_count(void *context) { struct aws_mqtt5_listeners_test_wait_context *wait_context = context; return wait_context->callback_count == aws_array_list_length(&wait_context->test_fixture->publish_received_callback_types); } static int s_aws_mqtt5_listeners_test_wait_on_and_verify_callbacks( struct aws_mqtt5_listeners_test_context *context, size_t callback_count, enum aws_mqtt5_listener_test_publish_received_callback_type *callback_types) { struct aws_mqtt5_listeners_test_wait_context wait_context = { .callback_count = callback_count, .test_fixture = context, }; 
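    /*
     * Block until the expected number of publish-received callbacks has been recorded, then verify that
     * the callback types (default handler vs. listener handler) were invoked in the expected order.
     */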
aws_mutex_lock(&context->lock); aws_condition_variable_wait_pred( &context->signal, &context->lock, s_aws_mqtt5_listeners_test_wait_on_callback_count, &wait_context); for (size_t i = 0; i < callback_count; ++i) { enum aws_mqtt5_listener_test_publish_received_callback_type callback_type; aws_array_list_get_at(&context->publish_received_callback_types, &callback_type, i); ASSERT_INT_EQUALS(callback_types[i], callback_type); } aws_mutex_unlock(&context->lock); return AWS_OP_SUCCESS; } static int s_mqtt5_client_listeners_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_mock_server_reflect_publish; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_listeners_test_context full_test_context = { .test_fixture = &test_context, }; s_aws_mqtt5_listeners_test_context_init(&full_test_context, allocator); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; test_fixture_options.client_options->publish_received_handler = s_aws_mqtt5_listeners_test_publish_received_default_handler; test_fixture_options.client_options->publish_received_handler_user_data = &full_test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_publish_view qos0_publish_view = { .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .topic = { .ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, }; struct aws_mqtt5_packet_publish_view qos1_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, }; // send a qos 0 publish, wait for it to reflect, verify it's the default handler aws_mqtt5_client_publish(client, &qos0_publish_view, NULL); enum aws_mqtt5_listener_test_publish_received_callback_type first_callback_type_array[] = { AWS_MQTT5_LTPRCT_DEFAULT, }; ASSERT_SUCCESS(s_aws_mqtt5_listeners_test_wait_on_and_verify_callbacks( &full_test_context, AWS_ARRAY_SIZE(first_callback_type_array), first_callback_type_array)); // attach a listener at the beginning of the handler chain struct aws_mqtt5_listener_config listener_config = { .client = client, .listener_callbacks = { .listener_publish_received_handler_user_data = &full_test_context, .listener_publish_received_handler = s_aws_mqtt5_listeners_test_publish_received_listener_handler, }}; struct aws_mqtt5_listener *listener = aws_mqtt5_listener_new(allocator, &listener_config); // send a qos 0 publish, wait for it to reflect, verify both handlers were invoked in the proper order aws_mqtt5_client_publish(client, &qos0_publish_view, NULL); enum aws_mqtt5_listener_test_publish_received_callback_type second_callback_type_array[] = { AWS_MQTT5_LTPRCT_DEFAULT, AWS_MQTT5_LTPRCT_LISTENER, AWS_MQTT5_LTPRCT_DEFAULT, }; ASSERT_SUCCESS(s_aws_mqtt5_listeners_test_wait_on_and_verify_callbacks( &full_test_context, AWS_ARRAY_SIZE(second_callback_type_array), second_callback_type_array)); // send a 
qos1 publish (which is short-circuited by the listener), verify just the listener was notified aws_mqtt5_client_publish(client, &qos1_publish_view, NULL); enum aws_mqtt5_listener_test_publish_received_callback_type third_callback_type_array[] = { AWS_MQTT5_LTPRCT_DEFAULT, AWS_MQTT5_LTPRCT_LISTENER, AWS_MQTT5_LTPRCT_DEFAULT, AWS_MQTT5_LTPRCT_LISTENER, }; ASSERT_SUCCESS(s_aws_mqtt5_listeners_test_wait_on_and_verify_callbacks( &full_test_context, AWS_ARRAY_SIZE(third_callback_type_array), third_callback_type_array)); // remove the listener aws_mqtt5_listener_release(listener); // send a qos1 publish, wait for it to reflect, verify it's the default handler aws_mqtt5_client_publish(client, &qos1_publish_view, NULL); enum aws_mqtt5_listener_test_publish_received_callback_type fourth_callback_type_array[] = { AWS_MQTT5_LTPRCT_DEFAULT, AWS_MQTT5_LTPRCT_LISTENER, AWS_MQTT5_LTPRCT_DEFAULT, AWS_MQTT5_LTPRCT_LISTENER, AWS_MQTT5_LTPRCT_DEFAULT, }; ASSERT_SUCCESS(s_aws_mqtt5_listeners_test_wait_on_and_verify_callbacks( &full_test_context, AWS_ARRAY_SIZE(fourth_callback_type_array), fourth_callback_type_array)); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); s_aws_mqtt5_listeners_test_context_clean_up(&full_test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_listeners, s_mqtt5_client_listeners_fn) static void s_on_offline_publish_completion( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *user_data) { (void)packet_type; (void)packet; struct aws_mqtt5_sub_pub_unsub_context *full_test_context = user_data; aws_mutex_lock(&full_test_context->test_fixture->lock); if (error_code == AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY) { ++full_test_context->publish_failures; } else if (error_code == 0) { full_test_context->publish_complete = true; } aws_mutex_unlock(&full_test_context->test_fixture->lock); aws_condition_variable_notify_all(&full_test_context->test_fixture->signal); } static bool s_has_failed_publishes(void *user_data) { struct aws_mqtt5_sub_pub_unsub_context *full_test_context = user_data; return full_test_context->publish_failures > 0; } static void s_aws_mqtt5_wait_for_publish_failure(struct aws_mqtt5_sub_pub_unsub_context *full_test_context) { aws_mutex_lock(&full_test_context->test_fixture->lock); aws_condition_variable_wait_pred( &full_test_context->test_fixture->signal, &full_test_context->test_fixture->lock, s_has_failed_publishes, full_test_context); aws_mutex_unlock(&full_test_context->test_fixture->lock); } static int s_offline_publish( struct aws_mqtt5_client *client, struct aws_mqtt5_sub_pub_unsub_context *full_test_context, enum aws_mqtt5_qos qos) { struct aws_mqtt5_packet_publish_view publish_view = { .qos = qos, .topic = { .ptr = s_sub_pub_unsub_publish_topic, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_topic) - 1, }, .payload = { .ptr = s_sub_pub_unsub_publish_payload, .len = AWS_ARRAY_SIZE(s_sub_pub_unsub_publish_payload) - 1, }, }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_on_offline_publish_completion, .completion_user_data = full_test_context, }; return aws_mqtt5_client_publish(client, &publish_view, &completion_options); } static int s_verify_offline_publish_failure( struct aws_mqtt5_client *client, struct aws_mqtt5_sub_pub_unsub_context *full_test_context, enum aws_mqtt5_qos qos) { 
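    /* Reset the failure counter, submit an offline publish at the given qos, and wait for the completion
     * callback to report AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY. */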
aws_mutex_lock(&full_test_context->test_fixture->lock); full_test_context->publish_failures = 0; aws_mutex_unlock(&full_test_context->test_fixture->lock); ASSERT_SUCCESS(s_offline_publish(client, full_test_context, qos)); s_aws_mqtt5_wait_for_publish_failure(full_test_context); return AWS_OP_SUCCESS; } /* There's no signalable event for internal queue changes so we have to spin-poll this in a dumb manner */ static void s_aws_mqtt5_wait_for_offline_queue_size( struct aws_mqtt5_sub_pub_unsub_context *full_test_context, size_t expected_queue_size) { bool done = false; struct aws_mqtt5_client *client = full_test_context->test_fixture->client; while (!done) { aws_thread_current_sleep(aws_timestamp_convert(100, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); struct aws_mqtt5_client_operation_statistics stats; AWS_ZERO_STRUCT(stats); aws_mqtt5_client_get_stats(client, &stats); done = stats.incomplete_operation_count == expected_queue_size; } } static void s_on_offline_subscribe_completion( const struct aws_mqtt5_packet_suback_view *suback_view, int error_code, void *user_data) { (void)suback_view; struct aws_mqtt5_sub_pub_unsub_context *full_test_context = user_data; aws_mutex_lock(&full_test_context->test_fixture->lock); if (error_code == AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY) { ++full_test_context->subscribe_failures; } aws_mutex_unlock(&full_test_context->test_fixture->lock); aws_condition_variable_notify_all(&full_test_context->test_fixture->signal); } static bool s_has_failed_subscribes(void *user_data) { struct aws_mqtt5_sub_pub_unsub_context *full_test_context = user_data; return full_test_context->subscribe_failures > 0; } static void s_aws_mqtt5_wait_for_subscribe_failure(struct aws_mqtt5_sub_pub_unsub_context *full_test_context) { aws_mutex_lock(&full_test_context->test_fixture->lock); aws_condition_variable_wait_pred( &full_test_context->test_fixture->signal, &full_test_context->test_fixture->lock, s_has_failed_subscribes, full_test_context); aws_mutex_unlock(&full_test_context->test_fixture->lock); } static int s_offline_subscribe( struct aws_mqtt5_client *client, struct aws_mqtt5_sub_pub_unsub_context *full_test_context) { struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), }; struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = s_on_offline_subscribe_completion, .completion_user_data = full_test_context, }; return aws_mqtt5_client_subscribe(client, &subscribe_view, &completion_options); } static int s_verify_offline_subscribe_failure( struct aws_mqtt5_client *client, struct aws_mqtt5_sub_pub_unsub_context *full_test_context) { aws_mutex_lock(&full_test_context->test_fixture->lock); full_test_context->subscribe_failures = 0; aws_mutex_unlock(&full_test_context->test_fixture->lock); ASSERT_SUCCESS(s_offline_subscribe(client, full_test_context)); s_aws_mqtt5_wait_for_subscribe_failure(full_test_context); return AWS_OP_SUCCESS; } static void s_on_offline_unsubscribe_completion( const struct aws_mqtt5_packet_unsuback_view *unsuback_view, int error_code, void *user_data) { (void)unsuback_view; struct aws_mqtt5_sub_pub_unsub_context *full_test_context = user_data; aws_mutex_lock(&full_test_context->test_fixture->lock); if (error_code == AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY) { ++full_test_context->unsubscribe_failures; } aws_mutex_unlock(&full_test_context->test_fixture->lock); 
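    /* wake any test thread blocked in s_aws_mqtt5_wait_for_unsubscribe_failure */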
aws_condition_variable_notify_all(&full_test_context->test_fixture->signal); } static bool s_has_failed_unsubscribes(void *user_data) { struct aws_mqtt5_sub_pub_unsub_context *full_test_context = user_data; return full_test_context->unsubscribe_failures > 0; } static void s_aws_mqtt5_wait_for_unsubscribe_failure(struct aws_mqtt5_sub_pub_unsub_context *full_test_context) { aws_mutex_lock(&full_test_context->test_fixture->lock); aws_condition_variable_wait_pred( &full_test_context->test_fixture->signal, &full_test_context->test_fixture->lock, s_has_failed_unsubscribes, full_test_context); aws_mutex_unlock(&full_test_context->test_fixture->lock); } static int s_offline_unsubscribe( struct aws_mqtt5_client *client, struct aws_mqtt5_sub_pub_unsub_context *full_test_context) { struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = s_sub_pub_unsub_topic_filters, .topic_filter_count = AWS_ARRAY_SIZE(s_sub_pub_unsub_topic_filters), }; struct aws_mqtt5_unsubscribe_completion_options completion_options = { .completion_callback = s_on_offline_unsubscribe_completion, .completion_user_data = full_test_context, }; return aws_mqtt5_client_unsubscribe(client, &unsubscribe_view, &completion_options); } static int s_verify_offline_unsubscribe_failure( struct aws_mqtt5_client *client, struct aws_mqtt5_sub_pub_unsub_context *full_test_context) { aws_mutex_lock(&full_test_context->test_fixture->lock); full_test_context->unsubscribe_failures = 0; aws_mutex_unlock(&full_test_context->test_fixture->lock); ASSERT_SUCCESS(s_offline_unsubscribe(client, full_test_context)); s_aws_mqtt5_wait_for_unsubscribe_failure(full_test_context); return AWS_OP_SUCCESS; } static int s_mqtt5_client_offline_operation_submission_fail_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.client_options.offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; /* everything should fail on submission */ ASSERT_SUCCESS(s_verify_offline_publish_failure(client, &full_test_context, AWS_MQTT5_QOS_AT_MOST_ONCE)); ASSERT_SUCCESS(s_verify_offline_publish_failure(client, &full_test_context, AWS_MQTT5_QOS_AT_LEAST_ONCE)); ASSERT_SUCCESS(s_verify_offline_subscribe_failure(client, &full_test_context)); ASSERT_SUCCESS(s_verify_offline_unsubscribe_failure(client, &full_test_context)); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_offline_operation_submission_fail_all, s_mqtt5_client_offline_operation_submission_fail_all_fn) static int s_mqtt5_client_offline_operation_submission_fail_qos0_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); 
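    /*
     * AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT: while the client is offline, qos 0 publishes fail
     * immediately on submission, while qos 1 publishes, subscribes, and unsubscribes are queued until the
     * next successful connection.
     */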
test_options.client_options.offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; /* qos 0 publish should fail on submission */ ASSERT_SUCCESS(s_verify_offline_publish_failure(client, &full_test_context, AWS_MQTT5_QOS_AT_MOST_ONCE)); /* qos 1 publish, subscribe, and unsubscribe should queue on submission */ ASSERT_SUCCESS(s_offline_publish(client, &full_test_context, AWS_MQTT5_QOS_AT_LEAST_ONCE)); s_aws_mqtt5_wait_for_offline_queue_size(&full_test_context, 1); ASSERT_SUCCESS(s_offline_subscribe(client, &full_test_context)); s_aws_mqtt5_wait_for_offline_queue_size(&full_test_context, 2); ASSERT_SUCCESS(s_offline_unsubscribe(client, &full_test_context)); s_aws_mqtt5_wait_for_offline_queue_size(&full_test_context, 3); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_offline_operation_submission_fail_qos0, s_mqtt5_client_offline_operation_submission_fail_qos0_fn) static int s_mqtt5_client_offline_operation_submission_fail_non_qos1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.client_options.offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; /* qos0 publish, subscribe, and unsubscribe should fail on submission */ ASSERT_SUCCESS(s_verify_offline_publish_failure(client, &full_test_context, AWS_MQTT5_QOS_AT_MOST_ONCE)); ASSERT_SUCCESS(s_verify_offline_subscribe_failure(client, &full_test_context)); ASSERT_SUCCESS(s_verify_offline_unsubscribe_failure(client, &full_test_context)); /* qos1 publish should queue on submission */ ASSERT_SUCCESS(s_offline_publish(client, &full_test_context, AWS_MQTT5_QOS_AT_LEAST_ONCE)); s_aws_mqtt5_wait_for_offline_queue_size(&full_test_context, 1); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_offline_operation_submission_fail_non_qos1, s_mqtt5_client_offline_operation_submission_fail_non_qos1_fn) static int s_mqtt5_client_offline_operation_submission_then_connect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); 
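    /*
     * Offline-then-connect case: a qos 1 publish is queued while the client is stopped, then the client is
     * started and the queued publish is expected to go out (and be acked by the mock server) as soon as the
     * connection completes.
     */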
test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback_and_forward; test_options.client_options.offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; /* qos1 publish should queue on submission */ ASSERT_SUCCESS(s_offline_publish(client, &full_test_context, AWS_MQTT5_QOS_AT_LEAST_ONCE)); s_aws_mqtt5_wait_for_offline_queue_size(&full_test_context, 1); /* start the client, it should connect and immediately send the publish */ aws_mqtt5_client_start(client); s_sub_pub_unsub_wait_for_publish_complete(&full_test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_offline_operation_submission_then_connect, s_mqtt5_client_offline_operation_submission_then_connect_fn) #define ALIASED_PUBLISH_SEQUENCE_COUNT 4 struct aws_mqtt5_aliased_publish_sequence_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; struct aws_allocator *allocator; struct aws_array_list publishes_received; }; static int s_aws_mqtt5_aliased_publish_sequence_context_init( struct aws_mqtt5_aliased_publish_sequence_context *context, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*context); context->allocator = allocator; return aws_array_list_init_dynamic( &context->publishes_received, allocator, 10, sizeof(struct aws_mqtt5_packet_publish_storage *)); } static void s_aws_mqtt5_aliased_publish_sequence_context_clean_up( struct aws_mqtt5_aliased_publish_sequence_context *context) { for (size_t i = 0; i < aws_array_list_length(&context->publishes_received); ++i) { struct aws_mqtt5_packet_publish_storage *storage = NULL; aws_array_list_get_at(&context->publishes_received, &storage, i); aws_mqtt5_packet_publish_storage_clean_up(storage); aws_mem_release(context->allocator, storage); } aws_array_list_clean_up(&context->publishes_received); } void s_aliased_publish_received_fn(const struct aws_mqtt5_packet_publish_view *publish, void *complete_ctx) { AWS_FATAL_ASSERT(publish != NULL); struct aws_mqtt5_aliased_publish_sequence_context *full_test_context = complete_ctx; struct aws_mqtt5_client_mock_test_fixture *test_fixture = full_test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); struct aws_mqtt5_packet_publish_storage *storage = aws_mem_calloc(full_test_context->allocator, 1, sizeof(struct aws_mqtt5_packet_publish_storage)); aws_mqtt5_packet_publish_storage_init(storage, full_test_context->allocator, publish); aws_array_list_push_back(&full_test_context->publishes_received, &storage); aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } static enum aws_mqtt5_suback_reason_code s_alias_reason_codes[] = { AWS_MQTT5_SARC_GRANTED_QOS_1, AWS_MQTT5_SARC_GRANTED_QOS_1, }; static uint8_t s_alias_topic1[] = "alias/first/topic"; static 
uint8_t s_alias_topic2[] = "alias/second/topic"; static int s_aws_mqtt5_server_send_aliased_publish_sequence( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_packet_subscribe_view *subscribe_view = packet; struct aws_mqtt5_packet_suback_view suback_view = { .packet_id = subscribe_view->packet_id, .reason_code_count = 1, .reason_codes = s_alias_reason_codes}; // just to be thorough, send a suback if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_SUBACK, &suback_view)) { return AWS_OP_ERR; } uint16_t alias_id = 1; struct aws_mqtt5_packet_publish_view publish_view = { .packet_id = 1, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_alias_topic1, .len = AWS_ARRAY_SIZE(s_alias_topic1) - 1, }, .topic_alias = &alias_id, }; // establish an alias with id 1 if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &publish_view)) { return AWS_OP_ERR; } // alias alone AWS_ZERO_STRUCT(publish_view.topic); if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &publish_view)) { return AWS_OP_ERR; } // establish a new alias with id 1 publish_view.topic.ptr = s_alias_topic2; publish_view.topic.len = AWS_ARRAY_SIZE(s_alias_topic2) - 1; if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &publish_view)) { return AWS_OP_ERR; } // alias alone AWS_ZERO_STRUCT(publish_view.topic); if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &publish_view)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static bool s_client_received_aliased_publish_sequence(void *arg) { struct aws_mqtt5_aliased_publish_sequence_context *full_test_context = arg; return aws_array_list_length(&full_test_context->publishes_received) == ALIASED_PUBLISH_SEQUENCE_COUNT; } static void s_wait_for_aliased_publish_sequence(struct aws_mqtt5_aliased_publish_sequence_context *full_test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = full_test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred( &test_fixture->signal, &test_fixture->lock, s_client_received_aliased_publish_sequence, full_test_context); aws_mutex_unlock(&test_fixture->lock); } static int s_verify_aliased_publish_sequence(struct aws_mqtt5_aliased_publish_sequence_context *full_test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = full_test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); for (size_t i = 0; i < aws_array_list_length(&full_test_context->publishes_received); ++i) { struct aws_mqtt5_packet_publish_storage *publish_storage = NULL; aws_array_list_get_at(&full_test_context->publishes_received, &publish_storage, i); struct aws_byte_cursor topic_cursor = publish_storage->storage_view.topic; if (i < 2) { // the first two publishes should be the first topic ASSERT_BIN_ARRAYS_EQUALS( s_alias_topic1, AWS_ARRAY_SIZE(s_alias_topic1) - 1, topic_cursor.ptr, topic_cursor.len); } else { // the last two publishes should be the second topic ASSERT_BIN_ARRAYS_EQUALS( s_alias_topic2, AWS_ARRAY_SIZE(s_alias_topic2) - 1, topic_cursor.ptr, topic_cursor.len); } } aws_mutex_unlock(&test_fixture->lock); return AWS_OP_SUCCESS; } static struct aws_mqtt5_subscription_view s_alias_subscriptions[] = { { .topic_filter = { .ptr = (uint8_t *)s_alias_topic1, .len = AWS_ARRAY_SIZE(s_alias_topic1) - 1, }, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, }, { .topic_filter = { .ptr = (uint8_t *)s_alias_topic2, .len = AWS_ARRAY_SIZE(s_alias_topic2) - 1, }, .qos = 
AWS_MQTT5_QOS_AT_LEAST_ONCE, }, }; static int s_mqtt5_client_inbound_alias_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_aws_mqtt5_server_send_aliased_publish_sequence; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_aliased_publish_sequence_context full_test_context; ASSERT_SUCCESS(s_aws_mqtt5_aliased_publish_sequence_context_init(&full_test_context, allocator)); full_test_context.test_fixture = &test_context; struct aws_mqtt5_client_topic_alias_options aliasing_config = { .inbound_alias_cache_size = 10, .inbound_topic_alias_behavior = AWS_MQTT5_CITABT_ENABLED, }; test_options.client_options.topic_aliasing_options = &aliasing_config; test_options.client_options.publish_received_handler = s_aliased_publish_received_fn; test_options.client_options.publish_received_handler_user_data = &full_test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_subscribe_view subscribe = { .subscriptions = s_alias_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_alias_subscriptions), }; ASSERT_SUCCESS(aws_mqtt5_client_subscribe(client, &subscribe, NULL)); s_wait_for_aliased_publish_sequence(&full_test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(s_verify_aliased_publish_sequence(&full_test_context)); s_aws_mqtt5_aliased_publish_sequence_context_clean_up(&full_test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_inbound_alias_success, s_mqtt5_client_inbound_alias_success_fn) enum aws_mqtt5_test_inbound_alias_failure_type { AWS_MTIAFT_DISABLED, AWS_MTIAFT_ZERO_ID, AWS_MTIAFT_TOO_LARGE_ID, AWS_MTIAFT_UNBOUND_ID }; struct aws_mqtt5_test_inbound_alias_failure_context { struct aws_mqtt5_client_mock_test_fixture *test_fixture; enum aws_mqtt5_test_inbound_alias_failure_type failure_type; }; static int s_aws_mqtt5_server_send_aliased_publish_failure( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { struct aws_mqtt5_test_inbound_alias_failure_context *test_context = user_data; struct aws_mqtt5_packet_subscribe_view *subscribe_view = packet; struct aws_mqtt5_packet_suback_view suback_view = { .packet_id = subscribe_view->packet_id, .reason_code_count = 1, .reason_codes = s_alias_reason_codes}; // just to be thorough, send a suback if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_SUBACK, &suback_view)) { return AWS_OP_ERR; } uint16_t alias_id = 0; struct aws_byte_cursor topic_cursor = { .ptr = s_alias_topic1, .len = AWS_ARRAY_SIZE(s_alias_topic1) - 1, }; switch (test_context->failure_type) { case AWS_MTIAFT_TOO_LARGE_ID: alias_id = 100; break; case AWS_MTIAFT_UNBOUND_ID: AWS_ZERO_STRUCT(topic_cursor); alias_id = 1; break; 
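        /* AWS_MTIAFT_DISABLED and AWS_MTIAFT_ZERO_ID keep the defaults set above: a valid topic with a
         * topic alias id of 0 */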
default: break; } struct aws_mqtt5_packet_publish_view publish_view = { .packet_id = 1, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = topic_cursor, .topic_alias = &alias_id}; // establish an alias with id 1 if (aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_PUBLISH, &publish_view)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static bool s_has_decoding_error_disconnect_event(void *arg) { struct aws_mqtt5_test_inbound_alias_failure_context *test_context = arg; struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; size_t record_count = aws_array_list_length(&test_fixture->lifecycle_events); for (size_t i = 0; i < record_count; ++i) { struct aws_mqtt5_lifecycle_event_record *record = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &record, i); if (record->event.event_type == AWS_MQTT5_CLET_DISCONNECTION) { if (record->event.error_code == AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR) { return true; } } } return false; } static void s_wait_for_decoding_error_disconnect(struct aws_mqtt5_test_inbound_alias_failure_context *test_context) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred( &test_fixture->signal, &test_fixture->lock, s_has_decoding_error_disconnect_event, test_context); aws_mutex_unlock(&test_fixture->lock); } static int s_do_inbound_alias_failure_test( struct aws_allocator *allocator, enum aws_mqtt5_test_inbound_alias_failure_type test_failure_type) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_aws_mqtt5_server_send_aliased_publish_failure; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_test_inbound_alias_failure_context full_test_context = { .test_fixture = &test_context, .failure_type = test_failure_type, }; struct aws_mqtt5_client_topic_alias_options aliasing_config = { .inbound_alias_cache_size = 10, .inbound_topic_alias_behavior = (test_failure_type == AWS_MTIAFT_DISABLED) ? 
AWS_MQTT5_CITABT_DISABLED : AWS_MQTT5_CITABT_ENABLED, }; test_options.client_options.topic_aliasing_options = &aliasing_config; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &full_test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); struct aws_mqtt5_packet_subscribe_view subscribe = { .subscriptions = s_alias_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_alias_subscriptions), }; ASSERT_SUCCESS(aws_mqtt5_client_subscribe(client, &subscribe, NULL)); s_wait_for_decoding_error_disconnect(&full_test_context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static int s_mqtt5_client_inbound_alias_failure_disabled_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_inbound_alias_failure_test(allocator, AWS_MTIAFT_DISABLED)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_inbound_alias_failure_disabled, s_mqtt5_client_inbound_alias_failure_disabled_fn) static int s_mqtt5_client_inbound_alias_failure_zero_id_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_inbound_alias_failure_test(allocator, AWS_MTIAFT_ZERO_ID)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_inbound_alias_failure_zero_id, s_mqtt5_client_inbound_alias_failure_zero_id_fn) static int s_mqtt5_client_inbound_alias_failure_too_large_id_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_inbound_alias_failure_test(allocator, AWS_MTIAFT_TOO_LARGE_ID)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_inbound_alias_failure_too_large_id, s_mqtt5_client_inbound_alias_failure_too_large_id_fn) static int s_mqtt5_client_inbound_alias_failure_unbound_id_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_inbound_alias_failure_test(allocator, AWS_MTIAFT_UNBOUND_ID)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_inbound_alias_failure_unbound_id, s_mqtt5_client_inbound_alias_failure_unbound_id_fn) void s_outbound_alias_failure_publish_complete_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)packet_type; (void)packet; AWS_FATAL_ASSERT(error_code != AWS_ERROR_SUCCESS); struct aws_mqtt5_sub_pub_unsub_context *test_context = complete_ctx; struct aws_mqtt5_client_mock_test_fixture *test_fixture = test_context->test_fixture; aws_mutex_lock(&test_fixture->lock); test_context->publish_failures++; aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } #define SEQUENCE_TEST_CACHE_SIZE 2 static int s_aws_mqtt5_mock_server_handle_connect_allow_aliasing( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); uint16_t topic_alias_maximum = SEQUENCE_TEST_CACHE_SIZE; connack_view.topic_alias_maximum = &topic_alias_maximum; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, 
&connack_view); } static int s_do_mqtt5_client_outbound_alias_failure_test( struct aws_allocator *allocator, enum aws_mqtt5_client_outbound_topic_alias_behavior_type behavior_type) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_allow_aliasing; test_options.topic_aliasing_options.outbound_topic_alias_behavior = behavior_type; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_sub_pub_unsub_context full_test_context = { .test_fixture = &test_context, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); uint16_t topic_alias = 1; struct aws_mqtt5_packet_publish_view packet_publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }, .topic_alias = &topic_alias, }; if (behavior_type == AWS_MQTT5_COTABT_MANUAL) { AWS_ZERO_STRUCT(packet_publish_view.topic); } struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_outbound_alias_failure_publish_complete_fn, .completion_user_data = &full_test_context, }; /* should result in an immediate validation failure or a subsequent dynamic validation failure */ if (aws_mqtt5_client_publish(client, &packet_publish_view, &completion_options) == AWS_OP_SUCCESS) { s_aws_mqtt5_wait_for_publish_failure(&full_test_context); } ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static int s_mqtt5_client_outbound_alias_manual_failure_empty_topic_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5_client_outbound_alias_failure_test(allocator, AWS_MQTT5_COTABT_MANUAL)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_outbound_alias_manual_failure_empty_topic, s_mqtt5_client_outbound_alias_manual_failure_empty_topic_fn) struct outbound_alias_publish { struct aws_byte_cursor topic; uint16_t topic_alias; size_t expected_alias_id; bool expected_reuse; }; #define DEFINE_OUTBOUND_ALIAS_PUBLISH(topic_suffix, desired_alias, expected_alias_index, reused) \ { \ .topic = aws_byte_cursor_from_string(s_topic_##topic_suffix), .topic_alias = desired_alias, \ .expected_alias_id = expected_alias_index, .expected_reuse = reused, \ } static void s_outbound_alias_publish_completion_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { AWS_FATAL_ASSERT(packet_type == AWS_MQTT5_PT_PUBACK); AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); const struct aws_mqtt5_packet_puback_view *puback = packet; struct aws_mqtt5_client_mock_test_fixture *test_context = complete_ctx; aws_mutex_lock(&test_context->lock); ++test_context->total_pubacks_received; if (error_code == AWS_ERROR_SUCCESS && puback->reason_code < 128) { ++test_context->successful_pubacks_received; } aws_mutex_unlock(&test_context->lock); 
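    /* wake the test thread waiting in s_wait_for_n_successful_publishes on the fixture signal */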
aws_condition_variable_notify_all(&test_context->signal); } static int s_perform_outbound_alias_publish( struct aws_mqtt5_client_mock_test_fixture *test_fixture, struct outbound_alias_publish *publish) { struct aws_mqtt5_client *client = test_fixture->client; uint16_t alias_id = publish->topic_alias; struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = publish->topic, }; if (alias_id != 0) { publish_view.topic_alias = &alias_id; } struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_outbound_alias_publish_completion_fn, .completion_user_data = test_fixture, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &publish_view, &completion_options)); return AWS_OP_SUCCESS; } static int s_perform_outbound_alias_sequence( struct aws_mqtt5_client_mock_test_fixture *test_fixture, struct outbound_alias_publish *publishes, size_t publish_count) { for (size_t i = 0; i < publish_count; ++i) { struct outbound_alias_publish *publish = &publishes[i]; ASSERT_SUCCESS(s_perform_outbound_alias_publish(test_fixture, publish)); } return AWS_OP_SUCCESS; } static int s_perform_outbound_alias_sequence_test( struct aws_allocator *allocator, enum aws_mqtt5_client_outbound_topic_alias_behavior_type behavior_type, struct outbound_alias_publish *publishes, size_t publish_count) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_allow_aliasing; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback; test_options.topic_aliasing_options.outbound_topic_alias_behavior = behavior_type; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); test_context.maximum_inbound_topic_aliases = SEQUENCE_TEST_CACHE_SIZE; struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); ASSERT_SUCCESS(s_perform_outbound_alias_sequence(&test_context, publishes, publish_count)); struct aws_mqtt5_client_test_wait_for_n_context wait_context = { .test_fixture = &test_context, .required_event_count = publish_count, }; s_wait_for_n_successful_publishes(&wait_context); aws_mutex_lock(&test_context.lock); size_t packet_count = aws_array_list_length(&test_context.server_received_packets); ASSERT_INT_EQUALS(1 + publish_count, packet_count); // N publishes, 1 connect /* start at 1 and skip the connect */ for (size_t i = 1; i < packet_count; ++i) { struct aws_mqtt5_mock_server_packet_record *packet = NULL; aws_array_list_get_at_ptr(&test_context.server_received_packets, (void **)&packet, i); ASSERT_INT_EQUALS(AWS_MQTT5_PT_PUBLISH, packet->packet_type); struct aws_mqtt5_packet_publish_storage *publish_storage = packet->packet_storage; struct aws_mqtt5_packet_publish_view *publish_view = &publish_storage->storage_view; struct outbound_alias_publish *publish = &publishes[i - 1]; ASSERT_NOT_NULL(publish_view->topic_alias); ASSERT_INT_EQUALS(publish->expected_alias_id, *publish_view->topic_alias); /* * Unfortunately, the 
decoder fails unless it has an inbound resolver and the inbound resolver will always * resolve the topics first. So we can't actually check that an empty topic was sent. It would be nice to * harden this up in the future. */ ASSERT_BIN_ARRAYS_EQUALS( publish->topic.ptr, publish->topic.len, publish_view->topic.ptr, publish_view->topic.len); } aws_mutex_unlock(&test_context.lock); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_topic_a, "topic/a"); AWS_STATIC_STRING_FROM_LITERAL(s_topic_b, "b/topic"); AWS_STATIC_STRING_FROM_LITERAL(s_topic_c, "topic/c"); static int s_mqtt5_client_outbound_alias_manual_success_a_b_ar_br_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct outbound_alias_publish test_publishes[] = { DEFINE_OUTBOUND_ALIAS_PUBLISH(a, 1, 1, false), DEFINE_OUTBOUND_ALIAS_PUBLISH(b, 2, 2, false), DEFINE_OUTBOUND_ALIAS_PUBLISH(a, 1, 1, true), DEFINE_OUTBOUND_ALIAS_PUBLISH(b, 2, 2, true), }; ASSERT_SUCCESS(s_perform_outbound_alias_sequence_test( allocator, AWS_MQTT5_COTABT_MANUAL, test_publishes, AWS_ARRAY_SIZE(test_publishes))); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_outbound_alias_manual_success_a_b_ar_br, s_mqtt5_client_outbound_alias_manual_success_a_b_ar_br_fn) static int s_mqtt5_client_outbound_alias_lru_success_a_b_c_br_cr_a_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct outbound_alias_publish test_publishes[] = { DEFINE_OUTBOUND_ALIAS_PUBLISH(a, 0, 1, false), DEFINE_OUTBOUND_ALIAS_PUBLISH(b, 0, 2, false), DEFINE_OUTBOUND_ALIAS_PUBLISH(c, 0, 1, false), DEFINE_OUTBOUND_ALIAS_PUBLISH(b, 0, 2, true), DEFINE_OUTBOUND_ALIAS_PUBLISH(c, 0, 1, true), DEFINE_OUTBOUND_ALIAS_PUBLISH(a, 0, 2, false), }; ASSERT_SUCCESS(s_perform_outbound_alias_sequence_test( allocator, AWS_MQTT5_COTABT_LRU, test_publishes, AWS_ARRAY_SIZE(test_publishes))); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_client_outbound_alias_lru_success_a_b_c_br_cr_a, s_mqtt5_client_outbound_alias_lru_success_a_b_c_br_cr_a_fn) struct mqtt5_operation_timeout_completion_callback { enum aws_mqtt5_packet_type type; uint64_t timepoint_ns; int error_code; }; struct mqtt5_dynamic_operation_timeout_test_context { struct aws_allocator *allocator; struct aws_mqtt5_client_mock_test_fixture *fixture; struct aws_array_list completion_callbacks; const struct mqtt5_operation_timeout_completion_callback *expected_callbacks; size_t expected_callback_count; }; static void s_mqtt5_dynamic_operation_timeout_test_context_init( struct mqtt5_dynamic_operation_timeout_test_context *context, struct aws_allocator *allocator, struct aws_mqtt5_client_mock_test_fixture *fixture) { AWS_ZERO_STRUCT(*context); context->allocator = allocator; context->fixture = fixture; aws_array_list_init_dynamic( &context->completion_callbacks, allocator, 5, sizeof(struct mqtt5_operation_timeout_completion_callback)); } static void s_mqtt5_dynamic_operation_timeout_test_context_cleanup( struct mqtt5_dynamic_operation_timeout_test_context *context) { aws_array_list_clean_up(&context->completion_callbacks); } static bool s_mqtt5_dynamic_operation_timeout_test_context_callback_sequence_equals_expected( struct mqtt5_dynamic_operation_timeout_test_context *context) { if (context->expected_callback_count != aws_array_list_length(&context->completion_callbacks)) { return false; } for (size_t i = 0; i < 
context->expected_callback_count; ++i) { const struct mqtt5_operation_timeout_completion_callback *expected_callback = &context->expected_callbacks[i]; struct mqtt5_operation_timeout_completion_callback *callback = NULL; aws_array_list_get_at_ptr(&context->completion_callbacks, (void **)&callback, i); if (callback->type != expected_callback->type) { return false; } if (callback->error_code != expected_callback->error_code) { return false; } } return true; } static void s_add_completion_callback( struct mqtt5_dynamic_operation_timeout_test_context *context, enum aws_mqtt5_packet_type type, int error_code) { aws_mutex_lock(&context->fixture->lock); struct mqtt5_operation_timeout_completion_callback callback_entry = { .type = type, .error_code = error_code, .timepoint_ns = 0}; aws_high_res_clock_get_ticks(&callback_entry.timepoint_ns); aws_array_list_push_back(&context->completion_callbacks, &callback_entry); aws_mutex_unlock(&context->fixture->lock); aws_condition_variable_notify_all(&context->fixture->signal); } static void s_timeout_test_publish_completion_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)packet_type; (void)packet; s_add_completion_callback(complete_ctx, AWS_MQTT5_PT_PUBLISH, error_code); } static void s_timeout_test_subscribe_completion_fn( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { (void)suback; s_add_completion_callback(complete_ctx, AWS_MQTT5_PT_SUBSCRIBE, error_code); } static void s_timeout_test_unsubscribe_completion_fn( const struct aws_mqtt5_packet_unsuback_view *unsuback, int error_code, void *complete_ctx) { (void)unsuback; s_add_completion_callback(complete_ctx, AWS_MQTT5_PT_UNSUBSCRIBE, error_code); } static bool s_all_timeout_operations_complete(void *arg) { struct mqtt5_dynamic_operation_timeout_test_context *context = arg; return aws_array_list_length(&context->completion_callbacks) == context->expected_callback_count; } static void s_wait_for_all_operation_timeouts(struct mqtt5_dynamic_operation_timeout_test_context *context) { aws_mutex_lock(&context->fixture->lock); aws_condition_variable_wait_pred( &context->fixture->signal, &context->fixture->lock, s_all_timeout_operations_complete, context); aws_mutex_unlock(&context->fixture->lock); } /* * Tests a mixture of qos 0 publish, qos 1 publish, subscribe, and unsubscribe with override ack timeouts. 
* * qos 1 publish with 3 second timeout * subscribe with 2 second timeout * qos 0 publish * unsubscribe with 4 second timeout * qos 1 publish with 1 second timeout * * We expect to see callbacks in sequence: * * qos 0 publish success * qos 1 publish failure by timeout after 1 second * subscribe failure by timeout after 2 seconds * qos 1 publish failure by timeout after 3 seconds * unsubscribe failure by timeout after 4 seconds */ static int s_mqtt5_client_dynamic_operation_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = s_aws_mqtt5_mock_server_handle_timeout_publish; struct aws_mqtt5_client_mock_test_fixture test_context; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct mqtt5_dynamic_operation_timeout_test_context context; s_mqtt5_dynamic_operation_timeout_test_context_init(&context, allocator, &test_context); struct mqtt5_operation_timeout_completion_callback expected_callbacks[] = { { .type = AWS_MQTT5_PT_PUBLISH, .error_code = AWS_ERROR_SUCCESS, }, { .type = AWS_MQTT5_PT_PUBLISH, .error_code = AWS_ERROR_MQTT_TIMEOUT, }, { .type = AWS_MQTT5_PT_SUBSCRIBE, .error_code = AWS_ERROR_MQTT_TIMEOUT, }, { .type = AWS_MQTT5_PT_PUBLISH, .error_code = AWS_ERROR_MQTT_TIMEOUT, }, { .type = AWS_MQTT5_PT_UNSUBSCRIBE, .error_code = AWS_ERROR_MQTT_TIMEOUT, }, }; context.expected_callbacks = expected_callbacks; context.expected_callback_count = AWS_ARRAY_SIZE(expected_callbacks); struct aws_mqtt5_client *client = test_context.client; ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_context); uint64_t operation_start = 0; aws_high_res_clock_get_ticks(&operation_start); struct aws_byte_cursor topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }; // qos 1 publish - 3 second timeout struct aws_mqtt5_packet_publish_view qos1_publish = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = topic, }; struct aws_mqtt5_publish_completion_options qos1_publish_options = { .completion_callback = s_timeout_test_publish_completion_fn, .completion_user_data = &context, .ack_timeout_seconds_override = 3, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish, &qos1_publish_options)); // subscribe - 2 seconds timeout struct aws_mqtt5_subscription_view subscriptions[] = {{ .topic_filter = topic, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, }}; struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = subscriptions, .subscription_count = AWS_ARRAY_SIZE(subscriptions), }; struct aws_mqtt5_subscribe_completion_options subscribe_options = { .completion_callback = s_timeout_test_subscribe_completion_fn, .completion_user_data = &context, .ack_timeout_seconds_override = 2, }; ASSERT_SUCCESS(aws_mqtt5_client_subscribe(client, &subscribe_view, &subscribe_options)); // qos 0 publish struct aws_mqtt5_packet_publish_view qos0_publish = { .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .topic = topic, }; struct aws_mqtt5_publish_completion_options qos0_publish_options = { .completion_callback = s_timeout_test_publish_completion_fn, .completion_user_data = &context, }; 
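    /*
     * Note: the qos 0 publish options above intentionally carry no ack_timeout_seconds_override.  A QoS 0
     * publish has no ack to wait for, so its completion is expected to succeed immediately, which is why the
     * expected callback sequence starts with a PUBLISH completion whose error code is AWS_ERROR_SUCCESS.
     */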
    ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos0_publish, &qos0_publish_options));

    // unsubscribe - 4 second timeout
    struct aws_byte_cursor topic_filters[] = {
        topic,
    };

    struct aws_mqtt5_packet_unsubscribe_view unsubscribe = {
        .topic_filters = topic_filters,
        .topic_filter_count = AWS_ARRAY_SIZE(topic_filters),
    };

    struct aws_mqtt5_unsubscribe_completion_options unsubscribe_options = {
        .completion_callback = s_timeout_test_unsubscribe_completion_fn,
        .completion_user_data = &context,
        .ack_timeout_seconds_override = 4,
    };

    ASSERT_SUCCESS(aws_mqtt5_client_unsubscribe(client, &unsubscribe, &unsubscribe_options));

    // qos 1 publish - 1 second timeout
    qos1_publish_options.ack_timeout_seconds_override = 1;
    ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish, &qos1_publish_options));

    s_wait_for_all_operation_timeouts(&context);

    ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL));
    aws_wait_for_stopped_lifecycle_event(&test_context);

    aws_mutex_lock(&test_context.lock);
    ASSERT_TRUE(s_mqtt5_dynamic_operation_timeout_test_context_callback_sequence_equals_expected(&context));
    aws_mutex_unlock(&test_context.lock);

    /*
     * Finally, do a minimum time elapsed check:
     * each operation after the first (the qos 0 publish which did not time out) should have a completion
     * timepoint N seconds or later after the start of the test (where N is the operation's index in the sequence)
     */
    for (size_t i = 1; i < context.expected_callback_count; ++i) {
        struct mqtt5_operation_timeout_completion_callback *callback = NULL;
        aws_array_list_get_at_ptr(&context.completion_callbacks, (void **)&callback, i);

        uint64_t delta_ns = callback->timepoint_ns - operation_start;
        ASSERT_TRUE(delta_ns >= aws_timestamp_convert(i, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL));
    }

    s_mqtt5_dynamic_operation_timeout_test_context_cleanup(&context);
    aws_mqtt5_client_mock_test_fixture_clean_up(&test_context);
    aws_mqtt_library_clean_up();

    return AWS_OP_SUCCESS;
}

AWS_TEST_CASE(mqtt5_client_dynamic_operation_timeout, s_mqtt5_client_dynamic_operation_timeout_fn)

#define DYNAMIC_TIMEOUT_DEFAULT_SECONDS 2

/*
 * Checks that using an override operation timeout of zero results in using the client's default timeout
 */
static int s_mqtt5_client_dynamic_operation_timeout_default_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    aws_mqtt_library_init(allocator);

    struct mqtt5_client_test_options test_options;
    aws_mqtt5_client_test_init_default_options(&test_options);

    test_options.client_options.ack_timeout_seconds = DYNAMIC_TIMEOUT_DEFAULT_SECONDS;
    test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] =
        s_aws_mqtt5_mock_server_handle_timeout_publish;

    struct aws_mqtt5_client_mock_test_fixture test_context;
    struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = {
        .client_options = &test_options.client_options,
        .server_function_table = &test_options.server_function_table,
    };
    ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options));

    struct mqtt5_dynamic_operation_timeout_test_context context;
    s_mqtt5_dynamic_operation_timeout_test_context_init(&context, allocator, &test_context);

    struct mqtt5_operation_timeout_completion_callback expected_callbacks[] = {
        {
            .type = AWS_MQTT5_PT_PUBLISH,
            .error_code = AWS_ERROR_MQTT_TIMEOUT,
        },
    };
    context.expected_callbacks = expected_callbacks;
    context.expected_callback_count = AWS_ARRAY_SIZE(expected_callbacks);

    struct aws_mqtt5_client *client = test_context.client;
    ASSERT_SUCCESS(aws_mqtt5_client_start(client));
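    /*
     * The single publish below uses ack_timeout_seconds_override = 0; per the comment above, a zero override
     * is expected to fall back to the client-level ack_timeout_seconds (DYNAMIC_TIMEOUT_DEFAULT_SECONDS),
     * which is what the elapsed-time check at the end of this test verifies.
     */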
aws_wait_for_connected_lifecycle_event(&test_context); uint64_t operation_start = 0; aws_high_res_clock_get_ticks(&operation_start); struct aws_byte_cursor topic = { .ptr = s_topic, .len = AWS_ARRAY_SIZE(s_topic) - 1, }; struct aws_mqtt5_packet_publish_view qos1_publish = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = topic, }; struct aws_mqtt5_publish_completion_options qos1_publish_options = { .completion_callback = s_timeout_test_publish_completion_fn, .completion_user_data = &context, .ack_timeout_seconds_override = 0, }; ASSERT_SUCCESS(aws_mqtt5_client_publish(client, &qos1_publish, &qos1_publish_options)); s_wait_for_all_operation_timeouts(&context); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); aws_mutex_lock(&test_context.lock); s_mqtt5_dynamic_operation_timeout_test_context_callback_sequence_equals_expected(&context); aws_mutex_unlock(&test_context.lock); /* * Finally, do a minimum time elapsed check: */ for (size_t i = 0; i < context.expected_callback_count; ++i) { struct mqtt5_operation_timeout_completion_callback *callback = NULL; aws_array_list_get_at_ptr(&context.completion_callbacks, (void **)&callback, i); uint64_t delta_ns = callback->timepoint_ns - operation_start; ASSERT_TRUE( delta_ns >= aws_timestamp_convert(DYNAMIC_TIMEOUT_DEFAULT_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); } s_mqtt5_dynamic_operation_timeout_test_context_cleanup(&context); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_dynamic_operation_timeout_default, s_mqtt5_client_dynamic_operation_timeout_default_fn)aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_encoding_tests.c000066400000000000000000002237701456575232400262320ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "mqtt5_testing_utils.h" #include #include #include #include #include #include #include #include static int s_mqtt5_vli_size_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; size_t encode_size = 0; ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(0, &encode_size)); ASSERT_INT_EQUALS(1, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(1, &encode_size)); ASSERT_INT_EQUALS(1, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(127, &encode_size)); ASSERT_INT_EQUALS(1, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(128, &encode_size)); ASSERT_INT_EQUALS(2, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(256, &encode_size)); ASSERT_INT_EQUALS(2, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(16383, &encode_size)); ASSERT_INT_EQUALS(2, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(16384, &encode_size)); ASSERT_INT_EQUALS(3, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(16385, &encode_size)); ASSERT_INT_EQUALS(3, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(2097151, &encode_size)); ASSERT_INT_EQUALS(3, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(2097152, &encode_size)); ASSERT_INT_EQUALS(4, encode_size); ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER, &encode_size)); ASSERT_INT_EQUALS(4, encode_size); ASSERT_FAILS( aws_mqtt5_get_variable_length_encode_size(AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER + 1, &encode_size)); ASSERT_FAILS(aws_mqtt5_get_variable_length_encode_size(0xFFFFFFFF, &encode_size)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_vli_size, s_mqtt5_vli_size_fn) static int s_do_success_round_trip_vli_test(uint32_t value, struct aws_allocator *allocator) { struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 4); ASSERT_SUCCESS(aws_mqtt5_encode_variable_length_integer(&buffer, value)); size_t encoded_size = 0; ASSERT_SUCCESS(aws_mqtt5_get_variable_length_encode_size(value, &encoded_size)); ASSERT_INT_EQUALS(encoded_size, buffer.len); uint32_t decoded_value = 0; for (size_t i = 1; i < encoded_size; ++i) { struct aws_byte_cursor partial_cursor = aws_byte_cursor_from_buf(&buffer); partial_cursor.len = i; enum aws_mqtt5_decode_result_type result = aws_mqtt5_decode_vli(&partial_cursor, &decoded_value); ASSERT_INT_EQUALS(AWS_MQTT5_DRT_MORE_DATA, result); } struct aws_byte_cursor full_cursor = aws_byte_cursor_from_buf(&buffer); enum aws_mqtt5_decode_result_type result = aws_mqtt5_decode_vli(&full_cursor, &decoded_value); ASSERT_INT_EQUALS(AWS_MQTT5_DRT_SUCCESS, result); ASSERT_INT_EQUALS(decoded_value, value); aws_byte_buf_clean_up(&buffer); return AWS_OP_SUCCESS; } static int s_mqtt5_vli_success_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_success_round_trip_vli_test(0, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(1, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(47, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(127, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(128, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(129, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(511, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(8000, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(16383, allocator)); 
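    /*
     * Boundary note: MQTT variable length integers encode 7 bits per byte, so 0-127 fit in one byte,
     * 128-16383 in two, 16384-2097151 in three, and values up to AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER
     * (268,435,455) in four.  For example, 16384 encodes as 0x80 0x80 0x01.  The two calls surrounding this
     * comment straddle the two-byte/three-byte boundary.
     */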
ASSERT_SUCCESS(s_do_success_round_trip_vli_test(16384, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(16385, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(100000, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(4200000, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(34200000, allocator)); ASSERT_SUCCESS(s_do_success_round_trip_vli_test(AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER, allocator)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_vli_success_round_trip, s_mqtt5_vli_success_round_trip_fn) static int s_mqtt5_vli_encode_failures_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buffer; aws_byte_buf_init(&buffer, allocator, 4); ASSERT_FAILS(aws_mqtt5_encode_variable_length_integer(&buffer, AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER + 1)); ASSERT_FAILS(aws_mqtt5_encode_variable_length_integer(&buffer, 0x80000000)); ASSERT_FAILS(aws_mqtt5_encode_variable_length_integer(&buffer, 0xFFFFFFFF)); aws_byte_buf_clean_up(&buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_vli_encode_failures, s_mqtt5_vli_encode_failures_fn) static uint8_t bad_buffers0[] = {0x80, 0x80, 0x80, 0x80}; static uint8_t bad_buffers1[] = {0x81, 0x81, 0x81, 0xFF}; static int s_mqtt5_vli_decode_failures_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; uint32_t value = 0; struct aws_byte_cursor cursor = aws_byte_cursor_from_array(&bad_buffers0[0], AWS_ARRAY_SIZE(bad_buffers0)); ASSERT_INT_EQUALS(AWS_MQTT5_DRT_ERROR, aws_mqtt5_decode_vli(&cursor, &value)); cursor = aws_byte_cursor_from_array(&bad_buffers1[0], AWS_ARRAY_SIZE(bad_buffers1)); ASSERT_INT_EQUALS(AWS_MQTT5_DRT_ERROR, aws_mqtt5_decode_vli(&cursor, &value)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_vli_decode_failures, s_mqtt5_vli_decode_failures_fn) static char s_user_prop1_name[] = "Property1"; static char s_user_prop1_value[] = "Value1"; static char s_user_prop2_name[] = "Property2"; static char s_user_prop2_value[] = "Value2"; static const struct aws_mqtt5_user_property s_user_properties[] = { { .name = { .ptr = (uint8_t *)s_user_prop1_name, .len = AWS_ARRAY_SIZE(s_user_prop1_name) - 1, }, .value = { .ptr = (uint8_t *)s_user_prop1_value, .len = AWS_ARRAY_SIZE(s_user_prop1_value) - 1, }, }, { .name = { .ptr = (uint8_t *)s_user_prop2_name, .len = AWS_ARRAY_SIZE(s_user_prop2_name) - 1, }, .value = { .ptr = (uint8_t *)s_user_prop2_value, .len = AWS_ARRAY_SIZE(s_user_prop2_value) - 1, }, }, }; static const char *s_reason_string = "This is why I'm disconnecting"; static const char *s_server_reference = "connect-here-instead.com"; static const char *s_will_payload = "{\"content\":\"This is the payload of a will message\"}"; static const char *s_will_topic = "zomg/where-did/my/connection-go"; static const char *s_will_response_topic = "TheWillResponseTopic"; static const char *s_will_correlation_data = "ThisAndThat"; static const char *s_will_content_type = "Json"; static const char *s_client_id = "DeviceNumber47"; static const char *s_username = "MyUser"; static const char *s_password = "SuprSekritDontRead"; struct aws_mqtt5_encode_decode_tester { struct aws_mqtt5_encoder_function_table encoder_function_table; struct aws_mqtt5_decoder_function_table decoder_function_table; size_t view_count; size_t expected_view_count; void *expected_views; }; static void s_aws_mqtt5_encode_decoder_tester_init_single_view( struct aws_mqtt5_encode_decode_tester *tester, void *expected_view) { tester->view_count = 0; tester->expected_view_count = 1; tester->expected_views 
= expected_view; aws_mqtt5_encode_init_testing_function_table(&tester->encoder_function_table); aws_mqtt5_decode_init_testing_function_table(&tester->decoder_function_table); } int s_check_packet_encoding_reserved_flags(struct aws_byte_buf *encoding, enum aws_mqtt5_packet_type packet_type) { if (packet_type == AWS_MQTT5_PT_PUBLISH) { return AWS_OP_SUCCESS; } uint8_t expected_reserved_flags_value = 0; switch (packet_type) { case AWS_MQTT5_PT_PUBREL: case AWS_MQTT5_PT_SUBSCRIBE: case AWS_MQTT5_PT_UNSUBSCRIBE: expected_reserved_flags_value = 0x02; break; default: break; } uint8_t first_byte = encoding->buffer[0]; uint8_t encoded_packet_type = (first_byte >> 4) & 0x0F; ASSERT_INT_EQUALS(packet_type, encoded_packet_type); uint8_t reserved_flags = first_byte & 0x0F; ASSERT_INT_EQUALS(expected_reserved_flags_value, reserved_flags); return AWS_OP_SUCCESS; } typedef int(aws_mqtt5_check_encoding_fn)(struct aws_byte_buf *encoding); struct aws_mqtt5_packet_round_trip_test_context { struct aws_allocator *allocator; enum aws_mqtt5_packet_type packet_type; void *packet_view; aws_mqtt5_on_packet_received_fn *decoder_callback; aws_mqtt5_check_encoding_fn *encoding_checker; }; static int s_aws_mqtt5_encode_decode_round_trip_test( struct aws_mqtt5_packet_round_trip_test_context *context, size_t encode_fragment_size, size_t decode_fragment_size) { struct aws_byte_buf whole_dest; aws_byte_buf_init(&whole_dest, context->allocator, 4096); struct aws_mqtt5_encode_decode_tester tester; s_aws_mqtt5_encode_decoder_tester_init_single_view(&tester, context->packet_view); struct aws_mqtt5_encoder_options encoder_options = { .encoders = &tester.encoder_function_table, }; struct aws_mqtt5_encoder encoder; ASSERT_SUCCESS(aws_mqtt5_encoder_init(&encoder, context->allocator, &encoder_options)); ASSERT_SUCCESS(aws_mqtt5_encoder_append_packet_encoding(&encoder, context->packet_type, context->packet_view)); enum aws_mqtt5_encoding_result result = AWS_MQTT5_ER_OUT_OF_ROOM; while (result == AWS_MQTT5_ER_OUT_OF_ROOM) { struct aws_byte_buf fragment_dest; aws_byte_buf_init(&fragment_dest, context->allocator, encode_fragment_size); result = aws_mqtt5_encoder_encode_to_buffer(&encoder, &fragment_dest); ASSERT_TRUE(result != AWS_MQTT5_ER_ERROR); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_from_buf(&fragment_dest); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&whole_dest, &fragment_cursor)); aws_byte_buf_clean_up(&fragment_dest); } /* * For packet types that support encoded size calculations (outgoing operations), verify that the encoded size * calculation matches the length we encoded */ size_t expected_packet_size = 0; if (!aws_mqtt5_packet_view_get_encoded_size(context->packet_type, context->packet_view, &expected_packet_size)) { ASSERT_INT_EQUALS(whole_dest.len, expected_packet_size); } ASSERT_SUCCESS(s_check_packet_encoding_reserved_flags(&whole_dest, context->packet_type)); ASSERT_INT_EQUALS(AWS_MQTT5_ER_FINISHED, result); struct aws_mqtt5_decoder_options decoder_options = { .on_packet_received = context->decoder_callback, .callback_user_data = &tester, .decoder_table = &tester.decoder_function_table, }; struct aws_mqtt5_decoder decoder; ASSERT_SUCCESS(aws_mqtt5_decoder_init(&decoder, context->allocator, &decoder_options)); struct aws_mqtt5_inbound_topic_alias_resolver inbound_alias_resolver; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_init(&inbound_alias_resolver, context->allocator)); ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_reset(&inbound_alias_resolver, 65535)); 
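    /*
     * The decoder is given an inbound topic alias resolver because decoding PUBLISH packets that carry a
     * topic alias requires one (see the earlier client-test comment about the inbound resolver always
     * resolving topics).  Resetting it with 65535 presumably opens the full 16-bit alias range so any alias
     * used by a test view can be registered.
     */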
aws_mqtt5_decoder_set_inbound_topic_alias_resolver(&decoder, &inbound_alias_resolver); struct aws_byte_cursor whole_cursor = aws_byte_cursor_from_buf(&whole_dest); while (whole_cursor.len > 0) { size_t advance = aws_min_size(whole_cursor.len, decode_fragment_size); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&whole_cursor, advance); ASSERT_SUCCESS(aws_mqtt5_decoder_on_data_received(&decoder, fragment_cursor)); } ASSERT_INT_EQUALS(1, tester.view_count); /* Now do a check where we partially decode the buffer, reset, and then fully decode. */ aws_mqtt5_decoder_reset(&decoder); tester.view_count = 0; whole_cursor = aws_byte_cursor_from_buf(&whole_dest); if (decode_fragment_size >= whole_cursor.len) { whole_cursor.len--; } else { whole_cursor.len -= decode_fragment_size; } while (whole_cursor.len > 0) { size_t advance = aws_min_size(whole_cursor.len, decode_fragment_size); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&whole_cursor, advance); ASSERT_SUCCESS(aws_mqtt5_decoder_on_data_received(&decoder, fragment_cursor)); } /* Nothing should have been received */ ASSERT_INT_EQUALS(0, tester.view_count); /* Reset and decode the whole packet, everything should be fine */ aws_mqtt5_decoder_reset(&decoder); whole_cursor = aws_byte_cursor_from_buf(&whole_dest); while (whole_cursor.len > 0) { size_t advance = aws_min_size(whole_cursor.len, decode_fragment_size); struct aws_byte_cursor fragment_cursor = aws_byte_cursor_advance(&whole_cursor, advance); ASSERT_SUCCESS(aws_mqtt5_decoder_on_data_received(&decoder, fragment_cursor)); } ASSERT_INT_EQUALS(1, tester.view_count); aws_byte_buf_clean_up(&whole_dest); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&inbound_alias_resolver); aws_mqtt5_encoder_clean_up(&encoder); aws_mqtt5_decoder_clean_up(&decoder); return AWS_OP_SUCCESS; } static size_t s_encode_fragment_sizes[] = {4, 5, 7, 65536}; static size_t s_decode_fragment_sizes[] = {1, 2, 3, 11, 65536}; static int s_aws_mqtt5_encode_decode_round_trip_matrix_test(struct aws_mqtt5_packet_round_trip_test_context *context) { for (size_t i = 0; i < AWS_ARRAY_SIZE(s_encode_fragment_sizes); ++i) { size_t encode_fragment_size = s_encode_fragment_sizes[i]; for (size_t j = 0; j < AWS_ARRAY_SIZE(s_decode_fragment_sizes); ++j) { size_t decode_fragment_size = s_decode_fragment_sizes[j]; ASSERT_SUCCESS( s_aws_mqtt5_encode_decode_round_trip_test(context, encode_fragment_size, decode_fragment_size)); } } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_on_disconnect_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_DISCONNECT, (uint32_t)type); struct aws_mqtt5_packet_disconnect_view *disconnect_view = packet_view; struct aws_mqtt5_packet_disconnect_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS((uint32_t)expected_view->reason_code, (uint32_t)disconnect_view->reason_code); ASSERT_INT_EQUALS( *expected_view->session_expiry_interval_seconds, *disconnect_view->session_expiry_interval_seconds); ASSERT_BIN_ARRAYS_EQUALS( expected_view->reason_string->ptr, expected_view->reason_string->len, disconnect_view->reason_string->ptr, disconnect_view->reason_string->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->server_reference->ptr, expected_view->server_reference->len, disconnect_view->server_reference->ptr, disconnect_view->server_reference->len); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( 
disconnect_view->user_property_count, disconnect_view->user_properties, expected_view->user_property_count, expected_view->user_properties)); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_disconnect_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t session_expiry_interval_seconds = 333; struct aws_byte_cursor reason_string_cursor = aws_byte_cursor_from_c_str(s_reason_string); struct aws_byte_cursor server_reference_cursor = aws_byte_cursor_from_c_str(s_server_reference); struct aws_mqtt5_packet_disconnect_view disconnect_view = { .reason_code = AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE, .session_expiry_interval_seconds = &session_expiry_interval_seconds, .reason_string = &reason_string_cursor, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .server_reference = &server_reference_cursor, }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_DISCONNECT, .packet_view = &disconnect_view, .decoder_callback = s_aws_mqtt5_on_disconnect_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_disconnect_round_trip, s_mqtt5_packet_disconnect_round_trip_fn) static int s_aws_mqtt5_on_pingreq_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_PINGREQ, (uint32_t)type); ASSERT_NULL(packet_view); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_pingreq_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_PINGREQ, .packet_view = NULL, .decoder_callback = s_aws_mqtt5_on_pingreq_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_pingreq_round_trip, s_mqtt5_packet_pingreq_round_trip_fn) static int s_aws_mqtt5_on_pingresp_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_PINGRESP, (uint32_t)type); ASSERT_NULL(packet_view); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_pingresp_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_PINGRESP, .packet_view = NULL, .decoder_callback = s_aws_mqtt5_on_pingresp_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_pingresp_round_trip, s_mqtt5_packet_pingresp_round_trip_fn) static int s_aws_mqtt5_on_connect_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_CONNECT, (uint32_t)type); struct aws_mqtt5_packet_connect_view *connect_view = packet_view; struct aws_mqtt5_packet_connect_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->keep_alive_interval_seconds, connect_view->keep_alive_interval_seconds); ASSERT_BIN_ARRAYS_EQUALS( expected_view->client_id.ptr, expected_view->client_id.len, connect_view->client_id.ptr, 
connect_view->client_id.len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->username->ptr, expected_view->username->len, connect_view->username->ptr, connect_view->username->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->password->ptr, expected_view->password->len, connect_view->password->ptr, connect_view->password->len); ASSERT_TRUE(expected_view->clean_start == connect_view->clean_start); ASSERT_INT_EQUALS(*expected_view->session_expiry_interval_seconds, *connect_view->session_expiry_interval_seconds); ASSERT_INT_EQUALS(*expected_view->request_response_information, *connect_view->request_response_information); ASSERT_INT_EQUALS(*expected_view->request_problem_information, *connect_view->request_problem_information); ASSERT_INT_EQUALS(*expected_view->receive_maximum, *connect_view->receive_maximum); ASSERT_INT_EQUALS(*expected_view->topic_alias_maximum, *connect_view->topic_alias_maximum); ASSERT_INT_EQUALS(*expected_view->maximum_packet_size_bytes, *connect_view->maximum_packet_size_bytes); ASSERT_INT_EQUALS(*expected_view->will_delay_interval_seconds, *connect_view->will_delay_interval_seconds); const struct aws_mqtt5_packet_publish_view *expected_will_view = expected_view->will; const struct aws_mqtt5_packet_publish_view *will_view = connect_view->will; ASSERT_BIN_ARRAYS_EQUALS( expected_will_view->payload.ptr, expected_will_view->payload.len, will_view->payload.ptr, will_view->payload.len); ASSERT_INT_EQUALS(expected_will_view->qos, will_view->qos); ASSERT_INT_EQUALS(expected_will_view->retain, will_view->retain); ASSERT_BIN_ARRAYS_EQUALS( expected_will_view->topic.ptr, expected_will_view->topic.len, will_view->topic.ptr, will_view->topic.len); ASSERT_INT_EQUALS(*expected_will_view->payload_format, *will_view->payload_format); ASSERT_INT_EQUALS( *expected_will_view->message_expiry_interval_seconds, *will_view->message_expiry_interval_seconds); ASSERT_BIN_ARRAYS_EQUALS( expected_will_view->response_topic->ptr, expected_will_view->response_topic->len, will_view->response_topic->ptr, will_view->response_topic->len); ASSERT_BIN_ARRAYS_EQUALS( expected_will_view->correlation_data->ptr, expected_will_view->correlation_data->len, will_view->correlation_data->ptr, will_view->correlation_data->len); ASSERT_BIN_ARRAYS_EQUALS( expected_will_view->content_type->ptr, expected_will_view->content_type->len, will_view->content_type->ptr, will_view->content_type->len); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_will_view->user_property_count, expected_will_view->user_properties, will_view->user_property_count, will_view->user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, connect_view->user_property_count, connect_view->user_properties)); ASSERT_BIN_ARRAYS_EQUALS( expected_view->authentication_method->ptr, expected_view->authentication_method->len, connect_view->authentication_method->ptr, connect_view->authentication_method->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->authentication_data->ptr, expected_view->authentication_data->len, connect_view->authentication_data->ptr, connect_view->authentication_data->len); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_encode_connect_to_buffer( struct aws_allocator *allocator, struct aws_mqtt5_packet_connect_view *connect_view, struct aws_byte_buf *buffer) { AWS_ZERO_STRUCT(*buffer); aws_byte_buf_init(buffer, allocator, 4096); struct aws_mqtt5_encoder_options encoder_options; AWS_ZERO_STRUCT(encoder_options); struct aws_mqtt5_encoder encoder; 
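    /*
     * This helper encodes the CONNECT view in a single pass into the 4096-byte buffer above (assumed large
     * enough for these test views), so a single AWS_MQTT5_ER_FINISHED result is expected; callers then
     * inspect raw header bytes such as the CONNECT flags.
     */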
ASSERT_SUCCESS(aws_mqtt5_encoder_init(&encoder, allocator, &encoder_options)); ASSERT_SUCCESS(aws_mqtt5_encoder_append_packet_encoding(&encoder, AWS_MQTT5_PT_CONNECT, connect_view)); enum aws_mqtt5_encoding_result result = aws_mqtt5_encoder_encode_to_buffer(&encoder, buffer); ASSERT_INT_EQUALS(AWS_MQTT5_ER_FINISHED, result); aws_mqtt5_encoder_clean_up(&encoder); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_connect_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor will_payload_cursor = aws_byte_cursor_from_c_str(s_will_payload); enum aws_mqtt5_payload_format_indicator payload_format = AWS_MQTT5_PFI_UTF8; uint32_t message_expiry_interval_seconds = 65537; struct aws_byte_cursor will_response_topic = aws_byte_cursor_from_c_str(s_will_response_topic); struct aws_byte_cursor will_correlation_data = aws_byte_cursor_from_c_str(s_will_correlation_data); struct aws_byte_cursor will_content_type = aws_byte_cursor_from_c_str(s_will_content_type); struct aws_byte_cursor username = aws_byte_cursor_from_c_str(s_username); struct aws_byte_cursor password = aws_byte_cursor_from_c_str(s_password); uint32_t session_expiry_interval_seconds = 3600; uint8_t request_response_information = 1; uint8_t request_problem_information = 1; uint16_t receive_maximum = 50; uint16_t topic_alias_maximum = 16; uint32_t maximum_packet_size_bytes = 1ULL << 24; uint32_t will_delay_interval_seconds = 30; struct aws_byte_cursor authentication_method = aws_byte_cursor_from_c_str("AuthMethod"); struct aws_byte_cursor authentication_data = aws_byte_cursor_from_c_str("SuperSecret"); struct aws_mqtt5_packet_publish_view will_view = { .payload = will_payload_cursor, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .retain = true, .topic = aws_byte_cursor_from_c_str(s_will_topic), .payload_format = &payload_format, .message_expiry_interval_seconds = &message_expiry_interval_seconds, .response_topic = &will_response_topic, .correlation_data = &will_correlation_data, .content_type = &will_content_type, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 1200, .client_id = aws_byte_cursor_from_c_str(s_client_id), .username = &username, .password = &password, .clean_start = true, .session_expiry_interval_seconds = &session_expiry_interval_seconds, .request_response_information = &request_response_information, .request_problem_information = &request_problem_information, .receive_maximum = &receive_maximum, .topic_alias_maximum = &topic_alias_maximum, .maximum_packet_size_bytes = &maximum_packet_size_bytes, .will_delay_interval_seconds = &will_delay_interval_seconds, .will = &will_view, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .authentication_method = &authentication_method, .authentication_data = &authentication_data, }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_CONNECT, .packet_view = &connect_view, .decoder_callback = s_aws_mqtt5_on_connect_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); struct aws_byte_buf encoding_buffer; ASSERT_SUCCESS(s_mqtt5_packet_encode_connect_to_buffer(allocator, &connect_view, &encoding_buffer)); /* * For this packet: 1 byte packet type + flags, 2 bytes vli remaining length + 7 bytes protocol prefix * (0x00, 0x04, "MQTT", 0x05), then we're at the CONNECT flags which we want 
to check */ size_t connect_flags_byte_index = 10; uint8_t connect_flags = encoding_buffer.buffer[connect_flags_byte_index]; /* * Verify Will flag (0x04), Will QoS (0x08), Will Retain (0x20), Username (0x80), * clean start (0x02), password (0x40) flags are set */ ASSERT_INT_EQUALS(connect_flags, 0xEE); aws_byte_buf_clean_up(&encoding_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_connect_round_trip, s_mqtt5_packet_connect_round_trip_fn) static int s_aws_mqtt5_on_connack_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_CONNACK, (uint32_t)type); struct aws_mqtt5_packet_connack_view *connack_view = packet_view; struct aws_mqtt5_packet_connack_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->session_present, connack_view->session_present); ASSERT_INT_EQUALS(expected_view->reason_code, connack_view->reason_code); ASSERT_INT_EQUALS(*expected_view->session_expiry_interval, *connack_view->session_expiry_interval); ASSERT_INT_EQUALS(*expected_view->receive_maximum, *connack_view->receive_maximum); ASSERT_INT_EQUALS(*expected_view->maximum_qos, *connack_view->maximum_qos); ASSERT_INT_EQUALS(*expected_view->retain_available, *connack_view->retain_available); ASSERT_INT_EQUALS(*expected_view->maximum_packet_size, *connack_view->maximum_packet_size); ASSERT_BIN_ARRAYS_EQUALS( expected_view->assigned_client_identifier->ptr, expected_view->assigned_client_identifier->len, connack_view->assigned_client_identifier->ptr, connack_view->assigned_client_identifier->len); ASSERT_INT_EQUALS(*expected_view->topic_alias_maximum, *connack_view->topic_alias_maximum); ASSERT_BIN_ARRAYS_EQUALS( expected_view->reason_string->ptr, expected_view->reason_string->len, connack_view->reason_string->ptr, connack_view->reason_string->len); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, connack_view->user_property_count, connack_view->user_properties)); ASSERT_INT_EQUALS( *expected_view->wildcard_subscriptions_available, *connack_view->wildcard_subscriptions_available); ASSERT_INT_EQUALS( *expected_view->subscription_identifiers_available, *connack_view->subscription_identifiers_available); ASSERT_INT_EQUALS(*expected_view->shared_subscriptions_available, *connack_view->shared_subscriptions_available); ASSERT_INT_EQUALS(*expected_view->server_keep_alive, *connack_view->server_keep_alive); ASSERT_BIN_ARRAYS_EQUALS( expected_view->response_information->ptr, expected_view->response_information->len, connack_view->response_information->ptr, connack_view->response_information->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->server_reference->ptr, expected_view->server_reference->len, connack_view->server_reference->ptr, connack_view->server_reference->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->authentication_method->ptr, expected_view->authentication_method->len, connack_view->authentication_method->ptr, connack_view->authentication_method->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->authentication_data->ptr, expected_view->authentication_data->len, connack_view->authentication_data->ptr, connack_view->authentication_data->len); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_connack_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t session_expiry_interval = 3600; uint16_t receive_maximum = 20; enum aws_mqtt5_qos 
maximum_qos = AWS_MQTT5_QOS_AT_LEAST_ONCE; bool retain_available = true; uint32_t maximum_packet_size = 1U << 18; struct aws_byte_cursor assigned_client_identifier = aws_byte_cursor_from_c_str("server-assigned-client-id-01"); uint16_t topic_alias_maximum = 10; struct aws_byte_cursor reason_string = aws_byte_cursor_from_c_str("You've been banned from this server"); bool wildcard_subscriptions_available = true; bool subscription_identifiers_available = true; bool shared_subscriptions_available = true; uint16_t server_keep_alive = 1200; struct aws_byte_cursor response_information = aws_byte_cursor_from_c_str("some/topic-prefix"); struct aws_byte_cursor server_reference = aws_byte_cursor_from_c_str("www.somewhere-else.com:1883"); struct aws_byte_cursor authentication_method = aws_byte_cursor_from_c_str("GSSAPI"); struct aws_byte_cursor authentication_data = aws_byte_cursor_from_c_str("TOP-SECRET"); struct aws_mqtt5_packet_connack_view connack_view = { .session_present = true, .reason_code = AWS_MQTT5_CRC_BANNED, .session_expiry_interval = &session_expiry_interval, .receive_maximum = &receive_maximum, .maximum_qos = &maximum_qos, .retain_available = &retain_available, .maximum_packet_size = &maximum_packet_size, .assigned_client_identifier = &assigned_client_identifier, .topic_alias_maximum = &topic_alias_maximum, .reason_string = &reason_string, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .wildcard_subscriptions_available = &wildcard_subscriptions_available, .subscription_identifiers_available = &subscription_identifiers_available, .shared_subscriptions_available = &shared_subscriptions_available, .server_keep_alive = &server_keep_alive, .response_information = &response_information, .server_reference = &server_reference, .authentication_method = &authentication_method, .authentication_data = &authentication_data, }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_CONNACK, .packet_view = &connack_view, .decoder_callback = s_aws_mqtt5_on_connack_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_connack_round_trip, s_mqtt5_packet_connack_round_trip_fn) static char s_subscription_topic_1[] = "test_topic_1"; static char s_subscription_topic_2[] = "test_topic_2"; static char s_subscription_topic_3[] = "test_topic_3"; static const struct aws_mqtt5_subscription_view s_subscriptions[] = { {.topic_filter = { .ptr = (uint8_t *)s_subscription_topic_1, .len = AWS_ARRAY_SIZE(s_subscription_topic_1) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = true, .retain_as_published = true, .retain_handling_type = AWS_MQTT5_RHT_DONT_SEND}, {.topic_filter = { .ptr = (uint8_t *)s_subscription_topic_2, .len = AWS_ARRAY_SIZE(s_subscription_topic_2) - 1, }, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .no_local = false, .retain_as_published = true, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE}, {.topic_filter = { .ptr = (uint8_t *)s_subscription_topic_3, .len = AWS_ARRAY_SIZE(s_subscription_topic_3) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = false, .retain_as_published = true, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE_IF_NEW}, }; int aws_mqtt5_test_verify_subscriptions_raw( size_t subscription_count, const struct aws_mqtt5_subscription_view *subscriptions, size_t expected_count, const struct aws_mqtt5_subscription_view *expected_subscriptions) { 
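    /* Shared helper: compare each decoded subscription field-by-field (topic filter, qos, no_local,
     * retain_as_published, retain_handling_type) against the expected subscription set. */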
ASSERT_UINT_EQUALS(expected_count, subscription_count); for (size_t i = 0; i < expected_count; ++i) { const struct aws_mqtt5_subscription_view *expected_subscription = &expected_subscriptions[i]; ASSERT_BIN_ARRAYS_EQUALS( expected_subscription->topic_filter.ptr, expected_subscription->topic_filter.len, subscriptions[i].topic_filter.ptr, subscriptions[i].topic_filter.len); ASSERT_INT_EQUALS(expected_subscription->qos, subscriptions[i].qos); ASSERT_INT_EQUALS(expected_subscription->no_local, subscriptions[i].no_local); ASSERT_INT_EQUALS(expected_subscription->retain_as_published, subscriptions[i].retain_as_published); ASSERT_INT_EQUALS(expected_subscription->retain_handling_type, subscriptions[i].retain_handling_type); } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_on_subscribe_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { (void)type; struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; struct aws_mqtt5_packet_subscribe_view *subscribe_view = packet_view; struct aws_mqtt5_packet_subscribe_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->packet_id, subscribe_view->packet_id); ASSERT_SUCCESS(aws_mqtt5_test_verify_subscriptions_raw( expected_view->subscription_count, expected_view->subscriptions, subscribe_view->subscription_count, subscribe_view->subscriptions)); ASSERT_INT_EQUALS(*expected_view->subscription_identifier, *subscribe_view->subscription_identifier); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, subscribe_view->user_property_count, subscribe_view->user_properties)); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_subscribe_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; uint32_t subscription_identifier = 1; struct aws_mqtt5_packet_subscribe_view subscribe_view = { .packet_id = packet_id, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), .subscriptions = &s_subscriptions[0], .subscription_identifier = &subscription_identifier, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_SUBSCRIBE, .packet_view = &subscribe_view, .decoder_callback = s_aws_mqtt5_on_subscribe_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_subscribe_round_trip, s_mqtt5_packet_subscribe_round_trip_fn) static int s_aws_mqtt5_on_suback_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_SUBACK, (uint32_t)type); struct aws_mqtt5_packet_suback_view *suback_view = packet_view; struct aws_mqtt5_packet_suback_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->packet_id, suback_view->packet_id); ASSERT_BIN_ARRAYS_EQUALS( expected_view->reason_string->ptr, expected_view->reason_string->len, suback_view->reason_string->ptr, suback_view->reason_string->len); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, suback_view->user_property_count, suback_view->user_properties)); ASSERT_INT_EQUALS(expected_view->reason_code_count, suback_view->reason_code_count); for (size_t i = 0; 
i < suback_view->reason_code_count; ++i) { ASSERT_INT_EQUALS(expected_view->reason_codes[i], suback_view->reason_codes[i]); } return AWS_OP_SUCCESS; } static int s_mqtt5_packet_suback_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; struct aws_byte_cursor reason_string = aws_byte_cursor_from_c_str("Some random reason string"); enum aws_mqtt5_suback_reason_code reason_code_1 = AWS_MQTT5_SARC_GRANTED_QOS_0; enum aws_mqtt5_suback_reason_code reason_code_2 = AWS_MQTT5_SARC_GRANTED_QOS_1; enum aws_mqtt5_suback_reason_code reason_code_3 = AWS_MQTT5_SARC_GRANTED_QOS_2; const enum aws_mqtt5_suback_reason_code reason_codes[3] = { reason_code_1, reason_code_2, reason_code_3, }; struct aws_mqtt5_packet_suback_view suback_view = { .packet_id = packet_id, .reason_string = &reason_string, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .reason_code_count = 3, .reason_codes = &reason_codes[0], }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_SUBACK, .packet_view = &suback_view, .decoder_callback = s_aws_mqtt5_on_suback_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_suback_round_trip, s_mqtt5_packet_suback_round_trip_fn) static char s_unsubscribe_topic_1[] = "test_topic_1"; static char s_unsubscribe_topic_2[] = "test_topic_2"; static char s_unsubscribe_topic_3[] = "test_topic_3"; static const struct aws_byte_cursor s_unsubscribe_topics[] = { { .ptr = (uint8_t *)s_unsubscribe_topic_1, .len = AWS_ARRAY_SIZE(s_unsubscribe_topic_1) - 1, }, { .ptr = (uint8_t *)s_unsubscribe_topic_2, .len = AWS_ARRAY_SIZE(s_unsubscribe_topic_2) - 1, }, { .ptr = (uint8_t *)s_unsubscribe_topic_3, .len = AWS_ARRAY_SIZE(s_unsubscribe_topic_3) - 1, }, }; static int s_aws_mqtt5_on_unsubscribe_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { (void)type; struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view = packet_view; struct aws_mqtt5_packet_unsubscribe_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->packet_id, unsubscribe_view->packet_id); ASSERT_INT_EQUALS(expected_view->topic_filter_count, unsubscribe_view->topic_filter_count); for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) { const struct aws_byte_cursor unsubscribe_topic = unsubscribe_view->topic_filters[i]; const struct aws_byte_cursor expected_topic = expected_view->topic_filters[i]; ASSERT_BIN_ARRAYS_EQUALS(expected_topic.ptr, expected_topic.len, unsubscribe_topic.ptr, unsubscribe_topic.len); } ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, unsubscribe_view->user_property_count, unsubscribe_view->user_properties)); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_unsubscribe_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .packet_id = packet_id, .topic_filter_count = AWS_ARRAY_SIZE(s_unsubscribe_topics), .topic_filters = &s_unsubscribe_topics[0], .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, 
.packet_type = AWS_MQTT5_PT_UNSUBSCRIBE, .packet_view = &unsubscribe_view, .decoder_callback = s_aws_mqtt5_on_unsubscribe_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_unsubscribe_round_trip, s_mqtt5_packet_unsubscribe_round_trip_fn) static int s_aws_mqtt5_on_unsuback_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_UNSUBACK, (uint32_t)type); struct aws_mqtt5_packet_unsuback_view *unsuback_view = packet_view; struct aws_mqtt5_packet_unsuback_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->packet_id, unsuback_view->packet_id); ASSERT_BIN_ARRAYS_EQUALS( expected_view->reason_string->ptr, expected_view->reason_string->len, unsuback_view->reason_string->ptr, unsuback_view->reason_string->len); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, unsuback_view->user_property_count, unsuback_view->user_properties)); ASSERT_INT_EQUALS(expected_view->reason_code_count, unsuback_view->reason_code_count); for (size_t i = 0; i < unsuback_view->reason_code_count; ++i) { ASSERT_INT_EQUALS(expected_view->reason_codes[i], unsuback_view->reason_codes[i]); } return AWS_OP_SUCCESS; } static int s_mqtt5_packet_unsuback_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; struct aws_byte_cursor reason_string = aws_byte_cursor_from_c_str("Some random reason string"); enum aws_mqtt5_unsuback_reason_code reason_code_1 = AWS_MQTT5_UARC_SUCCESS; enum aws_mqtt5_unsuback_reason_code reason_code_2 = AWS_MQTT5_UARC_NO_SUBSCRIPTION_EXISTED; enum aws_mqtt5_unsuback_reason_code reason_code_3 = AWS_MQTT5_UARC_UNSPECIFIED_ERROR; const enum aws_mqtt5_unsuback_reason_code reason_codes[3] = { reason_code_1, reason_code_2, reason_code_3, }; struct aws_mqtt5_packet_unsuback_view unsuback_view = { .packet_id = packet_id, .reason_string = &reason_string, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .reason_code_count = AWS_ARRAY_SIZE(reason_codes), .reason_codes = &reason_codes[0], }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_UNSUBACK, .packet_view = &unsuback_view, .decoder_callback = s_aws_mqtt5_on_unsuback_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_unsuback_round_trip, s_mqtt5_packet_unsuback_round_trip_fn) static char s_publish_topic[] = "test_topic"; static char s_publish_payload[] = "test payload contents"; static char s_publish_response_topic[] = "test response topic"; static char s_publish_correlation_data[] = "test correlation data"; static char s_publish_content_type[] = "test content type"; static int s_aws_mqtt5_on_publish_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { (void)type; struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; struct aws_mqtt5_packet_publish_view *publish_view = packet_view; struct aws_mqtt5_packet_publish_view *expected_view = tester->expected_views; ASSERT_BIN_ARRAYS_EQUALS( expected_view->payload.ptr, expected_view->payload.len, publish_view->payload.ptr, publish_view->payload.len); 
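    /*
     * packet_id is only meaningful for QoS > 0 publishes; the round-trip test below uses a QoS 1 view with
     * packet id 47, so an exact match is expected here.
     */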
ASSERT_INT_EQUALS(expected_view->packet_id, publish_view->packet_id); ASSERT_INT_EQUALS(expected_view->qos, publish_view->qos); ASSERT_INT_EQUALS(expected_view->duplicate, publish_view->duplicate); ASSERT_INT_EQUALS(expected_view->retain, publish_view->retain); ASSERT_BIN_ARRAYS_EQUALS( expected_view->topic.ptr, expected_view->topic.len, publish_view->topic.ptr, publish_view->topic.len); ASSERT_INT_EQUALS(*expected_view->payload_format, *publish_view->payload_format); ASSERT_INT_EQUALS(*expected_view->message_expiry_interval_seconds, *publish_view->message_expiry_interval_seconds); ASSERT_INT_EQUALS(*expected_view->topic_alias, *publish_view->topic_alias); ASSERT_BIN_ARRAYS_EQUALS( expected_view->response_topic->ptr, expected_view->response_topic->len, publish_view->response_topic->ptr, publish_view->response_topic->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->correlation_data->ptr, expected_view->correlation_data->len, publish_view->correlation_data->ptr, publish_view->correlation_data->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->content_type->ptr, expected_view->content_type->len, publish_view->content_type->ptr, publish_view->content_type->len); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, publish_view->user_property_count, publish_view->user_properties)); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_publish_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; enum aws_mqtt5_payload_format_indicator payload_format = AWS_MQTT5_PFI_UTF8; uint32_t message_expiry_interval_seconds = 100; uint16_t topic_alias = 1; struct aws_byte_cursor response_topic = { .ptr = (uint8_t *)s_publish_response_topic, .len = AWS_ARRAY_SIZE(s_publish_response_topic) - 1, }; struct aws_byte_cursor correlation_data = { .ptr = (uint8_t *)s_publish_correlation_data, .len = AWS_ARRAY_SIZE(s_publish_correlation_data) - 1, }; struct aws_byte_cursor content_type = { .ptr = (uint8_t *)s_publish_content_type, .len = AWS_ARRAY_SIZE(s_publish_content_type) - 1, }; uint32_t subscription_identifiers[2] = {2, 3}; struct aws_mqtt5_packet_publish_view publish_view = { .payload = { .ptr = (uint8_t *)s_publish_payload, .len = AWS_ARRAY_SIZE(s_publish_payload) - 1, }, .packet_id = packet_id, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .duplicate = false, .retain = false, .topic = { .ptr = (uint8_t *)s_publish_topic, .len = AWS_ARRAY_SIZE(s_publish_topic) - 1, }, .payload_format = &payload_format, .message_expiry_interval_seconds = &message_expiry_interval_seconds, .topic_alias = &topic_alias, .response_topic = &response_topic, .correlation_data = &correlation_data, .subscription_identifier_count = AWS_ARRAY_SIZE(subscription_identifiers), .subscription_identifiers = subscription_identifiers, .content_type = &content_type, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_PUBLISH, .packet_view = &publish_view, .decoder_callback = s_aws_mqtt5_on_publish_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_publish_round_trip, s_mqtt5_packet_publish_round_trip_fn) static char s_puback_reason_string[] = "test reason string"; static int s_aws_mqtt5_on_puback_received_fn(enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { 
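    /* Verify the decoded PUBACK against the expected view: packet id, reason code, reason string, and user
     * properties. */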
(void)type; struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; struct aws_mqtt5_packet_puback_view *puback_view = packet_view; struct aws_mqtt5_packet_puback_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->packet_id, puback_view->packet_id); ASSERT_INT_EQUALS(expected_view->reason_code, puback_view->reason_code); ASSERT_BIN_ARRAYS_EQUALS( expected_view->reason_string->ptr, expected_view->reason_string->len, puback_view->reason_string->ptr, puback_view->reason_string->len); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, puback_view->user_property_count, puback_view->user_properties)); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_puback_round_trip_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; enum aws_mqtt5_puback_reason_code reason_code = AWS_MQTT5_PARC_NO_MATCHING_SUBSCRIBERS; struct aws_byte_cursor reason_string = { .ptr = (uint8_t *)s_puback_reason_string, .len = AWS_ARRAY_SIZE(s_puback_reason_string) - 1, }; struct aws_mqtt5_packet_puback_view puback_view = { .packet_id = packet_id, .reason_code = reason_code, .reason_string = &reason_string, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_PUBACK, .packet_view = &puback_view, .decoder_callback = s_aws_mqtt5_on_puback_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_puback_round_trip, s_mqtt5_packet_puback_round_trip_fn) static int s_aws_mqtt5_on_no_will_connect_received_fn( enum aws_mqtt5_packet_type type, void *packet_view, void *user_data) { struct aws_mqtt5_encode_decode_tester *tester = user_data; ++tester->view_count; ASSERT_INT_EQUALS((uint32_t)AWS_MQTT5_PT_CONNECT, (uint32_t)type); struct aws_mqtt5_packet_connect_view *connect_view = packet_view; struct aws_mqtt5_packet_connect_view *expected_view = tester->expected_views; ASSERT_INT_EQUALS(expected_view->keep_alive_interval_seconds, connect_view->keep_alive_interval_seconds); ASSERT_BIN_ARRAYS_EQUALS( expected_view->client_id.ptr, expected_view->client_id.len, connect_view->client_id.ptr, connect_view->client_id.len); if (expected_view->username != NULL) { ASSERT_BIN_ARRAYS_EQUALS( expected_view->username->ptr, expected_view->username->len, connect_view->username->ptr, connect_view->username->len); } else { ASSERT_NULL(connect_view->username); } if (expected_view->password != NULL) { ASSERT_BIN_ARRAYS_EQUALS( expected_view->password->ptr, expected_view->password->len, connect_view->password->ptr, connect_view->password->len); } else { ASSERT_NULL(connect_view->password); } ASSERT_TRUE(expected_view->clean_start == connect_view->clean_start); ASSERT_INT_EQUALS(*expected_view->session_expiry_interval_seconds, *connect_view->session_expiry_interval_seconds); ASSERT_INT_EQUALS(*expected_view->request_response_information, *connect_view->request_response_information); ASSERT_INT_EQUALS(*expected_view->request_problem_information, *connect_view->request_problem_information); ASSERT_INT_EQUALS(*expected_view->receive_maximum, *connect_view->receive_maximum); ASSERT_INT_EQUALS(*expected_view->topic_alias_maximum, *connect_view->topic_alias_maximum); ASSERT_INT_EQUALS(*expected_view->maximum_packet_size_bytes, 
*connect_view->maximum_packet_size_bytes); ASSERT_NULL(connect_view->will_delay_interval_seconds); ASSERT_NULL(connect_view->will); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( expected_view->user_property_count, expected_view->user_properties, connect_view->user_property_count, connect_view->user_properties)); ASSERT_BIN_ARRAYS_EQUALS( expected_view->authentication_method->ptr, expected_view->authentication_method->len, connect_view->authentication_method->ptr, connect_view->authentication_method->len); ASSERT_BIN_ARRAYS_EQUALS( expected_view->authentication_data->ptr, expected_view->authentication_data->len, connect_view->authentication_data->ptr, connect_view->authentication_data->len); return AWS_OP_SUCCESS; } static int s_mqtt5_packet_encode_connect_no_will_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor username = aws_byte_cursor_from_c_str(s_username); struct aws_byte_cursor password = aws_byte_cursor_from_c_str(s_password); uint32_t session_expiry_interval_seconds = 3600; uint8_t request_response_information = 1; uint8_t request_problem_information = 1; uint16_t receive_maximum = 50; uint16_t topic_alias_maximum = 16; uint32_t maximum_packet_size_bytes = 1ULL << 24; uint32_t will_delay_interval_seconds = 30; struct aws_byte_cursor authentication_method = aws_byte_cursor_from_c_str("AuthMethod"); struct aws_byte_cursor authentication_data = aws_byte_cursor_from_c_str("SuperSecret"); struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 1200, .client_id = aws_byte_cursor_from_c_str(s_client_id), .username = &username, .password = &password, .clean_start = true, .session_expiry_interval_seconds = &session_expiry_interval_seconds, .request_response_information = &request_response_information, .request_problem_information = &request_problem_information, .receive_maximum = &receive_maximum, .topic_alias_maximum = &topic_alias_maximum, .maximum_packet_size_bytes = &maximum_packet_size_bytes, .will_delay_interval_seconds = &will_delay_interval_seconds, .will = NULL, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .authentication_method = &authentication_method, .authentication_data = &authentication_data, }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_CONNECT, .packet_view = &connect_view, .decoder_callback = s_aws_mqtt5_on_no_will_connect_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); struct aws_byte_buf encoding_buffer; ASSERT_SUCCESS(s_mqtt5_packet_encode_connect_to_buffer(allocator, &connect_view, &encoding_buffer)); /* * For this packet: 1 byte packet type + flags, 2 bytes vli remaining length + 7 bytes protocol prefix * (0x00, 0x04, "MQTT", 0x05), then we're at the CONNECT flags which we want to check */ size_t connect_flags_byte_index = 10; uint8_t connect_flags = encoding_buffer.buffer[connect_flags_byte_index]; /* * Verify Will flag, Will QoS, Will Retain are all zero, * while clean start (0x02), password (0x40) and username (0x80) flags are set */ ASSERT_INT_EQUALS(connect_flags, 0xC2); aws_byte_buf_clean_up(&encoding_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_encode_connect_no_will, s_mqtt5_packet_encode_connect_no_will_fn) static int s_mqtt5_packet_encode_connect_no_username_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor password = aws_byte_cursor_from_c_str(s_password); uint32_t 
session_expiry_interval_seconds = 3600; uint8_t request_response_information = 1; uint8_t request_problem_information = 1; uint16_t receive_maximum = 50; uint16_t topic_alias_maximum = 16; uint32_t maximum_packet_size_bytes = 1ULL << 24; uint32_t will_delay_interval_seconds = 30; struct aws_byte_cursor authentication_method = aws_byte_cursor_from_c_str("AuthMethod"); struct aws_byte_cursor authentication_data = aws_byte_cursor_from_c_str("SuperSecret"); struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 1200, .client_id = aws_byte_cursor_from_c_str(s_client_id), .username = NULL, .password = &password, .clean_start = true, .session_expiry_interval_seconds = &session_expiry_interval_seconds, .request_response_information = &request_response_information, .request_problem_information = &request_problem_information, .receive_maximum = &receive_maximum, .topic_alias_maximum = &topic_alias_maximum, .maximum_packet_size_bytes = &maximum_packet_size_bytes, .will_delay_interval_seconds = &will_delay_interval_seconds, .will = NULL, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .authentication_method = &authentication_method, .authentication_data = &authentication_data, }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_CONNECT, .packet_view = &connect_view, .decoder_callback = s_aws_mqtt5_on_no_will_connect_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); struct aws_byte_buf encoding_buffer; ASSERT_SUCCESS(s_mqtt5_packet_encode_connect_to_buffer(allocator, &connect_view, &encoding_buffer)); /* * For this packet: 1 byte packet type + flags, 2 bytes vli remaining length + 7 bytes protocol prefix * (0x00, 0x04, "MQTT", 0x05), then we're at the CONNECT flags which we want to check */ size_t connect_flags_byte_index = 10; uint8_t connect_flags = encoding_buffer.buffer[connect_flags_byte_index]; /* * Verify Will flag, Will QoS, Will Retain, Username are all zero, * while clean start (0x02), password (0x40) flags are set */ ASSERT_INT_EQUALS(connect_flags, 0x42); aws_byte_buf_clean_up(&encoding_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_encode_connect_no_username, s_mqtt5_packet_encode_connect_no_username_fn) static int s_mqtt5_packet_encode_connect_no_password_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor username = aws_byte_cursor_from_c_str(s_username); uint32_t session_expiry_interval_seconds = 3600; uint8_t request_response_information = 1; uint8_t request_problem_information = 1; uint16_t receive_maximum = 50; uint16_t topic_alias_maximum = 16; uint32_t maximum_packet_size_bytes = 1ULL << 24; uint32_t will_delay_interval_seconds = 30; struct aws_byte_cursor authentication_method = aws_byte_cursor_from_c_str("AuthMethod"); struct aws_byte_cursor authentication_data = aws_byte_cursor_from_c_str("SuperSecret"); struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 1200, .client_id = aws_byte_cursor_from_c_str(s_client_id), .username = &username, .password = NULL, .clean_start = true, .session_expiry_interval_seconds = &session_expiry_interval_seconds, .request_response_information = &request_response_information, .request_problem_information = &request_problem_information, .receive_maximum = &receive_maximum, .topic_alias_maximum = &topic_alias_maximum, .maximum_packet_size_bytes = &maximum_packet_size_bytes, 
.will_delay_interval_seconds = &will_delay_interval_seconds, .will = NULL, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .authentication_method = &authentication_method, .authentication_data = &authentication_data, }; struct aws_mqtt5_packet_round_trip_test_context context = { .allocator = allocator, .packet_type = AWS_MQTT5_PT_CONNECT, .packet_view = &connect_view, .decoder_callback = s_aws_mqtt5_on_no_will_connect_received_fn, }; ASSERT_SUCCESS(s_aws_mqtt5_encode_decode_round_trip_matrix_test(&context)); struct aws_byte_buf encoding_buffer; ASSERT_SUCCESS(s_mqtt5_packet_encode_connect_to_buffer(allocator, &connect_view, &encoding_buffer)); /* * For this packet: 1 byte packet type + flags, 1 byte vli remaining length + 7 bytes protocol prefix * (0x00, 0x04, "MQTT", 0x05), then we're at the CONNECT flags which we want to check */ size_t connect_flags_byte_index = 9; uint8_t connect_flags = encoding_buffer.buffer[connect_flags_byte_index]; /* * Verify Will flag, Will QoS, Will Retain, Password are all zero, * while clean start (0x02), username (0x80) flags are set */ ASSERT_INT_EQUALS(connect_flags, 0x82); aws_byte_buf_clean_up(&encoding_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_encode_connect_no_password, s_mqtt5_packet_encode_connect_no_password_fn) static int s_mqtt5_packet_encode_connect_will_property_order_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor will_payload_cursor = aws_byte_cursor_from_c_str(s_will_payload); enum aws_mqtt5_payload_format_indicator payload_format = AWS_MQTT5_PFI_UTF8; uint32_t message_expiry_interval_seconds = 65537; struct aws_byte_cursor will_response_topic = aws_byte_cursor_from_c_str(s_will_response_topic); struct aws_byte_cursor will_correlation_data = aws_byte_cursor_from_c_str(s_will_correlation_data); struct aws_byte_cursor will_content_type = aws_byte_cursor_from_c_str(s_will_content_type); struct aws_byte_cursor username = aws_byte_cursor_from_c_str(s_username); struct aws_byte_cursor password = aws_byte_cursor_from_c_str(s_password); uint32_t session_expiry_interval_seconds = 3600; uint8_t request_response_information = 1; uint8_t request_problem_information = 1; uint16_t receive_maximum = 50; uint16_t topic_alias_maximum = 16; uint32_t maximum_packet_size_bytes = 1ULL << 24; uint32_t will_delay_interval_seconds = 30; struct aws_byte_cursor authentication_method = aws_byte_cursor_from_c_str("AuthMethod"); struct aws_byte_cursor authentication_data = aws_byte_cursor_from_c_str("SuperSecret"); struct aws_mqtt5_packet_publish_view will_view = { .payload = will_payload_cursor, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .retain = true, .topic = aws_byte_cursor_from_c_str(s_will_topic), .payload_format = &payload_format, .message_expiry_interval_seconds = &message_expiry_interval_seconds, .response_topic = &will_response_topic, .correlation_data = &will_correlation_data, .content_type = &will_content_type, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 1200, .client_id = aws_byte_cursor_from_c_str(s_client_id), .username = &username, .password = &password, .clean_start = true, .session_expiry_interval_seconds = &session_expiry_interval_seconds, .request_response_information = &request_response_information, .request_problem_information = &request_problem_information, .receive_maximum = &receive_maximum, 
.topic_alias_maximum = &topic_alias_maximum, .maximum_packet_size_bytes = &maximum_packet_size_bytes, .will_delay_interval_seconds = &will_delay_interval_seconds, .will = &will_view, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .authentication_method = &authentication_method, .authentication_data = &authentication_data, }; struct aws_byte_buf encoding_buffer; ASSERT_SUCCESS(s_mqtt5_packet_encode_connect_to_buffer(allocator, &connect_view, &encoding_buffer)); struct aws_byte_cursor encoding_cursor = aws_byte_cursor_from_buf(&encoding_buffer); struct aws_byte_cursor client_id_cursor; AWS_ZERO_STRUCT(client_id_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&encoding_cursor, &connect_view.client_id, &client_id_cursor)); struct aws_byte_cursor will_topic_cursor; AWS_ZERO_STRUCT(will_topic_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&encoding_cursor, &will_view.topic, &will_topic_cursor)); struct aws_byte_cursor will_message_payload_cursor; AWS_ZERO_STRUCT(will_message_payload_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&encoding_cursor, &will_view.payload, &will_message_payload_cursor)); struct aws_byte_cursor username_cursor; AWS_ZERO_STRUCT(username_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&encoding_cursor, connect_view.username, &username_cursor)); struct aws_byte_cursor password_cursor; AWS_ZERO_STRUCT(password_cursor); ASSERT_SUCCESS(aws_byte_cursor_find_exact(&encoding_cursor, connect_view.password, &password_cursor)); ASSERT_NOT_NULL(client_id_cursor.ptr); ASSERT_NOT_NULL(will_topic_cursor.ptr); ASSERT_NOT_NULL(will_message_payload_cursor.ptr); ASSERT_NOT_NULL(username_cursor.ptr); ASSERT_NOT_NULL(password_cursor.ptr); ASSERT_TRUE(client_id_cursor.ptr < will_topic_cursor.ptr); ASSERT_TRUE(will_topic_cursor.ptr < will_message_payload_cursor.ptr); ASSERT_TRUE(will_message_payload_cursor.ptr < username_cursor.ptr); ASSERT_TRUE(username_cursor.ptr < password_cursor.ptr); aws_byte_buf_clean_up(&encoding_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_packet_encode_connect_will_property_order, s_mqtt5_packet_encode_connect_will_property_order_fn) static int s_aws_mqtt5_decoder_decode_subscribe_first_byte_check(struct aws_mqtt5_decoder *decoder) { uint8_t first_byte = decoder->packet_first_byte; if ((first_byte & 0x0F) != 2) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_decoder_decode_unsubscribe_first_byte_check(struct aws_mqtt5_decoder *decoder) { uint8_t first_byte = decoder->packet_first_byte; if ((first_byte & 0x0F) != 2) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_decoder_decode_disconnect_first_byte_check(struct aws_mqtt5_decoder *decoder) { uint8_t first_byte = decoder->packet_first_byte; if ((first_byte & 0x0F) != 0) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static int s_aws_mqtt5_first_byte_check( struct aws_allocator *allocator, enum aws_mqtt5_packet_type packet_type, void *packet_view) { struct aws_byte_buf whole_dest; aws_byte_buf_init(&whole_dest, allocator, 4096); struct aws_mqtt5_encode_decode_tester tester; aws_mqtt5_encode_init_testing_function_table(&tester.encoder_function_table); aws_mqtt5_decode_init_testing_function_table(&tester.decoder_function_table); tester.decoder_function_table.decoders_by_packet_type[AWS_MQTT5_PT_SUBSCRIBE] = &s_aws_mqtt5_decoder_decode_subscribe_first_byte_check; tester.decoder_function_table.decoders_by_packet_type[AWS_MQTT5_PT_UNSUBSCRIBE] = &s_aws_mqtt5_decoder_decode_unsubscribe_first_byte_check; 
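    /*
     * Illustrative note (per the MQTT5 specification): these override decoders only inspect the low nibble of
     * the fixed-header first byte.  The first byte of an MQTT packet is (packet type << 4) | flags, and for
     * these packet types the flags nibble is reserved: SUBSCRIBE (type 8) and UNSUBSCRIBE (type 10) must use
     * 0x2 while DISCONNECT (type 14) must use 0x0, so a conforming encoder emits first bytes of 0x82, 0xA2
     * and 0xE0 respectively.  That is exactly what the (first_byte & 0x0F) comparisons above verify.
     */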
tester.decoder_function_table.decoders_by_packet_type[AWS_MQTT5_PT_DISCONNECT] = &s_aws_mqtt5_decoder_decode_disconnect_first_byte_check; struct aws_mqtt5_encoder_options encoder_options = { .encoders = &tester.encoder_function_table, }; struct aws_mqtt5_encoder encoder; ASSERT_SUCCESS(aws_mqtt5_encoder_init(&encoder, allocator, &encoder_options)); ASSERT_SUCCESS(aws_mqtt5_encoder_append_packet_encoding(&encoder, packet_type, packet_view)); enum aws_mqtt5_encoding_result result = AWS_MQTT5_ER_ERROR; result = aws_mqtt5_encoder_encode_to_buffer(&encoder, &whole_dest); ASSERT_INT_EQUALS(AWS_MQTT5_ER_FINISHED, result); struct aws_mqtt5_decoder_options decoder_options = { .callback_user_data = &tester, .decoder_table = &tester.decoder_function_table, }; struct aws_mqtt5_decoder decoder; ASSERT_SUCCESS(aws_mqtt5_decoder_init(&decoder, allocator, &decoder_options)); struct aws_byte_cursor whole_cursor = aws_byte_cursor_from_buf(&whole_dest); ASSERT_SUCCESS(aws_mqtt5_decoder_on_data_received(&decoder, whole_cursor)); aws_byte_buf_clean_up(&whole_dest); aws_mqtt5_encoder_clean_up(&encoder); aws_mqtt5_decoder_clean_up(&decoder); return AWS_OP_SUCCESS; } static int mqtt5_first_byte_reserved_header_check_subscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; uint32_t subscription_identifier = 1; struct aws_mqtt5_packet_subscribe_view subscribe_view = { .packet_id = packet_id, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), .subscriptions = &s_subscriptions[0], .subscription_identifier = &subscription_identifier, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; ASSERT_SUCCESS(s_aws_mqtt5_first_byte_check(allocator, AWS_MQTT5_PT_SUBSCRIBE, &subscribe_view)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_first_byte_reserved_header_check_subscribe, mqtt5_first_byte_reserved_header_check_subscribe_fn) static int mqtt5_first_byte_reserved_header_check_unsubscribe_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt5_packet_id_t packet_id = 47; struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .packet_id = packet_id, .topic_filter_count = AWS_ARRAY_SIZE(s_unsubscribe_topics), .topic_filters = &s_unsubscribe_topics[0], .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], }; ASSERT_SUCCESS(s_aws_mqtt5_first_byte_check(allocator, AWS_MQTT5_PT_UNSUBSCRIBE, &unsubscribe_view)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_first_byte_reserved_header_check_unsubscribe, mqtt5_first_byte_reserved_header_check_unsubscribe_fn) static int mqtt5_first_byte_reserved_header_check_disconnect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t session_expiry_interval_seconds = 333; struct aws_byte_cursor reason_string_cursor = aws_byte_cursor_from_c_str(s_reason_string); struct aws_byte_cursor server_reference_cursor = aws_byte_cursor_from_c_str(s_server_reference); struct aws_mqtt5_packet_disconnect_view disconnect_view = { .reason_code = AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE, .session_expiry_interval_seconds = &session_expiry_interval_seconds, .reason_string = &reason_string_cursor, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = &s_user_properties[0], .server_reference = &server_reference_cursor, }; ASSERT_SUCCESS(s_aws_mqtt5_first_byte_check(allocator, AWS_MQTT5_PT_DISCONNECT, &disconnect_view)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_first_byte_reserved_header_check_disconnect, 
mqtt5_first_byte_reserved_header_check_disconnect_fn)

aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_operation_and_storage_tests.c

/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include
#include "mqtt5_testing_utils.h"
#include
#include
#include
#include
#include
#include
#include
#include

static int s_verify_user_properties(
    struct aws_mqtt5_user_property_set *property_set,
    size_t expected_count,
    const struct aws_mqtt5_user_property *expected_properties) {

    return aws_mqtt5_test_verify_user_properties_raw(
        aws_mqtt5_user_property_set_size(property_set),
        property_set->properties.data,
        expected_count,
        expected_properties);
}

AWS_STATIC_STRING_FROM_LITERAL(s_client_id, "MyClientId");

static bool s_is_cursor_in_buffer(const struct aws_byte_buf *buffer, struct aws_byte_cursor cursor) {
    if (cursor.ptr < buffer->buffer) {
        return false;
    }

    if (cursor.ptr + cursor.len > buffer->buffer + buffer->len) {
        return false;
    }

    return true;
}

/*
 * a bunch of macros to simplify the verification of required and optional properties being properly propagated
 * and referenced within packet storage and packet views
 */
#define AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(storage_ptr, field_name) \
    ASSERT_NULL((storage_ptr)->storage_view.field_name);

#define AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(storage_ptr, view_ptr, field_name) \
    ASSERT_BIN_ARRAYS_EQUALS( \
        (view_ptr)->field_name->ptr, \
        (view_ptr)->field_name->len, \
        (storage_ptr)->field_name.ptr, \
        (storage_ptr)->field_name.len); \
    ASSERT_BIN_ARRAYS_EQUALS( \
        (view_ptr)->field_name->ptr, \
        (view_ptr)->field_name->len, \
        (storage_ptr)->storage_view.field_name->ptr, \
        (storage_ptr)->storage_view.field_name->len); \
    ASSERT_TRUE(s_is_cursor_in_buffer(&(storage_ptr)->storage, ((storage_ptr)->field_name))); \
    ASSERT_TRUE(s_is_cursor_in_buffer(&(storage_ptr)->storage, *((storage_ptr)->storage_view.field_name))); \
    ASSERT_TRUE((view_ptr)->field_name->ptr != (storage_ptr)->field_name.ptr);

#define AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(storage_ptr, view_ptr, field_name) \
    ASSERT_BIN_ARRAYS_EQUALS( \
        (view_ptr)->field_name.ptr, \
        (view_ptr)->field_name.len, \
        (storage_ptr)->storage_view.field_name.ptr, \
        (storage_ptr)->storage_view.field_name.len); \
    ASSERT_TRUE(s_is_cursor_in_buffer(&(storage_ptr)->storage, (storage_ptr)->storage_view.field_name)); \
    ASSERT_TRUE((view_ptr)->field_name.ptr != (storage_ptr)->storage_view.field_name.ptr);

#define AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_EMPTY_CURSOR(storage_ptr, view_ptr, field_name) \
    ASSERT_UINT_EQUALS(0, (storage_ptr)->storage_view.field_name.len);

#define AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(storage_ptr, view_ptr, field_name) \
    ASSERT_UINT_EQUALS((view_ptr)->field_name, (storage_ptr)->storage_view.field_name);

#define AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(storage_ptr, view_ptr, field_name) \
    ASSERT_UINT_EQUALS(*(view_ptr)->field_name, (storage_ptr)->field_name); \
    ASSERT_PTR_EQUALS((storage_ptr)->storage_view.field_name, &(storage_ptr)->field_name);

static const char *PUBLISH_PAYLOAD = "hello-world";
static const char *PUBLISH_TOPIC = "greetings/friendly";

static int s_verify_publish_operation_required_fields(
    struct aws_mqtt5_packet_publish_storage *publish_storage,
    struct aws_mqtt5_packet_publish_view *original_view) {
    AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_EMPTY_CURSOR(publish_storage, original_view, payload);
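    /*
     * Illustrative note: the AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_* checks used here and below assert two
     * things at once: the stored value matches the caller-supplied view, and any cursor held by the storage
     * points into the storage's own backing buffer rather than at the caller's memory.  For example,
     * AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(publish_storage, original_view, topic) expands to:
     *
     *   ASSERT_BIN_ARRAYS_EQUALS(
     *       original_view->topic.ptr, original_view->topic.len,
     *       publish_storage->storage_view.topic.ptr, publish_storage->storage_view.topic.len);
     *   ASSERT_TRUE(s_is_cursor_in_buffer(&publish_storage->storage, publish_storage->storage_view.topic));
     *   ASSERT_TRUE(original_view->topic.ptr != publish_storage->storage_view.topic.ptr);
     *
     * i.e. the storage holds a deep copy, not a borrowed reference.
     */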
AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(publish_storage, original_view, qos); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(publish_storage, original_view, retain); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(publish_storage, original_view, topic); return AWS_OP_SUCCESS; } static int s_mqtt5_publish_operation_new_set_no_optional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_publish_view publish_options = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .retain = true, .topic = aws_byte_cursor_from_c_str(PUBLISH_TOPIC), }; ASSERT_SUCCESS(aws_mqtt5_packet_publish_view_validate(&publish_options)); struct aws_mqtt5_operation_publish *publish_op = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_options, NULL); ASSERT_NOT_NULL(publish_op); /* This test will check both the values in storage as well as the embedded view. They should be in sync. */ struct aws_mqtt5_packet_publish_storage *publish_storage = &publish_op->options_storage; struct aws_mqtt5_packet_publish_view *stored_view = &publish_storage->storage_view; ASSERT_SUCCESS(aws_mqtt5_packet_publish_view_validate(stored_view)); /* required fields */ ASSERT_SUCCESS(s_verify_publish_operation_required_fields(publish_storage, &publish_options)); /* optional fields */ AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(publish_storage, payload_format); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(publish_storage, message_expiry_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(publish_storage, topic_alias); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(publish_storage, response_topic); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(publish_storage, correlation_data); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(publish_storage, content_type); ASSERT_SUCCESS(s_verify_user_properties(&publish_storage->user_properties, 0, NULL)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, 0, NULL)); ASSERT_NULL(publish_op->completion_options.completion_callback); ASSERT_NULL(publish_op->completion_options.completion_user_data); aws_mqtt5_packet_publish_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_operation_release(&publish_op->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_publish_operation_new_set_no_optional, s_mqtt5_publish_operation_new_set_no_optional_fn) static const uint32_t s_message_expiry_interval_seconds = 60; static const uint16_t s_topic_alias = 2; static const char *s_response_topic = "A-response-topic"; static const char *s_correlation_data = "CorrelationData"; static const char *s_content_type = "JSON"; static char s_user_prop1_name[] = "Property1"; static char s_user_prop1_value[] = "Value1"; static char s_user_prop2_name[] = "Property2"; static char s_user_prop2_value[] = "Value2"; static const struct aws_mqtt5_user_property s_user_properties[] = { { .name = { .ptr = (uint8_t *)s_user_prop1_name, .len = AWS_ARRAY_SIZE(s_user_prop1_name) - 1, }, .value = { .ptr = (uint8_t *)s_user_prop1_value, .len = AWS_ARRAY_SIZE(s_user_prop1_value) - 1, }, }, { .name = { .ptr = (uint8_t *)s_user_prop2_name, .len = AWS_ARRAY_SIZE(s_user_prop2_name) - 1, }, .value = { .ptr = (uint8_t *)s_user_prop2_value, .len = AWS_ARRAY_SIZE(s_user_prop2_value) - 1, }, }, }; static void s_aws_mqtt5_publish_completion_fn( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)packet_type; (void)packet; (void)error_code; (void)complete_ctx; } static int s_mqtt5_publish_operation_new_set_all_fn(struct aws_allocator *allocator, 
void *ctx) { (void)ctx; struct aws_byte_cursor response_topic = aws_byte_cursor_from_c_str(s_response_topic); struct aws_byte_cursor correlation_data = aws_byte_cursor_from_c_str(s_correlation_data); struct aws_byte_cursor content_type = aws_byte_cursor_from_c_str(s_content_type); enum aws_mqtt5_payload_format_indicator payload_format = AWS_MQTT5_PFI_UTF8; struct aws_byte_cursor payload_cursor = aws_byte_cursor_from_c_str(PUBLISH_PAYLOAD); struct aws_mqtt5_packet_publish_view publish_options = { .payload = payload_cursor, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .retain = false, .topic = aws_byte_cursor_from_c_str(PUBLISH_TOPIC), .payload_format = &payload_format, .message_expiry_interval_seconds = &s_message_expiry_interval_seconds, .topic_alias = &s_topic_alias, .response_topic = &response_topic, .correlation_data = &correlation_data, .subscription_identifier_count = 0, .subscription_identifiers = NULL, .content_type = &content_type, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = s_user_properties, }; ASSERT_SUCCESS(aws_mqtt5_packet_publish_view_validate(&publish_options)); struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = &s_aws_mqtt5_publish_completion_fn, .completion_user_data = (void *)0xFFFF, }; struct aws_mqtt5_operation_publish *publish_op = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_options, &completion_options); ASSERT_NOT_NULL(publish_op); struct aws_mqtt5_packet_publish_storage *publish_storage = &publish_op->options_storage; struct aws_mqtt5_packet_publish_view *stored_view = &publish_storage->storage_view; ASSERT_SUCCESS(aws_mqtt5_packet_publish_view_validate(stored_view)); /* required fields */ AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(publish_storage, &publish_options, payload); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(publish_storage, &publish_options, qos); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(publish_storage, &publish_options, retain); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(publish_storage, &publish_options, topic); /* optional fields */ AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(publish_storage, &publish_options, payload_format); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( publish_storage, &publish_options, message_expiry_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(publish_storage, &publish_options, topic_alias); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(publish_storage, &publish_options, response_topic); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(publish_storage, &publish_options, correlation_data); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(publish_storage, &publish_options, content_type); ASSERT_SUCCESS(s_verify_user_properties( &publish_storage->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_PTR_EQUALS(completion_options.completion_callback, publish_op->completion_options.completion_callback); ASSERT_PTR_EQUALS(completion_options.completion_user_data, publish_op->completion_options.completion_user_data); aws_mqtt5_packet_publish_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_operation_release(&publish_op->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_publish_operation_new_set_all, s_mqtt5_publish_operation_new_set_all_fn) static int s_mqtt5_publish_operation_new_failure_packet_id_fn(struct 
aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_publish_view publish_options = { .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .retain = true, .topic = aws_byte_cursor_from_c_str(PUBLISH_TOPIC), }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = &s_aws_mqtt5_publish_completion_fn, .completion_user_data = (void *)0xFFFF, }; struct aws_mqtt5_operation_publish *publish_op = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_options, &completion_options); ASSERT_NOT_NULL(publish_op); aws_mqtt5_operation_release(&publish_op->base); publish_options.packet_id = 1, publish_op = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_options, &completion_options); ASSERT_INT_EQUALS(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION, aws_last_error()); ASSERT_NULL(publish_op); aws_raise_error(AWS_ERROR_SUCCESS); publish_options.qos = AWS_MQTT5_QOS_AT_LEAST_ONCE; publish_op = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_options, &completion_options); ASSERT_INT_EQUALS(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION, aws_last_error()); ASSERT_NULL(publish_op); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_publish_operation_new_failure_packet_id, s_mqtt5_publish_operation_new_failure_packet_id_fn) static const char s_topic_filter1[] = "some/topic/+"; static const char s_topic_filter2[] = "another/topic/*"; static struct aws_mqtt5_subscription_view s_subscriptions[] = { { .topic_filter = { .ptr = (uint8_t *)s_topic_filter1, .len = AWS_ARRAY_SIZE(s_topic_filter1) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = true, .retain_as_published = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, }, { .topic_filter = { .ptr = (uint8_t *)s_topic_filter2, .len = AWS_ARRAY_SIZE(s_topic_filter2) - 1, }, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .no_local = false, .retain_as_published = true, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE_IF_NEW, }, }; static int s_verify_subscriptions_raw( size_t expected_subscription_count, const struct aws_mqtt5_subscription_view *expected_subscriptions, size_t actual_subscription_count, const struct aws_mqtt5_subscription_view *actual_subscriptions) { ASSERT_INT_EQUALS(expected_subscription_count, actual_subscription_count); for (size_t i = 0; i < expected_subscription_count; ++i) { const struct aws_mqtt5_subscription_view *expected_view = &expected_subscriptions[i]; const struct aws_mqtt5_subscription_view *actual_view = &actual_subscriptions[i]; ASSERT_BIN_ARRAYS_EQUALS( expected_view->topic_filter.ptr, expected_view->topic_filter.len, actual_view->topic_filter.ptr, actual_view->topic_filter.len); ASSERT_INT_EQUALS(expected_view->qos, actual_view->qos); ASSERT_INT_EQUALS(expected_view->no_local, actual_view->no_local); ASSERT_INT_EQUALS(expected_view->retain_as_published, actual_view->retain_as_published); ASSERT_INT_EQUALS(expected_view->retain_handling_type, actual_view->retain_handling_type); } return AWS_OP_SUCCESS; } static int s_verify_subscriptions( size_t expected_subscription_count, const struct aws_mqtt5_subscription_view *expected_subscriptions, struct aws_array_list *storage_subscriptions) { return s_verify_subscriptions_raw( expected_subscription_count, expected_subscriptions, aws_array_list_length(storage_subscriptions), storage_subscriptions->data); } static int s_aws_mqtt5_subcribe_operation_verify_required_properties( struct aws_mqtt5_operation_subscribe *subscribe_op, struct aws_mqtt5_packet_subscribe_view *original_view, struct aws_mqtt5_subscribe_completion_options 
*original_completion_options) { (void)original_view; ASSERT_NOT_NULL(subscribe_op); struct aws_mqtt5_packet_subscribe_storage *subscribe_storage = &subscribe_op->options_storage; struct aws_mqtt5_packet_subscribe_view *stored_view = &subscribe_storage->storage_view; ASSERT_SUCCESS( s_verify_subscriptions(AWS_ARRAY_SIZE(s_subscriptions), s_subscriptions, &subscribe_storage->subscriptions)); ASSERT_SUCCESS(s_verify_subscriptions_raw( AWS_ARRAY_SIZE(s_subscriptions), s_subscriptions, stored_view->subscription_count, stored_view->subscriptions)); ASSERT_PTR_EQUALS( original_completion_options->completion_callback, subscribe_op->completion_options.completion_callback); ASSERT_PTR_EQUALS( original_completion_options->completion_user_data, subscribe_op->completion_options.completion_user_data); return AWS_OP_SUCCESS; } static void s_aws_mqtt5_subscribe_completion_fn( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { (void)suback; (void)error_code; (void)complete_ctx; } static int s_mqtt5_subscribe_operation_new_set_no_optional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_subscribe_view subscribe_options = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), .user_property_count = 0, .user_properties = NULL, }; ASSERT_SUCCESS(aws_mqtt5_packet_subscribe_view_validate(&subscribe_options)); struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = &s_aws_mqtt5_subscribe_completion_fn, .completion_user_data = (void *)0xFFFF, }; struct aws_mqtt5_operation_subscribe *subscribe_op = aws_mqtt5_operation_subscribe_new(allocator, NULL, &subscribe_options, &completion_options); ASSERT_SUCCESS(s_aws_mqtt5_subcribe_operation_verify_required_properties( subscribe_op, &subscribe_options, &completion_options)); struct aws_mqtt5_packet_subscribe_view *stored_view = &subscribe_op->options_storage.storage_view; ASSERT_SUCCESS(aws_mqtt5_packet_subscribe_view_validate(stored_view)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&subscribe_op->options_storage, subscription_identifier); aws_mqtt5_packet_subscribe_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_operation_release(&subscribe_op->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_subscribe_operation_new_set_no_optional, s_mqtt5_subscribe_operation_new_set_no_optional_fn) static int s_mqtt5_subscribe_operation_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_subscribe_view subscribe_options = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = s_user_properties, }; uint32_t sub_id = 5; subscribe_options.subscription_identifier = &sub_id; ASSERT_SUCCESS(aws_mqtt5_packet_subscribe_view_validate(&subscribe_options)); struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = &s_aws_mqtt5_subscribe_completion_fn, .completion_user_data = (void *)0xFFFF, }; struct aws_mqtt5_operation_subscribe *subscribe_op = aws_mqtt5_operation_subscribe_new(allocator, NULL, &subscribe_options, &completion_options); ASSERT_SUCCESS(s_aws_mqtt5_subcribe_operation_verify_required_properties( subscribe_op, &subscribe_options, &completion_options)); struct aws_mqtt5_packet_subscribe_storage *subscribe_storage = &subscribe_op->options_storage; struct aws_mqtt5_packet_subscribe_view *stored_view = &subscribe_storage->storage_view; 
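    /*
     * Illustrative note: stored_view is the subscribe view owned by the operation's options_storage,
     * re-pointed at the storage's own copies of the data, which is why the checks below validate and inspect
     * stored_view rather than the caller's subscribe_options.  For the optional field, the NULLABLE_UINT
     * check below asserts both that the stored value equals *subscribe_options.subscription_identifier and
     * that stored_view->subscription_identifier points at the storage's copy rather than at the local sub_id.
     */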
ASSERT_SUCCESS(aws_mqtt5_packet_subscribe_view_validate(stored_view)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(subscribe_storage, &subscribe_options, subscription_identifier); ASSERT_SUCCESS(s_verify_user_properties( &subscribe_storage->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_subscribe_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_operation_release(&subscribe_op->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_subscribe_operation_new_set_all, s_mqtt5_subscribe_operation_new_set_all_fn) static void s_aws_mqtt5_unsubscribe_completion_fn( const struct aws_mqtt5_packet_unsuback_view *unsuback, int error_code, void *complete_ctx) { (void)unsuback; (void)error_code; (void)complete_ctx; } static const char s_unsub_topic_filter1[] = "a/topic"; static const char s_unsub_topic_filter2[] = "another/*"; static const char s_unsub_topic_filter3[] = "hello/+/world"; static const struct aws_byte_cursor s_topics[] = { { .ptr = (uint8_t *)s_unsub_topic_filter1, .len = AWS_ARRAY_SIZE(s_unsub_topic_filter1) - 1, }, { .ptr = (uint8_t *)s_unsub_topic_filter2, .len = AWS_ARRAY_SIZE(s_unsub_topic_filter2) - 1, }, { .ptr = (uint8_t *)s_unsub_topic_filter3, .len = AWS_ARRAY_SIZE(s_unsub_topic_filter3) - 1, }, }; static int s_mqtt5_unsubscribe_operation_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_unsubscribe_view unsubscribe_options = { .topic_filters = s_topics, .topic_filter_count = AWS_ARRAY_SIZE(s_topics), .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = s_user_properties, }; ASSERT_SUCCESS(aws_mqtt5_packet_unsubscribe_view_validate(&unsubscribe_options)); struct aws_mqtt5_unsubscribe_completion_options completion_options = { .completion_callback = &s_aws_mqtt5_unsubscribe_completion_fn, .completion_user_data = (void *)0xFFFF, }; struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = aws_mqtt5_operation_unsubscribe_new(allocator, NULL, &unsubscribe_options, &completion_options); struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage = &unsubscribe_op->options_storage; struct aws_mqtt5_packet_unsubscribe_view *stored_view = &unsubscribe_storage->storage_view; ASSERT_SUCCESS(aws_mqtt5_packet_unsubscribe_view_validate(stored_view)); ASSERT_UINT_EQUALS(stored_view->topic_filter_count, unsubscribe_options.topic_filter_count); for (size_t i = 0; i < stored_view->topic_filter_count; ++i) { const struct aws_byte_cursor *expected_topic = &unsubscribe_options.topic_filters[i]; const struct aws_byte_cursor *actual_topic = &stored_view->topic_filters[i]; ASSERT_UINT_EQUALS(expected_topic->len, actual_topic->len); ASSERT_TRUE(expected_topic->ptr != actual_topic->ptr); ASSERT_BIN_ARRAYS_EQUALS(expected_topic->ptr, expected_topic->len, actual_topic->ptr, actual_topic->len); } ASSERT_SUCCESS(s_verify_user_properties( &unsubscribe_storage->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_unsubscribe_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_operation_release(&unsubscribe_op->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_unsubscribe_operation_new_set_all, 
s_mqtt5_unsubscribe_operation_new_set_all_fn) static int s_aws_mqtt5_connect_storage_verify_required_properties( struct aws_mqtt5_packet_connect_storage *connect_storage, struct aws_mqtt5_packet_connect_view *connect_options) { AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(connect_storage, connect_options, keep_alive_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(connect_storage, connect_options, client_id); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(connect_storage, connect_options, clean_start); return AWS_OP_SUCCESS; } static int s_mqtt5_connect_storage_new_set_no_optional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_connect_view connect_options = { .keep_alive_interval_seconds = 50, .client_id = aws_byte_cursor_from_string(s_client_id), .clean_start = true, }; ASSERT_SUCCESS(aws_mqtt5_packet_connect_view_validate(&connect_options)); struct aws_mqtt5_packet_connect_storage connect_storage; AWS_ZERO_STRUCT(connect_storage); ASSERT_SUCCESS(aws_mqtt5_packet_connect_storage_init(&connect_storage, allocator, &connect_options)); ASSERT_SUCCESS(s_aws_mqtt5_connect_storage_verify_required_properties(&connect_storage, &connect_options)); struct aws_mqtt5_packet_connect_view *stored_view = &connect_storage.storage_view; ASSERT_SUCCESS(aws_mqtt5_packet_connect_view_validate(stored_view)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, username); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, password); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, session_expiry_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, request_response_information); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, request_problem_information); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, receive_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, topic_alias_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, maximum_packet_size_bytes); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, will_delay_interval_seconds); ASSERT_NULL(connect_storage.will); ASSERT_NULL(stored_view->will); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, authentication_method); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connect_storage, authentication_data); ASSERT_SUCCESS(s_verify_user_properties(&connect_storage.user_properties, 0, NULL)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, 0, NULL)); aws_mqtt5_packet_connect_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_connect_storage_clean_up(&connect_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_connect_storage_new_set_no_optional, s_mqtt5_connect_storage_new_set_no_optional_fn) static const char s_username[] = "SomeUser"; static const struct aws_byte_cursor s_username_cursor = { .ptr = (uint8_t *)s_username, .len = AWS_ARRAY_SIZE(s_username) - 1, }; static const char s_password[] = "CantBeGuessed"; static const struct aws_byte_cursor s_password_cursor = { .ptr = (uint8_t *)s_password, .len = AWS_ARRAY_SIZE(s_password) - 1, }; static const uint32_t s_session_expiry_interval_seconds = 60; static const uint8_t s_request_response_information = true; static const uint8_t s_request_problem_information = true; static const uint16_t s_connect_receive_maximum = 10; static const uint16_t s_connect_topic_alias_maximum = 15; static const uint32_t s_connect_maximum_packet_size_bytes = 128 * 1024 * 1024; static const 
uint32_t s_will_delay_interval_seconds = 30; static const char s_authentication_method[] = "ECDSA-DH-RSA-EVP-SEKRTI"; static const struct aws_byte_cursor s_authentication_method_cursor = { .ptr = (uint8_t *)s_authentication_method, .len = AWS_ARRAY_SIZE(s_authentication_method) - 1, }; static const char s_authentication_data[] = "SomeSignature"; static const struct aws_byte_cursor s_authentication_data_cursor = { .ptr = (uint8_t *)s_authentication_data, .len = AWS_ARRAY_SIZE(s_authentication_data) - 1, }; static int s_mqtt5_connect_storage_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_publish_view publish_options = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .retain = true, .topic = aws_byte_cursor_from_c_str(PUBLISH_TOPIC), }; struct aws_mqtt5_packet_connect_view connect_options = { .keep_alive_interval_seconds = 50, .client_id = aws_byte_cursor_from_string(s_client_id), .username = &s_username_cursor, .password = &s_password_cursor, .clean_start = true, .session_expiry_interval_seconds = &s_session_expiry_interval_seconds, .request_response_information = &s_request_response_information, .request_problem_information = &s_request_problem_information, .receive_maximum = &s_connect_receive_maximum, .topic_alias_maximum = &s_connect_topic_alias_maximum, .maximum_packet_size_bytes = &s_connect_maximum_packet_size_bytes, .will_delay_interval_seconds = &s_will_delay_interval_seconds, .will = &publish_options, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = s_user_properties, .authentication_method = &s_authentication_method_cursor, .authentication_data = &s_authentication_data_cursor, }; struct aws_mqtt5_packet_connect_storage connect_storage; AWS_ZERO_STRUCT(connect_storage); ASSERT_SUCCESS(aws_mqtt5_packet_connect_storage_init(&connect_storage, allocator, &connect_options)); ASSERT_SUCCESS(s_aws_mqtt5_connect_storage_verify_required_properties(&connect_storage, &connect_options)); struct aws_mqtt5_packet_connect_view *stored_view = &connect_storage.storage_view; AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connect_storage, &connect_options, username); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connect_storage, &connect_options, password); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( &connect_storage, &connect_options, session_expiry_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( &connect_storage, &connect_options, request_response_information); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connect_storage, &connect_options, request_problem_information); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connect_storage, &connect_options, receive_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connect_storage, &connect_options, topic_alias_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connect_storage, &connect_options, maximum_packet_size_bytes); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connect_storage, &connect_options, will_delay_interval_seconds); ASSERT_NOT_NULL(connect_storage.will); ASSERT_NOT_NULL(stored_view->will); ASSERT_SUCCESS(s_verify_publish_operation_required_fields(connect_storage.will, &publish_options)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connect_storage, &connect_options, authentication_method); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connect_storage, &connect_options, authentication_data); ASSERT_SUCCESS(s_verify_user_properties( &connect_storage.user_properties, 
AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_connect_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_connect_storage_clean_up(&connect_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_connect_storage_new_set_all, s_mqtt5_connect_storage_new_set_all_fn) static int s_aws_mqtt5_connack_storage_verify_required_properties( struct aws_mqtt5_packet_connack_storage *connack_storage, struct aws_mqtt5_packet_connack_view *connack_view) { AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(connack_storage, connack_view, session_present); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(connack_storage, connack_view, reason_code); return AWS_OP_SUCCESS; } static int s_mqtt5_connack_storage_new_set_no_optional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_connack_view connack_options = { .session_present = true, .reason_code = AWS_MQTT5_CRC_BANNED, }; struct aws_mqtt5_packet_connack_storage connack_storage; AWS_ZERO_STRUCT(connack_storage); ASSERT_SUCCESS(aws_mqtt5_packet_connack_storage_init(&connack_storage, allocator, &connack_options)); ASSERT_SUCCESS(s_aws_mqtt5_connack_storage_verify_required_properties(&connack_storage, &connack_options)); struct aws_mqtt5_packet_connack_view *stored_view = &connack_storage.storage_view; AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, session_expiry_interval); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, receive_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, maximum_qos); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, retain_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, maximum_packet_size); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, assigned_client_identifier); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, topic_alias_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, reason_string); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, wildcard_subscriptions_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, subscription_identifiers_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, shared_subscriptions_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, server_keep_alive); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, response_information); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, server_reference); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, authentication_method); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&connack_storage, authentication_data); ASSERT_SUCCESS(s_verify_user_properties(&connack_storage.user_properties, 0, NULL)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, 0, NULL)); aws_mqtt5_packet_connack_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_connack_storage_clean_up(&connack_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_connack_storage_new_set_no_optional, s_mqtt5_connack_storage_new_set_no_optional_fn) static const uint32_t s_connack_session_expiry_interval = 300; static const uint16_t s_connack_receive_maximum = 15; static const enum aws_mqtt5_qos s_connack_maximum_qos = AWS_MQTT5_QOS_EXACTLY_ONCE; static const bool s_connack_retain_available = true; static const 
uint32_t s_connack_maximum_packet_size = 256 * 1024 * 1024; static const char s_assigned_client_identifier[] = "ThisIsYourClientId"; static const struct aws_byte_cursor s_assigned_client_identifier_cursor = { .ptr = (uint8_t *)s_assigned_client_identifier, .len = AWS_ARRAY_SIZE(s_assigned_client_identifier) - 1, }; static const uint16_t s_connack_topic_alias_maximum = 32; static const char s_reason_string[] = "Very Bad Behavior"; static const struct aws_byte_cursor s_reason_string_cursor = { .ptr = (uint8_t *)s_reason_string, .len = AWS_ARRAY_SIZE(s_reason_string) - 1, }; static const bool s_connack_wildcard_subscriptions_available = true; static const bool s_connack_subscription_identifiers_available = true; static const bool s_connack_shared_subscriptions_available = true; static const uint16_t s_connack_server_keep_alive = 3600; static const char s_response_information[] = "Everything worked great!"; static const struct aws_byte_cursor s_response_information_cursor = { .ptr = (uint8_t *)s_response_information, .len = AWS_ARRAY_SIZE(s_response_information) - 1, }; static const char s_server_reference[] = "no-dont-leave.com"; static const struct aws_byte_cursor s_server_reference_cursor = { .ptr = (uint8_t *)s_server_reference, .len = AWS_ARRAY_SIZE(s_server_reference) - 1, }; static int s_mqtt5_connack_storage_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_connack_view connack_options = { .session_present = true, .reason_code = AWS_MQTT5_CRC_BANNED, .session_expiry_interval = &s_connack_session_expiry_interval, .receive_maximum = &s_connack_receive_maximum, .maximum_qos = &s_connack_maximum_qos, .retain_available = &s_connack_retain_available, .maximum_packet_size = &s_connack_maximum_packet_size, .assigned_client_identifier = &s_assigned_client_identifier_cursor, .topic_alias_maximum = &s_connack_topic_alias_maximum, .reason_string = &s_reason_string_cursor, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = s_user_properties, .wildcard_subscriptions_available = &s_connack_wildcard_subscriptions_available, .subscription_identifiers_available = &s_connack_subscription_identifiers_available, .shared_subscriptions_available = &s_connack_shared_subscriptions_available, .server_keep_alive = &s_connack_server_keep_alive, .response_information = &s_response_information_cursor, .server_reference = &s_server_reference_cursor, .authentication_method = &s_authentication_method_cursor, .authentication_data = &s_authentication_data_cursor, }; struct aws_mqtt5_packet_connack_storage connack_storage; AWS_ZERO_STRUCT(connack_storage); ASSERT_SUCCESS(aws_mqtt5_packet_connack_storage_init(&connack_storage, allocator, &connack_options)); ASSERT_SUCCESS(s_aws_mqtt5_connack_storage_verify_required_properties(&connack_storage, &connack_options)); struct aws_mqtt5_packet_connack_view *stored_view = &connack_storage.storage_view; AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connack_storage, &connack_options, session_expiry_interval); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connack_storage, &connack_options, receive_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connack_storage, &connack_options, maximum_qos); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connack_storage, &connack_options, retain_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connack_storage, &connack_options, maximum_packet_size); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR( &connack_storage, 
&connack_options, assigned_client_identifier); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connack_storage, &connack_options, topic_alias_maximum); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connack_storage, &connack_options, reason_string); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( &connack_storage, &connack_options, wildcard_subscriptions_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( &connack_storage, &connack_options, subscription_identifiers_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( &connack_storage, &connack_options, shared_subscriptions_available); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&connack_storage, &connack_options, server_keep_alive); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connack_storage, &connack_options, response_information); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connack_storage, &connack_options, server_reference); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connack_storage, &connack_options, authentication_method); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&connack_storage, &connack_options, authentication_data); ASSERT_SUCCESS(s_verify_user_properties( &connack_storage.user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_connack_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_connack_storage_clean_up(&connack_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_connack_storage_new_set_all, s_mqtt5_connack_storage_new_set_all_fn) static int s_mqtt5_disconnect_storage_new_set_no_optional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_disconnect_view disconnect_options = { .reason_code = AWS_MQTT5_DRC_ADMINISTRATIVE_ACTION, }; ASSERT_SUCCESS(aws_mqtt5_packet_disconnect_view_validate(&disconnect_options)); struct aws_mqtt5_packet_disconnect_storage disconnect_storage; AWS_ZERO_STRUCT(disconnect_storage); ASSERT_SUCCESS(aws_mqtt5_packet_disconnect_storage_init(&disconnect_storage, allocator, &disconnect_options)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(&disconnect_storage, &disconnect_options, reason_code); struct aws_mqtt5_packet_disconnect_view *stored_view = &disconnect_storage.storage_view; ASSERT_SUCCESS(aws_mqtt5_packet_disconnect_view_validate(stored_view)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&disconnect_storage, session_expiry_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&disconnect_storage, reason_string); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&disconnect_storage, server_reference); ASSERT_SUCCESS(s_verify_user_properties(&disconnect_storage.user_properties, 0, NULL)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, 0, NULL)); aws_mqtt5_packet_disconnect_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_disconnect_storage_clean_up(&disconnect_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_disconnect_storage_new_set_no_optional, s_mqtt5_disconnect_storage_new_set_no_optional_fn) static int s_mqtt5_disconnect_storage_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_disconnect_view disconnect_options = { .reason_code = AWS_MQTT5_DRC_ADMINISTRATIVE_ACTION, .session_expiry_interval_seconds = 
&s_session_expiry_interval_seconds, .reason_string = &s_reason_string_cursor, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = s_user_properties, .server_reference = &s_server_reference_cursor, }; struct aws_mqtt5_packet_disconnect_storage disconnect_storage; AWS_ZERO_STRUCT(disconnect_storage); ASSERT_SUCCESS(aws_mqtt5_packet_disconnect_storage_init(&disconnect_storage, allocator, &disconnect_options)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(&disconnect_storage, &disconnect_options, reason_code); struct aws_mqtt5_packet_disconnect_view *stored_view = &disconnect_storage.storage_view; AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( &disconnect_storage, &disconnect_options, session_expiry_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&disconnect_storage, &disconnect_options, reason_string); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&disconnect_storage, &disconnect_options, server_reference); ASSERT_SUCCESS(s_verify_user_properties( &disconnect_storage.user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_disconnect_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_disconnect_storage_clean_up(&disconnect_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_disconnect_storage_new_set_all, s_mqtt5_disconnect_storage_new_set_all_fn) static const enum aws_mqtt5_suback_reason_code s_suback_reason_codes[] = { AWS_MQTT5_SARC_GRANTED_QOS_0, AWS_MQTT5_SARC_GRANTED_QOS_2, AWS_MQTT5_SARC_NOT_AUTHORIZED, }; static int s_verify_suback_reason_codes_raw( const enum aws_mqtt5_suback_reason_code *reason_codes, size_t reason_code_count, const struct aws_mqtt5_packet_suback_view *original_view) { ASSERT_UINT_EQUALS(reason_code_count, original_view->reason_code_count); for (size_t i = 0; i < reason_code_count; ++i) { ASSERT_UINT_EQUALS(reason_codes[i], original_view->reason_codes[i]); } return AWS_OP_SUCCESS; } static int s_verify_suback_reason_codes( const struct aws_mqtt5_packet_suback_storage *suback_storage, const struct aws_mqtt5_packet_suback_view *original_view) { ASSERT_SUCCESS(s_verify_suback_reason_codes_raw( suback_storage->reason_codes.data, aws_array_list_length(&suback_storage->reason_codes), original_view)); const struct aws_mqtt5_packet_suback_view *storage_view = &suback_storage->storage_view; ASSERT_SUCCESS( s_verify_suback_reason_codes_raw(storage_view->reason_codes, storage_view->reason_code_count, original_view)); return AWS_OP_SUCCESS; } static int s_mqtt5_suback_storage_new_set_no_optional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_suback_view suback_options = { .reason_codes = s_suback_reason_codes, .reason_code_count = AWS_ARRAY_SIZE(s_suback_reason_codes), }; struct aws_mqtt5_packet_suback_storage suback_storage; AWS_ZERO_STRUCT(suback_storage); ASSERT_SUCCESS(aws_mqtt5_packet_suback_storage_init(&suback_storage, allocator, &suback_options)); struct aws_mqtt5_packet_suback_view *stored_view = &suback_storage.storage_view; ASSERT_SUCCESS(s_verify_suback_reason_codes(&suback_storage, &suback_options)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&suback_storage, reason_string); ASSERT_SUCCESS(s_verify_user_properties(&suback_storage.user_properties, 0, NULL)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, 
stored_view->user_properties, 0, NULL)); aws_mqtt5_packet_suback_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_suback_storage_clean_up(&suback_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_suback_storage_new_set_no_optional, s_mqtt5_suback_storage_new_set_no_optional_fn) static int s_mqtt5_suback_storage_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_suback_view suback_options = { .reason_string = &s_reason_string_cursor, .reason_codes = s_suback_reason_codes, .reason_code_count = AWS_ARRAY_SIZE(s_suback_reason_codes), .user_properties = s_user_properties, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), }; struct aws_mqtt5_packet_suback_storage suback_storage; AWS_ZERO_STRUCT(suback_storage); ASSERT_SUCCESS(aws_mqtt5_packet_suback_storage_init(&suback_storage, allocator, &suback_options)); struct aws_mqtt5_packet_suback_view *stored_view = &suback_storage.storage_view; ASSERT_SUCCESS(s_verify_suback_reason_codes(&suback_storage, &suback_options)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&suback_storage, &suback_options, reason_string); ASSERT_SUCCESS(s_verify_user_properties( &suback_storage.user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_suback_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_suback_storage_clean_up(&suback_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_suback_storage_new_set_all, s_mqtt5_suback_storage_new_set_all_fn) static const enum aws_mqtt5_unsuback_reason_code s_unsuback_reason_codes[] = { AWS_MQTT5_UARC_NOT_AUTHORIZED, AWS_MQTT5_UARC_SUCCESS, AWS_MQTT5_UARC_NO_SUBSCRIPTION_EXISTED, }; static int s_verify_unsuback_reason_codes_raw( const enum aws_mqtt5_unsuback_reason_code *reason_codes, size_t reason_code_count, const struct aws_mqtt5_packet_unsuback_view *original_view) { ASSERT_UINT_EQUALS(reason_code_count, original_view->reason_code_count); for (size_t i = 0; i < reason_code_count; ++i) { ASSERT_UINT_EQUALS(reason_codes[i], original_view->reason_codes[i]); } return AWS_OP_SUCCESS; } static int s_verify_unsuback_reason_codes( const struct aws_mqtt5_packet_unsuback_storage *unsuback_storage, const struct aws_mqtt5_packet_unsuback_view *original_view) { ASSERT_SUCCESS(s_verify_unsuback_reason_codes_raw( unsuback_storage->reason_codes.data, aws_array_list_length(&unsuback_storage->reason_codes), original_view)); const struct aws_mqtt5_packet_unsuback_view *storage_view = &unsuback_storage->storage_view; ASSERT_SUCCESS( s_verify_unsuback_reason_codes_raw(storage_view->reason_codes, storage_view->reason_code_count, original_view)); return AWS_OP_SUCCESS; } static int s_mqtt5_unsuback_storage_new_set_no_optional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_unsuback_view unsuback_options = { .reason_codes = s_unsuback_reason_codes, .reason_code_count = AWS_ARRAY_SIZE(s_unsuback_reason_codes), }; struct aws_mqtt5_packet_unsuback_storage unsuback_storage; AWS_ZERO_STRUCT(unsuback_storage); ASSERT_SUCCESS(aws_mqtt5_packet_unsuback_storage_init(&unsuback_storage, allocator, &unsuback_options)); struct aws_mqtt5_packet_unsuback_view *stored_view = &unsuback_storage.storage_view; ASSERT_SUCCESS(s_verify_unsuback_reason_codes(&unsuback_storage, &unsuback_options)); 
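/* fields not supplied on the original UNSUBACK view (reason string, user properties) should remain unset in the storage-backed view */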
AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULL(&unsuback_storage, reason_string); ASSERT_SUCCESS(s_verify_user_properties(&unsuback_storage.user_properties, 0, NULL)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, 0, NULL)); aws_mqtt5_packet_unsuback_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_unsuback_storage_clean_up(&unsuback_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_unsuback_storage_new_set_no_optional, s_mqtt5_unsuback_storage_new_set_no_optional_fn) static int s_mqtt5_unsuback_storage_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_unsuback_view unsuback_options = { .reason_string = &s_reason_string_cursor, .reason_codes = s_unsuback_reason_codes, .reason_code_count = AWS_ARRAY_SIZE(s_unsuback_reason_codes), .user_properties = s_user_properties, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), }; struct aws_mqtt5_packet_unsuback_storage unsuback_storage; AWS_ZERO_STRUCT(unsuback_storage); ASSERT_SUCCESS(aws_mqtt5_packet_unsuback_storage_init(&unsuback_storage, allocator, &unsuback_options)); struct aws_mqtt5_packet_unsuback_view *stored_view = &unsuback_storage.storage_view; ASSERT_SUCCESS(s_verify_unsuback_reason_codes(&unsuback_storage, &unsuback_options)); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&unsuback_storage, &unsuback_options, reason_string); ASSERT_SUCCESS(s_verify_user_properties( &unsuback_storage.user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_unsuback_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_unsuback_storage_clean_up(&unsuback_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_unsuback_storage_new_set_all, s_mqtt5_unsuback_storage_new_set_all_fn) static int s_mqtt5_puback_storage_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_puback_view puback_options = { .packet_id = 333, .reason_code = AWS_MQTT5_PARC_NO_MATCHING_SUBSCRIBERS, .reason_string = &s_reason_string_cursor, .user_properties = s_user_properties, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), }; struct aws_mqtt5_packet_puback_storage puback_storage; AWS_ZERO_STRUCT(puback_storage); ASSERT_SUCCESS(aws_mqtt5_packet_puback_storage_init(&puback_storage, allocator, &puback_options)); struct aws_mqtt5_packet_puback_view *stored_view = &puback_storage.storage_view; AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(&puback_storage, &puback_options, packet_id); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(&puback_storage, &puback_options, reason_code); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&puback_storage, &puback_options, reason_string); ASSERT_SUCCESS(s_verify_user_properties( &puback_storage.user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_puback_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_puback_storage_clean_up(&puback_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_puback_storage_new_set_all, s_mqtt5_puback_storage_new_set_all_fn) static int s_verify_publish_subscription_identifiers_raw( const uint32_t 
*subscription_identifiers, size_t subscription_identifier_count, const struct aws_mqtt5_packet_publish_view *original_view) { ASSERT_UINT_EQUALS(subscription_identifier_count, original_view->subscription_identifier_count); for (size_t i = 0; i < subscription_identifier_count; ++i) { ASSERT_UINT_EQUALS(subscription_identifiers[i], original_view->subscription_identifiers[i]); } return AWS_OP_SUCCESS; } static int s_verify_publish_subscription_identifiers( const struct aws_mqtt5_packet_publish_storage *publish_storage, const struct aws_mqtt5_packet_publish_view *original_view) { ASSERT_SUCCESS(s_verify_publish_subscription_identifiers_raw( publish_storage->subscription_identifiers.data, aws_array_list_length(&publish_storage->subscription_identifiers), original_view)); const struct aws_mqtt5_packet_publish_view *storage_view = &publish_storage->storage_view; ASSERT_SUCCESS(s_verify_publish_subscription_identifiers_raw( storage_view->subscription_identifiers, storage_view->subscription_identifier_count, original_view)); return AWS_OP_SUCCESS; } static int s_mqtt5_publish_storage_new_set_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor response_topic = aws_byte_cursor_from_c_str(s_response_topic); struct aws_byte_cursor correlation_data = aws_byte_cursor_from_c_str(s_correlation_data); struct aws_byte_cursor content_type = aws_byte_cursor_from_c_str(s_content_type); enum aws_mqtt5_payload_format_indicator payload_format = AWS_MQTT5_PFI_UTF8; struct aws_byte_cursor payload_cursor = aws_byte_cursor_from_c_str(PUBLISH_PAYLOAD); uint32_t subscription_identifiers[] = {2, 128000}; struct aws_mqtt5_packet_publish_view publish_options = { .packet_id = 333, .payload = payload_cursor, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .retain = false, .topic = aws_byte_cursor_from_c_str(PUBLISH_TOPIC), .payload_format = &payload_format, .message_expiry_interval_seconds = &s_message_expiry_interval_seconds, .topic_alias = &s_topic_alias, .response_topic = &response_topic, .correlation_data = &correlation_data, .subscription_identifier_count = AWS_ARRAY_SIZE(subscription_identifiers), .subscription_identifiers = subscription_identifiers, .content_type = &content_type, .user_property_count = AWS_ARRAY_SIZE(s_user_properties), .user_properties = s_user_properties, }; struct aws_mqtt5_packet_publish_storage publish_storage; AWS_ZERO_STRUCT(publish_storage); ASSERT_SUCCESS(aws_mqtt5_packet_publish_storage_init(&publish_storage, allocator, &publish_options)); struct aws_mqtt5_packet_publish_view *stored_view = &publish_storage.storage_view; AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(&publish_storage, &publish_options, packet_id); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(&publish_storage, &publish_options, payload); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(&publish_storage, &publish_options, qos); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_UINT(&publish_storage, &publish_options, retain); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_CURSOR(&publish_storage, &publish_options, topic); /* optional fields */ AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&publish_storage, &publish_options, payload_format); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT( &publish_storage, &publish_options, message_expiry_interval_seconds); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_UINT(&publish_storage, &publish_options, topic_alias); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&publish_storage, &publish_options, response_topic); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&publish_storage, 
&publish_options, correlation_data); AWS_VERIFY_VIEW_STORAGE_RELATIONSHIP_NULLABLE_CURSOR(&publish_storage, &publish_options, content_type); ASSERT_SUCCESS(s_verify_publish_subscription_identifiers(&publish_storage, &publish_options)); ASSERT_SUCCESS(s_verify_user_properties( &publish_storage.user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); ASSERT_SUCCESS(aws_mqtt5_test_verify_user_properties_raw( stored_view->user_property_count, stored_view->user_properties, AWS_ARRAY_SIZE(s_user_properties), s_user_properties)); aws_mqtt5_packet_publish_view_log(stored_view, AWS_LL_DEBUG); aws_mqtt5_packet_publish_storage_clean_up(&publish_storage); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_publish_storage_new_set_all, s_mqtt5_publish_storage_new_set_all_fn) static const enum aws_mqtt5_qos s_maximum_qos_at_least_once = AWS_MQTT5_QOS_AT_LEAST_ONCE; static const enum aws_mqtt5_qos s_maximum_qos_at_most_once = AWS_MQTT5_QOS_AT_MOST_ONCE; static const uint16_t s_keep_alive_interval_seconds = 999; static const uint32_t s_session_expiry_interval = 999; static const uint16_t s_receive_maximum = 999; static const uint32_t s_maximum_packet_size = 999; static const uint16_t s_topic_alias_maximum_to_server = 999; static const uint16_t s_topic_alias_maximum = 999; static const uint16_t s_server_keep_alive = 999; static const bool s_retain_available = false; static const bool s_wildcard_subscriptions_available = false; static const bool s_subscription_identifiers_available = false; static const bool s_shared_subscriptions_available = false; static int mqtt5_negotiated_settings_reset_test_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; /* aws_mqtt5_negotiated_settings used for testing */ struct aws_mqtt5_negotiated_settings negotiated_settings; AWS_ZERO_STRUCT(negotiated_settings); /* Simulate an aws_mqtt5_packet_connect_view with no user set settings */ struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 0, }; /* Apply no client settings to a reset of negotiated_settings */ aws_mqtt5_negotiated_settings_reset(&negotiated_settings, &connect_view); /* Check that all settings are the expected default values */ ASSERT_TRUE(negotiated_settings.maximum_qos == AWS_MQTT5_QOS_AT_LEAST_ONCE); ASSERT_UINT_EQUALS(negotiated_settings.session_expiry_interval, 0); ASSERT_UINT_EQUALS(negotiated_settings.receive_maximum_from_server, AWS_MQTT5_RECEIVE_MAXIMUM); ASSERT_UINT_EQUALS(negotiated_settings.maximum_packet_size_to_server, AWS_MQTT5_MAXIMUM_PACKET_SIZE); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_server, 0); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_client, 0); ASSERT_UINT_EQUALS(negotiated_settings.server_keep_alive, 0); ASSERT_TRUE(negotiated_settings.retain_available); ASSERT_TRUE(negotiated_settings.wildcard_subscriptions_available); ASSERT_TRUE(negotiated_settings.subscription_identifiers_available); ASSERT_TRUE(negotiated_settings.shared_subscriptions_available); ASSERT_FALSE(negotiated_settings.rejoined_session); /* Set client modifiable CONNECT settings */ connect_view.keep_alive_interval_seconds = s_keep_alive_interval_seconds; connect_view.session_expiry_interval_seconds = &s_session_expiry_interval; connect_view.receive_maximum = &s_receive_maximum; connect_view.maximum_packet_size_bytes = &s_maximum_packet_size; connect_view.topic_alias_maximum = &s_topic_alias_maximum; /* Apply client settings to a reset of negotiated settings */ aws_mqtt5_negotiated_settings_reset(&negotiated_settings, 
&connect_view); /* Check that all settings are the expected values with client settings */ ASSERT_TRUE(negotiated_settings.maximum_qos == AWS_MQTT5_QOS_AT_LEAST_ONCE); ASSERT_UINT_EQUALS(negotiated_settings.server_keep_alive, connect_view.keep_alive_interval_seconds); ASSERT_UINT_EQUALS(negotiated_settings.session_expiry_interval, *connect_view.session_expiry_interval_seconds); ASSERT_UINT_EQUALS(negotiated_settings.receive_maximum_from_server, AWS_MQTT5_RECEIVE_MAXIMUM); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_server, 0); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_client, *connect_view.topic_alias_maximum); ASSERT_TRUE(negotiated_settings.retain_available); ASSERT_TRUE(negotiated_settings.wildcard_subscriptions_available); ASSERT_TRUE(negotiated_settings.subscription_identifiers_available); ASSERT_TRUE(negotiated_settings.shared_subscriptions_available); ASSERT_FALSE(negotiated_settings.rejoined_session); /* Reset connect view to clean defaults */ connect_view.keep_alive_interval_seconds = 0; connect_view.session_expiry_interval_seconds = NULL; connect_view.receive_maximum = NULL; connect_view.maximum_packet_size_bytes = NULL; connect_view.topic_alias_maximum = NULL; /* Change remaining default properties on negotiated_settings to non-default values */ negotiated_settings.maximum_qos = AWS_MQTT5_QOS_EXACTLY_ONCE; negotiated_settings.topic_alias_maximum_to_server = s_topic_alias_maximum_to_server; negotiated_settings.topic_alias_maximum_to_client = s_topic_alias_maximum_to_server; negotiated_settings.retain_available = s_retain_available; negotiated_settings.wildcard_subscriptions_available = s_wildcard_subscriptions_available; negotiated_settings.subscription_identifiers_available = s_subscription_identifiers_available; negotiated_settings.shared_subscriptions_available = s_shared_subscriptions_available; /* Apply no client settings to a reset of negotiated_settings */ aws_mqtt5_negotiated_settings_reset(&negotiated_settings, &connect_view); /* Check that all settings are the expected default values */ ASSERT_TRUE(negotiated_settings.maximum_qos == AWS_MQTT5_QOS_AT_LEAST_ONCE); ASSERT_UINT_EQUALS(negotiated_settings.session_expiry_interval, 0); ASSERT_UINT_EQUALS(negotiated_settings.receive_maximum_from_server, AWS_MQTT5_RECEIVE_MAXIMUM); ASSERT_UINT_EQUALS(negotiated_settings.maximum_packet_size_to_server, AWS_MQTT5_MAXIMUM_PACKET_SIZE); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_server, 0); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_client, 0); ASSERT_UINT_EQUALS(negotiated_settings.server_keep_alive, 0); ASSERT_TRUE(negotiated_settings.retain_available); ASSERT_TRUE(negotiated_settings.wildcard_subscriptions_available); ASSERT_TRUE(negotiated_settings.subscription_identifiers_available); ASSERT_TRUE(negotiated_settings.shared_subscriptions_available); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_negotiated_settings_reset_test, mqtt5_negotiated_settings_reset_test_fn) static int mqtt5_negotiated_settings_apply_connack_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* aws_mqtt5_negotiated_settings used for testing */ struct aws_mqtt5_negotiated_settings negotiated_settings; AWS_ZERO_STRUCT(negotiated_settings); /* An aws_mqtt5_packet_connect_view with no user set settings to reset negotiated_settings */ struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 0, }; /* reset negotiated_settings to default values */ 
aws_mqtt5_negotiated_settings_reset(&negotiated_settings, &connect_view); /* Simulate an aws_mqtt5_packet_connack_view with no user set settings */ struct aws_mqtt5_packet_connack_view connack_view = { .session_present = false, }; /* Check if everything defaults appropriately if no properties are set in either direction */ aws_mqtt5_negotiated_settings_apply_connack(&negotiated_settings, &connack_view); ASSERT_TRUE(negotiated_settings.maximum_qos == AWS_MQTT5_QOS_AT_LEAST_ONCE); ASSERT_UINT_EQUALS(negotiated_settings.session_expiry_interval, 0); ASSERT_UINT_EQUALS(negotiated_settings.receive_maximum_from_server, AWS_MQTT5_RECEIVE_MAXIMUM); ASSERT_UINT_EQUALS(negotiated_settings.maximum_packet_size_to_server, AWS_MQTT5_MAXIMUM_PACKET_SIZE); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_server, 0); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_client, 0); ASSERT_UINT_EQUALS(negotiated_settings.server_keep_alive, 0); ASSERT_TRUE(negotiated_settings.retain_available); ASSERT_TRUE(negotiated_settings.wildcard_subscriptions_available); ASSERT_TRUE(negotiated_settings.subscription_identifiers_available); ASSERT_TRUE(negotiated_settings.shared_subscriptions_available); ASSERT_FALSE(negotiated_settings.rejoined_session); /* Apply server settings to properties in connack_view */ connack_view.session_present = true; connack_view.maximum_qos = &s_maximum_qos_at_least_once; connack_view.session_expiry_interval = &s_session_expiry_interval; connack_view.receive_maximum = &s_receive_maximum; connack_view.retain_available = &s_retain_available; connack_view.maximum_packet_size = &s_maximum_packet_size; connack_view.topic_alias_maximum = &s_topic_alias_maximum_to_server; connack_view.wildcard_subscriptions_available = &s_wildcard_subscriptions_available; connack_view.subscription_identifiers_available = &s_subscription_identifiers_available; connack_view.shared_subscriptions_available = &s_shared_subscriptions_available; connack_view.server_keep_alive = &s_server_keep_alive; aws_mqtt5_negotiated_settings_apply_connack(&negotiated_settings, &connack_view); ASSERT_TRUE(negotiated_settings.rejoined_session); ASSERT_TRUE(negotiated_settings.maximum_qos == s_maximum_qos_at_least_once); ASSERT_UINT_EQUALS(negotiated_settings.session_expiry_interval, *connack_view.session_expiry_interval); ASSERT_UINT_EQUALS(negotiated_settings.receive_maximum_from_server, *connack_view.receive_maximum); ASSERT_UINT_EQUALS(negotiated_settings.maximum_packet_size_to_server, *connack_view.maximum_packet_size); ASSERT_UINT_EQUALS(negotiated_settings.server_keep_alive, *connack_view.server_keep_alive); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_server, *connack_view.topic_alias_maximum); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_client, 0); ASSERT_FALSE(negotiated_settings.retain_available); ASSERT_FALSE(negotiated_settings.wildcard_subscriptions_available); ASSERT_FALSE(negotiated_settings.subscription_identifiers_available); ASSERT_FALSE(negotiated_settings.shared_subscriptions_available); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_negotiated_settings_apply_connack_test, mqtt5_negotiated_settings_apply_connack_test_fn) static int mqtt5_negotiated_settings_server_override_test_fn(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* aws_mqtt5_negotiated_settings used for client */ struct aws_mqtt5_negotiated_settings negotiated_settings; AWS_ZERO_STRUCT(negotiated_settings); /* An aws_mqtt5_packet_connect_view with no user set settings to 
reset negotiated_settings */ struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 0, }; /* reset negotiated_settings to default values */ aws_mqtt5_negotiated_settings_reset(&negotiated_settings, &connect_view); /* Simulate negotiated settings that the client may have set to values different from incoming CONNACK settings */ negotiated_settings.session_expiry_interval = 123; negotiated_settings.maximum_qos = s_maximum_qos_at_least_once; negotiated_settings.receive_maximum_from_server = 123; negotiated_settings.maximum_packet_size_to_server = 123; negotiated_settings.topic_alias_maximum_to_server = 123; negotiated_settings.topic_alias_maximum_to_client = 123; negotiated_settings.server_keep_alive = 123; /* CONNACK settings from a server that should overwrite client settings */ struct aws_mqtt5_packet_connack_view connack_view = { .session_present = false, .server_keep_alive = &s_keep_alive_interval_seconds, .maximum_qos = &s_maximum_qos_at_most_once, .session_expiry_interval = &s_session_expiry_interval, .receive_maximum = &s_receive_maximum, .maximum_packet_size = &s_maximum_packet_size, .topic_alias_maximum = &s_topic_alias_maximum, .retain_available = &s_retain_available, .wildcard_subscriptions_available = &s_wildcard_subscriptions_available, .subscription_identifiers_available = &s_subscription_identifiers_available, .shared_subscriptions_available = &s_shared_subscriptions_available, }; /* Apply CONNACK settings to client values in negotiated_settings */ aws_mqtt5_negotiated_settings_apply_connack(&negotiated_settings, &connack_view); /* Assert values that should have been overwritten have been overwritten */ ASSERT_UINT_EQUALS(negotiated_settings.server_keep_alive, s_keep_alive_interval_seconds); ASSERT_TRUE(negotiated_settings.maximum_qos == s_maximum_qos_at_most_once); ASSERT_UINT_EQUALS(negotiated_settings.session_expiry_interval, s_session_expiry_interval); ASSERT_UINT_EQUALS(negotiated_settings.receive_maximum_from_server, s_receive_maximum); ASSERT_UINT_EQUALS(negotiated_settings.maximum_packet_size_to_server, s_maximum_packet_size); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_server, s_topic_alias_maximum); ASSERT_FALSE(negotiated_settings.retain_available); ASSERT_FALSE(negotiated_settings.wildcard_subscriptions_available); ASSERT_FALSE(negotiated_settings.subscription_identifiers_available); ASSERT_FALSE(negotiated_settings.shared_subscriptions_available); /* reset negotiated_settings to default values */ aws_mqtt5_negotiated_settings_reset(&negotiated_settings, &connect_view); /* Simulate negotiated settings that would change based on default/missing settings from a CONNACK */ negotiated_settings.session_expiry_interval = s_session_expiry_interval_seconds; /* NULL CONNACK values that result in an override in negotiated settings */ connack_view.server_keep_alive = NULL; connack_view.topic_alias_maximum = NULL; connack_view.maximum_qos = NULL; connack_view.session_expiry_interval = NULL; connack_view.receive_maximum = NULL; connack_view.retain_available = NULL; connack_view.maximum_packet_size = NULL; connack_view.wildcard_subscriptions_available = NULL; connack_view.subscription_identifiers_available = NULL; connack_view.shared_subscriptions_available = NULL; /* Apply CONNACK settings to client values in negotiated_settings */ aws_mqtt5_negotiated_settings_apply_connack(&negotiated_settings, &connack_view); /* Assert values that should have been overwritten have been overwritten */ 
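/* (with the CONNACK fields NULLed out above, negotiated values fall back to what the client requested or to protocol defaults) */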
ASSERT_UINT_EQUALS(negotiated_settings.server_keep_alive, 0); ASSERT_UINT_EQUALS(negotiated_settings.topic_alias_maximum_to_server, 0); ASSERT_TRUE(negotiated_settings.maximum_qos == s_maximum_qos_at_least_once); ASSERT_UINT_EQUALS(negotiated_settings.session_expiry_interval, s_session_expiry_interval_seconds); ASSERT_TRUE(negotiated_settings.retain_available); ASSERT_UINT_EQUALS(negotiated_settings.maximum_packet_size_to_server, AWS_MQTT5_MAXIMUM_PACKET_SIZE); ASSERT_TRUE(negotiated_settings.wildcard_subscriptions_available); ASSERT_TRUE(negotiated_settings.subscription_identifiers_available); ASSERT_TRUE(negotiated_settings.shared_subscriptions_available); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_negotiated_settings_server_override_test, mqtt5_negotiated_settings_server_override_test_fn) static const struct aws_byte_cursor s_topic = { .ptr = (uint8_t *)s_unsub_topic_filter1, .len = AWS_ARRAY_SIZE(s_unsub_topic_filter1) - 1, }; static const char s_payload[] = "ThePayload"; static const struct aws_byte_cursor s_payload_cursor = { .ptr = (uint8_t *)s_payload, .len = AWS_ARRAY_SIZE(s_payload) - 1, }; /* test that allocates packet ids from an empty table. */ static int s_mqtt5_operation_bind_packet_id_empty_table_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = s_topic, .payload = s_payload_cursor, }; struct aws_mqtt5_operation_publish *publish_operation = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_view, NULL); struct aws_mqtt5_client_operational_state operational_state; aws_mqtt5_client_operational_state_init(&operational_state, allocator, NULL); operational_state.next_mqtt_packet_id = 1; ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&publish_operation->base, &operational_state)); ASSERT_UINT_EQUALS(1, aws_mqtt5_operation_get_packet_id(&publish_operation->base)); ASSERT_UINT_EQUALS(2, operational_state.next_mqtt_packet_id); aws_mqtt5_operation_set_packet_id(&publish_operation->base, 0); operational_state.next_mqtt_packet_id = 5; ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&publish_operation->base, &operational_state)); ASSERT_UINT_EQUALS(5, aws_mqtt5_operation_get_packet_id(&publish_operation->base)); ASSERT_UINT_EQUALS(6, operational_state.next_mqtt_packet_id); aws_mqtt5_operation_set_packet_id(&publish_operation->base, 0); operational_state.next_mqtt_packet_id = 65535; ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&publish_operation->base, &operational_state)); ASSERT_UINT_EQUALS(65535, aws_mqtt5_operation_get_packet_id(&publish_operation->base)); ASSERT_UINT_EQUALS(1, operational_state.next_mqtt_packet_id); aws_mqtt5_client_operational_state_clean_up(&operational_state); aws_mqtt5_operation_release(&publish_operation->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_bind_packet_id_empty_table, s_mqtt5_operation_bind_packet_id_empty_table_fn) static void s_create_operations( struct aws_allocator *allocator, struct aws_mqtt5_operation_publish **publish_op, struct aws_mqtt5_operation_subscribe **subscribe_op, struct aws_mqtt5_operation_unsubscribe **unsubscribe_op) { struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = s_topic, .payload = s_payload_cursor, }; *publish_op = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_view, NULL); struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_subscriptions, .subscription_count = 
AWS_ARRAY_SIZE(s_subscriptions), }; *subscribe_op = aws_mqtt5_operation_subscribe_new(allocator, NULL, &subscribe_view, NULL); struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = { .topic_filters = s_topics, .topic_filter_count = AWS_ARRAY_SIZE(s_topics), }; *unsubscribe_op = aws_mqtt5_operation_unsubscribe_new(allocator, NULL, &unsubscribe_view, NULL); } static void s_seed_unacked_operations( struct aws_mqtt5_client_operational_state *operational_state, struct aws_mqtt5_operation_publish *pending_publish, struct aws_mqtt5_operation_subscribe *pending_subscribe, struct aws_mqtt5_operation_unsubscribe *pending_unsubscribe) { aws_hash_table_put( &operational_state->unacked_operations_table, &pending_publish->options_storage.storage_view.packet_id, &pending_publish->base, NULL); aws_linked_list_push_back(&operational_state->unacked_operations, &pending_publish->base.node); aws_hash_table_put( &operational_state->unacked_operations_table, &pending_subscribe->options_storage.storage_view.packet_id, &pending_subscribe->base, NULL); aws_linked_list_push_back(&operational_state->unacked_operations, &pending_subscribe->base.node); aws_hash_table_put( &operational_state->unacked_operations_table, &pending_unsubscribe->options_storage.storage_view.packet_id, &pending_unsubscribe->base, NULL); aws_linked_list_push_back(&operational_state->unacked_operations, &pending_unsubscribe->base.node); } /* test that allocates packet ids from a table with entries that overlap the next id space */ static int s_mqtt5_operation_bind_packet_id_multiple_with_existing_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_publish *pending_publish = NULL; struct aws_mqtt5_operation_subscribe *pending_subscribe = NULL; struct aws_mqtt5_operation_unsubscribe *pending_unsubscribe = NULL; s_create_operations(allocator, &pending_publish, &pending_subscribe, &pending_unsubscribe); aws_mqtt5_operation_set_packet_id(&pending_publish->base, 1); aws_mqtt5_operation_set_packet_id(&pending_subscribe->base, 3); aws_mqtt5_operation_set_packet_id(&pending_unsubscribe->base, 5); struct aws_mqtt5_client_operational_state operational_state; aws_mqtt5_client_operational_state_init(&operational_state, allocator, NULL); s_seed_unacked_operations(&operational_state, pending_publish, pending_subscribe, pending_unsubscribe); struct aws_mqtt5_operation_publish *new_publish = NULL; struct aws_mqtt5_operation_subscribe *new_subscribe = NULL; struct aws_mqtt5_operation_unsubscribe *new_unsubscribe = NULL; s_create_operations(allocator, &new_publish, &new_subscribe, &new_unsubscribe); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_publish->base, &operational_state)); ASSERT_UINT_EQUALS(2, aws_mqtt5_operation_get_packet_id(&new_publish->base)); ASSERT_UINT_EQUALS(3, operational_state.next_mqtt_packet_id); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_subscribe->base, &operational_state)); ASSERT_UINT_EQUALS(4, aws_mqtt5_operation_get_packet_id(&new_subscribe->base)); ASSERT_UINT_EQUALS(5, operational_state.next_mqtt_packet_id); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_unsubscribe->base, &operational_state)); ASSERT_UINT_EQUALS(6, aws_mqtt5_operation_get_packet_id(&new_unsubscribe->base)); ASSERT_UINT_EQUALS(7, operational_state.next_mqtt_packet_id); aws_mqtt5_client_operational_state_clean_up(&operational_state); aws_mqtt5_operation_release(&new_publish->base); aws_mqtt5_operation_release(&new_subscribe->base); aws_mqtt5_operation_release(&new_unsubscribe->base); return 
AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_bind_packet_id_multiple_with_existing, s_mqtt5_operation_bind_packet_id_multiple_with_existing_fn) /* test that allocates packet ids from a table where the next id forces an id wraparound */ static int s_mqtt5_operation_bind_packet_id_multiple_with_wrap_around_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_publish *pending_publish = NULL; struct aws_mqtt5_operation_subscribe *pending_subscribe = NULL; struct aws_mqtt5_operation_unsubscribe *pending_unsubscribe = NULL; s_create_operations(allocator, &pending_publish, &pending_subscribe, &pending_unsubscribe); aws_mqtt5_operation_set_packet_id(&pending_publish->base, 65533); aws_mqtt5_operation_set_packet_id(&pending_subscribe->base, 65535); aws_mqtt5_operation_set_packet_id(&pending_unsubscribe->base, 1); struct aws_mqtt5_client_operational_state operational_state; aws_mqtt5_client_operational_state_init(&operational_state, allocator, NULL); operational_state.next_mqtt_packet_id = 65532; s_seed_unacked_operations(&operational_state, pending_publish, pending_subscribe, pending_unsubscribe); struct aws_mqtt5_operation_publish *new_publish = NULL; struct aws_mqtt5_operation_subscribe *new_subscribe = NULL; struct aws_mqtt5_operation_unsubscribe *new_unsubscribe = NULL; s_create_operations(allocator, &new_publish, &new_subscribe, &new_unsubscribe); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_publish->base, &operational_state)); ASSERT_UINT_EQUALS(65532, aws_mqtt5_operation_get_packet_id(&new_publish->base)); ASSERT_UINT_EQUALS(65533, operational_state.next_mqtt_packet_id); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_subscribe->base, &operational_state)); ASSERT_UINT_EQUALS(65534, aws_mqtt5_operation_get_packet_id(&new_subscribe->base)); ASSERT_UINT_EQUALS(65535, operational_state.next_mqtt_packet_id); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_unsubscribe->base, &operational_state)); ASSERT_UINT_EQUALS(2, aws_mqtt5_operation_get_packet_id(&new_unsubscribe->base)); ASSERT_UINT_EQUALS(3, operational_state.next_mqtt_packet_id); aws_mqtt5_client_operational_state_clean_up(&operational_state); aws_mqtt5_operation_release(&new_publish->base); aws_mqtt5_operation_release(&new_subscribe->base); aws_mqtt5_operation_release(&new_unsubscribe->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_bind_packet_id_multiple_with_wrap_around, s_mqtt5_operation_bind_packet_id_multiple_with_wrap_around_fn) /* test that fails to allocate packet ids from a full table */ static int s_mqtt5_operation_bind_packet_id_full_table_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = s_topic, .payload = s_payload_cursor, }; struct aws_mqtt5_client_operational_state operational_state; aws_mqtt5_client_operational_state_init(&operational_state, allocator, NULL); for (uint16_t i = 0; i < UINT16_MAX; ++i) { struct aws_mqtt5_operation_publish *publish_op = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_view, NULL); aws_mqtt5_operation_set_packet_id(&publish_op->base, i + 1); aws_hash_table_put( &operational_state.unacked_operations_table, &publish_op->options_storage.storage_view.packet_id, &publish_op->base, NULL); aws_linked_list_push_back(&operational_state.unacked_operations, &publish_op->base.node); } struct aws_mqtt5_operation_publish *new_publish = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_view, NULL); 
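/* every packet id (1-65535) is now seeded as unacked, so binding an id to the new QoS1 publish must fail and the next packet id counter should remain at 1 */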
ASSERT_FAILS(aws_mqtt5_operation_bind_packet_id(&new_publish->base, &operational_state)); ASSERT_UINT_EQUALS(1, operational_state.next_mqtt_packet_id); aws_mqtt5_client_operational_state_clean_up(&operational_state); aws_mqtt5_operation_release(&new_publish->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_bind_packet_id_full_table, s_mqtt5_operation_bind_packet_id_full_table_fn) /* test that skips allocation because the packet is not a QOS1+PUBLISH */ static int s_mqtt5_operation_bind_packet_id_not_valid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .topic = s_topic, .payload = s_payload_cursor, }; struct aws_mqtt5_operation_publish *new_publish = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_view, NULL); struct aws_mqtt5_client_operational_state operational_state; aws_mqtt5_client_operational_state_init(&operational_state, allocator, NULL); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_publish->base, &operational_state)); ASSERT_UINT_EQUALS(0, aws_mqtt5_operation_get_packet_id(&new_publish->base)); ASSERT_UINT_EQUALS(1, operational_state.next_mqtt_packet_id); aws_mqtt5_client_operational_state_clean_up(&operational_state); aws_mqtt5_operation_release(&new_publish->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_bind_packet_id_not_valid, s_mqtt5_operation_bind_packet_id_not_valid_fn) /* test that skips allocation because the packet already has an id bound */ static int s_mqtt5_operation_bind_packet_id_already_bound_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_packet_publish_view publish_view = { .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .topic = s_topic, .payload = s_payload_cursor, }; struct aws_mqtt5_operation_publish *new_publish = aws_mqtt5_operation_publish_new(allocator, NULL, &publish_view, NULL); aws_mqtt5_operation_set_packet_id(&new_publish->base, 2); struct aws_mqtt5_client_operational_state operational_state; aws_mqtt5_client_operational_state_init(&operational_state, allocator, NULL); ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(&new_publish->base, &operational_state)); ASSERT_UINT_EQUALS(2, aws_mqtt5_operation_get_packet_id(&new_publish->base)); ASSERT_UINT_EQUALS(1, operational_state.next_mqtt_packet_id); aws_mqtt5_client_operational_state_clean_up(&operational_state); aws_mqtt5_operation_release(&new_publish->base); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_bind_packet_id_already_bound, s_mqtt5_operation_bind_packet_id_already_bound_fn) /* * A large suite of test infrastructure oriented towards exercising and checking the result of servicing an mqtt5 * client's operational state * * We mock io message acquisition/sending, but no other mocks are needed. We use a dummy * hand-initialized client to hold additional state (current_state) needed by the operational processing logic.
*/ struct aws_mqtt5_operation_processing_test_context { struct aws_allocator *allocator; struct aws_mqtt5_client dummy_client; struct aws_mqtt5_client_options_storage dummy_client_options; struct aws_mqtt5_client_vtable vtable; struct aws_channel_slot dummy_slot; struct aws_mqtt5_encoder verification_encoder; struct aws_array_list output_io_messages; void *failed_io_message_buffer; struct aws_array_list completed_operation_error_codes; }; /* io message mocks */ static struct aws_io_message *s_aws_channel_acquire_message_from_pool_success_fn( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint, void *user_data) { (void)channel; (void)message_type; (void)size_hint; struct aws_mqtt5_operation_processing_test_context *test_context = user_data; struct aws_allocator *allocator = test_context->allocator; struct aws_io_message *new_message = aws_mem_calloc(allocator, 1, sizeof(struct aws_io_message)); new_message->allocator = allocator; aws_byte_buf_init(&new_message->message_data, allocator, size_hint); return new_message; } static struct aws_io_message *s_aws_channel_acquire_message_from_pool_success_small_fn( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint, void *user_data) { (void)channel; (void)message_type; (void)size_hint; struct aws_mqtt5_operation_processing_test_context *test_context = user_data; struct aws_allocator *allocator = test_context->allocator; struct aws_io_message *new_message = aws_mem_calloc(allocator, 1, sizeof(struct aws_io_message)); new_message->allocator = allocator; aws_byte_buf_init(&new_message->message_data, allocator, 35); return new_message; } static struct aws_io_message *s_aws_channel_acquire_message_from_pool_success_send_failure_fn( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint, void *user_data) { (void)channel; (void)message_type; struct aws_mqtt5_operation_processing_test_context *test_context = user_data; struct aws_allocator *allocator = test_context->allocator; struct aws_io_message *new_message = aws_mem_calloc(allocator, 1, sizeof(struct aws_io_message)); new_message->allocator = allocator; aws_byte_buf_init(&new_message->message_data, allocator, size_hint); test_context->failed_io_message_buffer = new_message->message_data.buffer; return new_message; } static struct aws_io_message *s_aws_channel_acquire_message_from_pool_failure_fn( struct aws_channel *channel, enum aws_io_message_type message_type, size_t size_hint, void *user_data) { (void)channel; (void)message_type; (void)size_hint; (void)user_data; aws_raise_error(AWS_ERROR_INVALID_STATE); return NULL; } static int s_aws_channel_slot_send_message_success_fn( struct aws_channel_slot *slot, struct aws_io_message *message, enum aws_channel_direction dir, void *user_data) { (void)slot; (void)dir; struct aws_mqtt5_operation_processing_test_context *test_context = user_data; aws_array_list_push_back(&test_context->output_io_messages, &message); return AWS_OP_SUCCESS; } static int s_aws_channel_slot_send_message_failure_fn( struct aws_channel_slot *slot, struct aws_io_message *message, enum aws_channel_direction dir, void *user_data) { (void)slot; (void)message; (void)dir; (void)user_data; return aws_raise_error(AWS_ERROR_INVALID_STATE); } static void s_aws_mqtt5_operation_processing_test_context_init( struct aws_mqtt5_operation_processing_test_context *test_context, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*test_context); test_context->allocator = allocator; 
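/* stands up the dummy client's operational state, encoder, topic alias resolvers, mocked io vtable, and verification helpers used by the operation processing tests */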
aws_mqtt5_client_operational_state_init( &test_context->dummy_client.operational_state, allocator, &test_context->dummy_client); struct aws_mqtt5_client_options_storage test_storage = { .ack_timeout_seconds = 0, }; test_context->dummy_client.config = &test_storage; struct aws_mqtt5_encoder_options encoder_options = { .client = &test_context->dummy_client, }; aws_mqtt5_encoder_init(&test_context->dummy_client.encoder, allocator, &encoder_options); aws_mqtt5_inbound_topic_alias_resolver_init(&test_context->dummy_client.inbound_topic_alias_resolver, allocator); test_context->dummy_client.outbound_topic_alias_resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_DISABLED); test_context->vtable = *aws_mqtt5_client_get_default_vtable(); test_context->vtable.aws_channel_acquire_message_from_pool_fn = s_aws_channel_acquire_message_from_pool_success_fn; test_context->vtable.aws_channel_slot_send_message_fn = s_aws_channel_slot_send_message_success_fn; test_context->vtable.vtable_user_data = test_context; test_context->dummy_client.vtable = &test_context->vtable; /* this keeps the operation processing logic from crashing when dereferencing client->slot->channel */ test_context->dummy_client.slot = &test_context->dummy_slot; /* this keeps operation processing tests from failing operations due to a 0 maximum packet size */ test_context->dummy_client.negotiated_settings.maximum_packet_size_to_server = AWS_MQTT5_MAXIMUM_PACKET_SIZE; test_context->dummy_client.negotiated_settings.maximum_qos = AWS_MQTT5_QOS_AT_LEAST_ONCE; /* this keeps operation processing tests from crashing when dereferencing config options */ test_context->dummy_client.config = &test_context->dummy_client_options; aws_array_list_init_dynamic(&test_context->output_io_messages, allocator, 0, sizeof(struct aws_io_message *)); struct aws_mqtt5_encoder_options verification_encoder_options = { .client = NULL, }; aws_mqtt5_encoder_init(&test_context->verification_encoder, allocator, &verification_encoder_options); aws_array_list_init_dynamic(&test_context->completed_operation_error_codes, allocator, 0, sizeof(int)); } static void s_aws_mqtt5_operation_processing_test_context_clean_up( struct aws_mqtt5_operation_processing_test_context *test_context) { for (size_t i = 0; i < aws_array_list_length(&test_context->output_io_messages); ++i) { struct aws_io_message *message = NULL; aws_array_list_get_at(&test_context->output_io_messages, &message, i); aws_byte_buf_clean_up(&message->message_data); aws_mem_release(message->allocator, message); } aws_array_list_clean_up(&test_context->output_io_messages); aws_mqtt5_encoder_clean_up(&test_context->verification_encoder); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&test_context->dummy_client.inbound_topic_alias_resolver); aws_mqtt5_outbound_topic_alias_resolver_destroy(test_context->dummy_client.outbound_topic_alias_resolver); aws_mqtt5_encoder_clean_up(&test_context->dummy_client.encoder); aws_mqtt5_client_operational_state_clean_up(&test_context->dummy_client.operational_state); aws_array_list_clean_up(&test_context->completed_operation_error_codes); } static void s_aws_mqtt5_operation_processing_test_context_enqueue_op( struct aws_mqtt5_operation_processing_test_context *test_context, struct aws_mqtt5_operation *operation) { aws_linked_list_push_back(&test_context->dummy_client.operational_state.queued_operations, &operation->node); } /* Test that just runs the operational processing logic with an empty operation queue */ static int 
s_mqtt5_operation_processing_nothing_empty_queue_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); for (int32_t i = 0; i <= AWS_MCS_TERMINATED; ++i) { enum aws_mqtt5_client_state state = i; test_context.dummy_client.current_state = state; ASSERT_SUCCESS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); } s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_processing_nothing_empty_queue, s_mqtt5_operation_processing_nothing_empty_queue_fn) static struct aws_mqtt5_operation_subscribe *s_make_simple_subscribe_operation(struct aws_allocator *allocator) { struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), }; return aws_mqtt5_operation_subscribe_new(allocator, NULL, &subscribe_view, NULL); } /* * Test that runs the operational processing logic for the MQTT_CONNECT state where the pending operation * is not valid to be sent by this state (a SUBSCRIBE) */ static int s_mqtt5_operation_processing_nothing_mqtt_connect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_MQTT_CONNECT; struct aws_mqtt5_operation_subscribe *subscribe_op = s_make_simple_subscribe_operation(allocator); s_aws_mqtt5_operation_processing_test_context_enqueue_op(&test_context, &subscribe_op->base); ASSERT_SUCCESS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_processing_nothing_mqtt_connect, s_mqtt5_operation_processing_nothing_mqtt_connect_fn) /* * Test that runs the operational processing logic for the CLEAN_DISCONNECT state where the pending operation * is not valid to be sent by this state (a SUBSCRIBE) */ static int s_mqtt5_operation_processing_nothing_clean_disconnect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CLEAN_DISCONNECT; struct aws_mqtt5_operation_subscribe *subscribe_op = s_make_simple_subscribe_operation(allocator); s_aws_mqtt5_operation_processing_test_context_enqueue_op(&test_context, &subscribe_op->base); ASSERT_SUCCESS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_nothing_clean_disconnect, s_mqtt5_operation_processing_nothing_clean_disconnect_fn) static struct aws_mqtt5_operation_connect *s_make_simple_connect_operation(struct aws_allocator *allocator) { struct aws_mqtt5_packet_connect_view connect_view = { .keep_alive_interval_seconds = 0, }; return 
aws_mqtt5_operation_connect_new(allocator, &connect_view); } /* * Test that runs the operational processing logic for the MQTT_CONNECT state with a valid CONNECT operation, but * the pending_write_completion flag is set and should block any further processing */ static int s_mqtt5_operation_processing_nothing_pending_write_completion_mqtt_connect_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_MQTT_CONNECT; struct aws_mqtt5_operation_connect *connect_op = s_make_simple_connect_operation(allocator); s_aws_mqtt5_operation_processing_test_context_enqueue_op(&test_context, &connect_op->base); test_context.dummy_client.operational_state.pending_write_completion = true; ASSERT_SUCCESS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_nothing_pending_write_completion_mqtt_connect, s_mqtt5_operation_processing_nothing_pending_write_completion_mqtt_connect_fn) /* * Test that runs the operational processing logic for the CONNECTED state with a valid SUBSCRIBE operation, but * the pending_write_completion flag is set and should block any further processing */ static int s_mqtt5_operation_processing_nothing_pending_write_completion_connected_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_operation_subscribe *subscribe_op = s_make_simple_subscribe_operation(allocator); s_aws_mqtt5_operation_processing_test_context_enqueue_op(&test_context, &subscribe_op->base); test_context.dummy_client.operational_state.pending_write_completion = true; ASSERT_SUCCESS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_nothing_pending_write_completion_connected, s_mqtt5_operation_processing_nothing_pending_write_completion_connected_fn) static struct aws_mqtt5_operation_disconnect *s_make_simple_disconnect_operation(struct aws_allocator *allocator) { struct aws_mqtt5_packet_disconnect_view disconnect_view = { .reason_code = AWS_MQTT5_DRC_ADMINISTRATIVE_ACTION, }; return aws_mqtt5_operation_disconnect_new(allocator, &disconnect_view, NULL, NULL); } /* * Test that runs the operational processing logic for the CLEAN_DISCONNECT state with a valid DISCONNECT operation, * but the pending_write_completion flag is set and should block any further processing */ static int s_mqtt5_operation_processing_nothing_pending_write_completion_clean_disconnect_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CLEAN_DISCONNECT; struct aws_mqtt5_operation_disconnect *disconnect_op = 
s_make_simple_disconnect_operation(allocator); s_aws_mqtt5_operation_processing_test_context_enqueue_op(&test_context, &disconnect_op->base); test_context.dummy_client.operational_state.pending_write_completion = true; ASSERT_SUCCESS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_nothing_pending_write_completion_clean_disconnect, s_mqtt5_operation_processing_nothing_pending_write_completion_clean_disconnect_fn) /* * Test that runs the operational processing logic for the CONNECTED state with a valid SUBSCRIBE operation, * but the io message acquisition call fails */ static int s_mqtt5_operation_processing_failure_message_allocation_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.vtable.aws_channel_acquire_message_from_pool_fn = s_aws_channel_acquire_message_from_pool_failure_fn; test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_operation_subscribe *subscribe_op = s_make_simple_subscribe_operation(allocator); s_aws_mqtt5_operation_processing_test_context_enqueue_op(&test_context, &subscribe_op->base); ASSERT_FAILS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_failure_message_allocation, s_mqtt5_operation_processing_failure_message_allocation_fn) /* * Test that runs the operational processing logic for the CONNECTED state with a valid SUBSCRIBE operation, * but the io message send call fails */ static int s_mqtt5_operation_processing_failure_message_send_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.vtable.aws_channel_slot_send_message_fn = s_aws_channel_slot_send_message_failure_fn; test_context.vtable.aws_channel_acquire_message_from_pool_fn = s_aws_channel_acquire_message_from_pool_success_send_failure_fn; test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_operation_subscribe *subscribe_op = s_make_simple_subscribe_operation(allocator); s_aws_mqtt5_operation_processing_test_context_enqueue_op(&test_context, &subscribe_op->base); ASSERT_FAILS(aws_mqtt5_client_service_operational_state(&test_context.dummy_client.operational_state)); ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.output_io_messages)); aws_mem_release(test_context.allocator, test_context.failed_io_message_buffer); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_processing_failure_message_send, s_mqtt5_operation_processing_failure_message_send_fn) static int s_verify_operation_list_versus_expected( struct aws_linked_list *operation_list, struct aws_mqtt5_operation **expected_operations, size_t expected_operations_size) { struct aws_linked_list_node *node = aws_linked_list_begin(operation_list); for (size_t i = 0; i < 
expected_operations_size; ++i) { ASSERT_TRUE(node != aws_linked_list_end(operation_list)); struct aws_mqtt5_operation *operation = expected_operations[i]; struct aws_mqtt5_operation *queued_operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node); ASSERT_PTR_EQUALS(operation, queued_operation); node = aws_linked_list_next(node); } ASSERT_TRUE(node == aws_linked_list_end(operation_list)); return AWS_OP_SUCCESS; } struct aws_mqtt5_simple_operation_processing_write_test_context { size_t requested_service_count; struct aws_mqtt5_operation **initial_operations; size_t initial_operations_size; struct aws_mqtt5_operation **expected_written; size_t expected_written_size; struct aws_mqtt5_operation **expected_write_completions; size_t expected_write_completions_size; struct aws_mqtt5_operation **expected_pending_acks; size_t expected_pending_acks_size; struct aws_mqtt5_operation **expected_queued; size_t expected_queued_size; }; /* * Basic success test with actual output via 1 or more io messages */ static int s_do_simple_operation_processing_io_message_write_test( struct aws_allocator *allocator, struct aws_mqtt5_operation_processing_test_context *test_context, struct aws_mqtt5_simple_operation_processing_write_test_context *write_context) { /* add operations to the pending queue */ for (size_t i = 0; i < write_context->initial_operations_size; ++i) { s_aws_mqtt5_operation_processing_test_context_enqueue_op(test_context, write_context->initial_operations[i]); } /* service the operational state the requested number of times. reset pending write completion between calls */ for (size_t i = 0; i < write_context->requested_service_count; ++i) { ASSERT_SUCCESS(aws_mqtt5_client_service_operational_state(&test_context->dummy_client.operational_state)); test_context->dummy_client.operational_state.pending_write_completion = false; } /* # of outputted io messages should match the requested number of service calls */ ASSERT_UINT_EQUALS( write_context->requested_service_count, aws_array_list_length(&test_context->output_io_messages)); /* encode all of the messages that we expect to have sent as a result of processing into one large buffer */ struct aws_byte_buf verification_buffer; aws_byte_buf_init(&verification_buffer, allocator, 4096); for (size_t i = 0; i < write_context->expected_written_size; ++i) { struct aws_mqtt5_operation *operation = write_context->expected_written[i]; aws_mqtt5_encoder_append_packet_encoding( &test_context->verification_encoder, operation->packet_type, operation->packet_view); aws_mqtt5_encoder_encode_to_buffer(&test_context->verification_encoder, &verification_buffer); } /* concatenate all of the sent io message buffers into a single buffer */ struct aws_byte_buf concatenated_message_buffers; aws_byte_buf_init(&concatenated_message_buffers, allocator, 4096); for (size_t i = 0; i < write_context->requested_service_count; ++i) { struct aws_io_message *message = NULL; aws_array_list_get_at(&test_context->output_io_messages, &message, i); struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); aws_byte_buf_append_dynamic(&concatenated_message_buffers, &message_cursor); } /* * verify that what we sent out (in 1 or more io messages) matches the sequential encoding of the operations * that we expected to go out */ ASSERT_BIN_ARRAYS_EQUALS( verification_buffer.buffer, verification_buffer.len, concatenated_message_buffers.buffer, concatenated_message_buffers.len); aws_byte_buf_clean_up(&verification_buffer); 
aws_byte_buf_clean_up(&concatenated_message_buffers); /* verify that operations we expected to *NOT* be processed are still in the queue in order */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context->dummy_client.operational_state.queued_operations, write_context->expected_queued, write_context->expected_queued_size)); /* verify that the operations we expected to be placed into the write completion list are there, in order */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context->dummy_client.operational_state.write_completion_operations, write_context->expected_write_completions, write_context->expected_write_completions_size)); /* verify that the operations we expected to be in the unacked operation list are there, in order */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context->dummy_client.operational_state.unacked_operations, write_context->expected_pending_acks, write_context->expected_pending_acks_size)); ASSERT_UINT_EQUALS( write_context->expected_pending_acks_size, aws_hash_table_get_entry_count(&test_context->dummy_client.operational_state.unacked_operations_table)); /* verify that every operation that should be in pending ack has a packet id and is in the pending ack table */ for (size_t i = 0; i < write_context->expected_pending_acks_size; ++i) { struct aws_mqtt5_operation *operation = write_context->expected_pending_acks[i]; uint16_t packet_id = aws_mqtt5_operation_get_packet_id(operation); ASSERT_TRUE(packet_id != 0); struct aws_hash_element *elem = NULL; aws_hash_table_find(&test_context->dummy_client.operational_state.unacked_operations_table, &packet_id, &elem); ASSERT_NOT_NULL(elem); ASSERT_NOT_NULL(elem->value); struct aws_mqtt5_operation *table_operation = elem->value; ASSERT_PTR_EQUALS(operation, table_operation); } return AWS_OP_SUCCESS; } static int s_mqtt5_operation_processing_something_mqtt_connect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_MQTT_CONNECT; struct aws_mqtt5_operation *connect_op = &s_make_simple_connect_operation(allocator)->base; struct aws_mqtt5_operation *subscribe_op = &s_make_simple_subscribe_operation(allocator)->base; struct aws_mqtt5_operation *initial_operations[] = {connect_op, subscribe_op}; struct aws_mqtt5_operation *expected_written[] = {connect_op}; struct aws_mqtt5_operation *expected_write_completions[] = {connect_op}; struct aws_mqtt5_operation *expected_queued[] = {subscribe_op}; struct aws_mqtt5_simple_operation_processing_write_test_context write_context = { .requested_service_count = 1, .initial_operations = initial_operations, .initial_operations_size = AWS_ARRAY_SIZE(initial_operations), .expected_written = expected_written, .expected_written_size = AWS_ARRAY_SIZE(expected_written), .expected_write_completions = expected_write_completions, .expected_write_completions_size = AWS_ARRAY_SIZE(expected_write_completions), .expected_queued = expected_queued, .expected_queued_size = AWS_ARRAY_SIZE(expected_queued), }; ASSERT_SUCCESS(s_do_simple_operation_processing_io_message_write_test(allocator, &test_context, &write_context)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_processing_something_mqtt_connect, s_mqtt5_operation_processing_something_mqtt_connect_fn) static int 
s_mqtt5_operation_processing_something_clean_disconnect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CLEAN_DISCONNECT; struct aws_mqtt5_operation *disconnect_op = &s_make_simple_disconnect_operation(allocator)->base; struct aws_mqtt5_operation *subscribe_op = &s_make_simple_subscribe_operation(allocator)->base; struct aws_mqtt5_operation *initial_operations[] = {disconnect_op, subscribe_op}; struct aws_mqtt5_operation *expected_written[] = {disconnect_op}; struct aws_mqtt5_operation *expected_write_completions[] = {disconnect_op}; struct aws_mqtt5_operation *expected_queued[] = {subscribe_op}; struct aws_mqtt5_simple_operation_processing_write_test_context write_context = { .requested_service_count = 1, .initial_operations = initial_operations, .initial_operations_size = AWS_ARRAY_SIZE(initial_operations), .expected_written = expected_written, .expected_written_size = AWS_ARRAY_SIZE(expected_written), .expected_write_completions = expected_write_completions, .expected_write_completions_size = AWS_ARRAY_SIZE(expected_write_completions), .expected_queued = expected_queued, .expected_queued_size = AWS_ARRAY_SIZE(expected_queued), }; ASSERT_SUCCESS(s_do_simple_operation_processing_io_message_write_test(allocator, &test_context, &write_context)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_something_clean_disconnect, s_mqtt5_operation_processing_something_clean_disconnect_fn) static int s_mqtt5_operation_processing_something_connected_multi_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_operation *disconnect_op = &s_make_simple_disconnect_operation(allocator)->base; struct aws_mqtt5_operation *subscribe1_op = &s_make_simple_subscribe_operation(allocator)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_simple_subscribe_operation(allocator)->base; struct aws_mqtt5_operation *initial_operations[] = {subscribe1_op, subscribe2_op, disconnect_op}; struct aws_mqtt5_operation *expected_written[] = {subscribe1_op, subscribe2_op, disconnect_op}; struct aws_mqtt5_operation *expected_write_completions[] = {disconnect_op}; struct aws_mqtt5_operation *expected_pending_acks[] = {subscribe1_op, subscribe2_op}; struct aws_mqtt5_simple_operation_processing_write_test_context write_context = { .requested_service_count = 1, .initial_operations = initial_operations, .initial_operations_size = AWS_ARRAY_SIZE(initial_operations), .expected_written = expected_written, .expected_written_size = AWS_ARRAY_SIZE(expected_written), .expected_write_completions = expected_write_completions, .expected_write_completions_size = AWS_ARRAY_SIZE(expected_write_completions), .expected_pending_acks = expected_pending_acks, .expected_pending_acks_size = AWS_ARRAY_SIZE(expected_pending_acks), }; ASSERT_SUCCESS(s_do_simple_operation_processing_io_message_write_test(allocator, &test_context, &write_context)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_something_connected_multi, 
s_mqtt5_operation_processing_something_connected_multi_fn) static int s_mqtt5_operation_processing_something_connected_overflow_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.vtable.aws_channel_acquire_message_from_pool_fn = s_aws_channel_acquire_message_from_pool_success_small_fn; test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_operation *disconnect_op = &s_make_simple_disconnect_operation(allocator)->base; struct aws_mqtt5_operation *subscribe1_op = &s_make_simple_subscribe_operation(allocator)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_simple_subscribe_operation(allocator)->base; struct aws_mqtt5_operation *initial_operations[] = {subscribe1_op, subscribe2_op, disconnect_op}; struct aws_mqtt5_operation *expected_written[] = {subscribe1_op, subscribe2_op, disconnect_op}; struct aws_mqtt5_operation *expected_write_completions[] = {disconnect_op}; struct aws_mqtt5_operation *expected_pending_acks[] = {subscribe1_op, subscribe2_op}; struct aws_mqtt5_simple_operation_processing_write_test_context write_context = { .requested_service_count = 3, .initial_operations = initial_operations, .initial_operations_size = AWS_ARRAY_SIZE(initial_operations), .expected_written = expected_written, .expected_written_size = AWS_ARRAY_SIZE(expected_written), .expected_write_completions = expected_write_completions, .expected_write_completions_size = AWS_ARRAY_SIZE(expected_write_completions), .expected_pending_acks = expected_pending_acks, .expected_pending_acks_size = AWS_ARRAY_SIZE(expected_pending_acks), }; ASSERT_SUCCESS(s_do_simple_operation_processing_io_message_write_test(allocator, &test_context, &write_context)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_something_connected_overflow, s_mqtt5_operation_processing_something_connected_overflow_fn) void s_on_subscribe_operation_complete( const struct aws_mqtt5_packet_suback_view *suback, int error_code, void *complete_ctx) { (void)suback; struct aws_mqtt5_operation_processing_test_context *test_context = complete_ctx; aws_array_list_push_back(&test_context->completed_operation_error_codes, &error_code); } static struct aws_mqtt5_operation_subscribe *s_make_completable_subscribe_operation( struct aws_allocator *allocator, struct aws_mqtt5_operation_processing_test_context *test_context) { struct aws_mqtt5_packet_subscribe_view subscribe_view = { .subscriptions = s_subscriptions, .subscription_count = AWS_ARRAY_SIZE(s_subscriptions), }; struct aws_mqtt5_subscribe_completion_options completion_options = { .completion_callback = s_on_subscribe_operation_complete, .completion_user_data = test_context, }; return aws_mqtt5_operation_subscribe_new(allocator, NULL, &subscribe_view, &completion_options); } void s_on_publish_operation_complete( enum aws_mqtt5_packet_type packet_type, const void *packet, int error_code, void *complete_ctx) { (void)packet_type; (void)packet; struct aws_mqtt5_operation_processing_test_context *test_context = complete_ctx; aws_array_list_push_back(&test_context->completed_operation_error_codes, &error_code); } static struct aws_mqtt5_operation_publish *s_make_completable_publish_operation( struct aws_allocator *allocator, enum aws_mqtt5_qos qos, struct aws_mqtt5_operation_processing_test_context *test_context) { struct 
aws_byte_cursor payload_cursor = aws_byte_cursor_from_c_str(PUBLISH_PAYLOAD); struct aws_mqtt5_packet_publish_view publish_options = { .payload = payload_cursor, .qos = qos, .topic = aws_byte_cursor_from_c_str(PUBLISH_TOPIC), }; struct aws_mqtt5_publish_completion_options completion_options = { .completion_callback = s_on_publish_operation_complete, .completion_user_data = test_context, }; return aws_mqtt5_operation_publish_new(allocator, NULL, &publish_options, &completion_options); } static int s_setup_unacked_operation( struct aws_mqtt5_client_operational_state *operational_state, struct aws_mqtt5_operation *operation) { ASSERT_SUCCESS(aws_mqtt5_operation_bind_packet_id(operation, operational_state)); aws_linked_list_push_back(&operational_state->unacked_operations, &operation->node); aws_mqtt5_packet_id_t *packet_id_ptr = aws_mqtt5_operation_get_packet_id_address(operation); aws_hash_table_put(&operational_state->unacked_operations_table, packet_id_ptr, operation, NULL); return AWS_OP_SUCCESS; } static int s_mqtt5_operation_processing_disconnect_fail_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_client_options_storage *config = (struct aws_mqtt5_client_options_storage *)test_context.dummy_client.config; config->offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT; struct aws_mqtt5_operation *subscribe1_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe3_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_MOST_ONCE, &test_context)->base; aws_linked_list_push_back(&test_context.dummy_client.operational_state.queued_operations, &subscribe3_op->node); aws_linked_list_push_back( &test_context.dummy_client.operational_state.write_completion_operations, &publish2_op->node); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe2_op)); aws_mqtt5_client_on_disconnection_update_operational_state(&test_context.dummy_client); /* Should have been failed: publish2_op, subscribe1_op, subscribe2_op, subscribe3_op */ /* Should still be in unacked list: publish1_op */ ASSERT_UINT_EQUALS(4, aws_array_list_length(&test_context.completed_operation_error_codes)); for (size_t i = 0; i < aws_array_list_length(&test_context.completed_operation_error_codes); ++i) { int error_code = 0; aws_array_list_get_at(&test_context.completed_operation_error_codes, &error_code, i); ASSERT_INT_EQUALS(AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, error_code); } ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.queued_operations)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); 
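/* per the expectations noted above, only the in-flight QoS1 publish should remain in the unacked list once the fail-all offline-queue policy has run */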
struct aws_mqtt5_operation *expected_post_disconnect_pending_acks[] = {publish1_op}; /* verify that the operations we expected to be in the unacked operation list are there, in order */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.unacked_operations, expected_post_disconnect_pending_acks, AWS_ARRAY_SIZE(expected_post_disconnect_pending_acks))); ASSERT_UINT_EQUALS( AWS_ARRAY_SIZE(expected_post_disconnect_pending_acks), aws_mqtt5_linked_list_length(&test_context.dummy_client.operational_state.unacked_operations)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_processing_disconnect_fail_all, s_mqtt5_operation_processing_disconnect_fail_all_fn) static int s_mqtt5_operation_processing_disconnect_fail_qos0_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_client_options_storage *config = (struct aws_mqtt5_client_options_storage *)test_context.dummy_client.config; config->offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_operation *subscribe1_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe3_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_MOST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish3_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_MOST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish4_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; aws_linked_list_push_back(&test_context.dummy_client.operational_state.queued_operations, &subscribe3_op->node); aws_linked_list_push_back(&test_context.dummy_client.operational_state.queued_operations, &publish3_op->node); aws_linked_list_push_back(&test_context.dummy_client.operational_state.queued_operations, &publish4_op->node); aws_linked_list_push_back( &test_context.dummy_client.operational_state.write_completion_operations, &publish2_op->node); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe2_op)); aws_mqtt5_client_on_disconnection_update_operational_state(&test_context.dummy_client); /* Should have been failed: publish2_op, publish3_op */ ASSERT_UINT_EQUALS(2, aws_array_list_length(&test_context.completed_operation_error_codes)); for (size_t i = 0; i < aws_array_list_length(&test_context.completed_operation_error_codes); ++i) { int error_code = 0; aws_array_list_get_at(&test_context.completed_operation_error_codes, &error_code, i); ASSERT_INT_EQUALS(AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, error_code); } /* Should 
still be in pending queue: subscribe3_op, publish4_op */ struct aws_mqtt5_operation *expected_post_disconnect_queued_operations[] = {subscribe3_op, publish4_op}; ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.queued_operations, expected_post_disconnect_queued_operations, AWS_ARRAY_SIZE(expected_post_disconnect_queued_operations))); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); struct aws_mqtt5_operation *expected_post_disconnect_pending_acks[] = {subscribe1_op, publish1_op, subscribe2_op}; /* verify that the operations we expected to be in the unacked operation list are there, in order */ /* Should still be in unacked list: subscribe1_op, publish1_op, subscribe2_op */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.unacked_operations, expected_post_disconnect_pending_acks, AWS_ARRAY_SIZE(expected_post_disconnect_pending_acks))); /* verify pending-requeue subscribes have had their packet ids erased */ ASSERT_INT_EQUALS(0, aws_mqtt5_operation_get_packet_id(subscribe1_op)); ASSERT_INT_EQUALS(0, aws_mqtt5_operation_get_packet_id(subscribe2_op)); /* interrupted qos1 publish should be marked as duplicate and still have a packet id */ const struct aws_mqtt5_packet_publish_view *publish_view = publish1_op->packet_view; ASSERT_TRUE(publish_view->duplicate); ASSERT_TRUE(publish_view->packet_id != 0); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_operation_processing_disconnect_fail_qos0, s_mqtt5_operation_processing_disconnect_fail_qos0_fn) static int s_mqtt5_operation_processing_disconnect_fail_non_qos1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_client_options_storage *config = (struct aws_mqtt5_client_options_storage *)test_context.dummy_client.config; config->offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_operation *subscribe1_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe3_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_MOST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish3_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_MOST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish4_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; aws_linked_list_push_back(&test_context.dummy_client.operational_state.queued_operations, &subscribe3_op->node); aws_linked_list_push_back(&test_context.dummy_client.operational_state.queued_operations, &publish3_op->node); aws_linked_list_push_back(&test_context.dummy_client.operational_state.queued_operations, &publish4_op->node); aws_linked_list_push_back( 
&test_context.dummy_client.operational_state.write_completion_operations, &publish2_op->node); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe2_op)); aws_mqtt5_client_on_disconnection_update_operational_state(&test_context.dummy_client); /* Should have been failed: publish2_op, publish3_op, subscribe1_op, subscribe2_op, subscribe3_op */ ASSERT_UINT_EQUALS(5, aws_array_list_length(&test_context.completed_operation_error_codes)); for (size_t i = 0; i < aws_array_list_length(&test_context.completed_operation_error_codes); ++i) { int error_code = 0; aws_array_list_get_at(&test_context.completed_operation_error_codes, &error_code, i); ASSERT_INT_EQUALS(AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, error_code); } /* Should still be in pending queue: publish4_op */ struct aws_mqtt5_operation *expected_post_disconnect_queued_operations[] = {publish4_op}; ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.queued_operations, expected_post_disconnect_queued_operations, AWS_ARRAY_SIZE(expected_post_disconnect_queued_operations))); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); struct aws_mqtt5_operation *expected_post_disconnect_pending_acks[] = {publish1_op}; /* verify that the operations we expected to be in the unacked operation list are there, in order */ /* Should still be in unacked list: publish1_op */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.unacked_operations, expected_post_disconnect_pending_acks, AWS_ARRAY_SIZE(expected_post_disconnect_pending_acks))); /* interrupted qos1 publish should be marked as duplicate and still have a packet id */ const struct aws_mqtt5_packet_publish_view *publish_view = publish1_op->packet_view; ASSERT_TRUE(publish_view->duplicate); ASSERT_TRUE(publish_view->packet_id != 0); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_disconnect_fail_non_qos1, s_mqtt5_operation_processing_disconnect_fail_non_qos1_fn) static int s_mqtt5_operation_processing_reconnect_rejoin_session_fail_all_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish2_op)); /* we now clear this on disconnect, which we aren't simulating here, so do it manually */ aws_hash_table_clear(&test_context.dummy_client.operational_state.unacked_operations_table); test_context.dummy_client.negotiated_settings.rejoined_session = true; 
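/* with a rejoined session, the interrupted QoS1 publishes are expected to be requeued for retransmission rather than failed; the assertions below verify exactly that */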
aws_mqtt5_client_on_connection_update_operational_state(&test_context.dummy_client); /* Nothing should have failed */ ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.completed_operation_error_codes)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.unacked_operations)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); struct aws_mqtt5_operation *expected_queued_operations[] = {publish1_op, publish2_op}; /* verify that the operations we expected to be in the unacked operation list are there, in order */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.queued_operations, expected_queued_operations, AWS_ARRAY_SIZE(expected_queued_operations))); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_reconnect_rejoin_session_fail_all, s_mqtt5_operation_processing_reconnect_rejoin_session_fail_all_fn) static int s_mqtt5_operation_processing_reconnect_rejoin_session_fail_qos0_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_client_options_storage *config = (struct aws_mqtt5_client_options_storage *)test_context.dummy_client.config; config->offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *subscribe1_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish2_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe2_op)); /* we now clear this on disconnect, which we aren't simulating here, so do it manually */ aws_hash_table_clear(&test_context.dummy_client.operational_state.unacked_operations_table); test_context.dummy_client.negotiated_settings.rejoined_session = true; aws_mqtt5_client_on_connection_update_operational_state(&test_context.dummy_client); /* Nothing should have failed */ ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.completed_operation_error_codes)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.unacked_operations)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); /* The only place where order gets modified: the resubmitted publishes should be strictly ahead of everything else */ struct aws_mqtt5_operation *expected_queued_operations[] = {publish1_op, publish2_op, subscribe1_op, subscribe2_op}; /* verify that the operations we expected to be in the unacked operation list are there, in order */ 
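/* the requeued publishes should now sit at the head of the pending operation queue, ahead of the requeued subscribes */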
ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.queued_operations, expected_queued_operations, AWS_ARRAY_SIZE(expected_queued_operations))); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_reconnect_rejoin_session_fail_qos0, s_mqtt5_operation_processing_reconnect_rejoin_session_fail_qos0_fn) static int s_mqtt5_operation_processing_reconnect_no_session_fail_all_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_client_options_storage *config = (struct aws_mqtt5_client_options_storage *)test_context.dummy_client.config; config->offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *subscribe1_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish2_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe2_op)); /* we now clear this on disconnect, which we aren't simulating here, so do it manually */ aws_hash_table_clear(&test_context.dummy_client.operational_state.unacked_operations_table); test_context.dummy_client.negotiated_settings.rejoined_session = false; aws_mqtt5_client_on_connection_update_operational_state(&test_context.dummy_client); /* Everything should have failed */ ASSERT_UINT_EQUALS(4, aws_array_list_length(&test_context.completed_operation_error_codes)); for (size_t i = 0; i < aws_array_list_length(&test_context.completed_operation_error_codes); ++i) { int error_code = 0; aws_array_list_get_at(&test_context.completed_operation_error_codes, &error_code, i); ASSERT_INT_EQUALS(AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, error_code); } ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.unacked_operations)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.queued_operations)); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_reconnect_no_session_fail_all, s_mqtt5_operation_processing_reconnect_no_session_fail_all_fn) static int s_mqtt5_operation_processing_reconnect_no_session_fail_qos0_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct 
aws_mqtt5_client_options_storage *config = (struct aws_mqtt5_client_options_storage *)test_context.dummy_client.config; config->offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *subscribe1_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe2_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish2_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe2_op)); /* we now clear this on disconnect, which we aren't simulating here, so do it manually */ aws_hash_table_clear(&test_context.dummy_client.operational_state.unacked_operations_table); test_context.dummy_client.negotiated_settings.rejoined_session = false; aws_mqtt5_client_on_connection_update_operational_state(&test_context.dummy_client); /* Nothing should have failed */ ASSERT_UINT_EQUALS(0, aws_array_list_length(&test_context.completed_operation_error_codes)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.unacked_operations)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); /* Unlike the session case, operation order should be unchanged */ struct aws_mqtt5_operation *expected_queued_operations[] = {subscribe1_op, publish1_op, publish2_op, subscribe2_op}; /* verify that the operations we expected to be in the unacked operation list are there, in order */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.queued_operations, expected_queued_operations, AWS_ARRAY_SIZE(expected_queued_operations))); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_reconnect_no_session_fail_qos0, s_mqtt5_operation_processing_reconnect_no_session_fail_qos0_fn) static int s_mqtt5_operation_processing_reconnect_no_session_fail_non_qos1_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_operation_processing_test_context test_context; s_aws_mqtt5_operation_processing_test_context_init(&test_context, allocator); test_context.dummy_client.current_state = AWS_MCS_CONNECTED; struct aws_mqtt5_client_options_storage *config = (struct aws_mqtt5_client_options_storage *)test_context.dummy_client.config; config->offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_operation *publish1_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *publish2_op = &s_make_completable_publish_operation(allocator, AWS_MQTT5_QOS_AT_LEAST_ONCE, &test_context)->base; struct aws_mqtt5_operation *subscribe1_op = &s_make_completable_subscribe_operation(allocator, &test_context)->base; struct aws_mqtt5_operation *subscribe2_op = 
&s_make_completable_subscribe_operation(allocator, &test_context)->base; ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish1_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, publish2_op)); ASSERT_SUCCESS(s_setup_unacked_operation(&test_context.dummy_client.operational_state, subscribe2_op)); /* we now clear this on disconnect, which we aren't simulating here, so do it manually */ aws_hash_table_clear(&test_context.dummy_client.operational_state.unacked_operations_table); test_context.dummy_client.negotiated_settings.rejoined_session = false; aws_mqtt5_client_on_connection_update_operational_state(&test_context.dummy_client); /* The subscribes should have failed */ ASSERT_UINT_EQUALS(2, aws_array_list_length(&test_context.completed_operation_error_codes)); for (size_t i = 0; i < aws_array_list_length(&test_context.completed_operation_error_codes); ++i) { int error_code = 0; aws_array_list_get_at(&test_context.completed_operation_error_codes, &error_code, i); ASSERT_INT_EQUALS(AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, error_code); } ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.unacked_operations)); ASSERT_TRUE(aws_linked_list_empty(&test_context.dummy_client.operational_state.write_completion_operations)); /* Only the qos1 publishes should remain */ struct aws_mqtt5_operation *expected_queued_operations[] = {publish1_op, publish2_op}; /* verify that the operations we expected to be in the unacked operation list are there, in order */ ASSERT_SUCCESS(s_verify_operation_list_versus_expected( &test_context.dummy_client.operational_state.queued_operations, expected_queued_operations, AWS_ARRAY_SIZE(expected_queued_operations))); s_aws_mqtt5_operation_processing_test_context_clean_up(&test_context); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_processing_reconnect_no_session_fail_non_qos1, s_mqtt5_operation_processing_reconnect_no_session_fail_non_qos1_fn) AWS_STATIC_STRING_FROM_LITERAL(s_host_name, "derp.com"); static void s_dummy_lifecycle_handler(const struct aws_mqtt5_client_lifecycle_event *event) { (void)event; } static void s_dummy_publish_received_(const struct aws_mqtt5_packet_publish_view *publish, void *user_data) { (void)publish; (void)user_data; } static int s_mqtt5_client_options_defaults_set_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_event_loop_group *elg = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options hr_options = { .el_group = elg, .max_entries = 1, }; struct aws_host_resolver *hr = aws_host_resolver_new_default(allocator, &hr_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = elg, .host_resolver = hr, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_mqtt5_packet_connect_view connect_options; AWS_ZERO_STRUCT(connect_options); struct aws_mqtt5_client_options client_options = { .host_name = aws_byte_cursor_from_string(s_host_name), .port = 1883, .bootstrap = bootstrap, .lifecycle_event_handler = s_dummy_lifecycle_handler, .publish_received_handler = s_dummy_publish_received_, .connect_options = &connect_options, }; struct aws_mqtt5_client_options_storage *client_options_storage = 
aws_mqtt5_client_options_storage_new(allocator, &client_options); ASSERT_INT_EQUALS( AWS_MQTT5_DEFAULT_SOCKET_CONNECT_TIMEOUT_MS, client_options_storage->socket_options.connect_timeout_ms); ASSERT_INT_EQUALS(AWS_MQTT5_CLIENT_DEFAULT_MIN_RECONNECT_DELAY_MS, client_options_storage->min_reconnect_delay_ms); ASSERT_INT_EQUALS(AWS_MQTT5_CLIENT_DEFAULT_MAX_RECONNECT_DELAY_MS, client_options_storage->max_reconnect_delay_ms); ASSERT_INT_EQUALS( AWS_MQTT5_CLIENT_DEFAULT_MIN_CONNECTED_TIME_TO_RESET_RECONNECT_DELAY_MS, client_options_storage->min_connected_time_to_reset_reconnect_delay_ms); ASSERT_INT_EQUALS(AWS_MQTT5_CLIENT_DEFAULT_PING_TIMEOUT_MS, client_options_storage->ping_timeout_ms); ASSERT_INT_EQUALS(AWS_MQTT5_CLIENT_DEFAULT_CONNACK_TIMEOUT_MS, client_options_storage->connack_timeout_ms); ASSERT_INT_EQUALS(AWS_MQTT5_CLIENT_DEFAULT_OPERATION_TIMEOUNT_SECONDS, client_options_storage->ack_timeout_seconds); aws_mqtt5_client_options_storage_destroy(client_options_storage); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(hr); aws_event_loop_group_release(elg); aws_thread_join_all_managed(); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_client_options_defaults_set, s_mqtt5_client_options_defaults_set_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_operation_validation_failure_tests.c000066400000000000000000001704651456575232400323670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include static uint8_t s_server_reference[] = "derp.com"; static struct aws_byte_cursor s_server_reference_cursor = { .ptr = s_server_reference, .len = AWS_ARRAY_SIZE(s_server_reference) - 1, }; static uint8_t s_binary_data[] = "binary data"; static struct aws_byte_cursor s_binary_data_cursor = { .ptr = s_binary_data, .len = AWS_ARRAY_SIZE(s_binary_data) - 1, }; static uint8_t s_too_long_for_uint16[UINT16_MAX + 1]; static struct aws_byte_cursor s_too_long_for_uint16_cursor = { .ptr = s_too_long_for_uint16, .len = AWS_ARRAY_SIZE(s_too_long_for_uint16), }; // Mqtt5 Specific invalid codepoint in prohibited range U+007F - U+009F (value = U+008F)", static uint8_t s_invalid_utf8[] = "\xC2\x8F"; static struct aws_byte_cursor s_invalid_utf8_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(s_invalid_utf8); static uint8_t s_user_prop_name[] = "name"; static uint8_t s_user_prop_value[] = "value"; static const struct aws_mqtt5_user_property s_bad_user_properties_name[] = { { .name = { .ptr = s_too_long_for_uint16, .len = AWS_ARRAY_SIZE(s_too_long_for_uint16), }, .value = { .ptr = (uint8_t *)s_user_prop_value, .len = AWS_ARRAY_SIZE(s_user_prop_value) - 1, }, }, }; static const struct aws_mqtt5_user_property s_bad_user_properties_value[] = { { .name = { .ptr = s_user_prop_name, .len = AWS_ARRAY_SIZE(s_user_prop_name) - 1, }, .value = { .ptr = s_too_long_for_uint16, .len = AWS_ARRAY_SIZE(s_too_long_for_uint16), }, }, }; static const struct aws_mqtt5_user_property s_user_properties_with_invalid_name[] = { {.name = { .ptr = (uint8_t *)s_invalid_utf8, .len = AWS_ARRAY_SIZE(s_invalid_utf8) - 1, }, .value = { .ptr = (uint8_t *)s_user_prop_value, .len = AWS_ARRAY_SIZE(s_user_prop_value) - 1, }}}; static const struct aws_mqtt5_user_property s_user_properties_with_invalid_value[] = { {.name = { .ptr = s_user_prop_name, .len = AWS_ARRAY_SIZE(s_user_prop_name) - 1, }, .value = { .ptr = (uint8_t *)s_invalid_utf8, .len 
= AWS_ARRAY_SIZE(s_invalid_utf8) - 1, }}}; static struct aws_mqtt5_user_property s_bad_user_properties_too_many[AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES + 1]; /* * rather than just checking the bad view, we do a side-by-side before-after view validation as well to give more * confidence that the failure is coming from what we're actually trying to check */ #define AWS_VALIDATION_FAILURE_PREFIX(packet_type, failure_reason, view_name, mutate_function) \ static int s_mqtt5_operation_##packet_type##_validation_failure_##failure_reason##_fn( \ struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ \ struct aws_mqtt5_packet_##packet_type##_view good_view = view_name; \ struct aws_mqtt5_packet_##packet_type##_view bad_view = view_name; \ (*mutate_function)(&bad_view); \ ASSERT_SUCCESS(aws_mqtt5_packet_##packet_type##_view_validate(&good_view)); \ ASSERT_FAILS(aws_mqtt5_packet_##packet_type##_view_validate(&bad_view)); #define AWS_VALIDATION_FAILURE_SUFFIX(packet_type, failure_reason) \ return AWS_OP_SUCCESS; \ } \ \ AWS_TEST_CASE( \ mqtt5_operation_##packet_type##_validation_failure_##failure_reason, \ s_mqtt5_operation_##packet_type##_validation_failure_##failure_reason##_fn) #define AWS_VALIDATION_FAILURE_TEST4(packet_type, failure_reason, view_name, mutate_function) \ AWS_VALIDATION_FAILURE_PREFIX(packet_type, failure_reason, view_name, mutate_function) \ struct aws_mqtt5_operation_##packet_type *operation = \ aws_mqtt5_operation_##packet_type##_new(allocator, &good_view, NULL, NULL); \ ASSERT_NOT_NULL(operation); \ aws_mqtt5_operation_release(&operation->base); \ ASSERT_NULL(aws_mqtt5_operation_##packet_type##_new(allocator, &bad_view, NULL, NULL)); \ AWS_VALIDATION_FAILURE_SUFFIX(packet_type, failure_reason) #define AWS_VALIDATION_FAILURE_TEST3(packet_type, failure_reason, view_name, mutate_function) \ AWS_VALIDATION_FAILURE_PREFIX(packet_type, failure_reason, view_name, mutate_function) \ ASSERT_NULL(aws_mqtt5_operation_##packet_type##_new(allocator, NULL, &bad_view, NULL)); \ AWS_VALIDATION_FAILURE_SUFFIX(packet_type, failure_reason) #define AWS_VALIDATION_FAILURE_TEST2(packet_type, failure_reason, view_name, mutate_function) \ AWS_VALIDATION_FAILURE_PREFIX(packet_type, failure_reason, view_name, mutate_function) \ ASSERT_NULL(aws_mqtt5_operation_##packet_type##_new(allocator, &bad_view)); \ AWS_VALIDATION_FAILURE_SUFFIX(packet_type, failure_reason) #define AWS_IOT_CORE_VALIDATION_FAILURE(packet_type, failure_reason, view_name, mutate_function) \ static int s_mqtt5_operation_##packet_type##_validation_failure_##failure_reason##_fn( \ struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ \ struct aws_mqtt5_packet_##packet_type##_view good_view = view_name; \ struct aws_mqtt5_packet_##packet_type##_view bad_view = view_name; \ (*mutate_function)(&bad_view); \ ASSERT_SUCCESS(aws_mqtt5_packet_##packet_type##_view_validate(&good_view)); \ ASSERT_SUCCESS(aws_mqtt5_packet_##packet_type##_view_validate(&bad_view)); \ \ struct aws_mqtt5_client dummy_client; \ AWS_ZERO_STRUCT(dummy_client); \ struct aws_mqtt5_client_options_storage client_options_storage; \ AWS_ZERO_STRUCT(client_options_storage); \ client_options_storage.extended_validation_and_flow_control_options = AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS; \ dummy_client.config = &client_options_storage; \ \ ASSERT_NULL(aws_mqtt5_operation_##packet_type##_new(allocator, &dummy_client, &bad_view, NULL)); \ return AWS_OP_SUCCESS; \ } \ \ AWS_TEST_CASE( \ mqtt5_operation_##packet_type##_validation_failure_##failure_reason, \ 
s_mqtt5_operation_##packet_type##_validation_failure_##failure_reason##_fn) static struct aws_mqtt5_packet_disconnect_view s_good_disconnect_view; static void s_make_server_reference_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->server_reference = &s_server_reference_cursor; } AWS_VALIDATION_FAILURE_TEST4( disconnect, server_reference, s_good_disconnect_view, s_make_server_reference_disconnect_view) static void s_make_bad_reason_code_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->reason_code = -1; } AWS_VALIDATION_FAILURE_TEST4( disconnect, bad_reason_code, s_good_disconnect_view, s_make_bad_reason_code_disconnect_view) static void s_make_reason_string_too_long_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->reason_string = &s_too_long_for_uint16_cursor; } AWS_VALIDATION_FAILURE_TEST4( disconnect, reason_string_too_long, s_good_disconnect_view, s_make_reason_string_too_long_disconnect_view) static void s_make_reason_string_invalid_utf8_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->reason_string = &s_invalid_utf8_string; } AWS_VALIDATION_FAILURE_TEST4( disconnect, reason_string_invalid_utf8, s_good_disconnect_view, s_make_reason_string_invalid_utf8_disconnect_view) static void s_make_user_properties_name_too_long_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_name); view->user_properties = s_bad_user_properties_name; } AWS_VALIDATION_FAILURE_TEST4( disconnect, user_properties_name_too_long, s_good_disconnect_view, s_make_user_properties_name_too_long_disconnect_view) static void s_make_user_properties_name_invalid_utf8_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_name); view->user_properties = s_user_properties_with_invalid_name; } AWS_VALIDATION_FAILURE_TEST4( disconnect, user_properties_name_invalid_utf8, s_good_disconnect_view, s_make_user_properties_name_invalid_utf8_disconnect_view) static void s_make_user_properties_value_too_long_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_value); view->user_properties = s_bad_user_properties_value; } AWS_VALIDATION_FAILURE_TEST4( disconnect, user_properties_value_too_long, s_good_disconnect_view, s_make_user_properties_value_too_long_disconnect_view) static void s_make_user_properties_value_invalid_utf8_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_value); view->user_properties = s_user_properties_with_invalid_value; } AWS_VALIDATION_FAILURE_TEST4( disconnect, user_properties_value_invalid_utf8, s_good_disconnect_view, s_make_user_properties_value_invalid_utf8_disconnect_view) static void s_make_user_properties_too_many_disconnect_view(struct aws_mqtt5_packet_disconnect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_too_many); view->user_properties = s_bad_user_properties_too_many; } AWS_VALIDATION_FAILURE_TEST4( disconnect, user_properties_too_many, s_good_disconnect_view, s_make_user_properties_too_many_disconnect_view) static struct aws_mqtt5_packet_connect_view s_good_connect_view; static void s_make_client_id_too_long_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->client_id.ptr = s_too_long_for_uint16; view->client_id.len = 
AWS_ARRAY_SIZE(s_too_long_for_uint16); } AWS_VALIDATION_FAILURE_TEST2(connect, client_id_too_long, s_good_connect_view, s_make_client_id_too_long_connect_view) static void s_make_client_id_invalid_utf8_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->client_id = s_invalid_utf8_string; } AWS_VALIDATION_FAILURE_TEST2( connect, client_id_invalid_utf8, s_good_connect_view, s_make_client_id_invalid_utf8_connect_view) static void s_make_username_too_long_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->username = &s_too_long_for_uint16_cursor; } AWS_VALIDATION_FAILURE_TEST2(connect, username_too_long, s_good_connect_view, s_make_username_too_long_connect_view) static void s_make_username_invalid_utf8_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->username = &s_invalid_utf8_string; } AWS_VALIDATION_FAILURE_TEST2( connect, username_invalid_utf8, s_good_connect_view, s_make_username_invalid_utf8_connect_view) static void s_make_password_too_long_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->password = &s_too_long_for_uint16_cursor; } AWS_VALIDATION_FAILURE_TEST2(connect, password_too_long, s_good_connect_view, s_make_password_too_long_connect_view) static const uint16_t s_zero_receive_maximum = 0; static void s_make_receive_maximum_zero_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->receive_maximum = &s_zero_receive_maximum; } AWS_VALIDATION_FAILURE_TEST2( connect, receive_maximum_zero, s_good_connect_view, s_make_receive_maximum_zero_connect_view) static const uint32_t s_maximum_packet_size_zero = 0; static void s_make_maximum_packet_size_zero_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->maximum_packet_size_bytes = &s_maximum_packet_size_zero; } AWS_VALIDATION_FAILURE_TEST2( connect, maximum_packet_size_zero, s_good_connect_view, s_make_maximum_packet_size_zero_connect_view) static void s_make_auth_method_unsupported_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->authentication_method = &s_binary_data_cursor; } AWS_VALIDATION_FAILURE_TEST2( connect, auth_method_unsupported, s_good_connect_view, s_make_auth_method_unsupported_connect_view) static void s_make_auth_data_unsupported_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->authentication_data = &s_binary_data_cursor; } AWS_VALIDATION_FAILURE_TEST2( connect, auth_data_unsupported, s_good_connect_view, s_make_auth_data_unsupported_connect_view) static uint8_t s_bad_boolean = 2; static void s_make_request_problem_information_invalid_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->request_problem_information = &s_bad_boolean; } AWS_VALIDATION_FAILURE_TEST2( connect, request_problem_information_invalid, s_good_connect_view, s_make_request_problem_information_invalid_connect_view) static void s_make_request_response_information_invalid_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->request_response_information = &s_bad_boolean; }; AWS_VALIDATION_FAILURE_TEST2( connect, request_response_information_invalid, s_good_connect_view, s_make_request_response_information_invalid_connect_view) static void s_make_user_properties_name_too_long_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_name); view->user_properties = s_bad_user_properties_name; } AWS_VALIDATION_FAILURE_TEST2( connect, user_properties_name_too_long, s_good_connect_view, s_make_user_properties_name_too_long_connect_view) static void 
s_make_user_properties_name_invalid_utf8_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_name); view->user_properties = s_user_properties_with_invalid_name; } AWS_VALIDATION_FAILURE_TEST2( connect, user_properties_name_invalid_utf8, s_good_connect_view, s_make_user_properties_name_invalid_utf8_connect_view) static void s_make_user_properties_value_too_long_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_value); view->user_properties = s_bad_user_properties_value; } AWS_VALIDATION_FAILURE_TEST2( connect, user_properties_value_too_long, s_good_connect_view, s_make_user_properties_value_too_long_connect_view) static void s_make_user_properties_value_invalid_utf8_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_value); view->user_properties = s_user_properties_with_invalid_value; } AWS_VALIDATION_FAILURE_TEST2( connect, user_properties_value_invalid_utf8, s_good_connect_view, s_make_user_properties_value_invalid_utf8_connect_view) static void s_make_user_properties_too_many_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_too_many); view->user_properties = s_bad_user_properties_too_many; } AWS_VALIDATION_FAILURE_TEST2( connect, user_properties_too_many, s_good_connect_view, s_make_user_properties_too_many_connect_view) static uint8_t s_good_topic[] = "hello/world"; /* no-topic and no-topic-alias is invalid */ static struct aws_mqtt5_packet_publish_view s_good_will_publish_view = { .topic = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, }; static struct aws_mqtt5_packet_connect_view s_good_will_connect_view = { .will = &s_good_will_publish_view, }; static struct aws_mqtt5_packet_publish_view s_will_invalid_publish_view; static void s_make_will_invalid_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->will = &s_will_invalid_publish_view; } AWS_VALIDATION_FAILURE_TEST2(connect, will_invalid, s_good_will_connect_view, s_make_will_invalid_connect_view) static struct aws_mqtt5_packet_publish_view s_will_payload_too_long_publish_view = { .topic = { .ptr = s_user_prop_name, .len = AWS_ARRAY_SIZE(s_user_prop_name) - 1, }, .payload = { .ptr = s_too_long_for_uint16, .len = AWS_ARRAY_SIZE(s_too_long_for_uint16), }}; static void s_make_will_payload_too_long_connect_view(struct aws_mqtt5_packet_connect_view *view) { view->will = &s_will_payload_too_long_publish_view; } AWS_VALIDATION_FAILURE_TEST2( connect, will_payload_too_long, s_good_will_connect_view, s_make_will_payload_too_long_connect_view) static struct aws_mqtt5_subscription_view s_good_subscription[] = { { .topic_filter = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, .retain_as_published = false, }, }; static struct aws_mqtt5_packet_subscribe_view s_good_subscribe_view = { .subscriptions = s_good_subscription, .subscription_count = AWS_ARRAY_SIZE(s_good_subscription), }; static void s_make_no_subscriptions_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->subscriptions = NULL; view->subscription_count = 0; } AWS_VALIDATION_FAILURE_TEST3(subscribe, no_subscriptions, s_good_subscribe_view, s_make_no_subscriptions_subscribe_view) static struct 
aws_mqtt5_subscription_view s_too_many_subscriptions[AWS_MQTT5_CLIENT_MAXIMUM_SUBSCRIPTIONS_PER_SUBSCRIBE + 1]; static void s_make_too_many_subscriptions_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { for (size_t i = 0; i < AWS_ARRAY_SIZE(s_too_many_subscriptions); ++i) { struct aws_mqtt5_subscription_view *subscription_view = &s_too_many_subscriptions[i]; subscription_view->topic_filter = aws_byte_cursor_from_array(s_good_topic, AWS_ARRAY_SIZE(s_good_topic) - 1); subscription_view->qos = AWS_MQTT5_QOS_AT_MOST_ONCE; subscription_view->no_local = false; subscription_view->retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE; subscription_view->retain_as_published = false; } view->subscriptions = s_too_many_subscriptions; view->subscription_count = AWS_ARRAY_SIZE(s_too_many_subscriptions); } AWS_VALIDATION_FAILURE_TEST3( subscribe, too_many_subscriptions, s_good_subscribe_view, s_make_too_many_subscriptions_subscribe_view) static const uint32_t s_invalid_subscription_identifier = AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER + 1; static void s_make_invalid_subscription_identifier_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->subscription_identifier = &s_invalid_subscription_identifier; } AWS_VALIDATION_FAILURE_TEST3( subscribe, invalid_subscription_identifier, s_good_subscribe_view, s_make_invalid_subscription_identifier_subscribe_view) static uint8_t s_bad_topic_filter[] = "hello/#/world"; static struct aws_mqtt5_subscription_view s_invalid_topic_filter_subscription[] = { { .topic_filter = { .ptr = s_bad_topic_filter, .len = AWS_ARRAY_SIZE(s_bad_topic_filter) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, .retain_as_published = false, }, }; static struct aws_mqtt5_subscription_view s_invalid_utf8_topic_filter_subscription[] = { { .topic_filter = { .ptr = (uint8_t *)s_invalid_utf8, .len = AWS_ARRAY_SIZE(s_invalid_utf8) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, .retain_as_published = false, }, }; static void s_make_invalid_topic_filter_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->subscriptions = s_invalid_topic_filter_subscription; view->subscription_count = AWS_ARRAY_SIZE(s_invalid_topic_filter_subscription); }; AWS_VALIDATION_FAILURE_TEST3( subscribe, invalid_topic_filter, s_good_subscribe_view, s_make_invalid_topic_filter_subscribe_view) static void s_make_invalid_utf8_topic_filter_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->subscriptions = s_invalid_utf8_topic_filter_subscription; view->subscription_count = AWS_ARRAY_SIZE(s_invalid_utf8_topic_filter_subscription); }; AWS_VALIDATION_FAILURE_TEST3( subscribe, invalid_utf8_topic_filter, s_good_subscribe_view, s_make_invalid_utf8_topic_filter_subscribe_view) static struct aws_mqtt5_subscription_view s_invalid_qos_subscription[] = { { .topic_filter = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, .qos = 3, .no_local = false, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, .retain_as_published = false, }, }; static void s_make_invalid_qos_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->subscriptions = s_invalid_qos_subscription; view->subscription_count = AWS_ARRAY_SIZE(s_invalid_qos_subscription); } AWS_VALIDATION_FAILURE_TEST3(subscribe, invalid_qos, s_good_subscribe_view, s_make_invalid_qos_subscribe_view) static struct aws_mqtt5_subscription_view 
s_invalid_retain_type_subscription[] = { { .topic_filter = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = false, .retain_handling_type = 5, .retain_as_published = false, }, }; static void s_make_invalid_retain_type_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->subscriptions = s_invalid_retain_type_subscription; view->subscription_count = AWS_ARRAY_SIZE(s_invalid_retain_type_subscription); } AWS_VALIDATION_FAILURE_TEST3( subscribe, invalid_retain_type, s_good_subscribe_view, s_make_invalid_retain_type_subscribe_view) static uint8_t s_shared_topic[] = "$share/sharename/topic/filter"; static struct aws_mqtt5_subscription_view s_invalid_no_local_subscription[] = { { .topic_filter = { .ptr = s_shared_topic, .len = AWS_ARRAY_SIZE(s_shared_topic) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .no_local = true, .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE, .retain_as_published = false, }, }; static void s_make_invalid_no_local_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->subscriptions = s_invalid_no_local_subscription; view->subscription_count = AWS_ARRAY_SIZE(s_invalid_no_local_subscription); } AWS_VALIDATION_FAILURE_TEST3(subscribe, invalid_no_local, s_good_subscribe_view, s_make_invalid_no_local_subscribe_view) static void s_make_user_properties_name_too_long_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_name); view->user_properties = s_bad_user_properties_name; } AWS_VALIDATION_FAILURE_TEST3( subscribe, user_properties_name_too_long, s_good_subscribe_view, s_make_user_properties_name_too_long_subscribe_view) static void s_make_user_properties_name_invalid_utf8_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_name); view->user_properties = s_user_properties_with_invalid_name; } AWS_VALIDATION_FAILURE_TEST3( subscribe, user_properties_name_invalid_utf8, s_good_subscribe_view, s_make_user_properties_name_invalid_utf8_subscribe_view) static void s_make_user_properties_value_too_long_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_value); view->user_properties = s_bad_user_properties_value; } AWS_VALIDATION_FAILURE_TEST3( subscribe, user_properties_value_too_long, s_good_subscribe_view, s_make_user_properties_value_too_long_subscribe_view) static void s_make_user_properties_value_invalid_utf8_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_value); view->user_properties = s_user_properties_with_invalid_value; } AWS_VALIDATION_FAILURE_TEST3( subscribe, user_properties_value_invalid_utf8, s_good_subscribe_view, s_make_user_properties_value_invalid_utf8_subscribe_view) static void s_make_user_properties_too_many_subscribe_view(struct aws_mqtt5_packet_subscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_too_many); view->user_properties = s_bad_user_properties_too_many; } AWS_VALIDATION_FAILURE_TEST3( subscribe, user_properties_too_many, s_good_subscribe_view, s_make_user_properties_too_many_subscribe_view) static struct aws_byte_cursor s_good_topic_filter[] = { { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, }; static struct aws_mqtt5_packet_unsubscribe_view s_good_unsubscribe_view = { 
.topic_filters = s_good_topic_filter, .topic_filter_count = AWS_ARRAY_SIZE(s_good_topic_filter), }; static void s_make_no_topic_filters_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->topic_filters = NULL; view->topic_filter_count = 0; } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, no_topic_filters, s_good_unsubscribe_view, s_make_no_topic_filters_unsubscribe_view) static struct aws_byte_cursor s_too_many_topic_filters[AWS_MQTT5_CLIENT_MAXIMUM_TOPIC_FILTERS_PER_UNSUBSCRIBE + 1]; static void s_make_too_many_topic_filters_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { for (size_t i = 0; i < AWS_ARRAY_SIZE(s_too_many_topic_filters); ++i) { struct aws_byte_cursor *cursor = &s_too_many_topic_filters[i]; cursor->ptr = s_good_topic; cursor->len = AWS_ARRAY_SIZE(s_good_topic) - 1; } view->topic_filters = s_too_many_topic_filters; view->topic_filter_count = AWS_ARRAY_SIZE(s_too_many_topic_filters); } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, too_many_topic_filters, s_good_unsubscribe_view, s_make_too_many_topic_filters_unsubscribe_view) static struct aws_byte_cursor s_invalid_topic_filter[] = { { .ptr = s_bad_topic_filter, .len = AWS_ARRAY_SIZE(s_bad_topic_filter) - 1, }, }; static struct aws_byte_cursor s_invalid_utf8_topic_filter[] = {{ .ptr = (uint8_t *)s_invalid_utf8, .len = AWS_ARRAY_SIZE(s_invalid_utf8) - 1, }}; static void s_make_invalid_topic_filter_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->topic_filters = s_invalid_topic_filter; view->topic_filter_count = AWS_ARRAY_SIZE(s_invalid_topic_filter); } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, invalid_topic_filter, s_good_unsubscribe_view, s_make_invalid_topic_filter_unsubscribe_view) static void s_make_invalid_utf8_topic_filter_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->topic_filters = s_invalid_utf8_topic_filter; view->topic_filter_count = AWS_ARRAY_SIZE(s_invalid_utf8_topic_filter); } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, invalid_utf8_topic_filter, s_good_unsubscribe_view, s_make_invalid_utf8_topic_filter_unsubscribe_view) static void s_make_user_properties_name_too_long_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_name); view->user_properties = s_bad_user_properties_name; } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, user_properties_name_too_long, s_good_unsubscribe_view, s_make_user_properties_name_too_long_unsubscribe_view) static void s_make_user_properties_name_invalid_utf8_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_name); view->user_properties = s_user_properties_with_invalid_name; } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, user_properties_name_invalid_utf8, s_good_unsubscribe_view, s_make_user_properties_name_invalid_utf8_unsubscribe_view) static void s_make_user_properties_value_too_long_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_value); view->user_properties = s_bad_user_properties_value; } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, user_properties_value_too_long, s_good_unsubscribe_view, s_make_user_properties_value_too_long_unsubscribe_view) static void s_make_user_properties_value_invalid_utf8_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_value); 
view->user_properties = s_user_properties_with_invalid_value; } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, user_properties_value_invalid_utf8, s_good_unsubscribe_view, s_make_user_properties_value_invalid_utf8_unsubscribe_view) static void s_make_user_properties_too_many_unsubscribe_view(struct aws_mqtt5_packet_unsubscribe_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_too_many); view->user_properties = s_bad_user_properties_too_many; } AWS_VALIDATION_FAILURE_TEST3( unsubscribe, user_properties_too_many, s_good_unsubscribe_view, s_make_user_properties_too_many_unsubscribe_view) static struct aws_mqtt5_packet_publish_view s_good_publish_view = { .topic = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, }; static uint8_t s_invalid_topic[] = "hello/#"; static void s_make_invalid_topic_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->topic.ptr = s_invalid_topic; view->topic.len = AWS_ARRAY_SIZE(s_invalid_topic) - 1; } AWS_VALIDATION_FAILURE_TEST3(publish, invalid_topic, s_good_publish_view, s_make_invalid_topic_publish_view) static void s_make_invalid_utf8_topic_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->topic = s_invalid_utf8_string; } AWS_VALIDATION_FAILURE_TEST3(publish, invalid_utf8_topic, s_good_publish_view, s_make_invalid_utf8_topic_publish_view) static void s_make_no_topic_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->topic.ptr = NULL; view->topic.len = 0; } AWS_VALIDATION_FAILURE_TEST3(publish, no_topic, s_good_publish_view, s_make_no_topic_publish_view) static enum aws_mqtt5_payload_format_indicator s_invalid_payload_format = 3; static enum aws_mqtt5_payload_format_indicator s_valid_payload_format = AWS_MQTT5_PFI_UTF8; static void s_make_invalid_payload_format_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->payload_format = &s_invalid_payload_format; } AWS_VALIDATION_FAILURE_TEST3( publish, invalid_payload_format, s_good_publish_view, s_make_invalid_payload_format_publish_view) static void s_make_invalid_utf8_payload_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->payload_format = &s_valid_payload_format; view->payload = s_invalid_utf8_string; } AWS_VALIDATION_FAILURE_TEST3( publish, invalid_utf8_payload, s_good_publish_view, s_make_invalid_utf8_payload_publish_view) static void s_make_response_topic_too_long_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->response_topic = &s_too_long_for_uint16_cursor; } AWS_VALIDATION_FAILURE_TEST3( publish, response_topic_too_long, s_good_publish_view, s_make_response_topic_too_long_publish_view) static struct aws_byte_cursor s_invalid_topic_cursor = { .ptr = s_invalid_topic, .len = AWS_ARRAY_SIZE(s_invalid_topic) - 1, }; static void s_make_response_topic_invalid_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->response_topic = &s_invalid_topic_cursor; } AWS_VALIDATION_FAILURE_TEST3( publish, invalid_response_topic, s_good_publish_view, s_make_response_topic_invalid_publish_view) static void s_make_response_topic_invalid_utf8_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->response_topic = &s_invalid_utf8_string; } AWS_VALIDATION_FAILURE_TEST3( publish, invalid_utf8_response_topic, s_good_publish_view, s_make_response_topic_invalid_utf8_publish_view) static void s_make_correlation_data_too_long_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->correlation_data = &s_too_long_for_uint16_cursor; } AWS_VALIDATION_FAILURE_TEST3( publish, 
correlation_data_too_long, s_good_publish_view, s_make_correlation_data_too_long_publish_view) static void s_make_content_type_invalid_utf8_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->content_type = &s_invalid_utf8_string; } AWS_VALIDATION_FAILURE_TEST3( publish, invalid_utf8_content_type, s_good_publish_view, s_make_content_type_invalid_utf8_publish_view) static const uint32_t s_subscription_identifiers[] = {1, 2}; static void s_make_subscription_identifier_exists_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->subscription_identifiers = s_subscription_identifiers; view->subscription_identifier_count = AWS_ARRAY_SIZE(s_subscription_identifiers); } AWS_VALIDATION_FAILURE_TEST3( publish, subscription_identifier_exists, s_good_publish_view, s_make_subscription_identifier_exists_publish_view) static void s_make_content_type_too_long_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->content_type = &s_too_long_for_uint16_cursor; } AWS_VALIDATION_FAILURE_TEST3( publish, content_type_too_long, s_good_publish_view, s_make_content_type_too_long_publish_view) static const uint16_t s_topic_alias_zero = 0; static void s_make_topic_alias_zero_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->topic_alias = &s_topic_alias_zero; view->topic.ptr = NULL; view->topic.len = 0; } AWS_VALIDATION_FAILURE_TEST3(publish, topic_alias_zero, s_good_publish_view, s_make_topic_alias_zero_publish_view) static void s_make_user_properties_name_too_long_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_name); view->user_properties = s_bad_user_properties_name; } AWS_VALIDATION_FAILURE_TEST3( publish, user_properties_name_too_long, s_good_publish_view, s_make_user_properties_name_too_long_publish_view) static void s_make_user_properties_name_invalid_utf8_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_name); view->user_properties = s_user_properties_with_invalid_name; } AWS_VALIDATION_FAILURE_TEST3( publish, user_properties_name_invalid_utf8, s_good_publish_view, s_make_user_properties_name_invalid_utf8_publish_view) static void s_make_user_properties_value_too_long_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_value); view->user_properties = s_bad_user_properties_value; } AWS_VALIDATION_FAILURE_TEST3( publish, user_properties_value_too_long, s_good_publish_view, s_make_user_properties_value_too_long_publish_view) static void s_make_user_properties_value_invalid_utf8_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_user_properties_with_invalid_value); view->user_properties = s_user_properties_with_invalid_value; } AWS_VALIDATION_FAILURE_TEST3( publish, user_properties_value_invalid_utf8, s_good_publish_view, s_make_user_properties_value_invalid_utf8_publish_view) static void s_make_user_properties_too_many_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->user_property_count = AWS_ARRAY_SIZE(s_bad_user_properties_too_many); view->user_properties = s_bad_user_properties_too_many; } AWS_VALIDATION_FAILURE_TEST3( publish, user_properties_too_many, s_good_publish_view, s_make_user_properties_too_many_publish_view) static void s_make_qos0_duplicate_true_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->qos = AWS_MQTT5_QOS_AT_MOST_ONCE; 
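/* The MQTT5 spec requires the DUP flag to be 0 for QoS 0 publishes, so combining AT_MOST_ONCE with the duplicate=true assignment that follows is expected to fail validation. */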
view->duplicate = true; } AWS_VALIDATION_FAILURE_TEST3(publish, qos0_duplicate_true, s_good_publish_view, s_make_qos0_duplicate_true_publish_view) static void s_make_qos0_with_packet_id_publish_view(struct aws_mqtt5_packet_publish_view *view) { view->qos = AWS_MQTT5_QOS_AT_MOST_ONCE; view->packet_id = 1; } AWS_VALIDATION_FAILURE_TEST3(publish, qos0_with_packet_id, s_good_publish_view, s_make_qos0_with_packet_id_publish_view) #define AWS_CLIENT_CREATION_VALIDATION_FAILURE(failure_reason, base_options, mutate_function) \ static int s_mqtt5_client_options_validation_failure_##failure_reason##_fn( \ struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ aws_mqtt_library_init(allocator); \ struct aws_event_loop_group *elg = NULL; \ struct aws_host_resolver *hr = NULL; \ struct aws_client_bootstrap *bootstrap = NULL; \ elg = aws_event_loop_group_new_default(allocator, 1, NULL); \ \ struct aws_host_resolver_default_options hr_options = { \ .el_group = elg, \ .max_entries = 1, \ }; \ hr = aws_host_resolver_new_default(allocator, &hr_options); \ \ struct aws_client_bootstrap_options bootstrap_options = { \ .event_loop_group = elg, \ .host_resolver = hr, \ }; \ bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); \ \ struct aws_mqtt5_client_options good_options = base_options; \ good_options.bootstrap = bootstrap; \ \ ASSERT_SUCCESS(aws_mqtt5_client_options_validate(&good_options)); \ struct aws_mqtt5_client *client = aws_mqtt5_client_new(allocator, &good_options); \ ASSERT_NOT_NULL(client); \ aws_mqtt5_client_release(client); \ \ struct aws_mqtt5_client_options bad_options = good_options; \ (*mutate_function)(&bad_options); \ ASSERT_FAILS(aws_mqtt5_client_options_validate(&bad_options)); \ ASSERT_NULL(aws_mqtt5_client_new(allocator, &bad_options)); \ \ aws_client_bootstrap_release(bootstrap); \ aws_host_resolver_release(hr); \ aws_event_loop_group_release(elg); \ \ aws_mqtt_library_clean_up(); \ return AWS_OP_SUCCESS; \ } \ \ AWS_TEST_CASE( \ mqtt5_client_options_validation_failure_##failure_reason, \ s_mqtt5_client_options_validation_failure_##failure_reason##_fn) static struct aws_socket_options s_good_socket_options = { .type = AWS_SOCKET_STREAM, .domain = AWS_SOCKET_IPV4, .connect_timeout_ms = 10000, }; static struct aws_mqtt5_packet_connect_view s_good_connect = { .keep_alive_interval_seconds = 30, }; void s_lifecycle_event_handler(const struct aws_mqtt5_client_lifecycle_event *event) { (void)event; } void s_publish_received(const struct aws_mqtt5_packet_publish_view *publish, void *user_data) { (void)publish; (void)user_data; } static struct aws_mqtt5_client_options s_good_client_options = { .host_name = { .ptr = s_server_reference, .len = AWS_ARRAY_SIZE(s_server_reference) - 1, }, .port = 1883, .socket_options = &s_good_socket_options, .connect_options = &s_good_connect, .ping_timeout_ms = 5000, .lifecycle_event_handler = &s_lifecycle_event_handler, .publish_received_handler = &s_publish_received, }; static void s_make_no_host_client_options(struct aws_mqtt5_client_options *options) { options->host_name.ptr = NULL; options->host_name.len = 0; } AWS_CLIENT_CREATION_VALIDATION_FAILURE(no_host, s_good_client_options, s_make_no_host_client_options) static void s_make_no_bootstrap_client_options(struct aws_mqtt5_client_options *options) { options->bootstrap = NULL; } AWS_CLIENT_CREATION_VALIDATION_FAILURE(no_bootstrap, s_good_client_options, s_make_no_bootstrap_client_options) static void s_make_no_publish_received_client_options(struct aws_mqtt5_client_options *options) { 
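/* A client with no publish_received_handler has nowhere to deliver inbound PUBLISH packets, so clearing the handler below should be rejected by client options validation. */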
options->publish_received_handler = NULL; } AWS_CLIENT_CREATION_VALIDATION_FAILURE( no_publish_received, s_good_client_options, s_make_no_publish_received_client_options) static struct aws_socket_options s_bad_socket_options = { .type = AWS_SOCKET_DGRAM, }; static void s_make_invalid_socket_options_client_options(struct aws_mqtt5_client_options *options) { options->socket_options = &s_bad_socket_options; }; AWS_CLIENT_CREATION_VALIDATION_FAILURE( invalid_socket_options, s_good_client_options, s_make_invalid_socket_options_client_options) static struct aws_mqtt5_packet_connect_view s_client_id_too_long_connect_view = { .client_id = { .ptr = s_too_long_for_uint16, .len = AWS_ARRAY_SIZE(s_too_long_for_uint16), }, }; static void s_make_invalid_connect_client_options(struct aws_mqtt5_client_options *options) { options->connect_options = &s_client_id_too_long_connect_view; } AWS_CLIENT_CREATION_VALIDATION_FAILURE(invalid_connect, s_good_client_options, s_make_invalid_connect_client_options) static struct aws_mqtt5_packet_connect_view s_short_keep_alive_connect_view = { .keep_alive_interval_seconds = 20, }; static void s_make_invalid_keep_alive_client_options(struct aws_mqtt5_client_options *options) { options->connect_options = &s_short_keep_alive_connect_view; options->ping_timeout_ms = 30000; } AWS_CLIENT_CREATION_VALIDATION_FAILURE( invalid_keep_alive, s_good_client_options, s_make_invalid_keep_alive_client_options) static void s_make_invalid_port_client_options(struct aws_mqtt5_client_options *options) { options->port = 0xFFFFFFFF; } AWS_CLIENT_CREATION_VALIDATION_FAILURE(invalid_port, s_good_client_options, s_make_invalid_port_client_options) #define AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST_PREFIX(packet_type, failure_reason, init_success_settings_fn) \ static int s_mqtt5_operation_##packet_type##_connection_settings_validation_failure_##failure_reason##_fn( \ struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ \ struct aws_mqtt5_client dummy_client; \ AWS_ZERO_STRUCT(dummy_client); \ \ dummy_client.current_state = AWS_MCS_CONNECTED; \ (*init_success_settings_fn)(&dummy_client); #define AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST_SUFFIX(packet_type, failure_reason, init_failure_settings_fn) \ ASSERT_NOT_NULL(operation); \ \ ASSERT_SUCCESS(aws_mqtt5_operation_validate_vs_connection_settings(&operation->base, &dummy_client)); \ \ (*init_failure_settings_fn)(&dummy_client); \ \ ASSERT_FAILS(aws_mqtt5_operation_validate_vs_connection_settings(&operation->base, &dummy_client)); \ \ aws_mqtt5_operation_release(&operation->base); \ \ return AWS_OP_SUCCESS; \ } \ \ AWS_TEST_CASE( \ mqtt5_operation_##packet_type##_connection_settings_validation_failure_##failure_reason, \ s_mqtt5_operation_##packet_type##_connection_settings_validation_failure_##failure_reason##_fn) #define AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST3( \ packet_type, failure_reason, view_name, init_success_settings_fn, init_failure_settings_fn) \ AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST_PREFIX(packet_type, failure_reason, init_success_settings_fn) \ struct aws_mqtt5_operation_##packet_type *operation = \ aws_mqtt5_operation_##packet_type##_new(allocator, NULL, &view_name, NULL); \ AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST_SUFFIX(packet_type, failure_reason, init_failure_settings_fn) static struct aws_mqtt5_packet_subscribe_view s_exceeds_maximum_packet_size_subscribe_view = { .subscriptions = s_good_subscription, .subscription_count = AWS_ARRAY_SIZE(s_good_subscription), }; static void 
s_packet_size_init_settings_success_fn(struct aws_mqtt5_client *dummy_client) { dummy_client->negotiated_settings.maximum_packet_size_to_server = 100; } static void s_packet_size_init_settings_failure_fn(struct aws_mqtt5_client *dummy_client) { dummy_client->negotiated_settings.maximum_packet_size_to_server = 10; } AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST3( subscribe, exceeds_maximum_packet_size, s_exceeds_maximum_packet_size_subscribe_view, s_packet_size_init_settings_success_fn, s_packet_size_init_settings_failure_fn) static struct aws_mqtt5_packet_unsubscribe_view s_exceeds_maximum_packet_size_unsubscribe_view = { .topic_filters = s_good_topic_filter, .topic_filter_count = AWS_ARRAY_SIZE(s_good_topic_filter), }; AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST3( unsubscribe, exceeds_maximum_packet_size, s_exceeds_maximum_packet_size_unsubscribe_view, s_packet_size_init_settings_success_fn, s_packet_size_init_settings_failure_fn) static struct aws_mqtt5_packet_publish_view s_exceeds_maximum_packet_size_publish_view = { .topic = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, .payload = { .ptr = s_binary_data, .len = AWS_ARRAY_SIZE(s_binary_data), }, }; AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST3( publish, exceeds_maximum_packet_size, s_exceeds_maximum_packet_size_publish_view, s_packet_size_init_settings_success_fn, s_packet_size_init_settings_failure_fn) static struct aws_mqtt5_packet_publish_view s_exceeds_maximum_qos_publish_view = { .topic = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, .qos = AWS_MQTT5_QOS_EXACTLY_ONCE, }; static void s_maximum_qos_init_settings_success_fn(struct aws_mqtt5_client *dummy_client) { dummy_client->negotiated_settings.maximum_packet_size_to_server = 100; dummy_client->negotiated_settings.maximum_qos = AWS_MQTT5_QOS_EXACTLY_ONCE; } static void s_maximum_qos_init_settings_failure_fn(struct aws_mqtt5_client *dummy_client) { dummy_client->negotiated_settings.maximum_qos = AWS_MQTT5_QOS_AT_LEAST_ONCE; } AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST3( publish, exceeds_maximum_qos, s_exceeds_maximum_qos_publish_view, s_maximum_qos_init_settings_success_fn, s_maximum_qos_init_settings_failure_fn) static struct aws_mqtt5_packet_publish_view s_invalid_retain_publish_view = { .topic = { .ptr = s_good_topic, .len = AWS_ARRAY_SIZE(s_good_topic) - 1, }, .qos = AWS_MQTT5_QOS_AT_MOST_ONCE, .retain = true, }; static void s_invalid_retain_init_settings_success_fn(struct aws_mqtt5_client *dummy_client) { dummy_client->negotiated_settings.maximum_packet_size_to_server = 100; dummy_client->negotiated_settings.retain_available = true; } static void s_invalid_retain_init_settings_failure_fn(struct aws_mqtt5_client *dummy_client) { dummy_client->negotiated_settings.retain_available = false; } AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST3( publish, invalid_retain, s_invalid_retain_publish_view, s_invalid_retain_init_settings_success_fn, s_invalid_retain_init_settings_failure_fn) #define AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST4( \ packet_type, failure_reason, view_name, init_success_settings_fn, init_failure_settings_fn) \ AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST_PREFIX(packet_type, failure_reason, init_success_settings_fn) \ struct aws_mqtt5_operation_##packet_type *operation = \ aws_mqtt5_operation_##packet_type##_new(allocator, &view_name, NULL, NULL); \ AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST_SUFFIX(packet_type, failure_reason, init_failure_settings_fn) static uint8_t s_disconnect_reason_string[] = 
"We're leaving this planet for somewhere else"; static struct aws_byte_cursor s_disconnect_reason_string_cursor = { .ptr = s_disconnect_reason_string, .len = AWS_ARRAY_SIZE(s_disconnect_reason_string) - 1, }; static struct aws_mqtt5_packet_disconnect_view s_exceeds_maximum_packet_size_disconnect_view = { .reason_string = &s_disconnect_reason_string_cursor, }; AWS_CONNECTION_SETTINGS_VALIDATION_FAILURE_TEST4( disconnect, exceeds_maximum_packet_size, s_exceeds_maximum_packet_size_disconnect_view, s_packet_size_init_settings_success_fn, s_packet_size_init_settings_failure_fn) static const uint32_t s_positive_session_expiry = 1; static const uint32_t s_session_expiry = 5; static struct aws_mqtt5_packet_disconnect_view s_promote_zero_session_expiry_disconnect_view = { .session_expiry_interval_seconds = &s_session_expiry, }; static int mqtt5_operation_disconnect_connection_settings_validation_failure_promote_zero_session_expiry_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_event_loop_group *elg = aws_event_loop_group_new_default(allocator, 1, NULL); struct aws_host_resolver_default_options hr_options = { .el_group = elg, .max_entries = 1, }; struct aws_host_resolver *hr = aws_host_resolver_new_default(allocator, &hr_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = elg, .host_resolver = hr, }; struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); struct aws_mqtt5_client_options client_options = s_good_client_options; client_options.host_name = s_server_reference_cursor; client_options.bootstrap = bootstrap; struct aws_mqtt5_packet_connect_view connect_options = s_good_connect; connect_options.session_expiry_interval_seconds = &s_positive_session_expiry; client_options.connect_options = &connect_options; struct aws_mqtt5_client_options_storage *client_options_storage = aws_mqtt5_client_options_storage_new(allocator, &client_options); ASSERT_NOT_NULL(client_options_storage); struct aws_mqtt5_client dummy_client; AWS_ZERO_STRUCT(dummy_client); dummy_client.current_state = AWS_MCS_CONNECTED; dummy_client.negotiated_settings.maximum_packet_size_to_server = 100; dummy_client.config = client_options_storage; struct aws_mqtt5_operation_disconnect *operation = aws_mqtt5_operation_disconnect_new(allocator, &s_promote_zero_session_expiry_disconnect_view, NULL, NULL); ASSERT_SUCCESS(aws_mqtt5_operation_validate_vs_connection_settings(&operation->base, &dummy_client)); ((struct aws_mqtt5_client_options_storage *)dummy_client.config) ->connect->storage_view.session_expiry_interval_seconds = NULL; ASSERT_FAILS(aws_mqtt5_operation_validate_vs_connection_settings(&operation->base, &dummy_client)); aws_mqtt5_operation_release(&operation->base); aws_mqtt5_client_options_storage_destroy(client_options_storage); aws_client_bootstrap_release(bootstrap); aws_host_resolver_release(hr); aws_event_loop_group_release(elg); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_operation_disconnect_connection_settings_validation_failure_promote_zero_session_expiry, mqtt5_operation_disconnect_connection_settings_validation_failure_promote_zero_session_expiry_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_testing_utils.c000066400000000000000000002166221456575232400261150ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "mqtt5_testing_utils.h" #include #include #include #include #include #include #include #include #include #include int aws_mqtt5_test_verify_user_properties_raw( size_t property_count, const struct aws_mqtt5_user_property *properties, size_t expected_count, const struct aws_mqtt5_user_property *expected_properties) { ASSERT_UINT_EQUALS(expected_count, property_count); for (size_t i = 0; i < expected_count; ++i) { const struct aws_mqtt5_user_property *expected_property = &expected_properties[i]; struct aws_byte_cursor expected_name = expected_property->name; struct aws_byte_cursor expected_value = expected_property->value; bool found = false; for (size_t j = 0; j < property_count; ++j) { const struct aws_mqtt5_user_property *nv_pair = &properties[j]; if (aws_byte_cursor_compare_lexical(&expected_name, &nv_pair->name) == 0 && aws_byte_cursor_compare_lexical(&expected_value, &nv_pair->value) == 0) { found = true; break; } } if (!found) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_compute_connack_variable_length_fields( const struct aws_mqtt5_packet_connack_view *connack_view, uint32_t *total_remaining_length, uint32_t *property_length) { size_t local_property_length = aws_mqtt5_compute_user_property_encode_length(connack_view->user_properties, connack_view->user_property_count); ADD_OPTIONAL_U32_PROPERTY_LENGTH(connack_view->session_expiry_interval, local_property_length); ADD_OPTIONAL_U16_PROPERTY_LENGTH(connack_view->receive_maximum, local_property_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(connack_view->maximum_qos, local_property_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(connack_view->retain_available, local_property_length); ADD_OPTIONAL_U32_PROPERTY_LENGTH(connack_view->maximum_packet_size, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connack_view->assigned_client_identifier, local_property_length); ADD_OPTIONAL_U16_PROPERTY_LENGTH(connack_view->topic_alias_maximum, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connack_view->reason_string, local_property_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(connack_view->wildcard_subscriptions_available, local_property_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(connack_view->subscription_identifiers_available, local_property_length); ADD_OPTIONAL_U8_PROPERTY_LENGTH(connack_view->shared_subscriptions_available, local_property_length); ADD_OPTIONAL_U16_PROPERTY_LENGTH(connack_view->server_keep_alive, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connack_view->response_information, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connack_view->server_reference, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connack_view->authentication_method, local_property_length); ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connack_view->authentication_data, local_property_length); *property_length = (uint32_t)local_property_length; size_t property_length_encoding_length = 0; if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &property_length_encoding_length)) { return AWS_OP_ERR; } /* reason code (1 byte) + flags (1 byte) */ *total_remaining_length = *property_length + (uint32_t)property_length_encoding_length + 2; return AWS_OP_SUCCESS; } int aws_mqtt5_encoder_begin_connack(struct aws_mqtt5_encoder *encoder, const void *packet_view) { const struct aws_mqtt5_packet_connack_view *connack_view = packet_view; uint32_t total_remaining_length = 0; uint32_t property_length = 0; if (s_compute_connack_variable_length_fields(connack_view, &total_remaining_length, 
&property_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for CONNACK packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for a CONNACK packet with remaining length %" PRIu32, (void *)encoder->config.client, total_remaining_length); uint8_t flags = connack_view->session_present ? 1 : 0; ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_CONNACK, 0)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length); ADD_ENCODE_STEP_U8(encoder, flags); ADD_ENCODE_STEP_U8(encoder, (uint8_t)connack_view->reason_code); ADD_ENCODE_STEP_VLI(encoder, property_length); if (property_length > 0) { ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL, connack_view->session_expiry_interval); ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM, connack_view->receive_maximum); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_QOS, connack_view->maximum_qos); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_RETAIN_AVAILABLE, connack_view->retain_available); ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE, connack_view->maximum_packet_size); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_ASSIGNED_CLIENT_IDENTIFIER, connack_view->assigned_client_identifier); ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM, connack_view->topic_alias_maximum); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, connack_view->reason_string); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_WILDCARD_SUBSCRIPTIONS_AVAILABLE, connack_view->wildcard_subscriptions_available); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIERS_AVAILABLE, connack_view->subscription_identifiers_available); ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SHARED_SUBSCRIPTIONS_AVAILABLE, connack_view->shared_subscriptions_available); ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SERVER_KEEP_ALIVE, connack_view->server_keep_alive); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_RESPONSE_INFORMATION, connack_view->response_information); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE, connack_view->server_reference); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD, connack_view->authentication_method); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA, connack_view->authentication_data); aws_mqtt5_add_user_property_encoding_steps( encoder, connack_view->user_properties, connack_view->user_property_count); } return AWS_OP_SUCCESS; } int aws_mqtt5_encoder_begin_pingresp(struct aws_mqtt5_encoder *encoder, const void *packet_view) { (void)packet_view; AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for a PINGRESP packet", (void *)encoder->config.client); /* A ping response is just a fixed header with a 0-valued remaining length which we encode as a 0 u8 */ ADD_ENCODE_STEP_U8(encoder, 
aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PINGRESP, 0)); ADD_ENCODE_STEP_U8(encoder, 0); return AWS_OP_SUCCESS; } static int s_compute_suback_variable_length_fields( const struct aws_mqtt5_packet_suback_view *suback_view, uint32_t *total_remaining_length, uint32_t *property_length) { /* User Properties length */ size_t local_property_length = aws_mqtt5_compute_user_property_encode_length(suback_view->user_properties, suback_view->user_property_count); /* Optional Reason String */ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(suback_view->reason_string, local_property_length); *property_length = (uint32_t)local_property_length; size_t local_total_remaining_length = 0; if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &local_total_remaining_length)) { return AWS_OP_ERR; } /* Packet Identifier (2 bytes) */ local_total_remaining_length += 2; /* Reason Codes (1 byte each) */ local_total_remaining_length += suback_view->reason_code_count; /* Add property length */ *total_remaining_length = *property_length + (uint32_t)local_total_remaining_length; return AWS_OP_SUCCESS; } int aws_mqtt5_encoder_begin_suback(struct aws_mqtt5_encoder *encoder, const void *packet_view) { const struct aws_mqtt5_packet_suback_view *suback_view = packet_view; uint32_t total_remaining_length = 0; uint32_t property_length = 0; if (s_compute_suback_variable_length_fields(suback_view, &total_remaining_length, &property_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for SUBACK packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for a SUBACK packet with remaining length %" PRIu32, (void *)encoder->config.client, total_remaining_length); ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_SUBACK, 0)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length); ADD_ENCODE_STEP_U16(encoder, suback_view->packet_id); ADD_ENCODE_STEP_VLI(encoder, property_length); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, suback_view->reason_string); aws_mqtt5_add_user_property_encoding_steps(encoder, suback_view->user_properties, suback_view->user_property_count); for (size_t i = 0; i < suback_view->reason_code_count; ++i) { ADD_ENCODE_STEP_U8(encoder, suback_view->reason_codes[i]); } return AWS_OP_SUCCESS; } static int s_compute_unsuback_variable_length_fields( const struct aws_mqtt5_packet_unsuback_view *unsuback_view, uint32_t *total_remaining_length, uint32_t *property_length) { /* User Properties length */ size_t local_property_length = aws_mqtt5_compute_user_property_encode_length( unsuback_view->user_properties, unsuback_view->user_property_count); /* Optional Reason String */ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(unsuback_view->reason_string, local_property_length); *property_length = (uint32_t)local_property_length; size_t local_total_remaining_length = 0; if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &local_total_remaining_length)) { return AWS_OP_ERR; } /* Packet Identifier (2 bytes) */ local_total_remaining_length += 2; /* Reason Codes (1 byte each) */ local_total_remaining_length += unsuback_view->reason_code_count; /* Add property length */ *total_remaining_length = *property_length + (uint32_t)local_total_remaining_length; return AWS_OP_SUCCESS; } int 
aws_mqtt5_encoder_begin_unsuback(struct aws_mqtt5_encoder *encoder, const void *packet_view) { const struct aws_mqtt5_packet_unsuback_view *unsuback_view = packet_view; uint32_t total_remaining_length = 0; uint32_t property_length = 0; if (s_compute_unsuback_variable_length_fields(unsuback_view, &total_remaining_length, &property_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for UNSUBACK packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for an UNSUBACK packet with remaining length %" PRIu32, (void *)encoder->config.client, total_remaining_length); ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_UNSUBACK, 0)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length); ADD_ENCODE_STEP_U16(encoder, unsuback_view->packet_id); ADD_ENCODE_STEP_VLI(encoder, property_length); ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, unsuback_view->reason_string); aws_mqtt5_add_user_property_encoding_steps( encoder, unsuback_view->user_properties, unsuback_view->user_property_count); for (size_t i = 0; i < unsuback_view->reason_code_count; ++i) { ADD_ENCODE_STEP_U8(encoder, unsuback_view->reason_codes[i]); } return AWS_OP_SUCCESS; } static int s_compute_puback_variable_length_fields( const struct aws_mqtt5_packet_puback_view *puback_view, uint32_t *total_remaining_length, uint32_t *property_length) { /* User Properties length */ size_t local_property_length = aws_mqtt5_compute_user_property_encode_length(puback_view->user_properties, puback_view->user_property_count); /* Optional Reason String */ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(puback_view->reason_string, local_property_length); *property_length = (uint32_t)local_property_length; size_t local_total_remaining_length = 0; if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &local_total_remaining_length)) { return AWS_OP_ERR; } /* Packet Identifier (2 bytes) */ local_total_remaining_length += 2; /* Reason Code */ local_total_remaining_length += 1; /* Add property length */ *total_remaining_length = *property_length + (uint32_t)local_total_remaining_length; return AWS_OP_SUCCESS; } int aws_mqtt5_encoder_begin_puback(struct aws_mqtt5_encoder *encoder, const void *packet_view) { const struct aws_mqtt5_packet_puback_view *puback_view = packet_view; uint32_t total_remaining_length = 0; uint32_t property_length = 0; if (s_compute_puback_variable_length_fields(puback_view, &total_remaining_length, &property_length)) { int error_code = aws_last_error(); AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - failed to compute variable length values for PUBACK packet with error " "%d(%s)", (void *)encoder->config.client, error_code, aws_error_debug_str(error_code)); return AWS_OP_ERR; } AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "(%p) mqtt5 client encoder - setting up encode for an PUBACK packet with remaining length %" PRIu32, (void *)encoder->config.client, total_remaining_length); ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PUBACK, 0)); ADD_ENCODE_STEP_VLI(encoder, total_remaining_length); ADD_ENCODE_STEP_U16(encoder, puback_view->packet_id); ADD_ENCODE_STEP_U8(encoder, puback_view->reason_code); ADD_ENCODE_STEP_VLI(encoder, property_length); 
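/* Property section of the PUBACK: optional reason string then user properties, matching s_compute_puback_variable_length_fields() above (remaining length = packet id (2) + reason code (1) + property-length VLI + properties). */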
ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY( encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, puback_view->reason_string); aws_mqtt5_add_user_property_encoding_steps(encoder, puback_view->user_properties, puback_view->user_property_count); return AWS_OP_SUCCESS; } void aws_mqtt5_encode_init_testing_function_table(struct aws_mqtt5_encoder_function_table *function_table) { *function_table = *g_aws_mqtt5_encoder_default_function_table; function_table->encoders_by_packet_type[AWS_MQTT5_PT_PINGRESP] = &aws_mqtt5_encoder_begin_pingresp; function_table->encoders_by_packet_type[AWS_MQTT5_PT_CONNACK] = &aws_mqtt5_encoder_begin_connack; function_table->encoders_by_packet_type[AWS_MQTT5_PT_SUBACK] = &aws_mqtt5_encoder_begin_suback; function_table->encoders_by_packet_type[AWS_MQTT5_PT_UNSUBACK] = &aws_mqtt5_encoder_begin_unsuback; function_table->encoders_by_packet_type[AWS_MQTT5_PT_PUBACK] = &aws_mqtt5_encoder_begin_puback; } static int s_aws_mqtt5_decoder_decode_pingreq(struct aws_mqtt5_decoder *decoder) { if (decoder->packet_cursor.len != 0) { goto error; } uint8_t expected_first_byte = aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PINGREQ, 0); if (decoder->packet_first_byte != expected_first_byte || decoder->remaining_length != 0) { goto error; } if (decoder->options.on_packet_received != NULL) { (*decoder->options.on_packet_received)(AWS_MQTT5_PT_PINGREQ, NULL, decoder->options.callback_user_data); } return AWS_OP_SUCCESS; error: AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) aws_mqtt5_decoder - PINGREQ decode failure", decoder->options.callback_user_data); return aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } /* decode function for all CONNECT properties. Movable to test-only code if we switched to a decoding function table */ static int s_read_connect_property( struct aws_mqtt5_packet_connect_storage *storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_connect_view *storage_view = &storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &storage->session_expiry_interval_seconds, &storage_view->session_expiry_interval_seconds, done); break; case AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM: AWS_MQTT5_DECODE_U16_OPTIONAL( packet_cursor, &storage->receive_maximum, &storage_view->receive_maximum, done); break; case AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &storage->maximum_packet_size_bytes, &storage_view->maximum_packet_size_bytes, done); break; case AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM: AWS_MQTT5_DECODE_U16_OPTIONAL( packet_cursor, &storage->topic_alias_maximum, &storage_view->topic_alias_maximum, done); break; case AWS_MQTT5_PROPERTY_TYPE_REQUEST_RESPONSE_INFORMATION: AWS_MQTT5_DECODE_U8_OPTIONAL( packet_cursor, &storage->request_response_information, &storage_view->request_response_information, done); break; case AWS_MQTT5_PROPERTY_TYPE_REQUEST_PROBLEM_INFORMATION: AWS_MQTT5_DECODE_U8_OPTIONAL( packet_cursor, &storage->request_problem_information, &storage_view->request_problem_information, done); break; case AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &storage->authentication_method, &storage_view->authentication_method, done); break; case AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, 
&storage->authentication_data, &storage_view->authentication_data, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* decode function for all will properties. Movable to test-only code if we switched to a decoding function table */ static int s_read_will_property( struct aws_mqtt5_packet_connect_storage *connect_storage, struct aws_mqtt5_packet_publish_storage *will_storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_connect_view *connect_storage_view = &connect_storage->storage_view; struct aws_mqtt5_packet_publish_view *will_storage_view = &will_storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_WILL_DELAY_INTERVAL: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &connect_storage->will_delay_interval_seconds, &connect_storage_view->will_delay_interval_seconds, done); break; case AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR: AWS_MQTT5_DECODE_U8_OPTIONAL( packet_cursor, &will_storage->payload_format, &will_storage_view->payload_format, done); break; case AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL: AWS_MQTT5_DECODE_U32_OPTIONAL( packet_cursor, &will_storage->message_expiry_interval_seconds, &will_storage_view->message_expiry_interval_seconds, done); break; case AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &will_storage->content_type, &will_storage_view->content_type, done); break; case AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &will_storage->response_topic, &will_storage_view->response_topic, done); break; case AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA: AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( packet_cursor, &will_storage->correlation_data, &will_storage_view->correlation_data, done); break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &will_storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* * Decodes a CONNECT packet whose data must be in the scratch buffer. * Could be moved to test-only if we used a function table for per-packet-type decoding. 
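* Decode order follows the CONNECT layout on the wire: protocol name and level (the 7-byte prefix checked below, i.e. the length-prefixed "MQTT" string plus the protocol level byte), connect flags, keep-alive, properties, client id, optional will (will properties, topic, payload), then optional username and password.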
*/ static int s_aws_mqtt5_decoder_decode_connect(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_connect_storage connect_storage; struct aws_mqtt5_packet_publish_storage publish_storage; int result = AWS_OP_ERR; bool has_will = false; if (aws_mqtt5_packet_connect_storage_init_from_external_storage(&connect_storage, decoder->allocator)) { return AWS_OP_ERR; } if (aws_mqtt5_packet_publish_storage_init_from_external_storage(&publish_storage, decoder->allocator)) { goto done; } uint8_t first_byte = decoder->packet_first_byte; /* CONNECT flags must be zero by protocol */ if ((first_byte & 0x0F) != 0) { goto done; } struct aws_byte_cursor packet_cursor = decoder->packet_cursor; if (decoder->remaining_length != (uint32_t)packet_cursor.len) { goto done; } struct aws_byte_cursor protocol_cursor = aws_byte_cursor_advance(&packet_cursor, 7); if (!aws_byte_cursor_eq(&protocol_cursor, &g_aws_mqtt5_connect_protocol_cursor)) { goto done; } uint8_t connect_flags = 0; AWS_MQTT5_DECODE_U8(&packet_cursor, &connect_flags, done); struct aws_mqtt5_packet_connect_view *connect_storage_view = &connect_storage.storage_view; struct aws_mqtt5_packet_publish_view *will_storage_view = &publish_storage.storage_view; connect_storage_view->clean_start = (connect_flags & AWS_MQTT5_CONNECT_FLAGS_CLEAN_START_BIT) != 0; AWS_MQTT5_DECODE_U16(&packet_cursor, &connect_storage_view->keep_alive_interval_seconds, done); uint32_t connect_property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &connect_property_length, done); if (connect_property_length > packet_cursor.len) { goto done; } struct aws_byte_cursor connect_property_cursor = aws_byte_cursor_advance(&packet_cursor, connect_property_length); while (connect_property_cursor.len > 0) { if (s_read_connect_property(&connect_storage, &connect_property_cursor)) { goto done; } } connect_storage_view->user_property_count = aws_mqtt5_user_property_set_size(&connect_storage.user_properties); connect_storage_view->user_properties = connect_storage.user_properties.properties.data; AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &connect_storage_view->client_id, done); has_will = (connect_flags & AWS_MQTT5_CONNECT_FLAGS_WILL_BIT) != 0; if (has_will) { uint32_t will_property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &will_property_length, done); if (will_property_length > packet_cursor.len) { goto done; } struct aws_byte_cursor will_property_cursor = aws_byte_cursor_advance(&packet_cursor, will_property_length); while (will_property_cursor.len > 0) { if (s_read_will_property(&connect_storage, &publish_storage, &will_property_cursor)) { goto done; } } AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &will_storage_view->topic, done); AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &will_storage_view->payload, done); /* apply will flags from the connect flags to the will's storage */ will_storage_view->qos = (enum aws_mqtt5_qos)( (connect_flags >> AWS_MQTT5_CONNECT_FLAGS_WILL_QOS_BIT_POSITION) & AWS_MQTT5_CONNECT_FLAGS_WILL_QOS_BIT_MASK); will_storage_view->retain = (connect_flags & AWS_MQTT5_CONNECT_FLAGS_WILL_RETAIN_BIT) != 0; will_storage_view->user_property_count = aws_mqtt5_user_property_set_size(&publish_storage.user_properties); will_storage_view->user_properties = publish_storage.user_properties.properties.data; } if ((connect_flags & AWS_MQTT5_CONNECT_FLAGS_USER_NAME_BIT) != 0) { AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( &packet_cursor, &connect_storage.username, &connect_storage_view->username, done); } if ((connect_flags & 
AWS_MQTT5_CONNECT_FLAGS_PASSWORD_BIT) != 0) { AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( &packet_cursor, &connect_storage.password, &connect_storage_view->password, done); } if (packet_cursor.len == 0) { result = AWS_OP_SUCCESS; } done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { if (has_will) { connect_storage.storage_view.will = &publish_storage.storage_view; } result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_CONNECT, &connect_storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) aws_mqtt5_decoder - CONNECT decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_publish_storage_clean_up(&publish_storage); aws_mqtt5_packet_connect_storage_clean_up(&connect_storage); return result; } /* decode function for subscribe properties. Movable to test-only code if we switched to a decoding function table */ static int s_read_subscribe_property( struct aws_mqtt5_packet_subscribe_storage *subscribe_storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); struct aws_mqtt5_packet_subscribe_view *storage_view = &subscribe_storage->storage_view; switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER: AWS_MQTT5_DECODE_VLI(packet_cursor, &subscribe_storage->subscription_identifier, done); storage_view->subscription_identifier = &subscribe_storage->subscription_identifier; break; case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &subscribe_storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* * Decodes a SUBSCRIBE packet whose data must be in the scratch buffer. 
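* Reads the packet id, the property section (subscription identifier, user properties), then one or more topic-filter/subscription-options pairs.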
*/ static int s_aws_mqtt5_decoder_decode_subscribe(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_subscribe_storage subscribe_storage; int result = AWS_OP_ERR; if (aws_mqtt5_packet_subscribe_storage_init_from_external_storage(&subscribe_storage, decoder->allocator)) { goto done; } struct aws_mqtt5_packet_subscribe_view *storage_view = &subscribe_storage.storage_view; /* SUBSCRIBE flags must be 2 by protocol*/ uint8_t first_byte = decoder->packet_first_byte; if ((first_byte & 0x0F) != 2) { goto done; } struct aws_byte_cursor packet_cursor = decoder->packet_cursor; if (decoder->remaining_length != (uint32_t)packet_cursor.len) { goto done; } uint16_t packet_id = 0; AWS_MQTT5_DECODE_U16(&packet_cursor, &packet_id, done); subscribe_storage.storage_view.packet_id = packet_id; uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); if (property_length > packet_cursor.len) { goto done; } struct aws_byte_cursor property_cursor = aws_byte_cursor_advance(&packet_cursor, property_length); while (property_cursor.len > 0) { if (s_read_subscribe_property(&subscribe_storage, &property_cursor)) { goto done; } } while (packet_cursor.len > 0) { struct aws_mqtt5_subscription_view subscription_view; AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &subscription_view.topic_filter, done); uint8_t subscription_options = 0; AWS_MQTT5_DECODE_U8(&packet_cursor, &subscription_options, done); subscription_view.no_local = (subscription_options & AWS_MQTT5_SUBSCRIBE_FLAGS_NO_LOCAL) != 0; subscription_view.retain_as_published = (subscription_options & AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_AS_PUBLISHED) != 0; subscription_view.qos = (enum aws_mqtt5_qos)( (subscription_options >> AWS_MQTT5_SUBSCRIBE_FLAGS_QOS_BIT_POSITION) & AWS_MQTT5_SUBSCRIBE_FLAGS_QOS_BIT_MASK); subscription_view.retain_handling_type = (enum aws_mqtt5_retain_handling_type)( (subscription_options >> AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_HANDLING_TYPE_BIT_POSITION) & AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_HANDLING_TYPE_BIT_MASK); if (aws_array_list_push_back(&subscribe_storage.subscriptions, &subscription_view)) { goto done; } } storage_view->subscription_count = aws_array_list_length(&subscribe_storage.subscriptions); storage_view->subscriptions = subscribe_storage.subscriptions.data; storage_view->user_property_count = aws_mqtt5_user_property_set_size(&subscribe_storage.user_properties); storage_view->user_properties = subscribe_storage.user_properties.properties.data; if (packet_cursor.len == 0) { result = AWS_OP_SUCCESS; } done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_SUBSCRIBE, &subscribe_storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) aws_mqtt5_decoder - SUBSCRIBE decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_subscribe_storage_clean_up(&subscribe_storage); return result; } /* decode function for unsubscribe properties. 
Movable to test-only code if we switched to a decoding function table */ static int s_read_unsubscribe_property( struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage, struct aws_byte_cursor *packet_cursor) { int result = AWS_OP_ERR; uint8_t property_type = 0; AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done); switch (property_type) { case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY: if (aws_mqtt5_decode_user_property(packet_cursor, &unsubscribe_storage->user_properties)) { goto done; } break; default: goto done; } result = AWS_OP_SUCCESS; done: if (result != AWS_OP_SUCCESS) { aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } return result; } /* * Decodes an UNSUBSCRIBE packet whose data must be in the scratch buffer. */ static int s_aws_mqtt5_decoder_decode_unsubscribe(struct aws_mqtt5_decoder *decoder) { struct aws_mqtt5_packet_unsubscribe_storage unsubscribe_storage; int result = AWS_OP_ERR; if (aws_mqtt5_packet_unsubscribe_storage_init_from_external_storage(&unsubscribe_storage, decoder->allocator)) { goto done; } struct aws_mqtt5_packet_unsubscribe_view *storage_view = &unsubscribe_storage.storage_view; /* UNSUBSCRIBE flags must be 2 by protocol*/ uint8_t first_byte = decoder->packet_first_byte; if ((first_byte & 0x0F) != 2) { goto done; } struct aws_byte_cursor packet_cursor = decoder->packet_cursor; if (decoder->remaining_length != (uint32_t)packet_cursor.len) { goto done; } uint16_t packet_id = 0; AWS_MQTT5_DECODE_U16(&packet_cursor, &packet_id, done); unsubscribe_storage.storage_view.packet_id = packet_id; uint32_t property_length = 0; AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done); if (property_length > packet_cursor.len) { goto done; } struct aws_byte_cursor property_cursor = aws_byte_cursor_advance(&packet_cursor, property_length); while (property_cursor.len > 0) { if (s_read_unsubscribe_property(&unsubscribe_storage, &property_cursor)) { goto done; } } while (packet_cursor.len > 0) { struct aws_byte_cursor topic; AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &topic, done); if (aws_array_list_push_back(&unsubscribe_storage.topic_filters, &topic)) { goto done; } } storage_view->topic_filter_count = aws_array_list_length(&unsubscribe_storage.topic_filters); storage_view->topic_filters = unsubscribe_storage.topic_filters.data; storage_view->user_property_count = aws_mqtt5_user_property_set_size(&unsubscribe_storage.user_properties); storage_view->user_properties = unsubscribe_storage.user_properties.properties.data; result = AWS_OP_SUCCESS; done: if (result == AWS_OP_SUCCESS) { if (decoder->options.on_packet_received != NULL) { result = (*decoder->options.on_packet_received)( AWS_MQTT5_PT_UNSUBSCRIBE, &unsubscribe_storage.storage_view, decoder->options.callback_user_data); } } else { AWS_LOGF_ERROR( AWS_LS_MQTT5_GENERAL, "(%p) aws_mqtt5_decoder - UNSUBSCRIBE decode failure", decoder->options.callback_user_data); aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); } aws_mqtt5_packet_unsubscribe_storage_clean_up(&unsubscribe_storage); return result; } void aws_mqtt5_decode_init_testing_function_table(struct aws_mqtt5_decoder_function_table *function_table) { *function_table = *g_aws_mqtt5_default_decoder_table; function_table->decoders_by_packet_type[AWS_MQTT5_PT_PINGREQ] = &s_aws_mqtt5_decoder_decode_pingreq; function_table->decoders_by_packet_type[AWS_MQTT5_PT_CONNECT] = &s_aws_mqtt5_decoder_decode_connect; function_table->decoders_by_packet_type[AWS_MQTT5_PT_SUBSCRIBE] = &s_aws_mqtt5_decoder_decode_subscribe; 
function_table->decoders_by_packet_type[AWS_MQTT5_PT_UNSUBSCRIBE] = &s_aws_mqtt5_decoder_decode_unsubscribe; } static int s_aws_mqtt5_mock_test_fixture_on_packet_received_fn( enum aws_mqtt5_packet_type type, void *packet_view, void *decoder_callback_user_data) { struct aws_mqtt5_mock_server_packet_record packet_record; AWS_ZERO_STRUCT(packet_record); struct aws_mqtt5_server_mock_connection_context *server_connection = decoder_callback_user_data; struct aws_mqtt5_client_mock_test_fixture *test_fixture = server_connection->test_fixture; switch (type) { case AWS_MQTT5_PT_CONNECT: packet_record.packet_storage = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct aws_mqtt5_packet_connect_storage)); aws_mqtt5_packet_connect_storage_init(packet_record.packet_storage, test_fixture->allocator, packet_view); break; case AWS_MQTT5_PT_DISCONNECT: packet_record.packet_storage = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct aws_mqtt5_packet_disconnect_storage)); aws_mqtt5_packet_disconnect_storage_init( packet_record.packet_storage, test_fixture->allocator, packet_view); break; case AWS_MQTT5_PT_SUBSCRIBE: packet_record.packet_storage = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct aws_mqtt5_packet_subscribe_storage)); aws_mqtt5_packet_subscribe_storage_init(packet_record.packet_storage, test_fixture->allocator, packet_view); break; case AWS_MQTT5_PT_UNSUBSCRIBE: packet_record.packet_storage = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct aws_mqtt5_packet_unsubscribe_storage)); aws_mqtt5_packet_unsubscribe_storage_init( packet_record.packet_storage, test_fixture->allocator, packet_view); break; case AWS_MQTT5_PT_PUBLISH: packet_record.packet_storage = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct aws_mqtt5_packet_publish_storage)); aws_mqtt5_packet_publish_storage_init(packet_record.packet_storage, test_fixture->allocator, packet_view); break; case AWS_MQTT5_PT_PUBACK: packet_record.packet_storage = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct aws_mqtt5_packet_puback_storage)); aws_mqtt5_packet_puback_storage_init(packet_record.packet_storage, test_fixture->allocator, packet_view); break; default: break; } packet_record.storage_allocator = test_fixture->allocator; packet_record.timestamp = (*test_fixture->client_vtable.get_current_time_fn)(); packet_record.packet_type = type; aws_mutex_lock(&test_fixture->lock); AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "mqtt5 mock server received packet of type %s", aws_mqtt5_packet_type_to_c_string(type)); aws_array_list_push_back(&test_fixture->server_received_packets, &packet_record); aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); int result = AWS_OP_SUCCESS; aws_mqtt5_on_mock_server_packet_received_fn *packet_handler = test_fixture->server_function_table->packet_handlers[type]; if (packet_handler != NULL) { result = (*packet_handler)(packet_view, server_connection, server_connection->test_fixture->mock_server_user_data); } return result; } static int s_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { struct aws_mqtt5_server_mock_connection_context *server_connection = handler->impl; if (message->message_type != AWS_IO_MESSAGE_APPLICATION_DATA) { return AWS_OP_ERR; } struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); int result = aws_mqtt5_decoder_on_data_received(&server_connection->decoder, message_cursor); if (result != AWS_OP_SUCCESS) { 
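/* The decoder rejected the incoming bytes: shut the mock server's channel down with the raised error and
 * skip the read-window increment below. Note that the handler still returns AWS_OP_SUCCESS; the channel
 * shutdown itself is what surfaces to the client under test as the connection dropping. */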
aws_channel_shutdown(server_connection->channel, aws_last_error()); goto done; } aws_channel_slot_increment_read_window(slot, message->message_data.len); done: aws_mem_release(message->allocator, message); return AWS_OP_SUCCESS; } static int s_shutdown( struct aws_channel_handler *handler, struct aws_channel_slot *slot, enum aws_channel_direction dir, int error_code, bool free_scarce_resources_immediately) { (void)handler; return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately); } static size_t s_initial_window_size(struct aws_channel_handler *handler) { (void)handler; return SIZE_MAX; } static void s_destroy(struct aws_channel_handler *handler) { struct aws_mqtt5_server_mock_connection_context *server_connection = handler->impl; aws_event_loop_cancel_task( aws_channel_get_event_loop(server_connection->channel), &server_connection->service_task); aws_mqtt5_decoder_clean_up(&server_connection->decoder); aws_mqtt5_encoder_clean_up(&server_connection->encoder); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&server_connection->inbound_alias_resolver); aws_mem_release(server_connection->allocator, server_connection); } static size_t s_message_overhead(struct aws_channel_handler *handler) { (void)handler; return 0; } static struct aws_channel_handler_vtable s_mqtt5_mock_server_channel_handler_vtable = { .process_read_message = &s_process_read_message, .process_write_message = NULL, .increment_read_window = NULL, .shutdown = &s_shutdown, .initial_window_size = &s_initial_window_size, .message_overhead = &s_message_overhead, .destroy = &s_destroy, }; static void s_mock_server_service_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) { if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_mqtt5_server_mock_connection_context *server_connection = arg; aws_mqtt5_mock_server_service_fn *service_fn = server_connection->test_fixture->server_function_table->service_task_fn; if (service_fn != NULL) { (*service_fn)(server_connection, server_connection->test_fixture->mock_server_user_data); } uint64_t now = 0; aws_high_res_clock_get_ticks(&now); uint64_t next_service_time = now + aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_event_loop_schedule_task_future( aws_channel_get_event_loop(server_connection->channel), task, next_service_time); } static void s_on_incoming_channel_setup_fn( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; struct aws_mqtt5_client_mock_test_fixture *test_fixture = user_data; if (!error_code) { struct aws_channel_slot *test_handler_slot = aws_channel_slot_new(channel); aws_channel_slot_insert_end(channel, test_handler_slot); struct aws_mqtt5_server_mock_connection_context *server_connection = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct aws_mqtt5_server_mock_connection_context)); server_connection->allocator = test_fixture->allocator; server_connection->channel = channel; server_connection->test_fixture = test_fixture; server_connection->slot = test_handler_slot; server_connection->handler.alloc = server_connection->allocator; server_connection->handler.vtable = &s_mqtt5_mock_server_channel_handler_vtable; server_connection->handler.impl = server_connection; aws_task_init( &server_connection->service_task, s_mock_server_service_task_fn, server_connection, "mock_server_service_task_fn"); aws_event_loop_schedule_task_now(aws_channel_get_event_loop(channel), 
&server_connection->service_task); aws_channel_slot_set_handler(server_connection->slot, &server_connection->handler); aws_mqtt5_encode_init_testing_function_table(&server_connection->encoding_table); struct aws_mqtt5_encoder_options encoder_options = { .client = NULL, .encoders = &server_connection->encoding_table, }; aws_mqtt5_encoder_init(&server_connection->encoder, server_connection->allocator, &encoder_options); aws_mqtt5_decode_init_testing_function_table(&server_connection->decoding_table); struct aws_mqtt5_decoder_options decoder_options = { .callback_user_data = server_connection, .on_packet_received = s_aws_mqtt5_mock_test_fixture_on_packet_received_fn, .decoder_table = &server_connection->decoding_table, }; aws_mqtt5_decoder_init(&server_connection->decoder, server_connection->allocator, &decoder_options); aws_mqtt5_inbound_topic_alias_resolver_init( &server_connection->inbound_alias_resolver, server_connection->allocator); aws_mqtt5_inbound_topic_alias_resolver_reset( &server_connection->inbound_alias_resolver, test_fixture->maximum_inbound_topic_aliases); aws_mqtt5_decoder_set_inbound_topic_alias_resolver( &server_connection->decoder, &server_connection->inbound_alias_resolver); aws_mutex_lock(&test_fixture->lock); test_fixture->server_channel = channel; aws_mutex_unlock(&test_fixture->lock); /* * Just like the tls tests in aws-c-io, it's possible for the server channel setup to execute after the client * channel setup has already posted data to the socket. In this case, the read notification gets lost because * the server hasn't subscribed to it yet and then we hang and time out. So do the same thing we do for * tls server channel setup and force a read of the socket after we're fully initialized. */ aws_channel_trigger_read(channel); } } static void s_on_incoming_channel_shutdown_fn( struct aws_server_bootstrap *bootstrap, int error_code, struct aws_channel *channel, void *user_data) { (void)bootstrap; (void)error_code; (void)channel; (void)user_data; } static void s_on_listener_destroy(struct aws_server_bootstrap *bootstrap, void *user_data) { (void)bootstrap; struct aws_mqtt5_client_mock_test_fixture *test_fixture = user_data; aws_mutex_lock(&test_fixture->lock); test_fixture->listener_destroyed = true; aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_one(&test_fixture->signal); } static bool s_is_listener_destroyed(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; return test_fixture->listener_destroyed; } static void s_wait_on_listener_cleanup(struct aws_mqtt5_client_mock_test_fixture *test_fixture) { aws_mutex_lock(&test_fixture->lock); aws_condition_variable_wait_pred(&test_fixture->signal, &test_fixture->lock, s_is_listener_destroyed, test_fixture); aws_mutex_unlock(&test_fixture->lock); } static bool s_has_client_terminated(void *arg) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = arg; return test_fixture->client_terminated; } static void s_wait_for_client_terminated(struct aws_mqtt5_client_mock_test_fixture *test_context) { aws_mutex_lock(&test_context->lock); aws_condition_variable_wait_pred(&test_context->signal, &test_context->lock, s_has_client_terminated, test_context); aws_mutex_unlock(&test_context->lock); } void s_aws_mqtt5_test_fixture_lifecycle_event_handler(const struct aws_mqtt5_client_lifecycle_event *event) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = event->user_data; struct aws_mqtt5_lifecycle_event_record *record = aws_mem_calloc(test_fixture->allocator, 1, sizeof(struct 
aws_mqtt5_lifecycle_event_record)); record->allocator = test_fixture->allocator; aws_high_res_clock_get_ticks(&record->timestamp); record->event = *event; if (event->settings != NULL) { record->settings_storage = *event->settings; record->event.settings = &record->settings_storage; } if (event->disconnect_data != NULL) { aws_mqtt5_packet_disconnect_storage_init( &record->disconnect_storage, record->allocator, event->disconnect_data); record->event.disconnect_data = &record->disconnect_storage.storage_view; } if (event->connack_data != NULL) { aws_mqtt5_packet_connack_storage_init(&record->connack_storage, record->allocator, event->connack_data); record->event.connack_data = &record->connack_storage.storage_view; } aws_mutex_lock(&test_fixture->lock); AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "mqtt5 mock server received lifecycle event of type %s", aws_mqtt5_client_lifecycle_event_type_to_c_string(event->event_type)); aws_array_list_push_back(&test_fixture->lifecycle_events, &record); aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); aws_mqtt5_client_connection_event_callback_fn *event_callback = test_fixture->original_lifecycle_event_handler; if (event_callback != NULL) { struct aws_mqtt5_client_lifecycle_event event_copy = *event; event_copy.user_data = test_fixture->original_lifecycle_event_handler_user_data; (*event_callback)(&event_copy); } } void s_aws_mqtt5_test_fixture_state_changed_callback( struct aws_mqtt5_client *client, enum aws_mqtt5_client_state old_state, enum aws_mqtt5_client_state new_state, void *vtable_user_data) { (void)old_state; (void)client; struct aws_mqtt5_client_mock_test_fixture *test_fixture = vtable_user_data; aws_mutex_lock(&test_fixture->lock); AWS_LOGF_DEBUG( AWS_LS_MQTT5_GENERAL, "mqtt5 mock server received client state change to %s", aws_mqtt5_client_state_to_c_string(new_state)); aws_array_list_push_back(&test_fixture->client_states, &new_state); aws_mutex_unlock(&test_fixture->lock); } static void s_aws_mqtt5_test_fixture_statistics_changed_callback( struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation, void *vtable_user_data) { (void)operation; struct aws_mqtt5_client_mock_test_fixture *test_fixture = vtable_user_data; struct aws_mqtt5_client_operation_statistics stats; AWS_ZERO_STRUCT(stats); aws_mutex_lock(&test_fixture->lock); aws_mqtt5_client_get_stats(client, &stats); aws_array_list_push_back(&test_fixture->client_statistics, &stats); aws_mutex_unlock(&test_fixture->lock); } static void s_on_test_client_termination(void *user_data) { struct aws_mqtt5_client_mock_test_fixture *test_fixture = user_data; aws_mutex_lock(&test_fixture->lock); test_fixture->client_terminated = true; aws_mutex_unlock(&test_fixture->lock); aws_condition_variable_notify_all(&test_fixture->signal); } int aws_mqtt5_client_mock_test_fixture_init( struct aws_mqtt5_client_mock_test_fixture *test_fixture, struct aws_allocator *allocator, struct aws_mqtt5_client_mqtt5_mock_test_fixture_options *options) { AWS_ZERO_STRUCT(*test_fixture); test_fixture->allocator = allocator; aws_mutex_init(&test_fixture->lock); aws_condition_variable_init(&test_fixture->signal); test_fixture->server_function_table = options->server_function_table; test_fixture->mock_server_user_data = options->mock_server_user_data; struct aws_socket_options socket_options = { .connect_timeout_ms = 1000, .domain = AWS_SOCKET_LOCAL, }; test_fixture->socket_options = socket_options; test_fixture->server_elg = aws_event_loop_group_new_default(allocator, 1, 
NULL); test_fixture->server_bootstrap = aws_server_bootstrap_new(allocator, test_fixture->server_elg); test_fixture->client_elg = aws_event_loop_group_new_default(allocator, 4, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = test_fixture->client_elg, .max_entries = 1, }; test_fixture->host_resolver = aws_host_resolver_new_default(allocator, &resolver_options); struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = test_fixture->client_elg, .user_data = test_fixture, .host_resolver = test_fixture->host_resolver, }; test_fixture->client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); aws_socket_endpoint_init_local_address_for_test(&test_fixture->endpoint); struct aws_server_socket_channel_bootstrap_options server_bootstrap_options = { .bootstrap = test_fixture->server_bootstrap, .host_name = test_fixture->endpoint.address, .port = test_fixture->endpoint.port, .socket_options = &test_fixture->socket_options, .incoming_callback = s_on_incoming_channel_setup_fn, .shutdown_callback = s_on_incoming_channel_shutdown_fn, .destroy_callback = s_on_listener_destroy, .user_data = test_fixture, }; test_fixture->listener = aws_server_bootstrap_new_socket_listener(&server_bootstrap_options); test_fixture->original_lifecycle_event_handler = options->client_options->lifecycle_event_handler; test_fixture->original_lifecycle_event_handler_user_data = options->client_options->lifecycle_event_handler_user_data; options->client_options->lifecycle_event_handler = &s_aws_mqtt5_test_fixture_lifecycle_event_handler; options->client_options->lifecycle_event_handler_user_data = test_fixture; options->client_options->host_name = aws_byte_cursor_from_c_str(test_fixture->endpoint.address); options->client_options->port = test_fixture->endpoint.port; options->client_options->socket_options = &test_fixture->socket_options; options->client_options->bootstrap = test_fixture->client_bootstrap; options->client_options->client_termination_handler = s_on_test_client_termination; options->client_options->client_termination_handler_user_data = test_fixture; test_fixture->client = aws_mqtt5_client_new(allocator, options->client_options); test_fixture->client_vtable = *aws_mqtt5_client_get_default_vtable(); test_fixture->client_vtable.on_client_state_change_callback_fn = s_aws_mqtt5_test_fixture_state_changed_callback; test_fixture->client_vtable.on_client_statistics_changed_callback_fn = s_aws_mqtt5_test_fixture_statistics_changed_callback; test_fixture->client_vtable.vtable_user_data = test_fixture; aws_mqtt5_client_set_vtable(test_fixture->client, &test_fixture->client_vtable); aws_array_list_init_dynamic( &test_fixture->server_received_packets, allocator, 10, sizeof(struct aws_mqtt5_mock_server_packet_record)); aws_array_list_init_dynamic( &test_fixture->lifecycle_events, allocator, 10, sizeof(struct aws_mqtt5_lifecycle_event_record *)); aws_array_list_init_dynamic(&test_fixture->client_states, allocator, 10, sizeof(enum aws_mqtt5_client_state)); aws_array_list_init_dynamic( &test_fixture->client_statistics, allocator, 10, sizeof(struct aws_mqtt5_client_operation_statistics)); return AWS_OP_SUCCESS; } static void s_destroy_packet_storage( void *packet_storage, struct aws_allocator *allocator, enum aws_mqtt5_packet_type packet_type) { switch (packet_type) { case AWS_MQTT5_PT_CONNECT: aws_mqtt5_packet_connect_storage_clean_up(packet_storage); break; case AWS_MQTT5_PT_DISCONNECT: aws_mqtt5_packet_disconnect_storage_clean_up(packet_storage); break; case 
AWS_MQTT5_PT_SUBSCRIBE: aws_mqtt5_packet_subscribe_storage_clean_up(packet_storage); break; case AWS_MQTT5_PT_SUBACK: aws_mqtt5_packet_suback_storage_clean_up(packet_storage); break; case AWS_MQTT5_PT_UNSUBSCRIBE: aws_mqtt5_packet_unsubscribe_storage_clean_up(packet_storage); break; case AWS_MQTT5_PT_UNSUBACK: aws_mqtt5_packet_unsuback_storage_clean_up(packet_storage); break; case AWS_MQTT5_PT_PUBLISH: aws_mqtt5_packet_publish_storage_clean_up(packet_storage); break; case AWS_MQTT5_PT_PUBACK: /* TODO */ break; case AWS_MQTT5_PT_PINGREQ: AWS_FATAL_ASSERT(packet_storage == NULL); break; default: AWS_FATAL_ASSERT(false); } if (packet_storage != NULL) { aws_mem_release(allocator, packet_storage); } } static void s_destroy_lifecycle_event_storage(struct aws_mqtt5_lifecycle_event_record *event) { aws_mqtt5_packet_connack_storage_clean_up(&event->connack_storage); aws_mqtt5_packet_disconnect_storage_clean_up(&event->disconnect_storage); aws_mem_release(event->allocator, event); } void aws_mqtt5_client_mock_test_fixture_clean_up(struct aws_mqtt5_client_mock_test_fixture *test_fixture) { aws_mqtt5_client_release(test_fixture->client); s_wait_for_client_terminated(test_fixture); aws_client_bootstrap_release(test_fixture->client_bootstrap); aws_host_resolver_release(test_fixture->host_resolver); aws_server_bootstrap_destroy_socket_listener(test_fixture->server_bootstrap, test_fixture->listener); s_wait_on_listener_cleanup(test_fixture); aws_server_bootstrap_release(test_fixture->server_bootstrap); aws_event_loop_group_release(test_fixture->server_elg); aws_event_loop_group_release(test_fixture->client_elg); aws_thread_join_all_managed(); for (size_t i = 0; i < aws_array_list_length(&test_fixture->server_received_packets); ++i) { struct aws_mqtt5_mock_server_packet_record *packet = NULL; aws_array_list_get_at_ptr(&test_fixture->server_received_packets, (void **)&packet, i); s_destroy_packet_storage(packet->packet_storage, packet->storage_allocator, packet->packet_type); } aws_array_list_clean_up(&test_fixture->server_received_packets); for (size_t i = 0; i < aws_array_list_length(&test_fixture->lifecycle_events); ++i) { struct aws_mqtt5_lifecycle_event_record *event = NULL; aws_array_list_get_at(&test_fixture->lifecycle_events, &event, i); s_destroy_lifecycle_event_storage(event); } aws_array_list_clean_up(&test_fixture->lifecycle_events); aws_array_list_clean_up(&test_fixture->client_states); aws_array_list_clean_up(&test_fixture->client_statistics); aws_mutex_clean_up(&test_fixture->lock); aws_condition_variable_clean_up(&test_fixture->signal); } #define AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs, rhs) \ if ((lhs) != (rhs)) { \ return false; \ } #define AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs, rhs) \ if (((lhs) != NULL) != ((rhs) != NULL)) { \ return false; \ } \ if (((lhs) != NULL) && ((rhs) != NULL) && (*(lhs) != *(rhs))) { \ return false; \ } #define AWS_MQTT5_CLIENT_TEST_CHECK_CURSOR_EQUALS(lhs, rhs) \ if (!aws_byte_cursor_eq(&(lhs), &(rhs))) { \ return false; \ } #define AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs, rhs) \ if (((lhs) != NULL) != ((rhs) != NULL)) { \ return false; \ } \ if (((lhs) != NULL) && ((rhs) != NULL) && (!aws_byte_cursor_eq(lhs, rhs))) { \ return false; \ } #define AWS_MQTT5_CLIENT_TEST_CHECK_USER_PROPERTIES(lhs_props, lhs_prop_count, rhs_props, rhs_prop_count) \ if (aws_mqtt5_test_verify_user_properties_raw((lhs_prop_count), (lhs_props), (rhs_prop_count), (rhs_props)) != \ AWS_OP_SUCCESS) { \ return false; \ } static bool 
s_aws_publish_packets_equal( const struct aws_mqtt5_packet_publish_view *lhs, const struct aws_mqtt5_packet_publish_view *rhs) { // Don't check packet id intentionally AWS_MQTT5_CLIENT_TEST_CHECK_CURSOR_EQUALS(lhs->payload, rhs->payload); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->qos, rhs->qos); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->duplicate, rhs->duplicate); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->retain, rhs->retain); AWS_MQTT5_CLIENT_TEST_CHECK_CURSOR_EQUALS(lhs->topic, rhs->topic); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->payload_format, rhs->payload_format); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS( lhs->message_expiry_interval_seconds, rhs->message_expiry_interval_seconds); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->topic_alias, rhs->topic_alias); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->response_topic, rhs->response_topic); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->correlation_data, rhs->correlation_data); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->subscription_identifier_count, rhs->subscription_identifier_count); for (size_t i = 0; i < lhs->subscription_identifier_count; ++i) { AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->subscription_identifiers[i], rhs->subscription_identifiers[i]); } AWS_MQTT5_CLIENT_TEST_CHECK_USER_PROPERTIES( lhs->user_properties, lhs->user_property_count, rhs->user_properties, rhs->user_property_count); return true; } static bool s_aws_connect_packets_equal( const struct aws_mqtt5_packet_connect_view *lhs, const struct aws_mqtt5_packet_connect_view *rhs) { AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->keep_alive_interval_seconds, rhs->keep_alive_interval_seconds); AWS_MQTT5_CLIENT_TEST_CHECK_CURSOR_EQUALS(lhs->client_id, rhs->client_id); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->username, rhs->username); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->password, rhs->password); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->clean_start, rhs->clean_start); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS( lhs->session_expiry_interval_seconds, rhs->session_expiry_interval_seconds); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS( lhs->request_response_information, rhs->request_response_information); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->request_problem_information, rhs->request_problem_information); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->receive_maximum, rhs->receive_maximum); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->topic_alias_maximum, rhs->topic_alias_maximum); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->maximum_packet_size_bytes, rhs->maximum_packet_size_bytes); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->will_delay_interval_seconds, rhs->will_delay_interval_seconds); if ((lhs->will != NULL) != (rhs->will != NULL)) { return false; } if (lhs->will) { if (!s_aws_publish_packets_equal(lhs->will, rhs->will)) { return false; } } AWS_MQTT5_CLIENT_TEST_CHECK_USER_PROPERTIES( lhs->user_properties, lhs->user_property_count, rhs->user_properties, rhs->user_property_count); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->authentication_method, rhs->authentication_method); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->authentication_data, rhs->authentication_data); return true; } static bool s_aws_disconnect_packets_equal( const struct aws_mqtt5_packet_disconnect_view *lhs, const struct aws_mqtt5_packet_disconnect_view *rhs) { AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->reason_code, rhs->reason_code); 
AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->reason_string, rhs->reason_string); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS( lhs->session_expiry_interval_seconds, rhs->session_expiry_interval_seconds); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->server_reference, rhs->server_reference); AWS_MQTT5_CLIENT_TEST_CHECK_USER_PROPERTIES( lhs->user_properties, lhs->user_property_count, rhs->user_properties, rhs->user_property_count); return true; } static bool s_are_subscription_views_equal( const struct aws_mqtt5_subscription_view *lhs, const struct aws_mqtt5_subscription_view *rhs) { AWS_MQTT5_CLIENT_TEST_CHECK_CURSOR_EQUALS(lhs->topic_filter, rhs->topic_filter); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->qos, rhs->qos); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->no_local, rhs->no_local); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->retain_as_published, rhs->retain_as_published); AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->retain_handling_type, rhs->retain_handling_type); return true; } static bool s_aws_subscribe_packets_equal( const struct aws_mqtt5_packet_subscribe_view *lhs, const struct aws_mqtt5_packet_subscribe_view *rhs) { // Don't check packet id intentionally AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->subscription_count, rhs->subscription_count); for (size_t i = 0; i < lhs->subscription_count; ++i) { const struct aws_mqtt5_subscription_view *lhs_sub_view = &lhs->subscriptions[i]; const struct aws_mqtt5_subscription_view *rhs_sub_view = &rhs->subscriptions[i]; if (!s_are_subscription_views_equal(lhs_sub_view, rhs_sub_view)) { return false; } } AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_INT_EQUALS(lhs->subscription_identifier, rhs->subscription_identifier); AWS_MQTT5_CLIENT_TEST_CHECK_USER_PROPERTIES( lhs->user_properties, lhs->user_property_count, rhs->user_properties, rhs->user_property_count); return true; } static bool s_aws_unsubscribe_packets_equal( const struct aws_mqtt5_packet_unsubscribe_view *lhs, const struct aws_mqtt5_packet_unsubscribe_view *rhs) { // Don't check packet id intentionally AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->topic_filter_count, rhs->topic_filter_count); for (size_t i = 0; i < lhs->topic_filter_count; ++i) { struct aws_byte_cursor lhs_topic_filter = lhs->topic_filters[i]; struct aws_byte_cursor rhs_topic_filter = rhs->topic_filters[i]; AWS_MQTT5_CLIENT_TEST_CHECK_CURSOR_EQUALS(lhs_topic_filter, rhs_topic_filter); } AWS_MQTT5_CLIENT_TEST_CHECK_USER_PROPERTIES( lhs->user_properties, lhs->user_property_count, rhs->user_properties, rhs->user_property_count); return true; } static bool s_aws_puback_packets_equal( const struct aws_mqtt5_packet_puback_view *lhs, const struct aws_mqtt5_packet_puback_view *rhs) { // Don't check packet id intentionally AWS_MQTT5_CLIENT_TEST_CHECK_INT_EQUALS(lhs->reason_code, rhs->reason_code); AWS_MQTT5_CLIENT_TEST_CHECK_OPTIONAL_CURSOR_EQUALS(lhs->reason_string, rhs->reason_string); AWS_MQTT5_CLIENT_TEST_CHECK_USER_PROPERTIES( lhs->user_properties, lhs->user_property_count, rhs->user_properties, rhs->user_property_count); return true; } bool aws_mqtt5_client_test_are_packets_equal( enum aws_mqtt5_packet_type packet_type, void *lhs_packet_storage, void *rhs_packet_storage) { switch (packet_type) { case AWS_MQTT5_PT_CONNECT: return s_aws_connect_packets_equal( &((struct aws_mqtt5_packet_connect_storage *)lhs_packet_storage)->storage_view, &((struct aws_mqtt5_packet_connect_storage *)rhs_packet_storage)->storage_view); case AWS_MQTT5_PT_DISCONNECT: return s_aws_disconnect_packets_equal( &((struct 
aws_mqtt5_packet_disconnect_storage *)lhs_packet_storage)->storage_view, &((struct aws_mqtt5_packet_disconnect_storage *)rhs_packet_storage)->storage_view); case AWS_MQTT5_PT_SUBSCRIBE: return s_aws_subscribe_packets_equal( &((struct aws_mqtt5_packet_subscribe_storage *)lhs_packet_storage)->storage_view, &((struct aws_mqtt5_packet_subscribe_storage *)rhs_packet_storage)->storage_view); case AWS_MQTT5_PT_UNSUBSCRIBE: return s_aws_unsubscribe_packets_equal( &((struct aws_mqtt5_packet_unsubscribe_storage *)lhs_packet_storage)->storage_view, &((struct aws_mqtt5_packet_unsubscribe_storage *)rhs_packet_storage)->storage_view); case AWS_MQTT5_PT_PUBLISH: return s_aws_publish_packets_equal( &((struct aws_mqtt5_packet_publish_storage *)lhs_packet_storage)->storage_view, &((struct aws_mqtt5_packet_publish_storage *)rhs_packet_storage)->storage_view); case AWS_MQTT5_PT_PUBACK: return s_aws_puback_packets_equal( &((struct aws_mqtt5_packet_puback_storage *)lhs_packet_storage)->storage_view, &((struct aws_mqtt5_packet_puback_storage *)rhs_packet_storage)->storage_view); case AWS_MQTT5_PT_PINGREQ: case AWS_MQTT5_PT_PINGRESP: return true; default: return false; } } size_t aws_mqtt5_linked_list_length(struct aws_linked_list *list) { size_t length = 0; struct aws_linked_list_node *node = aws_linked_list_begin(list); while (node != aws_linked_list_end(list)) { ++length; node = aws_linked_list_next(node); } return length; } aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_testing_utils.h000066400000000000000000000170301456575232400261120ustar00rootroot00000000000000#ifndef MQTT_MQTT5_TESTING_UTILS_H #define MQTT_MQTT5_TESTING_UTILS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include struct aws_event_loop_group; struct aws_mqtt5_mock_server_packet_record { struct aws_allocator *storage_allocator; uint64_t timestamp; void *packet_storage; enum aws_mqtt5_packet_type packet_type; }; struct aws_mqtt5_lifecycle_event_record { struct aws_allocator *allocator; uint64_t timestamp; struct aws_mqtt5_client_lifecycle_event event; struct aws_mqtt5_negotiated_settings settings_storage; struct aws_mqtt5_packet_disconnect_storage disconnect_storage; struct aws_mqtt5_packet_connack_storage connack_storage; }; struct aws_mqtt5_server_mock_connection_context { struct aws_allocator *allocator; struct aws_channel *channel; struct aws_channel_handler handler; struct aws_channel_slot *slot; struct aws_mqtt5_encoder_function_table encoding_table; struct aws_mqtt5_encoder encoder; struct aws_mqtt5_decoder_function_table decoding_table; struct aws_mqtt5_decoder decoder; struct aws_mqtt5_inbound_topic_alias_resolver inbound_alias_resolver; struct aws_mqtt5_client_mock_test_fixture *test_fixture; struct aws_task service_task; }; typedef int(aws_mqtt5_on_mock_server_packet_received_fn)( void *packet_view, struct aws_mqtt5_server_mock_connection_context *connection, void *packet_received_user_data); typedef void( aws_mqtt5_mock_server_service_fn)(struct aws_mqtt5_server_mock_connection_context *mock_server, void *user_data); struct aws_mqtt5_mock_server_vtable { aws_mqtt5_on_mock_server_packet_received_fn *packet_handlers[16]; aws_mqtt5_mock_server_service_fn *service_task_fn; }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options { struct aws_mqtt5_client_options *client_options; const struct aws_mqtt5_mock_server_vtable *server_function_table; void *mock_server_user_data; }; struct 
aws_mqtt5_client_mock_test_fixture { struct aws_allocator *allocator; struct aws_event_loop_group *client_elg; struct aws_event_loop_group *server_elg; struct aws_host_resolver *host_resolver; struct aws_client_bootstrap *client_bootstrap; struct aws_server_bootstrap *server_bootstrap; struct aws_socket_endpoint endpoint; struct aws_socket_options socket_options; struct aws_socket *listener; struct aws_channel *server_channel; const struct aws_mqtt5_mock_server_vtable *server_function_table; void *mock_server_user_data; struct aws_mqtt5_client_vtable client_vtable; struct aws_mqtt5_client *client; aws_mqtt5_client_connection_event_callback_fn *original_lifecycle_event_handler; void *original_lifecycle_event_handler_user_data; uint16_t maximum_inbound_topic_aliases; struct aws_mutex lock; struct aws_condition_variable signal; struct aws_array_list server_received_packets; struct aws_array_list lifecycle_events; struct aws_array_list client_states; struct aws_array_list client_statistics; bool listener_destroyed; bool subscribe_complete; bool disconnect_completion_callback_invoked; bool client_terminated; uint32_t total_pubacks_received; uint32_t publishes_received; uint32_t successful_pubacks_received; uint32_t timeouts_received; uint32_t server_maximum_inflight_publishes; uint32_t server_current_inflight_publishes; }; struct mqtt5_client_test_options { struct aws_mqtt5_client_topic_alias_options topic_aliasing_options; struct aws_mqtt5_packet_connect_view connect_options; struct aws_mqtt5_client_options client_options; struct aws_mqtt5_mock_server_vtable server_function_table; }; struct aws_mqtt5_mock_server_reconnect_state { size_t required_connection_count_threshold; size_t connection_attempts; uint64_t connect_timestamp; uint64_t successful_connection_disconnect_delay_ms; }; int aws_mqtt5_test_verify_user_properties_raw( size_t property_count, const struct aws_mqtt5_user_property *properties, size_t expected_count, const struct aws_mqtt5_user_property *expected_properties); void aws_mqtt5_encode_init_testing_function_table(struct aws_mqtt5_encoder_function_table *function_table); void aws_mqtt5_decode_init_testing_function_table(struct aws_mqtt5_decoder_function_table *function_table); int aws_mqtt5_client_mock_test_fixture_init( struct aws_mqtt5_client_mock_test_fixture *test_fixture, struct aws_allocator *allocator, struct aws_mqtt5_client_mqtt5_mock_test_fixture_options *options); void aws_mqtt5_client_mock_test_fixture_clean_up(struct aws_mqtt5_client_mock_test_fixture *test_fixture); bool aws_mqtt5_client_test_are_packets_equal( enum aws_mqtt5_packet_type packet_type, void *lhs_packet_storage, void *rhs_packet_storage); size_t aws_mqtt5_linked_list_length(struct aws_linked_list *list); void aws_mqtt5_client_test_init_default_options(struct mqtt5_client_test_options *test_options); void aws_wait_for_connected_lifecycle_event(struct aws_mqtt5_client_mock_test_fixture *test_context); void aws_wait_for_stopped_lifecycle_event(struct aws_mqtt5_client_mock_test_fixture *test_context); int aws_verify_received_packet_sequence( struct aws_mqtt5_client_mock_test_fixture *test_context, struct aws_mqtt5_mock_server_packet_record *expected_packets, size_t expected_packets_count); int aws_mqtt5_mock_server_handle_connect_always_fail( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data); void aws_mqtt5_wait_for_n_lifecycle_events( struct aws_mqtt5_client_mock_test_fixture *test_context, enum aws_mqtt5_client_lifecycle_event_type type, size_t count); int 
aws_verify_reconnection_exponential_backoff_timestamps(struct aws_mqtt5_client_mock_test_fixture *test_fixture); int aws_verify_client_state_sequence( struct aws_mqtt5_client_mock_test_fixture *test_context, enum aws_mqtt5_client_state *expected_states, size_t expected_states_count); int aws_mqtt5_mock_server_handle_connect_always_succeed( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data); int aws_mqtt5_mock_server_send_packet( struct aws_mqtt5_server_mock_connection_context *connection, enum aws_mqtt5_packet_type packet_type, void *packet); int aws_mqtt5_mock_server_handle_connect_succeed_on_nth( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data); int aws_mqtt5_mock_server_handle_publish_puback( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data); int aws_mqtt5_mock_server_handle_publish_puback_and_forward( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data); int aws_mqtt5_mock_server_handle_unsubscribe_unsuback_success( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data); extern const struct aws_string *g_default_client_id; #define RECONNECT_TEST_MIN_BACKOFF 500 #define RECONNECT_TEST_MAX_BACKOFF 5000 #define RECONNECT_TEST_BACKOFF_RESET_DELAY 5000 #endif /* MQTT_MQTT5_TESTING_UTILS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_to_mqtt3_adapter_tests.c000066400000000000000000005034111456575232400277070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "mqtt5_testing_utils.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum aws_mqtt3_lifecycle_event_type { AWS_MQTT3_LET_CONNECTION_COMPLETE, AWS_MQTT3_LET_INTERRUPTED, AWS_MQTT3_LET_RESUMED, AWS_MQTT3_LET_CLOSED, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, AWS_MQTT3_LET_CONNECTION_SUCCESS, AWS_MQTT3_LET_CONNECTION_FAILURE, AWS_MQTT3_LET_TERMINATION, }; struct aws_mqtt3_lifecycle_event { enum aws_mqtt3_lifecycle_event_type type; uint64_t timestamp; int error_code; enum aws_mqtt_connect_return_code return_code; bool session_present; bool skip_error_code_equality; }; enum aws_mqtt3_operation_event_type { AWS_MQTT3_OET_PUBLISH_COMPLETE, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, }; struct aws_mqtt3_operation_event { enum aws_mqtt3_operation_event_type type; uint64_t timestamp; int error_code; // publish received properties enum aws_mqtt_qos qos; struct aws_byte_buf topic; struct aws_byte_cursor topic_cursor; struct aws_byte_buf payload; struct aws_byte_cursor payload_cursor; // subscribe complete properties struct aws_array_list granted_subscriptions; struct aws_byte_buf topic_storage; /* * Not a part of recorded events, instead used to help verification check number of occurrences. Only * used by the "contains" verification function which wouldn't otherise be able to check if an exact * event appears multiple times in the set. 
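* A value of zero is shorthand for "expect exactly one match"; set it explicitly when the same event is
* expected to occur more than once.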
*/ size_t expected_count; }; static void s_aws_mqtt3_operation_event_clean_up(struct aws_mqtt3_operation_event *event) { if (event == NULL) { return; } aws_byte_buf_clean_up(&event->topic); aws_byte_buf_clean_up(&event->payload); aws_byte_buf_clean_up(&event->topic_storage); aws_array_list_clean_up(&event->granted_subscriptions); } static bool s_aws_mqtt3_operation_event_equals( struct aws_mqtt3_operation_event *expected, struct aws_mqtt3_operation_event *actual) { if (expected->type != actual->type) { return false; } if (expected->error_code != actual->error_code) { return false; } if (expected->qos != actual->qos) { return false; } if (expected->topic_cursor.len != actual->topic_cursor.len) { return false; } if (expected->topic_cursor.len > 0) { if (memcmp(expected->topic_cursor.ptr, actual->topic_cursor.ptr, expected->topic_cursor.len) != 0) { return false; } } if (expected->payload_cursor.len != actual->payload_cursor.len) { return false; } if (expected->payload_cursor.len > 0) { if (memcmp(expected->payload_cursor.ptr, actual->payload_cursor.ptr, expected->payload_cursor.len) != 0) { return false; } } if (aws_array_list_length(&expected->granted_subscriptions) != aws_array_list_length(&actual->granted_subscriptions)) { return false; } for (size_t i = 0; i < aws_array_list_length(&expected->granted_subscriptions); ++i) { struct aws_mqtt_topic_subscription expected_sub; aws_array_list_get_at(&expected->granted_subscriptions, &expected_sub, i); bool found_match = false; for (size_t j = 0; j < aws_array_list_length(&actual->granted_subscriptions); ++j) { struct aws_mqtt_topic_subscription actual_sub; aws_array_list_get_at(&actual->granted_subscriptions, &actual_sub, j); if (expected_sub.qos != actual_sub.qos) { continue; } if (expected_sub.topic.len != actual_sub.topic.len) { continue; } if (expected_sub.topic.len > 0) { if (memcmp(expected_sub.topic.ptr, actual_sub.topic.ptr, expected_sub.topic.len) != 0) { continue; } } found_match = true; break; } if (!found_match) { return false; } } return true; } struct aws_mqtt5_to_mqtt3_adapter_test_fixture { struct aws_mqtt5_client_mock_test_fixture mqtt5_fixture; struct aws_mqtt_client_connection *connection; struct aws_array_list lifecycle_events; struct aws_array_list operation_events; struct aws_mutex lock; struct aws_condition_variable signal; }; static void s_init_adapter_connection_options_from_fixture( struct aws_mqtt_connection_options *connection_options, struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture) { AWS_ZERO_STRUCT(*connection_options); connection_options->host_name = aws_byte_cursor_from_c_str(fixture->mqtt5_fixture.endpoint.address); connection_options->port = fixture->mqtt5_fixture.endpoint.port; connection_options->socket_options = &fixture->mqtt5_fixture.socket_options; connection_options->keep_alive_time_secs = 30; connection_options->ping_timeout_ms = 10000; connection_options->clean_session = true; } static int s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, size_t expected_event_count, struct aws_mqtt3_operation_event *expected_events, size_t maximum_event_count) { aws_mutex_lock(&fixture->lock); size_t actual_event_count = aws_array_list_length(&fixture->operation_events); ASSERT_TRUE(expected_event_count <= actual_event_count); ASSERT_TRUE(actual_event_count <= maximum_event_count); for (size_t i = 0; i < expected_event_count; ++i) { struct aws_mqtt3_operation_event *expected_event = expected_events + i; struct 
aws_mqtt3_operation_event *actual_event = NULL; aws_array_list_get_at_ptr(&fixture->operation_events, (void **)(&actual_event), i); ASSERT_TRUE(s_aws_mqtt3_operation_event_equals(expected_event, actual_event)); } aws_mutex_unlock(&fixture->lock); return AWS_OP_SUCCESS; } static int s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, size_t expected_event_count, struct aws_mqtt3_operation_event *expected_events) { aws_mutex_lock(&fixture->lock); size_t actual_event_count = aws_array_list_length(&fixture->operation_events); for (size_t i = 0; i < expected_event_count; ++i) { struct aws_mqtt3_operation_event *expected_event = expected_events + i; size_t match_count = 0; for (size_t j = 0; j < actual_event_count; ++j) { struct aws_mqtt3_operation_event *actual_event = NULL; aws_array_list_get_at_ptr(&fixture->operation_events, (void **)(&actual_event), j); if (s_aws_mqtt3_operation_event_equals(expected_event, actual_event)) { ++match_count; } } if (expected_event->expected_count == 0) { ASSERT_INT_EQUALS(1, match_count); } else { ASSERT_INT_EQUALS(expected_event->expected_count, match_count); } } aws_mutex_unlock(&fixture->lock); return AWS_OP_SUCCESS; } struct n_operation_event_wait_context { struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture; enum aws_mqtt3_operation_event_type type; size_t count; }; static bool s_wait_for_n_adapter_operation_events_predicate(void *context) { struct n_operation_event_wait_context *wait_context = context; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = wait_context->fixture; size_t actual_count = 0; size_t event_count = aws_array_list_length(&fixture->operation_events); for (size_t i = 0; i < event_count; ++i) { struct aws_mqtt3_operation_event *actual_event = NULL; aws_array_list_get_at_ptr(&fixture->operation_events, (void **)(&actual_event), i); if (actual_event->type == wait_context->type) { ++actual_count; } } return actual_count >= wait_context->count; } static void s_wait_for_n_adapter_operation_events( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, enum aws_mqtt3_operation_event_type type, size_t count) { struct n_operation_event_wait_context wait_context = { .fixture = fixture, .type = type, .count = count, }; aws_mutex_lock(&fixture->lock); aws_condition_variable_wait_pred( &fixture->signal, &fixture->lock, s_wait_for_n_adapter_operation_events_predicate, &wait_context); aws_mutex_unlock(&fixture->lock); } struct n_lifeycle_event_wait_context { struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture; enum aws_mqtt3_lifecycle_event_type type; size_t count; }; static bool s_wait_for_n_adapter_lifecycle_events_predicate(void *context) { struct n_lifeycle_event_wait_context *wait_context = context; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = wait_context->fixture; size_t actual_count = 0; size_t event_count = aws_array_list_length(&fixture->lifecycle_events); for (size_t i = 0; i < event_count; ++i) { struct aws_mqtt3_lifecycle_event *actual_event = NULL; aws_array_list_get_at_ptr(&fixture->lifecycle_events, (void **)(&actual_event), i); if (actual_event->type == wait_context->type) { ++actual_count; } } return actual_count >= wait_context->count; } static void s_wait_for_n_adapter_lifecycle_events( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, enum aws_mqtt3_lifecycle_event_type type, size_t count) { struct n_lifeycle_event_wait_context wait_context = { .fixture = fixture, .type = type, .count = count, }; 
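/* Block until the fixture has recorded at least `count` lifecycle events of the requested type. The
 * predicate above re-scans the entire recorded-event list under the fixture lock each time the condition
 * variable wakes up. */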
aws_mutex_lock(&fixture->lock); aws_condition_variable_wait_pred( &fixture->signal, &fixture->lock, s_wait_for_n_adapter_lifecycle_events_predicate, &wait_context); aws_mutex_unlock(&fixture->lock); } static int s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_event( struct aws_mqtt3_lifecycle_event *expected_event, struct aws_mqtt3_lifecycle_event *actual_event) { ASSERT_INT_EQUALS(actual_event->type, expected_event->type); if (expected_event->skip_error_code_equality) { /* some error scenarios lead to different values cross-platform, so just verify yes/no in that case */ ASSERT_TRUE((actual_event->error_code != 0) == (expected_event->error_code != 0)); } else { ASSERT_INT_EQUALS(actual_event->error_code, expected_event->error_code); } ASSERT_INT_EQUALS(actual_event->return_code, expected_event->return_code); ASSERT_TRUE(actual_event->session_present == expected_event->session_present); return AWS_OP_SUCCESS; } static int s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, size_t expected_event_count, struct aws_mqtt3_lifecycle_event *expected_events, size_t maximum_event_count) { aws_mutex_lock(&fixture->lock); size_t actual_event_count = aws_array_list_length(&fixture->lifecycle_events); ASSERT_TRUE(expected_event_count <= actual_event_count); ASSERT_TRUE(actual_event_count <= maximum_event_count); for (size_t i = 0; i < expected_event_count; ++i) { struct aws_mqtt3_lifecycle_event *expected_event = expected_events + i; struct aws_mqtt3_lifecycle_event *actual_event = NULL; aws_array_list_get_at_ptr(&fixture->lifecycle_events, (void **)(&actual_event), i); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_event(expected_event, actual_event)); } aws_mutex_unlock(&fixture->lock); return AWS_OP_SUCCESS; } static int s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence_starts_with( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, size_t expected_event_count, struct aws_mqtt3_lifecycle_event *expected_events) { aws_mutex_lock(&fixture->lock); size_t actual_event_count = aws_array_list_length(&fixture->lifecycle_events); ASSERT_TRUE(expected_event_count <= actual_event_count); for (size_t i = 0; i < expected_event_count; ++i) { struct aws_mqtt3_lifecycle_event *expected_event = expected_events + i; struct aws_mqtt3_lifecycle_event *actual_event = NULL; aws_array_list_get_at_ptr(&fixture->lifecycle_events, (void **)(&actual_event), i); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_event(expected_event, actual_event)); } aws_mutex_unlock(&fixture->lock); return AWS_OP_SUCCESS; } static int s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence_ends_with( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, size_t expected_event_count, struct aws_mqtt3_lifecycle_event *expected_events) { aws_mutex_lock(&fixture->lock); size_t actual_event_count = aws_array_list_length(&fixture->lifecycle_events); ASSERT_TRUE(expected_event_count <= actual_event_count); for (size_t i = 0; i < expected_event_count; ++i) { struct aws_mqtt3_lifecycle_event *expected_event = expected_events + i; size_t actual_index = i + (actual_event_count - expected_event_count); struct aws_mqtt3_lifecycle_event *actual_event = NULL; aws_array_list_get_at_ptr(&fixture->lifecycle_events, (void **)(&actual_event), actual_index); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_event(expected_event, actual_event)); } 
aws_mutex_unlock(&fixture->lock); return AWS_OP_SUCCESS; } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_closed_handler( struct aws_mqtt_client_connection *connection, struct on_connection_closed_data *data, void *userdata) { (void)connection; (void)data; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; /* record the event */ struct aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_CLOSED; aws_high_res_clock_get_ticks(&event.timestamp); aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_termination_handler(void *userdata) { struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; /* record the event */ struct aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_TERMINATION; aws_high_res_clock_get_ticks(&event.timestamp); aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_interrupted_handler( struct aws_mqtt_client_connection *connection, int error_code, void *userdata) { (void)connection; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; /* record the event */ struct aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_INTERRUPTED; aws_high_res_clock_get_ticks(&event.timestamp); event.error_code = error_code; aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_resumed_handler( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; /* record the event */ struct aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_RESUMED; aws_high_res_clock_get_ticks(&event.timestamp); event.return_code = return_code; event.session_present = session_present; aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_connection_failure_handler( struct aws_mqtt_client_connection *connection, int error_code, void *userdata) { (void)connection; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; /* record the event */ struct aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_CONNECTION_FAILURE; aws_high_res_clock_get_ticks(&event.timestamp); event.error_code = error_code; aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_connection_success_handler( struct aws_mqtt_client_connection *connection, enum aws_mqtt_connect_return_code return_code, bool session_present, void *userdata) { (void)connection; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; /* record the event */ struct 
aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_CONNECTION_SUCCESS; aws_high_res_clock_get_ticks(&event.timestamp); event.return_code = return_code; event.session_present = session_present; aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete( struct aws_mqtt_client_connection *connection, int error_code, enum aws_mqtt_connect_return_code return_code, bool session_present, void *user_data) { (void)connection; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = user_data; struct aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_CONNECTION_COMPLETE; aws_high_res_clock_get_ticks(&event.timestamp); event.error_code = error_code; event.return_code = return_code; event.session_present = session_present; aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete( struct aws_mqtt_client_connection *connection, void *user_data) { (void)connection; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = user_data; struct aws_mqtt3_lifecycle_event event; AWS_ZERO_STRUCT(event); event.type = AWS_MQTT3_LET_DISCONNECTION_COMPLETE; aws_high_res_clock_get_ticks(&event.timestamp); aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->lifecycle_events, &event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } int aws_mqtt5_to_mqtt3_adapter_test_fixture_init( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, struct aws_allocator *allocator, struct aws_mqtt5_client_mqtt5_mock_test_fixture_options *mqtt5_fixture_config) { AWS_ZERO_STRUCT(*fixture); if (aws_mqtt5_client_mock_test_fixture_init(&fixture->mqtt5_fixture, allocator, mqtt5_fixture_config)) { return AWS_OP_ERR; } fixture->connection = aws_mqtt_client_connection_new_from_mqtt5_client(fixture->mqtt5_fixture.client); if (fixture->connection == NULL) { return AWS_OP_ERR; } aws_array_list_init_dynamic(&fixture->lifecycle_events, allocator, 10, sizeof(struct aws_mqtt3_lifecycle_event)); aws_array_list_init_dynamic(&fixture->operation_events, allocator, 10, sizeof(struct aws_mqtt3_operation_event)); aws_mutex_init(&fixture->lock); aws_condition_variable_init(&fixture->signal); aws_mqtt_client_connection_set_connection_termination_handler( fixture->connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_termination_handler, fixture); aws_mqtt_client_connection_set_connection_closed_handler( fixture->connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_closed_handler, fixture); aws_mqtt_client_connection_set_connection_interruption_handlers( fixture->connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_interrupted_handler, fixture, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_resumed_handler, fixture); aws_mqtt_client_connection_set_connection_result_handlers( fixture->connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_connection_success_handler, fixture, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_connection_failure_handler, fixture); return AWS_OP_SUCCESS; } void aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture) { 
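/* Tear-down order matters: release the mqtt311 adapter connection first and wait for its TERMINATION
 * lifecycle event, so the adapter is fully shut down before the underlying mqtt5 fixture and the
 * recorded-event storage below are destroyed. */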
aws_mqtt_client_connection_release(fixture->connection); s_wait_for_n_adapter_lifecycle_events(fixture, AWS_MQTT3_LET_TERMINATION, 1); aws_mqtt5_client_mock_test_fixture_clean_up(&fixture->mqtt5_fixture); aws_array_list_clean_up(&fixture->lifecycle_events); size_t operation_event_count = aws_array_list_length(&fixture->operation_events); for (size_t i = 0; i < operation_event_count; ++i) { struct aws_mqtt3_operation_event *event = NULL; aws_array_list_get_at_ptr(&fixture->operation_events, (void **)(&event), i); s_aws_mqtt3_operation_event_clean_up(event); } aws_array_list_clean_up(&fixture->operation_events); aws_mutex_clean_up(&fixture->lock); aws_condition_variable_clean_up(&fixture->signal); } void s_mqtt5to3_lifecycle_event_callback(const struct aws_mqtt5_client_lifecycle_event *event) { (void)event; } void s_mqtt5to3_publish_received_callback(const struct aws_mqtt5_packet_publish_view *publish, void *user_data) { (void)publish; (void)user_data; } static int s_do_mqtt5to3_adapter_create_destroy(struct aws_allocator *allocator, uint64_t sleep_nanos) { aws_mqtt_library_init(allocator); struct aws_mqtt5_packet_connect_view local_connect_options = { .keep_alive_interval_seconds = 30, .clean_start = true, }; struct aws_mqtt5_client_options client_options = { .connect_options = &local_connect_options, .lifecycle_event_handler = s_mqtt5to3_lifecycle_event_callback, .lifecycle_event_handler_user_data = NULL, .publish_received_handler = s_mqtt5to3_publish_received_callback, .publish_received_handler_user_data = NULL, .ping_timeout_ms = 10000, }; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_config = { .client_options = &client_options, }; struct aws_mqtt5_client_mock_test_fixture test_fixture; AWS_ZERO_STRUCT(test_fixture); ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_fixture, allocator, &test_fixture_config)); struct aws_mqtt_client_connection *connection = aws_mqtt_client_connection_new_from_mqtt5_client(test_fixture.client); if (sleep_nanos > 0) { /* sleep a little just to let the listener attachment resolve */ aws_thread_current_sleep(aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); } aws_mqtt_client_connection_release(connection); if (sleep_nanos > 0) { /* sleep a little just to let the listener detachment resolve */ aws_thread_current_sleep(aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); } aws_mqtt5_client_mock_test_fixture_clean_up(&test_fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } static int s_mqtt5to3_adapter_create_destroy_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5to3_adapter_create_destroy(allocator, 0)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_create_destroy, s_mqtt5to3_adapter_create_destroy_fn) static int s_mqtt5to3_adapter_create_destroy_delayed_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5to3_adapter_create_destroy( allocator, aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_create_destroy_delayed, s_mqtt5to3_adapter_create_destroy_delayed_fn) typedef int (*mqtt5to3_adapter_config_test_setup_fn)( struct aws_allocator *allocator, struct aws_mqtt_client_connection *adapter, struct aws_mqtt5_packet_connect_storage *expected_connect); static int s_do_mqtt5to3_adapter_config_test( struct aws_allocator *allocator, mqtt5to3_adapter_config_test_setup_fn setup_fn) { aws_mqtt_library_init(allocator); 
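/*
 * Shared driver for the adapter configuration tests that follow: the setup callback configures the adapter via
 * the mqtt311 APIs and fills in the CONNECT packet we expect the mqtt5 client to send; the driver then starts the
 * client, waits for a successful connection, stops it, and verifies the mock server received exactly that
 * CONNECT.  A minimal sketch of a setup callback (s_my_setup is hypothetical; the real examples are
 * s_mqtt5to3_adapter_set_will_setup and s_mqtt5to3_adapter_set_login_setup below):
 *
 *   static int s_my_setup(
 *       struct aws_allocator *allocator,
 *       struct aws_mqtt_client_connection *adapter,
 *       struct aws_mqtt5_packet_connect_storage *expected_connect) {
 *       // 1. configure the adapter, e.g. aws_mqtt_client_connection_set_login(adapter, &username, &password)
 *       // 2. describe the CONNECT we expect in an aws_mqtt5_packet_connect_view
 *       // 3. return aws_mqtt5_packet_connect_storage_init(expected_connect, allocator, &expected_view);
 *   }
 *
 *   ASSERT_SUCCESS(s_do_mqtt5to3_adapter_config_test(allocator, s_my_setup));
 */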
struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture test_fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&test_fixture, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_fixture.mqtt5_fixture.client; struct aws_mqtt_client_connection *adapter = test_fixture.connection; struct aws_mqtt5_packet_connect_storage expected_connect_storage; ASSERT_SUCCESS((*setup_fn)(allocator, adapter, &expected_connect_storage)); ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_wait_for_connected_lifecycle_event(&test_fixture.mqtt5_fixture); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_fixture.mqtt5_fixture); struct aws_mqtt5_mock_server_packet_record expected_packets[] = { { .packet_type = AWS_MQTT5_PT_CONNECT, .packet_storage = &expected_connect_storage, }, }; ASSERT_SUCCESS(aws_verify_received_packet_sequence( &test_fixture.mqtt5_fixture, expected_packets, AWS_ARRAY_SIZE(expected_packets))); aws_mqtt5_packet_connect_storage_clean_up(&expected_connect_storage); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&test_fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_simple_topic, "Hello/World"); AWS_STATIC_STRING_FROM_LITERAL(s_simple_payload, "A Payload"); static int s_mqtt5to3_adapter_set_will_setup( struct aws_allocator *allocator, struct aws_mqtt_client_connection *adapter, struct aws_mqtt5_packet_connect_storage *expected_connect) { struct aws_byte_cursor topic_cursor = aws_byte_cursor_from_string(s_simple_topic); struct aws_byte_cursor payload_cursor = aws_byte_cursor_from_string(s_simple_payload); ASSERT_SUCCESS( aws_mqtt_client_connection_set_will(adapter, &topic_cursor, AWS_MQTT_QOS_AT_LEAST_ONCE, true, &payload_cursor)); struct aws_mqtt5_packet_publish_view expected_will = { .payload = payload_cursor, .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE, .retain = true, .topic = topic_cursor, }; struct aws_mqtt5_packet_connect_view expected_connect_view = { .client_id = aws_byte_cursor_from_string(g_default_client_id), .keep_alive_interval_seconds = 30, .clean_start = true, .will = &expected_will, }; ASSERT_SUCCESS(aws_mqtt5_packet_connect_storage_init(expected_connect, allocator, &expected_connect_view)); return AWS_OP_SUCCESS; } static int s_mqtt5to3_adapter_set_will_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5to3_adapter_config_test(allocator, s_mqtt5to3_adapter_set_will_setup)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_set_will, s_mqtt5to3_adapter_set_will_fn) AWS_STATIC_STRING_FROM_LITERAL(s_username, "MyUsername"); AWS_STATIC_STRING_FROM_LITERAL(s_password, "TopTopSecret"); static int s_mqtt5to3_adapter_set_login_setup( struct aws_allocator *allocator, struct aws_mqtt_client_connection *adapter, struct aws_mqtt5_packet_connect_storage *expected_connect) { struct aws_byte_cursor username_cursor = aws_byte_cursor_from_string(s_username); struct aws_byte_cursor password_cursor = aws_byte_cursor_from_string(s_password); ASSERT_SUCCESS(aws_mqtt_client_connection_set_login(adapter, &username_cursor, &password_cursor)); struct aws_mqtt5_packet_connect_view expected_connect_view = { .client_id = 
aws_byte_cursor_from_string(g_default_client_id), .keep_alive_interval_seconds = 30, .clean_start = true, .username = &username_cursor, .password = &password_cursor, }; ASSERT_SUCCESS(aws_mqtt5_packet_connect_storage_init(expected_connect, allocator, &expected_connect_view)); return AWS_OP_SUCCESS; } static int s_mqtt5to3_adapter_set_login_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5to3_adapter_config_test(allocator, s_mqtt5to3_adapter_set_login_setup)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_set_login, s_mqtt5to3_adapter_set_login_fn) static int s_mqtt5to3_adapter_set_reconnect_timeout_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* * This is a variant of the mqtt5_client_reconnect_failure_backoff test. * * The primary change is that we configure the mqtt5 client with "wrong" (fast) reconnect delays and then use * the adapter API to configure with the "right" ones that will let the test pass. */ aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* backoff delay sequence: 500, 1000, 2000, 4000, 5000, ... */ test_options.client_options.retry_jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE; test_options.client_options.min_reconnect_delay_ms = 10; test_options.client_options.max_reconnect_delay_ms = 50; test_options.client_options.min_connected_time_to_reset_reconnect_delay_ms = RECONNECT_TEST_BACKOFF_RESET_DELAY; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = aws_mqtt5_mock_server_handle_connect_always_fail; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_client_mock_test_fixture test_context; ASSERT_SUCCESS(aws_mqtt5_client_mock_test_fixture_init(&test_context, allocator, &test_fixture_options)); struct aws_mqtt5_client *client = test_context.client; struct aws_mqtt_client_connection *adapter = aws_mqtt_client_connection_new_from_mqtt5_client(client); aws_mqtt_client_connection_set_reconnect_timeout(adapter, RECONNECT_TEST_MIN_BACKOFF, RECONNECT_TEST_MAX_BACKOFF); ASSERT_SUCCESS(aws_mqtt5_client_start(client)); aws_mqtt5_wait_for_n_lifecycle_events(&test_context, AWS_MQTT5_CLET_CONNECTION_FAILURE, 6); ASSERT_SUCCESS(aws_mqtt5_client_stop(client, NULL, NULL)); aws_wait_for_stopped_lifecycle_event(&test_context); ASSERT_SUCCESS(aws_verify_reconnection_exponential_backoff_timestamps(&test_context)); /* 6 (connecting, mqtt_connect, channel_shutdown, pending_reconnect) tuples (minus the final pending_reconnect) */ enum aws_mqtt5_client_state expected_states[] = { AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, AWS_MCS_PENDING_RECONNECT, AWS_MCS_CONNECTING, AWS_MCS_MQTT_CONNECT, AWS_MCS_CHANNEL_SHUTDOWN, }; ASSERT_SUCCESS(aws_verify_client_state_sequence(&test_context, expected_states, AWS_ARRAY_SIZE(expected_states))); aws_mqtt_client_connection_release(adapter); aws_mqtt5_client_mock_test_fixture_clean_up(&test_context); aws_mqtt_library_clean_up(); 
return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_set_reconnect_timeout, s_mqtt5to3_adapter_set_reconnect_timeout_fn) /* * Basic successful connection test */ static int s_mqtt5to3_adapter_connect_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_mqtt3_lifecycle_event expected_events[] = { { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_connect_success, s_mqtt5to3_adapter_connect_success_fn) static int s_do_mqtt5to3_adapter_connect_success_disconnect_success_cycle( struct aws_allocator *allocator, size_t iterations) { aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; for (size_t i = 0; i < iterations; ++i) { struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, i + 1); aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, i + 1); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CLOSED, i + 1); struct aws_mqtt3_lifecycle_event expected_event_sequence[] = { { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, { .type = AWS_MQTT3_LET_DISCONNECTION_COMPLETE, }, { .type = AWS_MQTT3_LET_CLOSED, }, }; size_t 
sequence_size = AWS_ARRAY_SIZE(expected_event_sequence); size_t expected_event_count = (i + 1) * sequence_size; struct aws_mqtt3_lifecycle_event *expected_events = aws_mem_calloc(allocator, expected_event_count, sizeof(struct aws_mqtt3_lifecycle_event)); for (size_t j = 0; j < i + 1; ++j) { for (size_t k = 0; k < sequence_size; ++k) { *(expected_events + j * sequence_size + k) = expected_event_sequence[k]; } } ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, expected_event_count, expected_events, expected_event_count)); aws_mem_release(allocator, expected_events); } aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } /* * A couple of simple connect-disconnect cycle tests. The first does a single cycle while the second does several. * Verifies proper lifecycle event sequencing. */ static int s_mqtt5to3_adapter_connect_success_disconnect_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5to3_adapter_connect_success_disconnect_success_cycle(allocator, 1)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_success_disconnect_success, s_mqtt5to3_adapter_connect_success_disconnect_success_fn) static int s_mqtt5to3_adapter_connect_success_disconnect_success_thrice_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_do_mqtt5to3_adapter_connect_success_disconnect_success_cycle(allocator, 3)); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_success_disconnect_success_thrice, s_mqtt5to3_adapter_connect_success_disconnect_success_thrice_fn) /* * Verifies that calling connect() while connected yields a connection completion callback with the * appropriate already-connected error code. Note that in the mqtt311 impl, this error is synchronous. 
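 * The expected event sequence recorded by the fixture is CONNECTION_SUCCESS, CONNECTION_COMPLETE, and then a
 * second CONNECTION_COMPLETE carrying AWS_ERROR_MQTT_ALREADY_CONNECTED for the redundant connect() call.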
*/ static int s_mqtt5to3_adapter_connect_success_connect_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 2); struct aws_mqtt3_lifecycle_event expected_events[] = { { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, .error_code = AWS_ERROR_MQTT_ALREADY_CONNECTED, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_connect_success_connect_failure, s_mqtt5to3_adapter_connect_success_connect_failure_fn) /* * A non-deterministic test that starts the connect process and immediately drops the last external adapter * reference. Intended to stochastically shake out shutdown race conditions. 
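 * The body simply issues connect() and immediately tears the fixture down; there are no event assertions, the
 * test passes as long as shutdown completes cleanly.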
*/ static int s_mqtt5to3_adapter_connect_success_sloppy_shutdown_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_connect_success_sloppy_shutdown, s_mqtt5to3_adapter_connect_success_sloppy_shutdown_fn) static int s_aws_mqtt5_server_disconnect_after_connect( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { aws_mqtt5_mock_server_handle_connect_always_succeed(packet, connection, user_data); struct aws_mqtt5_packet_disconnect_view disconnect = { .reason_code = AWS_MQTT5_DRC_SERVER_SHUTTING_DOWN, }; int result = aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_DISCONNECT, &disconnect); return result; } static int s_verify_bad_connectivity_callbacks(struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture) { struct aws_mqtt3_lifecycle_event expected_events_start[] = { { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, { .type = AWS_MQTT3_LET_INTERRUPTED, .error_code = AWS_ERROR_MQTT_UNEXPECTED_HANGUP, }, { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_RESUMED, }, { .type = AWS_MQTT3_LET_INTERRUPTED, .error_code = AWS_ERROR_MQTT_UNEXPECTED_HANGUP, }, { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_RESUMED, }, { .type = AWS_MQTT3_LET_INTERRUPTED, .error_code = AWS_ERROR_MQTT_UNEXPECTED_HANGUP, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence_starts_with( fixture, AWS_ARRAY_SIZE(expected_events_start), expected_events_start)); struct aws_mqtt3_lifecycle_event expected_events_end[] = { { .type = AWS_MQTT3_LET_DISCONNECTION_COMPLETE, }, { .type = AWS_MQTT3_LET_CLOSED, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence_ends_with( fixture, AWS_ARRAY_SIZE(expected_events_end), expected_events_end)); return AWS_OP_SUCCESS; } static int s_do_bad_connectivity_basic_test(struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture) { struct aws_mqtt_client_connection *adapter = fixture->connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(fixture, AWS_MQTT3_LET_INTERRUPTED, 3); 
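/* three interruptions recorded; disconnect and wait for the final CLOSED event before checking the sequence */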
aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, fixture); s_wait_for_n_adapter_lifecycle_events(fixture, AWS_MQTT3_LET_CLOSED, 1); ASSERT_SUCCESS(s_verify_bad_connectivity_callbacks(fixture)); return AWS_OP_SUCCESS; } /* * A test where each successful connection is immediately dropped after the connack is sent. Allows us to verify * proper interrupt/resume sequencing. */ static int s_mqtt5to3_adapter_connect_bad_connectivity_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* So that the test doesn't get excessively slow due to all the reconnects with backoff */ test_options.client_options.min_reconnect_delay_ms = 500; test_options.client_options.max_reconnect_delay_ms = 1000; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_server_disconnect_after_connect; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); ASSERT_SUCCESS(s_do_bad_connectivity_basic_test(&fixture)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_connect_bad_connectivity, s_mqtt5to3_adapter_connect_bad_connectivity_fn) /* * A variant of the bad connectivity test where we restart the mqtt5 client after the main test is over and verify * we don't get any interrupt/resume callbacks. */ static int s_mqtt5to3_adapter_connect_bad_connectivity_with_mqtt5_restart_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* So that the test doesn't get excessively slow due to all the reconnects with backoff */ test_options.client_options.min_reconnect_delay_ms = 500; test_options.client_options.max_reconnect_delay_ms = 1000; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_server_disconnect_after_connect; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); ASSERT_SUCCESS(s_do_bad_connectivity_basic_test(&fixture)); /* * Now restart the 5 client, wait for a few more connection success/disconnect cycles, and then verify that no * further adapter callbacks were invoked because of this. 
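 * s_verify_bad_connectivity_callbacks() checks both how the recorded sequence starts and how it ends, so any
 * extra interrupted/resumed events emitted after the restart would cause the re-verification to fail.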
*/ aws_mqtt5_client_start(fixture.mqtt5_fixture.client); aws_mqtt5_wait_for_n_lifecycle_events(&fixture.mqtt5_fixture, AWS_MQTT5_CLET_CONNECTION_SUCCESS, 6); aws_thread_current_sleep(aws_timestamp_convert(2, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); ASSERT_SUCCESS(s_verify_bad_connectivity_callbacks(&fixture)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_bad_connectivity_with_mqtt5_restart, s_mqtt5to3_adapter_connect_bad_connectivity_with_mqtt5_restart_fn) int aws_mqtt5_mock_server_handle_connect_succeed_on_or_after_nth( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; struct aws_mqtt5_mock_server_reconnect_state *context = user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); if (context->connection_attempts >= context->required_connection_count_threshold) { connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; aws_high_res_clock_get_ticks(&context->connect_timestamp); } else { connack_view.reason_code = AWS_MQTT5_CRC_NOT_AUTHORIZED; } ++context->connection_attempts; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } /* * Test where the initial connect is rejected, which should put the adapter to sleep. Meanwhile followup attempts * are successful and the mqtt5 client itself becomes connected. */ static int s_mqtt5to3_adapter_connect_failure_connect_success_via_mqtt5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt5_mock_server_reconnect_state mock_server_state = { .required_connection_count_threshold = 1, }; struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = aws_mqtt5_mock_server_handle_connect_succeed_on_or_after_nth; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &mock_server_state, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); // wait for and verify a connection failure s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_mqtt3_lifecycle_event expected_events[] = { { .type = AWS_MQTT3_LET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT_PROTOCOL_ERROR, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, .error_code = AWS_ERROR_MQTT_PROTOCOL_ERROR, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); // wait for the mqtt5 client to successfully connect on the second try aws_mqtt5_wait_for_n_lifecycle_events(&fixture.mqtt5_fixture, AWS_MQTT5_CLET_CONNECTION_SUCCESS, 1); // verify we 
didn't get any callbacks on the adapter ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); // "connect" on the adapter, wait for and verify success aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 2); struct aws_mqtt3_lifecycle_event expected_reconnect_events[] = { { .type = AWS_MQTT3_LET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT_PROTOCOL_ERROR, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, .error_code = AWS_ERROR_MQTT_PROTOCOL_ERROR, }, { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_reconnect_events), expected_reconnect_events, AWS_ARRAY_SIZE(expected_reconnect_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_failure_connect_success_via_mqtt5, s_mqtt5to3_adapter_connect_failure_connect_success_via_mqtt5_fn) AWS_STATIC_STRING_FROM_LITERAL(s_bad_host_name, "derpity_derp"); /* * Fails to connect with a bad config. Follow up with a good config. Verifies that config is re-evaluated with * each connect() invocation. */ static int s_mqtt5to3_adapter_connect_failure_bad_config_success_good_config_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); struct aws_byte_cursor good_host_name = connection_options.host_name; connection_options.host_name = aws_byte_cursor_from_string(s_bad_host_name); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); // wait for and verify a connection failure s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_mqtt3_lifecycle_event expected_events[] = { { .type = AWS_MQTT3_LET_CONNECTION_FAILURE, .error_code = AWS_ERROR_FILE_INVALID_PATH, .skip_error_code_equality = true, /* the error code here is platform-dependent */ }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, .error_code = AWS_ERROR_FILE_INVALID_PATH, .skip_error_code_equality = true, /* the error code here is platform-dependent */ }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); // reconnect with a good host the adapter, wait for and verify success connection_options.host_name = good_host_name; aws_mqtt_client_connection_connect(adapter, &connection_options); 
s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 2); struct aws_mqtt3_lifecycle_event expected_reconnect_events[] = { { .type = AWS_MQTT3_LET_CONNECTION_FAILURE, .error_code = AWS_ERROR_FILE_INVALID_PATH, .skip_error_code_equality = true, /* the error code here is platform-dependent */ }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, .error_code = AWS_ERROR_FILE_INVALID_PATH, .skip_error_code_equality = true, /* the error code here is platform-dependent */ }, { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_reconnect_events), expected_reconnect_events, AWS_ARRAY_SIZE(expected_reconnect_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_failure_bad_config_success_good_config, s_mqtt5to3_adapter_connect_failure_bad_config_success_good_config_fn) int aws_mqtt5_mock_server_handle_connect_fail_on_or_after_nth( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; struct aws_mqtt5_mock_server_reconnect_state *context = user_data; bool send_disconnect = false; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); if (context->connection_attempts >= context->required_connection_count_threshold) { connack_view.reason_code = AWS_MQTT5_CRC_NOT_AUTHORIZED; } else { connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; aws_high_res_clock_get_ticks(&context->connect_timestamp); send_disconnect = true; } ++context->connection_attempts; aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); if (send_disconnect) { struct aws_mqtt5_packet_disconnect_view disconnect = { .reason_code = AWS_MQTT5_DRC_SERVER_SHUTTING_DOWN, }; aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_DISCONNECT, &disconnect); } return AWS_OP_SUCCESS; } /* * Establishes a successful connection then drops it followed by a perma-failure loop, verify we receive * the new connection failure callbacks. 
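 * The mock server handler (fail-on-or-after-nth with a threshold of 1) accepts the first CONNECT and immediately
 * sends a DISCONNECT, then rejects every later attempt, so after the initial success/complete/interrupted events
 * the adapter should only report repeated CONNECTION_FAILURE callbacks.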
*/ static int s_mqtt5to3_adapter_connect_reconnect_failures_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt5_mock_server_reconnect_state mock_server_state = { .required_connection_count_threshold = 1, }; struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = aws_mqtt5_mock_server_handle_connect_fail_on_or_after_nth; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, .mock_server_user_data = &mock_server_state, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_FAILURE, 3); aws_mqtt_client_connection_disconnect(adapter, NULL, NULL); struct aws_mqtt3_lifecycle_event expected_events[] = { { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, { .type = AWS_MQTT3_LET_INTERRUPTED, .error_code = AWS_ERROR_MQTT_UNEXPECTED_HANGUP, }, { .type = AWS_MQTT3_LET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT_PROTOCOL_ERROR, }, { .type = AWS_MQTT3_LET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT_PROTOCOL_ERROR, }, { .type = AWS_MQTT3_LET_CONNECTION_FAILURE, .error_code = AWS_ERROR_MQTT_PROTOCOL_ERROR, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence_starts_with( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_connect_reconnect_failures, s_mqtt5to3_adapter_connect_reconnect_failures_fn) /* * Connect successfully then disconnect followed by a connect with no intervening wait. Verifies simple reliable * action and event sequencing. 
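 * The recorded sequence is expected to begin with CONNECTION_SUCCESS, CONNECTION_COMPLETE, DISCONNECTION_COMPLETE
 * and to end with a fresh CONNECTION_SUCCESS, CONNECTION_COMPLETE pair for the second connect().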
*/ static int s_mqtt5to3_adapter_connect_success_disconnect_connect_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 1); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 2); /* * depending on timing there may or may not be a closed event in between, so just check beginning and end for * expected events */ struct aws_mqtt3_lifecycle_event expected_sequence_beginning[] = { { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, { .type = AWS_MQTT3_LET_DISCONNECTION_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence_starts_with( &fixture, AWS_ARRAY_SIZE(expected_sequence_beginning), expected_sequence_beginning)); struct aws_mqtt3_lifecycle_event expected_sequence_ending[] = { { .type = AWS_MQTT3_LET_CONNECTION_SUCCESS, }, { .type = AWS_MQTT3_LET_CONNECTION_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence_ends_with( &fixture, AWS_ARRAY_SIZE(expected_sequence_ending), expected_sequence_ending)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_success_disconnect_connect, s_mqtt5to3_adapter_connect_success_disconnect_connect_fn) /* * Calls disconnect() on an adapter that successfully connected but then had the mqtt5 client stopped behind the * adapter's back. Verifies that we still get a completion callback. 
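 * The test only waits for the resulting DISCONNECTION_COMPLETE event; no further sequence verification is done.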
*/ static int s_mqtt5to3_adapter_connect_success_stop_mqtt5_disconnect_success_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); aws_mqtt5_client_stop(fixture.mqtt5_fixture.client, NULL, NULL); aws_wait_for_stopped_lifecycle_event(&fixture.mqtt5_fixture); aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 1); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_success_stop_mqtt5_disconnect_success, s_mqtt5to3_adapter_connect_success_stop_mqtt5_disconnect_success_fn) /* * Call disconnect on a newly-created adapter. Verifies that we get a completion callback. */ static int s_mqtt5to3_adapter_disconnect_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 1); struct aws_mqtt3_lifecycle_event expected_events[] = { { .type = AWS_MQTT3_LET_DISCONNECTION_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_lifecycle_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_disconnect_success, s_mqtt5to3_adapter_disconnect_success_fn) /* * Use the adapter to successfully connect then call disconnect multiple times. Verify that all disconnect * invocations generate expected lifecycle events. Verifies that disconnects after a disconnect are properly handled. 
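 * Each of the three disconnect() calls is expected to produce its own DISCONNECTION_COMPLETE event, while only a
 * single CLOSED event is expected for the whole sequence.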
*/ static int s_mqtt5to3_adapter_connect_success_disconnect_success_disconnect_success_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *adapter = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(adapter, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); aws_mqtt5_client_stop(fixture.mqtt5_fixture.client, NULL, NULL); aws_wait_for_stopped_lifecycle_event(&fixture.mqtt5_fixture); aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 1); aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); aws_mqtt_client_connection_disconnect( adapter, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 3); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CLOSED, 1); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_connect_success_disconnect_success_disconnect_success, s_mqtt5to3_adapter_connect_success_disconnect_success_disconnect_success_fn) #define SIMPLE_ALLOCATION_COUNT 10 static int s_mqtt5to3_adapter_operation_allocation_simple_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_client_connection_5_impl *adapter = connection->impl; struct aws_mqtt5_to_mqtt3_adapter_operation_table *operational_state = &adapter->operational_state; for (size_t i = 0; i < SIMPLE_ALLOCATION_COUNT; ++i) { struct aws_mqtt5_to_mqtt3_adapter_publish_options publish_options = { .adapter = adapter, .topic = aws_byte_cursor_from_c_str("derp"), .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }; struct aws_mqtt5_to_mqtt3_adapter_operation_publish *publish = aws_mqtt5_to_mqtt3_adapter_operation_new_publish(allocator, &publish_options); ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(operational_state, 
&publish->base)); ASSERT_INT_EQUALS(i + 1, (size_t)(publish->base.id)); } aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_operation_allocation_simple, s_mqtt5to3_adapter_operation_allocation_simple_fn) #define ALLOCATION_WRAP_AROUND_ID_START 100 static int s_mqtt5to3_adapter_operation_allocation_wraparound_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_client_connection_5_impl *adapter = connection->impl; struct aws_mqtt5_to_mqtt3_adapter_operation_table *operational_state = &adapter->operational_state; operational_state->next_id = ALLOCATION_WRAP_AROUND_ID_START; for (size_t i = 0; i < UINT16_MAX + 50; ++i) { struct aws_mqtt5_to_mqtt3_adapter_publish_options publish_options = { .adapter = adapter, .topic = aws_byte_cursor_from_c_str("derp"), .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }; struct aws_mqtt5_to_mqtt3_adapter_operation_publish *publish = aws_mqtt5_to_mqtt3_adapter_operation_new_publish(allocator, &publish_options); ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(operational_state, &publish->base)); size_t expected_id = (i + ALLOCATION_WRAP_AROUND_ID_START) % 65536; if (i > UINT16_MAX - ALLOCATION_WRAP_AROUND_ID_START) { ++expected_id; } ASSERT_INT_EQUALS(expected_id, (size_t)(publish->base.id)); aws_mqtt5_to_mqtt3_adapter_operation_table_remove_operation(operational_state, publish->base.id); } aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_operation_allocation_wraparound, s_mqtt5to3_adapter_operation_allocation_wraparound_fn) static int s_mqtt5to3_adapter_operation_allocation_exhaustion_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_client_connection_5_impl *adapter = connection->impl; struct aws_mqtt5_to_mqtt3_adapter_operation_table *operational_state = &adapter->operational_state; operational_state->next_id = ALLOCATION_WRAP_AROUND_ID_START; for (size_t i = 0; i < UINT16_MAX + 50; ++i) { struct aws_mqtt5_to_mqtt3_adapter_publish_options publish_options = { .adapter = adapter, .topic = aws_byte_cursor_from_c_str("derp"), .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }; struct aws_mqtt5_to_mqtt3_adapter_operation_publish *publish = aws_mqtt5_to_mqtt3_adapter_operation_new_publish(allocator, &publish_options); if (i >= 
UINT16_MAX) { ASSERT_FAILS(aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(operational_state, &publish->base)); aws_mqtt5_to_mqtt3_adapter_operation_release(&publish->base); continue; } ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_operation_table_add_operation(operational_state, &publish->base)); size_t expected_id = (i + ALLOCATION_WRAP_AROUND_ID_START) % 65536; if (i > UINT16_MAX - ALLOCATION_WRAP_AROUND_ID_START) { ++expected_id; } ASSERT_INT_EQUALS(expected_id, (size_t)(publish->base.id)); } aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_operation_allocation_exhaustion, s_mqtt5to3_adapter_operation_allocation_exhaustion_fn) static int s_aws_mqtt5_mock_server_handle_connect_succeed_with_small_payload( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); uint32_t maximum_packet_size = 1024; connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; connack_view.maximum_packet_size = &maximum_packet_size; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata) { (void)connection; (void)packet_id; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; struct aws_mqtt3_operation_event operation_event = { .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = error_code, }; aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->operation_events, &operation_event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static int s_mqtt5to3_adapter_publish_failure_invalid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* only allow small payloads to force a publish error */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_aws_mqtt5_mock_server_handle_connect_succeed_with_small_payload; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); uint8_t payload_array[2 * 1024]; struct aws_byte_cursor payload = aws_byte_cursor_from_array(payload_array, AWS_ARRAY_SIZE(payload_array)); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, 
s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); struct aws_mqtt3_operation_event expected_events[] = {{ .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = AWS_ERROR_MQTT5_PACKET_VALIDATION, }}; s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_publish_failure_invalid, s_mqtt5to3_adapter_publish_failure_invalid_fn) static int s_mqtt5to3_adapter_publish_failure_offline_queue_policy_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.client_options.offline_queue_behavior = AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); aws_mqtt_client_connection_disconnect( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_MOST_ONCE, false, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); struct aws_mqtt3_operation_event expected_events[] = {{ .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY, }}; s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_publish_failure_offline_queue_policy, s_mqtt5to3_adapter_publish_failure_offline_queue_policy_fn) static int s_mqtt5to3_adapter_publish_success_qos0_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture 
fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_MOST_ONCE, false, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); struct aws_mqtt3_operation_event expected_events[] = {{ .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = AWS_ERROR_SUCCESS, }}; s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_publish_success_qos0, s_mqtt5to3_adapter_publish_success_qos0_fn) static int s_mqtt5to3_adapter_publish_success_qos1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); struct aws_mqtt3_operation_event expected_events[] = {{ .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = AWS_ERROR_SUCCESS, }}; s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_publish_success_qos1, s_mqtt5to3_adapter_publish_success_qos1_fn) static int 
s_mqtt5to3_adapter_publish_no_ack_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* Ignore publishes, triggering client-side timeout */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = NULL; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; connection_options.protocol_operation_timeout_ms = 5000; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); struct aws_mqtt3_operation_event expected_events[] = {{ .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = AWS_ERROR_MQTT_TIMEOUT, }}; s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_publish_no_ack, s_mqtt5to3_adapter_publish_no_ack_fn) static int s_mqtt5to3_adapter_publish_interrupted_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); /* Ignore publishes */ test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = NULL; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, NULL, 
s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); /* * wait for a little bit; we aren't going to get a response, so shut down while the operation is still pending. * While we don't verify anything afterwards, any resulting race conditions and leaks would show up. */ aws_thread_current_sleep(aws_timestamp_convert(2, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_publish_interrupted, s_mqtt5to3_adapter_publish_interrupted_fn) void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, int error_code, void *userdata) { (void)connection; (void)packet_id; (void)topic; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; struct aws_mqtt3_operation_event operation_event = { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = error_code, }; aws_array_list_init_dynamic( &operation_event.granted_subscriptions, fixture->mqtt5_fixture.allocator, 1, sizeof(struct aws_mqtt_topic_subscription)); aws_byte_buf_init_copy_from_cursor(&operation_event.topic_storage, fixture->mqtt5_fixture.allocator, *topic); /* * technically it's not safe to persist the topic cursor, but the way the tests are built, the cursor will stay * valid until the events are checked (as long as we don't delete the subscription internally) */ struct aws_mqtt_topic_subscription sub = { .topic = aws_byte_cursor_from_buf(&operation_event.topic_storage), .qos = qos, }; aws_array_list_push_back(&operation_event.granted_subscriptions, (void *)&sub); aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->operation_events, &operation_event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_received( struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture, enum aws_mqtt3_operation_event_type event_type, struct aws_byte_cursor topic, struct aws_byte_cursor payload, enum aws_mqtt_qos qos) { struct aws_mqtt3_operation_event operation_event = { .type = event_type, .qos = qos, }; aws_byte_buf_init_copy_from_cursor(&operation_event.topic, fixture->mqtt5_fixture.allocator, topic); operation_event.topic_cursor = aws_byte_cursor_from_buf(&operation_event.topic); aws_byte_buf_init_copy_from_cursor(&operation_event.payload, fixture->mqtt5_fixture.allocator, payload); operation_event.payload_cursor = aws_byte_cursor_from_buf(&operation_event.payload); aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->operation_events, &operation_event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; (void)dup; (void)retain; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_received( fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, *topic, *payload, qos); } void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct
aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; (void)dup; (void)retain; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_received( fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, *topic, *payload, qos); } static int s_mqtt5_mock_server_handle_subscribe_suback_success( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_packet_subscribe_view *subscribe_view = packet; AWS_VARIABLE_LENGTH_ARRAY( enum aws_mqtt5_suback_reason_code, mqtt5_suback_codes, subscribe_view->subscription_count); for (size_t i = 0; i < subscribe_view->subscription_count; ++i) { enum aws_mqtt5_suback_reason_code *reason_code_ptr = &mqtt5_suback_codes[i]; *reason_code_ptr = (enum aws_mqtt5_suback_reason_code)subscribe_view->subscriptions[i].qos; } struct aws_mqtt5_packet_suback_view suback_view = { .packet_id = subscribe_view->packet_id, .reason_code_count = subscribe_view->subscription_count, .reason_codes = mqtt5_suback_codes, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_SUBACK, &suback_view); } static int s_mqtt5to3_adapter_subscribe_single_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_subscribe( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); struct aws_mqtt_topic_subscription expected_subs[1] = { { .topic = topic, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }, }; struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_SUCCESS, }, }; aws_array_list_init_static_from_initialized( &expected_events[0].granted_subscriptions, (void *)expected_subs, 1, sizeof(struct aws_mqtt_topic_subscription)); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); 
aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_single_success, s_mqtt5to3_adapter_subscribe_single_success_fn) /* * This function tests receiving a subscribe acknowledgement after disconnecting from * the server. * It expects an AWS_MQTT_QOS_FAILURE return. */ static int s_mqtt5to3_adapter_subscribe_single_null_suback_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_subscribe( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); struct aws_mqtt_topic_subscription expected_subs[1] = { { .topic = topic, .qos = AWS_MQTT_QOS_FAILURE, }, }; struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_MQTT5_USER_REQUESTED_STOP, }, }; aws_array_list_init_static_from_initialized( &expected_events[0].granted_subscriptions, (void *)expected_subs, 1, sizeof(struct aws_mqtt_topic_subscription)); aws_mqtt_client_connection_disconnect( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_single_null_suback, s_mqtt5to3_adapter_subscribe_single_null_suback_fn) static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_multi_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, const struct aws_array_list *topic_subacks, /* contains aws_mqtt_topic_subscription pointers */ int error_code, void *userdata) { (void)connection; (void)packet_id; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; struct aws_mqtt3_operation_event operation_event = { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = error_code, }; if (error_code == AWS_ERROR_SUCCESS) { size_t granted_count = aws_array_list_length(topic_subacks); aws_array_list_init_dynamic(
&operation_event.granted_subscriptions, fixture->mqtt5_fixture.allocator, granted_count, sizeof(struct aws_mqtt_topic_subscription)); size_t topic_length = 0; for (size_t i = 0; i < granted_count; ++i) { struct aws_mqtt_topic_subscription *granted_sub = NULL; aws_array_list_get_at(topic_subacks, &granted_sub, i); aws_array_list_push_back(&operation_event.granted_subscriptions, (void *)granted_sub); topic_length += granted_sub->topic.len; } aws_byte_buf_init(&operation_event.topic_storage, fixture->mqtt5_fixture.allocator, topic_length); for (size_t i = 0; i < granted_count; ++i) { struct aws_mqtt_topic_subscription *granted_sub = NULL; aws_array_list_get_at_ptr(&operation_event.granted_subscriptions, (void **)&granted_sub, i); aws_byte_buf_append_and_update(&operation_event.topic_storage, &granted_sub->topic); } } aws_mutex_lock(&fixture->lock); aws_array_list_push_back(&fixture->operation_events, &operation_event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static int s_mqtt5to3_adapter_subscribe_multi_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_mqtt_topic_subscription subscriptions[] = { { .topic = aws_byte_cursor_from_c_str("topic/1"), .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }, { .topic = aws_byte_cursor_from_c_str("topic/2"), .qos = AWS_MQTT_QOS_AT_MOST_ONCE, }, }; struct aws_array_list subscription_list; aws_array_list_init_static_from_initialized( &subscription_list, subscriptions, 2, sizeof(struct aws_mqtt_topic_subscription)); aws_mqtt_client_connection_subscribe_multiple( connection, &subscription_list, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_multi_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_SUCCESS, }, }; aws_array_list_init_static_from_initialized( &expected_events[0].granted_subscriptions, (void *)subscriptions, 2, sizeof(struct aws_mqtt_topic_subscription)); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } 
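/* AWS_TEST_CASE pairs the scenario function above with a named entry for the test harness; every test in this file follows the same function-plus-registration pattern */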
AWS_TEST_CASE(mqtt5to3_adapter_subscribe_multi_success, s_mqtt5to3_adapter_subscribe_multi_success_fn) /* * This function tests receiving a multi-topic subscribe acknowledgement after disconnecting from * the server. * It expects the subscribe to complete with AWS_ERROR_MQTT5_USER_REQUESTED_STOP and no granted subscriptions. */ static int s_mqtt5to3_adapter_subscribe_multi_null_suback_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_mqtt_topic_subscription subscriptions[] = { { .topic = aws_byte_cursor_from_c_str("topic/1"), .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }, { .topic = aws_byte_cursor_from_c_str("topic/2"), .qos = AWS_MQTT_QOS_AT_MOST_ONCE, }, }; struct aws_array_list subscription_list; aws_array_list_init_static_from_initialized( &subscription_list, subscriptions, 2, sizeof(struct aws_mqtt_topic_subscription)); aws_mqtt_client_connection_subscribe_multiple( connection, &subscription_list, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_multi_complete, &fixture); struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_MQTT5_USER_REQUESTED_STOP, }, }; aws_mqtt_client_connection_disconnect( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_disconnection_complete, &fixture); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_DISCONNECTION_COMPLETE, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_multi_null_suback, s_mqtt5to3_adapter_subscribe_multi_null_suback_fn) static int s_mqtt5_mock_server_handle_subscribe_suback_failure( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)user_data; struct aws_mqtt5_packet_subscribe_view *subscribe_view = packet; AWS_VARIABLE_LENGTH_ARRAY( enum aws_mqtt5_suback_reason_code, mqtt5_suback_codes, subscribe_view->subscription_count); for (size_t i = 0; i < subscribe_view->subscription_count; ++i) { enum aws_mqtt5_suback_reason_code *reason_code_ptr = &mqtt5_suback_codes[i]; *reason_code_ptr = (i % 2) ?
(enum aws_mqtt5_suback_reason_code)subscribe_view->subscriptions[i].qos : AWS_MQTT5_SARC_QUOTA_EXCEEDED; } struct aws_mqtt5_packet_suback_view suback_view = { .packet_id = subscribe_view->packet_id, .reason_code_count = subscribe_view->subscription_count, .reason_codes = mqtt5_suback_codes, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_SUBACK, &suback_view); } static int s_mqtt5to3_adapter_subscribe_single_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_failure; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_subscribe( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); struct aws_mqtt_topic_subscription expected_subs[1] = { { .topic = topic, .qos = AWS_MQTT_QOS_FAILURE, }, }; struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_SUCCESS, }, }; aws_array_list_init_static_from_initialized( &expected_events[0].granted_subscriptions, (void *)expected_subs, 1, sizeof(struct aws_mqtt_topic_subscription)); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_single_failure, s_mqtt5to3_adapter_subscribe_single_failure_fn) static int s_mqtt5to3_adapter_subscribe_single_invalid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct 
aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor bad_topic = aws_byte_cursor_from_c_str("#/derp"); ASSERT_INT_EQUALS( 0, aws_mqtt_client_connection_subscribe( connection, &bad_topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_single_invalid, s_mqtt5to3_adapter_subscribe_single_invalid_fn) static int s_mqtt5to3_adapter_subscribe_multi_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_failure; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_mqtt_topic_subscription subscriptions[] = { { .topic = aws_byte_cursor_from_c_str("topic/1"), .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }, { .topic = aws_byte_cursor_from_c_str("topic/2"), .qos = AWS_MQTT_QOS_AT_MOST_ONCE, }, }; struct aws_array_list subscription_list; aws_array_list_init_static_from_initialized( &subscription_list, subscriptions, 2, sizeof(struct aws_mqtt_topic_subscription)); aws_mqtt_client_connection_subscribe_multiple( connection, &subscription_list, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_multi_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); /* reuse the subscriptions array for validation, but the first one will fail */ subscriptions[0].qos = AWS_MQTT_QOS_FAILURE; struct aws_mqtt3_operation_event expected_events[] = {{ .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_SUCCESS, }}; aws_array_list_init_static_from_initialized( &expected_events[0].granted_subscriptions, (void *)subscriptions, 2, sizeof(struct aws_mqtt_topic_subscription)); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events, AWS_ARRAY_SIZE(expected_events))); 
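/* note: the mock suback handler above returned AWS_MQTT5_SARC_QUOTA_EXCEEDED for topic/1, which the 311 adapter surfaces as an AWS_MQTT_QOS_FAILURE entry in the granted list, while topic/2 keeps its requested qos */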
aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_multi_failure, s_mqtt5to3_adapter_subscribe_multi_failure_fn) static int s_mqtt5to3_adapter_subscribe_multi_invalid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_failure; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_mqtt_topic_subscription subscriptions[] = { { .topic = aws_byte_cursor_from_c_str("topic/1"), .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }, { .topic = aws_byte_cursor_from_c_str("#/#"), .qos = AWS_MQTT_QOS_AT_MOST_ONCE, }, }; struct aws_array_list subscription_list; aws_array_list_init_static_from_initialized( &subscription_list, subscriptions, 2, sizeof(struct aws_mqtt_topic_subscription)); ASSERT_INT_EQUALS( 0, aws_mqtt_client_connection_subscribe_multiple( connection, &subscription_list, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_multi_complete, &fixture)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_multi_invalid, s_mqtt5to3_adapter_subscribe_multi_invalid_fn) static int s_mqtt5to3_adapter_subscribe_single_publish_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback_and_forward; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; aws_mqtt_client_connection_set_on_any_publish_handler( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish, &fixture); struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = 
s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("derp"); aws_mqtt_client_connection_subscribe( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Payload!"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, 1); struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = AWS_ERROR_SUCCESS, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic, .payload_cursor = payload, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic, .payload_cursor = payload, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_subscribe_single_publish, s_mqtt5to3_adapter_subscribe_single_publish_fn) static int s_mqtt5to3_adapter_subscribe_multi_overlapping_publish_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback_and_forward; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; aws_mqtt_client_connection_set_on_any_publish_handler( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish, &fixture); struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, 
AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic1 = aws_byte_cursor_from_c_str("hello/world"); struct aws_byte_cursor topic2 = aws_byte_cursor_from_c_str("hello/+"); struct aws_byte_cursor topic3 = aws_byte_cursor_from_c_str("derp"); ASSERT_TRUE( 0 != aws_mqtt_client_connection_subscribe( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture)); ASSERT_TRUE( 0 != aws_mqtt_client_connection_subscribe( connection, &topic2, AWS_MQTT_QOS_AT_MOST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture)); ASSERT_TRUE( 0 != aws_mqtt_client_connection_subscribe( connection, &topic3, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture)); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 3); struct aws_byte_cursor payload1 = aws_byte_cursor_from_c_str("Payload 1!"); struct aws_byte_cursor payload2 = aws_byte_cursor_from_c_str("Payload 2!"); aws_mqtt_client_connection_publish( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload1, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); aws_mqtt_client_connection_publish( connection, &topic3, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload2, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 2); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, 2); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, 3); struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic1, .payload_cursor = payload1, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic3, .payload_cursor = payload2, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic1, .payload_cursor = payload1, .expected_count = 2, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic3, .payload_cursor = payload2, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_subscribe_multi_overlapping_publish, s_mqtt5to3_adapter_subscribe_multi_overlapping_publish_fn) static void s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_unsubscribe_complete( struct aws_mqtt_client_connection *connection, uint16_t packet_id, int error_code, void *userdata) { (void)connection; (void)packet_id; struct aws_mqtt5_to_mqtt3_adapter_test_fixture *fixture = userdata; struct aws_mqtt3_operation_event operation_event = { .type = AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, .error_code = error_code, }; aws_mutex_lock(&fixture->lock); 
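/* record the unsubscribe completion under the fixture lock, then wake any test thread blocked in s_wait_for_n_adapter_operation_events */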
aws_array_list_push_back(&fixture->operation_events, &operation_event); aws_mutex_unlock(&fixture->lock); aws_condition_variable_notify_all(&fixture->signal); } static int s_mqtt5to3_adapter_unsubscribe_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback_and_forward; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = aws_mqtt5_mock_server_handle_unsubscribe_unsuback_success; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; aws_mqtt_client_connection_set_on_any_publish_handler( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish, &fixture); struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("hello/world"); aws_mqtt_client_connection_subscribe( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("Payload 1!"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, 1); struct aws_mqtt3_operation_event expected_events_before[] = { { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic, .payload_cursor = payload, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic, .payload_cursor = payload, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(expected_events_before), expected_events_before)); aws_mqtt_client_connection_unsubscribe( connection, &topic, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_unsubscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, 
AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, 1); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, 2); struct aws_mqtt3_operation_event expected_events_after[] = { { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic, .payload_cursor = payload, .expected_count = 2, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic, .payload_cursor = payload, }, { .type = AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(expected_events_after), expected_events_after)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_unsubscribe_success, s_mqtt5to3_adapter_unsubscribe_success_fn) static int s_mqtt5_mock_server_handle_unsubscribe_unsuback_failure( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view = packet; AWS_VARIABLE_LENGTH_ARRAY( enum aws_mqtt5_unsuback_reason_code, mqtt5_unsuback_codes, unsubscribe_view->topic_filter_count); for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) { enum aws_mqtt5_unsuback_reason_code *reason_code_ptr = &mqtt5_unsuback_codes[i]; *reason_code_ptr = AWS_MQTT5_UARC_IMPLEMENTATION_SPECIFIC_ERROR; } struct aws_mqtt5_packet_unsuback_view unsuback_view = { .packet_id = unsubscribe_view->packet_id, .reason_code_count = AWS_ARRAY_SIZE(mqtt5_unsuback_codes), .reason_codes = mqtt5_unsuback_codes, }; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_UNSUBACK, &unsuback_view); } static int s_mqtt5to3_adapter_unsubscribe_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = s_mqtt5_mock_server_handle_unsubscribe_unsuback_failure; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("hello/world"); aws_mqtt_client_connection_unsubscribe( connection, &topic, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_unsubscribe_complete, &fixture); 
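/* the mock server's unsuback handler above answers every topic filter with AWS_MQTT5_UARC_IMPLEMENTATION_SPECIFIC_ERROR; the adapter should still fire the unsubscribe completion callback */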
s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, 1); struct aws_mqtt3_operation_event expected_events[] = { { .type = AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(expected_events), expected_events)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_unsubscribe_failure, s_mqtt5to3_adapter_unsubscribe_failure_fn) static int s_mqtt5to3_adapter_unsubscribe_invalid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = s_mqtt5_mock_server_handle_unsubscribe_unsuback_failure; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("#/bad"); ASSERT_INT_EQUALS( 0, aws_mqtt_client_connection_unsubscribe( connection, &topic, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_unsubscribe_complete, &fixture)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_unsubscribe_invalid, s_mqtt5to3_adapter_unsubscribe_invalid_fn) static int s_mqtt5to3_adapter_unsubscribe_overlapped_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = aws_mqtt5_mock_server_handle_publish_puback_and_forward; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = aws_mqtt5_mock_server_handle_unsubscribe_unsuback_success; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; aws_mqtt_client_connection_set_on_any_publish_handler( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish, &fixture); struct aws_mqtt_connection_options connection_options; 
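/* standard adapter connect boilerplate: fill in the connection options from the fixture, record lifecycle events, and wait for CONNECTION_COMPLETE before issuing the overlapping subscribes */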
s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic1 = aws_byte_cursor_from_c_str("hello/world"); struct aws_byte_cursor topic2 = aws_byte_cursor_from_c_str("hello/+"); aws_mqtt_client_connection_subscribe( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); aws_mqtt_client_connection_subscribe( connection, &topic2, AWS_MQTT_QOS_AT_MOST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 2); struct aws_byte_cursor payload1 = aws_byte_cursor_from_c_str("Payload 1!"); aws_mqtt_client_connection_publish( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload1, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, 2); struct aws_mqtt3_operation_event expected_events_before[] = { { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic1, .payload_cursor = payload1, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic1, .payload_cursor = payload1, .expected_count = 2, }, }; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(expected_events_before), expected_events_before)); /* drop the wildcard subscription and publish again, should only get one more publish received subscribed */ aws_mqtt_client_connection_unsubscribe( connection, &topic2, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_unsubscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, 1); aws_mqtt_client_connection_publish( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload1, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 2); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, 2); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, 3); struct aws_mqtt3_operation_event expected_events_after[] = { { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_ANY, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic1, .payload_cursor = payload1, .expected_count = 2, }, { .type = AWS_MQTT3_OET_PUBLISH_RECEIVED_SUBSCRIBED, .error_code = AWS_ERROR_SUCCESS, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, .topic_cursor = topic1, .payload_cursor = payload1, .expected_count = 3, }, { .type = AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, }, }; 
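/* with the hello/+ subscription gone, only the hello/world handler remains, so the second publish adds one PUBLISH_RECEIVED_SUBSCRIBED (3 total) and one PUBLISH_RECEIVED_ANY (2 total) */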
ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(expected_events_after), expected_events_after)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_unsubscribe_overlapped, s_mqtt5to3_adapter_unsubscribe_overlapped_fn) static int s_mqtt5_mock_server_handle_connect_publish_throttled( void *packet, struct aws_mqtt5_server_mock_connection_context *connection, void *user_data) { (void)packet; (void)user_data; struct aws_mqtt5_packet_connack_view connack_view; AWS_ZERO_STRUCT(connack_view); uint16_t receive_maximum = 1; connack_view.reason_code = AWS_MQTT5_CRC_SUCCESS; connack_view.receive_maximum = &receive_maximum; return aws_mqtt5_mock_server_send_packet(connection, AWS_MQTT5_PT_CONNACK, &connack_view); } /* * In this test, we configure the server to only allow a single unacked QoS 1 publish and to not respond to * publishes. Then we throw three QoS 1 publishes at the client. This leads to the client being "paralyzed" waiting * for a PUBACK for the first publish. We then query the client stats and expect to see one unacked operation and * two additional (for a total of three) incomplete operations. */ static int s_mqtt5to3_adapter_get_stats_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_CONNECT] = s_mqtt5_mock_server_handle_connect_publish_throttled; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = NULL; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; aws_mqtt_client_connection_set_on_any_publish_handler( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish, &fixture); struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("hi/there"); struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("something"); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); aws_mqtt_client_connection_publish( connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); aws_thread_current_sleep(aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); struct
aws_mqtt_connection_operation_statistics stats; aws_mqtt_client_connection_get_stats(connection, &stats); ASSERT_INT_EQUALS(1, stats.unacked_operation_count); ASSERT_INT_EQUALS(3, stats.incomplete_operation_count); ASSERT_TRUE(stats.unacked_operation_size > 0); ASSERT_TRUE(stats.incomplete_operation_size > 0); ASSERT_INT_EQUALS(stats.unacked_operation_size * 3, stats.incomplete_operation_size); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_get_stats, s_mqtt5to3_adapter_get_stats_fn) /* * In this test we invoke a resubscribe while there are no active subscriptions. This hits a degenerate pathway * that we have to handle specially inside the adapter since an empty subscribe is invalid. */ static int s_mqtt5to3_adapter_resubscribe_nothing_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; aws_mqtt_client_connection_set_on_any_publish_handler( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish, &fixture); struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); aws_mqtt_resubscribe_existing_topics( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_multi_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); struct aws_mqtt3_operation_event resubscribe_ack[] = {{ .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_MQTT_CONNECTION_RESUBSCRIBE_NO_TOPICS, /* no granted subscriptions */ }}; ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(resubscribe_ack), resubscribe_ack)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_resubscribe_nothing, s_mqtt5to3_adapter_resubscribe_nothing_fn) /* * In this test we subscribe individually to three separate topics, wait, then invoke resubscribe on the client and * verify that we record 4 subacks, 3 for the individual and one 3-sized multi-sub for appropriate topics. 
*/ static int s_mqtt5to3_adapter_resubscribe_something_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = s_mqtt5_mock_server_handle_subscribe_suback_success; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; aws_mqtt_client_connection_set_on_any_publish_handler( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_on_any_publish, &fixture); struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic1 = aws_byte_cursor_from_c_str("hello/world"); struct aws_byte_cursor topic2 = aws_byte_cursor_from_c_str("foo/bar"); struct aws_byte_cursor topic3 = aws_byte_cursor_from_c_str("a/b/c"); aws_mqtt_client_connection_subscribe( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); aws_mqtt_client_connection_subscribe( connection, &topic2, AWS_MQTT_QOS_AT_MOST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); aws_mqtt_client_connection_subscribe( connection, &topic3, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 3); aws_mqtt_resubscribe_existing_topics( connection, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_multi_complete, &fixture); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 4); struct aws_mqtt3_operation_event resubscribe_ack[] = {{ .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, }}; struct aws_mqtt_topic_subscription subscriptions[] = { { .topic = topic1, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }, { .topic = topic2, .qos = AWS_MQTT_QOS_AT_MOST_ONCE, }, { .topic = topic3, .qos = AWS_MQTT_QOS_AT_LEAST_ONCE, }, }; aws_array_list_init_static_from_initialized( &resubscribe_ack[0].granted_subscriptions, (void *)subscriptions, AWS_ARRAY_SIZE(subscriptions), sizeof(struct aws_mqtt_topic_subscription)); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(resubscribe_ack), resubscribe_ack)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5to3_adapter_resubscribe_something, 
s_mqtt5to3_adapter_resubscribe_something_fn) static int s_mqtt5to3_adapter_operation_callbacks_after_shutdown_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct mqtt5_client_test_options test_options; aws_mqtt5_client_test_init_default_options(&test_options); test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_SUBSCRIBE] = NULL; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_PUBLISH] = NULL; test_options.server_function_table.packet_handlers[AWS_MQTT5_PT_UNSUBSCRIBE] = NULL; struct aws_mqtt5_client_mqtt5_mock_test_fixture_options test_fixture_options = { .client_options = &test_options.client_options, .server_function_table = &test_options.server_function_table, }; struct aws_mqtt5_to_mqtt3_adapter_test_fixture fixture; ASSERT_SUCCESS(aws_mqtt5_to_mqtt3_adapter_test_fixture_init(&fixture, allocator, &test_fixture_options)); struct aws_mqtt_client_connection *connection = fixture.connection; struct aws_mqtt_connection_options connection_options; s_init_adapter_connection_options_from_fixture(&connection_options, &fixture); connection_options.on_connection_complete = s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_connection_complete; connection_options.user_data = &fixture; aws_mqtt_client_connection_connect(connection, &connection_options); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_CONNECTION_COMPLETE, 1); struct aws_byte_cursor topic1 = aws_byte_cursor_from_c_str("hello/world"); struct aws_byte_cursor topic2 = aws_byte_cursor_from_c_str("hello/+"); aws_mqtt_client_connection_subscribe( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_topic_specific_publish, &fixture, NULL, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_subscribe_complete, &fixture); struct aws_byte_cursor payload1 = aws_byte_cursor_from_c_str("Payload 1!"); aws_mqtt_client_connection_publish( connection, &topic1, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload1, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_publish_complete, &fixture); aws_mqtt_client_connection_unsubscribe( connection, &topic2, s_aws_mqtt5_to_mqtt3_adapter_test_fixture_record_unsubscribe_complete, &fixture); aws_mqtt_client_connection_release(connection); s_wait_for_n_adapter_lifecycle_events(&fixture, AWS_MQTT3_LET_TERMINATION, 1); fixture.connection = NULL; s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_PUBLISH_COMPLETE, 1); s_wait_for_n_adapter_operation_events(&fixture, AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, 1); struct aws_mqtt3_operation_event failed_ops[] = { { .type = AWS_MQTT3_OET_SUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_MQTT_CONNECTION_DESTROYED, }, { .type = AWS_MQTT3_OET_PUBLISH_COMPLETE, .error_code = AWS_ERROR_MQTT_CONNECTION_DESTROYED, }, { .type = AWS_MQTT3_OET_UNSUBSCRIBE_COMPLETE, .error_code = AWS_ERROR_MQTT_CONNECTION_DESTROYED, }, }; struct aws_mqtt_topic_subscription failed_subscriptions[] = { { .topic = topic1, .qos = AWS_MQTT_QOS_FAILURE, }, }; aws_array_list_init_static_from_initialized( &failed_ops[0].granted_subscriptions, (void *)failed_subscriptions, AWS_ARRAY_SIZE(failed_subscriptions), sizeof(struct aws_mqtt_topic_subscription)); ASSERT_SUCCESS(s_aws_mqtt5_to_mqtt3_adapter_test_fixture_verify_operation_sequence_contains( &fixture, AWS_ARRAY_SIZE(failed_ops), failed_ops)); aws_mqtt5_to_mqtt3_adapter_test_fixture_clean_up(&fixture); aws_mqtt_library_clean_up(); return 
AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5to3_adapter_operation_callbacks_after_shutdown, s_mqtt5to3_adapter_operation_callbacks_after_shutdown_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_topic_alias_tests.c000066400000000000000000000531761456575232400267340ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_topic1, "hello/world"); AWS_STATIC_STRING_FROM_LITERAL(s_topic2, "this/is/a/longer/topic"); static int s_mqtt5_inbound_topic_alias_register_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_inbound_topic_alias_resolver resolver; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_init(&resolver, allocator)); ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_reset(&resolver, 10)); ASSERT_FAILS( aws_mqtt5_inbound_topic_alias_resolver_register_alias(&resolver, 0, aws_byte_cursor_from_string(s_topic1))); ASSERT_FAILS( aws_mqtt5_inbound_topic_alias_resolver_register_alias(&resolver, 11, aws_byte_cursor_from_string(s_topic1))); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_inbound_topic_alias_register_failure, s_mqtt5_inbound_topic_alias_register_failure_fn) static int s_mqtt5_inbound_topic_alias_resolve_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_inbound_topic_alias_resolver resolver; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_init(&resolver, allocator)); ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_reset(&resolver, 10)); ASSERT_SUCCESS( aws_mqtt5_inbound_topic_alias_resolver_register_alias(&resolver, 1, aws_byte_cursor_from_string(s_topic1))); ASSERT_SUCCESS( aws_mqtt5_inbound_topic_alias_resolver_register_alias(&resolver, 10, aws_byte_cursor_from_string(s_topic2))); struct aws_byte_cursor topic1; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 1, &topic1)); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, topic1.ptr, topic1.len); struct aws_byte_cursor topic2; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 10, &topic2)); ASSERT_BIN_ARRAYS_EQUALS(s_topic2->bytes, s_topic2->len, topic2.ptr, topic2.len); /* overwrite an existing alias to verify memory is cleaned up */ ASSERT_SUCCESS( aws_mqtt5_inbound_topic_alias_resolver_register_alias(&resolver, 10, aws_byte_cursor_from_string(s_topic1))); struct aws_byte_cursor topic3; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 10, &topic3)); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, topic3.ptr, topic3.len); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_inbound_topic_alias_resolve_success, s_mqtt5_inbound_topic_alias_resolve_success_fn) static int s_mqtt5_inbound_topic_alias_resolve_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_inbound_topic_alias_resolver resolver; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_init(&resolver, allocator)); ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_reset(&resolver, 10)); struct aws_byte_cursor topic; ASSERT_FAILS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 0, &topic)); ASSERT_FAILS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 11, &topic)); 
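    /* alias 10 is inside the configured range of [1, 10], but nothing has been registered since the reset,
     * so resolution must fail for it as well */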
ASSERT_FAILS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 10, &topic)); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_inbound_topic_alias_resolve_failure, s_mqtt5_inbound_topic_alias_resolve_failure_fn) static int s_mqtt5_inbound_topic_alias_reset_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_inbound_topic_alias_resolver resolver; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_init(&resolver, allocator)); ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_reset(&resolver, 10)); ASSERT_SUCCESS( aws_mqtt5_inbound_topic_alias_resolver_register_alias(&resolver, 1, aws_byte_cursor_from_string(s_topic1))); struct aws_byte_cursor topic; ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 1, &topic)); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, topic.ptr, topic.len); ASSERT_SUCCESS(aws_mqtt5_inbound_topic_alias_resolver_reset(&resolver, 10)); ASSERT_FAILS(aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 1, &topic)); aws_mqtt5_inbound_topic_alias_resolver_clean_up(&resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_inbound_topic_alias_reset, s_mqtt5_inbound_topic_alias_reset_fn) static int s_mqtt5_outbound_topic_alias_disabled_resolve_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_DISABLED); ASSERT_NOT_NULL(resolver); struct aws_mqtt5_packet_publish_view publish_view = {.topic = aws_byte_cursor_from_string(s_topic1)}; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(0, outbound_alias_id); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, outbound_topic.ptr, outbound_topic.len); ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, 0)); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_outbound_topic_alias_disabled_resolve_success, s_mqtt5_outbound_topic_alias_disabled_resolve_success_fn) static int s_mqtt5_outbound_topic_alias_disabled_resolve_failure_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_DISABLED); ASSERT_NOT_NULL(resolver); struct aws_mqtt5_packet_publish_view publish_view = { .topic = { .ptr = NULL, .len = 0, }, }; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); ASSERT_FAILS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_outbound_topic_alias_disabled_resolve_failure, s_mqtt5_outbound_topic_alias_disabled_resolve_failure_fn) static int s_mqtt5_outbound_topic_alias_manual_resolve_success_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_MANUAL); ASSERT_NOT_NULL(resolver); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, 5); struct 
aws_mqtt5_packet_publish_view publish_view1 = { .topic = aws_byte_cursor_from_string(s_topic1), }; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); /* no alias case */ ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view1, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(0, outbound_alias_id); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, outbound_topic.ptr, outbound_topic.len); uint16_t alias = 1; struct aws_mqtt5_packet_publish_view publish_view2 = { .topic = aws_byte_cursor_from_string(s_topic1), .topic_alias = &alias, }; outbound_alias_id = 0; AWS_ZERO_STRUCT(outbound_topic); /* new valid alias assignment case */ ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view2, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(1, outbound_alias_id); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, outbound_topic.ptr, outbound_topic.len); struct aws_mqtt5_packet_publish_view publish_view3 = { .topic = aws_byte_cursor_from_string(s_topic1), .topic_alias = &alias, }; outbound_alias_id = 0; AWS_ZERO_STRUCT(outbound_topic); /* reuse valid alias assignment case */ ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view3, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(1, outbound_alias_id); ASSERT_INT_EQUALS(0, outbound_topic.len); /* switch topics but keep the alias, we should resolve to a full binding with the new topic */ struct aws_mqtt5_packet_publish_view publish_view4 = { .topic = aws_byte_cursor_from_string(s_topic2), .topic_alias = &alias, }; outbound_alias_id = 0; AWS_ZERO_STRUCT(outbound_topic); /* reuse valid alias assignment case */ ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view4, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(1, outbound_alias_id); ASSERT_BIN_ARRAYS_EQUALS(s_topic2->bytes, s_topic2->len, outbound_topic.ptr, outbound_topic.len); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_manual_resolve_success, s_mqtt5_outbound_topic_alias_manual_resolve_success_fn) static int s_mqtt5_outbound_topic_alias_manual_resolve_failure_zero_alias_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_MANUAL); ASSERT_NOT_NULL(resolver); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, 5); uint16_t alias = 0; struct aws_mqtt5_packet_publish_view publish_view = { .topic = aws_byte_cursor_from_string(s_topic1), .topic_alias = &alias, }; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); ASSERT_FAILS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_outbound_topic_alias_manual_resolve_failure_zero_alias, s_mqtt5_outbound_topic_alias_manual_resolve_failure_zero_alias_fn) static int s_mqtt5_outbound_topic_alias_manual_resolve_failure_too_big_alias_fn( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, 
AWS_MQTT5_COTABT_MANUAL); ASSERT_NOT_NULL(resolver); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, 5); uint16_t alias = 6; struct aws_mqtt5_packet_publish_view publish_view = { .topic = aws_byte_cursor_from_string(s_topic1), .topic_alias = &alias, }; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); ASSERT_FAILS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_outbound_topic_alias_manual_resolve_failure_too_big_alias, s_mqtt5_outbound_topic_alias_manual_resolve_failure_too_big_alias_fn) static int s_mqtt5_outbound_topic_alias_manual_reset_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_MANUAL); ASSERT_NOT_NULL(resolver); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, 5); uint16_t alias = 2; struct aws_mqtt5_packet_publish_view publish_view = { .topic = aws_byte_cursor_from_string(s_topic1), .topic_alias = &alias, }; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); /* First, successfully bind an alias */ ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(2, outbound_alias_id); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, outbound_topic.ptr, outbound_topic.len); /* Successfully use the alias */ ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(2, outbound_alias_id); ASSERT_INT_EQUALS(0, outbound_topic.len); /* Reset */ aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, 5); /* Fail to reuse the alias */ ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(2, outbound_alias_id); ASSERT_BIN_ARRAYS_EQUALS(s_topic1->bytes, s_topic1->len, outbound_topic.ptr, outbound_topic.len); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_manual_reset, s_mqtt5_outbound_topic_alias_manual_reset_fn) static int s_mqtt5_outbound_topic_alias_lru_zero_size_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_LRU); ASSERT_NOT_NULL(resolver); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, 0); struct aws_mqtt5_packet_publish_view publish_view = { .topic = aws_byte_cursor_from_string(s_topic1), }; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(0, outbound_alias_id); ASSERT_BIN_ARRAYS_EQUALS(publish_view.topic.ptr, publish_view.topic.len, outbound_topic.ptr, outbound_topic.len); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_lru_zero_size, s_mqtt5_outbound_topic_alias_lru_zero_size_fn) #define 
LRU_SEQUENCE_TEST_CACHE_SIZE 2 struct lru_test_operation { struct aws_byte_cursor topic; size_t expected_alias_id; bool expected_reuse; }; #define DEFINE_LRU_TEST_OPERATION(topic_type, alias_index, reused) \ { \ .topic = aws_byte_cursor_from_string(s_topic_##topic_type), .expected_alias_id = alias_index, \ .expected_reuse = reused, \ } static int s_perform_lru_test_operation( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, struct lru_test_operation *operation) { struct aws_mqtt5_packet_publish_view publish_view = { .topic = operation->topic, }; uint16_t outbound_alias_id = 0; struct aws_byte_cursor outbound_topic; AWS_ZERO_STRUCT(outbound_topic); ASSERT_SUCCESS(aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish( resolver, &publish_view, &outbound_alias_id, &outbound_topic)); ASSERT_INT_EQUALS(operation->expected_alias_id, outbound_alias_id); if (operation->expected_reuse) { ASSERT_INT_EQUALS(0, outbound_topic.len); } else { ASSERT_BIN_ARRAYS_EQUALS(operation->topic.ptr, operation->topic.len, outbound_topic.ptr, outbound_topic.len); } return AWS_OP_SUCCESS; } static int s_check_lru_sequence( struct aws_mqtt5_outbound_topic_alias_resolver *resolver, struct lru_test_operation *operations, size_t operation_count) { for (size_t i = 0; i < operation_count; ++i) { struct lru_test_operation *operation = &operations[i]; ASSERT_SUCCESS(s_perform_lru_test_operation(resolver, operation)); } return AWS_OP_SUCCESS; } static int s_perform_lru_sequence_test( struct aws_allocator *allocator, struct lru_test_operation *operations, size_t operation_count) { struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_LRU); ASSERT_NOT_NULL(resolver); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, LRU_SEQUENCE_TEST_CACHE_SIZE); ASSERT_SUCCESS(s_check_lru_sequence(resolver, operations, operation_count)); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_topic_a, "topic/a"); AWS_STATIC_STRING_FROM_LITERAL(s_topic_b, "b/topic"); AWS_STATIC_STRING_FROM_LITERAL(s_topic_c, "topic/c"); static int s_mqtt5_outbound_topic_alias_lru_a_ar_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct lru_test_operation test_operations[] = { DEFINE_LRU_TEST_OPERATION(a, 1, false), DEFINE_LRU_TEST_OPERATION(a, 1, true), }; ASSERT_SUCCESS(s_perform_lru_sequence_test(allocator, test_operations, AWS_ARRAY_SIZE(test_operations))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_lru_a_ar, s_mqtt5_outbound_topic_alias_lru_a_ar_fn) static int s_mqtt5_outbound_topic_alias_lru_b_a_br_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct lru_test_operation test_operations[] = { DEFINE_LRU_TEST_OPERATION(b, 1, false), DEFINE_LRU_TEST_OPERATION(a, 2, false), DEFINE_LRU_TEST_OPERATION(b, 1, true), }; ASSERT_SUCCESS(s_perform_lru_sequence_test(allocator, test_operations, AWS_ARRAY_SIZE(test_operations))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_lru_b_a_br, s_mqtt5_outbound_topic_alias_lru_b_a_br_fn) static int s_mqtt5_outbound_topic_alias_lru_a_b_ar_br_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct lru_test_operation test_operations[] = { DEFINE_LRU_TEST_OPERATION(a, 1, false), DEFINE_LRU_TEST_OPERATION(b, 2, false), DEFINE_LRU_TEST_OPERATION(a, 1, true), DEFINE_LRU_TEST_OPERATION(b, 2, true), }; ASSERT_SUCCESS(s_perform_lru_sequence_test(allocator, test_operations, 
AWS_ARRAY_SIZE(test_operations))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_lru_a_b_ar_br, s_mqtt5_outbound_topic_alias_lru_a_b_ar_br_fn) static int s_mqtt5_outbound_topic_alias_lru_a_b_c_br_cr_br_cr_a_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct lru_test_operation test_operations[] = { DEFINE_LRU_TEST_OPERATION(a, 1, false), DEFINE_LRU_TEST_OPERATION(b, 2, false), DEFINE_LRU_TEST_OPERATION(c, 1, false), DEFINE_LRU_TEST_OPERATION(b, 2, true), DEFINE_LRU_TEST_OPERATION(c, 1, true), DEFINE_LRU_TEST_OPERATION(b, 2, true), DEFINE_LRU_TEST_OPERATION(c, 1, true), DEFINE_LRU_TEST_OPERATION(a, 2, false), }; ASSERT_SUCCESS(s_perform_lru_sequence_test(allocator, test_operations, AWS_ARRAY_SIZE(test_operations))); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt5_outbound_topic_alias_lru_a_b_c_br_cr_br_cr_a, s_mqtt5_outbound_topic_alias_lru_a_b_c_br_cr_br_cr_a_fn) static int s_mqtt5_outbound_topic_alias_lru_a_b_c_a_cr_b_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct lru_test_operation test_operations[] = { DEFINE_LRU_TEST_OPERATION(a, 1, false), DEFINE_LRU_TEST_OPERATION(b, 2, false), DEFINE_LRU_TEST_OPERATION(c, 1, false), DEFINE_LRU_TEST_OPERATION(a, 2, false), DEFINE_LRU_TEST_OPERATION(c, 1, true), DEFINE_LRU_TEST_OPERATION(b, 2, false), }; ASSERT_SUCCESS(s_perform_lru_sequence_test(allocator, test_operations, AWS_ARRAY_SIZE(test_operations))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_lru_a_b_c_a_cr_b, s_mqtt5_outbound_topic_alias_lru_a_b_c_a_cr_b_fn) static int s_mqtt5_outbound_topic_alias_lru_a_b_reset_a_b_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_mqtt5_outbound_topic_alias_resolver *resolver = aws_mqtt5_outbound_topic_alias_resolver_new(allocator, AWS_MQTT5_COTABT_LRU); ASSERT_NOT_NULL(resolver); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, LRU_SEQUENCE_TEST_CACHE_SIZE); struct lru_test_operation test_operations[] = { DEFINE_LRU_TEST_OPERATION(a, 1, false), DEFINE_LRU_TEST_OPERATION(b, 2, false), }; ASSERT_SUCCESS(s_check_lru_sequence(resolver, test_operations, AWS_ARRAY_SIZE(test_operations))); aws_mqtt5_outbound_topic_alias_resolver_reset(resolver, LRU_SEQUENCE_TEST_CACHE_SIZE); ASSERT_SUCCESS(s_check_lru_sequence(resolver, test_operations, AWS_ARRAY_SIZE(test_operations))); aws_mqtt5_outbound_topic_alias_resolver_destroy(resolver); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_outbound_topic_alias_lru_a_b_reset_a_b, s_mqtt5_outbound_topic_alias_lru_a_b_reset_a_b_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt5_utils_tests.c000066400000000000000000000170761456575232400256040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include static int s_mqtt5_topic_skip_rules_prefix_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_byte_cursor skip_cursor; struct aws_byte_cursor expected_cursor; /* nothing should be skipped */ skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("dont/skip/anything")); expected_cursor = aws_byte_cursor_from_c_str("dont/skip/anything"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("")); expected_cursor = aws_byte_cursor_from_c_str(""); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("/")); expected_cursor = aws_byte_cursor_from_c_str("/"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$aws")); expected_cursor = aws_byte_cursor_from_c_str("$aws"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$aws/rules")); expected_cursor = aws_byte_cursor_from_c_str("$aws/rules"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$aws/rules/")); expected_cursor = aws_byte_cursor_from_c_str("$aws/rules/"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$aws/rules/rulename")); expected_cursor = aws_byte_cursor_from_c_str("$aws/rules/rulename"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$share")); expected_cursor = aws_byte_cursor_from_c_str("$share"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$share/")); expected_cursor = aws_byte_cursor_from_c_str("$share/"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$share/share-name")); expected_cursor = aws_byte_cursor_from_c_str("$share/share-name"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); /* prefix should be skipped */ skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(aws_byte_cursor_from_c_str("$aws/rules/rulename/")); expected_cursor = aws_byte_cursor_from_c_str(""); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix( aws_byte_cursor_from_c_str("$aws/rules/some-rule/segment1/segment2")); expected_cursor = aws_byte_cursor_from_c_str("segment1/segment2"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix( aws_byte_cursor_from_c_str("$share/share-name/segment1/segment2")); expected_cursor = aws_byte_cursor_from_c_str("segment1/segment2"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); skip_cursor = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix( aws_byte_cursor_from_c_str("$share/share-name/$aws/rules/some-rule/segment1/segment2")); expected_cursor = 
aws_byte_cursor_from_c_str("segment1/segment2"); ASSERT_TRUE(aws_byte_cursor_eq(&skip_cursor, &expected_cursor)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_topic_skip_rules_prefix, s_mqtt5_topic_skip_rules_prefix_fn) static int s_mqtt5_topic_get_segment_count_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; ASSERT_INT_EQUALS(1, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str(""))); ASSERT_INT_EQUALS(1, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("hello"))); ASSERT_INT_EQUALS(2, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("hello/"))); ASSERT_INT_EQUALS(2, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("hello/world"))); ASSERT_INT_EQUALS(3, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_INT_EQUALS(3, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("//"))); ASSERT_INT_EQUALS(4, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("$SYS/bad/no/"))); ASSERT_INT_EQUALS(1, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("$aws"))); ASSERT_INT_EQUALS(8, aws_mqtt5_topic_get_segment_count(aws_byte_cursor_from_c_str("//a//b/c//"))); ASSERT_INT_EQUALS( 2, aws_mqtt5_topic_get_segment_count(aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix( aws_byte_cursor_from_c_str("$aws/rules/some-rule/segment1/segment2")))); ASSERT_INT_EQUALS( 1, aws_mqtt5_topic_get_segment_count(aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix( aws_byte_cursor_from_c_str("$aws/rules/some-rule/segment1")))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_topic_get_segment_count, s_mqtt5_topic_get_segment_count_fn) static int s_mqtt5_shared_subscription_validation_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str(""))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("oof"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$sha"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share//"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share//test"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m+/"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m#/"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m"))); ASSERT_FALSE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m/"))); ASSERT_TRUE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m/#"))); ASSERT_TRUE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m/great"))); ASSERT_TRUE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m/test/+"))); ASSERT_TRUE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m/+/test"))); ASSERT_TRUE(aws_mqtt_is_topic_filter_shared_subscription(aws_byte_cursor_from_c_str("$share/m/test/#"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt5_shared_subscription_validation, s_mqtt5_shared_subscription_validation_fn) 
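/*
 * Illustrative sketch (not part of the original test suite; kept inside a comment so the translation unit is
 * unchanged): the two helpers exercised above are meant to compose. A caller enforcing a per-topic segment
 * budget would first strip the uncounted "$share/<name>/" and "$aws/rules/<rule-name>/" prefixes and then
 * count what remains. The helper name and the max_segments parameter are assumptions for illustration only,
 * not values taken from this repository.
 *
 *   static bool s_example_topic_within_segment_budget(struct aws_byte_cursor topic, size_t max_segments) {
 *       struct aws_byte_cursor counted = aws_mqtt5_topic_skip_aws_iot_core_uncounted_prefix(topic);
 *       return aws_mqtt5_topic_get_segment_count(counted) <= max_segments;
 *   }
 *
 * Given the cases above, a cursor for "$aws/rules/some-rule/segment1/segment2" with a budget of 2 would pass
 * (two counted segments remain after the prefix is skipped), while the same topic with a budget of 1 would not.
 */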
aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/mqtt_subscription_set_tests.c000066400000000000000000001137361456575232400277560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/mqtt/private/mqtt_subscription_set.h" #include #include #include struct subscription_test_context_callback_record { struct aws_allocator *allocator; struct aws_byte_cursor topic; struct aws_byte_buf topic_buffer; size_t callback_count; }; static struct subscription_test_context_callback_record *s_subscription_test_context_callback_record_new( struct aws_allocator *allocator, struct aws_byte_cursor topic) { struct subscription_test_context_callback_record *record = aws_mem_calloc(allocator, 1, sizeof(struct subscription_test_context_callback_record)); record->allocator = allocator; record->callback_count = 1; aws_byte_buf_init_copy_from_cursor(&record->topic_buffer, allocator, topic); record->topic = aws_byte_cursor_from_buf(&record->topic_buffer); return record; } static void s_subscription_test_context_callback_record_destroy( struct subscription_test_context_callback_record *record) { if (record == NULL) { return; } aws_byte_buf_clean_up(&record->topic_buffer); aws_mem_release(record->allocator, record); } static void s_destroy_callback_record(void *element) { struct subscription_test_context_callback_record *record = element; s_subscription_test_context_callback_record_destroy(record); } struct aws_mqtt_subscription_set_test_context { struct aws_allocator *allocator; struct aws_hash_table callbacks; }; static void s_aws_mqtt_subscription_set_test_context_init( struct aws_mqtt_subscription_set_test_context *context, struct aws_allocator *allocator) { context->allocator = allocator; aws_hash_table_init( &context->callbacks, allocator, 10, aws_hash_byte_cursor_ptr, aws_mqtt_byte_cursor_hash_equality, NULL, s_destroy_callback_record); } static void s_aws_mqtt_subscription_set_test_context_clean_up(struct aws_mqtt_subscription_set_test_context *context) { aws_hash_table_clean_up(&context->callbacks); } static void s_aws_mqtt_subscription_set_test_context_record_callback( struct aws_mqtt_subscription_set_test_context *context, struct aws_byte_cursor topic) { struct aws_hash_element *element = NULL; aws_hash_table_find(&context->callbacks, &topic, &element); if (element == NULL) { struct subscription_test_context_callback_record *record = s_subscription_test_context_callback_record_new(context->allocator, topic); aws_hash_table_put(&context->callbacks, &record->topic, record, NULL); } else { struct subscription_test_context_callback_record *record = element->value; ++record->callback_count; } } static int s_aws_mqtt_subscription_set_test_context_validate_callbacks( struct aws_mqtt_subscription_set_test_context *context, struct subscription_test_context_callback_record *expected_records, size_t expected_record_count) { ASSERT_INT_EQUALS(expected_record_count, aws_hash_table_get_entry_count(&context->callbacks)); for (size_t i = 0; i < expected_record_count; ++i) { struct subscription_test_context_callback_record *expected_record = expected_records + i; struct aws_hash_element *element = NULL; aws_hash_table_find(&context->callbacks, &expected_record->topic, &element); ASSERT_TRUE(element != NULL); ASSERT_TRUE(element->value != NULL); struct subscription_test_context_callback_record *actual_record = element->value; ASSERT_INT_EQUALS(expected_record->callback_count, actual_record->callback_count); } return 
AWS_OP_SUCCESS; } static void s_subscription_set_test_on_publish_received( struct aws_mqtt_client_connection *connection, const struct aws_byte_cursor *topic, const struct aws_byte_cursor *payload, bool dup, enum aws_mqtt_qos qos, bool retain, void *userdata) { (void)connection; (void)payload; (void)qos; (void)dup; (void)retain; struct aws_mqtt_subscription_set_test_context *context = userdata; s_aws_mqtt_subscription_set_test_context_record_callback(context, *topic); } enum subscription_set_operation_type { SSOT_ADD, SSOT_REMOVE, SSOT_PUBLISH, }; struct subscription_set_operation { enum subscription_set_operation_type type; const char *topic_filter; const char *topic; }; static void s_subscription_set_perform_operations( struct aws_mqtt_subscription_set_test_context *context, struct aws_mqtt_subscription_set *subscription_set, struct subscription_set_operation *operations, size_t operation_count) { for (size_t i = 0; i < operation_count; ++i) { struct subscription_set_operation *operation = operations + i; switch (operation->type) { case SSOT_ADD: { struct aws_mqtt_subscription_set_subscription_options subscription_options = { .topic_filter = aws_byte_cursor_from_c_str(operation->topic_filter), .callback_user_data = context, .on_publish_received = s_subscription_set_test_on_publish_received, }; aws_mqtt_subscription_set_add_subscription(subscription_set, &subscription_options); break; } case SSOT_REMOVE: aws_mqtt_subscription_set_remove_subscription( subscription_set, aws_byte_cursor_from_c_str(operation->topic_filter)); break; case SSOT_PUBLISH: { struct aws_mqtt_subscription_set_publish_received_options publish_options = { .topic = aws_byte_cursor_from_c_str(operation->topic), }; aws_mqtt_subscription_set_on_publish_received(subscription_set, &publish_options); break; } } } } static int s_mqtt_subscription_set_add_empty_not_subbed_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("/"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("abc"))); aws_mqtt_subscription_set_destroy(subscription_set); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_add_empty_not_subbed, s_mqtt_subscription_set_add_empty_not_subbed_fn) static int s_mqtt_subscription_set_add_single_path_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { { .type = SSOT_ADD, .topic_filter = "a/b/c", }, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("/"))); 
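    /*
     * aws_mqtt_subscription_set_is_in_topic_tree reports whether that exact topic filter string was added;
     * the checks below therefore fail for prefixes ("a", "a/b") and extensions ("a/b/c/d", "a/b/c/") of the
     * added "a/b/c" filter.
     */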
ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("abc"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/"))); aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_add_single_path, s_mqtt_subscription_set_add_single_path_fn) static int s_mqtt_subscription_set_add_overlapped_branching_paths_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "a/+/c"}, {.type = SSOT_ADD, .topic_filter = "a/b/c"}, {.type = SSOT_ADD, .topic_filter = "+/b/c"}, {.type = SSOT_ADD, .topic_filter = "+/+/+"}, {.type = SSOT_ADD, .topic_filter = "/"}, {.type = SSOT_ADD, .topic_filter = " "}, {.type = SSOT_ADD, .topic_filter = "a"}, {.type = SSOT_ADD, .topic_filter = "#"}, {.type = SSOT_ADD, .topic_filter = "a/#"}, {.type = SSOT_ADD, .topic_filter = "a/b/c/d/e/f/g"}, {.type = SSOT_ADD, .topic_filter = "a/b/c/"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/+/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/+"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("/"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str(" "))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/#"))); ASSERT_TRUE( aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e/f/g"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/+"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/+/b"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/c"))); 
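    /* the remaining negative checks target filters that were never added verbatim; even where an added
     * wildcard filter such as "#" or "a/#" would match them as publish topics, exact membership still fails */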
ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d"))); ASSERT_FALSE( aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e/f"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/b/b"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/+/+"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str(" /"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("/ "))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("b"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/"))); ASSERT_FALSE( aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e/f/g/"))); aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt_subscription_set_add_overlapped_branching_paths, s_mqtt_subscription_set_add_overlapped_branching_paths_fn) static int s_mqtt_subscription_set_remove_overlapping_path_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "a/b/c/d"}, {.type = SSOT_ADD, .topic_filter = "a/b/c"}, {.type = SSOT_ADD, .topic_filter = "a/b"}, {.type = SSOT_ADD, .topic_filter = "a/b/c/d/e"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("a/b/c")); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d")); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); 
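    /* removing "a/b/c/d" leaves the shallower "a/b" entry and the deeper "a/b/c/d/e" entry untouched, as the
     * next two checks confirm */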
ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("a/b")); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e")); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c/d/e"))); aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_remove_overlapping_path, s_mqtt_subscription_set_remove_overlapping_path_fn) static int s_mqtt_subscription_set_remove_branching_path_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "+/+/#"}, {.type = SSOT_ADD, .topic_filter = "#"}, {.type = SSOT_ADD, .topic_filter = "+/b/c"}, {.type = SSOT_ADD, .topic_filter = "+/+/c"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/#"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/c"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("+/b/c")); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/#"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/c"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("#")); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/#"))); 
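    /* "#" and "+/+/#" are distinct entries in the set: removing "#" must not disturb the "+/+/#" filter,
     * which the surrounding checks verify */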
ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/c"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("+/+/#")); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/b/c"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/c"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("+/+/c")); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/b/c"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("+/+/c"))); aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_remove_branching_path, s_mqtt_subscription_set_remove_branching_path_fn) static int s_mqtt_subscription_set_remove_invalid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "a/b/c"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("+/b/c")); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("a")); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("a/b")); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("#")); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("a/b/c")); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_remove_invalid, s_mqtt_subscription_set_remove_invalid_fn) static int s_mqtt_subscription_set_remove_empty_segments_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct 
aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); // Add '///' Subbed, Remove '///' NotSubbed struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "///"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("///"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("////"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("//"))); aws_mqtt_subscription_set_remove_subscription(subscription_set, aws_byte_cursor_from_c_str("///")); ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree(subscription_set, aws_byte_cursor_from_c_str("///"))); aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_remove_empty_segments, s_mqtt_subscription_set_remove_empty_segments_fn) static int s_mqtt_subscription_set_add_remove_repeated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); const char *topic_filters[] = {"+/+/#", "#", "+/+/c/d", "+/b/c", "+/+/c"}; size_t filter_count = AWS_ARRAY_SIZE(topic_filters); for (size_t i = 0; i < filter_count; ++i) { for (size_t j = 0; j < filter_count; ++j) { /* * this does not cover all permutations (n!) 
but lacking a permutation generator, this gets "reasonable" * coverage (n * n) */ /* Add the topic filters in a shifting sequence */ for (size_t add_index = 0; add_index < filter_count; ++add_index) { size_t final_index = (add_index + j) % filter_count; struct aws_mqtt_subscription_set_subscription_options subscription_options = { .topic_filter = aws_byte_cursor_from_c_str(topic_filters[final_index]), }; aws_mqtt_subscription_set_add_subscription(subscription_set, &subscription_options); } /* One-by-one, remove the topic filters in an independent shifting sequence */ for (size_t remove_index = 0; remove_index < filter_count; ++remove_index) { size_t final_remove_index = (remove_index + i) % filter_count; aws_mqtt_subscription_set_remove_subscription( subscription_set, aws_byte_cursor_from_c_str(topic_filters[final_remove_index])); for (size_t validate_index = 0; validate_index < filter_count; ++validate_index) { size_t final_validate_index = (validate_index + i) % filter_count; if (validate_index <= remove_index) { ASSERT_FALSE(aws_mqtt_subscription_set_is_in_topic_tree( subscription_set, aws_byte_cursor_from_c_str(topic_filters[final_validate_index]))); } else { ASSERT_TRUE(aws_mqtt_subscription_set_is_in_topic_tree( subscription_set, aws_byte_cursor_from_c_str(topic_filters[final_validate_index]))); } } } } } aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_add_remove_repeated, s_mqtt_subscription_set_add_remove_repeated_fn) static int s_mqtt_subscription_set_publish_single_path_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "a/b/c/d"}, {.type = SSOT_ADD, .topic_filter = "a"}, {.type = SSOT_ADD, .topic_filter = "a/b"}, {.type = SSOT_PUBLISH, .topic = "a/b/c"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); struct subscription_test_context_callback_record expected_callback_records[] = { {.topic = aws_byte_cursor_from_c_str("a"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("a/b/c/d"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("a/b"), .callback_count = 1}, }; ASSERT_SUCCESS(s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, expected_callback_records, 0)); for (size_t i = 0; i < AWS_ARRAY_SIZE(expected_callback_records); ++i) { struct aws_mqtt_subscription_set_publish_received_options publish_options = { .topic = expected_callback_records[i].topic, }; aws_mqtt_subscription_set_on_publish_received(subscription_set, &publish_options); ASSERT_SUCCESS( s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, expected_callback_records, i + 1)); } aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_publish_single_path, s_mqtt_subscription_set_publish_single_path_fn) static int s_mqtt_subscription_set_publish_multi_path_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct 
aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "a/b/c/d"}, {.type = SSOT_ADD, .topic_filter = "a/b/d"}, {.type = SSOT_ADD, .topic_filter = "b"}, {.type = SSOT_ADD, .topic_filter = "c/d"}, {.type = SSOT_ADD, .topic_filter = "a/c"}, {.type = SSOT_PUBLISH, .topic = "a"}, {.type = SSOT_PUBLISH, .topic = "a/b"}, {.type = SSOT_PUBLISH, .topic = "a/b/c"}, {.type = SSOT_PUBLISH, .topic = "a/b/c/d/"}, {.type = SSOT_PUBLISH, .topic = "b/c/d/"}, {.type = SSOT_PUBLISH, .topic = "c"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); struct subscription_test_context_callback_record expected_callback_records[] = { {.topic = aws_byte_cursor_from_c_str("a/b/d"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("c/d"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("a/b/c/d"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("b"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("a/c"), .callback_count = 1}, }; ASSERT_SUCCESS(s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, NULL, 0)); for (size_t i = 0; i < AWS_ARRAY_SIZE(expected_callback_records); ++i) { struct aws_mqtt_subscription_set_publish_received_options publish_options = { .topic = expected_callback_records[i].topic, }; aws_mqtt_subscription_set_on_publish_received(subscription_set, &publish_options); ASSERT_SUCCESS( s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, expected_callback_records, i + 1)); } aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_publish_multi_path, s_mqtt_subscription_set_publish_multi_path_fn) static int s_mqtt_subscription_set_publish_single_level_wildcards_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "a/b/c"}, {.type = SSOT_ADD, .topic_filter = "+/b/c"}, {.type = SSOT_ADD, .topic_filter = "a/+/c"}, {.type = SSOT_ADD, .topic_filter = "a/b/+"}, {.type = SSOT_ADD, .topic_filter = "+/+/+"}, {.type = SSOT_ADD, .topic_filter = "a/+/+"}, {.type = SSOT_PUBLISH, .topic = "a"}, {.type = SSOT_PUBLISH, .topic = "a/b"}, {.type = SSOT_PUBLISH, .topic = "a/b/c/d"}, {.type = SSOT_PUBLISH, .topic = "a/b/c/d/"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); struct subscription_test_context_callback_record expected_callback_records[] = { {.topic = aws_byte_cursor_from_c_str("a/b/d"), .callback_count = 3}, {.topic = aws_byte_cursor_from_c_str("b/c/d"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("a/c/d"), .callback_count = 2}, {.topic = aws_byte_cursor_from_c_str("c/b/c"), .callback_count = 2}, {.topic = aws_byte_cursor_from_c_str("a/b/c"), .callback_count = 6}, }; ASSERT_SUCCESS(s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, NULL, 0)); for (size_t i = 0; i 
< AWS_ARRAY_SIZE(expected_callback_records); ++i) { struct aws_mqtt_subscription_set_publish_received_options publish_options = { .topic = expected_callback_records[i].topic, }; aws_mqtt_subscription_set_on_publish_received(subscription_set, &publish_options); ASSERT_SUCCESS( s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, expected_callback_records, i + 1)); } aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt_subscription_set_publish_single_level_wildcards, s_mqtt_subscription_set_publish_single_level_wildcards_fn) static int s_mqtt_subscription_set_publish_multi_level_wildcards_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "#"}, {.type = SSOT_ADD, .topic_filter = "a/#"}, {.type = SSOT_ADD, .topic_filter = "a/b/c/#"}, {.type = SSOT_ADD, .topic_filter = "/#"}, }; s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); struct subscription_test_context_callback_record expected_callback_records[] = { {.topic = aws_byte_cursor_from_c_str("b"), .callback_count = 1}, {.topic = aws_byte_cursor_from_c_str("/"), .callback_count = 2}, {.topic = aws_byte_cursor_from_c_str("a"), .callback_count = 2}, {.topic = aws_byte_cursor_from_c_str("a/b"), .callback_count = 2}, {.topic = aws_byte_cursor_from_c_str("a/b/c"), .callback_count = 3}, {.topic = aws_byte_cursor_from_c_str("a/b/c/d"), .callback_count = 3}, {.topic = aws_byte_cursor_from_c_str("/x/y/z"), .callback_count = 2}, }; ASSERT_SUCCESS(s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, NULL, 0)); for (size_t i = 0; i < AWS_ARRAY_SIZE(expected_callback_records); ++i) { struct aws_mqtt_subscription_set_publish_received_options publish_options = { .topic = expected_callback_records[i].topic, }; aws_mqtt_subscription_set_on_publish_received(subscription_set, &publish_options); ASSERT_SUCCESS( s_aws_mqtt_subscription_set_test_context_validate_callbacks(&context, expected_callback_records, i + 1)); } aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE( mqtt_subscription_set_publish_multi_level_wildcards, s_mqtt_subscription_set_publish_multi_level_wildcards_fn) static int s_mqtt_subscription_set_get_subscriptions_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_mqtt_library_init(allocator); struct aws_mqtt_subscription_set_test_context context; s_aws_mqtt_subscription_set_test_context_init(&context, allocator); struct aws_mqtt_subscription_set *subscription_set = aws_mqtt_subscription_set_new(allocator); struct subscription_set_operation operations[] = { {.type = SSOT_ADD, .topic_filter = "#"}, {.type = SSOT_ADD, .topic_filter = "a/#"}, {.type = SSOT_ADD, .topic_filter = "a/b/c/#"}, {.type = SSOT_ADD, .topic_filter = "/#"}, {.type = SSOT_ADD, .topic_filter = "/"}, {.type = SSOT_ADD, .topic_filter = "a/b/c"}, {.type = SSOT_ADD, .topic_filter = "a/#"}, {.type = SSOT_REMOVE, .topic_filter = "/#"}, {.type = SSOT_REMOVE, .topic_filter = "/"}, }; 
s_subscription_set_perform_operations(&context, subscription_set, operations, AWS_ARRAY_SIZE(operations)); ASSERT_TRUE(aws_mqtt_subscription_set_is_subscribed(subscription_set, aws_byte_cursor_from_c_str("#"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_subscribed(subscription_set, aws_byte_cursor_from_c_str("a/#"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_subscribed(subscription_set, aws_byte_cursor_from_c_str("a/b/c/#"))); ASSERT_TRUE(aws_mqtt_subscription_set_is_subscribed(subscription_set, aws_byte_cursor_from_c_str("a/b/c"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_subscribed(subscription_set, aws_byte_cursor_from_c_str("/#"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_subscribed(subscription_set, aws_byte_cursor_from_c_str("/"))); ASSERT_FALSE(aws_mqtt_subscription_set_is_subscribed(subscription_set, aws_byte_cursor_from_c_str("a"))); struct aws_array_list subscriptions; aws_mqtt_subscription_set_get_subscriptions(subscription_set, &subscriptions); size_t subscription_count = aws_array_list_length(&subscriptions); ASSERT_INT_EQUALS(4, subscription_count); for (size_t i = 0; i < subscription_count; ++i) { struct aws_mqtt_subscription_set_subscription_options subscription; aws_array_list_get_at(&subscriptions, &subscription, i); ASSERT_TRUE(aws_mqtt_subscription_set_is_subscribed(subscription_set, subscription.topic_filter)); } aws_array_list_clean_up(&subscriptions); aws_mqtt_subscription_set_destroy(subscription_set); s_aws_mqtt_subscription_set_test_context_clean_up(&context); aws_mqtt_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(mqtt_subscription_set_get_subscriptions, s_mqtt_subscription_set_get_subscriptions_fn)aws-crt-python-0.20.4+dfsg/crt/aws-c-mqtt/tests/v5/rate_limiter_tests.c000066400000000000000000000364241456575232400257700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include static uint64_t s_test_time = 0; static int s_get_test_time(uint64_t *time) { *time = s_test_time; return AWS_OP_SUCCESS; } static int s_rate_limiter_token_bucket_init_invalid_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_rate_limiter_token_bucket token_bucket; struct aws_rate_limiter_token_bucket_options invalid_options1 = { .clock_fn = s_get_test_time, .tokens_per_second = 0, .maximum_token_count = 1, }; ASSERT_FAILS(aws_rate_limiter_token_bucket_init(&token_bucket, &invalid_options1)); struct aws_rate_limiter_token_bucket_options invalid_options2 = { .clock_fn = s_get_test_time, .tokens_per_second = 1000, .maximum_token_count = 0, }; ASSERT_FAILS(aws_rate_limiter_token_bucket_init(&token_bucket, &invalid_options2)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rate_limiter_token_bucket_init_invalid, s_rate_limiter_token_bucket_init_invalid_fn) static int s_rate_limiter_token_bucket_regeneration_integral_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_rate_limiter_token_bucket_options options = { .clock_fn = s_get_test_time, .tokens_per_second = 5, .maximum_token_count = 10, }; struct aws_rate_limiter_token_bucket token_bucket; ASSERT_SUCCESS(aws_rate_limiter_token_bucket_init(&token_bucket, &options)); ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 0)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 1)); ASSERT_SUCCESS(aws_rate_limiter_token_bucket_take_tokens(&token_bucket, 0)); ASSERT_FAILS(aws_rate_limiter_token_bucket_take_tokens(&token_bucket, 1)); ASSERT_INT_EQUALS(0, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 0)); ASSERT_INT_EQUALS( 2 * AWS_TIMESTAMP_NANOS, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 10)); /* one second elapsed, should be able to take 5 tokens now */ s_test_time = AWS_TIMESTAMP_NANOS; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 5)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 6)); ASSERT_INT_EQUALS(AWS_TIMESTAMP_NANOS, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 10)); ASSERT_INT_EQUALS(0, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 1)); ASSERT_INT_EQUALS(0, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); ASSERT_SUCCESS(aws_rate_limiter_token_bucket_take_tokens(&token_bucket, 5)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 1)); ASSERT_FAILS(aws_rate_limiter_token_bucket_take_tokens(&token_bucket, 1)); ASSERT_INT_EQUALS( 2 * AWS_TIMESTAMP_NANOS, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 10)); /* three more elapsed seconds, regen should be maxed but clamped */ s_test_time += 3 * (uint64_t)AWS_TIMESTAMP_NANOS; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 10)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 11)); ASSERT_INT_EQUALS(0, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 10)); ASSERT_SUCCESS(aws_rate_limiter_token_bucket_take_tokens(&token_bucket, 10)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 1)); ASSERT_FAILS(aws_rate_limiter_token_bucket_take_tokens(&token_bucket, 1)); ASSERT_INT_EQUALS( 2 * (uint64_t)AWS_TIMESTAMP_NANOS, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 10)); return AWS_OP_SUCCESS; } 
AWS_TEST_CASE(rate_limiter_token_bucket_regeneration_integral, s_rate_limiter_token_bucket_regeneration_integral_fn) static int s_rate_limiter_token_bucket_regeneration_fractional_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_rate_limiter_token_bucket_options options = { .clock_fn = s_get_test_time, .initial_token_count = 2, .tokens_per_second = 3, .maximum_token_count = 20, }; struct aws_rate_limiter_token_bucket token_bucket; ASSERT_SUCCESS(aws_rate_limiter_token_bucket_init(&token_bucket, &options)); uint64_t initial_wait_for_3 = AWS_TIMESTAMP_NANOS / 3 + 1; uint64_t initial_wait_for_5 = AWS_TIMESTAMP_NANOS; uint64_t initial_wait_for_6 = AWS_TIMESTAMP_NANOS + AWS_TIMESTAMP_NANOS / 3 + 1; uint64_t initial_wait_for_7 = AWS_TIMESTAMP_NANOS + AWS_TIMESTAMP_NANOS / 3 * 2 + 1; uint64_t initial_wait_for_8 = 2 * AWS_TIMESTAMP_NANOS; uint64_t initial_wait_for_11 = 3 * (uint64_t)AWS_TIMESTAMP_NANOS; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 2)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS(initial_wait_for_3, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS(initial_wait_for_5, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); ASSERT_INT_EQUALS(initial_wait_for_6, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 6)); ASSERT_INT_EQUALS(initial_wait_for_7, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 7)); ASSERT_INT_EQUALS(initial_wait_for_8, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 8)); /* one nanosecond elapsed, wait should shrink by 1 */ s_test_time = 1; ASSERT_INT_EQUALS( initial_wait_for_3 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); /* idempotent check */ ASSERT_INT_EQUALS( initial_wait_for_3 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS( initial_wait_for_5 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); ASSERT_INT_EQUALS( initial_wait_for_6 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 6)); ASSERT_INT_EQUALS( initial_wait_for_7 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 7)); ASSERT_INT_EQUALS( initial_wait_for_8 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 8)); s_test_time = 2; ASSERT_INT_EQUALS( initial_wait_for_3 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS( initial_wait_for_5 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); ASSERT_INT_EQUALS( initial_wait_for_6 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 6)); ASSERT_INT_EQUALS( initial_wait_for_7 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 7)); ASSERT_INT_EQUALS( initial_wait_for_8 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 8)); /* one nanosecond short of a token's worth of time, nothing should change */ s_test_time = AWS_TIMESTAMP_NANOS / 3; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 2)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS( initial_wait_for_3 - s_test_time, 
aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS( initial_wait_for_5 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); ASSERT_INT_EQUALS( initial_wait_for_6 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 6)); ASSERT_INT_EQUALS( initial_wait_for_7 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 7)); ASSERT_INT_EQUALS( initial_wait_for_8 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 8)); /* one more nanosecond, should give us a token */ s_test_time += 1; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 3)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 4)); ASSERT_INT_EQUALS( initial_wait_for_3 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS( initial_wait_for_5 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); ASSERT_INT_EQUALS( initial_wait_for_6 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 6)); ASSERT_INT_EQUALS( initial_wait_for_7 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 7)); ASSERT_INT_EQUALS( initial_wait_for_8 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 8)); /* now let's do multi-second plus fractional */ s_test_time += (uint64_t)AWS_TIMESTAMP_NANOS * 2 + AWS_TIMESTAMP_NANOS / 2; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 10)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 11)); ASSERT_INT_EQUALS(0, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 10)); ASSERT_INT_EQUALS( initial_wait_for_11 - s_test_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 11)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rate_limiter_token_bucket_regeneration_fractional, s_rate_limiter_token_bucket_regeneration_fractional_fn) #define REGENERATION_INTERVAL 9973 static int s_rate_limiter_token_bucket_fractional_iteration_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_rate_limiter_token_bucket_options options = { .clock_fn = s_get_test_time, .initial_token_count = 0, .tokens_per_second = 7, .maximum_token_count = 100, }; s_test_time = 47; struct aws_rate_limiter_token_bucket token_bucket; ASSERT_SUCCESS(aws_rate_limiter_token_bucket_init(&token_bucket, &options)); size_t iterations = 2 * AWS_TIMESTAMP_NANOS / REGENERATION_INTERVAL + 3; uint64_t expected_wait_time = (uint64_t)3 * AWS_TIMESTAMP_NANOS; for (size_t i = 0; i < iterations; ++i) { ASSERT_INT_EQUALS(expected_wait_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 21)); s_test_time += REGENERATION_INTERVAL; expected_wait_time -= REGENERATION_INTERVAL; } return AWS_OP_SUCCESS; } AWS_TEST_CASE(rate_limiter_token_bucket_fractional_iteration, s_rate_limiter_token_bucket_fractional_iteration_fn) #define LARGE_REGENERATION_INTERVAL (43 * (uint64_t)AWS_TIMESTAMP_NANOS + AWS_TIMESTAMP_NANOS / 13) static int s_rate_limiter_token_bucket_large_fractional_iteration_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_rate_limiter_token_bucket_options options = { .clock_fn = s_get_test_time, .initial_token_count = 0, .tokens_per_second = 7, .maximum_token_count = 100000, }; s_test_time = 47; struct 
aws_rate_limiter_token_bucket token_bucket; ASSERT_SUCCESS(aws_rate_limiter_token_bucket_init(&token_bucket, &options)); uint64_t expected_wait_time = aws_timestamp_convert(1001, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); while (expected_wait_time >= LARGE_REGENERATION_INTERVAL) { ASSERT_INT_EQUALS( expected_wait_time, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 7007)); s_test_time += LARGE_REGENERATION_INTERVAL; expected_wait_time -= LARGE_REGENERATION_INTERVAL; } return AWS_OP_SUCCESS; } AWS_TEST_CASE( rate_limiter_token_bucket_large_fractional_iteration, s_rate_limiter_token_bucket_large_fractional_iteration_fn) #define TOKEN_REGENERATION_RATE_REAL 111111 static int s_rate_limiter_token_bucket_real_iteration_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_rate_limiter_token_bucket_options options = { .initial_token_count = 0, .tokens_per_second = TOKEN_REGENERATION_RATE_REAL, .maximum_token_count = 100, }; struct aws_rate_limiter_token_bucket token_bucket; ASSERT_SUCCESS(aws_rate_limiter_token_bucket_init(&token_bucket, &options)); uint64_t start_time = 0; aws_high_res_clock_get_ticks(&start_time); uint64_t tokens_taken = 0; while (tokens_taken < TOKEN_REGENERATION_RATE_REAL * 3) { uint64_t wait = aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 1); if (wait > 0) { aws_thread_current_sleep(wait); } if (!aws_rate_limiter_token_bucket_take_tokens(&token_bucket, 1)) { ++tokens_taken; } } uint64_t end_time = 0; aws_high_res_clock_get_ticks(&end_time); uint64_t elapsed_time = end_time - start_time; uint64_t expected_elapsed = aws_timestamp_convert(3, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); ASSERT_TRUE(elapsed_time > (uint64_t)(expected_elapsed * .99)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rate_limiter_token_bucket_real_iteration, s_rate_limiter_token_bucket_real_iteration_fn) static int s_rate_limiter_token_bucket_reset_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; (void)allocator; struct aws_rate_limiter_token_bucket_options options = { .clock_fn = s_get_test_time, .initial_token_count = 2, .tokens_per_second = 3, .maximum_token_count = 20, }; struct aws_rate_limiter_token_bucket token_bucket; ASSERT_SUCCESS(aws_rate_limiter_token_bucket_init(&token_bucket, &options)); uint64_t initial_wait_for_3 = AWS_TIMESTAMP_NANOS / 3 + 1; uint64_t initial_wait_for_5 = AWS_TIMESTAMP_NANOS; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 2)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS(initial_wait_for_3, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS(initial_wait_for_5, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); s_test_time = AWS_TIMESTAMP_NANOS * 2 + 1; ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 8)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 9)); aws_rate_limiter_token_bucket_reset(&token_bucket); ASSERT_TRUE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 2)); ASSERT_FALSE(aws_rate_limiter_token_bucket_can_take_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS(initial_wait_for_3, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 3)); ASSERT_INT_EQUALS(initial_wait_for_5, aws_rate_limiter_token_bucket_compute_wait_for_tokens(&token_bucket, 5)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(rate_limiter_token_bucket_reset, s_rate_limiter_token_bucket_reset_fn) 
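The expected wait values asserted in the token-bucket tests above all follow from one piece of arithmetic: the wait is the token deficit divided by the regeneration rate, rounded up to the next nanosecond. The standalone sketch below reproduces that arithmetic; it illustrates what the test expectations encode, it is not the library's internal implementation, and the names in it are illustrative only.
#include <stdint.h>

#define ILLUSTRATIVE_NANOS_PER_SECOND 1000000000ULL

/* Nanoseconds until `deficit` more tokens have regenerated at `tokens_per_second`, rounded up. */
static uint64_t s_illustrative_wait_ns(uint64_t deficit, uint64_t tokens_per_second) {
    return (deficit * ILLUSTRATIVE_NANOS_PER_SECOND + tokens_per_second - 1) / tokens_per_second;
}

/*
 * With 2 tokens on hand and tokens_per_second = 3, as in the fractional regeneration test:
 *   waiting for 3 tokens -> deficit 1 -> 333,333,334 ns   == AWS_TIMESTAMP_NANOS / 3 + 1
 *   waiting for 5 tokens -> deficit 3 -> 1,000,000,000 ns == AWS_TIMESTAMP_NANOS
 *   waiting for 6 tokens -> deficit 4 -> 1,333,333,334 ns == AWS_TIMESTAMP_NANOS + AWS_TIMESTAMP_NANOS / 3 + 1
 *   waiting for 8 tokens -> deficit 6 -> 2,000,000,000 ns == 2 * AWS_TIMESTAMP_NANOS
 * which matches the constants asserted in the tests above.
 */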
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/000077500000000000000000000000001456575232400175555ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.builder/000077500000000000000000000000001456575232400212615ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.builder/actions/000077500000000000000000000000001456575232400227215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.builder/actions/mock_server_setup.py000066400000000000000000000031351456575232400270340ustar00rootroot00000000000000""" Setup local mock server for tests """ import Builder import os import sys import subprocess import atexit class MockServerSetup(Builder.Action): """ Set up this machine for running the mock server test This action should be run in the 'pre_build_steps' or 'build_steps' stage. """ def run(self, env): if not env.project.needs_tests(env): print("Skipping mock server setup because tests disabled for project") return self.env = env python_path = sys.executable # install dependency for mock server self.env.shell.exec(python_path, '-m', 'pip', 'install', 'h11', 'trio', 'proxy.py', check=True) # check the deps can be import correctly self.env.shell.exec(python_path, '-c', 'import h11, trio', check=True) # set cmake flag so mock server tests are enabled env.project.config['cmake_args'].extend( ['-DENABLE_MOCK_SERVER_TESTS=ON', '-DASSERT_LOCK_HELD=ON']) base_dir = os.path.dirname(os.path.realpath(__file__)) dir = os.path.join(base_dir, "..", "..", "tests", "mock_s3_server") p1 = subprocess.Popen([python_path, "mock_s3_server.py"], cwd=dir) try: p2 = subprocess.Popen("proxy", cwd=dir) except Exception as e: # Okay for proxy to fail starting up as it may not be in the path print(e) p2 = None @atexit.register def close_mock_server(): p1.terminate() if p2 != None: p2.terminate() aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.clang-format000066400000000000000000000031611456575232400221310ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: 
Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.clang-tidy000066400000000000000000000013651456575232400216160ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter,-readability-isolate-declaration' WarningsAsErrors: '*' HeaderFilterRegex: '.*(? packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-22.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} --cmake-extra=-DASSERT_LOCK_HELD=ON --cmake-extra=-DAWS_ENABLE_S3_ENDPOINT_RESOLVER=ON clang-sanitizers: runs-on: ubuntu-22.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" --cmake-extra=-DASSERT_LOCK_HELD=ON linux-shared-libs: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON --cmake-extra=-DASSERT_LOCK_HELD=ON byo-crypto: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBYO_CRYPTO=ON --cmake-extra=-DASSERT_LOCK_HELD=ON windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST 
}}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DASSERT_LOCK_HELD=ON windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --cmake-extra=-DASSERT_LOCK_HELD=ON --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON --cmake-extra=-DASSERT_LOCK_HELD=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-s3\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-s3\build\aws-c-s3 osx: runs-on: macos-13 # latest steps: - name: Checkout Sources uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DASSERT_LOCK_HELD=ON # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. 
downstream: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} linux-debug: runs-on: ubuntu-22.04 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DASSERT_LOCK_HELD=ON --config Debug aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400262560ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400277210ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.github/workflows/codecov.yml000066400000000000000000000017621456575232400253250ustar00rootroot00000000000000name: Code coverage check on: push: env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-s3 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_REGION: us-east-1 jobs: codecov-linux: runs-on: ubuntu-22.04 steps: - name: Checkout Sources uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --compiler=gcc-9 --cmake-extra=-DASSERT_LOCK_HELD=ON --coverage --coverage-exclude=source/s3_copy_object.c aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400306110ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.github/workflows/stale_issue.yml000066400000000000000000000046321456575232400262220ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job permissions: issues: write pull-requests: write steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deservers. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. 
# These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/.gitignore000066400000000000000000000013131456575232400215430ustar00rootroot00000000000000# IDE Artifacts .metadata .vscode .build .idea *.d Debug Release *~ *# *.iml tags #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app # js package locks irrelevant to the overall package's security benchmarks/benchmarks-stack/benchmarks-stack/package-lock.json benchmarks/dashboard-stack/package-lock.json aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/CMakeLists.txt000066400000000000000000000077401456575232400223250ustar00rootroot00000000000000 cmake_minimum_required(VERSION 3.1) project(aws-c-s3 C) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() option(ASSERT_LOCK_HELD "Enable ASSERT_SYNCED_DATA_LOCK_HELD for checking thread issue" OFF) option(ENABLE_MOCK_SERVER_TESTS "Whether to run the integration tests that rely on pre-configured mock server" OFF) option(ENABLE_MRAP_TESTS "Whether to run the integration tests that rely on pre-configured multi-region access point" OFF) option(AWS_ENABLE_S3_ENDPOINT_RESOLVER "Whether to include the s3 endpoint resolver and related config files" OFF) if (ASSERT_LOCK_HELD) add_definitions(-DASSERT_LOCK_HELD) endif() if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(AwsFindPackage) file(GLOB AWS_S3_ROOT_HEADERS "include/aws/s3/*.h" ) file(GLOB AWS_S3_PRIVATE_HEADERS "include/aws/s3/private/*.h" ) file(GLOB AWS_S3_ROOT_SRC "source/*.c" ) file(GLOB AWS_S3_ENDPOINT_RESOLVER_SRC "source/s3_endpoint_resolver/*.c" ) if (WIN32) if (MSVC) source_group("Header Files\\aws\\s3" FILES 
${AWS_S3_HEADERS}) source_group("Source Files" FILES ${AWS_S3_SRC}) endif () endif() file(GLOB S3_HEADERS ${AWS_S3_ROOT_HEADERS} ${AWS_S3_PRIVATE_HEADERS} ${AWS_S3_EXTERNAL_HEADERS} ) file(GLOB S3_SRC ${AWS_S3_ROOT_SRC} ) if (AWS_ENABLE_S3_ENDPOINT_RESOLVER) list(APPEND S3_SRC ${AWS_S3_ENDPOINT_RESOLVER_SRC}) endif() add_library(${PROJECT_NAME} ${S3_HEADERS} ${S3_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_S3") aws_check_headers(${PROJECT_NAME} ${AWS_S3_ROOT_HEADERS}) aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 0unstable) target_compile_definitions(${PROJECT_NAME} PRIVATE -DCJSON_HIDE_SYMBOLS) if (AWS_ENABLE_S3_ENDPOINT_RESOLVER) target_compile_definitions(${PROJECT_NAME} PRIVATE "-DAWS_ENABLE_S3_ENDPOINT_RESOLVER") endif() target_include_directories(${PROJECT_NAME} PUBLIC $ $) aws_use_package(aws-c-auth) aws_use_package(aws-checksums) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) install(FILES ${AWS_S3_ROOT_HEADERS} DESTINATION "include/aws/s3" COMPONENT Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}/" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) include(CTest) if (BUILD_TESTING) add_subdirectory(tests) if (NOT BYO_CRYPTO AND NOT CMAKE_CROSSCOMPILING) add_subdirectory(samples) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/CODE_OF_CONDUCT.md000066400000000000000000000004651456575232400223610ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/CONTRIBUTING.md000066400000000000000000000067331456575232400220170ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-s3/issues), or [recently closed](https://github.com/awslabs/aws-c-s3/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. 
Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-s3/labels/help%20wanted) issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](https://github.com/awslabs/aws-c-s3/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/LICENSE000066400000000000000000000236361456575232400205720ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/NOTICE000066400000000000000000000001031456575232400204530ustar00rootroot00000000000000Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/README.md000066400000000000000000000115121456575232400210340ustar00rootroot00000000000000## AWS C S3 The AWS-C-S3 library is an asynchronous AWS S3 client focused on maximizing throughput and network utilization. ### Key features: - **Automatic Request Splitting**: Improves throughput by automatically splitting the request into part-sized chunks and performing parallel uploads/downloads of these chunks over multiple connections. There's a cap on the throughput of single S3 connection, the only way to go faster is multiple parallel connections. - **Automatic Retries**: Increases resilience by retrying individual failed chunks of a file transfer, eliminating the need to restart transfers from scratch after an intermittent error. - **DNS Load Balancing**: DNS resolver continuously harvests Amazon S3 IP addresses. When load is spread across the S3 fleet, overall throughput is better than if all connections were hammering the same IP simultaneously. - **Advanced Network Management**: The client incorporates automatic request parallelization, effective timeouts and retries, and efficient connection reuse. This approach helps to maximize throughput and network utilization, and to avoid network overloads. - **Thread Pools and Async I/O**: Avoids bottlenecks associated with single-thread processing. - **Parallel Reads**: When uploading a large file from disk, reads from multiple parts of the file in parallel. This is faster than reading the file sequentially from beginning to end. ### Documentation - [GetObject](docs/GetObject.md): A visual representation of the GetObject request flow. - [Memory Aware Requests Execution](docs/memory_aware_request_execution.md): An in-depth guide on optimizing memory usage during request executions. 
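### Request Splitting at a Glance

The "Automatic Request Splitting" feature described under Key features above comes down to turning one large transfer into many part-sized ranged requests that can run over parallel connections and be retried independently. The short Python sketch below only illustrates that idea; the 8 MiB part size, the `split_into_ranges` helper name, and the printed `GET bytes=...` lines are assumptions made for this example and are not the client's actual implementation.

```
# Illustrative sketch only -- not aws-c-s3's real code.
# Shows how a large object download could be expressed as part-sized ranged GETs.

def split_into_ranges(object_size, part_size=8 * 1024 * 1024):
    """Yield inclusive (start, end) byte ranges covering an object of object_size bytes."""
    for start in range(0, object_size, part_size):
        end = min(start + part_size, object_size) - 1
        yield start, end

if __name__ == "__main__":
    # A 20 MiB object becomes three ranged requests that could be issued
    # concurrently over separate connections and retried individually on failure.
    for start, end in split_into_ranges(20 * 1024 * 1024):
        print(f"GET bytes={start}-{end}")
```

Each emitted range corresponds to an HTTP `Range: bytes=start-end` request, which is why a single failed part can be retried without restarting the whole transfer.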
## License This library is licensed under the Apache 2.0 License. ## Usage ### Building CMake 3.1+ is required to build. `` must be an absolute path in the following instructions. #### Linux-Only Dependencies If you are building on Linux, you will need to build aws-lc and s2n-tls first. ``` git clone git@github.com:awslabs/aws-lc.git cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-lc/build --target install git clone git@github.com:aws/s2n-tls.git cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build s2n-tls/build --target install ``` #### Building aws-c-s3 and Remaining Dependencies ``` git clone git@github.com:awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-checksums.git cmake -S aws-checksums -B aws-checksums/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-checksums/build --target install git clone git@github.com:awslabs/aws-c-cal.git cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-cal/build --target install git clone git@github.com:awslabs/aws-c-io.git cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-io/build --target install git clone git@github.com:awslabs/aws-c-compression.git cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-compression/build --target install git clone git@github.com:awslabs/aws-c-http.git cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-http/build --target install git clone git@github.com:awslabs/aws-c-sdkutils.git cmake -S aws-c-sdkutils -B aws-c-sdkutils/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-sdkutils/build --target install git clone git@github.com:awslabs/aws-c-auth.git cmake -S aws-c-auth -B aws-c-auth/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-auth/build --target install git clone git@github.com:awslabs/aws-c-s3.git cmake -S aws-c-s3 -B aws-c-s3/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-s3/build --target install ``` #### Running S3 sample After installing all the dependencies, and building aws-c-s3, you can run the sample directly from the s3 build directory. To download: ``` aws-c-s3/build/samples/s3/s3 cp s3:/// --region ``` To upload: ``` aws-c-s3/build/samples/s3/s3 cp s3:/// --region ``` To list objects: ``` aws-c-s3/build/samples/s3/s3 ls s3:// --region ``` ## Testing The unit tests require an AWS account with S3 buckets set up in a particular way. Use the [test_helper script](./tests/test_helper/) to set this up. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/000077500000000000000000000000001456575232400216725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/README.md000066400000000000000000000100051456575232400231450ustar00rootroot00000000000000# S3 Benchmark CDK kit This will deploy an EC2 instance with the S3Canary on it, and will dump a run to CloudWatch when the instance boots. ## Pre-request * Node * npm 7.0+ * aws CDK 1.103.0+ (`npm install -g aws-cdk@1.103.0`) * AWS account with CDK (`cdk bootstrap aws:///`) ## Structures ### Dashboard-stack The stack deploys dashboard and all the other resources for Benchmarks-stack. 
It will deploy a CloudWatch event to trigger the Benchmarks-stack to run daily, and will clean it up after each test. Usually, users will need to deploy this stack manually with the following steps:

* `cd dashboard-stack/` to change directory to `dashboard-stack/`. If the current directory is not `aws-c-s3/benchmarks`, use the correct path instead.
* `npm install` to install the dependencies
* `npm run build` to compile TypeScript to JS
* `cdk deploy` to deploy this stack to your default AWS account/region (use the AWS CLI to set up the default AWS account via `aws configure`)

The stack uses the following resources:

* CodeBuild: Deploys the Benchmarks-stack.
* Lambda function: Invokes CodeBuild and automatically deletes the Benchmarks-stack after testing.
* CloudWatch Event: Schedules the benchmark test to run daily.
* EC2 VPC: Controls the VPC of the EC2 instances in the Benchmarks-stack.
* CloudWatch Dashboard: Logs and visualizes the performance test results.
* IAM roles: Permissions for the services.
* Key Management Service: Generates the key pair for the EC2 instances created by the Benchmarks-stack.
* Secrets Manager: Stores the generated key pair. To retrieve the generated key via the AWS CLI, run `aws secretsmanager get-secret-value --secret-id ec2-ssh-key/S3-EC2-Canary-key-pair/private`.

### Benchmarks-stack

The stack deploys the EC2 instance with the S3Canary on it, and will dump a run to CloudWatch when the instance boots. It is usually controlled by the dashboard-stack, so users don't need to touch anything in it.

### Configuration

The configuration for `benchmarks-stack` is listed here **(TO BE FINALIZED)**; these are the context values passed to cdk on deploy:

* StackName (string): Name of the stack to be created
* UserName (string): *Optional*, default: ec2-user
* ProjectName (string): The project the BenchmarkStack runs on.
* CIDRRange (string): *Optional* The inbound IP range for the EC2 instances created by the stack.
* InstanceConfigName (string): The EC2 instance type to create
* ThroughputGbps (string): The throughput target, in Gbps, as a string
* AutoTearDown (1 or 0): Whether to tear down the benchmarks stack after the test or not, default: 1

The configuration for `benchmark-config.json` is listed here **(TO BE FINALIZED)**; it defines:

* `projects`: the name of the project to run, e.g. `aws-crt-java`, which will run the performance test within aws-crt-java, found [here](https://github.com/awslabs/aws-crt-java).
* Each project contains configuration for the project:
* `shell_script`: the script that runs the test.
* `branch`: the branch of the repo to test against.
* `s3_bucket_name`: the S3 bucket to use for the test
* `instances`: The EC2 instance type to create
* `throughput_gbps`: the target throughput to test against
* `auto-tear-down`: Whether to tear down the tests automatically after they run or not.
* `key-pair-name`: Set to the name of an existing EC2 key pair for the EC2 instance to use; if not set, CDK will create one, and it can be accessed via the AWS CLI with `aws secretsmanager get-secret-value --secret-id ec2-ssh-key/S3-EC2-Canary-key-pair/private`

The dashboard-stack has been deployed to the AWS account associated with aws-sdk-common-runtime@amazon.com in us-west-2. If you have access to that account and need to update the config (e.g. to test performance from another branch), you can update `aws-c-s3/benchmarks/benchmark-config.json` and rerun the deploy steps for the dashboard stack as shown above.

Note: Once you deploy the stack, the benchmark stack will be triggered by an event that runs once per day.
If you want to trigger the benchmark stack to run performance test, you can manually start the codebuild job that associated with the dashboard stack. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmark-config.json000066400000000000000000000004331456575232400257620ustar00rootroot00000000000000{ "projects": { "aws-crt-java": { "shell_script": "project_scripts/run_java_crt.sh", "branch": "main" } }, "instances": { "c5n.18xlarge": { "throughput_gbps": 100 } }, "auto-tear-down": true, "key-pair-name": "aws-common-runtime-keys" } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/000077500000000000000000000000001456575232400251125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/000077500000000000000000000000001456575232400303325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/.gitignore000066400000000000000000000003141456575232400323200ustar00rootroot00000000000000*.js !benchmarks_deploy.js !jest.config.js *.d.ts node_modules # CDK asset staging directory .cdk.staging cdk.out # The config will be copied from upper level lib/benchmark-config.json cdk.context.json aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/.npmignore000066400000000000000000000001011456575232400323210ustar00rootroot00000000000000*.ts !*.d.ts # CDK asset staging directory .cdk.staging cdk.out aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/bin/000077500000000000000000000000001456575232400311025ustar00rootroot00000000000000benchmarks-stack.ts000066400000000000000000000011061456575232400346110ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/bin#!/usr/bin/env node import 'source-map-support/register'; import * as cdk from '@aws-cdk/core'; import { BenchmarksStack } from '../lib/benchmarks-stack'; const app = new cdk.App(); const base_stack_name = app.node.tryGetContext('StackName') as string; let benchmarks_stack_name = 'BenchmarksStack' if (base_stack_name != null) { benchmarks_stack_name = benchmarks_stack_name + '-' + base_stack_name; } new BenchmarksStack(app, 'BenchmarksStack', { stackName: benchmarks_stack_name, env: { region: process.env.CDK_DEFAULT_REGION, account: process.env.CDK_DEFAULT_ACCOUNT } }); aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/buildspec.yml000066400000000000000000000004541456575232400330320ustar00rootroot00000000000000version: 0.2 phases: build: commands: - echo Build started on `date` - node --version - npm i -g npm - npm --version - npm install - npm run build post_build: commands: - echo Build completed on `date` - node ./deploy/benchmarks_deploy.js aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/cdk.json000066400000000000000000000012611456575232400317660ustar00rootroot00000000000000{ "app": "npx ts-node --prefer-ts-exts bin/benchmarks-stack.ts", "context": { "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, "@aws-cdk/core:enableStackNameDuplicates": "true", "aws-cdk:enableDiffNoFail": "true", "@aws-cdk/core:stackRelativeExports": "true", "@aws-cdk/aws-ecr-assets:dockerIgnoreSupport": true, "@aws-cdk/aws-secretsmanager:parseOwnedSecretName": true, "@aws-cdk/aws-kms:defaultKeyPolicies": true, "@aws-cdk/aws-s3:grantWriteWithoutAcl": true, "@aws-cdk/aws-ecs-patterns:removeDefaultDesiredCount": true, 
"@aws-cdk/aws-rds:lowercaseDbIdentifier": true, "@aws-cdk/aws-efs:defaultEncryptionAtRest": true } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/deploy/000077500000000000000000000000001456575232400316265ustar00rootroot00000000000000benchmarks_deploy.js000066400000000000000000000040431456575232400355770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/deployconst AWS = require("aws-sdk"); const { execSync } = require("child_process"); const path = require("path"); const fs = require("fs"); const benchmark_config_json = fs.readFileSync( path.join(__dirname, "..", "lib", "benchmark-config.json"), "utf8" ); const benchmark_config = JSON.parse(benchmark_config_json); let auto_tear_down = "1"; if (benchmark_config["auto-tear-down"] != undefined) { auto_tear_down = benchmark_config["auto-tear-down"] ? "1" : "0"; } const auto_tear_down_config = " -c AutoTearDown=" + auto_tear_down; // Configurations for the Stack. Keys listed as below: // - StackName (string): Name of the stack to be created // - UserName (string): default: ec2-user // - ProjectName (string): The project BenchmarkStack runs on. eg: aws-crt-java // - CIDRRange (string): The inbound IP range for the ec2 instances created by the stack. // - InstanceConfigName (string): The ec2 instance type to create, default: c5n.18xlarge // - ThroughputGbps (string): String of the thought put target in Gbps, default: 100 // - AutoTearDown (1 or 0): Whether to tear down the benchmarks stack after test or not, default: 1 for (let project_name in benchmark_config.projects) { const project_name_config = " -c ProjectName=" + project_name; let instance_count = 0; for (let instance_config_name in benchmark_config.instances) { instance_count++; const instance_config = benchmark_config.instances[instance_config_name]; const instance_config_name_config = " -c InstanceConfigName=" + instance_config_name; const throuphput_gbps_config = " -c ThroughputGbps=" + instance_config["throughput_gbps"]; let cmd = "./node_modules/aws-cdk/bin/cdk deploy -v --require-approval never -c UserName=ec2-user"; const name_config = " -c StackName=" + project_name + "-instance-" + instance_count; cmd = cmd + project_name_config + instance_config_name_config + throuphput_gbps_config + name_config + auto_tear_down_config; result = execSync(cmd).toString(); console.log(result); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/jest.config.js000066400000000000000000000002021456575232400330730ustar00rootroot00000000000000module.exports = { roots: ['/test'], testMatch: ['**/*.test.ts'], transform: { '^.+\\.tsx?$': 'ts-jest' } }; aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib/000077500000000000000000000000001456575232400311005ustar00rootroot00000000000000benchmarks-stack.ts000066400000000000000000000134561456575232400346220ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/libimport * as cdk from '@aws-cdk/core'; import * as s3 from '@aws-cdk/aws-s3'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as assets from '@aws-cdk/aws-s3-assets'; import * as iam from '@aws-cdk/aws-iam'; import * as path from 'path'; import * as fs from 'fs'; export class BenchmarksStack extends cdk.Stack { constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) { super(scope, id, props); // Set by the lambda function running the stack const user_name = 
this.node.tryGetContext('UserName') as string; const project_name = this.node.tryGetContext('ProjectName') as string; const cidr = this.node.tryGetContext('CIDRRange') as string; const instance_config_name = this.node.tryGetContext('InstanceConfigName') as string; const throughput_gbps = this.node.tryGetContext('ThroughputGbps') as string; const auto_tear_down = this.node.tryGetContext('AutoTearDown') as string; const benchmark_config_json = fs.readFileSync(path.join(__dirname, 'benchmark-config.json'), 'utf8') const benchmark_config = JSON.parse(benchmark_config_json) const project_config = benchmark_config.projects[project_name]; const branch_name = project_config.branch; let region = 'unknown'; if (props != undefined && props.env != undefined && props.env.region != undefined) { region = props.env.region; } const init_instance_sh = new assets.Asset(this, 'init_instance.sh', { path: path.join(__dirname, 'init_instance.sh') }); const show_instance_dashboard_sh = new assets.Asset(this, 'show_instance_dashboard.sh', { path: path.join(__dirname, 'show_instance_dashboard.sh') }); const run_project_template_sh = new assets.Asset(this, 'run_project_template.sh', { path: path.join(__dirname, 'run_project_template.sh') }); const get_p90_py = new assets.Asset(this, 'get_p90.py', { path: path.join(__dirname, 'get_p90.py') }); const project_shell_script_sh = new assets.Asset(this, project_config.shell_script, { path: path.join(__dirname, project_config.shell_script) }); const assetBucket = s3.Bucket.fromBucketName(this, 'AssetBucket', init_instance_sh.s3BucketName) /** * This bucket already exists in the aws crt team account. * It has lifetime rules to delete objects older than a day. * If you are trying to run this stack in a different account, * you will have to create a bucket and change this variable name to the bucket name. */ const canaryBucketName = "crt-s3canary-bucket-123124136734"; const vpc = ec2.Vpc.fromLookup(this, 'VPC', { tags: { 'S3CanaryResources': 'VPC' } }); const subnetSelection: ec2.SubnetSelection = { subnets: vpc.publicSubnets }; const security_group = new ec2.SecurityGroup(this, 'SecurityGroup', { vpc: vpc, }); security_group.addIngressRule( cidr ? 
ec2.Peer.ipv4(cidr) : ec2.Peer.anyIpv4(), ec2.Port.tcp(22), 'SSH' ); const policy_doc_json_path = path.join( __dirname, "canary-policy-doc.json" ); const policy_doc_json = fs.readFileSync(policy_doc_json_path, 'utf8'); const policy_doc_obj = JSON.parse(policy_doc_json); const policy_doc = iam.PolicyDocument.fromJson(policy_doc_obj); const canary_role = new iam.Role(this, 'EC2CanaryRole', { assumedBy: new iam.ServicePrincipal("ec2.amazonaws.com"), inlinePolicies: { "CanaryEC2Policy": policy_doc } }); const project_run_commands = [ "DOWNLOAD_PERFORMANCE", "UPLOAD_PERFORMANCE", ]; const key_name = benchmark_config["key-pair-name"]; for (let run_command_index in project_run_commands) { const run_command = project_run_commands[run_command_index]; const instance_user_data = ec2.UserData.forLinux(); const init_instance_sh_path = instance_user_data.addS3DownloadCommand({ bucket: assetBucket, bucketKey: init_instance_sh.s3ObjectKey }); const show_instance_dashboard_sh_path = instance_user_data.addS3DownloadCommand({ bucket: assetBucket, bucketKey: show_instance_dashboard_sh.s3ObjectKey }); const run_project_template_sh_path = instance_user_data.addS3DownloadCommand({ bucket: assetBucket, bucketKey: run_project_template_sh.s3ObjectKey }); const project_shell_script_sh_path = instance_user_data.addS3DownloadCommand({ bucket: assetBucket, bucketKey: project_shell_script_sh.s3ObjectKey }) const get_p90_py_path = instance_user_data.addS3DownloadCommand({ bucket: assetBucket, bucketKey: get_p90_py.s3ObjectKey }) const init_instance_arguments = user_name + ' ' + show_instance_dashboard_sh_path + ' ' + run_project_template_sh_path + ' ' + project_name + ' ' + branch_name + ' ' + throughput_gbps + ' ' + project_shell_script_sh_path + ' ' + instance_config_name + ' ' + region + ' ' + run_command + ' ' + this.stackName + ' ' + canaryBucketName + ' ' + get_p90_py_path + ' ' + auto_tear_down; instance_user_data.addExecuteFileCommand({ filePath: init_instance_sh_path, arguments: init_instance_arguments }); const ec2instance = new ec2.Instance(this, 'S3BenchmarkClient_' + this.stackName + "_" + instance_config_name + "_" + run_command, { instanceType: new ec2.InstanceType(instance_config_name), vpc: vpc, machineImage: ec2.MachineImage.latestAmazonLinux({ generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2 }), userData: instance_user_data, role: canary_role, keyName: key_name ? 
key_name : 'S3-EC2-Canary-key-pair', securityGroup: security_group, vpcSubnets: subnetSelection, requireImdsv2: true }); } } } canary-policy-doc.json000066400000000000000000000051101456575232400352260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib{ "Version": "2012-10-17", "Statement": [ { "Sid": "S3BenchMarksEc2Role", "Effect": "Allow", "Action": [ "cloudwatch:PutMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "cloudwatch:GetMetricData", "cloudwatch:ListDashboards", "ec2:DescribeInstances", "ec2:MonitorInstances", "ec2:DescribeImages", "lambda:InvokeAsync", "lambda:InvokeFunction", "s3:PutLifecycleConfiguration", "s3:DeleteObject", "s3:GetBucketWebsite", "s3:PutReplicationConfiguration", "s3:GetObjectLegalHold", "s3:GetBucketNotification", "s3:GetReplicationConfiguration", "s3:PutObject", "s3:PutBucketNotification", "s3:PutBucketObjectLockConfiguration", "s3:GetLifecycleConfiguration", "s3:GetInventoryConfiguration", "s3:GetBucketTagging", "s3:ListBucket", "s3:AbortMultipartUpload", "s3:DeleteBucket", "s3:PutBucketVersioning", "s3:ListBucketMultipartUploads", "s3:PutMetricsConfiguration", "s3:GetBucketVersioning", "s3:PutInventoryConfiguration", "s3:PutBucketWebsite", "s3:PutBucketRequestPayment", "s3:PutObjectRetention", "s3:GetBucketCORS", "s3:GetObjectVersion", "s3:PutAnalyticsConfiguration", "s3:GetObjectVersionTagging", "s3:CreateBucket", "s3:ReplicateObject", "s3:GetObjectAcl", "s3:GetBucketObjectLockConfiguration", "s3:DeleteBucketWebsite", "s3:GetObjectVersionAcl", "s3:GetBucketPolicyStatus", "s3:GetObjectRetention", "s3:PutObjectLegalHold", "s3:PutBucketCORS", "s3:ListMultipartUploadParts", "s3:GetObject", "s3:PutBucketLogging", "s3:GetAnalyticsConfiguration", "s3:GetObjectVersionForReplication", "s3:PutAccelerateConfiguration", "s3:DeleteObjectVersion", "s3:GetBucketLogging", "s3:ListBucketVersions", "s3:RestoreObject", "s3:GetAccelerateConfiguration", "s3:GetBucketPolicy", "s3:PutEncryptionConfiguration", "s3:GetEncryptionConfiguration", "s3:GetObjectVersionTorrent", "s3:GetBucketRequestPayment", "s3:GetObjectTagging", "s3:GetMetricsConfiguration", "s3:GetBucketPublicAccessBlock", "s3:GetBucketAcl", "s3:GetObjectTorrent", "s3:GetBucketLocation", "s3:ReplicateDelete" ], "Resource": "*" } ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib/canary.sh000066400000000000000000000022741456575232400327160ustar00rootroot00000000000000#!/bin/bash set -ex if [ ! -x /home/ec2-user/install/bin/aws-crt-cpp-canary ]; then cd /tmp yum update -y yum install -y cmake3 git gcc72 gcc72-c++ htop git clone https://github.com/awslabs/aws-crt-cpp.git cd aws-crt-cpp git checkout s3_canary_vertical git submodule update --init cd aws-common-runtime/s2n/libcrypto-build curl -LO https://www.openssl.org/source/openssl-1.1.1-latest.tar.gz tar -xzvf openssl-1.1.1-latest.tar.gz cd `tar ztf openssl-1.1.1-latest.tar.gz | head -n1 | cut -f1 -d/` ./config -fPIC no-shared no-md2 no-rc5 no-rfc3779 no-sctp no-ssl-trace no-zlib no-hw no-mdc2 no-seed no-idea enable-ec_nistp_64_gcc_128 no-camellia no-bf no-ripemd no-dsa no-ssl2 no-ssl3 no-capieng -DSSL_FORBID_ENULL -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS --prefix=/home/ec2-user/install make -j make install_sw mkdir -p /tmp/aws-crt-cpp/build cd /tmp/aws-crt-cpp/build cmake3 .. -DCMAKE_BUILD_TYPE=Release -DBUILD_DEPS=true -DCMAKE_INSTALL_PREFIX=/home/ec2-user/install -DCMAKE_PREFIX_PATH=/home/ec2-user/install cmake3 --build . 
--target install -- -j fi if [ -n "$1" ]; then /home/ec2-user/install/bin/aws-crt-cpp-canary -g $1 fi aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib/get_p90.py000066400000000000000000000023421456575232400327220ustar00rootroot00000000000000import string import sys import numpy as np import os def get_p90_max(filepath: string): bytes_result = [] with open(filepath) as f: for i in f.readlines(): bytes_result.append(float(i)*8/1000/1000/1000) npy_array = np.array(bytes_result) p90 = np.percentile(npy_array, 90) localmax = max(bytes_result) return p90, localmax def publish_metric(metric_name, project_name, branch_name, instance_name, value): os.system("aws cloudwatch put-metric-data \ --no-cli-pager \ --namespace S3Benchmark \ --metric-name {} \ --unit Gigabits/Second \ --dimensions Project={},Branch={},InstanceType={} \ --value {}".format(metric_name, project_name, branch_name, instance_name, value)) file_path = sys.argv[1] project_name = sys.argv[2] branch_name = sys.argv[3] instance_name = sys.argv[4] p90, localmax = get_p90_max(file_path) metric_prefix = "BytesIn" if "BytesOut" in file_path: metric_prefix = "BytesOut" publish_metric("{}P90".format(metric_prefix), project_name, branch_name, instance_name, p90) publish_metric("{}Max".format(metric_prefix), project_name, branch_name, instance_name, localmax) init_instance.sh000066400000000000000000000134621456575232400342120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib#!/usr/bin/env bash export USER_NAME=$1 export SHOW_INSTANCE_DASHBOARD_SCRIPT=$2 export RUN_PROJECT_TEMPLATE=$3 export PROJECT_NAME=$4 export BRANCH_NAME=$5 export THROUGHPUT_GBPS=$6 export PROJECT_SHELL_SCRIPT=$7 export INSTANCE_TYPE=$8 export REGION=$9 export RUN_COMMAND=${10} export CFN_NAME=${11} export S3_BUCKET_NAME=${12} export P90_SCRIPT=${13} # TODO the auto tear down should be a flag that makes more sense export AUTO_TEAR_DOWN=${14:-1} export TEST_OBJECT_NAME=crt-canary-obj-multipart export RUN_PROJECT_LOG_FN=/tmp/benchmark.log export PUBLISH_METRICS_LOG_FN=/tmp/publish_metrics.log export SHOW_INSTANCE_DASHBOARD_USER_DEST=/home/$USER_NAME/show_instance_dashboard.sh export PERF_SCRIPT_TEMP=/tmp/perf_script_temp.tmp export DOWNLOAD_PERF_SCRIPT=/home/$USER_NAME/download_performance.sh export UPLOAD_PERF_SCRIPT=/home/$USER_NAME/upload_performance.sh export USER_DIR=/home/$USER_NAME/ function publish_bytes_in_metric() { aws cloudwatch put-metric-data \ --no-cli-pager \ --namespace S3Benchmark \ --metric-name BytesIn \ --unit Bytes \ --dimensions Project=$PROJECT_NAME,Branch=$BRANCH_NAME,InstanceType=$INSTANCE_TYPE \ --storage-resolution 1 \ --value $3 >> $PUBLISH_METRICS_LOG_FN # Store the value to a temp file echo $3 >> "/tmp/BytesIn.txt" } function publish_bytes_out_metric() { aws cloudwatch put-metric-data \ --no-cli-pager \ --namespace S3Benchmark \ --metric-name BytesOut \ --unit Bytes \ --dimensions Project=$PROJECT_NAME,Branch=$BRANCH_NAME,InstanceType=$INSTANCE_TYPE \ --storage-resolution 1 \ --value $2 >> $PUBLISH_METRICS_LOG_FN # Store the value to a temp file echo $2 >> "/tmp/BytesOut.txt" } export -f publish_bytes_in_metric export -f publish_bytes_out_metric sudo yum update -y sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm sudo yum-config-manager --enable epel sudo yum install -y tmux bwm-ng htop sudo chmod +x $SHOW_INSTANCE_DASHBOARD_SCRIPT cp $SHOW_INSTANCE_DASHBOARD_SCRIPT 
$SHOW_INSTANCE_DASHBOARD_USER_DEST sudo yum install -y cmake3 git gcc clang sudo alternatives --install /usr/bin/cmake cmake /usr/bin/cmake3 100 \ --slave /usr/bin/ctest ctest /usr/bin/ctest3 \ --slave /usr/bin/cpack cpack /usr/bin/cpack3 \ --slave /usr/bin/ccmake ccmake /usr/bin/ccmake3 curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" unzip awscliv2.zip sudo ./aws/install rm -rf aws rm -rf awscliv2.zip pip3 install numpy TOKEN=`curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600"` INSTANCE_ID=`curl -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/instance-id` aws ec2 monitor-instances --instance-ids $INSTANCE_ID sudo sysctl kernel.perf_event_paranoid=0 sudo mkdir /home/$USER_NAME/ sudo chmod +x $PROJECT_SHELL_SCRIPT ${PROJECT_SHELL_SCRIPT} 'SETUP' AWK_SCRIPT="{" AWK_SCRIPT="$AWK_SCRIPT sub(\"{PROJECT_NAME}\", \"$PROJECT_NAME\");"; AWK_SCRIPT="$AWK_SCRIPT sub(\"{PROJECT_SHELL_SCRIPT}\", \"$PROJECT_SHELL_SCRIPT\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{BRANCH_NAME}\", \"$BRANCH_NAME\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{THROUGHPUT_GBPS}\", \"$THROUGHPUT_GBPS\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{INSTANCE_TYPE}\", \"$INSTANCE_TYPE\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{REGION}\", \"$REGION\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{USER_NAME}\", \"$USER_NAME\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{RUN_PROJECT_LOG_FN}\", \"$RUN_PROJECT_LOG_FN\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{PUBLISH_METRICS_LOG_FN}\", \"$PUBLISH_METRICS_LOG_FN\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{TEST_OBJECT_NAME}\", \"$TEST_OBJECT_NAME\");" AWK_SCRIPT="$AWK_SCRIPT sub(\"{S3_BUCKET_NAME}\", \"$S3_BUCKET_NAME\");" AWK_SCRIPT="$AWK_SCRIPT print}" awk "$AWK_SCRIPT" $RUN_PROJECT_TEMPLATE > $PERF_SCRIPT_TEMP awk "{sub(\"{RUN_COMMAND}\", \"DOWNLOAD_PERFORMANCE\"); print}" $PERF_SCRIPT_TEMP > $DOWNLOAD_PERF_SCRIPT awk "{sub(\"{RUN_COMMAND}\", \"UPLOAD_PERFORMANCE\"); print}" $PERF_SCRIPT_TEMP > $UPLOAD_PERF_SCRIPT sudo chmod +x $DOWNLOAD_PERF_SCRIPT sudo chmod +x $UPLOAD_PERF_SCRIPT CURRENT_TIME=`date +"%Y-%m-%d-%H"` if [ $RUN_COMMAND = "DOWNLOAD_PERFORMANCE" ]; then truncate -s 5G $TEST_OBJECT_NAME aws s3 cp $TEST_OBJECT_NAME s3://$S3_BUCKET_NAME stdbuf -i0 -o0 -e0 bwm-ng -I eth0 -o csv -u bits -d -c 0 \ | stdbuf -o0 grep -v total \ | stdbuf -o0 cut -f1,3,4 -d\; --output-delimiter=' ' \ | xargs -n3 -t -P 32 bash -c 'publish_bytes_in_metric "$@"' _ & sudo $DOWNLOAD_PERF_SCRIPT # Store the data to an S3 bucket for future refrence. aws s3 cp "/tmp/BytesIn.txt" "s3://s3-canary-logs/${PROJECT_NAME}_${BRANCH_NAME}/${CURRENT_TIME}_${INSTANCE_TYPE}/${PROJECT_NAME}_${BRANCH_NAME}_${CURRENT_TIME}_${INSTANCE_TYPE}_BytesIn.txt" python3 $P90_SCRIPT "/tmp/BytesIn.txt" $PROJECT_NAME $BRANCH_NAME $INSTANCE_TYPE elif [ $RUN_COMMAND = "UPLOAD_PERFORMANCE" ]; then stdbuf -i0 -o0 -e0 bwm-ng -I eth0 -o csv -u bits -d -c 0 \ | stdbuf -o0 grep -v total \ | stdbuf -o0 cut -f1,3,4 -d\; --output-delimiter=' ' \ | xargs -n3 -t -P 32 bash -c 'publish_bytes_out_metric "$@"' _ & sudo $UPLOAD_PERF_SCRIPT # Store the data to an S3 bucket for future refrence. 
aws s3 cp "/tmp/BytesOut.txt" "s3://s3-canary-logs/${PROJECT_NAME}_${BRANCH_NAME}/${CURRENT_TIME}_${INSTANCE_TYPE}/${PROJECT_NAME}_${BRANCH_NAME}_${CURRENT_TIME}_${INSTANCE_TYPE}_BytesOut.txt" python3 $P90_SCRIPT "/tmp/BytesOut.txt" $PROJECT_NAME $BRANCH_NAME $INSTANCE_TYPE fi if [ $AUTO_TEAR_DOWN = 1 ]; then aws lambda invoke \ --cli-binary-format raw-in-base64-out \ --function-name BenchmarkManager \ --invocation-type Event \ --payload '{ "action": "delete", "stack_name": '\"${CFN_NAME}\"' }' \ response.json fi project_scripts/000077500000000000000000000000001456575232400342365ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/librun_aws_c_s3.sh000066400000000000000000000062011456575232400371560ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib/project_scripts#!/usr/bin/env bash if [ $1 = "SETUP" ]; then cd $USER_DIR mkdir install export INSTALL_PATH=$USER_DIR/install git clone https://github.com/awslabs/aws-lc.git cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTING=OFF -DENABLE_SANITIZERS=ON -DPERFORM_HEADER_CHECK=ON cmake --build aws-lc/build --target install git clone https://github.com/aws/s2n-tls.git cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTING=OFF -DENABLE_SANITIZERS=ON -DPERFORM_HEADER_CHECK=ON cmake --build s2n-tls/build --target install git clone https://github.com/awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH cmake --build aws-c-common/build --target install git clone https://github.com/awslabs/aws-checksums.git cmake -S aws-checksums -B aws-checksums/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH cmake --build aws-checksums/build --target install git clone https://github.com/awslabs/aws-c-sdkutils.git cmake -S aws-c-sdkutils -B aws-c-sdkutils/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH cmake --build aws-c-sdkutils/build --target install git clone https://github.com/awslabs/aws-c-cal.git cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH cmake --build aws-c-cal/build --target install git clone https://github.com/awslabs/aws-c-io.git cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH cmake --build aws-c-io/build --target install git clone https://github.com/awslabs/aws-c-compression.git cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH cmake --build aws-c-compression/build --target install git clone https://github.com/awslabs/aws-c-http.git cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH cmake --build aws-c-http/build --target install git clone https://github.com/awslabs/aws-c-auth.git cmake -S aws-c-auth -B aws-c-auth/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH cmake --build aws-c-auth/build --target install git clone https://github.com/awslabs/aws-c-s3.git cd aws-c-s3 git checkout $BRANCH_NAME cd .. 
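# Build and install aws-c-s3 from the branch checked out above, with the network and performance tests enabled; the DOWNLOAD_PERFORMANCE / UPLOAD_PERFORMANCE steps below run those performance tests.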
cmake -S aws-c-s3 -B aws-c-s3/build -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH -DCMAKE_PREFIX_PATH=$INSTALL_PATH -DENABLE_S3_NET_TESTS=ON -DENABLE_S3_PERFORMANCE_TESTS=ON -DPERFORMANCE_TEST_NUM_TRANSFERS=100 cmake --build aws-c-s3/build --target install elif [ $1 = "DOWNLOAD_PERFORMANCE" ]; then $USER_DIR/aws-c-s3/build/tests/aws-c-s3-tests test_s3_get_performance elif [ $1 = "UPLOAD_PERFORMANCE" ]; then $USER_DIR/aws-c-s3/build/tests/aws-c-s3-tests test_s3_put_performance fi run_java_crt.sh000066400000000000000000000031371456575232400372530ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib/project_scripts#!/usr/bin/env bash cd $USER_DIR if [ $1 = "SETUP" ]; then sudo yum install java-1.8.0-devel -y sudo alternatives --set java /usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/java sudo alternatives --set javac /usr/lib/jvm/java-1.8.0-openjdk.x86_64/bin/javac sudo yum install maven -y git clone https://github.com/awslabs/aws-crt-java.git --recursive cd aws-crt-java git checkout $BRANCH_NAME git submodule init git submodule update mvn install -DskipTests elif [ $1 = "DOWNLOAD_PERFORMANCE" ]; then cd aws-crt-java mvn test -DforkCount=0 -Dtest="S3ClientTest#benchmarkS3Get" -Daws.crt.s3.benchmark=1 \ -Daws.crt.s3.benchmark.region=$REGION \ -Daws.crt.s3.benchmark.gbps=$THROUGHPUT_GBPS \ -Daws.crt.s3.benchmark.transfers=1600 \ -Daws.crt.s3.benchmark.concurrent=1600 \ -Daws.crt.s3.benchmark.object=$TEST_OBJECT_NAME \ -Daws.crt.s3.benchmark.bucket=$S3_BUCKET_NAME \ -Daws.crt.s3.benchmark.threads=18 \ -Daws.crt.s3.benchmark.warmup=30 \ -Daws.crt.s3.benchmark.tls=true elif [ $1 = "UPLOAD_PERFORMANCE" ]; then cd aws-crt-java mvn test -DforkCount=0 -Dtest="S3ClientTest#benchmarkS3Put" -Daws.crt.s3.benchmark=1 \ -Daws.crt.s3.benchmark.region=$REGION \ -Daws.crt.s3.benchmark.gbps=$THROUGHPUT_GBPS \ -Daws.crt.s3.benchmark.bucket=$S3_BUCKET_NAME \ -Daws.crt.s3.benchmark.transfers=1600 \ -Daws.crt.s3.benchmark.concurrent=1600 \ -Daws.crt.s3.benchmark.threads=18 \ -Daws.crt.s3.benchmark.warmup=30 \ -Daws.crt.s3.benchmark.tls=true fi run_project_template.sh000066400000000000000000000011111456575232400355740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib#!/usr/bin/env bash export PROJECT_NAME={PROJECT_NAME} export PROJECT_SHELL_SCRIPT={PROJECT_SHELL_SCRIPT} export BRANCH_NAME={BRANCH_NAME} export THROUGHPUT_GBPS={THROUGHPUT_GBPS} export INSTANCE_TYPE={INSTANCE_TYPE} export REGION={REGION} export USER_NAME={USER_NAME} export RUN_PROJECT_LOG_FN={RUN_PROJECT_LOG_FN} export PUBLISH_METRICS_LOG_FN={PUBLISH_METRICS_LOG_FN} export RUN_COMMAND={RUN_COMMAND} export TEST_OBJECT_NAME={TEST_OBJECT_NAME} export S3_BUCKET_NAME={S3_BUCKET_NAME} export USER_DIR=/home/$USER_NAME/ $PROJECT_SHELL_SCRIPT "$RUN_COMMAND" 2>&1 $RUN_PROJECT_LOG_FN show_instance_dashboard.sh000066400000000000000000000027411456575232400362340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/lib#!/usr/bin/env bash export BENCHMARK_LOG_FN=/tmp/benchmark.log export PUBLISH_METRICS_LOG_FN=/tmp/publish_metrics.log export CLOUD_INIT_LOG_FN=/var/log/cloud-init-output.log function show_publish_metrics_log() { echo -n "" > $PUBLISH_METRICS_LOG_FN tail -f $PUBLISH_METRICS_LOG_FN } export -f show_publish_metrics_log function show_cloud_init_log() { echo -n "" > $CLOUD_INIT_LOG_FN tail -f $CLOUD_INIT_LOG_FN } export -f show_cloud_init_log function show_benchmark_log() { echo -n 
"" > $BENCHMARK_LOG_FN tail -f $BENCHMARK_LOG_FN } export -f show_benchmark_log function show_bandwidth() { bwm-ng -I eth0 -u bits } export -f show_bandwidth function show_connection_count() { while true; do echo $(date --rfc-3339=seconds) Connections: $(sudo lsof -i TCP:https | wc -l); sleep 1; done } export -f show_connection_count tmux attach -t dashboard ATTACH_DASHBOARD_RESULT=$? if [ $ATTACH_DASHBOARD_RESULT -ne 0 ]; then tmux new -s dashboard \; \ split-window -h -p 50 \; \ select-pane -t 0 \; \ send-keys 'show_publish_metrics_log' C-m \; \ split-window -v -p 66 \; \ send-keys 'show_cloud_init_log' C-m \; \ split-window -v -p 50 \; \ send-keys 'htop' C-m \; \ select-pane -R \; \ send-keys 'show_benchmark_log' C-m \; \ split-window -v -p 66 \; \ send-keys 'show_bandwidth' C-m \; \ split-window -v -p 50 \; \ send-keys 'show_connection_count' C-m \; fi aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/package.json000066400000000000000000000014251456575232400326220ustar00rootroot00000000000000{ "name": "benchmarks-stack", "version": "0.1.0", "bin": { "benchmarks-stack": "bin/benchmarks-stack.js" }, "scripts": { "build": "tsc", "watch": "tsc -w", "test": "jest", "cdk": "cdk" }, "devDependencies": { "@aws-cdk/assert": "^v1.126.0", "@types/jest": "^27.0.1", "@types/node": "10.17.27", "aws-cdk": "^1.20.0", "jest": "^27.2.1", "ts-jest": "^27.0.5", "ts-node": "^10.9.1", "typescript": "^4.2.0" }, "dependencies": { "@aws-cdk/aws-ec2": "^v1.126.0", "@aws-cdk/aws-iam": "^v1.126.0", "@aws-cdk/aws-lambda": "^v1.126.0", "@aws-cdk/aws-s3": "^v1.126.0", "@aws-cdk/aws-s3-assets": "^v1.126.0", "@aws-cdk/core": "^v1.126.0", "aws-sdk": "^2.910.0", "source-map-support": "^0.5.16" } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/response.json000066400000000000000000000000001456575232400330510ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/test/000077500000000000000000000000001456575232400313115ustar00rootroot00000000000000benchmarks.test.ts000066400000000000000000000021451456575232400346770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/testimport { expect as expectCDK, haveResourceLike } from '@aws-cdk/assert'; import * as cdk from '@aws-cdk/core'; import * as Benchmarks from '../lib/benchmarks-stack'; test('CIDRRange default', () => { const anyCIDR = "0.0.0.0/0"; const app = new cdk.App({ context: { "UserName": 'ec2-user', "ProjectName": 'aws-c-s3' } }); const stack = new Benchmarks.BenchmarksStack(app, 'MyTestStack'); expectCDK(stack).to(haveResourceLike("AWS::EC2::SecurityGroup", { "SecurityGroupIngress": [{ "CidrIp": anyCIDR, "Description": "SSH", "FromPort": 22, "IpProtocol": "tcp", "ToPort": 22 }] })) }); test('CIDRRange configured', () => { const CIDR = '172.31.0.0/24'; const app = new cdk.App({ context: { "UserName": 'ec2-user', "ProjectName": 'aws-c-s3', "CIDRRange": CIDR } }); const stack = new Benchmarks.BenchmarksStack(app, 'MyTestStack'); expectCDK(stack).to(haveResourceLike("AWS::EC2::SecurityGroup", { "SecurityGroupIngress": [{ "CidrIp": CIDR, "Description": "SSH", "FromPort": 22, "IpProtocol": "tcp", "ToPort": 22 }] })) }); aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/benchmarks-stack/benchmarks-stack/tsconfig.json000066400000000000000000000011261456575232400330410ustar00rootroot00000000000000{ "compilerOptions": { "target": "ES2018", "module": "commonjs", "lib": ["es2018"], 
"declaration": true, "strict": true, "noImplicitAny": true, "strictNullChecks": true, "noImplicitThis": true, "alwaysStrict": true, "noUnusedLocals": false, "noUnusedParameters": false, "noImplicitReturns": true, "noFallthroughCasesInSwitch": false, "inlineSourceMap": true, "inlineSources": true, "experimentalDecorators": true, "strictPropertyInitialization": false, "typeRoots": ["./node_modules/@types"] }, "exclude": ["cdk.out"] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/000077500000000000000000000000001456575232400247245ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/.gitignore000066400000000000000000000002571456575232400267200ustar00rootroot00000000000000*.js !jest.config.js *.d.ts node_modules # CDK asset staging directory .cdk.staging cdk.out # Parcel default cache directory .parcel-cache cdk.context.json lib/canary.json aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/.npmignore000066400000000000000000000001011456575232400267130ustar00rootroot00000000000000*.ts !*.d.ts # CDK asset staging directory .cdk.staging cdk.out aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/bin/000077500000000000000000000000001456575232400254745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/bin/benchmarks.ts000066400000000000000000000011011456575232400301520ustar00rootroot00000000000000#!/usr/bin/env node import 'source-map-support/register'; import * as cdk from '@aws-cdk/core'; import { DashboardStack } from '../lib/dashboard-stack'; const app = new cdk.App(); const base_stack_name = app.node.tryGetContext('StackName') as string; let dashboard_stack_name = 'DashboardStack'; if (base_stack_name != null) { dashboard_stack_name = dashboard_stack_name + '-' + base_stack_name; } new DashboardStack(app, 'DashboardStack', { stackName: dashboard_stack_name, env: { region: process.env.CDK_DEFAULT_REGION, account: process.env.CDK_DEFAULT_ACCOUNT } }); aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/cdk.json000066400000000000000000000004731456575232400263640ustar00rootroot00000000000000{ "app": "npx ts-node bin/benchmarks.ts", "context": { "@aws-cdk/core:enableStackNameDuplicates": "true", "aws-cdk:enableDiffNoFail": "true", "@aws-cdk/core:stackRelativeExports": "true", "InstanceType": "c5n.18xlarge", "Uploads": 0, "Downloads": 160 }, "requireApproval": "never" } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/jest.config.js000066400000000000000000000002021456575232400274650ustar00rootroot00000000000000module.exports = { roots: ['/test'], testMatch: ['**/*.test.ts'], transform: { '^.+\\.tsx?$': 'ts-jest' } }; aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/lambda/000077500000000000000000000000001456575232400261445ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/lambda/benchmarkManager.py000066400000000000000000000044061456575232400317470ustar00rootroot00000000000000import boto3 CODE_BUILD_NAME = "S3BenchmarksDeploy" def benchmarkManager(event, context): ''' Lambda handler. Action in event determing how manager runs benchmark stack. delete: Delete a stack. - stack_name (string): the name of stack to delete. test: Deploy the stack via code build. 
''' cf_client = boto3.client('cloudformation') print("## LOG started") print(event) print("## event ends") if 'action' not in event: print("\'action\' is required in the event for BenchmarkManager") return { 'statusCode': 400, 'headers': { 'Content-Type': 'text/plain' }, 'body': 'event {} is invalid, \ \'action\' is required'.format(event) } if event['action'] == 'delete': if 'stack_name' in event: stack_name = event['stack_name'] else: print("\'stack_name\' is required for delete action") return { 'statusCode': 400, 'headers': { 'Content-Type': 'text/plain' }, 'body': 'event {} is invalid, \ \'stack_name\' is required for delete action'.format(event) } print("Deleting stack, name is {}".format(stack_name)) response = cf_client.delete_stack( StackName=stack_name) print("Delete stack response: {}".format(response)) return { 'statusCode': 200, 'headers': { 'Content-Type': 'text/plain' }, 'body': 'Deleting {}, it may fail, \ check your consol to see it succeed or not'.format(stack_name) } elif event['action'] == 'test': # trigger codebuild to deploy the benchmarks stack codebuild = boto3.client('codebuild') response = codebuild.start_build(projectName=CODE_BUILD_NAME) print("Code build: Response: {}".format(response)) return { 'statusCode': 200, 'headers': { 'Content-Type': 'text/plain' }, 'body': '{} code build in process, \ check logs if anything failed'.format(CODE_BUILD_NAME) } print("## LOG ended") aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/lib/000077500000000000000000000000001456575232400254725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/lib/dashboard-stack.ts000066400000000000000000000354471456575232400311110ustar00rootroot00000000000000import * as cdk from '@aws-cdk/core'; import * as cloudwatch from '@aws-cdk/aws-cloudwatch'; import * as iam from '@aws-cdk/aws-iam'; import * as _lambda from '@aws-cdk/aws-lambda'; import * as codebuild from '@aws-cdk/aws-codebuild'; import * as events from '@aws-cdk/aws-events'; import * as targets from '@aws-cdk/aws-events-targets'; import * as s3 from '@aws-cdk/aws-s3'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as s3deploy from '@aws-cdk/aws-s3-deployment'; import * as path from 'path'; import * as fs from 'fs'; import { KeyPair } from 'cdk-ec2-key-pair'; function policy_doc_helper(path: string): iam.PolicyDocument { const policy_doc_json = fs.readFileSync(path, 'utf8'); const policy_doc = JSON.parse(policy_doc_json); return iam.PolicyDocument.fromJson(policy_doc); } export class DashboardStack extends cdk.Stack { constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) { super(scope, id, props); const benchmark_config_json_path = path.join( __dirname, "..", "..", "benchmark-config.json" ); const benchmark_config_json = fs.readFileSync(benchmark_config_json_path, 'utf8'); const benchmark_config = JSON.parse(benchmark_config_json); // Lambda to handle trigger codebuild to deploy benchmark and clean up benchmark after tests // use admin role here. TODO: simplify later. // Permission to delete CFN stack with ec2,s3, iam and security group in it and invoke codebuild. 
const admin_policy_doc = policy_doc_helper(path.join( __dirname, "policy-doc", "admin-policy-doc.json" )); const lambda_role = new iam.Role(this, 'LambdaRole', { assumedBy: new iam.ServicePrincipal("lambda.amazonaws.com"), inlinePolicies: { "AdminPolicy": admin_policy_doc } }); const lambda = new _lambda.Function(this, 'BenchmarkManager', { runtime: _lambda.Runtime.PYTHON_3_8, code: _lambda.Code.fromAsset("lambda"), handler: "benchmarkManager.benchmarkManager", role: lambda_role, functionName: "BenchmarkManager", timeout: cdk.Duration.minutes(15) }); // Creat the Cloudwatch Event to schedule the lambda to run daily const lambda_target = new targets.LambdaFunction(lambda, { event: events.RuleTargetInput.fromObject({ 'action': 'test' }) }); new events.Rule(this, 'ScheduleRule', { schedule: events.Schedule.rate(cdk.Duration.days(1)), targets: [lambda_target], ruleName: 'BenchmarksTrigger', description: 'Trigger Benchmarks test.' }); let region = 'unknown'; if (props != undefined && props.env != undefined && props.env.region != undefined) { region = props.env.region; } const vpc = new ec2.Vpc(this, 'VPC', { enableDnsSupport: true, enableDnsHostnames: true }) cdk.Tags.of(vpc).add('S3CanaryResources', 'VPC'); const metrics_namespace = "S3Benchmark"; const metric_widget_width = 6; const metric_widget_height = 6; const num_widgets_per_row = 4; let x = 0; let y = 0; let dashboard_body = { widgets: [] as any }; for (let project_name in benchmark_config.projects) { const project_config = benchmark_config.projects[project_name]; const branch_name = project_config.branch; let project_header_widget = { type: "text", width: 24, height: 1, properties: { markdown: "## " + project_name + " (" + branch_name + ")" } }; dashboard_body.widgets.push(project_header_widget); for (let instance_config_name in benchmark_config.instances) { const instance_config = benchmark_config.instances[instance_config_name]; if (x >= num_widgets_per_row) { x = 0; y += metric_widget_height; } let instance_widget = { type: "metric", width: metric_widget_width, height: metric_widget_height, properties: { metrics: [ [ metrics_namespace, "BytesIn", "Project", project_name, "Branch", branch_name, "InstanceType", instance_config_name, { id: "m1", visible: false, } ], [ metrics_namespace, "BytesOut", "Project", project_name, "Branch", branch_name, "InstanceType", instance_config_name, { id: "m2", visible: false, } ], [ metrics_namespace, "BytesInP90", "Project", project_name, "Branch", branch_name, "InstanceType", instance_config_name, { id: "p1", visible: false, } ], [ metrics_namespace, "BytesOutP90", "Project", project_name, "Branch", branch_name, "InstanceType", instance_config_name, { id: "p2", visible: false, } ], [ metrics_namespace, "BytesInMax", "Project", project_name, "Branch", branch_name, "InstanceType", instance_config_name, { id: "max1", visible: false, } ], [ metrics_namespace, "BytesOutMax", "Project", project_name, "Branch", branch_name, "InstanceType", instance_config_name, { id: "max2", visible: false, } ], [ { expression: "m1*8/1000/1000/1000", "label": "Gbps Download", "id": "e1", color: "#0047ab" } ], [ { expression: "m2*8/1000/1000/1000", "label": "Gbps Upload", "id": "e2", color: "#ffa500" } ] ], yAxis: { showUnits: false }, stat: "Average", period: 1, region: region, title: instance_config_name } }; const tagName = project_name + "/" + branch_name + '-' + instance_config_name; const alarmAction = "arn:aws:cloudwatch::cwa-internal:ticket:3:AWS:SDKs+and+Tools:Common+\ 
Runtime:AWS+SDKs+Common+Runtime:Data+missing+or+performance+issue+from+S3+canary.+\ Check+the+status+of+S3+canary+in+us-west-2%2C+which+is+the+DashboardStack+CFN+for+" + tagName; const cfnAlarmDownload = new cloudwatch.CfnAlarm(this, tagName + "download", { comparisonOperator: 'LessThanOrEqualToThreshold', evaluationPeriods: 1, // the properties below are optional actionsEnabled: true, alarmActions: [alarmAction], alarmDescription: "S3 canary has no data or low performance for a day. Check the canary is working or not or something related to performance has been merged. For download " + tagName, alarmName: "S3 Canary Alarm Download " + tagName, datapointsToAlarm: 1, dimensions: [ { name: "Project", value: project_name }, { name: "Branch", value: branch_name }, { name: "InstanceType", value: instance_config_name } ], insufficientDataActions: [alarmAction], metricName: 'BytesInP90', namespace: 'S3Benchmark', okActions: [], period: 86400, statistic: 'Maximum', threshold: 70.0, // Set a 70 Gbps threshold for now, we can update it later. treatMissingData: 'breaching', }); const cfnAlarmUpload = new cloudwatch.CfnAlarm(this, tagName + "upload", { comparisonOperator: 'LessThanOrEqualToThreshold', evaluationPeriods: 1, // the properties below are optional actionsEnabled: true, alarmActions: [alarmAction], alarmDescription: "S3 canary has no data or low performance for a day. Check the canary is working or not or something related to performance has been merged. For upload " + tagName, alarmName: "S3 Canary Alarm Upload " + tagName, datapointsToAlarm: 1, dimensions: [ { name: "Project", value: project_name }, { name: "Branch", value: branch_name }, { name: "InstanceType", value: instance_config_name } ], insufficientDataActions: [alarmAction], metricName: 'BytesOutP90', namespace: 'S3Benchmark', okActions: [], period: 86400, statistic: 'Maximum', threshold: 70.0, // Set a 70 Gbps threshold for now, we can update it later. treatMissingData: 'breaching', }); dashboard_body.widgets.push(instance_widget); } } const dashboard = new cloudwatch.CfnDashboard(this, id, { dashboardBody: JSON.stringify(dashboard_body), dashboardName: id + "_" + region }); // Permission to create CFN stack with ec2,s3, iam and security group in it. // TODO: simplify it later. Use admin policy for simplicity now. const codebuild_role = new iam.Role(this, 'CodeBuildRole', { assumedBy: new iam.ServicePrincipal("codebuild.amazonaws.com"), inlinePolicies: { "AdminPolicy": admin_policy_doc } }); const code_bucket = new s3.Bucket(this, 'CodeBucket', { removalPolicy: cdk.RemovalPolicy.DESTROY, autoDeleteObjects: true, }); //Write the config for deploy benchmarks-stack fs.writeFileSync(path.join( __dirname, "..", "..", "benchmarks-stack", "benchmarks-stack", "lib", "benchmark-config.json" ), benchmark_config_json); new s3deploy.BucketDeployment(this, 'DeployCodeBase', { sources: [s3deploy.Source.asset('../benchmarks-stack')], destinationBucket: code_bucket }); new codebuild.Project(this, 'S3BenchmarksDeploy', { source: codebuild.Source.s3({ bucket: code_bucket, path: 'benchmarks-stack/', }), environment: { buildImage: codebuild.LinuxBuildImage.fromCodeBuildImageId('aws/codebuild/standard:5.0'), }, role: codebuild_role, projectName: "S3BenchmarksDeploy" }); if (benchmark_config["key-pair-name"] == undefined) { // Create the Key Pair const key = new KeyPair(this, 'EC2CanaryKeyPair', { name: 'S3-EC2-Canary-key-pair', description: 'Key pair for BenchMarks stack to launch Ec2 instance. 
The private key is stored in \ Secrets Manager as ec2-ssh-key/S3-EC2-Canary-key-pair/private', storePublicKey: true, // by default the public key will not be stored in Secrets Manager }); } } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/lib/policy-doc/000077500000000000000000000000001456575232400275345ustar00rootroot00000000000000admin-policy-doc.json000066400000000000000000000002041456575232400334740ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/lib/policy-doc{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "*", "Resource": "*" } ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/package.json000066400000000000000000000020141456575232400272070ustar00rootroot00000000000000{ "name": "benchmarks", "version": "0.1.0", "bin": { "benchmarks": "bin/benchmarks.js" }, "scripts": { "build": "tsc", "watch": "tsc -w", "test": "jest", "cdk": "cdk" }, "devDependencies": { "@aws-cdk/assert": "^v1.126.0", "@types/jest": "^27.0.1", "@types/node": "10.17.27", "aws-cdk": "^1.20.0", "jest": "^27.2.1", "ts-jest": "^27.0.5", "ts-node": "^10.9.1", "typescript": "^4.2.0" }, "dependencies": { "@aws-cdk/aws-cloudwatch": "^v1.126.0", "@aws-cdk/aws-codebuild": "^v1.126.0", "@aws-cdk/aws-ec2": "^v1.126.0", "@aws-cdk/aws-events": "^v1.126.0", "@aws-cdk/aws-events-targets": "^v1.126.0", "@aws-cdk/aws-iam": "^v1.126.0", "@aws-cdk/aws-kms": "^v1.126.0", "@aws-cdk/aws-lambda": "^v1.126.0", "@aws-cdk/aws-s3": "^v1.126.0", "@aws-cdk/aws-s3-assets": "^v1.126.0", "@aws-cdk/aws-s3-deployment": "^v1.126.0", "@aws-cdk/core": "^v1.126.0", "cdk-ec2-key-pair": "2.2.0", "source-map-support": "^0.5.16" } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/response.json000066400000000000000000000000001456575232400274430ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/benchmarks/dashboard-stack/tsconfig.json000066400000000000000000000011261456575232400274330ustar00rootroot00000000000000{ "compilerOptions": { "target": "ES2018", "module": "commonjs", "lib": ["es2018"], "declaration": true, "strict": true, "noImplicitAny": true, "strictNullChecks": true, "noImplicitThis": true, "alwaysStrict": true, "noUnusedLocals": false, "noUnusedParameters": false, "noImplicitReturns": true, "noFallthroughCasesInSwitch": false, "inlineSourceMap": true, "inlineSources": true, "experimentalDecorators": true, "strictPropertyInitialization": false, "typeRoots": ["./node_modules/@types"] }, "exclude": ["cdk.out"] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/builder.json000066400000000000000000000010711456575232400220750ustar00rootroot00000000000000{ "name": "aws-c-s3", "targets": { "android": { "enabled": false, "_comment": "disabled until we need to support it. LibCrypto needs to be configured on build machine." 
} }, "upstream": [ { "name": "aws-c-auth" }, { "name": "aws-c-http" }, { "name": "aws-checksums" }, { "name": "aws-c-sdkutils"} ], "downstream": [ ], "test_steps": [ "test" ], "+cmake_args": [ "-DENABLE_MRAP_TESTS=ON" ], "pre_build_steps": ["mock-server-setup"] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/cmake/000077500000000000000000000000001456575232400206355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/cmake/aws-c-s3-config.cmake000066400000000000000000000011661456575232400244430ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-auth) find_dependency(aws-c-http) find_dependency(aws-checksums) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/docs/000077500000000000000000000000001456575232400205055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/docs/GetObject.md000066400000000000000000000005651456575232400227030ustar00rootroot00000000000000# GetObject ## Overview The `GetObject` is used to download objects from Amazon S3. Optimized for throughput, the CRT S3 client enhances performance and reliability by parallelizing multiple part-sized `GetObject` with range requests. ## Flow Diagram Below is the typical flow of a GetObject request made by the user. ![GetObject Flow Diagram](images/GetObjectFlow.svg) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/docs/images/000077500000000000000000000000001456575232400217525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/docs/images/GetObjectFlow.svg000066400000000000000000002754521456575232400252100ustar00rootroot00000000000000
[GetObjectFlow.svg: text labels recovered from the flow diagram (SVG markup not preserved). The diagram traces "User makes a GetObject Request" through decision nodes "Contains partNumber?", "Contains Range Header?", "Contains Start Range?", "Validate Checksum?", "size_hint_available && size_hint <= part_size", "EmptyFile?" / "EmptyFile Error?", and "Has More Parts?", and request nodes "Default Request", "HeadObject", "GetObject with partNumber=1", "GetObject with range 0-partSize", "GetObject from startRange", "ObjectSize and Potentially FirstPart Request", "GetObject with range of part", and "Parallel Download All Parts", ending at "End". The legend distinguishes "Request to S3" nodes from "Comment" nodes, and the numbered markers 1-6 in the diagram correspond to the notes below.
  1. If the initial request contains a partNumber, we currently send it as a default request since we have not yet implemented downloading a single part with splitting.
  2. If there was a range header without a startRange, we perform a HeadObject to determine the startRange first; otherwise, we execute a GetObject from the startRange.
  3. If checksum validation is enabled, we validate the checksum for part-aligned ranged gets and also have a workaround to validate the checksum if the file was uploaded as a single part but is now being downloaded as multipart. This is done using HeadObject or GetObject with partNumber=1.
  4. If the size_hint indicates a small file, we execute GetObject with partNumber=1 to avoid the HeadObject for small files. If the actual part size exceeds our part_size, we cancel the request upon receiving the headers and retrieve the first part again as part of parallelized ranged gets. Otherwise, we perform a HeadObject to discover both the object size and checksum if available.
  5. When we do the first GetObject with range request to discover object size, it can fail if the file is empty. Now that we know the file is empty, we send another GetObject with partNumber=1 request to provide successful response headers to the user. It will succeed if the file is empty, or it will proceed to download the file.
  6. We perform multiple GetObject with range requests in parallel to download the remaining portion of the file once its total size is known.]
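Note 6 above is the heart of the download path: once the object's size is known, the rest of the object is fetched with multiple ranged GetObject requests issued in parallel, and the ranges are inclusive (per s3_auto_ranged_get.h later in this archive, begin == end means a single byte). The sketch below illustrates only that part-splitting arithmetic; it is not code from this repository, and get_part_range and its parameters are hypothetical names.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: compute the inclusive byte range for one part of a
 * parallel ranged-GET download. Returns false once part_index is past the
 * end of the object. */
static bool get_part_range(
    uint64_t object_size,
    uint64_t part_size,
    uint32_t part_index, /* 0-based */
    uint64_t *out_begin,
    uint64_t *out_end) {

    uint64_t begin = (uint64_t)part_index * part_size;
    if (object_size == 0 || begin >= object_size) {
        return false;
    }
    uint64_t end = begin + part_size - 1;
    if (end > object_size - 1) {
        end = object_size - 1; /* the last part may be shorter than part_size */
    }
    *out_begin = begin;
    *out_end = end;
    return true;
}

int main(void) {
    /* Example: a 20 MiB object with 8 MiB parts splits into
     * 0-8388607, 8388608-16777215, 16777216-20971519. */
    uint64_t begin = 0;
    uint64_t end = 0;
    for (uint32_t i = 0; get_part_range(20971520, 8388608, i, &begin, &end); ++i) {
        printf("Range: bytes=%llu-%llu\n", (unsigned long long)begin, (unsigned long long)end);
    }
    return 0;
}
```

Each printed pair corresponds to the Range header of one parallel GetObject request; in the real client the equivalent bookkeeping lives in the auto-ranged-get meta request's synced data (object_range_start, object_range_end, total_num_parts in s3_auto_ranged_get.h).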
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/docs/memory_aware_request_execution.md000066400000000000000000000105001456575232400273450ustar00rootroot00000000000000CRT S3 client was designed with throughput as a primary goal. As such, the client scales resource usage, such as the number of parallel requests in flight, to achieve the target throughput. The client creates buffers to hold the data it is sending or receiving for each request, so scaling the number of requests in flight has a direct impact on the memory used. In practice, setting a high target throughput or a larger part size can lead to high observed memory usage.

To mitigate high memory usage, memory reuse improvements were added to the client, along with options to limit the maximum memory used. The following sections go into more detail on those changes and how they affect the client.

### Memory Reuse
At the basic level, the CRT S3 client starts with a meta request for an operation like put or get, breaks it into smaller part-sized requests, and executes those in parallel. The client used to allocate a part-sized buffer for each of those requests and release it right after the request was done. That approach resulted in a lot of very short-lived allocations and allocator thrashing, overall leading to memory-use spikes considerably higher than what is actually needed. To address that, the client is switching to a pooled buffer approach, discussed below.

Note: the approach described below is a work in progress and concentrates on improving the common cases (default 8 MB part sizes and part sizes smaller than 64 MB).

Several observations about the client's usage of buffers:
- The client does not automatically switch to buffers above the default 8 MB for uploads until the upload passes 10,000 parts (~80 GB).
- Get operations always use either the configured part size or the default of 8 MB. Part size for get is not adjusted, since there is no 10,000 part limitation.
- Both Put and Get operations go through fill and drain phases. For Put, for example, the client first schedules a number of reads to 'fill' the buffers from the source; as those reads complete, the buffers are 'drained' by being sent over to the networking layer.
- Individual UploadPart or ranged get operations typically have a similar lifespan (with some caveats); in practice, part buffers are acquired/released in bulk at the same time.

The buffer pooling takes advantage of those allocation patterns and works as follows. The memory is split into primary and secondary areas. The secondary area is used for requests with a part size bigger than a predefined value (currently 4 times the part size); allocations from it go directly to the allocator and are effectively the old way of doing things. The primary memory area is split into blocks of fixed size (16 times the part size, or 16 * 8 MB if no part size is defined). Blocks are allocated on demand. Each block is logically subdivided into part-sized chunks. The pool allocates and releases in chunk sizes only, and supports acquiring several chunks (up to 4) at once. Blocks are kept around while there are ongoing requests and are released asynchronously when there is low pressure on memory. (A minimal usage sketch of this reserve/acquire/release flow is shown below.)

### Scheduling
Running out of memory is a terminal condition within CRT, and in general it is not practical to try to set an overall memory limit on all allocations, since that dramatically increases the complexity of the code that has to deal with cases where only part of the memory for a task was allocated. Comparatively, the majority of memory usage within the S3 client comes from buffers allocated for Put/Get parts.
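As a concrete illustration of the reserve/acquire/release flow described under Memory Reuse above, here is a minimal sketch built on the buffer pool API declared in include/aws/s3/private/s3_buffer_pool.h (which appears later in this archive). It is a sketch only: in the client the pool is created once and owned by the aws_s3_client (see the buffer_pool member in s3_client_impl.h), whereas this example creates and destroys a pool locally just to stay self-contained, and the sizes are arbitrary example values.

```c
#include <aws/s3/private/s3_buffer_pool.h>

/* Sketch of the ticketed buffer flow for a single part-sized buffer:
 * reserve (accounting only) -> acquire (actual memory) -> release (back to pool). */
static int one_part_buffer_sketch(struct aws_allocator *allocator) {
    const size_t part_size = 8 * 1024 * 1024;   /* example chunk size (client part size) */
    const size_t mem_limit = 512 * 1024 * 1024; /* example overall memory limit */

    struct aws_s3_buffer_pool *pool = aws_s3_buffer_pool_new(allocator, part_size, mem_limit);
    if (pool == NULL) {
        return AWS_OP_ERR;
    }

    /* Reserve: only accounts for the memory, nothing is allocated yet.
     * NULL means the limit was hit and a reservation hold was placed on the pool. */
    struct aws_s3_buffer_pool_ticket *ticket = aws_s3_buffer_pool_reserve(pool, part_size);
    if (ticket == NULL) {
        aws_s3_buffer_pool_destroy(pool);
        return AWS_OP_ERR;
    }

    /* Exchange the ticket for an actual buffer as close to its use as possible. */
    struct aws_byte_buf buffer = aws_s3_buffer_pool_acquire_buffer(pool, ticket);

    /* ... fill `buffer` from the request body and hand it to the networking layer ... */
    (void)buffer;

    /* Release the ticket: the buffer becomes invalid and the chunk returns to the pool. */
    aws_s3_buffer_pool_release_ticket(pool, ticket);

    aws_s3_buffer_pool_destroy(pool);
    return AWS_OP_SUCCESS;
}
```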
So to control memory usage, the client will concentrate on controlling the number of buffers allocated. Effectively, this boils down to a back pressure mechanism of limiting the number of parts scheduled as memory gets closer to the limit. Memory used for other resources, ex. http connections data, various supporting structures, are not actively controlled and instead some memory is taken out from overall limit. Overall, scheduling does a best-effort memory limiting. At the time of scheduling, the client reserves memory by using buffer pool ticketing mechanism. Buffer is acquired from the pool using the ticket as close to the usage as possible (this approach peaks at lower mem usage than preallocating all mem upfront because buffers cannot be used right away, ex reading from file will fill buffers slower than they are sent, leading to decent amount of buffer reuse) Reservation mechanism is approximate and in some cases can lead to actual memory usage being higher once tickets are redeemed. The client reserves some memory to mitigate overflows like that. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/format-check.sh000077500000000000000000000007731456575232400224660ustar00rootroot00000000000000#!/bin/bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests samples -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/000077500000000000000000000000001456575232400212005ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/000077500000000000000000000000001456575232400217725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/000077500000000000000000000000001456575232400223175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/exports.h000066400000000000000000000015751456575232400242040ustar00rootroot00000000000000#ifndef AWS_S3_EXPORTS_H #define AWS_S3_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_S3_USE_IMPORT_EXPORT # ifdef AWS_S3_EXPORTS # define AWS_S3_API __declspec(dllexport) # else # define AWS_S3_API __declspec(dllimport) # endif /* AWS_S3_EXPORTS */ # else # define AWS_S3_API # endif /*USE_IMPORT_EXPORT */ #else # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_S3_USE_IMPORT_EXPORT) && defined(AWS_S3_EXPORTS) # define AWS_S3_API __attribute__((visibility("default"))) # else # define AWS_S3_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */ #endif /* AWS_S3_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/000077500000000000000000000000001456575232400237715ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h000066400000000000000000000054051456575232400277020ustar00rootroot00000000000000#ifndef AWS_S3_AUTO_RANGED_GET_H #define AWS_S3_AUTO_RANGED_GET_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_meta_request_impl.h" enum aws_s3_auto_ranged_get_request_type { AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT, AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE, AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1, }; struct aws_s3_auto_ranged_get { struct aws_s3_meta_request base; enum aws_s3_checksum_algorithm validation_algorithm; struct aws_string *etag; bool initial_message_has_start_range; bool initial_message_has_end_range; uint64_t initial_range_start; uint64_t initial_range_end; uint64_t object_size_hint; bool object_size_hint_available; /* Members to only be used when the mutex in the base type is locked. */ struct { /* The starting byte of the data that we will be retrieved from the object. * (ignore this if object_range_empty) */ uint64_t object_range_start; /* The last byte of the data that will be retrieved from the object. * (ignore this if object_range_empty) * Note this is inclusive: https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests * So if begin=0 and end=0 then 1 byte is being downloaded. */ uint64_t object_range_end; uint64_t first_part_size; /* The total number of parts that are being used in downloading the object range. Note that "part" here * currently refers to a range-get, and does not require a "part" on the service side. */ uint32_t total_num_parts; uint32_t num_parts_requested; uint32_t num_parts_completed; uint32_t num_parts_successful; uint32_t num_parts_failed; uint32_t num_parts_checksum_validated; uint32_t object_range_known : 1; /* True if object_range_known, and it's found to be empty. * If this is true, ignore object_range_start and object_range_end */ uint32_t object_range_empty : 1; uint32_t head_object_sent : 1; uint32_t head_object_completed : 1; uint32_t read_window_warning_issued : 1; } synced_data; uint32_t initial_message_has_range_header : 1; uint32_t initial_message_has_if_match_header : 1; }; AWS_EXTERN_C_BEGIN /* Creates a new auto-ranged get meta request. This will do multiple parallel ranged-gets when appropriate. */ AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_get_new( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, const struct aws_s3_meta_request_options *options); AWS_EXTERN_C_END #endif /* AWS_S3_AUTO_RANGED_GET_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h000066400000000000000000000111711456575232400277300ustar00rootroot00000000000000#ifndef AWS_S3_AUTO_RANGED_PUT_H #define AWS_S3_AUTO_RANGED_PUT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_meta_request_impl.h" #include "s3_paginator.h" enum aws_s3_auto_ranged_put_request_tag { AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_MAX, }; struct aws_s3_auto_ranged_put { struct aws_s3_meta_request base; /* Initialized either during creation in resume flow or as result of create multipart upload during normal flow. 
*/ struct aws_string *upload_id; /* Resume token used to resume the operation */ struct aws_s3_meta_request_resume_token *resume_token; uint64_t content_length; bool has_content_length; /* * total_num_parts_from_content_length is calculated by content_length / part_size. * It will be 0 if there is no content_length. */ uint32_t total_num_parts_from_content_length; /* Only meant for use in the update function, which is never called concurrently. */ struct { /* * Next part number to send. * Note: this follows s3 part number convention and counting starts with 1. * Throughout codebase 0 based part numbers are usually referred to as part index. */ uint32_t next_part_number; } threaded_update_data; /* Members to only be used when the mutex in the base type is locked. */ struct { /* Array list of `struct aws_s3_mpu_part_info *` * Info about each part, that we need to remember for CompleteMultipartUpload. * This is updated as we upload each part. * If resuming an upload, we first call ListParts and store the details * of previously uploaded parts here. In this case, the array may start with gaps * (e.g. if parts 1 and 3 were previously uploaded, but not part 2). */ struct aws_array_list part_list; struct aws_s3_paginated_operation *list_parts_operation; struct aws_string *list_parts_continuation_token; /* Number of parts we've started work on */ uint32_t num_parts_started; /* Number of parts we've started, and we have no more work to do */ uint32_t num_parts_completed; uint32_t num_parts_successful; uint32_t num_parts_failed; /* When content length is not known, requests are optimistically * scheduled, below represents how many requests were scheduled and had no * work to do*/ uint32_t num_parts_noop; /* Number of parts we've started, but they're not done reading from stream yet. * Though reads are serial (only 1 part can be reading from stream at a time) * we may queue up more to minimize delays between each read. */ uint32_t num_parts_pending_read; struct aws_http_headers *needed_response_headers; /* Whether body stream is exhausted. */ bool is_body_stream_at_end; int list_parts_error_code; int create_multipart_upload_error_code; int complete_multipart_upload_error_code; int abort_multipart_upload_error_code; struct { /* Mark a single ListParts request has started or not */ uint32_t started : 1; /* Mark ListParts need to continue or not */ uint32_t continues : 1; /* Mark ListParts has completed all the pages or not */ uint32_t completed : 1; } list_parts_state; uint32_t create_multipart_upload_sent : 1; uint32_t create_multipart_upload_completed : 1; uint32_t complete_multipart_upload_sent : 1; uint32_t complete_multipart_upload_completed : 1; uint32_t abort_multipart_upload_sent : 1; uint32_t abort_multipart_upload_completed : 1; } synced_data; }; AWS_EXTERN_C_BEGIN /* Creates a new auto-ranged put meta request. * This will do a multipart upload in parallel when appropriate. * Note: if has_content_length is false, content_length and num_parts are ignored. 
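 * Illustrative example (assuming the part count is the ceiling of content_length / part_size and
 * ignoring any part-size adjustment the client may apply): uploading a 100 MiB object with an
 * 8 MiB part size gives num_parts = 13, i.e. twelve full 8 MiB parts plus a final 4 MiB part.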
*/ AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, bool has_content_length, uint64_t content_length, uint32_t num_parts, const struct aws_s3_meta_request_options *options); AWS_EXTERN_C_END #endif /* AWS_S3_AUTO_RANGED_PUT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_buffer_pool.h000066400000000000000000000116631456575232400270600ustar00rootroot00000000000000#ifndef AWS_S3_BUFFER_ALLOCATOR_H #define AWS_S3_BUFFER_ALLOCATOR_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /* * S3 buffer pool. * Buffer pool used for pooling part sized buffers for Put/Get operations. * Provides additional functionally for limiting overall memory used. * High-level buffer pool usage flow: * - Create buffer with overall memory limit and common buffer size, aka chunk * size (typically part size configured on client) * - For each request: * -- call reserve to acquire ticket for future buffer acquisition. this will * mark memory reserved, but would not allocate it. if reserve call hits * memory limit, it fails and reservation hold is put on the whole buffer * pool. (aws_s3_buffer_pool_remove_reservation_hold can be used to remove * reservation hold). * -- once request needs memory, it can exchange ticket for a buffer using * aws_s3_buffer_pool_acquire_buffer. this operation never fails, even if it * ends up going over memory limit. * -- buffer lifetime is tied to the ticket. so once request is done with the * buffer, ticket is released and buffer returns back to the pool. */ AWS_EXTERN_C_BEGIN struct aws_s3_buffer_pool; struct aws_s3_buffer_pool_ticket; struct aws_s3_buffer_pool_usage_stats { /* Effective Max memory limit. Memory limit value provided during construction minus * buffer reserved for overhead of the pool */ size_t mem_limit; /* Max size of buffer to be allocated from primary. */ size_t primary_cutoff; /* How much mem is used in primary storage. includes memory used by blocks * that are waiting on all allocs to release before being put back in circulation. */ size_t primary_used; /* Overall memory allocated for blocks. */ size_t primary_allocated; /* Reserved memory. Does not account for how that memory will map into * blocks and in practice can be lower than used memory. */ size_t primary_reserved; /* Number of blocks allocated in primary. */ size_t primary_num_blocks; /* Secondary mem used. Accurate, maps directly to base allocator. */ size_t secondary_used; /* Secondary mem reserved. Accurate, maps directly to base allocator. */ size_t secondary_reserved; }; /* * Create new buffer pool. * chunk_size - specifies the size of memory that will most commonly be acquired * from the pool (typically part size). * mem_limit - limit on how much mem buffer pool can use. once limit is hit, * buffers can no longer be reserved from (reservation hold is placed on the pool). * Returns buffer pool pointer on success and NULL on failure. */ AWS_S3_API struct aws_s3_buffer_pool *aws_s3_buffer_pool_new( struct aws_allocator *allocator, size_t chunk_size, size_t mem_limit); /* * Destroys buffer pool. * Does nothing if buffer_pool is NULL. */ AWS_S3_API void aws_s3_buffer_pool_destroy(struct aws_s3_buffer_pool *buffer_pool); /* * Reserves memory from the pool for later use. * Best effort and can potentially reserve memory slightly over the limit. 
* Reservation takes some memory out of the available pool, but does not * allocate it right away. * On success ticket will be returned. * On failure NULL is returned, error is raised and reservation hold is placed * on the buffer. Any further reservations while hold is active will fail. * Remove reservation hold to unblock reservations. */ AWS_S3_API struct aws_s3_buffer_pool_ticket *aws_s3_buffer_pool_reserve( struct aws_s3_buffer_pool *buffer_pool, size_t size); /* * Whether pool has a reservation hold. */ AWS_S3_API bool aws_s3_buffer_pool_has_reservation_hold(struct aws_s3_buffer_pool *buffer_pool); /* * Remove reservation hold on pool. */ AWS_S3_API void aws_s3_buffer_pool_remove_reservation_hold(struct aws_s3_buffer_pool *buffer_pool); /* * Trades in the ticket for a buffer. * Cannot fail and can over allocate above mem limit if reservation was not accurate. * Using the same ticket twice will return the same buffer. * Buffer is only valid until the ticket is released. */ AWS_S3_API struct aws_byte_buf aws_s3_buffer_pool_acquire_buffer( struct aws_s3_buffer_pool *buffer_pool, struct aws_s3_buffer_pool_ticket *ticket); /* * Releases the ticket. * Any buffers associated with the ticket are invalidated. */ AWS_S3_API void aws_s3_buffer_pool_release_ticket( struct aws_s3_buffer_pool *buffer_pool, struct aws_s3_buffer_pool_ticket *ticket); /* * Get pool memory usage stats. */ AWS_S3_API struct aws_s3_buffer_pool_usage_stats aws_s3_buffer_pool_get_usage(struct aws_s3_buffer_pool *buffer_pool); /* * Trims all unused mem from the pool. * Warning: fairly slow operation, do not use in critical path. * TODO: partial trimming? ex. only trim down to 50% of max? */ AWS_S3_API void aws_s3_buffer_pool_trim(struct aws_s3_buffer_pool *buffer_pool); AWS_EXTERN_C_END #endif /* AWS_S3_BUFFER_ALLOCATOR_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_checksums.h000066400000000000000000000132211456575232400265330ustar00rootroot00000000000000#ifndef AWS_S3_CHECKSUMS_H #define AWS_S3_CHECKSUMS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/s3_client.h" /* TODO: consider moving the aws_checksum_stream to aws-c-checksum, and the rest about checksum headers and trailer to * aws-c-sdkutil. */ struct aws_s3_checksum; struct aws_checksum_vtable { void (*destroy)(struct aws_s3_checksum *checksum); int (*update)(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *buf); int (*finalize)(struct aws_s3_checksum *checksum, struct aws_byte_buf *out, size_t truncate_to); }; struct aws_s3_checksum { struct aws_allocator *allocator; struct aws_checksum_vtable *vtable; void *impl; size_t digest_size; enum aws_s3_checksum_algorithm algorithm; bool good; }; struct checksum_config { enum aws_s3_checksum_location location; enum aws_s3_checksum_algorithm checksum_algorithm; bool validate_response_checksum; struct { bool crc32c; bool crc32; bool sha1; bool sha256; } response_checksum_algorithms; }; /** * a stream that takes in a stream, computes a running checksum as it is read, and outputs the checksum when the stream * is destroyed. * Note: seek this stream will immediately fail, as it would prevent an accurate calculation of the * checksum. * * @param allocator * @param existing_stream The real content to read from. Destroying the checksum stream destroys the existing stream. * outputs the checksum of existing stream to checksum_output upon destruction. 
Will be kept * alive by the checksum stream * @param algorithm Checksum algorithm to use. * @param checksum_output Checksum of the `existing_stream`, owned by caller, which will be calculated when this stream * is destroyed. */ AWS_S3_API struct aws_input_stream *aws_checksum_stream_new( struct aws_allocator *allocator, struct aws_input_stream *existing_stream, enum aws_s3_checksum_algorithm algorithm, struct aws_byte_buf *checksum_output); /** * TODO: properly support chunked encoding. * * A stream that takes in a stream, encodes it to aws_chunked. Computes a running checksum as it is read and add the * checksum as trailer at the end of the stream. All of the added bytes will be counted to the length of the stream. * Note: seek this stream will immediately fail, as it would prevent an accurate calculation of the * checksum. * * @param allocator * @param existing_stream The data to be chunkified prepended by information on the stream length followed by a final * chunk and a trailing chunk containing a checksum of the existing stream. Destroying the * chunk stream will destroy the existing stream. * @param checksum_output Optional argument, if provided the buffer will be initialized to the appropriate size and * filled with the checksum result when calculated. Callers responsibility to cleanup. */ AWS_S3_API struct aws_input_stream *aws_chunk_stream_new( struct aws_allocator *allocator, struct aws_input_stream *existing_stream, enum aws_s3_checksum_algorithm algorithm, struct aws_byte_buf *checksum_output); /** * Get the size of the checksum output corresponding to the aws_s3_checksum_algorithm enum value. */ AWS_S3_API size_t aws_get_digest_size_from_algorithm(enum aws_s3_checksum_algorithm algorithm); /** * Get the header name corresponding to the aws_s3_checksum_algorithm enum value. */ AWS_S3_API const struct aws_byte_cursor *aws_get_http_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm); /** * Get the multipart upload header name corresponding to the aws_s3_checksum_algorithm enum value. */ AWS_S3_API const struct aws_byte_cursor *aws_get_create_mpu_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm); /** * Get the complete multipart upload name corresponding to the aws_s3_checksum_algorithm enum value. */ AWS_S3_API const struct aws_byte_cursor *aws_get_complete_mpu_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm); /** * create a new aws_checksum corresponding to the aws_s3_checksum_algorithm enum value. */ AWS_S3_API struct aws_s3_checksum *aws_checksum_new(struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm); /** * Compute an aws_checksum corresponding to the provided enum, passing a function pointer around instead of using a * conditional would be faster, but would be a negligible improvement compared to the cost of processing data twice * which would be the only time this function would be used, and would be harder to follow. */ AWS_S3_API int aws_checksum_compute( struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to); /** * Cleans up and deallocates checksum. */ AWS_S3_API void aws_checksum_destroy(struct aws_s3_checksum *checksum); /** * Updates the running checksum with to_checksum. this can be called multiple times. 
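 * Illustrative usage sketch (assumptions: `allocator`, `algorithm`, and the byte cursors are set up
 * by the caller, and passing truncate_to == 0 writes the full digest):
 *
 *     struct aws_s3_checksum *checksum = aws_checksum_new(allocator, algorithm);
 *     aws_checksum_update(checksum, &first_chunk_cursor);
 *     aws_checksum_update(checksum, &second_chunk_cursor);
 *
 *     struct aws_byte_buf digest;
 *     aws_byte_buf_init(&digest, allocator, checksum->digest_size);
 *     aws_checksum_finalize(checksum, &digest, 0);
 *
 *     aws_checksum_destroy(checksum);
 *     aws_byte_buf_clean_up(&digest);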
*/ AWS_S3_API int aws_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *to_checksum); /** * Completes the checksum computation and writes the final digest to output. * Allocation of output is the caller's responsibility. */ AWS_S3_API int aws_checksum_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *output, size_t truncate_to); AWS_S3_API void checksum_config_init(struct checksum_config *internal_config, const struct aws_s3_checksum_config *config); #endif /* AWS_S3_CHECKSUMS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_client_impl.h000066400000000000000000000430741456575232400270560ustar00rootroot00000000000000#ifndef AWS_S3_CLIENT_IMPL_H #define AWS_S3_CLIENT_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/s3_client.h" #include #include #include #include #include #include #include #include #include #include /* TODO automate this value in the future to prevent it from becoming out-of-sync. */ #define AWS_S3_CLIENT_VERSION "0.1.x" struct aws_http_connection; struct aws_http_connection_manager; struct aws_host_resolver; struct aws_s3_endpoint; enum aws_s3_connection_finish_code { AWS_S3_CONNECTION_FINISH_CODE_SUCCESS, AWS_S3_CONNECTION_FINISH_CODE_FAILED, AWS_S3_CONNECTION_FINISH_CODE_RETRY, }; /* Callback for the owner of the endpoint when the endpoint has completely cleaned up. */ typedef void(aws_s3_endpoint_shutdown_fn)(void *user_data); struct aws_s3_endpoint_options { /* URL of the host that this endpoint refers to. */ struct aws_string *host_name; /* Callback for when this endpoint completely shuts down. */ aws_s3_endpoint_shutdown_fn *shutdown_callback; /* Bootstrap of the client to be used for spawning a connection manager. */ struct aws_client_bootstrap *client_bootstrap; /* TLS connection options to be used for the connection manager. */ const struct aws_tls_connection_options *tls_connection_options; /* DNS TTL to use for addresses for this endpoint. */ size_t dns_host_address_ttl_seconds; /* Client that owns this endpoint */ struct aws_s3_client *client; /* Maximum number of connections that can be spawned for this endpoint. */ uint32_t max_connections; /* HTTP port override. If zero, determine port based on TLS context */ uint32_t port; /** * Optional. * Proxy configuration for http connection. */ struct aws_http_proxy_config *proxy_config; /** * Optional. * Configuration for fetching proxy configuration from environment. * By Default proxy_ev_settings.aws_http_proxy_env_var_type is set to AWS_HPEV_ENABLE which means read proxy * configuration from environment. * Only works when proxy_config is not set. If both are set, configuration from proxy_config is used. */ struct proxy_env_var_settings *proxy_ev_settings; /** * Optional. * If set to 0, default value is used. */ uint32_t connect_timeout_ms; /** * Optional. * Set keepalive to periodically transmit messages for detecting a disconnected peer. */ struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options; /** * Optional. * Configuration options for connection monitoring. * If the transfer speed falls below the specified minimum_throughput_bytes_per_second, the operation is aborted. 
*/ struct aws_http_connection_monitoring_options *monitoring_options; }; /* global vtable, only used when mocking for tests */ struct aws_s3_endpoint_system_vtable { void (*acquire)(struct aws_s3_endpoint *endpoint, bool already_holding_lock); void (*release)(struct aws_s3_endpoint *endpoint); }; struct aws_s3_endpoint { struct { /* This is NOT an atomic ref-count. * The endpoint lives in hashtable: `aws_s3_client.synced_data.endpoints` * This ref-count can only be touched while holding client's lock */ size_t ref_count; } client_synced_data; /* What allocator was used to create this endpoint. */ struct aws_allocator *allocator; /* URL of the host that this endpoint refers to. */ struct aws_string *host_name; /* Connection manager that manages all connections to this endpoint. */ struct aws_http_connection_manager *http_connection_manager; /* Client that owns this endpoint */ struct aws_s3_client *client; }; /* Represents one connection on a particular VIP. */ struct aws_s3_connection { /* Endpoint that this connection is connected to. */ struct aws_s3_endpoint *endpoint; /* The underlying, currently in-use HTTP connection. */ struct aws_http_connection *http_connection; /* Request currently being processed on this connection. */ struct aws_s3_request *request; /* Current retry token for the request. If it has never been retried, this will be NULL. */ struct aws_retry_token *retry_token; }; struct aws_s3_client_vtable { struct aws_s3_meta_request *( *meta_request_factory)(struct aws_s3_client *client, const struct aws_s3_meta_request_options *options); void (*create_connection_for_request)(struct aws_s3_client *client, struct aws_s3_request *request); void (*acquire_http_connection)( struct aws_http_connection_manager *conn_manager, aws_http_connection_manager_on_connection_setup_fn *on_connection_acquired_callback, void *user_data); size_t (*get_host_address_count)( struct aws_host_resolver *host_resolver, const struct aws_string *host_name, uint32_t flags); void (*schedule_process_work_synced)(struct aws_s3_client *client); void (*process_work)(struct aws_s3_client *client); void (*endpoint_shutdown_callback)(struct aws_s3_client *client); void (*finish_destroy)(struct aws_s3_client *client); struct aws_parallel_input_stream *( *parallel_input_stream_new_from_file)(struct aws_allocator *allocator, struct aws_byte_cursor file_name); }; struct aws_s3_upload_part_timeout_stats { bool stop_timeout; /* Total number of successful upload requests */ uint64_t num_successful_upload_requests; /* Stats for the request time of first 10 succeed requests */ struct { uint64_t sum_ns; uint64_t num_samples; } initial_request_time; /* Track the timeout rate. */ struct { uint64_t num_completed; uint64_t num_failed; } timeout_rate_tracking; /* Stats for the response to first byte time of tracked succeed requests */ struct { uint64_t sum_ns; uint64_t num_samples; } response_to_first_byte_time; }; /* Represents the state of the S3 client. */ struct aws_s3_client { struct aws_allocator *allocator; struct aws_s3_buffer_pool *buffer_pool; struct aws_s3_client_vtable *vtable; struct aws_ref_count ref_count; /* Client bootstrap for setting up connection managers. */ struct aws_client_bootstrap *client_bootstrap; /* Event loop on the client bootstrap ELG for processing work/dispatching requests. */ struct aws_event_loop *process_work_event_loop; /* Event loop group for streaming request bodies back to the user. */ struct aws_event_loop_group *body_streaming_elg; /* Region of the S3 bucket. 
*/ struct aws_string *region; /* Size of parts for files when doing gets or puts. This exists on the client as configurable option that is passed * to meta requests for use. */ const size_t part_size; /* Size of parts for files when doing gets or puts. This exists on the client as configurable option that is passed * to meta requests for use. */ const uint64_t max_part_size; /* The size threshold in bytes for when to use multipart uploads for a AWS_S3_META_REQUEST_TYPE_PUT_OBJECT meta * request. Uploads over this size will automatically use a multipart upload strategy, while uploads smaller or * equal to this threshold will use a single request to upload the whole object. If not set, `part_size` will be * used as threshold. */ const uint64_t multipart_upload_threshold; /* TLS Options to be used for each connection. */ struct aws_tls_connection_options *tls_connection_options; /* Cached signing config. Can be NULL if no signing config was specified. */ struct aws_cached_signing_config_aws *cached_signing_config; /* The auth provider for S3 Express. */ aws_s3express_provider_factory_fn *s3express_provider_factory; void *factory_user_data; struct aws_s3express_credentials_provider *s3express_provider; /* Throughput target in Gbps that we are trying to reach. */ const double throughput_target_gbps; /* The calculated ideal number of VIP's based on throughput target and throughput per vip. */ const uint32_t ideal_vip_count; /** * For multi-part upload, content-md5 will be calculated if the AWS_MR_CONTENT_MD5_ENABLED is specified * or initial request has content-md5 header. * For single-part upload, if the content-md5 header is specified, it will remain unchanged. If the header is not * specified, and this is set to AWS_MR_CONTENT_MD5_ENABLED, it will be calculated. */ const enum aws_s3_meta_request_compute_content_md5 compute_content_md5; /* Hard limit on max connections set through the client config. */ const uint32_t max_active_connections_override; struct aws_atomic_var max_allowed_connections; /* Retry strategy used for scheduling request retries. */ struct aws_retry_strategy *retry_strategy; /** * Optional. * Proxy configuration for http connection. */ struct aws_http_proxy_config *proxy_config; /** * Optional. * Configuration for fetching proxy configuration from environment. * By Default proxy_ev_settings.aws_http_proxy_env_var_type is set to AWS_HPEV_ENABLE which means read proxy * configuration from environment. * Only works when proxy_config is not set. If both are set, configuration from proxy_config is used. */ struct proxy_env_var_settings *proxy_ev_settings; /** * Optional. * If set to 0, default value is used. */ uint32_t connect_timeout_ms; /** * Optional. * Set keepalive to periodically transmit messages for detecting a disconnected peer. */ struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options; /** * Configuration options for connection monitoring. * If the transfer speed falls below the specified minimum_throughput_bytes_per_second, the operation is aborted. * If user passes in NULL, default values are used. */ struct aws_http_connection_monitoring_options monitoring_options; /* tls options from proxy environment settings. */ struct aws_tls_connection_options *proxy_ev_tls_options; /* Shutdown callbacks to notify when the client is completely cleaned up. */ aws_s3_client_shutdown_complete_callback_fn *shutdown_callback; void *shutdown_callback_user_data; /* Whether read backpressure (aka flow-control window) is being applied. 
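 * Example (assumed behaviour; the public s3_client.h documentation is authoritative): with
 * enable_read_backpressure == true and initial_read_window == 16 MiB, roughly 16 MiB of body data
 * can be delivered to the caller before the client stops scheduling further ranged GETs; the
 * application then grows the flow-control window as it consumes data to let the download continue.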
*/ const bool enable_read_backpressure; /* The starting size of each meta request's flow-control window, in bytes. * Ignored unless `enable_read_backpressure` is true. */ const size_t initial_read_window; /** * Timeout in ms for upload request for request after sending to the response first byte received. */ struct aws_atomic_var upload_timeout_ms; struct { /* Number of overall requests currently being processed by the client. */ struct aws_atomic_var num_requests_in_flight; /* Number of requests being sent/received over network. */ struct aws_atomic_var num_requests_network_io[AWS_S3_META_REQUEST_TYPE_MAX]; /* Number of requests sitting in their meta request priority queue, waiting to be streamed. */ struct aws_atomic_var num_requests_stream_queued_waiting; /* Number of requests currently scheduled to be streamed the response body or are actively being streamed. */ struct aws_atomic_var num_requests_streaming_response; } stats; struct { struct aws_mutex lock; /* Hash table of endpoints that are in-use by the client. * Key: aws_string of endpoint hostname. Value: aws_s3_endpoint */ struct aws_hash_table endpoints; /* How many requests failed to be prepared. */ uint32_t num_failed_prepare_requests; /* Meta requests that need added in the work event loop. * List contains aws_s3_meta_request_work */ struct aws_linked_list pending_meta_request_work; /* aws_s3_request that are prepared and ready to be put in the threaded_data request queue. */ struct aws_linked_list prepared_requests; /* Task for processing requests from meta requests on connections. */ struct aws_task process_work_task; /* Task for trimming buffer bool. */ struct aws_task trim_buffer_pool_task; /* Number of endpoints currently allocated. Used during clean up to know how many endpoints are still in * memory.*/ uint32_t num_endpoints_allocated; /* Whether or not the client has started cleaning up all of its resources */ uint32_t active : 1; /* True if the start_destroy function is still executing, which blocks shutdown from completing. */ uint32_t start_destroy_executing : 1; /* Whether or not work processing is currently scheduled. */ uint32_t process_work_task_scheduled : 1; /* Whether or not work process is currently in progress. */ uint32_t process_work_task_in_progress : 1; /* Whether or not the body streaming ELG is allocated. If the body streaming ELG is NULL, but this is true, the * shutdown callback has not yet been called.*/ uint32_t body_streaming_elg_allocated : 1; /* Whether or not a S3 Express provider is active with the client.*/ uint32_t s3express_provider_active : 1; /* True if client has been flagged to finish destroying itself. Used to catch double-destroy bugs.*/ uint32_t finish_destroy : 1; struct aws_s3_upload_part_timeout_stats upload_part_stats; } synced_data; struct { /* Queue of prepared aws_s3_request that are waiting to be assigned to connections. */ struct aws_linked_list request_queue; /* Client list of ongoing aws_s3_meta_requests. */ struct aws_linked_list meta_requests; /* Number of requests in the request_queue linked_list. */ uint32_t request_queue_size; /* Number of requests currently being prepared. */ uint32_t num_requests_being_prepared; /* Whether or not work processing is currently scheduled. 
*/ uint32_t trim_buffer_pool_task_scheduled : 1; } threaded_data; }; struct aws_s3_meta_request_resume_token { struct aws_allocator *allocator; struct aws_ref_count ref_count; enum aws_s3_meta_request_type type; /* Note: since pause currently only supports upload, this structure only has upload specific fields. Extending it to support other types is left as exercise for future. */ struct aws_string *multipart_upload_id; size_t part_size; size_t total_num_parts; /* Note: this field is used only when s3 tells us that upload id no longer exists, and if this indicates that all parts have already been uploaded, request is completed instead of failing it.*/ size_t num_parts_completed; }; void aws_s3_client_notify_connection_finished( struct aws_s3_client *client, struct aws_s3_connection *connection, int error_code, enum aws_s3_connection_finish_code finish_code); AWS_EXTERN_C_BEGIN AWS_S3_API struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new(struct aws_allocator *allocator); AWS_S3_API void aws_s3_set_dns_ttl(size_t ttl); AWS_S3_API uint32_t aws_s3_client_get_max_requests_prepare(struct aws_s3_client *client); AWS_S3_API uint32_t aws_s3_client_get_max_active_connections( struct aws_s3_client *client, struct aws_s3_meta_request *meta_request); AWS_S3_API uint32_t aws_s3_client_get_max_requests_in_flight(struct aws_s3_client *client); AWS_S3_API uint32_t aws_s3_client_queue_requests_threaded( struct aws_s3_client *client, struct aws_linked_list *request_list, bool queue_front); AWS_S3_API struct aws_s3_request *aws_s3_client_dequeue_request_threaded(struct aws_s3_client *client); AWS_S3_API void aws_s3_client_schedule_process_work(struct aws_s3_client *client); AWS_S3_API void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client); AWS_S3_API void aws_s3_client_update_connections_threaded(struct aws_s3_client *client); AWS_S3_API struct aws_s3_endpoint *aws_s3_endpoint_new( struct aws_allocator *allocator, const struct aws_s3_endpoint_options *options); AWS_S3_API void aws_s3_client_lock_synced_data(struct aws_s3_client *client); AWS_S3_API void aws_s3_client_unlock_synced_data(struct aws_s3_client *client); /* Used for mocking */ AWS_S3_API void aws_s3_endpoint_set_system_vtable(const struct aws_s3_endpoint_system_vtable *vtable); /* Increment the endpoint's ref-count. * If `already_holding_lock` is false, then this call will briefly take hold of the client's lock */ struct aws_s3_endpoint *aws_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock); /* Decrement the endpoint's ref-count. * You MUST NOT call this while the client's lock is held. * (this call briefly holds the client's lock and may remove the endpoint * from the client's hashtable) */ void aws_s3_endpoint_release(struct aws_s3_endpoint *endpoint); AWS_S3_API extern const uint32_t g_max_num_connections_per_vip; AWS_S3_API extern const uint32_t g_num_conns_per_vip_meta_request_look_up[]; AWS_S3_API extern const size_t g_expect_timeout_offset_ms; AWS_S3_API void aws_s3_client_update_upload_part_timeout( struct aws_s3_client *client, struct aws_s3_request *finished_upload_part_request, int finished_error_code); AWS_EXTERN_C_END #endif /* AWS_S3_CLIENT_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_copy_object.h000066400000000000000000000053151456575232400270530ustar00rootroot00000000000000#ifndef AWS_S3_COPY_OBJECT_H #define AWS_S3_COPY_OBJECT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_meta_request_impl.h" enum aws_s3_copy_object_request_tag { AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE, AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS, AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY, AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, AWS_S3_COPY_OBJECT_REQUEST_TAG_MAX, }; struct aws_s3_copy_object { struct aws_s3_meta_request base; /* Usable after the Create Multipart Upload request succeeds. */ struct aws_string *upload_id; /* Only meant for use in the update function, which is never called concurrently. */ struct { uint32_t next_part_number; } threaded_update_data; /* Members to only be used when the mutex in the base type is locked. */ struct { /* Array-list of `struct aws_s3_mpu_part_info *`. * If copying via multipart upload, we fill in this info as each part gets copied, * and it's used to generate the final CompleteMultipartUpload. */ struct aws_array_list part_list; /* obtained through a HEAD request against the source object */ uint64_t content_length; size_t part_size; uint32_t total_num_parts; uint32_t num_parts_sent; uint32_t num_parts_completed; uint32_t num_parts_successful; uint32_t num_parts_failed; struct aws_http_headers *needed_response_headers; int create_multipart_upload_error_code; int complete_multipart_upload_error_code; int abort_multipart_upload_error_code; uint32_t head_object_sent : 1; uint32_t head_object_completed : 1; uint32_t copy_request_bypass_sent : 1; uint32_t copy_request_bypass_completed : 1; uint32_t create_multipart_upload_sent : 1; uint32_t create_multipart_upload_completed : 1; uint32_t complete_multipart_upload_sent : 1; uint32_t complete_multipart_upload_completed : 1; uint32_t abort_multipart_upload_sent : 1; uint32_t abort_multipart_upload_completed : 1; } synced_data; }; /* Creates a new CopyObject meta request. This will perform either * 1) A CopyObject S3 API call if the source object length is < 1 GB or * 2) a multipart copy in parallel otherwise. */ struct aws_s3_meta_request *aws_s3_meta_request_copy_object_new( struct aws_allocator *allocator, struct aws_s3_client *client, const struct aws_s3_meta_request_options *options); #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h000066400000000000000000000023711456575232400307540ustar00rootroot00000000000000#ifndef AWS_S3_DEFAULT_META_REQUEST_H #define AWS_S3_DEFAULT_META_REQUEST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_meta_request_impl.h" struct aws_s3_client; struct aws_s3_meta_request_default { struct aws_s3_meta_request base; size_t content_length; /* Actual type for the single request (may be AWS_S3_REQUEST_TYPE_UNKNOWN) */ enum aws_s3_request_type request_type; /* S3 operation name for the single request (NULL if unknown) */ struct aws_string *operation_name; /* Members to only be used when the mutex in the base type is locked. */ struct { int cached_response_status; int request_error_code; uint32_t request_sent : 1; uint32_t request_completed : 1; } synced_data; }; /* Creates a new default meta request. This will send the request as is and pass back the response. 
*/ struct aws_s3_meta_request *aws_s3_meta_request_default_new( struct aws_allocator *allocator, struct aws_s3_client *client, enum aws_s3_request_type request_type, uint64_t content_length, bool should_compute_content_md5, const struct aws_s3_meta_request_options *options); #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_endpoint_resolver.h000066400000000000000000000006061456575232400303120ustar00rootroot00000000000000#ifndef AWS_S3_ENDPOINT_RESOLVER_PRIVATE_H #define AWS_S3_ENDPOINT_RESOLVER_PRIVATE_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ extern const struct aws_byte_cursor aws_s3_endpoint_resolver_partitions; extern const struct aws_byte_cursor aws_s3_endpoint_rule_set; #endif /* AWS_S3_ENDPOINT_RESOLVER_PRIVATE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_list_objects.h000066400000000000000000000102661456575232400272400ustar00rootroot00000000000000#ifndef AWS_S3_LIST_OBJECTS_H #define AWS_S3_LIST_OBJECTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /** Struct representing the file system relevant data for an object returned from a ListObjectsV2 API call. */ struct aws_s3_object_info { /** * When a delimiter is specified in the request, S3 groups the common prefixes that contain the delimiter. * This member is set to the prefix substring ending at the first occurrence of the specified delimiter, * analogous to a directory entry of a file system. */ struct aws_byte_cursor prefix; /** * Prefix is not included. This is the object name for use with prefix for a call to GetObject() */ struct aws_byte_cursor key; /** * Size of the object in bytes. */ uint64_t size; /** * Timestamp from S3 on the latest modification, if you have a reliable clock on your machine, you COULD use this * to implement caching. */ struct aws_date_time last_modified; /** * Etag for the object, usually an MD5 hash. you COULD also use this to implement caching. */ struct aws_byte_cursor e_tag; }; /** * Invoked when an object or prefix is encountered during a ListObjectsV2 API call. Return false, to immediately * terminate the list operation. Returning true will continue until at least the current page is iterated. */ typedef int(aws_s3_on_object_fn)(const struct aws_s3_object_info *info, void *user_data); /** * Invoked upon the complete fetch and parsing of a page. If error_code is AWS_OP_SUCCESS and * aws_s3_paginator_has_more_results() returns true, you may want to call, * aws_s3_paginator_continue() from here to fetch the rest of the bucket contents. */ typedef void(aws_s3_on_object_list_finished_fn)(struct aws_s3_paginator *paginator, int error_code, void *user_data); /** * Parameters for calling aws_s3_initiate_list_objects(). All values are copied out or re-seated and reference counted. */ struct aws_s3_list_objects_params { /** * Must not be NULL. The internal call will increment the reference count on client. */ struct aws_s3_client *client; /** * Must not be empty. Name of the bucket to list. */ struct aws_byte_cursor bucket_name; /** * Optional. The prefix to list. By default, this will be the root of the bucket. If you would like to start the * list operation at a prefix (similar to a file system directory), specify that here. */ struct aws_byte_cursor prefix; /** * Optional. The prefix delimiter. By default, this is the '/' character. 
*/ struct aws_byte_cursor delimiter; /** * Optional. The continuation token for fetching the next page for ListBucketV2. You likely shouldn't set this * unless you have a special use case. */ struct aws_byte_cursor continuation_token; /** * Must not be empty. The endpoint for the S3 bucket to hit. Can be virtual or path style. */ struct aws_byte_cursor endpoint; /** * Callback to invoke on each object that's listed. */ aws_s3_on_object_fn *on_object; /** * Callback to invoke when each page of the bucket listing completes. */ aws_s3_on_object_list_finished_fn *on_list_finished; void *user_data; }; AWS_EXTERN_C_BEGIN /** * Initiates a list objects command (without executing it), and returns a paginator object to iterate the bucket with if * successful. * * Returns NULL on failure. Check aws_last_error() for details on the error that occurred. * * this is a reference counted object. It is returned with a reference count of 1. You must call * aws_s3_paginator_release() on this object when you are finished with it. * * This does not start the actual list operation. You need to call aws_s3_paginator_continue() to start * the operation. */ AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_list_objects( struct aws_allocator *allocator, const struct aws_s3_list_objects_params *params); AWS_EXTERN_C_END #endif /* AWS_S3_LIST_OBJECTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_list_parts.h000066400000000000000000000054621456575232400267420ustar00rootroot00000000000000#ifndef AWS_S3_LIST_PARTS_H #define AWS_S3_LIST_PARTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /** Struct representing part info as returned from ListParts call. */ struct aws_s3_part_info { /** * Size of the object in bytes. */ uint64_t size; /** * Part number of the given part. */ uint32_t part_number; /** * Timestamp from S3 on the latest modification, if you have a reliable clock on your machine, you COULD use this * to implement caching. */ struct aws_date_time last_modified; /** * Etag for the object, usually an MD5 hash. you COULD also use this to implement caching. */ struct aws_byte_cursor e_tag; /** * CRC32 checksum for the part. Optional. */ struct aws_byte_cursor checksumCRC32; /** * CRC32C checksum for the part. Optional. */ struct aws_byte_cursor checksumCRC32C; /** * SHA1 checksum for the part. Optional. */ struct aws_byte_cursor checksumSHA1; /** * SHA256 checksum for the part. Optional. */ struct aws_byte_cursor checksumSHA256; }; /** * Invoked when a part is encountered during ListParts call. * Return AWS_OP_ERR (after an error has been raised) to fail the list operation. * Return AWS_OP_SUCCESS to continue until at least the current page is iterated. */ typedef int(aws_s3_on_part_fn)(const struct aws_s3_part_info *info, void *user_data); /** * Parameters for calling aws_s3_list_parts_operation_new(). All values are copied out or re-seated and reference * counted. */ struct aws_s3_list_parts_params { /** * Must not be NULL. The internal call will increment the reference count on client. */ struct aws_s3_client *client; /** * Must not be empty. Name of the bucket to list. */ struct aws_byte_cursor bucket_name; /** * Must not be empty. Key with which multipart upload was initiated. */ struct aws_byte_cursor key; /** * Must not be empty. Id identifying multipart upload. */ struct aws_byte_cursor upload_id; /** * Must not be empty. The endpoint for the S3 bucket to hit. 
Can be virtual or path style. */ struct aws_byte_cursor endpoint; /** * Callback to invoke on each part that's listed. */ aws_s3_on_part_fn *on_part; /** * Associated user data. */ void *user_data; }; AWS_EXTERN_C_BEGIN AWS_S3_API struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new( struct aws_allocator *allocator, const struct aws_s3_list_parts_params *params); AWS_EXTERN_C_END #endif /* AWS_S3_LIST_PARTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h000066400000000000000000000440261456575232400302740ustar00rootroot00000000000000#ifndef AWS_S3_META_REQUEST_IMPL_H #define AWS_S3_META_REQUEST_IMPL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include "aws/s3/private/s3_checksums.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_request.h" struct aws_s3_client; struct aws_s3_connection; struct aws_s3_meta_request; struct aws_s3_request; struct aws_http_headers; struct aws_http_make_request_options; struct aws_retry_strategy; enum aws_s3_meta_request_state { AWS_S3_META_REQUEST_STATE_ACTIVE, AWS_S3_META_REQUEST_STATE_FINISHED, }; enum aws_s3_meta_request_update_flags { /* The client potentially has multiple meta requests that it can spread across connections, and the given meta request can selectively not return a request if there is a performance reason to do so.*/ AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE = 0x00000002, }; typedef void(aws_s3_meta_request_prepare_request_callback_fn)( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code, void *user_data); struct aws_s3_prepare_request_payload { struct aws_allocator *allocator; struct aws_s3_request *request; struct aws_task task; /* async step: wait for vtable->prepare_request() call to complete */ struct aws_future_void *asyncstep_prepare_request; /* callback to invoke when all request preparation work is complete */ aws_s3_meta_request_prepare_request_callback_fn *callback; void *user_data; }; /* An event to be delivered on the meta-request's io_event_loop thread. */ struct aws_s3_meta_request_event { enum aws_s3_meta_request_event_type { AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY, /* body_callback */ AWS_S3_META_REQUEST_EVENT_PROGRESS, /* progress_callback */ AWS_S3_META_REQUEST_EVENT_TELEMETRY, /* telemetry_callback */ } type; union { /* data for AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY */ struct { struct aws_s3_request *completed_request; } response_body; /* data for AWS_S3_META_REQUEST_EVENT_PROGRESS */ struct { struct aws_s3_meta_request_progress info; } progress; /* data for AWS_S3_META_REQUEST_EVENT_TELEMETRY */ struct { struct aws_s3_request_metrics *metrics; } telemetry; } u; }; struct aws_s3_meta_request_vtable { /* Update the meta request. out_request is required to be non-null. Returns true if there is any work in * progress, false if there is not. */ bool (*update)(struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); /* Run vtable->prepare_request() on the meta-request's event loop. * We do this because body streaming is slow, and we don't want it on our networking threads. * The callback may fire on any thread (an async sub-step may run on another thread). 
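 *
 * A typical implementation (a sketch inferred from struct
 * aws_s3_prepare_request_payload above, not a contract imposed by this
 * header): schedule the payload's task on meta_request->io_event_loop, call
 * vtable->prepare_request() from that task, park the returned future in
 * asyncstep_prepare_request, and invoke the payload's callback once that
 * future completes.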
*/ void (*schedule_prepare_request)( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data); /* Given a request, asynchronously prepare it for sending * (creating the correct HTTP message, reading from a stream (if necessary), computing hashes, etc.). * Returns a future, which may complete on any thread (and may complete synchronously). */ struct aws_future_void *(*prepare_request)(struct aws_s3_request *request); void (*init_signing_date_time)(struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time); /* Sign the given request. */ void (*sign_request)( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_signing_complete_fn *on_signing_complete, void *user_data); /* Called when any sending of the request is finished, including for each retry. */ void (*send_request_finish)(struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code); /* Called when the request is done being sent, and will not be retried/sent again. */ void (*finished_request)(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code); /* Called by the derived meta request when the meta request is completely finished. */ void (*finish)(struct aws_s3_meta_request *meta_request); /* Handle de-allocation of the meta request. */ void (*destroy)(struct aws_s3_meta_request *); /* Pause the given request */ int (*pause)(struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_resume_token **resume_token); }; /** * This represents one meta request, ie, one accelerated file transfer. One S3 meta request can represent multiple S3 * requests. */ struct aws_s3_meta_request { struct aws_allocator *allocator; struct aws_ref_count ref_count; void *impl; struct aws_s3_meta_request_vtable *vtable; /* Initial HTTP Message that this meta request is based on. */ struct aws_http_message *initial_request_message; /* The meta request's outgoing body comes from one of these: * 1) request_body_async_stream: if set, then async stream 1 part at a time * 2) request_body_parallel_stream: if set, then stream multiple parts in parallel * 3) initial_request_message's body_stream: else synchronously stream parts */ struct aws_async_input_stream *request_body_async_stream; struct aws_parallel_input_stream *request_body_parallel_stream; /* Part size to use for uploads and downloads. Passed down by the creating client. */ const size_t part_size; struct aws_cached_signing_config_aws *cached_signing_config; /* Client that created this meta request which also processes this request. After the meta request is finished, this * reference is removed.*/ struct aws_s3_client *client; struct aws_s3_endpoint *endpoint; /* Event loop to schedule IO work related on, ie, reading from streams, streaming parts back to the caller, etc... * After the meta request is finished, this will be reset along with the client reference.*/ struct aws_event_loop *io_event_loop; /* User data to be passed to each customer specified callback.*/ void *user_data; /* Customer specified callbacks. 
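 * At minimum the body, progress, and telemetry callbacks are delivered as
 * events on the meta request's io_event_loop thread, sequentially and
 * non-overlapping (see the event_delivery_task notes in synced_data below).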
*/ aws_s3_meta_request_headers_callback_fn *headers_callback; aws_s3_meta_request_receive_body_callback_fn *body_callback; aws_s3_meta_request_finish_fn *finish_callback; aws_s3_meta_request_shutdown_fn *shutdown_callback; aws_s3_meta_request_progress_fn *progress_callback; aws_s3_meta_request_telemetry_fn *telemetry_callback; aws_s3_meta_request_upload_review_fn *upload_review_callback; /* Customer specified callbacks to be called by our specialized callback to calculate the response checksum. */ aws_s3_meta_request_headers_callback_fn *headers_user_callback_after_checksum; aws_s3_meta_request_receive_body_callback_fn *body_user_callback_after_checksum; aws_s3_meta_request_finish_fn *finish_user_callback_after_checksum; enum aws_s3_meta_request_type type; struct aws_string *s3express_session_host; struct { struct aws_mutex lock; /* Priority queue for pending streaming requests. We use a priority queue to keep parts in order so that we * can stream them to the caller in order. */ struct aws_priority_queue pending_body_streaming_requests; /* Current state of the meta request. */ enum aws_s3_meta_request_state state; /* The sum of initial_read_window, plus all window_increment() calls. This number never goes down. */ uint64_t read_window_running_total; /* The next expected streaming part number needed to continue streaming part bodies. (For example, this will * initially be 1 for part 1, and after that part is received, it will be 2, then 3, etc.. )*/ uint32_t next_streaming_part; /* Number of parts scheduled for delivery. */ uint32_t num_parts_delivery_sent; /* Total number of parts that have been attempted to be delivered. (Will equal the sum of succeeded and * failed.)*/ uint32_t num_parts_delivery_completed; /* Task for delivering events on the meta-request's io_event_loop thread. * We do this to ensure a meta-request's callbacks are fired sequentially and non-overlapping. * If `event_delivery_array` has items in it, then this task is scheduled. * If `event_delivery_active` is true, then this task is actively running. * Delivery is not 100% complete until `event_delivery_array` is empty AND `event_delivery_active` is false * (use aws_s3_meta_request_are_events_out_for_delivery_synced() to check) */ struct aws_task event_delivery_task; /* Array of `struct aws_s3_meta_request_event` to deliver when the `event_delivery_task` runs. */ struct aws_array_list event_delivery_array; /* When true, events are actively being delivered to the user. */ bool event_delivery_active; /* The end finish result of the meta request. */ struct aws_s3_meta_request_result finish_result; /* True if the finish result has been set. */ uint32_t finish_result_set : 1; /* To track aws_s3_requests with cancellable HTTP streams */ struct aws_linked_list cancellable_http_streams_list; } synced_data; /* Anything in this structure should only ever be accessed by the client on its process work event loop task. */ struct { /* Linked list node for the meta requests linked list in the client. */ /* Note: this needs to be first for using AWS_CONTAINER_OF with the nested structure. */ struct aws_linked_list_node node; /* True if this meta request is currently in the client's list. */ bool scheduled; } client_process_work_threaded_data; /* Anything in this structure should only ever be accessed by the meta-request from its io_event_loop thread. */ struct { /* When delivering events, we swap contents with `synced_data.event_delivery_array`. 
* This is an optimization, we could have just copied the array when the task runs, * but swapping two array-lists back and forth avoids an allocation. */ struct aws_array_list event_delivery_array; } io_threaded_data; const bool should_compute_content_md5; /* deep copy of the checksum config. */ struct checksum_config checksum_config; /* checksum found in either a default get request, or in the initial head request of a multipart get */ struct aws_byte_buf meta_request_level_response_header_checksum; /* running checksum of all the parts of a default get, or ranged get meta request*/ struct aws_s3_checksum *meta_request_level_running_response_sum; }; /* Info for each part, that we need to remember until we send CompleteMultipartUpload */ struct aws_s3_mpu_part_info { uint64_t size; struct aws_string *etag; struct aws_byte_buf checksum_base64; bool was_previously_uploaded; }; AWS_EXTERN_C_BEGIN /* Initialize the base meta request structure. */ AWS_S3_API int aws_s3_meta_request_init_base( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, bool should_compute_content_md5, const struct aws_s3_meta_request_options *options, void *impl, struct aws_s3_meta_request_vtable *vtable, struct aws_s3_meta_request *base_type); /* Returns true if the meta request is still in the "active" state. */ AWS_S3_API bool aws_s3_meta_request_is_active(struct aws_s3_meta_request *meta_request); /* Returns true if the meta request is in the "finished" state. */ AWS_S3_API bool aws_s3_meta_request_is_finished(struct aws_s3_meta_request *meta_request); /* Returns true if the meta request has a finish result, which indicates that the meta request has trying to finish or * has already finished. */ AWS_S3_API bool aws_s3_meta_request_has_finish_result(struct aws_s3_meta_request *meta_request); AWS_S3_API void aws_s3_meta_request_lock_synced_data(struct aws_s3_meta_request *meta_request); AWS_S3_API void aws_s3_meta_request_unlock_synced_data(struct aws_s3_meta_request *meta_request); /* Called by the client to retrieve the next request and update the meta request's internal state. out_request is * optional, and can be NULL if just desiring to update internal state. */ AWS_S3_API bool aws_s3_meta_request_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); AWS_S3_API void aws_s3_meta_request_prepare_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data); AWS_S3_API void aws_s3_meta_request_send_request(struct aws_s3_meta_request *meta_request, struct aws_s3_connection *connection); AWS_S3_API void aws_s3_meta_request_init_signing_date_time_default( struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time); AWS_S3_API void aws_s3_meta_request_sign_request_default( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_signing_complete_fn *on_signing_complete, void *user_data); /* Default implementation for when a request finishes a particular send. */ AWS_S3_API void aws_s3_meta_request_send_request_finish_default( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code); /* Called by the client when a request is completely finished and not doing any further retries. 
*/ AWS_S3_API void aws_s3_meta_request_finished_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code); /* Called to place the request in the meta request's priority queue for streaming back to the caller. Once all requests * with a part number less than the given request has been received, the given request and the previous requests will * be scheduled for streaming. */ AWS_S3_API void aws_s3_meta_request_stream_response_body_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request); /* Add an event for delivery on the meta-request's io_event_loop thread. * These events usually correspond to callbacks that must fire sequentially and non-overlapping, * such as delivery of a part's response body. */ void aws_s3_meta_request_add_event_for_delivery_synced( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_event *event); /* Returns whether any events are out for delivery. * The meta-request's finish callback must not be invoked until this returns false. */ bool aws_s3_meta_request_are_events_out_for_delivery_synced(struct aws_s3_meta_request *meta_request); /* Cancel the requests with cancellable HTTP stream for the meta request */ void aws_s3_meta_request_cancel_cancellable_requests_synced(struct aws_s3_meta_request *meta_request, int error_code); /* Asynchronously read from the meta request's input stream. Should always be done outside of any mutex, * as reading from the stream could cause user code to call back into aws-c-s3. * This will fill the buffer to capacity, unless end of stream is reached. * It may read from the underlying stream multiple times, if that's what it takes to fill the buffer. * Returns a future whose result bool indicates whether end of stream was reached. * This future may complete on any thread, and may complete synchronously. * * Read from offset to fill the buffer */ AWS_S3_API struct aws_future_bool *aws_s3_meta_request_read_body( struct aws_s3_meta_request *meta_request, uint64_t offset, struct aws_byte_buf *buffer); bool aws_s3_meta_request_body_has_no_more_data(const struct aws_s3_meta_request *meta_request); /* Set the meta request finish result as failed. This is meant to be called sometime before aws_s3_meta_request_finish. * Subsequent calls to this function or to aws_s3_meta_request_set_success_synced will not overwrite the end result of * the meta request. */ AWS_S3_API void aws_s3_meta_request_set_fail_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *failed_request, int error_code); /* Set the meta request finish result as successful. This is meant to be called sometime before * aws_s3_meta_request_finish. Subsequent calls this function or to aws_s3_meta_request_set_fail_synced will not * overwrite the end result of the meta request. */ AWS_S3_API void aws_s3_meta_request_set_success_synced(struct aws_s3_meta_request *meta_request, int response_status); /* Returns true if the finish result has been set (ie: either aws_s3_meta_request_set_fail_synced or * aws_s3_meta_request_set_success_synced have been called.) */ AWS_S3_API bool aws_s3_meta_request_has_finish_result_synced(struct aws_s3_meta_request *meta_request); /* Virtual function called by the meta request derived type when it's completely finished and there is no other work to * be done. */ AWS_S3_API void aws_s3_meta_request_finish(struct aws_s3_meta_request *meta_request); /* Default implementation of the meta request finish function. 
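 * Presumably the stock choice for vtable->finish when a derived meta request
 * type needs no custom finish behavior (an assumption based on the naming
 * convention, not stated elsewhere in this header).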
*/ AWS_S3_API void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request); /* Sets up a meta request result structure. */ AWS_S3_API void aws_s3_meta_request_result_setup( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_result *result, struct aws_s3_request *failed_request, int response_status, int error_code); /* Cleans up a meta request result structure. */ AWS_S3_API void aws_s3_meta_request_result_clean_up( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_result *result); AWS_S3_API bool aws_s3_meta_request_checksum_config_has_algorithm( struct aws_s3_meta_request *meta_request, enum aws_s3_checksum_algorithm algorithm); AWS_EXTERN_C_END #endif /* AWS_S3_META_REQUEST_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_paginator.h000066400000000000000000000117751456575232400265460ustar00rootroot00000000000000#ifndef AWS_S3_PAGINATOR_H #define AWS_S3_PAGINATOR_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include /** * Wrapper for a generic paginated operation. * Provides implementations for how to construct next paginated and how to read the request. * Can be used with either paginator or plugged into request loop. */ struct aws_s3_paginated_operation; /** * Generic driver for paginated operations. * Provides functionality to send requests to iterate over pages of the operation. */ struct aws_s3_paginator; typedef int(aws_s3_next_http_message_fn)( struct aws_byte_cursor *continuation_token, void *user_data, struct aws_http_message **out_message); typedef int(aws_s3_on_result_node_encountered_fn)(struct aws_xml_node *node, void *user_data); typedef void(aws_s3_on_page_finished_fn)(struct aws_s3_paginator *paginator, int error_code, void *user_data); typedef void(aws_s3_on_paginated_operation_cleanup_fn)(void *user_data); /** * Parameters for initiating paginator. All values are copied out or re-seated and reference counted. */ struct aws_s3_paginator_params { /** * Must not be NULL. The internal call will increment the reference count on client. */ struct aws_s3_client *client; /** * Underlying paginated operation. Must not be NULL. */ struct aws_s3_paginated_operation *operation; /** * Optional. The continuation token for fetching the next page. You likely shouldn't set this * unless you have a special use case. */ struct aws_byte_cursor continuation_token; /** * Must not be empty. Name of the bucket to list. */ struct aws_byte_cursor bucket_name; /** * Must not be empty. Key with which multipart upload was initiated. */ struct aws_byte_cursor endpoint; /** * Callback to invoke on each part that's listed. */ aws_s3_on_page_finished_fn *on_page_finished_fn; /** * User data passed back into callbacks. */ void *user_data; }; /** * Parameters for initiating paginated operation. All values are copied out or re-seated and reference counted. */ struct aws_s3_paginated_operation_params { /** * Name of the top level result node. Must not be empty. */ struct aws_byte_cursor result_xml_node_name; /** * Name of the continuation token node. Must not be empty. */ struct aws_byte_cursor continuation_token_node_name; /** * Function to generate next message. */ aws_s3_next_http_message_fn *next_message; /** * Function to parse result node. */ aws_s3_on_result_node_encountered_fn *on_result_node_encountered_fn; /** * Callback for when operation is cleaned. 
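 * Typically used to release whatever the associated user_data points at.
 *
 * Overall flow, sketched only from the declarations in this header (not a
 * guaranteed contract): create the operation with
 * aws_s3_paginated_operation_new(), hand it to aws_s3_initiate_paginator()
 * via aws_s3_paginator_params, drive paging with aws_s3_paginator_continue(),
 * check aws_s3_paginator_has_more_results() between pages, and release both
 * objects with their respective _release() functions when finished.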
*/ aws_s3_on_paginated_operation_cleanup_fn *on_paginated_operation_cleanup; /** * Associated user data. */ void *user_data; }; AWS_EXTERN_C_BEGIN AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_paginator( struct aws_allocator *allocator, const struct aws_s3_paginator_params *params); AWS_S3_API void aws_s3_paginator_acquire(struct aws_s3_paginator *paginator); AWS_S3_API void aws_s3_paginator_release(struct aws_s3_paginator *paginator); AWS_S3_API struct aws_s3_paginated_operation *aws_s3_paginated_operation_new( struct aws_allocator *allocator, const struct aws_s3_paginated_operation_params *params); AWS_S3_API void aws_s3_paginated_operation_acquire(struct aws_s3_paginated_operation *operation); AWS_S3_API void aws_s3_paginated_operation_release(struct aws_s3_paginated_operation *operation); /** * Start the paginated operation. If there are more results to fetch, it will begin that work. * * Signing_config contains information for SigV4 signing for the operation. It must not be NULL. It will be copied. * * Returns AWS_OP_SUCCESS on successful start of the operation, and AWS_OP_ERR otherwise. Check aws_last_error() for * more information on the error that occurred. */ AWS_S3_API int aws_s3_paginator_continue( struct aws_s3_paginator *paginator, const struct aws_signing_config_aws *signing_config); /** * If the paginator has more results to fetch, returns true. */ AWS_S3_API bool aws_s3_paginator_has_more_results(const struct aws_s3_paginator *paginator); /** * Construct next message for the given operation. */ AWS_S3_API int aws_s3_construct_next_paginated_request_http_message( struct aws_s3_paginated_operation *operation, struct aws_byte_cursor *continuation_token, struct aws_http_message **out_message); /** * Parse received response for operation. */ AWS_S3_API int aws_s3_paginated_operation_on_response( struct aws_s3_paginated_operation *operation, struct aws_byte_cursor *response_body, struct aws_string **continuation_token_out, bool *has_more_results_out); AWS_EXTERN_C_END #endif /* AWS_S3_PAGINATOR_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_parallel_input_stream.h000066400000000000000000000060561456575232400311440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_S3_PARALLEL_INPUT_STREAM_H #define AWS_S3_PARALLEL_INPUT_STREAM_H #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_byte_buf; struct aws_future_bool; struct aws_input_stream; struct aws_event_loop_group; struct aws_parallel_input_stream { const struct aws_parallel_input_stream_vtable *vtable; struct aws_allocator *alloc; struct aws_ref_count ref_count; void *impl; }; struct aws_parallel_input_stream_vtable { /** * Destroy the stream, its refcount has reached 0. */ void (*destroy)(struct aws_parallel_input_stream *stream); /** * Read into the buffer in parallel. * The implementation needs to support this to be invoked concurrently from multiple threads */ struct aws_future_bool *( *read)(struct aws_parallel_input_stream *stream, uint64_t offset, struct aws_byte_buf *dest); }; AWS_EXTERN_C_BEGIN /** * Initialize aws_parallel_input_stream "base class" */ AWS_S3_API void aws_parallel_input_stream_init_base( struct aws_parallel_input_stream *stream, struct aws_allocator *alloc, const struct aws_parallel_input_stream_vtable *vtable, void *impl); /** * Increment reference count. * You may pass in NULL (has no effect). * Returns whatever pointer was passed in. 
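 * Pair each acquire with a matching aws_parallel_input_stream_release().
 *
 * Usage sketch (hypothetical file name, error handling omitted, based only on
 * the declarations in this header): create a stream with
 * aws_parallel_input_stream_new_from_file(allocator,
 * aws_byte_cursor_from_c_str("upload.bin")), issue several
 * aws_parallel_input_stream_read() calls at different offsets, possibly from
 * different threads, wait on each returned aws_future_bool, then release the
 * stream.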
*/ AWS_S3_API struct aws_parallel_input_stream *aws_parallel_input_stream_acquire(struct aws_parallel_input_stream *stream); /** * Decrement reference count. * You may pass in NULL (has no effect). * Always returns NULL. */ AWS_S3_API struct aws_parallel_input_stream *aws_parallel_input_stream_release(struct aws_parallel_input_stream *stream); /** * Read from the offset until fill the dest, or EOF reached. * It's thread safe to be called from multiple threads without waiting for other read to complete * * @param stream The stream to read from * @param offset The offset in the stream from beginning to start reading * @param dest The output buffer read to * @return a future, which will contain an error code if something went wrong, * or a result bool indicating whether EOF has been reached. */ AWS_S3_API struct aws_future_bool *aws_parallel_input_stream_read( struct aws_parallel_input_stream *stream, uint64_t offset, struct aws_byte_buf *dest); /** * Create a new file based parallel input stream. * * This implementation will open a file handler when the read happens, and seek to the offset to start reading. Close * the file handler as read finishes. * * @param allocator memory allocator * @param file_name The file path to read from * @return aws_parallel_input_stream */ AWS_S3_API struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file( struct aws_allocator *allocator, struct aws_byte_cursor file_name); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_S3_PARALLEL_INPUT_STREAM_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_platform_info.h000066400000000000000000000060021456575232400274040ustar00rootroot00000000000000#ifndef AWS_S3_S3_PLATFORM_INFO_H #define AWS_S3_S3_PLATFORM_INFO_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include struct aws_s3_platform_info_loader; AWS_EXTERN_C_BEGIN /** * Initializes and returns a loader for querying the compute platform for information needed for making configuration * decisions. * This will never be NULL. */ AWS_S3_API struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_new(struct aws_allocator *allocator); AWS_S3_API struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_acquire(struct aws_s3_platform_info_loader *loader); AWS_S3_API struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_release(struct aws_s3_platform_info_loader *loader); /** * Retrieves the pre-configured metadata for a given ec2 instance type. If no such pre-configuration exists, returns * NULL. */ AWS_S3_API const struct aws_s3_platform_info *aws_s3_get_platform_info_for_instance_type( struct aws_s3_platform_info_loader *loader, struct aws_byte_cursor instance_type_name); /** * Retrieves the metadata for the current environment. If EC2 instance type is unknown, or it is not an EC2 instance at * all, this value will still include the information about the system that could be determined. This value will never * be NULL. * This API is not thread safe. */ AWS_S3_API const struct aws_s3_platform_info *aws_s3_get_platform_info_for_current_environment( struct aws_s3_platform_info_loader *loader); /* * Retrieves a list of EC2 instance types with recommended configuration. * Returns aws_array_list. The caller is responsible for cleaning up the array list. 
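 * (e.g. with aws_array_list_clean_up(); whether individual entries need
 * separate cleanup is not stated here, so verify against the implementation.)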
*/ AWS_S3_API struct aws_array_list aws_s3_get_recommended_platforms(struct aws_s3_platform_info_loader *loader); /** * Returns true if the current process is running on an Amazon EC2 instance powered by Nitro. */ AWS_S3_API bool aws_s3_is_running_on_ec2_nitro(struct aws_s3_platform_info_loader *loader); /** * Returns an EC2 instance type assuming this executable is running on Amazon EC2 powered by nitro. * * First this function will check it's running on EC2 via. attempting to read DMI info to avoid making IMDS calls. * * If the function detects it's on EC2, and it was able to detect the instance type without a call to IMDS * it will return it. * * Finally, it will call IMDS and return the instance type from there. * * Note that in the case of the IMDS call, a new client stack is spun up using 1 background thread. The call is made * synchronously with a 1 second timeout: It's not cheap. To make this easier, the underlying result is cached * internally and will be freed when aws_s3_library_clean_up() is called. * @return byte_cursor containing the instance type. If this is empty, the instance type could not be determined. */ AWS_S3_API struct aws_byte_cursor aws_s3_get_ec2_instance_type(struct aws_s3_platform_info_loader *loader); AWS_EXTERN_C_END #endif /* AWS_S3_S3_PLATFORM_INFO_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_request.h000066400000000000000000000300711456575232400262400ustar00rootroot00000000000000#ifndef AWS_S3_REQUEST_H #define AWS_S3_REQUEST_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include struct aws_http_message; struct aws_signable; struct aws_s3_meta_request; enum aws_s3_request_flags { AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS = 0x00000001, AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY = 0x00000002, AWS_S3_REQUEST_FLAG_ALWAYS_SEND = 0x00000004, AWS_S3_REQUEST_FLAG_PART_SIZE_REQUEST_BODY = 0x00000008, }; /** * Information sent in the telemetry_callback after each aws_s3_request finished/retried from meta request. */ struct aws_s3_request_metrics { struct aws_allocator *allocator; struct { /* The time stamp when the request started by S3 client, which is prepared time by the client. Timestamps * are from `aws_high_res_clock_get_ticks`. This will always be available. */ int64_t start_timestamp_ns; /* The time stamp when the request finished by S3 client succeed or failed or to be retried. Timestamps * are from `aws_high_res_clock_get_ticks`. This will always be available. */ int64_t end_timestamp_ns; /* The time duration for the request from start to finish. end_timestamp_ns - start_timestamp_ns. This will * always be available. */ int64_t total_duration_ns; /* The time stamp when the request started to be encoded. -1 means data not available. Timestamp * are from `aws_high_res_clock_get_ticks` */ int64_t send_start_timestamp_ns; /* The time stamp when the request finished to be encoded. -1 means data not available. * Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t send_end_timestamp_ns; /* The time duration for the request from start encoding to finish encoding (send_end_timestamp_ns - * send_start_timestamp_ns). When send_end_timestamp_ns is -1, means data not available. */ int64_t sending_duration_ns; /* The time stamp when the response started to be received from the network channel. -1 means data not * available. 
Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t receive_start_timestamp_ns; /* The time stamp when the response finished to be received from the network channel. -1 means data not * available. Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t receive_end_timestamp_ns; /* The time duration for the request from start receiving to finish receiving (receive_end_timestamp_ns - * receive_start_timestamp_ns). When receive_end_timestamp_ns is 0, means data not available. */ int64_t receiving_duration_ns; /* The time stamp when the request started to be signed. -1 means data not * available. Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t sign_start_timestamp_ns; /* The time stamp when the response finished to be signed. -1 means data not * available. Timestamp are from `aws_high_res_clock_get_ticks` */ int64_t sign_end_timestamp_ns; /* The time duration for the request from start signing to finish signing (sign_end_timestamp_ns - * sign_start_timestamp_ns). When sign_end_timestamp_ns is 0, means data not available. */ int64_t signing_duration_ns; } time_metrics; struct { /* Response status code for the request */ int response_status; /* HTTP Headers of the response received. */ struct aws_http_headers *response_headers; /* Path and query of the request. */ struct aws_string *request_path_query; /* The host address of the request. */ struct aws_string *host_address; /* The the request ID header value. */ struct aws_string *request_id; /* S3 operation name for the request (NULL if unknown) */ struct aws_string *operation_name; /* The type of request made */ enum aws_s3_request_type request_type; } req_resp_info_metrics; struct { /* The IP address of the request connected to */ struct aws_string *ip_address; /* The pointer to the connection that request was made from */ void *connection_id; /* The aws_thread_id_t to the thread that request ran on */ aws_thread_id_t thread_id; /* The stream-id, which is the idex when the stream was activated. */ uint32_t stream_id; /* CRT error code when the aws_s3_request finishes. */ int error_code; } crt_info_metrics; struct aws_ref_count ref_count; }; /* Represents a single request made to S3. */ struct aws_s3_request { /* Linked list node used for queuing. */ struct aws_linked_list_node node; /* Linked list node used for tracking the request is active from HTTP level. */ struct aws_linked_list_node cancellable_http_streams_list_node; /* The meta request lock must be held to access the data */ struct { /* The underlying http stream, only valid when the request is active from HTTP level */ struct aws_http_stream *cancellable_http_stream; } synced_data; /* TODO Ref count on the request is no longer needed--only one part of code should ever be holding onto a request, * and we can just transfer ownership.*/ struct aws_ref_count ref_count; struct aws_allocator *allocator; /* Owning meta request. */ struct aws_s3_meta_request *meta_request; /* Request body to use when sending the request. The contents of this body will be re-used if a request is * retried.*/ struct aws_byte_buf request_body; struct aws_s3_buffer_pool_ticket *ticket; /* Beginning range of this part. */ /* TODO currently only used by auto_range_get, could be hooked up to auto_range_put as well. */ uint64_t part_range_start; /* Last byte of this part.*/ /* TODO currently only used by auto_range_get, could be hooked up to auto_range_put as well. */ uint64_t part_range_end; /* Part number that this request refers to. If this is not a part, this can be 0. 
(S3 Part Numbers start at 1.) * However, must currently be a valid part number (ie: greater than 0) if the response body is to be streamed to the * caller. */ uint32_t part_number; /* The upload_timeout used. Zero, if the request is not a upload part */ size_t upload_timeout_ms; /* Number of times aws_s3_meta_request_prepare has been called for a request. During the first call to the virtual * prepare function, this will be 0.*/ uint32_t num_times_prepared; /* checksum found in the header of an individual get part http request */ struct aws_byte_buf request_level_response_header_checksum; /* running checksum of the response to an individual get part http request */ struct aws_s3_checksum *request_level_running_response_sum; /* The algorithm used to validate the checksum */ enum aws_s3_checksum_algorithm validation_algorithm; /* Get request only, was there a checksum to validate */ bool did_validate; /* Get request only, if there was an attached checksum to validate did it match the computed checksum */ bool checksum_match; /* Tag that defines what the built request will actually consist of. This is meant to be space for an enum defined * by the derived type. Request tags do not necessarily map 1:1 with actual S3 API requests. (For example, they can * be more contextual, like "first part" instead of just "part".) */ /* TODO: Eliminate the concept of "request tag" and just use request_type. * It's confusing having 2 concepts that are so similar. * There's only 1 case where 2 tags used the same type, * we can use some other bool/flag to differentiate this 1 case. */ int request_tag; /* Actual S3 type for the single request (may be AWS_S3_REQUEST_TYPE_UNKNOWN) */ enum aws_s3_request_type request_type; /* S3 operation name for the single request (e.g. "CompleteMultipartUpload") (NULL if unknown) */ struct aws_string *operation_name; /* Members of this structure will be repopulated each time the request is sent. If the request fails, and needs to * be retried, then the members of this structure will be cleaned up and re-populated on the next send. */ /* TODO rename this anonymous structure to something more intuitive. (Maybe "attempt_data")*/ struct { /* The HTTP message to send for this request. */ struct aws_http_message *message; /* Signable created for the above message. */ struct aws_signable *signable; /* Recorded response headers for the request. Set only when the request desc has record_response_headers set to * true or when this response indicates an error. */ struct aws_http_headers *response_headers; /* Recorded response body of the request. */ struct aws_byte_buf response_body; /* Returned response status of this request. */ int response_status; /* The metrics for the request telemetry */ struct aws_s3_request_metrics *metrics; } send_data; /* When true, response headers from the request will be stored in the request's response_headers variable. */ uint32_t record_response_headers : 1; /* When true, the response body buffer will be allocated in the size of a part. */ uint32_t has_part_size_response_body : 1; /* When true, the request body buffer will be allocated in the size of a part. */ uint32_t has_part_size_request_body : 1; /* When true, this request is being tracked by the client for limiting the amount of in-flight-requests/stats. */ uint32_t tracked_by_client : 1; /* When true, even when the meta request has a finish result set, this request will be sent. */ uint32_t always_send : 1; /* When true, this request is intended to find out the object size. 
This is currently only used by auto_range_get. */ uint32_t discovers_object_size : 1; /* When true, this request does not represent a useful http request and * must not be sent, however client must still call corresponding finished * callback for the request. Those requests can occur when request is * optimistically created during update, but cannot be prepared. ex. when * put has no content length, requests will be scheduled as regular to * ensure fair distribution against other requests, but can also result in * requests for uploading data after the end of the stream (those requests * will use below flag to indicate that they should not be sent). */ uint32_t is_noop : 1; /* When true, this request has already been uploaded. we still prepare the request to check the durability. */ uint32_t was_previously_uploaded : 1; }; AWS_EXTERN_C_BEGIN /* Create a new s3 request structure with the given options. */ AWS_S3_API struct aws_s3_request *aws_s3_request_new( struct aws_s3_meta_request *meta_request, int request_tag, enum aws_s3_request_type request_type, uint32_t part_number, uint32_t flags); /* Set up the request to be sent. Called each time before the request is sent. Will initially call * aws_s3_request_clean_up_send_data to clear out anything previously existing in send_data. */ AWS_S3_API void aws_s3_request_setup_send_data(struct aws_s3_request *request, struct aws_http_message *message); /* Clear out send_data members so that they can be repopulated before the next send. */ AWS_S3_API void aws_s3_request_clean_up_send_data(struct aws_s3_request *request); AWS_S3_API struct aws_s3_request *aws_s3_request_acquire(struct aws_s3_request *request); AWS_S3_API struct aws_s3_request *aws_s3_request_release(struct aws_s3_request *request); AWS_S3_API struct aws_s3_request_metrics *aws_s3_request_metrics_new( struct aws_allocator *allocator, const struct aws_s3_request *request, const struct aws_http_message *message); AWS_EXTERN_C_END #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_request_messages.h000066400000000000000000000152161456575232400301330ustar00rootroot00000000000000#ifndef AWS_S3_REQUEST_MESSAGES_H #define AWS_S3_REQUEST_MESSAGES_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/s3.h" #include "aws/s3/s3_client.h" #include #include #include struct aws_allocator; struct aws_http_message; struct aws_byte_buf; struct aws_byte_cursor; struct aws_string; struct aws_array_list; struct checksum_config; AWS_EXTERN_C_BEGIN /* Copy message (but not the body) and retain all headers */ AWS_S3_API struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_all_headers( struct aws_allocator *allocator, struct aws_http_message *message); /* Copy message (but not the body) and exclude specific headers. * exclude_x_amz_meta controls whether S3 user metadata headers (prefixed with "x-amz-meta) are excluded.*/ AWS_S3_API struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_filter_headers( struct aws_allocator *allocator, struct aws_http_message *message, const struct aws_byte_cursor *excluded_headers_arrays, size_t excluded_headers_size, bool exclude_x_amz_meta); /* Copy headers from one message to the other and exclude specific headers. 
* exclude_x_amz_meta controls whether S3 user metadata headers (prefixed with "x-amz-meta) are excluded.*/ AWS_S3_API void aws_s3_message_util_copy_headers( struct aws_http_message *source_message, struct aws_http_message *dest_message, const struct aws_byte_cursor *excluded_headers_arrays, size_t excluded_headers_size, bool exclude_x_amz_meta); AWS_S3_API struct aws_input_stream *aws_s3_message_util_assign_body( struct aws_allocator *allocator, struct aws_byte_buf *byte_buf, struct aws_http_message *out_message, const struct checksum_config *checksum_config, struct aws_byte_buf *out_checksum); /* Return true if checksum headers has been set. */ AWS_S3_API bool aws_s3_message_util_check_checksum_header(struct aws_http_message *message); /* Create an HTTP request for an S3 Ranged Get Object Request, using the given request as a basis */ AWS_S3_API struct aws_http_message *aws_s3_ranged_get_object_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, uint64_t range_start, uint64_t range_end); AWS_S3_API int aws_s3_message_util_set_multipart_request_path( struct aws_allocator *allocator, const struct aws_string *upload_id, uint32_t part_number, bool append_uploads_suffix, struct aws_http_message *message); /* Create an HTTP request for an S3 Create-Multipart-Upload request. */ AWS_S3_API struct aws_http_message *aws_s3_create_multipart_upload_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, enum aws_s3_checksum_algorithm algorithm); /* Create an HTTP request for an S3 Put Object request, using the original request as a basis. Creates and assigns a * body stream using the passed in buffer. If multipart is not needed, part number and upload_id can be 0 and NULL, * respectively. */ AWS_S3_API struct aws_http_message *aws_s3_upload_part_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, struct aws_byte_buf *buffer, uint32_t part_number, const struct aws_string *upload_id, bool should_compute_content_md5, const struct checksum_config *checksum_config, struct aws_byte_buf *encoded_checksum_output); /* Create an HTTP request for an S3 UploadPartCopy request, using the original request as a basis. * If multipart is not needed, part number and upload_id can be 0 and NULL, * respectively. */ AWS_S3_API struct aws_http_message *aws_s3_upload_part_copy_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, struct aws_byte_buf *buffer, uint32_t part_number, uint64_t range_start, uint64_t range_end, const struct aws_string *upload_id, bool should_compute_content_md5); /* Create an HTTP request for an S3 Complete-Multipart-Upload request. Creates the necessary XML payload using the * passed in array list of `struct aws_s3_mpu_part_info *`. Buffer passed in will be used to store * said XML payload, which will be used as the body. */ AWS_S3_API struct aws_http_message *aws_s3_complete_multipart_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, struct aws_byte_buf *body_buffer, const struct aws_string *upload_id, const struct aws_array_list *parts, enum aws_s3_checksum_algorithm algorithm); AWS_S3_API struct aws_http_message *aws_s3_abort_multipart_upload_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, const struct aws_string *upload_id); /* Creates a HEAD GetObject request to get the size of the specified object. 
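 * (In HTTP terms this is a HeadObject call; the size is typically taken from
 * the Content-Length header of the response.)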
*/ AWS_S3_API struct aws_http_message *aws_s3_get_object_size_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, struct aws_byte_cursor source_bucket, struct aws_byte_cursor source_key); /* Creates a HEAD GetObject sub-request to get the size of the source object of a Copy meta request. */ AWS_S3_API struct aws_http_message *aws_s3_get_source_object_size_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message); /* Add content-md5 header to the http message passed in. The MD5 will be computed from the input_buf */ AWS_S3_API int aws_s3_message_util_add_content_md5_header( struct aws_allocator *allocator, struct aws_byte_buf *input_buf, struct aws_http_message *message); AWS_S3_API extern const struct aws_byte_cursor g_s3_create_multipart_upload_excluded_headers[]; AWS_S3_API extern const size_t g_s3_create_multipart_upload_excluded_headers_count; AWS_S3_API extern const struct aws_byte_cursor g_s3_upload_part_excluded_headers[]; AWS_S3_API extern const size_t g_s3_upload_part_excluded_headers_count; AWS_S3_API extern const struct aws_byte_cursor g_s3_complete_multipart_upload_excluded_headers[]; AWS_S3_API extern const size_t g_s3_complete_multipart_upload_excluded_headers_count; AWS_S3_API extern const struct aws_byte_cursor g_s3_abort_multipart_upload_excluded_headers[]; AWS_S3_API extern const size_t g_s3_abort_multipart_upload_excluded_headers_count; AWS_S3_API extern const struct aws_byte_cursor g_s3_list_parts_excluded_headers[]; AWS_S3_API extern const size_t g_s3_list_parts_excluded_headers_count; AWS_S3_API extern const struct aws_byte_cursor g_s3_list_parts_with_checksum_excluded_headers[]; AWS_S3_API extern const size_t g_s3_list_parts_with_checksum_excluded_headers_count; AWS_EXTERN_C_END #endif /* AWS_S3_REQUEST_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3_util.h000066400000000000000000000225571456575232400255370ustar00rootroot00000000000000#ifndef AWS_S3_UTIL_H #define AWS_S3_UTIL_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* This file provides access to useful constants and simple utility functions. 
*/ #include #include #include #if ASSERT_LOCK_HELD # define ASSERT_SYNCED_DATA_LOCK_HELD(object) \ { \ int cached_error = aws_last_error(); \ AWS_ASSERT(aws_mutex_try_lock(&(object)->synced_data.lock) == AWS_OP_ERR); \ aws_raise_error(cached_error); \ } #else # define ASSERT_SYNCED_DATA_LOCK_HELD(object) #endif #define KB_TO_BYTES(kb) ((kb)*1024) #define MB_TO_BYTES(mb) ((mb)*1024 * 1024) #define GB_TO_BYTES(gb) ((gb)*1024 * 1024 * 1024ULL) #define MS_TO_NS(ms) ((uint64_t)(ms)*1000000) #define SEC_TO_NS(ms) ((uint64_t)(ms)*1000000000) struct aws_allocator; struct aws_http_stream; struct aws_http_headers; struct aws_http_message; struct aws_s3_client; struct aws_s3_request; struct aws_s3_meta_request; struct aws_cached_signing_config_aws { struct aws_allocator *allocator; struct aws_string *service; struct aws_string *region; struct aws_string *signed_body_value; struct aws_signing_config_aws config; }; AWS_EXTERN_C_BEGIN AWS_S3_API extern const struct aws_byte_cursor g_content_md5_header_name; AWS_S3_API extern const struct aws_byte_cursor g_trailer_header_name; AWS_S3_API extern const struct aws_byte_cursor g_request_validation_mode; AWS_S3_API extern const struct aws_byte_cursor g_enabled; AWS_S3_API extern const struct aws_byte_cursor g_create_mpu_checksum_header_name; AWS_S3_API extern const struct aws_byte_cursor g_crc32c_header_name; AWS_S3_API extern const struct aws_byte_cursor g_crc32_header_name; AWS_S3_API extern const struct aws_byte_cursor g_sha1_header_name; AWS_S3_API extern const struct aws_byte_cursor g_sha256_header_name; AWS_S3_API extern const struct aws_byte_cursor g_crc32c_create_mpu_header_name; AWS_S3_API extern const struct aws_byte_cursor g_crc32_create_mpu_header_name; AWS_S3_API extern const struct aws_byte_cursor g_sha1_create_mpu_header_name; AWS_S3_API extern const struct aws_byte_cursor g_sha256_create_mpu_header_name; AWS_S3_API extern const struct aws_byte_cursor g_crc32c_complete_mpu_name; AWS_S3_API extern const struct aws_byte_cursor g_crc32_complete_mpu_name; AWS_S3_API extern const struct aws_byte_cursor g_sha1_complete_mpu_name; AWS_S3_API extern const struct aws_byte_cursor g_sha256_complete_mpu_name; AWS_S3_API extern const struct aws_byte_cursor g_s3_client_version; AWS_S3_API extern const struct aws_byte_cursor g_user_agent_header_name; AWS_S3_API extern const struct aws_byte_cursor g_user_agent_header_product_name; AWS_S3_API extern const struct aws_byte_cursor g_acl_header_name; AWS_S3_API extern const struct aws_byte_cursor g_host_header_name; AWS_S3_API extern const struct aws_byte_cursor g_content_type_header_name; AWS_S3_API extern const struct aws_byte_cursor g_content_encoding_header_name; AWS_S3_API extern const struct aws_byte_cursor g_content_encoding_header_aws_chunked; AWS_S3_API extern const struct aws_byte_cursor g_content_length_header_name; AWS_S3_API extern const struct aws_byte_cursor g_decoded_content_length_header_name; AWS_S3_API extern const struct aws_byte_cursor g_etag_header_name; AWS_S3_API extern const size_t g_s3_min_upload_part_size; AWS_S3_API extern const struct aws_byte_cursor g_s3_service_name; AWS_S3_API extern const struct aws_byte_cursor g_s3express_service_name; AWS_S3_API extern const struct aws_byte_cursor g_range_header_name; extern const struct aws_byte_cursor g_if_match_header_name; extern const struct aws_byte_cursor g_request_id_header_name; AWS_S3_API extern const struct aws_byte_cursor g_content_range_header_name; AWS_S3_API extern const struct aws_byte_cursor g_accept_ranges_header_name; AWS_S3_API 
extern const struct aws_byte_cursor g_mp_parts_count_header_name; AWS_S3_API extern const struct aws_byte_cursor g_post_method; AWS_S3_API extern const struct aws_byte_cursor g_head_method; AWS_S3_API extern const struct aws_byte_cursor g_delete_method; AWS_S3_API extern const uint32_t g_s3_max_num_upload_parts; /** * Cache and initial the signing config based on the client. * * @param client * @param signing_config * @return struct aws_cached_signing_config_aws* */ struct aws_cached_signing_config_aws *aws_cached_signing_config_new( struct aws_s3_client *client, const struct aws_signing_config_aws *signing_config); void aws_cached_signing_config_destroy(struct aws_cached_signing_config_aws *cached_signing_config); /* Sets all headers specified for src on dest */ AWS_S3_API void copy_http_headers(const struct aws_http_headers *src, struct aws_http_headers *dest); /** * Get content of XML element at path. * * path_name_array must be a C array of char*, with a NULL as its final entry. * * For example: * Given `xml_doc`: "SlowDown" * And `path_name_array`: {"Error", "Code", NULL} * `out_body` will get set to: "SlowDown" * * Returns AWS_OP_SUCCESS or AWS_OP_ERR. * Raises AWS_ERROR_STRING_MATCH_NOT_FOUND if path not found in XML, * or AWS_ERROR_INVALID_XML if the XML can't be parsed. * * DO NOT make this function public without a lot of thought. * The whole thing of passing a C-array of C-strings with a NULL sentinel * is unconventional for this codebase. */ AWS_S3_API int aws_xml_get_body_at_path( struct aws_allocator *allocator, struct aws_byte_cursor xml_doc, const char *path_name_array[], struct aws_byte_cursor *out_body); /* replace " with escaped /" * Returns initialized aws_byte_buf */ AWS_S3_API struct aws_byte_buf aws_replace_quote_entities(struct aws_allocator *allocator, struct aws_byte_cursor src); /* strip quotes if string is enclosed in quotes. does not remove quotes if they only appear on either side of the string */ AWS_S3_API struct aws_string *aws_strip_quotes(struct aws_allocator *allocator, struct aws_byte_cursor in_cur); /* TODO could be moved to aws-c-common. */ AWS_S3_API int aws_last_error_or_unknown(void); AWS_S3_API void aws_s3_add_user_agent_header(struct aws_allocator *allocator, struct aws_http_message *message); /* Given the response headers list, finds the Content-Range header and parses the range-start, range-end and * object-size. All output arguments are optional.*/ AWS_S3_API int aws_s3_parse_content_range_response_header( struct aws_allocator *allocator, struct aws_http_headers *response_headers, uint64_t *out_range_start, uint64_t *out_range_end, uint64_t *out_object_size); /* Given response headers, parses the content-length from a content-length response header.*/ AWS_S3_API int aws_s3_parse_content_length_response_header( struct aws_allocator *allocator, struct aws_http_headers *response_headers, uint64_t *out_content_length); /* * Given the request headers list, finds the Range header and parses the range-start and range-end. All arguments are * required. * */ AWS_S3_API int aws_s3_parse_request_range_header( struct aws_http_headers *request_headers, bool *out_has_start_range, bool *out_has_end_range, uint64_t *out_start_range, uint64_t *out_end_range); /* Calculate the number of parts based on overall object-range and part_size. 
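 * Informally (an approximation, the implementation is authoritative): the
 * first part covers first_part_size bytes, and the remaining
 * (object_range_end - object_range_start + 1 - first_part_size) bytes are
 * split into ceil(remaining / part_size) additional parts.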
*/ AWS_S3_API uint32_t aws_s3_calculate_auto_ranged_get_num_parts( size_t part_size, uint64_t first_part_size, uint64_t object_range_start, uint64_t object_range_end); /** * Calculates the optimal part size and num parts given the 'content_length' and 'client_part_size'. * This will increase the part size to stay within S3's number of parts. * If the required part size exceeds the 'client_max_part_size' or * if the system cannot support the required part size, it will raise an 'AWS_ERROR_INVALID_ARGUMENT' argument. */ AWS_S3_API int aws_s3_calculate_optimal_mpu_part_size_and_num_parts( uint64_t content_length, size_t client_part_size, uint64_t client_max_part_size, size_t *out_part_size, uint32_t *out_num_parts); /* Calculates the part range for a part given overall object range, size of each part, and the part's number. Note: part * numbers begin at one. Intended to be used in conjunction * with aws_s3_calculate_auto_ranged_get_num_parts. part_number should be less than or equal to the result of * aws_s3_calculate_auto_ranged_get_num_parts. */ AWS_S3_API void aws_s3_calculate_auto_ranged_get_part_range( uint64_t object_range_start, uint64_t object_range_end, size_t part_size, uint64_t first_part_size, uint32_t part_number, uint64_t *out_part_range_start, uint64_t *out_part_range_end); /* Match the S3 error code to CRT error code, return AWS_ERROR_UNKNOWN when not matched */ AWS_S3_API int aws_s3_crt_error_code_from_server_error_code_string(struct aws_byte_cursor error_code_string); AWS_S3_API void aws_s3_request_finish_up_metrics_synced(struct aws_s3_request *request, struct aws_s3_meta_request *meta_request); AWS_EXTERN_C_END #endif /* AWS_S3_UTIL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/private/s3express_credentials_provider_impl.h000066400000000000000000000074331456575232400334200ustar00rootroot00000000000000#ifndef AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H #define AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H #include #include #include #include #include struct aws_cache; /** * Everything in the session should ONLY be accessed with lock HELD */ struct aws_s3express_session { struct aws_allocator *allocator; /* The hash key for the table storing creator and session. */ struct aws_string *hash_key; /* The s3express credentials cached for the session */ struct aws_credentials *s3express_credentials; /* Pointer to the creator if the session is in process creating */ struct aws_s3express_session_creator *creator; /* The region and host of the session */ struct aws_string *region; struct aws_string *host; bool inactive; /* Only used for mock tests */ struct aws_s3express_credentials_provider_impl *impl; }; struct aws_s3express_credentials_provider_impl { struct aws_s3_client *client; /* Internal Refcount to make sure the provider out lives all the context. */ struct aws_ref_count internal_ref; struct aws_task *bg_refresh_task; struct aws_event_loop *bg_event_loop; const struct aws_credentials *default_original_credentials; struct aws_credentials_provider *default_original_credentials_provider; struct { /* Protected by the impl lock */ struct aws_mutex lock; /** * Store the session creators in process. * `struct aws_string *` as Key. `struct aws_s3express_session_creator *` as Value */ struct aws_hash_table session_creator_table; /** * An LRU cache to store all the sessions. * `struct aws_string *` as Key. `struct aws_s3express_session *` as Value **/ struct aws_cache *cache; bool destroying; } synced_data; struct { /* Overrides for testing purpose. 
*/ struct aws_uri *endpoint_override; uint64_t bg_refresh_secs_override; bool (*s3express_session_is_valid_override)(struct aws_s3express_session *session, uint64_t now_seconds); bool (*s3express_session_about_to_expire_override)(struct aws_s3express_session *session, uint64_t now_seconds); /* The callback to be invoked before the real meta request finished callback for provider */ aws_s3_meta_request_finish_fn *meta_request_finished_overhead; } mock_test; }; /** * Configuration options for the default S3 Express credentials provider */ struct aws_s3express_credentials_provider_default_options { /** * The S3 client to fetch credentials. * Note, the client is not owned by the provider, user should keep the s3 client outlive the provider. */ struct aws_s3_client *client; /* Optional callback for shutdown complete of the provider */ aws_simple_completion_callback *shutdown_complete_callback; void *shutdown_user_data; struct { uint64_t bg_refresh_secs_override; } mock_test; }; AWS_EXTERN_C_BEGIN /** * Create the default S3 Express credentials provider. * * @param allocator * @return */ AWS_S3_API struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_new_default( struct aws_allocator *allocator, const struct aws_s3express_credentials_provider_default_options *options); /** * Encode the hash key to be [host_value][hash_of_credentials] * hash_of_credentials is the sha256 of [access_key][secret_access_key] */ AWS_S3_API struct aws_string *aws_encode_s3express_hash_key_new( struct aws_allocator *allocator, const struct aws_credentials *original_credentials, struct aws_byte_cursor host_value); AWS_EXTERN_C_END #endif /* AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/s3.h000066400000000000000000000100051456575232400230110ustar00rootroot00000000000000#ifndef AWS_S3_H #define AWS_S3_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_S3_PACKAGE_ID 14 enum aws_s3_errors { AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_S3_PACKAGE_ID), AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER, AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER, AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER, AWS_ERROR_S3_MISSING_ETAG, AWS_ERROR_S3_INTERNAL_ERROR, AWS_ERROR_S3_SLOW_DOWN, AWS_ERROR_S3_INVALID_RESPONSE_STATUS, AWS_ERROR_S3_MISSING_UPLOAD_ID, AWS_ERROR_S3_PROXY_PARSE_FAILED, AWS_ERROR_S3_UNSUPPORTED_PROXY_SCHEME, AWS_ERROR_S3_CANCELED, AWS_ERROR_S3_INVALID_RANGE_HEADER, AWS_ERROR_S3_MULTIRANGE_HEADER_UNSUPPORTED, AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH, AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED, AWS_ERROR_S3_PAUSED, AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED, AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH, AWS_ERROR_S3_RESUME_FAILED, AWS_ERROR_S3_OBJECT_MODIFIED, AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE, AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH, AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED, AWS_ERROR_S3_FILE_MODIFIED, AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE, AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID) }; enum aws_s3_subject { AWS_LS_S3_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_S3_PACKAGE_ID), AWS_LS_S3_CLIENT, AWS_LS_S3_CLIENT_STATS, AWS_LS_S3_REQUEST, AWS_LS_S3_META_REQUEST, AWS_LS_S3_ENDPOINT, AWS_LS_S3_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_S3_PACKAGE_ID) }; struct aws_s3_platform_info; struct aws_s3_cpu_group_info { /* group index, this usually refers to a particular numa node */ uint16_t cpu_group; /* array of network devices on this node */ struct aws_byte_cursor *nic_name_array; /* length of network devices array */ size_t nic_name_array_length; size_t cpus_in_group; }; #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable : 4626) /* assignment operator was implicitly defined as deleted */ # pragma warning(disable : 5027) /* move assignment operator was implicitly defined as deleted */ #endif struct aws_s3_platform_info { /* name of the instance-type: example c5n.18xlarge */ struct aws_byte_cursor instance_type; /* max throughput for this instance type, in gigabits per second */ double max_throughput_gbps; /* array of cpu group info. This will always have at least one entry. */ struct aws_s3_cpu_group_info *cpu_group_info_array; /* length of cpu group info array */ size_t cpu_group_info_array_length; /* The current build of this library specifically knows an optimal configuration for this * platform */ bool has_recommended_configuration; }; #ifdef _MSC_VER # pragma warning(pop) #endif AWS_EXTERN_C_BEGIN /** * Initializes internal datastructures used by aws-c-s3. * Must be called before using any functionality in aws-c-s3. */ AWS_S3_API void aws_s3_library_init(struct aws_allocator *allocator); /** * Shuts down the internal datastructures used by aws-c-s3. */ AWS_S3_API void aws_s3_library_clean_up(void); /* * Returns the aws_s3_platform_info for current platform * NOTE: THIS API IS EXPERIMENTAL AND UNSTABLE */ AWS_S3_API const struct aws_s3_platform_info *aws_s3_get_current_platform_info(void); /* * Retrieves a list of EC2 instance types with recommended configuration. * Returns aws_array_list. The caller is responsible for cleaning up the array list. 
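 * A minimal cleanup sketch (added for illustration; the element type used below is an assumption, verify it
 * against the implementation before relying on it):
 *     struct aws_array_list platforms = aws_s3_get_platforms_with_recommended_config();
 *     for (size_t i = 0; i < aws_array_list_length(&platforms); ++i) {
 *         struct aws_byte_cursor name;
 *         aws_array_list_get_at(&platforms, &name, i);
 *     }
 *     aws_array_list_clean_up(&platforms);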
*/ AWS_S3_API struct aws_array_list aws_s3_get_platforms_with_recommended_config(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_S3_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/s3_client.h000066400000000000000000001361101456575232400243550ustar00rootroot00000000000000#ifndef AWS_S3_CLIENT_H #define AWS_S3_CLIENT_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; struct aws_http_stream; struct aws_http_message; struct aws_http_headers; struct aws_tls_connection_options; struct aws_input_stream; struct aws_hash_table; struct aws_s3_client; struct aws_s3_request; struct aws_s3_meta_request; struct aws_s3_meta_request_result; struct aws_s3_meta_request_resume_token; struct aws_uri; struct aws_string; struct aws_s3_request_metrics; struct aws_s3express_credentials_provider; struct aws_credentials_properties_s3express; /** * A Meta Request represents a group of generated requests that are being done on behalf of the * original request. For example, one large GetObject request can be transformed into a series * of ranged GetObject requests that are executed in parallel to improve throughput. * * The aws_s3_meta_request_type is a hint of transformation to be applied. */ enum aws_s3_meta_request_type { /** * The Default meta request type sends any request to S3 as-is (with no transformation). For example, * it can be used to pass a CreateBucket request. */ AWS_S3_META_REQUEST_TYPE_DEFAULT, /** * The GetObject request will be split into a series of ranged GetObject requests that are * executed in parallel to improve throughput, when possible. */ AWS_S3_META_REQUEST_TYPE_GET_OBJECT, /** * The PutObject request will be split into MultiPart uploads that are executed in parallel * to improve throughput, when possible. * Note: put object supports both known and unknown body length. The client * relies on Content-Length header to determine length of the body. * Request with unknown content length are always sent using multipart * upload regardless of final number of parts and do have the following limitations: * - multipart threshold is ignored and all request are made through mpu, * even if they only need one part * - pause/resume is not supported * - meta request will throw error if checksum header is provider (due to * general limitation of checksum not being usable if meta request is * getting split) */ AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, /** * The CopyObject meta request performs a multi-part copy * using multiple S3 UploadPartCopy requests in parallel, or bypasses * a CopyObject request to S3 if the object size is not large enough for * a multipart upload. * Note: copy support is still in development and has following limitations: * - host header must use virtual host addressing style (path style is not * supported) and both source and dest buckets must have dns compliant name * - only {bucket}/{key} format is supported for source and passing arn as * source will not work * - source bucket is assumed to be in the same region as dest */ AWS_S3_META_REQUEST_TYPE_COPY_OBJECT, AWS_S3_META_REQUEST_TYPE_MAX, }; /** * The type of a single S3 HTTP request. Used by metrics. * A meta-request can make multiple S3 HTTP requests under the hood. 
* * For example, AWS_S3_META_REQUEST_TYPE_PUT_OBJECT for a large file will * do multipart upload, resulting in 3+ HTTP requests: * AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, one or more AWS_S3_REQUEST_TYPE_UPLOAD_PART, * and finally AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD. * * aws_s3_request_type_operation_name() returns the S3 operation name * for types that map (e.g. AWS_S3_REQUEST_TYPE_HEAD_OBJECT -> "HeadObject"), * or empty string for types that don't map (e.g. AWS_S3_REQUEST_TYPE_UNKNOWN -> ""). */ enum aws_s3_request_type { /* The actual type of the single S3 HTTP request is unknown */ AWS_S3_REQUEST_TYPE_UNKNOWN, /* S3 APIs */ AWS_S3_REQUEST_TYPE_HEAD_OBJECT, AWS_S3_REQUEST_TYPE_GET_OBJECT, AWS_S3_REQUEST_TYPE_LIST_PARTS, AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_UPLOAD_PART, AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_UPLOAD_PART_COPY, AWS_S3_REQUEST_TYPE_COPY_OBJECT, AWS_S3_REQUEST_TYPE_PUT_OBJECT, /* Max enum value */ AWS_S3_REQUEST_TYPE_MAX, /** @deprecated Use AWS_S3_REQUEST_TYPE_UNKNOWN if the actual S3 HTTP request type is unknown */ AWS_S3_REQUEST_TYPE_DEFAULT = AWS_S3_REQUEST_TYPE_UNKNOWN, }; /** * Invoked to provide response headers received during execution of the meta request, both for * success and error HTTP status codes. * * Return AWS_OP_SUCCESS to continue processing the request. * * Return aws_raise_error(E) to cancel the request. * The error you raise will be reflected in `aws_s3_meta_request_result.error_code`. * If you're not sure which error to raise, use AWS_ERROR_S3_CANCELED. */ typedef int(aws_s3_meta_request_headers_callback_fn)( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data); /** * Invoked to provide the response body as it is received. * * Note: If you set `enable_read_backpressure` true on the S3 client, * you must maintain the flow-control window. * The flow-control window shrinks as you receive body data via this callback. * Whenever the flow-control window reaches 0 you will stop downloading data. * Use aws_s3_meta_request_increment_read_window() to increment the window and keep data flowing. * Maintain a larger window to keep up a high download throughput, * parts cannot download in parallel unless the window is large enough to hold multiple parts. * Maintain a smaller window to limit the amount of data buffered in memory. * * If `manual_window_management` is false, you do not need to maintain the flow-control window. * No back-pressure is applied and data arrives as fast as possible. * * Return AWS_OP_SUCCESS to continue processing the request. * * Return aws_raise_error(E) to cancel the request. * The error you raise will be reflected in `aws_s3_meta_request_result.error_code`. * If you're not sure which error to raise, use AWS_ERROR_S3_CANCELED. */ typedef int(aws_s3_meta_request_receive_body_callback_fn)( /* The meta request that the callback is being issued for. */ struct aws_s3_meta_request *meta_request, /* The body data for this chunk of the object. */ const struct aws_byte_cursor *body, /* The byte index of the object that this refers to. For example, for an HTTP message that has a range header, the first chunk received will have a range_start that matches the range header's range-start.*/ uint64_t range_start, /* User data specified by aws_s3_meta_request_options.*/ void *user_data); /** * Invoked when the entire meta request execution is complete. 
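 * A minimal callback sketch (added for illustration; the function name is hypothetical):
 *     static void s_on_meta_request_finish(
 *         struct aws_s3_meta_request *meta_request,
 *         const struct aws_s3_meta_request_result *result,
 *         void *user_data) {
 *         if (result->error_code != AWS_ERROR_SUCCESS) {
 *             fprintf(stderr, "meta request failed: %s (HTTP %d)\n",
 *                 aws_error_name(result->error_code), result->response_status);
 *         }
 *     }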
*/ typedef void(aws_s3_meta_request_finish_fn)( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data); /** * Information sent in the meta_request progress callback. */ struct aws_s3_meta_request_progress { /* Bytes transferred since the previous progress update */ uint64_t bytes_transferred; /* Length of the entire meta request operation */ uint64_t content_length; }; /** * Invoked to report progress of a meta-request. * For PutObject, progress refers to bytes uploaded. * For CopyObject, progress refers to bytes copied. * For GetObject, progress refers to bytes downloaded. * For anything else, progress refers to response body bytes received. */ typedef void(aws_s3_meta_request_progress_fn)( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_progress *progress, void *user_data); /** * Invoked to report the telemetry of the meta request once a single request finishes. * Note: *metrics is only valid for the duration of the callback. If you need to keep it around, use * `aws_s3_request_metrics_acquire` */ typedef void(aws_s3_meta_request_telemetry_fn)( struct aws_s3_meta_request *meta_request, struct aws_s3_request_metrics *metrics, void *user_data); typedef void(aws_s3_meta_request_shutdown_fn)(void *user_data); typedef void(aws_s3_client_shutdown_complete_callback_fn)(void *user_data); enum aws_s3_meta_request_tls_mode { AWS_MR_TLS_ENABLED, AWS_MR_TLS_DISABLED, }; enum aws_s3_meta_request_compute_content_md5 { AWS_MR_CONTENT_MD5_DISABLED, AWS_MR_CONTENT_MD5_ENABLED, }; enum aws_s3_checksum_algorithm { AWS_SCA_NONE = 0, AWS_SCA_INIT, AWS_SCA_CRC32C = AWS_SCA_INIT, AWS_SCA_CRC32, AWS_SCA_SHA1, AWS_SCA_SHA256, AWS_SCA_END = AWS_SCA_SHA256, }; enum aws_s3_checksum_location { AWS_SCL_NONE = 0, AWS_SCL_HEADER, AWS_SCL_TRAILER, }; /** * Info about a single part, for you to review before the upload completes. */ struct aws_s3_upload_part_review { /* Size in bytes of this part */ uint64_t size; /* Checksum string, as sent in the UploadPart request (usually base64-encoded): * https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html#API_UploadPart_RequestSyntax * This is empty if no checksum is used. */ struct aws_byte_cursor checksum; }; /** * Info for you to review before an upload completes. * * WARNING: This feature is experimental/unstable. * At this time, review is only available for multipart upload * (when Content-Length is above the `multipart_upload_threshold`, * or Content-Length not specified). */ struct aws_s3_upload_review { /* The checksum algorithm used. */ enum aws_s3_checksum_algorithm checksum_algorithm; /* Number of parts uploaded. */ size_t part_count; /* Array of info about each part uploaded (array is `part_count` in length) */ struct aws_s3_upload_part_review *part_array; }; /** * Optional callback, for you to review an upload before it completes. * For example, you can review each part's checksum and fail the upload if * you do not agree with them. * * @param meta_request pointer to the aws_s3_meta_request of the upload. * @param info Detailed info about the upload. * * Return AWS_OP_SUCCESS to continue processing the request. * * Return aws_raise_error(E) to cancel the request. * The error you raise will be reflected in `aws_s3_meta_request_result.error_code`. * If you're not sure which error to raise, use AWS_ERROR_S3_CANCELED. * * WARNING: This feature is experimental/unstable. 
* At this time, the callback is only invoked for multipart upload * (when Content-Length is above the `multipart_upload_threshold`, * or Content-Length not specified). */ typedef int(aws_s3_meta_request_upload_review_fn)( struct aws_s3_meta_request *meta_request, const struct aws_s3_upload_review *review, void *user_data); /** * The factory function for S3 client to create a S3 Express credentials provider. * The S3 client will be the only owner of the S3 Express credentials provider. * * During S3 client destruction, S3 client will start the destruction of the provider, and wait the * on_provider_shutdown_callback to be invoked before the S3 client finish destruction. * * Note to implement the factory properly: * - Make sure `on_provider_shutdown_callback` will be invoked after the provider finish shutdown, otherwise, * leak will happen. * - The provider must not acquire a reference to the client; otherwise, a circular reference will cause a deadlock. * - The `client` provided CANNOT be used within the factory function call or the destructor. * * @param allocator memory allocator to create the provider. * @param client The S3 client uses and owns the provider. * @param on_provider_shutdown_callback The callback to be invoked when the provider finishes shutdown. * @param shutdown_user_data The user data to invoke shutdown callback with * @param user_data The user data with the factory * * @return The aws_s3express_credentials_provider. */ typedef struct aws_s3express_credentials_provider *(aws_s3express_provider_factory_fn)( struct aws_allocator *allocator, struct aws_s3_client *client, aws_simple_completion_callback on_provider_shutdown_callback, void *shutdown_user_data, void *factory_user_data); /* Keepalive properties are TCP only. * If interval or timeout are zero, then default values are used. */ struct aws_s3_tcp_keep_alive_options { uint16_t keep_alive_interval_sec; uint16_t keep_alive_timeout_sec; /* If set, sets the number of keep alive probes allowed to fail before the connection is considered * lost. If zero OS defaults are used. On Windows, this option is meaningless until Windows 10 1703.*/ uint16_t keep_alive_max_failed_probes; }; /* Options for a new client. */ struct aws_s3_client_config { /* When set, this will cap the number of active connections. When 0, the client will determine this value based on * throughput_target_gbps. (Recommended) */ uint32_t max_active_connections_override; /* Region that the client default to. */ struct aws_byte_cursor region; /* Client bootstrap used for common staples such as event loop group, host resolver, etc.. s*/ struct aws_client_bootstrap *client_bootstrap; /* How tls should be used while performing the request * If this is ENABLED: * If tls_connection_options is not-null, then those tls options will be used * If tls_connection_options is NULL, then default tls options will be used * If this is DISABLED: * No tls options will be used, regardless of tls_connection_options value. */ enum aws_s3_meta_request_tls_mode tls_mode; /* TLS Options to be used for each connection, if tls_mode is ENABLED. When compiling with BYO_CRYPTO, and tls_mode * is ENABLED, this is required. Otherwise, this is optional. */ struct aws_tls_connection_options *tls_connection_options; /** * Required. * Configure the signing for the requests made from the client. * - Credentials or credentials provider is required. Other configs are all optional, and will be default to what * needs to sign the request for S3, only overrides when Non-zero/Not-empty is set. 
* - To skip signing, you can config it with anonymous credentials. * - S3 Client will derive the right config for signing process based on this. * * Notes: * - For AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, S3 client will use the credentials in the config to derive the * S3 Express credentials that are used in the signing process. * - For other auth algorithm, client may make modifications to signing config before passing it on to signer. * * TODO: deprecate this structure from auth, introduce a new S3 specific one. */ struct aws_signing_config_aws *signing_config; /** * Optional. * Size of parts the object will be downloaded or uploaded in, in bytes. * This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. * If not set, this defaults to 8 MiB. * The client will adjust the part size for AWS_S3_META_REQUEST_TYPE_PUT_OBJECT if needed for service limits (max * number of parts per upload is 10,000, minimum upload part size is 5 MiB). * * You can also set this per meta-request, via `aws_s3_meta_request_options.part_size`. */ uint64_t part_size; /* If the part size needs to be adjusted for service limits, this is the maximum size it will be adjusted to. On 32 * bit machine, it will be forced to SIZE_MAX, which is around 4GiB. The server limit is 5GiB, but object size limit * is 5TiB for now. We should be good enough for all the cases. */ uint64_t max_part_size; /** * Optional. * The size threshold in bytes for when to use multipart uploads. * Uploads larger than this will use the multipart upload strategy. * Uploads smaller or equal to this will use a single HTTP request. * This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. * If set, this should be at least `part_size`. * If not set, maximal of `part_size` and 5 MiB will be used. * * You can also set this per meta-request, via `aws_s3_meta_request_options.multipart_upload_threshold`. */ uint64_t multipart_upload_threshold; /* Throughput target in gigabits per second (Gbps) that we are trying to reach. */ double throughput_target_gbps; /* How much memory can we use. This will be capped to SIZE_MAX */ uint64_t memory_limit_in_bytes; /* Retry strategy to use. If NULL, a default retry strategy will be used. */ struct aws_retry_strategy *retry_strategy; /** * TODO: move MD5 config to checksum config. * For multi-part upload, content-md5 will be calculated if the AWS_MR_CONTENT_MD5_ENABLED is specified * or initial request has content-md5 header. * For single-part upload, keep the content-md5 in the initial request unchanged. */ enum aws_s3_meta_request_compute_content_md5 compute_content_md5; /* Callback and associated user data for when the client has completed its shutdown process. */ aws_s3_client_shutdown_complete_callback_fn *shutdown_callback; void *shutdown_callback_user_data; /** * Optional. * Proxy configuration for http connection. * If the connection_type is AWS_HPCT_HTTP_LEGACY, it will be converted to AWS_HPCT_HTTP_TUNNEL if tls_mode is * ENABLED. Otherwise, it will be converted to AWS_HPCT_HTTP_FORWARD. */ struct aws_http_proxy_options *proxy_options; /** * Optional. * Configuration for fetching proxy configuration from environment. * By Default proxy_ev_settings.aws_http_proxy_env_var_type is set to AWS_HPEV_ENABLE which means read proxy * configuration from environment. * Only works when proxy_options is not set. If both are set, configuration from proxy_options is used. */ struct proxy_env_var_settings *proxy_ev_settings; /** * Optional. * If set to 0, default value is used. 
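 * The value is a connection-establishment timeout, expressed in milliseconds (see the `connect_timeout_ms` field that follows).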
*/ uint32_t connect_timeout_ms; /** * Optional. * Set keepalive to periodically transmit messages for detecting a disconnected peer. */ struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options; /** * Optional. * Configuration options for connection monitoring. * If the transfer speed falls below the specified minimum_throughput_bytes_per_second, the operation is aborted. * If set to NULL, default values are used. */ struct aws_http_connection_monitoring_options *monitoring_options; /** * Enable backpressure and prevent response data from downloading faster than you can handle it. * * If false (default), no backpressure is applied and data will download as fast as possible. * * If true, each meta request has a flow-control window that shrinks as * response body data is downloaded (headers do not affect the window). * `initial_read_window` determines the starting size of each meta request's window. * You will stop downloading data whenever the flow-control window reaches 0 * You must call aws_s3_meta_request_increment_read_window() to keep data flowing. * * WARNING: This feature is experimental. * Currently, backpressure is only applied to GetObject requests which are split into multiple parts, * and you may still receive some data after the window reaches 0. */ bool enable_read_backpressure; /** * The starting size of each meta request's flow-control window, in bytes. * Ignored unless `enable_read_backpressure` is true. */ size_t initial_read_window; /** * To enable S3 Express support or not. */ bool enable_s3express; /** * Optional. * Only used when `enable_s3express` is set. * * If set, client will invoke the factory to get the provider to use, when needed. * * If not set, client will create a default S3 Express provider under the hood. */ aws_s3express_provider_factory_fn *s3express_provider_override_factory; void *factory_user_data; }; struct aws_s3_checksum_config { /** * The location of client added checksum header. * * If AWS_SCL_NONE. No request payload checksum will be add and calculated. * * If AWS_SCL_HEADER, the checksum will be calculated by client and added related header to the request sent. * * If AWS_SCL_TRAILER, the payload will be aws_chunked encoded, The checksum will be calculate while reading the * payload by client. Related header will be added to the trailer part of the encoded payload. Note the payload of * the original request cannot be aws-chunked encoded already. Otherwise, error will be raised. */ enum aws_s3_checksum_location location; /** * The checksum algorithm used. * Must be set if location is not AWS_SCL_NONE. Must be AWS_SCA_NONE if location is AWS_SCL_NONE. */ enum aws_s3_checksum_algorithm checksum_algorithm; /** * Enable checksum mode header will be attached to GET requests, this will tell s3 to send back checksums headers if * they exist. Calculate the corresponding checksum on the response bodies. The meta request will finish with a did * validate field and set the error code to AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH if the calculated * checksum, and checksum found in the response header do not match. */ bool validate_response_checksum; /** * Optional array of `enum aws_s3_checksum_algorithm`. * * Ignored when validate_response_checksum is not set. * If not set all the algorithms will be selected as default behavior. * Owned by the caller. * * The list of algorithms for user to pick up when validate the checksum. Client will pick up the algorithm from the * list with the priority based on performance, and the algorithm sent by server. 
The priority based on performance * is [CRC32C, CRC32, SHA1, SHA256]. * * If the response checksum was validated by the client, the result will indicate which algorithm was picked. */ struct aws_array_list *validate_checksum_algorithms; }; /** * Options for a new meta request, i.e., a file transfer that will be handled by the high performance client. * * There are several ways to pass the request's body data: * 1) If the data is already in memory, set the body-stream on `message`. * 2) If the data is on disk, set `send_filepath` for best performance. * 3) If the data will be produced in asynchronous chunks, set `send_async_stream`. */ struct aws_s3_meta_request_options { /* The type of meta request we will be trying to accelerate. */ enum aws_s3_meta_request_type type; /** * Optional. * The S3 operation name (e.g. "CreateBucket"). * This will only be used when type is AWS_S3_META_REQUEST_TYPE_DEFAULT; * it is automatically populated for other meta-request types. * This name is used to fill out details in metrics and error reports. */ struct aws_byte_cursor operation_name; /** * Configure the signing for each request created for this meta request. If NULL, options in the client will be * used. * - The credentials will be obtained based on the precedence of: * 1. `credentials` from `signing_config` in `aws_s3_meta_request_options` * 2. `credentials_provider` from `signing_config` in `aws_s3_meta_request_options` * 3. `credentials` from `signing_config` cached in the client * 4. `credentials_provider` cached in the client * - To skip signing, you can configure it with anonymous credentials. * - S3 Client will derive the right config for the signing process based on this. * * Notes: * - For AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, S3 client will use the credentials in the config to derive the * S3 Express credentials that are used in the signing process. * - For other auth algorithms, the client may make modifications to the signing config before passing it on to the signer. **/ const struct aws_signing_config_aws *signing_config; /* Initial HTTP message that defines what operation we are doing. * Do not set the message's body-stream if the body is being passed by other means (see note above) */ struct aws_http_message *message; /** * Optional. * If set, this file is sent as the request body. * This gives the best performance when sending data from a file. * Do not set if the body is being passed by other means (see note above). */ struct aws_byte_cursor send_filepath; /** * Optional - EXPERIMENTAL/UNSTABLE * If set, the request body comes from this async stream. * Use this when outgoing data will be produced in asynchronous chunks. * Do not set if the body is being passed by other means (see note above). */ struct aws_async_input_stream *send_async_stream; /** * Optional. * If set, the flexible checksum will be performed by the client based on the config. */ const struct aws_s3_checksum_config *checksum_config; /** * Optional. * Size of parts the object will be downloaded or uploaded in, in bytes. * This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. * If not set, the value from `aws_s3_client_config.part_size` is used, which defaults to 8 MiB. * * The client will adjust the part size for AWS_S3_META_REQUEST_TYPE_PUT_OBJECT if needed for service limits (max * number of parts per upload is 10,000, minimum upload part size is 5 MiB). */ uint64_t part_size; /** * Optional. * The size threshold in bytes for when to use multipart uploads.
* Uploads larger than this will use the multipart upload strategy. * Uploads smaller or equal to this will use a single HTTP request. * This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT. * If set, this should be at least `part_size`. * If not set, `part_size` adjusted by client will be used as the threshold. * If both `part_size` and `multipart_upload_threshold` are not set, * the values from `aws_s3_client_config` are used. */ uint64_t multipart_upload_threshold; /* User data for all callbacks. */ void *user_data; /** * Optional. * Invoked to provide response headers received during execution of the meta request. * Note: this callback will not be fired for cases when resuming an * operation that was already completed (ex. pausing put object after it * uploaded all data and then resuming it) * See `aws_s3_meta_request_headers_callback_fn`. */ aws_s3_meta_request_headers_callback_fn *headers_callback; /** * Invoked to provide the response body as it is received. * See `aws_s3_meta_request_receive_body_callback_fn`. */ aws_s3_meta_request_receive_body_callback_fn *body_callback; /** * Invoked when the entire meta request execution is complete. * See `aws_s3_meta_request_finish_fn`. */ aws_s3_meta_request_finish_fn *finish_callback; /* Callback for when the meta request has completely cleaned up. */ aws_s3_meta_request_shutdown_fn *shutdown_callback; /** * Invoked to report progress of the meta request execution. * See `aws_s3_meta_request_progress_fn`. */ aws_s3_meta_request_progress_fn *progress_callback; /** * Optional. * To get telemetry metrics when a single request finishes. * If set, the request will keep track of the metrics from `aws_s3_request_metrics`, and fire the callback when the * request finishes receiving the response. * See `aws_s3_meta_request_telemetry_fn` */ aws_s3_meta_request_telemetry_fn *telemetry_callback; /** * Optional. * Callback for reviewing an upload before it completes. * WARNING: experimental/unstable * See `aws_s3_upload_review_fn` */ aws_s3_meta_request_upload_review_fn *upload_review_callback; /** * Optional. * Endpoint override for request. Can be used to override scheme and port of * the endpoint. * There is some overlap between Host header and Endpoint and corner cases * are handled as follows: * - Only Host header is set - Host is used to construct endpoint. https is * default with corresponding port * - Only endpoint is set - Host header is created from endpoint. Port and * Scheme from endpoint is used. * - Both Host and Endpoint are set - Host header must match Authority of * Endpoint uri. Port and Scheme from endpoint is used. */ struct aws_uri *endpoint; /** * Optional. * For meta requests that support pause/resume (e.g. PutObject), the serialized resume token returned by * aws_s3_meta_request_pause() can be provided here. * Note: If the PutObject request specifies a checksum algorithm, the client will calculate checksums while skipping parts * from the buffer and compare them to previously uploaded part checksums. */ struct aws_s3_meta_request_resume_token *resume_token; /* * Optional. * Total object size hint, in bytes. * The optimal strategy for downloading a file depends on its size. * Set this hint to help the S3 client choose the best strategy for this particular file. * This is just used as an estimate, so it's okay to provide an approximate value if the exact size is unknown. */ uint64_t *object_size_hint; }; /* Result details of a meta request.
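 * Inspect `error_code` first; which of the other fields are meaningful depends on it, as described below.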
* * If error_code is AWS_ERROR_SUCCESS, then response_status will match the response_status passed earlier by the header * callback and error_response_headers and error_response_body will be NULL. * * If error_code is equal to AWS_ERROR_S3_INVALID_RESPONSE_STATUS, then error_response_headers, error_response_body, and * response_status will be populated by the failed request. * * For all other error codes, response_status will be 0, and the error_response variables will be NULL. */ struct aws_s3_meta_request_result { /* If meta request failed due to an HTTP error response from S3, these are the headers. * NULL if meta request failed for another reason. */ struct aws_http_headers *error_response_headers; /* If meta request failed due to an HTTP error response from S3, this is the body. * NULL if meta request failed for another reason, or if the response had no body (such as a HEAD response). */ struct aws_byte_buf *error_response_body; /* If meta request failed due to an HTTP error response from S3, * this is the name of the S3 operation it was responding to. * For example, if an AWS_S3_META_REQUEST_TYPE_PUT_OBJECT fails this could be * "PutObject", "CreateMultipartUpload", "UploadPart", "CompleteMultipartUpload", or others. * For AWS_S3_META_REQUEST_TYPE_DEFAULT, this is the same value passed to * aws_s3_meta_request_options.operation_name. * NULL if the meta request failed for another reason, or the operation name is not known. */ struct aws_string *error_response_operation_name; /* Response status of the failed request or of the entire meta request. */ int response_status; /* Only set for GET requests. * Was the server side checksum compared against a calculated checksum of the response body. This may be false * even if validate_get_response_checksum was set because the object was uploaded without a checksum, or was * uploaded as a multipart object. * * If the object to get is a multipart object, the part checksums MAY be validated if the part size to get matches the * part size uploaded. In that case, if any part mismatches the checksum received, the meta request will fail with * checksum mismatch. However, even if the part checksums were validated, this will NOT be set to true, as the * checksum for the whole meta request was NOT validated. **/ bool did_validate; /* algorithm used to validate checksum */ enum aws_s3_checksum_algorithm validation_algorithm; /* Final error code of the meta request. */ int error_code; }; AWS_EXTERN_C_BEGIN AWS_S3_API struct aws_s3_client *aws_s3_client_new( struct aws_allocator *allocator, const struct aws_s3_client_config *client_config); /** * Add a reference, keeping this object alive. * The reference must be released when you are done with it, or its memory will never be cleaned up. * You must not pass in NULL. * Always returns the same pointer that was passed in. */ AWS_S3_API struct aws_s3_client *aws_s3_client_acquire(struct aws_s3_client *client); /** * Release a reference. * When the reference count drops to 0, this object will be cleaned up. * It's OK to pass in NULL (nothing happens). * Always returns NULL. */ AWS_S3_API struct aws_s3_client *aws_s3_client_release(struct aws_s3_client *client); AWS_S3_API struct aws_s3_meta_request *aws_s3_client_make_meta_request( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options); /** * Increment the flow-control window, so that response data continues downloading.
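 * (A typical pattern, sketched here for illustration: from your body callback, once a chunk has been consumed,
 *  call aws_s3_meta_request_increment_read_window(meta_request, body->len) to release that many bytes back to the window.)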
* * If the client was created with `enable_read_backpressure` set true, * each meta request has a flow-control window that shrinks as response * body data is downloaded (headers do not affect the size of the window). * The client's `initial_read_window` determines the starting size of each meta request's window. * If a meta request's flow-control window reaches 0, no further data will be downloaded. * If the `initial_read_window` is 0, the request will not start until the window is incremented. * Maintain a larger window to keep up a high download throughput; * parts cannot download in parallel unless the window is large enough to hold multiple parts. * Maintain a smaller window to limit the amount of data buffered in memory. * * If `enable_read_backpressure` is false, this call will have no effect; * no backpressure is being applied and data is being downloaded as fast as possible. * * WARNING: This feature is experimental. * Currently, backpressure is only applied to GetObject requests which are split into multiple parts, * and you may still receive some data after the window reaches 0. */ AWS_S3_API void aws_s3_meta_request_increment_read_window(struct aws_s3_meta_request *meta_request, uint64_t bytes); AWS_S3_API void aws_s3_meta_request_cancel(struct aws_s3_meta_request *meta_request); /** * Note: pause is currently only supported on upload requests. * In order to pause an ongoing upload, call aws_s3_meta_request_pause(), which * will return a resume token. The token can be used to query the state of the operation * at the time it was paused. * To resume an upload that was paused, supply the resume token in the meta * request options structure member aws_s3_meta_request_options.resume_token. * The upload can be resumed either from the same client or a different one. * Corner cases for resume upload are as follows: * - upload is not MPU - fail with AWS_ERROR_UNSUPPORTED_OPERATION * - pausing before MPU is created - NULL resume token returned. NULL resume * token is equivalent to restarting upload * - pausing in the middle of part transfer - return resume token. Scheduling of * new part uploads stops. * - pausing after completeMPU started - return resume token. If S3 cannot * find the associated MPU id when resuming with that token and the number of parts * uploaded equals the total number of parts, then the operation is a no-op. Otherwise the * operation fails. * Note: in the no-op case the call will succeed and the finish/shutdown callbacks will * fire, but the headers callback will not fire. * Note: similar to cancel, pause does not cancel requests already in flight, * and parts might complete after pause is requested. * @param meta_request pointer to the aws_s3_meta_request of the upload to be paused * @param resume_token resume token * @return either AWS_OP_ERR or AWS_OP_SUCCESS */ AWS_S3_API int aws_s3_meta_request_pause( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_resume_token **out_resume_token); /* * Options to construct upload resume token. * Note: fields correspond to getters on the token below and it is up to the caller * to persist those in whichever way they choose. */ struct aws_s3_upload_resume_token_options { struct aws_byte_cursor upload_id; /* Required */ uint64_t part_size; /* Required. Must be less than SIZE_MAX */ size_t total_num_parts; /* Required */ /** * Optional. * * Note: during resume num_parts_uploaded is used for sanity checking against * uploads on s3 side.
* In cases where upload id does not exist (already resumed using this token * or pause called after upload completes, etc...) and num_parts_uploaded * equals to total num parts, resume will become a noop. */ size_t num_parts_completed; }; /** * Create upload resume token from persisted data. * Note: Data required for resume token varies per operation. */ AWS_S3_API struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new_upload( struct aws_allocator *allocator, const struct aws_s3_upload_resume_token_options *options); /* * Increment resume token ref count. */ AWS_S3_API struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_acquire( struct aws_s3_meta_request_resume_token *resume_token); /* * Decrement resume token ref count. */ AWS_S3_API struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_release( struct aws_s3_meta_request_resume_token *resume_token); /* * Type of resume token. */ AWS_S3_API enum aws_s3_meta_request_type aws_s3_meta_request_resume_token_type( struct aws_s3_meta_request_resume_token *resume_token); /* * Part size associated with operation. */ AWS_S3_API uint64_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token *resume_token); /* * Total num parts associated with operation. */ AWS_S3_API size_t aws_s3_meta_request_resume_token_total_num_parts(struct aws_s3_meta_request_resume_token *resume_token); /* * Num parts completed. */ AWS_S3_API size_t aws_s3_meta_request_resume_token_num_parts_completed(struct aws_s3_meta_request_resume_token *resume_token); /* * Upload id associated with operation. * Only valid for tokens returned from upload operation. For all other operations * this will return empty. */ AWS_S3_API struct aws_byte_cursor aws_s3_meta_request_resume_token_upload_id( struct aws_s3_meta_request_resume_token *resume_token); /** * Add a reference, keeping this object alive. * The reference must be released when you are done with it, or it's memory will never be cleaned up. * You must not pass in NULL. * Always returns the same pointer that was passed in. */ AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_acquire(struct aws_s3_meta_request *meta_request); /** * Release a reference. * When the reference count drops to 0, this object will be cleaned up. * It's OK to pass in NULL (nothing happens). * Always returns NULL. */ AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_release(struct aws_s3_meta_request *meta_request); /** * Initialize the configuration for a default S3 signing. */ AWS_S3_API void aws_s3_init_default_signing_config( struct aws_signing_config_aws *signing_config, const struct aws_byte_cursor region, struct aws_credentials_provider *credentials_provider); /** * Return operation name for aws_s3_request_type, * or empty string if the type doesn't map to an actual operation. * For example: * AWS_S3_REQUEST_TYPE_HEAD_OBJECT -> "HeadObject" * AWS_S3_REQUEST_TYPE_UNKNOWN -> "" * AWS_S3_REQUEST_TYPE_MAX -> "" */ AWS_S3_API const char *aws_s3_request_type_operation_name(enum aws_s3_request_type type); /** * Add a reference, keeping this object alive. * The reference must be released when you are done with it, or it's memory will never be cleaned up. * Always returns the same pointer that was passed in. */ AWS_S3_API struct aws_s3_request_metrics *aws_s3_request_metrics_acquire(struct aws_s3_request_metrics *metrics); /** * Release a reference. * When the reference count drops to 0, this object will be cleaned up. 
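 * A common pattern (illustrative): `metrics = aws_s3_request_metrics_release(metrics);` releases the reference and NULLs the local pointer in one step.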
* It's OK to pass in NULL (nothing happens). * Always returns NULL. */ AWS_S3_API struct aws_s3_request_metrics *aws_s3_request_metrics_release(struct aws_s3_request_metrics *metrics); /************************************* Getters for s3 request metrics ************************************************/ /** * Get the request ID from aws_s3_request_metrics. * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. * If available, out_request_id will be set to a string. Be warned this string's lifetime is tied to the metrics * object. **/ AWS_S3_API int aws_s3_request_metrics_get_request_id( const struct aws_s3_request_metrics *metrics, const struct aws_string **out_request_id); /* Get the start time from aws_s3_request_metrics, which is when S3 client prepare the request to be sent. Always * available. Timestamp are from `aws_high_res_clock_get_ticks` */ AWS_S3_API void aws_s3_request_metrics_get_start_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_start_time); /* Get the end time from aws_s3_request_metrics. Always available */ AWS_S3_API void aws_s3_request_metrics_get_end_timestamp_ns(const struct aws_s3_request_metrics *metrics, uint64_t *out_end_time); /* Get the total duration time from aws_s3_request_metrics. Always available */ AWS_S3_API void aws_s3_request_metrics_get_total_duration_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_total_duration); /* Get the time stamp when the request started to be encoded. Timestamps are from `aws_high_res_clock_get_ticks` * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if the request ended before it gets sent. */ AWS_S3_API int aws_s3_request_metrics_get_send_start_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_send_start_time); /* Get the time stamp when the request finished to be encoded. Timestamps are from `aws_high_res_clock_get_ticks` * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ AWS_S3_API int aws_s3_request_metrics_get_send_end_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_send_end_time); /* The time duration for the request from start encoding to finish encoding (send_end_timestamp_ns - * send_start_timestamp_ns). * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ AWS_S3_API int aws_s3_request_metrics_get_sending_duration_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_sending_duration); /* Get the time stamp when the response started to be received from the network channel. Timestamps are from * `aws_high_res_clock_get_ticks` AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ AWS_S3_API int aws_s3_request_metrics_get_receive_start_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_receive_start_time); /* Get the time stamp when the response finished to be received from the network channel. Timestamps are from * `aws_high_res_clock_get_ticks` AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. */ AWS_S3_API int aws_s3_request_metrics_get_receive_end_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_receive_end_time); /* The time duration for the request from start receiving to finish receiving (receive_end_timestamp_ns - * receive_start_timestamp_ns). * AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not available. 
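 * Checking pattern (a sketch, added for illustration):
 *     uint64_t receive_ns = 0;
 *     if (aws_s3_request_metrics_get_receiving_duration_ns(metrics, &receive_ns) != AWS_OP_SUCCESS) {
 *         // aws_last_error() reports why, e.g. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE
 *     }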
*/ AWS_S3_API int aws_s3_request_metrics_get_receiving_duration_ns( const struct aws_s3_request_metrics *metrics, uint64_t *out_receiving_duration); /* Get the response status code for the request. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data not * available. */ AWS_S3_API int aws_s3_request_metrics_get_response_status_code( const struct aws_s3_request_metrics *metrics, int *out_response_status); /* Get the HTTP Headers of the response received for the request. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised * if data not available. */ AWS_S3_API int aws_s3_request_metrics_get_response_headers( const struct aws_s3_request_metrics *metrics, struct aws_http_headers **out_response_headers); /** * Get the path and query of the request. * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. * If available, out_request_path_query will be set to a string. Be warned this string's lifetime is tied to the metrics * object. */ AWS_S3_API void aws_s3_request_metrics_get_request_path_query( const struct aws_s3_request_metrics *metrics, const struct aws_string **out_request_path_query); /** * Get the host_address of the request. * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. * If available, out_host_address will be set to a string. Be warned this string's lifetime is tied to the metrics * object. */ AWS_S3_API void aws_s3_request_metrics_get_host_address( const struct aws_s3_request_metrics *metrics, const struct aws_string **out_host_address); /** * Get the IP address of the request connected to. * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. * If available, out_ip_address will be set to a string. Be warned this string's lifetime is tied to the metrics object. */ AWS_S3_API int aws_s3_request_metrics_get_ip_address( const struct aws_s3_request_metrics *metrics, const struct aws_string **out_ip_address); /* Get the id of connection that request was made from. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if data * not available */ AWS_S3_API int aws_s3_request_metrics_get_connection_id(const struct aws_s3_request_metrics *metrics, size_t *out_connection_id); /* Get the thread ID of the thread that request was made from. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised if * data not available */ AWS_S3_API int aws_s3_request_metrics_get_thread_id(const struct aws_s3_request_metrics *metrics, aws_thread_id_t *out_thread_id); /* Get the stream-id, which is the idex when the stream was activated. AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be * raised if data not available */ AWS_S3_API int aws_s3_request_metrics_get_request_stream_id(const struct aws_s3_request_metrics *metrics, uint32_t *out_stream_id); /** * Get the S3 operation name of the request (e.g. "HeadObject"). * If unavailable, AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE will be raised. * If available, out_operation_name will be set to a string. * Be warned this string's lifetime is tied to the metrics object. */ AWS_S3_API int aws_s3_request_metrics_get_operation_name( const struct aws_s3_request_metrics *metrics, const struct aws_string **out_operation_name); /* Get the request type from request metrics. * If you just need a string, aws_s3_request_metrics_get_operation_name() is more reliable. */ AWS_S3_API void aws_s3_request_metrics_get_request_type( const struct aws_s3_request_metrics *metrics, enum aws_s3_request_type *out_request_type); /* Get the AWS CRT error code from request metrics. 
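 * A minimal telemetry-callback sketch (added for illustration; the function name is hypothetical):
 *     static void s_on_telemetry(
 *         struct aws_s3_meta_request *meta_request, struct aws_s3_request_metrics *metrics, void *user_data) {
 *         const struct aws_string *op_name = NULL;
 *         if (aws_s3_request_metrics_get_operation_name(metrics, &op_name) == AWS_OP_SUCCESS) {
 *             int error_code = aws_s3_request_metrics_get_error_code(metrics);
 *             // inspect op_name / error_code as needed
 *             (void)error_code;
 *         }
 *     }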
*/ AWS_S3_API int aws_s3_request_metrics_get_error_code(const struct aws_s3_request_metrics *metrics); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_S3_CLIENT_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/s3_endpoint_resolver.h000066400000000000000000000012701456575232400266360ustar00rootroot00000000000000#ifndef AWS_S3_ENDPOINT_RESOLVER_H #define AWS_S3_ENDPOINT_RESOLVER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_endpoints_request_context; struct aws_endpoints_rule_engine; AWS_EXTERN_C_BEGIN /** * Creates a new S3 endpoint resolver. * Warning: Before using this header, you have to enable it by * setting cmake config AWS_ENABLE_S3_ENDPOINT_RESOLVER=ON */ AWS_S3_API struct aws_endpoints_rule_engine *aws_s3_endpoint_resolver_new(struct aws_allocator *allocator); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_S3_ENDPOINT_RESOLVER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/include/aws/s3/s3express_credentials_provider.h000066400000000000000000000062451456575232400307250ustar00rootroot00000000000000#ifndef AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H #define AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_s3_client; struct aws_s3express_credentials_provider; struct aws_credentials_properties_s3express { /** * Required. * The host address of the s3 bucket for the request. */ struct aws_byte_cursor host; /** * Optional. * The region of the bucket. * If empty, the region of the S3 client will be used. */ struct aws_byte_cursor region; }; struct aws_s3express_credentials_provider_vtable { /** * Implementation for S3 Express provider to get S3 Express credentials */ int (*get_credentials)( struct aws_s3express_credentials_provider *provider, const struct aws_credentials *original_credentials, const struct aws_credentials_properties_s3express *properties, aws_on_get_credentials_callback_fn callback, void *user_data); /** * Implementation to destroy the provider. */ void (*destroy)(struct aws_s3express_credentials_provider *provider); }; struct aws_s3express_credentials_provider { struct aws_s3express_credentials_provider_vtable *vtable; struct aws_allocator *allocator; /* Optional callback for shutdown complete of the provider */ aws_simple_completion_callback *shutdown_complete_callback; void *shutdown_user_data; void *impl; struct aws_ref_count ref_count; }; AWS_EXTERN_C_BEGIN AWS_S3_API struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_release( struct aws_s3express_credentials_provider *provider); /** * To initialize the provider with basic vtable and refcount. And hook up the refcount with vtable functions. * * @param provider * @param allocator * @param vtable * @param impl Optional, the impl for the provider * @return AWS_S3_API */ AWS_S3_API void aws_s3express_credentials_provider_init_base( struct aws_s3express_credentials_provider *provider, struct aws_allocator *allocator, struct aws_s3express_credentials_provider_vtable *vtable, void *impl); /** * Async function for retrieving specific credentials based on properties. * * @param provider aws_s3express_credentials_provider provider to source from * @param original_credentials The credentials used to derive the credentials for S3 Express. 
* @param properties Specific properties for credentials being fetched. * @param user_data user data to pass to the completion callback * * callback will only be invoked if-and-only-if the return value was AWS_OP_SUCCESS. * */ AWS_S3_API int aws_s3express_credentials_provider_get_credentials( struct aws_s3express_credentials_provider *provider, const struct aws_credentials *original_credentials, const struct aws_credentials_properties_s3express *properties, aws_on_get_credentials_callback_fn callback, void *user_data); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/000077500000000000000000000000001456575232400212215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/CMakeLists.txt000066400000000000000000000000261456575232400237570ustar00rootroot00000000000000 add_subdirectory(s3) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/000077500000000000000000000000001456575232400215465ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/CMakeLists.txt000066400000000000000000000014641456575232400243130ustar00rootroot00000000000000project(s3 C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") file(GLOB S3_SRC "*.c" ) set(S3_PROJECT_NAME s3) add_executable(${S3_PROJECT_NAME} ${S3_SRC}) aws_set_common_properties(${S3_PROJECT_NAME}) target_include_directories(${S3_PROJECT_NAME} PUBLIC $ $) target_link_libraries(${S3_PROJECT_NAME} PRIVATE aws-c-s3) if (BUILD_SHARED_LIBS AND NOT WIN32) message(INFO " s3 will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${S3_PROJECT_NAME} EXPORT ${S3_PROJECT_NAME}-targets COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/app_ctx.h000066400000000000000000000013411456575232400233540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include struct app_ctx { struct aws_allocator *allocator; struct aws_s3_client *client; struct aws_credentials_provider *credentials_provider; struct aws_client_bootstrap *client_bootstrap; struct aws_logger logger; struct aws_mutex mutex; struct aws_condition_variable c_var; bool execution_completed; struct aws_signing_config_aws signing_config; const char *region; enum aws_log_level log_level; bool help_requested; void *sub_command_data; }; aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/cli_progress_bar.c000066400000000000000000000202641456575232400252350ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "cli_progress_bar.h" #include #include #include #include #include #include #include struct progress_listener_group { struct aws_allocator *allocator; struct aws_array_list listeners; struct aws_mutex mutex; FILE *render_sink; struct aws_thread_scheduler *scheduler; bool run_in_background; }; struct progress_listener { struct progress_listener_group *owning_group; struct aws_string *label; struct aws_string *state; struct aws_mutex mutex; uint64_t max; uint64_t current; bool render_update_pending; }; static void s_progress_listener_delete(struct progress_listener *listener) { aws_string_destroy(listener->label); aws_mutex_clean_up(&listener->mutex); aws_mem_release(listener->owning_group->allocator, listener); } struct progress_listener_group *progress_listener_group_new(struct aws_allocator *allocator) { struct progress_listener_group *group = aws_mem_calloc(allocator, 1, sizeof(struct progress_listener_group)); group->allocator = allocator; aws_mutex_init(&group->mutex); group->render_sink = stdout; aws_array_list_init_dynamic(&group->listeners, allocator, 16, sizeof(struct progress_listener *)); struct aws_thread_options options = *aws_default_thread_options(); group->scheduler = aws_thread_scheduler_new(allocator, &options); return group; } void progress_listener_group_delete(struct progress_listener_group *group) { aws_mutex_lock(&group->mutex); group->run_in_background = false; aws_mutex_unlock(&group->mutex); aws_thread_scheduler_release(group->scheduler); size_t listeners_len = aws_array_list_length(&group->listeners); for (size_t i = 0; i < listeners_len; ++i) { struct progress_listener *listener; aws_array_list_get_at(&group->listeners, (void **)&listener, i); s_progress_listener_delete(listener); } aws_array_list_clean_up(&group->listeners); aws_mutex_clean_up(&group->mutex); aws_mem_release(group->allocator, group); } void progress_listener_group_render(struct progress_listener_group *group) { aws_mutex_lock(&group->mutex); size_t listeners_len = aws_array_list_length(&group->listeners); size_t lines_per_render = 3; size_t lines_render_count = 1; if (listeners_len > 0) { for (int i = (int)listeners_len - 1; i >= 0; i--) { size_t line_skip = lines_per_render * lines_render_count++; struct progress_listener *listener; aws_array_list_get_at(&group->listeners, (void **)&listener, i); aws_mutex_lock(&listener->mutex); if (listener->render_update_pending) { /* move from the bottom up to the row we need. */ fprintf(group->render_sink, "\033[%zuA", line_skip); progress_listener_render(listener); listener->render_update_pending = false; /* now go back so the next tick gets the same offset to work from. 
*/ fprintf(group->render_sink, "\033[%zuB", line_skip - lines_per_render); } aws_mutex_unlock(&listener->mutex); } } aws_mutex_unlock(&group->mutex); } static void s_render_task(struct aws_task *task, void *arg, enum aws_task_status status) { struct progress_listener_group *group = arg; struct aws_allocator *allocator = group->allocator; if (status == AWS_TASK_STATUS_RUN_READY) { progress_listener_group_render(group); bool run_again = false; aws_mutex_lock(&group->mutex); run_again = group->run_in_background; aws_mutex_unlock(&group->mutex); if (run_again) { struct aws_task *new_task = aws_mem_calloc(group->allocator, 1, sizeof(struct aws_task)); new_task->arg = group; new_task->fn = s_render_task; uint64_t run_at = 0; aws_high_res_clock_get_ticks(&run_at); /* run at TV framerate */ run_at += (AWS_TIMESTAMP_NANOS / 25); aws_thread_scheduler_schedule_future(group->scheduler, new_task, run_at); } } aws_mem_release(allocator, task); } void progress_listener_group_run_background_render_thread(struct progress_listener_group *group) { aws_mutex_lock(&group->mutex); group->run_in_background = true; aws_mutex_unlock(&group->mutex); struct aws_task *task = aws_mem_calloc(group->allocator, 1, sizeof(struct aws_task)); task->arg = group; task->fn = s_render_task; aws_thread_scheduler_schedule_now(group->scheduler, task); } struct progress_listener *progress_listener_new( struct progress_listener_group *group, struct aws_string *label, struct aws_string *state_name, uint64_t max_value) { struct progress_listener *listener = aws_mem_calloc(group->allocator, 1, sizeof(struct progress_listener)); aws_mutex_init(&listener->mutex); listener->max = max_value; listener->current = 0; listener->label = aws_string_clone_or_reuse(group->allocator, label); listener->state = aws_string_clone_or_reuse(group->allocator, state_name); listener->owning_group = group; listener->render_update_pending = false; aws_mutex_lock(&group->mutex); aws_array_list_push_back(&group->listeners, &listener); progress_listener_render(listener); aws_mutex_unlock(&group->mutex); return listener; } void progress_listener_update_progress(struct progress_listener *listener, uint64_t progress_update) { aws_mutex_lock(&listener->mutex); listener->current += progress_update; listener->render_update_pending = true; aws_mutex_unlock(&listener->mutex); } void progress_listener_reset_progress(struct progress_listener *listener) { aws_mutex_lock(&listener->mutex); listener->current = 0; listener->render_update_pending = true; aws_mutex_unlock(&listener->mutex); } void progress_listener_update_max_value(struct progress_listener *listener, uint64_t max_value) { aws_mutex_lock(&listener->mutex); listener->max = max_value; listener->render_update_pending = true; aws_mutex_unlock(&listener->mutex); } void progress_listener_update_state(struct progress_listener *listener, struct aws_string *state_name) { aws_mutex_lock(&listener->mutex); aws_string_destroy(listener->state); listener->state = aws_string_clone_or_reuse(listener->owning_group->allocator, state_name); listener->render_update_pending = true; aws_mutex_unlock(&listener->mutex); } void progress_listener_update_label(struct progress_listener *listener, struct aws_string *new_label) { aws_mutex_lock(&listener->mutex); aws_string_destroy(listener->label); listener->label = aws_string_clone_or_reuse(listener->owning_group->allocator, new_label); listener->render_update_pending = true; aws_mutex_unlock(&listener->mutex); } void progress_listener_render(struct progress_listener *listener) { struct 
progress_listener_group *group = listener->owning_group; fprintf(group->render_sink, "\33[2K"); /* clamp it to 80 characters to avoid overflow messing up the line calculations. */ fprintf(group->render_sink, "%.100s\n", aws_string_c_str(listener->label)); fprintf(group->render_sink, "\33[2K"); size_t completion = (size_t)(((double)listener->current / (double)listener->max) * 100); size_t ticks = 50; size_t completed_ticks = completion / (100 / ticks); fprintf(group->render_sink, " ["); for (size_t i = 0; i < ticks; ++i) { if (completed_ticks > i) { fprintf(group->render_sink, "="); } else { fprintf(group->render_sink, "-"); } } fprintf(group->render_sink, "]"); /* clamp the state to 20 characters to avoid overflowing the line and missing up the line calculations */ fprintf( group->render_sink, " %" PRIu64 "/%" PRIu64 "(%zu%%) %.20s\n\33[2K\n", listener->current, listener->max, completion, aws_string_c_str(listener->state)); } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/cli_progress_bar.h000066400000000000000000000056041456575232400252430ustar00rootroot00000000000000#ifndef CLI_PROGRESS_BAR_H #define CLI_PROGRESS_BAR_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include struct progress_listener_group; struct progress_listener; /** * Creates parent container for progress bars. It's rendered as a single block, and in order to work correctly * must be the last thing currently rendered on the terminal. It will render all progress bars at ~25 FPS * if you call progress_listener_group_run_background_render_thread(). Otherwise, you can always call * progress_listener_group_render() manually. */ struct progress_listener_group *progress_listener_group_new(struct aws_allocator *allocator); /** * Wait on any background thread resources to clean up, then delete the group. */ void progress_listener_group_delete(struct progress_listener_group *group); /** * Render the current state of the progress bars in this group. Please keep in mind. This works as long as this is the * last block of text currently rendered on the terminal (the cursor position should be immediately after the last line * of this group. */ void progress_listener_group_render(struct progress_listener_group *group); /** * Initiates a background thread to run progress_listener_group_render at ~25 FPS */ void progress_listener_group_run_background_render_thread(struct progress_listener_group *group); /** * Creates a new progress bar and returns a listener back for updating state, labels, and progress. * @param group group to render the progress bar into. * @param label label (what are you tracking progress for?) * @param state_name name of the state (In progress, success, failed etc...). * @param max_value The 100% value of the progress you're tracking */ struct progress_listener *progress_listener_new( struct progress_listener_group *group, struct aws_string *label, struct aws_string *state_name, uint64_t max_value); /** * Update the state of the progress bar. */ void progress_listener_update_state(struct progress_listener *listener, struct aws_string *state_name); /** * Update the progress of the progress bar. * @param progress_update amount to increment the progress by. 
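 *
 * Updates are guarded by the listener's internal mutex, so this can be called from transfer
 * callbacks running on other threads. Illustrative usage from a body/read callback:
 *   progress_listener_update_progress(listener, body->len);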
*/ void progress_listener_update_progress(struct progress_listener *listener, uint64_t progress_update); void progress_listener_reset_progress(struct progress_listener *listener); void progress_listener_update_max_value(struct progress_listener *listener, uint64_t max_value); /** * Update the label for the progress bar. */ void progress_listener_update_label(struct progress_listener *listener, struct aws_string *new_label); /** * Render just the bar. This will not render in place and you probably should rely on the group render * to handle this for you. */ void progress_listener_render(struct progress_listener *listener); #endif /* CLI_PROGRESS_BAR_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/main.c000066400000000000000000000146661456575232400226530ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include "app_ctx.h" #include int s3_ls_main(int argc, char *const argv[], const char *command_name, void *user_data); int s3_cp_main(int argc, char *const argv[], const char *command_name, void *user_data); int s3_compute_platform_info_main(int argc, char *const argv[], const char *command_name, void *user_data); static struct aws_cli_subcommand_dispatch s_dispatch_table[] = { { .command_name = "ls", .subcommand_fn = s3_ls_main, }, { .command_name = "cp", .subcommand_fn = s3_cp_main, }, { .command_name = "platform-info", .subcommand_fn = s3_compute_platform_info_main, }, }; static void s_usage(int exit_code) { FILE *output = exit_code == 0 ? stdout : stderr; fprintf(output, "usage: s3 \n"); fprintf(output, " available commands:\n"); for (size_t i = 0; i < AWS_ARRAY_SIZE(s_dispatch_table); ++i) { fprintf(output, " %s\n", s_dispatch_table[i].command_name); } fflush(output); exit(exit_code); } static void s_setup_logger(struct app_ctx *app_ctx) { struct aws_logger_standard_options logger_options = { .level = app_ctx->log_level, .file = stderr, }; aws_logger_init_standard(&app_ctx->logger, app_ctx->allocator, &logger_options); aws_logger_set(&app_ctx->logger); } static struct aws_cli_option s_long_options[] = { {"region", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'r'}, {"verbose", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'v'}, {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_parse_app_ctx(int argc, char *const argv[], struct app_ctx *app_ctx) { while (true) { int option_index = 0; int c = aws_cli_getopt_long(argc, argv, "r:v:h", s_long_options, &option_index); if (c == -1) { break; } switch (c) { case 0: /* getopt_long() returns 0 if an option.flag is non-null */ break; case 'r': app_ctx->region = aws_cli_optarg; break; case 'v': if (!strcmp(aws_cli_optarg, "TRACE")) { app_ctx->log_level = AWS_LL_TRACE; } else if (!strcmp(aws_cli_optarg, "INFO")) { app_ctx->log_level = AWS_LL_INFO; } else if (!strcmp(aws_cli_optarg, "DEBUG")) { app_ctx->log_level = AWS_LL_DEBUG; } else if (!strcmp(aws_cli_optarg, "ERROR")) { app_ctx->log_level = AWS_LL_ERROR; } else { fprintf(stderr, "unsupported log level %s.\n", aws_cli_optarg); s_usage(1); } break; case 'h': app_ctx->help_requested = true; break; default: break; } } if (!app_ctx->help_requested) { if (app_ctx->log_level != AWS_LOG_LEVEL_NONE) { s_setup_logger(app_ctx); } } /* reset for the next parser */ aws_cli_reset_state(); /* signing 
config */ aws_s3_init_default_signing_config( &app_ctx->signing_config, aws_byte_cursor_from_c_str(app_ctx->region), app_ctx->credentials_provider); app_ctx->signing_config.flags.use_double_uri_encode = false; /* s3 client */ struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); client_config.client_bootstrap = app_ctx->client_bootstrap; client_config.region = aws_byte_cursor_from_c_str(app_ctx->region); client_config.signing_config = &app_ctx->signing_config; app_ctx->client = aws_s3_client_new(app_ctx->allocator, &client_config); } int main(int argc, char *argv[]) { struct aws_allocator *allocator = aws_default_allocator(); aws_s3_library_init(allocator); struct app_ctx app_ctx; AWS_ZERO_STRUCT(app_ctx); app_ctx.allocator = allocator; app_ctx.c_var = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; aws_mutex_init(&app_ctx.mutex); /* event loop */ struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 0, NULL); /* resolver */ struct aws_host_resolver_default_options resolver_options = { .el_group = event_loop_group, .max_entries = 8, }; struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); /* client bootstrap */ struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = event_loop_group, .host_resolver = resolver, }; app_ctx.client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); if (app_ctx.client_bootstrap == NULL) { printf("ERROR initializing client bootstrap\n"); return -1; } /* credentials */ struct aws_credentials_provider_chain_default_options credentials_provider_options; AWS_ZERO_STRUCT(credentials_provider_options); credentials_provider_options.bootstrap = app_ctx.client_bootstrap; app_ctx.credentials_provider = aws_credentials_provider_new_chain_default(allocator, &credentials_provider_options); s_parse_app_ctx(argc, argv, &app_ctx); int dispatch_return_code = aws_cli_dispatch_on_subcommand(argc, argv, s_dispatch_table, AWS_ARRAY_SIZE(s_dispatch_table), &app_ctx); if (dispatch_return_code && (aws_last_error() == AWS_ERROR_INVALID_ARGUMENT || aws_last_error() == AWS_ERROR_UNIMPLEMENTED)) { s_usage(app_ctx.help_requested == true ? 0 : 1); } /* release resources */ aws_s3_client_release(app_ctx.client); aws_credentials_provider_release(app_ctx.credentials_provider); aws_client_bootstrap_release(app_ctx.client_bootstrap); aws_host_resolver_release(resolver); aws_event_loop_group_release(event_loop_group); aws_mutex_clean_up(&app_ctx.mutex); aws_s3_library_clean_up(); return dispatch_return_code; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/s3-cp.c000066400000000000000000001130101456575232400226330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
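 *
 * s3-cp subcommand: uploads a local file or directory to S3, downloads an object or prefix to
 * disk, or performs a server-side CopyObject between buckets, showing a progress bar per transfer.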
*/ #include #include #include #include #include #include #include #include "app_ctx.h" #include "cli_progress_bar.h" #include #include #ifdef _MSC_VER # pragma warning(disable : 4706) /* assignment in conditional */ #endif /* _MSC_VER */ struct cp_app_ctx { struct app_ctx *app_ctx; struct aws_uri source_uri; struct aws_uri destination_uri; struct progress_listener_group *listener_group; struct aws_mutex mutex; struct aws_condition_variable c_var; const char *source_endpoint; const char *dest_endpoint; size_t expected_transfers; size_t completed_transfers; bool list_objects_completed; bool source_s3; bool source_file_system; bool dest_s3; bool dest_file_system; bool source_is_directory_or_prefix; }; struct single_transfer_ctx { struct cp_app_ctx *cp_app_ctx; struct progress_listener *listener; struct aws_s3_meta_request *meta_request; FILE *output_sink; }; static void s_usage(int exit_code) { FILE *sink = exit_code == 0 ? stdout : stderr; fprintf( sink, "usage: s3-cp [options] source_path " "destination_path\n"); fprintf( sink, " source_path: an S3 bucket containing the object prefix to copy, or a local filesystem path to upload.\n"); fprintf(sink, " In the case of an S3 bucket, use URI format S3://{bucket_name}/{prefix_and_or_key}.\n"); fprintf( sink, " destination_path: an S3 bucket containing the object prefix to copy, or a local filesystem path to download " "to.\n"); fprintf(sink, " In the case of an S3 bucket, use URI format S3://{bucket_name}/{prefix_and_or_key}.\n"); fprintf(sink, " If both the source_path and destination_path are S3 URIs it will attempt an object copy. \n"); fprintf(sink, " Note: both buckets must be in the same region for the copy to work\n"); exit(exit_code); } static struct aws_cli_option s_long_options[] = { /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_parse_options(int argc, char **argv, struct cp_app_ctx *ctx) { bool src_uri_found = false; bool dest_uri_found = false; int option_index = 0; int opt_val = -1; do { opt_val = aws_cli_getopt_long(argc, argv, "", s_long_options, &option_index); /* START_OF_TEXT means our positional argument */ if (opt_val == 0x02) { struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str(aws_cli_positional_arg); struct aws_uri *uri_to_parse = !src_uri_found ? 
&ctx->source_uri : &ctx->destination_uri; if (aws_uri_init_parse(uri_to_parse, ctx->app_ctx->allocator, &uri_cursor)) { fprintf( stderr, "Failed to parse uri %s with error %s\n", (char *)uri_cursor.ptr, aws_error_debug_str(aws_last_error())); s_usage(1); } if (uri_to_parse == &ctx->source_uri) { src_uri_found = true; } else { dest_uri_found = true; } } } while (opt_val != -1); if (!(src_uri_found && dest_uri_found)) { fprintf(stderr, "An URI for the source and destination must be provided.\n"); s_usage(1); } } static void s_dispatch_and_run_transfers(struct cp_app_ctx *cp_app_ctx); int s3_cp_main(int argc, char *argv[], const char *command_name, void *user_data) { (void)command_name; struct app_ctx *app_ctx = user_data; if (app_ctx->help_requested) { s_usage(0); } if (!app_ctx->region) { fprintf(stderr, "region is a required argument\n"); s_usage(1); } struct cp_app_ctx cp_app_ctx = { .app_ctx = app_ctx, .mutex = AWS_MUTEX_INIT, .c_var = AWS_CONDITION_VARIABLE_INIT, }; app_ctx->sub_command_data = &cp_app_ctx; s_parse_options(argc, argv, &cp_app_ctx); cp_app_ctx.listener_group = progress_listener_group_new(app_ctx->allocator); progress_listener_group_run_background_render_thread(cp_app_ctx.listener_group); char source_endpoint[1024]; AWS_ZERO_ARRAY(source_endpoint); char dest_endpoint[1024]; AWS_ZERO_ARRAY(dest_endpoint); struct aws_byte_cursor s3_scheme = aws_byte_cursor_from_c_str("s3"); struct aws_byte_cursor file_scheme = aws_byte_cursor_from_c_str("file"); if (aws_byte_cursor_eq_ignore_case(&cp_app_ctx.source_uri.scheme, &s3_scheme)) { cp_app_ctx.source_s3 = true; cp_app_ctx.source_is_directory_or_prefix = true; struct aws_byte_cursor source_bucket = cp_app_ctx.source_uri.host_name; snprintf( source_endpoint, sizeof(source_endpoint), "%.*s.s3.%s.amazonaws.com", (int)source_bucket.len, source_bucket.ptr, app_ctx->region); cp_app_ctx.source_endpoint = source_endpoint; } else if ( aws_byte_cursor_eq_ignore_case(&cp_app_ctx.source_uri.scheme, &file_scheme) || cp_app_ctx.source_uri.scheme.len == 0) { cp_app_ctx.source_file_system = true; struct aws_string *path_str = aws_string_new_from_buf(app_ctx->allocator, &cp_app_ctx.source_uri.uri_str); struct aws_string *path_open_mode = aws_string_new_from_c_str(app_ctx->allocator, "r"); FILE *file_open_check = NULL; if (aws_directory_exists(path_str)) { cp_app_ctx.source_is_directory_or_prefix = true; } else if ((file_open_check = aws_fopen_safe(path_str, path_open_mode))) { cp_app_ctx.source_is_directory_or_prefix = false; fclose(file_open_check); } else { fprintf(stderr, "Source path does not exist\n"); s_usage(1); } aws_string_destroy(path_open_mode); aws_string_destroy(path_str); } else { fprintf(stderr, "Source URI type is unsupported. s3://, file://, or / are currently supported\n"); s_usage(1); } if (aws_byte_cursor_eq_ignore_case(&cp_app_ctx.destination_uri.scheme, &s3_scheme)) { cp_app_ctx.dest_s3 = true; struct aws_byte_cursor destination_bucket = cp_app_ctx.destination_uri.host_name; snprintf( dest_endpoint, sizeof(dest_endpoint), "%.*s.s3.%s.amazonaws.com", (int)destination_bucket.len, destination_bucket.ptr, app_ctx->region); cp_app_ctx.dest_endpoint = dest_endpoint; } else if ( aws_byte_cursor_eq_ignore_case(&cp_app_ctx.destination_uri.scheme, &file_scheme) || cp_app_ctx.destination_uri.scheme.len == 0) { cp_app_ctx.dest_file_system = true; } else { fprintf(stderr, "Destination URI type is unsupported. 
s3://, file://, or / are currently supported\n"); s_usage(1); } s_dispatch_and_run_transfers(&cp_app_ctx); aws_condition_variable_clean_up(&cp_app_ctx.c_var); aws_mutex_clean_up(&cp_app_ctx.mutex); return 0; } /* this stream wrapper is purely for updating progress bars on upload */ struct progress_update_stream { struct aws_input_stream base; struct single_transfer_ctx *transfer; struct aws_input_stream *wrapped_stream; struct aws_allocator *allocator; }; int s_input_seek(struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { struct progress_update_stream *update_stream = AWS_CONTAINER_OF(stream, struct progress_update_stream, base); if (basis == AWS_SSB_BEGIN && offset == 0) { progress_listener_reset_progress(update_stream->transfer->listener); struct aws_string *state = aws_string_new_from_c_str(update_stream->transfer->cp_app_ctx->app_ctx->allocator, "In Progress"); progress_listener_update_state(update_stream->transfer->listener, state); aws_string_destroy(state); } return aws_input_stream_seek(update_stream->wrapped_stream, offset, basis); } int s_input_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct progress_update_stream *update_stream = AWS_CONTAINER_OF(stream, struct progress_update_stream, base); size_t current_len = dest->len; int val = aws_input_stream_read(update_stream->wrapped_stream, dest); size_t progress = dest->len - current_len; progress_listener_update_progress(update_stream->transfer->listener, progress); return val; } static int s_input_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { struct progress_update_stream *update_stream = AWS_CONTAINER_OF(stream, struct progress_update_stream, base); return aws_input_stream_get_status(update_stream->wrapped_stream, status); } static int s_input_get_length(struct aws_input_stream *stream, int64_t *out_length) { struct progress_update_stream *update_stream = AWS_CONTAINER_OF(stream, struct progress_update_stream, base); return aws_input_stream_get_length(update_stream->wrapped_stream, out_length); } static void s_input_destroy(void *data) { struct progress_update_stream *update_stream = (struct progress_update_stream *)data; aws_input_stream_release(update_stream->wrapped_stream); aws_mem_release(update_stream->allocator, update_stream); } static struct aws_input_stream_vtable s_update_input_stream_vtable = { .get_length = s_input_get_length, .seek = s_input_seek, .read = s_input_read, .get_status = s_input_get_status, }; /* end of stream for the progress bar */ static const struct aws_byte_cursor g_host_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"); static const struct aws_byte_cursor g_x_amz_copy_source_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"); /* helper function for getting an http request to make the copy object request. 
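 * The message is a PUT on the destination key with a Host header for the destination bucket's
 * endpoint and an x-amz-copy-source header of the form "{source_bucket}/{source_key}" (URI-encoded).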
*/ static struct aws_http_message *s_copy_object_request_new( struct cp_app_ctx *cp_app_ctx, const struct aws_byte_cursor *source_bucket, const struct aws_byte_cursor *source_key, const struct aws_byte_cursor *destination_key) { struct aws_http_message *message = aws_http_message_new_request(cp_app_ctx->app_ctx->allocator); if (aws_http_message_set_request_path(message, *destination_key)) { goto error_clean_up_message; } struct aws_byte_cursor endpoint = aws_byte_cursor_from_c_str(cp_app_ctx->dest_endpoint); struct aws_http_header host_header = {.name = g_host_header_name, .value = endpoint}; if (aws_http_message_add_header(message, host_header)) { goto error_clean_up_message; } char copy_source_value[1024]; snprintf( copy_source_value, sizeof(copy_source_value), "%.*s/%.*s", (int)source_bucket->len, source_bucket->ptr, (int)source_key->len, source_key->ptr); struct aws_byte_cursor copy_source_cursor = aws_byte_cursor_from_c_str(copy_source_value); struct aws_byte_buf copy_source_value_encoded; aws_byte_buf_init(©_source_value_encoded, cp_app_ctx->app_ctx->allocator, 1024); aws_byte_buf_append_encoding_uri_param(©_source_value_encoded, ©_source_cursor); struct aws_http_header copy_source_header = { .name = g_x_amz_copy_source_name, .value = aws_byte_cursor_from_buf(©_source_value_encoded), }; if (aws_http_message_add_header(message, copy_source_header)) { goto error_clean_up_message; } if (aws_http_message_set_request_method(message, aws_http_method_put)) { goto error_clean_up_message; } aws_byte_buf_clean_up(©_source_value_encoded); return message; error_clean_up_message: aws_byte_buf_clean_up(©_source_value_encoded); if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } /* upon copy object progress, update the progress bar. */ static void s_copy_object_progress( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_progress *progress, void *user_data) { (void)meta_request; struct single_transfer_ctx *transfer_ctx = user_data; progress_listener_update_max_value(transfer_ctx->listener, progress->content_length); progress_listener_update_progress(transfer_ctx->listener, progress->bytes_transferred); } /* invoked upon the completion of a copy object request. */ static void s_copy_object_request_finish( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; struct single_transfer_ctx *transfer_ctx = user_data; struct aws_string *new_state = NULL; if (meta_request_result->error_code == AWS_ERROR_SUCCESS) { new_state = aws_string_new_from_c_str(transfer_ctx->cp_app_ctx->app_ctx->allocator, "Completed"); progress_listener_update_progress(transfer_ctx->listener, 100); } else { new_state = aws_string_new_from_c_str(transfer_ctx->cp_app_ctx->app_ctx->allocator, "Error"); } progress_listener_update_state(transfer_ctx->listener, new_state); aws_s3_meta_request_release(transfer_ctx->meta_request); aws_mutex_lock(&transfer_ctx->cp_app_ctx->mutex); transfer_ctx->cp_app_ctx->completed_transfers++; aws_mutex_unlock(&transfer_ctx->cp_app_ctx->mutex); aws_condition_variable_notify_one(&transfer_ctx->cp_app_ctx->c_var); aws_mem_release(transfer_ctx->cp_app_ctx->app_ctx->allocator, transfer_ctx); } /* create a copy object request and send it based on the source bucket/key and destination bucket/key. 
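 * The request is submitted as an AWS_S3_META_REQUEST_TYPE_COPY_OBJECT meta request; progress and
 * completion are reported through s_copy_object_progress and s_copy_object_request_finish, and
 * expected_transfers is incremented under the shared mutex.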
*/ static int s_kick_off_copy_object_request( struct cp_app_ctx *cp_app_ctx, const struct aws_byte_cursor *source_bucket, const struct aws_byte_cursor *source_key, const struct aws_byte_cursor *destination_key) { struct aws_http_message *message = s_copy_object_request_new(cp_app_ctx, source_bucket, source_key, destination_key); struct single_transfer_ctx *transfer_ctx = aws_mem_calloc(cp_app_ctx->app_ctx->allocator, 1, sizeof(struct single_transfer_ctx)); transfer_ctx->cp_app_ctx = cp_app_ctx; struct aws_byte_buf label_buf; struct aws_byte_cursor label_start_cur = aws_byte_cursor_from_c_str("copy: s3://"); aws_byte_buf_init_copy_from_cursor(&label_buf, cp_app_ctx->app_ctx->allocator, label_start_cur); aws_byte_buf_append_dynamic(&label_buf, source_bucket); aws_byte_buf_append_byte_dynamic(&label_buf, '/'); aws_byte_buf_append_dynamic(&label_buf, source_key); struct aws_byte_cursor to_cur = aws_byte_cursor_from_c_str(" to s3://"); aws_byte_buf_append_dynamic(&label_buf, &to_cur); aws_byte_buf_append_dynamic(&label_buf, &cp_app_ctx->destination_uri.host_name); aws_byte_buf_append_dynamic(&label_buf, destination_key); struct aws_string *label = aws_string_new_from_buf(cp_app_ctx->app_ctx->allocator, &label_buf); aws_byte_buf_clean_up(&label_buf); struct aws_string *state = aws_string_new_from_c_str(cp_app_ctx->app_ctx->allocator, "In Progress"); transfer_ctx->listener = progress_listener_new(cp_app_ctx->listener_group, label, state, 100); struct aws_s3_meta_request_options meta_request_options = { .user_data = transfer_ctx, .body_callback = NULL, .signing_config = &cp_app_ctx->app_ctx->signing_config, .finish_callback = s_copy_object_request_finish, .headers_callback = NULL, .message = message, .shutdown_callback = NULL, .progress_callback = s_copy_object_progress, .type = AWS_S3_META_REQUEST_TYPE_COPY_OBJECT, }; transfer_ctx->meta_request = aws_s3_client_make_meta_request(cp_app_ctx->app_ctx->client, &meta_request_options); if (transfer_ctx->meta_request == NULL) { fprintf( stderr, "Failure when initiating copy object request with error %s\n", aws_error_debug_str(aws_last_error())); exit(1); } aws_mutex_lock(&transfer_ctx->cp_app_ctx->mutex); transfer_ctx->cp_app_ctx->expected_transfers++; aws_mutex_unlock(&transfer_ctx->cp_app_ctx->mutex); return AWS_OP_SUCCESS; } /* invoked upon an upload completion. 
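 * Marks the listener Completed or Failed, increments completed_transfers under the mutex, and
 * notifies the condition variable so the waiting main thread can re-check s_are_all_transfers_done.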
*/ void s_put_request_finished( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; struct single_transfer_ctx *transfer_ctx = user_data; struct aws_string *state = NULL; if (meta_request_result->error_code == AWS_ERROR_SUCCESS) { state = aws_string_new_from_c_str(transfer_ctx->cp_app_ctx->app_ctx->allocator, "Completed"); } else { state = aws_string_new_from_c_str(transfer_ctx->cp_app_ctx->app_ctx->allocator, "Failed"); } progress_listener_update_state(transfer_ctx->listener, state); aws_string_destroy(state); aws_mutex_lock(&transfer_ctx->cp_app_ctx->mutex); transfer_ctx->cp_app_ctx->completed_transfers++; aws_mutex_unlock(&transfer_ctx->cp_app_ctx->mutex); aws_condition_variable_notify_one(&transfer_ctx->cp_app_ctx->c_var); aws_s3_meta_request_release(transfer_ctx->meta_request); aws_mem_release(transfer_ctx->cp_app_ctx->app_ctx->allocator, transfer_ctx); } static bool s_are_all_transfers_done(void *arg) { struct cp_app_ctx *cp_app_ctx = arg; return cp_app_ctx->expected_transfers == cp_app_ctx->completed_transfers; } /* kick off a PUT request for an object from a file on disk */ static int s_kickoff_put_object( struct cp_app_ctx *cp_app_ctx, const struct aws_byte_cursor *src_path, const struct aws_byte_cursor *dest_path, uint64_t file_size) { struct single_transfer_ctx *transfer_ctx = aws_mem_calloc(cp_app_ctx->app_ctx->allocator, 1, sizeof(struct single_transfer_ctx)); transfer_ctx->cp_app_ctx = cp_app_ctx; struct aws_byte_buf uri_path; struct aws_byte_cursor destination_path = *dest_path; aws_byte_buf_init_copy_from_cursor(&uri_path, cp_app_ctx->app_ctx->allocator, cp_app_ctx->destination_uri.path); aws_byte_buf_append_dynamic(&uri_path, &destination_path); for (size_t i = 0; i < uri_path.len; ++i) { if (uri_path.buffer[i] == '\\') { uri_path.buffer[i] = '/'; } } struct aws_byte_cursor full_path = aws_byte_cursor_from_buf(&uri_path); if (uri_path.buffer[0] == '.') { aws_byte_cursor_advance(&full_path, 1); } struct aws_byte_buf label_buf; struct aws_byte_cursor operation_name_cur = aws_byte_cursor_from_c_str("upload: "); aws_byte_buf_init_copy_from_cursor(&label_buf, cp_app_ctx->app_ctx->allocator, operation_name_cur); aws_byte_buf_append_dynamic(&label_buf, dest_path); struct aws_byte_cursor to_cur = aws_byte_cursor_from_c_str(" to s3://"); aws_byte_buf_append_dynamic(&label_buf, &to_cur); aws_byte_buf_append_dynamic(&label_buf, &cp_app_ctx->destination_uri.authority); aws_byte_buf_append_dynamic(&label_buf, &full_path); struct aws_string *label = aws_string_new_from_buf(cp_app_ctx->app_ctx->allocator, &label_buf); aws_byte_buf_clean_up(&label_buf); struct aws_string *state = aws_string_new_from_c_str(cp_app_ctx->app_ctx->allocator, "In Progress"); transfer_ctx->listener = progress_listener_new(cp_app_ctx->listener_group, label, state, file_size); aws_string_destroy(state); aws_string_destroy(label); aws_byte_buf_clean_up(&label_buf); struct aws_s3_meta_request_options request_options = { .user_data = transfer_ctx, .signing_config = &cp_app_ctx->app_ctx->signing_config, .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .finish_callback = s_put_request_finished, }; struct aws_http_header host_header = { .name = g_host_header_name, .value = aws_byte_cursor_from_c_str(cp_app_ctx->dest_endpoint), }; char content_length[256]; AWS_ZERO_ARRAY(content_length); snprintf(content_length, AWS_ARRAY_SIZE(content_length), "%" PRIu64, file_size); struct aws_http_header content_length_header = { .name = 
aws_byte_cursor_from_c_str("content-length"), .value = aws_byte_cursor_from_c_str(content_length), }; request_options.message = aws_http_message_new_request(cp_app_ctx->app_ctx->allocator); aws_http_message_add_header(request_options.message, host_header); aws_http_message_add_header(request_options.message, content_length_header); aws_http_message_set_request_method(request_options.message, aws_http_method_put); aws_http_message_set_request_path(request_options.message, full_path); struct aws_input_stream *body_input = aws_input_stream_new_from_file(cp_app_ctx->app_ctx->allocator, (const char *)src_path->ptr); if (!body_input) { aws_mem_release(cp_app_ctx->app_ctx->allocator, transfer_ctx); return AWS_OP_ERR; } struct progress_update_stream *update_stream = aws_mem_calloc(cp_app_ctx->app_ctx->allocator, 1, sizeof(struct progress_update_stream)); update_stream->transfer = transfer_ctx; update_stream->wrapped_stream = body_input; update_stream->allocator = cp_app_ctx->app_ctx->allocator; update_stream->base.vtable = &s_update_input_stream_vtable; aws_ref_count_init(&update_stream->base.ref_count, update_stream, s_input_destroy); struct aws_input_stream *wrap_stream = &update_stream->base; aws_http_message_set_body_stream(request_options.message, wrap_stream); transfer_ctx->meta_request = aws_s3_client_make_meta_request(cp_app_ctx->app_ctx->client, &request_options); /* message owns the stream */ aws_input_stream_release(wrap_stream); aws_http_message_release(request_options.message); if (!transfer_ctx->meta_request) { aws_mem_release(cp_app_ctx->app_ctx->allocator, transfer_ctx); return AWS_OP_ERR; } aws_mutex_lock(&transfer_ctx->cp_app_ctx->mutex); transfer_ctx->cp_app_ctx->expected_transfers++; aws_mutex_unlock(&transfer_ctx->cp_app_ctx->mutex); return AWS_OP_SUCCESS; } /* invoked upon walking a directory. it's invoked for each entry found in the directory. */ static bool s_on_directory_entry(const struct aws_directory_entry *entry, void *user_data) { struct cp_app_ctx *cp_app_ctx = user_data; if (entry->file_type & AWS_FILE_TYPE_FILE) { struct aws_byte_cursor escaped_dest_path = entry->relative_path; aws_byte_cursor_advance(&escaped_dest_path, cp_app_ctx->source_uri.uri_str.len); int ret_val = s_kickoff_put_object(cp_app_ctx, &entry->relative_path, &escaped_dest_path, entry->file_size); return ret_val == AWS_OP_SUCCESS; } return true; } /* upon each byte downloaded, update the progress bar, and write to disk. */ int s_get_body_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)meta_request; (void)range_start; struct single_transfer_ctx *transfer_ctx = user_data; fwrite(body->ptr, sizeof(uint8_t), body->len, transfer_ctx->output_sink); progress_listener_update_progress(transfer_ctx->listener, body->len); return AWS_OP_SUCCESS; } /* Invoked when the get object request completes. 
*/ void s_get_request_finished( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; struct single_transfer_ctx *transfer_ctx = user_data; struct aws_string *state = NULL; if (meta_request_result->error_code == AWS_ERROR_SUCCESS) { state = aws_string_new_from_c_str(transfer_ctx->cp_app_ctx->app_ctx->allocator, "Completed"); } else { state = aws_string_new_from_c_str(transfer_ctx->cp_app_ctx->app_ctx->allocator, "Failed"); } progress_listener_update_state(transfer_ctx->listener, state); aws_string_destroy(state); fclose(transfer_ctx->output_sink); aws_mutex_lock(&transfer_ctx->cp_app_ctx->mutex); transfer_ctx->cp_app_ctx->completed_transfers++; aws_mutex_unlock(&transfer_ctx->cp_app_ctx->mutex); aws_condition_variable_notify_one(&transfer_ctx->cp_app_ctx->c_var); aws_s3_meta_request_release(transfer_ctx->meta_request); aws_mem_release(transfer_ctx->cp_app_ctx->app_ctx->allocator, transfer_ctx); } /* Setup a get object request and write the file to disk as it's downloaded. */ static int s_kickoff_get_object( struct cp_app_ctx *cp_app_ctx, const struct aws_byte_cursor *key, const struct aws_byte_cursor *destination, uint64_t size) { struct single_transfer_ctx *transfer_ctx = aws_mem_calloc(cp_app_ctx->app_ctx->allocator, 1, sizeof(struct single_transfer_ctx)); transfer_ctx->cp_app_ctx = cp_app_ctx; struct aws_byte_buf label_buf; struct aws_byte_cursor operation_name_cur = aws_byte_cursor_from_c_str("download: s3://"); aws_byte_buf_init_copy_from_cursor(&label_buf, cp_app_ctx->app_ctx->allocator, operation_name_cur); aws_byte_buf_append_dynamic(&label_buf, &cp_app_ctx->source_uri.host_name); struct aws_byte_cursor slash_cur = aws_byte_cursor_from_c_str("/"); aws_byte_buf_append_dynamic(&label_buf, &slash_cur); aws_byte_buf_append_dynamic(&label_buf, key); struct aws_byte_cursor to_cur = aws_byte_cursor_from_c_str(" to "); aws_byte_buf_append_dynamic(&label_buf, &to_cur); aws_byte_buf_append_dynamic(&label_buf, destination); struct aws_string *label = aws_string_new_from_buf(cp_app_ctx->app_ctx->allocator, &label_buf); aws_byte_buf_clean_up(&label_buf); struct aws_string *state = aws_string_new_from_c_str(cp_app_ctx->app_ctx->allocator, "In Progress"); transfer_ctx->listener = progress_listener_new(cp_app_ctx->listener_group, label, state, size); aws_string_destroy(state); aws_string_destroy(label); aws_byte_buf_clean_up(&label_buf); struct aws_string *file_path = aws_string_new_from_cursor(cp_app_ctx->app_ctx->allocator, destination); struct aws_string *mode = aws_string_new_from_c_str(cp_app_ctx->app_ctx->allocator, "wb"); transfer_ctx->output_sink = aws_fopen_safe(file_path, mode); aws_string_destroy(mode); aws_string_destroy(file_path); if (!transfer_ctx->output_sink) { return AWS_OP_ERR; } struct aws_s3_meta_request_options request_options = { .user_data = transfer_ctx, .signing_config = &cp_app_ctx->app_ctx->signing_config, .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .finish_callback = s_get_request_finished, .body_callback = s_get_body_callback, }; struct aws_http_header host_header = { .name = g_host_header_name, .value = aws_byte_cursor_from_c_str(cp_app_ctx->source_endpoint), }; struct aws_http_header accept_header = { .name = aws_byte_cursor_from_c_str("accept"), .value = aws_byte_cursor_from_c_str("*/*"), }; struct aws_http_header user_agent_header = { .name = aws_byte_cursor_from_c_str("user-agent"), .value = aws_byte_cursor_from_c_str("AWS common runtime command-line client"), }; 
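    /* Build the GET request: Host, Accept, and User-Agent headers, with "/{key}" as the request path. */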
request_options.message = aws_http_message_new_request(cp_app_ctx->app_ctx->allocator); aws_http_message_add_header(request_options.message, host_header); aws_http_message_add_header(request_options.message, accept_header); aws_http_message_add_header(request_options.message, user_agent_header); aws_http_message_set_request_method(request_options.message, aws_http_method_get); struct aws_byte_buf path_buf; aws_byte_buf_init(&path_buf, cp_app_ctx->app_ctx->allocator, key->len + 1); aws_byte_buf_append_dynamic(&path_buf, &slash_cur); aws_byte_buf_append_dynamic(&path_buf, key); struct aws_byte_cursor path_cur = aws_byte_cursor_from_buf(&path_buf); aws_http_message_set_request_path(request_options.message, path_cur); aws_byte_buf_clean_up(&path_buf); transfer_ctx->meta_request = aws_s3_client_make_meta_request(cp_app_ctx->app_ctx->client, &request_options); if (!transfer_ctx->meta_request) { return AWS_OP_ERR; } aws_mutex_lock(&transfer_ctx->cp_app_ctx->mutex); transfer_ctx->cp_app_ctx->expected_transfers++; aws_mutex_unlock(&transfer_ctx->cp_app_ctx->mutex); return AWS_OP_SUCCESS; } /* upon listing the objects in a bucket, this is invoked for each object encountered. */ static int s_on_list_object(const struct aws_s3_object_info *info, void *user_data) { struct cp_app_ctx *cp_app_ctx = user_data; /* size greater than zero means it's an actual object. */ if (info->key.len > 0) { struct aws_byte_cursor trimmed_key = info->key; /* in this first case, if prefix and key are the same, we just download to the dest uri at key name. */ if (aws_byte_cursor_eq_ignore_case(&info->key, &info->prefix)) { for (size_t i = trimmed_key.len - 1; i > 0; --i) { if (trimmed_key.ptr[i] == '/') { aws_byte_cursor_advance(&trimmed_key, i + 1); break; } } } else if (info->prefix.len) { aws_byte_cursor_advance(&trimmed_key, info->prefix.len); } /* if we're going to be downloading to disk, set up the get object requests here. */ if (cp_app_ctx->dest_file_system) { struct aws_byte_buf dest_directory; aws_byte_buf_init_copy( &dest_directory, cp_app_ctx->app_ctx->allocator, &cp_app_ctx->destination_uri.uri_str); struct aws_string *dir_path = aws_string_new_from_buf(cp_app_ctx->app_ctx->allocator, &dest_directory); if (!aws_directory_exists(dir_path)) { aws_directory_create(dir_path); } aws_string_destroy(dir_path); struct aws_array_list splits; aws_array_list_init_dynamic(&splits, cp_app_ctx->app_ctx->allocator, 8, sizeof(struct aws_byte_cursor)); aws_byte_cursor_split_on_char(&trimmed_key, '/', &splits); for (size_t i = 0; i < aws_array_list_length(&splits); ++i) { struct aws_byte_cursor path_component; aws_array_list_get_at(&splits, &path_component, i); if (path_component.len > 0) { if (dest_directory.buffer[dest_directory.len - 1] != AWS_PATH_DELIM) { struct aws_byte_cursor slash_cur = aws_byte_cursor_from_c_str(AWS_PATH_DELIM_STR); aws_byte_buf_append_dynamic(&dest_directory, &slash_cur); } aws_byte_buf_append_dynamic(&dest_directory, &path_component); dir_path = aws_string_new_from_buf(cp_app_ctx->app_ctx->allocator, &dest_directory); if (i < aws_array_list_length(&splits) - 1 && !aws_directory_exists(dir_path)) { aws_directory_create(dir_path); } aws_string_destroy(dir_path); } } struct aws_byte_cursor destination_cur = aws_byte_cursor_from_buf(&dest_directory); int ret_val = s_kickoff_get_object(cp_app_ctx, &info->key, &destination_cur, info->size); aws_byte_buf_clean_up(&dest_directory); return ret_val; } /* otherwise, we're copying between buckets. Set up the copy here. 
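 * The destination key is the destination URI's path with the listed key appended after its
 * prefix has been trimmed, then handed to s_kick_off_copy_object_request.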
*/ struct aws_byte_buf destination_key; aws_byte_buf_init_copy_from_cursor( &destination_key, cp_app_ctx->app_ctx->allocator, cp_app_ctx->destination_uri.path); aws_byte_buf_append_dynamic(&destination_key, &trimmed_key); struct aws_byte_cursor destination_key_cur = aws_byte_cursor_from_buf(&destination_key); int return_code = s_kick_off_copy_object_request( cp_app_ctx, &cp_app_ctx->source_uri.host_name, &info->key, &destination_key_cur); aws_byte_buf_clean_up(&destination_key); return return_code; } return AWS_OP_SUCCESS; } static bool s_are_all_transfers_and_listings_done(void *arg) { struct cp_app_ctx *cp_app_ctx = arg; return cp_app_ctx->expected_transfers == cp_app_ctx->completed_transfers && cp_app_ctx->list_objects_completed; } void s_on_object_list_finished(struct aws_s3_paginator *paginator, int error_code, void *user_data) { struct cp_app_ctx *cp_app_ctx = user_data; if (error_code != AWS_OP_SUCCESS) { fprintf( stderr, "Failure while listing objects. Please check if you have valid credentials and s3 path is correct. Error: " "%s\n", aws_error_debug_str(error_code)); exit(1); } if (aws_s3_paginator_has_more_results(paginator)) { aws_s3_paginator_continue(paginator, &cp_app_ctx->app_ctx->signing_config); } else { aws_mutex_lock(&cp_app_ctx->mutex); cp_app_ctx->list_objects_completed = true; aws_mutex_unlock(&cp_app_ctx->mutex); } } void s_dispatch_and_run_transfers(struct cp_app_ctx *cp_app_ctx) { struct aws_s3_paginator *paginator = NULL; /* the source argument is either a directory on disk or s3 (it's not actually possible to tell a prefix from an * object) */ if (cp_app_ctx->source_is_directory_or_prefix) { /* uploading a directory from disk to S3 */ if (cp_app_ctx->source_file_system) { struct aws_string *path = aws_string_new_from_buf(cp_app_ctx->app_ctx->allocator, &cp_app_ctx->source_uri.uri_str); if (aws_directory_traverse(cp_app_ctx->app_ctx->allocator, path, true, s_on_directory_entry, cp_app_ctx)) { fprintf( stderr, "Failure while traversing directory. Error %s\n", aws_error_debug_str(aws_last_error())); exit(1); } aws_string_destroy(path); aws_mutex_lock(&cp_app_ctx->mutex); aws_condition_variable_wait_pred( &cp_app_ctx->c_var, &cp_app_ctx->mutex, s_are_all_transfers_done, cp_app_ctx); aws_mutex_unlock(&cp_app_ctx->mutex); /* due to trickery in list_objects, this handles the source is s3 key instead of just a prefix, so we * don't need to handle that later.*/ } else { /* this is an s3 bucket, and we'll be downloading it to disk. Incidentally, this also works for plain ole * single objects. 
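 * Objects are discovered with a paginated ListObjectsV2 (aws_s3_initiate_list_objects); each page's
 * objects kick off GetObject meta requests via s_on_list_object, and s_on_object_list_finished
 * requests the next page until no results remain.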
*/ char main_endpoint[1024]; AWS_ZERO_ARRAY(main_endpoint); snprintf(main_endpoint, sizeof(main_endpoint), "s3.%s.amazonaws.com", cp_app_ctx->app_ctx->region); struct aws_byte_cursor prefix_cur = cp_app_ctx->source_uri.path; aws_byte_cursor_advance(&prefix_cur, 1); struct aws_s3_list_objects_params list_objects_params = { .user_data = cp_app_ctx, .endpoint = aws_byte_cursor_from_c_str(main_endpoint), .client = cp_app_ctx->app_ctx->client, .prefix = prefix_cur, .bucket_name = cp_app_ctx->source_uri.host_name, .on_object = s_on_list_object, .on_list_finished = s_on_object_list_finished, }; paginator = aws_s3_initiate_list_objects(cp_app_ctx->app_ctx->allocator, &list_objects_params); if (!paginator) { fprintf(stderr, "List objects failed with error %s\n", aws_error_debug_str(aws_last_error())); s_usage(1); } aws_s3_paginator_continue(paginator, &cp_app_ctx->app_ctx->signing_config); aws_mutex_lock(&cp_app_ctx->mutex); aws_condition_variable_wait_pred( &cp_app_ctx->c_var, &cp_app_ctx->mutex, s_are_all_transfers_and_listings_done, cp_app_ctx); aws_mutex_unlock(&cp_app_ctx->mutex); aws_s3_paginator_release(paginator); } } else { /* only handles a single file from disk being uploaded to s3. */ if (cp_app_ctx->source_file_system) { struct aws_byte_cursor source_path = aws_byte_cursor_from_buf(&cp_app_ctx->source_uri.uri_str); int64_t source_file_length = 0; struct aws_string *source_path_str = aws_string_new_from_cursor(cp_app_ctx->app_ctx->allocator, &source_path); struct aws_string *open_mode = aws_string_new_from_c_str(cp_app_ctx->app_ctx->allocator, "rb"); FILE *src_file = aws_fopen_safe(source_path_str, open_mode); if (!src_file) { fprintf(stderr, "File open failed with error %s\n", aws_error_debug_str(aws_last_error())); s_usage(1); } aws_file_get_length(src_file, &source_file_length); fclose(src_file); aws_string_destroy(open_mode); aws_string_destroy(source_path_str); struct aws_byte_cursor file_name_portion = cp_app_ctx->source_uri.path; /* just the final segment of the key */ for (size_t i = file_name_portion.len - 1; i > 0; --i) { if (file_name_portion.ptr[i] == AWS_PATH_DELIM) { aws_byte_cursor_advance(&file_name_portion, i + 1); break; } } if (s_kickoff_put_object(cp_app_ctx, &source_path, &file_name_portion, source_file_length)) { fprintf(stderr, "File transfer failed with error %s\n", aws_error_debug_str(aws_last_error())); s_usage(1); } aws_mutex_lock(&cp_app_ctx->mutex); aws_condition_variable_wait_pred( &cp_app_ctx->c_var, &cp_app_ctx->mutex, s_are_all_transfers_done, cp_app_ctx); aws_mutex_unlock(&cp_app_ctx->mutex); } } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/s3-ls.c000066400000000000000000000144531456575232400226620ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include "app_ctx.h" #include #include struct s3_ls_app_data { struct aws_uri uri; struct app_ctx *app_ctx; struct aws_mutex mutex; struct aws_condition_variable cvar; bool execution_completed; bool long_format; }; static void s_usage(int exit_code) { FILE *output = exit_code == 0 ? 
stdout : stderr; fprintf(output, "usage: s3 ls [options] s3://{bucket}[/prefix]\n"); fprintf(output, " bucket: the S3 bucket to list objects\n"); fprintf(output, " prefix: the prefix to filter\n"); fprintf(output, " -l, List in long format\n"); fprintf(output, " -h, --help\n"); fprintf(output, " Display this message and quit.\n"); exit(exit_code); } static struct aws_cli_option s_long_options[] = { {"long-format", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'l'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_parse_options(int argc, char **argv, struct s3_ls_app_data *ctx) { int option_index = 0; int opt_val = 0; bool uri_found = false; do { opt_val = aws_cli_getopt_long(argc, argv, "l", s_long_options, &option_index); /* START_OF_TEXT means our positional argument */ if (opt_val == 'l') { ctx->long_format = true; } if (opt_val == 0x02) { struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str(aws_cli_positional_arg); if (aws_uri_init_parse(&ctx->uri, ctx->app_ctx->allocator, &uri_cursor)) { fprintf( stderr, "Failed to parse uri %s with error %s\n", (char *)uri_cursor.ptr, aws_error_debug_str(aws_last_error())); s_usage(1); } uri_found = true; } } while (opt_val != -1); if (!uri_found) { fprintf(stderr, "A URI for the request must be supplied.\n"); s_usage(1); } } /** * Predicate used to decide if the application is ready to exit. * The corresponding condition variable is set when the last * page of ListObjects is received. */ static bool s_app_completion_predicate(void *arg) { struct s3_ls_app_data *app_ctx = arg; return app_ctx->execution_completed; } /** * Called once for each object returned in the ListObjectsV2 responses. */ int s_on_object(const struct aws_s3_object_info *info, void *user_data) { struct s3_ls_app_data *app_ctx = user_data; if (app_ctx->long_format) { printf("%-18" PRIu64 " ", info->size); } printf("%.*s\n", (int)info->key.len, info->key.ptr); return AWS_OP_SUCCESS; } /** * Called once for each ListObjectsV2 response received. * If the response contains a continuation token indicating there are more results to be fetched, * requests the next page using aws_s3_paginator_continue. */ void s_on_list_finished(struct aws_s3_paginator *paginator, int error_code, void *user_data) { struct s3_ls_app_data *app_ctx = user_data; if (error_code == 0) { bool has_more_results = aws_s3_paginator_has_more_results(paginator); if (has_more_results) { /* get next page */ int result = aws_s3_paginator_continue(paginator, &app_ctx->app_ctx->signing_config); if (result) { fprintf(stderr, "ERROR returned by aws_s3_paginator_continue from s_on_list_finished: %d\n", result); } return; } } else { fprintf( stderr, "Failure while listing objects. Please check if you have valid credentials and s3 path is correct. " "Error: " "%s\n", aws_error_debug_str(error_code)); } /* all pages received. triggers the condition variable to exit the application. 
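 * execution_completed is set under the mutex so the change is visible to the predicate used by
 * aws_condition_variable_wait_pred in s3_ls_main.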
*/ aws_mutex_lock(&app_ctx->mutex); app_ctx->execution_completed = true; aws_mutex_unlock(&app_ctx->mutex); aws_condition_variable_notify_one(&app_ctx->cvar); } int s3_ls_main(int argc, char *argv[], const char *command_name, void *user_data) { (void)command_name; struct app_ctx *app_ctx = user_data; if (app_ctx->help_requested) { s_usage(0); } if (!app_ctx->region) { fprintf(stderr, "region is a required argument\n"); s_usage(1); } struct s3_ls_app_data impl_data = { .app_ctx = app_ctx, .mutex = AWS_MUTEX_INIT, .cvar = AWS_CONDITION_VARIABLE_INIT, }; app_ctx->sub_command_data = &impl_data; s_parse_options(argc, argv, &impl_data); struct aws_byte_cursor bucket = impl_data.uri.host_name; struct aws_byte_cursor prefix; if (impl_data.uri.path.len == 0 || (impl_data.uri.path.len == 1 && impl_data.uri.path.ptr[0] == '/')) { prefix.len = 0; prefix.ptr = NULL; } else { /* skips the initial / in the path */ prefix.len = impl_data.uri.path.len - 1; prefix.ptr = impl_data.uri.path.ptr + 1; } /* listObjects */ struct aws_s3_list_objects_params params = {.client = app_ctx->client, .bucket_name = bucket, .prefix = prefix}; char endpoint[1024]; snprintf(endpoint, sizeof(endpoint), "s3.%s.amazonaws.com", app_ctx->region); params.endpoint = aws_byte_cursor_from_c_str(endpoint); params.user_data = &impl_data; params.on_object = &s_on_object; params.on_list_finished = &s_on_list_finished; struct aws_s3_paginator *paginator = aws_s3_initiate_list_objects(app_ctx->allocator, ¶ms); int paginator_result = aws_s3_paginator_continue(paginator, &app_ctx->signing_config); if (paginator_result) { printf("ERROR returned from initial call to aws_s3_paginator_continue: %d \n", paginator_result); } aws_s3_paginator_release(paginator); /* wait completion of last page */ aws_mutex_lock(&impl_data.mutex); aws_condition_variable_wait_pred(&impl_data.cvar, &impl_data.mutex, s_app_completion_predicate, &impl_data); aws_mutex_unlock(&impl_data.mutex); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/samples/s3/s3-platform_info.c000066400000000000000000000064631456575232400251050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include "app_ctx.h" struct s3_compute_platform_ctx { struct app_ctx *app_ctx; struct aws_byte_cursor instance_type; }; static void s_usage(int exit_code) { FILE *output = exit_code == 0 ? stdout : stderr; fprintf(output, "usage: s3 platform-info [options]\n"); fprintf( output, " -instance-type, (optional) Instance type to look up configuration for, if not set it will be the current " "executing environment. 
\n"); fprintf(output, " -h, --help\n"); fprintf(output, " Display this message and quit.\n"); exit(exit_code); } static struct aws_cli_option s_long_options[] = { {"instance-type", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'i'}, /* Per getopt(3) the last element of the array has to be filled with all zeros */ {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, }; static void s_parse_options(int argc, char **argv, struct s3_compute_platform_ctx *ctx) { int option_index = 0; int opt_val = 0; do { opt_val = aws_cli_getopt_long(argc, argv, "i:", s_long_options, &option_index); /* START_OF_TEXT means our positional argument */ if (opt_val == 'i') { ctx->instance_type = aws_byte_cursor_from_c_str(aws_cli_optarg); } } while (opt_val != -1); } int s3_compute_platform_info_main(int argc, char *argv[], const char *command_name, void *user_data) { (void)command_name; struct app_ctx *app_ctx = user_data; if (app_ctx->help_requested) { s_usage(0); } struct s3_compute_platform_ctx compute_platform_app_ctx = { .app_ctx = app_ctx, }; app_ctx->sub_command_data = &compute_platform_app_ctx; s_parse_options(argc, argv, &compute_platform_app_ctx); const struct aws_s3_platform_info *platform_info = aws_s3_get_current_platform_info(); printf("{\n"); printf("\t'instance_type': '" PRInSTR "',\n", AWS_BYTE_CURSOR_PRI(platform_info->instance_type)); printf("\t'max_throughput_gbps': %d,\n", (int)platform_info->max_throughput_gbps); printf("\t'has_recommended_configuration': %s,\n", platform_info->has_recommended_configuration ? "true" : "false"); printf("\t'cpu_groups': [\n"); for (size_t i = 0; i < platform_info->cpu_group_info_array_length; ++i) { printf("\t{\n"); printf("\t\t'cpu_group_index': %d,\n", (int)platform_info->cpu_group_info_array[i].cpu_group); printf("\t\t'cpus_in_group': %d,\n", (int)platform_info->cpu_group_info_array[i].cpus_in_group); printf("\t\t'usable_network_devices': [\n"); for (size_t j = 0; j < platform_info->cpu_group_info_array[i].nic_name_array_length; j++) { printf( "\t\t\t'" PRInSTR "'", AWS_BYTE_CURSOR_PRI(platform_info->cpu_group_info_array[i].nic_name_array[j])); if (j < platform_info->cpu_group_info_array[i].nic_name_array_length - 1) { printf(","); } printf("\n"); } printf("\t\t]\n"); printf("\t}"); if (i < platform_info->cpu_group_info_array_length - 1) { printf(","); } printf("\n"); } printf("\t]\n"); printf("}\n"); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/scripts/000077500000000000000000000000001456575232400212445ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/scripts/update_s3_endpoint_resolver_artifacts.py000066400000000000000000000104501456575232400313660ustar00rootroot00000000000000# This script pulls latest 'partitions.json' and 's3-endpoint-rule-set.json' from Git. # You will need a secret in secrets manager which has the 'ruleset-url' and 'ruleset-token'. # It uses the latest files to generate 'source/s3_endpoint_resolver/aws_s3_endpoint_rule_set.c' and # 'source/s3_endpoint_resolver/aws_s3_endpoint_resolver_partition.c' import argparse import json import boto3 import requests def escape_char(c): escape_dict = { '\\': '\\\\', '\'': '\\\'', '\0': '\\0', '\a': '\\a', '\b': '\\b', '\f': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\v': '\\v' } return escape_dict.get(c, c) def get_header(): return """\ /** * Copyright Amazon.com, Inc. or its affiliates. * All Rights Reserved. SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_endpoint_resolver.h" #include /** * This file is generated using scripts/update_s3_endpoint_resolver_artifacts.py. * Do not modify directly. */ /* clang-format off */ """ def generate_c_file_from_json(json_content, c_file_name, c_struct_name): num_chars_per_line = 20 try: # Compact the json compact_json_str = json.dumps(json_content, separators=(',', ':')) compact_c = [] for i in range(0, len(compact_json_str), num_chars_per_line): compact_c.append( ', '.join("'{}'".format(escape_char(char)) for char in compact_json_str[i:i + num_chars_per_line])) # Write json to a C file with open(c_file_name, 'w') as f: f.write(get_header()) f.write(f"static const char s_generated_array[] = {{\n\t") f.write(",\n\t".join(compact_c)) f.write("};\n\n") f.write(f"const struct aws_byte_cursor {c_struct_name} = {{\n\t") f.write(f".len = {len(compact_json_str)},\n\t") f.write(f".ptr = (uint8_t *) s_generated_array\n}};\n") print(f"{c_file_name} has been created successfully.") except Exception as e: print(f"An error occurred: {e}") def get_secret_from_secrets_manager(secret_name, region_name): session = boto3.session.Session() client = session.client( service_name='secretsmanager', region_name=region_name ) try: get_secret_value_response = client.get_secret_value( SecretId=secret_name ) except Exception as e: raise e return json.loads(get_secret_value_response['SecretString']) def download_from_git(url, token=None): headers = {'Accept': 'application/vnd.github+json'} if token is not None: headers['Authorization'] = f"Bearer {token}" http_response = requests.get(url, headers=headers) if http_response.status_code != 200: raise Exception(f"HTTP Status code is {http_response.status_code}") return json.loads(http_response.content.decode()) if __name__ == '__main__': argument_parser = argparse.ArgumentParser(description="Endpoint Ruleset Updater") argument_parser.add_argument("--ruleset", metavar="", required=False, help="Path to endpoint ruleset json file") argument_parser.add_argument("--partitions", metavar="", required=False, help="Path to partitions json file") parsed_args = argument_parser.parse_args() git_secret = get_secret_from_secrets_manager("s3/endpoint/resolver/artifacts/git", "us-east-1") if (parsed_args.ruleset): with open(parsed_args.ruleset) as f: rule_set = json.load(f) else: rule_set = download_from_git(git_secret['ruleset-url'], git_secret['ruleset-token']) if (parsed_args.partitions): with open(parsed_args.partitions) as f: partition = json.load(f) else: partition = download_from_git('https://raw.githubusercontent.com/aws/aws-sdk-cpp/main/tools/code-generation/partitions/partitions.json') generate_c_file_from_json( rule_set, 'source/s3_endpoint_resolver/aws_s3_endpoint_rule_set.c', 'aws_s3_endpoint_rule_set') generate_c_file_from_json( partition, 'source/s3_endpoint_resolver/aws_s3_endpoint_resolver_partition.c', 'aws_s3_endpoint_resolver_partitions') aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/000077500000000000000000000000001456575232400210555ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3.c000066400000000000000000000151631456575232400215540ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #define AWS_DEFINE_ERROR_INFO_S3(CODE, STR) AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-s3") /* clang-format off */ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER, "Response missing required Content-Range header."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER, "Response contains invalid Content-Range header."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER, "Response missing required Content-Length header."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER, "Response contains invalid Content-Length header."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_ETAG, "Response missing required ETag header."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INTERNAL_ERROR, "Response code indicates internal server error"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_SLOW_DOWN, "Response code indicates throttling"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_RESPONSE_STATUS, "Invalid response status from request"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_UPLOAD_ID, "Upload Id not found in create-multipart-upload response"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_PROXY_PARSE_FAILED, "Could not parse proxy URI"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_UNSUPPORTED_PROXY_SCHEME, "Given Proxy URI has an unsupported scheme"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_CANCELED, "Request successfully cancelled"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_RANGE_HEADER, "Range header has invalid syntax"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MULTIRANGE_HEADER_UNSUPPORTED, "Range header specifies multiple ranges which is unsupported"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH, "response checksum header does not match calculated checksum"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED, "failed to calculate a checksum for the provided stream"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_PAUSED, "Request successfully paused"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED, "Failed to parse response from ListParts"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH, "Checksum does not match previously uploaded part"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESUME_FAILED, "Resuming request failed"), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_OBJECT_MODIFIED, "The object modifed during download."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, "Async error received from S3 and not recoverable from retry."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE, "The metric data is not available, the requests ends before the metric happens."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH, "Request body length must match Content-Length header."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED, "RequestTimeTooSkewed error received from S3."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_FILE_MODIFIED, "The file was modified during upload."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, "Request was not created due to used memory exceeding memory limit."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, "Specified memory configuration is invalid for the system. " "Memory limit should be at least 1GiB. 
Part size and max part size should be smaller than memory limit."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, "CreateSession call failed when signing with S3 Express."), AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE, "part_size mismatch, possibly due to wrong object_size_hint. Retrying with Range instead of partNumber."), }; /* clang-format on */ static struct aws_error_info_list s_error_list = { .error_list = s_errors, .count = AWS_ARRAY_SIZE(s_errors), }; static struct aws_log_subject_info s_s3_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_GENERAL, "S3General", "Subject for aws-c-s3 logging that defies categorization."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_CLIENT, "S3Client", "Subject for aws-c-s3 logging from an aws_s3_client."), DEFINE_LOG_SUBJECT_INFO( AWS_LS_S3_CLIENT_STATS, "S3ClientStats", "Subject for aws-c-s3 logging for stats tracked by an aws_s3_client."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_REQUEST, "S3Request", "Subject for aws-c-s3 logging from an aws_s3_request."), DEFINE_LOG_SUBJECT_INFO( AWS_LS_S3_META_REQUEST, "S3MetaRequest", "Subject for aws-c-s3 logging from an aws_s3_meta_request."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_ENDPOINT, "S3Endpoint", "Subject for aws-c-s3 logging from an aws_s3_endpoint."), }; static struct aws_log_subject_info_list s_s3_log_subject_list = { .subject_list = s_s3_log_subject_infos, .count = AWS_ARRAY_SIZE(s_s3_log_subject_infos), }; static bool s_library_initialized = false; static struct aws_allocator *s_library_allocator = NULL; static struct aws_s3_platform_info_loader *s_loader; void aws_s3_library_init(struct aws_allocator *allocator) { if (s_library_initialized) { return; } if (allocator) { s_library_allocator = allocator; } else { s_library_allocator = aws_default_allocator(); } aws_auth_library_init(s_library_allocator); aws_http_library_init(s_library_allocator); aws_register_error_info(&s_error_list); aws_register_log_subject_info_list(&s_s3_log_subject_list); s_loader = aws_s3_platform_info_loader_new(allocator); AWS_FATAL_ASSERT(s_loader); s_library_initialized = true; } const struct aws_s3_platform_info *aws_s3_get_current_platform_info(void) { return aws_s3_get_platform_info_for_current_environment(s_loader); } struct aws_array_list aws_s3_get_platforms_with_recommended_config(void) { return aws_s3_get_recommended_platforms(s_loader); } void aws_s3_library_clean_up(void) { if (!s_library_initialized) { return; } s_library_initialized = false; s_loader = aws_s3_platform_info_loader_release(s_loader); aws_thread_join_all_managed(); aws_unregister_log_subject_info_list(&s_s3_log_subject_list); aws_unregister_error_info(&s_error_list); aws_http_library_clean_up(); aws_auth_library_clean_up(); s_library_allocator = NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_auto_ranged_get.c000066400000000000000000001276401456575232400247670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_auto_ranged_get.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" #include #include const uint32_t s_conservative_max_requests_in_flight = 8; const struct aws_byte_cursor g_application_xml_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/xml"); static void s_s3_meta_request_auto_ranged_get_destroy(struct aws_s3_meta_request *meta_request); static bool s_s3_auto_ranged_get_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); static struct aws_future_void *s_s3_auto_ranged_get_prepare_request(struct aws_s3_request *request); static void s_s3_auto_ranged_get_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code); static struct aws_s3_meta_request_vtable s_s3_auto_ranged_get_vtable = { .update = s_s3_auto_ranged_get_update, .send_request_finish = aws_s3_meta_request_send_request_finish_default, .prepare_request = s_s3_auto_ranged_get_prepare_request, .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default, .sign_request = aws_s3_meta_request_sign_request_default, .finished_request = s_s3_auto_ranged_get_request_finished, .destroy = s_s3_meta_request_auto_ranged_get_destroy, .finish = aws_s3_meta_request_finish_default, }; static int s_s3_auto_ranged_get_success_status(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; AWS_PRECONDITION(auto_ranged_get); if (auto_ranged_get->initial_message_has_range_header) { return AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT; } return AWS_HTTP_STATUS_CODE_200_OK; } /* Allocate a new auto-ranged-get meta request. */ struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_get_new( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, const struct aws_s3_meta_request_options *options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(client); AWS_PRECONDITION(options); AWS_PRECONDITION(options->message); struct aws_s3_auto_ranged_get *auto_ranged_get = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_auto_ranged_get)); /* Try to initialize the base type. 
*/ if (aws_s3_meta_request_init_base( allocator, client, part_size, false, options, auto_ranged_get, &s_s3_auto_ranged_get_vtable, &auto_ranged_get->base)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not initialize base type for Auto-Ranged-Get Meta Request.", (void *)auto_ranged_get); aws_mem_release(allocator, auto_ranged_get); return NULL; } struct aws_http_headers *headers = aws_http_message_get_headers(auto_ranged_get->base.initial_request_message); AWS_ASSERT(headers != NULL); if (aws_http_headers_has(headers, g_range_header_name)) { auto_ranged_get->initial_message_has_range_header = true; if (aws_s3_parse_request_range_header( headers, &auto_ranged_get->initial_message_has_start_range, &auto_ranged_get->initial_message_has_end_range, &auto_ranged_get->initial_range_start, &auto_ranged_get->initial_range_end)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not parse Range header for Auto-Ranged-Get Meta Request.", (void *)auto_ranged_get); goto on_error; } } auto_ranged_get->initial_message_has_if_match_header = aws_http_headers_has(headers, g_if_match_header_name); auto_ranged_get->synced_data.first_part_size = auto_ranged_get->base.part_size; if (options->object_size_hint != NULL) { auto_ranged_get->object_size_hint_available = true; auto_ranged_get->object_size_hint = *options->object_size_hint; } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Created new Auto-Ranged Get Meta Request.", (void *)&auto_ranged_get->base); return &auto_ranged_get->base; on_error: /* This will also clean up the auto_ranged_get */ aws_s3_meta_request_release(&(auto_ranged_get->base)); return NULL; } static void s_s3_meta_request_auto_ranged_get_destroy(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; aws_string_destroy(auto_ranged_get->etag); aws_mem_release(meta_request->allocator, auto_ranged_get); } /* * This function returns the type of first request which we will also use to discover overall object size. */ static enum aws_s3_auto_ranged_get_request_type s_s3_get_request_type_for_discovering_object_size( const struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; AWS_ASSERT(auto_ranged_get); /* * When we attempt to download an empty file using the `AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE` * request type, the request fails with an empty file error. We then reset `object_range_known` * (`object_range_empty` is set to true) and try to download the file again with * `AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1`. We send another request, even though there is * no body, to provide successful response headers to the user. If the file is still empty, successful response * headers will be provided to the users. Otherwise, the newer version of the file will be downloaded. */ if (auto_ranged_get->synced_data.object_range_empty != 0) { auto_ranged_get->synced_data.object_range_empty = 0; return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1; } /* * If a range header exists but has no start-range (i.e. Range: bytes=-100), we perform a HeadRequest. If the * start-range is unknown, we could potentially execute a request from the end-range and keep that request around * until the meta request finishes. However, this approach involves the complexity of managing backpressure. 
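     * (Note that the HeadObject copies the original Range header, so its Content-Range response gives the
     * absolute start/end offsets that the subsequent part-sized GETs are derived from.)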
For * simplicity, we execute a HeadRequest if the start-range is not specified. */ if (auto_ranged_get->initial_message_has_range_header != 0) { return auto_ranged_get->initial_message_has_start_range ? AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE : AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT; } /* If we don't need checksum validation, then discover the size of the object while trying to get the first part. */ if (!meta_request->checksum_config.validate_response_checksum) { return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE; } /* If the object_size_hint indicates that it is a small one part file, then try to get the file directly * TODO: Bypass memory limiter so that we don't overallocate memory for small files */ if (auto_ranged_get->object_size_hint_available && auto_ranged_get->object_size_hint <= meta_request->part_size) { return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1; } /* Otherwise, do a headObject so that we can validate checksum if the file was uploaded as a single part */ return AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT; } static bool s_s3_auto_ranged_get_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(out_request); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; struct aws_s3_request *request = NULL; bool work_remaining = false; /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); /* If nothing has set the "finish result" then this meta request is still in progress, and we can potentially * send additional requests. */ if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) { if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) { uint32_t num_requests_in_flight = (auto_ranged_get->synced_data.num_parts_requested - auto_ranged_get->synced_data.num_parts_completed) + (uint32_t)aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests); /* auto-ranged-gets make use of body streaming, which will hold onto response bodies if parts earlier in * the file haven't arrived yet. This can potentially create a lot of backed up requests, causing us to * hit our global request limit. To help mitigate this, when the "conservative" flag is passed in, we * only allow the total amount of requests being sent/streamed to be inside a set limit. */ if (num_requests_in_flight > s_conservative_max_requests_in_flight) { goto has_work_remaining; } } /* If the overall range of the object that we are trying to retrieve isn't known yet, then we need to send a * request to figure that out. 
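         * Depending on the configuration, discovery is done with a HeadObject, a GetObject with partNumber=1,
         * or a ranged GetObject for the first part (see s_s3_get_request_type_for_discovering_object_size above).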
*/ if (!auto_ranged_get->synced_data.object_range_known) { if (auto_ranged_get->synced_data.head_object_sent || auto_ranged_get->synced_data.num_parts_requested > 0) { goto has_work_remaining; } struct aws_s3_buffer_pool_ticket *ticket = NULL; switch (s_s3_get_request_type_for_discovering_object_size(meta_request)) { case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT: AWS_LOGF_INFO( AWS_LS_S3_META_REQUEST, "id=%p: Doing a HeadObject to discover the size of the object", (void *)meta_request); request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT, AWS_S3_REQUEST_TYPE_HEAD_OBJECT, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_get->synced_data.head_object_sent = true; break; case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: AWS_LOGF_INFO( AWS_LS_S3_META_REQUEST, "id=%p: Doing a 'GET_OBJECT_WITH_PART_NUMBER_1' to discover the size of the object and get " "the first part", (void *)meta_request); ticket = aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta_request->part_size); if (ticket == NULL) { goto has_work_remaining; } request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1, AWS_S3_REQUEST_TYPE_GET_OBJECT, 1 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); request->ticket = ticket; ++auto_ranged_get->synced_data.num_parts_requested; break; case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: AWS_LOGF_INFO( AWS_LS_S3_META_REQUEST, "id=%p: Doing a 'GET_OBJECT_WITH_RANGE' to discover the size of the object and get the " "first part", (void *)meta_request); uint64_t part_range_start = 0; uint64_t first_part_size = meta_request->part_size; if (auto_ranged_get->initial_message_has_range_header) { /* * Currently, we only discover the size of the object when the initial range header includes * a start-range. If we ever implement skipping the HeadRequest for a Range request without * a start-range, this will need to update. 
*/ AWS_ASSERT(auto_ranged_get->initial_message_has_start_range); part_range_start = auto_ranged_get->initial_range_start; if (auto_ranged_get->initial_message_has_end_range) { first_part_size = aws_min_u64( first_part_size, auto_ranged_get->initial_range_end - auto_ranged_get->initial_range_start + 1); } auto_ranged_get->synced_data.first_part_size = first_part_size; } AWS_LOGF_INFO( AWS_LS_S3_META_REQUEST, "id=%p: Doing a ranged get to discover the size of the object and get the first part", (void *)meta_request); ticket = aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, (size_t)first_part_size); if (ticket == NULL) { goto has_work_remaining; } request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE, AWS_S3_REQUEST_TYPE_GET_OBJECT, 1 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); request->ticket = ticket; request->part_range_start = part_range_start; request->part_range_end = part_range_start + first_part_size - 1; /* range-end is inclusive */ ++auto_ranged_get->synced_data.num_parts_requested; break; default: AWS_FATAL_ASSERT( 0 && "s_s3_get_request_type_for_discovering_object_size returned unexpected discover " "object size request type"); } request->discovers_object_size = true; goto has_work_remaining; } /* If there are still more parts to be requested */ if (auto_ranged_get->synced_data.num_parts_requested < auto_ranged_get->synced_data.total_num_parts) { if (meta_request->client->enable_read_backpressure) { /* Don't start a part until we have enough window to send bytes to the user. * * Note that we start a part once we have enough window to deliver ANY of its bytes. * If we waited until the window was large enough for the WHOLE part, * we could end up stuck in a situation where the user is * waiting for more bytes before they'll open the window, * and this implementation is waiting for more window before it will send more parts. */ uint64_t read_data_requested = auto_ranged_get->synced_data.num_parts_requested * meta_request->part_size; if (read_data_requested >= meta_request->synced_data.read_window_running_total) { /* Avoid spamming users with this DEBUG message */ if (auto_ranged_get->synced_data.read_window_warning_issued == 0) { auto_ranged_get->synced_data.read_window_warning_issued = 1; AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Download paused because read window is zero. " "You must increment to window to continue.", (void *)meta_request); } goto has_work_remaining; } auto_ranged_get->synced_data.read_window_warning_issued = 0; } struct aws_s3_buffer_pool_ticket *ticket = aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta_request->part_size); if (ticket == NULL) { goto has_work_remaining; } request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE, AWS_S3_REQUEST_TYPE_GET_OBJECT, auto_ranged_get->synced_data.num_parts_requested + 1 /*part_number*/, AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY); request->ticket = ticket; aws_s3_calculate_auto_ranged_get_part_range( auto_ranged_get->synced_data.object_range_start, auto_ranged_get->synced_data.object_range_end, meta_request->part_size, auto_ranged_get->synced_data.first_part_size, request->part_number, &request->part_range_start, &request->part_range_end); ++auto_ranged_get->synced_data.num_parts_requested; goto has_work_remaining; } /* If there are parts that have not attempted delivery to the caller, then there is still work being done. 
*/ if (meta_request->synced_data.num_parts_delivery_completed < auto_ranged_get->synced_data.total_num_parts) { goto has_work_remaining; } } else { /* Else, if there is a finish result set, make sure that all work-in-progress winds down before the meta * request completely exits. */ if (auto_ranged_get->synced_data.head_object_sent && !auto_ranged_get->synced_data.head_object_completed) { goto has_work_remaining; } /* Wait for all requests to complete (successfully or unsuccessfully) before finishing.*/ if (auto_ranged_get->synced_data.num_parts_completed < auto_ranged_get->synced_data.num_parts_requested) { goto has_work_remaining; } /* If some parts are still being delivered to the caller, then wait for those to finish. */ if (meta_request->synced_data.num_parts_delivery_completed < meta_request->synced_data.num_parts_delivery_sent) { goto has_work_remaining; } } goto no_work_remaining; has_work_remaining: work_remaining = true; if (request != NULL) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Returning request %p for part %d of %d", (void *)meta_request, (void *)request, request->part_number, auto_ranged_get->synced_data.total_num_parts); } no_work_remaining: /* If some events are still being delivered to caller, then wait for those to finish */ if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { work_remaining = true; } if (!work_remaining) { aws_s3_meta_request_set_success_synced(meta_request, s_s3_auto_ranged_get_success_status(meta_request)); if (auto_ranged_get->synced_data.num_parts_checksum_validated == auto_ranged_get->synced_data.num_parts_requested) { /* If we have validated the checksum for every part, we set the meta request level checksum validation * result.*/ meta_request->synced_data.finish_result.did_validate = true; meta_request->synced_data.finish_result.validation_algorithm = auto_ranged_get->validation_algorithm; } } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ if (work_remaining) { *out_request = request; } else { AWS_ASSERT(request == NULL); aws_s3_meta_request_finish(meta_request); } return work_remaining; } /* Given a request, prepare it for sending based on its description. * Currently, this is actually synchronous. */ static struct aws_future_void *s_s3_auto_ranged_get_prepare_request(struct aws_s3_request *request) { AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; /* Generate a new ranged get request based on the original message. */ struct aws_http_message *message = NULL; struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; bool success = false; switch (request->request_tag) { case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT: /* A head object will be a copy of the original headers but with a HEAD request method. 
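             * (Roughly: the original GetObject message is cloned with all of its headers, the body is dropped,
             * and the request method is rewritten to HEAD.)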
*/ message = aws_s3_message_util_copy_http_message_no_body_all_headers( meta_request->allocator, meta_request->initial_request_message); if (message) { aws_http_message_set_request_method(message, g_head_method); } break; case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: message = aws_s3_ranged_get_object_message_new( meta_request->allocator, meta_request->initial_request_message, request->part_range_start, request->part_range_end); break; case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: message = aws_s3_message_util_copy_http_message_no_body_all_headers( meta_request->allocator, meta_request->initial_request_message); if (message) { aws_s3_message_util_set_multipart_request_path( meta_request->allocator, NULL, request->part_number, false, message); } break; } if (message == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not create message for request with tag %d for auto-ranged-get meta request.", (void *)meta_request, request->request_tag); goto finish; } if (meta_request->checksum_config.validate_response_checksum) { aws_http_headers_set(aws_http_message_get_headers(message), g_request_validation_mode, g_enabled); } if (!auto_ranged_get->initial_message_has_if_match_header && auto_ranged_get->etag) { /* Add the if_match to the request */ AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Added the If-Match header to request %p for part %d", (void *)meta_request, (void *)request, request->part_number); aws_http_headers_set( aws_http_message_get_headers(message), g_if_match_header_name, aws_byte_cursor_from_string(auto_ranged_get->etag)); } aws_s3_request_setup_send_data(request, message); aws_http_message_release(message); /* Success! */ AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Created request %p for part %d part sized %d", (void *)meta_request, (void *)request, request->part_number, request->has_part_size_response_body); success = true; finish:; struct aws_future_void *future = aws_future_void_new(meta_request->allocator); if (success) { aws_future_void_set_result(future); } else { aws_future_void_set_error(future, aws_last_error_or_unknown()); } return future; } /* Check the finish result of meta request. * Return true if the request failed because it downloaded an empty file. 
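 * (An empty object is recognized from the application/xml error body whose Error/ActualObjectSize element is "0".)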
* Return false if the request failed for any other reason */ static bool s_check_empty_file_download_error(struct aws_s3_request *failed_request) { struct aws_http_headers *failed_headers = failed_request->send_data.response_headers; struct aws_byte_buf failed_body = failed_request->send_data.response_body; if (failed_headers && failed_body.capacity > 0) { struct aws_byte_cursor content_type; AWS_ZERO_STRUCT(content_type); if (!aws_http_headers_get(failed_headers, g_content_type_header_name, &content_type)) { /* Content type found */ if (aws_byte_cursor_eq_ignore_case(&content_type, &g_application_xml_value)) { /* XML response */ struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&failed_body); const char *path_to_size[] = {"Error", "ActualObjectSize", NULL}; struct aws_byte_cursor size = {0}; aws_xml_get_body_at_path(failed_request->allocator, xml_doc, path_to_size, &size); if (aws_byte_cursor_eq_c_str(&size, "0")) { return true; } } } } return false; } static int s_discover_object_range_and_size( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code, uint64_t *out_object_range_start, uint64_t *out_object_range_end, uint64_t *out_object_size, uint64_t *out_first_part_size, bool *out_empty_file_error) { AWS_PRECONDITION(out_object_size); AWS_PRECONDITION(out_object_range_start); AWS_PRECONDITION(out_object_range_end); AWS_PRECONDITION(out_first_part_size); int result = AWS_OP_ERR; uint64_t content_length = 0; uint64_t object_size = 0; uint64_t object_range_start = 0; uint64_t object_range_end = 0; uint64_t first_part_size = 0; AWS_ASSERT(request->discovers_object_size); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; switch (request->request_tag) { case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT: if (error_code != AWS_ERROR_SUCCESS) { /* If the head request failed, there's nothing we can do, so resurface the error code. */ aws_raise_error(error_code); break; } /* There should be a Content-Length header that indicates the total size of the range.*/ if (aws_s3_parse_content_length_response_header( meta_request->allocator, request->send_data.response_headers, &content_length)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find content-length header for request %p", (void *)meta_request, (void *)request); break; } /* if the inital message had a ranged header, there should also be a Content-Range header that specifies the * object range and total object size. Otherwise, the size and range should be equal to the * total_content_length. */ if (!auto_ranged_get->initial_message_has_range_header) { object_size = content_length; if (content_length > 0) { object_range_end = content_length - 1; /* range-end is inclusive */ } } else if (aws_s3_parse_content_range_response_header( meta_request->allocator, request->send_data.response_headers, &object_range_start, &object_range_end, &object_size)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find content-range header for request %p", (void *)meta_request, (void *)request); break; } result = AWS_OP_SUCCESS; break; case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: AWS_ASSERT(request->part_number == 1); AWS_ASSERT(request->send_data.response_headers != NULL); /* There should be a Content-Length header that indicates the size of first part. 
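             * For a non-empty object, the total object size is then taken from the Content-Range header of the
             * same response (the value after the '/' in "bytes start-end/total").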
*/ if (aws_s3_parse_content_length_response_header( meta_request->allocator, request->send_data.response_headers, &content_length)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find content-length header for request %p", (void *)meta_request, (void *)request); break; } first_part_size = content_length; if (first_part_size > 0) { /* Parse the object size from the part response. */ if (aws_s3_parse_content_range_response_header( meta_request->allocator, request->send_data.response_headers, NULL, NULL, &object_size)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find content-range header for request %p", (void *)meta_request, (void *)request); break; } /* When discovering the object size via GET_OBJECT_WITH_PART_NUMBER_1, the object range is the entire * object. */ object_range_start = 0; object_range_end = object_size - 1; /* range-end is inclusive */ } result = AWS_OP_SUCCESS; break; case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: AWS_ASSERT(request->part_number == 1); if (error_code != AWS_ERROR_SUCCESS) { /* If we hit an empty file while trying to discover the object-size via part, then this request failure * is as designed. */ if (!auto_ranged_get->initial_message_has_range_header && s_check_empty_file_download_error(request)) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Detected empty file with request %p. Sending new request without range header.", (void *)meta_request, (void *)request); object_size = 0ULL; *out_empty_file_error = true; result = AWS_OP_SUCCESS; } else { /* Otherwise, resurface the error code. */ aws_raise_error(error_code); } break; } AWS_ASSERT(request->send_data.response_headers != NULL); /* Parse the object size from the part response. */ if (aws_s3_parse_content_range_response_header( meta_request->allocator, request->send_data.response_headers, &object_range_start, &object_range_end, &object_size)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find content-range header for request %p", (void *)meta_request, (void *)request); break; } if (auto_ranged_get->initial_message_has_range_header) { if (auto_ranged_get->initial_message_has_end_range) { object_range_end = aws_min_u64(object_size - 1, auto_ranged_get->initial_range_end); } else { object_range_end = object_size - 1; } } else { /* When discovering the object size via GET_OBJECT_WITH_RANGE, the object range is the entire object. 
*/ object_range_start = 0; object_range_end = object_size - 1; /* range-end is inclusive */ } result = AWS_OP_SUCCESS; break; default: AWS_ASSERT(false); break; } if (result == AWS_OP_SUCCESS) { *out_object_size = object_size; *out_object_range_start = object_range_start; *out_object_range_end = object_range_end; *out_first_part_size = first_part_size; } return result; } static void s_s3_auto_ranged_get_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); AWS_PRECONDITION(request); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; AWS_PRECONDITION(auto_ranged_get); uint64_t object_range_start = 0ULL; uint64_t object_range_end = 0ULL; uint64_t object_size = 0ULL; uint64_t first_part_size = 0ULL; bool found_object_size = false; bool request_failed = error_code != AWS_ERROR_SUCCESS; bool first_part_size_mismatch = (error_code == AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE); bool empty_file_error = false; if (request->discovers_object_size) { /* Try to discover the object-range and object-size.*/ if (s_discover_object_range_and_size( meta_request, request, error_code, &object_range_start, &object_range_end, &object_size, &first_part_size, &empty_file_error)) { error_code = aws_last_error_or_unknown(); goto update_synced_data; } if ((!request_failed || first_part_size_mismatch) && !auto_ranged_get->initial_message_has_if_match_header) { AWS_ASSERT(auto_ranged_get->etag == NULL); struct aws_byte_cursor etag_header_value; if (aws_http_headers_get(request->send_data.response_headers, g_etag_header_name, &etag_header_value)) { aws_raise_error(AWS_ERROR_S3_MISSING_ETAG); error_code = AWS_ERROR_S3_MISSING_ETAG; goto update_synced_data; } AWS_LOGF_TRACE( AWS_LS_S3_META_REQUEST, "id=%p Etag received for the meta request. value is: " PRInSTR "", (void *)meta_request, AWS_BYTE_CURSOR_PRI(etag_header_value)); auto_ranged_get->etag = aws_string_new_from_cursor(auto_ranged_get->base.allocator, &etag_header_value); } /* If we were able to discover the object-range/content length successfully, then any error code that was passed * into this function is being handled and does not indicate an overall failure.*/ error_code = AWS_ERROR_SUCCESS; found_object_size = true; if (!empty_file_error && meta_request->headers_callback != NULL) { struct aws_http_headers *response_headers = aws_http_headers_new(meta_request->allocator); copy_http_headers(request->send_data.response_headers, response_headers); if (request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE || request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1) { if (auto_ranged_get->initial_message_has_range_header) { /* Populate the header with object_range */ char content_range_buffer[64] = ""; snprintf( content_range_buffer, sizeof(content_range_buffer), "bytes %" PRIu64 "-%" PRIu64 "/%" PRIu64, object_range_start, object_range_end, object_size); aws_http_headers_set( response_headers, g_content_range_header_name, aws_byte_cursor_from_c_str(content_range_buffer)); } else { /* content range isn't applicable. */ aws_http_headers_erase(response_headers, g_content_range_header_name); } } uint64_t content_length = object_size ? 
object_range_end - object_range_start + 1 : 0; char content_length_buffer[64] = ""; snprintf(content_length_buffer, sizeof(content_length_buffer), "%" PRIu64, content_length); aws_http_headers_set( response_headers, g_content_length_header_name, aws_byte_cursor_from_c_str(content_length_buffer)); if (meta_request->headers_callback( meta_request, response_headers, s_s3_auto_ranged_get_success_status(meta_request), meta_request->user_data)) { error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; aws_http_headers_release(response_headers); } } update_synced_data: /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); bool finishing_metrics = true; /* If the object range was found, then record it. */ if (found_object_size) { AWS_ASSERT(!auto_ranged_get->synced_data.object_range_known); auto_ranged_get->synced_data.object_range_known = true; auto_ranged_get->synced_data.object_range_empty = (object_size == 0); auto_ranged_get->synced_data.object_range_start = object_range_start; auto_ranged_get->synced_data.object_range_end = object_range_end; if (!first_part_size_mismatch && first_part_size) { auto_ranged_get->synced_data.first_part_size = first_part_size; } if (auto_ranged_get->synced_data.object_range_empty == 0) { auto_ranged_get->synced_data.total_num_parts = aws_s3_calculate_auto_ranged_get_num_parts( meta_request->part_size, auto_ranged_get->synced_data.first_part_size, object_range_start, object_range_end); } } switch (request->request_tag) { case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT: auto_ranged_get->synced_data.head_object_completed = true; AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Head object completed.", (void *)meta_request); break; case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1: AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Get Part Number completed.", (void *)meta_request); if (first_part_size_mismatch && found_object_size) { /* We canceled GET_OBJECT_WITH_PART_NUMBER_1 request because the Content-Length was bigger than * part_size. Try to fetch the first part again as a ranged get */ auto_ranged_get->synced_data.num_parts_requested = 0; break; } /* fall through */ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE: if (empty_file_error) { /* * Try to download the object again using GET_OBJECT_WITH_PART_NUMBER_1. If the file is still * empty, successful response headers will be provided to users. If not, the newer version of the * file will be downloaded. */ auto_ranged_get->synced_data.num_parts_requested = 0; auto_ranged_get->synced_data.object_range_known = 0; break; } ++auto_ranged_get->synced_data.num_parts_completed; if (!request_failed) { /* Record the number of parts that checksum has been validated */ if (request->did_validate) { if (auto_ranged_get->validation_algorithm == AWS_SCA_NONE) { auto_ranged_get->validation_algorithm = request->validation_algorithm; } /* They should be the same. 
*/ AWS_ASSERT(auto_ranged_get->validation_algorithm == request->validation_algorithm); ++auto_ranged_get->synced_data.num_parts_checksum_validated; } ++auto_ranged_get->synced_data.num_parts_successful; /* Send progress_callback for delivery on io_event_loop thread */ if (meta_request->progress_callback != NULL) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; event.u.progress.info.bytes_transferred = request->send_data.response_body.len; if (auto_ranged_get->synced_data.object_range_empty) { event.u.progress.info.content_length = 0; } else { /* Note that range-end is inclusive */ event.u.progress.info.content_length = auto_ranged_get->synced_data.object_range_end + 1 - auto_ranged_get->synced_data.object_range_start; } aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } aws_s3_meta_request_stream_response_body_synced(meta_request, request); /* The body of the request is queued to be streamed, don't finish the metrics yet. */ finishing_metrics = false; AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: %d out of %d parts have completed.", (void *)meta_request, (auto_ranged_get->synced_data.num_parts_successful + auto_ranged_get->synced_data.num_parts_failed), auto_ranged_get->synced_data.total_num_parts); } else { ++auto_ranged_get->synced_data.num_parts_failed; } break; } if (error_code != AWS_ERROR_SUCCESS) { if (error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS && request->send_data.response_status == AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED && !auto_ranged_get->initial_message_has_if_match_header) { /* Use more clear error code as we added the if-match header under the hood. */ error_code = AWS_ERROR_S3_OBJECT_MODIFIED; } aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); if (error_code == AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH) { /* It's a mismatch of checksum, tell user that we validated the checksum and the algorithm we validated */ meta_request->synced_data.finish_result.did_validate = true; meta_request->synced_data.finish_result.validation_algorithm = request->validation_algorithm; } } if (finishing_metrics) { aws_s3_request_finish_up_metrics_synced(request, meta_request); } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_auto_ranged_put.c000066400000000000000000002212141456575232400250100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_auto_ranged_put.h" #include "aws/s3/private/s3_checksums.h" #include "aws/s3/private/s3_list_parts.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" #include #include #include #include /* TODO: better logging of steps */ static const size_t s_complete_multipart_upload_init_body_size_bytes = 512; static const size_t s_abort_multipart_upload_init_body_size_bytes = 512; /* For unknown length body we no longer know the number of parts. to avoid * resizing arrays for etags/checksums too much, those array start out with * capacity specified by the constant below. Note: constant has been arbitrary * picked to avoid using allocations and using too much memory. might change in future. */ static const uint32_t s_unknown_length_default_num_parts = 32; /* Max number of parts (per meta-request) that can be: "started, but not done reading from stream". 
* Though reads are serial (only 1 part can be reading from stream at a time) * we may queue up more to minimize delays between each read. * * If this number is too low, there could be an avoidable delay between each read * (meta-request ready for more work, but client hasn't run update and given it more work yet) * * If this number is too high, early meta-requests could hog all the "work tokens" * (1st meta-request as queue of 100 "work tokens" that it needs to read * the stream for, while later meta-requests are doing nothing waiting for work tokens) * * TODO: this value needs further benchmarking. */ static const uint32_t s_max_parts_pending_read = 5; static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), }; /* Data for aws_s3_auto_ranged_put's async vtable->prepare_request() job */ struct aws_s3_auto_ranged_put_prepare_request_job { struct aws_allocator *allocator; struct aws_s3_request *request; /* async step: prepare type-specific message */ struct aws_future_http_message *asyncstep_prepare_message; /* future to set when this job completes */ struct aws_future_void *on_complete; }; /* Data for async preparation of an UploadPart request */ struct aws_s3_prepare_upload_part_job { struct aws_allocator *allocator; struct aws_s3_request *request; /* async step: read this part from input stream */ struct aws_future_bool *asyncstep_read_part; /* future to set when this job completes */ struct aws_future_http_message *on_complete; }; /* Data for async preparation of a CompleteMultipartUpload request */ struct aws_s3_prepare_complete_multipart_upload_job { struct aws_allocator *allocator; struct aws_s3_request *request; /* future to set when this job completes */ struct aws_future_http_message *on_complete; }; static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request *meta_request); static void s_s3_auto_ranged_put_send_request_finish( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code); static bool s_s3_auto_ranged_put_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); static struct aws_future_void *s_s3_auto_ranged_put_prepare_request(struct aws_s3_request *request); static void s_s3_auto_ranged_put_prepare_request_finish(void *user_data); static struct aws_future_http_message *s_s3_prepare_list_parts(struct aws_s3_request *request); static struct aws_future_http_message *s_s3_prepare_create_multipart_upload(struct aws_s3_request *request); static struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *request); static void s_s3_prepare_upload_part_on_read_done(void *user_data); static void s_s3_prepare_upload_part_finish(struct aws_s3_prepare_upload_part_job *part_prep, int error_code); static struct aws_future_http_message *s_s3_prepare_complete_multipart_upload(struct aws_s3_request *request); static struct aws_future_http_message *s_s3_prepare_abort_multipart_upload(struct aws_s3_request *request); static void s_s3_auto_ranged_put_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code); static int s_s3_auto_ranged_put_pause( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_resume_token **resume_token); 
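/*
 * Illustrative sketch only, not part of the upstream aws-c-s3 sources: the "pending read" throttle described
 * above s_max_parts_pending_read boils down to a comparison like the one below, where the cap shrinks to a
 * single in-flight read for async request bodies and for "conservative" update passes. The helper name and its
 * parameters are hypothetical.
 */
static bool s_example_reached_pending_read_cap(uint32_t num_parts_pending_read, bool serial_only) {
    /* Async bodies and conservative passes allow only one read in flight; otherwise use the file-level cap. */
    uint32_t cap = serial_only ? 1u : s_max_parts_pending_read;
    return num_parts_pending_read >= cap;
}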
static int s_process_part_info_synced(const struct aws_s3_part_info *info, void *user_data) { struct aws_s3_auto_ranged_put *auto_ranged_put = user_data; struct aws_s3_meta_request *meta_request = &auto_ranged_put->base; ASSERT_SYNCED_DATA_LOCK_HELD(&auto_ranged_put->base); if (info->part_number == 0) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: ListParts reported Part without valid PartNumber", (void *)meta_request); return aws_raise_error(AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED); } struct aws_s3_mpu_part_info *part = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_mpu_part_info)); part->size = info->size; part->etag = aws_strip_quotes(meta_request->allocator, info->e_tag); part->was_previously_uploaded = true; const struct aws_byte_cursor *checksum_cur = NULL; switch (auto_ranged_put->base.checksum_config.checksum_algorithm) { case AWS_SCA_CRC32: checksum_cur = &info->checksumCRC32; break; case AWS_SCA_CRC32C: checksum_cur = &info->checksumCRC32C; break; case AWS_SCA_SHA1: checksum_cur = &info->checksumSHA1; break; case AWS_SCA_SHA256: checksum_cur = &info->checksumSHA256; break; case AWS_SCA_NONE: break; default: AWS_ASSERT(false); break; } if ((checksum_cur != NULL) && (checksum_cur->len > 0)) { aws_byte_buf_init_copy_from_cursor(&part->checksum_base64, auto_ranged_put->base.allocator, *checksum_cur); } /* Parts might be out of order or have gaps in them. * Resize array-list to be long enough to hold this part, * filling any intermediate slots with NULL. */ aws_array_list_ensure_capacity(&auto_ranged_put->synced_data.part_list, info->part_number); while (aws_array_list_length(&auto_ranged_put->synced_data.part_list) < info->part_number) { struct aws_s3_mpu_part_info *null_part = NULL; aws_array_list_push_back(&auto_ranged_put->synced_data.part_list, &null_part); } /* Add this part */ aws_array_list_set_at(&auto_ranged_put->synced_data.part_list, &part, info->part_number - 1); return AWS_OP_SUCCESS; } /* * Validates token and updates part variables. Noop if token is null. */ static int s_try_update_part_info_from_resume_token( uint64_t content_length, const struct aws_s3_meta_request_resume_token *resume_token, size_t *out_part_size, uint32_t *out_total_num_parts) { if (!resume_token) { return AWS_OP_SUCCESS; } if (resume_token->type != AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) { AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Invalid token type."); goto invalid_argument_cleanup; } if (resume_token->multipart_upload_id == NULL) { AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. 
Multipart upload id missing."); goto invalid_argument_cleanup; } if (resume_token->part_size < g_s3_min_upload_part_size) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create resume auto-ranged-put meta request; part size of %" PRIu64 " specified in the token is below minimum threshold for multi-part.", (uint64_t)resume_token->part_size); goto invalid_argument_cleanup; } if ((uint32_t)resume_token->total_num_parts > g_s3_max_num_upload_parts) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create resume auto-ranged-put meta request; total number of parts %" PRIu32 " specified in the token is too large for platform.", (uint32_t)resume_token->total_num_parts); goto invalid_argument_cleanup; } uint32_t num_parts = (uint32_t)(content_length / resume_token->part_size); if ((content_length % resume_token->part_size) > 0) { ++num_parts; } if (resume_token->total_num_parts != num_parts) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create auto-ranged-put meta request; persisted number of parts %zu" " does not match expected number of parts based on length of the body.", resume_token->total_num_parts); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *out_part_size = resume_token->part_size; *out_total_num_parts = (uint32_t)resume_token->total_num_parts; return AWS_OP_SUCCESS; invalid_argument_cleanup: return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /** * Initializes state necessary to resume upload. Noop if token is null. */ static int s_try_init_resume_state_from_persisted_data( struct aws_allocator *allocator, struct aws_s3_auto_ranged_put *auto_ranged_put, const struct aws_s3_meta_request_resume_token *resume_token) { if (resume_token == NULL) { auto_ranged_put->synced_data.list_parts_operation = NULL; auto_ranged_put->synced_data.list_parts_state.completed = true; auto_ranged_put->synced_data.list_parts_state.started = true; return AWS_OP_SUCCESS; } AWS_FATAL_ASSERT(auto_ranged_put->has_content_length); struct aws_byte_cursor request_path; if (aws_http_message_get_request_path(auto_ranged_put->base.initial_request_message, &request_path)) { AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. 
Request path could not be read."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } auto_ranged_put->synced_data.num_parts_started = 0; auto_ranged_put->synced_data.num_parts_completed = 0; auto_ranged_put->synced_data.num_parts_noop = 0; auto_ranged_put->synced_data.create_multipart_upload_sent = true; auto_ranged_put->synced_data.create_multipart_upload_completed = true; auto_ranged_put->upload_id = aws_string_clone_or_reuse(allocator, resume_token->multipart_upload_id); struct aws_s3_list_parts_params list_parts_params = { .key = request_path, .upload_id = aws_byte_cursor_from_string(auto_ranged_put->upload_id), .on_part = s_process_part_info_synced, .user_data = auto_ranged_put, }; auto_ranged_put->synced_data.list_parts_operation = aws_s3_list_parts_operation_new(allocator, &list_parts_params); struct aws_http_headers *needed_response_headers = aws_http_headers_new(allocator); const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers); const struct aws_http_headers *initial_headers = aws_http_message_get_headers(auto_ranged_put->base.initial_request_message); /* Copy headers that would have been used for create multipart from initial message, since create will never be * called in this flow */ for (size_t header_index = 0; header_index < copy_header_count; ++header_index) { const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index]; struct aws_byte_cursor header_value; AWS_ZERO_STRUCT(header_value); if (aws_http_headers_get(initial_headers, *header_name, &header_value) == AWS_OP_SUCCESS) { aws_http_headers_set(needed_response_headers, *header_name, header_value); } } auto_ranged_put->synced_data.needed_response_headers = needed_response_headers; return AWS_OP_SUCCESS; } static struct aws_s3_meta_request_vtable s_s3_auto_ranged_put_vtable = { .update = s_s3_auto_ranged_put_update, .send_request_finish = s_s3_auto_ranged_put_send_request_finish, .prepare_request = s_s3_auto_ranged_put_prepare_request, .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default, .sign_request = aws_s3_meta_request_sign_request_default, .finished_request = s_s3_auto_ranged_put_request_finished, .destroy = s_s3_meta_request_auto_ranged_put_destroy, .finish = aws_s3_meta_request_finish_default, .pause = s_s3_auto_ranged_put_pause, }; /* Allocate a new auto-ranged put meta request */ struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, bool has_content_length, uint64_t content_length, uint32_t num_parts, const struct aws_s3_meta_request_options *options) { /* These should already have been validated by the caller. 
*/ AWS_PRECONDITION(allocator); AWS_PRECONDITION(client); AWS_PRECONDITION(options); AWS_PRECONDITION(options->message); if (s_try_update_part_info_from_resume_token(content_length, options->resume_token, &part_size, &num_parts)) { return NULL; } struct aws_s3_auto_ranged_put *auto_ranged_put = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_auto_ranged_put)); if (aws_s3_meta_request_init_base( allocator, client, part_size, client->compute_content_md5 == AWS_MR_CONTENT_MD5_ENABLED || aws_http_headers_has(aws_http_message_get_headers(options->message), g_content_md5_header_name), options, auto_ranged_put, &s_s3_auto_ranged_put_vtable, &auto_ranged_put->base)) { aws_mem_release(allocator, auto_ranged_put); return NULL; } auto_ranged_put->has_content_length = has_content_length; auto_ranged_put->content_length = has_content_length ? content_length : 0; auto_ranged_put->total_num_parts_from_content_length = has_content_length ? num_parts : 0; auto_ranged_put->upload_id = NULL; auto_ranged_put->resume_token = options->resume_token; aws_s3_meta_request_resume_token_acquire(auto_ranged_put->resume_token); auto_ranged_put->threaded_update_data.next_part_number = 1; auto_ranged_put->synced_data.is_body_stream_at_end = false; uint32_t initial_num_parts = auto_ranged_put->has_content_length ? num_parts : s_unknown_length_default_num_parts; aws_array_list_init_dynamic( &auto_ranged_put->synced_data.part_list, allocator, initial_num_parts, sizeof(struct aws_s3_mpu_part_info *)); if (s_try_init_resume_state_from_persisted_data(allocator, auto_ranged_put, options->resume_token)) { goto error_clean_up; } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Created new Auto-Ranged Put Meta Request.", (void *)&auto_ranged_put->base); return &auto_ranged_put->base; error_clean_up: aws_s3_meta_request_release(&auto_ranged_put->base); return NULL; } /* Destroy our auto-ranged put meta request */ static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; aws_string_destroy(auto_ranged_put->upload_id); auto_ranged_put->upload_id = NULL; auto_ranged_put->resume_token = aws_s3_meta_request_resume_token_release(auto_ranged_put->resume_token); aws_s3_paginated_operation_release(auto_ranged_put->synced_data.list_parts_operation); for (size_t part_index = 0; part_index < aws_array_list_length(&auto_ranged_put->synced_data.part_list); ++part_index) { struct aws_s3_mpu_part_info *part; aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); if (part != NULL) { aws_byte_buf_clean_up(&part->checksum_base64); aws_string_destroy(part->etag); aws_mem_release(auto_ranged_put->base.allocator, part); } } aws_array_list_clean_up(&auto_ranged_put->synced_data.part_list); aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token); aws_http_headers_release(auto_ranged_put->synced_data.needed_response_headers); aws_mem_release(meta_request->allocator, auto_ranged_put); } /* Check flags and corresponding conditions to see if any more parts can be * scheduled during this pass. */ static bool s_should_skip_scheduling_more_parts_based_on_flags( const struct aws_s3_auto_ranged_put *auto_ranged_put, uint32_t flags) { /* If the stream is actually async, only allow 1 pending-read. * We need to wait for async read() to complete before calling it again. 
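     * (num_parts_pending_read counts parts that have been scheduled but have not yet finished reading their
     * slice of the request body; see the s_max_parts_pending_read comment near the top of this file.)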
*/ if (auto_ranged_put->base.request_body_async_stream != NULL) { return auto_ranged_put->synced_data.num_parts_pending_read > 0; } /* If this is the conservative pass, only allow 1 pending-read. * Reads are serial anyway, so queuing up a whole bunch isn't necessarily a speedup. */ if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) { return auto_ranged_put->synced_data.num_parts_pending_read > 0; } /* In all other cases, cap the number of pending-reads to something reasonable */ return auto_ranged_put->synced_data.num_parts_pending_read >= s_max_parts_pending_read; } static void s_s3_auto_ranged_put_send_request_finish( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code) { struct aws_s3_request *request = connection->request; if (request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART) { /* TODO: the single part upload may also be improved from a timeout as multipart. */ aws_s3_client_update_upload_part_timeout(request->meta_request->client, request, error_code); } aws_s3_meta_request_send_request_finish_default(connection, stream, error_code); } static bool s_s3_auto_ranged_put_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(out_request); struct aws_s3_request *request = NULL; bool work_remaining = false; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) { /* If resuming and list part has not been sent, do it now. */ if (!auto_ranged_put->synced_data.list_parts_state.started) { request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS, AWS_S3_REQUEST_TYPE_LIST_PARTS, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.list_parts_state.started = true; goto has_work_remaining; } if (auto_ranged_put->synced_data.list_parts_state.continues) { /* If list parts need to continue, send another list parts request. */ AWS_ASSERT(auto_ranged_put->synced_data.list_parts_continuation_token != NULL); request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS, AWS_S3_REQUEST_TYPE_LIST_PARTS, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.list_parts_state.continues = false; goto has_work_remaining; } if (!auto_ranged_put->synced_data.list_parts_state.completed) { /* waiting on list parts to finish. */ goto has_work_remaining; } /* If we haven't already sent a create-multipart-upload message, do so now. */ if (!auto_ranged_put->synced_data.create_multipart_upload_sent) { request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.create_multipart_upload_sent = true; goto has_work_remaining; } /* If the create-multipart-upload message hasn't been completed, then there is still additional work to do, * but it can't be done yet. 
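* (Part uploads cannot be prepared until the UploadId from the CreateMultipartUpload response is available.)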
*/ if (!auto_ranged_put->synced_data.create_multipart_upload_completed) { goto has_work_remaining; } bool should_create_next_part_request = false; bool request_previously_uploaded = false; if (auto_ranged_put->has_content_length && (auto_ranged_put->synced_data.num_parts_started < auto_ranged_put->total_num_parts_from_content_length)) { /* Check if next part was previously uploaded (due to resume) */ size_t part_index = auto_ranged_put->threaded_update_data.next_part_number - 1; struct aws_s3_mpu_part_info *part = NULL; aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); if (part != NULL) { AWS_ASSERT(part->was_previously_uploaded == true); /* This part has been uploaded. */ request_previously_uploaded = true; } if (s_should_skip_scheduling_more_parts_based_on_flags(auto_ranged_put, flags)) { goto has_work_remaining; } should_create_next_part_request = true; } else if (!auto_ranged_put->has_content_length && !auto_ranged_put->synced_data.is_body_stream_at_end) { if (s_should_skip_scheduling_more_parts_based_on_flags(auto_ranged_put, flags)) { goto has_work_remaining; } should_create_next_part_request = true; } if (should_create_next_part_request) { struct aws_s3_buffer_pool_ticket *ticket = aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta_request->part_size); if (ticket != NULL) { /* Allocate a request for another part. */ request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART, AWS_S3_REQUEST_TYPE_UPLOAD_PART, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_REQUEST_BODY); request->part_number = auto_ranged_put->threaded_update_data.next_part_number; /* If request was previously uploaded, we prepare it to ensure checksums still match, * but ultimately it gets marked no-op and we don't send it */ request->was_previously_uploaded = request_previously_uploaded; request->ticket = ticket; ++auto_ranged_put->threaded_update_data.next_part_number; ++auto_ranged_put->synced_data.num_parts_started; ++auto_ranged_put->synced_data.num_parts_pending_read; AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Returning request %p for part %d", (void *)meta_request, (void *)request, request->part_number); } goto has_work_remaining; } /* There is one more request to send after all the parts (the complete-multipart-upload) but it can't be * done until all the parts have been completed.*/ if (auto_ranged_put->has_content_length) { if (auto_ranged_put->synced_data.num_parts_completed != auto_ranged_put->total_num_parts_from_content_length) { goto has_work_remaining; } } else { if ((!auto_ranged_put->synced_data.is_body_stream_at_end) || auto_ranged_put->synced_data.num_parts_completed != auto_ranged_put->synced_data.num_parts_started) { goto has_work_remaining; } } /* If the complete-multipart-upload request hasn't been set yet, then send it now. */ if (!auto_ranged_put->synced_data.complete_multipart_upload_sent) { request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); auto_ranged_put->synced_data.complete_multipart_upload_sent = true; goto has_work_remaining; } /* Wait for the complete-multipart-upload request to finish. 
*/ if (!auto_ranged_put->synced_data.complete_multipart_upload_completed) { goto has_work_remaining; } goto no_work_remaining; } else { /* If the create multipart upload hasn't been sent, then there is nothing left to do when canceling. */ if (!auto_ranged_put->synced_data.create_multipart_upload_sent) { goto no_work_remaining; } /* If the create-multipart-upload request is still in flight, wait for it to finish. */ if (!auto_ranged_put->synced_data.create_multipart_upload_completed) { goto has_work_remaining; } /* If the number of parts completed is less than the number of parts sent, then we need to wait until all of * those parts are done sending before aborting. */ if (auto_ranged_put->synced_data.num_parts_completed < auto_ranged_put->synced_data.num_parts_started) { goto has_work_remaining; } /* If the complete-multipart-upload is already in flight, then we can't necessarily send an abort. */ if (auto_ranged_put->synced_data.complete_multipart_upload_sent && !auto_ranged_put->synced_data.complete_multipart_upload_completed) { goto has_work_remaining; } /* If the upload was paused or resume failed, we don't abort the multipart upload. */ if (meta_request->synced_data.finish_result.error_code == AWS_ERROR_S3_PAUSED || meta_request->synced_data.finish_result.error_code == AWS_ERROR_S3_RESUME_FAILED) { goto no_work_remaining; } /* If the complete-multipart-upload completed successfully, then there is nothing to abort since the * transfer has already finished. */ if (auto_ranged_put->synced_data.complete_multipart_upload_completed && auto_ranged_put->synced_data.complete_multipart_upload_error_code == AWS_ERROR_SUCCESS) { goto no_work_remaining; } /* If we made it here, and the abort-multipart-upload message hasn't been sent yet, then do so now. */ if (!auto_ranged_put->synced_data.abort_multipart_upload_sent) { if (auto_ranged_put->upload_id == NULL) { goto no_work_remaining; } if (auto_ranged_put->base.synced_data.finish_result.error_code == AWS_ERROR_SUCCESS) { /* Not sending abort when success even if we haven't sent complete MPU, in case we resume after MPU * already completed. */ goto no_work_remaining; } request = aws_s3_request_new( meta_request, AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_ALWAYS_SEND); auto_ranged_put->synced_data.abort_multipart_upload_sent = true; goto has_work_remaining; } /* Wait for the multipart upload to be completed. */ if (!auto_ranged_put->synced_data.abort_multipart_upload_completed) { goto has_work_remaining; } goto no_work_remaining; } has_work_remaining: work_remaining = true; no_work_remaining: /* If some events are still being delivered to caller, then wait for those to finish */ if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { work_remaining = true; } if (!work_remaining) { aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK); } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ if (work_remaining) { *out_request = request; } else { AWS_ASSERT(request == NULL); aws_s3_meta_request_finish(meta_request); } return work_remaining; } /** * Helper to compute request body size. * Basically returns either part size or if content is not equally divisible into parts, the size of the remaining last * part. 
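* For example, with part_size = 8 MiB and content_length = 20 MiB there are 3 parts: parts 1 and 2 are 8 MiB each, part 3 is the 4 MiB remainder, and the offset written to offset_out for part N is (N - 1) * part_size.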
*/ static size_t s_compute_request_body_size( const struct aws_s3_meta_request *meta_request, uint32_t part_number, uint64_t *offset_out) { AWS_PRECONDITION(meta_request); const struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; size_t request_body_size = meta_request->part_size; /* Last part--adjust size to match remaining content length. */ if (auto_ranged_put->has_content_length && part_number == auto_ranged_put->total_num_parts_from_content_length) { size_t content_remainder = (size_t)(auto_ranged_put->content_length % (uint64_t)meta_request->part_size); if (content_remainder > 0) { request_body_size = content_remainder; } } /* The part_number starts at 1 */ *offset_out = (part_number - 1) * meta_request->part_size; return request_body_size; } static int s_verify_part_matches_checksum( struct aws_allocator *allocator, struct aws_byte_cursor body_cur, enum aws_s3_checksum_algorithm algorithm, struct aws_byte_cursor part_checksum) { AWS_PRECONDITION(allocator); if (algorithm == AWS_SCA_NONE) { return AWS_OP_SUCCESS; } struct aws_byte_buf checksum; if (aws_byte_buf_init(&checksum, allocator, aws_get_digest_size_from_algorithm(algorithm))) { return AWS_OP_ERR; } struct aws_byte_buf encoded_checksum = {0}; int return_status = AWS_OP_SUCCESS; size_t encoded_len = 0; if (aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(algorithm), &encoded_len)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to determine length of encoded checksum."); return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED); goto on_done; } if (aws_checksum_compute(allocator, algorithm, &body_cur, &checksum, 0)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to compute checksum for the skipped part."); return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED); goto on_done; } if (aws_byte_buf_init(&encoded_checksum, allocator, encoded_len)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to allocate buffer for encoded checksum."); return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED); goto on_done; } struct aws_byte_cursor checksum_cur = aws_byte_cursor_from_buf(&checksum); if (aws_base64_encode(&checksum_cur, &encoded_checksum)) { AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to encode checksum."); return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED); goto on_done; } if (!aws_byte_cursor_eq_byte_buf(&part_checksum, &encoded_checksum)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Failed to resume upload. Checksum for previously uploaded part does not match"); return_status = aws_raise_error(AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH); goto on_done; } on_done: aws_byte_buf_clean_up(&checksum); aws_byte_buf_clean_up(&encoded_checksum); return return_status; } /* Given a request, prepare it for sending based on its description. 
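* Dispatches on request->request_tag to build the type-specific HTTP message (possibly via async substeps), then s_s3_auto_ranged_put_prepare_request_finish applies the resulting message to the request.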
*/ static struct aws_future_void *s_s3_auto_ranged_put_prepare_request(struct aws_s3_request *request) { struct aws_future_void *asyncstep_prepare_request = aws_future_void_new(request->allocator); /* Store data for async job */ struct aws_s3_auto_ranged_put_prepare_request_job *request_prep = aws_mem_calloc(request->allocator, 1, sizeof(struct aws_s3_auto_ranged_put_prepare_request_job)); request_prep->allocator = request->allocator; request_prep->on_complete = aws_future_void_acquire(asyncstep_prepare_request); request_prep->request = request; /* Each type of request prepares an aws_http_message in its own way, which maybe require async substeps */ switch (request->request_tag) { case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS: request_prep->asyncstep_prepare_message = s_s3_prepare_list_parts(request); break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: request_prep->asyncstep_prepare_message = s_s3_prepare_create_multipart_upload(request); break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART: request_prep->asyncstep_prepare_message = s_s3_prepare_upload_part(request); break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: request_prep->asyncstep_prepare_message = s_s3_prepare_complete_multipart_upload(request); break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: request_prep->asyncstep_prepare_message = s_s3_prepare_abort_multipart_upload(request); break; default: AWS_FATAL_ASSERT(0); break; } /* When the specific type of message is ready, finish common preparation steps */ aws_future_http_message_register_callback( request_prep->asyncstep_prepare_message, s_s3_auto_ranged_put_prepare_request_finish, request_prep); return asyncstep_prepare_request; } /* Prepare a ListParts request. * Currently, this is actually synchronous. */ static struct aws_future_http_message *s_s3_prepare_list_parts(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; struct aws_http_message *message = NULL; int message_creation_result = AWS_OP_ERR; /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); if (auto_ranged_put->synced_data.list_parts_continuation_token) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p ListParts for Multi-part Upload, with ID:%s, continues with token:%s.", (void *)meta_request, aws_string_c_str(auto_ranged_put->upload_id), aws_string_c_str(auto_ranged_put->synced_data.list_parts_continuation_token)); struct aws_byte_cursor continuation_cur = aws_byte_cursor_from_string(auto_ranged_put->synced_data.list_parts_continuation_token); message_creation_result = aws_s3_construct_next_paginated_request_http_message( auto_ranged_put->synced_data.list_parts_operation, &continuation_cur, &message); } else { message_creation_result = aws_s3_construct_next_paginated_request_http_message( auto_ranged_put->synced_data.list_parts_operation, NULL, &message); } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ /* ListPart will not fail to create the next message `s_construct_next_request_http_message` */ AWS_FATAL_ASSERT(message_creation_result == AWS_OP_SUCCESS); if (meta_request->checksum_config.checksum_algorithm == AWS_SCA_NONE) { /* We don't need to worry about the pre-calculated checksum from user as for multipart upload, only way * to calculate checksum for multipart upload is from client. 
*/ aws_s3_message_util_copy_headers( meta_request->initial_request_message, message, g_s3_list_parts_excluded_headers, g_s3_list_parts_excluded_headers_count, true); } else { aws_s3_message_util_copy_headers( meta_request->initial_request_message, message, g_s3_list_parts_with_checksum_excluded_headers, g_s3_list_parts_with_checksum_excluded_headers_count, true); } AWS_ASSERT(message); struct aws_future_http_message *future = aws_future_http_message_new(request->allocator); aws_future_http_message_set_result_by_move(future, &message); return future; } /* Prepare a CreateMultipartUpload request. * Currently, this is actually synchronous. */ struct aws_future_http_message *s_s3_prepare_create_multipart_upload(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; /* Create the message to create a new multipart upload. */ struct aws_http_message *message = aws_s3_create_multipart_upload_message_new( meta_request->allocator, meta_request->initial_request_message, meta_request->checksum_config.checksum_algorithm); struct aws_future_http_message *future = aws_future_http_message_new(request->allocator); if (message != NULL) { aws_future_http_message_set_result_by_move(future, &message); } else { aws_future_http_message_set_error(future, aws_last_error_or_unknown()); } return future; } /* Prepare an UploadPart request */ struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_allocator *allocator = request->allocator; struct aws_future_http_message *message_future = aws_future_http_message_new(allocator); struct aws_s3_prepare_upload_part_job *part_prep = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_prepare_upload_part_job)); part_prep->allocator = allocator; part_prep->request = request; part_prep->on_complete = aws_future_http_message_acquire(message_future); if (request->num_times_prepared == 0) { /* Preparing request for the first time. * Next async step: read through the body stream until we've * skipped over parts that were already uploaded (in case we're resuming * from an upload that had been paused) */ /* Read the body */ uint64_t offset = 0; size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number, &offset); if (request->request_body.capacity == 0) { AWS_FATAL_ASSERT(request->ticket); request->request_body = aws_s3_buffer_pool_acquire_buffer(request->meta_request->client->buffer_pool, request->ticket); request->request_body.capacity = request_body_size; } part_prep->asyncstep_read_part = aws_s3_meta_request_read_body(meta_request, offset, &request->request_body); aws_future_bool_register_callback( part_prep->asyncstep_read_part, s_s3_prepare_upload_part_on_read_done, part_prep); } else { /* Not the first time preparing request (e.g. retry). 
* We can skip over the async steps that read the body stream */ s_s3_prepare_upload_part_finish(part_prep, AWS_ERROR_SUCCESS); } return message_future; } /* Completion callback for reading this part's chunk of the body stream */ static void s_s3_prepare_upload_part_on_read_done(void *user_data) { struct aws_s3_prepare_upload_part_job *part_prep = user_data; struct aws_s3_request *request = part_prep->request; struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; bool has_content_length = auto_ranged_put->has_content_length != 0; int error_code = aws_future_bool_get_error(part_prep->asyncstep_read_part); /* If reading failed, the prepare-upload-part job has failed */ if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Failed reading request body, error %d (%s) req len %zu req cap %zu", (void *)meta_request, error_code, aws_error_str(error_code), request->request_body.len, request->request_body.capacity); goto on_done; } /* Reading succeeded. */ bool is_body_stream_at_end = aws_future_bool_get_result(part_prep->asyncstep_read_part); uint64_t offset = 0; size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number, &offset); /* If Content-Length is defined, check that we read the expected amount */ if (has_content_length && (request->request_body.len < request_body_size)) { error_code = AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH; AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Request body is smaller than 'Content-Length' header said it would be", (void *)meta_request); goto on_done; } request->is_noop = request->part_number > 1 && /* allow first part to have 0 length to support empty unknown content length objects. */ request->request_body.len == 0; /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); --auto_ranged_put->synced_data.num_parts_pending_read; auto_ranged_put->synced_data.is_body_stream_at_end = is_body_stream_at_end; struct aws_s3_mpu_part_info *previously_uploaded_info = NULL; if (request->was_previously_uploaded) { aws_array_list_get_at( &auto_ranged_put->synced_data.part_list, &previously_uploaded_info, request->part_number - 1); AWS_ASSERT(previously_uploaded_info != NULL && previously_uploaded_info->was_previously_uploaded == true); /* Already uploaded, set the noop to be true. */ request->is_noop = true; } if (!request->is_noop) { /* The part can finish out of order. Resize array-list to be long enough to hold this part, * filling any intermediate slots with NULL. */ aws_array_list_ensure_capacity(&auto_ranged_put->synced_data.part_list, request->part_number); while (aws_array_list_length(&auto_ranged_put->synced_data.part_list) < request->part_number) { struct aws_s3_mpu_part_info *null_part = NULL; aws_array_list_push_back(&auto_ranged_put->synced_data.part_list, &null_part); } /* Add part to array-list */ struct aws_s3_mpu_part_info *part = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_mpu_part_info)); part->size = request->request_body.len; aws_array_list_set_at(&auto_ranged_put->synced_data.part_list, &part, request->part_number - 1); } aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ if (previously_uploaded_info) { /* Part was previously uploaded, check that it matches what we just read. * (Yes it's weird that we keep a pointer to the part_info even after * releasing the lock that protects part_list. 
But it's the resizable * part_list that needs lock protection. A previously uploaded part_info is const, * and it's on the heap, so it's safe to keep the pointer around) */ if (request->request_body.len != previously_uploaded_info->size) { error_code = AWS_ERROR_S3_RESUME_FAILED; AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Failed resuming upload, previous upload used different part size.", (void *)meta_request); goto on_done; } /* if previously uploaded part had a checksum, compare it to what we just skipped */ if (previously_uploaded_info->checksum_base64.len > 0 && s_verify_part_matches_checksum( meta_request->allocator, aws_byte_cursor_from_buf(&request->request_body), meta_request->checksum_config.checksum_algorithm, aws_byte_cursor_from_buf(&previously_uploaded_info->checksum_base64))) { error_code = aws_last_error_or_unknown(); goto on_done; } } /* We throttle the number of parts that can be "pending read" * (e.g. only 1 at a time if reading from async-stream). * Now that read is complete, poke the client to see if it can give us more work. * * Poking now gives measurable speedup (1%) for async streaming, * vs waiting until all the part-prep steps are complete (still need to sign, etc) */ aws_s3_client_schedule_process_work(meta_request->client); on_done: s_s3_prepare_upload_part_finish(part_prep, error_code); } /* Finish async preparation of an UploadPart request */ static void s_s3_prepare_upload_part_finish(struct aws_s3_prepare_upload_part_job *part_prep, int error_code) { struct aws_s3_request *request = part_prep->request; struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; if (error_code != AWS_ERROR_SUCCESS) { aws_future_http_message_set_error(part_prep->on_complete, error_code); goto on_done; } struct aws_byte_buf *checksum_buf = NULL; if (request->is_noop) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p UploadPart with part num %u for Multi-part Upload, with ID:%s" "is noop due to encountering end of stream", (void *)meta_request, request->part_number, aws_string_c_str(auto_ranged_put->upload_id)); } else { /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); struct aws_s3_mpu_part_info *part = NULL; aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, request->part_number - 1); AWS_ASSERT(part != NULL); checksum_buf = &part->checksum_base64; /* Clean up the buffer in case of it's initialized before and retry happens. */ aws_byte_buf_clean_up(checksum_buf); aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p UploadPart for Multi-part Upload, with ID:%s", (void *)meta_request, aws_string_c_str(auto_ranged_put->upload_id)); } /* Create a new put-object message to upload a part. */ struct aws_http_message *message = aws_s3_upload_part_message_new( meta_request->allocator, meta_request->initial_request_message, &request->request_body, request->part_number, auto_ranged_put->upload_id, meta_request->should_compute_content_md5, &meta_request->checksum_config, checksum_buf); if (message == NULL) { aws_future_http_message_set_error(part_prep->on_complete, aws_last_error()); goto on_done; } /* Success! 
*/ aws_future_http_message_set_result_by_move(part_prep->on_complete, &message); on_done: AWS_FATAL_ASSERT(aws_future_http_message_is_done(part_prep->on_complete)); aws_future_bool_release(part_prep->asyncstep_read_part); aws_future_http_message_release(part_prep->on_complete); aws_mem_release(part_prep->allocator, part_prep); } /* Allow user to review what we've uploaded, and fail the meta-request if they don't approve. */ static int s_s3_review_multipart_upload(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; /* If user registered no callback, then success! */ if (meta_request->upload_review_callback == NULL) { return AWS_OP_SUCCESS; } /* Prepare review info */ struct aws_s3_upload_review review = { .checksum_algorithm = meta_request->checksum_config.checksum_algorithm, }; /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); review.part_count = aws_array_list_length(&auto_ranged_put->synced_data.part_list); if (review.part_count > 0) { review.part_array = aws_mem_calloc(meta_request->allocator, review.part_count, sizeof(struct aws_s3_upload_part_review)); for (size_t part_index = 0; part_index < review.part_count; ++part_index) { struct aws_s3_mpu_part_info *part; aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); struct aws_s3_upload_part_review *part_review = &review.part_array[part_index]; part_review->size = part->size; part_review->checksum = aws_byte_cursor_from_buf(&part->checksum_base64); } } aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ /* Invoke callback */ int error_code = AWS_ERROR_SUCCESS; if (meta_request->upload_review_callback(meta_request, &review, meta_request->user_data) != AWS_OP_SUCCESS) { error_code = aws_last_error_or_unknown(); } /* Clean up review info */ aws_mem_release(meta_request->allocator, review.part_array); if (error_code == AWS_ERROR_SUCCESS) { return AWS_OP_SUCCESS; } else { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Upload review callback raised error %d (%s)", (void *)meta_request, error_code, aws_error_str(error_code)); return aws_raise_error(error_code); } } /* Prepare a CompleteMultipartUpload request. */ static struct aws_future_http_message *s_s3_prepare_complete_multipart_upload(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; struct aws_allocator *allocator = request->allocator; struct aws_future_http_message *message_future = aws_future_http_message_new(allocator); AWS_FATAL_ASSERT(auto_ranged_put->upload_id); if (request->num_times_prepared == 0) { /* Invoke upload_review_callback, and fail meta-request if user raises an error */ if (s_s3_review_multipart_upload(request) != AWS_OP_SUCCESS) { aws_future_http_message_set_error(message_future, aws_last_error()); goto on_done; } /* Allocate request body */ aws_byte_buf_init( &request->request_body, meta_request->allocator, s_complete_multipart_upload_init_body_size_bytes); } else { /* This is a retry, reset request body */ aws_byte_buf_reset(&request->request_body, false); } /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); /* Build the message to complete our multipart upload, which includes a payload describing all of * our completed parts. 
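* (The payload is the CompleteMultipartUpload XML body generated from part_list: one entry per part with its part number, ETag and, when a checksum algorithm is configured, its base64-encoded checksum.)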
*/ struct aws_http_message *message = aws_s3_complete_multipart_message_new( meta_request->allocator, meta_request->initial_request_message, &request->request_body, auto_ranged_put->upload_id, &auto_ranged_put->synced_data.part_list, meta_request->checksum_config.checksum_algorithm); aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ if (message == NULL) { aws_future_http_message_set_error(message_future, aws_last_error()); goto on_done; } /* Success! */ aws_future_http_message_set_result_by_move(message_future, &message); on_done: AWS_FATAL_ASSERT(aws_future_http_message_is_done(message_future)); return message_future; } /* Prepare an AbortMultipartUpload request. * Currently, this is actually synchronous. */ struct aws_future_http_message *s_s3_prepare_abort_multipart_upload(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; AWS_FATAL_ASSERT(auto_ranged_put->upload_id); AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Abort multipart upload request for upload id %s.", (void *)meta_request, aws_string_c_str(auto_ranged_put->upload_id)); if (request->num_times_prepared == 0) { aws_byte_buf_init( &request->request_body, meta_request->allocator, s_abort_multipart_upload_init_body_size_bytes); } else { aws_byte_buf_reset(&request->request_body, false); } /* Build the message to abort our multipart upload */ struct aws_http_message *message = aws_s3_abort_multipart_upload_message_new( meta_request->allocator, meta_request->initial_request_message, auto_ranged_put->upload_id); struct aws_future_http_message *future = aws_future_http_message_new(request->allocator); if (message != NULL) { aws_future_http_message_set_result_by_move(future, &message); } else { aws_future_http_message_set_error(future, aws_last_error_or_unknown()); } return future; } /* Finish the vtable->prepare_request() job */ static void s_s3_auto_ranged_put_prepare_request_finish(void *user_data) { struct aws_s3_auto_ranged_put_prepare_request_job *request_prep = user_data; struct aws_s3_request *request = request_prep->request; struct aws_s3_meta_request *meta_request = request->meta_request; /* Did we successfully create the type-specific HTTP message? */ int error_code = aws_future_http_message_get_error(request_prep->asyncstep_prepare_message); if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not allocate message for request with tag %d for auto-ranged-put meta request.", (void *)meta_request, request->request_tag); goto on_done; } /* Success! 
Apply aws_http_message to aws_s3_request */ struct aws_http_message *message = aws_future_http_message_get_result_by_move(request_prep->asyncstep_prepare_message); aws_s3_request_setup_send_data(request, message); aws_http_message_release(message); AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Prepared request %p for part %d", (void *)meta_request, (void *)request, request->part_number); on_done: if (error_code == AWS_ERROR_SUCCESS) { aws_future_void_set_result(request_prep->on_complete); } else { aws_future_void_set_error(request_prep->on_complete, error_code); } aws_future_http_message_release(request_prep->asyncstep_prepare_message); aws_future_void_release(request_prep->on_complete); aws_mem_release(request_prep->allocator, request_prep); } /* Invoked when no-retry will happen */ static void s_s3_auto_ranged_put_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); AWS_PRECONDITION(request); struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; aws_s3_meta_request_lock_synced_data(meta_request); switch (request->request_tag) { case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS: { bool has_more_results = false; if (error_code == AWS_ERROR_SUCCESS) { struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body); /* Clear the token before */ aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token); auto_ranged_put->synced_data.list_parts_continuation_token = NULL; if (aws_s3_paginated_operation_on_response( auto_ranged_put->synced_data.list_parts_operation, &body_cursor, &auto_ranged_put->synced_data.list_parts_continuation_token, &has_more_results)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Failed to parse list parts response.", (void *)meta_request); error_code = AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED; } else if (!has_more_results) { uint64_t bytes_previously_uploaded = 0; int parts_previously_uploaded = 0; for (size_t part_index = 0; part_index < aws_array_list_length(&auto_ranged_put->synced_data.part_list); part_index++) { struct aws_s3_mpu_part_info *part = NULL; aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); if (part != NULL) { /* Update the number of parts sent/completed previously */ ++parts_previously_uploaded; bytes_previously_uploaded += part->size; } } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Resuming PutObject. %d out of %d parts have completed during previous request.", (void *)meta_request, parts_previously_uploaded, auto_ranged_put->total_num_parts_from_content_length); /* Deliver an initial progress_callback to report all previously uploaded parts. 
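* (This lets a resumed upload report progress starting from the bytes already on the server rather than from zero.)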
*/ if (meta_request->progress_callback != NULL && bytes_previously_uploaded > 0) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; event.u.progress.info.bytes_transferred = bytes_previously_uploaded; event.u.progress.info.content_length = auto_ranged_put->content_length; aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } } } if (has_more_results) { /* If list parts has more result, make sure list parts continues */ auto_ranged_put->synced_data.list_parts_state.continues = true; auto_ranged_put->synced_data.list_parts_state.completed = false; } else { /* No more result, complete the list parts */ auto_ranged_put->synced_data.list_parts_state.continues = false; auto_ranged_put->synced_data.list_parts_state.completed = true; } auto_ranged_put->synced_data.list_parts_error_code = error_code; if (error_code != AWS_ERROR_SUCCESS) { if (request->send_data.response_status == AWS_HTTP_STATUS_CODE_404_NOT_FOUND && auto_ranged_put->resume_token->num_parts_completed == auto_ranged_put->resume_token->total_num_parts) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Resuming PutObject ended early, since there is nothing to resume" "(request finished prior to being paused?)", (void *)meta_request); aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK); } else { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } } } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: { struct aws_http_headers *needed_response_headers = NULL; if (error_code == AWS_ERROR_SUCCESS) { needed_response_headers = aws_http_headers_new(meta_request->allocator); const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers); /* Copy any headers now that we'll need for the final, transformed headers later. */ for (size_t header_index = 0; header_index < copy_header_count; ++header_index) { const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index]; struct aws_byte_cursor header_value; AWS_ZERO_STRUCT(header_value); if (aws_http_headers_get(request->send_data.response_headers, *header_name, &header_value) == AWS_OP_SUCCESS) { aws_http_headers_set(needed_response_headers, *header_name, header_value); } } struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /* Find the upload id for this multipart upload. */ struct aws_byte_cursor upload_id = {0}; const char *xml_path[] = {"InitiateMultipartUploadResult", "UploadId", NULL}; aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &upload_id); if (upload_id.len == 0) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find upload-id in create-multipart-upload response", (void *)meta_request); aws_raise_error(AWS_ERROR_S3_MISSING_UPLOAD_ID); error_code = AWS_ERROR_S3_MISSING_UPLOAD_ID; } else { /* Store the multipart upload id. 
*/ auto_ranged_put->upload_id = aws_string_new_from_cursor(meta_request->allocator, &upload_id); } } AWS_ASSERT(auto_ranged_put->synced_data.needed_response_headers == NULL); auto_ranged_put->synced_data.needed_response_headers = needed_response_headers; auto_ranged_put->synced_data.create_multipart_upload_completed = true; auto_ranged_put->synced_data.create_multipart_upload_error_code = error_code; if (error_code != AWS_ERROR_SUCCESS) { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART: { size_t part_number = request->part_number; AWS_FATAL_ASSERT(part_number > 0); size_t part_index = part_number - 1; struct aws_string *etag = NULL; bool request_is_noop = request->is_noop != 0; if (!request_is_noop) { if (error_code == AWS_ERROR_SUCCESS) { /* Find the ETag header if it exists and cache it. */ struct aws_byte_cursor etag_within_quotes; AWS_ASSERT(request->send_data.response_headers); if (aws_http_headers_get( request->send_data.response_headers, g_etag_header_name, &etag_within_quotes) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find ETag header for request %p", (void *)meta_request, (void *)request); error_code = AWS_ERROR_S3_MISSING_ETAG; } else { /* The ETag value arrives in quotes, but we don't want it in quotes when we send it back up * later, so just get rid of the quotes now. */ etag = aws_strip_quotes(meta_request->allocator, etag_within_quotes); } } } ++auto_ranged_put->synced_data.num_parts_completed; if (request_is_noop) { ++auto_ranged_put->synced_data.num_parts_noop; } if (auto_ranged_put->has_content_length) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: %d out of %d parts have completed.", (void *)meta_request, auto_ranged_put->synced_data.num_parts_completed, auto_ranged_put->total_num_parts_from_content_length); } else { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: %d parts have completed.", (void *)meta_request, auto_ranged_put->synced_data.num_parts_completed); } if (!request_is_noop) { if (error_code == AWS_ERROR_SUCCESS) { AWS_ASSERT(etag != NULL); ++auto_ranged_put->synced_data.num_parts_successful; /* Send progress_callback for delivery on io_event_loop thread */ if (meta_request->progress_callback != NULL) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; event.u.progress.info.bytes_transferred = request->request_body.len; event.u.progress.info.content_length = auto_ranged_put->content_length; aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } /* Store part's ETag */ struct aws_s3_mpu_part_info *part = NULL; aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index); AWS_ASSERT(part != NULL); AWS_ASSERT(part->etag == NULL); part->etag = etag; } else { ++auto_ranged_put->synced_data.num_parts_failed; aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } } } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: { if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL) { struct aws_http_headers *final_response_headers = aws_http_headers_new(meta_request->allocator); /* Copy all the response headers from this request. */ copy_http_headers(request->send_data.response_headers, final_response_headers); /* Copy over any response headers that we've previously determined are needed for this final * response. 
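* (needed_response_headers was captured earlier, either from the CreateMultipartUpload response or from the initial request when resuming via ListParts.)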
*/ copy_http_headers(auto_ranged_put->synced_data.needed_response_headers, final_response_headers); struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /** * TODO: The body of the response can be ERROR, check Error specified in body part from * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html#AmazonS3-CompleteMultipartUpload-response-CompleteMultipartUploadOutput * We need to handle this case. * TODO: the checksum returned within the response of complete multipart upload need to be exposed? */ /* Grab the ETag for the entire object, and set it as a header. */ struct aws_byte_cursor etag_header_value = {0}; const char *xml_path[] = {"CompleteMultipartUploadResult", "ETag", NULL}; aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &etag_header_value); if (etag_header_value.len > 0) { struct aws_byte_buf etag_header_value_byte_buf = aws_replace_quote_entities(meta_request->allocator, etag_header_value); aws_http_headers_set( final_response_headers, g_etag_header_name, aws_byte_cursor_from_buf(&etag_header_value_byte_buf)); aws_byte_buf_clean_up(&etag_header_value_byte_buf); } /* Invoke the callback without lock */ aws_s3_meta_request_unlock_synced_data(meta_request); /* Notify the user of the headers. */ if (meta_request->headers_callback( meta_request, final_response_headers, request->send_data.response_status, meta_request->user_data)) { error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; /* Grab the lock again after the callback */ aws_s3_meta_request_lock_synced_data(meta_request); aws_http_headers_release(final_response_headers); } auto_ranged_put->synced_data.complete_multipart_upload_completed = true; auto_ranged_put->synced_data.complete_multipart_upload_error_code = error_code; if (error_code != AWS_ERROR_SUCCESS) { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } } break; case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: { auto_ranged_put->synced_data.abort_multipart_upload_error_code = error_code; auto_ranged_put->synced_data.abort_multipart_upload_completed = true; } break; } aws_s3_request_finish_up_metrics_synced(request, meta_request); aws_s3_meta_request_unlock_synced_data(meta_request); } static int s_s3_auto_ranged_put_pause( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_resume_token **out_resume_token) { *out_resume_token = NULL; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; if (!auto_ranged_put->has_content_length) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Failed to pause request with unknown content length", (void *)meta_request); return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } /* lock */ aws_s3_meta_request_lock_synced_data(meta_request); AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Pausing request with %u out of %u parts have completed.", (void *)meta_request, auto_ranged_put->synced_data.num_parts_completed, auto_ranged_put->total_num_parts_from_content_length); /* upload can be in one of several states: * - not started, i.e. 
we didn't even call crete mpu yet - return success, * token is NULL and cancel the upload * - in the middle of upload - return success, create token and cancel * upload * - complete MPU started - return success, generate token and try to cancel * complete MPU */ if (auto_ranged_put->synced_data.create_multipart_upload_completed) { *out_resume_token = aws_s3_meta_request_resume_token_new(meta_request->allocator); (*out_resume_token)->type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; (*out_resume_token)->multipart_upload_id = aws_string_clone_or_reuse(meta_request->allocator, auto_ranged_put->upload_id); (*out_resume_token)->part_size = meta_request->part_size; (*out_resume_token)->total_num_parts = auto_ranged_put->total_num_parts_from_content_length; (*out_resume_token)->num_parts_completed = auto_ranged_put->synced_data.num_parts_completed; } /** * Cancels the meta request using the PAUSED flag to avoid deletion of uploaded parts. * This allows the client to resume the upload later, setting the persistable state in the meta request options. */ aws_s3_meta_request_set_fail_synced(meta_request, NULL, AWS_ERROR_S3_PAUSED); aws_s3_meta_request_cancel_cancellable_requests_synced(meta_request, AWS_ERROR_S3_PAUSED); /* unlock */ aws_s3_meta_request_unlock_synced_data(meta_request); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_buffer_pool.c000066400000000000000000000377071456575232400241460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include /* * S3 Buffer Pool. * Fairly trivial implementation of "arena" style allocator. * Note: current implementation is not optimized and instead tries to be * as straightforward as possible. Given that pool manages a small number * of big allocations, performance impact is not that bad, but something we need * to look into on the next iteration. * * Basic approach is to divide acquires into primary and secondary. * User provides chunk size during construction. Acquires below 4 * chunks_size * are done from primary and the rest are from secondary. * * Primary storage consists of blocks that are each s_chunks_per_block * * chunk_size in size. blocks are created on demand as needed. * Acquire operation from primary basically works by determining how many chunks * are needed and then finding available space in existing blocks or creating a * new block. Acquire will always take over the whole chunk, so some space is * likely wasted. * Ex. say chunk_size is 8mb and s_chunks_per_block is 16, which makes block size 128mb. * acquires up to 32mb will be done from primary. So 1 block can hold 4 buffers * of 32mb (4 chunks) or 16 buffers of 8mb (1 chunk). If requested buffer size * is 12mb, 2 chunks are used for acquire and 4mb will be wasted. * Secondary storage delegates directly to system allocator. */ struct aws_s3_buffer_pool_ticket { size_t size; uint8_t *ptr; size_t chunks_used; }; /* Default size for blocks array. Note: this is just for meta info, blocks * themselves are not preallocated. */ static size_t s_block_list_initial_capacity = 5; /* Amount of mem reserved for use outside of buffer pool. * This is an optimistic upper bound on mem used as we dont track it. * Covers both usage outside of pool, i.e. all allocations done as part of s3 * client as well as any allocations overruns due to memory waste in the pool. 
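* (In effect the pool hands out at most mem_limit minus this reserve; see adjusted_mem_lim below.)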
*/ static const size_t s_buffer_pool_reserved_mem = MB_TO_BYTES(128); /* * How many chunks make up a block in primary storage. */ static const size_t s_chunks_per_block = 16; /* * Max size of chunks in primary. * Effectively if client part size is above the following number, primary * storage along with buffer reuse is disabled and all buffers are allocated * directly using allocator. */ static const size_t s_max_chunk_size_for_buffer_reuse = MB_TO_BYTES(64); struct aws_s3_buffer_pool { struct aws_allocator *base_allocator; struct aws_mutex mutex; size_t block_size; size_t chunk_size; /* size at which allocations should go to secondary */ size_t primary_size_cutoff; size_t mem_limit; bool has_reservation_hold; size_t primary_allocated; size_t primary_reserved; size_t primary_used; size_t secondary_reserved; size_t secondary_used; struct aws_array_list blocks; }; struct s3_buffer_pool_block { size_t block_size; uint8_t *block_ptr; uint16_t alloc_bit_mask; }; /* * Sets n bits at position starting with LSB. * Note: n must be at most 8, but in practice will always be at most 4. * position + n should at most be 16 */ static inline uint16_t s_set_bits(uint16_t num, size_t position, size_t n) { AWS_PRECONDITION(n <= 8); AWS_PRECONDITION(position + n <= 16); uint16_t mask = ((uint16_t)0x00FF) >> (8 - n); return num | (mask << position); } /* * Clears n bits at position starting with LSB. * Note: n must be at most 8, but in practice will always be at most 4. * position + n should at most be 16 */ static inline uint16_t s_clear_bits(uint16_t num, size_t position, size_t n) { AWS_PRECONDITION(n <= 8); AWS_PRECONDITION(position + n <= 16); uint16_t mask = ((uint16_t)0x00FF) >> (8 - n); return num & ~(mask << position); } /* * Checks whether n bits are set at position starting with LSB. * Note: n must be at most 8, but in practice will always be at most 4. * position + n should at most be 16 */ static inline bool s_check_bits(uint16_t num, size_t position, size_t n) { AWS_PRECONDITION(n <= 8); AWS_PRECONDITION(position + n <= 16); uint16_t mask = ((uint16_t)0x00FF) >> (8 - n); return (num >> position) & mask; } struct aws_s3_buffer_pool *aws_s3_buffer_pool_new( struct aws_allocator *allocator, size_t chunk_size, size_t mem_limit) { if (mem_limit < GB_TO_BYTES(1)) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Failed to initialize buffer pool. " "Minimum supported value for Memory Limit is 1GB."); aws_raise_error(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG); return NULL; } if (chunk_size < (1024) || chunk_size % (4 * 1024) != 0) { AWS_LOGF_WARN( AWS_LS_S3_CLIENT, "Part size specified on the client can lead to suboptimal performance. " "Consider specifying size in multiples of 4KiB. Ideal part size for most transfers is " "1MiB multiple between 8MiB and 16MiB. Note: the client will automatically scale part size " "if its not sufficient to transfer data within the maximum number of parts"); } size_t adjusted_mem_lim = mem_limit - s_buffer_pool_reserved_mem; /* * TODO: There is several things we can consider tweaking here: * - if chunk size is a weird number of bytes, force it to the closest page size? * - grow chunk size max based on overall mem lim (ex. for 4gb it might be * 64mb, but for 8gb it can be 128mb) * - align chunk size to better fill available mem? 
some chunk sizes can * result in memory being wasted because overall limit does not divide * nicely into chunks */ if (chunk_size > s_max_chunk_size_for_buffer_reuse || chunk_size * s_chunks_per_block > adjusted_mem_lim) { AWS_LOGF_WARN( AWS_LS_S3_CLIENT, "Part size specified on the client is too large for automatic buffer reuse. " "Consider specifying a smaller part size to improve performance and memory utilization"); chunk_size = 0; } struct aws_s3_buffer_pool *buffer_pool = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_buffer_pool)); AWS_FATAL_ASSERT(buffer_pool != NULL); buffer_pool->base_allocator = allocator; buffer_pool->chunk_size = chunk_size; buffer_pool->block_size = s_chunks_per_block * chunk_size; /* Somewhat arbitrary number. * Tries to balance between how many allocations use buffer and buffer space * being wasted. */ buffer_pool->primary_size_cutoff = chunk_size * 4; buffer_pool->mem_limit = adjusted_mem_lim; int mutex_error = aws_mutex_init(&buffer_pool->mutex); AWS_FATAL_ASSERT(mutex_error == AWS_OP_SUCCESS); aws_array_list_init_dynamic( &buffer_pool->blocks, allocator, s_block_list_initial_capacity, sizeof(struct s3_buffer_pool_block)); return buffer_pool; } void aws_s3_buffer_pool_destroy(struct aws_s3_buffer_pool *buffer_pool) { if (buffer_pool == NULL) { return; } for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks); ++i) { struct s3_buffer_pool_block *block; aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); AWS_FATAL_ASSERT(block->alloc_bit_mask == 0 && "Allocator still has outstanding blocks"); aws_mem_release(buffer_pool->base_allocator, block->block_ptr); } aws_array_list_clean_up(&buffer_pool->blocks); aws_mutex_clean_up(&buffer_pool->mutex); struct aws_allocator *base = buffer_pool->base_allocator; aws_mem_release(base, buffer_pool); } void s_buffer_pool_trim_synced(struct aws_s3_buffer_pool *buffer_pool) { for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks);) { struct s3_buffer_pool_block *block; aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); if (block->alloc_bit_mask == 0) { aws_mem_release(buffer_pool->base_allocator, block->block_ptr); aws_array_list_erase(&buffer_pool->blocks, i); /* do not increment since we just released element */ } else { ++i; } } } void aws_s3_buffer_pool_trim(struct aws_s3_buffer_pool *buffer_pool) { aws_mutex_lock(&buffer_pool->mutex); s_buffer_pool_trim_synced(buffer_pool); aws_mutex_unlock(&buffer_pool->mutex); } struct aws_s3_buffer_pool_ticket *aws_s3_buffer_pool_reserve(struct aws_s3_buffer_pool *buffer_pool, size_t size) { AWS_PRECONDITION(buffer_pool); if (buffer_pool->has_reservation_hold) { return NULL; } AWS_FATAL_ASSERT(size != 0); AWS_FATAL_ASSERT(size <= buffer_pool->mem_limit); struct aws_s3_buffer_pool_ticket *ticket = NULL; aws_mutex_lock(&buffer_pool->mutex); size_t overall_taken = buffer_pool->primary_used + buffer_pool->primary_reserved + buffer_pool->secondary_used + buffer_pool->secondary_reserved; /* * If we are allocating from secondary and there is unused space in * primary, trim the primary in hopes we can free up enough memory. * TODO: something smarter, like partial trim? 
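* (For now the trim only runs when a secondary-sized reservation would exceed the limit and primary holds at least one block's worth of allocated-but-unused space.)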
*/ if (size > buffer_pool->primary_size_cutoff && (size + overall_taken) > buffer_pool->mem_limit && (buffer_pool->primary_allocated > (buffer_pool->primary_used + buffer_pool->primary_reserved + buffer_pool->block_size))) { s_buffer_pool_trim_synced(buffer_pool); overall_taken = buffer_pool->primary_used + buffer_pool->primary_reserved + buffer_pool->secondary_used + buffer_pool->secondary_reserved; } if ((size + overall_taken) <= buffer_pool->mem_limit) { ticket = aws_mem_calloc(buffer_pool->base_allocator, 1, sizeof(struct aws_s3_buffer_pool_ticket)); ticket->size = size; if (size <= buffer_pool->primary_size_cutoff) { buffer_pool->primary_reserved += size; } else { buffer_pool->secondary_reserved += size; } } else { buffer_pool->has_reservation_hold = true; } aws_mutex_unlock(&buffer_pool->mutex); if (ticket == NULL) { AWS_LOGF_TRACE( AWS_LS_S3_CLIENT, "Memory limit reached while trying to allocate buffer of size %zu. " "Putting new buffer reservations on hold...", size); aws_raise_error(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT); } return ticket; } bool aws_s3_buffer_pool_has_reservation_hold(struct aws_s3_buffer_pool *buffer_pool) { AWS_PRECONDITION(buffer_pool); return buffer_pool->has_reservation_hold; } void aws_s3_buffer_pool_remove_reservation_hold(struct aws_s3_buffer_pool *buffer_pool) { AWS_PRECONDITION(buffer_pool); AWS_LOGF_TRACE(AWS_LS_S3_CLIENT, "Releasing buffer reservation hold."); buffer_pool->has_reservation_hold = false; } static uint8_t *s_primary_acquire_synced(struct aws_s3_buffer_pool *buffer_pool, size_t size, size_t *out_chunks_used) { uint8_t *alloc_ptr = NULL; size_t chunks_needed = size / buffer_pool->chunk_size; if (size % buffer_pool->chunk_size != 0) { ++chunks_needed; /* round up */ } *out_chunks_used = chunks_needed; /* Look for space in existing blocks */ for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks); ++i) { struct s3_buffer_pool_block *block; aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); for (size_t chunk_i = 0; chunk_i < s_chunks_per_block - chunks_needed + 1; ++chunk_i) { if (!s_check_bits(block->alloc_bit_mask, chunk_i, chunks_needed)) { alloc_ptr = block->block_ptr + chunk_i * buffer_pool->chunk_size; block->alloc_bit_mask = s_set_bits(block->alloc_bit_mask, chunk_i, chunks_needed); goto on_allocated; } } } /* No space available. Allocate new block. 
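* (A fresh block is s_chunks_per_block * chunk_size bytes; the first chunks_needed chunks are marked used in its allocation bit mask.)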
*/ struct s3_buffer_pool_block block; block.alloc_bit_mask = s_set_bits(0, 0, chunks_needed); block.block_ptr = aws_mem_acquire(buffer_pool->base_allocator, buffer_pool->block_size); block.block_size = buffer_pool->block_size; aws_array_list_push_back(&buffer_pool->blocks, &block); alloc_ptr = block.block_ptr; buffer_pool->primary_allocated += buffer_pool->block_size; on_allocated: buffer_pool->primary_reserved -= size; buffer_pool->primary_used += size; return alloc_ptr; } struct aws_byte_buf aws_s3_buffer_pool_acquire_buffer( struct aws_s3_buffer_pool *buffer_pool, struct aws_s3_buffer_pool_ticket *ticket) { AWS_PRECONDITION(buffer_pool); AWS_PRECONDITION(ticket); if (ticket->ptr != NULL) { return aws_byte_buf_from_empty_array(ticket->ptr, ticket->size); } uint8_t *alloc_ptr = NULL; aws_mutex_lock(&buffer_pool->mutex); if (ticket->size <= buffer_pool->primary_size_cutoff) { alloc_ptr = s_primary_acquire_synced(buffer_pool, ticket->size, &ticket->chunks_used); } else { alloc_ptr = aws_mem_acquire(buffer_pool->base_allocator, ticket->size); buffer_pool->secondary_reserved -= ticket->size; buffer_pool->secondary_used += ticket->size; } aws_mutex_unlock(&buffer_pool->mutex); ticket->ptr = alloc_ptr; return aws_byte_buf_from_empty_array(ticket->ptr, ticket->size); } void aws_s3_buffer_pool_release_ticket( struct aws_s3_buffer_pool *buffer_pool, struct aws_s3_buffer_pool_ticket *ticket) { if (buffer_pool == NULL || ticket == NULL) { return; } if (ticket->ptr == NULL) { /* Ticket was never used, make sure to clean up reserved count. */ aws_mutex_lock(&buffer_pool->mutex); if (ticket->size <= buffer_pool->primary_size_cutoff) { buffer_pool->primary_reserved -= ticket->size; } else { buffer_pool->secondary_reserved -= ticket->size; } aws_mutex_unlock(&buffer_pool->mutex); aws_mem_release(buffer_pool->base_allocator, ticket); return; } aws_mutex_lock(&buffer_pool->mutex); if (ticket->size <= buffer_pool->primary_size_cutoff) { size_t chunks_used = ticket->size / buffer_pool->chunk_size; if (ticket->size % buffer_pool->chunk_size != 0) { ++chunks_used; /* round up */ } bool found = false; for (size_t i = 0; i < aws_array_list_length(&buffer_pool->blocks); ++i) { struct s3_buffer_pool_block *block; aws_array_list_get_at_ptr(&buffer_pool->blocks, (void **)&block, i); if (block->block_ptr <= ticket->ptr && block->block_ptr + block->block_size > ticket->ptr) { size_t alloc_i = (ticket->ptr - block->block_ptr) / buffer_pool->chunk_size; block->alloc_bit_mask = s_clear_bits(block->alloc_bit_mask, alloc_i, chunks_used); buffer_pool->primary_used -= ticket->size; found = true; break; } } AWS_FATAL_ASSERT(found); } else { aws_mem_release(buffer_pool->base_allocator, ticket->ptr); buffer_pool->secondary_used -= ticket->size; } aws_mem_release(buffer_pool->base_allocator, ticket); aws_mutex_unlock(&buffer_pool->mutex); } struct aws_s3_buffer_pool_usage_stats aws_s3_buffer_pool_get_usage(struct aws_s3_buffer_pool *buffer_pool) { aws_mutex_lock(&buffer_pool->mutex); struct aws_s3_buffer_pool_usage_stats ret = (struct aws_s3_buffer_pool_usage_stats){ .mem_limit = buffer_pool->mem_limit, .primary_cutoff = buffer_pool->primary_size_cutoff, .primary_allocated = buffer_pool->primary_allocated, .primary_used = buffer_pool->primary_used, .primary_reserved = buffer_pool->primary_reserved, .primary_num_blocks = aws_array_list_length(&buffer_pool->blocks), .secondary_used = buffer_pool->secondary_used, .secondary_reserved = buffer_pool->secondary_reserved, }; aws_mutex_unlock(&buffer_pool->mutex); return ret; } 
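/*
 * Illustrative usage sketch, not part of the library above: how a caller might drive the pool,
 * assuming `allocator` is an initialized struct aws_allocator * and the client uses an 8 MiB
 * part (chunk) size with a 4 GiB memory limit. Error handling is trimmed for brevity.
 *
 *   struct aws_s3_buffer_pool *pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), GB_TO_BYTES(4));
 *
 *   struct aws_s3_buffer_pool_ticket *ticket = aws_s3_buffer_pool_reserve(pool, MB_TO_BYTES(8));
 *   if (ticket == NULL) {
 *       // Memory limit reached (or a reservation hold is active). Retry after enough tickets are
 *       // released and aws_s3_buffer_pool_remove_reservation_hold() has been called.
 *   } else {
 *       // Backed by one chunk of a primary block, since 8 MiB <= primary_size_cutoff (4 * chunk_size).
 *       struct aws_byte_buf buf = aws_s3_buffer_pool_acquire_buffer(pool, ticket);
 *       // ... fill buf and hand it to an upload-part request ...
 *       aws_s3_buffer_pool_release_ticket(pool, ticket); // the chunk becomes reusable
 *   }
 *
 *   aws_s3_buffer_pool_destroy(pool); // fatal-asserts if any chunks are still outstanding
 */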
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_checksum_stream.c000066400000000000000000000112751456575232400250110ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_checksums.h" #include #include struct aws_checksum_stream { struct aws_input_stream base; struct aws_allocator *allocator; struct aws_input_stream *old_stream; struct aws_s3_checksum *checksum; struct aws_byte_buf checksum_result; /* base64 encoded checksum of the stream, updated on destruction of stream */ struct aws_byte_buf *encoded_checksum_output; }; static int s_aws_input_checksum_stream_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { (void)stream; (void)offset; (void)basis; AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot seek on checksum stream, as it will cause the checksum output to mismatch the checksum of the stream " "contents"); AWS_ASSERT(false); return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } static int s_aws_input_checksum_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base); size_t original_len = dest->len; if (aws_input_stream_read(impl->old_stream, dest)) { return AWS_OP_ERR; } struct aws_byte_cursor to_sum = aws_byte_cursor_from_buf(dest); /* Move the cursor to the part to calculate the checksum */ aws_byte_cursor_advance(&to_sum, original_len); /* If read failed, `aws_input_stream_read` will handle the error to restore the dest. No need to handle error here */ return aws_checksum_update(impl->checksum, &to_sum); } static int s_aws_input_checksum_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base); return aws_input_stream_get_status(impl->old_stream, status); } static int s_aws_input_checksum_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) { struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base); return aws_input_stream_get_length(impl->old_stream, out_length); } /* We take ownership of the old input stream, and destroy it with this input stream. This is because we want to be able * to substitute in the chunk_stream for the cursor stream currently used in s_s3_meta_request_default_prepare_request * which returns the new stream. 
So in order to prevent the need of keeping track of two input streams we instead * consume the cursor stream and destroy it with this one */ static void s_aws_input_checksum_stream_destroy(struct aws_checksum_stream *impl) { if (!impl) { return; } int result = aws_checksum_finalize(impl->checksum, &impl->checksum_result, 0); if (result != AWS_OP_SUCCESS) { aws_byte_buf_reset(&impl->checksum_result, true); } AWS_ASSERT(result == AWS_OP_SUCCESS); struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&impl->checksum_result); AWS_FATAL_ASSERT(aws_base64_encode(&checksum_result_cursor, impl->encoded_checksum_output) == AWS_OP_SUCCESS); aws_checksum_destroy(impl->checksum); aws_input_stream_release(impl->old_stream); aws_byte_buf_clean_up(&impl->checksum_result); aws_mem_release(impl->allocator, impl); } static struct aws_input_stream_vtable s_aws_input_checksum_stream_vtable = { .seek = s_aws_input_checksum_stream_seek, .read = s_aws_input_checksum_stream_read, .get_status = s_aws_input_checksum_stream_get_status, .get_length = s_aws_input_checksum_stream_get_length, }; struct aws_input_stream *aws_checksum_stream_new( struct aws_allocator *allocator, struct aws_input_stream *existing_stream, enum aws_s3_checksum_algorithm algorithm, struct aws_byte_buf *checksum_output) { AWS_PRECONDITION(existing_stream); struct aws_checksum_stream *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_checksum_stream)); impl->allocator = allocator; impl->base.vtable = &s_aws_input_checksum_stream_vtable; impl->checksum = aws_checksum_new(allocator, algorithm); if (impl->checksum == NULL) { goto on_error; } aws_byte_buf_init(&impl->checksum_result, allocator, impl->checksum->digest_size); impl->old_stream = aws_input_stream_acquire(existing_stream); impl->encoded_checksum_output = checksum_output; aws_ref_count_init( &impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_checksum_stream_destroy); return &impl->base; on_error: aws_mem_release(impl->allocator, impl); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_checksums.c000066400000000000000000000254531456575232400236240ustar00rootroot00000000000000#include "aws/s3/private/s3_checksums.h" #include "aws/s3/private/s3_util.h" #include #include #define AWS_CRC32_LEN 4 #define AWS_CRC32C_LEN 4 size_t aws_get_digest_size_from_algorithm(enum aws_s3_checksum_algorithm algorithm) { switch (algorithm) { case AWS_SCA_CRC32C: return AWS_CRC32C_LEN; case AWS_SCA_CRC32: return AWS_CRC32_LEN; case AWS_SCA_SHA1: return AWS_SHA1_LEN; case AWS_SCA_SHA256: return AWS_SHA256_LEN; default: return 0; } } const struct aws_byte_cursor *aws_get_http_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm) { switch (algorithm) { case AWS_SCA_CRC32C: return &g_crc32c_header_name; case AWS_SCA_CRC32: return &g_crc32_header_name; case AWS_SCA_SHA1: return &g_sha1_header_name; case AWS_SCA_SHA256: return &g_sha256_header_name; default: return NULL; } } const struct aws_byte_cursor *aws_get_create_mpu_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm) { switch (algorithm) { case AWS_SCA_CRC32C: return &g_crc32c_create_mpu_header_name; case AWS_SCA_CRC32: return &g_crc32_create_mpu_header_name; case AWS_SCA_SHA1: return &g_sha1_create_mpu_header_name; case AWS_SCA_SHA256: return &g_sha256_create_mpu_header_name; default: return NULL; } } const struct aws_byte_cursor *aws_get_complete_mpu_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm) { switch (algorithm) { case AWS_SCA_CRC32C: return 
&g_crc32c_complete_mpu_name; case AWS_SCA_CRC32: return &g_crc32_complete_mpu_name; case AWS_SCA_SHA1: return &g_sha1_complete_mpu_name; case AWS_SCA_SHA256: return &g_sha256_complete_mpu_name; default: return NULL; } } void s3_hash_destroy(struct aws_s3_checksum *checksum) { struct aws_hash *hash = (struct aws_hash *)checksum->impl; aws_hash_destroy(hash); aws_mem_release(checksum->allocator, checksum); } int s3_hash_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *to_checksum) { struct aws_hash *hash = (struct aws_hash *)checksum->impl; return aws_hash_update(hash, to_checksum); } int s3_hash_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *output, size_t truncate_to) { struct aws_hash *hash = (struct aws_hash *)checksum->impl; checksum->good = false; return aws_hash_finalize(hash, output, truncate_to); } typedef uint32_t (*crc_fn)(const uint8_t *, int, uint32_t); uint32_t aws_crc32_common(uint32_t previous, const struct aws_byte_cursor *buf, crc_fn checksum_fn) { size_t length = buf->len; uint8_t *buffer = buf->ptr; uint32_t val = previous; while (length > INT_MAX) { val = checksum_fn(buffer, INT_MAX, val); buffer += (size_t)INT_MAX; length -= (size_t)INT_MAX; } return checksum_fn(buffer, (int)length, val); } int aws_crc_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *out, size_t truncate_to) { if (!checksum->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } checksum->good = false; size_t available_buffer = out->capacity - out->len; size_t len = checksum->digest_size; if (truncate_to && truncate_to < len) { len = truncate_to; } if (available_buffer < len) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } AWS_PRECONDITION(aws_byte_buf_is_valid(out)); uint32_t tmp = aws_hton32(*(uint32_t *)checksum->impl); if (aws_byte_buf_write(out, (uint8_t *)&tmp, len)) { return AWS_OP_SUCCESS; } return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE); } int aws_crc32_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *buf) { if (!checksum->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } *(uint32_t *)checksum->impl = aws_crc32_common(*(uint32_t *)checksum->impl, buf, aws_checksums_crc32); return AWS_OP_SUCCESS; } int aws_crc32c_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *buf) { if (!checksum->good) { return aws_raise_error(AWS_ERROR_INVALID_STATE); } *(uint32_t *)checksum->impl = aws_crc32_common(*(uint32_t *)checksum->impl, buf, aws_checksums_crc32c); return AWS_OP_SUCCESS; } void aws_crc_destroy(struct aws_s3_checksum *checksum) { aws_mem_release(checksum->allocator, checksum->impl); aws_mem_release(checksum->allocator, checksum); } static struct aws_checksum_vtable hash_vtable = { .update = s3_hash_update, .finalize = s3_hash_finalize, .destroy = s3_hash_destroy, }; static struct aws_checksum_vtable crc32_vtable = { .update = aws_crc32_checksum_update, .finalize = aws_crc_finalize, .destroy = aws_crc_destroy, }; static struct aws_checksum_vtable crc32c_vtable = { .update = aws_crc32c_checksum_update, .finalize = aws_crc_finalize, .destroy = aws_crc_destroy, }; struct aws_s3_checksum *aws_hash_new(struct aws_allocator *allocator, aws_hash_new_fn hash_fn) { struct aws_s3_checksum *checksum = aws_mem_acquire(allocator, sizeof(struct aws_s3_checksum)); struct aws_hash *hash = hash_fn(allocator); checksum->impl = (void *)hash; checksum->allocator = allocator; checksum->vtable = &hash_vtable; checksum->good = true; checksum->digest_size = hash->digest_size; return 
checksum; } struct aws_s3_checksum *aws_crc32_checksum_new(struct aws_allocator *allocator) { struct aws_s3_checksum *checksum = aws_mem_acquire(allocator, sizeof(struct aws_s3_checksum)); uint32_t *crc_val = aws_mem_acquire(allocator, sizeof(uint32_t)); *crc_val = 0; checksum->vtable = &crc32_vtable; checksum->allocator = allocator; checksum->impl = crc_val; checksum->good = true; checksum->digest_size = AWS_CRC32_LEN; return checksum; } struct aws_s3_checksum *aws_crc32c_checksum_new(struct aws_allocator *allocator) { struct aws_s3_checksum *checksum = aws_mem_acquire(allocator, sizeof(struct aws_s3_checksum)); uint32_t *crc_val = aws_mem_acquire(allocator, sizeof(uint32_t)); *crc_val = 0; checksum->vtable = &crc32c_vtable; checksum->allocator = allocator; checksum->impl = crc_val; checksum->good = true; checksum->digest_size = AWS_CRC32_LEN; return checksum; } struct aws_s3_checksum *aws_checksum_new(struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm) { struct aws_s3_checksum *checksum = NULL; switch (algorithm) { case AWS_SCA_CRC32C: checksum = aws_crc32c_checksum_new(allocator); break; case AWS_SCA_CRC32: checksum = aws_crc32_checksum_new(allocator); break; case AWS_SCA_SHA1: checksum = aws_hash_new(allocator, aws_sha1_new); break; case AWS_SCA_SHA256: checksum = aws_hash_new(allocator, aws_sha256_new); break; default: return NULL; } checksum->algorithm = algorithm; return checksum; } int aws_checksum_compute_fn( struct aws_allocator *allocator, const struct aws_byte_cursor *input, struct aws_byte_buf *output, struct aws_s3_checksum *(*aws_crc_new)(struct aws_allocator *), size_t truncate_to) { struct aws_s3_checksum *checksum = aws_crc_new(allocator); if (aws_checksum_update(checksum, input)) { aws_checksum_destroy(checksum); return AWS_OP_ERR; } if (aws_checksum_finalize(checksum, output, truncate_to)) { aws_checksum_destroy(checksum); return AWS_OP_ERR; } aws_checksum_destroy(checksum); return AWS_OP_SUCCESS; } void aws_checksum_destroy(struct aws_s3_checksum *checksum) { if (checksum != NULL) { checksum->vtable->destroy(checksum); } } int aws_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *to_checksum) { return checksum->vtable->update(checksum, to_checksum); } int aws_checksum_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *output, size_t truncate_to) { return checksum->vtable->finalize(checksum, output, truncate_to); } int aws_checksum_compute( struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm, const struct aws_byte_cursor *input, struct aws_byte_buf *output, size_t truncate_to) { switch (algorithm) { case AWS_SCA_SHA1: return aws_sha1_compute(allocator, input, output, truncate_to); case AWS_SCA_SHA256: return aws_sha256_compute(allocator, input, output, truncate_to); case AWS_SCA_CRC32: return aws_checksum_compute_fn(allocator, input, output, aws_crc32_checksum_new, truncate_to); case AWS_SCA_CRC32C: return aws_checksum_compute_fn(allocator, input, output, aws_crc32c_checksum_new, truncate_to); default: return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } void checksum_config_init(struct checksum_config *internal_config, const struct aws_s3_checksum_config *config) { AWS_ZERO_STRUCT(*internal_config); if (!config) { return; } internal_config->checksum_algorithm = config->checksum_algorithm; internal_config->location = config->location; internal_config->validate_response_checksum = config->validate_response_checksum; if (config->validate_checksum_algorithms) { const size_t count = 
aws_array_list_length(config->validate_checksum_algorithms); for (size_t i = 0; i < count; ++i) { enum aws_s3_checksum_algorithm algorithm; aws_array_list_get_at(config->validate_checksum_algorithms, &algorithm, i); switch (algorithm) { case AWS_SCA_CRC32C: internal_config->response_checksum_algorithms.crc32c = true; break; case AWS_SCA_CRC32: internal_config->response_checksum_algorithms.crc32 = true; break; case AWS_SCA_SHA1: internal_config->response_checksum_algorithms.sha1 = true; break; case AWS_SCA_SHA256: internal_config->response_checksum_algorithms.sha256 = true; break; default: break; } } } else if (config->validate_response_checksum) { internal_config->response_checksum_algorithms.crc32 = true; internal_config->response_checksum_algorithms.crc32c = true; internal_config->response_checksum_algorithms.sha1 = true; internal_config->response_checksum_algorithms.sha256 = true; } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_chunk_stream.c000066400000000000000000000260441456575232400243170ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_checksums.h" #include #include #include #include AWS_STATIC_STRING_FROM_LITERAL(s_carriage_return, "\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_empty_chunk, "0\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_final_chunk, "\r\n0\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_colon, ":"); AWS_STATIC_STRING_FROM_LITERAL(s_post_trailer, "\r\n\r\n"); struct aws_chunk_stream; typedef int(set_stream_fn)(struct aws_chunk_stream *parent_stream); struct aws_chunk_stream { struct aws_input_stream base; struct aws_allocator *allocator; /* aws_input_stream_byte_cursor provides our actual functionality */ /* Pointing to the stream we read from */ struct aws_input_stream *current_stream; struct aws_input_stream *checksum_stream; struct aws_byte_buf checksum_result; struct aws_byte_buf *checksum_result_output; struct aws_byte_buf pre_chunk_buffer; struct aws_byte_buf post_chunk_buffer; const struct aws_byte_cursor *checksum_header_name; int64_t length; set_stream_fn *set_current_stream_fn; }; static int s_set_null_stream(struct aws_chunk_stream *parent_stream) { aws_input_stream_release(parent_stream->current_stream); parent_stream->current_stream = NULL; parent_stream->set_current_stream_fn = NULL; aws_byte_buf_clean_up(&parent_stream->post_chunk_buffer); return AWS_OP_SUCCESS; } static int s_set_post_chunk_stream(struct aws_chunk_stream *parent_stream) { int64_t current_stream_length; if (aws_input_stream_get_length(parent_stream->current_stream, ¤t_stream_length)) { aws_input_stream_release(parent_stream->current_stream); return AWS_OP_ERR; } aws_input_stream_release(parent_stream->current_stream); struct aws_byte_cursor final_chunk_cursor; if (current_stream_length > 0) { final_chunk_cursor = aws_byte_cursor_from_string(s_final_chunk); } else { final_chunk_cursor = aws_byte_cursor_from_string(s_empty_chunk); } struct aws_byte_cursor post_trailer_cursor = aws_byte_cursor_from_string(s_post_trailer); struct aws_byte_cursor colon_cursor = aws_byte_cursor_from_string(s_colon); if (parent_stream->checksum_result.len == 0) { AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Failed to extract base64 encoded checksum of stream"); return aws_raise_error(AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED); } struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&parent_stream->checksum_result); if (parent_stream->checksum_result_output && 
aws_byte_buf_init_copy_from_cursor( parent_stream->checksum_result_output, parent_stream->allocator, checksum_result_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_init( &parent_stream->post_chunk_buffer, parent_stream->allocator, final_chunk_cursor.len + parent_stream->checksum_header_name->len + colon_cursor.len + checksum_result_cursor.len + post_trailer_cursor.len)) { goto error; } if (aws_byte_buf_append(&parent_stream->post_chunk_buffer, &final_chunk_cursor) || aws_byte_buf_append(&parent_stream->post_chunk_buffer, parent_stream->checksum_header_name) || aws_byte_buf_append(&parent_stream->post_chunk_buffer, &colon_cursor) || aws_byte_buf_append(&parent_stream->post_chunk_buffer, &checksum_result_cursor) || aws_byte_buf_append(&parent_stream->post_chunk_buffer, &post_trailer_cursor)) { goto error; } struct aws_byte_cursor post_chunk_cursor = aws_byte_cursor_from_buf(&parent_stream->post_chunk_buffer); parent_stream->current_stream = aws_input_stream_new_from_cursor(parent_stream->allocator, &post_chunk_cursor); parent_stream->set_current_stream_fn = s_set_null_stream; return AWS_OP_SUCCESS; error: aws_byte_buf_clean_up(parent_stream->checksum_result_output); aws_byte_buf_clean_up(&parent_stream->post_chunk_buffer); return AWS_OP_ERR; } static int s_set_chunk_stream(struct aws_chunk_stream *parent_stream) { aws_input_stream_release(parent_stream->current_stream); parent_stream->current_stream = parent_stream->checksum_stream; aws_byte_buf_clean_up(&parent_stream->pre_chunk_buffer); parent_stream->checksum_stream = NULL; parent_stream->set_current_stream_fn = s_set_post_chunk_stream; return AWS_OP_SUCCESS; } static int s_aws_input_chunk_stream_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { (void)stream; (void)offset; (void)basis; AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot seek on chunk stream, as it will cause the checksum output to mismatch the checksum of the stream" "contents"); AWS_ASSERT(false); return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } static int s_aws_input_chunk_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct aws_chunk_stream *impl = AWS_CONTAINER_OF(stream, struct aws_chunk_stream, base); struct aws_stream_status status; AWS_ZERO_STRUCT(status); while (impl->current_stream != NULL && dest->len < dest->capacity) { int err = aws_input_stream_read(impl->current_stream, dest); if (err) { return err; } if (aws_input_stream_get_status(impl->current_stream, &status)) { return AWS_OP_ERR; } if (status.is_end_of_stream && impl->set_current_stream_fn(impl)) { return AWS_OP_ERR; } } return AWS_OP_SUCCESS; } static int s_aws_input_chunk_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { struct aws_chunk_stream *impl = AWS_CONTAINER_OF(stream, struct aws_chunk_stream, base); if (impl->current_stream == NULL) { status->is_end_of_stream = true; status->is_valid = true; return AWS_OP_SUCCESS; } int res = aws_input_stream_get_status(impl->current_stream, status); if (res != AWS_OP_SUCCESS) { /* Only when the current_stream is NULL, it is end of stream, as the current stream will be updated to feed to * data */ status->is_end_of_stream = false; } return res; } static int s_aws_input_chunk_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) { struct aws_chunk_stream *impl = AWS_CONTAINER_OF(stream, struct aws_chunk_stream, base); *out_length = impl->length; return AWS_OP_SUCCESS; } static void s_aws_input_chunk_stream_destroy(struct 
aws_chunk_stream *impl) { if (impl) { if (impl->current_stream) { aws_input_stream_release(impl->current_stream); } if (impl->checksum_stream) { aws_input_stream_release(impl->checksum_stream); } aws_byte_buf_clean_up(&impl->pre_chunk_buffer); aws_byte_buf_clean_up(&impl->checksum_result); aws_byte_buf_clean_up(&impl->post_chunk_buffer); aws_mem_release(impl->allocator, impl); } } static struct aws_input_stream_vtable s_aws_input_chunk_stream_vtable = { .seek = s_aws_input_chunk_stream_seek, .read = s_aws_input_chunk_stream_read, .get_status = s_aws_input_chunk_stream_get_status, .get_length = s_aws_input_chunk_stream_get_length, }; struct aws_input_stream *aws_chunk_stream_new( struct aws_allocator *allocator, struct aws_input_stream *existing_stream, enum aws_s3_checksum_algorithm algorithm, struct aws_byte_buf *checksum_output) { struct aws_chunk_stream *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_chunk_stream)); impl->allocator = allocator; impl->base.vtable = &s_aws_input_chunk_stream_vtable; impl->checksum_result_output = checksum_output; int64_t stream_length = 0; int64_t final_chunk_len = 0; if (aws_input_stream_get_length(existing_stream, &stream_length)) { goto error; } struct aws_byte_cursor pre_chunk_cursor = aws_byte_cursor_from_string(s_carriage_return); char stream_length_string[32]; AWS_ZERO_ARRAY(stream_length_string); snprintf(stream_length_string, AWS_ARRAY_SIZE(stream_length_string), "%" PRIX64, stream_length); struct aws_string *stream_length_aws_string = aws_string_new_from_c_str(allocator, stream_length_string); struct aws_byte_cursor stream_length_cursor = aws_byte_cursor_from_string(stream_length_aws_string); if (aws_byte_buf_init(&impl->pre_chunk_buffer, allocator, stream_length_cursor.len + pre_chunk_cursor.len)) { goto error; } if (aws_byte_buf_append(&impl->pre_chunk_buffer, &stream_length_cursor)) { goto error; } aws_string_destroy(stream_length_aws_string); if (aws_byte_buf_append(&impl->pre_chunk_buffer, &pre_chunk_cursor)) { goto error; } size_t checksum_len = aws_get_digest_size_from_algorithm(algorithm); size_t encoded_checksum_len = 0; if (aws_base64_compute_encoded_len(checksum_len, &encoded_checksum_len)) { goto error; } if (aws_byte_buf_init(&impl->checksum_result, allocator, encoded_checksum_len)) { goto error; } impl->checksum_stream = aws_checksum_stream_new(allocator, existing_stream, algorithm, &impl->checksum_result); if (impl->checksum_stream == NULL) { goto error; } int64_t prechunk_stream_len = 0; int64_t colon_len = s_colon->len; int64_t post_trailer_len = s_post_trailer->len; struct aws_byte_cursor complete_pre_chunk_cursor = aws_byte_cursor_from_buf(&impl->pre_chunk_buffer); if (stream_length > 0) { impl->current_stream = aws_input_stream_new_from_cursor(allocator, &complete_pre_chunk_cursor); final_chunk_len = s_final_chunk->len; if (impl->current_stream == NULL) { goto error; } impl->set_current_stream_fn = s_set_chunk_stream; } else { impl->current_stream = impl->checksum_stream; final_chunk_len = s_empty_chunk->len; impl->checksum_stream = NULL; impl->set_current_stream_fn = s_set_post_chunk_stream; } impl->checksum_header_name = aws_get_http_header_name_from_algorithm(algorithm); if (aws_input_stream_get_length(impl->current_stream, &prechunk_stream_len)) { goto error; } /* we subtract one since aws_base64_compute_encoded_len accounts for the null terminator which won't show up in our * stream */ impl->length = prechunk_stream_len + stream_length + final_chunk_len + impl->checksum_header_name->len + colon_len + 
encoded_checksum_len + post_trailer_len - 1; AWS_ASSERT(impl->current_stream); aws_ref_count_init(&impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_chunk_stream_destroy); return &impl->base; error: aws_input_stream_release(impl->checksum_stream); aws_input_stream_release(impl->current_stream); aws_byte_buf_clean_up(&impl->pre_chunk_buffer); aws_byte_buf_clean_up(&impl->checksum_result); aws_mem_release(impl->allocator, impl); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_client.c000066400000000000000000003110021456575232400231010ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_auto_ranged_get.h" #include "aws/s3/private/s3_auto_ranged_put.h" #include "aws/s3/private/s3_buffer_pool.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_copy_object.h" #include "aws/s3/private/s3_default_meta_request.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_parallel_input_stream.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" #include "aws/s3/private/s3express_credentials_provider_impl.h" #include "aws/s3/s3express_credentials_provider.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif /* _MSC_VER */ struct aws_s3_meta_request_work { struct aws_linked_list_node node; struct aws_s3_meta_request *meta_request; }; static const enum aws_log_level s_log_level_client_stats = AWS_LL_INFO; static const uint32_t s_max_requests_multiplier = 4; /* TODO Provide analysis on origins of this value. */ static const double s_throughput_per_vip_gbps = 4.0; /* Preferred amount of active connections per meta request type. */ const uint32_t g_num_conns_per_vip_meta_request_look_up[AWS_S3_META_REQUEST_TYPE_MAX] = { 10, /* AWS_S3_META_REQUEST_TYPE_DEFAULT */ 10, /* AWS_S3_META_REQUEST_TYPE_GET_OBJECT */ 10, /* AWS_S3_META_REQUEST_TYPE_PUT_OBJECT */ 10 /* AWS_S3_META_REQUEST_TYPE_COPY_OBJECT */ }; /* Should be max of s_num_conns_per_vip_meta_request_look_up */ const uint32_t g_max_num_connections_per_vip = 10; /** * Default part size is 8 MiB to reach the best performance from the experiments we had. * Default max part size is 5GiB as the server limit. Object size limit is 5TiB for now. * max number of upload parts is 10000. * TODO Provide more information on other values. */ static const size_t s_default_part_size = 8 * 1024 * 1024; static const uint64_t s_default_max_part_size = 5368709120ULL; static const double s_default_throughput_target_gbps = 10.0; static const uint32_t s_default_max_retries = 5; static size_t s_dns_host_address_ttl_seconds = 5 * 60; /* Default time until a connection is declared dead, while handling a request but seeing no activity. * 30 seconds mirrors the value currently used by the Java SDK. */ static const uint32_t s_default_throughput_failure_interval_seconds = 30; /* Amount of time spent idling before trimming buffer. */ static const size_t s_buffer_pool_trim_time_offset_in_s = 5; /* Called when ref count is 0. */ static void s_s3_client_start_destroy(void *user_data); /* Called by s_s3_client_process_work_default when all shutdown criteria has been met. 
*/ static void s_s3_client_finish_destroy_default(struct aws_s3_client *client); /* Called when the body streaming elg shutdown has completed. */ static void s_s3_client_body_streaming_elg_shutdown(void *user_data); static void s_s3_client_create_connection_for_request(struct aws_s3_client *client, struct aws_s3_request *request); /* Callback which handles the HTTP connection retrieved by acquire_http_connection. */ static void s_s3_client_on_acquire_http_connection( struct aws_http_connection *http_connection, int error_code, void *user_data); static void s_s3_client_push_meta_request_synced( struct aws_s3_client *client, struct aws_s3_meta_request *meta_request); /* Schedule task for processing work. (Calls the corresponding vtable function.) */ static void s_s3_client_schedule_process_work_synced(struct aws_s3_client *client); /* Default implementation for scheduling processing of work. */ static void s_s3_client_schedule_process_work_synced_default(struct aws_s3_client *client); /* Actual task function that processes work. */ static void s_s3_client_process_work_task(struct aws_task *task, void *arg, enum aws_task_status task_status); static void s_s3_client_process_work_default(struct aws_s3_client *client); static void s_s3_client_endpoint_shutdown_callback(struct aws_s3_client *client); /* Default factory function for creating a meta request. */ static struct aws_s3_meta_request *s_s3_client_meta_request_factory_default( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options); static struct aws_s3_client_vtable s_s3_client_default_vtable = { .meta_request_factory = s_s3_client_meta_request_factory_default, .acquire_http_connection = aws_http_connection_manager_acquire_connection, .get_host_address_count = aws_host_resolver_get_host_address_count, .schedule_process_work_synced = s_s3_client_schedule_process_work_synced_default, .process_work = s_s3_client_process_work_default, .endpoint_shutdown_callback = s_s3_client_endpoint_shutdown_callback, .finish_destroy = s_s3_client_finish_destroy_default, .parallel_input_stream_new_from_file = aws_parallel_input_stream_new_from_file, }; void aws_s3_set_dns_ttl(size_t ttl) { s_dns_host_address_ttl_seconds = ttl; } /* Returns the max number of connections allowed. * * When meta request is NULL, this will return the overall allowed number of connections. * * If meta_request is not NULL, this will give the max number of connections allowed for that meta request type on * that endpoint. */ uint32_t aws_s3_client_get_max_active_connections( struct aws_s3_client *client, struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(client); uint32_t num_connections_per_vip = g_max_num_connections_per_vip; uint32_t num_vips = client->ideal_vip_count; if (meta_request != NULL) { num_connections_per_vip = g_num_conns_per_vip_meta_request_look_up[meta_request->type]; struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_ASSERT(endpoint != NULL); AWS_ASSERT(client->vtable->get_host_address_count); size_t num_known_vips = client->vtable->get_host_address_count( client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A); /* If the number of known vips is less than our ideal VIP count, clamp it. */ if (num_known_vips < (size_t)num_vips) { num_vips = (uint32_t)num_known_vips; } } /* We always want to allow for at least one VIP worth of connections. 
*/ if (num_vips == 0) { num_vips = 1; } uint32_t max_active_connections = num_vips * num_connections_per_vip; if (client->max_active_connections_override > 0 && client->max_active_connections_override < max_active_connections) { max_active_connections = client->max_active_connections_override; } return max_active_connections; } /* Returns the max number of requests allowed to be in memory */ uint32_t aws_s3_client_get_max_requests_in_flight(struct aws_s3_client *client) { AWS_PRECONDITION(client); return aws_s3_client_get_max_active_connections(client, NULL) * s_max_requests_multiplier; } /* Returns the max number of requests that should be in preparation stage (ie: reading from a stream, being signed, * etc.) */ uint32_t aws_s3_client_get_max_requests_prepare(struct aws_s3_client *client) { return aws_s3_client_get_max_active_connections(client, NULL); } static uint32_t s_s3_client_get_num_requests_network_io( struct aws_s3_client *client, enum aws_s3_meta_request_type meta_request_type) { AWS_PRECONDITION(client); uint32_t num_requests_network_io = 0; if (meta_request_type == AWS_S3_META_REQUEST_TYPE_MAX) { for (uint32_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) { num_requests_network_io += (uint32_t)aws_atomic_load_int(&client->stats.num_requests_network_io[i]); } } else { num_requests_network_io = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_network_io[meta_request_type]); } return num_requests_network_io; } void aws_s3_client_lock_synced_data(struct aws_s3_client *client) { aws_mutex_lock(&client->synced_data.lock); } void aws_s3_client_unlock_synced_data(struct aws_s3_client *client) { aws_mutex_unlock(&client->synced_data.lock); } static void s_s3express_provider_finish_destroy(void *user_data) { struct aws_s3_client *client = user_data; AWS_PRECONDITION(client); /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); client->synced_data.s3express_provider_active = false; /* Schedule the work task to call s_s3_client_finish_destroy function if * everything cleaning up asynchronously has finished. */ s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ } struct aws_s3express_credentials_provider *s_s3express_provider_default_factory( struct aws_allocator *allocator, struct aws_s3_client *client, aws_simple_completion_callback on_provider_shutdown_callback, void *shutdown_user_data, void *factory_user_data) { (void)factory_user_data; struct aws_s3express_credentials_provider_default_options options = { .client = client, .shutdown_complete_callback = on_provider_shutdown_callback, .shutdown_user_data = shutdown_user_data, }; struct aws_s3express_credentials_provider *s3express_provider = aws_s3express_credentials_provider_new_default(allocator, &options); return s3express_provider; } struct aws_s3_client *aws_s3_client_new( struct aws_allocator *allocator, const struct aws_s3_client_config *client_config) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(client_config); if (client_config->client_bootstrap == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot create client from client_config; client_bootstrap provided in options is invalid."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } /* Cannot be less than zero. If zero, use default. 
*/ if (client_config->throughput_target_gbps < 0.0) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot create client from client_config; throughput_target_gbps cannot less than or equal to 0."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (client_config->signing_config == NULL) { AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Cannot create client from client_config; signing_config is required."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (client_config->signing_config->credentials == NULL && client_config->signing_config->credentials_provider == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot create client from client_config; Invalid signing_config provided, either credentials or " "credentials provider has to be set."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (!client_config->enable_s3express && client_config->signing_config->algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot create client from client_config; Client config is set use S3 Express signing, but S3 Express " "support is " "not configured."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } #ifdef BYO_CRYPTO if (client_config->tls_mode == AWS_MR_TLS_ENABLED && client_config->tls_connection_options == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot create client from client_config; when using BYO_CRYPTO, tls_connection_options can not be " "NULL when TLS is enabled."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } #endif struct aws_s3_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_client)); client->allocator = allocator; size_t mem_limit = 0; if (client_config->memory_limit_in_bytes == 0) { #if SIZE_BITS == 32 if (client_config->throughput_target_gbps > 25.0) { mem_limit = GB_TO_BYTES(2); } else { mem_limit = GB_TO_BYTES(1); } #else if (client_config->throughput_target_gbps > 75.0) { mem_limit = GB_TO_BYTES(8); } else if (client_config->throughput_target_gbps > 25.0) { mem_limit = GB_TO_BYTES(4); } else { mem_limit = GB_TO_BYTES(2); } #endif } else { // cap memory limit to SIZE_MAX if (client_config->memory_limit_in_bytes > SIZE_MAX) { mem_limit = SIZE_MAX; } else { mem_limit = (size_t)client_config->memory_limit_in_bytes; } } size_t part_size = s_default_part_size; if (client_config->part_size != 0) { if (client_config->part_size > SIZE_MAX) { part_size = SIZE_MAX; } else { part_size = (size_t)client_config->part_size; } } client->buffer_pool = aws_s3_buffer_pool_new(allocator, part_size, mem_limit); if (client->buffer_pool == NULL) { goto on_early_fail; } struct aws_s3_buffer_pool_usage_stats pool_usage = aws_s3_buffer_pool_get_usage(client->buffer_pool); if (client_config->max_part_size > pool_usage.mem_limit) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "Cannot create client from client_config; configured max part size should not exceed memory limit." 
"size."); aws_raise_error(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG); goto on_early_fail; } client->vtable = &s_s3_client_default_vtable; aws_ref_count_init(&client->ref_count, client, (aws_simple_completion_callback *)s_s3_client_start_destroy); if (aws_mutex_init(&client->synced_data.lock) != AWS_OP_SUCCESS) { goto on_early_fail; } aws_linked_list_init(&client->synced_data.pending_meta_request_work); aws_linked_list_init(&client->synced_data.prepared_requests); aws_linked_list_init(&client->threaded_data.meta_requests); aws_linked_list_init(&client->threaded_data.request_queue); aws_atomic_init_int(&client->stats.num_requests_in_flight, 0); for (uint32_t i = 0; i < (uint32_t)AWS_S3_META_REQUEST_TYPE_MAX; ++i) { aws_atomic_init_int(&client->stats.num_requests_network_io[i], 0); } aws_atomic_init_int(&client->stats.num_requests_stream_queued_waiting, 0); aws_atomic_init_int(&client->stats.num_requests_streaming_response, 0); *((uint32_t *)&client->max_active_connections_override) = client_config->max_active_connections_override; /* Store our client bootstrap. */ client->client_bootstrap = aws_client_bootstrap_acquire(client_config->client_bootstrap); struct aws_event_loop_group *event_loop_group = client_config->client_bootstrap->event_loop_group; aws_event_loop_group_acquire(event_loop_group); client->process_work_event_loop = aws_event_loop_group_get_next_loop(event_loop_group); /* Make a copy of the region string. */ client->region = aws_string_new_from_array(allocator, client_config->region.ptr, client_config->region.len); *((size_t *)&client->part_size) = part_size; if (client_config->max_part_size != 0) { *((uint64_t *)&client->max_part_size) = client_config->max_part_size; } else { *((uint64_t *)&client->max_part_size) = s_default_max_part_size; } if (client_config->max_part_size > pool_usage.mem_limit) { *((uint64_t *)&client->max_part_size) = pool_usage.mem_limit; } if (client->max_part_size > SIZE_MAX) { /* For the 32bit max part size to be SIZE_MAX */ *((uint64_t *)&client->max_part_size) = SIZE_MAX; } if (client_config->multipart_upload_threshold != 0) { *((uint64_t *)&client->multipart_upload_threshold) = client_config->multipart_upload_threshold; } else { *((uint64_t *)&client->multipart_upload_threshold) = part_size > g_s3_min_upload_part_size ? 
part_size : g_s3_min_upload_part_size; } if (client_config->max_part_size < client_config->part_size) { *((uint64_t *)&client_config->max_part_size) = client_config->part_size; } client->connect_timeout_ms = client_config->connect_timeout_ms; if (client_config->proxy_ev_settings) { client->proxy_ev_settings = aws_mem_calloc(allocator, 1, sizeof(struct proxy_env_var_settings)); *client->proxy_ev_settings = *client_config->proxy_ev_settings; if (client_config->proxy_ev_settings->tls_options) { client->proxy_ev_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options)); if (aws_tls_connection_options_copy(client->proxy_ev_tls_options, client->proxy_ev_settings->tls_options)) { goto on_error; } client->proxy_ev_settings->tls_options = client->proxy_ev_tls_options; } } if (client_config->tcp_keep_alive_options) { client->tcp_keep_alive_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_tcp_keep_alive_options)); *client->tcp_keep_alive_options = *client_config->tcp_keep_alive_options; } if (client_config->monitoring_options) { client->monitoring_options = *client_config->monitoring_options; } else { client->monitoring_options.minimum_throughput_bytes_per_second = 1; client->monitoring_options.allowable_throughput_failure_interval_seconds = s_default_throughput_failure_interval_seconds; } if (client_config->tls_mode == AWS_MR_TLS_ENABLED) { client->tls_connection_options = aws_mem_calloc(client->allocator, 1, sizeof(struct aws_tls_connection_options)); if (client_config->tls_connection_options != NULL) { aws_tls_connection_options_copy(client->tls_connection_options, client_config->tls_connection_options); } else { #ifdef BYO_CRYPTO AWS_FATAL_ASSERT(false); goto on_error; #else struct aws_tls_ctx_options default_tls_ctx_options; AWS_ZERO_STRUCT(default_tls_ctx_options); aws_tls_ctx_options_init_default_client(&default_tls_ctx_options, allocator); struct aws_tls_ctx *default_tls_ctx = aws_tls_client_ctx_new(allocator, &default_tls_ctx_options); if (default_tls_ctx == NULL) { goto on_error; } aws_tls_connection_options_init_from_ctx(client->tls_connection_options, default_tls_ctx); aws_tls_ctx_release(default_tls_ctx); aws_tls_ctx_options_clean_up(&default_tls_ctx_options); #endif } } if (client_config->proxy_options) { client->proxy_config = aws_http_proxy_config_new_from_proxy_options_with_tls_info( allocator, client_config->proxy_options, client_config->tls_mode == AWS_MR_TLS_ENABLED); if (client->proxy_config == NULL) { goto on_error; } } /* Set up body streaming ELG */ { uint16_t num_event_loops = (uint16_t)aws_array_list_length(&client->client_bootstrap->event_loop_group->event_loops); uint16_t num_streaming_threads = num_event_loops; if (num_streaming_threads < 1) { num_streaming_threads = 1; } struct aws_shutdown_callback_options body_streaming_elg_shutdown_options = { .shutdown_callback_fn = s_s3_client_body_streaming_elg_shutdown, .shutdown_callback_user_data = client, }; client->body_streaming_elg = aws_event_loop_group_new_default( client->allocator, num_streaming_threads, &body_streaming_elg_shutdown_options); if (!client->body_streaming_elg) { /* Fail to create elg, we should fail the call */ goto on_error; } client->synced_data.body_streaming_elg_allocated = true; } /* Setup cannot fail after this point. 
*/ if (client_config->throughput_target_gbps != 0.0) { *((double *)&client->throughput_target_gbps) = client_config->throughput_target_gbps; } else { *((double *)&client->throughput_target_gbps) = s_default_throughput_target_gbps; } *((enum aws_s3_meta_request_compute_content_md5 *)&client->compute_content_md5) = client_config->compute_content_md5; /* Determine how many vips are ideal by dividing target-throughput by throughput-per-vip. */ { double ideal_vip_count_double = client->throughput_target_gbps / s_throughput_per_vip_gbps; *((uint32_t *)&client->ideal_vip_count) = (uint32_t)ceil(ideal_vip_count_double); } client->cached_signing_config = aws_cached_signing_config_new(client, client_config->signing_config); if (client_config->enable_s3express) { if (client_config->s3express_provider_override_factory) { client->s3express_provider_factory = client_config->s3express_provider_override_factory; client->factory_user_data = client_config->factory_user_data; } else { client->s3express_provider_factory = s_s3express_provider_default_factory; } } client->synced_data.active = true; if (client_config->retry_strategy != NULL) { aws_retry_strategy_acquire(client_config->retry_strategy); client->retry_strategy = client_config->retry_strategy; } else { struct aws_exponential_backoff_retry_options backoff_retry_options = { .el_group = client_config->client_bootstrap->event_loop_group, .max_retries = s_default_max_retries, }; struct aws_standard_retry_options retry_options = { .backoff_retry_options = backoff_retry_options, }; client->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options); } aws_hash_table_init( &client->synced_data.endpoints, client->allocator, 10, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, NULL); /* Initialize shutdown options and tracking. 
*/ client->shutdown_callback = client_config->shutdown_callback; client->shutdown_callback_user_data = client_config->shutdown_callback_user_data; *((bool *)&client->enable_read_backpressure) = client_config->enable_read_backpressure; *((size_t *)&client->initial_read_window) = client_config->initial_read_window; return client; on_error: aws_string_destroy(client->region); if (client->tls_connection_options) { aws_tls_connection_options_clean_up(client->tls_connection_options); aws_mem_release(client->allocator, client->tls_connection_options); client->tls_connection_options = NULL; } if (client->proxy_config) { aws_http_proxy_config_destroy(client->proxy_config); } if (client->proxy_ev_tls_options) { aws_tls_connection_options_clean_up(client->proxy_ev_tls_options); aws_mem_release(client->allocator, client->proxy_ev_tls_options); client->proxy_ev_settings->tls_options = NULL; } aws_mem_release(client->allocator, client->proxy_ev_settings); aws_mem_release(client->allocator, client->tcp_keep_alive_options); aws_event_loop_group_release(client->client_bootstrap->event_loop_group); aws_client_bootstrap_release(client->client_bootstrap); aws_mutex_clean_up(&client->synced_data.lock); on_early_fail: aws_mem_release(client->allocator, client); return NULL; } struct aws_s3_client *aws_s3_client_acquire(struct aws_s3_client *client) { AWS_PRECONDITION(client); aws_ref_count_acquire(&client->ref_count); return client; } struct aws_s3_client *aws_s3_client_release(struct aws_s3_client *client) { if (client != NULL) { aws_ref_count_release(&client->ref_count); } return NULL; } static void s_s3_client_start_destroy(void *user_data) { struct aws_s3_client *client = user_data; AWS_PRECONDITION(client); AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Client starting destruction.", (void *)client); struct aws_linked_list local_vip_list; aws_linked_list_init(&local_vip_list); /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); client->synced_data.active = false; /* Prevent the client from cleaning up in between the mutex unlock/re-lock below.*/ client->synced_data.start_destroy_executing = true; aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ aws_event_loop_group_release(client->body_streaming_elg); client->body_streaming_elg = NULL; aws_s3express_credentials_provider_release(client->s3express_provider); /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); client->synced_data.start_destroy_executing = false; /* Schedule the work task to clean up outstanding connections and to call s_s3_client_finish_destroy function if * everything cleaning up asynchronously has finished. 
*/ s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ } static void s_s3_client_finish_destroy_default(struct aws_s3_client *client) { AWS_PRECONDITION(client); AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Client finishing destruction.", (void *)client); if (client->threaded_data.trim_buffer_pool_task_scheduled) { aws_event_loop_cancel_task(client->process_work_event_loop, &client->synced_data.trim_buffer_pool_task); } aws_string_destroy(client->region); client->region = NULL; if (client->tls_connection_options) { aws_tls_connection_options_clean_up(client->tls_connection_options); aws_mem_release(client->allocator, client->tls_connection_options); client->tls_connection_options = NULL; } if (client->proxy_config) { aws_http_proxy_config_destroy(client->proxy_config); } if (client->proxy_ev_tls_options) { aws_tls_connection_options_clean_up(client->proxy_ev_tls_options); aws_mem_release(client->allocator, client->proxy_ev_tls_options); client->proxy_ev_settings->tls_options = NULL; } aws_mem_release(client->allocator, client->proxy_ev_settings); aws_mem_release(client->allocator, client->tcp_keep_alive_options); aws_mutex_clean_up(&client->synced_data.lock); AWS_ASSERT(aws_linked_list_empty(&client->synced_data.pending_meta_request_work)); AWS_ASSERT(aws_linked_list_empty(&client->threaded_data.meta_requests)); aws_hash_table_clean_up(&client->synced_data.endpoints); aws_retry_strategy_release(client->retry_strategy); aws_event_loop_group_release(client->client_bootstrap->event_loop_group); aws_client_bootstrap_release(client->client_bootstrap); aws_cached_signing_config_destroy(client->cached_signing_config); aws_s3_client_shutdown_complete_callback_fn *shutdown_callback = client->shutdown_callback; void *shutdown_user_data = client->shutdown_callback_user_data; aws_s3_buffer_pool_destroy(client->buffer_pool); aws_mem_release(client->allocator, client); client = NULL; if (shutdown_callback != NULL) { shutdown_callback(shutdown_user_data); } } static void s_s3_client_body_streaming_elg_shutdown(void *user_data) { struct aws_s3_client *client = user_data; AWS_PRECONDITION(client); AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Client body streaming ELG shutdown.", (void *)client); /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); client->synced_data.body_streaming_elg_allocated = false; s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ } uint32_t aws_s3_client_queue_requests_threaded( struct aws_s3_client *client, struct aws_linked_list *request_list, bool queue_front) { AWS_PRECONDITION(client); AWS_PRECONDITION(request_list); if (aws_linked_list_empty(request_list)) { return 0; } uint32_t request_list_size = 0; for (struct aws_linked_list_node *node = aws_linked_list_begin(request_list); node != aws_linked_list_end(request_list); node = aws_linked_list_next(node)) { ++request_list_size; } if (queue_front) { aws_linked_list_move_all_front(&client->threaded_data.request_queue, request_list); } else { aws_linked_list_move_all_back(&client->threaded_data.request_queue, request_list); } client->threaded_data.request_queue_size += request_list_size; return request_list_size; } struct aws_s3_request *aws_s3_client_dequeue_request_threaded(struct aws_s3_client *client) { AWS_PRECONDITION(client); if (aws_linked_list_empty(&client->threaded_data.request_queue)) { return NULL; } struct aws_linked_list_node *request_node = 
aws_linked_list_pop_front(&client->threaded_data.request_queue); struct aws_s3_request *request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, node); --client->threaded_data.request_queue_size; return request; } /* * There is currently some overlap between user provided Host header and endpoint * override. This function handles the corner cases for when either or both are provided. */ int s_apply_endpoint_override( const struct aws_s3_client *client, struct aws_http_headers *message_headers, const struct aws_uri *endpoint) { AWS_PRECONDITION(message_headers); const struct aws_byte_cursor *endpoint_authority = endpoint == NULL ? NULL : aws_uri_authority(endpoint); if (!aws_http_headers_has(message_headers, g_host_header_name)) { if (endpoint_authority == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; message provided in options does not have either 'Host' header " "set or endpoint override.", (void *)client); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (aws_http_headers_set(message_headers, g_host_header_name, *endpoint_authority)) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; failed to set 'Host' header based on endpoint override.", (void *)client); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } } struct aws_byte_cursor host_value; AWS_FATAL_ASSERT(aws_http_headers_get(message_headers, g_host_header_name, &host_value) == AWS_OP_SUCCESS); if (endpoint_authority != NULL && !aws_byte_cursor_eq(&host_value, endpoint_authority)) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; host header value " PRInSTR " does not match endpoint override " PRInSTR, (void *)client, AWS_BYTE_CURSOR_PRI(host_value), AWS_BYTE_CURSOR_PRI(*endpoint_authority)); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } return AWS_OP_SUCCESS; } /* Public facing make-meta-request function. 
*/ struct aws_s3_meta_request *aws_s3_client_make_meta_request( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_LOGF_INFO(AWS_LS_S3_CLIENT, "id=%p Initiating making of meta request", (void *)client); AWS_PRECONDITION(client); AWS_PRECONDITION(client->vtable); AWS_PRECONDITION(client->vtable->meta_request_factory); AWS_PRECONDITION(options); bool use_s3express_signing = false; if (options->signing_config != NULL) { use_s3express_signing = options->signing_config->algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS; } else if (client->cached_signing_config) { use_s3express_signing = client->cached_signing_config->config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS; } if (options->type >= AWS_S3_META_REQUEST_TYPE_MAX) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; invalid meta request type specified.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (options->message == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; message provided in options is invalid.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (use_s3express_signing && client->s3express_provider_factory == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; client doesn't support S3 Express signing.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_http_headers *message_headers = aws_http_message_get_headers(options->message); if (message_headers == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; message provided in options does not contain headers.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (options->checksum_config) { if (options->checksum_config->location == AWS_SCL_TRAILER) { struct aws_http_headers *headers = aws_http_message_get_headers(options->message); struct aws_byte_cursor existing_encoding; AWS_ZERO_STRUCT(existing_encoding); if (aws_http_headers_get(headers, g_content_encoding_header_name, &existing_encoding) == AWS_OP_SUCCESS) { if (aws_byte_cursor_find_exact(&existing_encoding, &g_content_encoding_header_aws_chunked, NULL) == AWS_OP_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; for trailer checksum, the original request cannot be " "aws-chunked encoding. 
The client will encode the request instead.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } } } if (options->checksum_config->location == AWS_SCL_HEADER) { /* TODO: support calculate checksum to add to header */ aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); return NULL; } if (options->checksum_config->location != AWS_SCL_NONE && options->checksum_config->checksum_algorithm == AWS_SCA_NONE) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; checksum location is set, but no checksum algorithm selected.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (options->checksum_config->checksum_algorithm != AWS_SCA_NONE && options->checksum_config->location == AWS_SCL_NONE) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; checksum algorithm is set, but no checksum location selected.", (void *)client); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } } if (s_apply_endpoint_override(client, message_headers, options->endpoint)) { return NULL; } struct aws_byte_cursor host_header_value; /* The Host header must be set from s_apply_endpoint_override, if not errored out */ AWS_FATAL_ASSERT(aws_http_headers_get(message_headers, g_host_header_name, &host_header_value) == AWS_OP_SUCCESS); bool is_https = true; uint32_t port = 0; if (options->endpoint != NULL) { struct aws_byte_cursor https_scheme = aws_byte_cursor_from_c_str("https"); struct aws_byte_cursor http_scheme = aws_byte_cursor_from_c_str("http"); const struct aws_byte_cursor *scheme = aws_uri_scheme(options->endpoint); is_https = aws_byte_cursor_eq_ignore_case(scheme, &https_scheme); if (!is_https && !aws_byte_cursor_eq_ignore_case(scheme, &http_scheme)) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Cannot create meta s3 request; unexpected scheme '" PRInSTR "' in endpoint override.", (void *)client, AWS_BYTE_CURSOR_PRI(*scheme)); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } port = aws_uri_port(options->endpoint); } struct aws_s3_meta_request *meta_request = client->vtable->meta_request_factory(client, options); if (meta_request == NULL) { AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "id=%p: Could not create new meta request.", (void *)client); return NULL; } bool error_occurred = false; /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); if (use_s3express_signing && !client->synced_data.s3express_provider_active) { AWS_LOGF_TRACE(AWS_LS_S3_CLIENT, "id=%p Create S3 Express provider for the client.", (void *)client); /** * Invoke the factory within the lock. We WARNED people uses their own factory to not use ANY client related * api during the factory. * * We cannot just release the lock and invoke the factory, because it can lead to the other request assume * the provider is active, and not waiting for the provider to be created. And lead to unexpected behavior. */ client->s3express_provider = client->s3express_provider_factory( client->allocator, client, s_s3express_provider_finish_destroy, client, client->factory_user_data); /* Provider is related to client, we don't need to clean it up if meta request failed. But, if provider * failed to be created, let's bail out earlier. 
*/ if (!client->s3express_provider) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Failed to create S3 Express provider for client due to error %d (%s)", (void *)client, aws_last_error_or_unknown(), aws_error_str(aws_last_error_or_unknown())); error_occurred = true; goto unlock; } client->synced_data.s3express_provider_active = true; } struct aws_string *endpoint_host_name = NULL; if (options->endpoint != NULL) { endpoint_host_name = aws_string_new_from_cursor(client->allocator, aws_uri_host_name(options->endpoint)); } else { struct aws_uri host_uri; if (aws_uri_init_parse(&host_uri, client->allocator, &host_header_value)) { error_occurred = true; goto unlock; } endpoint_host_name = aws_string_new_from_cursor(client->allocator, aws_uri_host_name(&host_uri)); aws_uri_clean_up(&host_uri); } struct aws_s3_endpoint *endpoint = NULL; struct aws_hash_element *endpoint_hash_element = NULL; if (use_s3express_signing) { meta_request->s3express_session_host = aws_string_new_from_string(client->allocator, endpoint_host_name); } int was_created = 0; if (aws_hash_table_create( &client->synced_data.endpoints, endpoint_host_name, &endpoint_hash_element, &was_created)) { aws_string_destroy(endpoint_host_name); error_occurred = true; goto unlock; } if (was_created) { struct aws_s3_endpoint_options endpoint_options = { .host_name = endpoint_host_name, .client_bootstrap = client->client_bootstrap, .tls_connection_options = is_https ? client->tls_connection_options : NULL, .dns_host_address_ttl_seconds = s_dns_host_address_ttl_seconds, .client = client, .max_connections = aws_s3_client_get_max_active_connections(client, NULL), .port = port, .proxy_config = client->proxy_config, .proxy_ev_settings = client->proxy_ev_settings, .connect_timeout_ms = client->connect_timeout_ms, .tcp_keep_alive_options = client->tcp_keep_alive_options, .monitoring_options = &client->monitoring_options, }; endpoint = aws_s3_endpoint_new(client->allocator, &endpoint_options); if (endpoint == NULL) { aws_hash_table_remove(&client->synced_data.endpoints, endpoint_host_name, NULL, NULL); aws_string_destroy(endpoint_host_name); error_occurred = true; goto unlock; } endpoint_hash_element->value = endpoint; ++client->synced_data.num_endpoints_allocated; } else { endpoint = endpoint_hash_element->value; aws_s3_endpoint_acquire(endpoint, true /*already_holding_lock*/); aws_string_destroy(endpoint_host_name); endpoint_host_name = NULL; } meta_request->endpoint = endpoint; s_s3_client_push_meta_request_synced(client, meta_request); s_s3_client_schedule_process_work_synced(client); unlock: aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ if (error_occurred) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Could not create meta request due to error %d (%s)", (void *)client, aws_last_error(), aws_error_str(aws_last_error())); meta_request = aws_s3_meta_request_release(meta_request); } else { AWS_LOGF_INFO(AWS_LS_S3_CLIENT, "id=%p: Created meta request %p", (void *)client, (void *)meta_request); } return meta_request; } static void s_s3_client_endpoint_shutdown_callback(struct aws_s3_client *client) { AWS_PRECONDITION(client); /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); --client->synced_data.num_endpoints_allocated; s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ } static struct aws_s3_meta_request *s_s3_client_meta_request_factory_default( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { 
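    /* (Overview, for orientation: this default factory looks at the original HTTP message and the options to pick a
     * meta request implementation. A GetObject becomes an auto-ranged get unless the request already targets a single
     * partNumber, a PutObject becomes either a single Default request (when the body is at or below the
     * multipart-upload threshold) or an auto-ranged put, CopyObject gets its own meta request type, and anything else
     * falls through to a Default meta request.) */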
AWS_PRECONDITION(client); AWS_PRECONDITION(options); const struct aws_http_headers *initial_message_headers = aws_http_message_get_headers(options->message); AWS_ASSERT(initial_message_headers); uint64_t content_length = 0; struct aws_byte_cursor content_length_cursor; bool content_length_found = false; if (!aws_http_headers_get(initial_message_headers, g_content_length_header_name, &content_length_cursor)) { if (aws_byte_cursor_utf8_parse_u64(content_length_cursor, &content_length)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not parse Content-Length header. header value is:" PRInSTR "", AWS_BYTE_CURSOR_PRI(content_length_cursor)); aws_raise_error(AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER); return NULL; } content_length_found = true; } /* There are multiple ways to pass the body in, ensure only 1 was used */ int body_source_count = 0; if (aws_http_message_get_body_stream(options->message) != NULL) { ++body_source_count; } if (options->send_filepath.len > 0) { ++body_source_count; } if (options->send_async_stream != NULL) { ++body_source_count; } if (body_source_count > 1) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create auto-ranged-put meta request." " More than one data source is set (filepath, async stream, body stream)."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } size_t part_size = client->part_size; if (options->part_size != 0) { if (options->part_size > SIZE_MAX) { part_size = SIZE_MAX; } else { part_size = (size_t)options->part_size; } } /* Call the appropriate meta-request new function. */ switch (options->type) { case AWS_S3_META_REQUEST_TYPE_GET_OBJECT: { struct aws_byte_cursor path_and_query; if (aws_http_message_get_request_path(options->message, &path_and_query) == AWS_OP_SUCCESS) { /* If the initial request already has partNumber, the request is not * splittable(?). Treat it as a Default request. * TODO: Still need tests to verify that the request of a part is * splittable or not */ struct aws_byte_cursor sub_string; AWS_ZERO_STRUCT(sub_string); /* The first split on '?' for path and query is path, the second is query */ if (aws_byte_cursor_next_split(&path_and_query, '?', &sub_string) == true) { aws_byte_cursor_next_split(&path_and_query, '?', &sub_string); struct aws_uri_param param; AWS_ZERO_STRUCT(param); struct aws_byte_cursor part_number_query_str = aws_byte_cursor_from_c_str("partNumber"); while (aws_query_string_next_param(sub_string, ¶m)) { if (aws_byte_cursor_eq(¶m.key, &part_number_query_str)) { return aws_s3_meta_request_default_new( client->allocator, client, AWS_S3_REQUEST_TYPE_GET_OBJECT, content_length, false /*should_compute_content_md5*/, options); } } } } return aws_s3_meta_request_auto_ranged_get_new(client->allocator, client, part_size, options); } case AWS_S3_META_REQUEST_TYPE_PUT_OBJECT: { if (body_source_count == 0) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create auto-ranged-put meta request." " Body must be set via filepath, async stream, or body stream."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (options->resume_token == NULL) { uint64_t client_max_part_size = client->max_part_size; if (part_size < g_s3_min_upload_part_size) { AWS_LOGF_WARN( AWS_LS_S3_META_REQUEST, "Config part size of %" PRIu64 " is less than the minimum upload part size of %" PRIu64 ". 
Using the minimum part-size for upload.",
                        (uint64_t)part_size,
                        (uint64_t)g_s3_min_upload_part_size);
                    part_size = g_s3_min_upload_part_size;
                }

                if (client_max_part_size < (uint64_t)g_s3_min_upload_part_size) {
                    AWS_LOGF_WARN(
                        AWS_LS_S3_META_REQUEST,
                        "Client config max part size of %" PRIu64 " is less than the minimum upload part size of %" PRIu64
                        ". Clamping to the minimum part-size for upload.",
                        (uint64_t)client_max_part_size,
                        (uint64_t)g_s3_min_upload_part_size);
                    client_max_part_size = (uint64_t)g_s3_min_upload_part_size;
                }

                uint32_t num_parts = 0;
                if (content_length_found) {
                    size_t out_part_size = 0;
                    if (aws_s3_calculate_optimal_mpu_part_size_and_num_parts(
                            content_length, part_size, client_max_part_size, &out_part_size, &num_parts)) {
                        return NULL;
                    }
                    part_size = out_part_size;
                }

                if (part_size != options->part_size && part_size != client->part_size) {
                    AWS_LOGF_DEBUG(
                        AWS_LS_S3_META_REQUEST,
                        "The multipart upload part size has been adjusted to %" PRIu64 "",
                        (uint64_t)part_size);
                }

                /* Default to client level setting */
                uint64_t multipart_upload_threshold = client->multipart_upload_threshold;
                if (options->multipart_upload_threshold != 0) {
                    /* If the threshold is set for the meta request, use it */
                    multipart_upload_threshold = options->multipart_upload_threshold;
                } else if (options->part_size != 0) {
                    /* If the threshold is not set, but the part size is set for the meta request, use it */
                    multipart_upload_threshold = part_size;
                }

                if (content_length_found && content_length <= multipart_upload_threshold) {
                    return aws_s3_meta_request_default_new(
                        client->allocator,
                        client,
                        AWS_S3_REQUEST_TYPE_PUT_OBJECT,
                        content_length,
                        client->compute_content_md5 == AWS_MR_CONTENT_MD5_ENABLED &&
                            !aws_http_headers_has(initial_message_headers, g_content_md5_header_name),
                        options);
                } else {
                    if (aws_s3_message_util_check_checksum_header(options->message)) {
                        /* A checksum header has been set but the request will be split. We fail the request. */
                        AWS_LOGF_ERROR(
                            AWS_LS_S3_META_REQUEST,
                            "Could not create auto-ranged-put meta request; checksum headers have been set for an "
                            "auto-ranged-put that will be split. Pre-calculated checksums are only supported for "
                            "single part upload.");
                        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
                        return NULL;
                    }
                }

                return aws_s3_meta_request_auto_ranged_put_new(
                    client->allocator, client, part_size, content_length_found, content_length, num_parts, options);
            } else {
                /* else using resume token */
                if (!content_length_found) {
                    AWS_LOGF_ERROR(
                        AWS_LS_S3_META_REQUEST,
                        "Could not create auto-ranged-put resume meta request; content_length must be specified.");
                    aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
                    return NULL;
                }

                /* Don't pass the part size and total num parts; the constructor will pick them up from the token. */
                return aws_s3_meta_request_auto_ranged_put_new(
                    client->allocator, client, 0, true, content_length, 0, options);
            }
        }
        case AWS_S3_META_REQUEST_TYPE_COPY_OBJECT: {
            return aws_s3_meta_request_copy_object_new(client->allocator, client, options);
        }
        case AWS_S3_META_REQUEST_TYPE_DEFAULT:
            return aws_s3_meta_request_default_new(
                client->allocator,
                client,
                AWS_S3_REQUEST_TYPE_UNKNOWN,
                content_length,
                false /*should_compute_content_md5*/,
                options);
        default:
            AWS_FATAL_ASSERT(false);
    }

    return NULL;
}

static void s_s3_client_push_meta_request_synced(
    struct aws_s3_client *client,
    struct aws_s3_meta_request *meta_request) {
    AWS_PRECONDITION(client);
    AWS_PRECONDITION(meta_request);
    ASSERT_SYNCED_DATA_LOCK_HELD(client);

    struct aws_s3_meta_request_work *meta_request_work =
        aws_mem_calloc(client->allocator, 1, sizeof(struct aws_s3_meta_request_work));
    meta_request_work->meta_request = aws_s3_meta_request_acquire(meta_request);

    aws_linked_list_push_back(&client->synced_data.pending_meta_request_work, &meta_request_work->node);
}

static void s_s3_client_schedule_process_work_synced(struct aws_s3_client *client) {
    AWS_PRECONDITION(client);
    AWS_PRECONDITION(client->vtable);
    AWS_PRECONDITION(client->vtable->schedule_process_work_synced);
    ASSERT_SYNCED_DATA_LOCK_HELD(client);

    client->vtable->schedule_process_work_synced(client);
}

static void s_s3_client_schedule_process_work_synced_default(struct aws_s3_client *client) {
    ASSERT_SYNCED_DATA_LOCK_HELD(client);

    if (client->synced_data.process_work_task_scheduled) {
        return;
    }

    aws_task_init(
        &client->synced_data.process_work_task,
        s_s3_client_process_work_task,
        client,
        "s3_client_process_work_task");

    aws_event_loop_schedule_task_now(client->process_work_event_loop, &client->synced_data.process_work_task);

    client->synced_data.process_work_task_scheduled = true;
}

/* Task function for trimming the client's buffer pool once the client has gone idle.
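 * (It is scheduled from s_s3_client_schedule_buffer_pool_trim_synced only when no requests are in flight, runs
 * s_buffer_pool_trim_time_offset_in_s seconds later on the process-work event loop, and does nothing if requests
 * have started up again in the meantime.)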
*/ static void s_s3_client_trim_buffer_pool_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { AWS_PRECONDITION(task); (void)task; (void)task_status; if (task_status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_s3_client *client = arg; AWS_PRECONDITION(client); client->threaded_data.trim_buffer_pool_task_scheduled = false; uint32_t num_reqs_in_flight = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight); if (num_reqs_in_flight == 0) { aws_s3_buffer_pool_trim(client->buffer_pool); } } static void s_s3_client_schedule_buffer_pool_trim_synced(struct aws_s3_client *client) { ASSERT_SYNCED_DATA_LOCK_HELD(client); if (client->threaded_data.trim_buffer_pool_task_scheduled) { return; } uint32_t num_reqs_in_flight = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight); if (num_reqs_in_flight > 0) { return; } aws_task_init( &client->synced_data.trim_buffer_pool_task, s_s3_client_trim_buffer_pool_task, client, "s3_client_buffer_pool_trim_task"); uint64_t trim_time = 0; aws_event_loop_current_clock_time(client->process_work_event_loop, &trim_time); trim_time += aws_timestamp_convert(s_buffer_pool_trim_time_offset_in_s, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_event_loop_schedule_task_future( client->process_work_event_loop, &client->synced_data.trim_buffer_pool_task, trim_time); client->threaded_data.trim_buffer_pool_task_scheduled = true; } void aws_s3_client_schedule_process_work(struct aws_s3_client *client) { AWS_PRECONDITION(client); /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ } static void s_s3_client_remove_meta_request_threaded( struct aws_s3_client *client, struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(client); AWS_PRECONDITION(meta_request); (void)client; aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node); meta_request->client_process_work_threaded_data.scheduled = false; aws_s3_meta_request_release(meta_request); } /* Task function for trying to find a request that can be processed. */ static void s_s3_client_process_work_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { AWS_PRECONDITION(task); (void)task; (void)task_status; /* Client keeps a reference to the event loop group; a 'canceled' status should not happen.*/ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY); struct aws_s3_client *client = arg; AWS_PRECONDITION(client); AWS_PRECONDITION(client->vtable); AWS_PRECONDITION(client->vtable->process_work); client->vtable->process_work(client); } static void s_s3_client_process_work_default(struct aws_s3_client *client) { AWS_PRECONDITION(client); AWS_PRECONDITION(client->vtable); AWS_PRECONDITION(client->vtable->finish_destroy); struct aws_linked_list meta_request_work_list; aws_linked_list_init(&meta_request_work_list); /*******************/ /* Step 1: Move relevant data into thread local memory. */ /*******************/ AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, "id=%p s_s3_client_process_work_default - Moving relevant synced_data into threaded_data.", (void *)client); /* BEGIN CRITICAL SECTION */ aws_s3_client_lock_synced_data(client); /* Once we exit this mutex, someone can reschedule this task. 
*/ client->synced_data.process_work_task_scheduled = false; client->synced_data.process_work_task_in_progress = true; if (client->synced_data.active) { s_s3_client_schedule_buffer_pool_trim_synced(client); } aws_linked_list_swap_contents(&meta_request_work_list, &client->synced_data.pending_meta_request_work); uint32_t num_requests_queued = aws_s3_client_queue_requests_threaded(client, &client->synced_data.prepared_requests, false); { int sub_result = aws_sub_u32_checked( client->threaded_data.num_requests_being_prepared, num_requests_queued, &client->threaded_data.num_requests_being_prepared); AWS_ASSERT(sub_result == AWS_OP_SUCCESS); (void)sub_result; } { int sub_result = aws_sub_u32_checked( client->threaded_data.num_requests_being_prepared, client->synced_data.num_failed_prepare_requests, &client->threaded_data.num_requests_being_prepared); client->synced_data.num_failed_prepare_requests = 0; AWS_ASSERT(sub_result == AWS_OP_SUCCESS); (void)sub_result; } uint32_t num_endpoints_in_table = (uint32_t)aws_hash_table_get_entry_count(&client->synced_data.endpoints); uint32_t num_endpoints_allocated = client->synced_data.num_endpoints_allocated; aws_s3_client_unlock_synced_data(client); /* END CRITICAL SECTION */ /*******************/ /* Step 2: Push meta requests into the thread local list if they haven't already been scheduled. */ /*******************/ AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, "id=%p s_s3_client_process_work_default - Processing any new meta requests.", (void *)client); while (!aws_linked_list_empty(&meta_request_work_list)) { struct aws_linked_list_node *node = aws_linked_list_pop_back(&meta_request_work_list); struct aws_s3_meta_request_work *meta_request_work = AWS_CONTAINER_OF(node, struct aws_s3_meta_request_work, node); AWS_FATAL_ASSERT(meta_request_work != NULL); AWS_FATAL_ASSERT(meta_request_work->meta_request != NULL); struct aws_s3_meta_request *meta_request = meta_request_work->meta_request; if (!meta_request->client_process_work_threaded_data.scheduled) { aws_linked_list_push_back( &client->threaded_data.meta_requests, &meta_request->client_process_work_threaded_data.node); meta_request->client_process_work_threaded_data.scheduled = true; } else { meta_request = aws_s3_meta_request_release(meta_request); } aws_mem_release(client->allocator, meta_request_work); } /*******************/ /* Step 3: Update relevant meta requests and connections. */ /*******************/ { AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Updating meta requests.", (void *)client); aws_s3_client_update_meta_requests_threaded(client); AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, "id=%p Updating connections, assigning requests where possible.", (void *)client); aws_s3_client_update_connections_threaded(client); } /*******************/ /* Step 4: Log client stats. 
*/ /*******************/ { uint32_t num_requests_tracked_requests = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight); uint32_t num_auto_ranged_get_network_io = s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_GET_OBJECT); uint32_t num_auto_ranged_put_network_io = s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_PUT_OBJECT); uint32_t num_auto_default_network_io = s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_DEFAULT); uint32_t num_requests_network_io = s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_MAX); uint32_t num_requests_stream_queued_waiting = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_stream_queued_waiting); uint32_t num_requests_being_prepared = client->threaded_data.num_requests_being_prepared; uint32_t num_requests_streaming_response = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_streaming_response); uint32_t total_approx_requests = num_requests_network_io + num_requests_stream_queued_waiting + num_requests_streaming_response + num_requests_being_prepared + client->threaded_data.request_queue_size; AWS_LOGF( s_log_level_client_stats, AWS_LS_S3_CLIENT_STATS, "id=%p Requests-in-flight(approx/exact):%d/%d Requests-preparing:%d Requests-queued:%d " "Requests-network(get/put/default/total):%d/%d/%d/%d Requests-streaming-waiting:%d " "Requests-streaming-response:%d " " Endpoints(in-table/allocated):%d/%d", (void *)client, total_approx_requests, num_requests_tracked_requests, num_requests_being_prepared, client->threaded_data.request_queue_size, num_auto_ranged_get_network_io, num_auto_ranged_put_network_io, num_auto_default_network_io, num_requests_network_io, num_requests_stream_queued_waiting, num_requests_streaming_response, num_endpoints_in_table, num_endpoints_allocated); } /*******************/ /* Step 5: Check for client shutdown. */ /*******************/ { /* BEGIN CRITICAL SECTION */ aws_s3_client_lock_synced_data(client); client->synced_data.process_work_task_in_progress = false; /* This flag should never be set twice. 
If it was, that means a double-free could occur.*/ AWS_ASSERT(!client->synced_data.finish_destroy); bool finish_destroy = client->synced_data.active == false && client->synced_data.start_destroy_executing == false && client->synced_data.body_streaming_elg_allocated == false && client->synced_data.process_work_task_scheduled == false && client->synced_data.process_work_task_in_progress == false && client->synced_data.s3express_provider_active == false && client->synced_data.num_endpoints_allocated == 0; client->synced_data.finish_destroy = finish_destroy; if (!client->synced_data.active) { AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, "id=%p Client shutdown progress: starting_destroy_executing=%d body_streaming_elg_allocated=%d " "process_work_task_scheduled=%d process_work_task_in_progress=%d num_endpoints_allocated=%d " "s3express_provider_active=%d finish_destroy=%d", (void *)client, (int)client->synced_data.start_destroy_executing, (int)client->synced_data.body_streaming_elg_allocated, (int)client->synced_data.process_work_task_scheduled, (int)client->synced_data.process_work_task_in_progress, (int)client->synced_data.num_endpoints_allocated, (int)client->synced_data.s3express_provider_active, (int)client->synced_data.finish_destroy); } aws_s3_client_unlock_synced_data(client); /* END CRITICAL SECTION */ if (finish_destroy) { client->vtable->finish_destroy(client); } } } static void s_s3_client_prepare_callback_queue_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code, void *user_data); static bool s_s3_client_should_update_meta_request( struct aws_s3_client *client, struct aws_s3_meta_request *meta_request, uint32_t num_requests_in_flight, const uint32_t max_requests_in_flight, const uint32_t max_requests_prepare) { /* CreateSession has high priority to bypass the checks. */ if (meta_request->type == AWS_S3_META_REQUEST_TYPE_DEFAULT) { struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; if (aws_string_eq_c_str(meta_request_default->operation_name, "CreateSession")) { return true; } } /** * If number of being-prepared + already-prepared-and-queued requests is more than the max that can * be in the preparation stage. * Or total number of requests tracked by the client is more than the max tracked ("in flight") * requests. * * We cannot create more requests for this meta request. */ if ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) >= max_requests_prepare) { return false; } if (num_requests_in_flight >= max_requests_in_flight) { return false; } /* If this particular endpoint doesn't have any known addresses yet, then we don't want to go full speed in * ramping up requests just yet. If there is already enough in the queue for one address (even if those * aren't for this particular endpoint) we skip over this meta request for now. 
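 * (Concretely, while the host resolver still reports zero addresses for this endpoint, the client caps the number
 * of being-prepared plus queued requests at g_max_num_connections_per_vip before asking this meta request for more
 * work.)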
*/ struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_ASSERT(endpoint != NULL); AWS_ASSERT(client->vtable->get_host_address_count); size_t num_known_vips = client->vtable->get_host_address_count( client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A); if (num_known_vips == 0 && (client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) >= g_max_num_connections_per_vip) { return false; } /* Nothing blocks the meta request to create more requests */ return true; } void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { AWS_PRECONDITION(client); const uint32_t max_requests_in_flight = aws_s3_client_get_max_requests_in_flight(client); const uint32_t max_requests_prepare = aws_s3_client_get_max_requests_prepare(client); struct aws_linked_list meta_requests_work_remaining; aws_linked_list_init(&meta_requests_work_remaining); uint32_t num_requests_in_flight = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight); const uint32_t pass_flags[] = { AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE, 0, }; const uint32_t num_passes = AWS_ARRAY_SIZE(pass_flags); aws_s3_buffer_pool_remove_reservation_hold(client->buffer_pool); for (uint32_t pass_index = 0; pass_index < num_passes; ++pass_index) { /** * Iterate through the meta requests to update meta requests and get new requests that can then be prepared + * (reading from any streams, signing, etc.) for sending. */ while (!aws_linked_list_empty(&client->threaded_data.meta_requests)) { struct aws_linked_list_node *meta_request_node = aws_linked_list_begin(&client->threaded_data.meta_requests); struct aws_s3_meta_request *meta_request = AWS_CONTAINER_OF(meta_request_node, struct aws_s3_meta_request, client_process_work_threaded_data); if (!s_s3_client_should_update_meta_request( client, meta_request, num_requests_in_flight, max_requests_in_flight, max_requests_prepare)) { /* Move the meta request to be processed from next loop. */ aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node); aws_linked_list_push_back( &meta_requests_work_remaining, &meta_request->client_process_work_threaded_data.node); continue; } struct aws_s3_request *request = NULL; /* Try to grab the next request from the meta request. */ /* TODO: should we bail out if request fails to update due to mem or * continue going and hopping that following reqs can fit into mem? * check if avail space is at least part size? */ bool work_remaining = aws_s3_meta_request_update(meta_request, pass_flags[pass_index], &request); if (work_remaining) { /* If there is work remaining, but we didn't get a request back, take the meta request out of the * list so that we don't use it again during this function, with the intention of putting it back in * the list before this function ends. 
*/ if (request == NULL) { aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node); aws_linked_list_push_back( &meta_requests_work_remaining, &meta_request->client_process_work_threaded_data.node); } else { request->tracked_by_client = true; ++client->threaded_data.num_requests_being_prepared; num_requests_in_flight = (uint32_t)aws_atomic_fetch_add(&client->stats.num_requests_in_flight, 1) + 1; aws_s3_meta_request_prepare_request( meta_request, request, s_s3_client_prepare_callback_queue_request, client); } } else { s_s3_client_remove_meta_request_threaded(client, meta_request); } } aws_linked_list_move_all_front(&client->threaded_data.meta_requests, &meta_requests_work_remaining); } } static void s_s3_client_meta_request_finished_request( struct aws_s3_client *client, struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_PRECONDITION(client); AWS_PRECONDITION(request); if (request->tracked_by_client) { /* BEGIN CRITICAL SECTION */ aws_s3_client_lock_synced_data(client); aws_atomic_fetch_sub(&client->stats.num_requests_in_flight, 1); s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); /* END CRITICAL SECTION */ } aws_s3_meta_request_finished_request(meta_request, request, error_code); } static void s_s3_client_prepare_callback_queue_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code, void *user_data) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(request); struct aws_s3_client *client = user_data; AWS_PRECONDITION(client); if (error_code != AWS_ERROR_SUCCESS) { s_s3_client_meta_request_finished_request(client, meta_request, request, error_code); request = aws_s3_request_release(request); } /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); if (error_code == AWS_ERROR_SUCCESS) { aws_linked_list_push_back(&client->synced_data.prepared_requests, &request->node); } else { ++client->synced_data.num_failed_prepare_requests; } s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ } void aws_s3_client_update_connections_threaded(struct aws_s3_client *client) { AWS_PRECONDITION(client); AWS_PRECONDITION(client->vtable); struct aws_linked_list left_over_requests; aws_linked_list_init(&left_over_requests); while (s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_MAX) < aws_s3_client_get_max_active_connections(client, NULL) && !aws_linked_list_empty(&client->threaded_data.request_queue)) { struct aws_s3_request *request = aws_s3_client_dequeue_request_threaded(client); const uint32_t max_active_connections = aws_s3_client_get_max_active_connections(client, request->meta_request); if (request->is_noop) { /* If request is no-op, finishes and cleans up the request */ s_s3_client_meta_request_finished_request(client, request->meta_request, request, AWS_ERROR_SUCCESS); request = aws_s3_request_release(request); } else if (!request->always_send && aws_s3_meta_request_has_finish_result(request->meta_request)) { /* Unless the request is marked "always send", if this meta request has a finish result, then finish the * request now and release it. 
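 * (For example, the AbortMultipartUpload request in the copy-object flow is created with
 * AWS_S3_REQUEST_FLAG_ALWAYS_SEND so that the cleanup request still goes out after the meta request has already
 * failed.)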
*/ s_s3_client_meta_request_finished_request(client, request->meta_request, request, AWS_ERROR_S3_CANCELED); request = aws_s3_request_release(request); } else if ( s_s3_client_get_num_requests_network_io(client, request->meta_request->type) < max_active_connections) { s_s3_client_create_connection_for_request(client, request); } else { /* Push the request into the left-over list to be used in a future call of this function. */ aws_linked_list_push_back(&left_over_requests, &request->node); } } aws_s3_client_queue_requests_threaded(client, &left_over_requests, true); } static void s_s3_client_acquired_retry_token( struct aws_retry_strategy *retry_strategy, int error_code, struct aws_retry_token *token, void *user_data); static void s_s3_client_retry_ready(struct aws_retry_token *token, int error_code, void *user_data); static void s_s3_client_create_connection_for_request_default( struct aws_s3_client *client, struct aws_s3_request *request); static void s_s3_client_create_connection_for_request(struct aws_s3_client *client, struct aws_s3_request *request) { AWS_PRECONDITION(client); AWS_PRECONDITION(client->vtable); if (client->vtable->create_connection_for_request) { client->vtable->create_connection_for_request(client, request); return; } s_s3_client_create_connection_for_request_default(client, request); } static void s_s3_client_create_connection_for_request_default( struct aws_s3_client *client, struct aws_s3_request *request) { AWS_PRECONDITION(client); AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); aws_atomic_fetch_add(&client->stats.num_requests_network_io[meta_request->type], 1); struct aws_s3_connection *connection = aws_mem_calloc(client->allocator, 1, sizeof(struct aws_s3_connection)); connection->endpoint = aws_s3_endpoint_acquire(meta_request->endpoint, false /*already_holding_lock*/); connection->request = request; struct aws_byte_cursor host_header_value; AWS_ZERO_STRUCT(host_header_value); struct aws_http_headers *message_headers = aws_http_message_get_headers(meta_request->initial_request_message); AWS_ASSERT(message_headers); int result = aws_http_headers_get(message_headers, g_host_header_name, &host_header_value); AWS_ASSERT(result == AWS_OP_SUCCESS); (void)result; if (aws_retry_strategy_acquire_retry_token( client->retry_strategy, &host_header_value, s_s3_client_acquired_retry_token, connection, 0)) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Client could not acquire retry token for request %p due to error %d (%s)", (void *)client, (void *)request, aws_last_error_or_unknown(), aws_error_str(aws_last_error_or_unknown())); goto reset_connection; } return; reset_connection: aws_s3_client_notify_connection_finished( client, connection, aws_last_error_or_unknown(), AWS_S3_CONNECTION_FINISH_CODE_FAILED); } static void s_s3_client_acquired_retry_token( struct aws_retry_strategy *retry_strategy, int error_code, struct aws_retry_token *token, void *user_data) { AWS_PRECONDITION(retry_strategy); (void)retry_strategy; struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_ASSERT(endpoint != NULL); struct aws_s3_client *client = endpoint->client; AWS_ASSERT(client != NULL); if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( 
AWS_LS_S3_CLIENT, "id=%p Client could not get retry token for connection %p processing request %p due to error %d (%s)", (void *)client, (void *)connection, (void *)request, error_code, aws_error_str(error_code)); goto error_clean_up; } AWS_ASSERT(token); connection->retry_token = token; AWS_ASSERT(client->vtable->acquire_http_connection); /* client needs to be kept alive until s_s3_client_on_acquire_http_connection completes */ /* TODO: not a blocker, consider managing the life time of aws_s3_client from aws_s3_endpoint to simplify usage */ aws_s3_client_acquire(client); client->vtable->acquire_http_connection( endpoint->http_connection_manager, s_s3_client_on_acquire_http_connection, connection); return; error_clean_up: aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED); } static void s_s3_client_on_acquire_http_connection( struct aws_http_connection *incoming_http_connection, int error_code, void *user_data) { struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_ASSERT(endpoint != NULL); struct aws_s3_client *client = endpoint->client; AWS_ASSERT(client != NULL); if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_ENDPOINT, "id=%p: Could not acquire connection due to error code %d (%s)", (void *)endpoint, error_code, aws_error_str(error_code)); if (error_code == AWS_IO_DNS_INVALID_NAME || error_code == AWS_IO_TLS_ERROR_NEGOTIATION_FAILURE) { /** * Fall fast without retry * - Invalid DNS name will not change after retry. * - TLS negotiation is expensive and retry will not help in most case. */ goto error_fail; } goto error_retry; } connection->http_connection = incoming_http_connection; aws_s3_meta_request_send_request(meta_request, connection); aws_s3_client_release(client); /* kept since this callback was registered */ return; error_retry: aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_RETRY); aws_s3_client_release(client); /* kept since this callback was registered */ return; error_fail: aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED); aws_s3_client_release(client); /* kept since this callback was registered */ } /* Called by aws_s3_meta_request when it has finished using this connection for a single request. */ void aws_s3_client_notify_connection_finished( struct aws_s3_client *client, struct aws_s3_connection *connection, int error_code, enum aws_s3_connection_finish_code finish_code) { AWS_PRECONDITION(client); AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->initial_request_message); struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_PRECONDITION(endpoint); if (request->send_data.metrics) { request->send_data.metrics->crt_info_metrics.error_code = error_code; } /* If we're trying to set up a retry... 
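 * (The error type handed to the retry strategy below is derived from the S3 error code: AWS_ERROR_S3_INTERNAL_ERROR
 * is reported as a server error, AWS_ERROR_S3_SLOW_DOWN as throttling, and anything else as a transient error.)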
*/ if (finish_code == AWS_S3_CONNECTION_FINISH_CODE_RETRY) { if (connection->retry_token == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Client could not schedule retry of request %p for meta request %p, as retry token is NULL.", (void *)client, (void *)request, (void *)meta_request); goto reset_connection; } if (aws_s3_meta_request_is_finished(meta_request)) { AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, "id=%p Client not scheduling retry of request %p for meta request %p with token %p because meta " "request has been flagged as finished.", (void *)client, (void *)request, (void *)meta_request, (void *)connection->retry_token); goto reset_connection; } AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, "id=%p Client scheduling retry of request %p for meta request %p with token %p with error code %d (%s).", (void *)client, (void *)request, (void *)meta_request, (void *)connection->retry_token, error_code, aws_error_str(error_code)); enum aws_retry_error_type error_type = AWS_RETRY_ERROR_TYPE_TRANSIENT; switch (error_code) { case AWS_ERROR_S3_INTERNAL_ERROR: error_type = AWS_RETRY_ERROR_TYPE_SERVER_ERROR; break; case AWS_ERROR_S3_SLOW_DOWN: error_type = AWS_RETRY_ERROR_TYPE_THROTTLING; break; } if (connection->http_connection != NULL) { AWS_ASSERT(endpoint->http_connection_manager); aws_http_connection_manager_release_connection( endpoint->http_connection_manager, connection->http_connection); connection->http_connection = NULL; } /* Ask the retry strategy to schedule a retry of the request. */ if (aws_retry_strategy_schedule_retry( connection->retry_token, error_type, s_s3_client_retry_ready, connection)) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Client could not retry request %p for meta request %p with token %p due to error %d (%s)", (void *)client, (void *)request, (void *)meta_request, (void *)connection->retry_token, aws_last_error_or_unknown(), aws_error_str(aws_last_error_or_unknown())); goto reset_connection; } return; } reset_connection: if (connection->retry_token != NULL) { /* If we have a retry token and successfully finished, record that success. */ if (finish_code == AWS_S3_CONNECTION_FINISH_CODE_SUCCESS) { aws_retry_token_record_success(connection->retry_token); } aws_retry_token_release(connection->retry_token); connection->retry_token = NULL; } /* If we weren't successful, and we're here, that means this failure is not eligible for a retry. So finish the * request, and close our HTTP connection. 
*/ if (finish_code != AWS_S3_CONNECTION_FINISH_CODE_SUCCESS) { if (connection->http_connection != NULL) { aws_http_connection_close(connection->http_connection); } } aws_atomic_fetch_sub(&client->stats.num_requests_network_io[meta_request->type], 1); s_s3_client_meta_request_finished_request(client, meta_request, request, error_code); if (connection->http_connection != NULL) { AWS_ASSERT(endpoint->http_connection_manager); aws_http_connection_manager_release_connection(endpoint->http_connection_manager, connection->http_connection); connection->http_connection = NULL; } if (connection->request != NULL) { connection->request = aws_s3_request_release(connection->request); } aws_retry_token_release(connection->retry_token); connection->retry_token = NULL; aws_s3_endpoint_release(connection->endpoint); connection->endpoint = NULL; aws_mem_release(client->allocator, connection); connection = NULL; /* BEGIN CRITICAL SECTION */ { aws_s3_client_lock_synced_data(client); s_s3_client_schedule_process_work_synced(client); aws_s3_client_unlock_synced_data(client); } /* END CRITICAL SECTION */ } static void s_s3_client_prepare_request_callback_retry_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code, void *user_data); static void s_s3_client_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) { AWS_PRECONDITION(token); (void)token; struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_PRECONDITION(endpoint); struct aws_s3_client *client = endpoint->client; AWS_PRECONDITION(client); /* If we couldn't retry this request, then bail on the entire meta request. 
*/ if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "id=%p Client could not retry request %p for meta request %p due to error %d (%s)", (void *)client, (void *)meta_request, (void *)request, error_code, aws_error_str(error_code)); goto error_clean_up; } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Client retrying request %p for meta request %p on connection %p with retry token %p", (void *)client, (void *)request, (void *)meta_request, (void *)connection, (void *)connection->retry_token); aws_s3_meta_request_prepare_request( meta_request, request, s_s3_client_prepare_request_callback_retry_request, connection); return; error_clean_up: aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED); } static void s_s3_client_prepare_request_callback_retry_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code, void *user_data) { AWS_PRECONDITION(meta_request); (void)meta_request; AWS_PRECONDITION(request); (void)request; struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_endpoint *endpoint = meta_request->endpoint; AWS_ASSERT(endpoint != NULL); struct aws_s3_client *client = endpoint->client; AWS_ASSERT(client != NULL); if (error_code == AWS_ERROR_SUCCESS) { AWS_ASSERT(connection->retry_token); s_s3_client_acquired_retry_token( client->retry_strategy, AWS_ERROR_SUCCESS, connection->retry_token, connection); } else { aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED); } } static void s_resume_token_ref_count_zero_callback(void *arg) { struct aws_s3_meta_request_resume_token *token = arg; aws_string_destroy(token->multipart_upload_id); aws_mem_release(token->allocator, token); } struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new(struct aws_allocator *allocator) { struct aws_s3_meta_request_resume_token *token = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_meta_request_resume_token)); token->allocator = allocator; aws_ref_count_init(&token->ref_count, token, s_resume_token_ref_count_zero_callback); return token; } struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new_upload( struct aws_allocator *allocator, const struct aws_s3_upload_resume_token_options *options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(options); if (options->part_size > SIZE_MAX) { aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); return NULL; } struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new(allocator); token->multipart_upload_id = aws_string_new_from_cursor(allocator, &options->upload_id); token->part_size = (size_t)options->part_size; token->total_num_parts = options->total_num_parts; token->num_parts_completed = options->num_parts_completed; token->type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; return token; } struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_acquire( struct aws_s3_meta_request_resume_token *resume_token) { if (resume_token) { aws_ref_count_acquire(&resume_token->ref_count); } return resume_token; } struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_release( struct aws_s3_meta_request_resume_token *resume_token) { if (resume_token) { aws_ref_count_release(&resume_token->ref_count); } return NULL; } enum aws_s3_meta_request_type aws_s3_meta_request_resume_token_type( struct aws_s3_meta_request_resume_token *resume_token) { 
    AWS_FATAL_PRECONDITION(resume_token);
    return resume_token->type;
}

uint64_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token *resume_token) {
    AWS_FATAL_PRECONDITION(resume_token);
    return (uint64_t)resume_token->part_size;
}

size_t aws_s3_meta_request_resume_token_total_num_parts(struct aws_s3_meta_request_resume_token *resume_token) {
    AWS_FATAL_PRECONDITION(resume_token);
    return resume_token->total_num_parts;
}

size_t aws_s3_meta_request_resume_token_num_parts_completed(struct aws_s3_meta_request_resume_token *resume_token) {
    AWS_FATAL_PRECONDITION(resume_token);
    return resume_token->num_parts_completed;
}

struct aws_byte_cursor aws_s3_meta_request_resume_token_upload_id(
    struct aws_s3_meta_request_resume_token *resume_token) {
    AWS_FATAL_PRECONDITION(resume_token);
    if (resume_token->type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT && resume_token->multipart_upload_id != NULL) {
        return aws_byte_cursor_from_string(resume_token->multipart_upload_id);
    }
    return aws_byte_cursor_from_c_str("");
}

static uint64_t s_upload_timeout_threshold_ns = 5000000000; /* 5 Secs */
const size_t g_expect_timeout_offset_ms =
    700; /* 0.7 secs. In experiments on a c5n.18xlarge machine uploading 30 GiB, this gave us the best performance. */

/**
 * The upload timeout optimization: explained.
 *
 * Sometimes, S3 is extremely slow responding to an upload.
 * In these cases, it's much faster to cancel and resend the upload,
 * vs waiting 5sec for the slow response.
 *
 * Typically, S3 responds to an upload in 0.2sec after the request is fully received.
 * But occasionally (about 0.1%) it takes 5sec to respond.
 * In a large 30GiB file upload, you can expect about 4 parts to suffer from
 * a slow response. If one of these parts is near the end of the file,
 * then we end up sitting around doing nothing for up to 5sec, waiting
 * for this final slow upload to complete.
 *
 * We use the response_first_byte_timeout HTTP option to cancel uploads
 * suffering from a slow response. But how should we set it? A fast 100Gbps
 * machine definitely wants it! But a slow computer does not. A slow computer
 * would be better off waiting 5sec for the response, vs re-uploading the whole request.
 *
 * The current algorithm:
 * 1. Start without a timeout value. After 10 requests have completed, we know roughly how long a request takes on
 *    average, and decide whether a timeout is worth setting at all (i.e. whether the average request takes more than
 *    5 secs). TODO: if the client uses different part sizes, this doesn't make sense.
 * 2. If a timeout is worth setting, start with a default timeout value of 1 sec.
 * 3. If a request finishes successfully, use the average response_to_first_byte_time + g_expect_timeout_offset_ms as
 *    our expected timeout value. (TODO: The real expected timeout value should be a P99 of all the requests.)
 * 3.1 Adjust the current timeout value toward the expected timeout value via
 *     0.99 * (current timeout) + 0.01 * (expected timeout), to get closer to the expected timeout value.
 * 4. If a request timed out, check the timeout rate.
 * 4.1 If the timeout rate is larger than 0.1%, increase the timeout value by 100ms (checking the timeout value the
 *     request was made with; if the current timeout has already been raised past it, skip the update).
 * 4.2 If the timeout rate is larger than 1%, increase the timeout value by 1 sec (if needed), and clear the rate
 *     tracking so that the exact rate is measured against the new timeout value.
 * 4.3 Once the timeout value is larger than 5 secs, we stop the process.
 *
 * Invoked from `s_s3_auto_ranged_put_send_request_finish`.
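 *
 * As a rough illustration of step 3.1 (simplified shorthand, not separate code; the names stand in for the
 * variables used in the function below), every successful part nudges the timeout like this:
 *
 *     expected_ns = average_response_to_first_byte_ns + expect_timeout_offset_ns;
 *     timeout_ns  = 0.99 * timeout_ns + 0.01 * expected_ns;
 *
 * so the value is dominated by roughly the last few hundred successful parts, and a single slow response barely
 * moves it. For step 4, with (illustrative numbers) 10,000 completed parts since the last reset, more than 10
 * timeouts (0.1%) triggers the +100ms bump and more than 100 timeouts (1%) triggers the +1s bump plus a reset of
 * the counters.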
 */
void aws_s3_client_update_upload_part_timeout(
    struct aws_s3_client *client,
    struct aws_s3_request *finished_upload_part_request,
    int finished_error_code) {

    aws_s3_client_lock_synced_data(client);
    struct aws_s3_upload_part_timeout_stats *stats = &client->synced_data.upload_part_stats;
    if (stats->stop_timeout) {
        /* Timeout was disabled */
        goto unlock;
    }

    struct aws_s3_request_metrics *metrics = finished_upload_part_request->send_data.metrics;
    size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms);
    uint64_t current_timeout_ns =
        aws_timestamp_convert(current_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
    uint64_t updated_timeout_ns = 0;
    uint64_t expect_timeout_offset_ns =
        aws_timestamp_convert(g_expect_timeout_offset_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);

    switch (finished_error_code) {
        case AWS_ERROR_SUCCESS:
            /* We are only interested in requests that succeeded */
            stats->num_successful_upload_requests = aws_add_u64_saturating(stats->num_successful_upload_requests, 1);
            if (stats->num_successful_upload_requests <= 10) {
                /* Gather the data */
                uint64_t request_time_ns =
                    metrics->time_metrics.receive_end_timestamp_ns - metrics->time_metrics.send_start_timestamp_ns;
                stats->initial_request_time.sum_ns =
                    aws_add_u64_saturating(stats->initial_request_time.sum_ns, request_time_ns);
                ++stats->initial_request_time.num_samples;
                if (stats->num_successful_upload_requests == 10) {
                    /* Decide whether we need a timeout or not */
                    uint64_t average_request_time_ns =
                        stats->initial_request_time.sum_ns / stats->initial_request_time.num_samples;
                    if (average_request_time_ns >= s_upload_timeout_threshold_ns) {
                        /* We don't need a timeout, as a retry would be slower than just waiting for the server to
                         * respond */
                        stats->stop_timeout = true;
                    } else {
                        /* Start the timeout at 1 sec */
                        aws_atomic_store_int(&client->upload_timeout_ms, 1000);
                    }
                }
                goto unlock;
            }
            /* Start updating the timeout for the success case */
            stats->timeout_rate_tracking.num_completed =
                aws_add_u64_saturating(stats->timeout_rate_tracking.num_completed, 1);

            /* Response-to-first-byte is the time from when the request finished sending until the first byte of the
             * response was received */
            uint64_t response_to_first_byte_time_ns =
                metrics->time_metrics.receive_start_timestamp_ns - metrics->time_metrics.send_end_timestamp_ns;
            stats->response_to_first_byte_time.sum_ns =
                aws_add_u64_saturating(stats->response_to_first_byte_time.sum_ns, response_to_first_byte_time_ns);
            stats->response_to_first_byte_time.num_samples =
                aws_add_u64_saturating(stats->response_to_first_byte_time.num_samples, 1);

            uint64_t average_response_to_first_byte_time_ns =
                stats->response_to_first_byte_time.sum_ns / stats->response_to_first_byte_time.num_samples;
            uint64_t expected_timeout_ns = average_response_to_first_byte_time_ns + expect_timeout_offset_ns;
            double timeout_ns_double = (double)current_timeout_ns * 0.99 + (double)expected_timeout_ns * 0.01;
            updated_timeout_ns = (uint64_t)timeout_ns_double;
            break;
        case AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT:
            if (stats->num_successful_upload_requests < 10) {
                goto unlock;
            }
            /* Start updating the timeout for the timed-out case */
            stats->timeout_rate_tracking.num_completed =
                aws_add_u64_saturating(stats->timeout_rate_tracking.num_completed, 1);
            stats->timeout_rate_tracking.num_failed =
                aws_add_u64_saturating(stats->timeout_rate_tracking.num_failed, 1);
            uint64_t timeout_threshold = (uint64_t)ceil((double)stats->timeout_rate_tracking.num_completed / 100);
            uint64_t warning_threshold = (uint64_t)ceil((double)stats->timeout_rate_tracking.num_completed /
                1000);
            if (stats->timeout_rate_tracking.num_failed > timeout_threshold) {
                /* Reset the rate tracking: we are above 1%, so the old record no longer applies. */
                AWS_LOGF_WARN(
                    AWS_LS_S3_CLIENT,
                    "id=%p Client upload part timeout rate is larger than expected, current timeout is %zu, bumping it "
                    "up. The request's original timeout was: %zu",
                    (void *)client,
                    current_timeout_ms,
                    finished_upload_part_request->upload_timeout_ms);
                stats->timeout_rate_tracking.num_completed = 0;
                stats->timeout_rate_tracking.num_failed = 0;
                if (finished_upload_part_request->upload_timeout_ms + 1000 > current_timeout_ms) {
                    /* Update the timeout by adding 1 sec, but only when it's worth doing so */
                    updated_timeout_ns = aws_add_u64_saturating(
                        current_timeout_ns, aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL));
                }
            } else if (stats->timeout_rate_tracking.num_failed > warning_threshold) {
                if (finished_upload_part_request->upload_timeout_ms + 100 > current_timeout_ms) {
                    /* Only update the timeout by adding 100 ms if the request was made with a long enough timeout. */
                    updated_timeout_ns = aws_add_u64_saturating(
                        current_timeout_ns, aws_timestamp_convert(100, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL));
                }
            }
            break;
        default:
            break;
    }

    if (updated_timeout_ns != 0) {
        if (updated_timeout_ns > s_upload_timeout_threshold_ns) {
            /* Stop using a timeout: waiting for the server to respond will be faster than setting our own timeout */
            stats->stop_timeout = true;
            /* Unset the upload_timeout */
            updated_timeout_ns = 0;
        }
        /* Apply the updated timeout */
        aws_atomic_store_int(
            &client->upload_timeout_ms,
            (size_t)aws_timestamp_convert(updated_timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL));
    }

unlock:
    aws_s3_client_unlock_synced_data(client);
}
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_copy_object.c000066400000000000000000001033141456575232400241300ustar00rootroot00000000000000/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include "aws/s3/private/s3_copy_object.h"
#include "aws/s3/private/s3_request_messages.h"
#include "aws/s3/private/s3_util.h"
#include

/* Objects smaller than the constant below are sent as a single S3 CopyObject request instead of a multipart copy */
static const size_t s_multipart_copy_minimum_object_size = GB_TO_BYTES(1);

static const size_t s_complete_multipart_upload_init_body_size_bytes = 512;
static const size_t s_abort_multipart_upload_init_body_size_bytes = 512;

/* TODO: make this configurable or at least expose it.
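 * (For rough scale: with this 128 MiB floor and the 1 GiB multipart-copy threshold above, the smallest multipart
 * copies use about 8 parts, and aws_s3_calculate_optimal_mpu_part_size_and_num_parts() grows the part size for
 * larger objects so the part count stays within g_s3_max_num_upload_parts.)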
*/ const size_t s_min_copy_part_size = MB_TO_BYTES(128); static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), }; static void s_s3_meta_request_copy_object_destroy(struct aws_s3_meta_request *meta_request); static bool s_s3_copy_object_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); static struct aws_future_void *s_s3_copy_object_prepare_request(struct aws_s3_request *request); static void s_s3_copy_object_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code); static struct aws_s3_meta_request_vtable s_s3_copy_object_vtable = { .update = s_s3_copy_object_update, .send_request_finish = aws_s3_meta_request_send_request_finish_default, .prepare_request = s_s3_copy_object_prepare_request, .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default, .sign_request = aws_s3_meta_request_sign_request_default, .finished_request = s_s3_copy_object_request_finished, .destroy = s_s3_meta_request_copy_object_destroy, .finish = aws_s3_meta_request_finish_default, }; /* Allocate a new copy object meta request */ struct aws_s3_meta_request *aws_s3_meta_request_copy_object_new( struct aws_allocator *allocator, struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { /* These should already have been validated by the caller. */ AWS_PRECONDITION(allocator); AWS_PRECONDITION(client); AWS_PRECONDITION(options); AWS_PRECONDITION(options->message); struct aws_s3_copy_object *copy_object = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_copy_object)); /* part size and content length will be fetched later using a HEAD object request */ const size_t UNKNOWN_PART_SIZE = 0; const size_t UNKNOWN_CONTENT_LENGTH = 0; const int UNKNOWN_NUM_PARTS = 0; if (aws_s3_meta_request_init_base( allocator, client, UNKNOWN_PART_SIZE, false, options, copy_object, &s_s3_copy_object_vtable, ©_object->base)) { aws_mem_release(allocator, copy_object); return NULL; } aws_array_list_init_dynamic( ©_object->synced_data.part_list, allocator, 0, sizeof(struct aws_s3_mpu_part_info *)); copy_object->synced_data.content_length = UNKNOWN_CONTENT_LENGTH; copy_object->synced_data.total_num_parts = UNKNOWN_NUM_PARTS; copy_object->threaded_update_data.next_part_number = 1; AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Created new CopyObject Meta Request.", (void *)©_object->base); return ©_object->base; } static void s_s3_meta_request_copy_object_destroy(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); struct aws_s3_copy_object *copy_object = meta_request->impl; aws_string_destroy(copy_object->upload_id); copy_object->upload_id = NULL; for (size_t part_index = 0; part_index < aws_array_list_length(©_object->synced_data.part_list); ++part_index) { struct aws_s3_mpu_part_info *part = NULL; aws_array_list_get_at(©_object->synced_data.part_list, &part, part_index); aws_string_destroy(part->etag); aws_byte_buf_clean_up(&part->checksum_base64); aws_mem_release(meta_request->allocator, part); } aws_array_list_clean_up(©_object->synced_data.part_list); aws_http_headers_release(copy_object->synced_data.needed_response_headers); 
aws_mem_release(meta_request->allocator, copy_object); } static bool s_s3_copy_object_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(out_request); struct aws_s3_request *request = NULL; bool work_remaining = false; struct aws_s3_copy_object *copy_object = meta_request->impl; aws_s3_meta_request_lock_synced_data(meta_request); if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) { /* If we haven't already sent the GetObject HEAD request to get the source object size, do so now. */ if (!copy_object->synced_data.head_object_sent) { request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE, AWS_S3_REQUEST_TYPE_HEAD_OBJECT, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); copy_object->synced_data.head_object_sent = true; goto has_work_remaining; } if (!copy_object->synced_data.head_object_completed) { /* we have not received the object size response yet */ goto has_work_remaining; } if (copy_object->synced_data.content_length < s_multipart_copy_minimum_object_size) { /* object is too small to use multipart upload: forwards the original CopyObject request to S3 instead. */ if (!copy_object->synced_data.copy_request_bypass_sent) { request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS, AWS_S3_REQUEST_TYPE_COPY_OBJECT, 1 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Meta Request CopyObject created bypass request %p", (void *)meta_request, (void *)request); copy_object->synced_data.copy_request_bypass_sent = true; goto has_work_remaining; } /* If the bypass request hasn't been completed, then wait for it to be completed. */ if (!copy_object->synced_data.copy_request_bypass_completed) { goto has_work_remaining; } else { goto no_work_remaining; } } /* Object size is large enough to use multipart copy. If we haven't already sent a create-multipart-upload * message, do so now. */ if (!copy_object->synced_data.create_multipart_upload_sent) { request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); copy_object->synced_data.create_multipart_upload_sent = true; goto has_work_remaining; } /* If the create-multipart-upload message hasn't been completed, then there is still additional work to do, but * it can't be done yet. */ if (!copy_object->synced_data.create_multipart_upload_completed) { goto has_work_remaining; } /* If we haven't sent all of the parts yet, then set up to send a new part now. */ if (copy_object->synced_data.num_parts_sent < copy_object->synced_data.total_num_parts) { if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) { uint32_t num_parts_in_flight = (copy_object->synced_data.num_parts_sent - copy_object->synced_data.num_parts_completed); /* TODO: benchmark if there is need to limit the amount of upload part copy in flight requests */ if (num_parts_in_flight > 0) { goto has_work_remaining; } } /* Allocate a request for another part. 
*/ request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY, AWS_S3_REQUEST_TYPE_UPLOAD_PART_COPY, copy_object->threaded_update_data.next_part_number, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); ++copy_object->threaded_update_data.next_part_number; ++copy_object->synced_data.num_parts_sent; AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Returning request %p for part %d", (void *)meta_request, (void *)request, request->part_number); goto has_work_remaining; } /* There is one more request to send after all of the parts (the complete-multipart-upload) but it can't be done * until all of the parts have been completed.*/ if (copy_object->synced_data.num_parts_completed != copy_object->synced_data.total_num_parts) { goto has_work_remaining; } /* If the complete-multipart-upload request hasn't been set yet, then send it now. */ if (!copy_object->synced_data.complete_multipart_upload_sent) { request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); copy_object->synced_data.complete_multipart_upload_sent = true; goto has_work_remaining; } /* Wait for the complete-multipart-upload request to finish. */ if (!copy_object->synced_data.complete_multipart_upload_completed) { goto has_work_remaining; } goto no_work_remaining; } else { /* If the create multipart upload hasn't been sent, then there is nothing left to do when canceling. */ if (!copy_object->synced_data.create_multipart_upload_sent) { goto no_work_remaining; } /* If the create-multipart-upload request is still in flight, wait for it to finish. */ if (!copy_object->synced_data.create_multipart_upload_completed) { goto has_work_remaining; } /* If the number of parts completed is less than the number of parts sent, then we need to wait until all of * those parts are done sending before aborting. */ if (copy_object->synced_data.num_parts_completed < copy_object->synced_data.num_parts_sent) { goto has_work_remaining; } /* If the complete-multipart-upload is already in flight, then we can't necessarily send an abort. */ if (copy_object->synced_data.complete_multipart_upload_sent && !copy_object->synced_data.complete_multipart_upload_completed) { goto has_work_remaining; } /* If the complete-multipart-upload completed successfully, then there is nothing to abort since the transfer * has already finished. */ if (copy_object->synced_data.complete_multipart_upload_completed && copy_object->synced_data.complete_multipart_upload_error_code == AWS_ERROR_SUCCESS) { goto no_work_remaining; } /* If we made it here, and the abort-multipart-upload message hasn't been sent yet, then do so now. */ if (!copy_object->synced_data.abort_multipart_upload_sent) { if (copy_object->upload_id == NULL) { goto no_work_remaining; } request = aws_s3_request_new( meta_request, AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD, 0 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_ALWAYS_SEND); copy_object->synced_data.abort_multipart_upload_sent = true; goto has_work_remaining; } /* Wait for the multipart upload to be completed. 
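 * (More precisely: wait for the AbortMultipartUpload request issued above to finish before the canceled or failed transfer is allowed to complete.)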
*/ if (!copy_object->synced_data.abort_multipart_upload_completed) { goto has_work_remaining; } goto no_work_remaining; } has_work_remaining: work_remaining = true; no_work_remaining: /* If some events are still being delivered to caller, then wait for those to finish */ if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { work_remaining = true; } if (!work_remaining) { aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK); } aws_s3_meta_request_unlock_synced_data(meta_request); if (work_remaining) { *out_request = request; } else { AWS_ASSERT(request == NULL); aws_s3_meta_request_finish(meta_request); } return work_remaining; } /* Given a request, prepare it for sending based on its description. */ static struct aws_future_void *s_s3_copy_object_prepare_request(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_copy_object *copy_object = meta_request->impl; AWS_PRECONDITION(copy_object); aws_s3_meta_request_lock_synced_data(meta_request); struct aws_http_message *message = NULL; bool success = false; switch (request->request_tag) { /* Prepares the GetObject HEAD request to get the source object size. */ case AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE: { message = aws_s3_get_source_object_size_message_new( meta_request->allocator, meta_request->initial_request_message); break; } /* The S3 object is not large enough for multi-part copy. Bypasses a copy of the original CopyObject request to * S3. */ case AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS: { message = aws_s3_message_util_copy_http_message_no_body_all_headers( meta_request->allocator, meta_request->initial_request_message); break; } /* Prepares the CreateMultipartUpload sub-request. */ case AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: { uint64_t part_size_uint64 = copy_object->synced_data.content_length / (uint64_t)g_s3_max_num_upload_parts; if (part_size_uint64 > SIZE_MAX) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create multipart copy meta request; required part size of %" PRIu64 " bytes is too large for platform.", part_size_uint64); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto finish; } uint64_t max_part_size = GB_TO_BYTES((uint64_t)5); if (max_part_size > SIZE_MAX) { max_part_size = SIZE_MAX; } uint32_t num_parts = 0; size_t part_size = 0; aws_s3_calculate_optimal_mpu_part_size_and_num_parts( copy_object->synced_data.content_length, s_min_copy_part_size, max_part_size, &part_size, &num_parts); copy_object->synced_data.total_num_parts = num_parts; copy_object->synced_data.part_size = part_size; /* Fill part_list */ aws_array_list_ensure_capacity(©_object->synced_data.part_list, num_parts); while (aws_array_list_length(©_object->synced_data.part_list) < num_parts) { struct aws_s3_mpu_part_info *part = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_mpu_part_info)); aws_array_list_push_back(©_object->synced_data.part_list, &part); } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "Starting multi-part Copy using part size=%zu, total_num_parts=%zu", part_size, (size_t)num_parts); /* Create the message to create a new multipart upload. */ message = aws_s3_create_multipart_upload_message_new( meta_request->allocator, meta_request->initial_request_message, meta_request->checksum_config.checksum_algorithm); break; } /* Prepares the UploadPartCopy sub-request. 
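 * (Worked example, purely illustrative: with part_size = 128 MiB, part 3 copies source bytes 268435456 through 402653183 inclusive; the last part's range_end is clamped to content_length - 1 below.)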
*/ case AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY: { /* Create a new uploadPartCopy message to upload a part. */ /* compute sub-request range */ /* note that range-end is inclusive */ uint64_t range_start = (request->part_number - 1) * copy_object->synced_data.part_size; uint64_t range_end = range_start + copy_object->synced_data.part_size - 1; if (range_end >= copy_object->synced_data.content_length) { /* adjust size of last part */ range_end = copy_object->synced_data.content_length - 1; } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "Starting UploadPartCopy for partition %" PRIu32 ", range_start=%" PRIu64 ", range_end=%" PRIu64 ", full object length=%" PRIu64, request->part_number, range_start, range_end, copy_object->synced_data.content_length); message = aws_s3_upload_part_copy_message_new( meta_request->allocator, meta_request->initial_request_message, &request->request_body, request->part_number, range_start, range_end, copy_object->upload_id, meta_request->should_compute_content_md5); break; } /* Prepares the CompleteMultiPartUpload sub-request. */ case AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: { if (request->num_times_prepared == 0) { aws_byte_buf_init( &request->request_body, meta_request->allocator, s_complete_multipart_upload_init_body_size_bytes); } else { aws_byte_buf_reset(&request->request_body, false); } AWS_FATAL_ASSERT(copy_object->upload_id); AWS_ASSERT(request->request_body.capacity > 0); aws_byte_buf_reset(&request->request_body, false); /* Build the message to complete our multipart upload, which includes a payload describing all of our * completed parts. */ message = aws_s3_complete_multipart_message_new( meta_request->allocator, meta_request->initial_request_message, &request->request_body, copy_object->upload_id, ©_object->synced_data.part_list, AWS_SCA_NONE); break; } /* Prepares the AbortMultiPartUpload sub-request. */ case AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: { AWS_FATAL_ASSERT(copy_object->upload_id); AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Abort multipart upload request for upload id %s.", (void *)meta_request, aws_string_c_str(copy_object->upload_id)); if (request->num_times_prepared == 0) { aws_byte_buf_init( &request->request_body, meta_request->allocator, s_abort_multipart_upload_init_body_size_bytes); } else { aws_byte_buf_reset(&request->request_body, false); } /* Build the message to abort our multipart upload */ message = aws_s3_abort_multipart_upload_message_new( meta_request->allocator, meta_request->initial_request_message, copy_object->upload_id); break; } } aws_s3_meta_request_unlock_synced_data(meta_request); if (message == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not allocate message for request with tag %d for CopyObject meta request.", (void *)meta_request, request->request_tag); goto finish; } aws_s3_request_setup_send_data(request, message); aws_http_message_release(message); /* Success! 
*/ AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Prepared request %p for part %d", (void *)meta_request, (void *)request, request->part_number); success = true; finish:; struct aws_future_void *future = aws_future_void_new(meta_request->allocator); if (success) { aws_future_void_set_result(future); } else { aws_future_void_set_error(future, aws_last_error_or_unknown()); } return future; } /* For UploadPartCopy requests, etag is sent in the request body, within XML entity quotes */ static struct aws_string *s_etag_new_from_upload_part_copy_response( struct aws_allocator *allocator, struct aws_byte_buf *response_body) { struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(response_body); struct aws_byte_cursor etag_within_xml_quotes = {0}; const char *xml_path[] = {"CopyPartResult", "ETag", NULL}; aws_xml_get_body_at_path(allocator, xml_doc, xml_path, &etag_within_xml_quotes); struct aws_byte_buf etag_within_quotes_byte_buf = aws_replace_quote_entities(allocator, etag_within_xml_quotes); struct aws_string *stripped_etag = aws_strip_quotes(allocator, aws_byte_cursor_from_buf(&etag_within_quotes_byte_buf)); aws_byte_buf_clean_up(&etag_within_quotes_byte_buf); return stripped_etag; } static void s_s3_copy_object_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); AWS_PRECONDITION(request); struct aws_s3_copy_object *copy_object = meta_request->impl; aws_s3_meta_request_lock_synced_data(meta_request); switch (request->request_tag) { case AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE: { if (error_code == AWS_ERROR_SUCCESS) { struct aws_byte_cursor content_length_cursor; if (!aws_http_headers_get( request->send_data.response_headers, g_content_length_header_name, &content_length_cursor)) { if (!aws_byte_cursor_utf8_parse_u64( content_length_cursor, ©_object->synced_data.content_length)) { copy_object->synced_data.head_object_completed = true; } else { /* HEAD request returned an invalid content-length */ aws_s3_meta_request_set_fail_synced( meta_request, request, AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER); } } else { /* HEAD request didn't return content-length header */ aws_s3_meta_request_set_fail_synced( meta_request, request, AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER); } } else { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } break; } /* The S3 object is not large enough for multi-part copy. A copy of the original CopyObject request * was bypassed to S3 and is now finished. */ case AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS: { /* Invoke headers callback if it was requested for this meta request */ if (meta_request->headers_callback != NULL) { struct aws_http_headers *final_response_headers = aws_http_headers_new(meta_request->allocator); /* Copy all the response headers from this request. */ copy_http_headers(request->send_data.response_headers, final_response_headers); /* Invoke the callback without lock */ aws_s3_meta_request_unlock_synced_data(meta_request); /* Notify the user of the headers. 
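 * (headers_callback is delivered at most once per meta request; the pointer is cleared immediately after this call, so no later sub-request re-delivers headers.)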
*/ if (meta_request->headers_callback( meta_request, final_response_headers, request->send_data.response_status, meta_request->user_data)) { error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; /* Grab the lock again after the callback */ aws_s3_meta_request_lock_synced_data(meta_request); aws_http_headers_release(final_response_headers); } /* Signals completion of the meta request */ if (error_code == AWS_ERROR_SUCCESS) { /* Send progress_callback for delivery on io_event_loop thread */ if (meta_request->progress_callback != NULL) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; event.u.progress.info.bytes_transferred = copy_object->synced_data.content_length; event.u.progress.info.content_length = copy_object->synced_data.content_length; aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } copy_object->synced_data.copy_request_bypass_completed = true; } else { /* Bypassed CopyObject request failed */ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } break; } case AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: { struct aws_http_headers *needed_response_headers = NULL; if (error_code == AWS_ERROR_SUCCESS) { needed_response_headers = aws_http_headers_new(meta_request->allocator); const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers); /* Copy any headers now that we'll need for the final, transformed headers later. */ for (size_t header_index = 0; header_index < copy_header_count; ++header_index) { const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index]; struct aws_byte_cursor header_value; AWS_ZERO_STRUCT(header_value); if (!aws_http_headers_get(request->send_data.response_headers, *header_name, &header_value)) { aws_http_headers_set(needed_response_headers, *header_name, header_value); } } struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /* Find the upload id for this multipart upload. */ struct aws_byte_cursor upload_id = {0}; const char *xml_path[] = {"InitiateMultipartUploadResult", "UploadId", NULL}; aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &upload_id); if (upload_id.len == 0) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not find upload-id in create-multipart-upload response", (void *)meta_request); aws_raise_error(AWS_ERROR_S3_MISSING_UPLOAD_ID); error_code = AWS_ERROR_S3_MISSING_UPLOAD_ID; } else { /* Store the multipart upload id. 
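 * (The value parsed from <InitiateMultipartUploadResult><UploadId> above lives for the rest of the meta request; the UploadPartCopy, CompleteMultipartUpload and AbortMultipartUpload messages built later all require it.)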
*/ copy_object->upload_id = aws_string_new_from_cursor(meta_request->allocator, &upload_id); } } AWS_ASSERT(copy_object->synced_data.needed_response_headers == NULL); copy_object->synced_data.needed_response_headers = needed_response_headers; copy_object->synced_data.create_multipart_upload_completed = true; copy_object->synced_data.create_multipart_upload_error_code = error_code; if (error_code != AWS_ERROR_SUCCESS) { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } break; } case AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY: { size_t part_number = request->part_number; AWS_FATAL_ASSERT(part_number > 0); size_t part_index = part_number - 1; ++copy_object->synced_data.num_parts_completed; AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: %d out of %d parts have completed.", (void *)meta_request, copy_object->synced_data.num_parts_completed, copy_object->synced_data.total_num_parts); if (error_code == AWS_ERROR_SUCCESS) { struct aws_string *etag = s_etag_new_from_upload_part_copy_response( meta_request->allocator, &request->send_data.response_body); AWS_ASSERT(etag != NULL); ++copy_object->synced_data.num_parts_successful; /* Send progress_callback for delivery on io_event_loop thread. */ if (meta_request->progress_callback != NULL) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; event.u.progress.info.bytes_transferred = copy_object->synced_data.part_size; event.u.progress.info.content_length = copy_object->synced_data.content_length; aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } struct aws_s3_mpu_part_info *part = NULL; aws_array_list_get_at(©_object->synced_data.part_list, &part, part_index); AWS_ASSERT(part != NULL); part->etag = etag; } else { ++copy_object->synced_data.num_parts_failed; aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } break; } case AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: { if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL) { struct aws_http_headers *final_response_headers = aws_http_headers_new(meta_request->allocator); /* Copy all the response headers from this request. */ copy_http_headers(request->send_data.response_headers, final_response_headers); /* Copy over any response headers that we've previously determined are needed for this final * response. */ copy_http_headers(copy_object->synced_data.needed_response_headers, final_response_headers); struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); /* Grab the ETag for the entire object, and set it as a header. */ struct aws_byte_cursor etag_header_value = {0}; const char *xml_path[] = {"CompleteMultipartUploadResult", "ETag", NULL}; aws_xml_get_body_at_path(meta_request->allocator, xml_doc, xml_path, &etag_header_value); if (etag_header_value.len > 0) { struct aws_byte_buf etag_header_value_byte_buf = aws_replace_quote_entities(meta_request->allocator, etag_header_value); aws_http_headers_set( final_response_headers, g_etag_header_name, aws_byte_cursor_from_buf(&etag_header_value_byte_buf)); aws_byte_buf_clean_up(&etag_header_value_byte_buf); } /* Notify the user of the headers. 
*/ /* Invoke the callback without lock */ aws_s3_meta_request_unlock_synced_data(meta_request); if (meta_request->headers_callback( meta_request, final_response_headers, request->send_data.response_status, meta_request->user_data)) { error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; /* Grab the lock again after the callback */ aws_s3_meta_request_lock_synced_data(meta_request); aws_http_headers_release(final_response_headers); } copy_object->synced_data.complete_multipart_upload_completed = true; copy_object->synced_data.complete_multipart_upload_error_code = error_code; if (error_code != AWS_ERROR_SUCCESS) { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } break; } case AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: { copy_object->synced_data.abort_multipart_upload_error_code = error_code; copy_object->synced_data.abort_multipart_upload_completed = true; break; } } aws_s3_request_finish_up_metrics_synced(request, meta_request); aws_s3_meta_request_unlock_synced_data(meta_request); } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_default_meta_request.c000066400000000000000000000422111456575232400260300ustar00rootroot00000000000000#include "aws/s3/private/s3_default_meta_request.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" #include #include /* Data for aws_s3_meta_request_default's vtable->prepare_request() job */ struct aws_s3_default_prepare_request_job { struct aws_allocator *allocator; struct aws_s3_request *request; /* async step: read request body */ struct aws_future_bool *step1_read_body; /* future to set when this whole job completes */ struct aws_future_void *on_complete; }; static void s_s3_meta_request_default_destroy(struct aws_s3_meta_request *meta_request); static bool s_s3_meta_request_default_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request); static struct aws_future_void *s_s3_default_prepare_request(struct aws_s3_request *request); static void s_s3_default_prepare_request_on_read_done(void *user_data); static void s_s3_default_prepare_request_finish( struct aws_s3_default_prepare_request_job *request_prep, int error_code); static void s_s3_meta_request_default_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code); static struct aws_s3_meta_request_vtable s_s3_meta_request_default_vtable = { .update = s_s3_meta_request_default_update, .send_request_finish = aws_s3_meta_request_send_request_finish_default, .prepare_request = s_s3_default_prepare_request, .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default, .sign_request = aws_s3_meta_request_sign_request_default, .finished_request = s_s3_meta_request_default_request_finished, .destroy = s_s3_meta_request_default_destroy, .finish = aws_s3_meta_request_finish_default, }; /* Allocate a new default meta request. 
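 * (Unlike the multipart CopyObject flow above, a "default" meta request wraps a single aws_s3_request: the update logic below only tracks request_sent / request_completed for that one request.)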
*/ struct aws_s3_meta_request *aws_s3_meta_request_default_new( struct aws_allocator *allocator, struct aws_s3_client *client, enum aws_s3_request_type request_type, uint64_t content_length, bool should_compute_content_md5, const struct aws_s3_meta_request_options *options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(client); AWS_PRECONDITION(options); AWS_PRECONDITION(options->message); struct aws_byte_cursor request_method; if (aws_http_message_get_request_method(options->message, &request_method)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create Default Meta Request; could not get request method from message."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (content_length > SIZE_MAX) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create Default Meta Request; content length of %" PRIu64 " bytes is too large for platform.", content_length); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_s3_meta_request_default *meta_request_default = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_meta_request_default)); /* Try to initialize the base type. */ if (aws_s3_meta_request_init_base( allocator, client, 0, should_compute_content_md5, options, meta_request_default, &s_s3_meta_request_default_vtable, &meta_request_default->base)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not initialize base type for Default Meta Request.", (void *)meta_request_default); aws_mem_release(allocator, meta_request_default); return NULL; } meta_request_default->content_length = (size_t)content_length; meta_request_default->request_type = request_type; /* Try to get operation name. * When internal aws-c-s3 code creates a default meta-request, * a valid request_type is always passed in, and we can get its operation name. * When external users create a default meta-request, they may have provided * operation name in the options. */ const char *operation_name = aws_s3_request_type_operation_name(request_type); if (operation_name[0] != '\0') { meta_request_default->operation_name = aws_string_new_from_c_str(allocator, operation_name); } else if (options->operation_name.len != 0) { meta_request_default->operation_name = aws_string_new_from_cursor(allocator, &options->operation_name); } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Created new Default Meta Request. operation=%s", (void *)meta_request_default, meta_request_default->operation_name ? aws_string_c_str(meta_request_default->operation_name) : "?"); return &meta_request_default->base; } static void s_s3_meta_request_default_destroy(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; aws_string_destroy(meta_request_default->operation_name); aws_mem_release(meta_request->allocator, meta_request_default); } /* Try to get the next request that should be processed. */ static bool s_s3_meta_request_default_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { (void)flags; AWS_PRECONDITION(meta_request); AWS_PRECONDITION(out_request); struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; struct aws_s3_request *request = NULL; bool work_remaining = false; /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) { /* If the request hasn't been sent, then create and send it now. 
*/ if (!meta_request_default->synced_data.request_sent) { if (out_request == NULL) { goto has_work_remaining; } request = aws_s3_request_new( meta_request, 0 /*request_tag*/, meta_request_default->request_type, 1 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); /* Default meta-request might know operation name, despite not knowing valid request_type. * If so, pass the name along. */ if (request->operation_name == NULL && meta_request_default->operation_name != NULL) { request->operation_name = aws_string_new_from_string(meta_request->allocator, meta_request_default->operation_name); } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Meta Request Default created request %p", (void *)meta_request, (void *)request); meta_request_default->synced_data.request_sent = true; goto has_work_remaining; } /* If the request hasn't been completed, then wait for it to be completed. */ if (!meta_request_default->synced_data.request_completed) { goto has_work_remaining; } /* If delivery hasn't been attempted yet for the response body, wait for that to happen. */ if (meta_request->synced_data.num_parts_delivery_completed < 1) { goto has_work_remaining; } goto no_work_remaining; } else { /* If we are canceling, and the request hasn't been sent yet, then there is nothing to wait for. */ if (!meta_request_default->synced_data.request_sent) { goto no_work_remaining; } /* If the request hasn't been completed yet, then wait for that to happen. */ if (!meta_request_default->synced_data.request_completed) { goto has_work_remaining; } /* If some parts are still being delivered to the caller, then wait for those to finish. */ if (meta_request->synced_data.num_parts_delivery_completed < meta_request->synced_data.num_parts_delivery_sent) { goto has_work_remaining; } goto no_work_remaining; } has_work_remaining: work_remaining = true; no_work_remaining: /* If some events are still being delivered to caller, then wait for those to finish */ if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) { work_remaining = true; } if (!work_remaining) { aws_s3_meta_request_set_success_synced( meta_request, meta_request_default->synced_data.cached_response_status); } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ if (work_remaining) { if (request != NULL) { AWS_ASSERT(out_request != NULL); *out_request = request; } } else { AWS_ASSERT(request == NULL); aws_s3_meta_request_finish(meta_request); } return work_remaining; } /* Given a request, prepare it for sending based on its description. 
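 * (Preparation is asynchronous: the aws_future_void returned below completes immediately when there is no request body to read, otherwise from s_s3_default_prepare_request_on_read_done once the body-read future resolves.)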
*/ static struct aws_future_void *s_s3_default_prepare_request(struct aws_s3_request *request) { AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; AWS_PRECONDITION(meta_request_default); struct aws_future_void *asyncstep_prepare_request = aws_future_void_new(request->allocator); /* Store data for async job */ struct aws_s3_default_prepare_request_job *request_prep = aws_mem_calloc(request->allocator, 1, sizeof(struct aws_s3_default_prepare_request_job)); request_prep->allocator = request->allocator; request_prep->request = request; request_prep->on_complete = aws_future_void_acquire(asyncstep_prepare_request); if (meta_request_default->content_length > 0 && request->num_times_prepared == 0) { aws_byte_buf_init(&request->request_body, meta_request->allocator, meta_request_default->content_length); /* Kick off the async read */ request_prep->step1_read_body = aws_s3_meta_request_read_body(meta_request, 0 /*offset*/, &request->request_body); aws_future_bool_register_callback( request_prep->step1_read_body, s_s3_default_prepare_request_on_read_done, request_prep); } else { /* Don't need to read body, jump directly to the last step */ s_s3_default_prepare_request_finish(request_prep, AWS_ERROR_SUCCESS); } return asyncstep_prepare_request; } /* Completion callback for reading the body stream */ static void s_s3_default_prepare_request_on_read_done(void *user_data) { struct aws_s3_default_prepare_request_job *request_prep = user_data; struct aws_s3_request *request = request_prep->request; struct aws_s3_meta_request *meta_request = request->meta_request; int error_code = aws_future_bool_get_error(request_prep->step1_read_body); if (error_code != AWS_OP_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Failed reading request body, error %d (%s)", (void *)meta_request, error_code, aws_error_str(error_code)); goto finish; } if (request->request_body.len < request->request_body.capacity) { error_code = AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH; AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Request body is smaller than 'Content-Length' header said it would be", (void *)meta_request); goto finish; } finish: s_s3_default_prepare_request_finish(request_prep, error_code); } /* Finish async preparation of the request */ static void s_s3_default_prepare_request_finish( struct aws_s3_default_prepare_request_job *request_prep, int error_code) { struct aws_s3_request *request = request_prep->request; struct aws_s3_meta_request *meta_request = request->meta_request; if (error_code != AWS_ERROR_SUCCESS) { goto finish; } struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_all_headers( meta_request->allocator, meta_request->initial_request_message); bool flexible_checksum = meta_request->checksum_config.location != AWS_SCL_NONE; if (!flexible_checksum && meta_request->should_compute_content_md5) { /* If flexible checksum used, client MUST skip Content-MD5 header computation */ aws_s3_message_util_add_content_md5_header(meta_request->allocator, &request->request_body, message); } if (meta_request->checksum_config.validate_response_checksum) { struct aws_http_headers *headers = aws_http_message_get_headers(message); aws_http_headers_set(headers, g_request_validation_mode, g_enabled); } aws_s3_message_util_assign_body( meta_request->allocator, &request->request_body, message, &meta_request->checksum_config, NULL /* out_checksum 
*/); aws_s3_request_setup_send_data(request, message); aws_http_message_release(message); AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Meta Request prepared request %p", (void *)meta_request, (void *)request); finish: if (error_code == AWS_ERROR_SUCCESS) { aws_future_void_set_result(request_prep->on_complete); } else { aws_future_void_set_error(request_prep->on_complete, error_code); } aws_future_bool_release(request_prep->step1_read_body); aws_future_void_release(request_prep->on_complete); aws_mem_release(request_prep->allocator, request_prep); } static void s_s3_meta_request_default_request_finished( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->impl); AWS_PRECONDITION(request); struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; AWS_PRECONDITION(meta_request_default); if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL && request->send_data.response_headers != NULL) { if (meta_request->headers_callback( meta_request, request->send_data.response_headers, request->send_data.response_status, meta_request->user_data)) { error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; } /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); meta_request_default->synced_data.cached_response_status = request->send_data.response_status; meta_request_default->synced_data.request_completed = true; meta_request_default->synced_data.request_error_code = error_code; bool finishing_metrics = true; if (error_code == AWS_ERROR_SUCCESS) { /* Send progress_callback for delivery on io_event_loop thread. * For default meta-requests, we invoke the progress_callback once, after the sole HTTP request completes. * This is simpler than reporting incremental progress as the response body is received, * or the request body is streamed out, since then we'd also need to handle retries that reset * progress back to 0% (our existing API only lets us report forward progress). */ if (meta_request->progress_callback != NULL) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_PROGRESS}; if (meta_request->type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) { /* For uploads, report request body size */ event.u.progress.info.bytes_transferred = request->request_body.len; event.u.progress.info.content_length = request->request_body.len; } else { /* For anything else, report response body size */ event.u.progress.info.bytes_transferred = request->send_data.response_body.len; event.u.progress.info.content_length = request->send_data.response_body.len; } aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } aws_s3_meta_request_stream_response_body_synced(meta_request, request); /* The body of the request is queued to be streamed, don't record the end timestamp for the request * yet. */ finishing_metrics = false; } else { aws_s3_meta_request_set_fail_synced(meta_request, request, error_code); } if (finishing_metrics) { aws_s3_request_finish_up_metrics_synced(request, meta_request); } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_endpoint.c000066400000000000000000000272251456575232400234560ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include #include #include #include #include #include #include #include #include #include #include #include #include static const uint32_t s_connection_timeout_ms = 3000; static const uint32_t s_http_port = 80; static const uint32_t s_https_port = 443; static void s_s3_endpoint_on_host_resolver_address_resolved( struct aws_host_resolver *resolver, const struct aws_string *host_name, int err_code, const struct aws_array_list *host_addresses, void *user_data); static struct aws_http_connection_manager *s_s3_endpoint_create_http_connection_manager( struct aws_s3_endpoint *endpoint, const struct aws_string *host_name, struct aws_client_bootstrap *client_bootstrap, const struct aws_tls_connection_options *tls_connection_options, uint32_t max_connections, uint32_t port, const struct aws_http_proxy_config *proxy_config, const struct proxy_env_var_settings *proxy_ev_settings, uint32_t connect_timeout_ms, const struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options, const struct aws_http_connection_monitoring_options *monitoring_options); static void s_s3_endpoint_http_connection_manager_shutdown_callback(void *user_data); static void s_s3_endpoint_ref_count_zero(struct aws_s3_endpoint *endpoint); static void s_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock); static void s_s3_endpoint_release(struct aws_s3_endpoint *endpoint); static const struct aws_s3_endpoint_system_vtable s_s3_endpoint_default_system_vtable = { .acquire = s_s3_endpoint_acquire, .release = s_s3_endpoint_release, }; static const struct aws_s3_endpoint_system_vtable *s_s3_endpoint_system_vtable = &s_s3_endpoint_default_system_vtable; void aws_s3_endpoint_set_system_vtable(const struct aws_s3_endpoint_system_vtable *vtable) { s_s3_endpoint_system_vtable = vtable; } struct aws_s3_endpoint *aws_s3_endpoint_new( struct aws_allocator *allocator, const struct aws_s3_endpoint_options *options) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(options); AWS_PRECONDITION(options->host_name); struct aws_s3_endpoint *endpoint = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_endpoint)); endpoint->client_synced_data.ref_count = 1; endpoint->allocator = allocator; endpoint->host_name = options->host_name; struct aws_host_resolution_config host_resolver_config; AWS_ZERO_STRUCT(host_resolver_config); host_resolver_config.impl = aws_default_dns_resolve; host_resolver_config.max_ttl = options->dns_host_address_ttl_seconds; host_resolver_config.impl_data = NULL; if (aws_host_resolver_resolve_host( options->client_bootstrap->host_resolver, endpoint->host_name, s_s3_endpoint_on_host_resolver_address_resolved, &host_resolver_config, NULL)) { AWS_LOGF_ERROR( AWS_LS_S3_ENDPOINT, "id=%p: Error trying to resolve host for endpoint %s", (void *)endpoint, (const char *)endpoint->host_name->bytes); goto error_cleanup; } endpoint->http_connection_manager = s_s3_endpoint_create_http_connection_manager( endpoint, options->host_name, options->client_bootstrap, options->tls_connection_options, options->max_connections, options->port, options->proxy_config, options->proxy_ev_settings, options->connect_timeout_ms, options->tcp_keep_alive_options, options->monitoring_options); if (endpoint->http_connection_manager == NULL) { goto error_cleanup; } endpoint->client = options->client; return endpoint; error_cleanup: aws_string_destroy(options->host_name); aws_mem_release(allocator, endpoint); return 
NULL; } static struct aws_http_connection_manager *s_s3_endpoint_create_http_connection_manager( struct aws_s3_endpoint *endpoint, const struct aws_string *host_name, struct aws_client_bootstrap *client_bootstrap, const struct aws_tls_connection_options *tls_connection_options, uint32_t max_connections, uint32_t port, const struct aws_http_proxy_config *proxy_config, const struct proxy_env_var_settings *proxy_ev_settings, uint32_t connect_timeout_ms, const struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options, const struct aws_http_connection_monitoring_options *monitoring_options) { AWS_PRECONDITION(endpoint); AWS_PRECONDITION(client_bootstrap); AWS_PRECONDITION(host_name); struct aws_byte_cursor host_name_cursor = aws_byte_cursor_from_string(host_name); /* Try to set up an HTTP connection manager. */ struct aws_socket_options socket_options; AWS_ZERO_STRUCT(socket_options); socket_options.type = AWS_SOCKET_STREAM; socket_options.domain = AWS_SOCKET_IPV4; socket_options.connect_timeout_ms = connect_timeout_ms == 0 ? s_connection_timeout_ms : connect_timeout_ms; if (tcp_keep_alive_options != NULL) { socket_options.keepalive = true; socket_options.keep_alive_interval_sec = tcp_keep_alive_options->keep_alive_interval_sec; socket_options.keep_alive_timeout_sec = tcp_keep_alive_options->keep_alive_timeout_sec; socket_options.keep_alive_max_failed_probes = tcp_keep_alive_options->keep_alive_max_failed_probes; } struct proxy_env_var_settings proxy_ev_settings_default; /* Turn on environment variable for proxy by default */ if (proxy_ev_settings == NULL) { AWS_ZERO_STRUCT(proxy_ev_settings_default); proxy_ev_settings_default.env_var_type = AWS_HPEV_ENABLE; proxy_ev_settings = &proxy_ev_settings_default; } struct aws_http_connection_manager_options manager_options; AWS_ZERO_STRUCT(manager_options); manager_options.bootstrap = client_bootstrap; manager_options.initial_window_size = SIZE_MAX; manager_options.socket_options = &socket_options; manager_options.host = host_name_cursor; manager_options.max_connections = max_connections; manager_options.shutdown_complete_callback = s_s3_endpoint_http_connection_manager_shutdown_callback; manager_options.shutdown_complete_user_data = endpoint; manager_options.proxy_ev_settings = proxy_ev_settings; if (monitoring_options != NULL) { manager_options.monitoring_options = monitoring_options; } struct aws_http_proxy_options proxy_options; if (proxy_config != NULL) { aws_http_proxy_options_init_from_config(&proxy_options, proxy_config); manager_options.proxy_options = &proxy_options; } struct aws_tls_connection_options *manager_tls_options = NULL; if (tls_connection_options != NULL) { manager_tls_options = aws_mem_calloc(endpoint->allocator, 1, sizeof(struct aws_tls_connection_options)); aws_tls_connection_options_copy(manager_tls_options, tls_connection_options); /* TODO fix this in the actual aws_tls_connection_options_set_server_name function. */ if (manager_tls_options->server_name != NULL) { aws_string_destroy(manager_tls_options->server_name); manager_tls_options->server_name = NULL; } aws_tls_connection_options_set_server_name(manager_tls_options, endpoint->allocator, &host_name_cursor); manager_options.tls_connection_options = manager_tls_options; manager_options.port = port == 0 ? s_https_port : port; } else { manager_options.port = port == 0 ? 
s_http_port : port; } struct aws_http_connection_manager *http_connection_manager = aws_http_connection_manager_new(endpoint->allocator, &manager_options); if (manager_tls_options != NULL) { aws_tls_connection_options_clean_up(manager_tls_options); aws_mem_release(endpoint->allocator, manager_tls_options); manager_tls_options = NULL; } if (http_connection_manager == NULL) { AWS_LOGF_ERROR(AWS_LS_S3_ENDPOINT, "id=%p: Could not create http connection manager.", (void *)endpoint); return NULL; } AWS_LOGF_DEBUG( AWS_LS_S3_ENDPOINT, "id=%p: Created connection manager %p for endpoint", (void *)endpoint, (void *)http_connection_manager); return http_connection_manager; } struct aws_s3_endpoint *aws_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock) { if (endpoint) { s_s3_endpoint_system_vtable->acquire(endpoint, already_holding_lock); } return endpoint; } static void s_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock) { AWS_PRECONDITION(endpoint); if (!already_holding_lock) { aws_s3_client_lock_synced_data(endpoint->client); } AWS_ASSERT(endpoint->client_synced_data.ref_count > 0); ++endpoint->client_synced_data.ref_count; if (!already_holding_lock) { aws_s3_client_unlock_synced_data(endpoint->client); } } void aws_s3_endpoint_release(struct aws_s3_endpoint *endpoint) { if (endpoint) { s_s3_endpoint_system_vtable->release(endpoint); } } static void s_s3_endpoint_release(struct aws_s3_endpoint *endpoint) { AWS_PRECONDITION(endpoint); AWS_PRECONDITION(endpoint->client); /* BEGIN CRITICAL SECTION */ aws_s3_client_lock_synced_data(endpoint->client); bool should_destroy = (endpoint->client_synced_data.ref_count == 1); if (should_destroy) { aws_hash_table_remove(&endpoint->client->synced_data.endpoints, endpoint->host_name, NULL, NULL); } else { --endpoint->client_synced_data.ref_count; } aws_s3_client_unlock_synced_data(endpoint->client); /* END CRITICAL SECTION */ if (should_destroy) { /* The endpoint may have async cleanup to do (connection manager). * When that's all done we'll invoke a completion callback. * Since it's a crime to hold a lock while invoking a callback, * we make sure that we've released the client's lock before proceeding... 
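 * (s_s3_endpoint_ref_count_zero only releases the HTTP connection manager; the endpoint memory itself is freed later, in the manager's shutdown callback.)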
*/ s_s3_endpoint_ref_count_zero(endpoint); } } static void s_s3_endpoint_ref_count_zero(struct aws_s3_endpoint *endpoint) { AWS_PRECONDITION(endpoint); AWS_PRECONDITION(endpoint->http_connection_manager); struct aws_http_connection_manager *http_connection_manager = endpoint->http_connection_manager; endpoint->http_connection_manager = NULL; /* Cleanup continues once the manager's shutdown callback is invoked */ aws_http_connection_manager_release(http_connection_manager); } static void s_s3_endpoint_http_connection_manager_shutdown_callback(void *user_data) { struct aws_s3_endpoint *endpoint = user_data; AWS_ASSERT(endpoint); struct aws_s3_client *client = endpoint->client; aws_mem_release(endpoint->allocator, endpoint); client->vtable->endpoint_shutdown_callback(client); } static void s_s3_endpoint_on_host_resolver_address_resolved( struct aws_host_resolver *resolver, const struct aws_string *host_name, int err_code, const struct aws_array_list *host_addresses, void *user_data) { (void)resolver; (void)host_name; (void)err_code; (void)host_addresses; (void)user_data; /* DO NOT add any logic here, unless you also ensure the endpoint lives long enough */ } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_endpoint_resolver/000077500000000000000000000000001456575232400252235ustar00rootroot00000000000000aws_s3_endpoint_resolver_partition.c000066400000000000000000000503251456575232400344260ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_endpoint_resolver/** * Copyright Amazon.com, Inc. or its affiliates. * All Rights Reserved. SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_endpoint_resolver.h" #include /** * This file is generated using scripts/update_s3_endpoint_resolver_artifacts.py. * Do not modify directly. 
*/ /* clang-format off */ static const char s_generated_array[] = { '{', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'i', 'd', '"', ':', '"', 'a', 'w', 's', '"', ',', '"', 'o', 'u', 't', 'p', 'u', 't', 's', '"', ':', '{', '"', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'a', 'm', 'a', 'z', 'o', 'n', 'a', 'w', 's', '.', 'c', 'o', 'm', '"', ',', '"', 'd', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', 'D', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'a', 'p', 'i', '.', 'a', 'w', 's', '"', ',', '"', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'G', 'l', 'o', 'b', 'a', 'l', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 'a', 'w', 's', '"', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', 't', 'r', 'u', 'e', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'F', 'I', 'P', 'S', '"', ':', 't', 'r', 'u', 'e', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 'R', 'e', 'g', 'e', 'x', '"', ':', '"', '^', '(', 'u', 's', '|', 'e', 'u', '|', 'a', 'p', '|', 's', 'a', '|', 'c', 'a', '|', 'm', 'e', '|', 'a', 'f', '|', 'i', 'l', ')', '\\', '\\', '-', '\\', '\\', 'w', '+', '\\', '\\', '-', '\\', '\\', 'd', '+', '$', '"', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 's', '"', ':', '{', '"', 'a', 'f', '-', 's', 'o', 'u', 't', 'h', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 'f', 'r', 'i', 'c', 'a', ' ', '(', 'C', 'a', 'p', 'e', ' ', 'T', 'o', 'w', 'n', ')', '"', '}', ',', '"', 'a', 'p', '-', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'H', 'o', 'n', 'g', ' ', 'K', 'o', 'n', 'g', ')', '"', '}', ',', '"', 'a', 'p', '-', 'n', 'o', 'r', 't', 'h', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'T', 'o', 'k', 'y', 'o', ')', '"', '}', ',', '"', 'a', 'p', '-', 'n', 'o', 'r', 't', 'h', 'e', 'a', 's', 't', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'S', 'e', 'o', 'u', 'l', ')', '"', '}', ',', '"', 'a', 'p', '-', 'n', 'o', 'r', 't', 'h', 'e', 'a', 's', 't', '-', '3', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'O', 's', 'a', 'k', 'a', ')', '"', '}', ',', '"', 'a', 'p', '-', 's', 'o', 'u', 't', 'h', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'M', 'u', 'm', 'b', 'a', 'i', ')', '"', '}', ',', '"', 'a', 'p', '-', 's', 'o', 'u', 't', 'h', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'H', 'y', 'd', 'e', 'r', 'a', 'b', 'a', 'd', ')', '"', '}', ',', '"', 'a', 'p', '-', 's', 'o', 'u', 't', 'h', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'S', 'i', 'n', 'g', 'a', 'p', 
'o', 'r', 'e', ')', '"', '}', ',', '"', 'a', 'p', '-', 's', 'o', 'u', 't', 'h', 'e', 'a', 's', 't', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'S', 'y', 'd', 'n', 'e', 'y', ')', '"', '}', ',', '"', 'a', 'p', '-', 's', 'o', 'u', 't', 'h', 'e', 'a', 's', 't', '-', '3', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'J', 'a', 'k', 'a', 'r', 't', 'a', ')', '"', '}', ',', '"', 'a', 'p', '-', 's', 'o', 'u', 't', 'h', 'e', 'a', 's', 't', '-', '4', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 's', 'i', 'a', ' ', 'P', 'a', 'c', 'i', 'f', 'i', 'c', ' ', '(', 'M', 'e', 'l', 'b', 'o', 'u', 'r', 'n', 'e', ')', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 'W', 'S', ' ', 'S', 't', 'a', 'n', 'd', 'a', 'r', 'd', ' ', 'g', 'l', 'o', 'b', 'a', 'l', ' ', 'r', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'c', 'a', '-', 'c', 'e', 'n', 't', 'r', 'a', 'l', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'C', 'a', 'n', 'a', 'd', 'a', ' ', '(', 'C', 'e', 'n', 't', 'r', 'a', 'l', ')', '"', '}', ',', '"', 'c', 'a', '-', 'w', 'e', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'C', 'a', 'n', 'a', 'd', 'a', ' ', 'W', 'e', 's', 't', ' ', '(', 'C', 'a', 'l', 'g', 'a', 'r', 'y', ')', '"', '}', ',', '"', 'e', 'u', '-', 'c', 'e', 'n', 't', 'r', 'a', 'l', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 'e', ' ', '(', 'F', 'r', 'a', 'n', 'k', 'f', 'u', 'r', 't', ')', '"', '}', ',', '"', 'e', 'u', '-', 'c', 'e', 'n', 't', 'r', 'a', 'l', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 'e', ' ', '(', 'Z', 'u', 'r', 'i', 'c', 'h', ')', '"', '}', ',', '"', 'e', 'u', '-', 'n', 'o', 'r', 't', 'h', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 'e', ' ', '(', 'S', 't', 'o', 'c', 'k', 'h', 'o', 'l', 'm', ')', '"', '}', ',', '"', 'e', 'u', '-', 's', 'o', 'u', 't', 'h', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 'e', ' ', '(', 'M', 'i', 'l', 'a', 'n', ')', '"', '}', ',', '"', 'e', 'u', '-', 's', 'o', 'u', 't', 'h', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 'e', ' ', '(', 'S', 'p', 'a', 'i', 'n', ')', '"', '}', ',', '"', 'e', 'u', '-', 'w', 'e', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 'e', ' ', '(', 'I', 'r', 'e', 'l', 'a', 'n', 'd', ')', '"', '}', ',', '"', 'e', 'u', '-', 'w', 'e', 's', 't', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 'e', ' ', '(', 'L', 'o', 'n', 'd', 'o', 'n', ')', '"', '}', ',', '"', 'e', 'u', '-', 'w', 'e', 's', 't', '-', '3', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'E', 'u', 'r', 'o', 'p', 
'e', ' ', '(', 'P', 'a', 'r', 'i', 's', ')', '"', '}', ',', '"', 'i', 'l', '-', 'c', 'e', 'n', 't', 'r', 'a', 'l', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'I', 's', 'r', 'a', 'e', 'l', ' ', '(', 'T', 'e', 'l', ' ', 'A', 'v', 'i', 'v', ')', '"', '}', ',', '"', 'm', 'e', '-', 'c', 'e', 'n', 't', 'r', 'a', 'l', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'M', 'i', 'd', 'd', 'l', 'e', ' ', 'E', 'a', 's', 't', ' ', '(', 'U', 'A', 'E', ')', '"', '}', ',', '"', 'm', 'e', '-', 's', 'o', 'u', 't', 'h', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'M', 'i', 'd', 'd', 'l', 'e', ' ', 'E', 'a', 's', 't', ' ', '(', 'B', 'a', 'h', 'r', 'a', 'i', 'n', ')', '"', '}', ',', '"', 's', 'a', '-', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'S', 'o', 'u', 't', 'h', ' ', 'A', 'm', 'e', 'r', 'i', 'c', 'a', ' ', '(', 'S', 'a', 'o', ' ', 'P', 'a', 'u', 'l', 'o', ')', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'U', 'S', ' ', 'E', 'a', 's', 't', ' ', '(', 'N', '.', ' ', 'V', 'i', 'r', 'g', 'i', 'n', 'i', 'a', ')', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'U', 'S', ' ', 'E', 'a', 's', 't', ' ', '(', 'O', 'h', 'i', 'o', ')', '"', '}', ',', '"', 'u', 's', '-', 'w', 'e', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'U', 'S', ' ', 'W', 'e', 's', 't', ' ', '(', 'N', '.', ' ', 'C', 'a', 'l', 'i', 'f', 'o', 'r', 'n', 'i', 'a', ')', '"', '}', ',', '"', 'u', 's', '-', 'w', 'e', 's', 't', '-', '2', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'U', 'S', ' ', 'W', 'e', 's', 't', ' ', '(', 'O', 'r', 'e', 'g', 'o', 'n', ')', '"', '}', '}', '}', ',', '{', '"', 'i', 'd', '"', ':', '"', 'a', 'w', 's', '-', 'c', 'n', '"', ',', '"', 'o', 'u', 't', 'p', 'u', 't', 's', '"', ':', '{', '"', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'a', 'm', 'a', 'z', 'o', 'n', 'a', 'w', 's', '.', 'c', 'o', 'm', '.', 'c', 'n', '"', ',', '"', 'd', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', 'D', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'a', 'p', 'i', '.', 'a', 'm', 'a', 'z', 'o', 'n', 'w', 'e', 'b', 's', 'e', 'r', 'v', 'i', 'c', 'e', 's', '.', 'c', 'o', 'm', '.', 'c', 'n', '"', ',', '"', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'G', 'l', 'o', 'b', 'a', 'l', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'c', 'n', '-', 'n', 'o', 'r', 't', 'h', 'w', 'e', 's', 't', '-', '1', '"', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 'a', 'w', 's', '-', 'c', 'n', '"', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', 't', 'r', 'u', 'e', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'F', 'I', 'P', 'S', '"', ':', 't', 'r', 'u', 'e', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 'R', 'e', 'g', 'e', 'x', '"', ':', '"', '^', 'c', 'n', '\\', '\\', '-', '\\', '\\', 'w', '+', '\\', '\\', '-', '\\', '\\', 'd', '+', '$', '"', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 's', '"', ':', '{', '"', 'a', 'w', 's', '-', 'c', 'n', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', 
'"', ':', '"', 'A', 'W', 'S', ' ', 'C', 'h', 'i', 'n', 'a', ' ', 'g', 'l', 'o', 'b', 'a', 'l', ' ', 'r', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'c', 'n', '-', 'n', 'o', 'r', 't', 'h', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'C', 'h', 'i', 'n', 'a', ' ', '(', 'B', 'e', 'i', 'j', 'i', 'n', 'g', ')', '"', '}', ',', '"', 'c', 'n', '-', 'n', 'o', 'r', 't', 'h', 'w', 'e', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'C', 'h', 'i', 'n', 'a', ' ', '(', 'N', 'i', 'n', 'g', 'x', 'i', 'a', ')', '"', '}', '}', '}', ',', '{', '"', 'i', 'd', '"', ':', '"', 'a', 'w', 's', '-', 'u', 's', '-', 'g', 'o', 'v', '"', ',', '"', 'o', 'u', 't', 'p', 'u', 't', 's', '"', ':', '{', '"', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'a', 'm', 'a', 'z', 'o', 'n', 'a', 'w', 's', '.', 'c', 'o', 'm', '"', ',', '"', 'd', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', 'D', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'a', 'p', 'i', '.', 'a', 'w', 's', '"', ',', '"', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'G', 'l', 'o', 'b', 'a', 'l', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'g', 'o', 'v', '-', 'w', 'e', 's', 't', '-', '1', '"', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 'a', 'w', 's', '-', 'u', 's', '-', 'g', 'o', 'v', '"', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', 't', 'r', 'u', 'e', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'F', 'I', 'P', 'S', '"', ':', 't', 'r', 'u', 'e', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 'R', 'e', 'g', 'e', 'x', '"', ':', '"', '^', 'u', 's', '\\', '\\', '-', 'g', 'o', 'v', '\\', '\\', '-', '\\', '\\', 'w', '+', '\\', '\\', '-', '\\', '\\', 'd', '+', '$', '"', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 's', '"', ':', '{', '"', 'a', 'w', 's', '-', 'u', 's', '-', 'g', 'o', 'v', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 'W', 'S', ' ', 'G', 'o', 'v', 'C', 'l', 'o', 'u', 'd', ' ', '(', 'U', 'S', ')', ' ', 'g', 'l', 'o', 'b', 'a', 'l', ' ', 'r', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'g', 'o', 'v', '-', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 'W', 'S', ' ', 'G', 'o', 'v', 'C', 'l', 'o', 'u', 'd', ' ', '(', 'U', 'S', '-', 'E', 'a', 's', 't', ')', '"', '}', ',', '"', 'u', 's', '-', 'g', 'o', 'v', '-', 'w', 'e', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 'W', 'S', ' ', 'G', 'o', 'v', 'C', 'l', 'o', 'u', 'd', ' ', '(', 'U', 'S', '-', 'W', 'e', 's', 't', ')', '"', '}', '}', '}', ',', '{', '"', 'i', 'd', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '"', ',', '"', 'o', 'u', 't', 'p', 'u', 't', 's', '"', ':', '{', '"', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'c', '2', 's', '.', 'i', 'c', '.', 'g', 'o', 'v', '"', ',', '"', 'd', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', 'D', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'c', '2', 's', '.', 'i', 'c', '.', 'g', 'o', 'v', '"', ',', '"', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'G', 'l', 'o', 'b', 'a', 'l', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'i', 's', 'o', '-', 'e', 'a', 's', 't', '-', '1', '"', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '"', ',', '"', 's', 'u', 'p', 'p', 'o', 
'r', 't', 's', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'F', 'I', 'P', 'S', '"', ':', 't', 'r', 'u', 'e', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 'R', 'e', 'g', 'e', 'x', '"', ':', '"', '^', 'u', 's', '\\', '\\', '-', 'i', 's', 'o', '\\', '\\', '-', '\\', '\\', 'w', '+', '\\', '\\', '-', '\\', '\\', 'd', '+', '$', '"', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 's', '"', ':', '{', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 'W', 'S', ' ', 'I', 'S', 'O', ' ', '(', 'U', 'S', ')', ' ', 'g', 'l', 'o', 'b', 'a', 'l', ' ', 'r', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'i', 's', 'o', '-', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'U', 'S', ' ', 'I', 'S', 'O', ' ', 'E', 'a', 's', 't', '"', '}', ',', '"', 'u', 's', '-', 'i', 's', 'o', '-', 'w', 'e', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'U', 'S', ' ', 'I', 'S', 'O', ' ', 'W', 'E', 'S', 'T', '"', '}', '}', '}', ',', '{', '"', 'i', 'd', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'b', '"', ',', '"', 'o', 'u', 't', 'p', 'u', 't', 's', '"', ':', '{', '"', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 's', 'c', '2', 's', '.', 's', 'g', 'o', 'v', '.', 'g', 'o', 'v', '"', ',', '"', 'd', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', 'D', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 's', 'c', '2', 's', '.', 's', 'g', 'o', 'v', '.', 'g', 'o', 'v', '"', ',', '"', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'G', 'l', 'o', 'b', 'a', 'l', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'i', 's', 'o', 'b', '-', 'e', 'a', 's', 't', '-', '1', '"', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'b', '"', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'F', 'I', 'P', 'S', '"', ':', 't', 'r', 'u', 'e', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 'R', 'e', 'g', 'e', 'x', '"', ':', '"', '^', 'u', 's', '\\', '\\', '-', 'i', 's', 'o', 'b', '\\', '\\', '-', '\\', '\\', 'w', '+', '\\', '\\', '-', '\\', '\\', 'd', '+', '$', '"', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 's', '"', ':', '{', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'b', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'A', 'W', 'S', ' ', 'I', 'S', 'O', 'B', ' ', '(', 'U', 'S', ')', ' ', 'g', 'l', 'o', 'b', 'a', 'l', ' ', 'r', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'i', 's', 'o', 'b', '-', 'e', 'a', 's', 't', '-', '1', '"', ':', '{', '"', 'd', 'e', 's', 'c', 'r', 'i', 'p', 't', 'i', 'o', 'n', '"', ':', '"', 'U', 'S', ' ', 'I', 'S', 'O', 'B', ' ', 'E', 'a', 's', 't', ' ', '(', 'O', 'h', 'i', 'o', ')', '"', '}', '}', '}', ',', '{', '"', 'i', 'd', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'e', '"', ',', '"', 'o', 'u', 't', 'p', 'u', 't', 's', '"', ':', '{', '"', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'c', 'l', 'o', 'u', 'd', '.', 'a', 'd', 'c', '-', 'e', '.', 'u', 'k', '"', ',', '"', 'd', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', 'D', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'c', 'l', 'o', 
'u', 'd', '.', 'a', 'd', 'c', '-', 'e', '.', 'u', 'k', '"', ',', '"', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'G', 'l', 'o', 'b', 'a', 'l', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'e', 'u', '-', 'i', 's', 'o', 'e', '-', 'w', 'e', 's', 't', '-', '1', '"', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'e', '"', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'F', 'I', 'P', 'S', '"', ':', 't', 'r', 'u', 'e', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 'R', 'e', 'g', 'e', 'x', '"', ':', '"', '^', 'e', 'u', '\\', '\\', '-', 'i', 's', 'o', 'e', '\\', '\\', '-', '\\', '\\', 'w', '+', '\\', '\\', '-', '\\', '\\', 'd', '+', '$', '"', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 's', '"', ':', '{', '}', '}', ',', '{', '"', 'i', 'd', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'f', '"', ',', '"', 'o', 'u', 't', 'p', 'u', 't', 's', '"', ':', '{', '"', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'c', 's', 'p', '.', 'h', 'c', 'i', '.', 'i', 'c', '.', 'g', 'o', 'v', '"', ',', '"', 'd', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', 'D', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '"', ':', '"', 'c', 's', 'p', '.', 'h', 'c', 'i', '.', 'i', 'c', '.', 'g', 'o', 'v', '"', ',', '"', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'G', 'l', 'o', 'b', 'a', 'l', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'i', 's', 'o', 'f', '-', 's', 'o', 'u', 't', 'h', '-', '1', '"', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 'a', 'w', 's', '-', 'i', 's', 'o', '-', 'f', '"', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 's', 'u', 'p', 'p', 'o', 'r', 't', 's', 'F', 'I', 'P', 'S', '"', ':', 't', 'r', 'u', 'e', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 'R', 'e', 'g', 'e', 'x', '"', ':', '"', '^', 'u', 's', '\\', '\\', '-', 'i', 's', 'o', 'f', '\\', '\\', '-', '\\', '\\', 'w', '+', '\\', '\\', '-', '\\', '\\', 'd', '+', '$', '"', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', 's', '"', ':', '{', '}', '}', ']', ',', '"', 'v', 'e', 'r', 's', 'i', 'o', 'n', '"', ':', '"', '1', '.', '1', '"', '}'}; const struct aws_byte_cursor aws_s3_endpoint_resolver_partitions = { .len = 3982, .ptr = (uint8_t *) s_generated_array }; aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_endpoint_resolver/aws_s3_endpoint_rule_set.c000066400000000000000000013377351456575232400324130ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. * All Rights Reserved. SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_endpoint_resolver.h" #include /** * This file is generated using scripts/update_s3_endpoint_resolver_artifacts.py. * Do not modify directly. 
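 *
 * A minimal sketch of how the embedded JSON is meant to be consumed, assuming
 * only the aws_byte_cursor shape visible in these generated files (a .len byte
 * count plus a .ptr into the char array); the standalone struct and the
 * print_ruleset_prefix() helper below are illustrative stand-ins, not aws-c-s3
 * or aws-c-common APIs:
 *
 *     #include <stdint.h>
 *     #include <stddef.h>
 *     #include <stdio.h>
 *
 *     struct aws_byte_cursor {   // simplified mirror of the layout used above
 *         size_t len;            // number of valid bytes in the array
 *         const uint8_t *ptr;    // points at the generated char array
 *     };
 *
 *     static void print_ruleset_prefix(struct aws_byte_cursor cursor) {
 *         // The generated array is not NUL-terminated, so bound every read
 *         // by cursor.len instead of treating .ptr as a C string.
 *         int n = (int)(cursor.len < 60 ? cursor.len : 60);
 *         printf("%zu bytes of JSON, starting: %.*s\n",
 *                cursor.len, n, (const char *)cursor.ptr);
 *     }
 *
 * Callers that need the structured form would hand the same length-bounded
 * cursor to a JSON parser rather than printing it; the cursor is simply a
 * bounded view over s_generated_array.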
*/ /* clang-format off */ static const char s_generated_array[] = { '{', '"', 'v', 'e', 'r', 's', 'i', 'o', 'n', '"', ':', '"', '1', '.', '0', '"', ',', '"', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', 's', '"', ':', '{', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', ':', '{', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'T', 'h', 'e', ' ', 'S', '3', ' ', 'b', 'u', 'c', 'k', 'e', 't', ' ', 'u', 's', 'e', 'd', ' ', 't', 'o', ' ', 's', 'e', 'n', 'd', ' ', 't', 'h', 'e', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', '.', ' ', 'T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', 'n', ' ', 'o', 'p', 't', 'i', 'o', 'n', 'a', 'l', ' ', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ', 't', 'h', 'a', 't', ' ', 'w', 'i', 'l', 'l', ' ', 'b', 'e', ' ', 's', 'e', 't', ' ', 'a', 'u', 't', 'o', 'm', 'a', 't', 'i', 'c', 'a', 'l', 'l', 'y', ' ', 'f', 'o', 'r', ' ', 'o', 'p', 'e', 'r', 'a', 't', 'i', 'o', 'n', 's', ' ', 't', 'h', 'a', 't', ' ', 'a', 'r', 'e', ' ', 's', 'c', 'o', 'p', 'e', 'd', ' ', 't', 'o', ' ', 'a', 'n', ' ', 'S', '3', ' ', 'b', 'u', 'c', 'k', 'e', 't', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'S', 't', 'r', 'i', 'n', 'g', '"', '}', ',', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'R', 'e', 'g', 'i', 'o', 'n', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'T', 'h', 'e', ' ', 'A', 'W', 'S', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'u', 's', 'e', 'd', ' ', 't', 'o', ' ', 'd', 'i', 's', 'p', 'a', 't', 'c', 'h', ' ', 't', 'h', 'e', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'S', 't', 'r', 'i', 'n', 'g', '"', '}', ',', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'd', 'e', 'f', 'a', 'u', 'l', 't', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'W', 'h', 'e', 'n', ' ', 't', 'r', 'u', 'e', ',', ' ', 's', 'e', 'n', 'd', ' ', 't', 'h', 'i', 's', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', ' ', 't', 'o', ' ', 't', 'h', 'e', ' ', 'F', 'I', 'P', 'S', '-', 'c', 'o', 'm', 'p', 'l', 'i', 'a', 'n', 't', ' ', 'r', 'e', 'g', 'i', 'o', 'n', 'a', 'l', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '.', ' ', 'I', 'f', ' ', 't', 'h', 'e', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'e', 'd', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 'h', 'a', 'v', 'e', ' ', 'a', ' ', 'F', 'I', 'P', 'S', ' ', 'c', 'o', 'm', 'p', 'l', 'i', 'a', 'n', 't', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ',', ' ', 'd', 'i', 's', 'p', 'a', 't', 'c', 'h', 'i', 'n', 'g', ' ', 't', 'h', 'e', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', ' ', 'w', 'i', 'l', 'l', ' ', 'r', 'e', 't', 'u', 'r', 'n', ' ', 'a', 'n', ' ', 'e', 'r', 'r', 'o', 'r', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'U', 's', 'e', 'D', 
'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'd', 'e', 'f', 'a', 'u', 'l', 't', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'W', 'h', 'e', 'n', ' ', 't', 'r', 'u', 'e', ',', ' ', 'u', 's', 'e', ' ', 't', 'h', 'e', ' ', 'd', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '.', ' ', 'I', 'f', ' ', 't', 'h', 'e', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'e', 'd', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'd', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', ',', ' ', 'd', 'i', 's', 'p', 'a', 't', 'c', 'h', 'i', 'n', 'g', ' ', 't', 'h', 'e', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', ' ', 'M', 'A', 'Y', ' ', 'r', 'e', 't', 'u', 'r', 'n', ' ', 'a', 'n', ' ', 'e', 'r', 'r', 'o', 'r', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'S', 'D', 'K', ':', ':', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'O', 'v', 'e', 'r', 'r', 'i', 'd', 'e', ' ', 't', 'h', 'e', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'u', 's', 'e', 'd', ' ', 't', 'o', ' ', 's', 'e', 'n', 'd', ' ', 't', 'h', 'i', 's', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'S', 't', 'r', 'i', 'n', 'g', '"', '}', ',', '"', 'F', 'o', 'r', 'c', 'e', 'P', 'a', 't', 'h', 'S', 't', 'y', 'l', 'e', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'S', '3', ':', ':', 'F', 'o', 'r', 'c', 'e', 'P', 'a', 't', 'h', 'S', 't', 'y', 'l', 'e', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'd', 'e', 'f', 'a', 'u', 'l', 't', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'W', 'h', 'e', 'n', ' ', 't', 'r', 'u', 'e', ',', ' ', 'f', 'o', 'r', 'c', 'e', ' ', 'a', ' ', 'p', 'a', 't', 'h', '-', 's', 't', 'y', 'l', 'e', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 't', 'o', ' ', 'b', 'e', ' ', 'u', 's', 'e', 'd', ' ', 'w', 'h', 'e', 'r', 'e', ' ', 't', 'h', 'e', ' ', 'b', 'u', 'c', 'k', 'e', 't', ' ', 'n', 'a', 'm', 'e', ' ', 'i', 's', ' ', 'p', 'a', 'r', 't', ' ', 'o', 'f', ' ', 't', 'h', 'e', ' ', 'p', 'a', 't', 'h', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'S', '3', ':', ':', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'd', 'e', 'f', 'a', 'u', 'l', 't', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'W', 'h', 'e', 'n', ' ', 't', 'r', 'u', 'e', ',', ' ', 'u', 's', 'e', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', ' ', 'N', 'O', 'T', 'E', ':', ' ', 'N', 'o', 't', ' ', 'a', 'l', 'l', ' ', 
'r', 'e', 'g', 'i', 'o', 'n', 's', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'S', '3', ' ', 'a', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'S', '3', ':', ':', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'd', 'e', 'f', 'a', 'u', 'l', 't', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'W', 'h', 'e', 't', 'h', 'e', 'r', ' ', 't', 'h', 'e', ' ', 'g', 'l', 'o', 'b', 'a', 'l', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 's', 'h', 'o', 'u', 'l', 'd', ' ', 'b', 'e', ' ', 'u', 's', 'e', 'd', ',', ' ', 'r', 'a', 't', 'h', 'e', 'r', ' ', 't', 'h', 'e', 'n', ' ', 't', 'h', 'e', ' ', 'r', 'e', 'g', 'i', 'o', 'n', 'a', 'l', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'f', 'o', 'r', ' ', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'U', 's', 'e', 'O', 'b', 'j', 'e', 'c', 't', 'L', 'a', 'm', 'b', 'd', 'a', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'I', 'n', 't', 'e', 'r', 'n', 'a', 'l', ' ', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ', 't', 'o', ' ', 'u', 's', 'e', ' ', 'o', 'b', 'j', 'e', 'c', 't', ' ', 'l', 'a', 'm', 'b', 'd', 'a', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'f', 'o', 'r', ' ', 'a', 'n', ' ', 'o', 'p', 'e', 'r', 'a', 't', 'i', 'o', 'n', ' ', '(', 'e', 'g', ':', ' ', 'W', 'r', 'i', 't', 'e', 'G', 'e', 't', 'O', 'b', 'j', 'e', 'c', 't', 'R', 'e', 's', 'p', 'o', 'n', 's', 'e', ')', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'K', 'e', 'y', '"', ':', '{', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'T', 'h', 'e', ' ', 'S', '3', ' ', 'K', 'e', 'y', ' ', 'u', 's', 'e', 'd', ' ', 't', 'o', ' ', 's', 'e', 'n', 'd', ' ', 't', 'h', 'e', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', '.', ' ', 'T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', 'n', ' ', 'o', 'p', 't', 'i', 'o', 'n', 'a', 'l', ' ', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ', 't', 'h', 'a', 't', ' ', 'w', 'i', 'l', 'l', ' ', 'b', 'e', ' ', 's', 'e', 't', ' ', 'a', 'u', 't', 'o', 'm', 'a', 't', 'i', 'c', 'a', 'l', 'l', 'y', ' ', 'f', 'o', 'r', ' ', 'o', 'p', 'e', 'r', 'a', 't', 'i', 'o', 'n', 's', ' ', 't', 'h', 'a', 't', ' ', 'a', 'r', 'e', ' ', 's', 'c', 'o', 'p', 'e', 'd', ' ', 't', 'o', ' ', 'a', 'n', ' ', 'S', '3', ' ', 'K', 'e', 'y', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'S', 't', 'r', 'i', 'n', 'g', '"', '}', ',', '"', 'P', 'r', 'e', 'f', 'i', 'x', '"', ':', '{', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'T', 'h', 'e', ' ', 'S', '3', ' ', 'P', 'r', 'e', 'f', 'i', 'x', ' ', 'u', 's', 'e', 'd', ' ', 
't', 'o', ' ', 's', 'e', 'n', 'd', ' ', 't', 'h', 'e', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', '.', ' ', 'T', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', 'n', ' ', 'o', 'p', 't', 'i', 'o', 'n', 'a', 'l', ' ', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ', 't', 'h', 'a', 't', ' ', 'w', 'i', 'l', 'l', ' ', 'b', 'e', ' ', 's', 'e', 't', ' ', 'a', 'u', 't', 'o', 'm', 'a', 't', 'i', 'c', 'a', 'l', 'l', 'y', ' ', 'f', 'o', 'r', ' ', 'o', 'p', 'e', 'r', 'a', 't', 'i', 'o', 'n', 's', ' ', 't', 'h', 'a', 't', ' ', 'a', 'r', 'e', ' ', 's', 'c', 'o', 'p', 'e', 'd', ' ', 't', 'o', ' ', 'a', 'n', ' ', 'S', '3', ' ', 'P', 'r', 'e', 'f', 'i', 'x', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'S', 't', 'r', 'i', 'n', 'g', '"', '}', ',', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', ':', '{', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'I', 'n', 't', 'e', 'r', 'n', 'a', 'l', ' ', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ', 't', 'o', ' ', 'd', 'i', 's', 'a', 'b', 'l', 'e', ' ', 'A', 'c', 'c', 'e', 's', 's', ' ', 'P', 'o', 'i', 'n', 't', ' ', 'B', 'u', 'c', 'k', 'e', 't', 's', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'M', 'u', 'l', 't', 'i', 'R', 'e', 'g', 'i', 'o', 'n', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'S', '3', ':', ':', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'M', 'u', 'l', 't', 'i', 'R', 'e', 'g', 'i', 'o', 'n', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'd', 'e', 'f', 'a', 'u', 'l', 't', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'W', 'h', 'e', 't', 'h', 'e', 'r', ' ', 'm', 'u', 'l', 't', 'i', '-', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'a', 'c', 'c', 'e', 's', 's', ' ', 'p', 'o', 'i', 'n', 't', 's', ' ', '(', 'M', 'R', 'A', 'P', ')', ' ', 's', 'h', 'o', 'u', 'l', 'd', ' ', 'b', 'e', ' ', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'd', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '{', '"', 'b', 'u', 'i', 'l', 't', 'I', 'n', '"', ':', '"', 'A', 'W', 'S', ':', ':', 'S', '3', ':', ':', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', ',', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'W', 'h', 'e', 'n', ' ', 'a', 'n', ' ', 'A', 'c', 'c', 'e', 's', 's', ' ', 'P', 'o', 'i', 'n', 't', ' ', 'A', 'R', 'N', ' ', 'i', 's', ' ', 'p', 'r', 'o', 'v', 'i', 'd', 'e', 'd', ' ', 'a', 'n', 'd', ' ', 't', 'h', 'i', 's', ' ', 'f', 'l', 'a', 'g', ' ', 'i', 's', ' ', 'e', 'n', 'a', 'b', 'l', 'e', 'd', ',', ' ', 't', 'h', 'e', ' ', 'S', 'D', 'K', ' ', 'M', 'U', 'S', 'T', ' ', 'u', 's', 'e', ' ', 't', 'h', 'e', ' ', 'A', 'R', 'N', '\'', 's', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'w', 'h', 'e', 'n', ' ', 'c', 'o', 'n', 's', 't', 'r', 'u', 'c', 't', 'i', 'n', 'g', ' ', 't', 'h', 'e', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'i', 'n', 's', 't', 'e', 'a', 'd', ' 
', 'o', 'f', ' ', 't', 'h', 'e', ' ', 'c', 'l', 'i', 'e', 'n', 't', '\'', 's', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'e', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'U', 's', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'C', 'o', 'n', 't', 'r', 'o', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'I', 'n', 't', 'e', 'r', 'n', 'a', 'l', ' ', 'p', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ', 't', 'o', ' ', 'i', 'n', 'd', 'i', 'c', 'a', 't', 'e', ' ', 'w', 'h', 'e', 't', 'h', 'e', 'r', ' ', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', ' ', 'o', 'p', 'e', 'r', 'a', 't', 'i', 'o', 'n', ' ', 's', 'h', 'o', 'u', 'l', 'd', ' ', 'u', 's', 'e', ' ', 'c', 'o', 'n', 't', 'r', 'o', 'l', ' ', 'p', 'l', 'a', 'n', 'e', ',', ' ', '(', 'e', 'x', '.', ' ', 'C', 'r', 'e', 'a', 't', 'e', 'B', 'u', 'c', 'k', 'e', 't', ')', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', ',', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'S', 'e', 's', 's', 'i', 'o', 'n', 'A', 'u', 't', 'h', '"', ':', '{', '"', 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd', '"', ':', 'f', 'a', 'l', 's', 'e', ',', '"', 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'a', 't', 'i', 'o', 'n', '"', ':', '"', 'P', 'a', 'r', 'a', 'm', 'e', 't', 'e', 'r', ' ', 't', 'o', ' ', 'i', 'n', 'd', 'i', 'c', 'a', 't', 'e', ' ', 'w', 'h', 'e', 't', 'h', 'e', 'r', ' ', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', ' ', 's', 'e', 's', 's', 'i', 'o', 'n', ' ', 'a', 'u', 't', 'h', ' ', 's', 'h', 'o', 'u', 'l', 'd', ' ', 'b', 'e', ' ', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'd', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'B', 'o', 'o', 'l', 'e', 'a', 'n', '"', '}', '}', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', ' ', 'c', 'a', 'n', 'n', 'o', 't', ' ', 'b', 'e', ' ', 'u', 's', 'e', 'd', ' ', 'w', 'i', 't', 'h', ' ', 'F', 'I', 'P', 'S', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', 
'{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'C', 'a', 'n', 'n', 'o', 't', ' ', 's', 'e', 't', ' ', 'd', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', ' ', 'i', 'n', ' ', 'c', 'o', 'm', 'b', 'i', 'n', 'a', 't', 'i', 'o', 'n', ' ', 'w', 'i', 't', 'h', ' ', 'a', ' ', 'c', 'u', 's', 't', 'o', 'm', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'A', ' ', 'c', 'u', 's', 't', 'o', 'm', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'c', 'a', 'n', 'n', 'o', 't', ' ', 'b', 'e', ' ', 'c', 'o', 'm', 'b', 'i', 'n', 'e', 'd', ' ', 'w', 'i', 't', 'h', ' ', 'F', 'I', 'P', 'S', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'A', ' ', 'c', 'u', 's', 't', 'o', 'm', ' ', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', ' ', 'c', 'a', 'n', 'n', 'o', 't', ' ', 'b', 'e', ' ', 'c', 'o', 'm', 'b', 'i', 'n', 'e', 'd', ' ', 'w', 'i', 't', 'h', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 
's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ',', '"', 'n', 'a', 'm', 'e', '"', ']', '}', ',', '"', 'a', 'w', 's', '-', 'c', 'n', '"', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'F', 'I', 'P', 'S', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', '0', ',', '6', ',', 't', 'r', 'u', 'e', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'S', 'u', 'f', 'f', 'i', 'x', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'S', 'u', 'f', 'f', 'i', 'x', '"', '}', ',', '"', '-', '-', 'x', '-', 's', '3', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'D', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', 
':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'S', 'e', 's', 's', 'i', 'o', 'n', 'A', 'u', 't', 'h', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'S', 'e', 's', 's', 'i', 'o', 'n', 'A', 'u', 't', 'h', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'u', 'r', 'i', 'E', 'n', 'c', 'o', 'd', 'e', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', 
'"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'i', 's', 'V', 'i', 'r', 't', 'u', 'a', 'l', 'H', 'o', 's', 't', 'a', 'b', 'l', 'e', 'S', '3', 'B', 'u', 'c', 'k', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', ' ', 'b', 'u', 'c', 'k', 'e', 't', ' ', 'n', 'a', 'm', 'e', ' ', 'i', 's', ' ', 'n', 'o', 't', ' ', 'a', ' ', 'v', 'a', 'l', 'i', 'd', ' ', 'v', 'i', 'r', 't', 'u', 'a', 'l', ' ', 'h', 'o', 's', 't', 'a', 'b', 'l', 'e', ' ', 'n', 'a', 'm', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'u', 'r', 'i', 'E', 'n', 'c', 'o', 'd', 'e', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'i', '_', 'e', 'n', 
'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '-', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'i', 's', 'V', 'i', 'r', 't', 'u', 'a', 'l', 'H', 'o', 's', 't', 'a', 'b', 'l', 'e', 'S', '3', 'B', 'u', 'c', 'k', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '-', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 
't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', ' ', 'b', 'u', 'c', 'k', 'e', 't', ' ', 'n', 'a', 'm', 'e', ' ', 'i', 's', ' ', 'n', 'o', 't', ' ', 'a', ' ', 'v', 'a', 'l', 'i', 'd', ' ', 'v', 'i', 'r', 't', 'u', 'a', 'l', ' ', 'h', 'o', 's', 't', 'a', 'b', 'l', 'e', ' ', 'n', 'a', 'm', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'C', 'o', 'n', 't', 'r', 'o', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'C', 'o', 'n', 't', 'r', 'o', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'u', 'r', 'i', 'E', 'n', 'c', 'o', 'd', 'e', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', '-', 'f', 'i', 'p', 's', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', 'a', 'm', 'a', 'z', 'o', 'n', 'a', 'w', 's', '.', 'c', 'o', 'm', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 
'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', 'a', 'm', 'a', 'z', 'o', 'n', 'a', 'w', 's', '.', 'c', 'o', 'm', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'i', 's', 'V', 'i', 'r', 't', 'u', 'a', 'l', 'H', 'o', 's', 't', 'a', 'b', 'l', 'e', 'S', '3', 'B', 'u', 'c', 'k', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'S', 'e', 's', 's', 'i', 'o', 'n', 'A', 'u', 't', 'h', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', 'S', 'e', 's', 's', 'i', 'o', 'n', 'A', 'u', 't', 'h', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 
'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', '6', ',', '1', '4', ',', 't', 'r', 'u', 'e', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'I', 'd', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', '1', '4', ',', '1', '6', ',', 't', 'r', 'u', 'e', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'D', 'e', 'l', 'i', 'm', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'D', 'e', 'l', 'i', 'm', '"', '}', ',', '"', '-', '-', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '-', 'f', 'i', 'p', 's', '-', '{', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'I', 'd', '}', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', 'a', 'm', 'a', 'z', 'o', 'n', 'a', 'w', 's', '.', 'c', 'o', 'm', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '-', '{', 's', 
'3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'I', 'd', '}', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', 'a', 'm', 'a', 'z', 'o', 'n', 'a', 'w', 's', '.', 'c', 'o', 'm', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '"', ':', '"', 'S', '3', 'E', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', '6', ',', '1', '5', ',', 't', 'r', 'u', 'e', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'I', 'd', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', '1', '5', ',', '1', '7', ',', 't', 'r', 'u', 'e', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'D', 'e', 'l', 'i', 'm', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'D', 'e', 'l', 'i', 'm', '"', '}', ',', '"', '-', '-', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', '-', 'f', 'i', 'p', 's', '-', '{', 's', '3', 'e', 'x', 'p', 'r', 'e', 's', 's', 'A', 'v', 'a', 'i', 'l', 'a', 'b', 'i', 'l', 'i', 't', 'y', 'Z', 'o', 'n', 'e', 'I', 'd', '}', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', 'a', 'm', 'a', 
[... fragment of the embedded S3 endpoint-resolution ruleset JSON, stored one character per array element in the packaged source; only part of the array falls in this span. Decoded, the rules in this span cover:
 - S3 Express (directory bucket) virtual-host endpoints: the availability-zone ID and the "--" delimiter are extracted from the bucket name with substring(), producing https://{Bucket}.s3express[-fips]-{s3expressAvailabilityZoneId}.{Region}.amazonaws.com endpoints with "backend":"S3Express" and sigv4 / sigv4-s3express auth schemes, plus the errors "Unrecognized S3Express bucket name format." and "S3Express bucket name is not a valid virtual hostable name."
 - S3 Express control-plane endpoints (UseS3ExpressControlEndpoint set and true, no Bucket): a parsed custom Endpoint when one is supplied, otherwise https://s3express-control[-fips].{Region}.amazonaws.com.
 - S3 Outposts bucket-alias ("--op-s3") endpoints: substring() pulls hardwareType, regionPrefix, bucketAliasSuffix, and outpostId from the alias; hardware type "e" resolves to {Bucket}.ec2.s3-outposts.{Region}.{regionPartition#dnsSuffix} (or {Bucket}.ec2.{url#authority} for a custom beta endpoint), hardware type "o" to the corresponding {Bucket}.op-{outpostId}. forms, all signed with signingName "s3-outposts"; unrecognized hardware types and invalid outpost IDs produce error rules.
 - Standard virtual-hosted S3 endpoints, guarded by ForcePathStyle == false and aws.isVirtualHostableS3Bucket(Bucket): the UseFIPS / UseDualStack / Accelerate / UseGlobalEndpoint / aws-global combinations select URLs such as {Bucket}.s3-fips.dualstack.us-east-1.{partitionResult#dnsSuffix}, {Bucket}.s3-fips.{Region}.{partitionResult#dnsSuffix}, and {Bucket}.s3-accelerate.dualstack.{partitionResult#dnsSuffix}, all signed with sigv4 and signingName "s3"; Accelerate in the aws-cn partition yields the error "S3 Accelerate cannot be used in this region". ...]
'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', 
'[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 
'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 
's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 
'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 
's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', 
'[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 
'"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', 
'"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 
'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', 
'}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 'i', 's', 'I', 'p', '"', ']', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 
'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', 
':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', 
'"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 
'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', 
'"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', 
':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', 's', '3', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ':', ' ', 'r', 'e', 'g', 'i', 'o', 
'n', ' ', 'w', 'a', 's', ' ', 'n', 'o', 't', ' ', 'a', ' ', 'v', 'a', 'l', 'i', 'd', ' ', 'D', 'N', 'S', ' ', 'n', 'a', 'm', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '"', 's', 'c', 'h', 'e', 'm', 'e', '"', ']', '}', ',', '"', 'h', 't', 't', 'p', '"', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'i', 's', 'V', 'i', 'r', 't', 'u', 'a', 'l', 'H', 'o', 's', 't', 'a', 'b', 'l', 'e', 'S', '3', 'B', 'u', 'c', 'k', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'F', 'o', 'r', 'c', 'e', 'P', 'a', 't', 'h', 'S', 't', 'y', 'l', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 
'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ':', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'w', 'a', 's', ' ', 'n', 'o', 't', ' ', 'a', ' ', 'v', 'a', 'l', 'i', 'd', ' ', 'D', 'N', 'S', ' ', 'n', 'a', 'm', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'F', 'o', 'r', 'c', 'e', 'P', 'a', 't', 'h', 'S', 't', 'y', 'l', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 's', 'e', 'A', 'r', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '0', ']', '"', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'a', 'r', 'n', 'T', 'y', 'p', 'e', '"', '}', ',', 
'{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'r', 'n', 'T', 'y', 'p', 'e', '"', '}', ',', '"', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 's', 'e', 'r', 'v', 'i', 'c', 'e', '"', ']', '}', ',', '"', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'r', 'n', 'T', 'y', 'p', 'e', '"', '}', ',', '"', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '1', ']', '"', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ',', '"', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'O', 'b', 'j', 'e', 'c', 't', ' ', 'L', 'a', 'm', 'b', 'd', 'a', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'D', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', 
',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'O', 'b', 'j', 'e', 'c', 't', ' ', 'L', 'a', 'm', 'b', 'd', 'a', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', '"', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'A', 'c', 'c', 'e', 's', 's', ' ', 'p', 'o', 'i', 'n', 't', 's', ' ', 'a', 'r', 'e', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', 'e', 'd', ' ', 'f', 'o', 'r', ' ', 't', 'h', 'i', 's', ' ', 'o', 'p', 'e', 'r', 'a', 't', 'i', 'o', 'n', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '2', ']', '"', ']', '}', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 
'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', ']', '}', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'a', 't', 'i', 'o', 'n', ':', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'f', 'r', 'o', 'm', ' ', 'A', 'R', 'N', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 'm', 'a', 't', 'c', 'h', ' ', 'c', 'l', 'i', 'e', 'n', 't', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', '`', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', 'a', 'n', 'd', ' ', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', ' ', 'i', 's', ' ', '`', 'f', 'a', 'l', 's', 'e', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ',', '"', 'n', 'a', 'm', 'e', '"', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ',', '"', 'n', 'a', 'm', 'e', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', 
'"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '"', ']', '}', ',', '"', '"', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'M', 'i', 's', 's', 'i', 'n', 'g', ' ', 'a', 'c', 'c', 'o', 'u', 'n', 't', ' ', 'i', 'd', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '"', ']', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 
's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '-', 'f', 'i', 'p', 's', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', 
':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'a', 'c', 'c', 'e', 's', 's', ' ', 'p', 'o', 'i', 'n', 't', ' ', 'n', 'a', 'm', 'e', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', '-', 'z', ',', ' ', 'A', '-', 'Z', ',', ' ', '0', '-', '9', ' ', 'a', 'n', 'd', ' ', '`', '-', '`', '.', ' ', 'F', 'o', 'u', 'n', 'd', ':', ' ', '`', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'a', 'c', 'c', 'o', 'u', 'n', 't', ' ', 'i', 'd', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', '-', 'z', ',', ' ', 'A', '-', 'Z', ',', ' ', '0', '-', '9', ' ', 'a', 'n', 'd', ' ', '`', '-', '`', '.', ' ', 'F', 'o', 'u', 'n', 'd', ':', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'i', 'n', ' ', 'A', 'R', 'N', ':', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', '(', 'i', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'D', 'N', 'S', ' ', 'n', 'a', 'm', 'e', ')', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'C', 'l', 'i', 'e', 'n', 't', ' ', 'w', 'a', 's', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'e', 'd', ' ', 'f', 'o', 'r', ' ', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', ' ', '`', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'n', 'a', 'm', 'e', '}', '`', ' ', 'b', 'u', 't', ' ', 'A', 'R', 'N', ' ', '(', '`', '{', 'B', 'u', 'c', 
'k', 'e', 't', '}', '`', ')', ' ', 'h', 'a', 's', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'n', 'a', 'm', 'e', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'A', 'R', 'N', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', ' ', 's', 'i', 'n', 'g', 'l', 'e', ' ', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', ' ', 'c', 'o', 'm', 'p', 'o', 'n', 'e', 'n', 't', ' ', 'a', 'f', 't', 'e', 'r', ' ', '`', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '`', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'b', 'u', 'c', 'k', 'e', 't', ' ', 'A', 'R', 'N', ' ', 'i', 's', ' ', 'm', 'i', 's', 's', 'i', 'n', 'g', ' ', 'a', ' ', 'r', 'e', 'g', 'i', 'o', 'n', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'E', 'x', 'p', 'e', 'c', 't', 'e', 'd', ' ', 'a', ' ', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', ' ', 'o', 'f', ' ', 't', 'h', 'e', ' ', 'f', 'o', 'r', 'm', 'a', 't', ' ', '`', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', ':', '<', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', ' ', 'n', 'a', 'm', 'e', '>', '`', ' ', 'b', 'u', 't', ' ', 'n', 'o', ' ', 'n', 'a', 'm', 'e', ' ', 'w', 'a', 's', ' ', 'p', 'r', 'o', 'v', 'i', 'd', 'e', 'd', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'O', 'b', 'j', 'e', 'c', 't', ' ', 'L', 'a', 'm', 'b', 'd', 'a', ' ', 'A', 'R', 'N', 's', ' ', 'o', 'n', 'l', 'y', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', '`', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '`', ' ', 'a', 'r', 'n', ' ', 't', 'y', 'p', 'e', 's', ',', ' ', 'b', 'u', 't', ' ', 'f', 'o', 'u', 'n', 'd', ':', ' ', '`', '{', 'a', 'r', 'n', 'T', 'y', 'p', 'e', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', 
'"', 'r', 'e', 'f', '"', ':', '"', 'a', 'r', 'n', 'T', 'y', 'p', 'e', '"', '}', ',', '"', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '1', ']', '"', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ',', '"', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', '"', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'r', 'n', 'T', 'y', 'p', 'e', '"', '}', ',', '"', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', '"', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 
'l', 'e', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'A', 'c', 'c', 'e', 's', 's', ' ', 'p', 'o', 'i', 'n', 't', 's', ' ', 'a', 'r', 'e', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', 'e', 'd', ' ', 'f', 'o', 'r', ' ', 't', 'h', 'i', 's', ' ', 'o', 'p', 'e', 'r', 'a', 't', 'i', 'o', 'n', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '2', ']', '"', ']', '}', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', ']', '}', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'a', 't', 'i', 'o', 'n', ':', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'f', 'r', 'o', 'm', ' ', 'A', 'R', 'N', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 'm', 'a', 't', 'c', 'h', ' ', 'c', 'l', 'i', 'e', 'n', 't', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', '`', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', 'a', 'n', 'd', ' ', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', ' ', 'i', 's', ' ', '`', 'f', 'a', 'l', 's', 'e', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 
'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ',', '"', 'n', 'a', 'm', 'e', '"', ']', '}', ',', '"', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'n', 'a', 'm', 'e', '}', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 's', 'e', 'r', 'v', 'i', 'c', 'e', '"', ']', '}', ',', '"', 's', '3', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '"', ']', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', 
'"', 'r', 'e', 'f', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'A', 'c', 'c', 'e', 's', 's', ' ', 'P', 'o', 'i', 'n', 't', 's', ' ', 'd', 'o', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '-', 'f', 'i', 'p', 's', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 
'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '-', 'f', 'i', 'p', 's', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 
's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', 
'}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', 's', '3', '-', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'a', 'c', 'c', 'e', 's', 's', ' ', 'p', 'o', 'i', 'n', 't', ' ', 'n', 'a', 'm', 'e', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', '-', 'z', ',', ' ', 'A', '-', 'Z', ',', ' ', '0', '-', '9', ' ', 'a', 'n', 'd', ' ', '`', '-', '`', '.', ' ', 'F', 'o', 'u', 'n', 'd', ':', ' ', '`', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'a', 'c', 'c', 'o', 'u', 'n', 't', ' ', 'i', 'd', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', '-', 'z', ',', ' ', 'A', '-', 'Z', ',', ' ', '0', '-', '9', ' ', 'a', 'n', 'd', ' ', '`', '-', '`', '.', ' ', 'F', 'o', 'u', 'n', 'd', ':', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'A', 'R', 'N', ' ', 'w', 'a', 's', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'r', ' ', 't', 'h', 'e', ' ', 'S', '3', ' ', 's', 'e', 'r', 'v', 'i', 'c', 'e', ',', ' ', 'f', 'o', 
'u', 'n', 'd', ':', ' ', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 's', 'e', 'r', 'v', 'i', 'c', 'e', '}', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'i', 'n', ' ', 'A', 'R', 'N', ':', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', '(', 'i', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'D', 'N', 'S', ' ', 'n', 'a', 'm', 'e', ')', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'C', 'l', 'i', 'e', 'n', 't', ' ', 'w', 'a', 's', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'e', 'd', ' ', 'f', 'o', 'r', ' ', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', ' ', '`', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'n', 'a', 'm', 'e', '}', '`', ' ', 'b', 'u', 't', ' ', 'A', 'R', 'N', ' ', '(', '`', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '`', ')', ' ', 'h', 'a', 's', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'n', 'a', 'm', 'e', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'A', 'R', 'N', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', ' ', 's', 'i', 'n', 'g', 'l', 'e', ' ', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', ' ', 'c', 'o', 'm', 'p', 'o', 'n', 'e', 'n', 't', ' ', 'a', 'f', 't', 'e', 'r', ' ', '`', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '`', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 
'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'M', 'R', 'A', 'P', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'd', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'M', 'R', 'A', 'P', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'F', 'I', 'P', 'S', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'M', 'R', 'A', 'P', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'D', 'i', 's', 'a', 'b', 'l', 'e', 'M', 'u', 'l', 't', 'i', 'R', 'e', 'g', 'i', 'o', 'n', 'A', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 's', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'a', 't', 'i', 'o', 'n', ':', ' ', 'M', 'u', 'l', 't', 'i', '-', 'R', 'e', 'g', 'i', 'o', 'n', ' ', 'A', 'c', 'c', 'e', 's', 's', ' ', 'P', 'o', 'i', 'n', 't', ' ', 'A', 'R', 'N', 's', ' ', 'a', 'r', 'e', ' ', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'd', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'm', 'r', 'a', 'p', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'm', 'r', 'a', 'p', 
'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ',', '"', 'n', 'a', 'm', 'e', '"', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '.', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '.', 's', '3', '-', 'g', 'l', 'o', 'b', 'a', 'l', '.', '{', 'm', 'r', 'a', 'p', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', 'a', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', 'S', 'e', 't', '"', ':', '[', '"', '*', '"', ']', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'C', 'l', 'i', 'e', 'n', 't', ' ', 'w', 'a', 's', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'e', 'd', ' ', 'f', 'o', 'r', ' ', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', ' ', '`', '{', 'm', 'r', 'a', 'p', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'n', 'a', 'm', 'e', '}', '`', ' ', 'b', 'u', 't', ' ', 'b', 'u', 'c', 'k', 'e', 't', ' ', 'r', 'e', 'f', 'e', 'r', 'r', 'e', 'd', ' ', 't', 'o', ' ', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'c', 'c', 'e', 's', 's', ' ', 'P', 'o', 'i', 'n', 't', ' ', 'N', 'a', 'm', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'E', 'x', 'p', 'e', 'c', 't', 'e', 'd', ' ', 'a', ' ', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', ' ', 'o', 'f', ' ', 
't', 'h', 'e', ' ', 'f', 'o', 'r', 'm', 'a', 't', ' ', '`', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', ':', '<', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', ' ', 'n', 'a', 'm', 'e', '>', '`', ' ', 'b', 'u', 't', ' ', 'n', 'o', ' ', 'n', 'a', 'm', 'e', ' ', 'w', 'a', 's', ' ', 'p', 'r', 'o', 'v', 'i', 'd', 'e', 'd', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 's', 'e', 'r', 'v', 'i', 'c', 'e', '"', ']', '}', ',', '"', 's', '3', '-', 'o', 'u', 't', 'p', 'o', 's', 't', 's', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'O', 'u', 't', 'p', 'o', 's', 't', 's', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'D', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'O', 'u', 't', 'p', 'o', 's', 't', 's', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'F', 'I', 'P', 'S', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'O', 'u', 't', 'p', 'o', 's', 't', 's', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', 
':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '4', ']', '"', ']', '}', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'r', 'n', ':', ' ', 'O', 'u', 't', 'p', 'o', 's', 't', ' ', 'A', 'c', 'c', 'e', 's', 's', ' ', 'P', 'o', 'i', 'n', 't', ' ', 'A', 'R', 'N', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', 's', ' ', 's', 'u', 'b', ' ', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 's', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '1', ']', '"', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'o', 'u', 't', 'p', 'o', 's', 't', 'I', 'd', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'o', 'u', 't', 'p', 'o', 's', 't', 'I', 'd', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', ']', '}', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'a', 't', 'i', 'o', 'n', ':', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'f', 'r', 'o', 'm', ' ', 'A', 'R', 'N', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 'm', 'a', 't', 'c', 'h', ' ', 'c', 'l', 'i', 'e', 'n', 't', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', '`', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', 'a', 'n', 'd', ' ', 'U', 's', 'e', 'A', 'r', 'n', 'R', 'e', 'g', 'i', 'o', 'n', ' ', 'i', 's', ' ', '`', 'f', 'a', 'l', 's', 'e', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', 
'}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', '}', ',', '"', 'n', 'a', 'm', 'e', '"', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ',', '"', 'n', 'a', 'm', 'e', '"', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 'g', 'i', 'o', 'n', '"', ']', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '"', ']', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 
'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '2', ']', '"', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'o', 'u', 't', 'p', 'o', 's', 't', 'T', 'y', 'p', 'e', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'g', 'e', 't', 'A', 't', 't', 'r', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '"', '}', ',', '"', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', 'I', 'd', '[', '3', ']', '"', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'o', 'u', 't', 'p', 'o', 's', 't', 'T', 'y', 'p', 'e', '"', '}', ',', '"', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '"', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', '{', 'o', 'u', 't', 'p', 'o', 's', 't', 'I', 'd', '}', '.', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '-', 'o', 'u', 't', 'p', 'o', 's', 't', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', '{', 'a', 'c', 'c', 'e', 's', 's', 'P', 'o', 'i', 'n', 
't', 'N', 'a', 'm', 'e', '}', '-', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '.', '{', 'o', 'u', 't', 'p', 'o', 's', 't', 'I', 'd', '}', '.', 's', '3', '-', 'o', 'u', 't', 'p', 'o', 's', 't', 's', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '-', 'o', 'u', 't', 'p', 'o', 's', 't', 's', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'E', 'x', 'p', 'e', 'c', 't', 'e', 'd', ' ', 'a', 'n', ' ', 'o', 'u', 't', 'p', 'o', 's', 't', ' ', 't', 'y', 'p', 'e', ' ', '`', 'a', 'c', 'c', 'e', 's', 's', 'p', 'o', 'i', 'n', 't', '`', ',', ' ', 'f', 'o', 'u', 'n', 'd', ' ', '{', 'o', 'u', 't', 'p', 'o', 's', 't', 'T', 'y', 'p', 'e', '}', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'e', 'x', 'p', 'e', 'c', 't', 'e', 'd', ' ', 'a', 'n', ' ', 'a', 'c', 'c', 'e', 's', 's', ' ', 'p', 'o', 'i', 'n', 't', ' ', 'n', 'a', 'm', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'E', 'x', 'p', 'e', 'c', 't', 'e', 'd', ' ', 'a', ' ', '4', '-', 'c', 'o', 'm', 'p', 'o', 'n', 'e', 'n', 't', ' ', 'r', 'e', 's', 'o', 'u', 'r', 'c', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'a', 'c', 'c', 'o', 'u', 'n', 't', ' ', 'i', 'd', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', '-', 'z', ',', ' ', 'A', '-', 'Z', ',', ' ', '0', '-', '9', ' ', 'a', 'n', 'd', ' ', '`', '-', '`', '.', ' ', 
'F', 'o', 'u', 'n', 'd', ':', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'a', 'c', 'c', 'o', 'u', 'n', 't', 'I', 'd', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'i', 'n', ' ', 'A', 'R', 'N', ':', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'A', 'r', 'n', '#', 'r', 'e', 'g', 'i', 'o', 'n', '}', '`', ' ', '(', 'i', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'D', 'N', 'S', ' ', 'n', 'a', 'm', 'e', ')', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'C', 'l', 'i', 'e', 'n', 't', ' ', 'w', 'a', 's', ' ', 'c', 'o', 'n', 'f', 'i', 'g', 'u', 'r', 'e', 'd', ' ', 'f', 'o', 'r', ' ', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', ' ', '`', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'n', 'a', 'm', 'e', '}', '`', ' ', 'b', 'u', 't', ' ', 'A', 'R', 'N', ' ', '(', '`', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '`', ')', ' ', 'h', 'a', 's', ' ', '`', '{', 'b', 'u', 'c', 'k', 'e', 't', 'P', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '#', 'n', 'a', 'm', 'e', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'o', 'u', 't', 'p', 'o', 's', 't', ' ', 'I', 'd', ' ', 'm', 'a', 'y', ' ', 'o', 'n', 'l', 'y', ' ', 'c', 'o', 'n', 't', 'a', 'i', 'n', ' ', 'a', '-', 'z', ',', ' ', 'A', '-', 'Z', ',', ' ', '0', '-', '9', ' ', 'a', 'n', 'd', ' ', '`', '-', '`', '.', ' ', 'F', 'o', 'u', 'n', 'd', ':', ' ', '`', '{', 'o', 'u', 't', 'p', 'o', 's', 't', 'I', 'd', '}', '`', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'T', 'h', 'e', ' ', 'O', 'u', 't', 'p', 'o', 's', 't', ' ', 'I', 'd', ' ', 'w', 'a', 's', ' ', 'n', 'o', 't', ' ', 's', 'e', 't', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'U', 'n', 'r', 'e', 'c', 'o', 'g', 'n', 'i', 'z', 'e', 'd', ' ', 'f', 'o', 'r', 'm', 'a', 't', ':', ' ', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', ' ', '(', 't', 'y', 'p', 'e', ':', ' ', 
'{', 'a', 'r', 'n', 'T', 'y', 'p', 'e', '}', ')', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', 'N', 'o', ' ', 'A', 'R', 'N', ' ', 't', 'y', 'p', 'e', ' ', 's', 'p', 'e', 'c', 'i', 'f', 'i', 'e', 'd', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 'u', 'b', 's', 't', 'r', 'i', 'n', 'g', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ',', '0', ',', '4', ',', 'f', 'a', 'l', 's', 'e', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'a', 'r', 'n', 'P', 'r', 'e', 'f', 'i', 'x', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'a', 'r', 'n', 'P', 'r', 'e', 'f', 'i', 'x', '"', '}', ',', '"', 'a', 'r', 'n', ':', '"', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 's', 'e', 'A', 'r', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', '}', ']', '}', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', ':', ' ', '`', '{', 'B', 'u', 'c', 'k', 'e', 't', '}', '`', ' ', 'w', 'a', 's', ' ', 'n', 'o', 't', ' ', 'a', ' ', 'v', 'a', 'l', 'i', 'd', ' ', 'A', 'R', 'N', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'F', 'o', 'r', 'c', 'e', 'P', 'a', 't', 'h', 'S', 't', 'y', 'l', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 's', 'e', 'A', 'r', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'P', 'a', 't', 'h', '-', 's', 't', 'y', 'l', 'e', ' ', 'a', 'd', 'd', 'r', 'e', 's', 's', 'i', 'n', 'g', ' ', 'c', 'a', 'n', 'n', 'o', 't', ' ', 'b', 'e', ' ', 'u', 's', 'e', 'd', ' ', 'w', 'i', 't', 'h', ' ', 'A', 'R', 'N', ' ', 'b', 'u', 'c', 'k', 'e', 't', 's', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'u', 'r', 'i', 'E', 'n', 'c', 'o', 'd', 'e', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 
'e', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', 
'-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 
'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 
'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 
'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', 
'#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 
'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', 
'{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', 
':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', 
'"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 
'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'n', 'o', 'r', 'm', 'a', 'l', 'i', 'z', 'e', 'd', 'P', 'a', 't', 'h', '}', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', 
'"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 
'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', 
'"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '/', '{', 'u', 'r', 'i', '_', 'e', 'n', 'c', 'o', 'd', 'e', 'd', '_', 'b', 'u', 'c', 'k', 'e', 't', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'P', 'a', 't', 'h', '-', 's', 't', 'y', 'l', 'e', ' ', 'a', 'd', 'd', 'r', 'e', 's', 's', 'i', 'n', 'g', ' ', 'c', 'a', 'n', 'n', 'o', 't', ' ', 'b', 'e', ' ', 'u', 's', 'e', 'd', ' ', 'w', 'i', 't', 'h', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'O', 'b', 'j', 'e', 'c', 't', 'L', 'a', 'm', 'b', 'd', 'a', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', 
'"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'O', 'b', 'j', 'e', 'c', 't', 'L', 'a', 'm', 'b', 'd', 'a', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'O', 'b', 'j', 'e', 'c', 't', ' ', 'L', 'a', 'm', 'b', 'd', 'a', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'D', 'u', 'a', 'l', '-', 's', 't', 'a', 'c', 'k', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'S', '3', ' ', 'O', 'b', 'j', 'e', 'c', 't', ' ', 'L', 'a', 'm', 'b', 'd', 'a', ' ', 'd', 'o', 'e', 's', ' ', 'n', 'o', 't', ' ', 's', 'u', 'p', 'p', 'o', 'r', 't', ' ', 'S', '3', ' ', 'A', 'c', 'c', 'e', 'l', 'e', 'r', 'a', 't', 'e', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', 
'#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '-', 'f', 'i', 'p', 's', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', 
'-', 'o', 'b', 'j', 'e', 'c', 't', '-', 'l', 'a', 'm', 'b', 'd', 'a', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ':', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'w', 'a', 's', ' ', 'n', 'o', 't', ' ', 'a', ' ', 'v', 'a', 'l', 'i', 'd', ' ', 'D', 'N', 'S', ' ', 'n', 'a', 'm', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'B', 'u', 'c', 'k', 'e', 't', '"', '}', ']', '}', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'a', 'w', 's', '.', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '"', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'V', 'a', 'l', 'i', 'd', 'H', 'o', 's', 't', 'L', 'a', 'b', 'e', 'l', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', 
'"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 
'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', 
',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 
'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 
'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '-', 'f', 'i', 'p', 's', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', 
'"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', 
'"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', 'd', 'u', 'a', 'l', 's', 't', 'a', 'c', 'k', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 
'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 
'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 
'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'p', 'a', 'r', 's', 'e', 'U', 'R', 'L', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 'a', 's', 's', 'i', 'g', 'n', '"', ':', '"', 'u', 'r', 'l', '"', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', '{', 'u', 'r', 'l', '#', 's', 'c', 'h', 'e', 'm', 'e', '}', ':', '/', '/', '{', 'u', 'r', 'l', '#', 'a', 'u', 't', 'h', 'o', 'r', 'i', 't', 'y', '}', '{', 'u', 'r', 'l', '#', 'p', 'a', 't', 'h', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', 
'}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 't', 'r', 'u', 'e', ']', '}', ']', ',', '"', 'r', 'u', 'l', 'e', 's', '"', ':', '[', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'u', 's', '-', 'e', 'a', 's', 't', '-', '1', '"', ']', '}', 
']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'F', 'I', 'P', 'S', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'D', 'u', 'a', 'l', 'S', 't', 'a', 'c', 'k', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 'i', 's', 'S', 'e', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'n', 'o', 't', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'f', 'n', '"', ':', '"', 's', 't', 'r', 'i', 'n', 'g', 'E', 'q', 'u', 'a', 'l', 's', '"', 
',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'R', 'e', 'g', 'i', 'o', 'n', '"', '}', ',', '"', 'a', 'w', 's', '-', 'g', 'l', 'o', 'b', 'a', 'l', '"', ']', '}', ']', '}', ',', '{', '"', 'f', 'n', '"', ':', '"', 'b', 'o', 'o', 'l', 'e', 'a', 'n', 'E', 'q', 'u', 'a', 'l', 's', '"', ',', '"', 'a', 'r', 'g', 'v', '"', ':', '[', '{', '"', 'r', 'e', 'f', '"', ':', '"', 'U', 's', 'e', 'G', 'l', 'o', 'b', 'a', 'l', 'E', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ',', 'f', 'a', 'l', 's', 'e', ']', '}', ']', ',', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', ':', '{', '"', 'u', 'r', 'l', '"', ':', '"', 'h', 't', 't', 'p', 's', ':', '/', '/', 's', '3', '.', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '.', '{', 'p', 'a', 'r', 't', 'i', 't', 'i', 'o', 'n', 'R', 'e', 's', 'u', 'l', 't', '#', 'd', 'n', 's', 'S', 'u', 'f', 'f', 'i', 'x', '}', '"', ',', '"', 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's', '"', ':', '{', '"', 'a', 'u', 't', 'h', 'S', 'c', 'h', 'e', 'm', 'e', 's', '"', ':', '[', '{', '"', 'd', 'i', 's', 'a', 'b', 'l', 'e', 'D', 'o', 'u', 'b', 'l', 'e', 'E', 'n', 'c', 'o', 'd', 'i', 'n', 'g', '"', ':', 't', 'r', 'u', 'e', ',', '"', 'n', 'a', 'm', 'e', '"', ':', '"', 's', 'i', 'g', 'v', '4', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'N', 'a', 'm', 'e', '"', ':', '"', 's', '3', '"', ',', '"', 's', 'i', 'g', 'n', 'i', 'n', 'g', 'R', 'e', 'g', 'i', 'o', 'n', '"', ':', '"', '{', 'R', 'e', 'g', 'i', 'o', 'n', '}', '"', '}', ']', '}', ',', '"', 'h', 'e', 'a', 'd', 'e', 'r', 's', '"', ':', '{', '}', '}', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'n', 'd', 'p', 'o', 'i', 'n', 't', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'I', 'n', 'v', 'a', 'l', 'i', 'd', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ':', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'w', 'a', 's', ' ', 'n', 'o', 't', ' ', 'a', ' ', 'v', 'a', 'l', 'i', 'd', ' ', 'D', 'N', 'S', ' ', 'n', 'a', 'm', 'e', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ']', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 't', 'r', 'e', 'e', '"', '}', ',', '{', '"', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', 's', '"', ':', '[', ']', ',', '"', 'e', 'r', 'r', 'o', 'r', '"', ':', '"', 'A', ' ', 'r', 'e', 'g', 'i', 'o', 'n', ' ', 'm', 'u', 's', 't', ' ', 'b', 'e', ' ', 's', 'e', 't', ' ', 'w', 'h', 'e', 'n', ' ', 's', 'e', 'n', 'd', 'i', 'n', 'g', ' ', 'r', 'e', 'q', 'u', 'e', 's', 't', 's', ' ', 't', 'o', ' ', 'S', '3', '.', '"', ',', '"', 't', 'y', 'p', 'e', '"', ':', '"', 'e', 'r', 'r', 'o', 'r', '"', '}', ']', '}'}; const struct aws_byte_cursor aws_s3_endpoint_rule_set = { .len = 74512, .ptr = (uint8_t *) s_generated_array }; aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_endpoint_resolver/s3_endpoint_resolver.c000066400000000000000000000020421456575232400315330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
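 *
 * This resolver feeds the generated aws_s3_endpoint_rule_set byte cursor (emitted in the
 * generated file above) together with the partitions document into the endpoints rule
 * engine, so callers of aws_s3_endpoint_resolver_new() get back a ready-to-use
 * aws_endpoints_rule_engine for resolving S3 endpoints.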
*/ #include "aws/s3/private/s3_endpoint_resolver.h" #include #include #include struct aws_endpoints_rule_engine *aws_s3_endpoint_resolver_new(struct aws_allocator *allocator) { struct aws_endpoints_ruleset *ruleset = NULL; struct aws_partitions_config *partitions = NULL; struct aws_endpoints_rule_engine *rule_engine = NULL; ruleset = aws_endpoints_ruleset_new_from_string(allocator, aws_s3_endpoint_rule_set); if (!ruleset) { goto cleanup; } partitions = aws_partitions_config_new_from_string(allocator, aws_s3_endpoint_resolver_partitions); if (!partitions) { goto cleanup; } rule_engine = aws_endpoints_rule_engine_new(allocator, ruleset, partitions); cleanup: aws_endpoints_ruleset_release(ruleset); aws_partitions_config_release(partitions); return rule_engine; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_list_objects.c000066400000000000000000000226261456575232400243220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include struct aws_s3_operation_data { struct aws_allocator *allocator; struct aws_string *prefix; struct aws_string *delimiter; struct aws_ref_count ref_count; aws_s3_on_object_fn *on_object; void *user_data; }; static void s_ref_count_zero_callback(void *arg) { struct aws_s3_operation_data *operation_data = arg; if (operation_data->delimiter) { aws_string_destroy(operation_data->delimiter); } if (operation_data->prefix) { aws_string_destroy(operation_data->prefix); } aws_mem_release(operation_data->allocator, operation_data); } static void s_on_paginator_cleanup(void *user_data) { struct aws_s3_operation_data *operation_data = user_data; aws_ref_count_release(&operation_data->ref_count); } struct fs_parser_wrapper { struct aws_allocator *allocator; struct aws_s3_object_info fs_info; }; /* invoked when the ListBucketResult/Contents node is iterated. */ static int s_on_contents_node(struct aws_xml_node *node, void *user_data) { struct fs_parser_wrapper *fs_wrapper = user_data; struct aws_s3_object_info *fs_info = &fs_wrapper->fs_info; /* for each Contents node, get the info from it and send it off as an object we've encountered */ struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ETag")) { return aws_xml_node_as_body(node, &fs_info->e_tag); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Key")) { return aws_xml_node_as_body(node, &fs_info->key); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "LastModified")) { struct aws_byte_cursor date_cur; if (aws_xml_node_as_body(node, &date_cur) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (aws_date_time_init_from_str_cursor(&fs_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Size")) { struct aws_byte_cursor size_cur; if (aws_xml_node_as_body(node, &size_cur) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (aws_byte_cursor_utf8_parse_u64(size_cur, &fs_info->size) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } return AWS_OP_SUCCESS; } /* invoked when the ListBucketResult/CommonPrefixes node is iterated. 
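 * Only the Prefix child is read below; its body is copied into fs_info.prefix so that each
 * common prefix is reported through the same on_object callback used for Contents entries.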
*/ static int s_on_common_prefixes_node(struct aws_xml_node *node, void *user_data) { struct fs_parser_wrapper *fs_wrapper = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Prefix")) { return aws_xml_node_as_body(node, &fs_wrapper->fs_info.prefix); } return AWS_OP_SUCCESS; } static int s_on_list_bucket_result_node_encountered(struct aws_xml_node *node, void *user_data) { struct aws_s3_operation_data *operation_data = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct fs_parser_wrapper fs_wrapper; AWS_ZERO_STRUCT(fs_wrapper); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Contents")) { fs_wrapper.allocator = operation_data->allocator; /* this will traverse the current Contents node, get the metadata necessary to construct * an instance of fs_info so we can invoke the callback on it. This happens once per object. */ if (aws_xml_node_traverse(node, s_on_contents_node, &fs_wrapper) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (operation_data->prefix && !fs_wrapper.fs_info.prefix.len) { fs_wrapper.fs_info.prefix = aws_byte_cursor_from_string(operation_data->prefix); } struct aws_byte_buf trimmed_etag = aws_replace_quote_entities(fs_wrapper.allocator, fs_wrapper.fs_info.e_tag); fs_wrapper.fs_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag); int ret_val = AWS_OP_SUCCESS; if (operation_data->on_object) { ret_val = operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data); } aws_byte_buf_clean_up(&trimmed_etag); return ret_val; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "CommonPrefixes")) { /* this will traverse the current CommonPrefixes node, get the metadata necessary to construct * an instance of fs_info so we can invoke the callback on it. This happens once per prefix. 
*/ if (aws_xml_node_traverse(node, s_on_common_prefixes_node, &fs_wrapper) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } int ret_val = AWS_OP_SUCCESS; if (operation_data->on_object) { ret_val = operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data); } return ret_val; } return AWS_OP_SUCCESS; } static int s_construct_next_request_http_message( struct aws_byte_cursor *continuation_token, void *user_data, struct aws_http_message **out_message) { AWS_PRECONDITION(user_data); struct aws_s3_operation_data *operation_data = user_data; struct aws_byte_cursor s_path_start = aws_byte_cursor_from_c_str("/?list-type=2"); struct aws_byte_buf request_path; aws_byte_buf_init_copy_from_cursor(&request_path, operation_data->allocator, s_path_start); if (operation_data->prefix) { struct aws_byte_cursor s_prefix = aws_byte_cursor_from_c_str("&prefix="); aws_byte_buf_append_dynamic(&request_path, &s_prefix); struct aws_byte_cursor s_prefix_val = aws_byte_cursor_from_string(operation_data->prefix); aws_byte_buf_append_encoding_uri_param(&request_path, &s_prefix_val); } if (operation_data->delimiter) { struct aws_byte_cursor s_delimiter = aws_byte_cursor_from_c_str("&delimiter="); aws_byte_buf_append_dynamic(&request_path, &s_delimiter); struct aws_byte_cursor s_delimiter_val = aws_byte_cursor_from_string(operation_data->delimiter); aws_byte_buf_append_dynamic(&request_path, &s_delimiter_val); } if (continuation_token) { struct aws_byte_cursor s_continuation = aws_byte_cursor_from_c_str("&continuation-token="); aws_byte_buf_append_dynamic(&request_path, &s_continuation); aws_byte_buf_append_encoding_uri_param(&request_path, continuation_token); } struct aws_http_message *list_objects_v2_request = aws_http_message_new_request(operation_data->allocator); aws_http_message_set_request_path(list_objects_v2_request, aws_byte_cursor_from_buf(&request_path)); aws_byte_buf_clean_up(&request_path); struct aws_http_header accept_header = { .name = aws_byte_cursor_from_c_str("accept"), .value = aws_byte_cursor_from_c_str("application/xml"), }; aws_http_message_add_header(list_objects_v2_request, accept_header); aws_http_message_set_request_method(list_objects_v2_request, aws_http_method_get); *out_message = list_objects_v2_request; return AWS_OP_SUCCESS; } struct aws_s3_paginator *aws_s3_initiate_list_objects( struct aws_allocator *allocator, const struct aws_s3_list_objects_params *params) { AWS_FATAL_PRECONDITION(params); AWS_FATAL_PRECONDITION(params->client); AWS_FATAL_PRECONDITION(params->bucket_name.len); AWS_FATAL_PRECONDITION(params->endpoint.len); struct aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data)); operation_data->allocator = allocator; operation_data->delimiter = params->delimiter.len > 0 ? aws_string_new_from_cursor(allocator, &params->delimiter) : NULL; operation_data->prefix = params->prefix.len > 0 ?
aws_string_new_from_cursor(allocator, &params->prefix) : NULL; operation_data->on_object = params->on_object; operation_data->user_data = params->user_data; aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback); struct aws_s3_paginated_operation_params operation_params = { .next_message = s_construct_next_request_http_message, .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered, .on_paginated_operation_cleanup = s_on_paginator_cleanup, .result_xml_node_name = aws_byte_cursor_from_c_str("ListBucketResult"), .continuation_token_node_name = aws_byte_cursor_from_c_str("NextContinuationToken"), .user_data = operation_data, }; struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params); struct aws_s3_paginator_params paginator_params = { .client = params->client, .bucket_name = params->bucket_name, .endpoint = params->endpoint, .on_page_finished_fn = params->on_list_finished, .operation = operation, .user_data = params->user_data, }; struct aws_s3_paginator *paginator = aws_s3_initiate_paginator(allocator, &paginator_params); // transfer control to paginator aws_s3_paginated_operation_release(operation); return paginator; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_list_parts.c000066400000000000000000000203231456575232400240120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include struct aws_s3_operation_data { struct aws_allocator *allocator; struct aws_string *key; struct aws_string *upload_id; struct aws_ref_count ref_count; aws_s3_on_part_fn *on_part; void *user_data; }; static void s_ref_count_zero_callback(void *arg) { struct aws_s3_operation_data *operation_data = arg; if (operation_data->key) { aws_string_destroy(operation_data->key); } if (operation_data->upload_id) { aws_string_destroy(operation_data->upload_id); } aws_mem_release(operation_data->allocator, operation_data); } static void s_on_paginator_cleanup(void *user_data) { struct aws_s3_operation_data *operation_data = user_data; aws_ref_count_release(&operation_data->ref_count); } struct result_wrapper { struct aws_allocator *allocator; struct aws_s3_part_info part_info; }; /* invoked as each child element of ListPartsResult/Part is iterated.
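 * ETag, LastModified, Size and PartNumber are copied into aws_s3_part_info (PartNumber is
 * range-checked against UINT32_MAX), and any ChecksumCRC32/CRC32C/SHA1/SHA256 bodies are
 * captured as-is.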
*/ static int s_xml_on_Part_child(struct aws_xml_node *node, void *user_data) { struct result_wrapper *result_wrapper = user_data; struct aws_s3_part_info *part_info = &result_wrapper->part_info; /* for each Parts node, get the info from it and send it off as an part we've encountered */ struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ETag")) { return aws_xml_node_as_body(node, &part_info->e_tag); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "LastModified")) { struct aws_byte_cursor date_cur; if (aws_xml_node_as_body(node, &date_cur) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (aws_date_time_init_from_str_cursor(&part_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601)) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Size")) { struct aws_byte_cursor size_cur; if (aws_xml_node_as_body(node, &size_cur) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (aws_byte_cursor_utf8_parse_u64(size_cur, &part_info->size) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "PartNumber")) { struct aws_byte_cursor part_number_cur; if (aws_xml_node_as_body(node, &part_number_cur) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } uint64_t part_number = 0; if (aws_byte_cursor_utf8_parse_u64(part_number_cur, &part_number) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (part_number > UINT32_MAX) { return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); } part_info->part_number = (uint32_t)part_number; return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumCRC32")) { return aws_xml_node_as_body(node, &part_info->checksumCRC32); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumCRC32C")) { return aws_xml_node_as_body(node, &part_info->checksumCRC32C); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumSHA1")) { return aws_xml_node_as_body(node, &part_info->checksumSHA1); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumSHA256")) { return aws_xml_node_as_body(node, &part_info->checksumSHA256); } return AWS_OP_SUCCESS; } static int s_xml_on_ListPartsResult_child(struct aws_xml_node *node, void *user_data) { struct aws_s3_operation_data *operation_data = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Part")) { struct result_wrapper result_wrapper = { .allocator = operation_data->allocator, }; /* this will traverse the current Parts node, get the metadata necessary to construct * an instance of part_info so we can invoke the callback on it. This happens once per part. 
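 * Note that the ETag body is passed through aws_replace_quote_entities() into a temporary
 * buffer before on_part is invoked, and that buffer is cleaned up after the callback returns.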
*/ if (aws_xml_node_traverse(node, s_xml_on_Part_child, &result_wrapper) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } struct aws_byte_buf trimmed_etag = aws_replace_quote_entities(result_wrapper.allocator, result_wrapper.part_info.e_tag); result_wrapper.part_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag); int ret_val = AWS_OP_SUCCESS; if (operation_data->on_part) { ret_val = operation_data->on_part(&result_wrapper.part_info, operation_data->user_data); } aws_byte_buf_clean_up(&trimmed_etag); return ret_val; } return AWS_OP_SUCCESS; } static int s_construct_next_request_http_message( struct aws_byte_cursor *continuation_token, void *user_data, struct aws_http_message **out_message) { AWS_PRECONDITION(user_data); struct aws_s3_operation_data *operation_data = user_data; struct aws_byte_buf request_path; struct aws_byte_cursor key_val = aws_byte_cursor_from_string(operation_data->key); aws_byte_buf_init_copy_from_cursor(&request_path, operation_data->allocator, key_val); if (operation_data->upload_id) { struct aws_byte_cursor upload_id = aws_byte_cursor_from_c_str("?uploadId="); aws_byte_buf_append_dynamic(&request_path, &upload_id); struct aws_byte_cursor upload_id_val = aws_byte_cursor_from_string(operation_data->upload_id); aws_byte_buf_append_dynamic(&request_path, &upload_id_val); } if (continuation_token) { struct aws_byte_cursor continuation = aws_byte_cursor_from_c_str("&part-number-marker="); aws_byte_buf_append_dynamic(&request_path, &continuation); aws_byte_buf_append_encoding_uri_param(&request_path, continuation_token); } struct aws_http_message *list_parts_request = aws_http_message_new_request(operation_data->allocator); aws_http_message_set_request_path(list_parts_request, aws_byte_cursor_from_buf(&request_path)); aws_byte_buf_clean_up(&request_path); struct aws_http_header accept_header = { .name = aws_byte_cursor_from_c_str("accept"), .value = aws_byte_cursor_from_c_str("application/xml"), }; aws_http_message_add_header(list_parts_request, accept_header); aws_http_message_set_request_method(list_parts_request, aws_http_method_get); *out_message = list_parts_request; return AWS_OP_SUCCESS; } struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new( struct aws_allocator *allocator, const struct aws_s3_list_parts_params *params) { AWS_FATAL_PRECONDITION(params); AWS_FATAL_PRECONDITION(params->key.len); AWS_FATAL_PRECONDITION(params->upload_id.len); struct aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data)); operation_data->allocator = allocator; operation_data->key = aws_string_new_from_cursor(allocator, &params->key); operation_data->upload_id = aws_string_new_from_cursor(allocator, &params->upload_id); operation_data->on_part = params->on_part; operation_data->user_data = params->user_data; aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback); struct aws_s3_paginated_operation_params operation_params = { .next_message = s_construct_next_request_http_message, .on_result_node_encountered_fn = s_xml_on_ListPartsResult_child, .on_paginated_operation_cleanup = s_on_paginator_cleanup, .result_xml_node_name = aws_byte_cursor_from_c_str("ListPartsResult"), .continuation_token_node_name = aws_byte_cursor_from_c_str("NextPartNumberMarker"), .user_data = operation_data, }; struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params); return operation; }
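/*
 * Illustrative usage sketch (not part of the original file): one way a caller might set up a
 * ListParts pagination operation with the function above. Only the fields this file reads
 * (key, upload_id, on_part, user_data) are shown; the names allocator and my_on_part and the
 * placeholder key/upload id are assumptions. In real code the returned operation would be
 * handed to a paginator before the local reference is released, the same pattern
 * aws_s3_initiate_list_objects() uses in s3_list_objects.c above; that wiring is out of
 * scope here.
 *
 *   struct aws_s3_list_parts_params list_parts_params = {
 *       .key = aws_byte_cursor_from_c_str("/my-object"),
 *       .upload_id = aws_byte_cursor_from_c_str("EXAMPLE-UPLOAD-ID"),
 *       .on_part = my_on_part,
 *       .user_data = NULL,
 *   };
 *   struct aws_s3_paginated_operation *op =
 *       aws_s3_list_parts_operation_new(allocator, &list_parts_params);
 *   if (op != NULL) {
 *       aws_s3_paginated_operation_release(op);
 *   }
 */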
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_meta_request.c000066400000000000000000002623311456575232400243330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_auto_ranged_get.h" #include "aws/s3/private/s3_checksums.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_parallel_input_stream.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" #include "aws/s3/s3express_credentials_provider.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const size_t s_dynamic_body_initial_buf_size = KB_TO_BYTES(1); static const size_t s_default_body_streaming_priority_queue_size = 16; static const size_t s_default_event_delivery_array_size = 16; static int s_s3_request_priority_queue_pred(const void *a, const void *b); static void s_s3_meta_request_destroy(void *user_data); static void s_s3_meta_request_init_signing_date_time( struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time); static void s_s3_meta_request_sign_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_signing_complete_fn *on_signing_complete, void *user_data); static void s_s3_meta_request_request_on_signed( struct aws_signing_result *signing_result, int error_code, void *user_data); static int s_s3_meta_request_incoming_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data); static int s_s3_meta_request_incoming_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *headers, size_t headers_count, void *user_data); static int s_s3_meta_request_headers_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data); static void s_s3_meta_request_stream_metrics( struct aws_http_stream *stream, const struct aws_http_stream_metrics *metrics, void *user_data); static void s_s3_meta_request_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data); static void s_s3_meta_request_send_request_finish( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code); void aws_s3_meta_request_lock_synced_data(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); aws_mutex_lock(&meta_request->synced_data.lock); } void aws_s3_meta_request_unlock_synced_data(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); aws_mutex_unlock(&meta_request->synced_data.lock); } static int s_meta_request_get_response_headers_checksum_callback( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { for (int i = AWS_SCA_INIT; i <= AWS_SCA_END; i++) { if (!aws_s3_meta_request_checksum_config_has_algorithm(meta_request, i)) { /* If user doesn't select this algorithm, skip */ continue; } const struct aws_byte_cursor *algorithm_header_name = aws_get_http_header_name_from_algorithm(i); if (aws_http_headers_has(headers, *algorithm_header_name) && !aws_http_headers_has(headers, g_mp_parts_count_header_name)) { struct aws_byte_cursor header_sum; aws_http_headers_get(headers, *algorithm_header_name, &header_sum); size_t encoded_len = 0; aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(i), &encoded_len); if 
(header_sum.len == encoded_len - 1) { /* encoded_len includes the nullptr length. -1 is the expected length. */ aws_byte_buf_init_copy_from_cursor( &meta_request->meta_request_level_response_header_checksum, meta_request->allocator, header_sum); meta_request->meta_request_level_running_response_sum = aws_checksum_new(meta_request->allocator, i); } break; } } if (meta_request->headers_user_callback_after_checksum) { return meta_request->headers_user_callback_after_checksum(meta_request, headers, response_status, user_data); } else { return AWS_OP_SUCCESS; } } /* warning this might get screwed up with retries/restarts */ static int s_meta_request_get_response_body_checksum_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { if (meta_request->meta_request_level_running_response_sum) { aws_checksum_update(meta_request->meta_request_level_running_response_sum, body); } if (meta_request->body_user_callback_after_checksum) { return meta_request->body_user_callback_after_checksum(meta_request, body, range_start, user_data); } else { return AWS_OP_SUCCESS; } } static void s_meta_request_get_response_finish_checksum_callback( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { struct aws_byte_buf response_body_sum; struct aws_byte_buf encoded_response_body_sum; AWS_ZERO_STRUCT(response_body_sum); AWS_ZERO_STRUCT(encoded_response_body_sum); struct aws_s3_meta_request_result *mut_meta_request_result = (struct aws_s3_meta_request_result *)meta_request_result; if (meta_request_result->error_code == AWS_OP_SUCCESS && meta_request->meta_request_level_running_response_sum) { mut_meta_request_result->did_validate = true; mut_meta_request_result->validation_algorithm = meta_request->meta_request_level_running_response_sum->algorithm; size_t encoded_checksum_len = 0; /* what error should I raise for these? 
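 * (As written, the return values of the length/init/encode helpers below are not checked; the
 * finalized digest is base64-encoded and compared with the header checksum captured earlier,
 * and a mismatch is reported as AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH.)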
*/ aws_base64_compute_encoded_len( meta_request->meta_request_level_running_response_sum->digest_size, &encoded_checksum_len); aws_byte_buf_init(&encoded_response_body_sum, meta_request->allocator, encoded_checksum_len); aws_byte_buf_init( &response_body_sum, meta_request->allocator, meta_request->meta_request_level_running_response_sum->digest_size); aws_checksum_finalize(meta_request->meta_request_level_running_response_sum, &response_body_sum, 0); struct aws_byte_cursor response_body_sum_cursor = aws_byte_cursor_from_buf(&response_body_sum); aws_base64_encode(&response_body_sum_cursor, &encoded_response_body_sum); if (!aws_byte_buf_eq(&encoded_response_body_sum, &meta_request->meta_request_level_response_header_checksum)) { mut_meta_request_result->error_code = AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH; } } if (meta_request->finish_user_callback_after_checksum) { meta_request->finish_user_callback_after_checksum(meta_request, meta_request_result, user_data); } aws_byte_buf_clean_up(&response_body_sum); aws_byte_buf_clean_up(&encoded_response_body_sum); aws_checksum_destroy(meta_request->meta_request_level_running_response_sum); aws_byte_buf_clean_up(&meta_request->meta_request_level_response_header_checksum); } int aws_s3_meta_request_init_base( struct aws_allocator *allocator, struct aws_s3_client *client, size_t part_size, bool should_compute_content_md5, const struct aws_s3_meta_request_options *options, void *impl, struct aws_s3_meta_request_vtable *vtable, struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(options); AWS_PRECONDITION(options->message); AWS_PRECONDITION(impl); AWS_PRECONDITION(meta_request); AWS_ZERO_STRUCT(*meta_request); AWS_ASSERT(vtable->update); AWS_ASSERT(vtable->prepare_request); AWS_ASSERT(vtable->destroy); AWS_ASSERT(vtable->sign_request); AWS_ASSERT(vtable->init_signing_date_time); AWS_ASSERT(vtable->finished_request); AWS_ASSERT(vtable->send_request_finish); meta_request->allocator = allocator; meta_request->type = options->type; /* Set up reference count. 
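 * Releasing the last reference invokes s_s3_meta_request_destroy() (further below), which performs the
 * actual teardown.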
*/ aws_ref_count_init(&meta_request->ref_count, meta_request, s_s3_meta_request_destroy); aws_linked_list_init(&meta_request->synced_data.cancellable_http_streams_list); if (part_size == SIZE_MAX) { aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto error; } if (aws_mutex_init(&meta_request->synced_data.lock)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not initialize mutex for meta request", (void *)meta_request); goto error; } if (aws_priority_queue_init_dynamic( &meta_request->synced_data.pending_body_streaming_requests, meta_request->allocator, s_default_body_streaming_priority_queue_size, sizeof(struct aws_s3_request *), s_s3_request_priority_queue_pred)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not initialize priority queue for meta request", (void *)meta_request); /* Priority queue */ goto error; } aws_array_list_init_dynamic( &meta_request->synced_data.event_delivery_array, meta_request->allocator, s_default_event_delivery_array_size, sizeof(struct aws_s3_meta_request_event)); aws_array_list_init_dynamic( &meta_request->io_threaded_data.event_delivery_array, meta_request->allocator, s_default_event_delivery_array_size, sizeof(struct aws_s3_meta_request_event)); *((size_t *)&meta_request->part_size) = part_size; *((bool *)&meta_request->should_compute_content_md5) = should_compute_content_md5; checksum_config_init(&meta_request->checksum_config, options->checksum_config); if (options->signing_config) { meta_request->cached_signing_config = aws_cached_signing_config_new(client, options->signing_config); } /* Client is currently optional to allow spinning up a meta_request without a client in a test. */ if (client != NULL) { meta_request->client = aws_s3_client_acquire(client); meta_request->io_event_loop = aws_event_loop_group_get_next_loop(client->body_streaming_elg); meta_request->synced_data.read_window_running_total = client->initial_read_window; } /* Set initial_meta_request, based on how the request's body is being passed in * (we checked earlier that it's not being passed multiple ways) */ if (options->send_filepath.len > 0) { /* Create parallel read stream from file */ meta_request->request_body_parallel_stream = client->vtable->parallel_input_stream_new_from_file(allocator, options->send_filepath); if (meta_request->request_body_parallel_stream == NULL) { goto error; } /* but keep original message around for headers, method, etc */ meta_request->initial_request_message = aws_http_message_acquire(options->message); } else if (options->send_async_stream != NULL) { /* Read from async body-stream, but keep original message around for headers, method, etc */ meta_request->request_body_async_stream = aws_async_input_stream_acquire(options->send_async_stream); meta_request->initial_request_message = aws_http_message_acquire(options->message); } else { /* Keep original message around, we'll read from its synchronous body-stream */ meta_request->initial_request_message = aws_http_message_acquire(options->message); } meta_request->synced_data.next_streaming_part = 1; meta_request->meta_request_level_running_response_sum = NULL; meta_request->user_data = options->user_data; meta_request->shutdown_callback = options->shutdown_callback; meta_request->progress_callback = options->progress_callback; meta_request->telemetry_callback = options->telemetry_callback; meta_request->upload_review_callback = options->upload_review_callback; if (meta_request->checksum_config.validate_response_checksum) { /* TODO: the validate for auto range get should happen for each 
response received. */ meta_request->headers_user_callback_after_checksum = options->headers_callback; meta_request->body_user_callback_after_checksum = options->body_callback; meta_request->finish_user_callback_after_checksum = options->finish_callback; meta_request->headers_callback = s_meta_request_get_response_headers_checksum_callback; meta_request->body_callback = s_meta_request_get_response_body_checksum_callback; meta_request->finish_callback = s_meta_request_get_response_finish_checksum_callback; } else { meta_request->headers_callback = options->headers_callback; meta_request->body_callback = options->body_callback; meta_request->finish_callback = options->finish_callback; } /* Nothing can fail after here. Leave the impl not affected by failure of initializing base. */ meta_request->impl = impl; meta_request->vtable = vtable; return AWS_OP_SUCCESS; error: s_s3_meta_request_destroy((void *)meta_request); return AWS_OP_ERR; } void aws_s3_meta_request_increment_read_window(struct aws_s3_meta_request *meta_request, uint64_t bytes) { AWS_PRECONDITION(meta_request); if (bytes == 0) { return; } if (!meta_request->client->enable_read_backpressure) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Ignoring call to increment read window. This client has not enabled read backpressure.", (void *)meta_request); return; } AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p: Incrementing read window by %" PRIu64, (void *)meta_request, bytes); /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); /* Response will never approach UINT64_MAX, so do a saturating sum instead of worrying about overflow */ meta_request->synced_data.read_window_running_total = aws_add_u64_saturating(bytes, meta_request->synced_data.read_window_running_total); aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ /* Schedule the work task, to continue processing the meta-request */ aws_s3_client_schedule_process_work(meta_request->client); } void aws_s3_meta_request_cancel(struct aws_s3_meta_request *meta_request) { /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); aws_s3_meta_request_set_fail_synced(meta_request, NULL, AWS_ERROR_S3_CANCELED); aws_s3_meta_request_cancel_cancellable_requests_synced(meta_request, AWS_ERROR_S3_CANCELED); aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ } int aws_s3_meta_request_pause( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_resume_token **out_resume_token) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); *out_resume_token = NULL; if (!meta_request->vtable->pause) { return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } return meta_request->vtable->pause(meta_request, out_resume_token); } void aws_s3_meta_request_set_fail_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *failed_request, int error_code) { AWS_PRECONDITION(meta_request); ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); /* Protect against bugs */ if (error_code == AWS_ERROR_SUCCESS) { AWS_ASSERT(false); AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Meta request failed but error code not set, AWS_ERROR_UNKNOWN will be reported", (void *)meta_request); error_code = AWS_ERROR_UNKNOWN; } if (meta_request->synced_data.finish_result_set) { return; } meta_request->synced_data.finish_result_set = true; if ((error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS || error_code == AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR || error_code == 
AWS_ERROR_S3_OBJECT_MODIFIED) && failed_request != NULL) { aws_s3_meta_request_result_setup( meta_request, &meta_request->synced_data.finish_result, failed_request, failed_request->send_data.response_status, error_code); } else { AWS_ASSERT(error_code != AWS_ERROR_S3_INVALID_RESPONSE_STATUS); aws_s3_meta_request_result_setup(meta_request, &meta_request->synced_data.finish_result, NULL, 0, error_code); } } void aws_s3_meta_request_set_success_synced(struct aws_s3_meta_request *meta_request, int response_status) { AWS_PRECONDITION(meta_request); ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); if (meta_request->synced_data.finish_result_set) { return; } meta_request->synced_data.finish_result_set = true; aws_s3_meta_request_result_setup( meta_request, &meta_request->synced_data.finish_result, NULL, response_status, AWS_ERROR_SUCCESS); } bool aws_s3_meta_request_has_finish_result(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); bool is_finishing = aws_s3_meta_request_has_finish_result_synced(meta_request); aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ return is_finishing; } bool aws_s3_meta_request_has_finish_result_synced(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); if (!meta_request->synced_data.finish_result_set) { return false; } return true; } struct aws_s3_meta_request *aws_s3_meta_request_acquire(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); aws_ref_count_acquire(&meta_request->ref_count); return meta_request; } struct aws_s3_meta_request *aws_s3_meta_request_release(struct aws_s3_meta_request *meta_request) { if (meta_request != NULL) { aws_ref_count_release(&meta_request->ref_count); } return NULL; } static void s_s3_meta_request_destroy(void *user_data) { struct aws_s3_meta_request *meta_request = user_data; AWS_PRECONDITION(meta_request); void *log_id = meta_request; AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Cleaning up meta request", (void *)meta_request); /* Clean up our initial http message */ meta_request->request_body_async_stream = aws_async_input_stream_release(meta_request->request_body_async_stream); meta_request->initial_request_message = aws_http_message_release(meta_request->initial_request_message); void *meta_request_user_data = meta_request->user_data; aws_s3_meta_request_shutdown_fn *shutdown_callback = meta_request->shutdown_callback; aws_cached_signing_config_destroy(meta_request->cached_signing_config); aws_string_destroy(meta_request->s3express_session_host); aws_mutex_clean_up(&meta_request->synced_data.lock); /* endpoint should have already been released and set NULL by the meta request finish call. 
* But call release() again, just in case we're tearing down a half-initialized meta request */ aws_s3_endpoint_release(meta_request->endpoint); meta_request->client = aws_s3_client_release(meta_request->client); AWS_ASSERT(aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) == 0); aws_priority_queue_clean_up(&meta_request->synced_data.pending_body_streaming_requests); AWS_ASSERT(aws_array_list_length(&meta_request->synced_data.event_delivery_array) == 0); aws_array_list_clean_up(&meta_request->synced_data.event_delivery_array); AWS_ASSERT(aws_array_list_length(&meta_request->io_threaded_data.event_delivery_array) == 0); aws_array_list_clean_up(&meta_request->io_threaded_data.event_delivery_array); AWS_ASSERT(aws_linked_list_empty(&meta_request->synced_data.cancellable_http_streams_list)); aws_s3_meta_request_result_clean_up(meta_request, &meta_request->synced_data.finish_result); if (meta_request->vtable != NULL) { AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Calling virtual meta request destroy function.", log_id); meta_request->vtable->destroy(meta_request); } meta_request = NULL; if (shutdown_callback != NULL) { AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Calling meta request shutdown callback.", log_id); shutdown_callback(meta_request_user_data); } AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Meta request clean up finished.", log_id); } static int s_s3_request_priority_queue_pred(const void *a, const void *b) { const struct aws_s3_request *const *request_a = a; AWS_PRECONDITION(request_a); AWS_PRECONDITION(*request_a); const struct aws_s3_request *const *request_b = b; AWS_PRECONDITION(request_b); AWS_PRECONDITION(*request_b); return (*request_a)->part_number > (*request_b)->part_number; } bool aws_s3_meta_request_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); AWS_PRECONDITION(meta_request->vtable->update); return meta_request->vtable->update(meta_request, flags, out_request); } bool aws_s3_meta_request_is_active(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); bool active = meta_request->synced_data.state == AWS_S3_META_REQUEST_STATE_ACTIVE; aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ return active; } bool aws_s3_meta_request_is_finished(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); bool is_finished = meta_request->synced_data.state == AWS_S3_META_REQUEST_STATE_FINISHED; aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ return is_finished; } static void s_s3_meta_request_prepare_request_task(struct aws_task *task, void *arg, enum aws_task_status task_status); static void s_s3_meta_request_on_request_prepared(void *user_data); /* TODO: document how this is final step in prepare-request sequence. * Could be invoked on any thread. 
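 */

/* A hedged usage sketch (the s_example_* name is hypothetical): when a client is created with read
 * backpressure enabled, the caller's body callback is expected to re-open the flow-control window as it
 * consumes data, typically by handing the consumed length back through
 * aws_s3_meta_request_increment_read_window() (defined above). The parameter list mirrors the
 * body-callback signature already used in this file. */
static int s_example_backpressure_body_callback(
    struct aws_s3_meta_request *meta_request,
    const struct aws_byte_cursor *body,
    uint64_t range_start,
    void *user_data) {

    (void)range_start;
    (void)user_data;

    /* ...consume body->ptr / body->len here, then grow the window so the download keeps flowing... */
    aws_s3_meta_request_increment_read_window(meta_request, (uint64_t)body->len);
    return AWS_OP_SUCCESS;
}

/*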
*/ static void s_s3_prepare_request_payload_callback_and_destroy( struct aws_s3_prepare_request_payload *payload, int error_code) { AWS_PRECONDITION(payload); AWS_PRECONDITION(payload->request); struct aws_s3_meta_request *meta_request = payload->request->meta_request; AWS_PRECONDITION(meta_request); ++payload->request->num_times_prepared; if (error_code) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not prepare request %p due to error %d (%s).", (void *)meta_request, (void *)payload->request, error_code, aws_error_str(error_code)); /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); aws_s3_meta_request_set_fail_synced(meta_request, payload->request, error_code); aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ } if (payload->callback != NULL) { payload->callback(meta_request, payload->request, error_code, payload->user_data); } aws_future_void_release(payload->asyncstep_prepare_request); aws_mem_release(payload->allocator, payload); } static void s_s3_meta_request_schedule_prepare_request_default( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data); void aws_s3_meta_request_prepare_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); if (meta_request->vtable->schedule_prepare_request) { meta_request->vtable->schedule_prepare_request(meta_request, request, callback, user_data); } else { s_s3_meta_request_schedule_prepare_request_default(meta_request, request, callback, user_data); } } static void s_s3_meta_request_schedule_prepare_request_default( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(request); struct aws_s3_client *client = meta_request->client; AWS_PRECONDITION(client); struct aws_allocator *allocator = client->allocator; AWS_PRECONDITION(allocator); struct aws_s3_prepare_request_payload *payload = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_prepare_request_payload)); payload->allocator = allocator; payload->request = request; payload->callback = callback; payload->user_data = user_data; aws_task_init( &payload->task, s_s3_meta_request_prepare_request_task, payload, "s3_meta_request_prepare_request_task"); if (meta_request->request_body_parallel_stream) { /* The body stream supports reading in parallel, so schedule task on any I/O thread. * If we always used the meta-request's dedicated io_event_loop, we wouldn't get any parallelism. 
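 * Each prepare task is therefore placed on a loop picked from the client's body-streaming event loop
 * group, so concurrent part preparations can read the file from different threads.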
*/ struct aws_event_loop *loop = aws_event_loop_group_get_next_loop(client->body_streaming_elg); aws_event_loop_schedule_task_now(loop, &payload->task); } else { aws_event_loop_schedule_task_now(meta_request->io_event_loop, &payload->task); } } static void s_s3_meta_request_prepare_request_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { (void)task; (void)task_status; struct aws_s3_prepare_request_payload *payload = arg; AWS_PRECONDITION(payload); struct aws_s3_request *request = payload->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); const struct aws_s3_meta_request_vtable *vtable = meta_request->vtable; AWS_PRECONDITION(vtable); /* Client owns this event loop group. A cancel should not be possible. */ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY); if (!request->always_send && aws_s3_meta_request_has_finish_result(meta_request)) { s_s3_prepare_request_payload_callback_and_destroy(payload, AWS_ERROR_S3_CANCELED); return; } /* Kick off the async vtable->prepare_request() * Each subclass has its own implementation of this. */ payload->asyncstep_prepare_request = vtable->prepare_request(request); aws_future_void_register_callback( payload->asyncstep_prepare_request, s_s3_meta_request_on_request_prepared, payload); return; } /* Called after vtable->prepare_request has succeeded or failed. */ static void s_s3_meta_request_on_request_prepared(void *user_data) { struct aws_s3_prepare_request_payload *payload = user_data; struct aws_s3_request *request = payload->request; struct aws_s3_meta_request *meta_request = request->meta_request; int error_code = aws_future_void_get_error(payload->asyncstep_prepare_request); if (error_code) { s_s3_prepare_request_payload_callback_and_destroy(payload, error_code); return; } aws_s3_add_user_agent_header(meta_request->allocator, request->send_data.message); /* Next step is to sign the newly created message (completion callback could happen on any thread) */ s_s3_meta_request_sign_request(meta_request, request, s_s3_meta_request_request_on_signed, payload); } static void s_s3_meta_request_init_signing_date_time( struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); AWS_PRECONDITION(meta_request->vtable->init_signing_date_time); meta_request->vtable->init_signing_date_time(meta_request, date_time); } void aws_s3_meta_request_init_signing_date_time_default( struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(date_time); (void)meta_request; aws_date_time_init_now(date_time); } static void s_s3_meta_request_sign_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_signing_complete_fn *on_signing_complete, void *user_data) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); AWS_PRECONDITION(meta_request->vtable->sign_request); if (request->send_data.metrics) { struct aws_s3_request_metrics *metric = request->send_data.metrics; aws_high_res_clock_get_ticks((uint64_t *)&metric->time_metrics.sign_start_timestamp_ns); } meta_request->vtable->sign_request(meta_request, request, on_signing_complete, user_data); } struct aws_get_s3express_credentials_user_data { /* Keep our own reference to allocator, because the meta request can be gone after the callback invoked. 
*/ struct aws_allocator *allocator; struct aws_s3_meta_request *meta_request; struct aws_s3_request *request; aws_signing_complete_fn *on_signing_complete; const struct aws_credentials *original_credentials; struct aws_signing_config_aws base_signing_config; struct aws_credentials_properties_s3express properties; void *user_data; }; static void s_aws_get_s3express_credentials_user_data_destroy(struct aws_get_s3express_credentials_user_data *context) { aws_s3_meta_request_release(context->meta_request); aws_credentials_release(context->original_credentials); aws_mem_release(context->allocator, context); } static void s_get_s3express_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_get_s3express_credentials_user_data *context = user_data; struct aws_signing_config_aws signing_config = context->base_signing_config; if (error_code) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Failed to get S3 Express credentials %p. due to error code %d (%s)", (void *)context->meta_request, (void *)context->request, error_code, aws_error_str(error_code)); context->on_signing_complete(NULL, error_code, context->user_data); goto done; } s_s3_meta_request_init_signing_date_time(context->meta_request, &signing_config.date); /* Override the credentials */ signing_config.credentials = credentials; signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS; if (aws_sign_request_aws( context->allocator, context->request->send_data.signable, (struct aws_signing_config_base *)&signing_config, context->on_signing_complete, context->user_data)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not sign request %p. due to error code %d (%s)", (void *)context->meta_request, (void *)context->request, aws_last_error_or_unknown(), aws_error_str(aws_last_error_or_unknown())); context->on_signing_complete(NULL, aws_last_error_or_unknown(), context->user_data); } done: s_aws_get_s3express_credentials_user_data_destroy(context); } static void s_get_original_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_get_s3express_credentials_user_data *context = user_data; struct aws_s3_meta_request *meta_request = context->meta_request; if (error_code) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Failed to get S3 Express credentials %p. due to error code %d (%s)", (void *)context->meta_request, (void *)context->request, error_code, aws_error_str(error_code)); context->on_signing_complete(NULL, error_code, context->user_data); s_aws_get_s3express_credentials_user_data_destroy(context); return; } context->original_credentials = credentials; aws_credentials_acquire(context->original_credentials); /** * Derive the credentials for S3 Express. 
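 * The credentials just resolved by the original provider are exchanged through the client's
 * s3express_provider for session credentials; signing then completes in
 * s_get_s3express_credentials_callback() using AWS_SIGNING_ALGORITHM_V4_S3EXPRESS.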
*/ struct aws_s3_client *client = meta_request->client; if (aws_s3express_credentials_provider_get_credentials( client->s3express_provider, context->original_credentials, &context->properties, s_get_s3express_credentials_callback, context)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not get S3 Express credentials %p", (void *)meta_request, (void *)context->request); context->on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); s_aws_get_s3express_credentials_user_data_destroy(context); } } static int s_meta_request_resolve_signing_config( struct aws_signing_config_aws *out_signing_config, struct aws_s3_request *request, struct aws_s3_meta_request *meta_request) { struct aws_s3_client *client = meta_request->client; if (meta_request->cached_signing_config != NULL) { *out_signing_config = meta_request->cached_signing_config->config; if (out_signing_config->credentials == NULL && out_signing_config->credentials_provider == NULL) { /* When no credentials available from meta request level override, we use the credentials from client */ out_signing_config->credentials = client->cached_signing_config->config.credentials; out_signing_config->credentials_provider = client->cached_signing_config->config.credentials_provider; } } else if (client->cached_signing_config != NULL) { *out_signing_config = client->cached_signing_config->config; } else { /* Not possible to have no cached signing config from both client and request */ AWS_FATAL_ASSERT(false); } /* If the checksum is configured to be added to the trailer, the payload will be aws-chunked encoded. The payload * will need to be streaming signed/unsigned. */ if (meta_request->checksum_config.location == AWS_SCL_TRAILER && aws_byte_cursor_eq(&out_signing_config->signed_body_value, &g_aws_signed_body_value_unsigned_payload)) { out_signing_config->signed_body_value = g_aws_signed_body_value_streaming_unsigned_payload_trailer; } /* However the initial request for a multipart upload does not have a trailing checksum and is not chunked so it * must have an unsigned_payload signed_body value*/ if (request->part_number == 0 && aws_byte_cursor_eq( &out_signing_config->signed_body_value, &g_aws_signed_body_value_streaming_unsigned_payload_trailer)) { out_signing_config->signed_body_value = g_aws_signed_body_value_unsigned_payload; } return AWS_OP_SUCCESS; } /* Handles signing a message for the caller. */ void aws_s3_meta_request_sign_request_default( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_signing_complete_fn *on_signing_complete, void *user_data) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(request); AWS_PRECONDITION(on_signing_complete); struct aws_s3_client *client = meta_request->client; AWS_ASSERT(client); struct aws_signing_config_aws signing_config; if (s_meta_request_resolve_signing_config(&signing_config, request, meta_request)) { AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: No signing config present. 
Not signing request %p.", (void *)meta_request, (void *)request); on_signing_complete(NULL, AWS_ERROR_SUCCESS, user_data); return; } request->send_data.signable = aws_signable_new_http_request(meta_request->allocator, request->send_data.message); AWS_LOGF_TRACE( AWS_LS_S3_META_REQUEST, "id=%p Created signable %p for request %p with message %p", (void *)meta_request, (void *)request->send_data.signable, (void *)request, (void *)request->send_data.message); if (request->send_data.signable == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not allocate signable for request %p", (void *)meta_request, (void *)request); on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); return; } if (signing_config.algorithm == AWS_SIGNING_ALGORITHM_V4_S3EXPRESS) { /* Fetch credentials from S3 Express provider. */ struct aws_get_s3express_credentials_user_data *context = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_get_s3express_credentials_user_data)); context->allocator = meta_request->allocator; context->base_signing_config = signing_config; context->meta_request = aws_s3_meta_request_acquire(meta_request); context->on_signing_complete = on_signing_complete; context->request = request; context->user_data = user_data; context->properties.host = aws_byte_cursor_from_string(meta_request->s3express_session_host); context->properties.region = signing_config.region; if (signing_config.credentials) { context->original_credentials = signing_config.credentials; aws_credentials_acquire(context->original_credentials); /** * Derive the credentials for S3 Express. */ if (aws_s3express_credentials_provider_get_credentials( client->s3express_provider, context->original_credentials, &context->properties, s_get_s3express_credentials_callback, context)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not get S3 Express credentials %p", (void *)meta_request, (void *)request); on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); s_aws_get_s3express_credentials_user_data_destroy(context); return; } } else if (signing_config.credentials_provider) { /* Get the credentials from provider first. */ if (aws_credentials_provider_get_credentials( signing_config.credentials_provider, s_get_original_credentials_callback, context)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not get S3 Express credentials %p", (void *)meta_request, (void *)request); on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); s_aws_get_s3express_credentials_user_data_destroy(context); return; } } } else { /* Regular signing. 
*/ s_s3_meta_request_init_signing_date_time(meta_request, &signing_config.date); if (aws_sign_request_aws( meta_request->allocator, request->send_data.signable, (struct aws_signing_config_base *)&signing_config, on_signing_complete, user_data)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not sign request %p", (void *)meta_request, (void *)request); on_signing_complete(NULL, aws_last_error_or_unknown(), user_data); return; } } } /* Handle the signing result */ static void s_s3_meta_request_request_on_signed( struct aws_signing_result *signing_result, int error_code, void *user_data) { struct aws_s3_prepare_request_payload *payload = user_data; AWS_PRECONDITION(payload); struct aws_s3_request *request = payload->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); if (error_code != AWS_ERROR_SUCCESS) { goto finish; } if (signing_result != NULL && aws_apply_signing_result_to_http_request(request->send_data.message, meta_request->allocator, signing_result)) { error_code = aws_last_error_or_unknown(); goto finish; } if (request->send_data.metrics) { struct aws_s3_request_metrics *metric = request->send_data.metrics; aws_high_res_clock_get_ticks((uint64_t *)&metric->time_metrics.sign_end_timestamp_ns); AWS_ASSERT(metric->time_metrics.sign_start_timestamp_ns != 0); metric->time_metrics.signing_duration_ns = metric->time_metrics.sign_end_timestamp_ns - metric->time_metrics.sign_start_timestamp_ns; } finish: if (error_code != AWS_ERROR_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Meta request could not sign HTTP request due to error code %d (%s)", (void *)meta_request, error_code, aws_error_str(error_code)); } s_s3_prepare_request_payload_callback_and_destroy(payload, error_code); } void aws_s3_meta_request_send_request(struct aws_s3_meta_request *meta_request, struct aws_s3_connection *connection) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(connection); AWS_PRECONDITION(connection->http_connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); /* Now that we have a signed request and a connection, go ahead and issue the request. 
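 * Response handling is wired through the incoming-headers, header-block-done, incoming-body and
 * stream-complete callbacks configured below; unless the request is marked always_send, the stream is
 * also registered in the cancellable list before it is activated.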
*/ struct aws_http_make_request_options options; AWS_ZERO_STRUCT(options); options.self_size = sizeof(struct aws_http_make_request_options); options.request = request->send_data.message; options.user_data = connection; options.on_response_headers = s_s3_meta_request_incoming_headers; options.on_response_header_block_done = s_s3_meta_request_headers_block_done; options.on_response_body = s_s3_meta_request_incoming_body; if (request->send_data.metrics) { options.on_metrics = s_s3_meta_request_stream_metrics; } options.on_complete = s_s3_meta_request_stream_complete; if (request->request_type == AWS_S3_REQUEST_TYPE_UPLOAD_PART) { options.response_first_byte_timeout_ms = aws_atomic_load_int(&meta_request->client->upload_timeout_ms); request->upload_timeout_ms = (size_t)options.response_first_byte_timeout_ms; } struct aws_http_stream *stream = aws_http_connection_make_request(connection->http_connection, &options); if (stream == NULL) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not make HTTP request %p", (void *)meta_request, (void *)request); goto error_finish; } AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p: Sending request %p", (void *)meta_request, (void *)request); if (!request->always_send) { /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); if (aws_s3_meta_request_has_finish_result_synced(meta_request)) { /* The meta request has finish result already, for this request, treat it as canceled. */ aws_raise_error(AWS_ERROR_S3_CANCELED); aws_s3_meta_request_unlock_synced_data(meta_request); goto error_finish; } /* Activate the stream within the lock as once the activate invoked, the HTTP level callback can happen right * after. */ if (aws_http_stream_activate(stream) != AWS_OP_SUCCESS) { aws_s3_meta_request_unlock_synced_data(meta_request); AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not activate HTTP stream %p", (void *)meta_request, (void *)request); goto error_finish; } aws_linked_list_push_back( &meta_request->synced_data.cancellable_http_streams_list, &request->cancellable_http_streams_list_node); request->synced_data.cancellable_http_stream = stream; aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ } else { /* If the request always send, it is not cancellable. We simply activate the stream. 
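 * (Cancellation relies on the cancellable_http_streams_list populated in the branch above, which
 * always_send requests intentionally skip.)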
*/ if (aws_http_stream_activate(stream) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Could not activate HTTP stream %p", (void *)meta_request, (void *)request); goto error_finish; } } return; error_finish: if (stream) { aws_http_stream_release(stream); stream = NULL; } s_s3_meta_request_send_request_finish(connection, NULL, aws_last_error_or_unknown()); } static int s_s3_meta_request_error_code_from_response_status(int response_status) { int error_code = AWS_ERROR_UNKNOWN; switch (response_status) { case AWS_HTTP_STATUS_CODE_200_OK: case AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT: case AWS_HTTP_STATUS_CODE_204_NO_CONTENT: error_code = AWS_ERROR_SUCCESS; break; case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR: error_code = AWS_ERROR_S3_INTERNAL_ERROR; break; case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE: /* S3 response 503 for throttling, slow down the sending */ error_code = AWS_ERROR_S3_SLOW_DOWN; break; default: error_code = AWS_ERROR_S3_INVALID_RESPONSE_STATUS; break; } return error_code; } static bool s_header_value_from_list( const struct aws_http_header *headers, size_t headers_count, const struct aws_byte_cursor *name, struct aws_byte_cursor *out_value) { for (size_t i = 0; i < headers_count; ++i) { if (aws_byte_cursor_eq(&headers[i].name, name)) { *out_value = headers[i].value; return true; } } return false; } static void s_get_part_response_headers_checksum_helper( struct aws_s3_connection *connection, struct aws_s3_meta_request *meta_request, const struct aws_http_header *headers, size_t headers_count) { for (int i = AWS_SCA_INIT; i <= AWS_SCA_END; i++) { if (!aws_s3_meta_request_checksum_config_has_algorithm(meta_request, i)) { /* If user doesn't select this algorithm, skip */ continue; } const struct aws_byte_cursor *algorithm_header_name = aws_get_http_header_name_from_algorithm(i); struct aws_byte_cursor header_sum; if (s_header_value_from_list(headers, headers_count, algorithm_header_name, &header_sum)) { size_t encoded_len = 0; aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(i), &encoded_len); if (header_sum.len == encoded_len - 1) { aws_byte_buf_init_copy_from_cursor( &connection->request->request_level_response_header_checksum, meta_request->allocator, header_sum); connection->request->request_level_running_response_sum = aws_checksum_new(meta_request->allocator, i); } break; } } } /* warning this might get screwed up with retries/restarts */ static void s_get_part_response_body_checksum_helper( struct aws_s3_checksum *running_response_sum, const struct aws_byte_cursor *body) { if (running_response_sum) { aws_checksum_update(running_response_sum, body); } } static void s_get_response_part_finish_checksum_helper(struct aws_s3_connection *connection, int error_code) { struct aws_byte_buf response_body_sum; struct aws_byte_buf encoded_response_body_sum; AWS_ZERO_STRUCT(response_body_sum); AWS_ZERO_STRUCT(encoded_response_body_sum); struct aws_s3_request *request = connection->request; if (error_code == AWS_OP_SUCCESS && request->request_level_running_response_sum) { size_t encoded_checksum_len = 0; request->did_validate = true; aws_base64_compute_encoded_len(request->request_level_running_response_sum->digest_size, &encoded_checksum_len); aws_byte_buf_init(&encoded_response_body_sum, request->allocator, encoded_checksum_len); aws_byte_buf_init( &response_body_sum, request->allocator, request->request_level_running_response_sum->digest_size); aws_checksum_finalize(request->request_level_running_response_sum, &response_body_sum, 
0); struct aws_byte_cursor response_body_sum_cursor = aws_byte_cursor_from_buf(&response_body_sum); aws_base64_encode(&response_body_sum_cursor, &encoded_response_body_sum); request->checksum_match = aws_byte_buf_eq(&encoded_response_body_sum, &request->request_level_response_header_checksum); request->validation_algorithm = request->request_level_running_response_sum->algorithm; aws_byte_buf_clean_up(&response_body_sum); aws_byte_buf_clean_up(&encoded_response_body_sum); } else { request->did_validate = false; } aws_checksum_destroy(request->request_level_running_response_sum); aws_byte_buf_clean_up(&request->request_level_response_header_checksum); request->request_level_running_response_sum = NULL; } static int s_s3_meta_request_incoming_headers( struct aws_http_stream *stream, enum aws_http_header_block header_block, const struct aws_http_header *headers, size_t headers_count, void *user_data) { (void)header_block; AWS_PRECONDITION(stream); struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); if (aws_http_stream_get_incoming_response_status(stream, &request->send_data.response_status)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Could not get incoming response status for request %p", (void *)meta_request, (void *)request); } if (request->send_data.metrics) { /* Record the headers to the metrics */ struct aws_s3_request_metrics *s3_metrics = request->send_data.metrics; if (s3_metrics->req_resp_info_metrics.response_headers == NULL) { s3_metrics->req_resp_info_metrics.response_headers = aws_http_headers_new(meta_request->allocator); } for (size_t i = 0; i < headers_count; ++i) { const struct aws_byte_cursor *name = &headers[i].name; const struct aws_byte_cursor *value = &headers[i].value; if (aws_byte_cursor_eq(name, &g_request_id_header_name)) { s3_metrics->req_resp_info_metrics.request_id = aws_string_new_from_cursor(connection->request->allocator, value); } aws_http_headers_add(s3_metrics->req_resp_info_metrics.response_headers, *name, *value); } s3_metrics->req_resp_info_metrics.response_status = request->send_data.response_status; } bool successful_response = s_s3_meta_request_error_code_from_response_status(request->send_data.response_status) == AWS_ERROR_SUCCESS; if (successful_response && meta_request->checksum_config.validate_response_checksum && request->request_type == AWS_S3_REQUEST_TYPE_GET_OBJECT) { s_get_part_response_headers_checksum_helper(connection, meta_request, headers, headers_count); } /* Only record headers if an error has taken place, or if the request_desc has asked for them. 
*/ bool should_record_headers = !successful_response || request->record_response_headers; if (should_record_headers) { if (request->send_data.response_headers == NULL) { request->send_data.response_headers = aws_http_headers_new(meta_request->allocator); } for (size_t i = 0; i < headers_count; ++i) { const struct aws_byte_cursor *name = &headers[i].name; const struct aws_byte_cursor *value = &headers[i].value; aws_http_headers_add(request->send_data.response_headers, *name, *value); } } return AWS_OP_SUCCESS; } static int s_s3_meta_request_headers_block_done( struct aws_http_stream *stream, enum aws_http_header_block header_block, void *user_data) { (void)stream; if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) { return AWS_OP_SUCCESS; } struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); /* * When downloading parts via partNumber, if the size is larger than expected, cancel the request immediately so we * don't end up downloading more into memory than we can handle. We'll retry the download using ranged gets instead. */ if (request->request_type == AWS_S3_REQUEST_TYPE_GET_OBJECT && request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1) { uint64_t content_length; if (!aws_s3_parse_content_length_response_header( request->allocator, request->send_data.response_headers, &content_length) && content_length > meta_request->part_size) { return aws_raise_error(AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE); } } return AWS_OP_SUCCESS; } /* * Small helper to either do a static or dynamic append. * TODO: something like this would be useful in common. */ static int s_response_body_append(bool is_dynamic, struct aws_byte_buf *buf, const struct aws_byte_cursor *data) { return is_dynamic ? aws_byte_buf_append_dynamic(buf, data) : aws_byte_buf_append(buf, data); } static int s_s3_meta_request_incoming_body( struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) { (void)stream; struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); AWS_LOGF_TRACE( AWS_LS_S3_META_REQUEST, "id=%p Incoming body for request %p. Response status: %d. Data Size: %" PRIu64 ". 
connection: %p.", (void *)meta_request, (void *)request, request->send_data.response_status, (uint64_t)data->len, (void *)connection); bool successful_response = s_s3_meta_request_error_code_from_response_status(request->send_data.response_status) == AWS_ERROR_SUCCESS; if (!successful_response) { AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "response body: \n" PRInSTR "\n", AWS_BYTE_CURSOR_PRI(*data)); } if (meta_request->checksum_config.validate_response_checksum) { s_get_part_response_body_checksum_helper(request->request_level_running_response_sum, data); } if (request->send_data.response_body.capacity == 0) { if (request->has_part_size_response_body && successful_response) { AWS_FATAL_ASSERT(request->ticket); request->send_data.response_body = aws_s3_buffer_pool_acquire_buffer(request->meta_request->client->buffer_pool, request->ticket); } else { size_t buffer_size = s_dynamic_body_initial_buf_size; aws_byte_buf_init(&request->send_data.response_body, meta_request->allocator, buffer_size); } } /* Note: not having part sized response body means the buffer is dynamic and * can grow. */ if (s_response_body_append(!request->has_part_size_response_body, &request->send_data.response_body, data)) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p: Request %p could not append to response body due to error %d (%s)", (void *)meta_request, (void *)request, aws_last_error_or_unknown(), aws_error_str(aws_last_error_or_unknown())); return AWS_OP_ERR; } return AWS_OP_SUCCESS; } static void s_s3_meta_request_stream_metrics( struct aws_http_stream *stream, const struct aws_http_stream_metrics *http_metrics, void *user_data) { (void)stream; struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); AWS_ASSERT(request->send_data.metrics); struct aws_s3_request_metrics *s3_metrics = request->send_data.metrics; /* Copy over the time metrics from aws_http_stream_metrics to aws_s3_request_metrics */ s3_metrics->time_metrics.send_start_timestamp_ns = http_metrics->send_start_timestamp_ns; s3_metrics->time_metrics.send_end_timestamp_ns = http_metrics->send_end_timestamp_ns; s3_metrics->time_metrics.sending_duration_ns = http_metrics->sending_duration_ns; s3_metrics->time_metrics.receive_start_timestamp_ns = http_metrics->receive_start_timestamp_ns; s3_metrics->time_metrics.receive_end_timestamp_ns = http_metrics->receive_end_timestamp_ns; s3_metrics->time_metrics.receiving_duration_ns = http_metrics->receiving_duration_ns; s3_metrics->crt_info_metrics.stream_id = http_metrics->stream_id; /* Also related metrics from the request/response. */ s3_metrics->crt_info_metrics.connection_id = (void *)connection->http_connection; const struct aws_socket_endpoint *endpoint = aws_http_connection_get_remote_endpoint(connection->http_connection); request->send_data.metrics->crt_info_metrics.ip_address = aws_string_new_from_c_str(request->allocator, endpoint->address); AWS_ASSERT(request->send_data.metrics->crt_info_metrics.ip_address != NULL); s3_metrics->crt_info_metrics.thread_id = aws_thread_current_thread_id(); } /* Finish up the processing of the request work. 
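 */

/* A small standalone sketch of the header-length check used when picking up checksum response headers
 * (see s_meta_request_get_response_headers_checksum_callback and
 * s_get_part_response_headers_checksum_helper above). Per the comment in those helpers,
 * aws_base64_compute_encoded_len() reports a length that includes the null terminator, so a digest of N
 * bytes is expected to arrive as a header value of (encoded_len - 1) characters, e.g. 44 characters for
 * a 32-byte SHA-256 digest. The s_example_* name is hypothetical. */
static bool s_example_checksum_header_len_matches_digest(size_t header_value_len, size_t digest_size) {
    size_t encoded_len = 0;
    if (aws_base64_compute_encoded_len(digest_size, &encoded_len)) {
        return false;
    }
    return header_value_len == encoded_len - 1;
}

/*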
*/ static void s_s3_meta_request_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { struct aws_s3_connection *connection = user_data; AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; struct aws_s3_meta_request *meta_request = request->meta_request; if (meta_request->checksum_config.validate_response_checksum) { s_get_response_part_finish_checksum_helper(connection, error_code); } /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); if (request->synced_data.cancellable_http_stream) { aws_linked_list_remove(&request->cancellable_http_streams_list_node); request->synced_data.cancellable_http_stream = NULL; } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ s_s3_meta_request_send_request_finish(connection, stream, error_code); } static void s_s3_meta_request_send_request_finish( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code) { AWS_PRECONDITION(connection); struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_meta_request_vtable *vtable = meta_request->vtable; AWS_PRECONDITION(vtable); vtable->send_request_finish(connection, stream, error_code); } /* Return whether the response to this request might contain an error, even though we got 200 OK. * see: https://repost.aws/knowledge-center/s3-resolve-200-internalerror */ static bool s_should_check_for_error_despite_200_OK(const struct aws_s3_request *request) { /* We handle async error for every request BUT get object. */ struct aws_s3_meta_request *meta_request = request->meta_request; if (meta_request->type == AWS_S3_META_REQUEST_TYPE_GET_OBJECT) { return false; } return true; } static int s_s3_meta_request_error_code_from_response(struct aws_s3_request *request) { AWS_PRECONDITION(request); int error_code_from_status = s_s3_meta_request_error_code_from_response_status(request->send_data.response_status); /* Response body might be XML with an inside. * The is very likely when status-code is bad. * In some cases, it's even possible after 200 OK. */ int error_code_from_xml = AWS_ERROR_SUCCESS; if (error_code_from_status != AWS_ERROR_SUCCESS || s_should_check_for_error_despite_200_OK(request)) { if (request->send_data.response_body.len > 0) { /* Attempt to read as XML, it's fine if this fails. */ struct aws_byte_cursor xml_doc = aws_byte_cursor_from_buf(&request->send_data.response_body); struct aws_byte_cursor error_code_string = {0}; const char *xml_path[] = {"Error", "Code", NULL}; if (aws_xml_get_body_at_path(request->allocator, xml_doc, xml_path, &error_code_string) == AWS_OP_SUCCESS) { /* Found an string! Map it to CRT error code. 
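 * (That is, the response body carried an <Error><Code> element; the server's code string is translated
 * into the corresponding CRT error below.)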
*/ error_code_from_xml = aws_s3_crt_error_code_from_server_error_code_string(error_code_string); } } } if (error_code_from_status == AWS_ERROR_SUCCESS) { /* Status-code was OK, so assume everything's good, unless we found an in the XML */ switch (error_code_from_xml) { case AWS_ERROR_SUCCESS: return AWS_ERROR_SUCCESS; case AWS_ERROR_UNKNOWN: return AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR; default: return error_code_from_xml; } } else { /* Return error based on status-code, unless we got something more specific from XML */ switch (error_code_from_xml) { case AWS_ERROR_SUCCESS: return error_code_from_status; case AWS_ERROR_UNKNOWN: return error_code_from_status; default: return error_code_from_xml; } } } void aws_s3_meta_request_send_request_finish_default( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code) { struct aws_s3_request *request = connection->request; AWS_PRECONDITION(request); struct aws_s3_meta_request *meta_request = request->meta_request; AWS_PRECONDITION(meta_request); struct aws_s3_client *client = meta_request->client; AWS_PRECONDITION(client); int response_status = request->send_data.response_status; /* If our error code is currently success, then we have some other calls to make that could still indicate a * failure. */ if (error_code == AWS_ERROR_SUCCESS) { error_code = s_s3_meta_request_error_code_from_response(request); if (error_code != AWS_ERROR_SUCCESS) { aws_raise_error(error_code); } } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p: Request %p finished with error code %d (%s) and response status %d", (void *)meta_request, (void *)request, error_code, aws_error_debug_str(error_code), response_status); enum aws_s3_connection_finish_code finish_code = AWS_S3_CONNECTION_FINISH_CODE_FAILED; if (error_code == AWS_ERROR_SUCCESS) { if (connection->request->meta_request->type == AWS_S3_META_REQUEST_TYPE_GET_OBJECT && request->did_validate && !request->checksum_match) { finish_code = AWS_S3_CONNECTION_FINISH_CODE_FAILED; error_code = AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH; AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Meta request cannot recover from checksum mismatch. (request=%p, response status=%d)", (void *)meta_request, (void *)request, response_status); } else { finish_code = AWS_S3_CONNECTION_FINISH_CODE_SUCCESS; } } else { /* BEGIN CRITICAL SECTION */ aws_s3_meta_request_lock_synced_data(meta_request); bool meta_request_finishing = aws_s3_meta_request_has_finish_result_synced(meta_request); aws_s3_meta_request_unlock_synced_data(meta_request); /* END CRITICAL SECTION */ /* If the request failed due to an invalid (ie: unrecoverable) response status, or the meta request already * has a result, then make sure that this request isn't retried. */ if (error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS || error_code == AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE || error_code == AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR || meta_request_finishing) { finish_code = AWS_S3_CONNECTION_FINISH_CODE_FAILED; if (error_code == AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE) { /* Log at info level instead of error as it's expected and not a fatal error */ AWS_LOGF_INFO( AWS_LS_S3_META_REQUEST, "id=%p Cancelling the request because of error %d (%s). (request=%p, response status=%d)", (void *)meta_request, error_code, aws_error_str(error_code), (void *)request, response_status); } else { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Meta request cannot recover from error %d (%s). 
(request=%p, response status=%d)", (void *)meta_request, error_code, aws_error_str(error_code), (void *)request, response_status); } } else { if (error_code == AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT) { /* Log at info level instead of error as it's somewhat expected. */ AWS_LOGF_INFO( AWS_LS_S3_META_REQUEST, "id=%p Request failed from error %d (%s). (request=%p). Try to setup a retry.", (void *)meta_request, error_code, aws_error_str(error_code), (void *)request); } else { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Request failed from error %d (%s). (request=%p, response status=%d). Try to setup a " "retry.", (void *)meta_request, error_code, aws_error_str(error_code), (void *)request, response_status); } /* Otherwise, set this up for a retry if the meta request is active. */ finish_code = AWS_S3_CONNECTION_FINISH_CODE_RETRY; } } if (stream != NULL) { aws_http_stream_release(stream); stream = NULL; } aws_s3_client_notify_connection_finished(client, connection, error_code, finish_code); } void aws_s3_meta_request_finished_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); AWS_PRECONDITION(meta_request->vtable->finished_request); meta_request->vtable->finished_request(meta_request, request, error_code); } /* Pushes a request into the body streaming priority queue. Derived meta request types should not call this--they * should instead call aws_s3_meta_request_stream_response_body_synced.*/ static void s_s3_meta_request_body_streaming_push_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request); /* Pops the next available request from the body streaming priority queue. If the parts previous the next request in * the priority queue have not been placed in the priority queue yet, the priority queue will remain the same, and * NULL will be returned. (Should not be needed to be called by derived types.) */ static struct aws_s3_request *s_s3_meta_request_body_streaming_pop_next_synced( struct aws_s3_meta_request *meta_request); static void s_s3_meta_request_event_delivery_task(struct aws_task *task, void *arg, enum aws_task_status task_status); void aws_s3_meta_request_stream_response_body_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request) { ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); AWS_PRECONDITION(meta_request); AWS_PRECONDITION(request); AWS_PRECONDITION(request->part_number > 0); /* Push it into the priority queue. */ s_s3_meta_request_body_streaming_push_synced(meta_request, request); struct aws_s3_client *client = meta_request->client; AWS_PRECONDITION(client); aws_atomic_fetch_add(&client->stats.num_requests_stream_queued_waiting, 1); /* Grab any requests that can be streamed back to the caller * and send them for delivery on io_event_loop thread. 
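 * Parts are popped strictly in part-number order: a queued request is only handed off once every
 * earlier part has been queued as well (see s_s3_meta_request_body_streaming_pop_next_synced below).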
*/ uint32_t num_streaming_requests = 0; struct aws_s3_request *next_streaming_request; while ((next_streaming_request = s_s3_meta_request_body_streaming_pop_next_synced(meta_request)) != NULL) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY}; event.u.response_body.completed_request = next_streaming_request; aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); ++num_streaming_requests; } if (num_streaming_requests == 0) { return; } aws_atomic_fetch_add(&client->stats.num_requests_streaming_response, num_streaming_requests); aws_atomic_fetch_sub(&client->stats.num_requests_stream_queued_waiting, num_streaming_requests); meta_request->synced_data.num_parts_delivery_sent += num_streaming_requests; } void aws_s3_meta_request_add_event_for_delivery_synced( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_event *event) { ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); aws_array_list_push_back(&meta_request->synced_data.event_delivery_array, event); /* If the array was empty before, schedule task to deliver all events in the array. * If the array already had things in it, then the task is already scheduled and will run soon. */ if (aws_array_list_length(&meta_request->synced_data.event_delivery_array) == 1) { aws_s3_meta_request_acquire(meta_request); aws_task_init( &meta_request->synced_data.event_delivery_task, s_s3_meta_request_event_delivery_task, meta_request, "s3_meta_request_event_delivery"); aws_event_loop_schedule_task_now(meta_request->io_event_loop, &meta_request->synced_data.event_delivery_task); } } bool aws_s3_meta_request_are_events_out_for_delivery_synced(struct aws_s3_meta_request *meta_request) { ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); return aws_array_list_length(&meta_request->synced_data.event_delivery_array) > 0 || meta_request->synced_data.event_delivery_active; } void aws_s3_meta_request_cancel_cancellable_requests_synced(struct aws_s3_meta_request *meta_request, int error_code) { ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); while (!aws_linked_list_empty(&meta_request->synced_data.cancellable_http_streams_list)) { struct aws_linked_list_node *request_node = aws_linked_list_pop_front(&meta_request->synced_data.cancellable_http_streams_list); struct aws_s3_request *request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, cancellable_http_streams_list_node); AWS_ASSERT(!request->always_send); aws_http_stream_cancel(request->synced_data.cancellable_http_stream, error_code); request->synced_data.cancellable_http_stream = NULL; } } static struct aws_s3_request_metrics *s_s3_request_finish_up_and_release_metrics( struct aws_s3_request_metrics *metrics, struct aws_s3_meta_request *meta_request) { if (metrics != NULL) { /* Request is done streaming the body, complete the metrics for the request now. */ if (metrics->time_metrics.end_timestamp_ns == -1) { aws_high_res_clock_get_ticks((uint64_t *)&metrics->time_metrics.end_timestamp_ns); metrics->time_metrics.total_duration_ns = metrics->time_metrics.end_timestamp_ns - metrics->time_metrics.start_timestamp_ns; } if (meta_request->telemetry_callback != NULL) { /* We already in the meta request event thread, invoke the telemetry callback directly */ meta_request->telemetry_callback(meta_request, metrics, meta_request->user_data); } aws_s3_request_metrics_release(metrics); } return NULL; } /* Deliver events in event_delivery_array. * This task runs on the meta-request's io_event_loop thread. 
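 */

/* A minimal sketch (hypothetical s_example_* names) of the hand-off pattern the delivery task below
 * uses: under the lock, the shared event array is swapped with an array owned by the delivery thread,
 * and the events are then processed with the lock released, so user callbacks never run inside the
 * critical section. */
struct s_example_delivery_state {
    struct aws_mutex lock;
    struct aws_array_list shared_events;   /* producers push here while holding the lock */
    struct aws_array_list delivery_events; /* touched only on the delivery thread */
};

static void s_example_deliver_events(struct s_example_delivery_state *state) {
    aws_mutex_lock(&state->lock);
    aws_array_list_swap_contents(&state->delivery_events, &state->shared_events);
    aws_mutex_unlock(&state->lock);

    /* Process state->delivery_events here, outside the critical section,
     * then clear it so the next swap starts from an empty array. */
    aws_array_list_clear(&state->delivery_events);
}

/*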
*/ static void s_s3_meta_request_event_delivery_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { (void)task; (void)task_status; struct aws_s3_meta_request *meta_request = arg; AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); struct aws_s3_client *client = meta_request->client; AWS_PRECONDITION(client); /* Client owns this event loop group. A cancel should not be possible. */ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY); /* Swap contents of synced_data.event_delivery_array into this pre-allocated array-list, then process events */ struct aws_array_list *event_delivery_array = &meta_request->io_threaded_data.event_delivery_array; AWS_FATAL_ASSERT(aws_array_list_length(event_delivery_array) == 0); /* If an error occurs, don't fire callbacks anymore. */ int error_code = AWS_ERROR_SUCCESS; uint32_t num_parts_delivered = 0; /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); aws_array_list_swap_contents(event_delivery_array, &meta_request->synced_data.event_delivery_array); meta_request->synced_data.event_delivery_active = true; if (aws_s3_meta_request_has_finish_result_synced(meta_request)) { error_code = AWS_ERROR_S3_CANCELED; } aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ /* Deliver all events */ for (size_t event_i = 0; event_i < aws_array_list_length(event_delivery_array); ++event_i) { struct aws_s3_meta_request_event event; aws_array_list_get_at(event_delivery_array, &event, event_i); switch (event.type) { case AWS_S3_META_REQUEST_EVENT_RESPONSE_BODY: { struct aws_s3_request *request = event.u.response_body.completed_request; AWS_ASSERT(meta_request == request->meta_request); struct aws_byte_cursor response_body = aws_byte_cursor_from_buf(&request->send_data.response_body); AWS_ASSERT(request->part_number >= 1); if (error_code == AWS_ERROR_SUCCESS && response_body.len > 0 && meta_request->body_callback != NULL) { if (meta_request->body_callback( meta_request, &response_body, request->part_range_start, meta_request->user_data)) { error_code = aws_last_error_or_unknown(); AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "id=%p Response body callback raised error %d (%s).", (void *)meta_request, error_code, aws_error_str(error_code)); } } aws_atomic_fetch_sub(&client->stats.num_requests_streaming_response, 1); ++num_parts_delivered; request->send_data.metrics = s_s3_request_finish_up_and_release_metrics(request->send_data.metrics, meta_request); aws_s3_request_release(request); } break; case AWS_S3_META_REQUEST_EVENT_PROGRESS: { if (error_code == AWS_ERROR_SUCCESS && meta_request->progress_callback != NULL) { /* Don't report 0 byte progress events. * The reasoning behind this is: * * In some code paths, when no data is transferred, there are no progress events, * but in other code paths there might be one progress event of 0 bytes. * We want to be consistent, either: * - REPORT AT LEAST ONCE: even if no data is being transferred. * This would require finding every code path where no progress events are sent, * and sending an appropriate progress event, even if it's for 0 bytes. * One example of ending early is: when resuming a paused upload, * we do ListParts on the UploadID, and if that 404s we assume the * previous "paused" meta-request actually completed, * and so we immediately end the "resuming" meta-request * as successful without sending any further HTTP requests. 
* It would be tough to accurately report progress here because * we don't know the total size, since we never read the request body, * and didn't get any info about the previous upload. * OR * - NEVER REPORT ZERO BYTES: even if that means no progress events at all. * This is easy to do. We'd only send progress events when data is transferred, * and if a 0 byte event slips through somehow, just check before firing the callback. * Since the NEVER REPORT ZERO BYTES path is simpler to implement, we went with that. */ if (event.u.progress.info.bytes_transferred > 0) { meta_request->progress_callback(meta_request, &event.u.progress.info, meta_request->user_data); } } } break; case AWS_S3_META_REQUEST_EVENT_TELEMETRY: { struct aws_s3_request_metrics *metrics = event.u.telemetry.metrics; AWS_FATAL_ASSERT(meta_request->telemetry_callback != NULL); AWS_FATAL_ASSERT(metrics != NULL); event.u.telemetry.metrics = s_s3_request_finish_up_and_release_metrics(event.u.telemetry.metrics, meta_request); } break; default: AWS_FATAL_ASSERT(false); } } /* Done delivering events */ aws_array_list_clear(event_delivery_array); /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); if (error_code != AWS_ERROR_SUCCESS) { aws_s3_meta_request_set_fail_synced(meta_request, NULL, error_code); } meta_request->synced_data.num_parts_delivery_completed += num_parts_delivered; meta_request->synced_data.event_delivery_active = false; aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ aws_s3_client_schedule_process_work(client); aws_s3_meta_request_release(meta_request); } static void s_s3_meta_request_body_streaming_push_synced( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request) { ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); AWS_PRECONDITION(meta_request); AWS_PRECONDITION(request); AWS_ASSERT(request->meta_request == meta_request); aws_s3_request_acquire(request); aws_priority_queue_push(&meta_request->synced_data.pending_body_streaming_requests, &request); } static struct aws_s3_request *s_s3_meta_request_body_streaming_pop_next_synced( struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); if (0 == aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests)) { return NULL; } struct aws_s3_request **top_request = NULL; aws_priority_queue_top(&meta_request->synced_data.pending_body_streaming_requests, (void **)&top_request); AWS_ASSERT(top_request); AWS_FATAL_ASSERT(*top_request); if ((*top_request)->part_number != meta_request->synced_data.next_streaming_part) { return NULL; } struct aws_s3_request *request = NULL; aws_priority_queue_pop(&meta_request->synced_data.pending_body_streaming_requests, (void **)&request); ++meta_request->synced_data.next_streaming_part; return request; } void aws_s3_meta_request_finish(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->vtable); AWS_PRECONDITION(meta_request->vtable->finish); meta_request->vtable->finish(meta_request); } void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); bool already_finished = false; struct aws_linked_list release_request_list; aws_linked_list_init(&release_request_list); struct aws_s3_meta_request_result finish_result; AWS_ZERO_STRUCT(finish_result); /* BEGIN CRITICAL SECTION */ { aws_s3_meta_request_lock_synced_data(meta_request); if (meta_request->synced_data.state == 
AWS_S3_META_REQUEST_STATE_FINISHED) { already_finished = true; goto unlock; } meta_request->synced_data.state = AWS_S3_META_REQUEST_STATE_FINISHED; /* Clean out the pending-stream-to-caller priority queue*/ while (aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) > 0) { struct aws_s3_request *request = NULL; aws_priority_queue_pop(&meta_request->synced_data.pending_body_streaming_requests, (void **)&request); AWS_FATAL_ASSERT(request != NULL); aws_linked_list_push_back(&release_request_list, &request->node); } finish_result = meta_request->synced_data.finish_result; AWS_ZERO_STRUCT(meta_request->synced_data.finish_result); unlock: aws_s3_meta_request_unlock_synced_data(meta_request); } /* END CRITICAL SECTION */ if (already_finished) { return; } while (!aws_linked_list_empty(&release_request_list)) { struct aws_linked_list_node *request_node = aws_linked_list_pop_front(&release_request_list); struct aws_s3_request *release_request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, node); AWS_FATAL_ASSERT(release_request != NULL); /* This pending-body-streaming request was never moved to the event-delivery queue, * so its metrics were never finished. Finish them now. */ release_request->send_data.metrics = s_s3_request_finish_up_and_release_metrics(release_request->send_data.metrics, meta_request); aws_s3_request_release(release_request); } if (meta_request->headers_callback && finish_result.error_response_headers) { if (meta_request->headers_callback( meta_request, finish_result.error_response_headers, finish_result.response_status, meta_request->user_data)) { finish_result.error_code = aws_last_error_or_unknown(); } meta_request->headers_callback = NULL; } AWS_LOGF_DEBUG( AWS_LS_S3_META_REQUEST, "id=%p Meta request finished with error code %d (%s)", (void *)meta_request, finish_result.error_code, aws_error_str(finish_result.error_code)); /* As the meta request has been finished with any HTTP message, we can safely release the http message that * hold. 
So that, the downstream high level language doesn't need to wait for shutdown to clean related resource * (eg: input stream) */ meta_request->request_body_async_stream = aws_async_input_stream_release(meta_request->request_body_async_stream); meta_request->request_body_parallel_stream = aws_parallel_input_stream_release(meta_request->request_body_parallel_stream); meta_request->initial_request_message = aws_http_message_release(meta_request->initial_request_message); if (meta_request->finish_callback != NULL) { meta_request->finish_callback(meta_request, &finish_result, meta_request->user_data); } aws_s3_meta_request_result_clean_up(meta_request, &finish_result); aws_s3_endpoint_release(meta_request->endpoint); meta_request->endpoint = NULL; meta_request->io_event_loop = NULL; } struct aws_future_bool *aws_s3_meta_request_read_body( struct aws_s3_meta_request *meta_request, uint64_t offset, struct aws_byte_buf *buffer) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(buffer); /* If async-stream, simply call read_to_fill() */ if (meta_request->request_body_async_stream != NULL) { return aws_async_input_stream_read_to_fill(meta_request->request_body_async_stream, buffer); } /* If parallel-stream, simply call read(), which must fill the buffer and/or EOF */ if (meta_request->request_body_parallel_stream != NULL) { return aws_parallel_input_stream_read(meta_request->request_body_parallel_stream, offset, buffer); } /* Else synchronous aws_input_stream */ struct aws_input_stream *synchronous_stream = aws_http_message_get_body_stream(meta_request->initial_request_message); AWS_FATAL_ASSERT(synchronous_stream); struct aws_future_bool *synchronous_read_future = aws_future_bool_new(meta_request->allocator); /* Keep calling read() until we fill the buffer, or hit EOF */ struct aws_stream_status status = {.is_end_of_stream = false, .is_valid = true}; while ((buffer->len < buffer->capacity) && !status.is_end_of_stream) { /* Read from stream */ if (aws_input_stream_read(synchronous_stream, buffer) != AWS_OP_SUCCESS) { aws_future_bool_set_error(synchronous_read_future, aws_last_error()); goto synchronous_read_done; } /* Check if stream is done */ if (aws_input_stream_get_status(synchronous_stream, &status) != AWS_OP_SUCCESS) { aws_future_bool_set_error(synchronous_read_future, aws_last_error()); goto synchronous_read_done; } } aws_future_bool_set_result(synchronous_read_future, status.is_end_of_stream); synchronous_read_done: return synchronous_read_future; } bool aws_s3_meta_request_body_has_no_more_data(const struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); struct aws_input_stream *initial_body_stream = aws_http_message_get_body_stream(meta_request->initial_request_message); AWS_FATAL_ASSERT(initial_body_stream); struct aws_stream_status status; if (aws_input_stream_get_status(initial_body_stream, &status)) { return true; } return status.is_end_of_stream; } void aws_s3_meta_request_result_setup( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_result *result, struct aws_s3_request *failed_request, int response_status, int error_code) { if (failed_request != NULL) { if (failed_request->send_data.response_headers != NULL) { result->error_response_headers = failed_request->send_data.response_headers; aws_http_headers_acquire(result->error_response_headers); } if (failed_request->send_data.response_body.capacity > 0) { result->error_response_body = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_byte_buf)); aws_byte_buf_init_copy( 
result->error_response_body, meta_request->allocator, &failed_request->send_data.response_body); } if (failed_request->operation_name != NULL) { result->error_response_operation_name = aws_string_new_from_string(meta_request->allocator, failed_request->operation_name); } } result->response_status = response_status; result->error_code = error_code; } void aws_s3_meta_request_result_clean_up( struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_result *result) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(result); aws_http_headers_release(result->error_response_headers); if (result->error_response_body != NULL) { aws_byte_buf_clean_up(result->error_response_body); aws_mem_release(meta_request->allocator, result->error_response_body); } aws_string_destroy(result->error_response_operation_name); AWS_ZERO_STRUCT(*result); } bool aws_s3_meta_request_checksum_config_has_algorithm( struct aws_s3_meta_request *meta_request, enum aws_s3_checksum_algorithm algorithm) { AWS_PRECONDITION(meta_request); switch (algorithm) { case AWS_SCA_CRC32C: return meta_request->checksum_config.response_checksum_algorithms.crc32c; case AWS_SCA_CRC32: return meta_request->checksum_config.response_checksum_algorithms.crc32; case AWS_SCA_SHA1: return meta_request->checksum_config.response_checksum_algorithms.sha1; case AWS_SCA_SHA256: return meta_request->checksum_config.response_checksum_algorithms.sha256; default: return false; } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_paginator.c000066400000000000000000000411531456575232400236160ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include static const size_t s_dynamic_body_initial_buf_size = 1024; enum operation_state { OS_NOT_STARTED, OS_INITIATED, OS_COMPLETED, OS_ERROR, }; struct aws_s3_paginated_operation { struct aws_allocator *allocator; struct aws_string *result_xml_node_name; struct aws_string *continuation_xml_node_name; aws_s3_next_http_message_fn *next_http_message; aws_s3_on_result_node_encountered_fn *on_result_node_encountered; aws_s3_on_paginated_operation_cleanup_fn *on_paginated_operation_cleanup; void *user_data; struct aws_ref_count ref_count; }; struct aws_s3_paginator { struct aws_allocator *allocator; struct aws_s3_client *client; /** The current, in-flight paginated request to s3. 
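 * Stored as an atomic pointer so that the meta request for the previous page
 * can be exchanged out and released without holding shared_mt_state.lock.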
*/ struct aws_atomic_var current_request; struct aws_string *bucket_name; struct aws_string *endpoint; struct aws_s3_paginated_operation *operation; struct aws_ref_count ref_count; struct { struct aws_string *continuation_token; enum operation_state operation_state; struct aws_mutex lock; bool has_more_results; } shared_mt_state; struct aws_byte_buf result_body; aws_s3_on_page_finished_fn *on_page_finished; void *user_data; }; static void s_operation_ref_count_zero_callback(void *arg) { struct aws_s3_paginated_operation *operation = arg; if (operation->on_paginated_operation_cleanup) { operation->on_paginated_operation_cleanup(operation->user_data); } if (operation->result_xml_node_name) { aws_string_destroy(operation->result_xml_node_name); } if (operation->continuation_xml_node_name) { aws_string_destroy(operation->continuation_xml_node_name); } aws_mem_release(operation->allocator, operation); } static void s_paginator_ref_count_zero_callback(void *arg) { struct aws_s3_paginator *paginator = arg; aws_s3_client_release(paginator->client); aws_s3_paginated_operation_release(paginator->operation); aws_byte_buf_clean_up(&paginator->result_body); struct aws_s3_meta_request *previous_request = aws_atomic_exchange_ptr(&paginator->current_request, NULL); if (previous_request != NULL) { aws_s3_meta_request_release(previous_request); } if (paginator->bucket_name) { aws_string_destroy(paginator->bucket_name); } if (paginator->endpoint) { aws_string_destroy(paginator->endpoint); } if (paginator->shared_mt_state.continuation_token) { aws_string_destroy(paginator->shared_mt_state.continuation_token); } aws_mem_release(paginator->allocator, paginator); } struct aws_s3_paginator *aws_s3_initiate_paginator( struct aws_allocator *allocator, const struct aws_s3_paginator_params *params) { AWS_FATAL_PRECONDITION(params); AWS_FATAL_PRECONDITION(params->client); struct aws_s3_paginator *paginator = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_paginator)); paginator->allocator = allocator; paginator->client = aws_s3_client_acquire(params->client); paginator->operation = params->operation; paginator->on_page_finished = params->on_page_finished_fn; paginator->user_data = params->user_data; paginator->bucket_name = aws_string_new_from_cursor(allocator, ¶ms->bucket_name); paginator->endpoint = aws_string_new_from_cursor(allocator, ¶ms->endpoint); aws_s3_paginated_operation_acquire(params->operation); aws_byte_buf_init(&paginator->result_body, allocator, s_dynamic_body_initial_buf_size); aws_ref_count_init(&paginator->ref_count, paginator, s_paginator_ref_count_zero_callback); aws_mutex_init(&paginator->shared_mt_state.lock); aws_atomic_init_ptr(&paginator->current_request, NULL); paginator->shared_mt_state.operation_state = OS_NOT_STARTED; return paginator; } void aws_s3_paginator_release(struct aws_s3_paginator *paginator) { if (paginator) { aws_ref_count_release(&paginator->ref_count); } } void aws_s3_paginator_acquire(struct aws_s3_paginator *paginator) { AWS_FATAL_PRECONDITION(paginator); aws_ref_count_acquire(&paginator->ref_count); } struct aws_s3_paginated_operation *aws_s3_paginated_operation_new( struct aws_allocator *allocator, const struct aws_s3_paginated_operation_params *params) { struct aws_s3_paginated_operation *operation = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_paginated_operation)); operation->allocator = allocator; operation->result_xml_node_name = aws_string_new_from_cursor(allocator, ¶ms->result_xml_node_name); operation->continuation_xml_node_name = 
aws_string_new_from_cursor(allocator, ¶ms->continuation_token_node_name); operation->next_http_message = params->next_message; operation->on_result_node_encountered = params->on_result_node_encountered_fn; operation->on_paginated_operation_cleanup = params->on_paginated_operation_cleanup; operation->user_data = params->user_data; aws_ref_count_init(&operation->ref_count, operation, s_operation_ref_count_zero_callback); return operation; } void aws_s3_paginated_operation_acquire(struct aws_s3_paginated_operation *operation) { AWS_FATAL_PRECONDITION(operation); aws_ref_count_acquire(&operation->ref_count); } void aws_s3_paginated_operation_release(struct aws_s3_paginated_operation *operation) { if (operation) { aws_ref_count_release(&operation->ref_count); } } bool aws_s3_paginator_has_more_results(const struct aws_s3_paginator *paginator) { AWS_PRECONDITION(paginator); bool has_more_results = false; struct aws_s3_paginator *paginator_mut = (struct aws_s3_paginator *)paginator; aws_mutex_lock(&paginator_mut->shared_mt_state.lock); has_more_results = paginator->shared_mt_state.has_more_results; aws_mutex_unlock(&paginator_mut->shared_mt_state.lock); AWS_LOGF_INFO(AWS_LS_S3_GENERAL, "has more %d", has_more_results); return has_more_results; } struct aws_string *s_paginator_get_continuation_token(const struct aws_s3_paginator *paginator) { AWS_PRECONDITION(paginator); struct aws_string *continuation_token = NULL; struct aws_s3_paginator *paginator_mut = (struct aws_s3_paginator *)paginator; aws_mutex_lock(&paginator_mut->shared_mt_state.lock); if (paginator->shared_mt_state.continuation_token) { continuation_token = aws_string_clone_or_reuse(paginator->allocator, paginator->shared_mt_state.continuation_token); } aws_mutex_unlock(&paginator_mut->shared_mt_state.lock); return continuation_token; } static inline int s_set_paginator_state_if_legal( struct aws_s3_paginator *paginator, enum operation_state expected, enum operation_state state) { aws_mutex_lock(&paginator->shared_mt_state.lock); if (paginator->shared_mt_state.operation_state != expected) { aws_mutex_unlock(&paginator->shared_mt_state.lock); return aws_raise_error(AWS_ERROR_INVALID_STATE); } paginator->shared_mt_state.operation_state = state; aws_mutex_unlock(&paginator->shared_mt_state.lock); return AWS_OP_SUCCESS; } /** * On a successful operation, this is an xml document. Just copy the buffers over until we're ready to parse (upon * completion) of the response body. 
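 * The buffered document is only parsed after the whole page has arrived:
 * s_on_request_finished hands result_body to aws_s3_paginated_operation_on_response
 * once the meta request completes with a 200 response.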
*/ static int s_receive_body_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)range_start; (void)meta_request; struct aws_s3_paginator *paginator = user_data; if (body && body->len) { aws_byte_buf_append_dynamic(&paginator->result_body, body); } return AWS_OP_SUCCESS; } struct parser_wrapper { struct aws_s3_paginated_operation *operation; struct aws_string *next_continuation_token; bool has_more_results; }; static int s_on_result_node_encountered(struct aws_xml_node *node, void *user_data) { struct parser_wrapper *wrapper = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor continuation_name_val = aws_byte_cursor_from_string(wrapper->operation->continuation_xml_node_name); if (aws_byte_cursor_eq_ignore_case(&node_name, &continuation_name_val)) { struct aws_byte_cursor continuation_token_cur; if (aws_xml_node_as_body(node, &continuation_token_cur) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } wrapper->next_continuation_token = aws_string_new_from_cursor(wrapper->operation->allocator, &continuation_token_cur); return AWS_OP_SUCCESS; } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "IsTruncated")) { struct aws_byte_cursor truncated_cur; if (aws_xml_node_as_body(node, &truncated_cur) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } if (aws_byte_cursor_eq_c_str_ignore_case(&truncated_cur, "true")) { wrapper->has_more_results = true; } return AWS_OP_SUCCESS; } return wrapper->operation->on_result_node_encountered(node, wrapper->operation->user_data); } static int s_on_root_node_encountered(struct aws_xml_node *node, void *user_data) { struct parser_wrapper *wrapper = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_byte_cursor result_name_val = aws_byte_cursor_from_string(wrapper->operation->result_xml_node_name); if (aws_byte_cursor_eq_ignore_case(&node_name, &result_name_val)) { return aws_xml_node_traverse(node, s_on_result_node_encountered, wrapper); } /* root element not what we expected */ return aws_raise_error(AWS_ERROR_INVALID_XML); } static void s_on_request_finished( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; struct aws_s3_paginator *paginator = user_data; if (meta_request_result->response_status == 200) { /* clears previous continuation token */ aws_mutex_lock(&paginator->shared_mt_state.lock); if (paginator->shared_mt_state.continuation_token) { aws_string_destroy(paginator->shared_mt_state.continuation_token); paginator->shared_mt_state.continuation_token = NULL; paginator->shared_mt_state.has_more_results = false; } aws_mutex_unlock(&paginator->shared_mt_state.lock); struct aws_byte_cursor result_body_cursor = aws_byte_cursor_from_buf(&paginator->result_body); struct aws_string *continuation_token = NULL; bool has_more_results = false; aws_s3_paginated_operation_on_response( paginator->operation, &result_body_cursor, &continuation_token, &has_more_results); aws_mutex_lock(&paginator->shared_mt_state.lock); if (paginator->shared_mt_state.continuation_token) { aws_string_destroy(paginator->shared_mt_state.continuation_token); } paginator->shared_mt_state.continuation_token = continuation_token; paginator->shared_mt_state.has_more_results = has_more_results; aws_mutex_unlock(&paginator->shared_mt_state.lock); if (has_more_results) { s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_NOT_STARTED); } else { 
s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_COMPLETED); } } else { s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_ERROR); } if (paginator->on_page_finished) { paginator->on_page_finished(paginator, meta_request_result->error_code, paginator->user_data); } /* this ref count was done right before we kicked off the request to keep the paginator object alive. Release it now * that the operation has completed. */ aws_s3_paginator_release(paginator); } int aws_s3_paginated_operation_on_response( struct aws_s3_paginated_operation *operation, struct aws_byte_cursor *response_body, struct aws_string **continuation_token_out, bool *has_more_results_out) { struct parser_wrapper wrapper = {.operation = operation}; /* we've got a full xml document now and the request succeeded, parse the document and fire all the callbacks * for each object and prefix. All of that happens in these three lines. */ struct aws_xml_parser_options parser_options = { .doc = *response_body, .max_depth = 16U, .on_root_encountered = s_on_root_node_encountered, .user_data = &wrapper, }; if (aws_xml_parse(operation->allocator, &parser_options) != AWS_OP_SUCCESS) { aws_string_destroy(wrapper.next_continuation_token); *continuation_token_out = NULL; *has_more_results_out = false; return AWS_OP_ERR; } *continuation_token_out = wrapper.next_continuation_token; *has_more_results_out = wrapper.has_more_results; return AWS_OP_SUCCESS; } int aws_s3_construct_next_paginated_request_http_message( struct aws_s3_paginated_operation *operation, struct aws_byte_cursor *continuation_token, struct aws_http_message **out_message) { return operation->next_http_message(continuation_token, operation->user_data, out_message); } int aws_s3_paginator_continue(struct aws_s3_paginator *paginator, const struct aws_signing_config_aws *signing_config) { AWS_PRECONDITION(paginator); AWS_PRECONDITION(signing_config); int re_code = AWS_OP_ERR; if (s_set_paginator_state_if_legal(paginator, OS_NOT_STARTED, OS_INITIATED)) { return re_code; } struct aws_http_message *paginated_request_message = NULL; struct aws_string *continuation_string = NULL; struct aws_byte_buf host_buf; AWS_ZERO_STRUCT(host_buf); struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(paginator->bucket_name); struct aws_byte_cursor period_cur = aws_byte_cursor_from_c_str("."); struct aws_byte_cursor endpoint_val = aws_byte_cursor_from_string(paginator->endpoint); if (aws_byte_buf_init_copy_from_cursor(&host_buf, paginator->allocator, host_cur) || aws_byte_buf_append_dynamic(&host_buf, &period_cur) || aws_byte_buf_append_dynamic(&host_buf, &endpoint_val)) { goto done; } struct aws_http_header host_header = { .name = aws_byte_cursor_from_c_str("host"), .value = aws_byte_cursor_from_buf(&host_buf), }; continuation_string = s_paginator_get_continuation_token(paginator); struct aws_byte_cursor continuation_cursor; AWS_ZERO_STRUCT(continuation_cursor); struct aws_byte_cursor *continuation = NULL; if (continuation_string) { continuation_cursor = aws_byte_cursor_from_string(continuation_string); continuation = &continuation_cursor; } if (paginator->operation->next_http_message( continuation, paginator->operation->user_data, &paginated_request_message)) { goto done; } if (aws_http_message_add_header(paginated_request_message, host_header)) { goto done; } struct aws_s3_meta_request_options request_options = { .user_data = paginator, .signing_config = (struct aws_signing_config_aws *)signing_config, .type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .body_callback = 
s_receive_body_callback, .finish_callback = s_on_request_finished, .message = paginated_request_message, }; /* re-use the current buffer. */ aws_byte_buf_reset(&paginator->result_body, false); /* we're kicking off an asynchronous request. ref-count the paginator to keep it alive until we finish. */ aws_s3_paginator_acquire(paginator); struct aws_s3_meta_request *previous_request = aws_atomic_exchange_ptr(&paginator->current_request, NULL); if (previous_request != NULL) { /* release request from previous page */ aws_s3_meta_request_release(previous_request); } struct aws_s3_meta_request *new_request = aws_s3_client_make_meta_request(paginator->client, &request_options); aws_atomic_store_ptr(&paginator->current_request, new_request); if (new_request == NULL) { s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_ERROR); goto done; } re_code = AWS_OP_SUCCESS; done: aws_http_message_release(paginated_request_message); aws_string_destroy(continuation_string); aws_byte_buf_clean_up(&host_buf); return re_code; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_parallel_input_stream.c000066400000000000000000000105641456575232400262220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_parallel_input_stream.h" #include #include #include #include void aws_parallel_input_stream_init_base( struct aws_parallel_input_stream *stream, struct aws_allocator *alloc, const struct aws_parallel_input_stream_vtable *vtable, void *impl) { AWS_ZERO_STRUCT(*stream); stream->alloc = alloc; stream->vtable = vtable; stream->impl = impl; aws_ref_count_init(&stream->ref_count, stream, (aws_simple_completion_callback *)vtable->destroy); } struct aws_parallel_input_stream *aws_parallel_input_stream_acquire(struct aws_parallel_input_stream *stream) { if (stream != NULL) { aws_ref_count_acquire(&stream->ref_count); } return stream; } struct aws_parallel_input_stream *aws_parallel_input_stream_release(struct aws_parallel_input_stream *stream) { if (stream != NULL) { aws_ref_count_release(&stream->ref_count); } return NULL; } struct aws_future_bool *aws_parallel_input_stream_read( struct aws_parallel_input_stream *stream, uint64_t offset, struct aws_byte_buf *dest) { /* Ensure the buffer has space available */ if (dest->len == dest->capacity) { struct aws_future_bool *future = aws_future_bool_new(stream->alloc); aws_future_bool_set_error(future, AWS_ERROR_SHORT_BUFFER); return future; } struct aws_future_bool *future = stream->vtable->read(stream, offset, dest); return future; } struct aws_parallel_input_stream_from_file_impl { struct aws_parallel_input_stream base; struct aws_string *file_path; }; static void s_para_from_file_destroy(struct aws_parallel_input_stream *stream) { struct aws_parallel_input_stream_from_file_impl *impl = stream->impl; aws_string_destroy(impl->file_path); aws_mem_release(stream->alloc, impl); } struct aws_future_bool *s_para_from_file_read( struct aws_parallel_input_stream *stream, uint64_t offset, struct aws_byte_buf *dest) { struct aws_future_bool *future = aws_future_bool_new(stream->alloc); struct aws_parallel_input_stream_from_file_impl *impl = stream->impl; bool success = false; struct aws_input_stream *file_stream = NULL; struct aws_stream_status status = { .is_end_of_stream = false, .is_valid = true, }; file_stream = aws_input_stream_new_from_file(stream->alloc, aws_string_c_str(impl->file_path)); if (!file_stream) { goto done; } if 
(aws_input_stream_seek(file_stream, offset, AWS_SSB_BEGIN)) { goto done; } /* Keep reading until fill the buffer. * Note that we must read() after seek() to determine if we're EOF, the seek alone won't trigger it. */ while ((dest->len < dest->capacity) && !status.is_end_of_stream) { /* Read from stream */ if (aws_input_stream_read(file_stream, dest) != AWS_OP_SUCCESS) { goto done; } /* Check if stream is done */ if (aws_input_stream_get_status(file_stream, &status) != AWS_OP_SUCCESS) { goto done; } } success = true; done: if (success) { aws_future_bool_set_result(future, status.is_end_of_stream); } else { aws_future_bool_set_error(future, aws_last_error()); } aws_input_stream_release(file_stream); return future; } static struct aws_parallel_input_stream_vtable s_parallel_input_stream_from_file_vtable = { .destroy = s_para_from_file_destroy, .read = s_para_from_file_read, }; struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file( struct aws_allocator *allocator, struct aws_byte_cursor file_name) { struct aws_parallel_input_stream_from_file_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_parallel_input_stream_from_file_impl)); aws_parallel_input_stream_init_base(&impl->base, allocator, &s_parallel_input_stream_from_file_vtable, impl); impl->file_path = aws_string_new_from_cursor(allocator, &file_name); if (!aws_path_exists(impl->file_path)) { /* If file path not exists, raise error from errno. */ aws_translate_and_raise_io_error(errno); goto error; } return &impl->base; error: s_para_from_file_destroy(&impl->base); return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_platform_info.c000066400000000000000000000551361456575232400244770ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include /**** Configuration info for the c5n.18xlarge *****/ static struct aws_byte_cursor s_c5n_nic_array[] = {AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0")}; static struct aws_s3_cpu_group_info s_c5n_18xlarge_cpu_group_info_array[] = { { .cpu_group = 0u, .nic_name_array = s_c5n_nic_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_c5n_nic_array), .cpus_in_group = 36, }, { .cpu_group = 1u, .nic_name_array = NULL, .nic_name_array_length = 0u, .cpus_in_group = 36, }, }; static struct aws_s3_platform_info s_c5n_18xlarge_platform_info = { .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.18xlarge"), .max_throughput_gbps = 100u, .cpu_group_info_array = s_c5n_18xlarge_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_cpu_group_info_array), /** not yet **/ .has_recommended_configuration = false, }; static struct aws_s3_platform_info s_c5n_metal_platform_info = { .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.metal"), .max_throughput_gbps = 100u, .cpu_group_info_array = s_c5n_18xlarge_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_cpu_group_info_array), /** not yet **/ .has_recommended_configuration = false, }; /****** End c5n.18xlarge *****/ /****** Begin c5n.large ******/ static struct aws_s3_cpu_group_info s_c5n_9xlarge_cpu_group_info_array[] = { { .cpu_group = 0u, .nic_name_array = s_c5n_nic_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_c5n_nic_array), .cpus_in_group = 36, }, }; static struct aws_s3_platform_info s_c5n_9xlarge_platform_info = { .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.9xlarge"), .max_throughput_gbps = 50u, .cpu_group_info_array = s_c5n_9xlarge_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_9xlarge_cpu_group_info_array), /** not yet **/ .has_recommended_configuration = false, }; /****** End c5n.9large *****/ /***** Begin p4d.24xlarge and p4de.24xlarge ****/ static struct aws_byte_cursor s_p4d_socket1_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), }; static struct aws_byte_cursor s_p4d_socket2_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth2"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), }; static struct aws_s3_cpu_group_info s_p4d_cpu_group_info_array[] = { { .cpu_group = 0u, .nic_name_array = s_p4d_socket1_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_p4d_socket1_array), .cpus_in_group = 48, }, { .cpu_group = 1u, .nic_name_array = s_p4d_socket2_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_p4d_socket1_array), .cpus_in_group = 48, }, }; static struct aws_s3_platform_info s_p4d_platform_info = { .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("p4d.24xlarge"), .max_throughput_gbps = 400u, .cpu_group_info_array = s_p4d_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_p4d_cpu_group_info_array), .has_recommended_configuration = true, }; static struct aws_s3_platform_info s_p4de_platform_info = { .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("p4de.24xlarge"), .max_throughput_gbps = 400u, .cpu_group_info_array = s_p4d_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_p4d_cpu_group_info_array), .has_recommended_configuration = true, }; /***** End p4d.24xlarge and p4de.24xlarge ****/ /***** Begin p5.48xlarge ******/ /* note: the p5 is a stunningly massive instance type. 
* While the specs have 3.2 TB/s for the network bandwidth * not all of that is accessible from the CPU. From the CPU we'll * be able to get around 400 Gbps. Also note, 3.2 TB/s * with 2 sockets on a nitro instance inplies 16 NICs * per node. However, practically, due to the topology of this instance * as far as this client is concerned, there are two NICs per node, similar * to the p4d. The rest is for other things on the machine to use. */ struct aws_byte_cursor s_p5_socket1_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), }; static struct aws_byte_cursor s_p5_socket2_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth2"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), }; static struct aws_s3_cpu_group_info s_p5_cpu_group_info_array[] = { { .cpu_group = 0u, .nic_name_array = s_p5_socket1_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_p5_socket1_array), .cpus_in_group = 96, }, { .cpu_group = 1u, .nic_name_array = s_p5_socket2_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_p5_socket2_array), .cpus_in_group = 96, }, }; struct aws_s3_platform_info s_p5_platform_info = { .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("p5.48xlarge"), .max_throughput_gbps = 400u, .cpu_group_info_array = s_p5_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_p5_cpu_group_info_array), .has_recommended_configuration = true, }; /***** End p5.48xlarge *****/ /**** Begin trn1_32_large *****/ struct aws_byte_cursor s_trn1_n_socket1_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth2"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), }; static struct aws_byte_cursor s_trn1_n_socket2_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth4"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth6"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth7"), }; static struct aws_s3_cpu_group_info s_trn1_n_cpu_group_info_array[] = { { .cpu_group = 0u, .nic_name_array = s_trn1_n_socket1_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_n_socket1_array), .cpus_in_group = 64, }, { .cpu_group = 1u, .nic_name_array = s_trn1_n_socket2_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_n_socket2_array), .cpus_in_group = 64, }, }; static struct aws_s3_platform_info s_trn1_n_platform_info = { .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("trn1n.32xlarge"), /* not all of the advertised 1600 Gbps bandwidth can be hit from the cpu in user-space */ .max_throughput_gbps = 800, .cpu_group_info_array = s_trn1_n_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_trn1_n_cpu_group_info_array), .has_recommended_configuration = true, }; struct aws_byte_cursor s_trn1_socket1_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth1"), }; static struct aws_byte_cursor s_trn1_socket2_array[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth3"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth4"), }; static struct aws_s3_cpu_group_info s_trn1_cpu_group_info_array[] = { { .cpu_group = 0u, .nic_name_array = s_trn1_socket1_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_socket1_array), .cpus_in_group = 64, }, { .cpu_group = 1u, .nic_name_array = s_trn1_socket2_array, .nic_name_array_length = AWS_ARRAY_SIZE(s_trn1_socket2_array), .cpus_in_group = 64, }, }; static struct aws_s3_platform_info s_trn1_platform_info = { .instance_type = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("trn1.32xlarge"), /* not all of the advertised 800 Gbps bandwidth can be hit from the cpu in user-space */ .max_throughput_gbps = 600, .cpu_group_info_array = s_trn1_cpu_group_info_array, .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_trn1_cpu_group_info_array), .has_recommended_configuration = true, }; /**** End trn1.x32_large ******/ struct aws_s3_platform_info_loader { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct { struct aws_string *detected_instance_type; struct aws_s3_platform_info current_env_platform_info; /* aws_hash_table * the table does not "own" any of the data inside it. */ struct aws_hash_table compute_platform_info_table; struct aws_mutex lock; } lock_data; struct aws_system_environment *current_env; }; void s_add_platform_info_to_table(struct aws_s3_platform_info_loader *loader, struct aws_s3_platform_info *info) { AWS_PRECONDITION(info->instance_type.len > 0); AWS_LOGF_TRACE( AWS_LS_S3_GENERAL, "id=%p: adding platform entry for \"" PRInSTR "\".", (void *)loader, AWS_BYTE_CURSOR_PRI(info->instance_type)); struct aws_hash_element *platform_info_element = NULL; aws_hash_table_find(&loader->lock_data.compute_platform_info_table, &info->instance_type, &platform_info_element); if (platform_info_element) { AWS_LOGF_TRACE( AWS_LS_S3_GENERAL, "id=%p: existing entry for \"" PRInSTR "\" found, syncing the values.", (void *)loader, AWS_BYTE_CURSOR_PRI(info->instance_type)); /* detected runtime NIC data is better than the pre-known config data but we don't always have it, * so copy over any better info than we have. Assume if info has NIC data, it was discovered at runtime. * The other data should be identical and we don't want to add complications to the memory model. * You're guaranteed only one instance of an instance type's info, the initial load is static memory */ struct aws_s3_platform_info *existing = platform_info_element->value; // TODO: sync the cpu group and NIC data info->has_recommended_configuration = existing->has_recommended_configuration; /* always prefer a pre-known bandwidth, as we estimate low on EC2 by default for safety. */ info->max_throughput_gbps = existing->max_throughput_gbps; } else { AWS_FATAL_ASSERT( !aws_hash_table_put( &loader->lock_data.compute_platform_info_table, &info->instance_type, (void *)info, NULL) && "hash table put failed!"); } } static void s_destroy_loader(void *arg) { struct aws_s3_platform_info_loader *loader = arg; aws_hash_table_clean_up(&loader->lock_data.compute_platform_info_table); aws_mutex_clean_up(&loader->lock_data.lock); if (loader->lock_data.detected_instance_type) { aws_string_destroy(loader->lock_data.detected_instance_type); } aws_system_environment_release(loader->current_env); aws_mem_release(loader->allocator, loader); } struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_new(struct aws_allocator *allocator) { struct aws_s3_platform_info_loader *loader = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_platform_info_loader)); loader->allocator = allocator; loader->current_env = aws_system_environment_load(allocator); AWS_FATAL_ASSERT(loader->current_env && "Failed to load system environment"); aws_mutex_init(&loader->lock_data.lock); aws_ref_count_init(&loader->ref_count, loader, s_destroy_loader); /* TODO: Implement runtime CPU information retrieval from the system. Currently, Valgrind detects a memory leak * associated with the g_numa_node_of_cpu_ptr function (see: https://github.com/numactl/numactl/issues/3). 
This * issue was addressed in version v2.0.13 of libnuma (see: https://github.com/numactl/numactl/pull/43). However, * Amazon Linux 2 defaults to libnuma version v2.0.9, which lacks this fix. We need to suppress this * warning as a false positive in older versions of libnuma. In the future, however, we will probably eliminate the * use of numactl altogether. */ AWS_FATAL_ASSERT( !aws_hash_table_init( &loader->lock_data.compute_platform_info_table, allocator, 32, aws_hash_byte_cursor_ptr_ignore_case, (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case, NULL, NULL) && "Hash table init failed!"); s_add_platform_info_to_table(loader, &s_c5n_18xlarge_platform_info); s_add_platform_info_to_table(loader, &s_c5n_9xlarge_platform_info); s_add_platform_info_to_table(loader, &s_c5n_metal_platform_info); s_add_platform_info_to_table(loader, &s_p4d_platform_info); s_add_platform_info_to_table(loader, &s_p4de_platform_info); s_add_platform_info_to_table(loader, &s_p5_platform_info); s_add_platform_info_to_table(loader, &s_trn1_n_platform_info); s_add_platform_info_to_table(loader, &s_trn1_platform_info); return loader; } struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_acquire(struct aws_s3_platform_info_loader *loader) { aws_ref_count_acquire(&loader->ref_count); return loader; } struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_release(struct aws_s3_platform_info_loader *loader) { if (loader) { aws_ref_count_release(&loader->ref_count); } return NULL; } struct imds_callback_info { struct aws_allocator *allocator; struct aws_string *instance_type; struct aws_condition_variable c_var; int error_code; bool shutdown_completed; struct aws_mutex mutex; }; static void s_imds_client_shutdown_completed(void *user_data) { struct imds_callback_info *info = user_data; aws_mutex_lock(&info->mutex); info->shutdown_completed = true; aws_condition_variable_notify_all(&info->c_var); aws_mutex_unlock(&info->mutex); } static bool s_client_shutdown_predicate(void *arg) { struct imds_callback_info *info = arg; return info->shutdown_completed; } static void s_imds_client_on_get_instance_info_callback( const struct aws_imds_instance_info *instance_info, int error_code, void *user_data) { struct imds_callback_info *info = user_data; aws_mutex_lock(&info->mutex); if (error_code) { info->error_code = error_code; } else { info->instance_type = aws_string_new_from_cursor(info->allocator, &instance_info->instance_type); } aws_condition_variable_notify_all(&info->c_var); aws_mutex_unlock(&info->mutex); } static bool s_completion_predicate(void *arg) { struct imds_callback_info *info = arg; return info->error_code != 0 || info->instance_type != NULL; } struct aws_string *s_query_imds_for_instance_type(struct aws_allocator *allocator) { struct imds_callback_info callback_info = { .mutex = AWS_MUTEX_INIT, .c_var = AWS_CONDITION_VARIABLE_INIT, .allocator = allocator, }; struct aws_event_loop_group *el_group = NULL; struct aws_host_resolver *resolver = NULL; struct aws_client_bootstrap *client_bootstrap = NULL; /* now call IMDS */ el_group = aws_event_loop_group_new_default(allocator, 1, NULL); if (!el_group) { goto tear_down; } struct aws_host_resolver_default_options resolver_options = { .max_entries = 1, .el_group = el_group, }; resolver = aws_host_resolver_new_default(allocator, &resolver_options); if (!resolver) { goto tear_down; } struct aws_client_bootstrap_options bootstrap_options = { .event_loop_group = el_group, .host_resolver = resolver, }; client_bootstrap = 
aws_client_bootstrap_new(allocator, &bootstrap_options); if (!client_bootstrap) { goto tear_down; } struct aws_imds_client_shutdown_options imds_shutdown_options = { .shutdown_callback = s_imds_client_shutdown_completed, .shutdown_user_data = &callback_info, }; struct aws_imds_client_options imds_options = { .bootstrap = client_bootstrap, .imds_version = IMDS_PROTOCOL_V2, .shutdown_options = imds_shutdown_options, }; struct aws_imds_client *imds_client = aws_imds_client_new(allocator, &imds_options); if (!imds_client) { goto tear_down; } aws_mutex_lock(&callback_info.mutex); if (aws_imds_client_get_instance_info(imds_client, s_imds_client_on_get_instance_info_callback, &callback_info)) { aws_condition_variable_wait_for_pred( &callback_info.c_var, &callback_info.mutex, AWS_TIMESTAMP_SECS, s_completion_predicate, &callback_info); } aws_imds_client_release(imds_client); aws_condition_variable_wait_pred( &callback_info.c_var, &callback_info.mutex, s_client_shutdown_predicate, &callback_info); aws_mutex_unlock(&callback_info.mutex); if (callback_info.error_code) { aws_raise_error(callback_info.error_code); AWS_LOGF_ERROR( AWS_LS_S3_CLIENT, "IMDS call failed with error %s.", aws_error_debug_str(callback_info.error_code)); } tear_down: if (client_bootstrap) { aws_client_bootstrap_release(client_bootstrap); } if (resolver) { aws_host_resolver_release(resolver); } if (el_group) { aws_event_loop_group_release(el_group); } return callback_info.instance_type; } struct aws_byte_cursor aws_s3_get_ec2_instance_type(struct aws_s3_platform_info_loader *loader) { aws_mutex_lock(&loader->lock_data.lock); struct aws_byte_cursor return_cur; AWS_ZERO_STRUCT(return_cur); if (loader->lock_data.detected_instance_type) { AWS_LOGF_TRACE( AWS_LS_S3_CLIENT, "id=%p: Instance type has already been determined to be %s. Returning cached version.", (void *)loader, aws_string_bytes(loader->lock_data.detected_instance_type)); goto return_instance_and_unlock; } AWS_LOGF_TRACE( AWS_LS_S3_CLIENT, "id=%p: Instance type has not been determined, checking to see if running in EC2 nitro environment.", (void *)loader); /* * We want to only imds call if we know that we are on an ec2 instance. All new instances are Nitro and we don't * care about the old ones. */ if (aws_s3_is_running_on_ec2_nitro(loader)) { AWS_LOGF_INFO( AWS_LS_S3_CLIENT, "id=%p: Detected Amazon EC2 with nitro as the current environment.", (void *)loader); /* easy case not requiring any calls out to IMDS. If we detected we're running on ec2, then the dmi info is * correct, and we can use it if we have it. Otherwise call out to IMDS. */ struct aws_byte_cursor product_name = aws_system_environment_get_virtualization_product_name(loader->current_env); if (product_name.len) { loader->lock_data.detected_instance_type = aws_string_new_from_cursor(loader->allocator, &product_name); loader->lock_data.current_env_platform_info.instance_type = aws_byte_cursor_from_string(loader->lock_data.detected_instance_type); s_add_platform_info_to_table(loader, &loader->lock_data.current_env_platform_info); AWS_LOGF_INFO( AWS_LS_S3_CLIENT, "id=%p: Determined instance type to be %s, from dmi info. Caching.", (void *)loader, aws_string_bytes(loader->lock_data.detected_instance_type)); goto return_instance_and_unlock; } AWS_LOGF_DEBUG( AWS_LS_S3_CLIENT, "static: DMI info was insufficient to determine instance type. 
Making call to IMDS to determine"); struct aws_string *instance_type = s_query_imds_for_instance_type(loader->allocator); if (instance_type) { loader->lock_data.detected_instance_type = instance_type; loader->lock_data.current_env_platform_info.instance_type = aws_byte_cursor_from_string(instance_type); s_add_platform_info_to_table(loader, &loader->lock_data.current_env_platform_info); AWS_LOGF_INFO( AWS_LS_S3_CLIENT, "id=%p: Determined instance type to be %s, from IMDS.", (void *)loader, aws_string_bytes(loader->lock_data.detected_instance_type)); } } return_instance_and_unlock: return_cur = loader->lock_data.current_env_platform_info.instance_type; aws_mutex_unlock(&loader->lock_data.lock); return return_cur; } const struct aws_s3_platform_info *aws_s3_get_platform_info_for_current_environment( struct aws_s3_platform_info_loader *loader) { /* getting the instance type will set it on the loader the first time if it can */ aws_s3_get_ec2_instance_type(loader); /* will never be mutated after the above call. */ return &loader->lock_data.current_env_platform_info; } struct aws_array_list aws_s3_get_recommended_platforms(struct aws_s3_platform_info_loader *loader) { struct aws_array_list array_list; aws_mutex_lock(&loader->lock_data.lock); aws_array_list_init_dynamic(&array_list, loader->allocator, 5, sizeof(struct aws_byte_cursor)); /* Iterate over the map and add instance types to the array list which have * platform_info->has_recommended_configuration == true */ for (struct aws_hash_iter iter = aws_hash_iter_begin(&loader->lock_data.compute_platform_info_table); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct aws_s3_platform_info *platform_info = iter.element.value; if (platform_info->has_recommended_configuration) { aws_array_list_push_back(&array_list, &platform_info->instance_type); } } aws_mutex_unlock(&loader->lock_data.lock); return array_list; } const struct aws_s3_platform_info *aws_s3_get_platform_info_for_instance_type( struct aws_s3_platform_info_loader *loader, struct aws_byte_cursor instance_type_name) { aws_mutex_lock(&loader->lock_data.lock); struct aws_hash_element *platform_info_element = NULL; aws_hash_table_find(&loader->lock_data.compute_platform_info_table, &instance_type_name, &platform_info_element); aws_mutex_unlock(&loader->lock_data.lock); if (platform_info_element) { return platform_info_element->value; } return NULL; } bool aws_s3_is_running_on_ec2_nitro(struct aws_s3_platform_info_loader *loader) { struct aws_byte_cursor system_virt_name = aws_system_environment_get_virtualization_vendor(loader->current_env); if (aws_byte_cursor_eq_c_str_ignore_case(&system_virt_name, "amazon ec2")) { return true; } return false; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_request.c000066400000000000000000000403431456575232400233220ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_request.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include #include #include #include static void s_s3_request_destroy(void *user_data); struct aws_s3_request *aws_s3_request_new( struct aws_s3_meta_request *meta_request, int request_tag, enum aws_s3_request_type request_type, uint32_t part_number, uint32_t flags) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(meta_request->allocator); struct aws_s3_request *request = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_request)); aws_ref_count_init(&request->ref_count, request, (aws_simple_completion_callback *)s_s3_request_destroy); request->allocator = meta_request->allocator; request->meta_request = aws_s3_meta_request_acquire(meta_request); request->request_tag = request_tag; request->request_type = request_type; const char *operation_name = aws_s3_request_type_operation_name(request_type); if (operation_name[0] != '\0') { request->operation_name = aws_string_new_from_c_str(request->allocator, operation_name); } request->part_number = part_number; request->record_response_headers = (flags & AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS) != 0; request->has_part_size_response_body = (flags & AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY) != 0; request->has_part_size_request_body = (flags & AWS_S3_REQUEST_FLAG_PART_SIZE_REQUEST_BODY) != 0; request->always_send = (flags & AWS_S3_REQUEST_FLAG_ALWAYS_SEND) != 0; return request; } void aws_s3_request_setup_send_data(struct aws_s3_request *request, struct aws_http_message *message) { AWS_PRECONDITION(request); AWS_PRECONDITION(message); if (request != NULL && request->send_data.metrics != NULL) { /* If there is a metrics from previous attempt, complete it now. */ struct aws_s3_request_metrics *metric = request->send_data.metrics; aws_high_res_clock_get_ticks((uint64_t *)&metric->time_metrics.end_timestamp_ns); metric->time_metrics.total_duration_ns = metric->time_metrics.end_timestamp_ns - metric->time_metrics.start_timestamp_ns; struct aws_s3_meta_request *meta_request = request->meta_request; if (meta_request != NULL && meta_request->telemetry_callback != NULL) { aws_s3_meta_request_lock_synced_data(meta_request); struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_TELEMETRY}; event.u.telemetry.metrics = aws_s3_request_metrics_acquire(metric); aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); aws_s3_meta_request_unlock_synced_data(meta_request); } request->send_data.metrics = aws_s3_request_metrics_release(metric); } aws_s3_request_clean_up_send_data(request); request->send_data.message = message; request->send_data.metrics = aws_s3_request_metrics_new(request->allocator, request, message); /* Start the timestamp */ aws_high_res_clock_get_ticks((uint64_t *)&request->send_data.metrics->time_metrics.start_timestamp_ns); aws_http_message_acquire(message); } static void s_s3_request_clean_up_send_data_message(struct aws_s3_request *request) { AWS_PRECONDITION(request); struct aws_http_message *message = request->send_data.message; if (message == NULL) { return; } request->send_data.message = NULL; aws_http_message_release(message); } void aws_s3_request_clean_up_send_data(struct aws_s3_request *request) { AWS_PRECONDITION(request); /* The metrics should be collected and provided to user before reaching here */ AWS_FATAL_ASSERT(request->send_data.metrics == NULL); s_s3_request_clean_up_send_data_message(request); aws_signable_destroy(request->send_data.signable); 
request->send_data.signable = NULL; aws_http_headers_release(request->send_data.response_headers); request->send_data.response_headers = NULL; aws_byte_buf_clean_up(&request->send_data.response_body); AWS_ZERO_STRUCT(request->send_data); } struct aws_s3_request *aws_s3_request_acquire(struct aws_s3_request *request) { if (request != NULL) { aws_ref_count_acquire(&request->ref_count); } return request; } struct aws_s3_request *aws_s3_request_release(struct aws_s3_request *request) { if (request != NULL) { aws_ref_count_release(&request->ref_count); } return NULL; } static void s_s3_request_destroy(void *user_data) { struct aws_s3_request *request = user_data; if (request == NULL) { return; } aws_s3_request_clean_up_send_data(request); aws_byte_buf_clean_up(&request->request_body); aws_s3_buffer_pool_release_ticket(request->meta_request->client->buffer_pool, request->ticket); aws_string_destroy(request->operation_name); aws_s3_meta_request_release(request->meta_request); aws_mem_release(request->allocator, request); } static void s_s3_request_metrics_destroy(void *arg) { struct aws_s3_request_metrics *metrics = arg; if (metrics == NULL) { return; } aws_http_headers_release(metrics->req_resp_info_metrics.response_headers); aws_string_destroy(metrics->req_resp_info_metrics.request_path_query); aws_string_destroy(metrics->req_resp_info_metrics.host_address); aws_string_destroy(metrics->req_resp_info_metrics.request_id); aws_string_destroy(metrics->req_resp_info_metrics.operation_name); aws_string_destroy(metrics->crt_info_metrics.ip_address); aws_mem_release(metrics->allocator, metrics); } struct aws_s3_request_metrics *aws_s3_request_metrics_new( struct aws_allocator *allocator, const struct aws_s3_request *request, const struct aws_http_message *message) { struct aws_s3_request_metrics *metrics = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_request_metrics)); metrics->allocator = allocator; struct aws_byte_cursor out_path; AWS_ZERO_STRUCT(out_path); int err = aws_http_message_get_request_path(message, &out_path); /* If there is no path of the message, it should be a program error. 
*/ AWS_ASSERT(!err); metrics->req_resp_info_metrics.request_path_query = aws_string_new_from_cursor(allocator, &out_path); AWS_ASSERT(metrics->req_resp_info_metrics.request_path_query != NULL); /* Get the host header value */ struct aws_byte_cursor host_header_value; AWS_ZERO_STRUCT(host_header_value); struct aws_http_headers *message_headers = aws_http_message_get_headers(message); AWS_ASSERT(message_headers); err = aws_http_headers_get(message_headers, g_host_header_name, &host_header_value); AWS_ASSERT(!err); metrics->req_resp_info_metrics.host_address = aws_string_new_from_cursor(allocator, &host_header_value); AWS_ASSERT(metrics->req_resp_info_metrics.host_address != NULL); metrics->req_resp_info_metrics.request_type = request->request_type; if (request->operation_name != NULL) { metrics->req_resp_info_metrics.operation_name = aws_string_new_from_string(allocator, request->operation_name); } metrics->time_metrics.start_timestamp_ns = -1; metrics->time_metrics.end_timestamp_ns = -1; metrics->time_metrics.total_duration_ns = -1; metrics->time_metrics.send_start_timestamp_ns = -1; metrics->time_metrics.send_end_timestamp_ns = -1; metrics->time_metrics.sending_duration_ns = -1; metrics->time_metrics.receive_start_timestamp_ns = -1; metrics->time_metrics.receive_end_timestamp_ns = -1; metrics->time_metrics.receiving_duration_ns = -1; metrics->time_metrics.sign_start_timestamp_ns = -1; metrics->time_metrics.sign_end_timestamp_ns = -1; metrics->time_metrics.signing_duration_ns = -1; metrics->req_resp_info_metrics.response_status = -1; (void)err; aws_ref_count_init(&metrics->ref_count, metrics, s_s3_request_metrics_destroy); return metrics; } struct aws_s3_request_metrics *aws_s3_request_metrics_acquire(struct aws_s3_request_metrics *metrics) { if (!metrics) { return NULL; } aws_ref_count_acquire(&metrics->ref_count); return metrics; } struct aws_s3_request_metrics *aws_s3_request_metrics_release(struct aws_s3_request_metrics *metrics) { if (metrics != NULL) { aws_ref_count_release(&metrics->ref_count); } return NULL; } int aws_s3_request_metrics_get_request_id( const struct aws_s3_request_metrics *metrics, const struct aws_string **out_request_id) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(out_request_id); if (metrics->req_resp_info_metrics.request_id == NULL) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *out_request_id = metrics->req_resp_info_metrics.request_id; return AWS_OP_SUCCESS; } void aws_s3_request_metrics_get_start_timestamp_ns(const struct aws_s3_request_metrics *metrics, uint64_t *start_time) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(start_time); *start_time = metrics->time_metrics.start_timestamp_ns; } void aws_s3_request_metrics_get_end_timestamp_ns(const struct aws_s3_request_metrics *metrics, uint64_t *end_time) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(end_time); *end_time = metrics->time_metrics.end_timestamp_ns; } void aws_s3_request_metrics_get_total_duration_ns( const struct aws_s3_request_metrics *metrics, uint64_t *total_duration) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(total_duration); *total_duration = metrics->time_metrics.total_duration_ns; } int aws_s3_request_metrics_get_send_start_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *send_start_time) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(send_start_time); if (metrics->time_metrics.send_start_timestamp_ns < 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *send_start_time = metrics->time_metrics.send_start_timestamp_ns; return 
AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_send_end_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *send_end_time) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(send_end_time); if (metrics->time_metrics.send_end_timestamp_ns < 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *send_end_time = metrics->time_metrics.send_end_timestamp_ns; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_sending_duration_ns( const struct aws_s3_request_metrics *metrics, uint64_t *sending_duration) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(sending_duration); if (metrics->time_metrics.sending_duration_ns < 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *sending_duration = metrics->time_metrics.sending_duration_ns; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_receive_start_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *receive_start_time) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(receive_start_time); if (metrics->time_metrics.receive_start_timestamp_ns < 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *receive_start_time = metrics->time_metrics.receive_start_timestamp_ns; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_receive_end_timestamp_ns( const struct aws_s3_request_metrics *metrics, uint64_t *receive_end_time) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(receive_end_time); if (metrics->time_metrics.receive_end_timestamp_ns < 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *receive_end_time = metrics->time_metrics.receive_end_timestamp_ns; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_receiving_duration_ns( const struct aws_s3_request_metrics *metrics, uint64_t *receiving_duration) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(receiving_duration); if (metrics->time_metrics.receiving_duration_ns < 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *receiving_duration = metrics->time_metrics.receiving_duration_ns; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_response_status_code( const struct aws_s3_request_metrics *metrics, int *response_status) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(response_status); if (metrics->req_resp_info_metrics.response_status == -1) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *response_status = metrics->req_resp_info_metrics.response_status; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_response_headers( const struct aws_s3_request_metrics *metrics, struct aws_http_headers **response_headers) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(response_headers); if (metrics->req_resp_info_metrics.response_headers == NULL) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *response_headers = metrics->req_resp_info_metrics.response_headers; return AWS_OP_SUCCESS; } void aws_s3_request_metrics_get_request_path_query( const struct aws_s3_request_metrics *metrics, const struct aws_string **request_path_query) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(request_path_query); *request_path_query = metrics->req_resp_info_metrics.request_path_query; } void aws_s3_request_metrics_get_host_address( const struct aws_s3_request_metrics *metrics, const struct aws_string **host_address) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(host_address); *host_address = metrics->req_resp_info_metrics.host_address; } int aws_s3_request_metrics_get_ip_address( const struct aws_s3_request_metrics *metrics, const struct aws_string 
**ip_address) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(ip_address); if (metrics->crt_info_metrics.ip_address == NULL) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *ip_address = metrics->crt_info_metrics.ip_address; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_connection_id(const struct aws_s3_request_metrics *metrics, size_t *connection_id) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(connection_id); if (metrics->crt_info_metrics.connection_id == NULL) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *connection_id = (size_t)metrics->crt_info_metrics.connection_id; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_thread_id(const struct aws_s3_request_metrics *metrics, aws_thread_id_t *thread_id) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(thread_id); if (metrics->crt_info_metrics.thread_id == 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *thread_id = metrics->crt_info_metrics.thread_id; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_request_stream_id(const struct aws_s3_request_metrics *metrics, uint32_t *stream_id) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(stream_id); if (metrics->crt_info_metrics.stream_id == 0) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *stream_id = metrics->crt_info_metrics.stream_id; return AWS_OP_SUCCESS; } int aws_s3_request_metrics_get_operation_name( const struct aws_s3_request_metrics *metrics, const struct aws_string **out_operation_name) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(out_operation_name); if (metrics->req_resp_info_metrics.operation_name == NULL) { return aws_raise_error(AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE); } *out_operation_name = metrics->req_resp_info_metrics.operation_name; return AWS_OP_SUCCESS; } void aws_s3_request_metrics_get_request_type( const struct aws_s3_request_metrics *metrics, enum aws_s3_request_type *out_request_type) { AWS_PRECONDITION(metrics); AWS_PRECONDITION(out_request_type); *out_request_type = metrics->req_resp_info_metrics.request_type; } int aws_s3_request_metrics_get_error_code(const struct aws_s3_request_metrics *metrics) { AWS_PRECONDITION(metrics); return metrics->crt_info_metrics.error_code; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_request_messages.c000066400000000000000000001332021456575232400252060ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include #include #include #include #include #include #include #include const struct aws_byte_cursor g_s3_create_multipart_upload_excluded_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), }; const size_t g_s3_create_multipart_upload_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_create_multipart_upload_excluded_headers); const struct aws_byte_cursor g_s3_upload_part_excluded_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), }; const size_t g_s3_upload_part_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_upload_part_excluded_headers); const struct aws_byte_cursor g_s3_complete_multipart_upload_excluded_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"), 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), }; const size_t g_s3_complete_multipart_upload_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_complete_multipart_upload_excluded_headers); /* The server-side encryption (SSE) is needed only when the object was created using a checksum algorithm for complete * multipart upload. */ const struct aws_byte_cursor g_s3_complete_multipart_upload_with_checksum_excluded_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-sdk-checksum-algorithm"), }; const struct aws_byte_cursor g_s3_list_parts_excluded_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"), 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), }; const size_t g_s3_list_parts_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_list_parts_excluded_headers); const struct aws_byte_cursor g_s3_list_parts_with_checksum_excluded_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), }; const size_t g_s3_list_parts_with_checksum_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_list_parts_with_checksum_excluded_headers); const struct aws_byte_cursor g_s3_abort_multipart_upload_excluded_headers[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"), 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), }; static const struct aws_byte_cursor s_x_amz_meta_prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-meta-"); const size_t g_s3_abort_multipart_upload_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_abort_multipart_upload_excluded_headers); static void s_s3_message_util_add_range_header( uint64_t part_range_start, uint64_t part_range_end, struct aws_http_message *out_message); /* Create a new get object request from an existing get object request. Currently just adds an optional ranged header. */ struct aws_http_message *aws_s3_ranged_get_object_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, uint64_t range_start, uint64_t range_end) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(base_message); struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_all_headers(allocator, base_message); if (message == NULL) { return NULL; } s_s3_message_util_add_range_header(range_start, range_end, message); return message; } /* Creates a create-multipart-upload request from a given put objet request. */ struct aws_http_message *aws_s3_create_multipart_upload_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, enum aws_s3_checksum_algorithm algorithm) { AWS_PRECONDITION(allocator); /* For multipart upload, some headers should ONLY be in the initial create-multipart request. 
* Headers such as: * - SSE related headers * - user metadata (prefixed "x-amz-meta-") headers */ struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, base_message, g_s3_create_multipart_upload_excluded_headers, AWS_ARRAY_SIZE(g_s3_create_multipart_upload_excluded_headers), false /*exclude_x_amz_meta*/); if (message == NULL) { return NULL; } if (aws_s3_message_util_set_multipart_request_path(allocator, NULL, 0, true, message)) { goto error_clean_up; } struct aws_http_headers *headers = aws_http_message_get_headers(message); if (headers == NULL) { goto error_clean_up; } if (aws_http_headers_erase(headers, g_content_md5_header_name)) { if (aws_last_error_or_unknown() != AWS_ERROR_HTTP_HEADER_NOT_FOUND) { goto error_clean_up; } } if (algorithm) { if (aws_http_headers_set( headers, g_create_mpu_checksum_header_name, *aws_get_create_mpu_header_name_from_algorithm(algorithm))) { goto error_clean_up; } } aws_http_message_set_request_method(message, g_post_method); aws_http_message_set_body_stream(message, NULL); return message; error_clean_up: aws_http_message_release(message); return NULL; } /* Create a new put object request from an existing put object request. Currently just optionally adds part information * for a multipart upload. */ struct aws_http_message *aws_s3_upload_part_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, struct aws_byte_buf *buffer, uint32_t part_number, const struct aws_string *upload_id, bool should_compute_content_md5, const struct checksum_config *checksum_config, struct aws_byte_buf *encoded_checksum_output) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(base_message); AWS_PRECONDITION(part_number > 0); AWS_PRECONDITION(buffer); struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, base_message, g_s3_upload_part_excluded_headers, AWS_ARRAY_SIZE(g_s3_upload_part_excluded_headers), true /*exclude_x_amz_meta*/); if (message == NULL) { return NULL; } if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, part_number, false, message)) { goto error_clean_up; } if (aws_s3_message_util_assign_body(allocator, buffer, message, checksum_config, encoded_checksum_output) == NULL) { goto error_clean_up; } if (should_compute_content_md5) { if (!checksum_config || checksum_config->location == AWS_SCL_NONE) { /* MD5 will be skipped if flexible checksum used */ if (aws_s3_message_util_add_content_md5_header(allocator, buffer, message)) { goto error_clean_up; } } } return message; error_clean_up: aws_http_message_release(message); return NULL; } struct aws_http_message *aws_s3_upload_part_copy_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, struct aws_byte_buf *buffer, uint32_t part_number, uint64_t range_start, uint64_t range_end, const struct aws_string *upload_id, bool should_compute_content_md5) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(base_message); AWS_PRECONDITION(part_number > 0); struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, base_message, g_s3_upload_part_excluded_headers, AWS_ARRAY_SIZE(g_s3_upload_part_excluded_headers), true /*exclude_x_amz_meta*/); if (message == NULL) { goto error_clean_up; } if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, part_number, false, message)) { goto error_clean_up; } if (buffer != NULL) { /* part copy does not have a ChecksumAlgorithm member, it will use 
the same algorithm as the create * multipart upload request specifies */ if (aws_s3_message_util_assign_body( allocator, buffer, message, NULL /* checksum_config */, NULL /* out_checksum */) == NULL) { goto error_clean_up; } if (should_compute_content_md5) { if (aws_s3_message_util_add_content_md5_header(allocator, buffer, message)) { goto error_clean_up; } } } char source_range[1024]; snprintf(source_range, sizeof(source_range), "bytes=%" PRIu64 "-%" PRIu64, range_start, range_end); struct aws_http_header source_range_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"), .value = aws_byte_cursor_from_c_str(source_range), }; struct aws_http_headers *headers = aws_http_message_get_headers(message); aws_http_headers_add_header(headers, &source_range_header); return message; error_clean_up: if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } static const struct aws_byte_cursor s_slash_char = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); /** * For the CopyObject operation, create the initial HEAD message to retrieve the size of the copy source. */ struct aws_http_message *aws_s3_get_source_object_size_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message) { struct aws_http_message *message = NULL; struct aws_byte_buf head_object_host_header; AWS_ZERO_STRUCT(head_object_host_header); AWS_PRECONDITION(allocator); /* Find the x-amz-copy-source header, to extract source bucket/key information. */ struct aws_http_headers *headers = aws_http_message_get_headers(base_message); if (!headers) { AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing headers"); return NULL; } struct aws_byte_cursor source_header; const struct aws_byte_cursor copy_source_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"); if (aws_http_headers_get(headers, copy_source_header, &source_header) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing the x-amz-copy-source header"); return NULL; } struct aws_byte_cursor host; if (aws_http_headers_get(headers, g_host_header_name, &host) != AWS_OP_SUCCESS) { AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing the Host header"); return NULL; } struct aws_byte_cursor request_path = source_header; /* Skip optional leading slash. */ if (aws_byte_cursor_starts_with(&request_path, &s_slash_char)) { aws_byte_cursor_advance(&request_path, 1); } /* From this point forward, the format is {bucket}/{key} - split components.*/ struct aws_byte_cursor source_bucket = {0}; if (aws_byte_cursor_next_split(&request_path, '/', &source_bucket)) { aws_byte_cursor_advance(&request_path, source_bucket.len); } if (source_bucket.len == 0 || request_path.len == 0) { AWS_LOGF_ERROR( AWS_LS_S3_GENERAL, "CopyRequest x-amz-copy-source header does not follow expected bucket/key format: " PRInSTR, AWS_BYTE_CURSOR_PRI(source_header)); goto error_cleanup; } if (aws_byte_buf_init_copy_from_cursor(&head_object_host_header, allocator, source_bucket)) { goto error_cleanup; } /* Reuse the domain name from the original Host header for the HEAD request. * TODO: following code works by replacing bucket name in the host with the * source bucket name. this only works for virtual host endpoints and has a * slew of other issues, like not supporting source in a different region. * This covers common case, but we need to rethink how we can support all * cases in general. 
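     * Illustrative example of the rewrite performed below (hypothetical bucket/key/region names,
     * virtual-hosted-style endpoint assumed):
     *   original request:   Host: dest-bucket.s3.us-west-2.amazonaws.com
     *                       x-amz-copy-source: /src-bucket/my/key
     *   resulting HEAD:     Host: src-bucket.s3.us-west-2.amazonaws.com
     *                       request path: /my/key
     * i.e. everything after the first '.' of the original Host value is kept and the source bucket parsed
     * out of x-amz-copy-source is prepended to it.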
*/ struct aws_byte_cursor domain_name; const struct aws_byte_cursor dot = aws_byte_cursor_from_c_str("."); if (aws_byte_cursor_find_exact(&host, &dot, &domain_name)) { AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest Host header not in FQDN format"); goto error_cleanup; } if (aws_byte_buf_append_dynamic(&head_object_host_header, &domain_name)) { goto error_cleanup; } message = aws_http_message_new_request(allocator); if (message == NULL) { goto error_cleanup; } if (aws_http_message_set_request_method(message, g_head_method)) { goto error_cleanup; } struct aws_http_header host_header = { .name = g_host_header_name, .value = aws_byte_cursor_from_buf(&head_object_host_header), }; if (aws_http_message_add_header(message, host_header)) { goto error_cleanup; } if (aws_http_message_set_request_path(message, request_path)) { goto error_cleanup; } aws_byte_buf_clean_up(&head_object_host_header); return message; error_cleanup: aws_byte_buf_clean_up(&head_object_host_header); aws_http_message_release(message); return NULL; }
static const struct aws_byte_cursor s_complete_payload_begin = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" "<CompleteMultipartUpload xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n"); static const struct aws_byte_cursor s_complete_payload_end = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</CompleteMultipartUpload>"); static const struct aws_byte_cursor s_part_section_string_0 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" <Part>\n" " <ETag>"); static const struct aws_byte_cursor s_part_section_string_1 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</ETag>\n" " <PartNumber>"); static const struct aws_byte_cursor s_close_part_number_tag = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</PartNumber>\n"); static const struct aws_byte_cursor s_close_part_tag = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" </Part>\n"); static const struct aws_byte_cursor s_open_start_bracket = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" <"); static const struct aws_byte_cursor s_open_end_bracket = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</"); static const struct aws_byte_cursor s_close_bracket = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(">"); static const struct aws_byte_cursor s_close_bracket_new_line = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(">\n");
/* Create a complete-multipart message, which includes an XML payload of all completed parts. */ struct aws_http_message *aws_s3_complete_multipart_message_new( struct aws_allocator *allocator, struct aws_http_message *base_message, struct aws_byte_buf *body_buffer, const struct aws_string *upload_id, const struct aws_array_list *parts, enum aws_s3_checksum_algorithm algorithm) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(base_message); AWS_PRECONDITION(body_buffer); AWS_PRECONDITION(upload_id); AWS_PRECONDITION(parts); const struct aws_byte_cursor *mpu_algorithm_checksum_name = aws_get_complete_mpu_name_from_algorithm(algorithm); struct aws_http_message *message = NULL; if (algorithm == AWS_SCA_NONE) { /* We don't need to worry about the pre-calculated checksum from user as for multipart upload, only way to * calculate checksum is from client.
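         * For reference, the body assembled below has roughly this shape (illustrative values; the
         * Checksum<ALGO> element per part is only emitted when a checksum algorithm was configured):
         *   <?xml version="1.0" encoding="UTF-8"?>
         *   <CompleteMultipartUpload ...>
         *     <Part>
         *       <ETag>"etag-of-part-1"</ETag>
         *       <PartNumber>1</PartNumber>
         *       <ChecksumCRC32C>base64-value</ChecksumCRC32C>
         *     </Part>
         *     ...
         *   </CompleteMultipartUpload>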
*/ message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, base_message, g_s3_complete_multipart_upload_excluded_headers, AWS_ARRAY_SIZE(g_s3_complete_multipart_upload_excluded_headers), true /*exclude_x_amz_meta*/); } else { message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, base_message, g_s3_complete_multipart_upload_with_checksum_excluded_headers, AWS_ARRAY_SIZE(g_s3_complete_multipart_upload_with_checksum_excluded_headers), true /*exclude_x_amz_meta*/); } struct aws_http_headers *headers = NULL; if (message == NULL) { goto error_clean_up; } if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, 0, false, message)) { goto error_clean_up; } aws_http_message_set_request_method(message, g_post_method); headers = aws_http_message_get_headers(message); if (headers == NULL) { goto error_clean_up; } /* Create XML payload with all the etags of finished parts */ { aws_byte_buf_reset(body_buffer, false); if (aws_byte_buf_append_dynamic(body_buffer, &s_complete_payload_begin)) { goto error_clean_up; } for (size_t part_index = 0; part_index < aws_array_list_length(parts); ++part_index) { struct aws_s3_mpu_part_info *part = NULL; aws_array_list_get_at(parts, &part, part_index); AWS_FATAL_ASSERT(part != NULL); if (aws_byte_buf_append_dynamic(body_buffer, &s_part_section_string_0)) { goto error_clean_up; } struct aws_byte_cursor etag_byte_cursor = aws_byte_cursor_from_string(part->etag); if (aws_byte_buf_append_dynamic(body_buffer, &etag_byte_cursor)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, &s_part_section_string_1)) { goto error_clean_up; } char part_number_buffer[32] = ""; int part_number = (int)(part_index + 1); int part_number_num_char = snprintf(part_number_buffer, sizeof(part_number_buffer), "%d", part_number); struct aws_byte_cursor part_number_byte_cursor = aws_byte_cursor_from_array(part_number_buffer, part_number_num_char); if (aws_byte_buf_append_dynamic(body_buffer, &part_number_byte_cursor)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, &s_close_part_number_tag)) { goto error_clean_up; } if (mpu_algorithm_checksum_name) { struct aws_byte_cursor checksum = aws_byte_cursor_from_buf(&part->checksum_base64); if (aws_byte_buf_append_dynamic(body_buffer, &s_open_start_bracket)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, mpu_algorithm_checksum_name)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, &s_close_bracket)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, &checksum)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, &s_open_end_bracket)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, mpu_algorithm_checksum_name)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(body_buffer, &s_close_bracket_new_line)) { goto error_clean_up; } } if (aws_byte_buf_append_dynamic(body_buffer, &s_close_part_tag)) { goto error_clean_up; } } if (aws_byte_buf_append_dynamic(body_buffer, &s_complete_payload_end)) { goto error_clean_up; } aws_s3_message_util_assign_body( allocator, body_buffer, message, NULL /* checksum_config */, NULL /* out_checksum */); } return message; error_clean_up: AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "Could not create complete multipart message"); if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } struct aws_http_message *aws_s3_abort_multipart_upload_message_new( struct aws_allocator *allocator, struct 
aws_http_message *base_message, const struct aws_string *upload_id) { struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, base_message, g_s3_abort_multipart_upload_excluded_headers, AWS_ARRAY_SIZE(g_s3_abort_multipart_upload_excluded_headers), true /*exclude_x_amz_meta*/); if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, 0, false, message)) { goto error_clean_up; } aws_http_message_set_request_method(message, g_delete_method); return message; error_clean_up: AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "Could not create abort multipart upload message"); if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } /* Assign a buffer to an HTTP message, creating a stream and setting the content-length header */ struct aws_input_stream *aws_s3_message_util_assign_body( struct aws_allocator *allocator, struct aws_byte_buf *byte_buf, struct aws_http_message *out_message, const struct checksum_config *checksum_config, struct aws_byte_buf *out_checksum) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(out_message); AWS_PRECONDITION(byte_buf); struct aws_byte_cursor buffer_byte_cursor = aws_byte_cursor_from_buf(byte_buf); struct aws_http_headers *headers = aws_http_message_get_headers(out_message); if (headers == NULL) { return NULL; } struct aws_input_stream *input_stream = aws_input_stream_new_from_cursor(allocator, &buffer_byte_cursor); struct aws_byte_buf content_encoding_header_buf; AWS_ZERO_STRUCT(content_encoding_header_buf); if (input_stream == NULL) { goto error_clean_up; } if (checksum_config) { if (checksum_config->location == AWS_SCL_TRAILER) { /* aws-chunked encode the payload and add related headers */ /* set Content-Encoding header. If the header already exists, append the exisiting value to aws-chunked * We already made sure that the existing value is not 'aws_chunked' in 'aws_s3_client_make_meta_request' * function. */ struct aws_byte_cursor content_encoding_header_cursor; bool has_content_encoding_header = aws_http_headers_get(headers, g_content_encoding_header_name, &content_encoding_header_cursor) == AWS_OP_SUCCESS; size_t content_encoding_header_buf_size = has_content_encoding_header ? 
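                /* existing Content-Encoding value + ',' separator + "aws-chunked"; the "+ 1" pays for the comma */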
g_content_encoding_header_aws_chunked.len + content_encoding_header_cursor.len + 1 : g_content_encoding_header_aws_chunked.len; aws_byte_buf_init(&content_encoding_header_buf, allocator, content_encoding_header_buf_size); if (has_content_encoding_header) { aws_byte_buf_append_dynamic(&content_encoding_header_buf, &content_encoding_header_cursor); aws_byte_buf_append_byte_dynamic(&content_encoding_header_buf, ','); } aws_byte_buf_append_dynamic(&content_encoding_header_buf, &g_content_encoding_header_aws_chunked); if (aws_http_headers_set( headers, g_content_encoding_header_name, aws_byte_cursor_from_buf(&content_encoding_header_buf))) { goto error_clean_up; } /* set x-amz-trailer header */ if (aws_http_headers_set( headers, g_trailer_header_name, *aws_get_http_header_name_from_algorithm(checksum_config->checksum_algorithm))) { goto error_clean_up; } /* set x-amz-decoded-content-length header */ char decoded_content_length_buffer[64] = ""; snprintf( decoded_content_length_buffer, sizeof(decoded_content_length_buffer), "%" PRIu64, (uint64_t)buffer_byte_cursor.len); struct aws_byte_cursor decode_content_length_cursor = aws_byte_cursor_from_array(decoded_content_length_buffer, strlen(decoded_content_length_buffer)); if (aws_http_headers_set(headers, g_decoded_content_length_header_name, decode_content_length_cursor)) { goto error_clean_up; } /* set input stream to chunk stream */ struct aws_input_stream *chunk_stream = aws_chunk_stream_new(allocator, input_stream, checksum_config->checksum_algorithm, out_checksum); if (!chunk_stream) { goto error_clean_up; } aws_input_stream_release(input_stream); input_stream = chunk_stream; } } int64_t stream_length = 0; if (aws_input_stream_get_length(input_stream, &stream_length)) { goto error_clean_up; } char content_length_buffer[64] = ""; snprintf(content_length_buffer, sizeof(content_length_buffer), "%" PRIu64, (uint64_t)stream_length); struct aws_byte_cursor content_length_cursor = aws_byte_cursor_from_array(content_length_buffer, strlen(content_length_buffer)); if (aws_http_headers_set(headers, g_content_length_header_name, content_length_cursor)) { goto error_clean_up; } aws_http_message_set_body_stream(out_message, input_stream); /* Let the message take the full ownership */ aws_input_stream_release(input_stream); aws_byte_buf_clean_up(&content_encoding_header_buf); return input_stream; error_clean_up: AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Failed to assign body for s3 request http message, from body buffer ."); aws_input_stream_release(input_stream); aws_byte_buf_clean_up(&content_encoding_header_buf); return NULL; } bool aws_s3_message_util_check_checksum_header(struct aws_http_message *message) { struct aws_http_headers *headers = aws_http_message_get_headers(message); for (int algorithm = AWS_SCA_INIT; algorithm <= AWS_SCA_END; algorithm++) { const struct aws_byte_cursor *algorithm_header_name = aws_get_http_header_name_from_algorithm(algorithm); if (aws_http_headers_has(headers, *algorithm_header_name)) { return true; } } return false; } /* Add a content-md5 header. 
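 * The part body in input_buf is hashed with MD5, the 16-byte digest is base64-encoded, and the result is
 * written into the Content-MD5 request header, which is how S3 expects the optional payload integrity
 * value to be supplied.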
*/ int aws_s3_message_util_add_content_md5_header( struct aws_allocator *allocator, struct aws_byte_buf *input_buf, struct aws_http_message *out_message) { AWS_PRECONDITION(out_message); /* Compute MD5 */ struct aws_byte_cursor md5_input = aws_byte_cursor_from_buf(input_buf); uint8_t md5_output[AWS_MD5_LEN]; struct aws_byte_buf md5_output_buf = aws_byte_buf_from_empty_array(md5_output, sizeof(md5_output)); if (aws_md5_compute(allocator, &md5_input, &md5_output_buf, 0)) { return AWS_OP_ERR; } /* Compute Base64 encoding of MD5 */ struct aws_byte_cursor base64_input = aws_byte_cursor_from_buf(&md5_output_buf); size_t base64_output_size = 0; if (aws_base64_compute_encoded_len(md5_output_buf.len, &base64_output_size)) { return AWS_OP_ERR; } struct aws_byte_buf base64_output_buf; if (aws_byte_buf_init(&base64_output_buf, allocator, base64_output_size)) { return AWS_OP_ERR; } if (aws_base64_encode(&base64_input, &base64_output_buf)) { goto error_clean_up; } struct aws_http_headers *headers = aws_http_message_get_headers(out_message); if (aws_http_headers_set(headers, g_content_md5_header_name, aws_byte_cursor_from_buf(&base64_output_buf))) { goto error_clean_up; } aws_byte_buf_clean_up(&base64_output_buf); return AWS_OP_SUCCESS; error_clean_up: aws_byte_buf_clean_up(&base64_output_buf); return AWS_OP_ERR; } /* Copy an existing HTTP message's headers, method, and path. */ struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_all_headers( struct aws_allocator *allocator, struct aws_http_message *base_message) { return aws_s3_message_util_copy_http_message_no_body_filter_headers(allocator, base_message, NULL, 0, false); } struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_filter_headers( struct aws_allocator *allocator, struct aws_http_message *base_message, const struct aws_byte_cursor *excluded_header_array, size_t excluded_header_array_size, bool exclude_x_amz_meta) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(base_message); struct aws_http_message *message = aws_http_message_new_request(allocator); AWS_ASSERT(message); struct aws_byte_cursor request_method; if (aws_http_message_get_request_method(base_message, &request_method)) { AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Failed to get request method."); goto error_clean_up; } if (aws_http_message_set_request_method(message, request_method)) { goto error_clean_up; } struct aws_byte_cursor request_path; if (aws_http_message_get_request_path(base_message, &request_path)) { AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Failed to get request path."); goto error_clean_up; } if (aws_http_message_set_request_path(message, request_path)) { goto error_clean_up; } aws_s3_message_util_copy_headers( base_message, message, excluded_header_array, excluded_header_array_size, exclude_x_amz_meta); return message; error_clean_up: aws_http_message_release(message); return NULL; } void aws_s3_message_util_copy_headers( struct aws_http_message *source_message, struct aws_http_message *dest_message, const struct aws_byte_cursor *excluded_header_array, size_t excluded_header_array_size, bool exclude_x_amz_meta) { size_t num_headers = aws_http_message_get_header_count(source_message); for (size_t header_index = 0; header_index < num_headers; ++header_index) { struct aws_http_header header; int error = aws_http_message_get_header(source_message, &header, header_index); if (excluded_header_array && excluded_header_array_size > 0) { bool exclude_header = false; for (size_t exclude_index = 0; exclude_index < excluded_header_array_size; ++exclude_index) { 
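                /* Header-name comparison is case-insensitive, so the exclusion lists above match no matter how
                 * the original request spelled the header (e.g. "content-length" vs "Content-Length"). */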
if (aws_byte_cursor_eq_ignore_case(&header.name, &excluded_header_array[exclude_index])) { exclude_header = true; break; } } if (exclude_header) { continue; } } if (exclude_x_amz_meta) { if (aws_byte_cursor_starts_with_ignore_case(&header.name, &s_x_amz_meta_prefix)) { continue; } } error |= aws_http_message_add_header(dest_message, header); (void)error; AWS_ASSERT(!error); } } /* Add a range header.*/ static void s_s3_message_util_add_range_header( uint64_t part_range_start, uint64_t part_range_end, struct aws_http_message *out_message) { AWS_PRECONDITION(out_message); /* ((2^64)-1 = 20 characters; 2*20 + length-of("bytes=-") < 128) */ char range_value_buffer[128] = ""; snprintf( range_value_buffer, sizeof(range_value_buffer), "bytes=%" PRIu64 "-%" PRIu64, part_range_start, part_range_end); struct aws_http_header range_header; AWS_ZERO_STRUCT(range_header); range_header.name = g_range_header_name; range_header.value = aws_byte_cursor_from_c_str(range_value_buffer); struct aws_http_headers *headers = aws_http_message_get_headers(out_message); AWS_ASSERT(headers != NULL); int erase_result = aws_http_headers_erase(headers, range_header.name); AWS_ASSERT(erase_result == AWS_OP_SUCCESS || aws_last_error() == AWS_ERROR_HTTP_HEADER_NOT_FOUND); /* Only failed when the header has invalid name, which is impossible here. */ erase_result = aws_http_message_add_header(out_message, range_header); AWS_ASSERT(erase_result == AWS_OP_SUCCESS); (void)erase_result; } /* Handle setting up the multipart request path for a message. */ int aws_s3_message_util_set_multipart_request_path( struct aws_allocator *allocator, const struct aws_string *upload_id, uint32_t part_number, bool append_uploads_suffix, struct aws_http_message *message) { const struct aws_byte_cursor question_mark = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("?"); const struct aws_byte_cursor ampersand = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("&"); const struct aws_byte_cursor uploads_suffix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("uploads"); const struct aws_byte_cursor part_number_arg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("partNumber="); const struct aws_byte_cursor upload_id_arg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("uploadId="); struct aws_byte_buf request_path_buf; struct aws_byte_cursor request_path; if (aws_http_message_get_request_path(message, &request_path)) { return AWS_OP_ERR; } if (aws_byte_buf_init(&request_path_buf, allocator, request_path.len)) { return AWS_OP_ERR; } if (aws_byte_buf_append_dynamic(&request_path_buf, &request_path)) { goto error_clean_up; } bool has_existing_query_parameters = false; for (size_t i = 0; i < request_path.len; ++i) { if (request_path.ptr[i] == '?') { has_existing_query_parameters = true; break; } } if (part_number > 0) { if (aws_byte_buf_append_dynamic( &request_path_buf, has_existing_query_parameters ? &ersand : &question_mark)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(&request_path_buf, &part_number_arg)) { goto error_clean_up; } char part_number_buffer[32] = ""; snprintf(part_number_buffer, sizeof(part_number_buffer), "%d", part_number); struct aws_byte_cursor part_number_cursor = aws_byte_cursor_from_array(part_number_buffer, strlen(part_number_buffer)); if (aws_byte_buf_append_dynamic(&request_path_buf, &part_number_cursor)) { goto error_clean_up; } has_existing_query_parameters = true; } if (upload_id != NULL) { struct aws_byte_cursor upload_id_cursor = aws_byte_cursor_from_string(upload_id); if (aws_byte_buf_append_dynamic( &request_path_buf, has_existing_query_parameters ? 
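                /* appending "uploadId=<id>": use '?' if the path has no query string yet, '&' otherwise */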
&ersand : &question_mark)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(&request_path_buf, &upload_id_arg)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(&request_path_buf, &upload_id_cursor)) { goto error_clean_up; } has_existing_query_parameters = true; } if (append_uploads_suffix) { if (aws_byte_buf_append_dynamic( &request_path_buf, has_existing_query_parameters ? &ersand : &question_mark)) { goto error_clean_up; } if (aws_byte_buf_append_dynamic(&request_path_buf, &uploads_suffix)) { goto error_clean_up; } has_existing_query_parameters = true; } struct aws_byte_cursor new_request_path = aws_byte_cursor_from_buf(&request_path_buf); if (aws_http_message_set_request_path(message, new_request_path)) { goto error_clean_up; } aws_byte_buf_clean_up(&request_path_buf); return AWS_OP_SUCCESS; error_clean_up: aws_byte_buf_clean_up(&request_path_buf); return AWS_OP_ERR; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3_util.c000066400000000000000000000712611456575232400226120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_util.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_request.h" #include #include #include #include #include #include #include #ifdef _MSC_VER /* sscanf warning (not currently scanning for strings) */ # pragma warning(disable : 4996) #endif const struct aws_byte_cursor g_s3_client_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(AWS_S3_CLIENT_VERSION); const struct aws_byte_cursor g_s3_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3"); const struct aws_byte_cursor g_s3express_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3express"); const struct aws_byte_cursor g_host_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"); const struct aws_byte_cursor g_range_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Range"); const struct aws_byte_cursor g_if_match_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-Match"); const struct aws_byte_cursor g_request_id_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-request-id"); const struct aws_byte_cursor g_etag_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ETag"); const struct aws_byte_cursor g_content_range_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Range"); const struct aws_byte_cursor g_content_type_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"); const struct aws_byte_cursor g_content_encoding_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"); const struct aws_byte_cursor g_content_encoding_header_aws_chunked = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-chunked"); const struct aws_byte_cursor g_content_length_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"); const struct aws_byte_cursor g_decoded_content_length_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-decoded-content-length"); const struct aws_byte_cursor g_content_md5_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"); const struct aws_byte_cursor g_trailer_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-trailer"); const struct aws_byte_cursor g_request_validation_mode = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-mode"); const struct aws_byte_cursor g_enabled = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("enabled"); const struct aws_byte_cursor g_create_mpu_checksum_header_name = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-algorithm"); const struct aws_byte_cursor g_crc32c_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-crc32c"); const struct aws_byte_cursor g_crc32_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-crc32"); const struct aws_byte_cursor g_sha1_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-sha1"); const struct aws_byte_cursor g_sha256_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-sha256"); const struct aws_byte_cursor g_crc32c_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CRC32C"); const struct aws_byte_cursor g_crc32_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CRC32"); const struct aws_byte_cursor g_sha1_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SHA1"); const struct aws_byte_cursor g_sha256_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SHA256"); const struct aws_byte_cursor g_crc32c_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumCRC32C"); const struct aws_byte_cursor g_crc32_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumCRC32"); const struct aws_byte_cursor g_sha1_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumSHA1"); const struct aws_byte_cursor g_sha256_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumSHA256"); const struct aws_byte_cursor g_accept_ranges_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("accept-ranges"); const struct aws_byte_cursor g_acl_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"); const struct aws_byte_cursor g_mp_parts_count_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-mp-parts-count"); const struct aws_byte_cursor g_post_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST"); const struct aws_byte_cursor g_head_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD"); const struct aws_byte_cursor g_delete_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DELETE"); const struct aws_byte_cursor g_user_agent_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("User-Agent"); const struct aws_byte_cursor g_user_agent_header_product_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CRTS3NativeClient"); const uint32_t g_s3_max_num_upload_parts = 10000; const size_t g_s3_min_upload_part_size = MB_TO_BYTES(5); const char *aws_s3_request_type_operation_name(enum aws_s3_request_type type) { switch (type) { case AWS_S3_REQUEST_TYPE_HEAD_OBJECT: return "HeadObject"; case AWS_S3_REQUEST_TYPE_GET_OBJECT: return "GetObject"; case AWS_S3_REQUEST_TYPE_LIST_PARTS: return "ListParts"; case AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD: return "CreateMultipartUpload"; case AWS_S3_REQUEST_TYPE_UPLOAD_PART: return "UploadPart"; case AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD: return "AbortMultipartUpload"; case AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD: return "CompleteMultipartUpload"; case AWS_S3_REQUEST_TYPE_UPLOAD_PART_COPY: return "UploadPartCopy"; case AWS_S3_REQUEST_TYPE_COPY_OBJECT: return "CopyObject"; case AWS_S3_REQUEST_TYPE_PUT_OBJECT: return "PutObject"; default: return ""; } } void copy_http_headers(const struct aws_http_headers *src, struct aws_http_headers *dest) { AWS_PRECONDITION(src); AWS_PRECONDITION(dest); size_t headers_count = aws_http_headers_count(src); for (size_t header_index = 0; header_index < headers_count; ++header_index) { struct aws_http_header header; aws_http_headers_get_index(src, header_index, &header); aws_http_headers_set(dest, 
header.name, header.value); } } /* user_data for XML traversal */ struct xml_get_body_at_path_traversal { struct aws_allocator *allocator; const char **path_name_array; size_t path_name_count; size_t path_name_i; struct aws_byte_cursor *out_body; bool found_node; }; static int s_xml_get_body_at_path_on_node(struct aws_xml_node *node, void *user_data) { struct xml_get_body_at_path_traversal *traversal = user_data; /* if we already found what we're looking for, just finish parsing */ if (traversal->found_node) { return AWS_OP_SUCCESS; } /* check if this node is on the path */ struct aws_byte_cursor node_name = aws_xml_node_get_name(node); const char *expected_name = traversal->path_name_array[traversal->path_name_i]; if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, expected_name)) { bool is_final_node_on_path = traversal->path_name_i + 1 == traversal->path_name_count; if (is_final_node_on_path) { /* retrieve the body */ if (aws_xml_node_as_body(node, traversal->out_body) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } traversal->found_node = true; return AWS_OP_SUCCESS; } else { /* node is on path, but it's not the final node, so traverse its children */ traversal->path_name_i++; if (aws_xml_node_traverse(node, s_xml_get_body_at_path_on_node, traversal) != AWS_OP_SUCCESS) { return AWS_OP_ERR; } traversal->path_name_i--; return AWS_OP_SUCCESS; } } else { /* this node is not on the path, continue parsing siblings */ return AWS_OP_SUCCESS; } } int aws_xml_get_body_at_path( struct aws_allocator *allocator, struct aws_byte_cursor xml_doc, const char **path_name_array, struct aws_byte_cursor *out_body) { struct xml_get_body_at_path_traversal traversal = { .allocator = allocator, .path_name_array = path_name_array, .path_name_count = 0, .out_body = out_body, }; /* find path_name_count */ while (path_name_array[traversal.path_name_count] != NULL) { traversal.path_name_count++; AWS_ASSERT(traversal.path_name_count < 4); /* sanity check, increase cap if necessary */ } AWS_ASSERT(traversal.path_name_count > 0); /* parse XML */ struct aws_xml_parser_options parse_options = { .doc = xml_doc, .on_root_encountered = s_xml_get_body_at_path_on_node, .user_data = &traversal, }; if (aws_xml_parse(allocator, &parse_options)) { goto error; } if (!traversal.found_node) { aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); goto error; } return AWS_OP_SUCCESS; error: AWS_ZERO_STRUCT(*out_body); return AWS_OP_ERR; } struct aws_cached_signing_config_aws *aws_cached_signing_config_new( struct aws_s3_client *client, const struct aws_signing_config_aws *signing_config) { AWS_PRECONDITION(client); AWS_PRECONDITION(signing_config); struct aws_allocator *allocator = client->allocator; struct aws_cached_signing_config_aws *cached_signing_config = aws_mem_calloc(allocator, 1, sizeof(struct aws_cached_signing_config_aws)); cached_signing_config->allocator = allocator; cached_signing_config->config.config_type = signing_config->config_type ? signing_config->config_type : AWS_SIGNING_CONFIG_AWS; AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->region)); if (signing_config->region.len > 0) { cached_signing_config->region = aws_string_new_from_cursor(allocator, &signing_config->region); } else { /* Fall back to client region. 
*/ cached_signing_config->region = aws_string_new_from_string(allocator, client->region); } cached_signing_config->config.region = aws_byte_cursor_from_string(cached_signing_config->region); if (signing_config->service.len > 0) { cached_signing_config->service = aws_string_new_from_cursor(allocator, &signing_config->service); cached_signing_config->config.service = aws_byte_cursor_from_string(cached_signing_config->service); } else { cached_signing_config->config.service = g_s3_service_name; } cached_signing_config->config.date = signing_config->date; AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->signed_body_value)); if (signing_config->signed_body_value.len > 0) { cached_signing_config->signed_body_value = aws_string_new_from_cursor(allocator, &signing_config->signed_body_value); cached_signing_config->config.signed_body_value = aws_byte_cursor_from_string(cached_signing_config->signed_body_value); } else { cached_signing_config->config.signed_body_value = g_aws_signed_body_value_unsigned_payload; } if (signing_config->credentials != NULL) { aws_credentials_acquire(signing_config->credentials); cached_signing_config->config.credentials = signing_config->credentials; } if (signing_config->credentials_provider != NULL) { aws_credentials_provider_acquire(signing_config->credentials_provider); cached_signing_config->config.credentials_provider = signing_config->credentials_provider; } /* Configs default to Zero. */ cached_signing_config->config.algorithm = signing_config->algorithm; cached_signing_config->config.signature_type = signing_config->signature_type; /* TODO: you don't have a way to override this config as the other option is zero. But, you cannot really use the * other value, as it is always required. */ cached_signing_config->config.signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256; cached_signing_config->config.should_sign_header = signing_config->should_sign_header; /* It's the user's responsibility to keep the user data around */ cached_signing_config->config.should_sign_header_ud = signing_config->should_sign_header_ud; cached_signing_config->config.flags = signing_config->flags; cached_signing_config->config.expiration_in_seconds = signing_config->expiration_in_seconds; return cached_signing_config; } void aws_cached_signing_config_destroy(struct aws_cached_signing_config_aws *cached_signing_config) { if (cached_signing_config == NULL) { return; } aws_credentials_release(cached_signing_config->config.credentials); aws_credentials_provider_release(cached_signing_config->config.credentials_provider); aws_string_destroy(cached_signing_config->service); aws_string_destroy(cached_signing_config->region); aws_string_destroy(cached_signing_config->signed_body_value); aws_mem_release(cached_signing_config->allocator, cached_signing_config); } void aws_s3_init_default_signing_config( struct aws_signing_config_aws *signing_config, const struct aws_byte_cursor region, struct aws_credentials_provider *credentials_provider) { AWS_PRECONDITION(signing_config); AWS_PRECONDITION(credentials_provider); AWS_ZERO_STRUCT(*signing_config); signing_config->config_type = AWS_SIGNING_CONFIG_AWS; signing_config->algorithm = AWS_SIGNING_ALGORITHM_V4; signing_config->credentials_provider = credentials_provider; signing_config->region = region; signing_config->service = g_s3_service_name; signing_config->signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256; signing_config->signed_body_value = g_aws_signed_body_value_unsigned_payload; } static struct aws_byte_cursor s_quote_entity_literal = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("""); static struct aws_byte_cursor s_quote_literal = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\""); struct aws_byte_buf aws_replace_quote_entities(struct aws_allocator *allocator, struct aws_byte_cursor src) { struct aws_byte_buf out_buf; aws_byte_buf_init(&out_buf, allocator, src.len); for (size_t i = 0; i < src.len; ++i) { size_t chars_remaining = src.len - i; if (chars_remaining >= s_quote_entity_literal.len && !strncmp((const char *)&src.ptr[i], (const char *)s_quote_entity_literal.ptr, s_quote_entity_literal.len)) { /* Append quote */ aws_byte_buf_append(&out_buf, &s_quote_literal); i += s_quote_entity_literal.len - 1; } else { /* Append character */ struct aws_byte_cursor character_cursor = aws_byte_cursor_from_array(&src.ptr[i], 1); aws_byte_buf_append(&out_buf, &character_cursor); } } return out_buf; } struct aws_string *aws_strip_quotes(struct aws_allocator *allocator, struct aws_byte_cursor in_cur) { if (in_cur.len >= 2 && in_cur.ptr[0] == '"' && in_cur.ptr[in_cur.len - 1] == '"') { aws_byte_cursor_advance(&in_cur, 1); --in_cur.len; } return aws_string_new_from_cursor(allocator, &in_cur); } int aws_last_error_or_unknown(void) { int error = aws_last_error(); AWS_ASSERT(error != AWS_ERROR_SUCCESS); /* Someone forgot to call aws_raise_error() */ if (error == AWS_ERROR_SUCCESS) { return AWS_ERROR_UNKNOWN; } return error; } void aws_s3_add_user_agent_header(struct aws_allocator *allocator, struct aws_http_message *message) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(message); const struct aws_byte_cursor space_delimiter = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" "); const struct aws_byte_cursor forward_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); const size_t user_agent_product_version_length = g_user_agent_header_product_name.len + forward_slash.len + g_s3_client_version.len; struct aws_http_headers *headers = aws_http_message_get_headers(message); AWS_ASSERT(headers != NULL); struct aws_byte_cursor current_user_agent_header; AWS_ZERO_STRUCT(current_user_agent_header); struct aws_byte_buf user_agent_buffer; AWS_ZERO_STRUCT(user_agent_buffer); if (aws_http_headers_get(headers, g_user_agent_header_name, ¤t_user_agent_header) == AWS_OP_SUCCESS) { /* If the header was found, then create a buffer with the total size we'll need, and append the current user * agent header with a trailing space. */ aws_byte_buf_init( &user_agent_buffer, allocator, current_user_agent_header.len + space_delimiter.len + user_agent_product_version_length); aws_byte_buf_append_dynamic(&user_agent_buffer, ¤t_user_agent_header); aws_byte_buf_append_dynamic(&user_agent_buffer, &space_delimiter); } else { AWS_ASSERT(aws_last_error() == AWS_ERROR_HTTP_HEADER_NOT_FOUND); /* If the header was not found, then create a buffer with just the size of the user agent string that is about * to be appended to the buffer. */ aws_byte_buf_init(&user_agent_buffer, allocator, user_agent_product_version_length); } /* Append the client's user-agent string. */ { aws_byte_buf_append_dynamic(&user_agent_buffer, &g_user_agent_header_product_name); aws_byte_buf_append_dynamic(&user_agent_buffer, &forward_slash); aws_byte_buf_append_dynamic(&user_agent_buffer, &g_s3_client_version); } /* Apply the updated header. */ aws_http_headers_set(headers, g_user_agent_header_name, aws_byte_cursor_from_buf(&user_agent_buffer)); /* Clean up the scratch buffer. 
*/ aws_byte_buf_clean_up(&user_agent_buffer); } int aws_s3_parse_content_range_response_header( struct aws_allocator *allocator, struct aws_http_headers *response_headers, uint64_t *out_range_start, uint64_t *out_range_end, uint64_t *out_object_size) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(response_headers); struct aws_byte_cursor content_range_header_value; if (aws_http_headers_get(response_headers, g_content_range_header_name, &content_range_header_value)) { aws_raise_error(AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER); return AWS_OP_ERR; } int result = AWS_OP_ERR; uint64_t range_start = 0; uint64_t range_end = 0; uint64_t object_size = 0; struct aws_string *content_range_header_value_str = aws_string_new_from_cursor(allocator, &content_range_header_value); /* Expected Format of header is: "bytes StartByte-EndByte/TotalObjectSize" */ int num_fields_found = sscanf( (const char *)content_range_header_value_str->bytes, "bytes %" PRIu64 "-%" PRIu64 "/%" PRIu64, &range_start, &range_end, &object_size); if (num_fields_found < 3) { aws_raise_error(AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER); goto clean_up; } if (out_range_start != NULL) { *out_range_start = range_start; } if (out_range_end != NULL) { *out_range_end = range_end; } if (out_object_size != NULL) { *out_object_size = object_size; } result = AWS_OP_SUCCESS; clean_up: aws_string_destroy(content_range_header_value_str); content_range_header_value_str = NULL; return result; } int aws_s3_parse_content_length_response_header( struct aws_allocator *allocator, struct aws_http_headers *response_headers, uint64_t *out_content_length) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(response_headers); AWS_PRECONDITION(out_content_length); struct aws_byte_cursor content_length_header_value; if (aws_http_headers_get(response_headers, g_content_length_header_name, &content_length_header_value)) { aws_raise_error(AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER); return AWS_OP_ERR; } struct aws_string *content_length_header_value_str = aws_string_new_from_cursor(allocator, &content_length_header_value); int result = AWS_OP_ERR; if (sscanf((const char *)content_length_header_value_str->bytes, "%" PRIu64, out_content_length) == 1) { result = AWS_OP_SUCCESS; } else { aws_raise_error(AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER); } aws_string_destroy(content_length_header_value_str); return result; } int aws_s3_parse_request_range_header( struct aws_http_headers *request_headers, bool *out_has_start_range, bool *out_has_end_range, uint64_t *out_start_range, uint64_t *out_end_range) { AWS_PRECONDITION(request_headers); AWS_PRECONDITION(out_has_start_range); AWS_PRECONDITION(out_has_end_range); AWS_PRECONDITION(out_start_range); AWS_PRECONDITION(out_end_range); bool has_start_range = false; bool has_end_range = false; uint64_t start_range = 0; uint64_t end_range = 0; struct aws_byte_cursor range_header_value; if (aws_http_headers_get(request_headers, g_range_header_name, &range_header_value)) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } struct aws_byte_cursor range_header_start = aws_byte_cursor_from_c_str("bytes="); /* verify bytes= */ if (!aws_byte_cursor_starts_with(&range_header_value, &range_header_start)) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } aws_byte_cursor_advance(&range_header_value, range_header_start.len); struct aws_byte_cursor substr = {0}; /* parse start range */ if (!aws_byte_cursor_next_split(&range_header_value, '-', &substr)) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } if 
(substr.len > 0) { if (aws_byte_cursor_utf8_parse_u64(substr, &start_range)) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } has_start_range = true; } /* parse end range */ if (!aws_byte_cursor_next_split(&range_header_value, '-', &substr)) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } if (substr.len > 0) { if (aws_byte_cursor_utf8_parse_u64(substr, &end_range)) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } has_end_range = true; } /* verify that there is nothing extra */ if (aws_byte_cursor_next_split(&range_header_value, '-', &substr)) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } /* verify that start-range <= end-range */ if (has_end_range && start_range > end_range) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } /* verify that start-range or end-range is present */ if (!has_start_range && !has_end_range) { return aws_raise_error(AWS_ERROR_S3_INVALID_RANGE_HEADER); } *out_has_start_range = has_start_range; *out_has_end_range = has_end_range; *out_start_range = start_range; *out_end_range = end_range; return AWS_OP_SUCCESS; } uint32_t aws_s3_calculate_auto_ranged_get_num_parts( size_t part_size, uint64_t first_part_size, uint64_t object_range_start, uint64_t object_range_end) { uint32_t num_parts = 1; if (first_part_size == 0) { return num_parts; } uint64_t second_part_start = object_range_start + first_part_size; /* If the range has room for a second part, calculate the additional amount of parts. */ if (second_part_start <= object_range_end) { uint64_t aligned_range_remainder = object_range_end + 1 - second_part_start; /* range-end is inclusive */ num_parts += (uint32_t)(aligned_range_remainder / (uint64_t)part_size); if ((aligned_range_remainder % part_size) > 0) { ++num_parts; } } return num_parts; } void aws_s3_calculate_auto_ranged_get_part_range( uint64_t object_range_start, uint64_t object_range_end, size_t part_size, uint64_t first_part_size, uint32_t part_number, uint64_t *out_part_range_start, uint64_t *out_part_range_end) { AWS_PRECONDITION(out_part_range_start); AWS_PRECONDITION(out_part_range_end); AWS_ASSERT(part_number > 0); const uint32_t part_index = part_number - 1; /* Part index is assumed to be in a valid range. */ AWS_ASSERT( part_index < aws_s3_calculate_auto_ranged_get_num_parts(part_size, first_part_size, object_range_start, object_range_end)); uint64_t part_size_uint64 = (uint64_t)part_size; if (part_index == 0) { /* If this is the first part, then use the first part size. */ *out_part_range_start = object_range_start; *out_part_range_end = *out_part_range_start + first_part_size - 1; } else { /* Else, find the next part by adding the object range + total number of whole parts before this one + initial * part size*/ *out_part_range_start = object_range_start + ((uint64_t)(part_index - 1)) * part_size_uint64 + first_part_size; *out_part_range_end = *out_part_range_start + part_size_uint64 - 1; /* range-end is inclusive */ } /* Cap the part's range end using the object's range end. 
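Worked example with illustrative numbers (not from the original source): with object_range_start = 0, object_range_end = 20 MiB - 1, first_part_size = 8 MiB and part_size = 8 MiB, part 1 covers bytes [0, 8 MiB - 1], part 2 covers [8 MiB, 16 MiB - 1], and part 3 is first computed as [16 MiB, 24 MiB - 1] and then capped below to [16 MiB, 20 MiB - 1]; aws_s3_calculate_auto_ranged_get_num_parts returns 3 for the same inputs.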
*/ if (*out_part_range_end > object_range_end) { *out_part_range_end = object_range_end; } } int aws_s3_calculate_optimal_mpu_part_size_and_num_parts( uint64_t content_length, size_t client_part_size, uint64_t client_max_part_size, size_t *out_part_size, uint32_t *out_num_parts) { AWS_FATAL_ASSERT(out_part_size); AWS_FATAL_ASSERT(out_num_parts); if (content_length == 0) { *out_part_size = 0; *out_num_parts = 0; return AWS_OP_SUCCESS; } uint64_t part_size_uint64 = content_length / (uint64_t)g_s3_max_num_upload_parts; if ((content_length % g_s3_max_num_upload_parts) > 0) { ++part_size_uint64; } if (part_size_uint64 > SIZE_MAX) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create meta request; required part size of %" PRIu64 " bytes is too large for platform.", part_size_uint64); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } size_t part_size = (size_t)part_size_uint64; if (part_size > client_max_part_size) { AWS_LOGF_ERROR( AWS_LS_S3_META_REQUEST, "Could not create meta request; required part size for request is %" PRIu64 ", but current maximum part size is %" PRIu64, (uint64_t)part_size, (uint64_t)client_max_part_size); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } if (part_size < client_part_size) { part_size = client_part_size; } if (content_length < part_size) { /* When the content length is smaller than part size and larger than the threshold, we set one part * with the whole length */ part_size = (size_t)content_length; } uint32_t num_parts = (uint32_t)(content_length / part_size); if ((content_length % part_size) > 0) { ++num_parts; } AWS_ASSERT(num_parts <= g_s3_max_num_upload_parts); *out_part_size = part_size; *out_num_parts = num_parts; return AWS_OP_SUCCESS; } int aws_s3_crt_error_code_from_server_error_code_string(struct aws_byte_cursor error_code_string) { if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "SlowDown")) { return AWS_ERROR_S3_SLOW_DOWN; } if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "InternalError") || aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "InternalErrors")) { return AWS_ERROR_S3_INTERNAL_ERROR; } if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "RequestTimeTooSkewed")) { return AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED; } return AWS_ERROR_UNKNOWN; } void aws_s3_request_finish_up_metrics_synced(struct aws_s3_request *request, struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(request); ASSERT_SYNCED_DATA_LOCK_HELD(meta_request); if (request->send_data.metrics != NULL) { /* Request is done, complete the metrics for the request now. */ struct aws_s3_request_metrics *metrics = request->send_data.metrics; aws_high_res_clock_get_ticks((uint64_t *)&metrics->time_metrics.end_timestamp_ns); metrics->time_metrics.total_duration_ns = metrics->time_metrics.end_timestamp_ns - metrics->time_metrics.start_timestamp_ns; if (meta_request->telemetry_callback != NULL) { struct aws_s3_meta_request_event event = {.type = AWS_S3_META_REQUEST_EVENT_TELEMETRY}; event.u.telemetry.metrics = aws_s3_request_metrics_acquire(metrics); aws_s3_meta_request_add_event_for_delivery_synced(meta_request, &event); } request->send_data.metrics = aws_s3_request_metrics_release(metrics); } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/source/s3express_credentials_provider.c000066400000000000000000001222561456575232400274570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
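 *
 * This file implements the S3 Express (CreateSession-based) session credentials
 * provider: it caches session credentials per (host, original-credentials) hash
 * key and refreshes them from a background task before they expire.
 *
 * Rough usage sketch (illustrative only; allocator, s3_client,
 * original_credentials, host_cursor, on_creds_acquired and user_data are
 * hypothetical caller-provided values, and the public functions defined later
 * in this file are the authoritative signatures):
 *
 *   struct aws_s3express_credentials_provider_default_options options = {
 *       .client = s3_client,
 *   };
 *   struct aws_s3express_credentials_provider *provider =
 *       aws_s3express_credentials_provider_new_default(allocator, &options);
 *
 *   struct aws_credentials_properties_s3express props = {.host = host_cursor};
 *   aws_s3express_credentials_provider_get_credentials(
 *       provider, original_credentials, &props, on_creds_acquired, user_data);
 *
 *   aws_s3express_credentials_provider_release(provider);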
*/ #include "aws/s3/s3express_credentials_provider.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3express_credentials_provider_impl.h" #include #include #include #include #include #include #include #include #include #include #include #include #include static struct aws_byte_cursor s_create_session_path_query = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/?session="); static const size_t s_default_cache_capacity = 100; /* Those number are from C++ SDK impl */ static const uint64_t s_expired_threshold_secs = 5; static const uint64_t s_about_to_expire_threshold_secs = 60; static const uint64_t s_background_refresh_interval_secs = 60; struct aws_query_callback_node { struct aws_linked_list_node node; aws_on_get_credentials_callback_fn *get_cred_callback; void *get_cred_user_data; }; struct aws_s3express_session_creator { struct aws_allocator *allocator; /* The hash key for the table storing creator and session. */ struct aws_string *hash_key; struct aws_s3express_credentials_provider *provider; struct aws_byte_buf response_buf; /* The region and host of the session we are creating */ struct aws_string *region; struct aws_string *host; struct { /* Protected by the impl lock */ /* If creating a new session, this is NULL. * If refreshing an existing session, this points to it. */ struct aws_s3express_session *session; /* Node of `struct aws_query_callback_node*` */ struct aws_linked_list query_queue; struct aws_s3_meta_request *meta_request; } synced_data; }; static struct aws_s3express_session *s_aws_s3express_session_new( struct aws_s3express_credentials_provider *provider, const struct aws_string *hash_key, const struct aws_string *region, const struct aws_string *host, struct aws_credentials *credentials) { struct aws_s3express_session *session = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_s3express_session)); session->allocator = provider->allocator; session->impl = provider->impl; session->hash_key = aws_string_new_from_string(provider->allocator, hash_key); session->host = aws_string_new_from_string(provider->allocator, host); if (region) { session->region = aws_string_new_from_string(provider->allocator, region); } session->s3express_credentials = credentials; aws_credentials_acquire(credentials); return session; } static void s_aws_s3express_session_destroy(struct aws_s3express_session *session) { if (!session) { return; } if (session->creator) { /* The session is always protected by the lock, we can safely touch the synced data here */ /* Unset the session, but keep the creator going */ session->creator->synced_data.session = NULL; } aws_string_destroy(session->hash_key); aws_string_destroy(session->region); aws_string_destroy(session->host); aws_credentials_release(session->s3express_credentials); aws_mem_release(session->allocator, session); } static bool s_s3express_session_is_valid(struct aws_s3express_session *session, uint64_t now_seconds) { AWS_ASSERT(session->s3express_credentials); if (session->impl->mock_test.s3express_session_is_valid_override) { /* Mock override for testing. 
*/ return session->impl->mock_test.s3express_session_is_valid_override(session, now_seconds); } uint64_t expire_secs = aws_credentials_get_expiration_timepoint_seconds(session->s3express_credentials); uint64_t threshold_secs = 0; int overflow = aws_add_u64_checked(now_seconds, s_expired_threshold_secs, &threshold_secs); AWS_ASSERT(!overflow); (void)overflow; /* If it's too close to be expired, we consider the session is invalid */ return threshold_secs < expire_secs; } static bool s_s3express_session_about_to_expire(struct aws_s3express_session *session, uint64_t now_seconds) { AWS_ASSERT(session->s3express_credentials); if (session->impl->mock_test.s3express_session_about_to_expire_override) { /* Mock override for testing. */ return session->impl->mock_test.s3express_session_about_to_expire_override(session, now_seconds); } uint64_t expire_secs = aws_credentials_get_expiration_timepoint_seconds(session->s3express_credentials); uint64_t threshold_secs = 0; int overflow = aws_add_u64_checked(now_seconds, s_about_to_expire_threshold_secs, &threshold_secs); AWS_ASSERT(!overflow); (void)overflow; return threshold_secs >= expire_secs; } static struct aws_s3express_session_creator *s_aws_s3express_session_creator_destroy( struct aws_s3express_session_creator *session_creator); static void s_credentials_provider_s3express_impl_lock_synced_data( struct aws_s3express_credentials_provider_impl *impl) { int err = aws_mutex_lock(&impl->synced_data.lock); AWS_ASSERT(!err); (void)err; } static void s_credentials_provider_s3express_impl_unlock_synced_data( struct aws_s3express_credentials_provider_impl *impl) { int err = aws_mutex_unlock(&impl->synced_data.lock); AWS_ASSERT(!err); (void)err; } static int s_on_incoming_body_fn( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)meta_request; (void)range_start; struct aws_s3express_session_creator *session_creator = user_data; return aws_byte_buf_append_dynamic(&session_creator->response_buf, body); } /* parse credentials of form: <Credentials> <SessionToken>sessionToken</SessionToken> <SecretAccessKey>secretKey</SecretAccessKey> <AccessKeyId>accessKeyId</AccessKeyId> <Expiration>2023-06-26T17:33:30Z</Expiration> </Credentials> */ struct aws_s3express_xml_parser_user_data { struct aws_allocator *allocator; struct aws_string *access_key_id; struct aws_string *secret_access_key; struct aws_string *session_token; void *log_id; uint64_t expire_timestamp_secs; }; static int s_s3express_xml_traversing_credentials(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); struct aws_s3express_xml_parser_user_data *parser_ud = user_data; struct aws_byte_cursor credential_data; AWS_ZERO_STRUCT(credential_data); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SessionToken")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } parser_ud->session_token = aws_string_new_from_array(parser_ud->allocator, credential_data.ptr, credential_data.len); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SecretAccessKey")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } parser_ud->secret_access_key = aws_string_new_from_array(parser_ud->allocator, credential_data.ptr, credential_data.len); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AccessKeyId")) { if (aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } parser_ud->access_key_id = aws_string_new_from_array(parser_ud->allocator, credential_data.ptr, credential_data.len); } if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Expiration")) { if
(aws_xml_node_as_body(node, &credential_data)) { return AWS_OP_ERR; } AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Read Expiration " PRInSTR "", (void *)parser_ud->log_id, AWS_BYTE_CURSOR_PRI(credential_data)); struct aws_date_time dt; if (aws_date_time_init_from_str_cursor(&dt, &credential_data, AWS_DATE_FORMAT_AUTO_DETECT)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Failed to parse Expiration " PRInSTR "", (void *)parser_ud->log_id, AWS_BYTE_CURSOR_PRI(credential_data)); return AWS_OP_ERR; } parser_ud->expire_timestamp_secs = (uint64_t)aws_date_time_as_epoch_secs(&dt); } return AWS_OP_SUCCESS; } static int s_s3express_xml_traversing_CreateSessionResult_children(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) { return aws_xml_node_traverse(node, s_s3express_xml_traversing_credentials, user_data); } return AWS_OP_SUCCESS; } static int s_s3express_xml_traversing_root(struct aws_xml_node *node, void *user_data) { struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "CreateSessionResult")) { return aws_xml_node_traverse(node, s_s3express_xml_traversing_CreateSessionResult_children, user_data); } return AWS_OP_SUCCESS; } static struct aws_credentials *s_parse_s3express_xml( struct aws_allocator *alloc, struct aws_byte_cursor xml, void *logging_id) { struct aws_credentials *credentials = NULL; struct aws_s3express_xml_parser_user_data user_data = { .allocator = alloc, .log_id = logging_id, }; struct aws_xml_parser_options options = { .doc = xml, .on_root_encountered = s_s3express_xml_traversing_root, .user_data = &user_data, }; if (aws_xml_parse(alloc, &options)) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): credentials parsing failed with error %s", logging_id, aws_error_debug_str(aws_last_error())); goto done; } if (user_data.access_key_id && user_data.secret_access_key && user_data.session_token && user_data.expire_timestamp_secs) { credentials = aws_credentials_new_from_string( alloc, user_data.access_key_id, user_data.secret_access_key, user_data.session_token, user_data.expire_timestamp_secs); } done: /* Clean up resource */ aws_string_destroy(user_data.access_key_id); aws_string_destroy(user_data.secret_access_key); aws_string_destroy(user_data.session_token); return credentials; } /* called upon completion of meta request */ static void s_on_request_finished( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; struct aws_s3express_session_creator *session_creator = user_data; struct aws_s3express_credentials_provider_impl *impl = session_creator->provider->impl; if (impl->mock_test.meta_request_finished_overhead) { impl->mock_test.meta_request_finished_overhead(meta_request, meta_request_result, user_data); } struct aws_linked_list pending_callbacks; aws_linked_list_init(&pending_callbacks); struct aws_credentials *credentials = NULL; int error_code = meta_request_result->error_code; AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): CreateSession call completed with http status: %d and error code %s", (void *)session_creator->provider, meta_request_result->response_status, aws_error_debug_str(error_code)); if (error_code && meta_request_result->error_response_body && meta_request_result->error_response_body->len > 0) { /* The Create Session failed 
with an error response from S3, provide a specific error code for user. */ error_code = AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED; AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): CreateSession call failed with http status: %d, and error response body is: %.*s", (void *)session_creator->provider, meta_request_result->response_status, (int)meta_request_result->error_response_body->len, meta_request_result->error_response_body->buffer); } if (error_code == AWS_ERROR_SUCCESS) { credentials = s_parse_s3express_xml( session_creator->allocator, aws_byte_cursor_from_buf(&session_creator->response_buf), session_creator); if (!credentials) { error_code = AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE; AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): failed to read credentials from document, treating as an error.", (void *)session_creator->provider); } } { /* BEGIN CRITICAL SECTION */ s_credentials_provider_s3express_impl_lock_synced_data(impl); aws_linked_list_swap_contents(&session_creator->synced_data.query_queue, &pending_callbacks); aws_hash_table_remove(&impl->synced_data.session_creator_table, session_creator->hash_key, NULL, NULL); struct aws_s3express_session *session = session_creator->synced_data.session; if (session) { session->creator = NULL; if (error_code == AWS_ERROR_SUCCESS) { /* The session already existed, just update the credentials for the session */ aws_credentials_release(session->s3express_credentials); session->s3express_credentials = credentials; aws_credentials_acquire(credentials); } else { /* The session failed to be created, remove the session from the cache. */ aws_cache_remove(impl->synced_data.cache, session->hash_key); } } else if (error_code == AWS_ERROR_SUCCESS) { /* Create a new session when we get valid credentials and put it into cache */ session = s_aws_s3express_session_new( session_creator->provider, session_creator->hash_key, session_creator->region, session_creator->host, credentials); aws_cache_put(impl->synced_data.cache, session->hash_key, session); } s_credentials_provider_s3express_impl_unlock_synced_data(impl); } /* END CRITICAL SECTION */ /* Invoked all callbacks */ while (!aws_linked_list_empty(&pending_callbacks)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_callbacks); struct aws_query_callback_node *callback_node = AWS_CONTAINER_OF(node, struct aws_query_callback_node, node); callback_node->get_cred_callback(credentials, error_code, callback_node->get_cred_user_data); aws_mem_release(session_creator->allocator, callback_node); } aws_credentials_release(credentials); s_aws_s3express_session_creator_destroy(session_creator); } static struct aws_http_message *s_create_session_request_new( struct aws_allocator *allocator, struct aws_byte_cursor host_value) { struct aws_http_message *request = aws_http_message_new_request(allocator); struct aws_http_header host_header = { .name = g_host_header_name, .value = host_value, }; if (aws_http_message_add_header(request, host_header)) { goto error; } struct aws_http_header user_agent_header = { .name = g_user_agent_header_name, .value = aws_byte_cursor_from_c_str("aws-sdk-crt/s3express-credentials-provider"), }; if (aws_http_message_add_header(request, user_agent_header)) { goto error; } if (aws_http_message_set_request_method(request, aws_http_method_get)) { goto error; } if (aws_http_message_set_request_path(request, s_create_session_path_query)) { goto error; } return request; error: return aws_http_message_release(request); } /* Clean up resources that only 
related to one create session call */ static struct aws_s3express_session_creator *s_aws_s3express_session_creator_destroy( struct aws_s3express_session_creator *session_creator) { if (session_creator == NULL) { return NULL; } AWS_FATAL_ASSERT(aws_linked_list_empty(&session_creator->synced_data.query_queue)); struct aws_s3express_credentials_provider_impl *impl = session_creator->provider->impl; aws_s3_meta_request_release(session_creator->synced_data.meta_request); aws_ref_count_release(&impl->internal_ref); aws_string_destroy(session_creator->hash_key); aws_string_destroy(session_creator->region); aws_string_destroy(session_creator->host); aws_byte_buf_clean_up(&session_creator->response_buf); aws_mem_release(session_creator->allocator, session_creator); return NULL; } /** * Encode the hash key to be [host_value][hash_of_credentials] * hash_of_credentials is the sha256 of [access_key][secret_access_key] **/ struct aws_string *aws_encode_s3express_hash_key_new( struct aws_allocator *allocator, const struct aws_credentials *original_credentials, struct aws_byte_cursor host_value) { struct aws_byte_buf combine_key_buf; /* 1. Combine access_key and secret_access_key into one buffer */ struct aws_byte_cursor access_key = aws_credentials_get_access_key_id(original_credentials); struct aws_byte_cursor secret_access_key = aws_credentials_get_secret_access_key(original_credentials); aws_byte_buf_init(&combine_key_buf, allocator, access_key.len + secret_access_key.len); aws_byte_buf_write_from_whole_cursor(&combine_key_buf, access_key); aws_byte_buf_write_from_whole_cursor(&combine_key_buf, secret_access_key); /* 2. Get sha256 digest from the combined key */ struct aws_byte_cursor combine_key = aws_byte_cursor_from_buf(&combine_key_buf); struct aws_byte_buf digest_buf; aws_byte_buf_init(&digest_buf, allocator, AWS_SHA256_LEN); aws_sha256_compute(allocator, &combine_key, &digest_buf, 0); /* 3. Encode the result to be [host_value][hash_of_credentials] */ struct aws_byte_buf result_buffer; aws_byte_buf_init(&result_buffer, allocator, host_value.len + digest_buf.len); aws_byte_buf_write_from_whole_cursor(&result_buffer, host_value); aws_byte_buf_write_from_whole_buffer(&result_buffer, digest_buf); struct aws_string *result = aws_string_new_from_buf(allocator, &result_buffer); /* Clean up */ aws_byte_buf_clean_up(&result_buffer); aws_byte_buf_clean_up(&combine_key_buf); aws_byte_buf_clean_up(&digest_buf); return result; } static struct aws_s3express_session_creator *s_session_creator_new( struct aws_s3express_credentials_provider *provider, const struct aws_credentials *original_credentials, const struct aws_credentials_properties_s3express *s3express_properties) { struct aws_s3express_credentials_provider_impl *impl = provider->impl; struct aws_http_message *request = s_create_session_request_new(provider->allocator, s3express_properties->host); if (!request) { return NULL; } if (impl->mock_test.endpoint_override) { /* NOTE: ONLY FOR TESTS. Erase the host header for endpoint override. 
*/ aws_http_headers_erase(aws_http_message_get_headers(request), g_host_header_name); } struct aws_s3express_session_creator *session_creator = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_s3express_session_creator)); session_creator->allocator = provider->allocator; session_creator->provider = provider; session_creator->host = aws_string_new_from_cursor(session_creator->allocator, &s3express_properties->host); session_creator->region = aws_string_new_from_cursor(session_creator->allocator, &s3express_properties->region); struct aws_signing_config_aws s3express_signing_config = { .credentials = original_credentials, .service = g_s3express_service_name, .region = s3express_properties->region, }; aws_byte_buf_init(&session_creator->response_buf, provider->allocator, 512); struct aws_s3_meta_request_options options = { .message = request, .type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .body_callback = s_on_incoming_body_fn, .finish_callback = s_on_request_finished, .signing_config = &s3express_signing_config, /* Override endpoint only for tests. */ .endpoint = impl->mock_test.endpoint_override ? impl->mock_test.endpoint_override : NULL, .user_data = session_creator, .operation_name = aws_byte_cursor_from_c_str("CreateSession"), }; session_creator->synced_data.meta_request = aws_s3_client_make_meta_request(impl->client, &options); AWS_FATAL_ASSERT(session_creator->synced_data.meta_request); aws_http_message_release(request); aws_ref_count_acquire(&impl->internal_ref); aws_linked_list_init(&session_creator->synced_data.query_queue); return session_creator; } static int s_s3express_get_creds( struct aws_s3express_credentials_provider *provider, const struct aws_credentials *original_credentials, const struct aws_credentials_properties_s3express *s3express_properties, aws_on_get_credentials_callback_fn callback, void *user_data) { if (s3express_properties->host.len == 0) { AWS_LOGF_ERROR( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): The host property is empty to get credentials from S3 Express", (void *)provider); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_s3express_credentials_provider_impl *impl = provider->impl; struct aws_hash_element *session_creator_hash_element = NULL; int was_created = 0; struct aws_credentials *s3express_credentials = NULL; struct aws_byte_cursor access_key; AWS_ZERO_STRUCT(access_key); if (original_credentials) { access_key = aws_credentials_get_access_key_id(original_credentials); } uint64_t current_stamp = UINT64_MAX; aws_sys_clock_get_ticks(&current_stamp); struct aws_string *hash_key = aws_encode_s3express_hash_key_new(provider->allocator, original_credentials, s3express_properties->host); uint64_t now_seconds = aws_timestamp_convert(current_stamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); s_credentials_provider_s3express_impl_lock_synced_data(impl); /* Use after free is a crime */ AWS_FATAL_ASSERT(!impl->synced_data.destroying); /* Step 1: Check cache.
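Overall flow of s_s3express_get_creds, summarized for orientation: Step 1 looks up a still-valid session in the LRU cache keyed by [host][sha256(access_key + secret_access_key)] and, on a hit, returns its credentials straight away; Step 2 finds or creates an entry in session_creator_table under the same key; Step 3 starts a CreateSession meta request only when that entry is new, and every caller that arrives before it finishes is queued on the creator and completed later from s_on_request_finished.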
*/ struct aws_s3express_session *session = NULL; int ret_code = aws_cache_find(impl->synced_data.cache, hash_key, (void **)&session); AWS_ASSERT(ret_code == AWS_OP_SUCCESS); if (session) { /* We found a session */ session->inactive = false; AWS_ASSERT(session->s3express_credentials != NULL); if (s_s3express_session_is_valid(session, now_seconds)) { s3express_credentials = session->s3express_credentials; /* Make sure the creds are valid until the callback invokes */ aws_credentials_acquire(s3express_credentials); aws_string_destroy(hash_key); goto unlock; } else { /* Remove the session from cache and fall to try to creating the session */ aws_cache_remove(impl->synced_data.cache, hash_key); } } /* Step 2: Check the creator map */ ret_code = aws_hash_table_create( &impl->synced_data.session_creator_table, hash_key, &session_creator_hash_element, &was_created); AWS_ASSERT(ret_code == AWS_OP_SUCCESS); (void)ret_code; /* Step 3: Create session if needed */ if (was_created) { /* A new session creator needed */ struct aws_s3express_session_creator *new_session_creator = s_session_creator_new(provider, original_credentials, s3express_properties); /* If we failed to create session creator, it's probably OOM or impl error we don't want to handle */ AWS_FATAL_ASSERT(new_session_creator); new_session_creator->hash_key = hash_key; session_creator_hash_element->value = new_session_creator; } else { aws_string_destroy(hash_key); } if (s3express_credentials == NULL) { /* Queue the callback if we don't have a creds to return now. */ struct aws_s3express_session_creator *session_creator = session_creator_hash_element->value; struct aws_query_callback_node *callback_node = aws_mem_acquire(provider->allocator, sizeof(struct aws_query_callback_node)); callback_node->get_cred_callback = callback; callback_node->get_cred_user_data = user_data; aws_linked_list_push_back(&session_creator->synced_data.query_queue, &callback_node->node); } unlock: s_credentials_provider_s3express_impl_unlock_synced_data(impl); if (s3express_credentials) { uint64_t expire_secs = aws_credentials_get_expiration_timepoint_seconds(s3express_credentials); AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): Found credentials from cache. Timestamp to expire is %" PRIu64 ", while now is %" PRIu64 ".", (void *)provider, expire_secs, now_seconds); /* TODO: invoke callback asynced? */ callback(s3express_credentials, AWS_ERROR_SUCCESS, user_data); aws_credentials_release(s3express_credentials); return AWS_OP_SUCCESS; } return AWS_OP_SUCCESS; } static void s_finish_provider_destroy(struct aws_s3express_credentials_provider *provider) { AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): finishing destroying S3 Express credentials provider", (void *)provider); struct aws_s3express_credentials_provider_impl *impl = provider->impl; aws_hash_table_clean_up(&impl->synced_data.session_creator_table); aws_cache_destroy(impl->synced_data.cache); aws_credentials_release(impl->default_original_credentials); aws_credentials_provider_release(impl->default_original_credentials_provider); aws_mutex_clean_up(&impl->synced_data.lock); aws_mem_release(provider->allocator, impl->bg_refresh_task); /* Invoke provider shutdown callback */ if (provider && provider->shutdown_complete_callback) { provider->shutdown_complete_callback(provider->shutdown_user_data); } aws_mem_release(provider->allocator, provider); } /* This is scheduled to run on the background task's event loop. 
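Cancelling bg_refresh_task from that same event loop runs it synchronously with a canceled status (so s_bg_refresh_task returns without doing work), after which the internal ref is released; once the last internal ref drops, s_finish_provider_destroy frees the cache, the creator table and the provider itself and fires the shutdown callback.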
*/ static void s_clean_up_background_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; struct aws_s3express_credentials_provider *provider = arg; struct aws_s3express_credentials_provider_impl *impl = provider->impl; /* Cancelling the task will run the task synchronously */ aws_event_loop_cancel_task(impl->bg_event_loop, impl->bg_refresh_task); aws_mem_release(provider->allocator, task); /* Safely remove the internal ref as the background task is killed. */ aws_ref_count_release(&impl->internal_ref); } static void s_external_destroy(struct aws_s3express_credentials_provider *provider) { AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): external refcount drops to zero, start destroying", (void *)provider); struct aws_s3express_credentials_provider_impl *impl = provider->impl; { /* BEGIN CRITICAL SECTION */ s_credentials_provider_s3express_impl_lock_synced_data(impl); impl->synced_data.destroying = true; aws_cache_clear(impl->synced_data.cache); for (struct aws_hash_iter iter = aws_hash_iter_begin(&impl->synced_data.session_creator_table); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct aws_s3express_session_creator *session_creator = (struct aws_s3express_session_creator *)iter.element.value; /* Cancel all meta requests */ aws_s3_meta_request_cancel(session_creator->synced_data.meta_request); } s_credentials_provider_s3express_impl_unlock_synced_data(impl); } /* END CRITICAL SECTION */ /* Clean up the background thread */ struct aws_task *clean_up_background_task = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_task)); aws_task_init(clean_up_background_task, s_clean_up_background_task, provider, "clean_up_s3express_background"); aws_event_loop_schedule_task_now(impl->bg_event_loop, clean_up_background_task); } static struct aws_s3express_credentials_provider_vtable s_aws_s3express_credentials_provider_vtable = { .get_credentials = s_s3express_get_creds, .destroy = s_external_destroy, }; static void s_schedule_bg_refresh(struct aws_s3express_credentials_provider *provider) { struct aws_s3express_credentials_provider_impl *impl = provider->impl; AWS_FATAL_ASSERT(impl->bg_event_loop != NULL); uint64_t current_stamp = UINT64_MAX; /* Use high res clock to schedule the task in the future. */ aws_high_res_clock_get_ticks(&current_stamp); uint64_t interval_secs = impl->mock_test.bg_refresh_secs_override == 0 ? s_background_refresh_interval_secs : impl->mock_test.bg_refresh_secs_override; /* Schedule the refresh task to happen in the future. */
aws_event_loop_schedule_task_future( impl->bg_event_loop, impl->bg_refresh_task, current_stamp + aws_timestamp_convert(interval_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); return; } static void s_refresh_session_list( struct aws_s3express_credentials_provider *provider, const struct aws_credentials *current_original_credentials) { struct aws_s3express_credentials_provider_impl *impl = provider->impl; uint64_t current_stamp = UINT64_MAX; aws_sys_clock_get_ticks(&current_stamp); uint64_t now_seconds = aws_timestamp_convert(current_stamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); AWS_LOGF_TRACE( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): background refreshing task in process", (void *)provider); { /* BEGIN CRITICAL SECTION */ s_credentials_provider_s3express_impl_lock_synced_data(impl); if (impl->synced_data.destroying) { /* Client is gone, stop doing anything */ s_credentials_provider_s3express_impl_unlock_synced_data(impl); return; } const struct aws_linked_list *session_list = aws_linked_hash_table_get_iteration_list(&impl->synced_data.cache->table); /* Iterate through the cache without changing the priority */ struct aws_linked_list_node *node = NULL; for (node = aws_linked_list_begin(session_list); node != aws_linked_list_end(session_list);) { /* Iterate through all nodes and clean the resource up */ struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); node = aws_linked_list_next(node); struct aws_s3express_session *session = table_node->value; if (s_s3express_session_about_to_expire(session, now_seconds)) { if (session->inactive) { /* The session has been inactive since last refresh, remove it from the cache. */ aws_cache_remove(impl->synced_data.cache, session->hash_key); } else { /* If we are about to expire, try to refresh the credentials */ /* Check the creator map */ struct aws_hash_element *session_creator_hash_element = NULL; int was_created = 0; struct aws_string *hash_key = aws_string_new_from_string(provider->allocator, session->hash_key); int ret_code = aws_hash_table_create( &impl->synced_data.session_creator_table, hash_key, &session_creator_hash_element, &was_created); AWS_ASSERT(ret_code == AWS_OP_SUCCESS); (void)ret_code; if (was_created) { struct aws_string *current_creds_hash = aws_encode_s3express_hash_key_new( provider->allocator, current_original_credentials, aws_byte_cursor_from_string(session->host)); bool creds_match = aws_string_eq(current_creds_hash, hash_key); aws_string_destroy(current_creds_hash); if (!creds_match) { /* The session was created with separate credentials, we skip refreshing it. */ if (!s_s3express_session_is_valid(session, now_seconds)) { /* Purge the session when it is expired. */ aws_cache_remove(impl->synced_data.cache, session->hash_key); } /* Mark it as inactive, so that we can purge the session directly from next refresh */ session->inactive = true; /* Remove the element we just created as we skip refreshing.
*/ aws_string_destroy(hash_key); aws_hash_table_remove_element( &impl->synced_data.session_creator_table, session_creator_hash_element); goto unlock; } struct aws_credentials_properties_s3express s3express_properties = { .host = aws_byte_cursor_from_string(session->host), }; if (session->region) { s3express_properties.region = aws_byte_cursor_from_string(session->region); } /* A new session creator needed to refresh the session */ struct aws_s3express_session_creator *new_session_creator = s_session_creator_new(provider, current_original_credentials, &s3express_properties); AWS_FATAL_ASSERT(new_session_creator); new_session_creator->synced_data.session = session; session->creator = new_session_creator; new_session_creator->hash_key = hash_key; session_creator_hash_element->value = new_session_creator; } else { /* The session is in process of refreshing. Only valid if the previous create session to * refresh still not finished, otherwise, it's a bug */ aws_string_destroy(hash_key); struct aws_s3express_session_creator *session_creator = session_creator_hash_element->value; AWS_FATAL_ASSERT(session_creator->synced_data.session == session); } session->inactive = true; } } } unlock: s_credentials_provider_s3express_impl_unlock_synced_data(impl); } /* END CRITICAL SECTION */ s_schedule_bg_refresh(provider); } static void s_get_original_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { struct aws_s3express_credentials_provider *provider = user_data; if (error_code) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: S3 Express Provider back ground refresh failed: Failed to fetch original credentials with " "error %s. Skipping refresh.", (void *)provider, aws_error_debug_str(aws_last_error())); /* Skip this refresh, but keep schedule the next one */ s_schedule_bg_refresh(provider); return; } s_refresh_session_list(provider, credentials); } static void s_bg_refresh_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; if (status != AWS_TASK_STATUS_RUN_READY) { return; } struct aws_s3express_credentials_provider *provider = arg; struct aws_s3express_credentials_provider_impl *impl = provider->impl; if (impl->default_original_credentials) { s_refresh_session_list(provider, impl->default_original_credentials); } else { /* Get the credentials from provider first. */ if (aws_credentials_provider_get_credentials( impl->default_original_credentials_provider, s_get_original_credentials_callback, provider)) { AWS_LOGF_DEBUG( AWS_LS_AUTH_CREDENTIALS_PROVIDER, "id=%p: S3 Express Provider back ground refresh failed: Failed to get original credentials from " "provider with error %s. 
Skipping refresh.", (void *)provider, aws_error_debug_str(aws_last_error())); /* Skip this refresh, but keep schedule the next one */ s_schedule_bg_refresh(provider); return; } } } void aws_s3express_credentials_provider_init_base( struct aws_s3express_credentials_provider *provider, struct aws_allocator *allocator, struct aws_s3express_credentials_provider_vtable *vtable, void *impl) { AWS_PRECONDITION(provider); AWS_PRECONDITION(vtable); provider->allocator = allocator; provider->vtable = vtable; provider->impl = impl; aws_ref_count_init(&provider->ref_count, provider, (aws_simple_completion_callback *)provider->vtable->destroy); } struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_new_default( struct aws_allocator *allocator, const struct aws_s3express_credentials_provider_default_options *options) { if (!options->client) { AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a S3 client is necessary for querying S3 Express"); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } struct aws_s3express_credentials_provider *provider = NULL; struct aws_s3express_credentials_provider_impl *impl = NULL; aws_mem_acquire_many( allocator, 2, &provider, sizeof(struct aws_s3express_credentials_provider), &impl, sizeof(struct aws_s3express_credentials_provider_impl)); AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: creating S3 Express credentials provider"); AWS_ZERO_STRUCT(*provider); AWS_ZERO_STRUCT(*impl); aws_s3express_credentials_provider_init_base( provider, allocator, &s_aws_s3express_credentials_provider_vtable, impl); aws_hash_table_init( &impl->synced_data.session_creator_table, allocator, 10, aws_hash_string, aws_hash_callback_string_eq, NULL, NULL); impl->synced_data.cache = aws_cache_new_lru( allocator, aws_hash_string, (aws_hash_callback_eq_fn *)aws_string_eq, NULL, (aws_hash_callback_destroy_fn *)s_aws_s3express_session_destroy, s_default_cache_capacity); AWS_ASSERT(impl->synced_data.cache); /* Not keep the s3 client alive to avoid recursive reference */ impl->client = options->client; struct aws_signing_config_aws client_cached_config = impl->client->cached_signing_config->config; if (client_cached_config.credentials) { impl->default_original_credentials = client_cached_config.credentials; aws_credentials_acquire(impl->default_original_credentials); } else { impl->default_original_credentials_provider = aws_credentials_provider_acquire(client_cached_config.credentials_provider); } provider->shutdown_complete_callback = options->shutdown_complete_callback; provider->shutdown_user_data = options->shutdown_user_data; aws_mutex_init(&impl->synced_data.lock); aws_ref_count_init(&impl->internal_ref, provider, (aws_simple_completion_callback *)s_finish_provider_destroy); /* Init the background refresh task */ impl->bg_refresh_task = aws_mem_calloc(provider->allocator, 1, sizeof(struct aws_task)); aws_task_init(impl->bg_refresh_task, s_bg_refresh_task, provider, "s3express_background_refresh"); /* Get an event loop from the client */ impl->bg_event_loop = aws_event_loop_group_get_next_loop(impl->client->client_bootstrap->event_loop_group); impl->mock_test.bg_refresh_secs_override = options->mock_test.bg_refresh_secs_override; s_schedule_bg_refresh(provider); return provider; } struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_release( struct aws_s3express_credentials_provider *provider) { if (provider) { aws_ref_count_release(&provider->ref_count); } return NULL; } int aws_s3express_credentials_provider_get_credentials( 
struct aws_s3express_credentials_provider *provider, const struct aws_credentials *original_credentials, const struct aws_credentials_properties_s3express *property, aws_on_get_credentials_callback_fn callback, void *user_data) { AWS_PRECONDITION(property); AWS_PRECONDITION(provider); AWS_ASSERT(provider->vtable->get_credentials); return provider->vtable->get_credentials(provider, original_credentials, property, callback, user_data); } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/000077500000000000000000000000001456575232400207175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/CMakeLists.txt000066400000000000000000000422011456575232400234560ustar00rootroot00000000000000include(AwsTestHarness) enable_testing() option(BYO_CRYPTO "Don't build a tls implementation or link against a crypto interface. This feature is only for unix builds currently." OFF) if(BYO_CRYPTO) set(ENABLE_NET_TESTS OFF) add_test_case(test_s3_client_byo_crypto_no_options) add_test_case(test_s3_client_byo_crypto_with_options) endif() file(GLOB TEST_SRC "*.c") file(GLOB TEST_HDRS "*.h") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) add_test_case(test_s3_copy_http_message) add_test_case(test_s3_message_util_assign_body) add_test_case(test_s3_ranged_get_object_message_new) add_test_case(test_s3_set_multipart_request_path) add_test_case(test_s3_create_multipart_upload_message_new) add_test_case(test_s3_upload_part_message_new) add_test_case(test_s3_upload_part_message_fail) add_test_case(test_s3_complete_multipart_message_new) add_test_case(test_s3_abort_multipart_upload_message_new) add_net_test_case(test_s3_client_create_destroy) add_net_test_case(test_s3_client_create_error) add_net_test_case(test_s3_client_monitoring_options_override) add_net_test_case(test_s3_client_proxy_ev_settings_override) add_net_test_case(test_s3_client_tcp_keep_alive_options_override) add_net_test_case(test_s3_client_max_active_connections_override) add_test_case(test_s3_client_get_max_active_connections) add_test_case(test_s3_request_create_destroy) add_test_case(test_s3_client_queue_requests) add_test_case(test_s3_meta_request_body_streaming) add_test_case(test_s3_update_meta_requests_trigger_prepare) add_test_case(test_s3_client_update_connections_finish_result) add_net_test_case(test_s3_client_exceed_retries) add_net_test_case(test_s3_client_acquire_connection_fail) add_net_test_case(test_s3_meta_request_fail_prepare_request) add_net_test_case(test_s3_meta_request_sign_request_fail) add_net_test_case(test_s3_meta_request_send_request_finish_fail) add_net_test_case(test_s3_auto_range_put_missing_upload_id) add_net_test_case(test_s3_cancel_mpu_create_not_sent) add_net_test_case(test_s3_cancel_mpu_create_completed) add_net_test_case(test_s3_cancel_mpu_one_part_completed) add_net_test_case(test_s3_cancel_mpu_one_part_completed_async) add_net_test_case(test_s3_cancel_mpu_all_parts_completed) add_net_test_case(test_s3_cancel_mpu_cancellable_requests) add_net_test_case(test_s3_pause_mpu_cancellable_requests) add_net_test_case(test_s3_cancel_mpd_nothing_sent) add_net_test_case(test_s3_cancel_mpd_one_part_sent) add_net_test_case(test_s3_cancel_mpd_one_part_completed) add_net_test_case(test_s3_cancel_mpd_two_parts_completed) add_net_test_case(test_s3_cancel_mpd_head_object_sent) add_net_test_case(test_s3_cancel_mpd_head_object_completed) add_net_test_case(test_s3_cancel_mpd_empty_object_get_with_part_number_1_sent) add_net_test_case(test_s3_cancel_mpd_empty_object_get_with_part_number_1_completed) 
add_net_test_case(test_s3_cancel_mpd_pending_streaming) add_net_test_case(test_s3_cancel_prepare) add_net_test_case(test_s3_get_object_tls_disabled) add_net_test_case(test_s3_get_object_tls_enabled) add_net_test_case(test_s3_get_object_tls_default) add_net_test_case(test_s3_get_object_less_than_part_size) add_net_test_case(test_s3_get_object_empty_object) add_net_test_case(test_s3_get_object_multiple) add_net_test_case(test_s3_get_object_sse_kms) add_net_test_case(test_s3_get_object_sse_aes256) add_net_test_case(test_s3_get_object_backpressure_small_increments) add_net_test_case(test_s3_get_object_backpressure_big_increments) add_net_test_case(test_s3_get_object_backpressure_initial_size_zero) add_net_test_case(test_s3_get_object_part) add_net_test_case(test_s3_no_signing) add_net_test_case(test_s3_signing_override) add_net_test_case(test_s3_put_object_tls_disabled) add_net_test_case(test_s3_put_object_tls_enabled) add_net_test_case(test_s3_put_object_tls_default) add_net_test_case(test_s3_multipart_put_object_with_acl) add_net_test_case(test_s3_put_object_multiple) add_net_test_case(test_s3_put_object_multiple_with_filepath) add_net_test_case(test_s3_put_object_less_than_part_size) add_net_test_case(test_s3_put_object_less_than_part_size_with_content_encoding) add_net_test_case(test_s3_put_object_mpu_with_content_encoding) add_net_test_case(test_s3_put_object_multipart_threshold) add_net_test_case(test_s3_put_object_multipart_threshold_less_than_part_size) add_net_test_case(test_s3_put_object_empty_object) add_net_test_case(test_s3_put_object_with_part_remainder) add_net_test_case(test_s3_put_object_sse_kms) add_net_test_case(test_s3_put_object_sse_kms_multipart) add_net_test_case(test_s3_put_object_sse_aes256) add_net_test_case(test_s3_put_object_sse_aes256_multipart) add_net_test_case(test_s3_put_object_sse_c_aes256_multipart) add_net_test_case(test_s3_put_object_sse_c_aes256_multipart_with_checksum) add_net_test_case(test_s3_put_object_singlepart_no_content_md5_enabled) add_net_test_case(test_s3_put_object_singlepart_no_content_md5_disabled) add_net_test_case(test_s3_put_object_singlepart_correct_content_md5_enabled) add_net_test_case(test_s3_put_object_singlepart_correct_content_md5_disabled) add_net_test_case(test_s3_put_object_singlepart_incorrect_content_md5_enabled) add_net_test_case(test_s3_put_object_singlepart_incorrect_content_md5_disabled) add_net_test_case(test_s3_put_object_multipart_no_content_md5_enabled) add_net_test_case(test_s3_put_object_multipart_no_content_md5_disabled) add_net_test_case(test_s3_put_object_multipart_correct_content_md5_enabled) add_net_test_case(test_s3_put_object_multipart_correct_content_md5_disabled) add_net_test_case(test_s3_put_object_multipart_incorrect_content_md5_enabled) add_net_test_case(test_s3_put_object_multipart_incorrect_content_md5_disabled) add_net_test_case(test_s3_upload_part_message_with_content_md5) add_net_test_case(test_s3_upload_part_message_without_content_md5) add_net_test_case(test_s3_create_multipart_upload_message_with_content_md5) add_net_test_case(test_s3_complete_multipart_message_with_content_md5) add_net_test_case(test_s3_put_object_double_slashes) add_net_test_case(test_s3_put_object_no_content_length) add_net_test_case(test_s3_put_object_single_part_no_content_length) add_net_test_case(test_s3_put_object_zero_size_no_content_length) add_net_test_case(test_s3_put_large_object_no_content_length_with_checksum) add_net_test_case(test_s3_put_object_no_content_length_multiple) 
add_net_test_case(test_s3_put_object_async_singlepart) add_net_test_case(test_s3_put_object_async_multipart) add_net_test_case(test_s3_put_object_async_read_completes_synchronously) add_net_test_case(test_s3_put_object_small_reads) add_net_test_case(test_s3_put_object_async_small_reads) add_net_test_case(test_s3_put_object_async_no_content_length_partial_part) add_net_test_case(test_s3_put_object_async_no_content_length_1part) add_net_test_case(test_s3_put_object_async_no_content_length_empty_part2) add_net_test_case(test_s3_put_object_async_no_content_length_2parts) add_net_test_case(test_s3_put_object_async_fail_reading) add_net_test_case(test_s3_download_empty_file_with_checksum) add_net_test_case(test_s3_download_single_part_file_with_checksum) add_net_test_case(test_s3_download_multipart_file_with_checksum) if(ENABLE_MRAP_TESTS) add_net_test_case(test_s3_get_object_less_than_part_size_mrap) add_net_test_case(test_s3_get_object_multipart_mrap) add_net_test_case(test_s3_put_object_less_than_part_size_mrap) add_net_test_case(test_s3_put_object_multipart_mrap) endif() add_net_test_case(test_s3_round_trip) add_net_test_case(test_s3_round_trip_default_get) add_net_test_case(test_s3_round_trip_multipart_get_fc) add_net_test_case(test_s3_round_trip_default_get_fc) add_net_test_case(test_s3_round_trip_mpu_multipart_get_fc) add_net_test_case(test_s3_round_trip_mpu_multipart_get_with_list_algorithm_fc) add_net_test_case(test_s3_round_trip_mpu_default_get_fc) add_net_test_case(test_s3_round_trip_with_filepath) add_net_test_case(test_s3_round_trip_mpu_with_filepath) add_net_test_case(test_s3_round_trip_with_filepath_no_content_length) add_net_test_case(test_s3_round_trip_mpu_with_filepath_no_content_length) add_net_test_case(test_s3_chunked_then_unchunked) add_net_test_case(test_s3_cancel_mpu_one_part_completed_fc) add_net_test_case(test_s3_cancel_mpd_one_part_completed_fc) add_net_test_case(test_s3_meta_request_default) add_net_test_case(test_s3_put_object_fail_headers_callback) add_net_test_case(test_s3_put_object_fail_body_callback) add_net_test_case(test_s3_get_object_fail_headers_callback) add_net_test_case(test_s3_get_object_fail_body_callback) add_net_test_case(test_s3_default_fail_headers_callback) add_net_test_case(test_s3_default_invoke_headers_callback_on_error) add_net_test_case(test_s3_default_invoke_headers_callback_cancels_on_error) add_net_test_case(test_s3_get_object_invoke_headers_callback_on_error) add_net_test_case(test_s3_put_object_invoke_headers_callback_on_error) add_net_test_case(test_s3_put_object_invoke_headers_callback_on_error_with_user_cancellation) add_net_test_case(test_s3_default_fail_body_callback) add_net_test_case(test_s3_default_fail_operation_name) add_net_test_case(test_s3_error_missing_file) add_net_test_case(test_s3_existing_host_entry) add_net_test_case(test_s3_put_fail_object_invalid_request) add_net_test_case(test_s3_put_fail_object_invalid_send_filepath) add_net_test_case(test_s3_put_fail_object_bad_parallel_read_stream) add_net_test_case(test_s3_put_fail_object_inputstream_fail_reading) add_net_test_case(test_s3_put_fail_object_inputstream_mismatch_content_length) add_net_test_case(test_s3_put_single_part_fail_object_inputstream_fail_reading) add_net_test_case(test_s3_put_single_part_fail_object_inputstream_mismatch_content_length) add_net_test_case(test_s3_put_object_clamp_part_size) add_net_test_case(test_s3_auto_ranged_get_sending_user_agent) add_net_test_case(test_s3_auto_ranged_put_sending_user_agent) 
add_net_test_case(test_s3_default_sending_meta_request_user_agent) add_net_test_case(test_s3_range_requests) add_net_test_case(test_s3_not_satisfiable_range) add_net_test_case(test_s3_invalid_start_range_greator_than_end_range) add_net_test_case(test_s3_invalid_empty_file_with_range) add_net_test_case(test_s3_bad_endpoint) add_net_test_case(test_s3_different_endpoints) add_test_case(test_s3_request_type_operation_name) add_test_case(test_s3_replace_quote_entities) add_test_case(test_s3_strip_quotes) add_test_case(test_s3_parse_request_range_header) add_test_case(test_s3_parse_content_range_response_header) add_test_case(test_s3_parse_content_length_response_header) add_test_case(test_s3_get_num_parts_and_get_part_range) add_test_case(test_s3_mpu_get_part_size_and_num_parts) add_test_case(test_s3_aws_xml_get_body_at_path) add_test_case(test_add_user_agent_header) add_test_case(test_get_existing_platform_info) add_test_case(test_get_nonexistent_platform_info) add_test_case(test_get_platforms_with_recommended_config) add_net_test_case(load_platform_info_from_global_state_sanity_test) add_net_test_case(sha1_nist_test_case_1) add_net_test_case(sha1_nist_test_case_2) add_net_test_case(sha1_nist_test_case_3) add_net_test_case(sha1_nist_test_case_4) add_net_test_case(sha1_nist_test_case_5) add_net_test_case(sha1_nist_test_case_5_truncated) add_net_test_case(sha1_nist_test_case_6) add_net_test_case(sha1_test_invalid_buffer) add_net_test_case(sha1_test_oneshot) add_net_test_case(sha1_test_invalid_state) add_net_test_case(sha256_nist_test_case_1) add_net_test_case(sha256_nist_test_case_2) add_net_test_case(sha256_nist_test_case_3) add_net_test_case(sha256_nist_test_case_4) add_net_test_case(sha256_nist_test_case_5) add_net_test_case(sha256_nist_test_case_5_truncated) add_net_test_case(sha256_nist_test_case_6) add_net_test_case(sha256_test_invalid_buffer) add_net_test_case(sha256_test_oneshot) add_net_test_case(sha256_test_invalid_state) add_test_case(crc32_nist_test_case_1) add_test_case(crc32_nist_test_case_2) add_test_case(crc32_nist_test_case_3) add_test_case(crc32_nist_test_case_4) add_test_case(crc32_nist_test_case_5) add_test_case(crc32_nist_test_case_5_truncated) add_test_case(crc32_nist_test_case_6) add_test_case(crc32_test_invalid_buffer) add_test_case(crc32_test_oneshot) add_test_case(crc32_test_invalid_state) add_test_case(crc32c_nist_test_case_1) add_test_case(crc32c_nist_test_case_2) add_test_case(crc32c_nist_test_case_3) add_test_case(crc32c_nist_test_case_4) add_test_case(crc32c_nist_test_case_5) add_test_case(crc32c_nist_test_case_5_truncated) add_test_case(crc32c_nist_test_case_6) add_test_case(crc32c_test_invalid_buffer) add_test_case(crc32c_test_oneshot) add_test_case(crc32c_test_invalid_state) add_net_test_case(verify_checksum_stream) add_net_test_case(verify_chunk_stream) add_net_test_case(test_s3_copy_small_object) add_net_test_case(test_s3_copy_small_object_special_char) add_net_test_case(test_s3_multipart_copy_large_object_special_char) add_net_test_case(test_s3_multipart_copy_large_object) add_net_test_case(test_s3_copy_object_invalid_source_key) add_net_test_case(test_s3_copy_source_prefixed_by_slash) add_net_test_case(test_s3_copy_source_prefixed_by_slash_multipart) add_net_test_case(test_s3_put_pause_resume_happy_path) add_net_test_case(test_s3_put_pause_resume_all_parts_done) add_net_test_case(test_s3_put_pause_resume_invalid_resume_data) add_net_test_case(test_s3_put_pause_resume_invalid_content_length) add_net_test_case(test_s3_upload_review) 
add_net_test_case(test_s3_upload_review_no_content_length) add_net_test_case(test_s3_upload_review_rejection) add_net_test_case(test_s3_list_bucket_init_mem_safety) add_net_test_case(test_s3_list_bucket_init_mem_safety_optional_copies) add_net_test_case(test_s3_list_bucket_valid) # Tests against local mock server if(ENABLE_MOCK_SERVER_TESTS) add_net_test_case(multipart_upload_mock_server) add_net_test_case(multipart_upload_checksum_with_retry_mock_server) add_net_test_case(multipart_download_checksum_with_retry_mock_server) add_net_test_case(async_internal_error_from_complete_multipart_mock_server) add_net_test_case(async_access_denied_from_complete_multipart_mock_server) add_net_test_case(get_object_modified_mock_server) add_net_test_case(get_object_invalid_responses_mock_server) add_net_test_case(get_object_mismatch_checksum_responses_mock_server) add_net_test_case(get_object_throughput_failure_mock_server) add_net_test_case(upload_part_invalid_response_mock_server) add_net_test_case(upload_part_async_invalid_response_mock_server) add_net_test_case(resume_first_part_not_completed_mock_server) add_net_test_case(resume_multi_page_list_parts_mock_server) add_net_test_case(resume_list_parts_failed_mock_server) add_net_test_case(resume_after_finished_mock_server) add_net_test_case(multipart_upload_proxy_mock_server) add_net_test_case(endpoint_override_mock_server) add_net_test_case(s3express_provider_sanity_mock_server) add_net_test_case(s3express_provider_get_credentials_mock_server) add_net_test_case(s3express_provider_get_credentials_multiple_mock_server) add_net_test_case(s3express_provider_get_credentials_cancel_mock_server) add_net_test_case(s3express_provider_get_credentials_cache_mock_server) add_net_test_case(s3express_provider_background_refresh_mock_server) add_net_test_case(s3express_provider_background_refresh_remove_inactive_creds_mock_server) add_net_test_case(s3express_provider_stress_mock_server) add_net_test_case(s3express_client_sanity_test_mock_server) add_net_test_case(s3express_client_sanity_override_test_mock_server) add_net_test_case(request_time_too_skewed_mock_server) endif() add_net_test_case(s3express_provider_long_running_session_refresh) add_net_test_case(s3express_client_put_object) add_net_test_case(s3express_client_put_object_multipart) add_net_test_case(s3express_client_put_object_multipart_multiple) add_net_test_case(s3express_client_put_object_long_running_session_refresh) add_net_test_case(s3express_client_get_object) add_net_test_case(s3express_client_get_object_multiple) add_net_test_case(s3express_client_get_object_create_session_error) add_net_test_case(meta_request_auto_ranged_get_new_error_handling) add_net_test_case(meta_request_auto_ranged_put_new_error_handling) add_net_test_case(bad_request_error_handling) add_net_test_case(make_meta_request_error_handling) if(AWS_ENABLE_S3_ENDPOINT_RESOLVER) add_test_case(test_s3_endpoint_resolver_resolve_endpoint) add_test_case(test_s3_endpoint_resolver_resolve_endpoint_fips) add_test_case(test_s3_endpoint_resolver_resolve_endpoint_force_path_style) endif() add_test_case(parallel_read_stream_from_file_sanity_test) add_test_case(parallel_read_stream_from_large_file_test) add_test_case(test_s3_buffer_pool_threaded_allocs_and_frees) add_test_case(test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees) add_test_case(test_s3_buffer_pool_limits) add_test_case(test_s3_buffer_pool_trim) add_test_case(test_s3_buffer_pool_reservation_hold) add_test_case(test_s3_buffer_pool_too_small) 
add_net_test_case(test_s3_put_object_buffer_pool_trim) add_net_test_case(client_update_upload_part_timeout) add_net_test_case(client_meta_request_override_part_size) add_net_test_case(client_meta_request_override_multipart_upload_threshold) set(TEST_BINARY_NAME ${PROJECT_NAME}-tests) generate_test_driver(${TEST_BINARY_NAME}) if(AWS_ENABLE_S3_ENDPOINT_RESOLVER) target_compile_definitions(${PROJECT_NAME}-tests PRIVATE "-DAWS_ENABLE_S3_ENDPOINT_RESOLVER") endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/000077500000000000000000000000001456575232400236435ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/AbortMultipartUpload/000077500000000000000000000000001456575232400277615ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/AbortMultipartUpload/default.json000066400000000000000000000001201456575232400322710ustar00rootroot00000000000000{ "status": 204, "headers": {"Connection": "keep-alive"}, "body": [ ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CompleteMultipartUpload/000077500000000000000000000000001456575232400304625ustar00rootroot00000000000000async_access_denied_error.json000066400000000000000000000006531456575232400364610ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CompleteMultipartUpload{ "status": 200, "headers": {"Connection": "close"}, "body": [ "", "", "", "AccessDenied", "Access denied.", "656c76696e6727732072657175657374", "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==", "" ] } async_internal_error.json000066400000000000000000000007171456575232400355250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CompleteMultipartUpload{ "status": 200, "headers": {"Connection": "close"}, "body": [ "", "", "", "InternalError", "We encountered an internal error. 
Please try again.", "656c76696e6727732072657175657374", "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==", "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CompleteMultipartUpload/default.json000066400000000000000000000006771456575232400330130ustar00rootroot00000000000000{ "status": 200, "headers": {"Connection": "close"}, "body": [ "", "", "http://default.s3.us-west-2.amazonaws.com/default", "default", "default", "\"3858f62230ac3c915f300c664312c11f-9\"", "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CreateMultipartUpload/000077500000000000000000000000001456575232400301155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CreateMultipartUpload/default.json000066400000000000000000000004611456575232400324350ustar00rootroot00000000000000{ "status": 200, "headers": {"x-amz-request-id": "12345"}, "body": [ "", "", "default", "default", "defaultID", "" ] } request_time_too_skewed.json000066400000000000000000000010311456575232400356550ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CreateMultipartUpload{ "status": 403, "headers": {"x-amz-request-id": "12345"}, "body": [ "", "", "", "RequestTimeTooSkewed", "The difference between the request time and the current time is too large.", "20230725T161257Z", "2023-07-25T16:27:59Z", "900000", "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CreateSession/000077500000000000000000000000001456575232400264125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/CreateSession/default.json000066400000000000000000000007501456575232400307330ustar00rootroot00000000000000{ "status": 200, "headers": {"x-amz-request-id": "12345"}, "body": [ "", "", "", "sessionToken", "secretKey", "accessKeyId", "2023-06-26T17:33:30Z", "", "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject/000077500000000000000000000000001456575232400255115ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject/default.json000066400000000000000000000001001456575232400300170ustar00rootroot00000000000000{ "status": 404, "headers": {}, "body": [ ] } get_object_checksum_retry.json000066400000000000000000000005531456575232400335440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject{ "status": 200, "headers": { "ETag": "b54357faf0632cce46e942fa68356b38", "Date": "Thu, 12 Jan 2023 00:04:21 GMT", "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", "Accept-Ranges": "bytes", "Content-Range": "bytes 0-65535/65536", "Content-Type": "binary/octet-stream", "x-amz-checksum-crc32": "q1875w==" } } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject/get_object_delay_60s.json000066400000000000000000000005761456575232400323670ustar00rootroot00000000000000{ "delay": 60, "status": 200, "headers": { "ETag": "b54357faf0632cce46e942fa68356b38", "Date": "Thu, 12 Jan 2023 00:04:21 GMT", "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", "Accept-Ranges": "bytes", "Content-Range": "bytes 0-65535/65536", "Content-Type": "binary/octet-stream" }, "body": [ "" ] } get_object_invalid_response_missing_content_range.json000066400000000000000000000004511456575232400405150ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject{ "status": 206, "headers": { "ETag": "b54357faf0632cce46e942fa68356b38", "Date": "Thu, 12 Jan 2023 00:04:21 GMT", "Last-Modified": "Tue, 10 Jan 2023 
23:39:32 GMT", "Accept-Ranges": "bytes", "Content-Type": "binary/octet-stream" }, "body": [ "" ] } get_object_invalid_response_missing_etags.json000066400000000000000000000004471456575232400367770ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject{ "status": 206, "headers": { "Date": "Thu, 12 Jan 2023 00:04:21 GMT", "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", "Accept-Ranges": "bytes", "Content-Range": "bytes 0-65535/1048576", "Content-Type": "binary/octet-stream" }, "body": [ "" ] } get_object_modified_failure.json000066400000000000000000000001101456575232400337710ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject{ "status": 400, "headers": {}, "body": [ "bad_request" ] } get_object_modified_first_part.json000066400000000000000000000005271456575232400345330ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject{ "status": 206, "headers": { "ETag": "b54357faf0632cce46e942fa68356b38", "Date": "Thu, 12 Jan 2023 00:04:21 GMT", "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", "Accept-Ranges": "bytes", "Content-Range": "bytes 0-65535/1048576", "Content-Type": "binary/octet-stream" }, "body": [ "" ] } get_object_modified_success.json000066400000000000000000000002651456575232400340250ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject{ "status": 412, "headers": { "Date": "Thu, 12 Jan 2023 00:04:21 GMT", "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT" }, "body": [ "precondition failed" ] } get_object_unmatch_checksum_crc32.json000066400000000000000000000006271456575232400350340ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/GetObject{ "status": 200, "headers": { "ETag": "b54357faf0632cce46e942fa68356b38", "Date": "Thu, 12 Jan 2023 00:04:21 GMT", "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", "Accept-Ranges": "bytes", "Content-Range": "bytes 0-65535/65536", "Content-Type": "binary/octet-stream", "x-amz-checksum-crc32": "q1875w==" }, "body": [ "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/ListParts/000077500000000000000000000000001456575232400255705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/ListParts/default.json000066400000000000000000000001001456575232400300760ustar00rootroot00000000000000{ "status": 404, "headers": {}, "body": [ ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/ListParts/multiple_list_parts_1.json000066400000000000000000000014221456575232400330010ustar00rootroot00000000000000{ "status": 200, "headers": {"Connection": "keep-alive"}, "body": [ "", "", "example-bucket", "example-object", "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA", "2", "true", "", "2", "KtQF9Q==", "2010-11-10T20:48:34.000Z", "\"7778aef83f66abc1fa1e8477f296d394\"", "8388608", "", "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/ListParts/multiple_list_parts_2.json000066400000000000000000000014131456575232400330020ustar00rootroot00000000000000{ "status": 200, "headers": {"Connection": "keep-alive"}, "body": [ "", "", "example-bucket", "example-object", "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA", "2", "false", "", "3", "yagJog==", "2010-11-10T20:48:33.000Z", "\"aaaa18db4cc2f85cedef654fccc4a4x8\"", "8388608", "", "" ] } 
resume_first_part_not_completed.json000066400000000000000000000016151456575232400350600ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/ListParts{ "status": 200, "headers": {"Connection": "keep-alive"}, "body": [ "", "", "example-bucket", "example-object", "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA", "false", "", "2", "2010-11-10T20:48:34.000Z", "\"7778aef83f66abc1fa1e8477f296d394\"", "8388608", "", "", "3", "2010-11-10T20:48:33.000Z", "\"aaaa18db4cc2f85cedef654fccc4a4x8\"", "8388608", "", "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/README.md000066400000000000000000000035071456575232400251270ustar00rootroot00000000000000# Mock S3 server A **NON-TLS** mock S3 server based on [python-hyper/h11](https://github.com/python-hyper/h11) and [trio](http://trio.readthedocs.io/en/latest/index.html). The server code implementation is based on the trio-server example from python-hyper/h11 [here](https://github.com/python-hyper/h11/blob/master/examples/trio-server.py). Only supports very basic mock response for request received. ## How to run the server Python 3.7+ required. - Install hyper/h11 and trio python module. `python3 -m pip install h11 trio` - Run python. `python3 ./mock_s3_server.py`. ### Supported Operations - CreateMultipartUpload - CompleteMultipartUpload - UploadPart - AbortMultipartUpload - GetObject ### Defined response The server will read from ./{OperationName}/{Key}.json. The json file is formatted as following: ```json { "status": 200, "headers": {"Connection": "close"}, "body": [ "", "", "", "InternalError", "We encountered an internal error. Please try again.", "656c76696e6727732072657175657374", "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==", "" ] } ``` Where you can define the expected response status, header and response body. If the {Key}.json is not found from file system, it will load the `default.json`. If the "delay" field is present, the response will be delayed by X seconds. ### GetObject Response By default, the GetObject response will read from ./{OperationName}/{Key}.json for the status and headers. But the body will be generated to match the range in the request. To proper handle ranged GetObject, you will need to modify the mock server code. Check function `handle_get_object` for details. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/UploadPart/000077500000000000000000000000001456575232400257165ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/UploadPart/default.json000066400000000000000000000001741456575232400302370ustar00rootroot00000000000000{ "status": 200, "headers": {"ETag": "b54357faf0632cce46e942fa68356b38", "Connection": "keep-alive"}, "body": [ ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/UploadPart/missing_etag.json000066400000000000000000000001321456575232400312560ustar00rootroot00000000000000{ "status": 200, "headers": {"Connection": "keep-alive"}, "body": [ ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/UploadPart/throttle.json000066400000000000000000000006521456575232400304610ustar00rootroot00000000000000{ "status": 503, "headers": {"ETag": "b54357faf0632cce46e942fa68356b38", "Connection": "keep-alive"}, "body": [ "", "", "", "SlowDown", "656c76696e6727732072657175657374", "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==", "" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/mock_s3_server/mock_s3_server.py000066400000000000000000000447671456575232400271630ustar00rootroot00000000000000# A simple HTTP server implemented using h11 and Trio: # http://trio.readthedocs.io/en/latest/index.html # # S3 Mock server logic starts from handle_mock_s3_request from dataclasses import dataclass import json from itertools import count from urllib.parse import parse_qs, urlparse import os from typing import Optional from enum import Enum import trio import h11 MAX_RECV = 2**16 TIMEOUT = 120 # this must be higher than any response's "delay" setting VERBOSE = False # Flags to keep between requests SHOULD_THROTTLE = True RETRY_REQUEST_COUNT = 0 base_dir = os.path.dirname(os.path.realpath(__file__)) class S3Opts(Enum): CreateMultipartUpload = 1 CompleteMultipartUpload = 2 UploadPart = 3 AbortMultipartUpload = 4 GetObject = 5 ListParts = 6 CreateSession = 7 @dataclass class Response: status_code: int delay: int headers: any data: str chunked: bool head_request: bool @dataclass class ResponseConfig: path: str disconnect_after_headers = False generate_body_size: Optional[int] = None json_path: str = None throttle: bool = False force_retry: bool = False def _resolve_file_path(self, wrapper, request_type): global SHOULD_THROTTLE if self.json_path is None: response_file = os.path.join( base_dir, request_type.name, f"{self.path[1:]}.json") if os.path.exists(response_file) == False: wrapper.info( response_file, "not exist, using the default response") response_file = os.path.join( base_dir, request_type.name, f"default.json") if "throttle" in response_file: # We throttle the request half the time to make sure it succeeds after a retry if SHOULD_THROTTLE is False: wrapper.info("Skipping throttling") response_file = os.path.join( base_dir, request_type.name, f"default.json") else: wrapper.info("Throttling") # Flip the flag SHOULD_THROTTLE = not SHOULD_THROTTLE self.json_path = response_file def resolve_response(self, wrapper, request_type, chunked=False, head_request=False): self._resolve_file_path(wrapper, request_type) wrapper.info("resolving response from json file: ", self.json_path, ".\n generate_body_size: ", self.generate_body_size) with open(self.json_path, 'r') as f: data = json.load(f) # if response has delay, then sleep before sending it delay = data.get('delay', 0) status_code = data['status'] if self.generate_body_size is not None: # generate body with a specific size instead 
body = "a" * self.generate_body_size else: body = "\n".join(data['body']) headers = wrapper.basic_headers() content_length_set = False for header in data['headers'].items(): headers.append((header[0], str(header[1]))) if header[0].lower() == "content-length": content_length_set = True if chunked: headers.append(('Transfer-Encoding', "chunked")) else: if self.force_retry: # Use a long `content-length` header to trigger error when we try to send EOM. # so that the server will close connection after we send the header. headers.append(("Content-Length", str(123456))) elif content_length_set is False: headers.append(("Content-Length", str(len(body)))) response = Response(status_code=status_code, delay=delay, headers=headers, data=body, chunked=chunked, head_request=head_request) return response class TrioHTTPWrapper: _next_id = count() def __init__(self, stream): self.stream = stream self.conn = h11.Connection(h11.SERVER) # A unique id for this connection, to include in debugging output # (useful for understanding what's going on if there are multiple # simultaneous clients). self._obj_id = next(TrioHTTPWrapper._next_id) async def send(self, event): assert type(event) is not h11.ConnectionClosed data = self.conn.send(event) try: await self.stream.send_all(data) except BaseException: # If send_all raises an exception (especially trio.Cancelled), # we have no choice but to give it up. self.conn.send_failed() raise async def _read_from_peer(self): if self.conn.they_are_waiting_for_100_continue: self.info("Sending 100 Continue") go_ahead = h11.InformationalResponseConfig( status_code=100, headers=self.basic_headers() ) await self.send(go_ahead) try: data = await self.stream.receive_some(MAX_RECV) except ConnectionError: # They've stopped listening. Not much we can do about it here. 
data = b"" self.conn.receive_data(data) async def next_event(self): while True: event = self.conn.next_event() if event is h11.NEED_DATA: await self._read_from_peer() continue return event async def shutdown_and_clean_up(self): try: await self.stream.send_eof() except trio.BrokenResourceError: return with trio.move_on_after(TIMEOUT): try: while True: # Attempt to read until EOF got = await self.stream.receive_some(MAX_RECV) if not got: break except trio.BrokenResourceError: pass finally: await self.stream.aclose() def basic_headers(self): # HTTP requires these headers in all responses (client would do # something different here) return [ ("Server", "mock_s3_server"), ] def info(self, *args): # Little debugging method if VERBOSE: print("{}:".format(self._obj_id), *args) ################################################################ # Server main loop ################################################################ async def send_simple_response(wrapper, status_code, content_type, body): wrapper.info("Sending", status_code, "response with", len(body), "bytes") headers = wrapper.basic_headers() headers.append(("Content-Type", content_type)) headers.append(("Content-Length", str(len(body)))) res = h11.Response(status_code=status_code, headers=headers) await wrapper.send(res) await wrapper.send(h11.Data(data=body)) await wrapper.send(h11.EndOfMessage()) async def maybe_send_error_response(wrapper, exc): if wrapper.conn.our_state not in {h11.IDLE, h11.SEND_RESPONSE}: wrapper.info("...but I can't, because our state is", wrapper.conn.our_state) return try: if isinstance(exc, h11.RemoteProtocolError): status_code = exc.error_status_hint elif isinstance(exc, trio.TooSlowError): status_code = 408 # Request Timeout else: status_code = 500 body = str(exc).encode("utf-8") await send_simple_response( wrapper, status_code, "text/plain; charset=utf-8", body ) except Exception as exc: wrapper.info("error while sending error response:", exc) async def http_serve(stream): wrapper = TrioHTTPWrapper(stream) wrapper.info("Got new connection") while True: assert wrapper.conn.states == { h11.CLIENT: h11.IDLE, h11.SERVER: h11.IDLE} try: with trio.fail_after(TIMEOUT): wrapper.info("Server main loop waiting for request") event = await wrapper.next_event() wrapper.info("Server main loop got event:", event) if type(event) is h11.Request: await handle_mock_s3_request(wrapper, event) except Exception as exc: wrapper.info("Error during response handler: {!r}".format(exc)) await maybe_send_error_response(wrapper, exc) if wrapper.conn.our_state is h11.MUST_CLOSE: wrapper.info("connection is not reusable, so shutting down") await wrapper.shutdown_and_clean_up() return else: try: wrapper.info("trying to re-use connection") wrapper.conn.start_next_cycle() except h11.ProtocolError: states = wrapper.conn.states wrapper.info("unexpected state", states, "-- bailing out") await maybe_send_error_response( wrapper, RuntimeError("unexpected state {}".format(states)) ) await wrapper.shutdown_and_clean_up() return ################################################################ # Actual response handlers ################################################################ # Helper function async def send_response(wrapper, response): if response.delay > 0: assert response.delay < TIMEOUT await trio.sleep(response.delay) wrapper.info("Sending", response.status_code, "response with", len(response.data), "bytes") res = h11.Response(status_code=response.status_code, headers=response.headers) try: await wrapper.send(res) except Exception as e: 
print(e) if not response.head_request: if response.chunked: await wrapper.send(h11.Data(data=b"%X\r\n%s\r\n" % (len(response.data), response.data.encode()))) else: await wrapper.send(h11.Data(data=response.data.encode())) await wrapper.send(h11.EndOfMessage()) res = h11.Response(status_code=response.status_code, headers=response.headers) async def send_simple_response(wrapper, status_code, content_type, body): wrapper.info("Sending", status_code, "response with", len(body), "bytes") headers = wrapper.basic_headers() headers.append(("Content-Type", content_type)) headers.append(("Content-Length", str(len(body)))) res = h11.Response(status_code=status_code, headers=headers) await wrapper.send(res) await wrapper.send(h11.Data(data=body)) await wrapper.send(h11.EndOfMessage()) async def send_response_from_json(wrapper, response_json_path, chunked=False, generate_body=False, generate_body_size=0, head_request=False): wrapper.info("sending response from json file: ", response_json_path, ".\n generate_body: ", generate_body, "generate_body_size: ", generate_body_size) with open(response_json_path, 'r') as f: data = json.load(f) # if response has delay, then sleep before sending it delay = data.get('delay', 0) if delay > 0: assert delay < TIMEOUT await trio.sleep(delay) status_code = data['status'] if generate_body: # generate body with a specific size instead body = "a" * generate_body_size else: body = "\n".join(data['body']) wrapper.info("Sending", status_code, "response with", len(body), "bytes") headers = wrapper.basic_headers() for header in data['headers'].items(): headers.append((header[0], header[1])) if chunked: headers.append(('Transfer-Encoding', "chunked")) res = h11.Response(status_code=status_code, headers=headers) await wrapper.send(res) await wrapper.send(h11.Data(data=b"%X\r\n%s\r\n" % (len(body), body.encode()))) else: headers.append(("Content-Length", str(len(body)))) res = h11.Response(status_code=status_code, headers=headers) await wrapper.send(res) if head_request: await wrapper.send(h11.EndOfMessage()) return await wrapper.send(h11.Data(data=body.encode())) await wrapper.send(h11.EndOfMessage()) async def send_mock_s3_response(wrapper, request_type, path, generate_body=False, generate_body_size=0, head_request=False): response_file = os.path.join( base_dir, request_type.name, f"{path[1:]}.json") if os.path.exists(response_file) == False: wrapper.info(response_file, "not exist, using the default response") response_file = os.path.join( base_dir, request_type.name, f"default.json") if "throttle" in response_file: # We throttle the request half the time to make sure it succeeds after a retry if wrapper.should_throttle is False: wrapper.info("Skipping throttling") response_file = os.path.join( base_dir, request_type.name, f"default.json") else: wrapper.info("Throttling") # Flip the flag wrapper.should_throttle = not wrapper.should_throttle await send_response_from_json(wrapper, response_file, generate_body=generate_body, generate_body_size=generate_body_size, head_request=head_request) async def maybe_send_error_response(wrapper, exc): if wrapper.conn.our_state not in {h11.IDLE, h11.SEND_RESPONSE}: wrapper.info("...but I can't, because our state is", wrapper.conn.our_state) return try: await wrapper.send(res) except Exception as e: print(e) if not response.head_request: if response.chunked: await wrapper.send(h11.Data(data=b"%X\r\n%s\r\n" % (len(response.data), response.data.encode()))) else: await wrapper.send(h11.Data(data=response.data.encode())) await 
wrapper.send(h11.EndOfMessage()) def get_request_header_value(request, header_name): for header in request.headers: if header[0].decode("utf-8").lower() == header_name.lower(): return header[1].decode("utf-8") return None def handle_get_object_modified(start_range, end_range, request): data_length = end_range - start_range if start_range == 0: return ResponseConfig("/get_object_modified_first_part", generate_body_size=data_length) else: # Check the request header to make sure "If-Match" is set etag = get_request_header_value(request, "if-match") print(etag) # fetch Etag from the first_part response file response_file = os.path.join( base_dir, S3Opts.GetObject.name, f"get_object_modified_first_part.json") with open(response_file, 'r') as f: data = json.load(f) if data['headers']['ETag'] == etag: return ResponseConfig("/get_object_modified_success") return ResponseConfig("/get_object_modified_failure") def handle_get_object(wrapper, request, parsed_path, head_request=False): global RETRY_REQUEST_COUNT response_config = ResponseConfig(parsed_path.path) if parsed_path.path == "/get_object_checksum_retry" and not head_request: RETRY_REQUEST_COUNT = RETRY_REQUEST_COUNT + 1 if RETRY_REQUEST_COUNT == 1: wrapper.info("Force retry on the request") response_config.force_retry = True else: RETRY_REQUEST_COUNT = 0 if parsed_path.path == "/get_object_invalid_response_missing_content_range" or parsed_path.path == "/get_object_invalid_response_missing_etags": # Don't generate the body for those requests return response_config body_range_value = get_request_header_value(request, "range") if body_range_value: body_range = body_range_value.split("=")[1] start_range = int(body_range.split("-")[0]) end_range = int(body_range.split("-")[1]) else: # default length is 65535 start_range = 0 end_range = 65535 data_length = end_range - start_range if parsed_path.path == "/get_object_modified": return handle_get_object_modified(start_range, end_range, request) response_config.generate_body_size = data_length return response_config def handle_list_parts(parsed_path): if parsed_path.path == "/multiple_list_parts": if parsed_path.query.find("part-number-marker") != -1: return ResponseConfig("/multiple_list_parts_2") else: return ResponseConfig("/multiple_list_parts_1") return ResponseConfig(parsed_path.path) async def handle_mock_s3_request(wrapper, request): parsed_path = urlparse(request.target.decode("ascii")) method = request.method.decode("utf-8") response_config = None if method == "POST": if parsed_path.query == "uploads": # POST /{Key+}?uploads HTTP/1.1 -- Create MPU request_type = S3Opts.CreateMultipartUpload else: # POST /Key+?uploadId=UploadId HTTP/1.1 -- Complete MPU request_type = S3Opts.CompleteMultipartUpload elif method == "PUT": request_type = S3Opts.UploadPart elif method == "DELETE": request_type = S3Opts.AbortMultipartUpload elif method == "GET" or method == "HEAD": if parsed_path.query.find("uploadId") != -1: # GET /Key+?max-parts=MaxParts&part-number-marker=PartNumberMarker&uploadId=UploadId HTTP/1.1 -- List Parts request_type = S3Opts.ListParts response_config = handle_list_parts(parsed_path) elif parsed_path.query.find("session") != -1: request_type = S3Opts.CreateSession else: request_type = S3Opts.GetObject response_config = handle_get_object( wrapper, request, parsed_path, head_request=method == "HEAD") else: # TODO: support more type. 
wrapper.info("unsupported request:", request) request_type = S3Opts.CreateMultipartUpload while True: event = await wrapper.next_event() if type(event) is h11.EndOfMessage: break assert type(event) is h11.Data if response_config is None: response_config = ResponseConfig(parsed_path.path) response = response_config.resolve_response( wrapper, request_type, head_request=method == "HEAD") await send_response(wrapper, response) ################################################################ # Run the server ################################################################ async def serve(port): print("listening on http://localhost:{}".format(port)) try: await trio.serve_tcp(http_serve, port) except KeyboardInterrupt: print("KeyboardInterrupt - shutting down") if __name__ == "__main__": trio.run(serve, 8080) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_buffer_pool_tests.c000066400000000000000000000177441456575232400252310ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #define NUM_TEST_ALLOCS 100 #define NUM_TEST_THREADS 8 struct pool_thread_test_data { struct aws_s3_buffer_pool *pool; uint32_t thread_idx; }; static void s_thread_test(struct aws_allocator *allocator, void (*thread_fn)(void *), struct aws_s3_buffer_pool *pool) { const struct aws_thread_options *thread_options = aws_default_thread_options(); struct aws_thread threads[NUM_TEST_THREADS]; struct pool_thread_test_data thread_data[NUM_TEST_THREADS]; AWS_ZERO_ARRAY(threads); AWS_ZERO_ARRAY(thread_data); for (size_t thread_idx = 0; thread_idx < AWS_ARRAY_SIZE(threads); ++thread_idx) { struct aws_thread *thread = &threads[thread_idx]; aws_thread_init(thread, allocator); struct pool_thread_test_data *data = &thread_data[thread_idx]; data->pool = pool; data->thread_idx = (uint32_t)thread_idx; aws_thread_launch(thread, thread_fn, data, thread_options); } for (size_t thread_idx = 0; thread_idx < AWS_ARRAY_SIZE(threads); ++thread_idx) { struct aws_thread *thread = &threads[thread_idx]; aws_thread_join(thread); } } static void s_threaded_alloc_worker(void *user_data) { struct aws_s3_buffer_pool *pool = ((struct pool_thread_test_data *)user_data)->pool; struct aws_s3_buffer_pool_ticket *tickets[NUM_TEST_ALLOCS]; for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { size_t size = 8 * 1024 * 1024; struct aws_s3_buffer_pool_ticket *ticket = aws_s3_buffer_pool_reserve(pool, size); AWS_FATAL_ASSERT(ticket); struct aws_byte_buf buf = aws_s3_buffer_pool_acquire_buffer(pool, ticket); AWS_FATAL_ASSERT(buf.buffer); memset(buf.buffer, 0, buf.capacity); tickets[count] = ticket; } for (size_t count = 0; count < NUM_TEST_ALLOCS / NUM_TEST_THREADS; ++count) { aws_s3_buffer_pool_release_ticket(pool, tickets[count]); } } static int s_test_s3_buffer_pool_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), GB_TO_BYTES(2)); s_thread_test(allocator, s_threaded_alloc_worker, buffer_pool); aws_s3_buffer_pool_destroy(buffer_pool); return 0; } AWS_TEST_CASE(test_s3_buffer_pool_threaded_allocs_and_frees, s_test_s3_buffer_pool_threaded_allocs_and_frees) static int s_test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, 
MB_TO_BYTES(65), GB_TO_BYTES(2)); struct aws_s3_buffer_pool_usage_stats stats = aws_s3_buffer_pool_get_usage(buffer_pool); ASSERT_INT_EQUALS(0, stats.primary_cutoff); s_thread_test(allocator, s_threaded_alloc_worker, buffer_pool); aws_s3_buffer_pool_destroy(buffer_pool); return 0; } AWS_TEST_CASE( test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees, s_test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees) static int s_test_s3_buffer_pool_limits(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), GB_TO_BYTES(1)); struct aws_s3_buffer_pool_ticket *ticket1 = aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(64)); ASSERT_NOT_NULL(ticket1); struct aws_byte_buf buf1 = aws_s3_buffer_pool_acquire_buffer(buffer_pool, ticket1); ASSERT_NOT_NULL(buf1.buffer); struct aws_s3_buffer_pool_ticket *tickets[6]; for (size_t i = 0; i < 6; ++i) { tickets[i] = aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(128)); ASSERT_NOT_NULL(tickets[i]); struct aws_byte_buf buf = aws_s3_buffer_pool_acquire_buffer(buffer_pool, tickets[i]); ASSERT_NOT_NULL(buf.buffer); } ASSERT_NULL(aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(128))); ASSERT_NULL(aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(96))); aws_s3_buffer_pool_remove_reservation_hold(buffer_pool); struct aws_s3_buffer_pool_ticket *ticket2 = aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(32)); ASSERT_NOT_NULL(ticket2); struct aws_byte_buf buf2 = aws_s3_buffer_pool_acquire_buffer(buffer_pool, ticket2); ASSERT_NOT_NULL(buf2.buffer); for (size_t i = 0; i < 6; ++i) { aws_s3_buffer_pool_release_ticket(buffer_pool, tickets[i]); } aws_s3_buffer_pool_release_ticket(buffer_pool, ticket1); aws_s3_buffer_pool_release_ticket(buffer_pool, ticket2); aws_s3_buffer_pool_destroy(buffer_pool); return 0; } AWS_TEST_CASE(test_s3_buffer_pool_limits, s_test_s3_buffer_pool_limits) static int s_test_s3_buffer_pool_trim(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), GB_TO_BYTES(1)); struct aws_s3_buffer_pool_ticket *tickets[40]; for (size_t i = 0; i < 40; ++i) { tickets[i] = aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(8)); ASSERT_NOT_NULL(tickets[i]); struct aws_byte_buf buf = aws_s3_buffer_pool_acquire_buffer(buffer_pool, tickets[i]); ASSERT_NOT_NULL(buf.buffer); } struct aws_s3_buffer_pool_usage_stats stats_before = aws_s3_buffer_pool_get_usage(buffer_pool); for (size_t i = 0; i < 20; ++i) { aws_s3_buffer_pool_release_ticket(buffer_pool, tickets[i]); } aws_s3_buffer_pool_trim(buffer_pool); struct aws_s3_buffer_pool_usage_stats stats_after = aws_s3_buffer_pool_get_usage(buffer_pool); ASSERT_TRUE(stats_before.primary_num_blocks > stats_after.primary_num_blocks); for (size_t i = 20; i < 40; ++i) { aws_s3_buffer_pool_release_ticket(buffer_pool, tickets[i]); } aws_s3_buffer_pool_destroy(buffer_pool); return 0; }; AWS_TEST_CASE(test_s3_buffer_pool_trim, s_test_s3_buffer_pool_trim) static int s_test_s3_buffer_pool_reservation_hold(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), GB_TO_BYTES(1)); struct aws_s3_buffer_pool_ticket *tickets[112]; for (size_t i = 0; i < 112; ++i) { tickets[i] = aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(8)); ASSERT_NOT_NULL(tickets[i]); struct aws_byte_buf buf = 
aws_s3_buffer_pool_acquire_buffer(buffer_pool, tickets[i]); ASSERT_NOT_NULL(buf.buffer); } ASSERT_NULL(aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(8))); ASSERT_TRUE(aws_s3_buffer_pool_has_reservation_hold(buffer_pool)); for (size_t i = 0; i < 112; ++i) { aws_s3_buffer_pool_release_ticket(buffer_pool, tickets[i]); } ASSERT_NULL(aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(8))); aws_s3_buffer_pool_remove_reservation_hold(buffer_pool); struct aws_s3_buffer_pool_ticket *ticket = aws_s3_buffer_pool_reserve(buffer_pool, MB_TO_BYTES(8)); ASSERT_NOT_NULL(ticket); aws_s3_buffer_pool_release_ticket(buffer_pool, ticket); aws_s3_buffer_pool_destroy(buffer_pool); return 0; }; AWS_TEST_CASE(test_s3_buffer_pool_reservation_hold, s_test_s3_buffer_pool_reservation_hold) static int s_test_s3_buffer_pool_too_small(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), MB_TO_BYTES(512)); ASSERT_NULL(buffer_pool); ASSERT_INT_EQUALS(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, aws_last_error()); return 0; }; AWS_TEST_CASE(test_s3_buffer_pool_too_small, s_test_s3_buffer_pool_too_small) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_cancel_tests.c000066400000000000000000000755461456575232400241600ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_auto_ranged_get.h" #include "aws/s3/private/s3_auto_ranged_put.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include "aws/s3/s3_client.h" #include "s3_tester.h" #include enum s3_update_cancel_type { S3_UPDATE_CANCEL_TYPE_NO_CANCEL, S3_UPDATE_CANCEL_TYPE_MPU_CREATE_NOT_SENT, S3_UPDATE_CANCEL_TYPE_MPU_CREATE_COMPLETED, S3_UPDATE_CANCEL_TYPE_MPU_ONE_PART_COMPLETED, S3_UPDATE_CANCEL_TYPE_MPU_ALL_PARTS_COMPLETED, S3_UPDATE_CANCEL_TYPE_MPU_ONGOING_HTTP_REQUESTS, S3_UPDATE_CANCEL_TYPE_NUM_MPU_CANCEL_TYPES, S3_UPDATE_CANCEL_TYPE_MPD_NOTHING_SENT, S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_SENT, S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_COMPLETED, S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_SENT, S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_COMPLETED, S3_UPDATE_CANCEL_TYPE_MPD_ONE_PART_SENT, S3_UPDATE_CANCEL_TYPE_MPD_ONE_PART_COMPLETED, S3_UPDATE_CANCEL_TYPE_MPD_TWO_PARTS_COMPLETED, S3_UPDATE_CANCEL_TYPE_MPD_PENDING_STREAMING, }; struct s3_cancel_test_user_data { enum s3_update_cancel_type type; bool pause; struct aws_s3_meta_request_resume_token *resume_token; bool abort_successful; }; static bool s_s3_meta_request_update_cancel_test( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { AWS_PRECONDITION(meta_request); AWS_PRECONDITION(out_request); struct aws_s3_meta_request_test_results *results = meta_request->user_data; struct aws_s3_tester *tester = results->tester; struct s3_cancel_test_user_data *cancel_test_user_data = tester->user_data; struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl; struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; bool call_cancel_or_pause = false; bool block_update = false; aws_s3_meta_request_lock_synced_data(meta_request); switch (cancel_test_user_data->type) { case S3_UPDATE_CANCEL_TYPE_NO_CANCEL: break; case S3_UPDATE_CANCEL_TYPE_MPU_CREATE_NOT_SENT: call_cancel_or_pause = 
auto_ranged_put->synced_data.create_multipart_upload_sent != 0; break; case S3_UPDATE_CANCEL_TYPE_MPU_CREATE_COMPLETED: call_cancel_or_pause = auto_ranged_put->synced_data.create_multipart_upload_completed != 0; break; case S3_UPDATE_CANCEL_TYPE_MPU_ONE_PART_COMPLETED: call_cancel_or_pause = auto_ranged_put->synced_data.num_parts_completed == 1; block_update = !call_cancel_or_pause && auto_ranged_put->synced_data.num_parts_started == 1; break; case S3_UPDATE_CANCEL_TYPE_MPU_ALL_PARTS_COMPLETED: call_cancel_or_pause = auto_ranged_put->synced_data.num_parts_completed == auto_ranged_put->total_num_parts_from_content_length; break; case S3_UPDATE_CANCEL_TYPE_MPU_ONGOING_HTTP_REQUESTS: call_cancel_or_pause = !aws_linked_list_empty(&meta_request->synced_data.cancellable_http_streams_list); break; case S3_UPDATE_CANCEL_TYPE_NUM_MPU_CANCEL_TYPES: AWS_ASSERT(false); break; case S3_UPDATE_CANCEL_TYPE_MPD_NOTHING_SENT: call_cancel_or_pause = auto_ranged_get->synced_data.num_parts_requested == 0; break; case S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_SENT: call_cancel_or_pause = auto_ranged_get->synced_data.head_object_sent != 0; break; case S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_COMPLETED: call_cancel_or_pause = auto_ranged_get->synced_data.head_object_completed != 0; break; case S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_SENT: call_cancel_or_pause = auto_ranged_get->synced_data.object_range_known != 0 && auto_ranged_get->synced_data.num_parts_requested > 0; break; case S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_COMPLETED: call_cancel_or_pause = auto_ranged_get->synced_data.num_parts_completed > 0; break; case S3_UPDATE_CANCEL_TYPE_MPD_ONE_PART_SENT: call_cancel_or_pause = auto_ranged_get->synced_data.num_parts_requested == 1; break; case S3_UPDATE_CANCEL_TYPE_MPD_ONE_PART_COMPLETED: call_cancel_or_pause = auto_ranged_get->synced_data.num_parts_completed == 1; /* Prevent other parts from being queued while we wait for this one to complete. */ block_update = !call_cancel_or_pause && auto_ranged_get->synced_data.num_parts_requested == 1; break; case S3_UPDATE_CANCEL_TYPE_MPD_TWO_PARTS_COMPLETED: call_cancel_or_pause = auto_ranged_get->synced_data.num_parts_completed == 2; /* Prevent other parts from being queued while we wait for these two to complete. 
*/ block_update = !call_cancel_or_pause && auto_ranged_get->synced_data.num_parts_requested == 2; break; case S3_UPDATE_CANCEL_TYPE_MPD_PENDING_STREAMING: call_cancel_or_pause = aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) > 0; break; } aws_s3_meta_request_unlock_synced_data(meta_request); if (call_cancel_or_pause) { if (cancel_test_user_data->pause) { aws_s3_meta_request_pause(meta_request, &cancel_test_user_data->resume_token); } else { aws_s3_meta_request_cancel(meta_request); } } if (block_update) { return true; } struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; return original_meta_request_vtable->update(meta_request, flags, out_request); } static void s_s3_meta_request_finished_request_cancel_test( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_ASSERT(meta_request); AWS_ASSERT(request); struct aws_s3_meta_request_test_results *results = meta_request->user_data; struct aws_s3_tester *tester = results->tester; struct s3_cancel_test_user_data *cancel_test_user_data = tester->user_data; if (meta_request->type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT && request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD) { cancel_test_user_data->abort_successful = error_code == AWS_ERROR_SUCCESS; } struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; original_meta_request_vtable->finished_request(meta_request, request, error_code); } static struct aws_s3_meta_request *s_meta_request_factory_patch_update_cancel_test( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->update = s_s3_meta_request_update_cancel_test; patched_meta_request_vtable->finished_request = s_s3_meta_request_finished_request_cancel_test; return meta_request; } static int s3_cancel_test_helper_ex( struct aws_allocator *allocator, enum s3_update_cancel_type cancel_type, bool async_input_stream, bool pause) { AWS_ASSERT(allocator); struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct s3_cancel_test_user_data test_user_data = { .type = cancel_type, .pause = pause, }; tester.user_data = &test_user_data; size_t client_part_size = 0; if (cancel_type > S3_UPDATE_CANCEL_TYPE_NUM_MPU_CANCEL_TYPES) { client_part_size = 16 * 1024; } struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = client_part_size, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->meta_request_factory = s_meta_request_factory_patch_update_cancel_test; if (cancel_type < S3_UPDATE_CANCEL_TYPE_NUM_MPU_CANCEL_TYPES) { struct aws_s3_meta_request_test_results meta_request_test_results; 
aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .ensure_multipart = true, .async_input_stream = async_input_stream, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); int expected_error_code = pause ? AWS_ERROR_S3_PAUSED : AWS_ERROR_S3_CANCELED; ASSERT_INT_EQUALS(expected_error_code, meta_request_test_results.finished_error_code); if (cancel_type == S3_UPDATE_CANCEL_TYPE_MPU_ONGOING_HTTP_REQUESTS) { /* Check the metric and see we have at least a request completed with AWS_ERROR_S3_CANCELED */ /* The meta request completed, we can access the synced data now. */ struct aws_array_list *metrics_list = &meta_request_test_results.synced_data.metrics; bool cancelled_successfully = false; for (size_t i = 0; i < aws_array_list_length(metrics_list); ++i) { struct aws_s3_request_metrics *metrics = NULL; aws_array_list_get_at(metrics_list, (void **)&metrics, i); if (metrics->crt_info_metrics.error_code == expected_error_code) { cancelled_successfully = true; break; } } ASSERT_TRUE(cancelled_successfully); } aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); if (cancel_type != S3_UPDATE_CANCEL_TYPE_MPU_CREATE_NOT_SENT && !pause) { ASSERT_TRUE(test_user_data.abort_successful); } if (pause) { /* Resume the paused request. */ ASSERT_NOT_NULL(test_user_data.resume_token); test_user_data.type = S3_UPDATE_CANCEL_TYPE_NO_CANCEL; struct aws_s3_tester_meta_request_options resume_options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .put_options = { .ensure_multipart = true, .async_input_stream = async_input_stream, .resume_token = test_user_data.resume_token, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &resume_options, NULL)); aws_s3_meta_request_resume_token_release(test_user_data.resume_token); } /* TODO: perform additional verification with list-multipart-uploads */ } else { struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); /* Specify a range without start-range to trigger HeadRequest */ const struct aws_byte_cursor range = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=-32767"); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { /* Note 1: 10MB object with 16KB parts, so that tests have many requests in-flight. * We want to try and stress stuff like parts arriving out of order. 
*/ .object_path = g_pre_existing_object_10MB, }, }; switch (cancel_type) { case S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_SENT: options.get_options.object_range = range; break; case S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_COMPLETED: options.get_options.object_range = range; break; case S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_SENT: options.get_options.object_path = g_pre_existing_empty_object; break; case S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_COMPLETED: options.get_options.object_path = g_pre_existing_empty_object; break; default: break; } ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_S3_CANCELED); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } static int s3_cancel_test_helper(struct aws_allocator *allocator, enum s3_update_cancel_type cancel_type) { return s3_cancel_test_helper_ex(allocator, cancel_type, false /*async_input_stream*/, false /*pause*/); } static int s3_cancel_test_helper_fc( struct aws_allocator *allocator, enum s3_update_cancel_type cancel_type, struct aws_byte_cursor object_path, enum aws_s3_checksum_algorithm checksum_algorithm) { AWS_ASSERT(allocator); struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct s3_cancel_test_user_data test_user_data = { .type = cancel_type, }; tester.user_data = &test_user_data; size_t client_part_size = 0; if (cancel_type > S3_UPDATE_CANCEL_TYPE_NUM_MPU_CANCEL_TYPES) { client_part_size = 16 * 1024; } struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = client_part_size, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->meta_request_factory = s_meta_request_factory_patch_update_cancel_test; if (cancel_type < S3_UPDATE_CANCEL_TYPE_NUM_MPU_CANCEL_TYPES) { struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .checksum_algorithm = checksum_algorithm, .validate_get_response_checksum = false, .put_options = { .ensure_multipart = true, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_S3_CANCELED); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); if (cancel_type != S3_UPDATE_CANCEL_TYPE_MPU_CREATE_NOT_SENT) { ASSERT_TRUE(test_user_data.abort_successful); } /* TODO: perform additional verification with list-multipart-uploads */ } else { struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); // Range for the second 16k const struct aws_byte_cursor range = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=16384-32767"); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, 
.validate_get_response_checksum = true, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = object_path, }, }; switch (cancel_type) { case S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_SENT: options.get_options.object_range = range; break; case S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_COMPLETED: options.get_options.object_range = range; break; case S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_SENT: options.get_options.object_path = g_pre_existing_empty_object; break; case S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_COMPLETED: options.get_options.object_path = g_pre_existing_empty_object; break; default: break; } ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_S3_CANCELED); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_cancel_mpu_one_part_completed_fc, s_test_s3_cancel_mpu_one_part_completed_fc) static int s_test_s3_cancel_mpu_one_part_completed_fc(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/cancel/upload_one_part_complete_fc.txt"))); ASSERT_SUCCESS(s3_cancel_test_helper_fc( allocator, S3_UPDATE_CANCEL_TYPE_MPU_ONE_PART_COMPLETED, aws_byte_cursor_from_buf(&path_buf), AWS_SCA_CRC32)); aws_byte_buf_clean_up(&path_buf); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_one_part_completed_fc, s_test_s3_cancel_mpd_one_part_completed_fc) static int s_test_s3_cancel_mpd_one_part_completed_fc(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper_fc( allocator, S3_UPDATE_CANCEL_TYPE_MPD_ONE_PART_COMPLETED, g_pre_existing_object_10MB, AWS_SCA_CRC32)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpu_create_not_sent, s_test_s3_cancel_mpu_create_not_sent) static int s_test_s3_cancel_mpu_create_not_sent(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPU_CREATE_NOT_SENT)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpu_create_completed, s_test_s3_cancel_mpu_create_completed) static int s_test_s3_cancel_mpu_create_completed(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPU_CREATE_COMPLETED)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpu_one_part_completed, s_test_s3_cancel_mpu_one_part_completed) static int s_test_s3_cancel_mpu_one_part_completed(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPU_ONE_PART_COMPLETED)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpu_one_part_completed_async, s_test_s3_cancel_mpu_one_part_completed_async) static int s_test_s3_cancel_mpu_one_part_completed_async(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper_ex( allocator, S3_UPDATE_CANCEL_TYPE_MPU_ONE_PART_COMPLETED, true /*async_input_stream*/, false /*pause*/)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpu_all_parts_completed, s_test_s3_cancel_mpu_all_parts_completed) static int s_test_s3_cancel_mpu_all_parts_completed(struct aws_allocator *allocator, 
void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPU_ALL_PARTS_COMPLETED)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpu_cancellable_requests, s_test_s3_cancel_mpu_cancellable_requests) static int s_test_s3_cancel_mpu_cancellable_requests(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPU_ONGOING_HTTP_REQUESTS)); return 0; } AWS_TEST_CASE(test_s3_pause_mpu_cancellable_requests, s_test_s3_pause_mpu_cancellable_requests) static int s_test_s3_pause_mpu_cancellable_requests(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper_ex( allocator, S3_UPDATE_CANCEL_TYPE_MPU_ONGOING_HTTP_REQUESTS, false /*async_input_stream*/, true /*pause*/)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_nothing_sent, s_test_s3_cancel_mpd_nothing_sent) static int s_test_s3_cancel_mpd_nothing_sent(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_NOTHING_SENT)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_one_part_sent, s_test_s3_cancel_mpd_one_part_sent) static int s_test_s3_cancel_mpd_one_part_sent(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_ONE_PART_SENT)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_one_part_completed, s_test_s3_cancel_mpd_one_part_completed) static int s_test_s3_cancel_mpd_one_part_completed(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_ONE_PART_COMPLETED)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_two_parts_completed, s_test_s3_cancel_mpd_two_parts_completed) static int s_test_s3_cancel_mpd_two_parts_completed(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_TWO_PARTS_COMPLETED)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_head_object_sent, s_test_s3_cancel_mpd_head_object_sent) static int s_test_s3_cancel_mpd_head_object_sent(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_SENT)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_head_object_completed, s_test_s3_cancel_mpd_head_object_completed) static int s_test_s3_cancel_mpd_head_object_completed(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_HEAD_OBJECT_COMPLETED)); return 0; } AWS_TEST_CASE( test_s3_cancel_mpd_empty_object_get_with_part_number_1_sent, s_test_s3_cancel_mpd_empty_object_get_with_part_number_1_sent) static int s_test_s3_cancel_mpd_empty_object_get_with_part_number_1_sent(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS( s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_SENT)); return 0; } AWS_TEST_CASE( test_s3_cancel_mpd_empty_object_get_with_part_number_1_completed, s_test_s3_cancel_mpd_empty_object_get_with_part_number_1_completed) static int s_test_s3_cancel_mpd_empty_object_get_with_part_number_1_completed( struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS( s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_GET_EMPTY_OBJECT_WITH_PART_NUMBER_1_COMPLETED)); return 0; } AWS_TEST_CASE(test_s3_cancel_mpd_pending_streaming, s_test_s3_cancel_mpd_pending_streaming) 
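/*
 * Illustrative sketch, not part of the upstream test file: every cancel/pause test above
 * follows the same two-step pattern -- inspect the meta request's synced state under the
 * lock to decide *when* to act, then call cancel or pause only after the lock is released
 * (as s_s3_meta_request_update_cancel_test does), presumably to avoid re-entering the meta
 * request while holding its synced-data lock. The helper name s_sketch_cancel_or_pause is
 * hypothetical; the types and calls it uses all appear in the tests above.
 */
static void s_sketch_cancel_or_pause(
    struct aws_s3_meta_request *meta_request,
    struct s3_cancel_test_user_data *cancel_test_user_data) {

    if (cancel_test_user_data->pause) {
        /* Pause: the resume token captured here is later fed back through
         * put_options.resume_token (see the pause branch of s3_cancel_test_helper_ex),
         * and the resumed upload is expected to succeed. */
        aws_s3_meta_request_pause(meta_request, &cancel_test_user_data->resume_token);
    } else {
        /* Cancel: the meta request is expected to finish with AWS_ERROR_S3_CANCELED, and
         * for multipart uploads an AbortMultipartUpload is expected to follow (tracked via
         * abort_successful in s_s3_meta_request_finished_request_cancel_test). */
        aws_s3_meta_request_cancel(meta_request);
    }
}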
static int s_test_s3_cancel_mpd_pending_streaming(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s3_cancel_test_helper(allocator, S3_UPDATE_CANCEL_TYPE_MPD_PENDING_STREAMING)); return 0; } struct test_s3_cancel_prepare_user_data { uint32_t request_prepare_counters[AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_MAX]; }; /* Data for async cancel-prepare-meta-request job */ struct test_s3_cancel_prepare_meta_request_prepare_request_job { struct aws_allocator *allocator; struct aws_s3_request *request; struct aws_future_void *original_future; /* original future that we're intercepting and patching */ struct aws_future_void *patched_future; /* patched future to set when this job completes */ }; static void s_test_s3_cancel_prepare_meta_request_prepare_request_on_original_done(void *user_data); static struct aws_future_void *s_test_s3_cancel_prepare_meta_request_prepare_request(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; AWS_ASSERT(meta_request != NULL); struct aws_s3_meta_request_test_results *results = meta_request->user_data; AWS_ASSERT(results != NULL); struct aws_s3_tester *tester = results->tester; AWS_ASSERT(tester != NULL); struct aws_future_void *patched_future = aws_future_void_new(meta_request->allocator); struct test_s3_cancel_prepare_meta_request_prepare_request_job *patched_prep = aws_mem_calloc( meta_request->allocator, 1, sizeof(struct test_s3_cancel_prepare_meta_request_prepare_request_job)); patched_prep->allocator = meta_request->allocator; patched_prep->request = request; patched_prep->patched_future = aws_future_void_acquire(patched_future); struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; patched_prep->original_future = original_meta_request_vtable->prepare_request(request); aws_future_void_register_callback( patched_prep->original_future, s_test_s3_cancel_prepare_meta_request_prepare_request_on_original_done, patched_prep); return patched_future; } static void s_test_s3_cancel_prepare_meta_request_prepare_request_on_original_done(void *user_data) { struct test_s3_cancel_prepare_meta_request_prepare_request_job *patched_prep = user_data; struct aws_s3_request *request = patched_prep->request; struct aws_s3_meta_request *meta_request = request->meta_request; struct aws_s3_meta_request_test_results *results = meta_request->user_data; struct aws_s3_tester *tester = results->tester; struct test_s3_cancel_prepare_user_data *test_user_data = tester->user_data; int error_code = aws_future_void_get_error(patched_prep->original_future); if (error_code != AWS_ERROR_SUCCESS) { aws_future_void_set_error(patched_prep->patched_future, error_code); goto finish; } ++test_user_data->request_prepare_counters[request->request_tag]; /* Cancel after the first part is prepared, preventing any additional parts from being prepared. 
*/ if (request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART && test_user_data->request_prepare_counters[AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART] == 1) { aws_s3_meta_request_cancel(meta_request); } aws_future_void_set_result(patched_prep->patched_future); finish: aws_future_void_release(patched_prep->original_future); aws_future_void_release(patched_prep->patched_future); aws_mem_release(patched_prep->allocator, patched_prep); } static struct aws_s3_meta_request *s_test_s3_cancel_prepare_meta_request_factory( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->prepare_request = s_test_s3_cancel_prepare_meta_request_prepare_request; return meta_request; } AWS_TEST_CASE(test_s3_cancel_prepare, s_test_s3_cancel_prepare) static int s_test_s3_cancel_prepare(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct test_s3_cancel_prepare_user_data test_user_data; AWS_ZERO_STRUCT(test_user_data); tester.user_data = &test_user_data; struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->meta_request_factory = s_test_s3_cancel_prepare_meta_request_factory; { struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .ensure_multipart = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, NULL)); } ASSERT_TRUE( test_user_data.request_prepare_counters[AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD] == 1); ASSERT_TRUE(test_user_data.request_prepare_counters[AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART] == 1); ASSERT_TRUE( test_user_data.request_prepare_counters[AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD] == 1); ASSERT_TRUE( test_user_data.request_prepare_counters[AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD] == 0); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_checksum_stream_test.c000066400000000000000000000265521456575232400257160ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_checksums.h" #include "s3_tester.h" #include #include #include #include static int compare_checksum_stream(struct aws_allocator *allocator, struct aws_byte_cursor *input, size_t buffer_size) { struct aws_byte_buf compute_checksum_output; struct aws_byte_buf compute_encoded_checksum_output; struct aws_byte_buf stream_checksum_output; struct aws_byte_buf read_buf; size_t encoded_len = 0; aws_byte_buf_init(&read_buf, allocator, buffer_size); for (int algorithm = AWS_SCA_INIT; algorithm <= AWS_SCA_END; algorithm++) { aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(algorithm), &encoded_len); aws_byte_buf_init(&compute_checksum_output, allocator, aws_get_digest_size_from_algorithm(algorithm)); aws_byte_buf_init(&stream_checksum_output, allocator, encoded_len); aws_byte_buf_init(&compute_encoded_checksum_output, allocator, encoded_len); aws_checksum_compute(allocator, algorithm, input, &compute_checksum_output, 0); struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&compute_checksum_output); aws_base64_encode(&checksum_result_cursor, &compute_encoded_checksum_output); struct aws_input_stream *cursor_stream = aws_input_stream_new_from_cursor(allocator, input); struct aws_input_stream *stream = aws_checksum_stream_new(allocator, cursor_stream, algorithm, &stream_checksum_output); aws_input_stream_release(cursor_stream); struct aws_stream_status status; AWS_ZERO_STRUCT(status); while (!status.is_end_of_stream) { ASSERT_SUCCESS(aws_input_stream_read(stream, &read_buf)); read_buf.len = 0; ASSERT_TRUE(aws_input_stream_get_status(stream, &status) == 0); } aws_input_stream_release(stream); ASSERT_TRUE(aws_byte_buf_eq(&compute_encoded_checksum_output, &stream_checksum_output)); aws_byte_buf_clean_up(&compute_checksum_output); aws_byte_buf_clean_up(&stream_checksum_output); aws_byte_buf_clean_up(&compute_encoded_checksum_output); } aws_byte_buf_clean_up(&read_buf); return AWS_OP_SUCCESS; } AWS_STATIC_STRING_FROM_LITERAL(s_0pre_chunk, "0\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_3pre_chunk, "3\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_56pre_chunk, "38\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_112pre_chunk, "70\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_11pre_chunk, "B\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_final_chunk, "\r\n0\r\n"); AWS_STATIC_STRING_FROM_LITERAL(s_colon, ":"); AWS_STATIC_STRING_FROM_LITERAL(s_post_trailer, "\r\n\r\n"); static int s_compute_chunk_stream( struct aws_allocator *allocator, const struct aws_string *pre_chunk, struct aws_byte_cursor *input, struct aws_byte_buf *output, enum aws_s3_checksum_algorithm algorithm, struct aws_byte_buf *encoded_checksum_output) { struct aws_byte_cursor pre_chunk_cursor = aws_byte_cursor_from_string(pre_chunk); struct aws_byte_cursor final_chunk = aws_byte_cursor_from_string(s_final_chunk); const struct aws_byte_cursor *checksum_header_name = aws_get_http_header_name_from_algorithm(algorithm); struct aws_byte_cursor colon = aws_byte_cursor_from_string(s_colon); struct aws_byte_cursor post_trailer = aws_byte_cursor_from_string(s_post_trailer); struct aws_byte_buf checksum_result; aws_byte_buf_init(&checksum_result, allocator, aws_get_digest_size_from_algorithm(algorithm)); if (aws_byte_buf_append(output, &pre_chunk_cursor)) { return AWS_OP_ERR; } if (aws_byte_buf_append(output, input)) { return AWS_OP_ERR; } if (input->len > 0) { if (aws_byte_buf_append(output, &final_chunk)) { return AWS_OP_ERR; } } if (aws_byte_buf_append(output, checksum_header_name)) { return AWS_OP_ERR; } if 
(aws_byte_buf_append(output, &colon)) { return AWS_OP_ERR; } if (aws_checksum_compute(allocator, algorithm, input, &checksum_result, 0)) { return AWS_OP_ERR; } struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&checksum_result); if (aws_base64_encode(&checksum_result_cursor, encoded_checksum_output)) { return AWS_OP_ERR; } if (aws_base64_encode(&checksum_result_cursor, output)) { return AWS_OP_ERR; } if (aws_byte_buf_append(output, &post_trailer)) { return AWS_OP_ERR; } aws_byte_buf_clean_up(&checksum_result); return AWS_OP_SUCCESS; } static int s_stream_chunk( struct aws_allocator *allocator, struct aws_byte_cursor *input, struct aws_byte_buf *read_buf, struct aws_byte_buf *output, enum aws_s3_checksum_algorithm algorithm, struct aws_byte_buf *checksum_result) { struct aws_input_stream *cursor_stream = aws_input_stream_new_from_cursor(allocator, input); struct aws_input_stream *stream = aws_chunk_stream_new(allocator, cursor_stream, algorithm, checksum_result); aws_input_stream_release(cursor_stream); struct aws_stream_status status; AWS_ZERO_STRUCT(status); while (!status.is_end_of_stream) { ASSERT_SUCCESS(aws_input_stream_read(stream, read_buf)); struct aws_byte_cursor read_cursor = aws_byte_cursor_from_buf(read_buf); aws_byte_buf_append(output, &read_cursor); read_buf->len = 0; ASSERT_TRUE(aws_input_stream_get_status(stream, &status) == 0); } aws_input_stream_release(stream); return AWS_OP_SUCCESS; } static int compare_chunk_stream( struct aws_allocator *allocator, const struct aws_string *pre_chunk, struct aws_byte_cursor *input, size_t buffer_size) { struct aws_byte_buf compute_chunk_output; struct aws_byte_buf stream_chunk_output; struct aws_byte_buf stream_chunk_output1; struct aws_byte_buf streamed_encoded_checksum; struct aws_byte_buf computed_encoded_checksum; size_t len_no_checksum = pre_chunk->len + input->len + s_final_chunk->len + s_post_trailer->len + s_colon->len; size_t encoded_len = 0; struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, buffer_size); for (int algorithm = AWS_SCA_INIT; algorithm <= AWS_SCA_END; algorithm++) { aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(algorithm), &encoded_len); size_t total_len = len_no_checksum + encoded_len + aws_get_http_header_name_from_algorithm(algorithm)->len; aws_byte_buf_init(&computed_encoded_checksum, allocator, encoded_len); aws_byte_buf_init(&compute_chunk_output, allocator, total_len); aws_byte_buf_init(&stream_chunk_output, allocator, total_len); aws_byte_buf_init(&stream_chunk_output1, allocator, total_len); ASSERT_SUCCESS(s_compute_chunk_stream( allocator, pre_chunk, input, &compute_chunk_output, algorithm, &computed_encoded_checksum)); ASSERT_SUCCESS(s_stream_chunk(allocator, input, &read_buf, &stream_chunk_output, algorithm, NULL)); ASSERT_SUCCESS( s_stream_chunk(allocator, input, &read_buf, &stream_chunk_output1, algorithm, &streamed_encoded_checksum)); ASSERT_TRUE(aws_byte_buf_eq(&compute_chunk_output, &stream_chunk_output)); ASSERT_TRUE(aws_byte_buf_eq(&compute_chunk_output, &stream_chunk_output1)); ASSERT_TRUE(aws_byte_buf_eq(&computed_encoded_checksum, &streamed_encoded_checksum)); aws_byte_buf_clean_up(&compute_chunk_output); aws_byte_buf_clean_up(&stream_chunk_output); aws_byte_buf_clean_up(&stream_chunk_output1); aws_byte_buf_clean_up(&streamed_encoded_checksum); aws_byte_buf_clean_up(&computed_encoded_checksum); } aws_byte_buf_clean_up(&read_buf); return AWS_OP_SUCCESS; } static int s_verify_checksum_stream_fn(struct aws_allocator *allocator, void 
*ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input0 = aws_byte_cursor_from_c_str(""); struct aws_byte_cursor input1 = aws_byte_cursor_from_c_str("abc"); struct aws_byte_cursor input2 = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); struct aws_byte_cursor input3 = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_byte_cursor input4 = aws_byte_cursor_from_c_str("Hello world"); for (size_t buffer_size = 1; buffer_size < input0.len + 3; buffer_size++) { ASSERT_SUCCESS(compare_checksum_stream(allocator, &input0, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input1.len + 3; buffer_size++) { ASSERT_SUCCESS(compare_checksum_stream(allocator, &input1, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input2.len + 3; buffer_size++) { ASSERT_SUCCESS(compare_checksum_stream(allocator, &input2, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input3.len + 3; buffer_size++) { ASSERT_SUCCESS(compare_checksum_stream(allocator, &input3, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input4.len + 3; buffer_size++) { ASSERT_SUCCESS(compare_checksum_stream(allocator, &input4, buffer_size)); } aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(verify_checksum_stream, s_verify_checksum_stream_fn) static int s_verify_chunk_stream_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input0 = aws_byte_cursor_from_c_str(""); struct aws_byte_cursor input1 = aws_byte_cursor_from_c_str("abc"); struct aws_byte_cursor input2 = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); struct aws_byte_cursor input3 = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_byte_cursor input4 = aws_byte_cursor_from_c_str("Hello world"); for (size_t buffer_size = 1; buffer_size < input0.len + 70; buffer_size++) { ASSERT_SUCCESS(compare_chunk_stream(allocator, s_0pre_chunk, &input0, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input1.len + 70; buffer_size++) { ASSERT_SUCCESS(compare_chunk_stream(allocator, s_3pre_chunk, &input1, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input2.len + 70; buffer_size++) { ASSERT_SUCCESS(compare_chunk_stream(allocator, s_56pre_chunk, &input2, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input3.len + 70; buffer_size++) { ASSERT_SUCCESS(compare_chunk_stream(allocator, s_112pre_chunk, &input3, buffer_size)); } for (size_t buffer_size = 1; buffer_size < input4.len + 70; buffer_size++) { ASSERT_SUCCESS(compare_chunk_stream(allocator, s_11pre_chunk, &input4, buffer_size)); } aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(verify_chunk_stream, s_verify_chunk_stream_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_checksums_crc32_tests.c000066400000000000000000000214721456575232400257010ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_checksums.h" #include #include #include #define AWS_CRC32_LEN 4 /* * these are the NIST test vectors, as compiled here: * https://www.di-mgt.com.au/sha_testvectors.html */ static int s_crc32_nist_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abc"); uint8_t expected[] = {0x35, 0x24, 0x41, 0xc2}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32); } AWS_TEST_CASE(crc32_nist_test_case_1, s_crc32_nist_test_case_1_fn) static int s_crc32_nist_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str(""); uint8_t expected[] = {0x00, 0x00, 0x00, 0x00}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32); } AWS_TEST_CASE(crc32_nist_test_case_2, s_crc32_nist_test_case_2_fn) static int s_crc32_nist_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); uint8_t expected[] = {0x17, 0x1a, 0x3f, 0x5f}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32); } AWS_TEST_CASE(crc32_nist_test_case_3, s_crc32_nist_test_case_3_fn) static int s_crc32_nist_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = {0x19, 0x1f, 0x33, 0x49}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32); } AWS_TEST_CASE(crc32_nist_test_case_4, s_crc32_nist_test_case_4_fn) static int s_crc32_nist_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_CRC32_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = {0xdc, 0x25, 0xbf, 0xbc}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32_nist_test_case_5, s_crc32_nist_test_case_5_fn) static int s_crc32_nist_test_case_5_truncated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = 
aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t expected[] = {0xdc, 0x25}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); uint8_t output[AWS_CRC32_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected_buf.len); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 2)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32_nist_test_case_5_truncated, s_crc32_nist_test_case_5_truncated_fn) static int s_crc32_nist_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"); for (size_t i = 0; i < 16777216; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_CRC32_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = {0x55, 0x1c, 0xbc, 0x00}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32_nist_test_case_6, s_crc32_nist_test_case_6_fn) static int s_crc32_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t output[AWS_CRC32_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_checksum_compute(allocator, AWS_SCA_CRC32, &input, &output_buf, 0)); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32_test_invalid_buffer, s_crc32_test_invalid_buffer_fn) static int s_crc32_test_oneshot_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = {0x19, 0x1f, 0x33, 0x49}; uint8_t output[AWS_CRC32_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_compute(allocator, AWS_SCA_CRC32, &input, &output_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output_buf.buffer, output_buf.len); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32_test_oneshot, s_crc32_test_oneshot_fn) static int s_crc32_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" 
"nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32); ASSERT_NOT_NULL(checksum); uint8_t output[AWS_CRC32_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_update(checksum, &input)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_finalize(checksum, &output_buf, 0)); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32_test_invalid_state, s_crc32_test_invalid_state_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_checksums_crc32c_tests.c000066400000000000000000000215511456575232400260420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_checksums.h" #include #include #include #define AWS_CRC32C_LEN 4 /* * these are the NIST test vectors, as compiled here: * https://www.di-mgt.com.au/sha_testvectors.html */ static int s_crc32c_nist_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abc"); uint8_t expected[] = {0x36, 0x4b, 0x3f, 0xb7}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32C); } AWS_TEST_CASE(crc32c_nist_test_case_1, s_crc32c_nist_test_case_1_fn) static int s_crc32c_nist_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str(""); uint8_t expected[] = {0x00, 0x00, 0x00, 0x00}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32C); } AWS_TEST_CASE(crc32c_nist_test_case_2, s_crc32c_nist_test_case_2_fn) static int s_crc32c_nist_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); uint8_t expected[] = {0x07, 0x13, 0x25, 0xf5}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32C); } AWS_TEST_CASE(crc32c_nist_test_case_3, s_crc32c_nist_test_case_3_fn) static int s_crc32c_nist_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = {0x3f, 0x60, 0xa4, 0xb9}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC32C); } AWS_TEST_CASE(crc32c_nist_test_case_4, s_crc32c_nist_test_case_4_fn) static int s_crc32c_nist_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32C); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = 
aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_CRC32C_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = {0x43, 0x6f, 0xe2, 0x40}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32c_nist_test_case_5, s_crc32c_nist_test_case_5_fn) static int s_crc32c_nist_test_case_5_truncated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32C); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t expected[] = {0x43, 0x6f}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); uint8_t output[AWS_CRC32C_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected_buf.len); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 2)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32c_nist_test_case_5_truncated, s_crc32c_nist_test_case_5_truncated_fn) static int s_crc32c_nist_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32C); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"); for (size_t i = 0; i < 16777216; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_CRC32C_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = {0x0d, 0xcd, 0x03, 0xc6}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32c_nist_test_case_6, s_crc32c_nist_test_case_6_fn) static int s_crc32c_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t output[AWS_CRC32C_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_checksum_compute(allocator, AWS_SCA_CRC32C, &input, &output_buf, 0)); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32c_test_invalid_buffer, s_crc32c_test_invalid_buffer_fn) static int s_crc32c_test_oneshot_fn(struct aws_allocator *allocator, 
void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = {0x3f, 0x60, 0xa4, 0xb9}; uint8_t output[AWS_CRC32C_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_compute(allocator, AWS_SCA_CRC32C, &input, &output_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output_buf.buffer, output_buf.len); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32c_test_oneshot, s_crc32c_test_oneshot_fn) static int s_crc32c_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC32C); ASSERT_NOT_NULL(checksum); uint8_t output[AWS_CRC32C_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_update(checksum, &input)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_finalize(checksum, &output_buf, 0)); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(crc32c_test_invalid_state, s_crc32c_test_invalid_state_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_checksums_sha1_tests.c000066400000000000000000000232541456575232400256210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_checksums.h" #include #include #include #include /* * these are the NIST test vectors, as compiled here: * https://www.di-mgt.com.au/sha_testvectors.html */ static int s_sha1_nist_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abc"); uint8_t expected[] = { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA1); } AWS_TEST_CASE(sha1_nist_test_case_1, s_sha1_nist_test_case_1_fn) static int s_sha1_nist_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str(""); uint8_t expected[] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA1); } AWS_TEST_CASE(sha1_nist_test_case_2, s_sha1_nist_test_case_2_fn) static int s_sha1_nist_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); uint8_t expected[] = { 0x84, 0x98, 0x3e, 0x44, 0x1c, 0x3b, 0xd2, 0x6e, 0xba, 0xae, 0x4a, 0xa1, 0xf9, 0x51, 0x29, 0xe5, 0xe5, 0x46, 0x70, 0xf1, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA1); } AWS_TEST_CASE(sha1_nist_test_case_3, s_sha1_nist_test_case_3_fn) static int s_sha1_nist_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = { 0xa4, 0x9b, 0x24, 0x46, 0xa0, 0x2c, 0x64, 0x5b, 0xf4, 0x19, 0xf9, 0x95, 0xb6, 0x70, 0x91, 0x25, 0x3a, 0x04, 0xa2, 0x59, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA1); } AWS_TEST_CASE(sha1_nist_test_case_4, s_sha1_nist_test_case_4_fn) static int s_sha1_nist_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA1); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = { 0x34, 0xaa, 0x97, 0x3c, 0xd4, 0xc4, 0xda, 0xa4, 0xf6, 0x1e, 0xeb, 0x2b, 0xdb, 0xad, 0x27, 0x31, 0x65, 0x34, 0x01, 0x6f, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, 
output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_nist_test_case_5, s_sha1_nist_test_case_5_fn) static int s_sha1_nist_test_case_5_truncated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA1); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t expected[] = { 0x34, 0xaa, 0x97, 0x3c, 0xd4, 0xc4, 0xda, 0xa4, 0xf6, 0x1e, 0xeb, 0x2b, 0xdb, 0xad, 0x27, 0x31}; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected_buf.len); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 16)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_nist_test_case_5_truncated, s_sha1_nist_test_case_5_truncated_fn) static int s_sha1_nist_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA1); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"); for (size_t i = 0; i < 16777216; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = { 0x77, 0x89, 0xf0, 0xc9, 0xef, 0x7b, 0xfc, 0x40, 0xd9, 0x33, 0x11, 0x14, 0x3d, 0xfb, 0xe6, 0x9e, 0x20, 0x17, 0xf5, 0x92, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_nist_test_case_6, s_sha1_nist_test_case_6_fn) static int s_sha1_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_checksum_compute(allocator, AWS_SCA_SHA1, &input, &output_buf, 0)); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_test_invalid_buffer, s_sha1_test_invalid_buffer_fn) static int s_sha1_test_oneshot_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = { 0xa4, 0x9b, 0x24, 0x46, 0xa0, 0x2c, 0x64, 0x5b, 0xf4, 0x19, 0xf9, 0x95, 0xb6, 0x70, 0x91, 0x25, 0x3a, 0x04, 0xa2, 0x59, }; uint8_t 
output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_compute(allocator, AWS_SCA_SHA1, &input, &output_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output_buf.buffer, output_buf.len); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_test_oneshot, s_sha1_test_oneshot_fn) static int s_sha1_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA1); ASSERT_NOT_NULL(checksum); uint8_t output[AWS_SHA1_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_update(checksum, &input)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_finalize(checksum, &output_buf, 0)); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha1_test_invalid_state, s_sha1_test_invalid_state_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_checksums_sha256_tests.c000066400000000000000000000245761456575232400260050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_checksums.h" #include #include #include #include /* * these are the NIST test vectors, as compiled here: * https://www.di-mgt.com.au/sha_testvectors.html */ static int s_sha256_nist_test_case_1_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abc"); uint8_t expected[] = { 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA256); } AWS_TEST_CASE(sha256_nist_test_case_1, s_sha256_nist_test_case_1_fn) static int s_sha256_nist_test_case_2_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str(""); uint8_t expected[] = { 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA256); } AWS_TEST_CASE(sha256_nist_test_case_2, s_sha256_nist_test_case_2_fn) static int s_sha256_nist_test_case_3_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"); uint8_t expected[] = { 0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 
0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA256); } AWS_TEST_CASE(sha256_nist_test_case_3, s_sha256_nist_test_case_3_fn) static int s_sha256_nist_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = { 0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80, 0x03, 0x6c, 0xe5, 0x9e, 0x7b, 0x04, 0x92, 0x37, 0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51, 0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_SHA256); } AWS_TEST_CASE(sha256_nist_test_case_4, s_sha256_nist_test_case_4_fn) static int s_sha256_nist_test_case_5_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA256); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = { 0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7, 0xe2, 0x84, 0xd7, 0x3e, 0x67, 0xf1, 0x80, 0x9a, 0x48, 0xa4, 0x97, 0x20, 0x0e, 0x04, 0x6d, 0x39, 0xcc, 0xc7, 0x11, 0x2c, 0xd0, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_nist_test_case_5, s_sha256_nist_test_case_5_fn) static int s_sha256_nist_test_case_5_truncated_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA256); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a"); for (size_t i = 0; i < 1000000; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t expected[] = { 0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7, 0xe2, 0x84, 0xd7, 0x3e, 0x67, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected_buf.len); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 16)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_nist_test_case_5_truncated, s_sha256_nist_test_case_5_truncated_fn) static int s_sha256_nist_test_case_6_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA256); 
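/* NIST "extremely long" vector: the 64-byte pattern below is fed through
 * aws_checksum_update() 16,777,216 times (1 GiB of input in total) to exercise
 * incremental hashing across many update calls before a single finalize. */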
ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"); for (size_t i = 0; i < 16777216; ++i) { ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); } uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); uint8_t expected[] = { 0x50, 0xe7, 0x2a, 0x0e, 0x26, 0x44, 0x2f, 0xe2, 0x55, 0x2d, 0xc3, 0x93, 0x8a, 0xc5, 0x86, 0x58, 0x22, 0x8c, 0x0c, 0xbf, 0xb1, 0xd2, 0xca, 0x87, 0x2a, 0xe4, 0x35, 0x26, 0x6f, 0xcd, 0x05, 0x5e, }; struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_nist_test_case_6, s_sha256_nist_test_case_6_fn) static int s_sha256_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 1; ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_checksum_compute(allocator, AWS_SCA_SHA256, &input, &output_buf, 0)); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_test_invalid_buffer, s_sha256_test_invalid_buffer_fn) static int s_sha256_test_oneshot_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); uint8_t expected[] = { 0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80, 0x03, 0x6c, 0xe5, 0x9e, 0x7b, 0x04, 0x92, 0x37, 0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51, 0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1, }; uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_compute(allocator, AWS_SCA_SHA256, &input, &output_buf, 0)); ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output_buf.buffer, output_buf.len); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_test_oneshot, s_sha256_test_oneshot_fn) static int s_sha256_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" "klmghijklmnhijklmnoijklmnopjklmnopqklm" "nopqrlmnopqrsmnopqrstnopqrstu"); struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_SHA256); ASSERT_NOT_NULL(checksum); uint8_t output[AWS_SHA256_LEN] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); output_buf.len = 0; ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, 0)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_update(checksum, &input)); ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_finalize(checksum, &output_buf, 0)); aws_checksum_destroy(checksum); 
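    /* The two ASSERT_ERROR checks above capture the intended contract: once aws_checksum_finalize()
     * has run, the checksum object is consumed, and any further aws_checksum_update() or
     * aws_checksum_finalize() call must fail with AWS_ERROR_INVALID_STATE instead of silently
     * producing a stale digest. When no incremental updates are needed, the one-shot
     * aws_checksum_compute(allocator, AWS_SCA_SHA256, &input, &output_buf, 0) path shown in
     * s_sha256_test_oneshot_fn above sidesteps that lifecycle entirely. */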
aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(sha256_test_invalid_state, s_sha256_test_invalid_state_fn) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_checksums_test_case_helper.h000066400000000000000000000033761456575232400270640ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_checksums.h" #include typedef struct aws_s3_checksum *aws_checksum_new_fn( struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm); static inline int s_verify_checksum_test_case( struct aws_allocator *allocator, struct aws_byte_cursor *input, struct aws_byte_cursor *expected, aws_checksum_new_fn *new_fn, enum aws_s3_checksum_algorithm algorithm) { aws_s3_library_init(allocator); /* test all possible segmentation lengths from 1 byte at a time to the entire * input. */ for (size_t i = 1; i < input->len; ++i) { uint8_t output[128] = {0}; struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected->len); output_buf.len = 0; struct aws_s3_checksum *checksum = new_fn(allocator, algorithm); ASSERT_NOT_NULL(checksum); struct aws_byte_cursor input_cpy = *input; while (input_cpy.len) { size_t max_advance = input_cpy.len > i ? i : input_cpy.len; struct aws_byte_cursor segment = aws_byte_cursor_from_array(input_cpy.ptr, max_advance); ASSERT_SUCCESS(aws_checksum_update(checksum, &segment)); aws_byte_cursor_advance(&input_cpy, max_advance); } size_t truncation_size = checksum->digest_size - expected->len; ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf, truncation_size)); ASSERT_BIN_ARRAYS_EQUALS(expected->ptr, expected->len, output_buf.buffer, output_buf.len); aws_checksum_destroy(checksum); } aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_client_test.c000066400000000000000000000444351456575232400240170ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_request.h" #include "aws/s3/private/s3_util.h" #include "s3_tester.h" #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } static void s_init_mock_s3_request_upload_part_timeout( struct aws_s3_request *mock_request, uint64_t original_upload_timeout_ms, uint64_t request_time_ns, uint64_t response_to_first_byte_time_ns) { mock_request->upload_timeout_ms = (size_t)original_upload_timeout_ms; struct aws_s3_request_metrics *metrics = mock_request->send_data.metrics; metrics->time_metrics.send_start_timestamp_ns = 0; metrics->time_metrics.send_end_timestamp_ns = 0; metrics->time_metrics.receive_end_timestamp_ns = request_time_ns; metrics->time_metrics.receive_start_timestamp_ns = response_to_first_byte_time_ns; } static int s_starts_upload_retry(struct aws_s3_client *client, struct aws_s3_request *mock_request) { uint64_t average_time_ns = aws_timestamp_convert( 300, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); /* 0.3 Secs, average for upload a part */ AWS_ZERO_STRUCT(client->synced_data.upload_part_stats); s_init_mock_s3_request_upload_part_timeout(mock_request, 0, average_time_ns, average_time_ns); for (size_t i = 0; i < 10; i++) { /* Mock a number of requests completed with the large time for the request */ aws_s3_client_update_upload_part_timeout(client, mock_request, AWS_ERROR_SUCCESS); } /* Check that retry should be turned off */ ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout); size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); /* We start the retry with a default 1 sec timeout */ ASSERT_UINT_EQUALS(1000, current_timeout_ms); return AWS_OP_SUCCESS; } /* Test the aws_s3_client_update_upload_part_timeout works as expected */ TEST_CASE(client_update_upload_part_timeout) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(8), .tls_usage = AWS_S3_TLS_DISABLED, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_request mock_request; struct aws_s3_request_metrics metrics; AWS_ZERO_STRUCT(mock_request); AWS_ZERO_STRUCT(metrics); mock_request.send_data.metrics = &metrics; uint64_t large_time_ns = aws_timestamp_convert(5500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); /* 5.5 Secs, larger than 5 secs */ uint64_t average_time_ns = aws_timestamp_convert( 250, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); /* 0.25 Secs, close to average for upload a part */ size_t init_count = 10; { /* 1. If the request time is larger than 5 secs, we don't do retry */ AWS_ZERO_STRUCT(client->synced_data.upload_part_stats); s_init_mock_s3_request_upload_part_timeout(&mock_request, 0, large_time_ns, average_time_ns); /* If request timeout happened before the retry started, it has no effects. 
*/ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); for (size_t i = 0; i < init_count; i++) { /* Mock a number of requests completed with the large time for the request */ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS); } /* Check that retry should be turned off */ ASSERT_TRUE(client->synced_data.upload_part_stats.stop_timeout); size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); ASSERT_UINT_EQUALS(0, current_timeout_ms); } { ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request)); /** * 3. Once a request finishes without timeout, use the average response_to_first_byte_time + * g_expect_timeout_offset_ms as our expected timeout. (TODO: The real expected timeout should be a P99 of * all the requests.) * 3.1 Adjust the current timeout against the expected timeout, via 0.99 * + 0.01 * to get closer to the expected timeout. */ s_init_mock_s3_request_upload_part_timeout( &mock_request, aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL), average_time_ns, average_time_ns); /* After 1000 runs, we have the timeout match the "expected" (average time + g_expect_timeout_offset_ms) timeout */ for (size_t i = 0; i < 1000; i++) { /* Mock a number of requests completed with the large time for the request */ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS); } size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); ASSERT_UINT_EQUALS( aws_timestamp_convert(average_time_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL) + g_expect_timeout_offset_ms, current_timeout_ms); /* will not change after another 1k run */ for (size_t i = 0; i < 1000; i++) { /* Mock a number of requests completed with the large time for the request */ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS); } ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout); current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); /* After 1000 runs, we have the timeout match the "expected" (average time + g_expect_timeout_offset_ms) timeout */ ASSERT_UINT_EQUALS( aws_timestamp_convert(average_time_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL) + g_expect_timeout_offset_ms, current_timeout_ms); } { ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request)); /** * 4.1 If timeout rate is larger than 0.1%, we increase the timeout by 100ms (Check the timeout when the * request was made, if the updated timeout is larger than the expected, skip update). */ /* Set current timeout rate to be around 0.1% */ client->synced_data.upload_part_stats.timeout_rate_tracking.num_completed = 800; client->synced_data.upload_part_stats.timeout_rate_tracking.num_failed = 1; /* Update the timeout as the rate is larger than 0.1% */ s_init_mock_s3_request_upload_part_timeout(&mock_request, 1000 /*original_upload_timeout_ms*/, 0, 0); aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); /* 1.1 secs */ ASSERT_UINT_EQUALS(1100, current_timeout_ms); /* The same timeout applied to multiple requests made before, and the timeout happened right after we already * updated it. The timeout will not be updated again. 
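         * In other words, the update appears to compare the timeout recorded on the request when it
         * was issued (upload_timeout_ms, still 1000 ms here) against the value that was already raised
         * to 1100 ms by the 100 ms step from case 4.1, and treats the second report as stale.
         * Concretely: the first timeout above moved 1000 ms to 1100 ms, and the repeat report below,
         * still carrying the original 1000 ms, is expected to leave it at 1100 ms.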
*/ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout); current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); /* 1.1 secs, still */ ASSERT_UINT_EQUALS(1100, current_timeout_ms); } { ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request)); /** * 4.2 If timeout rate is larger than 1%, we increase the timeout by 1 secs (If needed). And clear the rate * to get the exact rate with new timeout. */ /* Assume our first batch requests all failed with the 1 sec timeout. As the request around 3 secs to * complete */ uint64_t real_response_time_ns = aws_timestamp_convert(3000 - g_expect_timeout_offset_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); s_init_mock_s3_request_upload_part_timeout( &mock_request, 1000 /*original_upload_timeout_ms*/, real_response_time_ns, real_response_time_ns); /* First failure will not change the timeout, as we use the ceiling of 1% rate */ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); ASSERT_UINT_EQUALS(1000, current_timeout_ms); /* Updated at the second timeout */ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); ASSERT_UINT_EQUALS(2000, current_timeout_ms); /* The rest of the batch failure will not affect the timeout */ for (size_t i = 0; i < 10; i++) { aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); } current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); /* still 2 secs */ ASSERT_UINT_EQUALS(2000, current_timeout_ms); /* The 2 secs will still fail the whole batch */ s_init_mock_s3_request_upload_part_timeout( &mock_request, current_timeout_ms /*original_upload_timeout_ms*/, real_response_time_ns, real_response_time_ns); for (size_t i = 0; i < 10; i++) { aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); } current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); /* 3 secs now */ ASSERT_UINT_EQUALS(3000, current_timeout_ms); /* 3 secs will result in around 0.1% failure, and we are okay with that */ s_init_mock_s3_request_upload_part_timeout( &mock_request, current_timeout_ms /*original_upload_timeout_ms*/, real_response_time_ns, real_response_time_ns); /* 1 failure, and others all succeed */ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); for (size_t i = 0; i < 10; i++) { aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_SUCCESS); } /* still 3 secs */ current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); ASSERT_UINT_EQUALS(3000, current_timeout_ms); ASSERT_FALSE(client->synced_data.upload_part_stats.stop_timeout); } { ASSERT_SUCCESS(s_starts_upload_retry(client, &mock_request)); /* 4.3 Once the timeout is larger than 5 secs, we stop the process. 
*/ s_init_mock_s3_request_upload_part_timeout(&mock_request, 1000 /*original_upload_timeout_ms*/, 0, 0); for (size_t i = 0; i < 10; i++) { /* Make two continuous timeout request with updated timeout */ aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); aws_s3_client_update_upload_part_timeout(client, &mock_request, AWS_ERROR_HTTP_RESPONSE_FIRST_BYTE_TIMEOUT); size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); s_init_mock_s3_request_upload_part_timeout( &mock_request, current_timeout_ms /*original_upload_timeout_ms*/, 0, 0); } /* Timeout stopped */ size_t current_timeout_ms = aws_atomic_load_int(&client->upload_timeout_ms); ASSERT_UINT_EQUALS(0, current_timeout_ms); ASSERT_TRUE(client->synced_data.upload_part_stats.stop_timeout); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Test meta request can override the part size as expected */ TEST_CASE(client_meta_request_override_part_size) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(8), .tls_usage = AWS_S3_TLS_DISABLED, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(host_name); struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str("/mytest"); size_t override_part_size = MB_TO_BYTES(10); size_t content_length = MB_TO_BYTES(20); /* Let the content length larger than the override part size to make sure we do MPU */ /* MPU put object */ struct aws_input_stream_tester_options stream_options = { .autogen_length = content_length, }; struct aws_input_stream *input_stream = aws_input_stream_new_tester(allocator, &stream_options); struct aws_http_message *put_messages = aws_s3_test_put_object_request_new( allocator, &host_cur, g_test_body_content_type, test_object_path, input_stream, 0 /*flags*/); struct aws_s3_meta_request_options meta_request_options = { .message = put_messages, .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .part_size = override_part_size, }; struct aws_s3_meta_request *put_meta_request = client->vtable->meta_request_factory(client, &meta_request_options); ASSERT_UINT_EQUALS(put_meta_request->part_size, override_part_size); /* auto ranged Get Object */ struct aws_http_message *get_message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB); struct aws_s3_meta_request_options get_meta_request_options = { .message = get_message, .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .part_size = override_part_size, }; struct aws_s3_meta_request *get_meta_request = client->vtable->meta_request_factory(client, &get_meta_request_options); ASSERT_UINT_EQUALS(get_meta_request->part_size, override_part_size); aws_http_message_release(put_messages); aws_s3_meta_request_release(put_meta_request); aws_http_message_release(get_message); aws_s3_meta_request_release(get_meta_request); aws_string_destroy(host_name); aws_s3_client_release(client); aws_input_stream_release(input_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Test meta request can override the multipart upload threshold as expected */ 
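/* For reference, both overrides exercised here hang off aws_s3_meta_request_options; a sketch of the
 * relevant fields, assuming a client created as in the tests above (put_message is a placeholder name):
 *
 *     struct aws_s3_meta_request_options options = {
 *         .message = put_message,
 *         .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
 *         .part_size = MB_TO_BYTES(10),                   (overrides the client-level part_size)
 *         .multipart_upload_threshold = MB_TO_BYTES(20),  (bodies at or below this stay a single PutObject)
 *     };
 *
 * The second block of the test below also checks that when only part_size is overridden, that value
 * doubles as the multipart-upload threshold for the request. */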
TEST_CASE(client_meta_request_override_multipart_upload_threshold) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(8), .multipart_upload_threshold = MB_TO_BYTES(15), }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(host_name); struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str("/mytest"); size_t override_multipart_upload_threshold = MB_TO_BYTES(20); size_t content_length = MB_TO_BYTES(20); /* Let the content length larger than the override part size to make sure we do MPU */ /* MPU put object */ struct aws_input_stream_tester_options stream_options = { .autogen_length = content_length, }; struct aws_input_stream *input_stream = aws_input_stream_new_tester(allocator, &stream_options); struct aws_http_message *put_messages = aws_s3_test_put_object_request_new( allocator, &host_cur, g_test_body_content_type, test_object_path, input_stream, 0 /*flags*/); { /* Content length is smaller than the override multipart_upload_threshold */ struct aws_s3_meta_request_options meta_request_options = { .message = put_messages, .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .multipart_upload_threshold = override_multipart_upload_threshold, }; struct aws_s3_meta_request *put_meta_request = client->vtable->meta_request_factory(client, &meta_request_options); /* Part size will be 0, as we don't use MPU */ ASSERT_UINT_EQUALS(put_meta_request->part_size, 0); aws_s3_meta_request_release(put_meta_request); } { /* meta request override the part size, so the override part size will be used as the multipart upload threshold */ struct aws_s3_meta_request_options meta_request_options = { .message = put_messages, .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .part_size = override_multipart_upload_threshold, }; struct aws_s3_meta_request *put_meta_request = client->vtable->meta_request_factory(client, &meta_request_options); /* Part size will be 0, as we don't use MPU */ ASSERT_UINT_EQUALS(put_meta_request->part_size, 0); aws_s3_meta_request_release(put_meta_request); } aws_http_message_release(put_messages); aws_string_destroy(host_name); aws_s3_client_release(client); aws_input_stream_release(input_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_data_plane_tests.c000066400000000000000000010565471456575232400250240ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_checksums.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include "aws/s3/s3_client.h" #include "s3_tester.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include AWS_TEST_CASE(test_s3_client_create_destroy, s_test_s3_client_create_destroy) static int s_test_s3_client_create_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_client_create_error, s_test_s3_client_create_error) static int s_test_s3_client_create_error(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); struct aws_http_proxy_options proxy_options = { .connection_type = AWS_HPCT_HTTP_LEGACY, .host = aws_byte_cursor_from_c_str("localhost"), .port = 8899, }; client_config.proxy_options = &proxy_options; struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client == NULL); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_client_monitoring_options_override, s_test_s3_client_monitoring_options_override) static int s_test_s3_client_monitoring_options_override(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_http_connection_monitoring_options monitoring_options = {.minimum_throughput_bytes_per_second = 3000}; struct aws_s3_client_config client_config = {.monitoring_options = &monitoring_options}; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE( client->monitoring_options.minimum_throughput_bytes_per_second == client_config.monitoring_options->minimum_throughput_bytes_per_second); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_client_proxy_ev_settings_override, s_test_s3_client_proxy_ev_settings_override) static int s_test_s3_client_proxy_ev_settings_override(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_tls_connection_options tls_conn_options; AWS_ZERO_STRUCT(tls_conn_options); struct proxy_env_var_settings proxy_ev_settings = { .env_var_type = AWS_HPEV_ENABLE, .tls_options = &tls_conn_options, }; struct aws_s3_client_config client_config = {.proxy_ev_settings = &proxy_ev_settings}; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, 0)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client->proxy_ev_settings->env_var_type == 
client_config.proxy_ev_settings->env_var_type); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_client_tcp_keep_alive_options_override, s_test_s3_client_tcp_keep_alive_options_override) static int s_test_s3_client_tcp_keep_alive_options_override(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tcp_keep_alive_options keep_alive_options = {.keep_alive_interval_sec = 20}; struct aws_s3_client_config client_config = {.tcp_keep_alive_options = &keep_alive_options}; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, 0)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE( client->tcp_keep_alive_options->keep_alive_interval_sec == client_config.tcp_keep_alive_options->keep_alive_interval_sec); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_client_max_active_connections_override, s_test_s3_client_max_active_connections_override) static int s_test_s3_client_max_active_connections_override(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .max_active_connections_override = 10, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, 0)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client->max_active_connections_override == client_config.max_active_connections_override); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_client_byo_crypto_no_options, s_test_s3_client_byo_crypto_no_options) static int s_test_s3_client_byo_crypto_no_options(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .tls_mode = AWS_MR_TLS_ENABLED, }; struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(aws_last_error() == AWS_ERROR_INVALID_ARGUMENT); ASSERT_TRUE(client == NULL); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_client_byo_crypto_with_options, s_test_s3_client_byo_crypto_with_options) static int s_test_s3_client_byo_crypto_with_options(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_tls_connection_options tls_conn_options; AWS_ZERO_STRUCT(tls_conn_options); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, 0)); client_config.tls_mode = AWS_MR_TLS_ENABLED; client_config.tls_connection_options = &tls_conn_options; struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } size_t s_test_max_active_connections_host_count = 0; size_t s_test_get_max_active_connections_host_address_count( struct aws_host_resolver *host_resolver, const struct aws_string *host_name, uint32_t flags) { (void)host_resolver; (void)host_name; (void)flags; return s_test_max_active_connections_host_count; } 
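/* The test below pins down the expected behavior of aws_s3_client_get_max_active_connections().
 * Read from its assertions (a sketch of the apparent rule, not the actual implementation):
 *
 *     vips  = known host addresses, treated as 1 when none are known yet
 *     limit = meta_request != NULL
 *               ? vips * g_num_conns_per_vip_meta_request_look_up[meta_request->type]
 *               : ideal_vip_count * g_max_num_connections_per_vip
 *     if max_active_connections_override != 0, limit = min(limit, override)
 *
 * i.e. the override can only lower the computed budget, never raise it. */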
AWS_TEST_CASE(test_s3_client_get_max_active_connections, s_test_s3_client_get_max_active_connections) static int s_test_s3_client_get_max_active_connections(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_client_bootstrap mock_client_bootstrap; AWS_ZERO_STRUCT(mock_client_bootstrap); struct aws_s3_client *mock_client = aws_s3_tester_mock_client_new(&tester); *((uint32_t *)&mock_client->max_active_connections_override) = 0; *((uint32_t *)&mock_client->ideal_vip_count) = 10; mock_client->client_bootstrap = &mock_client_bootstrap; mock_client->vtable->get_host_address_count = s_test_get_max_active_connections_host_address_count; struct aws_s3_meta_request *mock_meta_requests[AWS_S3_META_REQUEST_TYPE_MAX]; for (size_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) { /* Verify that g_max_num_connections_per_vip and g_num_conns_per_vip_meta_request_look_up are set up * correctly.*/ ASSERT_TRUE(g_max_num_connections_per_vip >= g_num_conns_per_vip_meta_request_look_up[i]); /* Setup test data. */ mock_meta_requests[i] = aws_s3_tester_mock_meta_request_new(&tester); mock_meta_requests[i]->type = i; mock_meta_requests[i]->endpoint = aws_s3_tester_mock_endpoint_new(&tester); } /* With host count at 0, we should allow for one VIP worth of max-active-connections. */ { s_test_max_active_connections_host_count = 0; ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, NULL) == mock_client->ideal_vip_count * g_max_num_connections_per_vip); for (size_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) { ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, mock_meta_requests[i]) == g_num_conns_per_vip_meta_request_look_up[i]); } } s_test_max_active_connections_host_count = 2; /* Behavior should not be affected by max_active_connections_override since it is 0, and should just be in relation * to ideal-vip-count and host-count. */ { ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, NULL) == mock_client->ideal_vip_count * g_max_num_connections_per_vip); for (size_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) { ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, mock_meta_requests[i]) == s_test_max_active_connections_host_count * g_num_conns_per_vip_meta_request_look_up[i]); } } /* Max active connections override should now cap the calculated amount of active connections. */ { *((uint32_t *)&mock_client->max_active_connections_override) = 3; ASSERT_TRUE( mock_client->max_active_connections_override < mock_client->ideal_vip_count * g_max_num_connections_per_vip); ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, NULL) == mock_client->max_active_connections_override); for (size_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) { ASSERT_TRUE( mock_client->max_active_connections_override < s_test_max_active_connections_host_count * g_num_conns_per_vip_meta_request_look_up[i]); ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, mock_meta_requests[i]) == mock_client->max_active_connections_override); } } /* Max active connections override should be ignored since the calculated amount of max connections is less. 
*/ { *((uint32_t *)&mock_client->max_active_connections_override) = 100000; ASSERT_TRUE( mock_client->max_active_connections_override > mock_client->ideal_vip_count * g_max_num_connections_per_vip); ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, NULL) == mock_client->ideal_vip_count * g_max_num_connections_per_vip); for (size_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) { ASSERT_TRUE( mock_client->max_active_connections_override > s_test_max_active_connections_host_count * g_num_conns_per_vip_meta_request_look_up[i]); ASSERT_TRUE( aws_s3_client_get_max_active_connections(mock_client, mock_meta_requests[i]) == s_test_max_active_connections_host_count * g_num_conns_per_vip_meta_request_look_up[i]); } } for (size_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) { mock_meta_requests[i] = aws_s3_meta_request_release(mock_meta_requests[i]); } aws_s3_client_release(mock_client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_request_create_destroy, s_test_s3_request_create_destroy) static int s_test_s3_request_create_destroy(struct aws_allocator *allocator, void *ctx) { (void)ctx; const int request_tag = 1234; const enum aws_s3_request_type request_type = AWS_S3_REQUEST_TYPE_LIST_PARTS; const uint32_t part_number = 5678; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_meta_request *meta_request = aws_s3_tester_mock_meta_request_new(&tester); ASSERT_TRUE(meta_request != NULL); struct aws_s3_client *client = aws_s3_tester_mock_client_new(&tester); ASSERT_TRUE(client != NULL); meta_request->client = aws_s3_client_acquire(client); struct aws_http_message *request_message = aws_s3_tester_dummy_http_request_new(&tester); ASSERT_TRUE(request_message != NULL); struct aws_s3_request *request = aws_s3_request_new( meta_request, request_tag, request_type, part_number, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS); ASSERT_TRUE(request != NULL); ASSERT_TRUE(request->meta_request == meta_request); ASSERT_TRUE(request->part_number == part_number); ASSERT_TRUE(request->request_tag == request_tag); ASSERT_TRUE(request->request_type == request_type); ASSERT_STR_EQUALS("ListParts", aws_string_c_str(request->operation_name)); ASSERT_TRUE(request->record_response_headers == true); aws_s3_request_setup_send_data(request, request_message); ASSERT_TRUE(request->send_data.message != NULL); ASSERT_TRUE(request->send_data.response_headers == NULL); request->send_data.response_headers = aws_http_headers_new(allocator); ASSERT_TRUE(request->send_data.response_headers != NULL); ASSERT_TRUE(request->send_data.metrics != NULL); request->send_data.metrics = aws_s3_request_metrics_release(request->send_data.metrics); aws_s3_request_clean_up_send_data(request); ASSERT_TRUE(request->send_data.message == NULL); ASSERT_TRUE(request->send_data.response_headers == NULL); ASSERT_TRUE(request->send_data.response_status == 0); aws_s3_request_release(request); aws_http_message_release(request_message); aws_s3_meta_request_release(meta_request); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } struct s3_test_body_streaming_user_data { struct aws_s3_tester *tester; struct aws_allocator *allocator; uint64_t expected_range_start; uint64_t received_body_size; }; static int s_s3_meta_request_test_body_streaming_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)meta_request; (void)body; (void)range_start; struct 
s3_test_body_streaming_user_data *body_streaming_user_data = user_data; body_streaming_user_data->received_body_size += body->len; ASSERT_TRUE(body_streaming_user_data->expected_range_start == range_start); body_streaming_user_data->expected_range_start += body->len; aws_s3_tester_inc_counter1(body_streaming_user_data->tester); return AWS_OP_SUCCESS; } /* Test the meta request body streaming functionality. */ AWS_TEST_CASE(test_s3_meta_request_body_streaming, s_test_s3_meta_request_body_streaming) static int s_test_s3_meta_request_body_streaming(struct aws_allocator *allocator, void *ctx) { (void)ctx; const uint32_t part_range0_start = 1; const uint32_t part_range0_end = part_range0_start + 4; const uint32_t part_range1_start = part_range0_end + 1; const uint32_t part_range1_end = part_range1_start + 4; const size_t request_response_body_size = 16; const uint64_t total_object_size = (uint64_t)part_range1_end * request_response_body_size; struct aws_byte_buf response_body_source_buffer; aws_byte_buf_init(&response_body_source_buffer, allocator, request_response_body_size); const struct aws_byte_cursor test_byte_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("0"); for (size_t i = 0; i < request_response_body_size; ++i) { aws_byte_buf_append(&response_body_source_buffer, &test_byte_cursor); } struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct s3_test_body_streaming_user_data body_streaming_user_data = { .tester = &tester, }; struct aws_s3_client *mock_client = aws_s3_tester_mock_client_new(&tester); struct aws_s3_meta_request *meta_request = aws_s3_tester_mock_meta_request_new(&tester); ASSERT_TRUE(meta_request != NULL); struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 0, NULL); meta_request->client = aws_s3_client_acquire(mock_client); meta_request->user_data = &body_streaming_user_data; *((size_t *)&meta_request->part_size) = request_response_body_size; meta_request->body_callback = s_s3_meta_request_test_body_streaming_callback; meta_request->io_event_loop = aws_event_loop_group_get_next_loop(event_loop_group); /* Queue the first range of parts in order. Each part should be flushed one-by-one. 
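     * Because the parts arrive in order, aws_s3_meta_request_stream_response_body_synced() can hand
     * each one straight to the body callback, which is why the pending_body_streaming_requests
     * priority queue is asserted to be empty after every push in this block. The second half of the
     * test does the opposite: it withholds the first part of the second range so the later parts pile
     * up in the priority queue, and only streaming that missing part flushes the whole range, which
     * the received_body_size check at the end verifies.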
*/ { for (uint32_t part_number = part_range0_start; part_number <= part_range0_end; ++part_number) { struct aws_s3_request *request = aws_s3_request_new( meta_request, 0 /*request_tag*/, AWS_S3_REQUEST_TYPE_GET_OBJECT, part_number, 0 /*flags*/); aws_s3_calculate_auto_ranged_get_part_range( 0ULL, total_object_size - 1, request_response_body_size /*part_size*/, (uint64_t)request_response_body_size /*first_part_size*/, part_number, &request->part_range_start, &request->part_range_end); aws_byte_buf_init_copy(&request->send_data.response_body, allocator, &response_body_source_buffer); aws_s3_tester_set_counter1_desired(&tester, part_number); aws_s3_meta_request_lock_synced_data(meta_request); aws_s3_meta_request_stream_response_body_synced(meta_request, request); ASSERT_TRUE(aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) == 0); aws_s3_meta_request_unlock_synced_data(meta_request); aws_s3_tester_wait_for_counters(&tester); aws_s3_request_release(request); } } aws_s3_tester_set_counter1_desired(&tester, part_range1_end); /* Queue parts for second range, but skip over the first part.*/ { uint32_t num_parts_queued = 0; ASSERT_TRUE(part_range1_start != part_range1_end); for (uint32_t part_number = part_range1_start + 1; part_number <= part_range1_end; ++part_number) { struct aws_s3_request *request = aws_s3_request_new( meta_request, 0 /*request_tag*/, AWS_S3_REQUEST_TYPE_GET_OBJECT, part_number, 0 /*flags*/); aws_s3_calculate_auto_ranged_get_part_range( 0ULL, total_object_size - 1, request_response_body_size /*part_size*/, (uint64_t)request_response_body_size /*first_part_size*/, part_number, &request->part_range_start, &request->part_range_end); aws_byte_buf_init_copy(&request->send_data.response_body, allocator, &response_body_source_buffer); aws_s3_meta_request_lock_synced_data(meta_request); aws_s3_meta_request_stream_response_body_synced(meta_request, request); aws_s3_meta_request_unlock_synced_data(meta_request); aws_s3_request_release(request); ++num_parts_queued; } aws_s3_meta_request_lock_synced_data(meta_request); ASSERT_TRUE( aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) == num_parts_queued); aws_s3_meta_request_unlock_synced_data(meta_request); } /* Stream the last part of the body, which should flush the priority queue. 
*/ { struct aws_s3_request *request = aws_s3_request_new( meta_request, 0 /*request_tag*/, AWS_S3_REQUEST_TYPE_GET_OBJECT, part_range1_start, 0 /*flags*/); aws_s3_calculate_auto_ranged_get_part_range( 0ULL, total_object_size - 1, request_response_body_size /*part_size*/, (uint64_t)request_response_body_size /*first_part_size*/, part_range1_start, &request->part_range_start, &request->part_range_end); aws_byte_buf_init_copy(&request->send_data.response_body, allocator, &response_body_source_buffer); aws_s3_meta_request_lock_synced_data(meta_request); aws_s3_meta_request_stream_response_body_synced(meta_request, request); aws_s3_meta_request_unlock_synced_data(meta_request); aws_s3_meta_request_lock_synced_data(meta_request); ASSERT_TRUE(aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) == 0); aws_s3_meta_request_unlock_synced_data(meta_request); aws_s3_request_release(request); aws_s3_tester_wait_for_counters(&tester); } ASSERT_TRUE(body_streaming_user_data.received_body_size == (request_response_body_size * part_range1_end)); aws_s3_meta_request_release(meta_request); aws_s3_client_release(mock_client); aws_event_loop_group_release(event_loop_group); aws_byte_buf_clean_up(&response_body_source_buffer); aws_s3_tester_clean_up(&tester); return 0; } /* Test aws_s3_client_queue_requests_threaded and aws_s3_client_dequeue_request_threaded */ AWS_TEST_CASE(test_s3_client_queue_requests, s_test_s3_client_queue_requests) static int s_test_s3_client_queue_requests(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; aws_s3_tester_init(allocator, &tester); struct aws_s3_client *mock_client = aws_s3_tester_mock_client_new(&tester); aws_linked_list_init(&mock_client->threaded_data.request_queue); struct aws_s3_meta_request *mock_meta_request = aws_s3_tester_mock_meta_request_new(&tester); mock_meta_request->client = aws_s3_client_acquire(mock_client); struct aws_s3_request *pivot_request = aws_s3_request_new(mock_meta_request, 0, 0, 0, 0); struct aws_linked_list pivot_request_list; aws_linked_list_init(&pivot_request_list); struct aws_s3_request *requests[] = { aws_s3_request_new(mock_meta_request, 0, 0, 0, 0), aws_s3_request_new(mock_meta_request, 0, 0, 0, 0), aws_s3_request_new(mock_meta_request, 0, 0, 0, 0), }; const uint32_t num_requests = AWS_ARRAY_SIZE(requests); struct aws_linked_list request_list; aws_linked_list_init(&request_list); { aws_linked_list_push_back(&pivot_request_list, &pivot_request->node); aws_s3_client_queue_requests_threaded(mock_client, &pivot_request_list, false); ASSERT_TRUE(mock_client->threaded_data.request_queue_size == 1); ASSERT_TRUE(!aws_linked_list_empty(&mock_client->threaded_data.request_queue)); for (uint32_t i = 0; i < num_requests; ++i) { aws_linked_list_push_back(&request_list, &requests[i]->node); } /* Move the requests to the back of the queue. */ aws_s3_client_queue_requests_threaded(mock_client, &request_list, false); } ASSERT_TRUE(aws_linked_list_empty(&request_list)); ASSERT_TRUE(mock_client->threaded_data.request_queue_size == (num_requests + 1)); ASSERT_TRUE(!aws_linked_list_empty(&mock_client->threaded_data.request_queue)); { /* The first request should be the pivot request since the other requests were pushed to the back. 
*/ struct aws_s3_request *first_request = aws_s3_client_dequeue_request_threaded(mock_client); ASSERT_TRUE(first_request == pivot_request); ASSERT_TRUE(mock_client->threaded_data.request_queue_size == num_requests); ASSERT_TRUE(!aws_linked_list_empty(&mock_client->threaded_data.request_queue)); } for (uint32_t i = 0; i < num_requests; ++i) { struct aws_s3_request *request = aws_s3_client_dequeue_request_threaded(mock_client); ASSERT_TRUE(request == requests[i]); ASSERT_TRUE(mock_client->threaded_data.request_queue_size == (num_requests - (i + 1))); if (i < num_requests - 1) { ASSERT_TRUE(!aws_linked_list_empty(&mock_client->threaded_data.request_queue)); } } ASSERT_TRUE(mock_client->threaded_data.request_queue_size == 0); ASSERT_TRUE(aws_linked_list_empty(&mock_client->threaded_data.request_queue)); { aws_linked_list_push_back(&pivot_request_list, &pivot_request->node); aws_s3_client_queue_requests_threaded(mock_client, &pivot_request_list, false); ASSERT_TRUE(mock_client->threaded_data.request_queue_size == 1); ASSERT_TRUE(!aws_linked_list_empty(&mock_client->threaded_data.request_queue)); for (uint32_t i = 0; i < num_requests; ++i) { aws_linked_list_push_back(&request_list, &requests[i]->node); } /* Move the requests to the front of the queue. */ aws_s3_client_queue_requests_threaded(mock_client, &request_list, true); } ASSERT_TRUE(aws_linked_list_empty(&request_list)); ASSERT_TRUE(mock_client->threaded_data.request_queue_size == (num_requests + 1)); ASSERT_TRUE(!aws_linked_list_empty(&mock_client->threaded_data.request_queue)); for (uint32_t i = 0; i < num_requests; ++i) { struct aws_s3_request *request = aws_s3_client_dequeue_request_threaded(mock_client); ASSERT_TRUE(request == requests[i]); } { /* The last request should be the pivot request since the other requests were pushed to the front. 
*/ struct aws_s3_request *last_request = aws_s3_client_dequeue_request_threaded(mock_client); ASSERT_TRUE(last_request == pivot_request); } ASSERT_TRUE(aws_linked_list_empty(&mock_client->threaded_data.request_queue)); ASSERT_TRUE(mock_client->threaded_data.request_queue_size == 0); for (uint32_t i = 0; i < num_requests; ++i) { aws_s3_request_release(requests[i]); } aws_s3_request_release(pivot_request); aws_s3_meta_request_release(mock_meta_request); aws_s3_client_release(mock_client); aws_s3_tester_clean_up(&tester); return 0; } struct test_work_meta_request_update_user_data { bool has_work_remaining; uint32_t num_prepares; }; static bool s_s3_test_work_meta_request_update( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { AWS_ASSERT(meta_request); (void)flags; struct test_work_meta_request_update_user_data *user_data = meta_request->user_data; if (out_request) { if (user_data->has_work_remaining) { *out_request = aws_s3_request_new(meta_request, 0, 0, 0, 0); } } return user_data->has_work_remaining; } static void s_s3_test_work_meta_request_schedule_prepare_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data) { (void)request; (void)callback; (void)user_data; AWS_ASSERT(meta_request); struct test_work_meta_request_update_user_data *test_user_data = meta_request->user_data; ++test_user_data->num_prepares; aws_s3_request_release(request); } static size_t s_test_s3_update_meta_request_trigger_prepare_host_address_count = 0; static size_t s_test_s3_update_meta_request_trigger_prepare_get_host_address_count( struct aws_host_resolver *host_resolver, const struct aws_string *host_name, uint32_t flags) { (void)host_resolver; (void)host_name; (void)flags; return s_test_s3_update_meta_request_trigger_prepare_host_address_count; } static int s_validate_prepared_requests( struct aws_s3_client *client, size_t expected_num_being_prepared, struct aws_s3_meta_request *meta_request_with_work, struct aws_s3_meta_request *meta_request_without_work) { ASSERT_TRUE(client->threaded_data.request_queue_size == 0); ASSERT_TRUE(aws_linked_list_empty(&client->threaded_data.request_queue)); ASSERT_TRUE(client->threaded_data.num_requests_being_prepared == expected_num_being_prepared); ASSERT_TRUE(aws_atomic_load_int(&client->stats.num_requests_in_flight) == expected_num_being_prepared); uint32_t num_meta_requests_in_list = 0; bool meta_request_with_work_found = false; for (struct aws_linked_list_node *node = aws_linked_list_begin(&client->threaded_data.meta_requests); node != aws_linked_list_end(&client->threaded_data.meta_requests); node = aws_linked_list_next(node)) { struct aws_s3_meta_request *meta_request = AWS_CONTAINER_OF(node, struct aws_s3_meta_request, client_process_work_threaded_data); if (meta_request == meta_request_with_work) { meta_request_with_work_found = true; } ASSERT_TRUE(meta_request != meta_request_without_work); ++num_meta_requests_in_list; } ASSERT_TRUE(meta_request_with_work_found); ASSERT_TRUE(num_meta_requests_in_list == 1); return AWS_OP_SUCCESS; } /* Test that the client will prepare requests correctly. 
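 * "Prepare" here means aws_s3_client_update_meta_requests_threaded() pulling new work out of the
 * client's meta-request list (via each meta request's update() vtable entry) and handing it to
 * schedule_prepare_request(). The mocked vtables below count those calls, and
 * s_validate_prepared_requests() checks three things: the number of requests being prepared matches
 * the budget implied by the known host-address count, the meta request that reported no remaining
 * work was removed from the client's list, and the one that still has work stayed on it.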
*/ AWS_TEST_CASE(test_s3_update_meta_requests_trigger_prepare, s_test_s3_update_meta_requests_trigger_prepare) static int s_test_s3_update_meta_requests_trigger_prepare(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; aws_s3_tester_init(allocator, &tester); struct aws_client_bootstrap mock_bootstrap; AWS_ZERO_STRUCT(mock_bootstrap); const uint32_t ideal_vip_count = 10; struct aws_s3_client *mock_client = aws_s3_tester_mock_client_new(&tester); mock_client->client_bootstrap = &mock_bootstrap; mock_client->vtable->get_host_address_count = s_test_s3_update_meta_request_trigger_prepare_get_host_address_count; *((uint32_t *)&mock_client->ideal_vip_count) = ideal_vip_count; aws_linked_list_init(&mock_client->threaded_data.request_queue); aws_linked_list_init(&mock_client->threaded_data.meta_requests); struct aws_s3_meta_request *mock_meta_request_without_work = aws_s3_tester_mock_meta_request_new(&tester); mock_meta_request_without_work->client = aws_s3_client_acquire(mock_client); mock_meta_request_without_work->endpoint = aws_s3_tester_mock_endpoint_new(&tester); struct test_work_meta_request_update_user_data mock_meta_request_without_work_data = { .has_work_remaining = false, }; mock_meta_request_without_work->user_data = &mock_meta_request_without_work_data; struct aws_s3_meta_request_vtable *meta_request_without_work_vtable = aws_s3_tester_patch_meta_request_vtable(&tester, mock_meta_request_without_work, NULL); meta_request_without_work_vtable->update = s_s3_test_work_meta_request_update; meta_request_without_work_vtable->schedule_prepare_request = s_s3_test_work_meta_request_schedule_prepare_request; /* Intentionally push this meta request first to test that it's properly removed from the list. */ aws_linked_list_push_back( &mock_client->threaded_data.meta_requests, &mock_meta_request_without_work->client_process_work_threaded_data.node); aws_s3_meta_request_acquire(mock_meta_request_without_work); struct aws_s3_meta_request *mock_meta_request_with_work = aws_s3_tester_mock_meta_request_new(&tester); mock_meta_request_with_work->client = aws_s3_client_acquire(mock_client); struct test_work_meta_request_update_user_data mock_meta_request_with_work_data = { .has_work_remaining = true, }; mock_meta_request_with_work->endpoint = aws_s3_tester_mock_endpoint_new(&tester); mock_meta_request_with_work->user_data = &mock_meta_request_with_work_data; struct aws_s3_meta_request_vtable *mock_meta_request_with_work_vtable = aws_s3_tester_patch_meta_request_vtable(&tester, mock_meta_request_with_work, NULL); mock_meta_request_with_work_vtable->update = s_s3_test_work_meta_request_update; mock_meta_request_with_work_vtable->schedule_prepare_request = s_s3_test_work_meta_request_schedule_prepare_request; aws_linked_list_push_back( &mock_client->threaded_data.meta_requests, &mock_meta_request_with_work->client_process_work_threaded_data.node); aws_s3_meta_request_acquire(mock_meta_request_with_work); /* With no known addresses, the amount of requests that can be prepared should only be enough for one VIP. */ { s_test_s3_update_meta_request_trigger_prepare_host_address_count = 0; aws_s3_client_update_meta_requests_threaded(mock_client); ASSERT_SUCCESS(s_validate_prepared_requests( mock_client, g_max_num_connections_per_vip, mock_meta_request_with_work, mock_meta_request_without_work)); } /* When the number of known addresses is greater than or equal to the ideal vip count, the max number of requests * should be reached. 
*/ { const uint32_t max_requests_prepare = aws_s3_client_get_max_requests_prepare(mock_client); s_test_s3_update_meta_request_trigger_prepare_host_address_count = (size_t)(ideal_vip_count); aws_s3_client_update_meta_requests_threaded(mock_client); ASSERT_SUCCESS(s_validate_prepared_requests( mock_client, max_requests_prepare, mock_meta_request_with_work, mock_meta_request_without_work)); s_test_s3_update_meta_request_trigger_prepare_host_address_count = (size_t)(ideal_vip_count + 1); aws_s3_client_update_meta_requests_threaded(mock_client); ASSERT_SUCCESS(s_validate_prepared_requests( mock_client, max_requests_prepare, mock_meta_request_with_work, mock_meta_request_without_work)); } while (!aws_linked_list_empty(&mock_client->threaded_data.meta_requests)) { struct aws_linked_list_node *meta_request_node = aws_linked_list_pop_front(&mock_client->threaded_data.meta_requests); struct aws_s3_meta_request *meta_request = AWS_CONTAINER_OF(meta_request_node, struct aws_s3_meta_request, client_process_work_threaded_data); aws_s3_meta_request_release(meta_request); } aws_s3_meta_request_release(mock_meta_request_without_work); aws_s3_meta_request_release(mock_meta_request_with_work); aws_s3_client_release(mock_client); aws_s3_tester_clean_up(&tester); return 0; } struct s3_test_update_connections_finish_result_user_data { struct aws_s3_request *finished_request; struct aws_s3_request *create_connection_request; uint32_t finished_request_call_counter; uint32_t create_connection_request_call_counter; }; static void s_s3_test_meta_request_has_finish_result_finished_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_ASSERT(meta_request); AWS_ASSERT(request); (void)error_code; struct s3_test_update_connections_finish_result_user_data *user_data = meta_request->user_data; user_data->finished_request = request; ++user_data->finished_request_call_counter; } static void s_s3_test_meta_request_has_finish_result_client_create_connection_for_request( struct aws_s3_client *client, struct aws_s3_request *request) { (void)client; (void)request; AWS_ASSERT(client); AWS_ASSERT(request); struct aws_s3_meta_request *meta_request = request->meta_request; struct s3_test_update_connections_finish_result_user_data *user_data = meta_request->user_data; user_data->create_connection_request = request; ++user_data->create_connection_request_call_counter; } size_t s_test_update_conns_finish_result_host_address_count( struct aws_host_resolver *host_resolver, const struct aws_string *host_name, uint32_t flags) { (void)host_resolver; (void)host_name; (void)flags; return 1; } /* Test that the client will correctly discard requests for meta requests that are trying to finish. 
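 * Concretely: once a meta request has a finish result recorded (synced_data.finish_result_set), an
 * ordinary queued request is expected to be dequeued and routed straight to the meta request's
 * finished_request() callback instead of being sent, while a request created with
 * AWS_S3_REQUEST_FLAG_ALWAYS_SEND (one that must go out even while the meta request is wrapping up)
 * should still reach create_connection_for_request(). The two blocks below assert exactly that split
 * using the mocked vtable counters.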
*/ AWS_TEST_CASE(test_s3_client_update_connections_finish_result, s_test_s3_client_update_connections_finish_result) static int s_test_s3_client_update_connections_finish_result(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; aws_s3_tester_init(allocator, &tester); struct aws_client_bootstrap mock_client_bootstrap; AWS_ZERO_STRUCT(mock_client_bootstrap); struct aws_s3_client *mock_client = aws_s3_tester_mock_client_new(&tester); mock_client->client_bootstrap = &mock_client_bootstrap; mock_client->vtable->get_host_address_count = s_test_update_conns_finish_result_host_address_count; mock_client->vtable->create_connection_for_request = s_s3_test_meta_request_has_finish_result_client_create_connection_for_request; *((uint32_t *)&mock_client->ideal_vip_count) = 1; aws_linked_list_init(&mock_client->threaded_data.request_queue); struct s3_test_update_connections_finish_result_user_data test_update_connections_finish_result_user_data; AWS_ZERO_STRUCT(test_update_connections_finish_result_user_data); /* Put together a mock meta request that is finished. */ struct aws_s3_meta_request *mock_meta_request = aws_s3_tester_mock_meta_request_new(&tester); mock_meta_request->client = aws_s3_client_acquire(mock_client); mock_meta_request->synced_data.finish_result_set = true; mock_meta_request->user_data = &test_update_connections_finish_result_user_data; mock_meta_request->endpoint = aws_s3_tester_mock_endpoint_new(&tester); struct aws_s3_meta_request_vtable *mock_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(&tester, mock_meta_request, NULL); mock_meta_request_vtable->finished_request = s_s3_test_meta_request_has_finish_result_finished_request; /* Verify that the request does not get sent because the meta request has finish-result. */ { struct aws_s3_request *request = aws_s3_request_new(mock_meta_request, 0, 0, 0, 0); aws_linked_list_push_back(&mock_client->threaded_data.request_queue, &request->node); ++mock_client->threaded_data.request_queue_size; aws_s3_client_update_connections_threaded(mock_client); /* Request should still have been dequeued, but immediately passed to the meta request finish function. */ ASSERT_TRUE(mock_client->threaded_data.request_queue_size == 0); ASSERT_TRUE(aws_linked_list_empty(&mock_client->threaded_data.request_queue)); ASSERT_TRUE(test_update_connections_finish_result_user_data.finished_request == request); ASSERT_TRUE(test_update_connections_finish_result_user_data.finished_request_call_counter == 1); ASSERT_TRUE(test_update_connections_finish_result_user_data.create_connection_request == NULL); ASSERT_TRUE(test_update_connections_finish_result_user_data.create_connection_request_call_counter == 0); } AWS_ZERO_STRUCT(test_update_connections_finish_result_user_data); /* Verify that a request with the 'always send' flag still gets sent when the meta request has a finish-result. */ { struct aws_s3_request *request = aws_s3_request_new(mock_meta_request, 0, 0, 0, AWS_S3_REQUEST_FLAG_ALWAYS_SEND); aws_linked_list_push_back(&mock_client->threaded_data.request_queue, &request->node); ++mock_client->threaded_data.request_queue_size; aws_s3_client_update_connections_threaded(mock_client); /* Request should have been dequeued, and then sent on a connection. 
*/ ASSERT_TRUE(mock_client->threaded_data.request_queue_size == 0); ASSERT_TRUE(aws_linked_list_empty(&mock_client->threaded_data.request_queue)); ASSERT_TRUE(test_update_connections_finish_result_user_data.finished_request == NULL); ASSERT_TRUE(test_update_connections_finish_result_user_data.finished_request_call_counter == 0); ASSERT_TRUE(test_update_connections_finish_result_user_data.create_connection_request == request); ASSERT_TRUE(test_update_connections_finish_result_user_data.create_connection_request_call_counter == 1); aws_s3_request_release(request); } aws_s3_meta_request_release(mock_meta_request); aws_s3_client_release(mock_client); aws_s3_tester_clean_up(&tester); return 0; } static int s_test_s3_get_object_helper( struct aws_allocator *allocator, enum aws_s3_client_tls_usage tls_usage, uint32_t extra_meta_request_flag, struct aws_byte_cursor s3_path) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 64 * 1024, }; struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); #ifndef BYO_CRYPTO struct aws_tls_ctx_options tls_context_options; aws_tls_ctx_options_init_default_client(&tls_context_options, allocator); struct aws_tls_ctx *context = aws_tls_client_ctx_new(allocator, &tls_context_options); aws_tls_connection_options_init_from_ctx(&tls_connection_options, context); #endif struct aws_string *endpoint = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); struct aws_byte_cursor endpoint_cursor = aws_byte_cursor_from_string(endpoint); tls_connection_options.server_name = aws_string_new_from_cursor(allocator, &endpoint_cursor); switch (tls_usage) { case AWS_S3_TLS_ENABLED: client_config.tls_mode = AWS_MR_TLS_ENABLED; client_config.tls_connection_options = &tls_connection_options; break; case AWS_S3_TLS_DISABLED: client_config.tls_mode = AWS_MR_TLS_DISABLED; break; default: break; } ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS | extra_meta_request_flag; ASSERT_SUCCESS(aws_s3_tester_send_get_object_meta_request(&tester, client, s3_path, flags, NULL)); aws_string_destroy(endpoint); #ifndef BYO_CRYPTO aws_tls_ctx_release(context); aws_tls_connection_options_clean_up(&tls_connection_options); aws_tls_ctx_options_clean_up(&tls_context_options); #endif client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_get_object_tls_disabled, s_test_s3_get_object_tls_disabled) static int s_test_s3_get_object_tls_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_get_object_helper(allocator, AWS_S3_TLS_DISABLED, 0, g_pre_existing_object_1MB)); return 0; } AWS_TEST_CASE(test_s3_get_object_tls_enabled, s_test_s3_get_object_tls_enabled) static int s_test_s3_get_object_tls_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_get_object_helper(allocator, AWS_S3_TLS_ENABLED, 0, g_pre_existing_object_1MB)); return 0; } AWS_TEST_CASE(test_s3_get_object_tls_default, s_test_s3_get_object_tls_default) static int s_test_s3_get_object_tls_default(struct aws_allocator *allocator, void *ctx) { (void)ctx; 
ASSERT_SUCCESS(s_test_s3_get_object_helper(allocator, AWS_S3_TLS_DEFAULT, 0, g_pre_existing_object_1MB)); return 0; } AWS_TEST_CASE(test_s3_no_signing, s_test_s3_no_signing) static int s_test_s3_no_signing(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_NOT_NULL(client); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_public_bucket_name, &g_test_s3_region); /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; /* Trigger accelerating of our Get Object request. */ struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request( &tester, client, &options, &meta_request_test_results, AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS)); ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(&meta_request_test_results, 0)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_string_destroy(host_name); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_signing_override, s_test_s3_signing_override) static int s_test_s3_signing_override(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB); /* Getting without signing should fail since the client has no signing set up. */ { struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; /* Trigger accelerating of our Get Object request.*/ struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request(&tester, client, &options, &meta_request_test_results, 0)); ASSERT_TRUE(aws_s3_tester_validate_get_object_results(&meta_request_test_results, 0) != AWS_OP_SUCCESS); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); } /* Getting with signing should succeed if we set up signing on the meta request. 
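Passing options.signing_config overrides the client-level signing config; the client above was deliberately bound without AWS_S3_TESTER_BIND_CLIENT_SIGNING, so it has no signing of its own.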
*/ { struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; options.signing_config = &tester.default_signing_config; /* Trigger accelerating of our Get Object request. */ struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request( &tester, client, &options, &meta_request_test_results, AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS)); ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(&meta_request_test_results, 0)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); } aws_http_message_release(message); aws_string_destroy(host_name); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_get_object_less_than_part_size, s_test_s3_get_object_less_than_part_size) static int s_test_s3_get_object_less_than_part_size(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 20 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_SUCCESS(aws_s3_tester_send_get_object_meta_request( &tester, client, g_pre_existing_object_1MB, AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_with_part_remainder, s_test_s3_put_object_with_part_remainder) static int s_test_s3_put_object_with_part_remainder(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 5 * 1024 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .put_options = { /* Object size meant to be one megabyte larger than the part size of the client. 
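With the 5 MiB part size configured above, a 6 MiB object should be split into one full part plus a 1 MiB remainder part.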
*/ .object_size_mb = 6, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_get_object_multiple, s_test_s3_get_object_multiple) static int s_test_s3_get_object_multiple(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request *meta_requests[4]; struct aws_s3_meta_request_test_results meta_request_test_results[4]; size_t num_meta_requests = AWS_ARRAY_SIZE(meta_requests); ASSERT_TRUE(num_meta_requests == AWS_ARRAY_SIZE(meta_request_test_results)); struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 64 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); for (size_t i = 0; i < num_meta_requests; ++i) { /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB); aws_s3_meta_request_test_results_init(&meta_request_test_results[i], allocator); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results[i])); /* Trigger accelerating of our Get Object request. */ meta_requests[i] = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_requests[i] != NULL); aws_http_message_release(message); } /* Wait for the request to finish. */ aws_s3_tester_wait_for_meta_request_finish(&tester); aws_s3_tester_lock_synced_data(&tester); ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); aws_s3_tester_unlock_synced_data(&tester); for (size_t i = 0; i < num_meta_requests; ++i) { meta_requests[i] = aws_s3_meta_request_release(meta_requests[i]); } aws_s3_tester_wait_for_meta_request_shutdown(&tester); for (size_t i = 0; i < num_meta_requests; ++i) { aws_s3_tester_validate_get_object_results(&meta_request_test_results[i], 0); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); } aws_string_destroy(host_name); host_name = NULL; client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_get_object_empty_object, s_test_s3_get_object_empty_default) static int s_test_s3_get_object_empty_default(struct aws_allocator *allocator, void *ctx) { (void)ctx; return (s_test_s3_get_object_helper(allocator, AWS_S3_TLS_ENABLED, 0, g_pre_existing_empty_object)); } AWS_TEST_CASE(test_s3_get_object_sse_kms, s_test_s3_get_object_sse_kms) static int s_test_s3_get_object_sse_kms(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Keep TLS enabled for SSE related download, or it will fail. 
*/ return s_test_s3_get_object_helper( allocator, AWS_S3_TLS_ENABLED, AWS_S3_TESTER_SEND_META_REQUEST_SSE_KMS, g_pre_existing_object_kms_10MB); } AWS_TEST_CASE(test_s3_get_object_sse_aes256, s_test_s3_get_object_sse_aes256) static int s_test_s3_get_object_sse_aes256(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Keep TLS enabled for SSE related download, or it will fail. */ return s_test_s3_get_object_helper( allocator, AWS_S3_TLS_ENABLED, AWS_S3_TESTER_SEND_META_REQUEST_SSE_AES256, g_pre_existing_object_aes256_10MB); } /** * Test read-backpressure functionality by repeatedly: * - letting the download stall * - incrementing the read window * - repeat... */ static int s_apply_backpressure_until_meta_request_finish( struct aws_s3_tester *tester, struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_test_results *test_results, size_t part_size, size_t window_initial_size, uint64_t window_increment_size) { /* Remember the last time something happened (we received download data, or incremented read window) */ uint64_t last_time_something_happened; ASSERT_SUCCESS(aws_sys_clock_get_ticks(&last_time_something_happened)); /* To ensure that backpressure is working, we wait a bit after download stalls * before incrementing the read window again. * This number also controls the max time we wait for bytes to start arriving * after incrementing the window. * If the magic number is too high the test will be slow, * if it's too low the test will fail on slow networks */ const uint64_t wait_duration_with_nothing_happening = aws_timestamp_convert(3, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); uint64_t accumulated_window_increments = window_initial_size; uint64_t accumulated_data_size = 0; while (true) { /* Check if meta-request is done (don't exit yet, we want to check some numbers first...) */ aws_s3_tester_lock_synced_data(tester); bool done = tester->synced_data.meta_requests_finished != 0; aws_s3_tester_unlock_synced_data(tester); /* Check how much data we've received */ size_t received_body_size_delta = aws_atomic_exchange_int(&test_results->received_body_size_delta, 0); accumulated_data_size += (uint64_t)received_body_size_delta; /* Check that we haven't received more data than the window allows. * TODO: Stop allowing "hacky wiggle room". The current implementation * may push more bytes to the user (up to 1 part) than they've asked for. */ uint64_t hacky_wiggle_room = part_size; uint64_t max_data_allowed = accumulated_window_increments + hacky_wiggle_room; ASSERT_TRUE(accumulated_data_size <= max_data_allowed, "Received more data than the read window allows"); /* If we're done, we're done */ if (done) { break; } /* Figure out how long it's been since we last received data */ uint64_t current_time; ASSERT_SUCCESS(aws_sys_clock_get_ticks(¤t_time)); if (received_body_size_delta != 0) { last_time_something_happened = current_time; } uint64_t duration_since_something_happened = current_time - last_time_something_happened; /* If it seems like data has stopped flowing... */ if (duration_since_something_happened >= wait_duration_with_nothing_happening) { /* Assert that data stopped flowing because the window reached 0. 
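current_window is the saturating difference between the bytes granted so far (accumulated_window_increments) and the bytes delivered so far (accumulated_data_size), so it should only reach zero once the entire grant has been consumed.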
*/ uint64_t current_window = aws_sub_u64_saturating(accumulated_window_increments, accumulated_data_size); ASSERT_INT_EQUALS(0, current_window, "Data stopped flowing but read window isn't 0 yet."); /* Open the window a bit (this resets the "something happened" timer */ accumulated_window_increments += window_increment_size; aws_s3_meta_request_increment_read_window(meta_request, window_increment_size); last_time_something_happened = current_time; } /* Sleep a moment, and loop again... */ aws_thread_current_sleep(aws_timestamp_convert(100, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); } return AWS_OP_SUCCESS; } static int s_test_s3_get_object_backpressure_helper( struct aws_allocator *allocator, size_t part_size, size_t window_initial_size, uint64_t window_increment_size) { struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = part_size, .enable_read_backpressure = true, .initial_read_window = window_initial_size, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_NOT_NULL(client); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB); struct aws_s3_meta_request_options options = { .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .message = message, }; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_request != NULL); /* Increment read window bit by bit until all data is downloaded */ ASSERT_SUCCESS(s_apply_backpressure_until_meta_request_finish( &tester, meta_request, &meta_request_test_results, part_size, window_initial_size, window_increment_size)); aws_s3_tester_lock_synced_data(&tester); ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); aws_s3_tester_unlock_synced_data(&tester); ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(&meta_request_test_results, 0)); /* Regression test: * Ensure that it's safe to call increment-window even after the meta-request has finished */ aws_s3_meta_request_increment_read_window(meta_request, 1024); meta_request = aws_s3_meta_request_release(meta_request); aws_s3_tester_wait_for_meta_request_shutdown(&tester); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); message = NULL; aws_string_destroy(host_name); host_name = NULL; client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_get_object_backpressure_small_increments, s_test_s3_get_object_backpressure_small_increments) static int s_test_s3_get_object_backpressure_small_increments(struct aws_allocator *allocator, void *ctx) { /* Test increments smaller than part-size. 
* Only 1 part at a time should be in flight */ (void)ctx; size_t file_size = 1 * 1024 * 1024; /* Test downloads 1MB file */ size_t part_size = file_size / 4; size_t window_initial_size = 1024; uint64_t window_increment_size = part_size / 2; return s_test_s3_get_object_backpressure_helper(allocator, part_size, window_initial_size, window_increment_size); } AWS_TEST_CASE(test_s3_get_object_backpressure_big_increments, s_test_s3_get_object_backpressure_big_increments) static int s_test_s3_get_object_backpressure_big_increments(struct aws_allocator *allocator, void *ctx) { /* Test increments larger than part-size. * Multiple parts should be in flight at a time */ (void)ctx; size_t file_size = 1 * 1024 * 1024; /* Test downloads 1MB file */ size_t part_size = file_size / 8; size_t window_initial_size = 1024; uint64_t window_increment_size = part_size * 3; return s_test_s3_get_object_backpressure_helper(allocator, part_size, window_initial_size, window_increment_size); } AWS_TEST_CASE(test_s3_get_object_backpressure_initial_size_zero, s_test_s3_get_object_backpressure_initial_size_zero) static int s_test_s3_get_object_backpressure_initial_size_zero(struct aws_allocator *allocator, void *ctx) { /* Test with initial window size of zero */ (void)ctx; size_t file_size = 1 * 1024 * 1024; /* Test downloads 1MB file */ size_t part_size = file_size / 4; size_t window_initial_size = 0; uint64_t window_increment_size = part_size / 2; return s_test_s3_get_object_backpressure_helper(allocator, part_size, window_initial_size, window_increment_size); } AWS_TEST_CASE(test_s3_get_object_part, s_test_s3_get_object_part) static int s_test_s3_get_object_part(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(8), }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); /*** PUT FILE ***/ struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS( aws_s3_tester_upload_file_path_init(allocator, &path_buf, aws_byte_cursor_from_c_str("/get_object_part_test"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /* GET FILE */ struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_NO_VALIDATE, .get_options = { .object_path = object_path, .part_number = 2, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, meta_request_test_results.finished_error_code); /* Only one request was made to get the second part of the object */ ASSERT_UINT_EQUALS(1, aws_array_list_length(&meta_request_test_results.synced_data.metrics)); 
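/* Note: get_options.part_number presumably translates to S3's GetObject "partNumber" query parameter, which returns
 * only that part of a multipart-uploaded object, so exactly one request (and one metrics entry) is expected. */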
aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); client = aws_s3_client_release(client); aws_byte_buf_clean_up(&path_buf); aws_s3_tester_clean_up(&tester); return 0; } static int s_test_s3_put_object_helper( struct aws_allocator *allocator, enum aws_s3_client_tls_usage tls_usage, uint32_t extra_meta_request_flag) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); #ifndef BYO_CRYPTO struct aws_tls_ctx_options tls_context_options; aws_tls_ctx_options_init_default_client(&tls_context_options, allocator); struct aws_tls_ctx *context = aws_tls_client_ctx_new(allocator, &tls_context_options); aws_tls_connection_options_init_from_ctx(&tls_connection_options, context); #endif struct aws_string *endpoint = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); struct aws_byte_cursor endpoint_cursor = aws_byte_cursor_from_string(endpoint); tls_connection_options.server_name = aws_string_new_from_cursor(allocator, &endpoint_cursor); struct aws_s3_client_config client_config = { .part_size = 5 * 1024 * 1024, }; switch (tls_usage) { case AWS_S3_TLS_ENABLED: client_config.tls_mode = AWS_MR_TLS_ENABLED; client_config.tls_connection_options = &tls_connection_options; break; case AWS_S3_TLS_DISABLED: client_config.tls_mode = AWS_MR_TLS_DISABLED; break; default: break; } ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_SUCCESS(aws_s3_tester_send_put_object_meta_request( &tester, client, 10, AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS | extra_meta_request_flag, NULL)); aws_string_destroy(endpoint); #ifndef BYO_CRYPTO aws_tls_ctx_release(context); aws_tls_connection_options_clean_up(&tls_connection_options); aws_tls_ctx_options_clean_up(&tls_context_options); #endif client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_tls_disabled, s_test_s3_put_object_tls_disabled) static int s_test_s3_put_object_tls_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_put_object_helper(allocator, AWS_S3_TLS_DISABLED, 0)); return 0; } AWS_TEST_CASE(test_s3_put_object_tls_enabled, s_test_s3_put_object_tls_enabled) static int s_test_s3_put_object_tls_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_put_object_helper(allocator, AWS_S3_TLS_ENABLED, 0)); return 0; } AWS_TEST_CASE(test_s3_put_object_tls_default, s_test_s3_put_object_tls_default) static int s_test_s3_put_object_tls_default(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_put_object_helper(allocator, AWS_S3_TLS_DEFAULT, 0)); return 0; } AWS_TEST_CASE(test_s3_multipart_put_object_with_acl, s_test_s3_multipart_put_object_with_acl) static int s_test_s3_multipart_put_object_with_acl(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_put_object_helper(allocator, AWS_S3_TLS_DEFAULT, AWS_S3_TESTER_SEND_META_REQUEST_PUT_ACL)); return 0; } static int s_test_s3_put_object_multiple_helper(struct aws_allocator *allocator, bool file_on_disk) { enum s_numbers { NUM_REQUESTS = 5 }; struct aws_s3_meta_request *meta_requests[NUM_REQUESTS]; struct aws_s3_meta_request_test_results 
meta_request_test_results[NUM_REQUESTS]; struct aws_http_message *messages[NUM_REQUESTS]; struct aws_input_stream *input_streams[NUM_REQUESTS]; struct aws_byte_buf input_stream_buffers[NUM_REQUESTS]; struct aws_string *filepath_str[NUM_REQUESTS]; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); size_t content_length = MB_TO_BYTES(10); for (size_t i = 0; i < NUM_REQUESTS; ++i) { aws_s3_meta_request_test_results_init(&meta_request_test_results[i], allocator); char object_path_buffer[128] = ""; snprintf( object_path_buffer, sizeof(object_path_buffer), "" PRInSTR "-10MB-%zu.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), i); AWS_ZERO_STRUCT(input_stream_buffers[i]); aws_s3_create_test_buffer(allocator, content_length, &input_stream_buffers[i]); struct aws_byte_cursor test_body_cursor = aws_byte_cursor_from_buf(&input_stream_buffers[i]); input_streams[i] = aws_input_stream_new_from_cursor(allocator, &test_body_cursor); struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str(object_path_buffer); struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(host_name); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; if (file_on_disk) { filepath_str[i] = aws_s3_tester_create_file(allocator, test_object_path, input_streams[i]); messages[i] = aws_s3_test_put_object_request_new_without_body( allocator, &host_cur, g_test_body_content_type, test_object_path, content_length, 0 /*flags*/); options.send_filepath = aws_byte_cursor_from_string(filepath_str[i]); } else { filepath_str[i] = NULL; messages[i] = aws_s3_test_put_object_request_new( allocator, &host_cur, test_object_path, g_test_body_content_type, input_streams[i], 0); } options.message = messages[i]; ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results[i])); /* Trigger accelerating of our Put Object request. */ meta_requests[i] = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_requests[i] != NULL); } /* Wait for the request to finish. 
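All NUM_REQUESTS uploads are bound to the same tester, so this should not return until every one of them has finished.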
*/ aws_s3_tester_wait_for_meta_request_finish(&tester); aws_s3_tester_lock_synced_data(&tester); ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); aws_s3_tester_unlock_synced_data(&tester); for (size_t i = 0; i < NUM_REQUESTS; ++i) { meta_requests[i] = aws_s3_meta_request_release(meta_requests[i]); } aws_s3_tester_wait_for_meta_request_shutdown(&tester); for (size_t i = 0; i < NUM_REQUESTS; ++i) { aws_s3_tester_validate_get_object_results(&meta_request_test_results[i], 0); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); } for (size_t i = 0; i < NUM_REQUESTS; ++i) { aws_http_message_release(messages[i]); aws_input_stream_release(input_streams[i]); aws_byte_buf_clean_up(&input_stream_buffers[i]); if (filepath_str[i]) { ASSERT_SUCCESS(aws_file_delete(filepath_str[i])); aws_string_destroy(filepath_str[i]); } } aws_string_destroy(host_name); host_name = NULL; client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_multiple, s_test_s3_put_object_multiple) static int s_test_s3_put_object_multiple(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_s3_put_object_multiple_helper(allocator, false); } AWS_TEST_CASE(test_s3_put_object_multiple_with_filepath, s_test_s3_put_object_multiple_with_filepath) static int s_test_s3_put_object_multiple_with_filepath(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_s3_put_object_multiple_helper(allocator, true); } AWS_TEST_CASE(test_s3_put_object_less_than_part_size, s_test_s3_put_object_less_than_part_size) static int s_test_s3_put_object_less_than_part_size(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 20 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 1, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_buffer_pool_trim, s_test_s3_put_object_buffer_pool_trim) static int s_test_s3_put_object_buffer_pool_trim(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 8 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 32, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); struct aws_s3_buffer_pool_usage_stats usage_before = aws_s3_buffer_pool_get_usage(client->buffer_pool); ASSERT_TRUE(0 != 
usage_before.primary_num_blocks); aws_thread_current_sleep(aws_timestamp_convert(6, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); struct aws_s3_buffer_pool_usage_stats usage_after = aws_s3_buffer_pool_get_usage(client->buffer_pool); ASSERT_INT_EQUALS(0, usage_after.primary_num_blocks); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE( test_s3_put_object_less_than_part_size_with_content_encoding, s_test_s3_put_object_less_than_part_size_with_content_encoding) static int s_test_s3_put_object_less_than_part_size_with_content_encoding(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 20 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_byte_cursor content_encoding_cursor = aws_byte_cursor_from_c_str("gzip"); uint32_t object_size_mb = 1; char object_path_sprintf_buffer[128] = ""; snprintf( object_path_sprintf_buffer, sizeof(object_path_sprintf_buffer), "" PRInSTR "-content-encoding-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), object_size_mb); struct aws_byte_cursor object_path_cursor = aws_byte_cursor_from_c_str(object_path_sprintf_buffer); /*** put file with encoding ***/ struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_SHA256, .put_options = { .object_size_mb = object_size_mb, .object_path_override = object_path_cursor, .content_encoding = content_encoding_cursor, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** get file and validate encoding ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .get_options = { .object_path = object_path_cursor, }, }; struct aws_s3_meta_request_test_results get_object_result; aws_s3_meta_request_test_results_init(&get_object_result, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &get_object_result)); struct aws_byte_cursor content_encoding_header_cursor; ASSERT_SUCCESS(aws_http_headers_get( get_object_result.response_headers, g_content_encoding_header_name, &content_encoding_header_cursor)); ASSERT_TRUE(aws_byte_cursor_eq(&content_encoding_cursor, &content_encoding_header_cursor)); aws_s3_meta_request_test_results_clean_up(&get_object_result); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_mpu_with_content_encoding, s_test_s3_put_object_mpu_with_content_encoding) static int s_test_s3_put_object_mpu_with_content_encoding(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); 
ASSERT_TRUE(client != NULL); struct aws_byte_cursor content_encoding_cursor = aws_byte_cursor_from_c_str("gzip"); uint32_t object_size_mb = 10; char object_path_sprintf_buffer[128] = ""; snprintf( object_path_sprintf_buffer, sizeof(object_path_sprintf_buffer), "" PRInSTR "-content-encoding-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), object_size_mb); struct aws_byte_cursor object_path_cursor = aws_byte_cursor_from_c_str(object_path_sprintf_buffer); /*** put file with encoding ***/ struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_SHA256, .put_options = { .object_size_mb = object_size_mb, .object_path_override = object_path_cursor, .content_encoding = content_encoding_cursor, .ensure_multipart = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** get file and validate encoding ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .get_options = { .object_path = object_path_cursor, }, }; struct aws_s3_meta_request_test_results get_object_result; aws_s3_meta_request_test_results_init(&get_object_result, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &get_object_result)); struct aws_byte_cursor content_encoding_header_cursor; ASSERT_SUCCESS(aws_http_headers_get( get_object_result.response_headers, g_content_encoding_header_name, &content_encoding_header_cursor)); ASSERT_TRUE(aws_byte_cursor_eq(&content_encoding_cursor, &content_encoding_header_cursor)); aws_s3_meta_request_test_results_clean_up(&get_object_result); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_multipart_threshold, s_test_s3_put_object_multipart_threshold) static int s_test_s3_put_object_multipart_threshold(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(8), .multipart_upload_threshold = MB_TO_BYTES(15), }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); /* First smaller than the part size */ struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 5, }, }; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results)); /* Result in a single part upload, and have 0 as part size */ ASSERT_UINT_EQUALS(0, meta_request_test_results.part_size); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); /* Second smaller than threshold and larger than part size */ aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); put_options.put_options.object_size_mb = 10; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, 
&put_options, &meta_request_test_results)); /* Result in a single part upload, and have 0 as part size */ ASSERT_UINT_EQUALS(0, meta_request_test_results.part_size); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); /* Third larger than threshold*/ aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); put_options.put_options.object_size_mb = 20; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results)); /* Result in multi-part upload, and have the real part size */ ASSERT_UINT_EQUALS(client_config.part_size, meta_request_test_results.part_size); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE( test_s3_put_object_multipart_threshold_less_than_part_size, s_test_s3_put_object_multipart_threshold_less_than_part_size) static int s_test_s3_put_object_multipart_threshold_less_than_part_size(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(8), .multipart_upload_threshold = MB_TO_BYTES(5), }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); /* First smaller than the part size */ struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 6, }, }; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results)); /* Result in a one part of multipart upload, and have the content length as part size */ ASSERT_UINT_EQUALS(MB_TO_BYTES(put_options.put_options.object_size_mb), meta_request_test_results.part_size); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_empty_object, s_test_s3_put_object_empty_object) static int s_test_s3_put_object_empty_object(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 0, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static int s3_no_content_length_test_helper( struct aws_allocator *allocator, void *ctx, uint32_t object_size_in_mb, bool use_checksum) { (void)ctx; struct aws_s3_tester tester; 
ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(8), }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = use_checksum ? AWS_SCA_CRC32 : AWS_SCA_NONE, .put_options = { .object_size_mb = object_size_in_mb, .skip_content_length = true, }, }; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &meta_request_test_results)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_no_content_length, s_test_s3_put_object_no_content_length) static int s_test_s3_put_object_no_content_length(struct aws_allocator *allocator, void *ctx) { ASSERT_SUCCESS(s3_no_content_length_test_helper(allocator, ctx, 19, false)); return 0; } AWS_TEST_CASE(test_s3_put_object_single_part_no_content_length, s_test_s3_put_object_single_part_no_content_length) static int s_test_s3_put_object_single_part_no_content_length(struct aws_allocator *allocator, void *ctx) { ASSERT_SUCCESS(s3_no_content_length_test_helper(allocator, ctx, 5, false)); return 0; } AWS_TEST_CASE(test_s3_put_object_zero_size_no_content_length, s_test_s3_put_object_zero_size_no_content_length) static int s_test_s3_put_object_zero_size_no_content_length(struct aws_allocator *allocator, void *ctx) { ASSERT_SUCCESS(s3_no_content_length_test_helper(allocator, ctx, 0, false)); return 0; } AWS_TEST_CASE( test_s3_put_large_object_no_content_length_with_checksum, s_test_s3_put_large_object_no_content_length_with_checksum) static int s_test_s3_put_large_object_no_content_length_with_checksum(struct aws_allocator *allocator, void *ctx) { ASSERT_SUCCESS(s3_no_content_length_test_helper(allocator, ctx, 128, true)); return 0; } /** * Regression test: without a Content-Length, we used to schedule more requests to prepare than needed. The extra * requests were cleaned up, but the client-level `num_requests_being_prepared` count still kept a record of them. * * To reproduce, we create a batch of requests whose bodies are smaller than a single part, then sleep long enough for * the DNS resolver to purge all of its records (otherwise there would always be one valid address available to send * on), which pushes the client onto its not-going-full-speed code path. Without the fix, the client would hang.
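* (Concretely, the test below pins the DNS TTL at 55 seconds via aws_s3_set_dns_ttl(), issues six small uploads, sleeps for 60 seconds so every cached address expires, and then issues one more upload, which must complete instead of hanging.)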
* */ AWS_TEST_CASE(test_s3_put_object_no_content_length_multiple, s_test_s3_put_object_no_content_length_multiple) static int s_test_s3_put_object_no_content_length_multiple(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(8), }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); aws_s3_set_dns_ttl(55); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .put_options = { .object_size_mb = 1, .skip_content_length = true, }, }; for (int i = 0; i < 6; i++) { ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); } /* Sleep more than the DNS ttl to purge all records. */ aws_thread_current_sleep(aws_timestamp_convert(60, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); /* After sleep for a while, make another meta request */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } /* Test async-input-stream when we're not doing multipart upload */ AWS_TEST_CASE(test_s3_put_object_async_singlepart, s_test_s3_put_object_async_singlepart) static int s_test_s3_put_object_async_singlepart(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 4, .async_input_stream = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test async-input-stream in multipart upload */ AWS_TEST_CASE(test_s3_put_object_async_multipart, s_test_s3_put_object_async_multipart) static int s_test_s3_put_object_async_multipart(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 16, .async_input_stream = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test async-input-stream, but the aws_async_input_stream_read() calls all complete synchronously */ AWS_TEST_CASE( test_s3_put_object_async_read_completes_synchronously, s_test_s3_put_object_async_read_completes_synchronously) static int s_test_s3_put_object_async_read_completes_synchronously(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, 
.put_options = { .object_size_mb = 10, .async_input_stream = true, .async_read_strategy = AWS_ASYNC_READ_COMPLETES_IMMEDIATELY, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test async-input-stream, where it takes multiple read() calls to fill each part */ AWS_TEST_CASE(test_s3_put_object_async_small_reads, s_test_s3_put_object_async_small_reads) static int s_test_s3_put_object_async_small_reads(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 10, .async_input_stream = true, .max_bytes_per_read = KB_TO_BYTES(1001), /* something that doesn't evenly divide into 8MB parts */ }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test synchronous input-stream, where it takes multiple read() calls to fill each part */ AWS_TEST_CASE(test_s3_put_object_small_reads, s_test_s3_put_object_small_reads) static int s_test_s3_put_object_small_reads(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 10, .max_bytes_per_read = KB_TO_BYTES(1001), /* something that doesn't evenly divide into 8MB parts */ }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test async-input-stream, with undeclared Content-Length, that doesn't end exactly on a part boundary */ AWS_TEST_CASE( test_s3_put_object_async_no_content_length_partial_part, s_test_s3_put_object_async_no_content_length_partial_part) static int s_test_s3_put_object_async_no_content_length_partial_part(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 3, .async_input_stream = true, .skip_content_length = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test async-input-stream, with undeclared Content-Length, that fills exactly 1 part */ AWS_TEST_CASE(test_s3_put_object_async_no_content_length_1part, s_test_s3_put_object_async_no_content_length_1part) static int s_test_s3_put_object_async_no_content_length_1part(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 8, 
.async_input_stream = true, .skip_content_length = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test async-input-stream, with undeclared Content-Length, that doesn't realize * it's at EOF until it tries to read the 2nd part and gets 0 bytes */ AWS_TEST_CASE( test_s3_put_object_async_no_content_length_empty_part2, s_test_s3_put_object_async_no_content_length_empty_part2) static int s_test_s3_put_object_async_no_content_length_empty_part2(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 8, /* read 1 part's worth of data */ .eof_requires_extra_read = true, /* don't report EOF until it tries to read 2nd part */ .async_input_stream = true, .skip_content_length = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test async-input-stream, with undeclared Content-Length, that fills multiple parts */ AWS_TEST_CASE(test_s3_put_object_async_no_content_length_2parts, s_test_s3_put_object_async_no_content_length_2parts) static int s_test_s3_put_object_async_no_content_length_2parts(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 16, .async_input_stream = true, .skip_content_length = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } AWS_TEST_CASE(test_s3_put_object_async_fail_reading, s_test_s3_put_object_async_fail_reading) static int s_test_s3_put_object_async_fail_reading(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .object_size_mb = 10, .async_input_stream = true, .invalid_input_stream = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); ASSERT_INT_EQUALS(AWS_IO_STREAM_READ_FAILED, test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } AWS_TEST_CASE(test_s3_put_object_sse_kms, s_test_s3_put_object_sse_kms) static int s_test_s3_put_object_sse_kms(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 20 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, 
&client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .sse_type = AWS_S3_TESTER_SSE_KMS, .put_options = { .object_size_mb = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_sse_kms_multipart, s_test_s3_put_object_sse_kms_multipart) static int s_test_s3_put_object_sse_kms_multipart(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .sse_type = AWS_S3_TESTER_SSE_KMS, .put_options = { .object_size_mb = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_sse_aes256, s_test_s3_put_object_sse_aes256) static int s_test_s3_put_object_sse_aes256(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 20 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .sse_type = AWS_S3_TESTER_SSE_AES256, .put_options = { .object_size_mb = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_put_object_sse_aes256_multipart, s_test_s3_put_object_sse_aes256_multipart) static int s_test_s3_put_object_sse_aes256_multipart(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .sse_type = AWS_S3_TESTER_SSE_AES256, .put_options = { .object_size_mb = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } 
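/* Illustrative sketch, not used by any test in this file: the tester's sse_type options presumably boil down to
 * adding the standard S3 server-side-encryption request headers (for SSE-C that would be the
 * x-amz-server-side-encryption-customer-algorithm / -customer-key / -customer-key-MD5 trio). For SSE-KMS it would
 * look roughly like the hypothetical helper below; the header name and value are documented S3 behavior, while the
 * helper itself exists purely as an example. */
static inline void s_example_add_sse_kms_header(struct aws_http_message *message) {
    /* Asking S3 to encrypt the object server-side with a KMS-managed key. */
    struct aws_http_header sse_header = {
        .name = aws_byte_cursor_from_c_str("x-amz-server-side-encryption"),
        .value = aws_byte_cursor_from_c_str("aws:kms"),
    };
    aws_http_message_add_header(message, sse_header);
}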
AWS_TEST_CASE(test_s3_put_object_sse_c_aes256_multipart, s_test_s3_put_object_sse_c_aes256_multipart) static int s_test_s3_put_object_sse_c_aes256_multipart(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( tester.allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test_sse_c.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .sse_type = AWS_S3_TESTER_SSE_C_AES256, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_byte_buf_clean_up(&path_buf); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE( test_s3_put_object_sse_c_aes256_multipart_with_checksum, s_test_s3_put_object_sse_c_aes256_multipart_with_checksum) static int s_test_s3_put_object_sse_c_aes256_multipart_with_checksum(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( tester.allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test_sse_c_fc.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .sse_type = AWS_S3_TESTER_SSE_C_AES256, .checksum_algorithm = AWS_SCA_CRC32, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); aws_byte_buf_clean_up(&path_buf); aws_s3_tester_clean_up(&tester); return 0; } static int s_test_s3_put_object_content_md5_helper( struct aws_allocator *allocator, bool multipart_upload, uint32_t flags, enum aws_s3_meta_request_compute_content_md5 compute_content_md5) { struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); size_t part_size = 5 * 1024 * 1024; if (!multipart_upload) { /* content_length < part_size */ part_size = 15 * 1024 * 1024; } struct aws_s3_client_config client_config = { .part_size = part_size, }; client_config.compute_content_md5 = compute_content_md5; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct 
aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); ASSERT_SUCCESS(aws_s3_tester_send_put_object_meta_request(&tester, client, 10, flags, NULL)); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE( test_s3_put_object_singlepart_no_content_md5_enabled, s_test_s3_put_object_singlepart_no_content_md5_enabled) static int s_test_s3_put_object_singlepart_no_content_md5_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, false, flags, AWS_MR_CONTENT_MD5_ENABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_singlepart_no_content_md5_disabled, s_test_s3_put_object_singlepart_no_content_md5_disabled) static int s_test_s3_put_object_singlepart_no_content_md5_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, false, flags, AWS_MR_CONTENT_MD5_DISABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_singlepart_correct_content_md5_enabled, s_test_s3_put_object_singlepart_correct_content_md5_enabled) static int s_test_s3_put_object_singlepart_correct_content_md5_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_CORRECT_CONTENT_MD5 | AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, false, flags, AWS_MR_CONTENT_MD5_ENABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_singlepart_correct_content_md5_disabled, s_test_s3_put_object_singlepart_correct_content_md5_disabled) static int s_test_s3_put_object_singlepart_correct_content_md5_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_CORRECT_CONTENT_MD5 | AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, false, flags, AWS_MR_CONTENT_MD5_DISABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_singlepart_incorrect_content_md5_enabled, s_test_s3_put_object_singlepart_incorrect_content_md5_enabled) static int s_test_s3_put_object_singlepart_incorrect_content_md5_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_INCORRECT_CONTENT_MD5; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, false, flags, AWS_MR_CONTENT_MD5_ENABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_singlepart_incorrect_content_md5_disabled, s_test_s3_put_object_singlepart_incorrect_content_md5_disabled) static int s_test_s3_put_object_singlepart_incorrect_content_md5_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_INCORRECT_CONTENT_MD5; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, false, flags, AWS_MR_CONTENT_MD5_DISABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_multipart_no_content_md5_enabled, s_test_s3_put_object_multipart_no_content_md5_enabled) static int s_test_s3_put_object_multipart_no_content_md5_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, true, flags, AWS_MR_CONTENT_MD5_ENABLED)); return 0; } 
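/*
 * For reference: a Content-MD5 value such as the hard-coded "+y3U+EY5uFXhVVmRoiJWyA=="
 * used by s_test_s3_upload_part_message_helper further below is simply the base64
 * encoding of the 16-byte MD5 digest of the payload. A minimal sketch of that
 * derivation, assuming the usual aws_md5_compute() / aws_base64_encode() helpers from
 * aws-c-cal and aws-c-common are available; it is not called by any test in this file:
 */
static inline int s_sketch_compute_content_md5(
    struct aws_allocator *allocator,
    struct aws_byte_cursor payload,
    struct aws_byte_buf *out_base64_md5) {

    struct aws_byte_buf digest;
    aws_byte_buf_init(&digest, allocator, 16 /* MD5 digest size in bytes */);

    int result = AWS_OP_ERR;
    if (aws_md5_compute(allocator, &payload, &digest, 0) == AWS_OP_SUCCESS) {
        /* base64-encode the raw digest to produce the header value */
        struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest);
        aws_byte_buf_init(out_base64_md5, allocator, 32 /* enough for base64 of 16 bytes */);
        result = aws_base64_encode(&digest_cursor, out_base64_md5);
    }

    aws_byte_buf_clean_up(&digest);
    return result;
}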
AWS_TEST_CASE( test_s3_put_object_multipart_no_content_md5_disabled, s_test_s3_put_object_multipart_no_content_md5_disabled) static int s_test_s3_put_object_multipart_no_content_md5_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, true, flags, AWS_MR_CONTENT_MD5_DISABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_multipart_correct_content_md5_enabled, s_test_s3_put_object_multipart_correct_content_md5_enabled) static int s_test_s3_put_object_multipart_correct_content_md5_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_CORRECT_CONTENT_MD5 | AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, true, flags, AWS_MR_CONTENT_MD5_ENABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_multipart_correct_content_md5_disabled, s_test_s3_put_object_multipart_correct_content_md5_disabled) static int s_test_s3_put_object_multipart_correct_content_md5_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_CORRECT_CONTENT_MD5 | AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, true, flags, AWS_MR_CONTENT_MD5_DISABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_multipart_incorrect_content_md5_enabled, s_test_s3_put_object_multipart_incorrect_content_md5_enabled) static int s_test_s3_put_object_multipart_incorrect_content_md5_enabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_INCORRECT_CONTENT_MD5 | AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, true, flags, AWS_MR_CONTENT_MD5_ENABLED)); return 0; } AWS_TEST_CASE( test_s3_put_object_multipart_incorrect_content_md5_disabled, s_test_s3_put_object_multipart_incorrect_content_md5_disabled) static int s_test_s3_put_object_multipart_incorrect_content_md5_disabled(struct aws_allocator *allocator, void *ctx) { (void)ctx; uint32_t flags = AWS_S3_TESTER_SEND_META_REQUEST_WITH_INCORRECT_CONTENT_MD5 | AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS; ASSERT_SUCCESS(s_test_s3_put_object_content_md5_helper(allocator, true, flags, AWS_MR_CONTENT_MD5_DISABLED)); return 0; } static int s_test_s3_upload_part_message_helper(struct aws_allocator *allocator, bool should_compute_content_md5) { aws_s3_library_init(allocator); struct aws_byte_buf test_buffer; aws_s3_create_test_buffer(allocator, 19 /* size of "This is an S3 test." */, &test_buffer); /* base64 encoded md5 of "This is an S3 test." */ struct aws_byte_cursor expected_content_md5 = aws_byte_cursor_from_c_str("+y3U+EY5uFXhVVmRoiJWyA=="); struct aws_byte_cursor test_body_cursor = aws_byte_cursor_from_buf(&test_buffer); struct aws_input_stream *input_stream = aws_input_stream_new_from_cursor(allocator, &test_body_cursor); struct aws_byte_cursor host_name = aws_byte_cursor_from_c_str("dummy_host"); struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str("dummy_key"); /* Put together a simple S3 Put Object request. 
*/ struct aws_http_message *base_message = aws_s3_test_put_object_request_new( allocator, &host_name, test_object_path, g_test_body_content_type, input_stream, AWS_S3_TESTER_SSE_NONE); uint32_t part_number = 1; struct aws_string *upload_id = aws_string_new_from_c_str(allocator, "dummy_upload_id"); struct aws_http_message *new_message = aws_s3_upload_part_message_new( allocator, base_message, &test_buffer, part_number, upload_id, should_compute_content_md5, NULL, NULL); struct aws_http_headers *new_headers = aws_http_message_get_headers(new_message); if (should_compute_content_md5) { ASSERT_TRUE(aws_http_headers_has(new_headers, g_content_md5_header_name)); struct aws_byte_cursor content_md5; aws_http_headers_get(new_headers, g_content_md5_header_name, &content_md5); ASSERT_BIN_ARRAYS_EQUALS(expected_content_md5.ptr, expected_content_md5.len, content_md5.ptr, content_md5.len); } else { ASSERT_FALSE(aws_http_headers_has(new_headers, g_content_md5_header_name)); } aws_http_message_release(new_message); new_message = NULL; aws_http_message_release(base_message); base_message = NULL; aws_string_destroy(upload_id); upload_id = NULL; aws_input_stream_release(input_stream); input_stream = NULL; aws_byte_buf_clean_up(&test_buffer); aws_s3_library_clean_up(); return 0; } AWS_TEST_CASE(test_s3_upload_part_message_with_content_md5, s_test_s3_upload_part_message_with_content_md5) static int s_test_s3_upload_part_message_with_content_md5(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_upload_part_message_helper(allocator, true)); return 0; } AWS_TEST_CASE(test_s3_upload_part_message_without_content_md5, s_test_s3_upload_part_message_without_content_md5) static int s_test_s3_upload_part_message_without_content_md5(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(s_test_s3_upload_part_message_helper(allocator, false)); return 0; } AWS_TEST_CASE( test_s3_create_multipart_upload_message_with_content_md5, s_test_s3_create_multipart_upload_message_with_content_md5) static int s_test_s3_create_multipart_upload_message_with_content_md5(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf test_buffer; aws_s3_create_test_buffer(allocator, 19 /* size of "This is an S3 test." */, &test_buffer); struct aws_byte_cursor test_body_cursor = aws_byte_cursor_from_buf(&test_buffer); struct aws_input_stream *input_stream = aws_input_stream_new_from_cursor(allocator, &test_body_cursor); struct aws_byte_cursor host_name = aws_byte_cursor_from_c_str("dummy_host"); struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str("dummy_key"); /* Put together a simple S3 Put Object request. 
*/ struct aws_http_message *base_message = aws_s3_test_put_object_request_new( allocator, &host_name, test_object_path, g_test_body_content_type, input_stream, AWS_S3_TESTER_SSE_NONE); struct aws_http_header content_md5_header = { .name = g_content_md5_header_name, .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("dummy_content_md5"), }; ASSERT_SUCCESS(aws_http_message_add_header(base_message, content_md5_header)); struct aws_http_headers *base_headers = aws_http_message_get_headers(base_message); ASSERT_TRUE(aws_http_headers_has(base_headers, g_content_md5_header_name)); struct aws_http_message *new_message = aws_s3_create_multipart_upload_message_new(allocator, base_message, AWS_SCA_NONE); struct aws_http_headers *new_headers = aws_http_message_get_headers(new_message); ASSERT_FALSE(aws_http_headers_has(new_headers, g_content_md5_header_name)); aws_http_message_release(new_message); new_message = NULL; aws_http_message_release(base_message); base_message = NULL; aws_input_stream_release(input_stream); input_stream = NULL; aws_byte_buf_clean_up(&test_buffer); return 0; } AWS_TEST_CASE( test_s3_complete_multipart_message_with_content_md5, s_test_s3_complete_multipart_message_with_content_md5) static int s_test_s3_complete_multipart_message_with_content_md5(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf test_buffer; aws_s3_create_test_buffer(allocator, 19 /* size of "This is an S3 test." */, &test_buffer); struct aws_byte_cursor test_body_cursor = aws_byte_cursor_from_buf(&test_buffer); struct aws_input_stream *input_stream = aws_input_stream_new_from_cursor(allocator, &test_body_cursor); struct aws_byte_cursor host_name = aws_byte_cursor_from_c_str("dummy_host"); struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str("dummy_key"); /* Put together a simple S3 Put Object request. 
*/ struct aws_http_message *base_message = aws_s3_test_put_object_request_new( allocator, &host_name, test_object_path, g_test_body_content_type, input_stream, AWS_S3_TESTER_SSE_NONE); struct aws_http_header content_md5_header = { .name = g_content_md5_header_name, .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("dummy_content_md5"), }; ASSERT_SUCCESS(aws_http_message_add_header(base_message, content_md5_header)); struct aws_http_headers *base_headers = aws_http_message_get_headers(base_message); ASSERT_TRUE(aws_http_headers_has(base_headers, g_content_md5_header_name)); struct aws_byte_buf body_buffer; aws_byte_buf_init(&body_buffer, allocator, 512); struct aws_string *upload_id = aws_string_new_from_c_str(allocator, "dummy_upload_id"); struct aws_array_list parts; ASSERT_SUCCESS(aws_array_list_init_dynamic(&parts, allocator, 0, sizeof(struct aws_s3_mpu_part_info *))); struct aws_http_message *new_message = aws_s3_complete_multipart_message_new(allocator, base_message, &body_buffer, upload_id, &parts, AWS_SCA_NONE); struct aws_http_headers *new_headers = aws_http_message_get_headers(new_message); ASSERT_FALSE(aws_http_headers_has(new_headers, g_content_md5_header_name)); aws_http_message_release(new_message); new_message = NULL; aws_http_message_release(base_message); base_message = NULL; aws_array_list_clean_up(&parts); aws_string_destroy(upload_id); upload_id = NULL; aws_byte_buf_clean_up(&body_buffer); aws_input_stream_release(input_stream); input_stream = NULL; aws_byte_buf_clean_up(&test_buffer); return 0; } AWS_TEST_CASE(test_s3_put_object_double_slashes, s_test_s3_put_object_double_slashes) static int s_test_s3_put_object_double_slashes(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS( aws_s3_tester_upload_file_path_init(allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix//test.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .put_options = { .object_size_mb = 1, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); aws_byte_buf_clean_up(&path_buf); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_round_trip, s_test_s3_round_trip) static int s_test_s3_round_trip(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 16 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 1, .object_path_override = object_path, }, }; 
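    /*
     * Note on the 16 KiB part_size above: part_size influences both how an upload is
     * split and how large each ranged GET is. S3 itself requires multipart parts of at
     * least 5 MiB (except the last one), so this 1 MiB object presumably goes up as a
     * single PutObject, while the GET below is presumably fetched as a series of small
     * ranged requests sized by part_size.
     */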
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .get_options = { .object_path = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_round_trip_default_get, s_test_s3_round_trip_default_get) static int s_test_s3_round_trip_default_get(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 16 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test_default.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 1, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .get_options = { .object_path = object_path, }, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } int s_s3_validate_headers_checksum_set( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)response_status; (void)headers; struct aws_s3_meta_request_test_results *meta_request_test_results = (struct aws_s3_meta_request_test_results *)user_data; ASSERT_NOT_NULL(meta_request->meta_request_level_running_response_sum); ASSERT_INT_EQUALS( meta_request->meta_request_level_running_response_sum->algorithm, meta_request_test_results->algorithm); return AWS_OP_SUCCESS; } int s_s3_validate_headers_checksum_unset( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)response_status; (void)headers; (void)user_data; ASSERT_NULL(meta_request->meta_request_level_running_response_sum); return AWS_OP_SUCCESS; } void s_s3_test_validate_checksum( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *result, void *user_data) { (void)meta_request; struct aws_s3_meta_request_test_results *meta_request_test_results = (struct aws_s3_meta_request_test_results *)user_data; AWS_FATAL_ASSERT(result->did_validate); AWS_FATAL_ASSERT(result->validation_algorithm == meta_request_test_results->algorithm); AWS_FATAL_ASSERT(result->error_code == AWS_OP_SUCCESS); } void 
s_s3_test_no_validate_checksum( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *result, void *user_data) { (void)meta_request; (void)user_data; AWS_FATAL_ASSERT(!result->did_validate); AWS_FATAL_ASSERT(result->error_code == AWS_OP_SUCCESS); } /* TODO: maybe refactor the fc -> flexible checksum tests to be less copy/paste */ AWS_TEST_CASE(test_s3_round_trip_default_get_fc, s_test_s3_round_trip_default_get_fc) static int s_test_s3_round_trip_default_get_fc(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); for (int algorithm = AWS_SCA_INIT; algorithm <= AWS_SCA_END; ++algorithm) { char object_path_sprintf_buffer[128] = ""; snprintf( object_path_sprintf_buffer, sizeof(object_path_sprintf_buffer), "/prefix/round_trip/test_default_fc_%d.txt", algorithm); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str(object_path_sprintf_buffer))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); /*** PUT FILE ***/ struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = algorithm, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 1, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .expected_validate_checksum_alg = algorithm, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .finish_callback = s_s3_test_validate_checksum, .headers_callback = s_s3_validate_headers_checksum_set, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_byte_buf_clean_up(&path_buf); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_round_trip_multipart_get_fc, s_test_s3_round_trip_multipart_get_fc) static int s_test_s3_round_trip_multipart_get_fc(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 16 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test_fc.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 1, .object_path_override = object_path, }, }; 
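    /*
     * "fc" is shorthand for S3 flexible checksums (see the refactor TODO above).
     * Uploading with .checksum_algorithm = AWS_SCA_CRC32 has the client attach a CRC32
     * checksum to the upload, and setting .validate_get_response_checksum = true on the
     * GET below makes it recompute the checksum over the downloaded body and compare it
     * against the value S3 returns (e.g. the x-amz-checksum-crc32 header);
     * s_s3_test_validate_checksum and s_s3_validate_headers_checksum_set then assert
     * that this validation actually ran.
     */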
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .validate_get_response_checksum = true, .expected_validate_checksum_alg = AWS_SCA_CRC32, .get_options = { .object_path = object_path, }, .finish_callback = s_s3_test_validate_checksum, .headers_callback = s_s3_validate_headers_checksum_set, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } /* Test that an object uploaded via multipart upload is downloaded with the same part size, so the download ranges line up with the uploaded parts and each part's checksum is validated. */ AWS_TEST_CASE(test_s3_round_trip_mpu_multipart_get_fc, s_test_s3_round_trip_mpu_multipart_get_fc) static int s_test_s3_round_trip_mpu_multipart_get_fc(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test_mpu_fc.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .finish_callback = s_s3_test_validate_checksum, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_download_empty_file_with_checksum, s_test_s3_download_empty_file_with_checksum) static int s_test_s3_download_empty_file_with_checksum(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Upload the file */ struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS( aws_s3_tester_upload_file_path_init(allocator, &path_buf, aws_byte_cursor_from_c_str("/empty-file-CRC32.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct
aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .put_options = { .object_size_mb = 0, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE WITH GET_FIRST_PART ***/ uint64_t small_object_size_hint = 1; struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .finish_callback = s_s3_test_validate_checksum, .object_size_hint = &small_object_size_hint /* pass a object_size_hint > 0 so that the request goes through the getPart flow */, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); /*** GET FILE WITH HEAD_OBJECT ***/ get_options.object_size_hint = NULL; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_s3_client_release(client); aws_byte_buf_clean_up(&path_buf); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_download_single_part_file_with_checksum, s_test_s3_download_single_part_file_with_checksum) static int s_test_s3_download_single_part_file_with_checksum(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Upload the file */ struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(10), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/single-part-10Mb-CRC32.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); uint32_t object_size_mb = 10; struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .put_options = { .object_size_mb = object_size_mb, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size < file_size ***/ client_options.part_size = MB_TO_BYTES(3); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); uint64_t object_size_hint = MB_TO_BYTES(object_size_mb); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .finish_callback = s_s3_test_validate_checksum, .object_size_hint = &object_size_hint, }; uint64_t small_object_size_hint = MB_TO_BYTES(1); /* will do headRequest */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with 
part_size > file_size ***/ client_options.part_size = MB_TO_BYTES(20); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; /* will do getPart */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /* will do getPart */ /*** GET FILE with part_size = file_size ***/ client_options.part_size = MB_TO_BYTES(10); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size < file_size and wrong object_size_hint ***/ client_options.part_size = MB_TO_BYTES(3); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; get_options.object_size_hint = &small_object_size_hint; /* will do getPart first, cancel it and then rangedGet */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; aws_byte_buf_clean_up(&path_buf); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_download_multipart_file_with_checksum, s_test_s3_download_multipart_file_with_checksum) static int s_test_s3_download_multipart_file_with_checksum(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Upload the file */ struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/multipart-10Mb-CRC32.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); uint32_t object_size_mb = 10; struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .put_options = { .object_size_mb = object_size_mb, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size < first_part_size ***/ client_options.part_size = MB_TO_BYTES(3); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); uint64_t object_size_hint = MB_TO_BYTES(object_size_mb); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .object_size_hint = &object_size_hint, }; /* will do HeadRequest first */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size > first_part_size ***/ client_options.part_size = MB_TO_BYTES(7); 
ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; /* will do HeadObject first */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size = first_part_size ***/ client_options.part_size = MB_TO_BYTES(5); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; get_options.finish_callback = s_s3_test_validate_checksum; /* will do HeadObject First */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; uint64_t small_object_size_hint = 1; /*** GET FILE with with wrong object_size_hint ***/ get_options.object_size_hint = &small_object_size_hint; get_options.finish_callback = NULL; /*** GET FILE with part_size < first_part_size***/ client_options.part_size = MB_TO_BYTES(3); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; /* will do GetPart, cancel the request and then do ranged Gets. */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size > first_part_size ***/ client_options.part_size = MB_TO_BYTES(7); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; get_options.finish_callback = s_s3_test_validate_checksum; /* will do GetPart first */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size = first_part_size ***/ client_options.part_size = MB_TO_BYTES(5); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; get_options.finish_callback = s_s3_test_validate_checksum; /* will do GetPart first */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; /*** GET FILE with part_size > fileSize ***/ /* TODO: Enable this test once the checksum issue is resolved. Currently, when the S3 GetObject API is called with * the range 0-contentLength, it returns a checksum of checksums without the -numParts portion. This leads to a * checksum mismatch error, as it is incorrectly validated as a part checksum. 
*/ /* client_options.part_size = MB_TO_BYTES(20); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); get_options.client = client; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); client = aws_s3_client_release(client); tester.bound_to_client = false; */ aws_byte_buf_clean_up(&path_buf); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE( test_s3_round_trip_mpu_multipart_get_with_list_algorithm_fc, s_test_s3_round_trip_mpu_multipart_get_with_list_algorithm_fc) static int s_test_s3_round_trip_mpu_multipart_get_with_list_algorithm_fc(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test_mpu_fc.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE ***/ struct aws_array_list response_checksum_list; /* Check for all algorithm but the CRC32 */ ASSERT_SUCCESS( aws_array_list_init_dynamic(&response_checksum_list, allocator, 4, sizeof(enum aws_s3_checksum_algorithm))); enum aws_s3_checksum_algorithm alg = AWS_SCA_CRC32C; ASSERT_SUCCESS(aws_array_list_push_back(&response_checksum_list, &alg)); alg = AWS_SCA_SHA1; ASSERT_SUCCESS(aws_array_list_push_back(&response_checksum_list, &alg)); alg = AWS_SCA_SHA256; ASSERT_SUCCESS(aws_array_list_push_back(&response_checksum_list, &alg)); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .validate_checksum_algorithms = &response_checksum_list, .get_options = { .object_path = object_path, }, .finish_callback = s_s3_test_no_validate_checksum, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); /* Push all the algorithms to the list for validation, now we should have the checksum validated. 
*/ alg = AWS_SCA_CRC32; ASSERT_SUCCESS(aws_array_list_push_back(&response_checksum_list, &alg)); get_options.finish_callback = s_s3_test_validate_checksum; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); aws_array_list_clean_up(&response_checksum_list); return 0; } AWS_TEST_CASE(test_s3_round_trip_mpu_default_get_fc, s_test_s3_round_trip_mpu_default_get_fc) static int s_test_s3_round_trip_mpu_default_get_fc(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/round_trip/test_mpu_default_get_fc.txt"))); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /*** GET FILE ***/ struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .finish_callback = s_s3_test_no_validate_checksum, .headers_callback = s_s3_validate_headers_checksum_unset, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static int s_test_s3_round_trip_with_filepath_helper( struct aws_allocator *allocator, struct aws_byte_cursor key, int object_size_mb, bool unknown_content_length) { struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(8), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); /*** PUT FILE ***/ struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init(allocator, &path_buf, key)); struct aws_byte_cursor object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = object_size_mb, .object_path_override = object_path, .file_on_disk = true, .skip_content_length = unknown_content_length, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, 
NULL)); /*** GET FILE ***/ aws_s3_meta_request_test_results_clean_up(&test_results); aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .client = client, .get_options = { .object_path = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &test_results)); ASSERT_UINT_EQUALS(MB_TO_BYTES(put_options.put_options.object_size_mb), test_results.received_body_size); aws_s3_meta_request_test_results_clean_up(&test_results); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_round_trip_with_filepath, s_test_s3_round_trip_with_filepath) static int s_test_s3_round_trip_with_filepath(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_s3_round_trip_with_filepath_helper( allocator, aws_byte_cursor_from_c_str("/prefix/round_trip/with_filepath"), 1, false /*unknown_content_length*/); } AWS_TEST_CASE(test_s3_round_trip_mpu_with_filepath, s_test_s3_round_trip_mpu_with_filepath) static int s_test_s3_round_trip_mpu_with_filepath(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_s3_round_trip_with_filepath_helper( allocator, aws_byte_cursor_from_c_str("/prefix/round_trip/with_filepath_mpu"), 50, false /*unknown_content_length*/); } AWS_TEST_CASE(test_s3_round_trip_with_filepath_no_content_length, s_test_s3_round_trip_with_filepath_no_content_length) static int s_test_s3_round_trip_with_filepath_no_content_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_s3_round_trip_with_filepath_helper( allocator, aws_byte_cursor_from_c_str("/prefix/round_trip/with_filepath_no_content_length"), 1, true /*unknown_content_length*/); } AWS_TEST_CASE( test_s3_round_trip_mpu_with_filepath_no_content_length, s_test_s3_round_trip_mpu_with_filepath_no_content_length) static int s_test_s3_round_trip_mpu_with_filepath_no_content_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_test_s3_round_trip_with_filepath_helper( allocator, aws_byte_cursor_from_c_str("/prefix/round_trip/with_filepath_mpu_no_content_length"), 50, true /*unknown_content_length*/); } AWS_TEST_CASE(test_s3_chunked_then_unchunked, s_test_s3_chunked_then_unchunked) static int s_test_s3_chunked_then_unchunked(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* Test to see if signed_body_value modified when signing chunked request */ struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_buf path_buf; AWS_ZERO_STRUCT(path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/chunked_unchunked/test_chunked.txt"))); struct aws_byte_cursor chunked_object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options chunked_put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = chunked_object_path, }, }; 
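    /*
     * What "chunked then unchunked" exercises: with a checksum algorithm set
     * (AWS_SCA_CRC32), the upload body is presumably sent aws-chunked with a trailing
     * checksum, which signs with a streaming signed-body value; the second upload below
     * uses AWS_SCA_NONE and a plain unchunked body. Running both against the same client
     * checks that signing the chunked request does not leave a modified signed_body_value
     * behind that would break the signature of the following unchunked request.
     */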
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &chunked_put_options, NULL)); aws_byte_buf_clean_up(&path_buf); ASSERT_SUCCESS(aws_s3_tester_upload_file_path_init( allocator, &path_buf, aws_byte_cursor_from_c_str("/prefix/chunked_unchunked/test_unchunked.txt"))); struct aws_byte_cursor unchunked_object_path = aws_byte_cursor_from_buf(&path_buf); struct aws_s3_tester_meta_request_options unchunked_put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_NONE, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = unchunked_object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &unchunked_put_options, NULL)); aws_byte_buf_clean_up(&path_buf); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_meta_request_default, s_test_s3_meta_request_default) static int s_test_s3_meta_request_default(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); /* Pass the request through as a default request so that it goes through as-is. */ options.type = AWS_S3_META_REQUEST_TYPE_DEFAULT; options.message = message; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_request != NULL); /* Wait for the request to finish. 
*/ aws_s3_tester_wait_for_meta_request_finish(&tester); aws_s3_tester_lock_synced_data(&tester); ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); aws_s3_tester_unlock_synced_data(&tester); /* Check the size of the metrics should be the same as the number of requests, which should be 1 */ ASSERT_UINT_EQUALS(1, aws_array_list_length(&meta_request_test_results.synced_data.metrics)); struct aws_s3_request_metrics *metrics = NULL; aws_array_list_back(&meta_request_test_results.synced_data.metrics, (void **)&metrics); ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(&meta_request_test_results, 0)); meta_request = aws_s3_meta_request_release(meta_request); aws_s3_tester_wait_for_meta_request_shutdown(&tester); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); message = NULL; aws_string_destroy(host_name); host_name = NULL; client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_error_missing_file, s_test_s3_error_missing_file) static int s_test_s3_error_missing_file(struct aws_allocator *allocator, void *ctx) { (void)ctx; const struct aws_byte_cursor test_object_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/non-existing-file12345.txt"); struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 64 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, aws_byte_cursor_from_string(host_name), test_object_path); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; /* Trigger accelerating of our Get Object request. */ struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_request != NULL); /* Wait for the request to finish. 
*/ aws_s3_tester_wait_for_meta_request_finish(&tester); aws_s3_tester_lock_synced_data(&tester); ASSERT_TRUE(tester.synced_data.finish_error_code != AWS_ERROR_SUCCESS); aws_s3_tester_unlock_synced_data(&tester); ASSERT_TRUE(meta_request_test_results.finished_response_status == 404); ASSERT_TRUE(meta_request_test_results.finished_error_code != AWS_ERROR_SUCCESS); ASSERT_TRUE(meta_request_test_results.error_response_headers != NULL); ASSERT_NOT_NULL(meta_request_test_results.error_response_operation_name); ASSERT_TRUE( aws_string_eq_c_str(meta_request_test_results.error_response_operation_name, "GetObject") || aws_string_eq_c_str(meta_request_test_results.error_response_operation_name, "HeadObject")); meta_request = aws_s3_meta_request_release(meta_request); aws_s3_tester_wait_for_meta_request_shutdown(&tester); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); message = NULL; aws_string_destroy(host_name); host_name = NULL; client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static void s_test_s3_existing_host_entry_address_resolved_callback( struct aws_host_resolver *resolver, const struct aws_string *host_name, int err_code, const struct aws_array_list *host_addresses, void *user_data) { (void)resolver; (void)host_name; (void)err_code; (void)host_addresses; struct aws_s3_tester *tester = user_data; AWS_ASSERT(tester); aws_s3_tester_notify_signal(tester); } AWS_TEST_CASE(test_s3_existing_host_entry, s_test_s3_existing_host_entry) static int s_test_s3_existing_host_entry(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_public_bucket_name, &g_test_s3_region); { struct aws_host_resolution_config host_resolver_config; AWS_ZERO_STRUCT(host_resolver_config); host_resolver_config.impl = aws_default_dns_resolve; host_resolver_config.max_ttl = 30; host_resolver_config.impl_data = NULL; ASSERT_SUCCESS(aws_host_resolver_resolve_host( client_config.client_bootstrap->host_resolver, host_name, s_test_s3_existing_host_entry_address_resolved_callback, &host_resolver_config, &tester)); aws_s3_tester_wait_for_signal(&tester); } /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), g_pre_existing_object_1MB); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; /* Trigger accelerating of our Get Object request. 
*/ struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request( &tester, client, &options, &meta_request_test_results, AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS)); ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(&meta_request_test_results, 0)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_string_destroy(host_name); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_bad_endpoint, s_test_s3_bad_endpoint) static int s_test_s3_bad_endpoint(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_byte_cursor test_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("test_key"); AWS_STATIC_STRING_FROM_LITERAL(invalid_host_name, "invalid_host_name_totally_absolutely"); /* Construct a message that points to an invalid host name. Key can be anything. */ struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, aws_byte_cursor_from_string(invalid_host_name), test_key); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request(&tester, client, &options, &meta_request_test_results, 0)); ASSERT_TRUE( meta_request_test_results.finished_error_code == AWS_IO_DNS_INVALID_NAME || meta_request_test_results.finished_error_code == AWS_IO_DNS_QUERY_FAILED); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static int s_s3_test_headers_callback_raise_error( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)meta_request; (void)headers; (void)response_status; (void)user_data; aws_raise_error(AWS_ERROR_UNKNOWN); return AWS_OP_ERR; } static int s_s3_test_body_callback_raise_error( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)meta_request; (void)body; (void)range_start; (void)user_data; aws_raise_error(AWS_ERROR_UNKNOWN); return AWS_OP_ERR; } AWS_TEST_CASE(test_s3_put_object_fail_headers_callback, s_test_s3_put_object_fail_headers_callback) static int s_test_s3_put_object_fail_headers_callback(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .headers_callback = s_s3_test_headers_callback_raise_error, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { 
.ensure_multipart = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_put_object_fail_body_callback, s_test_s3_put_object_fail_body_callback) static int s_test_s3_put_object_fail_body_callback(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .body_callback = s_s3_test_body_callback_raise_error, /* Put object currently never invokes the body callback, which means it should not fail. */ .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .put_options = { .ensure_multipart = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, NULL)); return 0; } AWS_TEST_CASE(test_s3_get_object_fail_headers_callback, s_test_s3_get_object_fail_headers_callback) static int s_test_s3_get_object_fail_headers_callback(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .headers_callback = s_s3_test_headers_callback_raise_error, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_get_object_fail_body_callback, s_test_s3_get_object_fail_body_callback) static int s_test_s3_get_object_fail_body_callback(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .body_callback = s_s3_test_body_callback_raise_error, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_default_fail_headers_callback, s_test_s3_default_fail_headers_callback) static int s_test_s3_default_fail_headers_callback(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .headers_callback = s_s3_test_headers_callback_raise_error, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, 
.get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } static struct aws_atomic_var s_test_headers_callback_invoked; static int s_s3_test_headers_callback_check_returns_success( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)meta_request; (void)headers; (void)response_status; (void)user_data; /* increments counter to check if callback was invoked exactly once */ aws_atomic_fetch_add(&s_test_headers_callback_invoked, 1); return AWS_OP_SUCCESS; } static int s_s3_test_headers_callback_check_returns_error( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)meta_request; (void)headers; (void)response_status; (void)user_data; /* increments counter to check if callback was invoked exactly once */ aws_atomic_fetch_add(&s_test_headers_callback_invoked, 1); aws_raise_error(AWS_ERROR_UNKNOWN); return AWS_OP_ERR; } AWS_TEST_CASE(test_s3_default_invoke_headers_callback_on_error, s_test_s3_default_invoke_headers_callback_on_error) static int s_test_s3_default_invoke_headers_callback_on_error(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); aws_atomic_init_int(&s_test_headers_callback_invoked, 0); struct aws_byte_cursor invalid_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("___INVALID_PATH___"); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .headers_callback = s_s3_test_headers_callback_check_returns_success, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .get_options = { .object_path = invalid_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_INT_EQUALS(1, aws_atomic_load_int(&s_test_headers_callback_invoked)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE( test_s3_default_invoke_headers_callback_cancels_on_error, s_test_s3_default_invoke_headers_callback_cancels_on_error) static int s_test_s3_default_invoke_headers_callback_cancels_on_error(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); aws_atomic_init_int(&s_test_headers_callback_invoked, 0); struct aws_byte_cursor invalid_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("___INVALID_PATH___"); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .headers_callback = s_s3_test_headers_callback_check_returns_error, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .get_options = { .object_path = invalid_path, }, }; 
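/* The headers callback registered above raises AWS_ERROR_UNKNOWN and returns an error, which is expected to cancel the meta request; the asserts below check that the callback ran exactly once and that the request finished with AWS_ERROR_UNKNOWN. */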
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_INT_EQUALS(1, aws_atomic_load_int(&s_test_headers_callback_invoked)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE( test_s3_get_object_invoke_headers_callback_on_error, s_test_s3_get_object_invoke_headers_callback_on_error) static int s_test_s3_get_object_invoke_headers_callback_on_error(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); aws_atomic_init_int(&s_test_headers_callback_invoked, 0); struct aws_byte_cursor invalid_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("___INVALID_PATH___"); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .headers_callback = s_s3_test_headers_callback_check_returns_success, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = invalid_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_INT_EQUALS(1, aws_atomic_load_int(&s_test_headers_callback_invoked)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE( test_s3_put_object_invoke_headers_callback_on_error, s_test_s3_put_object_invoke_headers_callback_on_error) static int s_test_s3_put_object_invoke_headers_callback_on_error(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); aws_atomic_init_int(&s_test_headers_callback_invoked, 0); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .headers_callback = s_s3_test_headers_callback_check_returns_success, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .object_size_mb = 10, .invalid_request = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_INT_EQUALS(1, aws_atomic_load_int(&s_test_headers_callback_invoked)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_INVALID_RESPONSE_STATUS, meta_request_test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE( test_s3_put_object_invoke_headers_callback_on_error_with_user_cancellation, s_test_s3_put_object_invoke_headers_callback_on_error_with_user_cancellation) static int s_test_s3_put_object_invoke_headers_callback_on_error_with_user_cancellation( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); aws_atomic_init_int(&s_test_headers_callback_invoked, 0); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .headers_callback = s_s3_test_headers_callback_check_returns_error, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .ensure_multipart = 
true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_INT_EQUALS(1, aws_atomic_load_int(&s_test_headers_callback_invoked)); ASSERT_UINT_EQUALS(AWS_ERROR_UNKNOWN, meta_request_test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_default_fail_body_callback, s_test_s3_default_fail_body_callback) static int s_test_s3_default_fail_body_callback(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .body_callback = s_s3_test_body_callback_raise_error, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } /* Test that if a DEFAULT meta-request sets the operation_name, and gets an error response, * then aws_s3_meta_request_result.error_response_operation_name is set. */ AWS_TEST_CASE(test_s3_default_fail_operation_name, s_test_s3_default_fail_operation_name) static int s_test_s3_default_fail_operation_name(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_byte_cursor invalid_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("___INVALID_PATH___"); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, .operation_name = aws_byte_cursor_from_c_str("GetObject"), }, .get_options = { .object_path = invalid_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS); ASSERT_STR_EQUALS("GetObject", aws_string_c_str(meta_request_test_results.error_response_operation_name)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_put_fail_object_invalid_request, s_test_s3_put_fail_object_invalid_request) static int s_test_s3_put_fail_object_invalid_request(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .object_size_mb = 1, .invalid_request = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_INVALID_RESPONSE_STATUS, 
meta_request_test_results.finished_error_code); /* Since 1MB is under part_size, there will be a single PutObject request */ ASSERT_STR_EQUALS("PutObject", aws_string_c_str(meta_request_test_results.error_response_operation_name)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } /* Test that we fail to create a meta request when an invalid `send_filepath` is passed in */ AWS_TEST_CASE(test_s3_put_fail_object_invalid_send_filepath, s_test_s3_put_fail_object_invalid_send_filepath) static int s_test_s3_put_fail_object_invalid_send_filepath(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor host_name = aws_byte_cursor_from_c_str("dummy_host"); struct aws_byte_cursor object_key = aws_byte_cursor_from_c_str("dummy_key"); struct aws_http_message *message = aws_s3_test_put_object_request_new_without_body( allocator, &host_name, g_test_body_content_type, object_key, 1024 /*content_length*/, 0 /*flags*/); ASSERT_NOT_NULL(message); struct aws_s3_meta_request_options meta_request_options = { .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .message = message, .send_filepath = aws_byte_cursor_from_c_str("obviously_invalid_file_path"), }; struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &meta_request_options); ASSERT_NULL(meta_request); ASSERT_INT_EQUALS(AWS_ERROR_FILE_INVALID_PATH, aws_last_error()); aws_http_message_release(message); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } /* Test that the upload fails when the parallel read stream fails to read the second part. 
*/ AWS_TEST_CASE(test_s3_put_fail_object_bad_parallel_read_stream, s_test_s3_put_fail_object_bad_parallel_read_stream) static int s_test_s3_put_fail_object_bad_parallel_read_stream(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* Override the parallel input stream new function to create a bad parallel input stream */ client->vtable->parallel_input_stream_new_from_file = aws_parallel_input_stream_new_from_file_failure_tester; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .object_size_mb = 100, .file_on_disk = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); ASSERT_UINT_EQUALS(AWS_ERROR_UNIMPLEMENTED, meta_request_test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_s3_put_single_part_fail_object_inputstream_fail_reading, s_test_s3_put_single_part_fail_object_inputstream_fail_reading) static int s_test_s3_put_single_part_fail_object_inputstream_fail_reading(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .invalid_input_stream = true, .content_length = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code != AWS_ERROR_SUCCESS); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE( test_s3_put_single_part_fail_object_inputstream_mismatch_content_length, s_test_s3_put_single_part_fail_object_inputstream_mismatch_content_length) static int s_test_s3_put_single_part_fail_object_inputstream_mismatch_content_length( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .object_size_mb = 1, .content_length = MB_TO_BYTES(2), }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code != AWS_ERROR_SUCCESS); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_put_fail_object_inputstream_fail_reading, 
s_test_s3_put_fail_object_inputstream_fail_reading) static int s_test_s3_put_fail_object_inputstream_fail_reading(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .ensure_multipart = true, .invalid_input_stream = true, .content_length = 10 * 1024 * 1024, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_UINT_EQUALS(AWS_IO_STREAM_READ_FAILED, meta_request_test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE( test_s3_put_fail_object_inputstream_mismatch_content_length, s_test_s3_put_fail_object_inputstream_mismatch_content_length) static int s_test_s3_put_fail_object_inputstream_mismatch_content_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .ensure_multipart = false, .object_size_mb = 1, .content_length = 10 * 1024 * 1024, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &options, &meta_request_test_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH, meta_request_test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); return 0; } AWS_TEST_CASE(test_s3_put_object_clamp_part_size, s_test_s3_put_object_clamp_part_size) static int s_test_s3_put_object_clamp_part_size(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 64 * 1024, .max_part_size = 64 * 1024, }; ASSERT_TRUE(client_config.part_size < g_s3_min_upload_part_size); ASSERT_TRUE(client_config.max_part_size < g_s3_min_upload_part_size); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); /* Upload should now succeed even when specifying a smaller than allowed part size. 
*/ struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &test_results)); ASSERT_TRUE(test_results.part_size == g_s3_min_upload_part_size); aws_s3_meta_request_test_results_clean_up(&test_results); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static int s_get_expected_user_agent(struct aws_allocator *allocator, struct aws_byte_buf *dest) { AWS_ASSERT(allocator); AWS_ASSERT(dest); const struct aws_byte_cursor forward_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); ASSERT_SUCCESS(aws_byte_buf_init(dest, allocator, 32)); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(dest, &g_user_agent_header_product_name)); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(dest, &forward_slash)); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(dest, &g_s3_client_version)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_add_user_agent_header, s_test_add_user_agent_header) static int s_test_add_user_agent_header(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); const struct aws_byte_cursor forward_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); const struct aws_byte_cursor single_space = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" "); struct aws_byte_buf expected_user_agent_value_buf; s_get_expected_user_agent(allocator, &expected_user_agent_value_buf); struct aws_byte_cursor expected_user_agent_value = aws_byte_cursor_from_buf(&expected_user_agent_value_buf); { struct aws_byte_cursor user_agent_value; AWS_ZERO_STRUCT(user_agent_value); struct aws_http_message *message = aws_http_message_new_request(allocator); aws_s3_add_user_agent_header(allocator, message); struct aws_http_headers *headers = aws_http_message_get_headers(message); ASSERT_TRUE(headers != NULL); ASSERT_SUCCESS(aws_http_headers_get(headers, g_user_agent_header_name, &user_agent_value)); ASSERT_TRUE(aws_byte_cursor_eq(&user_agent_value, &expected_user_agent_value)); aws_http_message_release(message); } { const struct aws_byte_cursor dummy_agent_header_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("dummy_user_agent_product/dummy_user_agent_value"); struct aws_byte_buf total_expected_user_agent_value_buf; aws_byte_buf_init(&total_expected_user_agent_value_buf, allocator, 64); aws_byte_buf_append_dynamic(&total_expected_user_agent_value_buf, &dummy_agent_header_value); aws_byte_buf_append_dynamic(&total_expected_user_agent_value_buf, &single_space); aws_byte_buf_append_dynamic(&total_expected_user_agent_value_buf, &g_user_agent_header_product_name); aws_byte_buf_append_dynamic(&total_expected_user_agent_value_buf, &forward_slash); aws_byte_buf_append_dynamic(&total_expected_user_agent_value_buf, &g_s3_client_version); struct aws_byte_cursor total_expected_user_agent_value = aws_byte_cursor_from_buf(&total_expected_user_agent_value_buf); struct aws_http_message *message = aws_http_message_new_request(allocator); struct aws_http_headers *headers = aws_http_message_get_headers(message); ASSERT_TRUE(headers != NULL); ASSERT_SUCCESS(aws_http_headers_add(headers, g_user_agent_header_name, dummy_agent_header_value)); aws_s3_add_user_agent_header(allocator, message); { struct aws_byte_cursor user_agent_value; AWS_ZERO_STRUCT(user_agent_value); ASSERT_SUCCESS(aws_http_headers_get(headers, 
g_user_agent_header_name, &user_agent_value)); ASSERT_TRUE(aws_byte_cursor_eq(&user_agent_value, &total_expected_user_agent_value)); } aws_byte_buf_clean_up(&total_expected_user_agent_value_buf); aws_http_message_release(message); } aws_byte_buf_clean_up(&expected_user_agent_value_buf); aws_s3_tester_clean_up(&tester); return 0; } static void s_s3_test_user_agent_meta_request_finished_request( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_ASSERT(meta_request != NULL); struct aws_s3_meta_request_test_results *results = meta_request->user_data; AWS_ASSERT(results != NULL); struct aws_s3_tester *tester = results->tester; AWS_ASSERT(tester != NULL); struct aws_byte_buf expected_user_agent_value_buf; s_get_expected_user_agent(meta_request->allocator, &expected_user_agent_value_buf); struct aws_byte_cursor expected_user_agent_value = aws_byte_cursor_from_buf(&expected_user_agent_value_buf); struct aws_http_message *message = request->send_data.message; struct aws_http_headers *headers = aws_http_message_get_headers(message); struct aws_byte_cursor user_agent_value; AWS_ZERO_STRUCT(user_agent_value); AWS_FATAL_ASSERT(aws_http_headers_get(headers, g_user_agent_header_name, &user_agent_value) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_byte_cursor_eq(&user_agent_value, &expected_user_agent_value)); aws_byte_buf_clean_up(&expected_user_agent_value_buf); struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; original_meta_request_vtable->finished_request(meta_request, request, error_code); } static struct aws_s3_meta_request *s_s3_meta_request_factory_override_finished_request( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->finished_request = s_s3_test_user_agent_meta_request_finished_request; return meta_request; } int s_s3_test_sending_user_agent_create_client(struct aws_s3_tester *tester, struct aws_s3_client **client) { AWS_ASSERT(tester); struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); ASSERT_SUCCESS(aws_s3_tester_client_new(tester, &client_options, client)); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(tester, *client, NULL); patched_client_vtable->meta_request_factory = s_s3_meta_request_factory_override_finished_request; return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_auto_ranged_get_sending_user_agent, s_test_s3_auto_ranged_get_sending_user_agent) static int s_test_s3_auto_ranged_get_sending_user_agent(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; ASSERT_SUCCESS(s_s3_test_sending_user_agent_create_client(&tester, &client)); { struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = 
AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, NULL)); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_auto_ranged_put_sending_user_agent, s_test_s3_auto_ranged_put_sending_user_agent) static int s_test_s3_auto_ranged_put_sending_user_agent(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; ASSERT_SUCCESS(s_s3_test_sending_user_agent_create_client(&tester, &client)); { struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .put_options = { .ensure_multipart = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, NULL)); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_default_sending_meta_request_user_agent, s_test_s3_default_sending_meta_request_user_agent) static int s_test_s3_default_sending_meta_request_user_agent(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; ASSERT_SUCCESS(s_s3_test_sending_user_agent_create_client(&tester, &client)); { struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, NULL)); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } struct range_requests_test_user_data { struct aws_http_headers *headers; struct aws_byte_buf *body_buffer; }; static int s_range_requests_headers_callback( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)meta_request; (void)response_status; struct aws_s3_meta_request_test_results *test_results = user_data; struct range_requests_test_user_data *test_user_data = test_results->tester->user_data; if (test_user_data != NULL) { copy_http_headers(headers, test_user_data->headers); } return AWS_OP_SUCCESS; } static int s_range_requests_receive_body_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)meta_request; (void)range_start; struct aws_s3_meta_request_test_results *test_results = user_data; struct range_requests_test_user_data *test_user_data = test_results->tester->user_data; aws_byte_buf_append_dynamic(test_user_data->body_buffer, body); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_range_requests, s_test_s3_range_requests) static int s_test_s3_range_requests(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); const struct aws_byte_cursor object_names[] = { g_pre_existing_object_1MB, g_pre_existing_object_kms_10MB, g_pre_existing_object_aes256_10MB, }; enum aws_s3_tester_sse_type object_sse_types[] = { 
AWS_S3_TESTER_SSE_NONE, AWS_S3_TESTER_SSE_KMS, AWS_S3_TESTER_SSE_AES256, }; const struct aws_byte_cursor ranges[] = { // No range at all. {0, NULL}, // Single byte range. AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=8-8"), // Single byte range (first byte). AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=0-0"), // First 8K. 8K < client's 16K part size. AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=0-8191"), // First 0.5 MB. 0.5 MB < 1 MB test file. AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=0-524287"), // 0.5 MB - 2 MB range. This overlaps and goes beyond the 1 MB test file size. AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=524288-2097151"), // Get everything after the first 0.5 MB AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=524288-"), // Last 0.5 MB AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=-524288"), // Everything after first 8K AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=8192-"), // Last 8K AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=-8192"), }; /* List of headers that should have matching values between the auto_ranged_get and default (which sends the HTTP * request as-is to S3) meta request.*/ const struct aws_byte_cursor headers_that_should_match[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ETag"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Accept-Ranges"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Range"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Server"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key"), }; /* List of headers that are okay to be in the auto_ranged_get response and not in the default response, or vice * versa.*/ const struct aws_byte_cursor headers_to_ignore[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"), }; struct aws_s3_tester_client_options client_options = { .part_size = 16 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); const size_t num_object_names = AWS_ARRAY_SIZE(object_names); const size_t num_ranges = AWS_ARRAY_SIZE(ranges); for (size_t object_name_index = 0; object_name_index < num_object_names; ++object_name_index) { for (size_t range_index = 0; range_index < num_ranges; ++range_index) { AWS_LOGF_INFO( AWS_LS_S3_GENERAL, "Testing object name %d and range %d", (int)object_name_index, (int)range_index); struct aws_byte_buf range_get_buffer; aws_byte_buf_init(&range_get_buffer, allocator, 256); struct aws_http_headers *range_get_headers = aws_http_headers_new(allocator); struct aws_byte_buf verify_range_get_buffer; aws_byte_buf_init(&verify_range_get_buffer, allocator, 256); struct aws_http_headers *verify_range_get_headers = aws_http_headers_new(allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .headers_callback = s_range_requests_headers_callback, .body_callback = s_range_requests_receive_body_callback, .get_options = { .object_path = object_names[object_name_index], .object_range = ranges[range_index], }, .sse_type = object_sse_types[object_name_index], }; { struct range_requests_test_user_data test_user_data = { .headers = range_get_headers, .body_buffer = &range_get_buffer, }; tester.user_data = &test_user_data; 
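/* First pass: send the auto-ranged GET and capture its response headers and body into range_get_headers / range_get_buffer for comparison against the default meta request sent next. */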
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, NULL)); } /* Send a default meta request (which just pushes the request directly to S3) with the same options to * verify the format of each request. */ struct aws_s3_tester_meta_request_options verify_options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_DEFAULT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .headers_callback = s_range_requests_headers_callback, .body_callback = s_range_requests_receive_body_callback, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .get_options = { .object_path = object_names[object_name_index], .object_range = ranges[range_index], }, .sse_type = object_sse_types[object_name_index], }; { struct range_requests_test_user_data test_user_data = { .headers = verify_range_get_headers, .body_buffer = &verify_range_get_buffer, }; tester.user_data = &test_user_data; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &verify_options, NULL)); } /* Compare headers. */ for (size_t i = 0; i < aws_http_headers_count(verify_range_get_headers); ++i) { struct aws_http_header verify_header; ASSERT_SUCCESS(aws_http_headers_get_index(verify_range_get_headers, i, &verify_header)); bool ignore_header = false; for (size_t j = 0; j < AWS_ARRAY_SIZE(headers_to_ignore); ++j) { if (aws_byte_cursor_eq_ignore_case(&headers_to_ignore[j], &verify_header.name)) { ignore_header = true; break; } } if (ignore_header) { ASSERT_SUCCESS(aws_http_headers_erase(range_get_headers, verify_header.name)); continue; } AWS_LOGF_INFO( AWS_LS_S3_GENERAL, "%d,%d Checking for header " PRInSTR, (int)object_name_index, (int)range_index, AWS_BYTE_CURSOR_PRI(verify_header.name)); struct aws_byte_cursor header_value; ASSERT_SUCCESS(aws_http_headers_get(range_get_headers, verify_header.name, &header_value)); for (size_t j = 0; j < AWS_ARRAY_SIZE(headers_that_should_match); ++j) { if (!aws_byte_cursor_eq_ignore_case(&headers_that_should_match[j], &verify_header.name)) { continue; } AWS_LOGF_INFO( AWS_LS_S3_GENERAL, "%d,%d Header Contents " PRInSTR " vs " PRInSTR, (int)object_name_index, (int)range_index, AWS_BYTE_CURSOR_PRI(verify_header.value), AWS_BYTE_CURSOR_PRI(header_value)); ASSERT_TRUE(aws_byte_cursor_eq(&verify_header.value, &header_value)); } ASSERT_SUCCESS(aws_http_headers_erase(range_get_headers, verify_header.name)); } for (size_t i = 0; i < aws_http_headers_count(range_get_headers); ++i) { struct aws_http_header header; ASSERT_SUCCESS(aws_http_headers_get_index(range_get_headers, i, &header)); bool ignore_header = false; /* If the ignore header doesn't exist in the verify_range_get_headers, ignore it here. 
*/ for (size_t j = 0; j < AWS_ARRAY_SIZE(headers_to_ignore); ++j) { if (aws_byte_cursor_eq_ignore_case(&headers_to_ignore[j], &header.name)) { ignore_header = true; break; } } if (ignore_header) { ASSERT_SUCCESS(aws_http_headers_erase(range_get_headers, header.name)); continue; } AWS_LOGF_INFO(AWS_LS_S3_GENERAL, "Left over header: " PRInSTR, AWS_BYTE_CURSOR_PRI(header.name)); } ASSERT_TRUE(aws_http_headers_count(range_get_headers) == 0); /* Compare Body Contents */ ASSERT_TRUE(aws_byte_buf_eq(&range_get_buffer, &verify_range_get_buffer)); aws_http_headers_release(range_get_headers); aws_byte_buf_clean_up(&range_get_buffer); aws_http_headers_release(verify_range_get_headers); aws_byte_buf_clean_up(&verify_range_get_buffer); } } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_not_satisfiable_range, s_test_s3_not_satisfiable_range) static int s_test_s3_not_satisfiable_range(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 16 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .headers_callback = s_range_requests_headers_callback, .body_callback = s_range_requests_receive_body_callback, .get_options = { .object_path = g_pre_existing_object_1MB, .object_range = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=2097151-"), }, }; struct aws_s3_meta_request_test_results results; aws_s3_meta_request_test_results_init(&results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &results)); ASSERT_INT_EQUALS(AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE, results.finished_response_status); ASSERT_NOT_NULL(results.error_response_operation_name); ASSERT_TRUE( aws_string_eq_c_str(results.error_response_operation_name, "GetObject") || aws_string_eq_c_str(results.error_response_operation_name, "HeadObject")); aws_s3_meta_request_test_results_clean_up(&results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_invalid_start_range_greator_than_end_range, s_test_s3_invalid_start_range_greator_than_end_range) static int s_test_s3_invalid_start_range_greator_than_end_range(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 16 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = g_pre_existing_object_1MB, .object_range = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=20-10"), }, }; struct aws_s3_meta_request_test_results results; aws_s3_meta_request_test_results_init(&results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &results)); ASSERT_INT_EQUALS(results.finished_error_code, AWS_ERROR_S3_INVALID_RANGE_HEADER); 
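/* A descending range ("bytes=20-10") is expected to fail with AWS_ERROR_S3_INVALID_RANGE_HEADER, as asserted above. */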
aws_s3_meta_request_test_results_clean_up(&results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } AWS_TEST_CASE(test_s3_invalid_empty_file_with_range, s_test_s3_invalid_empty_file_with_range) static int s_test_s3_invalid_empty_file_with_range(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 16 * 1024, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = g_pre_existing_empty_object, .object_range = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=0-0"), }, }; struct aws_s3_meta_request_test_results results; aws_s3_meta_request_test_results_init(&results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &results)); ASSERT_INT_EQUALS(AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE, results.finished_response_status); ASSERT_NOT_NULL(results.error_response_operation_name); ASSERT_TRUE(aws_string_eq_c_str(results.error_response_operation_name, "GetObject")); aws_s3_meta_request_test_results_clean_up(&results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static const struct aws_byte_cursor g_x_amz_copy_source_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"); struct aws_http_message *copy_object_request_new( struct aws_allocator *allocator, struct aws_byte_cursor x_amz_source, struct aws_byte_cursor endpoint, struct aws_byte_cursor destination_key) { AWS_PRECONDITION(allocator); struct aws_http_message *message = aws_http_message_new_request(allocator); if (message == NULL) { return NULL; } /* the URI path is / followed by the key */ char destination_path[1024]; snprintf(destination_path, sizeof(destination_path), "/%.*s", (int)destination_key.len, destination_key.ptr); struct aws_byte_cursor unencoded_destination_path = aws_byte_cursor_from_c_str(destination_path); struct aws_byte_buf copy_destination_path_encoded; aws_byte_buf_init(&copy_destination_path_encoded, allocator, 1024); aws_byte_buf_append_encoding_uri_path(&copy_destination_path_encoded, &unencoded_destination_path); if (aws_http_message_set_request_path(message, aws_byte_cursor_from_buf(&copy_destination_path_encoded))) { goto error_clean_up_message; } struct aws_http_header host_header = {.name = g_host_header_name, .value = endpoint}; if (aws_http_message_add_header(message, host_header)) { goto error_clean_up_message; } struct aws_byte_buf copy_source_value_encoded; aws_byte_buf_init(&copy_source_value_encoded, allocator, 1024); aws_byte_buf_append_encoding_uri_path(&copy_source_value_encoded, &x_amz_source); struct aws_http_header copy_source_header = { .name = g_x_amz_copy_source_name, .value = aws_byte_cursor_from_buf(&copy_source_value_encoded), }; if (aws_http_message_add_header(message, copy_source_header)) { goto error_clean_up_message; } if (aws_http_message_set_request_method(message, aws_http_method_put)) { goto error_clean_up_message; } aws_byte_buf_clean_up(&copy_source_value_encoded); aws_byte_buf_clean_up(&copy_destination_path_encoded); return message; error_clean_up_message: aws_byte_buf_clean_up(&copy_source_value_encoded); 
aws_byte_buf_clean_up(&copy_destination_path_encoded); if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } struct copy_object_test_data { struct aws_mutex mutex; struct aws_condition_variable c_var; bool execution_completed; bool headers_callback_was_invoked; int meta_request_error_code; int response_status_code; uint64_t progress_callback_content_length; uint64_t progress_callback_total_bytes_transferred; }; static void s_copy_object_meta_request_finish( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; struct copy_object_test_data *test_data = user_data; /* if error response body is available, dump it to test result to help investigation of failed tests */ if (meta_request_result->error_response_body != NULL && meta_request_result->error_response_body->len > 0) { AWS_LOGF_ERROR( AWS_LS_S3_GENERAL, "Response error body: %.*s", (int)meta_request_result->error_response_body->len, meta_request_result->error_response_body->buffer); } aws_mutex_lock(&test_data->mutex); test_data->meta_request_error_code = meta_request_result->error_code; test_data->response_status_code = meta_request_result->response_status; test_data->execution_completed = true; aws_mutex_unlock(&test_data->mutex); aws_condition_variable_notify_one(&test_data->c_var); } static int s_copy_object_meta_request_headers_callback( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)meta_request; (void)headers; (void)response_status; struct copy_object_test_data *test_data = user_data; aws_mutex_lock(&test_data->mutex); test_data->headers_callback_was_invoked = true; aws_mutex_unlock(&test_data->mutex); return AWS_OP_SUCCESS; } static void s_copy_object_meta_request_progress_callback( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_progress *progress, void *user_data) { (void)meta_request; struct copy_object_test_data *test_data = user_data; aws_mutex_lock(&test_data->mutex); test_data->progress_callback_content_length = progress->content_length; test_data->progress_callback_total_bytes_transferred += progress->bytes_transferred; aws_mutex_unlock(&test_data->mutex); } static bool s_copy_test_completion_predicate(void *arg) { struct copy_object_test_data *test_data = arg; return test_data->execution_completed; } static int s_test_s3_copy_object_from_x_amz_copy_source( struct aws_allocator *allocator, struct aws_byte_cursor x_amz_copy_source, struct aws_byte_cursor destination_key, int expected_error_code, int expected_response_status, uint64_t expected_size) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_byte_cursor destination_bucket = g_test_bucket_name; char endpoint[1024]; snprintf( endpoint, sizeof(endpoint), "%.*s.s3.%s.amazonaws.com", (int)destination_bucket.len, destination_bucket.ptr, g_test_s3_region.ptr); /* creates a CopyObject request */ struct aws_http_message *message = copy_object_request_new(allocator, x_amz_copy_source, aws_byte_cursor_from_c_str(endpoint), destination_key); struct copy_object_test_data test_data; 
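/* test_data is shared with the finish/headers/progress callbacks via user_data; the mutex and condition variable set up below let the test thread block until the meta request completes. */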
AWS_ZERO_STRUCT(test_data); test_data.c_var = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; aws_mutex_init(&test_data.mutex); struct aws_s3_meta_request_options meta_request_options = { .user_data = &test_data, .body_callback = NULL, .signing_config = client_config.signing_config, .finish_callback = s_copy_object_meta_request_finish, .headers_callback = s_copy_object_meta_request_headers_callback, .progress_callback = s_copy_object_meta_request_progress_callback, .message = message, .shutdown_callback = NULL, .type = AWS_S3_META_REQUEST_TYPE_COPY_OBJECT, }; struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &meta_request_options); ASSERT_NOT_NULL(meta_request); /* wait completion of the meta request */ aws_mutex_lock(&test_data.mutex); aws_condition_variable_wait_pred(&test_data.c_var, &test_data.mutex, s_copy_test_completion_predicate, &test_data); aws_mutex_unlock(&test_data.mutex); /* assert error_code and response_status_code */ ASSERT_INT_EQUALS(expected_error_code, test_data.meta_request_error_code); ASSERT_INT_EQUALS(expected_response_status, test_data.response_status_code); /* assert that progress_callback matches the expected size*/ if (test_data.meta_request_error_code == AWS_ERROR_SUCCESS) { ASSERT_UINT_EQUALS(expected_size, test_data.progress_callback_total_bytes_transferred); ASSERT_UINT_EQUALS(expected_size, test_data.progress_callback_content_length); } /* assert headers callback was invoked */ ASSERT_TRUE(test_data.headers_callback_was_invoked); aws_s3_meta_request_release(meta_request); aws_mutex_clean_up(&test_data.mutex); aws_http_message_destroy(message); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static int s_test_s3_copy_object_helper( struct aws_allocator *allocator, struct aws_byte_cursor source_key, struct aws_byte_cursor destination_key, int expected_error_code, int expected_response_status, uint64_t expected_size) { struct aws_byte_cursor source_bucket = g_test_bucket_name; char copy_source_value[1024]; snprintf( copy_source_value, sizeof(copy_source_value), "%.*s/%.*s", (int)source_bucket.len, source_bucket.ptr, (int)source_key.len, source_key.ptr); struct aws_byte_cursor x_amz_copy_source = aws_byte_cursor_from_c_str(copy_source_value); return s_test_s3_copy_object_from_x_amz_copy_source( allocator, x_amz_copy_source, destination_key, expected_error_code, expected_response_status, expected_size); } AWS_TEST_CASE(test_s3_copy_small_object, s_test_s3_copy_small_object) static int s_test_s3_copy_small_object(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor source_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("pre-existing-1MB"); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("copies/destination_1MB"); return s_test_s3_copy_object_helper( allocator, source_key, destination_key, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK, MB_TO_BYTES(1)); } AWS_TEST_CASE(test_s3_copy_small_object_special_char, s_test_s3_copy_small_object_special_char) static int s_test_s3_copy_small_object_special_char(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor source_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("pre-existing-1MB-@"); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("copies/destination_1MB_@"); return s_test_s3_copy_object_helper( allocator, source_key, destination_key, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK, MB_TO_BYTES(1)); } 
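/* The 1MB copies above and the 2GB copies below differ in that the latter are expected to exercise the multipart copy path, as their multipart_copy_large_object test names indicate. */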
AWS_TEST_CASE(test_s3_multipart_copy_large_object_special_char, s_test_s3_multipart_copy_large_object_special_char) static int s_test_s3_multipart_copy_large_object_special_char(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor source_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("pre-existing-2GB-@"); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("copies/destination_2GB-@"); return s_test_s3_copy_object_helper( allocator, source_key, destination_key, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK, GB_TO_BYTES(2)); } AWS_TEST_CASE(test_s3_multipart_copy_large_object, s_test_s3_multipart_copy_large_object) static int s_test_s3_multipart_copy_large_object(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor source_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("pre-existing-2GB"); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("copies/destination_2GB"); return s_test_s3_copy_object_helper( allocator, source_key, destination_key, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK, GB_TO_BYTES(2)); } AWS_TEST_CASE(test_s3_copy_object_invalid_source_key, s_test_s3_copy_object_invalid_source_key) static int s_test_s3_copy_object_invalid_source_key(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor source_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("__INVALID__"); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("copies/__INVALID__"); return s_test_s3_copy_object_helper( allocator, source_key, destination_key, AWS_ERROR_S3_INVALID_RESPONSE_STATUS, AWS_HTTP_STATUS_CODE_404_NOT_FOUND, 0 /* expected_size is ignored */); } /** * Test a bypass Copy Object meta request using a slash prefix in the x_amz_copy_source header. * S3 supports both bucket/key and /bucket/key * This test validates the fix for the bug described in https://sim.amazon.com/issues/AWSCRT-730 */ AWS_TEST_CASE(test_s3_copy_source_prefixed_by_slash, s_test_s3_copy_source_prefixed_by_slash) static int s_test_s3_copy_source_prefixed_by_slash(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor source_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("pre-existing-1MB"); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("copies/destination_1MB"); struct aws_byte_cursor source_bucket = g_test_bucket_name; char copy_source_value[1024]; snprintf( copy_source_value, sizeof(copy_source_value), "/%.*s/%.*s", (int)source_bucket.len, source_bucket.ptr, (int)source_key.len, source_key.ptr); struct aws_byte_cursor x_amz_copy_source = aws_byte_cursor_from_c_str(copy_source_value); return s_test_s3_copy_object_from_x_amz_copy_source( allocator, x_amz_copy_source, destination_key, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK, MB_TO_BYTES(1)); } /** * Test multipart Copy Object meta request using a slash prefix in the x_amz_copy_source header. 
* S3 supports both bucket/key and /bucket/key * This test validates the fix for the bug described in https://sim.amazon.com/issues/AWSCRT-730 */ AWS_TEST_CASE(test_s3_copy_source_prefixed_by_slash_multipart, s_test_s3_copy_source_prefixed_by_slash_multipart) static int s_test_s3_copy_source_prefixed_by_slash_multipart(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor source_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("pre-existing-256MB"); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("copies/destination_256MB"); struct aws_byte_cursor source_bucket = g_test_bucket_name; char copy_source_value[1024]; snprintf( copy_source_value, sizeof(copy_source_value), "/%.*s/%.*s", (int)source_bucket.len, source_bucket.ptr, (int)source_key.len, source_key.ptr); struct aws_byte_cursor x_amz_copy_source = aws_byte_cursor_from_c_str(copy_source_value); return s_test_s3_copy_object_from_x_amz_copy_source( allocator, x_amz_copy_source, destination_key, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK, MB_TO_BYTES(256)); } static int s_s3_get_object_mrap_helper(struct aws_allocator *allocator, bool multipart) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_signing_config_aws signing_config = tester.default_signing_config; /* Use Sigv4A for signing */ signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; /* Use * for region to sign */ signing_config.region = aws_byte_cursor_from_c_str("*"); struct aws_s3_client_config client_config = { .part_size = multipart ? 64 * 1024 : 20 * 1024 * 1024, .region = aws_byte_cursor_from_c_str("*"), .signing_config = &signing_config, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, 0 /*flag*/)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .mrap_test = true, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } /* Test single-part get object through MRAP (multi-region access point) */ AWS_TEST_CASE(test_s3_get_object_less_than_part_size_mrap, s_test_s3_get_object_less_than_part_size_mrap) static int s_test_s3_get_object_less_than_part_size_mrap(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_s3_get_object_mrap_helper(allocator, false /*multipart*/); } /* Test multi-part get object through MRAP (multi-region access point) */ AWS_TEST_CASE(test_s3_get_object_multipart_mrap, s_test_s3_get_object_multipart_mrap) static int s_test_s3_get_object_multipart_mrap(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_s3_get_object_mrap_helper(allocator, true /*multipart*/); } static int s_s3_put_object_mrap_helper(struct aws_allocator *allocator, bool multipart) { struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_signing_config_aws signing_config = tester.default_signing_config; /* Use Sigv4A 
for signing */ signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC; /* Use * for region to sign */ signing_config.region = aws_byte_cursor_from_c_str("*"); struct aws_s3_client_config client_config = { .part_size = 5 * 1024 * 1024, .region = aws_byte_cursor_from_c_str("*"), .signing_config = &signing_config, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, 0 /*flag*/)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_TRUE(client != NULL); struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .mrap_test = true, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .put_options = { .object_size_mb = multipart ? 10 : 1, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } /* Test single-part put object through MRAP (multi-region access point) */ AWS_TEST_CASE(test_s3_put_object_less_than_part_size_mrap, s_test_s3_put_object_less_than_part_size_mrap) static int s_test_s3_put_object_less_than_part_size_mrap(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_s3_put_object_mrap_helper(allocator, false /*multipart*/); } /* Test multi-part put object through MRAP (multi-region access point) */ AWS_TEST_CASE(test_s3_put_object_multipart_mrap, s_test_s3_put_object_multipart_mrap) static int s_test_s3_put_object_multipart_mrap(struct aws_allocator *allocator, void *ctx) { (void)ctx; return s_s3_put_object_mrap_helper(allocator, true /*multipart*/); } static struct aws_http_message *s_put_object_request_new( struct aws_allocator *allocator, struct aws_byte_cursor key, struct aws_byte_cursor endpoint, struct aws_input_stream *body_stream, uint64_t content_length) { AWS_PRECONDITION(allocator); struct aws_http_message *message = aws_http_message_new_request(allocator); if (message == NULL) { return NULL; } if (aws_http_message_set_request_path(message, key)) { goto error_clean_up_message; } struct aws_http_header host_header = { .name = g_host_header_name, .value = endpoint, }; if (aws_http_message_add_header(message, host_header)) { goto error_clean_up_message; } char content_length_c_str[1024]; snprintf(content_length_c_str, sizeof(content_length_c_str), "%" PRIu64, content_length); struct aws_http_header content_length_header = { .name = g_content_length_header_name, .value = aws_byte_cursor_from_c_str(content_length_c_str), }; if (aws_http_message_add_header(message, content_length_header)) { goto error_clean_up_message; } if (aws_http_message_set_request_method(message, aws_http_method_put)) { goto error_clean_up_message; } aws_http_message_set_body_stream(message, body_stream); return message; error_clean_up_message: if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } struct put_object_pause_resume_test_data { struct aws_mutex mutex; struct aws_condition_variable c_var; /* execution of the test meta request completed */ bool execution_completed; /* accumulator of amount of bytes uploaded */ struct aws_atomic_var total_bytes_uploaded; /* the offset where upload should be paused */ struct aws_atomic_var 
request_pause_offset; struct aws_atomic_var pause_requested; struct aws_atomic_var pause_result; /* the persistable state of the paused request */ struct aws_atomic_var persistable_state_ptr; int meta_request_error_code; int response_status_code; /* (Optional) content_length to send. If not set, use the length of the input stream. */ uint64_t content_length; }; static void s_put_pause_resume_meta_request_finish( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; struct aws_s3_tester *tester = user_data; struct put_object_pause_resume_test_data *test_data = tester->user_data; /* if error response body is available, dump it to test result to help investigation of failed tests */ if (meta_request_result->error_response_body != NULL && meta_request_result->error_response_body->len > 0) { AWS_LOGF_ERROR( AWS_LS_S3_GENERAL, "Response error body: %.*s", (int)meta_request_result->error_response_body->len, meta_request_result->error_response_body->buffer); } aws_mutex_lock(&test_data->mutex); test_data->meta_request_error_code = meta_request_result->error_code; test_data->response_status_code = meta_request_result->response_status; test_data->execution_completed = true; aws_mutex_unlock(&test_data->mutex); aws_condition_variable_notify_one(&test_data->c_var); } static bool s_put_pause_resume_test_completion_predicate(void *arg) { struct put_object_pause_resume_test_data *test_data = arg; return test_data->execution_completed; } /* Patched version of aws_s3_meta_request_vtable->finished_request() for pause/resume tests. * It can pause the meta-request immediately after a part completes. * We use a patched vtable, instead of the progress_callback, because * the progress_callback fires on another thread, which might be too late to * prevent more parts from being sent. */ static void s_meta_request_finished_request_patched_for_pause_resume_tests( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { AWS_ASSERT(meta_request); struct aws_s3_tester *tester = meta_request->user_data; struct put_object_pause_resume_test_data *test_data = tester->user_data; AWS_ASSERT(test_data); if ((error_code == AWS_ERROR_SUCCESS) && (meta_request->type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) && (request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART)) { if (!request->is_noop) { /* If the request is noop, we are not really uploading the part */ aws_atomic_fetch_add(&test_data->total_bytes_uploaded, request->request_body.len); } size_t total_bytes_uploaded = aws_atomic_load_int(&test_data->total_bytes_uploaded); uint64_t offset_to_pause = aws_atomic_load_int(&test_data->request_pause_offset); if (total_bytes_uploaded >= offset_to_pause) { /* offset of the upload at which we should pause was reached. let's pause the upload */ /* if the meta request has already been paused previously, do nothing. 
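   A single atomic compare-exchange on pause_requested (false -> true) ensures that only one invocation of
   this callback actually issues the pause, even if several parts finish around the same time.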
*/ size_t expected = false; bool request_pause = aws_atomic_compare_exchange_int(&test_data->pause_requested, &expected, true); if (request_pause) { struct aws_s3_meta_request_resume_token *resume_token = NULL; int pause_result = aws_s3_meta_request_pause(meta_request, &resume_token); struct aws_byte_cursor upload_id = aws_s3_meta_request_resume_token_upload_id(resume_token); /* Make Sure we have upload ID */ AWS_FATAL_ASSERT(aws_byte_cursor_eq_c_str(&upload_id, "") == false); aws_atomic_store_int(&test_data->pause_result, pause_result); aws_atomic_store_ptr(&test_data->persistable_state_ptr, resume_token); } } } /* Continue with original vtable function... */ struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; original_meta_request_vtable->finished_request(meta_request, request, error_code); } static struct aws_s3_meta_request *s_meta_request_factory_patch_for_pause_resume_tests( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->finished_request = s_meta_request_finished_request_patched_for_pause_resume_tests; return meta_request; } /* total length of the object to simulate for upload */ static const size_t s_pause_resume_object_length_128MB = 128 * 1024 * 1024; /* this runs when a RESUMED upload is about to successfully complete */ static int s_pause_resume_upload_review_callback( struct aws_s3_meta_request *meta_request, const struct aws_s3_upload_review *review, void *user_data) { (void)meta_request; (void)user_data; struct aws_allocator *allocator = meta_request->allocator; /* A bit hacky, but stream the same data that the test always uploads, and ensure the checksums match */ struct aws_input_stream *reread_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); for (size_t part_index = 0; part_index < review->part_count; ++part_index) { const struct aws_s3_upload_part_review *part_review = &review->part_array[part_index]; struct aws_byte_buf reread_part_buf; ASSERT_TRUE(part_review->size <= SIZE_MAX); aws_byte_buf_init(&reread_part_buf, allocator, (size_t)part_review->size); ASSERT_SUCCESS(aws_input_stream_read(reread_stream, &reread_part_buf)); /* part sizes should match */ ASSERT_UINT_EQUALS(part_review->size, reread_part_buf.len); if (review->checksum_algorithm != AWS_SCA_NONE) { struct aws_byte_cursor reread_part_cursor = aws_byte_cursor_from_buf(&reread_part_buf); struct aws_byte_buf checksum_buf; aws_byte_buf_init(&checksum_buf, allocator, 128); ASSERT_SUCCESS( aws_checksum_compute(allocator, review->checksum_algorithm, &reread_part_cursor, &checksum_buf, 0)); struct aws_byte_cursor checksum_cursor = aws_byte_cursor_from_buf(&checksum_buf); struct aws_byte_buf encoded_checksum_buf; aws_byte_buf_init(&encoded_checksum_buf, allocator, 128); ASSERT_SUCCESS(aws_base64_encode(&checksum_cursor, &encoded_checksum_buf)); /* part checksums should match */ ASSERT_BIN_ARRAYS_EQUALS( encoded_checksum_buf.buffer, encoded_checksum_buf.len, 
part_review->checksum.ptr, part_review->checksum.len); aws_byte_buf_clean_up(&checksum_buf); aws_byte_buf_clean_up(&encoded_checksum_buf); } aws_byte_buf_clean_up(&reread_part_buf); } aws_input_stream_release(reread_stream); return AWS_OP_SUCCESS; } static int s_pause_resume_receive_body_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)meta_request; (void)range_start; (void)user_data; // TODO: this is a bit hacky, as it will try to compare every partial get result we receive to the input stream. // Something better? struct aws_input_stream *input_stream = aws_s3_test_input_stream_new(meta_request->allocator, s_pause_resume_object_length_128MB); struct aws_byte_buf buf; aws_byte_buf_init(&buf, meta_request->allocator, (size_t)range_start); aws_input_stream_read(input_stream, &buf); aws_byte_buf_clean_up(&buf); aws_byte_buf_init(&buf, meta_request->allocator, body->len); aws_input_stream_read(input_stream, &buf); struct aws_byte_cursor input_cur = aws_byte_cursor_from_buf(&buf); bool body_matches_expected = aws_byte_cursor_eq(&input_cur, body); aws_input_stream_destroy(input_stream); aws_byte_buf_clean_up(&buf); ASSERT_TRUE(body_matches_expected); return AWS_OP_SUCCESS; } static int s_test_s3_put_pause_resume_helper( struct aws_s3_tester *tester, struct aws_allocator *allocator, void *ctx, struct put_object_pause_resume_test_data *test_data, struct aws_byte_cursor destination_key, struct aws_input_stream *upload_body_stream, struct aws_s3_meta_request_resume_token *resume_state, enum aws_s3_checksum_algorithm checksum_algorithm, int expected_error_code, int expected_response_status) { (void)ctx; struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); if (resume_state == NULL) { /* If we're going to cancel this operation, limit the client to 1 HTTP connection. * That way, we don't end up "cancelling" but all the parts actually * succeed anyway on other connections */ client_config.max_active_connections_override = 1; } ASSERT_SUCCESS(aws_s3_tester_bind_client( tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(tester, client, NULL); patched_client_vtable->meta_request_factory = s_meta_request_factory_patch_for_pause_resume_tests; struct aws_byte_cursor destination_bucket = g_test_bucket_name; char endpoint[1024]; snprintf( endpoint, sizeof(endpoint), "%.*s.s3.%s.amazonaws.com", (int)destination_bucket.len, destination_bucket.ptr, g_test_s3_region.ptr); /* creates a PutObject request */ int64_t content_length = test_data->content_length; if (content_length == 0) { /* If not set, use the length of the input stream */ aws_input_stream_get_length(upload_body_stream, &content_length); } struct aws_http_message *message = s_put_object_request_new( allocator, destination_key, aws_byte_cursor_from_c_str(endpoint), upload_body_stream, content_length); test_data->c_var = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; aws_mutex_init(&test_data->mutex); test_data->execution_completed = false; tester->user_data = test_data; struct aws_s3_checksum_config checksum_config = { .checksum_algorithm = checksum_algorithm, .location = checksum_algorithm == AWS_SCA_NONE ? 
AWS_SCL_NONE : AWS_SCL_TRAILER, }; struct aws_s3_meta_request_options meta_request_options = { .user_data = tester, .body_callback = NULL, .signing_config = client_config.signing_config, .finish_callback = s_put_pause_resume_meta_request_finish, .headers_callback = NULL, .upload_review_callback = s_pause_resume_upload_review_callback, .message = message, .shutdown_callback = NULL, .resume_token = NULL, .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .checksum_config = &checksum_config, }; if (resume_state) { meta_request_options.resume_token = resume_state; } struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &meta_request_options); ASSERT_NOT_NULL(meta_request); /* wait completion of the meta request */ aws_mutex_lock(&test_data->mutex); aws_condition_variable_wait_pred( &test_data->c_var, &test_data->mutex, s_put_pause_resume_test_completion_predicate, test_data); aws_mutex_unlock(&test_data->mutex); /* assert error_code and response_status_code */ ASSERT_INT_EQUALS(expected_error_code, test_data->meta_request_error_code); ASSERT_INT_EQUALS(expected_response_status, test_data->response_status_code); aws_s3_meta_request_release(meta_request); aws_mutex_clean_up(&test_data->mutex); aws_http_message_destroy(message); /* release this client with its crazy patched vtables */ client = aws_s3_client_release(client); aws_s3_tester_wait_for_client_shutdown(tester); tester->bound_to_client = false; if (expected_error_code == AWS_ERROR_SUCCESS) { /* get the file and verify it matches what we uploaded */ struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .body_callback = s_pause_resume_receive_body_callback, .get_options = { .object_path = destination_key, }, }; struct aws_s3_meta_request_test_results results; aws_s3_meta_request_test_results_init(&results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(tester, &options, &results)); aws_s3_meta_request_test_results_clean_up(&results); } return 0; } AWS_TEST_CASE(test_s3_put_pause_resume_happy_path, s_test_s3_put_pause_resume_happy_path) static int s_test_s3_put_pause_resume_happy_path(struct aws_allocator *allocator, void *ctx) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/upload/test_pause_resume.txt"); struct put_object_pause_resume_test_data test_data; AWS_ZERO_STRUCT(test_data); /* initialize the atomic members */ aws_atomic_init_int(&test_data.total_bytes_uploaded, 0); aws_atomic_init_int(&test_data.request_pause_offset, 0); aws_atomic_init_int(&test_data.pause_requested, false); aws_atomic_init_int(&test_data.pause_result, 0); aws_atomic_init_ptr(&test_data.persistable_state_ptr, NULL); /* offset of the upload where pause should be requested by test client */ aws_atomic_store_int(&test_data.request_pause_offset, 8 * 1024 * 1024); /* stream used to initiate upload */ struct aws_input_stream *initial_upload_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); /* starts the upload request that will be paused */ ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, initial_upload_stream, NULL, AWS_SCA_CRC32, AWS_ERROR_S3_PAUSED, 0)); aws_input_stream_destroy(initial_upload_stream); /* new stream used to resume upload. 
it begins at the offset specified in the persistable state */ struct aws_input_stream *resume_upload_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); struct aws_s3_meta_request_resume_token *persistable_state = aws_atomic_load_ptr(&test_data.persistable_state_ptr); size_t bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* offset where pause should be requested is set to a value greater than content length, * to avoid any more pause when resuming the upload */ aws_atomic_store_int(&test_data.request_pause_offset, s_pause_resume_object_length_128MB * 2); aws_atomic_store_int(&test_data.total_bytes_uploaded, 0); ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, resume_upload_stream, persistable_state, AWS_SCA_CRC32, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK)); bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* bytes uploaded is smaller since we are skipping uploaded parts */ ASSERT_TRUE(bytes_uploaded < s_pause_resume_object_length_128MB); aws_s3_meta_request_resume_token_release(persistable_state); aws_input_stream_destroy(resume_upload_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_put_pause_resume_all_parts_done, s_test_s3_put_pause_resume_all_parts_done) static int s_test_s3_put_pause_resume_all_parts_done(struct aws_allocator *allocator, void *ctx) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/upload/test_pause_resume_all_parts_done.txt"); struct put_object_pause_resume_test_data test_data; AWS_ZERO_STRUCT(test_data); /* initialize the atomic members */ aws_atomic_init_int(&test_data.total_bytes_uploaded, 0); aws_atomic_init_int(&test_data.request_pause_offset, 0); aws_atomic_init_int(&test_data.pause_requested, false); aws_atomic_init_int(&test_data.pause_result, 0); aws_atomic_init_ptr(&test_data.persistable_state_ptr, NULL); /* offset of the upload where pause should be requested by test client */ aws_atomic_store_int(&test_data.request_pause_offset, 128 * 1024 * 1024); /* stream used to initiate upload */ struct aws_input_stream *initial_upload_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); /* starts the upload request that will be paused */ ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, initial_upload_stream, NULL, AWS_SCA_NONE, AWS_ERROR_S3_PAUSED, 0)); aws_input_stream_destroy(initial_upload_stream); /* new stream used to resume upload. 
it begins at the offset specified in the persistable state */ struct aws_input_stream *resume_upload_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); struct aws_s3_meta_request_resume_token *persistable_state = aws_atomic_load_ptr(&test_data.persistable_state_ptr); AWS_LOGF_INFO(AWS_LS_S3_GENERAL, "Persistable state %p", persistable_state); size_t bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* offset where pause should be requested is set to a value greater than content length, * to avoid any more pause when resuming the upload */ aws_atomic_store_int(&test_data.request_pause_offset, s_pause_resume_object_length_128MB * 2); aws_atomic_store_int(&test_data.total_bytes_uploaded, 0); ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, resume_upload_stream, persistable_state, AWS_SCA_NONE, AWS_ERROR_SUCCESS, AWS_HTTP_STATUS_CODE_200_OK)); bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* bytes uploaded is smaller since we are skipping uploaded parts */ ASSERT_INT_EQUALS(0, bytes_uploaded); aws_s3_meta_request_resume_token_release(persistable_state); aws_input_stream_destroy(resume_upload_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_put_pause_resume_invalid_resume_data, s_test_s3_put_pause_resume_invalid_resume_data) static int s_test_s3_put_pause_resume_invalid_resume_data(struct aws_allocator *allocator, void *ctx) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/upload/test_pause_resume_resume_data.txt"); struct put_object_pause_resume_test_data test_data; AWS_ZERO_STRUCT(test_data); /* initialize the atomic members */ aws_atomic_init_int(&test_data.total_bytes_uploaded, 0); aws_atomic_init_int(&test_data.request_pause_offset, 0); aws_atomic_init_int(&test_data.pause_requested, false); aws_atomic_init_int(&test_data.pause_result, 0); aws_atomic_init_ptr(&test_data.persistable_state_ptr, NULL); /* offset of the upload where pause should be requested by test client */ aws_atomic_store_int(&test_data.request_pause_offset, 8 * 1024 * 1024); /* stream used to initiate upload */ struct aws_input_stream *initial_upload_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); /* starts the upload request that will be paused */ ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, initial_upload_stream, NULL, AWS_SCA_CRC32, AWS_ERROR_S3_PAUSED, 0)); aws_input_stream_destroy(initial_upload_stream); /* new stream used to resume upload. 
it begins at the offset specified in the persistable state */ struct aws_input_stream *resume_upload_stream = aws_s3_test_input_stream_new_with_value_type( allocator, s_pause_resume_object_length_128MB, TEST_STREAM_VALUE_2); struct aws_s3_meta_request_resume_token *persistable_state = aws_atomic_load_ptr(&test_data.persistable_state_ptr); size_t bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* offset where pause should be requested is set to a value greater than content length, * to avoid any more pause when resuming the upload */ aws_atomic_store_int(&test_data.request_pause_offset, s_pause_resume_object_length_128MB * 2); aws_atomic_store_int(&test_data.total_bytes_uploaded, 0); ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, resume_upload_stream, persistable_state, AWS_SCA_CRC32, AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH, 0)); bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* bytes uploaded is smaller since we are skipping uploaded parts */ ASSERT_TRUE(bytes_uploaded < s_pause_resume_object_length_128MB); aws_s3_meta_request_resume_token_release(persistable_state); aws_input_stream_destroy(resume_upload_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_put_pause_resume_invalid_resume_stream, s_test_s3_put_pause_resume_invalid_resume_stream) static int s_test_s3_put_pause_resume_invalid_resume_stream(struct aws_allocator *allocator, void *ctx) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/upload/test_pause_resume_bad_resume_stream.txt"); struct put_object_pause_resume_test_data test_data; AWS_ZERO_STRUCT(test_data); /* initialize the atomic members */ aws_atomic_init_int(&test_data.total_bytes_uploaded, 0); aws_atomic_init_int(&test_data.request_pause_offset, 0); aws_atomic_init_int(&test_data.pause_requested, false); aws_atomic_init_int(&test_data.pause_result, 0); aws_atomic_init_ptr(&test_data.persistable_state_ptr, NULL); /* offset of the upload where pause should be requested by test client */ aws_atomic_store_int(&test_data.request_pause_offset, 8 * 1024 * 1024); /* stream used to initiate upload */ struct aws_input_stream *initial_upload_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); /* starts the upload request that will be paused */ ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, initial_upload_stream, NULL, AWS_SCA_CRC32, AWS_ERROR_S3_PAUSED, 0)); aws_input_stream_release(initial_upload_stream); /* a bad input stream to resume from */ struct aws_input_stream_tester_options stream_options = { .autogen_length = s_pause_resume_object_length_128MB, .fail_on_nth_read = 1, .fail_with_error_code = AWS_IO_STREAM_READ_FAILED, }; struct aws_input_stream *resume_upload_stream = aws_input_stream_new_tester(allocator, &stream_options); struct aws_s3_meta_request_resume_token *persistable_state = aws_atomic_load_ptr(&test_data.persistable_state_ptr); size_t bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* offset where pause should be requested is set to a value greater than content length, * to avoid any more pause when resuming the upload */ aws_atomic_store_int(&test_data.request_pause_offset, s_pause_resume_object_length_128MB * 2); 
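    /* Reset the upload byte counter so that anything uploaded by the resumed attempt below is measured from zero. */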
aws_atomic_store_int(&test_data.total_bytes_uploaded, 0); /* Each failed resume will delete the MPU */ ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, resume_upload_stream, persistable_state, AWS_SCA_CRC32, AWS_IO_STREAM_READ_FAILED, 0)); bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* resume didn't read any bytes because the bad input stream failed to read. */ ASSERT_TRUE(bytes_uploaded == 0); aws_s3_meta_request_resume_token_release(persistable_state); aws_input_stream_release(resume_upload_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_put_pause_resume_invalid_content_length, s_test_s3_put_pause_resume_invalid_content_length) static int s_test_s3_put_pause_resume_invalid_content_length(struct aws_allocator *allocator, void *ctx) { struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor destination_key = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/upload/test_pause_resume_bad_resume_stream.txt"); struct put_object_pause_resume_test_data test_data; AWS_ZERO_STRUCT(test_data); /* initialize the atomic members */ aws_atomic_init_int(&test_data.total_bytes_uploaded, 0); aws_atomic_init_int(&test_data.request_pause_offset, 0); aws_atomic_init_int(&test_data.pause_requested, false); aws_atomic_init_int(&test_data.pause_result, 0); aws_atomic_init_ptr(&test_data.persistable_state_ptr, NULL); /* offset of the upload where pause should be requested by test client */ aws_atomic_store_int(&test_data.request_pause_offset, 8 * 1024 * 1024); test_data.content_length = s_pause_resume_object_length_128MB; /* stream used to initiate upload */ struct aws_input_stream *initial_upload_stream = aws_s3_test_input_stream_new(allocator, s_pause_resume_object_length_128MB); /* starts the upload request that will be paused */ ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, initial_upload_stream, NULL, AWS_SCA_CRC32, AWS_ERROR_S3_PAUSED, 0)); aws_input_stream_release(initial_upload_stream); /* a small input stream to resume with */ struct aws_input_stream *resume_upload_stream = aws_s3_test_input_stream_new(allocator, 8 * 1024 * 1024); struct aws_s3_meta_request_resume_token *persistable_state = aws_atomic_load_ptr(&test_data.persistable_state_ptr); size_t bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* offset where pause should be requested is set to a value greater than content length, * to avoid any more pause when resuming the upload */ aws_atomic_store_int(&test_data.request_pause_offset, s_pause_resume_object_length_128MB * 2); aws_atomic_store_int(&test_data.total_bytes_uploaded, 0); /* Each failed resume will delete the MPU */ ASSERT_SUCCESS(s_test_s3_put_pause_resume_helper( &tester, allocator, ctx, &test_data, destination_key, resume_upload_stream, persistable_state, AWS_SCA_CRC32, AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH, 0)); bytes_uploaded = aws_atomic_load_int(&test_data.total_bytes_uploaded); /* resume didn't read any bytes because the bad input stream failed to read. 
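   (In this variant the 8 MiB resume stream cannot satisfy the declared 128 MiB Content-Length, so the resumed
   attempt is expected to fail with AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH before any new part completes.)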
*/ ASSERT_TRUE(bytes_uploaded == 0); aws_s3_meta_request_resume_token_release(persistable_state); aws_input_stream_release(resume_upload_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Most basic test of the upload_review_callback */ AWS_TEST_CASE(test_s3_upload_review, s_test_s3_upload_review) static int s_test_s3_upload_review(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .checksum_algorithm = AWS_SCA_CRC32, .put_options = { .object_path_override = aws_byte_cursor_from_c_str("/upload/review_10MB_CRC32.txt"), .object_size_mb = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); /* The tester always registers an upload_review_callback. * Check that it got what we expect */ ASSERT_UINT_EQUALS(1, test_results.upload_review.invoked_count); ASSERT_UINT_EQUALS(2, test_results.upload_review.part_count); ASSERT_UINT_EQUALS(MB_TO_BYTES(8), test_results.upload_review.part_sizes_array[0]); ASSERT_UINT_EQUALS(MB_TO_BYTES(10) - MB_TO_BYTES(8), test_results.upload_review.part_sizes_array[1]); ASSERT_INT_EQUALS(AWS_SCA_CRC32, test_results.upload_review.checksum_algorithm); ASSERT_STR_EQUALS("9J8ZNA==", aws_string_c_str(test_results.upload_review.part_checksums_array[0])); ASSERT_STR_EQUALS("BNjxzQ==", aws_string_c_str(test_results.upload_review.part_checksums_array[1])); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } /* Test upload_review_callback when Content-Length is not declared */ AWS_TEST_CASE(test_s3_upload_review_no_content_length, s_test_s3_upload_review_no_content_length) static int s_test_s3_upload_review_no_content_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .checksum_algorithm = AWS_SCA_CRC32, .put_options = { .object_path_override = aws_byte_cursor_from_c_str("/upload/review_1MB_CRC32.txt"), .object_size_mb = 1, .skip_content_length = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(NULL, &put_options, &test_results)); /* The tester always registers an upload_review_callback. * Check that it got what we expect */ ASSERT_UINT_EQUALS(1, test_results.upload_review.invoked_count); ASSERT_UINT_EQUALS(1, test_results.upload_review.part_count); ASSERT_UINT_EQUALS(MB_TO_BYTES(1), test_results.upload_review.part_sizes_array[0]); ASSERT_STR_EQUALS("4hP4ig==", aws_string_c_str(test_results.upload_review.part_checksums_array[0])); aws_s3_meta_request_test_results_clean_up(&test_results); return 0; } static int s_upload_review_raise_canceled_error( struct aws_s3_meta_request *meta_request, const struct aws_s3_upload_review *review, void *user_data) { (void)meta_request; (void)review; (void)user_data; return aws_raise_error(AWS_ERROR_S3_CANCELED); } /* Test that if upload_review_callback raises an error, then the upload is canceled. 
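 * The rejecting callback above returns AWS_OP_ERR via aws_raise_error(AWS_ERROR_S3_CANCELED), and that raised
 * error code is expected to surface as the meta request's finished_error_code.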
*/ AWS_TEST_CASE(test_s3_upload_review_rejection, s_test_s3_upload_review_rejection) static int s_test_s3_upload_review_rejection(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/upload/review_rejection.txt"); struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_NOT_NULL(client); /* Send meta-request that will raise an error from the review_upload_callback */ struct aws_s3_meta_request_test_results test_results; aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .checksum_algorithm = AWS_SCA_CRC32, .upload_review_callback = s_upload_review_raise_canceled_error, .put_options = { .object_path_override = object_path, .object_size_mb = 10, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &test_results)); /* Check that meta-request failed with the error raised by the upload_review_callback */ ASSERT_INT_EQUALS(AWS_ERROR_S3_CANCELED, test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&test_results); /* * Now check that the upload did not complete on the server either * (server should have received AbortMultipartUpload). * Check by attempting to GET the object, which should fail with 404 NOT FOUND. */ aws_s3_meta_request_test_results_init(&test_results, allocator); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = object_path, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &test_results)); ASSERT_INT_EQUALS(AWS_HTTP_STATUS_CODE_404_NOT_FOUND, test_results.finished_response_status); ASSERT_NOT_NULL(test_results.error_response_operation_name); ASSERT_TRUE( aws_string_eq_c_str(test_results.error_response_operation_name, "GetObject") || aws_string_eq_c_str(test_results.error_response_operation_name, "HeadObject")); aws_s3_meta_request_test_results_clean_up(&test_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_endpoint_resolver_tests.c000066400000000000000000000125331456575232400264570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #ifdef AWS_ENABLE_S3_ENDPOINT_RESOLVER # include # include AWS_TEST_CASE(test_s3_endpoint_resolver_resolve_endpoint, s_test_s3_endpoint_resolver_resolve_endpoint) static int s_test_s3_endpoint_resolver_resolve_endpoint(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_endpoints_rule_engine *rule_engine = aws_s3_endpoint_resolver_new(allocator); ASSERT_NOT_NULL(rule_engine); struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); ASSERT_NOT_NULL(context); ASSERT_SUCCESS(aws_endpoints_request_context_add_string( allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-west-2"))); ASSERT_SUCCESS(aws_endpoints_request_context_add_string( allocator, context, aws_byte_cursor_from_c_str("Bucket"), aws_byte_cursor_from_c_str("s3-bucket-test"))); struct aws_endpoints_resolved_endpoint *resolved_endpoint; ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(rule_engine, context, &resolved_endpoint)); ASSERT_INT_EQUALS(AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); struct aws_byte_cursor url_cur; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url_cur)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(url_cur, "https://s3-bucket-test.s3.us-west-2.amazonaws.com"); aws_endpoints_resolved_endpoint_release(resolved_endpoint); aws_endpoints_request_context_release(context); aws_endpoints_rule_engine_release(rule_engine); aws_s3_library_clean_up(); return 0; } AWS_TEST_CASE(test_s3_endpoint_resolver_resolve_endpoint_fips, s_test_s3_endpoint_resolver_resolve_endpoint_fips) static int s_test_s3_endpoint_resolver_resolve_endpoint_fips(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_endpoints_rule_engine *rule_engine = aws_s3_endpoint_resolver_new(allocator); ASSERT_NOT_NULL(rule_engine); struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); ASSERT_NOT_NULL(context); ASSERT_SUCCESS(aws_endpoints_request_context_add_string( allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-east-1"))); ASSERT_SUCCESS(aws_endpoints_request_context_add_string( allocator, context, aws_byte_cursor_from_c_str("Bucket"), aws_byte_cursor_from_c_str("s3-bucket-test"))); ASSERT_SUCCESS( aws_endpoints_request_context_add_boolean(allocator, context, aws_byte_cursor_from_c_str("UseFIPS"), true)); struct aws_endpoints_resolved_endpoint *resolved_endpoint; ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(rule_engine, context, &resolved_endpoint)); ASSERT_INT_EQUALS(AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); struct aws_byte_cursor url_cur; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url_cur)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(url_cur, "https://s3-bucket-test.s3-fips.us-east-1.amazonaws.com"); aws_endpoints_resolved_endpoint_release(resolved_endpoint); aws_endpoints_request_context_release(context); aws_endpoints_rule_engine_release(rule_engine); aws_s3_library_clean_up(); return 0; } AWS_TEST_CASE( test_s3_endpoint_resolver_resolve_endpoint_force_path_style, s_test_s3_endpoint_resolver_resolve_endpoint_force_path_style) static int s_test_s3_endpoint_resolver_resolve_endpoint_force_path_style(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_endpoints_rule_engine *rule_engine = 
aws_s3_endpoint_resolver_new(allocator); ASSERT_NOT_NULL(rule_engine); struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); ASSERT_NOT_NULL(context); ASSERT_SUCCESS(aws_endpoints_request_context_add_string( allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-east-1"))); ASSERT_SUCCESS(aws_endpoints_request_context_add_string( allocator, context, aws_byte_cursor_from_c_str("Bucket"), aws_byte_cursor_from_c_str("s3-bucket-test"))); ASSERT_SUCCESS(aws_endpoints_request_context_add_boolean( allocator, context, aws_byte_cursor_from_c_str("ForcePathStyle"), true)); struct aws_endpoints_resolved_endpoint *resolved_endpoint; ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(rule_engine, context, &resolved_endpoint)); ASSERT_INT_EQUALS(AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); struct aws_byte_cursor url_cur; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url_cur)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(url_cur, "https://s3.us-east-1.amazonaws.com/s3-bucket-test"); aws_endpoints_resolved_endpoint_release(resolved_endpoint); aws_endpoints_request_context_release(context); aws_endpoints_rule_engine_release(rule_engine); aws_s3_library_clean_up(); return 0; } #endif aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_endpoint_tests.c000066400000000000000000000046331456575232400245400ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_client_impl.h" #include "s3_tester.h" #include #include AWS_TEST_CASE(test_s3_different_endpoints, s_test_s3_different_endpoints) static int s_test_s3_different_endpoints(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); { struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); } { struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, .bucket_name = &g_test_public_bucket_name, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } 
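/*
 * A minimal sketch of the pattern exercised by the test above: one aws_s3_client serving requests whose Host
 * headers resolve to two different endpoints. The bucket names and object path below are illustrative
 * placeholders (not taken from the test), and signing configuration is omitted for brevity; the tester helpers
 * used in the real test handle those details.
 *
 *     struct aws_http_message *msg = aws_http_message_new_request(allocator);
 *     aws_http_message_set_request_method(msg, aws_http_method_get);
 *     aws_http_message_set_request_path(msg, aws_byte_cursor_from_c_str("/pre-existing-1MB"));
 *     struct aws_http_header host = {
 *         .name = g_host_header_name,
 *         .value = aws_byte_cursor_from_c_str("bucket-a.s3.us-west-2.amazonaws.com"),
 *     };
 *     aws_http_message_add_header(msg, host);
 *     struct aws_s3_meta_request_options opts = {
 *         .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT,
 *         .message = msg,
 *     };
 *     struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &opts);
 *     (repeat with a Host header for "bucket-b..." -- the same client resolves and pools a second endpoint)
 */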
aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_list_objects_tests.c000066400000000000000000000156461456575232400254120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_list_objects.h" #include "s3_tester.h" #include #include #include #include static int s_test_s3_list_bucket_init_mem_safety(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); client_options.tls_usage = AWS_S3_TLS_ENABLED; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_list_objects_params params = { .client = client, .endpoint = aws_byte_cursor_from_c_str("test-endpoint.com"), .bucket_name = aws_byte_cursor_from_c_str("test-bucket"), }; struct aws_s3_paginator *paginator = aws_s3_initiate_list_objects(allocator, ¶ms); ASSERT_NOT_NULL(paginator); aws_s3_paginator_release(paginator); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_list_bucket_init_mem_safety, s_test_s3_list_bucket_init_mem_safety) static int s_test_s3_list_bucket_init_mem_safety_optional_copies(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); client_options.tls_usage = AWS_S3_TLS_ENABLED; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_list_objects_params params = { .client = client, .endpoint = aws_byte_cursor_from_c_str("test-endpoint.com"), .bucket_name = aws_byte_cursor_from_c_str("test-bucket"), .prefix = aws_byte_cursor_from_c_str("foo/bar"), .delimiter = aws_byte_cursor_from_c_str("/"), .continuation_token = aws_byte_cursor_from_c_str("base64_encrypted_thing"), }; struct aws_s3_paginator *paginator = aws_s3_initiate_list_objects(allocator, ¶ms); ASSERT_NOT_NULL(paginator); aws_s3_paginator_release(paginator); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE( test_s3_list_bucket_init_mem_safety_optional_copies, s_test_s3_list_bucket_init_mem_safety_optional_copies) struct list_bucket_test_data { struct aws_allocator *allocator; struct aws_signing_config_aws signing_config; struct aws_mutex mutex; struct aws_condition_variable c_var; bool done; int error_code; struct aws_array_list entries_found; }; static bool s_on_paginator_finished_predicate(void *arg) { struct list_bucket_test_data *test_data = arg; return test_data->done; } static int s_on_list_bucket_valid_object_fn(const struct aws_s3_object_info *info, void *user_data) { (void)info; struct list_bucket_test_data *test_data = user_data; struct aws_string *path = NULL; if (info->key.len) { path = aws_string_new_from_cursor(test_data->allocator, &info->key); } else if (info->prefix.len) { path = aws_string_new_from_cursor(test_data->allocator, &info->prefix); } aws_array_list_push_back(&test_data->entries_found, &path); return AWS_OP_SUCCESS; } static void s_on_list_bucket_page_finished_fn(struct aws_s3_paginator *paginator, int error_code, void *user_data) { struct list_bucket_test_data *test_data = user_data; 
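    /* Keep driving the paginator while more pages remain; only signal the waiting test thread once the final
     * page (or an error) has been handled. */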
test_data->error_code = error_code; if (!error_code && aws_s3_paginator_has_more_results(paginator)) { aws_s3_paginator_continue(paginator, &test_data->signing_config); } else { aws_mutex_lock(&test_data->mutex); test_data->done = true; aws_mutex_unlock(&test_data->mutex); aws_condition_variable_notify_one(&test_data->c_var); } } static int s_test_s3_list_bucket_valid(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); client_options.tls_usage = AWS_S3_TLS_ENABLED; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_signing_config_aws signing_config; AWS_ZERO_STRUCT(signing_config); aws_s3_init_default_signing_config(&signing_config, g_test_s3_region, tester.credentials_provider); struct list_bucket_test_data test_data = { .allocator = allocator, .signing_config = signing_config, .mutex = AWS_MUTEX_INIT, .c_var = AWS_CONDITION_VARIABLE_INIT, .done = false, }; ASSERT_SUCCESS(aws_array_list_init_dynamic(&test_data.entries_found, allocator, 16, sizeof(struct aws_string *))); struct aws_byte_cursor endpoint = aws_byte_cursor_from_c_str("s3.us-west-2.amazonaws.com"); struct aws_s3_list_objects_params params = { .client = client, .endpoint = endpoint, .bucket_name = g_test_bucket_name, .on_object = s_on_list_bucket_valid_object_fn, .on_list_finished = s_on_list_bucket_page_finished_fn, .user_data = &test_data, .delimiter = aws_byte_cursor_from_c_str("/"), }; struct aws_s3_paginator *paginator = aws_s3_initiate_list_objects(allocator, ¶ms); ASSERT_NOT_NULL(paginator); aws_mutex_lock(&test_data.mutex); aws_s3_paginator_continue(paginator, &signing_config); aws_condition_variable_wait_pred(&test_data.c_var, &test_data.mutex, s_on_paginator_finished_predicate, &test_data); aws_mutex_unlock(&test_data.mutex); if (test_data.error_code == AWS_OP_SUCCESS) { struct aws_string *path = NULL; /* don't have a great path for testing thoroughly since these are live service calls, but at least sanity check */ size_t length = aws_array_list_length(&test_data.entries_found); for (size_t i = 0; i < length; ++i) { aws_array_list_get_at(&test_data.entries_found, &path, i); ASSERT_TRUE(path->len > 0); aws_string_destroy(path); } ASSERT_TRUE(length > 0); } else { ASSERT_TRUE( false, "Failing test because the operation failed with error %s\n", aws_error_debug_str(test_data.error_code)); } aws_array_list_clean_up(&test_data.entries_found); aws_s3_paginator_release(paginator); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_list_bucket_valid, s_test_s3_list_bucket_valid) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_meta_request_test.c000066400000000000000000000204161456575232400252300ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_auto_ranged_get.h" #include "aws/s3/private/s3_auto_ranged_put.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_util.h" #include "aws/s3/s3_client.h" #include "s3_tester.h" #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } TEST_CASE(meta_request_auto_ranged_get_new_error_handling) { (void)ctx; struct aws_http_message *message = aws_http_message_new_request(allocator); struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_meta_request_options options = { .message = message, .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, }; struct aws_s3_meta_request *meta_request = aws_s3_meta_request_auto_ranged_get_new(allocator, client, SIZE_MAX, &options); ASSERT_NULL(meta_request); aws_http_message_release(message); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(meta_request_auto_ranged_put_new_error_handling) { (void)ctx; struct aws_http_message *message = aws_http_message_new_request(allocator); struct aws_byte_cursor body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("write more tests"); struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(allocator, &body); aws_http_message_set_body_stream(message, body_stream); struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* First: Fail from the aws_s3_meta_request_init_base */ struct aws_s3_meta_request_options options = { .message = message, .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, }; struct aws_s3_meta_request *meta_request = aws_s3_meta_request_auto_ranged_put_new(allocator, client, SIZE_MAX, true, MB_TO_BYTES(10), 2, &options); ASSERT_NULL(meta_request); /* Second: Fail from the s_try_update_part_info_from_resume_token */ struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new(allocator); token->part_size = 1; /* Less than g_s3_min_upload_part_size */ options.resume_token = token; meta_request = aws_s3_meta_request_auto_ranged_put_new(allocator, client, MB_TO_BYTES(8), true, MB_TO_BYTES(10), 2, &options); ASSERT_NULL(meta_request); aws_s3_meta_request_resume_token_release(token); /* Third: Fail from the s_try_init_resume_state_from_persisted_data */ struct aws_s3_upload_resume_token_options token_options = { .upload_id = aws_byte_cursor_from_c_str("upload_id"), .part_size = MB_TO_BYTES(8), .total_num_parts = 2, .num_parts_completed = 1, }; token = aws_s3_meta_request_resume_token_new_upload(allocator, &token_options); options.resume_token = token; ASSERT_UINT_EQUALS(AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, aws_s3_meta_request_resume_token_type(token)); ASSERT_UINT_EQUALS(token_options.part_size, aws_s3_meta_request_resume_token_part_size(token)); ASSERT_UINT_EQUALS(token_options.total_num_parts, 
aws_s3_meta_request_resume_token_total_num_parts(token)); ASSERT_UINT_EQUALS(token_options.num_parts_completed, aws_s3_meta_request_resume_token_num_parts_completed(token)); meta_request = aws_s3_meta_request_auto_ranged_put_new(allocator, client, MB_TO_BYTES(8), true, MB_TO_BYTES(10), 2, &options); ASSERT_NULL(meta_request); aws_input_stream_release(body_stream); aws_http_message_release(message); aws_s3_meta_request_resume_token_release(token); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(bad_request_error_handling) { /* The original request without method and path. */ (void)ctx; struct aws_http_message *message = aws_http_message_new_request(allocator); struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_http_header host_header = { .name = g_host_header_name, .value = aws_byte_cursor_from_c_str("s3.us-east-1.amazonaws.com"), }; ASSERT_SUCCESS(aws_http_message_add_header(message, host_header)); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request( &tester, client, &options, &meta_request_test_results, 0 /* Not expect success */)); ASSERT_UINT_EQUALS(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE, meta_request_test_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } TEST_CASE(make_meta_request_error_handling) { /* The original request without method and path. */ (void)ctx; struct aws_http_message *message = aws_http_message_new_request(allocator); ASSERT_SUCCESS(aws_http_message_set_request_method(message, aws_http_method_get)); ASSERT_SUCCESS(aws_http_message_set_request_path(message, aws_byte_cursor_from_c_str("/"))); struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options = { .part_size = 5 * 1024 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* 1. Bad options type */ struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_MAX; struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_NULL(meta_request); /* 2. No message */ options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_NULL(meta_request); /* 3. No message header */ options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_NULL(meta_request); /* 4. 
Bad host name */ struct aws_http_header host_header = { .name = g_host_header_name, .value = aws_byte_cursor_from_c_str("invalid:/s3.us-east-1.amazonaws.com"), }; ASSERT_SUCCESS(aws_http_message_add_header(message, host_header)); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_NULL(meta_request); aws_http_message_release(message); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_mock_server_s3express_provider_test.c000066400000000000000000000654331456575232400310120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_util.h" #include "aws/s3/private/s3express_credentials_provider_impl.h" #include "aws/s3/s3_client.h" #include "aws/s3/s3express_credentials_provider.h" #include "s3_tester.h" #include #include #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } static uint64_t s_bg_refresh_secs_override = 60; struct aws_s3express_provider_tester { struct aws_allocator *allocator; struct aws_mutex lock; struct aws_condition_variable signal; size_t credentials_callbacks_received; bool has_received_shutdown_callback; /* Last received credentials */ struct aws_credentials *credentials; /* Number of different credentials received */ int number_of_credentials; struct aws_uri mock_server; struct aws_s3_client *client; int error_code; }; static struct aws_s3express_provider_tester s_s3express_tester; static void s_on_shutdown_complete(void *user_data) { (void)user_data; aws_mutex_lock(&s_s3express_tester.lock); s_s3express_tester.has_received_shutdown_callback = true; aws_condition_variable_notify_one(&s_s3express_tester.signal); aws_mutex_unlock(&s_s3express_tester.lock); } static bool s_has_s3express_tester_received_shutdown_callback(void *user_data) { (void)user_data; return s_s3express_tester.has_received_shutdown_callback; } static void s_aws_wait_for_provider_shutdown_callback(void) { aws_mutex_lock(&s_s3express_tester.lock); aws_condition_variable_wait_pred( &s_s3express_tester.signal, &s_s3express_tester.lock, s_has_s3express_tester_received_shutdown_callback, NULL); aws_mutex_unlock(&s_s3express_tester.lock); } static bool s_has_s3express_tester_received_credentials_callback(void *user_data) { size_t result_num = *(size_t *)user_data; return s_s3express_tester.credentials_callbacks_received >= result_num; } static void s_aws_wait_for_credentials_result(size_t result_num) { aws_mutex_lock(&s_s3express_tester.lock); aws_condition_variable_wait_pred( &s_s3express_tester.signal, &s_s3express_tester.lock, s_has_s3express_tester_received_credentials_callback, &result_num); aws_mutex_unlock(&s_s3express_tester.lock); } static void s_get_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { (void)user_data; aws_mutex_lock(&s_s3express_tester.lock); ++s_s3express_tester.credentials_callbacks_received; s_s3express_tester.error_code = error_code; if (credentials != s_s3express_tester.credentials) { ++s_s3express_tester.number_of_credentials; if (s_s3express_tester.credentials) { 
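        /* Release the reference to the previously cached credentials before caching the newly received ones. */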
aws_credentials_release(s_s3express_tester.credentials); } s_s3express_tester.credentials = credentials; aws_credentials_acquire(credentials); } aws_condition_variable_notify_one(&s_s3express_tester.signal); aws_mutex_unlock(&s_s3express_tester.lock); } static int s_s3express_tester_init(struct aws_allocator *allocator) { s_s3express_tester.allocator = allocator; if (aws_mutex_init(&s_s3express_tester.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&s_s3express_tester.signal)) { return AWS_OP_ERR; } s_s3express_tester.error_code = AWS_ERROR_SUCCESS; ASSERT_SUCCESS(aws_uri_init_parse(&s_s3express_tester.mock_server, allocator, &g_mock_server_uri)); return AWS_OP_SUCCESS; } static int s_s3express_tester_cleanup(void) { aws_condition_variable_clean_up(&s_s3express_tester.signal); aws_mutex_clean_up(&s_s3express_tester.lock); aws_uri_clean_up(&s_s3express_tester.mock_server); aws_s3_client_release(s_s3express_tester.client); aws_credentials_release(s_s3express_tester.credentials); return AWS_OP_SUCCESS; } static bool s_s3express_session_always_true(struct aws_s3express_session *session, uint64_t now_seconds) { (void)session; (void)now_seconds; return true; } static struct aws_s3express_credentials_provider *s_s3express_provider_new_mock_server(struct aws_s3_tester *tester) { struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; if (s_s3express_tester.client == NULL) { if (aws_s3_tester_client_new(tester, &client_options, &s_s3express_tester.client)) { return NULL; } } struct aws_s3express_credentials_provider_default_options options = { .client = s_s3express_tester.client, .shutdown_complete_callback = s_on_shutdown_complete, .shutdown_user_data = &s_s3express_tester, .mock_test.bg_refresh_secs_override = s_bg_refresh_secs_override, }; struct aws_s3express_credentials_provider *provider = aws_s3express_credentials_provider_new_default(tester->allocator, &options); struct aws_s3express_credentials_provider_impl *impl = provider->impl; impl->mock_test.endpoint_override = &s_s3express_tester.mock_server; impl->mock_test.s3express_session_is_valid_override = s_s3express_session_always_true; return provider; } TEST_CASE(s3express_provider_sanity_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } struct s3express_creds_from_ori_provider_user_data { struct aws_s3express_credentials_provider *s3express_provider; struct aws_credentials_properties_s3express *property; aws_on_get_credentials_callback_fn *callback; void *user_data; }; static void s_get_original_credentials_callback(struct aws_credentials *credentials, int error_code, void *user_data) { struct s3express_creds_from_ori_provider_user_data *context = user_data; AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); error_code |= aws_s3express_credentials_provider_get_credentials( context->s3express_provider, credentials, context->property, context->callback, context->user_data); AWS_FATAL_ASSERT(error_code == AWS_ERROR_SUCCESS); aws_mem_release(context->s3express_provider->allocator, context); } static int 
s_tester_get_s3express_creds_from_ori_provider( struct aws_s3express_credentials_provider *s3express_provider, struct aws_credentials_properties_s3express *property, aws_on_get_credentials_callback_fn *callback, void *user_data, struct aws_credentials_provider *ori_provider) { struct s3express_creds_from_ori_provider_user_data *context = aws_mem_calloc(s3express_provider->allocator, 1, sizeof(struct s3express_creds_from_ori_provider_user_data)); context->s3express_provider = s3express_provider; context->property = property; context->callback = callback; context->user_data = user_data; ASSERT_SUCCESS( aws_credentials_provider_get_credentials(ori_provider, s_get_original_credentials_callback, context)); return AWS_OP_SUCCESS; } TEST_CASE(s3express_provider_get_credentials_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); struct aws_credentials_properties_s3express property = { .host = *aws_uri_authority(&s_s3express_tester.mock_server), }; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); s_aws_wait_for_credentials_result(1); ASSERT_SUCCESS(aws_s3_tester_check_s3express_creds_for_default_mock_response(s_s3express_tester.credentials)); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_provider_get_credentials_multiple_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); size_t number_calls = 10; struct aws_credentials_properties_s3express property = { .host = *aws_uri_authority(&s_s3express_tester.mock_server), }; for (size_t i = 0; i < number_calls; i++) { ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); } s_aws_wait_for_credentials_result(number_calls); ASSERT_SUCCESS(aws_s3_tester_check_s3express_creds_for_default_mock_response(s_s3express_tester.credentials)); /* Only one credentials received as only one create session should be invoked. 
*/ ASSERT_UINT_EQUALS(1, s_s3express_tester.number_of_credentials); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_provider_get_credentials_cancel_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); struct aws_credentials_properties_s3express property = { .host = *aws_uri_authority(&s_s3express_tester.mock_server), }; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); /* Release the provider right after we fetch the credentials, which will cancel the create session call. */ aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); s_aws_wait_for_credentials_result(1); /* The error code will be AWS_ERROR_S3_CANCELED. */ ASSERT_UINT_EQUALS(AWS_ERROR_S3_CANCELED, s_s3express_tester.error_code); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } static bool s_s3express_session_always_false(struct aws_s3express_session *session, uint64_t now_seconds) { (void)session; (void)now_seconds; return false; } TEST_CASE(s3express_provider_get_credentials_cache_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); struct aws_s3express_credentials_provider_impl *impl = provider->impl; struct aws_credentials_properties_s3express property = { .host = *aws_uri_authority(&s_s3express_tester.mock_server), }; /* Makes one get credentials call. */ ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); s_aws_wait_for_credentials_result(1); /* Only one set of credentials received, as only one CreateSession call should be made. */ ASSERT_UINT_EQUALS(1, s_s3express_tester.number_of_credentials); /* Let the mock always treat the session as invalid, so the cache will miss. */ s_s3express_tester.credentials_callbacks_received = 0; impl->mock_test.s3express_session_is_valid_override = s_s3express_session_always_false; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); s_aws_wait_for_credentials_result(1); /* We get a second set of credentials, as we need to create a new session to replace the invalid one */ ASSERT_UINT_EQUALS(2, s_s3express_tester.number_of_credentials); /* Now the mock always treats the session as valid, so we will hit the cache.
*/ s_s3express_tester.credentials_callbacks_received = 0; impl->mock_test.s3express_session_is_valid_override = s_s3express_session_always_true; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); s_aws_wait_for_credentials_result(1); /* We still have only 2 credentials, as the cache hits and returns the same credentials back */ ASSERT_UINT_EQUALS(2, s_s3express_tester.number_of_credentials); ASSERT_SUCCESS(aws_s3_tester_check_s3express_creds_for_default_mock_response(s_s3express_tester.credentials)); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_provider_background_refresh_mock_server) { (void)ctx; s_bg_refresh_secs_override = 10; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); struct aws_s3express_credentials_provider_impl *impl = provider->impl; /* Always about to expire */ impl->mock_test.s3express_session_about_to_expire_override = s_s3express_session_always_true; struct aws_credentials_properties_s3express property = { .host = *aws_uri_authority(&s_s3express_tester.mock_server), }; /* Makes one get credentials call. */ ASSERT_SUCCESS(s_tester_get_s3express_creds_from_ori_provider( provider, &property, s_get_credentials_callback, &s_s3express_tester, tester.credentials_provider)); s_aws_wait_for_credentials_result(1); /* Only one set of credentials received, as only one CreateSession call should be made. */ ASSERT_UINT_EQUALS(1, s_s3express_tester.number_of_credentials); /* Before refresh, we will get the same creds, as the cache returns the same creds back. */ s_s3express_tester.credentials_callbacks_received = 0; ASSERT_SUCCESS(s_tester_get_s3express_creds_from_ori_provider( provider, &property, s_get_credentials_callback, &s_s3express_tester, tester.credentials_provider)); s_aws_wait_for_credentials_result(1); ASSERT_UINT_EQUALS(1, s_s3express_tester.number_of_credentials); /* Sleep to wait for the background refresh to happen */ aws_thread_current_sleep( aws_timestamp_convert(s_bg_refresh_secs_override + 2, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); s_s3express_tester.credentials_callbacks_received = 0; ASSERT_SUCCESS(s_tester_get_s3express_creds_from_ori_provider( provider, &property, s_get_credentials_callback, &s_s3express_tester, tester.credentials_provider)); s_aws_wait_for_credentials_result(1); /* We have 2 credentials, even though we hit the same session, as the background refresh updated the credentials */ ASSERT_UINT_EQUALS(2, s_s3express_tester.number_of_credentials); ASSERT_SUCCESS(aws_s3_tester_check_s3express_creds_for_default_mock_response(s_s3express_tester.credentials)); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Helper to get the index of the hash key from the cache.
Returns SIZE_MAX if not found */ static size_t s_get_index_from_s3express_cache( struct aws_s3express_credentials_provider_impl *impl, const struct aws_credentials *original_credentials, struct aws_byte_cursor host_value) { { /* BEGIN CRITICAL SECTION */ aws_mutex_lock(&impl->synced_data.lock); const struct aws_linked_list *session_list = aws_linked_hash_table_get_iteration_list(&impl->synced_data.cache->table); size_t index = 0; struct aws_linked_list_node *node = NULL; for (node = aws_linked_list_begin(session_list); node != aws_linked_list_end(session_list);) { struct aws_linked_hash_table_node *table_node = AWS_CONTAINER_OF(node, struct aws_linked_hash_table_node, node); node = aws_linked_list_next(node); struct aws_s3express_session *session = table_node->value; struct aws_string *hash_key = aws_encode_s3express_hash_key_new(s_s3express_tester.allocator, original_credentials, host_value); if (aws_string_eq(session->hash_key, hash_key)) { aws_string_destroy(hash_key); aws_mutex_unlock(&impl->synced_data.lock); return index; } aws_string_destroy(hash_key); ++index; } aws_mutex_unlock(&impl->synced_data.lock); } /* END CRITICAL SECTION */ return SIZE_MAX; } TEST_CASE(s3express_provider_background_refresh_remove_inactive_creds_mock_server) { (void)ctx; s_bg_refresh_secs_override = 10; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .tls_mode = AWS_MR_TLS_DISABLED, .signing_config = &tester.anonymous_signing_config, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION)); s_s3express_tester.client = aws_s3_client_new(allocator, &client_config); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); struct aws_s3express_credentials_provider_impl *impl = provider->impl; /* Always about to expire */ impl->mock_test.s3express_session_about_to_expire_override = s_s3express_session_always_true; struct aws_credentials_properties_s3express property_1 = { .host = aws_byte_cursor_from_c_str("bucket1"), }; struct aws_credentials_properties_s3express property_2 = { .host = aws_byte_cursor_from_c_str("bucket2"), }; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property_1, s_get_credentials_callback, &s_s3express_tester)); s_aws_wait_for_credentials_result(1); ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property_2, s_get_credentials_callback, &s_s3express_tester)); s_aws_wait_for_credentials_result(2); /* Check that the cache has two sessions */ ASSERT_UINT_EQUALS(0, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_1.host)); ASSERT_UINT_EQUALS(1, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_2.host)); /* Sleep to wait for the background refresh to happen */ aws_thread_current_sleep( aws_timestamp_convert(s_bg_refresh_secs_override + 2, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); /* Check that the cache still has two sessions */ ASSERT_UINT_EQUALS(0, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_1.host)); ASSERT_UINT_EQUALS(1, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_2.host)); /* Use the first property to keep it active.
*/ s_s3express_tester.credentials_callbacks_received = 0; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property_1, s_get_credentials_callback, &s_s3express_tester)); s_aws_wait_for_credentials_result(1); /* Now the first property should have moved to the second slot */ ASSERT_UINT_EQUALS(1, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_1.host)); ASSERT_UINT_EQUALS(0, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_2.host)); /* Sleep to wait for another background refresh to happen */ aws_thread_current_sleep( aws_timestamp_convert(s_bg_refresh_secs_override, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); /* property_1 should still be in the cache, but property_2 should be gone */ ASSERT_UINT_EQUALS(0, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_1.host)); ASSERT_UINT_EQUALS(SIZE_MAX, s_get_index_from_s3express_cache(impl, tester.anonymous_creds, property_2.host)); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_provider_stress_mock_server) { (void)ctx; /* Make refresh happen very frequently */ s_bg_refresh_secs_override = 1; size_t num_requests = 5000; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_s3express_credentials_provider *provider = s_s3express_provider_new_mock_server(&tester); ASSERT_NOT_NULL(provider); struct aws_s3express_credentials_provider_impl *impl = provider->impl; /* Always about to expire */ impl->mock_test.s3express_session_about_to_expire_override = s_s3express_session_always_true; /* Stress test under load, repeatedly hitting the same 10 hosts */ for (size_t i = 0; i < num_requests; i++) { char key_buffer[128] = ""; snprintf(key_buffer, sizeof(key_buffer), "test-%zu", (size_t)(i % 10)); struct aws_credentials_properties_s3express property = { .host = aws_byte_cursor_from_c_str(key_buffer), }; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); } s_aws_wait_for_credentials_result(num_requests); ASSERT_SUCCESS(aws_s3_tester_check_s3express_creds_for_default_mock_response(s_s3express_tester.credentials)); /* Stress test under overload, hitting a different host every time */ s_s3express_tester.credentials_callbacks_received = 0; for (size_t i = 0; i < num_requests; i++) { char key_buffer[128] = ""; snprintf(key_buffer, sizeof(key_buffer), "test-%zu", i); struct aws_credentials_properties_s3express property = { .host = aws_byte_cursor_from_c_str(key_buffer), }; ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials( provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester)); } s_aws_wait_for_credentials_result(num_requests); ASSERT_SUCCESS(aws_s3_tester_check_s3express_creds_for_default_mock_response(s_s3express_tester.credentials)); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_provider_long_running_session_refresh) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); ASSERT_SUCCESS(s_s3express_tester_init(allocator)); struct aws_byte_cursor
region_cursor = aws_byte_cursor_from_c_str("us-east-1"); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, .region = region_cursor, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); s_s3express_tester.client = aws_s3_client_new(allocator, &client_config); ASSERT_NOT_NULL(s_s3express_tester.client); struct aws_s3express_credentials_provider_default_options options = { .client = s_s3express_tester.client, .shutdown_complete_callback = s_on_shutdown_complete, .shutdown_user_data = &s_s3express_tester, .mock_test.bg_refresh_secs_override = 600, /* Disable the background refresh. */ }; struct aws_s3express_credentials_provider *provider = aws_s3express_credentials_provider_new_default(allocator, &options); ASSERT_NOT_NULL(provider); /* ~300 secs in total, to make sure the credentials get refreshed at least once. */ size_t num_requests = 600; struct aws_credentials_properties_s3express property = { .host = g_test_s3express_bucket_use1_az4_endpoint, }; for (size_t i = 0; i < num_requests; i++) { ASSERT_SUCCESS(s_tester_get_s3express_creds_from_ori_provider( provider, &property, s_get_credentials_callback, &s_s3express_tester, tester.credentials_provider)); s_aws_wait_for_credentials_result(i + 1); uint64_t expire_time_secs = aws_credentials_get_expiration_timepoint_seconds(s_s3express_tester.credentials); uint64_t current_stamp = UINT64_MAX; aws_sys_clock_get_ticks(&current_stamp); uint64_t now_seconds = aws_timestamp_convert(current_stamp, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL); /* We should always get credentials that are valid for at least 5 more secs */ ASSERT_TRUE(expire_time_secs > now_seconds + 5); /* Sleep for 0.5 sec */ aws_thread_current_sleep(aws_timestamp_convert(500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL)); } /** * We should have at least 2 different creds. **/ ASSERT_TRUE(s_s3express_tester.number_of_credentials >= 2); aws_s3express_credentials_provider_release(provider); s_aws_wait_for_provider_shutdown_callback(); ASSERT_SUCCESS(s_s3express_tester_cleanup()); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_mock_server_tests.c000066400000000000000000001152371456575232400252400ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0.
*/ #include "aws/s3/private/s3_util.h" #include "aws/s3/s3_client.h" #include "s3_tester.h" #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } static int s_validate_mpu_mock_server_metrics(struct aws_array_list *metrics_list) { /* Check the size of the metrics should be the same as the number of requests, which should be create MPU, two * upload parts and one complete MPU */ ASSERT_UINT_EQUALS(4, aws_array_list_length(metrics_list)); struct aws_s3_request_metrics *metrics = NULL; /* First metrics should be the CreateMPU */ aws_array_list_get_at(metrics_list, (void **)&metrics, 0); struct aws_http_headers *response_headers = NULL; ASSERT_SUCCESS(aws_s3_request_metrics_get_response_headers(metrics, &response_headers)); const struct aws_string *request_id = NULL; ASSERT_SUCCESS(aws_s3_request_metrics_get_request_id(metrics, &request_id)); ASSERT_TRUE(aws_string_eq_c_str(request_id, "12345")); const struct aws_string *ip_address = NULL; ASSERT_SUCCESS(aws_s3_request_metrics_get_ip_address(metrics, &ip_address)); /* Should be default local ip for ipv6/ipv4 */ ASSERT_TRUE(aws_string_eq_c_str(ip_address, "::1") || aws_string_eq_c_str(ip_address, "127.0.0.1")); int response_status = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_response_status_code(metrics, &response_status)); ASSERT_UINT_EQUALS(200, response_status); uint32_t stream_id = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_request_stream_id(metrics, &stream_id)); ASSERT_UINT_EQUALS(1, stream_id); const struct aws_string *request_path_query = NULL; aws_s3_request_metrics_get_request_path_query(metrics, &request_path_query); ASSERT_TRUE(request_path_query->len > 0); const struct aws_string *host_address = NULL; aws_s3_request_metrics_get_host_address(metrics, &host_address); ASSERT_TRUE(host_address->len > 0); aws_thread_id_t thread_id = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_thread_id(metrics, &thread_id)); size_t connection_id = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_connection_id(metrics, &connection_id)); ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, aws_s3_request_metrics_get_error_code(metrics)); /* Get all those time stamp */ uint64_t time_stamp = 0; aws_s3_request_metrics_get_start_timestamp_ns(metrics, &time_stamp); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; aws_s3_request_metrics_get_end_timestamp_ns(metrics, &time_stamp); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; aws_s3_request_metrics_get_total_duration_ns(metrics, &time_stamp); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_send_start_timestamp_ns(metrics, &time_stamp)); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_send_end_timestamp_ns(metrics, &time_stamp)); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_sending_duration_ns(metrics, &time_stamp)); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_receive_start_timestamp_ns(metrics, &time_stamp)); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_receive_end_timestamp_ns(metrics, &time_stamp)); ASSERT_FALSE(time_stamp == 0); time_stamp = 0; ASSERT_SUCCESS(aws_s3_request_metrics_get_receiving_duration_ns(metrics, &time_stamp)); ASSERT_FALSE(time_stamp 
== 0); time_stamp = 0; enum aws_s3_request_type request_type = 0; aws_s3_request_metrics_get_request_type(metrics, &request_type); ASSERT_UINT_EQUALS(AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD, request_type); const struct aws_string *operation_name = NULL; ASSERT_SUCCESS(aws_s3_request_metrics_get_operation_name(metrics, &operation_name)); ASSERT_STR_EQUALS("CreateMultipartUpload", aws_string_c_str(operation_name)); /* Second metrics should be the Upload Part */ aws_array_list_get_at(metrics_list, (void **)&metrics, 1); struct aws_byte_cursor header_value; AWS_ZERO_STRUCT(header_value); response_headers = NULL; ASSERT_SUCCESS(aws_s3_request_metrics_get_response_headers(metrics, &response_headers)); ASSERT_SUCCESS(aws_http_headers_get(response_headers, aws_byte_cursor_from_c_str("ETag"), &header_value)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header_value, "b54357faf0632cce46e942fa68356b38")); ASSERT_SUCCESS(aws_http_headers_get(response_headers, aws_byte_cursor_from_c_str("Connection"), &header_value)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&header_value, "keep-alive")); request_type = 0; aws_s3_request_metrics_get_request_type(metrics, &request_type); ASSERT_UINT_EQUALS(AWS_S3_REQUEST_TYPE_UPLOAD_PART, request_type); ASSERT_SUCCESS(aws_s3_request_metrics_get_operation_name(metrics, &operation_name)); ASSERT_STR_EQUALS("UploadPart", aws_string_c_str(operation_name)); /* Third metrics still be Upload Part */ aws_array_list_get_at(metrics_list, (void **)&metrics, 2); request_type = 0; aws_s3_request_metrics_get_request_type(metrics, &request_type); ASSERT_UINT_EQUALS(AWS_S3_REQUEST_TYPE_UPLOAD_PART, request_type); ASSERT_SUCCESS(aws_s3_request_metrics_get_operation_name(metrics, &operation_name)); ASSERT_STR_EQUALS("UploadPart", aws_string_c_str(operation_name)); /* Fourth should be complete MPU */ aws_array_list_get_at(metrics_list, (void **)&metrics, 3); request_type = 0; aws_s3_request_metrics_get_request_type(metrics, &request_type); ASSERT_UINT_EQUALS(AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD, request_type); ASSERT_SUCCESS(aws_s3_request_metrics_get_operation_name(metrics, &operation_name)); ASSERT_STR_EQUALS("CompleteMultipartUpload", aws_string_c_str(operation_name)); /* All the rest should be similar */ return AWS_OP_SUCCESS; } TEST_CASE(multipart_upload_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/default"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, .mock_server = true, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); ASSERT_SUCCESS(s_validate_mpu_mock_server_metrics(&out_results.synced_data.metrics)); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } 
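/* Illustrative sketch (not part of the original test suite): s_validate_mpu_mock_server_metrics() above walks the
 * per-request telemetry that a meta request records. The disabled helper below shows the same idea in minimal form,
 * iterating a metrics list and printing each request's operation name and total duration, using only getters already
 * exercised in this file. The helper name is hypothetical and printf assumes <stdio.h> is available; the block is kept
 * inside "#if 0" so it cannot affect the build. */
#if 0
static void s_sketch_print_request_metrics(const struct aws_array_list *metrics_list) {
    for (size_t i = 0; i < aws_array_list_length(metrics_list); ++i) {
        struct aws_s3_request_metrics *metrics = NULL;
        aws_array_list_get_at(metrics_list, (void **)&metrics, i);

        /* Operation name, e.g. "CreateMultipartUpload" or "UploadPart". */
        const struct aws_string *operation_name = NULL;
        aws_s3_request_metrics_get_operation_name(metrics, &operation_name);

        /* Wall-clock duration of the request, in nanoseconds. */
        uint64_t total_duration_ns = 0;
        aws_s3_request_metrics_get_total_duration_ns(metrics, &total_duration_ns);

        printf(
            "request %zu: %s took %llu ns\n",
            i,
            aws_string_c_str(operation_name),
            (unsigned long long)total_duration_ns);
    }
}
#endif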
TEST_CASE(multipart_upload_checksum_with_retry_mock_server) { (void)ctx; /** * We had a memory leak when the retry was triggered and the checksum was calculated. * The retry will initialize the checksum buffer again, but the previous one was not freed. */ struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/throttle"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, .mock_server = true, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(multipart_download_checksum_with_retry_mock_server) { (void)ctx; /** * We had a memory leak after the header of the request received successfully, the request failed. * We have allocated memory that never frees. */ struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* Mock server will response without fake checksum for the body */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/get_object_checksum_retry"); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, NULL)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(async_internal_error_from_complete_multipart_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* Checkout the ./mock_s3_server/CompleteMultipartUpload/async_internal_error.json for the response details */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/async_internal_error"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct 
aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); /* Internal error will be retried and failed with internal error. */ ASSERT_UINT_EQUALS(AWS_ERROR_S3_INTERNAL_ERROR, out_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(async_access_denied_from_complete_multipart_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* Checkout the ./mock_s3_server/CompleteMultipartUpload/async_access_denied_error.json for the response details */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/async_access_denied_error"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, out_results.finished_error_code); ASSERT_UINT_EQUALS(AWS_HTTP_STATUS_CODE_200_OK, out_results.finished_response_status); ASSERT_TRUE(out_results.error_response_body.len != 0); ASSERT_STR_EQUALS("CompleteMultipartUpload", aws_string_c_str(out_results.error_response_operation_name)); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(get_object_modified_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 64 * 1024, .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* Check the mock server README/GetObject Response for the response that will be received. 
*/ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/get_object_modified"); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .client = client, .get_options = { .object_path = object_path, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_OBJECT_MODIFIED, out_results.finished_error_code); ASSERT_UINT_EQUALS(AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED, out_results.finished_response_status); ASSERT_STR_EQUALS("GetObject", aws_string_c_str(out_results.error_response_operation_name)); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(get_object_invalid_responses_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 64 * 1024, .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* 1 - Mock server will response without Content-Range */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/get_object_invalid_response_missing_content_range"); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .client = client, .get_options = { .object_path = object_path, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER, out_results.finished_error_code); /* 2 - Mock server will response without Etags */ object_path = aws_byte_cursor_from_c_str("/get_object_invalid_response_missing_etags"); get_options.get_options.object_path = object_path; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_MISSING_ETAG, out_results.finished_error_code); /* 3 - Mock server will response without Content-Range response for HEAD request */ object_path = aws_byte_cursor_from_c_str("/get_object_invalid_response_missing_content_range"); /* Put together a simple S3 Get Object request. 
*/ struct aws_uri mock_server; ASSERT_SUCCESS(aws_uri_init_parse(&mock_server, allocator, &g_mock_server_uri)); struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, *aws_uri_authority(&mock_server), object_path); struct aws_http_header range_header = { .name = g_range_header_name, .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=-1"), }; ASSERT_SUCCESS(aws_http_message_add_header(message, range_header)); get_options.get_options.object_path = object_path; get_options.message = message; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER, out_results.finished_error_code); aws_uri_clean_up(&mock_server); aws_http_message_destroy(message); /* Clean up */ aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(get_object_mismatch_checksum_responses_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 64 * 1024, .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* Mock server will response without fake checksum for the body */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/get_object_unmatch_checksum_crc32"); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .client = client, .expected_validate_checksum_alg = AWS_SCA_CRC32, .validate_get_response_checksum = true, .get_options = { .object_path = object_path, }, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH, out_results.finished_error_code); ASSERT_UINT_EQUALS(AWS_SCA_CRC32, out_results.algorithm); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Test that the HTTP throughput monitoring's default settings can detect dead (or absurdly slow) connections. * We trigger this by having the mock server delay 60 seconds before sending the response. 
*/ TEST_CASE(get_object_throughput_failure_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = 64 * 1024, .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/get_object_delay_60s"); struct aws_s3_tester_meta_request_options get_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .client = client, .get_options = { .object_path = object_path, }, .default_type_options = { .mode = AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &get_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE, out_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } static int s_test_upload_part_invalid_response_mock_server_ex( struct aws_allocator *allocator, bool async_input_stream) { struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/missing_etag"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 1024, /* big, so it's likely we're still reading when failure happens */ .object_path_override = object_path, .async_input_stream = async_input_stream, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_MISSING_ETAG, out_results.finished_error_code); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Test an UploadPart failing due to invalid response */ TEST_CASE(upload_part_invalid_response_mock_server) { (void)ctx; return s_test_upload_part_invalid_response_mock_server_ex(allocator, false /*async_input_stream*/); } /* Test an UploadPart failing due to invalid response, while uploading from an async-input-stream */ TEST_CASE(upload_part_async_invalid_response_mock_server) { (void)ctx; return s_test_upload_part_invalid_response_mock_server_ex(allocator, true /*async_input_stream*/); } /* Fake a MPU with 4 parts and the 2nd and 3rd have already completed and resume works fine */ TEST_CASE(resume_first_part_not_completed_mock_server) { (void)ctx; struct aws_s3_tester tester; size_t num_parts = 4; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct 
aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(8), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* ListParts from mock server will return Etags for the 2nd and 3rd parts */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/resume_first_part_not_completed"); struct aws_s3_upload_resume_token_options token_options = { .upload_id = aws_byte_cursor_from_c_str("upload_id"), .part_size = client_options.part_size, .total_num_parts = num_parts, }; struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new_upload(allocator, &token_options); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = (uint32_t)num_parts * 8, /* Make sure we have exactly 4 parts */ .object_path_override = object_path, .resume_token = token, }, .mock_server = true, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); /* Make Sure we only uploaded 2 parts. */ /* TODO: monitor telemetry ensure this happened */ aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_meta_request_resume_token_release(token); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Fake a MPU with 4 parts and the 2nd and 3rd have already completed and resume works fine with two response of * ListParts */ TEST_CASE(resume_multi_page_list_parts_mock_server) { (void)ctx; struct aws_s3_tester tester; size_t num_parts = 4; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(8), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* ListParts from mock server will return NextPartNumberMarker */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/multiple_list_parts"); struct aws_s3_upload_resume_token_options token_options = { .upload_id = aws_byte_cursor_from_c_str("upload_id"), .part_size = client_options.part_size, .total_num_parts = num_parts, }; struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new_upload(allocator, &token_options); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = (uint32_t)num_parts * 8, /* Make sure we have exactly 4 parts */ .object_path_override = object_path, .resume_token = token, }, .mock_server = true, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); /* Make Sure we only uploaded 2 parts. 
*/ /* TODO: monitor telemetry ensure this happened */ aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_meta_request_resume_token_release(token); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(resume_list_parts_failed_mock_server) { (void)ctx; struct aws_s3_tester tester; size_t num_parts = 4; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(8), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/non-exist"); struct aws_s3_upload_resume_token_options token_options = { .upload_id = aws_byte_cursor_from_c_str("upload_id"), .part_size = client_options.part_size, .total_num_parts = num_parts, }; struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new_upload(allocator, &token_options); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = (uint32_t)num_parts * 8, /* Make sure we have exactly 4 parts */ .object_path_override = object_path, .resume_token = token, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_INVALID_RESPONSE_STATUS, out_results.finished_error_code); ASSERT_UINT_EQUALS(AWS_HTTP_STATUS_CODE_404_NOT_FOUND, out_results.finished_response_status); ASSERT_STR_EQUALS("ListParts", aws_string_c_str(out_results.error_response_operation_name)); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_meta_request_resume_token_release(token); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(resume_after_finished_mock_server) { (void)ctx; struct aws_s3_tester tester; size_t num_parts = 4; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(8), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/non-exist"); struct aws_s3_upload_resume_token_options token_options = { .upload_id = aws_byte_cursor_from_c_str("upload_id"), .part_size = client_options.part_size, .total_num_parts = num_parts, .num_parts_completed = num_parts, }; struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new_upload(allocator, &token_options); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = (uint32_t)num_parts * 8, /* Make sure we have exactly 4 parts */ .object_path_override = object_path, .resume_token = token, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_NO_VALIDATE, }; struct aws_s3_meta_request_test_results 
out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); /* The error code should be success, but there are no headers and stuff as no request was made. */ ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, out_results.finished_error_code); /* TODO: monitor telemetry to ensure no actual data was sent */ aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_meta_request_resume_token_release(token); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(multipart_upload_proxy_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, .use_proxy = true, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/default"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_NO_VALIDATE, }; /* The request can fail if proxy is unavailable. */ ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(endpoint_override_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); /* 1 - Mock server will response without Content-Range */ struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/default"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .put_options = { .object_size_mb = 5, /* Make sure we have exactly 4 parts */ .object_path_override = object_path, }, .mock_server = true, }; /* Put together a simple S3 Put Object request. */ struct aws_input_stream *input_stream = aws_s3_test_input_stream_new(allocator, put_options.put_options.object_size_mb); struct aws_http_message *message = aws_s3_test_put_object_request_new(allocator, NULL, object_path, g_test_body_content_type, input_stream, 0); ASSERT_NOT_NULL(message); /* 1. Create request without host and use endpoint override for the host info */ put_options.message = message; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /* 2. 
Create request with host info missmatch endpoint override */ struct aws_http_header host_header = { .name = g_host_header_name, .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bad_host"), }; ASSERT_SUCCESS(aws_http_message_add_header(message, host_header)); put_options.message = message; put_options.validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, NULL)); /* Clean up */ aws_http_message_destroy(message); aws_input_stream_release(input_stream); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } /* Test that `RequestTimeTooSkewed` will be retried */ TEST_CASE(request_time_too_skewed_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_tester_client_options client_options = { .part_size = MB_TO_BYTES(5), .tls_usage = AWS_S3_TLS_DISABLED, }; struct aws_s3_client *client = NULL; ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/request_time_too_skewed"); struct aws_s3_meta_request_test_results out_results; aws_s3_meta_request_test_results_init(&out_results, allocator); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, .mock_server = true, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results)); ASSERT_UINT_EQUALS(AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED, out_results.finished_error_code); /* The default retry will max out after 5 times. So, in total, it will be 6 requests, first one and 5 retries. */ size_t result_num = aws_array_list_length(&out_results.synced_data.metrics); ASSERT_UINT_EQUALS(6, result_num); aws_s3_meta_request_test_results_clean_up(&out_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_parallel_read_stream_test.c000066400000000000000000000303241456575232400266730ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_parallel_input_stream.h" #include "aws/s3/private/s3_util.h" #include "aws/s3/s3_client.h" #include "s3_tester.h" #include #include #include #include #include #include #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } #define ONE_SEC_IN_NS ((uint64_t)AWS_TIMESTAMP_NANOS) #define MAX_TIMEOUT_NS (600 * ONE_SEC_IN_NS) AWS_STATIC_STRING_FROM_LITERAL(s_parallel_stream_test, "SimpleParallelStreamTest"); static int s_create_read_file(const char *file_path, size_t length) { remove(file_path); FILE *file = aws_fopen(file_path, "w"); size_t loop = length / s_parallel_stream_test->len; for (size_t i = 0; i < loop; ++i) { fprintf(file, "%s", (char *)s_parallel_stream_test->bytes); } size_t reminder = length % s_parallel_stream_test->len; if (reminder) { fprintf(file, "%.*s", (int)reminder, s_parallel_stream_test->bytes); } fclose(file); return AWS_OP_SUCCESS; } struct aws_parallel_read_from_test_args { struct aws_allocator *alloc; size_t buffer_start_pos; size_t file_start_pos; size_t read_length; struct aws_future_bool *final_end_future; struct aws_byte_buf *final_dest; struct aws_parallel_input_stream *parallel_read_stream; struct aws_atomic_var *completed_count; struct aws_atomic_var *end_of_stream; size_t split_num; }; static void s_s3_parallel_from_file_read_test_task(struct aws_task *task, void *arg, enum aws_task_status task_status) { (void)task_status; struct aws_parallel_read_from_test_args *test_args = arg; struct aws_byte_buf read_buf = { .allocator = NULL, .buffer = test_args->final_dest->buffer + test_args->buffer_start_pos, .len = 0, .capacity = test_args->read_length, }; struct aws_future_bool *read_future = aws_parallel_input_stream_read(test_args->parallel_read_stream, test_args->file_start_pos, &read_buf); aws_future_bool_wait(read_future, MAX_TIMEOUT_NS); bool end_of_stream = aws_future_bool_get_result(read_future); aws_future_bool_release(read_future); struct aws_future_bool *end_future = test_args->final_end_future; size_t read_completed = aws_atomic_fetch_add(test_args->completed_count, 1); if (end_of_stream) { aws_atomic_store_int(test_args->end_of_stream, 1); } bool completed = read_completed == test_args->split_num - 1; bool reached_eos = aws_atomic_load_int(test_args->end_of_stream) == 1; aws_mem_release(test_args->alloc, task); aws_mem_release(test_args->alloc, test_args); if (completed) { aws_future_bool_set_result(end_future, reached_eos); } aws_future_bool_release(end_future); } static int s_parallel_read_test_helper( struct aws_allocator *alloc, struct aws_parallel_input_stream *parallel_read_stream, struct aws_byte_buf *read_buf, struct aws_event_loop_group *elg, size_t start_pos, size_t total_length, size_t split_num, bool *out_eos) { struct aws_atomic_var completed_count; aws_atomic_store_int(&completed_count, 0); struct aws_atomic_var end_of_stream; aws_atomic_store_int(&end_of_stream, 0); size_t number_bytes_per_read = total_length / split_num; if (number_bytes_per_read == 0) { struct aws_future_bool *read_future = aws_parallel_input_stream_read(parallel_read_stream, 0, read_buf); ASSERT_TRUE(aws_future_bool_wait(read_future, MAX_TIMEOUT_NS)); aws_future_bool_release(read_future); return AWS_OP_SUCCESS; } struct aws_future_bool *future = aws_future_bool_new(alloc); for 
(size_t i = 0; i < split_num; i++) { struct aws_event_loop *loop = aws_event_loop_group_get_next_loop(elg); struct aws_parallel_read_from_test_args *test_args = aws_mem_calloc(alloc, 1, sizeof(struct aws_parallel_read_from_test_args)); size_t read_length = number_bytes_per_read; if (i == split_num - 1) { /* Last part, adjust the size */ read_length += total_length % split_num; } test_args->alloc = alloc; test_args->buffer_start_pos = i * number_bytes_per_read; test_args->file_start_pos = start_pos + test_args->buffer_start_pos; test_args->final_end_future = aws_future_bool_acquire(future); test_args->read_length = read_length; test_args->final_dest = read_buf; test_args->parallel_read_stream = parallel_read_stream; test_args->completed_count = &completed_count; test_args->end_of_stream = &end_of_stream; test_args->split_num = split_num; struct aws_task *read_task = aws_mem_calloc(alloc, 1, sizeof(struct aws_task)); aws_task_init(read_task, s_s3_parallel_from_file_read_test_task, test_args, "s3_parallel_read_test_task"); aws_event_loop_schedule_task_now(loop, read_task); } ASSERT_TRUE(aws_future_bool_wait(future, MAX_TIMEOUT_NS)); *out_eos = aws_future_bool_get_result(future); aws_future_bool_release(future); read_buf->len = total_length; return AWS_OP_SUCCESS; } TEST_CASE(parallel_read_stream_from_file_sanity_test) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); const char *file_path = "s3_test_parallel_input_stream_read.txt"; /* unique name */ ASSERT_SUCCESS(s_create_read_file(file_path, s_parallel_stream_test->len)); struct aws_byte_cursor path_cursor = aws_byte_cursor_from_c_str(file_path); struct aws_parallel_input_stream *parallel_read_stream = aws_parallel_input_stream_new_from_file(allocator, path_cursor); ASSERT_NOT_NULL(parallel_read_stream); aws_parallel_input_stream_acquire(parallel_read_stream); aws_parallel_input_stream_release(parallel_read_stream); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 0, NULL); { struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, s_parallel_stream_test->len); bool eos_reached = false; ASSERT_SUCCESS(s_parallel_read_test_helper( allocator, parallel_read_stream, &read_buf, el_group, 0, s_parallel_stream_test->len, 8, &eos_reached)); /* Read the exact number of bytes will not reach to the EOS */ ASSERT_FALSE(eos_reached); ASSERT_TRUE(aws_string_eq_byte_buf(s_parallel_stream_test, &read_buf)); aws_byte_buf_clean_up(&read_buf); } { size_t extra_byte_len = s_parallel_stream_test->len + 1; struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, extra_byte_len); bool eos_reached = false; ASSERT_SUCCESS(s_parallel_read_test_helper( allocator, parallel_read_stream, &read_buf, el_group, 0, extra_byte_len, 8, &eos_reached)); /* Read the exact number of bytes will not reach to the EOS */ ASSERT_TRUE(eos_reached); aws_byte_buf_clean_up(&read_buf); } { /* Failure from short buffer */ struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, s_parallel_stream_test->len); /* Set the buffer length to be capacity */ read_buf.len = s_parallel_stream_test->len; struct aws_future_bool *read_future = aws_parallel_input_stream_read(parallel_read_stream, 0, &read_buf); ASSERT_TRUE(aws_future_bool_is_done(read_future)); int error = aws_future_bool_get_error(read_future); ASSERT_UINT_EQUALS(AWS_ERROR_SHORT_BUFFER, error); aws_byte_buf_clean_up(&read_buf); aws_future_bool_release(read_future); } { /* offset larger than the length of file, will 
read nothing and return EOS */ struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, s_parallel_stream_test->len); struct aws_future_bool *read_future = aws_parallel_input_stream_read(parallel_read_stream, 2 * s_parallel_stream_test->len, &read_buf); ASSERT_TRUE(aws_future_bool_is_done(read_future)); int error = aws_future_bool_get_error(read_future); bool eos = aws_future_bool_get_result(read_future); /* Seek to offset larger than the length will not fail. */ ASSERT_UINT_EQUALS(AWS_ERROR_SUCCESS, error); ASSERT_TRUE(eos); ASSERT_UINT_EQUALS(0, read_buf.len); aws_byte_buf_clean_up(&read_buf); aws_future_bool_release(read_future); } remove(file_path); aws_parallel_input_stream_release(parallel_read_stream); aws_event_loop_group_release(el_group); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(parallel_read_stream_from_large_file_test) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); size_t file_length = MB_TO_BYTES(10); const char *file_path = "s3_test_parallel_input_stream_read_large.txt"; /* unique name */ ASSERT_SUCCESS(s_create_read_file(file_path, file_length)); struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 0, NULL); struct aws_byte_cursor path_cursor = aws_byte_cursor_from_c_str(file_path); struct aws_parallel_input_stream *parallel_read_stream = aws_parallel_input_stream_new_from_file(allocator, path_cursor); ASSERT_NOT_NULL(parallel_read_stream); { /* The whole file */ struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, file_length); struct aws_byte_buf expected_read_buf; aws_byte_buf_init(&expected_read_buf, allocator, file_length); bool eos_reached = false; ASSERT_SUCCESS(s_parallel_read_test_helper( allocator, parallel_read_stream, &read_buf, el_group, 0, file_length, 8, &eos_reached)); /* Read the exact number of bytes will not reach to the EOS */ ASSERT_FALSE(eos_reached); struct aws_input_stream *stream = aws_input_stream_new_from_file(allocator, file_path); ASSERT_SUCCESS(aws_input_stream_read(stream, &expected_read_buf)); ASSERT_TRUE(aws_byte_buf_eq(&expected_read_buf, &read_buf)); aws_byte_buf_clean_up(&read_buf); aws_byte_buf_clean_up(&expected_read_buf); aws_input_stream_release(stream); } { /* First string */ struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, file_length); bool eos_reached = true; ASSERT_SUCCESS(s_parallel_read_test_helper( allocator, parallel_read_stream, &read_buf, el_group, 0, s_parallel_stream_test->len, 8, &eos_reached)); ASSERT_FALSE(eos_reached); ASSERT_TRUE(aws_string_eq_byte_buf(s_parallel_stream_test, &read_buf)); aws_byte_buf_clean_up(&read_buf); } { /* Second string */ struct aws_byte_buf read_buf; aws_byte_buf_init(&read_buf, allocator, file_length); bool eos_reached = true; ASSERT_SUCCESS(s_parallel_read_test_helper( allocator, parallel_read_stream, &read_buf, el_group, s_parallel_stream_test->len, s_parallel_stream_test->len, 8, &eos_reached)); ASSERT_FALSE(eos_reached); ASSERT_TRUE(aws_string_eq_byte_buf(s_parallel_stream_test, &read_buf)); aws_byte_buf_clean_up(&read_buf); } remove(file_path); aws_event_loop_group_release(el_group); aws_parallel_input_stream_release(parallel_read_stream); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_platform_info_test.c000066400000000000000000000073421456575232400253740ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* SPDX-License-Identifier: Apache-2.0. */ #include #include #include static int s_test_get_existing_platform_info(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_byte_cursor instance_type = aws_byte_cursor_from_c_str("c5n.18xlarge"); struct aws_s3_platform_info_loader *loader = aws_s3_platform_info_loader_new(allocator); const struct aws_s3_platform_info *platform_info = aws_s3_get_platform_info_for_instance_type(loader, instance_type); ASSERT_NOT_NULL(platform_info); ASSERT_BIN_ARRAYS_EQUALS( instance_type.ptr, instance_type.len, platform_info->instance_type.ptr, platform_info->instance_type.len); ASSERT_UINT_EQUALS(100, (uintmax_t)platform_info->max_throughput_gbps); aws_s3_platform_info_loader_release(loader); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_get_existing_platform_info, s_test_get_existing_platform_info) static int s_test_get_nonexistent_platform_info(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_s3_platform_info_loader *loader = aws_s3_platform_info_loader_new(allocator); struct aws_byte_cursor instance_type = aws_byte_cursor_from_c_str("non-existent"); const struct aws_s3_platform_info *platform_info = aws_s3_get_platform_info_for_instance_type(loader, instance_type); ASSERT_NULL(platform_info); aws_s3_platform_info_loader_release(loader); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_get_nonexistent_platform_info, s_test_get_nonexistent_platform_info) static int s_load_platform_info_from_global_state_sanity_test(struct aws_allocator *allocator, void *arg) { (void)arg; aws_s3_library_init(allocator); const struct aws_s3_platform_info *platform_info = aws_s3_get_current_platform_info(); ASSERT_NOT_NULL(platform_info); if (platform_info->instance_type.len) { struct aws_s3_platform_info_loader *loader = aws_s3_platform_info_loader_new(allocator); const struct aws_s3_platform_info *by_name_info = aws_s3_get_platform_info_for_instance_type(loader, platform_info->instance_type); if (by_name_info) { ASSERT_BIN_ARRAYS_EQUALS( platform_info->instance_type.ptr, platform_info->instance_type.len, by_name_info->instance_type.ptr, by_name_info->instance_type.len); ASSERT_TRUE(platform_info->max_throughput_gbps == by_name_info->max_throughput_gbps); } aws_s3_platform_info_loader_release(loader); } aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(load_platform_info_from_global_state_sanity_test, s_load_platform_info_from_global_state_sanity_test) static int s_test_get_platforms_with_recommended_config(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_s3_library_init(allocator); struct aws_array_list recommended_platform_list = aws_s3_get_platforms_with_recommended_config(); ASSERT_TRUE(aws_array_list_length(&recommended_platform_list) > 0); for (size_t i = 0; i < aws_array_list_length(&recommended_platform_list); ++i) { struct aws_byte_cursor cursor; aws_array_list_get_at(&recommended_platform_list, &cursor, i); ASSERT_TRUE(cursor.len > 0); } aws_array_list_clean_up(&recommended_platform_list); aws_s3_library_clean_up(); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_get_platforms_with_recommended_config, s_test_get_platforms_with_recommended_config) aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_request_messages_tests.c000066400000000000000000001153611456575232400263000ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_request_messages.h" #include "aws/s3/private/s3_util.h" #include "s3_tester.h" #include #include #include #include #include #include #include #include #include #include #include static const struct aws_http_header get_object_test_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HostValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-Match"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-MatchValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-Modified-Since"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-Modified-SinceValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-None-Match"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-None-MatchValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Range"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RangeValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithmValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-keyValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5Value"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-request-payer"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-request-payerValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-expected-bucket-owner"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-expected-bucket-ownerValue"), }, }; static const struct aws_http_header s_put_object_test_headers[] = { { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ACLValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CacheControlValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ContentDispositionValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ContentEncodingValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ContentLanguageValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ContentLengthValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ContentMD5Value"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ContentTypeValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ExpiresValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GrantFullControlValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GrantReadValue"), }, { 
.name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GrantReadACPValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GrantWriteACPValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ServerSideEncryptionValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("StorageClassValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("WebsiteRedirectLocationValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SSECustomerAlgorithmValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SSECustomerKeyValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SSECustomerKeyMD5Value"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SSEKMSKeyIdValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SSEKMSEncryptionContextValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("BucketKeyEnabledValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-request-payer"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RequestPayerValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("TaggingValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ObjectLockModeValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ObjectLockRetainUntilDateValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ObjectLockLegalHoldStatusValue"), }, { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-expected-bucket-owner"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ExpectedBucketOwnerValue"), }, }; static int s_fill_byte_buf(struct aws_byte_buf *buffer, struct aws_allocator *allocator, size_t buffer_size) { ASSERT_TRUE(buffer != NULL); ASSERT_TRUE(allocator != NULL); ASSERT_TRUE(buffer_size > 0); ASSERT_SUCCESS(aws_byte_buf_init(buffer, allocator, buffer_size)); srand(0); for (size_t i = 0; i < buffer_size; ++i) { const char single_char = (char)(rand() % (int)('z' - 'a') + (int)'a'); struct aws_byte_cursor single_char_cursor = { .ptr = (uint8_t *)&single_char, .len = 1, }; ASSERT_SUCCESS(aws_byte_buf_append(buffer, &single_char_cursor)); } return AWS_OP_SUCCESS; } static int s_test_http_headers_match( struct aws_allocator *allocator, const struct aws_http_message *message0, const struct aws_http_message *message1, /* Headers that we know are in message0, but should NOT be in message1 */ const struct 
aws_byte_cursor *excluded_message0_headers, size_t excluded_message0_headers_count, /* Headers in message1 that are okay to be in message1 even if they are in the excluded list or are not in message0.*/ const struct aws_byte_cursor *message1_header_exceptions, size_t message1_header_exceptions_count) { ASSERT_TRUE(message0 != NULL); ASSERT_TRUE(message1 != NULL); ASSERT_TRUE(excluded_message0_headers != NULL || excluded_message0_headers_count == 0); ASSERT_TRUE(message1_header_exceptions != NULL || message1_header_exceptions_count == 0); const struct aws_http_headers *message0_headers = aws_http_message_get_const_headers(message0); ASSERT_TRUE(message0_headers != NULL); const struct aws_http_headers *message1_headers = aws_http_message_get_const_headers(message1); ASSERT_TRUE(message1_headers != NULL); struct aws_http_headers *expected_message0_headers = aws_http_headers_new(allocator); /* Copy message1 headers to expected_message0_headers. With upcoming adds/removes, it should transform back into * message0. */ for (size_t i = 0; i < aws_http_headers_count(message1_headers); ++i) { struct aws_http_header message1_header; AWS_ZERO_STRUCT(message1_header); ASSERT_SUCCESS(aws_http_headers_get_index(message1_headers, i, &message1_header)); ASSERT_SUCCESS(aws_http_headers_add(expected_message0_headers, message1_header.name, message1_header.value)); } /* Go through all of the headers that were originally removed from message1 after it was copied from message0. */ for (size_t i = 0; i < excluded_message0_headers_count; ++i) { const struct aws_byte_cursor *excluded_header_name = &excluded_message0_headers[i]; bool header_existence_is_valid = false; /* If the header is in the exception list, it's okay for message1 to have. (It may have been re-added.) */ for (size_t j = 0; j < message1_header_exceptions_count; ++j) { if (aws_byte_cursor_eq(excluded_header_name, &message1_header_exceptions[j])) { header_existence_is_valid = true; break; } } /* Try to get the header from message1. */ struct aws_byte_cursor message1_header_value; AWS_ZERO_STRUCT(message1_header_value); int result = aws_http_headers_get(message1_headers, *excluded_header_name, &message1_header_value); if (header_existence_is_valid) { /* If this header is allowed to exist in message1, then we don't need to assert on its existence or * non-existence. But we do want to erase it from the expected_message0_headers, since its value may be * different from that in message0. */ if (result == AWS_OP_SUCCESS) { ASSERT_SUCCESS(aws_http_headers_erase(expected_message0_headers, *excluded_header_name)); } } else { /* In this case, message1 should not have the header. */ ASSERT_TRUE(result == AWS_OP_ERR && aws_last_error() == AWS_ERROR_HTTP_HEADER_NOT_FOUND); } /* At this point, expected_message0_headers should not have the excluded header in it. Add a copy of the header * from message0 to expected_message0_headers to further transform it toward being a copy of message0 headers. 
*/ struct aws_byte_cursor message0_header_value; AWS_ZERO_STRUCT(message0_header_value); if (aws_http_headers_get(message0_headers, *excluded_header_name, &message0_header_value) == AWS_OP_SUCCESS) { ASSERT_SUCCESS( aws_http_headers_add(expected_message0_headers, *excluded_header_name, message0_header_value)); } } /* message0_headers should now match expected_message0_headers */ { ASSERT_TRUE(aws_http_headers_count(message0_headers) == aws_http_headers_count(expected_message0_headers)); for (size_t i = 0; i < aws_http_headers_count(message0_headers); ++i) { struct aws_http_header message0_header; AWS_ZERO_STRUCT(message0_header); ASSERT_SUCCESS(aws_http_headers_get_index(message0_headers, i, &message0_header)); struct aws_byte_cursor expected_message0_header_value; AWS_ZERO_STRUCT(expected_message0_header_value); ASSERT_SUCCESS( aws_http_headers_get(expected_message0_headers, message0_header.name, &expected_message0_header_value)); ASSERT_TRUE(aws_byte_cursor_eq(&message0_header.value, &expected_message0_header_value)); } } aws_http_headers_release(expected_message0_headers); return AWS_OP_SUCCESS; } static int s_test_http_messages_match( struct aws_allocator *allocator, const struct aws_http_message *message0, const struct aws_http_message *message1, const struct aws_byte_cursor *excluded_headers, size_t excluded_headers_count) { ASSERT_TRUE(message0 != NULL); ASSERT_TRUE(message1 != NULL); ASSERT_TRUE(excluded_headers != NULL || excluded_headers_count == 0); struct aws_byte_cursor request_path; AWS_ZERO_STRUCT(request_path); ASSERT_SUCCESS(aws_http_message_get_request_path(message0, &request_path)); struct aws_byte_cursor copied_request_path; AWS_ZERO_STRUCT(copied_request_path); ASSERT_SUCCESS(aws_http_message_get_request_path(message1, &copied_request_path)); ASSERT_TRUE(aws_byte_cursor_eq(&request_path, &copied_request_path)); struct aws_byte_cursor request_method; AWS_ZERO_STRUCT(request_method); ASSERT_SUCCESS(aws_http_message_get_request_method(message0, &request_method)); struct aws_byte_cursor copied_request_method; AWS_ZERO_STRUCT(copied_request_method); ASSERT_SUCCESS(aws_http_message_get_request_method(message1, &copied_request_method)); ASSERT_TRUE(aws_byte_cursor_eq(&request_method, &copied_request_method)); ASSERT_SUCCESS( s_test_http_headers_match(allocator, message0, message1, excluded_headers, excluded_headers_count, NULL, 0)); return AWS_OP_SUCCESS; } static struct aws_http_header s_http_header_from_c_str(const char *name, const char *value) { struct aws_http_header header = { .name = aws_byte_cursor_from_c_str(name), .value = aws_byte_cursor_from_c_str(value), }; return header; } static int s_test_http_message_request_path( struct aws_http_message *message, const struct aws_byte_cursor *request_path) { struct aws_byte_cursor message_request_path; AWS_ZERO_STRUCT(message_request_path); ASSERT_SUCCESS(aws_http_message_get_request_path(message, &message_request_path)); ASSERT_TRUE(aws_byte_cursor_eq(&message_request_path, request_path)); return AWS_OP_SUCCESS; } static int s_test_http_message_request_method(struct aws_http_message *message, const char *method) { struct aws_byte_cursor message_request_method; AWS_ZERO_STRUCT(message_request_method); ASSERT_SUCCESS(aws_http_message_get_request_method(message, &message_request_method)); struct aws_byte_cursor method_cursor = aws_byte_cursor_from_c_str(method); ASSERT_TRUE(aws_byte_cursor_eq(&message_request_method, &method_cursor)); return AWS_OP_SUCCESS; } static int s_test_http_message_body_stream( struct aws_allocator 
*allocator, struct aws_http_message *derived_message, struct aws_byte_buf *expected_stream_contents) { ASSERT_TRUE(derived_message != NULL); ASSERT_TRUE(expected_stream_contents != NULL); struct aws_http_headers *headers = aws_http_message_get_headers(derived_message); ASSERT_TRUE(headers != NULL); struct aws_input_stream *body_stream = aws_http_message_get_body_stream(derived_message); ASSERT_TRUE(body_stream != NULL); /* Check for the content length header. */ uint64_t content_length = 0; ASSERT_SUCCESS(aws_s3_tester_get_content_length(headers, &content_length)); ASSERT_TRUE(content_length == expected_stream_contents->len); /* Check that the stream data is equal to the original buffer data. */ struct aws_byte_buf stream_read_buffer; ASSERT_SUCCESS(aws_byte_buf_init(&stream_read_buffer, allocator, expected_stream_contents->len)); ASSERT_SUCCESS(aws_input_stream_read(body_stream, &stream_read_buffer)); ASSERT_TRUE(aws_byte_buf_eq(expected_stream_contents, &stream_read_buffer)); aws_byte_buf_clean_up(&stream_read_buffer); /* There should be no data left in the stream. */ struct aws_byte_buf stream_overread_buffer; ASSERT_SUCCESS(aws_byte_buf_init(&stream_overread_buffer, allocator, expected_stream_contents->len)); ASSERT_SUCCESS(aws_input_stream_read(body_stream, &stream_overread_buffer)); ASSERT_TRUE(stream_overread_buffer.len == 0); aws_byte_buf_clean_up(&stream_overread_buffer); return AWS_OP_SUCCESS; } int s_create_get_object_message( struct aws_allocator *allocator, const struct aws_byte_cursor *path, struct aws_http_message **out_message) { ASSERT_TRUE(out_message != NULL); ASSERT_TRUE(*out_message == NULL); struct aws_http_message *message = aws_http_message_new_request(allocator); ASSERT_TRUE(message != NULL); ASSERT_SUCCESS(aws_http_message_set_request_path(message, *path)); ASSERT_SUCCESS(aws_http_message_set_request_method(message, aws_byte_cursor_from_c_str("GET"))); for (size_t i = 0; i < AWS_ARRAY_SIZE(get_object_test_headers); ++i) { ASSERT_SUCCESS(aws_http_message_add_header(message, get_object_test_headers[i])); } *out_message = message; return AWS_OP_SUCCESS; } int s_create_put_object_message( struct aws_allocator *allocator, const struct aws_byte_cursor *path, struct aws_http_message **out_message) { ASSERT_TRUE(out_message != NULL); ASSERT_TRUE(*out_message == NULL); struct aws_http_message *message = aws_http_message_new_request(allocator); ASSERT_TRUE(message != NULL); ASSERT_SUCCESS(aws_http_message_set_request_path(message, *path)); ASSERT_SUCCESS(aws_http_message_set_request_method(message, aws_byte_cursor_from_c_str("PUT"))); for (size_t i = 0; i < AWS_ARRAY_SIZE(s_put_object_test_headers); ++i) { ASSERT_SUCCESS(aws_http_message_add_header(message, s_put_object_test_headers[i])); } *out_message = message; return AWS_OP_SUCCESS; ; } AWS_TEST_CASE(test_s3_copy_http_message, s_test_s3_copy_http_message) static int s_test_s3_copy_http_message(struct aws_allocator *allocator, void *ctx) { (void)ctx; const struct aws_byte_cursor request_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RequestMethod"); const struct aws_byte_cursor request_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("RequestPath"); const struct aws_http_header original_headers[] = { s_http_header_from_c_str("IncludedHeader", "IncludedHeaderValue"), s_http_header_from_c_str("ExcludedHeader", "ExcludedHeaderValue"), s_http_header_from_c_str("x-amz-meta-MyMetadata", "MyMetadataValue"), }; const struct aws_byte_cursor excluded_headers[] = { aws_byte_cursor_from_c_str("ExcludedHeader"), }; struct 
aws_http_message *message = aws_http_message_new_request(allocator); ASSERT_TRUE(message != NULL); ASSERT_SUCCESS(aws_http_message_set_request_method(message, request_method)); ASSERT_SUCCESS(aws_http_message_set_request_path(message, request_path)); ASSERT_SUCCESS(aws_http_message_add_header_array(message, original_headers, AWS_ARRAY_SIZE(original_headers))); { /* copy message, include "x-amz-meta-" */ struct aws_http_message *copied_message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, message, excluded_headers, AWS_ARRAY_SIZE(excluded_headers), false /*exclude_x_amz_meta*/); ASSERT_TRUE(copied_message != NULL); ASSERT_SUCCESS(s_test_http_messages_match( allocator, message, copied_message, excluded_headers, AWS_ARRAY_SIZE(excluded_headers))); aws_http_message_release(copied_message); } { /* copy message, exclude "x-amz-meta-" */ struct aws_http_message *copied_message = aws_s3_message_util_copy_http_message_no_body_filter_headers( allocator, message, excluded_headers, AWS_ARRAY_SIZE(excluded_headers), true /*exclude_x_amz_meta*/); ASSERT_TRUE(copied_message != NULL); const struct aws_byte_cursor expected_excluded_headers[] = { aws_byte_cursor_from_c_str("ExcludedHeader"), aws_byte_cursor_from_c_str("x-amz-meta-MyMetadata"), }; ASSERT_SUCCESS(s_test_http_messages_match( allocator, message, copied_message, expected_excluded_headers, AWS_ARRAY_SIZE(expected_excluded_headers))); aws_http_message_release(copied_message); } aws_http_message_release(message); return 0; } AWS_TEST_CASE(test_s3_message_util_assign_body, s_test_s3_message_util_assign_body) static int s_test_s3_message_util_assign_body(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_message *message = aws_http_message_new_request(allocator); aws_http_message_set_request_method(message, aws_http_method_get); const size_t test_buffer_size = 42; struct aws_byte_buf test_buffer; ASSERT_SUCCESS(s_fill_byte_buf(&test_buffer, allocator, test_buffer_size)); struct aws_input_stream *input_stream = aws_s3_message_util_assign_body(allocator, &test_buffer, message, NULL, NULL); ASSERT_TRUE(input_stream != NULL); ASSERT_TRUE(aws_http_message_get_body_stream(message) == input_stream); ASSERT_SUCCESS(s_test_http_message_body_stream(allocator, message, &test_buffer)); aws_byte_buf_clean_up(&test_buffer); aws_http_message_release(message); return 0; } AWS_TEST_CASE(test_s3_ranged_get_object_message_new, s_test_s3_ranged_get_object_message_new) static int s_test_s3_ranged_get_object_message_new(struct aws_allocator *allocator, void *ctx) { (void)ctx; const struct aws_byte_cursor test_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath"); struct aws_http_message *original_message = NULL; ASSERT_SUCCESS(s_create_get_object_message(allocator, &test_path, &original_message)); ASSERT_TRUE(original_message != NULL); { char expected_range_value_buffer[128] = "bytes=42-83"; struct aws_byte_cursor expected_range_value_cursor = aws_byte_cursor_from_c_str(expected_range_value_buffer); struct aws_http_message *get_object_message = aws_s3_ranged_get_object_message_new(allocator, original_message, 42, 83); ASSERT_TRUE(get_object_message != NULL); struct aws_http_headers *headers = aws_http_message_get_headers(get_object_message); ASSERT_TRUE(headers != NULL); struct aws_byte_cursor range_header_value; AWS_ZERO_STRUCT(range_header_value); ASSERT_SUCCESS(aws_http_headers_get(headers, g_range_header_name, &range_header_value)); ASSERT_TRUE(aws_byte_cursor_eq(&range_header_value, 
&expected_range_value_cursor)); s_test_http_message_request_method(get_object_message, "GET"); aws_http_message_release(get_object_message); } aws_http_message_release(original_message); return 0; } AWS_TEST_CASE(test_s3_set_multipart_request_path, s_test_s3_set_multipart_request_path) static int s_test_s3_set_multipart_request_path(struct aws_allocator *allocator, void *ctx) { (void)ctx; #define TEST_PATH "/TestPath" #define TEST_PATH_WITH_PARAMS "/TestPath?arg=value" #define UPLOAD_ID "test_upload_id" #define UPLOAD_ID_PARAM "uploadId=test_upload_id" #define PART_NUMBER 4 #define UPLOADS_PARAM "uploads" const struct aws_byte_cursor test_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(TEST_PATH); const struct aws_byte_cursor test_path_with_params = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(TEST_PATH_WITH_PARAMS); struct aws_byte_cursor test_path_permutations[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?uploads"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?partNumber=4"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?partNumber=4&uploads"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?uploadId=test_upload_id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?uploadId=test_upload_id&uploads"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?partNumber=4&uploadId=test_upload_id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?partNumber=4&uploadId=test_upload_id&uploads"), }; struct aws_byte_cursor test_path_with_params_permutations[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value&uploads"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value&partNumber=4"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value&partNumber=4&uploads"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value&uploadId=test_upload_id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value&uploadId=test_upload_id&uploads"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value&partNumber=4&uploadId=test_upload_id"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?arg=value&partNumber=4&uploadId=test_upload_id&uploads"), }; const uint32_t num_permutations = 8; for (uint32_t i = 0; i < num_permutations; ++i) { struct aws_string *upload_id = NULL; uint32_t part_number = 0; bool uploads_param = false; if (i & 0x4) { upload_id = aws_string_new_from_c_str(allocator, UPLOAD_ID); } if (i & 0x2) { part_number = PART_NUMBER; } if (i & 0x1) { uploads_param = true; } { struct aws_http_message *message = NULL; ASSERT_SUCCESS(s_create_put_object_message(allocator, &test_path, &message)); ASSERT_SUCCESS(aws_s3_message_util_set_multipart_request_path( allocator, upload_id, part_number, uploads_param, message)); ASSERT_SUCCESS(s_test_http_message_request_path(message, &test_path_permutations[i])); aws_http_message_release(message); } { struct aws_http_message *message_with_params = NULL; ASSERT_SUCCESS(s_create_put_object_message(allocator, &test_path_with_params, &message_with_params)); ASSERT_SUCCESS(aws_s3_message_util_set_multipart_request_path( allocator, upload_id, part_number, uploads_param, message_with_params)); ASSERT_SUCCESS( s_test_http_message_request_path(message_with_params, &test_path_with_params_permutations[i])); aws_http_message_release(message_with_params); } aws_string_destroy(upload_id); } #undef TEST_PATH #undef TEST_PATH_WITH_PARAMS #undef UPLOAD_ID #undef UPLOAD_ID_PARAM #undef PART_NUMBER #undef UPLOADS_PARAM return 0; } 
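/* The next case checks the message that aws_s3_create_multipart_upload_message_new derives from a PutObject request: the method should become POST, the request path should gain a "?uploads" query, and the headers listed in g_s3_create_multipart_upload_excluded_headers should not appear on the derived message. */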
AWS_TEST_CASE(test_s3_create_multipart_upload_message_new, s_test_s3_create_multipart_upload_message_new) static int s_test_s3_create_multipart_upload_message_new(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath"); struct aws_byte_cursor expected_create_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/TestPath?uploads"); struct aws_http_message *original_message = NULL; ASSERT_SUCCESS(s_create_put_object_message(allocator, &path, &original_message)); ASSERT_TRUE(original_message != NULL); struct aws_http_message *create_multipart_upload_message = aws_s3_create_multipart_upload_message_new(allocator, original_message, AWS_SCA_NONE); ASSERT_TRUE(create_multipart_upload_message != NULL); ASSERT_SUCCESS(s_test_http_message_request_method(create_multipart_upload_message, "POST")); ASSERT_SUCCESS(s_test_http_message_request_path(create_multipart_upload_message, &expected_create_path)); ASSERT_SUCCESS(s_test_http_headers_match( allocator, original_message, create_multipart_upload_message, g_s3_create_multipart_upload_excluded_headers, g_s3_create_multipart_upload_excluded_headers_count, NULL, 0)); aws_http_message_release(create_multipart_upload_message); aws_http_message_release(original_message); return 0; } AWS_TEST_CASE(test_s3_upload_part_message_new, s_test_s3_upload_part_message_new) static int s_test_s3_upload_part_message_new(struct aws_allocator *allocator, void *ctx) { (void)ctx; #define STRINGIFY_HELPER(x) #x #define STRINGIFY(x) STRINGIFY_HELPER(x) #define TEST_PATH "/TestPath" #define UPLOAD_ID "test_upload_id" #define PART_NUMBER 4 #define PART_NUMBER_STR "?partNumber=" STRINGIFY(PART_NUMBER) #define EXPECTED_UPLOAD_PART_PATH TEST_PATH PART_NUMBER_STR "&uploadId=" UPLOAD_ID const struct aws_byte_cursor header_exclude_exceptions[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), }; struct aws_byte_cursor path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(TEST_PATH); struct aws_byte_cursor expected_create_path = aws_byte_cursor_from_c_str(EXPECTED_UPLOAD_PART_PATH); struct aws_http_message *original_message = NULL; ASSERT_SUCCESS(s_create_put_object_message(allocator, &path, &original_message)); ASSERT_TRUE(original_message != NULL); const size_t part_buffer_size = 42; struct aws_byte_buf part_buffer; AWS_ZERO_STRUCT(part_buffer); s_fill_byte_buf(&part_buffer, allocator, part_buffer_size); struct aws_string *upload_id = aws_string_new_from_c_str(allocator, UPLOAD_ID); struct aws_http_message *upload_part_message = aws_s3_upload_part_message_new( allocator, original_message, &part_buffer, PART_NUMBER, upload_id, false, NULL, NULL); ASSERT_TRUE(upload_part_message != NULL); ASSERT_SUCCESS(s_test_http_message_request_method(upload_part_message, "PUT")); ASSERT_SUCCESS(s_test_http_message_request_path(upload_part_message, &expected_create_path)); ASSERT_SUCCESS(s_test_http_headers_match( allocator, original_message, upload_part_message, g_s3_upload_part_excluded_headers, g_s3_upload_part_excluded_headers_count, header_exclude_exceptions, AWS_ARRAY_SIZE(header_exclude_exceptions))); ASSERT_SUCCESS(s_test_http_message_body_stream(allocator, upload_part_message, &part_buffer)); aws_string_destroy(upload_id); aws_byte_buf_clean_up(&part_buffer); aws_http_message_release(upload_part_message); aws_http_message_release(original_message); #undef STRINGIFY_HELPER #undef STRINGIFY #undef TEST_PATH #undef UPLOAD_ID #undef PART_NUMBER #undef PART_NUMBER_STR #undef EXPECTED_UPLOAD_PART_PATH return 0; } 
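/* The next case passes a bare request (no path, method, or headers) to aws_s3_upload_part_message_new and expects the call to fail, returning NULL rather than a partially built UploadPart message. */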
AWS_TEST_CASE(test_s3_upload_part_message_fail, s_test_s3_upload_part_message_fail) static int s_test_s3_upload_part_message_fail(struct aws_allocator *allocator, void *ctx) { (void)ctx; #define UPLOAD_ID "test_upload_id" #define PART_NUMBER 4 struct aws_http_message *original_message = aws_http_message_new_request(allocator); ASSERT_NOT_NULL(original_message); const size_t part_buffer_size = 42; struct aws_byte_buf part_buffer; AWS_ZERO_STRUCT(part_buffer); s_fill_byte_buf(&part_buffer, allocator, part_buffer_size); struct aws_string *upload_id = aws_string_new_from_c_str(allocator, UPLOAD_ID); struct aws_http_message *upload_part_message = aws_s3_upload_part_message_new( allocator, original_message, &part_buffer, PART_NUMBER, upload_id, false, NULL, NULL); ASSERT_NULL(upload_part_message); aws_string_destroy(upload_id); aws_byte_buf_clean_up(&part_buffer); aws_http_message_release(upload_part_message); aws_http_message_release(original_message); #undef UPLOAD_ID #undef PART_NUMBER return 0; } struct complete_multipart_upload_xml_test_data { struct aws_byte_cursor etag_value; struct aws_byte_cursor part_number_value; bool found_etag; bool found_part_number; }; static int s_complete_multipart_upload_traverse_xml_node(struct aws_xml_node *node, void *user_data) { const struct aws_byte_cursor complete_multipar_upload_tag_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CompleteMultipartUpload"); const struct aws_byte_cursor part_tag_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Part"); const struct aws_byte_cursor etag_tag_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ETag"); const struct aws_byte_cursor part_number_tag_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PartNumber"); struct complete_multipart_upload_xml_test_data *test_data = user_data; struct aws_byte_cursor node_name = aws_xml_node_get_name(node); if (aws_byte_cursor_eq(&node_name, &complete_multipar_upload_tag_name)) { if (aws_xml_node_traverse(node, s_complete_multipart_upload_traverse_xml_node, user_data)) { return AWS_OP_ERR; } } else if (aws_byte_cursor_eq(&node_name, &part_tag_name)) { if (aws_xml_node_traverse(node, s_complete_multipart_upload_traverse_xml_node, user_data)) { return AWS_OP_ERR; } } else if (aws_byte_cursor_eq(&node_name, &etag_tag_name)) { struct aws_byte_cursor node_body; AWS_ZERO_STRUCT(node_body); if (aws_xml_node_as_body(node, &node_body)) { return AWS_OP_ERR; } test_data->found_etag = aws_byte_cursor_eq(&node_body, &test_data->etag_value); } else if (aws_byte_cursor_eq(&node_name, &part_number_tag_name)) { struct aws_byte_cursor node_body; AWS_ZERO_STRUCT(node_body); if (aws_xml_node_as_body(node, &node_body)) { return AWS_OP_ERR; } test_data->found_part_number = aws_byte_cursor_eq(&node_body, &test_data->part_number_value); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_complete_multipart_message_new, s_test_s3_complete_multipart_message_new) static int s_test_s3_complete_multipart_message_new(struct aws_allocator *allocator, void *ctx) { (void)ctx; #define TEST_PATH "/TestPath" #define UPLOAD_ID "test_upload_id" #define EXPECTED_UPLOAD_PART_PATH TEST_PATH "?uploadId=" UPLOAD_ID #define ETAG_VALUE "etag_value" struct aws_array_list parts; ASSERT_SUCCESS(aws_array_list_init_dynamic(&parts, allocator, 1, sizeof(struct aws_s3_mpu_part_info *))); struct aws_s3_mpu_part_info *part = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_mpu_part_info)); part->etag = aws_string_new_from_c_str(allocator, ETAG_VALUE); ASSERT_SUCCESS(aws_array_list_push_back(&parts, &part)); const struct aws_byte_cursor 
header_exclude_exceptions[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), }; struct aws_byte_cursor path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(TEST_PATH); struct aws_byte_cursor expected_create_path = aws_byte_cursor_from_c_str(EXPECTED_UPLOAD_PART_PATH); struct aws_http_message *original_message = NULL; ASSERT_SUCCESS(s_create_put_object_message(allocator, &path, &original_message)); ASSERT_TRUE(original_message != NULL); struct aws_string *upload_id = aws_string_new_from_c_str(allocator, UPLOAD_ID); struct aws_byte_buf body_buffer; aws_byte_buf_init(&body_buffer, allocator, 64); struct aws_http_message *complete_multipart_message = aws_s3_complete_multipart_message_new( allocator, original_message, &body_buffer, upload_id, &parts, AWS_SCA_NONE); ASSERT_SUCCESS(s_test_http_message_request_method(complete_multipart_message, "POST")); ASSERT_SUCCESS(s_test_http_message_request_path(complete_multipart_message, &expected_create_path)); ASSERT_SUCCESS(s_test_http_headers_match( allocator, original_message, complete_multipart_message, g_s3_complete_multipart_upload_excluded_headers, g_s3_complete_multipart_upload_excluded_headers_count, header_exclude_exceptions, AWS_ARRAY_SIZE(header_exclude_exceptions))); { struct complete_multipart_upload_xml_test_data xml_user_data = { .etag_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ETAG_VALUE), .part_number_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1"), .found_etag = false, .found_part_number = false, }; struct aws_xml_parser_options parser_options = { .doc = aws_byte_cursor_from_buf(&body_buffer), .on_root_encountered = s_complete_multipart_upload_traverse_xml_node, .user_data = &xml_user_data, }; ASSERT_SUCCESS(aws_xml_parse(allocator, &parser_options)); ASSERT_TRUE(xml_user_data.found_etag); ASSERT_TRUE(xml_user_data.found_part_number); } aws_byte_buf_clean_up(&body_buffer); aws_string_destroy(upload_id); aws_http_message_release(complete_multipart_message); aws_http_message_release(original_message); aws_string_destroy(part->etag); aws_mem_release(allocator, part); aws_array_list_clean_up(&parts); #undef TEST_PATH #undef UPLOAD_ID #undef EXPECTED_UPLOAD_PART_PATH #undef ETAG_VALUE return 0; } AWS_TEST_CASE(test_s3_abort_multipart_upload_message_new, s_test_s3_abort_multipart_upload_message_newt) static int s_test_s3_abort_multipart_upload_message_newt(struct aws_allocator *allocator, void *ctx) { (void)ctx; #define TEST_PATH "/TestPath" #define UPLOAD_ID "test_upload_id" #define EXPECTED_UPLOAD_PART_PATH TEST_PATH "?uploadId=" UPLOAD_ID struct aws_byte_cursor path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(TEST_PATH); struct aws_byte_cursor expected_create_path = aws_byte_cursor_from_c_str(EXPECTED_UPLOAD_PART_PATH); struct aws_http_message *original_message = NULL; ASSERT_SUCCESS(s_create_put_object_message(allocator, &path, &original_message)); ASSERT_TRUE(original_message != NULL); struct aws_string *upload_id = aws_string_new_from_c_str(allocator, UPLOAD_ID); struct aws_http_message *abort_upload_message = aws_s3_abort_multipart_upload_message_new(allocator, original_message, upload_id); ASSERT_TRUE(abort_upload_message != NULL); ASSERT_SUCCESS(s_test_http_message_request_method(abort_upload_message, "DELETE")); ASSERT_SUCCESS(s_test_http_message_request_path(abort_upload_message, &expected_create_path)); ASSERT_SUCCESS(s_test_http_headers_match( allocator, original_message, abort_upload_message, g_s3_abort_multipart_upload_excluded_headers, g_s3_abort_multipart_upload_excluded_headers_count, NULL, 0)); 
aws_string_destroy(upload_id); aws_http_message_release(abort_upload_message); aws_http_message_release(original_message); #undef TEST_PATH #undef UPLOAD_ID #undef EXPECTED_UPLOAD_PART_PATH return 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_retry_tests.c000066400000000000000000000463051456575232400240670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include "s3_tester.h" #include #include #include #include #include #include #include #include #include #include #include static void s_s3_client_acquire_http_connection_exceed_retries( struct aws_http_connection_manager *conn_manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { AWS_ASSERT(callback); (void)conn_manager; aws_raise_error(AWS_ERROR_HTTP_UNKNOWN); callback(NULL, AWS_ERROR_HTTP_UNKNOWN, user_data); } AWS_TEST_CASE(test_s3_client_exceed_retries, s_test_s3_client_exceed_retries) static int s_test_s3_client_exceed_retries(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->acquire_http_connection = s_s3_client_acquire_http_connection_exceed_retries; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); /* Don't specify EXPECT SUCCESS flag for aws_s3_tester_send_get_object_meta_request to expect a failure. 
*/ ASSERT_SUCCESS(aws_s3_tester_send_get_object_meta_request( &tester, client, g_pre_existing_object_1MB, 0, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_HTTP_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static void s_s3_client_acquire_http_connection_fail_first( struct aws_http_connection_manager *conn_manager, aws_http_connection_manager_on_connection_setup_fn *callback, void *user_data) { AWS_ASSERT(callback); struct aws_s3_connection *connection = user_data; struct aws_s3_client *client = connection->request->meta_request->endpoint->client; AWS_ASSERT(client); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); if (aws_s3_tester_inc_counter1(tester) == 1) { aws_raise_error(AWS_ERROR_UNKNOWN); callback(NULL, AWS_ERROR_UNKNOWN, connection); return; } struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; original_client_vtable->acquire_http_connection(conn_manager, callback, user_data); } AWS_TEST_CASE(test_s3_client_acquire_connection_fail, s_test_s3_client_acquire_connection_fail) static int s_test_s3_client_acquire_connection_fail(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; AWS_ZERO_STRUCT(tester); ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = {.part_size = 64 * 1024}; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->acquire_http_connection = s_s3_client_acquire_http_connection_fail_first; ASSERT_SUCCESS(aws_s3_tester_send_get_object_meta_request( &tester, client, g_pre_existing_object_1MB, AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS, NULL)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } struct s3_fail_prepare_test_data { uint32_t num_requests_being_prepared_is_correct : 1; }; static struct aws_future_void *s_s3_fail_prepare_request(struct aws_s3_request *request) { AWS_ASSERT(request != NULL); struct aws_future_void *future = aws_future_void_new(request->allocator); aws_future_void_set_error(future, AWS_ERROR_UNKNOWN); return future; } static struct aws_s3_meta_request *s_meta_request_factory_patch_prepare_request( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->prepare_request = s_s3_fail_prepare_request; return meta_request; } static void s_s3_fail_prepare_finish_destroy(struct aws_s3_client *client) { AWS_ASSERT(client); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct s3_fail_prepare_test_data 
*test_data = tester->user_data; AWS_ASSERT(test_data != NULL); test_data->num_requests_being_prepared_is_correct = client->threaded_data.num_requests_being_prepared == 0; struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; original_client_vtable->finish_destroy(client); } /* Test recovery when prepare request fails. */ AWS_TEST_CASE(test_s3_meta_request_fail_prepare_request, s_test_s3_meta_request_fail_prepare_request) static int s_test_s3_meta_request_fail_prepare_request(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct s3_fail_prepare_test_data test_data; AWS_ZERO_STRUCT(test_data); tester.user_data = &test_data; struct aws_s3_client_config client_config; AWS_ZERO_STRUCT(client_config); ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->meta_request_factory = s_meta_request_factory_patch_prepare_request; patched_client_vtable->finish_destroy = s_s3_fail_prepare_finish_destroy; ASSERT_SUCCESS(aws_s3_tester_send_get_object_meta_request(&tester, client, g_pre_existing_object_1MB, 0, NULL)); aws_s3_tester_wait_for_counters(&tester); client = aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); ASSERT_TRUE(test_data.num_requests_being_prepared_is_correct); return 0; } static void s_s3_meta_request_sign_request_fail( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_signing_complete_fn *on_signing_complete, void *user_data) { (void)meta_request; (void)request; AWS_ASSERT(on_signing_complete != NULL); aws_raise_error(AWS_ERROR_UNKNOWN); on_signing_complete(NULL, AWS_ERROR_UNKNOWN, user_data); } static struct aws_s3_meta_request *s_s3_meta_request_factory_sign_request( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->sign_request = s_s3_meta_request_sign_request_fail; return meta_request; } AWS_TEST_CASE(test_s3_meta_request_sign_request_fail, s_test_s3_meta_request_sign_request_fail) static int s_test_s3_meta_request_sign_request_fail(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->meta_request_factory = s_s3_meta_request_factory_sign_request; struct aws_s3_meta_request_test_results meta_request_test_results; 
aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .get_options = { .object_path = g_pre_existing_object_1MB, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_UNKNOWN); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } struct s3_meta_request_prepare_request_fail_first_job { struct aws_allocator *allocator; struct aws_s3_request *request; struct aws_future_void *original_future; /* original future that we're intercepting and patching */ struct aws_future_void *patched_future; /* patched future to set when this job completes */ }; static void s_s3_meta_request_prepare_request_fail_first_on_original_done(void *user_data); static struct aws_future_void *s_s3_meta_request_prepare_request_fail_first(struct aws_s3_request *request) { struct aws_s3_meta_request *meta_request = request->meta_request; AWS_ASSERT(meta_request); struct aws_s3_client *client = meta_request->client; AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_future_void *patched_future = aws_future_void_new(request->allocator); struct s3_meta_request_prepare_request_fail_first_job *patched_prep = aws_mem_calloc(request->allocator, 1, sizeof(struct s3_meta_request_prepare_request_fail_first_job)); patched_prep->allocator = request->allocator; patched_prep->patched_future = aws_future_void_acquire(patched_future); patched_prep->request = request; struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; patched_prep->original_future = original_meta_request_vtable->prepare_request(request); aws_future_void_register_callback( patched_prep->original_future, s_s3_meta_request_prepare_request_fail_first_on_original_done, patched_prep); return patched_future; } static void s_s3_meta_request_prepare_request_fail_first_on_original_done(void *user_data) { struct s3_meta_request_prepare_request_fail_first_job *patched_prep = user_data; struct aws_s3_request *request = patched_prep->request; struct aws_s3_tester *tester = request->meta_request->client->shutdown_callback_user_data; int error_code = aws_future_void_get_error(patched_prep->original_future); if (error_code != AWS_ERROR_SUCCESS) { aws_future_void_set_error(patched_prep->patched_future, error_code); goto finish; } if (aws_s3_tester_inc_counter1(tester) == 1) { const struct aws_byte_cursor test_object_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/non-existing-file12345.txt"); int set_request_path_result = aws_http_message_set_request_path(request->send_data.message, test_object_path); AWS_ASSERT(set_request_path_result == AWS_ERROR_SUCCESS); (void)set_request_path_result; } aws_future_void_set_result(patched_prep->patched_future); finish: aws_future_void_release(patched_prep->original_future); aws_future_void_release(patched_prep->patched_future); aws_mem_release(patched_prep->allocator, patched_prep); } static void s_s3_meta_request_send_request_finish_fail_first( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code) { 
struct aws_s3_client *client = connection->request->meta_request->client; AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); if (aws_s3_tester_inc_counter2(tester) == 1) { AWS_ASSERT(connection->request->send_data.response_status == 404); connection->request->send_data.response_status = AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR; } struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; original_meta_request_vtable->send_request_finish(connection, stream, error_code); } static struct aws_s3_meta_request *s_meta_request_factory_patch_send_request_finish( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->prepare_request = s_s3_meta_request_prepare_request_fail_first; patched_meta_request_vtable->send_request_finish = s_s3_meta_request_send_request_finish_fail_first; return meta_request; } /* Test recovery when message response indicates an internal error. */ AWS_TEST_CASE(test_s3_meta_request_send_request_finish_fail, s_test_s3_meta_request_send_request_finish_fail) static int s_test_s3_meta_request_send_request_finish_fail(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = 64 * 1024, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->meta_request_factory = s_meta_request_factory_patch_send_request_finish; ASSERT_SUCCESS(aws_s3_tester_send_get_object_meta_request( &tester, client, g_pre_existing_object_1MB, AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS, NULL)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return 0; } static void s_finished_request_remove_upload_id( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { (void)error_code; if (request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD) { aws_byte_buf_reset(&request->send_data.response_body, false); } struct aws_s3_client *client = meta_request->client; struct aws_s3_tester *tester = client->shutdown_callback_user_data; struct aws_s3_meta_request_vtable *original_meta_request_vtable = aws_s3_tester_get_meta_request_vtable_patch(tester, 0)->original_vtable; original_meta_request_vtable->finished_request(meta_request, request, error_code); } static struct aws_s3_meta_request *s_meta_request_factory_patch_finished_request( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_ASSERT(client != NULL); struct aws_s3_tester *tester = client->shutdown_callback_user_data; AWS_ASSERT(tester != NULL); struct 
aws_s3_client_vtable *original_client_vtable = aws_s3_tester_get_client_vtable_patch(tester, 0)->original_vtable; struct aws_s3_meta_request *meta_request = original_client_vtable->meta_request_factory(client, options); struct aws_s3_meta_request_vtable *patched_meta_request_vtable = aws_s3_tester_patch_meta_request_vtable(tester, meta_request, NULL); patched_meta_request_vtable->finished_request = s_finished_request_remove_upload_id; return meta_request; } AWS_TEST_CASE(test_s3_auto_range_put_missing_upload_id, s_test_s3_auto_range_put_missing_upload_id) static int s_test_s3_auto_range_put_missing_upload_id(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client *client = NULL; struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); struct aws_s3_client_vtable *patched_client_vtable = aws_s3_tester_patch_client_vtable(&tester, client, NULL); patched_client_vtable->meta_request_factory = s_meta_request_factory_patch_finished_request; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); struct aws_s3_tester_meta_request_options options = { .allocator = allocator, .client = client, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, .put_options = { .ensure_multipart = true, }, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); ASSERT_TRUE(meta_request_test_results.finished_error_code == AWS_ERROR_S3_MISSING_UPLOAD_ID); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_s3express_client_test.c000066400000000000000000000605011456575232400260260ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_util.h" #include "aws/s3/private/s3express_credentials_provider_impl.h" #include "aws/s3/s3_client.h" #include "aws/s3/s3express_credentials_provider.h" #include "s3_tester.h" #include #include #include #include #include #include #include #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } struct aws_s3express_client_tester { struct aws_allocator *allocator; struct aws_hash_table saver_cache; struct aws_atomic_var provider_requests_made; }; static struct aws_s3express_client_tester s_tester; static int s_s3express_client_tester_init(struct aws_allocator *allocator) { s_tester.allocator = allocator; aws_hash_table_init( &s_tester.saver_cache, allocator, 100, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, (aws_hash_callback_destroy_fn *)aws_credentials_release); aws_atomic_init_int(&s_tester.provider_requests_made, 0); return AWS_OP_SUCCESS; } static int s_s3express_client_tester_cleanup(void) { aws_hash_table_clean_up(&s_tester.saver_cache); return AWS_OP_SUCCESS; } static int s_create_s3express_request_mock_server( struct aws_allocator *allocator, struct aws_s3_tester *tester, struct aws_s3_client *client) { struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/default"); struct aws_s3_tester_meta_request_options put_options = { .allocator = allocator, .meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, .client = client, .checksum_algorithm = AWS_SCA_CRC32, .validate_get_response_checksum = false, .put_options = { .object_size_mb = 10, .object_path_override = object_path, }, .mock_server = true, .use_s3express_signing = true, }; ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(tester, &put_options, NULL)); return AWS_OP_SUCCESS; } TEST_CASE(s3express_client_sanity_test_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .tls_mode = AWS_MR_TLS_DISABLED, .enable_s3express = true, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_SUCCESS(s_create_s3express_request_mock_server(allocator, &tester, client)); ASSERT_NOT_NULL(client->s3express_provider); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } static int s_s3express_get_creds_fake( struct aws_s3express_credentials_provider *provider, const struct aws_credentials *original_credentials, const struct aws_credentials_properties_s3express *properties, aws_on_get_credentials_callback_fn callback, void *user_data) { (void)properties; (void)provider; (void)original_credentials; struct aws_string *key_1 = aws_string_new_from_c_str(s_tester.allocator, "key_1"); struct aws_credentials *credentials = aws_credentials_new_from_string(s_tester.allocator, key_1, key_1, key_1, SIZE_MAX); if (callback) { callback(credentials, AWS_ERROR_SUCCESS, user_data); } aws_credentials_release(credentials); aws_string_destroy(key_1); return AWS_OP_SUCCESS; } static void s_s3express_destroy_fake(struct aws_s3express_credentials_provider *provider) { 
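    /* The fake provider holds no sessions or connections of its own, so destruction only needs to
     * notify the owning client that provider shutdown is complete and then free the base struct;
     * the client's shutdown sequence waits on that notification. Tests opt into this fake through
     * the client config, e.g.
     *     .s3express_provider_override_factory = s_s3express_provider_fake_factory,
     */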
provider->shutdown_complete_callback(provider->shutdown_user_data); aws_mem_release(provider->allocator, provider); } static struct aws_s3express_credentials_provider_vtable s_fake_s3express_vtable = { .get_credentials = s_s3express_get_creds_fake, .destroy = s_s3express_destroy_fake, }; struct aws_s3express_credentials_provider *s_s3express_provider_fake_factory( struct aws_allocator *allocator, struct aws_s3_client *client, aws_simple_completion_callback shutdown_complete_callback, void *shutdown_user_data, void *factory_user_data) { (void)client; (void)factory_user_data; struct aws_s3express_credentials_provider *provider = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3express_credentials_provider)); aws_s3express_credentials_provider_init_base(provider, allocator, &s_fake_s3express_vtable, NULL); provider->shutdown_complete_callback = shutdown_complete_callback; provider->shutdown_user_data = shutdown_user_data; return provider; } TEST_CASE(s3express_client_sanity_override_test_mock_server) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); s_s3express_client_tester_init(allocator); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .tls_mode = AWS_MR_TLS_DISABLED, .enable_s3express = true, .s3express_provider_override_factory = s_s3express_provider_fake_factory, }; ASSERT_SUCCESS(aws_s3_tester_bind_client( &tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_SUCCESS(s_create_s3express_request_mock_server(allocator, &tester, client)); ASSERT_NOT_NULL(client->s3express_provider); aws_s3_client_release(client); s_s3express_client_tester_cleanup(); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } static int s_s3express_put_object_request( struct aws_allocator *allocator, struct aws_s3_client *client, size_t content_length, struct aws_s3_tester *tester, struct aws_byte_cursor host_cursor, struct aws_byte_cursor key_cursor, struct aws_byte_cursor region) { struct aws_input_stream *upload_stream = aws_s3_test_input_stream_new(allocator, content_length); struct aws_http_message *message = aws_s3_test_put_object_request_new( allocator, &host_cursor, key_cursor, g_test_body_content_type, upload_stream, 0); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; options.message = message; struct aws_signing_config_aws s3express_signing_config = { .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, .service = g_s3express_service_name, .region = region, }; options.signing_config = &s3express_signing_config; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(tester, &options, &meta_request_test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_request != NULL); /* Wait for the request to finish. 
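 * The wait below blocks on the tester's condition variable; the finish callback that
 * aws_s3_tester_bind_meta_request installed signals it once the desired completion count is reached.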
*/ aws_s3_tester_wait_for_meta_request_finish(tester); ASSERT_SUCCESS(aws_s3_tester_validate_put_object_results(&meta_request_test_results, 0)); meta_request = aws_s3_meta_request_release(meta_request); aws_s3_tester_wait_for_meta_request_shutdown(tester); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_input_stream_release(upload_stream); return AWS_OP_SUCCESS; } static int s_s3express_client_put_test_helper(struct aws_allocator *allocator, size_t content_length) { struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test"); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, .region = region_cursor, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_SUCCESS(s_s3express_put_object_request( allocator, client, content_length, &tester, g_test_s3express_bucket_use1_az4_endpoint, key_cursor, region_cursor)); struct aws_byte_cursor west2_region_cursor = aws_byte_cursor_from_c_str("us-west-2"); ASSERT_SUCCESS(s_s3express_put_object_request( allocator, client, content_length, &tester, g_test_s3express_bucket_usw2_az1_endpoint, key_cursor, west2_region_cursor)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_client_put_object) { (void)ctx; return s_s3express_client_put_test_helper(allocator, MB_TO_BYTES(1)); } TEST_CASE(s3express_client_put_object_multipart) { (void)ctx; return s_s3express_client_put_test_helper(allocator, MB_TO_BYTES(100)); } TEST_CASE(s3express_client_put_object_multipart_multiple) { (void)ctx; enum s_numbers { NUM_REQUESTS = 100 }; struct aws_s3_meta_request *meta_requests[NUM_REQUESTS]; struct aws_s3_meta_request_test_results meta_request_test_results[NUM_REQUESTS]; struct aws_input_stream *input_streams[NUM_REQUESTS]; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test"); struct aws_byte_cursor west2_region_cursor = aws_byte_cursor_from_c_str("us-west-2"); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, .region = region_cursor, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); for (size_t i = 0; i < NUM_REQUESTS; ++i) { input_streams[i] = aws_s3_test_input_stream_new(allocator, MB_TO_BYTES(10)); struct aws_byte_cursor request_region = region_cursor; struct aws_byte_cursor request_host = g_test_s3express_bucket_use1_az4_endpoint; if (i % 2 == 0) { /* Make half of request to east1 and rest half to west2 */ request_region = west2_region_cursor; request_host = g_test_s3express_bucket_usw2_az1_endpoint; } struct aws_http_message *message = aws_s3_test_put_object_request_new( allocator, &request_host, key_cursor, g_test_body_content_type, input_streams[i], 0); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; options.message = message; struct 
aws_signing_config_aws s3express_signing_config = { .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, .service = g_s3express_service_name, .region = request_region, }; options.signing_config = &s3express_signing_config; aws_s3_meta_request_test_results_init(&meta_request_test_results[i], allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results[i])); meta_requests[i] = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_requests[i] != NULL); aws_http_message_release(message); } /* Wait for the request to finish. */ aws_s3_tester_wait_for_meta_request_finish(&tester); aws_s3_tester_lock_synced_data(&tester); ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); aws_s3_tester_unlock_synced_data(&tester); for (size_t i = 0; i < NUM_REQUESTS; ++i) { meta_requests[i] = aws_s3_meta_request_release(meta_requests[i]); } aws_s3_tester_wait_for_meta_request_shutdown(&tester); for (size_t i = 0; i < NUM_REQUESTS; ++i) { aws_s3_tester_validate_put_object_results(&meta_request_test_results[i], 0); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); } for (size_t i = 0; i < NUM_REQUESTS; ++i) { aws_input_stream_release(input_streams[i]); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } void s_meta_request_finished_overhead( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *meta_request_result, void *user_data) { (void)meta_request; (void)meta_request_result; (void)user_data; aws_atomic_fetch_add(&s_tester.provider_requests_made, 1); } struct aws_s3express_credentials_provider *s_s3express_provider_mock_factory( struct aws_allocator *allocator, struct aws_s3_client *client, aws_simple_completion_callback on_provider_shutdown_callback, void *shutdown_user_data, void *factory_user_data) { (void)factory_user_data; struct aws_s3express_credentials_provider_default_options options = { .client = client, .shutdown_complete_callback = on_provider_shutdown_callback, .shutdown_user_data = shutdown_user_data, }; struct aws_s3express_credentials_provider *s3express_provider = aws_s3express_credentials_provider_new_default(allocator, &options); struct aws_s3express_credentials_provider_impl *impl = s3express_provider->impl; impl->mock_test.meta_request_finished_overhead = s_meta_request_finished_overhead; return s3express_provider; } /* Long running test to make sure our refresh works properly */ TEST_CASE(s3express_client_put_object_long_running_session_refresh) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); s_s3express_client_tester_init(allocator); size_t num_meta_requests = 7; struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test"); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, .region = region_cursor, .s3express_provider_override_factory = s_s3express_provider_mock_factory, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); for (size_t i = 0; i < num_meta_requests; i++) { struct aws_input_stream *upload_stream = aws_s3_test_input_stream_new(allocator, MB_TO_BYTES(10)); struct aws_http_message *message = aws_s3_test_put_object_request_new( allocator, &g_test_s3express_bucket_use1_az4_endpoint, key_cursor, 
g_test_body_content_type, upload_stream, 0); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; options.message = message; struct aws_signing_config_aws s3express_signing_config = { .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, .service = g_s3express_service_name, }; options.signing_config = &s3express_signing_config; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_request != NULL); /* Wait for the request to finish. */ aws_s3_tester_wait_for_meta_request_finish(&tester); ASSERT_SUCCESS(aws_s3_tester_validate_put_object_results(&meta_request_test_results, 0)); meta_request = aws_s3_meta_request_release(meta_request); aws_s3_tester_wait_for_meta_request_shutdown(&tester); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_input_stream_release(upload_stream); /* Sleep for one minute before the next request */ if (i != num_meta_requests - 1) { aws_thread_current_sleep(aws_timestamp_convert(60, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL)); } } /* At least two CreateSession calls should have been made. */ /* The server can sometimes return credentials that expire in around 2-3 minutes. */ size_t session_made = aws_atomic_load_int(&s_tester.provider_requests_made); ASSERT_TRUE(session_made >= 2); aws_s3_client_release(client); s_s3express_client_tester_cleanup(); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_client_get_object) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, .region = region_cursor, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, g_test_s3express_bucket_use1_az4_endpoint, g_pre_existing_object_10MB); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; struct aws_signing_config_aws s3express_signing_config = { .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, .service = g_s3express_service_name, }; options.signing_config = &s3express_signing_config; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_request != NULL); /* Wait for the request to finish. 
*/ aws_s3_tester_wait_for_meta_request_finish(&tester); ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(&meta_request_test_results, 0)); meta_request = aws_s3_meta_request_release(meta_request); aws_s3_tester_wait_for_meta_request_shutdown(&tester); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_client_get_object_multiple) { (void)ctx; struct aws_s3_meta_request *meta_requests[100]; struct aws_s3_meta_request_test_results meta_request_test_results[100]; size_t num_meta_requests = AWS_ARRAY_SIZE(meta_requests); struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, .region = region_cursor, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); for (size_t i = 0; i < num_meta_requests; ++i) { struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, g_test_s3express_bucket_use1_az4_endpoint, g_pre_existing_object_10MB); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; struct aws_signing_config_aws s3express_signing_config = { .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, .service = g_s3express_service_name, }; options.signing_config = &s3express_signing_config; aws_s3_meta_request_test_results_init(&meta_request_test_results[i], allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results[i])); meta_requests[i] = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_requests[i] != NULL); aws_http_message_release(message); } /* Wait for the request to finish. 
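 * All of the GET meta requests above were issued without waiting inside the loop. Each call to
 * aws_s3_tester_bind_meta_request bumps the tester's desired finish count, so the wait below only
 * returns once every one of them has completed.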
*/ aws_s3_tester_wait_for_meta_request_finish(&tester); aws_s3_tester_lock_synced_data(&tester); ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); aws_s3_tester_unlock_synced_data(&tester); for (size_t i = 0; i < num_meta_requests; ++i) { meta_requests[i] = aws_s3_meta_request_release(meta_requests[i]); } aws_s3_tester_wait_for_meta_request_shutdown(&tester); for (size_t i = 0; i < num_meta_requests; ++i) { aws_s3_tester_validate_get_object_results(&meta_request_test_results[i], 0); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); } aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } TEST_CASE(s3express_client_get_object_create_session_error) { (void)ctx; struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, .region = region_cursor, }; ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); struct aws_byte_cursor my_dummy_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "non-exist-bucket-test--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"); struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, my_dummy_endpoint, g_pre_existing_object_10MB); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; struct aws_signing_config_aws s3express_signing_config = { .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, .service = g_s3express_service_name, }; options.signing_config = &s3express_signing_config; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_request != NULL); /* Wait for the request to finish. */ aws_s3_tester_wait_for_meta_request_finish(&tester); ASSERT_UINT_EQUALS(meta_request_test_results.finished_error_code, AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED); meta_request = aws_s3_meta_request_release(meta_request); aws_s3_tester_wait_for_meta_request_shutdown(&tester); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_test_input_stream.c000066400000000000000000000117601456575232400252460ustar00rootroot00000000000000#include "s3_tester.h" #include struct aws_s3_test_input_stream_impl { struct aws_input_stream base; size_t position; size_t length; struct aws_allocator *allocator; }; static int s_aws_s3_test_input_stream_seek( struct aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis) { (void)stream; (void)offset; (void)basis; /* Stream should never be seeked; all reads should be sequential. 
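 * Raising AWS_ERROR_UNKNOWN turns any unexpected seek into a hard failure in the tests rather than
 * something that is silently tolerated.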
*/ aws_raise_error(AWS_ERROR_UNKNOWN); return AWS_OP_ERR; } static int s_aws_s3_test_input_stream_read( struct aws_input_stream *stream, struct aws_byte_buf *dest, struct aws_byte_cursor *test_string) { (void)stream; (void)dest; struct aws_s3_test_input_stream_impl *test_input_stream = AWS_CONTAINER_OF(stream, struct aws_s3_test_input_stream_impl, base); while (dest->len < dest->capacity && test_input_stream->position < test_input_stream->length) { size_t buffer_pos = test_input_stream->position % test_string->len; struct aws_byte_cursor source_byte_cursor = { .len = test_string->len - buffer_pos, .ptr = test_string->ptr + buffer_pos, }; size_t remaining_in_stream = test_input_stream->length - test_input_stream->position; if (remaining_in_stream < source_byte_cursor.len) { source_byte_cursor.len = remaining_in_stream; } size_t remaining_in_buffer = dest->capacity - dest->len; if (remaining_in_buffer < source_byte_cursor.len) { source_byte_cursor.len = remaining_in_buffer; } aws_byte_buf_append(dest, &source_byte_cursor); test_input_stream->position += source_byte_cursor.len; } return AWS_OP_SUCCESS; } static int s_aws_s3_test_input_stream_read_1(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct aws_byte_cursor test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("This is an S3 test."); return s_aws_s3_test_input_stream_read(stream, dest, &test_string); } static int s_aws_s3_test_input_stream_read_2(struct aws_input_stream *stream, struct aws_byte_buf *dest) { struct aws_byte_cursor test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Different S3 test value."); return s_aws_s3_test_input_stream_read(stream, dest, &test_string); } static int s_aws_s3_test_input_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { (void)stream; (void)status; struct aws_s3_test_input_stream_impl *test_input_stream = AWS_CONTAINER_OF(stream, struct aws_s3_test_input_stream_impl, base); status->is_end_of_stream = test_input_stream->position == test_input_stream->length; status->is_valid = true; return AWS_OP_SUCCESS; } static int s_aws_s3_test_input_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) { AWS_ASSERT(stream != NULL); struct aws_s3_test_input_stream_impl *test_input_stream = AWS_CONTAINER_OF(stream, struct aws_s3_test_input_stream_impl, base); *out_length = (int64_t)test_input_stream->length; return AWS_OP_SUCCESS; } static void s_aws_s3_test_input_stream_destroy(struct aws_s3_test_input_stream_impl *test_input_stream) { aws_mem_release(test_input_stream->allocator, test_input_stream); } static struct aws_input_stream_vtable s_aws_s3_test_input_stream_vtable_1 = { .seek = s_aws_s3_test_input_stream_seek, .read = s_aws_s3_test_input_stream_read_1, .get_status = s_aws_s3_test_input_stream_get_status, .get_length = s_aws_s3_test_input_stream_get_length, }; static struct aws_input_stream_vtable s_aws_s3_test_input_stream_vtable_2 = { .seek = s_aws_s3_test_input_stream_seek, .read = s_aws_s3_test_input_stream_read_2, .get_status = s_aws_s3_test_input_stream_get_status, .get_length = s_aws_s3_test_input_stream_get_length, }; struct aws_input_stream *aws_s3_test_input_stream_new_with_value_type( struct aws_allocator *allocator, size_t stream_length, enum aws_s3_test_stream_value stream_value) { struct aws_s3_test_input_stream_impl *test_input_stream = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_test_input_stream_impl)); test_input_stream->base.vtable = stream_value == TEST_STREAM_VALUE_1 ? 
&s_aws_s3_test_input_stream_vtable_1 : &s_aws_s3_test_input_stream_vtable_2; aws_ref_count_init( &test_input_stream->base.ref_count, test_input_stream, (aws_simple_completion_callback *)s_aws_s3_test_input_stream_destroy); struct aws_input_stream *input_stream = &test_input_stream->base; test_input_stream->position = 0; test_input_stream->length = stream_length; test_input_stream->allocator = allocator; return input_stream; } struct aws_input_stream *aws_s3_test_input_stream_new(struct aws_allocator *allocator, size_t stream_length) { return aws_s3_test_input_stream_new_with_value_type(allocator, stream_length, TEST_STREAM_VALUE_1); } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_test_parallel_stream.c000066400000000000000000000045621456575232400257050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include "aws/s3/private/s3_parallel_input_stream.h" #include "s3_tester.h" #include struct aws_parallel_input_stream_from_file_failure_impl { struct aws_parallel_input_stream base; struct aws_atomic_var number_read; }; static void s_para_from_file_failure_destroy(struct aws_parallel_input_stream *stream) { struct aws_parallel_input_stream_from_file_failure_impl *impl = stream->impl; aws_mem_release(stream->alloc, impl); } struct aws_future_bool *s_para_from_file_failure_read( struct aws_parallel_input_stream *stream, uint64_t offset, struct aws_byte_buf *dest) { (void)offset; struct aws_future_bool *future = aws_future_bool_new(stream->alloc); struct aws_parallel_input_stream_from_file_failure_impl *impl = stream->impl; size_t previous_number_read = aws_atomic_fetch_add(&impl->number_read, 1); if (previous_number_read == 1) { /* TODO: make the failure configurable */ aws_future_bool_set_error(future, AWS_ERROR_UNIMPLEMENTED); } else { struct aws_byte_cursor test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("This is an S3 test."); while (dest->len < dest->capacity) { size_t remaining_in_buffer = dest->capacity - dest->len; if (remaining_in_buffer < test_string.len) { test_string.len = remaining_in_buffer; } aws_byte_buf_append(dest, &test_string); } aws_future_bool_set_result(future, false); } return future; } static struct aws_parallel_input_stream_vtable s_parallel_input_stream_from_file_failure_vtable = { .destroy = s_para_from_file_failure_destroy, .read = s_para_from_file_failure_read, }; struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file_failure_tester( struct aws_allocator *allocator, struct aws_byte_cursor file_name) { (void)file_name; struct aws_parallel_input_stream_from_file_failure_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_parallel_input_stream_from_file_failure_impl)); aws_parallel_input_stream_init_base( &impl->base, allocator, &s_parallel_input_stream_from_file_failure_vtable, impl); aws_atomic_init_int(&impl->number_read, 0); return &impl->base; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_tester.c000066400000000000000000002441421456575232400230050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "s3_tester.h" #include "aws/s3/private/s3_auto_ranged_get.h" #include "aws/s3/private/s3_checksums.h" #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef _MSC_VER # pragma warning(disable : 4232) /* function pointer to dll symbol */ #endif const struct aws_byte_cursor g_mock_server_uri = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:8080/"); const struct aws_byte_cursor g_test_mrap_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("moujmk3izc19y.mrap.accesspoint.s3-global.amazonaws.com"); const struct aws_byte_cursor g_test_body_content_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("text/plain"); const struct aws_byte_cursor g_test_s3_region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-west-2"); const struct aws_byte_cursor g_s3_sse_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"); const struct aws_byte_cursor g_s3_sse_c_alg_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"); const struct aws_byte_cursor g_s3_sse_c_key_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"); const struct aws_byte_cursor g_s3_sse_c_key_md5_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-md5"); /* TODO populate these at the beginning of running tests with names that are unique to the test run. */ const struct aws_byte_cursor g_pre_existing_object_1MB = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/pre-existing-1MB"); const struct aws_byte_cursor g_pre_existing_object_10MB = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/pre-existing-10MB"); const struct aws_byte_cursor g_pre_existing_object_kms_10MB = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/pre-existing-10MB-kms"); const struct aws_byte_cursor g_pre_existing_object_aes256_10MB = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/pre-existing-10MB-aes256"); const struct aws_byte_cursor g_pre_existing_empty_object = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/pre-existing-empty"); const struct aws_byte_cursor g_put_object_prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/upload/put-object-test"); const struct aws_byte_cursor g_upload_folder = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/upload"); /* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use that; otherwise, use aws-c-s3-test-bucket */ struct aws_byte_cursor g_test_bucket_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-c-s3-test-bucket"); /* If `$CRT_S3_TEST_BUCKET_NAME` envrionment variable is set, use `$CRT_S3_TEST_BUCKET_NAME-public`; otherwise, use * aws-c-s3-test-bucket-public */ struct aws_byte_cursor g_test_public_bucket_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-c-s3-test-bucket-public"); /* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use * `$CRT_S3_TEST_BUCKET_NAME--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com`; otherwise, use * aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com */ struct aws_byte_cursor g_test_s3express_bucket_usw2_az1_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com"); /* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use * `$CRT_S3_TEST_BUCKET_NAME--us1-az1--x-s3.s3express-use1-az4.us-east-1.amazonaws.com`; otherwise, use * 
aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com */ struct aws_byte_cursor g_test_s3express_bucket_use1_az4_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"); #ifdef BYO_CRYPTO /* Under BYO_CRYPTO, this function currently needs to be defined by the user. Defining a null implementation here so * that tests build, but it is not currently meant to be used by any tests. */ struct aws_byte_buf aws_tls_handler_protocol(struct aws_channel_handler *handler) { (void)handler; AWS_FATAL_ASSERT(false); struct aws_byte_buf byte_buf; AWS_ZERO_STRUCT(byte_buf); return byte_buf; } #endif static int s_s3_test_meta_request_header_callback( struct aws_s3_meta_request *meta_request, const struct aws_http_headers *headers, int response_status, void *user_data) { (void)meta_request; struct aws_s3_meta_request_test_results *meta_request_test_results = (struct aws_s3_meta_request_test_results *)user_data; aws_http_headers_release(meta_request_test_results->response_headers); meta_request_test_results->response_headers = (struct aws_http_headers *)headers; aws_http_headers_acquire(meta_request_test_results->response_headers); meta_request_test_results->headers_response_status = response_status; if (meta_request_test_results->headers_callback != NULL) { return meta_request_test_results->headers_callback(meta_request, headers, response_status, user_data); } return AWS_OP_SUCCESS; } static int s_s3_test_meta_request_body_callback( struct aws_s3_meta_request *meta_request, const struct aws_byte_cursor *body, uint64_t range_start, void *user_data) { (void)meta_request; (void)body; AWS_PRECONDITION(meta_request); AWS_PRECONDITION(body); struct aws_s3_meta_request_test_results *meta_request_test_results = user_data; meta_request_test_results->received_body_size += body->len; aws_atomic_fetch_add(&meta_request_test_results->received_body_size_delta, body->len); AWS_LOGF_DEBUG( AWS_LS_S3_GENERAL, "Received range %" PRIu64 "-%" PRIu64 ". 
Expected range start: %" PRIu64, range_start, range_start + body->len - 1, meta_request_test_results->expected_range_start); uint64_t object_range_start = 0; /* If this is an auto-ranged-get meta request, then grab the object range start so that the expected_range_start can * be properly offset.*/ if (meta_request->type == AWS_S3_META_REQUEST_TYPE_GET_OBJECT && meta_request->part_size != 0) { aws_s3_meta_request_lock_synced_data(meta_request); struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl; AWS_PRECONDITION(auto_ranged_get); bool object_range_known = auto_ranged_get->synced_data.object_range_known != 0; object_range_start = auto_ranged_get->synced_data.object_range_start; aws_s3_meta_request_unlock_synced_data(meta_request); ASSERT_TRUE(object_range_known); } ASSERT_TRUE((object_range_start + meta_request_test_results->expected_range_start) == range_start); meta_request_test_results->expected_range_start += body->len; if (meta_request_test_results->body_callback != NULL) { return meta_request_test_results->body_callback(meta_request, body, range_start, user_data); } return AWS_OP_SUCCESS; } static void s_s3_test_meta_request_finish( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_result *result, void *user_data) { (void)meta_request; struct aws_s3_meta_request_test_results *meta_request_test_results = user_data; struct aws_s3_tester *tester = meta_request_test_results->tester; meta_request_test_results->error_response_headers = result->error_response_headers; if (result->error_response_headers != NULL) { aws_http_headers_acquire(result->error_response_headers); } if (result->error_response_body != NULL) { aws_byte_buf_init_copy( &meta_request_test_results->error_response_body, tester->allocator, result->error_response_body); } if (result->error_response_operation_name != NULL) { meta_request_test_results->error_response_operation_name = aws_string_new_from_string(tester->allocator, result->error_response_operation_name); } meta_request_test_results->finished_response_status = result->response_status; meta_request_test_results->finished_error_code = result->error_code; if (meta_request_test_results->finish_callback != NULL) { meta_request_test_results->finish_callback(meta_request, result, user_data); } aws_s3_tester_notify_meta_request_finished(tester, result); } static void s_s3_test_meta_request_shutdown(void *user_data) { struct aws_s3_meta_request_test_results *meta_request_test_results = user_data; struct aws_s3_tester *tester = meta_request_test_results->tester; aws_s3_tester_notify_meta_request_shutdown(tester); } static void s_s3_test_meta_request_telemetry( struct aws_s3_meta_request *meta_request, struct aws_s3_request_metrics *metrics, void *user_data) { (void)meta_request; struct aws_s3_meta_request_test_results *meta_request_test_results = user_data; struct aws_s3_tester *tester = meta_request_test_results->tester; uint64_t time_stamp = 0; aws_s3_request_metrics_get_start_timestamp_ns(metrics, &time_stamp); AWS_FATAL_ASSERT(time_stamp > 0); aws_s3_request_metrics_get_end_timestamp_ns(metrics, &time_stamp); AWS_FATAL_ASSERT(time_stamp > 0); aws_s3_request_metrics_get_total_duration_ns(metrics, &time_stamp); AWS_FATAL_ASSERT(time_stamp > 0); if (!aws_s3_request_metrics_get_send_end_timestamp_ns(metrics, &time_stamp)) { AWS_FATAL_ASSERT(time_stamp > 0); uint64_t start_time = 0; uint64_t end_time = 0; uint64_t during_time = 0; int error = 0; error |= aws_s3_request_metrics_get_send_start_timestamp_ns(metrics, &start_time); error |= 
aws_s3_request_metrics_get_send_end_timestamp_ns(metrics, &end_time); error |= aws_s3_request_metrics_get_sending_duration_ns(metrics, &during_time); AWS_FATAL_ASSERT(error == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(during_time == (end_time - start_time)); } if (!aws_s3_request_metrics_get_receive_end_timestamp_ns(metrics, &time_stamp)) { AWS_FATAL_ASSERT(time_stamp > 0); uint64_t start_time = 0; uint64_t end_time = 0; uint64_t during_time = 0; int error = 0; error |= aws_s3_request_metrics_get_receive_start_timestamp_ns(metrics, &start_time); error |= aws_s3_request_metrics_get_receive_end_timestamp_ns(metrics, &end_time); error |= aws_s3_request_metrics_get_receiving_duration_ns(metrics, &during_time); AWS_FATAL_ASSERT(error == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(during_time == (end_time - start_time)); } aws_s3_tester_lock_synced_data(tester); aws_array_list_push_back(&meta_request_test_results->synced_data.metrics, &metrics); aws_s3_request_metrics_acquire(metrics); aws_s3_tester_unlock_synced_data(tester); } static void s_s3_test_meta_request_progress( struct aws_s3_meta_request *meta_request, const struct aws_s3_meta_request_progress *progress, void *user_data) { (void)meta_request; AWS_ASSERT(meta_request); AWS_ASSERT(progress); AWS_ASSERT(user_data); struct aws_s3_meta_request_test_results *meta_request_test_results = user_data; meta_request_test_results->progress.total_bytes_transferred += progress->bytes_transferred; /* Once content_length is reported, it shouldn't change */ if (meta_request_test_results->progress.content_length == 0) { meta_request_test_results->progress.content_length = progress->content_length; } else { AWS_FATAL_ASSERT(meta_request_test_results->progress.content_length == progress->content_length); } /* If content_length is known, we shouldn't go over it */ if (progress->content_length != 0) { AWS_FATAL_ASSERT(meta_request_test_results->progress.total_bytes_transferred <= progress->content_length); } if (meta_request_test_results->progress_callback != NULL) { meta_request_test_results->progress_callback(meta_request, progress, user_data); } } static int s_s3_test_meta_request_upload_review( struct aws_s3_meta_request *meta_request, const struct aws_s3_upload_review *review, void *user_data) { struct aws_s3_meta_request_test_results *test_results = user_data; AWS_FATAL_ASSERT(test_results->upload_review.invoked_count == 0); test_results->upload_review.invoked_count++; test_results->upload_review.checksum_algorithm = review->checksum_algorithm; test_results->upload_review.part_count = review->part_count; if (test_results->upload_review.part_count > 0) { test_results->upload_review.part_sizes_array = aws_mem_calloc(test_results->allocator, review->part_count, sizeof(uint64_t)); test_results->upload_review.part_checksums_array = aws_mem_calloc(test_results->allocator, review->part_count, sizeof(struct aws_string *)); for (size_t i = 0; i < review->part_count; ++i) { test_results->upload_review.part_sizes_array[i] = review->part_array[i].size; test_results->upload_review.part_checksums_array[i] = aws_string_new_from_cursor(test_results->allocator, &review->part_array[i].checksum); } } if (test_results->upload_review_callback != NULL) { return test_results->upload_review_callback(meta_request, review, user_data); } else { return AWS_OP_SUCCESS; } } /* Notify the tester that a particular clean up step has finished. 
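 * The notify helpers below increment a counter under the tester lock and, once the desired count is
 * reached, signal the shared condition variable that the matching wait helper blocks on.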
*/ static void s_s3_test_client_shutdown(void *user_data); static bool s_s3_tester_have_meta_requests_finished(void *user_data); static bool s_s3_tester_has_client_shutdown(void *user_data); struct aws_string *aws_s3_tester_build_endpoint_string( struct aws_allocator *allocator, const struct aws_byte_cursor *bucket_name, const struct aws_byte_cursor *region) { struct aws_byte_cursor endpoint_url_part0 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".s3."); struct aws_byte_cursor endpoint_url_part1 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".amazonaws.com"); struct aws_byte_buf endpoint_buffer; aws_byte_buf_init(&endpoint_buffer, allocator, 128); aws_byte_buf_append_dynamic(&endpoint_buffer, bucket_name); aws_byte_buf_append_dynamic(&endpoint_buffer, &endpoint_url_part0); aws_byte_buf_append_dynamic(&endpoint_buffer, region); aws_byte_buf_append_dynamic(&endpoint_buffer, &endpoint_url_part1); struct aws_string *endpoint_string = aws_string_new_from_buf(allocator, &endpoint_buffer); aws_byte_buf_clean_up(&endpoint_buffer); return endpoint_string; } AWS_STATIC_STRING_FROM_LITERAL(s_bucket_name_env_var, "CRT_S3_TEST_BUCKET_NAME"); int aws_s3_tester_init(struct aws_allocator *allocator, struct aws_s3_tester *tester) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(tester); (void)allocator; AWS_ZERO_STRUCT(*tester); tester->allocator = allocator; if (aws_get_environment_value(allocator, s_bucket_name_env_var, &tester->bucket_name) == AWS_OP_SUCCESS && tester->bucket_name != NULL) { g_test_bucket_name = aws_byte_cursor_from_string(tester->bucket_name); char public_bucket_name_buffer[128] = ""; snprintf( public_bucket_name_buffer, sizeof(public_bucket_name_buffer), "" PRInSTR "-public", AWS_BYTE_CURSOR_PRI(g_test_bucket_name)); tester->public_bucket_name = aws_string_new_from_c_str(allocator, public_bucket_name_buffer); g_test_public_bucket_name = aws_byte_cursor_from_string(tester->public_bucket_name); char s3express_bucket_usw2_az1_endpoint_buffer[512] = ""; snprintf( s3express_bucket_usw2_az1_endpoint_buffer, sizeof(s3express_bucket_usw2_az1_endpoint_buffer), "" PRInSTR "--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com", AWS_BYTE_CURSOR_PRI(g_test_bucket_name)); tester->s3express_bucket_usw2_az1_endpoint = aws_string_new_from_c_str(allocator, s3express_bucket_usw2_az1_endpoint_buffer); g_test_s3express_bucket_usw2_az1_endpoint = aws_byte_cursor_from_string(tester->s3express_bucket_usw2_az1_endpoint); char s3express_bucket_use1_az4_name_buffer[128] = ""; snprintf( s3express_bucket_use1_az4_name_buffer, sizeof(s3express_bucket_use1_az4_name_buffer), "" PRInSTR "--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com", AWS_BYTE_CURSOR_PRI(g_test_bucket_name)); tester->s3express_bucket_use1_az4_endpoint = aws_string_new_from_c_str(allocator, s3express_bucket_use1_az4_name_buffer); g_test_s3express_bucket_use1_az4_endpoint = aws_byte_cursor_from_string(tester->s3express_bucket_use1_az4_endpoint); } aws_s3_library_init(allocator); if (aws_mutex_init(&tester->synced_data.lock)) { return AWS_OP_ERR; } if (aws_condition_variable_init(&tester->signal)) { goto condition_variable_failed; } ASSERT_SUCCESS(aws_array_list_init_dynamic( &tester->client_vtable_patches, tester->allocator, 4, sizeof(struct aws_s3_client_vtable_patch))); ASSERT_SUCCESS(aws_array_list_init_dynamic( &tester->meta_request_vtable_patches, tester->allocator, 4, sizeof(struct aws_s3_meta_request_vtable_patch))); /* Setup an event loop group and host resolver. 
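 * A max_threads of 0 lets aws-c-io size the event loop group from the available processors, and the
 * resolver below caches at most 10 host entries.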
*/ tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL); ASSERT_TRUE(tester->el_group != NULL); struct aws_host_resolver_default_options resolver_options = { .max_entries = 10, .el_group = tester->el_group, }; tester->host_resolver = aws_host_resolver_new_default(allocator, &resolver_options); ASSERT_TRUE(tester->host_resolver != NULL); /* Setup the client boot strap. */ { struct aws_client_bootstrap_options bootstrap_options; AWS_ZERO_STRUCT(bootstrap_options); bootstrap_options.event_loop_group = tester->el_group; bootstrap_options.host_resolver = tester->host_resolver; bootstrap_options.user_data = tester; tester->client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); } tester->anonymous_creds = aws_credentials_new_anonymous(allocator); tester->anonymous_signing_config.credentials = tester->anonymous_creds; #ifndef BYO_CRYPTO /* Setup the credentials provider */ { struct aws_credentials_provider_chain_default_options credentials_config; AWS_ZERO_STRUCT(credentials_config); credentials_config.bootstrap = tester->client_bootstrap; tester->credentials_provider = aws_credentials_provider_new_chain_default(allocator, &credentials_config); aws_s3_init_default_signing_config( &tester->default_signing_config, g_test_s3_region, tester->credentials_provider); } #else { tester->default_signing_config = tester->anonymous_signing_config; } #endif return AWS_OP_SUCCESS; condition_variable_failed: aws_mutex_clean_up(&tester->synced_data.lock); return AWS_OP_ERR; } int aws_s3_tester_bind_client(struct aws_s3_tester *tester, struct aws_s3_client_config *config, uint32_t flags) { AWS_PRECONDITION(tester); AWS_PRECONDITION(config); ASSERT_TRUE(!tester->bound_to_client); tester->bound_to_client = true; ASSERT_TRUE(config->client_bootstrap == NULL); config->client_bootstrap = tester->client_bootstrap; if (flags & AWS_S3_TESTER_BIND_CLIENT_SIGNING) { ASSERT_TRUE(config->signing_config == NULL); config->signing_config = &tester->default_signing_config; } if (flags & AWS_S3_TESTER_BIND_CLIENT_REGION) { ASSERT_TRUE(config->region.len == 0); config->region = g_test_s3_region; } else { if (config->signing_config) { config->signing_config->region = config->region; } } if (!config->signing_config) { config->signing_config = &tester->anonymous_signing_config; } ASSERT_TRUE(config->shutdown_callback == NULL); config->shutdown_callback = s_s3_test_client_shutdown; ASSERT_TRUE(config->shutdown_callback_user_data == NULL); config->shutdown_callback_user_data = tester; return AWS_OP_SUCCESS; } int aws_s3_tester_bind_meta_request( struct aws_s3_tester *tester, struct aws_s3_meta_request_options *options, struct aws_s3_meta_request_test_results *meta_request_test_results) { meta_request_test_results->tester = tester; aws_s3_tester_lock_synced_data(tester); ++tester->synced_data.desired_meta_request_finish_count; ++tester->synced_data.desired_meta_request_shutdown_count; aws_s3_tester_unlock_synced_data(tester); ASSERT_TRUE(options->headers_callback == NULL); options->headers_callback = s_s3_test_meta_request_header_callback; ASSERT_TRUE(options->body_callback == NULL); options->body_callback = s_s3_test_meta_request_body_callback; ASSERT_TRUE(options->finish_callback == NULL); options->finish_callback = s_s3_test_meta_request_finish; ASSERT_TRUE(options->shutdown_callback == NULL); options->shutdown_callback = s_s3_test_meta_request_shutdown; ASSERT_TRUE(options->telemetry_callback == NULL); options->telemetry_callback = s_s3_test_meta_request_telemetry; 
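    /* The remaining hooks follow the same pattern: assert that the test left the slot empty, install
     * the tester's wrapper, and let the wrapper forward to any per-test callback recorded in
     * meta_request_test_results. */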
ASSERT_TRUE(options->progress_callback == NULL); options->progress_callback = s_s3_test_meta_request_progress; ASSERT_TRUE(options->upload_review_callback == NULL); options->upload_review_callback = s_s3_test_meta_request_upload_review; ASSERT_TRUE(options->user_data == NULL); options->user_data = meta_request_test_results; return AWS_OP_SUCCESS; } void aws_s3_meta_request_test_results_init( struct aws_s3_meta_request_test_results *test_meta_request, struct aws_allocator *allocator) { AWS_ZERO_STRUCT(*test_meta_request); test_meta_request->allocator = allocator; aws_atomic_init_int(&test_meta_request->received_body_size_delta, 0); aws_array_list_init_dynamic( &test_meta_request->synced_data.metrics, allocator, 4, sizeof(struct aws_s3_request_metrics *)); } void aws_s3_meta_request_test_results_clean_up(struct aws_s3_meta_request_test_results *test_meta_request) { if (test_meta_request == NULL) { return; } aws_http_headers_release(test_meta_request->error_response_headers); aws_byte_buf_clean_up(&test_meta_request->error_response_body); aws_string_destroy(test_meta_request->error_response_operation_name); aws_http_headers_release(test_meta_request->response_headers); while (aws_array_list_length(&test_meta_request->synced_data.metrics) > 0) { struct aws_s3_request_metrics *metrics = NULL; aws_array_list_back(&test_meta_request->synced_data.metrics, (void **)&metrics); aws_array_list_pop_back(&test_meta_request->synced_data.metrics); aws_s3_request_metrics_release(metrics); } aws_array_list_clean_up(&test_meta_request->synced_data.metrics); for (size_t i = 0; i < test_meta_request->upload_review.part_count; ++i) { aws_string_destroy(test_meta_request->upload_review.part_checksums_array[i]); } aws_mem_release(test_meta_request->allocator, test_meta_request->upload_review.part_sizes_array); aws_mem_release(test_meta_request->allocator, test_meta_request->upload_review.part_checksums_array); AWS_ZERO_STRUCT(*test_meta_request); } void aws_s3_tester_notify_meta_request_finished( struct aws_s3_tester *tester, const struct aws_s3_meta_request_result *result) { AWS_PRECONDITION(tester); bool notify = false; aws_s3_tester_lock_synced_data(tester); ++tester->synced_data.meta_request_finish_count; int error_code = AWS_ERROR_SUCCESS; if (result != NULL) { error_code = result->error_code; } if (tester->synced_data.desired_meta_request_finish_count == 0 || tester->synced_data.meta_request_finish_count == tester->synced_data.desired_meta_request_finish_count || (error_code != AWS_ERROR_SUCCESS)) { tester->synced_data.meta_requests_finished = true; tester->synced_data.finish_error_code = error_code; notify = true; } aws_s3_tester_unlock_synced_data(tester); if (notify) { aws_condition_variable_notify_all(&tester->signal); } } static bool s_s3_tester_have_meta_requests_finished(void *user_data) { AWS_PRECONDITION(user_data); struct aws_s3_tester *tester = (struct aws_s3_tester *)user_data; return tester->synced_data.meta_requests_finished > 0; } void aws_s3_tester_wait_for_meta_request_finish(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); aws_s3_tester_lock_synced_data(tester); aws_condition_variable_wait_pred( &tester->signal, &tester->synced_data.lock, s_s3_tester_have_meta_requests_finished, tester); tester->synced_data.meta_requests_finished = false; aws_s3_tester_unlock_synced_data(tester); } void aws_s3_tester_notify_meta_request_shutdown(struct aws_s3_tester *tester) { bool notify = false; aws_s3_tester_lock_synced_data(tester); ++tester->synced_data.meta_request_shutdown_count; if 
(tester->synced_data.desired_meta_request_shutdown_count == 0 || tester->synced_data.meta_request_shutdown_count == tester->synced_data.desired_meta_request_shutdown_count) { tester->synced_data.meta_requests_shutdown = true; notify = true; } aws_s3_tester_unlock_synced_data(tester); if (notify) { aws_condition_variable_notify_all(&tester->signal); } } static bool s_s3_tester_have_meta_requests_shutdown(void *user_data) { AWS_PRECONDITION(user_data); struct aws_s3_tester *tester = (struct aws_s3_tester *)user_data; return tester->synced_data.meta_requests_shutdown > 0; } void aws_s3_tester_wait_for_meta_request_shutdown(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); aws_s3_tester_lock_synced_data(tester); aws_condition_variable_wait_pred( &tester->signal, &tester->synced_data.lock, s_s3_tester_have_meta_requests_shutdown, tester); tester->synced_data.meta_requests_shutdown = false; aws_s3_tester_unlock_synced_data(tester); } static bool s_s3_tester_counters_equal_desired(void *user_data) { AWS_PRECONDITION(user_data); struct aws_s3_tester *tester = (struct aws_s3_tester *)user_data; return tester->synced_data.counter1 == tester->synced_data.desired_counter1 && tester->synced_data.counter2 == tester->synced_data.desired_counter2; } void aws_s3_tester_wait_for_signal(struct aws_s3_tester *tester) { aws_s3_tester_lock_synced_data(tester); aws_condition_variable_wait(&tester->signal, &tester->synced_data.lock); aws_s3_tester_unlock_synced_data(tester); } void aws_s3_tester_notify_signal(struct aws_s3_tester *tester) { aws_condition_variable_notify_all(&tester->signal); } void aws_s3_tester_wait_for_counters(struct aws_s3_tester *tester) { aws_s3_tester_lock_synced_data(tester); aws_condition_variable_wait_pred( &tester->signal, &tester->synced_data.lock, s_s3_tester_counters_equal_desired, tester); aws_s3_tester_unlock_synced_data(tester); } size_t aws_s3_tester_inc_counter1(struct aws_s3_tester *tester) { aws_s3_tester_lock_synced_data(tester); size_t result = ++tester->synced_data.counter1; aws_s3_tester_unlock_synced_data(tester); aws_condition_variable_notify_all(&tester->signal); return result; } size_t aws_s3_tester_inc_counter2(struct aws_s3_tester *tester) { aws_s3_tester_lock_synced_data(tester); size_t result = ++tester->synced_data.counter2; aws_s3_tester_unlock_synced_data(tester); aws_condition_variable_notify_all(&tester->signal); return result; } void aws_s3_tester_reset_counter1(struct aws_s3_tester *tester) { aws_s3_tester_lock_synced_data(tester); tester->synced_data.counter1 = 0; aws_s3_tester_unlock_synced_data(tester); } void aws_s3_tester_reset_counter2(struct aws_s3_tester *tester) { aws_s3_tester_lock_synced_data(tester); tester->synced_data.counter2 = 0; aws_s3_tester_unlock_synced_data(tester); } void aws_s3_tester_set_counter1_desired(struct aws_s3_tester *tester, size_t value) { aws_s3_tester_lock_synced_data(tester); tester->synced_data.desired_counter1 = value; aws_s3_tester_unlock_synced_data(tester); } void aws_s3_tester_set_counter2_desired(struct aws_s3_tester *tester, size_t value) { aws_s3_tester_lock_synced_data(tester); tester->synced_data.desired_counter2 = value; aws_s3_tester_unlock_synced_data(tester); } void aws_s3_tester_clean_up(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); if (tester->bound_to_client) { aws_s3_tester_wait_for_client_shutdown(tester); tester->bound_to_client = false; } aws_string_destroy(tester->bucket_name); aws_string_destroy(tester->public_bucket_name); 
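/* The releases below only drop this tester's references; objects such as the client bootstrap, credentials provider,
 * host resolver, and event loop group may finish shutting down asynchronously after this function returns. The
 * bootstrap is released before the host resolver and event loop group that it references. */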
aws_string_destroy(tester->s3express_bucket_usw2_az1_endpoint); aws_string_destroy(tester->s3express_bucket_use1_az4_endpoint); aws_credentials_release(tester->anonymous_creds); aws_array_list_clean_up(&tester->client_vtable_patches); aws_array_list_clean_up(&tester->meta_request_vtable_patches); aws_client_bootstrap_release(tester->client_bootstrap); tester->client_bootstrap = NULL; aws_credentials_provider_release(tester->credentials_provider); tester->credentials_provider = NULL; aws_host_resolver_release(tester->host_resolver); tester->host_resolver = NULL; aws_event_loop_group_release(tester->el_group); tester->el_group = NULL; aws_s3_library_clean_up(); aws_condition_variable_clean_up(&tester->signal); aws_mutex_clean_up(&tester->synced_data.lock); } void aws_s3_tester_lock_synced_data(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); aws_mutex_lock(&tester->synced_data.lock); } void aws_s3_tester_unlock_synced_data(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); aws_mutex_unlock(&tester->synced_data.lock); } struct aws_s3_meta_request *s_s3_client_meta_request_factory_empty( struct aws_s3_client *client, const struct aws_s3_meta_request_options *options) { AWS_PRECONDITION(client); AWS_PRECONDITION(options); (void)client; (void)options; return NULL; } void s_s3_client_create_connection_for_request_empty(struct aws_s3_client *client, struct aws_s3_request *request) { AWS_PRECONDITION(client); AWS_PRECONDITION(request); (void)client; (void)request; } static void s_s3_client_acquire_http_connection_empty( struct aws_http_connection_manager *conn_manager, aws_http_connection_manager_on_connection_setup_fn *on_connection_acquired_callback, void *user_data) { (void)conn_manager; (void)on_connection_acquired_callback; (void)user_data; } size_t s_s3_client_get_host_address_count_empty( struct aws_host_resolver *host_resolver, const struct aws_string *host_name, uint32_t flags) { (void)host_resolver; (void)host_name; (void)flags; return 0; } static void s_s3_client_schedule_process_work_synced_empty(struct aws_s3_client *client) { (void)client; } static void s_s3_client_process_work_empty(struct aws_s3_client *client) { AWS_PRECONDITION(client); (void)client; } static void s_s3_client_endpoint_shutdown_callback_empty(struct aws_s3_client *client) { AWS_PRECONDITION(client); (void)client; } static void s_s3_client_finish_destroy_empty(struct aws_s3_client *client) { AWS_PRECONDITION(client); (void)client; } struct aws_s3_client_vtable g_aws_s3_client_mock_vtable = { .meta_request_factory = s_s3_client_meta_request_factory_empty, .create_connection_for_request = s_s3_client_create_connection_for_request_empty, .acquire_http_connection = s_s3_client_acquire_http_connection_empty, .get_host_address_count = s_s3_client_get_host_address_count_empty, .schedule_process_work_synced = s_s3_client_schedule_process_work_synced_empty, .process_work = s_s3_client_process_work_empty, .endpoint_shutdown_callback = s_s3_client_endpoint_shutdown_callback_empty, .finish_destroy = s_s3_client_finish_destroy_empty, }; static void s_s3_mock_client_start_destroy(void *user_data) { struct aws_s3_client *client = user_data; AWS_ASSERT(client); aws_s3_buffer_pool_destroy(client->buffer_pool); aws_mem_release(client->allocator, client); } struct aws_s3_client *aws_s3_tester_mock_client_new(struct aws_s3_tester *tester) { struct aws_allocator *allocator = tester->allocator; struct aws_s3_client *mock_client = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_client)); mock_client->allocator = 
allocator; mock_client->buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), GB_TO_BYTES(1)); mock_client->vtable = &g_aws_s3_client_mock_vtable; aws_ref_count_init( &mock_client->ref_count, mock_client, (aws_simple_completion_callback *)s_s3_mock_client_start_destroy); aws_mutex_init(&mock_client->synced_data.lock); aws_atomic_init_int(&mock_client->stats.num_requests_in_flight, 0); for (uint32_t i = 0; i < (uint32_t)AWS_S3_META_REQUEST_TYPE_MAX; ++i) { aws_atomic_init_int(&mock_client->stats.num_requests_network_io[i], 0); } aws_atomic_init_int(&mock_client->stats.num_requests_stream_queued_waiting, 0); aws_atomic_init_int(&mock_client->stats.num_requests_streaming_response, 0); return mock_client; } struct aws_http_message *aws_s3_tester_dummy_http_request_new(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); struct aws_http_message *message = aws_http_message_new_request(tester->allocator); aws_http_message_set_request_method(message, aws_http_method_get); aws_http_message_set_request_path(message, aws_byte_cursor_from_c_str("/dummy_test")); struct aws_http_header host_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("dummy_host"), }; aws_http_message_add_header(message, host_header); return message; } static bool s_s3_meta_request_update_empty( struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request) { (void)meta_request; (void)flags; (void)out_request; return false; } void s_s3_meta_request_send_request_finish_empty( struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code) { (void)connection; (void)stream; (void)error_code; } static void s_s3_meta_request_finished_request_empty( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code) { (void)meta_request; (void)request; (void)error_code; } static void s_s3_meta_request_schedule_prepare_request_empty( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_s3_meta_request_prepare_request_callback_fn *callback, void *user_data) { (void)meta_request; (void)request; (void)callback; (void)user_data; } static struct aws_future_void *s_s3_meta_request_prepare_request_async_empty(struct aws_s3_request *request) { struct aws_future_void *future = aws_future_void_new(request->allocator); aws_future_void_set_error(future, AWS_ERROR_UNKNOWN); return future; } static void s_s3_meta_request_init_signing_date_time_empty( struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time) { (void)meta_request; (void)date_time; } static void s_s3_meta_request_sign_request_empty( struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, aws_signing_complete_fn *on_signing_complete, void *user_data) { (void)meta_request; (void)request; (void)on_signing_complete; (void)user_data; } static void s_s3_mock_meta_request_destroy(struct aws_s3_meta_request *meta_request) { AWS_PRECONDITION(meta_request); aws_mem_release(meta_request->allocator, meta_request->impl); } static struct aws_s3_meta_request_vtable s_s3_mock_meta_request_vtable = { .update = s_s3_meta_request_update_empty, .send_request_finish = s_s3_meta_request_send_request_finish_empty, .schedule_prepare_request = s_s3_meta_request_schedule_prepare_request_empty, .prepare_request = s_s3_meta_request_prepare_request_async_empty, .finished_request = s_s3_meta_request_finished_request_empty, .init_signing_date_time = s_s3_meta_request_init_signing_date_time_empty, 
.sign_request = s_s3_meta_request_sign_request_empty, .destroy = s_s3_mock_meta_request_destroy, }; struct aws_s3_empty_meta_request { struct aws_s3_meta_request base; }; static void s_s3_mock_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock) { (void)already_holding_lock; ++endpoint->client_synced_data.ref_count; } static void s_s3_mock_endpoint_release(struct aws_s3_endpoint *endpoint) { if (--endpoint->client_synced_data.ref_count == 0) { aws_string_destroy(endpoint->host_name); aws_mem_release(endpoint->allocator, endpoint); } } static struct aws_s3_endpoint_system_vtable s_s3_mock_endpoint_vtable = { .acquire = s_s3_mock_endpoint_acquire, .release = s_s3_mock_endpoint_release, }; struct aws_s3_endpoint *aws_s3_tester_mock_endpoint_new(struct aws_s3_tester *tester) { aws_s3_endpoint_set_system_vtable(&s_s3_mock_endpoint_vtable); struct aws_s3_endpoint *endpoint = aws_mem_calloc(tester->allocator, 1, sizeof(struct aws_s3_endpoint)); endpoint->allocator = tester->allocator; endpoint->client_synced_data.ref_count = 1; struct aws_byte_cursor empty_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""); endpoint->host_name = aws_string_new_from_cursor(tester->allocator, &empty_cursor); return endpoint; } /* Mock request defaults to GET request */ struct aws_s3_meta_request *aws_s3_tester_mock_meta_request_new(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); struct aws_s3_empty_meta_request *empty_meta_request = aws_mem_calloc(tester->allocator, 1, sizeof(struct aws_s3_empty_meta_request)); struct aws_http_message *dummy_http_message = aws_s3_tester_dummy_http_request_new(tester); struct aws_s3_meta_request_options options = { .message = dummy_http_message, .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, }; aws_s3_meta_request_init_base( tester->allocator, NULL, 0, false, &options, empty_meta_request, &s_s3_mock_meta_request_vtable, &empty_meta_request->base); aws_http_message_release(dummy_http_message); return &empty_meta_request->base; } void aws_s3_create_test_buffer(struct aws_allocator *allocator, size_t buffer_size, struct aws_byte_buf *out_buf) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(out_buf); struct aws_byte_cursor test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("This is an S3 test."); aws_byte_buf_init(out_buf, allocator, buffer_size); for (size_t buffer_pos = 0; buffer_pos < buffer_size; buffer_pos += test_string.len) { size_t buffer_size_remaining = buffer_size - buffer_pos; size_t string_copy_size = test_string.len; if (buffer_size_remaining < string_copy_size) { string_copy_size = buffer_size_remaining; } struct aws_byte_cursor from_byte_cursor = {.len = string_copy_size, .ptr = test_string.ptr}; aws_byte_buf_append(out_buf, &from_byte_cursor); } } static void s_s3_test_client_shutdown(void *user_data) { AWS_PRECONDITION(user_data); struct aws_s3_tester *tester = (struct aws_s3_tester *)user_data; aws_s3_tester_lock_synced_data(tester); tester->synced_data.client_shutdown = true; aws_s3_tester_unlock_synced_data(tester); aws_condition_variable_notify_all(&tester->signal); } static bool s_s3_tester_has_client_shutdown(void *user_data) { AWS_PRECONDITION(user_data); struct aws_s3_tester *tester = (struct aws_s3_tester *)user_data; return tester->synced_data.client_shutdown > 0; } void aws_s3_tester_wait_for_client_shutdown(struct aws_s3_tester *tester) { AWS_PRECONDITION(tester); aws_s3_tester_lock_synced_data(tester); aws_condition_variable_wait_pred( &tester->signal, &tester->synced_data.lock, s_s3_tester_has_client_shutdown, tester); 
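/* The synced-data lock is still held here; clearing the flag before unlocking lets the same tester be bound to
 * another client later without seeing a stale shutdown signal. */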
tester->synced_data.client_shutdown = false; aws_s3_tester_unlock_synced_data(tester); } struct aws_http_message *aws_s3_test_get_object_request_new( struct aws_allocator *allocator, struct aws_byte_cursor host, struct aws_byte_cursor key) { struct aws_http_message *message = aws_http_message_new_request(allocator); if (message == NULL) { return NULL; } struct aws_http_header host_header = {.name = g_host_header_name, .value = host}; if (aws_http_message_add_header(message, host_header)) { goto error_clean_up_message; } if (aws_http_message_set_request_method(message, aws_http_method_get)) { goto error_clean_up_message; } if (aws_http_message_set_request_path(message, key)) { goto error_clean_up_message; } return message; error_clean_up_message: if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } struct aws_s3_client_vtable *aws_s3_tester_patch_client_vtable( struct aws_s3_tester *tester, struct aws_s3_client *client, size_t *out_index) { struct aws_s3_client_vtable_patch patch; AWS_ZERO_STRUCT(patch); /* Push a new vtable patch into the array. */ aws_array_list_push_back(&tester->client_vtable_patches, (void *)&patch); /* Get a pointer to the new vtable patch. */ size_t index = aws_array_list_length(&tester->client_vtable_patches) - 1; struct aws_s3_client_vtable_patch *patch_array_ptr = aws_s3_tester_get_client_vtable_patch(tester, index); /* Cache a pointer to the original vtable. */ patch_array_ptr->original_vtable = client->vtable; /* Copy the original vtable contents into the patched vtable. */ memcpy(&patch_array_ptr->patched_vtable, patch_array_ptr->original_vtable, sizeof(struct aws_s3_client_vtable)); /* Point the client at the new vtable. */ client->vtable = &patch_array_ptr->patched_vtable; if (out_index) { *out_index = index; } return &patch_array_ptr->patched_vtable; } struct aws_s3_client_vtable_patch *aws_s3_tester_get_client_vtable_patch(struct aws_s3_tester *tester, size_t index) { struct aws_s3_client_vtable_patch *patch = NULL; aws_array_list_get_at_ptr(&tester->client_vtable_patches, (void **)&patch, index); return patch; } struct aws_s3_meta_request_vtable *aws_s3_tester_patch_meta_request_vtable( struct aws_s3_tester *tester, struct aws_s3_meta_request *meta_request, size_t *out_index) { struct aws_s3_meta_request_vtable_patch patch; AWS_ZERO_STRUCT(patch); /* Push a new vtable patch into the array. */ aws_array_list_push_back(&tester->meta_request_vtable_patches, (void *)&patch); /* Get a pointer to the new vtable patch. */ size_t index = aws_array_list_length(&tester->meta_request_vtable_patches) - 1; struct aws_s3_meta_request_vtable_patch *patch_array_ptr = aws_s3_tester_get_meta_request_vtable_patch(tester, index); /* Cache a pointer to the original vtable. */ patch_array_ptr->original_vtable = meta_request->vtable; /* Copy the original vtable contents into the patched vtable. */ memcpy( &patch_array_ptr->patched_vtable, patch_array_ptr->original_vtable, sizeof(struct aws_s3_meta_request_vtable)); /* Point the meta request at the new vtable. 
*/ meta_request->vtable = &patch_array_ptr->patched_vtable; if (out_index) { *out_index = index; } return &patch_array_ptr->patched_vtable; } struct aws_s3_meta_request_vtable_patch *aws_s3_tester_get_meta_request_vtable_patch( struct aws_s3_tester *tester, size_t index) { struct aws_s3_meta_request_vtable_patch *patch = NULL; aws_array_list_get_at_ptr(&tester->meta_request_vtable_patches, (void **)&patch, index); return patch; } struct aws_http_message *aws_s3_test_put_object_request_new_without_body( struct aws_allocator *allocator, struct aws_byte_cursor *host, struct aws_byte_cursor content_type, struct aws_byte_cursor key, uint64_t content_length, uint32_t flags) { AWS_PRECONDITION(allocator); struct aws_http_message *message = aws_http_message_new_request(allocator); if (message == NULL) { return NULL; } if (host) { struct aws_http_header host_header = {.name = g_host_header_name, .value = *host}; if (aws_http_message_add_header(message, host_header)) { goto error_clean_up_message; } } struct aws_http_header content_type_header = {.name = g_content_type_header_name, .value = content_type}; char content_length_buffer[64] = ""; snprintf(content_length_buffer, sizeof(content_length_buffer), "%" PRIu64 "", content_length); struct aws_http_header content_length_header = { .name = g_content_length_header_name, .value = aws_byte_cursor_from_c_str(content_length_buffer), }; struct aws_http_header sse_kms_header = {.name = g_s3_sse_header, .value = aws_byte_cursor_from_c_str("aws:kms")}; struct aws_http_header sse_aes256_header = {.name = g_s3_sse_header, .value = aws_byte_cursor_from_c_str("AES256")}; struct aws_http_header acl_public_read_header = { .name = g_acl_header_name, .value = aws_byte_cursor_from_c_str("bucket-owner-read"), }; if (aws_http_message_add_header(message, content_type_header)) { goto error_clean_up_message; } if (aws_http_message_add_header(message, content_length_header)) { goto error_clean_up_message; } if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_KMS) { if (aws_http_message_add_header(message, sse_kms_header)) { goto error_clean_up_message; } } if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_AES256) { if (aws_http_message_add_header(message, sse_aes256_header)) { goto error_clean_up_message; } } if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_C_AES256) { struct aws_http_header sse_c_alg_header = { .name = g_s3_sse_c_alg_header, .value = aws_byte_cursor_from_c_str("AES256"), }; struct aws_http_header sse_c_key_header = { .name = g_s3_sse_c_key_header, .value = aws_byte_cursor_from_c_str("MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDE="), }; struct aws_http_header sse_c_key_md5_header = { .name = g_s3_sse_c_key_md5_header, .value = aws_byte_cursor_from_c_str("5inxltxhJQnqmmHfVSQjoA=="), }; if (aws_http_message_add_header(message, sse_c_alg_header) || aws_http_message_add_header(message, sse_c_key_header) || aws_http_message_add_header(message, sse_c_key_md5_header)) { goto error_clean_up_message; } } if (flags & AWS_S3_TESTER_SEND_META_REQUEST_PUT_ACL) { if (aws_http_message_add_header(message, acl_public_read_header)) { goto error_clean_up_message; } } if (aws_http_message_set_request_method(message, aws_http_method_put)) { goto error_clean_up_message; } if (aws_http_message_set_request_path(message, key)) { goto error_clean_up_message; } return message; error_clean_up_message: if (message != NULL) { aws_http_message_release(message); message = NULL; } return NULL; } struct aws_http_message *aws_s3_test_put_object_request_new( struct aws_allocator *allocator, struct 
aws_byte_cursor *host, struct aws_byte_cursor key, struct aws_byte_cursor content_type, struct aws_input_stream *body_stream, uint32_t flags) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(body_stream); int64_t body_stream_length = 0; if (aws_input_stream_get_length(body_stream, &body_stream_length)) { return NULL; } struct aws_http_message *message = aws_s3_test_put_object_request_new_without_body( allocator, host, content_type, key, (uint64_t)body_stream_length, flags); if (!message) { return NULL; } aws_http_message_set_body_stream(message, body_stream); return message; } int aws_s3_tester_client_new( struct aws_s3_tester *tester, struct aws_s3_tester_client_options *options, struct aws_s3_client **out_client) { ASSERT_TRUE(tester != NULL); ASSERT_TRUE(options != NULL); ASSERT_TRUE(out_client != NULL); struct aws_s3_client_config client_config = { .part_size = options->part_size, .max_part_size = options->max_part_size, }; struct aws_http_proxy_options proxy_options = { .connection_type = AWS_HPCT_HTTP_FORWARD, .host = aws_byte_cursor_from_c_str("localhost"), .port = 8899, }; if (options->use_proxy) { client_config.proxy_options = &proxy_options; } struct aws_tls_connection_options tls_connection_options; AWS_ZERO_STRUCT(tls_connection_options); #ifndef BYO_CRYPTO struct aws_tls_ctx_options tls_context_options; aws_tls_ctx_options_init_default_client(&tls_context_options, tester->allocator); struct aws_tls_ctx *context = aws_tls_client_ctx_new(tester->allocator, &tls_context_options); aws_tls_connection_options_init_from_ctx(&tls_connection_options, context); #endif struct aws_string *endpoint = aws_s3_tester_build_endpoint_string(tester->allocator, &g_test_bucket_name, &g_test_s3_region); struct aws_byte_cursor endpoint_cursor = aws_byte_cursor_from_string(endpoint); tls_connection_options.server_name = aws_string_new_from_cursor(tester->allocator, &endpoint_cursor); switch (options->tls_usage) { case AWS_S3_TLS_ENABLED: client_config.tls_mode = AWS_MR_TLS_ENABLED; client_config.tls_connection_options = &tls_connection_options; break; case AWS_S3_TLS_DISABLED: client_config.tls_mode = AWS_MR_TLS_DISABLED; break; default: break; } ASSERT_SUCCESS(aws_s3_tester_bind_client( tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION | AWS_S3_TESTER_BIND_CLIENT_SIGNING)); *out_client = aws_s3_client_new(tester->allocator, &client_config); aws_string_destroy(endpoint); #ifndef BYO_CRYPTO aws_tls_ctx_release(context); aws_tls_ctx_options_clean_up(&tls_context_options); #endif aws_tls_connection_options_clean_up(&tls_connection_options); return AWS_OP_SUCCESS; } /* Disable tsan as we hack into the client threaded data */ AWS_SUPPRESS_TSAN static int s_tester_check_client_thread_data(struct aws_s3_client *client) { ASSERT_UINT_EQUALS(0, client->threaded_data.num_requests_being_prepared); ASSERT_UINT_EQUALS(0, client->threaded_data.request_queue_size); return AWS_OP_SUCCESS; } int aws_s3_tester_send_meta_request_with_options( struct aws_s3_tester *tester, struct aws_s3_tester_meta_request_options *options, struct aws_s3_meta_request_test_results *out_results) { ASSERT_TRUE(options != NULL); struct aws_allocator *allocator = options->allocator; struct aws_string *filepath_str = NULL; struct aws_s3_tester local_tester; AWS_ZERO_STRUCT(local_tester); bool clean_up_local_tester = false; if (tester == NULL) { ASSERT_TRUE(options->allocator); ASSERT_SUCCESS(aws_s3_tester_init(options->allocator, &local_tester)); tester = &local_tester; clean_up_local_tester = true; } else if (allocator == NULL) { 
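/* A tester was supplied but no allocator override: fall back to the tester's allocator. */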
allocator = tester->allocator; } struct aws_s3_client *client = options->client; struct aws_uri mock_server; ASSERT_SUCCESS(aws_uri_init_parse(&mock_server, allocator, &g_mock_server_uri)); if (client == NULL) { if (options->client_options != NULL) { ASSERT_SUCCESS(aws_s3_tester_client_new(tester, options->client_options, &client)); } else { struct aws_s3_tester_client_options client_options; AWS_ZERO_STRUCT(client_options); ASSERT_SUCCESS(aws_s3_tester_client_new(tester, &client_options, &client)); } } else { aws_s3_client_acquire(client); } struct aws_s3_checksum_config checksum_config = { .checksum_algorithm = options->checksum_algorithm, .validate_response_checksum = options->validate_get_response_checksum, .location = options->checksum_algorithm == AWS_SCA_NONE ? AWS_SCL_NONE : AWS_SCL_TRAILER, .validate_checksum_algorithms = options->validate_checksum_algorithms, }; struct aws_s3_meta_request_options meta_request_options = { .type = options->meta_request_type, .operation_name = options->default_type_options.operation_name, .message = options->message, .checksum_config = &checksum_config, .resume_token = options->put_options.resume_token, .object_size_hint = options->object_size_hint, }; if (options->mock_server) { meta_request_options.endpoint = &mock_server; } if (options->signing_config) { meta_request_options.signing_config = options->signing_config; } struct aws_signing_config_aws signing_config = { .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, .service = g_s3express_service_name, }; meta_request_options.signing_config = options->use_s3express_signing ? &signing_config : meta_request_options.signing_config; struct aws_byte_buf input_stream_buffer; AWS_ZERO_STRUCT(input_stream_buffer); struct aws_input_stream *input_stream = NULL; struct aws_async_input_stream *async_stream = NULL; size_t upload_size_bytes = 0; if (meta_request_options.message == NULL) { const struct aws_byte_cursor *bucket_name = options->bucket_name; if (bucket_name == NULL) { bucket_name = &g_test_bucket_name; } struct aws_string *host_name = NULL; if (options->mock_server) { const struct aws_byte_cursor *host_cursor = aws_uri_authority(&mock_server); host_name = aws_string_new_from_cursor(allocator, host_cursor); } else if (options->mrap_test) { host_name = aws_string_new_from_cursor(allocator, &g_test_mrap_endpoint); } else { host_name = aws_s3_tester_build_endpoint_string(allocator, bucket_name, &g_test_s3_region); } if (meta_request_options.type == AWS_S3_META_REQUEST_TYPE_GET_OBJECT || (meta_request_options.type == AWS_S3_META_REQUEST_TYPE_DEFAULT && options->default_type_options.mode == AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET)) { struct aws_http_message *message = aws_s3_test_get_object_request_new( allocator, aws_byte_cursor_from_string(host_name), options->get_options.object_path); ASSERT_SUCCESS(aws_s3_message_util_set_multipart_request_path( allocator, NULL /*upload_id*/, options->get_options.part_number, false /*append_uploads_suffix*/, message)); if (options->get_options.object_range.ptr != NULL) { struct aws_http_header range_header = { .name = g_range_header_name, .value = options->get_options.object_range, }; aws_http_message_add_header(message, range_header); } meta_request_options.message = message; } else if ( meta_request_options.type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT || (meta_request_options.type == AWS_S3_META_REQUEST_TYPE_DEFAULT && options->default_type_options.mode == AWS_S3_TESTER_DEFAULT_TYPE_MODE_PUT)) { uint32_t object_size_mb = options->put_options.object_size_mb; 
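/* Convert MiB to bytes: e.g. object_size_mb == 16 yields upload_size_bytes == 16 * 1024 * 1024 == 16777216. When
 * ensure_multipart is set and no size was given, the code below picks twice the client part size, intending to force
 * a multipart upload (see the existing caveat about g_min_upload_part_size just below). */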
upload_size_bytes = (size_t)object_size_mb * 1024ULL * 1024ULL; /* This doesn't do what we think it should because * g_min_upload_part_size overrides client->part_size */ if (options->put_options.ensure_multipart) { if (upload_size_bytes == 0) { upload_size_bytes = client->part_size * 2; object_size_mb = (uint32_t)(upload_size_bytes / 1024 / 1024); } ASSERT_TRUE(upload_size_bytes > client->part_size); } struct aws_byte_buf object_path_buffer; aws_byte_buf_init(&object_path_buffer, allocator, 128); if (options->put_options.object_path_override.ptr != NULL) { aws_byte_buf_append_dynamic(&object_path_buffer, &options->put_options.object_path_override); } else { char object_path_sprintf_buffer[128] = ""; switch (options->sse_type) { case AWS_S3_TESTER_SSE_NONE: snprintf( object_path_sprintf_buffer, sizeof(object_path_sprintf_buffer), "" PRInSTR "-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), object_size_mb); break; case AWS_S3_TESTER_SSE_KMS: snprintf( object_path_sprintf_buffer, sizeof(object_path_sprintf_buffer), "" PRInSTR "-kms-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), object_size_mb); break; case AWS_S3_TESTER_SSE_AES256: snprintf( object_path_sprintf_buffer, sizeof(object_path_sprintf_buffer), "" PRInSTR "-aes256-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), object_size_mb); break; case AWS_S3_TESTER_SSE_C_AES256: snprintf( object_path_sprintf_buffer, sizeof(object_path_sprintf_buffer), "" PRInSTR "-aes256-c-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), object_size_mb); break; default: break; } struct aws_byte_cursor sprintf_buffer_cursor = aws_byte_cursor_from_c_str(object_path_sprintf_buffer); aws_byte_buf_append_dynamic(&object_path_buffer, &sprintf_buffer_cursor); } struct aws_byte_cursor test_object_path = aws_byte_cursor_from_buf(&object_path_buffer); struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(host_name); /* Create "tester" stream with appropriate options */ struct aws_async_input_stream_tester_options stream_options = { .base = { .autogen_length = upload_size_bytes, .eof_requires_extra_read = options->put_options.eof_requires_extra_read, .max_bytes_per_read = options->put_options.max_bytes_per_read, }, }; if (options->put_options.invalid_input_stream) { stream_options.base.fail_on_nth_read = 1; stream_options.base.fail_with_error_code = AWS_IO_STREAM_READ_FAILED; } if (options->put_options.async_input_stream) { stream_options.completion_strategy = options->put_options.async_read_strategy; stream_options.read_duration_ns = MS_TO_NS(100); /* have async reads take a bit of time. */ async_stream = aws_async_input_stream_new_tester(allocator, &stream_options); ASSERT_NOT_NULL(async_stream); meta_request_options.send_async_stream = async_stream; } else { input_stream = aws_input_stream_new_tester(allocator, &stream_options.base); ASSERT_NOT_NULL(input_stream); } /* if uploading via filepath, write input_stream out as tmp file on disk, and then upload that */ if (options->put_options.file_on_disk) { ASSERT_NOT_NULL(input_stream); filepath_str = aws_s3_tester_create_file(allocator, test_object_path, input_stream); meta_request_options.send_filepath = aws_byte_cursor_from_string(filepath_str); input_stream = aws_input_stream_release(input_stream); } /* Put together a simple S3 Put Object request. 
*/ struct aws_http_message *message; if (input_stream != NULL) { message = aws_s3_test_put_object_request_new( allocator, &host_cur, test_object_path, g_test_body_content_type, input_stream, options->sse_type); } else { message = aws_s3_test_put_object_request_new_without_body( allocator, &host_cur, g_test_body_content_type, test_object_path, upload_size_bytes, options->sse_type); } if (options->put_options.content_length) { /* make a invalid request */ char content_length_buffer[64] = ""; snprintf( content_length_buffer, sizeof(content_length_buffer), "%zu", options->put_options.content_length); struct aws_http_headers *headers = aws_http_message_get_headers(message); aws_http_headers_set( headers, g_content_length_header_name, aws_byte_cursor_from_c_str(content_length_buffer)); } if (options->put_options.skip_content_length) { struct aws_http_headers *headers = aws_http_message_get_headers(message); aws_http_headers_erase(headers, g_content_length_header_name); } if (options->put_options.invalid_request) { /* make a invalid request */ aws_http_message_set_request_path(message, aws_byte_cursor_from_c_str("invalid_path")); } if (options->put_options.content_encoding.ptr != NULL) { struct aws_http_header content_encoding_header = { .name = g_content_encoding_header_name, .value = options->put_options.content_encoding, }; aws_http_message_add_header(message, content_encoding_header); } meta_request_options.message = message; aws_byte_buf_clean_up(&object_path_buffer); } ASSERT_TRUE(meta_request_options.message != NULL); aws_string_destroy(host_name); } else { aws_http_message_acquire(meta_request_options.message); if (options->meta_request_type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) { /* Figure out how much is being uploaded from pre-existing message */ struct aws_input_stream *mystery_stream = aws_http_message_get_body_stream(meta_request_options.message); if (mystery_stream != NULL) { ASSERT_SUCCESS(aws_input_stream_get_length(mystery_stream, (int64_t *)&upload_size_bytes)); } } } struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); if (out_results == NULL) { out_results = &meta_request_test_results; } out_results->headers_callback = options->headers_callback; out_results->body_callback = options->body_callback; out_results->finish_callback = options->finish_callback; out_results->progress_callback = options->progress_callback; out_results->upload_review_callback = options->upload_review_callback; out_results->algorithm = options->expected_validate_checksum_alg; ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(tester, &meta_request_options, out_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &meta_request_options); if (meta_request == NULL) { out_results->finished_error_code = aws_last_error(); } aws_http_message_release(meta_request_options.message); meta_request_options.message = NULL; if (meta_request != NULL) { /* Wait for the request to finish. 
*/ aws_s3_tester_wait_for_meta_request_finish(tester); ASSERT_TRUE(aws_s3_meta_request_is_finished(meta_request)); } switch (options->validate_type) { case AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS: ASSERT_INT_EQUALS(AWS_ERROR_SUCCESS, out_results->finished_error_code); if (meta_request_options.type == AWS_S3_META_REQUEST_TYPE_GET_OBJECT) { ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(out_results, options->sse_type)); } else if (meta_request_options.type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) { ASSERT_SUCCESS(aws_s3_tester_validate_put_object_results(out_results, options->sse_type)); /* Expected number of bytes should have been read from stream, and reported via progress callbacks */ if (input_stream != NULL) { ASSERT_UINT_EQUALS(upload_size_bytes, aws_input_stream_tester_total_bytes_read(input_stream)); } else if (async_stream != NULL) { ASSERT_UINT_EQUALS(upload_size_bytes, aws_async_input_stream_tester_total_bytes_read(async_stream)); } ASSERT_UINT_EQUALS(upload_size_bytes, out_results->progress.total_bytes_transferred); if (!options->put_options.skip_content_length) { ASSERT_UINT_EQUALS(upload_size_bytes, out_results->progress.content_length); } } ASSERT_UINT_EQUALS(0, aws_atomic_load_int(&client->stats.num_requests_in_flight)); ASSERT_UINT_EQUALS(0, aws_atomic_load_int(&client->stats.num_requests_stream_queued_waiting)); ASSERT_UINT_EQUALS(0, aws_atomic_load_int(&client->stats.num_requests_streaming_response)); ASSERT_SUCCESS(s_tester_check_client_thread_data(client)); break; case AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE: ASSERT_FALSE(out_results->finished_error_code == AWS_ERROR_SUCCESS); break; case AWS_S3_TESTER_VALIDATE_TYPE_NO_VALIDATE: break; default: ASSERT_TRUE(false); break; } if (meta_request != NULL) { out_results->part_size = meta_request->part_size; meta_request = aws_s3_meta_request_release(meta_request); if (!options->dont_wait_for_shutdown) { aws_s3_tester_wait_for_meta_request_shutdown(tester); } } aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_s3_client_release(client); aws_input_stream_release(input_stream); input_stream = NULL; async_stream = aws_async_input_stream_release(async_stream); aws_byte_buf_clean_up(&input_stream_buffer); if (clean_up_local_tester) { aws_s3_tester_clean_up(&local_tester); } aws_uri_clean_up(&mock_server); if (filepath_str) { aws_file_delete(filepath_str); aws_string_destroy(filepath_str); } return AWS_OP_SUCCESS; } int aws_s3_tester_send_meta_request( struct aws_s3_tester *tester, struct aws_s3_client *client, struct aws_s3_meta_request_options *options, struct aws_s3_meta_request_test_results *test_results, uint32_t flags) { ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(tester, options, test_results)); struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, options); ASSERT_TRUE(meta_request != NULL); if (flags & AWS_S3_TESTER_SEND_META_REQUEST_CANCEL) { /* take a random sleep from 0-1 ms. */ srand((uint32_t)time(NULL)); aws_thread_current_sleep(rand() % MS_TO_NS(1)); aws_s3_meta_request_cancel(meta_request); } /* Wait for the request to finish. 
*/ aws_s3_tester_wait_for_meta_request_finish(tester); ASSERT_TRUE(aws_s3_meta_request_is_finished(meta_request)); aws_s3_tester_lock_synced_data(tester); if (flags & AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS) { ASSERT_TRUE(tester->synced_data.finish_error_code == AWS_ERROR_SUCCESS); } else if (flags & AWS_S3_TESTER_SEND_META_REQUEST_CANCEL) { ASSERT_TRUE(tester->synced_data.finish_error_code == AWS_ERROR_S3_CANCELED); } else { ASSERT_FALSE(tester->synced_data.finish_error_code == AWS_ERROR_SUCCESS); } aws_s3_tester_unlock_synced_data(tester); test_results->part_size = meta_request->part_size; aws_s3_meta_request_release(meta_request); if ((flags & AWS_S3_TESTER_SEND_META_REQUEST_DONT_WAIT_FOR_SHUTDOWN) == 0) { aws_s3_tester_wait_for_meta_request_shutdown(tester); } return AWS_OP_SUCCESS; } int aws_s3_tester_send_get_object_meta_request( struct aws_s3_tester *tester, struct aws_s3_client *client, struct aws_byte_cursor s3_path, uint32_t flags, struct aws_s3_meta_request_test_results *out_results) { struct aws_string *host_name = aws_s3_tester_build_endpoint_string(tester->allocator, &g_test_bucket_name, &g_test_s3_region); /* Put together a simple S3 Get Object request. */ struct aws_http_message *message = aws_s3_test_get_object_request_new(tester->allocator, aws_byte_cursor_from_string(host_name), s3_path); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; options.message = message; /* Trigger accelerating of our Get Object request. */ struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, tester->allocator); if (out_results == NULL) { out_results = &meta_request_test_results; } ASSERT_SUCCESS(aws_s3_tester_send_meta_request(tester, client, &options, out_results, flags)); if (flags & AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS) { ASSERT_SUCCESS(aws_s3_tester_validate_get_object_results(out_results, flags)); } aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); aws_string_destroy(host_name); return AWS_OP_SUCCESS; } int aws_s3_tester_validate_get_object_results( struct aws_s3_meta_request_test_results *meta_request_test_results, uint32_t flags) { AWS_PRECONDITION(meta_request_test_results); AWS_PRECONDITION(meta_request_test_results->tester); ASSERT_TRUE(meta_request_test_results->response_headers != NULL); if (aws_http_headers_has( meta_request_test_results->response_headers, aws_byte_cursor_from_c_str("Content-Range"))) { ASSERT_TRUE(meta_request_test_results->finished_response_status == 206); } else { ASSERT_TRUE(meta_request_test_results->finished_response_status == 200); } ASSERT_TRUE( meta_request_test_results->finished_response_status == meta_request_test_results->headers_response_status); ASSERT_TRUE(meta_request_test_results->finished_error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(meta_request_test_results->error_response_headers == NULL); ASSERT_TRUE(meta_request_test_results->error_response_body.len == 0); ASSERT_NULL(meta_request_test_results->error_response_operation_name); struct aws_byte_cursor sse_byte_cursor; if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_KMS) { ASSERT_SUCCESS( aws_http_headers_get(meta_request_test_results->response_headers, g_s3_sse_header, &sse_byte_cursor)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&sse_byte_cursor, "aws:kms")); } if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_AES256) { ASSERT_SUCCESS( 
aws_http_headers_get(meta_request_test_results->response_headers, g_s3_sse_header, &sse_byte_cursor)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&sse_byte_cursor, "AES256")); } if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_C_AES256) { ASSERT_SUCCESS( aws_http_headers_get(meta_request_test_results->response_headers, g_s3_sse_c_alg_header, &sse_byte_cursor)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&sse_byte_cursor, "AES256")); } uint64_t content_length = 0; ASSERT_SUCCESS(aws_s3_tester_get_content_length(meta_request_test_results->response_headers, &content_length)); AWS_LOGF_DEBUG( AWS_LS_S3_GENERAL, "Content length in header is %" PRIu64 " and received body size is %" PRIu64, content_length, meta_request_test_results->received_body_size); ASSERT_TRUE(content_length == meta_request_test_results->received_body_size); ASSERT_UINT_EQUALS(content_length, meta_request_test_results->progress.total_bytes_transferred); ASSERT_UINT_EQUALS(content_length, meta_request_test_results->progress.content_length); return AWS_OP_SUCCESS; } /* Avoid using this function as it will soon go away. Use aws_s3_tester_send_meta_request_with_options instead.*/ int aws_s3_tester_send_put_object_meta_request( struct aws_s3_tester *tester, struct aws_s3_client *client, uint32_t file_size_mb, uint32_t flags, struct aws_s3_meta_request_test_results *out_results) { ASSERT_TRUE(tester != NULL); ASSERT_TRUE(client != NULL); struct aws_allocator *allocator = tester->allocator; struct aws_byte_buf test_buffer; aws_s3_create_test_buffer(allocator, (size_t)file_size_mb * 1024ULL * 1024ULL, &test_buffer); struct aws_byte_cursor test_body_cursor = aws_byte_cursor_from_buf(&test_buffer); struct aws_input_stream *input_stream = aws_input_stream_new_from_cursor(allocator, &test_body_cursor); struct aws_string *host_name = aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); char object_path_buffer[128] = ""; if (flags & AWS_S3_TESTER_SEND_META_REQUEST_PUT_ACL) { snprintf( object_path_buffer, sizeof(object_path_buffer), "" PRInSTR "-acl-public-read-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), file_size_mb); } else { snprintf( object_path_buffer, sizeof(object_path_buffer), "" PRInSTR "-%uMB.txt", AWS_BYTE_CURSOR_PRI(g_put_object_prefix), file_size_mb); } struct aws_byte_cursor test_object_path = aws_byte_cursor_from_c_str(object_path_buffer); struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(host_name); /* Put together a simple S3 Put Object request. 
*/ struct aws_http_message *message = aws_s3_test_put_object_request_new( allocator, &host_cur, test_object_path, g_test_body_content_type, input_stream, flags); if (flags & AWS_S3_TESTER_SEND_META_REQUEST_WITH_CORRECT_CONTENT_MD5) { ASSERT_SUCCESS(aws_s3_message_util_add_content_md5_header(allocator, &test_buffer, message)); } else if (flags & AWS_S3_TESTER_SEND_META_REQUEST_WITH_INCORRECT_CONTENT_MD5) { struct aws_http_header content_md5_header = { .name = g_content_md5_header_name, .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("dummy_content_md5"), }; ASSERT_SUCCESS(aws_http_message_add_header(message, content_md5_header)); } struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; options.message = message; struct aws_s3_meta_request_test_results meta_request_test_results; aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); if (out_results == NULL) { out_results = &meta_request_test_results; } ASSERT_SUCCESS(aws_s3_tester_send_meta_request(tester, client, &options, out_results, flags)); if (flags & AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS) { ASSERT_SUCCESS(aws_s3_tester_validate_put_object_results(out_results, flags)); } aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); aws_http_message_release(message); message = NULL; aws_string_destroy(host_name); host_name = NULL; aws_input_stream_release(input_stream); input_stream = NULL; aws_byte_buf_clean_up(&test_buffer); return AWS_OP_SUCCESS; } /* Avoid using this function as it will soon go away. Use aws_s3_tester_send_meta_request_with_options instead.*/ int aws_s3_tester_validate_put_object_results( struct aws_s3_meta_request_test_results *meta_request_test_results, uint32_t flags) { ASSERT_TRUE(meta_request_test_results->finished_response_status == 200); ASSERT_TRUE( meta_request_test_results->finished_response_status == meta_request_test_results->headers_response_status); ASSERT_TRUE(meta_request_test_results->finished_error_code == AWS_ERROR_SUCCESS); ASSERT_TRUE(meta_request_test_results->error_response_headers == NULL); ASSERT_TRUE(meta_request_test_results->error_response_body.len == 0); ASSERT_NULL(meta_request_test_results->error_response_operation_name); struct aws_byte_cursor etag_byte_cursor; AWS_ZERO_STRUCT(etag_byte_cursor); ASSERT_SUCCESS( aws_http_headers_get(meta_request_test_results->response_headers, g_etag_header_name, &etag_byte_cursor)); struct aws_byte_cursor sse_byte_cursor; if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_KMS) { ASSERT_SUCCESS( aws_http_headers_get(meta_request_test_results->response_headers, g_s3_sse_header, &sse_byte_cursor)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&sse_byte_cursor, "aws:kms")); } if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_AES256) { ASSERT_SUCCESS( aws_http_headers_get(meta_request_test_results->response_headers, g_s3_sse_header, &sse_byte_cursor)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&sse_byte_cursor, "AES256")); } ASSERT_TRUE(etag_byte_cursor.len > 0); if (flags & AWS_S3_TESTER_SEND_META_REQUEST_SSE_C_AES256) { ASSERT_SUCCESS( aws_http_headers_get(meta_request_test_results->response_headers, g_s3_sse_c_alg_header, &sse_byte_cursor)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&sse_byte_cursor, "AES256")); } struct aws_byte_cursor quote_entity = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("&quot;"); if (etag_byte_cursor.len >= quote_entity.len) { for (size_t i = 0; i < (etag_byte_cursor.len - quote_entity.len + 1); ++i) { ASSERT_TRUE( strncmp((const char
*)&etag_byte_cursor.ptr[i], (const char *)quote_entity.ptr, quote_entity.len) != 0); } } return AWS_OP_SUCCESS; } int aws_s3_tester_upload_file_path_init( struct aws_allocator *allocator, struct aws_byte_buf *out_path_buffer, struct aws_byte_cursor file_path) { ASSERT_SUCCESS(aws_byte_buf_init_copy_from_cursor(out_path_buffer, allocator, g_upload_folder)); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(out_path_buffer, &file_path)); return AWS_OP_SUCCESS; } int aws_s3_tester_get_content_length(const struct aws_http_headers *headers, uint64_t *out_content_length) { struct aws_byte_cursor value_cursor; AWS_ZERO_STRUCT(value_cursor); ASSERT_SUCCESS(aws_http_headers_get(headers, aws_byte_cursor_from_c_str("Content-Length"), &value_cursor)); ASSERT_SUCCESS(aws_byte_cursor_utf8_parse_u64(value_cursor, out_content_length)); return AWS_OP_SUCCESS; } /* The default mock response is: sessionToken secretKey accessKeyId 2023-06-26T17:33:30Z */ int aws_s3_tester_check_s3express_creds_for_default_mock_response(struct aws_credentials *credentials) { struct aws_byte_cursor result; bool match = true; result = aws_credentials_get_access_key_id(credentials); match &= aws_byte_cursor_eq_c_str(&result, "accessKeyId"); result = aws_credentials_get_secret_access_key(credentials); match &= aws_byte_cursor_eq_c_str(&result, "secretKey"); result = aws_credentials_get_session_token(credentials); match &= aws_byte_cursor_eq_c_str(&result, "sessionToken"); uint64_t expiration_secs = aws_credentials_get_expiration_timepoint_seconds(credentials); ASSERT_UINT_EQUALS(1687800810, expiration_secs); ASSERT_TRUE(match); return AWS_OP_SUCCESS; } struct aws_string *aws_s3_tester_create_file( struct aws_allocator *allocator, struct aws_byte_cursor test_object_path, struct aws_input_stream *input_stream) { struct aws_byte_buf filepath_buf; aws_byte_buf_init(&filepath_buf, allocator, 128); struct aws_byte_cursor filepath_prefix = aws_byte_cursor_from_c_str("tmp"); aws_byte_buf_append_dynamic(&filepath_buf, &filepath_prefix); aws_byte_buf_append_dynamic(&filepath_buf, &test_object_path); for (size_t i = 0; i < filepath_buf.len; ++i) { if (!isalnum(filepath_buf.buffer[i])) { filepath_buf.buffer[i] = '_'; /* sanitize filename */ } } struct aws_string *filepath_str = aws_string_new_from_buf(allocator, &filepath_buf); aws_byte_buf_clean_up(&filepath_buf); FILE *file = aws_fopen(aws_string_c_str(filepath_str), "wb"); AWS_FATAL_ASSERT(file != NULL); int64_t stream_length = 0; AWS_FATAL_ASSERT(aws_input_stream_get_length(input_stream, &stream_length) == AWS_OP_SUCCESS); struct aws_byte_buf data_buf; AWS_FATAL_ASSERT(aws_byte_buf_init(&data_buf, allocator, (size_t)stream_length) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT(aws_input_stream_read(input_stream, &data_buf) == AWS_OP_SUCCESS); AWS_FATAL_ASSERT((size_t)stream_length == data_buf.len); AWS_FATAL_ASSERT(data_buf.len == fwrite(data_buf.buffer, 1, data_buf.len, file)); fclose(file); aws_byte_buf_clean_up(&data_buf); return filepath_str; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_tester.h000066400000000000000000000472641456575232400230200ustar00rootroot00000000000000#ifndef AWS_S3_TESTER_H #define AWS_S3_TESTER_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include "aws/s3/private/s3_request_messages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include struct aws_client_bootstrap; struct aws_credentials_provider; struct aws_event_loop_group; struct aws_host_resolver; struct aws_input_stream; enum AWS_S3_TESTER_BIND_CLIENT_FLAGS { AWS_S3_TESTER_BIND_CLIENT_REGION = 0x00000001, AWS_S3_TESTER_BIND_CLIENT_SIGNING = 0x00000002, }; enum AWS_S3_TESTER_SEND_META_REQUEST_FLAGS { AWS_S3_TESTER_SEND_META_REQUEST_EXPECT_SUCCESS = 0x00000001, AWS_S3_TESTER_SEND_META_REQUEST_DONT_WAIT_FOR_SHUTDOWN = 0x00000002, AWS_S3_TESTER_SEND_META_REQUEST_CANCEL = 0x00000004, AWS_S3_TESTER_SEND_META_REQUEST_SSE_KMS = 0x00000008, AWS_S3_TESTER_SEND_META_REQUEST_SSE_AES256 = 0x00000010, /* Testing put object with x-amz-acl: bucket-owner-read */ AWS_S3_TESTER_SEND_META_REQUEST_PUT_ACL = 0x00000020, AWS_S3_TESTER_SEND_META_REQUEST_WITH_CORRECT_CONTENT_MD5 = 0x00000040, AWS_S3_TESTER_SEND_META_REQUEST_WITH_INCORRECT_CONTENT_MD5 = 0x00000080, AWS_S3_TESTER_SEND_META_REQUEST_SSE_C_AES256 = 0x00000100, }; enum aws_s3_tester_sse_type { AWS_S3_TESTER_SSE_NONE = 0, AWS_S3_TESTER_SSE_KMS = 0x00000008, AWS_S3_TESTER_SSE_AES256 = 0x00000010, AWS_S3_TESTER_SSE_C_AES256 = 0x00000100, }; enum aws_s3_client_tls_usage { AWS_S3_TLS_DEFAULT, AWS_S3_TLS_ENABLED, AWS_S3_TLS_DISABLED, }; enum aws_s3_tester_validate_type { AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE, AWS_S3_TESTER_VALIDATE_TYPE_NO_VALIDATE, }; enum aws_s3_tester_default_type_mode { AWS_S3_TESTER_DEFAULT_TYPE_MODE_GET, AWS_S3_TESTER_DEFAULT_TYPE_MODE_PUT, }; struct aws_s3_client_vtable_patch { struct aws_s3_client_vtable *original_vtable; struct aws_s3_client_vtable patched_vtable; }; struct aws_s3_meta_request_vtable_patch { struct aws_s3_meta_request_vtable *original_vtable; struct aws_s3_meta_request_vtable patched_vtable; }; /* Utility for setting up commonly needed resources for tests. */ struct aws_s3_tester { struct aws_allocator *allocator; struct aws_event_loop_group *el_group; struct aws_host_resolver *host_resolver; struct aws_client_bootstrap *client_bootstrap; struct aws_credentials_provider *credentials_provider; struct aws_signing_config_aws default_signing_config; struct aws_credentials *anonymous_creds; struct aws_signing_config_aws anonymous_signing_config; struct aws_condition_variable signal; bool bound_to_client; struct aws_array_list client_vtable_patches; struct aws_array_list meta_request_vtable_patches; void *user_data; struct aws_string *bucket_name; struct aws_string *public_bucket_name; struct aws_string *s3express_bucket_usw2_az1_endpoint; struct aws_string *s3express_bucket_use1_az4_endpoint; struct { struct aws_mutex lock; size_t desired_meta_request_finish_count; size_t meta_request_finish_count; size_t desired_meta_request_shutdown_count; size_t meta_request_shutdown_count; size_t counter1; size_t desired_counter1; size_t counter2; size_t desired_counter2; int finish_error_code; uint32_t meta_requests_finished : 1; uint32_t meta_requests_shutdown : 1; uint32_t client_shutdown : 1; } synced_data; }; struct aws_s3_tester_client_options { enum aws_s3_client_tls_usage tls_usage; uint64_t part_size; size_t max_part_size; uint32_t setup_region : 1; uint32_t use_proxy : 1; }; /* should really break this up to a client setup, and a meta_request sending */ struct aws_s3_tester_meta_request_options { /* Optional if a valid aws_s3_tester was passed as an argument to the function. 
When NULL, the aws_s3_tester's * allocator will be used. */ struct aws_allocator *allocator; enum aws_s3_meta_request_type meta_request_type; /* Optional. When NULL, a message will attempted to be created by the meta request type specific options. */ struct aws_http_message *message; /* Optional. If NULL, a client will be created. */ struct aws_s3_client *client; /* Optional. Bucket for this request. If NULL, g_test_bucket_name will be used. */ const struct aws_byte_cursor *bucket_name; /* Optional. Used to create a client when the specified client is NULL. If NULL, default options will be used. */ struct aws_s3_tester_client_options *client_options; /* Optional, when enabled, the test will run against local server instead. */ bool mock_server; bool validate_get_response_checksum; enum aws_s3_checksum_algorithm checksum_algorithm; struct aws_array_list *validate_checksum_algorithms; enum aws_s3_checksum_algorithm expected_validate_checksum_alg; /* override client signing config */ struct aws_signing_config_aws *signing_config; /* use S3 Express signing config */ bool use_s3express_signing; aws_s3_meta_request_headers_callback_fn *headers_callback; aws_s3_meta_request_receive_body_callback_fn *body_callback; aws_s3_meta_request_finish_fn *finish_callback; aws_s3_meta_request_progress_fn *progress_callback; aws_s3_meta_request_upload_review_fn *upload_review_callback; /* Default Meta Request specific options. */ struct { enum aws_s3_tester_default_type_mode mode; struct aws_byte_cursor operation_name; } default_type_options; /* Get Object Meta Request specific options.*/ struct { struct aws_byte_cursor object_path; struct aws_byte_cursor object_range; /* Get the part from S3, starts from 1. 0 means not set. */ int part_number; } get_options; /* Put Object Meta request specific options. */ struct { struct aws_byte_cursor object_path_override; uint32_t object_size_mb; bool ensure_multipart; bool async_input_stream; /* send via async stream */ enum aws_async_read_completion_strategy async_read_strategy; size_t max_bytes_per_read; /* test an input-stream read() that doesn't always fill the buffer */ bool file_on_disk; /* write to file on disk, then send via aws_s3_meta_request_options.send_filepath */ /* If false, EOF is reported by the read() which produces the last few bytes. * If true, EOF isn't reported until there's one more read(), producing zero bytes. 
* This emulates an underlying stream that reports EOF by reading 0 bytes */ bool eof_requires_extra_read; bool invalid_request; bool invalid_input_stream; bool valid_md5; bool invalid_md5; struct aws_s3_meta_request_resume_token *resume_token; /* manually overwrite the content length for some invalid input stream */ size_t content_length; bool skip_content_length; struct aws_byte_cursor content_encoding; } put_options; enum aws_s3_tester_sse_type sse_type; enum aws_s3_tester_validate_type validate_type; uint32_t dont_wait_for_shutdown : 1; uint32_t mrap_test : 1; uint64_t *object_size_hint; }; /* TODO Rename to something more generic such as "aws_s3_meta_request_test_data" */ struct aws_s3_meta_request_test_results { struct aws_allocator *allocator; struct aws_s3_tester *tester; aws_s3_meta_request_headers_callback_fn *headers_callback; aws_s3_meta_request_receive_body_callback_fn *body_callback; aws_s3_meta_request_finish_fn *finish_callback; aws_s3_meta_request_progress_fn *progress_callback; aws_s3_meta_request_upload_review_fn *upload_review_callback; struct aws_http_headers *error_response_headers; struct aws_byte_buf error_response_body; struct aws_string *error_response_operation_name; size_t part_size; int headers_response_status; struct aws_http_headers *response_headers; uint64_t expected_range_start; uint64_t received_body_size; /* an atomic for tests that want to check from the main thread whether data is still arriving */ struct aws_atomic_var received_body_size_delta; int finished_response_status; int finished_error_code; enum aws_s3_checksum_algorithm algorithm; /* Record data from progress_callback() */ struct { uint64_t content_length; /* Remember progress->content_length */ uint64_t total_bytes_transferred; /* Accumulator for progress->bytes_transferred */ } progress; /* Protected by the tester->synced_data.lock */ struct { /* The array_list of `struct aws_s3_request_metrics *` */ struct aws_array_list metrics; } synced_data; /* record data from the upload_review_callback */ struct { size_t invoked_count; enum aws_s3_checksum_algorithm checksum_algorithm; size_t part_count; uint64_t *part_sizes_array; struct aws_string **part_checksums_array; } upload_review; }; struct aws_s3_client_config; int aws_s3_tester_init(struct aws_allocator *allocator, struct aws_s3_tester *tester); /* Set up the aws_s3_client's shutdown callbacks to be used by the tester. This allows the tester to wait for the * client to clean up. */ int aws_s3_tester_bind_client(struct aws_s3_tester *tester, struct aws_s3_client_config *config, uint32_t flags); int aws_s3_tester_bind_meta_request( struct aws_s3_tester *tester, struct aws_s3_meta_request_options *options, struct aws_s3_meta_request_test_results *test_meta_request); void aws_s3_meta_request_test_results_init( struct aws_s3_meta_request_test_results *test_meta_request, struct aws_allocator *allocator); void aws_s3_meta_request_test_results_clean_up(struct aws_s3_meta_request_test_results *test_meta_request); /* Wait for the correct number of aws_s3_tester_notify_meta_request_finished to be called */ void aws_s3_tester_wait_for_meta_request_finish(struct aws_s3_tester *tester); /* Wait for the correct number of aws_s3_tester_notify_meta_request_shutdown to be called. */ void aws_s3_tester_wait_for_meta_request_shutdown(struct aws_s3_tester *tester); /* Notify the tester that a meta request has finished.
*/ void aws_s3_tester_notify_meta_request_finished( struct aws_s3_tester *tester, const struct aws_s3_meta_request_result *result); /* Notify the tester that a meta request has shut down. */ void aws_s3_tester_notify_meta_request_shutdown(struct aws_s3_tester *tester); void aws_s3_tester_wait_for_signal(struct aws_s3_tester *tester); void aws_s3_tester_notify_signal(struct aws_s3_tester *tester); void aws_s3_tester_wait_for_counters(struct aws_s3_tester *tester); size_t aws_s3_tester_inc_counter1(struct aws_s3_tester *tester); size_t aws_s3_tester_inc_counter2(struct aws_s3_tester *tester); void aws_s3_tester_reset_counter1(struct aws_s3_tester *tester); void aws_s3_tester_reset_counter2(struct aws_s3_tester *tester); void aws_s3_tester_set_counter1_desired(struct aws_s3_tester *tester, size_t value); void aws_s3_tester_set_counter2_desired(struct aws_s3_tester *tester, size_t value); /* Handle cleaning up the tester. If aws_s3_tester_bind_client_shutdown was used, then it will wait for the client to * finish shutting down before releasing any resources. */ void aws_s3_tester_clean_up(struct aws_s3_tester *tester); struct aws_http_message *aws_s3_tester_dummy_http_request_new(struct aws_s3_tester *tester); struct aws_s3_client *aws_s3_tester_mock_client_new(struct aws_s3_tester *tester); struct aws_s3_endpoint *aws_s3_tester_mock_endpoint_new(struct aws_s3_tester *tester); /* Create a new meta request for testing meta request functionality in isolation. test_results and client are optional. * If client is not specified, a new mock client will be created for the meta request. */ struct aws_s3_meta_request *aws_s3_tester_mock_meta_request_new(struct aws_s3_tester *tester); void aws_s3_create_test_buffer(struct aws_allocator *allocator, size_t buffer_size, struct aws_byte_buf *out_buf); void aws_s3_tester_lock_synced_data(struct aws_s3_tester *tester); void aws_s3_tester_unlock_synced_data(struct aws_s3_tester *tester); struct aws_string *aws_s3_tester_build_endpoint_string( struct aws_allocator *allocator, const struct aws_byte_cursor *bucket_name, const struct aws_byte_cursor *region); struct aws_http_message *aws_s3_test_get_object_request_new( struct aws_allocator *allocator, struct aws_byte_cursor host, struct aws_byte_cursor key); struct aws_http_message *aws_s3_test_put_object_request_new( struct aws_allocator *allocator, struct aws_byte_cursor *host, struct aws_byte_cursor key, struct aws_byte_cursor content_type, struct aws_input_stream *body_stream, uint32_t flags); struct aws_http_message *aws_s3_test_put_object_request_new_without_body( struct aws_allocator *allocator, struct aws_byte_cursor *host, struct aws_byte_cursor content_type, struct aws_byte_cursor key, uint64_t content_length, uint32_t flags); int aws_s3_tester_client_new( struct aws_s3_tester *tester, struct aws_s3_tester_client_options *options, struct aws_s3_client **out_client); int aws_s3_tester_send_meta_request_with_options( struct aws_s3_tester *tester, struct aws_s3_tester_meta_request_options *options, struct aws_s3_meta_request_test_results *test_results); /* Will copy the client's vtable into a new vtable that can be mutated. Returns the vtable that can be mutated. */ struct aws_s3_client_vtable *aws_s3_tester_patch_client_vtable( struct aws_s3_tester *tester, struct aws_s3_client *client, size_t *out_index); /* Gets the vtable patch structure that was created as a result of aws_s3_tester_patch_client_vtable.
This allows * access to the original vtable.*/ struct aws_s3_client_vtable_patch *aws_s3_tester_get_client_vtable_patch(struct aws_s3_tester *tester, size_t index); /* Will copy the meta-request's vtable into a new vtable that can be mutated. Returns the vtable that can be mutated. */ struct aws_s3_meta_request_vtable *aws_s3_tester_patch_meta_request_vtable( struct aws_s3_tester *tester, struct aws_s3_meta_request *meta_request, size_t *out_index); /* Gets the vtable patch structure that was created as a result of aws_s3_tester_patch_meta_request_vtable. This allows * access to the original vtable.*/ struct aws_s3_meta_request_vtable_patch *aws_s3_tester_get_meta_request_vtable_patch( struct aws_s3_tester *tester, size_t index); int aws_s3_tester_send_meta_request( struct aws_s3_tester *tester, struct aws_s3_client *client, struct aws_s3_meta_request_options *options, struct aws_s3_meta_request_test_results *test_results, uint32_t flags); int aws_s3_tester_round_trip_meta_request( struct aws_s3_tester *tester, struct aws_s3_client *client, uint32_t file_size_mb, enum aws_s3_checksum_algorithm algorithm, char *test_file_identifier, uint32_t flags, struct aws_s3_meta_request_test_results *put_out_results, struct aws_s3_meta_request_test_results *get_out_results); /* Avoid using this function as it will soon go away. Use aws_s3_tester_send_meta_request_with_options instead.*/ int aws_s3_tester_send_get_object_meta_request( struct aws_s3_tester *tester, struct aws_s3_client *client, struct aws_byte_cursor s3_path, uint32_t flags, struct aws_s3_meta_request_test_results *out_results); /* Avoid using this function as it will soon go away. Use aws_s3_tester_send_meta_request_with_options instead.*/ int aws_s3_tester_send_put_object_meta_request( struct aws_s3_tester *tester, struct aws_s3_client *client, uint32_t object_size_mb, uint32_t flags, struct aws_s3_meta_request_test_results *out_results); int aws_s3_tester_validate_get_object_results( struct aws_s3_meta_request_test_results *meta_request_test_results, uint32_t flags); int aws_s3_tester_validate_put_object_results( struct aws_s3_meta_request_test_results *meta_request_test_results, uint32_t flags); /* Wait for the cleanup notification. This, and the s_s3_test_client_shutdown function are meant to be used for * sequential clean up only, and should not overlap with the "finish" callback. (Both currently use the same * mutex/signal.) */ void aws_s3_tester_wait_for_client_shutdown(struct aws_s3_tester *tester); /* * Value to populate test stream with. Useful for cases where we need to verify that checksums fail. */ enum aws_s3_test_stream_value { TEST_STREAM_VALUE_1, TEST_STREAM_VALUE_2, }; struct aws_input_stream *aws_s3_test_input_stream_new(struct aws_allocator *allocator, size_t length); struct aws_input_stream *aws_s3_test_input_stream_new_with_value_type( struct aws_allocator *allocator, size_t length, enum aws_s3_test_stream_value stream_value); /* Add g_upload_folder to the file path to make sure we get all the non-pre-existing files in the same folder. */ int aws_s3_tester_upload_file_path_init( struct aws_allocator *allocator, struct aws_byte_buf *out_path_buffer, struct aws_byte_cursor file_path); /* Create a file on disk based on the input stream.
Return the file path. */ struct aws_string *aws_s3_tester_create_file( struct aws_allocator *allocator, struct aws_byte_cursor test_object_path, struct aws_input_stream *input_stream); int aws_s3_tester_get_content_length(const struct aws_http_headers *headers, uint64_t *out_content_length); int aws_s3_tester_check_s3express_creds_for_default_mock_response(struct aws_credentials *credentials); struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file_failure_tester( struct aws_allocator *allocator, struct aws_byte_cursor file_name); extern struct aws_s3_client_vtable g_aws_s3_client_mock_vtable; extern const struct aws_byte_cursor g_mock_server_uri; extern const struct aws_byte_cursor g_test_body_content_type; extern const struct aws_byte_cursor g_test_s3_region; extern const struct aws_byte_cursor g_pre_existing_object_1MB; extern const struct aws_byte_cursor g_pre_existing_object_10MB; extern const struct aws_byte_cursor g_pre_existing_object_kms_10MB; extern const struct aws_byte_cursor g_pre_existing_object_aes256_10MB; extern const struct aws_byte_cursor g_pre_existing_empty_object; extern const struct aws_byte_cursor g_put_object_prefix; /* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use that; otherwise, use aws-c-s3-test-bucket */ extern struct aws_byte_cursor g_test_bucket_name; /* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use `$CRT_S3_TEST_BUCKET_NAME-public`; otherwise, use * aws-c-s3-test-bucket-public */ extern struct aws_byte_cursor g_test_public_bucket_name; /* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use * `$CRT_S3_TEST_BUCKET_NAME--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com`; otherwise, use * aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com */ extern struct aws_byte_cursor g_test_s3express_bucket_usw2_az1_endpoint; /* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use * `$CRT_S3_TEST_BUCKET_NAME--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com`; otherwise, use * aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com */ extern struct aws_byte_cursor g_test_s3express_bucket_use1_az4_endpoint; #endif /* AWS_S3_TESTER_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/s3_util_tests.c000066400000000000000000000653561456575232400237050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0.
*/ #include "aws/s3/private/s3_client_impl.h" #include "aws/s3/private/s3_meta_request_impl.h" #include "aws/s3/private/s3_util.h" #include "s3_tester.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include AWS_TEST_CASE(test_s3_request_type_operation_name, s_test_s3_request_type_operation_name) static int s_test_s3_request_type_operation_name(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; /* sanity check */ ASSERT_STR_EQUALS("HeadObject", aws_s3_request_type_operation_name(AWS_S3_REQUEST_TYPE_HEAD_OBJECT)); /* check that all valid enums give back valid strings */ for (enum aws_s3_request_type type = AWS_S3_REQUEST_TYPE_UNKNOWN + 1; type < AWS_S3_REQUEST_TYPE_MAX; ++type) { const char *operation_name = aws_s3_request_type_operation_name(type); ASSERT_NOT_NULL(operation_name); ASSERT_TRUE(strlen(operation_name) > 1); } /* check that invalid enums give back empty strings */ ASSERT_NOT_NULL(aws_s3_request_type_operation_name(AWS_S3_REQUEST_TYPE_UNKNOWN)); ASSERT_STR_EQUALS("", aws_s3_request_type_operation_name(AWS_S3_REQUEST_TYPE_UNKNOWN)); ASSERT_STR_EQUALS("", aws_s3_request_type_operation_name(AWS_S3_REQUEST_TYPE_MAX)); ASSERT_STR_EQUALS("", aws_s3_request_type_operation_name(-1)); return 0; } AWS_TEST_CASE(test_s3_replace_quote_entities, s_test_s3_replace_quote_entities) static int s_test_s3_replace_quote_entities(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct test_case { struct aws_byte_cursor test_string; const char *expected_result; }; struct test_case test_cases[] = { { .test_string = aws_byte_cursor_from_c_str(""testtest"), .expected_result = "\"testtest", }, { .test_string = aws_byte_cursor_from_c_str("testtest""), .expected_result = "testtest\"", }, { .test_string = aws_byte_cursor_from_c_str(""""), .expected_result = "\"\"", }, { .test_string = aws_byte_cursor_from_c_str("testtest"), .expected_result = "testtest", }, { .test_string = aws_byte_cursor_from_c_str(""), .expected_result = "", }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(test_cases); ++i) { struct test_case *test_case = &test_cases[i]; struct aws_byte_buf result_byte_buf = aws_replace_quote_entities(allocator, test_case->test_string); struct aws_byte_cursor result_byte_cursor = aws_byte_cursor_from_buf(&result_byte_buf); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(result_byte_cursor, test_case->expected_result); aws_byte_buf_clean_up(&result_byte_buf); } return 0; } AWS_TEST_CASE(test_s3_strip_quotes, s_test_s3_strip_quotes) static int s_test_s3_strip_quotes(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct test_case { struct aws_byte_cursor test_cursor; struct aws_byte_cursor expected_result; }; struct test_case test_cases[] = { { .test_cursor = aws_byte_cursor_from_c_str("\"test\""), .expected_result = aws_byte_cursor_from_c_str("test"), }, { .test_cursor = aws_byte_cursor_from_c_str("test\""), .expected_result = aws_byte_cursor_from_c_str("test\""), }, { .test_cursor = aws_byte_cursor_from_c_str("\"test"), .expected_result = aws_byte_cursor_from_c_str("\"test"), }, { .test_cursor = aws_byte_cursor_from_c_str("test"), .expected_result = aws_byte_cursor_from_c_str("test"), }, { .test_cursor = aws_byte_cursor_from_c_str(""), .expected_result = aws_byte_cursor_from_c_str(""), }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(test_cases); ++i) { struct test_case *test_case = &test_cases[i]; struct aws_byte_buf result_byte_buf; AWS_ZERO_STRUCT(result_byte_buf); struct aws_string *result = 
aws_strip_quotes(allocator, test_case->test_cursor); struct aws_byte_cursor result_byte_cursor = aws_byte_cursor_from_string(result); ASSERT_TRUE(aws_byte_cursor_eq(&test_case->expected_result, &result_byte_cursor)); aws_byte_buf_clean_up(&result_byte_buf); aws_string_destroy(result); } return 0; } AWS_TEST_CASE(test_s3_parse_request_range_header, s_test_s3_parse_request_range_header) static int s_test_s3_parse_request_range_header(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct range_header_example { struct aws_byte_cursor header_value; bool has_start_range; bool has_end_range; uint64_t range_start; uint64_t range_end; }; const struct range_header_example valid_range_examples[] = { { .header_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=5-10"), .has_start_range = true, .has_end_range = true, .range_start = 5, .range_end = 10, }, { .header_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=0-0"), .has_start_range = true, .has_end_range = true, .range_start = 0, .range_end = 0, }, { .header_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=0-"), .has_start_range = true, .has_end_range = false, .range_start = 0, .range_end = 0, }, { .header_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=5-"), .has_start_range = true, .has_end_range = false, .range_start = 5, .range_end = 0, }, { .header_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=-10"), .has_start_range = false, .has_end_range = true, .range_start = 0, .range_end = 10, }, }; const struct aws_byte_cursor invalid_range_header_values[] = { AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=-"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes=10-5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("byts=0-5"), AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("5-10"), }; bool has_start_range = false; bool has_end_range = false; uint64_t range_start = 0; uint64_t range_end = 0; struct aws_http_headers *headers = aws_http_headers_new(allocator); /* Check that it fails if there is no Range header */ ASSERT_FAILS( aws_s3_parse_request_range_header(headers, &has_start_range, &has_end_range, &range_start, &range_end)); /* Check the valid test cases */ for (size_t i = 0; i < AWS_ARRAY_SIZE(valid_range_examples); ++i) { printf("valid example [%zu]: " PRInSTR "\n", i, AWS_BYTE_CURSOR_PRI(valid_range_examples[i].header_value)); aws_http_headers_set(headers, g_range_header_name, valid_range_examples[i].header_value); ASSERT_SUCCESS( aws_s3_parse_request_range_header(headers, &has_start_range, &has_end_range, &range_start, &range_end)); ASSERT_INT_EQUALS(valid_range_examples[i].has_start_range, has_start_range); ASSERT_INT_EQUALS(valid_range_examples[i].has_end_range, has_end_range); ASSERT_INT_EQUALS(valid_range_examples[i].range_start, range_start); ASSERT_INT_EQUALS(valid_range_examples[i].range_end, range_end); } /* Check the invalid test cases */ for (size_t i = 0; i < AWS_ARRAY_SIZE(invalid_range_header_values); ++i) { printf("invalid example [%zu]: " PRInSTR "\n", i, AWS_BYTE_CURSOR_PRI(invalid_range_header_values[i])); aws_http_headers_set(headers, g_range_header_name, invalid_range_header_values[i]); ASSERT_FAILS( aws_s3_parse_request_range_header(headers, &has_start_range, &has_end_range, &range_start, &range_end)); } aws_http_headers_release(headers); return 0; } AWS_TEST_CASE(test_s3_parse_content_range_response_header, s_test_s3_parse_content_range_response_header) static int s_test_s3_parse_content_range_response_header(struct aws_allocator *allocator, void *ctx) { (void)ctx; const struct aws_http_header content_range_header = { 
.name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Range"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes 55-100/12345"), }; const struct aws_http_header invalid_content_range_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Range"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bytes 55-100/"), }; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); /* Try to parse a header that isn't there. */ { uint64_t object_size = 0ULL; ASSERT_FAILS(aws_s3_parse_content_range_response_header(allocator, response_headers, NULL, NULL, &object_size)); ASSERT_TRUE(aws_last_error() == AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER); } aws_http_headers_add_header(response_headers, &content_range_header); /* Parse all of the data from a valid header. */ { uint64_t object_size = 0ULL; uint64_t range_start = 0ULL; uint64_t range_end = 0ULL; ASSERT_SUCCESS(aws_s3_parse_content_range_response_header( allocator, response_headers, &range_start, &range_end, &object_size)); ASSERT_TRUE(range_start == 55ULL); ASSERT_TRUE(range_end == 100ULL); ASSERT_TRUE(object_size == 12345ULL); } /* Range-end and range-start are optional output arguments. */ { uint64_t object_size = 0ULL; ASSERT_SUCCESS( aws_s3_parse_content_range_response_header(allocator, response_headers, NULL, NULL, &object_size)); ASSERT_TRUE(object_size == 12345ULL); } aws_http_headers_set(response_headers, invalid_content_range_header.name, invalid_content_range_header.value); /* Try to parse an invalid header. */ { uint64_t object_size = 0ULL; ASSERT_FAILS(aws_s3_parse_content_range_response_header(allocator, response_headers, NULL, NULL, &object_size)); ASSERT_TRUE(aws_last_error() == AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER); } aws_http_headers_release(response_headers); return 0; } AWS_TEST_CASE(test_s3_parse_content_length_response_header, s_test_s3_parse_content_length_response_header) static int s_test_s3_parse_content_length_response_header(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_http_headers *response_headers = aws_http_headers_new(allocator); const struct aws_http_header valid_content_length_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("12345"), }; const struct aws_http_header invalid_content_length_header = { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), }; /* Try to parse a header that isn't there. */ { uint64_t content_length = 0ULL; ASSERT_FAILS(aws_s3_parse_content_length_response_header(allocator, response_headers, &content_length)); ASSERT_TRUE(aws_last_error() == AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER); } aws_http_headers_add_header(response_headers, &valid_content_length_header); /* Parse a valid header. */ { uint64_t content_length = 0ULL; ASSERT_SUCCESS(aws_s3_parse_content_length_response_header(allocator, response_headers, &content_length)); ASSERT_TRUE(content_length == 12345ULL); } aws_http_headers_set(response_headers, invalid_content_length_header.name, invalid_content_length_header.value); /* Try to parse an invalid header. 
*/ { uint64_t content_length = 0ULL; ASSERT_FAILS(aws_s3_parse_content_length_response_header(allocator, response_headers, &content_length)); ASSERT_TRUE(aws_last_error() == AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER); } aws_http_headers_release(response_headers); return 0; } static int s_validate_part_ranges( uint64_t object_range_start, uint64_t object_range_end, size_t part_size, uint32_t num_parts, const uint64_t *part_ranges) { ASSERT_TRUE(part_ranges != NULL); uint64_t aligned_first_part_size = part_size - (object_range_start % part_size); for (uint32_t i = 0; i < num_parts; ++i) { uint64_t part_range_start = 0ULL; uint64_t part_range_end = 0ULL; aws_s3_calculate_auto_ranged_get_part_range( object_range_start, object_range_end, part_size, aligned_first_part_size, i + 1, &part_range_start, &part_range_end); ASSERT_TRUE(part_range_start == part_ranges[i * 2]); ASSERT_TRUE(part_range_end == part_ranges[i * 2 + 1]); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_get_num_parts_and_get_part_range, s_test_s3_get_num_parts_and_get_part_range) static int s_test_s3_get_num_parts_and_get_part_range(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; const size_t part_size = 16 * 1024; /* Empty File . */ { const uint32_t expected_num_parts = 1; const uint64_t object_range_start = 0; const uint64_t object_range_end = 0; ASSERT_INT_EQUALS( aws_s3_calculate_auto_ranged_get_num_parts(part_size, 0, object_range_start, object_range_end), expected_num_parts); } /* first_part_size is < part_size . */ { const uint32_t expected_num_parts = 2; const uint64_t first_part_size = 2; const uint64_t object_range_start = 0; const uint64_t object_range_end = 5; const uint64_t part_ranges[] = { 0, /* first_part start */ 1, /* first_part end */ 2, /* second_part start */ 5, /* second_part end */ }; ASSERT_INT_EQUALS( aws_s3_calculate_auto_ranged_get_num_parts( part_size, first_part_size, object_range_start, object_range_end), expected_num_parts); uint64_t part_range_start, part_range_end; aws_s3_calculate_auto_ranged_get_part_range( object_range_start, object_range_end, part_size, first_part_size, 1, &part_range_start, &part_range_end); ASSERT_INT_EQUALS(part_range_start, part_ranges[0]); ASSERT_INT_EQUALS(part_range_end, part_ranges[1]); aws_s3_calculate_auto_ranged_get_part_range( object_range_start, object_range_end, part_size, first_part_size, 2, &part_range_start, &part_range_end); ASSERT_INT_EQUALS(part_range_start, part_ranges[2]); ASSERT_INT_EQUALS(part_range_end, part_ranges[3]); } /* Perfectly aligned on part boundaries. */ { const uint32_t expected_num_parts = 2; const uint64_t object_range_start = 0; const uint64_t object_range_end = (object_range_start + (uint64_t)part_size * (uint64_t)expected_num_parts) - 1ULL; const uint64_t part_ranges[] = { 0, (uint64_t)part_size - 1ULL, (uint64_t)part_size, (uint64_t)part_size * 2ULL - 1ULL, }; ASSERT_TRUE( aws_s3_calculate_auto_ranged_get_num_parts(part_size, part_size, object_range_start, object_range_end) == expected_num_parts); ASSERT_SUCCESS( s_validate_part_ranges(object_range_start, object_range_end, part_size, expected_num_parts, part_ranges)); } /* Range-start unaligned on part boundaries, but range-end aligned. 
*/ { const uint32_t expected_num_parts = 3; const uint64_t half_part_size = part_size >> 1ULL; const uint64_t object_range_start = half_part_size; const uint64_t object_range_end = (object_range_start + half_part_size + (uint64_t)part_size * 2ULL) - 1ULL; uint64_t aligned_first_part_size = part_size - (object_range_start % part_size); const uint64_t part_ranges[] = { object_range_start, object_range_start + half_part_size - 1, object_range_start + half_part_size, object_range_start + half_part_size + (uint64_t)part_size - 1ULL, object_range_start + half_part_size + (uint64_t)part_size, object_range_start + half_part_size + (uint64_t)part_size * 2ULL - 1ULL, }; ASSERT_TRUE( aws_s3_calculate_auto_ranged_get_num_parts( part_size, aligned_first_part_size, object_range_start, object_range_end) == expected_num_parts); ASSERT_SUCCESS( s_validate_part_ranges(object_range_start, object_range_end, part_size, expected_num_parts, part_ranges)); } /* Range-start and range-end both unaligned on part boundaries. */ { const uint32_t expected_num_parts = 4; const uint64_t half_part_size = part_size >> 1ULL; const uint64_t object_range_start = half_part_size; const uint64_t object_range_end = (object_range_start + half_part_size + (uint64_t)part_size * 2ULL + half_part_size) - 1ULL; uint64_t aligned_first_part_size = part_size - (object_range_start % part_size); const uint64_t part_ranges[] = { object_range_start, object_range_start + half_part_size - 1, object_range_start + half_part_size, object_range_start + half_part_size + (uint64_t)part_size - 1ULL, object_range_start + half_part_size + (uint64_t)part_size, object_range_start + half_part_size + (uint64_t)part_size * 2ULL - 1ULL, object_range_start + half_part_size + (uint64_t)part_size * 2ULL, object_range_start + half_part_size + (uint64_t)part_size * 2ULL + half_part_size - 1ULL, }; ASSERT_TRUE( aws_s3_calculate_auto_ranged_get_num_parts( part_size, aligned_first_part_size, object_range_start, object_range_end) == expected_num_parts); ASSERT_SUCCESS( s_validate_part_ranges(object_range_start, object_range_end, part_size, expected_num_parts, part_ranges)); } /* 1 byte range corner case. 
*/ { const uint32_t expected_num_parts = 1; const uint64_t object_range_start = 8; const uint64_t object_range_end = 8; uint64_t aligned_first_part_size = part_size - (object_range_start % part_size); const uint64_t part_ranges[] = {8, 8}; ASSERT_TRUE( aws_s3_calculate_auto_ranged_get_num_parts( part_size, aligned_first_part_size, object_range_start, object_range_end) == expected_num_parts); ASSERT_SUCCESS( s_validate_part_ranges(object_range_start, object_range_end, part_size, expected_num_parts, part_ranges)); } return 0; } struct s3_request_part_config_example { const char *name; uint64_t content_length; size_t client_part_size; uint64_t client_max_part_size; size_t expected_part_size; uint32_t expected_num_parts; }; AWS_TEST_CASE(test_s3_mpu_get_part_size_and_num_parts, s_test_s3_mpu_get_part_size_and_num_parts) static int s_test_s3_mpu_get_part_size_and_num_parts(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; uint64_t default_max_part_size = 5368709120ULL; const struct s3_request_part_config_example valid_request_part_config[] = { { .name = "simple case", .content_length = MB_TO_BYTES((uint64_t)10000), .client_part_size = MB_TO_BYTES(5), .client_max_part_size = default_max_part_size, .expected_part_size = 5242880, .expected_num_parts = 2000, }, { .name = "large content length with small part size", .content_length = MB_TO_BYTES((uint64_t)990000), .client_part_size = MB_TO_BYTES(5), .client_max_part_size = default_max_part_size, .expected_part_size = 103809024, .expected_num_parts = 10000, }, { .name = "large content length with large part size", .content_length = MB_TO_BYTES((uint64_t)1000000), .client_part_size = MB_TO_BYTES(500), .client_max_part_size = default_max_part_size, .expected_part_size = MB_TO_BYTES(500), .expected_num_parts = 2000, }, { .name = "large odd content length", .content_length = 1044013645824, .client_part_size = 5242880, .client_max_part_size = default_max_part_size, .expected_part_size = 104401365, .expected_num_parts = 10000, }, { .name = "10k parts", .content_length = MB_TO_BYTES((uint64_t)50000), .client_part_size = MB_TO_BYTES(5), .client_max_part_size = default_max_part_size, .expected_part_size = MB_TO_BYTES(5), .expected_num_parts = 10000, }, { .name = "10k - 1 parts", .content_length = 49995, .client_part_size = 5, .client_max_part_size = default_max_part_size, .expected_part_size = 5, .expected_num_parts = 9999, }, { .name = "10k with small last part", .content_length = 49998, .client_part_size = 5, .client_max_part_size = default_max_part_size, .expected_part_size = 5, .expected_num_parts = 10000, }, { .name = "10k + 1 parts", .content_length = 50001, .client_part_size = 5, .client_max_part_size = default_max_part_size, .expected_part_size = 6, .expected_num_parts = 8334, }, { .name = "bump content length", .content_length = 100000, .client_part_size = 5, .client_max_part_size = default_max_part_size, .expected_part_size = 10, .expected_num_parts = 10000, }, { .name = "bump content length with non-zero mod", .content_length = 999999, .client_part_size = 5, .client_max_part_size = default_max_part_size, .expected_part_size = 100, .expected_num_parts = 10000, }, { .name = "5 tb content length", .content_length = MB_TO_BYTES((uint64_t)5 * 1024 * 1024), .client_part_size = MB_TO_BYTES((uint64_t)5), .client_max_part_size = default_max_part_size, .expected_part_size = 549755814, .expected_num_parts = 10000, }, }; for (size_t i = 0; i < AWS_ARRAY_SIZE(valid_request_part_config); ++i) { AWS_LOGF_INFO(AWS_LS_S3_GENERAL, "valid 
example [%zu]: %s\n", i, valid_request_part_config[i].name); uint64_t content_length = valid_request_part_config[i].content_length; size_t part_size; uint32_t num_parts; ASSERT_SUCCESS(aws_s3_calculate_optimal_mpu_part_size_and_num_parts( content_length, valid_request_part_config[i].client_part_size, valid_request_part_config[i].client_max_part_size, &part_size, &num_parts)); ASSERT_INT_EQUALS(valid_request_part_config[i].expected_part_size, part_size); ASSERT_INT_EQUALS(valid_request_part_config[i].expected_num_parts, num_parts); } /* Invalid cases */ const struct s3_request_part_config_example invalid_request_part_config[] = {{ .name = "max part < required part size", .content_length = 900000, .client_part_size = 5, .client_max_part_size = 10, }}; for (size_t i = 0; i < AWS_ARRAY_SIZE(invalid_request_part_config); ++i) { printf("invalid example [%zu]: %s\n", i, invalid_request_part_config[i].name); size_t part_size; uint32_t num_parts; ASSERT_FAILS(aws_s3_calculate_optimal_mpu_part_size_and_num_parts( invalid_request_part_config[i].content_length, invalid_request_part_config[i].client_part_size, invalid_request_part_config[i].client_max_part_size, &part_size, &num_parts)); } return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_s3_aws_xml_get_body_at_path, s_test_s3_aws_xml_get_body_at_path) static int s_test_s3_aws_xml_get_body_at_path(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_cursor example_error_body = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( "\n" "\n" "AccessDenied\n" "Access Denied\n" "656c76696e6727732072657175657374\n" "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==\n" ""); /* Ensure we can successfully look up */ { struct aws_byte_cursor error_code = {0}; const char *xml_path[] = {"Error", "Code", NULL}; ASSERT_SUCCESS(aws_xml_get_body_at_path(allocator, example_error_body, xml_path, &error_code)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(error_code, "AccessDenied"); } /* Ensure we fail if the beginning of the path doesn't match */ { struct aws_byte_cursor error_code = {0}; const char *xml_path[] = {"ObviouslyInvalidName", "Code", NULL}; ASSERT_ERROR( AWS_ERROR_STRING_MATCH_NOT_FOUND, aws_xml_get_body_at_path(allocator, example_error_body, xml_path, &error_code)); } /* Ensure we fail if the end of the path doesn't match */ { struct aws_byte_cursor error_code = {0}; const char *xml_path[] = {"Error", "ObviouslyInvalidName", NULL}; ASSERT_ERROR( AWS_ERROR_STRING_MATCH_NOT_FOUND, aws_xml_get_body_at_path(allocator, example_error_body, xml_path, &error_code)); } /* Ensure we fail if the document isn't valid XML */ { struct aws_byte_cursor error_code = {0}; const char *xml_path[] = {"Error", "Code", NULL}; ASSERT_ERROR( AWS_ERROR_INVALID_XML, aws_xml_get_body_at_path( allocator, aws_byte_cursor_from_c_str("Obviously invalid XML document"), xml_path, &error_code)); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/test_helper/000077500000000000000000000000001456575232400232355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/test_helper/README.md000066400000000000000000000061661456575232400245250ustar00rootroot00000000000000# Helper script to setup your S3 structure to run the tests for aws-c-s3 To use this script, you must have AWS credentials with permission to create and delete buckets. 
To create the S3 buckets and objects that tests will use: ```sh pip3 install boto3 export CRT_S3_TEST_BUCKET_NAME=<BUCKET_NAME> python3 test_helper.py init # change directory to the build/tests cd aws-c-s3/build/tests && ctest ``` To clean up the S3 buckets created: ```sh export CRT_S3_TEST_BUCKET_NAME=<BUCKET_NAME> python3 test_helper.py clean ``` ## Actions ### `init` action * Create `<BUCKET_NAME>` in us-west-2. * Add a lifecycle rule to automatically clean up the `upload/` prefix after one day * Upload files: + `pre-existing-10MB-aes256-c` [SSE-C](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#sse-c-highlights) encrypted file + `pre-existing-10MB-aes256` [SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/specifying-s3-encryption.html) encrypted file + `pre-existing-10MB-kms` [SSE-KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) encrypted file + `pre-existing-10MB` + `pre-existing-1MB` + `pre-existing-empty` * Create `<BUCKET_NAME>-public` in us-west-2 * Upload files: + `pre-existing-1MB` 1MB file with public read access. * Create directory bucket `<BUCKET_NAME>--usw2-az1--x-s3` in us-west-2 * Upload files: + `pre-existing-10MB` 10MB file. * Create directory bucket `<BUCKET_NAME>--use1-az4--x-s3` in us-east-1 * Upload files: + `pre-existing-10MB` 10MB file. ### `clean` action * Delete the buckets created by the `init` action and every object inside them. ## BUCKET_NAME You can specify the bucket name to be created either by passing an argument to the script or by setting an environment variable; the `bucket_name` passed in takes precedence. If neither of these options is chosen, the `init` action will create a random bucket name. In this case, you will need to set the `CRT_S3_TEST_BUCKET_NAME` environment variable to the printed-out bucket name before running the test. ## Notes * The MRAP tests are not included in this script, and they are disabled by default. To run those tests, you will need to create an MRAP access point over buckets that have `pre-existing-1MB` in them. Then update `g_test_mrap_endpoint` to the URI of the MRAP endpoint and build with `-DENABLE_MRAP_TESTS=true`. * To run tests in tests/s3_mock_server_tests.c, initialize the mock S3 server first from [here](./../mock_s3_server/), and build your CMake project with `-DENABLE_MOCK_SERVER_TESTS=true` * Note: If you are not using the aws-common-runtime AWS team account, you must set the environment variable `CRT_S3_TEST_BUCKET_NAME` to the bucket you created before running the tests. * When you see the error "Check your account level S3 settings, public access may be blocked.", check https://docs.aws.amazon.com/AmazonS3/latest/userguide/configuring-block-public-access-account.html to set `BlockPublicAcls` to false, which enables public read of objects with the `public-read` ACL in the bucket. ## TODO * Automate the MRAP creation * Instead of hard-coded paths and regions, make them configurable and pick them up from the tests. aws-crt-python-0.20.4+dfsg/crt/aws-c-s3/tests/test_helper/test_helper.py000077500000000000000000000212371456575232400261350ustar00rootroot00000000000000#!/usr/bin/env python3 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0.
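# Typical invocation (summarized from README.md in this directory): export CRT_S3_TEST_BUCKET_NAME=<BUCKET_NAME>, then run "python3 test_helper.py init" to create the test buckets and objects before running ctest, and "python3 test_helper.py clean" to delete them afterwards.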
import argparse import boto3 import botocore import sys import os import random print(boto3.__version__) REGION = 'us-west-2' REGION_EAST_1 = 'us-east-1' s3 = boto3.resource('s3') s3_client = boto3.client('s3', region_name=REGION) s3_client_east1 = boto3.client('s3', region_name=REGION_EAST_1) s3_control_client = boto3.client('s3control') MB = 1024*1024 GB = 1024*1024*1024 parser = argparse.ArgumentParser() parser.add_argument( 'action', choices=['init', 'clean'], help='Initialize or clean up the test buckets') parser.add_argument( 'bucket_name', nargs='?', help='The bucket name base to use for the test buckets. If not specified, the $CRT_S3_TEST_BUCKET_NAME will be used, if set. Otherwise, a random name will be generated.') args = parser.parse_args() if args.bucket_name is not None: BUCKET_NAME_BASE = args.bucket_name elif "CRT_S3_TEST_BUCKET_NAME" in os.environ: BUCKET_NAME_BASE = os.environ['CRT_S3_TEST_BUCKET_NAME'] else: # Generate a random bucket name BUCKET_NAME_BASE = 'aws-c-s3-test-bucket-' + str(random.random())[2:8] PUBLIC_BUCKET_NAME = BUCKET_NAME_BASE + "-public" def create_bytes(size): return bytearray([1] * size) def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME_BASE, sse=None, public_read=False, client=s3_client): if size == 0: client.put_object(Bucket=bucket, Key=keyname) print(f"Object {keyname} uploaded") return body = create_bytes(size) args = {'Bucket': bucket, 'Key': keyname, 'Body': body} if sse == 'aes256': args['ServerSideEncryption'] = 'AES256' elif sse == 'aes256-c': random_key = os.urandom(32) args['SSECustomerKey'] = random_key args['SSECustomerAlgorithm'] = 'AES256' elif sse == 'kms': args['ServerSideEncryption'] = 'aws:kms' args['SSEKMSKeyId'] = 'alias/aws/s3' if public_read: args['ACL'] = 'public-read' try: client.put_object(**args) except botocore.exceptions.ClientError as e: print(f"Object {keyname} failed to upload, with exception: {e}") if public_read and e.response['Error']['Code'] == 'AccessDenied': print("Check your account level S3 settings, public access may be blocked.") exit(-1) print(f"Object {keyname} uploaded") def create_bucket_with_lifecycle(availability_zone=None, client=s3_client): try: # Create the bucket. This returns an error if the bucket already exists. 
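# When an availability zone is given, an S3 Express directory bucket named "<base>--<az>--x-s3" is created; otherwise a regular bucket named <base> is created and, below, given a lifecycle rule that expires objects under the upload/ prefix after one day.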
if availability_zone is not None: bucket_config = { 'Location': { 'Type': 'AvailabilityZone', 'Name': availability_zone }, 'Bucket': { 'Type': 'Directory', 'DataRedundancy': 'SingleAvailabilityZone' } } bucket_name = BUCKET_NAME_BASE+f"--{availability_zone}--x-s3" else: bucket_config = {'LocationConstraint': REGION} bucket_name = BUCKET_NAME_BASE client.create_bucket( Bucket=bucket_name, CreateBucketConfiguration=bucket_config) if availability_zone is None: client.put_bucket_lifecycle_configuration( Bucket=bucket_name, LifecycleConfiguration={ 'Rules': [ { 'ID': 'clean up non-pre-existing objects', 'Expiration': { 'Days': 1, }, 'Filter': { 'Prefix': 'upload/', }, 'Status': 'Enabled', 'NoncurrentVersionExpiration': { 'NoncurrentDays': 1, }, 'AbortIncompleteMultipartUpload': { 'DaysAfterInitiation': 1, }, }, ], }, ) print(f"Bucket {bucket_name} created", file=sys.stderr) put_pre_existing_objects( 10*MB, 'pre-existing-10MB', bucket=bucket_name, client=client) if availability_zone is None: put_pre_existing_objects( 10*MB, 'pre-existing-10MB-aes256-c', sse='aes256-c', bucket=bucket_name) put_pre_existing_objects( 10*MB, 'pre-existing-10MB-aes256', sse='aes256', bucket=bucket_name) put_pre_existing_objects( 10*MB, 'pre-existing-10MB-kms', sse='kms', bucket=bucket_name) put_pre_existing_objects( 256*MB, 'pre-existing-256MB', bucket=bucket_name) put_pre_existing_objects( 256*MB, 'pre-existing-256MB-@', bucket=bucket_name) put_pre_existing_objects( 2*GB, 'pre-existing-2GB', bucket=bucket_name) put_pre_existing_objects( 2*GB, 'pre-existing-2GB-@', bucket=bucket_name) put_pre_existing_objects( 1*MB, 'pre-existing-1MB', bucket=bucket_name) put_pre_existing_objects( 1*MB, 'pre-existing-1MB-@', bucket=bucket_name) put_pre_existing_objects( 0, 'pre-existing-empty', bucket=bucket_name) except botocore.exceptions.ClientError as e: # The bucket already exists. That's fine. if e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou' or e.response['Error']['Code'] == 'BucketAlreadyExists': print( f"Bucket {bucket_name} not created, skip initializing.", file=sys.stderr) return raise e def create_bucket_with_public_object(): try: s3_client.create_bucket(Bucket=PUBLIC_BUCKET_NAME, CreateBucketConfiguration={ 'LocationConstraint': REGION}, ObjectOwnership='ObjectWriter' ) s3_client.put_public_access_block( Bucket=PUBLIC_BUCKET_NAME, PublicAccessBlockConfiguration={ 'BlockPublicAcls': False, } ) print(f"Bucket {PUBLIC_BUCKET_NAME} created", file=sys.stderr) put_pre_existing_objects( 1*MB, 'pre-existing-1MB', bucket=PUBLIC_BUCKET_NAME, public_read=True) except botocore.exceptions.ClientError as e: # The bucket already exists. That's fine. 
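# "Bucket already exists" errors mean a previous run created it, which is fine; any other ClientError is re-raised below.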
if e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou' or e.response['Error']['Code'] == 'BucketAlreadyExists': print( f"Bucket {PUBLIC_BUCKET_NAME} not created, skip initializing.", file=sys.stderr) return raise e def cleanup(bucket_name, availability_zone=None, client=s3_client): if availability_zone is not None: bucket_name = bucket_name+f"--{availability_zone}--x-s3" objects = client.list_objects_v2(Bucket=bucket_name)["Contents"] objects = list(map(lambda x: {"Key": x["Key"]}, objects)) client.delete_objects(Bucket=bucket_name, Delete={"Objects": objects}) client.delete_bucket(Bucket=bucket_name) print(f"Bucket {bucket_name} deleted", file=sys.stderr) if args.action == 'init': try: print(BUCKET_NAME_BASE + " " + PUBLIC_BUCKET_NAME + " initializing...") create_bucket_with_lifecycle("use1-az4", s3_client_east1) create_bucket_with_lifecycle("usw2-az1") create_bucket_with_lifecycle() create_bucket_with_public_object() if os.environ.get('CRT_S3_TEST_BUCKET_NAME') != BUCKET_NAME_BASE: print( f"* Please set the environment variable $CRT_S3_TEST_BUCKET_NAME to {BUCKET_NAME_BASE} before running the tests.") except Exception as e: print(e) try: # Try to clean up the bucket created, when initialization failed. cleanup(BUCKET_NAME_BASE, "use1-az4", s3_client_east1) cleanup(BUCKET_NAME_BASE, "usw2-az1") cleanup(BUCKET_NAME_BASE) cleanup(PUBLIC_BUCKET_NAME) except Exception as e2: exit(-1) exit(-1) elif args.action == 'clean': if "CRT_S3_TEST_BUCKET_NAME" not in os.environ and args.bucket_name is None: print("Set the environment variable CRT_S3_TEST_BUCKET_NAME before clean up, or pass in bucket_name as argument.") exit(-1) cleanup(BUCKET_NAME_BASE, "use1-az4", s3_client_east1) cleanup(BUCKET_NAME_BASE, "usw2-az1") cleanup(BUCKET_NAME_BASE) cleanup(PUBLIC_BUCKET_NAME) aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/000077500000000000000000000000001456575232400210725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.clang-format000066400000000000000000000031611456575232400234460ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: 
false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.clang-tidy000066400000000000000000000013361456575232400231310ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter' WarningsAsErrors: '*' HeaderFilterRegex: '.*(? packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-20.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} clang-sanitizers: runs-on: ubuntu-20.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ 
matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-c-sdkutils\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-c-sdkutils\build\aws-c-sdkutils osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} # Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. downstream: runs-on: ubuntu-20.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400275730ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400312360ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.github/workflows/codecov.yml000066400000000000000000000014731456575232400266410ustar00rootroot00000000000000name: Code coverage check on: push: env: BUILDER_VERSION: v0.9.55 BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net BUILDER_SOURCE: releases PACKAGE_NAME: aws-c-sdkutils AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: codecov-linux: runs-on: ubuntu-22.04 steps: - name: Checkout Sources uses: actions/checkout@v3 - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --compiler=gcc-9 --coverage aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400321260ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.github/workflows/stale_issue.yml000066400000000000000000000045321456575232400275360ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deserves. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in longer than a week. We encourage you to check if this is still an issue in the latest release. Because it has been longer than a week since the last update on this, and in the absence of more information, we will be closing this issue soon. If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! It looks like this PR hasn’t been active in longer than a week, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark an issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue.
minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/.gitignore000066400000000000000000000010401456575232400230550ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app .vscode/ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/CMakeLists.txt000066400000000000000000000067131456575232400236410ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. cmake_minimum_required(VERSION 3.0) project(aws-c-sdkutils LANGUAGES C VERSION 0.1.0) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake/modules") include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(AwsFindPackage) file(GLOB AWS_SDKUTILS_HEADERS "include/aws/sdkutils/*.h" ) file(GLOB AWS_SDKUTILS_PRIVATE_HEADERS "include/aws/sdkutils/private/*.h" ) file(GLOB AWS_SDKUTILS_SRC "source/*.c" ) if (WIN32) if (MSVC) source_group("Header Files\\aws\\sdkutils" FILES ${AWS_SDKUTILS_HEADERS}) source_group("Source Files" FILES ${AWS_SDKUTILS_SRC}) endif () endif() file(GLOB SDKUTILS_HEADERS ${AWS_SDKUTILS_HEADERS} ) file(GLOB SDKUTILS_SRC ${AWS_SDKUTILS_SRC} ) add_library(${PROJECT_NAME} ${SDKUTILS_SRC}) aws_set_common_properties(${PROJECT_NAME} NO_WEXTRA) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_SDKUTILS") aws_add_sanitizers(${PROJECT_NAME}) aws_use_package(aws-c-common) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) # Our ABI is not yet stable set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_include_directories(${PROJECT_NAME} PUBLIC $ $) # When we install, the generated header will be at the INSTALL_INTERFACE:include location, # but at build time we need to explicitly include this here target_include_directories(${PROJECT_NAME} PUBLIC $) aws_prepare_shared_lib_exports(${PROJECT_NAME}) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) aws_check_headers(${PROJECT_NAME} ${AWS_SDKUTILS_HEADERS}) install(FILES ${AWS_SDKUTILS_HEADERS} DESTINATION "include/aws/sdkutils" COMPONENT 
Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}/" NAMESPACE AWS:: COMPONENT Development) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake" COMPONENT Development) install(FILES ${EXPORT_MODULES} DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/modules" COMPONENT Development) if (NOT CMAKE_CROSSCOMPILING) include(CTest) if (BUILD_TESTING) add_subdirectory(tests) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/CODE_OF_CONDUCT.md000066400000000000000000000004651456575232400236760ustar00rootroot00000000000000## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/CONTRIBUTING.md000066400000000000000000000061301456575232400233230ustar00rootroot00000000000000# Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *main* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. To send us a pull request, please: 1. Fork the repository. 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 3. Ensure local tests pass. 4. Commit to your fork using clear commit messages. 5. Send us a pull request, answering any default questions in the pull request interface. 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. 
As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ## Security issue notifications If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ## Licensing See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/LICENSE000066400000000000000000000236361456575232400221110ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/NOTICE000066400000000000000000000001031456575232400217700ustar00rootroot00000000000000Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/README.md000066400000000000000000000014201456575232400223460ustar00rootroot00000000000000## AWS C SDKUTILS C99 library implementing AWS SDK specific utilities. Includes utilities for ARN parsing, reading AWS profiles, etc... ## License This library is licensed under the Apache 2.0 License. ## Usage ### Building CMake 3.0+ is required to build. `` must be an absolute path in the following instructions. #### Building aws-c-sdkutils ``` git clone git@github.com:awslabs/aws-c-common.git cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX= cmake --build aws-c-common/build --target install git clone git@github.com:awslabs/aws-c-sdkutils.git cmake -S aws-c-sdkutils -B aws-c-sdkutils/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= cmake --build aws-c-sdkutils/build --target install ```aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/builder.json000066400000000000000000000003031456575232400234070ustar00rootroot00000000000000{ "name": "aws-c-sdkutils", "upstream": [ { "name": "aws-c-common" } ], "downstream": [ { "name": "aws-c-auth" } ], "test_steps": [ "test" ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/cmake/000077500000000000000000000000001456575232400221525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/cmake/aws-c-sdkutils-config.cmake000066400000000000000000000010751456575232400272740ustar00rootroot00000000000000include(CMakeFindDependencyMacro) find_dependency(aws-c-common) macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/format-check.sh000077500000000000000000000007731456575232400240030ustar00rootroot00000000000000#!/usr/bin/env bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests -type f \( -name '*.h' -o -name '*.c' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c " /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check." FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/000077500000000000000000000000001456575232400225155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/000077500000000000000000000000001456575232400233075ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/000077500000000000000000000000001456575232400251515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h000066400000000000000000000155141456575232400276420ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
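 *
 * As brief file-level orientation (the fuller data-model notes appear further down
 * in this header): the collections parsed by this API come from INI-style AWS
 * config/credentials files. A hedged, illustrative shape only; the exact grammar is
 * defined by the profile specification, not by this sketch:
 *
 *     [default]
 *     region = us-west-2
 *
 *     [profile my-profile]
 *     region = us-east-1
 *     s3 =
 *         max_concurrent_requests = 20
 *
 * Here "max_concurrent_requests" is a sub-property of the empty-valued "s3" property,
 * and the "profile " section prefix is, by convention, used in the config file but
 * not in the credentials file.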
*/ #ifndef AWS_SDKUTILS_AWS_PROFILE_H #define AWS_SDKUTILS_AWS_PROFILE_H #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; struct aws_string; struct aws_byte_buf; struct aws_byte_cursor; /* * A set of data types that model the aws profile specification * * A profile collection is a collection of zero or more named profiles * Each profile is a set of properties (named key-value pairs) * Empty-valued properties may have sub properties (named key-value pairs) * * Resolution rules exist to determine what profile to use, what files to * read profile collections from, and what types of credentials have priority. * * The profile specification is informally defined as "what the aws cli does" and * formally defined in internal aws documents. */ struct aws_profile_property; struct aws_profile; struct aws_profile_collection; /** * The profile specification has rule exceptions based on what file * the profile collection comes from. */ enum aws_profile_source_type { AWS_PST_NONE, AWS_PST_CONFIG, AWS_PST_CREDENTIALS }; /* * The collection can hold different types of sections. */ enum aws_profile_section_type { AWS_PROFILE_SECTION_TYPE_PROFILE, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, AWS_PROFILE_SECTION_TYPE_COUNT, }; AWS_EXTERN_C_BEGIN /************************* * Profile collection APIs *************************/ /** * Increments the reference count on the profile collection, allowing the caller to take a reference to it. * * Returns the same profile collection passed in. */ AWS_SDKUTILS_API struct aws_profile_collection *aws_profile_collection_acquire(struct aws_profile_collection *collection); /** * Decrements a profile collection's ref count. When the ref count drops to zero, the collection will be destroyed. * Returns NULL. */ AWS_SDKUTILS_API struct aws_profile_collection *aws_profile_collection_release(struct aws_profile_collection *collection); /** * @Deprecated This is equivalent to aws_profile_collection_release. */ AWS_SDKUTILS_API void aws_profile_collection_destroy(struct aws_profile_collection *profile_collection); /** * Create a new profile collection by parsing a file with the specified path */ AWS_SDKUTILS_API struct aws_profile_collection *aws_profile_collection_new_from_file( struct aws_allocator *allocator, const struct aws_string *file_path, enum aws_profile_source_type source); /** * Create a new profile collection by merging a config-file-based profile * collection and a credentials-file-based profile collection */ AWS_SDKUTILS_API struct aws_profile_collection *aws_profile_collection_new_from_merge( struct aws_allocator *allocator, const struct aws_profile_collection *config_profiles, const struct aws_profile_collection *credentials_profiles); /** * Create a new profile collection by parsing text in a buffer. Primarily * for testing. */ AWS_SDKUTILS_API struct aws_profile_collection *aws_profile_collection_new_from_buffer( struct aws_allocator *allocator, const struct aws_byte_buf *buffer, enum aws_profile_source_type source); /** * Retrieves a reference to a profile with the specified name, if it exists, from the profile collection */ AWS_SDKUTILS_API const struct aws_profile *aws_profile_collection_get_profile( const struct aws_profile_collection *profile_collection, const struct aws_string *profile_name); /* * Retrieves a reference to a section with the specified name and type, if it exists, from the profile collection. 
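 *
 * As broader context for the collection accessors declared above, a minimal lookup
 * sketch. Illustrative only: "allocator" is assumed to be a valid aws_allocator, the
 * file path and names are placeholders, the string helpers come from aws-c-common,
 * and all NULL/error checks are omitted:
 *
 *     struct aws_string *path = aws_string_new_from_c_str(allocator, "/path/to/config");
 *     struct aws_profile_collection *collection =
 *         aws_profile_collection_new_from_file(allocator, path, AWS_PST_CONFIG);
 *
 *     struct aws_string *profile_name = aws_string_new_from_c_str(allocator, "my-profile");
 *     const struct aws_profile *profile =
 *         aws_profile_collection_get_profile(collection, profile_name);
 *
 *     struct aws_string *property_name = aws_string_new_from_c_str(allocator, "region");
 *     const struct aws_profile_property *property = aws_profile_get_property(profile, property_name);
 *     const struct aws_string *region = aws_profile_property_get_value(property);
 *
 *     aws_string_destroy(property_name);
 *     aws_string_destroy(profile_name);
 *     aws_string_destroy(path);
 *     aws_profile_collection_release(collection);
 *
 * Note that "region" above is a reference owned by the collection and is not destroyed
 * by the caller. The note that follows covers section lookup specifically.
 *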
* You can get the "default" profile or credentials file sections by passing `AWS_PROFILE_SECTION_TYPE_PROFILE` */ AWS_SDKUTILS_API const struct aws_profile *aws_profile_collection_get_section( const struct aws_profile_collection *profile_collection, const enum aws_profile_section_type section_type, const struct aws_string *section_name); /** * Returns the number of profiles in a collection */ AWS_SDKUTILS_API size_t aws_profile_collection_get_profile_count(const struct aws_profile_collection *profile_collection); /** * Returns the number of elements of the specified section in a collection. */ AWS_SDKUTILS_API size_t aws_profile_collection_get_section_count( const struct aws_profile_collection *profile_collection, const enum aws_profile_section_type section_type); /** * Returns a reference to the name of the provided profile */ AWS_SDKUTILS_API const struct aws_string *aws_profile_get_name(const struct aws_profile *profile); /************** * profile APIs **************/ /** * Retrieves a reference to a property with the specified name, if it exists, from a profile */ AWS_SDKUTILS_API const struct aws_profile_property *aws_profile_get_property( const struct aws_profile *profile, const struct aws_string *property_name); /** * Returns how many properties a profile holds */ AWS_SDKUTILS_API size_t aws_profile_get_property_count(const struct aws_profile *profile); /** * Returns a reference to the property's string value */ AWS_SDKUTILS_API const struct aws_string *aws_profile_property_get_value(const struct aws_profile_property *property); /*********************** * profile property APIs ***********************/ /** * Returns a reference to the value of a sub property with the given name, if it exists, in the property */ AWS_SDKUTILS_API const struct aws_string *aws_profile_property_get_sub_property( const struct aws_profile_property *property, const struct aws_string *sub_property_name); /** * Returns how many sub properties the property holds */ AWS_SDKUTILS_API size_t aws_profile_property_get_sub_property_count(const struct aws_profile_property *property); /*********** * Misc APIs ***********/ /** * Computes the final platform-specific path for the profile credentials file. Does limited home directory * expansion/resolution. * * override_path, if not null, will be searched first instead of using the standard home directory config path */ AWS_SDKUTILS_API struct aws_string *aws_get_credentials_file_path( struct aws_allocator *allocator, const struct aws_byte_cursor *override_path); /** * Computes the final platform-specific path for the profile config file. Does limited home directory * expansion/resolution. * * override_path, if not null, will be searched first instead of using the standard home directory config path */ AWS_SDKUTILS_API struct aws_string *aws_get_config_file_path( struct aws_allocator *allocator, const struct aws_byte_cursor *override_path); /** * Computes the profile to use for credentials lookups based on profile resolution rules */ AWS_SDKUTILS_API struct aws_string *aws_get_profile_name(struct aws_allocator *allocator, const struct aws_byte_cursor *override_name); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_SDKUTILS_AWS_PROFILE_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h000066400000000000000000000253031456575232400317040ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #ifndef AWS_SDKUTILS_ENDPOINTS_RULESET_H #define AWS_SDKUTILS_ENDPOINTS_RULESET_H #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_endpoints_ruleset; struct aws_partitions_config; struct aws_endpoints_parameter; struct aws_endpoints_rule_engine; struct aws_endpoints_resolved_endpoint; struct aws_endpoints_request_context; struct aws_hash_table; enum aws_endpoints_parameter_type { AWS_ENDPOINTS_PARAMETER_STRING, AWS_ENDPOINTS_PARAMETER_BOOLEAN }; enum aws_endpoints_resolved_endpoint_type { AWS_ENDPOINTS_RESOLVED_ENDPOINT, AWS_ENDPOINTS_RESOLVED_ERROR }; AWS_EXTERN_C_BEGIN AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_get_supported_ruleset_version(void); /* ****************************** * Parameter ****************************** */ /* * Value type of parameter. */ AWS_SDKUTILS_API enum aws_endpoints_parameter_type aws_endpoints_parameter_get_type( const struct aws_endpoints_parameter *parameter); /* * Specifies whether parameter maps to one of SDK built ins (ex. "AWS::Region"). * Return is a cursor specifying the name of associated built in. * If there is no mapping, cursor will be empty. * Cursor is guaranteed to be valid for lifetime of paramater. */ AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_built_in( const struct aws_endpoints_parameter *parameter); /* * Default string value. * out_cursor will point to default string value if one exist and will be empty * otherwise. * Cursor is guaranteed to be valid for lifetime of paramater. * Returns AWS_OP_ERR if parameter is not a string. */ AWS_SDKUTILS_API int aws_endpoints_parameter_get_default_string( const struct aws_endpoints_parameter *parameter, struct aws_byte_cursor *out_cursor); /* * Default boolean value. * out_bool will have pointer to value if default is specified, NULL otherwise. * Owned by parameter. * Returns AWS_OP_ERR if parameter is not a boolean. */ AWS_SDKUTILS_API int aws_endpoints_parameter_get_default_boolean( const struct aws_endpoints_parameter *parameter, const bool **out_bool); /* * Whether parameter is required. */ AWS_SDKUTILS_API bool aws_endpoints_parameter_get_is_required(const struct aws_endpoints_parameter *parameter); /* * Returns cursor to parameter documentation. * Cursor is guaranteed to be valid for lifetime of paramater. * Will not be empty as doc is required. */ AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_documentation( const struct aws_endpoints_parameter *parameter); /* * Whether parameter is deprecated. */ AWS_SDKUTILS_API bool aws_endpoints_parameters_get_is_deprecated(const struct aws_endpoints_parameter *parameter); /* * Deprecation message. Cursor is empty if parameter is not deprecated. * Cursor is guaranteed to be valid for lifetime of paramater. */ AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_message( const struct aws_endpoints_parameter *parameter); /* * Deprecated since. Cursor is empty if parameter is not deprecated. * Cursor is guaranteed to be valid for lifetime of paramater. */ AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_since( const struct aws_endpoints_parameter *parameter); /* ****************************** * Ruleset ****************************** */ /* * Create new ruleset from a json string. * In cases of failure NULL is returned and last error is set. 
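 *
 * A minimal creation sketch, for illustration only: "allocator" is assumed to be a
 * valid aws_allocator, "ruleset_json" stands in for a complete endpoints ruleset
 * document (not reproduced here), the cursor helper comes from aws-c-common, and
 * error/NULL checks are omitted:
 *
 *     struct aws_byte_cursor json_cur = aws_byte_cursor_from_c_str(ruleset_json);
 *     struct aws_endpoints_ruleset *ruleset =
 *         aws_endpoints_ruleset_new_from_string(allocator, json_cur);
 *
 *     struct aws_byte_cursor version = aws_endpoints_ruleset_get_version(ruleset);
 *     const struct aws_hash_table *params = aws_endpoints_ruleset_get_parameters(ruleset);
 *
 *     aws_endpoints_ruleset_release(ruleset);
 *
 * Inspect version/params as needed; typically the ruleset is handed to
 * aws_endpoints_rule_engine_new(), declared further below, to build a resolver.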
*/ AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_new_from_string( struct aws_allocator *allocator, struct aws_byte_cursor ruleset_json); /* * Increment ref count */ AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_acquire(struct aws_endpoints_ruleset *ruleset); /* * Decrement ref count */ AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_release(struct aws_endpoints_ruleset *ruleset); /* * Get ruleset parameters. * Return is a hashtable with paramater name as a key (aws_byte_cursor *) and parameter * (aws_endpoints_parameter *) as a value. Ruleset owns the owns the hashtable and * pointer is valid during ruleset lifetime. Will never return a NULL. In case * there are no parameters in the ruleset, hash table will contain 0 elements. * * Note on usage in bindings: * - this is basically a map from a parameter name to a structure describing parameter * - deep copy all the fields and let language take ownership of data * Consider transforming this into language specific map (dict for python, Map * in Java, std::map in C++, etc...) instead of wrapping it into a custom class. */ AWS_SDKUTILS_API const struct aws_hash_table *aws_endpoints_ruleset_get_parameters( struct aws_endpoints_ruleset *ruleset); /* * Ruleset version. * Returned pointer is owned by ruleset. * Will not return NULL as version is a required field for ruleset. */ AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_ruleset_get_version(const struct aws_endpoints_ruleset *ruleset); /* * Ruleset service id. * Returned pointer is owned by ruleset. * Can be NULL if not specified in ruleset. */ AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_ruleset_get_service_id( const struct aws_endpoints_ruleset *ruleset); /* ****************************** * Rule engine ****************************** */ /** * Create new rule engine for a given ruleset. * In cases of failure NULL is returned and last error is set. */ AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_new( struct aws_allocator *allocator, struct aws_endpoints_ruleset *ruleset, struct aws_partitions_config *partitions_config); /* * Increment rule engine ref count. */ AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_acquire( struct aws_endpoints_rule_engine *rule_engine); /* * Decrement rule engine ref count. */ AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_release( struct aws_endpoints_rule_engine *rule_engine); /* * Creates new request context. * This is basically a property bag containing all request parameter values needed to * resolve endpoint. Parameter value names must match parameter names specified * in ruleset. * Caller is responsible for releasing request context. * Note on usage in bindings: * - Consider exposing it as a custom property bag or a standard map and then * transform it into request context. */ AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_new( struct aws_allocator *allocator); /* * Increment resolved endpoint ref count. */ AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_acquire( struct aws_endpoints_request_context *request_context); /* * Decrement resolved endpoint ref count. */ AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_release( struct aws_endpoints_request_context *request_context); /* * Add string value to request context. 
* Note: this function will make a copy of the memory backing the cursors. * The function will override any previous value stored in the context with the * same name. */ AWS_SDKUTILS_API int aws_endpoints_request_context_add_string( struct aws_allocator *allocator, struct aws_endpoints_request_context *context, struct aws_byte_cursor name, struct aws_byte_cursor value); /* * Add boolean value to request context. * Note: this function will make a copy of the memory backing the cursors. * The function will override any previous value stored in the context with the * same name. */ AWS_SDKUTILS_API int aws_endpoints_request_context_add_boolean( struct aws_allocator *allocator, struct aws_endpoints_request_context *context, struct aws_byte_cursor name, bool value); /* * Resolve an endpoint given request context. * Resolved endpoint is returned through out_resolved_endpoint. * In cases of error out_resolved_endpoint is set to NULL and error is returned. * Resolved endpoint is ref counter and caller is responsible for releasing it. */ AWS_SDKUTILS_API int aws_endpoints_rule_engine_resolve( struct aws_endpoints_rule_engine *engine, const struct aws_endpoints_request_context *context, struct aws_endpoints_resolved_endpoint **out_resolved_endpoint); /* * Increment resolved endpoint ref count. */ AWS_SDKUTILS_API struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_acquire( struct aws_endpoints_resolved_endpoint *resolved_endpoint); /* * Decrement resolved endpoint ref count. */ AWS_SDKUTILS_API struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_release( struct aws_endpoints_resolved_endpoint *resolved_endpoint); /* * Get type of resolved endpoint. */ AWS_SDKUTILS_API enum aws_endpoints_resolved_endpoint_type aws_endpoints_resolved_endpoint_get_type( const struct aws_endpoints_resolved_endpoint *resolved_endpoint); /* * Get url for the resolved endpoint. * Valid only if resolved endpoint has endpoint type and will error otherwise. */ AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_url( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, struct aws_byte_cursor *out_url); /* * Get properties for the resolved endpoint. * Note: properties is a json string containing additional data for a given * endpoint. Data is not typed and is not guaranteed to change in the future. * For use at callers discretion. * Valid only if resolved endpoint has endpoint type and will error otherwise. */ AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_properties( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, struct aws_byte_cursor *out_properties); /* * Get headers for the resolved endpoint. * out_headers type is aws_hash_table with (aws_string *) as key * and (aws_array_list * of aws_string *) as value. * Note on usage in bindings: * - this is a map to a list of strings and can be implemented as such in the * target language with deep copy of all underlying strings. * Valid only if resolved endpoint has endpoint type and will error otherwise. */ AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_headers( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, const struct aws_hash_table **out_headers); /* * Get error for the resolved endpoint. * Valid only if resolved endpoint has error type and will error otherwise. 
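 *
 * Pulling the request-context and resolution APIs above together, a hedged
 * end-to-end sketch: "allocator" and "engine" are assumed to exist (the engine
 * created via aws_endpoints_rule_engine_new), the "Region" parameter name and its
 * value are placeholders that must match the ruleset's parameters, the cursor
 * helper comes from aws-c-common, and error checks are omitted:
 *
 *     struct aws_endpoints_request_context *context =
 *         aws_endpoints_request_context_new(allocator);
 *     aws_endpoints_request_context_add_string(
 *         allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-west-2"));
 *
 *     struct aws_endpoints_resolved_endpoint *resolved = NULL;
 *     aws_endpoints_rule_engine_resolve(engine, context, &resolved);
 *
 *     struct aws_byte_cursor out;
 *     if (aws_endpoints_resolved_endpoint_get_type(resolved) == AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
 *         aws_endpoints_resolved_endpoint_get_url(resolved, &out);
 *     } else {
 *         aws_endpoints_resolved_endpoint_get_error(resolved, &out);
 *     }
 *
 *     aws_endpoints_resolved_endpoint_release(resolved);
 *     aws_endpoints_request_context_release(context);
 *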
*/ AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_error( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, struct aws_byte_cursor *out_error); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_SDKUTILS_ENDPOINTS_RULESET_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/exports.h000066400000000000000000000017561456575232400270370ustar00rootroot00000000000000#ifndef AWS_SDKUTILS_EXPORTS_H #define AWS_SDKUTILS_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) # ifdef AWS_SDKUTILS_USE_IMPORT_EXPORT # ifdef AWS_SDKUTILS_EXPORTS # define AWS_SDKUTILS_API __declspec(dllexport) # else # define AWS_SDKUTILS_API __declspec(dllimport) # endif /* AWS_SDKUTILS_EXPORTS */ # else # define AWS_SDKUTILS_API # endif /*USE_IMPORT_EXPORT */ #else # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_SDKUTILS_USE_IMPORT_EXPORT) && \ defined(AWS_SDKUTILS_EXPORTS) # define AWS_SDKUTILS_API __attribute__((visibility("default"))) # else # define AWS_SDKUTILS_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */ #endif /* AWS_SDKUTILS_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/partitions.h000066400000000000000000000021001456575232400275070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_SDKUTILS_PARTITIONS_H #define AWS_SDKUTILS_PARTITIONS_H #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_partitions_config; AWS_EXTERN_C_BEGIN AWS_SDKUTILS_API struct aws_byte_cursor aws_partitions_get_supported_version(void); /* * Create new partitions config from a json string. * In cases of failure NULL is returned and last error is set. */ AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_new_from_string( struct aws_allocator *allocator, struct aws_byte_cursor json); /* * Increment ref count */ AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_acquire(struct aws_partitions_config *partitions); /* * Decrement ref count */ AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_release(struct aws_partitions_config *partitions); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_SDKUTILS_PARTITIONS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/private/000077500000000000000000000000001456575232400266235ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_regex.h000066400000000000000000000031051456575232400321700ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_SDKUTILS_ENDPOINTS_REGEX_H #define AWS_SDKUTILS_ENDPOINTS_REGEX_H #include #include /* * Warning: this is a minimal regex implementation that only covers cases needed * for endpoint resolution and it is missing a lot of regex features. * Please reconsider if you are planning to use below functions in any other * context than endpoint resolution. * Refer to source/endpoints_regex.c for limitations. */ struct aws_endpoints_regex; /* * Parse regex pattern and construct "compiled" regex from it. 
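 *
 * For a sense of the intended usage, a sketch only: "allocator" is assumed, the
 * pattern and text are placeholders in the spirit of partition region regexes,
 * the cursor helper comes from aws-c-common, and error handling is omitted
 * (failure modes are listed right below):
 *
 *     struct aws_endpoints_regex *regex =
 *         aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^us\\-west\\-\\d+$"));
 *     int result = aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-west-2"));
 *     aws_endpoints_regex_destroy(regex);
 *
 * Here result is AWS_OP_SUCCESS when the text matches.
 *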
* Returns NULL on failure and raises following error code: * - AWS_ERROR_INVALID_ARGUMENT - regex is invalid for some reason * - AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX - regex is valid, but * implementation does not support some of regex features */ AWS_SDKUTILS_API struct aws_endpoints_regex *aws_endpoints_regex_new( struct aws_allocator *allocator, struct aws_byte_cursor regex_pattern); /* * Destroys compiled regex. */ AWS_SDKUTILS_API void aws_endpoints_regex_destroy(struct aws_endpoints_regex *regex); /* * Matches text against regex. * returns AWS_OP_SUCCESS on successful match and * AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH if text didn't match or * AWS_ERROR_INVALID_ARGUMENT if inputs are invalid. * */ AWS_SDKUTILS_API int aws_endpoints_regex_match(const struct aws_endpoints_regex *regex, struct aws_byte_cursor text); #endif /* AWS_SDKUTILS_ENDPOINTS_REGEX_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h000066400000000000000000000252111456575232400332450ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H #define AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H #include #include #include #include struct aws_json_value; /* * Rule engine is built around 2 major types: * - expr - can be a literal, like bool or number or expression like function or ref * - value - literal types only. result of resolving expr. Can have special None * value depending on how expr is resolved. Ex. accessing array past bounds or * substrings with invalid start/end combination will both result in null. * * There is a lot of overlap between expr and value, so why do we need both? * Primary reason is to create a clean boundary between ruleset and resolved * values as it allows to distinguish easily between things that need to be * resolved and things that have been lowered. Given this type system, rule * engine basically performs a task of transforming exprs into values to get * final result. * * Other important types: * Parameter - definition of values that can be provided to rule engine during * resolution. Can define default values if caller didn't provide a value for * parameter. * Request Context - set of parameter value defined for a particular request that * are used during resolution * Scope - set of values defined during resolution of a rule. Can grow/shrink as * rules are evaluated. Ex. scope can have value with name "Region" and value "us-west-2". */ /* ****************************** * Parse types. 
****************************** */ enum aws_endpoints_rule_type { AWS_ENDPOINTS_RULE_ENDPOINT, AWS_ENDPOINTS_RULE_ERROR, AWS_ENDPOINTS_RULE_TREE }; enum aws_endpoints_expr_type { AWS_ENDPOINTS_EXPR_STRING, AWS_ENDPOINTS_EXPR_NUMBER, AWS_ENDPOINTS_EXPR_BOOLEAN, AWS_ENDPOINTS_EXPR_ARRAY, AWS_ENDPOINTS_EXPR_REFERENCE, AWS_ENDPOINTS_EXPR_FUNCTION }; enum aws_endpoints_fn_type { AWS_ENDPOINTS_FN_FIRST = 0, AWS_ENDPOINTS_FN_IS_SET = 0, AWS_ENDPOINTS_FN_NOT, AWS_ENDPOINTS_FN_GET_ATTR, AWS_ENDPOINTS_FN_SUBSTRING, AWS_ENDPOINTS_FN_STRING_EQUALS, AWS_ENDPOINTS_FN_BOOLEAN_EQUALS, AWS_ENDPOINTS_FN_URI_ENCODE, AWS_ENDPOINTS_FN_PARSE_URL, AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL, AWS_ENDPOINTS_FN_AWS_PARTITION, AWS_ENDPOINTS_FN_AWS_PARSE_ARN, AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET, AWS_ENDPOINTS_FN_LAST, }; struct aws_endpoints_parameter { struct aws_allocator *allocator; struct aws_byte_cursor name; enum aws_endpoints_parameter_type type; struct aws_byte_cursor built_in; bool has_default_value; union { struct aws_byte_cursor string; bool boolean; } default_value; bool is_required; struct aws_byte_cursor documentation; bool is_deprecated; struct aws_byte_cursor deprecated_message; struct aws_byte_cursor deprecated_since; }; struct aws_endpoints_ruleset { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct aws_json_value *json_root; /* list of (aws_endpoints_rule) */ struct aws_array_list rules; struct aws_byte_cursor version; struct aws_byte_cursor service_id; /* map of (aws_byte_cursor *) -> (aws_endpoints_parameter *) */ struct aws_hash_table parameters; }; struct aws_endpoints_function { enum aws_endpoints_fn_type fn; /* List of (aws_endpoints_expr) */ struct aws_array_list argv; }; struct aws_endpoints_expr { enum aws_endpoints_expr_type type; union { struct aws_byte_cursor string; double number; bool boolean; struct aws_array_list array; /* List of (aws_endpoints_expr) */ struct aws_byte_cursor reference; struct aws_endpoints_function function; } e; }; struct aws_endpoints_rule_data_endpoint { struct aws_allocator *allocator; struct aws_endpoints_expr url; /* * Note: this is a custom properties json associated with the result. * Properties are unstable and format can change frequently. * Its up to caller to parse json to retrieve properties. */ struct aws_byte_buf properties; /* Map of (aws_string *) -> (aws_array_list * of aws_endpoints_expr) */ struct aws_hash_table headers; }; struct aws_endpoints_rule_data_error { struct aws_endpoints_expr error; }; struct aws_endpoints_rule_data_tree { /* List of (aws_endpoints_rule) */ struct aws_array_list rules; }; struct aws_endpoints_condition { struct aws_endpoints_expr expr; struct aws_byte_cursor assign; }; struct aws_endpoints_rule { /* List of (aws_endpoints_condition) */ struct aws_array_list conditions; struct aws_byte_cursor documentation; enum aws_endpoints_rule_type type; union { struct aws_endpoints_rule_data_endpoint endpoint; struct aws_endpoints_rule_data_error error; struct aws_endpoints_rule_data_tree tree; } rule_data; }; struct aws_partition_info { struct aws_allocator *allocator; struct aws_byte_cursor name; bool is_copy; struct aws_string *info; struct aws_endpoints_regex *region_regex; }; /* * Basic partitions file structure is a list of partitions at top level that has * some metadata associated with it and then each partition has a list of * regions within it, with each region possibly overriding some of that info. 
* The 2 use cases we need to support is matching region to partition and then * iterating over all partitions and matching regex in partition meta to region name. * To support both cases we have 2 structures: * - base_partitions - list of all partitions. this is a primary owner for partition * meta data * - region_to_partition_info - mapping from region name to partition. creates * new meta info if region overrides any meta values, otherwise points to * partitions copy of meta info (is_copy flag is true) */ struct aws_partitions_config { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct aws_json_value *json_root; /* map of (byte_cur -> aws_partition_info) */ struct aws_hash_table region_to_partition_info; /* map of (byte_cur -> aws_partition_info) */ struct aws_hash_table base_partitions; struct aws_string *version; }; /* ****************************** * Eval types. ****************************** */ enum aws_endpoints_value_type { /* Special value to represent that any value type is expected from resolving an expresion. Note a valid value for a value type. */ AWS_ENDPOINTS_VALUE_ANY, AWS_ENDPOINTS_VALUE_NONE, AWS_ENDPOINTS_VALUE_STRING, AWS_ENDPOINTS_VALUE_BOOLEAN, AWS_ENDPOINTS_VALUE_OBJECT, /* Generic type returned by some functions. json string under the covers. */ AWS_ENDPOINTS_VALUE_NUMBER, AWS_ENDPOINTS_VALUE_ARRAY, AWS_ENDPOINTS_VALUE_SIZE }; struct aws_endpoints_request_context { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct aws_hash_table values; }; /* concrete type value */ struct aws_endpoints_value { enum aws_endpoints_value_type type; union { struct aws_owning_cursor owning_cursor_string; bool boolean; struct aws_owning_cursor owning_cursor_object; double number; struct aws_array_list array; } v; }; /* wrapper around aws_endpoints_value to store it more easily in hash table*/ struct aws_endpoints_scope_value { struct aws_allocator *allocator; struct aws_owning_cursor name; struct aws_endpoints_value value; }; struct aws_endpoints_resolution_scope { /* current values in scope. 
byte_cur -> aws_endpoints_scope_value */ struct aws_hash_table values; /* list of value keys added since last cleanup */ struct aws_array_list added_keys; /* index of the rule currently being evaluated */ size_t rule_idx; /* pointer to rules array */ const struct aws_array_list *rules; const struct aws_partitions_config *partitions; }; struct aws_partition_info *aws_partition_info_new(struct aws_allocator *allocator, struct aws_byte_cursor name); void aws_partition_info_destroy(struct aws_partition_info *partition_info); struct aws_endpoints_parameter *aws_endpoints_parameter_new( struct aws_allocator *allocator, struct aws_byte_cursor name); void aws_endpoints_parameter_destroy(struct aws_endpoints_parameter *parameter); void aws_endpoints_rule_clean_up(struct aws_endpoints_rule *rule); void aws_endpoints_rule_data_endpoint_clean_up(struct aws_endpoints_rule_data_endpoint *rule_data); void aws_endpoints_rule_data_error_clean_up(struct aws_endpoints_rule_data_error *rule_data); void aws_endpoints_rule_data_tree_clean_up(struct aws_endpoints_rule_data_tree *rule_data); void aws_endpoints_condition_clean_up(struct aws_endpoints_condition *condition); void aws_endpoints_function_clean_up(struct aws_endpoints_function *function); void aws_endpoints_expr_clean_up(struct aws_endpoints_expr *expr); struct aws_endpoints_scope_value *aws_endpoints_scope_value_new( struct aws_allocator *allocator, struct aws_byte_cursor name_cur); void aws_endpoints_scope_value_destroy(struct aws_endpoints_scope_value *scope_value); int aws_endpoints_deep_copy_parameter_value( struct aws_allocator *allocator, const struct aws_endpoints_value *from, struct aws_endpoints_value *to); void aws_endpoints_value_clean_up(struct aws_endpoints_value *aws_endpoints_value); /* Helper to resolve argv. Implemented in rule engine. */ int aws_endpoints_argv_expect( struct aws_allocator *allocator, struct aws_endpoints_resolution_scope *scope, struct aws_array_list *argv, size_t idx, enum aws_endpoints_value_type expected_type, struct aws_endpoints_value *out_value); extern uint64_t aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_LAST]; void aws_endpoints_rule_engine_init(void); int aws_endpoints_dispatch_standard_lib_fn_resolve( enum aws_endpoints_fn_type type, struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value); int aws_endpoints_path_through_array( struct aws_allocator *allocator, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *eval_val, struct aws_byte_cursor path_cur, struct aws_endpoints_value *out_value); int aws_endpoints_path_through_object( struct aws_allocator *allocator, struct aws_endpoints_value *eval_val, struct aws_byte_cursor path_cur, struct aws_endpoints_value *out_value); #endif /* AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h000066400000000000000000000107611456575232400320410ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H #define AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H #include struct aws_string; struct aws_byte_buf; struct aws_json_value; /* Cursor that optionally owns underlying memory. */ struct aws_owning_cursor { struct aws_byte_cursor cur; struct aws_string *string; }; /* Clones string and wraps it in owning cursor. 
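 *
 * A small ownership sketch (illustrative only: "allocator" is assumed and the
 * string helpers come from aws-c-common; error checks omitted). Because the
 * string is cloned, the original may be destroyed right away and owning.cur
 * should remain valid until aws_owning_cursor_clean_up() is called:
 *
 *     struct aws_string *str = aws_string_new_from_c_str(allocator, "us-east-1");
 *     struct aws_owning_cursor owning = aws_endpoints_owning_cursor_create(allocator, str);
 *     aws_string_destroy(str);
 *     aws_owning_cursor_clean_up(&owning);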
*/ AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_create( struct aws_allocator *allocator, const struct aws_string *str); /* Creates new cursor that takes ownership of created string. */ AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_from_string(struct aws_string *str); /* Clones memory pointer to by cursor and wraps in owning cursor */ AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_from_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor cur); /* Creates owning cursor with memory pointer set to NULL */ AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_non_owning_cursor_create(struct aws_byte_cursor cur); /* Cleans up memory associated with the cursor */ AWS_SDKUTILS_API void aws_owning_cursor_clean_up(struct aws_owning_cursor *cursor); /* * Determine whether host cursor is IPv4 string. */ AWS_SDKUTILS_API bool aws_is_ipv4(struct aws_byte_cursor host); /* * Determine whether host cursor is IPv6 string. * Supports checking for uri encoded strings and scoped literals. */ AWS_SDKUTILS_API bool aws_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded); /* * Determine whether label is a valid host label. */ AWS_SDKUTILS_API bool aws_is_valid_host_label(struct aws_byte_cursor label, bool allow_subdomains); /* * Normalize uri path - make sure it starts and ends with / * Will initialize out_normalized_path. * In cases of error out_normalized_path will be uninitialized. */ AWS_SDKUTILS_API int aws_byte_buf_init_from_normalized_uri_path( struct aws_allocator *allocator, struct aws_byte_cursor path, struct aws_byte_buf *out_normalized_path); /* * Creates new string from json value. * NULL in cases of error. */ AWS_SDKUTILS_API struct aws_string *aws_string_new_from_json( struct aws_allocator *allocator, const struct aws_json_value *value); /* * Convenience helper for comparing byte cursors. * Typeless for use with hash tables. */ AWS_SDKUTILS_API bool aws_endpoints_byte_cursor_eq(const void *a, const void *b); /* * Helpers to do deep clean up of array list. * TODO: move to aws-c-common? */ typedef void(aws_array_callback_clean_up_fn)(void *value); AWS_SDKUTILS_API void aws_array_list_deep_clean_up( struct aws_array_list *array, aws_array_callback_clean_up_fn on_clean_up_element); /* Function that resolves template. */ typedef int(aws_endpoints_template_resolve_fn)( struct aws_byte_cursor template, void *user_data, struct aws_owning_cursor *out_resolved); /* * Resolve templated string and write it out to buf. * Will parse templated values (i.e. values enclosed in {}) and replace them with * the value returned from resolve_callback. * Note: callback must be able to support syntax for pathing through value (path * provided after #). * Will replace escaped template delimiters ({{ and }}) with single chars. * Supports replacing templated values inside json strings (controlled by * is_json), by ignoring json { and } chars. */ AWS_SDKUTILS_API int aws_byte_buf_init_from_resolved_templated_string( struct aws_allocator *allocator, struct aws_byte_buf *out_buf, struct aws_byte_cursor string, aws_endpoints_template_resolve_fn resolve_callback, void *user_data, bool is_json); /* * Path through json structure and return final json node in out_value. * In cases of error, error is returned and out_value is set to NULL. * Array access out of bounds returns success, but set out_value to NULL (to be * consistent with spec). * * Path is defined as a string of '.' 
delimited fields names, that can optionally * end with [] to indicate indexing. * Note: only last element can be indexed. * ex. path "a.b.c[5]" results in going through a, b and then c and finally * taking index of 5. */ AWS_SDKUTILS_API int aws_path_through_json( struct aws_allocator *allocator, const struct aws_json_value *root, struct aws_byte_cursor path, const struct aws_json_value **out_value); #endif /* AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/resource_name.h000066400000000000000000000023121456575232400301470ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #ifndef AWS_SDKUTILS_RESOURCE_NAME_H #define AWS_SDKUTILS_RESOURCE_NAME_H #pragma once #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_resource_name { struct aws_byte_cursor partition; struct aws_byte_cursor service; struct aws_byte_cursor region; struct aws_byte_cursor account_id; struct aws_byte_cursor resource_id; }; AWS_EXTERN_C_BEGIN /** Given an ARN "Amazon Resource Name" represented as an in memory a structure representing the parts */ AWS_SDKUTILS_API int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input); /** Calculates the space needed to write an ARN to a byte buf */ AWS_SDKUTILS_API int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size); /** Serializes an ARN structure into the lexical string format */ AWS_SDKUTILS_API int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_SDKUTILS_RESOURCE_NAME_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h000066400000000000000000000034431456575232400271700ustar00rootroot00000000000000#ifndef AWS_SDKUTILS_SDKUTILS_H #define AWS_SDKUTILS_SDKUTILS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
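 *
 * As hedged, file-level orientation: a typical setup-and-teardown sketch combining
 * this header's init/cleanup entry points with the ARN helpers from resource_name.h.
 * aws_default_allocator() and aws_byte_cursor_from_c_str() come from aws-c-common,
 * the ARN string is an illustrative placeholder, and error checks are omitted:
 *
 *     struct aws_allocator *allocator = aws_default_allocator();
 *     aws_sdkutils_library_init(allocator);
 *
 *     struct aws_byte_cursor arn_cur =
 *         aws_byte_cursor_from_c_str("arn:aws:sns:us-west-2:123456789012:my-topic");
 *     struct aws_resource_name arn;
 *     aws_resource_name_init_from_cur(&arn, &arn_cur);
 *
 *     aws_sdkutils_library_clean_up();
 *
 * On success, arn.partition, arn.service, arn.region, arn.account_id and
 * arn.resource_id should then refer to the corresponding segments of the input ARN.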
*/ #include #include #include AWS_PUSH_SANE_WARNING_LEVEL struct aws_allocator; #define AWS_C_SDKUTILS_PACKAGE_ID 15 enum aws_sdkutils_errors { AWS_ERROR_SDKUTILS_GENERAL = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_SDKUTILS_PACKAGE_ID), AWS_ERROR_SDKUTILS_PARSE_FATAL, AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE, AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET, AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED, AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED, AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED, AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET, AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED, AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED, AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED, AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, AWS_ERROR_SDKUTILS_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_SDKUTILS_PACKAGE_ID) }; enum aws_sdkutils_log_subject { AWS_LS_SDKUTILS_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_SDKUTILS_PACKAGE_ID), AWS_LS_SDKUTILS_PROFILE, AWS_LS_SDKUTILS_ENDPOINTS_PARSING, AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, AWS_LS_SDKUTILS_PARTITIONS_PARSING, AWS_LS_SDKUTILS_ENDPOINTS_REGEX, AWS_LS_SDKUTILS_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_SDKUTILS_PACKAGE_ID) }; AWS_EXTERN_C_BEGIN AWS_SDKUTILS_API void aws_sdkutils_library_init(struct aws_allocator *allocator); AWS_SDKUTILS_API void aws_sdkutils_library_clean_up(void); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_SDKUTILS_SDKUTILS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/000077500000000000000000000000001456575232400223725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/aws_profile.c000066400000000000000000001515611456575232400250610ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #define PROPERTIES_TABLE_DEFAULT_SIZE 4 #define PROFILE_TABLE_DEFAULT_SIZE 5 struct aws_profile_property { struct aws_allocator *allocator; struct aws_string *name; struct aws_string *value; struct aws_hash_table sub_properties; bool is_empty_valued; }; struct aws_profile { struct aws_allocator *allocator; struct aws_string *name; struct aws_hash_table properties; bool has_profile_prefix; }; struct aws_profile_collection { struct aws_allocator *allocator; enum aws_profile_source_type profile_source; /* * Array of aws_hash_table for each section type. * Each table is a map from section identifier to aws_profile. 
* key: struct aws_string* * value: struct aws_profile* */ struct aws_hash_table sections[AWS_PROFILE_SECTION_TYPE_COUNT]; struct aws_ref_count ref_count; }; /* * Character-based profile parse helper functions */ static bool s_is_assignment_operator(uint8_t value) { return (char)value == '='; } static bool s_is_not_assignment_operator(uint8_t value) { return !s_is_assignment_operator(value); } static bool s_is_identifier(uint8_t value) { char value_as_char = (char)value; if ((value_as_char >= 'A' && value_as_char <= 'Z') || (value_as_char >= 'a' && value_as_char <= 'z') || (value_as_char >= '0' && value_as_char <= '9') || value_as_char == '\\' || value_as_char == '_' || value_as_char == '-') { return true; } return false; } static bool s_is_whitespace(uint8_t value) { char value_as_char = (char)value; switch (value_as_char) { case '\t': case '\n': case '\r': case ' ': return true; default: return false; } } static bool s_is_comment_token(uint8_t value) { char char_value = (char)value; return char_value == '#' || char_value == ';'; } static bool s_is_not_comment_token(uint8_t value) { return !s_is_comment_token(value); } static bool s_is_profile_start(uint8_t value) { return (char)value == '['; } static bool s_is_not_profile_end(uint8_t value) { return (char)value != ']'; } static bool s_is_carriage_return(uint8_t value) { return (char)value == '\r'; } /* * Line and string based parse helper functions */ static bool s_is_comment_line(const struct aws_byte_cursor *line_cursor) { char first_char = *line_cursor->ptr; return first_char == '#' || first_char == ';'; } static bool s_is_whitespace_line(const struct aws_byte_cursor *line_cursor) { return aws_byte_cursor_left_trim_pred(line_cursor, s_is_whitespace).len == 0; } AWS_STATIC_STRING_FROM_LITERAL(s_default_profile_name, "default"); static bool s_is_default_profile_name(const struct aws_byte_cursor *profile_name) { return aws_string_eq_byte_cursor(s_default_profile_name, profile_name); } /* * Consume helpers */ /* * Consumes characters as long as a predicate is satisfied. "parsed" is optional and contains the consumed range as * output. Returns true if anything was consumed. * * On success, start is updated to the new position. */ static bool s_parse_by_character_predicate( struct aws_byte_cursor *start, aws_byte_predicate_fn *predicate, struct aws_byte_cursor *parsed, size_t maximum_allowed) { uint8_t *current_ptr = start->ptr; uint8_t *end_ptr = start->ptr + start->len; if (maximum_allowed > 0 && maximum_allowed < start->len) { end_ptr = start->ptr + maximum_allowed; } while (current_ptr < end_ptr) { if (!predicate(*current_ptr)) { break; } ++current_ptr; } size_t consumed = current_ptr - start->ptr; if (parsed != NULL) { parsed->ptr = start->ptr; parsed->len = consumed; } aws_byte_cursor_advance(start, consumed); return consumed > 0; } /* * Consumes characters if they match a token string. "parsed" is optional and contains the consumed range as output. * Returns true if anything was consumed. * * On success, start is updated to the new position. */ static bool s_parse_by_token( struct aws_byte_cursor *start, const struct aws_string *token, struct aws_byte_cursor *parsed) { bool matched = false; if (token->len <= start->len) { matched = strncmp((const char *)start->ptr, aws_string_c_str(token), token->len) == 0; } if (parsed != NULL) { parsed->ptr = start->ptr; parsed->len = matched ? 
token->len : 0; } if (matched) { aws_byte_cursor_advance(start, token->len); } return matched; } /* * Parse context and logging */ struct profile_file_parse_context { const struct aws_string *source_file_path; struct aws_profile_collection *profile_collection; struct aws_profile *current_profile; struct aws_profile_property *current_property; struct aws_byte_cursor current_line; int parse_error; int current_line_number; bool has_seen_profile; }; AWS_STATIC_STRING_FROM_LITERAL(s_none_string, ""); static void s_log_parse_context(enum aws_log_level log_level, const struct profile_file_parse_context *context) { AWS_LOGF( log_level, AWS_LS_SDKUTILS_PROFILE, "Profile Parse context:\n Source File:%s\n Line: %d\n Current Profile: %s\n Current Property: %s", context->source_file_path ? context->source_file_path->bytes : s_none_string->bytes, context->current_line_number, context->current_profile ? context->current_profile->name->bytes : s_none_string->bytes, context->current_property ? context->current_property->name->bytes : s_none_string->bytes); } /* * aws_profile_property APIs */ static void s_profile_property_destroy(struct aws_profile_property *property) { if (property == NULL) { return; } aws_string_destroy(property->name); aws_string_destroy(property->value); aws_hash_table_clean_up(&property->sub_properties); aws_mem_release(property->allocator, property); } struct aws_profile_property *aws_profile_property_new( struct aws_allocator *allocator, const struct aws_byte_cursor *name, const struct aws_byte_cursor *value) { struct aws_profile_property *property = (struct aws_profile_property *)aws_mem_acquire(allocator, sizeof(struct aws_profile_property)); if (property == NULL) { return NULL; } AWS_ZERO_STRUCT(*property); property->allocator = allocator; if (aws_hash_table_init( &property->sub_properties, allocator, 0, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, aws_hash_callback_string_destroy)) { goto on_error; } property->value = aws_string_new_from_array(allocator, value->ptr, value->len); if (property->value == NULL) { goto on_error; } property->name = aws_string_new_from_array(allocator, name->ptr, name->len); if (property->name == NULL) { goto on_error; } property->is_empty_valued = value->len == 0; return property; on_error: s_profile_property_destroy(property); return NULL; } AWS_STATIC_STRING_FROM_LITERAL(s_newline, "\n"); /* * Continuations are applied to the property value by concatenating the old value and the new value, with a '\n' * in between. 
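 *
 * For example, a hypothetical profile file containing
 *
 *   [default]
 *   sample_property = first line
 *       second line
 *
 * leaves sample_property with the value "first line\nsecond line" once the
 * indented continuation line is folded in.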
*/ static int s_profile_property_add_continuation( struct aws_profile_property *property, const struct aws_byte_cursor *continuation_value) { int result = AWS_OP_ERR; struct aws_byte_buf concatenation; if (aws_byte_buf_init(&concatenation, property->allocator, property->value->len + continuation_value->len + 1)) { return result; } struct aws_byte_cursor old_value = aws_byte_cursor_from_string(property->value); if (aws_byte_buf_append(&concatenation, &old_value)) { goto on_generic_failure; } struct aws_byte_cursor newline = aws_byte_cursor_from_string(s_newline); if (aws_byte_buf_append(&concatenation, &newline)) { goto on_generic_failure; } if (aws_byte_buf_append(&concatenation, continuation_value)) { goto on_generic_failure; } struct aws_string *new_value = aws_string_new_from_array(property->allocator, concatenation.buffer, concatenation.len); if (new_value == NULL) { goto on_generic_failure; } result = AWS_OP_SUCCESS; aws_string_destroy(property->value); property->value = new_value; on_generic_failure: aws_byte_buf_clean_up(&concatenation); return result; } static int s_profile_property_add_sub_property( struct aws_profile_property *property, const struct aws_byte_cursor *key, const struct aws_byte_cursor *value, const struct profile_file_parse_context *context) { struct aws_string *key_string = aws_string_new_from_array(property->allocator, key->ptr, key->len); if (key_string == NULL) { return AWS_OP_ERR; } struct aws_string *value_string = aws_string_new_from_array(property->allocator, value->ptr, value->len); if (value_string == NULL) { goto on_failure; } int was_present = 0; aws_hash_table_remove(&property->sub_properties, key_string, NULL, &was_present); if (was_present) { AWS_LOGF_DEBUG( AWS_LS_SDKUTILS_PROFILE, "subproperty \"%s\" of property \"%s\" had value overridden with new value", key_string->bytes, property->name->bytes); s_log_parse_context(AWS_LL_WARN, context); } if (aws_hash_table_put(&property->sub_properties, key_string, value_string, NULL)) { goto on_failure; } return AWS_OP_SUCCESS; on_failure: if (value_string) { aws_string_destroy(value_string); } aws_string_destroy(key_string); return AWS_OP_ERR; } static int s_profile_property_merge(struct aws_profile_property *dest, const struct aws_profile_property *source) { AWS_ASSERT(dest != NULL && source != NULL); /* * Source value overwrites any existing dest value */ if (source->value) { struct aws_string *new_value = aws_string_new_from_string(dest->allocator, source->value); if (new_value == NULL) { return AWS_OP_ERR; } if (dest->value) { AWS_LOGF_DEBUG( AWS_LS_SDKUTILS_PROFILE, "property \"%s\" has value \"%s\" replaced during merge", dest->name->bytes, dest->value->bytes); aws_string_destroy(dest->value); } dest->value = new_value; } dest->is_empty_valued = source->is_empty_valued; /* * Iterate sub properties, stomping on conflicts */ struct aws_hash_iter source_iter = aws_hash_iter_begin(&source->sub_properties); while (!aws_hash_iter_done(&source_iter)) { struct aws_string *source_sub_property = (struct aws_string *)source_iter.element.value; struct aws_string *dest_key = aws_string_new_from_string(dest->allocator, (struct aws_string *)source_iter.element.key); if (dest_key == NULL) { return AWS_OP_ERR; } struct aws_string *dest_sub_property = aws_string_new_from_string(dest->allocator, source_sub_property); if (dest_sub_property == NULL) { aws_string_destroy(dest_key); return AWS_OP_ERR; } int was_present = 0; aws_hash_table_remove(&dest->sub_properties, dest_key, NULL, &was_present); if (was_present) { 
AWS_LOGF_DEBUG( AWS_LS_SDKUTILS_PROFILE, "subproperty \"%s\" of property \"%s\" had value overridden during property merge", dest_key->bytes, dest->name->bytes); } if (aws_hash_table_put(&dest->sub_properties, dest_key, dest_sub_property, NULL)) { aws_string_destroy(dest_sub_property); aws_string_destroy(dest_key); return AWS_OP_ERR; } aws_hash_iter_next(&source_iter); } return AWS_OP_SUCCESS; } /* * Helper destroy function for aws_profile's hash table of properties */ static void s_property_hash_table_value_destroy(void *value) { s_profile_property_destroy((struct aws_profile_property *)value); } /* * aws_profile APIs */ void aws_profile_destroy(struct aws_profile *profile) { if (profile == NULL) { return; } aws_string_destroy(profile->name); aws_hash_table_clean_up(&profile->properties); aws_mem_release(profile->allocator, profile); } struct aws_profile *aws_profile_new( struct aws_allocator *allocator, const struct aws_byte_cursor *name, bool has_profile_prefix) { struct aws_profile *profile = (struct aws_profile *)aws_mem_acquire(allocator, sizeof(struct aws_profile)); if (profile == NULL) { return NULL; } AWS_ZERO_STRUCT(*profile); profile->name = aws_string_new_from_array(allocator, name->ptr, name->len); if (profile->name == NULL) { goto cleanup; } if (aws_hash_table_init( &profile->properties, allocator, PROPERTIES_TABLE_DEFAULT_SIZE, aws_hash_string, aws_hash_callback_string_eq, NULL, /* The key is owned by the value (and destroy cleans it up), so we don't have to */ s_property_hash_table_value_destroy)) { goto cleanup; } profile->allocator = allocator; profile->has_profile_prefix = has_profile_prefix; return profile; cleanup: aws_profile_destroy(profile); return NULL; } /* * Adds a property to a profile. * * If a property already exists then the old one is removed and replaced by the * new one. 
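 *
 * For example, a hypothetical profile such as
 *
 *   [default]
 *   region = us-east-1
 *   region = us-west-2
 *
 * ends up with a single "region" property whose value is "us-west-2": the
 * later definition replaces the earlier one.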
*/ static struct aws_profile_property *s_profile_add_property( struct aws_profile *profile, const struct aws_byte_cursor *key_cursor, const struct aws_byte_cursor *value_cursor) { struct aws_profile_property *property = aws_profile_property_new(profile->allocator, key_cursor, value_cursor); if (property == NULL) { goto on_property_new_failure; } if (aws_hash_table_put(&profile->properties, property->name, property, NULL)) { goto on_hash_table_put_failure; } return property; on_hash_table_put_failure: s_profile_property_destroy(property); on_property_new_failure: return NULL; } const struct aws_profile_property *aws_profile_get_property( const struct aws_profile *profile, const struct aws_string *property_name) { struct aws_hash_element *element = NULL; aws_hash_table_find(&profile->properties, property_name, &element); if (element == NULL) { return NULL; } return element->value; } const struct aws_string *aws_profile_property_get_value(const struct aws_profile_property *property) { AWS_PRECONDITION(property); return property->value; } static int s_profile_merge(struct aws_profile *dest_profile, const struct aws_profile *source_profile) { AWS_ASSERT(dest_profile != NULL && source_profile != NULL); dest_profile->has_profile_prefix = source_profile->has_profile_prefix; struct aws_hash_iter source_iter = aws_hash_iter_begin(&source_profile->properties); while (!aws_hash_iter_done(&source_iter)) { struct aws_profile_property *source_property = (struct aws_profile_property *)source_iter.element.value; struct aws_profile_property *dest_property = (struct aws_profile_property *)aws_profile_get_property( dest_profile, (struct aws_string *)source_iter.element.key); if (dest_property == NULL) { struct aws_byte_cursor empty_value; AWS_ZERO_STRUCT(empty_value); struct aws_byte_cursor property_name = aws_byte_cursor_from_string(source_iter.element.key); dest_property = aws_profile_property_new(dest_profile->allocator, &property_name, &empty_value); if (dest_property == NULL) { return AWS_OP_ERR; } if (aws_hash_table_put(&dest_profile->properties, dest_property->name, dest_property, NULL)) { s_profile_property_destroy(dest_property); return AWS_OP_ERR; } } if (s_profile_property_merge(dest_property, source_property)) { return AWS_OP_ERR; } aws_hash_iter_next(&source_iter); } return AWS_OP_SUCCESS; } /* * Hash table destroy helper for profile collection's profiles member */ static void s_profile_hash_table_value_destroy(void *value) { aws_profile_destroy((struct aws_profile *)value); } /* * aws_profile_collection APIs */ void aws_profile_collection_destroy(struct aws_profile_collection *profile_collection) { aws_profile_collection_release(profile_collection); } static void s_aws_profile_collection_destroy_internal(struct aws_profile_collection *profile_collection) { for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) { aws_hash_table_clean_up(&profile_collection->sections[i]); } aws_mem_release(profile_collection->allocator, profile_collection); } AWS_STATIC_STRING_FROM_LITERAL(s_profile_token, "profile"); AWS_STATIC_STRING_FROM_LITERAL(s_sso_session_token, "sso-session"); const struct aws_profile *aws_profile_collection_get_profile( const struct aws_profile_collection *profile_collection, const struct aws_string *profile_name) { return aws_profile_collection_get_section(profile_collection, AWS_PROFILE_SECTION_TYPE_PROFILE, profile_name); } const struct aws_profile *aws_profile_collection_get_section( const struct aws_profile_collection *profile_collection, const enum aws_profile_section_type 
section_type, const struct aws_string *section_name) { struct aws_hash_element *element = NULL; aws_hash_table_find(&profile_collection->sections[section_type], section_name, &element); if (element == NULL) { return NULL; } return element->value; } static int s_profile_collection_add_profile( struct aws_profile_collection *profile_collection, const enum aws_profile_section_type section_type, const struct aws_byte_cursor *profile_name, bool has_prefix, const struct profile_file_parse_context *context, struct aws_profile **current_profile_out) { *current_profile_out = NULL; struct aws_string *key = aws_string_new_from_array(profile_collection->allocator, profile_name->ptr, profile_name->len); if (key == NULL) { return AWS_OP_ERR; } struct aws_profile *existing_profile = NULL; struct aws_hash_element *element = NULL; aws_hash_table_find(&profile_collection->sections[section_type], key, &element); if (element != NULL) { existing_profile = element->value; } aws_string_destroy(key); if (section_type == AWS_PROFILE_SECTION_TYPE_PROFILE && profile_collection->profile_source == AWS_PST_CONFIG && s_is_default_profile_name(profile_name)) { /* * In a config file, "profile default" always supercedes "default" */ if (!has_prefix && existing_profile && existing_profile->has_profile_prefix) { /* * existing one supercedes: ignore this (and its properties) completely by failing the add * which sets the current profile to NULL */ AWS_LOGF_DEBUG( AWS_LS_SDKUTILS_PROFILE, "Existing prefixed default config profile supercedes unprefixed default profile"); s_log_parse_context(AWS_LL_WARN, context); return AWS_OP_SUCCESS; } if (has_prefix && existing_profile && !existing_profile->has_profile_prefix) { /* * stomp over existing: remove it, then proceed with add * element destroy function will clean up the profile and key */ AWS_LOGF_DEBUG( AWS_LS_SDKUTILS_PROFILE, "Prefixed default config profile replacing unprefixed default profile"); s_log_parse_context(AWS_LL_WARN, context); aws_hash_table_remove(&profile_collection->sections[section_type], element->key, NULL, NULL); existing_profile = NULL; } } if (existing_profile) { *current_profile_out = existing_profile; return AWS_OP_SUCCESS; } struct aws_profile *new_profile = aws_profile_new(profile_collection->allocator, profile_name, has_prefix); if (new_profile == NULL) { goto on_aws_profile_new_failure; } if (aws_hash_table_put(&profile_collection->sections[section_type], new_profile->name, new_profile, NULL)) { goto on_hash_table_put_failure; } *current_profile_out = new_profile; return AWS_OP_SUCCESS; on_hash_table_put_failure: aws_profile_destroy(new_profile); on_aws_profile_new_failure: return AWS_OP_ERR; } static int s_profile_collection_merge( struct aws_profile_collection *dest_collection, const struct aws_profile_collection *source_collection) { AWS_ASSERT(dest_collection != NULL && source_collection); for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) { struct aws_hash_iter source_iter = aws_hash_iter_begin(&source_collection->sections[i]); while (!aws_hash_iter_done(&source_iter)) { struct aws_profile *source_profile = (struct aws_profile *)source_iter.element.value; struct aws_profile *dest_profile = (struct aws_profile *)aws_profile_collection_get_profile( dest_collection, (struct aws_string *)source_iter.element.key); if (dest_profile == NULL) { struct aws_byte_cursor name_cursor = aws_byte_cursor_from_string(source_iter.element.key); dest_profile = aws_profile_new(dest_collection->allocator, &name_cursor, source_profile->has_profile_prefix); if 
(dest_profile == NULL) { return AWS_OP_ERR; } if (aws_hash_table_put(&dest_collection->sections[i], dest_profile->name, dest_profile, NULL)) { aws_profile_destroy(dest_profile); return AWS_OP_ERR; } } if (s_profile_merge(dest_profile, source_profile)) { return AWS_OP_ERR; } aws_hash_iter_next(&source_iter); } } return AWS_OP_SUCCESS; } struct aws_profile_collection *aws_profile_collection_new_from_merge( struct aws_allocator *allocator, const struct aws_profile_collection *config_profiles, const struct aws_profile_collection *credentials_profiles) { struct aws_profile_collection *merged = (struct aws_profile_collection *)(aws_mem_acquire(allocator, sizeof(struct aws_profile_collection))); if (merged == NULL) { return NULL; } AWS_ZERO_STRUCT(*merged); aws_ref_count_init( &merged->ref_count, merged, (aws_simple_completion_callback *)s_aws_profile_collection_destroy_internal); for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) { size_t max_profiles = 0; if (config_profiles != NULL) { max_profiles += aws_hash_table_get_entry_count(&config_profiles->sections[i]); } if (credentials_profiles != NULL) { max_profiles += aws_hash_table_get_entry_count(&credentials_profiles->sections[i]); } merged->allocator = allocator; merged->profile_source = AWS_PST_NONE; if (aws_hash_table_init( &merged->sections[i], allocator, max_profiles, aws_hash_string, aws_hash_callback_string_eq, NULL, s_profile_hash_table_value_destroy)) { goto cleanup; } } if (config_profiles != NULL) { if (s_profile_collection_merge(merged, config_profiles)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to merge config profile set"); goto cleanup; } } if (credentials_profiles != NULL) { if (s_profile_collection_merge(merged, credentials_profiles)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to merge credentials profile set"); goto cleanup; } } return merged; cleanup: s_aws_profile_collection_destroy_internal(merged); return NULL; } /* * Profile parsing */ /* * The comment situation in config files is messy. Some line types require a comment to have at least one * whitespace in front of it, while other line types only require a comment token (;, #) On top of that, some * line types do not allow comments at all (get folded into the value). 
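 *
 * Hypothetical lines illustrating the three behaviors:
 *   "[default] ; note"          -> profile declaration; the comment needs no leading whitespace
 *   "region = us-east-1 ; note" -> property value "us-east-1"; the whitespace-prefixed comment is stripped
 *   "tag = a;b"                 -> property value "a;b"; without preceding whitespace the ';' is kept
 *   continuation lines keep any "; note" text as part of the folded value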
* */ /* * a trailing comment is started by ';' or '#' * Only certain types of lines allow comments without prefixing whitespace */ static struct aws_byte_cursor s_trim_trailing_comment(const struct aws_byte_cursor *line) { struct aws_byte_cursor line_copy = *line; struct aws_byte_cursor trimmed; s_parse_by_character_predicate(&line_copy, s_is_not_comment_token, &trimmed, 0); return trimmed; } /* * A trailing whitespace comment is started by " ;", " #", "\t;", or "\t#" * Certain types of lines require comments be whitespace-prefixed */ static struct aws_byte_cursor s_trim_trailing_whitespace_comment(const struct aws_byte_cursor *line) { struct aws_byte_cursor trimmed; trimmed.ptr = line->ptr; uint8_t *current_ptr = line->ptr; uint8_t *end_ptr = line->ptr + line->len; while (current_ptr < end_ptr) { if (s_is_whitespace(*current_ptr)) { /* * Look ahead 1 */ if (current_ptr + 1 < end_ptr && s_is_comment_token(*(current_ptr + 1))) { break; } } current_ptr++; } trimmed.len = current_ptr - line->ptr; return trimmed; } /** * Attempts to parse profile declaration lines * * Return false if this is not a profile declaration, true otherwise (stop parsing the line) */ static bool s_parse_profile_declaration( const struct aws_byte_cursor *line_cursor, struct profile_file_parse_context *context) { /* * Strip comment and right-side whitespace */ struct aws_byte_cursor profile_line_cursor = s_trim_trailing_comment(line_cursor); struct aws_byte_cursor profile_cursor = aws_byte_cursor_right_trim_pred(&profile_line_cursor, s_is_whitespace); /* * "[" + ? + <"profile ">? + + ? + "]" */ if (!s_parse_by_character_predicate(&profile_cursor, s_is_profile_start, NULL, 1)) { /* * This isn't a profile declaration, try something else */ return false; } context->has_seen_profile = true; context->current_profile = NULL; context->current_property = NULL; s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0); enum aws_profile_section_type section_type = AWS_PROFILE_SECTION_TYPE_PROFILE; /* * Check if the profile name starts with the 'profile' keyword. We need to check for * "profile" and at least one whitespace character. A partial match * ("[profilefoo]" for example) should rewind and use the whole name properly. 
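 *
 * Hypothetical section headers and how this check treats them:
 *   "[profile dev]"    -> "profile" prefix matched; in a config file this yields section type PROFILE, name "dev"
 *   "[profilefoo]"     -> "profile" is not followed by whitespace, so the parser rewinds and reads the whole name "profilefoo"
 *   "[sso-session s1]" -> "sso-session" prefix matched; section type SSO_SESSION, name "s1" (config files only)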
*/ struct aws_byte_cursor backtrack_cursor = profile_cursor; bool has_profile_prefix = s_parse_by_token(&profile_cursor, s_profile_token, NULL) && s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 1); bool has_sso_session_prefix = !has_profile_prefix && s_parse_by_token(&profile_cursor, s_sso_session_token, NULL) && s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 1); if (has_profile_prefix) { if (context->profile_collection->profile_source == AWS_PST_CREDENTIALS) { AWS_LOGF_WARN( AWS_LS_SDKUTILS_PROFILE, "Profile declarations in credentials files are not allowed to begin with the \"profile\" keyword"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0); } else if (has_sso_session_prefix) { if (context->profile_collection->profile_source == AWS_PST_CREDENTIALS) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "sso-session declarations in credentials files are not allowed"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } section_type = AWS_PROFILE_SECTION_TYPE_SSO_SESSION; s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0); } else { profile_cursor = backtrack_cursor; } struct aws_byte_cursor profile_name; if (!s_parse_by_character_predicate(&profile_cursor, s_is_identifier, &profile_name, 0)) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Profile declarations must contain a valid identifier for a name"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } if (context->profile_collection->profile_source == AWS_PST_CONFIG && !has_profile_prefix && !s_is_default_profile_name(&profile_name) && !has_sso_session_prefix) { AWS_LOGF_WARN( AWS_LS_SDKUTILS_PROFILE, "Non-default profile declarations in config files must use the \"profile\" keyword"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0); /* * Special case the right side bracket check. We need to distinguish between a missing right bracket * (fatal error) and invalid profile name (spaces, non-identifier characters). * * Do so by consuming all non right-bracket characters. If the remainder is empty it is missing, * otherwise it is an invalid profile name (non-empty invalid_chars) or a good definition * (empty invalid_chars cursor). 
*/ struct aws_byte_cursor invalid_chars; s_parse_by_character_predicate(&profile_cursor, s_is_not_profile_end, &invalid_chars, 0); if (profile_cursor.len == 0) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Profile declaration missing required ending bracket"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; return true; } if (invalid_chars.len > 0) { AWS_LOGF_WARN( AWS_LS_SDKUTILS_PROFILE, "Profile declaration contains invalid characters: \"" PRInSTR "\"", AWS_BYTE_CURSOR_PRI(invalid_chars)); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } /* * Apply to the profile collection */ if (s_profile_collection_add_profile( context->profile_collection, section_type, &profile_name, has_profile_prefix, context, &context->current_profile)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to add profile to profile collection"); s_log_parse_context(AWS_LL_ERROR, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; return true; } return true; } /** * Attempts to parse property continuation lines * * Return false if this is not a property continuation line, true otherwise (stop parsing the line) */ static bool s_parse_property_continuation( const struct aws_byte_cursor *line_cursor, struct profile_file_parse_context *context) { /* * Strip right-side whitespace only. Comments cannot be made on continuation lines. They * get folded into the value. */ struct aws_byte_cursor continuation_cursor = aws_byte_cursor_right_trim_pred(line_cursor, s_is_whitespace); /* * Can't be a continuation without at least one whitespace on the left */ if (!s_parse_by_character_predicate(&continuation_cursor, s_is_whitespace, NULL, 0)) { return false; } /* * This should never happen since it should have been caught as a whitespace line */ if (continuation_cursor.len == 0) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Property continuation internal parsing error"); s_log_parse_context(AWS_LL_ERROR, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } /* * A continuation without a current property is bad */ if (context->current_profile == NULL || context->current_property == NULL) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Property continuation seen outside of a current property"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; return true; } if (s_profile_property_add_continuation(context->current_property, &continuation_cursor)) { AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_PROFILE, "Property continuation could not be applied to the current property"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } if (context->current_property->is_empty_valued) { struct aws_byte_cursor key_cursor; if (!s_parse_by_character_predicate(&continuation_cursor, s_is_not_assignment_operator, &key_cursor, 0)) { AWS_LOGF_WARN( AWS_LS_SDKUTILS_PROFILE, "Empty-valued property continuation must contain the assignment operator"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; return true; } if (!s_parse_by_character_predicate(&continuation_cursor, s_is_assignment_operator, NULL, 1)) { AWS_LOGF_WARN( AWS_LS_SDKUTILS_PROFILE, "Empty-valued property continuation must contain the assignment operator"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; return true; } struct aws_byte_cursor 
trimmed_key_cursor = aws_byte_cursor_right_trim_pred(&key_cursor, s_is_whitespace); struct aws_byte_cursor id_check_cursor = aws_byte_cursor_trim_pred(&trimmed_key_cursor, s_is_identifier); if (id_check_cursor.len > 0) { AWS_LOGF_WARN( AWS_LS_SDKUTILS_PROFILE, "Empty-valued property continuation must have a valid identifier to the left of the assignment"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } s_parse_by_character_predicate(&continuation_cursor, s_is_whitespace, NULL, 0); /* * everything left in the continuation_cursor is the sub property value */ if (s_profile_property_add_sub_property( context->current_property, &trimmed_key_cursor, &continuation_cursor, context)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Internal error adding sub property to current property"); s_log_parse_context(AWS_LL_ERROR, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; } } return true; } /** * Attempts to parse property lines * * Return false if this is not a property line, true otherwise (stop parsing the line) */ static bool s_parse_property(const struct aws_byte_cursor *line_cursor, struct profile_file_parse_context *context) { /* * Strip whitespace-prefixed comment and right-side whitespace */ struct aws_byte_cursor property_line_cursor = s_trim_trailing_whitespace_comment(line_cursor); struct aws_byte_cursor property_cursor = aws_byte_cursor_right_trim_pred(&property_line_cursor, s_is_whitespace); context->current_property = NULL; struct aws_byte_cursor key_cursor; if (!s_parse_by_character_predicate(&property_cursor, s_is_not_assignment_operator, &key_cursor, 0)) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not contain the assignment operator"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; return true; } struct aws_byte_cursor trimmed_key_cursor = aws_byte_cursor_right_trim_pred(&key_cursor, s_is_whitespace); struct aws_byte_cursor id_check_cursor = aws_byte_cursor_trim_pred(&trimmed_key_cursor, s_is_identifier); if (id_check_cursor.len > 0) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not begin with a valid identifier"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; return true; } if (!s_parse_by_character_predicate(&property_cursor, s_is_assignment_operator, NULL, 1)) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not contain the assignment operator"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; return true; } s_parse_by_character_predicate(&property_cursor, s_is_whitespace, NULL, 0); /* * If appropriate, apply to the profile collection, property_cursor contains the trimmed value, if one exists */ if (context->current_profile != NULL) { context->current_property = s_profile_add_property(context->current_profile, &trimmed_key_cursor, &property_cursor); if (context->current_property == NULL) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_PROFILE, "Failed to add property \"" PRInSTR "\" to current profile \"%s\"", AWS_BYTE_CURSOR_PRI(trimmed_key_cursor), context->current_profile->name->bytes); s_log_parse_context(AWS_LL_ERROR, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; } } else { /* * By definition, if we haven't seen any profiles yet, this is a fatal error */ if (context->has_seen_profile) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition seen outside a profile"); 
s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE; } else { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition seen before any profiles"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; } } return true; } static void s_parse_and_apply_line_to_profile_collection( struct profile_file_parse_context *context, const struct aws_byte_cursor *line_cursor) { /* * Ignore line feed on windows */ struct aws_byte_cursor line = aws_byte_cursor_right_trim_pred(line_cursor, s_is_carriage_return); if (line.len == 0 || s_is_comment_line(&line) || s_is_whitespace_line(&line)) { return; } AWS_LOGF_TRACE( AWS_LS_SDKUTILS_PROFILE, "Parsing aws profile line in profile \"%s\", current property: \"%s\"", context->current_profile ? context->current_profile->name->bytes : s_none_string->bytes, context->current_property ? context->current_property->name->bytes : s_none_string->bytes); if (s_parse_profile_declaration(&line, context)) { return; } if (s_parse_property_continuation(&line, context)) { return; } if (s_parse_property(&line, context)) { return; } AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Unidentifiable line type encountered while parsing profile file"); s_log_parse_context(AWS_LL_WARN, context); context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL; } static struct aws_profile_collection *s_aws_profile_collection_new_internal( struct aws_allocator *allocator, const struct aws_byte_buf *buffer, enum aws_profile_source_type source, const struct aws_string *path) { struct aws_profile_collection *profile_collection = (struct aws_profile_collection *)aws_mem_acquire(allocator, sizeof(struct aws_profile_collection)); if (profile_collection == NULL) { return NULL; } AWS_ZERO_STRUCT(*profile_collection); profile_collection->profile_source = source; profile_collection->allocator = allocator; aws_ref_count_init( &profile_collection->ref_count, profile_collection, (aws_simple_completion_callback *)s_aws_profile_collection_destroy_internal); for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) { if (aws_hash_table_init( &profile_collection->sections[i], allocator, PROFILE_TABLE_DEFAULT_SIZE, aws_hash_string, aws_hash_callback_string_eq, NULL, /* The key is owned by the value (and destroy cleans it up), so we don't have to */ s_profile_hash_table_value_destroy)) { goto cleanup; } } struct aws_byte_cursor current_position = aws_byte_cursor_from_buf(buffer); if (current_position.len > 0) { struct aws_byte_cursor line_cursor; AWS_ZERO_STRUCT(line_cursor); struct profile_file_parse_context context; AWS_ZERO_STRUCT(context); context.current_line_number = 1; context.profile_collection = profile_collection; context.source_file_path = path; while (aws_byte_cursor_next_split(¤t_position, '\n', &line_cursor)) { context.current_line = line_cursor; s_parse_and_apply_line_to_profile_collection(&context, &line_cursor); if (context.parse_error == AWS_ERROR_SDKUTILS_PARSE_FATAL) { AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Fatal error while parsing aws profile collection"); goto cleanup; } aws_byte_cursor_advance(¤t_position, line_cursor.len + 1); ++context.current_line_number; } } return profile_collection; cleanup: s_aws_profile_collection_destroy_internal(profile_collection); return NULL; } struct aws_profile_collection *aws_profile_collection_acquire(struct aws_profile_collection *collection) { if (collection != NULL) { aws_ref_count_acquire(&collection->ref_count); } return collection; } struct aws_profile_collection 
*aws_profile_collection_release(struct aws_profile_collection *collection) { if (collection != NULL) { aws_ref_count_release(&collection->ref_count); } return NULL; } struct aws_profile_collection *aws_profile_collection_new_from_file( struct aws_allocator *allocator, const struct aws_string *file_path, enum aws_profile_source_type source) { struct aws_byte_buf file_contents; AWS_ZERO_STRUCT(file_contents); AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_PROFILE, "Creating profile collection from file at \"%s\"", file_path->bytes); if (aws_byte_buf_init_from_file(&file_contents, allocator, aws_string_c_str(file_path)) != 0) { AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_PROFILE, "Failed to read file at \"%s\"", file_path->bytes); return NULL; } struct aws_profile_collection *profile_collection = s_aws_profile_collection_new_internal(allocator, &file_contents, source, file_path); aws_byte_buf_clean_up(&file_contents); return profile_collection; } struct aws_profile_collection *aws_profile_collection_new_from_buffer( struct aws_allocator *allocator, const struct aws_byte_buf *buffer, enum aws_profile_source_type source) { return s_aws_profile_collection_new_internal(allocator, buffer, source, NULL); } static struct aws_string *s_process_profile_file_path(struct aws_allocator *allocator, const struct aws_string *path) { struct aws_string *final_path = NULL; /* * Make a copy to mess with */ struct aws_string *path_copy = aws_string_new_from_string(allocator, path); if (path_copy == NULL) { return NULL; } struct aws_string *home_directory = NULL; /* * Fake directory cursor for final directory construction */ char local_platform_separator = aws_get_platform_directory_separator(); struct aws_byte_cursor separator_cursor; AWS_ZERO_STRUCT(separator_cursor); separator_cursor.ptr = (uint8_t *)&local_platform_separator; separator_cursor.len = 1; for (size_t i = 0; i < path_copy->len; ++i) { char value = path_copy->bytes[i]; if (aws_is_any_directory_separator(value)) { ((char *)(path_copy->bytes))[i] = local_platform_separator; } } /* * Process a split on the local separator, which we now know is the only one present in the string. * * While this does not conform fully to the SEP governing profile file path resolution, it covers * a useful, cross-platform subset of functionality that the full implementation will be backwards compatible with. */ struct aws_array_list path_segments; if (aws_array_list_init_dynamic(&path_segments, allocator, 10, sizeof(struct aws_byte_cursor))) { goto on_array_list_init_failure; } struct aws_byte_cursor path_cursor = aws_byte_cursor_from_string(path_copy); if (aws_byte_cursor_split_on_char(&path_cursor, local_platform_separator, &path_segments)) { goto on_split_failure; } size_t final_string_length = 0; size_t path_segment_count = aws_array_list_length(&path_segments); for (size_t i = 0; i < path_segment_count; ++i) { struct aws_byte_cursor segment_cursor; AWS_ZERO_STRUCT(segment_cursor); if (aws_array_list_get_at(&path_segments, &segment_cursor, i)) { continue; } /* * Current support: if and only if the first segment is just '~' then replace it * with the current home directory based on SEP home directory resolution rules. * * Support for (pathological but proper) paths with embedded ~ ("../../~/etc...") and * cross-user ~ ("~someone/.aws/credentials") can come later. As it stands, they will * potentially succeed on unix platforms but not Windows. 
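 *
 * For example, assuming a hypothetical home directory of /home/user on a unix platform:
 *   "~/.aws/credentials"   -> "/home/user/.aws/credentials"
 *   "~someone/.aws/config" -> the "~someone" segment is not expanded and is kept as-is
 * Mixed separators such as "~\.aws/config" are normalized to the platform separator before
 * the split and the '~' expansion happen.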
*/ if (i == 0 && segment_cursor.len == 1 && *segment_cursor.ptr == '~') { if (home_directory == NULL) { home_directory = aws_get_home_directory(allocator); if (AWS_UNLIKELY(!home_directory)) { goto on_empty_path; } } final_string_length += home_directory->len; } else { final_string_length += segment_cursor.len; } } if (path_segment_count > 1) { final_string_length += path_segment_count - 1; } if (final_string_length == 0) { goto on_empty_path; } /* * Build the final path from the split + a possible home directory resolution */ struct aws_byte_buf result; aws_byte_buf_init(&result, allocator, final_string_length); for (size_t i = 0; i < path_segment_count; ++i) { struct aws_byte_cursor segment_cursor; AWS_ZERO_STRUCT(segment_cursor); if (aws_array_list_get_at(&path_segments, &segment_cursor, i)) { continue; } /* * See above for explanation */ if (i == 0 && segment_cursor.len == 1 && *segment_cursor.ptr == '~') { if (home_directory == NULL) { goto on_home_directory_failure; } struct aws_byte_cursor home_cursor = aws_byte_cursor_from_string(home_directory); if (aws_byte_buf_append(&result, &home_cursor)) { goto on_byte_buf_write_failure; } } else { if (aws_byte_buf_append(&result, &segment_cursor)) { goto on_byte_buf_write_failure; } } /* * Add the separator after all but the last segment */ if (i + 1 < path_segment_count) { if (aws_byte_buf_append(&result, &separator_cursor)) { goto on_byte_buf_write_failure; } } } final_path = aws_string_new_from_array(allocator, result.buffer, result.len); /* * clean up */ on_byte_buf_write_failure: aws_byte_buf_clean_up(&result); on_empty_path: on_home_directory_failure: on_split_failure: aws_array_list_clean_up(&path_segments); on_array_list_init_failure: aws_string_destroy(path_copy); if (home_directory != NULL) { aws_string_destroy(home_directory); } return final_path; } AWS_STATIC_STRING_FROM_LITERAL(s_default_credentials_path, "~/.aws/credentials"); AWS_STATIC_STRING_FROM_LITERAL(s_credentials_file_path_env_variable_name, "AWS_SHARED_CREDENTIALS_FILE"); AWS_STATIC_STRING_FROM_LITERAL(s_default_config_path, "~/.aws/config"); AWS_STATIC_STRING_FROM_LITERAL(s_config_file_path_env_variable_name, "AWS_CONFIG_FILE"); static struct aws_string *s_get_raw_file_path( struct aws_allocator *allocator, const struct aws_byte_cursor *override_path, const struct aws_string *override_env_var_name, const struct aws_string *default_path) { if (override_path != NULL && override_path->ptr != NULL) { return aws_string_new_from_array(allocator, override_path->ptr, override_path->len); } struct aws_string *env_override_path = NULL; if (aws_get_environment_value(allocator, override_env_var_name, &env_override_path) == 0 && env_override_path != NULL) { return env_override_path; } return aws_string_new_from_string(allocator, default_path); } struct aws_string *aws_get_credentials_file_path( struct aws_allocator *allocator, const struct aws_byte_cursor *override_path) { struct aws_string *raw_path = s_get_raw_file_path( allocator, override_path, s_credentials_file_path_env_variable_name, s_default_credentials_path); struct aws_string *final_path = s_process_profile_file_path(allocator, raw_path); aws_string_destroy(raw_path); return final_path; } struct aws_string *aws_get_config_file_path( struct aws_allocator *allocator, const struct aws_byte_cursor *override_path) { struct aws_string *raw_path = s_get_raw_file_path(allocator, override_path, s_config_file_path_env_variable_name, s_default_config_path); struct aws_string *final_path = s_process_profile_file_path(allocator, 
raw_path); aws_string_destroy(raw_path); return final_path; } AWS_STATIC_STRING_FROM_LITERAL(s_default_profile_env_variable_name, "AWS_PROFILE"); struct aws_string *aws_get_profile_name(struct aws_allocator *allocator, const struct aws_byte_cursor *override_name) { /** * Profile name is resolved in the following order. * 1. If the override_path variable is provided. * 2. Check `AWS_PROFILE` environment variable and use the value if it is not empty. * 3. Use "default". */ struct aws_string *profile_name = NULL; if (override_name != NULL && override_name->ptr != NULL) { profile_name = aws_string_new_from_array(allocator, override_name->ptr, override_name->len); } else { /* Try to fetch profile from AWS_PROFILE environment variable */ aws_get_environment_value(allocator, s_default_profile_env_variable_name, &profile_name); /* Use default profile if it doesn't exist. */ if (profile_name == NULL) { profile_name = aws_string_new_from_string(allocator, s_default_profile_name); } } return profile_name; } size_t aws_profile_get_property_count(const struct aws_profile *profile) { return aws_hash_table_get_entry_count(&profile->properties); } size_t aws_profile_collection_get_profile_count(const struct aws_profile_collection *profile_collection) { return aws_hash_table_get_entry_count(&profile_collection->sections[AWS_PROFILE_SECTION_TYPE_PROFILE]); } size_t aws_profile_collection_get_section_count( const struct aws_profile_collection *profile_collection, const enum aws_profile_section_type section_type) { return aws_hash_table_get_entry_count(&profile_collection->sections[section_type]); } size_t aws_profile_property_get_sub_property_count(const struct aws_profile_property *property) { return aws_hash_table_get_entry_count(&property->sub_properties); } const struct aws_string *aws_profile_property_get_sub_property( const struct aws_profile_property *property, const struct aws_string *sub_property_name) { struct aws_hash_element *element = NULL; if (aws_hash_table_find(&property->sub_properties, sub_property_name, &element) || element == NULL) { return NULL; } return (const struct aws_string *)element->value; } const struct aws_string *aws_profile_get_name(const struct aws_profile *profile) { AWS_PRECONDITION(profile); return profile->name; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/endpoints_regex.c000066400000000000000000000422661456575232400257450ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include /* * Minimal regex implementation. * Inspired by * https://www.cs.princeton.edu/courses/archive/spr09/cos333/beautiful.html and * https://github.com/kokke/tiny-regex-c. * * Why write our own regex implementation? * Unfortunately, state of cross-platform regex support for c is rather limited. * Posix has regex support, but implementation support varies cross platform. * Windows supports regex, but only exposes it through C++ interface. * For 3p implementations tiny-regex-c comes closest to what we need, but has * several deal-breaking limitations, ex. not being thread safe, lack of * alternations support. * Other 3p C implementations are very bloated for what we need. * Hence, since we need a very minimal regex support for endpoint resolution we * just implement our own. * * What is supported? 
* - ascii only matching (no unicode or other encoding support) * - multithread safe iterative matching (stack friendly, since this is * typically called deep in call stack) * - char matching (plain ascii chars, alpha/digit wildcards) * - star and plus (refer to limitations sections for limitations on how they work) * - alternation groups * * Limitations? * - star and plus are greedy (match as much as they can), but do not backtrace. * This is major deviation from how regex matching should work. * Note: regions in aws have a predefined pattern where sections are separated * by '-', so current implementation just matches until it hits separator. * - grouping using ( and ) is only supported for alternations. * - regex must match the whole text, i.e. start with ^ and end with $ * - features not called out above are not supported * - alternations pick first occurrence that matches and do not backtrack to see * if there are any other occurrences * * Examples * current implementation is targeted towards matching typical aws region * patterns like "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$" (aws partition) or * "^us\\-gov\\-\\w+\\-\\d+$" (aws gov partition). * All current regions follow * "country code(2 chars)-meta(like gov or iso, optional)-direction-digit" * and implementation should provide enough features to match those regions. * Patterns that would not match correctly are things like "a*a" (star will * exhaustively match a and will not give last a back) or (ab|abc), which will * not match abc because alternation will lock into ab. */ enum regex_symbol_type { AWS_ENDPOINTS_REGEX_SYMBOL_DOT, AWS_ENDPOINTS_REGEX_SYMBOL_STAR, AWS_ENDPOINTS_REGEX_SYMBOL_PLUS, AWS_ENDPOINTS_REGEX_SYMBOL_DIGIT, AWS_ENDPOINTS_REGEX_SYMBOL_ALPHA, AWS_ENDPOINTS_REGEX_SYMBOL_CHAR, AWS_ENDPOINTS_REGEX_SYMBOL_ALTERNATION_GROUP, }; struct aws_endpoints_regex_symbol { enum regex_symbol_type type; union { uint8_t ch; struct aws_string *alternation; } info; }; struct aws_endpoints_regex { struct aws_array_list symbols; }; /* Somewhat arbitrary limits on size of regex and text to avoid overly large * inputs. */ enum { s_max_regex_length = 60, s_max_text_length = 50, s_max_elements_per_alteration = 20, }; static void s_clean_up_symbols(struct aws_array_list *symbols) { for (size_t i = 0; i < aws_array_list_length(symbols); ++i) { struct aws_endpoints_regex_symbol *element = NULL; aws_array_list_get_at_ptr(symbols, (void **)&element, i); if (element->type == AWS_ENDPOINTS_REGEX_SYMBOL_ALTERNATION_GROUP) { aws_string_destroy(element->info.alternation); } } } int s_validate_regex(const struct aws_endpoints_regex *regex) { AWS_FATAL_PRECONDITION(regex != NULL); for (size_t sym_idx = 0; sym_idx < aws_array_list_length(®ex->symbols); ++sym_idx) { struct aws_endpoints_regex_symbol *symbol = NULL; aws_array_list_get_at_ptr(®ex->symbols, (void **)&symbol, sym_idx); if (symbol->type == AWS_ENDPOINTS_REGEX_SYMBOL_PLUS || symbol->type == AWS_ENDPOINTS_REGEX_SYMBOL_STAR) { /* first symbol */ if (sym_idx == 0) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid regex pattern. 
Regex cannot start with star or plus."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } struct aws_endpoints_regex_symbol *prev_symbol = NULL; aws_array_list_get_at_ptr(®ex->symbols, (void **)&prev_symbol, sym_idx - 1); /* reasonable symbol before */ enum regex_symbol_type prev_type = prev_symbol->type; if (!(prev_type == AWS_ENDPOINTS_REGEX_SYMBOL_DOT || prev_type == AWS_ENDPOINTS_REGEX_SYMBOL_DIGIT || prev_type == AWS_ENDPOINTS_REGEX_SYMBOL_ALPHA || prev_type == AWS_ENDPOINTS_REGEX_SYMBOL_CHAR || prev_type == AWS_ENDPOINTS_REGEX_SYMBOL_ALTERNATION_GROUP)) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Unsupported regex pattern. Star or plus after unsupported character."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX); } /* ends with - delimiter */ if (sym_idx != aws_array_list_length(®ex->symbols) - 1) { struct aws_endpoints_regex_symbol *next_symbol = NULL; aws_array_list_get_at_ptr(®ex->symbols, (void **)&next_symbol, sym_idx + 1); if (next_symbol->type != AWS_ENDPOINTS_REGEX_SYMBOL_CHAR || next_symbol->info.ch != '-') { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Unsupported regex pattern. Star or plus must be followed by - delimiter."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX); } } } else if (symbol->type == AWS_ENDPOINTS_REGEX_SYMBOL_ALTERNATION_GROUP) { struct aws_byte_cursor alternation = aws_byte_cursor_from_string(symbol->info.alternation); /* Not empty */ if (alternation.len == 0) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid regex pattern. Empty group."); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* Verify that group is only used for alternation. */ for (size_t i = 0; i < alternation.len; ++i) { if (!aws_isalnum(alternation.ptr[i]) && alternation.ptr[i] != '|') { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Unsupported regex pattern. Only alternation groups are supported."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX); } } /* alternation elements are unique and not subsets of each other */ struct aws_byte_cursor elements[s_max_elements_per_alteration]; size_t num_elements = 0; struct aws_byte_cursor split = {0}; while (aws_byte_cursor_next_split(&alternation, '|', &split)) { if (num_elements == s_max_elements_per_alteration) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Unsupported regex pattern. Too many element in alternation"); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX); } if (split.len == 0) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid regex pattern. Alternation element cannot be empty"); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } elements[num_elements] = split; ++num_elements; } struct aws_byte_cursor input; struct aws_byte_cursor prefix; for (size_t i = 0; i < num_elements; ++i) { for (size_t j = i + 1; j < num_elements; ++j) { if (elements[i].len <= elements[j].len) { input = elements[j]; prefix = elements[i]; } else { input = elements[i]; prefix = elements[j]; } if (aws_byte_cursor_starts_with(&input, &prefix)) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Unsupported regex pattern. 
One alternation element cannot be a prefix of another " "element."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX); } } } } } return AWS_OP_SUCCESS; } struct aws_endpoints_regex *aws_endpoints_regex_new( struct aws_allocator *allocator, struct aws_byte_cursor regex_pattern) { if (regex_pattern.len == 0 || regex_pattern.len > s_max_regex_length) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid regex pattern size. Must be between 1 and %d", s_max_regex_length); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } if (regex_pattern.ptr[0] != '^' || regex_pattern.ptr[regex_pattern.len - 1] != '$') { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Unsupported regex pattern. Supported patterns must match the whole text."); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX); return NULL; } /* Ignore begin/end chars */ aws_byte_cursor_advance(®ex_pattern, 1); --regex_pattern.len; struct aws_endpoints_regex *re = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_regex)); aws_array_list_init_dynamic(&re->symbols, allocator, regex_pattern.len, sizeof(struct aws_endpoints_regex_symbol)); while (regex_pattern.len > 0) { uint8_t ch = regex_pattern.ptr[0]; aws_byte_cursor_advance(®ex_pattern, 1); struct aws_endpoints_regex_symbol symbol; switch (ch) { case '.': symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_DOT; break; case '*': symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_STAR; break; case '+': symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_PLUS; break; case '\\': if (regex_pattern.len == 0) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid regex pattern. Pattern ends with escape character."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } switch (regex_pattern.ptr[0]) { /* Predefined patterns */ case 'd': symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_DIGIT; break; case 'w': symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_ALPHA; break; /* Escaped chars, ex. * or + */ default: symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_CHAR; symbol.info.ch = regex_pattern.ptr[0]; break; } aws_byte_cursor_advance(®ex_pattern, 1); break; case '(': { struct aws_byte_cursor group = {0}; if (!aws_byte_cursor_next_split(®ex_pattern, ')', &group)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid regex pattern. Invalid group syntax."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } aws_byte_cursor_advance(®ex_pattern, group.len); if (regex_pattern.len == 0 || regex_pattern.ptr[0] != ')') { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid regex pattern. Missing closing parenthesis."); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); goto on_error; } aws_byte_cursor_advance(®ex_pattern, 1); symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_ALTERNATION_GROUP; symbol.info.alternation = aws_string_new_from_cursor(allocator, &group); break; } default: { if (!aws_isalnum(ch)) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Unsupported regex pattern. 
Unknown character %c", ch); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX); goto on_error; } symbol.type = AWS_ENDPOINTS_REGEX_SYMBOL_CHAR; symbol.info.ch = ch; break; } } aws_array_list_push_back(&re->symbols, &symbol); } if (s_validate_regex(re)) { goto on_error; } return re; on_error: aws_endpoints_regex_destroy(re); return NULL; } void aws_endpoints_regex_destroy(struct aws_endpoints_regex *regex) { if (regex == NULL) { return; } struct aws_allocator *allocator = regex->symbols.alloc; s_clean_up_symbols(®ex->symbols); aws_array_list_clean_up(®ex->symbols); aws_mem_release(allocator, ®ex->symbols); } static bool s_match_one(const struct aws_endpoints_regex_symbol *symbol, struct aws_byte_cursor *text) { if (text->len == 0) { return false; } uint8_t ch = text->ptr[0]; switch (symbol->type) { case AWS_ENDPOINTS_REGEX_SYMBOL_ALPHA: return aws_isalpha(ch); case AWS_ENDPOINTS_REGEX_SYMBOL_DIGIT: return aws_isdigit(ch); case AWS_ENDPOINTS_REGEX_SYMBOL_CHAR: return ch == symbol->info.ch; case AWS_ENDPOINTS_REGEX_SYMBOL_DOT: return true; default: AWS_FATAL_ASSERT(true); } return false; } static void s_match_star(const struct aws_endpoints_regex_symbol *symbol, struct aws_byte_cursor *text) { while (s_match_one(symbol, text)) { aws_byte_cursor_advance(text, 1); } } static bool s_match_plus(const struct aws_endpoints_regex_symbol *symbol, struct aws_byte_cursor *text) { if (!s_match_one(symbol, text)) { return false; } aws_byte_cursor_advance(text, 1); s_match_star(symbol, text); return true; } int aws_endpoints_regex_match(const struct aws_endpoints_regex *regex, struct aws_byte_cursor text) { AWS_PRECONDITION(regex); if (text.len == 0 || text.len > s_max_text_length) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "Invalid text size. 
Must be between 1 and %d", s_max_text_length); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } for (size_t i = 0; i < aws_array_list_length(®ex->symbols); ++i) { struct aws_endpoints_regex_symbol *symbol = NULL; aws_array_list_get_at_ptr(®ex->symbols, (void **)&symbol, i); /* looks forward to check if symbol has * or + modifier */ if (i + 1 < aws_array_list_length(®ex->symbols)) { struct aws_endpoints_regex_symbol *next_symbol = NULL; aws_array_list_get_at_ptr(®ex->symbols, (void **)&next_symbol, i + 1); if (next_symbol->type == AWS_ENDPOINTS_REGEX_SYMBOL_STAR || next_symbol->type == AWS_ENDPOINTS_REGEX_SYMBOL_PLUS) { if (next_symbol->type == AWS_ENDPOINTS_REGEX_SYMBOL_STAR) { s_match_star(symbol, &text); } else if (next_symbol->type == AWS_ENDPOINTS_REGEX_SYMBOL_PLUS) { if (!s_match_plus(symbol, &text)) { return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH); } } ++i; continue; } } switch (symbol->type) { case AWS_ENDPOINTS_REGEX_SYMBOL_ALTERNATION_GROUP: { struct aws_byte_cursor variant = {0}; struct aws_byte_cursor alternation = aws_byte_cursor_from_string(symbol->info.alternation); size_t chars_in_match = 0; while (aws_byte_cursor_next_split(&alternation, '|', &variant)) { if (aws_byte_cursor_starts_with(&text, &variant)) { chars_in_match = variant.len; break; } } if (chars_in_match == 0) { return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH); } aws_byte_cursor_advance(&text, chars_in_match); break; } default: if (!s_match_one(symbol, &text)) { return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH); } aws_byte_cursor_advance(&text, 1); break; } } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/endpoints_rule_engine.c000066400000000000000000001242411456575232400271210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include /* TODO: checking for unknown enum values is annoying and is brittle. compile time assert on enum size or members would make it a lot simpler. */ /* * How rule resolution works. * Note: read comments in endpoint_types_impl.h first to understand type system. * * Initial scope is created from parameters defined in request context and * default values defined in ruleset (s_init_top_level_scope). Validation that * all required parameters have values is done at this point as well. * * Rules are then resolved sequentially against scope. * First list of conditions associated with the rule is resolved * (s_resolve_conditions). Final result of conditions resolution is an AND of * truthiness of resolved values (as defined in is_value_truthy) for each * condition. If resolution is true then rule is selected. * - For endpoint and error rules that means terminal state is reached and rule * data is returned * - For tree rule, the engine starts resolving rules associated with tree rule. * Note: tree rules are terminal and once engine jumps into tree rule * resolution there is no way to jump back out. * * Conditions can add values to scope. Those values are valid for the duration of * rule resolution. Note: for tree rules, any values added in tree conditions are * valid for all rules within the tree. * Scope can be though of as a 'leveled' structure. Top level or 0 level * represents all values from context and defaults. Levels 1 and up represent * values added by rules. Ex. 
if we start at level 0, all values added by rule * can be though of as level 1. * Since tree rule cannot be exited from, engine is simplified by making all * values in scope top level whenever tree is jumped into. So in practice engine * goes back between top level and first level as resolving rules. If that * changes in future, scope can add explicit level number and cleanup only values * at that level when going to next rule. * * Overall flow is as follows: * - Start with any values provided in context as scope * - Add any default values provided in ruleset and validate all required * params are specified. * - Iterate through rules and resolve each rule: * -- resolve conditions with side effects * -- if conditions are truthy return rule result * -- if conditions are truthy and rule is tree, jump down a level and * restart resolution with tree rules * -- if conditions are falsy, rollback level and go to next rule * - if no rules match, resolution fails with exhausted error. */ struct resolve_template_callback_data { struct aws_allocator *allocator; struct aws_endpoints_resolution_scope *scope; }; AWS_STATIC_ASSERT(AWS_ENDPOINTS_VALUE_SIZE == 7); static bool is_value_truthy(const struct aws_endpoints_value *value) { switch (value->type) { case AWS_ENDPOINTS_VALUE_NONE: return false; case AWS_ENDPOINTS_VALUE_BOOLEAN: return value->v.boolean; case AWS_ENDPOINTS_VALUE_ARRAY: case AWS_ENDPOINTS_VALUE_STRING: case AWS_ENDPOINTS_VALUE_OBJECT: return true; case AWS_ENDPOINTS_VALUE_NUMBER: return value->v.number != 0; default: AWS_ASSERT(false); return false; } } void s_scope_value_destroy_cb(void *data) { struct aws_endpoints_scope_value *value = data; aws_endpoints_scope_value_destroy(value); } static int s_deep_copy_context_to_scope( struct aws_allocator *allocator, const struct aws_endpoints_request_context *context, struct aws_endpoints_resolution_scope *scope) { struct aws_endpoints_scope_value *new_value = NULL; for (struct aws_hash_iter iter = aws_hash_iter_begin(&context->values); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct aws_endpoints_scope_value *context_value = (struct aws_endpoints_scope_value *)iter.element.value; new_value = aws_endpoints_scope_value_new(allocator, context_value->name.cur); if (aws_endpoints_deep_copy_parameter_value(allocator, &context_value->value, &new_value->value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to deep copy value."); goto on_error; } if (aws_hash_table_put(&scope->values, &new_value->name.cur, new_value, NULL)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add deep copy to scope."); goto on_error; } } return AWS_OP_SUCCESS; on_error: aws_endpoints_scope_value_destroy(new_value); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED); } static int s_init_top_level_scope( struct aws_allocator *allocator, const struct aws_endpoints_request_context *context, const struct aws_endpoints_ruleset *ruleset, const struct aws_partitions_config *partitions, struct aws_endpoints_resolution_scope *scope) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(context); AWS_PRECONDITION(ruleset); AWS_PRECONDITION(scope); struct aws_endpoints_scope_value *val = NULL; scope->rule_idx = 0; scope->rules = &ruleset->rules; scope->partitions = partitions; if (aws_hash_table_init( &scope->values, allocator, 0, aws_hash_byte_cursor_ptr, aws_endpoints_byte_cursor_eq, NULL, s_scope_value_destroy_cb)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values."); goto on_error; 
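/*
 * For orientation, a minimal caller-side sketch of how values end up in this
 * top-level scope (a sketch only, assuming the public API declared in
 * aws/sdkutils/endpoints_rule_engine.h; the parameter names depend on the
 * ruleset and error handling is omitted):
 *
 *   struct aws_endpoints_request_context *ctx = aws_endpoints_request_context_new(allocator);
 *   aws_endpoints_request_context_add_string(
 *       allocator, ctx, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-east-1"));
 *   aws_endpoints_request_context_add_boolean(allocator, ctx, aws_byte_cursor_from_c_str("UseFIPS"), false);
 *
 * Every value in the context is deep copied into scope->values below, and
 * ruleset defaults are then layered on top for required parameters the caller
 * did not set.
 */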
} if (s_deep_copy_context_to_scope(allocator, context, scope)) { goto on_error; } if (aws_array_list_init_dynamic(&scope->added_keys, allocator, 10, sizeof(struct aws_byte_cursor))) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init added keys."); goto on_error; } /* Add defaults to the top level scope. */ for (struct aws_hash_iter iter = aws_hash_iter_begin(&ruleset->parameters); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { const struct aws_byte_cursor key = *(const struct aws_byte_cursor *)iter.element.key; struct aws_endpoints_parameter *value = (struct aws_endpoints_parameter *)iter.element.value; /* Skip non-required values, since they cannot have default values. */ if (!value->is_required) { continue; } struct aws_hash_element *existing = NULL; if (aws_hash_table_find(&scope->values, &key, &existing)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED); } if (existing == NULL) { if (!value->has_default_value) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "No value or default for required parameter."); goto on_error; } val = aws_endpoints_scope_value_new(allocator, key); AWS_ASSERT(val); switch (value->type) { case AWS_ENDPOINTS_PARAMETER_STRING: val->value.type = AWS_ENDPOINTS_VALUE_STRING; val->value.v.owning_cursor_string = aws_endpoints_non_owning_cursor_create(value->default_value.string); break; case AWS_ENDPOINTS_PARAMETER_BOOLEAN: val->value.type = AWS_ENDPOINTS_VALUE_BOOLEAN; val->value.v.boolean = value->default_value.boolean; break; default: AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected parameter type."); goto on_error; } if (aws_hash_table_put(&scope->values, &val->name.cur, val, NULL)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add value to top level scope."); goto on_error; } } } return AWS_OP_SUCCESS; on_error: aws_endpoints_scope_value_destroy(val); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED); } static void s_scope_clean_up(struct aws_endpoints_resolution_scope *scope) { AWS_PRECONDITION(scope); aws_hash_table_clean_up(&scope->values); aws_array_list_clean_up(&scope->added_keys); } static int s_resolve_expr( struct aws_allocator *allocator, struct aws_endpoints_expr *expr, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value); static int s_resolve_template( struct aws_byte_cursor template, void *user_data, struct aws_owning_cursor *out_owning_cursor); int aws_endpoints_argv_expect( struct aws_allocator *allocator, struct aws_endpoints_resolution_scope *scope, struct aws_array_list *argv, size_t idx, enum aws_endpoints_value_type expected_type, struct aws_endpoints_value *out_value) { AWS_ZERO_STRUCT(*out_value); struct aws_endpoints_value argv_value = {0}; struct aws_endpoints_expr argv_expr; if (aws_array_list_get_at(argv, &argv_expr, idx)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to parse argv"); goto on_error; } if (s_resolve_expr(allocator, &argv_expr, scope, &argv_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve argv."); goto on_error; } if (expected_type != AWS_ENDPOINTS_VALUE_ANY && argv_value.type != expected_type) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected arg type actual: %u expected %u.", argv_value.type, expected_type); goto on_error; } *out_value = argv_value; return AWS_OP_SUCCESS; on_error: aws_endpoints_value_clean_up(&argv_value); return 
aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } /* ****************************** * Expr/String resolve ****************************** */ static int s_resolve_expr( struct aws_allocator *allocator, struct aws_endpoints_expr *expr, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { AWS_ZERO_STRUCT(*out_value); switch (expr->type) { case AWS_ENDPOINTS_EXPR_STRING: { struct aws_byte_buf buf; struct resolve_template_callback_data data = {.allocator = allocator, .scope = scope}; if (aws_byte_buf_init_from_resolved_templated_string( allocator, &buf, expr->e.string, s_resolve_template, &data, false)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated string."); goto on_error; } out_value->type = AWS_ENDPOINTS_VALUE_STRING; out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf)); aws_byte_buf_clean_up(&buf); break; } case AWS_ENDPOINTS_EXPR_BOOLEAN: { out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; out_value->v.boolean = expr->e.boolean; break; } case AWS_ENDPOINTS_EXPR_NUMBER: { out_value->type = AWS_ENDPOINTS_VALUE_NUMBER; out_value->v.number = expr->e.number; break; } case AWS_ENDPOINTS_EXPR_ARRAY: { out_value->type = AWS_ENDPOINTS_VALUE_ARRAY; /* TODO: deep copy */ out_value->v.array = expr->e.array; break; } case AWS_ENDPOINTS_EXPR_REFERENCE: { struct aws_hash_element *element; if (aws_hash_table_find(&scope->values, &expr->e.reference, &element)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to deref."); goto on_error; } if (element == NULL) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; } else { struct aws_endpoints_scope_value *aws_endpoints_scope_value = element->value; *out_value = aws_endpoints_scope_value->value; if (aws_endpoints_scope_value->value.type == AWS_ENDPOINTS_VALUE_STRING) { /* Value will not own underlying mem and instead its owned by the scope, so set it to NULL. */ out_value->v.owning_cursor_string.string = NULL; } else if (aws_endpoints_scope_value->value.type == AWS_ENDPOINTS_VALUE_OBJECT) { out_value->v.owning_cursor_object.string = NULL; } } break; } case AWS_ENDPOINTS_EXPR_FUNCTION: { if (aws_endpoints_dispatch_standard_lib_fn_resolve( expr->e.function.fn, allocator, &expr->e.function.argv, scope, out_value)) { goto on_error; } break; } } return AWS_OP_SUCCESS; on_error: return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } static int s_resolve_one_condition( struct aws_allocator *allocator, struct aws_endpoints_condition *condition, struct aws_endpoints_resolution_scope *scope, bool *out_is_truthy) { struct aws_endpoints_scope_value *scope_value = NULL; struct aws_endpoints_value val; if (s_resolve_expr(allocator, &condition->expr, scope, &val)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve expr."); goto on_error; } *out_is_truthy = is_value_truthy(&val); /* Note: assigning value is skipped if condition is falsy, since nothing can use it and that avoids adding value and then removing it from scope right away. */ if (*out_is_truthy && condition->assign.len > 0) { /* If condition assigns a value, push it to scope and let scope handle value memory. 
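For example (assuming a typical AWS ruleset), a condition such as {"fn": "aws.partition", "argv": [{"ref": "Region"}], "assign": "PartitionResult"} makes "PartitionResult" available to later conditions and to the rule's endpoint or error expressions; once pushed, the scope owns the assigned value.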
*/ scope_value = aws_endpoints_scope_value_new(allocator, condition->assign); scope_value->value = val; if (aws_array_list_push_back(&scope->added_keys, &scope_value->name.cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to update key at given scope."); goto on_error; } int was_created = 1; if (aws_hash_table_put(&scope->values, &scope_value->name.cur, scope_value, &was_created)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to set assigned variable."); goto on_error; } /* Shadowing existing values is prohibited. */ if (!was_created) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Assigned variable shadows existing one."); goto on_error; } } else { /* Otherwise clean up temp value */ aws_endpoints_value_clean_up(&val); } return AWS_OP_SUCCESS; on_error: aws_endpoints_scope_value_destroy(scope_value); /* Only cleanup value if mem ownership was not transferred to scope value. */ if (scope_value == NULL) { aws_endpoints_value_clean_up(&val); } *out_is_truthy = false; return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } static int s_resolve_conditions( struct aws_allocator *allocator, const struct aws_array_list *conditions, struct aws_endpoints_resolution_scope *scope, bool *out_is_truthy) { /* Note: spec defines empty conditions list as truthy. */ *out_is_truthy = true; for (size_t idx = 0; idx < aws_array_list_length(conditions); ++idx) { struct aws_endpoints_condition *condition = NULL; if (aws_array_list_get_at_ptr(conditions, (void **)&condition, idx)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to retrieve condition."); goto on_error; } if (s_resolve_one_condition(allocator, condition, scope, out_is_truthy)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve condition."); goto on_error; } /* truthiness of all conditions is an AND of truthiness for each condition, hence first false one short circuits resolution */ if (!*out_is_truthy) { break; } } return AWS_OP_SUCCESS; on_error: *out_is_truthy = false; return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } int aws_endpoints_path_through_array( struct aws_allocator *allocator, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *value, struct aws_byte_cursor path_cur, struct aws_endpoints_value *out_value) { AWS_PRECONDITION(value->type == AWS_ENDPOINTS_VALUE_ARRAY); uint64_t index; struct aws_byte_cursor split = {0}; if ((!aws_byte_cursor_next_split(&path_cur, '[', &split) || split.len > 0) || !aws_byte_cursor_next_split(&path_cur, ']', &split) || aws_byte_cursor_utf8_parse_u64(split, &index)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse index from template string."); goto on_error; } if (index >= aws_array_list_length(&value->v.array)) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; return AWS_OP_SUCCESS; } struct aws_endpoints_expr *expr = NULL; if (aws_array_list_get_at_ptr(&value->v.array, (void **)&expr, (size_t)index)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to index into resolved value"); goto on_error; } struct aws_endpoints_value val; if (s_resolve_expr(allocator, expr, scope, &val)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve val."); aws_endpoints_value_clean_up(&val); goto on_error; } *out_value = val; return AWS_OP_SUCCESS; on_error: return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } int aws_endpoints_path_through_object( struct aws_allocator *allocator, struct aws_endpoints_value *value, struct aws_byte_cursor 
path_cur, struct aws_endpoints_value *out_value) { AWS_ZERO_STRUCT(*out_value); struct aws_json_value *root_node = NULL; struct aws_byte_cursor value_cur = value->type != AWS_ENDPOINTS_VALUE_STRING ? value->v.owning_cursor_object.cur : value->v.owning_cursor_string.cur; root_node = aws_json_value_new_from_string(allocator, value_cur); const struct aws_json_value *result; if (aws_path_through_json(allocator, root_node, path_cur, &result)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through json."); goto on_error; } if (result == NULL) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; } else if (aws_json_value_is_string(result)) { struct aws_byte_cursor final; if (aws_json_value_get_string(result, &final)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse string from node."); goto on_error; } out_value->type = AWS_ENDPOINTS_VALUE_STRING; out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, final); } else if (aws_json_value_is_array(result) || aws_json_value_is_object(result)) { struct aws_byte_buf json_blob; aws_byte_buf_init(&json_blob, allocator, 0); if (aws_byte_buf_append_json_string(result, &json_blob)) { aws_byte_buf_clean_up(&json_blob); AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to extract properties."); goto on_error; } out_value->type = AWS_ENDPOINTS_VALUE_OBJECT; out_value->v.owning_cursor_object = aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &json_blob)); aws_byte_buf_clean_up(&json_blob); } else if (aws_json_value_is_boolean(result)) { if (aws_json_value_get_boolean(result, &out_value->v.boolean)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse boolean from node."); goto on_error; } out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; } else if (aws_json_value_is_number(result)) { if (aws_json_value_get_number(result, &out_value->v.number)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse number from node."); goto on_error; } out_value->type = AWS_ENDPOINTS_VALUE_NUMBER; } aws_json_value_destroy(root_node); return AWS_OP_SUCCESS; on_error: aws_json_value_destroy(root_node); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } static int s_resolve_templated_value_with_pathing( struct aws_allocator *allocator, struct aws_endpoints_resolution_scope *scope, struct aws_byte_cursor template_cur, struct aws_owning_cursor *out_owning_cursor) { struct aws_endpoints_value resolved_value = {0}; struct aws_byte_cursor split = {0}; if (!aws_byte_cursor_next_split(&template_cur, '#', &split) || split.len == 0) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value in template string."); goto on_error; } struct aws_hash_element *elem = NULL; if (aws_hash_table_find(&scope->values, &split, &elem) || elem == NULL) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Templated value does not exist: " PRInSTR, AWS_BYTE_CURSOR_PRI(split)); goto on_error; } struct aws_endpoints_scope_value *scope_value = elem->value; if (!aws_byte_cursor_next_split(&template_cur, '#', &split)) { if (scope_value->value.type != AWS_ENDPOINTS_VALUE_STRING) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected type: must be string if pathing is not provided"); goto on_error; } *out_owning_cursor = aws_endpoints_non_owning_cursor_create(scope_value->value.v.owning_cursor_string.cur); return AWS_OP_SUCCESS; } if (scope_value->value.type == AWS_ENDPOINTS_VALUE_OBJECT) { if (aws_endpoints_path_through_object(allocator, 
&scope_value->value, split, &resolved_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through object."); goto on_error; } } else if (scope_value->value.type == AWS_ENDPOINTS_VALUE_ARRAY) { if (aws_endpoints_path_through_array(allocator, scope, &scope_value->value, split, &resolved_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through array."); goto on_error; } } else { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value type for pathing through."); goto on_error; } if (resolved_value.type != AWS_ENDPOINTS_VALUE_STRING) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Templated string didn't resolve to string"); goto on_error; } if (resolved_value.v.owning_cursor_string.string != NULL) { /* Transfer ownership of the underlying string. */ *out_owning_cursor = aws_endpoints_owning_cursor_from_string(resolved_value.v.owning_cursor_string.string); resolved_value.v.owning_cursor_string.string = NULL; } else { /* Unlikely to get here since current pathing always return new string. */ *out_owning_cursor = aws_endpoints_non_owning_cursor_create(resolved_value.v.owning_cursor_string.cur); } aws_endpoints_value_clean_up(&resolved_value); return AWS_OP_SUCCESS; on_error: aws_endpoints_value_clean_up(&resolved_value); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } static int s_resolve_template(struct aws_byte_cursor template, void *user_data, struct aws_owning_cursor *out_cursor) { struct resolve_template_callback_data *data = user_data; if (s_resolve_templated_value_with_pathing(data->allocator, data->scope, template, out_cursor)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve template value."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); ; } return AWS_OP_SUCCESS; } /* ****************************** * Request Context ****************************** */ static void s_endpoints_request_context_destroy(void *data) { if (data == NULL) { return; } struct aws_endpoints_request_context *context = data; aws_hash_table_clean_up(&context->values); aws_mem_release(context->allocator, context); } struct aws_endpoints_request_context *aws_endpoints_request_context_new(struct aws_allocator *allocator) { AWS_PRECONDITION(allocator); struct aws_endpoints_request_context *context = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_request_context)); context->allocator = allocator; aws_ref_count_init(&context->ref_count, context, s_endpoints_request_context_destroy); if (aws_hash_table_init( &context->values, allocator, 0, aws_hash_byte_cursor_ptr, aws_endpoints_byte_cursor_eq, NULL, s_scope_value_destroy_cb)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values."); goto on_error; } return context; on_error: s_endpoints_request_context_destroy(context); return NULL; } struct aws_endpoints_request_context *aws_endpoints_request_context_acquire( struct aws_endpoints_request_context *request_context) { AWS_PRECONDITION(request_context); if (request_context) { aws_ref_count_acquire(&request_context->ref_count); } return request_context; } struct aws_endpoints_request_context *aws_endpoints_request_context_release( struct aws_endpoints_request_context *request_context) { if (request_context) { aws_ref_count_release(&request_context->ref_count); } return NULL; } int aws_endpoints_request_context_add_string( struct aws_allocator *allocator, struct aws_endpoints_request_context *context, struct aws_byte_cursor name, struct aws_byte_cursor 
value) { AWS_PRECONDITION(allocator); struct aws_endpoints_scope_value *val = aws_endpoints_scope_value_new(allocator, name); val->value.type = AWS_ENDPOINTS_VALUE_STRING; val->value.v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, value); if (aws_hash_table_put(&context->values, &val->name.cur, val, NULL)) { aws_endpoints_scope_value_destroy(val); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED); }; return AWS_OP_SUCCESS; } int aws_endpoints_request_context_add_boolean( struct aws_allocator *allocator, struct aws_endpoints_request_context *context, struct aws_byte_cursor name, bool value) { AWS_PRECONDITION(allocator); struct aws_endpoints_scope_value *val = aws_endpoints_scope_value_new(allocator, name); val->value.type = AWS_ENDPOINTS_VALUE_BOOLEAN; val->value.v.boolean = value; if (aws_hash_table_put(&context->values, &val->name.cur, val, NULL)) { aws_endpoints_scope_value_destroy(val); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED); }; return AWS_OP_SUCCESS; } /* ****************************** * Rule engine. ****************************** */ struct aws_endpoints_resolved_endpoint { struct aws_allocator *allocator; struct aws_ref_count ref_count; enum aws_endpoints_resolved_endpoint_type type; union { struct resolved_endpoint { struct aws_byte_buf url; struct aws_byte_buf properties; struct aws_hash_table headers; } endpoint; struct aws_byte_buf error; } r; }; static void s_endpoints_resolved_endpoint_destroy(void *data) { if (data == NULL) { return; } struct aws_endpoints_resolved_endpoint *resolved = data; if (resolved->type == AWS_ENDPOINTS_RESOLVED_ENDPOINT) { aws_byte_buf_clean_up(&resolved->r.endpoint.url); aws_byte_buf_clean_up(&resolved->r.endpoint.properties); aws_hash_table_clean_up(&resolved->r.endpoint.headers); } else if (resolved->type == AWS_ENDPOINTS_RESOLVED_ERROR) { aws_byte_buf_clean_up(&resolved->r.error); } aws_mem_release(resolved->allocator, resolved); } struct aws_endpoints_resolved_endpoint *s_endpoints_resolved_endpoint_new(struct aws_allocator *allocator) { AWS_PRECONDITION(allocator); struct aws_endpoints_resolved_endpoint *resolved = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_resolved_endpoint)); resolved->allocator = allocator; aws_ref_count_init(&resolved->ref_count, resolved, s_endpoints_resolved_endpoint_destroy); return resolved; } struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_acquire( struct aws_endpoints_resolved_endpoint *resolved_endpoint) { AWS_PRECONDITION(resolved_endpoint); if (resolved_endpoint) { aws_ref_count_acquire(&resolved_endpoint->ref_count); } return resolved_endpoint; } struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_release( struct aws_endpoints_resolved_endpoint *resolved_endpoint) { if (resolved_endpoint) { aws_ref_count_release(&resolved_endpoint->ref_count); } return NULL; } enum aws_endpoints_resolved_endpoint_type aws_endpoints_resolved_endpoint_get_type( const struct aws_endpoints_resolved_endpoint *resolved_endpoint) { AWS_PRECONDITION(resolved_endpoint); return resolved_endpoint->type; } int aws_endpoints_resolved_endpoint_get_url( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, struct aws_byte_cursor *out_url) { AWS_PRECONDITION(resolved_endpoint); AWS_PRECONDITION(out_url); if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *out_url = aws_byte_cursor_from_buf(&resolved_endpoint->r.endpoint.url); 
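/*
 * A minimal consumer-side sketch for these getters (an illustration only,
 * assuming the public declarations in aws/sdkutils/endpoints_rule_engine.h;
 * "resolved" stands for a previously resolved endpoint):
 *
 *   struct aws_byte_cursor cur;
 *   switch (aws_endpoints_resolved_endpoint_get_type(resolved)) {
 *       case AWS_ENDPOINTS_RESOLVED_ENDPOINT:
 *           aws_endpoints_resolved_endpoint_get_url(resolved, &cur);
 *           break;
 *       case AWS_ENDPOINTS_RESOLVED_ERROR:
 *           aws_endpoints_resolved_endpoint_get_error(resolved, &cur);
 *           break;
 *   }
 *
 * The returned cursor aliases memory owned by the resolved endpoint, so it is
 * only valid while a reference to the resolved endpoint is held.
 */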
return AWS_OP_SUCCESS; } int aws_endpoints_resolved_endpoint_get_properties( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, struct aws_byte_cursor *out_properties) { AWS_PRECONDITION(resolved_endpoint); AWS_PRECONDITION(out_properties); if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *out_properties = aws_byte_cursor_from_buf(&resolved_endpoint->r.endpoint.properties); return AWS_OP_SUCCESS; } int aws_endpoints_resolved_endpoint_get_headers( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, const struct aws_hash_table **out_headers) { AWS_PRECONDITION(resolved_endpoint); AWS_PRECONDITION(out_headers); if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *out_headers = &resolved_endpoint->r.endpoint.headers; return AWS_OP_SUCCESS; } int aws_endpoints_resolved_endpoint_get_error( const struct aws_endpoints_resolved_endpoint *resolved_endpoint, struct aws_byte_cursor *out_error) { AWS_PRECONDITION(resolved_endpoint); AWS_PRECONDITION(out_error); if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ERROR) { return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } *out_error = aws_byte_cursor_from_buf(&resolved_endpoint->r.error); return AWS_OP_SUCCESS; } struct aws_endpoints_rule_engine { struct aws_allocator *allocator; struct aws_ref_count ref_count; struct aws_endpoints_ruleset *ruleset; struct aws_partitions_config *partitions_config; }; static void s_endpoints_rule_engine_destroy(void *data) { if (data == NULL) { return; } struct aws_endpoints_rule_engine *engine = data; aws_endpoints_ruleset_release(engine->ruleset); aws_partitions_config_release(engine->partitions_config); aws_mem_release(engine->allocator, engine); } struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_new( struct aws_allocator *allocator, struct aws_endpoints_ruleset *ruleset, struct aws_partitions_config *partitions_config) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(ruleset); struct aws_endpoints_rule_engine *engine = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_rule_engine)); engine->allocator = allocator; engine->ruleset = ruleset; engine->partitions_config = partitions_config; aws_endpoints_ruleset_acquire(ruleset); aws_partitions_config_acquire(partitions_config); aws_ref_count_init(&engine->ref_count, engine, s_endpoints_rule_engine_destroy); return engine; } struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_acquire(struct aws_endpoints_rule_engine *rule_engine) { AWS_PRECONDITION(rule_engine); if (rule_engine) { aws_ref_count_acquire(&rule_engine->ref_count); } return rule_engine; } struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_release(struct aws_endpoints_rule_engine *rule_engine) { if (rule_engine) { aws_ref_count_release(&rule_engine->ref_count); } return NULL; } int s_revert_scope(struct aws_endpoints_resolution_scope *scope) { for (size_t idx = 0; idx < aws_array_list_length(&scope->added_keys); ++idx) { struct aws_byte_cursor *cur = NULL; if (aws_array_list_get_at_ptr(&scope->added_keys, (void **)&cur, idx)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to retrieve value."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } aws_hash_table_remove(&scope->values, cur, NULL, NULL); } aws_array_list_clear(&scope->added_keys); return AWS_OP_SUCCESS; } static void s_on_string_array_element_destroy(void *element) { struct aws_string *str = *(struct aws_string 
**)element; aws_string_destroy(str); } static void s_callback_headers_destroy(void *data) { struct aws_array_list *array = data; struct aws_allocator *alloc = array->alloc; aws_array_list_deep_clean_up(array, s_on_string_array_element_destroy); aws_mem_release(alloc, array); } static int s_resolve_headers( struct aws_allocator *allocator, struct aws_endpoints_resolution_scope *scope, struct aws_hash_table *headers, struct aws_hash_table *out_headers) { struct aws_endpoints_value value; struct aws_array_list *resolved_headers = NULL; if (aws_hash_table_init( out_headers, allocator, aws_hash_table_get_entry_count(headers), aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, s_callback_headers_destroy)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init table for resolved headers"); goto on_error; } for (struct aws_hash_iter iter = aws_hash_iter_begin(headers); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct aws_string *key = (struct aws_string *)iter.element.key; struct aws_array_list *header_list = (struct aws_array_list *)iter.element.value; resolved_headers = aws_mem_calloc(allocator, 1, sizeof(struct aws_array_list)); aws_array_list_init_dynamic( resolved_headers, allocator, aws_array_list_length(header_list), sizeof(struct aws_string *)); for (size_t i = 0; i < aws_array_list_length(header_list); ++i) { struct aws_endpoints_expr *expr = NULL; if (aws_array_list_get_at_ptr(header_list, (void **)&expr, i)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get header."); goto on_error; } if (s_resolve_expr(allocator, expr, scope, &value) || value.type != AWS_ENDPOINTS_VALUE_STRING) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve header expr."); goto on_error; } struct aws_string *str = aws_string_new_from_cursor(allocator, &value.v.owning_cursor_string.cur); if (aws_array_list_push_back(resolved_headers, &str)) { aws_string_destroy(str); AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resolved header to result."); goto on_error; } aws_endpoints_value_clean_up(&value); } if (aws_hash_table_put(out_headers, aws_string_clone_or_reuse(allocator, key), resolved_headers, NULL)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resolved header to result."); goto on_error; } } return AWS_OP_SUCCESS; on_error: aws_endpoints_value_clean_up(&value); if (resolved_headers != NULL) { s_callback_headers_destroy(resolved_headers); } aws_hash_table_clean_up(out_headers); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } int aws_endpoints_rule_engine_resolve( struct aws_endpoints_rule_engine *engine, const struct aws_endpoints_request_context *context, struct aws_endpoints_resolved_endpoint **out_resolved_endpoint) { if (aws_array_list_length(&engine->ruleset->rules) == 0) { return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET); } int result = AWS_OP_SUCCESS; struct aws_endpoints_resolution_scope scope; if (s_init_top_level_scope(engine->allocator, context, engine->ruleset, engine->partitions_config, &scope)) { result = AWS_OP_ERR; goto on_done; } while (scope.rule_idx < aws_array_list_length(scope.rules)) { struct aws_endpoints_rule *rule = NULL; if (aws_array_list_get_at_ptr(scope.rules, (void **)&rule, scope.rule_idx)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get rule."); result = AWS_OP_ERR; goto on_done; } bool is_truthy = false; if (s_resolve_conditions(engine->allocator, &rule->conditions, &scope, &is_truthy)) { 
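/* Note: a non-zero return here is a hard resolution failure and aborts the entire resolve; conditions that merely evaluate to false set is_truthy to false instead, which reverts any values they added to the scope and moves on to the next rule. */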
AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve conditions."); result = AWS_OP_ERR; goto on_done; } if (!is_truthy) { s_revert_scope(&scope); ++scope.rule_idx; continue; } switch (rule->type) { case AWS_ENDPOINTS_RULE_ENDPOINT: { struct aws_endpoints_resolved_endpoint *endpoint = s_endpoints_resolved_endpoint_new(engine->allocator); endpoint->type = AWS_ENDPOINTS_RESOLVED_ENDPOINT; struct aws_endpoints_value val; if (s_resolve_expr(engine->allocator, &rule->rule_data.endpoint.url, &scope, &val) || val.type != AWS_ENDPOINTS_VALUE_STRING || aws_byte_buf_init_copy_from_cursor( &endpoint->r.endpoint.url, engine->allocator, val.v.owning_cursor_string.cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated url."); result = AWS_OP_ERR; goto on_done; } aws_endpoints_value_clean_up(&val); struct resolve_template_callback_data data = {.allocator = engine->allocator, .scope = &scope}; if (rule->rule_data.endpoint.properties.len > 0 && aws_byte_buf_init_from_resolved_templated_string( engine->allocator, &endpoint->r.endpoint.properties, aws_byte_cursor_from_buf(&rule->rule_data.endpoint.properties), s_resolve_template, &data, true)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated properties."); result = AWS_OP_ERR; goto on_done; } if (s_resolve_headers( engine->allocator, &scope, &rule->rule_data.endpoint.headers, &endpoint->r.endpoint.headers)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated headers."); result = AWS_OP_ERR; goto on_done; } *out_resolved_endpoint = endpoint; goto on_done; } case AWS_ENDPOINTS_RULE_ERROR: { struct aws_endpoints_resolved_endpoint *error = s_endpoints_resolved_endpoint_new(engine->allocator); error->type = AWS_ENDPOINTS_RESOLVED_ERROR; struct aws_endpoints_value val; if (s_resolve_expr(engine->allocator, &rule->rule_data.error.error, &scope, &val) || val.type != AWS_ENDPOINTS_VALUE_STRING || aws_byte_buf_init_copy_from_cursor( &error->r.error, engine->allocator, val.v.owning_cursor_string.cur)) { aws_endpoints_value_clean_up(&val); AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated url."); result = AWS_OP_ERR; goto on_done; } aws_endpoints_value_clean_up(&val); *out_resolved_endpoint = error; goto on_done; } case AWS_ENDPOINTS_RULE_TREE: { /* jumping down a level */ aws_array_list_clear(&scope.added_keys); scope.rule_idx = 0; scope.rules = &rule->rule_data.tree.rules; continue; } default: { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected rule type."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } } } AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "All rules have been exhausted."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED); on_done: AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Resolved endpoint with status %d", result); s_scope_clean_up(&scope); return result; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/endpoints_ruleset.c000066400000000000000000001047501456575232400263130ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include /* parameter types */ static struct aws_byte_cursor s_string_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("string"); static struct aws_byte_cursor s_boolean_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("boolean"); /* rule types */ static struct aws_byte_cursor s_endpoint_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("endpoint"); static struct aws_byte_cursor s_error_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("error"); static struct aws_byte_cursor s_tree_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("tree"); static struct aws_byte_cursor s_supported_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1.0"); static struct aws_byte_cursor s_empty_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""); /* TODO: improve error messages. Include json line num? or dump json node? */ struct aws_byte_cursor aws_endpoints_get_supported_ruleset_version(void) { return s_supported_version; } /* ****************************** * Parameter Getters. ****************************** */ enum aws_endpoints_parameter_type aws_endpoints_parameter_get_type(const struct aws_endpoints_parameter *parameter) { AWS_PRECONDITION(parameter); return parameter->type; } struct aws_byte_cursor aws_endpoints_parameter_get_built_in(const struct aws_endpoints_parameter *parameter) { AWS_PRECONDITION(parameter); return parameter->built_in; } int aws_endpoints_parameter_get_default_string( const struct aws_endpoints_parameter *parameter, struct aws_byte_cursor *out_cursor) { AWS_PRECONDITION(parameter); AWS_PRECONDITION(out_cursor); if (parameter->type == AWS_ENDPOINTS_PARAMETER_STRING) { *out_cursor = parameter->default_value.string; return AWS_OP_SUCCESS; }; *out_cursor = s_empty_cursor; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } int aws_endpoints_parameter_get_default_boolean( const struct aws_endpoints_parameter *parameter, const bool **out_bool) { AWS_PRECONDITION(parameter); AWS_PRECONDITION(out_bool); if (parameter->type == AWS_ENDPOINTS_PARAMETER_BOOLEAN) { *out_bool = ¶meter->default_value.boolean; return AWS_OP_SUCCESS; }; *out_bool = NULL; return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } bool aws_endpoints_parameters_get_is_required(const struct aws_endpoints_parameter *parameter) { AWS_PRECONDITION(parameter); return parameter->is_required; } struct aws_byte_cursor aws_endpoints_parameter_get_documentation(const struct aws_endpoints_parameter *parameter) { AWS_PRECONDITION(parameter); return parameter->documentation; } bool aws_endpoints_parameters_get_is_deprecated(const struct aws_endpoints_parameter *parameter) { AWS_PRECONDITION(parameter); return parameter->is_deprecated; } struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_message(const struct aws_endpoints_parameter *parameter) { AWS_PRECONDITION(parameter); return parameter->deprecated_message; } struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_since(const struct aws_endpoints_parameter *parameter) { AWS_PRECONDITION(parameter); return parameter->deprecated_since; } /* ****************************** * Parser getters. 
****************************** */ const struct aws_hash_table *aws_endpoints_ruleset_get_parameters(struct aws_endpoints_ruleset *ruleset) { AWS_PRECONDITION(ruleset); return &ruleset->parameters; } struct aws_byte_cursor aws_endpoints_ruleset_get_version(const struct aws_endpoints_ruleset *ruleset) { AWS_PRECONDITION(ruleset); return ruleset->version; } struct aws_byte_cursor aws_endpoints_ruleset_get_service_id(const struct aws_endpoints_ruleset *ruleset) { AWS_PRECONDITION(ruleset); return ruleset->service_id; } /* ****************************** * Parser helpers. ****************************** */ static void s_on_rule_array_element_clean_up(void *element) { struct aws_endpoints_rule *rule = element; aws_endpoints_rule_clean_up(rule); } static void s_on_expr_element_clean_up(void *data) { struct aws_endpoints_expr *expr = data; aws_endpoints_expr_clean_up(expr); } static void s_callback_endpoints_parameter_destroy(void *data) { struct aws_endpoints_parameter *parameter = data; aws_endpoints_parameter_destroy(parameter); } static void s_callback_headers_destroy(void *data) { struct aws_array_list *array = data; struct aws_allocator *alloc = array->alloc; aws_array_list_deep_clean_up(array, s_on_expr_element_clean_up); aws_array_list_clean_up(array); aws_mem_release(alloc, array); } struct array_parser_wrapper { struct aws_allocator *allocator; struct aws_array_list *array; }; static int s_init_array_from_json( struct aws_allocator *allocator, const struct aws_json_value *value_node, struct aws_array_list *values, aws_json_on_value_encountered_const_fn *value_fn) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(values); AWS_PRECONDITION(value_node); AWS_PRECONDITION(value_fn); struct array_parser_wrapper wrapper = { .allocator = allocator, .array = values, }; if (aws_json_const_iterate_array(value_node, value_fn, &wrapper)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to iterate through array."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } return AWS_OP_SUCCESS; } struct member_parser_wrapper { struct aws_allocator *allocator; struct aws_hash_table *table; }; static int s_init_members_from_json( struct aws_allocator *allocator, struct aws_json_value *node, struct aws_hash_table *table, aws_json_on_member_encountered_const_fn *member_fn) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(node); AWS_PRECONDITION(table); struct member_parser_wrapper wrapper = { .allocator = allocator, .table = table, }; if (aws_json_const_iterate_object(node, member_fn, &wrapper)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to iterate through member fields."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } return AWS_OP_SUCCESS; } /* ****************************** * Parser functions. ****************************** */ static int s_parse_function( struct aws_allocator *allocator, const struct aws_json_value *node, struct aws_endpoints_function *function); /* * Note: this function only fails in cases where node is a ref (ie object with a * ref field), but cannot be parsed completely. 
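* For example, {"ref": "Region"} yields the reference "Region". A node with no "ref" member is not an error here: out_reference is left zeroed and AWS_OP_SUCCESS is returned, so the caller can fall back to parsing the node as a function.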
*/ static int s_try_parse_reference(const struct aws_json_value *node, struct aws_byte_cursor *out_reference) { AWS_PRECONDITION(node); AWS_ZERO_STRUCT(*out_reference); struct aws_json_value *ref_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("ref")); if (ref_node != NULL && aws_json_value_get_string(ref_node, out_reference)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse ref."); AWS_ZERO_STRUCT(*out_reference); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } return AWS_OP_SUCCESS; } static int s_parse_expr( struct aws_allocator *allocator, const struct aws_json_value *node, struct aws_endpoints_expr *expr); static int s_on_expr_element( size_t idx, const struct aws_json_value *value_node, bool *out_should_continue, void *user_data) { (void)idx; (void)out_should_continue; AWS_PRECONDITION(value_node); AWS_PRECONDITION(user_data); struct array_parser_wrapper *wrapper = user_data; struct aws_endpoints_expr expr; if (s_parse_expr(wrapper->allocator, value_node, &expr)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse expr."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } aws_array_list_push_back(wrapper->array, &expr); return AWS_OP_SUCCESS; } static int s_parse_expr( struct aws_allocator *allocator, const struct aws_json_value *node, struct aws_endpoints_expr *expr) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(node); AWS_PRECONDITION(expr); AWS_ZERO_STRUCT(*expr); /* TODO: this recurses. in practical circumstances depth will never be high, but we should still consider doing iterative approach */ if (aws_json_value_is_string(node) && !aws_json_value_get_string(node, &expr->e.string)) { expr->type = AWS_ENDPOINTS_EXPR_STRING; return AWS_OP_SUCCESS; } else if (aws_json_value_is_number(node) && !aws_json_value_get_number(node, &expr->e.number)) { expr->type = AWS_ENDPOINTS_EXPR_NUMBER; return AWS_OP_SUCCESS; } else if (aws_json_value_is_boolean(node) && !aws_json_value_get_boolean(node, &expr->e.boolean)) { expr->type = AWS_ENDPOINTS_EXPR_BOOLEAN; return AWS_OP_SUCCESS; } else if (aws_json_value_is_array(node)) { expr->type = AWS_ENDPOINTS_EXPR_ARRAY; size_t num_elements = aws_json_get_array_size(node); aws_array_list_init_dynamic(&expr->e.array, allocator, num_elements, sizeof(struct aws_endpoints_expr)); if (s_init_array_from_json(allocator, node, &expr->e.array, s_on_expr_element)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse array value type."); goto on_error; } return AWS_OP_SUCCESS; } struct aws_byte_cursor reference; if (s_try_parse_reference(node, &reference)) { goto on_error; } if (reference.len > 0) { expr->type = AWS_ENDPOINTS_EXPR_REFERENCE; expr->e.reference = reference; return AWS_OP_SUCCESS; } expr->type = AWS_ENDPOINTS_EXPR_FUNCTION; if (s_parse_function(allocator, node, &expr->e.function)) { goto on_error; } return AWS_OP_SUCCESS; on_error: aws_endpoints_expr_clean_up(expr); AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse expr type"); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_parse_function( struct aws_allocator *allocator, const struct aws_json_value *node, struct aws_endpoints_function *function) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(node); AWS_ZERO_STRUCT(*function); struct aws_json_value *fn_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("fn")); if (fn_node == NULL) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Node is not a function."); goto 
on_error; } struct aws_byte_cursor fn_cur; if (aws_json_value_get_string(fn_node, &fn_cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract fn name."); goto on_error; } function->fn = AWS_ENDPOINTS_FN_LAST; uint64_t hash = aws_hash_byte_cursor_ptr(&fn_cur); for (int idx = AWS_ENDPOINTS_FN_FIRST; idx < AWS_ENDPOINTS_FN_LAST; ++idx) { if (aws_endpoints_fn_name_hash[idx] == hash) { function->fn = idx; break; } } if (function->fn == AWS_ENDPOINTS_FN_LAST) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Could not map function name to function type: " PRInSTR, AWS_BYTE_CURSOR_PRI(fn_cur)); goto on_error; } struct aws_json_value *argv_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("argv")); if (argv_node == NULL || !aws_json_value_is_array(argv_node)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "No argv or unexpected type."); goto on_error; } size_t num_args = aws_json_get_array_size(argv_node); aws_array_list_init_dynamic(&function->argv, allocator, num_args, sizeof(struct aws_endpoints_expr)); if (s_init_array_from_json(allocator, argv_node, &function->argv, s_on_expr_element)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse argv."); goto on_error; } return AWS_OP_SUCCESS; on_error: aws_endpoints_function_clean_up(function); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_on_parameter_key( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)out_should_continue; AWS_PRECONDITION(key); AWS_PRECONDITION(value); AWS_PRECONDITION(user_data); struct member_parser_wrapper *wrapper = user_data; struct aws_endpoints_parameter *parameter = aws_endpoints_parameter_new(wrapper->allocator, *key); /* required fields */ struct aws_byte_cursor type_cur; struct aws_json_value *type_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("type")); if (type_node == NULL || aws_json_value_get_string(type_node, &type_cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter type."); goto on_error; } enum aws_endpoints_parameter_type type; if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_string_type_cur)) { type = AWS_ENDPOINTS_PARAMETER_STRING; } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_boolean_type_cur)) { type = AWS_ENDPOINTS_PARAMETER_BOOLEAN; } else { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for parameter."); goto on_error; } parameter->type = type; struct aws_json_value *documentation_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("documentation")); /* TODO: spec calls for documentation to be required, but several test-cases are missing docs on parameters */ if (documentation_node != NULL) { if (aws_json_value_get_string(documentation_node, ¶meter->documentation)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter documentation."); goto on_error; } } /* optional fields */ struct aws_json_value *built_in_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("builtIn")); if (built_in_node != NULL) { if (aws_json_value_get_string(built_in_node, ¶meter->built_in)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for built-in parameter field."); goto on_error; } } struct aws_json_value *required_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("required")); if (required_node != NULL) { if (!aws_json_value_is_boolean(required_node)) { 
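/* Note: "required" must be a JSON boolean; string values such as "true" are rejected here rather than coerced. */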
AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for required parameter field."); goto on_error; } aws_json_value_get_boolean(required_node, ¶meter->is_required); } struct aws_json_value *default_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("default")); parameter->has_default_value = default_node != NULL; if (default_node != NULL) { if (type == AWS_ENDPOINTS_PARAMETER_STRING && aws_json_value_get_string(default_node, ¶meter->default_value.string)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for default parameter value."); goto on_error; } else if ( type == AWS_ENDPOINTS_PARAMETER_BOOLEAN && aws_json_value_get_boolean(default_node, ¶meter->default_value.boolean)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for default parameter value."); goto on_error; } } struct aws_json_value *deprecated_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("deprecated")); if (deprecated_node != NULL) { struct aws_json_value *deprecated_message_node = aws_json_value_get_from_object(deprecated_node, aws_byte_cursor_from_c_str("message")); if (deprecated_message_node != NULL && aws_json_value_get_string(deprecated_message_node, ¶meter->deprecated_message)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for deprecated message."); goto on_error; } struct aws_json_value *deprecated_since_node = aws_json_value_get_from_object(deprecated_node, aws_byte_cursor_from_c_str("since")); if (deprecated_since_node != NULL && aws_json_value_get_string(deprecated_since_node, ¶meter->deprecated_since)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for deprecated since."); goto on_error; } } if (aws_hash_table_put(wrapper->table, ¶meter->name, parameter, NULL)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to add parameter."); goto on_error; } return AWS_OP_SUCCESS; on_error: aws_endpoints_parameter_destroy(parameter); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_on_condition_element( size_t idx, const struct aws_json_value *condition_node, bool *out_should_continue, void *user_data) { (void)idx; (void)out_should_continue; AWS_PRECONDITION(condition_node); AWS_PRECONDITION(user_data); struct array_parser_wrapper *wrapper = user_data; struct aws_endpoints_condition condition; AWS_ZERO_STRUCT(condition); condition.expr.type = AWS_ENDPOINTS_EXPR_FUNCTION; if (s_parse_function(wrapper->allocator, condition_node, &condition.expr.e.function)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse function."); goto on_error; } struct aws_json_value *assign_node = aws_json_value_get_from_object(condition_node, aws_byte_cursor_from_c_str("assign")); if (assign_node != NULL && aws_json_value_get_string(assign_node, &condition.assign)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for assign."); goto on_error; } aws_array_list_push_back(wrapper->array, &condition); return AWS_OP_SUCCESS; on_error: aws_endpoints_condition_clean_up(&condition); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_on_header_element( size_t idx, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)idx; (void)out_should_continue; AWS_PRECONDITION(value); AWS_PRECONDITION(user_data); struct array_parser_wrapper *wrapper = user_data; struct aws_endpoints_expr expr; if (s_parse_expr(wrapper->allocator, value, &expr)) { 
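/* Header values in a ruleset are arrays of expressions, e.g. (illustrative shape only) "headers": { "x-amz-region-set": [{"ref": "Region"}] }; each element goes through s_parse_expr, so it may be a literal string, a reference, or a function call. */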
AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected format for header element."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } aws_array_list_push_back(wrapper->array, &expr); return AWS_OP_SUCCESS; } static int s_on_headers_key( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)out_should_continue; AWS_PRECONDITION(key); AWS_PRECONDITION(value); AWS_PRECONDITION(user_data); struct member_parser_wrapper *wrapper = user_data; if (!aws_json_value_is_array(value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected format for header value."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } size_t num_elements = aws_json_get_array_size(value); struct aws_array_list *headers = aws_mem_calloc(wrapper->allocator, 1, sizeof(struct aws_array_list)); aws_array_list_init_dynamic(headers, wrapper->allocator, num_elements, sizeof(struct aws_endpoints_expr)); if (s_init_array_from_json(wrapper->allocator, value, headers, s_on_header_element)) { goto on_error; } aws_hash_table_put(wrapper->table, aws_string_new_from_cursor(wrapper->allocator, key), headers, NULL); return AWS_OP_SUCCESS; on_error: if (headers) { s_callback_headers_destroy(headers); } return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_parse_endpoints_rule_data_endpoint( struct aws_allocator *allocator, const struct aws_json_value *rule_node, struct aws_endpoints_rule_data_endpoint *data_rule) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(rule_node); AWS_PRECONDITION(data_rule); data_rule->allocator = allocator; struct aws_json_value *url_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("url")); if (url_node == NULL || aws_json_value_is_string(url_node)) { data_rule->url.type = AWS_ENDPOINTS_EXPR_STRING; aws_json_value_get_string(url_node, &data_rule->url.e.string); } else { struct aws_byte_cursor reference; if (s_try_parse_reference(url_node, &reference)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse reference."); goto on_error; } if (reference.len > 0) { data_rule->url.type = AWS_ENDPOINTS_EXPR_REFERENCE; data_rule->url.e.reference = reference; } else { data_rule->url.type = AWS_ENDPOINTS_EXPR_FUNCTION; if (s_parse_function(allocator, url_node, &data_rule->url.e.function)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to function."); goto on_error; } } } struct aws_json_value *properties_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("properties")); if (properties_node != NULL) { aws_byte_buf_init(&data_rule->properties, allocator, 0); if (aws_byte_buf_append_json_string(properties_node, &data_rule->properties)) { aws_byte_buf_clean_up(&data_rule->properties); AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract properties."); goto on_error; } } /* TODO: this is currently aws_string* to aws_array_list* * We cannot use same trick as for params to use aws_byte_cursor as key, * since value is a generic type. We can wrap list into a struct, but * seems ugly. Anything cleaner? 
*/ aws_hash_table_init( &data_rule->headers, allocator, 20, aws_hash_string, aws_hash_callback_string_eq, aws_hash_callback_string_destroy, s_callback_headers_destroy); struct aws_json_value *headers_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("headers")); if (headers_node != NULL) { if (s_init_members_from_json(allocator, headers_node, &data_rule->headers, s_on_headers_key)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameters."); goto on_error; } } return AWS_OP_SUCCESS; on_error: aws_endpoints_rule_data_endpoint_clean_up(data_rule); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_parse_endpoints_rule_data_error( struct aws_allocator *allocator, const struct aws_json_value *error_node, struct aws_endpoints_rule_data_error *data_rule) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(error_node); AWS_PRECONDITION(data_rule); if (aws_json_value_is_string(error_node)) { data_rule->error.type = AWS_ENDPOINTS_EXPR_STRING; aws_json_value_get_string(error_node, &data_rule->error.e.string); return AWS_OP_SUCCESS; } struct aws_byte_cursor reference; if (s_try_parse_reference(error_node, &reference)) { goto on_error; } if (reference.len > 0) { data_rule->error.type = AWS_ENDPOINTS_EXPR_REFERENCE; data_rule->error.e.reference = reference; return AWS_OP_SUCCESS; } data_rule->error.type = AWS_ENDPOINTS_EXPR_FUNCTION; if (s_parse_function(allocator, error_node, &data_rule->error.e.function)) { goto on_error; } return AWS_OP_SUCCESS; on_error: aws_endpoints_rule_data_error_clean_up(data_rule); AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse error rule."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_on_rule_element( size_t idx, const struct aws_json_value *value, bool *out_should_continue, void *user_data); static int s_parse_endpoints_rule_data_tree( struct aws_allocator *allocator, const struct aws_json_value *rule_node, struct aws_endpoints_rule_data_tree *rule_data) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(rule_node); AWS_PRECONDITION(rule_data); struct aws_json_value *rules_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("rules")); if (rules_node == NULL || !aws_json_value_is_array(rules_node)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Rules node is missing or unexpected type."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } size_t num_rules = aws_json_get_array_size(rules_node); aws_array_list_init_dynamic(&rule_data->rules, allocator, num_rules, sizeof(struct aws_endpoints_rule)); if (s_init_array_from_json(allocator, rules_node, &rule_data->rules, s_on_rule_element)) { aws_endpoints_rule_data_tree_clean_up(rule_data); AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse rules."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } return AWS_OP_SUCCESS; } static int s_on_rule_element( size_t idx, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)idx; (void)out_should_continue; AWS_PRECONDITION(value); AWS_PRECONDITION(user_data); struct array_parser_wrapper *wrapper = user_data; /* Required fields */ struct aws_byte_cursor type_cur; struct aws_json_value *type_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("type")); if (type_node == NULL || aws_json_value_get_string(type_node, &type_cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract rule type."); return 
aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } enum aws_endpoints_rule_type type; if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_endpoint_type_cur)) { type = AWS_ENDPOINTS_RULE_ENDPOINT; } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_error_type_cur)) { type = AWS_ENDPOINTS_RULE_ERROR; } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_tree_type_cur)) { type = AWS_ENDPOINTS_RULE_TREE; } else { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected rule type."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } struct aws_endpoints_rule rule; AWS_ZERO_STRUCT(rule); rule.type = type; struct aws_json_value *conditions_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("conditions")); if (conditions_node == NULL || !aws_json_value_is_array(conditions_node)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Conditions node missing."); goto on_error; } size_t num_conditions = aws_json_get_array_size(conditions_node); aws_array_list_init_dynamic( &rule.conditions, wrapper->allocator, num_conditions, sizeof(struct aws_endpoints_condition)); if (s_init_array_from_json(wrapper->allocator, conditions_node, &rule.conditions, s_on_condition_element)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract conditions."); goto on_error; } switch (type) { case AWS_ENDPOINTS_RULE_ENDPOINT: { struct aws_json_value *endpoint_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("endpoint")); if (endpoint_node == NULL || s_parse_endpoints_rule_data_endpoint(wrapper->allocator, endpoint_node, &rule.rule_data.endpoint)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract endpoint rule data."); goto on_error; } break; } case AWS_ENDPOINTS_RULE_ERROR: { struct aws_json_value *error_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("error")); if (error_node == NULL || s_parse_endpoints_rule_data_error(wrapper->allocator, error_node, &rule.rule_data.error)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract error rule data."); goto on_error; } break; } case AWS_ENDPOINTS_RULE_TREE: { if (s_parse_endpoints_rule_data_tree(wrapper->allocator, value, &rule.rule_data.tree)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract tree rule data."); goto on_error; } break; } default: AWS_FATAL_ASSERT(false); } /* Optional fields */ struct aws_json_value *documentation_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("documentation")); if (documentation_node != NULL) { if (aws_json_value_get_string(documentation_node, &rule.documentation)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter documentation."); goto on_error; } } aws_array_list_push_back(wrapper->array, &rule); return AWS_OP_SUCCESS; on_error: aws_endpoints_rule_clean_up(&rule); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } static int s_init_ruleset_from_json( struct aws_allocator *allocator, struct aws_endpoints_ruleset *ruleset, struct aws_byte_cursor json) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(ruleset); AWS_PRECONDITION(aws_byte_cursor_is_valid(&json)); struct aws_json_value *root = aws_json_value_new_from_string(allocator, json); if (root == NULL) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse provided string as json."); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); } ruleset->json_root = root; struct aws_json_value *version_node = 
aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("version")); if (version_node == NULL || aws_json_value_get_string(version_node, &ruleset->version)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract version."); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET); goto on_error; } #ifdef ENDPOINTS_VERSION_CHECK /* TODO: samples are currently inconsistent with versions. skip check for now */ if (!aws_byte_cursor_eq_c_str(&ruleset->version, &s_supported_version)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unsupported ruleset version."); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET); goto on_error; } #endif struct aws_json_value *service_id_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("serviceId")); if (service_id_node != NULL && aws_json_value_get_string(service_id_node, &ruleset->service_id)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract serviceId."); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET); goto on_error; } aws_hash_table_init( &ruleset->parameters, allocator, 20, aws_hash_byte_cursor_ptr, aws_endpoints_byte_cursor_eq, NULL, s_callback_endpoints_parameter_destroy); struct aws_json_value *parameters_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("parameters")); if (parameters_node == NULL || s_init_members_from_json(allocator, parameters_node, &ruleset->parameters, s_on_parameter_key)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameters."); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); goto on_error; } struct aws_json_value *rules_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("rules")); if (rules_node == NULL || !aws_json_value_is_array(rules_node)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for rules node."); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); goto on_error; } size_t num_rules = aws_json_get_array_size(rules_node); aws_array_list_init_dynamic(&ruleset->rules, allocator, num_rules, sizeof(struct aws_endpoints_rule)); if (s_init_array_from_json(allocator, rules_node, &ruleset->rules, s_on_rule_element)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract rules."); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED); goto on_error; } return AWS_OP_SUCCESS; on_error: return AWS_OP_ERR; } static void s_endpoints_ruleset_destroy(void *data) { if (data == NULL) { return; } struct aws_endpoints_ruleset *ruleset = data; aws_json_value_destroy(ruleset->json_root); aws_hash_table_clean_up(&ruleset->parameters); aws_array_list_deep_clean_up(&ruleset->rules, s_on_rule_array_element_clean_up); aws_mem_release(ruleset->allocator, ruleset); } struct aws_endpoints_ruleset *aws_endpoints_ruleset_new_from_string( struct aws_allocator *allocator, struct aws_byte_cursor ruleset_json) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(aws_byte_cursor_is_valid(&ruleset_json)); struct aws_endpoints_ruleset *ruleset = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_ruleset)); ruleset->allocator = allocator; if (s_init_ruleset_from_json(allocator, ruleset, ruleset_json)) { s_endpoints_ruleset_destroy(ruleset); return NULL; } aws_ref_count_init(&ruleset->ref_count, ruleset, s_endpoints_ruleset_destroy); return ruleset; } struct aws_endpoints_ruleset *aws_endpoints_ruleset_acquire(struct aws_endpoints_ruleset *ruleset) { AWS_PRECONDITION(ruleset); if (ruleset) { 
aws_ref_count_acquire(&ruleset->ref_count); } return ruleset; } struct aws_endpoints_ruleset *aws_endpoints_ruleset_release(struct aws_endpoints_ruleset *ruleset) { if (ruleset) { aws_ref_count_release(&ruleset->ref_count); } return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/endpoints_standard_lib.c000066400000000000000000000646421456575232400272630ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include static struct aws_byte_cursor s_scheme_http = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http"); static struct aws_byte_cursor s_scheme_https = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https"); static int s_resolve_fn_is_set( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value argv_value = {0}; if (aws_array_list_length(argv) != 1 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_ANY, &argv_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for isSet."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; out_value->v.boolean = argv_value.type != AWS_ENDPOINTS_VALUE_NONE; on_done: aws_endpoints_value_clean_up(&argv_value); return result; } static int s_resolve_fn_not( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value argv_value = {0}; if (aws_array_list_length(argv) != 1 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for not."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; out_value->v.boolean = !argv_value.v.boolean; on_done: aws_endpoints_value_clean_up(&argv_value); return result; } static int s_resolve_fn_get_attr( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value argv_value = {0}; struct aws_endpoints_value argv_path = {0}; if (aws_array_list_length(argv) != 2 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_ANY, &argv_value) || aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_STRING, &argv_path)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for get attr."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } struct aws_byte_cursor path_cur = argv_path.v.owning_cursor_string.cur; if (argv_value.type == AWS_ENDPOINTS_VALUE_OBJECT) { if (aws_endpoints_path_through_object(allocator, &argv_value, path_cur, out_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through object."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } } else if (argv_value.type == AWS_ENDPOINTS_VALUE_ARRAY) { if (aws_endpoints_path_through_array(allocator, scope, &argv_value, path_cur, out_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through 
array."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } } else { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value type for pathing through."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } on_done: aws_endpoints_value_clean_up(&argv_value); aws_endpoints_value_clean_up(&argv_path); return result; } static int s_resolve_fn_substring( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value input_value = {0}; struct aws_endpoints_value start_value = {0}; struct aws_endpoints_value stop_value = {0}; struct aws_endpoints_value reverse_value = {0}; if (aws_array_list_length(argv) != 4 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &input_value) || aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_NUMBER, &start_value) || aws_endpoints_argv_expect(allocator, scope, argv, 2, AWS_ENDPOINTS_VALUE_NUMBER, &stop_value) || aws_endpoints_argv_expect(allocator, scope, argv, 3, AWS_ENDPOINTS_VALUE_BOOLEAN, &reverse_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for substring."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } if (start_value.v.number >= stop_value.v.number || input_value.v.owning_cursor_string.cur.len < stop_value.v.number) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } for (size_t idx = 0; idx < input_value.v.owning_cursor_string.cur.len; ++idx) { if (input_value.v.owning_cursor_string.cur.ptr[idx] > 127) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } } if (!reverse_value.v.boolean) { size_t start = (size_t)start_value.v.number; size_t end = (size_t)stop_value.v.number; struct aws_byte_cursor substring = { .ptr = input_value.v.owning_cursor_string.cur.ptr + start, .len = end - start, }; out_value->type = AWS_ENDPOINTS_VALUE_STRING; out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, substring); } else { size_t r_start = input_value.v.owning_cursor_string.cur.len - (size_t)stop_value.v.number; size_t r_stop = input_value.v.owning_cursor_string.cur.len - (size_t)start_value.v.number; struct aws_byte_cursor substring = { .ptr = input_value.v.owning_cursor_string.cur.ptr + r_start, .len = r_stop - r_start, }; out_value->type = AWS_ENDPOINTS_VALUE_STRING; out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, substring); } on_done: aws_endpoints_value_clean_up(&input_value); aws_endpoints_value_clean_up(&start_value); aws_endpoints_value_clean_up(&stop_value); aws_endpoints_value_clean_up(&reverse_value); return result; } static int s_resolve_fn_string_equals( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value argv_value_1 = {0}; struct aws_endpoints_value argv_value_2 = {0}; if (aws_array_list_length(argv) != 2 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value_1) || aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_STRING, &argv_value_2)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve stringEquals."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto 
on_done; } out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; out_value->v.boolean = aws_byte_cursor_eq(&argv_value_1.v.owning_cursor_string.cur, &argv_value_2.v.owning_cursor_string.cur); on_done: aws_endpoints_value_clean_up(&argv_value_1); aws_endpoints_value_clean_up(&argv_value_2); return result; } static int s_resolve_fn_boolean_equals( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value argv_value_1 = {0}; struct aws_endpoints_value argv_value_2 = {0}; if (aws_array_list_length(argv) != 2 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value_1) || aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value_2)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve booleanEquals."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; out_value->v.boolean = argv_value_1.v.boolean == argv_value_2.v.boolean; on_done: aws_endpoints_value_clean_up(&argv_value_1); aws_endpoints_value_clean_up(&argv_value_2); return result; } static int s_resolve_fn_uri_encode( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_byte_buf buf = {0}; struct aws_endpoints_value argv_value = {0}; if (aws_array_list_length(argv) != 1 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parameter to uri encode."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } if (aws_byte_buf_init(&buf, allocator, 10)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parameter to uri encode."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } if (aws_byte_buf_append_encoding_uri_param(&buf, &argv_value.v.owning_cursor_string.cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to uri encode value."); aws_byte_buf_clean_up(&buf); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } out_value->type = AWS_ENDPOINTS_VALUE_STRING; out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf)); on_done: aws_endpoints_value_clean_up(&argv_value); aws_byte_buf_clean_up(&buf); return result; } static bool s_is_uri_ip(struct aws_byte_cursor host, bool is_uri_encoded) { return aws_is_ipv4(host) || aws_is_ipv6(host, is_uri_encoded); } static int s_resolve_fn_parse_url( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_uri uri; struct aws_json_value *root = NULL; struct aws_endpoints_value argv_url = {0}; if (aws_array_list_length(argv) != 1 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_url)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for parse url."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } if (aws_uri_init_parse(&uri, allocator, &argv_url.v.owning_cursor_string.cur)) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; /* 
reset error from parser, since non-uri strings should successfully resolve to none. */ aws_reset_error(); goto on_done; } if (aws_uri_query_string(&uri)->len > 0) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } const struct aws_byte_cursor *scheme = aws_uri_scheme(&uri); AWS_ASSERT(scheme != NULL); root = aws_json_value_new_object(allocator); if (scheme->len == 0) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } if (!(aws_byte_cursor_eq(scheme, &s_scheme_http) || aws_byte_cursor_eq(scheme, &s_scheme_https))) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } if (aws_json_value_add_to_object( root, aws_byte_cursor_from_c_str("scheme"), aws_json_value_new_string(allocator, *scheme))) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add scheme to object."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } const struct aws_byte_cursor *authority = aws_uri_authority(&uri); AWS_ASSERT(authority != NULL); if (authority->len == 0) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } if (aws_json_value_add_to_object( root, aws_byte_cursor_from_c_str("authority"), aws_json_value_new_string(allocator, *authority))) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add authority to object."); goto on_done; } const struct aws_byte_cursor *path = aws_uri_path(&uri); if (aws_json_value_add_to_object( root, aws_byte_cursor_from_c_str("path"), aws_json_value_new_string(allocator, *path))) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add path to object."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } struct aws_byte_cursor normalized_path_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("normalizedPath"); struct aws_byte_buf normalized_path_buf; if (aws_byte_buf_init_from_normalized_uri_path(allocator, *path, &normalized_path_buf) || aws_json_value_add_to_object( root, normalized_path_cur, aws_json_value_new_string(allocator, aws_byte_cursor_from_buf(&normalized_path_buf)))) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to normalize path."); aws_byte_buf_clean_up(&normalized_path_buf); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } aws_byte_buf_clean_up(&normalized_path_buf); const struct aws_byte_cursor *host_name = aws_uri_host_name(&uri); bool is_ip = s_is_uri_ip(*host_name, true); if (aws_json_value_add_to_object( root, aws_byte_cursor_from_c_str("isIp"), aws_json_value_new_boolean(allocator, is_ip))) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add isIp to object."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } struct aws_byte_buf buf; if (aws_byte_buf_init(&buf, allocator, 0)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed init buffer for parseUrl return."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } if (aws_byte_buf_append_json_string(root, &buf)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to create JSON object."); aws_byte_buf_clean_up(&buf); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } out_value->type = AWS_ENDPOINTS_VALUE_OBJECT; out_value->v.owning_cursor_object = aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf)); aws_byte_buf_clean_up(&buf); on_done: aws_uri_clean_up(&uri); aws_endpoints_value_clean_up(&argv_url); aws_json_value_destroy(root); return result; } static int 
s_resolve_is_valid_host_label( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { struct aws_endpoints_value argv_value = {0}; struct aws_endpoints_value argv_allow_subdomains = {0}; if (aws_array_list_length(argv) != 2 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value) || aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_allow_subdomains)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve not."); goto on_error; } out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; out_value->v.boolean = aws_is_valid_host_label(argv_value.v.owning_cursor_string.cur, argv_allow_subdomains.v.boolean); aws_endpoints_value_clean_up(&argv_value); aws_endpoints_value_clean_up(&argv_allow_subdomains); return AWS_OP_SUCCESS; on_error: aws_endpoints_value_clean_up(&argv_value); aws_endpoints_value_clean_up(&argv_allow_subdomains); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } static int s_resolve_fn_aws_partition( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value argv_region = {0}; if (aws_array_list_length(argv) != 1 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_region)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve arguments for partitions."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } struct aws_hash_element *element = NULL; struct aws_byte_cursor key = argv_region.v.owning_cursor_string.cur; if (aws_hash_table_find(&scope->partitions->region_to_partition_info, &key, &element)) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to find partition info. " PRInSTR, AWS_BYTE_CURSOR_PRI(key)); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } if (element != NULL) { out_value->type = AWS_ENDPOINTS_VALUE_OBJECT; out_value->v.owning_cursor_object = aws_endpoints_owning_cursor_create(allocator, ((struct aws_partition_info *)element->value)->info); goto on_done; } struct aws_byte_cursor partition_cur = {0}; for (struct aws_hash_iter iter = aws_hash_iter_begin(&scope->partitions->base_partitions); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) { struct aws_partition_info *partition = (struct aws_partition_info *)iter.element.value; if (partition->region_regex && aws_endpoints_regex_match(partition->region_regex, key) == AWS_OP_SUCCESS) { partition_cur = partition->name; break; } } if (partition_cur.len == 0) { partition_cur = aws_byte_cursor_from_c_str("aws"); } if (aws_hash_table_find(&scope->partitions->base_partitions, &partition_cur, &element) || element == NULL) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to find partition info. 
" PRInSTR, AWS_BYTE_CURSOR_PRI(key)); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } out_value->type = AWS_ENDPOINTS_VALUE_OBJECT; out_value->v.owning_cursor_object = aws_endpoints_owning_cursor_create(allocator, ((struct aws_partition_info *)element->value)->info); on_done: aws_endpoints_value_clean_up(&argv_region); return result; } static int s_resolve_fn_aws_parse_arn( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_json_value *object = NULL; struct aws_endpoints_value argv_value = {0}; if (aws_array_list_length(argv) != 1 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parseArn."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } struct aws_resource_name arn; if (aws_resource_name_init_from_cur(&arn, &argv_value.v.owning_cursor_string.cur)) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } object = aws_json_value_new_object(allocator); if (object == NULL) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init object for parseArn."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } if (arn.partition.len == 0 || arn.resource_id.len == 0 || arn.service.len == 0) { out_value->type = AWS_ENDPOINTS_VALUE_NONE; goto on_done; } /* Split resource id into components, either on : or / */ /* TODO: support multiple delims in existing split helper? */ struct aws_json_value *resource_id_node = aws_json_value_new_array(allocator); size_t start = 0; for (size_t i = 0; i < arn.resource_id.len; ++i) { if (arn.resource_id.ptr[i] == '/' || arn.resource_id.ptr[i] == ':') { struct aws_byte_cursor cur = { .ptr = arn.resource_id.ptr + start, .len = i - start, }; struct aws_json_value *element = aws_json_value_new_string(allocator, cur); if (element == NULL || aws_json_value_add_array_element(resource_id_node, element)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resource id element"); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } start = i + 1; } } if (start <= arn.resource_id.len) { struct aws_byte_cursor cur = { .ptr = arn.resource_id.ptr + start, .len = arn.resource_id.len - start, }; struct aws_json_value *element = aws_json_value_new_string(allocator, cur); if (element == NULL || aws_json_value_add_array_element(resource_id_node, element)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resource id element"); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } } if (aws_json_value_add_to_object( object, aws_byte_cursor_from_c_str("partition"), aws_json_value_new_string(allocator, arn.partition)) || aws_json_value_add_to_object( object, aws_byte_cursor_from_c_str("service"), aws_json_value_new_string(allocator, arn.service)) || aws_json_value_add_to_object( object, aws_byte_cursor_from_c_str("region"), aws_json_value_new_string(allocator, arn.region)) || aws_json_value_add_to_object( object, aws_byte_cursor_from_c_str("accountId"), aws_json_value_new_string(allocator, arn.account_id)) || aws_json_value_add_to_object(object, aws_byte_cursor_from_c_str("resourceId"), resource_id_node)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add elements to object for parseArn."); result = 
aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } out_value->type = AWS_ENDPOINTS_VALUE_OBJECT; out_value->v.owning_cursor_object = aws_endpoints_owning_cursor_from_string(aws_string_new_from_json(allocator, object)); if (out_value->v.owning_cursor_object.cur.len == 0) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to create string from json."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } on_done: aws_json_value_destroy(object); aws_endpoints_value_clean_up(&argv_value); return result; } static int s_resolve_is_virtual_hostable_s3_bucket( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { int result = AWS_OP_SUCCESS; struct aws_endpoints_value argv_value = {0}; struct aws_endpoints_value argv_allow_subdomains = {0}; if (aws_array_list_length(argv) != 2 || aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value) || aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_allow_subdomains)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for isVirtualHostableS3Bucket."); result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); goto on_done; } struct aws_byte_cursor label_cur = argv_value.v.owning_cursor_string.cur; bool has_uppercase_chars = false; for (size_t i = 0; i < label_cur.len; ++i) { if (label_cur.ptr[i] >= 'A' && label_cur.ptr[i] <= 'Z') { has_uppercase_chars = true; break; } } out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN; out_value->v.boolean = (label_cur.len >= 3 && label_cur.len <= 63) && !has_uppercase_chars && aws_is_valid_host_label(label_cur, argv_allow_subdomains.v.boolean) && !aws_is_ipv4(label_cur); on_done: aws_endpoints_value_clean_up(&argv_value); aws_endpoints_value_clean_up(&argv_allow_subdomains); return result; } typedef int(standard_lib_function_fn)( struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value); static standard_lib_function_fn *s_resolve_fn_vt[AWS_ENDPOINTS_FN_LAST] = { [AWS_ENDPOINTS_FN_IS_SET] = s_resolve_fn_is_set, [AWS_ENDPOINTS_FN_NOT] = s_resolve_fn_not, [AWS_ENDPOINTS_FN_GET_ATTR] = s_resolve_fn_get_attr, [AWS_ENDPOINTS_FN_SUBSTRING] = s_resolve_fn_substring, [AWS_ENDPOINTS_FN_STRING_EQUALS] = s_resolve_fn_string_equals, [AWS_ENDPOINTS_FN_BOOLEAN_EQUALS] = s_resolve_fn_boolean_equals, [AWS_ENDPOINTS_FN_URI_ENCODE] = s_resolve_fn_uri_encode, [AWS_ENDPOINTS_FN_PARSE_URL] = s_resolve_fn_parse_url, [AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL] = s_resolve_is_valid_host_label, [AWS_ENDPOINTS_FN_AWS_PARTITION] = s_resolve_fn_aws_partition, [AWS_ENDPOINTS_FN_AWS_PARSE_ARN] = s_resolve_fn_aws_parse_arn, [AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET] = s_resolve_is_virtual_hostable_s3_bucket, }; int aws_endpoints_dispatch_standard_lib_fn_resolve( enum aws_endpoints_fn_type type, struct aws_allocator *allocator, struct aws_array_list *argv, struct aws_endpoints_resolution_scope *scope, struct aws_endpoints_value *out_value) { return s_resolve_fn_vt[type](allocator, argv, scope, out_value); } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/endpoints_types_impl.c000066400000000000000000000202571456575232400270140ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include uint64_t aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_LAST]; void aws_endpoints_rule_engine_init(void) { aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_IS_SET] = aws_hash_c_string("isSet"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_NOT] = aws_hash_c_string("not"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_GET_ATTR] = aws_hash_c_string("getAttr"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_SUBSTRING] = aws_hash_c_string("substring"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_STRING_EQUALS] = aws_hash_c_string("stringEquals"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_BOOLEAN_EQUALS] = aws_hash_c_string("booleanEquals"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_URI_ENCODE] = aws_hash_c_string("uriEncode"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_PARSE_URL] = aws_hash_c_string("parseURL"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL] = aws_hash_c_string("isValidHostLabel"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_PARTITION] = aws_hash_c_string("aws.partition"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_PARSE_ARN] = aws_hash_c_string("aws.parseArn"); aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET] = aws_hash_c_string("aws.isVirtualHostableS3Bucket"); } static void s_on_condition_array_element_clean_up(void *element) { struct aws_endpoints_condition *condition = element; aws_endpoints_condition_clean_up(condition); } static void s_on_rule_array_element_clean_up(void *element) { struct aws_endpoints_rule *rule = element; aws_endpoints_rule_clean_up(rule); } static void s_on_expr_array_element_clean_up(void *element) { struct aws_endpoints_expr *expr = element; aws_endpoints_expr_clean_up(expr); } struct aws_partition_info *aws_partition_info_new(struct aws_allocator *allocator, struct aws_byte_cursor name) { AWS_PRECONDITION(allocator); struct aws_partition_info *partition_info = aws_mem_calloc(allocator, 1, sizeof(struct aws_partition_info)); partition_info->allocator = allocator; partition_info->name = name; return partition_info; } void aws_partition_info_destroy(struct aws_partition_info *partition_info) { if (partition_info == NULL) { return; } if (!partition_info->is_copy) { aws_string_destroy(partition_info->info); } aws_endpoints_regex_destroy(partition_info->region_regex); aws_mem_release(partition_info->allocator, partition_info); } struct aws_endpoints_parameter *aws_endpoints_parameter_new( struct aws_allocator *allocator, struct aws_byte_cursor name) { AWS_PRECONDITION(allocator); struct aws_endpoints_parameter *parameter = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_parameter)); parameter->allocator = allocator; parameter->name = name; return parameter; } void aws_endpoints_parameter_destroy(struct aws_endpoints_parameter *parameter) { if (parameter == NULL) { return; } aws_mem_release(parameter->allocator, parameter); } void aws_endpoints_rule_clean_up(struct aws_endpoints_rule *rule) { AWS_PRECONDITION(rule); aws_array_list_deep_clean_up(&rule->conditions, s_on_condition_array_element_clean_up); switch (rule->type) { case AWS_ENDPOINTS_RULE_ENDPOINT: aws_endpoints_rule_data_endpoint_clean_up(&rule->rule_data.endpoint); break; case AWS_ENDPOINTS_RULE_ERROR: aws_endpoints_rule_data_error_clean_up(&rule->rule_data.error); break; case AWS_ENDPOINTS_RULE_TREE: aws_endpoints_rule_data_tree_clean_up(&rule->rule_data.tree); break; default: AWS_FATAL_ASSERT(false); } AWS_ZERO_STRUCT(*rule); } void 
aws_endpoints_rule_data_endpoint_clean_up(struct aws_endpoints_rule_data_endpoint *rule_data) { AWS_PRECONDITION(rule_data); aws_endpoints_expr_clean_up(&rule_data->url); aws_byte_buf_clean_up(&rule_data->properties); aws_hash_table_clean_up(&rule_data->headers); AWS_ZERO_STRUCT(*rule_data); } void aws_endpoints_rule_data_error_clean_up(struct aws_endpoints_rule_data_error *rule_data) { AWS_PRECONDITION(rule_data); aws_endpoints_expr_clean_up(&rule_data->error); AWS_ZERO_STRUCT(*rule_data); } void aws_endpoints_rule_data_tree_clean_up(struct aws_endpoints_rule_data_tree *rule_data) { AWS_PRECONDITION(rule_data); aws_array_list_deep_clean_up(&rule_data->rules, s_on_rule_array_element_clean_up); AWS_ZERO_STRUCT(*rule_data); } void aws_endpoints_condition_clean_up(struct aws_endpoints_condition *condition) { AWS_PRECONDITION(condition); aws_endpoints_expr_clean_up(&condition->expr); AWS_ZERO_STRUCT(*condition); } void aws_endpoints_function_clean_up(struct aws_endpoints_function *function) { AWS_PRECONDITION(function); aws_array_list_deep_clean_up(&function->argv, s_on_expr_array_element_clean_up); AWS_ZERO_STRUCT(*function); } void aws_endpoints_expr_clean_up(struct aws_endpoints_expr *expr) { AWS_PRECONDITION(expr); switch (expr->type) { case AWS_ENDPOINTS_EXPR_STRING: case AWS_ENDPOINTS_EXPR_BOOLEAN: case AWS_ENDPOINTS_EXPR_NUMBER: case AWS_ENDPOINTS_EXPR_REFERENCE: break; case AWS_ENDPOINTS_EXPR_FUNCTION: aws_endpoints_function_clean_up(&expr->e.function); break; case AWS_ENDPOINTS_EXPR_ARRAY: aws_array_list_deep_clean_up(&expr->e.array, s_on_expr_array_element_clean_up); break; default: AWS_FATAL_ASSERT(false); } AWS_ZERO_STRUCT(*expr); } struct aws_endpoints_scope_value *aws_endpoints_scope_value_new( struct aws_allocator *allocator, struct aws_byte_cursor name_cur) { AWS_PRECONDITION(allocator); struct aws_endpoints_scope_value *value = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_scope_value)); value->allocator = allocator; value->name = aws_endpoints_owning_cursor_from_cursor(allocator, name_cur); return value; } void aws_endpoints_scope_value_destroy(struct aws_endpoints_scope_value *scope_value) { if (scope_value == NULL) { return; } aws_string_destroy(scope_value->name.string); aws_endpoints_value_clean_up(&scope_value->value); aws_mem_release(scope_value->allocator, scope_value); } void aws_endpoints_value_clean_up_cb(void *value); void aws_endpoints_value_clean_up(struct aws_endpoints_value *aws_endpoints_value) { AWS_PRECONDITION(aws_endpoints_value); if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_STRING) { aws_string_destroy(aws_endpoints_value->v.owning_cursor_string.string); } if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_OBJECT) { aws_string_destroy(aws_endpoints_value->v.owning_cursor_object.string); } if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_ARRAY) { aws_array_list_deep_clean_up(&aws_endpoints_value->v.array, aws_endpoints_value_clean_up_cb); } AWS_ZERO_STRUCT(*aws_endpoints_value); } void aws_endpoints_value_clean_up_cb(void *value) { struct aws_endpoints_value *aws_endpoints_value = value; aws_endpoints_value_clean_up(aws_endpoints_value); } int aws_endpoints_deep_copy_parameter_value( struct aws_allocator *allocator, const struct aws_endpoints_value *from, struct aws_endpoints_value *to) { to->type = from->type; if (to->type == AWS_ENDPOINTS_VALUE_STRING) { to->v.owning_cursor_string = aws_endpoints_owning_cursor_create(allocator, from->v.owning_cursor_string.string); } else if (to->type == AWS_ENDPOINTS_VALUE_BOOLEAN) { 
to->v.boolean = from->v.boolean; } else { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected value type."); return aws_raise_error(AWS_ERROR_INVALID_STATE); } return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/endpoints_util.c000066400000000000000000000445451456575232400256120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #ifdef _MSC_VER /* Disable sscanf warnings on windows. */ # pragma warning(disable : 4204) # pragma warning(disable : 4706) # pragma warning(disable : 4996) #endif /* 4 octets of 3 chars max + 3 separators + null terminator */ #define AWS_IPV4_STR_LEN 16 #define IP_CHAR_FMT "%03" SCNu16 /* arbitrary max length of a region. curent longest region name is 16 chars */ #define AWS_REGION_LEN 50 bool aws_is_ipv4(struct aws_byte_cursor host) { if (host.len > AWS_IPV4_STR_LEN - 1) { return false; } char copy[AWS_IPV4_STR_LEN] = {0}; memcpy(copy, host.ptr, host.len); uint16_t octet[4] = {0}; char remainder[2] = {0}; if (4 != sscanf( copy, IP_CHAR_FMT "." IP_CHAR_FMT "." IP_CHAR_FMT "." IP_CHAR_FMT "%1s", &octet[0], &octet[1], &octet[2], &octet[3], remainder)) { return false; } for (size_t i = 0; i < 4; ++i) { if (octet[i] > 255) { return false; } } return true; } static bool s_starts_with(struct aws_byte_cursor cur, uint8_t ch) { return cur.len > 0 && cur.ptr[0] == ch; } static bool s_ends_with(struct aws_byte_cursor cur, uint8_t ch) { return cur.len > 0 && cur.ptr[cur.len - 1] == ch; } static bool s_is_ipv6_char(uint8_t value) { return aws_isxdigit(value) || value == ':'; } /* actual encoding is %25, but % is omitted for simplicity, since split removes it */ static struct aws_byte_cursor s_percent_uri_enc = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("25"); /* * IPv6 format: * 8 groups of 4 hex chars separated by colons (:) * leading 0s in each group can be skipped * 2 or more consecutive zero groups can be replaced by double colon (::), * but only once. * ipv6 literal can be scoped by to zone by appending % followed by zone name * ( does not look like there is length reqs on zone name length. this * implementation enforces that its > 1 ) * ipv6 can be embedded in url, in which case it must be wrapped inside [] * and % be uri encoded as %25. * Implementation is fairly trivial and just iterates through the string * keeping track of the spec above. 
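 * For illustration (example literals derived from the checks below, not an exhaustive spec):
 * "2001:db8::1" and "fe80::1%eth0" pass as plain literals, and "[fe80::1%25eth0]"
 * passes when is_uri_encoded is true.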
*/ bool aws_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded) { if (host.len == 0) { return false; } if (is_uri_encoded) { if (!s_starts_with(host, '[') || !s_ends_with(host, ']')) { return false; } aws_byte_cursor_advance(&host, 1); --host.len; } struct aws_byte_cursor substr = {0}; /* first split is required ipv6 part */ bool is_split = aws_byte_cursor_next_split(&host, '%', &substr); AWS_ASSERT(is_split); /* function is guaranteed to return at least one split */ if (!is_split || substr.len == 0 || (s_starts_with(substr, ':') || s_ends_with(substr, ':')) || !aws_byte_cursor_satisfies_pred(&substr, s_is_ipv6_char)) { return false; } uint8_t group_count = 0; bool has_double_colon = false; struct aws_byte_cursor group = {0}; while (aws_byte_cursor_next_split(&substr, ':', &group)) { ++group_count; if (group_count > 8 || /* too many groups */ group.len > 4 || /* too many chars in group */ (has_double_colon && group.len == 0)) { /* only one double colon allowed */ return false; } has_double_colon = has_double_colon || group.len == 0; } /* second split is optional zone part */ if (aws_byte_cursor_next_split(&host, '%', &substr)) { if ((is_uri_encoded && (substr.len < 3 || !aws_byte_cursor_starts_with(&substr, &s_percent_uri_enc))) || /* encoding for % + 1 extra char */ (!is_uri_encoded && substr.len == 0) || /* at least 1 char */ !aws_byte_cursor_satisfies_pred(&substr, aws_isalnum)) { return false; } } return has_double_colon ? group_count < 7 : group_count == 8; } bool aws_is_valid_host_label(struct aws_byte_cursor label, bool allow_subdomains) { bool next_must_be_alnum = true; size_t subdomain_count = 0; for (size_t i = 0; i < label.len; ++i) { if (label.ptr[i] == '.') { if (!allow_subdomains || subdomain_count == 0) { return false; } if (!aws_isalnum(label.ptr[i - 1])) { return false; } next_must_be_alnum = true; subdomain_count = 0; continue; } if (next_must_be_alnum && !aws_isalnum(label.ptr[i])) { return false; } else if (label.ptr[i] != '-' && !aws_isalnum(label.ptr[i])) { return false; } next_must_be_alnum = false; ++subdomain_count; if (subdomain_count > 63) { return false; } } return aws_isalnum(label.ptr[label.len - 1]); } struct aws_byte_cursor s_path_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/"); int aws_byte_buf_init_from_normalized_uri_path( struct aws_allocator *allocator, struct aws_byte_cursor path, struct aws_byte_buf *out_normalized_path) { /* Normalized path is just regular path that ensures that path starts and ends with slash */ if (aws_byte_buf_init(out_normalized_path, allocator, path.len + 2)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed init buffer for parseUrl return."); goto on_error; } if (path.len == 0) { if (aws_byte_buf_append(out_normalized_path, &s_path_slash)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add path to object."); goto on_error; } return AWS_OP_SUCCESS; } if (path.ptr[0] != '/') { if (aws_byte_buf_append_dynamic(out_normalized_path, &s_path_slash)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append slash to normalized path."); goto on_error; } } if (aws_byte_buf_append_dynamic(out_normalized_path, &path)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append path to normalized path."); goto on_error; } if (out_normalized_path->buffer[out_normalized_path->len - 1] != '/') { if (aws_byte_buf_append_dynamic(out_normalized_path, &s_path_slash)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append slash to normalized path."); goto on_error; } } return 
AWS_OP_SUCCESS; on_error: aws_byte_buf_clean_up(out_normalized_path); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } struct aws_string *aws_string_new_from_json(struct aws_allocator *allocator, const struct aws_json_value *value) { struct aws_byte_buf json_blob; if (aws_byte_buf_init(&json_blob, allocator, 0)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init buffer for json conversion."); goto on_error; } if (aws_byte_buf_append_json_string(value, &json_blob)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to convert json to string."); goto on_error; } struct aws_string *ret = aws_string_new_from_buf(allocator, &json_blob); aws_byte_buf_clean_up(&json_blob); return ret; on_error: aws_byte_buf_clean_up(&json_blob); aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); return NULL; } bool aws_endpoints_byte_cursor_eq(const void *a, const void *b) { const struct aws_byte_cursor *a_cur = a; const struct aws_byte_cursor *b_cur = b; return aws_byte_cursor_eq(a_cur, b_cur); } void aws_array_list_deep_clean_up(struct aws_array_list *array, aws_array_callback_clean_up_fn on_clean_up_element) { for (size_t idx = 0; idx < aws_array_list_length(array); ++idx) { void *element = NULL; aws_array_list_get_at_ptr(array, &element, idx); AWS_ASSERT(element); on_clean_up_element(element); } aws_array_list_clean_up(array); } /* TODO: this can be moved into common */ static bool s_split_on_first_delim( struct aws_byte_cursor input, char split_on, struct aws_byte_cursor *out_split, struct aws_byte_cursor *out_rest) { AWS_PRECONDITION(aws_byte_cursor_is_valid(&input)); uint8_t *delim = memchr(input.ptr, split_on, input.len); if (delim != NULL) { out_split->ptr = input.ptr; out_split->len = delim - input.ptr; out_rest->ptr = delim; out_rest->len = input.len - (delim - input.ptr); return true; } *out_split = input; out_rest->ptr = NULL; out_rest->len = 0; return false; } static int s_buf_append_and_update_quote_count( struct aws_byte_buf *buf, struct aws_byte_cursor to_append, size_t *quote_count, bool is_json) { /* Dont count quotes if its not json. escaped quotes will be replaced with regular quotes when ruleset json is parsed, which will lead to incorrect results for when templates should be resolved in regular strings. Note: in json blobs escaped quotes are preserved and bellow approach works. */ if (is_json) { for (size_t idx = 0; idx < to_append.len; ++idx) { if (to_append.ptr[idx] == '"' && !(idx > 0 && to_append.ptr[idx - 1] == '\\')) { ++*quote_count; } } } return aws_byte_buf_append_dynamic(buf, &to_append); } static struct aws_byte_cursor escaped_closing_curly = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("}}"); static struct aws_byte_cursor escaped_opening_curly = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{{"); /* * Small helper to deal with escapes correctly in strings that occur before * template opening curly. General flow for resolving is to look for opening and * then closing curly. This function correctly appends any escaped closing * curlies and errors out if closing is not escaped (i.e. its unmatched). 
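 * For illustration (assumed non-JSON input): given the prefix "a}}b" this helper
 * appends "a}b", while the unescaped prefix "a}b" fails with an unmatched-curly error.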
*/ int s_append_template_prefix_to_buffer( struct aws_byte_buf *out_buf, struct aws_byte_cursor prefix, size_t *quote_count, bool is_json) { struct aws_byte_cursor split = {0}; struct aws_byte_cursor rest = {0}; while (s_split_on_first_delim(prefix, '}', &split, &rest)) { if (s_buf_append_and_update_quote_count(out_buf, split, quote_count, is_json)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer."); goto on_error; } if (*quote_count % 2 == 0) { if (aws_byte_buf_append_byte_dynamic(out_buf, '}')) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer."); goto on_error; } aws_byte_cursor_advance(&rest, 1); prefix = rest; continue; } if (aws_byte_cursor_starts_with(&rest, &escaped_closing_curly)) { if (aws_byte_buf_append_byte_dynamic(out_buf, '}')) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer."); goto on_error; } aws_byte_cursor_advance(&rest, 2); } else { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Unmatched or unescaped closing curly."); goto on_error; } prefix = rest; } if (s_buf_append_and_update_quote_count(out_buf, split, quote_count, is_json)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer."); goto on_error; } return AWS_OP_SUCCESS; on_error: return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } int aws_byte_buf_init_from_resolved_templated_string( struct aws_allocator *allocator, struct aws_byte_buf *out_buf, struct aws_byte_cursor string, aws_endpoints_template_resolve_fn resolve_callback, void *user_data, bool is_json) { AWS_PRECONDITION(allocator); struct aws_owning_cursor resolved_template; AWS_ZERO_STRUCT(resolved_template); if (aws_byte_buf_init(out_buf, allocator, string.len)) { return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } size_t quote_count = is_json ? 
0 : 1; struct aws_byte_cursor split = {0}; struct aws_byte_cursor rest = {0}; while (s_split_on_first_delim(string, '{', &split, &rest)) { if (s_append_template_prefix_to_buffer(out_buf, split, &quote_count, is_json)) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to buffer while evaluating templated string."); goto on_error; } if (quote_count % 2 == 0) { if (aws_byte_buf_append_byte_dynamic(out_buf, '{')) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer."); goto on_error; } aws_byte_cursor_advance(&rest, 1); string = rest; continue; } if (aws_byte_cursor_starts_with(&rest, &escaped_opening_curly)) { if (aws_byte_buf_append_byte_dynamic(out_buf, '{')) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer."); goto on_error; } aws_byte_cursor_advance(&rest, 2); string = rest; continue; } aws_byte_cursor_advance(&rest, 1); struct aws_byte_cursor after_closing = {0}; if (!s_split_on_first_delim(rest, '}', &split, &after_closing)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Unmatched closing curly."); goto on_error; } aws_byte_cursor_advance(&after_closing, 1); string = after_closing; if (resolve_callback(split, user_data, &resolved_template)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to resolve template."); goto on_error; } if (s_buf_append_and_update_quote_count(out_buf, resolved_template.cur, &quote_count, is_json)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append resolved value."); goto on_error; } aws_owning_cursor_clean_up(&resolved_template); } if (s_buf_append_and_update_quote_count(out_buf, split, &quote_count, is_json)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer."); goto on_error; } return AWS_OP_SUCCESS; on_error: aws_byte_buf_clean_up(out_buf); aws_owning_cursor_clean_up(&resolved_template); return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } int aws_path_through_json( struct aws_allocator *allocator, const struct aws_json_value *root, struct aws_byte_cursor path, const struct aws_json_value **out_value) { struct aws_array_list path_segments; if (aws_array_list_init_dynamic(&path_segments, allocator, 10, sizeof(struct aws_byte_cursor)) || aws_byte_cursor_split_on_char(&path, '.', &path_segments)) { goto on_error; } *out_value = root; for (size_t idx = 0; idx < aws_array_list_length(&path_segments); ++idx) { struct aws_byte_cursor path_el_cur; if (aws_array_list_get_at(&path_segments, &path_el_cur, idx)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get path element"); goto on_error; } struct aws_byte_cursor element_cur = {0}; aws_byte_cursor_next_split(&path_el_cur, '[', &element_cur); struct aws_byte_cursor index_cur = {0}; bool has_index = aws_byte_cursor_next_split(&path_el_cur, '[', &index_cur) && aws_byte_cursor_next_split(&path_el_cur, ']', &index_cur); if (element_cur.len > 0) { *out_value = aws_json_value_get_from_object(*out_value, element_cur); if (NULL == *out_value) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid path. 
" PRInSTR ".", AWS_BYTE_CURSOR_PRI(element_cur)); goto on_error; } } if (has_index) { uint64_t index; if (aws_byte_cursor_utf8_parse_u64(index_cur, &index)) { AWS_LOGF_ERROR( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to parse index: " PRInSTR, AWS_BYTE_CURSOR_PRI(index_cur)); goto on_error; } *out_value = aws_json_get_array_element(*out_value, (size_t)index); if (NULL == *out_value) { aws_reset_error(); goto on_success; } } } on_success: aws_array_list_clean_up(&path_segments); return AWS_OP_SUCCESS; on_error: aws_array_list_clean_up(&path_segments); *out_value = NULL; return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED); } struct aws_owning_cursor aws_endpoints_owning_cursor_create( struct aws_allocator *allocator, const struct aws_string *str) { struct aws_string *clone = aws_string_clone_or_reuse(allocator, str); struct aws_owning_cursor ret = {.string = clone, .cur = aws_byte_cursor_from_string(clone)}; return ret; } struct aws_owning_cursor aws_endpoints_owning_cursor_from_string(struct aws_string *str) { struct aws_owning_cursor ret = {.string = str, .cur = aws_byte_cursor_from_string(str)}; return ret; } struct aws_owning_cursor aws_endpoints_owning_cursor_from_cursor( struct aws_allocator *allocator, const struct aws_byte_cursor cur) { struct aws_string *clone = aws_string_new_from_cursor(allocator, &cur); struct aws_owning_cursor ret = {.string = clone, .cur = aws_byte_cursor_from_string(clone)}; return ret; } struct aws_owning_cursor aws_endpoints_non_owning_cursor_create(struct aws_byte_cursor cur) { struct aws_owning_cursor ret = {.string = NULL, .cur = cur}; return ret; } void aws_owning_cursor_clean_up(struct aws_owning_cursor *cursor) { aws_string_destroy(cursor->string); cursor->string = NULL; cursor->cur.ptr = NULL; cursor->cur.len = 0; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/partitions.c000066400000000000000000000265611456575232400247440ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #include #include #include #include #include #include #include #include static struct aws_byte_cursor s_supported_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1.0"); struct aws_byte_cursor aws_partitions_get_supported_version(void) { return s_supported_version; } static void s_partitions_config_destroy(void *data) { if (data == NULL) { return; } struct aws_partitions_config *partitions = data; aws_json_value_destroy(partitions->json_root); aws_string_destroy(partitions->version); aws_hash_table_clean_up(&partitions->base_partitions); aws_hash_table_clean_up(&partitions->region_to_partition_info); aws_mem_release(partitions->allocator, partitions); } struct region_merge_wrapper { struct aws_json_value *outputs_node; struct aws_json_value *merge_node; }; static int s_on_region_merge( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)out_should_continue; struct region_merge_wrapper *merge = user_data; /* * Note: latest partitions file includes description on every region. * This results in a separate record created for every region, since any * overrides on region create a new record that is a merge of partition * default and override. * Description is not used by endpoints rule engine, hence lets ignore it * during merge for now to avoid creating numerous records that all have the * same data. 
* This decision can be revisited later if we decide to extend partitions * parsing for any other use cases. */ if (aws_byte_cursor_eq_c_str(key, "description")) { return AWS_OP_SUCCESS; } if (merge->merge_node == NULL) { merge->merge_node = aws_json_value_duplicate(merge->outputs_node); } /* * Note: Its valid for region to add new field to default partition outputs * instead of overriding existing one. So only delete previous value if it exists. */ if (aws_json_value_has_key(merge->merge_node, *key) && aws_json_value_remove_from_object(merge->merge_node, *key)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to remove previous partition value."); return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); } if (aws_json_value_add_to_object(merge->merge_node, *key, aws_json_value_duplicate(value))) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to overwrite partition data."); return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); } return AWS_OP_SUCCESS; } struct partition_parse_wrapper { struct aws_partitions_config *partitions; struct aws_json_value *outputs_node; struct aws_string *outputs_str; }; static int s_on_region_element( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)out_should_continue; struct aws_partition_info *partition_info = NULL; struct partition_parse_wrapper *wrapper = user_data; struct region_merge_wrapper merge = { .outputs_node = wrapper->outputs_node, .merge_node = NULL, }; if (aws_json_const_iterate_object(value, s_on_region_merge, &merge)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse partitions."); return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); } if (merge.merge_node != NULL) { partition_info = aws_partition_info_new(wrapper->partitions->allocator, *key); partition_info->info = aws_string_new_from_json(wrapper->partitions->allocator, merge.merge_node); aws_json_value_destroy(merge.merge_node); } else { partition_info = aws_partition_info_new(wrapper->partitions->allocator, *key); partition_info->info = wrapper->outputs_str; partition_info->is_copy = true; } if (aws_hash_table_put( &wrapper->partitions->region_to_partition_info, &partition_info->name, partition_info, NULL)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info."); goto on_error; } return AWS_OP_SUCCESS; on_error: if (partition_info != NULL) { aws_partition_info_destroy(partition_info); } return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); } static int s_on_partition_element( size_t idx, const struct aws_json_value *partition_node, bool *out_should_continue, void *user_data) { (void)out_should_continue; (void)idx; struct aws_partitions_config *partitions = user_data; struct aws_byte_cursor id_cur; struct aws_json_value *id_node = aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("id")); if (id_node == NULL || aws_json_value_get_string(id_node, &id_cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract id of partition."); goto on_error; } struct aws_json_value *outputs_node = aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("outputs")); if (outputs_node == NULL) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract outputs of partition."); goto on_error; } struct aws_json_value *regex_node = aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("regionRegex")); struct 
aws_partition_info *partition_info = aws_partition_info_new(partitions->allocator, id_cur); partition_info->info = aws_string_new_from_json(partitions->allocator, outputs_node); if (regex_node != NULL) { struct aws_byte_cursor regex_cur = {0}; if (aws_json_value_get_string(regex_node, &regex_cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse region regex."); goto on_error; } partition_info->region_regex = aws_endpoints_regex_new(partitions->allocator, regex_cur); } if (partition_info->info == NULL) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse partition info."); goto on_error; } if (aws_hash_table_put(&partitions->base_partitions, &partition_info->name, partition_info, NULL)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info."); goto on_error; } struct partition_parse_wrapper wrapper = { .outputs_node = outputs_node, .outputs_str = partition_info->info, .partitions = partitions, }; struct aws_json_value *regions_node = aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("regions")); if (regions_node != NULL && aws_json_const_iterate_object(regions_node, s_on_region_element, &wrapper)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse regions."); goto on_error; } return AWS_OP_SUCCESS; on_error: return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); } static int s_init_partitions_config_from_json( struct aws_allocator *allocator, struct aws_partitions_config *partitions, struct aws_byte_cursor partitions_cur) { struct aws_json_value *root = aws_json_value_new_from_string(allocator, partitions_cur); if (root == NULL) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse provided string as json."); return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); } partitions->json_root = root; struct aws_byte_cursor version_cur; struct aws_json_value *version_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("version")); if (version_node == NULL || aws_json_value_get_string(version_node, &version_cur)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract version."); return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED); } #ifdef ENDPOINTS_VERSION_CHECK /* TODO: samples are currently inconsistent with versions. 
skip check for now */ if (!aws_byte_cursor_eq_c_str(&version_cur, &s_supported_version)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Unsupported partitions version."); aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED); goto on_error; } #endif struct aws_json_value *partitions_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("partitions")); if (partitions_node == NULL || aws_json_const_iterate_array(partitions_node, s_on_partition_element, partitions)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse partitions."); return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); } return AWS_OP_SUCCESS; } static void s_callback_partition_info_destroy(void *data) { struct aws_partition_info *info = data; aws_partition_info_destroy(info); } struct aws_partitions_config *aws_partitions_config_new_from_string( struct aws_allocator *allocator, struct aws_byte_cursor json) { AWS_PRECONDITION(allocator); AWS_PRECONDITION(aws_byte_cursor_is_valid(&json)); struct aws_partitions_config *partitions = aws_mem_calloc(allocator, 1, sizeof(struct aws_partitions_config)); partitions->allocator = allocator; if (aws_hash_table_init( &partitions->base_partitions, allocator, 10, aws_hash_byte_cursor_ptr, aws_endpoints_byte_cursor_eq, NULL, s_callback_partition_info_destroy)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to init partition info map."); aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); goto on_error; } if (aws_hash_table_init( &partitions->region_to_partition_info, allocator, 20, aws_hash_byte_cursor_ptr, aws_endpoints_byte_cursor_eq, NULL, s_callback_partition_info_destroy)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to init partition info map."); aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED); goto on_error; } if (s_init_partitions_config_from_json(allocator, partitions, json)) { AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to init partition info from json."); goto on_error; } aws_ref_count_init(&partitions->ref_count, partitions, s_partitions_config_destroy); return partitions; on_error: s_partitions_config_destroy(partitions); return NULL; } struct aws_partitions_config *aws_partitions_config_acquire(struct aws_partitions_config *partitions) { AWS_PRECONDITION(partitions); if (partitions) { aws_ref_count_acquire(&partitions->ref_count); } return partitions; } struct aws_partitions_config *aws_partitions_config_release(struct aws_partitions_config *partitions) { if (partitions) { aws_ref_count_release(&partitions->ref_count); } return NULL; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/resource_name.c000066400000000000000000000102171456575232400253660ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #define ARN_SPLIT_COUNT ((size_t)5) #define ARN_PARTS_COUNT ((size_t)6) static const char ARN_DELIMETER[] = ":"; static const char ARN_DELIMETER_CHAR = ':'; static const size_t DELIMETER_LEN = 8; /* strlen("arn:::::") */ int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input) { struct aws_byte_cursor arn_parts[ARN_PARTS_COUNT]; struct aws_array_list arn_part_list; aws_array_list_init_static(&arn_part_list, arn_parts, ARN_PARTS_COUNT, sizeof(struct aws_byte_cursor)); if (aws_byte_cursor_split_on_char_n(input, ARN_DELIMETER_CHAR, ARN_SPLIT_COUNT, &arn_part_list)) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } struct aws_byte_cursor *arn_prefix; if (aws_array_list_get_at_ptr(&arn_part_list, (void **)&arn_prefix, 0) || !aws_byte_cursor_eq_c_str(arn_prefix, "arn")) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } if (aws_array_list_get_at(&arn_part_list, &arn->partition, 1)) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } if (aws_array_list_get_at(&arn_part_list, &arn->service, 2)) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } if (aws_array_list_get_at(&arn_part_list, &arn->region, 3)) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } if (aws_array_list_get_at(&arn_part_list, &arn->account_id, 4)) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } if (aws_array_list_get_at(&arn_part_list, &arn->resource_id, 5)) { return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING); } return AWS_OP_SUCCESS; } int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size) { AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id)); *size = arn->partition.len + arn->region.len + arn->service.len + arn->account_id.len + arn->resource_id.len + DELIMETER_LEN; return AWS_OP_SUCCESS; } int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn) { AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id)); AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id)); const struct aws_byte_cursor prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:"); const struct aws_byte_cursor colon_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ARN_DELIMETER); if (aws_byte_buf_append(buf, &prefix)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &arn->partition)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &colon_cur)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &arn->service)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &colon_cur)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &arn->region)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &colon_cur)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &arn->account_id)) { return aws_raise_error(aws_last_error()); } if (aws_byte_buf_append(buf, &colon_cur)) { return aws_raise_error(aws_last_error()); } if 
(aws_byte_buf_append(buf, &arn->resource_id)) { return aws_raise_error(aws_last_error()); } AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/source/sdkutils.c000066400000000000000000000075051456575232400244070ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* clang-format off */ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_GENERAL, "General error in SDK Utility library", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARSE_FATAL, "Parser encountered a fatal error", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE, "Parser encountered an error, but recovered", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET, "Ruleset version not supported", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED, "Ruleset parsing failed", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED, "Endpoints eval failed to initialize", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED, "Unexpected eval error", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET, "Ruleset has no rules", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED, "Ruleset was exhausted before finding a matching rule", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED, "Partitions version not supported.", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED, "Partitions parsing failed.", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, "Unsupported regex feature.", "aws-c-sdkutils"), AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, "Text does not match specified regex", "aws-c-sdkutils"), }; /* clang-format on */ static struct aws_error_info_list s_sdkutils_error_info = { .error_list = s_errors, .count = sizeof(s_errors) / sizeof(struct aws_error_info), }; static struct aws_log_subject_info s_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO( AWS_LS_SDKUTILS_GENERAL, "SDKUtils", "Subject for SDK utility logging that defies categorization."), DEFINE_LOG_SUBJECT_INFO(AWS_LS_SDKUTILS_PROFILE, "AWSProfile", "Subject for AWS Profile parser and utilities"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "AWSEndpointsParsing", "Subject for AWS Endpoints ruleset parser"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "AWSEndpointsResolution", "Subject for AWS Endpoints Engine resolution"), DEFINE_LOG_SUBJECT_INFO( AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "AWSEndpoints", "Subject for AWS Endpoints Engine general messages"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "AWSEndpoints", "Subject for AWS Partitions parsing"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_SDKUTILS_ENDPOINTS_REGEX, "AWSEndpoints", "Subject for AWS Endpoints Regex engine"), }; static struct aws_log_subject_info_list s_sdkutils_log_subjects = { .subject_list = s_log_subject_infos, .count = AWS_ARRAY_SIZE(s_log_subject_infos), }; static int s_library_init_count = 0; void aws_sdkutils_library_init(struct aws_allocator *allocator) { if (s_library_init_count++ != 0) { return; } aws_common_library_init(allocator); aws_register_error_info(&s_sdkutils_error_info); 
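/* One-time global setup (guarded by s_library_init_count above): register this library's log subjects, then bring up the endpoints rule engine. */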
aws_register_log_subject_info_list(&s_sdkutils_log_subjects); aws_endpoints_rule_engine_init(); } void aws_sdkutils_library_clean_up(void) { if (--s_library_init_count != 0) { return; } aws_unregister_log_subject_info_list(&s_sdkutils_log_subjects); aws_unregister_error_info(&s_sdkutils_error_info); aws_common_library_clean_up(); } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/000077500000000000000000000000001456575232400222345ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/CMakeLists.txt000066400000000000000000000117351456575232400250030ustar00rootroot00000000000000include(AwsTestHarness) enable_testing() file(GLOB TEST_SRC "*.c") file(GLOB TEST_HDRS "*.h") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) add_test_case(sdkutils_library_test) add_test_case(aws_profile_early_property_parse_failure_test) add_test_case(aws_profile_missing_bracket_parse_failure_test) add_test_case(aws_profile_missing_assignment_parse_failure_test) add_test_case(aws_profile_missing_property_key_parse_failure_test) add_test_case(aws_profile_early_continuation_parse_failure_test) add_test_case(aws_profile_illegal_continuation1_parse_failure_test) add_test_case(aws_profile_illegal_continuation2_parse_failure_test) add_test_case(aws_profile_illegal_continuation3_parse_failure_test) add_test_case(aws_profile_continuation_reset_on_new_profile_parse_failure_test) add_test_case(aws_profile_empty_test) add_test_case(aws_profile_empty_profile_test) add_test_case(aws_profile_whitespace_empty_profile_test) add_test_case(aws_profile_tab_empty_profile_test) add_test_case(aws_profile_single_simple_property_profile_test) add_test_case(aws_profile_equal_containing_property_profile_test) add_test_case(aws_profile_unicode_containing_property_profile_test) add_test_case(aws_profile_multiple_property_profile_test) add_test_case(aws_profile_trimmable_property_profile_test) add_test_case(aws_profile_empty_property_profile_test) add_test_case(aws_profile_multiple_empty_profile_test) add_test_case(aws_profile_multiple_profile_test) add_test_case(aws_profile_multiple_profile_with_sso_session_test) add_test_case(aws_profile_sso_session_in_credentials_test) add_test_case(aws_profile_sso_session_without_name_test) add_test_case(aws_profile_blank_lines_ignored_test) add_test_case(aws_profile_pound_comments_ignored_test) add_test_case(aws_profile_semicolon_comments_ignored_test) add_test_case(aws_profile_mixed_comments_ignored_test) add_test_case(aws_profile_empty_comments_ignored_test) add_test_case(aws_profile_profile_adjacent_comment_test) add_test_case(aws_profile_value_adjacent_comment_test) add_test_case(aws_profile_continued_property_value_test) add_test_case(aws_profile_multiline_continued_property_value_test) add_test_case(aws_profile_continued_property_value_trim_test) add_test_case(aws_profile_continued_property_value_pound_comment_test) add_test_case(aws_profile_continued_property_value_semicolon_comment_test) add_test_case(aws_profile_duplicate_profiles_merge_test) add_test_case(aws_profile_duplicate_properties_last_property_value_test) add_test_case(aws_profile_duplicate_profiles_last_property_value_test) add_test_case(aws_profile_duplicate_default_profiles_property_resolution1_test) add_test_case(aws_profile_duplicate_default_profiles_property_resolution2_test) add_test_case(aws_profile_invalid_profile_names_merge_test) add_test_case(aws_profile_invalid_property_names_ignored_test) add_test_case(aws_profile_all_valid_profile_characters_test) 
add_test_case(aws_profile_all_valid_property_characters_test) add_test_case(aws_profile_basic_sub_property_test) add_test_case(aws_profile_empty_sub_property_test) add_test_case(aws_profile_invalid_sub_property_name_test) add_test_case(aws_profile_sub_property_blank_line_test) add_test_case(aws_profile_basic_duplicate_merge_test) add_test_case(aws_profile_mixed_prefix_default_test) add_test_case(aws_profile_override_duplicate_merge_test) add_test_case(aws_profile_no_prefix_nondefault_test) add_test_case(aws_profile_prefix_credentials_test) add_test_case(parse_resource_name_test) add_test_case(parse_resource_name_failures_test) add_test_case(resource_name_tostring_test) add_test_case(resource_name_tostring_failure_test) add_test_case(resource_name_length_test) add_test_case(parse_ruleset_from_string) add_test_case(test_endpoints_aws_region) add_test_case(test_endpoints_default_values) add_test_case(test_endpoints_eventbridge) add_test_case(test_endpoints_fns) add_test_case(test_endpoints_get_attr_type_inference) add_test_case(test_endpoints_headers) add_test_case(test_endpoints_is_virtual_hostable_s3_bucket) add_test_case(test_endpoints_region_override) add_test_case(test_endpoints_minimal_ruleset) add_test_case(test_endpoints_parse_arn) add_test_case(test_endpoints_parse_url) add_test_case(test_endpoints_partition_fn) add_test_case(test_endpoints_substring) add_test_case(test_endpoints_uri_encode) add_test_case(test_endpoints_valid_hostlabel) add_test_case(test_endpoints_condition_mem_clean_up) add_test_case(test_endpoints_custom) add_test_case(endpoints_eval_util_is_ipv4) add_test_case(endpoints_eval_util_is_ipv6) add_test_case(endpoints_uri_normalize_path) add_test_case(endpoints_byte_buf_init_from_resolved_templated_string) add_test_case(endpoints_regex_aws_region_matches) add_test_case(endpoints_regex_iso_region_matches) add_test_case(endpoints_regex_misc_validation) set(TEST_BINARY_NAME ${PROJECT_NAME}-tests) generate_test_driver(${TEST_BINARY_NAME}) add_custom_command(TARGET ${PROJECT_NAME}-tests PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/resources $) aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/aws_profile_parser_tests.c000066400000000000000000000071651456575232400275210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include static int s_do_aws_profile_fatal_parse_test( struct aws_allocator *allocator, const struct aws_string *profile_contents) { struct aws_byte_cursor contents = aws_byte_cursor_from_string(profile_contents); struct aws_byte_buf buffer; aws_byte_buf_init_copy_from_cursor(&buffer, allocator, contents); struct aws_profile_collection *profile_collection = aws_profile_collection_new_from_buffer(allocator, &buffer, AWS_PST_NONE); aws_byte_buf_clean_up(&buffer); ASSERT_TRUE(profile_collection == NULL); return 0; } #define AWS_PROFILE_PARSE_FAILURE_TEST(test_name, content_string) \ static int s_##test_name(struct aws_allocator *allocator, void *ctx) { \ (void)ctx; \ return s_do_aws_profile_fatal_parse_test(allocator, content_string); \ } \ AWS_TEST_CASE(test_name, s_##test_name); AWS_STATIC_STRING_FROM_LITERAL( s_early_property_profile_file, "bad=value\n" "[default]\r\n" "good=value\r\n"); AWS_PROFILE_PARSE_FAILURE_TEST(aws_profile_early_property_parse_failure_test, s_early_property_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_missing_bracket_profile_file, "[default \r\n" "good=value\r\n"); AWS_PROFILE_PARSE_FAILURE_TEST(aws_profile_missing_bracket_parse_failure_test, s_missing_bracket_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_missing_assignment_profile_file, "[default] \r\n" "bad\r\n"); AWS_PROFILE_PARSE_FAILURE_TEST(aws_profile_missing_assignment_parse_failure_test, s_missing_assignment_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_missing_property_key_profile_file, "[default] ; hello\r\n" "=bad\r\n"); AWS_PROFILE_PARSE_FAILURE_TEST(aws_profile_missing_property_key_parse_failure_test, s_missing_property_key_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_early_continuation_profile_file, "[default]\r\n" " continuation\n"); AWS_PROFILE_PARSE_FAILURE_TEST(aws_profile_early_continuation_parse_failure_test, s_early_continuation_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_illegal_continuation1_profile_file, "[default]\r\n" "s3 =\n" " badcontinuation\n"); AWS_PROFILE_PARSE_FAILURE_TEST( aws_profile_illegal_continuation1_parse_failure_test, s_illegal_continuation1_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_illegal_continuation2_profile_file, "[default]\r\n" "s3 =\n" " ^^badcontinuation\n"); AWS_PROFILE_PARSE_FAILURE_TEST( aws_profile_illegal_continuation2_parse_failure_test, s_illegal_continuation2_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_illegal_continuation3_profile_file, "[default]\r\n" "s3 =\n" " =value\n"); AWS_PROFILE_PARSE_FAILURE_TEST( aws_profile_illegal_continuation3_parse_failure_test, s_illegal_continuation3_profile_file) AWS_STATIC_STRING_FROM_LITERAL( s_continuation_reset_on_new_profile_file, "[profile foo]\nname = value\n[profile foo]\n -continued"); AWS_PROFILE_PARSE_FAILURE_TEST( aws_profile_continuation_reset_on_new_profile_parse_failure_test, s_continuation_reset_on_new_profile_file) aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/aws_profile_tests.c000066400000000000000000001705221456575232400261430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #define EXPECT_PROFILE_COUNT(profile_collection, profile_count) \ { ASSERT_TRUE(aws_profile_collection_get_profile_count(profile_collection) == (profile_count)); } #define EXPECT_PROFILE(profile_collection, profile_name) \ { \ struct aws_string *profile_name_str = aws_string_new_from_c_str(allocator, profile_name); \ const struct aws_profile *profile = aws_profile_collection_get_profile(profile_collection, profile_name_str); \ aws_string_destroy(profile_name_str); \ ASSERT_TRUE(profile != NULL); \ } #define EXPECT_PROPERTY_COUNT(profile_collection, profile_name, expected_property_count) \ { \ struct aws_string *profile_name_str = aws_string_new_from_c_str(allocator, profile_name); \ const struct aws_profile *profile = aws_profile_collection_get_profile(profile_collection, profile_name_str); \ aws_string_destroy(profile_name_str); \ ASSERT_TRUE(aws_profile_get_property_count(profile) == (expected_property_count)); \ } #define EXPECT_PROPERTY(profile_collection, profile_name, property_name, expected_property_value) \ { \ struct aws_string *profile_name_str = aws_string_new_from_c_str(allocator, profile_name); \ const struct aws_profile *profile = aws_profile_collection_get_profile(profile_collection, profile_name_str); \ struct aws_string *property_name_str = aws_string_new_from_c_str(allocator, property_name); \ const struct aws_profile_property *property = aws_profile_get_property(profile, property_name_str); \ aws_string_destroy(property_name_str); \ aws_string_destroy(profile_name_str); \ ASSERT_TRUE( \ property != NULL && \ strcmp(expected_property_value, aws_string_c_str(aws_profile_property_get_value(property))) == 0); \ } #define EXPECT_SUB_PROPERTY_COUNT(profile_collection, profile_name, property_name, expected_sub_property_count) \ { \ struct aws_string *profile_name_str = aws_string_new_from_c_str(allocator, profile_name); \ const struct aws_profile *profile = aws_profile_collection_get_profile(profile_collection, profile_name_str); \ struct aws_string *property_name_str = aws_string_new_from_c_str(allocator, property_name); \ const struct aws_profile_property *property = aws_profile_get_property(profile, property_name_str); \ aws_string_destroy(property_name_str); \ aws_string_destroy(profile_name_str); \ ASSERT_UINT_EQUALS((expected_sub_property_count), aws_profile_property_get_sub_property_count(property)); \ } #define EXPECT_SUB_PROPERTY( \ profile_collection, profile_name, property_name, sub_property_name, expected_sub_property_value) \ { \ struct aws_string *profile_name_str = aws_string_new_from_c_str(allocator, profile_name); \ const struct aws_profile *profile = aws_profile_collection_get_profile(profile_collection, profile_name_str); \ struct aws_string *property_name_str = aws_string_new_from_c_str(allocator, property_name); \ const struct aws_profile_property *property = aws_profile_get_property(profile, property_name_str); \ struct aws_string *sub_property_name_str = aws_string_new_from_c_str(allocator, sub_property_name); \ const struct aws_string *sub_property_value = \ aws_profile_property_get_sub_property(property, sub_property_name_str); \ aws_string_destroy(sub_property_name_str); \ aws_string_destroy(property_name_str); \ aws_string_destroy(profile_name_str); \ ASSERT_TRUE(strcmp(expected_sub_property_value, aws_string_c_str(sub_property_value)) == 0); \ } #define EXPECT_SSO_SESSION_COUNT(profile_collection, sso_session_count) \ { \ ASSERT_TRUE( \ aws_profile_collection_get_section_count(profile_collection, 
AWS_PROFILE_SECTION_TYPE_SSO_SESSION) == \ (sso_session_count)); \ } #define EXPECT_SSO_SESSION(profile_collection, sso_session_name) \ { \ struct aws_string *sso_session_name_str = aws_string_new_from_c_str(allocator, sso_session_name); \ const struct aws_profile *sso_session = aws_profile_collection_get_section( \ profile_collection, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, sso_session_name_str); \ aws_string_destroy(sso_session_name_str); \ ASSERT_TRUE(sso_session != NULL); \ } #define EXPECT_SSO_SESSION_PROPERTY_COUNT(profile_collection, sso_session_name, expected_sso_session_count) \ { \ struct aws_string *sso_session_name_str = aws_string_new_from_c_str(allocator, sso_session_name); \ const struct aws_profile *sso_session = aws_profile_collection_get_section( \ profile_collection, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, sso_session_name_str); \ aws_string_destroy(sso_session_name_str); \ ASSERT_TRUE(aws_profile_get_property_count(sso_session) == (expected_sso_session_count)); \ } #define EXPECT_SSO_SESSION_PROPERTY(profile_collection, sso_session_name, property_name, expected_property_value) \ { \ struct aws_string *sso_session_name_str = aws_string_new_from_c_str(allocator, sso_session_name); \ const struct aws_profile *sso_session = aws_profile_collection_get_section( \ profile_collection, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, sso_session_name_str); \ struct aws_string *property_name_str = aws_string_new_from_c_str(allocator, property_name); \ const struct aws_profile_property *property = aws_profile_get_property(sso_session, property_name_str); \ aws_string_destroy(property_name_str); \ aws_string_destroy(sso_session_name_str); \ ASSERT_TRUE( \ property != NULL && \ strcmp(expected_property_value, aws_string_c_str(aws_profile_property_get_value(property))) == 0); \ } #define EXPECT_SSO_SESSION_SUB_PROPERTY_COUNT( \ profile_collection, sso_session_name, property_name, expected_sub_property_count) \ { \ struct aws_string *sso_session_name_str = aws_string_new_from_c_str(allocator, sso_session_name); \ const struct aws_profile *sso_session = aws_profile_collection_get_section( \ profile_collection, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, sso_session_name_str); \ struct aws_string *property_name_str = aws_string_new_from_c_str(allocator, property_name); \ const struct aws_profile_property *property = aws_profile_get_property(sso_session, property_name_str); \ aws_string_destroy(property_name_str); \ aws_string_destroy(sso_session_name_str); \ ASSERT_UINT_EQUALS((expected_sub_property_count), aws_profile_property_get_sub_property_count(property)); \ } #define EXPECT_SSO_SESSION_SUB_PROPERTY( \ profile_collection, sso_session_name, property_name, sub_property_name, expected_sub_property_value) \ { \ struct aws_string *sso_session_name_str = aws_string_new_from_c_str(allocator, sso_session_name); \ const struct aws_profile *sso_session = aws_profile_collection_get_section( \ profile_collection, AWS_PROFILE_SECTION_TYPE_SSO_SESSION, sso_session_name_str); \ struct aws_string *property_name_str = aws_string_new_from_c_str(allocator, property_name); \ const struct aws_profile_property *property = aws_profile_get_property(sso_session, property_name_str); \ struct aws_string *sub_property_name_str = aws_string_new_from_c_str(allocator, sub_property_name); \ const struct aws_string *sub_property_value = \ aws_profile_property_get_sub_property(property, sub_property_name_str); \ aws_string_destroy(sub_property_name_str); \ aws_string_destroy(property_name_str); \ 
aws_string_destroy(sso_session_name_str); \ ASSERT_TRUE(strcmp(expected_sub_property_value, aws_string_c_str(sub_property_value)) == 0); \ } /* * profile collection setup */ struct aws_profile_collection *aws_prepare_profile_test( struct aws_allocator *allocator, const struct aws_string *profile_contents, enum aws_profile_source_type source) { struct aws_byte_cursor contents = aws_byte_cursor_from_string(profile_contents); struct aws_byte_buf buffer; AWS_ZERO_STRUCT(buffer); aws_byte_buf_init_copy_from_cursor(&buffer, allocator, contents); struct aws_profile_collection *profile_collection = aws_profile_collection_new_from_buffer(allocator, &buffer, source); aws_byte_buf_clean_up(&buffer); return profile_collection; } struct aws_profile_collection *aws_prepare_merged_profile_test( struct aws_allocator *allocator, const struct aws_string *config_contents, const struct aws_string *credentials_contents) { struct aws_byte_cursor config_cursor = aws_byte_cursor_from_string(config_contents); struct aws_byte_buf config_buffer; aws_byte_buf_init_copy_from_cursor(&config_buffer, allocator, config_cursor); struct aws_profile_collection *config_profile_collection = aws_profile_collection_new_from_buffer(allocator, &config_buffer, AWS_PST_CONFIG); aws_byte_buf_clean_up(&config_buffer); struct aws_byte_cursor credentials_cursor = aws_byte_cursor_from_string(credentials_contents); struct aws_byte_buf credentials_buffer; aws_byte_buf_init_copy_from_cursor(&credentials_buffer, allocator, credentials_cursor); struct aws_profile_collection *credentials_profile_collection = aws_profile_collection_new_from_buffer(allocator, &credentials_buffer, AWS_PST_CREDENTIALS); aws_byte_buf_clean_up(&credentials_buffer); struct aws_profile_collection *merged = aws_profile_collection_new_from_merge(allocator, config_profile_collection, credentials_profile_collection); if (config_profile_collection) { aws_profile_collection_destroy(config_profile_collection); } if (credentials_profile_collection) { aws_profile_collection_destroy(credentials_profile_collection); } return merged; } /* * Nothing at all */ AWS_STATIC_STRING_FROM_LITERAL(s_empty_string, ""); static int s_aws_profile_empty_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_empty_string, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_empty_test, s_aws_profile_empty_test); /* * A single empty profile */ AWS_STATIC_STRING_FROM_LITERAL(s_empty_profile, "[profile foo]"); static int s_aws_profile_empty_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_empty_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_empty_profile_test, s_aws_profile_empty_profile_test); /* * Whitespace in a single empty profile */ AWS_STATIC_STRING_FROM_LITERAL(s_whitespace_empty_profile, "[profile \tfoo \t]"); static int s_aws_profile_whitespace_empty_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = 
aws_prepare_profile_test(allocator, s_whitespace_empty_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_whitespace_empty_profile_test, s_aws_profile_whitespace_empty_profile_test); /* * Tab-separated, a single empty profile */ AWS_STATIC_STRING_FROM_LITERAL(s_tab_empty_profile, "[profile\tfoo]"); static int s_aws_profile_tab_empty_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_tab_empty_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_tab_empty_profile_test, s_aws_profile_tab_empty_profile_test); /* * One profile with a single, simple property */ AWS_STATIC_STRING_FROM_LITERAL(s_single_simple_property_profile, "[profile foo]\nname = value"); static int s_aws_profile_single_simple_property_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_single_simple_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_single_simple_property_profile_test, s_aws_profile_single_simple_property_profile_test); /* * Check that = can appear in a property value */ AWS_STATIC_STRING_FROM_LITERAL(s_equal_containing_property_profile, "[profile foo]\nname = val=ue"); static int s_aws_profile_equal_containing_property_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_equal_containing_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "val=ue"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_equal_containing_property_profile_test, s_aws_profile_equal_containing_property_profile_test); /* * Check that non-ascii unicode can appear in a property value */ AWS_STATIC_STRING_FROM_LITERAL(s_unicode_containing_property_profile, "[profile foo]\nname = \xF0\x9F\x98\x82"); static int s_aws_profile_unicode_containing_property_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_unicode_containing_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "\xF0\x9F\x98\x82"); aws_profile_collection_destroy(profile_collection); return 0; } 
AWS_TEST_CASE( aws_profile_unicode_containing_property_profile_test, s_aws_profile_unicode_containing_property_profile_test); /* * Profiles can contain multiple properties */ AWS_STATIC_STRING_FROM_LITERAL(s_multiple_property_profile, "[profile foo]\nname = value\nname2 = value2"); static int s_aws_profile_multiple_property_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_multiple_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 2); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); EXPECT_PROPERTY(profile_collection, "foo", "name2", "value2"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_multiple_property_profile_test, s_aws_profile_multiple_property_profile_test); /* * Property name and values get trimmed */ AWS_STATIC_STRING_FROM_LITERAL(s_trimmable_property_profile, "[profile foo]\nname \t= \tvalue \t"); static int s_aws_profile_trimmable_property_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_trimmable_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_trimmable_property_profile_test, s_aws_profile_trimmable_property_profile_test); /* * Property values can be empty */ AWS_STATIC_STRING_FROM_LITERAL(s_empty_property_profile, "[profile foo]\nname ="); static int s_aws_profile_empty_property_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_empty_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", ""); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_empty_property_profile_test, s_aws_profile_empty_property_profile_test); /* * Multiple empty profiles */ AWS_STATIC_STRING_FROM_LITERAL(s_multiple_empty_profile, "[profile foo]\n[profile bar]"); static int s_aws_profile_multiple_empty_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_multiple_empty_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 2); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 0); EXPECT_PROFILE(profile_collection, "bar"); EXPECT_PROPERTY_COUNT(profile_collection, "bar", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_multiple_empty_profile_test, s_aws_profile_multiple_empty_profile_test); /* * Multiple profiles with properties */ AWS_STATIC_STRING_FROM_LITERAL(s_multiple_profile, "[profile foo]\nname = value\n[profile bar]\nname2 = value2"); static int 
s_aws_profile_multiple_profile_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_multiple_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 2); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); EXPECT_PROFILE(profile_collection, "bar"); EXPECT_PROPERTY_COUNT(profile_collection, "bar", 1); EXPECT_PROPERTY(profile_collection, "bar", "name2", "value2"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_multiple_profile_test, s_aws_profile_multiple_profile_test); /* * Multiple profiles with properties and sso-session */ AWS_STATIC_STRING_FROM_LITERAL( s_credentials_sso_session, "[profile foo]\nname = value\n[profile bar]\nname2 = value2\n[sso-session session]\nname3 = value3\ns3 =\n name4 = " "value4"); static int s_aws_profile_multiple_profile_with_sso_session_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_credentials_sso_session, AWS_PST_CONFIG); ASSERT_NOT_NULL(profile_collection); EXPECT_PROFILE_COUNT(profile_collection, 2); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); EXPECT_PROFILE(profile_collection, "bar"); EXPECT_PROPERTY_COUNT(profile_collection, "bar", 1); EXPECT_PROPERTY(profile_collection, "bar", "name2", "value2"); EXPECT_SSO_SESSION_COUNT(profile_collection, 1); EXPECT_SSO_SESSION(profile_collection, "session"); EXPECT_SSO_SESSION_PROPERTY_COUNT(profile_collection, "session", 2); EXPECT_SSO_SESSION_PROPERTY(profile_collection, "session", "name3", "value3"); EXPECT_SSO_SESSION_SUB_PROPERTY_COUNT(profile_collection, "session", "s3", 1); EXPECT_SSO_SESSION_SUB_PROPERTY(profile_collection, "session", "s3", "name4", "value4"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_multiple_profile_with_sso_session_test, s_aws_profile_multiple_profile_with_sso_session_test); /* * SSO-Session in credentials file is ignored */ AWS_STATIC_STRING_FROM_LITERAL( s_sso_session_in_credentials, "[foo]\nname = value\n[sso-session session]\nname3 = value3"); static int s_aws_profile_sso_session_in_credentials_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_sso_session_in_credentials, AWS_PST_CREDENTIALS); ASSERT_NOT_NULL(profile_collection); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_SSO_SESSION_COUNT(profile_collection, 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_sso_session_in_credentials_test, s_aws_profile_sso_session_in_credentials_test); /* * sso-session without name is ignored */ AWS_STATIC_STRING_FROM_LITERAL(s_sso_session_without_name, "[sso-session session]\nname = value\n[sso-session ]"); //"[profile foo]\nname = value\n[sso-session session]\nname3 = value3"); static int s_aws_profile_sso_session_without_name_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_sso_session_without_name, AWS_PST_CONFIG); ASSERT_NOT_NULL(profile_collection); EXPECT_PROFILE_COUNT(profile_collection, 
0); EXPECT_SSO_SESSION_COUNT(profile_collection, 1); EXPECT_SSO_SESSION(profile_collection, "session"); EXPECT_SSO_SESSION_PROPERTY_COUNT(profile_collection, "session", 1); EXPECT_SSO_SESSION_PROPERTY(profile_collection, "session", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_sso_session_without_name_test, s_aws_profile_sso_session_without_name_test); /* * Blank lines are ignored */ AWS_STATIC_STRING_FROM_LITERAL( s_blank_lines_ignored_profile, "\t \n[profile foo]\n\t\n \nname = value\n\t \n[profile bar]\n \t"); static int s_aws_profile_blank_lines_ignored_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_blank_lines_ignored_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 2); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); EXPECT_PROFILE(profile_collection, "bar"); EXPECT_PROPERTY_COUNT(profile_collection, "bar", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_blank_lines_ignored_test, s_aws_profile_blank_lines_ignored_test); /* * # comments are ignored */ AWS_STATIC_STRING_FROM_LITERAL( s_pound_comments_ignored_profile, "# Comment\n[profile foo] # Comment\nname = value # Comment with # sign"); static int s_aws_profile_pound_comments_ignored_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_pound_comments_ignored_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_pound_comments_ignored_test, s_aws_profile_pound_comments_ignored_test); /* * ; comments are ignored */ AWS_STATIC_STRING_FROM_LITERAL( s_semicolon_comments_ignored_profile, "; Comment\n[profile foo] ; Comment\nname = value ; Comment with ; sign"); static int s_aws_profile_semicolon_comments_ignored_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_semicolon_comments_ignored_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_semicolon_comments_ignored_test, s_aws_profile_semicolon_comments_ignored_test); /* * mixed comments are ignored */ AWS_STATIC_STRING_FROM_LITERAL( s_mixed_comments_ignored_profile, "# Comment\n[profile foo] ; Comment\nname = value # Comment with ; sign"); static int s_aws_profile_mixed_comments_ignored_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_mixed_comments_ignored_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); 
EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_mixed_comments_ignored_test, s_aws_profile_mixed_comments_ignored_test); /* * empty comments are ignored */ AWS_STATIC_STRING_FROM_LITERAL(s_empty_comments_ignored_profile, ";\n[profile foo];\nname = value ;\n"); static int s_aws_profile_empty_comments_ignored_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_empty_comments_ignored_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_empty_comments_ignored_test, s_aws_profile_empty_comments_ignored_test); /* * comments can be adjacent to profile declaration */ AWS_STATIC_STRING_FROM_LITERAL( s_profile_adjacent_comment_profile, "[profile foo]; Adjacent semicolons\n[profile bar]# Adjacent pound signs"); static int s_aws_profile_profile_adjacent_comment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_profile_adjacent_comment_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 2); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 0); EXPECT_PROFILE(profile_collection, "bar"); EXPECT_PROPERTY_COUNT(profile_collection, "bar", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_profile_adjacent_comment_test, s_aws_profile_profile_adjacent_comment_test); /* * comments adjacent to values are included in the value */ AWS_STATIC_STRING_FROM_LITERAL( s_value_adjacent_comment_profile, "[profile foo]\nname = value; Adjacent semicolons\nname2 = value# Adjacent pound signs"); static int s_aws_profile_value_adjacent_comment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_value_adjacent_comment_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 2); EXPECT_PROPERTY(profile_collection, "foo", "name", "value; Adjacent semicolons"); EXPECT_PROPERTY(profile_collection, "foo", "name2", "value# Adjacent pound signs"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_value_adjacent_comment_test, s_aws_profile_value_adjacent_comment_test); /* * property values can be continued */ AWS_STATIC_STRING_FROM_LITERAL(s_continued_property_value_profile, "[profile foo]\nname = value\n -continued"); static int s_aws_profile_continued_property_value_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_continued_property_value_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); 
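/* The continuation line " -continued" is trimmed and joined to the base value with a '\n', per the expectation below. */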
EXPECT_PROPERTY(profile_collection, "foo", "name", "value\n-continued"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_continued_property_value_test, s_aws_profile_continued_property_value_test); /* * property values can be continued across multiple lines */ AWS_STATIC_STRING_FROM_LITERAL( s_multiline_continued_property_value_profile, "[profile foo]\nname = value\n -continued\n -and-continued"); static int s_aws_profile_multiline_continued_property_value_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_multiline_continued_property_value_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value\n-continued\n-and-continued"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE( aws_profile_multiline_continued_property_value_test, s_aws_profile_multiline_continued_property_value_test); /* * property value continuations get trimmed */ AWS_STATIC_STRING_FROM_LITERAL( s_continued_property_value_trim_profile, "[profile foo]\nname = value\n \t -continued \t "); static int s_aws_profile_continued_property_value_trim_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_continued_property_value_trim_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value\n-continued"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_continued_property_value_trim_test, s_aws_profile_continued_property_value_trim_test); /* * property value continuations include # comments */ AWS_STATIC_STRING_FROM_LITERAL( s_continued_property_value_pound_comment_profile, "[profile foo]\nname = value\n -continued # Comment"); static int s_aws_profile_continued_property_value_pound_comment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_continued_property_value_pound_comment_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value\n-continued # Comment"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE( aws_profile_continued_property_value_pound_comment_test, s_aws_profile_continued_property_value_pound_comment_test); /* * property value continuations include ; comments */ AWS_STATIC_STRING_FROM_LITERAL( s_continued_property_value_semicolon_comment_profile, "[profile foo]\nname = value\n -continued ; Comment"); static int s_aws_profile_continued_property_value_semicolon_comment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_continued_property_value_semicolon_comment_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); 
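/* On a continuation line the trailing "; Comment" is not stripped; it stays part of the property value, as the expectation below shows. */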
EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value\n-continued ; Comment"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE( aws_profile_continued_property_value_semicolon_comment_test, s_aws_profile_continued_property_value_semicolon_comment_test); /* * duplicate profiles and sso-session merge properties */ AWS_STATIC_STRING_FROM_LITERAL( s_duplicate_profiles_merge_profile, "[profile foo]\nname = value\n[profile foo]\nname2 = value2\n[sso-session foo]\nname3 = value-3\n[sso-session " "foo]\nname3 = value3\nname4 = value4"); static int s_aws_profile_duplicate_profiles_merge_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_duplicate_profiles_merge_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 2); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); EXPECT_PROPERTY(profile_collection, "foo", "name2", "value2"); EXPECT_SSO_SESSION_COUNT(profile_collection, 1); EXPECT_SSO_SESSION(profile_collection, "foo"); EXPECT_SSO_SESSION_PROPERTY_COUNT(profile_collection, "foo", 2); EXPECT_SSO_SESSION_PROPERTY(profile_collection, "foo", "name3", "value3"); EXPECT_SSO_SESSION_PROPERTY(profile_collection, "foo", "name4", "value4"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_duplicate_profiles_merge_test, s_aws_profile_duplicate_profiles_merge_test); /* * duplicate properties in a single profile use the last property definition */ AWS_STATIC_STRING_FROM_LITERAL( s_duplicate_properties_last_property_value_profile, "[profile foo]\nname = value\nname = value2"); static int s_aws_profile_duplicate_properties_last_property_value_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_duplicate_properties_last_property_value_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value2"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE( aws_profile_duplicate_properties_last_property_value_test, s_aws_profile_duplicate_properties_last_property_value_test); /* * duplicate profiles use the last property definition */ AWS_STATIC_STRING_FROM_LITERAL( s_duplicate_profiles_last_property_value_profile, "[profile foo]\nname = value\n[profile foo]\nname = value2"); static int s_aws_profile_duplicate_profiles_last_property_value_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_duplicate_profiles_last_property_value_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value2"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE( aws_profile_duplicate_profiles_last_property_value_test, 
s_aws_profile_duplicate_profiles_last_property_value_test); /* * Default profile with profile prefix overrides default profile without prefix when profile prefix is first */ AWS_STATIC_STRING_FROM_LITERAL( s_duplicate_default_profiles_property_resolution1_profile, "[profile default]\nname = value\n[default]\nname2 = value2"); static int s_aws_profile_duplicate_default_profiles_property_resolution1_test( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_duplicate_default_profiles_property_resolution1_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "default"); EXPECT_PROPERTY_COUNT(profile_collection, "default", 1); EXPECT_PROPERTY(profile_collection, "default", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE( aws_profile_duplicate_default_profiles_property_resolution1_test, s_aws_profile_duplicate_default_profiles_property_resolution1_test); /* * Default profile with profile prefix overrides default profile without prefix when profile prefix is last */ AWS_STATIC_STRING_FROM_LITERAL( s_duplicate_default_profiles_property_resolution2_profile, "[default]\nname2 = value2\n[profile default]\nname = value"); static int s_aws_profile_duplicate_default_profiles_property_resolution2_test( struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_duplicate_default_profiles_property_resolution2_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "default"); EXPECT_PROPERTY_COUNT(profile_collection, "default", 1); EXPECT_PROPERTY(profile_collection, "default", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE( aws_profile_duplicate_default_profiles_property_resolution2_test, s_aws_profile_duplicate_default_profiles_property_resolution2_test); /* * Invalid profile names are ignored */ AWS_STATIC_STRING_FROM_LITERAL(s_invalid_profile_names_config_profile, "[profile in valid]\nname = value"); AWS_STATIC_STRING_FROM_LITERAL(s_invalid_profile_names_credentials_profile, "[in valid 2]\nname2 = value2"); static int s_aws_profile_invalid_profile_names_merge_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_merged_profile_test( allocator, s_invalid_profile_names_config_profile, s_invalid_profile_names_credentials_profile); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_invalid_profile_names_merge_test, s_aws_profile_invalid_profile_names_merge_test); /* * Invalid property names are ignored */ AWS_STATIC_STRING_FROM_LITERAL(s_invalid_property_names_ignored_profile, "[profile foo]\nin valid = value"); static int s_aws_profile_invalid_property_names_ignored_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_invalid_property_names_ignored_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 0); 
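/*
 * "in valid" contains a space, which is not an accepted property-name character, so the property is dropped
 * while the surrounding profile still parses: profile "foo" exists but holds zero properties, as asserted
 * above. The companion tests below enumerate the characters that are accepted in profile and property names.
 */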
aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_invalid_property_names_ignored_test, s_aws_profile_invalid_property_names_ignored_test); /* * All valid profile name characters are supported */ AWS_STATIC_STRING_FROM_LITERAL( s_all_valid_profile_characters_profile, "[profile ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_]"); static int s_aws_profile_all_valid_profile_characters_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_all_valid_profile_characters_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"); EXPECT_PROPERTY_COUNT(profile_collection, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_all_valid_profile_characters_test, s_aws_profile_all_valid_profile_characters_test); /* * All valid profile name characters are supported */ AWS_STATIC_STRING_FROM_LITERAL( s_all_valid_property_characters_profile, "[profile foo]\nABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_ = value"); static int s_aws_profile_all_valid_property_characters_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_all_valid_property_characters_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY( profile_collection, "foo", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_all_valid_property_characters_test, s_aws_profile_all_valid_property_characters_test); /* * Properties can have sub properties */ AWS_STATIC_STRING_FROM_LITERAL(s_basic_sub_property_profile, "[profile foo]\ns3 =\n name = value"); static int s_aws_profile_basic_sub_property_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_basic_sub_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "s3", "\nname = value"); EXPECT_SUB_PROPERTY_COUNT(profile_collection, "foo", "s3", 1); EXPECT_SUB_PROPERTY(profile_collection, "foo", "s3", "name", "value"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_basic_sub_property_test, s_aws_profile_basic_sub_property_test); /* * Sub properties can have an empty value */ AWS_STATIC_STRING_FROM_LITERAL(s_empty_sub_property_profile, "[profile foo]\ns3 =\n name ="); static int s_aws_profile_empty_sub_property_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_empty_sub_property_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); 
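/*
 * Expanded from the literal above ("[profile foo]\ns3 =\n name ="), the sub-property layout under test is:
 *
 *     [profile foo]
 *     s3 =
 *      name =
 *
 * A parent property with an empty value followed by indented "key = value" lines is exposed two ways: as the
 * raw property value (here "\nname =") and as a parsed sub-property map (here "name" mapped to ""), which is
 * what the assertions that follow check.
 */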
EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "s3", "\nname ="); EXPECT_SUB_PROPERTY_COUNT(profile_collection, "foo", "s3", 1); EXPECT_SUB_PROPERTY(profile_collection, "foo", "s3", "name", ""); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_empty_sub_property_test, s_aws_profile_empty_sub_property_test); /* * An invalid subproperty name is not a fatal parse error */ AWS_STATIC_STRING_FROM_LITERAL(s_invalid_sub_property_name_profile, "[profile foo]\ns3 =\n in valid = value"); static int s_aws_profile_invalid_sub_property_name_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_invalid_sub_property_name_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "s3", "\nin valid = value"); EXPECT_SUB_PROPERTY_COUNT(profile_collection, "foo", "s3", 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_invalid_sub_property_name_test, s_aws_profile_invalid_sub_property_name_test); /* * Sub properties can have blank lines that get ignored */ AWS_STATIC_STRING_FROM_LITERAL( s_sub_property_blank_line_profile, "[profile foo]\ns3 =\n name = value\n\t \n name2 = value2"); static int s_aws_profile_sub_property_blank_line_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_sub_property_blank_line_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "s3", "\nname = value\nname2 = value2"); EXPECT_SUB_PROPERTY_COUNT(profile_collection, "foo", "s3", 2); EXPECT_SUB_PROPERTY(profile_collection, "foo", "s3", "name", "value"); EXPECT_SUB_PROPERTY(profile_collection, "foo", "s3", "name2", "value2"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_sub_property_blank_line_test, s_aws_profile_sub_property_blank_line_test); /* * Profiles duplicated in multiple files are merged. 
*/ AWS_STATIC_STRING_FROM_LITERAL(s_basic_duplicate_config_profile, "[profile foo]\nname = value"); AWS_STATIC_STRING_FROM_LITERAL(s_basic_duplicate_credentials_profile, "[foo]\nname2 = value2"); static int s_aws_profile_basic_duplicate_merge_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_merged_profile_test( allocator, s_basic_duplicate_config_profile, s_basic_duplicate_credentials_profile); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 2); EXPECT_PROPERTY(profile_collection, "foo", "name", "value"); EXPECT_PROPERTY(profile_collection, "foo", "name2", "value2"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_basic_duplicate_merge_test, s_aws_profile_basic_duplicate_merge_test); /* * When merging default profile in config file, the one without the prefix gets ignored */ AWS_STATIC_STRING_FROM_LITERAL( s_mixed_prefix_default_profile, "[profile default]\nname = value\n[default]\nname2 = value2\n[profile default]\nname3 = value3"); static int s_aws_profile_mixed_prefix_default_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_mixed_prefix_default_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "default"); EXPECT_PROPERTY_COUNT(profile_collection, "default", 2); EXPECT_PROPERTY(profile_collection, "default", "name", "value"); EXPECT_PROPERTY(profile_collection, "default", "name3", "value3"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_mixed_prefix_default_test, s_aws_profile_mixed_prefix_default_test); /* * Duplicate properties between files use credentials property */ AWS_STATIC_STRING_FROM_LITERAL(s_override_duplicate_config_profile, "[profile foo]\nname = value"); AWS_STATIC_STRING_FROM_LITERAL(s_override_duplicate_credentials_profile, "[foo]\nname = value2"); static int s_aws_profile_override_duplicate_merge_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_merged_profile_test( allocator, s_override_duplicate_config_profile, s_override_duplicate_credentials_profile); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 1); EXPECT_PROFILE(profile_collection, "foo"); EXPECT_PROPERTY_COUNT(profile_collection, "foo", 1); EXPECT_PROPERTY(profile_collection, "foo", "name", "value2"); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_override_duplicate_merge_test, s_aws_profile_override_duplicate_merge_test); /* * Non-default config profiles without prefix are ignored */ AWS_STATIC_STRING_FROM_LITERAL(s_no_prefix_nondefault_profile, "[foo]\nname = value"); static int s_aws_profile_no_prefix_nondefault_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_no_prefix_nondefault_profile, AWS_PST_CONFIG); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_no_prefix_nondefault_test, s_aws_profile_no_prefix_nondefault_test); /* * Credentials profiles 
with prefix are ignored */ AWS_STATIC_STRING_FROM_LITERAL(s_prefix_credentials_profile, "[profile foo]\nname = value"); static int s_aws_profile_prefix_credentials_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_profile_collection *profile_collection = aws_prepare_profile_test(allocator, s_prefix_credentials_profile, AWS_PST_CREDENTIALS); ASSERT_TRUE(profile_collection != NULL); EXPECT_PROFILE_COUNT(profile_collection, 0); aws_profile_collection_destroy(profile_collection); return 0; } AWS_TEST_CASE(aws_profile_prefix_credentials_test, s_aws_profile_prefix_credentials_test); AWS_STATIC_STRING_FROM_LITERAL(s_config_override_path, "/tmp/.aws/config"); #ifdef _WIN32 AWS_STATIC_STRING_FROM_LITERAL(s_config_override_path_result, "\\tmp\\.aws\\config"); #else AWS_STATIC_STRING_FROM_LITERAL(s_config_override_path_result, "/tmp/.aws/config"); #endif /* _WIN32 */ static int s_config_file_path_override_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor override_cursor = aws_byte_cursor_from_string(s_config_override_path); struct aws_string *path = aws_get_config_file_path(allocator, &override_cursor); ASSERT_TRUE(aws_string_compare(path, s_config_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(config_file_path_override_test, s_config_file_path_override_test); AWS_STATIC_STRING_FROM_LITERAL(s_config_env_var, "AWS_CONFIG_FILE"); static int s_config_file_path_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_set_environment_value(s_config_env_var, s_config_override_path); struct aws_string *path = aws_get_config_file_path(allocator, NULL); ASSERT_TRUE(aws_string_compare(path, s_config_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(config_file_path_environment_test, s_config_file_path_environment_test); AWS_STATIC_STRING_FROM_LITERAL(s_credentials_override_path, "/tmp/.aws/credentials"); #ifdef _WIN32 AWS_STATIC_STRING_FROM_LITERAL(s_credentials_override_path_result, "\\tmp\\.aws\\credentials"); #else AWS_STATIC_STRING_FROM_LITERAL(s_credentials_override_path_result, "/tmp/.aws/credentials"); #endif /* _WIN32 */ static int s_credentials_file_path_override_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_cursor override_cursor = aws_byte_cursor_from_string(s_credentials_override_path); struct aws_string *path = aws_get_credentials_file_path(allocator, &override_cursor); ASSERT_TRUE(aws_string_compare(path, s_credentials_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(credentials_file_path_override_test, s_credentials_file_path_override_test); AWS_STATIC_STRING_FROM_LITERAL(s_credentials_env_var, "AWS_SHARED_CREDENTIALS_FILE"); static int s_credentials_file_path_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_set_environment_value(s_credentials_env_var, s_credentials_override_path); struct aws_string *path = aws_get_credentials_file_path(allocator, NULL); ASSERT_TRUE(aws_string_compare(path, s_credentials_override_path_result) == 0); aws_string_destroy(path); return 0; } AWS_TEST_CASE(credentials_file_path_environment_test, s_credentials_file_path_environment_test); AWS_STATIC_STRING_FROM_LITERAL(s_profile_env_var, "AWS_PROFILE"); AWS_STATIC_STRING_FROM_LITERAL(s_profile_override, "NotTheDefault"); static int s_profile_override_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; /* The envrionment value should only override the default when user not set 
one */ aws_set_environment_value(s_profile_env_var, s_profile_override); struct aws_byte_cursor override_cursor = aws_byte_cursor_from_string(s_profile_override); struct aws_string *profile_name = aws_get_profile_name(allocator, &override_cursor); ASSERT_TRUE(aws_string_compare(profile_name, s_profile_override) == 0); aws_string_destroy(profile_name); return 0; } AWS_TEST_CASE(profile_override_test, s_profile_override_test); static int s_profile_environment_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_set_environment_value(s_profile_env_var, s_profile_override); struct aws_string *profile_name = aws_get_profile_name(allocator, NULL); ASSERT_TRUE(aws_string_compare(profile_name, s_profile_override) == 0); aws_string_destroy(profile_name); return 0; } AWS_TEST_CASE(profile_environment_test, s_profile_environment_test); aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/endpoints_regex_tests.c000066400000000000000000000136201456575232400270210ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include AWS_TEST_CASE(endpoints_regex_aws_region_matches, s_test_aws_region_matches) static int s_test_aws_region_matches(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_endpoints_regex *regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$")); ASSERT_NOT_NULL(regex); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-west-2"))); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("eu-west-3"))); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("ap-east-1"))); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("sa-east-1"))); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("ca-central-1"))); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("me-central-1"))); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("af-south-1"))); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("il-central-1"))); ASSERT_ERROR( AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-west"))); ASSERT_ERROR( AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("uk-west-2"))); ASSERT_ERROR( AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-w1st-2"))); aws_endpoints_regex_destroy(regex); return AWS_OP_SUCCESS; } AWS_TEST_CASE(endpoints_regex_iso_region_matches, s_test_iso_region_matches) static int s_test_iso_region_matches(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_endpoints_regex *regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^us\\-isob\\-\\w+\\-\\d+$")); ASSERT_NOT_NULL(regex); ASSERT_SUCCESS(aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-isob-east-1"))); ASSERT_ERROR( AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-west-2"))); ASSERT_ERROR( AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("uk-isob-east-1"))); ASSERT_ERROR( AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-i1sob-east-1"))); ASSERT_ERROR( 
AWS_ERROR_SDKUTILS_ENDPOINTS_REGEX_NO_MATCH, aws_endpoints_regex_match(regex, aws_byte_cursor_from_c_str("us-isob-e1ast-1"))); aws_endpoints_regex_destroy(regex); return AWS_OP_SUCCESS; } AWS_TEST_CASE(endpoints_regex_misc_validation, s_test_misc_regex_validation) static int s_test_misc_regex_validation(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_endpoints_regex *regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^us\\-(^ba)\\-\\w+\\-\\d+$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); regex = aws_endpoints_regex_new( allocator, aws_byte_cursor_from_c_str("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("aaaaa")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaa(aa$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaaaa($")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaa()aa$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaa*aa$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaa+aa$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaa(a|ab)aa$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaa(a||b)aa$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaa*+aa$")); ASSERT_NULL(regex); ASSERT_INT_EQUALS(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_REGEX, aws_last_error()); regex = aws_endpoints_regex_new(allocator, aws_byte_cursor_from_c_str("^aaaaa$")); ASSERT_NOT_NULL(regex); ASSERT_ERROR( AWS_ERROR_INVALID_ARGUMENT, aws_endpoints_regex_match( regex, aws_byte_cursor_from_c_str("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))); aws_endpoints_regex_destroy(regex); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/endpoints_rule_engine_tests.c000066400000000000000000000464051456575232400302120ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include #include #include #include #include #include #include static int read_file_contents( struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const struct aws_byte_cursor filename_cur) { AWS_ZERO_STRUCT(*out_buf); struct aws_string *mode = aws_string_new_from_c_str(alloc, "r"); struct aws_string *filename = aws_string_new_from_cursor(alloc, &filename_cur); FILE *fp = aws_fopen_safe(filename, mode); aws_string_destroy(filename); aws_string_destroy(mode); ASSERT_NOT_NULL(fp); int64_t file_size = 0; ASSERT_SUCCESS(aws_file_get_length(fp, &file_size)); ASSERT_SUCCESS(aws_byte_buf_init(out_buf, alloc, (size_t)file_size)); size_t read = fread(out_buf->buffer, 1, (size_t)file_size, fp); fclose(fp); /* TODO: On win size read seems to be smaller than what get length returns, but its still a valid json*/ /* ASSERT_INT_EQUALS(file_size, read); */ out_buf->len = read; return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_ruleset_from_string, s_test_parse_ruleset_from_string) static int s_test_parse_ruleset_from_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_sdkutils_library_init(allocator); struct aws_byte_buf buf; ASSERT_SUCCESS(read_file_contents(&buf, allocator, aws_byte_cursor_from_c_str("sample_ruleset.json"))); struct aws_byte_cursor ruleset_json = aws_byte_cursor_from_buf(&buf); clock_t begin = clock(); struct aws_endpoints_ruleset *ruleset = aws_endpoints_ruleset_new_from_string(allocator, ruleset_json); clock_t end = clock(); double time_taken = (((double)(end - begin)) / CLOCKS_PER_SEC); AWS_LOGF_INFO(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Parsed in(s): %f", time_taken); ASSERT_NOT_NULL(ruleset); struct aws_byte_buf partitions_buf; ASSERT_SUCCESS( read_file_contents(&partitions_buf, allocator, aws_byte_cursor_from_c_str("sample_partitions.json"))); struct aws_byte_cursor partitions_json = aws_byte_cursor_from_buf(&partitions_buf); struct aws_partitions_config *partitions = aws_partitions_config_new_from_string(allocator, partitions_json); ASSERT_NOT_NULL(partitions); const struct aws_hash_table *parameters = aws_endpoints_ruleset_get_parameters(ruleset); struct aws_byte_cursor param_name_cur = aws_byte_cursor_from_c_str("Region"); struct aws_hash_element *element = NULL; aws_hash_table_find(parameters, ¶m_name_cur, &element); ASSERT_NOT_NULL(element); struct aws_byte_cursor built_in = aws_endpoints_parameter_get_built_in((struct aws_endpoints_parameter *)element->value); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(built_in, "AWS::Region"); struct aws_endpoints_rule_engine *engine = aws_endpoints_rule_engine_new(allocator, ruleset, partitions); struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); ASSERT_SUCCESS(aws_endpoints_request_context_add_string( allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-west-2"))); struct aws_endpoints_resolved_endpoint *resolved_endpoint = NULL; clock_t begin_resolve = clock(); ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(engine, context, &resolved_endpoint)); clock_t end_resolve = clock(); double time_taken_resolve = (((double)(end_resolve - begin_resolve)) / CLOCKS_PER_SEC); AWS_LOGF_INFO(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Resolved in(s): %f", time_taken_resolve); ASSERT_INT_EQUALS(AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); struct aws_byte_cursor url_cur; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url_cur)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(url_cur, 
"https://example.us-west-2.amazonaws.com"); aws_endpoints_ruleset_release(ruleset); aws_partitions_config_release(partitions); aws_endpoints_rule_engine_release(engine); aws_endpoints_resolved_endpoint_release(resolved_endpoint); aws_endpoints_request_context_release(context); aws_byte_buf_clean_up(&buf); aws_byte_buf_clean_up(&partitions_buf); aws_sdkutils_library_clean_up(); return AWS_OP_SUCCESS; } struct iteration_wrapper { struct aws_allocator *allocator; struct aws_endpoints_request_context *context; }; static int s_on_parameter_key( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)out_should_continue; struct iteration_wrapper *wrapper = user_data; if (aws_json_value_is_string(value)) { struct aws_byte_cursor cur; if (aws_json_value_get_string(value, &cur) || aws_endpoints_request_context_add_string(wrapper->allocator, wrapper->context, *key, cur)) { goto on_error; } return AWS_OP_SUCCESS; } else if (aws_json_value_is_boolean(value)) { bool b; if (aws_json_value_get_boolean(value, &b) || aws_endpoints_request_context_add_boolean(wrapper->allocator, wrapper->context, *key, b)) { goto on_error; } return AWS_OP_SUCCESS; } else { goto on_error; } on_error: return AWS_OP_ERR; } struct headers_wrapper { struct aws_allocator *allocator; const struct aws_hash_table *headers; }; static int s_on_header_key( const struct aws_byte_cursor *key, const struct aws_json_value *value, bool *out_should_continue, void *user_data) { (void)out_should_continue; struct headers_wrapper *wrapper = user_data; struct aws_string *key_string = aws_string_new_from_cursor(wrapper->allocator, key); struct aws_hash_element *element = NULL; ASSERT_SUCCESS(aws_hash_table_find(wrapper->headers, key_string, &element)); ASSERT_NOT_NULL(element); struct aws_array_list *header_values = element->value; ASSERT_NOT_NULL(header_values); ASSERT_INT_EQUALS(aws_json_get_array_size(value), aws_array_list_length(header_values)); for (size_t i = 0; i < aws_json_get_array_size(value); ++i) { struct aws_json_value *val = aws_json_get_array_element(value, i); struct aws_byte_cursor cur; ASSERT_SUCCESS(aws_json_value_get_string(val, &cur)); bool found_match = false; for (size_t j = 0; j < aws_array_list_length(header_values); ++j) { struct aws_string *header_val = NULL; ASSERT_SUCCESS(aws_array_list_get_at(header_values, &header_val, j)); if (aws_string_eq_byte_cursor(header_val, &cur)) { found_match = true; break; } } ASSERT_TRUE(found_match); } aws_string_destroy(key_string); return AWS_OP_SUCCESS; } static int eval_expected(struct aws_allocator *allocator, struct aws_byte_cursor file_name) { aws_sdkutils_library_init(allocator); struct aws_byte_buf ruleset_file_path; ASSERT_SUCCESS( aws_byte_buf_init_copy_from_cursor(&ruleset_file_path, allocator, aws_byte_cursor_from_c_str("valid-rules/"))); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&ruleset_file_path, &file_name)); struct aws_byte_buf test_cases_file_path; ASSERT_SUCCESS(aws_byte_buf_init_copy_from_cursor( &test_cases_file_path, allocator, aws_byte_cursor_from_c_str("test-cases/"))); ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&test_cases_file_path, &file_name)); struct aws_byte_buf ruleset_buf; ASSERT_SUCCESS(read_file_contents(&ruleset_buf, allocator, aws_byte_cursor_from_buf(&ruleset_file_path))); struct aws_byte_cursor ruleset_json = aws_byte_cursor_from_buf(&ruleset_buf); clock_t begin = clock(); struct aws_endpoints_ruleset *ruleset = aws_endpoints_ruleset_new_from_string(allocator, ruleset_json); 
clock_t end = clock(); double time_taken = (((double)(end - begin)) / CLOCKS_PER_SEC); AWS_LOGF_INFO(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Parsed in(s): %f", time_taken); ASSERT_NOT_NULL(ruleset); struct aws_byte_buf partitions_buf; ASSERT_SUCCESS(read_file_contents(&partitions_buf, allocator, aws_byte_cursor_from_c_str("partitions.json"))); struct aws_byte_cursor partitions_json = aws_byte_cursor_from_buf(&partitions_buf); struct aws_partitions_config *partitions = aws_partitions_config_new_from_string(allocator, partitions_json); struct aws_endpoints_rule_engine *engine = aws_endpoints_rule_engine_new(allocator, ruleset, partitions); struct aws_byte_buf test_cases_buf; if (read_file_contents(&test_cases_buf, allocator, aws_byte_cursor_from_buf(&test_cases_file_path))) { AWS_LOGF_INFO( AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Ruleset has no associated test cases: " PRInSTR, AWS_BYTE_CURSOR_PRI(file_name)); goto skip_test_cases; } struct aws_byte_cursor test_cases_json = aws_byte_cursor_from_buf(&test_cases_buf); struct aws_json_value *test_cases = aws_json_value_new_from_string(allocator, test_cases_json); struct aws_json_value *tests = aws_json_value_get_from_object(test_cases, aws_byte_cursor_from_c_str("testCases")); for (size_t i = 0; i < aws_json_get_array_size(tests); ++i) { struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); struct aws_json_value *test = aws_json_get_array_element(tests, i); struct aws_byte_cursor documentation; struct aws_json_value *doc_json = aws_json_value_get_from_object(test, aws_byte_cursor_from_c_str("documentation")); ASSERT_SUCCESS(aws_json_value_get_string(doc_json, &documentation)); AWS_LOGF_INFO(0, "Running test case #%zu: " PRInSTR, i, AWS_BYTE_CURSOR_PRI(documentation)); struct aws_json_value *params = aws_json_value_get_from_object(test, aws_byte_cursor_from_c_str("params")); struct iteration_wrapper wrapper = {.allocator = allocator, .context = context}; ASSERT_SUCCESS(aws_json_const_iterate_object(params, s_on_parameter_key, &wrapper)); struct aws_endpoints_resolved_endpoint *resolved_endpoint = NULL; clock_t begin_resolve = clock(); ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(engine, context, &resolved_endpoint)); clock_t end_resolve = clock(); double time_taken_resolve = (((double)(end_resolve - begin_resolve)) / CLOCKS_PER_SEC); AWS_LOGF_INFO(0, "Resolved in(s): %f", time_taken_resolve); struct aws_json_value *expect = aws_json_value_get_from_object(test, aws_byte_cursor_from_c_str("expect")); struct aws_json_value *endpoint = aws_json_value_get_from_object(expect, aws_byte_cursor_from_c_str("endpoint")); if (endpoint != NULL) { ASSERT_INT_EQUALS( AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); struct aws_byte_cursor url; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url)); struct aws_json_value *expected_url_node = aws_json_value_get_from_object(endpoint, aws_byte_cursor_from_c_str("url")); struct aws_byte_cursor expected_url; aws_json_value_get_string(expected_url_node, &expected_url); AWS_LOGF_DEBUG(0, PRInSTR " " PRInSTR, AWS_BYTE_CURSOR_PRI(url), AWS_BYTE_CURSOR_PRI(expected_url)); ASSERT_TRUE(aws_byte_cursor_eq(&url, &expected_url)); struct aws_byte_cursor properties; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_properties(resolved_endpoint, &properties)); struct aws_json_value *properties_json = aws_json_value_new_from_string(allocator, properties); struct aws_json_value *expected_properties = 
aws_json_value_get_from_object(endpoint, aws_byte_cursor_from_c_str("properties")); ASSERT_TRUE(expected_properties == NULL ? properties.len == 0 : properties.len > 0); if (expected_properties != NULL) { ASSERT_TRUE(aws_json_value_compare(properties_json, expected_properties, false)); } aws_json_value_destroy(properties_json); const struct aws_hash_table *headers; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_headers(resolved_endpoint, &headers)); struct aws_json_value *expected_headers_node = aws_json_value_get_from_object(endpoint, aws_byte_cursor_from_c_str("headers")); if (expected_headers_node) { struct headers_wrapper headers_wrapper = {.allocator = allocator, .headers = headers}; ASSERT_SUCCESS(aws_json_const_iterate_object(expected_headers_node, s_on_header_key, &headers_wrapper)); } } struct aws_json_value *error_node = aws_json_value_get_from_object(expect, aws_byte_cursor_from_c_str("error")); if (error_node != NULL) { ASSERT_INT_EQUALS( AWS_ENDPOINTS_RESOLVED_ERROR, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); struct aws_byte_cursor error; ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_error(resolved_endpoint, &error)); struct aws_byte_cursor expected_error; ASSERT_SUCCESS(aws_json_value_get_string(error_node, &expected_error)); ASSERT_TRUE(aws_byte_cursor_eq(&error, &expected_error)); } aws_endpoints_resolved_endpoint_release(resolved_endpoint); aws_endpoints_request_context_release(context); } aws_json_value_destroy(test_cases); aws_byte_buf_clean_up(&test_cases_buf); skip_test_cases: aws_endpoints_ruleset_release(ruleset); aws_partitions_config_release(partitions); aws_endpoints_rule_engine_release(engine); aws_byte_buf_clean_up(&ruleset_file_path); aws_byte_buf_clean_up(&ruleset_buf); aws_byte_buf_clean_up(&partitions_buf); aws_byte_buf_clean_up(&test_cases_file_path); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_aws_region, s_test_endpoints_aws_region) static int s_test_endpoints_aws_region(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("aws-region.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_default_values, s_test_endpoints_default_values) static int s_test_endpoints_default_values(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("default-values.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_eventbridge, s_test_endpoints_eventbridge) static int s_test_endpoints_eventbridge(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("eventbridge.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_fns, s_test_endpoints_fns) static int s_test_endpoints_fns(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("fns.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_get_attr_type_inference, s_test_endpoints_get_attr_type_inference) static int s_test_endpoints_get_attr_type_inference(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("get-attr-type-inference.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_headers, s_test_endpoints_headers) static int s_test_endpoints_headers(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("headers.json"))); 
return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_is_virtual_hostable_s3_bucket, s_test_endpoints_is_virtual_hostable_s3_bucket) static int s_test_endpoints_is_virtual_hostable_s3_bucket(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("is-virtual-hostable-s3-bucket.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_region_override, s_test_endpoints_region_override) static int s_test_endpoints_region_override(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("region-override.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_minimal_ruleset, s_test_endpoints_minimal_ruleset) static int s_test_endpoints_minimal_ruleset(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("minimal-ruleset.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_parse_arn, s_test_endpoints_parse_arn) static int s_test_endpoints_parse_arn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("parse-arn.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_parse_url, s_test_endpoints_parse_url) static int s_test_endpoints_parse_url(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("parse-url.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_partition_fn, s_test_endpoints_partition_fn) static int s_test_endpoints_partition_fn(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("partition-fn.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_substring, s_test_endpoints_substring) static int s_test_endpoints_substring(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("substring.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_uri_encode, s_test_endpoints_uri_encode) static int s_test_endpoints_uri_encode(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("uri-encode.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_valid_hostlabel, s_test_endpoints_valid_hostlabel) static int s_test_endpoints_valid_hostlabel(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("valid-hostlabel.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_condition_mem_clean_up, s_test_condition_mem_clean_up) static int s_test_condition_mem_clean_up(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("custom_object_condition.json"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(test_endpoints_custom, s_test_endpoints_custom) static int s_test_endpoints_custom(struct aws_allocator *allocator, void *ctx) { (void)ctx; ASSERT_SUCCESS(eval_expected(allocator, aws_byte_cursor_from_c_str("custom_partition.json"))); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/endpoints_util_tests.c000066400000000000000000000157021456575232400266670ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include #include AWS_TEST_CASE(endpoints_eval_util_is_ipv4, s_test_is_ipv4) static int s_test_is_ipv4(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_TRUE(aws_is_ipv4(aws_byte_cursor_from_c_str("0.0.0.0"))); ASSERT_TRUE(aws_is_ipv4(aws_byte_cursor_from_c_str("127.0.0.1"))); ASSERT_TRUE(aws_is_ipv4(aws_byte_cursor_from_c_str("255.255.255.255"))); ASSERT_TRUE(aws_is_ipv4(aws_byte_cursor_from_c_str("192.168.1.1"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("256.0.0.1"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("127.0.0"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("127.0"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("127"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str(""))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("foo.com"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("a.b.c.d"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("a127.0.0.1"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("127.0.0.1a"))); ASSERT_FALSE(aws_is_ipv4(aws_byte_cursor_from_c_str("127.0.0.1011"))); return AWS_OP_SUCCESS; } AWS_TEST_CASE(endpoints_eval_util_is_ipv6, s_test_is_ipv6) static int s_test_is_ipv6(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("0:0:0000:0000:0000:0:0:0"), false)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("2001:0db8:0000:0000:0000:8a2e:0370:7334"), false)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("2001:0DB8:0000:0000:0000:8a2e:0370:7334"), false)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("fe80::1"), false)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("fe80::1%en0"), false)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("[2001:0db8:0000:0000:0000:8a2e:0370:7334]"), true)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("[fe80::1]"), true)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("[fe80::1%25en0]"), true)); ASSERT_TRUE(aws_is_ipv6(aws_byte_cursor_from_c_str("[2001:db8:85a3:8d3:1319:8a2e:370:7348]"), true)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("2001:0db8:0000:0000:0000:8a2e:0370"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("2001:0db8:0000:0000:0000:8a2e:0370:"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("2001::"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("2001:0db8:0000:0000:0000:8a2e:0370:7334:8745"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str(":2001:0db8:0000:0000:0000:8a2e:0370:7334:8745"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("z001:0db8:0000:0000:0000:8a2e:0370:7334:8745"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("z001::8a2e::8745"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("::2001:0db8:0000:0000:8a2e:0370:7334"), false)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("fe80::1%25en0"), true)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("[fe80::1%en0]"), true)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("[fe80::1%24en0]"), true)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("[fe80::1%25en0"), true)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("fe80::1%25en0]"), true)); ASSERT_FALSE(aws_is_ipv6(aws_byte_cursor_from_c_str("[fe80::1%25]"), true)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(endpoints_uri_normalize_path, s_test_uri_normalize_path) static int s_test_uri_normalize_path(struct aws_allocator *allocator, void 
*ctx) { (void)ctx; struct aws_byte_buf buf1; ASSERT_SUCCESS(aws_byte_buf_init_from_normalized_uri_path(allocator, aws_byte_cursor_from_c_str("/"), &buf1)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&buf1, "/")); aws_byte_buf_clean_up(&buf1); struct aws_byte_buf buf2; ASSERT_SUCCESS(aws_byte_buf_init_from_normalized_uri_path(allocator, aws_byte_cursor_from_c_str("aaa"), &buf2)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&buf2, "/aaa/")); aws_byte_buf_clean_up(&buf2); struct aws_byte_buf buf3; ASSERT_SUCCESS(aws_byte_buf_init_from_normalized_uri_path(allocator, aws_byte_cursor_from_c_str("aaa/"), &buf3)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&buf3, "/aaa/")); aws_byte_buf_clean_up(&buf3); struct aws_byte_buf buf4; ASSERT_SUCCESS(aws_byte_buf_init_from_normalized_uri_path(allocator, aws_byte_cursor_from_c_str("/aaa"), &buf4)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&buf4, "/aaa/")); aws_byte_buf_clean_up(&buf4); struct aws_byte_buf buf5; ASSERT_SUCCESS(aws_byte_buf_init_from_normalized_uri_path(allocator, aws_byte_cursor_from_c_str(""), &buf5)); ASSERT_TRUE(aws_byte_buf_eq_c_str(&buf5, "/")); aws_byte_buf_clean_up(&buf5); return AWS_OP_SUCCESS; } int s_resolve_cb(struct aws_byte_cursor template, void *user_data, struct aws_owning_cursor *out_resolved) { (void)template; (void)user_data; *out_resolved = aws_endpoints_non_owning_cursor_create(aws_byte_cursor_from_c_str("test")); return AWS_OP_SUCCESS; } AWS_TEST_CASE( endpoints_byte_buf_init_from_resolved_templated_string, s_test_byte_buf_init_from_resolved_templated_string) static int s_test_byte_buf_init_from_resolved_templated_string(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buf; ASSERT_SUCCESS(aws_byte_buf_init_from_resolved_templated_string( allocator, &buf, aws_byte_cursor_from_c_str("{e} a {b}{c} a {d}"), s_resolve_cb, NULL, false)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_buf(&buf), "test a testtest a test"); aws_byte_buf_clean_up(&buf); ASSERT_SUCCESS(aws_byte_buf_init_from_resolved_templated_string( allocator, &buf, aws_byte_cursor_from_c_str("{ \"a\": \"{b} {d} \", \"c\": \" {e} \"}"), s_resolve_cb, NULL, true)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_buf(&buf), "{ \"a\": \"test test \", \"c\": \" test \"}"); aws_byte_buf_clean_up(&buf); ASSERT_SUCCESS(aws_byte_buf_init_from_resolved_templated_string( allocator, &buf, aws_byte_cursor_from_c_str("a \" {b} \" a"), s_resolve_cb, NULL, false)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_buf(&buf), "a \" test \" a"); aws_byte_buf_clean_up(&buf); ASSERT_SUCCESS(aws_byte_buf_init_from_resolved_templated_string( allocator, &buf, aws_byte_cursor_from_c_str("{ \"a\": \"a \\\" {b} \\\" a\" }"), s_resolve_cb, NULL, true)); ASSERT_CURSOR_VALUE_CSTRING_EQUALS(aws_byte_cursor_from_buf(&buf), "{ \"a\": \"a \\\" test \\\" a\" }"); aws_byte_buf_clean_up(&buf); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resource_name_tests.c000066400000000000000000000232451456575232400264570ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include #include #include AWS_TEST_CASE(parse_resource_name_test, s_test_parse_resource_name) static int s_test_parse_resource_name(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_cursor arn_string_01 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:aws-us-gov:iam::123456789012:user:ooo"); struct aws_resource_name arn_01; AWS_ZERO_STRUCT(arn_01); ASSERT_SUCCESS(aws_resource_name_init_from_cur(&arn_01, &arn_string_01)); ASSERT_BIN_ARRAYS_EQUALS("aws-us-gov", strlen("aws-us-gov"), arn_01.partition.ptr, arn_01.partition.len); ASSERT_BIN_ARRAYS_EQUALS("iam", strlen("iam"), arn_01.service.ptr, arn_01.service.len); ASSERT_BIN_ARRAYS_EQUALS("", strlen(""), arn_01.region.ptr, arn_01.region.len); ASSERT_BIN_ARRAYS_EQUALS("123456789012", strlen("123456789012"), arn_01.account_id.ptr, arn_01.account_id.len); ASSERT_BIN_ARRAYS_EQUALS("user:ooo", strlen("user:ooo"), arn_01.resource_id.ptr, arn_01.resource_id.len); struct aws_byte_cursor arn_string_02 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:aws:cloudformation:us-east-1:1234567890:stack/FooBar"); struct aws_resource_name arn_02; AWS_ZERO_STRUCT(arn_02); ASSERT_SUCCESS(aws_resource_name_init_from_cur(&arn_02, &arn_string_02)); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&arn_02.partition, "aws")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&arn_02.service, "cloudformation")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&arn_02.region, "us-east-1")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&arn_02.account_id, "1234567890")); ASSERT_TRUE(aws_byte_cursor_eq_c_str(&arn_02.resource_id, "stack/FooBar")); return AWS_OP_SUCCESS; } AWS_TEST_CASE(parse_resource_name_failures_test, s_test_parse_resource_name_failures) static int s_test_parse_resource_name_failures(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; struct aws_byte_cursor arn_string_01 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:aws-us-gov:iam::123456789012"); struct aws_resource_name arn_01; AWS_ZERO_STRUCT(arn_01); /* arn has no resource id */ ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_resource_name_init_from_cur(&arn_01, &arn_string_01)); struct aws_byte_cursor arn_string_02 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:aws-us-gov:iam:"); struct aws_resource_name arn_02; AWS_ZERO_STRUCT(arn_02); /* arn has no account id */ ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_resource_name_init_from_cur(&arn_02, &arn_string_02)); struct aws_byte_cursor arn_string_03 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:aws-us-gov:iam"); struct aws_resource_name arn_03; AWS_ZERO_STRUCT(arn_03); /* arn has no region */ ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_resource_name_init_from_cur(&arn_03, &arn_string_03)); struct aws_byte_cursor arn_string_04 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:aws-us-gov"); struct aws_resource_name arn_04; AWS_ZERO_STRUCT(arn_04); /* arn has no partition */ ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_resource_name_init_from_cur(&arn_04, &arn_string_04)); struct aws_byte_cursor arn_string_05 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn"); struct aws_resource_name arn_05; AWS_ZERO_STRUCT(arn_05); /* arn cannot parse arn prefix (must end with :) */ ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_resource_name_init_from_cur(&arn_05, &arn_string_05)); struct aws_byte_cursor arn_string_06 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ar:aws:cloudformation:us-east-1:1234567890:stack/FooBar"); struct aws_resource_name arn_06; AWS_ZERO_STRUCT(arn_06); /* arn prefix isn't present/correct */ 
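/*
 * For reference, the happy-path test above decomposes an ARN as
 *     arn:<partition>:<service>:<region>:<account-id>:<resource-id>
 * Each failure case in this function removes one more trailing component, or breaks the "arn:" prefix itself,
 * and is expected to fail with AWS_ERROR_MALFORMED_INPUT_STRING.
 */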
ASSERT_ERROR(AWS_ERROR_MALFORMED_INPUT_STRING, aws_resource_name_init_from_cur(&arn_06, &arn_string_06)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(resource_name_tostring_test, s_test_resource_name_tostring) static int s_test_resource_name_tostring(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf buffer; AWS_ZERO_STRUCT(buffer); ASSERT_SUCCESS(aws_byte_buf_init(&buffer, allocator, 1600)); struct aws_resource_name arn_01 = { .partition = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-us-gov"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("iam"), .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), .account_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("123456789"), .resource_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("group/crt"), }; ASSERT_SUCCESS(aws_byte_buf_append_resource_name(&buffer, &arn_01)); ASSERT_BIN_ARRAYS_EQUALS( "arn:aws-us-gov:iam::123456789:group/crt", strlen("arn:aws-us-gov:iam::123456789:group/crt"), buffer.buffer, buffer.len); aws_byte_buf_reset(&buffer, false); struct aws_resource_name arn_02 = { .partition = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cloudformation"), .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-west-2"), .account_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("12345678910"), .resource_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("stack/MyStack"), }; ASSERT_SUCCESS(aws_byte_buf_append_resource_name(&buffer, &arn_02)); ASSERT_BIN_ARRAYS_EQUALS( "arn:aws:cloudformation:us-west-2:12345678910:stack/MyStack", strlen("arn:aws:cloudformation:us-west-2:12345678910:stack/MyStack"), buffer.buffer, buffer.len); aws_byte_buf_clean_up(&buffer); uint8_t static_space[120]; struct aws_byte_buf static_buffer = {.len = 0, .buffer = static_space, .capacity = 120, .allocator = NULL}; struct aws_resource_name arn_03 = { .partition = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3"), .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), .account_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("123456789"), .resource_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bucket/key"), }; ASSERT_SUCCESS(aws_byte_buf_append_resource_name(&static_buffer, &arn_03)); ASSERT_BIN_ARRAYS_EQUALS( "arn:aws:s3::123456789:bucket/key", strlen("arn:aws:s3::123456789:bucket/key"), static_buffer.buffer, static_buffer.len); aws_byte_buf_clean_up(&static_buffer); return AWS_OP_SUCCESS; } AWS_TEST_CASE(resource_name_tostring_failure_test, s_test_resource_name_tostring_failure) static int s_test_resource_name_tostring_failure(struct aws_allocator *allocator, void *ctx) { (void)ctx; struct aws_byte_buf too_small_buffer; AWS_ZERO_STRUCT(too_small_buffer); ASSERT_SUCCESS(aws_byte_buf_init(&too_small_buffer, allocator, 16)); struct aws_resource_name arn_01 = { .partition = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-cn"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("dynamodb"), .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cn-northwest-1"), .account_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("123456789"), .resource_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Table/Books"), }; ASSERT_ERROR(AWS_ERROR_DEST_COPY_TOO_SMALL, aws_byte_buf_append_resource_name(&too_small_buffer, &arn_01)); aws_byte_buf_clean_up(&too_small_buffer); uint8_t static_space[16]; struct aws_byte_buf static_buffer = {.len = 0, .buffer = static_space, .capacity = 16, .allocator = NULL}; struct aws_resource_name arn_02 = { .partition = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws"), .service = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3"), .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), .account_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("123456789"), .resource_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("bucket/key"), }; ASSERT_ERROR(AWS_ERROR_DEST_COPY_TOO_SMALL, aws_byte_buf_append_resource_name(&static_buffer, &arn_02)); return AWS_OP_SUCCESS; } AWS_TEST_CASE(resource_name_length_test, s_test_resource_name_length) static int s_test_resource_name_length(struct aws_allocator *allocator, void *ctx) { (void)ctx; size_t arn_length; struct aws_byte_buf buffer; AWS_ZERO_STRUCT(buffer); ASSERT_SUCCESS(aws_byte_buf_init(&buffer, allocator, 1600)); struct aws_resource_name arn_01 = { .partition = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-us-gov"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("iam"), .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(""), .account_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("123456789"), .resource_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("group:crt"), }; ASSERT_SUCCESS(aws_resource_name_length(&arn_01, &arn_length)); ASSERT_UINT_EQUALS(strlen("arn:aws-us-gov:iam::123456789:group:crt"), arn_length); aws_byte_buf_reset(&buffer, false); struct aws_resource_name arn_02 = { .partition = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws"), .service = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cloudformation"), .region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-west-2"), .account_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("12345678910"), .resource_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("stack/MyStack"), }; ASSERT_SUCCESS(aws_resource_name_length(&arn_02, &arn_length)); ASSERT_UINT_EQUALS(strlen("arn:aws:cloudformation:us-west-2:12345678910:stack/MyStack"), arn_length); aws_byte_buf_clean_up(&buffer); return AWS_OP_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/000077500000000000000000000000001456575232400242465ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/partitions.json000066400000000000000000000130661456575232400273430ustar00rootroot00000000000000{ "partitions" : [ { "id" : "aws", "outputs" : { "dnsSuffix" : "amazonaws.com", "dualStackDnsSuffix" : "api.aws", "implicitGlobalRegion" : "us-east-1", "name" : "aws", "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", "regions" : { "af-south-1" : { "description" : "Africa (Cape Town)" }, "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, "ap-northeast-2" : { "description" : "Asia Pacific (Seoul)" }, "ap-northeast-3" : { "description" : "Asia Pacific (Osaka)" }, "ap-south-1" : { "description" : "Asia Pacific (Mumbai)" }, "ap-south-2" : { "description" : "Asia Pacific (Hyderabad)" }, "ap-southeast-1" : { "description" : "Asia Pacific (Singapore)" }, "ap-southeast-2" : { "description" : "Asia Pacific (Sydney)" }, "ap-southeast-3" : { "description" : "Asia Pacific (Jakarta)" }, "ap-southeast-4" : { "description" : "Asia Pacific (Melbourne)" }, "aws-global" : { "description" : "AWS Standard global region" }, "ca-central-1" : { "description" : "Canada (Central)" }, "eu-central-1" : { "description" : "Europe (Frankfurt)" }, "eu-central-2" : { "description" : "Europe (Zurich)" }, "eu-north-1" : { "description" : "Europe (Stockholm)" }, "eu-south-1" : { "description" : "Europe (Milan)" }, "eu-south-2" : { "description" : "Europe (Spain)" }, "eu-west-1" : { "description" : "Europe (Ireland)" }, "eu-west-2" : { 
"description" : "Europe (London)" }, "eu-west-3" : { "description" : "Europe (Paris)" }, "il-central-1" : { "description" : "Israel (Tel Aviv)" }, "me-central-1" : { "description" : "Middle East (UAE)" }, "me-south-1" : { "description" : "Middle East (Bahrain)" }, "sa-east-1" : { "description" : "South America (Sao Paulo)" }, "us-east-1" : { "description" : "US East (N. Virginia)" }, "us-east-2" : { "description" : "US East (Ohio)" }, "us-west-1" : { "description" : "US West (N. California)" }, "us-west-2" : { "description" : "US West (Oregon)" } } }, { "id" : "aws-cn", "outputs" : { "dnsSuffix" : "amazonaws.com.cn", "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", "implicitGlobalRegion" : "cn-northwest-1", "name" : "aws-cn", "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^cn\\-\\w+\\-\\d+$", "regions" : { "aws-cn-global" : { "description" : "AWS China global region" }, "cn-north-1" : { "description" : "China (Beijing)" }, "cn-northwest-1" : { "description" : "China (Ningxia)" } } }, { "id" : "aws-us-gov", "outputs" : { "dnsSuffix" : "amazonaws.com", "dualStackDnsSuffix" : "api.aws", "implicitGlobalRegion" : "us-gov-west-1", "name" : "aws-us-gov", "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", "regions" : { "aws-us-gov-global" : { "description" : "AWS GovCloud (US) global region" }, "us-gov-east-1" : { "description" : "AWS GovCloud (US-East)" }, "us-gov-west-1" : { "description" : "AWS GovCloud (US-West)" } } }, { "id" : "aws-iso", "outputs" : { "dnsSuffix" : "c2s.ic.gov", "dualStackDnsSuffix" : "c2s.ic.gov", "implicitGlobalRegion" : "us-iso-east-1", "name" : "aws-iso", "supportsDualStack" : false, "supportsFIPS" : true }, "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", "regions" : { "aws-iso-global" : { "description" : "AWS ISO (US) global region" }, "us-iso-east-1" : { "description" : "US ISO East" }, "us-iso-west-1" : { "description" : "US ISO WEST" } } }, { "id" : "aws-iso-b", "outputs" : { "dnsSuffix" : "sc2s.sgov.gov", "dualStackDnsSuffix" : "sc2s.sgov.gov", "implicitGlobalRegion" : "us-isob-east-1", "name" : "aws-iso-b", "supportsDualStack" : false, "supportsFIPS" : true }, "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", "regions" : { "aws-iso-b-global" : { "description" : "AWS ISOB (US) global region" }, "us-isob-east-1" : { "description" : "US ISOB East (Ohio)" } } }, { "id" : "aws-iso-e", "outputs" : { "dnsSuffix" : "cloud.adc-e.uk", "dualStackDnsSuffix" : "cloud.adc-e.uk", "implicitGlobalRegion" : "eu-isoe-west-1", "name" : "aws-iso-e", "supportsDualStack" : false, "supportsFIPS" : true }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { } }, { "id" : "aws-iso-f", "outputs" : { "dnsSuffix" : "csp.hci.ic.gov", "dualStackDnsSuffix" : "csp.hci.ic.gov", "implicitGlobalRegion" : "us-isof-south-1", "name" : "aws-iso-f", "supportsDualStack" : false, "supportsFIPS" : true }, "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", "regions" : { } } ], "version" : "1.1" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/sample_partitions.json000066400000000000000000000022041456575232400306740ustar00rootroot00000000000000{ "version": "1.1", "partitions": [ { "id": "aws", "regionRegex": "^(us|eu|ap|sa|ca|me|af)-\\w+-\\d+$", "regions": { "af-south-1": { "supportsFIPS": false }, "af-east-1": {}, "ap-northeast-1": {}, "ap-northeast-2": {}, "ap-northeast-3": {}, "ap-south-1": {}, "ap-southeast-1": {}, "ap-southeast-2": {}, "ap-southeast-3": {}, "ca-central-1": {}, "eu-central-1": {}, "eu-north-1": {}, 
"eu-south-1": {}, "eu-west-1": {}, "eu-west-2": {}, "eu-west-3": {}, "me-south-1": {}, "sa-east-1": {}, "us-east-1": {}, "us-east-2": {}, "us-west-1": {}, "us-west-2": { "description" : "US West (Oregon)" }, "aws-global": {} }, "outputs": { "name": "aws", "dnsSuffix": "amazonaws.com", "dualStackDnsSuffix": "api.aws", "supportsFIPS": true, "supportsDualStack": true } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/sample_ruleset.json000066400000000000000000000020651456575232400301700ustar00rootroot00000000000000{ "version": "1.0", "serviceId": "example", "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "documentation": "The region to dispatch the request to" } }, "rules": [ { "documentation": "rules for when region isSet", "type": "tree", "conditions": [ {"fn": "isSet", "argv": [{"ref": "Region"}]} ], "rules": [ { "type": "endpoint", "conditions": [ {"fn": "aws.partition", "argv": [{"ref": "Region"}], "assign": "partitionResult"} ], "endpoint": {"url": "https://example.{Region}.{partitionResult#dnsSuffix}"} }, { "type": "error", "documentation": "invalid region value", "conditions": [], "error": "unable to determine endpoint for region: {Region}" } ] }, { "type": "endpoint", "documentation": "the single service global endpoint", "conditions": [], "endpoint": {"url": "https://example.amazonaws.com"} } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/000077500000000000000000000000001456575232400263215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/aws-region.json000066400000000000000000000013661456575232400312750ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "basic region templating", "params": { "Region": "us-east-1" }, "expect": { "endpoint": { "url": "https://us-east-1.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4", "signingRegion": "us-east-1", "signingName": "serviceName" } ] } } } }, { "documentation": "test case where region is unset", "params": {}, "expect": { "error": "Region must be set to resolve a valid endpoint" } } ] } custom_object_condition.json000066400000000000000000000004551456575232400340470ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases{ "version": "1.0", "testCases": [ { "documentation": "Condition object mem usage", "params": { "Arn": "arn:aws:s3::123456789012:accesspoint:mfzwi23gnjvgw.mrap" }, "expect": { "error": "Invalid arn use" } } ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/custom_partition.json000066400000000000000000000025541456575232400326250ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "aws-iso-e region", "params": { "Region": "eu-isoe-west-1" }, "expect": { "endpoint": { "url": "https://aws-iso-e.eu-isoe-west-1.cloud.adc-e.uk", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "eu-isoe-west-1" } ], "meta": { "baseSuffix": "cloud.adc-e.uk", "dualStackSuffix": "cloud.adc-e.uk" } } } } }, { "documentation": "aws-iso-f region", "params": { "Region": "us-isof-south-1" }, "expect": { "endpoint": { "url": "https://aws-iso-f.us-isof-south-1.csp.hci.ic.gov", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "us-isof-south-1" } ], "meta": { "baseSuffix": "csp.hci.ic.gov", "dualStackSuffix": "csp.hci.ic.gov" } } } } } ] 
}aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/default-values.json000066400000000000000000000017361456575232400321440ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "default endpoint", "params": {}, "expect": { "endpoint": { "url": "https://fips.us-west-5.amazonaws.com" } } }, { "documentation": "test case where FIPS is disabled", "params": { "UseFips": false }, "expect": { "error": "UseFips = false" } }, { "documentation": "test case where FIPS is enabled explicitly", "params": { "UseFips": true }, "expect": { "endpoint": { "url": "https://fips.us-west-5.amazonaws.com" } } }, { "documentation": "defaults can be overridden", "params": { "Region": "us-east-1" }, "expect": { "endpoint": { "url": "https://fips.us-east-1.amazonaws.com" } } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/eventbridge.json000066400000000000000000000022231456575232400315110ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "simple region endpoint", "params": { "region": "us-east-1" }, "expect": { "endpoint": { "url": "https://events.us-east-1.amazonaws.com" } } }, { "documentation": "basic case of endpointId", "params": { "region": "us-east-1", "endpointId": "myendpoint" }, "expect": { "endpoint": { "url": "https://myendpoint.endpoint.events.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4a", "signingName": "events", "signingRegionSet": ["*"] } ] } } } }, { "documentation": "endpointId & FIPS", "params": { "region": "us-east-1", "endpointId": "myendpoint", "useFIPSEndpoint": true }, "expect": { "error": "FIPS endpoints not supported with multi-region endpoints" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/fns.json000066400000000000000000000023671456575232400300120ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "test where URI is set and flows to URI and header", "params": { "Uri": "https://www.example.com", "Arn": "arn:aws:s3:us-east-2:012345678:outpost:op-1234" }, "expect": { "endpoint": { "url": "https://www.example.com", "headers": { "x-uri": [ "https://www.example.com" ], "x-arn-region": [ "us-east-2" ] } } } }, { "documentation": "test where explicit error is set", "params": { "CustomError": "This is an error!" }, "expect": { "error": "This is an error!" } }, { "documentation": "test where an ARN field is used in the error directly", "params": { "Arn": "arn:This is an error!:s3:us-east-2:012345678:outpost:op-1234" }, "expect": { "error": "This is an error!" 
} }, { "documentation": "test case where no fields are set", "params": {}, "expect": { "error": "No fields were set" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/headers.json000066400000000000000000000010001456575232400306160ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "header set to region", "params": { "Region": "us-east-1" }, "expect": { "endpoint": { "url": "https://us-east-1.amazonaws.com", "headers": { "x-amz-region": [ "us-east-1" ], "x-amz-multi": [ "*", "us-east-1" ] } } } } ] }is-virtual-hostable-s3-bucket.json000066400000000000000000000076621456575232400346440ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases{ "version": "1.0", "testCases": [ { "documentation": "bucket-name: isVirtualHostable", "params": { "BucketName": "bucket-name" }, "expect": { "endpoint": { "url": "https://bucket-name.s3.amazonaws.com" } } }, { "documentation": "bucket-with-number-1: isVirtualHostable", "params": { "BucketName": "bucket-with-number-1" }, "expect": { "endpoint": { "url": "https://bucket-with-number-1.s3.amazonaws.com" } } }, { "documentation": "BucketName: not isVirtualHostable (uppercase characters)", "params": { "BucketName": "BucketName" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "bucket_name: not isVirtualHostable (underscore)", "params": { "BucketName": "bucket_name" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "bucket.name: isVirtualHostable (http only)", "params": { "BucketName": "bucket.name" }, "expect": { "endpoint": { "url": "http://bucket.name.s3.amazonaws.com" } } }, { "documentation": "bucket.name.multiple.dots1: isVirtualHostable (http only)", "params": { "BucketName": "bucket.name.multiple.dots1" }, "expect": { "endpoint": { "url": "http://bucket.name.multiple.dots1.s3.amazonaws.com" } } }, { "documentation": "-bucket-name: not isVirtualHostable (leading dash)", "params": { "BucketName": "-bucket-name" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "bucket-name-: not isVirtualHostable (trailing dash)", "params": { "BucketName": "bucket-name-" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "aa: not isVirtualHostable (< 3 characters)", "params": { "BucketName": "aa" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "'a'*64: not isVirtualHostable (> 63 characters)", "params": { "BucketName": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": ".bucket-name: not isVirtualHostable (leading dot)", "params": { "BucketName": ".bucket-name" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "bucket-name.: not isVirtualHostable (trailing dot)", "params": { "BucketName": "bucket-name." 
}, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "192.168.5.4: not isVirtualHostable (formatted like an ip address)", "params": { "BucketName": "192.168.5.4" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "bucket-.name: not isVirtualHostable (invalid label, ends with a -)", "params": { "BucketName": "bucket-.name" }, "expect": { "error": "not isVirtualHostableS3Bucket" } }, { "documentation": "bucket.-name: not isVirtualHostable (invalid label, starts with a -)", "params": { "BucketName": "bucket.-name" }, "expect": { "error": "not isVirtualHostableS3Bucket" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/local-region-override.json000066400000000000000000000010471456575232400334060ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "local region override", "params": { "Region": "local" }, "expect": { "endpoint": { "url": "http://localhost:8080" } } }, { "documentation": "standard region templated", "params": { "Region": "us-east-2" }, "expect": { "endpoint": { "url": "https://us-east-2.someservice.amazonaws.com" } } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/parse-arn.json000066400000000000000000000107471456575232400311150ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "arn + region resolution", "params": { "Bucket": "arn:aws:s3:us-east-2:012345678:outpost:op-1234", "Region": "us-east-2" }, "expect": { "endpoint": { "url": "https://op-1234-012345678.us-east-2.amazonaws.com" } } }, { "documentation": "arn, unset outpost id", "params": { "Bucket": "arn:aws:s3:us-east-2:012345678:outpost", "Region": "us-east-2" }, "expect": { "error": "Invalid ARN: outpostId was not set" } }, { "documentation": "arn, empty outpost id (tests that empty strings are handled properly during matching)", "params": { "Bucket": "arn:aws:s3:us-east-2:012345678:outpost::", "Region": "us-east-2" }, "expect": { "error": "OutpostId was empty" } }, { "documentation": "arn, empty outpost id (tests that ARN parsing considers a trailing colon)", "params": { "Bucket": "arn:aws:s3:us-east-2:012345678:outpost:", "Region": "us-east-2" }, "expect": { "error": "OutpostId was empty" } }, { "documentation": "valid hostlabel + region resolution", "params": { "Bucket": "mybucket", "Region": "us-east-2" }, "expect": { "endpoint": { "url": "https://mybucket.us-east-2.amazonaws.com" } } }, { "documentation": "not a valid hostlabel + region resolution", "params": { "Bucket": "99_a", "Region": "us-east-2" }, "expect": { "endpoint": { "url": "https://us-east-2.amazonaws.com/99_a" } } }, { "documentation": "no bucket", "params": { "Region": "us-east-2" }, "expect": { "endpoint": { "url": "https://us-east-2.amazonaws.com" } } }, { "documentation": "a string that is not a 6-part ARN", "params": { "TestCaseId": "invalid-arn", "Bucket": "asdf" }, "expect": { "error": "Test case passed: `asdf` is not a valid ARN." } }, { "documentation": "resource id MUST not be null", "params": { "TestCaseId": "invalid-arn", "Bucket": "arn:aws:s3:us-west-2:123456789012:" }, "expect": { "error": "Test case passed: `arn:aws:s3:us-west-2:123456789012:` is not a valid ARN." } }, { "documentation": "service MUST not be null", "params": { "TestCaseId": "invalid-arn", "Bucket": "arn:aws::us-west-2:123456789012:resource-id" }, "expect": { "error": "Test case passed: `arn:aws::us-west-2:123456789012:resource-id` is not a valid ARN." 
} }, { "documentation": "partition MUST not be null", "params": { "TestCaseId": "invalid-arn", "Bucket": "arn::s3:us-west-2:123456789012:resource-id" }, "expect": { "error": "Test case passed: `arn::s3:us-west-2:123456789012:resource-id` is not a valid ARN." } }, { "documentation": "region MAY be null", "params": { "TestCaseId": "valid-arn", "Bucket": "arn:aws:s3::123456789012:resource-id" }, "expect": { "error": "Test case passed: A valid ARN was parsed: service: `s3`, partition: `aws, region: ``, accountId: `123456789012`, resource: `resource-id`" } }, { "documentation": "accountId MAY be null", "params": { "TestCaseId": "valid-arn", "Bucket": "arn:aws:s3:us-east-1::resource-id" }, "expect": { "error": "Test case passed: A valid ARN was parsed: service: `s3`, partition: `aws, region: `us-east-1`, accountId: ``, resource: `resource-id`" } }, { "documentation": "accountId MAY be non-numeric", "params": { "TestCaseId": "valid-arn", "Bucket": "arn:aws:s3:us-east-1:abcd:resource-id" }, "expect": { "error": "Test case passed: A valid ARN was parsed: service: `s3`, partition: `aws, region: `us-east-1`, accountId: `abcd`, resource: `resource-id`" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/parse-url.json000066400000000000000000000073511456575232400311340ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "simple URL parsing", "params": { "Endpoint": "https://authority.com/custom-path" }, "expect": { "endpoint": { "url": "https://https-authority.com.example.com/path-is/custom-path" } } }, { "documentation": "empty path no slash", "params": { "Endpoint": "https://authority.com" }, "expect": { "endpoint": { "url": "https://https-authority.com-nopath.example.com" } } }, { "documentation": "empty path with slash", "params": { "Endpoint": "https://authority.com/" }, "expect": { "endpoint": { "url": "https://https-authority.com-nopath.example.com" } } }, { "documentation": "authority with port", "params": { "Endpoint": "https://authority.com:8000/port" }, "expect": { "endpoint": { "url": "https://authority.com:8000/uri-with-port" } } }, { "documentation": "http schemes", "params": { "Endpoint": "http://authority.com:8000/port" }, "expect": { "endpoint": { "url": "http://authority.com:8000/uri-with-port" } } }, { "documentation": "arbitrary schemes are not supported", "params": { "Endpoint": "acbd://example.com" }, "expect": { "error": "endpoint was invalid" } }, { "documentation": "host labels are not validated", "params": { "Endpoint": "http://99_ab.com" }, "expect": { "endpoint": { "url": "https://http-99_ab.com-nopath.example.com" } } }, { "documentation": "host labels are not validated", "params": { "Endpoint": "http://99_ab-.com" }, "expect": { "endpoint": { "url": "https://http-99_ab-.com-nopath.example.com" } } }, { "documentation": "invalid URL", "params": { "Endpoint": "http://abc.com:a/foo" }, "expect": { "error": "endpoint was invalid" } }, { "documentation": "IP Address", "params": { "Endpoint": "http://192.168.1.1/foo/" }, "expect": { "endpoint": { "url": "http://192.168.1.1/foo/is-ip-addr" } } }, { "documentation": "IP Address with port", "params": { "Endpoint": "http://192.168.1.1:1234/foo/" }, "expect": { "endpoint": { "url": "http://192.168.1.1:1234/foo/is-ip-addr" } } }, { "documentation": "IPv6 Address", "params": { "Endpoint": "https://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443" }, "expect": { "endpoint": { "url": "https://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443/is-ip-addr" } } }, { "documentation": "weird DNS 
name", "params": { "Endpoint": "https://999.999.abc.blah" }, "expect": { "endpoint": { "url": "https://https-999.999.abc.blah-nopath.example.com" } } }, { "documentation": "query in resolved endpoint is not supported", "params": { "Endpoint": "https://example.com/path?query1=foo" }, "expect": { "error": "endpoint was invalid" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/partition-fn.json000066400000000000000000000065611456575232400316360ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "standard AWS region", "params": { "Region": "us-east-2" }, "expect": { "endpoint": { "url": "https://aws-partition.us-east-2.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "us-east-2" } ], "meta": { "baseSuffix": "amazonaws.com", "dualStackSuffix": "api.aws" } } } } }, { "documentation": "AWS region that doesn't match any regexes", "params": { "Region": "mars-global" }, "expect": { "endpoint": { "url": "https://aws-partition.mars-global.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "mars-global" } ], "meta": { "baseSuffix": "amazonaws.com", "dualStackSuffix": "api.aws" } } } } }, { "documentation": "AWS region that matches the AWS regex", "params": { "Region": "us-east-10" }, "expect": { "endpoint": { "url": "https://aws-partition.us-east-10.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "us-east-10" } ], "meta": { "baseSuffix": "amazonaws.com", "dualStackSuffix": "api.aws" } } } } }, { "documentation": "CN region that matches the AWS regex", "params": { "Region": "cn-north-5" }, "expect": { "endpoint": { "url": "https://aws-cn.cn-north-5.amazonaws.com.cn", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "cn-north-5" } ], "meta": { "baseSuffix": "amazonaws.com.cn", "dualStackSuffix": "api.amazonwebservices.com.cn" } } } } }, { "documentation": "CN region that is in the explicit list", "params": { "Region": "aws-cn-global" }, "expect": { "endpoint": { "url": "https://aws-cn.aws-cn-global.amazonaws.com.cn", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "aws-cn-global" } ], "meta": { "baseSuffix": "amazonaws.com.cn", "dualStackSuffix": "api.amazonwebservices.com.cn" } } } } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/substring.json000066400000000000000000000107121456575232400312350ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "substring when string is long enough", "params": { "TestCaseId": "1", "Input": "abcdefg" }, "expect": { "error": "The value is: `abcd`" } }, { "documentation": "substring when string is exactly the right length", "params": { "TestCaseId": "1", "Input": "abcd" }, "expect": { "error": "The value is: `abcd`" } }, { "documentation": "substring when string is too short", "params": { "TestCaseId": "1", "Input": "abc" }, "expect": { "error": "No tests matched" } }, { "documentation": "substring when string is too short", "params": { "TestCaseId": "1", "Input": "" }, "expect": { "error": "No tests matched" } }, { "documentation": "substring on wide characters (ensure that unicode code points are properly counted)", "params": { "TestCaseId": "1", "Input": "\ufdfd" }, "expect": { "error": "No tests matched" } }, { "documentation": "unicode characters always 
return `None`", "params": { "TestCaseId": "1", "Input": "abcdef\uD83D\uDC31" }, "expect": { "error": "No tests matched" } }, { "documentation": "non-ascii cause substring to always return `None`", "params": { "TestCaseId": "1", "Input": "abcdef\u0080" }, "expect": { "error": "No tests matched" } }, { "documentation": "the full set of ascii is supported, including non-printable characters", "params": { "TestCaseId": "1", "Input": "\u007Fabcdef" }, "expect": { "error": "The value is: `\u007Fabc`" } }, { "documentation": "substring when string is long enough", "params": { "TestCaseId": "2", "Input": "abcdefg" }, "expect": { "error": "The value is: `defg`" } }, { "documentation": "substring when string is exactly the right length", "params": { "TestCaseId": "2", "Input": "defg" }, "expect": { "error": "The value is: `defg`" } }, { "documentation": "substring when string is too short", "params": { "TestCaseId": "2", "Input": "abc" }, "expect": { "error": "No tests matched" } }, { "documentation": "substring when string is too short", "params": { "TestCaseId": "2", "Input": "" }, "expect": { "error": "No tests matched" } }, { "documentation": "substring on wide characters (ensure that unicode code points are properly counted)", "params": { "TestCaseId": "2", "Input": "\ufdfd" }, "expect": { "error": "No tests matched" } }, { "documentation": "substring when string is longer", "params": { "TestCaseId": "3", "Input": "defg" }, "expect": { "error": "The value is: `ef`" } }, { "documentation": "substring when string is exact length", "params": { "TestCaseId": "3", "Input": "def" }, "expect": { "error": "The value is: `ef`" } }, { "documentation": "substring when string is too short", "params": { "TestCaseId": "3", "Input": "ab" }, "expect": { "error": "No tests matched" } }, { "documentation": "substring when string is too short", "params": { "TestCaseId": "3", "Input": "" }, "expect": { "error": "No tests matched" } }, { "documentation": "substring on wide characters (ensure that unicode code points are properly counted)", "params": { "TestCaseId": "3", "Input": "\ufdfd" }, "expect": { "error": "No tests matched" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/uri-encode.json000066400000000000000000000043721456575232400312540ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "uriEncode when the string has nothing to encode returns the input", "params": { "TestCaseId": "1", "Input": "abcdefg" }, "expect": { "error": "The value is: `abcdefg`" } }, { "documentation": "uriEncode with single character to encode encodes only that character", "params": { "TestCaseId": "1", "Input": "abc:defg" }, "expect": { "error": "The value is: `abc%3Adefg`" } }, { "documentation": "uriEncode with all ASCII characters to encode encodes all characters", "params": { "TestCaseId": "1", "Input": "/:,?#[]{}|@! 
$&'()*+;=%<>\"^`\\" }, "expect": { "error": "The value is: `%2F%3A%2C%3F%23%5B%5D%7B%7D%7C%40%21%20%24%26%27%28%29%2A%2B%3B%3D%25%3C%3E%22%5E%60%5C`" } }, { "documentation": "uriEncode with ASCII characters that should not be encoded returns the input", "params": { "TestCaseId": "1", "Input": "0123456789.underscore_dash-Tilda~" }, "expect": { "error": "The value is: `0123456789.underscore_dash-Tilda~`" } }, { "documentation": "uriEncode encodes unicode characters", "params": { "TestCaseId": "1", "Input": "\ud83d\ude39" }, "expect": { "error": "The value is: `%F0%9F%98%B9`" } }, { "documentation": "uriEncode on all printable ASCII characters", "params": { "TestCaseId": "1", "Input": " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" }, "expect": { "error": "The value is: `%20%21%22%23%24%25%26%27%28%29%2A%2B%2C-.%2F0123456789%3A%3B%3C%3D%3E%3F%40ABCDEFGHIJKLMNOPQRSTUVWXYZ%5B%5C%5D%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~`" } }, { "documentation": "uriEncode on an empty string", "params": { "TestCaseId": "1", "Input": "" }, "expect": { "error": "The value is: ``" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/test-cases/valid-hostlabel.json000066400000000000000000000024251456575232400322710ustar00rootroot00000000000000{ "version": "1.0", "testCases": [ { "documentation": "standard region is a valid hostlabel", "params": { "Region": "us-east-1" }, "expect": { "endpoint": { "url": "https://us-east-1.amazonaws.com" } } }, { "documentation": "starting with a number is a valid hostlabel", "params": { "Region": "3aws4" }, "expect": { "endpoint": { "url": "https://3aws4.amazonaws.com" } } }, { "documentation": "when there are dots, only match if subdomains are allowed", "params": { "Region": "part1.part2" }, "expect": { "endpoint": { "url": "https://part1.part2-subdomains.amazonaws.com" } } }, { "documentation": "a space is never a valid hostlabel", "params": { "Region": "part1 part2" }, "expect": { "error": "Invalid hostlabel" } }, { "documentation": "an empty string is not a valid hostlabel", "params": { "Region": "" }, "expect": { "error": "Invalid hostlabel" } } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/000077500000000000000000000000001456575232400264755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/aws-region.json000066400000000000000000000020471456575232400314460ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "documentation": "The region to dispatch this request, eg. `us-east-1`." 
} }, "rules": [ { "documentation": "Template the region into the URI when region is set", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Region" } ] } ], "endpoint": { "url": "https://{Region}.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "{Region}" } ] } }, "type": "endpoint" }, { "documentation": "fallback when region is unset", "conditions": [], "error": "Region must be set to resolve a valid endpoint", "type": "error" } ], "version": "1.3" }custom_object_condition.json000066400000000000000000000011071456575232400342160ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules{ "version": "1.0", "serviceId": "example", "parameters": { "Arn": { "type": "string", "documentation": "Arn" } }, "rules": [ { "conditions": [ { "fn": "aws.parseArn", "argv": [ { "ref": "Arn" } ] } ], "error": "Invalid arn use", "type": "error" }, { "type": "endpoint", "documentation": "the single service global endpoint", "conditions": [], "endpoint": {"url": "https://example.amazonaws.com"} } ] } aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/custom_partition.json000066400000000000000000000051111456575232400327710ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "required": true }, "PropertyOne": { "type": "boolean" }, "PropertyTwo": { "type": "string" }, "PropertyThree": { "type": "boolean" } }, "rules": [ { "documentation": "base rule", "conditions": [ { "fn": "aws.partition", "argv": [ { "ref": "Region" } ], "assign": "PartResult" } ], "rules": [ { "documentation": "the AWS partition", "conditions": [ { "fn": "stringEquals", "argv": [ "aws", { "fn": "getAttr", "argv": [ { "ref": "PartResult" }, "name" ] } ] } ], "endpoint": { "url": "https://aws-partition.{Region}.{PartResult#dnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "{Region}" } ], "meta": { "baseSuffix": "{PartResult#dnsSuffix}", "dualStackSuffix": "{PartResult#dualStackDnsSuffix}" } } }, "type": "endpoint" }, { "documentation": "the other partitions", "conditions": [], "endpoint": { "url": "https://{PartResult#name}.{Region}.{PartResult#dnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "{Region}" } ], "meta": { "baseSuffix": "{PartResult#dnsSuffix}", "dualStackSuffix": "{PartResult#dualStackDnsSuffix}" } } }, "type": "endpoint" }, { "conditions": [], "error": "no rules matched", "type": "error" } ], "type": "tree" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/default-values.json000066400000000000000000000020031456575232400323040ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "documentation": "The region to dispatch this request, eg. 
`us-east-1`.", "default": "us-west-5", "required": true }, "UseFips": { "type": "boolean", "builtIn": "AWS::UseFIPS", "default": true, "required": true } }, "rules": [ { "documentation": "Template the region into the URI when FIPS is enabled", "conditions": [ { "fn": "booleanEquals", "argv": [ { "ref": "UseFips" }, true ] } ], "endpoint": { "url": "https://fips.{Region}.amazonaws.com" }, "type": "endpoint" }, { "documentation": "error when fips is disabled", "conditions": [], "error": "UseFips = false", "type": "error" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/deprecated-param.json000066400000000000000000000015361456575232400325730ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "required": false, "deprecated": { "message": "use blahdeblah region instead" } } }, "rules": [ { "documentation": "base rule", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Region" } ] } ], "endpoint": { "url": "https://{Region}.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "{Region}" } ] } }, "type": "endpoint" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/eventbridge.json000066400000000000000000000216711456575232400316750ustar00rootroot00000000000000{ "version": "1.3", "parameters": { "region": { "type": "string", "builtIn": "AWS::Region", "required": true }, "useDualStackEndpoint": { "type": "boolean", "builtIn": "AWS::UseDualStack" }, "useFIPSEndpoint": { "type": "boolean", "builtIn": "AWS::UseFIPS" }, "endpointId": { "type": "string" } }, "rules": [ { "conditions": [ { "fn": "aws.partition", "argv": [ { "ref": "region" } ], "assign": "partitionResult" } ], "rules": [ { "conditions": [ { "fn": "isSet", "argv": [ { "ref": "endpointId" } ] } ], "rules": [ { "conditions": [ { "fn": "isSet", "argv": [ { "ref": "useFIPSEndpoint" } ] }, { "fn": "booleanEquals", "argv": [ { "ref": "useFIPSEndpoint" }, true ] } ], "error": "FIPS endpoints not supported with multi-region endpoints", "type": "error" }, { "conditions": [ { "fn": "not", "argv": [ { "fn": "isSet", "argv": [ { "ref": "useFIPSEndpoint" } ] } ] }, { "fn": "isSet", "argv": [ { "ref": "useDualStackEndpoint" } ] }, { "fn": "booleanEquals", "argv": [ { "ref": "useDualStackEndpoint" }, true ] } ], "endpoint": { "url": "https://{endpointId}.endpoint.events.{partitionResult#dualStackDnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4a", "signingName": "events", "signingRegionSet": [ "*" ] } ] } }, "type": "endpoint" }, { "conditions": [], "endpoint": { "url": "https://{endpointId}.endpoint.events.{partitionResult#dnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4a", "signingName": "events", "signingRegionSet": [ "*" ] } ] } }, "type": "endpoint" } ], "type": "tree" }, { "conditions": [ { "fn": "isValidHostLabel", "argv": [ { "ref": "region" }, false ] } ], "rules": [ { "conditions": [ { "fn": "isSet", "argv": [ { "ref": "useFIPSEndpoint" } ] }, { "fn": "booleanEquals", "argv": [ { "ref": "useFIPSEndpoint" }, true ] }, { "fn": "not", "argv": [ { "fn": "isSet", "argv": [ { "ref": "useDualStackEndpoint" } ] } ] } ], "endpoint": { "url": "https://events-fips.{region}.{partitionResult#dnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4a", "signingName": "events", "signingRegionSet": [ "*" ] } ] } }, "type": "endpoint" }, { "conditions": [ { "fn": "isSet", "argv": [ { "ref": "useDualStackEndpoint" } ] }, { 
"fn": "booleanEquals", "argv": [ { "ref": "useDualStackEndpoint" }, true ] }, { "fn": "not", "argv": [ { "fn": "isSet", "argv": [ { "ref": "useFIPSEndpoint" } ] } ] } ], "endpoint": { "url": "https://events.{region}.{partitionResult#dualStackDnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4a", "signingName": "events", "signingRegionSet": [ "*" ] } ] } }, "type": "endpoint" }, { "conditions": [ { "fn": "isSet", "argv": [ { "ref": "useDualStackEndpoint" } ] }, { "fn": "isSet", "argv": [ { "ref": "useFIPSEndpoint" } ] }, { "fn": "booleanEquals", "argv": [ { "ref": "useDualStackEndpoint" }, true ] }, { "fn": "booleanEquals", "argv": [ { "ref": "useFIPSEndpoint" }, true ] } ], "endpoint": { "url": "https://events-fips.{region}.{partitionResult#dualStackDnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4a", "signingName": "events", "signingRegionSet": [ "*" ] } ] } }, "type": "endpoint" }, { "conditions": [], "endpoint": { "url": "https://events.{region}.{partitionResult#dnsSuffix}" }, "type": "endpoint" } ], "type": "tree" }, { "conditions": [], "error": "{region} is not a valid HTTP host-label", "type": "error" } ], "type": "tree" } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/fns.json000066400000000000000000000051161456575232400301610ustar00rootroot00000000000000{ "documentation": "functions in more places", "parameters": { "Uri": { "type": "string", "documentation": "A URI to use" }, "Arn": { "type": "string", "documentation": "an ARN to extract fields from" }, "CustomError": { "type": "string", "documentation": "when set, a custom error message" } }, "rules": [ { "documentation": "when URI is set, use it directly", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Uri" } ] }, { "fn": "isSet", "argv": [ { "ref": "Arn" } ] }, { "fn": "aws.parseArn", "argv": [ { "ref": "Arn" } ], "assign": "parsedArn" } ], "endpoint": { "url": { "ref": "Uri" }, "headers": { "x-uri": [ { "ref": "Uri" } ], "x-arn-region": [ { "fn": "getAttr", "argv": [ { "ref": "parsedArn" }, "region" ] } ] } }, "type": "endpoint" }, { "documentation": "A custom error", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "CustomError" } ] } ], "type": "error", "error": { "ref": "CustomError" } }, { "type": "error", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Arn" } ] }, { "fn": "aws.parseArn", "argv": [ { "ref": "Arn" } ], "assign": "parsedArn" } ], "error": { "fn": "getAttr", "argv": [ { "ref": "parsedArn" }, "partition" ] } }, { "documentation": "fallback when nothing is set", "conditions": [], "error": "No fields were set", "type": "error" } ], "version": "1.3" }get-attr-type-inference.json000066400000000000000000000017011456575232400337520ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules{ "version": "1.3", "parameters": { "Bucket": { "type": "string" } }, "rules": [ { "documentation": "bucket is set, handle bucket specific endpoints", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Bucket" } ] }, { "fn": "aws.parseArn", "argv": [ { "ref": "Bucket" } ], "assign": "bucketArn" }, { "fn": "getAttr", "argv": [ { "ref": "bucketArn" }, "resourceId[2]" ], "assign": "outpostId" } ], "endpoint": { "url": "https://{bucketArn#accountId}.{outpostId}.{bucketArn#region}" }, "type": "endpoint" } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/headers.json000066400000000000000000000017731456575232400310130ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", 
"builtIn": "AWS::Region", "documentation": "The region to dispatch this request, eg. `us-east-1`." } }, "rules": [ { "documentation": "Template the region into the URI when region is set", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Region" } ] } ], "endpoint": { "url": "https://{Region}.amazonaws.com", "headers": { "x-amz-region": [ "{Region}" ], "x-amz-multi": [ "*", "{Region}" ] } }, "type": "endpoint" }, { "documentation": "fallback when region is unset", "conditions": [], "error": "Region must be set to resolve a valid endpoint", "type": "error" } ], "version": "1.3" }is-virtual-hostable-s3-bucket.json000066400000000000000000000017701456575232400350120ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules{ "version": "1.3", "parameters": { "BucketName": { "type": "string", "required": true, "documentation": "the input used to test isVirtualHostableS3Bucket" } }, "rules": [ { "conditions": [ { "fn": "aws.isVirtualHostableS3Bucket", "argv": [ "{BucketName}", false ] } ], "endpoint": { "url": "https://{BucketName}.s3.amazonaws.com" }, "type": "endpoint" }, { "conditions": [ { "fn": "aws.isVirtualHostableS3Bucket", "argv": [ "{BucketName}", true ] } ], "endpoint": { "url": "http://{BucketName}.s3.amazonaws.com" }, "type": "endpoint" }, { "conditions": [ ], "error": "not isVirtualHostableS3Bucket", "type": "error" } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/minimal-ruleset.json000066400000000000000000000011451456575232400325000ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "required": true } }, "rules": [ { "documentation": "base rule", "conditions": [], "endpoint": { "url": "https://{Region}.amazonaws.com", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "{Region}" } ] } }, "type": "endpoint" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/parse-arn.json000066400000000000000000000152121456575232400312610ustar00rootroot00000000000000{ "version": "1.3", "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region" }, "Bucket": { "type": "string" }, "TestCaseId": { "type": "string" } }, "rules": [ { "documentation": "tests of invalid arns", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "TestCaseId" } ] }, { "fn": "isSet", "argv": [ { "ref": "Bucket" } ] }, { "fn": "stringEquals", "argv": [ "{TestCaseId}", "invalid-arn" ] } ], "type": "tree", "rules": [ { "conditions": [ { "fn": "aws.parseArn", "argv": ["{Bucket}"] } ], "type": "error", "error": "A valid ARN was parsed but `{Bucket}` is not a valid ARN" }, { "conditions": [], "type": "error", "error": "Test case passed: `{Bucket}` is not a valid ARN." 
} ] }, { "documentation": "tests of valid arns", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "TestCaseId" } ] }, { "fn": "isSet", "argv": [ { "ref": "Bucket" } ] }, { "fn": "stringEquals", "argv": [ "{TestCaseId}", "valid-arn" ] } ], "type": "tree", "rules": [ { "conditions": [ { "fn": "aws.parseArn", "argv": ["{Bucket}"], "assign": "arn" }, { "fn": "getAttr", "argv": [{"ref": "arn"}, "resourceId[0]"], "assign": "resource" } ], "type": "error", "error": "Test case passed: A valid ARN was parsed: service: `{arn#service}`, partition: `{arn#partition}, region: `{arn#region}`, accountId: `{arn#accountId}`, resource: `{resource}`" }, { "conditions": [], "type": "error", "error": "Test case failed: `{Bucket}` is a valid ARN but parseArn failed to parse it." } ] }, { "documentation": "region is set", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Region" } ] }, { "fn": "aws.partition", "argv": [ "{Region}" ], "assign": "partitionResult" } ], "rules": [ { "documentation": "bucket is set, handle bucket specific endpoints", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Bucket" } ] } ], "rules": [ { "documentation": "bucket is set and is an arn", "conditions": [ { "fn": "aws.parseArn", "argv": [ { "ref": "Bucket" } ], "assign": "bucketArn" } ], "rules": [ { "conditions": [ { "fn": "getAttr", "argv": [ { "ref": "bucketArn" }, "resourceId[1]" ], "assign": "outpostId" } ], "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ "{outpostId}", "" ] } ], "error": "OutpostId was empty", "type": "error" }, { "conditions": [], "endpoint": { "url": "https://{outpostId}-{bucketArn#accountId}.{bucketArn#region}.{partitionResult#dnsSuffix}" }, "type": "endpoint" } ], "type": "tree" }, { "conditions": [], "error": "Invalid ARN: outpostId was not set", "type": "error" } ], "type": "tree" }, { "documentation": "bucket can be used as a host label", "conditions": [ { "fn": "isValidHostLabel", "argv": [ "{Bucket}", false ] } ], "endpoint": { "url": "https://{Bucket}.{Region}.amazonaws.com" }, "type": "endpoint" }, { "conditions": [], "documentation": "fallback: use bucket in the path", "endpoint": { "url": "https://{Region}.amazonaws.com/{Bucket}" }, "type": "endpoint" } ], "type": "tree" }, { "documentation": "region is set, bucket is not", "conditions": [], "endpoint": { "url": "https://{Region}.{partitionResult#dnsSuffix}" }, "type": "endpoint" } ], "type": "tree" }, { "documentation": "fallback when region is unset", "conditions": [], "error": "Region must be set to resolve a valid endpoint", "type": "error" } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/parse-url.json000066400000000000000000000045251456575232400313100ustar00rootroot00000000000000{ "version": "1.3", "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region" }, "Endpoint": { "type": "string" } }, "rules": [ { "documentation": "endpoint is set and is a valid URL", "conditions": [ { "fn": "isSet", "argv": [ { "ref": "Endpoint" } ] }, { "fn": "parseURL", "argv": [ "{Endpoint}" ], "assign": "url" } ], "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ { "fn": "getAttr", "argv": [ { "ref": "url" }, "isIp" ] }, true ] } ], "endpoint": { "url": "{url#scheme}://{url#authority}{url#normalizedPath}is-ip-addr" }, "type": "endpoint" }, { "conditions": [ { "fn": "stringEquals", "argv": [ "{url#path}", "/port" ] } ], "endpoint": { "url": "{url#scheme}://{url#authority}/uri-with-port" }, "type": "endpoint" }, { "conditions": [ { "fn": "stringEquals", "argv": [ 
"{url#normalizedPath}", "/" ] } ], "endpoint": { "url": "https://{url#scheme}-{url#authority}-nopath.example.com" }, "type": "endpoint" }, { "conditions": [], "endpoint": { "url": "https://{url#scheme}-{url#authority}.example.com/path-is{url#path}" }, "type": "endpoint" } ], "type": "tree" }, { "error": "endpoint was invalid", "conditions": [], "type": "error" } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/partition-fn.json000066400000000000000000000051111456575232400320000ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "required": true }, "PropertyOne": { "type": "boolean" }, "PropertyTwo": { "type": "string" }, "PropertyThree": { "type": "boolean" } }, "rules": [ { "documentation": "base rule", "conditions": [ { "fn": "aws.partition", "argv": [ { "ref": "Region" } ], "assign": "PartResult" } ], "rules": [ { "documentation": "the AWS partition", "conditions": [ { "fn": "stringEquals", "argv": [ "aws", { "fn": "getAttr", "argv": [ { "ref": "PartResult" }, "name" ] } ] } ], "endpoint": { "url": "https://aws-partition.{Region}.{PartResult#dnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "{Region}" } ], "meta": { "baseSuffix": "{PartResult#dnsSuffix}", "dualStackSuffix": "{PartResult#dualStackDnsSuffix}" } } }, "type": "endpoint" }, { "documentation": "the other partitions", "conditions": [], "endpoint": { "url": "https://{PartResult#name}.{Region}.{PartResult#dnsSuffix}", "properties": { "authSchemes": [ { "name": "sigv4", "signingName": "serviceName", "signingRegion": "{Region}" } ], "meta": { "baseSuffix": "{PartResult#dnsSuffix}", "dualStackSuffix": "{PartResult#dualStackDnsSuffix}" } } }, "type": "endpoint" }, { "conditions": [], "error": "no rules matched", "type": "error" } ], "type": "tree" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/region-override.json000066400000000000000000000013611456575232400324710ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "required": true } }, "rules": [ { "documentation": "override rule for the local pseduo region", "conditions": [ { "fn": "stringEquals", "argv": [ "local", "{Region}" ] } ], "endpoint": { "url": "http://localhost:8080" }, "type": "endpoint" }, { "documentation": "base rule", "conditions": [], "endpoint": { "url": "https://{Region}.someservice.amazonaws.com" }, "type": "endpoint" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/substring.json000066400000000000000000000040751456575232400314160ustar00rootroot00000000000000{ "parameters": { "TestCaseId": { "type": "string", "required": true, "documentation": "Test case id used to select the test case to use" }, "Input": { "type": "string", "required": true, "documentation": "the input used to test substring" } }, "rules": [ { "documentation": "Substring from beginning of input", "conditions": [ { "fn": "stringEquals", "argv": [ "{TestCaseId}", "1" ] }, { "fn": "substring", "argv": [ "{Input}", 0, 4, false ], "assign": "output" } ], "error": "The value is: `{output}`", "type": "error" }, { "documentation": "Substring from end of input", "conditions": [ { "fn": "stringEquals", "argv": [ "{TestCaseId}", "2" ] }, { "fn": "substring", "argv": [ "{Input}", 0, 4, true ], "assign": "output" } ], "error": "The value is: `{output}`", "type": "error" }, { "documentation": "Substring the middle of the 
string", "conditions": [ { "fn": "stringEquals", "argv": [ "{TestCaseId}", "3" ] }, { "fn": "substring", "argv": [ "{Input}", 1, 3, false ], "assign": "output" } ], "error": "The value is: `{output}`", "type": "error" }, { "documentation": "fallback when no tests match", "conditions": [], "error": "No tests matched", "type": "error" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/uri-encode.json000066400000000000000000000017351456575232400314300ustar00rootroot00000000000000{ "version": "1.3", "parameters": { "TestCaseId": { "type": "string", "required": true, "documentation": "Test case id used to select the test case to use" }, "Input": { "type": "string", "required": true, "documentation": "the input used to test uriEncode" } }, "rules": [ { "documentation": "uriEncode on input", "conditions": [ { "fn": "stringEquals", "argv": [ "{TestCaseId}", "1" ] }, { "fn": "uriEncode", "argv": [ "{Input}" ], "assign": "output" } ], "error": "The value is: `{output}`", "type": "error" }, { "documentation": "fallback when no tests match", "conditions": [], "error": "No tests matched", "type": "error" } ] }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/resources/valid-rules/valid-hostlabel.json000066400000000000000000000024161456575232400324450ustar00rootroot00000000000000{ "parameters": { "Region": { "type": "string", "builtIn": "AWS::Region", "required": true, "documentation": "The region to dispatch this request, eg. `us-east-1`." } }, "rules": [ { "documentation": "Template the region into the URI when region is set", "conditions": [ { "fn": "isValidHostLabel", "argv": [ { "ref": "Region" }, false ] } ], "endpoint": { "url": "https://{Region}.amazonaws.com" }, "type": "endpoint" }, { "documentation": "Template the region into the URI when region is set", "conditions": [ { "fn": "isValidHostLabel", "argv": [ { "ref": "Region" }, true ] } ], "endpoint": { "url": "https://{Region}-subdomains.amazonaws.com" }, "type": "endpoint" }, { "documentation": "Region was not a valid host label", "conditions": [], "error": "Invalid hostlabel", "type": "error" } ], "version": "1.3" }aws-crt-python-0.20.4+dfsg/crt/aws-c-sdkutils/tests/sdkutils_test.c000066400000000000000000000007041456575232400253020ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ #include <aws/sdkutils/sdkutils.h> #include <aws/testing/aws_test_harness.h> static int s_sdkutils_library_test(struct aws_allocator *allocator, void *ctx) { (void)ctx; aws_sdkutils_library_init(allocator); aws_sdkutils_library_clean_up(); return 0; } AWS_TEST_CASE(sdkutils_library_test, s_sdkutils_library_test) aws-crt-python-0.20.4+dfsg/crt/aws-checksums/000077500000000000000000000000001456575232400207755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.builder/000077500000000000000000000000001456575232400225015ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.builder/actions/000077500000000000000000000000001456575232400241415ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.builder/actions/clang-tidy.py000066400000000000000000000017261456575232400265540ustar00rootroot00000000000000 import Builder import glob, os, sys class ClangTidy(Builder.Action): def run(self, env): sh = env.shell clang_tidy = env.find_llvm_tool('clang-tidy')[0] if not clang_tidy: print("No clang-tidy executable could be found, installing...") sh.exec("sudo", "apt", "install", "-y", "clang-tidy-9") clang_tidy = env.find_llvm_tool('clang-tidy')[0] if not clang_tidy: print("No clang-tidy executable could be found") sys.exit(1) source_dir = sh.cwd() build_dir = os.path.join(source_dir, 'build') sources = [os.path.join(source_dir, file) for file in glob.glob( 'source/**/*.c') if not ('windows' in file or 'android' in file)] return [ Builder.DownloadDependencies(), Builder.CMakeBuild(), Builder.Script([ [clang_tidy, '-p', build_dir] + sources ]) ] aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.clang-format000066400000000000000000000031611456575232400233510ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: Mozilla AlignAfterOpenBracket: AlwaysBreak AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlines: Right AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Attach BreakBeforeTernaryOperators: true BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 DerivePointerAlignment: false IncludeBlocks: Preserve IndentCaseLabels: true IndentPPDirectives: AfterHash IndentWidth: 4 IndentWrappedFunctionNames: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 100000 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 4 UseTab: Never ... 
aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.clang-tidy000066400000000000000000000015501456575232400230320ustar00rootroot00000000000000--- Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter,-readability-avoid-const-params-in-decls,-readability-else-after-return,-readability-isolate-declaration,-readability-uppercase-literal-suffix' WarningsAsErrors: '*' HeaderFilterRegex: '.*\.[h|inl]$' FormatStyle: 'file' CheckOptions: - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: google-runtime-int.TypeSufix value: '_t' - key: fuchsia-restrict-system-includes.Includes value: '*,-stdint.h,-stdbool.h,-assert.h' ... aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/000077500000000000000000000000001456575232400223355ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400245205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/ISSUE_TEMPLATE/bug-report.yml000066400000000000000000000045251456575232400273370ustar00rootroot00000000000000--- name: "🐛 Bug Report" description: Report a bug title: "(short issue description)" labels: [bug, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the bug description: What is the problem? A clear and concise description of the bug. validations: required: true - type: textarea id: expected attributes: label: Expected Behavior description: | What did you expect to happen? validations: required: true - type: textarea id: current attributes: label: Current Behavior description: | What actually happened? Please include full errors, uncaught exceptions, stack traces, and relevant logs. If service responses are relevant, please include wire logs. validations: required: true - type: textarea id: reproduction attributes: label: Reproduction Steps description: | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. For more complex issues provide a repo with the smallest sample that reproduces the bug. Avoid including business logic or unrelated code, it makes diagnosis more difficult. The code sample should be an SSCCE. See http://sscce.org/ for details. In short, please provide a code sample that we can copy/paste, run and reproduce. validations: required: true - type: textarea id: solution attributes: label: Possible Solution description: | Suggest a fix/reason for the bug validations: required: false - type: textarea id: context attributes: label: Additional Information/Context description: | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. 
validations: required: false - type: input id: aws-checksums-version attributes: label: aws-checksums version used validations: required: true - type: input id: compiler-version attributes: label: Compiler and version used validations: required: true - type: input id: operating-system attributes: label: Operating System and version validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003321456575232400265060ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: 💬 General Question url: https://github.com/awslabs/aws-checksums/discussions/categories/q-a about: Please ask and answer questions as a discussion thread aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000011141456575232400301110ustar00rootroot00000000000000--- name: "📕 Documentation Issue" description: Report an issue in the API Reference documentation or Developer Guide title: "(short issue description)" labels: [documentation, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the issue description: A clear and concise description of the issue. validations: required: true - type: textarea id: links attributes: label: Links description: | Include links to affected documentation page(s). validations: required: true aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/ISSUE_TEMPLATE/feature-request.yml000066400000000000000000000026231456575232400303670ustar00rootroot00000000000000--- name: 🚀 Feature Request description: Suggest an idea for this project title: "(short issue description)" labels: [feature-request, needs-triage] assignees: [] body: - type: textarea id: description attributes: label: Describe the feature description: A clear and concise description of the feature you are proposing. validations: required: true - type: textarea id: use-case attributes: label: Use Case description: | Why do you need this feature? For example: "I'm always frustrated when..." validations: required: true - type: textarea id: solution attributes: label: Proposed Solution description: | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. validations: required: false - type: textarea id: other attributes: label: Other Information description: | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. validations: required: false - type: checkboxes id: ack attributes: label: Acknowledgements options: - label: I may be able to implement this feature request required: false - label: This feature might incur a breaking change required: false aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000002511456575232400261340ustar00rootroot00000000000000*Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/workflows/000077500000000000000000000000001456575232400243725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/workflows/ci.yml000066400000000000000000000203521456575232400255120ustar00rootroot00000000000000name: CI on: push: branches-ignore: - 'main' env: BUILDER_VERSION: v0.9.55 BUILDER_SOURCE: releases BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-checksums LINUX_BASE_IMAGE: ubuntu-18-x64 RUN: ${{ github.run_id }}-${{ github.run_number }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: us-east-1 jobs: linux-compat: runs-on: ubuntu-22.04 # latest strategy: fail-fast: false matrix: image: - manylinux1-x64 - manylinux1-x86 - manylinux2014-x64 - manylinux2014-x86 - al2-x64 - fedora-34-x64 - opensuse-leap - rhel8-x64 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ matrix.image }} build -p ${{ env.PACKAGE_NAME }} linux-compiler-compat: runs-on: ubuntu-22.04 # latest strategy: matrix: compiler: - clang-3 - clang-6 - clang-8 - clang-9 - clang-10 - clang-11 - gcc-4.8 - gcc-5 - gcc-6 - gcc-7 - gcc-8 steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=${{ matrix.compiler }} clang-sanitizers: runs-on: ubuntu-22.04 # latest strategy: matrix: sanitizers: [",thread", ",address,undefined"] steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --compiler=clang-11 --cmake-extra=-DENABLE_SANITIZERS=ON --cmake-extra=-DSANITIZERS="${{ matrix.sanitizers }}" linux-shared-libs: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON linux-no-cpu-extensions: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} 
aws-crt-${{ env.LINUX_BASE_IMAGE }} build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DUSE_CPU_EXTENSIONS=OFF windows: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} windows-vc14: runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 (v14.0) strategy: matrix: arch: [x86, x64] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --target windows-${{ matrix.arch }} --compiler msvc-14 windows-shared-libs: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DBUILD_SHARED_LIBS=ON windows-no-cpu-extensions: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DUSE_CPU_EXTENSIONS=OFF windows-app-verifier: runs-on: windows-2022 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p ${{ env.PACKAGE_NAME }} run_tests=false --cmake-extra=-DBUILD_TESTING=ON - name: Run and check AppVerifier run: | python .\aws-checksums\build\deps\aws-c-common\scripts\appverifier_ctest.py --build_directory .\aws-checksums\build\aws-checksums osx: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} osx-no-cpu-extensions: runs-on: macos-12 # latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DUSE_CPU_EXTENSIONS=OFF cross_compile: name: Cross Compile ${{matrix.arch}} runs-on: ubuntu-22.04 # latest strategy: matrix: arch: [linux-armv6, linux-armv7, linux-arm64, android-armv7] steps: - name: Build ${{ env.PACKAGE_NAME }} run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder ./builder build -p ${{ env.PACKAGE_NAME }} --target=${{matrix.arch}} 
# Test downstream repos. # This should not be required because we can run into a chicken and egg problem if there is a change that needs some fix in a downstream repo. downstream: runs-on: ubuntu-22.04 # latest steps: # We can't use the `uses: docker://image` version yet, GitHub lacks authentication for actions -> packages - name: Build ${{ env.PACKAGE_NAME }} run: | aws s3 cp s3://aws-crt-test-stuff/ci/${{ env.BUILDER_VERSION }}/linux-container-ci.sh ./linux-container-ci.sh && chmod a+x ./linux-container-ci.sh ./linux-container-ci.sh ${{ env.BUILDER_VERSION }} aws-crt-${{ env.LINUX_BASE_IMAGE }} build downstream -p ${{ env.PACKAGE_NAME }} aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/workflows/clang-format.yml000066400000000000000000000004671456575232400274760ustar00rootroot00000000000000name: Lint on: [push] jobs: clang-format: runs-on: ubuntu-20.04 # latest steps: - name: Checkout Sources uses: actions/checkout@v1 - name: clang-format lint uses: DoozyX/clang-format-lint-action@v0.3.1 with: # List of extensions to check extensions: c,h aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/workflows/closed-issue-message.yml000066400000000000000000000013271456575232400311410ustar00rootroot00000000000000name: Closed Issue Message on: issues: types: [closed] jobs: auto_comment: runs-on: ubuntu-latest steps: - uses: aws-actions/closed-issue-message@v1 with: # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN }}" message: | ### ⚠️COMMENT VISIBILITY WARNING⚠️ Comments on closed issues are hard for our team to see. If you need more assistance, please either tag a team member or open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/workflows/handle-stale-discussions.yml000066400000000000000000000006471456575232400320310ustar00rootroot00000000000000name: HandleStaleDiscussions on: schedule: - cron: '0 */4 * * *' discussion_comment: types: [created] jobs: handle-stale-discussions: name: Handle stale discussions runs-on: ubuntu-latest permissions: discussions: write steps: - name: Stale discussions action uses: aws-github-ops/handle-stale-discussions@v1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.github/workflows/stale_issue.yml000066400000000000000000000045011456575232400274350ustar00rootroot00000000000000name: "Close stale issues" # Controls when the action will run. on: schedule: - cron: "*/60 * * * *" jobs: cleanup: runs-on: ubuntu-latest name: Stale issue job steps: - uses: aws-actions/stale-issue-cleanup@v3 with: # Setting messages to an empty string will cause the automation to skip # that category ancient-issue-message: Greetings! Sorry to say but this is a very old issue that is probably not getting as much attention as it deserves. We encourage you to check if this is still an issue in the latest release and if you find that this is still a problem, please feel free to open a new one. stale-issue-message: Greetings! It looks like this issue hasn’t been active in a few days. We encourage you to check if this is still an issue in the latest release. Because it has been a few days since the last update on this, and in the absence of more information, we will be closing this issue soon. 
If you find that this is still a problem, please feel free to provide a comment or add an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. stale-pr-message: Greetings! It looks like this PR hasn’t been active in a few days, add a comment or an upvote to prevent automatic closure, or if the issue is already closed, please feel free to open a new one. # These labels are required stale-issue-label: closing-soon exempt-issue-label: automation-exempt stale-pr-label: closing-soon exempt-pr-label: pr/needs-review response-requested-label: response-requested # Don't set closed-for-staleness label to skip closing very old issues # regardless of label closed-for-staleness-label: closed-for-staleness # Issue timing days-before-stale: 2 days-before-close: 5 days-before-ancient: 36500 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is # the total number of +1, heart, hooray, and rocket reactions # on an issue. minimum-upvotes-to-exempt: 1 repo-token: ${{ secrets.GITHUB_TOKEN }} loglevel: DEBUG # Set dry-run to true to not perform label or close actions. dry-run: false aws-crt-python-0.20.4+dfsg/crt/aws-checksums/.gitignore000066400000000000000000000010461456575232400227660ustar00rootroot00000000000000# IDE Artifacts .metadata .build .idea *.d Debug Release *~ *# *.iml tags .vscode #vim swap file *.swp #compiled python files *.pyc #Vagrant stuff Vagrantfile .vagrant #Mac stuff .DS_Store #doxygen doxygen/html/ doxygen/latex/ #cmake artifacts dependencies _build build _build_* cmake-build* *-build # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Fortran module files *.mod # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app aws-crt-python-0.20.4+dfsg/crt/aws-checksums/CMakeLists.txt000066400000000000000000000112111456575232400235310ustar00rootroot00000000000000cmake_minimum_required (VERSION 3.1) option(STATIC_CRT "Windows specific option that to specify static/dynamic run-time library" OFF) project (aws-checksums C) if (POLICY CMP0069) cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags endif() if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() if (DEFINED CMAKE_INSTALL_PREFIX) file(TO_CMAKE_PATH "${CMAKE_INSTALL_PREFIX}" CMAKE_INSTALL_PREFIX) endif() if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() # This is required in order to append /lib/cmake to each element in CMAKE_PREFIX_PATH set(AWS_MODULE_DIR "/${CMAKE_INSTALL_LIBDIR}/cmake") string(REPLACE ";" "${AWS_MODULE_DIR};" AWS_MODULE_PATH "${CMAKE_PREFIX_PATH}${AWS_MODULE_DIR}") # Append that generated list to the module search path list(APPEND CMAKE_MODULE_PATH ${AWS_MODULE_PATH}) include(AwsCFlags) include(AwsCheckHeaders) include(AwsSharedLibSetup) include(AwsSanitizers) include(CheckCCompilerFlag) include(AwsFindPackage) include(AwsFeatureTests) file(GLOB AWS_CHECKSUMS_HEADERS "include/aws/checksums/*.h" ) file(GLOB AWS_CHECKSUMS_PRIV_HEADERS "include/aws/checksums/private/*.h" ) file(GLOB AWS_CHECKSUMS_SRC "source/*.c" ) if(MSVC) source_group("Header Files\\aws\\checksums" FILES ${AWS_CHECKSUMS_HEADERS}) source_group("Source Files" FILES ${AWS_CHECKSUMS_SRC}) endif() file(GLOB AWS_ARCH_SRC "source/generic/*.c" ) if (USE_CPU_EXTENSIONS) 
if(AWS_ARCH_INTEL) # First, check if inline assembly is available. Inline assembly can also be supported by MSVC if the compiler in use is Clang. if(AWS_HAVE_GCC_INLINE_ASM) file(GLOB AWS_ARCH_SRC "source/intel/asm/*.c" ) elseif (MSVC) file(GLOB AWS_ARCH_SRC "source/intel/visualc/*.c" ) source_group("Source Files\\intel\\visualc" FILES ${AWS_ARCH_SRC}) endif() endif() if (MSVC AND AWS_ARCH_ARM64) file(GLOB AWS_ARCH_SRC "source/arm/*.c" ) source_group("Source Files\\arm" FILES ${AWS_ARCH_SRC}) elseif (AWS_ARCH_ARM64) file(GLOB AWS_ARCH_SRC "source/arm/*.c" ) SET_SOURCE_FILES_PROPERTIES(source/arm/crc32c_arm.c PROPERTIES COMPILE_FLAGS -march=armv8-a+crc ) elseif ((NOT MSVC) AND AWS_ARCH_ARM32) set(CMAKE_REQUIRED_FLAGS "-march=armv8-a+crc -Werror") check_c_source_compiles(" #include int main() { int crc = __crc32d(0, 1); return 0; }" AWS_ARM32_CRC) unset(CMAKE_REQUIRED_FLAGS) if (AWS_ARM32_CRC) file(GLOB AWS_ARCH_SRC "source/arm/*.c" ) SET_SOURCE_FILES_PROPERTIES(source/arm/crc32c_arm.c PROPERTIES COMPILE_FLAGS -march=armv8-a+crc ) endif() endif() endif() file(GLOB CHECKSUMS_COMBINED_HEADERS ${AWS_CHECKSUMS_HEADERS} ${AWS_CHECKSUMS_PRIV_HEADERS} ) file(GLOB CHECKSUMS_COMBINED_SRC ${AWS_CHECKSUMS_SRC} ${AWS_CHECKSUMS_PLATFORM_SOURCE} ${AWS_ARCH_SRC} ) add_library(${PROJECT_NAME} ${CHECKSUMS_COMBINED_HEADERS} ${CHECKSUMS_COMBINED_SRC}) aws_set_common_properties(${PROJECT_NAME}) aws_prepare_symbol_visibility_args(${PROJECT_NAME} "AWS_CHECKSUMS") aws_check_headers(${PROJECT_NAME} ${AWS_CHECKSUMS_HEADERS}) aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) target_include_directories(${PROJECT_NAME} PUBLIC $ $) aws_use_package(aws-c-common) target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) aws_prepare_shared_lib_exports(${PROJECT_NAME}) install(FILES ${AWS_CHECKSUMS_HEADERS} DESTINATION "include/aws/checksums" COMPONENT Development) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/${TARGET_DIR}" NAMESPACE AWS:: COMPONENT Development) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) include(CTest) if (BUILD_TESTING) add_subdirectory(tests) endif () aws-crt-python-0.20.4+dfsg/crt/aws-checksums/LICENSE000066400000000000000000000261351456575232400220110ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/aws-checksums/README.md000066400000000000000000000002471456575232400222570ustar00rootroot00000000000000# aws-checksums Cross-Platform HW accelerated CRC32c and CRC32 with fallback to efficient SW implementations. 
C interface with language bindings for each of our SDKs aws-crt-python-0.20.4+dfsg/crt/aws-checksums/builder.json000066400000000000000000000002771456575232400233240ustar00rootroot00000000000000{ "name": "aws-checksums", "upstream": [ { "name": "aws-c-common" } ], "downstream": [ { "name": "aws-c-event-stream" }, { "name": "aws-c-s3" } ] } aws-crt-python-0.20.4+dfsg/crt/aws-checksums/cmake/000077500000000000000000000000001456575232400220555ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/cmake/AwsSharedLibSetup.cmake000066400000000000000000000035671456575232400264230ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0. set(LIBRARY_DIRECTORY lib) set(RUNTIME_DIRECTORY bin) # Set the default lib installation path on GNU systems with GNUInstallDirs if (UNIX AND NOT APPLE) include(GNUInstallDirs) set(LIBRARY_DIRECTORY ${CMAKE_INSTALL_LIBDIR}) set(RUNTIME_DIRECTORY ${CMAKE_INSTALL_BINDIR}) # this is the absolute dumbest thing in the world, but find_package won't work without it # also I verified this is correctly NOT "lib64" when CMAKE_C_FLAGS includes "-m32" if (${LIBRARY_DIRECTORY} STREQUAL "lib64") set(FIND_LIBRARY_USE_LIB64_PATHS true) endif() endif() function(aws_prepare_shared_lib_exports target) if (BUILD_SHARED_LIBS) install(TARGETS ${target} EXPORT ${target}-targets ARCHIVE DESTINATION ${LIBRARY_DIRECTORY} COMPONENT Development LIBRARY DESTINATION ${LIBRARY_DIRECTORY} NAMELINK_SKIP COMPONENT Runtime RUNTIME DESTINATION ${RUNTIME_DIRECTORY} COMPONENT Runtime) install(TARGETS ${target} EXPORT ${target}-targets LIBRARY DESTINATION ${LIBRARY_DIRECTORY} NAMELINK_ONLY COMPONENT Development) else() install(TARGETS ${target} EXPORT ${target}-targets ARCHIVE DESTINATION ${LIBRARY_DIRECTORY} COMPONENT Development) endif() endfunction() function(aws_prepare_symbol_visibility_args target lib_prefix) if (BUILD_SHARED_LIBS) target_compile_definitions(${target} PUBLIC "-D${lib_prefix}_USE_IMPORT_EXPORT") target_compile_definitions(${target} PRIVATE "-D${lib_prefix}_EXPORTS") endif() endfunction() aws-crt-python-0.20.4+dfsg/crt/aws-checksums/cmake/aws-checksums-config.cmake000066400000000000000000000007731456575232400271060ustar00rootroot00000000000000macro(aws_load_targets type) include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) endmacro() # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. if (BUILD_SHARED_LIBS) if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") aws_load_targets(shared) else() aws_load_targets(static) endif() else() if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") aws_load_targets(static) else() aws_load_targets(shared) endif() endif() aws-crt-python-0.20.4+dfsg/crt/aws-checksums/format-check.sh000077500000000000000000000010141456575232400236730ustar00rootroot00000000000000#!/usr/bin/env bash if [[ -z $CLANG_FORMAT ]] ; then CLANG_FORMAT=clang-format fi if NOT type $CLANG_FORMAT 2> /dev/null ; then echo "No appropriate clang-format found." exit 1 fi FAIL=0 SOURCE_FILES=`find source include tests -type f \( -name '*.h' -o -name '*.c' -o -name '*.inl' \)` for i in $SOURCE_FILES do $CLANG_FORMAT -output-replacements-xml $i | grep -c "<replacement " > /dev/null if [ $? -ne 1 ] then echo "$i failed clang-format check."
FAIL=1 fi done exit $FAIL aws-crt-python-0.20.4+dfsg/crt/aws-checksums/include/000077500000000000000000000000001456575232400224205ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/include/aws/000077500000000000000000000000001456575232400232125ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/include/aws/checksums/000077500000000000000000000000001456575232400251775ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/include/aws/checksums/crc.h000066400000000000000000000022221456575232400261170ustar00rootroot00000000000000#ifndef AWS_CHECKSUMS_CRC_H #define AWS_CHECKSUMS_CRC_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/checksums/exports.h> #include <aws/common/common.h> #include <stdint.h> AWS_PUSH_SANE_WARNING_LEVEL AWS_EXTERN_C_BEGIN /** * The entry point function to perform a CRC32 (Ethernet, gzip) computation. * Selects a suitable implementation based on hardware capabilities. * Pass 0 in the previousCrc32 parameter as an initial value unless continuing * to update a running crc in a subsequent call. */ AWS_CHECKSUMS_API uint32_t aws_checksums_crc32(const uint8_t *input, int length, uint32_t previousCrc32); /** * The entry point function to perform a Castagnoli CRC32c (iSCSI) computation. * Selects a suitable implementation based on hardware capabilities. * Pass 0 in the previousCrc32 parameter as an initial value unless continuing * to update a running crc in a subsequent call. */ AWS_CHECKSUMS_API uint32_t aws_checksums_crc32c(const uint8_t *input, int length, uint32_t previousCrc32); AWS_EXTERN_C_END AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_CHECKSUMS_CRC_H */ aws-crt-python-0.20.4+dfsg/crt/aws-checksums/include/aws/checksums/exports.h000066400000000000000000000021531456575232400270550ustar00rootroot00000000000000#ifndef AWS_CHECKSUMS_EXPORTS_H #define AWS_CHECKSUMS_EXPORTS_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #if defined(AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined(_WIN32) # ifdef AWS_CHECKSUMS_USE_IMPORT_EXPORT # ifdef AWS_CHECKSUMS_EXPORTS # define AWS_CHECKSUMS_API __declspec(dllexport) # else # define AWS_CHECKSUMS_API __declspec(dllimport) # endif /* AWS_CHECKSUMS_EXPORTS */ # else # define AWS_CHECKSUMS_API # endif /* AWS_CHECKSUMS_USE_IMPORT_EXPORT */ #else /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32) */ # if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_CHECKSUMS_USE_IMPORT_EXPORT) && \ defined(AWS_CHECKSUMS_EXPORTS) # define AWS_CHECKSUMS_API __attribute__((visibility("default"))) # else # define AWS_CHECKSUMS_API # endif /* __GNUC__ >= 4 || defined(__clang__) */ #endif /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32) */ #endif /* AWS_CHECKSUMS_EXPORTS_H */ aws-crt-python-0.20.4+dfsg/crt/aws-checksums/include/aws/checksums/private/000077500000000000000000000000001456575232400266515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/include/aws/checksums/private/crc_priv.h000066400000000000000000000021601456575232400306300ustar00rootroot00000000000000#ifndef AWS_CHECKSUMS_PRIVATE_CRC_PRIV_H #define AWS_CHECKSUMS_PRIVATE_CRC_PRIV_H /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #define AWS_CRC32_SIZE_BYTES 4 #include <aws/checksums/exports.h> #include <stdint.h> #ifdef __cplusplus extern "C" { #endif /* Computes CRC32 (Ethernet, gzip, et. al.) using a (slow) reference implementation.
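The crc.h header above is the entire public surface of aws-checksums. The following is a minimal usage sketch, not a file from this archive: it assumes the library and its aws-c-common dependency are built and linked, and the buffer contents are arbitrary illustrative test data. It shows a one-shot call and the running-CRC pattern described in the header comments, where the previous result is passed back in for the next chunk.

#include <aws/checksums/crc.h>

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *str = "The quick brown fox jumps over the lazy dog";
    const uint8_t *data = (const uint8_t *)str;
    int len = (int)strlen(str);

    /* One-shot: pass 0 as the initial previousCrc32 value. */
    uint32_t whole = aws_checksums_crc32c(data, len, 0);

    /* Running CRC: feed the buffer in two chunks, passing the previous
     * result back in; the final value matches the one-shot call. */
    int half = len / 2;
    uint32_t running = aws_checksums_crc32c(data, half, 0);
    running = aws_checksums_crc32c(data + half, len - half, running);

    printf("one-shot 0x%08x, chunked 0x%08x\n", (unsigned)whole, (unsigned)running);
    return whole == running ? 0 : 1;
}

The same calling pattern applies to aws_checksums_crc32; only the polynomial differs.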
*/ AWS_CHECKSUMS_API uint32_t aws_checksums_crc32_sw(const uint8_t *input, int length, uint32_t previousCrc32); /* Computes the Castagnoli CRC32c (iSCSI) using a (slow) reference implementation. */ AWS_CHECKSUMS_API uint32_t aws_checksums_crc32c_sw(const uint8_t *input, int length, uint32_t previousCrc32c); /* Computes the Castagnoli CRC32c (iSCSI). */ AWS_CHECKSUMS_API uint32_t aws_checksums_crc32c_hw(const uint8_t *data, int length, uint32_t previousCrc32); /* Computes CRC32 (Ethernet, gzip, et. al.) using crc instructions. */ AWS_CHECKSUMS_API uint32_t aws_checksums_crc32_hw(const uint8_t *data, int length, uint32_t previousCrc32); #ifdef __cplusplus } #endif #endif /* AWS_CHECKSUMS_PRIVATE_CRC_PRIV_H */ aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/000077500000000000000000000000001456575232400222755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/arm/000077500000000000000000000000001456575232400230545ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/arm/crc32c_arm.c000066400000000000000000000046321456575232400251430ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ /* No intrinsics defined for 32-bit MSVC */ #if (defined(_M_ARM64) || defined(__aarch64__) || defined(__arm__)) # include <aws/checksums/private/crc_priv.h> # ifdef _M_ARM64 # include <intrin.h> # define PREFETCH(p) __prefetch(p) # else # include <arm_acle.h> # define PREFETCH(p) __builtin_prefetch(p) # endif uint32_t aws_checksums_crc32c_hw(const uint8_t *data, int length, uint32_t previousCrc32) { uint32_t crc = ~previousCrc32; // Align data if it's not aligned while (((uintptr_t)data & 7) && length > 0) { crc = __crc32cb(crc, *(uint8_t *)data); data++; length--; } while (length >= 64) { PREFETCH(data + 384); uint64_t *d = (uint64_t *)data; crc = __crc32cd(crc, d[0]); crc = __crc32cd(crc, d[1]); crc = __crc32cd(crc, d[2]); crc = __crc32cd(crc, d[3]); crc = __crc32cd(crc, d[4]); crc = __crc32cd(crc, d[5]); crc = __crc32cd(crc, d[6]); crc = __crc32cd(crc, d[7]); data += 64; length -= 64; } while (length >= 8) { crc = __crc32cd(crc, *(uint64_t *)data); data += 8; length -= 8; } while (length > 0) { crc = __crc32cb(crc, *(uint8_t *)data); data++; length--; } return ~crc; } uint32_t aws_checksums_crc32_hw(const uint8_t *data, int length, uint32_t previousCrc32) { uint32_t crc = ~previousCrc32; // Align data if it's not aligned while (((uintptr_t)data & 7) && length > 0) { crc = __crc32b(crc, *(uint8_t *)data); data++; length--; } while (length >= 64) { PREFETCH(data + 384); uint64_t *d = (uint64_t *)data; crc = __crc32d(crc, d[0]); crc = __crc32d(crc, d[1]); crc = __crc32d(crc, d[2]); crc = __crc32d(crc, d[3]); crc = __crc32d(crc, d[4]); crc = __crc32d(crc, d[5]); crc = __crc32d(crc, d[6]); crc = __crc32d(crc, d[7]); data += 64; length -= 64; } while (length >= 8) { crc = __crc32d(crc, *(uint64_t *)data); data += 8; length -= 8; } while (length > 0) { crc = __crc32b(crc, *(uint8_t *)data); data++; length--; } return ~crc; } #endif aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/crc.c000066400000000000000000000023451456575232400232140ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0.
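crc_priv.h and crc32c_arm.c above expose a table-driven software path and a hardware path for each checksum. The sketch below is again an illustration rather than an archive member; it includes the private header directly only for demonstration and assumes a build in which the hardware variants were compiled in and a CPU whose CRC instructions are actually present, since calling aws_checksums_crc32c_hw on unsupported hardware would fault. The public dispatcher in crc.c, which follows, normally makes that choice automatically.

#include <aws/checksums/private/crc_priv.h>

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint8_t buf[1024];
    for (int i = 0; i < (int)sizeof(buf); ++i) {
        buf[i] = (uint8_t)i; /* deterministic filler pattern */
    }

    /* Portable table-driven reference implementation. */
    uint32_t sw = aws_checksums_crc32c_sw(buf, (int)sizeof(buf), 0);

    /* Hardware implementation (e.g. SSE4.2 crc32 or ARMv8 CRC instructions). */
    uint32_t hw = aws_checksums_crc32c_hw(buf, (int)sizeof(buf), 0);

    printf("sw 0x%08x hw 0x%08x %s\n", (unsigned)sw, (unsigned)hw, sw == hw ? "match" : "MISMATCH");
    return sw == hw ? 0 : 1;
}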
*/ #include #include #include static uint32_t (*s_crc32c_fn_ptr)(const uint8_t *input, int length, uint32_t previousCrc32) = 0; static uint32_t (*s_crc32_fn_ptr)(const uint8_t *input, int length, uint32_t previousCrc32) = 0; uint32_t aws_checksums_crc32(const uint8_t *input, int length, uint32_t previousCrc32) { if (AWS_UNLIKELY(!s_crc32_fn_ptr)) { if (aws_cpu_has_feature(AWS_CPU_FEATURE_ARM_CRC)) { s_crc32_fn_ptr = aws_checksums_crc32_hw; } else { s_crc32_fn_ptr = aws_checksums_crc32_sw; } } return s_crc32_fn_ptr(input, length, previousCrc32); } uint32_t aws_checksums_crc32c(const uint8_t *input, int length, uint32_t previousCrc32) { if (AWS_UNLIKELY(!s_crc32c_fn_ptr)) { if (aws_cpu_has_feature(AWS_CPU_FEATURE_SSE_4_2) || aws_cpu_has_feature(AWS_CPU_FEATURE_ARM_CRC)) { s_crc32c_fn_ptr = aws_checksums_crc32c_hw; } else { s_crc32c_fn_ptr = aws_checksums_crc32c_sw; } } return s_crc32c_fn_ptr(input, length, previousCrc32); } aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/crc_sw.c000066400000000000000000004005471456575232400237330ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* The Ethernet, gzip, et.al CRC32 polynomial (reverse of 0x04C11DB7) */ #define CRC32_POLYNOMIAL 0xEDB88320 /* The Castagnoli, iSCSI CRC32c polynomial (reverse of 0x1EDC6F41) */ #define CRC32C_POLYNOMIAL 0x82F63B78 /** CRC32 (Ethernet, gzip) lookup table for slice-by-4/8/16 */ const uint32_t CRC32_TABLE[16][256] = { { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, /* [0][0x08]*/ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, /* [0][0x10]*/ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, /* [0][0x18]*/ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, /* [0][0x20]*/ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, /* [0][0x28]*/ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, /* [0][0x30]*/ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, /* [0][0x38]*/ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, /* [0][0x40]*/ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, /* [0][0x48]*/ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, /* [0][0x50]*/ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, /* [0][0x58]*/ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, /* [0][0x60]*/ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, /* [0][0x68]*/ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, /* [0][0x70]*/ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, /* [0][0x78]*/ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, /* [0][0x80]*/ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, /* [0][0x88]*/ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, /* [0][0x90]*/ 0xF00F9344, 0x8708A3D2, 
0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, /* [0][0x98]*/ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, /* [0][0xa0]*/ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, /* [0][0xa8]*/ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, /* [0][0xb0]*/ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, /* [0][0xb8]*/ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, /* [0][0xc0]*/ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, /* [0][0xc8]*/ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, /* [0][0xd0]*/ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, /* [0][0xd8]*/ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, /* [0][0xe0]*/ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, /* [0][0xe8]*/ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, /* [0][0xf0]*/ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, /* [0][0xf8]*/ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D /* [0][0x100]*/ }, { 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, 0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7, /* [1][0x08]*/ 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF, /* [1][0x10]*/ 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192, 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 0x05838496, /* [1][0x18]*/ 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A, 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, /* [1][0x20]*/ 0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761, 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, /* [1][0x28]*/ 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69, 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D, /* [1][0x30]*/ 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530, 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034, /* [1][0x38]*/ 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, 0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C, /* [1][0x40]*/ 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2, /* [1][0x48]*/ 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA, /* [1][0x50]*/ 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97, 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, /* [1][0x58]*/ 0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F, 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, /* [1][0x60]*/ 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864, 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, /* [1][0x68]*/ 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C, 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768, /* [1][0x70]*/ 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, 0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31, /* [1][0x78]*/ 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, 0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539, /* [1][0x80]*/ 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C, /* [1][0x88]*/ 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180, 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, /* [1][0x90]*/ 0x71418A1A, 
0x685ABB5B, 0x4377E898, 0x5A6CD9D9, 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, /* [1][0x98]*/ 0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1, 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, /* [1][0xa0]*/ 0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A, 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E, /* [1][0xa8]*/ 0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522, 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026, /* [1][0xb0]*/ 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F, /* [1][0xb8]*/ 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, 0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277, /* [1][0xc0]*/ 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D, 0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189, /* [1][0xc8]*/ 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85, 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81, /* [1][0xd0]*/ 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC, 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, /* [1][0xd8]*/ 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0, /* [1][0xe0]*/ 0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F, 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B, /* [1][0xe8]*/ 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27, 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23, /* [1][0xf0]*/ 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E, 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A, /* [1][0xf8]*/ 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876, 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72 /* [1][0x100]*/ }, { 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685, /* [2][0x08]*/ 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D, /* [2][0x10]*/ 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29, 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5, /* [2][0x18]*/ 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91, 0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D, /* [2][0x20]*/ 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9, 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, /* [2][0x28]*/ 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901, 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD, /* [2][0x30]*/ 0x246BE590, 0x25A98FA7, 0x27EF31FE, 0x262D5BC9, 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315, /* [2][0x38]*/ 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD, /* [2][0x40]*/ 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399, 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45, /* [2][0x48]*/ 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, /* [2][0x50]*/ 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9, 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, /* [2][0x58]*/ 0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151, 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, /* [2][0x60]*/ 0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579, 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, /* [2][0x68]*/ 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1, 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D, /* [2][0x70]*/ 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, 0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5, /* [2][0x78]*/ 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, 0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D, /* [2][0x80]*/ 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, 0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05, /* [2][0x88]*/ 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461, 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, /* [2][0x90]*/ 
0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9, 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, /* [2][0x98]*/ 0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711, 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, /* [2][0xa0]*/ 0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339, 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5, /* [2][0xa8]*/ 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281, 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D, /* [2][0xb0]*/ 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895, /* [2][0xb8]*/ 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, 0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D, /* [2][0xc0]*/ 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819, 0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5, /* [2][0xc8]*/ 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1, 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D, /* [2][0xd0]*/ 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69, 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, /* [2][0xd8]*/ 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1, 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D, /* [2][0xe0]*/ 0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9, 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625, /* [2][0xe8]*/ 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41, 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D, /* [2][0xf0]*/ 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89, 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555, /* [2][0xf8]*/ 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31, 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED /* [2][0x100]*/ }, { 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9, /* [3][0x08]*/ 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701, 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056, /* [3][0x10]*/ 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871, 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26, /* [3][0x18]*/ 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E, 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, /* [3][0x20]*/ 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0, 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, /* [3][0x28]*/ 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F, 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68, /* [3][0x30]*/ 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F, 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018, /* [3][0x38]*/ 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, 0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7, /* [3][0x40]*/ 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084, /* [3][0x48]*/ 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, 0xD1C2E785, 0x697E80E0, 0x7BCB2F0E, 0xC377486B, /* [3][0x50]*/ 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C, 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, /* [3][0x58]*/ 0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3, 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, /* [3][0x60]*/ 0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED, 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, /* [3][0x68]*/ 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002, 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755, /* [3][0x70]*/ 0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, 0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825, /* [3][0x78]*/ 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, 0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA, /* [3][0x80]*/ 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5, 0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82, /* [3][0x88]*/ 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A, 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, /* 
[3][0x90]*/ 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A, 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, /* [3][0x98]*/ 0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5, 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, /* [3][0xa0]*/ 0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB, 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC, /* [3][0xa8]*/ 0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04, 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953, /* [3][0xb0]*/ 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623, /* [3][0xb8]*/ 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, 0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC, /* [3][0xc0]*/ 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8, 0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF, /* [3][0xc8]*/ 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907, 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50, /* [3][0xd0]*/ 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677, 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, /* [3][0xd8]*/ 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98, 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF, /* [3][0xe0]*/ 0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981, /* [3][0xe8]*/ 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639, 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E, /* [3][0xf0]*/ 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949, 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E, /* [3][0xf8]*/ 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6, 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1 /* [3][0x100]*/ }, { 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10, /* [4][0x08]*/ 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111, 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1, /* [4][0x10]*/ 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52, 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92, /* [4][0x18]*/ 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693, 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, /* [4][0x20]*/ 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4, 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, /* [4][0x28]*/ 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15, 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5, /* [4][0x30]*/ 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256, 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496, /* [4][0x38]*/ 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, 0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57, /* [4][0x40]*/ 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459, /* [4][0x48]*/ 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, /* [4][0x50]*/ 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B, 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, /* [4][0x58]*/ 0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA, 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A, /* [4][0x60]*/ 0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D, 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, /* [4][0x68]*/ 0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C, 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C, /* [4][0x70]*/ 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, 0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF, /* [4][0x78]*/ 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, 0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E, /* [4][0x80]*/ 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, 0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82, /* [4][0x88]*/ 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183, 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, 
/* [4][0x90]*/ 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0, 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00, /* [4][0x98]*/ 0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601, 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, /* [4][0xa0]*/ 0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546, 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386, /* [4][0xa8]*/ 0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87, 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847, /* [4][0xb0]*/ 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404, /* [4][0xb8]*/ 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, 0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5, /* [4][0xc0]*/ 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B, 0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB, /* [4][0xc8]*/ 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA, 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A, /* [4][0xd0]*/ 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589, 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, /* [4][0xd8]*/ 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48, 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888, /* [4][0xe0]*/ 0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF, /* [4][0xe8]*/ 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE, 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E, /* [4][0xf0]*/ 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D, 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, /* [4][0xf8]*/ 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C, 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C /* [4][0x100]*/ }, { 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8, /* [5][0x08]*/ 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5, /* [5][0x10]*/ 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035, 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223, /* [5][0x18]*/ 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258, 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, /* [5][0x20]*/ 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798, 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, /* [5][0x28]*/ 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5, 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, /* [5][0x30]*/ 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503, 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715, /* [5][0x38]*/ 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578, /* [5][0x40]*/ 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4, /* [5][0x48]*/ 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, /* [5][0x50]*/ 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59, 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, /* [5][0x58]*/ 0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834, 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22, /* [5][0x60]*/ 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4, 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, /* [5][0x68]*/ 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F, /* [5][0x70]*/ 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79, /* [5][0x78]*/ 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, 0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14, /* [5][0x80]*/ 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676, 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460, /* [5][0x88]*/ 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B, 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 
0xEFE6A60D, /* [5][0x90]*/ 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED, 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, /* [5][0x98]*/ 0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680, 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, /* [5][0xa0]*/ 0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340, 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156, /* [5][0xa8]*/ 0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D, 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B, /* [5][0xb0]*/ 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD, /* [5][0xb8]*/ 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, 0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0, /* [5][0xc0]*/ 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A, 0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C, /* [5][0xc8]*/ 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77, 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61, /* [5][0xd0]*/ 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81, 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, /* [5][0xd8]*/ 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC, 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA, /* [5][0xe0]*/ 0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C, 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A, /* [5][0xe8]*/ 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41, 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957, /* [5][0xf0]*/ 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7, 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1, /* [5][0xf8]*/ 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA, 0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC /* [5][0x100]*/ }, { 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E, /* [6][0x08]*/ 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA, 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9, /* [6][0x10]*/ 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653, 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240, /* [6][0x18]*/ 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834, 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, /* [6][0x20]*/ 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301, 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, /* [6][0x28]*/ 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66, 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975, /* [6][0x30]*/ 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC, /* [6][0x38]*/ 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB, /* [6][0x40]*/ 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7, /* [6][0x48]*/ 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590, /* [6][0x50]*/ 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A, 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, /* [6][0x58]*/ 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D, 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, /* [6][0x60]*/ 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678, 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, /* [6][0x68]*/ 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F, 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C, /* [6][0x70]*/ 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, 0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5, /* [6][0x78]*/ 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, 0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2, /* [6][0x80]*/ 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C, /* [6][0x88]*/ 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08, 0x4FB68086, 0xE9C18B32, 
0xD82991AF, 0x7E5E9A1B, /* [6][0x90]*/ 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1, 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, /* [6][0x98]*/ 0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6, 0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5, /* [6][0xa0]*/ 0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3, 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0, /* [6][0xa8]*/ 0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794, 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387, /* [6][0xb0]*/ 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E, /* [6][0xb8]*/ 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, 0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49, /* [6][0xc0]*/ 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516, 0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105, /* [6][0xc8]*/ 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71, 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62, /* [6][0xd0]*/ 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8, 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, /* [6][0xd8]*/ 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC, /* [6][0xe0]*/ 0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, 0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899, /* [6][0xe8]*/ 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED, 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE, /* [6][0xf0]*/ 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044, 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457, /* [6][0xf8]*/ 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23, 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30 /* [6][0x100]*/ }, { 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919, /* [7][0x08]*/ 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC, /* [7][0x10]*/ 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8, 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832, /* [7][0x18]*/ 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D, 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, /* [7][0x20]*/ 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5, 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, /* [7][0x28]*/ 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00, 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA, /* [7][0x30]*/ 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E, 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64, /* [7][0x38]*/ 0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B, 0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1, /* [7][0x40]*/ 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4, /* [7][0x48]*/ 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041, /* [7][0x50]*/ 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425, 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, /* [7][0x58]*/ 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90, 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, /* [7][0x60]*/ 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758, 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, /* [7][0x68]*/ 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED, 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217, /* [7][0x70]*/ 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889, /* [7][0x78]*/ 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, 0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C, /* [7][0x80]*/ 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3, /* [7][0x88]*/ 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C, 0x04949095, 
0xC83E900B, 0x46B197E8, 0x8A1B9776, /* [7][0x90]*/ 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312, 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, /* [7][0x98]*/ 0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7, 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, /* [7][0xa0]*/ 0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F, 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95, /* [7][0xa8]*/ 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA, 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520, /* [7][0xb0]*/ 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE, /* [7][0xb8]*/ 0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, 0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B, /* [7][0xc0]*/ 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4, 0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E, /* [7][0xc8]*/ 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61, 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B, /* [7][0xd0]*/ 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF, 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, /* [7][0xd8]*/ 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A, 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0, /* [7][0xe0]*/ 0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78, /* [7][0xe8]*/ 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937, 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD, /* [7][0xf0]*/ 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9, 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, /* [7][0xf8]*/ 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C, 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6 /* [7][0x100]*/ }, { 0x00000000, 0x177B1443, 0x2EF62886, 0x398D3CC5, 0x5DEC510C, 0x4A97454F, 0x731A798A, 0x64616DC9, /* [8][0x08]*/ 0xBBD8A218, 0xACA3B65B, 0x952E8A9E, 0x82559EDD, 0xE634F314, 0xF14FE757, 0xC8C2DB92, 0xDFB9CFD1, /* [8][0x10]*/ 0xACC04271, 0xBBBB5632, 0x82366AF7, 0x954D7EB4, 0xF12C137D, 0xE657073E, 0xDFDA3BFB, 0xC8A12FB8, /* [8][0x18]*/ 0x1718E069, 0x0063F42A, 0x39EEC8EF, 0x2E95DCAC, 0x4AF4B165, 0x5D8FA526, 0x640299E3, 0x73798DA0, /* [8][0x20]*/ 0x82F182A3, 0x958A96E0, 0xAC07AA25, 0xBB7CBE66, 0xDF1DD3AF, 0xC866C7EC, 0xF1EBFB29, 0xE690EF6A, /* [8][0x28]*/ 0x392920BB, 0x2E5234F8, 0x17DF083D, 0x00A41C7E, 0x64C571B7, 0x73BE65F4, 0x4A335931, 0x5D484D72, /* [8][0x30]*/ 0x2E31C0D2, 0x394AD491, 0x00C7E854, 0x17BCFC17, 0x73DD91DE, 0x64A6859D, 0x5D2BB958, 0x4A50AD1B, /* [8][0x38]*/ 0x95E962CA, 0x82927689, 0xBB1F4A4C, 0xAC645E0F, 0xC80533C6, 0xDF7E2785, 0xE6F31B40, 0xF1880F03, /* [8][0x40]*/ 0xDE920307, 0xC9E91744, 0xF0642B81, 0xE71F3FC2, 0x837E520B, 0x94054648, 0xAD887A8D, 0xBAF36ECE, /* [8][0x48]*/ 0x654AA11F, 0x7231B55C, 0x4BBC8999, 0x5CC79DDA, 0x38A6F013, 0x2FDDE450, 0x1650D895, 0x012BCCD6, /* [8][0x50]*/ 0x72524176, 0x65295535, 0x5CA469F0, 0x4BDF7DB3, 0x2FBE107A, 0x38C50439, 0x014838FC, 0x16332CBF, /* [8][0x58]*/ 0xC98AE36E, 0xDEF1F72D, 0xE77CCBE8, 0xF007DFAB, 0x9466B262, 0x831DA621, 0xBA909AE4, 0xADEB8EA7, /* [8][0x60]*/ 0x5C6381A4, 0x4B1895E7, 0x7295A922, 0x65EEBD61, 0x018FD0A8, 0x16F4C4EB, 0x2F79F82E, 0x3802EC6D, /* [8][0x68]*/ 0xE7BB23BC, 0xF0C037FF, 0xC94D0B3A, 0xDE361F79, 0xBA5772B0, 0xAD2C66F3, 0x94A15A36, 0x83DA4E75, /* [8][0x70]*/ 0xF0A3C3D5, 0xE7D8D796, 0xDE55EB53, 0xC92EFF10, 0xAD4F92D9, 0xBA34869A, 0x83B9BA5F, 0x94C2AE1C, /* [8][0x78]*/ 0x4B7B61CD, 0x5C00758E, 0x658D494B, 0x72F65D08, 0x169730C1, 0x01EC2482, 0x38611847, 0x2F1A0C04, /* [8][0x80]*/ 0x6655004F, 0x712E140C, 0x48A328C9, 0x5FD83C8A, 0x3BB95143, 0x2CC24500, 0x154F79C5, 0x02346D86, /* [8][0x88]*/ 0xDD8DA257, 0xCAF6B614, 0xF37B8AD1, 0xE4009E92, 
0x8061F35B, 0x971AE718, 0xAE97DBDD, 0xB9ECCF9E, /* [8][0x90]*/ 0xCA95423E, 0xDDEE567D, 0xE4636AB8, 0xF3187EFB, 0x97791332, 0x80020771, 0xB98F3BB4, 0xAEF42FF7, /* [8][0x98]*/ 0x714DE026, 0x6636F465, 0x5FBBC8A0, 0x48C0DCE3, 0x2CA1B12A, 0x3BDAA569, 0x025799AC, 0x152C8DEF, /* [8][0xa0]*/ 0xE4A482EC, 0xF3DF96AF, 0xCA52AA6A, 0xDD29BE29, 0xB948D3E0, 0xAE33C7A3, 0x97BEFB66, 0x80C5EF25, /* [8][0xa8]*/ 0x5F7C20F4, 0x480734B7, 0x718A0872, 0x66F11C31, 0x029071F8, 0x15EB65BB, 0x2C66597E, 0x3B1D4D3D, /* [8][0xb0]*/ 0x4864C09D, 0x5F1FD4DE, 0x6692E81B, 0x71E9FC58, 0x15889191, 0x02F385D2, 0x3B7EB917, 0x2C05AD54, /* [8][0xb8]*/ 0xF3BC6285, 0xE4C776C6, 0xDD4A4A03, 0xCA315E40, 0xAE503389, 0xB92B27CA, 0x80A61B0F, 0x97DD0F4C, /* [8][0xc0]*/ 0xB8C70348, 0xAFBC170B, 0x96312BCE, 0x814A3F8D, 0xE52B5244, 0xF2504607, 0xCBDD7AC2, 0xDCA66E81, /* [8][0xc8]*/ 0x031FA150, 0x1464B513, 0x2DE989D6, 0x3A929D95, 0x5EF3F05C, 0x4988E41F, 0x7005D8DA, 0x677ECC99, /* [8][0xd0]*/ 0x14074139, 0x037C557A, 0x3AF169BF, 0x2D8A7DFC, 0x49EB1035, 0x5E900476, 0x671D38B3, 0x70662CF0, /* [8][0xd8]*/ 0xAFDFE321, 0xB8A4F762, 0x8129CBA7, 0x9652DFE4, 0xF233B22D, 0xE548A66E, 0xDCC59AAB, 0xCBBE8EE8, /* [8][0xe0]*/ 0x3A3681EB, 0x2D4D95A8, 0x14C0A96D, 0x03BBBD2E, 0x67DAD0E7, 0x70A1C4A4, 0x492CF861, 0x5E57EC22, /* [8][0xe8]*/ 0x81EE23F3, 0x969537B0, 0xAF180B75, 0xB8631F36, 0xDC0272FF, 0xCB7966BC, 0xF2F45A79, 0xE58F4E3A, /* [8][0xf0]*/ 0x96F6C39A, 0x818DD7D9, 0xB800EB1C, 0xAF7BFF5F, 0xCB1A9296, 0xDC6186D5, 0xE5ECBA10, 0xF297AE53, /* [8][0xf8]*/ 0x2D2E6182, 0x3A5575C1, 0x03D84904, 0x14A35D47, 0x70C2308E, 0x67B924CD, 0x5E341808, 0x494F0C4B /* [8][0x100]*/ }, { 0x00000000, 0xEFC26B3E, 0x04F5D03D, 0xEB37BB03, 0x09EBA07A, 0xE629CB44, 0x0D1E7047, 0xE2DC1B79, /* [9][0x08]*/ 0x13D740F4, 0xFC152BCA, 0x172290C9, 0xF8E0FBF7, 0x1A3CE08E, 0xF5FE8BB0, 0x1EC930B3, 0xF10B5B8D, /* [9][0x10]*/ 0x27AE81E8, 0xC86CEAD6, 0x235B51D5, 0xCC993AEB, 0x2E452192, 0xC1874AAC, 0x2AB0F1AF, 0xC5729A91, /* [9][0x18]*/ 0x3479C11C, 0xDBBBAA22, 0x308C1121, 0xDF4E7A1F, 0x3D926166, 0xD2500A58, 0x3967B15B, 0xD6A5DA65, /* [9][0x20]*/ 0x4F5D03D0, 0xA09F68EE, 0x4BA8D3ED, 0xA46AB8D3, 0x46B6A3AA, 0xA974C894, 0x42437397, 0xAD8118A9, /* [9][0x28]*/ 0x5C8A4324, 0xB348281A, 0x587F9319, 0xB7BDF827, 0x5561E35E, 0xBAA38860, 0x51943363, 0xBE56585D, /* [9][0x30]*/ 0x68F38238, 0x8731E906, 0x6C065205, 0x83C4393B, 0x61182242, 0x8EDA497C, 0x65EDF27F, 0x8A2F9941, /* [9][0x38]*/ 0x7B24C2CC, 0x94E6A9F2, 0x7FD112F1, 0x901379CF, 0x72CF62B6, 0x9D0D0988, 0x763AB28B, 0x99F8D9B5, /* [9][0x40]*/ 0x9EBA07A0, 0x71786C9E, 0x9A4FD79D, 0x758DBCA3, 0x9751A7DA, 0x7893CCE4, 0x93A477E7, 0x7C661CD9, /* [9][0x48]*/ 0x8D6D4754, 0x62AF2C6A, 0x89989769, 0x665AFC57, 0x8486E72E, 0x6B448C10, 0x80733713, 0x6FB15C2D, /* [9][0x50]*/ 0xB9148648, 0x56D6ED76, 0xBDE15675, 0x52233D4B, 0xB0FF2632, 0x5F3D4D0C, 0xB40AF60F, 0x5BC89D31, /* [9][0x58]*/ 0xAAC3C6BC, 0x4501AD82, 0xAE361681, 0x41F47DBF, 0xA32866C6, 0x4CEA0DF8, 0xA7DDB6FB, 0x481FDDC5, /* [9][0x60]*/ 0xD1E70470, 0x3E256F4E, 0xD512D44D, 0x3AD0BF73, 0xD80CA40A, 0x37CECF34, 0xDCF97437, 0x333B1F09, /* [9][0x68]*/ 0xC2304484, 0x2DF22FBA, 0xC6C594B9, 0x2907FF87, 0xCBDBE4FE, 0x24198FC0, 0xCF2E34C3, 0x20EC5FFD, /* [9][0x70]*/ 0xF6498598, 0x198BEEA6, 0xF2BC55A5, 0x1D7E3E9B, 0xFFA225E2, 0x10604EDC, 0xFB57F5DF, 0x14959EE1, /* [9][0x78]*/ 0xE59EC56C, 0x0A5CAE52, 0xE16B1551, 0x0EA97E6F, 0xEC756516, 0x03B70E28, 0xE880B52B, 0x0742DE15, /* [9][0x80]*/ 0xE6050901, 0x09C7623F, 0xE2F0D93C, 0x0D32B202, 0xEFEEA97B, 0x002CC245, 0xEB1B7946, 0x04D91278, /* [9][0x88]*/ 0xF5D249F5, 0x1A1022CB, 0xF12799C8, 
0x1EE5F2F6, 0xFC39E98F, 0x13FB82B1, 0xF8CC39B2, 0x170E528C, /* [9][0x90]*/ 0xC1AB88E9, 0x2E69E3D7, 0xC55E58D4, 0x2A9C33EA, 0xC8402893, 0x278243AD, 0xCCB5F8AE, 0x23779390, /* [9][0x98]*/ 0xD27CC81D, 0x3DBEA323, 0xD6891820, 0x394B731E, 0xDB976867, 0x34550359, 0xDF62B85A, 0x30A0D364, /* [9][0xa0]*/ 0xA9580AD1, 0x469A61EF, 0xADADDAEC, 0x426FB1D2, 0xA0B3AAAB, 0x4F71C195, 0xA4467A96, 0x4B8411A8, /* [9][0xa8]*/ 0xBA8F4A25, 0x554D211B, 0xBE7A9A18, 0x51B8F126, 0xB364EA5F, 0x5CA68161, 0xB7913A62, 0x5853515C, /* [9][0xb0]*/ 0x8EF68B39, 0x6134E007, 0x8A035B04, 0x65C1303A, 0x871D2B43, 0x68DF407D, 0x83E8FB7E, 0x6C2A9040, /* [9][0xb8]*/ 0x9D21CBCD, 0x72E3A0F3, 0x99D41BF0, 0x761670CE, 0x94CA6BB7, 0x7B080089, 0x903FBB8A, 0x7FFDD0B4, /* [9][0xc0]*/ 0x78BF0EA1, 0x977D659F, 0x7C4ADE9C, 0x9388B5A2, 0x7154AEDB, 0x9E96C5E5, 0x75A17EE6, 0x9A6315D8, /* [9][0xc8]*/ 0x6B684E55, 0x84AA256B, 0x6F9D9E68, 0x805FF556, 0x6283EE2F, 0x8D418511, 0x66763E12, 0x89B4552C, /* [9][0xd0]*/ 0x5F118F49, 0xB0D3E477, 0x5BE45F74, 0xB426344A, 0x56FA2F33, 0xB938440D, 0x520FFF0E, 0xBDCD9430, /* [9][0xd8]*/ 0x4CC6CFBD, 0xA304A483, 0x48331F80, 0xA7F174BE, 0x452D6FC7, 0xAAEF04F9, 0x41D8BFFA, 0xAE1AD4C4, /* [9][0xe0]*/ 0x37E20D71, 0xD820664F, 0x3317DD4C, 0xDCD5B672, 0x3E09AD0B, 0xD1CBC635, 0x3AFC7D36, 0xD53E1608, /* [9][0xe8]*/ 0x24354D85, 0xCBF726BB, 0x20C09DB8, 0xCF02F686, 0x2DDEEDFF, 0xC21C86C1, 0x292B3DC2, 0xC6E956FC, /* [9][0xf0]*/ 0x104C8C99, 0xFF8EE7A7, 0x14B95CA4, 0xFB7B379A, 0x19A72CE3, 0xF66547DD, 0x1D52FCDE, 0xF29097E0, /* [9][0xf8]*/ 0x039BCC6D, 0xEC59A753, 0x076E1C50, 0xE8AC776E, 0x0A706C17, 0xE5B20729, 0x0E85BC2A, 0xE147D714 /* [9][0x100]*/ }, { 0x00000000, 0xC18EDFC0, 0x586CB9C1, 0x99E26601, 0xB0D97382, 0x7157AC42, 0xE8B5CA43, 0x293B1583, /* [10][0x08]*/ 0xBAC3E145, 0x7B4D3E85, 0xE2AF5884, 0x23218744, 0x0A1A92C7, 0xCB944D07, 0x52762B06, 0x93F8F4C6, /* [10][0x10]*/ 0xAEF6C4CB, 0x6F781B0B, 0xF69A7D0A, 0x3714A2CA, 0x1E2FB749, 0xDFA16889, 0x46430E88, 0x87CDD148, /* [10][0x18]*/ 0x1435258E, 0xD5BBFA4E, 0x4C599C4F, 0x8DD7438F, 0xA4EC560C, 0x656289CC, 0xFC80EFCD, 0x3D0E300D, /* [10][0x20]*/ 0x869C8FD7, 0x47125017, 0xDEF03616, 0x1F7EE9D6, 0x3645FC55, 0xF7CB2395, 0x6E294594, 0xAFA79A54, /* [10][0x28]*/ 0x3C5F6E92, 0xFDD1B152, 0x6433D753, 0xA5BD0893, 0x8C861D10, 0x4D08C2D0, 0xD4EAA4D1, 0x15647B11, /* [10][0x30]*/ 0x286A4B1C, 0xE9E494DC, 0x7006F2DD, 0xB1882D1D, 0x98B3389E, 0x593DE75E, 0xC0DF815F, 0x01515E9F, /* [10][0x38]*/ 0x92A9AA59, 0x53277599, 0xCAC51398, 0x0B4BCC58, 0x2270D9DB, 0xE3FE061B, 0x7A1C601A, 0xBB92BFDA, /* [10][0x40]*/ 0xD64819EF, 0x17C6C62F, 0x8E24A02E, 0x4FAA7FEE, 0x66916A6D, 0xA71FB5AD, 0x3EFDD3AC, 0xFF730C6C, /* [10][0x48]*/ 0x6C8BF8AA, 0xAD05276A, 0x34E7416B, 0xF5699EAB, 0xDC528B28, 0x1DDC54E8, 0x843E32E9, 0x45B0ED29, /* [10][0x50]*/ 0x78BEDD24, 0xB93002E4, 0x20D264E5, 0xE15CBB25, 0xC867AEA6, 0x09E97166, 0x900B1767, 0x5185C8A7, /* [10][0x58]*/ 0xC27D3C61, 0x03F3E3A1, 0x9A1185A0, 0x5B9F5A60, 0x72A44FE3, 0xB32A9023, 0x2AC8F622, 0xEB4629E2, /* [10][0x60]*/ 0x50D49638, 0x915A49F8, 0x08B82FF9, 0xC936F039, 0xE00DE5BA, 0x21833A7A, 0xB8615C7B, 0x79EF83BB, /* [10][0x68]*/ 0xEA17777D, 0x2B99A8BD, 0xB27BCEBC, 0x73F5117C, 0x5ACE04FF, 0x9B40DB3F, 0x02A2BD3E, 0xC32C62FE, /* [10][0x70]*/ 0xFE2252F3, 0x3FAC8D33, 0xA64EEB32, 0x67C034F2, 0x4EFB2171, 0x8F75FEB1, 0x169798B0, 0xD7194770, /* [10][0x78]*/ 0x44E1B3B6, 0x856F6C76, 0x1C8D0A77, 0xDD03D5B7, 0xF438C034, 0x35B61FF4, 0xAC5479F5, 0x6DDAA635, /* [10][0x80]*/ 0x77E1359F, 0xB66FEA5F, 0x2F8D8C5E, 0xEE03539E, 0xC738461D, 0x06B699DD, 0x9F54FFDC, 0x5EDA201C, /* [10][0x88]*/ 0xCD22D4DA, 
0x0CAC0B1A, 0x954E6D1B, 0x54C0B2DB, 0x7DFBA758, 0xBC757898, 0x25971E99, 0xE419C159, /* [10][0x90]*/ 0xD917F154, 0x18992E94, 0x817B4895, 0x40F59755, 0x69CE82D6, 0xA8405D16, 0x31A23B17, 0xF02CE4D7, /* [10][0x98]*/ 0x63D41011, 0xA25ACFD1, 0x3BB8A9D0, 0xFA367610, 0xD30D6393, 0x1283BC53, 0x8B61DA52, 0x4AEF0592, /* [10][0xa0]*/ 0xF17DBA48, 0x30F36588, 0xA9110389, 0x689FDC49, 0x41A4C9CA, 0x802A160A, 0x19C8700B, 0xD846AFCB, /* [10][0xa8]*/ 0x4BBE5B0D, 0x8A3084CD, 0x13D2E2CC, 0xD25C3D0C, 0xFB67288F, 0x3AE9F74F, 0xA30B914E, 0x62854E8E, /* [10][0xb0]*/ 0x5F8B7E83, 0x9E05A143, 0x07E7C742, 0xC6691882, 0xEF520D01, 0x2EDCD2C1, 0xB73EB4C0, 0x76B06B00, /* [10][0xb8]*/ 0xE5489FC6, 0x24C64006, 0xBD242607, 0x7CAAF9C7, 0x5591EC44, 0x941F3384, 0x0DFD5585, 0xCC738A45, /* [10][0xc0]*/ 0xA1A92C70, 0x6027F3B0, 0xF9C595B1, 0x384B4A71, 0x11705FF2, 0xD0FE8032, 0x491CE633, 0x889239F3, /* [10][0xc8]*/ 0x1B6ACD35, 0xDAE412F5, 0x430674F4, 0x8288AB34, 0xABB3BEB7, 0x6A3D6177, 0xF3DF0776, 0x3251D8B6, /* [10][0xd0]*/ 0x0F5FE8BB, 0xCED1377B, 0x5733517A, 0x96BD8EBA, 0xBF869B39, 0x7E0844F9, 0xE7EA22F8, 0x2664FD38, /* [10][0xd8]*/ 0xB59C09FE, 0x7412D63E, 0xEDF0B03F, 0x2C7E6FFF, 0x05457A7C, 0xC4CBA5BC, 0x5D29C3BD, 0x9CA71C7D, /* [10][0xe0]*/ 0x2735A3A7, 0xE6BB7C67, 0x7F591A66, 0xBED7C5A6, 0x97ECD025, 0x56620FE5, 0xCF8069E4, 0x0E0EB624, /* [10][0xe8]*/ 0x9DF642E2, 0x5C789D22, 0xC59AFB23, 0x041424E3, 0x2D2F3160, 0xECA1EEA0, 0x754388A1, 0xB4CD5761, /* [10][0xf0]*/ 0x89C3676C, 0x484DB8AC, 0xD1AFDEAD, 0x1021016D, 0x391A14EE, 0xF894CB2E, 0x6176AD2F, 0xA0F872EF, /* [10][0xf8]*/ 0x33008629, 0xF28E59E9, 0x6B6C3FE8, 0xAAE2E028, 0x83D9F5AB, 0x42572A6B, 0xDBB54C6A, 0x1A3B93AA /* [10][0x100]*/ }, { 0x00000000, 0x9BA54C6F, 0xEC3B9E9F, 0x779ED2F0, 0x03063B7F, 0x98A37710, 0xEF3DA5E0, 0x7498E98F, /* [11][0x08]*/ 0x060C76FE, 0x9DA93A91, 0xEA37E861, 0x7192A40E, 0x050A4D81, 0x9EAF01EE, 0xE931D31E, 0x72949F71, /* [11][0x10]*/ 0x0C18EDFC, 0x97BDA193, 0xE0237363, 0x7B863F0C, 0x0F1ED683, 0x94BB9AEC, 0xE325481C, 0x78800473, /* [11][0x18]*/ 0x0A149B02, 0x91B1D76D, 0xE62F059D, 0x7D8A49F2, 0x0912A07D, 0x92B7EC12, 0xE5293EE2, 0x7E8C728D, /* [11][0x20]*/ 0x1831DBF8, 0x83949797, 0xF40A4567, 0x6FAF0908, 0x1B37E087, 0x8092ACE8, 0xF70C7E18, 0x6CA93277, /* [11][0x28]*/ 0x1E3DAD06, 0x8598E169, 0xF2063399, 0x69A37FF6, 0x1D3B9679, 0x869EDA16, 0xF10008E6, 0x6AA54489, /* [11][0x30]*/ 0x14293604, 0x8F8C7A6B, 0xF812A89B, 0x63B7E4F4, 0x172F0D7B, 0x8C8A4114, 0xFB1493E4, 0x60B1DF8B, /* [11][0x38]*/ 0x122540FA, 0x89800C95, 0xFE1EDE65, 0x65BB920A, 0x11237B85, 0x8A8637EA, 0xFD18E51A, 0x66BDA975, /* [11][0x40]*/ 0x3063B7F0, 0xABC6FB9F, 0xDC58296F, 0x47FD6500, 0x33658C8F, 0xA8C0C0E0, 0xDF5E1210, 0x44FB5E7F, /* [11][0x48]*/ 0x366FC10E, 0xADCA8D61, 0xDA545F91, 0x41F113FE, 0x3569FA71, 0xAECCB61E, 0xD95264EE, 0x42F72881, /* [11][0x50]*/ 0x3C7B5A0C, 0xA7DE1663, 0xD040C493, 0x4BE588FC, 0x3F7D6173, 0xA4D82D1C, 0xD346FFEC, 0x48E3B383, /* [11][0x58]*/ 0x3A772CF2, 0xA1D2609D, 0xD64CB26D, 0x4DE9FE02, 0x3971178D, 0xA2D45BE2, 0xD54A8912, 0x4EEFC57D, /* [11][0x60]*/ 0x28526C08, 0xB3F72067, 0xC469F297, 0x5FCCBEF8, 0x2B545777, 0xB0F11B18, 0xC76FC9E8, 0x5CCA8587, /* [11][0x68]*/ 0x2E5E1AF6, 0xB5FB5699, 0xC2658469, 0x59C0C806, 0x2D582189, 0xB6FD6DE6, 0xC163BF16, 0x5AC6F379, /* [11][0x70]*/ 0x244A81F4, 0xBFEFCD9B, 0xC8711F6B, 0x53D45304, 0x274CBA8B, 0xBCE9F6E4, 0xCB772414, 0x50D2687B, /* [11][0x78]*/ 0x2246F70A, 0xB9E3BB65, 0xCE7D6995, 0x55D825FA, 0x2140CC75, 0xBAE5801A, 0xCD7B52EA, 0x56DE1E85, /* [11][0x80]*/ 0x60C76FE0, 0xFB62238F, 0x8CFCF17F, 0x1759BD10, 0x63C1549F, 0xF86418F0, 0x8FFACA00, 
0x145F866F, /* [11][0x88]*/ 0x66CB191E, 0xFD6E5571, 0x8AF08781, 0x1155CBEE, 0x65CD2261, 0xFE686E0E, 0x89F6BCFE, 0x1253F091, /* [11][0x90]*/ 0x6CDF821C, 0xF77ACE73, 0x80E41C83, 0x1B4150EC, 0x6FD9B963, 0xF47CF50C, 0x83E227FC, 0x18476B93, /* [11][0x98]*/ 0x6AD3F4E2, 0xF176B88D, 0x86E86A7D, 0x1D4D2612, 0x69D5CF9D, 0xF27083F2, 0x85EE5102, 0x1E4B1D6D, /* [11][0xa0]*/ 0x78F6B418, 0xE353F877, 0x94CD2A87, 0x0F6866E8, 0x7BF08F67, 0xE055C308, 0x97CB11F8, 0x0C6E5D97, /* [11][0xa8]*/ 0x7EFAC2E6, 0xE55F8E89, 0x92C15C79, 0x09641016, 0x7DFCF999, 0xE659B5F6, 0x91C76706, 0x0A622B69, /* [11][0xb0]*/ 0x74EE59E4, 0xEF4B158B, 0x98D5C77B, 0x03708B14, 0x77E8629B, 0xEC4D2EF4, 0x9BD3FC04, 0x0076B06B, /* [11][0xb8]*/ 0x72E22F1A, 0xE9476375, 0x9ED9B185, 0x057CFDEA, 0x71E41465, 0xEA41580A, 0x9DDF8AFA, 0x067AC695, /* [11][0xc0]*/ 0x50A4D810, 0xCB01947F, 0xBC9F468F, 0x273A0AE0, 0x53A2E36F, 0xC807AF00, 0xBF997DF0, 0x243C319F, /* [11][0xc8]*/ 0x56A8AEEE, 0xCD0DE281, 0xBA933071, 0x21367C1E, 0x55AE9591, 0xCE0BD9FE, 0xB9950B0E, 0x22304761, /* [11][0xd0]*/ 0x5CBC35EC, 0xC7197983, 0xB087AB73, 0x2B22E71C, 0x5FBA0E93, 0xC41F42FC, 0xB381900C, 0x2824DC63, /* [11][0xd8]*/ 0x5AB04312, 0xC1150F7D, 0xB68BDD8D, 0x2D2E91E2, 0x59B6786D, 0xC2133402, 0xB58DE6F2, 0x2E28AA9D, /* [11][0xe0]*/ 0x489503E8, 0xD3304F87, 0xA4AE9D77, 0x3F0BD118, 0x4B933897, 0xD03674F8, 0xA7A8A608, 0x3C0DEA67, /* [11][0xe8]*/ 0x4E997516, 0xD53C3979, 0xA2A2EB89, 0x3907A7E6, 0x4D9F4E69, 0xD63A0206, 0xA1A4D0F6, 0x3A019C99, /* [11][0xf0]*/ 0x448DEE14, 0xDF28A27B, 0xA8B6708B, 0x33133CE4, 0x478BD56B, 0xDC2E9904, 0xABB04BF4, 0x3015079B, /* [11][0xf8]*/ 0x428198EA, 0xD924D485, 0xAEBA0675, 0x351F4A1A, 0x4187A395, 0xDA22EFFA, 0xADBC3D0A, 0x36197165 /* [11][0x100]*/ }, { 0x00000000, 0xDD96D985, 0x605CB54B, 0xBDCA6CCE, 0xC0B96A96, 0x1D2FB313, 0xA0E5DFDD, 0x7D730658, /* [12][0x08]*/ 0x5A03D36D, 0x87950AE8, 0x3A5F6626, 0xE7C9BFA3, 0x9ABAB9FB, 0x472C607E, 0xFAE60CB0, 0x2770D535, /* [12][0x10]*/ 0xB407A6DA, 0x69917F5F, 0xD45B1391, 0x09CDCA14, 0x74BECC4C, 0xA92815C9, 0x14E27907, 0xC974A082, /* [12][0x18]*/ 0xEE0475B7, 0x3392AC32, 0x8E58C0FC, 0x53CE1979, 0x2EBD1F21, 0xF32BC6A4, 0x4EE1AA6A, 0x937773EF, /* [12][0x20]*/ 0xB37E4BF5, 0x6EE89270, 0xD322FEBE, 0x0EB4273B, 0x73C72163, 0xAE51F8E6, 0x139B9428, 0xCE0D4DAD, /* [12][0x28]*/ 0xE97D9898, 0x34EB411D, 0x89212DD3, 0x54B7F456, 0x29C4F20E, 0xF4522B8B, 0x49984745, 0x940E9EC0, /* [12][0x30]*/ 0x0779ED2F, 0xDAEF34AA, 0x67255864, 0xBAB381E1, 0xC7C087B9, 0x1A565E3C, 0xA79C32F2, 0x7A0AEB77, /* [12][0x38]*/ 0x5D7A3E42, 0x80ECE7C7, 0x3D268B09, 0xE0B0528C, 0x9DC354D4, 0x40558D51, 0xFD9FE19F, 0x2009381A, /* [12][0x40]*/ 0xBD8D91AB, 0x601B482E, 0xDDD124E0, 0x0047FD65, 0x7D34FB3D, 0xA0A222B8, 0x1D684E76, 0xC0FE97F3, /* [12][0x48]*/ 0xE78E42C6, 0x3A189B43, 0x87D2F78D, 0x5A442E08, 0x27372850, 0xFAA1F1D5, 0x476B9D1B, 0x9AFD449E, /* [12][0x50]*/ 0x098A3771, 0xD41CEEF4, 0x69D6823A, 0xB4405BBF, 0xC9335DE7, 0x14A58462, 0xA96FE8AC, 0x74F93129, /* [12][0x58]*/ 0x5389E41C, 0x8E1F3D99, 0x33D55157, 0xEE4388D2, 0x93308E8A, 0x4EA6570F, 0xF36C3BC1, 0x2EFAE244, /* [12][0x60]*/ 0x0EF3DA5E, 0xD36503DB, 0x6EAF6F15, 0xB339B690, 0xCE4AB0C8, 0x13DC694D, 0xAE160583, 0x7380DC06, /* [12][0x68]*/ 0x54F00933, 0x8966D0B6, 0x34ACBC78, 0xE93A65FD, 0x944963A5, 0x49DFBA20, 0xF415D6EE, 0x29830F6B, /* [12][0x70]*/ 0xBAF47C84, 0x6762A501, 0xDAA8C9CF, 0x073E104A, 0x7A4D1612, 0xA7DBCF97, 0x1A11A359, 0xC7877ADC, /* [12][0x78]*/ 0xE0F7AFE9, 0x3D61766C, 0x80AB1AA2, 0x5D3DC327, 0x204EC57F, 0xFDD81CFA, 0x40127034, 0x9D84A9B1, /* [12][0x80]*/ 0xA06A2517, 0x7DFCFC92, 0xC036905C, 0x1DA049D9, 
0x60D34F81, 0xBD459604, 0x008FFACA, 0xDD19234F, /* [12][0x88]*/ 0xFA69F67A, 0x27FF2FFF, 0x9A354331, 0x47A39AB4, 0x3AD09CEC, 0xE7464569, 0x5A8C29A7, 0x871AF022, /* [12][0x90]*/ 0x146D83CD, 0xC9FB5A48, 0x74313686, 0xA9A7EF03, 0xD4D4E95B, 0x094230DE, 0xB4885C10, 0x691E8595, /* [12][0x98]*/ 0x4E6E50A0, 0x93F88925, 0x2E32E5EB, 0xF3A43C6E, 0x8ED73A36, 0x5341E3B3, 0xEE8B8F7D, 0x331D56F8, /* [12][0xa0]*/ 0x13146EE2, 0xCE82B767, 0x7348DBA9, 0xAEDE022C, 0xD3AD0474, 0x0E3BDDF1, 0xB3F1B13F, 0x6E6768BA, /* [12][0xa8]*/ 0x4917BD8F, 0x9481640A, 0x294B08C4, 0xF4DDD141, 0x89AED719, 0x54380E9C, 0xE9F26252, 0x3464BBD7, /* [12][0xb0]*/ 0xA713C838, 0x7A8511BD, 0xC74F7D73, 0x1AD9A4F6, 0x67AAA2AE, 0xBA3C7B2B, 0x07F617E5, 0xDA60CE60, /* [12][0xb8]*/ 0xFD101B55, 0x2086C2D0, 0x9D4CAE1E, 0x40DA779B, 0x3DA971C3, 0xE03FA846, 0x5DF5C488, 0x80631D0D, /* [12][0xc0]*/ 0x1DE7B4BC, 0xC0716D39, 0x7DBB01F7, 0xA02DD872, 0xDD5EDE2A, 0x00C807AF, 0xBD026B61, 0x6094B2E4, /* [12][0xc8]*/ 0x47E467D1, 0x9A72BE54, 0x27B8D29A, 0xFA2E0B1F, 0x875D0D47, 0x5ACBD4C2, 0xE701B80C, 0x3A976189, /* [12][0xd0]*/ 0xA9E01266, 0x7476CBE3, 0xC9BCA72D, 0x142A7EA8, 0x695978F0, 0xB4CFA175, 0x0905CDBB, 0xD493143E, /* [12][0xd8]*/ 0xF3E3C10B, 0x2E75188E, 0x93BF7440, 0x4E29ADC5, 0x335AAB9D, 0xEECC7218, 0x53061ED6, 0x8E90C753, /* [12][0xe0]*/ 0xAE99FF49, 0x730F26CC, 0xCEC54A02, 0x13539387, 0x6E2095DF, 0xB3B64C5A, 0x0E7C2094, 0xD3EAF911, /* [12][0xe8]*/ 0xF49A2C24, 0x290CF5A1, 0x94C6996F, 0x495040EA, 0x342346B2, 0xE9B59F37, 0x547FF3F9, 0x89E92A7C, /* [12][0xf0]*/ 0x1A9E5993, 0xC7088016, 0x7AC2ECD8, 0xA754355D, 0xDA273305, 0x07B1EA80, 0xBA7B864E, 0x67ED5FCB, /* [12][0xf8]*/ 0x409D8AFE, 0x9D0B537B, 0x20C13FB5, 0xFD57E630, 0x8024E068, 0x5DB239ED, 0xE0785523, 0x3DEE8CA6 /* [12][0x100]*/ }, { 0x00000000, 0x9D0FE176, 0xE16EC4AD, 0x7C6125DB, 0x19AC8F1B, 0x84A36E6D, 0xF8C24BB6, 0x65CDAAC0, /* [13][0x08]*/ 0x33591E36, 0xAE56FF40, 0xD237DA9B, 0x4F383BED, 0x2AF5912D, 0xB7FA705B, 0xCB9B5580, 0x5694B4F6, /* [13][0x10]*/ 0x66B23C6C, 0xFBBDDD1A, 0x87DCF8C1, 0x1AD319B7, 0x7F1EB377, 0xE2115201, 0x9E7077DA, 0x037F96AC, /* [13][0x18]*/ 0x55EB225A, 0xC8E4C32C, 0xB485E6F7, 0x298A0781, 0x4C47AD41, 0xD1484C37, 0xAD2969EC, 0x3026889A, /* [13][0x20]*/ 0xCD6478D8, 0x506B99AE, 0x2C0ABC75, 0xB1055D03, 0xD4C8F7C3, 0x49C716B5, 0x35A6336E, 0xA8A9D218, /* [13][0x28]*/ 0xFE3D66EE, 0x63328798, 0x1F53A243, 0x825C4335, 0xE791E9F5, 0x7A9E0883, 0x06FF2D58, 0x9BF0CC2E, /* [13][0x30]*/ 0xABD644B4, 0x36D9A5C2, 0x4AB88019, 0xD7B7616F, 0xB27ACBAF, 0x2F752AD9, 0x53140F02, 0xCE1BEE74, /* [13][0x38]*/ 0x988F5A82, 0x0580BBF4, 0x79E19E2F, 0xE4EE7F59, 0x8123D599, 0x1C2C34EF, 0x604D1134, 0xFD42F042, /* [13][0x40]*/ 0x41B9F7F1, 0xDCB61687, 0xA0D7335C, 0x3DD8D22A, 0x581578EA, 0xC51A999C, 0xB97BBC47, 0x24745D31, /* [13][0x48]*/ 0x72E0E9C7, 0xEFEF08B1, 0x938E2D6A, 0x0E81CC1C, 0x6B4C66DC, 0xF64387AA, 0x8A22A271, 0x172D4307, /* [13][0x50]*/ 0x270BCB9D, 0xBA042AEB, 0xC6650F30, 0x5B6AEE46, 0x3EA74486, 0xA3A8A5F0, 0xDFC9802B, 0x42C6615D, /* [13][0x58]*/ 0x1452D5AB, 0x895D34DD, 0xF53C1106, 0x6833F070, 0x0DFE5AB0, 0x90F1BBC6, 0xEC909E1D, 0x719F7F6B, /* [13][0x60]*/ 0x8CDD8F29, 0x11D26E5F, 0x6DB34B84, 0xF0BCAAF2, 0x95710032, 0x087EE144, 0x741FC49F, 0xE91025E9, /* [13][0x68]*/ 0xBF84911F, 0x228B7069, 0x5EEA55B2, 0xC3E5B4C4, 0xA6281E04, 0x3B27FF72, 0x4746DAA9, 0xDA493BDF, /* [13][0x70]*/ 0xEA6FB345, 0x77605233, 0x0B0177E8, 0x960E969E, 0xF3C33C5E, 0x6ECCDD28, 0x12ADF8F3, 0x8FA21985, /* [13][0x78]*/ 0xD936AD73, 0x44394C05, 0x385869DE, 0xA55788A8, 0xC09A2268, 0x5D95C31E, 0x21F4E6C5, 0xBCFB07B3, /* [13][0x80]*/ 0x8373EFE2, 
0x1E7C0E94, 0x621D2B4F, 0xFF12CA39, 0x9ADF60F9, 0x07D0818F, 0x7BB1A454, 0xE6BE4522, /* [13][0x88]*/ 0xB02AF1D4, 0x2D2510A2, 0x51443579, 0xCC4BD40F, 0xA9867ECF, 0x34899FB9, 0x48E8BA62, 0xD5E75B14, /* [13][0x90]*/ 0xE5C1D38E, 0x78CE32F8, 0x04AF1723, 0x99A0F655, 0xFC6D5C95, 0x6162BDE3, 0x1D039838, 0x800C794E, /* [13][0x98]*/ 0xD698CDB8, 0x4B972CCE, 0x37F60915, 0xAAF9E863, 0xCF3442A3, 0x523BA3D5, 0x2E5A860E, 0xB3556778, /* [13][0xa0]*/ 0x4E17973A, 0xD318764C, 0xAF795397, 0x3276B2E1, 0x57BB1821, 0xCAB4F957, 0xB6D5DC8C, 0x2BDA3DFA, /* [13][0xa8]*/ 0x7D4E890C, 0xE041687A, 0x9C204DA1, 0x012FACD7, 0x64E20617, 0xF9EDE761, 0x858CC2BA, 0x188323CC, /* [13][0xb0]*/ 0x28A5AB56, 0xB5AA4A20, 0xC9CB6FFB, 0x54C48E8D, 0x3109244D, 0xAC06C53B, 0xD067E0E0, 0x4D680196, /* [13][0xb8]*/ 0x1BFCB560, 0x86F35416, 0xFA9271CD, 0x679D90BB, 0x02503A7B, 0x9F5FDB0D, 0xE33EFED6, 0x7E311FA0, /* [13][0xc0]*/ 0xC2CA1813, 0x5FC5F965, 0x23A4DCBE, 0xBEAB3DC8, 0xDB669708, 0x4669767E, 0x3A0853A5, 0xA707B2D3, /* [13][0xc8]*/ 0xF1930625, 0x6C9CE753, 0x10FDC288, 0x8DF223FE, 0xE83F893E, 0x75306848, 0x09514D93, 0x945EACE5, /* [13][0xd0]*/ 0xA478247F, 0x3977C509, 0x4516E0D2, 0xD81901A4, 0xBDD4AB64, 0x20DB4A12, 0x5CBA6FC9, 0xC1B58EBF, /* [13][0xd8]*/ 0x97213A49, 0x0A2EDB3F, 0x764FFEE4, 0xEB401F92, 0x8E8DB552, 0x13825424, 0x6FE371FF, 0xF2EC9089, /* [13][0xe0]*/ 0x0FAE60CB, 0x92A181BD, 0xEEC0A466, 0x73CF4510, 0x1602EFD0, 0x8B0D0EA6, 0xF76C2B7D, 0x6A63CA0B, /* [13][0xe8]*/ 0x3CF77EFD, 0xA1F89F8B, 0xDD99BA50, 0x40965B26, 0x255BF1E6, 0xB8541090, 0xC435354B, 0x593AD43D, /* [13][0xf0]*/ 0x691C5CA7, 0xF413BDD1, 0x8872980A, 0x157D797C, 0x70B0D3BC, 0xEDBF32CA, 0x91DE1711, 0x0CD1F667, /* [13][0xf8]*/ 0x5A454291, 0xC74AA3E7, 0xBB2B863C, 0x2624674A, 0x43E9CD8A, 0xDEE62CFC, 0xA2870927, 0x3F88E851 /* [13][0x100]*/ }, { 0x00000000, 0xB9FBDBE8, 0xA886B191, 0x117D6A79, 0x8A7C6563, 0x3387BE8B, 0x22FAD4F2, 0x9B010F1A, /* [14][0x08]*/ 0xCF89CC87, 0x7672176F, 0x670F7D16, 0xDEF4A6FE, 0x45F5A9E4, 0xFC0E720C, 0xED731875, 0x5488C39D, /* [14][0x10]*/ 0x44629F4F, 0xFD9944A7, 0xECE42EDE, 0x551FF536, 0xCE1EFA2C, 0x77E521C4, 0x66984BBD, 0xDF639055, /* [14][0x18]*/ 0x8BEB53C8, 0x32108820, 0x236DE259, 0x9A9639B1, 0x019736AB, 0xB86CED43, 0xA911873A, 0x10EA5CD2, /* [14][0x20]*/ 0x88C53E9E, 0x313EE576, 0x20438F0F, 0x99B854E7, 0x02B95BFD, 0xBB428015, 0xAA3FEA6C, 0x13C43184, /* [14][0x28]*/ 0x474CF219, 0xFEB729F1, 0xEFCA4388, 0x56319860, 0xCD30977A, 0x74CB4C92, 0x65B626EB, 0xDC4DFD03, /* [14][0x30]*/ 0xCCA7A1D1, 0x755C7A39, 0x64211040, 0xDDDACBA8, 0x46DBC4B2, 0xFF201F5A, 0xEE5D7523, 0x57A6AECB, /* [14][0x38]*/ 0x032E6D56, 0xBAD5B6BE, 0xABA8DCC7, 0x1253072F, 0x89520835, 0x30A9D3DD, 0x21D4B9A4, 0x982F624C, /* [14][0x40]*/ 0xCAFB7B7D, 0x7300A095, 0x627DCAEC, 0xDB861104, 0x40871E1E, 0xF97CC5F6, 0xE801AF8F, 0x51FA7467, /* [14][0x48]*/ 0x0572B7FA, 0xBC896C12, 0xADF4066B, 0x140FDD83, 0x8F0ED299, 0x36F50971, 0x27886308, 0x9E73B8E0, /* [14][0x50]*/ 0x8E99E432, 0x37623FDA, 0x261F55A3, 0x9FE48E4B, 0x04E58151, 0xBD1E5AB9, 0xAC6330C0, 0x1598EB28, /* [14][0x58]*/ 0x411028B5, 0xF8EBF35D, 0xE9969924, 0x506D42CC, 0xCB6C4DD6, 0x7297963E, 0x63EAFC47, 0xDA1127AF, /* [14][0x60]*/ 0x423E45E3, 0xFBC59E0B, 0xEAB8F472, 0x53432F9A, 0xC8422080, 0x71B9FB68, 0x60C49111, 0xD93F4AF9, /* [14][0x68]*/ 0x8DB78964, 0x344C528C, 0x253138F5, 0x9CCAE31D, 0x07CBEC07, 0xBE3037EF, 0xAF4D5D96, 0x16B6867E, /* [14][0x70]*/ 0x065CDAAC, 0xBFA70144, 0xAEDA6B3D, 0x1721B0D5, 0x8C20BFCF, 0x35DB6427, 0x24A60E5E, 0x9D5DD5B6, /* [14][0x78]*/ 0xC9D5162B, 0x702ECDC3, 0x6153A7BA, 0xD8A87C52, 0x43A97348, 0xFA52A8A0, 0xEB2FC2D9, 
0x52D41931, /* [14][0x80]*/ 0x4E87F0BB, 0xF77C2B53, 0xE601412A, 0x5FFA9AC2, 0xC4FB95D8, 0x7D004E30, 0x6C7D2449, 0xD586FFA1, /* [14][0x88]*/ 0x810E3C3C, 0x38F5E7D4, 0x29888DAD, 0x90735645, 0x0B72595F, 0xB28982B7, 0xA3F4E8CE, 0x1A0F3326, /* [14][0x90]*/ 0x0AE56FF4, 0xB31EB41C, 0xA263DE65, 0x1B98058D, 0x80990A97, 0x3962D17F, 0x281FBB06, 0x91E460EE, /* [14][0x98]*/ 0xC56CA373, 0x7C97789B, 0x6DEA12E2, 0xD411C90A, 0x4F10C610, 0xF6EB1DF8, 0xE7967781, 0x5E6DAC69, /* [14][0xa0]*/ 0xC642CE25, 0x7FB915CD, 0x6EC47FB4, 0xD73FA45C, 0x4C3EAB46, 0xF5C570AE, 0xE4B81AD7, 0x5D43C13F, /* [14][0xa8]*/ 0x09CB02A2, 0xB030D94A, 0xA14DB333, 0x18B668DB, 0x83B767C1, 0x3A4CBC29, 0x2B31D650, 0x92CA0DB8, /* [14][0xb0]*/ 0x8220516A, 0x3BDB8A82, 0x2AA6E0FB, 0x935D3B13, 0x085C3409, 0xB1A7EFE1, 0xA0DA8598, 0x19215E70, /* [14][0xb8]*/ 0x4DA99DED, 0xF4524605, 0xE52F2C7C, 0x5CD4F794, 0xC7D5F88E, 0x7E2E2366, 0x6F53491F, 0xD6A892F7, /* [14][0xc0]*/ 0x847C8BC6, 0x3D87502E, 0x2CFA3A57, 0x9501E1BF, 0x0E00EEA5, 0xB7FB354D, 0xA6865F34, 0x1F7D84DC, /* [14][0xc8]*/ 0x4BF54741, 0xF20E9CA9, 0xE373F6D0, 0x5A882D38, 0xC1892222, 0x7872F9CA, 0x690F93B3, 0xD0F4485B, /* [14][0xd0]*/ 0xC01E1489, 0x79E5CF61, 0x6898A518, 0xD1637EF0, 0x4A6271EA, 0xF399AA02, 0xE2E4C07B, 0x5B1F1B93, /* [14][0xd8]*/ 0x0F97D80E, 0xB66C03E6, 0xA711699F, 0x1EEAB277, 0x85EBBD6D, 0x3C106685, 0x2D6D0CFC, 0x9496D714, /* [14][0xe0]*/ 0x0CB9B558, 0xB5426EB0, 0xA43F04C9, 0x1DC4DF21, 0x86C5D03B, 0x3F3E0BD3, 0x2E4361AA, 0x97B8BA42, /* [14][0xe8]*/ 0xC33079DF, 0x7ACBA237, 0x6BB6C84E, 0xD24D13A6, 0x494C1CBC, 0xF0B7C754, 0xE1CAAD2D, 0x583176C5, /* [14][0xf0]*/ 0x48DB2A17, 0xF120F1FF, 0xE05D9B86, 0x59A6406E, 0xC2A74F74, 0x7B5C949C, 0x6A21FEE5, 0xD3DA250D, /* [14][0xf8]*/ 0x8752E690, 0x3EA93D78, 0x2FD45701, 0x962F8CE9, 0x0D2E83F3, 0xB4D5581B, 0xA5A83262, 0x1C53E98A /* [14][0x100]*/ }, { 0x00000000, 0xAE689191, 0x87A02563, 0x29C8B4F2, 0xD4314C87, 0x7A59DD16, 0x539169E4, 0xFDF9F875, /* [15][0x08]*/ 0x73139F4F, 0xDD7B0EDE, 0xF4B3BA2C, 0x5ADB2BBD, 0xA722D3C8, 0x094A4259, 0x2082F6AB, 0x8EEA673A, /* [15][0x10]*/ 0xE6273E9E, 0x484FAF0F, 0x61871BFD, 0xCFEF8A6C, 0x32167219, 0x9C7EE388, 0xB5B6577A, 0x1BDEC6EB, /* [15][0x18]*/ 0x9534A1D1, 0x3B5C3040, 0x129484B2, 0xBCFC1523, 0x4105ED56, 0xEF6D7CC7, 0xC6A5C835, 0x68CD59A4, /* [15][0x20]*/ 0x173F7B7D, 0xB957EAEC, 0x909F5E1E, 0x3EF7CF8F, 0xC30E37FA, 0x6D66A66B, 0x44AE1299, 0xEAC68308, /* [15][0x28]*/ 0x642CE432, 0xCA4475A3, 0xE38CC151, 0x4DE450C0, 0xB01DA8B5, 0x1E753924, 0x37BD8DD6, 0x99D51C47, /* [15][0x30]*/ 0xF11845E3, 0x5F70D472, 0x76B86080, 0xD8D0F111, 0x25290964, 0x8B4198F5, 0xA2892C07, 0x0CE1BD96, /* [15][0x38]*/ 0x820BDAAC, 0x2C634B3D, 0x05ABFFCF, 0xABC36E5E, 0x563A962B, 0xF85207BA, 0xD19AB348, 0x7FF222D9, /* [15][0x40]*/ 0x2E7EF6FA, 0x8016676B, 0xA9DED399, 0x07B64208, 0xFA4FBA7D, 0x54272BEC, 0x7DEF9F1E, 0xD3870E8F, /* [15][0x48]*/ 0x5D6D69B5, 0xF305F824, 0xDACD4CD6, 0x74A5DD47, 0x895C2532, 0x2734B4A3, 0x0EFC0051, 0xA09491C0, /* [15][0x50]*/ 0xC859C864, 0x663159F5, 0x4FF9ED07, 0xE1917C96, 0x1C6884E3, 0xB2001572, 0x9BC8A180, 0x35A03011, /* [15][0x58]*/ 0xBB4A572B, 0x1522C6BA, 0x3CEA7248, 0x9282E3D9, 0x6F7B1BAC, 0xC1138A3D, 0xE8DB3ECF, 0x46B3AF5E, /* [15][0x60]*/ 0x39418D87, 0x97291C16, 0xBEE1A8E4, 0x10893975, 0xED70C100, 0x43185091, 0x6AD0E463, 0xC4B875F2, /* [15][0x68]*/ 0x4A5212C8, 0xE43A8359, 0xCDF237AB, 0x639AA63A, 0x9E635E4F, 0x300BCFDE, 0x19C37B2C, 0xB7ABEABD, /* [15][0x70]*/ 0xDF66B319, 0x710E2288, 0x58C6967A, 0xF6AE07EB, 0x0B57FF9E, 0xA53F6E0F, 0x8CF7DAFD, 0x229F4B6C, /* [15][0x78]*/ 0xAC752C56, 0x021DBDC7, 0x2BD50935, 0x85BD98A4, 
0x784460D1, 0xD62CF140, 0xFFE445B2, 0x518CD423, /* [15][0x80]*/ 0x5CFDEDF4, 0xF2957C65, 0xDB5DC897, 0x75355906, 0x88CCA173, 0x26A430E2, 0x0F6C8410, 0xA1041581, /* [15][0x88]*/ 0x2FEE72BB, 0x8186E32A, 0xA84E57D8, 0x0626C649, 0xFBDF3E3C, 0x55B7AFAD, 0x7C7F1B5F, 0xD2178ACE, /* [15][0x90]*/ 0xBADAD36A, 0x14B242FB, 0x3D7AF609, 0x93126798, 0x6EEB9FED, 0xC0830E7C, 0xE94BBA8E, 0x47232B1F, /* [15][0x98]*/ 0xC9C94C25, 0x67A1DDB4, 0x4E696946, 0xE001F8D7, 0x1DF800A2, 0xB3909133, 0x9A5825C1, 0x3430B450, /* [15][0xa0]*/ 0x4BC29689, 0xE5AA0718, 0xCC62B3EA, 0x620A227B, 0x9FF3DA0E, 0x319B4B9F, 0x1853FF6D, 0xB63B6EFC, /* [15][0xa8]*/ 0x38D109C6, 0x96B99857, 0xBF712CA5, 0x1119BD34, 0xECE04541, 0x4288D4D0, 0x6B406022, 0xC528F1B3, /* [15][0xb0]*/ 0xADE5A817, 0x038D3986, 0x2A458D74, 0x842D1CE5, 0x79D4E490, 0xD7BC7501, 0xFE74C1F3, 0x501C5062, /* [15][0xb8]*/ 0xDEF63758, 0x709EA6C9, 0x5956123B, 0xF73E83AA, 0x0AC77BDF, 0xA4AFEA4E, 0x8D675EBC, 0x230FCF2D, /* [15][0xc0]*/ 0x72831B0E, 0xDCEB8A9F, 0xF5233E6D, 0x5B4BAFFC, 0xA6B25789, 0x08DAC618, 0x211272EA, 0x8F7AE37B, /* [15][0xc8]*/ 0x01908441, 0xAFF815D0, 0x8630A122, 0x285830B3, 0xD5A1C8C6, 0x7BC95957, 0x5201EDA5, 0xFC697C34, /* [15][0xd0]*/ 0x94A42590, 0x3ACCB401, 0x130400F3, 0xBD6C9162, 0x40956917, 0xEEFDF886, 0xC7354C74, 0x695DDDE5, /* [15][0xd8]*/ 0xE7B7BADF, 0x49DF2B4E, 0x60179FBC, 0xCE7F0E2D, 0x3386F658, 0x9DEE67C9, 0xB426D33B, 0x1A4E42AA, /* [15][0xe0]*/ 0x65BC6073, 0xCBD4F1E2, 0xE21C4510, 0x4C74D481, 0xB18D2CF4, 0x1FE5BD65, 0x362D0997, 0x98459806, /* [15][0xe8]*/ 0x16AFFF3C, 0xB8C76EAD, 0x910FDA5F, 0x3F674BCE, 0xC29EB3BB, 0x6CF6222A, 0x453E96D8, 0xEB560749, /* [15][0xf0]*/ 0x839B5EED, 0x2DF3CF7C, 0x043B7B8E, 0xAA53EA1F, 0x57AA126A, 0xF9C283FB, 0xD00A3709, 0x7E62A698, /* [15][0xf8]*/ 0xF088C1A2, 0x5EE05033, 0x7728E4C1, 0xD9407550, 0x24B98D25, 0x8AD11CB4, 0xA319A846, 0x0D7139D7 /* [15][0x100]*/ }}; /** Castagnoli CRC32c (iSCSI) lookup table for slice-by-4/8/16 */ const uint32_t CRC32C_TABLE[16][256] = { { 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, /* [0][0x08]*/ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, /* [0][0x10]*/ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, /* [0][0x18]*/ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, /* [0][0x20]*/ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, /* [0][0x28]*/ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, /* [0][0x30]*/ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, /* [0][0x38]*/ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, /* [0][0x40]*/ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, /* [0][0x48]*/ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, /* [0][0x50]*/ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, /* [0][0x58]*/ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, /* [0][0x60]*/ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, /* [0][0x68]*/ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, /* [0][0x70]*/ 0x7198540D, 0x83F3D70E, 
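/*
 * A minimal sketch (not taken from this library) of how a slice-by-4 step can
 * consume tables laid out like CRC32C_TABLE, assuming entry [k][b] holds the
 * CRC of byte b followed by k zero bytes and that [0] is the plain one-byte
 * table (its first entries match the standard reflected Castagnoli table).
 * The function name and calling convention below are hypothetical; crc is the
 * finalized CRC value, with the customary bit inversion applied inside:
 *
 *     static uint32_t crc32c_sw_slice4(uint32_t crc, const uint8_t *p, size_t len) {
 *         crc = ~crc;
 *         while (len >= 4) {
 *             uint32_t w = crc ^ ((uint32_t)p[0] | ((uint32_t)p[1] << 8) |
 *                                 ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24));
 *             crc = CRC32C_TABLE[3][w & 0xFF] ^ CRC32C_TABLE[2][(w >> 8) & 0xFF] ^
 *                   CRC32C_TABLE[1][(w >> 16) & 0xFF] ^ CRC32C_TABLE[0][(w >> 24) & 0xFF];
 *             p += 4;
 *             len -= 4;
 *         }
 *         while (len--) {
 *             crc = CRC32C_TABLE[0][(crc ^ *p++) & 0xFF] ^ (crc >> 8);
 *         }
 *         return ~crc;
 *     }
 *
 * Slice-by-8 and slice-by-16 extend the same idea, combining lookups across
 * tables [0..7] or [0..15] to consume 8 or 16 input bytes per iteration.
 */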
0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, /* [0][0x78]*/ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, /* [0][0x80]*/ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, /* [0][0x88]*/ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, /* [0][0x90]*/ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, /* [0][0x98]*/ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, /* [0][0xa0]*/ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, /* [0][0xa8]*/ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, /* [0][0xb0]*/ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, /* [0][0xb8]*/ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, /* [0][0xc0]*/ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, /* [0][0xc8]*/ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, /* [0][0xd0]*/ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, /* [0][0xd8]*/ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, /* [0][0xe0]*/ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, /* [0][0xe8]*/ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, /* [0][0xf0]*/ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, /* [0][0xf8]*/ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351 /* [0][0x100]*/ }, { 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, /* [1][0x08]*/ 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, /* [1][0x10]*/ 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4, /* [1][0x18]*/ 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, /* [1][0x20]*/ 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, /* [1][0x28]*/ 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, /* [1][0x30]*/ 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, /* [1][0x38]*/ 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, /* [1][0x40]*/ 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, /* [1][0x48]*/ 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, /* [1][0x50]*/ 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, /* [1][0x58]*/ 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, /* [1][0x60]*/ 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, /* [1][0x68]*/ 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, /* [1][0x70]*/ 0xBF59D487, 
0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, /* [1][0x78]*/ 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, /* [1][0x80]*/ 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, /* [1][0x88]*/ 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, /* [1][0x90]*/ 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, /* [1][0x98]*/ 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, /* [1][0xa0]*/ 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, /* [1][0xa8]*/ 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, /* [1][0xb0]*/ 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, /* [1][0xb8]*/ 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, /* [1][0xc0]*/ 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, /* [1][0xc8]*/ 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, /* [1][0xd0]*/ 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, /* [1][0xd8]*/ 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, /* [1][0xe0]*/ 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, /* [1][0xe8]*/ 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, /* [1][0xf0]*/ 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, /* [1][0xf8]*/ 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483 /* [1][0x100]*/ }, { 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, /* [2][0x08]*/ 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, /* [2][0x10]*/ 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, /* [2][0x18]*/ 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, /* [2][0x20]*/ 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, /* [2][0x28]*/ 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, /* [2][0x30]*/ 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, /* [2][0x38]*/ 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, /* [2][0x40]*/ 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, /* [2][0x48]*/ 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, /* [2][0x50]*/ 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, /* [2][0x58]*/ 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, /* [2][0x60]*/ 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, /* [2][0x68]*/ 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, /* [2][0x70]*/ 
0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, /* [2][0x78]*/ 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, /* [2][0x80]*/ 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, /* [2][0x88]*/ 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, /* [2][0x90]*/ 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, /* [2][0x98]*/ 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, /* [2][0xa0]*/ 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, /* [2][0xa8]*/ 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, /* [2][0xb0]*/ 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, /* [2][0xb8]*/ 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, /* [2][0xc0]*/ 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, /* [2][0xc8]*/ 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, /* [2][0xd0]*/ 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, /* [2][0xd8]*/ 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, /* [2][0xe0]*/ 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, /* [2][0xe8]*/ 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, /* [2][0xf0]*/ 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, /* [2][0xf8]*/ 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8 /* [2][0x100]*/ }, { 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, /* [3][0x08]*/ 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, /* [3][0x10]*/ 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, /* [3][0x18]*/ 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, /* [3][0x20]*/ 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, /* [3][0x28]*/ 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, /* [3][0x30]*/ 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, /* [3][0x38]*/ 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, /* [3][0x40]*/ 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, /* [3][0x48]*/ 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB, /* [3][0x50]*/ 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, /* [3][0x58]*/ 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, /* [3][0x60]*/ 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, /* [3][0x68]*/ 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, /* 
[3][0x70]*/ 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, /* [3][0x78]*/ 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, /* [3][0x80]*/ 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, /* [3][0x88]*/ 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, /* [3][0x90]*/ 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, /* [3][0x98]*/ 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, /* [3][0xa0]*/ 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, /* [3][0xa8]*/ 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, /* [3][0xb0]*/ 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, /* [3][0xb8]*/ 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, /* [3][0xc0]*/ 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, /* [3][0xc8]*/ 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, /* [3][0xd0]*/ 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, /* [3][0xd8]*/ 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, /* [3][0xe0]*/ 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, /* [3][0xe8]*/ 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, /* [3][0xf0]*/ 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, /* [3][0xf8]*/ 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842 /* [3][0x100]*/ }, { 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, /* [4][0x08]*/ 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, /* [4][0x10]*/ 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, /* [4][0x18]*/ 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, /* [4][0x20]*/ 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, /* [4][0x28]*/ 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, /* [4][0x30]*/ 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, /* [4][0x38]*/ 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, /* [4][0x40]*/ 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, /* [4][0x48]*/ 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, /* [4][0x50]*/ 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, /* [4][0x58]*/ 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, /* [4][0x60]*/ 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, /* [4][0x68]*/ 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, 
/* [4][0x70]*/ 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, /* [4][0x78]*/ 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, /* [4][0x80]*/ 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, /* [4][0x88]*/ 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, /* [4][0x90]*/ 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, /* [4][0x98]*/ 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, /* [4][0xa0]*/ 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, /* [4][0xa8]*/ 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, /* [4][0xb0]*/ 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, /* [4][0xb8]*/ 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, /* [4][0xc0]*/ 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, /* [4][0xc8]*/ 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, /* [4][0xd0]*/ 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, /* [4][0xd8]*/ 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, /* [4][0xe0]*/ 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, /* [4][0xe8]*/ 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, /* [4][0xf0]*/ 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, /* [4][0xf8]*/ 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3 /* [4][0x100]*/ }, { 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, /* [5][0x08]*/ 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, /* [5][0x10]*/ 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, /* [5][0x18]*/ 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, /* [5][0x20]*/ 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, /* [5][0x28]*/ 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, /* [5][0x30]*/ 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, /* [5][0x38]*/ 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, /* [5][0x40]*/ 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, /* [5][0x48]*/ 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, /* [5][0x50]*/ 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, /* [5][0x58]*/ 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, /* [5][0x60]*/ 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, /* [5][0x68]*/ 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 
0x6DBFA9D4, /* [5][0x70]*/ 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, /* [5][0x78]*/ 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, /* [5][0x80]*/ 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, /* [5][0x88]*/ 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, /* [5][0x90]*/ 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, /* [5][0x98]*/ 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, /* [5][0xa0]*/ 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, /* [5][0xa8]*/ 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, /* [5][0xb0]*/ 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, /* [5][0xb8]*/ 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, /* [5][0xc0]*/ 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, /* [5][0xc8]*/ 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, /* [5][0xd0]*/ 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, /* [5][0xd8]*/ 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, /* [5][0xe0]*/ 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, /* [5][0xe8]*/ 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, /* [5][0xf0]*/ 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, /* [5][0xf8]*/ 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C /* [5][0x100]*/ }, { 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, /* [6][0x08]*/ 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, /* [6][0x10]*/ 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, /* [6][0x18]*/ 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, /* [6][0x20]*/ 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, /* [6][0x28]*/ 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, /* [6][0x30]*/ 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, /* [6][0x38]*/ 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, /* [6][0x40]*/ 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, /* [6][0x48]*/ 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, /* [6][0x50]*/ 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, /* [6][0x58]*/ 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, /* [6][0x60]*/ 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, /* [6][0x68]*/ 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 
0x7C27E5D5, 0x1424C91D, /* [6][0x70]*/ 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, /* [6][0x78]*/ 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, /* [6][0x80]*/ 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, /* [6][0x88]*/ 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, /* [6][0x90]*/ 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, /* [6][0x98]*/ 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, /* [6][0xa0]*/ 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, /* [6][0xa8]*/ 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, /* [6][0xb0]*/ 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, /* [6][0xb8]*/ 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, /* [6][0xc0]*/ 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, /* [6][0xc8]*/ 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, /* [6][0xd0]*/ 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, /* [6][0xd8]*/ 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, /* [6][0xe0]*/ 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, /* [6][0xe8]*/ 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, /* [6][0xf0]*/ 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, /* [6][0xf8]*/ 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F /* [6][0x100]*/ }, { 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, /* [7][0x08]*/ 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, /* [7][0x10]*/ 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, /* [7][0x18]*/ 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, /* [7][0x20]*/ 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, /* [7][0x28]*/ 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, /* [7][0x30]*/ 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, /* [7][0x38]*/ 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, /* [7][0x40]*/ 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, /* [7][0x48]*/ 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, /* [7][0x50]*/ 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, /* [7][0x58]*/ 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, /* [7][0x60]*/ 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, /* [7][0x68]*/ 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 
0x3D1A553B, 0xE65ED252, 0xAF62AF75, /* [7][0x70]*/ 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, /* [7][0x78]*/ 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, /* [7][0x80]*/ 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, /* [7][0x88]*/ 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, /* [7][0x90]*/ 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, /* [7][0x98]*/ 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, /* [7][0xa0]*/ 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, /* [7][0xa8]*/ 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, /* [7][0xb0]*/ 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, /* [7][0xb8]*/ 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, /* [7][0xc0]*/ 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, /* [7][0xc8]*/ 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, /* [7][0xd0]*/ 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, /* [7][0xd8]*/ 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, /* [7][0xe0]*/ 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, /* [7][0xe8]*/ 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, /* [7][0xf0]*/ 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, /* [7][0xf8]*/ 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 /* [7][0x100]*/ }, { 0x00000000, 0xF43ED648, 0xED91DA61, 0x19AF0C29, 0xDECFC233, 0x2AF1147B, 0x335E1852, 0xC760CE1A, /* [8][0x08]*/ 0xB873F297, 0x4C4D24DF, 0x55E228F6, 0xA1DCFEBE, 0x66BC30A4, 0x9282E6EC, 0x8B2DEAC5, 0x7F133C8D, /* [8][0x10]*/ 0x750B93DF, 0x81354597, 0x989A49BE, 0x6CA49FF6, 0xABC451EC, 0x5FFA87A4, 0x46558B8D, 0xB26B5DC5, /* [8][0x18]*/ 0xCD786148, 0x3946B700, 0x20E9BB29, 0xD4D76D61, 0x13B7A37B, 0xE7897533, 0xFE26791A, 0x0A18AF52, /* [8][0x20]*/ 0xEA1727BE, 0x1E29F1F6, 0x0786FDDF, 0xF3B82B97, 0x34D8E58D, 0xC0E633C5, 0xD9493FEC, 0x2D77E9A4, /* [8][0x28]*/ 0x5264D529, 0xA65A0361, 0xBFF50F48, 0x4BCBD900, 0x8CAB171A, 0x7895C152, 0x613ACD7B, 0x95041B33, /* [8][0x30]*/ 0x9F1CB461, 0x6B226229, 0x728D6E00, 0x86B3B848, 0x41D37652, 0xB5EDA01A, 0xAC42AC33, 0x587C7A7B, /* [8][0x38]*/ 0x276F46F6, 0xD35190BE, 0xCAFE9C97, 0x3EC04ADF, 0xF9A084C5, 0x0D9E528D, 0x14315EA4, 0xE00F88EC, /* [8][0x40]*/ 0xD1C2398D, 0x25FCEFC5, 0x3C53E3EC, 0xC86D35A4, 0x0F0DFBBE, 0xFB332DF6, 0xE29C21DF, 0x16A2F797, /* [8][0x48]*/ 0x69B1CB1A, 0x9D8F1D52, 0x8420117B, 0x701EC733, 0xB77E0929, 0x4340DF61, 0x5AEFD348, 0xAED10500, /* [8][0x50]*/ 0xA4C9AA52, 0x50F77C1A, 0x49587033, 0xBD66A67B, 0x7A066861, 0x8E38BE29, 0x9797B200, 0x63A96448, /* [8][0x58]*/ 0x1CBA58C5, 0xE8848E8D, 0xF12B82A4, 0x051554EC, 0xC2759AF6, 0x364B4CBE, 0x2FE44097, 0xDBDA96DF, /* [8][0x60]*/ 0x3BD51E33, 0xCFEBC87B, 0xD644C452, 0x227A121A, 0xE51ADC00, 0x11240A48, 0x088B0661, 0xFCB5D029, /* [8][0x68]*/ 0x83A6ECA4, 0x77983AEC, 0x6E3736C5, 0x9A09E08D, 
0x5D692E97, 0xA957F8DF, 0xB0F8F4F6, 0x44C622BE, /* [8][0x70]*/ 0x4EDE8DEC, 0xBAE05BA4, 0xA34F578D, 0x577181C5, 0x90114FDF, 0x642F9997, 0x7D8095BE, 0x89BE43F6, /* [8][0x78]*/ 0xF6AD7F7B, 0x0293A933, 0x1B3CA51A, 0xEF027352, 0x2862BD48, 0xDC5C6B00, 0xC5F36729, 0x31CDB161, /* [8][0x80]*/ 0xA66805EB, 0x5256D3A3, 0x4BF9DF8A, 0xBFC709C2, 0x78A7C7D8, 0x8C991190, 0x95361DB9, 0x6108CBF1, /* [8][0x88]*/ 0x1E1BF77C, 0xEA252134, 0xF38A2D1D, 0x07B4FB55, 0xC0D4354F, 0x34EAE307, 0x2D45EF2E, 0xD97B3966, /* [8][0x90]*/ 0xD3639634, 0x275D407C, 0x3EF24C55, 0xCACC9A1D, 0x0DAC5407, 0xF992824F, 0xE03D8E66, 0x1403582E, /* [8][0x98]*/ 0x6B1064A3, 0x9F2EB2EB, 0x8681BEC2, 0x72BF688A, 0xB5DFA690, 0x41E170D8, 0x584E7CF1, 0xAC70AAB9, /* [8][0xa0]*/ 0x4C7F2255, 0xB841F41D, 0xA1EEF834, 0x55D02E7C, 0x92B0E066, 0x668E362E, 0x7F213A07, 0x8B1FEC4F, /* [8][0xa8]*/ 0xF40CD0C2, 0x0032068A, 0x199D0AA3, 0xEDA3DCEB, 0x2AC312F1, 0xDEFDC4B9, 0xC752C890, 0x336C1ED8, /* [8][0xb0]*/ 0x3974B18A, 0xCD4A67C2, 0xD4E56BEB, 0x20DBBDA3, 0xE7BB73B9, 0x1385A5F1, 0x0A2AA9D8, 0xFE147F90, /* [8][0xb8]*/ 0x8107431D, 0x75399555, 0x6C96997C, 0x98A84F34, 0x5FC8812E, 0xABF65766, 0xB2595B4F, 0x46678D07, /* [8][0xc0]*/ 0x77AA3C66, 0x8394EA2E, 0x9A3BE607, 0x6E05304F, 0xA965FE55, 0x5D5B281D, 0x44F42434, 0xB0CAF27C, /* [8][0xc8]*/ 0xCFD9CEF1, 0x3BE718B9, 0x22481490, 0xD676C2D8, 0x11160CC2, 0xE528DA8A, 0xFC87D6A3, 0x08B900EB, /* [8][0xd0]*/ 0x02A1AFB9, 0xF69F79F1, 0xEF3075D8, 0x1B0EA390, 0xDC6E6D8A, 0x2850BBC2, 0x31FFB7EB, 0xC5C161A3, /* [8][0xd8]*/ 0xBAD25D2E, 0x4EEC8B66, 0x5743874F, 0xA37D5107, 0x641D9F1D, 0x90234955, 0x898C457C, 0x7DB29334, /* [8][0xe0]*/ 0x9DBD1BD8, 0x6983CD90, 0x702CC1B9, 0x841217F1, 0x4372D9EB, 0xB74C0FA3, 0xAEE3038A, 0x5ADDD5C2, /* [8][0xe8]*/ 0x25CEE94F, 0xD1F03F07, 0xC85F332E, 0x3C61E566, 0xFB012B7C, 0x0F3FFD34, 0x1690F11D, 0xE2AE2755, /* [8][0xf0]*/ 0xE8B68807, 0x1C885E4F, 0x05275266, 0xF119842E, 0x36794A34, 0xC2479C7C, 0xDBE89055, 0x2FD6461D, /* [8][0xf8]*/ 0x50C57A90, 0xA4FBACD8, 0xBD54A0F1, 0x496A76B9, 0x8E0AB8A3, 0x7A346EEB, 0x639B62C2, 0x97A5B48A /* [8][0x100]*/ }, { 0x00000000, 0xCB567BA5, 0x934081BB, 0x5816FA1E, 0x236D7587, 0xE83B0E22, 0xB02DF43C, 0x7B7B8F99, /* [9][0x08]*/ 0x46DAEB0E, 0x8D8C90AB, 0xD59A6AB5, 0x1ECC1110, 0x65B79E89, 0xAEE1E52C, 0xF6F71F32, 0x3DA16497, /* [9][0x10]*/ 0x8DB5D61C, 0x46E3ADB9, 0x1EF557A7, 0xD5A32C02, 0xAED8A39B, 0x658ED83E, 0x3D982220, 0xF6CE5985, /* [9][0x18]*/ 0xCB6F3D12, 0x003946B7, 0x582FBCA9, 0x9379C70C, 0xE8024895, 0x23543330, 0x7B42C92E, 0xB014B28B, /* [9][0x20]*/ 0x1E87DAC9, 0xD5D1A16C, 0x8DC75B72, 0x469120D7, 0x3DEAAF4E, 0xF6BCD4EB, 0xAEAA2EF5, 0x65FC5550, /* [9][0x28]*/ 0x585D31C7, 0x930B4A62, 0xCB1DB07C, 0x004BCBD9, 0x7B304440, 0xB0663FE5, 0xE870C5FB, 0x2326BE5E, /* [9][0x30]*/ 0x93320CD5, 0x58647770, 0x00728D6E, 0xCB24F6CB, 0xB05F7952, 0x7B0902F7, 0x231FF8E9, 0xE849834C, /* [9][0x38]*/ 0xD5E8E7DB, 0x1EBE9C7E, 0x46A86660, 0x8DFE1DC5, 0xF685925C, 0x3DD3E9F9, 0x65C513E7, 0xAE936842, /* [9][0x40]*/ 0x3D0FB592, 0xF659CE37, 0xAE4F3429, 0x65194F8C, 0x1E62C015, 0xD534BBB0, 0x8D2241AE, 0x46743A0B, /* [9][0x48]*/ 0x7BD55E9C, 0xB0832539, 0xE895DF27, 0x23C3A482, 0x58B82B1B, 0x93EE50BE, 0xCBF8AAA0, 0x00AED105, /* [9][0x50]*/ 0xB0BA638E, 0x7BEC182B, 0x23FAE235, 0xE8AC9990, 0x93D71609, 0x58816DAC, 0x009797B2, 0xCBC1EC17, /* [9][0x58]*/ 0xF6608880, 0x3D36F325, 0x6520093B, 0xAE76729E, 0xD50DFD07, 0x1E5B86A2, 0x464D7CBC, 0x8D1B0719, /* [9][0x60]*/ 0x23886F5B, 0xE8DE14FE, 0xB0C8EEE0, 0x7B9E9545, 0x00E51ADC, 0xCBB36179, 0x93A59B67, 0x58F3E0C2, /* [9][0x68]*/ 0x65528455, 0xAE04FFF0, 0xF61205EE, 
0x3D447E4B, 0x463FF1D2, 0x8D698A77, 0xD57F7069, 0x1E290BCC, /* [9][0x70]*/ 0xAE3DB947, 0x656BC2E2, 0x3D7D38FC, 0xF62B4359, 0x8D50CCC0, 0x4606B765, 0x1E104D7B, 0xD54636DE, /* [9][0x78]*/ 0xE8E75249, 0x23B129EC, 0x7BA7D3F2, 0xB0F1A857, 0xCB8A27CE, 0x00DC5C6B, 0x58CAA675, 0x939CDDD0, /* [9][0x80]*/ 0x7A1F6B24, 0xB1491081, 0xE95FEA9F, 0x2209913A, 0x59721EA3, 0x92246506, 0xCA329F18, 0x0164E4BD, /* [9][0x88]*/ 0x3CC5802A, 0xF793FB8F, 0xAF850191, 0x64D37A34, 0x1FA8F5AD, 0xD4FE8E08, 0x8CE87416, 0x47BE0FB3, /* [9][0x90]*/ 0xF7AABD38, 0x3CFCC69D, 0x64EA3C83, 0xAFBC4726, 0xD4C7C8BF, 0x1F91B31A, 0x47874904, 0x8CD132A1, /* [9][0x98]*/ 0xB1705636, 0x7A262D93, 0x2230D78D, 0xE966AC28, 0x921D23B1, 0x594B5814, 0x015DA20A, 0xCA0BD9AF, /* [9][0xa0]*/ 0x6498B1ED, 0xAFCECA48, 0xF7D83056, 0x3C8E4BF3, 0x47F5C46A, 0x8CA3BFCF, 0xD4B545D1, 0x1FE33E74, /* [9][0xa8]*/ 0x22425AE3, 0xE9142146, 0xB102DB58, 0x7A54A0FD, 0x012F2F64, 0xCA7954C1, 0x926FAEDF, 0x5939D57A, /* [9][0xb0]*/ 0xE92D67F1, 0x227B1C54, 0x7A6DE64A, 0xB13B9DEF, 0xCA401276, 0x011669D3, 0x590093CD, 0x9256E868, /* [9][0xb8]*/ 0xAFF78CFF, 0x64A1F75A, 0x3CB70D44, 0xF7E176E1, 0x8C9AF978, 0x47CC82DD, 0x1FDA78C3, 0xD48C0366, /* [9][0xc0]*/ 0x4710DEB6, 0x8C46A513, 0xD4505F0D, 0x1F0624A8, 0x647DAB31, 0xAF2BD094, 0xF73D2A8A, 0x3C6B512F, /* [9][0xc8]*/ 0x01CA35B8, 0xCA9C4E1D, 0x928AB403, 0x59DCCFA6, 0x22A7403F, 0xE9F13B9A, 0xB1E7C184, 0x7AB1BA21, /* [9][0xd0]*/ 0xCAA508AA, 0x01F3730F, 0x59E58911, 0x92B3F2B4, 0xE9C87D2D, 0x229E0688, 0x7A88FC96, 0xB1DE8733, /* [9][0xd8]*/ 0x8C7FE3A4, 0x47299801, 0x1F3F621F, 0xD46919BA, 0xAF129623, 0x6444ED86, 0x3C521798, 0xF7046C3D, /* [9][0xe0]*/ 0x5997047F, 0x92C17FDA, 0xCAD785C4, 0x0181FE61, 0x7AFA71F8, 0xB1AC0A5D, 0xE9BAF043, 0x22EC8BE6, /* [9][0xe8]*/ 0x1F4DEF71, 0xD41B94D4, 0x8C0D6ECA, 0x475B156F, 0x3C209AF6, 0xF776E153, 0xAF601B4D, 0x643660E8, /* [9][0xf0]*/ 0xD422D263, 0x1F74A9C6, 0x476253D8, 0x8C34287D, 0xF74FA7E4, 0x3C19DC41, 0x640F265F, 0xAF595DFA, /* [9][0xf8]*/ 0x92F8396D, 0x59AE42C8, 0x01B8B8D6, 0xCAEEC373, 0xB1954CEA, 0x7AC3374F, 0x22D5CD51, 0xE983B6F4 /* [9][0x100]*/ }, { 0x00000000, 0x9771F7C1, 0x2B0F9973, 0xBC7E6EB2, 0x561F32E6, 0xC16EC527, 0x7D10AB95, 0xEA615C54, /* [10][0x08]*/ 0xAC3E65CC, 0x3B4F920D, 0x8731FCBF, 0x10400B7E, 0xFA21572A, 0x6D50A0EB, 0xD12ECE59, 0x465F3998, /* [10][0x10]*/ 0x5D90BD69, 0xCAE14AA8, 0x769F241A, 0xE1EED3DB, 0x0B8F8F8F, 0x9CFE784E, 0x208016FC, 0xB7F1E13D, /* [10][0x18]*/ 0xF1AED8A5, 0x66DF2F64, 0xDAA141D6, 0x4DD0B617, 0xA7B1EA43, 0x30C01D82, 0x8CBE7330, 0x1BCF84F1, /* [10][0x20]*/ 0xBB217AD2, 0x2C508D13, 0x902EE3A1, 0x075F1460, 0xED3E4834, 0x7A4FBFF5, 0xC631D147, 0x51402686, /* [10][0x28]*/ 0x171F1F1E, 0x806EE8DF, 0x3C10866D, 0xAB6171AC, 0x41002DF8, 0xD671DA39, 0x6A0FB48B, 0xFD7E434A, /* [10][0x30]*/ 0xE6B1C7BB, 0x71C0307A, 0xCDBE5EC8, 0x5ACFA909, 0xB0AEF55D, 0x27DF029C, 0x9BA16C2E, 0x0CD09BEF, /* [10][0x38]*/ 0x4A8FA277, 0xDDFE55B6, 0x61803B04, 0xF6F1CCC5, 0x1C909091, 0x8BE16750, 0x379F09E2, 0xA0EEFE23, /* [10][0x40]*/ 0x73AE8355, 0xE4DF7494, 0x58A11A26, 0xCFD0EDE7, 0x25B1B1B3, 0xB2C04672, 0x0EBE28C0, 0x99CFDF01, /* [10][0x48]*/ 0xDF90E699, 0x48E11158, 0xF49F7FEA, 0x63EE882B, 0x898FD47F, 0x1EFE23BE, 0xA2804D0C, 0x35F1BACD, /* [10][0x50]*/ 0x2E3E3E3C, 0xB94FC9FD, 0x0531A74F, 0x9240508E, 0x78210CDA, 0xEF50FB1B, 0x532E95A9, 0xC45F6268, /* [10][0x58]*/ 0x82005BF0, 0x1571AC31, 0xA90FC283, 0x3E7E3542, 0xD41F6916, 0x436E9ED7, 0xFF10F065, 0x686107A4, /* [10][0x60]*/ 0xC88FF987, 0x5FFE0E46, 0xE38060F4, 0x74F19735, 0x9E90CB61, 0x09E13CA0, 0xB59F5212, 0x22EEA5D3, /* [10][0x68]*/ 0x64B19C4B, 
0xF3C06B8A, 0x4FBE0538, 0xD8CFF2F9, 0x32AEAEAD, 0xA5DF596C, 0x19A137DE, 0x8ED0C01F, /* [10][0x70]*/ 0x951F44EE, 0x026EB32F, 0xBE10DD9D, 0x29612A5C, 0xC3007608, 0x547181C9, 0xE80FEF7B, 0x7F7E18BA, /* [10][0x78]*/ 0x39212122, 0xAE50D6E3, 0x122EB851, 0x855F4F90, 0x6F3E13C4, 0xF84FE405, 0x44318AB7, 0xD3407D76, /* [10][0x80]*/ 0xE75D06AA, 0x702CF16B, 0xCC529FD9, 0x5B236818, 0xB142344C, 0x2633C38D, 0x9A4DAD3F, 0x0D3C5AFE, /* [10][0x88]*/ 0x4B636366, 0xDC1294A7, 0x606CFA15, 0xF71D0DD4, 0x1D7C5180, 0x8A0DA641, 0x3673C8F3, 0xA1023F32, /* [10][0x90]*/ 0xBACDBBC3, 0x2DBC4C02, 0x91C222B0, 0x06B3D571, 0xECD28925, 0x7BA37EE4, 0xC7DD1056, 0x50ACE797, /* [10][0x98]*/ 0x16F3DE0F, 0x818229CE, 0x3DFC477C, 0xAA8DB0BD, 0x40ECECE9, 0xD79D1B28, 0x6BE3759A, 0xFC92825B, /* [10][0xa0]*/ 0x5C7C7C78, 0xCB0D8BB9, 0x7773E50B, 0xE00212CA, 0x0A634E9E, 0x9D12B95F, 0x216CD7ED, 0xB61D202C, /* [10][0xa8]*/ 0xF04219B4, 0x6733EE75, 0xDB4D80C7, 0x4C3C7706, 0xA65D2B52, 0x312CDC93, 0x8D52B221, 0x1A2345E0, /* [10][0xb0]*/ 0x01ECC111, 0x969D36D0, 0x2AE35862, 0xBD92AFA3, 0x57F3F3F7, 0xC0820436, 0x7CFC6A84, 0xEB8D9D45, /* [10][0xb8]*/ 0xADD2A4DD, 0x3AA3531C, 0x86DD3DAE, 0x11ACCA6F, 0xFBCD963B, 0x6CBC61FA, 0xD0C20F48, 0x47B3F889, /* [10][0xc0]*/ 0x94F385FF, 0x0382723E, 0xBFFC1C8C, 0x288DEB4D, 0xC2ECB719, 0x559D40D8, 0xE9E32E6A, 0x7E92D9AB, /* [10][0xc8]*/ 0x38CDE033, 0xAFBC17F2, 0x13C27940, 0x84B38E81, 0x6ED2D2D5, 0xF9A32514, 0x45DD4BA6, 0xD2ACBC67, /* [10][0xd0]*/ 0xC9633896, 0x5E12CF57, 0xE26CA1E5, 0x751D5624, 0x9F7C0A70, 0x080DFDB1, 0xB4739303, 0x230264C2, /* [10][0xd8]*/ 0x655D5D5A, 0xF22CAA9B, 0x4E52C429, 0xD92333E8, 0x33426FBC, 0xA433987D, 0x184DF6CF, 0x8F3C010E, /* [10][0xe0]*/ 0x2FD2FF2D, 0xB8A308EC, 0x04DD665E, 0x93AC919F, 0x79CDCDCB, 0xEEBC3A0A, 0x52C254B8, 0xC5B3A379, /* [10][0xe8]*/ 0x83EC9AE1, 0x149D6D20, 0xA8E30392, 0x3F92F453, 0xD5F3A807, 0x42825FC6, 0xFEFC3174, 0x698DC6B5, /* [10][0xf0]*/ 0x72424244, 0xE533B585, 0x594DDB37, 0xCE3C2CF6, 0x245D70A2, 0xB32C8763, 0x0F52E9D1, 0x98231E10, /* [10][0xf8]*/ 0xDE7C2788, 0x490DD049, 0xF573BEFB, 0x6202493A, 0x8863156E, 0x1F12E2AF, 0xA36C8C1D, 0x341D7BDC /* [10][0x100]*/ }, { 0x00000000, 0x3171D430, 0x62E3A860, 0x53927C50, 0xC5C750C0, 0xF4B684F0, 0xA724F8A0, 0x96552C90, /* [11][0x08]*/ 0x8E62D771, 0xBF130341, 0xEC817F11, 0xDDF0AB21, 0x4BA587B1, 0x7AD45381, 0x29462FD1, 0x1837FBE1, /* [11][0x10]*/ 0x1929D813, 0x28580C23, 0x7BCA7073, 0x4ABBA443, 0xDCEE88D3, 0xED9F5CE3, 0xBE0D20B3, 0x8F7CF483, /* [11][0x18]*/ 0x974B0F62, 0xA63ADB52, 0xF5A8A702, 0xC4D97332, 0x528C5FA2, 0x63FD8B92, 0x306FF7C2, 0x011E23F2, /* [11][0x20]*/ 0x3253B026, 0x03226416, 0x50B01846, 0x61C1CC76, 0xF794E0E6, 0xC6E534D6, 0x95774886, 0xA4069CB6, /* [11][0x28]*/ 0xBC316757, 0x8D40B367, 0xDED2CF37, 0xEFA31B07, 0x79F63797, 0x4887E3A7, 0x1B159FF7, 0x2A644BC7, /* [11][0x30]*/ 0x2B7A6835, 0x1A0BBC05, 0x4999C055, 0x78E81465, 0xEEBD38F5, 0xDFCCECC5, 0x8C5E9095, 0xBD2F44A5, /* [11][0x38]*/ 0xA518BF44, 0x94696B74, 0xC7FB1724, 0xF68AC314, 0x60DFEF84, 0x51AE3BB4, 0x023C47E4, 0x334D93D4, /* [11][0x40]*/ 0x64A7604C, 0x55D6B47C, 0x0644C82C, 0x37351C1C, 0xA160308C, 0x9011E4BC, 0xC38398EC, 0xF2F24CDC, /* [11][0x48]*/ 0xEAC5B73D, 0xDBB4630D, 0x88261F5D, 0xB957CB6D, 0x2F02E7FD, 0x1E7333CD, 0x4DE14F9D, 0x7C909BAD, /* [11][0x50]*/ 0x7D8EB85F, 0x4CFF6C6F, 0x1F6D103F, 0x2E1CC40F, 0xB849E89F, 0x89383CAF, 0xDAAA40FF, 0xEBDB94CF, /* [11][0x58]*/ 0xF3EC6F2E, 0xC29DBB1E, 0x910FC74E, 0xA07E137E, 0x362B3FEE, 0x075AEBDE, 0x54C8978E, 0x65B943BE, /* [11][0x60]*/ 0x56F4D06A, 0x6785045A, 0x3417780A, 0x0566AC3A, 0x933380AA, 0xA242549A, 0xF1D028CA, 
0xC0A1FCFA, /* [11][0x68]*/ 0xD896071B, 0xE9E7D32B, 0xBA75AF7B, 0x8B047B4B, 0x1D5157DB, 0x2C2083EB, 0x7FB2FFBB, 0x4EC32B8B, /* [11][0x70]*/ 0x4FDD0879, 0x7EACDC49, 0x2D3EA019, 0x1C4F7429, 0x8A1A58B9, 0xBB6B8C89, 0xE8F9F0D9, 0xD98824E9, /* [11][0x78]*/ 0xC1BFDF08, 0xF0CE0B38, 0xA35C7768, 0x922DA358, 0x04788FC8, 0x35095BF8, 0x669B27A8, 0x57EAF398, /* [11][0x80]*/ 0xC94EC098, 0xF83F14A8, 0xABAD68F8, 0x9ADCBCC8, 0x0C899058, 0x3DF84468, 0x6E6A3838, 0x5F1BEC08, /* [11][0x88]*/ 0x472C17E9, 0x765DC3D9, 0x25CFBF89, 0x14BE6BB9, 0x82EB4729, 0xB39A9319, 0xE008EF49, 0xD1793B79, /* [11][0x90]*/ 0xD067188B, 0xE116CCBB, 0xB284B0EB, 0x83F564DB, 0x15A0484B, 0x24D19C7B, 0x7743E02B, 0x4632341B, /* [11][0x98]*/ 0x5E05CFFA, 0x6F741BCA, 0x3CE6679A, 0x0D97B3AA, 0x9BC29F3A, 0xAAB34B0A, 0xF921375A, 0xC850E36A, /* [11][0xa0]*/ 0xFB1D70BE, 0xCA6CA48E, 0x99FED8DE, 0xA88F0CEE, 0x3EDA207E, 0x0FABF44E, 0x5C39881E, 0x6D485C2E, /* [11][0xa8]*/ 0x757FA7CF, 0x440E73FF, 0x179C0FAF, 0x26EDDB9F, 0xB0B8F70F, 0x81C9233F, 0xD25B5F6F, 0xE32A8B5F, /* [11][0xb0]*/ 0xE234A8AD, 0xD3457C9D, 0x80D700CD, 0xB1A6D4FD, 0x27F3F86D, 0x16822C5D, 0x4510500D, 0x7461843D, /* [11][0xb8]*/ 0x6C567FDC, 0x5D27ABEC, 0x0EB5D7BC, 0x3FC4038C, 0xA9912F1C, 0x98E0FB2C, 0xCB72877C, 0xFA03534C, /* [11][0xc0]*/ 0xADE9A0D4, 0x9C9874E4, 0xCF0A08B4, 0xFE7BDC84, 0x682EF014, 0x595F2424, 0x0ACD5874, 0x3BBC8C44, /* [11][0xc8]*/ 0x238B77A5, 0x12FAA395, 0x4168DFC5, 0x70190BF5, 0xE64C2765, 0xD73DF355, 0x84AF8F05, 0xB5DE5B35, /* [11][0xd0]*/ 0xB4C078C7, 0x85B1ACF7, 0xD623D0A7, 0xE7520497, 0x71072807, 0x4076FC37, 0x13E48067, 0x22955457, /* [11][0xd8]*/ 0x3AA2AFB6, 0x0BD37B86, 0x584107D6, 0x6930D3E6, 0xFF65FF76, 0xCE142B46, 0x9D865716, 0xACF78326, /* [11][0xe0]*/ 0x9FBA10F2, 0xAECBC4C2, 0xFD59B892, 0xCC286CA2, 0x5A7D4032, 0x6B0C9402, 0x389EE852, 0x09EF3C62, /* [11][0xe8]*/ 0x11D8C783, 0x20A913B3, 0x733B6FE3, 0x424ABBD3, 0xD41F9743, 0xE56E4373, 0xB6FC3F23, 0x878DEB13, /* [11][0xf0]*/ 0x8693C8E1, 0xB7E21CD1, 0xE4706081, 0xD501B4B1, 0x43549821, 0x72254C11, 0x21B73041, 0x10C6E471, /* [11][0xf8]*/ 0x08F11F90, 0x3980CBA0, 0x6A12B7F0, 0x5B6363C0, 0xCD364F50, 0xFC479B60, 0xAFD5E730, 0x9EA43300 /* [11][0x100]*/ }, { 0x00000000, 0x30D23865, 0x61A470CA, 0x517648AF, 0xC348E194, 0xF39AD9F1, 0xA2EC915E, 0x923EA93B, /* [12][0x08]*/ 0x837DB5D9, 0xB3AF8DBC, 0xE2D9C513, 0xD20BFD76, 0x4035544D, 0x70E76C28, 0x21912487, 0x11431CE2, /* [12][0x10]*/ 0x03171D43, 0x33C52526, 0x62B36D89, 0x526155EC, 0xC05FFCD7, 0xF08DC4B2, 0xA1FB8C1D, 0x9129B478, /* [12][0x18]*/ 0x806AA89A, 0xB0B890FF, 0xE1CED850, 0xD11CE035, 0x4322490E, 0x73F0716B, 0x228639C4, 0x125401A1, /* [12][0x20]*/ 0x062E3A86, 0x36FC02E3, 0x678A4A4C, 0x57587229, 0xC566DB12, 0xF5B4E377, 0xA4C2ABD8, 0x941093BD, /* [12][0x28]*/ 0x85538F5F, 0xB581B73A, 0xE4F7FF95, 0xD425C7F0, 0x461B6ECB, 0x76C956AE, 0x27BF1E01, 0x176D2664, /* [12][0x30]*/ 0x053927C5, 0x35EB1FA0, 0x649D570F, 0x544F6F6A, 0xC671C651, 0xF6A3FE34, 0xA7D5B69B, 0x97078EFE, /* [12][0x38]*/ 0x8644921C, 0xB696AA79, 0xE7E0E2D6, 0xD732DAB3, 0x450C7388, 0x75DE4BED, 0x24A80342, 0x147A3B27, /* [12][0x40]*/ 0x0C5C750C, 0x3C8E4D69, 0x6DF805C6, 0x5D2A3DA3, 0xCF149498, 0xFFC6ACFD, 0xAEB0E452, 0x9E62DC37, /* [12][0x48]*/ 0x8F21C0D5, 0xBFF3F8B0, 0xEE85B01F, 0xDE57887A, 0x4C692141, 0x7CBB1924, 0x2DCD518B, 0x1D1F69EE, /* [12][0x50]*/ 0x0F4B684F, 0x3F99502A, 0x6EEF1885, 0x5E3D20E0, 0xCC0389DB, 0xFCD1B1BE, 0xADA7F911, 0x9D75C174, /* [12][0x58]*/ 0x8C36DD96, 0xBCE4E5F3, 0xED92AD5C, 0xDD409539, 0x4F7E3C02, 0x7FAC0467, 0x2EDA4CC8, 0x1E0874AD, /* [12][0x60]*/ 0x0A724F8A, 0x3AA077EF, 0x6BD63F40, 0x5B040725, 
0xC93AAE1E, 0xF9E8967B, 0xA89EDED4, 0x984CE6B1, /* [12][0x68]*/ 0x890FFA53, 0xB9DDC236, 0xE8AB8A99, 0xD879B2FC, 0x4A471BC7, 0x7A9523A2, 0x2BE36B0D, 0x1B315368, /* [12][0x70]*/ 0x096552C9, 0x39B76AAC, 0x68C12203, 0x58131A66, 0xCA2DB35D, 0xFAFF8B38, 0xAB89C397, 0x9B5BFBF2, /* [12][0x78]*/ 0x8A18E710, 0xBACADF75, 0xEBBC97DA, 0xDB6EAFBF, 0x49500684, 0x79823EE1, 0x28F4764E, 0x18264E2B, /* [12][0x80]*/ 0x18B8EA18, 0x286AD27D, 0x791C9AD2, 0x49CEA2B7, 0xDBF00B8C, 0xEB2233E9, 0xBA547B46, 0x8A864323, /* [12][0x88]*/ 0x9BC55FC1, 0xAB1767A4, 0xFA612F0B, 0xCAB3176E, 0x588DBE55, 0x685F8630, 0x3929CE9F, 0x09FBF6FA, /* [12][0x90]*/ 0x1BAFF75B, 0x2B7DCF3E, 0x7A0B8791, 0x4AD9BFF4, 0xD8E716CF, 0xE8352EAA, 0xB9436605, 0x89915E60, /* [12][0x98]*/ 0x98D24282, 0xA8007AE7, 0xF9763248, 0xC9A40A2D, 0x5B9AA316, 0x6B489B73, 0x3A3ED3DC, 0x0AECEBB9, /* [12][0xa0]*/ 0x1E96D09E, 0x2E44E8FB, 0x7F32A054, 0x4FE09831, 0xDDDE310A, 0xED0C096F, 0xBC7A41C0, 0x8CA879A5, /* [12][0xa8]*/ 0x9DEB6547, 0xAD395D22, 0xFC4F158D, 0xCC9D2DE8, 0x5EA384D3, 0x6E71BCB6, 0x3F07F419, 0x0FD5CC7C, /* [12][0xb0]*/ 0x1D81CDDD, 0x2D53F5B8, 0x7C25BD17, 0x4CF78572, 0xDEC92C49, 0xEE1B142C, 0xBF6D5C83, 0x8FBF64E6, /* [12][0xb8]*/ 0x9EFC7804, 0xAE2E4061, 0xFF5808CE, 0xCF8A30AB, 0x5DB49990, 0x6D66A1F5, 0x3C10E95A, 0x0CC2D13F, /* [12][0xc0]*/ 0x14E49F14, 0x2436A771, 0x7540EFDE, 0x4592D7BB, 0xD7AC7E80, 0xE77E46E5, 0xB6080E4A, 0x86DA362F, /* [12][0xc8]*/ 0x97992ACD, 0xA74B12A8, 0xF63D5A07, 0xC6EF6262, 0x54D1CB59, 0x6403F33C, 0x3575BB93, 0x05A783F6, /* [12][0xd0]*/ 0x17F38257, 0x2721BA32, 0x7657F29D, 0x4685CAF8, 0xD4BB63C3, 0xE4695BA6, 0xB51F1309, 0x85CD2B6C, /* [12][0xd8]*/ 0x948E378E, 0xA45C0FEB, 0xF52A4744, 0xC5F87F21, 0x57C6D61A, 0x6714EE7F, 0x3662A6D0, 0x06B09EB5, /* [12][0xe0]*/ 0x12CAA592, 0x22189DF7, 0x736ED558, 0x43BCED3D, 0xD1824406, 0xE1507C63, 0xB02634CC, 0x80F40CA9, /* [12][0xe8]*/ 0x91B7104B, 0xA165282E, 0xF0136081, 0xC0C158E4, 0x52FFF1DF, 0x622DC9BA, 0x335B8115, 0x0389B970, /* [12][0xf0]*/ 0x11DDB8D1, 0x210F80B4, 0x7079C81B, 0x40ABF07E, 0xD2955945, 0xE2476120, 0xB331298F, 0x83E311EA, /* [12][0xf8]*/ 0x92A00D08, 0xA272356D, 0xF3047DC2, 0xC3D645A7, 0x51E8EC9C, 0x613AD4F9, 0x304C9C56, 0x009EA433 /* [12][0x100]*/ }, { 0x00000000, 0x54075546, 0xA80EAA8C, 0xFC09FFCA, 0x55F123E9, 0x01F676AF, 0xFDFF8965, 0xA9F8DC23, /* [13][0x08]*/ 0xABE247D2, 0xFFE51294, 0x03ECED5E, 0x57EBB818, 0xFE13643B, 0xAA14317D, 0x561DCEB7, 0x021A9BF1, /* [13][0x10]*/ 0x5228F955, 0x062FAC13, 0xFA2653D9, 0xAE21069F, 0x07D9DABC, 0x53DE8FFA, 0xAFD77030, 0xFBD02576, /* [13][0x18]*/ 0xF9CABE87, 0xADCDEBC1, 0x51C4140B, 0x05C3414D, 0xAC3B9D6E, 0xF83CC828, 0x043537E2, 0x503262A4, /* [13][0x20]*/ 0xA451F2AA, 0xF056A7EC, 0x0C5F5826, 0x58580D60, 0xF1A0D143, 0xA5A78405, 0x59AE7BCF, 0x0DA92E89, /* [13][0x28]*/ 0x0FB3B578, 0x5BB4E03E, 0xA7BD1FF4, 0xF3BA4AB2, 0x5A429691, 0x0E45C3D7, 0xF24C3C1D, 0xA64B695B, /* [13][0x30]*/ 0xF6790BFF, 0xA27E5EB9, 0x5E77A173, 0x0A70F435, 0xA3882816, 0xF78F7D50, 0x0B86829A, 0x5F81D7DC, /* [13][0x38]*/ 0x5D9B4C2D, 0x099C196B, 0xF595E6A1, 0xA192B3E7, 0x086A6FC4, 0x5C6D3A82, 0xA064C548, 0xF463900E, /* [13][0x40]*/ 0x4D4F93A5, 0x1948C6E3, 0xE5413929, 0xB1466C6F, 0x18BEB04C, 0x4CB9E50A, 0xB0B01AC0, 0xE4B74F86, /* [13][0x48]*/ 0xE6ADD477, 0xB2AA8131, 0x4EA37EFB, 0x1AA42BBD, 0xB35CF79E, 0xE75BA2D8, 0x1B525D12, 0x4F550854, /* [13][0x50]*/ 0x1F676AF0, 0x4B603FB6, 0xB769C07C, 0xE36E953A, 0x4A964919, 0x1E911C5F, 0xE298E395, 0xB69FB6D3, /* [13][0x58]*/ 0xB4852D22, 0xE0827864, 0x1C8B87AE, 0x488CD2E8, 0xE1740ECB, 0xB5735B8D, 0x497AA447, 0x1D7DF101, /* [13][0x60]*/ 0xE91E610F, 
0xBD193449, 0x4110CB83, 0x15179EC5, 0xBCEF42E6, 0xE8E817A0, 0x14E1E86A, 0x40E6BD2C, /* [13][0x68]*/ 0x42FC26DD, 0x16FB739B, 0xEAF28C51, 0xBEF5D917, 0x170D0534, 0x430A5072, 0xBF03AFB8, 0xEB04FAFE, /* [13][0x70]*/ 0xBB36985A, 0xEF31CD1C, 0x133832D6, 0x473F6790, 0xEEC7BBB3, 0xBAC0EEF5, 0x46C9113F, 0x12CE4479, /* [13][0x78]*/ 0x10D4DF88, 0x44D38ACE, 0xB8DA7504, 0xECDD2042, 0x4525FC61, 0x1122A927, 0xED2B56ED, 0xB92C03AB, /* [13][0x80]*/ 0x9A9F274A, 0xCE98720C, 0x32918DC6, 0x6696D880, 0xCF6E04A3, 0x9B6951E5, 0x6760AE2F, 0x3367FB69, /* [13][0x88]*/ 0x317D6098, 0x657A35DE, 0x9973CA14, 0xCD749F52, 0x648C4371, 0x308B1637, 0xCC82E9FD, 0x9885BCBB, /* [13][0x90]*/ 0xC8B7DE1F, 0x9CB08B59, 0x60B97493, 0x34BE21D5, 0x9D46FDF6, 0xC941A8B0, 0x3548577A, 0x614F023C, /* [13][0x98]*/ 0x635599CD, 0x3752CC8B, 0xCB5B3341, 0x9F5C6607, 0x36A4BA24, 0x62A3EF62, 0x9EAA10A8, 0xCAAD45EE, /* [13][0xa0]*/ 0x3ECED5E0, 0x6AC980A6, 0x96C07F6C, 0xC2C72A2A, 0x6B3FF609, 0x3F38A34F, 0xC3315C85, 0x973609C3, /* [13][0xa8]*/ 0x952C9232, 0xC12BC774, 0x3D2238BE, 0x69256DF8, 0xC0DDB1DB, 0x94DAE49D, 0x68D31B57, 0x3CD44E11, /* [13][0xb0]*/ 0x6CE62CB5, 0x38E179F3, 0xC4E88639, 0x90EFD37F, 0x39170F5C, 0x6D105A1A, 0x9119A5D0, 0xC51EF096, /* [13][0xb8]*/ 0xC7046B67, 0x93033E21, 0x6F0AC1EB, 0x3B0D94AD, 0x92F5488E, 0xC6F21DC8, 0x3AFBE202, 0x6EFCB744, /* [13][0xc0]*/ 0xD7D0B4EF, 0x83D7E1A9, 0x7FDE1E63, 0x2BD94B25, 0x82219706, 0xD626C240, 0x2A2F3D8A, 0x7E2868CC, /* [13][0xc8]*/ 0x7C32F33D, 0x2835A67B, 0xD43C59B1, 0x803B0CF7, 0x29C3D0D4, 0x7DC48592, 0x81CD7A58, 0xD5CA2F1E, /* [13][0xd0]*/ 0x85F84DBA, 0xD1FF18FC, 0x2DF6E736, 0x79F1B270, 0xD0096E53, 0x840E3B15, 0x7807C4DF, 0x2C009199, /* [13][0xd8]*/ 0x2E1A0A68, 0x7A1D5F2E, 0x8614A0E4, 0xD213F5A2, 0x7BEB2981, 0x2FEC7CC7, 0xD3E5830D, 0x87E2D64B, /* [13][0xe0]*/ 0x73814645, 0x27861303, 0xDB8FECC9, 0x8F88B98F, 0x267065AC, 0x727730EA, 0x8E7ECF20, 0xDA799A66, /* [13][0xe8]*/ 0xD8630197, 0x8C6454D1, 0x706DAB1B, 0x246AFE5D, 0x8D92227E, 0xD9957738, 0x259C88F2, 0x719BDDB4, /* [13][0xf0]*/ 0x21A9BF10, 0x75AEEA56, 0x89A7159C, 0xDDA040DA, 0x74589CF9, 0x205FC9BF, 0xDC563675, 0x88516333, /* [13][0xf8]*/ 0x8A4BF8C2, 0xDE4CAD84, 0x2245524E, 0x76420708, 0xDFBADB2B, 0x8BBD8E6D, 0x77B471A7, 0x23B324E1 /* [13][0x100]*/ }, { 0x00000000, 0x678EFD01, 0xCF1DFA02, 0xA8930703, 0x9BD782F5, 0xFC597FF4, 0x54CA78F7, 0x334485F6, /* [14][0x08]*/ 0x3243731B, 0x55CD8E1A, 0xFD5E8919, 0x9AD07418, 0xA994F1EE, 0xCE1A0CEF, 0x66890BEC, 0x0107F6ED, /* [14][0x10]*/ 0x6486E636, 0x03081B37, 0xAB9B1C34, 0xCC15E135, 0xFF5164C3, 0x98DF99C2, 0x304C9EC1, 0x57C263C0, /* [14][0x18]*/ 0x56C5952D, 0x314B682C, 0x99D86F2F, 0xFE56922E, 0xCD1217D8, 0xAA9CEAD9, 0x020FEDDA, 0x658110DB, /* [14][0x20]*/ 0xC90DCC6C, 0xAE83316D, 0x0610366E, 0x619ECB6F, 0x52DA4E99, 0x3554B398, 0x9DC7B49B, 0xFA49499A, /* [14][0x28]*/ 0xFB4EBF77, 0x9CC04276, 0x34534575, 0x53DDB874, 0x60993D82, 0x0717C083, 0xAF84C780, 0xC80A3A81, /* [14][0x30]*/ 0xAD8B2A5A, 0xCA05D75B, 0x6296D058, 0x05182D59, 0x365CA8AF, 0x51D255AE, 0xF94152AD, 0x9ECFAFAC, /* [14][0x38]*/ 0x9FC85941, 0xF846A440, 0x50D5A343, 0x375B5E42, 0x041FDBB4, 0x639126B5, 0xCB0221B6, 0xAC8CDCB7, /* [14][0x40]*/ 0x97F7EE29, 0xF0791328, 0x58EA142B, 0x3F64E92A, 0x0C206CDC, 0x6BAE91DD, 0xC33D96DE, 0xA4B36BDF, /* [14][0x48]*/ 0xA5B49D32, 0xC23A6033, 0x6AA96730, 0x0D279A31, 0x3E631FC7, 0x59EDE2C6, 0xF17EE5C5, 0x96F018C4, /* [14][0x50]*/ 0xF371081F, 0x94FFF51E, 0x3C6CF21D, 0x5BE20F1C, 0x68A68AEA, 0x0F2877EB, 0xA7BB70E8, 0xC0358DE9, /* [14][0x58]*/ 0xC1327B04, 0xA6BC8605, 0x0E2F8106, 0x69A17C07, 0x5AE5F9F1, 0x3D6B04F0, 0x95F803F3, 
0xF276FEF2, /* [14][0x60]*/ 0x5EFA2245, 0x3974DF44, 0x91E7D847, 0xF6692546, 0xC52DA0B0, 0xA2A35DB1, 0x0A305AB2, 0x6DBEA7B3, /* [14][0x68]*/ 0x6CB9515E, 0x0B37AC5F, 0xA3A4AB5C, 0xC42A565D, 0xF76ED3AB, 0x90E02EAA, 0x387329A9, 0x5FFDD4A8, /* [14][0x70]*/ 0x3A7CC473, 0x5DF23972, 0xF5613E71, 0x92EFC370, 0xA1AB4686, 0xC625BB87, 0x6EB6BC84, 0x09384185, /* [14][0x78]*/ 0x083FB768, 0x6FB14A69, 0xC7224D6A, 0xA0ACB06B, 0x93E8359D, 0xF466C89C, 0x5CF5CF9F, 0x3B7B329E, /* [14][0x80]*/ 0x2A03AAA3, 0x4D8D57A2, 0xE51E50A1, 0x8290ADA0, 0xB1D42856, 0xD65AD557, 0x7EC9D254, 0x19472F55, /* [14][0x88]*/ 0x1840D9B8, 0x7FCE24B9, 0xD75D23BA, 0xB0D3DEBB, 0x83975B4D, 0xE419A64C, 0x4C8AA14F, 0x2B045C4E, /* [14][0x90]*/ 0x4E854C95, 0x290BB194, 0x8198B697, 0xE6164B96, 0xD552CE60, 0xB2DC3361, 0x1A4F3462, 0x7DC1C963, /* [14][0x98]*/ 0x7CC63F8E, 0x1B48C28F, 0xB3DBC58C, 0xD455388D, 0xE711BD7B, 0x809F407A, 0x280C4779, 0x4F82BA78, /* [14][0xa0]*/ 0xE30E66CF, 0x84809BCE, 0x2C139CCD, 0x4B9D61CC, 0x78D9E43A, 0x1F57193B, 0xB7C41E38, 0xD04AE339, /* [14][0xa8]*/ 0xD14D15D4, 0xB6C3E8D5, 0x1E50EFD6, 0x79DE12D7, 0x4A9A9721, 0x2D146A20, 0x85876D23, 0xE2099022, /* [14][0xb0]*/ 0x878880F9, 0xE0067DF8, 0x48957AFB, 0x2F1B87FA, 0x1C5F020C, 0x7BD1FF0D, 0xD342F80E, 0xB4CC050F, /* [14][0xb8]*/ 0xB5CBF3E2, 0xD2450EE3, 0x7AD609E0, 0x1D58F4E1, 0x2E1C7117, 0x49928C16, 0xE1018B15, 0x868F7614, /* [14][0xc0]*/ 0xBDF4448A, 0xDA7AB98B, 0x72E9BE88, 0x15674389, 0x2623C67F, 0x41AD3B7E, 0xE93E3C7D, 0x8EB0C17C, /* [14][0xc8]*/ 0x8FB73791, 0xE839CA90, 0x40AACD93, 0x27243092, 0x1460B564, 0x73EE4865, 0xDB7D4F66, 0xBCF3B267, /* [14][0xd0]*/ 0xD972A2BC, 0xBEFC5FBD, 0x166F58BE, 0x71E1A5BF, 0x42A52049, 0x252BDD48, 0x8DB8DA4B, 0xEA36274A, /* [14][0xd8]*/ 0xEB31D1A7, 0x8CBF2CA6, 0x242C2BA5, 0x43A2D6A4, 0x70E65352, 0x1768AE53, 0xBFFBA950, 0xD8755451, /* [14][0xe0]*/ 0x74F988E6, 0x137775E7, 0xBBE472E4, 0xDC6A8FE5, 0xEF2E0A13, 0x88A0F712, 0x2033F011, 0x47BD0D10, /* [14][0xe8]*/ 0x46BAFBFD, 0x213406FC, 0x89A701FF, 0xEE29FCFE, 0xDD6D7908, 0xBAE38409, 0x1270830A, 0x75FE7E0B, /* [14][0xf0]*/ 0x107F6ED0, 0x77F193D1, 0xDF6294D2, 0xB8EC69D3, 0x8BA8EC25, 0xEC261124, 0x44B51627, 0x233BEB26, /* [14][0xf8]*/ 0x223C1DCB, 0x45B2E0CA, 0xED21E7C9, 0x8AAF1AC8, 0xB9EB9F3E, 0xDE65623F, 0x76F6653C, 0x1178983D /* [14][0x100]*/ }, { 0x00000000, 0xF20C0DFE, 0xE1F46D0D, 0x13F860F3, 0xC604ACEB, 0x3408A115, 0x27F0C1E6, 0xD5FCCC18, /* [15][0x08]*/ 0x89E52F27, 0x7BE922D9, 0x6811422A, 0x9A1D4FD4, 0x4FE183CC, 0xBDED8E32, 0xAE15EEC1, 0x5C19E33F, /* [15][0x10]*/ 0x162628BF, 0xE42A2541, 0xF7D245B2, 0x05DE484C, 0xD0228454, 0x222E89AA, 0x31D6E959, 0xC3DAE4A7, /* [15][0x18]*/ 0x9FC30798, 0x6DCF0A66, 0x7E376A95, 0x8C3B676B, 0x59C7AB73, 0xABCBA68D, 0xB833C67E, 0x4A3FCB80, /* [15][0x20]*/ 0x2C4C517E, 0xDE405C80, 0xCDB83C73, 0x3FB4318D, 0xEA48FD95, 0x1844F06B, 0x0BBC9098, 0xF9B09D66, /* [15][0x28]*/ 0xA5A97E59, 0x57A573A7, 0x445D1354, 0xB6511EAA, 0x63ADD2B2, 0x91A1DF4C, 0x8259BFBF, 0x7055B241, /* [15][0x30]*/ 0x3A6A79C1, 0xC866743F, 0xDB9E14CC, 0x29921932, 0xFC6ED52A, 0x0E62D8D4, 0x1D9AB827, 0xEF96B5D9, /* [15][0x38]*/ 0xB38F56E6, 0x41835B18, 0x527B3BEB, 0xA0773615, 0x758BFA0D, 0x8787F7F3, 0x947F9700, 0x66739AFE, /* [15][0x40]*/ 0x5898A2FC, 0xAA94AF02, 0xB96CCFF1, 0x4B60C20F, 0x9E9C0E17, 0x6C9003E9, 0x7F68631A, 0x8D646EE4, /* [15][0x48]*/ 0xD17D8DDB, 0x23718025, 0x3089E0D6, 0xC285ED28, 0x17792130, 0xE5752CCE, 0xF68D4C3D, 0x048141C3, /* [15][0x50]*/ 0x4EBE8A43, 0xBCB287BD, 0xAF4AE74E, 0x5D46EAB0, 0x88BA26A8, 0x7AB62B56, 0x694E4BA5, 0x9B42465B, /* [15][0x58]*/ 0xC75BA564, 0x3557A89A, 0x26AFC869, 0xD4A3C597, 
0x015F098F, 0xF3530471, 0xE0AB6482, 0x12A7697C, /* [15][0x60]*/ 0x74D4F382, 0x86D8FE7C, 0x95209E8F, 0x672C9371, 0xB2D05F69, 0x40DC5297, 0x53243264, 0xA1283F9A, /* [15][0x68]*/ 0xFD31DCA5, 0x0F3DD15B, 0x1CC5B1A8, 0xEEC9BC56, 0x3B35704E, 0xC9397DB0, 0xDAC11D43, 0x28CD10BD, /* [15][0x70]*/ 0x62F2DB3D, 0x90FED6C3, 0x8306B630, 0x710ABBCE, 0xA4F677D6, 0x56FA7A28, 0x45021ADB, 0xB70E1725, /* [15][0x78]*/ 0xEB17F41A, 0x191BF9E4, 0x0AE39917, 0xF8EF94E9, 0x2D1358F1, 0xDF1F550F, 0xCCE735FC, 0x3EEB3802, /* [15][0x80]*/ 0xB13145F8, 0x433D4806, 0x50C528F5, 0xA2C9250B, 0x7735E913, 0x8539E4ED, 0x96C1841E, 0x64CD89E0, /* [15][0x88]*/ 0x38D46ADF, 0xCAD86721, 0xD92007D2, 0x2B2C0A2C, 0xFED0C634, 0x0CDCCBCA, 0x1F24AB39, 0xED28A6C7, /* [15][0x90]*/ 0xA7176D47, 0x551B60B9, 0x46E3004A, 0xB4EF0DB4, 0x6113C1AC, 0x931FCC52, 0x80E7ACA1, 0x72EBA15F, /* [15][0x98]*/ 0x2EF24260, 0xDCFE4F9E, 0xCF062F6D, 0x3D0A2293, 0xE8F6EE8B, 0x1AFAE375, 0x09028386, 0xFB0E8E78, /* [15][0xa0]*/ 0x9D7D1486, 0x6F711978, 0x7C89798B, 0x8E857475, 0x5B79B86D, 0xA975B593, 0xBA8DD560, 0x4881D89E, /* [15][0xa8]*/ 0x14983BA1, 0xE694365F, 0xF56C56AC, 0x07605B52, 0xD29C974A, 0x20909AB4, 0x3368FA47, 0xC164F7B9, /* [15][0xb0]*/ 0x8B5B3C39, 0x795731C7, 0x6AAF5134, 0x98A35CCA, 0x4D5F90D2, 0xBF539D2C, 0xACABFDDF, 0x5EA7F021, /* [15][0xb8]*/ 0x02BE131E, 0xF0B21EE0, 0xE34A7E13, 0x114673ED, 0xC4BABFF5, 0x36B6B20B, 0x254ED2F8, 0xD742DF06, /* [15][0xc0]*/ 0xE9A9E704, 0x1BA5EAFA, 0x085D8A09, 0xFA5187F7, 0x2FAD4BEF, 0xDDA14611, 0xCE5926E2, 0x3C552B1C, /* [15][0xc8]*/ 0x604CC823, 0x9240C5DD, 0x81B8A52E, 0x73B4A8D0, 0xA64864C8, 0x54446936, 0x47BC09C5, 0xB5B0043B, /* [15][0xd0]*/ 0xFF8FCFBB, 0x0D83C245, 0x1E7BA2B6, 0xEC77AF48, 0x398B6350, 0xCB876EAE, 0xD87F0E5D, 0x2A7303A3, /* [15][0xd8]*/ 0x766AE09C, 0x8466ED62, 0x979E8D91, 0x6592806F, 0xB06E4C77, 0x42624189, 0x519A217A, 0xA3962C84, /* [15][0xe0]*/ 0xC5E5B67A, 0x37E9BB84, 0x2411DB77, 0xD61DD689, 0x03E11A91, 0xF1ED176F, 0xE215779C, 0x10197A62, /* [15][0xe8]*/ 0x4C00995D, 0xBE0C94A3, 0xADF4F450, 0x5FF8F9AE, 0x8A0435B6, 0x78083848, 0x6BF058BB, 0x99FC5545, /* [15][0xf0]*/ 0xD3C39EC5, 0x21CF933B, 0x3237F3C8, 0xC03BFE36, 0x15C7322E, 0xE7CB3FD0, 0xF4335F23, 0x063F52DD, /* [15][0xf8]*/ 0x5A26B1E2, 0xA82ABC1C, 0xBBD2DCEF, 0x49DED111, 0x9C221D09, 0x6E2E10F7, 0x7DD67004, 0x8FDA7DFA /* [15][0x100]*/ }}; /* private (static) function factoring out byte-by-byte CRC computation using just one slice of the lookup table*/ static uint32_t s_crc_generic_sb1(const uint8_t *input, int length, uint32_t crc, const uint32_t *table_ptr) { uint32_t(*table)[16][256] = (uint32_t(*)[16][256])table_ptr; while (length-- > 0) { crc = (crc >> 8) ^ (*table)[0][(crc & 0xff) ^ *input++]; } return crc; } /* The inner loops of the CRC functions that process large blocks of data work best when input is aligned*/ /* This function begins processing input data one byte at a time until the input pointer is 4-byte aligned*/ /* Advances the input pointer and reduces the length (both passed by reference)*/ static inline uint32_t s_crc_generic_align( const uint8_t **input, int *length, uint32_t crc, const uint32_t *table_ptr) { /* Get the 4-byte memory alignment of our input buffer by looking at the least significant 2 bits*/ size_t input_alignment = ((size_t)*input) & 0x3; /* Compute the number of input bytes that precede the first 4-byte aligned block (will be in range 0-3)*/ size_t leading = (4 - input_alignment) & 0x3; /* Determine what's left without the leading input bytes (might be negative)*/ size_t remaining = *length - leading; /* Process unaligned 
leading input bytes one at a time*/ if (leading && remaining > 0) { crc = s_crc_generic_sb1(*input, (uint32_t)leading, crc, table_ptr); *input += leading; *length -= (int)leading; } return crc; } /* private (static) function to compute a generic slice-by-4 CRC using the specified lookup table (4 table slices)*/ static uint32_t s_crc_generic_sb4(const uint8_t *input, int length, uint32_t crc, const uint32_t *table_ptr) { const uint32_t *current = (const uint32_t *)input; int remaining = length; uint32_t(*table)[16][256] = (uint32_t(*)[16][256])table_ptr; while (remaining >= 4) { crc ^= *current++; crc = (*table)[3][crc & 0xff] ^ (*table)[2][(crc >> 8) & 0xff] ^ (*table)[1][(crc >> 16) & 0xff] ^ (*table)[0][crc >> 24]; remaining -= 4; } return s_crc_generic_sb1(&input[length - remaining], remaining, crc, table_ptr); } /* private (static) function to compute a generic slice-by-8 CRC using the specified lookup table (8 table slices)*/ static uint32_t s_crc_generic_sb8(const uint8_t *input, int length, uint32_t crc, const uint32_t *table_ptr) { const uint32_t *current = (const uint32_t *)input; int remaining = length; uint32_t(*table)[16][256] = (uint32_t(*)[16][256])table_ptr; while (remaining >= 8) { uint32_t c1 = *current++ ^ crc; uint32_t c2 = *current++; uint32_t t1 = (*table)[7][c1 & 0xff] ^ (*table)[6][(c1 >> 8) & 0xff] ^ (*table)[5][(c1 >> 16) & 0xff] ^ (*table)[4][(c1 >> 24) & 0xff]; uint32_t t2 = (*table)[3][c2 & 0xff] ^ (*table)[2][(c2 >> 8) & 0xff] ^ (*table)[1][(c2 >> 16) & 0xff] ^ (*table)[0][(c2 >> 24) & 0xff]; crc = t1 ^ t2; remaining -= 8; } return s_crc_generic_sb4(&input[length - remaining], remaining, crc, table_ptr); } /* private (static) function to compute a generic slice-by-16 CRC using the specified lookup table (all 16 table * slices)*/ static uint32_t s_crc_generic_sb16(const uint8_t *input, int length, uint32_t crc, const uint32_t *table_ptr) { const uint32_t *current = (const uint32_t *)input; int remaining = length; uint32_t(*table)[16][256] = (uint32_t(*)[16][256])table_ptr; while (remaining >= 16) { uint32_t c1 = *current++ ^ crc; uint32_t c2 = *current++; uint32_t c3 = *current++; uint32_t c4 = *current++; uint32_t t1 = (*table)[15][c1 & 0xff] ^ (*table)[14][(c1 >> 8) & 0xff] ^ (*table)[13][(c1 >> 16) & 0xff] ^ (*table)[12][(c1 >> 24) & 0xff]; uint32_t t2 = (*table)[11][c2 & 0xff] ^ (*table)[10][(c2 >> 8) & 0xff] ^ (*table)[9][(c2 >> 16) & 0xff] ^ (*table)[8][(c2 >> 24) & 0xff]; uint32_t t3 = (*table)[7][c3 & 0xff] ^ (*table)[6][(c3 >> 8) & 0xff] ^ (*table)[5][(c3 >> 16) & 0xff] ^ (*table)[4][(c3 >> 24) & 0xff]; uint32_t t4 = (*table)[3][c4 & 0xff] ^ (*table)[2][(c4 >> 8) & 0xff] ^ (*table)[1][(c4 >> 16) & 0xff] ^ (*table)[0][(c4 >> 24) & 0xff]; crc = t1 ^ t2 ^ t3 ^ t4; remaining -= 16; } return s_crc_generic_sb4(&input[length - remaining], remaining, crc, table_ptr); } static uint32_t s_crc32_no_slice(const uint8_t *input, int length, uint32_t previousCrc32) { return ~s_crc_generic_sb1(input, length, ~previousCrc32, &CRC32_TABLE[0][0]); } /* Computes CRC32 (Ethernet, gzip, et. al.) using slice-by-4. */ static uint32_t s_crc32_sb4(const uint8_t *input, int length, uint32_t previousCrc32) { uint32_t crc = s_crc_generic_align(&input, &length, ~previousCrc32, &CRC32_TABLE[0][0]); return ~s_crc_generic_sb4(input, length, crc, &CRC32_TABLE[0][0]); } /* Computes CRC32 (Ethernet, gzip, et. al.) using slice-by-8. 
*/ static uint32_t s_crc32_sb8(const uint8_t *input, int length, uint32_t previousCrc32) { uint32_t crc = s_crc_generic_align(&input, &length, ~previousCrc32, &CRC32_TABLE[0][0]); return ~s_crc_generic_sb8(input, length, crc, &CRC32_TABLE[0][0]); } /* Computes CRC32 (Ethernet, gzip, et. al.) using slice-by-16. */ static uint32_t s_crc32_sb16(const uint8_t *input, int length, uint32_t previousCrc32) { uint32_t crc = s_crc_generic_align(&input, &length, ~previousCrc32, &CRC32_TABLE[0][0]); return ~s_crc_generic_sb16(input, length, crc, &CRC32_TABLE[0][0]); } static uint32_t s_crc32c_no_slice(const uint8_t *input, int length, uint32_t previousCrc32c) { return ~s_crc_generic_sb1(input, length, ~previousCrc32c, &CRC32C_TABLE[0][0]); } /* Computes the Castagnoli CRC32c (iSCSI) using slice-by-4. */ static uint32_t s_crc32c_sb4(const uint8_t *input, int length, uint32_t previousCrc32) { uint32_t crc = s_crc_generic_align(&input, &length, ~previousCrc32, &CRC32C_TABLE[0][0]); return ~s_crc_generic_sb4(input, length, crc, &CRC32C_TABLE[0][0]); } /* Computes the Castagnoli CRC32c (iSCSI) using slice-by-8. */ static uint32_t s_crc32c_sb8(const uint8_t *input, int length, uint32_t previousCrc32) { uint32_t crc = s_crc_generic_align(&input, &length, ~previousCrc32, &CRC32C_TABLE[0][0]); return ~s_crc_generic_sb8(input, length, crc, &CRC32C_TABLE[0][0]); } /* Computes the Castagnoli CRC32c (iSCSI) using slice-by-16. */ static uint32_t s_crc32c_sb16(const uint8_t *input, int length, uint32_t previousCrc32) { uint32_t crc = s_crc_generic_align(&input, &length, ~previousCrc32, &CRC32C_TABLE[0][0]); return ~s_crc_generic_sb16(input, length, crc, &CRC32C_TABLE[0][0]); } /** * Computes the Ethernet, gzip CRC32 of the specified data buffer. * Pass 0 in the previousCrc32 parameter as an initial value unless continuing to update a running crc in a subsequent * call */ uint32_t aws_checksums_crc32_sw(const uint8_t *input, int length, uint32_t previousCrc32) { if (length >= 16) { return s_crc32_sb16(input, length, previousCrc32); } if (length >= 8) { return s_crc32_sb8(input, length, previousCrc32); } if (length >= 4) { return s_crc32_sb4(input, length, previousCrc32); } return s_crc32_no_slice(input, length, previousCrc32); } /** * Computes the Castagnoli iSCSI CRC32c of the specified data buffer. * Pass 0 in the previousCrc32c parameter as an initial value unless continuing to update a running crc in a subsequent * call */ uint32_t aws_checksums_crc32c_sw(const uint8_t *input, int length, uint32_t previousCrc32c) { if (length >= 16) { return s_crc32c_sb16(input, length, previousCrc32c); } if (length >= 8) { return s_crc32c_sb8(input, length, previousCrc32c); } if (length >= 4) { return s_crc32c_sb4(input, length, previousCrc32c); } return s_crc32c_no_slice(input, length, previousCrc32c); } aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/generic/000077500000000000000000000000001456575232400237115ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/generic/crc32c_null.c000066400000000000000000000012641456575232400261710ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* Fail gracefully. Even though the we might be able to detect the presence of the instruction * we might not have a compiler that supports assembling those instructions. 
*/ uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t previousCrc32) { return aws_checksums_crc32c_sw(input, length, previousCrc32); } uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) { return aws_checksums_crc32_sw(input, length, previousCrc32); } aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/intel/000077500000000000000000000000001456575232400234105ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/intel/asm/000077500000000000000000000000001456575232400241705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/intel/asm/crc32c_sse42_asm.c000066400000000000000000000405561456575232400273050ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include /* clang-format off */ /* this implementation is only for the x86_64 intel architecture */ #if defined(__x86_64__) # if defined(__clang__) # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdollar-in-identifier-extension" # endif /* use local labels, so that linker doesn't think these are functions it can deadstrip */ # ifdef __APPLE__ # define LABEL(label) "L_" #label "_%=" # else # define LABEL(label) ".L_" #label "_%=" # endif /* * Factored out common inline asm for folding crc0,crc1,crc2 stripes in rcx, r11, r10 using * the specified Magic Constants K1 and K2. * Assumes rcx, r11, r10 contain crc0, crc1, crc2 that need folding * Utilizes xmm1, xmm2, xmm3, xmm4 as well as clobbering r8, r9, r11 * Result is placed in ecx */ # define FOLD_K1K2(K1, K2) \ "movl " #K1 ", %%r8d # Magic K1 constant \n" \ "movl " #K2 ", %%r9d # Magic K2 constant \n" \ "movq %%rcx, %%xmm1 # crc0 into lower dword of xmm1 \n" \ "movq %%r8, %%xmm3 # K1 into lower dword of xmm3 \n" \ "movq %%r11, %%xmm2 # crc1 into lower dword of xmm2 \n" \ "movq %%r9, %%xmm4 # K2 into lower dword of xmm4 \n" \ "pclmulqdq $0x00, %%xmm3, %%xmm1 # Multiply crc0 by K1 \n" \ "pclmulqdq $0x00, %%xmm4, %%xmm2 # Multiply crc1 by K2 \n" \ "xor %%rcx, %%rcx # \n" \ "xor %%r11, %%r11 # \n" \ "movq %%xmm1, %%r8 # \n" \ "movq %%xmm2, %%r9 # \n" \ "crc32q %%r8, %%rcx # folding crc0 \n" \ "crc32q %%r9, %%r11 # folding crc1 \n" \ "xor %%r10d, %%ecx # combine crc2 and crc0 \n" \ "xor %%r11d, %%ecx # combine crc1 and crc0 \n" /** * Private (static) function. * Computes the Castagnoli CRC32c (iSCSI) of the specified data buffer using the Intel CRC32Q (quad word) machine * instruction by operating on 24-byte stripes in parallel. The results are folded together using CLMUL. This function * is optimized for exactly 256 byte blocks that are best aligned on 8-byte memory addresses. It MUST be passed a * pointer to input data that is exactly 256 bytes in length. Note: this function does NOT invert bits of the input crc * or return value. 
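 *
 * Folding background (a sketch of the idea, not a formal derivation): for the raw,
 * non-inverted CRC these helpers work on, CRC(A || B) == CRC(A * x^(8*len(B))) XOR CRC(B)
 * over GF(2). The "magic" K1/K2 constants handed to FOLD_K1K2 are precomputed powers of x
 * modulo the CRC32C polynomial, one per stripe offset, so each stripe's partial crc is
 * carry-less multiplied (pclmulqdq) by its constant, reduced with one more crc32q, and
 * finally XOR-combined into ecx.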
*/ static inline uint32_t s_crc32c_sse42_clmul_256(const uint8_t *input, uint32_t crc) { __asm__ __volatile__( "xor %%r11, %%r11 # zero all 64 bits in r11, will track crc1 \n" "xor %%r10, %%r10 # zero all 64 bits in r10, will track crc2 \n" "crc32q 0(%[in]), %%rcx # crc0 \n" "crc32q 88(%[in]), %%r11 # crc1 \n" "crc32q 176(%[in]), %%r10 # crc2 \n" "crc32q 8(%[in]), %%rcx # crc0 \n" "crc32q 96(%[in]), %%r11 # crc1 \n" "crc32q 184(%[in]), %%r10 # crc2 \n" "crc32q 16(%[in]), %%rcx # crc0 \n" "crc32q 104(%[in]), %%r11 # crc1 \n" "crc32q 192(%[in]), %%r10 # crc2 \n" "crc32q 24(%[in]), %%rcx # crc0 \n" "crc32q 112(%[in]), %%r11 # crc1 \n" "crc32q 200(%[in]), %%r10 # crc2 \n" "crc32q 32(%[in]), %%rcx # crc0 \n" "crc32q 120(%[in]), %%r11 # crc1 \n" "crc32q 208(%[in]), %%r10 # crc2 \n" "crc32q 40(%[in]), %%rcx # crc0 \n" "crc32q 128(%[in]), %%r11 # crc1 \n" "crc32q 216(%[in]), %%r10 # crc2 \n" "crc32q 48(%[in]), %%rcx # crc0 \n" "crc32q 136(%[in]), %%r11 # crc1 \n" "crc32q 224(%[in]), %%r10 # crc2 \n" "crc32q 56(%[in]), %%rcx # crc0 \n" "crc32q 144(%[in]), %%r11 # crc1 \n" "crc32q 232(%[in]), %%r10 # crc2 \n" "crc32q 64(%[in]), %%rcx # crc0 \n" "crc32q 152(%[in]), %%r11 # crc1 \n" "crc32q 240(%[in]), %%r10 # crc2 \n" "crc32q 72(%[in]), %%rcx # crc0 \n" "crc32q 160(%[in]), %%r11 # crc1 \n" "crc32q 248(%[in]), %%r10 # crc2 \n" "crc32q 80(%[in]), %%rcx # crc0 \n" "crc32q 168(%[in]), %%r11 # crc2 \n" FOLD_K1K2($0x1b3d8f29, $0x39d3b296) /* Magic Constants used to fold crc stripes into ecx */ /* output registers [crc] is an input and and output so it is marked read/write (i.e. "+c")*/ : [ crc ] "+c"(crc) /* input registers */ : [ in ] "d"(input) /* additional clobbered registers */ : "%r8", "%r9", "%r11", "%r10", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "cc"); return crc; } /** * Private (static) function. * Computes the Castagnoli CRC32c (iSCSI) of the specified data buffer using the Intel CRC32Q (quad word) machine * instruction by operating on 3 24-byte stripes in parallel. The results are folded together using CLMUL. This function * is optimized for exactly 1024 byte blocks that are best aligned on 8-byte memory addresses. It MUST be passed a * pointer to input data that is exactly 1024 bytes in length. Note: this function does NOT invert bits of the input crc * or return value. 
*/ static inline uint32_t s_crc32c_sse42_clmul_1024(const uint8_t *input, uint32_t crc) { __asm__ __volatile__( "xor %%r11, %%r11 # zero all 64 bits in r11, will track crc1 \n" "xor %%r10, %%r10 # zero all 64 bits in r10, will track crc2 \n" "movl $5, %%r8d # Loop 5 times through 64 byte chunks in 3 parallel stripes \n" LABEL(loop_1024) ": \n" "prefetcht0 128(%[in]) # \n" "prefetcht0 472(%[in]) # \n" "prefetcht0 808(%[in]) # \n" "crc32q 0(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 344(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 680(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 8(%[in]), %%rcx # crc0 \n" "crc32q 352(%[in]), %%r11 # crc1 \n" "crc32q 688(%[in]), %%r10 # crc2 \n" "crc32q 16(%[in]), %%rcx # crc0 \n" "crc32q 360(%[in]), %%r11 # crc1 \n" "crc32q 696(%[in]), %%r10 # crc2 \n" "crc32q 24(%[in]), %%rcx # crc0 \n" "crc32q 368(%[in]), %%r11 # crc1 \n" "crc32q 704(%[in]), %%r10 # crc2 \n" "crc32q 32(%[in]), %%rcx # crc0 \n" "crc32q 376(%[in]), %%r11 # crc1 \n" "crc32q 712(%[in]), %%r10 # crc2 \n" "crc32q 40(%[in]), %%rcx # crc0 \n" "crc32q 384(%[in]), %%r11 # crc1 \n" "crc32q 720(%[in]), %%r10 # crc2 \n" "crc32q 48(%[in]), %%rcx # crc0 \n" "crc32q 392(%[in]), %%r11 # crc1 \n" "crc32q 728(%[in]), %%r10 # crc2 \n" "crc32q 56(%[in]), %%rcx # crc0 \n" "crc32q 400(%[in]), %%r11 # crc1 \n" "crc32q 736(%[in]), %%r10 # crc2 \n" "add $64, %[in] # \n" "sub $1, %%r8d # \n" "jnz " LABEL(loop_1024) " # \n" "crc32q 0(%[in]), %%rcx # crc0 \n" "crc32q 344(%[in]), %%r11 # crc1 \n" "crc32q 680(%[in]), %%r10 # crc2 \n" "crc32q 8(%[in]), %%rcx # crc0 \n" "crc32q 352(%[in]), %%r11 # crc1 \n" "crc32q 688(%[in]), %%r10 # crc2 \n" "crc32q 16(%[in]), %%rcx # crc0 \n" "crc32q 696(%[in]), %%r10 # crc2 \n" FOLD_K1K2($0xe417f38a, $0x8f158014) /* Magic Constants used to fold crc stripes into ecx output registers [crc] is an input and and output so it is marked read/write (i.e. "+c") we clobber the register for [input] (via add instruction) so we must also tag it read/write (i.e. "+d") in the list of outputs to tell gcc about the clobber */ : [ crc ] "+c"(crc), [ in ] "+d"(input) : /* additional clobbered registers */ /* "cc" is the flags - we add and sub, so the flags are also clobbered */ : "%r8", "%r9", "%r11", "%r10", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "cc"); return crc; } /** * Private (static) function. * Computes the Castagnoli CRC32c (iSCSI) of the specified data buffer using the Intel CRC32Q (quad word) machine * instruction by operating on 24-byte stripes in parallel. The results are folded together using CLMUL. This function * is optimized for exactly 3072 byte blocks that are best aligned on 8-byte memory addresses. It MUST be passed a * pointer to input data that is exactly 3072 bytes in length. Note: this function does NOT invert bits of the input crc * or return value. 
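 *
 * The public entry point further below consumes whole 3072/1024/256-byte blocks with these
 * helpers and chains the running crc between blocks; roughly (mirroring the actual dispatch
 * loop, shown here purely as an illustration):
 *
 *     while (length >= 3072) {
 *         crc = s_crc32c_sse42_clmul_3072(input, crc);
 *         input += 3072;
 *         length -= 3072;
 *     }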
*/ static inline uint32_t s_crc32c_sse42_clmul_3072(const uint8_t *input, uint32_t crc) { __asm__ __volatile__( "xor %%r11, %%r11 # zero all 64 bits in r11, will track crc1 \n" "xor %%r10, %%r10 # zero all 64 bits in r10, will track crc2 \n" "movl $16, %%r8d # Loop 16 times through 64 byte chunks in 3 parallel stripes \n" LABEL(loop_3072) ": \n" "prefetcht0 128(%[in]) # \n" "prefetcht0 1152(%[in]) # \n" "prefetcht0 2176(%[in]) # \n" "crc32q 0(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1024(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2048(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 8(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1032(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2056(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 16(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1040(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2064(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 24(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1048(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2072(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 32(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1056(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2080(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 40(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1064(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2088(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 48(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1072(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2096(%[in]), %%r10 # crc2: stripe2 \n" "crc32q 56(%[in]), %%rcx # crc0: stripe0 \n" "crc32q 1080(%[in]), %%r11 # crc1: stripe1 \n" "crc32q 2104(%[in]), %%r10 # crc2: stripe2 \n" "add $64, %[in] # \n" "sub $1, %%r8d # \n" "jnz " LABEL(loop_3072) " # \n" FOLD_K1K2( $0xa51b6135, $0x170076fa) /* Magic Constants used to fold crc stripes into ecx output registers [crc] is an input and and output so it is marked read/write (i.e. "+c") we clobber the register for [input] (via add instruction) so we must also tag it read/write (i.e. "+d") in the list of outputs to tell gcc about the clobber*/ : [ crc ] "+c"(crc), [ in ] "+d"(input) : /* additional clobbered registers "cc" is the flags - we add and sub, so the flags are also clobbered */ : "%r8", "%r9", "%r11", "%r10", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "cc"); return crc; } static bool detection_performed = false; static bool detected_clmul = false; /* * Computes the Castagnoli CRC32c (iSCSI) of the specified data buffer using the Intel CRC32Q (64-bit quad word) and * PCLMULQDQ machine instructions (if present). * Handles data that isn't 8-byte aligned as well as any trailing data with the CRC32B (byte) instruction. * Pass 0 in the previousCrc32 parameter as an initial value unless continuing to update a running CRC in a subsequent * call. */ uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t previousCrc32) { if (AWS_UNLIKELY(!detection_performed)) { detected_clmul = aws_cpu_has_feature(AWS_CPU_FEATURE_CLMUL); /* Simply setting the flag true to skip HW detection next time Not using memory barriers since the worst that can happen is a fallback to the non HW accelerated code. 
*/ detection_performed = true; } uint32_t crc = ~previousCrc32; /* For small input, forget about alignment checks - simply compute the CRC32c one byte at a time */ if (AWS_UNLIKELY(length < 8)) { while (length-- > 0) { __asm__("CRC32B (%[in]), %[crc]" : [ crc ] "+c"(crc) : [ in ] "r"(input)); input++; } return ~crc; } /* Get the 8-byte memory alignment of our input buffer by looking at the least significant 3 bits */ int input_alignment = (unsigned long int)input & 0x7; /* Compute the number of unaligned bytes before the first aligned 8-byte chunk (will be in the range 0-7) */ int leading = (8 - input_alignment) & 0x7; /* reduce the length by the leading unaligned bytes we are about to process */ length -= leading; /* spin through the leading unaligned input bytes (if any) one-by-one */ while (leading-- > 0) { __asm__("CRC32B (%[in]), %[crc]" : [ crc ] "+c"(crc) : [ in ] "r"(input)); input++; } /* Using likely to keep this code inlined */ if (AWS_LIKELY(detected_clmul)) { while (AWS_LIKELY(length >= 3072)) { /* Compute crc32c on each block, chaining each crc result */ crc = s_crc32c_sse42_clmul_3072(input, crc); input += 3072; length -= 3072; } while (AWS_LIKELY(length >= 1024)) { /* Compute crc32c on each block, chaining each crc result */ crc = s_crc32c_sse42_clmul_1024(input, crc); input += 1024; length -= 1024; } while (AWS_LIKELY(length >= 256)) { /* Compute crc32c on each block, chaining each crc result */ crc = s_crc32c_sse42_clmul_256(input, crc); input += 256; length -= 256; } } /* Spin through remaining (aligned) 8-byte chunks using the CRC32Q quad word instruction */ while (AWS_LIKELY(length >= 8)) { /* Hardcoding %rcx register (i.e. "+c") to allow use of qword instruction */ __asm__ __volatile__("CRC32Q (%[in]), %%rcx" : [ crc ] "+c"(crc) : [ in ] "r"(input)); input += 8; length -= 8; } /* Finish up with any trailing bytes using the CRC32B single byte instruction one-by-one */ while (length-- > 0) { __asm__ __volatile__("CRC32B (%[in]), %[crc]" : [ crc ] "+c"(crc) : [ in ] "r"(input)); input++; } return ~crc; } uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) { return aws_checksums_crc32_sw(input, length, previousCrc32); } # if defined(__clang__) # pragma clang diagnostic pop # endif #else uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) { return aws_checksums_crc32_sw(input, length, previousCrc32); } uint32_t aws_checksums_crc32c_hw(const uint8_t *input, int length, uint32_t previousCrc32) { return aws_checksums_crc32c_sw(input, length, previousCrc32); } #endif /* clang-format on */ aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/intel/visualc/000077500000000000000000000000001456575232400250565ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/source/intel/visualc/visualc_crc32c_sse42.c000066400000000000000000000052021456575232400310460ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include #include #if defined(_M_X64) || defined(_M_IX86) # if defined(_M_X64) typedef uint64_t *slice_ptr_type; typedef uint64_t slice_ptr_int_type; # else typedef uint32_t *slice_ptr_type; typedef uint32_t slice_ptr_int_type; # endif /** * This implements crc32c via the intel sse 4.2 instructions. * This is separate from the straight asm version, because visual c does not allow * inline assembly for x64. 
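The hardware path above peels off unaligned leading bytes one at a time, then consumes the buffer in progressively smaller blocks (3072, 1024, and 256 bytes, then 8-byte words, then single trailing bytes), carrying the running CRC through every stage. A minimal Python sketch of that block-dispatch structure, using zlib.crc32 purely as a stand-in primitive (it computes plain CRC-32, not CRC-32C, and the block sizes simply mirror the constants above):

import zlib

def crc32_blocked(data: bytes, previous: int = 0) -> int:
    # Chain the running CRC through blocks of decreasing size, the way
    # aws_checksums_crc32c_hw walks 3072/1024/256-byte kernels, then
    # 8-byte words, then single bytes.
    crc = previous
    offset, remaining = 0, len(data)
    for block in (3072, 1024, 256, 8, 1):
        while remaining >= block:
            crc = zlib.crc32(data[offset:offset + block], crc)
            offset += block
            remaining -= block
    return crc

buf = bytes(range(256)) * 20  # 5120 bytes of test data
assert crc32_blocked(buf) == zlib.crc32(buf)
# The previousCrc32 parameter allows the same chaining across separate calls:
assert crc32_blocked(buf[2000:], crc32_blocked(buf[:2000])) == zlib.crc32(buf)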
*/ uint32_t aws_checksums_crc32c_hw(const uint8_t *data, int length, uint32_t previousCrc32) { uint32_t crc = ~previousCrc32; int length_to_process = length; slice_ptr_type temp = (slice_ptr_type)data; /*to eek good performance out of the intel implementation, we need to only hit the hardware once we are aligned on the byte boundaries we are using. So, peel off a byte at a time until we are 8 byte aligned (64 bit arch) or 4 byte aligned (32 bit arch) first calculate how many bytes we need to burn before we are aligned. for a 64 bit arch this is: (8 - ) mod 8 32 bit: (4 - ) mod 4 */ uint8_t alignment_offset = (sizeof(slice_ptr_int_type) - ((slice_ptr_int_type)temp % sizeof(slice_ptr_int_type))) % sizeof(slice_ptr_int_type); /*for every byte we need to burn off, just do them a byte at a time. increment the temp pointer by one byte at a time until we get it on an alignment boundary */ while (alignment_offset != 0 && length_to_process) { uint8_t *byte_pos = (uint8_t *)temp; crc = (uint32_t)_mm_crc32_u8(crc, *byte_pos++); temp = (slice_ptr_type)byte_pos; --alignment_offset; --length_to_process; } /*now whatever is left is properly aligned on a boundary*/ uint32_t slices = length_to_process / sizeof(temp); uint32_t remainder = length_to_process % sizeof(temp); while (slices--) { # if defined(_M_X64) crc = (uint32_t)_mm_crc32_u64(crc, *temp++); # else crc = _mm_crc32_u32(crc, *temp++); # endif } /* process the remaining parts that can't be done on the slice size. */ uint8_t *remainderPos = (uint8_t *)temp; while (remainder--) { crc = (uint32_t)_mm_crc32_u8(crc, *remainderPos++); } return ~crc; } uint32_t aws_checksums_crc32_hw(const uint8_t *input, int length, uint32_t previousCrc32) { return aws_checksums_crc32_sw(input, length, previousCrc32); } #endif /* x64 || x86 */ aws-crt-python-0.20.4+dfsg/crt/aws-checksums/tests/000077500000000000000000000000001456575232400221375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/aws-checksums/tests/CMakeLists.txt000066400000000000000000000004031456575232400246740ustar00rootroot00000000000000include(AwsLibFuzzer) include(AwsTestHarness) enable_testing() file(GLOB TEST_HDRS "*.h") file(GLOB TEST_SRC "*.c") file(GLOB TESTS ${TEST_HDRS} ${TEST_SRC}) add_test_case(test_crc32c) add_test_case(test_crc32) generate_test_driver(${PROJECT_NAME}-tests) aws-crt-python-0.20.4+dfsg/crt/aws-checksums/tests/crc_test.c000066400000000000000000000104401456575232400241100ustar00rootroot00000000000000/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
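Both implementations above burn leading bytes until the pointer is word aligned; the count is (width - (address mod width)) mod width, or equivalently (8 - (address & 7)) & 7 in the inline-assembly path. A small illustration of that arithmetic in Python, using plain integers in place of real pointers:

def leading_bytes(address: int, width: int = 8) -> int:
    # Bytes to consume one at a time before the address is width-aligned.
    return (width - (address % width)) % width

def leading_bytes_masked(address: int) -> int:
    # Bit-twiddled form used by the GCC/Clang path for width 8.
    return (8 - (address & 0x7)) & 0x7

assert all(leading_bytes(a) == leading_bytes_masked(a) for a in range(64))
assert leading_bytes(0x1000) == 0            # already aligned, nothing to peel off
assert leading_bytes(0x1001) == 7            # one byte past a boundary
assert leading_bytes(0x1007) == 1
assert leading_bytes(0x1002, width=4) == 2   # 32-bit build aligns to 4 bytes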
*/ #include #include #include static const uint8_t DATA_32_ZEROS[32] = {0}; static const uint32_t KNOWN_CRC32_32_ZEROES = 0x190A55AD; static const uint32_t KNOWN_CRC32C_32_ZEROES = 0x8A9136AA; static const uint8_t DATA_32_VALUES[32] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; static const uint32_t KNOWN_CRC32C_32_VALUES = 0x46DD794E; static const uint8_t TEST_VECTOR[] = {'1', '2', '3', '4', '5', '6', '7', '8', '9'}; static const uint32_t KNOWN_CRC32_TEST_VECTOR = 0xCBF43926; static const uint32_t KNOWN_CRC32C_TEST_VECTOR = 0xE3069283; static uint8_t *s_non_mem_aligned_vector; typedef uint32_t(crc_fn)(const uint8_t *input, int length, uint32_t previousCrc32); #define CRC_FUNC_NAME(crc_func) #crc_func, crc_func #define DATA_NAME(dataset) #dataset, dataset, sizeof(dataset) /* Makes sure that the specified crc function produces the expected results for known input and output*/ static int s_test_known_crc( const char *func_name, crc_fn *func, const char *data_name, const uint8_t *input, size_t length, uint32_t expected) { uint32_t result = func(input, (int)length, 0); ASSERT_HEX_EQUALS(expected, result, "%s(%s)", func_name, data_name); /* chain the crc computation so 2 calls each operate on about 1/2 of the buffer*/ uint32_t crc1 = func(input, (int)(length / 2), 0); result = func(input + (length / 2), (int)(length - length / 2), crc1); ASSERT_HEX_EQUALS(expected, result, "chaining %s(%s)", func_name, data_name); crc1 = 0; for (size_t i = 0; i < length; ++i) { crc1 = func(input + i, 1, crc1); } ASSERT_HEX_EQUALS(expected, crc1, "one byte at a time %s(%s)", func_name, data_name); return AWS_OP_SUCCESS; } /* helper function that groups crc32 tests*/ static int s_test_known_crc32(const char *func_name, crc_fn *func) { int res = 0; res |= s_test_known_crc(func_name, func, DATA_NAME(DATA_32_ZEROS), KNOWN_CRC32_32_ZEROES); res |= s_test_known_crc(func_name, func, DATA_NAME(TEST_VECTOR), KNOWN_CRC32_TEST_VECTOR); return res; } /* helper function that groups crc32c tests*/ static int s_test_known_crc32c(const char *func_name, crc_fn *func) { int res = 0; res |= s_test_known_crc(func_name, func, DATA_NAME(DATA_32_ZEROS), KNOWN_CRC32C_32_ZEROES); res |= s_test_known_crc(func_name, func, DATA_NAME(DATA_32_VALUES), KNOWN_CRC32C_32_VALUES); res |= s_test_known_crc(func_name, func, DATA_NAME(TEST_VECTOR), KNOWN_CRC32C_TEST_VECTOR); /*this tests three things, first it tests the case where we aren't 8-byte aligned*/ /*seconde, it tests that reads aren't performed before start of buffer*/ /*third, it tests that writes aren't performed after the end of the buffer.*/ /*if any of those things happen, then the checksum will be wrong and the assertion will fail */ s_non_mem_aligned_vector = malloc(sizeof(DATA_32_VALUES) + 6); memset(s_non_mem_aligned_vector, 1, sizeof(DATA_32_VALUES) + 6); memcpy(s_non_mem_aligned_vector + 3, DATA_32_VALUES, sizeof(DATA_32_VALUES)); res |= s_test_known_crc( func_name, func, "non_mem_aligned_vector", s_non_mem_aligned_vector + 3, sizeof(DATA_32_VALUES), KNOWN_CRC32C_32_VALUES); free(s_non_mem_aligned_vector); return res; } /** * Quick sanity check of some known CRC values for known input. * The reference functions are included in these tests to verify that they aren't obviously broken. 
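The expected values above are the standard CRC-32C check vectors (0xE3069283 for the ASCII digits "123456789", 0x8A9136AA for 32 zero bytes, 0x46DD794E for the bytes 0 through 31), and the chaining and one-byte-at-a-time cases rely on a previous result being reusable as the starting value. A minimal bit-by-bit CRC-32C reference in Python that reproduces those vectors, for illustration only (reflected polynomial 0x82F63B78; it is far slower than either implementation above):

def crc32c(data: bytes, previous: int = 0) -> int:
    # Same calling convention as aws_checksums_crc32c: pass 0 to start,
    # or a previous result to continue a running CRC.
    crc = previous ^ 0xFFFFFFFF            # un-invert the running value
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0x82F63B78 if crc & 1 else 0)
    return crc ^ 0xFFFFFFFF                # invert again on the way out

assert crc32c(b"123456789") == 0xE3069283
assert crc32c(bytes(32)) == 0x8A9136AA
assert crc32c(bytes(range(32))) == 0x46DD794E

data = bytes(range(32))
# Chaining two calls over halves of the buffer gives the same answer...
assert crc32c(data[16:], crc32c(data[:16])) == 0x46DD794E
# ...and so does one byte at a time, which is what s_test_known_crc exercises.
crc = 0
for i in range(len(data)):
    crc = crc32c(data[i:i + 1], crc)
assert crc == 0x46DD794E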
*/ static int s_test_crc32c(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int res = 0; res |= s_test_known_crc32c(CRC_FUNC_NAME(aws_checksums_crc32c)); res |= s_test_known_crc32c(CRC_FUNC_NAME(aws_checksums_crc32c_sw)); return res; } AWS_TEST_CASE(test_crc32c, s_test_crc32c) static int s_test_crc32(struct aws_allocator *allocator, void *ctx) { (void)allocator; (void)ctx; int res = 0; res |= s_test_known_crc32(CRC_FUNC_NAME(aws_checksums_crc32)); return res; } AWS_TEST_CASE(test_crc32, s_test_crc32) aws-crt-python-0.20.4+dfsg/crt/s2n/000077500000000000000000000000001456575232400167225ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.clang-format000066400000000000000000000100351456575232400212740ustar00rootroot00000000000000--- Language: Cpp BasedOnStyle: Google AccessModifierOffset: -1 AlignAfterOpenBracket: DontAlign AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignConsecutiveMacros: true AlignEscapedNewlines: Left AlignOperands: false AlignTrailingComments: true AllowAllArgumentsOnNextLine: false AllowAllConstructorInitializersOnNextLine: false AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: Empty AllowShortLambdasOnASingleLine: All AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: Yes BinPackArguments: true BinPackParameters: true BraceWrapping: AfterCaseLabel: false AfterClass: false AfterControlStatement: false AfterEnum: false AfterFunction: true AfterNamespace: false AfterObjCDeclaration: false AfterStruct: false AfterUnion: false AfterExternBlock: false BeforeCatch: false BeforeElse: false IndentBraces: false SplitEmptyFunction: true SplitEmptyRecord: true SplitEmptyNamespace: true BreakBeforeBinaryOperators: NonAssignment BreakBeforeBraces: Custom #This unlocks BraceWrapping settings BreakBeforeInheritanceComma: false BreakInheritanceList: BeforeColon BreakBeforeTernaryOperators: false BreakConstructorInitializersBeforeComma: false BreakConstructorInitializers: BeforeColon BreakAfterJavaFieldAnnotations: false BreakStringLiterals: true ColumnLimit: 0 CommentPragmas: '^ clang-format pragma:' CompactNamespaces: false ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 8 Cpp11BracedListStyle: false DisableFormat: false ExperimentalAutoDetectBinPacking: false FixNamespaceComments: true ForEachMacros: - foreach - Q_FOREACH - BOOST_FOREACH TypenameMacros: ['S2N_RESULT', 'S2N_CLEANUP_RESULT', 'STACK_OF'] AttributeMacros: ['S2N_RESULT_MUST_USE', 'S2N_API', 'S2N_PRIVATE_API'] StatementAttributeLikeMacros: - FAIL_MSG IncludeBlocks: Regroup IncludeCategories: - Regex: '^' Priority: 2 - Regex: '^<.*\.h>' Priority: 1 - Regex: '^<.*' Priority: 2 - Regex: '.*' Priority: 3 IncludeIsMainRegex: '([-_](test|unittest))?$' IndentCaseLabels: true IndentPPDirectives: BeforeHash IndentWidth: 4 IndentWrappedFunctionNames: true JavaScriptQuotes: Leave JavaScriptWrapImports: true KeepEmptyLinesAtTheStartOfBlocks: false MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCBinPackProtocolList: Never ObjCBlockIndentWidth: 2 ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true PenaltyBreakAssignment: 2 PenaltyBreakBeforeFirstCallParameter: 1 
PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyBreakTemplateDeclaration: 10 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 200 PointerAlignment: Right RawStringFormats: - Language: Cpp Delimiters: - cc - CC - cpp - Cpp - CPP - 'c++' - 'C++' CanonicalDelimiter: '' BasedOnStyle: google ReflowComments: false SortIncludes: true SortUsingDeclarations: true SpaceAfterCStyleCast: true SpaceAfterLogicalNot: false SpaceAfterTemplateKeyword: true SpaceBeforeAssignmentOperators: true SpaceBeforeCpp11BracedList: false SpaceBeforeCtorInitializerColon: true SpaceBeforeInheritanceColon: true SpaceBeforeParens: ControlStatements SpaceBeforeRangeBasedForLoopColon: true SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 2 SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Auto StatementMacros: - Q_UNUSED - QT_REQUIRE_VERSION - test_stack_blob_success TabWidth: 8 UseTab: Never aws-crt-python-0.20.4+dfsg/crt/s2n/.git-blame-ignore-revs000066400000000000000000000037231456575232400230270ustar00rootroot00000000000000# .git-blame-ignore-revs # autopep8 python PR3268 b9dbef317197b2f450b3e963fb4744cc4dc5e087 # add clang-format of stuffer/ (PR #3618) 918f0791002616f81bf0a5347de83bb0815101ac # clang-format `tls/extensions` and enforce in ci (PR #3633) ee3df080077242b6b765d02b21da59e4194ee485 # clang-format `bin/` and enforce in ci (PR #3635) 49e8ab14672a4c1b479abc442bf1e54f815a6a47 # clang-format `error/` and enforce in ci (PR #3638) f47255051a6d440bd400ed35f3397085a049c000 # clang-format `api/` and enforce in ci (PR #3637) 15158a2c37aa04ce69c77fca67f8175ac0b3a799 # Autopep8 updated CI and code (#3736) 43d6f1bed249f77004d33c3b757fb9d8ab50fb5c # Clang-format `tests/unit/s2n_[l-r].*\.c` and enforce in CI (#3677) 035ec1c6c8a80f2a88f8421da9862c3848fff12a # Clang-format `tests/unit/s2n_s.*\.c` and enforce in CI (#3678) 6d8741cd0b6e3aa95c227d03633bdd04d44bbcc7 # Clang format `tls/s2n_[a-h].*\.[ch]` and enforce in CI (#3681) f2faa0e25b1d68cd36173ae44df58be3218b6ca1 # Clang-format `crypto/` and enforce in CI (#3680) f5351ef6629d1f6de144ab478bf4294ec277b321 # Clang-format `tls/s2n_[s-z].*\.[ch]` and enforce in CI (#3683) 0fe8ac5311c6a421c9e6257ae1cb5ac8cbc6845c # Clang-format `tests/unit/s2n_[t-z].*\.c` and enforce in CI (#3679) 0bb61469995db593e735915d770dc32e8985404a # Clang format `tests/unit/s2n_[bc].*\.c` and enforce in CI (#3675) 06e9e8c13d3c19a172b69eb57c278f68055ac8d4 # Clang-format `tests/unit/s2n_[d-k].*\.c` and enforce in CI (#3676) 2f9c7a48fd8faf44d5a6f375d6d3fbf9aada8e03 # Clang-format `tls/s2n_[i-r].*\.[ch]` and enforce in CI (#3682) 0e0d6079454361251e18d80af1af345881423b7a # Clang-format of `tests/unit/s2n_[3a].*\.c` + transision to exclude regex (#3664) 4dd87578a946a081fcf21dbb5912cb506ae83460 # Update to clang-format causes reformat of api folder (#3663) 71e0343b6e082ebf47d8f8f272bee5486140b374 # clang-format `tests/testslib` and add to ci (#3650) e8cdc1ae63ff1de6f14cf91e3c317fbf57c198ec # clang-format `utils/` and enforce in ci (#3651) be8ad6c027b50e9dc86d8f8eb729ce88f2d4206d aws-crt-python-0.20.4+dfsg/crt/s2n/.github/000077500000000000000000000000001456575232400202625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/CODEOWNERS000066400000000000000000000000231456575232400216500ustar00rootroot00000000000000/codebuild @dougch 
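The .git-blame-ignore-revs file above lists bulk reformatting commits (clang-format, autopep8) so that blame can skip past them to the commit that last changed the substance of a line. A short sketch of applying it, assuming git 2.23 or newer and that the file sits at the repository root:

import subprocess

def blame_ignoring_reformat(path: str) -> str:
    # Run git blame with the bulk-formatting revisions filtered out.
    result = subprocess.run(
        ["git", "blame", "--ignore-revs-file", ".git-blame-ignore-revs", path],
        capture_output=True, text=True, check=True)
    return result.stdout

# Alternatively, configure it once per clone so plain `git blame` picks it up:
#   git config blame.ignoreRevsFile .git-blame-ignore-revs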
aws-crt-python-0.20.4+dfsg/crt/s2n/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456575232400224455ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/ISSUE_TEMPLATE/custom.md000066400000000000000000000036011456575232400243010ustar00rootroot00000000000000 --- name: S2N Issue about: Template title: '' labels: '' assignees: '' --- ### Security issue notifications If you discover a potential security issue in s2n we ask that you notify AWS Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ### Problem: A short description of what the problem is and why we need to fix it. Add reproduction steps if necessary. ### Solution: A description of the possible solution in terms of S2N architecture. Highlight and explain any potentially controversial design decisions taken. * **Does this change what S2N sends over the wire?** If yes, explain. * **Does this change any public APIs?** If yes, explain. * **Which versions of TLS will this impact?** ### Requirements / Acceptance Criteria: What must a solution address in order to solve the problem? How do we know the solution is complete? * **RFC links:** Links to relevant RFC(s) * **Related Issues:** Link any relevant issues * **Will the Usage Guide or other documentation need to be updated?** * **Testing:** How will this change be tested? Call out new integration tests, functional tests, or particularly interesting/important unit tests. * **Will this change trigger SAW changes?** Changes to the state machine, the s2n_handshake_io code that controls state transitions, the DRBG, or the corking/uncorking logic could trigger SAW failures. * **Should this change be fuzz tested?** Will it handle untrusted input? Create a separate issue to track the fuzzing work. ### Out of scope: Is there anything the solution will intentionally NOT address? [//]: # (NOTE: If you believe this might be a security issue, please email aws-security@amazon.com instead of creating a GitHub issue. For more details, see the AWS Vulnerability Reporting Guide: https://aws.amazon.com/security/vulnerability-reporting/ ) aws-crt-python-0.20.4+dfsg/crt/s2n/.github/ISSUE_TEMPLATE/s2n-issue.md000066400000000000000000000035771456575232400246330ustar00rootroot00000000000000--- name: S2N Issue about: Template title: '' labels: '' assignees: '' --- ### Security issue notifications If you discover a potential security issue in s2n we ask that you notify AWS Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. ### Problem: A short description of what the problem is and why we need to fix it. Add reproduction steps if necessary. ### Solution: A description of the possible solution in terms of S2N architecture. Highlight and explain any potentially controversial design decisions taken. * **Does this change what S2N sends over the wire?** If yes, explain. * **Does this change any public APIs?** If yes, explain. * **Which versions of TLS will this impact?** ### Requirements / Acceptance Criteria: What must a solution address in order to solve the problem? How do we know the solution is complete? * **RFC links:** Links to relevant RFC(s) * **Related Issues:** Link any relevant issues * **Will the Usage Guide or other documentation need to be updated?** * **Testing:** How will this change be tested? 
Call out new integration tests, functional tests, or particularly interesting/important unit tests. * **Will this change trigger SAW changes?** Changes to the state machine, the s2n_handshake_io code that controls state transitions, the DRBG, or the corking/uncorking logic could trigger SAW failures. * **Should this change be fuzz tested?** Will it handle untrusted input? Create a separate issue to track the fuzzing work. ### Out of scope: Is there anything the solution will intentionally NOT address? [//]: # (NOTE: If you believe this might be a security issue, please email aws-security@amazon.com instead of creating a GitHub issue. For more details, see the AWS Vulnerability Reporting Guide: https://aws.amazon.com/security/vulnerability-reporting/ ) aws-crt-python-0.20.4+dfsg/crt/s2n/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000014311456575232400240620ustar00rootroot00000000000000### Resolved issues: Resolves #ISSUE-NUMBER1, resolves #ISSUE-NUMBER2, etc. ### Description of changes: Describe s2n’s current behavior and how your code changes that behavior. If there are no issues this PR is resolving, explain why this change is necessary. ### Call-outs: Address any potentially confusing code. Is there code added that needs to be cleaned up later? Is there code that is missing because it’s still in development? ### Testing: How is this change tested (unit tests, fuzz tests, etc.)? Are there any testing steps to be verified by the reviewer? Is this a refactor change? If so, how have you proved that the intended behavior hasn't changed? By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. aws-crt-python-0.20.4+dfsg/crt/s2n/.github/bin/000077500000000000000000000000001456575232400210325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/bin/label_issues.sh000077500000000000000000000020311456575232400240370ustar00rootroot00000000000000#!/bin/bash set -eu update_label() { # $1 is a username USER=$1 # $2 is a cutoff date; use $(date +%Y-%m-%d) for now DATE=$2 gh api -X GET search/issues -f q="repo:aws/s2n-tls is:open -label:s2n-core author:${USER} created:<${DATE}" > ${USER}.json jq -c -r '.items|.[]|.number' $USER.json > ${USER}_ISSUES.log if [ $(cat ${USER}_ISSUES.log|wc -l) -eq 0 ]; then echo "No issues for ${USER}" return fi echo "Found $(cat ${USER}_ISSUES.log|wc -l) issues for $USER, continue?" 
read for issue in $(cat ${USER}_ISSUES.log); do echo "Updating $issue" echo '["s2n-core"]'| gh api --silent -X POST repos/aws/s2n-tls/issues/$issue/labels --input - done echo "Done with $USER" } internal="toidiu zaherd NLMalloy ttjsu-aws rday agray256 tawdry-audrey salusasecondus dougch lrstewart goatgoose camshaft maddeleine WesleyRosenblum" maint="colmmacc alexw91 baldwinmatt soco alexeblee" for USER in ${internal}; do update_label ${USER} $(date +%Y-%m-%d) done for USER in ${maint}; do update_label ${USER} 2019-10-23 done aws-crt-python-0.20.4+dfsg/crt/s2n/.github/codeql-config.yml000066400000000000000000000000771456575232400235230ustar00rootroot00000000000000name: "S2N CodeQL Config" paths-ignore: - tests/integration aws-crt-python-0.20.4+dfsg/crt/s2n/.github/config/000077500000000000000000000000001456575232400215275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/config/_typos.toml000066400000000000000000000011701456575232400237400ustar00rootroot00000000000000[default] binary = false check-filename = true [default.extend-words] alloced = "alloced" s2nd = "s2nd" nd = "nd" Inforce = "Inforce" # While we build up the extend-words list [type.cpp] check-file = false [type.c] check-file = false [type.py] check-file = false [type.make] check-file = false [type.cmake] check-file = false [type.rust] check-file = false [type.sh] check-file = false [files] extend-exclude = [ "*.bin", "**/corpus/*", "*.cry", "*.der", "*.h", "*.kat", "*.pem", "*.patch", "*.pdf", "*.png", "*.saw", "*.snap", "*.suppressions", "**/specs/**/*", ] aws-crt-python-0.20.4+dfsg/crt/s2n/.github/dashboard.css000066400000000000000000000177021456575232400227320ustar00rootroot00000000000000body { font-family: -apple-system, BlinkMacSystemFont, Segoe UI, Helvetica, Arial, sans-serif, Apple Color Emoji, Segoe UI Emoji; width: 1000px; margin: 0 auto 0 auto; background-color: #eeeeee; } a { text-decoration: none; color: #0366d6; } :root { --color-red-background: #ee0000; --color-red-foreground: #ffffff; --color-red-border: #cc0000; --color-yellow-background: #eeee00; --color-yellow-foreground: #000000; --color-yellow-border: #cccc00; --color-green-background: #00bb00; --color-green-foreground: #ffffff; --color-green-border: #999900; --color-blue-background: #0000ee; --color-blue-foreground: #ffffff; --color-blue-border: #0000cc; --color-black-background: #000000; --color-black-foreground: #ffffff; --color-black-border: #333333; } /* Header / footer */ div#analytics h1 { width: 900px; margin: 50px 50px 30px 50px; } div#main_description { width: 900px; margin: -20px 50px 30px 50px; } div#footer { margin: 15px 50px 30px 50px; font-size: 13px; } /* Sections */ div.sections { background-color: #ffffff; width: 900px; min-height: 250px; padding: 15px 50px 50px 50px; border: solid 1px #dddddd; } div.section { margin-top: 30px; width: 900px; position: relative; display: inline-block; } div.section:after { content: ''; clear: both; display: inline-block; } div.section_metadata { width: 275px; margin-right: 25px; float: left; } div.section_metadata h2 { padding: 0; margin: 0; } div.section_metadata .description { margin-top: 15px; } div.section_widgets { float: left; width: 600px; } div.section_widgets > *:first-child { margin-top: 0; } div.section_widgets > * { margin-top: 25px; } /* Number widgets */ div.number_widgets { position: relative; display: inline-block; } div.number_widgets:after { content: ''; clear: both; display: inline-block; } div.number_widget { width: 143px; height: 143px; margin: 0 5px 5px 0; float: 
left; position: relative; background-color: #ffffff; border: solid 1px #dddddd; color: #000000; } div.number_widget.red { background-color: var(--color-red-background); border: solid 1px var(--color-red-border); color: var(--color-red-foreground); } div.number_widget.yellow { background-color: var(--color-yellow-background); border: solid 1px var(--color-yellow-border); color: var(--color-yellow-foreground); } div.number_widget.green { background-color: var(--color-green-background); border: solid 1px var(--color-green-border); color: var(--color-green-foreground); } div.number_widget.blue { background-color: var(--color-blue-background); border: solid 1px var(--color-blue-border); color: var(--color-blue-foreground); } div.number_widget.black { background-color: var(--color-black-background); border: solid 1px var(--color-black-border); color: var(--color-black-foreground); } div.number_widget .title { display: block; position: absolute; top: 8px; left: 0; margin: 0 10px 0 10px; font-size: 14px; } div.number_widget .value { display: block; position: absolute; bottom: 4px; left: 0; margin: 0 10px 0 10px; font-size: 80px; vertical-align: bottom; line-height: 1; } /* String widgets */ div.string_widget { width: 558px; padding: 15px 20px; background-color: #ffffff; border: solid 1px #dddddd; color: #000000; } div.string_widget.red { background-color: var(--color-red-background); border: solid 1px var(--color-red-border); color: var(--color-red-foreground); } div.string_widget.yellow { background-color: var(--color-yellow-background); border: solid 1px var(--color-yellow-border); color: var(--color-yellow-foreground); } div.string_widget.green { background-color: var(--color-green-background); border: solid 1px var(--color-green-border); color: var(--color-green-foreground); } div.string_widget.blue { background-color: var(--color-blue-background); border: solid 1px var(--color-blue-border); color: var(--color-blue-foreground); } div.string_widget.black { background-color: var(--color-black-background); border: solid 1px var(--color-black-border); color: var(--color-black-foreground); } div.string_widget h3 { margin: 0 0 15px 0; } /* Graph widgets */ div.graph_widget h3.graph_title { margin: 0 0 20px 0; } div.graph_widget h3.graph_title a { color: #222222; } div.graph { display: table; } div.graph_item { display: table-row; } span.graph_item_title { display: table-cell; vertical-align: right; padding: 5px 10px 5px 0; white-space: nowrap; } span.graph_item_value { display: table-cell; width: 100%; padding-right: 7px; border-left: solid 1px #dddddd; } span.graph_item_value span.value { display: block; height: 15px; text-align: right; padding-right: 5px; font-size: 11px; background-color: #000000; color: #ffffff; } span.graph_item_value span.empty_value { padding-right: 0; } div.graph_item.red span.graph_item_value span.value { background-color: var(--color-red-background); border: solid 1px var(--color-red-border); color: var(--color-red-foreground); } div.graph_item.yellow span.graph_item_value span.value { background-color: var(--color-yellow-background); border: solid 1px var(--color-yellow-border); color: var(--color-yellow-foreground); } div.graph_item.green span.graph_item_value span.value { background-color: var(--color-green-background); border: solid 1px var(--color-green-border); color: var(--color-green-foreground); } div.graph_item.blue span.graph_item_value span.value { background-color: var(--color-blue-background); border: solid 1px var(--color-blue-border); color: 
var(--color-blue-foreground); } div.graph_item.black span.graph_item_value span.value { background-color: var(--color-black-background); border: solid 1px var(--color-black-border); color: var(--color-black-foreground); } div.graph_item:last-of-type span.graph_item_value { border-bottom: solid 1px #dddddd; } /* Table widgets */ div.table_widget h3.table_title { margin: 0 0 20px 0; } div.table_widget h3.table_title a { color: #222222; } div.table_widget table { border-collapse: collapse; width: 600px; } div.table_widget table th, div.table_widget table td { border: solid 1px #888888; padding: 4px 8px; text-align: left; vertical-align: top; } div.table_widget table th { background-color: #cccccc; } div.table_widget table tr:nth-child(odd) td { background-color: #e8e8e8; } div.table_widget table th a, div.table_widget table td a { display: block; } div.table_widget table tr th.red, div.table_widget table tr td.red, div.table_widget table tr th.red a, div.table_widget table tr td.red a { background-color: var(--color-red-background); color: var(--color-red-foreground); } div.table_widget table tr th.yellow, div.table_widget table tr td.yellow, div.table_widget table tr th.yellow a, div.table_widget table tr td.yellow a { background-color: var(--color-yellow-background); color: var(--color-yellow-foreground); } div.table_widget table tr th.green, div.table_widget table tr td.green, div.table_widget table tr th.green a, div.table_widget table tr td.green a { background-color: var(--color-green-background); color: var(--color-green-foreground); } div.table_widget table tr th.blue, div.table_widget table tr td.blue, div.table_widget table tr th.blue a, div.table_widget table tr td.blue a { background-color: var(--color-blue-background); color: var(--color-blue-foreground); } div.table_widget table tr th.black, div.table_widget table tr td.black, div.table_widget table tr th.black a, div.table_widget table tr td.black a { background-color: var(--color-black-background); color: var(--color-black-foreground); } aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/000077500000000000000000000000001456575232400225705ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/gha_monitor/000077500000000000000000000000001456575232400250765ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/gha_monitor/__init__.py000066400000000000000000000000001456575232400271750ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/gha_monitor/__main__.py000077500000000000000000000076261456575232400272060ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging import os from . import github from . import sns from datetime import datetime, timedelta from dateutil import parser, tz logger = logging.getLogger() logger.setLevel(logging.INFO) # What time range to consider alerting on failures. 
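The window below is derived from MONITOR_FREQ_IN_HOURS and compared against each run's created_at timestamp. A self-contained sketch of that filter using only the standard library (the module itself parses GitHub's ISO-8601 timestamps with dateutil):

from datetime import datetime, timedelta, timezone

def within_window(created_at: str, hours: float, now=None) -> bool:
    # Keep only events created within the last `hours` hours.
    # GitHub timestamps look like "2020-04-14T14:04:07Z".
    now = now or datetime.now(timezone.utc)
    created = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ")
    created = created.replace(tzinfo=timezone.utc)
    return created > now - timedelta(hours=hours)

# With a fixed "now", the failed run from the sample JSON further down
# (created_at 2020-04-14T14:04:07Z) falls inside a 24-hour window:
fixed_now = datetime(2020, 4, 15, 12, 0, tzinfo=timezone.utc)
assert within_window("2020-04-14T14:04:07Z", 24, now=fixed_now)
assert not within_window("2020-04-10T00:00:00Z", 24, now=fixed_now)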
TIME_WINDOW_BEGIN = datetime.now().astimezone(tz.UTC) - timedelta(hours=float(os.getenv('MONITOR_FREQ_IN_HOURS'))) TIME_WINDOW_END = datetime.now().astimezone(tz.UTC) class GitHubActions(github.GitHubClient): params = { # Needed when using an API key 'github_username': os.getenv('github_username', None), 'github_password': os.getenv('github_password', None), # Use from within an Action - ignored if username set 'token': os.getenv('GITHUB_TOKEN', None), 'repo_organization': os.getenv('GITHUB_REPO_ORG'), 'repo': os.getenv('GITHUB_REPO') } class S2nNotices(sns.SNSClient): params = { 'topic_arn': 'arn:aws:sns:us-west-2:024603541914:s2n_notices' } def message_text(): """ Formatting for text message. """ return """ s2n GitHub Action monitor notice State: {conclusion} Repo: {repo} GHA failure time: {time} Workflow name: {workflow_name} URL: {url} started by: {commit_owner}\n """ def main(): """ Main entrypoint. """ logging.info('Starting up') plaintext_notice = [] gh_api = GitHubActions() s2n_text_client = S2nNotices() # Get the Action workflow log from the Github API gh_api.get_workflow_log_chunk(final_state='failure') logging.info(f"Looking for failures newer than {TIME_WINDOW_BEGIN} in {gh_api.params['repo_organization']}" f"/{gh_api.params['repo']}") if gh_api.worklog: for enhanced_worklog in gh_api.worklog: # Parse the event date/time so we can compare it datetime_creation = parser.parse(enhanced_worklog['created_at']) logging.debug(f"looking at event from {enhanced_worklog['created_at']}") # If the event is recent enough, process it. if datetime_creation > TIME_WINDOW_BEGIN: logging.debug(f"Workflow_url: {enhanced_worklog['workflow_url']}") # The name of the workflow isn't in the failure object, look it up. enhanced_worklog['workflow_name'] = gh_api.get_workflow_name(enhanced_worklog['workflow_url'].split('/')[-1:][0]) enhanced_worklog['repo'] = gh_api.params['repo'] # Construct a notification string. notice_msg = message_text().format( conclusion=enhanced_worklog['conclusion'], time=enhanced_worklog['created_at'], url=enhanced_worklog['html_url'], commit_owner=enhanced_worklog['head_commit']['author']['email'], repo=enhanced_worklog['repo'], workflow_name=enhanced_worklog['workflow_name']) logging.debug(notice_msg) plaintext_notice.append(notice_msg) else: logging.debug("event outside time range.") else: logging.info("GH API returned empty worklog") # Relay messages to SNS if plaintext_notice: # Combine multiple message together. 
logging.info(s2n_text_client.publish("\n".join(plaintext_notice))) logging.info(f"Notices published") logging.info("Done") if __name__ == '__main__': main() aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/gha_monitor/github.py000066400000000000000000000034561456575232400267420ustar00rootroot00000000000000import logging from agithub import GitHub logger = logging.getLogger() class GitHubClient: # Over-ride params = { 'github_username': None, 'secret': None, 'repo_organization': None, 'repo': None } def __init__(self): self._github = GitHub.GitHub(username=self.params['github_username'], password=self.params['github_password'], token=self.params['token']) self.response = {} self.worklog = None self.repo_org = self.params['repo_organization'] self.repo = self.params['repo'] def get_workflow_log_chunk(self, chunk=1, final_state='failure'): """ Example using agithub: username= get_user secret = get_secret client = GitHub.GitHub(username, secret) client.repos.awslabs['private-s2n-fuzz'].actions.runs.get(page="1", status="failure") :param final_state: str :param chunk: int """ (status_code, self.response) = \ self._github.repos[self.repo_org][self.repo].actions.runs.get(page=chunk, status=final_state) return status_code def get_workflow_name(self, workflow_id): logging.debug(f"Looking up workflow_id {workflow_id}") (status_code, response) = \ self._github.repos[self.repo_org][self.repo].actions.workflows[workflow_id].get() workflow_name = response['name'] logging.debug(f"Github workflow lookup gave us {workflow_name}") return workflow_name class GitHubWorklog: def __init__(self, worklog): self._worklog = iter(worklog['workflow_runs']) def __iter__(self): return self def __next__(self): return next(self._worklog) aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/gha_monitor/sns.py000066400000000000000000000015561456575232400262620ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
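The GitHubClient above pages through failed workflow runs with agithub and resolves workflow names with a second request, and the SNS client that follows simply publishes the assembled text. A rough equivalent of that round trip written directly against requests and boto3, with the repository, token, and topic ARN left as placeholders:

import boto3
import requests

API = "https://api.github.com"

def failed_runs(org: str, repo: str, token: str, page: int = 1):
    # GET /repos/{org}/{repo}/actions/runs filtered to failed runs.
    resp = requests.get(
        f"{API}/repos/{org}/{repo}/actions/runs",
        params={"status": "failure", "page": page},
        headers={"Authorization": f"Bearer {token}"})
    resp.raise_for_status()
    return resp.json()["workflow_runs"]

def workflow_name(org: str, repo: str, token: str, workflow_id: str) -> str:
    # The failure object only carries workflow_url, so the name is looked up.
    resp = requests.get(
        f"{API}/repos/{org}/{repo}/actions/workflows/{workflow_id}",
        headers={"Authorization": f"Bearer {token}"})
    resp.raise_for_status()
    return resp.json()["name"]

def notify(messages, topic_arn: str):
    # Combine the notices and publish them to the SNS topic in one call.
    if messages:
        boto3.client("sns").publish(TopicArn=topic_arn, Message="\n".join(messages))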
import boto3 class SNSClient: params = {'topic_arn': None} def __init__(self): self.client = boto3.client('sns') def publish(self, message: None): """ Make the boto call """ response = self.client.publish(TopicArn=self.params['topic_arn'], Message=message) return response aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/requirements.txt000066400000000000000000000000361456575232400260530ustar00rootroot00000000000000agithub==2.2.2 boto3==1.12.29 aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/tests/000077500000000000000000000000001456575232400237325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/tests/failed_job.json000066400000000000000000000234121456575232400267050ustar00rootroot00000000000000{ "id": 78040669, "node_id": "MDExOldvcmtmbG93UnVuNzgwNDA2Njk=", "head_branch": "master", "head_sha": "775bca6c0ff0d3f500fa1db0302acd62c9993c85", "run_number": 105, "event": "schedule", "status": "completed", "conclusion": "failure", "url": "https://api.github.com/repos/dougch/s2n/actions/runs/78040669", "html_url": "https://github.com/dougch/s2n/actions/runs/78040669", "pull_requests": [], "created_at": "2020-04-14T14:04:07Z", "updated_at": "2020-04-14T14:04:56Z", "jobs_url": "https://api.github.com/repos/dougch/s2n/actions/runs/78040669/jobs", "logs_url": "https://api.github.com/repos/dougch/s2n/actions/runs/78040669/logs", "check_suite_url": "https://api.github.com/repos/dougch/s2n/check-suites/596810854", "artifacts_url": "https://api.github.com/repos/dougch/s2n/actions/runs/78040669/artifacts", "cancel_url": "https://api.github.com/repos/dougch/s2n/actions/runs/78040669/cancel", "rerun_url": "https://api.github.com/repos/dougch/s2n/actions/runs/78040669/rerun", "workflow_url": "https://api.github.com/repos/dougch/s2n/actions/workflows/996053", "head_commit": { "id": "775bca6c0ff0d3f500fa1db0302acd62c9993c85", "tree_id": "c7c4248049c1c76ba7643f20d7a0147c66c5596d", "message": "Merge branch 'gha_monitoring' FOR TESTING", "timestamp": "2020-04-10T22:09:13Z", "author": { "name": "Doug Chapman", "email": "dougch@amazon.com" }, "committer": { "name": "Doug Chapman", "email": "dougch@amazon.com" } }, "repository": { "id": 217368288, "node_id": "MDEwOlJlcG9zaXRvcnkyMTczNjgyODg=", "name": "s2n", "full_name": "dougch/s2n", "private": false, "owner": { "login": "dougch", "id": 54039637, "node_id": "MDQ6VXNlcjU0MDM5NjM3", "avatar_url": "https://avatars0.githubusercontent.com/u/54039637?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dougch", "html_url": "https://github.com/dougch", "followers_url": "https://api.github.com/users/dougch/followers", "following_url": "https://api.github.com/users/dougch/following{/other_user}", "gists_url": "https://api.github.com/users/dougch/gists{/gist_id}", "starred_url": "https://api.github.com/users/dougch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dougch/subscriptions", "organizations_url": "https://api.github.com/users/dougch/orgs", "repos_url": "https://api.github.com/users/dougch/repos", "events_url": "https://api.github.com/users/dougch/events{/privacy}", "received_events_url": "https://api.github.com/users/dougch/received_events", "type": "User", "site_admin": false }, "html_url": "https://github.com/dougch/s2n", "description": "s2n : an implementation of the TLS/SSL protocols", "fork": true, "url": "https://api.github.com/repos/dougch/s2n", "forks_url": "https://api.github.com/repos/dougch/s2n/forks", "keys_url": "https://api.github.com/repos/dougch/s2n/keys{/key_id}", 
"collaborators_url": "https://api.github.com/repos/dougch/s2n/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/dougch/s2n/teams", "hooks_url": "https://api.github.com/repos/dougch/s2n/hooks", "issue_events_url": "https://api.github.com/repos/dougch/s2n/issues/events{/number}", "events_url": "https://api.github.com/repos/dougch/s2n/events", "assignees_url": "https://api.github.com/repos/dougch/s2n/assignees{/user}", "branches_url": "https://api.github.com/repos/dougch/s2n/branches{/branch}", "tags_url": "https://api.github.com/repos/dougch/s2n/tags", "blobs_url": "https://api.github.com/repos/dougch/s2n/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/dougch/s2n/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/dougch/s2n/git/refs{/sha}", "trees_url": "https://api.github.com/repos/dougch/s2n/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/dougch/s2n/statuses/{sha}", "languages_url": "https://api.github.com/repos/dougch/s2n/languages", "stargazers_url": "https://api.github.com/repos/dougch/s2n/stargazers", "contributors_url": "https://api.github.com/repos/dougch/s2n/contributors", "subscribers_url": "https://api.github.com/repos/dougch/s2n/subscribers", "subscription_url": "https://api.github.com/repos/dougch/s2n/subscription", "commits_url": "https://api.github.com/repos/dougch/s2n/commits{/sha}", "git_commits_url": "https://api.github.com/repos/dougch/s2n/git/commits{/sha}", "comments_url": "https://api.github.com/repos/dougch/s2n/comments{/number}", "issue_comment_url": "https://api.github.com/repos/dougch/s2n/issues/comments{/number}", "contents_url": "https://api.github.com/repos/dougch/s2n/contents/{+path}", "compare_url": "https://api.github.com/repos/dougch/s2n/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/dougch/s2n/merges", "archive_url": "https://api.github.com/repos/dougch/s2n/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/dougch/s2n/downloads", "issues_url": "https://api.github.com/repos/dougch/s2n/issues{/number}", "pulls_url": "https://api.github.com/repos/dougch/s2n/pulls{/number}", "milestones_url": "https://api.github.com/repos/dougch/s2n/milestones{/number}", "notifications_url": "https://api.github.com/repos/dougch/s2n/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/dougch/s2n/labels{/name}", "releases_url": "https://api.github.com/repos/dougch/s2n/releases{/id}", "deployments_url": "https://api.github.com/repos/dougch/s2n/deployments" }, "head_repository": { "id": 217368288, "node_id": "MDEwOlJlcG9zaXRvcnkyMTczNjgyODg=", "name": "s2n", "full_name": "dougch/s2n", "private": false, "owner": { "login": "dougch", "id": 54039637, "node_id": "MDQ6VXNlcjU0MDM5NjM3", "avatar_url": "https://avatars0.githubusercontent.com/u/54039637?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dougch", "html_url": "https://github.com/dougch", "followers_url": "https://api.github.com/users/dougch/followers", "following_url": "https://api.github.com/users/dougch/following{/other_user}", "gists_url": "https://api.github.com/users/dougch/gists{/gist_id}", "starred_url": "https://api.github.com/users/dougch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dougch/subscriptions", "organizations_url": "https://api.github.com/users/dougch/orgs", "repos_url": "https://api.github.com/users/dougch/repos", "events_url": "https://api.github.com/users/dougch/events{/privacy}", "received_events_url": 
"https://api.github.com/users/dougch/received_events", "type": "User", "site_admin": false }, "html_url": "https://github.com/dougch/s2n", "description": "s2n : an implementation of the TLS/SSL protocols", "fork": true, "url": "https://api.github.com/repos/dougch/s2n", "forks_url": "https://api.github.com/repos/dougch/s2n/forks", "keys_url": "https://api.github.com/repos/dougch/s2n/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/dougch/s2n/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/dougch/s2n/teams", "hooks_url": "https://api.github.com/repos/dougch/s2n/hooks", "issue_events_url": "https://api.github.com/repos/dougch/s2n/issues/events{/number}", "events_url": "https://api.github.com/repos/dougch/s2n/events", "assignees_url": "https://api.github.com/repos/dougch/s2n/assignees{/user}", "branches_url": "https://api.github.com/repos/dougch/s2n/branches{/branch}", "tags_url": "https://api.github.com/repos/dougch/s2n/tags", "blobs_url": "https://api.github.com/repos/dougch/s2n/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/dougch/s2n/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/dougch/s2n/git/refs{/sha}", "trees_url": "https://api.github.com/repos/dougch/s2n/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/dougch/s2n/statuses/{sha}", "languages_url": "https://api.github.com/repos/dougch/s2n/languages", "stargazers_url": "https://api.github.com/repos/dougch/s2n/stargazers", "contributors_url": "https://api.github.com/repos/dougch/s2n/contributors", "subscribers_url": "https://api.github.com/repos/dougch/s2n/subscribers", "subscription_url": "https://api.github.com/repos/dougch/s2n/subscription", "commits_url": "https://api.github.com/repos/dougch/s2n/commits{/sha}", "git_commits_url": "https://api.github.com/repos/dougch/s2n/git/commits{/sha}", "comments_url": "https://api.github.com/repos/dougch/s2n/comments{/number}", "issue_comment_url": "https://api.github.com/repos/dougch/s2n/issues/comments{/number}", "contents_url": "https://api.github.com/repos/dougch/s2n/contents/{+path}", "compare_url": "https://api.github.com/repos/dougch/s2n/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/dougch/s2n/merges", "archive_url": "https://api.github.com/repos/dougch/s2n/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/dougch/s2n/downloads", "issues_url": "https://api.github.com/repos/dougch/s2n/issues{/number}", "pulls_url": "https://api.github.com/repos/dougch/s2n/pulls{/number}", "milestones_url": "https://api.github.com/repos/dougch/s2n/milestones{/number}", "notifications_url": "https://api.github.com/repos/dougch/s2n/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/dougch/s2n/labels{/name}", "releases_url": "https://api.github.com/repos/dougch/s2n/releases{/id}", "deployments_url": "https://api.github.com/repos/dougch/s2n/deployments" } }aws-crt-python-0.20.4+dfsg/crt/s2n/.github/gha_monitor/tests/force_push_failure_response.json000066400000000000000000000300531456575232400324100ustar00rootroot00000000000000{ "total_count": 1, "workflow_runs": [ { "id": 64612203, "node_id": "MDExOldvcmtmbG93UnVuNjQ2MTIyMDM=", "head_branch": "master", "head_sha": "ce419d37885aa63c7537ba3f95a1033ce2714d69", "run_number": 7, "event": "schedule", "status": "completed", "conclusion": "failure", "url": "https://api.github.com/repos/awslabs/s2n/actions/runs/64612203", "html_url": "https://github.com/awslabs/s2n/actions/runs/64612203", 
"pull_requests": [], "created_at": "2020-03-27T13:05:25Z", "updated_at": "2020-03-27T13:09:16Z", "jobs_url": "https://api.github.com/repos/awslabs/s2n/actions/runs/64612203/jobs", "logs_url": "https://api.github.com/repos/awslabs/s2n/actions/runs/64612203/logs", "check_suite_url": "https://api.github.com/repos/awslabs/s2n/check-suites/552251638", "artifacts_url": "https://api.github.com/repos/awslabs/s2n/actions/runs/64612203/artifacts", "cancel_url": "https://api.github.com/repos/awslabs/s2n/actions/runs/64612203/cancel", "rerun_url": "https://api.github.com/repos/awslabs/s2n/actions/runs/64612203/rerun", "workflow_url": "https://api.github.com/repos/awslabs/s2n/actions/workflows/822508", "head_commit": { "id": "ce419d37885aa63c7537ba3f95a1033ce2714d69", "tree_id": "45410e26656295a78fc6aabdab15a945397734a8", "message": "Add tls 1.3 server certificate request (#1689)\n\nAdd server signature algorithms extension\r\nUpdate tls 1.3 server cert", "timestamp": "2020-03-26T01:40:10Z", "author": { "name": "Nicola Malloy", "email": "nmalloy@amazon.com" }, "committer": { "name": "GitHub", "email": "noreply@github.com" } }, "repository": { "id": 21287076, "node_id": "MDEwOlJlcG9zaXRvcnkyMTI4NzA3Ng==", "name": "s2n", "full_name": "awslabs/s2n", "private": false, "owner": { "login": "awslabs", "id": 3299148, "node_id": "MDEyOk9yZ2FuaXphdGlvbjMyOTkxNDg=", "avatar_url": "https://avatars0.githubusercontent.com/u/3299148?v=4", "gravatar_id": "", "url": "https://api.github.com/users/awslabs", "html_url": "https://github.com/awslabs", "followers_url": "https://api.github.com/users/awslabs/followers", "following_url": "https://api.github.com/users/awslabs/following{/other_user}", "gists_url": "https://api.github.com/users/awslabs/gists{/gist_id}", "starred_url": "https://api.github.com/users/awslabs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/awslabs/subscriptions", "organizations_url": "https://api.github.com/users/awslabs/orgs", "repos_url": "https://api.github.com/users/awslabs/repos", "events_url": "https://api.github.com/users/awslabs/events{/privacy}", "received_events_url": "https://api.github.com/users/awslabs/received_events", "type": "Organization", "site_admin": false }, "html_url": "https://github.com/awslabs/s2n", "description": "s2n : an implementation of the TLS/SSL protocols", "fork": false, "url": "https://api.github.com/repos/awslabs/s2n", "forks_url": "https://api.github.com/repos/awslabs/s2n/forks", "keys_url": "https://api.github.com/repos/awslabs/s2n/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/awslabs/s2n/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/awslabs/s2n/teams", "hooks_url": "https://api.github.com/repos/awslabs/s2n/hooks", "issue_events_url": "https://api.github.com/repos/awslabs/s2n/issues/events{/number}", "events_url": "https://api.github.com/repos/awslabs/s2n/events", "assignees_url": "https://api.github.com/repos/awslabs/s2n/assignees{/user}", "branches_url": "https://api.github.com/repos/awslabs/s2n/branches{/branch}", "tags_url": "https://api.github.com/repos/awslabs/s2n/tags", "blobs_url": "https://api.github.com/repos/awslabs/s2n/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/awslabs/s2n/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/awslabs/s2n/git/refs{/sha}", "trees_url": "https://api.github.com/repos/awslabs/s2n/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/awslabs/s2n/statuses/{sha}", "languages_url": 
"https://api.github.com/repos/awslabs/s2n/languages", "stargazers_url": "https://api.github.com/repos/awslabs/s2n/stargazers", "contributors_url": "https://api.github.com/repos/awslabs/s2n/contributors", "subscribers_url": "https://api.github.com/repos/awslabs/s2n/subscribers", "subscription_url": "https://api.github.com/repos/awslabs/s2n/subscription", "commits_url": "https://api.github.com/repos/awslabs/s2n/commits{/sha}", "git_commits_url": "https://api.github.com/repos/awslabs/s2n/git/commits{/sha}", "comments_url": "https://api.github.com/repos/awslabs/s2n/comments{/number}", "issue_comment_url": "https://api.github.com/repos/awslabs/s2n/issues/comments{/number}", "contents_url": "https://api.github.com/repos/awslabs/s2n/contents/{+path}", "compare_url": "https://api.github.com/repos/awslabs/s2n/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/awslabs/s2n/merges", "archive_url": "https://api.github.com/repos/awslabs/s2n/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/awslabs/s2n/downloads", "issues_url": "https://api.github.com/repos/awslabs/s2n/issues{/number}", "pulls_url": "https://api.github.com/repos/awslabs/s2n/pulls{/number}", "milestones_url": "https://api.github.com/repos/awslabs/s2n/milestones{/number}", "notifications_url": "https://api.github.com/repos/awslabs/s2n/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/awslabs/s2n/labels{/name}", "releases_url": "https://api.github.com/repos/awslabs/s2n/releases{/id}", "deployments_url": "https://api.github.com/repos/awslabs/s2n/deployments" }, "head_repository": { "id": 21287076, "node_id": "MDEwOlJlcG9zaXRvcnkyMTI4NzA3Ng==", "name": "s2n", "full_name": "awslabs/s2n", "private": false, "owner": { "login": "awslabs", "id": 3299148, "node_id": "MDEyOk9yZ2FuaXphdGlvbjMyOTkxNDg=", "avatar_url": "https://avatars0.githubusercontent.com/u/3299148?v=4", "gravatar_id": "", "url": "https://api.github.com/users/awslabs", "html_url": "https://github.com/awslabs", "followers_url": "https://api.github.com/users/awslabs/followers", "following_url": "https://api.github.com/users/awslabs/following{/other_user}", "gists_url": "https://api.github.com/users/awslabs/gists{/gist_id}", "starred_url": "https://api.github.com/users/awslabs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/awslabs/subscriptions", "organizations_url": "https://api.github.com/users/awslabs/orgs", "repos_url": "https://api.github.com/users/awslabs/repos", "events_url": "https://api.github.com/users/awslabs/events{/privacy}", "received_events_url": "https://api.github.com/users/awslabs/received_events", "type": "Organization", "site_admin": false }, "html_url": "https://github.com/awslabs/s2n", "description": "s2n : an implementation of the TLS/SSL protocols", "fork": false, "url": "https://api.github.com/repos/awslabs/s2n", "forks_url": "https://api.github.com/repos/awslabs/s2n/forks", "keys_url": "https://api.github.com/repos/awslabs/s2n/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/awslabs/s2n/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/awslabs/s2n/teams", "hooks_url": "https://api.github.com/repos/awslabs/s2n/hooks", "issue_events_url": "https://api.github.com/repos/awslabs/s2n/issues/events{/number}", "events_url": "https://api.github.com/repos/awslabs/s2n/events", "assignees_url": "https://api.github.com/repos/awslabs/s2n/assignees{/user}", "branches_url": 
"https://api.github.com/repos/awslabs/s2n/branches{/branch}", "tags_url": "https://api.github.com/repos/awslabs/s2n/tags", "blobs_url": "https://api.github.com/repos/awslabs/s2n/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/awslabs/s2n/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/awslabs/s2n/git/refs{/sha}", "trees_url": "https://api.github.com/repos/awslabs/s2n/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/awslabs/s2n/statuses/{sha}", "languages_url": "https://api.github.com/repos/awslabs/s2n/languages", "stargazers_url": "https://api.github.com/repos/awslabs/s2n/stargazers", "contributors_url": "https://api.github.com/repos/awslabs/s2n/contributors", "subscribers_url": "https://api.github.com/repos/awslabs/s2n/subscribers", "subscription_url": "https://api.github.com/repos/awslabs/s2n/subscription", "commits_url": "https://api.github.com/repos/awslabs/s2n/commits{/sha}", "git_commits_url": "https://api.github.com/repos/awslabs/s2n/git/commits{/sha}", "comments_url": "https://api.github.com/repos/awslabs/s2n/comments{/number}", "issue_comment_url": "https://api.github.com/repos/awslabs/s2n/issues/comments{/number}", "contents_url": "https://api.github.com/repos/awslabs/s2n/contents/{+path}", "compare_url": "https://api.github.com/repos/awslabs/s2n/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/awslabs/s2n/merges", "archive_url": "https://api.github.com/repos/awslabs/s2n/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/awslabs/s2n/downloads", "issues_url": "https://api.github.com/repos/awslabs/s2n/issues{/number}", "pulls_url": "https://api.github.com/repos/awslabs/s2n/pulls{/number}", "milestones_url": "https://api.github.com/repos/awslabs/s2n/milestones{/number}", "notifications_url": "https://api.github.com/repos/awslabs/s2n/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/awslabs/s2n/labels{/name}", "releases_url": "https://api.github.com/repos/awslabs/s2n/releases{/id}", "deployments_url": "https://api.github.com/repos/awslabs/s2n/deployments" } } ] }aws-crt-python-0.20.4+dfsg/crt/s2n/.github/install_osx_dependencies.sh000077500000000000000000000017111456575232400256660ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -eu function brew_install_if_not_installed () { brew list $1 &>/dev/null || brew install $1 } brew update brew_install_if_not_installed gnu-indent brew_install_if_not_installed coreutils brew_install_if_not_installed cppcheck brew_install_if_not_installed pkg-config # for gnutls compilation brew_install_if_not_installed ninja brew_install_if_not_installed openssl@1.1 # for libcrypto aws-crt-python-0.20.4+dfsg/crt/s2n/.github/s2n_bsd.sh000077500000000000000000000027241456575232400221600ustar00rootroot00000000000000#!/bin/sh # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
# You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -eu export CTEST_OUTPUT_ON_FAILURE=1 # FreeBSD and OpenBSD have different ways of getting CPU count # see https://serverfault.com/questions/203207/how-do-i-count-the-number-of-processors-on-an-openbsd-system if [ "$(uname)" = 'FreeBSD' ]; then export CTEST_PARALLEL_LEVEL=$(sysctl hw.ncpu | awk '{print $2}') else export CTEST_PARALLEL_LEVEL=$(sysctl -n hw.ncpuonline) fi errors=0 onerror() { errors=$(($errors+1)) } mkdir -p output cmake . -Brelease -GNinja -DCMAKE_BUILD_TYPE=Release cmake --build ./release -j $CTEST_PARALLEL_LEVEL ninja -C release test || onerror mv release/Testing/Temporary output/release # reduce the number of files to copy back rm -rf release cmake . -Bbuild -GNinja -DCMAKE_BUILD_TYPE=Debug cmake --build ./build -j $CTEST_PARALLEL_LEVEL ninja -C build test || onerror mv build/Testing/Temporary output/debug # reduce the number of files to copy back rm -rf build exit $errors aws-crt-python-0.20.4+dfsg/crt/s2n/.github/s2n_doxygen.sh000077500000000000000000000027361456575232400230660ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -eu export DOXYGEN_VERSION="doxygen-1.9.5" curl -L "https://www.doxygen.nl/files/$DOXYGEN_VERSION.linux.bin.tar.gz" -o "$DOXYGEN_VERSION.tar.gz" tar -xvf "$DOXYGEN_VERSION.tar.gz" # Pull in git tags git fetch origin --tags curl https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/main/doxygen-awesome.css -o docs/doxygen/doxygen-awesome.css # Add a version to the Doxygen documentation # For example: v1.3.13-3b413f18 DOC_VERSION="$(git tag --sort v:refname | tail -n 1)-$(git rev-parse --short=8 HEAD)" sed -i "s/PROJECT_NUMBER_PLACEHOLDER/$DOC_VERSION/" docs/doxygen/Doxyfile # We want to examine stderr for warnings # Ignore doxygen warnings from using the README.md as the mainpage WARNING=$($DOXYGEN_VERSION/bin/doxygen docs/doxygen/Doxyfile 2>&1 | grep -i "warning" | grep -vi "readme" ) # Pipe the captured text through wc via echo; expanding $WARNING bare would try to execute it WARNING_COUNT=$(echo "$WARNING" | wc -l) if [ $WARNING_COUNT -ne 0 ]; then echo "$WARNING" exit 1 else exit 0 fi aws-crt-python-0.20.4+dfsg/crt/s2n/.github/s2n_osx.sh000077500000000000000000000024751456575232400222140ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file.
This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -eu source codebuild/bin/s2n_setup_env.sh export CTEST_OUTPUT_ON_FAILURE=1 BREWINSTLLPATH=$(brew --prefix openssl@1.1) OPENSSL_1_1_1_INSTALL_DIR="${BREWINSTLLPATH:-"/usr/local/Cellar/openssl@1.1/1.1.1?"}" echo "Using OpenSSL at $OPENSSL_1_1_1_INSTALL_DIR" # Build with debug symbols and a specific OpenSSL version cmake . -Bbuild -GNinja \ -DCMAKE_BUILD_TYPE=Debug \ -DCMAKE_PREFIX_PATH=${OPENSSL_1_1_1_INSTALL_DIR} .. cmake --build ./build -j $(nproc) time CTEST_PARALLEL_LEVEL=$(nproc) ninja -C build test # Build shared library cmake . -Bbuild -GNinja \ -DCMAKE_BUILD_TYPE=Debug \ -DCMAKE_PREFIX_PATH=${OPENSSL_1_1_1_INSTALL_DIR} .. \ -DBUILD_SHARED_LIBS=ON cmake --build ./build -j $(nproc) time CTEST_PARALLEL_LEVEL=$(nproc) ninja -C build test aws-crt-python-0.20.4+dfsg/crt/s2n/.github/teams.yml000066400000000000000000000003371456575232400221210ustar00rootroot00000000000000s2n-core: - '@toidiu' - '@lrstewart' - '@dougch' - '@goatgoose' - '@camshaft' - '@maddeleine' - '@WesleyRosenblum' - '@franklee26' - '@harrisonkaiser' - '@saritummal' - '@aditishri18' - '@jmayclin' aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/000077500000000000000000000000001456575232400223175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/ci_compliance.yml000066400000000000000000000046611456575232400256360ustar00rootroot00000000000000--- name: Compliance on: push: branches: [main] pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] jobs: duvet: runs-on: ubuntu-latest steps: - name: Clone s2n-tls uses: actions/checkout@v3 - name: Clone s2n-quic uses: actions/checkout@v3 with: repository: aws/s2n-quic path: ./s2n-quic submodules: true - name: Run duvet action uses: ./s2n-quic/.github/actions/duvet with: s2n-quic-dir: ./s2n-quic report-script: compliance/generate_report.sh aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-s3-bucket-name: s2n-tls-ci-artifacts aws-s3-region: us-west-2 cdn: https://d3fqnyekunr9xg.cloudfront.net # The `duvet report` command generates some artifacts (specs folder) that # interfere with detecting uncommitted files. This step cleans up those # artifacts. Since the cleanup runs prior to the “Extract RFC spec data” # phase, this is a safe operation. - name: Cleanup intermediate artifacts run: rm -r specs shell: bash - name: Extract RFC spec data working-directory: ./compliance run: ./initialize_duvet.sh shell: bash - name: Check if there are uncommitted changes run: | # If this fails you need to run `cd compliance && ./compliance/initialize_duvet.sh` # # FIXME: https://github.com/aws/s2n-tls/issues/4219 # We generate and commit the spec files to avoid re-downloading them each time in # the CI (avoid flaky network calls). However, this currently doesn't work in # s2n-tls since duvet assumes that the specs folder live in the project's base # folder. # # Use 'git status --porcelain' instead of 'git diff --exit-code' since git diff # only detects diffs but fails to detect new files. Ignore the s2n-quic dir # `(:!s2n-quic)` since we explicitly clone the repo as part of this job. 
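# Illustrative example, not part of the upstream workflow: a newly generated, untracked file
# (e.g. `touch specs/new-file`) is invisible to `git diff --exit-code`, which still exits 0,
# but it shows up in `git status --porcelain` as "?? specs/new-file", so the porcelain check
# below catches both modified and brand-new files.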
git_status=$(git status --porcelain -- ':!s2n-quic') if [ -n "$git_status" ]; then echo "Found uncommitted changes:" echo "$git_status" exit 1 else echo "Workspace is clean" fi shell: bash aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/ci_freebsd.yml000066400000000000000000000012571456575232400251340ustar00rootroot00000000000000name: FreeBSD on: pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] jobs: testfreebsd: runs-on: ubuntu-latest name: CI FreeBSD steps: - uses: actions/checkout@v3 - name: Build and test in FreeBSD id: test uses: vmactions/freebsd-vm@v1 timeout-minutes: 45 with: prepare: pkg install -y ninja cmake run: | freebsd-version .github/s2n_bsd.sh - name: Upload test results if: ${{ failure() }} uses: actions/upload-artifact@master with: name: all_test_output path: | output aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/ci_linting.yml000066400000000000000000000073421456575232400251670ustar00rootroot00000000000000--- name: Linters on: pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] jobs: cppcheck: # ubuntu-latest introduced a newer gcc version that cannot compile cppcheck 2.3 # TODO: upgrade to latest cppcheck and revert to ubuntu-latest # see https://github.com/aws/s2n-tls/issues/3656 runs-on: ubuntu-20.04 env: CPPCHECK_INSTALL_DIR: test-deps/cppcheck steps: - uses: actions/checkout@v3 - name: Setup run: source ./codebuild/bin/s2n_setup_env.sh - name: Cache id: cache uses: actions/cache@v2.1.4 continue-on-error: true with: path: ${{ env.CPPCHECK_INSTALL_DIR }} key: cppcheck-2.3-${{ env.CPPCHECK_INSTALL_DIR }} - name: Install if: steps.cache.outputs.cache-hit != 'true' run: ./codebuild/bin/install_cppcheck.sh "$CPPCHECK_INSTALL_DIR" - name: Check run: ./codebuild/bin/run_cppcheck.sh "$CPPCHECK_INSTALL_DIR" copyright: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Setup run: source ./codebuild/bin/s2n_setup_env.sh - name: Check run: ./codebuild/bin/copyright_mistake_scanner.sh simple-mistakes: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Setup run: source ./codebuild/bin/s2n_setup_env.sh - name: Check run: ./codebuild/bin/grep_simple_mistakes.sh comments: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Setup run: source ./codebuild/bin/s2n_setup_env.sh - name: Install run: sudo apt update && sudo apt install -y kwstyle - name: Check run: | ./codebuild/bin/run_kwstyle.sh ./codebuild/bin/cpp_style_comment_linter.sh pepeight: runs-on: ubuntu-latest steps: - name: checkout uses: actions/checkout@v3 - name: Run autopep8 id: autopep8 uses: peter-evans/autopep8@v2 with: args: --diff --exit-code . - name: Check exit code if: steps.autopep8.outputs.exit-code != 0 run: | echo "Run 'autopep8 --in-place .' to fix" exit 1 clang-format: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: clang-format check uses: harrisonkaiser/clang-format-action@verbose with: clang-format-version: '15' include-regex: '^(\.\/)?(api|bin|crypto|stuffer|error|tls|utils|tests\/unit|tests\/testlib|docs\/examples).*\.(c|h)$' nixflake: # The nix develop changes contain broken nixpkgs dependencies; the allow/impure flags work around this.
runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: nixbuild/nix-quick-install-action@v21 with: nix_conf: experimental-features = nix-command flakes - name: nix flake check run: NIXPKGS_ALLOW_BROKEN=1 NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM=1 nix flake check --impure nixfmt: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: nixbuild/nix-quick-install-action@v21 with: nix_conf: experimental-features = nix-command flakes - name: nix fmt run: nix fmt $(find . -name \*nix -type f -not -path "./.git/*") - name: Changed files id: dirty run: | echo "Checking nix files with: nix fmt ..." git diff --exit-code continue-on-error: true - name: Failure case if: steps.dirty.outcome != 'success' run: | echo "Please fix formatting with nix fmt (file)" exit 1 - name: Success run: echo "All nix files passed format check" aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/ci_openbsd.yml000066400000000000000000000014411456575232400251470ustar00rootroot00000000000000name: OpenBSD on: pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] jobs: testopenbsd: runs-on: macos-12 name: CI OpenBSD steps: - uses: actions/checkout@v3 - name: Build and test in OpenBSD id: test uses: cross-platform-actions/action@v0.21.1 with: operating_system: openbsd architecture: x86-64 version: '7.2' shell: bash run: | sudo pkg_add ninja cmake pkg_info sysctl -n kern.version .github/s2n_bsd.sh - name: upload test results if: ${{ failure() }} uses: actions/upload-artifact@master with: name: all_test_output path: | output aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/ci_rust.yml000066400000000000000000000133171456575232400245170ustar00rootroot00000000000000--- name: Rust Bindings on: pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] env: # Pin the nightly toolchain to prevent breakage. # This should be occasionally updated. RUST_NIGHTLY_TOOLCHAIN: nightly-2022-08-03 ROOT_PATH: bindings/rust jobs: generate: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest, macOS-latest] steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 id: toolchain with: toolchain: stable override: true - uses: camshaft/rust-cache@v1 - name: Generate run: ${{env.ROOT_PATH}}/generate.sh - name: Tests working-directory: ${{env.ROOT_PATH}} run: cargo test --all-features - name: Test external build # if this test is failing, make sure that api headers are appropriately # included. For a symbol to be visible in a shared lib, the # __attribute__((visibility("default"))) label must be on a declaration # in the same unit of compilation as the definition. Generally this just # means that if the linker can't resolve foo_method in tls/foo.c, you # forgot to include api/unstable/foo.h in tls/foo.c if: ${{ matrix.os == 'ubuntu-latest' }} run: | cmake . 
-Bbuild -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=off cmake --build build -- -j $(nproc) export S2N_TLS_LIB_DIR=`pwd`/build/lib export S2N_TLS_INCLUDE_DIR=`pwd`/api export LD_LIBRARY_PATH=$S2N_TLS_LIB_DIR:$LD_LIBRARY_PATH cd ${{env.ROOT_PATH}} ./generate.sh ldd target/debug/integration | grep libs2n.so # our benchmark testing includes interop tests between s2n-tls, rustls, and # openssl harness-interop-tests: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 id: toolchain with: toolchain: stable override: true - name: generate bindings run: ${{env.ROOT_PATH}}/generate.sh --skip-tests - name: bench tests working-directory: ${{env.ROOT_PATH}}/bench run: cargo test generate-openssl-102: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 id: toolchain with: toolchain: stable override: true - uses: camshaft/rust-cache@v1 - name: Cache OpenSSL 1.0.2 id: cache-openssl uses: actions/cache@v3 with: path: ~/openssl-102/install key: ${{ runner.os }}-openssl-102 - if: ${{ steps.cache-openssl.outputs.cache-hit != 'true' }} name: Install OpenSSL 1.0.2 run: | mkdir ~/openssl-102 pushd ~/openssl-102 mkdir install install_dir="$(pwd)"/install wget https://www.openssl.org/source/old/1.0.2/openssl-1.0.2u.tar.gz tar -xzvf openssl-1.0.2u.tar.gz pushd openssl-1.0.2u ./config --prefix="${install_dir}" --openssldir="${install_dir}"/openssl make make install popd popd - name: Generate run: OPENSSL_DIR=~/openssl-102/install ${{env.ROOT_PATH}}/generate.sh - name: Tests working-directory: ${{env.ROOT_PATH}} run: cargo test --all-features rustfmt: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: submodules: true - uses: actions-rs/toolchain@v1.0.7 id: toolchain with: toolchain: ${{ env.RUST_NIGHTLY_TOOLCHAIN }} profile: minimal override: true components: rustfmt - uses: camshaft/rust-cache@v1 # We don't need to format the generated files, # but if they don't exist other code breaks. - name: Generate run: ./${{env.ROOT_PATH}}/generate.sh - name: Run cargo fmt uses: actions-rs/cargo@v1.0.3 with: command: fmt args: --manifest-path ${{env.ROOT_PATH}}/Cargo.toml --all -- --check clippy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: submodules: true - uses: actions-rs/toolchain@v1.0.7 id: toolchain with: toolchain: stable profile: minimal override: true components: clippy - uses: camshaft/rust-cache@v1 # Enforce that clippy's msrv matches rust-toolchain - name: Check MSRV run: grep $(cat ${{env.ROOT_PATH}}/rust-toolchain) ${{env.ROOT_PATH}}/.clippy.toml # We don't need to format the generated files, # but if they don't exist other code breaks. 
- name: Generate run: ${{env.ROOT_PATH}}/generate.sh # TODO translate json reports to in-action warnings - name: Run cargo clippy uses: actions-rs/cargo@v1.0.3 with: command: clippy args: --manifest-path ${{env.ROOT_PATH}}/Cargo.toml --all-targets -- -D warnings msrv: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: submodules: true # Enforce crate msrv matches rust-toolchain - name: Check MSRV of s2n-tls run: grep "rust-version = \"$(cat ${{env.ROOT_PATH}}/rust-toolchain)\"" ${{env.ROOT_PATH}}/s2n-tls/Cargo.toml - name: Check MSRV of s2n-tls-sys run: grep "rust-version = \"$(cat ${{env.ROOT_PATH}}/rust-toolchain)\"" ${{env.ROOT_PATH}}/s2n-tls-sys/templates/Cargo.template - name: Check MSRV of s2n-tokio run: grep "rust-version = \"$(cat ${{env.ROOT_PATH}}/rust-toolchain)\"" ${{env.ROOT_PATH}}/s2n-tls-tokio/Cargo.toml aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/codeql.yml000066400000000000000000000022721456575232400243140ustar00rootroot00000000000000name: "CodeQL - Python" on: push: branches: [ "main" ] paths-ignore: - '**/tests/integration/*' pull_request: branches: [ "main" ] paths-ignore: - '**/tests/integration/*' schedule: - cron: "1 18 * * 0" merge_group: types: [checks_requested] branches: [main] jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: # Disabling c analysis (for now) as this takes ~2 hours to complete language: [ python ] steps: - name: Checkout uses: actions/checkout@v3 - name: Initialize CodeQL uses: github/codeql-action/init@v2 with: languages: ${{ matrix.language }} queries: +security-and-quality config-file: ./.github/codeql-config.yml - name: Autobuild uses: github/codeql-action/autobuild@v2 if: ${{ matrix.language == 'c' || matrix.language == 'python' }} - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v2 with: category: "/language:${{ matrix.language }}" aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/dashboard.yml000066400000000000000000000106711456575232400247760ustar00rootroot00000000000000name: Build Dashboard on: schedule: - cron: '0 0 * * 1-5' jobs: build: # This should only run in one place. if: contains(github.repository, 'aws/s2n-tls') runs-on: ubuntu-latest permissions: contents: write steps: - name: Check out repository uses: actions/checkout@v3 - name: Check out GitHub Pages branch uses: actions/checkout@v3 with: ref: 'gh-pages' - name: 'Generate Dashboard' uses: ethomson/issue-dashboard@v1 with: config: | title: s2n-tls Dashboard description: | Issues and PRs for s2n-tls output: format: html filename: dashboard/index.html sections: - title: 'Issues Age stats' description: 'Open issues by age.' 
widgets: - type: 'graph' title: 'Age' elements: - title: '<7 days' issue_query: 'repo:aws/s2n-tls is:open is:issue created:>{{ date("-7 days") }}' color: 'green' - title: '8-90 days' issue_query: 'repo:aws/s2n-tls is:open is:issue created:{{ date("-90 days") }}..{{ date("-7 days") }}' color: 'yellow' - title: '>1 year labeled type/api' issue_query: 'repo:aws/s2n-tls is:open is:issue label:type/api created:<{{ date("-365 days") }}' color: 'red' - title: '>1 year labeled type/test' issue_query: 'repo:aws/s2n-tls is:open is:issue label:type/test created:<{{ date("-365 days") }}' color: 'red' - title: '>1 year labeled type/build' issue_query: 'repo:aws/s2n-tls is:open is:issue label:type/build created:<{{ date("-365 days") }}' color: 'red' - title: '>1 year labeled CBMC' issue_query: 'repo:aws/s2n-tls is:open is:issue label:cbmc created:<{{ date("-365 days") }}' color: 'red' - title: 'Issues' description: 'Issues with no comments' widgets: - type: 'number' title: 'Issues with zero comments (external)' issue_query: 'repo:aws/s2n-tls is:open is:issue comments:0 -label:s2n-core' color: 'yellow' - type: 'number' title: 'Issues with updates in the last 14 days (external)' issue_query: 'repo:aws/s2n-tls is:issue is:open updated:>{{ date("-14 days") }} -label:s2n-core' color: 'yellow' - title: 'Pull Requests' widgets: - type: 'number' title: 'Ready to merge' issue_query: 'repo:aws/s2n-tls is:open is:pr review:approved -label:do_not_merge' color: 'green' - type: 'number' title: 'No review (external) last 14 days' issue_query: 'repo:aws/s2n-tls is:open is:pr review:none -label:status/stale -label:s2n-core created:>{{ date("-14 days") }}' color: 'red' - type: 'number' title: 'PRs with changes requested (external)' issue_query: 'repo:aws/s2n-tls is:open is:pr review:changes_requested sort:created-asc -label:status/stale -label:s2n-core' color: 'blue' - type: 'number' title: 'PRs with zero interactions (external)' issue_query: 'repo:aws/s2n-tls is:open is:pr interactions:0 sort:created-asc -label:status/stale -label:s2n-core' color: 'blue' - type: 'number' title: 'Stale PRs' issue_query: 'repo:aws/s2n-tls is:open is:pr label:status/stale' color: 'black' - type: 'table' title: '15 Oldest Pull Requests without review' fields: - title: 'PR' property: 'number' - title: 'Description' property: 'title' issue_query: 'repo:aws/s2n-tls is:open is:pr review:none sort:created-asc' limit: 15 token: ${{ github.token }} - name: Publish Documentation uses: JamesIves/github-pages-deploy-action@v4 with: folder: . git-config-name: 'GitHub Actions' git-config-email: 'nobody@github.com' aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/docs.yml000066400000000000000000000020401456575232400237660ustar00rootroot00000000000000--- name: s2n-tls Documentation on: push: branches: [main] pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] jobs: generate-doxygen: runs-on: ubuntu-latest steps: - name: Checkout repo uses: actions/checkout@v3 - name: Create Documentation run: | .github/s2n_doxygen.sh - name: Upload documentation to action # Upload the doxygen artifacts on pull requests to help reviewers easily # view changes. 
if: ${{ github.event_name == 'pull_request' }} uses: actions/upload-artifact@v3 with: name: s2n-tls-doxygen path: | docs/doxygen/output - name: Deploy documentation to gh-pages uses: peaceiris/actions-gh-pages@v3 if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} with: github_token: ${{ secrets.GITHUB_TOKEN }} destination_dir: ./doxygen publish_dir: ./docs/doxygen/output/html aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/gha_failure_monitor.yml000066400000000000000000000030171456575232400270600ustar00rootroot00000000000000name: GHA Monitor on: schedule: - cron: '0 * * * *' jobs: gha_monitor_fuzz: if: contains(github.repository, 'awslabs/s2n') env: SCRIPT_PATH: "./.github/gha_monitor" runs-on: ubuntu-latest strategy: matrix: repos: - {ORG: "awslabs", REPO: "private-s2n-fuzz"} - {ORG: "awslabs", REPO: "private-s2n-cbmc"} - {ORG: "awslabs", REPO: "s2n"} - {ORG: "dougch", REPO: "s2n"} fail-fast: false steps: - uses: actions/checkout@v3 - name: Set up Python 3.x uses: actions/setup-python@v1 with: python-version: '3.x' architecture: 'x64' - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 with: aws-access-key-id: ${{ secrets.SNS_AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SNS_AWS_SECRET_ACCESS_KEY }} aws-region: us-west-2 - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r $SCRIPT_PATH/requirements.txt - name: GitHub failure check run: | cd $SCRIPT_PATH/ python3 -m gha_monitor env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_REPO: ${{ matrix.repos.REPO }} GITHUB_REPO_ORG: ${{ matrix.repos.ORG }} # Set the time range the monitor should be looking at. # Should be slightly longer than cron frequency (can be a float). MONITOR_FREQ_IN_HOURS: "1.1" aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/gha_osx_tests.yml000066400000000000000000000006361456575232400257210ustar00rootroot00000000000000--- name: macOS build and test S2n on: pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] jobs: OSX: runs-on: macos-latest steps: - name: Checkout Dependencies uses: actions/checkout@v3 - name: Prebuild run: | .github/install_osx_dependencies.sh - name: Build run: | .github/s2n_osx.sh aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/private_fork_pr_codebuild.yml000066400000000000000000000023371456575232400302550ustar00rootroot00000000000000--- name: s2nPrivateFuzz on: pull_request: branches: [main] merge_group: types: [checks_requested] branches: [main] jobs: fuzz: if: startsWith(github.repository, 'private-') runs-on: ubuntu-18.04 strategy: matrix: openssl_version: - openssl-1.0.2 - openssl-1.1.1 fail-fast: true steps: - uses: actions/setup-node@v1 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-region: us-west-2 - name: S2n Fuzz CodeBuild uses: aws-actions/aws-codebuild-run-build@master with: project-name: 's2nGithubCodebuild' env-vars-for-codebuild: | S2N_LIBCRYPTO, TESTS, LATEST_CLANG, FUZZ_TIMEOUT_SEC, requester, event-name env: S2N_LIBCRYPTO: ${{ matrix.openssl_version }} TESTS: "fuzz" LATEST_CLANG: "true" FUZZ_TIMEOUT_SEC: 1800 requester: ${{ github.actor }} event-name: ${{ github.event_name }} aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/private_sync.yml000066400000000000000000000014351456575232400255530ustar00rootroot00000000000000name: force_push on: push: branches: - main jobs: build: #
This should only run in one place. if: contains(github.repository, 'aws/s2n-tls') runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: persist-credentials: false fetch-depth: 0 - name: Push cbmc uses: ad-m/github-push-action@master with: github_token: ${{ secrets.private_repos_s2n_20200214 }} repository: awslabs/private-s2n-cbmc branch: master force: true - name: Push fuzz uses: ad-m/github-push-action@master with: github_token: ${{ secrets.private_repos_s2n_20200214 }} repository: awslabs/private-s2n-fuzz branch: master force: true aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/proof_ci.yaml000066400000000000000000000201221456575232400250000ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 # CBMC starter kit 2.9 name: Run CBMC proofs on: push: branches-ignore: - gh-pages pull_request: branches-ignore: - gh-pages # USAGE # # If you need to use different versions for tools like CBMC, modify this file: # .github/workflows/proof_ci_resources/config.yaml # # If you want the CI to use a different GitHub-hosted runner (which must still # be running Ubuntu 20.04), modify the value of this key: # jobs.run_cbmc_proofs.runs-on jobs: run_cbmc_proofs: runs-on: cbmc_ubuntu-latest_64-core name: run_cbmc_proofs permissions: contents: read id-token: write pull-requests: read steps: - name: Check out repository and submodules recursively uses: actions/checkout@v3 with: submodules: 'recursive' - name: Parse config file run: | CONFIG_FILE='.github/workflows/proof_ci_resources/config.yaml' for setting in cadical-tag cbmc-version cbmc-viewer-version kissat-tag litani-version proofs-dir run-cbmc-proofs-command; do VAR=$(echo $setting | tr "[:lower:]" "[:upper:]" | tr - _) echo "${VAR}"=$(yq .$setting $CONFIG_FILE) >> $GITHUB_ENV done - name: Ensure CBMC, CBMC viewer, Litani versions have been specified shell: bash run: | should_exit=false if [ "${{ env.CBMC_VERSION }}" == "" ]; then echo "You must specify a CBMC version (e.g. 'latest' or '5.70.0')" should_exit=true fi if [ "${{ env.CBMC_VIEWER_VERSION }}" == "" ]; then echo "You must specify a CBMC viewer version (e.g. 'latest' or '3.6')" should_exit=true fi if [ "${{ env.LITANI_VERSION }}" == "" ]; then echo "You must specify a Litani version (e.g. 
'latest' or '1.27.0')" should_exit=true fi if [[ "$should_exit" == true ]]; then exit 1; fi - name: Install latest CBMC if: ${{ env.CBMC_VERSION == 'latest' }} shell: bash run: | # Search within 5 most recent releases for latest available package CBMC_REL="https://api.github.com/repos/diffblue/cbmc/releases?page=1&per_page=5" CBMC_DEB=$(curl -s $CBMC_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r '.[]|select(.prerelease|not).assets[].browser_download_url' | grep -e 'ubuntu-20.04' | head -n 1) CBMC_ARTIFACT_NAME=$(basename $CBMC_DEB) curl -o $CBMC_ARTIFACT_NAME -L $CBMC_DEB sudo dpkg -i $CBMC_ARTIFACT_NAME rm ./$CBMC_ARTIFACT_NAME - name: Install CBMC ${{ env.CBMC_VERSION }} if: ${{ env.CBMC_VERSION != 'latest' }} shell: bash run: | curl -o cbmc.deb -L \ https://github.com/diffblue/cbmc/releases/download/cbmc-${{ env.CBMC_VERSION }}/ubuntu-20.04-cbmc-${{ env.CBMC_VERSION }}-Linux.deb sudo dpkg -i ./cbmc.deb rm ./cbmc.deb - name: Install latest CBMC viewer if: ${{ env.CBMC_VIEWER_VERSION == 'latest' }} shell: bash run: | CBMC_VIEWER_REL="https://api.github.com/repos/model-checking/cbmc-viewer/releases/latest" CBMC_VIEWER_VERSION=$(curl -s $CBMC_VIEWER_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r .name | sed 's/viewer-//') pip3 install cbmc-viewer==$CBMC_VIEWER_VERSION - name: Install CBMC viewer ${{ env.CBMC_VIEWER_VERSION }} if: ${{ env.CBMC_VIEWER_VERSION != 'latest' }} shell: bash run: | sudo apt-get update sudo apt-get install --no-install-recommends --yes \ build-essential universal-ctags pip3 install cbmc-viewer==${{ env.CBMC_VIEWER_VERSION }} - name: Install latest Litani if: ${{ env.LITANI_VERSION == 'latest' }} shell: bash run: | # Search within 5 most recent releases for latest available package LITANI_REL="https://api.github.com/repos/awslabs/aws-build-accumulator/releases?page=1&per_page=5" LITANI_DEB=$(curl -s $LITANI_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r '.[]|select(.prerelease|not).assets[0].browser_download_url' | head -n 1) DBN_PKG_FILENAME=$(basename $LITANI_DEB) curl -L $LITANI_DEB -o $DBN_PKG_FILENAME sudo apt-get update sudo apt-get install --no-install-recommends --yes ./$DBN_PKG_FILENAME rm ./$DBN_PKG_FILENAME - name: Install Litani ${{ env.LITANI_VERSION }} if: ${{ env.LITANI_VERSION != 'latest' }} shell: bash run: | curl -o litani.deb -L \ https://github.com/awslabs/aws-build-accumulator/releases/download/${{ env.LITANI_VERSION }}/litani-${{ env.LITANI_VERSION }}.deb sudo apt-get update sudo apt-get install --no-install-recommends --yes ./litani.deb rm ./litani.deb - name: Install ${{ env.KISSAT_TAG }} kissat if: ${{ env.KISSAT_TAG != '' }} shell: bash run: | if ${{ env.KISSAT_TAG == 'latest' }} then KISSAT_REL="https://api.github.com/repos/arminbiere/kissat/releases/latest" KISSAT_TAG_NAME=$(curl -s $KISSAT_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r '.tag_name') else KISSAT_TAG_NAME=${{ env.KISSAT_TAG }} fi echo "Installing kissat $KISSAT_TAG_NAME" git clone https://github.com/arminbiere/kissat.git \ && cd kissat \ && git checkout $KISSAT_TAG_NAME \ && ./configure \ && cd build \ && make -j; echo "$(pwd)" >> $GITHUB_PATH - name: Install ${{ env.CADICAL_TAG }} cadical if: ${{ env.CADICAL_TAG != '' }} shell: bash run: | if ${{ env.CADICAL_TAG == 'latest' }} then CADICAL_REL="https://api.github.com/repos/arminbiere/cadical/releases/latest" CADICAL_TAG_NAME=$(curl -s $CADICAL_REL --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' | jq -r 
'.tag_name') else CADICAL_TAG_NAME=${{ env.CADICAL_TAG }} fi echo "Installing cadical $CADICAL_TAG_NAME" git clone https://github.com/arminbiere/cadical.git \ && cd cadical \ && git checkout $CADICAL_TAG_NAME \ && ./configure \ && cd build \ && make -j; echo "$(pwd)" >> $GITHUB_PATH - name: Run CBMC proofs shell: bash env: EXTERNAL_SAT_SOLVER: kissat working-directory: ${{ env.PROOFS_DIR }} run: ${{ env.RUN_CBMC_PROOFS_COMMAND }} - name: Check repository visibility shell: bash run: | VIZ="${{ fromJson(toJson(github.event.repository)).visibility }}"; echo "REPO_VISIBILITY=${VIZ}" | tee -a "${GITHUB_ENV}"; - name: Set name for zip artifact with CBMC proof results id: artifact if: ${{ env.REPO_VISIBILITY == 'public' }} run: | echo "name=cbmc_proof_results_${{ fromJson(toJson(github.event.repository)).name }}_$(date +%Y_%m_%d_%H_%M_%S)" >> $GITHUB_OUTPUT - name: Create zip artifact with CBMC proof results if: ${{ env.REPO_VISIBILITY == 'public' }} shell: bash run: | FINAL_REPORT_DIR=$PROOFS_DIR/output/latest/html pushd $FINAL_REPORT_DIR \ && zip -r ${{ steps.artifact.outputs.name }}.zip . \ && popd \ && mv $FINAL_REPORT_DIR/${{ steps.artifact.outputs.name }}.zip . - name: Upload zip artifact of CBMC proof results to GitHub Actions if: ${{ env.REPO_VISIBILITY == 'public' }} uses: actions/upload-artifact@v3 with: name: ${{ steps.artifact.outputs.name }} path: ${{ steps.artifact.outputs.name }}.zip - name: CBMC proof results shell: bash run: | python3 ${{ env.PROOFS_DIR }}/lib/summarize.py \ --run-file ${{ env.PROOFS_DIR }}/output/latest/html/run.json aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/proof_ci_resources/000077500000000000000000000000001456575232400262115ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/proof_ci_resources/config.yaml000066400000000000000000000002731456575232400303440ustar00rootroot00000000000000cadical-tag: latest cbmc-version: latest cbmc-viewer-version: latest kissat-tag: latest litani-version: latest proofs-dir: tests/cbmc/proofs run-cbmc-proofs-command: ./run-cbmc-proofs.py aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/stale.yml000066400000000000000000000021741456575232400241560ustar00rootroot00000000000000name: Close stale PRs on: schedule: - cron: "30 1 * * *" pull_request: paths: [.github/workflows/stale.yml] branches: [main] jobs: stale: runs-on: ubuntu-latest permissions: pull-requests: write steps: - uses: actions/stale@v8 with: stale-pr-message: > This PR has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions. days-before-pr-stale: 60 days-before-pr-close: -1 days-before-issue-stale: -1 days-before-issue-close: -1 exempt-pr-labels: ops-backlog,status/needs_team_discussion,status/on_hold stale-pr-label: status/stale # Perform a dry-run if the workflow was changed in a PR debug-only: ${{ github.event_name == 'pull_request' }} # The github API rate limit isn't as much of a concern in debug mode. See # https://github.com/actions/stale#debugging. 
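# Illustrative note, not from the upstream workflow: GitHub Actions expressions have no
# ternary operator, so the `a && b || c` pattern used on the next setting is the usual
# substitute. It evaluates to `b` when `a` is true and `b` is truthy (100 here), and to `c`
# otherwise, e.g. `${{ github.event_name == 'pull_request' && 100 || 30 }}` resolves to 100
# for runs triggered by a pull_request event and to 30 for the scheduled run.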
operations-per-run: ${{ github.event_name == 'pull_request' && 100 || 30 }} aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/team_label.yml000066400000000000000000000004511456575232400251270ustar00rootroot00000000000000on: pull_request_target: types: - opened - reopened branches: - 'main' name: team-label jobs: team-labeler: runs-on: ubuntu-latest steps: - uses: JulienKode/team-labeler-action@v0.1.1 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" aws-crt-python-0.20.4+dfsg/crt/s2n/.github/workflows/usage_guide.yml000066400000000000000000000043331456575232400253260ustar00rootroot00000000000000name: Publish Usage Guide on: push: branches: - main pull_request: branches: - main env: CDN: https://d3fqnyekunr9xg.cloudfront.net # By default dependabot only receives read permissions. Explicitly give it write # permissions which is needed by the ouzi-dev/commit-status-updater task. # # Updating status is relatively safe (doesnt modify source code) and caution # should be taken before adding more permissions. permissions: contents: write statuses: write jobs: build-deploy: runs-on: ubuntu-latest steps: - name: Checkout s2n-tls repo uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - name: Set override run: rustup override set stable - uses: camshaft/install@v1 with: crate: mdbook - name: Build book run: | cd docs/usage-guide mdbook build - name: Deploy documentation to gh-pages uses: JamesIves/github-pages-deploy-action@v4.5.0 if: github.event_name == 'push' with: target-folder: usage-guide folder: docs/usage-guide/book - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v4.0.1 if: github.event_name == 'push' || github.repository == github.event.pull_request.head.repo.full_name with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-region: us-west-1 - name: Upload to S3 if: github.event_name == 'push' || github.repository == github.event.pull_request.head.repo.full_name id: s3 run: | TARGET="${{ github.sha }}/book" aws s3 sync docs/usage-guide/book "s3://s2n-tls-ci-artifacts/$TARGET" --acl private --follow-symlinks URL="$CDN/$TARGET/index.html" echo "URL=$URL" >> $GITHUB_OUTPUT - name: Output mdbook url uses: ouzi-dev/commit-status-updater@v2.0.1 if: github.event_name == 'push' || github.repository == github.event.pull_request.head.repo.full_name with: name: "book / url" status: "success" url: "${{ steps.s3.outputs.URL }}" aws-crt-python-0.20.4+dfsg/crt/s2n/.gitignore000066400000000000000000000007421456575232400207150ustar00rootroot00000000000000*.o *.a *.dylib *.dSYM *.so *~ *.dSYM *.gcda *.gcno *.gcov *.info *.profraw *.tmp *.pyc coverage/html/* docs/doxygen/output/ libcrypto-build/* libcrypto-root libcrypto-root/* tests/unit/*_test tests/fuzz/*_test tests/fuzz/*.txt tests/fuzz/fuzz-*.log tests/benchmark/*_benchmark bin/s2nc bin/s2nd util-linux-* Python-* clang-* fuzz_dependencies/* .vscode/ test-deps/* .idea/* CMakeCache.txt CMakeFiles/* .project ./codebuild/spec/buildspec_*_batch.yml build/ result result-* *.class aws-crt-python-0.20.4+dfsg/crt/s2n/.gitmodules000066400000000000000000000003011456575232400210710ustar00rootroot00000000000000[submodule "tests/cbmc/aws-verification-model-for-libcrypto"] path = tests/cbmc/aws-verification-model-for-libcrypto url = https://github.com/awslabs/aws-verification-model-for-libcrypto.git aws-crt-python-0.20.4+dfsg/crt/s2n/.pep8000066400000000000000000000000561456575232400176000ustar00rootroot00000000000000[pep8] max_line_length = 120 recursive = 
true aws-crt-python-0.20.4+dfsg/crt/s2n/CMakeLists.txt000066400000000000000000000656611456575232400215000ustar00rootroot00000000000000cmake_minimum_required (VERSION 3.0) project (s2n C) if(POLICY CMP0077) cmake_policy(SET CMP0077 NEW) #option does nothing when a normal variable of the same name exists. endif() set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) set(INSTALL_LIB_DIR lib CACHE PATH "Installation directory for libraries") set(INSTALL_INCLUDE_DIR include CACHE PATH "Installation directory for header files") set(INSTALL_CMAKE_DIR lib/cmake CACHE PATH "Installation directory for cmake files") set(CMAKE_FIND_PACKAGE_PREFER_CONFIG TRUE) # These Version numbers are for major updates only- we won't track minor/patch updates here. set(VERSION_MAJOR 1) set(VERSION_MINOR 0) set(VERSION_PATCH 0) option(SEARCH_LIBCRYPTO "Set this if you want to let S2N search libcrypto for you, otherwise a crypto target needs to be defined." ON) option(UNSAFE_TREAT_WARNINGS_AS_ERRORS "Compiler warnings are treated as errors. Warnings may indicate danger points where you should verify with the S2N-TLS developers that the security of the library is not compromised. Turn this OFF to ignore warnings." ON) option(S2N_WERROR_ALL "This option will cause all artifacts linked to libs2n to use the -Werror setting." OFF) option(S2N_INTERN_LIBCRYPTO "This ensures that s2n-tls is compiled and deployed with a specific version of libcrypto by interning the code and hiding symbols. This also enables s2n-tls to be loaded in an application with an otherwise conflicting libcrypto version." OFF) option(S2N_LTO, "Enables link time optimizations when building s2n-tls." OFF) option(S2N_STACKTRACE "Enables stacktrace functionality in s2n-tls. Note that this functionality is only available on platforms that support execinfo." 
ON) option(COVERAGE "Enable profiling collection for code coverage calculation" OFF) option(S2N_INTEG_TESTS "Enable the integrationv2 tests" OFF) option(S2N_FAST_INTEG_TESTS "Enable the integrationv2 with more parallelism, only has effect if S2N_INTEG_TESTS=ON" ON) option(S2N_INSTALL_S2NC_S2ND "Install the binaries s2nc and s2nd" OFF) option(S2N_USE_CRYPTO_SHARED_LIBS "For S2N to use shared libs in Findcrypto" OFF) option(TSAN "Enable ThreadSanitizer to test thread safety" OFF) option(ASAN "Enable AddressSanitizer to test memory safety" OFF) # Turn BUILD_TESTING=ON by default include(CTest) file(GLOB API_HEADERS "api/*.h") file(GLOB API_UNSTABLE_HEADERS "api/unstable/*.h") file(GLOB CRYPTO_HEADERS "crypto/*.h") file(GLOB CRYPTO_SRC "crypto/*.c") file(GLOB ERROR_HEADERS "error/*.h") file(GLOB ERROR_SRC "error/*.c") file(GLOB STUFFER_HEADERS "stuffer/*.h") file(GLOB STUFFER_SRC "stuffer/*.c") file(GLOB_RECURSE TLS_HEADERS "tls/*.h") file(GLOB_RECURSE TLS_SRC "tls/*.c") file(GLOB UTILS_HEADERS "utils/*.h") file(GLOB UTILS_SRC "utils/*.c") message(STATUS "Detected CMAKE_SYSTEM_PROCESSOR as ${CMAKE_SYSTEM_PROCESSOR}") if(CMAKE_SIZEOF_VOID_P EQUAL 4) message(STATUS "Detected 32-Bit system") else() message(STATUS "Detected 64-Bit system") endif() ##be nice to visual studio users if(MSVC) source_group("Header Files\\s2n\\api" FILES ${API_HEADERS} ${API_UNSTABLE_HEADERS}) source_group("Header Files\\s2n\\crypto" FILES ${CRYPTO_HEADERS}) source_group("Header Files\\s2n\\error" FILES ${ERROR_HEADERS}) source_group("Header Files\\s2n\\stuffer" FILES ${STUFFER_HEADERS}) source_group("Header Files\\s2n\\tls" FILES ${TLS_HEADERS}) source_group("Header Files\\s2n\\utils" FILES ${UTILS_HEADERS}) source_group("Source Files\\crypto" FILES ${CRYPTO_SRC}) source_group("Source Files\\error" FILES ${ERROR_SRC}) source_group("Source Files\\stuffer" FILES ${STUFFER_SRC}) source_group("Source Files\\tls" FILES ${TLS_SRC}) source_group("Source Files\\utils" FILES ${UTILS_SRC}) else() set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) endif() if(APPLE) set(OS_LIBS c Threads::Threads) elseif(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") set(OS_LIBS thr execinfo) elseif(CMAKE_SYSTEM_NAME STREQUAL "NetBSD") set(OS_LIBS Threads::Threads) elseif(CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") set(OS_LIBS Threads::Threads kvm) elseif(CMAKE_SYSTEM_NAME STREQUAL "Android") set(OS_LIBS Threads::Threads dl) else() set(OS_LIBS Threads::Threads dl rt) endif() # Compiling the unit tests rely on S2N_TEST_IN_FIPS_MODE to be set correctly if(S2N_FIPS) add_definitions(-DS2N_TEST_IN_FIPS_MODE) endif() file(GLOB S2N_HEADERS ${API_HEADERS} ${API_UNSTABLE_HEADERS} ${CRYPTO_HEADERS} ${ERROR_HEADERS} ${STUFFER_HEADERS} ${TLS_HEADERS} ${UTILS_HEADERS} ) file(GLOB S2N_SRC ${CRYPTO_SRC} ${ERROR_SRC} ${STUFFER_SRC} ${TLS_SRC} ${UTILS_SRC} ) add_library(${PROJECT_NAME} ${S2N_HEADERS} ${S2N_SRC}) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE C) # Version numbers are for major updates only- we won't track minor/patch updates here. 
set_target_properties(${PROJECT_NAME} PROPERTIES VERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION ${VERSION_MAJOR}) set(CMAKE_C_FLAGS_DEBUGOPT "") target_compile_options(${PROJECT_NAME} PRIVATE -pedantic -std=gnu99 -Wall -Wimplicit -Wunused -Wcomment -Wchar-subscripts -Wuninitialized -Wshadow -Wcast-align -Wwrite-strings -Wno-deprecated-declarations -Wno-unknown-pragmas -Wformat-security -Wno-missing-braces -Wsign-compare -Wno-strict-prototypes -Wa,--noexecstack ) if (S2N_WERROR_ALL) target_compile_options(${PROJECT_NAME} PUBLIC -Werror) elseif (UNSAFE_TREAT_WARNINGS_AS_ERRORS) target_compile_options(${PROJECT_NAME} PRIVATE -Werror ) endif () if(BUILD_TESTING AND BUILD_SHARED_LIBS) target_compile_options(${PROJECT_NAME} PRIVATE -fvisibility=default) else() target_compile_options(${PROJECT_NAME} PRIVATE -fvisibility=hidden -DS2N_EXPORTS) endif() if(S2N_LTO) target_compile_options(${PROJECT_NAME} PRIVATE -flto) # if we're building a static lib, make it easier for consuming applications to also perform LTO if(NOT BUILD_SHARED_LIBS) target_compile_options(${PROJECT_NAME} PRIVATE -ffunction-sections -fdata-sections) endif() endif() if(NOT APPLE) set(CMAKE_SHARED_LINKER_FLAGS -Wl,-z,noexecstack,-z,relro,-z,now) endif() # Whether to fail the build when compiling s2n's portable C code with non-portable assembly optimizations. Doing this # can lead to runtime crashes if build artifacts are built on modern hardware, but deployed to older hardware without # newer CPU instructions. s2n, by default, should be backwards compatible with older CPU types so this flag should be # enabled in s2n's CI builds and tests, but other consumers of s2n may have stronger control of what CPU types they # deploy to, and can enable more CPU optimizations. if(S2N_BLOCK_NONPORTABLE_OPTIMIZATIONS) target_compile_options(${PROJECT_NAME} PUBLIC -DS2N_BLOCK_NONPORTABLE_OPTIMIZATIONS=1) endif() target_compile_options(${PROJECT_NAME} PUBLIC -fPIC) add_definitions(-D_POSIX_C_SOURCE=200809L) if(CMAKE_BUILD_TYPE MATCHES Release) add_definitions(-D_FORTIFY_SOURCE=2) endif() if(NO_STACK_PROTECTOR) target_compile_options(${PROJECT_NAME} PRIVATE -Wstack-protector -fstack-protector-all) endif() if(S2N_UNSAFE_FUZZING_MODE) target_compile_options(${PROJECT_NAME} PRIVATE -fsanitize-coverage=trace-pc-guard -fsanitize=address,undefined,leak -fuse-ld=gold -DS2N_ADDRESS_SANITIZER=1) endif() if(TSAN) target_compile_options(${PROJECT_NAME} PUBLIC -fsanitize=thread -DS2N_THREAD_SANITIZER=1) target_link_options(${PROJECT_NAME} PUBLIC -fsanitize=thread) endif() if(ASAN) target_compile_options(${PROJECT_NAME} PUBLIC -fsanitize=address -DS2N_ADDRESS_SANITIZER=1) target_link_options(${PROJECT_NAME} PUBLIC -fsanitize=address) endif() if(TSAN OR ASAN) # no-omit-frame-pointer and no-optimize-sibling-calls provide better stack traces target_compile_options(${PROJECT_NAME} PUBLIC -fno-omit-frame-pointer -fno-optimize-sibling-calls) endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") if (NOT $ENV{S2N_LIBCRYPTO} MATCHES "awslc") # add cast-qual back in for non AWS-LC target_compile_options(${PROJECT_NAME} PRIVATE -Wcast-qual) endif() if (COVERAGE) # https://cmake.org/cmake/help/latest/variable/CMAKE_LANG_COMPILER_ID.html # Coverage is done using LLVM source based coverage. This is only supported # on LLVM compilers. GCC would fail with "unrecognized compile options" # on -fprofile-instr-generate -fcoverage-mapping flags. 
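# Illustrative sketch of the downstream workflow these flags assume (commands are not taken
# from this repository): binaries instrumented with -fprofile-instr-generate/-fcoverage-mapping
# write *.profraw files at run time (path controlled by LLVM_PROFILE_FILE), which are then
# post-processed with the LLVM tools, e.g.
#   llvm-profdata merge -sparse *.profraw -o s2n.profdata
#   llvm-cov report ./lib/libs2n.so -instr-profile=s2n.profdata
# which is why an LLVM/clang toolchain is required below.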
if (NOT ${CMAKE_C_COMPILER_ID} MATCHES Clang) message(FATAL_ERROR "This project requires clang for coverage support") endif() target_compile_options(${PROJECT_NAME} PUBLIC -fprofile-instr-generate -fcoverage-mapping) target_link_options(${PROJECT_NAME} PUBLIC -fprofile-instr-generate -fcoverage-mapping) endif() # For interning, we need to find the static libcrypto library. Cmake configs # can branch on the variable BUILD_SHARED_LIBS to e.g. avoid having to define # multiple targets. An example is AWS-LC: # https://github.com/awslabs/aws-lc/blob/main/crypto/cmake/crypto-config.cmake#L5 if (S2N_INTERN_LIBCRYPTO) set(BUILD_SHARED_LIBS_BACKUP ${BUILD_SHARED_LIBS}) set(BUILD_SHARED_LIBS OFF) endif() # Work around target differences if (TARGET crypto) message(STATUS "S2N found target: crypto") set(LINK_LIB "crypto") else() find_package(crypto REQUIRED) message(STATUS "Using libcrypto from the cmake path") set(LINK_LIB "AWS::crypto") endif() if (S2N_INTERN_LIBCRYPTO) # Restore the old BUILD_SHARED_LIBS value set(BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS_BACKUP}) message(STATUS "Enabling libcrypto interning") endif() if (NOT DEFINED CMAKE_AR) message(STATUS "CMAKE_AR undefined, setting to `ar` by default") SET(CMAKE_AR ar) else() message(STATUS "CMAKE_AR found: ${CMAKE_AR}") endif() if (NOT DEFINED CMAKE_RANLIB) message(STATUS "CMAKE_RANLIB undefined, setting to `ranlib` by default") SET(CMAKE_RANLIB ranlib) else() message(STATUS "CMAKE_RANLIB found: ${CMAKE_RANLIB}") endif() if (NOT DEFINED CMAKE_OBJCOPY) message(STATUS "CMAKE_OBJCOPY undefined, setting to `objcopy` by default") SET(CMAKE_OBJCOPY objcopy) else() message(STATUS "CMAKE_OBJCOPY found: ${CMAKE_OBJCOPY}") endif() # Sets the result of the feature probe to `IS_AVAILABLE` function(feature_probe_result PROBE_NAME IS_AVAILABLE) # normalize the boolean value if(IS_AVAILABLE) set(NORMALIZED TRUE) else() set(NORMALIZED FALSE) endif() # indicate the status of the probe message(STATUS "feature ${PROBE_NAME}: ${NORMALIZED}") # set the probe result in the parent scope for other probes set(${PROBE_NAME} ${NORMALIZED} PARENT_SCOPE) # define the probe if available if(NORMALIZED) add_definitions(-D${PROBE_NAME}) endif() endfunction() # Tries to compile a feature probe and initializes the corresponding flags function(feature_probe PROBE_NAME) # Load the global probe flags file(READ "${CMAKE_CURRENT_LIST_DIR}/tests/features/GLOBAL.flags" GLOBAL_FILE) string(REPLACE "\n" "" GLOBAL_FLAGS "${GLOBAL_FILE}") # Load the probe's flags file(READ "${CMAKE_CURRENT_LIST_DIR}/tests/features/${PROBE_NAME}.flags" PROBE_FILE) string(REPLACE "\n" "" PROBE_FLAGS "${PROBE_FILE}") # Try to compile the probe with the given flags try_compile( IS_AVAILABLE ${CMAKE_BINARY_DIR} SOURCES "${CMAKE_CURRENT_LIST_DIR}/tests/features/${PROBE_NAME}.c" LINK_LIBRARIES ${LINK_LIB} ${OS_LIBS} CMAKE_FLAGS ${ADDITIONAL_FLAGS} COMPILE_DEFINITIONS -c ${GLOBAL_FLAGS} ${PROBE_FLAGS} ${ARGN} OUTPUT_VARIABLE TRY_COMPILE_OUTPUT ) # Uncomment the line below to get the output of the try_compile command #message(STATUS "Output of try_compile: ${TRY_COMPILE_OUTPUT}") # Set the result of the probe feature_probe_result(${PROBE_NAME} ${IS_AVAILABLE}) # Make sure the variable is set in the parent scope set(${PROBE_NAME} ${IS_AVAILABLE} PARENT_SCOPE) # Set the flags that we used for the probe set(${PROBE_NAME}_FLAGS ${PROBE_FLAGS} PARENT_SCOPE) endfunction() # Iterate over all of the features and try to compile them FILE(GLOB FEATURE_SRCS "${CMAKE_CURRENT_LIST_DIR}/tests/features/*.c") list(SORT FEATURE_SRCS) 
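# Illustrative sketch of a probe pair (hypothetical names, not one of the real files under
# tests/features/): each probe is a NAME.flags file holding extra compile flags (possibly
# empty) plus a NAME.c translation unit that only compiles when the feature is available, e.g.
#   /* tests/features/S2N_EXAMPLE_FEATURE.c */
#   #include <optional_header.h>
#   int main(void) { return (int) optional_function(); }
# If the try_compile in feature_probe() succeeds, feature_probe_result() adds
# -DS2N_EXAMPLE_FEATURE to the global compile definitions for the rest of the build.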
foreach(file ${FEATURE_SRCS}) get_filename_component(feature_name ${file} NAME_WE) feature_probe(${feature_name}) endforeach() # FreeBSD might need to link to execinfo explicitly if(NOT S2N_EXECINFO_AVAILABLE AND CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") feature_probe(S2N_EXECINFO_AVAILABLE LINK_LIBRARIES execinfo) endif() # Stack traces are only available if execinfo is if (NOT S2N_EXECINFO_AVAILABLE) set(S2N_STACKTRACE FALSE) endif() feature_probe_result(S2N_STACKTRACE ${S2N_STACKTRACE}) if (S2N_INTERN_LIBCRYPTO) # Check if the AWS::crypto target has beeen added and handle it if (TARGET AWS::crypto) # Get the target library type (shared or static) get_target_property(target_type AWS::crypto TYPE) message(STATUS "AWS::crypto target type: ${target_type}") # If we didn't find the a target with static library type, fallback to # existing crypto_STATIC_LIBRARY and crypto_INCLUDE_DIR if (target_type STREQUAL STATIC_LIBRARY) # We need an path to the include directory and libcrypto.a archive. # The finder module defines these appropriately, but if we go through # the target config we need to query this information from the target # first. get_target_property(crypto_STATIC_LIBRARY AWS::crypto LOCATION) get_target_property(crypto_INCLUDE_DIR AWS::crypto INTERFACE_INCLUDE_DIRECTORIES) endif() endif() if (NOT crypto_STATIC_LIBRARY) message(FATAL_ERROR "libcrypto interning requires a static build of libcrypto.a to be available") endif() message(STATUS "crypto_STATIC_LIBRARY: ${crypto_STATIC_LIBRARY}") message(STATUS "crypto_INCLUDE_DIR: ${crypto_INCLUDE_DIR}") # Don't call link_target_libraries here, just make sure the libcrypto include dir is in the path include_directories("${crypto_INCLUDE_DIR}") add_custom_command( OUTPUT libcrypto.symbols COMMAND # copy the static version of libcrypto cp ${crypto_STATIC_LIBRARY} s2n_libcrypto.a && # dump all of the symbols and prefix them with `s2n$` bash -c "${CMAKE_NM} s2n_libcrypto.a | awk '/ [A-Z] /{print $3\" s2n$\"$3}' | sort | uniq > libcrypto.symbols" && # redefine the libcrypto libary symbols ${CMAKE_OBJCOPY} --redefine-syms libcrypto.symbols s2n_libcrypto.a && rm -rf s2n_libcrypto && mkdir s2n_libcrypto && cd s2n_libcrypto && # extract libcrypto objects from the archive ${CMAKE_AR} x ../s2n_libcrypto.a && # rename all of the object files so we don't have any object name collisions bash -c "find . 
-name '*.o' -type f -print0 | xargs -0 -n1 -- basename | xargs -I{} mv {} s2n_crypto__{}" VERBATIM ) add_custom_target(s2n_libcrypto ALL DEPENDS libcrypto.symbols ) add_dependencies(${PROJECT_NAME} s2n_libcrypto) add_definitions(-DS2N_INTERN_LIBCRYPTO) if ((BUILD_SHARED_LIBS AND BUILD_TESTING) OR NOT BUILD_SHARED_LIBS) # if libcrypto needs to be interned, rewrite libcrypto references so use of internal functions will link correctly add_custom_command( TARGET ${PROJECT_NAME} PRE_LINK COMMAND find "${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${PROJECT_NAME}.dir" -name '*.c.o' -exec objcopy --redefine-syms libcrypto.symbols {} \\\; ) endif() # copy the static libcrypto into the final artifact if (BUILD_SHARED_LIBS) if (BUILD_TESTING) # if we're building tests, we export the prefixed symbols so tests can link to them set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS "-Wl,--whole-archive s2n_libcrypto.a -Wl,--no-whole-archive") else() # if we're not building tests, then just copy the original archive, unmodified set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS "-Wl,--whole-archive ${crypto_STATIC_LIBRARY} -Wl,--no-whole-archive -Wl,--exclude-libs=ALL") endif() else() # add all of the prefixed symbols to the archive add_custom_command( TARGET ${PROJECT_NAME} POST_BUILD DEPENDS libcrypto.symbols COMMAND bash -c "${CMAKE_AR} -r lib/libs2n.a s2n_libcrypto/*.o" VERBATIM ) endif() else() # LINK_LIB is set above after checking targets. It handles the find_package craziness. target_link_libraries(${PROJECT_NAME} PUBLIC ${LINK_LIB}) endif() target_link_libraries(${PROJECT_NAME} PUBLIC ${OS_LIBS} m) target_include_directories(${PROJECT_NAME} PUBLIC $) target_include_directories(${PROJECT_NAME} PUBLIC $ $) if (BUILD_TESTING) enable_testing() file(GLOB TESTLIB_SRC "tests/testlib/*.c") file(GLOB EXAMPLES_SRC "docs/examples/*.c") file(GLOB TESTLIB_HEADERS "tests/testlib/*.h" "tests/s2n_test.h") add_library(testss2n STATIC ${TESTLIB_HEADERS} ${TESTLIB_SRC} ${EXAMPLES_SRC}) target_include_directories(testss2n PUBLIC tests) target_compile_options(testss2n PRIVATE -std=gnu99) target_link_libraries(testss2n PUBLIC ${PROJECT_NAME}) if (S2N_INTERN_LIBCRYPTO) # if libcrypto was interned, rewrite libcrypto symbols so use of internal functions will link correctly add_custom_command( TARGET testss2n POST_BUILD COMMAND objcopy --redefine-syms libcrypto.symbols lib/libtestss2n.a ) endif() #run unit tests file (GLOB TEST_LD_PRELOAD "tests/LD_PRELOAD/*.c") add_library(allocator_overrides SHARED ${TEST_LD_PRELOAD}) set(UNIT_TEST_ENVS S2N_DONT_MLOCK=1) if (TSAN OR ASAN) set(UNIT_TEST_ENVS ${UNIT_TEST_ENVS} S2N_ADDRESS_SANITIZER=1) endif() if(TSAN) set(TSAN_SUPPRESSIONS_FILE ${CMAKE_SOURCE_DIR}/tests/.tsan_suppressions) if(NOT EXISTS ${TSAN_SUPPRESSIONS_FILE}) message(FATAL_ERROR "TSAN suppression file ${TSAN_SUPPRESSIONS_FILE} missing") endif() set(TSAN_OPTIONS suppressions=${TSAN_SUPPRESSIONS_FILE}) if(DEFINED ENV{TSAN_OPTIONS}) set(TSAN_OPTIONS "${TSAN_OPTIONS} $ENV{TSAN_OPTIONS}") endif() set(UNIT_TEST_ENVS ${UNIT_TEST_ENVS} TSAN_OPTIONS=${TSAN_OPTIONS}) endif() if(ASAN) # "detect_odr_violation" detects violations of the "one definition rule", # ensuring that symbols are only defined once. # But some of our unit tests intentionally include *.c files for testing, # resulting in duplicate global values. 
set(ASAN_OPTIONS detect_odr_violation=0) if(DEFINED ENV{ASAN_OPTIONS}) set(ASAN_OPTIONS "${ASAN_OPTIONS} $ENV{ASAN_OPTIONS}") endif() set(UNIT_TEST_ENVS ${UNIT_TEST_ENVS} ASAN_OPTIONS=${ASAN_OPTIONS}) endif() message(STATUS "Running tests with environment: ${UNIT_TEST_ENVS}") file(GLOB UNITTESTS_SRC "tests/unit/*.c") foreach(test_case ${UNITTESTS_SRC}) string(REGEX REPLACE ".+\\/(.+)\\.c" "\\1" test_case_name ${test_case}) add_executable(${test_case_name} ${test_case}) target_include_directories(${test_case_name} PRIVATE api) target_include_directories(${test_case_name} PRIVATE ./) target_include_directories(${test_case_name} PRIVATE tests) target_link_libraries(${test_case_name} PRIVATE testss2n) if (S2N_INTERN_LIBCRYPTO) # if libcrypto was interned, rewrite libcrypto symbols so use of internal functions will link correctly add_custom_command( TARGET ${test_case_name} PRE_LINK COMMAND find . -name '${test_case_name}.c.o' -exec objcopy --redefine-syms libcrypto.symbols {} \\\; ) endif() target_compile_options(${test_case_name} PRIVATE -Wall -Wimplicit -Wunused -Wcomment -Wchar-subscripts -Wuninitialized -Wshadow -Wcast-align -Wwrite-strings -Wformat-security -Wno-deprecated-declarations -Wno-unknown-pragmas -Wno-deprecated -fPIC -D_POSIX_C_SOURCE=200809L -std=gnu99) if (S2N_LTO) target_compile_options(${test_case_name} PRIVATE -flto) endif() add_test(NAME ${test_case_name} COMMAND $ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/tests/unit) set_property(TEST ${test_case_name} PROPERTY LABELS "unit") set_property(TEST ${test_case_name} PROPERTY ENVIRONMENT ${UNIT_TEST_ENVS}) endforeach(test_case) add_executable(s2nc "bin/s2nc.c" "bin/echo.c" "bin/https.c" "bin/common.c") target_link_libraries(s2nc ${PROJECT_NAME}) target_include_directories(s2nc PRIVATE api) target_compile_options(s2nc PRIVATE -std=gnu99) add_executable(s2nd "bin/s2nd.c" "bin/echo.c" "bin/https.c" "bin/common.c") target_link_libraries(s2nd ${PROJECT_NAME}) target_include_directories(s2nd PRIVATE api) target_compile_options(s2nd PRIVATE -std=gnu99) if(S2N_LTO) target_compile_options(s2nc PRIVATE -flto) target_compile_options(s2nd PRIVATE -flto) endif() if(BENCHMARK) find_package(benchmark REQUIRED) file(GLOB BENCHMARK_SRC "tests/benchmark/*.cc") file(GLOB BENCHMARK_UTILS "tests/benchmark/utils/*.cc") enable_language(CXX) foreach(benchmark ${BENCHMARK_SRC}) string(REGEX REPLACE ".+\\/(.+)\\.cc" "\\1" benchmark_name ${benchmark}) add_executable(${benchmark_name} ${benchmark} "bin/echo.c" "bin/common.c" ${BENCHMARK_UTILS}) target_include_directories(${benchmark_name} PRIVATE api) target_include_directories(${benchmark_name} PRIVATE tests) target_link_libraries(${benchmark_name} PUBLIC ${PROJECT_NAME} testss2n benchmark::benchmark) # Based off the flags in tests/benchmark/Makefile target_compile_options(${benchmark_name} PRIVATE -pedantic -Wall -Werror -Wunused -Wcomment -Wchar-subscripts -Wuninitialized -Wshadow -Wcast-qual -Wcast-align -Wwrite-strings -Wno-deprecated-declarations -Wno-unknown-pragmas -Wformat-security -Wno-missing-braces -fvisibility=hidden -Wno-unreachable-code -Wno-unused-but-set-variable) endforeach(benchmark) endif() if (S2N_INTEG_TESTS) find_package (Python3 COMPONENTS Interpreter Development) file(GLOB integv2_test_files "${PROJECT_SOURCE_DIR}/tests/integrationv2/test_*.py") set(N 1) if (S2N_FAST_INTEG_TESTS) set(N auto) endif() foreach(test_file_path ${integv2_test_files}) get_filename_component(test_filename ${test_file_path} NAME_WE) string(REGEX REPLACE "^test_" "integrationv2_" test_target 
${test_filename}) if (S2N_INTEG_NIX) # For Nix and environments where LD_LIBRARY_PATH is already correct. # We're also dropping tox and calling pytest directly, because # Nix is already handling all of the python setup. if (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND ${test_target} STREQUAL "integrationv2_sslyze" ) # sslyze/nassl is not available on aarch64. message(WARNING "Skipping ${test_target} due to missing tools on ${CMAKE_SYSTEM_PROCESSOR}") continue() endif() message(STATUS "Adding integ test ${test_target}") add_test(NAME ${test_target} COMMAND pytest -x -n=${N} --maxfail=1 --reruns=0 --cache-clear -rpfsq -o log_cli=true --log-cli-level=DEBUG --provider-version=$ENV{S2N_LIBCRYPTO} --provider-criterion=off --fips-mode=0 --no-pq=0 ${test_file_path} WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/integrationv2 ) else() # For use with libcryptos built into test-deps, and not in LD_LIBRARY_PATH. # This is a duplication of tests/integrationv2/Makefile and # can go away once all the Nix porting is finished. add_test(NAME ${test_target} COMMAND ${CMAKE_COMMAND} -E env DYLD_LIBRARY_PATH=${PROJECT_SOURCE_DIR}/libcrypto-root/lib:$ENV{DYLD_LIBRARY_PATH} LD_LIBRARY_PATH=${PROJECT_SOURCE_DIR}/libcrypto-root/lib:${PROJECT_SOURCE_DIR}/test-deps/openssl-1.1.1/lib:${PROJECT_SOURCE_DIR}/test-deps/gnutls37/nettle/lib:$ENV{LD_LIBRARY_PATH} PATH=${PROJECT_SOURCE_DIR}/bin:${PROJECT_SOURCE_DIR}/test-deps/openssl-1.1.1/bin:${PROJECT_SOURCE_DIR}/test-deps/gnutls37/bin:$ENV{PATH} PYTHONNOUSERSITE=1 S2N_INTEG_TEST=1 TOX_TEST_NAME=${test_file_path} ${Python3_EXECUTABLE} -m tox WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/integrationv2) endif() set_property(TEST ${test_target} PROPERTY LABELS "integrationv2") set_property(TEST ${test_target} PROPERTY TIMEOUT 7200) endforeach() endif() endif() #install the s2n files install(FILES ${API_HEADERS} DESTINATION "include/" COMPONENT Development) install(FILES ${API_UNSTABLE_HEADERS} DESTINATION "include/s2n/unstable" COMPONENT Development) if (UNIX AND NOT APPLE) include(GNUInstallDirs) elseif(NOT DEFINED CMAKE_INSTALL_LIBDIR) set(CMAKE_INSTALL_LIBDIR "lib") endif() if (S2N_INSTALL_S2NC_S2ND) install( TARGETS s2nc s2nd RUNTIME DESTINATION bin ) endif() install( TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}-targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT Development LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT Runtime RUNTIME DESTINATION bin COMPONENT Runtime ) configure_file("cmake/${PROJECT_NAME}-config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" @ONLY) if (BUILD_SHARED_LIBS) set (TARGET_DIR "shared") else() set (TARGET_DIR "static") endif() install(EXPORT "${PROJECT_NAME}-targets" DESTINATION "${CMAKE_INSTALL_LIBDIR}/${PROJECT_NAME}/cmake/${TARGET_DIR}" NAMESPACE AWS:: COMPONENT Development) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/${PROJECT_NAME}/cmake/" COMPONENT Development) install(FILES "cmake/modules/Findcrypto.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/${PROJECT_NAME}/cmake/modules/" COMPONENT Development) aws-crt-python-0.20.4+dfsg/crt/s2n/LICENSE000066400000000000000000000261361456575232400177370ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. aws-crt-python-0.20.4+dfsg/crt/s2n/Makefile000066400000000000000000000066571456575232400204000ustar00rootroot00000000000000# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # PLATFORM := $(shell uname) MAKEFLAGS += PLATFORM=$(PLATFORM) ifndef LIBCRYPTO_ROOT export LIBCRYPTO_ROOT = $(shell echo "`pwd`/libcrypto-root") endif export S2N_ROOT=$(shell pwd) export COVERAGE_DIR = $(shell echo "${S2N_ROOT}/coverage") DIRS=$(wildcard */) SRCS=$(wildcard *.c) OBJS=$(SRCS:.c=.o) .PHONY : all all: bin $(MAKE) -C tests bitcode : ${MAKE} -C tests/saw bitcode .PHONY : bc bc: ${MAKE} -C crypto bc ${MAKE} -C stuffer bc ${MAKE} -C tls bc ${MAKE} -C utils bc .PHONY : saw saw : bc $(MAKE) -C tests/saw include s2n.mk .PHONY : libs libs: $(MAKE) -C utils $(MAKE) -C error $(MAKE) -C stuffer $(MAKE) -C crypto $(MAKE) -C tls $(MAKE) -C lib .PHONY : bin bin: libs $(MAKE) -C bin .PHONY : integrationv2 integrationv2: bin $(MAKE) -C tests integrationv2 .PHONY : valgrind valgrind: bin $(MAKE) -C tests valgrind # https://github.com/aws/s2n-tls/issues/3758 # Run valgrind in pedantic mode (--errors-for-leak-kinds=all) .PHONY : pedantic_valgrind pedantic_valgrind: bin $(MAKE) -C tests pedantic_valgrind .PHONY : fuzz ifeq ($(shell uname),Linux) fuzz : fuzz-linux else fuzz : fuzz-osx endif .PHONY : fuzz-osx fuzz-osx : @echo "\033[33;1mSKIPPED\033[0m Fuzzing is not supported on \"$$(uname -mprs)\" at this time." .PHONY : fuzz-linux fuzz-linux : export S2N_UNSAFE_FUZZING_MODE = 1 fuzz-linux : bin $(MAKE) -C tests fuzz .PHONY : benchmark benchmark: bin $(MAKE) -C tests benchmark .PHONY : coverage coverage: run-lcov run-genhtml .PHONY : run-lcov run-lcov: $(MAKE) -C bin lcov $(MAKE) -C crypto lcov $(MAKE) -C error lcov $(MAKE) -C stuffer lcov $(MAKE) -C tests lcov $(MAKE) -C tls run-lcov $(MAKE) -C utils lcov lcov -a crypto/coverage.info -a error/coverage.info -a stuffer/coverage.info -a tls/coverage.info -a $(wildcard tls/*/coverage.info) -a utils/coverage.info --output ${COVERAGE_DIR}/all_coverage.info .PHONY : run-genhtml run-genhtml: genhtml -o ${COVERAGE_DIR}/html ${COVERAGE_DIR}/all_coverage.info .PHONY : indent indent: $(MAKE) -C tests indentsource $(MAKE) -C stuffer indentsource $(MAKE) -C crypto indentsource $(MAKE) -C utils indentsource $(MAKE) -C error indentsource $(MAKE) -C tls indent $(MAKE) -C bin indentsource .PHONY : pre_commit_check pre_commit_check: all indent clean # TODO use awslabs instead DEV_IMAGE ?= camshaft/s2n-dev DEV_OPENSSL_VERSION ?= openssl-1.1.1 DEV_VERSION ?= ubuntu_18.04_$(DEV_OPENSSL_VERSION)_gcc9 dev: @docker run -it --rm --ulimit memlock=-1 -v `pwd`:/home/s2n-dev/s2n $(DEV_IMAGE):$(DEV_VERSION) .PHONY : install install: bin libs $(MAKE) -C bin install $(MAKE) -C lib install .PHONY: uninstall uninstall: $(MAKE) -C bin uninstall $(MAKE) -C lib uninstall .PHONY : clean clean: $(MAKE) -C tests clean $(MAKE) -C stuffer decruft $(MAKE) -C crypto decruft $(MAKE) -C utils decruft $(MAKE) -C error decruft $(MAKE) -C tls clean $(MAKE) -C bin decruft $(MAKE) -C lib decruft $(MAKE) -C coverage clean aws-crt-python-0.20.4+dfsg/crt/s2n/NOTICE000066400000000000000000000001221456575232400176210ustar00rootroot00000000000000s2n Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
aws-crt-python-0.20.4+dfsg/crt/s2n/README.md000066400000000000000000000221201456575232400201760ustar00rootroot00000000000000s2n s2n-tls is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0. > s2n-tls is short for "signal to noise" and is a nod to the almost magical act of encryption — disguising meaningful signals, like your critical data, as seemingly random noise. > > -- [s2n-tls announcement](https://aws.amazon.com/blogs/security/introducing-s2n-a-new-open-source-tls-implementation/) [![Build Status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiMndlTzJNbHVxWEo3Nm82alp4eGdGNm4rTWdxZDVYU2VTbitIR0ZLbHVtcFFGOW5majk5QnhqaUp3ZEkydG1ueWg0NGlhRE43a1ZnUzZaQTVnSm91TzFFPSIsIml2UGFyYW1ldGVyU3BlYyI6IlJLbW42NENlYXhJNy80QnYiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=main)](https://github.com/aws/s2n-tls/) [![Apache 2 License](https://img.shields.io/github/license/aws/s2n-tls.svg)](http://aws.amazon.com/apache-2-0/) [![C99](https://img.shields.io/badge/language-C99-blue.svg)](http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf) [![Github forks](https://img.shields.io/github/forks/aws/s2n-tls.svg)](https://github.com/aws/s2n-tls/network) [![Github stars](https://img.shields.io/github/stars/aws/s2n-tls.svg)](https://github.com/aws/s2n-tls/stargazers) ## Quickstart for Ubuntu ```bash # clone s2n-tls git clone https://github.com/aws/s2n-tls.git cd s2n-tls # install build dependencies sudo apt update sudo apt install cmake # install a libcrypto sudo apt install libssl-dev # build s2n-tls cmake . -Bbuild \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=./s2n-tls-install cmake --build build -j $(nproc) CTEST_PARALLEL_LEVEL=$(nproc) ctest --test-dir build cmake --install build ``` See the [s2n-tls build documentation](docs/BUILD.md) for further guidance on building s2n-tls for your platform. ## Have a Question? If you think you might have found a security impacting issue, please follow our [Security Notification Process.](#security-issue-notifications) If you have any questions about submitting PRs, s2n-tls API usage, or something similar, please open an issue. ## Documentation s2n-tls uses [Doxygen](https://doxygen.nl/index.html) to document its public API. The latest s2n-tls documentation can be found on [GitHub pages](https://aws.github.io/s2n-tls/doxygen/). The [Usage Guide](https://aws.github.io/s2n-tls/usage-guide/) explains how different TLS features can be configured and used. Documentation for older versions or branches of s2n-tls can be generated locally. To generate the documentation, install doxygen and run `doxygen docs/doxygen/Doxyfile`. The doxygen documentation can now be found at `docs/doxygen/output/html/index.html`. Doxygen installation instructions are available at the [Doxygen](https://doxygen.nl/download.html) webpage. ## Using s2n-tls The s2n-tls I/O APIs are designed to be intuitive to developers familiar with the widely-used POSIX I/O APIs, and s2n-tls supports blocking, non-blocking, and full-duplex I/O. Additionally there are no locks or mutexes within s2n-tls. ```c /* Create a server mode connection handle */ struct s2n_connection *conn = s2n_connection_new(S2N_SERVER); if (conn == NULL) { ... error ... } /* Associate a connection with a file descriptor */ if (s2n_connection_set_fd(conn, fd) < 0) { ... error ... 
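    /* s2n_errno is set on failure; s2n_strerror(s2n_errno, "EN") returns a
       human-readable description that can be logged here */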
} /* Negotiate the TLS handshake */ s2n_blocked_status blocked; if (s2n_negotiate(conn, &blocked) < 0) { ... error ... } /* Write data to the connection */ int bytes_written; bytes_written = s2n_send(conn, "Hello World", sizeof("Hello World"), &blocked); ``` For details on building the s2n-tls library and how to use s2n-tls in an application you are developing, see the [Usage Guide](https://aws.github.io/s2n-tls/usage-guide). ## s2n-tls features s2n-tls implements SSLv3, TLS1.0, TLS1.1, TLS1.2, and TLS1.3. For encryption, s2n-tls supports 128-bit and 256-bit AES in the CBC and GCM modes, ChaCha20, 3DES, and RC4. For forward secrecy, s2n-tls supports both DHE and ECDHE. s2n-tls also supports the Server Name Indicator (SNI), Application-Layer Protocol Negotiation (ALPN), and Online Certificate Status Protocol (OCSP) TLS extensions. SSLv3, RC4, 3DES, and DHE are each disabled by default for security reasons. As it can be difficult to keep track of which encryption algorithms and protocols are best to use, s2n-tls features a simple API to use the latest "default" set of preferences. If you prefer to remain on a specific version for backwards compatibility, that is also supported. ```c /* Use the latest s2n-tls "default" set of ciphersuite and protocol preferences */ s2n_config_set_cipher_preferences(config, "default"); /* Use a specific set of preferences, update when you're ready */ s2n_config_set_cipher_preferences(config, "20150306") ``` ## s2n-tls safety mechanisms Internally s2n-tls takes a systematic approach to data protection and includes several mechanisms designed to improve safety. ##### Small and auditable code base Ignoring tests, blank lines and comments, s2n-tls is about 6,000 lines of code. s2n's code is also structured and written with a focus on reviewability. All s2n-tls code is subject to code review, and we plan to complete security evaluations of s2n-tls on an annual basis. To date there have been two external code-level reviews of s2n-tls, including one by a commercial security vendor. s2n-tls has also been shared with some trusted members of the broader cryptography, security, and Open Source communities. Any issues discovered are always recorded in the s2n-tls issue tracker. ##### Static analysis, fuzz-testing and penetration testing In addition to code reviews, s2n-tls is subject to regular static analysis, fuzz-testing, and penetration testing. Several penetration tests have occurred, including two by commercial vendors. ##### Unit tests and end-to-end testing s2n-tls includes positive and negative unit tests and end-to-end test cases. Unit test coverage can be viewed [here](https://dx1inn44oyl7n.cloudfront.net/main/index.html). Note that this represents unit coverage for a particular build. Since that build won't necessarily support all s2n-tls features, test coverage may be artificially lowered. ##### Erase on read s2n-tls encrypts or erases plaintext data as quickly as possible. For example, decrypted data buffers are erased as they are read by the application. ##### Built-in memory protection s2n-tls uses operating system features to protect data from being swapped to disk or appearing in core dumps. ##### Minimalist feature adoption s2n-tls avoids implementing rarely used options and extensions, as well as features with a history of triggering protocol-level vulnerabilities. For example there is no support for session renegotiation or DTLS. 
##### Compartmentalized random number generation The security of TLS and its associated encryption algorithms depends upon secure random number generation. s2n-tls provides every thread with two separate random number generators. One for "public" randomly generated data that may appear in the clear, and one for "private" data that should remain secret. This approach lessens the risk of potential predictability weaknesses in random number generation algorithms from leaking information across contexts. ##### Modularized encryption s2n-tls has been structured so that different encryption libraries may be used. Today s2n-tls supports OpenSSL (versions 1.0.2, 1.1.1 and 3.0.x), LibreSSL, BoringSSL, AWS-LC, and the Apple Common Crypto framework to perform the underlying cryptographic operations. ##### Timing blinding s2n-tls includes structured support for blinding time-based side-channels that may leak sensitive data. For example, if s2n-tls fails to parse a TLS record or handshake message, s2n-tls will add a randomized delay of between 10 and 30 seconds, granular to nanoseconds, before responding. This raises the complexity of real-world timing side-channel attacks by a factor of at least tens of trillions. ##### Table based state-machines s2n-tls uses simple tables to drive the TLS/SSL state machines, making it difficult for invalid out-of-order states to arise. ##### C safety s2n-tls is written in C, but makes light use of standard C library functions and wraps all memory handling, string handling, and serialization in systematic boundary-enforcing checks. ## Security issue notifications If you discover a potential security issue in s2n-tls we ask that you notify AWS Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. If you package or distribute s2n-tls, or use s2n-tls as part of a large multi-user service, you may be eligible for pre-notification of future s2n-tls releases. Please contact s2n-pre-notification@amazon.com. ## Contributing to s2n-tls If you are interested in contributing to s2n-tls, please see our [development guide](https://github.com/aws/s2n-tls/blob/main/docs/DEVELOPMENT-GUIDE.md). ## Language Bindings for s2n-tls See our [language bindings list](https://github.com/aws/s2n-tls/blob/main/docs/BINDINGS.md) for language bindings for s2n-tls that we're aware of. aws-crt-python-0.20.4+dfsg/crt/s2n/VERSIONING.rst000066400000000000000000000032421456575232400211400ustar00rootroot00000000000000***************** Versioning Policy ***************** We use a three-part X.Y.Z (Major.Minor.Patch) versioning definition, as follows: * **X (Major)** version changes are significant and expected to break backwards compatibility. * **Y (Minor)** version changes are moderate changes. These include: * Significant non-breaking feature additions. * Possible backwards-incompatible changes. These changes will be noted and explained in detail in the release notes. * **Z (Patch)** version changes are small changes. These changes will not break backwards compatibility. * Z releases will also include warning of upcoming breaking changes, whenever possible. Beta releases ============= Versions with a zero major version (0.Y.Z) are considered to be beta releases. In beta releases, a Y-change may involve significant API changes. Branch stability ================ Untagged branches (such as main) are not subject to any API or ABI stability policy; APIs may change at any time. 
What this means for you ======================= We recommend running the most recent version. Here are our suggestions for managing updates: * Beta releases should be considered to be under flux. While we will try to minimize churn, expect that you'll need to make some changes to move to the 1.0.0 release. * X changes will require some effort to incorporate. * Y changes will not require significant effort to incorporate. * If you have good unit and integration tests, these changes are generally safe to pick up automatically. * Z changes will not require any changes to your code. Z changes are intended to be picked up automatically. * Good unit and integration tests are always recommended. aws-crt-python-0.20.4+dfsg/crt/s2n/api/000077500000000000000000000000001456575232400174735ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/api/s2n.h000066400000000000000000004662661456575232400203720ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /** * @file s2n.h * s2n-tls is a C99 implementation of the TLS/SSL protocols that is designed to * be simple, small, fast, and with security as a priority.
It is released and * licensed under the Apache License 2.0. */ #pragma once #if ((__GNUC__ >= 4) || defined(__clang__)) && defined(S2N_EXPORTS) /** * Marks a function as belonging to the public s2n API. */ #define S2N_API __attribute__((visibility("default"))) #else /** * Marks a function as belonging to the public s2n API. */ #define S2N_API #endif /* __GNUC__ >= 4 || defined(__clang__) */ #ifdef __cplusplus extern "C" { #endif #include #include #include #include #include /** * Function return code */ #define S2N_SUCCESS 0 /** * Function return code */ #define S2N_FAILURE -1 /** * Callback return code */ #define S2N_CALLBACK_BLOCKED -2 /** * s2n minimum supported TLS record major version */ #define S2N_MINIMUM_SUPPORTED_TLS_RECORD_MAJOR_VERSION 2 /** * s2n maximum supported TLS record major version */ #define S2N_MAXIMUM_SUPPORTED_TLS_RECORD_MAJOR_VERSION 3 /** * s2n SSL 2.0 Version Constant */ #define S2N_SSLv2 20 /** * s2n SSL 3.0 Version Constant */ #define S2N_SSLv3 30 /** * s2n TLS 1.0 Version Constant */ #define S2N_TLS10 31 /** * s2n TLS 1.1 Version Constant */ #define S2N_TLS11 32 /** * s2n TLS 1.2 Version Constant */ #define S2N_TLS12 33 /** * s2n TLS 1.3 Version Constant */ #define S2N_TLS13 34 /** * s2n Unknown TLS Version */ #define S2N_UNKNOWN_PROTOCOL_VERSION 0 /** * s2n-tls functions that return 'int' return 0 to indicate success and -1 to indicate * failure. * * s2n-tls functions that return pointer types return NULL in the case of * failure. * * When an s2n-tls function returns a failure, s2n_errno will be set to a value * corresponding to the error. This error value can be translated into a string * explaining the error in English by calling s2n_strerror(s2n_errno, "EN"). * A string containing human readable error name; can be generated with `s2n_strerror_name`. * A string containing internal debug information, including filename and line number, can be generated with `s2n_strerror_debug`. * A string containing only the filename and line number can be generated with `s2n_strerror_source`. * This string is useful to include when reporting issues to the s2n-tls development team. * * @warning To avoid possible confusion, s2n_errno should be cleared after processing an error: `s2n_errno = S2N_ERR_T_OK` */ S2N_API extern __thread int s2n_errno; /** * This function can be used instead of trying to resolve `s2n_errno` directly * in runtimes where thread-local variables may not be easily accessible. * * @returns The address of the thread-local `s2n_errno` variable */ S2N_API extern int *s2n_errno_location(void); /** * Used to help applications determine why an s2n-tls function failed. * * This enum is optimized for use in C switch statements. Each value in the enum represents * an error "category". * * s2n-tls organizes errors into different "types" to allow applications to handle error * values without catching all possibilities. Applications using non-blocking I/O should check * the error type to determine if the I/O operation failed because it would block or for some other * error. To retrieve the type for a given error use `s2n_error_get_type()`. Applications should * perform any error handling logic using these high level types. * * See the [Error Handling](https://github.com/aws/s2n-tls/blob/main/docs/usage-guide/topics/ch03-error-handling.md) section for how the errors should be interpreted. 
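 *
 * For example, an application using non-blocking I/O might distinguish a
 * blocked operation from a real failure (a minimal sketch; `conn` and
 * `blocked` are assumed to already exist):
 *
 *     if (s2n_negotiate(conn, &blocked) < 0) {
 *         if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_BLOCKED) {
 *             // not an error: retry s2n_negotiate() once I/O is ready again
 *         } else {
 *             fprintf(stderr, "%s\n", s2n_strerror(s2n_errno, "EN"));
 *         }
 *         s2n_errno = S2N_ERR_T_OK; // clear the error before continuing
 *     }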
*/ typedef enum { /** No error */ S2N_ERR_T_OK = 0, /** Underlying I/O operation failed, check system errno */ S2N_ERR_T_IO, /** EOF */ S2N_ERR_T_CLOSED, /** Underlying I/O operation would block */ S2N_ERR_T_BLOCKED, /** Incoming Alert */ S2N_ERR_T_ALERT, /** Failure in some part of the TLS protocol. Ex: CBC verification failure */ S2N_ERR_T_PROTO, /** Error internal to s2n-tls. A precondition could have failed. */ S2N_ERR_T_INTERNAL, /** User input error. Ex: Providing an invalid cipher preference version */ S2N_ERR_T_USAGE } s2n_error_type; /** * Gets the category of error from an error. * * s2n-tls organizes errors into different "types" to allow applications to do logic on error values without catching all possibilities. * Applications using non-blocking I/O should check error type to determine if the I/O operation failed because * it would block or for some other error. * * @param error The error from s2n. Usually this is `s2n_errno`. * @returns An s2n_error_type */ S2N_API extern int s2n_error_get_type(int error); /** * An opaque configuration object, used by clients and servers for holding cryptographic certificates, keys and preferences. */ struct s2n_config; /** * An opaque connection. Used to track each s2n connection. */ struct s2n_connection; /** * Prevents S2N from calling `OPENSSL_init_crypto`/`OPENSSL_cleanup`/`EVP_cleanup` on OpenSSL versions * prior to 1.1.x. This allows applications or languages that also init OpenSSL to interoperate * with S2N. * * @warning This function must be called BEFORE s2n_init() to have any effect. It will return an error * if s2n is already initialized. * * @note If you disable this and are using a version of OpenSSL/libcrypto < 1.1.x, you will * be responsible for library init and cleanup (specifically `OPENSSL_add_all_algorithms()` * or `OPENSSL_init_crypto()`, and EVP_* APIs will not be usable unless the library is initialized. * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_crypto_disable_init(void); /** * Prevents S2N from installing an atexit handler, which allows safe shutdown of S2N from within a * re-entrant shared library * * @warning This function must be called BEFORE s2n_init() to have any effect. It will return an error * if s2n is already initialized. * * @note This will cause `s2n_cleanup` to do complete cleanup of s2n-tls when called from the main * thread (the thread `s2n_init` was called from). * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_disable_atexit(void); /** * Fetches the OpenSSL version s2n-tls was compiled with. This can be used by applications to validate at runtime * that the versions of s2n-tls and Openssl that they have loaded are correct. * * @returns the version number of OpenSSL that s2n-tls was compiled with */ S2N_API extern unsigned long s2n_get_openssl_version(void); /** * Initializes the s2n-tls library and should be called once in your application, before any other s2n-tls * functions are called. Failure to call s2n_init() will result in errors from other s2n-tls functions. * * @warning This function is not thread safe and should only be called once. * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_init(void); /** * Cleans up any internal resources used by s2n-tls. This function should be called from each thread or process * that is created subsequent to calling `s2n_init` when that thread or process is done calling other s2n-tls functions. * * @returns S2N_SUCCESS on success. 
S2N_FAILURE on failure */ S2N_API extern int s2n_cleanup(void); /** * Creates a new s2n_config object. This object can (and should) be associated with many connection * objects. * * The returned config will be initialized with default system certificates in its trust store. * * The returned config should be freed with `s2n_config_free()` after it's no longer in use by any * connection. * * @returns A new configuration object suitable for configuring connections and associating certs * and keys. */ S2N_API extern struct s2n_config *s2n_config_new(void); /** * Creates a new s2n_config object with minimal default options. * * This function has better performance than `s2n_config_new()` because it does not load default * system certificates into the trust store by default. To add system certificates to this config, * call `s2n_config_load_system_certs()`. * * The returned config should be freed with `s2n_config_free()` after it's no longer in use by any * connection. * * @returns A new configuration object suitable for configuring connections and associating certs * and keys. */ S2N_API extern struct s2n_config *s2n_config_new_minimal(void); /** * Frees the memory associated with an `s2n_config` object. * * @param config The configuration object being freed * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_free(struct s2n_config *config); /** * Frees the DH params associated with an `s2n_config` object. * * @param config The configuration object with DH params being freed * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_free_dhparams(struct s2n_config *config); /** * Frees the certificate chain and key associated with an `s2n_config` object. * * @param config The configuration object with DH params being freed * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_free_cert_chain_and_key(struct s2n_config *config); /** * Callback function type used to get the system time. * * @param void* A pointer to arbitrary data for use within the callback * @param uint64_t* A pointer that the callback will set to the time in nanoseconds * The function should return 0 on success and -1 on failure. */ typedef int (*s2n_clock_time_nanoseconds)(void *, uint64_t *); /** * Cache callback function that allows the caller to retrieve SSL session data * from a cache. * * The callback function takes six arguments: * a pointer to the s2n_connection object, * a pointer to arbitrary data for use within the callback, * a pointer to a key which can be used to retrieve the cached entry, * a 64 bit unsigned integer specifying the size of this key, * a pointer to a memory location where the value should be stored, * and a pointer to a 64 bit unsigned integer specifying the size of this value. * * Initially *value_size will be set to the amount of space allocated for the value, * the callback should set *value_size to the actual size of the data returned. * If there is insufficient space, -1 should be returned. * If the cache is not ready to provide data for the request, * S2N_CALLBACK_BLOCKED should be returned. * * This will cause s2n_negotiate() to return S2N_BLOCKED_ON_APPLICATION_INPUT. */ typedef int (*s2n_cache_retrieve_callback)(struct s2n_connection *conn, void *, const void *key, uint64_t key_size, void *value, uint64_t *value_size); /** * Cache callback function that allows the caller to store SSL session data in a * cache. 
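 *
 * A minimal sketch of an implementation (the `app_cache_put` helper is
 * hypothetical; the full argument list is described below):
 *
 *     int cache_store(struct s2n_connection *conn, void *ctx, uint64_t ttl_in_seconds,
 *             const void *key, uint64_t key_size, const void *value, uint64_t value_size)
 *     {
 *         // assuming the usual 0-on-success / -1-on-failure convention
 *         return app_cache_put(ctx, key, key_size, value, value_size, ttl_in_seconds);
 *     }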
* * The callback function takes seven arguments: * a pointer to the s2n_connection object, * a pointer to arbitrary data for use within the callback, * a 64-bit unsigned integer specifying the number of seconds the session data may be stored for, * a pointer to a key which can be used to retrieve the cached entry, * a 64 bit unsigned integer specifying the size of this key, * a pointer to a value which should be stored, * and a 64 bit unsigned integer specified the size of this value. */ typedef int (*s2n_cache_store_callback)(struct s2n_connection *conn, void *, uint64_t ttl_in_seconds, const void *key, uint64_t key_size, const void *value, uint64_t value_size); /** * Cache callback function that allows the caller to set a callback function * that will be used to delete SSL session data from a cache. * * The callback function takes four arguments: * a pointer to s2n_connection object, * a pointer to arbitrary data for use within the callback, * a pointer to a key which can be used to delete the cached entry, * and a 64 bit unsigned integer specifying the size of this key. */ typedef int (*s2n_cache_delete_callback)(struct s2n_connection *conn, void *, const void *key, uint64_t key_size); /** * Allows the caller to set a callback function that will be used to get the * system time. The time returned should be the number of nanoseconds since the * Unix epoch (Midnight, January 1st, 1970). * * s2n-tls uses this clock for timestamps. * * @param config The configuration object being updated * @param clock_fn The wall clock time callback function * @param ctx An opaque pointer that the callback will be invoked with * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_wall_clock(struct s2n_config *config, s2n_clock_time_nanoseconds clock_fn, void *ctx); /** * Allows the caller to set a callback function that will be used to get * monotonic time. The monotonic time is the time since an arbitrary, unspecified * point. Unlike wall clock time, it MUST never move backwards. * * s2n-tls uses this clock for timers. * * @param config The configuration object being updated * @param clock_fn The monotonic time callback function * @param ctx An opaque pointer that the callback will be invoked with * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_monotonic_clock(struct s2n_config *config, s2n_clock_time_nanoseconds clock_fn, void *ctx); /** * Translates an s2n_error code to a human readable string explaining the error. * * @param error The error code to explain. Usually this is s2n_errno * @param lang The language to explain the error code. Pass "EN" or NULL for English. * @returns The error string */ S2N_API extern const char *s2n_strerror(int error, const char *lang); /** * Translates an s2n_error code to a human readable string containing internal debug * information, including file name and line number. This function is useful when * reporting issues to the s2n-tls development team. * * @param error The error code to explain. Usually this is s2n_errno * @param lang The language to explain the error code. Pass "EN" or NULL for English. * @returns The error string */ S2N_API extern const char *s2n_strerror_debug(int error, const char *lang); /** * Translates an s2n_error code to a human readable string. * * @param error The error code to explain. 
Usually this is s2n_errno * @returns The error string */ S2N_API extern const char *s2n_strerror_name(int error); /** * Translates an s2n_error code to a filename and line number. * * @param error The error code to explain. Usually this is s2n_errno. * @returns The error string. */ S2N_API extern const char *s2n_strerror_source(int error); /** * Opaque stack trace structure. */ struct s2n_stacktrace; /** * Checks if s2n stack trace captures are enabled. * * @returns True if stack traces are enabled. False if they are disabled. */ S2N_API extern bool s2n_stack_traces_enabled(void); /** * Configures the s2n stack trace captures option. * * @param newval Boolean to determine if stack traces should be enabled. True to enable them. False to disable them. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_stack_traces_enabled_set(bool newval); /** * Calculates the s2n stack trace. * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_calculate_stacktrace(void); /** * Prints the s2n stack trace to a file. The file descriptor is expected to be * open and ready for writing. * * @param fptr A pointer to the file s2n-tls should write the stack trace to. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_print_stacktrace(FILE *fptr); /** * Clean up the memory used to contain the stack trace. * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_free_stacktrace(void); /** * Export the s2n_stacktrace. * * @param trace A pointer to the s2n_stacktrace to fill. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_get_stacktrace(struct s2n_stacktrace *trace); /** * Allows the caller to set a callback function that will be used to store SSL * session data in a cache. * * @param config The configuration object being updated * @param cache_store_callback The cache store callback function. * @param data An opaque context pointer that the callback will be invoked with. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_cache_store_callback(struct s2n_config *config, s2n_cache_store_callback cache_store_callback, void *data); /** * Allows the caller to set a callback function that will be used to retrieve SSL * session data from a cache. * * @param config The configuration object being updated * @param cache_retrieve_callback The cache retrieve callback function. * @param data An opaque context pointer that the callback will be invoked with. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_cache_retrieve_callback(struct s2n_config *config, s2n_cache_retrieve_callback cache_retrieve_callback, void *data); /** * Allows the caller to set a callback function that will be used to delete SSL * session data from a cache. * * @param config The configuration object being updated * @param cache_delete_callback The cache delete callback function. * @param data An opaque context pointer that the callback will be invoked with. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_cache_delete_callback(struct s2n_config *config, s2n_cache_delete_callback cache_delete_callback, void *data); /** * Called when `s2n_init` is executed. */ typedef int (*s2n_mem_init_callback)(void); /** * Will be called when `s2n_cleanup` is executed. 
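 *
 * Custom memory callbacks are registered together through s2n_mem_set_callbacks()
 * before s2n_init() is called. A minimal sketch, where the `my_*` functions are
 * hypothetical application-provided implementations:
 *
 *     s2n_mem_set_callbacks(my_mem_init, my_mem_cleanup, my_malloc, my_free);
 *     s2n_init();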
*/ typedef int (*s2n_mem_cleanup_callback)(void); /** * A function that can allocate at least `requested` bytes of memory. * * It stores the location of that memory in **\*ptr** and the size of the allocated * data in **\*allocated**. The function may choose to allocate more memory * than was requested. s2n-tls will consider all allocated memory available for * use, and will attempt to free all allocated memory when able. */ typedef int (*s2n_mem_malloc_callback)(void **ptr, uint32_t requested, uint32_t *allocated); /** * Frees memory allocated by s2n_mem_malloc_callback. */ typedef int (*s2n_mem_free_callback)(void *ptr, uint32_t size); /** * Allows the caller to override s2n-tls's internal memory handling functions. * * @warning This function must be called before s2n_init(). * * @param mem_init_callback The s2n_mem_init_callback * @param mem_cleanup_callback The s2n_mem_cleanup_callback * @param mem_malloc_callback The s2n_mem_malloc_callback * @param mem_free_callback The s2n_mem_free_callback * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_mem_set_callbacks(s2n_mem_init_callback mem_init_callback, s2n_mem_cleanup_callback mem_cleanup_callback, s2n_mem_malloc_callback mem_malloc_callback, s2n_mem_free_callback mem_free_callback); /** * A callback function that will be called when s2n-tls is initialized. */ typedef int (*s2n_rand_init_callback)(void); /** * A callback function that will be called when `s2n_cleanup` is executed. */ typedef int (*s2n_rand_cleanup_callback)(void); /** * A callback function that will be used to provide entropy to the s2n-tls * random number generators. */ typedef int (*s2n_rand_seed_callback)(void *data, uint32_t size); /** * A callback function that will be used to mix in entropy every time the RNG * is invoked. */ typedef int (*s2n_rand_mix_callback)(void *data, uint32_t size); /** * Allows the caller to override s2n-tls's entropy functions. * * @warning This function must be called before s2n_init(). * * @note The overriden random callbacks will not be used when s2n-tls is operating in FIPS mode. * * @param rand_init_callback The s2n_rand_init_callback * @param rand_cleanup_callback The s2n_rand_cleanup_callback * @param rand_seed_callback The s2n_rand_seed_callback * @param rand_mix_callback The s2n_rand_mix_callback * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_rand_set_callbacks(s2n_rand_init_callback rand_init_callback, s2n_rand_cleanup_callback rand_cleanup_callback, s2n_rand_seed_callback rand_seed_callback, s2n_rand_mix_callback rand_mix_callback); /** * TLS extensions supported by s2n-tls */ typedef enum { S2N_EXTENSION_SERVER_NAME = 0, S2N_EXTENSION_MAX_FRAG_LEN = 1, S2N_EXTENSION_OCSP_STAPLING = 5, S2N_EXTENSION_SUPPORTED_GROUPS = 10, S2N_EXTENSION_EC_POINT_FORMATS = 11, S2N_EXTENSION_SIGNATURE_ALGORITHMS = 13, S2N_EXTENSION_ALPN = 16, S2N_EXTENSION_CERTIFICATE_TRANSPARENCY = 18, S2N_EXTENSION_SUPPORTED_VERSIONS = 43, S2N_EXTENSION_RENEGOTIATION_INFO = 65281, } s2n_tls_extension_type; /** * MFL configurations from https://datatracker.ietf.org/doc/html/rfc6066#section-4. */ typedef enum { S2N_TLS_MAX_FRAG_LEN_512 = 1, S2N_TLS_MAX_FRAG_LEN_1024 = 2, S2N_TLS_MAX_FRAG_LEN_2048 = 3, S2N_TLS_MAX_FRAG_LEN_4096 = 4, } s2n_max_frag_len; /** * Opaque certificate type. */ struct s2n_cert; /** * Opaque certificate chain and key type. */ struct s2n_cert_chain_and_key; /** * Opaque key type. */ struct s2n_pkey; /** * Opaque public key type. 
*/ typedef struct s2n_pkey s2n_cert_public_key; /** * Opaque private key type. */ typedef struct s2n_pkey s2n_cert_private_key; /** * Creates a new s2n_cert_chain_and_key object. This object can be associated * with many config objects. It is used to represent a certificate and key pair. * * @returns A new object used to represent a certificate-chain/key pair */ S2N_API extern struct s2n_cert_chain_and_key *s2n_cert_chain_and_key_new(void); /** * Associates a certificate chain and private key with an `s2n_cert_chain_and_key` object. * * `cert_chain_pem` should be a PEM encoded certificate chain, with the first * certificate in the chain being your leaf certificate. `private_key_pem` * should be a PEM encoded private key corresponding to the leaf certificate. * * @note Prefer using s2n_cert_chain_and_key_load_pem_bytes. * * @param chain_and_key The certificate chain and private key handle * @param chain_pem A byte array of a PEM encoded certificate chain. * @param private_key_pem A byte array of a PEM encoded key. * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_and_key_load_pem(struct s2n_cert_chain_and_key *chain_and_key, const char *chain_pem, const char *private_key_pem); /** * Associates a certificate chain and private key with an `s2n_cert_chain_and_key` object. * * `cert_chain_pem` should be a PEM encoded certificate chain, with the first * certificate in the chain being your leaf certificate. `private_key_pem` * should be a PEM encoded private key corresponding to the leaf certificate. * * @param chain_and_key The certificate chain and private key handle * @param chain_pem A byte array of a PEM encoded certificate chain. * @param chain_pem_len Size of `chain_pem` * @param private_key_pem A byte array of a PEM encoded key. * @param private_key_pem_len Size of `private_key_pem` * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_and_key_load_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len, uint8_t *private_key_pem, uint32_t private_key_pem_len); /** * Associates a public certificate chain with a `s2n_cert_chain_and_key` object. It does * NOT set a private key, so the connection will need to be configured to * [offload private key operations](https://github.com/aws/s2n-tls/blob/main/docs/usage-guide/topics/ch12-private-key-ops.md). * * @param chain_and_key The certificate chain and private key handle * @param chain_pem A byte array of a PEM encoded certificate chain. * @param chain_pem_len Size of `chain_pem` * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_and_key_load_public_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len); /** * Frees the memory associated with an `s2n_cert_chain_and_key` object. * * @param cert_and_key The certificate chain and private key handle * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_and_key_free(struct s2n_cert_chain_and_key *cert_and_key); /** * Adds a context to the `s2n_cert_chain_and_key` object. * * @param cert_and_key The certificate chain and private key handle * @param ctx An opaque pointer to user supplied data. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_and_key_set_ctx(struct s2n_cert_chain_and_key *cert_and_key, void *ctx); /** * Get the user supplied context from the `s2n_cert_chain_and_key` object. 
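 *
 * This returns the pointer previously supplied via s2n_cert_chain_and_key_set_ctx().
 * A minimal sketch (the `app_cert_state` type is hypothetical):
 *
 *     s2n_cert_chain_and_key_set_ctx(chain_and_key, state);
 *     ...
 *     struct app_cert_state *state_out = s2n_cert_chain_and_key_get_ctx(chain_and_key);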
* * @param cert_and_key The certificate chain and private key handle * @returns The user supplied pointer from s2n_cert_chain_and_key_set_ctx() */ S2N_API extern void *s2n_cert_chain_and_key_get_ctx(struct s2n_cert_chain_and_key *cert_and_key); /** * Get the private key from the `s2n_cert_chain_and_key` object. * * @param cert_and_key The certificate chain and private key handle * @returns A pointer to the `s2n_cert_private_key` */ S2N_API extern s2n_cert_private_key *s2n_cert_chain_and_key_get_private_key(struct s2n_cert_chain_and_key *cert_and_key); /** * Set the raw OCSP stapling data for a certificate chain. * * @param chain_and_key The certificate chain handle * @param data A pointer to the raw OCSP stapling data bytes. The data will be copied. * @param length The length of the data bytes. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_and_key_set_ocsp_data(struct s2n_cert_chain_and_key *chain_and_key, const uint8_t *data, uint32_t length); /** * Set the signed certificate timestamp (SCT) for a certificate chain. * This is used for Certificate Transparency. * * @param chain_and_key The certificate chain handle * @param data A pointer to the SCT data. The data will be copied. * @param length The length of the data bytes. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_cert_chain_and_key_set_sct_list(struct s2n_cert_chain_and_key *chain_and_key, const uint8_t *data, uint32_t length); /** * A callback function that is invoked if s2n-tls cannot resolve a conflict between * two certificates with the same domain name. This function is invoked while certificates * are added to an `s2n_config`. * * Currently, the only builtin resolution for domain name conflicts is certificate type(RSA, * ECDSA, etc). The callback should return a pointer to the `s2n_cert_chain_and_key` that * should be used for dns name `name`. * * If NULL is returned, the first certificate will be used. Typically an application * will use properties like trust and expiry to implement tiebreaking. */ typedef struct s2n_cert_chain_and_key *(*s2n_cert_tiebreak_callback)(struct s2n_cert_chain_and_key *cert1, struct s2n_cert_chain_and_key *cert2, uint8_t *name, uint32_t name_len); /** * Sets the `s2n_cert_tiebreak_callback` for resolving domain name conflicts. * If no callback is set, the first certificate added for a domain name will always be preferred. * * @param config The configuration object being updated * @param cert_tiebreak_cb The pointer to the certificate tiebreak function * * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_cert_tiebreak_callback(struct s2n_config *config, s2n_cert_tiebreak_callback cert_tiebreak_cb); /** * Associates a certificate chain and private key with an `s2n_config` object. * Using this API, only one cert chain of each type (like ECDSA or RSA) may be associated with a config. * `cert_chain_pem` should be a PEM encoded certificate chain, with the first certificate * in the chain being your server's certificate. `private_key_pem` should be a * PEM encoded private key corresponding to the server certificate. * * @deprecated Use s2n_config_add_cert_chain_and_key_to_store instead. * * @param config The configuration object being updated * @param cert_chain_pem A byte array of a PEM encoded certificate chain. * @param private_key_pem A byte array of a PEM encoded key. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure. 
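 *
 * For new code, roughly equivalent setup with the preferred API looks like this
 * (a sketch; error checking omitted):
 *
 *     struct s2n_cert_chain_and_key *chain_and_key = s2n_cert_chain_and_key_new();
 *     s2n_cert_chain_and_key_load_pem(chain_and_key, cert_chain_pem, private_key_pem);
 *     s2n_config_add_cert_chain_and_key_to_store(config, chain_and_key);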
*/ S2N_API extern int s2n_config_add_cert_chain_and_key(struct s2n_config *config, const char *cert_chain_pem, const char *private_key_pem); /** * The preferred method of associating a certificate chain and private key pair with an `s2n_config` object. * This method may be called multiple times to support multiple key types(RSA, ECDSA) and multiple domains. * On the server side, the certificate selected will be based on the incoming SNI value and the * client's capabilities(supported ciphers). * * In the case of no certificate matching the client's SNI extension or if no SNI extension was sent by * the client, the certificate from the `first` call to `s2n_config_add_cert_chain_and_key_to_store` * will be selected. * * @warning It is not recommended to free or modify the `cert_key_pair` as any subsequent changes will be * reflected in the config. * * @param config The configuration object being updated * @param cert_key_pair The certificate chain and private key handle * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_add_cert_chain_and_key_to_store(struct s2n_config *config, struct s2n_cert_chain_and_key *cert_key_pair); /** * Explicitly sets certificate chain and private key pairs to be used as defaults for each auth * method (key type). A "default" certificate is used when there is not an SNI match with any other * configured certificate. * * Only one certificate can be set as the default per auth method (one RSA default, one ECDSA default, * etc.). All previous default certificates will be cleared and re-set when this API is called. * * This API is called for a specific `s2n_config` object. s2n-tls will attempt to automatically choose * default certificates for each auth method (key type) based on the order that `s2n_cert_chain_and_key` * are added to the `s2n_config` using one of the APIs listed above. * `s2n_config_set_cert_chain_and_key_defaults` can be called at any time; s2n-tls will clear defaults * and no longer attempt to automatically choose any default certificates. * * @param config The configuration object being updated * @param cert_key_pairs An array of certificate chain and private key handles * @param num_cert_key_pairs The amount of handles in cert_key_pairs * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_cert_chain_and_key_defaults(struct s2n_config *config, struct s2n_cert_chain_and_key **cert_key_pairs, uint32_t num_cert_key_pairs); /** * Adds to the trust store from a CA file or directory containing trusted certificates. * * When configs are created with `s2n_config_new()`, the trust store is initialized with default * system certificates. To completely override these certificates, call * `s2n_config_wipe_trust_store()` before calling this function. * * @note The trust store will be initialized with the common locations for the host * operating system by default. * @param config The configuration object being updated * @param ca_pem_filename A string for the file path of the CA PEM file. * @param ca_dir A string for the directory of the CA PEM files. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_verification_ca_location(struct s2n_config *config, const char *ca_pem_filename, const char *ca_dir); /** * Adds a PEM to the trust store. This will allocate memory, and load `pem` into the trust store. * * When configs are created with `s2n_config_new()`, the trust store is initialized with default * system certificates. 
To completely override these certificates, call * `s2n_config_wipe_trust_store()` before calling this function. * * @param config The configuration object being updated * @param pem The string value of the PEM certificate. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_add_pem_to_trust_store(struct s2n_config *config, const char *pem); /** * Clears the trust store of all certificates. * * When configs are created with `s2n_config_new()`, the trust store is initialized with default * system certificates. To completely override these certificates, call this function before * functions like `s2n_config_set_verification_ca_location()` or * `s2n_config_add_pem_to_trust_store()`. * * @param config The configuration object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_wipe_trust_store(struct s2n_config *config); /** * Loads default system certificates into the trust store. * * `s2n_config_new_minimal()` doesn't load default system certificates into the config's trust * store by default. If `config` was created with `s2n_config_new_minimal`, this function can be * used to load system certificates into the trust store. * * @note This API will error if called on a config that has already loaded system certificates * into its trust store, which includes all configs created with `s2n_config_new()`. * * @param config The configuration object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_load_system_certs(struct s2n_config *config); typedef enum { S2N_VERIFY_AFTER_SIGN_DISABLED, S2N_VERIFY_AFTER_SIGN_ENABLED } s2n_verify_after_sign; /** * Toggle whether generated signatures are verified before being sent. * * Although signatures produced by the underlying libcrypto should always be valid, * hardware faults, bugs in the signing implementation, or other uncommon factors * can cause unexpected mistakes in the final signatures. Because these mistakes * can leak information about the private key, applications with low trust in their * hardware or libcrypto may want to verify signatures before sending them. * * However, this feature will significantly impact handshake latency. * Additionally, most libcrypto implementations already check for common errors in signatures. */ S2N_API extern int s2n_config_set_verify_after_sign(struct s2n_config *config, s2n_verify_after_sign mode); /** * Set a custom send buffer size. * * This buffer is used to stage records for sending. By default, * enough memory is allocated to hold a single record of the maximum * size configured for the connection. With the default fragment size, * that is about 8K bytes. * * Less memory can be allocated for the send buffer, but this will result in * smaller, more fragmented records and increased overhead. While the absolute * minimum size required is 1034 bytes, at least 2K bytes is recommended for * reasonable record sizes. * * More memory can be allocated for the send buffer. This will result in s2n-tls * buffering multiple records before sending them, reducing system write calls. * At least 17K bytes is recommended for this use case, or at least 35K bytes * if larger fragment sizes are used via `s2n_connection_prefer_throughput()`. * * @param config The configuration object being updated * @param size The desired custom buffer size. * @returns S2N_SUCCESS on success. 
S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_send_buffer_size(struct s2n_config *config, uint32_t size); /** * Enable or disable receiving of multiple TLS records in a single s2n_recv call * * By default, s2n-tls returns from s2n_recv() after reading a single TLS record. * Enabling receiving of multiple records will instead cause s2n_recv() to attempt * to read until the application-provided output buffer is full. This may be more * efficient, especially if larger receive buffers are used. * * @note If this option is enabled with blocking IO, the call to s2n_recv() will * not return until either the application-provided output buffer is full or the * peer closes the connection. This may lead to unintentionally long waits if the * peer does not send enough data. * * @param config The configuration object being updated * @param enabled Set to `true` if multiple record receive is to be enabled; `false` to disable. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_recv_multi_record(struct s2n_config *config, bool enabled); /** * A callback function invoked (usually multiple times) during X.509 validation for each * name encountered in the leaf certificate. * * Return 1 to trust that hostname or 0 to not trust the hostname. * * If this function returns 1, then the certificate is considered trusted and that portion * of the X.509 validation will succeed. * * If no hostname results in a 1 being returned, the certificate will be untrusted and the * validation will terminate immediately. * * Data is a opaque user context set in s2n_config_set_verify_host_callback() or s2n_connection_set_verify_host_callback(). */ typedef uint8_t (*s2n_verify_host_fn)(const char *host_name, size_t host_name_len, void *data); /** * Sets the callback to use for verifying that a hostname from an X.509 certificate is trusted. * * The default behavior is to require that the hostname match the server name set with s2n_set_server_name(). * This will likely lead to all client certificates being rejected, so the callback will need to be overriden when using * client authentication. * * This change will be inherited by s2n_connections using this config. If a separate callback for different connections * using the same config is desired, see s2n_connection_set_verify_host_callback(). * * @param config The configuration object being updated * @param data A user supplied opaque context to pass back to the callback * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_verify_host_callback(struct s2n_config *config, s2n_verify_host_fn, void *data); /** * Toggles whether or not to validate stapled OCSP responses. * * 1 means OCSP responses will be validated when they are encountered, while 0 means this step will * be skipped. * * The default value is 1 if the underlying libCrypto implementation supports OCSP. * * @param config The configuration object being updated * @param check_ocsp The desired OCSP response check configuration * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_check_stapled_ocsp_response(struct s2n_config *config, uint8_t check_ocsp); /** * Disables timestamp validation for received certificates. * * By default, s2n-tls checks the notBefore and notAfter fields on the certificates it receives * during the handshake. If the current date is not within the range of these fields for any * certificate in the chain of trust, `s2n_negotiate()` will error. 
This validation is in * accordance with RFC 5280, section 6.1.3 a.2: * https://datatracker.ietf.org/doc/html/rfc5280#section-6.1.3. * * This API will disable this timestamp validation, permitting negotiation with peers that send * expired certificates, or certificates that are not yet considered valid. * * @warning Applications calling this API should seriously consider the security implications of * disabling this validation. The validity period of a certificate corresponds to the range of time * in which the CA is guaranteed to maintain information regarding the certificate's revocation * status. As such, it may not be possible to obtain accurate revocation information for * certificates with invalid timestamps. Applications disabling this validation MUST implement * some external method for limiting certificate lifetime. * * @param config The associated connection config. * @returns S2N_SUCCESS on success, S2N_FAILURE on failure. */ S2N_API extern int s2n_config_disable_x509_time_verification(struct s2n_config *config); /** * Turns off all X.509 validation during the negotiation phase of the connection. This should only * be used for testing or debugging purposes. * * @param config The configuration object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_disable_x509_verification(struct s2n_config *config); /** * Sets the maximum allowed depth of a cert chain used for X509 validation. The default value is * 7. If this limit is exceeded, validation will fail if s2n_config_disable_x509_verification() * has not been called. 0 is an illegal value and will return an error. * 1 means only a root certificate will be used. * * @param config The configuration object being updated * @param max_depth The number of allowed certificates in the certificate chain * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_max_cert_chain_depth(struct s2n_config *config, uint16_t max_depth); /** * Associates a set of Diffie-Hellman parameters with an `s2n_config` object. * @note `dhparams_pem` should be PEM encoded DH parameters. * * @param config The configuration object being updated * @param dhparams_pem A string containing the PEM encoded DH parameters. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_add_dhparams(struct s2n_config *config, const char *dhparams_pem); /** * Sets the security policy that includes the cipher/kem/signature/ecc preferences and * protocol version. * * See the [USAGE-GUIDE.md](https://github.com/aws/s2n-tls/blob/main/docs/usage-guide) for how to use security policies. */ S2N_API extern int s2n_config_set_cipher_preferences(struct s2n_config *config, const char *version); /** * Appends the provided application protocol to the preference list * * The data provided in `protocol` parameter will be copied into an internal buffer * * @param config The configuration object being updated * @param protocol A pointer to a byte array value * @param protocol_len The length of bytes that should be read from `protocol`. Note: this value cannot be 0, otherwise an error will be returned. */ S2N_API extern int s2n_config_append_protocol_preference(struct s2n_config *config, const uint8_t *protocol, uint8_t protocol_len); /** * Sets the application protocol preferences on an `s2n_config` object. * `protocols` is a list in order of preference, with most preferred protocol first, and of * length `protocol_count`. 
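 *
 * For example, a client that prefers HTTP/2 over HTTP/1.1 might configure the following
 * (an illustrative sketch; the protocol strings are application-chosen ALPN identifiers):
 *
 *     const char *protocols[] = { "h2", "http/1.1" };
 *     s2n_config_set_protocol_preferences(config, protocols, 2);
 *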
* * When acting as an `S2N_CLIENT` the protocol list is included in the Client Hello message * as the ALPN extension. * * As an `S2N_SERVER`, the list is used to negotiate a mutual application protocol with the * client. After the negotiation for the connection has completed, the agreed upon protocol * can be retrieved with s2n_get_application_protocol() * * @param config The configuration object being updated * @param protocols The list of preferred protocols, in order of preference * @param protocol_count The size of the protocols list * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_protocol_preferences(struct s2n_config *config, const char *const *protocols, int protocol_count); /** * Enum used to define the type, if any, of certificate status request * a connection should make during the handshake. The only supported status request type is * OCSP, `S2N_STATUS_REQUEST_OCSP`. */ typedef enum { S2N_STATUS_REQUEST_NONE = 0, S2N_STATUS_REQUEST_OCSP = 1 } s2n_status_request_type; /** * Sets up a connection to request the certificate status of a peer during an SSL handshake. If set * to S2N_STATUS_REQUEST_NONE, no status request is made. * * @param config The configuration object being updated * @param type The desired request status type * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_status_request_type(struct s2n_config *config, s2n_status_request_type type); /** * Enum to set Certificate Transparency Support level. */ typedef enum { S2N_CT_SUPPORT_NONE = 0, S2N_CT_SUPPORT_REQUEST = 1 } s2n_ct_support_level; /** * Set the Certificate Transparency Support level. * * @param config The configuration object being updated * @param level The desired Certificate Transparency Support configuration * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_ct_support_level(struct s2n_config *config, s2n_ct_support_level level); /** * Sets whether or not a connection should terminate on receiving a WARNING alert from its peer. * * `alert_behavior` can take the following values: * - `S2N_ALERT_FAIL_ON_WARNINGS` default behavior: s2n-tls will terminate the connection if its peer sends a WARNING alert. * - `S2N_ALERT_IGNORE_WARNINGS` - with the exception of `close_notify` s2n-tls will ignore all WARNING alerts and keep communicating with its peer. This setting is ignored in TLS1.3 * * @note TLS1.3 terminates a connection for all alerts except user_canceled. * @warning S2N_ALERT_FAIL_ON_WARNINGS is the recommended behavior. Past TLS protocol vulnerabilities have involved downgrading alerts to warnings. */ typedef enum { S2N_ALERT_FAIL_ON_WARNINGS = 0, S2N_ALERT_IGNORE_WARNINGS = 1 } s2n_alert_behavior; /** * Sets the config's alert behavior based on the `s2n_alert_behavior` enum. * * @param config The configuration object being updated * @param alert_behavior The desired alert behavior. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_alert_behavior(struct s2n_config *config, s2n_alert_behavior alert_behavior); /** * Sets the extension data in the `s2n_config` object for the specified extension. * This method will clear any existing data that is set. If the data and length * parameters are set to NULL, no new data is set in the `s2n_config` object, * effectively clearing existing data. * * @deprecated Use s2n_cert_chain_and_key_set_ocsp_data and s2n_cert_chain_and_key_set_sct_list instead. 
* * @param config The configuration object being updated * @param type The extension type * @param data Data for the extension * @param length Length of the `data` buffer */ S2N_API extern int s2n_config_set_extension_data(struct s2n_config *config, s2n_tls_extension_type type, const uint8_t *data, uint32_t length); /** * Allows the caller to set a TLS Maximum Fragment Length extension that will be used * to fragment outgoing messages. s2n-tls currently does not reject fragments larger * than the configured maximum when in server mode. The TLS negotiated maximum fragment * length overrides the preference set by the `s2n_connection_prefer_throughput` and * `s2n_connection_prefer_low_latency`. * * @note Some TLS implementations do not respect their peer's max fragment length extension. * * @param config The configuration object being updated * @param mfl_code The selected MFL size * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_send_max_fragment_length(struct s2n_config *config, s2n_max_frag_len mfl_code); /** * Allows the server to opt-in to accept client's TLS maximum fragment length extension * requests. If this API is not called, and client requests the extension, server will ignore * the request and continue TLS handshake with default maximum fragment length of 8k bytes * * @note Some TLS implementations do not respect their peer's max fragment length extension. * * @param config The configuration object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_accept_max_fragment_length(struct s2n_config *config); /** * Sets the lifetime of the cached session state. The default value is 15 hours. * * @param config The configuration object being updated * @param lifetime_in_secs The desired lifetime of the session state in seconds * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_session_state_lifetime(struct s2n_config *config, uint64_t lifetime_in_secs); /** * Enable or disable session resumption using session ticket. * * @param config The configuration object being updated * @param enabled The configuration object being updated. Set to 1 to enable. Set to 0 to disable. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_session_tickets_onoff(struct s2n_config *config, uint8_t enabled); /** * Enable or disable session caching. * * @note Session caching will not be turned on unless all three session cache callbacks are set * prior to calling this function. * * @param config The configuration object being updated * @param enabled The configuration object being updated. Set to 1 to enable. Set to 0 to disable. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_session_cache_onoff(struct s2n_config *config, uint8_t enabled); /** * Sets how long a session ticket key will be in a state where it can be used for both encryption * and decryption of tickets on the server side. * * @note The default value is 2 hours. * @param config The configuration object being updated * @param lifetime_in_secs The desired lifetime of decrypting and encrypting tickets in seconds * @returns S2N_SUCCESS on success. 
S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_ticket_encrypt_decrypt_key_lifetime(struct s2n_config *config, uint64_t lifetime_in_secs); /** * Sets how long a session ticket key will be in a state where it can be used just for decryption of * already assigned tickets on the server side. Once decrypted, the session will resume and the * server will issue a new session ticket encrypted using a key in encrypt-decrypt state. * * @note The default value is 13 hours. * @param config The configuration object being updated * @param lifetime_in_secs The desired lifetime of decrypting and encrypting tickets in seconds * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_ticket_decrypt_key_lifetime(struct s2n_config *config, uint64_t lifetime_in_secs); /** * Adds a session ticket key on the server side. It would be ideal to add new keys after every * (encrypt_decrypt_key_lifetime_in_nanos/2) nanos because this will allow for gradual and * linear transition of a key from encrypt-decrypt state to decrypt-only state. * * @param config The configuration object being updated * @param name Name of the session ticket key that should be randomly generated to avoid collisions * @param name_len Length of session ticket key name * @param key Key used to perform encryption/decryption of session ticket * @param key_len Length of the session ticket key * @param intro_time_in_seconds_from_epoch Time at which the session ticket key is introduced. If this is 0, then intro_time_in_seconds_from_epoch is set to now. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_add_ticket_crypto_key(struct s2n_config *config, const uint8_t *name, uint32_t name_len, uint8_t *key, uint32_t key_len, uint64_t intro_time_in_seconds_from_epoch); /** * Sets user defined context on the `s2n_config` object. * * @param config The configuration object being updated * @param ctx A pointer to the user defined ctx. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_ctx(struct s2n_config *config, void *ctx); /** * Gets the user defined context from the `s2n_config` object. * The context is set by calling s2n_config_set_ctx() * * @param config The configuration object being accessed * @param ctx A pointer to the user defined ctx. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_get_ctx(struct s2n_config *config, void **ctx); /** * Used to declare connections as server or client type, respectively. */ typedef enum { S2N_SERVER, S2N_CLIENT } s2n_mode; /** * Creates a new connection object. Each s2n-tls SSL/TLS connection uses * one of these objects. These connection objects can be operated on by up * to two threads at a time, one sender and one receiver, but neither sending * nor receiving are atomic, so if these objects are being called by multiple * sender or receiver threads, you must perform your own locking to ensure * that only one sender or receiver is active at a time. * * The `mode` parameter specifies whether the caller is a server or a client. * Connection objects are reusable across many connections, and should be * reused (to avoid deallocating and allocating memory). You should wipe * connections immediately after use. * * @param mode The desired connection type * @returns An s2n_connection handle */ S2N_API extern struct s2n_connection *s2n_connection_new(s2n_mode mode); /** * Associates a configuration object with a connection.
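 *
 * A minimal sketch of pairing a new connection with an existing config
 * (`config` is assumed to have been created and populated elsewhere):
 *
 *     struct s2n_connection *conn = s2n_connection_new(S2N_SERVER);
 *     if (conn == NULL || s2n_connection_set_config(conn, config) != S2N_SUCCESS) {
 *         // handle the error
 *     }
 *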
* * @param conn The connection object being associated * @param config The configuration object being associated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_config(struct s2n_connection *conn, struct s2n_config *config); /** * Sets user defined context in `s2n_connection` object. * * @param conn The connection object being updated * @param ctx A pointer to the user defined context * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_ctx(struct s2n_connection *conn, void *ctx); /** * Gets user defined context from a `s2n_connection` object. * * @param conn The connection object that contains the desired context */ S2N_API extern void *s2n_connection_get_ctx(struct s2n_connection *conn); /** * The callback function takes a s2n-tls connection as input, which receives the ClientHello * and the context previously provided in `s2n_config_set_client_hello_cb`. The callback can * access any ClientHello information from the connection and use the `s2n_connection_set_config` * call to change the config of the connection. */ typedef int s2n_client_hello_fn(struct s2n_connection *conn, void *ctx); /** * Client Hello callback modes * - `S2N_CLIENT_HELLO_CB_BLOCKING` (default): * - In this mode s2n-tls expects the callback to complete its work and return the appropriate response code before the handshake continues. If any of the connection properties were changed based on the server_name extension the callback must either return a value greater than 0 or invoke `s2n_connection_server_name_extension_used`, otherwise the callback returns 0 to continue the handshake. * - `S2N_CLIENT_HELLO_CB_NONBLOCKING`: * - In non-blocking mode, s2n-tls expects the callback to not complete its work. If the callback returns a response code of 0, s2n-tls will return `S2N_FAILURE` with `S2N_ERR_T_BLOCKED` error type and `s2n_blocked_status` set to `S2N_BLOCKED_ON_APPLICATION_INPUT`. The handshake is paused and further calls to `s2n_negotiate` will continue to return the same error until `s2n_client_hello_cb_done` is invoked for the `s2n_connection` to resume the handshake. If any of the connection properties were changed on the basis of the server_name extension then `s2n_connection_server_name_extension_used` must be invoked before marking the callback done. */ typedef enum { S2N_CLIENT_HELLO_CB_BLOCKING, S2N_CLIENT_HELLO_CB_NONBLOCKING } s2n_client_hello_cb_mode; /** * Allows the caller to set a callback function that will be called after ClientHello was parsed. * * @param config The configuration object being updated * @param client_hello_callback The client hello callback function * @param ctx A pointer to a user defined context that the Client Hello callback will be invoked with. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_client_hello_cb(struct s2n_config *config, s2n_client_hello_fn client_hello_callback, void *ctx); /** * Sets the callback execution mode. * * See s2n_client_hello_cb_mode for each mode's behavior. * * @param config The configuration object being updated * @param cb_mode The desired callback mode * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_client_hello_cb_mode(struct s2n_config *config, s2n_client_hello_cb_mode cb_mode); /** * Marks the non-blocking callback as complete. Can be invoked from within the callback when * operating in non-blocking mode to continue the handshake. 
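 *
 * A rough sketch of registering a ClientHello callback in blocking mode
 * (the callback and config names are illustrative):
 *
 *     static int my_client_hello_cb(struct s2n_connection *conn, void *ctx) {
 *         struct s2n_client_hello *ch = s2n_connection_get_client_hello(conn);
 *         // inspect `ch`, optionally call s2n_connection_set_config(conn, other_config)
 *         return 0; // continue the handshake
 *     }
 *
 *     s2n_config_set_client_hello_cb(config, my_client_hello_cb, NULL);
 *
 * In non-blocking mode the callback would instead return 0 without finishing its work,
 * and the application would later call s2n_client_hello_cb_done() to resume the handshake.
 *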
* * @param conn The connection object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_client_hello_cb_done(struct s2n_connection *conn); /** * Must be invoked if any of the connection properties were changed on the basis of the server_name * extension. This must be invoked before marking the Client Hello callback done. * * @param conn The connection object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_server_name_extension_used(struct s2n_connection *conn); /** * Opaque client hello handle */ struct s2n_client_hello; /** * Get the Client Hello from a s2n_connection. * * Earliest point during the handshake when this structure is available for use is in the * client_hello_callback (see s2n_config_set_client_hello_cb()). * * @param conn The connection object containing the client hello * @returns A handle to the s2n_client_hello structure holding the client hello message sent by the client during the handshake. NULL is returned if a Client Hello has not yet been received and parsed. */ S2N_API extern struct s2n_client_hello *s2n_connection_get_client_hello(struct s2n_connection *conn); /** * Creates an s2n_client_hello from bytes representing a ClientHello message. * * The input bytes should include the message header (message type and length), * but not the record header. * * Unlike s2n_connection_get_client_hello, the s2n_client_hello returned by this * method is owned by the application and must be freed with s2n_client_hello_free. * * This method does not support SSLv2 ClientHellos. * * @param bytes The raw bytes representing the ClientHello. * @param size The size of raw_message. * @returns A new s2n_client_hello on success, or NULL on failure. */ S2N_API extern struct s2n_client_hello *s2n_client_hello_parse_message(const uint8_t *bytes, uint32_t size); /** * Frees an s2n_client_hello structure. * * This method should be called to free s2n_client_hellos returned by * s2n_client_hello_parse_message. It will error if passed an s2n_client_hello * returned by s2n_connection_get_client_hello and owned by the connection. * * @param ch The structure to be freed. * @returns S2N_SUCCESS on success, S2N_FAILURE on failure. */ S2N_API extern int s2n_client_hello_free(struct s2n_client_hello **ch); /** * Function to determine the size of the raw Client Hello buffer. * * Can be used to determine the necessary size of the `out` buffer for * s2n_client_hello_get_raw_message() * * @param ch The Client Hello handle * @returns The size of the ClientHello message received by the server */ S2N_API extern ssize_t s2n_client_hello_get_raw_message_length(struct s2n_client_hello *ch); /** * Copies `max_length` bytes of the ClientHello message into the `out` buffer. * The ClientHello instrumented using this function will have the Random bytes * zero-ed out. * * Note: SSLv2 ClientHello messages follow a different structure than more modern * ClientHello messages. See [RFC5246](https://tools.ietf.org/html/rfc5246#appendix-E.2). * In addition, due to how s2n-tls parses SSLv2 ClientHellos, the raw message is * missing the first three bytes (the msg_type and version) and instead begins with * the cipher_specs. To determine whether a ClientHello is an SSLv2 ClientHello, * you will need to use s2n_connection_get_client_hello_version(). To get the * protocol version advertised in the SSLv2 ClientHello (which may be higher * than SSLv2), you will need to use s2n_connection_get_client_protocol_version(). 
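 *
 * A possible two-step pattern for copying the raw message (illustrative; assumes
 * <stdlib.h> for malloc/free and omits some error handling):
 *
 *     ssize_t raw_len = s2n_client_hello_get_raw_message_length(ch);
 *     if (raw_len > 0) {
 *         uint8_t *raw = malloc((size_t) raw_len);
 *         ssize_t copied = s2n_client_hello_get_raw_message(ch, raw, (uint32_t) raw_len);
 *         // use the first `copied` bytes, then free(raw)
 *     }
 *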
* * @param ch The Client Hello handle * @param out The destination buffer for the raw Client Hello * @param max_length The size of out in bytes * @returns The number of copied bytes */ S2N_API extern ssize_t s2n_client_hello_get_raw_message(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length); /** * Function to determine the size of the Client Hello cipher suites. * This can be used to allocate the `out` buffer for s2n_client_hello_get_cipher_suites(). * * @param ch The Client Hello handle * @returns the number of bytes the cipher_suites takes on the ClientHello message received by the server */ S2N_API extern ssize_t s2n_client_hello_get_cipher_suites_length(struct s2n_client_hello *ch); /** * Copies into the `out` buffer `max_length` bytes of the cipher_suites on the ClientHello. * * Note: SSLv2 ClientHello cipher suites follow a different structure than modern * ClientHello messages. See [RFC5246](https://tools.ietf.org/html/rfc5246#appendix-E.2). * To determine whether a ClientHello is an SSLv2 ClientHello, * you will need to use s2n_connection_get_client_hello_version(). * * @param ch The Client Hello handle * @param out The destination buffer for the raw Client Hello cipher suites * @param max_length The size of out in bytes * @returns The number of copied bytes */ S2N_API extern ssize_t s2n_client_hello_get_cipher_suites(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length); /** * Function to determine the size of the Client Hello extensions. * This can be used to allocate the `out` buffer for s2n_client_hello_get_extensions(). * * @param ch The Client Hello handle * @returns the number of bytes the extensions take in the ClientHello message received by the server */ S2N_API extern ssize_t s2n_client_hello_get_extensions_length(struct s2n_client_hello *ch); /** * Copies into the `out` buffer `max_length` bytes of the extensions in the ClientHello. * * @param ch The Client Hello handle * @param out The destination buffer for the raw Client Hello extensions * @param max_length The size of out in bytes * @returns The number of copied bytes */ S2N_API extern ssize_t s2n_client_hello_get_extensions(struct s2n_client_hello *ch, uint8_t *out, uint32_t max_length); /** * Query the ClientHello message received by the server. Use this function to allocate the `out` buffer for * other client hello extension functions. * * @param ch A pointer to the Client Hello * @param extension_type Indicates the desired extension * @returns The number of bytes the given extension type takes */ S2N_API extern ssize_t s2n_client_hello_get_extension_length(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type); /** * Copies into the `out` buffer `max_length` bytes of a given extension type on the ClientHello * * `ch` is a pointer to the `s2n_client_hello` of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). * * @param ch A pointer to the Client Hello * @param extension_type Indicates the desired extension * @param out A pointer to the buffer that s2n will write the client session id to. This buffer MUST be the size of `max_length` * @param max_length The size of `out`. * @returns The number of copied bytes */ S2N_API extern ssize_t s2n_client_hello_get_extension_by_id(struct s2n_client_hello *ch, s2n_tls_extension_type extension_type, uint8_t *out, uint32_t max_length); /** * Used to check if a particular extension exists in the client hello. 
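 *
 * For example, to check for the ALPN extension (a sketch; 16 is the IANA-assigned
 * value for application_layer_protocol_negotiation):
 *
 *     bool has_alpn = false;
 *     if (s2n_client_hello_has_extension(ch, 16, &has_alpn) == S2N_SUCCESS && has_alpn) {
 *         // the client offered ALPN
 *     }
 *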
 * * `ch` is a pointer to the `s2n_client_hello` of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). * * @param ch A pointer to the client hello object * @param extension_iana The IANA value of the extension * @param exists A pointer that will be set to whether or not the extension exists */ S2N_API extern int s2n_client_hello_has_extension(struct s2n_client_hello *ch, uint16_t extension_iana, bool *exists); /** * Get the ClientHello session id length in bytes * * `ch` is a pointer to the `s2n_client_hello` of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). * * @param ch A pointer to the Client Hello * @param out_length An out pointer. s2n will set its value to the size of the session_id in bytes. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_client_hello_get_session_id_length(struct s2n_client_hello *ch, uint32_t *out_length); /** * Copies up to `max_length` bytes of the ClientHello session_id into the `out` buffer and stores the number of copied bytes in `out_length`. * * Retrieve the session id as sent by the client in the ClientHello message. The session id on the `s2n_connection` may change later * when the server sends the ServerHello; see `s2n_connection_get_session_id` for how to get the final session id used for future session resumption. * * Use s2n_client_hello_get_session_id_length() to get the ClientHello session id length in bytes. `ch` is a pointer to the `s2n_client_hello` * of the `s2n_connection` which can be obtained using s2n_connection_get_client_hello(). * * @param ch A pointer to the Client Hello * @param out A pointer to the buffer that s2n will write the client session id to. This buffer MUST be the size of `max_length` * @param out_length An out pointer. s2n will set its value to the size of the session_id in bytes. * @param max_length The size of `out`. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_client_hello_get_session_id(struct s2n_client_hello *ch, uint8_t *out, uint32_t *out_length, uint32_t max_length); /** * Get the length of the compression methods list sent in the Client Hello. * * @param ch A pointer to the Client Hello * @param out_length An out pointer. Will be set to the length of the compression methods list in bytes. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_client_hello_get_compression_methods_length(struct s2n_client_hello *ch, uint32_t *out_length); /** * Retrieves the list of compression methods sent in the Client Hello. * * Use `s2n_client_hello_get_compression_methods_length()` * to retrieve how much memory should be allocated for the buffer in advance. * * @note Compression methods were removed in TLS1.3 and therefore the only valid value in this list is the * "null" compression method when TLS1.3 is negotiated. * * @note s2n-tls has never supported compression methods in any TLS version and therefore a * compression method will never be negotiated or used. * * @param ch A pointer to the Client Hello * @param list A pointer to some memory that s2n will write the compression methods to. This memory MUST be the size of `list_length` * @param list_length The size of `list`. * @param out_length An out pointer. s2n will set its value to the size of the compression methods list in bytes. * @returns S2N_SUCCESS on success.
S2N_FAILURE on failure */ S2N_API extern int s2n_client_hello_get_compression_methods(struct s2n_client_hello *ch, uint8_t *list, uint32_t list_length, uint32_t *out_length); /** * Access the Client Hello protocol version * * @note This field is a legacy field in TLS1.3 and is no longer used to negotiate the * protocol version of the connection. It will be set to TLS1.2 even if TLS1.3 is negotiated. * Therefore this method should only be used for logging or fingerprinting. * * @param ch A pointer to the client hello struct * @param out The protocol version in the client hello. */ S2N_API extern int s2n_client_hello_get_legacy_protocol_version(struct s2n_client_hello *ch, uint8_t *out); /** * Retrieves the supported groups received from the client in the supported groups extension. * * IANA values for each of the received supported groups are written to the provided `groups` * array, and `groups_count` is set to the number of received supported groups. * * `groups_count_max` should be set to the maximum capacity of the `groups` array. If * `groups_count_max` is less than the number of received supported groups, this function will * error. To determine how large `groups` should be in advance, use * `s2n_client_hello_get_extension_length()` with the S2N_EXTENSION_SUPPORTED_GROUPS extension * type, and divide the value by 2. * * If no supported groups extension was received from the peer, or the received supported groups * extension is malformed, this function will error. * * @param ch A pointer to the ClientHello. Can be retrieved from a connection via * `s2n_connection_get_client_hello()`. * @param groups The array to populate with the received supported groups. * @param groups_count_max The maximum number of supported groups that can fit in the `groups` array. * @param groups_count Returns the number of received supported groups. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure. */ S2N_API extern int s2n_client_hello_get_supported_groups(struct s2n_client_hello *ch, uint16_t *groups, uint16_t groups_count_max, uint16_t *groups_count); /** * Sets the file descriptor for a s2n connection. * * @warning If the read end of the pipe is closed unexpectedly, writing to the pipe will raise a SIGPIPE signal. * **s2n-tls does NOT handle SIGPIPE.** A SIGPIPE signal will cause the process to terminate unless it is handled * or ignored by the application. * @note This file-descriptor should be active and connected * @param conn A pointer to the s2n connection * @param fd The new file descriptor * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_fd(struct s2n_connection *conn, int fd); /** * Sets the file descriptor for the read channel of an s2n connection. * * @warning If the read end of the pipe is closed unexpectedly, writing to the pipe will raise a SIGPIPE signal. * **s2n-tls does NOT handle SIGPIPE.** A SIGPIPE signal will cause the process to terminate unless it is handled * or ignored by the application. * @note This file-descriptor should be active and connected * @param conn A pointer to the s2n connection * @param readfd The new read file descriptor * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_read_fd(struct s2n_connection *conn, int readfd); /** * Sets the assigned file descriptor for the write channel of an s2n connection. 
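 *
 * A small sketch for transports that use separate descriptors for each direction
 * (`read_fd` and `write_fd` are assumed to be open, connected descriptors):
 *
 *     s2n_connection_set_read_fd(conn, read_fd);
 *     s2n_connection_set_write_fd(conn, write_fd);
 *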
 * * @note This file-descriptor should be active and connected * @param conn A pointer to the s2n connection * @param writefd The new write file descriptor * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_write_fd(struct s2n_connection *conn, int writefd); /** * Gets the assigned file descriptor for the read channel of an s2n connection. * * @param conn A pointer to the s2n connection * @param readfd pointer to place the used file descriptor. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_read_fd(struct s2n_connection *conn, int *readfd); /** * Gets the assigned file descriptor for the write channel of an s2n connection. * * @param conn A pointer to the s2n connection * @param writefd pointer to place the used file descriptor. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_write_fd(struct s2n_connection *conn, int *writefd); /** * Indicates to s2n that the connection is using corked IO. * * @warning This API should only be used when using managed send IO. * * @param conn The connection object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_use_corked_io(struct s2n_connection *conn); /** * Function pointer for a user provided recv callback. */ typedef int s2n_recv_fn(void *io_context, uint8_t *buf, uint32_t len); /** * Function pointer for a user provided send callback. */ typedef int s2n_send_fn(void *io_context, const uint8_t *buf, uint32_t len); /** * Set a context containing anything needed in the recv callback function (for example, * a file descriptor), the buffer holding data to be sent or received, and the length of the buffer. * * @note The `io_context` passed to the callbacks may be set separately using `s2n_connection_set_recv_ctx` and `s2n_connection_set_send_ctx`. * * @param conn The connection object being updated * @param ctx A user provided context that the callback will be invoked with * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_recv_ctx(struct s2n_connection *conn, void *ctx); /** * Set a context containing anything needed in the send callback function (for example, * a file descriptor), the buffer holding data to be sent or received, and the length of the buffer. * * @note The `io_context` passed to the callbacks may be set separately using `s2n_connection_set_recv_ctx` and `s2n_connection_set_send_ctx`. * * @param conn The connection object being updated * @param ctx A user provided context that the callback will be invoked with * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_send_ctx(struct s2n_connection *conn, void *ctx); /** * Configure a connection to use a recv callback to receive data. * * @note This callback may be blocking or nonblocking. * @note The callback may receive less than the requested length. The function should return the number * of bytes received, or set errno and return an error code < 0. * * @param conn The connection object being updated * @param recv A recv callback function pointer * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_recv_cb(struct s2n_connection *conn, s2n_recv_fn recv); /** * Configure a connection to use a send callback to send data. * * @note This callback may be blocking or nonblocking. * @note The callback may send less than the requested length.
The function should return the * number of bytes sent or set errno and return an error code < 0. * * @param conn The connection object being updated * @param send A send callback function pointer * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_send_cb(struct s2n_connection *conn, s2n_send_fn send); /** * Change the behavior of s2n-tls when sending data to prefer high throughput. * * Connections preferring throughput will use * large record sizes that minimize overhead. * * @param conn The connection object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_prefer_throughput(struct s2n_connection *conn); /** * Change the behavior of s2n-tls when sending data to prefer low latency. * * Connections preferring low latency will be encrypted * using small record sizes that can be decrypted sooner by the recipient. * * @param conn The connection object being updated * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_prefer_low_latency(struct s2n_connection *conn); /** * Configure the connection to free IO buffers when they are not currently in use. * * This configuration can be used to minimize connection memory footprint size, at the cost * of more calls to alloc and free. Some of these costs can be mitigated by configuring s2n-tls * to use an allocator that includes thread-local caches or lock-free allocation patterns. * * @param conn The connection object being updated * @param enabled Set to `true` if dynamic buffers are enabled; `false` if disabled * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_dynamic_buffers(struct s2n_connection *conn, bool enabled); /** * Changes the behavior of s2n-tls when sending data to initially prefer records * small enough to fit in single ethernet frames. * * When dynamic record sizing is active, the connection sends records small enough * to fit in a single standard 1500 byte ethernet frame. Otherwise, the connection * chooses record sizes according to the configured maximum fragment length. * * Dynamic record sizing is active for the first resize_threshold bytes of a connection, * and is reactivated whenever timeout_threshold seconds pass without sending data. * * @param conn The connection object being updated * @param resize_threshold The number of bytes to send before changing the record size. Maximum 8MiB. * @param timeout_threshold Reset record size back to a single segment after threshold seconds of inactivity * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_dynamic_record_threshold(struct s2n_connection *conn, uint32_t resize_threshold, uint16_t timeout_threshold); /** * Sets the callback to use for verifying that a hostname from an X.509 certificate is trusted. * * The default behavior is to require that the hostname match the server name set with s2n_set_server_name(). This will * likely lead to all client certificates being rejected, so the callback will need to be overridden when using client authentication. * * If a single callback for different connections using the same config is desired, see s2n_config_set_verify_host_callback().
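 *
 * A simplified sketch of a callback that trusts exactly one expected name
 * (real applications may also need wildcard or SAN-list handling; assumes <string.h>):
 *
 *     static uint8_t my_verify_host(const char *host_name, size_t host_name_len, void *data) {
 *         const char *expected = data; // expected name supplied by the application
 *         return (strlen(expected) == host_name_len
 *                 && strncmp(expected, host_name, host_name_len) == 0) ? 1 : 0;
 *     }
 *
 *     s2n_connection_set_verify_host_callback(conn, my_verify_host, (void *) "example.com");
 *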
* * @param conn A pointer to a s2n_connection object * @param host_fn A pointer to a callback function that s2n will invoke in order to verify the hostname of an X.509 certificate * @param data Opaque pointer to data that the verify host function will be invoked with * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_verify_host_callback(struct s2n_connection *conn, s2n_verify_host_fn host_fn, void *data); /** * Used to opt-out of s2n-tls's built-in blinding. Blinding is a * mitigation against timing side-channels which in some cases can leak information * about encrypted data. By default s2n-tls will cause a thread to sleep between 10 and * 30 seconds whenever tampering is detected. * * Setting the S2N_SELF_SERVICE_BLINDING option with s2n_connection_set_blinding() * turns off this behavior. This is useful for applications that are handling many connections * in a single thread. In that case, if s2n_recv() or s2n_negotiate() return an error, * self-service applications should call s2n_connection_get_delay() and pause * activity on the connection for the specified number of nanoseconds before calling * close() or shutdown(). */ typedef enum { S2N_BUILT_IN_BLINDING, S2N_SELF_SERVICE_BLINDING } s2n_blinding; /** * Used to configure s2n-tls to either use built-in blinding (set blinding to S2N_BUILT_IN_BLINDING) or * self-service blinding (set blinding to S2N_SELF_SERVICE_BLINDING). * * @param conn The connection object being updated * @param blinding The desired blinding mode for the connection * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_blinding(struct s2n_connection *conn, s2n_blinding blinding); /** * Query the connection object for the configured blinding delay. * @param conn The connection object being updated * @returns the number of nanoseconds an application using self-service blinding should pause before calling close() or shutdown(). */ S2N_API extern uint64_t s2n_connection_get_delay(struct s2n_connection *conn); /** * Sets the cipher preference override for the s2n_connection. Calling this function is not necessary * unless you want to set the cipher preferences on the connection to something different than what is in the s2n_config. * * @param conn The connection object being updated * @param version The human readable string representation of the security policy version. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_cipher_preferences(struct s2n_connection *conn, const char *version); /** * Appends the provided application protocol to the preference list * * The data provided in `protocol` parameter will be copied into an internal buffer * * @param conn The connection object being updated * @param protocol A pointer to a slice of bytes * @param protocol_len The length of bytes that should be read from `protocol`. Note: this value cannot be 0, otherwise an error will be returned. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_append_protocol_preference(struct s2n_connection *conn, const uint8_t *protocol, uint8_t protocol_len); /** * Sets the protocol preference override for the s2n_connection. Calling this function is not necessary unless you want * to set the protocol preferences on the connection to something different than what is in the s2n_config. 
* * @param conn The connection object being updated * @param protocols A pointer to an array of protocol strings * @param protocol_count The number of protocols contained in protocols * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_protocol_preferences(struct s2n_connection *conn, const char *const *protocols, int protocol_count); /** * Sets the server name for the connection. * * The provided server name will be sent by the client to the server in the * server_name ClientHello extension. It may be desirable for clients * to provide this information to facilitate secure connections to * servers that host multiple 'virtual' servers at a single underlying * network address. * * s2n-tls does not place any restrictions on the provided server name. However, * other TLS implementations might. Specifically, the TLS specification for the * server_name extension requires that it be an ASCII-encoded DNS name without a * trailing dot, and explicitly forbids literal IPv4 or IPv6 addresses. * * @param conn The connection object being queried * @param server_name A pointer to a string containing the desired server name * @warning `server_name` must be a NULL terminated string. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_set_server_name(struct s2n_connection *conn, const char *server_name); /** * Query the connection for the selected server name. * * This can be used by a server to determine which server name the client is using. This function returns the first ServerName entry * in the ServerNameList sent by the client. Subsequent entries are not returned. * * @param conn The connection object being queried * @returns The server name associated with a connection, or NULL if none is found. */ S2N_API extern const char *s2n_get_server_name(struct s2n_connection *conn); /** * Query the connection for the selected application protocol. * * @param conn The connection object being queried * @returns The negotiated application protocol for a `s2n_connection`. In the event of no protocol being negotiated, NULL is returned. */ S2N_API extern const char *s2n_get_application_protocol(struct s2n_connection *conn); /** * Query the connection for a buffer containing the OCSP response. * * @param conn The connection object being queried * @param length A pointer that is set to the certificate transparency response buffer's size * @returns A pointer to the OCSP response sent by a server during the handshake. If no status response is received, NULL is returned. */ S2N_API extern const uint8_t *s2n_connection_get_ocsp_response(struct s2n_connection *conn, uint32_t *length); /** * Query the connection for a buffer containing the Certificate Transparency response. * * @param conn The connection object being queried * @param length A pointer that is set to the certificate transparency response buffer's size * @returns A pointer to the certificate transparency response buffer. */ S2N_API extern const uint8_t *s2n_connection_get_sct_list(struct s2n_connection *conn, uint32_t *length); /** * Used in non-blocking mode to indicate in which direction s2n-tls became blocked on I/O before it * returned control to the caller. This allows an application to avoid retrying s2n-tls operations * until I/O is possible in that direction. 
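 *
 * A typical retry loop for non-blocking I/O might look like the following sketch
 * (it assumes the `s2n_errno` / `s2n_error_get_type()` error helpers declared earlier
 * in this header, an existing `conn`, and some external mechanism such as poll() to
 * wait for transport readiness):
 *
 *     s2n_blocked_status blocked = S2N_NOT_BLOCKED;
 *     while (s2n_negotiate(conn, &blocked) != S2N_SUCCESS) {
 *         if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
 *             break; // a real failure, not just blocked I/O
 *         }
 *         // wait until the transport is readable or writable, depending on `blocked`
 *     }
 *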
*/ typedef enum { S2N_NOT_BLOCKED = 0, S2N_BLOCKED_ON_READ, S2N_BLOCKED_ON_WRITE, S2N_BLOCKED_ON_APPLICATION_INPUT, S2N_BLOCKED_ON_EARLY_DATA, } s2n_blocked_status; /** * Performs the initial "handshake" phase of a TLS connection and must be called before any s2n_recv() or s2n_send() calls. * * @note When using client authentication with TLS1.3, s2n_negotiate() will report a successful * handshake to clients before the server validates the client certificate. If the server then * rejects the client certificate, the client may later receive an alert while calling s2n_recv, * potentially after already having sent application data with s2n_send. * * See the following example for guidance on calling `s2n_negotiate()`: * https://github.com/aws/s2n-tls/blob/main/docs/examples/s2n_negotiate.c * * @param conn A pointer to the s2n_connection object * @param blocked A pointer which will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns S2N_SUCCESS if the handshake completed. S2N_FAILURE if the handshake encountered an error or is blocked. */ S2N_API extern int s2n_negotiate(struct s2n_connection *conn, s2n_blocked_status *blocked); /** * Writes and encrypts `size` of `buf` data to the associated connection. s2n_send() will return the number of bytes * written, and may indicate a partial write. * * @note Partial writes are possible not just for non-blocking I/O, but also for connections aborted while active. * @note Unlike OpenSSL, repeated calls to s2n_send() should not duplicate the original parameters, but should * update `buf` and `size` per the indication of size written. * * See the following example for guidance on calling `s2n_send()`: * https://github.com/aws/s2n-tls/blob/main/docs/examples/s2n_send.c * * @param conn A pointer to the s2n_connection object * @param buf A pointer to a buffer that s2n will write data from * @param size The size of buf * @param blocked A pointer which will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns The number of bytes written, and may indicate a partial write */ S2N_API extern ssize_t s2n_send(struct s2n_connection *conn, const void *buf, ssize_t size, s2n_blocked_status *blocked); /** * Works in the same way as s2n_sendv_with_offset() but with the `offs` parameter implicitly assumed to be 0. * Therefore in the partial write case, the caller would have to make sure that the `bufs` and `count` fields are modified in a way that takes * the partial writes into account. * * @param conn A pointer to the s2n_connection object * @param bufs A pointer to a vector of buffers that s2n will write data from. * @param count The number of buffers in `bufs` * @param blocked A pointer which will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns The number of bytes written, and may indicate a partial write. */ S2N_API extern ssize_t s2n_sendv(struct s2n_connection *conn, const struct iovec *bufs, ssize_t count, s2n_blocked_status *blocked); /** * Works in the same way as s2n_send() except that it accepts vectorized buffers. Will return the number of bytes written, and may indicate a partial write. Partial writes are possible not just for non-blocking I/O, but also for connections aborted while active. * * @note Partial writes are possible not just for non-blocking I/O, but also for connections aborted while active. 
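 *
 * A rough sketch of resuming after partial writes with an explicit offset (assumes the
 * error helpers declared earlier in this header; waiting for writability is omitted):
 *
 *     ssize_t total = 0;
 *     for (ssize_t i = 0; i < count; i++) { total += (ssize_t) bufs[i].iov_len; }
 *     ssize_t written = 0;
 *     while (written < total) {
 *         ssize_t w = s2n_sendv_with_offset(conn, bufs, count, written, &blocked);
 *         if (w < 0) {
 *             if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) { break; }
 *             continue; // wait for writability, then retry
 *         }
 *         written += w;
 *     }
 *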
* * @note Unlike OpenSSL, repeated calls to s2n_sendv_with_offset() should not duplicate the original parameters, but should update `bufs` and `count` per the indication of size written. * * See the following example for guidance on calling `s2n_sendv_with_offset()`: * https://github.com/aws/s2n-tls/blob/main/docs/examples/s2n_send.c * * @param conn A pointer to the s2n_connection object * @param bufs A pointer to a vector of buffers that s2n will write data from. * @param count The number of buffers in `bufs` * @param offs The write cursor offset. This should be updated as data is written. See the example code. * @param blocked A pointer which will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns The number of bytes written, and may indicate a partial write. */ S2N_API extern ssize_t s2n_sendv_with_offset(struct s2n_connection *conn, const struct iovec *bufs, ssize_t count, ssize_t offs, s2n_blocked_status *blocked); /** * Decrypts and reads up to `size` bytes of data into `buf` from the associated * connection. * * @note Unlike OpenSSL, repeated calls to `s2n_recv` should not duplicate the original parameters, but should update `buf` and `size` per the indication of size read. * * See the following example for guidance on calling `s2n_recv()`: * https://github.com/aws/s2n-tls/blob/main/docs/examples/s2n_recv.c * * @param conn A pointer to the s2n_connection object * @param buf A pointer to a buffer that s2n will place read data into. * @param size Size of `buf` * @param blocked A pointer which will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns number of bytes read. 0 if the connection was shutdown by peer. */ S2N_API extern ssize_t s2n_recv(struct s2n_connection *conn, void *buf, ssize_t size, s2n_blocked_status *blocked); /** * Allows users of s2n-tls to peek inside the data buffer of an s2n-tls connection to see if there is more data to be read without actually reading it. * * This is useful when using select() on the underlying s2n-tls file descriptor with a message based application layer protocol. As a single call * to s2n_recv may read all data off the underlying file descriptor, select() will be unable to tell you if there is more application data * ready for processing already loaded into the s2n-tls buffer. * * @note The return value can then be used to determine if s2n_recv() needs to be called before more data comes in on the raw fd * @param conn A pointer to the s2n_connection object * @returns The number of bytes that can be read from the connection */ S2N_API extern uint32_t s2n_peek(struct s2n_connection *conn); /** * Wipes and releases buffers and memory allocated during the TLS handshake. * * @note This function should be called after the handshake is successfully negotiated and logging or recording of handshake data is complete. * * @param conn A pointer to the s2n_connection object * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_free_handshake(struct s2n_connection *conn); /** * Wipes and frees the `in` and `out` buffers associated with a connection. * * @note This function may be called when a connection is * in keep-alive or idle state to reduce memory overhead of long lived connections. * * @param conn A pointer to the s2n_connection object * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_release_buffers(struct s2n_connection *conn); /** * Wipes an existing connection and allows it to be reused.
Erases all data associated with a connection including * pending reads. * * @note This function should be called after all I/O is completed and s2n_shutdown has been called. * @note Reusing the same connection handle(s) is more performant than repeatedly calling s2n_connection_new() and s2n_connection_free(). * * @param conn A pointer to the s2n_connection object * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_wipe(struct s2n_connection *conn); /** * Frees the memory associated with an s2n_connection * handle. The handle is considered invalid after `s2n_connection_free` is used. * s2n_connection_wipe() does not need to be called prior to this function. `s2n_connection_free` performs its own wipe * of sensitive data. * * @param conn A pointer to the s2n_connection object * @returns 0 on success. -1 on failure */ S2N_API extern int s2n_connection_free(struct s2n_connection *conn); /** * Attempts a closure at the TLS layer. Does not close the underlying transport. This call may block in either direction. * * Unlike other TLS implementations, `s2n_shutdown` attempts a graceful shutdown by default. It will not return with success unless a close_notify alert is successfully * sent and received. As a result, `s2n_shutdown` may fail when interacting with a non-conformant TLS implementation. * * Once `s2n_shutdown` is complete: * * The s2n_connection handle cannot be used for reading or writing. * * The underlying transport can be closed. Most likely via `shutdown()` or `close()`. * * The s2n_connection handle can be freed via s2n_connection_free() or reused via s2n_connection_wipe() * * @param conn A pointer to the s2n_connection object * @param blocked A pointer which will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_shutdown(struct s2n_connection *conn, s2n_blocked_status *blocked); /** * Attempts to close the write side of the TLS connection. * * TLS1.3 supports closing the write side of a TLS connection while leaving the read * side unaffected. This feature is usually referred to as "half-close". We send * a close_notify alert, but do not wait for the peer to respond. * * Like `s2n_shutdown()`, this method does not affect the underlying transport. * * `s2n_shutdown_send()` may still be called for earlier TLS versions, but most * TLS implementations will react by immediately discarding any pending writes and * closing the connection. * * Once `s2n_shutdown_send()` is complete: * * The s2n_connection handle CANNOT be used for writing. * * The s2n_connection handle CAN be used for reading. * * The write side of the underlying transport can be closed. Most likely via `shutdown()`. * * The application should still call `s2n_shutdown()` or wait for `s2n_recv()` to * return 0 to indicate end-of-data before cleaning up the connection or closing * the read side of the underlying transport. * * @param conn A pointer to the s2n_connection object * @param blocked A pointer which will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_shutdown_send(struct s2n_connection *conn, s2n_blocked_status *blocked); /** * Used to declare what type of client certificate authentication to use. * * Currently the default for s2n-tls is for neither the server side nor the client side to use Client (aka Mutual) authentication.
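 *
 * As an illustrative sketch (assuming a config that already has its certificates and trust
 * anchors loaded, and with error handling collapsed to exit() for brevity), a server can opt
 * into mutual TLS using the setters documented below:
 *
 *     struct s2n_config *config = s2n_config_new();
 *     if (config == NULL) { exit(1); }
 *     // Require the client to present a certificate; use S2N_CERT_AUTH_OPTIONAL to merely request one.
 *     if (s2n_config_set_client_auth_type(config, S2N_CERT_AUTH_REQUIRED) != S2N_SUCCESS) { exit(1); }
 *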
*/ typedef enum { S2N_CERT_AUTH_NONE, S2N_CERT_AUTH_REQUIRED, S2N_CERT_AUTH_OPTIONAL } s2n_cert_auth_type; /** * Gets Client Certificate authentication method the s2n_config object is using. * * @param config A pointer to a s2n_config object * @param client_auth_type A pointer to a client auth policy. This will be updated to the s2n_config value. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_get_client_auth_type(struct s2n_config *config, s2n_cert_auth_type *client_auth_type); /** * Sets whether or not a Client Certificate should be required to complete the TLS Connection. * * If this is set to `S2N_CERT_AUTH_OPTIONAL` the server will request a client certificate but allow the client to not provide one. * Rejecting a client certificate when using `S2N_CERT_AUTH_OPTIONAL` will terminate the handshake. * * @param config A pointer to a s2n_config object * @param client_auth_type The client auth policy for the connection * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_client_auth_type(struct s2n_config *config, s2n_cert_auth_type client_auth_type); /** * Gets Client Certificate authentication method the s2n_connection object is using. * * @param conn A pointer to the s2n_connection object * @param client_auth_type A pointer to a client auth policy. This will be updated to the s2n_connection value. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_client_auth_type(struct s2n_connection *conn, s2n_cert_auth_type *client_auth_type); /** * Sets whether or not a Client Certificate should be required to complete the TLS Connection. * * If this is set to `S2N_CERT_AUTH_OPTIONAL` the server will request a client certificate but allow the client to not provide one. * Rejecting a client certificate when using `S2N_CERT_AUTH_OPTIONAL` will terminate the handshake. * * @param conn A pointer to the s2n_connection object * @param client_auth_type The client auth policy for the connection * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_client_auth_type(struct s2n_connection *conn, s2n_cert_auth_type client_auth_type); /** * Gets the raw certificate chain received from the client. * * The retrieved certificate chain has the format described by the TLS 1.2 RFC: * https://datatracker.ietf.org/doc/html/rfc5246#section-7.4.2. Each certificate is a DER-encoded ASN.1 X.509, * prepended by a 3 byte network-endian length value. Note that this format is used regardless of the connection's * protocol version. * * @warning The buffer pointed to by `cert_chain_out` shares its lifetime with the s2n_connection object. * * @param conn A pointer to the s2n_connection object * @param cert_chain_out A pointer that's set to the client certificate chain. * @param cert_chain_len A pointer that's set to the size of the `cert_chain_out` buffer. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_client_cert_chain(struct s2n_connection *conn, uint8_t **der_cert_chain_out, uint32_t *cert_chain_len); /** * Sets the initial number of session tickets to send after a >=TLS1.3 handshake. The default value is one ticket. * * @param config A pointer to the config object. * @param num The number of session tickets that will be sent. * @returns S2N_SUCCESS on success. 
S2N_FAILURE on failure */ S2N_API extern int s2n_config_set_initial_ticket_count(struct s2n_config *config, uint8_t num); /** * Increases the number of session tickets to send after a >=TLS1.3 handshake. * * @param conn A pointer to the connection object. * @param num The number of additional session tickets to send. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_add_new_tickets_to_send(struct s2n_connection *conn, uint8_t num); /** * Returns the number of session tickets issued by the server. * * In TLS1.3, this number can be up to the limit configured by s2n_config_set_initial_ticket_count * and s2n_connection_add_new_tickets_to_send. In earlier versions of TLS, this number will be either 0 or 1. * * This method only works for server connections. * * @param conn A pointer to the connection object. * @param num The number of additional session tickets sent. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_get_tickets_sent(struct s2n_connection *conn, uint16_t *num); /** * Sets the keying material lifetime for >=TLS1.3 session tickets so that one session doesn't get re-used ad infinitum. * The default value is one week. * * @param conn A pointer to the connection object. * @param lifetime_in_secs Lifetime of keying material in seconds. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API extern int s2n_connection_set_server_keying_material_lifetime(struct s2n_connection *conn, uint32_t lifetime_in_secs); struct s2n_session_ticket; /** * Callback function for receiving a session ticket. * * This function will be called each time a session ticket is received, which may be multiple times for TLS1.3. * * # Safety * * `ctx` is a void pointer and the caller is responsible for ensuring it is cast to the correct type. * `ticket` is valid only within the scope of this callback. * * @param conn A pointer to the connection object. * @param ctx Context for the session ticket callback function. * @param ticket Pointer to the received session ticket object. */ typedef int (*s2n_session_ticket_fn)(struct s2n_connection *conn, void *ctx, struct s2n_session_ticket *ticket); /** * Sets a session ticket callback to be called when a client receives a new session ticket. * * # Safety * * `callback` MUST cast `ctx` into the same type of pointer that was originally created. * `ctx` MUST be valid for the lifetime of the config, or until a different context is set. * * @param config A pointer to the config object. * @param callback The function that should be called when the callback is triggered. * @param ctx The context to be passed when the callback is called. */ S2N_API extern int s2n_config_set_session_ticket_cb(struct s2n_config *config, s2n_session_ticket_fn callback, void *ctx); /** * Gets the length of the session ticket from a session ticket object. * * @param ticket Pointer to the session ticket object. * @param data_len Pointer to be set to the length of the session ticket on success. */ S2N_API extern int s2n_session_ticket_get_data_len(struct s2n_session_ticket *ticket, size_t *data_len); /** * Gets the session ticket data from a session ticket object. * * # Safety * The entire session ticket will be copied into `data` on success. Therefore, `data` MUST have enough * memory to store the session ticket data. * * @param ticket Pointer to the session ticket object. * @param max_data_len Maximum length of data that can be written to the 'data' pointer. 
* @param data Pointer to where the session ticket data will be stored. */ S2N_API extern int s2n_session_ticket_get_data(struct s2n_session_ticket *ticket, size_t max_data_len, uint8_t *data); /** * Gets the lifetime in seconds of the session ticket from a session ticket object. * * @param ticket Pointer to the session ticket object. * @param session_lifetime Pointer to a variable where the lifetime of the session ticket will be stored. */ S2N_API extern int s2n_session_ticket_get_lifetime(struct s2n_session_ticket *ticket, uint32_t *session_lifetime); /** * De-serializes the session state and updates the connection accordingly. * * If this method fails, the connection should not be affected: calling s2n_negotiate * with the connection should simply result in a full handshake. * * @param conn A pointer to the s2n_connection object * @param session A pointer to a buffer of size `length` * @param length The size of the `session` buffer * * @returns The number of copied bytes */ S2N_API extern int s2n_connection_set_session(struct s2n_connection *conn, const uint8_t *session, size_t length); /** * Serializes the session state from connection and copies into the `session` buffer and returns the number of copied bytes * * @note This function is not recommended for > TLS 1.2 because in TLS1.3 * servers can send multiple session tickets and this function will only * return the most recently received ticket. * * @param conn A pointer to the s2n_connection object * @param session A pointer to a buffer of size `max_length` * @param max_length The size of the `session` buffer * * @returns The number of copied bytes */ S2N_API extern int s2n_connection_get_session(struct s2n_connection *conn, uint8_t *session, size_t max_length); /** * Retrieves a hint from the server indicating how long this ticket's lifetime is. * * @note This function is not recommended for > TLS 1.2 because in TLS1.3 * servers can send multiple session tickets and this function will only * return the most recently received ticket lifetime hint. * * @param conn A pointer to the s2n_connection object * * @returns The session ticket lifetime hint in seconds from the server or -1 when session ticket was not used for resumption. */ S2N_API extern int s2n_connection_get_session_ticket_lifetime_hint(struct s2n_connection *conn); /** * Use this to query the serialized session state size before copying it into a buffer. * * @param conn A pointer to the s2n_connection object * * @returns number of bytes needed to store serialized session state */ S2N_API extern int s2n_connection_get_session_length(struct s2n_connection *conn); /** * Gets the latest session id's length from the connection. * * Use this to query the session id size before copying it into a buffer. * * @param conn A pointer to the s2n_connection object * * @returns The latest session id length from the connection. Session id length will be 0 for TLS versions >= TLS1.3 as stateful session resumption has not yet been implemented in TLS1.3. */ S2N_API extern int s2n_connection_get_session_id_length(struct s2n_connection *conn); /** * Gets the latest session id from the connection, copies it into the `session_id` buffer, and returns the number of copied bytes. * * The session id may change between s2n receiving the ClientHello and sending the ServerHello, but this function will always describe the latest session id. * * See s2n_client_hello_get_session_id() to get the session id as it was sent by the client in the ClientHello message. 
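 *
 * As a rough sketch of the resumption flow built on s2n_connection_get_session() and
 * s2n_connection_set_session() documented above (a hypothetical helper that assumes <stdlib.h>;
 * real code should also track the ticket lifetime hint and TLS1.3 multi-ticket behavior):
 *
 *     static int app_reuse_session(struct s2n_connection *old_conn, struct s2n_connection *new_conn)
 *     {
 *         int len = s2n_connection_get_session_length(old_conn);
 *         if (len <= 0) { return -1; }
 *         uint8_t *state = malloc((size_t) len);
 *         if (state == NULL) { return -1; }
 *         int copied = s2n_connection_get_session(old_conn, state, (size_t) len);
 *         // Apply the serialized state to the new connection before negotiating it.
 *         int rc = (copied > 0) ? s2n_connection_set_session(new_conn, state, (size_t) copied) : -1;
 *         free(state);
 *         return (rc < 0) ? -1 : 0;
 *     }
 *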
* * @param conn A pointer to the s2n_connection object * @param session_id A pointer to a buffer of size `max_length` * @param max_length The size of the `session_id` buffer * * @returns The number of copied bytes. */ S2N_API extern int s2n_connection_get_session_id(struct s2n_connection *conn, uint8_t *session_id, size_t max_length); /** * Check if the connection was resumed from an earlier handshake. * * @param conn A pointer to the s2n_connection object * * @returns returns 1 if the handshake was abbreviated, otherwise returns 0 */ S2N_API extern int s2n_connection_is_session_resumed(struct s2n_connection *conn); /** * Check if the connection is OCSP stapled. * * @param conn A pointer to the s2n_connection object * * @returns 1 if OCSP response was sent (if connection is in S2N_SERVER mode) or received (if connection is in S2N_CLIENT mode) during handshake, otherwise it returns 0. */ S2N_API extern int s2n_connection_is_ocsp_stapled(struct s2n_connection *conn); /** * TLS Signature Algorithms - RFC 5246 7.4.1.4.1 * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16 */ typedef enum { S2N_TLS_SIGNATURE_ANONYMOUS = 0, S2N_TLS_SIGNATURE_RSA = 1, S2N_TLS_SIGNATURE_ECDSA = 3, /* Use Private Range for RSA PSS since it's not defined there */ S2N_TLS_SIGNATURE_RSA_PSS_RSAE = 224, S2N_TLS_SIGNATURE_RSA_PSS_PSS } s2n_tls_signature_algorithm; /** TLS Hash Algorithms - https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18 */ typedef enum { S2N_TLS_HASH_NONE = 0, S2N_TLS_HASH_MD5 = 1, S2N_TLS_HASH_SHA1 = 2, S2N_TLS_HASH_SHA224 = 3, S2N_TLS_HASH_SHA256 = 4, S2N_TLS_HASH_SHA384 = 5, S2N_TLS_HASH_SHA512 = 6, /* Use Private Range for MD5_SHA1 */ S2N_TLS_HASH_MD5_SHA1 = 224 } s2n_tls_hash_algorithm; /** * Get the connection's selected signature algorithm. * * @param conn A pointer to the s2n_connection object * @param chosen_alg A pointer to a s2n_tls_signature_algorithm object. This is an output parameter. * * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. */ S2N_API extern int s2n_connection_get_selected_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *chosen_alg); /** * Get the connection's selected digest algorithm. * * @param conn A pointer to the s2n_connection object * @param chosen_alg A pointer to a s2n_tls_hash_algorithm object. This is an output parameter. * * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. */ S2N_API extern int s2n_connection_get_selected_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *chosen_alg); /** * Get the client certificate's signature algorithm. * * @param conn A pointer to the s2n_connection object * @param chosen_alg A pointer to a s2n_tls_signature_algorithm object. This is an output parameter. * * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. */ S2N_API extern int s2n_connection_get_selected_client_cert_signature_algorithm(struct s2n_connection *conn, s2n_tls_signature_algorithm *chosen_alg); /** * Get the client certificate's digest algorithm. * * @param conn A pointer to the s2n_connection object * @param chosen_alg A pointer to a s2n_tls_hash_algorithm object. This is an output parameter. * * @returns S2N_SUCCESS on success. S2N_FAILURE if bad parameters are received. 
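 *
 * As a small illustrative sketch, the connection-level getters above can be combined to log
 * what was negotiated once the handshake completes:
 *
 *     s2n_tls_signature_algorithm sig = S2N_TLS_SIGNATURE_ANONYMOUS;
 *     s2n_tls_hash_algorithm hash = S2N_TLS_HASH_NONE;
 *     if (s2n_connection_get_selected_signature_algorithm(conn, &sig) == S2N_SUCCESS
 *             && s2n_connection_get_selected_digest_algorithm(conn, &hash) == S2N_SUCCESS) {
 *         // e.g. sig == S2N_TLS_SIGNATURE_ECDSA with hash == S2N_TLS_HASH_SHA256
 *     }
 *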
*/ S2N_API extern int s2n_connection_get_selected_client_cert_digest_algorithm(struct s2n_connection *conn, s2n_tls_hash_algorithm *chosen_alg); /** * Get the certificate used during the TLS handshake * * - If `conn` is a server connection, the certificate selected will depend on the * ServerName sent by the client and supported ciphers. * - If `conn` is a client connection, the certificate sent in response to a CertificateRequest * message is returned. Currently s2n-tls supports loading only one certificate in client mode. Note that * not all TLS endpoints will request a certificate. * * @param conn A pointer to the s2n_connection object * * @returns NULL if the certificate selection phase of the handshake has not completed or if a certificate was not requested by the peer */ S2N_API extern struct s2n_cert_chain_and_key *s2n_connection_get_selected_cert(struct s2n_connection *conn); /** * @param chain_and_key A pointer to the s2n_cert_chain_and_key object being read. * @param cert_length This return value represents the length of the s2n certificate chain `chain_and_key`. * @returns the length of the s2n certificate chain `chain_and_key`. */ S2N_API extern int s2n_cert_chain_get_length(const struct s2n_cert_chain_and_key *chain_and_key, uint32_t *cert_length); /** * Returns the certificate `out_cert` present at the index `cert_idx` of the certificate chain `chain_and_key`. * * Note that the index of the leaf certificate is zero. If the certificate chain `chain_and_key` is NULL or the * certificate index value is not in the acceptable range for the input certificate chain, an error is returned. * * # Safety * * There is no memory allocation required for `out_cert` buffer prior to calling the `s2n_cert_chain_get_cert` API. * The `out_cert` will contain the pointer to the s2n_cert initialized within the input s2n_cert_chain_and_key `chain_and_key`. * The pointer to the output s2n certificate `out_cert` is valid until `chain_and_key` is freed up. * If a caller wishes to persist the `out_cert` beyond the lifetime of `chain_and_key`, the contents would need to be * copied prior to freeing `chain_and_key`. * * @param chain_and_key A pointer to the s2n_cert_chain_and_key object being read. * @param out_cert A pointer to the output s2n_cert `out_cert` present at the index `cert_idx` of the certificate chain `chain_and_key`. * @param cert_idx The certificate index for the requested certificate within the s2n certificate chain. */ S2N_API extern int s2n_cert_chain_get_cert(const struct s2n_cert_chain_and_key *chain_and_key, struct s2n_cert **out_cert, const uint32_t cert_idx); /** * Returns the s2n certificate in DER format along with its length. * * The API gets the s2n certificate `cert` in DER format. The certificate is returned in the `out_cert_der` buffer. * Here, `cert_len` represents the length of the certificate. * * A caller can use certificate parsing tools such as the ones provided by OpenSSL to parse the DER encoded certificate chain returned. * * # Safety * * The memory for the `out_cert_der` buffer is allocated and owned by s2n-tls. * Since the size of the certificate can potentially be very large, a pointer to internal connection data is returned instead of * copying the contents into a caller-provided buffer. * * The pointer to the output buffer `out_cert_der` is valid only while the connection exists. 
* The `s2n_connection_free` API frees the memory associated with the out_cert_der buffer and after the `s2n_connection_wipe` API is * called the memory pointed by out_cert_der is invalid. * * If a caller wishes to persist the `out_cert_der` beyond the lifetime of the connection, the contents would need to be * copied prior to the connection termination. * * @param cert A pointer to the s2n_cert object being read. * @param out_cert_der A pointer to the output buffer which will hold the s2n certificate `cert` in DER format. * @param cert_length This return value represents the length of the certificate. */ S2N_API extern int s2n_cert_get_der(const struct s2n_cert *cert, const uint8_t **out_cert_der, uint32_t *cert_length); /** * Returns the validated peer certificate chain as a `s2n_cert_chain_and_key` opaque object. * * The `s2n_cert_chain_and_key` parameter must be allocated by the caller using the `s2n_cert_chain_and_key_new` API * prior to this function call and must be empty. To free the memory associated with the `s2n_cert_chain_and_key` object use the * `s2n_cert_chain_and_key_free` API. * * @param conn A pointer to the s2n_connection object being read. * @param cert_chain The returned validated peer certificate chain `cert_chain` retrieved from the s2n connection. */ S2N_API extern int s2n_connection_get_peer_cert_chain(const struct s2n_connection *conn, struct s2n_cert_chain_and_key *cert_chain); /** * Returns the length of the DER encoded extension value of the ASN.1 X.509 certificate extension. * * @param cert A pointer to the s2n_cert object being read. * @param oid A null-terminated cstring that contains the OID of the X.509 certificate extension to be read. * @param ext_value_len This return value contains the length of DER encoded extension value of the ASN.1 X.509 certificate extension. */ S2N_API extern int s2n_cert_get_x509_extension_value_length(struct s2n_cert *cert, const uint8_t *oid, uint32_t *ext_value_len); /** * Returns the DER encoding of an ASN.1 X.509 certificate extension value, it's length and a boolean critical. * * @param cert A pointer to the s2n_cert object being read. * @param oid A null-terminated cstring that contains the OID of the X.509 certificate extension to be read. * @param ext_value A pointer to the output buffer which will hold the DER encoding of an ASN.1 X.509 certificate extension value returned. * @param ext_value_len This value is both an input and output parameter and represents the length of the output buffer `ext_value`. * When used as an input parameter, the caller must use this parameter to convey the maximum length of `ext_value`. * When used as an output parameter, `ext_value_len` holds the actual length of the DER encoding of the ASN.1 X.509 certificate extension value returned. * @param critical This return value contains the boolean value for `critical`. */ S2N_API extern int s2n_cert_get_x509_extension_value(struct s2n_cert *cert, const uint8_t *oid, uint8_t *ext_value, uint32_t *ext_value_len, bool *critical); /** * Returns the UTF8 String length of the ASN.1 X.509 certificate extension data. * * @param extension_data A pointer to the DER encoded ASN.1 X.509 certificate extension value being read. * @param extension_len represents the length of the input buffer `extension_data`. * @param utf8_str_len This return value contains the UTF8 String length of the ASN.1 X.509 certificate extension data. 
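 *
 * As an illustrative sketch tying together the certificate accessors documented above
 * (s2n_connection_get_peer_cert_chain(), s2n_cert_chain_get_length(), s2n_cert_chain_get_cert()
 * and s2n_cert_get_der(); error handling abbreviated):
 *
 *     static int app_walk_peer_chain(struct s2n_connection *conn)
 *     {
 *         struct s2n_cert_chain_and_key *chain = s2n_cert_chain_and_key_new();
 *         if (chain == NULL) { return -1; }
 *         uint32_t count = 0;
 *         int rc = -1;
 *         if (s2n_connection_get_peer_cert_chain(conn, chain) == S2N_SUCCESS
 *                 && s2n_cert_chain_get_length(chain, &count) == S2N_SUCCESS) {
 *             rc = 0;
 *             for (uint32_t i = 0; i < count; i++) {
 *                 struct s2n_cert *cert = NULL;
 *                 const uint8_t *der = NULL;
 *                 uint32_t der_len = 0;
 *                 if (s2n_cert_chain_get_cert(chain, &cert, i) != S2N_SUCCESS
 *                         || s2n_cert_get_der(cert, &der, &der_len) != S2N_SUCCESS) {
 *                     rc = -1;
 *                     break;
 *                 }
 *                 // der references s2n-owned memory; copy it before the connection is wiped or freed
 *             }
 *         }
 *         s2n_cert_chain_and_key_free(chain);
 *         return rc;
 *     }
 *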
*/ S2N_API extern int s2n_cert_get_utf8_string_from_extension_data_length(const uint8_t *extension_data, uint32_t extension_len, uint32_t *utf8_str_len); /** * Returns the UTF8 String representation of the DER encoded ASN.1 X.509 certificate extension data. * * @param extension_data A pointer to the DER encoded ASN.1 X.509 certificate extension value being read. * @param extension_len represents the length of the input buffer `extension_data`. * @param out_data A pointer to the output buffer which will hold the UTF8 String representation of the DER encoded ASN.1 X.509 * certificate extension data returned. * @param out_len This value is both an input and output parameter and represents the length of the output buffer `out_data`. * When used as an input parameter, the caller must use this parameter to convey the maximum length of `out_data`. * When used as an output parameter, `out_len` holds the actual length of UTF8 String returned. */ S2N_API extern int s2n_cert_get_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len); /** * Pre-shared key (PSK) Hash Algorithm - RFC 8446 Section-2.2 */ typedef enum { S2N_PSK_HMAC_SHA256, S2N_PSK_HMAC_SHA384, } s2n_psk_hmac; /** * Opaque pre shared key handle */ struct s2n_psk; /** * Creates a new s2n external pre-shared key (PSK) object with `S2N_PSK_HMAC_SHA256` as the default * PSK hash algorithm. An external PSK is a key established outside of TLS using a secure mutually agreed upon mechanism. * * Use `s2n_psk_free` to free the memory allocated to the s2n external PSK object created by this API. * * @returns struct s2n_psk* Returns a pointer to the newly created external PSK object. */ S2N_API struct s2n_psk *s2n_external_psk_new(void); /** * Frees the memory associated with the external PSK object. * * @param psk Pointer to the PSK object to be freed. */ S2N_API int s2n_psk_free(struct s2n_psk **psk); /** * Sets the identity for a given external PSK object. * The identity is a unique identifier for the pre-shared secret. * It is a non-secret value represented by raw bytes. * * # Safety * * The identity is transmitted over the network unencrypted and is a non-secret value. * Do not include confidential information in the identity. * * Note that the identity is copied into s2n-tls memory and the caller is responsible for * freeing the memory associated with the identity input. * * @param psk A pointer to a PSK object to be updated with the identity. * @param identity The identity in raw bytes format to be copied. * @param identity_size The length of the PSK identity being set. */ S2N_API int s2n_psk_set_identity(struct s2n_psk *psk, const uint8_t *identity, uint16_t identity_size); /** * Sets the out-of-band/externally provisioned secret for a given external PSK object. * * # Safety * * Note that the secret is copied into s2n-tls memory and the caller is responsible for * freeing the memory associated with the `secret` input. * * Deriving a shared secret from a password or other low-entropy source * is not secure and is subject to dictionary attacks. * See https://tools.ietf.org/rfc/rfc8446#section-2.2 for more information. * * @param psk A pointer to a PSK object to be updated with the secret. * @param secret The secret in raw bytes format to be copied. * @param secret_size The length of the pre-shared secret being set. */ S2N_API int s2n_psk_set_secret(struct s2n_psk *psk, const uint8_t *secret, uint16_t secret_size); /** * Sets the hash algorithm for a given external PSK object. 
The supported PSK hash * algorithms are as listed in the enum `s2n_psk_hmac` above. * * @param psk A pointer to the external PSK object to be updated with the PSK hash algorithm. * @param hmac The PSK hash algorithm being set. */ S2N_API int s2n_psk_set_hmac(struct s2n_psk *psk, s2n_psk_hmac hmac); /** * Appends a PSK object to the list of PSKs supported by the s2n connection. * If a PSK with a duplicate identity is found, an error is returned and the PSK is not added to the list. * Note that a copy of `psk` is stored on the connection. The user is still responsible for freeing the * memory associated with `psk`. * * @param conn A pointer to the s2n_connection object that contains the list of PSKs supported. * @param psk A pointer to the `s2n_psk` object to be appended to the list of PSKs on the s2n connection. */ S2N_API int s2n_connection_append_psk(struct s2n_connection *conn, struct s2n_psk *psk); /** * The list of PSK modes supported by s2n-tls for TLS versions >= TLS1.3. * Currently s2n-tls supports two modes - `S2N_PSK_MODE_RESUMPTION`, which represents the PSKs established * using the previous connection via session resumption, and `S2N_PSK_MODE_EXTERNAL`, which represents PSKs * established out-of-band/externally using a secure mutually agreed upon mechanism. */ typedef enum { S2N_PSK_MODE_RESUMPTION, S2N_PSK_MODE_EXTERNAL } s2n_psk_mode; /** * Sets the PSK mode on the s2n config object. * The supported PSK modes are listed in the enum `s2n_psk_mode` above. * * @param config A pointer to the s2n_config object being updated. * @param mode The PSK mode to be set. */ S2N_API int s2n_config_set_psk_mode(struct s2n_config *config, s2n_psk_mode mode); /** * Sets the PSK mode on the s2n connection object. * The supported PSK modes are listed in the enum `s2n_psk_mode` above. * This API overrides the PSK mode set on config for this connection. * * @param conn A pointer to the s2n_connection object being updated. * @param mode The PSK mode to be set. */ S2N_API int s2n_connection_set_psk_mode(struct s2n_connection *conn, s2n_psk_mode mode); /** * Gets the negotiated PSK identity length from the s2n connection object. The negotiated PSK * refers to the chosen PSK by the server to be used for the connection. * * This API can be used to determine if the negotiated PSK exists. If negotiated PSK exists a * call to this API returns a value greater than zero. If the negotiated PSK does not exist, the * value `0` is returned. * * @param conn A pointer to the s2n_connection object that successfully negotiated a PSK connection. * @param identity_length The length of the negotiated PSK identity. */ S2N_API int s2n_connection_get_negotiated_psk_identity_length(struct s2n_connection *conn, uint16_t *identity_length); /** * Gets the negotiated PSK identity from the s2n connection object. * If the negotiated PSK does not exist, the PSK identity will not be obtained and no error will be returned. * Prior to this API call, use `s2n_connection_get_negotiated_psk_identity_length` to determine if a * negotiated PSK exists or not. * * # Safety * * The negotiated PSK identity will be copied into the identity buffer on success. * Therefore, the identity buffer must have enough memory to fit the identity length. * * @param conn A pointer to the s2n_connection object. * @param identity The negotiated PSK identity obtained from the s2n_connection object. * @param max_identity_length The maximum length for the PSK identity. 
If the negotiated psk_identity length is * greater than this `max_identity_length` value an error will be returned. */ S2N_API int s2n_connection_get_negotiated_psk_identity(struct s2n_connection *conn, uint8_t *identity, uint16_t max_identity_length); struct s2n_offered_psk; /** * Creates a new s2n offered PSK object. * An offered PSK object represents a single PSK sent by the client. * * # Safety * * Use `s2n_offered_psk_free` to free the memory allocated to the s2n offered PSK object created by this API. * * @returns struct s2n_offered_psk* Returns a pointer to the newly created offered PSK object. */ S2N_API struct s2n_offered_psk *s2n_offered_psk_new(void); /** * Frees the memory associated with the `s2n_offered_psk` object. * * @param psk A pointer to the `s2n_offered_psk` object to be freed. */ S2N_API int s2n_offered_psk_free(struct s2n_offered_psk **psk); /** * Gets the PSK identity and PSK identity length for a given offered PSK object. * * @param psk A pointer to the offered PSK object being read. * @param identity The PSK identity being obtained. * @param size The length of the PSK identity being obtained. */ S2N_API int s2n_offered_psk_get_identity(struct s2n_offered_psk *psk, uint8_t **identity, uint16_t *size); struct s2n_offered_psk_list; /** * Checks whether the offered PSK list has an offered psk object next in line in the list. * An offered PSK list contains all the PSKs offered by the client for the server to select. * * # Safety * * This API returns a pointer to the s2n-tls internal memory with limited lifetime. * After the completion of `s2n_psk_selection_callback` this pointer is invalid. * * @param psk_list A pointer to the offered PSK list being read. * @returns bool A boolean value representing whether an offered psk object is present next in line in the offered PSK list. */ S2N_API bool s2n_offered_psk_list_has_next(struct s2n_offered_psk_list *psk_list); /** * Obtains the next offered PSK object from the list of offered PSKs. Use `s2n_offered_psk_list_has_next` * prior to this API call to ensure we have not reached the end of the list. * * @param psk_list A pointer to the offered PSK list being read. * @param psk A pointer to the next offered PSK object being obtained. */ S2N_API int s2n_offered_psk_list_next(struct s2n_offered_psk_list *psk_list, struct s2n_offered_psk *psk); /** * Returns the offered PSK list to its original read state. * * When `s2n_offered_psk_list_reread` is called, `s2n_offered_psk_list_next` will return the first PSK * in the offered PSK list. * * @param psk_list A pointer to the offered PSK list being reread. */ S2N_API int s2n_offered_psk_list_reread(struct s2n_offered_psk_list *psk_list); /** * Chooses a PSK from the offered PSK list to be used for the connection. * This API matches the PSK identity received from the client against the server's known PSK identities * list, in order to choose the PSK to be used for the connection. If the PSK identity sent from the client * is NULL, no PSK is chosen for the connection. If the client offered PSK identity has no matching PSK identity * with the server, an error will be returned. Use this API along with the `s2n_psk_selection_callback` callback * to select a PSK identity. * * @param psk_list A pointer to the server's known PSK list used to compare for a matching PSK with the client. * @param psk A pointer to the client's PSK object used to compare with the server's known PSK identities. 
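 *
 * As an illustrative sketch of a selection callback (registered later via
 * s2n_config_set_psk_selection_callback()), choosing the first offered identity that matches a
 * PSK previously appended to the server connection:
 *
 *     static int app_select_psk(struct s2n_connection *conn, void *context, struct s2n_offered_psk_list *psk_list)
 *     {
 *         (void) conn;
 *         (void) context;
 *         struct s2n_offered_psk *offered = s2n_offered_psk_new();
 *         if (offered == NULL) { return -1; }
 *         int rc = -1;
 *         while (s2n_offered_psk_list_has_next(psk_list)) {
 *             if (s2n_offered_psk_list_next(psk_list, offered) != S2N_SUCCESS) { break; }
 *             // choose_psk() fails if this identity does not match a PSK known to the server
 *             if (s2n_offered_psk_list_choose_psk(psk_list, offered) == S2N_SUCCESS) {
 *                 rc = 0;
 *                 break;
 *             }
 *         }
 *         s2n_offered_psk_free(&offered);
 *         return rc;
 *     }
 *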
*/ S2N_API int s2n_offered_psk_list_choose_psk(struct s2n_offered_psk_list *psk_list, struct s2n_offered_psk *psk); /** * Callback function to select a PSK from a list of offered PSKs. * Use this callback to implement custom PSK selection logic. The s2n-tls default PSK selection logic * chooses the first matching PSK from the list of offered PSKs sent by the client. * * # Safety * * `context` is a void pointer and the caller is responsible for ensuring it is cast to the correct type. * After the completion of this callback, the pointer to `psk_list` is invalid. * * @param conn A pointer to the s2n_connection object. * @param context A pointer to a context for the caller to pass state to the callback, if needed. * @param psk_list A pointer to the offered PSK list being read. */ typedef int (*s2n_psk_selection_callback)(struct s2n_connection *conn, void *context, struct s2n_offered_psk_list *psk_list); /** * Sets the callback to select the matching PSK. * If this callback is not set s2n-tls uses a default PSK selection logic that selects the first matching * server PSK. * * @param config A pointer to the s2n_config object. * @param cb The function that should be called when the callback is triggered. * @param context A pointer to a context for the caller to pass state to the callback, if needed. */ S2N_API int s2n_config_set_psk_selection_callback(struct s2n_config *config, s2n_psk_selection_callback cb, void *context); /** * Get the number of bytes the connection has received. * * @param conn A pointer to the connection * @returns return the number of bytes received by s2n-tls "on the wire" */ S2N_API extern uint64_t s2n_connection_get_wire_bytes_in(struct s2n_connection *conn); /** * Get the number of bytes the connection has transmitted out. * * @param conn A pointer to the connection * @returns return the number of bytes transmitted out by s2n-tls "on the wire" */ S2N_API extern uint64_t s2n_connection_get_wire_bytes_out(struct s2n_connection *conn); /** * Access the protocol version supported by the client. * * @note The return value corresponds to the macros defined as S2N_SSLv2, * S2N_SSLv3, S2N_TLS10, S2N_TLS11, S2N_TLS12, and S2N_TLS13. * * @param conn A pointer to the connection * @returns returns the highest protocol version supported by the client */ S2N_API extern int s2n_connection_get_client_protocol_version(struct s2n_connection *conn); /** * Access the protocol version supported by the server. * * @note The return value corresponds to the macros defined as S2N_SSLv2, * S2N_SSLv3, S2N_TLS10, S2N_TLS11, S2N_TLS12, and S2N_TLS13. * * @param conn A pointer to the connection * @returns Returns the highest protocol version supported by the server */ S2N_API extern int s2n_connection_get_server_protocol_version(struct s2n_connection *conn); /** * Access the protocol version selected for the connection. * * @note The return value corresponds to the macros defined as S2N_SSLv2, * S2N_SSLv3, S2N_TLS10, S2N_TLS11, S2N_TLS12, and S2N_TLS13. * * @param conn A pointer to the connection * @returns The protocol version actually negotiated by the handshake */ S2N_API extern int s2n_connection_get_actual_protocol_version(struct s2n_connection *conn); /** * Access the client hello protocol version for the connection. * * @note The return value corresponds to the macros defined as S2N_SSLv2, * S2N_SSLv3, S2N_TLS10, S2N_TLS11, S2N_TLS12, and S2N_TLS13. * * @param conn A pointer to the connection * @returns The protocol version used to send the initial client hello message. 
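 *
 * As a short illustrative sketch using the version getters above together with
 * s2n_connection_get_cipher() (documented below), after the handshake has completed:
 *
 *     if (s2n_connection_get_actual_protocol_version(conn) == S2N_TLS13) {
 *         const char *cipher = s2n_connection_get_cipher(conn);
 *         // e.g. cipher is "TLS_AES_128_GCM_SHA256" for a typical TLS1.3 negotiation
 *     }
 *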
*/ S2N_API extern int s2n_connection_get_client_hello_version(struct s2n_connection *conn); /** * Access the protocol version from the header of the first record that contained the ClientHello message. * * @note This field has been deprecated and should not be confused with the client hello * version. It is often set very low, usually to TLS1.0 for compatibility reasons, * and should never be set higher than TLS1.2. Therefore this method should only be used * for logging or fingerprinting. * * @param ch A pointer to the client hello struct * @param out The protocol version in the record header containing the Client Hello. */ S2N_API extern int s2n_client_hello_get_legacy_record_version(struct s2n_client_hello *ch, uint8_t *out); /** * Check if Client Auth was used for a connection. * * @param conn A pointer to the connection * @returns 1 if the handshake completed and Client Auth was negotiated during the * handshake. */ S2N_API extern int s2n_connection_client_cert_used(struct s2n_connection *conn); /** * A function that provides a human readable string of the cipher suite that was chosen * for a connection. * * @warning The string "TLS_NULL_WITH_NULL_NULL" is returned before the TLS handshake has been performed. * This does not mean that the ciphersuite "TLS_NULL_WITH_NULL_NULL" will be used by the connection, * it is merely being used as a placeholder. * * @note This function is only accurate after the TLS handshake. * * @param conn A pointer to the connection * @returns A string indicating the cipher suite negotiated by s2n in OpenSSL format. */ S2N_API extern const char *s2n_connection_get_cipher(struct s2n_connection *conn); /** * Provides access to the TLS-Exporter functionality. * * See https://datatracker.ietf.org/doc/html/rfc5705 and https://www.rfc-editor.org/rfc/rfc8446. * * @note This is currently only available with TLS 1.3 connections which have finished a handshake. * * @param conn A pointer to the connection * @returns A POSIX error signal. If an error was returned, the value contained in `output` should be considered invalid. */ S2N_API extern int s2n_connection_tls_exporter(struct s2n_connection *conn, const uint8_t *label, uint32_t label_length, const uint8_t *context, uint32_t context_length, uint8_t *output, uint32_t output_length); /** * Returns the IANA value for the connection's negotiated cipher suite. * * The value is returned in the form of `first,second`, in order to closely match * the values defined in the [IANA Registry](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). * For example if the connection's negotiated cipher suite is `TLS_AES_128_GCM_SHA256`, * which is registered as `0x13,0x01`, then `first = 0x13` and `second = 0x01`. * * This method will only succeed after the cipher suite has been negotiated with the peer. * * @param conn A pointer to the connection being read * @param first A pointer to a single byte, which will be updated with the first byte in the registered IANA value. * @param second A pointer to a single byte, which will be updated with the second byte in the registered IANA value. * @returns A POSIX error signal. If an error was returned, the values contained in `first` and `second` should be considered invalid. */ S2N_API extern int s2n_connection_get_cipher_iana_value(struct s2n_connection *conn, uint8_t *first, uint8_t *second); /** * Function to check if the cipher used by the current connection is supported by the current * cipher preferences.
* @param conn A pointer to the s2n connection * @param version A string representing the security policy to check against. * @returns 1 if the connection satisfies the cipher suite. 0 if the connection does not satisfy the cipher suite. -1 if there is an error. */ S2N_API extern int s2n_connection_is_valid_for_cipher_preferences(struct s2n_connection *conn, const char *version); /** * Function to get the human readable elliptic curve name for the connection. * * @param conn A pointer to the s2n connection * @returns A string indicating the elliptic curve used during ECDHE key exchange. The string "NONE" is returned if no curve was used. */ S2N_API extern const char *s2n_connection_get_curve(struct s2n_connection *conn); /** * Function to get the human readable KEM name for the connection. * * @param conn A pointer to the s2n connection * @returns A human readable string for the KEM group. If there is no KEM configured returns "NONE" */ S2N_API extern const char *s2n_connection_get_kem_name(struct s2n_connection *conn); /** * Function to get the human readable KEM group name for the connection. * * @param conn A pointer to the s2n connection * @returns A human readable string for the KEM group. If the connection is < TLS1.3 or there is no KEM group configured returns "NONE" */ S2N_API extern const char *s2n_connection_get_kem_group_name(struct s2n_connection *conn); /** * Function to get the alert that caused a connection to close. s2n-tls considers all * TLS alerts fatal and shuts down a connection whenever one is received. * * @param conn A pointer to the s2n connection * @returns The TLS alert code that caused a connection to be shut down */ S2N_API extern int s2n_connection_get_alert(struct s2n_connection *conn); /** * Function to return the last TLS handshake type that was processed. The returned format is a human readable string. * * @param conn A pointer to the s2n connection * @returns A human-readable handshake type name, e.g. "NEGOTIATED|FULL_HANDSHAKE|PERFECT_FORWARD_SECRECY" */ S2N_API extern const char *s2n_connection_get_handshake_type_name(struct s2n_connection *conn); /** * Function to return the last TLS message that was processed. The returned format is a human readable string. * @param conn A pointer to the s2n connection * @returns The last message name in the TLS state machine, e.g. "SERVER_HELLO", "APPLICATION_DATA". */ S2N_API extern const char *s2n_connection_get_last_message_name(struct s2n_connection *conn); /** * Opaque async private key operation handle */ struct s2n_async_pkey_op; /** * Sets whether or not a connection should enforce strict signature validation during the * `s2n_async_pkey_op_apply` call. * * `mode` can take the following values: * - `S2N_ASYNC_PKEY_VALIDATION_FAST` - default behavior: s2n-tls will perform only the minimum validation required for safe use of the asyn pkey operation. * - `S2N_ASYNC_PKEY_VALIDATION_STRICT` - in addition to the previous checks, s2n-tls will also ensure that the signature created as a result of the async private key sign operation matches the public key on the connection. */ typedef enum { S2N_ASYNC_PKEY_VALIDATION_FAST, S2N_ASYNC_PKEY_VALIDATION_STRICT } s2n_async_pkey_validation_mode; /** * The type of private key operation */ typedef enum { S2N_ASYNC_DECRYPT, S2N_ASYNC_SIGN } s2n_async_pkey_op_type; /** * Callback function for handling private key operations * * Invoked every time an operation requiring the private key is encountered * during the handshake. 
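 *
 * As an illustrative sketch, a callback that performs the operation synchronously with the
 * connection's own private key (see s2n_async_pkey_op_perform() and s2n_async_pkey_op_apply()
 * below; registered via s2n_config_set_async_pkey_callback()):
 *
 *     static int app_pkey_callback(struct s2n_connection *conn, struct s2n_async_pkey_op *op)
 *     {
 *         struct s2n_cert_chain_and_key *chain = s2n_connection_get_selected_cert(conn);
 *         s2n_cert_private_key *key = (chain != NULL) ? s2n_cert_chain_and_key_get_private_key(chain) : NULL;
 *         int rc = -1;
 *         if (key != NULL
 *                 && s2n_async_pkey_op_perform(op, key) == S2N_SUCCESS
 *                 && s2n_async_pkey_op_apply(op, conn) == S2N_SUCCESS) {
 *             rc = 0;
 *         }
 *         s2n_async_pkey_op_free(op);  // the op MUST always be freed by the application
 *         return rc;
 *     }
 *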
* * # Safety * * `op` is owned by the application and MUST be freed. * * @param conn Connection which triggered the callback * @param op An opaque object representing the private key operation */ typedef int (*s2n_async_pkey_fn)(struct s2n_connection *conn, struct s2n_async_pkey_op *op); /** * Sets up the callback to invoke when private key operations occur. * * @param config Config to set the callback * @param fn The function that should be called for each private key operation */ S2N_API extern int s2n_config_set_async_pkey_callback(struct s2n_config *config, s2n_async_pkey_fn fn); /** * Performs a private key operation using the given private key. * * # Safety * * Can only be called once. Any subsequent calls will produce a `S2N_ERR_T_USAGE` error. * * Safe to call from inside s2n_async_pkey_fn * * Safe to call from a different thread, as long as no other thread is operating on `op`. * * @param op An opaque object representing the private key operation * @param key The private key used for the operation. It can be extracted from * `conn` through the `s2n_connection_get_selected_cert` and `s2n_cert_chain_and_key_get_private_key` calls */ S2N_API extern int s2n_async_pkey_op_perform(struct s2n_async_pkey_op *op, s2n_cert_private_key *key); /** * Finalizes a private key operation and unblocks the connection. * * # Safety * * `conn` must match the connection that originally triggered the callback. * * Must be called after the operation is performed. * * Can only be called once. Any subsequent calls will produce a `S2N_ERR_T_USAGE` error. * * Safe to call from inside s2n_async_pkey_fn * * Safe to call from a different thread, as long as no other thread is operating on `op`. * * @param op An opaque object representing the private key operation * @param conn The connection associated with the operation that should be unblocked */ S2N_API extern int s2n_async_pkey_op_apply(struct s2n_async_pkey_op *op, struct s2n_connection *conn); /** * Frees the opaque structure representing a private key operation. * * # Safety * * MUST be called for every operation passed to s2n_async_pkey_fn * * Safe to call before or after the connection that created the operation is freed * * @param op An opaque object representing the private key operation */ S2N_API extern int s2n_async_pkey_op_free(struct s2n_async_pkey_op *op); /** * Configures whether or not s2n-tls will perform potentially expensive validation of * the results of a private key operation. * * @param config Config to set the validation mode for * @param mode What level of validation to perform */ S2N_API extern int s2n_config_set_async_pkey_validation_mode(struct s2n_config *config, s2n_async_pkey_validation_mode mode); /** * Returns the type of the private key operation. * * @param op An opaque object representing the private key operation * @param type A pointer to be set to the type */ S2N_API extern int s2n_async_pkey_op_get_op_type(struct s2n_async_pkey_op *op, s2n_async_pkey_op_type *type); /** * Returns the size of the input to the private key operation. * * @param op An opaque object representing the private key operation * @param data_len A pointer to be set to the size */ S2N_API extern int s2n_async_pkey_op_get_input_size(struct s2n_async_pkey_op *op, uint32_t *data_len); /** * Returns the input to the private key operation. * * When signing, the input is the digest to sign. * When decrypting, the input is the data to decrypt. * * # Safety * * `data` must be sufficiently large to contain the input. 
* `s2n_async_pkey_op_get_input_size` can be called to determine how much memory is required. * * s2n-tls does not take ownership of `data`. * The application still owns the memory and must free it if necessary. * * @param op An opaque object representing the private key operation * @param data A pointer to a buffer to copy the input into * @param data_len The maximum size of the `data` buffer */ S2N_API extern int s2n_async_pkey_op_get_input(struct s2n_async_pkey_op *op, uint8_t *data, uint32_t data_len); /** * Sets the output of the private key operation. * * # Safety * * s2n-tls does not take ownership of `data`. * The application still owns the memory and must free it if necessary. * * @param op An opaque object representing the private key operation * @param data A pointer to a buffer containing the output * @param data_len The size of the `data` buffer */ S2N_API extern int s2n_async_pkey_op_set_output(struct s2n_async_pkey_op *op, const uint8_t *data, uint32_t data_len); /** * Callback function for handling key log events * * THIS SHOULD BE USED FOR DEBUGGING PURPOSES ONLY! * * Each log line is formatted with the * [NSS Key Log Format](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format) * without a newline. * * # Safety * * * `ctx` MUST be cast into the same type of pointer that was originally created * * `logline` bytes MUST be copied or discarded before this function returns * * @param ctx Context for the callback * @param conn Connection for which the log line is being emitted * @param logline Pointer to the log line data * @param len Length of the log line data */ typedef int (*s2n_key_log_fn)(void *ctx, struct s2n_connection *conn, uint8_t *logline, size_t len); /** * Sets a key logging callback on the provided config * * THIS SHOULD BE USED FOR DEBUGGING PURPOSES ONLY! * * Setting this function enables configurations to emit secrets in the * [NSS Key Log Format](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format) * * # Safety * * * `callback` MUST cast `ctx` into the same type of pointer that was originally created * * `ctx` MUST live for at least as long as it is set on the config * * @param config Config to set the callback * @param callback The function that should be called for each secret log entry * @param ctx The context to be passed when the callback is called */ S2N_API extern int s2n_config_set_key_log_cb(struct s2n_config *config, s2n_key_log_fn callback, void *ctx); /** * s2n_config_enable_cert_req_dss_legacy_compat adds a dss cert type in the server certificate request when being called. * It only sends the dss cert type in the cert request but does not succeed the handshake if a dss cert is received. * Please DO NOT call this api unless you know you actually need legacy DSS certificate type compatibility * @param config Config to enable legacy DSS certificates for */ S2N_API extern int s2n_config_enable_cert_req_dss_legacy_compat(struct s2n_config *config); /** * Sets the maximum bytes of early data the server will accept. * * The default maximum is 0. If the maximum is 0, the server rejects all early data requests. * The config maximum can be overridden by the connection maximum or the maximum on an external pre-shared key. * * @param config A pointer to the config * @param max_early_data_size The maximum early data that the server will accept * @returns A POSIX error signal. If successful, the maximum early data size was updated. 
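 *
 * As a brief illustrative sketch, a server that wants to accept up to 1 KB of early data can
 * set the config-wide limit here (and optionally override it per connection with
 * s2n_connection_set_server_max_early_data_size(), documented below):
 *
 *     if (s2n_config_set_server_max_early_data_size(config, 1024) != S2N_SUCCESS) { return -1; }
 *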
*/ S2N_API int s2n_config_set_server_max_early_data_size(struct s2n_config *config, uint32_t max_early_data_size); /** * Sets the maximum bytes of early data the server will accept. * * The default maximum is 0. If the maximum is 0, the server rejects all early data requests. * The connection maximum can be overridden by the maximum on an external pre-shared key. * * @param conn A pointer to the connection * @param max_early_data_size The maximum early data the server will accept * @returns A POSIX error signal. If successful, the maximum early data size was updated. */ S2N_API int s2n_connection_set_server_max_early_data_size(struct s2n_connection *conn, uint32_t max_early_data_size); /** * Sets the user context associated with early data on a server. * * This context is passed to the `s2n_early_data_cb` callback to help decide whether to accept or reject early data. * * Unlike most contexts, the early data context is a byte buffer instead of a void pointer. * This is because we need to serialize the context into session tickets. * * This API is intended for use with session resumption, and will not affect pre-shared keys. * * @param conn A pointer to the connection * @param context A pointer to the user context data. This data will be copied. * @param context_size The size of the data to read from the `context` pointer. * @returns A POSIX error signal. If successful, the context was updated. */ S2N_API int s2n_connection_set_server_early_data_context(struct s2n_connection *conn, const uint8_t *context, uint16_t context_size); /** * Configures a particular pre-shared key to allow early data. * * `max_early_data_size` must be set to the maximum early data accepted by the server. * * In order to use early data, the cipher suite set on the pre-shared key must match the cipher suite * ultimately negotiated by the TLS handshake. Additionally, the cipher suite must have the same * hmac algorithm as the pre-shared key. * * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`. * @param max_early_data_size The maximum early data that can be sent or received using this key. * @param cipher_suite_first_byte The first byte in the registered IANA value of the associated cipher suite. * @param cipher_suite_second_byte The second byte in the registered IANA value of the associated cipher suite. * @returns A POSIX error signal. If successful, `psk` was updated. */ S2N_API int s2n_psk_configure_early_data(struct s2n_psk *psk, uint32_t max_early_data_size, uint8_t cipher_suite_first_byte, uint8_t cipher_suite_second_byte); /** * Sets the optional `application_protocol` associated with the given pre-shared key. * * In order to use early data, the `application_protocol` set on the pre-shared key must match * the `application_protocol` ultimately negotiated by the TLS handshake. * * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`. * @param application_protocol A pointer to the associated application protocol data. This data will be copied. * @param size The size of the data to read from the `application_protocol` pointer. * @returns A POSIX error signal. If successful, the application protocol was set. */ S2N_API int s2n_psk_set_application_protocol(struct s2n_psk *psk, const uint8_t *application_protocol, uint8_t size); /** * Sets the optional user early data context associated with the given pre-shared key. * * The early data context is passed to the `s2n_early_data_cb` callback to help decide whether * to accept or reject early data. 
* * @param psk A pointer to the pre-shared key, created with `s2n_external_psk_new`. * @param context A pointer to the associated user context data. This data will be copied. * @param size The size of the data to read from the `context` pointer. * @returns A POSIX error signal. If successful, the context was set. */ S2N_API int s2n_psk_set_early_data_context(struct s2n_psk *psk, const uint8_t *context, uint16_t size); /** * The status of early data on a connection. * * S2N_EARLY_DATA_STATUS_OK: Early data is in progress. * S2N_EARLY_DATA_STATUS_NOT_REQUESTED: The client did not request early data, so none was sent or received. * S2N_EARLY_DATA_STATUS_REJECTED: The client requested early data, but the server rejected the request. * Early data may have been sent, but was not received. * S2N_EARLY_DATA_STATUS_END: All early data was successfully sent and received. */ typedef enum { S2N_EARLY_DATA_STATUS_OK, S2N_EARLY_DATA_STATUS_NOT_REQUESTED, S2N_EARLY_DATA_STATUS_REJECTED, S2N_EARLY_DATA_STATUS_END, } s2n_early_data_status_t; /** * Reports the current state of early data for a connection. * * See `s2n_early_data_status_t` for all possible states. * * @param conn A pointer to the connection * @param status A pointer which will be set to the current early data status * @returns A POSIX error signal. */ S2N_API int s2n_connection_get_early_data_status(struct s2n_connection *conn, s2n_early_data_status_t *status); /** * Reports the remaining size of the early data allowed by a connection. * * If early data was rejected or not requested, the remaining early data size is 0. * Otherwise, the remaining early data size is the maximum early data allowed by the connection, * minus the early data sent or received so far. * * @param conn A pointer to the connection * @param allowed_early_data_size A pointer which will be set to the remaining early data currently allowed by `conn` * @returns A POSIX error signal. */ S2N_API int s2n_connection_get_remaining_early_data_size(struct s2n_connection *conn, uint32_t *allowed_early_data_size); /** * Reports the maximum size of the early data allowed by a connection. * * This is the maximum amount of early data that can ever be sent and received for a connection. * It is not affected by the actual status of the early data, so can be non-zero even if early data * is rejected or not requested. * * @param conn A pointer to the connection * @param max_early_data_size A pointer which will be set to the maximum early data allowed by `conn` * @returns A POSIX error signal. */ S2N_API int s2n_connection_get_max_early_data_size(struct s2n_connection *conn, uint32_t *max_early_data_size); /** * Called by the client to begin negotiation and send early data. * * See https://github.com/aws/s2n-tls/blob/main/docs/usage-guide/topics/ch14-early-data.md * for usage and examples. DO NOT USE unless you have considered the security issues and * implemented mitigation for anti-replay attacks. * * @param conn A pointer to the connection * @param data A pointer to the early data to be sent * @param data_len The size of the early data to send * @param data_sent A pointer which will be set to the size of the early data sent * @param blocked A pointer which will be set to the blocked status, as in `s2n_negotiate`. * @returns A POSIX error signal. The error should be handled as in `s2n_negotiate`. 
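 *
 * For illustration only, a simplified sketch (not taken from the linked usage guide) of a client
 * sending early data and then finishing the handshake. `wait_for_io` is a hypothetical helper that
 * polls the socket for the event named by `blocked`; the anti-replay caveats above still apply.
 *
 *     const uint8_t request[] = "GET / HTTP/1.1\r\n\r\n";
 *     ssize_t sent = 0;
 *     s2n_blocked_status blocked = S2N_NOT_BLOCKED;
 *     while (s2n_send_early_data(conn, request, sizeof(request) - 1, &sent, &blocked) != S2N_SUCCESS) {
 *         if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
 *             break;  // fatal error; real code would clean up the connection here
 *         }
 *         wait_for_io(conn, blocked);  // hypothetical polling helper
 *     }
 *     // Complete the rest of the handshake with s2n_negotiate() as usual.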
*/ S2N_API int s2n_send_early_data(struct s2n_connection *conn, const uint8_t *data, ssize_t data_len, ssize_t *data_sent, s2n_blocked_status *blocked); /** * Called by the server to begin negotiation and accept any early data the client sends. * * See https://github.com/aws/s2n-tls/blob/main/docs/usage-guide/topics/ch14-early-data.md * for usage and examples. DO NOT USE unless you have considered the security issues and * implemented mitigation for anti-replay attacks. * * @param conn A pointer to the connection * @param data A pointer to a buffer to store the early data received * @param max_data_len The size of the early data buffer * @param data_received A pointer which will be set to the size of the early data received * @param blocked A pointer which will be set to the blocked status, as in `s2n_negotiate`. * @returns A POSIX error signal. The error should be handled as in `s2n_negotiate`. */ S2N_API int s2n_recv_early_data(struct s2n_connection *conn, uint8_t *data, ssize_t max_data_len, ssize_t *data_received, s2n_blocked_status *blocked); struct s2n_offered_early_data; /** * A callback which can be implemented to accept or reject early data. * * This callback is triggered only after the server has determined early data is otherwise acceptable according * to the TLS early data specification. Implementations therefore only need to cover application-specific checks, * not the standard TLS early data validation. * * This callback can be synchronous or asynchronous. For asynchronous behavior, return success without * calling `s2n_offered_early_data_reject` or `s2n_offered_early_data_accept`. `early_data` will * still be a valid reference, and the connection will block until `s2n_offered_early_data_reject` or * `s2n_offered_early_data_accept` is called. * * @param conn A pointer to the connection * @param early_data A pointer which can be used to access information about the proposed early data * and then accept or reject it. * @returns A POSIX error signal. If unsuccessful, the connection will be closed with an error. */ typedef int (*s2n_early_data_cb)(struct s2n_connection *conn, struct s2n_offered_early_data *early_data); /** * Set a callback to accept or reject early data. * * @param config A pointer to the connection config * @param cb A pointer to the implementation of the callback. * @returns A POSIX error signal. If successful, the callback was set. */ S2N_API int s2n_config_set_early_data_cb(struct s2n_config *config, s2n_early_data_cb cb); /** * Get the length of the early data context set by the user. * * @param early_data A pointer to the early data information * @param context_len The length of the user context * @returns A POSIX error signal. */ S2N_API int s2n_offered_early_data_get_context_length(struct s2n_offered_early_data *early_data, uint16_t *context_len); /** * Get the early data context set by the user. * * @param early_data A pointer to the early data information * @param context A byte buffer to copy the user context into * @param max_len The size of `context`. Must be >= to the result of `s2n_offered_early_data_get_context_length`. * @returns A POSIX error signal. */ S2N_API int s2n_offered_early_data_get_context(struct s2n_offered_early_data *early_data, uint8_t *context, uint16_t max_len); /** * Reject early data offered by the client. * * @param early_data A pointer to the early data information * @returns A POSIX error signal. If success, the client's early data will be rejected. 
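 *
 * For illustration only, a sketch (not part of the official s2n-tls documentation) of an
 * `s2n_early_data_cb` implementation that accepts early data only when the user context set with
 * `s2n_connection_set_server_early_data_context()` matches an expected value.
 *
 *     static int early_data_cb(struct s2n_connection *conn, struct s2n_offered_early_data *early_data)
 *     {
 *         uint16_t len = 0;
 *         uint8_t context[32] = { 0 };
 *         if (s2n_offered_early_data_get_context_length(early_data, &len) != S2N_SUCCESS
 *                 || len > sizeof(context)
 *                 || s2n_offered_early_data_get_context(early_data, context, len) != S2N_SUCCESS) {
 *             return s2n_offered_early_data_reject(early_data);
 *         }
 *         if (len == 3 && memcmp(context, "ctx", 3) == 0) {
 *             return s2n_offered_early_data_accept(early_data);
 *         }
 *         return s2n_offered_early_data_reject(early_data);
 *     }
 *
 *     // Registered on the config with s2n_config_set_early_data_cb(config, early_data_cb).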
*/ S2N_API int s2n_offered_early_data_reject(struct s2n_offered_early_data *early_data); /** * Accept early data offered by the client. * * @param early_data A pointer to the early data information * @returns A POSIX error signal. If success, the client's early data will be accepted. */ S2N_API int s2n_offered_early_data_accept(struct s2n_offered_early_data *early_data); /** * Retrieves the list of supported groups configured by the security policy associated with `config`. * * The retrieved list of groups will contain all of the supported groups for a security policy that are compatible * with the build of s2n-tls. For instance, PQ kem groups that are not supported by the linked libcrypto will not * be written. Otherwise, all of the supported groups configured for the security policy will be written. This API * can be used with the s2n_client_hello_get_supported_groups() API as a means of comparing compatibility between * a client and server. * * IANA values for each of the supported groups are written to the provided `groups` array, and `groups_count` is * set to the number of written supported groups. * * `groups_count_max` should be set to the maximum capacity of the `groups` array. If `groups_count_max` is less * than the number of supported groups configured by the security policy, this function will error. * * Note that this API retrieves only the groups from a security policy that are available to negotiate via the * supported groups extension, and does not return TLS 1.2 PQ kem groups that are negotiated in the supported PQ * kem parameters extension. * * @param config A pointer to the s2n_config object from which the supported groups will be retrieved. * @param groups The array to populate with the supported groups. * @param groups_count_max The maximum number of supported groups that can fit in the `groups` array. * @param groups_count Set to the number of supported groups written to `groups`. * @returns S2N_SUCCESS on success. S2N_FAILURE on failure. */ S2N_API int s2n_config_get_supported_groups(struct s2n_config *config, uint16_t *groups, uint16_t groups_count_max, uint16_t *groups_count); #ifdef __cplusplus } #endif aws-crt-python-0.20.4+dfsg/crt/s2n/api/unstable/000077500000000000000000000000001456575232400213105ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/api/unstable/crl.h000066400000000000000000000254061456575232400222500ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include /** * @file crl.h * * The following APIs enable applications to determine if a received certificate has been revoked by its CA, via * Certificate Revocation Lists (CRLs). Please see the CRL Validation section in the usage guide for more information. * * The CRL APIs are currently considered unstable, since they have been recently added to s2n-tls. After gaining more * confidence in the correctness and usability of these APIs, they will be made stable. 
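 *
 * For illustration only, an overview sketch (not part of the official s2n-tls documentation) of how
 * the pieces defined below fit together. `find_crl_by_issuer_hash` stands in for a hypothetical
 * application-maintained CRL store.
 *
 *     static int crl_lookup_cb(struct s2n_crl_lookup *lookup, void *context)
 *     {
 *         uint64_t issuer_hash = 0;
 *         if (s2n_crl_lookup_get_cert_issuer_hash(lookup, &issuer_hash) != S2N_SUCCESS) {
 *             return -1;
 *         }
 *         struct s2n_crl *crl = find_crl_by_issuer_hash(context, issuer_hash);  // hypothetical lookup
 *         if (crl == NULL) {
 *             return s2n_crl_lookup_ignore(lookup);  // no CRL available for this certificate
 *         }
 *         return s2n_crl_lookup_set(lookup, crl);
 *     }
 *
 *     // Registered with s2n_config_set_crl_lookup_cb(config, crl_lookup_cb, app_crl_store).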
* */ struct s2n_crl_lookup; /** * A callback which can be implemented to provide s2n-tls with CRLs to use for CRL validation. * * This callback is triggered once for each certificate received during the handshake. To provide s2n-tls with a CRL for * the certificate, use `s2n_crl_lookup_set()`. To ignore the certificate and not provide a CRL, use * `s2n_crl_lookup_ignore()`. * * This callback can be synchronous or asynchronous. For asynchronous behavior, return success without calling * `s2n_crl_lookup_set()` or `s2n_crl_lookup_ignore()`. `s2n_negotiate()` will return S2N_BLOCKED_ON_APPLICATION_INPUT * until one of these functions is called for each invoked callback. * * @param lookup The CRL lookup for the given certificate. * @param context Context for the callback function. * @returns 0 on success, -1 on failure. */ typedef int (*s2n_crl_lookup_callback)(struct s2n_crl_lookup *lookup, void *context); /** * Set a callback to provide CRLs to use for CRL validation. * * @param config A pointer to the connection config * @param s2n_crl_lookup_callback The function to be called for each received certificate. * @param context Context to be passed to the callback function. * @return S2N_SUCCESS on success, S2N_FAILURE on failure */ S2N_API int s2n_config_set_crl_lookup_cb(struct s2n_config *config, s2n_crl_lookup_callback callback, void *context); /** * Allocates a new `s2n_crl` struct. * * Use `s2n_crl_load_pem()` to load the struct with a CRL pem. * * The allocated struct must be freed with `s2n_crl_free()`. * * @return A pointer to the allocated `s2n_crl` struct. */ S2N_API struct s2n_crl *s2n_crl_new(void); /** * Loads a CRL with pem data. * * @param crl The CRL to load with the PEM data. * @param pem The PEM data to load `crl` with. * @param len The length of the pem data. * @return S2N_SUCCESS on success, S2N_FAILURE on error. */ S2N_API int s2n_crl_load_pem(struct s2n_crl *crl, uint8_t *pem, size_t len); /** * Frees a CRL. * * Frees an allocated `s2n_crl` and sets `crl` to NULL. * * @param crl The CRL to free. * @return S2N_SUCCESS on success, S2N_FAILURE on error. */ S2N_API int s2n_crl_free(struct s2n_crl **crl); /** * Retrieves the issuer hash of a CRL. * * This function can be used to find the CRL associated with a certificate received in the s2n_crl_lookup callback. The * hash value, `hash`, corresponds with the issuer hash of a certificate, retrieved via * `s2n_crl_lookup_get_cert_issuer_hash()`. * * @param crl The CRL to obtain the hash value of. * @param hash A pointer that will be set to the hash value. * @return S2N_SUCCESS on success. S2N_FAILURE on failure */ S2N_API int s2n_crl_get_issuer_hash(struct s2n_crl *crl, uint64_t *hash); /** * Determines if the CRL is currently active. * * CRLs contain a thisUpdate field, which specifies the date at which the CRL becomes valid. This function can be called * to check thisUpdate relative to the current time. If the thisUpdate date is in the past, the CRL is considered * active. * * @param crl The CRL to validate. * @return S2N_SUCCESS if `crl` is active, S2N_FAILURE if `crl` is not active, or the active status cannot be determined. */ S2N_API int s2n_crl_validate_active(struct s2n_crl *crl); /** * Determines if the CRL has expired. * * CRLs contain a nextUpdate field, which specifies the date at which the CRL becomes expired. This function can be * called to check nextUpdate relative to the current time. If the nextUpdate date is in the future, the CRL has not * expired. 
* * If the CRL does not contain a nextUpdate field, the CRL is assumed to never expire. * * @param crl The CRL to validate. * @return S2N_SUCCESS if `crl` has not expired, S2N_FAILURE if `crl` has expired, or the expiration status cannot be determined. */
S2N_API int s2n_crl_validate_not_expired(struct s2n_crl *crl);
/** * Retrieves the issuer hash of the certificate. * * The CRL lookup callback is triggered once for each received certificate. This function is used to get the issuer hash * of this certificate. The hash value, `hash`, corresponds with the issuer hash of the CRL, retrieved via * `s2n_crl_get_issuer_hash()`. * * @param lookup The CRL lookup for the given certificate. * @param hash A pointer that will be set to the hash value. * @return S2N_SUCCESS on success, S2N_FAILURE on failure. */
S2N_API int s2n_crl_lookup_get_cert_issuer_hash(struct s2n_crl_lookup *lookup, uint64_t *hash);
/** * Provide s2n-tls with a CRL from the CRL lookup callback. * * A return function for `s2n_crl_lookup_callback`. This function should be used from within the CRL lookup callback to * provide s2n-tls with a CRL for the given certificate. The provided CRL will be included in the list of CRLs to use * when validating the certificate chain. * * To skip providing a CRL from the callback, use `s2n_crl_lookup_ignore()`. * * @param lookup The CRL lookup for the given certificate. * @param crl The CRL to include in the list of CRLs used to validate the certificate chain. * @return S2N_SUCCESS on success, S2N_FAILURE on failure. */
S2N_API int s2n_crl_lookup_set(struct s2n_crl_lookup *lookup, struct s2n_crl *crl);
/** * Skip providing a CRL from the CRL lookup callback. * * A return function for `s2n_crl_lookup_callback`. This function should be used from within the CRL lookup callback to ignore * the certificate, and skip providing s2n-tls with a CRL. * * If a certificate is ignored, and is ultimately included in the chain of trust, certificate chain validation will * fail with an S2N_ERR_CRL_LOOKUP_FAILED error. However, if the certificate is extraneous and not included in the chain * of trust, validation is able to proceed. * * @param lookup The CRL lookup for the given certificate. * @return S2N_SUCCESS on success, S2N_FAILURE on failure. */
S2N_API int s2n_crl_lookup_ignore(struct s2n_crl_lookup *lookup);
struct s2n_cert_validation_info;
/** * A callback which can be implemented to perform additional validation on received certificates. * * The cert validation callback is invoked after receiving and validating the peer's certificate chain. The callback * can be used by clients to validate server certificates, or by servers to validate client certificates in the case of * mutual auth. Note that any validation performed by applications in the callback is in addition to the certificate * validation already performed by s2n-tls. * * Applications can use either of the following APIs from within the callback to retrieve the peer's certificate chain * and perform validation before proceeding with the handshake: * - `s2n_connection_get_peer_cert_chain()` * - `s2n_connection_get_client_cert_chain()` * * If the validation performed in the callback is successful, `s2n_cert_validation_accept()` MUST be called to allow * `s2n_negotiate()` to continue the handshake. If the validation is unsuccessful, `s2n_cert_validation_reject()` * MUST be called, which will cause `s2n_negotiate()` to error. The behavior of `s2n_negotiate()` is undefined if * neither `s2n_cert_validation_accept()` nor `s2n_cert_validation_reject()` are called.
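 *
 * For illustration only, a sketch (not part of the official s2n-tls documentation) of a synchronous
 * callback. `application_check_passes` stands in for a hypothetical application-defined check on the
 * already-validated certificate chain.
 *
 *     static int cert_validation_cb(struct s2n_connection *conn,
 *             struct s2n_cert_validation_info *info, void *context)
 *     {
 *         if (application_check_passes(conn, context)) {  // hypothetical extra validation
 *             return s2n_cert_validation_accept(info);
 *         }
 *         return s2n_cert_validation_reject(info);
 *     }
 *
 *     // Registered with s2n_config_set_cert_validation_cb(config, cert_validation_cb, NULL).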
* * The `info` parameter is passed to the callback in order to call APIs specific to the cert validation callback, like * `s2n_cert_validation_accept()` and `s2n_cert_validation_reject()`. The `info` argument is only valid for the * lifetime of the callback, and must not be used after the callback has finished. * * After calling `s2n_cert_validation_reject()`, `s2n_negotiate()` will fail with a protocol error indicating that * the cert has been rejected from the callback. If more information regarding an application's custom validation * failure is required, consider adding an error code field to the custom connection context. See * `s2n_connection_set_ctx()` and `s2n_connection_get_ctx()` for how to set and retrieve custom connection contexts. * * @param conn The connection object from which the callback was invoked. * @param info The cert validation info object used to call cert validation APIs. * @param context Application data provided to the callback function via `s2n_config_set_cert_validation_cb()`. * @returns 0 on success, -1 on failure. */ typedef int (*s2n_cert_validation_callback)(struct s2n_connection *conn, struct s2n_cert_validation_info *info, void *context); /** * Sets a callback to perform additional validation on received certificates. * * @param config The associated connection config. * @param callback The cert validation callback to set. * @param context Optional application data passed to the callback function. * @returns S2N_SUCCESS on success, S2N_FAILURE on failure. */ S2N_API int s2n_config_set_cert_validation_cb(struct s2n_config *config, s2n_cert_validation_callback callback, void *context); /** * Indicates that the validation performed in the cert validation callback was successful. * * `s2n_cert_validation_accept()` should be called from within the cert validation callback to allow `s2n_negotiate()` * to continue the handshake. * * This function must not be called outside of the cert validation callback. * * @param info The cert validation info object for the associated callback. * @returns S2N_SUCCESS on success, S2N_FAILURE on failure. */ S2N_API int s2n_cert_validation_accept(struct s2n_cert_validation_info *info); /** * Indicates that the validation performed in the cert validation callback was unsuccessful. * * `s2n_cert_validation_reject()` should be called from within the cert validation callback to cause `s2n_negotiate()` * to error. * * This function must not be called outside of the cert validation callback. * * @param info The cert validation info object for the associated callback. * @returns S2N_SUCCESS on success, S2N_FAILURE on failure. */ S2N_API int s2n_cert_validation_reject(struct s2n_cert_validation_info *info); aws-crt-python-0.20.4+dfsg/crt/s2n/api/unstable/fingerprint.h000066400000000000000000000062451456575232400240170ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include /** * @file fingerprint.h * * The following APIs enable applications to calculate fingerprints to * identify ClientHellos. * * The fingerprinting APIs are currently considered unstable. They will be finalized * and marked as stable after an initial customer integration and feedback. */ typedef enum { /* * The current standard open source fingerprinting method. * See https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967. */ S2N_FINGERPRINT_JA3, } s2n_fingerprint_type; /** * Calculates a fingerprint hash for a given ClientHello. * * Currently the only type supported is S2N_FINGERPRINT_JA3, which uses MD5 and * requires at least 16 bytes of memory. * * @param ch The ClientHello to fingerprint. * @param type The algorithm to use for the fingerprint. Currently only JA3 is supported. * @param max_hash_size The maximum size of data that may be written to `hash`. * If too small for the requested hash, an S2N_ERR_T_USAGE error will occur. * @param hash The location that the requested hash will be written to. * @param hash_size The actual size of the data written to `hash`. * @param str_size The actual size of the full string associated with this hash. * This size can be used to ensure that sufficient memory is provided for the * output of `s2n_client_hello_get_fingerprint_string`. * @returns S2N_SUCCESS on success, S2N_FAILURE on failure. */ S2N_API int s2n_client_hello_get_fingerprint_hash(struct s2n_client_hello *ch, s2n_fingerprint_type type, uint32_t max_hash_size, uint8_t *hash, uint32_t *hash_size, uint32_t *str_size); /** * Calculates a full, variable-length fingerprint string for a given ClientHello. * * Because the length of the string is variable and unknown until the string is * calculated, `s2n_client_hello_get_fingerprint_hash` can be called first to * determine `max_size` and ensure `output` is sufficiently large. * * @param ch The ClientHello to fingerprint. * @param type The algorithm to use for the fingerprint. Currently only JA3 is supported. * @param max_size The maximum size of data that may be written to `output`. * If too small for the requested string, an S2N_ERR_T_USAGE error will occur. * @param output The location that the requested string will be written to. * @param output_size The actual size of the data written to `output`. * @returns S2N_SUCCESS on success, S2N_FAILURE on failure. */ S2N_API int s2n_client_hello_get_fingerprint_string(struct s2n_client_hello *ch, s2n_fingerprint_type type, uint32_t max_size, uint8_t *output, uint32_t *output_size); aws-crt-python-0.20.4+dfsg/crt/s2n/api/unstable/ktls.h000066400000000000000000000172101456575232400224370ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include /** * @file ktls.h * * The following APIs enable applications to use kernel TLS (kTLS), meaning that * encrypting and decrypting TLS records is handled by the kernel rather than by * the s2n-tls library. * * The kTLS APIs are currently considered unstable. 
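 *
 * For orientation only, a rough sketch (not part of the official s2n-tls documentation) of the
 * expected call sequence once the requirements listed below are met. Failure to enable kTLS is not
 * fatal; the connection simply continues with user-space TLS.
 *
 *     // After s2n_negotiate() has completed the handshake:
 *     if (s2n_connection_ktls_enable_send(conn) != S2N_SUCCESS) {
 *         // fall back to sending through s2n-tls in user space
 *     }
 *     if (s2n_connection_ktls_enable_recv(conn) != S2N_SUCCESS) {
 *         // fall back to receiving through s2n-tls in user space
 *     }
 *     // s2n_send()/s2n_recv() now use kTLS where enabled, and s2n_sendfile() becomes available
 *     // once sending with kTLS is enabled.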
kTLS is a relatively new * feature with limited and volatile support from different kernels and hardware. * * Currently, s2n-tls supports ktls for limited scenarios: * - You must be using Linux. We have not tested with other kernels. * - Your kernel must support kTLS. For Linux, versions >4.13 should support kTLS. * - The TLS kernel module must be enabled. While some environments enable the * module by default, most will require you to run `sudo modprobe tls`. * - You must negotiate AES128-GCM or AES256-GCM. Other ciphers are supported by * the kernel, but not implemented in s2n-tls yet. * - You must not use the s2n_renegotiate_request_cb from unstable/negotiate.h. * The TLS kernel module currently doesn't support renegotiation. * - By default, you must negotiate TLS1.2. See s2n_config_ktls_enable_tls13 * for the requirements to also support TLS1.3. */ /** * Enables sending using kTLS on a given connection. * * See above for the limitations on when kTLS can be enabled. Additionally, * s2n_connection_ktls_enable_send must be called after the handshake completes * but before the handshake is freed with s2n_connection_free_handshake. * It may be called after some application data is sent and received without kTLS, * but there must be no pending application data that requires flushing. If these * requirements are not met, enabling kTLS will fail with an error. * * After kTLS is enabled for sending, s2n_send, s2n_sendv, and s2n_sendv_with_offset * will use kTLS. kTLS should result in memory and CPU savings. s2n_sendfile will * also become available. * * For applications using kTLS to avoid copying or allocating memory, s2n_sendv * should be preferred over s2n_sendv_with_offset. For s2n_sendv_with_offset, * s2n-tls may need to copy the provided iovec array to apply the offset, and may * need to allocate memory to copy large (>16) iovec arrays. * * If kTLS is enabled for sending, s2n_connection_get_wire_bytes_out will always * return 0 instead of an accurate count. * * @warning Due to the uncertainty around kTLS support, the signature of this * method is likely to change before kTLS is marked as stable. * * @param conn A pointer to the connection. * @returns S2N_SUCCESS if kTLS is successfully enabled. If kTlS is not successfully * enabled, returns S2N_FAILURE but the connection may proceed without kTLS. */ S2N_API int s2n_connection_ktls_enable_send(struct s2n_connection *conn); /** * Enables receiving using kTLS on a given connection. * * See above for the limitations on when kTLS can be enabled. Additionally, * s2n_connection_ktls_enable_recv must be called after the handshake completes * but before the handshake is freed with s2n_connection_free_handshake. * It may be called after some application data is sent and received without kTLS, * but there must be no buffered application data that requires draining. If these * requirements are not met, enabling kTLS will fail with an error. * * After kTLS is enabled for receiving, s2n_recv will use kTLS. This may result * in memory and CPU savings, but currently will still buffer and copy application data. * We will further optimize s2n_recv for kTLS in the future. * * If kTLS is enabled for receiving, s2n_connection_get_wire_bytes_in will always * return 0 instead of an accurate count. * * @warning Due to the uncertainty around kTLS support, the signature of this * method is likely to change before kTLS is marked as stable. * * @param conn A pointer to the connection. * @returns S2N_SUCCESS if kTLS is successfully enabled. 
If kTlS is not successfully * enabled, returns S2N_FAILURE but the connection may proceed without kTLS. */ S2N_API int s2n_connection_ktls_enable_recv(struct s2n_connection *conn); /** * Allows kTLS to be enabled if a connection negotiates TLS1.3. * * Enabling TLS1.3 with this method is considered "unsafe" because the kernel * currently doesn't support updating encryption keys, which is required in TLS1.3. * s2n_connection_get_key_update_counts can be used to gather metrics on whether * key updates are occurring on your connections before enabling TLS1.3. * * In order to safely enable TLS1.3, an application must ensure that its peer will * not send any KeyUpdate messages. If s2n-tls receives a KeyUpdate message while * kTLS is enabled, it will report an S2N_ERR_KTLS_KEYUPDATE S2N_ERR_T_PROTO error. * * Additionally, an application must not use kTLS to attempt to send more than 35GB * of data and must not call s2n_send more than 23 million times. If either of these * limits is exceeded, it will report an S2N_ERR_KTLS_KEY_LIMIT S2N_ERR_T_PROTO error. * * This method must be called before enabling kTLS on a connection using * s2n_connection_ktls_enable_send or s2n_connection_ktls_enable_recv. * * @param config A pointer to the config. * @returns S2N_SUCCESS if successfully enabled, S2N_FAILURE otherwise. */ S2N_API int s2n_config_ktls_enable_unsafe_tls13(struct s2n_config *config); /** * Reports the number of times sending and receiving keys have been updated. * * This only applies to TLS1.3. Earlier versions do not support key updates. * * @warning s2n-tls only tracks up to UINT8_MAX (255) key updates. If this method * reports 255 updates, then more than 255 updates may have occurred. * * @param conn A pointer to the connection. * @param send_key_updates Number of times the sending key was updated. * @param recv_key_updates Number of times the receiving key was updated. * @returns S2N_SUCCESS if successful, S2N_FAILURE otherwise. */ S2N_API int s2n_connection_get_key_update_counts(struct s2n_connection *conn, uint8_t *send_key_updates, uint8_t *recv_key_updates); /** * Sends the contents of a file as application data. * * s2n_sendfile should be more efficient than s2n_send because the copy between * the file and the write socket happens inside the kernel. * * This method is only supported if kTLS is enabled for sending. * * @note For a TLS1.3 connection, the `count` argument will be used to enforce * safe sending limits regardless of the actual size of the file. Applications * should not set `count` excessively high. * * @param conn A pointer to the connection. * @param fd The file descriptor to read from. It must be opened for reading and * support mmap-like operations (i.e., it cannot be a socket). * @param offset The offset in the file to begin reading at. * @param count The maximum number of bytes to read from the file. * @param bytes_written Will be set to the number of bytes written if successful. * @param blocked Will be set to the blocked status if an `S2N_ERR_T_BLOCKED` error is returned. * @returns S2N_SUCCESS if any bytes are successfully written, S2N_FAILURE otherwise. */ S2N_API int s2n_sendfile(struct s2n_connection *conn, int fd, off_t offset, size_t count, size_t *bytes_written, s2n_blocked_status *blocked); aws-crt-python-0.20.4+dfsg/crt/s2n/api/unstable/npn.h000066400000000000000000000040301456575232400222510ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). 
* You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include /** * @file npn.h * * The Next Protocol Negotiation Extension, or NPN, was an RFC proposal to * negotiate an application protocol. This proposal was never standardized, and * it was eventually replaced with the ALPN Extension. However, an early draft * version of the NPN Extension was implemented in Openssl. * Now, OpenSSL clients and servers may require this extension in order to connect. * * s2n-tls supports NPN to make it easier for users whose peers require this * extension, but s2n-tls does NOT recommend its use. The specific draft version * supported is https://datatracker.ietf.org/doc/html/draft-agl-tls-nextprotoneg-03, * which provides interoperability with OpenSSL. */ /** * Turns on support for the NPN extension. * * This will allow an s2n-tls client to send the NPN extension and an s2n-tls * server to respond to receiving the NPN extension. However, if their peer * also indicates support for the ALPN extension, s2n-tls will prefer that. * * Use s2n_config_append_protocol_preference() to set up a list of supported protocols. * After the negotiation for the connection has completed, the agreed-upon protocol * can be retrieved with s2n_get_application_protocol(). * * @param config A pointer to the config object * @param enable Set to true to enable. Set to false to disable. * @returns S2N_SUCCESS on success, S2N_FAILURE on error. */ S2N_API int s2n_config_set_npn(struct s2n_config *config, bool enable); aws-crt-python-0.20.4+dfsg/crt/s2n/api/unstable/renegotiate.h000066400000000000000000000153631456575232400237770ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include /** * @file renegotiate.h * * "Renegotiation" is a TLS feature offered in TLS1.2 and earlier. * During renegotiation, a new handshake is performed on an already established * connection. The new handshake is encrypted using the keys from the original handshake. * The new handshake may not match the first handshake; for example, the server may choose * a different cipher suite or require client authentication for the new handshake. * * s2n-tls clients support secure (compliant with RFC5746) renegotiation for compatibility reasons, * but s2n-tls does NOT recommend its use. While s2n-tls addresses all currently known security concerns, * renegotiation has appeared in many CVEs and was completely removed from TLS1.3. */ /** * Used to indicate that an attempt to renegotiate encountered * application data which the application should process before * continuing the handshake. 
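 *
 * For illustration only, a condensed sketch (not part of the official s2n-tls documentation) of a
 * client driving a renegotiation after its request callback returned S2N_RENEGOTIATE_ACCEPT.
 * Socket polling and re-applying connection-specific settings are elided; `handle_app_data` is a
 * hypothetical application handler. See s2n_renegotiate_wipe() and s2n_renegotiate() below.
 *
 *     uint8_t app_data[4096] = { 0 };
 *     ssize_t app_data_size = 0;
 *     s2n_blocked_status blocked = S2N_NOT_BLOCKED;
 *     if (s2n_renegotiate_wipe(conn) != S2N_SUCCESS) {
 *         // handle the error
 *     }
 *     while (s2n_renegotiate(conn, app_data, sizeof(app_data), &app_data_size, &blocked) != S2N_SUCCESS) {
 *         if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) {
 *             break;  // fatal error
 *         }
 *         if (blocked == S2N_BLOCKED_ON_APPLICATION_DATA) {
 *             handle_app_data(app_data, app_data_size);  // hypothetical handler
 *         }
 *         // otherwise poll the socket for the event indicated by `blocked`
 *     }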
*/ extern const s2n_blocked_status S2N_BLOCKED_ON_APPLICATION_DATA; /** * Indicates how a renegotiation request should be handled. */ typedef enum { /* The client will take no action */ S2N_RENEGOTIATE_IGNORE = 0, /* The client will send a warning alert to the server */ S2N_RENEGOTIATE_REJECT, /* The client will begin renegotiation in the future */ S2N_RENEGOTIATE_ACCEPT, } s2n_renegotiate_response; /** * Callback function to handle requests for renegotiation. * * s2n-tls calls this method when a client receives a request from the server * to renegotiate the connection. If the server makes multiple requests, * s2n-tls will call this method multiple times. * * Applications should use the `response` value to indicate how the request * should be handled. If `response` is set to `S2N_RENEGOTIATE_IGNORE` * or `S2N_RENEGOTIATE_REJECT`, no further application involvement is required. * * If `response` is set to `S2N_RENEGOTIATE_ACCEPT`, then the application should * handle renegotiation. The application should stop calling s2n_send and s2n_recv, * wipe the connection with s2n_renegotiate_wipe, and then call s2n_renegotiate * until the handshake is complete. * * @param conn A pointer to the connection object. * @param context Context for the callback function. * @param response How the request should be handled. * @returns S2N_SUCCESS on success, S2N_FAILURE on error. */ typedef int (*s2n_renegotiate_request_cb)(struct s2n_connection *conn, void *context, s2n_renegotiate_response *response); /** * Sets a method to be called when the client receives a request to renegotiate. * * @param config A pointer to the config object. * @param callback The function to be called when a renegotiation request is received. * @param context Context to be passed to the callback function. * @returns S2N_SUCCESS on success, S2N_FAILURE on error. */ S2N_API int s2n_config_set_renegotiate_request_cb(struct s2n_config *config, s2n_renegotiate_request_cb callback, void *context); /** * Reset the connection so that it can be renegotiated. * * Similar to `s2n_connection_wipe`, this method resets a connection so that it can be used again. * However, unlike `s2n_connection_wipe`, it retains enough state from the previous connection * that the connection can continue to send and receive data encrypted with the old keys. * * The application MUST handle any incomplete IO before calling this method. The last call to `s2n_send` must * have succeeded, and `s2n_peek` must return zero. If there is any data in the send or receive buffers, * this method will fail. * * The application MUST repeat any connection-specific setup after calling this method. This method * cannot distinguish between internal connection state and configuration state set by the application, * so it wipes all state not directly related to handling encrypted records. For example, * if the application originally called `s2n_connection_set_blinding` on the connection, * then the application will need to call `s2n_connection_set_blinding` again after `s2n_renegotiate_wipe`. * * The connection-specific setup methods the application does not need to call again are: * - Methods to set the file descriptors * (`s2n_connection_set_fd`, `s2n_connection_set_read_fd`, `s2n_connection_set_write_fd`) * - Methods to set the send callback * (`s2n_connection_set_send_cb`, `s2n_connection_set_send_ctx`) * - Methods to set the recv callback * (`s2n_connection_set_recv_cb`, `s2n_connection_set_recv_ctx`) * * @note This method MUST be called before s2n_renegotiate. 
* @note Calling this method on a server connection will fail. s2n-tls servers do not support renegotiation. * * @param conn A pointer to the connection object. * @returns S2N_SUCCESS on success, S2N_FAILURE on error. */ S2N_API int s2n_renegotiate_wipe(struct s2n_connection *conn); /** * Perform a new handshake on an already established connection. * * This method should be called like `s2n_negotiate`, with the same handling of return values, * error types, and blocked statuses. * * However, unlike the initial handshake performed by `s2n_negotiate`, the renegotiation * handshake can encounter valid application data. In that case, this method will fail * with an error of type S2N_ERR_T_BLOCKED, set the `blocked` field to `S2N_BLOCKED_ON_APPLICATION_DATA`, * copy the data to `app_data_buf`, and set `app_data_size` to the size of the data. * The application should handle the data in `app_data_buf` before calling s2n_renegotiate again. * * @note s2n_renegotiate_wipe MUST be called before this method. * @note Calling this method on a server connection will fail. s2n-tls servers do not support renegotiation. * * @param conn A pointer to the connection object. * @param app_data_buf A pointer to a buffer that s2n will copy application data read into. * @param app_data_buf_size The size of `app_data_buf`. * @param app_data_size The number of application data bytes read. * @param blocked A pointer which will be set to the blocked status. * @returns S2N_SUCCESS if the handshake completed. S2N_FAILURE if the handshake encountered an error or is blocked. */ S2N_API int s2n_renegotiate(struct s2n_connection *conn, uint8_t *app_data_buf, ssize_t app_data_buf_size, ssize_t *app_data_size, s2n_blocked_status *blocked); aws-crt-python-0.20.4+dfsg/crt/s2n/bin/000077500000000000000000000000001456575232400174725ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bin/Makefile000066400000000000000000000017451456575232400211410ustar00rootroot00000000000000# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # .PHONY : all all: s2nc s2nd include ../s2n.mk LDFLAGS += -L../lib/ -L${LIBCRYPTO_ROOT}/lib ../lib/libs2n.a ${CRYPTO_LIBS} ${LIBS} CRUFT += s2nc s2nd s2nc: s2nc.c echo.c ${CC} ${CFLAGS} s2nc.c echo.c common.c -o s2nc ${LDFLAGS} s2nd: s2nd.c echo.c ${CC} ${CFLAGS} s2nd.c echo.c https.c common.c -o s2nd ${LDFLAGS} $(bindir): @mkdir -p $(bindir) install: s2nc s2nd $(bindir) @cp s2n? $(bindir) uninstall: @rm -f $(bindir)/s2n? aws-crt-python-0.20.4+dfsg/crt/s2n/bin/common.c000066400000000000000000000446021456575232400211340ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
See the License for the specific language governing * permissions and limitations under the License. */ #include "common.h" #include #include #include #include #include #include #include #include #include #include #include "api/s2n.h" #include "error/s2n_errno.h" #include "utils/s2n_safety.h" uint8_t ticket_key_name[16] = "2016.07.26.15\0"; uint8_t default_ticket_key[32] = { 0x07, 0x77, 0x09, 0x36, 0x2c, 0x2e, 0x32, 0xdf, 0x0d, 0xdc, 0x3f, 0x0d, 0xc4, 0x7b, 0xba, 0x63, 0x90, 0xb6, 0xc7, 0x3b, 0xb5, 0x0f, 0x9c, 0x31, 0x22, 0xec, 0x84, 0x4a, 0xd7, 0xc2, 0xb3, 0xe5 }; struct session_cache_entry session_cache[256]; static char dhparams[] = "-----BEGIN DH PARAMETERS-----\n" "MIIBCAKCAQEAy1+hVWCfNQoPB+NA733IVOONl8fCumiz9zdRRu1hzVa2yvGseUSq\n" "Bbn6k0FQ7yMED6w5XWQKDC0z2m0FI/BPE3AjUfuPzEYGqTDf9zQZ2Lz4oAN90Sud\n" "luOoEhYR99cEbCn0T4eBvEf9IUtczXUZ/wj7gzGbGG07dLfT+CmCRJxCjhrosenJ\n" "gzucyS7jt1bobgU66JKkgMNm7hJY4/nhR5LWTCzZyzYQh2HM2Vk4K5ZqILpj/n0S\n" "5JYTQ2PVhxP+Uu8+hICs/8VvM72DznjPZzufADipjC7CsQ4S6x/ecZluFtbb+ZTv\n" "HI5CnYmkAwJ6+FSWGaZQDi8bgerFk9RWwwIBAg==\n" "-----END DH PARAMETERS-----\n"; /* * Since this is a server, and the mechanism for hostname verification is not defined for this use-case, * allow any hostname through. If you are writing something with mutual auth and you have a scheme for verifying * the client (e.g. a reverse DNS lookup), you would plug that in here. */ static uint8_t unsafe_verify_host_fn(const char *host_name, size_t host_name_len, void *data) { return 1; } int write_array_to_file(const char *path, uint8_t *data, size_t length) { GUARD_EXIT_NULL(path); GUARD_EXIT_NULL(data); FILE *file = fopen(path, "wb"); if (!file) { return S2N_FAILURE; } if (fwrite(data, sizeof(char), length, file) != length) { fclose(file); return S2N_FAILURE; } fclose(file); return S2N_SUCCESS; } int get_file_size(const char *path, size_t *length) { GUARD_EXIT_NULL(path); GUARD_EXIT_NULL(length); FILE *file = fopen(path, "rb"); if (!file) { return S2N_FAILURE; } if (fseek(file, 0, SEEK_END) != 0) { fclose(file); return S2N_FAILURE; } long file_length = ftell(file); if (file_length < 0) { fclose(file); return S2N_FAILURE; } *length = file_length; fclose(file); return S2N_SUCCESS; } int load_file_to_array(const char *path, uint8_t *data, size_t max_length) { GUARD_EXIT_NULL(path); GUARD_EXIT_NULL(data); size_t file_size = 0; if (get_file_size(path, &file_size) < 0 || file_size > max_length) { return S2N_FAILURE; } FILE *file = fopen(path, "rb"); if (!file) { return S2N_FAILURE; } if (fread(data, sizeof(char), file_size, file) < file_size) { fclose(file); return S2N_FAILURE; } fclose(file); return S2N_SUCCESS; } char *load_file_to_cstring(const char *path) { FILE *pem_file = fopen(path, "rb"); if (!pem_file) { fprintf(stderr, "Failed to open file %s: '%s'\n", path, strerror(errno)); return NULL; } /* Make sure we can fit the pem into the output buffer */ if (fseek(pem_file, 0, SEEK_END) < 0) { fprintf(stderr, "Failed calling fseek: '%s'\n", strerror(errno)); fclose(pem_file); return NULL; } const ssize_t pem_file_size = ftell(pem_file); if (pem_file_size < 0) { fprintf(stderr, "Failed calling ftell: '%s'\n", strerror(errno)); fclose(pem_file); return NULL; } rewind(pem_file); char *pem_out = malloc(pem_file_size + 1); if (pem_out == NULL) { fprintf(stderr, "Failed allocating memory\n"); fclose(pem_file); return NULL; } if (fread(pem_out, sizeof(char), pem_file_size, pem_file) < (size_t) pem_file_size) { fprintf(stderr, "Failed reading file: '%s'\n", strerror(errno)); free(pem_out); fclose(pem_file); 
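/* the PEM file could not be read in full; the buffer and file handle were already released above, so report failure to the caller */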
return NULL; } pem_out[pem_file_size] = '\0'; fclose(pem_file); return pem_out; } int key_log_callback(void *file, struct s2n_connection *conn, uint8_t *logline, size_t len) { if (fwrite(logline, 1, len, (FILE *) file) != len) { return S2N_FAILURE; } if (fprintf((FILE *) file, "\n") < 0) { return S2N_FAILURE; } return fflush((FILE *) file); } /* An inverse map from an ascii value to a hexadecimal nibble value * accounts for all possible char values, where 255 is invalid value */ static const uint8_t hex_inverse[256] = { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }; int s2n_str_hex_to_bytes(const unsigned char *hex, uint8_t *out_bytes, uint32_t max_out_bytes_len) { GUARD_EXIT_NULL(hex); GUARD_EXIT_NULL(out_bytes); uint32_t len_with_spaces = strlen((const char *) hex); size_t i = 0, j = 0; while (j < len_with_spaces) { if (hex[j] == ' ') { j++; continue; } uint8_t high_nibble = hex_inverse[hex[j]]; if (high_nibble == 255) { fprintf(stderr, "Invalid HEX encountered\n"); return S2N_FAILURE; } uint8_t low_nibble = hex_inverse[hex[j + 1]]; if (low_nibble == 255) { fprintf(stderr, "Invalid HEX encountered\n"); return S2N_FAILURE; } if (max_out_bytes_len < i) { fprintf(stderr, "Insufficient memory for bytes buffer, try increasing the allocation size\n"); return S2N_FAILURE; } out_bytes[i] = high_nibble << 4 | low_nibble; i++; j += 2; } return S2N_SUCCESS; } static int s2n_get_psk_hmac_alg(s2n_psk_hmac *psk_hmac, char *hmac_str) { GUARD_EXIT_NULL(psk_hmac); GUARD_EXIT_NULL(hmac_str); if (strcmp(hmac_str, "SHA256") == 0) { *psk_hmac = S2N_PSK_HMAC_SHA256; } else if (strcmp(hmac_str, "SHA384") == 0) { *psk_hmac = S2N_PSK_HMAC_SHA384; } else { return S2N_FAILURE; } return S2N_SUCCESS; } static int s2n_setup_external_psk(struct s2n_psk **psk, char *params) { GUARD_EXIT_NULL(psk); GUARD_EXIT_NULL(params); /* duplicate params as strtok will modify the input string */ char *params_dup = malloc(strlen(params) + 1); GUARD_EXIT_NULL(params_dup); strcpy(params_dup, params); size_t token_idx = 0; for (char *token = strtok(params_dup, ","); token != NULL; token = strtok(NULL, ","), token_idx++) { switch (token_idx) { case 0: GUARD_EXIT(s2n_psk_set_identity(*psk, (const uint8_t *) token, strlen(token)), "Error setting psk identity\n"); break; case 1: { uint32_t max_secret_len = strlen(token) / 2; uint8_t *secret = 
malloc(max_secret_len); GUARD_EXIT_NULL(secret); GUARD_EXIT(s2n_str_hex_to_bytes((const unsigned char *) token, secret, max_secret_len), "Error converting hex-encoded psk secret to bytes\n"); GUARD_EXIT(s2n_psk_set_secret(*psk, secret, max_secret_len), "Error setting psk secret\n"); free(secret); } break; case 2: { s2n_psk_hmac psk_hmac_alg = 0; GUARD_EXIT(s2n_get_psk_hmac_alg(&psk_hmac_alg, token), "Invalid psk hmac algorithm\n"); GUARD_EXIT(s2n_psk_set_hmac(*psk, psk_hmac_alg), "Error setting psk hmac algorithm\n"); } break; default: break; } } free(params_dup); return S2N_SUCCESS; } int s2n_setup_external_psk_list(struct s2n_connection *conn, char *psk_optarg_list[S2N_MAX_PSK_LIST_LENGTH], size_t psk_list_len) { GUARD_EXIT_NULL(conn); GUARD_EXIT_NULL(psk_optarg_list); for (size_t i = 0; i < psk_list_len; i++) { struct s2n_psk *psk = s2n_external_psk_new(); GUARD_EXIT_NULL(psk); GUARD_EXIT(s2n_setup_external_psk(&psk, psk_optarg_list[i]), "Error setting external PSK parameters\n"); GUARD_EXIT(s2n_connection_append_psk(conn, psk), "Error appending psk to the connection\n"); GUARD_EXIT(s2n_psk_free(&psk), "Error freeing psk\n"); } return S2N_SUCCESS; } int s2n_set_common_server_config(int max_early_data, struct s2n_config *config, struct conn_settings conn_settings, const char *cipher_prefs, const char *session_ticket_key_file_path) { GUARD_EXIT(s2n_config_set_server_max_early_data_size(config, max_early_data), "Error setting max early data"); GUARD_EXIT(s2n_config_add_dhparams(config, dhparams), "Error adding DH parameters"); GUARD_EXIT(s2n_config_set_cipher_preferences(config, cipher_prefs), "Error setting cipher prefs"); GUARD_EXIT(s2n_config_set_cache_store_callback(config, cache_store_callback, session_cache), "Error setting cache store callback"); GUARD_EXIT(s2n_config_set_cache_retrieve_callback(config, cache_retrieve_callback, session_cache), "Error setting cache retrieve callback"); GUARD_EXIT(s2n_config_set_cache_delete_callback(config, cache_delete_callback, session_cache), "Error setting cache retrieve callback"); if (conn_settings.enable_mfl) { GUARD_EXIT(s2n_config_accept_max_fragment_length(config), "Error enabling TLS maximum fragment length extension in server"); } if (s2n_config_set_verify_host_callback(config, unsafe_verify_host_fn, NULL)) { print_s2n_error("Failure to set hostname verification callback"); exit(1); } if (conn_settings.session_ticket) { GUARD_EXIT(s2n_config_set_session_tickets_onoff(config, 1), "Error enabling session tickets"); } if (conn_settings.session_cache) { GUARD_EXIT(s2n_config_set_session_cache_onoff(config, 1), "Error enabling session cache using id"); } if (conn_settings.session_ticket || conn_settings.session_cache) { /* Key initialization */ uint8_t *st_key; uint32_t st_key_length; if (session_ticket_key_file_path) { int fd = open(session_ticket_key_file_path, O_RDONLY); GUARD_EXIT(fd, "Error opening session ticket key file"); struct stat st; GUARD_EXIT(fstat(fd, &st), "Error fstat-ing session ticket key file"); st_key = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); POSIX_ENSURE(st_key != MAP_FAILED, S2N_ERR_MMAP); st_key_length = st.st_size; close(fd); } else { st_key = default_ticket_key; st_key_length = sizeof(default_ticket_key); } if (s2n_config_add_ticket_crypto_key(config, ticket_key_name, strlen((char *) ticket_key_name), st_key, st_key_length, 0) != 0) { fprintf(stderr, "Error adding ticket key: '%s'\n", s2n_strerror(s2n_errno, "EN")); exit(1); } } return 0; } int s2n_setup_server_connection(struct s2n_connection *conn, int 
fd, struct s2n_config *config, struct conn_settings settings) { if (settings.self_service_blinding) { s2n_connection_set_blinding(conn, S2N_SELF_SERVICE_BLINDING); } if (settings.mutual_auth) { GUARD_RETURN(s2n_config_set_client_auth_type(config, S2N_CERT_AUTH_REQUIRED), "Error setting client auth type"); if (settings.ca_dir || settings.ca_file) { GUARD_RETURN(s2n_config_set_verification_ca_location(config, settings.ca_file, settings.ca_dir), "Error adding verify location"); } if (settings.insecure) { GUARD_RETURN(s2n_config_disable_x509_verification(config), "Error disabling X.509 validation"); } } GUARD_RETURN(s2n_connection_set_config(conn, config), "Error setting configuration"); if (settings.prefer_throughput) { GUARD_RETURN(s2n_connection_prefer_throughput(conn), "Error setting prefer throughput"); } if (settings.prefer_low_latency) { GUARD_RETURN(s2n_connection_prefer_low_latency(conn), "Error setting prefer low latency"); } GUARD_RETURN(s2n_connection_set_fd(conn, fd), "Error setting file descriptor"); if (settings.use_corked_io) { GUARD_RETURN(s2n_connection_use_corked_io(conn), "Error setting corked io"); } GUARD_RETURN( s2n_setup_external_psk_list(conn, settings.psk_optarg_list, settings.psk_list_len), "Error setting external psk list"); GUARD_RETURN(early_data_recv(conn), "Error receiving early data"); return 0; } int cache_store_callback(struct s2n_connection *conn, void *ctx, uint64_t ttl, const void *key, uint64_t key_size, const void *value, uint64_t value_size) { struct session_cache_entry *cache = ctx; POSIX_ENSURE_INCLUSIVE_RANGE(1, key_size, MAX_KEY_LEN); POSIX_ENSURE_INCLUSIVE_RANGE(1, value_size, MAX_VAL_LEN); uint8_t idx = ((const uint8_t *) key)[0]; memmove(cache[idx].key, key, key_size); memmove(cache[idx].value, value, value_size); cache[idx].key_len = key_size; cache[idx].value_len = value_size; return 0; } int cache_retrieve_callback(struct s2n_connection *conn, void *ctx, const void *key, uint64_t key_size, void *value, uint64_t *value_size) { struct session_cache_entry *cache = ctx; POSIX_ENSURE_INCLUSIVE_RANGE(1, key_size, MAX_KEY_LEN); uint8_t idx = ((const uint8_t *) key)[0]; POSIX_ENSURE(cache[idx].key_len == key_size, S2N_ERR_INVALID_ARGUMENT); POSIX_ENSURE(memcmp(cache[idx].key, key, key_size) == 0, S2N_ERR_INVALID_ARGUMENT); POSIX_ENSURE(*value_size >= cache[idx].value_len, S2N_ERR_INVALID_ARGUMENT); *value_size = cache[idx].value_len; memmove(value, cache[idx].value, cache[idx].value_len); for (uint64_t i = 0; i < key_size; i++) { printf("%02x", ((const uint8_t *) key)[i]); } printf("\n"); return 0; } int cache_delete_callback(struct s2n_connection *conn, void *ctx, const void *key, uint64_t key_size) { struct session_cache_entry *cache = ctx; POSIX_ENSURE_INCLUSIVE_RANGE(1, key_size, MAX_KEY_LEN); uint8_t idx = ((const uint8_t *) key)[0]; if (cache[idx].key_len != 0) { POSIX_ENSURE(cache[idx].key_len == key_size, S2N_ERR_INVALID_ARGUMENT); POSIX_ENSURE(memcmp(cache[idx].key, key, key_size) == 0, S2N_ERR_INVALID_ARGUMENT); } cache[idx].key_len = 0; cache[idx].value_len = 0; return 0; } uint8_t unsafe_verify_host(const char *host_name, size_t host_name_len, void *data) { struct verify_data *verify_data = (struct verify_data *) data; if (host_name_len > 2 && host_name[0] == '*' && host_name[1] == '.') { char *suffix = strstr(verify_data->trusted_host, "."); return (uint8_t) (strcasecmp(suffix, host_name + 1) == 0); } if (strcasecmp(host_name, "localhost") == 0 || strcasecmp(host_name, "127.0.0.1") == 0) { return (uint8_t) 
(strcasecmp(verify_data->trusted_host, "localhost") == 0 || strcasecmp(verify_data->trusted_host, "127.0.0.1") == 0); } return (uint8_t) (strcasecmp(host_name, verify_data->trusted_host) == 0); } int wait_for_shutdown(struct s2n_connection *conn, int fd) { s2n_blocked_status blocked = S2N_NOT_BLOCKED; while (s2n_shutdown(conn, &blocked) != S2N_SUCCESS) { int errno_val = errno; switch (s2n_error_get_type(s2n_errno)) { case S2N_ERR_T_BLOCKED: GUARD_RETURN(wait_for_event(fd, blocked), "Error polling IO for shutdown"); break; case S2N_ERR_T_CLOSED: /* We can't control the behavior of our peer. If the peer indicates end-of-stream * without sending a close_notify, don't treat it as an error, but print a warning. * * This is common in our integration tests both because OpenSSL s_server * never sends a close_notify (see https://github.com/openssl/openssl/issues/1806) * and because we tend to kill processes rather than waiting for a graceful shutdown. */ fprintf(stdout, "Connection closed by peer\n"); return S2N_SUCCESS; case S2N_ERR_T_IO: /* Again, we can't control the behavior of our peer, so just print a warning. * Killing a process can result in its peer receiving a ECONNRESET. */ if (errno_val == ECONNRESET) { fprintf(stdout, "Connection reset by peer\n"); return S2N_SUCCESS; } /* Otherwise, IO errors are fatal and should be investigated */ fprintf(stderr, "Unexpected IO error during shutdown: %s\n", strerror(errno_val)); return S2N_FAILURE; default: return S2N_FAILURE; } } return S2N_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/s2n/bin/common.h000066400000000000000000000117461456575232400211440ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include #include "api/s2n.h" #define GUARD_EXIT_NULL(x) \ do { \ if (x == NULL) { \ fprintf(stderr, "NULL pointer encountered\n"); \ exit(1); \ } \ } while (0) #define GUARD_EXIT(x, msg) \ do { \ if ((x) < 0) { \ print_s2n_error(msg); \ exit(1); \ } \ } while (0) #define GUARD_RETURN(x, msg) \ do { \ if ((x) < 0) { \ print_s2n_error(msg); \ return -1; \ } \ } while (0) #define S2N_MAX_PSK_LIST_LENGTH 10 #define MAX_KEY_LEN 32 #define MAX_VAL_LEN 255 struct session_cache_entry { uint8_t key[MAX_KEY_LEN]; uint8_t key_len; uint8_t value[MAX_VAL_LEN]; uint8_t value_len; }; struct verify_data { const char *trusted_host; }; struct conn_settings { unsigned mutual_auth : 1; unsigned self_service_blinding : 1; unsigned only_negotiate : 1; unsigned prefer_throughput : 1; unsigned prefer_low_latency : 1; unsigned enable_mfl : 1; unsigned session_ticket : 1; unsigned session_cache : 1; unsigned insecure : 1; unsigned use_corked_io : 1; unsigned https_server : 1; uint32_t https_bench; int max_conns; const char *ca_dir; const char *ca_file; char *psk_optarg_list[S2N_MAX_PSK_LIST_LENGTH]; size_t psk_list_len; }; void print_s2n_error(const char *app_error); void send_data(struct s2n_connection *conn, int sockfd, const char *data, uint64_t len, s2n_blocked_status *blocked); int echo(struct s2n_connection *conn, int sockfd, bool *stop_echo); int wait_for_event(int fd, s2n_blocked_status blocked); int negotiate(struct s2n_connection *conn, int sockfd); int renegotiate(struct s2n_connection *conn, int sockfd, bool wait); int wait_for_shutdown(struct s2n_connection *conn, int sockfd); int early_data_recv(struct s2n_connection *conn); int early_data_send(struct s2n_connection *conn, uint8_t *data, uint32_t len); int print_connection_info(struct s2n_connection *conn); int https(struct s2n_connection *conn, uint32_t bench); int key_log_callback(void *ctx, struct s2n_connection *conn, uint8_t *logline, size_t len); int cache_store_callback(struct s2n_connection *conn, void *ctx, uint64_t ttl, const void *key, uint64_t key_size, const void *value, uint64_t value_size); int cache_retrieve_callback(struct s2n_connection *conn, void *ctx, const void *key, uint64_t key_size, void *value, uint64_t *value_size); int cache_delete_callback(struct s2n_connection *conn, void *ctx, const void *key, uint64_t key_size); /** * Writes array data to the the file specified * * @param path Path to the file where this data will be written * @param data The data to be outputted * @param length Length of the `data` array */ int write_array_to_file(const char *path, uint8_t *data, size_t length); /** * Gets size of file * * @param path Path to the file * @param length A pointer which will be set to the size of the file */ int get_file_size(const char *path, size_t *length); /** * Reads in data from file into a C array * * * # Safety * * `data` must have at least `max_length` of memory available * * @param path Path to the file * @param data A pointer which will be set to the data in the file * @param max_length The maximum amount of data that can be written to the `data` pointer */ int load_file_to_array(const char *path, uint8_t *data, size_t max_length); char *load_file_to_cstring(const char *path); int s2n_str_hex_to_bytes(const unsigned char *hex, uint8_t *out_bytes, uint32_t max_out_bytes_len); int s2n_setup_external_psk_list(struct s2n_connection *conn, char *psk_optarg_list[S2N_MAX_PSK_LIST_LENGTH], size_t psk_list_len); uint8_t unsafe_verify_host(const char *host_name, size_t host_name_len, void *data); int 
s2n_setup_server_connection(struct s2n_connection *conn, int fd, struct s2n_config *config, struct conn_settings settings); int s2n_set_common_server_config(int max_early_data, struct s2n_config *config, struct conn_settings conn_settings, const char *cipher_prefs, const char *session_ticket_key_file_path); aws-crt-python-0.20.4+dfsg/crt/s2n/bin/echo.c000066400000000000000000000424571456575232400205700ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include "api/s2n.h" #include "api/unstable/fingerprint.h" #include "api/unstable/renegotiate.h" #include "common.h" #include "crypto/s2n_pkey.h" #define STDIO_BUFSIZE 10240 const char *sig_alg_strs[] = { [S2N_TLS_SIGNATURE_ANONYMOUS] = "None", [S2N_TLS_SIGNATURE_RSA] = "RSA", [S2N_TLS_SIGNATURE_ECDSA] = "ECDSA", [S2N_TLS_SIGNATURE_RSA_PSS_RSAE] = "RSA-PSS-RSAE", [S2N_TLS_SIGNATURE_RSA_PSS_PSS] = "RSA-PSS-PSS", }; const char *sig_hash_strs[] = { [S2N_TLS_HASH_NONE] = "None", [S2N_TLS_HASH_MD5] = "MD5", [S2N_TLS_HASH_SHA1] = "SHA1", [S2N_TLS_HASH_SHA224] = "SHA224", [S2N_TLS_HASH_SHA256] = "SHA256", [S2N_TLS_HASH_SHA384] = "SHA384", [S2N_TLS_HASH_SHA512] = "SHA512", [S2N_TLS_HASH_MD5_SHA1] = "MD5_SHA1", }; void print_s2n_error(const char *app_error) { fprintf(stderr, "[%d] %s: '%s' : '%s'\n", getpid(), app_error, s2n_strerror(s2n_errno, "EN"), s2n_strerror_debug(s2n_errno, "EN")); } /* Poll the given file descriptor for an event determined by the blocked status */ int wait_for_event(int fd, s2n_blocked_status blocked) { struct pollfd reader = { .fd = fd, .events = 0 }; switch (blocked) { case S2N_NOT_BLOCKED: return S2N_SUCCESS; case S2N_BLOCKED_ON_READ: reader.events |= POLLIN; break; case S2N_BLOCKED_ON_WRITE: reader.events |= POLLOUT; break; case S2N_BLOCKED_ON_EARLY_DATA: case S2N_BLOCKED_ON_APPLICATION_INPUT: /* This case is not encountered by the s2nc/s2nd applications, * but is detected for completeness */ return S2N_SUCCESS; } if (poll(&reader, 1, -1) < 0) { fprintf(stderr, "Failed to poll connection: %s\n", strerror(errno)); S2N_ERROR_PRESERVE_ERRNO(); } return S2N_SUCCESS; } int early_data_recv(struct s2n_connection *conn) { uint32_t max_early_data_size = 0; GUARD_RETURN(s2n_connection_get_max_early_data_size(conn, &max_early_data_size), "Error getting max early data size"); if (max_early_data_size == 0) { return S2N_SUCCESS; } ssize_t total_data_recv = 0; ssize_t data_recv = 0; bool server_success = 0; s2n_blocked_status blocked = S2N_NOT_BLOCKED; uint8_t *early_data_received = (uint8_t *) malloc(max_early_data_size); GUARD_EXIT_NULL(early_data_received); do { server_success = (s2n_recv_early_data(conn, early_data_received + total_data_recv, max_early_data_size - total_data_recv, &data_recv, &blocked) >= S2N_SUCCESS); total_data_recv += data_recv; } while (!server_success); if (total_data_recv > 0) { fprintf(stdout, "Early Data received: "); for (ssize_t i = 0; i < total_data_recv; i++) { fprintf(stdout, "%c", 
early_data_received[i]); } fprintf(stdout, "\n"); } free(early_data_received); return S2N_SUCCESS; } int early_data_send(struct s2n_connection *conn, uint8_t *data, uint32_t len) { s2n_blocked_status blocked = S2N_NOT_BLOCKED; ssize_t total_data_sent = 0; ssize_t data_sent = 0; bool client_success = 0; do { client_success = (s2n_send_early_data(conn, data + total_data_sent, len - total_data_sent, &data_sent, &blocked) >= S2N_SUCCESS); total_data_sent += data_sent; } while (total_data_sent < len && !client_success); return S2N_SUCCESS; } int print_connection_info(struct s2n_connection *conn) { int client_hello_version; int client_protocol_version; int server_protocol_version; int actual_protocol_version; if ((client_hello_version = s2n_connection_get_client_hello_version(conn)) < 0) { fprintf(stderr, "Could not get client hello version\n"); POSIX_BAIL(S2N_ERR_CLIENT_HELLO_VERSION); } if ((client_protocol_version = s2n_connection_get_client_protocol_version(conn)) < 0) { fprintf(stderr, "Could not get client protocol version\n"); POSIX_BAIL(S2N_ERR_CLIENT_PROTOCOL_VERSION); } if ((server_protocol_version = s2n_connection_get_server_protocol_version(conn)) < 0) { fprintf(stderr, "Could not get server protocol version\n"); POSIX_BAIL(S2N_ERR_SERVER_PROTOCOL_VERSION); } if ((actual_protocol_version = s2n_connection_get_actual_protocol_version(conn)) < 0) { fprintf(stderr, "Could not get actual protocol version\n"); POSIX_BAIL(S2N_ERR_ACTUAL_PROTOCOL_VERSION); } printf("CONNECTED:\n"); printf("Handshake: %s\n", s2n_connection_get_handshake_type_name(conn)); printf("Client hello version: %d\n", client_hello_version); printf("Client protocol version: %d\n", client_protocol_version); printf("Server protocol version: %d\n", server_protocol_version); printf("Actual protocol version: %d\n", actual_protocol_version); if (s2n_get_server_name(conn)) { printf("Server name: %s\n", s2n_get_server_name(conn)); } if (s2n_get_application_protocol(conn)) { printf("Application protocol: %s\n", s2n_get_application_protocol(conn)); } printf("Curve: %s\n", s2n_connection_get_curve(conn)); printf("KEM: %s\n", s2n_connection_get_kem_name(conn)); printf("KEM Group: %s\n", s2n_connection_get_kem_group_name(conn)); uint32_t length; const uint8_t *status = s2n_connection_get_ocsp_response(conn, &length); if (status && length > 0) { printf("OCSP response received, length %u\n", length); } printf("Cipher negotiated: %s\n", s2n_connection_get_cipher(conn)); s2n_tls_signature_algorithm server_sig_alg = 0, client_sig_alg = 0; s2n_tls_hash_algorithm server_sig_hash = 0, client_sig_hash = 0; GUARD_EXIT(s2n_connection_get_selected_signature_algorithm(conn, &server_sig_alg), "Error getting server signature algorithm"); GUARD_EXIT(s2n_connection_get_selected_client_cert_signature_algorithm(conn, &client_sig_alg), "Error getting client signature algorithm"); GUARD_EXIT(s2n_connection_get_selected_digest_algorithm(conn, &server_sig_hash), "Error getting server signature hash algorithm"); GUARD_EXIT(s2n_connection_get_selected_client_cert_digest_algorithm(conn, &client_sig_hash), "Error getting client signature hash algorithm"); printf("Server signature negotiated: %s+%s\n", sig_alg_strs[server_sig_alg], sig_hash_strs[server_sig_hash]); if (client_sig_alg != S2N_TLS_SIGNATURE_ANONYMOUS) { printf("Client signature negotiated: %s+%s\n", sig_alg_strs[client_sig_alg], sig_hash_strs[client_sig_hash]); } bool session_resumed = s2n_connection_is_session_resumed(conn); if (session_resumed) { printf("Resumed session\n"); } uint16_t 
identity_length = 0; GUARD_EXIT(s2n_connection_get_negotiated_psk_identity_length(conn, &identity_length), "Error getting negotiated psk identity length from the connection\n"); if (identity_length != 0 && !session_resumed) { uint8_t *identity = (uint8_t *) malloc(identity_length); GUARD_EXIT_NULL(identity); GUARD_EXIT(s2n_connection_get_negotiated_psk_identity(conn, identity, identity_length), "Error getting negotiated psk identity from the connection\n"); printf("Negotiated PSK identity: %.*s\n", identity_length, identity); free(identity); } s2n_early_data_status_t early_data_status = (s2n_early_data_status_t) 0; GUARD_EXIT(s2n_connection_get_early_data_status(conn, &early_data_status), "Error getting early data status"); const char *status_str = NULL; switch (early_data_status) { case S2N_EARLY_DATA_STATUS_OK: status_str = "IN PROGRESS"; break; case S2N_EARLY_DATA_STATUS_NOT_REQUESTED: status_str = "NOT REQUESTED"; break; case S2N_EARLY_DATA_STATUS_REJECTED: status_str = "REJECTED"; break; case S2N_EARLY_DATA_STATUS_END: status_str = "ACCEPTED"; break; } GUARD_EXIT_NULL(status_str); printf("Early Data status: %s\n", status_str); struct s2n_client_hello *ch = s2n_connection_get_client_hello(conn); if (ch && client_hello_version > S2N_SSLv2) { uint8_t ja3[16] = { 0 }; uint32_t ja3_size = 0, str_size = 0; GUARD_EXIT(s2n_client_hello_get_fingerprint_hash(ch, S2N_FINGERPRINT_JA3, sizeof(ja3), ja3, &ja3_size, &str_size), "Error calculating JA3"); printf("JA3: "); for (size_t i = 0; i < ja3_size; i++) { printf("%02x", ja3[i]); } printf("\n"); } printf("Wire bytes in: %" PRIu64 "\n", s2n_connection_get_wire_bytes_in(conn)); printf("Wire bytes out: %" PRIu64 "\n", s2n_connection_get_wire_bytes_out(conn)); return 0; } int negotiate(struct s2n_connection *conn, int fd) { s2n_blocked_status blocked; while (s2n_negotiate(conn, &blocked) != S2N_SUCCESS) { if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) { fprintf(stderr, "Failed to negotiate: '%s'. %s\n", s2n_strerror(s2n_errno, "EN"), s2n_strerror_debug(s2n_errno, "EN")); if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_ALERT) { fprintf(stderr, "Alert: %d\n", s2n_connection_get_alert(conn)); } S2N_ERROR_PRESERVE_ERRNO(); } if (wait_for_event(fd, blocked) != S2N_SUCCESS) { S2N_ERROR_PRESERVE_ERRNO(); } } print_connection_info(conn); printf("s2n is ready\n"); return 0; } int renegotiate(struct s2n_connection *conn, int fd, bool wait_for_more_data) { s2n_blocked_status blocked = S2N_NOT_BLOCKED; uint8_t buffer[STDIO_BUFSIZE] = { 0 }; ssize_t data_read = 0; GUARD_RETURN(s2n_renegotiate_wipe(conn), "Unable to prepare connection for renegotiate"); GUARD_RETURN(s2n_connection_set_client_auth_type(conn, S2N_CERT_AUTH_OPTIONAL), "Error setting ClientAuth optional"); fprintf(stdout, "RENEGOTIATE\n"); fflush(stdout); /* Do not proceed with renegotiation until we receive more data from the server */ if (wait_for_more_data) { fd_set fds = { 0 }; FD_SET(fd, &fds); select(FD_SETSIZE, &fds, NULL, NULL, NULL); } while (s2n_renegotiate(conn, buffer, sizeof(buffer), &data_read, &blocked) != S2N_SUCCESS) { uint8_t *data_ptr = buffer; while (data_read > 0) { ssize_t data_written = write(STDOUT_FILENO, data_ptr, data_read); GUARD_RETURN(data_written, "Error writing to stdout\n"); data_read -= data_written; data_ptr += data_written; } if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) { fprintf(stderr, "Failed to renegotiate: '%s'. 
%s\n", s2n_strerror(s2n_errno, NULL), s2n_strerror_debug(s2n_errno, NULL)); if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_ALERT) { fprintf(stderr, "Alert: %d\n", s2n_connection_get_alert(conn)); } return S2N_FAILURE; } GUARD_RETURN(wait_for_event(fd, blocked), "Error polling IO for renegotiate"); } print_connection_info(conn); printf("s2n is ready, again\n"); return S2N_SUCCESS; } void send_data(struct s2n_connection *conn, int sockfd, const char *data, uint64_t len, s2n_blocked_status *blocked) { uint64_t bytes_remaining = len; const char *data_ptr = data; do { ssize_t send_len = MIN(bytes_remaining, SSIZE_MAX); ssize_t bytes_written = s2n_send(conn, data_ptr, send_len, blocked); if (bytes_written < 0) { if (s2n_error_get_type(s2n_errno) != S2N_ERR_T_BLOCKED) { fprintf(stderr, "Error writing to connection: '%s'\n", s2n_strerror(s2n_errno, "EN")); exit(1); } GUARD_EXIT(wait_for_event(sockfd, *blocked), "Unable to send data"); continue; } bytes_remaining -= bytes_written; data_ptr += bytes_written; } while (bytes_remaining > 0); } int echo(struct s2n_connection *conn, int sockfd, bool *stop_echo) { struct pollfd readers[2]; readers[0].fd = sockfd; readers[0].events = POLLIN; readers[1].fd = STDIN_FILENO; readers[1].events = POLLIN; /* Reset errno so that we can't inherit the errno == EINTR exit condition. */ errno = 0; /* Act as a simple proxy between stdin and the SSL connection */ int p = 0; s2n_blocked_status blocked = S2N_NOT_BLOCKED; do { /* echo will send and receive Application Data back and forth between * client and server, until stop_echo is true or stdin EOF is reached. */ while (!(*stop_echo) && (p = poll(readers, 2, -1)) > 0) { char buffer[STDIO_BUFSIZE]; ssize_t bytes_read = 0; if (readers[0].revents & POLLIN) { s2n_errno = S2N_ERR_T_OK; bytes_read = s2n_recv(conn, buffer, STDIO_BUFSIZE, &blocked); if (bytes_read == 0) { return 0; } if (bytes_read < 0) { switch (s2n_error_get_type(s2n_errno)) { case S2N_ERR_T_BLOCKED: /* Wait until poll tells us data is ready */ continue; case S2N_ERR_T_ALERT: fprintf(stderr, "Received alert: %d\n", s2n_connection_get_alert(conn)); break; default: fprintf(stderr, "Error reading from connection: '%s'\n", s2n_strerror(s2n_errno, "EN")); break; } exit(1); } char *buf_ptr = buffer; do { ssize_t bytes_written = write(STDOUT_FILENO, buf_ptr, bytes_read); if (bytes_written < 0) { fprintf(stderr, "Error writing to stdout\n"); exit(1); } bytes_read -= bytes_written; buf_ptr += bytes_written; } while (bytes_read > 0); } if (readers[1].revents & POLLIN) { size_t bytes_available = 0; if (ioctl(STDIN_FILENO, FIONREAD, &bytes_available) < 0) { bytes_available = 1; } do { /* We can only read as much data as we have space for. So it may * take a couple loops to empty stdin. */ size_t bytes_to_read = bytes_available; if (bytes_available > sizeof(buffer)) { bytes_to_read = sizeof(buffer); } bytes_read = read(STDIN_FILENO, buffer, bytes_to_read); if (bytes_read < 0 && errno != EINTR) { fprintf(stderr, "Error reading from stdin\n"); exit(1); } if (bytes_read == 0) { fprintf(stderr, "Exiting on stdin EOF\n"); return 0; } bytes_available -= bytes_read; /* We may not be able to write all the data we read in one shot, so * keep sending until we have cleared our buffer. 
*/ send_data(conn, sockfd, buffer, bytes_read, &blocked); } while (bytes_available || blocked); } if (readers[1].revents & POLLHUP) { /* The stdin pipe hanged up, and we've handled all read from it above */ return 0; } if (readers[0].revents & (POLLERR | POLLHUP | POLLNVAL)) { fprintf(stderr, "Error polling from socket: err=%d hup=%d nval=%d\n", (readers[0].revents & POLLERR) ? 1 : 0, (readers[0].revents & POLLHUP) ? 1 : 0, (readers[0].revents & POLLNVAL) ? 1 : 0); POSIX_BAIL(S2N_ERR_POLLING_FROM_SOCKET); } if (readers[1].revents & (POLLERR | POLLNVAL)) { fprintf(stderr, "Error polling from socket: err=%d nval=%d\n", (readers[1].revents & POLLERR) ? 1 : 0, (readers[1].revents & POLLNVAL) ? 1 : 0); POSIX_BAIL(S2N_ERR_POLLING_FROM_SOCKET); } } } while (p < 0 && errno == EINTR); return 0; } aws-crt-python-0.20.4+dfsg/crt/s2n/bin/https.c000066400000000000000000000111411456575232400207760ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include #include "api/s2n.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_safety.h" #define STRING_LEN 1024 static char str_buffer[STRING_LEN]; static s2n_blocked_status blocked; #define SEND(...) \ do { \ sprintf(str_buffer, __VA_ARGS__); \ POSIX_GUARD(s2n_send(conn, str_buffer, strlen(str_buffer), &blocked)); \ } while (0) #define BUFFER(...) \ do { \ sprintf(str_buffer, __VA_ARGS__); \ POSIX_GUARD(s2n_stuffer_write_bytes(&stuffer, (const uint8_t *) str_buffer, strlen(str_buffer))); \ } while (0) static int flush(uint32_t left, uint8_t *buffer, struct s2n_connection *conn, s2n_blocked_status *blocked_status) { uint32_t i = 0; while (i < left) { int out = s2n_send(conn, &buffer[i], left - i, blocked_status); if (out < 0) { fprintf(stderr, "Error writing to connection: '%s'\n", s2n_strerror(s2n_errno, "EN")); s2n_print_stacktrace(stdout); return S2N_FAILURE; } i += out; } return S2N_SUCCESS; } #define HEADERS(length) \ do { \ SEND("HTTP/1.1 200 OK\r\n"); \ SEND("Content-Length: %u\r\n", length); \ SEND("\r\n"); \ } while (0) /* In bench mode, we send some binary output */ int bench_handler(struct s2n_connection *conn, uint32_t bench) { HEADERS(bench); fprintf(stdout, "Sending %u bytes...\n", bench); uint8_t big_buff[65536] = { 0 }; uint32_t len = sizeof(big_buff); uint32_t bytes_remaining = bench; while (bytes_remaining) { uint32_t buffer_remaining = bytes_remaining < len ? bytes_remaining : len; POSIX_GUARD(flush(buffer_remaining, big_buff, conn, &blocked)); bytes_remaining -= buffer_remaining; } fprintf(stdout, "Done. Closing connection.\n\n"); return 0; } /* * simple https handler that allows https clients to connect * but currently does not do any user parsing */ int https(struct s2n_connection *conn, uint32_t bench) { if (bench) { return bench_handler(conn, bench); } DEFER_CLEANUP(struct s2n_stuffer stuffer, s2n_stuffer_free); POSIX_GUARD(s2n_stuffer_growable_alloc(&stuffer, 1024)); BUFFER("

Hello from s2n server\n");

    BUFFER("Client hello version: %d\n", s2n_connection_get_client_hello_version(conn));
    BUFFER("Client protocol version: %d\n", s2n_connection_get_client_protocol_version(conn));
    BUFFER("Server protocol version: %d\n", s2n_connection_get_server_protocol_version(conn));
    BUFFER("Actual protocol version: %d\n", s2n_connection_get_actual_protocol_version(conn));

    if (s2n_get_server_name(conn)) {
        BUFFER("Server name: %s\n", s2n_get_server_name(conn));
    }

    if (s2n_get_application_protocol(conn)) {
        BUFFER("Application protocol: %s\n", s2n_get_application_protocol(conn));
    }

    BUFFER("Curve: %s\n", s2n_connection_get_curve(conn));
    BUFFER("KEM: %s\n", s2n_connection_get_kem_name(conn));
    BUFFER("KEM Group: %s\n", s2n_connection_get_kem_group_name(conn));
    BUFFER("Cipher negotiated: %s\n", s2n_connection_get_cipher(conn));
    BUFFER("Session resumption: %s\n", s2n_connection_is_session_resumed(conn) ? "true" : "false");

    uint32_t content_length = s2n_stuffer_data_available(&stuffer);

    uint8_t *content = s2n_stuffer_raw_read(&stuffer, content_length);
    POSIX_ENSURE_REF(content);

    HEADERS(content_length);
    POSIX_GUARD(flush(content_length, content, conn, &blocked));

    return S2N_SUCCESS;
}
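
/*
 * A minimal sketch, not part of the original https.c: it shows how the HEADERS() and
 * flush() helpers above can be combined to return a fixed body. The function name and the
 * body text are illustrative assumptions; the file-scope `blocked` status is reused.
 */
int https_fixed_body_sketch(struct s2n_connection *conn)
{
    /* Any static payload works, as long as Content-Length matches what flush() writes. */
    static uint8_t body[] = "hello from a sketch handler\n";
    uint32_t body_len = sizeof(body) - 1;

    /* HEADERS() sends the status line and the Content-Length header via s2n_send(). */
    HEADERS(body_len);

    /* flush() loops on s2n_send() until all body_len bytes have been written. */
    POSIX_GUARD(flush(body_len, body, conn, &blocked));
    return S2N_SUCCESS;
}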
aws-crt-python-0.20.4+dfsg/crt/s2n/bin/s2nc.c000066400000000000000000000723071456575232400205140ustar00rootroot00000000000000/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

#ifndef S2N_INTERN_LIBCRYPTO
    #include 
    #include 
#endif

#include "api/s2n.h"
#include "api/unstable/npn.h"
#include "api/unstable/renegotiate.h"
#include "common.h"
#include "error/s2n_errno.h"
#include "tls/s2n_connection.h"

#define OPT_TICKET_IN          1000
#define OPT_TICKET_OUT         1001
#define OPT_SEND_FILE          1002
#define OPT_RENEG              1003
#define OPT_NPN                1004
#define OPT_PREFER_LOW_LATENCY 1005
#define OPT_PREFER_THROUGHPUT  1006
#define OPT_BUFFERED_SEND      1007

/*
 * s2nc is an example client that uses many s2n-tls APIs.
 * It is intended for testing purposes only, and should not be used in production.
 */
void usage()
{
    /* clang-format off */
    fprintf(stderr, "usage: s2nc [options] host [port]\n");
    fprintf(stderr, " host: hostname or IP address to connect to\n");
    fprintf(stderr, " port: port to connect to\n");
    fprintf(stderr, "\n Options:\n\n");
    fprintf(stderr, "  -a [protocols]\n");
    fprintf(stderr, "  --alpn [protocols]\n");
    fprintf(stderr, "    Sets the application protocols supported by this client, as a comma separated list.\n");
    fprintf(stderr, "  -c [version_string]\n");
    fprintf(stderr, "  --ciphers [version_string]\n");
    fprintf(stderr, "    Set the cipher preference version string. Defaults to \"default\" \n");
    fprintf(stderr, "  --enter-fips-mode\n");
    fprintf(stderr, "    Enter libcrypto's FIPS mode. The linked version of OpenSSL must be built with the FIPS module.\n");
    fprintf(stderr, "  -e,--echo\n");
    fprintf(stderr, "    Listen to stdin after TLS Connection is established and echo it to the Server\n");
    fprintf(stderr, "  --send-file [file path]\n");
    fprintf(stderr, "    Sends the contents of the provided file to the server after connecting.\n");
    fprintf(stderr, "  -h,--help\n");
    fprintf(stderr, "    Display this message and quit.\n");
    fprintf(stderr, "  -n [server name]\n");
    fprintf(stderr, "  --name [server name]\n");
    fprintf(stderr, "    Sets the SNI server name header for this client.  If not specified, the host value is used.\n");
    fprintf(stderr, "  -s,--status\n");
    fprintf(stderr, "    Request the OCSP status of the remote server certificate\n");
    fprintf(stderr, "  -m,--mfl\n");
    fprintf(stderr, "    Request maximum fragment length from: 512, 1024, 2048, 4096\n");
    fprintf(stderr, "  -f,--ca-file [file path]\n");
    fprintf(stderr, "    Location of trust store CA file (PEM format). If neither -f or -d are specified. System defaults will be used.\n");
    fprintf(stderr, "  -d,--ca-dir [directory path]\n");
    fprintf(stderr, "    Directory containing hashed trusted certs. If neither -f or -d are specified. System defaults will be used.\n");
    fprintf(stderr, "  -i,--insecure\n");
    fprintf(stderr, "    Turns off certification validation altogether.\n");
    fprintf(stderr, "  -l,--cert [file path]\n");
    fprintf(stderr, "    Path to a PEM encoded certificate. Optional. Will only be used for client auth\n");
    fprintf(stderr, "  -k,--key [file path]\n");
    fprintf(stderr, "    Path to a PEM encoded private key that matches cert. Will only be used for client auth\n");
    fprintf(stderr, "  -r,--reconnect\n");
    fprintf(stderr, "    Drop and re-make the connection using Session ticket. If session ticket is disabled, then re-make the connection using Session-ID \n");
    fprintf(stderr, "  -T,--no-session-ticket \n");
    fprintf(stderr, "    Disable session ticket for resumption.\n");
    fprintf(stderr, "  --ticket-out [file path]\n");
    fprintf(stderr, "    Path to a file where the session ticket can be stored.\n");
    fprintf(stderr, "  --ticket-in [file path]\n");
    fprintf(stderr, "    Path to session ticket file to resume connection.\n");
    fprintf(stderr, "  -D,--dynamic\n");
    fprintf(stderr, "    Set dynamic record resize threshold\n");
    fprintf(stderr, "  -t,--timeout\n");
    fprintf(stderr, "    Set dynamic record timeout threshold\n");
    fprintf(stderr, "  -C,--corked-io\n");
    fprintf(stderr, "    Turn on corked io\n");
    fprintf(stderr, "  -B,--non-blocking\n");
    fprintf(stderr, "    Set the non-blocking flag on the connection's socket.\n");
    fprintf(stderr, "  -L --key-log \n");
    fprintf(stderr, "    Enable NSS key logging into the provided path\n");
    fprintf(stderr, "  -P --psk  \n"
                    "    A comma-separated list of psk parameters in this order: psk_identity, psk_secret and psk_hmac_alg.\n"
                    "    Note that the maximum number of permitted psks is 10, the psk-secret is hex-encoded, and whitespace is not allowed before or after the commas.\n"
                    "    Ex: --psk psk_id,psk_secret,SHA256 --psk shared_id,shared_secret,SHA384.\n");
    fprintf(stderr, "  -E ,--early-data \n");
    fprintf(stderr, "    Sends data in file path as early data to the server. Early data will only be sent if s2nc receives a session ticket and resumes a session.\n");
    fprintf(stderr, "  --renegotiation [accept|reject|wait]\n"
                    "    accept: Accept all server requests for a new handshake\n"
                    "    reject: Reject all server requests for a new handshake\n"
                    "    wait: Wait for additional application data before accepting server requests. Intended for the integ tests.\n");
    fprintf(stderr, "  --npn \n");
    fprintf(stderr, "    Indicates support for the NPN extension. The '--alpn' option MUST be used with this option to signal the protocols supported.");
    fprintf(stderr, "\n");
    fprintf(stderr, "  --buffered-send \n");
    fprintf(stderr, "    Set s2n_send to buffer up to  bytes before sending records over the wire.\n");
    fprintf(stderr, "  --prefer-low-latency\n");
    fprintf(stderr, "    Prefer low latency by clamping maximum outgoing record size at 1500.\n");
    fprintf(stderr, "  --prefer-throughput\n");
    fprintf(stderr, "    Prefer throughput by raising maximum outgoing record size to 16k\n");
    /* clang-format on */
    exit(1);
}
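
/*
 * Illustrative invocations of the options documented above; the hostnames, ports, and file
 * paths are placeholders rather than values taken from the sources:
 *
 *   s2nc -e example.com 443
 *   s2nc --insecure --psk psk_id,aabbccdd,SHA256 localhost 8443
 *   s2nc --reconnect --ticket-out /tmp/session.ticket example.com 443
 */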

size_t session_state_length = 0;
uint8_t *session_state = NULL;
static int test_session_ticket_cb(struct s2n_connection *conn, void *ctx, struct s2n_session_ticket *ticket)
{
    GUARD_EXIT_NULL(conn);
    GUARD_EXIT_NULL(ticket);

    GUARD_EXIT(s2n_session_ticket_get_data_len(ticket, &session_state_length), "Error getting ticket length ");
    session_state = realloc(session_state, session_state_length);
    if (session_state == NULL) {
        print_s2n_error("Error getting new session state");
        exit(1);
    }
    GUARD_EXIT(s2n_session_ticket_get_data(ticket, session_state_length, session_state), "Error getting ticket data");

    bool *session_ticket_recv = (bool *) ctx;
    *session_ticket_recv = 1;

    return S2N_SUCCESS;
}
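
/*
 * A minimal sketch, not in the original sources, of how a ticket captured by
 * test_session_ticket_cb() can be replayed on a fresh connection. The function name and the
 * `saved`/`saved_len` parameters are hypothetical; main() below does the equivalent with the
 * global session_state buffer.
 */
int resume_from_ticket_sketch(struct s2n_connection *conn, const uint8_t *saved, size_t saved_len)
{
    /* Feeding the serialized session back in lets the next handshake attempt resumption. */
    GUARD_RETURN(s2n_connection_set_session(conn, saved, saved_len), "Error setting session state in connection");
    return S2N_SUCCESS;
}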

struct reneg_req_ctx {
    bool do_renegotiate;
    bool wait;
    s2n_renegotiate_response response;
};

static int reneg_req_cb(struct s2n_connection *conn, void *context, s2n_renegotiate_response *response)
{
    GUARD_EXIT_NULL(conn);
    GUARD_EXIT_NULL(context);
    GUARD_EXIT_NULL(response);
    struct reneg_req_ctx *reneg_ctx = (struct reneg_req_ctx *) context;

    *response = reneg_ctx->response;
    if (*response == S2N_RENEGOTIATE_ACCEPT) {
        reneg_ctx->do_renegotiate = true;
    }
    return S2N_SUCCESS;
}
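
/*
 * A minimal sketch, not in the original sources: the simplest possible renegotiation request
 * callback, which rejects every server-initiated handshake. reneg_req_cb() above is the
 * configurable version that s2nc actually registers.
 */
int reneg_reject_all_sketch(struct s2n_connection *conn, void *context, s2n_renegotiate_response *response)
{
    (void) conn;
    (void) context;
    GUARD_EXIT_NULL(response);
    *response = S2N_RENEGOTIATE_REJECT;
    return S2N_SUCCESS;
}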

static void setup_s2n_config(struct s2n_config *config, const char *cipher_prefs, s2n_status_request_type type,
        struct verify_data *unsafe_verify_data, const char *host, const char *alpn_protocols, uint16_t mfl_value)
{
    if (config == NULL) {
        print_s2n_error("Error getting new config");
        exit(1);
    }

    GUARD_EXIT(s2n_config_set_cipher_preferences(config, cipher_prefs), "Error setting cipher prefs");

    GUARD_EXIT(s2n_config_set_status_request_type(config, type), "OCSP validation is not supported by the linked libCrypto implementation. It cannot be set.");

    if (s2n_config_set_verify_host_callback(config, unsafe_verify_host, unsafe_verify_data) < 0) {
        print_s2n_error("Error setting host name verification function.");
    }

    if (type == S2N_STATUS_REQUEST_OCSP) {
        if (s2n_config_set_check_stapled_ocsp_response(config, 1)) {
            print_s2n_error("OCSP validation is not supported by the linked libCrypto implementation. It cannot be set.");
        }
    }

    unsafe_verify_data->trusted_host = host;

    if (alpn_protocols) {
        /* Count the number of commas; this tells us how many protocols there
           are in the list */
        const char *ptr = alpn_protocols;
        int protocol_count = 1;
        while (*ptr) {
            if (*ptr == ',') {
                protocol_count++;
            }
            ptr++;
        }

        char **protocols = malloc(sizeof(char *) * protocol_count);
        if (!protocols) {
            fprintf(stderr, "Error allocating memory\n");
            exit(1);
        }

        const char *next = alpn_protocols;
        int idx = 0;
        int length = 0;
        ptr = alpn_protocols;
        while (*ptr) {
            if (*ptr == ',') {
                protocols[idx] = malloc(length + 1);
                if (!protocols[idx]) {
                    fprintf(stderr, "Error allocating memory\n");
                    exit(1);
                }
                memmove(protocols[idx], next, length);
                protocols[idx][length] = '\0';
                length = 0;
                idx++;
                ptr++;
                next = ptr;
            } else {
                length++;
                ptr++;
            }
        }
        if (ptr != next) {
            protocols[idx] = malloc(length + 1);
            if (!protocols[idx]) {
                fprintf(stderr, "Error allocating memory\n");
                exit(1);
            }
            memmove(protocols[idx], next, length);
            protocols[idx][length] = '\0';
        }

        GUARD_EXIT(s2n_config_set_protocol_preferences(config, (const char *const *) protocols, protocol_count), "Failed to set protocol preferences");

        while (protocol_count) {
            protocol_count--;
            free(protocols[protocol_count]);
        }
        free(protocols);
    }

    uint8_t mfl_code = 0;
    if (mfl_value > 0) {
        switch (mfl_value) {
            case 512:
                mfl_code = S2N_TLS_MAX_FRAG_LEN_512;
                break;
            case 1024:
                mfl_code = S2N_TLS_MAX_FRAG_LEN_1024;
                break;
            case 2048:
                mfl_code = S2N_TLS_MAX_FRAG_LEN_2048;
                break;
            case 4096:
                mfl_code = S2N_TLS_MAX_FRAG_LEN_4096;
                break;
            default:
                fprintf(stderr, "Invalid maximum fragment length value\n");
                exit(1);
        }
    }

    GUARD_EXIT(s2n_config_send_max_fragment_length(config, mfl_code), "Error setting maximum fragment length");
}
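
/*
 * For reference only (not in the original sources): when the protocol list is fixed at
 * compile time, the comma parsing above is unnecessary and the preferences can be set from
 * a static array directly, e.g.:
 *
 *     static const char *const protos[] = { "h2", "http/1.1" };
 *     GUARD_EXIT(s2n_config_set_protocol_preferences(config, protos, 2),
 *             "Failed to set protocol preferences");
 */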

int main(int argc, char *const *argv)
{
    struct addrinfo hints, *ai_list, *ai;
    int r, sockfd = 0;
    bool session_ticket_recv = 0;
    /* Optional args */
    const char *alpn_protocols = NULL;
    const char *server_name = NULL;
    const char *ca_file = NULL;
    const char *ca_dir = NULL;
    const char *client_cert = NULL;
    const char *client_key = NULL;
    bool client_cert_input = false;
    bool client_key_input = false;
    const char *ticket_out = NULL;
    char *ticket_in = NULL;
    uint16_t mfl_value = 0;
    uint8_t insecure = 0;
    int reconnect = 0;
    uint8_t session_ticket = 1;
    s2n_status_request_type type = S2N_STATUS_REQUEST_NONE;
    uint32_t dyn_rec_threshold = 0;
    uint8_t dyn_rec_timeout = 0;
    /* required args */
    const char *cipher_prefs = "default";
    int fips_mode = 0;
    const char *host = NULL;
    struct verify_data unsafe_verify_data;
    const char *port = "443";
    bool echo_input = false;
    const char *send_file = NULL;
    int use_corked_io = 0;
    uint8_t non_blocking = 0;
    const char *key_log_path = NULL;
    FILE *key_log_file = NULL;
    char *psk_optarg_list[S2N_MAX_PSK_LIST_LENGTH];
    size_t psk_list_len = 0;
    char *early_data = NULL;
    bool setup_reneg_cb = false;
    struct reneg_req_ctx reneg_ctx = { 0 };
    bool npn = false;
    uint32_t send_buffer_size = 0;
    bool prefer_low_latency = false;
    bool prefer_throughput = false;

    static struct option long_options[] = {
        { "alpn", required_argument, 0, 'a' },
        { "ciphers", required_argument, 0, 'c' },
        { "enter-fips-mode", no_argument, NULL, 'F' },
        { "echo", no_argument, 0, 'e' },
        { "send-file", required_argument, 0, OPT_SEND_FILE },
        { "help", no_argument, 0, 'h' },
        { "name", required_argument, 0, 'n' },
        { "status", no_argument, 0, 's' },
        { "mfl", required_argument, 0, 'm' },
        { "ca-file", required_argument, 0, 'f' },
        { "ca-dir", required_argument, 0, 'd' },
        { "cert", required_argument, 0, 'l' },
        { "key", required_argument, 0, 'k' },
        { "insecure", no_argument, 0, 'i' },
        { "reconnect", no_argument, 0, 'r' },
        { "ticket-out", required_argument, 0, OPT_TICKET_OUT },
        { "ticket-in", required_argument, 0, OPT_TICKET_IN },
        { "no-session-ticket", no_argument, 0, 'T' },
        { "dynamic", required_argument, 0, 'D' },
        { "timeout", required_argument, 0, 't' },
        { "corked-io", no_argument, 0, 'C' },
        { "tls13", no_argument, 0, '3' },
        { "non-blocking", no_argument, 0, 'B' },
        { "key-log", required_argument, 0, 'L' },
        { "psk", required_argument, 0, 'P' },
        { "early-data", required_argument, 0, 'E' },
        { "renegotiation", required_argument, 0, OPT_RENEG },
        { "npn", no_argument, 0, OPT_NPN },
        { "buffered-send", required_argument, 0, OPT_BUFFERED_SEND },
        { "prefer-low-latency", no_argument, NULL, OPT_PREFER_LOW_LATENCY },
        { "prefer-throughput", no_argument, NULL, OPT_PREFER_THROUGHPUT },
        { 0 },
    };
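    /* Entries without a short form (OPT_TICKET_IN, OPT_RENEG, ...) use values above 255, which
     * getopt_long() returns directly and the switch statement below handles by that value. */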

    while (1) {
        int option_index = 0;
        int c = getopt_long(argc, argv, "a:c:ehn:m:sf:d:l:k:D:t:irTCBL:P:E:", long_options, &option_index);
        if (c == -1) {
            break;
        }
        switch (c) {
            case 'a':
                alpn_protocols = optarg;
                break;
            case 'C':
                use_corked_io = 1;
                break;
            case 'c':
                cipher_prefs = optarg;
                break;
            case 'F':
                fips_mode = 1;
                break;
            case 'e':
                echo_input = true;
                break;
            case OPT_SEND_FILE:
                send_file = load_file_to_cstring(optarg);
                break;
            case 'h':
                usage();
                break;
            case 'n':
                server_name = optarg;
                break;
            case 's':
                type = S2N_STATUS_REQUEST_OCSP;
                break;
            case 'm':
                mfl_value = (uint16_t) atoi(optarg);
                break;
            case 'f':
                ca_file = optarg;
                break;
            case 'd':
                ca_dir = optarg;
                break;
            case 'l':
                client_cert = load_file_to_cstring(optarg);
                client_cert_input = true;
                break;
            case 'k':
                client_key = load_file_to_cstring(optarg);
                client_key_input = true;
                break;
            case 'i':
                insecure = 1;
                break;
            case 'r':
                reconnect = 5;
                break;
            case OPT_TICKET_OUT:
                ticket_out = optarg;
                break;
            case OPT_TICKET_IN:
                ticket_in = optarg;
                break;
            case 'T':
                session_ticket = 0;
                break;
            case 't':
                dyn_rec_timeout = (uint8_t) MIN(255, atoi(optarg));
                break;
            case 'D':
                errno = 0;
                dyn_rec_threshold = strtoul(optarg, 0, 10);
                if (errno == ERANGE) {
                    dyn_rec_threshold = 0;
                }
                break;
            case '3':
                /* Do nothing -- this argument is deprecated. */
                break;
            case 'B':
                non_blocking = 1;
                break;
            case 'L':
                key_log_path = optarg;
                break;
            case 'P':
                if (psk_list_len >= S2N_MAX_PSK_LIST_LENGTH) {
                    fprintf(stderr, "Error setting psks, maximum number of psks permitted is 10.\n");
                    exit(1);
                }
                psk_optarg_list[psk_list_len++] = optarg;
                break;
            case 'E':
                early_data = load_file_to_cstring(optarg);
                GUARD_EXIT_NULL(early_data);
                break;
            case OPT_RENEG:
                setup_reneg_cb = true;
                if (strcmp(optarg, "accept") == 0) {
                    reneg_ctx.response = S2N_RENEGOTIATE_ACCEPT;
                } else if (strcmp(optarg, "reject") == 0) {
                    reneg_ctx.response = S2N_RENEGOTIATE_REJECT;
                } else if (strcmp(optarg, "wait") == 0) {
                    reneg_ctx.response = S2N_RENEGOTIATE_ACCEPT;
                    reneg_ctx.wait = true;
                } else {
                    fprintf(stderr, "Unrecognized option: %s\n", optarg);
                    exit(1);
                }
                break;
            case OPT_NPN:
                npn = true;
                break;
            case OPT_BUFFERED_SEND: {
                intmax_t send_buffer_size_scanned_value = strtoimax(optarg, 0, 10);
                if (send_buffer_size_scanned_value > UINT32_MAX || send_buffer_size_scanned_value < 0) {
                    fprintf(stderr, " must be a positive 32 bit value\n");
                    exit(1);
                }
                send_buffer_size = (uint32_t) send_buffer_size_scanned_value;
                break;
            }
            case OPT_PREFER_LOW_LATENCY:
                prefer_low_latency = true;
                break;
            case OPT_PREFER_THROUGHPUT:
                prefer_throughput = true;
                break;
            case '?':
            default:
                usage();
                break;
        }
    }

    if (optind < argc) {
        host = argv[optind++];
    }

    /* cppcheck-suppress duplicateCondition */
    if (optind < argc) {
        port = argv[optind++];
    }

    if (!host) {
        usage();
    }

    if (!server_name) {
        server_name = host;
    }

    memset(&hints, 0, sizeof(hints));

    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;

    if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) {
        fprintf(stderr, "Error disabling SIGPIPE\n");
        exit(1);
    }

    if (fips_mode) {
#ifndef S2N_INTERN_LIBCRYPTO
    #if defined(OPENSSL_FIPS) || defined(OPENSSL_IS_AWSLC)
        if (FIPS_mode_set(1) == 0) {
            unsigned long fips_rc = ERR_get_error();
            char ssl_error_buf[256]; /* Openssl claims you need no more than 120 bytes for error strings */
            fprintf(stderr, "s2nc failed to enter FIPS mode with RC: %lu; String: %s\n", fips_rc, ERR_error_string(fips_rc, ssl_error_buf));
            exit(1);
        }
        printf("s2nc entered FIPS mode\n");
    #else
        fprintf(stderr, "Error entering FIPS mode. s2nc was not built against a FIPS-capable libcrypto.\n");
        exit(1);
    #endif
#endif
    }
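    /* When s2n interns libcrypto (S2N_INTERN_LIBCRYPTO is defined), the block above is compiled
     * out entirely, so --enter-fips-mode is effectively a no-op in that build. */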

    if (prefer_low_latency && prefer_throughput) {
        fprintf(stderr, "prefer-throughput and prefer-low-latency options are mutually exclusive\n");
        exit(1);
    }

    GUARD_EXIT(s2n_init(), "Error running s2n_init()");

    if ((r = getaddrinfo(host, port, &hints, &ai_list)) != 0) {
        fprintf(stderr, "error: %s\n", gai_strerror(r));
        exit(1);
    }

    do {
        int connected = 0;
        for (ai = ai_list; ai != NULL; ai = ai->ai_next) {
            if ((sockfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol)) == -1) {
                continue;
            }

            if (connect(sockfd, ai->ai_addr, ai->ai_addrlen) == -1) {
                close(sockfd);
                continue;
            }

            connected = 1;
            /* connect() succeeded */
            break;
        }

        if (connected == 0) {
            fprintf(stderr, "Failed to connect to %s:%s\n", host, port);
            exit(1);
        }

        if (non_blocking) {
            int flags = fcntl(sockfd, F_GETFL, 0);
            if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) < 0) {
                fprintf(stderr, "fcntl error: %s\n", strerror(errno));
                exit(1);
            }
        }

        struct s2n_config *config = s2n_config_new();
        setup_s2n_config(config, cipher_prefs, type, &unsafe_verify_data, host, alpn_protocols, mfl_value);

        if (send_buffer_size != 0) {
            GUARD_EXIT(s2n_config_set_send_buffer_size(config, send_buffer_size), "Error setting send buffer size");
        }

        if (client_cert_input != client_key_input) {
            print_s2n_error("Client cert/key pair must be given.");
        }

        if (client_cert_input) {
            struct s2n_cert_chain_and_key *chain_and_key = s2n_cert_chain_and_key_new();
            GUARD_EXIT(s2n_cert_chain_and_key_load_pem(chain_and_key, client_cert, client_key), "Error getting certificate/key");
            GUARD_EXIT(s2n_config_add_cert_chain_and_key_to_store(config, chain_and_key), "Error setting certificate/key");
        }

        if (ca_file || ca_dir) {
            GUARD_EXIT(s2n_config_wipe_trust_store(config), "Error wiping trust store");
            if (s2n_config_set_verification_ca_location(config, ca_file, ca_dir) < 0) {
                print_s2n_error("Error setting CA file for trust store.");
            }
        } else if (insecure) {
            GUARD_EXIT(s2n_config_disable_x509_verification(config), "Error disabling X.509 validation");
        }

        if (session_ticket) {
            GUARD_EXIT(s2n_config_set_session_tickets_onoff(config, 1), "Error enabling session tickets");
            GUARD_EXIT(s2n_config_set_session_ticket_cb(config, test_session_ticket_cb, &session_ticket_recv), "Error setting session ticket callback");
            session_ticket_recv = 0;
        }

        if (key_log_path) {
            key_log_file = fopen(key_log_path, "a");
            GUARD_EXIT(key_log_file == NULL ? S2N_FAILURE : S2N_SUCCESS, "Failed to open key log file");
            GUARD_EXIT(
                    s2n_config_set_key_log_cb(
                            config,
                            key_log_callback,
                            (void *) key_log_file),
                    "Failed to set key log callback");
        }

        if (setup_reneg_cb) {
            GUARD_EXIT(s2n_config_set_renegotiate_request_cb(config, reneg_req_cb, &reneg_ctx),
                    "Error setting renegotiation request callback");
        }

        if (npn) {
            GUARD_EXIT(s2n_config_set_npn(config, 1), "Error setting npn support");
        }

        struct s2n_connection *conn = s2n_connection_new(S2N_CLIENT);

        if (conn == NULL) {
            print_s2n_error("Error getting new connection");
            exit(1);
        }

        GUARD_EXIT(s2n_connection_set_config(conn, config), "Error setting configuration");

        GUARD_EXIT(s2n_set_server_name(conn, server_name), "Error setting server name");

        GUARD_EXIT(s2n_connection_set_fd(conn, sockfd), "Error setting file descriptor");

        GUARD_EXIT(s2n_connection_set_client_auth_type(conn, S2N_CERT_AUTH_OPTIONAL), "Error setting ClientAuth optional");

        if (use_corked_io) {
            GUARD_EXIT(s2n_connection_use_corked_io(conn), "Error setting corked io");
        }

        /* Read in session ticket from previous session */
        if (ticket_in) {
            GUARD_EXIT(get_file_size(ticket_in, &session_state_length), "Failed to read ticket-in file");
            free(session_state);
            session_state = calloc(session_state_length, sizeof(uint8_t));
            GUARD_EXIT_NULL(session_state);
            GUARD_EXIT(load_file_to_array(ticket_in, session_state, session_state_length), "Failed to read ticket-in file");
        }

        /* Update session state in connection if exists */
        if (session_state_length > 0) {
            GUARD_EXIT(s2n_connection_set_session(conn, session_state, session_state_length), "Error setting session state in connection");
        }

        GUARD_EXIT(s2n_setup_external_psk_list(conn, psk_optarg_list, psk_list_len), "Error setting external psk list");

        if (prefer_throughput) {
            GUARD_RETURN(s2n_connection_prefer_throughput(conn), "Error setting prefer throughput");
        }

        if (prefer_low_latency) {
            GUARD_RETURN(s2n_connection_prefer_low_latency(conn), "Error setting prefer low latency");
        }

        if (early_data) {
            if (!session_ticket) {
                print_s2n_error("Early data can only be used with session tickets.");
                exit(1);
            }
            /* Send early data if we have received a session ticket from the server */
            if (session_state_length) {
                uint32_t early_data_length = strlen(early_data);
                GUARD_EXIT(early_data_send(conn, (uint8_t *) early_data, early_data_length), "Error sending early data");
            }
        }

        /* See echo.c */
        if (negotiate(conn, sockfd) != 0) {
            /* Error is printed in negotiate */
            S2N_ERROR_PRESERVE_ERRNO();
        }

        printf("Connected to %s:%s\n", host, port);

        /* Save session state from connection if reconnect is enabled. */
        if (reconnect > 0 || ticket_out) {
            if (conn->actual_protocol_version >= S2N_TLS13) {
                if (!session_ticket) {
                    print_s2n_error("s2nc can only reconnect in TLS1.3 with session tickets.");
                    exit(1);
                }
                GUARD_EXIT(echo(conn, sockfd, &session_ticket_recv), "Error calling echo");
            } else {
                if (!session_ticket && s2n_connection_get_session_id_length(conn) <= 0) {
                    print_s2n_error("Endpoint sent empty session id so cannot resume session");
                    exit(1);
                }
                free(session_state);
                session_state_length = s2n_connection_get_session_length(conn);
                session_state = calloc(session_state_length, sizeof(uint8_t));
                GUARD_EXIT_NULL(session_state);
                if (s2n_connection_get_session(conn, session_state, session_state_length) != session_state_length) {
                    print_s2n_error("Error getting serialized session state");
                    exit(1);
                }
            }
            if (ticket_out) {
                GUARD_EXIT(write_array_to_file(ticket_out, session_state, session_state_length), "Failed to write to ticket-out file");
            }
        }

        if (dyn_rec_threshold > 0 && dyn_rec_timeout > 0) {
            s2n_connection_set_dynamic_record_threshold(conn, dyn_rec_threshold, dyn_rec_timeout);
        }

        GUARD_EXIT(s2n_connection_free_handshake(conn), "Error freeing handshake memory after negotiation");

        if (send_file != NULL) {
            printf("Sending file contents:\n%s\n", send_file);

            unsigned long send_file_len = strlen(send_file);
            s2n_blocked_status blocked = S2N_NOT_BLOCKED;
            send_data(conn, sockfd, send_file, send_file_len, &blocked);
        }

        while (echo_input) {
            fflush(stdout);
            fflush(stderr);
            echo(conn, sockfd, &reneg_ctx.do_renegotiate);

            if (!reneg_ctx.do_renegotiate) {
                break;
            }

            reneg_ctx.do_renegotiate = false;
            GUARD_EXIT(renegotiate(conn, sockfd, reneg_ctx.wait), "Renegotiation failed");
        }

        GUARD_EXIT(wait_for_shutdown(conn, sockfd), "Error closing connection");

        GUARD_EXIT(s2n_connection_free(conn), "Error freeing connection");

        GUARD_EXIT(s2n_config_free(config), "Error freeing configuration");

        close(sockfd);
        reconnect--;

    } while (reconnect >= 0);

    if (key_log_file) {
        fclose(key_log_file);
    }

    GUARD_EXIT(s2n_cleanup(), "Error running s2n_cleanup()");

    free(early_data);
    free(session_state);
    freeaddrinfo(ai_list);
    return 0;
}
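
/*
 * Illustrative resumption round trip using the ticket options above; the path is a
 * placeholder:
 *
 *   # First connection receives a ticket and stores it:
 *   s2nc --ticket-out /tmp/s2n_ticket example.com 443
 *   # A later connection resumes from the stored ticket:
 *   s2nc --ticket-in /tmp/s2n_ticket example.com 443
 */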
aws-crt-python-0.20.4+dfsg/crt/s2n/bin/s2nd.c000066400000000000000000000705751456575232400205220ustar00rootroot00000000000000/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 

#ifndef S2N_INTERN_LIBCRYPTO
    #include 
    #include 
#endif

#include "api/s2n.h"
#include "api/unstable/npn.h"
#include "common.h"
#include "utils/s2n_safety.h"

#define MAX_CERTIFICATES 50

/*
 * s2nd is an example server that uses many s2n-tls APIs.
 * It is intended for testing purposes only, and should not be used in production.
 */

static char default_certificate_chain[] =
        "-----BEGIN CERTIFICATE-----"
        "MIIDHTCCAgWgAwIBAgIUPxywpg3/+VHmj8jJSvK62XC06zMwDQYJKoZIhvcNAQEL"
        "BQAwHjEcMBoGA1UEAwwTczJuVGVzdEludGVybWVkaWF0ZTAgFw0yMDAxMjQwMTEw"
        "MjFaGA8yMTE5MTIzMTAxMTAyMVowGDEWMBQGA1UEAwwNczJuVGVzdFNlcnZlcjCC"
        "ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJUbMpdROM6cjb8xgr5kgKHn"
        "JVDfhbLg4pxBwWwlayb6/N60JLG9KzWAWhZBmz+Px6kr/1dL6+bL3mLuNBCQpYBS"
        "Pee2n7KL9PvsMYZmnYFyn94bXbjBCRxGR+a9lcGHLlZ4C+rrLNi9pUwxf7VIRglR"
        "zwHWAFg5xTX6lCmziNM4OMkq8lHkLopHDUg5yI4VTc3EEGqDIf3+0BheIHcUFbIW"
        "kFOjRDdL3lMGKEj0+LErzzbhJczBlRMqSMiuYeaWgORLpRNtMeNmbR8oLJFchpF0"
        "A9fIO2/Yg+nclcDDhsUBkkfcIKRySGDumKLuYM+hOHp5vQo8tcvyQ6s3U5YULQUC"
        "AwEAAaNXMFUwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUkVKVmfjICpx4fkvJO6YJ"
        "mdoKz3owDgYDVR0PAQH/BAQDAgPoMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMBMA0G"
        "CSqGSIb3DQEBCwUAA4IBAQBXoWDI1Gi9snC4K6S7E0AoLmGEPUWzc4fd4Cbj9PRp"
        "mSKpsJOYjmneIV34WqnvUXrBkkzblEb9RdszN96WuRAaZJQegRtKOWN5Iggd4sHM"
        "8XEx/LeJHc08uSb2d/TnhhOPALoJl/w6M5e6yOezCEJorsOXuVBcbuEKfne7oMA1"
        "GziFnVPtwiwXxsX16KilsQRylnK0bV/x1BOgYByCDcXorMndsAYjn4yG1D4l8TbC"
        "kCtK1bafEVoASpOFQ8tSeOXBL7Fvw9mFFzs3/ajBTz2nBLDsnP8XH5C/vy8wNGSd"
        "Tdcs7DRLYhNJxYopcMgCwyyCAtEFcHkovCSrJ6HUl/ko"
        "-----END CERTIFICATE-----"
        "-----BEGIN CERTIFICATE-----"
        "MIIDCTCCAfGgAwIBAgIUfdybeOdDMd7cPXk6RTcEqeM3IEIwDQYJKoZIhvcNAQEL"
        "BQAwFjEUMBIGA1UEAwwLczJuVGVzdFJvb3QwIBcNMjAwMTI0MDEwOTUzWhgPMjEx"
        "OTEyMzEwMTA5NTNaMB4xHDAaBgNVBAMME3MyblRlc3RJbnRlcm1lZGlhdGUwggEi"
        "MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2NsDkrZjYbyVeF1R9337y9OHM"
        "C2xSRGB6SHrVG1bQZlPxI+E6DqDJcMB4tFLkA7AJxxRLxA7KvO9PzcHAlsqvYcMV"
        "gOSAjUZ0Eiwwf6Rtgo2yByj2n1K5XDN3bpt1rROD0BIEnaU9GZd3U0QUYHBRfp0E"
        "IdeWuRrlFbPpWXnBaQB/2jEfCuZzpPOiKMWt99GQ4bFBOSzpYdXLALGfb15Kr6RF"
        "YoMlsyeijNeePxLeYgracu+vzJLvEzx1U7OGnlWz+VKBw/mz3gABqFfxurN5E8yb"
        "4AWJ5kEUJobYcxwe+DoimPdPTWgByJlMpKjfIbnroz/oTZMiNfUCtKT3GTejAgMB"
        "AAGjRTBDMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFEasSJIPBZTXyYjI"
        "CN2m1Ttz3sUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEAxveh"
        "GKJPu7DXjoMePzlRGML2iIDT6MgKpsMnO5sNgUbJTFV3KeuASRm1SXVrVFHcQDov"
        "l9P10ff0J9KOVrRCawMZZxjjtNAIrSW0G7fwmTgJMTuM5vaaGRjKy018LApcr//Q"
        "Nwjh4sw9KOtNIE9krT06kli9zjsgr/EWwPCHSin8oONDgCNn1WgtrSMexsF1BSzU"
        "OTq+nyn4nOPOEUthjmepG2eDkd17MNJ6GdKYnFRmC+ctSH028akERhz+EtavU4Cd"
        "2eSFTKtbxOuZXyfsOwjhrufp/Ss9i57x3XotBNJ8Fv7VpxI19+Zag4DMGzd3Pisu"
        "Q1VpfValnMGtVWPleg=="
        "-----END CERTIFICATE-----"
        "-----BEGIN CERTIFICATE-----"
        "MIIC/jCCAeagAwIBAgIUFFjxpSf0mUsrVbyLPQhccDYfixowDQYJKoZIhvcNAQEL"
        "BQAwFjEUMBIGA1UEAwwLczJuVGVzdFJvb3QwIBcNMjAwMTI0MDEwODIyWhgPMjEx"
        "OTEyMzEwMTA4MjJaMBYxFDASBgNVBAMMC3MyblRlc3RSb290MIIBIjANBgkqhkiG"
        "9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz3AaOAlkcxJHryCI9SfwB9q4PA53hv5tz4ZL"
        "be37b69v58mfP+D18cWIBHUmkmN6gWWoWZ/9hv75pxcNXW0zPn7+wOVvXLUjtmkq"
        "1IGT/mykhasw00viaBFAuBHZ5iLwfc4/cjUFAPVCKLmfv5Xs7TJVzWA/0mR4r1h8"
        "uFqqXczkVMklIbsOIrlZXz8ifQs3DpFA2FeoziEh+Pcb4c3QBPgCHFDEGyTSdqo9"
        "+NbS+iRlw0T6tqUOpC0DdKXo/3mJNBmy4XPahTi9zgsu7b+UVqemL7eXXf/iSr5y"
        "iwJKJjz+N/rLpcF1VJtF8q0fpHagzljQaN7/emjg7BplUUyLawIDAQABo0IwQDAP"
        "BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTDmXkyQEJ7ZciyE4KF7wAJKDxMfDAO"
        "BgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggEBAFobyhsc7mYoGaA7N4Pp"
        "it+MQZZNzWte5vWal/3/2V7ZGrJsgeCPwLblzzTmey85RilX6ovMQHEqT1vBFSHq"
        "nntMZnHkEl2QLU8XopJWR4MXK7LzjjQYaXiZhGbJbtylVSfATAa/ZzdgjBx1C8aD"
        "IM1+ELGCP/UHD0YEJkFoxSUwXGAXoV8I+cPDAWHC6VnC4mY8qubhx95FpX02ERnz"
        "1Cw2YWtntyO8P52dEJD1+0EJjtVX4Bj5wwgJHHbDkPP1IzFrR/uBC2LCjtRY+UtZ"
        "kfoDfWu2tslkLK7/LaC5qZyCPKnpPHLLz8gUWKlvbuejM99FTlBg/tcH+bv5x7WB"
        "MZ8="
        "-----END CERTIFICATE-----";

static char default_private_key[] =
        "-----BEGIN RSA PRIVATE KEY-----"
        "MIIEogIBAAKCAQEAlRsyl1E4zpyNvzGCvmSAoeclUN+FsuDinEHBbCVrJvr83rQk"
        "sb0rNYBaFkGbP4/HqSv/V0vr5sveYu40EJClgFI957afsov0++wxhmadgXKf3htd"
        "uMEJHEZH5r2VwYcuVngL6uss2L2lTDF/tUhGCVHPAdYAWDnFNfqUKbOI0zg4ySry"
        "UeQuikcNSDnIjhVNzcQQaoMh/f7QGF4gdxQVshaQU6NEN0veUwYoSPT4sSvPNuEl"
        "zMGVEypIyK5h5paA5EulE20x42ZtHygskVyGkXQD18g7b9iD6dyVwMOGxQGSR9wg"
        "pHJIYO6You5gz6E4enm9Cjy1y/JDqzdTlhQtBQIDAQABAoIBAGTaSJXg8jON4LJ5"
        "op11DSx1U+An0B71zVEziMjFZnyvN2rLHia6dQdzEXwMVB3h+oKKp+M8DwvEyV7R"
        "D5ZEwCzTc9vOwqXZ1JKxZ64oqlBsX4WzrOjSaH8fanK/uRN1g/ooqKb0+xh+7ddj"
        "g6XyhKy5EPOE9Ca4rJOeMakjLmDuleQecT/DixYV6azhfaJoD70XZJWv3YzSpu/X"
        "Ma+i3of0alsG/lROjNtEXE3nKzcTUgyAUoQeYRwCVpgssg/4VAUPJNDP4dVmxW8f"
        "eNmjlTyXmR9S08SXkqmCHe2mBUsZY9nqcDE6ZWILZKFWIfZD9W+j2ce0FMvcc9kz"
        "psxaUQECgYEAxqwsb5aQy6HBF54tdkHbUQEJMelSLNW0G1GUrcLB7eqL7qo3dUA8"
        "8PDQ/dTwmmJ7aE0SK2xkQDVKXNbV4OvUNgP6tbzLWEbvmuFAEg5X1jFH2VSdwQhl"
        "RDwTQw3wPZ5udy64L6gmsdDch+I7l1v4ex66RWFW+4WIs1altsLiJa0CgYEAwCGW"
        "2cjtZ3kIzWgxf7DdnoUTwBM1ATBUYvx7uqVq+dbc/p8cSeMSPz3LUluaVJ2EOjEV"
        "QWhx0Ih5qeitzReHRU0OHgxEgjbpJwhseD9O5POSd+fE3TtDQArOxyw4CIJKk4Z2"
        "QmqzaO/LboN3Tp+/N9zfVoNZKHcCNra/uKNTH7kCgYA3QFazSdpG51s96D2Yb8RA"
        "iNs3yD2UPnJyToPctxcbxWjZHPmDYDQShcZ5cSjgppbPcO+mp+RRfwCJRS4B+VPx"
        "GbY1qKWcjU3BcvdQjjCbXuUuabvdnSocieCJe2zelhr+hj2u80KfnQhXufD8rRUz"
        "mF4RQXrhREe6KFS5uQUPmQKBgE4rXFyvSyfWLqajxb/WDdT4/9gd+GrLZwn+/7go"
        "pSWRLcjKo4/MOxhP4/FWI6xZifrDDYrXG7dkT1u5tzzCXd7sQtom05jHDoU7ACbM"
        "WyT7lJQEUCxSeEIOI6MVcpbDq+PpySOsleIT7gjApEHw7LOlwZhJSHUWNmhcYhSV"
        "HrTBAoGADAvBqV7JItjm2+qkXXEdPVzOunqjQdnXZjMAJ75PhHnLCCfnTRu53hT3"
        "JxDETLLa/r42PlqGZ6bqSW+C+ObgYOvvySqvX8CE9o208ZwCLjHYxuYLH/86Lppr"
        "ggF9KQ0xWz7Km3GXv5+bwM5bcgt1A/s6sZCimXuj3Fle3RqOTF0="
        "-----END RSA PRIVATE KEY-----";

#define OPT_BUFFERED_SEND 1000

void usage()
{
    /* clang-format off */
    fprintf(stderr, "usage: s2nd [options] host port\n");
    fprintf(stderr, " host: hostname or IP address to listen on\n");
    fprintf(stderr, " port: port to listen on\n");
    fprintf(stderr, "\n Options:\n\n");
    fprintf(stderr, "  -a [protocol]\n");
    fprintf(stderr, "  --alpn [protocol]\n");
    fprintf(stderr, "    Sets a single application protocol supported by this server.\n");
    fprintf(stderr, "  -c [version_string]\n");
    fprintf(stderr, "  --ciphers [version_string]\n");
    fprintf(stderr, "    Set the cipher preference version string. Defaults to \"default\" \n");
    fprintf(stderr, "  --enter-fips-mode\n");
    fprintf(stderr, "    Enter libcrypto's FIPS mode. The linked version of OpenSSL must be built with the FIPS module.\n");
    fprintf(stderr, "  --cert\n");
    fprintf(stderr, "    Path to a PEM encoded certificate [chain]. Option can be repeated to load multiple certs.\n");
    fprintf(stderr, "  --key\n");
    fprintf(stderr, "    Path to a PEM encoded private key that matches cert. Option can be repeated to load multiple certs.\n");
    fprintf(stderr, "  -m\n");
    fprintf(stderr, "  --mutualAuth\n");
    fprintf(stderr, "    Request a Client Certificate. Any RSA Certificate will be accepted.\n");
    fprintf(stderr, "  -n\n");
    fprintf(stderr, "  --negotiate\n");
    fprintf(stderr, "    Only perform tls handshake and then shutdown the connection\n");
    fprintf(stderr, "  --parallelize\n");
    fprintf(stderr, "    Create a new Connection handler thread for each new connection. Useful for tests with lots of connections.\n");
    fprintf(stderr, "    Warning: this option isn't compatible with TLS Resumption, since each thread gets its own Session cache.\n");
    fprintf(stderr, "  --prefer-low-latency\n");
    fprintf(stderr, "    Prefer low latency by clamping maximum outgoing record size at 1500.\n");
    fprintf(stderr, "  --prefer-throughput\n");
    fprintf(stderr, "    Prefer throughput by raising maximum outgoing record size to 16k\n");
    fprintf(stderr, "  --enable-mfl\n");
    fprintf(stderr, "    Accept client's TLS maximum fragment length extension request\n");
    fprintf(stderr, "  --ocsp\n");
    fprintf(stderr, "    Path to a DER formatted OCSP response for stapling\n");
    fprintf(stderr, "  -s\n");
    fprintf(stderr, "  --self-service-blinding\n");
    fprintf(stderr, "    Don't introduce 10-30 second delays on TLS Handshake errors. \n");
    fprintf(stderr, "    Warning: this should only be used for testing since skipping blinding may allow timing side channels.\n");
    fprintf(stderr, "  -t,--ca-file [file path]\n");
    fprintf(stderr, "    Location of trust store CA file (PEM format). If neither -t or -d are specified. System defaults will be used.");
    fprintf(stderr, "    This option is only used if mutual auth is enabled.\n");
    fprintf(stderr, "  -d,--ca-dir [directory path]\n");
    fprintf(stderr, "    Directory containing hashed trusted certs. If neither -t or -d are specified. System defaults will be used.");
    fprintf(stderr, "    This option is only used if mutual auth is enabled.\n");
    fprintf(stderr, "  -i,--insecure\n");
    fprintf(stderr, "    Turns off certification validation altogether.\n");
    fprintf(stderr, "  --stk-file\n");
    fprintf(stderr, "    Location of key file used for encryption and decryption of session ticket.\n");
    fprintf(stderr, "  -T,--no-session-ticket\n");
    fprintf(stderr, "    Disable session ticket for resumption.\n");
    fprintf(stderr, "  -C,--corked-io\n");
    fprintf(stderr, "    Turn on corked io\n");
    fprintf(stderr, "  --non-blocking\n");
    fprintf(stderr, "    Set the non-blocking flag on the connection's socket.\n");
    fprintf(stderr, "  -w --https-server\n");
    fprintf(stderr, "    Run s2nd in a simple https server mode.\n");
    fprintf(stderr, "  -b --https-bench \n");
    fprintf(stderr, "    Send number of bytes in https server mode to test throughput.\n");
    fprintf(stderr, "  -L --key-log \n");
    fprintf(stderr, "    Enable NSS key logging into the provided path\n");
    fprintf(stderr, "  -P --psk  \n"
                    "    A comma-separated list of psk parameters in this order: psk_identity, psk_secret and psk_hmac_alg.\n"
                    "    Note that the maximum number of permitted psks is 10, the psk-secret is hex-encoded, and whitespace is not allowed before or after the commas.\n"
                    "    Ex: --psk psk_id,psk_secret,SHA256 --psk shared_id,shared_secret,SHA384.\n");
    fprintf(stderr, "  -E, --max-early-data \n");
    fprintf(stderr, "    Sets maximum early data allowed in session tickets. \n");
    fprintf(stderr, "  -N --npn \n");
    fprintf(stderr, "    Indicates support for the NPN extension. The '--alpn' option MUST be used with this option to signal the protocols supported.");
    fprintf(stderr, "  -h,--help\n");
    fprintf(stderr, "    Display this message and quit.\n");
    fprintf(stderr, "  --buffered-send \n");
    fprintf(stderr, "    Set s2n_send to buffer up to  bytes before sending records over the wire.\n");
    fprintf(stderr, "  -X, --max-conns \n");
    fprintf(stderr, "    Sets the max number of connections s2nd will accept before shutting down.\n");
    /* clang-format on */
    exit(1);
}
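
/* Example invocation (illustrative only; the certificate/key paths and port are
 * placeholders, not files shipped with this tool):
 *
 *   s2nd --cert server_chain.pem --key server_key.pem --alpn h2 127.0.0.1 8443
 *
 * This listens on 127.0.0.1:8443, terminates TLS using the given certificate
 * chain and private key, and advertises "h2" via ALPN.
 */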

int handle_connection(int fd, struct s2n_config *config, struct conn_settings settings)
{
    struct s2n_connection *conn = s2n_connection_new(S2N_SERVER);
    if (!conn) {
        print_s2n_error("Error getting new s2n connection");
        S2N_ERROR_PRESERVE_ERRNO();
    }

    s2n_setup_server_connection(conn, fd, config, settings);

    if (negotiate(conn, fd) != S2N_SUCCESS) {
        if (settings.mutual_auth) {
            if (!s2n_connection_client_cert_used(conn)) {
                print_s2n_error("Error: Mutual Auth was required, but not negotiated");
            }
        }

        /* Error is printed in negotiate */
        S2N_ERROR_PRESERVE_ERRNO();
    }

    GUARD_EXIT(s2n_connection_free_handshake(conn), "Error freeing handshake memory after negotiation");

    if (settings.https_server) {
        https(conn, settings.https_bench);
    } else if (!settings.only_negotiate) {
        bool stop_echo = false;
        echo(conn, fd, &stop_echo);
    }

    GUARD_RETURN(wait_for_shutdown(conn, fd), "Error closing connection");

    GUARD_RETURN(s2n_connection_wipe(conn), "Error wiping connection");

    GUARD_RETURN(s2n_connection_free(conn), "Error freeing connection");

    return 0;
}

int main(int argc, char *const *argv)
{
    struct addrinfo hints, *ai;
    int r, sockfd = 0;

    /* required args */
    const char *host = NULL;
    const char *port = NULL;

    const char *ocsp_response_file_path = NULL;
    const char *session_ticket_key_file_path = NULL;
    const char *cipher_prefs = "default";
    const char *alpn = NULL;
    const char *key_log_path = NULL;

    /* The certificates provided by the user. If there are none provided, we will use the hardcoded default cert.
     * The associated private key for each cert will be at the same index in private_keys. If the user mixes up the
     * order of --cert --key for a given cert/key pair, s2n will fail to load the cert and s2nd will exit.
     */
    int num_user_certificates = 0;
    int num_user_private_keys = 0;
    const char *certificates[MAX_CERTIFICATES] = { 0 };
    const char *private_keys[MAX_CERTIFICATES] = { 0 };

    struct conn_settings conn_settings = { 0 };
    int fips_mode = 0;
    int parallelize = 0;
    int non_blocking = 0;
    long int bytes = 0;
    conn_settings.session_ticket = 1;
    conn_settings.session_cache = 1;
    conn_settings.max_conns = -1;
    conn_settings.psk_list_len = 0;
    int max_early_data = 0;
    uint32_t send_buffer_size = 0;
    bool npn = false;

    struct option long_options[] = {
        { "ciphers", required_argument, NULL, 'c' },
        { "enable-mfl", no_argument, NULL, 'e' },
        { "enter-fips-mode", no_argument, NULL, 'f' },
        { "help", no_argument, NULL, 'h' },
        { "key", required_argument, NULL, 'k' },
        { "prefer-low-latency", no_argument, NULL, 'l' },
        { "mutualAuth", no_argument, NULL, 'm' },
        { "negotiate", no_argument, NULL, 'n' },
        { "ocsp", required_argument, NULL, 'o' },
        { "parallelize", no_argument, ¶llelize, 1 },
        { "prefer-throughput", no_argument, NULL, 'p' },
        { "cert", required_argument, NULL, 'r' },
        { "self-service-blinding", no_argument, NULL, 's' },
        { "ca-dir", required_argument, 0, 'd' },
        { "ca-file", required_argument, 0, 't' },
        { "insecure", no_argument, 0, 'i' },
        { "stk-file", required_argument, 0, 'a' },
        { "no-session-ticket", no_argument, 0, 'T' },
        { "corked-io", no_argument, 0, 'C' },
        { "max-conns", optional_argument, 0, 'X' },
        { "tls13", no_argument, 0, '3' },
        { "https-server", no_argument, 0, 'w' },
        { "https-bench", required_argument, 0, 'b' },
        { "alpn", required_argument, 0, 'A' },
        { "npn", no_argument, 0, 'N' },
        { "non-blocking", no_argument, 0, 'B' },
        { "key-log", required_argument, 0, 'L' },
        { "psk", required_argument, 0, 'P' },
        { "max-early-data", required_argument, 0, 'E' },
        { "buffered-send", required_argument, 0, OPT_BUFFERED_SEND },
        /* Per getopt(3) the last element of the array has to be filled with all zeros */
        { 0 },
    };
    while (1) {
        int option_index = 0;
        int c = getopt_long(argc, argv, "c:hmnst:d:iTCX::wb:A:P:E:", long_options, &option_index);
        if (c == -1) {
            break;
        }

        switch (c) {
            case 0:
                /* getopt_long() returns 0 if an option.flag is non-null (Eg "parallelize") */
                break;
            case 'C':
                conn_settings.use_corked_io = 1;
                break;
            case 'c':
                cipher_prefs = optarg;
                break;
            case 'e':
                conn_settings.enable_mfl = 1;
                break;
            case 'f':
                fips_mode = 1;
                break;
            case 'h':
                usage();
                break;
            case 'k':
                if (num_user_private_keys == MAX_CERTIFICATES) {
                    fprintf(stderr, "Cannot support more than %d certificates!\n", MAX_CERTIFICATES);
                    exit(1);
                }
                private_keys[num_user_private_keys] = load_file_to_cstring(optarg);
                num_user_private_keys++;
                break;
            case 'l':
                conn_settings.prefer_low_latency = 1;
                break;
            case 'm':
                conn_settings.mutual_auth = 1;
                break;
            case 'n':
                conn_settings.only_negotiate = 1;
                break;
            case 'o':
                ocsp_response_file_path = optarg;
                break;
            case 'p':
                conn_settings.prefer_throughput = 1;
                break;
            case 'r':
                if (num_user_certificates == MAX_CERTIFICATES) {
                    fprintf(stderr, "Cannot support more than %d certificates!\n", MAX_CERTIFICATES);
                    exit(1);
                }
                certificates[num_user_certificates] = load_file_to_cstring(optarg);
                num_user_certificates++;
                break;
            case 's':
                conn_settings.self_service_blinding = 1;
                break;
            case 'd':
                conn_settings.ca_dir = optarg;
                break;
            case 't':
                conn_settings.ca_file = optarg;
                break;
            case 'i':
                conn_settings.insecure = 1;
                break;
            case 'a':
                session_ticket_key_file_path = optarg;
                break;
            case 'T':
                conn_settings.session_ticket = 0;
                break;
            case '3':
                /* Do nothing -- this argument is deprecated */
                break;
            case 'X':
                if (optarg == NULL) {
                    conn_settings.max_conns = 1;
                } else {
                    conn_settings.max_conns = atoi(optarg);
                }
                break;
            case 'w':
                fprintf(stdout, "Running s2nd in simple https server mode\n");
                conn_settings.https_server = 1;
                break;
            case 'b':
                bytes = strtoul(optarg, NULL, 10);
                GUARD_EXIT(bytes, "https-bench bytes needs to be some positive long value.");
                conn_settings.https_bench = bytes;
                break;
            case OPT_BUFFERED_SEND: {
                intmax_t send_buffer_size_scanned_value = strtoimax(optarg, 0, 10);
                if (send_buffer_size_scanned_value > UINT32_MAX || send_buffer_size_scanned_value < 0) {
                    fprintf(stderr, " must be a positive 32 bit value\n");
                    exit(1);
                }
                send_buffer_size = (uint32_t) send_buffer_size_scanned_value;
                break;
            }
            case 'A':
                alpn = optarg;
                break;
            case 'B':
                non_blocking = 1;
                break;
            case 'L':
                key_log_path = optarg;
                break;
            case 'P':
                if (conn_settings.psk_list_len >= S2N_MAX_PSK_LIST_LENGTH) {
                    fprintf(stderr, "Error setting psks, maximum number of psks permitted is 10.\n");
                    exit(1);
                }
                conn_settings.psk_optarg_list[conn_settings.psk_list_len++] = optarg;
                break;
            case 'E':
                max_early_data = atoi(optarg);
                break;
            case 'N':
                npn = true;
                break;
            case '?':
            default:
                fprintf(stdout, "getopt_long returned: %d", c);
                usage();
                break;
        }
    }

    if (conn_settings.prefer_throughput && conn_settings.prefer_low_latency) {
        fprintf(stderr, "prefer-throughput and prefer-low-latency options are mutually exclusive\n");
        exit(1);
    }

    if (optind < argc) {
        host = argv[optind++];
    }

    /* cppcheck-suppress duplicateCondition */
    if (optind < argc) {
        port = argv[optind++];
    }

    if (!host || !port) {
        usage();
    }

    if (setvbuf(stdin, NULL, _IONBF, 0) < 0) {
        fprintf(stderr, "Error disabling buffering for stdin\n");
        exit(1);
    }

    if (setvbuf(stdout, NULL, _IONBF, 0) < 0) {
        fprintf(stderr, "Error disabling buffering for stdout\n");
        exit(1);
    }

    memset(&hints, 0, sizeof(hints));

    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;

    if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) {
        fprintf(stderr, "Error disabling SIGPIPE\n");
        exit(1);
    }

    if ((r = getaddrinfo(host, port, &hints, &ai)) < 0) {
        fprintf(stderr, "getaddrinfo error: %s\n", gai_strerror(r));
        exit(1);
    }

    if ((sockfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol)) == -1) {
        fprintf(stderr, "socket error: %s\n", strerror(errno));
        exit(1);
    }

    r = 1;
    if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &r, sizeof(int)) < 0) {
        fprintf(stderr, "setsockopt error: %s\n", strerror(errno));
        exit(1);
    }

    if (bind(sockfd, ai->ai_addr, ai->ai_addrlen) < 0) {
        fprintf(stderr, "bind error: %s\n", strerror(errno));
        exit(1);
    }

    if (listen(sockfd, 1) == -1) {
        fprintf(stderr, "listen error: %s\n", strerror(errno));
        exit(1);
    }

    if (fips_mode) {
#ifndef S2N_INTERN_LIBCRYPTO
    #if defined(OPENSSL_FIPS) || defined(OPENSSL_IS_AWSLC)
        if (FIPS_mode_set(1) == 0) {
            unsigned long fips_rc = ERR_get_error();
            char ssl_error_buf[256]; /* Openssl claims you need no more than 120 bytes for error strings */
            fprintf(stderr, "s2nd failed to enter FIPS mode with RC: %lu; String: %s\n", fips_rc, ERR_error_string(fips_rc, ssl_error_buf));
            exit(1);
        }
        printf("s2nd entered FIPS mode\n");
    #else
        fprintf(stderr, "Error entering FIPS mode. s2nd was not built against a FIPS-capable libcrypto.\n");
        exit(1);
    #endif
#endif
    }

    GUARD_EXIT(s2n_init(), "Error running s2n_init()");

    printf("Listening on %s:%s\n", host, port);

    struct s2n_config *config = s2n_config_new();
    if (!config) {
        print_s2n_error("Error getting new s2n config");
        exit(1);
    }

    if (num_user_certificates != num_user_private_keys) {
        fprintf(stderr, "Mismatched certificate(%d) and private key(%d) count!\n", num_user_certificates, num_user_private_keys);
        exit(1);
    }

    int num_certificates = 0;
    if (num_user_certificates == 0) {
        certificates[0] = default_certificate_chain;
        private_keys[0] = default_private_key;
        num_certificates = 1;
    } else {
        num_certificates = num_user_certificates;
    }

    for (int i = 0; i < num_certificates; i++) {
        struct s2n_cert_chain_and_key *chain_and_key = s2n_cert_chain_and_key_new();
        GUARD_EXIT(s2n_cert_chain_and_key_load_pem(chain_and_key, certificates[i], private_keys[i]), "Error getting certificate/key");

        if (ocsp_response_file_path) {
            int fd = open(ocsp_response_file_path, O_RDONLY);
            if (fd < 0) {
                fprintf(stderr, "Error opening OCSP response file: '%s'\n", strerror(errno));
                exit(1);
            }

            struct stat st = { 0 };
            if (fstat(fd, &st) < 0) {
                fprintf(stderr, "Error fstat-ing OCSP response file: '%s'\n", strerror(errno));
                exit(1);
            }

            uint8_t *ocsp_response = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if (s2n_cert_chain_and_key_set_ocsp_data(chain_and_key, ocsp_response, st.st_size) < 0) {
                fprintf(stderr, "Error adding ocsp response: '%s'\n", s2n_strerror(s2n_errno, "EN"));
                exit(1);
            }

            close(fd);
        }

        GUARD_EXIT(s2n_config_add_cert_chain_and_key_to_store(config, chain_and_key), "Error setting certificate/key");
    }

    s2n_set_common_server_config(max_early_data, config, conn_settings, cipher_prefs, session_ticket_key_file_path);

    if (parallelize) {
        struct sigaction sa;

        sa.sa_handler = SIG_IGN;
#if defined(SA_NOCLDWAIT)
        sa.sa_flags = SA_NOCLDWAIT;
#endif
        sigemptyset(&sa.sa_mask);
        sigaction(SIGCHLD, &sa, NULL);
    }

    if (alpn) {
        const char *protocols[] = { alpn };
        GUARD_EXIT(s2n_config_set_protocol_preferences(config, protocols, s2n_array_len(protocols)), "Failed to set alpn");
    }

    if (send_buffer_size != 0) {
        GUARD_EXIT(s2n_config_set_send_buffer_size(config, send_buffer_size), "Error setting send buffer size.");
    }

    if (npn) {
        GUARD_EXIT(s2n_config_set_npn(config, 1), "Error setting npn support");
    }

    FILE *key_log_file = NULL;

    if (key_log_path) {
        key_log_file = fopen(key_log_path, "a");
        GUARD_EXIT(key_log_file == NULL ? S2N_FAILURE : S2N_SUCCESS, "Failed to open key log file");
        GUARD_EXIT(
                s2n_config_set_key_log_cb(
                        config,
                        key_log_callback,
                        (void *) key_log_file),
                "Failed to set key log callback");
    }

    int fd;
    while ((fd = accept(sockfd, ai->ai_addr, &ai->ai_addrlen)) > 0) {
        if (non_blocking) {
            int flags = fcntl(fd, F_GETFL, 0);
            if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
                fprintf(stderr, "fcntl error: %s\n", strerror(errno));
                exit(1);
            }
        }

        if (!parallelize) {
            int rc = handle_connection(fd, config, conn_settings);
            close(fd);
            if (rc < 0) {
                exit(rc);
            }

            /* If max_conns was set, then exit after it is reached. Otherwise
             * unlimited connections are allowed, so ignore the variable. */
            if (conn_settings.max_conns > 0) {
                if (conn_settings.max_conns-- == 1) {
                    GUARD_EXIT(s2n_cleanup(), "Error running s2n_cleanup()");
                    exit(0);
                }
            }
        } else {
            /* Fork Process, one for the Acceptor (parent), and another for the Handler (child). */
            pid_t child_pid = fork();

            if (child_pid == 0) {
                /* This is the Child Handler Thread. We should handle the connection, then exit. */
                int rc = handle_connection(fd, config, conn_settings);
                close(fd);
                _exit(rc);
            } else if (child_pid == -1) {
                close(fd);
                print_s2n_error("Error calling fork(). Acceptor unable to start handler.");
                exit(1);
            } else {
                /* This is the parent Acceptor Thread, continue listening for new connections */
                close(fd);
                continue;
            }
        }
    }

    if (key_log_file) {
        fclose(key_log_file);
    }

    GUARD_EXIT(s2n_cleanup(), "Error running s2n_cleanup()");

    return 0;
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/000077500000000000000000000000001456575232400205175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/000077500000000000000000000000001456575232400215145ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/.clippy.toml000066400000000000000000000000631456575232400237660ustar00rootroot00000000000000# This should match rust-toolchain
msrv = "1.63.0"
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/.gitignore000066400000000000000000000003171456575232400235050ustar00rootroot00000000000000target/
Cargo.lock

# these files are generated by the `generate` script
s2n-tls-sys/files.rs
s2n-tls-sys/lib
s2n-tls-sys/src/api.rs
s2n-tls-sys/src/tests.rs
s2n-tls-sys/src/features*
s2n-tls-sys/Cargo.toml
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/.rustfmt.toml000066400000000000000000000001531456575232400241720ustar00rootroot00000000000000edition = "2018"
format_macro_matchers = true
imports_granularity = "Crate"
use_field_init_shorthand = trueaws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/Cargo.toml000066400000000000000000000006711456575232400234500ustar00rootroot00000000000000[workspace]
members = [
    "integration",
    "s2n-tls",
    "s2n-tls-sys",
    "s2n-tls-tokio",
]
# generate can't be included in the workspace because of a bootstrapping problem
# s2n-tls-sys/Cargo.toml (part of the workspace) is generated by
# generate/main.rs
exclude = [
    "generate",
    "bench"
]

[profile.release]
lto = true
codegen-units = 1
incremental = false

[profile.bench]
lto = true
codegen-units = 1
incremental = false
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/Makefile000066400000000000000000000004451456575232400231570ustar00rootroot00000000000000SHELL := /bin/bash

all: clean s2n-tls-sys/src/api.rs target/release/deps/s2nc-%

target/release/deps/s2nc-%:
	cargo bench --no-run

s2n-tls-sys/src/api.rs:
	./generate.sh

.PHONY: clean
clean:
	@cargo clean
	@rm -f s2n-tls-sys/src/api.rs target/release/deps/s2nc-* target/release/deps/s2nd-*
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/README.md000066400000000000000000000015211456575232400227720ustar00rootroot00000000000000# s2n-tls rust bindings

**NOTICE: These bindings are currently subject to change and should not be used without the expectation
of future breakage.**

## Installation

In order to generate rust bindings for s2n-tls, you need to have the following installed:

* Rust - this can be easily installed with [rustup](https://rustup.rs/)
* libclang - this is usually installed through your system's package manager
* libssl-dev
* pkg-config
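
One way to get these prerequisites on an Ubuntu host (the package names below are the usual Ubuntu ones and may differ on other distributions):

```
$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
$ sudo apt-get install -y libclang-dev libssl-dev pkg-config
```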

## Usage

Generating rust bindings can be accomplished by running the `generate.sh` script:

```
$ ./bindings/rust/generate.sh
```

## Minimum Supported Rust Version (MSRV)

`s2n-tls` will maintain a rolling MSRV (minimum supported rust version) policy of at least 6 months. The current s2n-tls version is not guaranteed to build on Rust versions earlier than the MSRV.

The current MSRV is [1.63.0][msrv-url].

aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/000077500000000000000000000000001456575232400225735ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/.gitignore000066400000000000000000000000511456575232400245570ustar00rootroot00000000000000*.pem
*.svg
!historical-perf-*.svg
*.zst
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/Cargo.toml000066400000000000000000000027361456575232400245330ustar00rootroot00000000000000[package]
name = "bench"
version = "0.1.0"
edition = "2021"

[features]
default = ["rustls", "openssl"]
rustls = ["dep:rustls", "rustls-pemfile"]
openssl = ["dep:openssl"]
memory = ["plotters", "crabgrind", "structopt"]
historical-perf = ["plotters", "serde_json", "semver"]

[dependencies]
s2n-tls = { path = "../s2n-tls" }
errno = "0.3"
libc = "0.2"
strum = { version = "0.25", features = ["derive"] }
rustls = { version = "0.21", optional = true }
rustls-pemfile = { version = "1.0", optional = true }
openssl = { version = "0.10", features = ["vendored"], optional = true }
crabgrind = { version = "0.1", optional = true }
structopt = { version = "0.3", optional = true }
serde_json = { version = "1.0", optional = true }
semver = { version = "1.0", optional = true }

[dependencies.plotters]
version = "0.3"
optional = true
default-features = false
features = ["all_series", "all_elements", "full_palette", "svg_backend"]

[dev-dependencies]
criterion = "0.5"
pprof = { version = "0.12", features = ["criterion", "flamegraph"] }
# env_logger and log are used to enable logging for rustls, which can help with
# debugging interop failures
env_logger = "0.10"
log = "0.4"

[[bin]]
name = "memory"
required-features = ["memory"]

[[bin]]
name = "graph_memory"
required-features = ["memory"]

[[bin]]
name = "graph_perf"
required-features = ["historical-perf"]

[[bench]]
name = "handshake"
harness = false

[[bench]]
name = "throughput"
harness = false

[[bench]]
name = "resumption"
harness = false
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/README.md000066400000000000000000000147301456575232400240570ustar00rootroot00000000000000# Benchmarking s2n-tls

We use Criterion.rs to benchmark s2n-tls against two commonly used TLS libraries, Rustls and OpenSSL.

All benchmarks are run in an idealized environment, using only a single thread and with custom IO that bypasses the networking stack. As such, performance numbers will differ from those seen in practice, but relative performance between the libraries should still be accurate.

## Quickstart

```
# generate rust bindings
../generate.sh

# run all benchmarks
cargo bench
```

## Setup

The benchmarked TLS implementations are:

- s2n-tls, using AWS-LC as its cryptographic backend
- Rustls, using Ring as its cryptographic backend
- OpenSSL (libssl and libcrypto)

All of the cryptographic backends, including AWS-LC and OpenSSL's libcrypto, are consumed as vendored builds from crates.io and do not need to be installed on the benchmarking host. Note that the `aws-lc-sys` crate depends on CMake in its build script, so CMake must be installed on the benchmarking host.

Currently, this crate has only been tested on Ubuntu (both x86 and ARM), but we expect everything to work with other Unix environments.
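
For example, on an Ubuntu host the main extra system dependency is CMake for the `aws-lc-sys` build script; one way to install it (assuming an Ubuntu/Debian-style host):

```
sudo apt-get install -y cmake
```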

### Features

Default features (`rustls` and `openssl`) can be disabled by running the benches with `--no-default-features`. The non-default `memory` and `historical-perf` features are used to enable dependencies specific to those types of benches, and are automatically used by the scripts that run those benches.
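
For example, to bench only s2n-tls and skip the Rustls and OpenSSL comparison targets:

```
cargo bench --no-default-features
```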

## Performance benchmarks

The handshake and throughput benchmarks can be run with the `cargo bench` command. Criterion will auto-generate an HTML report in `target/criterion/`.

Throughput benchmarks measure round-trip throughput with the client and server connections in the same thread for symmetry. In practice, a machine would either host only the client or only the server and use multiple threads, so throughput for a single connection could theoretically be up to ~4x higher than the values from the benchmarks (when run on the same machine).

To generate flamegraphs, run `cargo bench --bench handshake --bench throughput -- --profile-time 5`, which profiles each benchmark for 5 seconds and stores the resulting flamegraph in `target/criterion/[bench-name]/[lib-name]/profile/flamegraph.svg`.
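
Putting those commands together:

```
# run all benches; Criterion writes an HTML report under target/criterion/
cargo bench

# profile the handshake and throughput benches for 5 seconds each and write
# flamegraphs under target/criterion/[bench-name]/[lib-name]/profile/
cargo bench --bench handshake --bench throughput -- --profile-time 5
```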

## Memory benchmarks

To run all memory benchmarks, run `scripts/bench-memory.sh`. Graphs of memory usage will be generated in `images/`.

Memory benchmark data is generated using the `memory` binary. Command line arguments can be given to `cargo run` or to the built executable located at `target/release/memory`. The usage is as follows:

```
memory [(pair|client|server)] [(s2n-tls|rustls|openssl)] [--reuse-config (true|false)] [--shrink-buffers (true|false)]
```

- `(pair|client|server)`: target to memory bench, defaults to `server`
- `(s2n-tls|rustls|openssl)`: library to be benched, if unset benches all libraries
- `--reuse-config`: if `true` (default), reuse configs between connections
- `--shrink-buffers`: if `true` (default), shrink buffers owned by connections
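
For example, to bench only the server side of an s2n-tls connection without reusing configs (the argument values here are purely illustrative):

```
# via cargo ...
cargo run --release --features memory --bin memory -- server s2n-tls --reuse-config false

# ... or via the prebuilt binary
target/release/memory server s2n-tls --reuse-config false
```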

To view a callgraph of memory usage, use [KCachegrind](https://github.com/KDE/kcachegrind) on `xtree.out` generated from memory benching:

```
kcachegrind target/memory/[bench-target]/[lib-name]/xtree.out
```

To view a flamegraph of memory usage, use [heaptrack](https://github.com/KDE/heaptrack) with `heaptrack_gui` also installed. Run heaptrack with the `memory` executable and target/library options:

```
heaptrack target/release/memory (pair|client|server) (s2n-tls|rustls|openssl)
```

## Historical benchmarks

To do historical benchmarks, run `scripts/bench-past.sh`. This will checkout old versions of s2n-tls back to v1.3.16 in `target/` and run benchmarks on those with the `historical-perf` feature, disabling Rustls and OpenSSL benches.
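
For example:

```
# checks out tagged releases back to v1.3.16 under target/ and benches each one;
# the resulting plots are written to images/
scripts/bench-past.sh
```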

## PKI Structure
```
   ┌────root──────┐
   │              │
   │              │
   ▼              │
 branch           │
   │              │
   │              │
   │              │
   ▼              ▼
 leaf            client
```
`generate-certs.sh` will generate 4 certificates for each key type, with the signing relationships that are indicated in the diagram above. This cert chain length was chosen because it matches the cert chain length used by public AWS services.

### Caveats

The oldest version benched is v1.3.16; before that, the s2n-tls Rust bindings had a different API and would thus require a different bench harness to test.

v1.3.30-1.3.37 are not benched because of dependency issues when generating the Rust bindings. However, versions before and after are benched, so the overall trend in performance can still be seen without the data from these versions.

### Sample output

Because these benches take a long time to run (>30 min), we include the results from historical benching (as of v1.3.47) here.

Notes:
- Two sets of parameters for the handshake couldn't be benched before 1.3.40, since security policies that negotiate those parameters as their top choice did not exist before then.
- There is no data from 1.3.30 to 1.3.37 because those versions have a dependency issue that causes the Rust bindings not to build. However, there is data before and after that period, so the performance for those versions can be inferred via interpolation.
- The improvement in throughput in 1.3.28 was most likely caused by the addition of LTO to the default Rust bindings build.
- Since the benches are run over a long time, noise on the machine can cause variability, and background processes can cause spikes.
- The variability can be seen with throughput especially because it is calculated as the inverse of time taken.

![historical-perf-handshake](images/historical-perf-handshake.svg)

![historical-perf-throughput](images/historical-perf-throughput.svg)

## Implementation details

We use Rust bindings for s2n-tls and OpenSSL. All of our benchmarks are run in Rust on a single thread for consistency.

### IO

To remove external factors, we use custom IO with our benchmarks, bypassing the networking layer and having the client and server connections transfer data to each other via a local buffer.

### Certificate generation

There is one root cert that directly signs the server and client certs that are used in benchmarking. We currently bench RSA and ECDSA certs.

### Negotiation parameters

The cipher suites benchmarked are `TLS_AES_128_GCM_SHA256` and `TLS_AES_256_GCM_SHA384`, and the key exchange methods benchmarked are ECDHE with `secp256r1` and with `x25519`. We also test connections with and without client authentication (mTLS).
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/benches/000077500000000000000000000000001456575232400242025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/benches/handshake.rs000066400000000000000000000106451456575232400265040ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

#[cfg(feature = "openssl")]
use bench::OpenSslConnection;
#[cfg(feature = "rustls")]
use bench::RustlsConnection;
use bench::{
    harness::TlsBenchConfig, CipherSuite, ConnectedBuffer, CryptoConfig, HandshakeType, KXGroup,
    Mode, S2NConnection, SigType, TlsConnPair, TlsConnection, PROFILER_FREQUENCY,
};
use criterion::{
    criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, Criterion,
};
use pprof::criterion::{Output, PProfProfiler};
use std::error::Error;
use strum::IntoEnumIterator;

fn bench_handshake_for_library<T>(
    bench_group: &mut BenchmarkGroup<WallTime>,
    handshake_type: HandshakeType,
    kx_group: KXGroup,
    sig_type: SigType,
) where
    T: TlsConnection,
    T::Config: TlsBenchConfig,
{
    // make configs before benching to reuse
    let crypto_config = CryptoConfig::new(CipherSuite::default(), kx_group, sig_type);
    let client_config = T::Config::make_config(Mode::Client, crypto_config, handshake_type);
    let server_config = T::Config::make_config(Mode::Server, crypto_config, handshake_type);

    // generate all harnesses (TlsConnPair structs) beforehand so that benchmarks
    // only include negotiation and not config/connection initialization
    bench_group.bench_function(T::name(), |b| {
        b.iter_batched_ref(
            || -> Result<TlsConnPair<T, T>, Box<dyn Error>> {
                if let (Ok(client_config), Ok(server_config)) =
                    (client_config.as_ref(), server_config.as_ref())
                {
                    let connected_buffer = ConnectedBuffer::default();
                    let client =
                        T::new_from_config(client_config, connected_buffer.clone_inverse())?;
                    let server = T::new_from_config(server_config, connected_buffer)?;
                    Ok(TlsConnPair::wrap(client, server))
                } else {
                    Err("invalid configs".into())
                }
            },
            |conn_pair| {
                // harnesses with certain parameters fail to initialize for
                // some past versions of s2n-tls, but missing data can be
                // visually interpolated in the historical performance graph
                if let Ok(conn_pair) = conn_pair {
                    let _ = conn_pair.handshake();
                }
            },
            BatchSize::SmallInput,
        )
    });
}

fn bench_handshake_with_params(
    bench_group: &mut BenchmarkGroup<WallTime>,
    handshake_type: HandshakeType,
    kx_group: KXGroup,
    sig_type: SigType,
) {
    bench_handshake_for_library::<S2NConnection>(bench_group, handshake_type, kx_group, sig_type);
    #[cfg(feature = "rustls")]
    bench_handshake_for_library::<RustlsConnection>(
        bench_group,
        handshake_type,
        kx_group,
        sig_type,
    );
    #[cfg(feature = "openssl")]
    bench_handshake_for_library::<OpenSslConnection>(
        bench_group,
        handshake_type,
        kx_group,
        sig_type,
    );
}

pub fn bench_handshake_types(c: &mut Criterion) {
    for handshake_type in HandshakeType::iter() {
        let mut bench_group = c.benchmark_group(format!("handshake-{handshake_type:?}"));
        bench_handshake_with_params(
            &mut bench_group,
            handshake_type,
            KXGroup::default(),
            SigType::default(),
        );
    }
}

pub fn bench_handshake_kx_groups(c: &mut Criterion) {
    for kx_group in KXGroup::iter() {
        let mut bench_group = c.benchmark_group(format!("handshake-{kx_group:?}"));
        bench_handshake_with_params(
            &mut bench_group,
            HandshakeType::default(),
            kx_group,
            SigType::default(),
        );
    }
}

pub fn bench_handshake_sig_types(c: &mut Criterion) {
    for sig_type in SigType::iter() {
        let mut bench_group = c.benchmark_group(format!("handshake-{sig_type:?}"));
        bench_handshake_with_params(
            &mut bench_group,
            HandshakeType::default(),
            KXGroup::default(),
            sig_type,
        );
    }
}

criterion_group! {
    name = benches;
    // profile 100 samples/sec
    config = Criterion::default().with_profiler(PProfProfiler::new(PROFILER_FREQUENCY, Output::Flamegraph(None)));
    targets = bench_handshake_types, bench_handshake_kx_groups, bench_handshake_sig_types
}
criterion_main!(benches);
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/benches/resumption.rs000066400000000000000000000063011456575232400267550ustar00rootroot00000000000000use bench::{
    harness::TlsBenchConfig, CipherSuite, CryptoConfig, HandshakeType, KXGroup, S2NConnection,
    SigType, TlsConnPair, TlsConnection,
};
use criterion::{
    criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, Criterion,
};

fn bench_handshake_pair<T>(bench_group: &mut BenchmarkGroup<WallTime>, sig_type: SigType)
where
    T: TlsConnection,
    T::Config: TlsBenchConfig,
{
    // generate all harnesses (TlsConnPair structs) beforehand so that benchmarks
    // only include negotiation and not config/connection initialization
    for handshake in [HandshakeType::Resumption, HandshakeType::ServerAuth] {
        bench_group.bench_function(format!("{:?}-{}", handshake, T::name()), |b| {
            b.iter_batched_ref(
                || {
                    TlsConnPair::<T, T>::new_bench_pair(
                        CryptoConfig::new(CipherSuite::default(), KXGroup::default(), sig_type),
                        handshake,
                    )
                },
                |conn_pair_res| {
                    if let Ok(conn_pair) = conn_pair_res {
                        let _ = conn_pair.handshake();
                    }
                },
                BatchSize::SmallInput,
            )
        });
    }
}

fn bench_handshake_server_1rtt<T>(bench_group: &mut BenchmarkGroup<WallTime>, sig_type: SigType)
where
    T: TlsConnection,
    T::Config: TlsBenchConfig,
{
    for handshake in [HandshakeType::Resumption, HandshakeType::ServerAuth] {
        bench_group.bench_function(format!("{:?}-{}", handshake, T::name()), |b| {
            b.iter_batched_ref(
                || {
                    let pair = TlsConnPair::<T, T>::new_bench_pair(
                        CryptoConfig::new(CipherSuite::default(), KXGroup::default(), sig_type),
                        handshake,
                    )
                    .unwrap();
                    let (mut c, s) = pair.split();
                    c.handshake().unwrap();
                    s
                },
                |server| {
                    // this represents the work that the server does during the
                    // first RTT
                    server.handshake().unwrap()
                },
                BatchSize::SmallInput,
            )
        });
    }
}

/// This benchmark compares resumption savings across a single implementation.
/// E.g. "how much faster is session resumption than a full handshake for
/// s2n-tls?".
pub fn bench_resumption(c: &mut Criterion) {
    // compare resumption savings across both client and server
    for sig_type in [SigType::Rsa2048, SigType::Ecdsa256] {
        let mut bench_group = c.benchmark_group(format!("resumption-pair-{:?}", sig_type));
        bench_handshake_pair::<S2NConnection>(&mut bench_group, sig_type);
    }

    // only look at resumption savings for the server, specifically the work
    // done in the first rtt.
    for sig_type in [SigType::Rsa2048, SigType::Ecdsa384] {
        let mut bench_group = c.benchmark_group(format!("resumption-server-1rtt-{:?}", sig_type));
        bench_handshake_server_1rtt::<S2NConnection>(&mut bench_group, sig_type);
    }
}

criterion_group!(benches, bench_resumption);
criterion_main!(benches);
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/benches/throughput.rs000066400000000000000000000065161456575232400267710ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

#[cfg(feature = "openssl")]
use bench::OpenSslConnection;
#[cfg(feature = "rustls")]
use bench::RustlsConnection;
use bench::{
    harness::TlsBenchConfig, CipherSuite, ConnectedBuffer, CryptoConfig, HandshakeType, KXGroup,
    Mode, S2NConnection, SigType, TlsConnPair, TlsConnection, PROFILER_FREQUENCY,
};
use criterion::{
    criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, Criterion,
    Throughput,
};
use pprof::criterion::{Output, PProfProfiler};
use std::error::Error;
use strum::IntoEnumIterator;

fn bench_throughput_for_library<T>(
    bench_group: &mut BenchmarkGroup<WallTime>,
    shared_buf: &mut [u8],
    cipher_suite: CipherSuite,
) where
    T: TlsConnection,
    T::Config: TlsBenchConfig,
{
    let crypto_config = CryptoConfig::new(cipher_suite, KXGroup::default(), SigType::default());
    let client_config = T::Config::make_config(Mode::Client, crypto_config, HandshakeType::default());
    let server_config = T::Config::make_config(Mode::Server, crypto_config, HandshakeType::default());

    bench_group.bench_function(T::name(), |b| {
        b.iter_batched_ref(
            || -> Result<TlsConnPair<T, T>, Box<dyn Error>> {
                if let (Ok(client_config), Ok(server_config)) =
                    (client_config.as_ref(), server_config.as_ref())
                {
                    let connected_buffer = ConnectedBuffer::default();
                    let client =
                        T::new_from_config(client_config, connected_buffer.clone_inverse())?;
                    let server = T::new_from_config(server_config, connected_buffer)?;
                    let mut conn_pair = TlsConnPair::wrap(client, server);
                    conn_pair.handshake()?;
                    Ok(conn_pair)
                } else {
                    Err("invalid configs".into())
                }
            },
            |conn_pair| {
                if let Ok(conn_pair) = conn_pair {
                    let _ = conn_pair.round_trip_transfer(shared_buf);
                }
            },
            BatchSize::SmallInput,
        )
    });
}

pub fn bench_throughput_cipher_suites(c: &mut Criterion) {
    // arbitrarily large to cut across TLS record boundaries
    let mut shared_buf = [0u8; 100000];

    for cipher_suite in CipherSuite::iter() {
        let mut bench_group = c.benchmark_group(format!("throughput-{:?}", cipher_suite));
        bench_group.throughput(Throughput::Bytes(shared_buf.len() as u64));
        bench_throughput_for_library::<S2NConnection>(
            &mut bench_group,
            &mut shared_buf,
            cipher_suite,
        );
        #[cfg(feature = "rustls")]
        bench_throughput_for_library::<RustlsConnection>(
            &mut bench_group,
            &mut shared_buf,
            cipher_suite,
        );
        #[cfg(feature = "openssl")]
        bench_throughput_for_library::<OpenSslConnection>(
            &mut bench_group,
            &mut shared_buf,
            cipher_suite,
        );
    }
}

criterion_group! {
    name = benches;
    // profile 100 samples/sec
    config = Criterion::default().with_profiler(PProfProfiler::new(PROFILER_FREQUENCY, Output::Flamegraph(None)));
    targets = bench_throughput_cipher_suites
}
criterion_main!(benches);
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/certs/000077500000000000000000000000001456575232400237135ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/certs/ecdsa256000077700000000000000000000000001456575232400363762../../../../tests/pems/permutations/ec_ecdsa_p256_sha256ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/certs/ecdsa384000077700000000000000000000000001456575232400364042../../../../tests/pems/permutations/ec_ecdsa_p384_sha384ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/certs/readme.md000066400000000000000000000007141456575232400254740ustar00rootroot00000000000000This folder actually just contains symlinks to the files in s2n-tls/test/pems/permutations

```
ln -s ../../../../tests/pems/permutations/ec_ecdsa_p256_sha256 ecdsa256
ln -s ../../../../tests/pems/permutations/ec_ecdsa_p384_sha384 ecdsa384
ln -s ../../../../tests/pems/permutations/rsae_pkcs_2048_sha256 rsa2048
ln -s ../../../../tests/pems/permutations/rsae_pkcs_3072_sha384 rsa3072
ln -s ../../../../tests/pems/permutations/rsae_pkcs_4096_sha384 rsa4096
```
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/certs/rsa2048000077700000000000000000000000001456575232400363122../../../../tests/pems/permutations/rsae_pkcs_2048_sha256ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/certs/rsa3072000077700000000000000000000000001456575232400363102../../../../tests/pems/permutations/rsae_pkcs_3072_sha384ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/certs/rsa4096000077700000000000000000000000001456575232400363262../../../../tests/pems/permutations/rsae_pkcs_4096_sha384ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/images/000077500000000000000000000000001456575232400240405ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/images/historical-perf-handshake.svg000066400000000000000000002424001456575232400316020ustar00rootroot00000000000000


[Plot: historical-perf-handshake.svg — "Performance of handshake by version since Jun 2022". X axis: Version (1.3.16 through 1.3.48); Y axis: Time (0 ms to 8 ms). Series: handshake-x25519, handshake-no-mTLS, handshake-rsa2048, handshake-rsa4096, handshake-rsa3072, handshake-secp256r1, handshake-mTLS, handshake-ecdsa384.]




aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/images/historical-perf-throughput.svg000066400000000000000000000762051456575232400320750ustar00rootroot00000000000000


[Plot: historical-perf-throughput.svg — "Performance of round trip throughput by version since Jun 2022". X axis: Version (1.3.16 through 1.3.48); Y axis: Throughput (0 GB/s to 0.8 GB/s). Series: throughput-AES_128_GCM_SHA256, throughput-AES_256_GCM_SHA384.]



aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/rust-toolchain000066400000000000000000000000071456575232400254660ustar00rootroot00000000000000stable
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/scripts/000077500000000000000000000000001456575232400242625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/scripts/bench-memory.sh000077500000000000000000000017031456575232400272070ustar00rootroot00000000000000#!/usr/bin/env bash

# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Benches memory usage for all possible configurations and generate plots in images/
# All given arguments (ex. `--config aws-lc-config/s2n.toml` to use AWS-LC) are passed to Cargo

set -e

pushd "$(dirname "$0")"/.. > /dev/null

cargo build --release --features memory --bin memory --bin graph_memory "$@"

# iterate through all possible options
for reuse_config in false true
do
    for shrink_buffers in false true
    do
        for bench_target in client server pair
        do
            valgrind --tool=massif --depth=1 --massif-out-file="target/memory/massif.out" --time-unit=ms target/release/memory $bench_target --reuse-config $reuse_config --shrink-buffers $shrink_buffers
            rm target/memory/massif.out
        done
    done
done

cargo run --release --features memory --bin graph_memory "$@"

popd > /dev/null
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/scripts/bench-past.sh000077500000000000000000000052141456575232400266470ustar00rootroot00000000000000#!/usr/bin/env bash

# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Run historical benchmarking by checking out old versions of s2n-tls into target/
# Criterion JSON results get cached to target/historical-perf/[bench-group-name]/[version].json
# Results are then plotted, saved to images/historical-perf-[bench-name].svg
# All given arguments (ex. `--config aws-lc-config/s2n.toml` to use AWS-LC) are passed to Cargo

# immediately bail if any command fails
set -e

# suppress stdout and most cargo warnings
exec >/dev/null
export CARGO_TERM_QUIET=true
export RUSTFLAGS=-Awarnings

# go to bench directory
pushd "$(dirname "$0")"/../
bench_path="$(pwd)"

# delete past runs
rm -rf target/historical-perf

# make Cargo.toml point s2n-tls to the cloned old version
sed -i "s|s2n-tls = .*|s2n-tls = { path = \"target/s2n-tls/bindings/rust/s2n-tls\" }|" Cargo.toml 

# ensure Cargo.toml gets changed back on exit; retains original exit status
trap "{ status=$?; sed -i 's|s2n-tls = .*|s2n-tls = { path = \"../s2n-tls\" }|' $bench_path/Cargo.toml; exit $status; }" EXIT

# clone copy of repo to target/s2n-tls
echo "cloning repo" >&2
mkdir -p target
cd target
rm -rf s2n-tls
git clone --quiet https://github.com/aws/s2n-tls
cd s2n-tls/bindings/rust/
copied_bindings_path="$(pwd)"

# get list of tags sorted newest to oldest
sorted_tags="$(git tag -l | sort -rV)"

# last tag we want is v1.3.16, get line number of v1.3.16 in sorted_tags
line_num_last_tag=$(echo "$sorted_tags" | grep "v1.3.16" --line-number | head -n 1 | cut -d":" -f1)

# loop through all tags in order up to v1.3.16
for tag in $(echo "$sorted_tags" | head -$line_num_last_tag)
do
    (
        # go to s2n-tls/bindings/rust/ inside copied repo
        cd $copied_bindings_path

        echo "checkout tag $tag" >&2
        git checkout $tag --quiet

        echo "generating rust bindings" >&2
        # if generate.sh fails, exit out of block
        ./generate.sh || exit 1

        echo "running cargo bench and saving results" >&2
        cd $bench_path
        rm -rf target/criterion
        cargo bench --no-default-features --no-fail-fast

        # cache criterion outputs from this bench into target/historical-perf
        for bench_group in $(ls target/criterion | grep -v "report")
        do
            mkdir -p target/historical-perf/$bench_group/
            cp target/criterion/$bench_group/s2n-tls/new/estimates.json target/historical-perf/$bench_group/$tag.json
        done
    ) || echo "failed, trying next tag"
    echo
done

# graph results
cd $bench_path
cargo run --release --no-default-features --features historical-perf --bin graph_perf

popd
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/scripts/install-aws-lc.sh000077500000000000000000000056721456575232400274650ustar00rootroot00000000000000#!/usr/bin/env bash

# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Usage: ./install-aws-lc.sh
# Sets up bench crate to use aws-lc for either s2n-tls or rustls if desired

# To run benches with aws-lc, use any cargo command with:
# --config aws-lc-config/s2n.toml
# --config aws-lc-config/rustls.toml
# or both

# How Rustls with aws-lc-rs works:
# Clones aws-lc-rs, changes its name to ring with a compatible version number, and
# patches it into Rustls

# How s2n-tls with aws-lc works:
# Builds s2n-tls static lib with AWS-LC interned, required to avoid symbol collisions with OpenSSL
# Checks for libs2n.a at target/s2n-tls-build/lib/libs2n.a
# Checks for libcrypto.a at target/aws-lc/install/lib/libcrypto.a

set -e 

# go to bench directory
pushd "$(dirname "$0")"/.. > /dev/null
bench_dir="$(pwd)"



# ----- rustls -----

# clone aws-lc-rs to target
rm -rf target/aws-lc-rs
git clone https://github.com/aws/aws-lc-rs target/aws-lc-rs
cd target/aws-lc-rs/aws-lc-rs
git submodule init
git submodule update

# change aws-lc-rs to look like API compatible ring (name and version)
# first get the version of ring that Cargo expects with `cargo tree`
pushd "$bench_dir" > /dev/null
version="$(cargo tree -p ring | head -n 1 | sed 's|ring v||')"
popd > /dev/null
# next change first occurrence of 'name = .*' and 'version = .*' in Cargo.toml
# to be 'name = "ring"' and 'version = "[curr_version]"'
sed -i "1,/name = .*/{s|name = .*|name = \"ring\"|} ; 1,/version = .*/{s|version = .*|version = \"$version\"|}" Cargo.toml



# ----- s2n-tls -----

# put all build artifacts in target
s2n_tls_build_dir="$bench_dir"/target/s2n-tls-build
aws_lc_dir="$bench_dir"/target/aws-lc

# go to repo directory
cd "$bench_dir"/../../../
repo_dir="$(pwd)"

# if libs2n not found, build it
if [ ! -e "$s2n_tls_build_dir"/lib/libs2n.a ]
then
    # if aws-lc not found, build it
    if [ ! -e "$aws_lc_dir"/install/lib/libcrypto.a ]
    then
        # clone fresh aws-lc
        cd "$bench_dir"
        rm -rf target/aws-lc
        git clone --depth=1 https://github.com/aws/aws-lc target/aws-lc
        cd target/aws-lc

        # build and install aws-lc
        cmake -B build -DCMAKE_INSTALL_PREFIX="$aws_lc_dir"/install -DBUILD_TESTING=OFF -DBUILD_LIBSSL=OFF -DCMAKE_BUILD_TYPE=Release
        cmake --build ./build -j $(nproc)
        make -C build install
    else
        echo "using libcrypto.a at target/aws-lc/install/lib"
    fi

    # clean up directories
    rm -rf "$s2n_tls_build_dir"
    mkdir -p "$s2n_tls_build_dir"

    # build and install s2n-tls
    cd "$repo_dir"
    cmake . -B "$s2n_tls_build_dir" -DCMAKE_PREFIX_PATH="$aws_lc_dir"/install -DS2N_INTERN_LIBCRYPTO=ON -DBUILD_TESTING=OFF -DCMAKE_BUILD_TYPE=Release
    cmake --build "$s2n_tls_build_dir" -j $(nproc)
else
    echo "using libs2n.a at target/s2n-tls-build/lib"
fi

# force rebuild of s2n-tls-sys and benches
rm -rf ../target/release target/release



popd > /dev/null
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/000077500000000000000000000000001456575232400233625ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/bin/000077500000000000000000000000001456575232400241325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/bin/graph_memory.rs000066400000000000000000000114031456575232400271700ustar00rootroot00000000000000use plotters::{
    prelude::{
        ChartBuilder, IntoDrawingArea, IntoSegmentedCoord, LabelAreaPosition, Rectangle,
        SVGBackend, SegmentValue,
    },
    style::{AsRelative, Color, IntoFont, Palette, Palette99, RGBAColor, WHITE},
};
use std::{
    collections::BTreeMap,
    error::Error,
    fs::{read_dir, read_to_string},
    path::Path,
};

struct Stats {
    mean: f64,
    stderr: f64,
}

fn get_bytes_from_snapshot(path: &Path, i: i32) -> i32 {
    // number of bytes in snapshot starts on 8th line, 12th character
    read_to_string(format!("{}/{i}.snapshot", path.display()))
        .unwrap()
        .lines()
        .nth(7)
        .unwrap()[11..]
        .parse()
        .unwrap()
}
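// A sketch of the assumed snapshot layout (as written by valgrind/massif's "snapshot"
// monitor command; values illustrative): the 8th line reads `mem_heap_B=<bytes>`, so the
// byte count starts at character index 11, right after the "mem_heap_B=" prefix.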

/// Get the difference in bytes between two snapshots, which is memory of the
/// `i`th TlsConnPair (client and server)
fn get_bytes_diff(path: &Path, i: i32) -> i32 {
    get_bytes_from_snapshot(path, i + 1) - get_bytes_from_snapshot(path, i)
}

fn get_memory_data(path: &Path) -> Stats {
    let data: Vec<f64> = (0..100).map(|i| get_bytes_diff(path, i) as f64).collect();
    let mean = data.iter().sum::<f64>() / (data.len() as f64);
    let variance: f64 =
        data.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / ((data.len() - 1) as f64);
    let stdev = variance.sqrt();
    let stderr = stdev / (data.len() as f64).sqrt();

    Stats { mean, stderr }
}

/// Gets data from memory benching and plots it
fn plot_memory_data(param_name: &str, target_name: &str) -> Result<(), Box<dyn Error>> {
    // go through each library name directory (ex. "s2n-tls") and calculate stats
    let mut stats: BTreeMap<String, Stats> = Default::default(); // btree to sort by name
    for dir_entry in read_dir(format!("target/memory/{param_name}/{target_name}"))? {
        let dir_path = dir_entry?.path();
        let dir_name = dir_path.file_name().unwrap().to_str().unwrap().to_string();
        stats.insert(dir_name.clone(), get_memory_data(&dir_path));
    }

    // calculate things for plotting
    let num_bars = stats.len();
    let x_labels: Vec<String> = stats.iter().map(|kv| kv.0.clone()).collect();
    let max_mem = 120_000.0; // constant to keep scale same for all graphs

    // setup plotting
    let chart_path = format!("images/memory-{target_name}-{param_name}.svg");
    let drawing_area = SVGBackend::new(&chart_path, (600, 500)).into_drawing_area();
    drawing_area.fill(&WHITE)?;

    let mut ctx = ChartBuilder::on(&drawing_area)
        .caption(
            format!("Memory of {target_name} with {param_name}"),
            ("sans-serif", 30).into_font(),
        )
        .set_label_area_size(LabelAreaPosition::Left, (15).percent()) // axes padding
        .set_label_area_size(LabelAreaPosition::Bottom, (6).percent())
        .build_cartesian_2d(
            (0..num_bars - 1).into_segmented(),
            0.0..(1.1 * max_mem), // upper y bound on plot is 1.1 * y_max
        )?;

    let axis_label_style = ("sans-serif", 18).into_font();

    ctx.configure_mesh()
        .light_line_style(RGBAColor(235, 235, 235, 1.0)) // change gridline color
        .bold_line_style(RGBAColor(225, 225, 225, 1.0))
        .x_labels(num_bars)
        .x_label_formatter(&|x| {
            // change axis labels to name of bar
            let x = match *x {
                SegmentValue::CenterOf(x) => x,
                _ => 0,
            };
            x_labels.get(x).unwrap().to_string()
        })
        .x_label_style(axis_label_style.clone())
        .y_desc("Memory (kB)")
        .y_labels(10) // max number of labels on y axis
        .y_label_formatter(&|y| format!("{} kB", y / 1000.0))
        .y_label_style(axis_label_style)
        .draw()?;

    // draw bars
    // x coord is index of bench name in x_labels
    ctx.draw_series(stats.iter().enumerate().map(|(i, (_name, stats))| {
        // define each bar as a Rectangle
        let x0 = SegmentValue::Exact(i);
        let x1 = SegmentValue::Exact(i + 1);
        let color = Palette99::pick(i).filled();
        let mut bar = Rectangle::new([(x0, 0.0), (x1, stats.mean)], color);
        bar.set_margin(0, 0, 30, 30); // spacing between bars
        bar
    }))?;

    Ok(())
}

/// Plots all available data in target/memory and stores graphs in images
fn main() -> Result<(), Box<dyn Error>> {
    // iterate through param options ex. shrink-buffers or reuse-config
    for param_dir_entry in read_dir("target/memory")? {
        let param_dir_path = param_dir_entry?.path();
        let param_name = param_dir_path.file_name().unwrap().to_str().unwrap();

        // iterate through targets, ex. client or server
        for target_dir_entry in read_dir(&param_dir_path)? {
            let target_name = target_dir_entry?.file_name().to_string_lossy().to_string();
            plot_memory_data(param_name, &target_name)?;
        }
    }

    Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/bin/graph_perf.rs000066400000000000000000000236301456575232400266210ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use plotters::{
    prelude::{
        BindKeyPoints, ChartBuilder, ErrorBar, IntoDrawingArea, LabelAreaPosition, Rectangle,
        SVGBackend, SeriesLabelPosition,
    },
    series::LineSeries,
    style::{AsRelative, Color, IntoFont, Palette, Palette99, RGBAColor, BLACK, WHITE},
};
use semver::Version;
use serde_json::Value;
use std::{
    collections::{BTreeSet, HashMap},
    fs::{read_dir, read_to_string},
    path::Path,
};

struct Stats {
    mean: f64,
    stderr: f64,
}

struct VersionDataPoint {
    version: Version, // x coordinate
    mean: f64,        // y coordinate
    stderr: f64,      // y error bar
}

struct VersionDataSeries {
    name: String, // ex. throughput-AES_128_GCM_SHA256
    data: Vec<VersionDataPoint>,
}

struct DataPoint {
    x: i32,
    y: f64,
    y_bar: f64,
}

struct DataSeries {
    name: String,
    data: Vec<DataPoint>,
}

/// Get the relevant stats in a given JSON bench output
fn process_single_json(path: &Path) -> Stats {
    let json_str = read_to_string(path).unwrap();
    let json_value: Value = serde_json::from_str(json_str.as_str()).unwrap();
    let stats = json_value.get("mean").unwrap();
    Stats {
        mean: stats.get("point_estimate").unwrap().as_f64().unwrap(),
        stderr: stats.get("standard_error").unwrap().as_f64().unwrap(),
    }
}
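// A minimal sketch of the Criterion estimates.json shape assumed above (only the fields
// looked up in `process_single_json` are shown; the numbers are illustrative):
// { "mean": { "point_estimate": 123456.0, "standard_error": 789.0, ... }, ... }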

/// Get data from directory of Criterion json outputs, given directory path
/// Outputs a Vec of (version, mean, stderr) sorted by version
fn parse_bench_group_data(path: &Path) -> Vec<VersionDataPoint> {
    let mut data: Vec<VersionDataPoint> = read_dir(path)
        .unwrap()
        .map(|dir_entry| {
            let path = dir_entry.unwrap().path();
            let stats = process_single_json(&path);
            let tag = path.file_stem().unwrap().to_str().unwrap();
            let version = Version::parse(&tag[1..]).unwrap();
            VersionDataPoint {
                version,
                mean: stats.mean,
                stderr: stats.stderr,
            }
        })
        .collect();
    data.sort_by(|data_point_1, data_point_2| data_point_1.version.cmp(&data_point_2.version));
    data
}

/// Gets data from all bench groups given a prefix (ex. "handshake") for the bench group names
fn get_all_data(prefix: &str) -> Vec<VersionDataSeries> {
    read_dir("target/historical-perf")
        .unwrap()
        .map(|dir_entry| dir_entry.unwrap().path())
        .filter(|path| {
            // get all paths starting with prefix
            path.file_name()
                .unwrap()
                .to_str()
                .unwrap()
                .starts_with(prefix)
        })
        .map(|path| {
            // get data in each directory
            VersionDataSeries {
                name: path.file_name().unwrap().to_string_lossy().into_owned(),
                data: parse_bench_group_data(&path),
            }
        })
        .collect()
}

fn get_unique_versions(data: &[VersionDataSeries]) -> BTreeSet<Version> {
    data.iter()
        .flat_map(|data_series| {
            data_series
                .data
                .iter()
                .map(|version_data_point| version_data_point.version.clone())
        })
        .collect()
}

/// Converts all VersionDataSeries in version_data to DataSeries
fn convert_to_data_series(
    version_data: Vec<VersionDataSeries>,
    version_to_x: &HashMap<&Version, i32>,
) -> Vec<DataSeries> {
    version_data
        .into_iter()
        .map(|version_data_series| DataSeries {
            name: version_data_series.name,
            data: version_data_series
                .data
                .into_iter()
                .map(|version_data_point| DataPoint {
                    // map VersionDataPoints to DataPoints
                    x: version_to_x[&&version_data_point.version],
                    y: version_data_point.mean,
                    y_bar: version_data_point.stderr * 1.96, // 95% confidence interval
                })
                .collect(),
        })
        .collect()
}

/// Plots given DataSeries with given chart parameters
fn plot_data<F: Fn(&i32) -> String, G: Fn(&f64) -> String>(
    data: &[DataSeries],
    image_name: &str,
    bench_name: &str,
    x_label_formatter: &F,
    y_label: &str,
    y_label_formatter: &G,
) {
    // get x_max and y_max for plotting range
    let x_max = data
        .iter()
        .flat_map(|data_series| data_series.data.iter().map(|data_point| data_point.x))
        .max_by(|a, b| a.partial_cmp(b).unwrap())
        .unwrap();
    let y_max = data
        .iter()
        .flat_map(|data_series| data_series.data.iter().map(|data_point| data_point.y))
        .max_by(|a, b| a.partial_cmp(b).unwrap())
        .unwrap();

    // setup plotting
    let path = format!("images/historical-perf-{image_name}.svg");
    let drawing_area = SVGBackend::new(&path, (1000, 500)).into_drawing_area();
    drawing_area.fill(&WHITE).unwrap();

    let mut ctx = ChartBuilder::on(&drawing_area)
        .caption(
            format!("Performance of {bench_name} by version since Jun 2022"),
            ("sans-serif", 30).into_font(),
        )
        .set_label_area_size(LabelAreaPosition::Left, (17).percent()) // axes padding
        .set_label_area_size(LabelAreaPosition::Bottom, (11).percent())
        .build_cartesian_2d(
            // bounds for plot
            // plot every other x coord starting from 1 (not 0 which is default)
            (0..(x_max + 1)).with_key_points((1..(x_max + 1)).step_by(2).collect()),
            0.0..(1.2 * y_max),
        )
        .unwrap();

    let axis_label_style = ("sans-serif", 18).into_font();

    ctx.configure_mesh()
        .light_line_style(RGBAColor(235, 235, 235, 1.0)) // gridline color
        .bold_line_style(RGBAColor(225, 225, 225, 1.0))
        .x_desc("Version") // axis labels
        .x_labels(20) // max number of labels
        .x_label_style(axis_label_style.clone())
        .x_label_formatter(x_label_formatter)
        .y_desc(y_label)
        .y_labels(5)
        .y_label_formatter(y_label_formatter)
        .y_label_style(axis_label_style)
        .draw()
        .unwrap();

    // go through each DataSeries and plot them
    for (i, data_series) in data.iter().enumerate() {
        // remove data that returned error while benching
        // heuristic: times < 1% of y_max are invalid/had error
        let filtered_data = data_series
            .data
            .iter()
            .filter(|data_point| data_point.y > 0.01 * y_max)
            .collect::<Vec<_>>();

        let color = Palette99::pick(i);

        // draw error bars
        ctx.draw_series(filtered_data.iter().map(|data_point| {
            ErrorBar::new_vertical(
                data_point.x,
                data_point.y - data_point.y_bar,
                data_point.y,
                data_point.y + data_point.y_bar,
                &color,
                3,
            )
        }))
        .unwrap();

        // draw lines with legend entry
        ctx.draw_series(LineSeries::new(
            filtered_data
                .iter()
                .map(|data_point| (data_point.x, data_point.y)),
            color.stroke_width(2),
        ))
        .unwrap()
        .label(&data_series.name)
        .legend(move |(x, y)| Rectangle::new([(x, y - 5), (x + 10, y + 5)], color.filled()));
    }

    // enable legend
    ctx.configure_series_labels()
        .position(SeriesLabelPosition::LowerRight)
        .margin(10)
        .border_style(BLACK)
        .background_style(WHITE)
        .draw()
        .unwrap();
}

fn main() {
    let handshake_data = get_all_data("handshake");
    let throughput_data = get_all_data("throughput");

    // combine all versions present in handshake and throughput data
    // also fill in missing version v1.3.15 and v1.3.30-v1.3.37
    let mut versions = get_unique_versions(&handshake_data);
    versions.extend(get_unique_versions(&throughput_data).into_iter());
    versions.extend((15..16).chain(30..38).map(|p| Version::new(1, 3, p)));
    let versions = versions.into_iter().collect::<Vec<_>>();

    // map versions to x coordinates
    let version_to_x = versions
        .iter()
        .enumerate()
        .map(|(i, version)| (version, i as i32))
        .collect::<HashMap<_, _>>();

    // convert from Vec<VersionDataSeries> to Vec<DataSeries> for plotting
    let handshake_data: Vec<DataSeries> = convert_to_data_series(handshake_data, &version_to_x);
    let mut throughput_data = convert_to_data_series(throughput_data, &version_to_x);

    // convert data from ns to transfer of 100KB of data -> bytes/s throughput
    throughput_data = throughput_data
        .into_iter()
        .map(|data_series| {
            const TRANSFER_SIZE: f64 = 1e5;
            const NANO_SIZE: f64 = 1e-9;
            DataSeries {
                name: data_series.name,
                data: data_series
                    .data
                    .into_iter()
                    .map(|data_point| {
                        let mean_throughput = TRANSFER_SIZE / (data_point.y * NANO_SIZE);
                        let stderr_throughput = mean_throughput
                            - TRANSFER_SIZE / ((data_point.y + data_point.y_bar) * NANO_SIZE);
                        DataPoint {
                            x: data_point.x,
                            y: mean_throughput,
                            y_bar: stderr_throughput,
                        }
                    })
                    .collect(),
            }
        })
        .collect();
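    // e.g. a mean of 1e5 ns to transfer 100 KB corresponds to 1e5 / (1e5 * 1e-9) = 1e9
    // bytes/s, i.e. roughly 1 GB/s (illustrative numbers only)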

    let x_label_formatter = |x: &i32| format!("{}", versions[*x as usize]);

    plot_data(
        &handshake_data,
        "handshake",
        "handshake",
        &x_label_formatter,
        "Time",
        &|y| format!("{} ms", y / 1e6),
    );
    plot_data(
        &throughput_data,
        "throughput",
        "round trip throughput",
        &x_label_formatter,
        "Throughput",
        &|y| format!("{} GB/s", y / 1e9),
    );
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/bin/memory.rs000066400000000000000000000141631456575232400260150ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

#[cfg(feature = "openssl")]
use bench::OpenSslConnection;
#[cfg(feature = "rustls")]
use bench::RustlsConnection;
use bench::{
    ConnectedBuffer, CryptoConfig, HandshakeType, Mode, S2NConnection, TlsConnPair, TlsConnection,
};
use std::{error::Error, fs::create_dir_all};
use structopt::{clap::arg_enum, StructOpt};

arg_enum! {
    enum MemoryBenchTarget {
        Client,
        Server,
        Pair,
    }
}

impl std::fmt::Debug for MemoryBenchTarget {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            match self {
                MemoryBenchTarget::Client => "client",
                MemoryBenchTarget::Server => "server",
                MemoryBenchTarget::Pair => "pair",
            }
        )
    }
}

/// Bench the memory taken by either a client, server, or pair of connections
fn memory_bench<T: TlsConnection>(opt: &Opt) -> Result<(), Box<dyn Error>> {
    let reuse_config: bool = opt.reuse_config.parse()?;
    let shrink_buffers: bool = opt.shrink_buffers.parse()?;

    // store data in directory based on params, target, and library name
    let params_string = match (reuse_config, shrink_buffers) {
        (false, false) => "no-optimizations",
        (true, false) => "reuse-config",
        (false, true) => "shrink-buffers",
        (true, true) => "reuse-config-shrink-buffers",
    };
    let dir_name = &format!(
        "target/memory/{params_string}/{:?}/{}",
        opt.target,
        T::name()
    );

    println!("benching {:?} {} {}", opt.target, T::name(), params_string);

    // create the directory that will hold memory snapshots and xtree
    create_dir_all(dir_name).unwrap();

    // create space to store TlsConnections
    const BENCH_SIZE: usize = 100;
    let mut connections = Vec::new();
    match opt.target {
        MemoryBenchTarget::Client | MemoryBenchTarget::Server => {
            connections.reserve_exact(BENCH_SIZE)
        }
        // for each connection pair, need to save two connections
        MemoryBenchTarget::Pair => connections.reserve_exact(BENCH_SIZE * 2),
    };

    // reserve space for buffers before benching
    // shrink buffers before and after handshake to keep memory net zero
    let mut buffers: Vec<ConnectedBuffer> = (0..BENCH_SIZE)
        .map(|_| {
            let mut buffer = ConnectedBuffer::new();
            buffer.shrink();
            buffer
        })
        .collect();

    // handshake one harness to initialize libraries
    let mut conn_pair = TlsConnPair::<T, T>::default();
    conn_pair.handshake().unwrap();

    // make configs
    let client_config = T::make_config(
        Mode::Client,
        CryptoConfig::default(),
        HandshakeType::default(),
    )?;
    let server_config = T::make_config(
        Mode::Server,
        CryptoConfig::default(),
        HandshakeType::default(),
    )?;

    // tell valgrind/massif to take initial memory snapshot
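    // (this assumes the bench binary itself is running under valgrind's massif tool,
    //  e.g. launched via something like `valgrind --tool=massif <memory bench binary>`;
    //  that invocation is an assumption about how the harness is driven, not enforced here)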
    crabgrind::monitor_command(format!("snapshot {dir_name}/0.snapshot")).unwrap();

    // make and handshake conn pairs
    for i in 1..BENCH_SIZE + 1 {
        // make conn pair
        let mut conn_pair;
        if reuse_config {
            let client_conn = T::new_from_config(&client_config, buffers.pop().unwrap())?;
            let server_conn = T::new_from_config(
                &server_config,
                client_conn.connected_buffer().clone_inverse(),
            )?;
            conn_pair = TlsConnPair::wrap(client_conn, server_conn);
        } else {
            conn_pair = TlsConnPair::<T, T>::new(
                CryptoConfig::default(),
                HandshakeType::default(),
                buffers.pop().unwrap(),
            )?;
        }

        // handshake conn pair
        conn_pair.handshake()?;
        if shrink_buffers {
            conn_pair.shrink_connection_buffers();
        }
        conn_pair.shrink_connected_buffers();

        // store bench target(s)
        let (client, server) = conn_pair.split();
        match opt.target {
            MemoryBenchTarget::Client => connections.push(client),
            MemoryBenchTarget::Server => connections.push(server),
            MemoryBenchTarget::Pair => {
                connections.push(client);
                connections.push(server);
            }
        };

        // take memory snapshot
        crabgrind::monitor_command(format!("snapshot {dir_name}/{i}.snapshot"))?;
    }

    // take xtree snapshot
    crabgrind::monitor_command(format!("xtmemory {dir_name}/xtree.out"))?;

    Ok(())
}

#[derive(StructOpt)]
/// Generate TLS connections and record memory used after each connection.
/// Snapshots are stored in target/memory/[params]/[target]
struct Opt {
    /// Which connection(s) to memory bench
    #[structopt(possible_values = &MemoryBenchTarget::variants(), case_insensitive = true, default_value = "pair")]
    target: MemoryBenchTarget,

    /// If set, run benches with only a specific library
    #[structopt()]
    lib_name: Option<String>,

    /// Reuse configs when making connections
    #[structopt(long, default_value = "true")]
    reuse_config: String,

    /// Shrink connection buffers after handshake to simulate idle connection
    #[structopt(long, default_value = "true")]
    shrink_buffers: String,
}

fn main() -> Result<(), Box<dyn Error>> {
    assert!(!cfg!(debug_assertions), "need to run in release mode");

    let opt = Opt::from_args();

    match &opt.lib_name {
        Some(lib_name) => match lib_name.as_str() {
            "s2n-tls" => memory_bench::<S2NConnection>(&opt)?,
            #[cfg(feature = "rustls")]
            "rustls" => memory_bench::<RustlsConnection>(&opt)?,
            #[cfg(feature = "openssl")]
            "openssl" => memory_bench::<OpenSslConnection>(&opt)?,
            _ => panic!("invalid library"),
        },
        None => {
            memory_bench::<S2NConnection>(&opt)?;
            #[cfg(feature = "rustls")]
            memory_bench::<RustlsConnection>(&opt)?;
            #[cfg(feature = "openssl")]
            memory_bench::<OpenSslConnection>(&opt)?;
        }
    }

    Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/harness.rs000066400000000000000000000410671456575232400254030ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::{
    cell::RefCell,
    collections::VecDeque,
    error::Error,
    fmt::Debug,
    fs::read_to_string,
    io::{ErrorKind, Read, Write},
    rc::Rc,
};
use strum::EnumIter;

#[derive(Clone, Copy, EnumIter)]
pub enum PemType {
    ServerKey,
    ServerCertChain,
    ClientKey,
    ClientCertChain,
    CACert,
}

impl PemType {
    fn get_filename(&self) -> &str {
        match self {
            PemType::ServerKey => "server-key.pem",
            PemType::ServerCertChain => "server-chain.pem",
            PemType::ClientKey => "client-key.pem",
            PemType::ClientCertChain => "client-cert.pem",
            PemType::CACert => "ca-cert.pem",
        }
    }
}

#[derive(Clone, Copy, Default, EnumIter)]
pub enum SigType {
    Rsa2048,
    Rsa3072,
    Rsa4096,
    #[default]
    Ecdsa384,
    Ecdsa256,
}

impl SigType {
    pub fn get_dir_name(&self) -> &str {
        match self {
            SigType::Rsa2048 => "rsa2048",
            SigType::Rsa3072 => "rsa3072",
            SigType::Rsa4096 => "rsa4096",
            SigType::Ecdsa384 => "ecdsa384",
            SigType::Ecdsa256 => "ecdsa256",
        }
    }
}

impl Debug for SigType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.get_dir_name())
    }
}

pub fn get_cert_path(pem_type: PemType, sig_type: SigType) -> String {
    format!(
        "certs/{}/{}",
        sig_type.get_dir_name(),
        pem_type.get_filename()
    )
}

pub fn read_to_bytes(pem_type: PemType, sig_type: SigType) -> Vec<u8> {
    read_to_string(get_cert_path(pem_type, sig_type))
        .unwrap()
        .into_bytes()
}

#[derive(Clone, Copy)]
pub enum Mode {
    Client,
    Server,
}

#[derive(Clone, Copy, Default, EnumIter, Eq, PartialEq)]
pub enum HandshakeType {
    #[default]
    ServerAuth,
    MutualAuth,
    Resumption,
}

impl Debug for HandshakeType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            HandshakeType::ServerAuth => write!(f, "server-auth"),
            HandshakeType::MutualAuth => write!(f, "mTLS"),
            HandshakeType::Resumption => write!(f, "resumption"),
        }
    }
}

// these parameters were the only ones readily usable for all three libraries:
// s2n-tls, rustls, and openssl
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, Default, EnumIter, Eq, PartialEq)]
pub enum CipherSuite {
    #[default]
    AES_128_GCM_SHA256,
    AES_256_GCM_SHA384,
}

#[derive(Clone, Copy, Default, EnumIter)]
pub enum KXGroup {
    Secp256R1,
    #[default]
    X25519,
}

impl Debug for KXGroup {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Secp256R1 => write!(f, "secp256r1"),
            Self::X25519 => write!(f, "x25519"),
        }
    }
}

#[derive(Clone, Copy, Debug, Default)]
pub struct CryptoConfig {
    pub cipher_suite: CipherSuite,
    pub kx_group: KXGroup,
    pub sig_type: SigType,
}

impl CryptoConfig {
    pub fn new(cipher_suite: CipherSuite, kx_group: KXGroup, sig_type: SigType) -> Self {
        Self {
            cipher_suite,
            kx_group,
            sig_type,
        }
    }
}

/// The TlsBenchConfig trait allows us to map benchmarking parameters to
/// a configuration object
pub trait TlsBenchConfig: Sized {
    fn make_config(
        mode: Mode,
        crypto_config: CryptoConfig,
        handshake_type: HandshakeType,
    ) -> Result<Self, Box<dyn Error>>;
}

/// The TlsConnection object can be created from a corresponding config type.
pub trait TlsConnection: Sized {
    /// Library-specific config struct
    type Config;

    /// Name of the connection type
    fn name() -> String;

    /// Make connection from existing config and buffer
    fn new_from_config(
        config: &Self::Config,
        connected_buffer: ConnectedBuffer,
    ) -> Result<Self, Box<dyn Error>>;

    /// Run one handshake step: receive msgs from other connection, process, and send new msgs
    fn handshake(&mut self) -> Result<(), Box<dyn Error>>;

    fn handshake_completed(&self) -> bool;

    fn get_negotiated_cipher_suite(&self) -> CipherSuite;

    fn negotiated_tls13(&self) -> bool;

    /// Describes whether a connection was resumed. This method is only valid on
    /// server connections because of rustls API limitations.
    fn resumed_connection(&self) -> bool;

    /// Send application data to ConnectedBuffer
    fn send(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>;

    /// Read application data from ConnectedBuffer
    fn recv(&mut self, data: &mut [u8]) -> Result<(), Box<dyn Error>>;

    /// Shrink buffers owned by the connection
    fn shrink_connection_buffers(&mut self);

    /// Clear and shrink buffers used for IO with another connection
    fn shrink_connected_buffer(&mut self);

    /// Get reference to internal connected buffer
    fn connected_buffer(&self) -> &ConnectedBuffer;
}

pub struct TlsConnPair<C: TlsConnection, S: TlsConnection> {
    client: C,
    server: S,
}

impl<C: TlsConnection, S: TlsConnection> TlsConnPair<C, S> {
    pub fn new(client_config: &C::Config, server_config: &S::Config) -> TlsConnPair<C, S> {
        let connected_buffer = ConnectedBuffer::default();
        let client = C::new_from_config(&client_config, connected_buffer.clone_inverse()).unwrap();
        let server = S::new_from_config(&server_config, connected_buffer).unwrap();
        Self { client, server }
    }
}

impl<C, S> Default for TlsConnPair<C, S>
where
    C: TlsConnection,
    S: TlsConnection,
    C::Config: TlsBenchConfig,
    S::Config: TlsBenchConfig,
{
    fn default() -> Self {
        Self::new_bench_pair(CryptoConfig::default(), HandshakeType::default()).unwrap()
    }
}

impl<C, S> TlsConnPair<C, S>
where
    C: TlsConnection,
    S: TlsConnection,
    C::Config: TlsBenchConfig,
    S::Config: TlsBenchConfig,
{
    /// Initialize buffers, configs, and connections (pre-handshake)
    pub fn new_bench_pair(
        crypto_config: CryptoConfig,
        handshake_type: HandshakeType,
    ) -> Result<Self, Box<dyn Error>> {
        // do an initial handshake to generate the session ticket
        if handshake_type == HandshakeType::Resumption {
            let server_config =
                S::Config::make_config(Mode::Server, crypto_config, handshake_type)?;
            let client_config =
                C::Config::make_config(Mode::Client, crypto_config, handshake_type)?;

            // handshake the client and server connections. This will result in
            // session ticket getting stored in client_config
            let mut pair = TlsConnPair::<C, S>::new(&client_config, &server_config);
            pair.handshake()?;
            // NewSessionTicket messages are part of the application data and sent
            // after the handshake is complete, so we must trigger an additional
            // "read" on the client connection to ensure that the session ticket
            // gets received and stored in the config
            pair.round_trip_transfer(&mut [0]).unwrap();

            // new_from_config is called internally by the TlsConnPair::new
            // method and will check if a session ticket is available and set it
            // on the connection. This results in the session ticket in
            // client_config (from the previous handshake) getting set on the
            // client connection.
            return Ok(TlsConnPair::<C, S>::new(&client_config, &server_config));
        }

        Ok(TlsConnPair::<C, S>::new(
            &C::Config::make_config(Mode::Client, crypto_config, handshake_type).unwrap(),
            &S::Config::make_config(Mode::Server, crypto_config, handshake_type).unwrap(),
        ))
    }
}

impl TlsConnPair
where
    C: TlsConnection,
    S: TlsConnection,
{
    /// Wrap two TlsConnections into a TlsConnPair
    pub fn wrap(client: C, server: S) -> Self {
        assert!(
            client.connected_buffer() == &server.connected_buffer().clone_inverse(),
            "connected buffers don't match"
        );
        Self { client, server }
    }

    /// Take back ownership of individual connections in the TlsConnPair
    pub fn split(self) -> (C, S) {
        (self.client, self.server)
    }

    /// Run handshake on connections
    /// Two round trips are needed for the server to receive the Finished message
    /// from the client and be ready to send data
    pub fn handshake(&mut self) -> Result<(), Box<dyn Error>> {
        for _ in 0..2 {
            self.client.handshake()?;
            self.server.handshake()?;
        }
        Ok(())
    }

    /// Checks if handshake is finished for both client and server
    pub fn handshake_completed(&self) -> bool {
        self.client.handshake_completed() && self.server.handshake_completed()
    }

    pub fn get_negotiated_cipher_suite(&self) -> CipherSuite {
        assert!(self.handshake_completed());
        assert!(
            self.client.get_negotiated_cipher_suite() == self.server.get_negotiated_cipher_suite()
        );
        self.client.get_negotiated_cipher_suite()
    }

    pub fn negotiated_tls13(&self) -> bool {
        self.client.negotiated_tls13() && self.server.negotiated_tls13()
    }

    /// Send data from client to server, and then from server to client
    pub fn round_trip_transfer(&mut self, data: &mut [u8]) -> Result<(), Box<dyn Error>> {
        // send data from client to server
        self.client.send(data)?;
        self.server.recv(data)?;

        // send data from server to client
        self.server.send(data)?;
        self.client.recv(data)?;

        Ok(())
    }

    /// Shrink buffers owned by the connections
    pub fn shrink_connection_buffers(&mut self) {
        self.client.shrink_connection_buffers();
        self.server.shrink_connection_buffers();
    }

    /// Clear and shrink buffers used for IO between the connections
    pub fn shrink_connected_buffers(&mut self) {
        self.client.shrink_connected_buffer();
        self.server.shrink_connected_buffer();
    }
}

/// Wrapper of two shared buffers to pass as stream
/// This wrapper `read()`s into one buffer and `write()`s to another
/// `Rc<RefCell<VecDeque<u8>>>` allows sharing of references to the buffers for two connections
#[derive(Clone, Eq)]
pub struct ConnectedBuffer {
    recv: Rc<RefCell<VecDeque<u8>>>,
    send: Rc<RefCell<VecDeque<u8>>>,
}

impl PartialEq for ConnectedBuffer {
    /// ConnectedBuffers are equal if and only if they point to the same VecDeques
    fn eq(&self, other: &ConnectedBuffer) -> bool {
        Rc::ptr_eq(&self.recv, &other.recv) && Rc::ptr_eq(&self.send, &other.send)
    }
}

impl ConnectedBuffer {
    /// Make a new struct with new internal buffers
    pub fn new() -> Self {
        let recv = Rc::new(RefCell::new(VecDeque::new()));
        let send = Rc::new(RefCell::new(VecDeque::new()));

        // prevent (potentially slow) resizing of buffers for small data transfers,
        // like with handshake
        recv.borrow_mut().reserve(10000);
        send.borrow_mut().reserve(10000);

        Self { recv, send }
    }

    /// Makes a new ConnectedBuffer that shares internal buffers but swapped,
    /// ex. `write()` writes to the buffer that the inverse `read()`s from
    pub fn clone_inverse(&self) -> Self {
        Self {
            recv: self.send.clone(),
            send: self.recv.clone(),
        }
    }

    /// Clears and shrinks buffers
    pub fn shrink(&mut self) {
        self.recv.borrow_mut().clear();
        self.recv.borrow_mut().shrink_to_fit();
        self.send.borrow_mut().clear();
        self.send.borrow_mut().shrink_to_fit();
    }
}
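// Minimal usage sketch (not part of the bench harness itself): bytes written to one
// buffer are readable from its `clone_inverse()`:
//
//     let mut a = ConnectedBuffer::new();
//     let mut b = a.clone_inverse();
//     a.write_all(b"hello").unwrap();
//     let mut out = [0u8; 5];
//     b.read_exact(&mut out).unwrap();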

impl Read for ConnectedBuffer {
    fn read(&mut self, dest: &mut [u8]) -> Result<usize, std::io::Error> {
        let res = self.recv.borrow_mut().read(dest);
        match res {
            // rustls expects WouldBlock on read of length 0
            Ok(0) => Err(std::io::Error::new(ErrorKind::WouldBlock, "blocking")),
            Ok(len) => Ok(len),
            Err(err) => Err(err),
        }
    }
}

impl Write for ConnectedBuffer {
    fn write(&mut self, src: &[u8]) -> Result<usize, std::io::Error> {
        self.send.borrow_mut().write(src)
    }
    fn flush(&mut self) -> Result<(), std::io::Error> {
        Ok(()) // data already available to destination
    }
}

impl Default for ConnectedBuffer {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(feature = "openssl")]
    use crate::OpenSslConnection;
    #[cfg(feature = "rustls")]
    use crate::RustlsConnection;
    use crate::{S2NConnection, TlsConnPair};
    use std::path::Path;
    use strum::IntoEnumIterator;

    #[test]
    fn test_cert_paths_valid() {
        for pem_type in PemType::iter() {
            for sig_type in SigType::iter() {
                assert!(
                    Path::new(&get_cert_path(pem_type, sig_type)).exists(),
                    "cert not found"
                );
            }
        }
    }

    #[test]
    fn test_all() {
        test_type::<S2NConnection, S2NConnection>();
        #[cfg(feature = "rustls")]
        test_type::<RustlsConnection, RustlsConnection>();
        #[cfg(feature = "openssl")]
        test_type::<OpenSslConnection, OpenSslConnection>();
    }

    fn test_type<C, S>()
    where
        S: TlsConnection,
        C: TlsConnection,
        C::Config: TlsBenchConfig,
        S::Config: TlsBenchConfig,
    {
        println!("{} client --- {} server", C::name(), S::name());
        handshake_configs::<C, S>();
        transfer::<C, S>();
    }

    fn handshake_configs<C, S>()
    where
        S: TlsConnection,
        C: TlsConnection,
        C::Config: TlsBenchConfig,
        S::Config: TlsBenchConfig,
    {
        for handshake_type in HandshakeType::iter() {
            for cipher_suite in CipherSuite::iter() {
                for kx_group in KXGroup::iter() {
                    for sig_type in SigType::iter() {
                        let crypto_config = CryptoConfig::new(cipher_suite, kx_group, sig_type);
                        let mut conn_pair =
                            TlsConnPair::<C, S>::new_bench_pair(crypto_config, handshake_type)
                                .unwrap();

                        assert!(!conn_pair.handshake_completed());
                        conn_pair.handshake().unwrap();
                        assert!(conn_pair.handshake_completed());

                        assert!(conn_pair.negotiated_tls13());
                        assert_eq!(cipher_suite, conn_pair.get_negotiated_cipher_suite());
                    }
                }
            }
        }
    }

    fn session_resumption<C, S>()
    where
        S: TlsConnection,
        C: TlsConnection,
        C::Config: TlsBenchConfig,
        S::Config: TlsBenchConfig,
    {
        println!("testing with client:{} server:{}", C::name(), S::name());
        let mut conn_pair =
            TlsConnPair::<C, S>::new_bench_pair(CryptoConfig::default(), HandshakeType::Resumption)
                .unwrap();
        conn_pair.handshake().unwrap();
        let (_, server) = conn_pair.split();
        assert!(server.resumed_connection());
    }

    #[test]
    fn session_resumption_interop() {
        env_logger::builder()
            .filter_level(log::LevelFilter::Debug)
            .is_test(true)
            .try_init()
            .unwrap();
        session_resumption::<S2NConnection, S2NConnection>();
        session_resumption::<S2NConnection, RustlsConnection>();
        session_resumption::<S2NConnection, OpenSslConnection>();

        session_resumption::<RustlsConnection, S2NConnection>();
        session_resumption::<RustlsConnection, RustlsConnection>();
        session_resumption::<RustlsConnection, OpenSslConnection>();

        session_resumption::<OpenSslConnection, S2NConnection>();
        session_resumption::<OpenSslConnection, RustlsConnection>();
        session_resumption::<OpenSslConnection, OpenSslConnection>();
    }

    fn transfer<C, S>()
    where
        S: TlsConnection,
        C: TlsConnection,
        C::Config: TlsBenchConfig,
        S::Config: TlsBenchConfig,
    {
        // use a large buffer to test across TLS record boundaries
        let mut buf = [0x56u8; 1000000];
        for cipher_suite in CipherSuite::iter() {
            let crypto_config =
                CryptoConfig::new(cipher_suite, KXGroup::default(), SigType::default());
            let mut conn_pair =
                TlsConnPair::<C, S>::new_bench_pair(crypto_config, HandshakeType::default())
                    .unwrap();
            conn_pair.handshake().unwrap();
            conn_pair.round_trip_transfer(&mut buf).unwrap();
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/lib.rs000066400000000000000000000013021456575232400244720ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

pub mod harness;
#[cfg(feature = "openssl")]
pub mod openssl;
#[cfg(feature = "rustls")]
pub mod rustls;
pub mod s2n_tls;

#[cfg(feature = "openssl")]
pub use crate::openssl::OpenSslConnection;
#[cfg(feature = "rustls")]
pub use crate::rustls::RustlsConnection;
pub use crate::{
    harness::{
        get_cert_path, CipherSuite, ConnectedBuffer, CryptoConfig, HandshakeType, KXGroup, Mode,
        PemType, SigType, TlsConnPair, TlsConnection,
    },
    s2n_tls::S2NConnection,
};

// controls profiler frequency for flamegraph generation in benchmarks
pub const PROFILER_FREQUENCY: i32 = 100;
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/openssl.rs000066400000000000000000000213211456575232400254120ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    get_cert_path,
    harness::{
        CipherSuite, ConnectedBuffer, CryptoConfig, HandshakeType, KXGroup, Mode, TlsConnection, TlsBenchConfig,
    },
    PemType::*,
};
use openssl::ssl::{
    ErrorCode, Ssl, SslContext, SslFiletype, SslMethod, SslSession, SslSessionCacheMode, SslStream,
    SslVerifyMode, SslVersion,
};
use std::{
    error::Error,
    io::{Read, Write},
    sync::{Arc, Mutex},
};

// Creates session ticket callback handler
#[derive(Clone, Default)]
pub struct SessionTicketStorage {
    stored_ticket: Arc<Mutex<Option<SslSession>>>,
}

pub struct OpenSslConnection {
    connected_buffer: ConnectedBuffer,
    connection: SslStream<ConnectedBuffer>,
}

impl Drop for OpenSslConnection {
    fn drop(&mut self) {
        // shutdown must be called for session resumption to work
        // https://www.openssl.org/docs/man1.1.1/man3/SSL_set_session.html
        self.connection.shutdown().unwrap();
    }
}

pub struct OpenSslConfig {
    config: SslContext,
    session_ticket_storage: SessionTicketStorage,
}

impl TlsBenchConfig for OpenSslConfig {

    fn make_config(
        mode: Mode,
        crypto_config: CryptoConfig,
        handshake_type: HandshakeType,
    ) -> Result<Self, Box<dyn Error>> {
        let cipher_suite = match crypto_config.cipher_suite {
            CipherSuite::AES_128_GCM_SHA256 => "TLS_AES_128_GCM_SHA256",
            CipherSuite::AES_256_GCM_SHA384 => "TLS_AES_256_GCM_SHA384",
        };

        let ec_key = match crypto_config.kx_group {
            KXGroup::Secp256R1 => "P-256",
            KXGroup::X25519 => "X25519",
        };

        let ssl_method = match mode {
            Mode::Client => SslMethod::tls_client(),
            Mode::Server => SslMethod::tls_server(),
        };

        let session_ticket_storage = SessionTicketStorage::default();

        let mut builder = SslContext::builder(ssl_method)?;
        builder.set_min_proto_version(Some(SslVersion::TLS1_3))?;
        builder.set_ciphersuites(cipher_suite)?;
        builder.set_groups_list(ec_key)?;

        match mode {
            Mode::Client => {
                builder.set_ca_file(get_cert_path(CACert, crypto_config.sig_type))?;
                builder.set_verify(SslVerifyMode::FAIL_IF_NO_PEER_CERT | SslVerifyMode::PEER);

                match handshake_type {
                    HandshakeType::MutualAuth => {
                        builder.set_certificate_chain_file(get_cert_path(
                            ClientCertChain,
                            crypto_config.sig_type,
                        ))?;
                        builder.set_private_key_file(
                            get_cert_path(ClientKey, crypto_config.sig_type),
                            SslFiletype::PEM,
                        )?;
                    }
                    HandshakeType::Resumption => {
                        builder.set_session_cache_mode(SslSessionCacheMode::CLIENT);
                        // do not attempt to define the callback outside of an
                        // expression directly passed into the function, because
                        // the compiler's type inference doesn't work for this
                        // scenario
                        // https://github.com/rust-lang/rust/issues/70263
                        builder.set_new_session_callback({
                            let sts = session_ticket_storage.clone();
                            move |_, ticket| {
                                let _ = sts.stored_ticket.lock().unwrap().insert(ticket);
                            }
                        });
                    }
                    HandshakeType::ServerAuth => {}
                }
            }
            Mode::Server => {
                builder.set_certificate_chain_file(get_cert_path(
                    ServerCertChain,
                    crypto_config.sig_type,
                ))?;
                builder.set_private_key_file(
                    get_cert_path(ServerKey, crypto_config.sig_type),
                    SslFiletype::PEM,
                )?;

                if handshake_type == HandshakeType::MutualAuth {
                    builder.set_ca_file(get_cert_path(CACert, crypto_config.sig_type))?;
                    builder.set_verify(SslVerifyMode::FAIL_IF_NO_PEER_CERT | SslVerifyMode::PEER);
                }
                if handshake_type == HandshakeType::Resumption {
                    builder.set_session_cache_mode(SslSessionCacheMode::CLIENT);
                }
            }
        }
        Ok(Self {
            config: builder.build(),
            session_ticket_storage,
        })
    }
}

impl TlsConnection for OpenSslConnection {
    type Config = OpenSslConfig;

    fn name() -> String {
        let version_num = openssl::version::number() as u64;
        let patch: u8 = (version_num >> 4) as u8;
        let fix = (version_num >> 12) as u8;
        let minor = (version_num >> 20) as u8;
        let major = (version_num >> 28) as u8;
        format!(
            "openssl{}.{}.{}{}",
            major,
            minor,
            fix,
            (b'a' + patch - 1) as char
        )
    }


    fn new_from_config(
        config: &Self::Config,
        connected_buffer: ConnectedBuffer,
    ) -> Result<Self, Box<dyn Error>> {
        // check if there is a session ticket available
        // a session ticket will only be available if the Config was created
        // with session resumption enabled
        let maybe_ticket = config
            .session_ticket_storage
            .stored_ticket
            .lock()
            .unwrap()
            .take();
        if let Some(ticket) = &maybe_ticket {
            let _result = unsafe { config.config.add_session(ticket) };
        }

        let mut connection = Ssl::new(&config.config)?;
        if let Some(ticket) = &maybe_ticket {
            unsafe { connection.set_session(ticket)? };
        }

        let connection = SslStream::new(connection, connected_buffer.clone())?;
        Ok(Self {
            connected_buffer,
            connection,
        })
    }

    fn handshake(&mut self) -> Result<(), Box<dyn Error>> {
        let result = if self.connection.ssl().is_server() {
            self.connection.accept()
        } else {
            self.connection.connect()
        };

        // treat blocking (`ErrorCode::WANT_READ`) as `Ok`, expected during handshake
        match result {
            Ok(_) => Ok(()),
            Err(err) => {
                if err.code() != ErrorCode::WANT_READ {
                    Err(err.into())
                } else {
                    Ok(())
                }
            }
        }
    }

    fn handshake_completed(&self) -> bool {
        self.connection.ssl().is_init_finished()
    }

    fn get_negotiated_cipher_suite(&self) -> CipherSuite {
        let cipher_suite = self
            .connection
            .ssl()
            .current_cipher()
            .expect("Handshake not completed")
            .name();
        match cipher_suite {
            "TLS_AES_128_GCM_SHA256" => CipherSuite::AES_128_GCM_SHA256,
            "TLS_AES_256_GCM_SHA384" => CipherSuite::AES_256_GCM_SHA384,
            _ => panic!("Unknown cipher suite"),
        }
    }

    fn negotiated_tls13(&self) -> bool {
        self.connection
            .ssl()
            .version2() // version() -> &str is deprecated, version2() returns an enum instead
            .expect("Handshake not completed")
            == SslVersion::TLS1_3
    }

    fn send(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
        let mut write_offset = 0;
        while write_offset < data.len() {
            write_offset += self.connection.write(&data[write_offset..data.len()])?;
            self.connection.flush()?; // make sure internal buffers don't fill up
        }
        Ok(())
    }

    fn recv(&mut self, data: &mut [u8]) -> Result<(), Box<dyn Error>> {
        let data_len = data.len();
        let mut read_offset = 0;
        while read_offset < data.len() {
            read_offset += self.connection.read(&mut data[read_offset..data_len])?
        }
        Ok(())
    }

    /// Not possible after connection initialization with OpenSSL's API:
    /// in order to shrink buffers owned by the connection, the config has to be built
    /// with `builder.set_mode(SslMode::RELEASE_BUFFERS);`, which tells the
    /// connection to release buffers only when it's idle
    fn shrink_connection_buffers(&mut self) {}

    fn shrink_connected_buffer(&mut self) {
        self.connected_buffer.shrink();
    }

    fn connected_buffer(&self) -> &ConnectedBuffer {
        &self.connected_buffer
    }

    fn resumed_connection(&self) -> bool {
        self.connection.ssl().session_reused()
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/rustls.rs000066400000000000000000000201621456575232400252650ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    harness::{
        read_to_bytes, CipherSuite, ConnectedBuffer, CryptoConfig, HandshakeType, KXGroup, Mode,
        TlsConnection, TlsBenchConfig,
    },
    PemType::{self, *},
    SigType,
};
use rustls::{
    cipher_suite::{TLS13_AES_128_GCM_SHA256, TLS13_AES_256_GCM_SHA384},
    kx_group::{SECP256R1, X25519},
    server::AllowAnyAuthenticatedClient,
    version::TLS13,
    Certificate, ClientConfig, ClientConnection, Connection, PrivateKey,
    ProtocolVersion::TLSv1_3,
    RootCertStore, ServerConfig, ServerConnection, ServerName,
};
use rustls_pemfile::{certs, pkcs8_private_keys};
use std::{
    error::Error,
    io::{BufReader, Read, Write},
    sync::Arc,
};

pub struct RustlsConnection {
    connected_buffer: ConnectedBuffer,
    connection: Connection,
}

impl RustlsConnection {
    pub fn connection(&self) -> &Connection {
        &self.connection
    }

    /// Treat `WouldBlock` as an `Ok` value for when blocking is expected
    fn ignore_block<T: Default>(res: Result<T, std::io::Error>) -> Result<T, std::io::Error> {
        match res {
            Ok(t) => Ok(t),
            Err(err) => match err.kind() {
                std::io::ErrorKind::WouldBlock => Ok(T::default()),
                _ => Err(err),
            },
        }
    }
}

impl RustlsConfig {
    fn get_root_cert_store(sig_type: SigType) -> Result<RootCertStore, Box<dyn Error>> {
        let root_cert =
            Certificate(certs(&mut BufReader::new(&*read_to_bytes(CACert, sig_type)))?.remove(0));
        let mut root_certs = RootCertStore::empty();
        root_certs.add(&root_cert)?;
        Ok(root_certs)
    }

    fn get_cert_chain(
        pem_type: PemType,
        sig_type: SigType,
    ) -> Result<Vec<Certificate>, Box<dyn Error>> {
        let chain = certs(&mut BufReader::new(&*read_to_bytes(pem_type, sig_type)))?;
        Ok(chain
            .iter()
            .map(|bytes| Certificate(bytes.to_vec()))
            .collect())
    }

    fn get_key(pem_type: PemType, sig_type: SigType) -> Result<PrivateKey, Box<dyn Error>> {
        Ok(PrivateKey(
            pkcs8_private_keys(&mut BufReader::new(&*read_to_bytes(pem_type, sig_type)))?.remove(0),
        ))
    }
}

/// Clients and servers have different config types in Rustls, so wrap them in an enum
pub enum RustlsConfig {
    Client(Arc<ClientConfig>),
    Server(Arc<ServerConfig>),
}

impl TlsBenchConfig for RustlsConfig {
    fn make_config(
        mode: Mode,
        crypto_config: CryptoConfig,
        handshake_type: HandshakeType,
    ) -> Result<Self, Box<dyn Error>> {
        let cipher_suite = match crypto_config.cipher_suite {
            CipherSuite::AES_128_GCM_SHA256 => TLS13_AES_128_GCM_SHA256,
            CipherSuite::AES_256_GCM_SHA384 => TLS13_AES_256_GCM_SHA384,
        };

        let kx_group = match crypto_config.kx_group {
            KXGroup::Secp256R1 => &SECP256R1,
            KXGroup::X25519 => &X25519,
        };

        match mode {
            Mode::Client => {
                let builder = ClientConfig::builder()
                    .with_cipher_suites(&[cipher_suite])
                    .with_kx_groups(&[kx_group])
                    .with_protocol_versions(&[&TLS13])?
                    .with_root_certificates(Self::get_root_cert_store(crypto_config.sig_type)?);

                let config = match handshake_type {
                    HandshakeType::ServerAuth | HandshakeType::Resumption => {
                        builder.with_no_client_auth()
                    }
                    HandshakeType::MutualAuth => builder.with_client_auth_cert(
                        Self::get_cert_chain(ClientCertChain, crypto_config.sig_type)?,
                        Self::get_key(ClientKey, crypto_config.sig_type)?,
                    )?,
                };

                if handshake_type != HandshakeType::Resumption {
                    rustls::client::Resumption::disabled();
                }

                Ok(RustlsConfig::Client(Arc::new(config)))
            }
            Mode::Server => {
                let builder = ServerConfig::builder()
                    .with_cipher_suites(&[cipher_suite])
                    .with_kx_groups(&[kx_group])
                    .with_protocol_versions(&[&TLS13])?;

                let builder = match handshake_type {
                    HandshakeType::ServerAuth | HandshakeType::Resumption => {
                        builder.with_no_client_auth()
                    }
                    HandshakeType::MutualAuth => builder.with_client_cert_verifier(Arc::new(
                        AllowAnyAuthenticatedClient::new(Self::get_root_cert_store(
                            crypto_config.sig_type,
                        )?),
                    )),
                };

                let config = builder.with_single_cert(
                    Self::get_cert_chain(ServerCertChain, crypto_config.sig_type)?,
                    Self::get_key(ServerKey, crypto_config.sig_type)?,
                )?;

                Ok(RustlsConfig::Server(Arc::new(config)))
            }
        }
    }
}

impl TlsConnection for RustlsConnection {
    type Config = RustlsConfig;

    fn name() -> String {
        "rustls".to_string()
    }

    fn new_from_config(
        config: &Self::Config,
        connected_buffer: ConnectedBuffer,
    ) -> Result<Self, Box<dyn Error>> {
        let connection = match config {
            RustlsConfig::Client(config) => Connection::Client(ClientConnection::new(
                config.clone(),
                ServerName::try_from("localhost")?,
            )?),
            RustlsConfig::Server(config) => {
                Connection::Server(ServerConnection::new(config.clone())?)
            }
        };

        Ok(Self {
            connected_buffer,
            connection,
        })
    }

    fn handshake(&mut self) -> Result<(), Box<dyn Error>> {
        Self::ignore_block(self.connection.complete_io(&mut self.connected_buffer))?;
        Ok(())
    }

    fn handshake_completed(&self) -> bool {
        !self.connection.is_handshaking()
    }

    fn get_negotiated_cipher_suite(&self) -> CipherSuite {
        match self.connection.negotiated_cipher_suite().unwrap().suite() {
            rustls::CipherSuite::TLS13_AES_128_GCM_SHA256 => CipherSuite::AES_128_GCM_SHA256,
            rustls::CipherSuite::TLS13_AES_256_GCM_SHA384 => CipherSuite::AES_256_GCM_SHA384,
            _ => panic!("Unknown cipher suite"),
        }
    }

    fn negotiated_tls13(&self) -> bool {
        self.connection
            .protocol_version()
            .expect("Handshake not completed")
            == TLSv1_3
    }

    fn send(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
        let mut write_offset = 0;
        while write_offset < data.len() {
            write_offset += self
                .connection
                .writer()
                .write(&data[write_offset..data.len()])?;
            self.connection.writer().flush()?;
            self.connection.complete_io(&mut self.connected_buffer)?;
        }
        Ok(())
    }

    fn recv(&mut self, data: &mut [u8]) -> Result<(), Box<dyn Error>> {
        let data_len = data.len();
        let mut read_offset = 0;
        while read_offset < data.len() {
            self.connection.complete_io(&mut self.connected_buffer)?;
            read_offset += Self::ignore_block(
                self.connection
                    .reader()
                    .read(&mut data[read_offset..data_len]),
            )?;
        }
        Ok(())
    }

    fn shrink_connection_buffers(&mut self) {
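        // Cap rustls' internal buffering at a single byte so the connection
        // itself holds on to as little memory as possible.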
        self.connection.set_buffer_limit(Some(1));
    }

    fn shrink_connected_buffer(&mut self) {
        self.connected_buffer.shrink();
    }

    fn connected_buffer(&self) -> &ConnectedBuffer {
        &self.connected_buffer
    }

    fn resumed_connection(&self) -> bool {
        if let rustls::Connection::Server(s) = &self.connection {
            s.received_resumption_data().is_some()
        } else {
            panic!("rustls connection resumption status must be checked on the server side");
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/bench/src/s2n_tls.rs000066400000000000000000000246651456575232400253310ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    harness::{
        read_to_bytes, CipherSuite, ConnectedBuffer, CryptoConfig, HandshakeType, KXGroup, Mode,
        TlsConnection,
    },
    PemType::*,
};
use s2n_tls::{
    callbacks::{SessionTicketCallback, VerifyHostNameCallback},
    config::Builder,
    connection::Connection,
    enums::{Blinding, ClientAuthType, Version},
    security::Policy,
};
use std::{
    borrow::BorrowMut,
    error::Error,
    ffi::c_void,
    io::{ErrorKind, Read, Write},
    os::raw::c_int,
    pin::Pin,
    sync::{Arc, Mutex},
    task::Poll,
    time::SystemTime,
};

/// Custom callback for verifying hostnames. Rustls requires checking hostnames,
/// so this is to make a fair comparison
struct HostNameHandler {
    expected_server_name: &'static str,
}
impl VerifyHostNameCallback for HostNameHandler {
    fn verify_host_name(&self, hostname: &str) -> bool {
        self.expected_server_name == hostname
    }
}

#[derive(Clone, Debug, Default)]
pub struct SessionTicketStorage(Arc<Mutex<Option<Vec<u8>>>>);

impl SessionTicketCallback for SessionTicketStorage {
    fn on_session_ticket(
        &self,
        _connection: &mut s2n_tls::connection::Connection,
        session_ticket: &s2n_tls::callbacks::SessionTicket,
    ) {
        let mut ticket = vec![0; session_ticket.len().unwrap()];
        session_ticket.data(&mut ticket).unwrap();
        let _ = self.0.lock().unwrap().insert(ticket);
    }
}

const KEY_NAME: &str = "InsecureTestKey";
const KEY_VALUE: [u8; 16] = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3];

/// s2n-tls has mode-independent configs, so this struct wraps the config with the mode
pub struct S2NConfig {
    mode: Mode,
    config: s2n_tls::config::Config,
    ticket_storage: SessionTicketStorage,
}

impl crate::harness::TlsBenchConfig for S2NConfig {
    fn make_config(
        mode: Mode,
        crypto_config: CryptoConfig,
        handshake_type: HandshakeType,
    ) -> Result<Self, Box<dyn Error>> {
        // these security policies negotiate the given cipher suite and key
        // exchange group as their top choice
        let security_policy = match (crypto_config.cipher_suite, crypto_config.kx_group) {
            (CipherSuite::AES_128_GCM_SHA256, KXGroup::Secp256R1) => "20230317",
            (CipherSuite::AES_256_GCM_SHA384, KXGroup::Secp256R1) => "20190802",
            (CipherSuite::AES_128_GCM_SHA256, KXGroup::X25519) => "default_tls13",
            (CipherSuite::AES_256_GCM_SHA384, KXGroup::X25519) => "20190801",
        };

        let mut builder = Builder::new();
        builder
            .set_security_policy(&Policy::from_version(security_policy)?)?
            .wipe_trust_store()?
            .set_client_auth_type(match handshake_type {
                HandshakeType::MutualAuth => ClientAuthType::Required,
                _ => ClientAuthType::None, // ServerAuth or resumption handshake
            })?;

        if handshake_type == HandshakeType::Resumption {
            builder.enable_session_tickets(true)?;
        }

        let session_ticket_storage = SessionTicketStorage::default();

        match mode {
            Mode::Client => {
                builder
                    .trust_pem(read_to_bytes(CACert, crypto_config.sig_type).as_slice())?
                    .set_verify_host_callback(HostNameHandler {
                        expected_server_name: "localhost",
                    })?;

                match handshake_type {
                    HandshakeType::MutualAuth => {
                        builder.load_pem(
                            read_to_bytes(ClientCertChain, crypto_config.sig_type).as_slice(),
                            read_to_bytes(ClientKey, crypto_config.sig_type).as_slice(),
                        )?;
                    }
                    HandshakeType::Resumption => {
                        builder.set_session_ticket_callback(session_ticket_storage.clone())?;
                    }
                    // no special configuration
                    HandshakeType::ServerAuth => {}
                }
            }
            Mode::Server => {
                builder.load_pem(
                    read_to_bytes(ServerCertChain, crypto_config.sig_type).as_slice(),
                    read_to_bytes(ServerKey, crypto_config.sig_type).as_slice(),
                )?;

                match handshake_type {
                    HandshakeType::MutualAuth => {
                        builder
                            .trust_pem(read_to_bytes(CACert, crypto_config.sig_type).as_slice())?
                            .set_verify_host_callback(HostNameHandler {
                                expected_server_name: "localhost",
                            })?;
                    }
                    HandshakeType::Resumption => {
                        builder.add_session_ticket_key(
                            KEY_NAME.as_bytes(),
                            KEY_VALUE.as_slice(),
                            // use a time that we are sure is in the past to
                            // make the key immediately available
                            SystemTime::UNIX_EPOCH,
                        )?;
                    }
                    // no special configuration for normal handshake
                    HandshakeType::ServerAuth => {}
                };
            }
        }

        Ok(S2NConfig {
            mode,
            config: builder.build()?,
            ticket_storage: session_ticket_storage,
        })
    }
}

pub struct S2NConnection {
    // Pin<Box<ConnectedBuffer>> is to ensure the long-term *mut to the IO buffers remains valid
    connected_buffer: Pin<Box<ConnectedBuffer>>,
    connection: Connection,
    handshake_completed: bool,
}

impl S2NConnection {
    /// Unsafe callback for custom IO C API
    ///
    /// s2n-tls IO is usually used with file descriptors to a TCP socket, but we
    /// reduce overhead and outside noise with a local buffer for benchmarking
    unsafe extern "C" fn send_cb(context: *mut c_void, data: *const u8, len: u32) -> c_int {
        let context = &mut *(context as *mut ConnectedBuffer);
        let data = core::slice::from_raw_parts(data, len as _);
        context.write(data).unwrap() as _
    }

    /// Unsafe callback for custom IO C API
    unsafe extern "C" fn recv_cb(context: *mut c_void, data: *mut u8, len: u32) -> c_int {
        let context = &mut *(context as *mut ConnectedBuffer);
        let data = core::slice::from_raw_parts_mut(data, len as _);
        context.flush().unwrap();
        match context.read(data) {
            Err(err) => {
                // s2n-tls requires the callback to set errno if blocking happens
                if let ErrorKind::WouldBlock = err.kind() {
                    errno::set_errno(errno::Errno(libc::EWOULDBLOCK));
                    -1
                } else {
                    panic!("{err:?}");
                }
            }
            Ok(len) => len as _,
        }
    }

    pub fn connection(&self) -> &Connection {
        &self.connection
    }
}

impl TlsConnection for S2NConnection {
    type Config = S2NConfig;

    fn name() -> String {
        "s2n-tls".to_string()
    }

    fn new_from_config(
        config: &Self::Config,
        connected_buffer: ConnectedBuffer,
    ) -> Result<Self, Box<dyn Error>> {
        let mode = match config.mode {
            Mode::Client => s2n_tls::enums::Mode::Client,
            Mode::Server => s2n_tls::enums::Mode::Server,
        };

        let mut connected_buffer = Box::pin(connected_buffer);

        let mut connection = Connection::new(mode);
        connection
            .set_blinding(Blinding::SelfService)?
            .set_config(config.config.clone())?
            .set_send_callback(Some(Self::send_cb))?
            .set_receive_callback(Some(Self::recv_cb))?;
        unsafe {
            connection
                .set_send_context(&mut *connected_buffer as *mut ConnectedBuffer as *mut c_void)?
                .set_receive_context(
                    &mut *connected_buffer as *mut ConnectedBuffer as *mut c_void,
                )?;
        }

        if let Some(ticket) = config.ticket_storage.0.lock().unwrap().borrow_mut().take() {
            connection.set_session_ticket(&ticket)?;
        }

        Ok(Self {
            connected_buffer,
            connection,
            handshake_completed: false,
        })
    }

    fn handshake(&mut self) -> Result<(), Box<dyn Error>> {
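        // poll_negotiate() advances the handshake as far as the buffered IO
        // allows; a handshake failure panics via the unwrap below.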
        self.handshake_completed = self
            .connection
            .poll_negotiate()
            .map(|res| res.unwrap()) // unwrap `Err` if present
            .is_ready();
        Ok(())
    }

    fn handshake_completed(&self) -> bool {
        self.handshake_completed
    }

    fn get_negotiated_cipher_suite(&self) -> CipherSuite {
        match self.connection.cipher_suite().unwrap() {
            "TLS_AES_128_GCM_SHA256" => CipherSuite::AES_128_GCM_SHA256,
            "TLS_AES_256_GCM_SHA384" => CipherSuite::AES_256_GCM_SHA384,
            _ => panic!("Unknown cipher suite"),
        }
    }

    fn negotiated_tls13(&self) -> bool {
        self.connection.actual_protocol_version().unwrap() == Version::TLS13
    }

    fn resumed_connection(&self) -> bool {
        !self
            .connection
            .handshake_type()
            .unwrap()
            .contains("FULL_HANDSHAKE")
    }

    fn send(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
        let mut write_offset = 0;
        while write_offset < data.len() {
            match self.connection.poll_send(&data[write_offset..]) {
                Poll::Ready(bytes_written) => write_offset += bytes_written?,
                Poll::Pending => return Err("unexpected pending".into()),
            }
            assert!(self.connection.poll_flush().is_ready());
        }
        Ok(())
    }

    fn recv(&mut self, data: &mut [u8]) -> Result<(), Box<dyn Error>> {
        let data_len = data.len();
        let mut read_offset = 0;
        while read_offset < data_len {
            match self.connection.poll_recv(data) {
                Poll::Ready(bytes_read) => read_offset += bytes_read?,
                Poll::Pending => return Err("unexpected pending".into()),
            }
        }
        Ok(())
    }

    fn shrink_connection_buffers(&mut self) {
        self.connection.release_buffers().unwrap();
    }

    fn shrink_connected_buffer(&mut self) {
        self.connected_buffer.shrink();
    }

    fn connected_buffer(&self) -> &ConnectedBuffer {
        &self.connected_buffer
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/generate.sh000077500000000000000000000022771456575232400236550ustar00rootroot00000000000000#/usr/bin/env bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

set -xe

# cd into the script directory so it can be executed from anywhere
pushd "$(dirname "${BASH_SOURCE[0]}")"

# delete the existing copy in case we have extra files
rm -rf s2n-tls-sys/lib
mkdir -p s2n-tls-sys/lib
mkdir -p s2n-tls-sys/lib/tests
mkdir -p s2n-tls-sys/src/features

# we copy the C sources into the `lib` directory so they get published in the
# actual crate artifact.
cp -r \
  ../../api \
  ../../crypto \
  ../../error \
  ../../stuffer \
  ../../tls \
  ../../utils \
  s2n-tls-sys/lib/

cp -r \
  ../../tests/features \
  s2n-tls-sys/lib/tests/

cp -r \
  ../../CMakeLists.txt \
  ../../cmake \
  s2n-tls-sys/lib/

# generate the bindings modules from the copied sources
pushd generate
cargo run -- ../s2n-tls-sys
popd

if [ "$1" == "--skip-tests" ]; then
    echo "skipping tests"
    exit;
fi;

# make sure everything builds and passes sanity checks
pushd s2n-tls-sys
cargo test
cargo test --all-features
cargo test --release
cargo publish --dry-run --allow-dirty
cargo publish --dry-run --allow-dirty --all-features
popd

pushd integration
cargo run
popd

popd
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/generate/000077500000000000000000000000001456575232400233065ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/generate/Cargo.toml000066400000000000000000000006441456575232400252420ustar00rootroot00000000000000[package]
name = "generate"
version = "0.1.0"
authors = ["AWS s2n"]
edition = "2021"
license = "Apache-2.0"
# this is an internal tool for generating bindings
publish = false

[dependencies]
bindgen = "0.65"
glob = "0.3"
regex = "=1.9.6" # newer versions require rust 1.65, see https://github.com/aws/s2n-tls/issues/4242
home = "=0.5.5" # newer versions require rust 1.70, see https://github.com/aws/s2n-tls/issues/4395
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/generate/src/000077500000000000000000000000001456575232400240755ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/generate/src/main.rs000066400000000000000000000252461456575232400254000ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::{
    collections::BTreeSet,
    fs::{self, read_to_string},
    io,
    path::Path,
    sync::{Arc, Mutex},
};

use bindgen::callbacks::ItemKind;

/// This is a placeholder that is replaced with the appropriate "feature token".
/// The placeholder is found in the *.template files of s2n-tls-sys/templates.
/// In Cargo.template this is replaced with the autogenerated list of features,
/// and in features.template this is replaced with the autogenerated list of
/// modules.
const FEATURE_TOKEN_PLACEHOLDER: &str = "";

/// This binary is only expected to run in the context of the generate.sh script
/// which handles certain behaviors such as copying header files to
/// s2n-tls-sys/lib and other sundry actions.
fn main() {
    let out_dir = std::env::args().nth(1).expect("missing sys dir");
    let out_dir = Path::new(&out_dir);

    let functions = FunctionCallbacks::default();

    gen_bindings(
        "#include <s2n.h>",
        &out_dir.join("lib"),
        functions.with_feature(None),
    )
    .allowlist_type("s2n_.*")
    .allowlist_function("s2n_.*")
    .allowlist_var("s2n_.*")
    .generate()
    .unwrap()
    .write_to_file(out_dir.join("src/api.rs"))
    .unwrap();

    write_feature_bindings(
        out_dir.join("lib/tls/s2n_internal.h"),
        "internal",
        out_dir,
        out_dir.join("src/features/internal.rs"),
        functions.clone(),
    );
    write_feature_bindings(
        out_dir.join("lib/tls/s2n_quic_support.h"),
        "quic",
        out_dir,
        out_dir.join("src/features/quic.rs"),
        functions.clone(),
    );

    // get all of the files in the unstable folder
    let unstable_api = out_dir.join("lib/api/unstable");
    let unstable_headers: Vec<(String, fs::DirEntry)> = fs::read_dir(unstable_api)
        .expect("unable to iterate through files in unstable api folder")
        .into_iter()
        .map(|dir_entry| dir_entry.expect("failed to read header"))
        .map(|dir_entry| {
            (
                dir_entry
                    .path()
                    .file_stem()
                    .unwrap()
                    .to_str()
                    .unwrap()
                    .to_owned(),
                dir_entry,
            )
        })
        .collect();

    // write unstable bindings for them
    for (header_name, header) in unstable_headers.iter() {
        let feature_name = format!("unstable-{}", header_name);
        write_feature_bindings(
            header.path(),
            &feature_name,
            out_dir,
            out_dir.join(format!("src/features/{}.rs", header_name)),
            functions.clone(),
        );
    }

    // generate a cargo.toml that defines the correct features
    let features_definition_token = unstable_headers
        .iter()
        .map(|(header_name, _header)| format!("unstable-{header_name} = []"))
        .collect::<Vec<String>>()
        .join("\n");
    let cargo_template = out_dir.join("templates/Cargo.template");
    let cargo_template = read_to_string(cargo_template).expect("unable to read cargo template");
    let cargo_toml = cargo_template.replace(FEATURE_TOKEN_PLACEHOLDER, &features_definition_token);
    fs::write(out_dir.join("Cargo.toml"), cargo_toml).unwrap();

    // generate a features.rs that includes the correct modules
    let features_module_token = unstable_headers
        .iter()
        .map(|(header_name, _header)| {
            format!("conditional_module!({header_name}, \"unstable-{header_name}\");")
        })
        .collect::<Vec<String>>()
        .join("\n");
    let features_template = out_dir.join("templates/features.template");
    let features_template = read_to_string(features_template).expect("unable to read features template");
    let features_rs = features_template.replace(FEATURE_TOKEN_PLACEHOLDER, &features_module_token);
    std::fs::write(out_dir.join("src/features.rs"), features_rs).unwrap();

    functions.tests(&out_dir.join("src/tests.rs")).unwrap();

    gen_files(&out_dir.join("lib"), &out_dir.join("files.rs")).unwrap();
}

const COPYRIGHT: &str = r#"
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
"#;

const PRELUDE: &str = r#"
#![allow(unused_imports, non_camel_case_types)]

use libc::{iovec, FILE, off_t};
"#;

fn base_builder() -> bindgen::Builder {
    bindgen::Builder::default()
        .use_core()
        .layout_tests(true)
        .detect_include_paths(true)
        .size_t_is_usize(true)
        .enable_function_attribute_detection()
        .default_enum_style(bindgen::EnumVariation::ModuleConsts)
        .rust_target(bindgen::RustTarget::Stable_1_47)
        // rust can't access thread-local variables
        // https://github.com/rust-lang/rust/issues/29594
        .blocklist_item("s2n_errno")
        .raw_line(COPYRIGHT)
        .raw_line(PRELUDE)
        .ctypes_prefix("::libc")
}

fn gen_bindings(entry: &str, s2n_dir: &Path, functions: FunctionCallbacks) -> bindgen::Builder {
    base_builder()
        .header_contents("s2n-sys.h", entry)
        // only export s2n-related stuff
        .blocklist_type("iovec")
        .blocklist_type("FILE")
        .blocklist_type("_IO_.*")
        .blocklist_type("__.*")
        .blocklist_type("fpos_t")
        .parse_callbacks(Box::new(functions))
        .clang_arg(format!("-I{}/api", s2n_dir.display()))
        .clang_arg(format!("-I{}", s2n_dir.display()))
}

fn write_feature_bindings(
    header_path: impl AsRef<Path>,
    feature_flag: &str,
    s2n_tls_sys_dir: &Path,
    output_path: impl AsRef<Path>,
    functions: FunctionCallbacks,
) {
    let header_path_str = format!("{}", header_path.as_ref().display());
    let lib_path = s2n_tls_sys_dir.join("lib");
    base_builder()
        .header(&header_path_str)
        .parse_callbacks(Box::new(
            functions.with_feature(Some(feature_flag.to_owned())),
        ))
        // manually include header contents
        .clang_arg(format!("-I{}/api", lib_path.display()))
        .clang_arg(format!("-I{}", lib_path.display()))
        .allowlist_recursively(false)
        .allowlist_file(&header_path_str)
        // s2n_internal.h defines opaque handles to these structs, but we want
        // them to be imported from the main api module
        .blocklist_type("s2n_connection")
        .blocklist_type("s2n_config")
        .raw_line("use crate::api::*;\n")
        .generate()
        .unwrap()
        .write_to_file(output_path)
        .unwrap();
}

fn gen_files(input: &Path, out: &Path) -> io::Result<()> {
    use io::Write;

    let mut files = std::fs::File::create(out)?;
    let mut o = io::BufWriter::new(&mut files);

    let pattern = format!("{}/**/*.c", input.display());

    writeln!(o, "{}", COPYRIGHT)?;
    writeln!(o, "[")?;
    for file in glob::glob(&pattern).unwrap() {
        let file = file.unwrap();
        let file = file.strip_prefix(input).unwrap();
        // don't include tests
        if file.starts_with("tests") {
            continue;
        }
        writeln!(o, "    {:?},", Path::new("lib").join(file).display())?;
    }
    writeln!(o, "]")?;
    Ok(())
}

type SharedBTreeSet<T> = Arc<Mutex<BTreeSet<T>>>;

#[derive(Clone, Debug, Default)]
struct FunctionCallbacks {
    /// the current feature that is having bindings generated
    feature: Arc<Mutex<Option<String>>>,
    /// a list of all functions that have had bindings generated for them
    functions: SharedBTreeSet<(Option<String>, String)>,
}

impl FunctionCallbacks {
    fn with_feature(&self, feature: Option<String>) -> Self {
        *self.feature.lock().unwrap() = feature;
        self.clone()
    }

    fn tests(&self, out: &Path) -> io::Result<()> {
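        // Emit one #[test] per bound function; each test just takes the
        // function's address, which is enough to prove the symbol exists and
        // links correctly.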
        use io::Write;
        let functions = self.functions.lock().unwrap();
        let mut tests = std::fs::File::create(out)?;
        let mut o = io::BufWriter::new(&mut tests);

        writeln!(o, "{}", COPYRIGHT)?;
        let iter = functions.iter();
        for (feature, function) in iter {
            // don't generate a test if it's enabled without a feature
            if feature.is_some() && functions.contains(&(None, function.to_string())) {
                continue;
            }

            writeln!(o, "#[test]")?;

            // if the function is behind a feature, gate it with `cfg`
            if let Some(feature) = feature {
                writeln!(o, "#[cfg(feature = {:?})]", feature)?;
            };

            writeln!(o, "fn {} () {{", function)?;
            writeln!(o, "    let ptr = crate::{} as *const ();", function)?;
            writeln!(o, "    assert!(!ptr.is_null());")?;
            writeln!(o, "}}")?;
            writeln!(o)?;
        }

        Ok(())
    }
}

impl bindgen::callbacks::ParseCallbacks for FunctionCallbacks {
    fn enum_variant_name(
        &self,
        _name: Option<&str>,
        variant_name: &str,
        _variant_value: bindgen::callbacks::EnumVariantValue,
    ) -> Option<String> {
        if !variant_name.starts_with("S2N_") {
            return None;
        }

        let variant_name = variant_name
            .trim_start_matches("S2N_ERR_T_")
            .trim_start_matches("S2N_EXTENSION_")
            // keep the LEN_ so it's a valid identifier
            .trim_start_matches("S2N_TLS_MAX_FRAG_")
            .trim_start_matches("S2N_ALERT_")
            .trim_start_matches("S2N_CT_SUPPORT_")
            .trim_start_matches("S2N_STATUS_REQUEST_")
            .trim_start_matches("S2N_CERT_AUTH_")
            .trim_start_matches("S2N_CLIENT_HELLO_CB_")
            .trim_start_matches("S2N_TLS_SIGNATURE_")
            .trim_start_matches("S2N_TLS_HASH_")
            .trim_start_matches("S2N_PSK_HMAC_")
            .trim_start_matches("S2N_PSK_MODE_")
            .trim_start_matches("S2N_ASYNC_PKEY_VALIDATION_")
            .trim_start_matches("S2N_ASYNC_")
            .trim_start_matches("S2N_EARLY_DATA_STATUS_")
            // match everything else
            .trim_start_matches("S2N_");

        Some(variant_name.to_owned())
    }

    /// This doesn't actually rename anything, and is just used to get a list of
    /// all the functions that we generate bindings for, which is used for test
    /// generation later in the process.
    fn generated_name_override(
        &self,
        item_info: bindgen::callbacks::ItemInfo<'_>,
    ) -> Option<String> {
        if !item_info.name.starts_with("s2n_") {
            return None;
        }

        if let ItemKind::Function = item_info.kind {
            let feature = self.feature.lock().unwrap().clone();
            self.functions
                .lock()
                .unwrap()
                .insert((feature, item_info.name.to_owned()));
        }
        None
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/000077500000000000000000000000001456575232400240375ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/Cargo.toml000066400000000000000000000010361456575232400257670ustar00rootroot00000000000000[package]
name = "integration"
version = "0.1.0"
authors = ["AWS s2n"]
edition = "2021"
publish = false

[dependencies]
s2n-tls = { path = "../s2n-tls", features = ["testing"] }
s2n-tls-sys = { path = "../s2n-tls-sys" }
criterion = { version = "0.3", features = ["html_reports"] }
anyhow = "1"

[[bench]]
name = "handshake"
harness = false

[[bench]]
name = "s2nc"
harness = false

[[bench]]
name = "s2nd"
harness = false

[dev-dependencies]
regex = "=1.9.6" # newer versions require rust 1.65, see https://github.com/aws/s2n-tls/issues/4242
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/benches/000077500000000000000000000000001456575232400254465ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/benches/handshake.rs000066400000000000000000000015061456575232400277440ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use criterion::{criterion_group, criterion_main, Criterion};
use s2n_tls::{
    security,
    testing::{build_config, establish_connection},
};

pub fn handshake(c: &mut Criterion) {
    let mut group = c.benchmark_group("s2n-tls_client_server");

    for policy in security::ALL_POLICIES {
        let config = build_config(policy).unwrap();
        group.bench_function(format!("handshake_{:?}", policy), move |b| {
            // This does include connection initialization overhead.
            // TODO: create a separate benchmark that excludes this step.
            b.iter(|| establish_connection(config.clone()));
        });
    }

    group.finish();
}

criterion_group!(benches, handshake);
criterion_main!(benches);
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/benches/s2nc.rs000066400000000000000000000021441456575232400266620ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use criterion::{criterion_group, criterion_main, Criterion};
use std::{env, process::Command, time::Duration};

pub fn s2nc(c: &mut Criterion) {
    let mut group = c.benchmark_group("s2nc");
    let s2nc_env: &str = &env::var("S2NC_ARGS").unwrap();
    let s2nc_test_name: &str = &env::var("S2NC_TEST_NAME").unwrap();
    let test_name = format!("s2nc_{}", s2nc_test_name);
    let s2nc_split = s2nc_env.split(' ').collect::<Vec<&str>>();
    group.bench_function(test_name, move |b| {
        b.iter(|| {
            let s2nc_argvec = s2nc_split.clone();
            let status = Command::new("/usr/local/bin/s2nc")
                .args(s2nc_argvec)
                .status()
                .expect("failed to execute process");
            assert!(status.success());
        });
    });

    group.finish();
}

criterion_group!(name = benches;
                 config = Criterion::default().sample_size(10).measurement_time(Duration::from_secs(1));
                 targets = s2nc);
criterion_main!(benches);
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/benches/s2nd.rs000066400000000000000000000021441456575232400266630ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use criterion::{criterion_group, criterion_main, Criterion};
use std::{env, process::Command, time::Duration};

pub fn s2nd(c: &mut Criterion) {
    let mut group = c.benchmark_group("s2nd");
    let s2nd_env: &str = &env::var("S2ND_ARGS").unwrap();
    let s2nd_test_name: &str = &env::var("S2ND_TEST_NAME").unwrap();
    let test_name = format!("s2nd_{}", s2nd_test_name);
    let s2nd_split = s2nd_env.split(' ').collect::<Vec<&str>>();
    group.bench_function(test_name, move |b| {
        b.iter(|| {
            let s2nd_argvec = s2nd_split.clone();
            let status = Command::new("/usr/local/bin/s2nd")
                .args(s2nd_argvec)
                .status()
                .expect("failed to execute process");
            assert!(status.success());
        });
    });

    group.finish();
}

criterion_group!(name = benches;
                 config = Criterion::default().sample_size(10).measurement_time(Duration::from_secs(1));
                 targets = s2nd);
criterion_main!(benches);
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/build.rs000066400000000000000000000010111456575232400254750ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::path::PathBuf;

fn main() {
    // ensure the build script exports the include directory
    let include_dir = std::env::var("DEP_S2N_TLS_INCLUDE").expect("missing DEP_S2N_TLS_INCLUDE");
    let include_dir = PathBuf::from(include_dir);

    // make sure that `s2n.h` is available
    let api = std::fs::read_to_string(include_dir.join("s2n.h")).unwrap();
    assert!(api.contains("s2n_negotiate"));
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/src/000077500000000000000000000000001456575232400246265ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/integration/src/main.rs000066400000000000000000000005001456575232400261130ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use s2n_tls_sys::*;

fn main() {
    unsafe {
        s2n_init();
        let conn = s2n_connection_new(s2n_mode::SERVER);

        if !conn.is_null() {
            s2n_connection_free(conn);
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/rust-toolchain000066400000000000000000000000071456575232400244070ustar00rootroot000000000000001.63.0
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/000077500000000000000000000000001456575232400236325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/README.md000066400000000000000000000007441456575232400251160ustar00rootroot00000000000000This crate provides low-level Rust bindings for [s2n-tls](https://github.com/aws/s2n-tls) which are autogenerated with [bindgen](https://github.com/rust-lang/rust-bindgen)

This crate is not intended for direct consumption by end consumers. Interested developers should instead look at the [s2n-tls](https://crates.io/crates/s2n-tls) or [s2n-tls-tokio](https://crates.io/crates/s2n-tls-tokio) crates. These  provide higher-level, more ergonomic bindings than the `s2n-tls-sys` crate.aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/build.rs000066400000000000000000000167511456575232400253110ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::path::{Path, PathBuf};

fn main() {
    let external = External::default();
    if external.is_enabled() {
        external.link();
    } else {
        build_vendored();
    }
}

fn env<N: AsRef<str>>(name: N) -> String {
    option_env(name).expect("missing env var")
}

fn option_env<N: AsRef<str>>(name: N) -> Option<String> {
    let name = name.as_ref();
    eprintln!("cargo:rerun-if-env-changed={}", name);
    std::env::var(name).ok()
}

struct FeatureDetector<'a> {
    builder: cc::Build,
    out_dir: &'a Path,
}

impl<'a> FeatureDetector<'a> {
    pub fn new(out_dir: &'a Path, libcrypto: &Libcrypto) -> Self {
        let builder = builder(libcrypto);
        Self { builder, out_dir }
    }

    pub fn supports(&self, name: &str) -> bool {
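        // A feature counts as supported if its probe file under
        // lib/tests/features compiles cleanly with the global and per-probe
        // flags.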
        let mut build = self.builder.get_compiler().to_command();

        let global_flags = std::path::Path::new("lib/tests/features/GLOBAL.flags");
        assert!(
            global_flags.exists(),
            "missing flags file: {:?}",
            global_flags.display()
        );

        let global_flags = std::fs::read_to_string(global_flags).unwrap();
        for flag in global_flags.trim().split(' ').filter(|f| !f.is_empty()) {
            build.arg(flag);
        }

        let base = std::path::Path::new("lib/tests/features").join(name);

        let file = base.with_extension("c");
        assert!(file.exists(), "missing feature file: {:?}", file.display());

        let probe_flags = base.with_extension("flags");
        assert!(
            probe_flags.exists(),
            "missing flags file: {:?}",
            probe_flags.display()
        );

        let probe_flags = std::fs::read_to_string(probe_flags).unwrap();
        for flag in probe_flags.trim().split(' ').filter(|f| !f.is_empty()) {
            build.arg(flag);
        }

        build
            // just compile the file and don't link
            .arg("-c")
            .arg("-o")
            .arg(self.out_dir.join(name).with_extension("o"))
            .arg(&file);

        eprintln!("=== Testing feature {name} ===");
        build.status().unwrap().success()
    }
}

fn build_vendored() {
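    // Compile the vendored s2n-tls C sources with cc and link them against the
    // libcrypto supplied by aws-lc-sys.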
    let libcrypto = Libcrypto::default();

    let mut build = builder(&libcrypto);

    build.files(include!("./files.rs"));

    if env("PROFILE") == "release" {
        // fortify source is only available in release mode
        build.define("_FORTIFY_SOURCE", "2");
        build.define("NDEBUG", "1");

        // build s2n-tls with LTO if supported
        if build.get_compiler().is_like_gnu() {
            build
                .flag_if_supported("-flto")
                .flag_if_supported("-ffat-lto-objects");
        }
    }

    let out_dir = PathBuf::from(env("OUT_DIR"));

    let features = FeatureDetector::new(&out_dir, &libcrypto);

    let mut feature_names = std::fs::read_dir("lib/tests/features")
        .expect("missing features directory")
        .flatten()
        .filter(|file| {
            let file = file.path();
            file.extension().map_or(false, |ext| ext == "c")
        })
        .map(|file| {
            file.path()
                .file_stem()
                .unwrap()
                .to_str()
                .unwrap()
                .to_string()
        })
        .collect::<Vec<String>>();

    feature_names.sort();

    for name in &feature_names {
        let is_supported = features.supports(name);
        eprintln!("{name}: {is_supported}");
        if is_supported {
            build.define(name, "1");

            // stacktraces are only available if execinfo is present
            if name == "S2N_EXECINFO_AVAILABLE" && option_env("CARGO_FEATURE_STACKTRACE").is_some()
            {
                build.define("S2N_STACKTRACE", "1");
            }
        }
    }

    // don't spit out a bunch of warnings to the end user, since they won't really be able
    // to do anything with it
    build.warnings(false);

    build.compile("s2n-tls");

    // tell rust we're linking with libcrypto
    println!("cargo:rustc-link-lib={}", libcrypto.link);

    // let consumers know where to find our header files
    let include_dir = out_dir.join("include");
    std::fs::create_dir_all(&include_dir).unwrap();
    std::fs::copy("lib/api/s2n.h", include_dir.join("s2n.h")).unwrap();
    println!("cargo:include={}", include_dir.display());
}

fn builder(libcrypto: &Libcrypto) -> cc::Build {
    let mut build = cc::Build::new();

    if let Ok(cflags) = std::env::var("CFLAGS") {
        // cc will read the CFLAGS env variable and prepend the compiler
        // command with all flags and includes from it, which may conflict
        // with the libcrypto includes we specify. To ensure the libcrypto
        // includes show up first in the compiler command, we prepend our
        // includes to CFLAGS.
        std::env::set_var("CFLAGS", format!("-I {} {}", libcrypto.include, cflags));
    } else {
        build.include(&libcrypto.include);
    };

    build
        .include("lib")
        .include("lib/api")
        .flag("-std=c11")
        .flag("-fgnu89-inline")
        // make sure the stack is non-executable
        .flag_if_supported("-z relro")
        .flag_if_supported("-z now")
        .flag_if_supported("-z noexecstack")
        // we use some deprecated libcrypto features so don't warn here
        .flag_if_supported("-Wno-deprecated-declarations")
        .define("_POSIX_C_SOURCE", "200112L");

    build
}

#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct Libcrypto {
    version: String,
    link: String,
    include: String,
    root: String,
}

impl Default for Libcrypto {
    fn default() -> Self {
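        // Cargo re-exports the aws-lc-sys build metadata as
        // DEP_AWS_LC_<version>_* environment variables; scan them to locate
        // the vendored libcrypto.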
        for (name, value) in std::env::vars() {
            if let Some(version) = name.strip_prefix("DEP_AWS_LC_") {
                if let Some(version) = version.strip_suffix("_INCLUDE") {
                    let version = version.to_string();

                    eprintln!("cargo:rerun-if-env-changed={}", name);

                    let link = format!("aws_lc_{version}_crypto");
                    let include = value;
                    let root = env(format!("DEP_AWS_LC_{version}_ROOT"));

                    return Self {
                        version,
                        link,
                        include,
                        root,
                    };
                }
            }
        }

        panic!("missing DEP_AWS_LC paths");
    }
}

struct External {
    lib_dir: Option<PathBuf>,
    include_dir: Option<PathBuf>,
}

impl Default for External {
    fn default() -> Self {
        let dir = option_env("S2N_TLS_DIR").map(PathBuf::from);

        let lib_dir = option_env("S2N_TLS_LIB_DIR")
            .map(PathBuf::from)
            .or_else(|| dir.as_ref().map(|d| d.join("lib")));

        let include_dir = option_env("S2N_TLS_INCLUDE_DIR")
            .map(PathBuf::from)
            .or_else(|| dir.as_ref().map(|d| d.join("include")));

        Self {
            lib_dir,
            include_dir,
        }
    }
}

impl External {
    fn is_enabled(&self) -> bool {
        self.lib_dir.is_some()
    }

    fn link(&self) {
        println!(
            "cargo:rustc-link-search={}",
            self.lib_dir.as_ref().unwrap().display()
        );
        println!("cargo:rustc-link-lib=s2n");

        // tell rust we're linking with libcrypto
        println!("cargo:rustc-link-lib=crypto");

        if let Some(include_dir) = self.include_dir.as_ref() {
            println!("cargo:include={}", include_dir.display());
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/src/000077500000000000000000000000001456575232400244215ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/src/lib.rs000066400000000000000000000013031456575232400255320ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

#[rustfmt::skip]
mod api;

pub use api::*;

mod features;
pub use features::*;

// Additional defines that don't get imported with bindgen

pub mod s2n_status_code {
    pub type Type = libc::c_int;
    pub const SUCCESS: Type = 0;
    pub const FAILURE: Type = -1;
}

pub mod s2n_tls_version {
    pub type Type = libc::c_int;
    pub const SSLV2: Type = 20;
    pub const SSLV3: Type = 30;
    pub const TLS10: Type = 31;
    pub const TLS11: Type = 32;
    pub const TLS12: Type = 33;
    pub const TLS13: Type = 34;
    pub const UNKNOWN: Type = 0;
}

#[cfg(test)]
#[rustfmt::skip]
mod tests;
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/templates/000077500000000000000000000000001456575232400256305ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/templates/Cargo.template000066400000000000000000000022421456575232400304200ustar00rootroot00000000000000[package]
name = "s2n-tls-sys"
description = "A C99 implementation of the TLS/SSL protocols"
version = "0.1.3"
authors = ["AWS s2n"]
edition = "2021"
rust-version = "1.63.0"
links = "s2n-tls"
repository = "https://github.com/aws/s2n-tls"
license = "Apache-2.0"
include = [
  "build.rs",
  "Cargo.toml",
  "files.rs",
  "lib/**/*.c",
  "lib/**/*.h",
  "lib/**/*.S",
  "lib/CMakeLists.txt",
  "lib/**/*.cmake",
  "lib/**/*.flags", # for feature probes
  "src/**/*.rs",
  "tests/**/*.rs",
]

[features]
default = []
# preserve the cmake feature in case any consumers had it enabled before
cmake = []
quic = []
pq = []
internal = []
stacktrace = []

# e.g. something like
# unstable-foo = []

[dependencies]
aws-lc-sys = { version = "0.13" }
libc = "0.2"

[build-dependencies]
cc = { version = "1.0", features = ["parallel"] }

[dev-dependencies]
jobserver = "=0.1.26" # newer versions require rust 1.66, see https://github.com/aws/s2n-tls/issues/4241
home = "=0.5.5" # newer versions require rust 1.70, see https://github.com/aws/s2n-tls/issues/4395
regex = "=1.9.6" # newer versions require rust 1.65, see https://github.com/aws/s2n-tls/issues/4242
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/templates/features.template000066400000000000000000000016731456575232400312120ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/// conditionally declare the module only if `feature` is enabled. If the
/// feature is enabled, import all symbols into the main namespace.
/// Disable rustfmt because it wants the `mod` and `pub use` statement to be on
/// different levels of indentation
#[rustfmt::skip]
macro_rules! conditional_module {
    ($mod_name:ident, $feature_name:literal) => {
        // bindgen will automatically rustfmt everything, but we use nightly rustfmt as
        // the authoritative rustfmt so that doesn't work for us
        #[cfg(feature = $feature_name)]
        #[rustfmt::skip]
        mod $mod_name;

        #[cfg(feature = $feature_name)]
        pub use $mod_name::*;
    };
}

conditional_module!(quic, "quic");
conditional_module!(internal, "internal");

// conditional_module!(foo, "unstable-foo");
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/tests/000077500000000000000000000000001456575232400247745ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-sys/tests/s2n_init.rs000066400000000000000000000010751456575232400270720ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use s2n_tls_sys::*;

#[test]
fn s2n_init_test() {
    unsafe {
        // don't force the tests to use mlock
        std::env::set_var("S2N_DONT_MLOCK", "1");

        // try to initialize the library
        if s2n_init() != 0 {
            let error = *s2n_errno_location();
            let msg = s2n_strerror_name(error);
            let msg = std::ffi::CStr::from_ptr(msg);
            panic!("s2n did not initialize correctly: {:?}", msg);
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/000077500000000000000000000000001456575232400241415ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/Cargo.toml000066400000000000000000000020331456575232400260670ustar00rootroot00000000000000[package]
name = "s2n-tls-tokio"
description = "An implementation of TLS streams for Tokio built on top of s2n-tls"
version = "0.1.3"
authors = ["AWS s2n"]
edition = "2021"
rust-version = "1.63.0"
repository = "https://github.com/aws/s2n-tls"
license = "Apache-2.0"

[features]
default = []

[dependencies]
errno = { version = "0.3" }
libc = { version = "0.2" }
pin-project-lite = { version = "0.2" }
s2n-tls = { version = "=0.1.3", path = "../s2n-tls" }
tokio = { version = "1", features = ["net", "time"] }

[dev-dependencies]
clap = { version = "3", features = ["derive"] }
rand = { version = "0.8" }
tokio = { version = "1", features = [ "io-std", "io-util", "macros", "net", "rt-multi-thread", "test-util", "time"] }
# newer versions require rust 1.66, see https://github.com/aws/s2n-tls/issues/4241
# this version pin is only needed to prevent verification failures when using
# cargo package / cargo publish, as those commands do not respect the version pin
# in downstream dev-dependencies (in s2n-tls-sys, in this case)
jobserver = "=0.1.26"
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/README.md000066400000000000000000000003761456575232400254260ustar00rootroot00000000000000`s2n-tls-tokio` provides async bindings that allow consumers to use [s2n-tls](https://github.com/aws/s2n-tls) within the tokio runtime. To consume `s2n-tls` outside of an async context consider using the [s2n-tls](https://crates.io/crates/s2n-tls) crate.aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/000077500000000000000000000000001456575232400257575ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/certs/000077500000000000000000000000001456575232400270775ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/certs/cert.pem000066400000000000000000000014561456575232400305450ustar00rootroot00000000000000-----BEGIN CERTIFICATE-----
MIICLDCCAdOgAwIBAgIUTihKtj/RM4hq5r9fmryUGwDlYC4wCgYIKoZIzj0EAwIw
azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkFaMQ4wDAYDVQQHDAVUZW1wZTEPMA0G
A1UECgwGQW1hem9uMRowGAYDVQQLDBFBbWF6b25XZWJTZXJ2aWNlczESMBAGA1UE
AwwJbG9jYWxob3N0MCAXDTIzMDUyNDA2MjYyN1oYDzIxMjMwNDMwMDYyNjI3WjBr
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQVoxDjAMBgNVBAcMBVRlbXBlMQ8wDQYD
VQQKDAZBbWF6b24xGjAYBgNVBAsMEUFtYXpvbldlYlNlcnZpY2VzMRIwEAYDVQQD
DAlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4be8OVofY925i
AVfv2brqeJF55DeUSP3Q1cUI3QhPxw8ZFBTg25rnYsbpGQBsW1iSmE6v1YHGlmf5
mAbhoAKJo1MwUTAdBgNVHQ4EFgQUCpoWu/Qw0+c9wyejGR0n16FNofwwHwYDVR0j
BBgwFoAUCpoWu/Qw0+c9wyejGR0n16FNofwwDwYDVR0TAQH/BAUwAwEB/zAKBggq
hkjOPQQDAgNHADBEAiBdYxkSxWkqaY6fv4QhNVPX8pNysB02lHBoLR2yVFf7MwIg
f+86G4gdpIi8tGO7Q217BAIpsWwCMrYw7O41ltGHOog=
-----END CERTIFICATE-----
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/certs/cert_rsa.pem000066400000000000000000000037511456575232400314120ustar00rootroot00000000000000-----BEGIN CERTIFICATE-----
MIIFqzCCA5OgAwIBAgIUPecTUedBPhAjrzsM16hwH5f/8HswDQYJKoZIhvcNAQEL
BQAwZDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkFaMQ4wDAYDVQQHDAVUZW1wZTEN
MAsGA1UECgwETWFhczEVMBMGA1UECwwMQmlvQ29tcHV0aW5nMRIwEAYDVQQDDAls
b2NhbGhvc3QwIBcNMjMwNTI0MDYyNjI3WhgPMjEyMzA0MzAwNjI2MjdaMGQxCzAJ
BgNVBAYTAlVTMQswCQYDVQQIDAJBWjEOMAwGA1UEBwwFVGVtcGUxDTALBgNVBAoM
BE1hYXMxFTATBgNVBAsMDEJpb0NvbXB1dGluZzESMBAGA1UEAwwJbG9jYWxob3N0
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAnflx61sSH4pF9AmdOR3r
ilFM9Z1RIZ4aZXk5Dov9UP4KykCIFC12I7wryyKHH5PoKjKKtYW+4G/lAOw/O7kP
6oDfbcQbH3E5LCT7k2a+tOwxywxmsQFpK/YCicKxkFvbKqfbb+S5U9CZ9Rmx3jGh
FldIVXYrkdq2AfFu7U0BB5rdDQLBx1GAZjtmDjP9dpPe0GuukInHqaogO9d08x2O
3WN3grxdj2NN0tjaR6/S+a1L4vwcybJ5UZ8Zq8Bd41YR6uPNJSjABcXHXAlxR83t
ENNRKl8m8uP4SqFgpmFBHHmXviyVirZRpVudWG2Zkk6WSTj+cwDlcXYucVO6cmkS
/FQsslXn2YAYDiSsuthl4UGTXyWbxskAwx65ERHZ2CwOix6TIKxB0lOGVIuoT7Hj
q6gqFRxrjAQwUk76z+j/NFwZhAe0b5G0uyHcgPuaJMI9BpzvwpqgMNSMlbeH8rd3
LsLOeCFrVcQzrYqS/aLB/FRE7QxkZutuRgAG5N3sBhxJNizROBzCd+zUEpJi2ZgH
eFstYAL4sXEh4m/7uXxMROO9rFqpZXiYocYipfddjjZ6NF3RIIfQG/Ubydm2YJe5
s3o3Q2nGuBY6l0KXJvq1uX3B5JmFcVr0eiWbZ4UZioJbZ40JEUx7ECNZqooC7QjT
EeQZXwEfvUotP5lvAFEFK5MCAwEAAaNTMFEwHQYDVR0OBBYEFFRIP81Ul8Um6h8u
ru05X1QGuPceMB8GA1UdIwQYMBaAFFRIP81Ul8Um6h8uru05X1QGuPceMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAIBDHQC3LfuH1YVieC0100IV
4tpSiQ63n5TItLRPxpPMjohAC1ZggQjU1/rVeBkP/x2dBQFgPpRXBFye9iavUVBt
xXj06Rm8kdLwF3KZHECDQArztdYqogmvXtxx5WA23nhua5NwUV6jM5i7EV7d/Bcp
+wGZKaze6fBR4Wh9FkIh9HG2tIOgJibLI33JAFjxkdY6bYoqDSUzlKZmUCMpxrXF
Zg/vYhBnBss2uvF9Ce+6Nedxgourjyma6MiH7QPwMM3Cr1rWTnG5C7MBhm4Q13Xr
LeaOayTE2JChXwyXqBjrctPs5GCbAJJFovQphyJhxNTXzHxMRu4MBCl3jFT2lzIi
r6DkgoQ3tuy5C3i4ZWNZUjqFrCu1uIeGYb9x+Wzv9/i370juF+aO5zJC3mp8bKsU
zdFQE8/q9ltuiazueCGPkFoVEVmLFICIckV6q+zh/ahXcdbSf8JxDGXElXhDK6Z3
2axo2m5f/x77FREZCmyVA9I8fYDo3A7srGgkwMc+isdrbzytSC2KKo4/XnpvIRWo
vvKKslTzyaMoSv3i9qj23kWowRKqrguCs0wG1d7QiCRdEAsFO+MZPzsde5zKqcQ1
AC8kSUZrFRjFfZfc0JAB7pI6BSlVwzVItdIdlA9jzEZr2c7430djTyidyTKiWj3D
FkfWZeLWo9l/1vC+Qyeo
-----END CERTIFICATE-----
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/certs/generate.sh000077500000000000000000000012521456575232400312300ustar00rootroot00000000000000# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# this script will generate all of the certs used for the tokio examples and
# tests.

# used for TLS 1.2 connections
echo "generating rsa key and cert"
openssl req -x509 -newkey rsa:4096 -keyout key_rsa.pem -out cert_rsa.pem -sha256 -days 36500 -nodes -subj "/C=US/ST=AZ/L=Tempe/O=Maas/OU=BioComputing/CN=localhost"


# used for TLS 1.3 connections
echo "generating ec key and cert"
openssl req -x509 -newkey ec -pkeyopt ec_paramgen_curve:P-256 -keyout key.pem -out cert.pem -sha256 -days 36500 -nodes -subj "/C=US/ST=AZ/L=Tempe/O=Amazon/OU=AmazonWebServices/CN=localhost"
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/certs/key.pem000066400000000000000000000003611456575232400303720ustar00rootroot00000000000000-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg7CYotHO5H76smTyL
DHb6esNVfzBEDAi+vZMvtM2SItahRANCAAS4be8OVofY925iAVfv2brqeJF55DeU
SP3Q1cUI3QhPxw8ZFBTg25rnYsbpGQBsW1iSmE6v1YHGlmf5mAbhoAKJ
-----END PRIVATE KEY-----
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/certs/key_rsa.pem000066400000000000000000000063101456575232400312370ustar00rootroot00000000000000-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCd+XHrWxIfikX0
CZ05HeuKUUz1nVEhnhpleTkOi/1Q/grKQIgULXYjvCvLIocfk+gqMoq1hb7gb+UA
7D87uQ/qgN9txBsfcTksJPuTZr607DHLDGaxAWkr9gKJwrGQW9sqp9tv5LlT0Jn1
GbHeMaEWV0hVdiuR2rYB8W7tTQEHmt0NAsHHUYBmO2YOM/12k97Qa66QicepqiA7
13TzHY7dY3eCvF2PY03S2NpHr9L5rUvi/BzJsnlRnxmrwF3jVhHq480lKMAFxcdc
CXFHze0Q01EqXyby4/hKoWCmYUEceZe+LJWKtlGlW51YbZmSTpZJOP5zAOVxdi5x
U7pyaRL8VCyyVefZgBgOJKy62GXhQZNfJZvGyQDDHrkREdnYLA6LHpMgrEHSU4ZU
i6hPseOrqCoVHGuMBDBSTvrP6P80XBmEB7RvkbS7IdyA+5okwj0GnO/CmqAw1IyV
t4fyt3cuws54IWtVxDOtipL9osH8VETtDGRm625GAAbk3ewGHEk2LNE4HMJ37NQS
kmLZmAd4Wy1gAvixcSHib/u5fExE472sWqlleJihxiKl912ONno0XdEgh9Ab9RvJ
2bZgl7mzejdDaca4FjqXQpcm+rW5fcHkmYVxWvR6JZtnhRmKgltnjQkRTHsQI1mq
igLtCNMR5BlfAR+9Si0/mW8AUQUrkwIDAQABAoICABNeh+C/IFg8qHoVCbF8oSoq
8MA2w0FXdM4XUG8fq/YXx/PneKfcsR0Nj+MOvzmYdYFjBA3gsbgsEAyOauxkMvbi
SULYpiizRbrLaAs5ME3Xnf1S7m5qiV4qAkKo3AOit5shzK6KAU0tCQt71Iq1VBZ8
qjJMVGhEREe1tg+jfupTSyIywdayAAfI5aj9J+k3lTzBzzwKoLAFW4Fq1/yNFbzA
WuFtPh18JAkjrJPYk4oofOL+XXs/hk+DLe8YnYpnvbeqqbcSKYUrrFd1Eu4mahhD
iLKkyw5B9GoWIlcUGpsmr3vSVJ2W4CjhtEeo3IkLWLLuqYgpRnikcGpVxUkM6BZV
3Oyliy9bI2HUXrKsMiJI5vTp7MPb/BqSGLxSc209tQ2YEYS912dwLWloNPbsC4ka
ubIN9FA+ihHc+kcNC4C+p4Q0+6zKm7TINwIPYEXKLkCaDvnJNa3EPk+HBDcUe+y0
sMFb7nDaFtGkqrz/yBuiAxJ3LHumDZrLiI+RF19ITc6z83ZkJL44iCMlmusiD6/v
pTih2QZcwl3nTRZ5U4QiKG926Kpr7YPnEa4Jb/EIZqM3atWI3TdM9wt1oR0Xwah2
22LA6uB20+5ayyr3bbqpBGGtRR0nzx1MfmNSjS9CXY1A4oVoNIfSyFzRSDzR75Nc
QYB1mkaySe26fHY2IvE9AoIBAQDKguV92UmGa2YLZW9KKzlYiB8v8fMylHOWZWPo
VsNO5ioO32ZdQTnI1mb8u4Gu40FmsiWmYcymF0sBmgj5WKW8H/QWShSSxsRy37xK
B7MLXaDNxX/GdrN3vzP8/v0jNSouyhdraR5CQFaPydcEC4go7VdtapqYKwO8K6QP
+nAxDFDNLmoUgCJ5HYdJjPiyYiEk/oBJPFuJJEzhy8/1bL0FRa7Xf3t4hMvKQmn5
lB9AUWJQe3sdSJIeTBEeBKI5IRs6pLTpRhXISzl9U6ne3DMmNjdUSdnIAlD1IRVz
GLcv2WAJA9nyFuyI2vXpEREntsBAUDqH8/4r0X051cihGqFPAoIBAQDHsx5W4CGz
yPK4NFJsrBbTMnlC2eRikwh3KUZxqfV2hArxj2o8hQoJF1KL3uTzGwMjENkPSEOF
l6tvt5dIAkDB5V5Kl2050f7HNOT8beGSTC+XhYJVQR0WlVmH0wL4JoSRCiBuJFgl
VQLLLULk1IPfLXO/WIT+HMtdQJuxOZ87YBl8bk6L7TIo6dzVumilthkwMEx5Bpt1
4a/RSB2ytgEZ+nq0rw6B3x2A+oe/WDmKJz1hzlwqtWjcErDbyh/URhsPMy8v7YfS
yfFI5xs4V0y/6cPeiwzEt2j/ri0l6iXuHIo3vfiVmjfbCNt5xdfLwLMyPvb50w78
cOGIrJTOixh9AoIBAHTi9pkcCJ8ocD7VrlWt+yyJbQ7cZRXlr424OeFJ6vMoUFBD
S685zaOJdOodRC3wh7hBzQ9q1ZdsTGPBR7q8GvUpsFYJLjTFu2/eiaxlI0fFI+KB
Q04HB3FvkA9QEkX3EsDs7nWVt9xfjFb0FkIEcfPFgjfqVw7LjR1YRxrd31HAHQdA
6gOpLbygeYzPbGBx2VWj++HJj7/pjuKpJAAxYiyf2toRf+/BHmtAyDBA+YTDNK7l
htsRHx2TpDGiVQwKes/vb2hRhpMUMDuRRbQXfwJoOqlQ8v3JsHa8QMgtFNbubcrm
SS69Yotfbp62tiMpDXBRnmcIQsWB5fiy11l9qGsCggEBALo2jQjncKXLsIER0PYi
thnzy90JfwxoHJG4/EYK/Kep0uaTG7YZPFVj2QYyLv1957vv/udgNA5Dcon3AOsV
prrnHbiiYSh0Z4XsDcZQ0/Fud0ZsKS9ES1J+q4Caw/8PM7hnX/w0ibkt/y/kIadt
fxZBtNftO/4UbZGkKEnm+FMOdlVwKnASGx7Ji7IEY4Z6OhyUPQrxnU8bxq14+LBn
RLmrtnX7cqSpn0Jg2uoS43yuwG8OVSVCCeiEyUP5wxpW0O6ohMzFSkzFH+giEGDG
Dvw5h/phYOOiPxeDm/fp0bqqBDycOUuPBZyKMtXnJSWKp84npncoVqYQwE2I2C2M
yKUCggEAPwjoA4mtGgLNEcDYTR8HyBwiZfpuA1F0nupj2DpyhqSeXDN03jpkjn7Z
uKrigxbjVp+2gYMjkPVJPFu7xrIkDSaL+CCMjz/9yF2Zc58qlaZ8yQqIKu278rrC
G3WO7Fiz5hYovNDPZonHKVJ6vmYdRoxEoUgGRUNaWHBICTEXlEhP1Pn88XTTsIs7
fKFZXqWJmdv1jyyoP9QnABamBcpVUXDIJZj5apOV8tvhmgw4xoZDXYYYBaQNfCeo
5BIzcBiGf0cb0SLDYFKNfL52jQw4tMgU5TWD3X80eX3jx9f0MKKr7W72ZKtqFy0l
HexuSf301dcJjvvhVwij3vhljQp8QA==
-----END PRIVATE KEY-----
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/client.rs000066400000000000000000000035641456575232400276130ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use clap::Parser;
use s2n_tls::{config::Config, security::DEFAULT_TLS13};
use s2n_tls_tokio::TlsConnector;
use std::{error::Error, fs};
use tokio::{io::AsyncWriteExt, net::TcpStream};

/// NOTE: this certificate is to be used for demonstration purposes only!
const DEFAULT_CERT: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/examples/certs/cert.pem");

#[derive(Parser, Debug)]
struct Args {
    #[clap(short, long, default_value_t = String::from(DEFAULT_CERT))]
    trust: String,
    addr: String,
}

async fn run_client(trust_pem: &[u8], addr: &str) -> Result<(), Box> {
    // Set up the configuration for new connections.
    // Minimally you will need a trust store.
    let mut config = Config::builder();
    config.set_security_policy(&DEFAULT_TLS13)?;
    config.trust_pem(trust_pem)?;

    // Create the TlsConnector based on the configuration.
    let client = TlsConnector::new(config.build()?);

    // Connect to the server.
    let stream = TcpStream::connect(addr).await?;
    let tls = client.connect("localhost", stream).await?;
    println!("{:#?}", tls);

    // Split the stream.
    // This allows us to call read and write from different tasks.
    let (mut reader, mut writer) = tokio::io::split(tls);

    // Copy data from the server to stdout
    tokio::spawn(async move {
        let mut stdout = tokio::io::stdout();
        tokio::io::copy(&mut reader, &mut stdout).await
    });

    // Send data from stdin to the server
    let mut stdin = tokio::io::stdin();
    tokio::io::copy(&mut stdin, &mut writer).await?;
    writer.shutdown().await?;

    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), Box> {
    let args = Args::parse();
    let trust_pem = fs::read(args.trust)?;
    run_client(&trust_pem, &args.addr).await?;
    Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/examples/server.rs000066400000000000000000000056351456575232400276440ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use clap::Parser;
use s2n_tls::{config::Config, enums::Mode, pool::ConfigPoolBuilder, security::DEFAULT_TLS13};
use s2n_tls_tokio::TlsAcceptor;
use std::{error::Error, fs};
use tokio::{io::AsyncWriteExt, net::TcpListener};

/// NOTE: this certificate and key are to be used for demonstration purposes only!
const DEFAULT_CERT: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/examples/certs/cert.pem");
const DEFAULT_KEY: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/examples/certs/key.pem");

#[derive(Parser, Debug)]
struct Args {
    #[clap(short, long, requires = "key", default_value_t = String::from(DEFAULT_CERT))]
    cert: String,
    #[clap(short, long, requires = "cert", default_value_t = String::from(DEFAULT_KEY))]
    key: String,
    #[clap(short, long, default_value_t = String::from("127.0.0.1:0"))]
    addr: String,
}

async fn run_server(cert_pem: &[u8], key_pem: &[u8], addr: &str) -> Result<(), Box> {
    // Set up the configuration for new connections.
    // Minimally you will need a certificate and private key.
    let mut config = Config::builder();
    config.set_security_policy(&DEFAULT_TLS13)?;
    config.load_pem(cert_pem, key_pem)?;

    // Create a connection pool to reuse connections.
    let mut pool = ConfigPoolBuilder::new(Mode::Server, config.build()?);
    pool.set_max_pool_size(10);

    // Create the TlsAcceptor based on the pool.
    let server = TlsAcceptor::new(pool.build());

    // Bind to an address and listen for connections.
    // ":0" can be used to automatically assign a port.
    let listener = TcpListener::bind(&addr).await?;
    let addr = listener
        .local_addr()
        .map(|x| x.to_string())
        .unwrap_or_else(|_| "UNKNOWN".to_owned());
    println!("Listening on {}", addr);

    loop {
        // Wait for a client to connect.
        let (stream, peer_addr) = listener.accept().await?;
        println!("Connection from {:?}", peer_addr);

        // Spawn a new task to handle the connection.
        // We probably want to spawn the task BEFORE calling TlsAcceptor::accept,
        // because the TLS handshake can be slow.
        let server = server.clone();
        tokio::spawn(async move {
            let mut tls = server.accept(stream).await?;
            println!("{:#?}", tls);

            // Copy data from the client to stdout
            let mut stdout = tokio::io::stdout();
            tokio::io::copy(&mut tls, &mut stdout).await?;
            tls.shutdown().await?;
            println!("Connection from {:?} closed", peer_addr);

            Ok::<(), Box<dyn Error + Send + Sync>>(())
        });
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let args = Args::parse();
    let cert_pem = fs::read(args.cert)?;
    let key_pem = fs::read(args.key)?;
    run_server(&cert_pem, &key_pem, &args.addr).await?;
    Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/src/000077500000000000000000000000001456575232400247305ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/src/lib.rs000066400000000000000000000323511456575232400260500ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use errno::{set_errno, Errno};
use s2n_tls::{
    config::Config,
    connection::{Builder, Connection},
    enums::{Blinding, CallbackResult, Mode},
    error::Error,
};
use std::{
    fmt,
    future::Future,
    io,
    os::raw::{c_int, c_void},
    pin::Pin,
    task::{
        Context, Poll,
        Poll::{Pending, Ready},
    },
};
use tokio::{
    io::{AsyncRead, AsyncWrite, ReadBuf},
    time::{sleep, Duration, Sleep},
};

macro_rules! ready {
    ($x:expr) => {
        match $x {
            Ready(r) => r,
            Pending => return Pending,
        }
    };
}

#[derive(Clone)]
pub struct TlsAcceptor<B: Builder = Config>
where
    <B as Builder>::Output: Unpin,
{
    builder: B,
}

impl<B: Builder> TlsAcceptor<B>
where
    <B as Builder>::Output: Unpin,
{
    pub fn new(builder: B) -> Self {
        TlsAcceptor { builder }
    }

    pub async fn accept<S>(&self, stream: S) -> Result<TlsStream<S, B::Output>, Error>
    where
        S: AsyncRead + AsyncWrite + Unpin,
    {
        let conn = self.builder.build_connection(Mode::Server)?;
        TlsStream::open(conn, stream).await
    }
}

#[derive(Clone)]
pub struct TlsConnector<B: Builder = Config>
where
    <B as Builder>::Output: Unpin,
{
    builder: B,
}

impl<B: Builder> TlsConnector<B>
where
    <B as Builder>::Output: Unpin,
{
    pub fn new(builder: B) -> Self {
        TlsConnector { builder }
    }

    pub async fn connect<S>(
        &self,
        domain: &str,
        stream: S,
    ) -> Result<TlsStream<S, B::Output>, Error>
    where
        S: AsyncRead + AsyncWrite + Unpin,
    {
        let mut conn = self.builder.build_connection(Mode::Client)?;
        conn.as_mut().set_server_name(domain)?;
        TlsStream::open(conn, stream).await
    }
}

struct TlsHandshake<'a, S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    tls: &'a mut TlsStream<S, C>,
    error: Option<Error>,
}

impl<S, C> Future for TlsHandshake<'_, S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    type Output = Result<(), Error>;

    fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        // Retrieve a result, either from the stored error
        // or by polling Connection::poll_negotiate().
        // Connection::poll_negotiate() only completes once,
        // regardless of how often this method is polled.
        let result = match self.error.take() {
            Some(err) => Err(err),
            None => {
                let handshake_poll = self.tls.with_io(ctx, |context| {
                    let conn = context.get_mut().as_mut();
                    conn.poll_negotiate().map(|r| r.map(|_| ()))
                });
                ready!(handshake_poll)
            }
        };
        // If the result isn't a fatal error, return it immediately.
        // Otherwise, poll Connection::poll_shutdown().
        //
        // Shutdown is only best-effort.
        // When Connection::poll_shutdown() completes, even with an error,
        // we return the original Connection::poll_negotiate() error.
        match result {
            Ok(r) => Ok(r).into(),
            Err(e) if e.is_retryable() => Err(e).into(),
            Err(e) => match Pin::new(&mut self.tls).poll_shutdown(ctx) {
                Pending => {
                    self.error = Some(e);
                    Pending
                }
                Ready(_) => Err(e).into(),
            },
        }
    }
}

pub struct TlsStream<S, C = Connection>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    conn: C,
    stream: S,
    blinding: Option<Pin<Box<Sleep>>>,
    shutdown_error: Option<Error>,
}

impl<S, C> TlsStream<S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    /// Access a shared reference to the underlying io stream
    pub fn get_ref(&self) -> &S {
        &self.stream
    }

    /// Access the mutable reference to the underlying io stream
    pub fn get_mut(&mut self) -> &mut S {
        &mut self.stream
    }

    async fn open(mut conn: C, stream: S) -> Result<Self, Error> {
        conn.as_mut().set_blinding(Blinding::SelfService)?;
        let mut tls = TlsStream {
            conn,
            stream,
            blinding: None,
            shutdown_error: None,
        };
        TlsHandshake {
            tls: &mut tls,
            error: None,
        }
        .await?;
        Ok(tls)
    }

    fn with_io<F, R>(&mut self, ctx: &mut Context, action: F) -> Poll<Result<R, Error>>
    where
        F: FnOnce(Pin<&mut Self>) -> Poll<Result<R, Error>>,
    {
        // Setting contexts on a connection is considered unsafe
        // because the raw pointers provide no lifetime or memory guarantees.
        // We protect against this by pinning the stream during the action
        // and clearing the context afterwards.
        unsafe {
            let context = self as *mut Self as *mut c_void;

            self.as_mut().set_receive_callback(Some(Self::recv_io_cb))?;
            self.as_mut().set_send_callback(Some(Self::send_io_cb))?;
            self.as_mut().set_receive_context(context)?;
            self.as_mut().set_send_context(context)?;
            self.as_mut().set_waker(Some(ctx.waker()))?;

            let result = action(Pin::new(self));

            self.as_mut().set_receive_callback(None)?;
            self.as_mut().set_send_callback(None)?;
            self.as_mut().set_receive_context(std::ptr::null_mut())?;
            self.as_mut().set_send_context(std::ptr::null_mut())?;
            self.as_mut().set_waker(None)?;
            result
        }
    }

    fn poll_io<F>(ctx: *mut c_void, action: F) -> c_int
    where
        F: FnOnce(Pin<&mut S>, &mut Context) -> Poll<Result<usize, io::Error>>,
    {
        debug_assert_ne!(ctx, std::ptr::null_mut());
        let tls = unsafe { &mut *(ctx as *mut Self) };

        let mut async_context = Context::from_waker(tls.conn.as_ref().waker().unwrap());
        let stream = Pin::new(&mut tls.stream);

        match action(stream, &mut async_context) {
            Poll::Ready(Ok(len)) => len as c_int,
            Poll::Pending => {
                set_errno(Errno(libc::EWOULDBLOCK));
                CallbackResult::Failure.into()
            }
            _ => CallbackResult::Failure.into(),
        }
    }

    unsafe extern "C" fn recv_io_cb(ctx: *mut c_void, buf: *mut u8, len: u32) -> c_int {
        Self::poll_io(ctx, |stream, async_context| {
            let mut dest = ReadBuf::new(std::slice::from_raw_parts_mut(buf, len as usize));
            stream
                .poll_read(async_context, &mut dest)
                .map_ok(|_| dest.filled().len())
        })
    }

    unsafe extern "C" fn send_io_cb(ctx: *mut c_void, buf: *const u8, len: u32) -> c_int {
        Self::poll_io(ctx, |stream, async_context| {
            let src = std::slice::from_raw_parts(buf, len as usize);
            stream.poll_write(async_context, src)
        })
    }

    /// Polls the blinding timer, if there is any.
    ///
    /// s2n has a "blinding" functionality - when a bad behavior from the peer
    /// is detected, sleeps for 10-30 seconds before answering the client
    /// and closing the connection. This mitigates some timing side channels
    /// that could leak information about encrypted data. See the
    /// `s2n_connection_set_blinding` docs for more details.
    ///
    /// For security reasons, to allow for blinding to correctly function,
    /// before dropping an s2n connection, you should wait until either
    /// `poll_blinding` or `poll_shutdown` (which calls `poll_blinding`
    /// internally) returns ready.
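    ///
    /// As a sketch only (the `tls` and `buf` variables below are illustrative,
    /// not part of this crate), an async caller handling a failed read might
    /// wait via [`apply_blinding`](Self::apply_blinding):
    ///
    /// ```ignore
    /// if tls.read(&mut buf).await.is_err() {
    ///     // Wait out any blinding delay before dropping the connection.
    ///     tls.apply_blinding().await?;
    /// }
    /// ```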
    pub fn poll_blinding(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Result<(), Error>> {
        let tls = self.get_mut();

        if tls.blinding.is_none() {
            let delay = tls.as_ref().remaining_blinding_delay()?;
            if !delay.is_zero() {
                // Sleep operates at the millisecond resolution, so add an extra
                // millisecond to account for any stray nanoseconds.
                let safety = Duration::from_millis(1);
                tls.blinding = Some(Box::pin(sleep(delay.saturating_add(safety))));
            }
        };

        if let Some(timer) = tls.blinding.as_mut() {
            ready!(timer.as_mut().poll(ctx));
            tls.blinding = None;
        }

        Poll::Ready(Ok(()))
    }

    pub async fn apply_blinding(&mut self) -> Result<(), Error> {
        ApplyBlinding { stream: self }.await
    }
}

impl<S, C> AsRef<Connection> for TlsStream<S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    fn as_ref(&self) -> &Connection {
        self.conn.as_ref()
    }
}

impl<S, C> AsMut<Connection> for TlsStream<S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    fn as_mut(&mut self) -> &mut Connection {
        self.conn.as_mut()
    }
}

impl<S, C> AsyncRead for TlsStream<S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    fn poll_read(
        self: Pin<&mut Self>,
        ctx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let tls = self.get_mut();
        tls.with_io(ctx, |mut context| {
            context
                .conn
                .as_mut()
                // Safe since poll_recv_uninitialized does not
                // deinitialize any bytes.
                .poll_recv_uninitialized(unsafe { buf.unfilled_mut() })
                .map_ok(|size| {
                    unsafe {
                        // Safe since poll_recv_uninitialized guaranteed
                        // us that the first `size` bytes have been
                        // initialized.
                        buf.assume_init(size);
                    }
                    buf.advance(size);
                })
        })
        .map_err(io::Error::from)
    }
}

impl<S, C> AsyncWrite for TlsStream<S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    fn poll_write(
        self: Pin<&mut Self>,
        ctx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        let tls = self.get_mut();
        tls.with_io(ctx, |mut context| context.conn.as_mut().poll_send(buf))
            .map_err(io::Error::from)
    }

    fn poll_flush(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let tls = self.get_mut();

        ready!(tls.with_io(ctx, |mut context| {
            context.conn.as_mut().poll_flush().map(|r| r.map(|_| ()))
        }))
        .map_err(io::Error::from)?;

        Pin::new(&mut tls.stream).poll_flush(ctx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {
        ready!(self.as_mut().poll_blinding(ctx))?;

        // s2n_shutdown_send must not be called again if it errors
        if self.shutdown_error.is_none() {
            let result = ready!(self.as_mut().with_io(ctx, |mut context| {
                context
                    .conn
                    .as_mut()
                    .poll_shutdown_send()
                    .map(|r| r.map(|_| ()))
            }));
            if let Err(error) = result {
                self.shutdown_error = Some(error);
                // s2n_shutdown_send only writes, so will never trigger blinding again.
                // So we do not need to poll_blinding again after this error.
            }
        };

        let tcp_result = ready!(Pin::new(&mut self.as_mut().stream).poll_shutdown(ctx));

        if let Some(err) = self.shutdown_error.take() {
            // poll methods shouldn't be called again after returning Ready, but
            // nothing actually prevents it so poll_shutdown should handle it.
            // s2n_shutdown can be polled indefinitely after succeeding, but not after failing.
            // s2n_tls::error::Error isn't cloneable, so we can't just return the same error
            // if poll_shutdown is called again. Instead, save a different error.
            let next_error = Error::application("Shutdown called again after error".into());
            self.shutdown_error = Some(next_error);

            Ready(Err(io::Error::from(err)))
        } else {
            Ready(tcp_result)
        }
    }
}

impl<S, C> fmt::Debug for TlsStream<S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TlsStream")
            .field("connection", self.as_ref())
            .finish()
    }
}

struct ApplyBlinding<'a, S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    stream: &'a mut TlsStream<S, C>,
}

impl<'a, S, C> Future for ApplyBlinding<'a, S, C>
where
    C: AsRef<Connection> + AsMut<Connection> + Unpin,
    S: AsyncRead + AsyncWrite + Unpin,
{
    type Output = Result<(), Error>;

    fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        Pin::new(&mut *self.as_mut().stream).poll_blinding(ctx)
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/000077500000000000000000000000001456575232400253035ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/common/000077500000000000000000000000001456575232400265735ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/common/mod.rs000066400000000000000000000071051456575232400277230ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use s2n_tls::{
    config,
    connection::Builder,
    error::Error,
    security::{DEFAULT, DEFAULT_TLS13},
};
use s2n_tls_tokio::{TlsAcceptor, TlsConnector, TlsStream};
use std::time::Duration;
use tokio::{
    io::{AsyncRead, AsyncWrite},
    net::{TcpListener, TcpStream},
};

mod stream;
pub use stream::*;
mod time;
pub use time::*;

/// NOTE: this certificate and key are used for testing purposes only!
pub static CERT_PEM: &[u8] = include_bytes!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/examples/certs/cert.pem"
));
pub static KEY_PEM: &[u8] = include_bytes!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/examples/certs/key.pem"
));
pub static RSA_CERT_PEM: &[u8] = include_bytes!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/examples/certs/cert_rsa.pem"
));
pub static RSA_KEY_PEM: &[u8] = include_bytes!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/examples/certs/key_rsa.pem"
));

pub const MIN_BLINDING_SECS: Duration = Duration::from_secs(10);
pub const MAX_BLINDING_SECS: Duration = Duration::from_secs(30);

pub static TEST_STR: &str = "hello world";

pub async fn get_streams() -> Result<(TcpStream, TcpStream), tokio::io::Error> {
    let localhost = "127.0.0.1".to_owned();
    let listener = TcpListener::bind(format!("{}:0", localhost)).await?;
    let addr = listener.local_addr()?;
    let client_stream = TcpStream::connect(&addr).await?;
    let (server_stream, _) = listener.accept().await?;
    Ok((server_stream, client_stream))
}

pub fn client_config() -> Result<config::Builder, Error> {
    let mut builder = config::Config::builder();
    builder.set_security_policy(&DEFAULT_TLS13)?;
    builder.trust_pem(CERT_PEM)?;
    Ok(builder)
}

pub fn server_config() -> Result<config::Builder, Error> {
    let mut builder = config::Config::builder();
    builder.set_security_policy(&DEFAULT_TLS13)?;
    builder.load_pem(CERT_PEM, KEY_PEM)?;
    Ok(builder)
}

pub fn client_config_tls12() -> Result<config::Builder, Error> {
    let mut builder = config::Config::builder();
    builder.set_security_policy(&DEFAULT)?;
    builder.trust_pem(RSA_CERT_PEM)?;
    Ok(builder)
}

pub fn server_config_tls12() -> Result<config::Builder, Error> {
    let mut builder = config::Config::builder();
    builder.set_security_policy(&DEFAULT)?;
    builder.load_pem(RSA_CERT_PEM, RSA_KEY_PEM)?;
    Ok(builder)
}

pub async fn run_negotiate<A: Builder, B: Builder, C, D>(
    client: &TlsConnector<A>,
    client_stream: C,
    server: &TlsAcceptor<B>,
    server_stream: D,
) -> Result<(TlsStream<C, A::Output>, TlsStream<D, B::Output>), Error>
where
    <A as Builder>::Output: Unpin,
    <B as Builder>::Output: Unpin,
    C: AsyncRead + AsyncWrite + Unpin,
    D: AsyncRead + AsyncWrite + Unpin,
{
    let (client, server) = tokio::join!(
        client.connect("localhost", client_stream),
        server.accept(server_stream)
    );
    Ok((client?, server?))
}

pub async fn get_tls_streams<A: Builder, B: Builder>(
    server_builder: A,
    client_builder: B,
) -> Result<
    (
        TlsStream<TcpStream, A::Output>,
        TlsStream<TcpStream, B::Output>,
    ),
    Box<dyn std::error::Error>,
>
where
    <A as Builder>::Output: Unpin,
    <B as Builder>::Output: Unpin,
{
    let (server_stream, client_stream) = get_streams().await?;
    let connector = TlsConnector::new(client_builder);
    let acceptor = TlsAcceptor::new(server_builder);
    let (client_tls, server_tls) =
        run_negotiate(&connector, client_stream, &acceptor, server_stream).await?;
    Ok((server_tls, client_tls))
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/common/stream.rs000066400000000000000000000071161456575232400304410ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::{
    io,
    pin::Pin,
    sync::{Arc, Mutex},
    task::{Context, Poll},
};
use tokio::{
    io::{AsyncRead, AsyncWrite, ReadBuf},
    net::TcpStream,
};

type ReadFn = Box<dyn Fn(Pin<&mut TcpStream>, &mut Context, &mut ReadBuf) -> Poll<io::Result<()>>>;
type WriteFn = Box<dyn Fn(Pin<&mut TcpStream>, &mut Context, &[u8]) -> Poll<io::Result<usize>>>;
type ShutdownFn = Box<dyn Fn(Pin<&mut TcpStream>, &mut Context) -> Poll<io::Result<()>>>;

#[derive(Default)]
struct OverrideMethods {
    next_read: Option<ReadFn>,
    next_write: Option<WriteFn>,
    next_shutdown: Option<ShutdownFn>,
}

#[derive(Default)]
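/// Thread-safe, one-shot overrides for a [`TestStream`]: a test can queue a
/// replacement for the next read, write, or shutdown poll, after which the
/// wrapped [`TcpStream`] behaves normally again.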
pub struct Overrides(Mutex<OverrideMethods>);

impl Overrides {
    pub fn next_read(&self, input: Option<ReadFn>) {
        if let Ok(mut overrides) = self.0.lock() {
            overrides.next_read = input;
        }
    }

    pub fn next_write(&self, input: Option<WriteFn>) {
        if let Ok(mut overrides) = self.0.lock() {
            overrides.next_write = input;
        }
    }

    pub fn next_shutdown(&self, input: Option<ShutdownFn>) {
        if let Ok(mut overrides) = self.0.lock() {
            overrides.next_shutdown = input;
        }
    }

    pub fn is_consumed(&self) -> bool {
        if let Ok(overrides) = self.0.lock() {
            overrides.next_read.is_none()
                && overrides.next_write.is_none()
                && overrides.next_shutdown.is_none()
        } else {
            false
        }
    }
}

unsafe impl Send for Overrides {}
unsafe impl Sync for Overrides {}

pub struct TestStream {
    stream: TcpStream,
    overrides: Arc<Overrides>,
}

impl TestStream {
    pub fn new(stream: TcpStream) -> Self {
        let overrides = Arc::new(Overrides::default());
        Self { stream, overrides }
    }

    pub fn overrides(&self) -> Arc<Overrides> {
        self.overrides.clone()
    }
}

impl AsyncRead for TestStream {
    fn poll_read(
        self: Pin<&mut Self>,
        ctx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let s = self.get_mut();
        let stream = Pin::new(&mut s.stream);
        let action = match s.overrides.0.lock() {
            Ok(mut overrides) => overrides.next_read.take(),
            _ => None,
        };
        if let Some(f) = action {
            (f)(stream, ctx, buf)
        } else {
            stream.poll_read(ctx, buf)
        }
    }
}

impl AsyncWrite for TestStream {
    fn poll_write(
        self: Pin<&mut Self>,
        ctx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        let s = self.get_mut();
        let stream = Pin::new(&mut s.stream);
        let action = match s.overrides.0.lock() {
            Ok(mut overrides) => overrides.next_write.take(),
            _ => None,
        };
        if let Some(f) = action {
            (f)(stream, ctx, buf)
        } else {
            stream.poll_write(ctx, buf)
        }
    }

    fn poll_flush(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.stream).poll_flush(ctx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let s = self.get_mut();
        let stream = Pin::new(&mut s.stream);
        let action = match s.overrides.0.lock() {
            Ok(mut overrides) => overrides.next_shutdown.take(),
            _ => None,
        };
        if let Some(f) = action {
            (f)(stream, ctx)
        } else {
            stream.poll_shutdown(ctx)
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/common/time.rs000066400000000000000000000010431456575232400300750ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use s2n_tls::callbacks::MonotonicClock;
use std::time::Duration;
use tokio::time::Instant;

/// A monotonic clock that allows the s2n-tls C library time
/// to follow the tokio::time::pause behavior.
pub struct TokioTime(Instant);

impl Default for TokioTime {
    fn default() -> Self {
        TokioTime(Instant::now())
    }
}

impl MonotonicClock for TokioTime {
    fn get_time(&self) -> Duration {
        self.0.elapsed()
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/handshake.rs000066400000000000000000000164731456575232400276120ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use rand::Rng;
use s2n_tls::{
    config::Config,
    connection::{Connection, ModifiedBuilder},
    enums::{ClientAuthType, Mode, Version},
    error::{Error, ErrorType},
    pool::ConfigPoolBuilder,
    security::DEFAULT_TLS13,
};
use s2n_tls_tokio::{TlsAcceptor, TlsConnector};
use std::{collections::VecDeque, time::Duration};
use tokio::time;

pub mod common;

#[tokio::test]
async fn handshake_basic() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (client_result, server_result) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    for tls in [client_result, server_result] {
        // Security policy ensures TLS1.3.
        assert_eq!(tls.as_ref().actual_protocol_version()?, Version::TLS13);
        // Handshake types may change, but will at least be negotiated.
        assert!(tls.as_ref().handshake_type()?.contains("NEGOTIATED"));
        // Cipher suite may change, so just makes sure we can retrieve it.
        assert!(tls.as_ref().cipher_suite().is_ok());
        assert!(tls.as_ref().selected_curve().is_ok());
    }

    Ok(())
}

#[tokio::test(flavor = "multi_thread")]
async fn handshake_with_pool_multithread() -> Result<(), Box<dyn std::error::Error>> {
    const COUNT: usize = 20;
    const CLIENT_LIMIT: usize = 3;

    let client_config = common::client_config()?.build()?;
    let server_config = common::server_config()?.build()?;

    let mut client_pool = ConfigPoolBuilder::new(Mode::Client, client_config);
    client_pool.set_max_pool_size(CLIENT_LIMIT);

    let client_pool = client_pool.build();
    let server_pool = ConfigPoolBuilder::new(Mode::Server, server_config).build();

    let client = TlsConnector::new(client_pool.clone());
    let server = TlsAcceptor::new(server_pool.clone());

    let mut tasks = VecDeque::new();
    for _ in 0..COUNT {
        let client = client.clone();
        let server = server.clone();
        tasks.push_back(tokio::spawn(async move {
            // Start each handshake at a randomly determined time
            let rand = rand::thread_rng().gen_range(0..50);
            time::sleep(Duration::from_millis(rand)).await;

            let (server_stream, client_stream) = common::get_streams().await.unwrap();
            common::run_negotiate(&client, client_stream, &server, server_stream).await
        }));
    }

    for task in tasks {
        task.await??;
    }
    Ok(())
}

#[tokio::test]
async fn handshake_with_connection_config() -> Result<(), Box<dyn std::error::Error>> {
    // Setup the client with a method
    fn with_client_auth(conn: &mut Connection) -> Result<&mut Connection, Error> {
        conn.set_client_auth_type(ClientAuthType::Optional)
    }
    let client_builder = ModifiedBuilder::new(common::client_config()?.build()?, with_client_auth);

    // Setup the server with a closure
    let server_builder = ModifiedBuilder::new(common::server_config()?.build()?, |conn| {
        conn.set_client_auth_type(ClientAuthType::Optional)
    });

    let client = TlsConnector::new(client_builder);
    let server = TlsAcceptor::new(server_builder);

    let (server_stream, client_stream) = common::get_streams().await?;
    let (client_result, server_result) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    for tls in [client_result, server_result] {
        assert!(tls.as_ref().handshake_type()?.contains("CLIENT_AUTH"));
    }

    Ok(())
}

#[tokio::test]
async fn handshake_with_connection_config_with_pool() -> Result<(), Box<dyn std::error::Error>> {
    fn with_client_auth(conn: &mut Connection) -> Result<&mut Connection, Error> {
        conn.set_client_auth_type(ClientAuthType::Optional)
    }
    let client_builder = ModifiedBuilder::new(common::client_config()?.build()?, with_client_auth);
    let server_pool =
        ConfigPoolBuilder::new(Mode::Server, common::server_config()?.build()?).build();
    let server_builder = ModifiedBuilder::new(server_pool, with_client_auth);

    let client = TlsConnector::new(client_builder);
    let server = TlsAcceptor::new(server_builder);

    for _ in 0..5 {
        let (server_stream, client_stream) = common::get_streams().await?;
        let (_, server_result) =
            common::run_negotiate(&client, client_stream, &server, server_stream).await?;
        assert!(server_result
            .as_ref()
            .handshake_type()?
            .contains("CLIENT_AUTH"));
    }

    Ok(())
}

#[tokio::test]
async fn handshake_error() -> Result<(), Box<dyn std::error::Error>> {
    // Config::default() does not include any RSA certificates,
    // but only provides TLS1.2 cipher suites that require RSA auth.
    // The server will fail to choose a cipher suite, but
    // S2N_ERR_CIPHER_NOT_SUPPORTED is specifically excluded from blinding.
    let bad_config = Config::default();
    let client_config = common::client_config()?.build()?;
    let server_config = bad_config;

    let client = TlsConnector::new(client_config);
    let server = TlsAcceptor::new(server_config);

    let (server_stream, client_stream) = common::get_streams().await?;
    let result = common::run_negotiate(&client, client_stream, &server, server_stream).await;
    assert!(matches!(result, Err(e) if !e.is_retryable()));

    Ok(())
}

#[tokio::test(start_paused = true)]
async fn handshake_error_with_blinding() -> Result<(), Box<dyn std::error::Error>> {
    let clock = common::TokioTime::default();

    // Config::builder() does not include a trust store.
    // The client will reject the server certificate as untrusted.
    let mut bad_config = Config::builder();
    bad_config.set_security_policy(&DEFAULT_TLS13)?;
    bad_config.set_monotonic_clock(clock)?;
    let client_config = bad_config.build()?;
    let server_config = common::server_config()?.build()?;

    let client = TlsConnector::new(client_config.clone());
    let server = TlsAcceptor::new(server_config.clone());
    let (server_stream, client_stream) = common::get_streams().await?;

    let time_start = time::Instant::now();
    let result = common::run_negotiate(&client, client_stream, &server, server_stream).await;
    let time_elapsed = time_start.elapsed();

    // Handshake MUST NOT finish faster than minimal blinding time.
    assert!(time_elapsed > common::MIN_BLINDING_SECS);

    // Handshake MUST eventually gracefully fail after blinding
    let error = result.unwrap_err();
    assert_eq!(error.kind(), ErrorType::ProtocolError);

    Ok(())
}

#[tokio::test]
async fn io_stream_access() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let client_addr = client_stream.local_addr().unwrap();
    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (mut client_result, _server_result) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    assert_eq!(client_result.get_ref().local_addr().unwrap(), client_addr);
    assert_eq!(client_result.get_mut().local_addr().unwrap(), client_addr);

    Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/send_and_recv.rs000066400000000000000000000123561456575232400304520ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use s2n_tls_tokio::{TlsAcceptor, TlsConnector};
use std::{io, task::Poll::*};
use tokio::io::{AsyncReadExt, AsyncWriteExt};

pub mod common;

const TEST_DATA: &[u8] = "hello world".as_bytes();

// The maximum TLS record payload is 2^14 bytes.
// Send more to ensure multiple records.
const LARGE_TEST_DATA: &[u8] = &[5; (1 << 15)];

#[tokio::test]
async fn send_and_recv_basic() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let connector = TlsConnector::new(common::client_config()?.build()?);
    let acceptor = TlsAcceptor::new(common::server_config()?.build()?);

    let (mut client, mut server) =
        common::run_negotiate(&connector, client_stream, &acceptor, server_stream).await?;

    client.write_all(TEST_DATA).await?;

    let mut received = [0; TEST_DATA.len()];
    assert_eq!(server.read_exact(&mut received).await?, TEST_DATA.len());
    assert_eq!(TEST_DATA, received);

    Ok(())
}

#[tokio::test]
async fn send_and_recv_into_vec() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let connector = TlsConnector::new(common::client_config()?.build()?);
    let acceptor = TlsAcceptor::new(common::server_config()?.build()?);

    let (mut client, mut server) =
        common::run_negotiate(&connector, client_stream, &acceptor, server_stream).await?;

    client.write_all(TEST_DATA).await?;

    let mut received = vec![];
    while received.len() < TEST_DATA.len() {
        let bytes_read = server.read_buf(&mut received).await?;
        assert!(bytes_read > 0);
    }
    assert_eq!(TEST_DATA, received);

    Ok(())
}

#[tokio::test]
async fn send_and_recv_multiple_records() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let connector = TlsConnector::new(common::client_config()?.build()?);
    let acceptor = TlsAcceptor::new(common::server_config()?.build()?);

    let (mut client, mut server) =
        common::run_negotiate(&connector, client_stream, &acceptor, server_stream).await?;

    let mut received = [0; LARGE_TEST_DATA.len()];
    let (_, read_size) = tokio::try_join!(
        client.write_all(LARGE_TEST_DATA),
        server.read_exact(&mut received)
    )?;
    assert_eq!(LARGE_TEST_DATA.len(), read_size);
    assert_eq!(LARGE_TEST_DATA, received);

    Ok(())
}

#[tokio::test]
async fn send_and_recv_split() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let connector = TlsConnector::new(common::client_config()?.build()?);
    let acceptor = TlsAcceptor::new(common::server_config()?.build()?);

    let (client, server) =
        common::run_negotiate(&connector, client_stream, &acceptor, server_stream).await?;

    let (mut client_read, mut client_write) = tokio::io::split(client);
    let (mut server_read, mut server_write) = tokio::io::split(server);

    let mut client_received = [0; LARGE_TEST_DATA.len()];
    let mut server_received = [0; LARGE_TEST_DATA.len()];
    let (_, _, client_bytes, server_bytes) = tokio::try_join!(
        client_write.write_all(LARGE_TEST_DATA),
        server_write.write_all(LARGE_TEST_DATA),
        client_read.read_exact(&mut client_received),
        server_read.read_exact(&mut server_received)
    )?;

    assert_eq!(client_bytes, LARGE_TEST_DATA.len());
    assert_eq!(server_bytes, LARGE_TEST_DATA.len());
    assert_eq!(LARGE_TEST_DATA, client_received);
    assert_eq!(LARGE_TEST_DATA, server_received);

    Ok(())
}

#[tokio::test]
async fn send_error() -> Result<(), Box<dyn std::error::Error>> {
    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (server_stream, client_stream) = common::get_streams().await?;
    let client_stream = common::TestStream::new(client_stream);
    let overrides = client_stream.overrides();
    let (mut client, _) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    // Setup write to fail
    overrides.next_write(Some(Box::new(|_, _, _| {
        Ready(Err(io::Error::from(io::ErrorKind::ConnectionReset)))
    })));

    // Verify write fails
    let result = client.write_all(TEST_DATA).await;
    assert!(result.is_err());

    Ok(())
}

#[tokio::test]
async fn recv_error() -> Result<(), Box<dyn std::error::Error>> {
    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (server_stream, client_stream) = common::get_streams().await?;
    let client_stream = common::TestStream::new(client_stream);
    let overrides = client_stream.overrides();
    let (mut client, _) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    // Setup read to fail
    overrides.next_read(Some(Box::new(|_, _, _| {
        Ready(Err(io::Error::from(io::ErrorKind::ConnectionReset)))
    })));

    // Verify read fails
    let mut received = [0; 1];
    let result = client.read_exact(&mut received).await;
    assert!(result.is_err());

    Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/shutdown.rs000066400000000000000000000311431456575232400275260ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use s2n_tls::error;
use s2n_tls_tokio::{TlsAcceptor, TlsConnector, TlsStream};
use std::{
    convert::TryFrom,
    io,
    sync::Arc,
    task::Poll::{Pending, Ready},
};
use tokio::{
    io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt},
    time,
};

pub mod common;

async fn read_until_shutdown<S: AsyncRead + AsyncWrite + Unpin>(
    stream: &mut TlsStream<S>,
) -> Result<(), std::io::Error> {
    let mut received = [0; 1];
    // Zero bytes read indicates EOF
    while stream.read(&mut received).await? != 0 {}
    stream.shutdown().await
}

async fn write_until_shutdown<S: AsyncWrite + Unpin>(stream: &mut S) -> Result<(), std::io::Error> {
    let sent = [0; 1];
    loop {
        if let Err(err) = stream.write(&sent).await {
            let tls_err = error::Error::try_from(err).unwrap();
            assert_eq!(tls_err.kind(), error::ErrorType::ConnectionClosed);
            break;
        }
    }
    stream.shutdown().await
}

#[tokio::test]
async fn client_initiated_shutdown() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (mut client, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    tokio::try_join!(read_until_shutdown(&mut server), client.shutdown())?;

    Ok(())
}

#[tokio::test]
async fn server_initiated_shutdown() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (mut client, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    tokio::try_join!(read_until_shutdown(&mut client), server.shutdown())?;

    Ok(())
}

/// Reading and writing handles should both respond to a peer's "close notify"
/// appropriately. The read handle should immediately exit and writing should
/// fail with a "connection closed" error.
#[tokio::test]
async fn shutdown_after_split() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let client = TlsConnector::new(common::client_config_tls12()?.build()?);
    let server = TlsAcceptor::new(common::server_config_tls12()?.build()?);

    let (client, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    let (mut client_reader, mut client_writer) = tokio::io::split(client);

    let mut received = [0; 1];

    // All tasks must cleanly exit. try_join will return as soon as an error
    // occurs, so if the result is any error then the test has failed.
    tokio::try_join!(
        server.shutdown(),
        client_reader.read(&mut received),
        write_until_shutdown(&mut client_writer),
    )?;
    Ok(())
}

/// Reading and writing handles should both respond to a peers "close notify"
/// appropriately. TLS1.3 connections have "half close behavior". The read
/// handle should immediately exit, but the write handle can continue to write
/// until explicitly shutdown. After both client handles have shutdown, the
/// server should cleanly exit.
#[tokio::test]
async fn shutdown_after_halfclose_split() -> Result<(), Box<dyn std::error::Error>> {
    let (server_stream, client_stream) = common::get_streams().await?;

    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (client, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    let (mut client_reader, mut client_writer) = tokio::io::split(client);

    let close_notify_recvd = Arc::new(tokio::sync::Notify::new());
    let close_notify_recvd_clone = close_notify_recvd.clone();

    let mut received = [0; 1];

    // all tasks must complete, and must complete successfully
    // the client tasks will panic if an error is encountered, so those don't
    // need to be checked.
    let (server, _, _) = tokio::join!(
        server.shutdown(),
        async {
            let bytes_read = client_reader.read(&mut received).await.unwrap();
            // 0 bytes read indicate that we returned because of close notify
            assert_eq!(bytes_read, 0);
            // signal the writer task that close notify received
            close_notify_recvd.notify_one();
        },
        async {
            // wait for the connection to receive "close notify" from peer
            close_notify_recvd_clone.notified().await;
            // confirm that we can write even after receiving the shutdown from
            // the server
            client_writer
                .write_all("random bytes".as_bytes())
                .await
                .unwrap();
            client_writer.flush().await.unwrap();
            // shutdown
            client_writer.shutdown().await.unwrap()
        }
    );
    // make sure the server shutdown cleanly
    assert!(server.is_ok());
    Ok(())
}

#[tokio::test(start_paused = true)]
async fn shutdown_with_blinding() -> Result<(), Box<dyn std::error::Error>> {
    let clock = common::TokioTime::default();
    let mut server_config = common::server_config()?;
    server_config.set_monotonic_clock(clock)?;

    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(server_config.build()?);

    let (server_stream, client_stream) = common::get_streams().await?;
    let server_stream = common::TestStream::new(server_stream);
    let overrides = server_stream.overrides();
    let (mut client, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    // Setup a bad record for the next read
    overrides.next_read(Some(Box::new(|_, _, buf| {
        // Parsing the header is one of the blinded operations
        // in s2n_recv, so provide a malformed header.
        let zeroed_header = [23, 0, 0, 0, 0];
        buf.put_slice(&zeroed_header);
        Ok(()).into()
    })));

    // Trigger the blinded error
    let mut received = [0; 1];
    let result = server.read_exact(&mut received).await;
    assert!(result.is_err());

    let time_start = time::Instant::now();
    let result = server.shutdown().await;
    let time_elapsed = time_start.elapsed();

    // Shutdown MUST NOT complete faster than minimal blinding time.
    assert!(time_elapsed > common::MIN_BLINDING_SECS);

    // Server MUST eventually successfully shutdown
    assert!(result.is_ok());

    // Shutdown MUST have sent the close_notify message needed for EOF.
    let mut received = [0; 1];
    assert!(client.read(&mut received).await? == 0);

    Ok(())
}

#[tokio::test(start_paused = true)]
async fn shutdown_with_poll_blinding() -> Result<(), Box<dyn std::error::Error>> {
    let clock = common::TokioTime::default();
    let mut server_config = common::server_config()?;
    server_config.set_monotonic_clock(clock)?;

    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(server_config.build()?);

    let (server_stream, client_stream) = common::get_streams().await?;
    let server_stream = common::TestStream::new(server_stream);
    let overrides = server_stream.overrides();
    let (_, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    // Setup a bad record for the next read
    overrides.next_read(Some(Box::new(|_, _, buf| {
        // Parsing the header is one of the blinded operations
        // in s2n_recv, so provide a malformed header.
        let zeroed_header = [23, 0, 0, 0, 0];
        buf.put_slice(&zeroed_header);
        Ok(()).into()
    })));

    // Trigger the blinded error
    let mut received = [0; 1];
    let result = server.read_exact(&mut received).await;
    assert!(result.is_err());

    let time_start = time::Instant::now();
    let result = server.apply_blinding().await;
    let time_elapsed = time_start.elapsed();

    // poll_blinding MUST NOT complete faster than minimal blinding time.
    assert!(time_elapsed > common::MIN_BLINDING_SECS);

    // poll_blinding MUST eventually complete
    assert!(result.is_ok());

    Ok(())
}

#[tokio::test]
async fn shutdown_with_tcp_error() -> Result<(), Box<dyn std::error::Error>> {
    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (server_stream, client_stream) = common::get_streams().await?;
    let server_stream = common::TestStream::new(server_stream);
    let overrides = server_stream.overrides();

    let (_, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    // The underlying stream should return a unique error on shutdown
    overrides.next_shutdown(Some(Box::new(|_, _| {
        Ready(Err(io::Error::new(io::ErrorKind::Other, common::TEST_STR)))
    })));

    // Shutdown should complete with the correct error from the underlying stream
    let result = server.shutdown().await;
    let error = result.unwrap_err().into_inner().unwrap();
    assert!(error.to_string() == common::TEST_STR);

    Ok(())
}

#[tokio::test]
async fn shutdown_with_tls_error_and_tcp_error() -> Result<(), Box<dyn std::error::Error>> {
    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (server_stream, client_stream) = common::get_streams().await?;
    let server_stream = common::TestStream::new(server_stream);
    let overrides = server_stream.overrides();

    let (_, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    // Both s2n_shutdown_send and the underlying stream should error on shutdown
    overrides.next_write(Some(Box::new(|_, _, _| {
        Ready(Err(io::Error::from(io::ErrorKind::Other)))
    })));
    overrides.next_shutdown(Some(Box::new(|_, _| {
        Ready(Err(io::Error::new(io::ErrorKind::Other, common::TEST_STR)))
    })));

    // Shutdown should complete with the correct error from s2n_shutdown_send
    let result = server.shutdown().await;
    let io_error = result.unwrap_err();
    let error: error::Error = io_error.try_into()?;
    // Any non-blocking read error is translated as "IOError"
    assert!(error.kind() == error::ErrorType::IOError);

    // Even if s2n_shutdown_send fails, we need to close the underlying stream.
    // Make sure we called our mock shutdown, consuming it.
    assert!(overrides.is_consumed());

    Ok(())
}

#[tokio::test]
async fn shutdown_with_tls_error_and_tcp_delay() -> Result<(), Box<dyn std::error::Error>> {
    let client = TlsConnector::new(common::client_config()?.build()?);
    let server = TlsAcceptor::new(common::server_config()?.build()?);

    let (server_stream, client_stream) = common::get_streams().await?;
    let server_stream = common::TestStream::new(server_stream);
    let overrides = server_stream.overrides();

    let (mut client, mut server) =
        common::run_negotiate(&client, client_stream, &server, server_stream).await?;

    // We want s2n_shutdown_send to produce an error on write
    overrides.next_write(Some(Box::new(|_, _, _| {
        Ready(Err(io::Error::from(io::ErrorKind::Other)))
    })));

    // The underlying stream should initially return Pending, delaying shutdown
    overrides.next_shutdown(Some(Box::new(|_, ctx| {
        ctx.waker().wake_by_ref();
        Pending
    })));

    // Shutdown should complete with the correct error from s2n_shutdown_send
    let result = server.shutdown().await;
    let io_error = result.unwrap_err();
    let error: error::Error = io_error.try_into()?;
    // Any non-blocking read error is translated as "IOError"
    assert!(error.kind() == error::ErrorType::IOError);

    // Even if s2n_shutdown_send fails, we need to close the underlying stream.
    // Make sure we at least called our mock shutdown, consuming it.
    assert!(overrides.is_consumed());

    // Since s2n_shutdown_send failed, we should NOT have sent a close_notify.
    // Make sure the peer doesn't receive a close_notify.
    // If this is not true, then we're incorrectly calling s2n_shutdown_send
    // again after an error.
    let mut received = [0; 1];
    let io_error = client.read(&mut received).await.unwrap_err();
    let error: error::Error = io_error.try_into()?;
    assert!(error.kind() == error::ErrorType::ConnectionClosed);

    Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls-tokio/tests/tcp.rs000066400000000000000000000041311456575232400264360ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};

pub mod common;

async fn assert_read_from_closed<S>(mut reader: S, writer: S)
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    drop(writer);
    let result = reader.read_u8().await;
    assert!(result.is_err());
    let error = result.unwrap_err();
    assert!(error.kind() == std::io::ErrorKind::UnexpectedEof);
}

#[tokio::test]
async fn match_tcp_read_from_closed() -> Result<(), Box<dyn std::error::Error>> {
    let (tcp_server, tcp_client) = common::get_streams().await?;
    assert_read_from_closed(tcp_server, tcp_client).await;

    let (tls13_server, tls13_client) = common::get_tls_streams(
        common::server_config()?.build()?,
        common::client_config()?.build()?,
    )
    .await?;
    assert_read_from_closed(tls13_server, tls13_client).await;

    let (tls12_server, tls12_client) = common::get_tls_streams(
        common::server_config_tls12()?.build()?,
        common::client_config_tls12()?.build()?,
    )
    .await?;
    assert_read_from_closed(tls12_server, tls12_client).await;
    Result::Ok(())
}

async fn assert_write_to_closed<S>(reader: S, mut writer: S)
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    drop(reader);
    let result = writer.write_u8(0).await;
    assert!(result.is_ok());
}

#[tokio::test]
async fn match_tcp_write_to_closed() -> Result<(), Box<dyn std::error::Error>> {
    let (tcp_server, tcp_client) = common::get_streams().await?;
    assert_write_to_closed(tcp_server, tcp_client).await;

    let (tls13_server, tls13_client) = common::get_tls_streams(
        common::server_config()?.build()?,
        common::client_config()?.build()?,
    )
    .await?;
    assert_write_to_closed(tls13_server, tls13_client).await;

    let (tls12_server, tls12_client) = common::get_tls_streams(
        common::server_config_tls12()?.build()?,
        common::client_config_tls12()?.build()?,
    )
    .await?;
    assert_write_to_closed(tls12_server, tls12_client).await;
    Result::Ok(())
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/000077500000000000000000000000001456575232400230165ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/Cargo.toml000066400000000000000000000020351456575232400247460ustar00rootroot00000000000000[package]
name = "s2n-tls"
description = "A C99 implementation of the TLS/SSL protocols"
version = "0.1.3"
authors = ["AWS s2n"]
edition = "2021"
rust-version = "1.63.0"
repository = "https://github.com/aws/s2n-tls"
license = "Apache-2.0"

[features]
default = []
unstable-fingerprint = ["s2n-tls-sys/unstable-fingerprint"]
quic = ["s2n-tls-sys/quic"]
pq = ["s2n-tls-sys/pq"]
testing = ["bytes"]

[dependencies]
bytes = { version = "1", optional = true }
errno = { version = "0.3" }
libc = "0.2"
s2n-tls-sys = { version = "=0.1.3", path = "../s2n-tls-sys", features = ["internal"] }
pin-project-lite = "0.2"
hex = "0.4"

[dev-dependencies]
bytes = "1"
futures-test = "0.3"
openssl = "0.10"
temp-env = "0.3"
checkers = "0.6"
# newer versions require rust 1.66, see https://github.com/aws/s2n-tls/issues/4241
# this version pin is only needed to prevent verification failures when using
# cargo package / cargo publish, as those commands do not respect the version pin
# in downstream dev-dependencies (in s2n-tls-sys, in this case)
jobserver = "=0.1.26"
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/README.md000066400000000000000000000005001456575232400242700ustar00rootroot00000000000000This crate provides ergonomic, idiomatic Rust bindings for [s2n-tls](https://github.com/aws/s2n-tls). From the s2n-tls readme:
> s2n-tls is a C99 implementation of the TLS/SSL protocols that is designed to be simple, small, fast, and with security as a priority. It is released and licensed under the Apache License 2.0.aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/000077500000000000000000000000001456575232400236055ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/callbacks.rs000066400000000000000000000076431456575232400261040ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//! Utilities to handle passing Rust code to s2n-tls's C callbacks.
//!
//! s2n-tls uses callbacks to temporarily return control to the application
//! and allow the application to execute custom code.
//!
//! To use a callback in your application, just implement the trait for the
//! target callback type and pass your implementation to the appropriate
//! connection or config method. For example, you can implement
//! [`ClientHelloCallback`] and pass that implementation to
//! [config::Builder::set_client_hello_callback()](`crate::config::Builder::set_client_hello_callback()`)
//! in order to execute custom logic after an s2n-tls server receives a client hello.
//!
//! s2n-tls callbacks come in two flavors:
//! * "sync" callbacks return an immediate result and will block the task
//!   performing the handshake until they return success or failure. See
//!   [`VerifyHostNameCallback`] as an example.
//! * "async" callbacks return a [Poll](`core::task::Poll`) and should not block the task performing the handshake.
//!   They will be polled until they return [Poll::Ready](`core::task::Poll::Ready`).
//!   [Connection::waker()](`crate::connection::Connection::waker()`)
//!   can be used to register the task for wakeup. See [`ClientHelloCallback`] as an example.
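//!
//! As a sketch only, a host name verifier implements the single
//! [`VerifyHostNameCallback`] method and is then registered on the config
//! builder (the registration call shown in the comment below is assumed,
//! not verified here):
//!
//! ```ignore
//! use s2n_tls::callbacks::VerifyHostNameCallback;
//!
//! struct ExactHostName(String);
//!
//! impl VerifyHostNameCallback for ExactHostName {
//!     fn verify_host_name(&self, host_name: &str) -> bool {
//!         self.0 == host_name
//!     }
//! }
//!
//! // let mut builder = s2n_tls::config::Config::builder();
//! // builder.set_verify_host_callback(ExactHostName("localhost".into()))?;
//! ```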

use crate::{config::Context, connection::Connection};
use core::{mem::ManuallyDrop, ptr::NonNull, time::Duration};
use s2n_tls_sys::s2n_connection;

mod async_cb;
pub use async_cb::*;

mod client_hello;
pub use client_hello::*;

mod session_ticket;
pub use session_ticket::*;

mod pkey;
pub use pkey::*;

/// Convert the connection pointer provided to a callback into a Connection
/// and Context useable with the Rust bindings.
///
/// # Safety
///
/// This must ONLY be used for connection pointers provided to callbacks,
/// which can be assumed to point to valid Connections because the
/// callbacks were configured through the Rust bindings.
pub(crate) unsafe fn with_context<F, T>(conn_ptr: *mut s2n_connection, action: F) -> T
where
    F: FnOnce(&mut Connection, &mut Context) -> T,
{
    let raw = NonNull::new(conn_ptr).expect("connection should not be null");
    let mut conn = Connection::from_raw(raw);
    let mut config = conn.config().expect("config should not be null");
    let context = config.context_mut();
    let r = action(&mut conn, context);
    // Since this is a callback, it receives a pointer to the connection
    // but doesn't own that connection or control its lifecycle.
    // Do not drop / free the connection.
    let _ = ManuallyDrop::new(conn);
    r
}

/// A trait for the callback used to verify host name(s) during X509
/// verification.
///
/// The implementation should verify the certificate host name and return `true`
/// if the name is valid, `false` otherwise.
pub trait VerifyHostNameCallback: 'static + Send + Sync {
    fn verify_host_name(&self, host_name: &str) -> bool;
}

/// A trait for the callback used to retrieve the system / wall clock time.
pub trait WallClock: 'static + Send + Sync {
    fn get_time_since_epoch(&self) -> Duration;
}

/// A trait for the callback used to retrieve the monotonic time.
pub trait MonotonicClock: 'static + Send + Sync {
    fn get_time(&self) -> Duration;
}

/// Invoke the user provided VerifyHostNameCallback on the host_name.
///
/// # Safety
///
/// The caller must ensure that the memory underlying host_name is a valid
/// slice.
pub(crate) unsafe fn verify_host(
    host_name: *const ::libc::c_char,
    host_name_len: usize,
    handler: &mut Box<dyn VerifyHostNameCallback>,
) -> u8 {
    let host_name = host_name as *const u8;
    let host_name = core::slice::from_raw_parts(host_name, host_name_len);

    match core::str::from_utf8(host_name) {
        Ok(host_name_str) => handler.verify_host_name(host_name_str) as u8,
        Err(_) => 0, // If the host name can't be parsed, fail closed.
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/callbacks/000077500000000000000000000000001456575232400255245ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/callbacks/async_cb.rs000066400000000000000000000120111456575232400276460ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//! Support for asynchronous callbacks.
//!
//! The general flow for an async callback is:
//! 1. The application sets FooCallback on the [`crate::config::Config`] with
//!    a method like Config::set_foo_callback.
//! 2. When the underlying C library reaches the trigger for that specific
//!    callback (for example, the ClientHello for [`crate::callbacks::ClientHelloCallback`])
//!    it calls the callback implementation to get a [`ConnectionFuture`].
//! 3. The [`ConnectionFuture`] is stored on the connection. Every time
//!    the handshake is polled, the [`ConnectionFuture`] is polled instead.
//! 4. Once the [`ConnectionFuture`] returns a result, the connection
//!    drops the future and proceeds as usual.

use crate::{connection::Connection, enums::CallbackResult, error::Error};
use core::task::Poll;
use pin_project_lite::pin_project;
use std::pin::Pin;

/// The Future associated with the async connection callback.
///
/// The calling application can provide an instance of [`ConnectionFuture`]
/// when implementing an async callback, eg. [`crate::callbacks::ClientHelloCallback`],
/// if it wants to run an asynchronous operation (disk read, network call).
/// The application can return an error ([Err(Error::application())])
/// to indicate connection failure.
pub trait ConnectionFuture: 'static + Send {
    fn poll(
        self: Pin<&mut Self>,
        connection: &mut Connection,
        ctx: &mut core::task::Context,
    ) -> Poll<Result<(), Error>>;
}
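
// NOTE (editorial sketch, not part of the upstream sources): a minimal
// ConnectionFuture that stays Pending for exactly one poll before resolving.
// A real implementation would drive an actual asynchronous operation instead
// of the hypothetical `done` flag used here.
#[cfg(test)]
mod connection_future_example {
    #![allow(dead_code)]

    use super::*;

    struct ReadyOnSecondPoll {
        done: bool,
    }

    impl ConnectionFuture for ReadyOnSecondPoll {
        fn poll(
            mut self: Pin<&mut Self>,
            _connection: &mut Connection,
            ctx: &mut core::task::Context,
        ) -> Poll<Result<(), Error>> {
            if self.done {
                Poll::Ready(Ok(()))
            } else {
                self.done = true;
                // Ask to be polled again; the handshake driver polls this
                // future each time the handshake itself is polled.
                ctx.waker().wake_by_ref();
                Poll::Pending
            }
        }
    }
}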

pub(crate) type ConnectionFutureResult = Result<Option<Pin<Box<dyn ConnectionFuture>>>, Error>;

// Useful for propagating [`error::Error`] from a C callback back to the Rust application.
pub(crate) struct ErrorFuture {
    error: Option<Error>,
}

impl ConnectionFuture for ErrorFuture {
    fn poll(
        mut self: Pin<&mut Self>,
        _connection: &mut Connection,
        _ctx: &mut core::task::Context,
    ) -> Poll<Result<(), Error>> {
        let err = self.error.take().expect(
            "ErrorFuture should be initialized with Some(error) and a Future should never
            be polled after it returns Poll::Ready",
        );
        Poll::Ready(Err(err))
    }
}

pin_project! {
    /// A wrapper around an optional [`ConnectionFuture`]
    /// which either polls the future or immediately reports success.
    struct OptionalFuture {
        option: Option<Pin<Box<dyn ConnectionFuture>>>,
    }
}

impl OptionalFuture {
    fn new(input: ConnectionFutureResult) -> Self {
        match input {
            Ok(option) => OptionalFuture { option },
            Err(error) => {
                let error = Some(error);
                OptionalFuture {
                    option: Some(Box::pin(ErrorFuture { error })),
                }
            }
        }
    }
}

impl ConnectionFuture for OptionalFuture {
    fn poll(
        mut self: Pin<&mut Self>,
        conn: &mut Connection,
        ctx: &mut core::task::Context,
    ) -> Poll<Result<(), Error>> {
        match self.option.as_mut() {
            Some(future) => future.as_mut().poll(conn, ctx),
            None => Poll::Ready(Ok(())),
        }
    }
}

/// Any work necessary after the callback completes.
//
// We do not expect any callback except [`ClientHelloCallback`] to require MarkDone.
// More recent callbacks follow a different model that doesn't require separate cleanup.
//
// This enum is sufficient while only ClientHello is special-cased, but will not
// scale well. If we need more MarkDone variants, then we should consider a different
// solution, like another stored future.
#[non_exhaustive]
#[derive(PartialEq)]
enum MarkDone {
    ClientHello,
    None,
}

pin_project! {
    // Stores the [`ConnectionFuture`] and associated state.
    pub(crate) struct AsyncCallback {
        #[pin]
        future: OptionalFuture,
        cleanup: MarkDone,
    }
}

impl AsyncCallback {
    pub(crate) fn poll(
        self: Pin<&mut Self>,
        conn: &mut Connection,
        ctx: &mut core::task::Context,
    ) -> Poll<Result<(), Error>> {
        let this = self.project();
        let poll = this.future.poll(conn, ctx);
        if let Poll::Ready(Ok(())) = poll {
            if this.cleanup == &MarkDone::ClientHello {
                conn.mark_client_hello_cb_done()?;
            }
        }
        poll
    }

    pub(crate) fn trigger_client_hello_cb(
        future: ConnectionFutureResult,
        conn: &mut Connection,
    ) -> CallbackResult {
        let future = OptionalFuture::new(future);
        let cleanup = MarkDone::ClientHello;
        let callback = AsyncCallback { future, cleanup };
        conn.set_async_callback(callback);
        CallbackResult::Success
    }

    pub(crate) fn trigger(future: ConnectionFutureResult, conn: &mut Connection) -> CallbackResult {
        let future = OptionalFuture::new(future);
        let cleanup = MarkDone::None;
        let callback = AsyncCallback { future, cleanup };
        conn.set_async_callback(callback);
        CallbackResult::Success
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/callbacks/client_hello.rs000066400000000000000000000047761456575232400305510ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//! Support for application-implemented ClientHello callbacks.

use crate::{callbacks::*, config::Config, connection::Connection, error::Error};
use core::task::Poll;
use pin_project_lite::pin_project;
use std::{future::Future, pin::Pin};

/// A trait for the callback executed after parsing the ClientHello message.
///
/// Use in conjunction with
/// [config::Builder::set_client_hello_callback](`crate::config::Builder::set_client_hello_callback()`).
pub trait ClientHelloCallback: 'static + Send + Sync {
    /// The application can return an `Ok(None)` to resolve the callback
    /// synchronously or return an `Ok(Some(ConnectionFuture))` if it wants to
    /// run some asynchronous task before resolving the callback.
    ///
    /// [`ConfigResolver`], which implements [`ConnectionFuture`] can be
    /// returned if the application wants to set a new [`Config`] on the connection.
    ///
    /// If the server_name is used to configure the connection then the application
    /// should call [`Connection::server_name_extension_used()`].
    fn on_client_hello(
        // this method takes an immutable reference to self to prevent the
        // Config from being mutated by one connection and then used in another
        // connection, leading to undefined behavior
        &self,
        connection: &mut Connection,
    ) -> ConnectionFutureResult;
}
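
// NOTE (editorial sketch, not part of the upstream sources): two hypothetical
// ClientHelloCallback implementations. `NoOpCallback` resolves synchronously by
// returning Ok(None). `SwapConfig` returns a ConfigResolver wrapping a future
// that produces a new Config (a trivial default one here, purely for
// illustration; a real callback might select the Config based on SNI).
#[cfg(test)]
mod client_hello_callback_example {
    #![allow(dead_code)]

    use super::*;

    struct NoOpCallback;

    impl ClientHelloCallback for NoOpCallback {
        fn on_client_hello(&self, _connection: &mut Connection) -> ConnectionFutureResult {
            // Keep the Config already associated with the connection.
            Ok(None)
        }
    }

    struct SwapConfig;

    impl ClientHelloCallback for SwapConfig {
        fn on_client_hello(&self, _connection: &mut Connection) -> ConnectionFutureResult {
            // The async block could await a disk read or network call before
            // building the Config; ConfigResolver then applies it on poll.
            let new_config = async { Config::builder().build() };
            Ok(Some(Box::pin(ConfigResolver::new(new_config))))
        }
    }
}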

// For more information on projection:
// https://doc.rust-lang.org/std/pin/index.html#projections-and-structural-pinning
pin_project! {
    /// An implementation of [`ConnectionFuture`] which resolves the provided
    /// future and sets the config on the [`Connection`].
    pub struct ConfigResolver<F: Future<Output = Result<Config, Error>>> {
        #[pin]
        fut: F,
    }
}

impl<F: Future<Output = Result<Config, Error>>> ConfigResolver<F> {
    pub fn new(fut: F) -> Self {
        ConfigResolver { fut }
    }
}

impl<F: 'static + Send + Future<Output = Result<Config, Error>>> ConnectionFuture
    for ConfigResolver<F>
{
    fn poll(
        self: Pin<&mut Self>,
        connection: &mut Connection,
        ctx: &mut core::task::Context,
    ) -> Poll<Result<(), Error>> {
        let this = self.project();
        let config = match this.fut.poll(ctx) {
            Poll::Ready(config) => config?,
            Poll::Pending => return Poll::Pending,
        };

        connection.set_config(config)?;

        Poll::Ready(Ok(()))
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/callbacks/pkey.rs000066400000000000000000000302771456575232400270530ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    callbacks::*,
    connection::Connection,
    enums::{HashAlgorithm, Mode, SignatureAlgorithm},
    error::{Error, Fallible},
    ffi::*,
};
use std::{pin::Pin, ptr::NonNull};

#[non_exhaustive]
#[derive(Debug)]
pub enum OperationType {
    Decrypt,
    Sign(SignatureAlgorithm, HashAlgorithm),
}

pub struct PrivateKeyOperation {
    raw: NonNull<s2n_async_pkey_op>,
    kind: OperationType,
}

/// # Safety
///
/// Safety: s2n_async_pkey_op objects can be sent across threads
unsafe impl Send for PrivateKeyOperation {}

/// # Safety
///
/// Safety: All C methods that mutate the s2n_async_pkey_op are wrapped
/// in Rust methods that require a mutable reference.
unsafe impl Sync for PrivateKeyOperation {}

impl PrivateKeyOperation {
    pub(crate) fn try_from_cb(
        conn: &Connection,
        op_ptr: *mut s2n_async_pkey_op,
    ) -> Result<Self, Error> {
        let mut raw_kind = 0;
        unsafe { s2n_async_pkey_op_get_op_type(op_ptr, &mut raw_kind) }.into_result()?;

        let kind = match raw_kind {
            s2n_async_pkey_op_type::SIGN => {
                let sig_alg = match conn.mode() {
                    Mode::Client => conn
                        .selected_client_signature_algorithm()?
                        .ok_or(Error::INVALID_INPUT)?,
                    Mode::Server => conn.selected_signature_algorithm()?,
                };
                let hash_alg = match conn.mode() {
                    Mode::Client => conn
                        .selected_client_hash_algorithm()?
                        .ok_or(Error::INVALID_INPUT)?,
                    Mode::Server => conn.selected_hash_algorithm()?,
                };
                OperationType::Sign(sig_alg, hash_alg)
            }
            s2n_async_pkey_op_type::DECRYPT => OperationType::Decrypt,
            _ => return Err(Error::INVALID_INPUT),
        };

        let raw = NonNull::new(op_ptr).ok_or(Error::INVALID_INPUT)?;
        Ok(PrivateKeyOperation { raw, kind })
    }

    /// Do we need to sign or decrypt with the private key?
    pub fn kind(&self) -> Result<&OperationType, Error> {
        Ok(&self.kind)
    }

    /// The size of the slice returned by [`input()`]
    pub fn input_size(&self) -> Result<usize, Error> {
        let mut size = 0;
        unsafe { s2n_async_pkey_op_get_input_size(self.raw.as_ptr(), &mut size) }.into_result()?;
        size.try_into().map_err(|_| Error::INVALID_INPUT)
    }

    /// Provides the input for the operation.
    ///
    /// If this is an [`OperationType::Sign`] operation, then this input has
    /// already been hashed and is the resultant digest.
    pub fn input(&self, buf: &mut [u8]) -> Result<(), Error> {
        let buf_len: u32 = buf.len().try_into().map_err(|_| Error::INVALID_INPUT)?;
        let buf_ptr = buf.as_ptr() as *mut u8;
        unsafe { s2n_async_pkey_op_get_input(self.raw.as_ptr(), buf_ptr, buf_len) }
            .into_result()?;
        Ok(())
    }

    /// Sets the output of the operation
    pub fn set_output(self, conn: &mut Connection, buf: &[u8]) -> Result<(), Error> {
        let buf_len: u32 = buf.len().try_into().map_err(|_| Error::INVALID_INPUT)?;
        let buf_ptr = buf.as_ptr();
        unsafe {
            s2n_async_pkey_op_set_output(self.raw.as_ptr(), buf_ptr, buf_len).into_result()?;
            s2n_async_pkey_op_apply(self.raw.as_ptr(), conn.as_ptr()).into_result()?;
        }
        Ok(())
    }
}

impl Drop for PrivateKeyOperation {
    fn drop(&mut self) {
        unsafe {
            let _ = s2n_async_pkey_op_free(self.raw.as_ptr());
        }
    }
}

pub trait PrivateKeyCallback: 'static + Send + Sync {
    fn handle_operation(
        &self,
        connection: &mut Connection,
        operation: PrivateKeyOperation,
    ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, Error>;
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        config, connection, error, security, testing,
        testing::{s2n_tls::*, *},
    };
    use core::task::{Poll, Waker};
    use futures_test::task::new_count_waker;
    use openssl::{ec::EcKey, ecdsa::EcdsaSig};

    type Error = Box<dyn std::error::Error>;

    const KEY: &[u8] = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../../tests/pems/ecdsa_p384_pkcs1_key.pem"
    ));
    const CERT: &[u8] = include_bytes!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/../../../tests/pems/ecdsa_p384_pkcs1_cert.pem"
    ));

    fn new_pair<T>(
        callback: T,
        waker: Waker,
    ) -> Result<Pair<Harness, Harness>, Error>
    where
        T: 'static + PrivateKeyCallback,
    {
        let config = {
            let mut config = config::Builder::new();
            config.set_security_policy(&security::DEFAULT_TLS13)?;
            config.load_public_pem(CERT)?;
            config.set_private_key_callback(callback)?;
            // Our test certificates are untrusted, but disabling certificate
            // verification does not affect handshake signatures.
            unsafe { config.disable_x509_verification() }?;
            config.build()?
        };

        let server = {
            let mut server = connection::Connection::new_server();
            server.set_config(config.clone())?;
            server.set_waker(Some(&waker))?;
            Harness::new(server)
        };

        let client = {
            let mut client = connection::Connection::new_client();
            client.set_config(config)?;
            Harness::new(client)
        };

        Ok(Pair::new(server, client))
    }

    fn ecdsa_sign(
        op: PrivateKeyOperation,
        conn: &mut connection::Connection,
        key: &[u8],
    ) -> Result<(), error::Error> {
        match op.kind()? {
            OperationType::Sign(SignatureAlgorithm::ECDSA, _) => {
                let in_buf_size = op.input_size()?;
                let mut in_buf = vec![0; in_buf_size];
                op.input(&mut in_buf)?;

                let key =
                    EcKey::private_key_from_pem(key).expect("Failed to create EcKey from pem");
                let sig = EcdsaSig::sign(&in_buf, &key).expect("Failed to sign input");
                let out = sig.to_der().expect("Failed to convert signature to der");

                op.set_output(conn, &out)?;
            }
            _ => panic!("Unexpected pkey operation"),
        }
        Ok(())
    }

    #[test]
    fn sync_offload_success() -> Result<(), Error> {
        struct TestPkeyCallback(Counter);
        impl PrivateKeyCallback for TestPkeyCallback {
            fn handle_operation(
                &self,
                conn: &mut connection::Connection,
                op: PrivateKeyOperation,
            ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, error::Error> {
                self.0.increment();
                ecdsa_sign(op, conn, KEY)?;
                Ok(None)
            }
        }

        let (waker, wake_count) = new_count_waker();
        let counter = testing::Counter::default();
        let callback = TestPkeyCallback(counter.clone());
        let pair = new_pair(callback, waker)?;

        assert_eq!(counter.count(), 0);
        assert_eq!(wake_count, 0);
        poll_tls_pair(pair);
        assert_eq!(counter.count(), 1);
        assert_eq!(wake_count, 0);

        Ok(())
    }

    #[test]
    fn async_offload_success() -> Result<(), Error> {
        const POLL_COUNT: usize = 10;

        struct TestPkeyFuture {
            counter: usize,
            op: Option<PrivateKeyOperation>,
        }
        impl ConnectionFuture for TestPkeyFuture {
            fn poll(
                mut self: Pin<&mut Self>,
                conn: &mut connection::Connection,
                ctx: &mut core::task::Context,
            ) -> Poll<Result<(), error::Error>> {
                ctx.waker().wake_by_ref();
                self.counter += 1;
                if self.counter < POLL_COUNT {
                    Poll::Pending
                } else if let Some(op) = self.op.take() {
                    Poll::Ready(ecdsa_sign(op, conn, KEY))
                } else {
                    Poll::Ready(Err(error::Error::application(
                        "missing pkey operation".into(),
                    )))
                }
            }
        }

        struct TestPkeyCallback(Counter);
        impl PrivateKeyCallback for TestPkeyCallback {
            fn handle_operation(
                &self,
                _conn: &mut connection::Connection,
                op: PrivateKeyOperation,
            ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, error::Error> {
                self.0.increment();
                let future = TestPkeyFuture {
                    counter: 0,
                    op: Some(op),
                };
                Ok(Some(Box::pin(future)))
            }
        }

        let (waker, wake_count) = new_count_waker();
        let counter = testing::Counter::default();
        let callback = TestPkeyCallback(counter.clone());
        let pair = new_pair(callback, waker)?;

        assert_eq!(counter.count(), 0);
        assert_eq!(wake_count, 0);
        poll_tls_pair(pair);
        assert_eq!(counter.count(), 1);
        assert_eq!(wake_count, POLL_COUNT);

        Ok(())
    }

    #[test]
    fn sync_failure() -> Result<(), Error> {
        const ERROR: &str = "sync_failure error";

        struct TestPkeyCallback(Counter);
        impl PrivateKeyCallback for TestPkeyCallback {
            fn handle_operation(
                &self,
                _conn: &mut connection::Connection,
                _op: PrivateKeyOperation,
            ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, error::Error> {
                self.0.increment();
                Err(testing::test_error(ERROR))
            }
        }

        let (waker, wake_count) = new_count_waker();
        let counter = testing::Counter::default();
        let callback = TestPkeyCallback(counter.clone());
        let mut pair = new_pair(callback, waker)?;

        assert_eq!(counter.count(), 0);
        assert_eq!(wake_count, 0);
        let result = poll_tls_pair_result(&mut pair);
        assert_eq!(counter.count(), 1);
        assert_eq!(wake_count, 0);

        match result {
            Ok(_) => panic!("Handshake unexpectedly succeeded"),
            Err(e) => testing::assert_test_error(e, ERROR),
        };
        Ok(())
    }

    #[test]
    fn async_failure() -> Result<(), Error> {
        const POLL_COUNT: usize = 10;
        const ERROR: &str = "async_failure error";

        struct TestPkeyFuture {
            counter: usize,
            _op: PrivateKeyOperation,
        }
        impl ConnectionFuture for TestPkeyFuture {
            fn poll(
                mut self: Pin<&mut Self>,
                _conn: &mut connection::Connection,
                ctx: &mut core::task::Context,
            ) -> Poll<Result<(), error::Error>> {
                ctx.waker().wake_by_ref();
                self.counter += 1;
                if self.counter < POLL_COUNT {
                    Poll::Pending
                } else {
                    Poll::Ready(Err(testing::test_error(ERROR)))
                }
            }
        }

        struct TestPkeyCallback(Counter);
        impl PrivateKeyCallback for TestPkeyCallback {
            fn handle_operation(
                &self,
                _conn: &mut connection::Connection,
                _op: PrivateKeyOperation,
            ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, error::Error> {
                self.0.increment();
                let future = TestPkeyFuture { counter: 0, _op };
                Ok(Some(Box::pin(future)))
            }
        }

        let (waker, wake_count) = new_count_waker();
        let counter = testing::Counter::default();
        let callback = TestPkeyCallback(counter.clone());
        let mut pair = new_pair(callback, waker)?;

        assert_eq!(counter.count(), 0);
        assert_eq!(wake_count, 0);
        let result = poll_tls_pair_result(&mut pair);
        assert_eq!(counter.count(), 1);
        assert_eq!(wake_count, POLL_COUNT);

        match result {
            Ok(_) => panic!("Handshake unexpectedly succeeded"),
            Err(e) => testing::assert_test_error(e, ERROR),
        };
        Ok(())
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/callbacks/session_ticket.rs000066400000000000000000000036641456575232400311310ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::time::Duration;

use s2n_tls_sys::*;

use crate::{
    connection::Connection,
    error::{Error, Fallible},
};

/// A trait to retrieve session tickets from the connection
pub trait SessionTicketCallback: 'static + Send + Sync {
    fn on_session_ticket(&self, connection: &mut Connection, session_ticket: &SessionTicket);
}

pub struct SessionTicket(s2n_session_ticket);

impl SessionTicket {
    pub(crate) fn from_ptr(ticket: &s2n_session_ticket) -> &Self {
        unsafe { &*(ticket as *const s2n_session_ticket as *const SessionTicket) }
    }

    // SAFETY: casting *const s2n_session_ticket -> *mut s2n_session_ticket: This is
    // safe as long as the data is not actually mutated. As authors of s2n-tls,
    // we know that the get_lifetime and get_data methods do not mutate the
    // data, and use mut pointers as a matter of convention because it makes
    // working with s2n_stuffers and s2n_blobs easier.
    pub(crate) fn deref_mut_ptr(&self) -> *mut s2n_session_ticket {
        &self.0 as *const s2n_session_ticket as *mut s2n_session_ticket
    }

    pub fn lifetime(&self) -> Result<Duration, Error> {
        let mut lifetime = 0;
        unsafe {
            s2n_session_ticket_get_lifetime(self.deref_mut_ptr(), &mut lifetime).into_result()?
        };
        Ok(Duration::new(lifetime.into(), 0))
    }

    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> Result<usize, Error> {
        let mut data_len = 0;
        unsafe {
            s2n_session_ticket_get_data_len(self.deref_mut_ptr(), &mut data_len).into_result()?
        };
        Ok(data_len)
    }

    pub fn data(&self, output: &mut [u8]) -> Result<(), Error> {
        unsafe {
            s2n_session_ticket_get_data(self.deref_mut_ptr(), output.len(), output.as_mut_ptr())
                .into_result()?
        };
        Ok(())
    }
}
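
// NOTE (editorial sketch, not part of the upstream sources): a hypothetical
// SessionTicketCallback that copies each ticket's opaque bytes out of the
// callback, e.g. so the application can store them for later session resumption.
#[cfg(test)]
mod session_ticket_callback_example {
    #![allow(dead_code)]

    use super::*;
    use std::sync::Mutex;

    #[derive(Default)]
    struct TicketStore(Mutex<Vec<Vec<u8>>>);

    impl SessionTicketCallback for TicketStore {
        fn on_session_ticket(&self, _connection: &mut Connection, ticket: &SessionTicket) {
            // Query the ticket length, then copy the data into an owned buffer.
            if let Ok(len) = ticket.len() {
                let mut data = vec![0; len];
                if ticket.data(&mut data).is_ok() {
                    self.0.lock().unwrap().push(data);
                }
            }
        }
    }
}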
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/client_hello.rs000066400000000000000000000372141456575232400266230ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::error::{Error, Fallible};
use s2n_tls_sys::*;
use std::fmt;

#[derive(Copy, Clone)]
pub enum FingerprintType {
    JA3,
}

// this is the size of the MD5 hash digest that is used for the JA3 fingerprint
const MD5_HASH_SIZE: u32 = 16;

impl From<FingerprintType> for s2n_tls_sys::s2n_fingerprint_type::Type {
    fn from(value: FingerprintType) -> Self {
        match value {
            FingerprintType::JA3 => s2n_tls_sys::s2n_fingerprint_type::FINGERPRINT_JA3,
        }
    }
}

// ClientHello is an opaque wrapper struct around `s2n_client_hello`. Note that
// the size of this type is not known, and as such it can only be used through
// references and pointers.
//
// This implementation is motivated by the different memory management required
// for different s2n_client_hello pointers. `s2n_client_hello_parse_message`
// returns a `*mut s2n_client_hello` which owns its own memory. This neatly fits
//  the "smart pointer" pattern and can be represented as a `Box`.
//
// `s2n_connection_get_client_hello` returns a `*mut s2n_client_hello` which
// references memory owned by the connection, and therefore must not outlive
// the connection struct. This is best represented as a reference tied to the
// lifetime of the `Connection` struct.

/// ```no_run
/// use s2n_tls::client_hello::{ClientHello, FingerprintType};
/// use s2n_tls::connection::Connection;
/// use s2n_tls::enums::Mode;
///
/// let mut conn = Connection::new(Mode::Server);
/// // handshake happens
/// let mut client_hello: &ClientHello = conn.client_hello().unwrap();
/// let mut hash = Vec::new();
/// let string_size = client_hello.fingerprint_hash(FingerprintType::JA3, &mut hash).unwrap();
/// // hash has been resized so that it can store the fingerprint hash
///
/// let mut string = String::with_capacity(string_size as usize);
/// // string will not be resized, and the method will fail with
/// // ErrorType::UsageError if the string doesn't have enough capacity
/// client_hello.fingerprint_string(FingerprintType::JA3, &mut string).unwrap();
/// ```
pub struct ClientHello(s2n_client_hello);

impl ClientHello {
    pub fn parse_client_hello(hello: &[u8]) -> Result<Box<Self>, crate::error::Error> {
        crate::init::init();
        let handle = unsafe {
            s2n_client_hello_parse_message(hello.as_ptr(), hello.len() as u32).into_result()?
        };
        let client_hello = handle.as_ptr() as *mut ClientHello;
        // safety: s2n_client_hello_parse_message returns a pointer that "owns"
        // its memory. This memory must be cleaned up by the application. The
        // Box will call Self::Drop when it goes out of scope so memory
        // will be automatically managed.
        unsafe { Ok(Box::from_raw(client_hello)) }
    }

    // this accepts a mut ref instead of a pointer, so that lifetimes are nicely
    // calculated for us. As is always the case, the reference must not be null.
    // this is marked "pub(crate)" to expose it to the connection module but
    // prevent it from being used externally.
    pub(crate) fn from_ptr(hello: &s2n_client_hello) -> &Self {
        // SAFETY: casting *s2n_client_hello <-> *ClientHello: For repr(Rust),
        // repr(packed(N)), repr(align(N)), and repr(C) structs: if all fields of a
        // struct have size 0, then the struct has size 0.
        // https://rust-lang.github.io/unsafe-code-guidelines/layout/structs-and-tuples.html#zero-sized-structs
        unsafe { &*(hello as *const s2n_client_hello as *const ClientHello) }
    }

    // SAFETY: casting *const s2n_client_hello -> *mut s2n_client_hello: This is
    // safe as long as the data is not actually mutated. As authors of s2n-tls,
    // we know that the get_hash and get_fingerprint methods do not mutate the
    // data, and use mut pointers as a matter of convention because it makes
    // working with s2n_stuffers and s2n_blobs easier.
    fn deref_mut_ptr(&self) -> *mut s2n_client_hello {
        &self.0 as *const s2n_client_hello as *mut s2n_client_hello
    }

    /// `fingerprint_hash` calculates the hash, and also returns the size
    /// required for the full fingerprint string. The return value can be used
    /// to construct a string of appropriate capacity to call
    /// `fingerprint_string`. `output` will be extended if necessary to store
    /// the full hash.
    pub fn fingerprint_hash(
        &self,
        hash: FingerprintType,
        output: &mut Vec<u8>,
    ) -> Result<u32, Error> {
        let mut hash_size: u32 = 0;
        let mut str_size: u32 = 0;
        // make sure the vec has sufficient space for the hash
        if output.capacity() < MD5_HASH_SIZE as usize {
            output.reserve_exact(MD5_HASH_SIZE as usize - output.len());
        }
        unsafe {
            s2n_client_hello_get_fingerprint_hash(
                self.deref_mut_ptr(),
                hash.into(),
                MD5_HASH_SIZE,
                output.as_mut_ptr(),
                &mut hash_size,
                &mut str_size,
            )
            .into_result()?;
            // SAFETY: we wrote to the raw vec (using the mut pointer), and need
            // to update the state of the vec to reflect the changes we made.
            output.set_len(hash_size as usize);
        };
        Ok(str_size)
    }

    /// `fingerprint_string` will try to calculate the fingerprint and store the
    /// resulting string in `output`. If `output` does not have sufficient
    /// capacity an Error of `ErrorType::UsageError` will be returned.
    pub fn fingerprint_string(
        &self,
        hash: FingerprintType,
        output: &mut String,
    ) -> Result<(), Error> {
        let mut output_size = 0;
        unsafe {
            s2n_tls_sys::s2n_client_hello_get_fingerprint_string(
                self.deref_mut_ptr(),
                hash.into(),
                output.capacity() as u32,
                output.as_mut_ptr(),
                &mut output_size,
            )
            .into_result()?;
            // SAFETY: update internal state of string to match the data written
            // into it.
            output.as_mut_vec().set_len(output_size as usize);
        };
        Ok(())
    }

    fn session_id(&self) -> Result<Vec<u8>, Error> {
        let mut session_id_length = 0;
        unsafe {
            s2n_client_hello_get_session_id_length(self.deref_mut_ptr(), &mut session_id_length)
                .into_result()?;
        }

        let mut session_id = vec![0; session_id_length as usize];
        let mut out_length = 0;
        unsafe {
            s2n_client_hello_get_session_id(
                self.deref_mut_ptr(),
                session_id.as_mut_ptr(),
                &mut out_length,
                session_id_length,
            )
            .into_result()?;
        }
        Ok(session_id)
    }

    fn raw_message(&self) -> Result<Vec<u8>, Error> {
        let message_length =
            unsafe { s2n_client_hello_get_raw_message_length(self.deref_mut_ptr()).into_result()? };

        let mut raw_message = vec![0; message_length];
        unsafe {
            s2n_client_hello_get_raw_message(
                self.deref_mut_ptr(),
                raw_message.as_mut_ptr(),
                message_length as u32,
            )
            .into_result()?
        };
        Ok(raw_message)
    }
}

impl Drop for ClientHello {
    fn drop(&mut self) {
        let mut client_hello: *mut s2n_client_hello = &mut self.0;
        // ignore failures. There isn't anything to be done to handle them, but
        // allowing the program to continue is preferable to crashing.
        let _ = unsafe {
            s2n_tls_sys::s2n_client_hello_free(std::ptr::addr_of_mut!(client_hello)).into_result()
        };
    }
}

impl fmt::Debug for ClientHello {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let session_id = self.session_id().map_err(|_| fmt::Error)?;
        let session_id = hex::encode(session_id);
        let message_head = self.raw_message().map_err(|_| fmt::Error)?;
        let mut hash = Vec::new();
        self.fingerprint_hash(FingerprintType::JA3, &mut hash)
            .map_err(|_| fmt::Error)?;
        f.debug_struct("ClientHello")
            .field("session_id", &session_id)
            .field("message_len", &(message_head.len()))
            .field("ja3_fingerprint", &hex::encode(hash))
            .finish_non_exhaustive()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        connection::Connection,
        error::{Error, ErrorType},
        security,
        testing::{poll_tls_pair, tls_pair},
    };

    /// This function is a test fixture used to generate a valid ClientHello so
    /// that we don't have to copy and paste the raw bytes for test fixtures
    fn get_client_hello_bytes() -> Vec<u8> {
        let config = crate::testing::config_builder(&security::DEFAULT_TLS13)
            .unwrap()
            .build()
            .unwrap();
        let pair = tls_pair(config);
        let pair = poll_tls_pair(pair);
        // this doesn't have the handshake header
        let client_hello_message = pair
            .server
            .0
            .connection()
            .client_hello()
            .unwrap()
            .raw_message()
            .unwrap();
        // handshake header is {tag: u8, client_hello_length: u24}
        let mut client_hello = vec![0; 4];
        // As long as the client hello is small, no bit fiddling is required
        assert!(client_hello_message.len() < u8::MAX as usize);
        // tag for handshake header
        client_hello[0] = 1;
        client_hello[3] = client_hello_message.len() as u8;
        client_hello.extend(client_hello_message.iter());
        client_hello
    }

    fn get_client_hello() -> Box<ClientHello> {
        // sets up connection and handshakes
        let raw_client_hello = get_client_hello_bytes();
        ClientHello::parse_client_hello(raw_client_hello.as_slice()).unwrap()
    }

    // test that a fingerprint can successfully be calculated from ClientHellos
    // returned from a connection
    #[checkers::test]
    fn io_fingerprint_test() {
        let config = crate::testing::config_builder(&security::DEFAULT_TLS13)
            .unwrap()
            .build()
            .unwrap();
        let pair = crate::testing::tls_pair(config);

        // client_hellos can not be accessed before the handshake
        assert!(pair.client.0.connection().client_hello().is_err());
        assert!(pair.server.0.connection().client_hello().is_err());

        let pair = poll_tls_pair(pair);
        let server_conn = pair.server.0.connection();
        let client_conn = pair.server.0.connection();

        let check_client_hello = |conn: &Connection| -> Result<(), Error> {
            let client_hello = conn.client_hello().unwrap();
            let mut hash = Vec::new();
            let fingerprint_size =
                client_hello.fingerprint_hash(FingerprintType::JA3, &mut hash)?;
            let mut string = String::with_capacity(fingerprint_size as usize);
            client_hello.fingerprint_string(FingerprintType::JA3, &mut string)?;
            Ok(())
        };

        assert!(check_client_hello(server_conn).is_ok());
        assert!(check_client_hello(client_conn).is_ok());
    }

    fn known_test_case(
        raw_client_hello: Vec<u8>,
        expected_string: &str,
        expected_hash_hex: &str,
    ) -> Result<(), Error> {
        let expected_hash: Vec<u8> = hex::decode(expected_hash_hex).unwrap();
        let client_hello = ClientHello::parse_client_hello(raw_client_hello.as_slice()).unwrap();

        let mut hash = Vec::new();
        let string_size = client_hello
            .fingerprint_hash(FingerprintType::JA3, &mut hash)
            .unwrap();
        assert_eq!(hash, expected_hash);

        let mut string = String::with_capacity(string_size as usize);
        client_hello
            .fingerprint_string(FingerprintType::JA3, &mut string)
            .unwrap();
        assert_eq!(string, expected_string);
        Ok(())
    }

    #[test]
    fn invalid_client_bytes() {
        let raw_client_hello_bytes =
            "random_value_that_is_unlikely_to_be_valid_client_hello".as_bytes();
        let result = ClientHello::parse_client_hello(raw_client_hello_bytes);
        assert!(result.is_err());
    }

    // known value test case copied from s2n_fingerprint_ja3_test.c
    #[checkers::test]
    fn valid_client_bytes() {
        let raw_client_hello = vec![
            0x01, 0x00, 0x00, 0xEC, 0x03, 0x03, 0x90, 0xe8, 0xcc, 0xee, 0xe5, 0x70, 0xa2, 0xa1,
            0x2f, 0x6b, 0x69, 0xd2, 0x66, 0x96, 0x0f, 0xcf, 0x20, 0xd5, 0x32, 0x6e, 0xc4, 0xb2,
            0x8c, 0xc7, 0xbd, 0x0a, 0x06, 0xc2, 0xa5, 0x14, 0xfc, 0x34, 0x20, 0xaf, 0x72, 0xbf,
            0x39, 0x99, 0xfb, 0x20, 0x70, 0xc3, 0x10, 0x83, 0x0c, 0xee, 0xfb, 0xfa, 0x72, 0xcc,
            0x5d, 0xa8, 0x99, 0xb4, 0xc5, 0x53, 0xd6, 0x3d, 0xa0, 0x53, 0x7a, 0x5c, 0xbc, 0xf5,
            0x0b, 0x00, 0x1e, 0xc0, 0x2b, 0xc0, 0x2f, 0xcc, 0xa9, 0xcc, 0xa8, 0xc0, 0x2c, 0xc0,
            0x30, 0xc0, 0x0a, 0xc0, 0x09, 0xc0, 0x13, 0xc0, 0x14, 0x00, 0x33, 0x00, 0x39, 0x00,
            0x2f, 0x00, 0x35, 0x00, 0x0a, 0x01, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x23, 0x00,
            0x21, 0x00, 0x00, 0x1e, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x74,
            0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x6d, 0x6f, 0x7a, 0x69, 0x6c,
            0x6c, 0x61, 0x2e, 0x6f, 0x72, 0x67, 0x00, 0x17, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01,
            0x00, 0x00, 0x0a, 0x00, 0x0a, 0x00, 0x08, 0x00, 0x1d, 0x00, 0x17, 0x00, 0x18, 0x00,
            0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x10, 0x00,
            0x0e, 0x00, 0x0c, 0x02, 0x68, 0x32, 0x08, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x31, 0x2e,
            0x31, 0x00, 0x05, 0x00, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x18,
            0x00, 0x16, 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x08, 0x04, 0x08, 0x05, 0x08, 0x06,
            0x04, 0x01, 0x05, 0x01, 0x06, 0x01, 0x02, 0x03, 0x02, 0x01, 0x00, 0x1c, 0x00, 0x02,
            0x40, 0x00,
        ];
        let expected_fingerprint = "771,49195-49199-52393-52392-49196-49200-\
                                    49162-49161-49171-49172-51-57-47-53-10,0-\
                                    23-65281-10-11-35-16-5-13-28,29-23-24-25,0";
        let expected_hash_hex = "839bbe3ed07fed922ded5aaf714d6842";
        known_test_case(raw_client_hello, expected_fingerprint, expected_hash_hex).unwrap();
    }

    #[test]
    fn hash_output_resizing() {
        let client_hello = get_client_hello();
        let hash_capacities = vec![0, MD5_HASH_SIZE, 1_000];
        for initial_size in hash_capacities {
            let mut hash = Vec::with_capacity(initial_size as usize);
            client_hello
                .fingerprint_hash(FingerprintType::JA3, &mut hash)
                .unwrap();
            assert_eq!(hash.len(), MD5_HASH_SIZE as usize);
        }
    }

    #[test]
    fn string_output_too_small() {
        let client_hello = get_client_hello();
        let mut fingerprint_string = String::with_capacity(0);
        let fingerprint_err = client_hello
            .fingerprint_string(FingerprintType::JA3, &mut fingerprint_string)
            .unwrap_err();
        assert_eq!(fingerprint_err.kind(), ErrorType::UsageError);
    }

    // make sure that debug doesn't panic and seems reasonable
    #[test]
    fn debug() {
        let client_hello = get_client_hello();
        let mut hash = Vec::new();
        client_hello
            .fingerprint_hash(FingerprintType::JA3, &mut hash)
            .unwrap();
        let client_hello_debug = format!("{:?}", client_hello);
        assert!(client_hello_debug.contains(&hex::encode(hash)));
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/config.rs000066400000000000000000000734621456575232400254340ustar00rootroot00000000000000// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    callbacks::*,
    enums::*,
    error::{Error, Fallible},
    security,
};
use core::{convert::TryInto, ptr::NonNull};
use s2n_tls_sys::*;
use std::{
    ffi::{c_void, CString},
    path::Path,
    pin::Pin,
    sync::atomic::{AtomicUsize, Ordering},
    task::Poll,
    time::{Duration, SystemTime},
};

#[derive(Debug, PartialEq)]
pub struct Config(NonNull<s2n_config>);

/// # Safety
///
/// Safety: s2n_config objects can be sent across threads
unsafe impl Send for Config {}

/// # Safety
///
/// Safety: All C methods that mutate the s2n_config are wrapped
/// in Rust methods that require a mutable reference.
unsafe impl Sync for Config {}

impl Config {
    /// Returns a Config object with pre-defined defaults.
    ///
    /// Use the [`Builder`] if custom configuration is desired.
    pub fn new() -> Self {
        Self::default()
    }

    /// Returns a Builder which can be used to configure the Config
    pub fn builder() -> Builder {
        Builder::default()
    }

    /// # Safety
    ///
    /// This config _MUST_ have been initialized with a [`Builder`].
    /// Additionally, this does NOT increment the config reference count,
    /// so consider cloning the result if the source pointer is still
    /// valid and usable afterwards.
    pub(crate) unsafe fn from_raw(config: NonNull<s2n_config>) -> Self {
        let config = Self(config);

        // Check if the context can be retrieved.
        // If it can't, this is not a valid config.
        config.context();

        config
    }

    pub(crate) fn as_mut_ptr(&mut self) -> *mut s2n_config {
        self.0.as_ptr()
    }

    /// Retrieve a reference to the [`Context`] stored on the config.
    pub(crate) fn context(&self) -> &Context {
        let mut ctx = core::ptr::null_mut();
        unsafe {
            s2n_config_get_ctx(self.0.as_ptr(), &mut ctx)
                .into_result()
                .unwrap();
            &*(ctx as *const Context)
        }
    }

    /// Retrieve a mutable reference to the [`Context`] stored on the config.
    pub(crate) fn context_mut(&mut self) -> &mut Context {
        let mut ctx = core::ptr::null_mut();
        unsafe {
            s2n_config_get_ctx(self.as_mut_ptr(), &mut ctx)
                .into_result()
                .unwrap();
            &mut *(ctx as *mut Context)
        }
    }

    #[cfg(test)]
    /// Get the refcount associated with the config
    pub fn test_get_refcount(&self) -> Result<usize, Error> {
        let context = self.context();
        Ok(context.refcount.load(Ordering::SeqCst))
    }
}

impl Default for Config {
    fn default() -> Self {
        Builder::new().build().unwrap()
    }
}

impl Clone for Config {
    fn clone(&self) -> Self {
        let context = self.context();

        // Safety
        //
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        // https://github.com/rust-lang/rust/blob/e012a191d768adeda1ee36a99ef8b92d51920154/library/alloc/src/sync.rs#L1329
        let _count = context.refcount.fetch_add(1, Ordering::Relaxed);
        Self(self.0)
    }
}

impl Drop for Config {
    fn drop(&mut self) {
        let context = self.context_mut();
        let count = context.refcount.fetch_sub(1, Ordering::Release);
        debug_assert!(count > 0, "refcount should not drop below 1 instance");

        // only free the config if this is the last instance
        if count != 1 {
            return;
        }

        // Safety
        //
        // The use of Ordering and fence mirrors the `Arc` implementation in
        // the standard library.
        //
        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data.  Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        // https://github.com/rust-lang/rust/blob/e012a191d768adeda1ee36a99ef8b92d51920154/library/alloc/src/sync.rs#L1637
        std::sync::atomic::fence(Ordering::Acquire);

        unsafe {
            // This is the last instance so free the context.
            let context = Box::from_raw(context);
            drop(context);

            let _ = s2n_config_free(self.0.as_ptr()).into_result();
        }
    }
}

#[derive(Default)]
pub struct Builder {
    config: Config,
    load_system_certs: bool,
    enable_ocsp: bool,
}

impl Builder {
    pub fn new() -> Self {
        crate::init::init();
        let config = unsafe { s2n_config_new_minimal().into_result() }.unwrap();

        let context = Box::<Context>::default();
        let context = Box::into_raw(context) as *mut c_void;

        unsafe {
            s2n_config_set_ctx(config.as_ptr(), context)
                .into_result()
                .unwrap();

            // The client hello callback originally did not support async operations,
            // so defaults to blocking mode for backwards compatibility with old integrations.
            // But these bindings use a polling model, so assume non-blocking mode.
            s2n_config_set_client_hello_cb_mode(
                config.as_ptr(),
                s2n_client_hello_cb_mode::NONBLOCKING,
            )
            .into_result()
            .unwrap();
        }

        Self {
            config: Config(config),
            load_system_certs: true,
            enable_ocsp: false,
        }
    }

    pub fn set_alert_behavior(&mut self, value: AlertBehavior) -> Result<&mut Self, Error> {
        unsafe { s2n_config_set_alert_behavior(self.as_mut_ptr(), value.into()).into_result() }?;
        Ok(self)
    }

    pub fn set_security_policy(&mut self, policy: &security::Policy) -> Result<&mut Self, Error> {
        unsafe {
            s2n_config_set_cipher_preferences(self.as_mut_ptr(), policy.as_cstr().as_ptr())
                .into_result()
        }?;
        Ok(self)
    }

    /// sets the application protocol preferences on an s2n_config object.
    ///
    /// protocols is a list in order of preference, with most preferred protocol first,
    /// and of length protocol_count. When acting as a client the protocol list is
    /// included in the Client Hello message as the ALPN extension. As a server, the
    /// list is used to negotiate a mutual application protocol with the client. After
    /// the negotiation for the connection has completed, the agreed upon protocol can
    /// be retrieved with s2n_get_application_protocol
    pub fn set_application_protocol_preference<P: IntoIterator<Item = I>, I: AsRef<[u8]>>(
        &mut self,
        protocols: P,
    ) -> Result<&mut Self, Error> {
        // reset the list
        unsafe {
            s2n_config_set_protocol_preferences(self.as_mut_ptr(), core::ptr::null(), 0)
                .into_result()
        }?;

        for protocol in protocols {
            self.append_application_protocol_preference(protocol.as_ref())?;
        }

        Ok(self)
    }
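
    // Editorial usage sketch (not part of the upstream source): ALPN protocol
    // names are arbitrary byte strings, ordered from most to least preferred.
    // Assuming a `builder` created with `Builder::new()`:
    //
    //     builder.set_application_protocol_preference(["h2", "http/1.1"])?;
    //
    // Any iterator of byte-string-like items satisfies the
    // `P: IntoIterator<Item = I>, I: AsRef<[u8]>` bound.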

    pub fn append_application_protocol_preference(
        &mut self,
        protocol: &[u8],
    ) -> Result<&mut Self, Error> {
        unsafe {
            s2n_config_append_protocol_preference(
                self.as_mut_ptr(),
                protocol.as_ptr(),
                protocol
                    .len()
                    .try_into()
                    .map_err(|_| Error::INVALID_INPUT)?,
            )
            .into_result()
        }?;
        Ok(self)
    }

    /// Turns off x509 verification
    ///
    /// # Safety
    /// This functionality will weaken the security of the connections. As such, it should only
    /// be used in development environments where obtaining a valid certificate would not be possible.
    pub unsafe fn disable_x509_verification(&mut self) -> Result<&mut Self, Error> {
        s2n_config_disable_x509_verification(self.as_mut_ptr()).into_result()?;
        Ok(self)
    }

    pub fn add_dhparams(&mut self, pem: &[u8]) -> Result<&mut Self, Error> {
        let cstring = CString::new(pem).map_err(|_| Error::INVALID_INPUT)?;
        unsafe { s2n_config_add_dhparams(self.as_mut_ptr(), cstring.as_ptr()).into_result() }?;
        Ok(self)
    }

    pub fn load_pem(&mut self, certificate: &[u8], private_key: &[u8]) -> Result<&mut Self, Error> {
        let certificate = CString::new(certificate).map_err(|_| Error::INVALID_INPUT)?;
        let private_key = CString::new(private_key).map_err(|_| Error::INVALID_INPUT)?;
        unsafe {
            s2n_config_add_cert_chain_and_key(
                self.as_mut_ptr(),
                certificate.as_ptr(),
                private_key.as_ptr(),
            )
            .into_result()
        }?;
        Ok(self)
    }

    pub fn load_public_pem(&mut self, certificate: &[u8]) -> Result<&mut Self, Error> {
        let size: u32 = certificate
            .len()
            .try_into()
            .map_err(|_| Error::INVALID_INPUT)?;
        let certificate = certificate.as_ptr() as *mut u8;
        unsafe { s2n_config_add_cert_chain(self.as_mut_ptr(), certificate, size) }.into_result()?;
        Ok(self)
    }

    pub fn trust_pem(&mut self, certificate: &[u8]) -> Result<&mut Self, Error> {
        let certificate = CString::new(certificate).map_err(|_| Error::INVALID_INPUT)?;
        unsafe {
            s2n_config_add_pem_to_trust_store(self.as_mut_ptr(), certificate.as_ptr()).into_result()
        }?;
        Ok(self)
    }

    /// Adds to the trust store from a CA file or directory containing trusted certificates.
    ///
    /// NOTE: This function is equivalent to `s2n_config_set_verification_ca_location` except it does
    /// not automatically enable the client to request OCSP stapling from the server.
    pub fn trust_location(
        &mut self,
        file: Option<&Path>,
        dir: Option<&Path>,
    ) -> Result<&mut Self, Error> {
        fn to_cstr(input: Option<&Path>) -> Result<Option<CString>, Error> {
            Ok(match input {
                Some(input) => {
                    let string = input.to_str().ok_or(Error::INVALID_INPUT)?;
                    let cstring = CString::new(string).map_err(|_| Error::INVALID_INPUT)?;
                    Some(cstring)
                }
                None => None,
            })
        }

        let file_cstr = to_cstr(file)?;
        let file_ptr = file_cstr
            .as_ref()
            .map(|f| f.as_ptr())
            .unwrap_or(core::ptr::null());

        let dir_cstr = to_cstr(dir)?;
        let dir_ptr = dir_cstr
            .as_ref()
            .map(|f| f.as_ptr())
            .unwrap_or(core::ptr::null());

        unsafe {
            s2n_config_set_verification_ca_location(self.as_mut_ptr(), file_ptr, dir_ptr)
                .into_result()
        }?;

        // If OCSP has not been explicitly requested, turn off OCSP. This is to prevent this function from
        // automatically enabling `OCSP` due to the legacy behavior of `s2n_config_set_verification_ca_location`
        if !self.enable_ocsp {
            unsafe {
                s2n_config_set_status_request_type(self.as_mut_ptr(), s2n_status_request_type::NONE)
                    .into_result()?
            };
        }

        Ok(self)
    }

    /// Sets whether or not default system certificates will be loaded into the trust store.
    ///
    /// Set to false for increased performance if system certificates are not needed during
    /// certificate validation.
    pub fn with_system_certs(&mut self, load_system_certs: bool) -> Result<&mut Self, Error> {
        self.load_system_certs = load_system_certs;
        Ok(self)
    }

    pub fn wipe_trust_store(&mut self) -> Result<&mut Self, Error> {
        unsafe { s2n_config_wipe_trust_store(self.as_mut_ptr()).into_result()? };
        Ok(self)
    }

    /// Sets whether or not a client certificate should be required to complete the TLS connection.
    ///
    /// See the [Usage Guide](https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#client-auth-related-calls) for more details.
    pub fn set_client_auth_type(&mut self, auth_type: ClientAuthType) -> Result<&mut Self, Error> {
        unsafe {
            s2n_config_set_client_auth_type(self.as_mut_ptr(), auth_type.into()).into_result()
        }?;
        Ok(self)
    }

    /// Clients will request OCSP stapling from the server.
    pub fn enable_ocsp(&mut self) -> Result<&mut Self, Error> {
        unsafe {
            s2n_config_set_status_request_type(self.as_mut_ptr(), s2n_status_request_type::OCSP)
                .into_result()
        }?;
        self.enable_ocsp = true;
        Ok(self)
    }

    /// Sets the OCSP data for the default certificate chain associated with the Config.
    ///
    /// Servers will send the data in response to OCSP stapling requests from clients.
    //
    // NOTE: this modifies a certificate chain, NOT the Config itself. This is currently safe
    // because the certificate chain is set with s2n_config_add_cert_chain_and_key, which
    // creates a new certificate chain only accessible by the given config. It will
    // NOT be safe when we add support for the newer s2n_config_add_cert_chain_and_key_to_store API,
    // which allows certificate chains to be shared across configs.
    // In that case, we'll need additional guard rails either in these bindings or in the underlying C.
    pub fn set_ocsp_data(&mut self, data: &[u8]) -> Result<&mut Self, Error> {
        let size: u32 = data.len().try_into().map_err(|_| Error::INVALID_INPUT)?;
        unsafe {
            s2n_config_set_extension_data(
                self.as_mut_ptr(),
                s2n_tls_extension_type::OCSP_STAPLING,
                data.as_ptr(),
                size,
            )
            .into_result()
        }?;
        self.enable_ocsp()
    }

    /// Sets the callback to use for verifying that a hostname from an X.509 certificate is
    /// trusted.
    ///
    /// The callback may be called more than once during certificate validation as each SAN on
    /// the certificate will be checked.
    ///
    /// Corresponds to the underlying C API
    /// [s2n_config_set_verify_host_callback](https://aws.github.io/s2n-tls/doxygen/s2n_8h.html).
    pub fn set_verify_host_callback<T: 'static + VerifyHostNameCallback>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        unsafe extern "C" fn verify_host_cb_fn(
            host_name: *const ::libc::c_char,
            host_name_len: usize,
            context: *mut ::libc::c_void,
        ) -> u8 {
            let context = &mut *(context as *mut Context);
            let handler = context.verify_host_callback.as_mut().unwrap();
            verify_host(host_name, host_name_len, handler)
        }

        self.config.context_mut().verify_host_callback = Some(Box::new(handler));
        unsafe {
            s2n_config_set_verify_host_callback(
                self.as_mut_ptr(),
                Some(verify_host_cb_fn),
                self.config.context_mut() as *mut Context as *mut c_void,
            )
            .into_result()?;
        }
        Ok(self)
    }
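
    // Editorial usage sketch (not part of the upstream source): wiring a host name
    // verifier into the Builder. `ExpectedHostName` is a hypothetical application
    // type implementing `VerifyHostNameCallback`:
    //
    //     struct ExpectedHostName(String);
    //     impl VerifyHostNameCallback for ExpectedHostName {
    //         fn verify_host_name(&self, host_name: &str) -> bool {
    //             self.0 == host_name
    //         }
    //     }
    //
    //     builder.set_verify_host_callback(ExpectedHostName("example.com".to_owned()))?;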

    /// # Safety
    /// THIS SHOULD BE USED FOR DEBUGGING PURPOSES ONLY!
    /// The `context` pointer must live at least as long as the config
    pub unsafe fn set_key_log_callback(
        &mut self,
        callback: s2n_key_log_fn,
        context: *mut core::ffi::c_void,
    ) -> Result<&mut Self, Error> {
        s2n_config_set_key_log_cb(self.as_mut_ptr(), callback, context).into_result()?;
        Ok(self)
    }

    pub fn set_max_cert_chain_depth(&mut self, depth: u16) -> Result<&mut Self, Error> {
        unsafe { s2n_config_set_max_cert_chain_depth(self.as_mut_ptr(), depth).into_result() }?;
        Ok(self)
    }

    pub fn set_send_buffer_size(&mut self, size: u32) -> Result<&mut Self, Error> {
        unsafe { s2n_config_set_send_buffer_size(self.as_mut_ptr(), size).into_result() }?;
        Ok(self)
    }

    /// Set a custom callback function which is run after parsing the client hello.
    pub fn set_client_hello_callback<T: 'static + ClientHelloCallback>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        unsafe extern "C" fn client_hello_cb(
            connection_ptr: *mut s2n_connection,
            _context: *mut core::ffi::c_void,
        ) -> libc::c_int {
            with_context(connection_ptr, |conn, context| {
                let callback = context.client_hello_callback.as_ref();
                let future = callback
                    .map(|c| c.on_client_hello(conn))
                    .unwrap_or(Ok(None));
                AsyncCallback::trigger_client_hello_cb(future, conn)
            })
            .into()
        }

        let handler = Box::new(handler);
        let context = self.config.context_mut();
        context.client_hello_callback = Some(handler);

        unsafe {
            s2n_config_set_client_hello_cb(
                self.as_mut_ptr(),
                Some(client_hello_cb),
                core::ptr::null_mut(),
            )
            .into_result()?;
        }

        Ok(self)
    }

    pub fn set_connection_initializer<T: 'static + ConnectionInitializer>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        // Store callback in config context
        let handler = Box::new(handler);
        let context = self.config.context_mut();
        context.connection_initializer = Some(handler);
        Ok(self)
    }

    /// Sets a custom callback which provides access to session tickets when they arrive
    pub fn set_session_ticket_callback<T: 'static + SessionTicketCallback>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        // enable session tickets automatically
        self.enable_session_tickets(true)?;

        // Define C callback function that can be set on the s2n_config struct
        unsafe extern "C" fn session_ticket_cb(
            conn_ptr: *mut s2n_connection,
            _context: *mut ::libc::c_void,
            session_ticket: *mut s2n_session_ticket,
        ) -> libc::c_int {
            let session_ticket = SessionTicket::from_ptr(&*session_ticket);
            with_context(conn_ptr, |conn, context| {
                let callback = context.session_ticket_callback.as_ref();
                callback.map(|c| c.on_session_ticket(conn, session_ticket))
            });
            CallbackResult::Success.into()
        }

        // Store callback in context
        let handler = Box::new(handler);
        let context = self.config.context_mut();
        context.session_ticket_callback = Some(handler);

        unsafe {
            s2n_config_set_session_ticket_cb(
                self.as_mut_ptr(),
                Some(session_ticket_cb),
                self.config.context_mut() as *mut Context as *mut c_void,
            )
            .into_result()
        }?;
        Ok(self)
    }

    /// Set a callback function triggered by operations requiring the private key.
    ///
    /// See https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#private-key-operation-related-calls
    pub fn set_private_key_callback<T: 'static + PrivateKeyCallback>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        unsafe extern "C" fn private_key_cb(
            conn_ptr: *mut s2n_connection,
            op_ptr: *mut s2n_async_pkey_op,
        ) -> libc::c_int {
            with_context(conn_ptr, |conn, context| {
                let state = PrivateKeyOperation::try_from_cb(conn, op_ptr);
                let callback = context.private_key_callback.as_ref();
                let future_result = state.and_then(|state| {
                    callback.map_or(Ok(None), |callback| callback.handle_operation(conn, state))
                });
                AsyncCallback::trigger(future_result, conn)
            })
            .into()
        }

        let handler = Box::new(handler);
        let context = self.config.context_mut();
        context.private_key_callback = Some(handler);

        unsafe {
            s2n_config_set_async_pkey_callback(self.as_mut_ptr(), Some(private_key_cb))
                .into_result()?;
        }
        Ok(self)
    }

    /// Set a callback function that will be used to get the system time.
    ///
    /// The wall clock time is the best-guess at the real time, measured since the epoch.
    /// Unlike monotonic time, it CAN move backwards.
    /// It is used by s2n-tls for timestamps.
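    ///
    /// A minimal sketch of a handler, assuming the callbacks module exposes a
    /// `WallClock` trait with a `get_time_since_epoch(&self) -> Duration` method
    /// (the method name is taken from the C shim below):
    /// ```ignore
    /// use std::time::{Duration, SystemTime, UNIX_EPOCH};
    ///
    /// struct SystemWallClock;
    ///
    /// impl WallClock for SystemWallClock {
    ///     fn get_time_since_epoch(&self) -> Duration {
    ///         // Best-guess wall clock time; may move backwards.
    ///         SystemTime::now()
    ///             .duration_since(UNIX_EPOCH)
    ///             .unwrap_or(Duration::ZERO)
    ///     }
    /// }
    ///
    /// // given a config::Builder named `builder`:
    /// builder.set_wall_clock(SystemWallClock)?;
    /// ```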
    pub fn set_wall_clock<T: 'static + WallClock>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        unsafe extern "C" fn clock_cb(
            context: *mut ::libc::c_void,
            time_in_nanos: *mut u64,
        ) -> libc::c_int {
            let context = &mut *(context as *mut Context);
            if let Some(handler) = context.wall_clock.as_mut() {
                if let Ok(nanos) = handler.get_time_since_epoch().as_nanos().try_into() {
                    *time_in_nanos = nanos;
                    return CallbackResult::Success.into();
                }
            }
            CallbackResult::Failure.into()
        }

        let handler = Box::new(handler);
        let context = self.config.context_mut();
        context.wall_clock = Some(handler);
        unsafe {
            s2n_config_set_wall_clock(
                self.as_mut_ptr(),
                Some(clock_cb),
                self.config.context_mut() as *mut _ as *mut c_void,
            )
            .into_result()?;
        }
        Ok(self)
    }

    /// Set a callback function that will be used to get the monotonic time.
    ///
    /// The monotonic time is the time since an arbitrary, unspecified point.
    /// Unlike wall clock time, it MUST never move backwards.
    /// It is used by s2n-tls for timers.
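    ///
    /// A minimal sketch of a handler, assuming a `MonotonicClock` trait with a
    /// `get_time(&self) -> Duration` method (the method name is taken from the
    /// C shim below):
    /// ```ignore
    /// use std::time::{Duration, Instant};
    ///
    /// struct InstantClock(Instant);
    ///
    /// impl MonotonicClock for InstantClock {
    ///     fn get_time(&self) -> Duration {
    ///         // Time since an arbitrary starting point; never moves backwards.
    ///         self.0.elapsed()
    ///     }
    /// }
    ///
    /// // given a config::Builder named `builder`:
    /// builder.set_monotonic_clock(InstantClock(Instant::now()))?;
    /// ```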
    pub fn set_monotonic_clock<T: 'static + MonotonicClock>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        unsafe extern "C" fn clock_cb(
            context: *mut ::libc::c_void,
            time_in_nanos: *mut u64,
        ) -> libc::c_int {
            let context = &mut *(context as *mut Context);
            if let Some(handler) = context.monotonic_clock.as_mut() {
                if let Ok(nanos) = handler.get_time().as_nanos().try_into() {
                    *time_in_nanos = nanos;
                    return CallbackResult::Success.into();
                }
            }
            CallbackResult::Failure.into()
        }

        let handler = Box::new(handler);
        let context = self.config.context_mut();
        context.monotonic_clock = Some(handler);
        unsafe {
            s2n_config_set_monotonic_clock(
                self.as_mut_ptr(),
                Some(clock_cb),
                self.config.context_mut() as *mut _ as *mut c_void,
            )
            .into_result()?;
        }
        Ok(self)
    }

    /// Enable negotiating session tickets in a TLS connection
    pub fn enable_session_tickets(&mut self, enable: bool) -> Result<&mut Self, Error> {
        unsafe {
            s2n_config_set_session_tickets_onoff(self.as_mut_ptr(), enable.into()).into_result()
        }?;
        Ok(self)
    }

    /// Adds a key which will be used to encrypt and decrypt session tickets. The intro_time parameter is time since
    /// the Unix epoch (Midnight, January 1st, 1970). The key must be at least 16 bytes.
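    ///
    /// A short usage sketch (illustrative only; use securely generated key material in practice):
    /// ```ignore
    /// use std::time::SystemTime;
    ///
    /// // given a config::Builder named `builder`:
    /// let key = [0x42u8; 16]; // 128-bit key, the minimum allowed size
    /// builder.add_session_ticket_key(b"2024-key-1", &key, SystemTime::now())?;
    /// ```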
    pub fn add_session_ticket_key(
        &mut self,
        key_name: &[u8],
        key: &[u8],
        intro_time: SystemTime,
    ) -> Result<&mut Self, Error> {
        let key_name_len: u32 = key_name
            .len()
            .try_into()
            .map_err(|_| Error::INVALID_INPUT)?;
        let key_len: u32 = key.len().try_into().map_err(|_| Error::INVALID_INPUT)?;
        let intro_time = intro_time
            .duration_since(std::time::UNIX_EPOCH)
            .map_err(|_| Error::INVALID_INPUT)?;
        // Ticket keys should be at least 128 bits in strength
        // https://www.rfc-editor.org/rfc/rfc5077#section-5.5
        if key_len < 16 {
            return Err(Error::INVALID_INPUT);
        }
        self.enable_session_tickets(true)?;
        unsafe {
            s2n_config_add_ticket_crypto_key(
                self.as_mut_ptr(),
                key_name.as_ptr(),
                key_name_len,
                // s2n-tls doesn't mutate key, it's just mut for easier use with stuffers and blobs
                key.as_ptr() as *mut u8,
                key_len,
                intro_time.as_secs(),
            )
            .into_result()
        }?;
        Ok(self)
    }

    // Sets how long a session ticket key can be used for both encryption
    // and decryption of tickets
    pub fn set_ticket_key_encrypt_decrypt_lifetime(
        &mut self,
        lifetime: Duration,
    ) -> Result<&mut Self, Error> {
        unsafe {
            s2n_config_set_ticket_encrypt_decrypt_key_lifetime(
                self.as_mut_ptr(),
                lifetime.as_secs(),
            )
            .into_result()
        }?;
        Ok(self)
    }

    // Sets how long a session ticket key can be used for decryption only
    pub fn set_ticket_key_decrypt_lifetime(
        &mut self,
        lifetime: Duration,
    ) -> Result<&mut Self, Error> {
        unsafe {
            s2n_config_set_ticket_decrypt_key_lifetime(self.as_mut_ptr(), lifetime.as_secs())
                .into_result()
        }?;
        Ok(self)
    }

    pub fn build(mut self) -> Result<Config, Error> {
        if self.load_system_certs {
            unsafe {
                s2n_config_load_system_certs(self.as_mut_ptr()).into_result()?;
            }
        }

        Ok(self.config)
    }

    fn as_mut_ptr(&mut self) -> *mut s2n_config {
        self.config.as_mut_ptr()
    }
}

#[cfg(feature = "quic")]
impl Builder {
    pub fn enable_quic(&mut self) -> Result<&mut Self, Error> {
        unsafe { s2n_tls_sys::s2n_config_enable_quic(self.as_mut_ptr()).into_result() }?;
        Ok(self)
    }
}

pub(crate) struct Context {
    refcount: AtomicUsize,
    pub(crate) client_hello_callback: Option<Box<dyn ClientHelloCallback>>,
    pub(crate) private_key_callback: Option<Box<dyn PrivateKeyCallback>>,
    pub(crate) verify_host_callback: Option<Box<dyn VerifyHostNameCallback>>,
    pub(crate) session_ticket_callback: Option<Box<dyn SessionTicketCallback>>,
    pub(crate) connection_initializer: Option<Box<dyn ConnectionInitializer>>,
    pub(crate) wall_clock: Option<Box<dyn WallClock>>,
    pub(crate) monotonic_clock: Option<Box<dyn MonotonicClock>>,
}

impl Default for Context {
    fn default() -> Self {
        // The AtomicUsize is used to manually track the reference count of the Config.
        // This mechanism is used to track when the Config object should be freed.
        let refcount = AtomicUsize::new(1);

        Self {
            refcount,
            client_hello_callback: None,
            private_key_callback: None,
            verify_host_callback: None,
            session_ticket_callback: None,
            connection_initializer: None,
            wall_clock: None,
            monotonic_clock: None,
        }
    }
}

/// A trait executed asynchronously before a new connection negotiates TLS.
///
/// Used for dynamic configuration of a specific connection.
///
/// # Safety: This trait is polled to completion at the beginning of the
/// [connection::poll_negotiate](`crate::connection::poll_negotiate()`) function.
/// Therefore, negotiation of the TLS connection will not begin until the Future has completed.
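///
/// A minimal synchronous implementation might look like this (illustrative
/// only; `"example.com"` is a placeholder):
/// ```ignore
/// struct ServerNameInitializer;
///
/// impl ConnectionInitializer for ServerNameInitializer {
///     fn initialize_connection(
///         &self,
///         connection: &mut crate::connection::Connection,
///     ) -> ConnectionFutureResult {
///         connection.set_server_name("example.com")?;
///         // Ok(None) resolves the callback without scheduling an async task.
///         Ok(None)
///     }
/// }
/// ```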
pub trait ConnectionInitializer: 'static + Send + Sync {
    /// The application can return an `Ok(None)` to resolve the callback
    /// synchronously or return an `Ok(Some(ConnectionFuture))` if it wants to
    /// run some asynchronous task before resolving the callback.
    fn initialize_connection(
        &self,
        connection: &mut crate::connection::Connection,
    ) -> ConnectionFutureResult;
}

impl<A: ConnectionInitializer, B: ConnectionInitializer> ConnectionInitializer for (A, B) {
    fn initialize_connection(
        &self,
        connection: &mut crate::connection::Connection,
    ) -> ConnectionFutureResult {
        let a = self.0.initialize_connection(connection)?;
        let b = self.1.initialize_connection(connection)?;
        match (a, b) {
            (None, None) => Ok(None),
            (None, Some(fut)) => Ok(Some(fut)),
            (Some(fut), None) => Ok(Some(fut)),
            (Some(fut_a), Some(fut_b)) => Ok(Some(Box::pin(ConcurrentConnectionFuture::new([
                fut_a, fut_b,
            ])))),
        }
    }
}

struct ConcurrentConnectionFuture<const N: usize> {
    futures: [Option<Pin<Box<dyn ConnectionFuture>>>; N],
}

impl<const N: usize> ConcurrentConnectionFuture<N> {
    fn new(futures: [Pin<Box<dyn ConnectionFuture>>; N]) -> Self {
        let futures = futures.map(Some);
        Self { futures }
    }
}

impl<const N: usize> ConnectionFuture for ConcurrentConnectionFuture<N> {
    fn poll(
        mut self: std::pin::Pin<&mut Self>,
        connection: &mut crate::connection::Connection,
        ctx: &mut core::task::Context,
    ) -> std::task::Poll<Result<(), Error>> {
        let mut is_pending = false;
        for container in self.futures.iter_mut() {
            if let Some(future) = container.as_mut() {
                match future.as_mut().poll(connection, ctx) {
                    Poll::Ready(result) => {
                        result?;
                        *container = None;
                    }
                    Poll::Pending => is_pending = true,
                }
            }
        }
        if is_pending {
            Poll::Pending
        } else {
            Poll::Ready(Ok(()))
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // ensure the config context is send and sync
    #[test]
    fn context_send_sync_test() {
        fn assert_send_sync<T: 'static + Send + Sync>() {}
        assert_send_sync::<Context>();
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/connection.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

#![allow(clippy::missing_safety_doc)] // TODO add safety docs

use crate::{
    callbacks::*,
    config::Config,
    enums::*,
    error::{Error, Fallible, Pollable},
    security,
};

use core::{
    convert::TryInto,
    fmt,
    mem::{self, ManuallyDrop, MaybeUninit},
    pin::Pin,
    ptr::NonNull,
    task::{Poll, Waker},
    time::Duration,
};
use libc::c_void;
use s2n_tls_sys::*;
use std::ffi::CStr;

mod builder;
pub use builder::*;

macro_rules! static_const_str {
    ($c_chars:expr) => {
        unsafe { CStr::from_ptr($c_chars) }
            .to_str()
            .map_err(|_| Error::INVALID_INPUT)
    };
}

pub struct Connection {
    connection: NonNull<s2n_connection>,
}

impl fmt::Debug for Connection {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut debug = f.debug_struct("Connection");
        if let Ok(handshake) = self.handshake_type() {
            debug.field("handshake_type", &handshake);
        }
        if let Ok(cipher) = self.cipher_suite() {
            debug.field("cipher_suite", &cipher);
        }
        if let Ok(version) = self.actual_protocol_version() {
            debug.field("actual_protocol_version", &version);
        }
        if let Ok(curve) = self.selected_curve() {
            debug.field("selected_curve", &curve);
        }
        debug.finish_non_exhaustive()
    }
}

/// # Safety
///
/// s2n_connection objects can be sent across threads
unsafe impl Send for Connection {}

impl Connection {
    pub fn new(mode: Mode) -> Self {
        crate::init::init();

        let connection = unsafe { s2n_connection_new(mode.into()).into_result() }.unwrap();

        unsafe {
            debug_assert! {
                s2n_connection_get_config(connection.as_ptr(), &mut core::ptr::null_mut())
                    .into_result()
                    .is_err()
            }
        }

        let mut connection = Self { connection };
        connection.init_context(mode);
        connection
    }

    fn init_context(&mut self, mode: Mode) {
        let context = Box::new(Context::new(mode));
        let context = Box::into_raw(context) as *mut c_void;
        // allocate a new context object
        unsafe {
            // There should never be an existing context
            debug_assert!(s2n_connection_get_ctx(self.connection.as_ptr())
                .into_result()
                .is_err());

            s2n_connection_set_ctx(self.connection.as_ptr(), context)
                .into_result()
                .unwrap();
        }
    }

    pub fn new_client() -> Self {
        Self::new(Mode::Client)
    }

    pub fn new_server() -> Self {
        Self::new(Mode::Server)
    }

    pub(crate) fn as_ptr(&mut self) -> *mut s2n_connection {
        self.connection.as_ptr()
    }

    /// # Safety
    ///
    /// Caller must ensure s2n_connection is a valid reference to a [`s2n_connection`] object
    pub(crate) unsafe fn from_raw(connection: NonNull<s2n_connection>) -> Self {
        Self { connection }
    }

    pub(crate) fn mode(&self) -> Mode {
        self.context().mode
    }

    /// can be used to configure s2n to either use built-in blinding (set blinding
    /// to Blinding::BuiltIn) or self-service blinding (set blinding to
    /// Blinding::SelfService).
    pub fn set_blinding(&mut self, blinding: Blinding) -> Result<&mut Self, Error> {
        unsafe {
            s2n_connection_set_blinding(self.connection.as_ptr(), blinding.into()).into_result()
        }?;
        Ok(self)
    }

    /// Reports the remaining nanoseconds before the connection may be gracefully shutdown.
    ///
    /// This method is expected to succeed, but could fail if the
    /// [underlying C call](`s2n_connection_get_delay`) encounters errors.
    /// Failure indicates that calls to [`Self::poll_shutdown`] will also fail and
    /// that a graceful two-way shutdown of the connection will not be possible.
    pub fn remaining_blinding_delay(&self) -> Result<Duration, Error> {
        let nanos = unsafe { s2n_connection_get_delay(self.connection.as_ptr()).into_result() }?;
        Ok(Duration::from_nanos(nanos))
    }

    /// Sets whether or not a Client Certificate should be required to complete the TLS Connection.
    ///
    /// If this is set to ClientAuthType::Optional the server will request a client certificate
    /// but allow the client to not provide one. Rejecting a client certificate when using
    /// ClientAuthType::Optional will terminate the handshake.
    pub fn set_client_auth_type(
        &mut self,
        client_auth_type: ClientAuthType,
    ) -> Result<&mut Self, Error> {
        unsafe {
            s2n_connection_set_client_auth_type(self.connection.as_ptr(), client_auth_type.into())
                .into_result()
        }?;
        Ok(self)
    }

    /// Attempts to drop the config on the connection.
    ///
    /// # Safety
    ///
    /// The caller must ensure the config associated with the connection was created
    /// with a [`config::Builder`].
    unsafe fn drop_config(&mut self) -> Result<(), Error> {
        let mut prev_config = core::ptr::null_mut();

        // A valid non-null pointer is returned only if the application previously called
        // [`Self::set_config()`].
        if s2n_connection_get_config(self.connection.as_ptr(), &mut prev_config)
            .into_result()
            .is_ok()
        {
            let prev_config = NonNull::new(prev_config).expect(
                "config should exist since the call to s2n_connection_get_config was successful",
            );
            drop(Config::from_raw(prev_config));
        }

        Ok(())
    }

    /// Associates a configuration object with a connection.
    pub fn set_config(&mut self, mut config: Config) -> Result<&mut Self, Error> {
        unsafe {
            // attempt to drop the currently set config
            self.drop_config()?;

            s2n_connection_set_config(self.connection.as_ptr(), config.as_mut_ptr())
                .into_result()?;

            debug_assert! {
                s2n_connection_get_config(self.connection.as_ptr(), &mut core::ptr::null_mut()).into_result().is_ok(),
                "s2n_connection_set_config was successful"
            };

            // Setting the config on the connection creates one additional reference to the config
            // so use `mem::forget` to prevent Rust from calling `drop()` at the end of this function.
            mem::forget(config);
        }

        Ok(self)
    }

    pub(crate) fn config(&self) -> Option<Config> {
        let mut raw = core::ptr::null_mut();
        let config = unsafe {
            s2n_connection_get_config(self.connection.as_ptr(), &mut raw)
                .into_result()
                .ok()?;
            let raw = NonNull::new(raw)?;
            Config::from_raw(raw)
        };
        // Because the config pointer is still set on the connection, this is a copy,
        // not the original config. This is fine -- Configs are immutable.
        let _ = ManuallyDrop::new(config.clone());
        Some(config)
    }

    pub fn set_security_policy(&mut self, policy: &security::Policy) -> Result<&mut Self, Error> {
        unsafe {
            s2n_connection_set_cipher_preferences(
                self.connection.as_ptr(),
                policy.as_cstr().as_ptr(),
            )
            .into_result()
        }?;
        Ok(self)
    }

    /// provides a smooth transition from s2n_connection_prefer_low_latency to s2n_connection_prefer_throughput.
    ///
    /// s2n_send uses small TLS records that fit into a single TCP segment for the first
    /// resize_threshold bytes (capped at 8M) of data, then resets the record size back to a
    /// single segment after timeout_threshold seconds of inactivity.
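    ///
    /// A short usage sketch (the values are illustrative only, given an established `conn`):
    /// ```ignore
    /// // Use small records for the first 1MB sent, then switch to large records;
    /// // fall back to small records after 10 seconds of inactivity.
    /// conn.set_dynamic_record_threshold(1024 * 1024, 10)?;
    /// ```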
    pub fn set_dynamic_record_threshold(
        &mut self,
        resize_threshold: u32,
        timeout_threshold: u16,
    ) -> Result<&mut Self, Error> {
        unsafe {
            s2n_connection_set_dynamic_record_threshold(
                self.connection.as_ptr(),
                resize_threshold,
                timeout_threshold,
            )
            .into_result()
        }?;
        Ok(self)
    }

    /// sets the application protocol preferences on an s2n_connection object.
    ///
    /// protocols is a list in order of preference, with most preferred protocol first, and of
    /// length protocol_count. When acting as a client the protocol list is included in the
    /// Client Hello message as the ALPN extension. As a server, the list is used to negotiate
    /// a mutual application protocol with the client. After the negotiation for the connection has
    /// completed, the agreed upon protocol can be retrieved with s2n_get_application_protocol
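    ///
    /// A short usage sketch (illustrative only):
    /// ```ignore
    /// let mut conn = Connection::new_client();
    /// // Prefer HTTP/2, falling back to HTTP/1.1, via ALPN.
    /// conn.set_application_protocol_preference(["h2", "http/1.1"])?;
    /// ```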
    pub fn set_application_protocol_preference<P: IntoIterator<Item = I>, I: AsRef<[u8]>>(
        &mut self,
        protocols: P,
    ) -> Result<&mut Self, Error> {
        // reset the list
        unsafe {
            s2n_connection_set_protocol_preferences(self.connection.as_ptr(), core::ptr::null(), 0)
                .into_result()
        }?;

        for protocol in protocols {
            self.append_application_protocol_preference(protocol.as_ref())?;
        }

        Ok(self)
    }

    pub fn append_application_protocol_preference(
        &mut self,
        protocol: &[u8],
    ) -> Result<&mut Self, Error> {
        unsafe {
            s2n_connection_append_protocol_preference(
                self.connection.as_ptr(),
                protocol.as_ptr(),
                protocol
                    .len()
                    .try_into()
                    .map_err(|_| Error::INVALID_INPUT)?,
            )
            .into_result()
        }?;
        Ok(self)
    }

    /// may be used to receive data with callbacks defined by the user.
    pub fn set_receive_callback(&mut self, callback: s2n_recv_fn) -> Result<&mut Self, Error> {
        unsafe { s2n_connection_set_recv_cb(self.connection.as_ptr(), callback).into_result() }?;
        Ok(self)
    }

    /// # Safety
    ///
    /// The `context` pointer must live at least as long as the connection
    pub unsafe fn set_receive_context(&mut self, context: *mut c_void) -> Result<&mut Self, Error> {
        s2n_connection_set_recv_ctx(self.connection.as_ptr(), context).into_result()?;
        Ok(self)
    }

    /// may be used to receive data with callbacks defined by the user.
    pub fn set_send_callback(&mut self, callback: s2n_send_fn) -> Result<&mut Self, Error> {
        unsafe { s2n_connection_set_send_cb(self.connection.as_ptr(), callback).into_result() }?;
        Ok(self)
    }

    /// Sets the callback to use for verifying that a hostname from an X.509 certificate is
    /// trusted.
    ///
    /// The callback may be called more than once during certificate validation as each SAN on
    /// the certificate will be checked.
    ///
    /// Corresponds to the underlying C API
    /// [s2n_connection_set_verify_host_callback](https://aws.github.io/s2n-tls/doxygen/s2n_8h.html).
    pub fn set_verify_host_callback<T: 'static + VerifyHostNameCallback>(
        &mut self,
        handler: T,
    ) -> Result<&mut Self, Error> {
        unsafe extern "C" fn verify_host_cb_fn(
            host_name: *const ::libc::c_char,
            host_name_len: usize,
            context: *mut ::libc::c_void,
        ) -> u8 {
            let context = &mut *(context as *mut Context);
            let handler = context.verify_host_callback.as_mut().unwrap();
            verify_host(host_name, host_name_len, handler)
        }

        self.context_mut().verify_host_callback = Some(Box::new(handler));
        unsafe {
            s2n_connection_set_verify_host_callback(
                self.connection.as_ptr(),
                Some(verify_host_cb_fn),
                self.context_mut() as *mut Context as *mut c_void,
            )
            .into_result()
        }?;
        Ok(self)
    }

    /// # Safety
    ///
    /// The `context` pointer must live at least as long as the connection
    pub unsafe fn set_send_context(&mut self, context: *mut c_void) -> Result<&mut Self, Error> {
        s2n_connection_set_send_ctx(self.connection.as_ptr(), context).into_result()?;
        Ok(self)
    }

    /// Connections preferring low latency will be encrypted using small record sizes that
    /// can be decrypted sooner by the recipient.
    pub fn prefer_low_latency(&mut self) -> Result<&mut Self, Error> {
        unsafe { s2n_connection_prefer_low_latency(self.connection.as_ptr()).into_result() }?;
        Ok(self)
    }

    /// Connections preferring throughput will use large record sizes that minimize overhead.
    pub fn prefer_throughput(&mut self) -> Result<&mut Self, Error> {
        unsafe { s2n_connection_prefer_throughput(self.connection.as_ptr()).into_result() }?;
        Ok(self)
    }

    /// wipes and frees the in and out buffers associated with a connection.
    ///
    /// This function may be called when a connection is in keep-alive or idle state to
    /// reduce memory overhead of long lived connections.
    pub fn release_buffers(&mut self) -> Result<&mut Self, Error> {
        unsafe { s2n_connection_release_buffers(self.connection.as_ptr()).into_result() }?;
        Ok(self)
    }

    pub fn use_corked_io(&mut self) -> Result<&mut Self, Error> {
        unsafe { s2n_connection_use_corked_io(self.connection.as_ptr()).into_result() }?;
        Ok(self)
    }

    /// wipes an existing connection and allows it to be reused.
    ///
    /// This method erases all data associated with a connection including pending reads.
    /// This function should be called after all I/O is completed and s2n_shutdown has been
    /// called. Reusing the same connection handle(s) is more performant than repeatedly
    /// calling s2n_connection_new and s2n_connection_free
    pub fn wipe(&mut self) -> Result<&mut Self, Error> {
        let mode = self.mode();
        unsafe {
            // Wiping the connection will wipe the pointer to the context,
            // so retrieve and drop that memory first.
            let ctx = self.context_mut();
            drop(Box::from_raw(ctx));

            s2n_connection_wipe(self.connection.as_ptr()).into_result()
        }?;

        self.init_context(mode);
        Ok(self)
    }

    /// Performs the TLS handshake to completion
    ///
    /// Multiple callbacks can be configured for a connection and config, but
    /// [`Self::poll_negotiate()`] can only execute and block on one callback at a time.
    /// The handshake is sequential, not concurrent, and stops execution when
    /// it encounters an async callback.
    ///
    /// The handshake does not continue execution (and therefore can't call
    /// any other callbacks) until the blocking async task reports completion.
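    ///
    /// A polling sketch (illustrative only; a real application would wait for
    /// socket readiness or a waker instead of spinning, given an established `conn`):
    /// ```ignore
    /// use core::task::Poll;
    ///
    /// loop {
    ///     match conn.poll_negotiate() {
    ///         Poll::Ready(Ok(_)) => break, // handshake complete
    ///         Poll::Ready(Err(err)) => panic!("handshake failed: {:?}", err),
    ///         Poll::Pending => continue,   // blocked on I/O or an async callback
    ///     }
    /// }
    /// ```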
    pub fn poll_negotiate(&mut self) -> Poll<Result<&mut Self, Error>> {
        let mut blocked = s2n_blocked_status::NOT_BLOCKED;
        if !core::mem::replace(&mut self.context_mut().connection_initialized, true) {
            if let Some(config) = self.config() {
                if let Some(callback) = config.context().connection_initializer.as_ref() {
                    let future = callback.initialize_connection(self);
                    AsyncCallback::trigger(future, self);
                }
            }
        }

        loop {
            // check if an async task exists and poll it to completion
            if let Some(fut) = self.poll_async_task() {
                match fut {
                    Poll::Ready(Ok(())) => {
                        // happy case:
                        // continue and call s2n_negotiate to make progress on the handshake
                    }
                    Poll::Ready(Err(err)) => {
                        // error case:
                        // if the callback returned an error then abort the handshake
                        return Poll::Ready(Err(err));
                    }
                    Poll::Pending => return Poll::Pending,
                }
            }

            let res = unsafe { s2n_negotiate(self.connection.as_ptr(), &mut blocked).into_poll() };

            match res {
                Poll::Ready(res) => {
                    let res = res.map(|_| self);
                    return Poll::Ready(res);
                }
                Poll::Pending => {
                    // if there is no connection_future then return, otherwise continue
                    // looping and polling the future
                    if self.context_mut().async_callback.is_none() {
                        return Poll::Pending;
                    }
                }
            }
        }
    }

    // Poll the connection future if it exists.
    //
    // If the future returns Pending, then re-set it back on the Connection.
    fn poll_async_task(&mut self) -> Option<Poll<Result<(), Error>>> {
        self.take_async_callback().map(|mut callback| {
            let waker = self.waker().ok_or(Error::MISSING_WAKER)?.clone();
            let mut ctx = core::task::Context::from_waker(&waker);
            match Pin::new(&mut callback).poll(self, &mut ctx) {
                Poll::Ready(result) => Poll::Ready(result),
                Poll::Pending => {
                    // replace the future if it hasn't completed yet
                    self.set_async_callback(callback);
                    Poll::Pending
                }
            }
        })
    }

    /// Encrypts and sends data on a connection where
    /// [negotiate](`Self::poll_negotiate`) has succeeded.
    ///
    /// Returns the number of bytes written, and may indicate a partial write.
    pub fn poll_send(&mut self, buf: &[u8]) -> Poll<Result<usize, Error>> {
        let mut blocked = s2n_blocked_status::NOT_BLOCKED;
        let buf_len: isize = buf.len().try_into().map_err(|_| Error::INVALID_INPUT)?;
        let buf_ptr = buf.as_ptr() as *const ::libc::c_void;
        unsafe { s2n_send(self.connection.as_ptr(), buf_ptr, buf_len, &mut blocked).into_poll() }
    }

    /// Reads and decrypts data from a connection where
    /// [negotiate](`Self::poll_negotiate`) has succeeded.
    ///
    /// Returns the number of bytes read, and may indicate a partial read.
    /// 0 bytes returned indicates EOF due to connection closure.
    pub fn poll_recv(&mut self, buf: &mut [u8]) -> Poll<Result<usize, Error>> {
        let mut blocked = s2n_blocked_status::NOT_BLOCKED;
        let buf_len: isize = buf.len().try_into().map_err(|_| Error::INVALID_INPUT)?;
        let buf_ptr = buf.as_ptr() as *mut ::libc::c_void;
        unsafe { s2n_recv(self.connection.as_ptr(), buf_ptr, buf_len, &mut blocked).into_poll() }
    }

    /// Reads and decrypts data from a connection where
    /// [negotiate](`Self::poll_negotiate`) has succeeded
    /// into an uninitialized buffer.
    ///
    /// Returns the number of bytes read, and may indicate a partial read.
    /// 0 bytes returned indicates EOF due to connection closure.
    ///
    /// Safety: this function is always safe to call, and additionally:
    /// 1. It will never deinitialize any bytes in `buf`.
    /// 2. If it returns `Ok(n)`, then the first `n` bytes of `buf`
    /// will have been initialized by this function.
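    ///
    /// A usage sketch (illustrative only, given an established `conn`):
    /// ```ignore
    /// use core::mem::MaybeUninit;
    /// use core::task::Poll;
    ///
    /// let mut buf = [MaybeUninit::<u8>::uninit(); 4096];
    /// if let Poll::Ready(Ok(n)) = conn.poll_recv_uninitialized(&mut buf) {
    ///     // Safety: the first `n` bytes are guaranteed to be initialized.
    ///     let bytes: &[u8] =
    ///         unsafe { core::slice::from_raw_parts(buf.as_ptr() as *const u8, n) };
    ///     // use `bytes` ...
    /// }
    /// ```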
    pub fn poll_recv_uninitialized(
        &mut self,
        buf: &mut [MaybeUninit<u8>],
    ) -> Poll<Result<usize, Error>> {
        let mut blocked = s2n_blocked_status::NOT_BLOCKED;
        let buf_len: isize = buf.len().try_into().map_err(|_| Error::INVALID_INPUT)?;
        let buf_ptr = buf.as_ptr() as *mut ::libc::c_void;

        // Safety:
        // 1. s2n_recv never writes uninitialized garbage to `buf`.
        // 2. if s2n_recv returns `+n`, it guarantees that the first
        // `n` bytes of `buf` have been initialized, which allows this
        // function to return `Ok(n)`
        unsafe { s2n_recv(self.connection.as_ptr(), buf_ptr, buf_len, &mut blocked).into_poll() }
    }

    /// Attempts to flush any data previously buffered by a call to [send](`Self::poll_send`).
    pub fn poll_flush(&mut self) -> Poll<Result<&mut Self, Error>> {
        self.poll_send(&[0; 0]).map_ok(|_| self)
    }

    /// Gets the number of bytes that are currently available in the buffer to be read.
    pub fn peek_len(&self) -> usize {
        unsafe { s2n_peek(self.connection.as_ptr()) as usize }
    }

    /// Attempts a graceful shutdown of the TLS connection.
    ///
    /// The shutdown is not complete until the necessary shutdown messages
    /// have been successfully sent and received. If the peer does not respond
    /// correctly, the graceful shutdown may fail.
    pub fn poll_shutdown(&mut self) -> Poll<Result<&mut Self, Error>> {
        if !self.remaining_blinding_delay()?.is_zero() {
            return Poll::Pending;
        }
        let mut blocked = s2n_blocked_status::NOT_BLOCKED;
        unsafe {
            s2n_shutdown(self.connection.as_ptr(), &mut blocked)
                .into_poll()
                .map_ok(|_| self)
        }
    }

    /// Attempts a graceful shutdown of the write side of a TLS connection.
    ///
    /// Unlike [`Self::poll_shutdown`], no response from the peer is necessary.
    /// If using TLS1.3, the connection can continue to be used for reading afterwards.
    pub fn poll_shutdown_send(&mut self) -> Poll<Result<&mut Self, Error>> {
        if !self.remaining_blinding_delay()?.is_zero() {
            return Poll::Pending;
        }
        let mut blocked = s2n_blocked_status::NOT_BLOCKED;
        unsafe {
            s2n_shutdown_send(self.connection.as_ptr(), &mut blocked)
                .into_poll()
                .map_ok(|_| self)
        }
    }

    /// Returns the TLS alert code, if any
    pub fn alert(&self) -> Option<u8> {
        let alert =
            unsafe { s2n_connection_get_alert(self.connection.as_ptr()).into_result() }.ok()?;
        Some(alert as u8)
    }

    /// Sets the server name value for the connection
    pub fn set_server_name(&mut self, server_name: &str) -> Result<&mut Self, Error> {
        let server_name = std::ffi::CString::new(server_name).map_err(|_| Error::INVALID_INPUT)?;
        unsafe {
            s2n_set_server_name(self.connection.as_ptr(), server_name.as_ptr()).into_result()
        }?;
        Ok(self)
    }

    /// Get the server name associated with the connection client hello.
    pub fn server_name(&self) -> Option<&str> {
        unsafe {
            let server_name = s2n_get_server_name(self.connection.as_ptr());
            match server_name.into_result() {
                Ok(server_name) => CStr::from_ptr(server_name).to_str().ok(),
                Err(_) => None,
            }
        }
    }

    /// Adds a session ticket from a previous TLS connection to create a resumed session
    pub fn set_session_ticket(&mut self, session: &[u8]) -> Result<&mut Self, Error> {
        unsafe {
            s2n_connection_set_session(self.connection.as_ptr(), session.as_ptr(), session.len())
                .into_result()
        }?;
        Ok(self)
    }

    /// Sets a Waker on the connection context or clears it if `None` is passed.
    pub fn set_waker(&mut self, waker: Option<&Waker>) -> Result<&mut Self, Error> {
        let ctx = self.context_mut();

        if let Some(waker) = waker {
            if let Some(prev_waker) = ctx.waker.as_mut() {
                // only replace the Waker if they dont reference the same task
                if !prev_waker.will_wake(waker) {
                    *prev_waker = waker.clone();
                }
            } else {
                ctx.waker = Some(waker.clone());
            }
        } else {
            ctx.waker = None;
        }
        Ok(self)
    }

    /// Returns the Waker set on the connection context.
    pub fn waker(&self) -> Option<&Waker> {
        let ctx = self.context();
        ctx.waker.as_ref()
    }

    /// [`Option::take`]s the `connection_future` stored on the
    /// connection context.
    ///
    /// If the Future returns `Poll::Pending` and has not completed, then it
    /// should be re-set using [`Self::set_async_callback()`].
    fn take_async_callback(&mut self) -> Option<AsyncCallback> {
        let ctx = self.context_mut();
        ctx.async_callback.take()
    }

    /// Sets a `connection_future` on the connection context.
    pub(crate) fn set_async_callback(&mut self, callback: AsyncCallback) {
        let ctx = self.context_mut();
        debug_assert!(ctx.async_callback.is_none());
        ctx.async_callback = Some(callback);
    }

    /// Retrieve a mutable reference to the [`Context`] stored on the connection.
    fn context_mut(&mut self) -> &mut Context {
        unsafe {
            let ctx = s2n_connection_get_ctx(self.connection.as_ptr())
                .into_result()
                .unwrap();
            &mut *(ctx.as_ptr() as *mut Context)
        }
    }

    /// Retrieve a reference to the [`Context`] stored on the connection.
    fn context(&self) -> &Context {
        unsafe {
            let ctx = s2n_connection_get_ctx(self.connection.as_ptr())
                .into_result()
                .unwrap();
            &*(ctx.as_ptr() as *mut Context)
        }
    }

    /// Mark that the server_name extension was used to configure the connection.
    pub fn server_name_extension_used(&mut self) {
        // TODO: requiring the application to call this method is a pretty sharp edge.
        // Figure out if it's possible to automatically call this from the Rust bindings.
        unsafe {
            s2n_connection_server_name_extension_used(self.connection.as_ptr())
                .into_result()
                .unwrap();
        }
    }

    /// Check if client auth was used for a connection.
    ///
    /// This is only relevant if [`ClientAuthType::Optional`] was used.
    pub fn client_cert_used(&self) -> bool {
        unsafe { s2n_connection_client_cert_used(self.connection.as_ptr()) == 1 }
    }

    /// Retrieves the raw bytes of the client cert chain received from the peer, if present.
    pub fn client_cert_chain_bytes(&self) -> Result<Option<&[u8]>, Error> {
        if !self.client_cert_used() {
            return Ok(None);
        }

        let mut chain = std::ptr::null_mut();
        let mut len = 0;
        unsafe {
            s2n_connection_get_client_cert_chain(self.connection.as_ptr(), &mut chain, &mut len)
                .into_result()?;
        }

        if chain.is_null() || len == 0 {
            return Ok(None);
        }

        unsafe { Ok(Some(std::slice::from_raw_parts(chain, len as usize))) }
    }

    // The memory backing the ClientHello is owned by the Connection, so we
    // tie the ClientHello to the lifetime of the Connection. This is validated
    // with a doc test that ensures the ClientHello is invalid once the
    // connection has gone out of scope.
    //
    /// Returns a reference to the ClientHello associated with the connection.
    /// ```compile_fail
    /// use s2n_tls::client_hello::{ClientHello, FingerprintType};
    /// use s2n_tls::connection::Connection;
    /// use s2n_tls::enums::Mode;
    ///
    /// let mut conn = Connection::new(Mode::Server);
    /// let mut client_hello: &ClientHello = conn.client_hello().unwrap();
    /// let mut hash = Vec::new();
    /// drop(conn);
    /// client_hello.fingerprint_hash(FingerprintType::JA3, &mut hash);
    /// ```
    ///
    /// The compilation could be failing for a variety of reasons, so make sure
    /// that the test case is actually good.
    /// ```no_run
    /// use s2n_tls::client_hello::{ClientHello, FingerprintType};
    /// use s2n_tls::connection::Connection;
    /// use s2n_tls::enums::Mode;
    ///
    /// let mut conn = Connection::new(Mode::Server);
    /// let mut client_hello: &ClientHello = conn.client_hello().unwrap();
    /// let mut hash = Vec::new();
    /// client_hello.fingerprint_hash(FingerprintType::JA3, &mut hash);
    /// drop(conn);
    /// ```
    #[cfg(feature = "unstable-fingerprint")]
    pub fn client_hello(&self) -> Result<&crate::client_hello::ClientHello, Error> {
        let mut handle =
            unsafe { s2n_connection_get_client_hello(self.connection.as_ptr()).into_result()? };
        Ok(crate::client_hello::ClientHello::from_ptr(unsafe {
            handle.as_mut()
        }))
    }

    pub(crate) fn mark_client_hello_cb_done(&mut self) -> Result<(), Error> {
        unsafe {
            s2n_client_hello_cb_done(self.connection.as_ptr()).into_result()?;
        }
        Ok(())
    }

    pub fn actual_protocol_version(&self) -> Result<Version, Error> {
        let version = unsafe {
            s2n_connection_get_actual_protocol_version(self.connection.as_ptr()).into_result()?
        };
        version.try_into()
    }

    pub fn handshake_type(&self) -> Result<&str, Error> {
        let handshake = unsafe {
            s2n_connection_get_handshake_type_name(self.connection.as_ptr()).into_result()?
        };
        // The strings returned by s2n_connection_get_handshake_type_name
        // are static and immutable after they are first calculated
        static_const_str!(handshake)
    }

    pub fn cipher_suite(&self) -> Result<&str, Error> {
        let cipher = unsafe { s2n_connection_get_cipher(self.connection.as_ptr()).into_result()? };
        // The strings returned by s2n_connection_get_cipher
        // are static and immutable since they are const fields on static const structs
        static_const_str!(cipher)
    }

    pub fn selected_curve(&self) -> Result<&str, Error> {
        let curve = unsafe { s2n_connection_get_curve(self.connection.as_ptr()).into_result()? };
        static_const_str!(curve)
    }

    pub fn selected_signature_algorithm(&self) -> Result<SignatureAlgorithm, Error> {
        let mut sig_alg = s2n_tls_signature_algorithm::ANONYMOUS;
        unsafe {
            s2n_connection_get_selected_signature_algorithm(self.connection.as_ptr(), &mut sig_alg)
                .into_result()?;
        }
        sig_alg.try_into()
    }

    pub fn selected_hash_algorithm(&self) -> Result<HashAlgorithm, Error> {
        let mut hash_alg = s2n_tls_hash_algorithm::NONE;
        unsafe {
            s2n_connection_get_selected_digest_algorithm(self.connection.as_ptr(), &mut hash_alg)
                .into_result()?;
        }
        hash_alg.try_into()
    }

    pub fn selected_client_signature_algorithm(&self) -> Result<Option<SignatureAlgorithm>, Error> {
        let mut sig_alg = s2n_tls_signature_algorithm::ANONYMOUS;
        unsafe {
            s2n_connection_get_selected_client_cert_signature_algorithm(
                self.connection.as_ptr(),
                &mut sig_alg,
            )
            .into_result()?;
        }
        Ok(match sig_alg {
            s2n_tls_signature_algorithm::ANONYMOUS => None,
            sig_alg => Some(sig_alg.try_into()?),
        })
    }

    pub fn selected_client_hash_algorithm(&self) -> Result<Option<HashAlgorithm>, Error> {
        let mut hash_alg = s2n_tls_hash_algorithm::NONE;
        unsafe {
            s2n_connection_get_selected_client_cert_digest_algorithm(
                self.connection.as_ptr(),
                &mut hash_alg,
            )
            .into_result()?;
        }
        Ok(match hash_alg {
            s2n_tls_hash_algorithm::NONE => None,
            hash_alg => Some(hash_alg.try_into()?),
        })
    }

    /// Provides access to the TLS-Exporter functionality.
    ///
    /// See https://datatracker.ietf.org/doc/html/rfc5705 and https://www.rfc-editor.org/rfc/rfc8446.
    ///
    /// This is currently only available with TLS 1.3 connections which have finished a handshake.
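    ///
    /// A usage sketch (the label is a placeholder; illustrative only, given an established `conn`):
    /// ```ignore
    /// // Derive 32 bytes of keying material after the TLS 1.3 handshake completes.
    /// let mut output = [0u8; 32];
    /// conn.tls_exporter(b"EXPORTER-my-label", b"", &mut output)?;
    /// ```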
    pub fn tls_exporter(
        &self,
        label: &[u8],
        context: &[u8],
        output: &mut [u8],
    ) -> Result<(), Error> {
        unsafe {
            s2n_connection_tls_exporter(
                self.connection.as_ptr(),
                label.as_ptr(),
                label.len().try_into().map_err(|_| Error::INVALID_INPUT)?,
                context.as_ptr(),
                context.len().try_into().map_err(|_| Error::INVALID_INPUT)?,
                output.as_mut_ptr(),
                output.len().try_into().map_err(|_| Error::INVALID_INPUT)?,
            )
            .into_result()
            .map(|_| ())
        }
    }
}

struct Context {
    mode: Mode,
    waker: Option<Waker>,
    async_callback: Option<AsyncCallback>,
    verify_host_callback: Option<Box<dyn VerifyHostNameCallback>>,
    connection_initialized: bool,
}

impl Context {
    fn new(mode: Mode) -> Self {
        Context {
            mode,
            waker: None,
            async_callback: None,
            verify_host_callback: None,
            connection_initialized: false,
        }
    }
}

#[cfg(feature = "quic")]
impl Connection {
    pub fn enable_quic(&mut self) -> Result<&mut Self, Error> {
        unsafe { s2n_connection_enable_quic(self.connection.as_ptr()).into_result() }?;
        Ok(self)
    }

    pub fn set_quic_transport_parameters(&mut self, buffer: &[u8]) -> Result<&mut Self, Error> {
        unsafe {
            s2n_connection_set_quic_transport_parameters(
                self.connection.as_ptr(),
                buffer.as_ptr(),
                buffer.len().try_into().map_err(|_| Error::INVALID_INPUT)?,
            )
            .into_result()
        }?;
        Ok(self)
    }

    pub fn quic_transport_parameters(&mut self) -> Result<&[u8], Error> {
        let mut ptr = core::ptr::null();
        let mut len = 0;
        unsafe {
            s2n_connection_get_quic_transport_parameters(
                self.connection.as_ptr(),
                &mut ptr,
                &mut len,
            )
            .into_result()
        }?;
        let buffer = unsafe { core::slice::from_raw_parts(ptr, len as _) };
        Ok(buffer)
    }

    /// # Safety
    ///
    /// The `context` pointer must live at least as long as the connection
    pub unsafe fn set_secret_callback(
        &mut self,
        callback: s2n_secret_cb,
        context: *mut c_void,
    ) -> Result<&mut Self, Error> {
        s2n_connection_set_secret_callback(self.connection.as_ptr(), callback, context)
            .into_result()?;
        Ok(self)
    }

    pub fn quic_process_post_handshake_message(&mut self) -> Result<&mut Self, Error> {
        let mut blocked = s2n_blocked_status::NOT_BLOCKED;
        unsafe {
            s2n_recv_quic_post_handshake_message(self.connection.as_ptr(), &mut blocked)
                .into_result()
        }?;
        Ok(self)
    }

    /// Allows the quic library to check if session tickets are expected
    pub fn are_session_tickets_enabled(&self) -> bool {
        unsafe { s2n_connection_are_session_tickets_enabled(self.connection.as_ptr()) }
    }
}

impl AsRef<Connection> for Connection {
    fn as_ref(&self) -> &Connection {
        self
    }
}

impl AsMut<Connection> for Connection {
    fn as_mut(&mut self) -> &mut Connection {
        self
    }
}

impl Drop for Connection {
    fn drop(&mut self) {
        // ignore failures since there's not much we can do about it
        unsafe {
            // clean up context
            let prev_ctx = self.context_mut();
            drop(Box::from_raw(prev_ctx));
            let _ = s2n_connection_set_ctx(self.connection.as_ptr(), core::ptr::null_mut())
                .into_result();

            // cleanup config
            let _ = self.drop_config();

            // cleanup connection
            let _ = s2n_connection_free(self.connection.as_ptr()).into_result();
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // ensure the connection context is send
    #[test]
    fn context_send_test() {
        fn assert_send<T: 'static + Send>() {}
        assert_send::<Context>();
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/connection/builder.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    config::Config,
    connection::Connection,
    enums::Mode,
    error::Error,
    pool::{Pool, PooledConnection},
};

/// A trait indicating that a structure can produce connections.
pub trait Builder: Clone {
    type Output: AsMut<Connection> + AsRef<Connection>;
    fn build_connection(&self, mode: Mode) -> Result<Self::Output, Error>;
}

/// Produces new connections with the given Config set.
impl Builder for Config {
    type Output = Connection;
    fn build_connection(&self, mode: Mode) -> Result<Self::Output, Error> {
        let mut conn = Connection::new(mode);
        conn.set_config(self.clone())?;
        Ok(conn)
    }
}

/// Produces new connections from a pool of reusable connections.
impl<T: Pool + Clone> Builder for T {
    type Output = PooledConnection<T>;
    fn build_connection(&self, mode: Mode) -> Result<Self::Output, Error> {
        if mode == self.mode() {
            Ok(PooledConnection::new(self)?)
        } else {
            Err(Error::INVALID_INPUT)
        }
    }
}

/// Produces new connections from a builder, then modifies them.
///
/// Can be used to apply connection-level config, for example
/// when using a [`crate::pool::ConfigPool`].
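///
/// A short sketch (illustrative only; `"example.com"` is a placeholder): every
/// connection produced by the wrapped builder also gets a server name applied.
/// ```ignore
/// let builder = ModifiedBuilder::new(config.clone(), |conn| {
///     conn.set_server_name("example.com")
/// });
/// let conn = builder.build_connection(Mode::Client)?;
/// ```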
#[derive(Clone)]
pub struct ModifiedBuilder<B, F>
where
    F: Fn(&mut Connection) -> Result<&mut Connection, Error> + Clone,
{
    builder: B,
    modifier: F,
}

impl<B, F> ModifiedBuilder<B, F>
where
    F: Fn(&mut Connection) -> Result<&mut Connection, Error> + Clone,
{
    pub fn new(builder: B, modifier: F) -> Self {
        Self { builder, modifier }
    }
}

impl<B: Builder, F> Builder for ModifiedBuilder<B, F>
where
    F: Fn(&mut Connection) -> Result<&mut Connection, Error> + Clone,
{
    type Output = B::Output;
    fn build_connection(&self, mode: Mode) -> Result<Self::Output, Error> {
        let mut conn = self.builder.build_connection(mode)?;
        (self.modifier)(conn.as_mut())?;
        Ok(conn)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::pool::ConfigPoolBuilder;

    #[test]
    fn config_builder() -> Result<(), Box<dyn std::error::Error>> {
        let config = Config::default();
        let conn = config.build_connection(Mode::Server)?;
        assert_eq!(conn.config(), Some(config));
        Ok(())
    }

    #[test]
    fn pool_builder() -> Result<(), Box<dyn std::error::Error>> {
        let config = Config::default();
        let pool = ConfigPoolBuilder::new(Mode::Server, config.clone()).build();
        let conn = pool.build_connection(Mode::Server)?;
        assert_eq!(conn.as_ref().config(), Some(config));
        Ok(())
    }

    #[test]
    fn modified_builder() -> Result<(), Box<dyn std::error::Error>> {
        let config_a = Config::default();
        let config_b = Config::default();
        assert!(config_a != config_b);

        let builder =
            ModifiedBuilder::new(config_a.clone(), |conn| conn.set_config(config_b.clone()));

        let conn = builder.build_connection(Mode::Server)?;
        assert!(conn.config() != Some(config_a));
        assert_eq!(conn.config(), Some(config_b));
        Ok(())
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/enums.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

#![allow(clippy::missing_safety_doc)] // TODO add safety docs

use crate::error::Error;
use core::convert::TryFrom;
use s2n_tls_sys::*;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum CallbackResult {
    Success,
    Failure,
}

impl From<CallbackResult> for s2n_status_code::Type {
    fn from(input: CallbackResult) -> s2n_status_code::Type {
        match input {
            CallbackResult::Success => s2n_status_code::SUCCESS,
            CallbackResult::Failure => s2n_status_code::FAILURE,
        }
    }
}

impl<T, E> From<Result<T, E>> for CallbackResult {
    fn from(result: Result<T, E>) -> CallbackResult {
        match result {
            Ok(_) => CallbackResult::Success,
            Err(_) => CallbackResult::Failure,
        }
    }
}

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Mode {
    Server,
    Client,
}

impl From<Mode> for s2n_mode::Type {
    fn from(input: Mode) -> s2n_mode::Type {
        match input {
            Mode::Server => s2n_mode::SERVER,
            Mode::Client => s2n_mode::CLIENT,
        }
    }
}

#[non_exhaustive]
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Version {
    SSLV2,
    SSLV3,
    TLS10,
    TLS11,
    TLS12,
    TLS13,
}

impl TryFrom<s2n_tls_version::Type> for Version {
    type Error = Error;

    fn try_from(input: s2n_tls_version::Type) -> Result<Self, Self::Error> {
        let version = match input {
            s2n_tls_version::SSLV2 => Self::SSLV2,
            s2n_tls_version::SSLV3 => Self::SSLV3,
            s2n_tls_version::TLS10 => Self::TLS10,
            s2n_tls_version::TLS11 => Self::TLS11,
            s2n_tls_version::TLS12 => Self::TLS12,
            s2n_tls_version::TLS13 => Self::TLS13,
            _ => return Err(Error::INVALID_INPUT),
        };
        Ok(version)
    }
}

#[non_exhaustive]
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Blinding {
    SelfService,
    BuiltIn,
}

impl From<Blinding> for s2n_blinding::Type {
    fn from(input: Blinding) -> s2n_blinding::Type {
        match input {
            Blinding::SelfService => s2n_blinding::SELF_SERVICE_BLINDING,
            Blinding::BuiltIn => s2n_blinding::BUILT_IN_BLINDING,
        }
    }
}

#[non_exhaustive]
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum ClientAuthType {
    Required,
    Optional,
    None,
}

impl From<ClientAuthType> for s2n_cert_auth_type::Type {
    fn from(input: ClientAuthType) -> s2n_cert_auth_type::Type {
        match input {
            ClientAuthType::Required => s2n_cert_auth_type::REQUIRED,
            ClientAuthType::Optional => s2n_cert_auth_type::OPTIONAL,
            ClientAuthType::None => s2n_cert_auth_type::NONE,
        }
    }
}

#[non_exhaustive]
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum AlertBehavior {
    FailOnWarnings,
    IgnoreWarnings,
}

impl From<AlertBehavior> for s2n_alert_behavior::Type {
    fn from(input: AlertBehavior) -> s2n_alert_behavior::Type {
        match input {
            AlertBehavior::FailOnWarnings => s2n_alert_behavior::FAIL_ON_WARNINGS,
            AlertBehavior::IgnoreWarnings => s2n_alert_behavior::IGNORE_WARNINGS,
        }
    }
}

#[non_exhaustive]
#[derive(Debug, PartialEq, Copy, Clone)]
#[allow(non_camel_case_types)]
pub enum SignatureAlgorithm {
    RSA_PKCS1,
    RSA_PSS_RSAE,
    RSA_PSS_PSS,
    ECDSA,
}

impl TryFrom<s2n_tls_signature_algorithm::Type> for SignatureAlgorithm {
    type Error = Error;

    fn try_from(input: s2n_tls_signature_algorithm::Type) -> Result<Self, Self::Error> {
        let version = match input {
            s2n_tls_signature_algorithm::RSA => Self::RSA_PKCS1,
            s2n_tls_signature_algorithm::RSA_PSS_RSAE => Self::RSA_PSS_RSAE,
            s2n_tls_signature_algorithm::RSA_PSS_PSS => Self::RSA_PSS_PSS,
            s2n_tls_signature_algorithm::ECDSA => Self::ECDSA,
            _ => return Err(Error::INVALID_INPUT),
        };
        Ok(version)
    }
}

#[non_exhaustive]
#[derive(Debug, PartialEq, Copy, Clone)]
#[allow(non_camel_case_types)]
pub enum HashAlgorithm {
    MD5,
    SHA1,
    SHA224,
    SHA256,
    SHA384,
    SHA512,
}

impl TryFrom<s2n_tls_hash_algorithm::Type> for HashAlgorithm {
    type Error = Error;

    fn try_from(input: s2n_tls_hash_algorithm::Type) -> Result<Self, Self::Error> {
        let version = match input {
            s2n_tls_hash_algorithm::MD5 => Self::MD5,
            s2n_tls_hash_algorithm::SHA1 => Self::SHA1,
            s2n_tls_hash_algorithm::SHA224 => Self::SHA224,
            s2n_tls_hash_algorithm::SHA256 => Self::SHA256,
            s2n_tls_hash_algorithm::SHA384 => Self::SHA384,
            s2n_tls_hash_algorithm::SHA512 => Self::SHA512,
            _ => return Err(Error::INVALID_INPUT),
        };
        Ok(version)
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/error.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use core::{convert::TryInto, fmt, ptr::NonNull, task::Poll};
use errno::{errno, Errno};
use libc::c_char;
use s2n_tls_sys::*;
use std::{convert::TryFrom, ffi::CStr};

#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorType {
    UnknownErrorType,
    NoError,
    IOError,
    ConnectionClosed,
    Blocked,
    Alert,
    ProtocolError,
    InternalError,
    UsageError,
    Application,
}

#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorSource {
    Library,
    Bindings,
    Application,
}

impl From<libc::c_int> for ErrorType {
    fn from(input: libc::c_int) -> Self {
        match input as s2n_error_type::Type {
            s2n_error_type::OK => ErrorType::NoError,
            s2n_error_type::IO => ErrorType::IOError,
            s2n_error_type::CLOSED => ErrorType::ConnectionClosed,
            s2n_error_type::BLOCKED => ErrorType::Blocked,
            s2n_error_type::ALERT => ErrorType::Alert,
            s2n_error_type::PROTO => ErrorType::ProtocolError,
            s2n_error_type::INTERNAL => ErrorType::InternalError,
            s2n_error_type::USAGE => ErrorType::UsageError,
            _ => ErrorType::UnknownErrorType,
        }
    }
}

enum Context {
    InvalidInput,
    MissingWaker,
    Code(s2n_status_code::Type, Errno),
    Application(Box<dyn std::error::Error + Send + Sync + 'static>),
}

pub struct Error(Context);

pub trait Fallible {
    type Output;

    fn into_result(self) -> Result<Self::Output, Error>;
}

impl Fallible for s2n_status_code::Type {
    type Output = s2n_status_code::Type;

    fn into_result(self) -> Result<Self::Output, Error> {
        if self >= s2n_status_code::SUCCESS {
            Ok(self)
        } else {
            Err(Error::capture())
        }
    }
}

impl Fallible for isize {
    type Output = usize;

    fn into_result(self) -> Result<Self::Output, Error> {
        // Negative values can't be converted to a real size
        // and instead indicate an error.
        self.try_into().map_err(|_| Error::capture())
    }
}

impl Fallible for u64 {
    type Output = Self;

    /// Converts a u64 to a Result by checking for u64::MAX.
    ///
    /// If a method that returns an unsigned int is fallible,
    /// then the -1 error result wraps around to u64::MAX.
    ///
    /// For a u64 to be Fallible, a result of u64::MAX must not be
    /// possible without an error. For example, [`s2n_connection_get_delay`]
    /// can't return u64::MAX as a valid result because
    /// s2n-tls blinding delays are limited to 30s, or a return value of 3 * 10^10 ns,
    /// which is significantly less than u64::MAX. [`s2n_connection_get_delay`]
    /// would therefore only return u64::MAX for a -1 error result.
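    ///
    /// A small sketch of the convention (illustrative, not from the library docs):
    ///
    /// ```ignore
    /// // Any value other than u64::MAX converts to a successful result...
    /// assert!(0u64.into_result().is_ok());
    /// // ...while u64::MAX is treated as the wrapped -1 error sentinel.
    /// assert!(u64::MAX.into_result().is_err());
    /// ```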
    fn into_result(self) -> Result<Self::Output, Error> {
        if self != Self::MAX {
            Ok(self)
        } else {
            Err(Error::capture())
        }
    }
}

impl<T> Fallible for *mut T {
    type Output = NonNull<T>;

    fn into_result(self) -> Result<Self::Output, Error> {
        if let Some(value) = NonNull::new(self) {
            Ok(value)
        } else {
            Err(Error::capture())
        }
    }
}

impl<T> Fallible for *const T {
    type Output = *const T;

    fn into_result(self) -> Result<Self::Output, Error> {
        if !self.is_null() {
            Ok(self)
        } else {
            Err(Error::capture())
        }
    }
}

pub trait Pollable {
    type Output;

    fn into_poll(self) -> Poll<Result<Self::Output, Error>>;
}

impl<T: Fallible> Pollable for T {
    type Output = T::Output;

    fn into_poll(self) -> Poll<Result<Self::Output, Error>> {
        match self.into_result() {
            Ok(r) => Ok(r).into(),
            Err(err) if err.is_retryable() => Poll::Pending,
            Err(err) => Err(err).into(),
        }
    }
}

impl Error {
    pub(crate) const INVALID_INPUT: Error = Self(Context::InvalidInput);
    pub(crate) const MISSING_WAKER: Error = Self(Context::MissingWaker);

    /// Converts an io::Error into an s2n-tls Error
    pub fn io_error(err: std::io::Error) -> Error {
        let errno = err.raw_os_error().unwrap_or(1);
        errno::set_errno(errno::Errno(errno));
        s2n_status_code::FAILURE.into_result().unwrap_err()
    }

    /// An error occurred while running application code.
    ///
    /// Can be emitted from [`crate::callbacks::ConnectionFuture::poll()`] to indicate
    /// async task failure.
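    ///
    /// A minimal sketch of wrapping a custom error type (illustrative; the
    /// `MyCallbackError` type below is hypothetical):
    ///
    /// ```ignore
    /// #[derive(Debug)]
    /// struct MyCallbackError;
    ///
    /// impl std::fmt::Display for MyCallbackError {
    ///     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    ///         write!(f, "callback failed")
    ///     }
    /// }
    /// impl std::error::Error for MyCallbackError {}
    ///
    /// let error = Error::application(Box::new(MyCallbackError));
    /// assert!(error.application_error().is_some());
    /// ```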
    pub fn application(error: Box<dyn std::error::Error + Send + Sync + 'static>) -> Self {
        Self(Context::Application(error))
    }

    fn capture() -> Self {
        unsafe {
            let s2n_errno = s2n_errno_location();

            let code = *s2n_errno;

            // https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#error-handling
            //# To avoid possible confusion, s2n_errno should be cleared after processing
            //# an error: s2n_errno = S2N_ERR_T_OK
            *s2n_errno = s2n_error_type::OK as _;

            Self(Context::Code(code, errno()))
        }
    }

    pub fn name(&self) -> &'static str {
        match self.0 {
            Context::InvalidInput => "InvalidInput",
            Context::MissingWaker => "MissingWaker",
            Context::Application(_) => "ApplicationError",
            Context::Code(code, _) => unsafe {
                // Safety: we assume the string has a valid encoding coming from s2n
                cstr_to_str(s2n_strerror_name(code))
            },
        }
    }

    pub fn message(&self) -> &'static str {
        match self.0 {
            Context::InvalidInput => "A parameter was incorrect",
            Context::MissingWaker => {
                "Tried to perform an asynchronous operation without a configured waker"
            }
            Context::Application(_) => "An error occurred while executing application code",
            Context::Code(code, _) => unsafe {
                // Safety: we assume the string has a valid encoding coming from s2n
                cstr_to_str(s2n_strerror(code, core::ptr::null()))
            },
        }
    }

    pub fn debug(&self) -> Option<&'static str> {
        match self.0 {
            Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
            Context::Code(code, _) => unsafe {
                let debug_info = s2n_strerror_debug(code, core::ptr::null());

                // The debug string should be set to a constant static string
                // when an error occurs, but because it starts out as NULL
                // we should defend against mistakes.
                if debug_info.is_null() {
                    None
                } else {
                    // If the string is not null, then we can assume that
                    // it is constant and static.
                    Some(cstr_to_str(debug_info))
                }
            },
        }
    }

    pub fn kind(&self) -> ErrorType {
        match self.0 {
            Context::InvalidInput | Context::MissingWaker => ErrorType::UsageError,
            Context::Application(_) => ErrorType::Application,
            Context::Code(code, _) => unsafe { ErrorType::from(s2n_error_get_type(code)) },
        }
    }

    pub fn source(&self) -> ErrorSource {
        match self.0 {
            Context::InvalidInput | Context::MissingWaker => ErrorSource::Bindings,
            Context::Application(_) => ErrorSource::Application,
            Context::Code(_, _) => ErrorSource::Library,
        }
    }

    #[allow(clippy::borrowed_box)]
    /// Returns an [`std::error::Error`] if the error source was [`ErrorSource::Application`],
    /// otherwise returns None.
    pub fn application_error(&self) -> Option<&Box<dyn std::error::Error + Send + Sync + 'static>> {
        if let Self(Context::Application(err)) = self {
            Some(err)
        } else {
            None
        }
    }

    pub fn is_retryable(&self) -> bool {
        matches!(self.kind(), ErrorType::Blocked)
    }
}

#[cfg(feature = "quic")]
impl Error {
    /// s2n-tls does not send specific errors.
    ///
    /// However, we can attempt to map local errors into the alerts
    /// that we would have sent if we sent alerts.
    ///
    /// This API is currently incomplete and should not be relied upon.
    pub fn alert(&self) -> Option<u8> {
        match self.0 {
            Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
            Context::Code(code, _) => {
                let mut alert = 0;
                let r = unsafe { s2n_error_get_alert(code, &mut alert) };
                match r.into_result() {
                    Ok(_) => Some(alert),
                    Err(_) => None,
                }
            }
        }
    }
}

/// # Safety
///
/// The caller must ensure the char pointer must contain a valid
/// UTF-8 string from a trusted source
unsafe fn cstr_to_str(v: *const c_char) -> &'static str {
    let slice = CStr::from_ptr(v);
    let bytes = slice.to_bytes();
    core::str::from_utf8_unchecked(bytes)
}

impl TryFrom<std::io::Error> for Error {
    type Error = Error;
    fn try_from(value: std::io::Error) -> Result<Self, Self::Error> {
        let io_inner = value.into_inner().ok_or(Error::INVALID_INPUT)?;
        io_inner
            .downcast::<Self>()
            .map(|error| *error)
            .map_err(|_| Error::INVALID_INPUT)
    }
}

impl From<Error> for std::io::Error {
    fn from(input: Error) -> Self {
        let kind = match input.kind() {
            ErrorType::IOError => {
                if let Context::Code(_, errno) = input.0 {
                    let bare = std::io::Error::from_raw_os_error(errno.0);
                    bare.kind()
                } else {
                    std::io::ErrorKind::Other
                }
            }
            ErrorType::ConnectionClosed => std::io::ErrorKind::UnexpectedEof,
            _ => std::io::ErrorKind::Other,
        };
        std::io::Error::new(kind, input)
    }
}

impl fmt::Debug for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut s = f.debug_struct("Error");
        if let Context::Code(code, _) = self.0 {
            s.field("code", &code);
        }

        s.field("name", &self.name());
        s.field("message", &self.message());
        s.field("kind", &self.kind());
        s.field("source", &self.source());

        if let Some(debug) = self.debug() {
            s.field("debug", &debug);
        }

        // "errno" is only known to be meaningful for IOErrors.
        // However, it has occasionally proved useful for debugging
        // other errors, so include it for all errors.
        if let Context::Code(_, errno) = self.0 {
            s.field("errno", &errno.to_string());
        }

        s.finish()
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Self(Context::Application(err)) = self {
            err.fmt(f)
        } else {
            f.write_str(self.message())
        }
    }
}

impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // implement `source` in the same way `std::io::Error` implements it:
        // https://doc.rust-lang.org/std/io/struct.Error.html#method.source
        if let Self(Context::Application(err)) = self {
            err.source()
        } else {
            None
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{enums::Version, testing::client_hello::CustomError};
    use errno::set_errno;

    const FAILURE: isize = -1;

    // This relies on an implementation detail of s2n-tls errors,
    // and could make these tests brittle. However, the alternative
    // is a real handshake producing a real IO error, so just updating
    // this value if the definition of an IO error changes might be easier.
    const S2N_IO_ERROR_CODE: s2n_status_code::Type = 1 << 26;

    #[test]
    fn s2n_io_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
        set_errno(Errno(libc::ECONNRESET));
        unsafe {
            let s2n_errno_ptr = s2n_errno_location();
            *s2n_errno_ptr = S2N_IO_ERROR_CODE;
        }

        let s2n_error = FAILURE.into_result().unwrap_err();
        assert_eq!(ErrorType::IOError, s2n_error.kind());

        let io_error = std::io::Error::from(s2n_error);
        assert_eq!(std::io::ErrorKind::ConnectionReset, io_error.kind());
        assert!(io_error.into_inner().is_some());
        Ok(())
    }

    #[test]
    fn s2n_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
        set_errno(Errno(libc::ECONNRESET));
        unsafe {
            let s2n_errno_ptr = s2n_errno_location();
            *s2n_errno_ptr = S2N_IO_ERROR_CODE - 1;
        }

        let s2n_error = FAILURE.into_result().unwrap_err();
        assert_ne!(ErrorType::IOError, s2n_error.kind());

        let io_error = std::io::Error::from(s2n_error);
        assert_eq!(std::io::ErrorKind::Other, io_error.kind());
        assert!(io_error.into_inner().is_some());
        Ok(())
    }

    #[test]
    fn invalid_input_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
        let s2n_error = Version::try_from(0).unwrap_err();
        assert_eq!(ErrorType::UsageError, s2n_error.kind());

        let io_error = std::io::Error::from(s2n_error);
        assert_eq!(std::io::ErrorKind::Other, io_error.kind());
        assert!(io_error.into_inner().is_some());
        Ok(())
    }

    #[test]
    fn error_source() -> Result<(), Box<dyn std::error::Error>> {
        let bindings_error = Version::try_from(0).unwrap_err();
        assert_eq!(ErrorSource::Bindings, bindings_error.source());

        let library_error = FAILURE.into_result().unwrap_err();
        assert_eq!(ErrorSource::Library, library_error.source());

        Ok(())
    }

    #[test]
    fn application_error() {
        // test single level errors
        {
            let error = Error::application(Box::new(CustomError));

            let app_error = error.application_error().unwrap();
            let _custom_error = app_error.downcast_ref::<CustomError>().unwrap();
        }

        // make sure nested errors work
        {
            let io_error = std::io::Error::new(std::io::ErrorKind::Other, CustomError);
            let error = Error::application(Box::new(io_error));

            let app_error = error.application_error().unwrap();
            let io_error = app_error.downcast_ref::<std::io::Error>().unwrap();
            let _custom_error = io_error
                .get_ref()
                .unwrap()
                .downcast_ref::<CustomError>()
                .unwrap();
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/init.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::error::{Error, Fallible};
use s2n_tls_sys::*;
use std::sync::Once;

static S2N_INIT: Once = Once::new();

/// # Safety
///
/// This function should only be called once
unsafe fn global_init() -> Result<(), Error> {
    mem::init()?;
    s2n_init().into_result()?;
    Ok(())
}

thread_local! {
    static S2N_THREAD: Thread = {
        S2N_INIT.call_once(|| unsafe {
            // Safety: by using `Once` we can ensure the library is initialized once
            global_init().expect("could not initialize s2n-tls");
        });
        Thread
    };
}

struct Thread;

impl Drop for Thread {
    fn drop(&mut self) {
        // https://doc.rust-lang.org/std/thread/struct.LocalKey.html#platform-specific-behavior
        // Note that a "best effort" is made to ensure that destructors for types stored in thread local storage are run, but not all platforms can guarantee that destructors will be run for all types in thread local storage.
        let _ = unsafe { s2n_cleanup().into_result() };
    }
}

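/// Ensures the s2n-tls library is initialized and registers cleanup for the
/// calling thread.
///
/// Calling this more than once, or from multiple threads, is safe; the global
/// initialization only runs a single time. A minimal usage sketch (illustrative):
///
/// ```ignore
/// // Call once near startup, before creating configs or connections.
/// s2n_tls::init::init();
/// ```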
pub fn init() {
    S2N_THREAD.with(|_| ());
}

mod mem {
    use super::*;
    use alloc::alloc::{alloc, dealloc, Layout};
    use core::{ffi::c_void, mem::size_of};

    pub unsafe fn init() -> Result<(), Error> {
        s2n_mem_set_callbacks(
            Some(mem_init_callback),
            Some(mem_cleanup_callback),
            Some(mem_malloc_callback),
            Some(mem_free_callback),
        )
        .into_result()?;
        Ok(())
    }

    unsafe extern "C" fn mem_init_callback() -> s2n_status_code::Type {
        // no-op: the global allocator is already initialized
        s2n_status_code::SUCCESS
    }

    unsafe extern "C" fn mem_cleanup_callback() -> s2n_status_code::Type {
        // no-op: the global allocator does not require any explicit cleanup
        s2n_status_code::SUCCESS
    }

    unsafe extern "C" fn mem_malloc_callback(
        ptr: *mut *mut c_void,
        requested_len: u32,
        allocated_len: *mut u32,
    ) -> s2n_status_code::Type {
        let layout = if let Some(layout) = layout(requested_len) {
            layout
        } else {
            // An invalid layout means the request cannot be satisfied.
            return s2n_status_code::FAILURE;
        };
        *ptr = alloc(layout) as *mut _;

        if (*ptr).is_null() {
            s2n_status_code::FAILURE
        } else {
            *allocated_len = requested_len;
            s2n_status_code::SUCCESS
        }
    }

    unsafe extern "C" fn mem_free_callback(ptr: *mut c_void, len: u32) -> s2n_status_code::Type {
        let layout = if let Some(layout) = layout(len) {
            layout
        } else {
            return s2n_status_code::FAILURE;
        };

        if !ptr.is_null() {
            dealloc(ptr as *mut _, layout);
        }

        s2n_status_code::SUCCESS
    }

    unsafe fn layout(len: u32) -> Option<Layout> {
        // https://linux.die.net/man/3/malloc
        //# The malloc() and calloc() functions return a pointer to the
        //# allocated memory, which is suitably aligned for any built-in
        //# type.
        // NOTE: pointer-sized alignment is assumed to be sufficient here.
        const ALIGNMENT: usize = size_of::<usize>();

        // * align must not be zero,
        //
        // * align must be a power of two,
        //
        // * size, when rounded up to the nearest multiple of align, must not overflow (i.e., the rounded value must be less than or equal to usize::MAX).

        Layout::from_size_align(len as usize, ALIGNMENT).ok()
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/lib.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

extern crate alloc;

// Ensure memory is correctly managed in tests
// tests invoked using the checkers::test macro have additional
// memory sanity checks that occur
#[cfg(test)]
#[global_allocator]
static ALLOCATOR: checkers::Allocator = checkers::Allocator::system();

#[macro_use]
pub mod error;

pub mod callbacks;
#[cfg(feature = "unstable-fingerprint")]
pub mod client_hello;
pub mod config;
pub mod connection;
pub mod enums;
pub mod init;
pub mod pool;
pub mod security;

pub use s2n_tls_sys as ffi;

#[cfg(any(feature = "testing", test))]
pub mod testing;
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/pool.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//! Utilities to handle reusing connections.
//!
//! Creating a single new connection requires significant
//! memory allocations (about 50-60 KB, according to some tests).
//! Instead of allocating memory for a new connection, existing
//! memory can be reused by calling
//! [Connection::wipe()](`crate::connection::Connection::wipe()`).
//!
//! The [`Pool`] trait allows applications to define an
//! [Object pool](https://en.wikipedia.org/wiki/Object_pool_pattern) that
//! wipes and stores connections after they are dropped.
//!
//! We also provide a basic Pool implementation, [`ConfigPool`], that
//! implements the pool as a [VecDeque](`std::collections::VecDeque`)
//! with a fixed maximum size.
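//!
//! A minimal usage sketch (illustrative; assumes a server [`Config`] has
//! already been built):
//!
//! ```ignore
//! let pool = ConfigPoolBuilder::new(Mode::Server, config).build();
//!
//! // Reuses a wiped connection if one is available, otherwise creates one.
//! let conn = PooledConnection::new(&pool)?;
//!
//! // Dropping the connection wipes it and returns it to the pool.
//! drop(conn);
//! ```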

use crate::{
    config::Config,
    connection::{Builder, Connection},
    enums::Mode,
    error::Error,
};
use std::{
    collections::VecDeque,
    ops::{Deref, DerefMut},
    sync::{Arc, Mutex},
};

/// A connection produced by a [`Pool`].
///
/// When dropped, returns ownership of the connection to
/// the pool that produced it by calling [`Pool::give`].
#[derive(Debug)]
pub struct PooledConnection<T: Pool = Arc<dyn Pool>> {
    pool: T,
    conn: Option<Connection>,
}

impl<T: Pool> AsRef<Connection> for PooledConnection<T> {
    fn as_ref(&self) -> &Connection {
        self.conn.as_ref().unwrap()
    }
}

impl<T: Pool> AsMut<Connection> for PooledConnection<T> {
    fn as_mut(&mut self) -> &mut Connection {
        self.conn.as_mut().unwrap()
    }
}

impl<T: Pool> Drop for PooledConnection<T> {
    fn drop(&mut self) {
        if let Some(conn) = self.conn.take() {
            self.pool.give(conn);
        }
    }
}

impl<T: Pool> Deref for PooledConnection<T> {
    type Target = Connection;

    fn deref(&self) -> &Self::Target {
        self.conn.as_ref().unwrap()
    }
}

impl<T: Pool> DerefMut for PooledConnection<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.conn.as_mut().unwrap()
    }
}

impl<T: Pool + Clone> PooledConnection<T> {
    pub fn new(pool: &T) -> Result<PooledConnection<T>, Error> {
        pool.take().map(|conn| {
            let conn = Some(conn);
            let pool = pool.clone();
            Self { pool, conn }
        })
    }
}

/// An object pool for wiping and reusing connection memory.
///
/// Minimally, an implementation should call [`Connection::wipe()`]
/// during [`Self::give`].
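///
/// A sketch of a custom single-slot pool (illustrative only; the `OneSlot`
/// type is hypothetical and a real implementation would pick its own storage
/// and mode handling):
///
/// ```ignore
/// struct OneSlot(std::sync::Mutex<Option<Connection>>);
///
/// impl Pool for OneSlot {
///     fn mode(&self) -> Mode {
///         Mode::Server
///     }
///     fn take(&self) -> Result<Connection, Error> {
///         match self.0.lock().unwrap().take() {
///             Some(conn) => Ok(conn),
///             None => Ok(Connection::new(Mode::Server)),
///         }
///     }
///     fn give(&self, mut conn: Connection) {
///         // Wipe before storing so the memory can be reused safely.
///         if conn.wipe().is_ok() {
///             *self.0.lock().unwrap() = Some(conn);
///         }
///     }
/// }
/// ```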
pub trait Pool {
    fn mode(&self) -> Mode;
    fn take(&self) -> Result<Connection, Error>;
    fn give(&self, conn: Connection);
}

impl<T: Pool> Pool for Arc<T> {
    fn mode(&self) -> Mode {
        self.as_ref().mode()
    }
    fn take(&self) -> Result<Connection, Error> {
        self.as_ref().take()
    }
    fn give(&self, conn: Connection) {
        self.as_ref().give(conn)
    }
}

impl Pool for Arc<dyn Pool> {
    fn mode(&self) -> Mode {
        self.as_ref().mode()
    }
    fn take(&self) -> Result<Connection, Error> {
        self.as_ref().take()
    }
    fn give(&self, conn: Connection) {
        self.as_ref().give(conn)
    }
}

#[derive(Debug)]
pub struct ConfigPool {
    mode: Mode,
    config: Config,
    pool: Mutex<VecDeque<Connection>>,
    max_pool_size: usize,
}

pub type ConfigPoolRef = Arc<ConfigPool>;

/// Builder for [`ConfigPool`].
pub struct ConfigPoolBuilder(ConfigPool);
impl ConfigPoolBuilder {
    pub fn new(mode: Mode, config: Config) -> Self {
        Self(ConfigPool {
            mode,
            config,
            pool: Mutex::new(VecDeque::new()),
            max_pool_size: usize::MAX,
        })
    }

    pub fn set_pool(&mut self, pool: VecDeque<Connection>) -> &mut Self {
        self.0.pool = Mutex::new(pool);
        self
    }

    /// The maximum size of the underlying [`VecDeque`].
    ///
    /// This is NOT the maximum connections that can be created.
    /// When the number of connections created exceeds the `max_pool_size`,
    /// excess reclaimed connections are dropped instead of stored
    /// in the pool.
    pub fn set_max_pool_size(&mut self, max_pool_size: usize) -> &mut Self {
        self.0.max_pool_size = max_pool_size;
        self
    }

    pub fn build(self) -> Arc<ConfigPool> {
        Arc::new(self.0)
    }
}

impl ConfigPool {
    pub fn pool_size(&self) -> usize {
        self.pool.lock().map(|pool| pool.len()).unwrap_or(0)
    }

    pub fn is_poisoned(&self) -> bool {
        self.pool.is_poisoned()
    }
}

impl Pool for ConfigPool {
    fn mode(&self) -> Mode {
        self.mode
    }

    /// Get a connection.
    ///
    /// If connections are available in the pool, one will
    /// be returned. Otherwise, a new connection will be created.
    fn take(&self) -> Result<Connection, Error> {
        let from_pool = match self.pool.lock() {
            Ok(mut pool) => pool.pop_front(),
            Err(_) => None,
        };
        let conn = match from_pool {
            // Wiping a connection doesn't wipe the config,
            // so we don't need to reset the config.
            Some(conn) => conn,
            // Create a new connection with the stored config.
            None => self.config.build_connection(self.mode)?,
        };
        Ok(conn)
    }

    /// Recycle a connection.
    ///
    /// The connection is wiped and returned to the pool
    /// if space is available.
    fn give(&self, mut conn: Connection) {
        let wiped = conn.wipe().is_ok();
        if let Ok(mut pool) = self.pool.lock() {
            if pool.len() < self.max_pool_size && wiped {
                pool.push_back(conn);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::Config;

    #[test]
    fn config_pool_single_connection() -> Result<(), Box<dyn std::error::Error>> {
        let pool = ConfigPoolBuilder::new(Mode::Server, Config::default()).build();

        // Repeatedly checkout the same connection
        assert_eq!(pool.pool_size(), 0);
        for _ in 0..5 {
            let _conn = PooledConnection::new(&pool)?;
            assert_eq!(pool.pool_size(), 0);
        }
        assert_eq!(pool.pool_size(), 1);
        Ok(())
    }

    #[test]
    fn config_pool_multiple_connections() -> Result<(), Box<dyn std::error::Error>> {
        let pool = ConfigPoolBuilder::new(Mode::Server, Config::default()).build();

        // We need to hold onto connections so that they're not
        // immediately reclaimed by the pool.
        let mut conns = VecDeque::new();

        // Checkout multiple connections
        const COUNT: usize = 25;
        for _ in 0..COUNT {
            conns.push_back(PooledConnection::new(&pool)?);
            assert_eq!(pool.pool_size(), 0);
        }
        assert_eq!(conns.len(), COUNT);

        // Drop all outstanding connections, returning them to the pool
        conns.clear();
        assert_eq!(pool.pool_size(), COUNT);

        // Reuse a subset of the connections
        const SUBSET_COUNT: usize = COUNT / 2;
        for i in 1..=SUBSET_COUNT {
            conns.push_back(PooledConnection::new(&pool)?);
            // The pool should drain as we reuse connections.
            assert_eq!(pool.pool_size(), COUNT - i);
        }
        assert_eq!(conns.len(), SUBSET_COUNT);
        assert_eq!(pool.pool_size(), COUNT - SUBSET_COUNT);

        // Drop all outstanding connections, returning them to the pool
        conns.clear();
        assert_eq!(pool.pool_size(), COUNT);

        Ok(())
    }

    #[test]
    fn config_pool_with_max_size() -> Result<(), Box<dyn std::error::Error>> {
        const POOL_MAX_SIZE: usize = 11;
        let mut pool = ConfigPoolBuilder::new(Mode::Server, Config::default());
        pool.set_max_pool_size(POOL_MAX_SIZE);
        let pool = pool.build();

        // We need to hold onto connections so that they're not
        // immediately reclaimed by the pool.
        let mut conns = VecDeque::new();

        // Create more connections than the pools can hold
        const COUNT: usize = 25;
        for _ in 0..COUNT {
            conns.push_back(PooledConnection::new(&pool)?);
        }
        assert_eq!(conns.len(), COUNT);

        // Drop all outstanding connections, returning them to the pools
        // The pool should now hold its maximum.
        conns.clear();
        assert_eq!(pool.pool_size(), POOL_MAX_SIZE);

        Ok(())
    }

    #[test]
    fn non_generic_pool() -> Result<(), Box<dyn std::error::Error>> {
        let config_pool = ConfigPoolBuilder::new(Mode::Server, Config::default()).build();
        // Note the unwieldy type parameters on PooledConnection here.
        let _: PooledConnection<Arc<ConfigPool>> = PooledConnection::new(&config_pool)?;
        // To avoid specifying the generic type parameters on PooledConnection,
        // the pool can be converted to an Arc<dyn Pool>.
        let pool: Arc<dyn Pool> = config_pool;
        // Note no generic type parameters on PooledConnection here.
        let _: PooledConnection = PooledConnection::new(&pool)?;
        Ok(())
    }

    #[test]
    fn dereferencing_pooled_connection() -> Result<(), Box<dyn std::error::Error>> {
        let config_pool = ConfigPoolBuilder::new(Mode::Server, Config::default()).build();

        let pooled_conn: PooledConnection<Arc<ConfigPool>> = PooledConnection::new(&config_pool)?;
        let conn = pooled_conn.deref();
        assert_eq!(pooled_conn.config(), conn.config());

        let mut mut_pooled_conn: PooledConnection<Arc<ConfigPool>> =
            PooledConnection::new(&config_pool)?;
        let waker = futures_test::task::new_count_waker().0;
        mut_pooled_conn.set_waker(Some(&waker))?;
        assert!(mut_pooled_conn.waker().unwrap().will_wake(&waker));

        Ok(())
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/security.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::error::Error;
use core::fmt;
use std::ffi::{CStr, CString};

#[derive(Clone, PartialEq)]
enum Context {
    Static(&'static [u8]),
    Owned(CString),
}

#[derive(Clone, PartialEq)]
pub struct Policy(Context);

impl Policy {
    pub(crate) fn as_cstr(&self) -> &CStr {
        match &self.0 {
            Context::Static(x) => unsafe {
                // Safety: Policies are always created with null-terminated strings
                CStr::from_bytes_with_nul_unchecked(x)
            },
            Context::Owned(x) => x.as_c_str(),
        }
    }

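    /// Returns a `Policy` for the given security policy version string.
    ///
    /// A minimal usage sketch (illustrative; any published s2n-tls policy
    /// version string may be passed):
    ///
    /// ```ignore
    /// let policy = Policy::from_version("default")?;
    /// ```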
    pub fn from_version(version: &str) -> Result<Policy, Error> {
        let cstr = CString::new(version).map_err(|_| Error::INVALID_INPUT)?;
        let context = Context::Owned(cstr);
        Ok(Self(context))
    }
}

impl fmt::Debug for Policy {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("Policy").field(&self.as_cstr()).finish()
    }
}

macro_rules! policy {
    ($name:ident, $version:expr) => {
        pub const $name: Policy = Policy(Context::Static(concat!($version, "\0").as_bytes()));
    };
}

policy!(DEFAULT, "default");
policy!(DEFAULT_TLS13, "default_tls13");

#[cfg(feature = "pq")]
policy!(TESTING_PQ, "PQ-TLS-1-0-2021-05-26");

pub const ALL_POLICIES: &[Policy] = &[
    DEFAULT,
    DEFAULT_TLS13,
    #[cfg(feature = "pq")]
    TESTING_PQ,
];
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/testing.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    callbacks::VerifyHostNameCallback, config::*, connection, enums::Blinding, security,
    testing::s2n_tls::Harness,
};
use alloc::{collections::VecDeque, sync::Arc};
use bytes::Bytes;
use core::{
    sync::atomic::{AtomicUsize, Ordering},
    task::Poll,
};

pub mod client_hello;
pub mod resumption;
pub mod s2n_tls;

type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
type Result<T = (), E = Error> = core::result::Result<T, E>;

pub fn test_error(msg: &str) -> crate::error::Error {
    crate::error::Error::application(msg.into())
}

pub fn assert_test_error(input: Error, msg: &str) {
    let error = input
        .downcast::<crate::error::Error>()
        .expect("Unexpected generic error type");
    if let Some(inner) = error.application_error() {
        assert_eq!(msg, inner.to_string())
    } else {
        panic!("Unexpected known error type");
    }
}

#[derive(Clone)]
pub struct Counter(Arc<AtomicUsize>);
impl Counter {
    fn new() -> Self {
        Counter(Arc::new(AtomicUsize::new(0)))
    }
    pub fn count(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }
    pub fn increment(&self) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }
}
impl Default for Counter {
    fn default() -> Self {
        Self::new()
    }
}

pub trait Connection: core::fmt::Debug {
    fn poll_negotiate<Ctx: Context>(&mut self, context: &mut Ctx) -> Poll<Result<()>>;
    fn poll_action<Ctx: Context, F>(&mut self, context: &mut Ctx, action: F) -> Poll<Result<()>>
    where
        F: FnOnce(&mut connection::Connection) -> Poll<Result<usize, crate::error::Error>>;
}

pub trait Context {
    fn receive(&mut self, max_len: Option<usize>) -> Option<Bytes>;
    fn send(&mut self, data: Bytes);
}

pub enum Mode {
    Client,
    Server,
}

#[derive(Debug)]
pub struct Pair<Server: Connection, Client: Connection> {
    pub server: (Server, MemoryContext),
    pub client: (Client, MemoryContext),
    pub max_iterations: usize,
}

impl<Server: Connection, Client: Connection> Pair<Server, Client> {
    /// The number of iterations that will be executed until the handshake exits with an error
    ///
    /// This is to prevent endless looping without making progress on the connection.
    const DEFAULT_ITERATIONS: usize = 100;

    pub fn new(server: Server, client: Client) -> Self {
        Self {
            server: (server, Default::default()),
            client: (client, Default::default()),
            max_iterations: Self::DEFAULT_ITERATIONS,
        }
    }
    pub fn poll(&mut self) -> Poll<Result<()>> {
        assert!(
            self.max_iterations > 0,
            "handshake has iterated too many times: {:#?}",
            self,
        );
        let client_res = self.client.0.poll_negotiate(&mut self.client.1);
        let server_res = self.server.0.poll_negotiate(&mut self.server.1);
        self.client.1.transfer(&mut self.server.1);
        self.max_iterations -= 1;
        match (client_res, server_res) {
            (Poll::Ready(client_res), Poll::Ready(server_res)) => {
                client_res?;
                server_res?;
                Ok(()).into()
            }
            (Poll::Ready(client_res), _) => {
                client_res?;
                Poll::Pending
            }
            (_, Poll::Ready(server_res)) => {
                server_res?;
                Poll::Pending
            }
            _ => Poll::Pending,
        }
    }

    pub fn poll_send(&mut self, sender: Mode, buf: &[u8]) -> Poll<Result<()>> {
        let result = match sender {
            Mode::Client => self.client.0.poll_action(&mut self.client.1, |conn| {
                connection::Connection::poll_send(conn, buf)
            }),
            Mode::Server => self.server.0.poll_action(&mut self.server.1, |conn| {
                connection::Connection::poll_send(conn, buf)
            }),
        };
        self.server.1.transfer(&mut self.client.1);
        match result {
            Poll::Ready(result) => {
                result?;
                Ok(()).into()
            }
            Poll::Pending => Poll::Pending,
        }
    }

    pub fn poll_recv(&mut self, receiver: Mode, buf: &mut [u8]) -> Poll<Result<()>> {
        let result = match receiver {
            Mode::Client => self.client.0.poll_action(&mut self.client.1, |conn| {
                connection::Connection::poll_recv(conn, buf)
            }),
            Mode::Server => self.server.0.poll_action(&mut self.server.1, |conn| {
                connection::Connection::poll_recv(conn, buf)
            }),
        };
        match result {
            Poll::Ready(result) => {
                result?;
                Ok(()).into()
            }
            Poll::Pending => Poll::Pending,
        }
    }
}

#[derive(Debug, Default)]
pub struct MemoryContext {
    rx: VecDeque<Bytes>,
    tx: VecDeque<Bytes>,
}

impl MemoryContext {
    pub fn transfer(&mut self, other: &mut Self) {
        self.rx.extend(other.tx.drain(..));
        other.rx.extend(self.tx.drain(..));
    }
}

impl Context for MemoryContext {
    fn receive(&mut self, max_len: Option<usize>) -> Option<Bytes> {
        loop {
            let mut chunk = self.rx.pop_front()?;

            if chunk.is_empty() {
                continue;
            }

            let max_len = max_len.unwrap_or(usize::MAX);

            if chunk.len() > max_len {
                self.rx.push_front(chunk.split_off(max_len));
            }

            return Some(chunk);
        }
    }

    fn send(&mut self, data: Bytes) {
        self.tx.push_back(data);
    }
}

pub struct CertKeyPair {
    cert_path: &'static str,
    cert: &'static [u8],
    key: &'static [u8],
}

impl Default for CertKeyPair {
    fn default() -> Self {
        CertKeyPair {
            cert_path: concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/../../../tests/pems/rsa_4096_sha512_client_cert.pem",
            ),
            cert: &include_bytes!("../../../../tests/pems/rsa_4096_sha512_client_cert.pem")[..],
            key: &include_bytes!("../../../../tests/pems/rsa_4096_sha512_client_key.pem")[..],
        }
    }
}

impl CertKeyPair {
    pub fn cert_path(&self) -> &'static str {
        self.cert_path
    }

    pub fn cert(&self) -> &'static [u8] {
        self.cert
    }

    pub fn key(&self) -> &'static [u8] {
        self.key
    }
}

pub struct InsecureAcceptAllCertificatesHandler {}
impl VerifyHostNameCallback for InsecureAcceptAllCertificatesHandler {
    fn verify_host_name(&self, _host_name: &str) -> bool {
        true
    }
}

pub struct RejectAllCertificatesHandler {}
impl VerifyHostNameCallback for RejectAllCertificatesHandler {
    fn verify_host_name(&self, _host_name: &str) -> bool {
        false
    }
}

pub fn build_config(cipher_prefs: &security::Policy) -> Result<Config> {
    let builder = config_builder(cipher_prefs)?;
    Ok(builder.build().expect("Unable to build server config"))
}

pub fn config_builder(cipher_prefs: &security::Policy) -> Result<Builder> {
    let mut builder = Builder::new();
    let keypair = CertKeyPair::default();
    // Build a config
    builder
        .set_security_policy(cipher_prefs)
        .expect("Unable to set config cipher preferences");
    builder
        .load_pem(keypair.cert(), keypair.key())
        .expect("Unable to load cert/pem");
    builder
        .set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})
        .expect("Unable to set a host verify callback.");
    unsafe {
        builder
            .disable_x509_verification()
            .expect("Unable to disable x509 verification");
    };
    Ok(builder)
}

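/// Builds a client/server [`Pair`] whose connections exchange handshake data
/// entirely in memory.
///
/// A minimal usage sketch (illustrative):
///
/// ```ignore
/// let config = build_config(&security::DEFAULT).unwrap();
/// let pair = tls_pair(config);
/// // Drive the handshake to completion.
/// let pair = poll_tls_pair(pair);
/// ```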
pub fn tls_pair(config: crate::config::Config) -> Pair<Harness, Harness> {
    // create and configure a server connection
    let mut server = crate::connection::Connection::new_server();
    // some tests check for connection failure so disable blinding to avoid delay
    server.as_mut().set_blinding(Blinding::SelfService).unwrap();
    server
        .set_config(config.clone())
        .expect("Failed to bind config to server connection");
    let server = Harness::new(server);

    // create a client connection
    let mut client = crate::connection::Connection::new_client();
    // some tests check for connection failure so disable blinding to avoid delay
    client.as_mut().set_blinding(Blinding::SelfService).unwrap();
    client
        .set_config(config)
        .expect("Unable to set client config");
    let client = Harness::new(client);

    Pair::new(server, client)
}

pub fn establish_connection(config: crate::config::Config) {
    // create and configure a server connection
    let mut server = crate::connection::Connection::new_server();
    server
        .set_config(config.clone())
        .expect("Failed to bind config to server connection");
    let server = Harness::new(server);

    // create a client connection
    let mut client = crate::connection::Connection::new_client();
    client
        .set_config(config)
        .expect("Unable to set client config");
    let client = Harness::new(client);

    let pair = Pair::new(server, client);
    poll_tls_pair(pair);
}

pub fn poll_tls_pair(mut pair: Pair<Harness, Harness>) -> Pair<Harness, Harness> {
    loop {
        match pair.poll() {
            Poll::Ready(result) => {
                result.unwrap();
                break;
            }
            Poll::Pending => continue,
        }
    }

    pair
}

pub fn poll_tls_pair_result(pair: &mut Pair<Harness, Harness>) -> Result<()> {
    loop {
        match pair.poll() {
            Poll::Ready(result) => return result,
            Poll::Pending => continue,
        }
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/testing/client_hello.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    callbacks::{ClientHelloCallback, ConnectionFuture},
    error, security,
};
use alloc::sync::Arc;
use core::{sync::atomic::Ordering, task::Poll};
use s2n_tls_sys::{s2n_client_hello_has_extension, s2n_connection_get_client_hello};
use std::{fmt, io, pin::Pin, sync::atomic::AtomicUsize};

// The Future returned by MockClientHelloHandler.
//
// An instance of this Future is stored on the connection and
// polled to make progress in the async client_hello_callback
pub struct MockClientHelloFuture {
    require_pending_count: usize,
    invoked: Arc<AtomicUsize>,
}

impl ConnectionFuture for MockClientHelloFuture {
    fn poll(
        self: Pin<&mut Self>,
        connection: &mut crate::connection::Connection,
        _ctx: &mut core::task::Context,
    ) -> Poll<Result<(), crate::error::Error>> {
        if self.invoked.fetch_add(1, Ordering::SeqCst) < self.require_pending_count {
            // confirm the callback can access the waker
            connection.waker().unwrap().wake_by_ref();
            return Poll::Pending;
        }

        // Test that the config can be changed
        connection
            .set_config(super::build_config(&security::DEFAULT_TLS13).unwrap())
            .unwrap();

        // Test that server_name_extension_used can be invoked
        connection.server_name_extension_used();

        Poll::Ready(Ok(()))
    }
}

#[derive(Clone)]
pub struct MockClientHelloHandler {
    require_pending_count: usize,
    pub invoked: Arc<AtomicUsize>,
}

impl MockClientHelloHandler {
    pub fn new(require_pending_count: usize) -> Self {
        Self {
            require_pending_count,
            invoked: Arc::new(AtomicUsize::new(0)),
        }
    }
}

impl ClientHelloCallback for MockClientHelloHandler {
    fn on_client_hello(
        &self,
        _connection: &mut crate::connection::Connection,
    ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, crate::error::Error> {
        let fut = MockClientHelloFuture {
            require_pending_count: self.require_pending_count,
            invoked: self.invoked.clone(),
        };

        // returning `Some` indicates that the client_hello callback is
        // not yet finished and that the supplied MockClientHelloFuture
        // needs to be `poll`ed to make progress.
        Ok(Some(Box::pin(fut)))
    }
}

// A ClientHelloCallback which returns a synchronous error from
// on_client_hello().
#[derive(Default)]
pub struct FailingCHHandler;

impl ClientHelloCallback for FailingCHHandler {
    fn on_client_hello(
        &self,
        _connection: &mut crate::connection::Connection,
    ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, error::Error> {
        let io_error = io::Error::new(io::ErrorKind::Other, CustomError);
        Err(crate::error::Error::application(Box::new(io_error)))
    }
}

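// A ClientHelloCallback which returns an asynchronous task that eventually
// resolves to an error.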
#[derive(Default)]
pub struct FailingAsyncCHHandler;
impl ClientHelloCallback for FailingAsyncCHHandler {
    fn on_client_hello(
        &self,
        _connection: &mut crate::connection::Connection,
    ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, error::Error> {
        let fut = FailingCHFuture::default();
        Ok(Some(Box::pin(fut)))
    }
}

// A ConnectionFuture which returns Poll::Pending once and then resolves to
// an error.
#[derive(Default)]
struct FailingCHFuture {
    pub invoked: Arc<AtomicUsize>,
}

impl ConnectionFuture for FailingCHFuture {
    fn poll(
        self: Pin<&mut Self>,
        connection: &mut crate::connection::Connection,
        _ctx: &mut core::task::Context,
    ) -> Poll<Result<(), crate::error::Error>> {
        if self.invoked.fetch_add(1, Ordering::SeqCst) < 1 {
            // confirm the callback can access the waker
            connection.waker().unwrap().wake_by_ref();
            return Poll::Pending;
        }

        let io_error = io::Error::new(io::ErrorKind::Other, CustomError);
        let ret = Err(crate::error::Error::application(Box::new(io_error)));
        Poll::Ready(ret)
    }
}

impl Drop for FailingCHFuture {
    // The future returns Pending once to simulate its asynchronous nature and
    // improve test coverage, so it must have been polled at least once by the
    // time it is dropped.
    fn drop(&mut self) {
        assert!(self.invoked.load(Ordering::SeqCst) >= 1);
    }
}
/// A client hello handler that asserts that the extension with the given
/// IANA code is either present or not present in the client hello
pub struct HasExtensionClientHelloHandler {
    pub extension_iana: u16,
    pub extension_expected: bool,
}

impl ClientHelloCallback for HasExtensionClientHelloHandler {
    fn on_client_hello(
        &self,
        connection: &mut crate::connection::Connection,
    ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, error::Error> {
        let mut exists = false;

        unsafe {
            let client_hello = s2n_connection_get_client_hello(connection.as_ptr());
            s2n_client_hello_has_extension(client_hello, self.extension_iana, &mut exists as _);
        }

        if self.extension_expected {
            assert!(
                exists,
                "Extension {} was not found in the client hello",
                self.extension_iana
            );
        } else {
            assert!(
                !exists,
                "Unexpected extension {} found in the client hello",
                self.extension_iana
            )
        }

        Ok(None)
    }
}

#[derive(Debug)]
pub struct CustomError;

impl std::error::Error for CustomError {}
impl fmt::Display for CustomError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "custom error")
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/testing/resumption.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

#[cfg(test)]
mod tests {
    use crate::{
        callbacks::{SessionTicket, SessionTicketCallback},
        config::ConnectionInitializer,
        connection,
        testing::{s2n_tls::*, *},
    };
    use futures_test::task::noop_waker;
    use std::{error::Error, sync::Mutex, time::SystemTime};

    #[derive(Default, Clone)]
    pub struct SessionTicketHandler {
        stored_ticket: Arc<Mutex<Option<Vec<u8>>>>,
    }

    // Implement the session ticket callback that stores the SessionTicket type
    impl SessionTicketCallback for SessionTicketHandler {
        fn on_session_ticket(
            &self,
            _connection: &mut connection::Connection,
            session_ticket: &SessionTicket,
        ) {
            let size = session_ticket.len().unwrap();
            let mut data = vec![0; size];
            session_ticket.data(&mut data).unwrap();
            let mut ptr = (*self.stored_ticket).lock().unwrap();
            if ptr.is_none() {
                *ptr = Some(data);
            }
        }
    }

    impl ConnectionInitializer for SessionTicketHandler {
        fn initialize_connection(
            &self,
            connection: &mut crate::connection::Connection,
        ) -> crate::callbacks::ConnectionFutureResult {
            if let Some(ticket) = (*self.stored_ticket).lock().unwrap().as_deref() {
                connection.set_session_ticket(ticket)?;
            }
            Ok(None)
        }
    }

    // Create test ticket key
    const KEY: [u8; 16] = [0; 16];
    const KEYNAME: [u8; 3] = [1, 3, 4];

    #[test]
    fn resume_session() -> Result<(), Box<dyn Error>> {
        let keypair = CertKeyPair::default();

        // Initialize config for server with a ticket key
        let mut server_config_builder = Builder::new();
        server_config_builder
            .add_session_ticket_key(&KEYNAME, &KEY, SystemTime::now())?
            .load_pem(keypair.cert(), keypair.key())?;
        let server_config = server_config_builder.build()?;

        let handler = SessionTicketHandler::default();

        // create config for client
        let mut client_config_builder = Builder::new();

        client_config_builder
            .enable_session_tickets(true)?
            .set_session_ticket_callback(handler.clone())?
            .trust_pem(keypair.cert())?
            .set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?
            .set_connection_initializer(handler.clone())?;
        let client_config = client_config_builder.build()?;

        // create and configure a server connection
        let mut server = connection::Connection::new_server();
        server
            .set_config(server_config.clone())
            .expect("Failed to bind config to server connection");

        // create a client connection
        let mut client = connection::Connection::new_client();

        // Client needs a waker due to its use of an async callback
        client
            .set_waker(Some(&noop_waker()))?
            .set_config(client_config.clone())
            .expect("Unable to set client config");

        let server = Harness::new(server);
        let client = Harness::new(client);
        let pair = Pair::new(server, client);
        let pair = poll_tls_pair(pair);

        let client = pair.client.0.connection();

        // Check connection was full handshake and a session ticket was included
        assert_eq!(
            client.handshake_type()?,
            "NEGOTIATED|FULL_HANDSHAKE|TLS12_PERFECT_FORWARD_SECRECY|WITH_SESSION_TICKET"
        );

        // create and configure a client/server connection again
        let mut server = connection::Connection::new_server();
        server
            .set_config(server_config)
            .expect("Failed to bind config to server connection");

        // create a client connection with a resumption ticket
        let mut client = connection::Connection::new_client();

        client
            .set_waker(Some(&noop_waker()))?
            .set_config(client_config)
            .expect("Unable to set client config");

        let server = Harness::new(server);
        let client = Harness::new(client);
        let pair = Pair::new(server, client);
        let pair = poll_tls_pair(pair);

        let client = pair.client.0.connection();

        // Check new connection was resumed
        assert_eq!(client.handshake_type()?, "NEGOTIATED");
        Ok(())
    }

    #[test]
    fn resume_tls13_session() -> Result<(), Box<dyn Error>> {
        let keypair = CertKeyPair::default();

        // Initialize config for server with a ticket key
        let mut server_config_builder = Builder::new();
        server_config_builder
            .add_session_ticket_key(&KEYNAME, &KEY, SystemTime::now())?
            .load_pem(keypair.cert(), keypair.key())?
            .set_security_policy(&security::DEFAULT_TLS13)?;
        let server_config = server_config_builder.build()?;

        let handler = SessionTicketHandler::default();

        // create config for client
        let mut client_config_builder = Builder::new();
        client_config_builder
            .enable_session_tickets(true)?
            .set_session_ticket_callback(handler.clone())?
            .set_connection_initializer(handler.clone())?
            .trust_pem(keypair.cert())?
            .set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?
            .set_security_policy(&security::DEFAULT_TLS13)?;
        let client_config = client_config_builder.build()?;

        // create and configure a server connection
        let mut server = connection::Connection::new_server();
        server
            .set_config(server_config.clone())
            .expect("Failed to bind config to server connection");

        // create a client connection
        let mut client = connection::Connection::new_client();
        client
            .set_waker(Some(&noop_waker()))?
            .set_config(client_config.clone())
            .expect("Unable to set client config");

        let server = Harness::new(server);
        let client = Harness::new(client);
        let pair = Pair::new(server, client);
        let mut pair = poll_tls_pair(pair);

        // Do a recv call on the client side to read a session ticket. Poll function
        // returns pending since no application data was read, however it is enough
        // to collect the session ticket.
        let mut recv_buffer: [u8; 10] = [0; 10];
        assert!(pair.poll_recv(Mode::Client, &mut recv_buffer).is_pending());

        let client = pair.client.0.connection();
        // Check connection was full handshake
        assert_eq!(
            client.handshake_type()?,
            "NEGOTIATED|FULL_HANDSHAKE|MIDDLEBOX_COMPAT"
        );

        // create and configure a client/server connection again
        let mut server = connection::Connection::new_server();
        server
            .set_config(server_config)
            .expect("Failed to bind config to server connection");

        // create a client connection with a resumption ticket
        let mut client = connection::Connection::new_client();
        client
            .set_waker(Some(&noop_waker()))?
            .set_config(client_config)
            .expect("Unable to set client config");

        let server = Harness::new(server);
        let client = Harness::new(client);
        let pair = Pair::new(server, client);
        let pair = poll_tls_pair(pair);

        let client = pair.client.0.connection();

        // Check new connection was resumed
        assert_eq!(client.handshake_type()?, "NEGOTIATED|MIDDLEBOX_COMPAT");
        Ok(())
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/bindings/rust/s2n-tls/src/testing/s2n_tls.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    connection::Connection,
    testing::{Context, Error, Result},
};
use bytes::BytesMut;
use core::task::Poll;
use libc::c_void;
use s2n_tls_sys::s2n_status_code::Type as s2n_status_code;

const SEND_BUFFER_CAPACITY: usize = 4096;

#[derive(Debug)]
pub struct Harness {
    connection: Connection,
    send_buffer: BytesMut,
    handshake_done: bool,
    // TODO add a size
}

impl Harness {
    pub fn new(connection: Connection) -> Self {
        Self {
            connection,
            send_buffer: BytesMut::new(),
            handshake_done: false,
        }
    }

    pub fn connection(&self) -> &Connection {
        &self.connection
    }

    pub fn connection_mut(&mut self) -> &mut Connection {
        &mut self.connection
    }
}

impl super::Connection for Harness {
    fn poll_negotiate<Ctx: Context>(&mut self, context: &mut Ctx) -> Poll<Result<()>> {
        let mut callback: Callback<Ctx> = Callback {
            context,
            err: None,
            send_buffer: &mut self.send_buffer,
        };

        unsafe {
            // Safety: the callback struct must live as long as the callbacks are
            // set on the connection
            callback.set(&mut self.connection);
        }

        let result = self.connection.poll_negotiate().map_ok(|_| ());

        callback.unset(&mut self.connection)?;

        match result {
            Poll::Ready(Ok(_)) => {
                if !self.handshake_done {
                    self.handshake_done = true;
                }
                Ok(()).into()
            }
            Poll::Ready(Err(err)) => Err(err.into()).into(),
            Poll::Pending => Poll::Pending,
        }
    }

    fn poll_action<Ctx: Context, F>(&mut self, context: &mut Ctx, action: F) -> Poll<Result<()>>
    where
        F: FnOnce(&mut Connection) -> Poll<Result<usize, crate::error::Error>>,
    {
        let mut callback: Callback<Ctx> = Callback {
            context,
            err: None,
            send_buffer: &mut self.send_buffer,
        };

        unsafe {
            // Safety: the callback struct must live as long as the callbacks are
            // set on the connection
            callback.set(&mut self.connection);
        }

        let result = action(&mut self.connection);

        callback.unset(&mut self.connection)?;

        match result {
            Poll::Ready(Ok(_)) => Ok(()).into(),
            Poll::Ready(Err(err)) => Err(err.into()).into(),
            Poll::Pending => Poll::Pending,
        }
    }
}

struct Callback<'a, T> {
    pub context: &'a mut T,
    pub err: Option<Error>,
    pub send_buffer: &'a mut BytesMut,
}

impl<'a, T: 'a + Context> Callback<'a, T> {
    unsafe fn set(&mut self, connection: &mut Connection) {
        let context = self as *mut Self as *mut c_void;

        // We use unwrap here since s2n-tls will just check if connection is not null
        connection.set_send_callback(Some(Self::send_cb)).unwrap();
        connection.set_send_context(context).unwrap();
        connection
            .set_receive_callback(Some(Self::recv_cb))
            .unwrap();
        connection.set_receive_context(context).unwrap();
    }

    /// Removes all of the callback and context pointers from the connection
    pub fn unset(mut self, connection: &mut Connection) -> Result<()> {
        unsafe {
            unsafe extern "C" fn send_cb(
                _context: *mut c_void,
                _data: *const u8,
                _len: u32,
            ) -> s2n_status_code {
                -1
            }

            unsafe extern "C" fn recv_cb(
                _context: *mut c_void,
                _data: *mut u8,
                _len: u32,
            ) -> s2n_status_code {
                -1
            }

            // We use unwrap here since s2n-tls will just check if connection is not null
            connection.set_send_callback(Some(send_cb)).unwrap();
            connection.set_send_context(core::ptr::null_mut()).unwrap();
            connection.set_receive_callback(Some(recv_cb)).unwrap();
            connection
                .set_receive_context(core::ptr::null_mut())
                .unwrap();

            // Flush the send buffer before returning to the connection
            self.flush();

            if let Some(err) = self.err {
                return Err(err);
            }

            Ok(())
        }
    }

    unsafe extern "C" fn send_cb(
        context: *mut c_void,
        data: *const u8,
        len: u32,
    ) -> s2n_status_code {
        let context = &mut *(context as *mut Self);
        let data = core::slice::from_raw_parts(data, len as _);
        context.on_write(data) as _
    }

    /// Called when sending data
    fn on_write(&mut self, data: &[u8]) -> usize {
        // If this write would cause the current send buffer to reallocate,
        // we should flush and create a new send buffer.
        let remaining_capacity = self.send_buffer.capacity() - self.send_buffer.len();

        if remaining_capacity < data.len() {
            // Flush the send buffer before reallocating it
            self.flush();

            // ensure we only do one allocation for this write
            let len = SEND_BUFFER_CAPACITY.max(data.len());

            debug_assert!(
                self.send_buffer.is_empty(),
                "dropping a send buffer with data will result in data loss"
            );
            *self.send_buffer = BytesMut::with_capacity(len);
        }

        // Write the current data to the send buffer
        //
        // NOTE: we don't immediately flush to the context since s2n-tls may do
        //       several small writes in a row.
        self.send_buffer.extend_from_slice(data);

        data.len()
    }

    /// Flushes the send buffer into the context
    fn flush(&mut self) {
        if !self.send_buffer.is_empty() {
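            // `split().freeze()` hands the accumulated bytes to the context
            // without copying and leaves `send_buffer` empty for reuse.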
            let chunk = self.send_buffer.split().freeze();
            self.context.send(chunk);
        }
    }

    /// The function s2n-tls calls when it wants to receive data
    unsafe extern "C" fn recv_cb(context: *mut c_void, data: *mut u8, len: u32) -> s2n_status_code {
        let context = &mut *(context as *mut Self);
        let data = core::slice::from_raw_parts_mut(data, len as _);
        match context.on_read(data) {
            0 => {
                // https://aws.github.io/s2n-tls/doxygen/s2n_8h.html#a699fd9e05a8e8163811db6cab01af973
                // s2n-tls wants us to set the global errno to signal blocked
                errno::set_errno(errno::Errno(libc::EWOULDBLOCK));
                -1
            }
            len => len as _,
        }
    }

    /// Called when receiving data
    fn on_read(&mut self, data: &mut [u8]) -> usize {
        let max_len = Some(data.len());

        // TODO: loop until data buffer is full.
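        // Returning 0 signals "no data available yet"; `recv_cb` translates
        // that into EWOULDBLOCK for s2n-tls.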
        if let Some(chunk) = self.context.receive(max_len) {
            let len = chunk.len();
            data[..len].copy_from_slice(&chunk);
            len
        } else {
            0
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::{
        callbacks::{ClientHelloCallback, ConnectionFuture},
        enums::ClientAuthType,
        testing::{client_hello::*, s2n_tls::*, *},
    };
    use alloc::sync::Arc;
    use core::sync::atomic::Ordering;
    use futures_test::task::{new_count_waker, noop_waker};
    use std::{fs, path::Path, pin::Pin, sync::atomic::AtomicUsize};

    #[test]
    fn handshake_default() {
        let config = build_config(&security::DEFAULT).unwrap();
        establish_connection(config);
    }

    #[test]
    fn handshake_default_tls13() {
        let config = build_config(&security::DEFAULT_TLS13).unwrap();
        establish_connection(config)
    }

    #[test]
    fn default_config_and_clone_interaction() -> Result<(), Error> {
        let config = build_config(&security::DEFAULT_TLS13)?;
        assert_eq!(config.test_get_refcount()?, 1);
        {
            // Create new connection.
            let mut server = crate::connection::Connection::new_server();
            // Can't retrieve default config.
            assert!(server.config().is_none());
            // Custom config reference count doesn't change.
            assert_eq!(config.test_get_refcount()?, 1);

            // Set custom config on connection.
            server.set_config(config.clone())?;
            // Can retrieve custom config.
            assert!(server.config().is_some());
            // Custom config now referenced once more.
            assert_eq!(config.test_get_refcount()?, 2);

            // Create new connection.
            let mut client = crate::connection::Connection::new_client();
            // Can't retrieve default config.
            assert!(client.config().is_none());
            // Custom config reference count doesn't change.
            assert_eq!(config.test_get_refcount()?, 2);

            // Set custom config on connection.
            client.set_config(config.clone())?;
            // Can retrieve custom config.
            assert!(client.config().is_some());
            // Custom config now referenced once more.
            assert_eq!(config.test_get_refcount()?, 3);

            // drop all the clones
        }
        assert_eq!(config.test_get_refcount()?, 1);
        Ok(())
    }

    #[test]
    fn set_config_multiple_times() -> Result<(), Error> {
        let config = build_config(&security::DEFAULT_TLS13)?;
        assert_eq!(config.test_get_refcount()?, 1);

        let mut server = crate::connection::Connection::new_server();
        assert_eq!(config.test_get_refcount()?, 1);

        // call set_config once
        server.set_config(config.clone())?;
        assert_eq!(config.test_get_refcount()?, 2);
        assert!(server.config().is_some());

        // calling set_config multiple times works since we drop the previous config
        server.set_config(config.clone())?;
        assert_eq!(config.test_get_refcount()?, 2);
        assert!(server.config().is_some());
        Ok(())
    }

    #[test]
    fn connection_waker() {
        let config = build_config(&security::DEFAULT_TLS13).unwrap();
        assert_eq!(config.test_get_refcount().unwrap(), 1);

        let mut server = crate::connection::Connection::new_server();
        server.set_config(config).unwrap();

        assert!(server.waker().is_none());

        let (waker, wake_count) = new_count_waker();
        server.set_waker(Some(&waker)).unwrap();
        assert!(server.waker().is_some());

        server.set_waker(None).unwrap();
        assert!(server.waker().is_none());

        assert_eq!(wake_count, 0);
    }

    #[test]
    fn failing_client_hello_callback_sync() -> Result<(), Error> {
        let (waker, wake_count) = new_count_waker();
        let config = {
            let mut config = config_builder(&security::DEFAULT_TLS13)?;
            config.set_client_hello_callback(FailingCHHandler)?;
            config.build()?
        };

        let server = {
            // create and configure a server connection
            let mut server = crate::connection::Connection::new_server();
            server.set_config(config.clone())?;
            server.set_waker(Some(&waker))?;
            Harness::new(server)
        };

        let client = {
            // create a client connection
            let mut client = crate::connection::Connection::new_client();
            client.set_config(config)?;
            Harness::new(client)
        };

        let mut pair = Pair::new(server, client);
        loop {
            match pair.poll() {
                Poll::Ready(result) => {
                    let err = result.expect_err("handshake should fail");

                    // the underlying error should be the custom error the application provided
                    let s2n_err = err.downcast_ref::<crate::error::Error>().unwrap();
                    let app_err = s2n_err.application_error().unwrap();
                    let io_err = app_err.downcast_ref::<std::io::Error>().unwrap();
                    let _custom_err = io_err
                        .get_ref()
                        .unwrap()
                        .downcast_ref::<CustomError>()
                        .unwrap();
                    break;
                }
                Poll::Pending => continue,
            }
        }
        assert_eq!(wake_count, 0);

        Ok(())
    }

    #[test]
    fn failing_client_hello_callback_async() -> Result<(), Error> {
        let (waker, wake_count) = new_count_waker();
        let config = {
            let mut config = config_builder(&security::DEFAULT_TLS13)?;
            config.set_client_hello_callback(FailingAsyncCHHandler)?;
            config.build()?
        };

        let server = {
            // create and configure a server connection
            let mut server = crate::connection::Connection::new_server();
            server.set_config(config.clone())?;
            server.set_waker(Some(&waker))?;
            Harness::new(server)
        };

        let client = {
            // create a client connection
            let mut client = crate::connection::Connection::new_client();
            client.set_config(config)?;
            Harness::new(client)
        };

        let mut pair = Pair::new(server, client);
        loop {
            match pair.poll() {
                Poll::Ready(result) => {
                    let err = result.expect_err("handshake should fail");

                    // the underlying error should be the custom error the application provided
                    let s2n_err = err.downcast_ref::<crate::error::Error>().unwrap();
                    let app_err = s2n_err.application_error().unwrap();
                    let io_err = app_err.downcast_ref::<std::io::Error>().unwrap();
                    let _custom_err = io_err
                        .get_ref()
                        .unwrap()
                        .downcast_ref::<CustomError>()
                        .unwrap();
                    break;
                }
                Poll::Pending => continue,
            }
        }
        // assert that the async future returned Poll::Pending once
        assert_eq!(wake_count, 1);

        Ok(())
    }

    #[test]
    fn client_hello_callback_async() -> Result<(), Error> {
        let (waker, wake_count) = new_count_waker();
        let require_pending_count = 10;
        let handle = MockClientHelloHandler::new(require_pending_count);
        let config = {
            let mut config = config_builder(&security::DEFAULT_TLS13)?;
            config.set_client_hello_callback(handle.clone())?;
            // multiple calls to set_client_hello_callback should succeed
            config.set_client_hello_callback(handle.clone())?;
            config.build()?
        };

        let server = {
            // create and configure a server connection
            let mut server = crate::connection::Connection::new_server();
            server.set_config(config.clone())?;
            server.set_waker(Some(&waker))?;
            Harness::new(server)
        };

        let client = {
            // create a client connection
            let mut client = crate::connection::Connection::new_client();
            client.set_config(config)?;
            Harness::new(client)
        };

        let pair = Pair::new(server, client);

        poll_tls_pair(pair);
        // confirm that the callback returned Pending `require_pending_count` times
        assert_eq!(wake_count, require_pending_count);
        // confirm that the callback was invoked one more time than `require_pending_count`
        assert_eq!(
            handle.invoked.load(Ordering::SeqCst),
            require_pending_count + 1
        );

        Ok(())
    }

    #[test]
    fn client_hello_callback_sync() -> Result<(), Error> {
        let (waker, wake_count) = new_count_waker();
        #[derive(Clone)]
        struct ClientHelloSyncCallback(Arc<AtomicUsize>);
        impl ClientHelloSyncCallback {
            fn new() -> Self {
                ClientHelloSyncCallback(Arc::new(AtomicUsize::new(0)))
            }
            fn count(&self) -> usize {
                self.0.load(Ordering::Relaxed)
            }
        }
        impl ClientHelloCallback for ClientHelloSyncCallback {
            fn on_client_hello(
                &self,
                connection: &mut crate::connection::Connection,
            ) -> Result<Option<Pin<Box<dyn ConnectionFuture>>>, crate::error::Error> {
                // Test that the config can be changed
                connection
                    .set_config(build_config(&security::DEFAULT_TLS13).unwrap())
                    .unwrap();

                // Test that server_name_extension_used can be invoked
                connection.server_name_extension_used();

                self.0.fetch_add(1, Ordering::Relaxed);

                // returning `None` indicates that the client_hello callback is
                // finished and the handshake can proceed.
                Ok(None)
            }
        }
        let callback = ClientHelloSyncCallback::new();

        let config = {
            let mut config = config_builder(&security::DEFAULT_TLS13)?;
            config.set_client_hello_callback(callback.clone())?;
            config.build()?
        };

        let server = {
            // create and configure a server connection
            let mut server = crate::connection::Connection::new_server();
            server.set_config(config.clone())?;
            server.set_waker(Some(&waker))?;
            Harness::new(server)
        };

        let client = {
            // create a client connection
            let mut client = crate::connection::Connection::new_client();
            client.set_config(config)?;
            Harness::new(client)
        };

        let pair = Pair::new(server, client);

        assert_eq!(callback.count(), 0);
        poll_tls_pair(pair);
        assert_eq!(callback.count(), 1);
        assert_eq!(wake_count, 0);
        Ok(())
    }

    #[test]
    fn new_security_policy() -> Result<(), Error> {
        use crate::security::Policy;

        let policy = Policy::from_version("default")?;
        config_builder(&policy)?;
        Ok(())
    }

    #[test]
    fn trust_location() -> Result<(), Error> {
        let pem_dir = Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/../../../tests/pems"));
        let mut cert = pem_dir.to_path_buf();
        cert.push("rsa_4096_sha512_client_cert.pem");
        let mut key = pem_dir.to_path_buf();
        key.push("rsa_4096_sha512_client_key.pem");

        let mut builder = crate::config::Builder::new();
        builder.set_security_policy(&security::DEFAULT_TLS13)?;
        builder.set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?;
        builder.load_pem(&fs::read(&cert)?, &fs::read(&key)?)?;
        builder.trust_location(Some(&cert), None)?;

        establish_connection(builder.build()?);
        Ok(())
    }

    /// `trust_location()` calls `s2n_config_set_verification_ca_location()`, which has the legacy behavior
    /// of enabling OCSP on clients. Since we do not want to replicate that behavior in the Rust bindings,
    /// this test verifies that `trust_location()` does not turn on OCSP. It also verifies that turning
    /// on OCSP explicitly still works when `trust_location()` is called.
    #[test]
    fn trust_location_does_not_change_ocsp_status() -> Result<(), Error> {
        let pem_dir = Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/../../../tests/pems"));
        let mut cert = pem_dir.to_path_buf();
        cert.push("rsa_4096_sha512_client_cert.pem");
        let mut key = pem_dir.to_path_buf();
        key.push("rsa_4096_sha512_client_key.pem");

        const OCSP_IANA_EXTENSION_ID: u16 = 5;

        for enable_ocsp in [true, false] {
            let config = {
                let mut config = crate::config::Builder::new();

                if enable_ocsp {
                    config.enable_ocsp()?;
                }

                config.set_security_policy(&security::DEFAULT_TLS13)?;
                config.set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?;
                config.set_client_hello_callback(HasExtensionClientHelloHandler {
                    // This client hello handler will check for the OCSP extension
                    extension_iana: OCSP_IANA_EXTENSION_ID,
                    extension_expected: enable_ocsp,
                })?;
                config.load_pem(&fs::read(&cert)?, &fs::read(&key)?)?;
                config.trust_location(Some(&cert), None)?;
                config.build()?
            };

            let mut pair = tls_pair(config);
            pair.server
                .0
                .connection_mut()
                .set_waker(Some(&noop_waker()))?;

            poll_tls_pair(pair);
        }
        Ok(())
    }

    #[test]
    fn connection_level_verify_host_callback() -> Result<(), Error> {
        let reject_config = {
            let keypair = CertKeyPair::default();
            let mut config = crate::config::Builder::new();
            // configure the config VerifyHostNameCallback to reject all certificates
            config.set_verify_host_callback(RejectAllCertificatesHandler {})?;
            config.set_security_policy(&security::DEFAULT_TLS13)?;
            config.load_pem(keypair.cert(), keypair.key())?;
            config.trust_pem(keypair.cert())?;
            config.set_client_auth_type(ClientAuthType::Required)?;
            config.build()?
        };

        // confirm that default connection establishment fails
        let mut pair = tls_pair(reject_config.clone());
        assert!(poll_tls_pair_result(&mut pair).is_err());

        // confirm that overriding the verify_host_callback on connection causes
        // the handshake to succeed
        pair = tls_pair(reject_config);
        pair.server
            .0
            .connection
            .set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})
            .unwrap();
        pair.client
            .0
            .connection
            .set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})
            .unwrap();
        assert!(poll_tls_pair_result(&mut pair).is_ok());

        Ok(())
    }

    #[test]
    fn no_client_auth() -> Result<(), Error> {
        use crate::enums::ClientAuthType;

        let config = {
            let mut config = config_builder(&security::DEFAULT_TLS13)?;
            config.set_client_auth_type(ClientAuthType::None)?;
            config.build()?
        };

        let server = {
            let mut server = crate::connection::Connection::new_server();
            server.set_config(config.clone())?;
            Harness::new(server)
        };

        let client = {
            let mut client = crate::connection::Connection::new_client();
            client.set_config(config)?;
            Harness::new(client)
        };

        let pair = Pair::new(server, client);
        let pair = poll_tls_pair(pair);
        let server = pair.server.0.connection;
        let client = pair.client.0.connection;

        for conn in [server, client] {
            assert!(!conn.client_cert_used());
            let cert = conn.client_cert_chain_bytes()?;
            assert!(cert.is_none());
            let sig_alg = conn.selected_client_signature_algorithm()?;
            assert!(sig_alg.is_none());
            let hash_alg = conn.selected_client_hash_algorithm()?;
            assert!(hash_alg.is_none());
        }

        Ok(())
    }

    #[test]
    fn client_auth() -> Result<(), Error> {
        use crate::enums::ClientAuthType;

        let config = {
            let mut config = config_builder(&security::DEFAULT_TLS13)?;
            config.set_client_auth_type(ClientAuthType::Optional)?;
            config.build()?
        };

        let server = {
            let mut server = crate::connection::Connection::new_server();
            server.set_config(config.clone())?;
            Harness::new(server)
        };

        let client = {
            let mut client = crate::connection::Connection::new_client();
            client.set_config(config)?;
            Harness::new(client)
        };

        let pair = Pair::new(server, client);
        let pair = poll_tls_pair(pair);
        let server = pair.server.0.connection;
        let client = pair.client.0.connection;

        let cert = server.client_cert_chain_bytes()?;
        assert!(cert.is_some());
        assert!(!cert.unwrap().is_empty());

        for conn in [server, client] {
            assert!(conn.client_cert_used());
            let sig_alg = conn.selected_client_signature_algorithm()?;
            assert!(sig_alg.is_some());
            let hash_alg = conn.selected_client_hash_algorithm()?;
            assert!(hash_alg.is_some());
        }

        Ok(())
    }

    #[test]
    fn system_certs_loaded_by_default() {
        let keypair = CertKeyPair::default();

        // Load the server certificate into the trust store by overriding the OpenSSL default
        // certificate location.
        temp_env::with_var("SSL_CERT_FILE", Some(keypair.cert_path()), || {
            let mut builder = Builder::new();
            builder
                .load_pem(keypair.cert(), keypair.key())
                .unwrap()
                .set_security_policy(&security::DEFAULT_TLS13)
                .unwrap()
                .set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})
                .unwrap();

            let config = builder.build().unwrap();
            establish_connection(config);
        });
    }

    #[test]
    fn disable_loading_system_certs() {
        let keypair = CertKeyPair::default();

        // Load the server certificate into the trust store by overriding the OpenSSL default
        // certificate location.
        temp_env::with_var("SSL_CERT_FILE", Some(keypair.cert_path()), || {
            let mut builder = Builder::new();
            builder
                .load_pem(keypair.cert(), keypair.key())
                .unwrap()
                .set_security_policy(&security::DEFAULT_TLS13)
                .unwrap()
                .set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})
                .unwrap();

            // Disable loading system certificates
            builder.with_system_certs(false).unwrap();

            let config = builder.build().unwrap();
            let mut config_with_system_certs = config.clone();

            let mut pair = tls_pair(config);

            // System certificates should not be loaded into the trust store. The handshake
            // should fail since the certificate should not be trusted.
            assert!(poll_tls_pair_result(&mut pair).is_err());

            // The handshake should succeed after trusting the certificate.
            unsafe {
                s2n_tls_sys::s2n_config_load_system_certs(config_with_system_certs.as_mut_ptr());
            }
            establish_connection(config_with_system_certs);
        });
    }
}
aws-crt-python-0.20.4+dfsg/crt/s2n/cmake/000077500000000000000000000000001456575232400200025ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/cmake/modules/000077500000000000000000000000001456575232400214525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/cmake/modules/Findcrypto.cmake000066400000000000000000000075001456575232400245770ustar00rootroot00000000000000# - Try to find LibCrypto include dirs and libraries
#
# Usage of this module as follows:
#
#     find_package(crypto)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# Variables defined by this module:
#
#  crypto_FOUND             System has libcrypto, include and library dirs found
#  crypto_INCLUDE_DIR       The crypto include directories.
#  crypto_LIBRARY           The crypto library, depending on the value of BUILD_SHARED_LIBS.
#  crypto_SHARED_LIBRARY    The path to libcrypto.so
#  crypto_STATIC_LIBRARY    The path to libcrypto.a
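#
# Example usage from a consuming CMakeLists.txt (the target name below is illustrative):
#
#     find_package(crypto REQUIRED)
#     target_link_libraries(my_app PRIVATE AWS::crypto)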

# The next branch exists purely for compatibility with CMake versions older than 3.15. Please do not remove it
# before we baseline on a newer version. Older CMake does not like duplicate target declarations, so work around
# that by checking whether the target is already defined first.
if (TARGET crypto OR TARGET AWS::crypto)
    if (TARGET crypto)
        set(TARGET_NAME "crypto")
    else()
        set(TARGET_NAME "AWS::crypto")
    endif()

    get_target_property(crypto_INCLUDE_DIR ${TARGET_NAME} INTERFACE_INCLUDE_DIRECTORIES)
    message(STATUS "S2N found target: ${TARGET_NAME}")
    message(STATUS "crypto Include Dir: ${crypto_INCLUDE_DIR}")
    set(CRYPTO_FOUND true)
    set(crypto_FOUND true)
else()
    find_path(crypto_INCLUDE_DIR
        NAMES openssl/crypto.h
        HINTS
        "${CMAKE_PREFIX_PATH}"
        "${CMAKE_INSTALL_PREFIX}"
        PATH_SUFFIXES include
    )

    find_library(crypto_SHARED_LIBRARY
        NAMES libcrypto.so libcrypto.dylib
        HINTS
        "${CMAKE_PREFIX_PATH}"
        "${CMAKE_INSTALL_PREFIX}"
        PATH_SUFFIXES build/crypto build lib64 lib
    )

    find_library(crypto_STATIC_LIBRARY
        NAMES libcrypto.a
        HINTS
        "${CMAKE_PREFIX_PATH}"
        "${CMAKE_INSTALL_PREFIX}"
        PATH_SUFFIXES build/crypto build lib64 lib
    )

    if (NOT crypto_LIBRARY)
        if (BUILD_SHARED_LIBS OR S2N_USE_CRYPTO_SHARED_LIBS)
            if (crypto_SHARED_LIBRARY)
                set(crypto_LIBRARY ${crypto_SHARED_LIBRARY})
            else()
                set(crypto_LIBRARY ${crypto_STATIC_LIBRARY})
            endif()
        else()
            if (crypto_STATIC_LIBRARY)
               set(crypto_LIBRARY ${crypto_STATIC_LIBRARY})
            else()
               set(crypto_LIBRARY ${crypto_SHARED_LIBRARY})
            endif()
        endif()
    endif()

    include(FindPackageHandleStandardArgs)
    find_package_handle_standard_args(crypto DEFAULT_MSG
        crypto_LIBRARY
        crypto_INCLUDE_DIR
    )

    mark_as_advanced(
        crypto_ROOT_DIR
        crypto_INCLUDE_DIR
        crypto_LIBRARY
        crypto_SHARED_LIBRARY
        crypto_STATIC_LIBRARY
    )

    # Some versions of CMake have an esoteric bug around capitalization differences between
    # find_dependency and find_package. Avoid it here by checking and
    # setting both variables.
    if(CRYPTO_FOUND OR crypto_FOUND)
        set(CRYPTO_FOUND true)
        set(crypto_FOUND true)

        message(STATUS "LibCrypto Include Dir: ${crypto_INCLUDE_DIR}")
        message(STATUS "LibCrypto Shared Lib:  ${crypto_SHARED_LIBRARY}")
        message(STATUS "LibCrypto Static Lib:  ${crypto_STATIC_LIBRARY}")
        if (NOT TARGET crypto AND
            (EXISTS "${crypto_LIBRARY}")
        )
            set(THREADS_PREFER_PTHREAD_FLAG ON)
            find_package(Threads REQUIRED)
            add_library(AWS::crypto UNKNOWN IMPORTED)
            set_target_properties(AWS::crypto PROPERTIES
                    INTERFACE_INCLUDE_DIRECTORIES "${crypto_INCLUDE_DIR}")
            set_target_properties(AWS::crypto PROPERTIES
                    IMPORTED_LINK_INTERFACE_LANGUAGES "C"
                    IMPORTED_LOCATION "${crypto_LIBRARY}")
            add_dependencies(AWS::crypto Threads::Threads)
        endif()
    endif()

endif()
aws-crt-python-0.20.4+dfsg/crt/s2n/cmake/s2n-config.cmake000066400000000000000000000014751456575232400227600ustar00rootroot00000000000000include(CMakeFindDependencyMacro)

if (NOT MSVC)
    set(THREADS_PREFER_PTHREAD_FLAG ON)
    find_package(Threads REQUIRED)
endif()

list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/modules")
find_dependency(crypto)

# Allow static or shared lib to be used.
# If both are installed, choose based on BUILD_SHARED_LIBS.
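# For example, a consumer that configures with -DBUILD_SHARED_LIBS=ON will pick up the
# shared s2n targets when they are installed, and fall back to the static targets otherwise.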
if (BUILD_SHARED_LIBS)
    if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared")
        include(${CMAKE_CURRENT_LIST_DIR}/shared/@PROJECT_NAME@-targets.cmake)
    else()
        include(${CMAKE_CURRENT_LIST_DIR}/static/@PROJECT_NAME@-targets.cmake)
    endif()
else()
    if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static")
        include(${CMAKE_CURRENT_LIST_DIR}/static/@PROJECT_NAME@-targets.cmake)
    else()
        include(${CMAKE_CURRENT_LIST_DIR}/shared/@PROJECT_NAME@-targets.cmake)
    endif()
endif()

aws-crt-python-0.20.4+dfsg/crt/s2n/cmake/toolchains/000077500000000000000000000000001456575232400221455ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/cmake/toolchains/32-bit.toolchain000066400000000000000000000007611456575232400250530ustar00rootroot00000000000000set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_VERSION 1)
set(CMAKE_SYSTEM_PROCESSOR "x86")

# we explicitly require clang because GCC 11.3.0 has false positives on the
# stringop-overflow check that cause the build to fail.
set(CMAKE_C_COMPILER clang)
set(CMAKE_CXX_COMPILER clang++)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32" CACHE STRING "c++ flags")
set(CMAKE_C_FLAGS   "${CMAKE_C_FLAGS} -m32" CACHE STRING "c flags")
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -m32" CACHE STRING "asm flags")
aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/000077500000000000000000000000001456575232400206545ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/README.md000066400000000000000000000030551456575232400221360ustar00rootroot00000000000000# Docker Image Structure
The codebuild specifications are run on a custom docker images that have the test dependencies installed. The docker image structure is described below.

### libcrypto
Various libcryptos are installed to `/usr/local/$LIBCRYPTO` directories. For example
```
# non-exhaustive list
/usr/local/openssl-1.0.2/lib/libcrypto.a
/usr/local/openssl-1.0.2/lib/libcrypto.so
/usr/local/openssl-1.0.2/lib/libcrypto.so.1.0.0
/usr/local/openssl-1.0.2/lib/pkgconfig/libcrypto.pc
/usr/local/openssl-3.0/lib64/libcrypto.a
/usr/local/openssl-3.0/lib64/libcrypto.so.3
/usr/local/openssl-3.0/lib64/libcrypto.so
/usr/local/openssl-3.0/lib64/pkgconfig/libcrypto.pc
/usr/local/boringssl/lib/libcrypto.so
/usr/local/awslc/lib/libcrypto.a
/usr/local/awslc/lib/libcrypto.so
```

Packages installed from the `apt` package manager can generally be found in `/usr/lib`. For example, our 32-bit build uses the 32-bit `i386` libcrypto, and its artifacts are located at
```
/usr/lib/i386-linux-gnu/libcrypto.a
/usr/lib/i386-linux-gnu/libcrypto.so.3
/usr/lib/i386-linux-gnu/libcrypto.so
/usr/lib/i386-linux-gnu/pkgconfig/libcrypto.pc
```

When the docker image is available locally, the structure can be easily examined by attaching an interactive terminal to the container with the following command
```
docker run --entrypoint /bin/bash -it --privileged <image_id>
```

Then the `find` command can be used to look at the various artifacts that are available.
```
sudo find / -name libcrypto* # list all libcrypto artifacts
```
or
```
sudo find / -name clang* # find all clang binaries
```aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/000077500000000000000000000000001456575232400214245ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/KWStyle.xml000066400000000000000000000010671456575232400235140ustar00rootroot00000000000000





    256
    
    
        [A-Za-z=_+]
        512
    

aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/000077500000000000000000000000001456575232400227275ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/apache2.conf000066400000000000000000000157351456575232400251140ustar00rootroot00000000000000# This is the main Apache server configuration file.  It contains the
# configuration directives that give the server its instructions.
# See http://httpd.apache.org/docs/2.4/ for detailed information about
# the directives and /usr/share/doc/apache2/README.Debian about Debian specific
# hints.
#
#
# Summary of how the Apache 2 configuration works in Debian:
# The Apache 2 web server configuration in Debian is quite different to
# upstream's suggested way to configure the web server. This is because Debian's
# default Apache2 installation attempts to make adding and removing modules,
# virtual hosts, and extra configuration directives as flexible as possible, in
# order to make automating the changes and administering the server as easy as
# possible.

# It is split into several files forming the configuration hierarchy outlined
# below, all located in the /etc/apache2/ directory:
#
#	/etc/apache2/
#	|-- apache2.conf
#	|	`--  ports.conf
#	|-- mods-enabled
#	|	|-- *.load
#	|	`-- *.conf
#	|-- conf-enabled
#	|	`-- *.conf
# 	`-- sites-enabled
#	 	`-- *.conf
#
#
# * apache2.conf is the main configuration file (this file). It puts the pieces
#   together by including all remaining configuration files when starting up the
#   web server.
#
# * ports.conf is always included from the main configuration file. It is
#   supposed to determine listening ports for incoming connections which can be
#   customized anytime.
#
# * Configuration files in the mods-enabled/, conf-enabled/ and sites-enabled/
#   directories contain particular configuration snippets which manage modules,
#   global configuration fragments, or virtual host configurations,
#   respectively.
#
#   They are activated by symlinking available configuration files from their
#   respective *-available/ counterparts. These should be managed by using our
#   helpers a2enmod/a2dismod, a2ensite/a2dissite and a2enconf/a2disconf. See
#   their respective man pages for detailed information.
#
# * The binary is called apache2. Due to the use of environment variables, in
#   the default configuration, apache2 needs to be started/stopped with
#   /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not
#   work with the default configuration.


# Global configuration
#

#
# ServerRoot: The top of the directory tree under which the server's
# configuration, error, and log files are kept.
#
# NOTE!  If you intend to place this on an NFS (or otherwise network)
# mounted filesystem then please read the Mutex documentation (available
# at <URL:http://httpd.apache.org/docs/2.4/mod/core.html#mutex>);
# you will save yourself a lot of trouble.
#
# Do NOT add a slash at the end of the directory path.
#
ServerRoot ${APACHE_SERVER_ROOT}

#
# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
#
#Mutex file:${APACHE_LOCK_DIR} default

#
# The directory where shm and other runtime files will be stored.
#
DefaultRuntimeDir ${APACHE_RUN_DIR}

#
# PidFile: The file in which the server should record its process
# identification number when it starts.
# This needs to be set in /etc/apache2/envvars
#
PidFile ${APACHE_PID_FILE}

#
# Timeout: The number of seconds before receives and sends time out.
#
Timeout 60

#
# KeepAlive: Whether or not to allow persistent connections (more than
# one request per connection). Set to "Off" to deactivate.
#
KeepAlive On

#
# MaxKeepAliveRequests: The maximum number of requests to allow
# during a persistent connection. Set to 0 to allow an unlimited amount.
# We recommend you leave this number high, for maximum performance.
#
MaxKeepAliveRequests 100

#
# KeepAliveTimeout: Number of seconds to wait for the next request from the
# same client on the same connection.
#
KeepAliveTimeout 5


# These need to be set in /etc/apache2/envvars
User ${APACHE_RUN_USER}
Group ${APACHE_RUN_GROUP}

#
# HostnameLookups: Log the names of clients or just their IP addresses
# e.g., www.apache.org (on) or 204.62.129.132 (off).
# The default is off because it'd be overall better for the net if people
# had to knowingly turn this feature on, since enabling it means that
# each client request will result in AT LEAST one lookup request to the
# nameserver.
#
HostnameLookups Off

# ErrorLog: The location of the error log file.
# If you do not specify an ErrorLog directive within a <VirtualHost>
# container, error messages relating to that virtual host will be
# logged here.  If you *do* define an error logfile for a <VirtualHost>
# container, that host's errors will be logged there and not here.
#
ErrorLog ${APACHE_LOG_DIR}/error.log

#
# LogLevel: Control the severity of messages logged to the error_log.
# Available values: trace8, ..., trace1, debug, info, notice, warn,
# error, crit, alert, emerg.
# It is also possible to configure the log level for particular modules, e.g.
# "LogLevel info ssl:warn"
#
LogLevel warn

# Include module configuration:
IncludeOptional mods-enabled/*.load
IncludeOptional mods-enabled/*.conf

# Include list of ports to listen on
Include ports.conf


# Sets the default security model of the Apache2 HTTPD server. It does
# not allow access to the root filesystem outside of /usr/share and /var/www.
# The former is used by web applications packaged in Debian,
# the latter may be used for local directories served by the web server. If
# your system is serving content from a sub-directory in /srv you must allow
# access here, or in any related virtual host.


<Directory />
	Options FollowSymLinks
	AllowOverride None
	Require all denied
</Directory>

<Directory /usr/share>
	AllowOverride None
	Require all granted
</Directory>

<Directory /var/www/>
	Options Indexes FollowSymLinks
	AllowOverride None
	Require all granted
</Directory>


# AccessFileName: The name of the file to look for in each directory
# for additional configuration directives.  See also the AllowOverride
# directive.
#
AccessFileName .htaccess

#
# The following lines prevent .htaccess and .htpasswd files from being
# viewed by Web clients.
#
<FilesMatch "^\.ht">
	Require all denied
</FilesMatch>


#
# The following directives define some format nicknames for use with
# a CustomLog directive.
#
# These deviate from the Common Log Format definitions in that they use %O
# (the actual bytes sent including headers) instead of %b (the size of the
# requested file), because the latter makes it impossible to detect partial
# requests.
#
# Note that the use of %{X-Forwarded-For}i instead of %h is not recommended.
# Use mod_remoteip instead.
#
LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %O" common
LogFormat "%{Referer}i -> %U" referer
LogFormat "%{User-agent}i" agent

# Include of directories ignores editors' and dpkg's backup files,
# see README.Debian for details.

# Include generic snippets of statements
IncludeOptional conf-enabled/*.conf

# Include the virtual host configurations:
IncludeOptional sites-enabled/*.conf

# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/ports.conf000066400000000000000000000003351456575232400247460ustar00rootroot00000000000000# Server ports should not conflict with the range of ports used for
# integration tests (8000 to 30000)

Define RENEGOTIATE_SERVER_PORT 7777
Listen ${RENEGOTIATE_SERVER_PORT}

# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/sites-enabled/000077500000000000000000000000001456575232400254465ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/sites-enabled/renegotiate.conf000066400000000000000000000037171456575232400306330ustar00rootroot00000000000000
	# The ServerName directive sets the request scheme, hostname and port that
	# the server uses to identify itself. This is used when creating
	# redirection URLs. In the context of virtual hosts, the ServerName
	# specifies what hostname must appear in the request's Host: header to
	# match this virtual host. For the default virtual host (this file) this
	# value is not decisive as it is used as a last resort host regardless.
	# However, you must set it for any further virtual host explicitly.
	ServerName localhost

	ServerAdmin webmaster@localhost
	DocumentRoot ${APACHE_SERVER_ROOT}/www/html

	ErrorLog ${APACHE_LOG_DIR}/error.log
	CustomLog ${APACHE_LOG_DIR}/access.log combined

	#   SSL Engine Switch:
	#   Enable/Disable SSL for this virtual host.
	SSLEngine on

	SSLCertificateFile ${APACHE_CERT_DIR}/apache_server_cert.pem
	SSLCertificateKeyFile ${APACHE_CERT_DIR}/apache_server_key.pem

	#   Certificate Authority (CA):
	#   Set the CA certificate verification path where to find CA
	#   certificates for client authentication or alternatively one
	#   huge file containing all of them (file must be PEM encoded)
	#   Note: Inside SSLCACertificatePath you need hash symlinks
	#		 to point to the certificate files. Use the provided
	#		 Makefile to update the hash symlinks after changes.
	SSLCACertificateFile ${APACHE_CERT_DIR}/apache_client_cert.pem

	SSLProtocol -ALL +TLSv1.2
	SSLHonorCipherOrder On
	SSLCipherSuite HIGH:!aNULL:!MD5
	SSLCompression Off
	SSLInsecureRenegotiation Off

	Alias /change_cipher_suite ${APACHE_SERVER_ROOT}/www/change_cipher_suite
	<Directory ${APACHE_SERVER_ROOT}/www/change_cipher_suite>
		Require all granted
		SSLCipherSuite AES128-SHA
	</Directory>

	Alias /mutual_auth ${APACHE_SERVER_ROOT}/www/mutual_auth
	<Directory ${APACHE_SERVER_ROOT}/www/mutual_auth>
		Require all granted
		SSLVerifyClient require
		SSLVerifyDepth  10
	</Directory>



# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/www/000077500000000000000000000000001456575232400235535ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/www/change_cipher_suite/000077500000000000000000000000001456575232400275435ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/www/change_cipher_suite/index.html000066400000000000000000000001501456575232400315340ustar00rootroot00000000000000

    Change Cipher Suite


    

Success.

aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/www/html/000077500000000000000000000000001456575232400245175ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/www/html/index.html000077500000000000000000000007461456575232400265260ustar00rootroot00000000000000 Renegotiation Testing Server

Welcome to the s2n renegotiation testing server! See the following endpoints:

aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/www/mutual_auth/000077500000000000000000000000001456575232400261035ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/apache2/www/mutual_auth/index.html000066400000000000000000000001401456575232400300730ustar00rootroot00000000000000 Mutual Auth

Success.

aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/build_aws_crt_cpp.sh000077500000000000000000000025361456575232400254540ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex pushd "$(pwd)" usage() { echo "build_aws_crt_cpp.sh build_dir install_dir" exit 1 } if [ "$#" -ne "2" ]; then usage fi source codebuild/bin/s2n_setup_env.sh BUILD_DIR=$1 INSTALL_DIR=$2 mkdir -p "$BUILD_DIR/s2n" # In case $BUILD_DIR is a subdirectory of current directory for file in *;do test "$file" != "$BUILD_DIR" && cp -r "$file" "$BUILD_DIR/s2n";done cd "$BUILD_DIR" git clone --depth 1 --shallow-submodules --recurse-submodules https://github.com/awslabs/aws-crt-cpp.git # Replace S2N rm -r aws-crt-cpp/crt/s2n mv s2n aws-crt-cpp/crt/ cmake ./aws-crt-cpp -Bbuild -GNinja -DBUILD_DEPS=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" ninja -C ./build install CTEST_OUTPUT_ON_FAILURE=1 CTEST_PARALLEL_LEVEL=$(nproc) ninja -C ./build test popd exit 0 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/clang_format_changed_files.sh000077500000000000000000000016431456575232400272560ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # Get a list of changed files REMOTE="${1:-origin}" BRANCH="${2:-main}" changed_files=$(git diff "$REMOTE"/"$BRANCH" --name-only ) # Run clang-format on each changed file for file in $changed_files do if [[ $file == *.c || $file == *.h ]]; then # Only run on .c and .h files echo "clang formatting ${file}" clang-format -i $file fi done aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/copyright_mistake_scanner.sh000077500000000000000000000023171456575232400272240ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
# set -e S2N_FILES=$(find "$PWD" -type f -name "s2n_*.[ch]") S2N_FILES+=" " S2N_FILES+=$(find "$PWD"/codebuild/ -type f -name "*.sh") S2N_FILES+=" " S2N_FILES+=$(find "$PWD"/tests/ -type f -name "*.sh") FAILED=0 for file in $S2N_FILES; do # The word "Copyright" should appear at least once in the first 3 lines of every file COUNT=`head -3 $file | grep "Copyright" | wc -l`; if [ "$COUNT" == "0" ]; then FAILED=1; echo "Copyright Check Failed: $file"; fi done if [ $FAILED == 1 ]; then printf "\\033[31;1mFAILED Copyright Check\\033[0m\\n" exit -1 else printf "\\033[32;1mPASSED Copyright Check\\033[0m\\n" fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/coverage_report.sh000077500000000000000000000021561456575232400251550ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. set -e # merge profiling data llvm-profdata merge -failure-mode=all -sparse tests/unit/ut_*.profraw -o merged.profdata # generate file-level summary llvm-cov report build/lib/libs2n.so \ -instr-profile=merged.profdata \ > coverage_summary.txt # convert llvm information to lcov format for genhtml llvm-cov export build/lib/libs2n.so \ -instr-profile=merged.profdata \ -format=lcov \ > unit_test_coverage.info # generate html report with annotated source files genhtml unit_test_coverage.info \ --branch-coverage \ -o coverage_report aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/cpp_style_comment_linter.sh000077500000000000000000000021141456575232400270620ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # S2N_FILES=$(find "$PWD" -type f -name "s2n*.[ch]") FAILED=0 for file in $S2N_FILES; do # There should be no c++ style comments: // RESULT=`grep -rnv '\*' $file | grep '\B\/\/.*$' | grep -v '\".*\"'`; if [ "${#RESULT}" != "0" ]; then FAILED=1; printf "\e[1;34mC++ Comments Check Failed in $file:\e[0m\n$RESULT\n\n"; fi done if [ $FAILED == 1 ]; then printf "\\033[31;1mFAILED C++ Comments Check\\033[0m\\n" exit -1 else printf "\\033[32;1mPASSED C++ Comments Check\\033[0m\\n" fiaws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/cppcheck_suppressions.txt000066400000000000000000000025251456575232400266060ustar00rootroot00000000000000// Message: (style:variableScope) The scope of the variable 'text' can be reduced. // Reason: Don't error for being able to reduce scope of variables in tests variableScope:tests/unit/* // cppcheck Message: (information:ConfigurationNotChecked) Skipping configuration 'SO_RCVLOWAT' since the value of 'SO_RCVLOWAT' is unknown. 
Use -D if you want to check it. You can use -U to skip it explicitly. // Reason: There are many Config options that aren't checked by Cppcheck, and it warns for each. Ignore these so that they don't clutter the output. ConfigurationNotChecked:bin/s2nd.c ConfigurationNotChecked:tls/s2n_x509_validator.c ConfigurationNotChecked:utils/s2n_socket.c // cppcheck Message: (style:redundantAssignment) Variable 'mock_time' is reassigned a value before the old one has been used. // Reason: s2n_config_set_monotonic_clock() takes a reference to mock_time so that whenever it's modified locally, the timer sees the update when it dereferences the pointer. redundantAssignment:tests/unit/s2n_timer_test.c // cppcheck Message: (style:knownConditionTrueFalse) Condition 's2n_libcrypto_awslc_api_version()<17' is always true // Reason: s2n_libcrypto_awslc_api_version() are implemented using macro's and for certain libcrypto's the preprocessor will produce a trivial function returning e.g. 1 always. knownConditionTrueFalse:crypto/s2n_libcrypto.caws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/criterion_baseline.sh000077500000000000000000000044141456575232400256260ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. set -eu source codebuild/bin/s2n_setup_env.sh source codebuild/bin/utils.sh # Disable PQ export S2N_NO_PQ=1 # Limit the number of child processes in the test run export RUST_BACKTRACE=1 export TOX_TEST_NAME="$INTEGV2_TEST" # There can be only one artifact config per batch job, # so we're scipting the baseline upload steps here. upload_artifacts(){ cd tests/integrationv2/target/criterion echo "Creating zip ${AWS_S3_PATH}" zip -r "${AWS_S3_PATH}" ./* aws s3 cp "${AWS_S3_PATH}" "${AWS_S3_URL}" echo "S3 upload complete" } if [ -d "third-party-src" ]; then echo "Not running against c.a.c." return 0 fi # setting LOCAL_TESTING disables a check for an existing baseline. if [ -z "${LOCAL_TESTING:-}" ]; then # Fetch creds and the latest release number. gh_login s2n_codebuild_PRs LATEST_RELEASE_VER=$(get_latest_release) # Build a specific filename for this release AWS_S3_PATH="integv2criterion_${INTEGV2_TEST}_${LATEST_RELEASE_VER}.zip" zip_count=$(aws s3 ls "${AWS_S3_URL}${AWS_S3_PATH}"|wc -l||true) # Only do the baseline if an artifact for the current release doesn't exist. if [ "$zip_count" -eq 0 ]; then echo "File ${AWS_S3_URL}${AWS_S3_PATH} not found" criterion_install_deps ORIGINAL_COMMIT=$(git rev-parse HEAD) git fetch --tags git checkout "$LATEST_RELEASE_VER" S2N_USE_CRITERION=baseline make -C tests/integrationv2 "$INTEGV2_TEST" upload_artifacts git reset --hard ${ORIGINAL_COMMIT} else echo "Found existing artifact for ${LATEST_RELEASE_VER}, not rebuilding." 
exit 0 fi else echo "Local testing enabled; baselining without checking s3" criterion_install_deps S2N_USE_CRITERION=baseline make -C tests/integrationv2 "$INTEGV2_TEST" fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/criterion_delta.sh000077500000000000000000000035301456575232400251330ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. set -eu source ./codebuild/bin/utils.sh # Disable PQ export S2N_NO_PQ=1 export AWS_S3_BUCKET="s3://s2n-tls-logs/" # Limit the number of child processes in the test run export RUST_BACKTRACE=1 export GIT_COMMIT=$(git log -n 1 --format="%h") export AWS_S3_REPORT_PATH="reports/${INTEGV2_TEST}/$(date +%Y%m%d_%H%M_${GIT_COMMIT})" # CodeBuild artifacts are too limited; # scipting the baseline download steps here. download_artifacts(){ mkdir -p ./tests/integrationv2/target/criterion || true echo "Downloading ${AWS_S3_BUCKET}${1}/${2}" pushd ./tests/integrationv2/target/criterion/ aws s3 cp "${AWS_S3_BUCKET}${1}/${2}" . unzip -o "${2}" echo "S3 download complete" popd } upload_report(){ cd tests/integrationv2/target/criterion echo "Uploading report to ${AWS_S3_BUCKET}/${AWS_S3_REPORT_PATH}" aws s3 sync . "${AWS_S3_BUCKET}${AWS_S3_REPORT_PATH}" echo "S3 upload complete" } # Fetch creds and the latest release number. gh_login s2n_codebuild_PRs LATEST_RELEASE_VER=$(get_latest_release) AWS_ZIPFILE="integv2criterion_${INTEGV2_TEST}_${LATEST_RELEASE_VER}.zip" AWS_S3_BASE_PATH="release" criterion_install_deps download_artifacts ${AWS_S3_BASE_PATH} ${AWS_ZIPFILE} echo "Current dir: $(pwd)" S2N_USE_CRITERION=delta make -C tests/integrationv2 "$INTEGV2_TEST" upload_report aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/format.sh000077500000000000000000000016641456575232400232620ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. set -e CLANG_NINE=$(which clang-format-9) CLANG_VER=${CLANG_NINE:-clang-format} for i in $(find . -not -path "./test-deps/*" -name '*.h' -or -name '*.c' -or -name '*.cpp'); do $CLANG_VER --verbose -i "$i" ; done if [[ `git status --porcelain` ]]; then echo "clang-format updated files, throwing an error" exit 255 else echo "No files touched" fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/grep_simple_mistakes.sh000077500000000000000000000243211456575232400261730ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
# You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. FAILED=0 ############################################# # Grep for any instances of raw memcpy() function. s2n code should instead be # using one of the *_ENSURE_MEMCPY macros. ############################################# S2N_FILES_ASSERT_NOT_USING_MEMCPY=$(find "$PWD" -type f -name "s2n*.[ch]" -not -path "*/tests/*") for file in $S2N_FILES_ASSERT_NOT_USING_MEMCPY; do RESULT_NUM_LINES=`grep 'memcpy(' $file | wc -l` if [ "${RESULT_NUM_LINES}" != 0 ]; then echo "Found ${RESULT_NUM_LINES} raw 'memcpy' calls in $file" FAILED=1 fi done ############################################# # Grep for any instances of raw memcmp() function. s2n code should instead be # using s2n_constant_time_equals() # # KNOWN_MEMCMP_USAGE is used to capture all known uses of memcmp and acts as a # safeguard against any new uses of memcmp. ############################################# S2N_FILES_ASSERT_NOT_USING_MEMCMP=$(find "$PWD" -type f -name "s2n*.[ch]" -not -path "*/tests/*" -not -path "*/bindings/*") declare -A KNOWN_MEMCMP_USAGE KNOWN_MEMCMP_USAGE["$PWD/crypto/s2n_rsa.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_early_data.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_kem.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_cipher_suites.c"]=3 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_server_hello.c"]=3 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_security_policies.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_psk.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_config.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_resume.c"]=2 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_connection.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/tls/s2n_protocol_preferences.c"]=1 KNOWN_MEMCMP_USAGE["$PWD/utils/s2n_map.c"]=3 KNOWN_MEMCMP_USAGE["$PWD/stuffer/s2n_stuffer_text.c"]=1 for file in $S2N_FILES_ASSERT_NOT_USING_MEMCMP; do # NOTE: this matches on 'memcmp', which will also match comments. However, there # are no uses of 'memcmp' in comments so we opt for this stricter check. RESULT_NUM_LINES=`grep -n 'memcmp' $file | wc -l` # set default KNOWN_MEMCMP_USAGE value [ -z "${KNOWN_MEMCMP_USAGE["$file"]}" ] && KNOWN_MEMCMP_USAGE["$file"]="0" # check if memcmp usage is 0 or a known value if [ "${RESULT_NUM_LINES}" != "${KNOWN_MEMCMP_USAGE["$file"]}" ]; then echo "Expected: ${KNOWN_MEMCMP_USAGE["$file"]} Found: ${RESULT_NUM_LINES} usage of 'memcmp' in $file" FAILED=1 fi done ############################################# # Assert that functions do not return -1 or S2N_ERR* codes directly. # To indicate failure, functions should use the S2N_ERROR* macros defined # in s2n_errno.h. 
############################################# S2N_FILES_ASSERT_RETURN=$(find "$PWD" -type f -name "s2n*.c" -not -path "*/tests/*" -not -path "*/docs/examples/*") for file in $S2N_FILES_ASSERT_RETURN; do RESULT_NEGATIVE_ONE=`grep -rn 'return -1;' $file` RESULT_S2N_ERR=`grep -rn 'return S2N_ERR*' $file` RESULT_S2N_FAIL=`grep -rn 'return S2N_FAIL*' $file` if [ "${#RESULT_NEGATIVE_ONE}" != "0" ]; then FAILED=1 printf "\e[1;34mGrep for 'return -1;' check failed in $file:\e[0m\n$RESULT_NEGATIVE_ONE\n\n" fi if [ "${#RESULT_S2N_ERR}" != "0" ]; then FAILED=1 printf "\e[1;34mGrep for 'return S2N_ERR*' check failed in $file:\e[0m\n$RESULT_S2N_ERR\n\n" fi if [ "${#RESULT_S2N_FAIL}" != "0" ]; then FAILED=1 printf "\e[1;34mGrep for 'return S2N_FAIL*' check failed in $file:\e[0m\n$RESULT_S2N_FAIL\n\n" fi done ############################################# # Detect any array size calculations that are not using the s2n_array_len() function. ############################################# S2N_FILES_ARRAY_SIZING_RETURN=$(find "$PWD" -type f -name "s2n*.c" -path "*") for file in $S2N_FILES_ARRAY_SIZING_RETURN; do RESULT_ARR_DIV=`grep -Ern 'sizeof\((.*)\) \/ sizeof\(\1\[0\]\)' $file` if [ "${#RESULT_ARR_DIV}" != "0" ]; then FAILED=1 printf "\e[1;34mUsage of 'sizeof(array) / sizeof(array[0])' check failed. Use s2n_array_len(array) instead in $file:\e[0m\n$RESULT_ARR_DIV\n\n" fi done ############################################# # Assert that all assignments from s2n_stuffer_raw_read() have a # notnull_check (or similar manual null check) on the same, or next, line. # The assertion is shallow; this doesn't guarantee that we're doing the # *correct* null check, just that we are doing *some* null check. ############################################# S2N_FILES_ASSERT_NOTNULL_CHECK=$(find "$PWD" -type f -name "s2n*.[ch]" -not -path "*/tests/*") for file in $S2N_FILES_ASSERT_NOTNULL_CHECK; do while read -r line_one; do # When called with the -A option, grep uses lines of "--" as delimiters. We ignore them. if [[ $line_one == "--" ]]; then continue fi read -r line_two # $line_one definitely contains an assignment from s2n_stuffer_raw_read(), # because that's what we grepped for. So verify that either $line_one or # $line_two contains a null check. null_check_regex="(.*(if|ENSURE).*=\ NULL)|(ENSURE_REF)" if [[ $line_one =~ $null_check_regex ]] || [[ $line_two =~ $null_check_regex ]]; then # Found a notnull_check continue else FAILED=1 printf "\e[1;34mFound a call to s2n_stuffer_raw_read without an ENSURE_REF in $file:\e[0m\n$line_one\n\n" fi done < <(grep -rnE -A 1 "=\ss2n_stuffer_raw_read\(.*\)" $file) done ############################################# # Assert that "index" is not a variable name. An "index" function exists in strings.h, and older compilers ( /dev/null ; fi # Download and Install LibFuzzer with latest clang if [[ "$TESTS" == "fuzz" || "$TESTS" == "ALL" ]]; then mkdir -p "$LIBFUZZER_INSTALL_DIR" || true PATH=$LATEST_CLANG_INSTALL_DIR/bin:$PATH codebuild/bin/install_libFuzzer.sh "$(mktemp -d)" "$LIBFUZZER_INSTALL_DIR" "$OS_NAME" ; fi # Download and Install Openssl 1.1.1 if [[ ("$S2N_LIBCRYPTO" == "openssl-1.1.1") || ( "$TESTS" == "integrationv2" || "$TESTS" == "ALL" ) ]]; then if [[ ! -x "$OPENSSL_1_1_1_INSTALL_DIR/bin/openssl" ]]; then mkdir -p "$OPENSSL_1_1_1_INSTALL_DIR"||true codebuild/bin/install_openssl_1_1_1.sh "$(mktemp -d)" "$OPENSSL_1_1_1_INSTALL_DIR" "$OS_NAME" > /dev/null ; fi fi # Download and Install Openssl 3.0 if [[ "$S2N_LIBCRYPTO" == "openssl-3.0" && ! 
-d "$OPENSSL_3_0_INSTALL_DIR" ]]; then mkdir -p "$OPENSSL_3_0_INSTALL_DIR" codebuild/bin/install_openssl_3_0.sh "$(mktemp -d)" "$OPENSSL_3_0_INSTALL_DIR" "$OS_NAME" > /dev/null ; fi # Download and Install Openssl 1.0.2 if [[ "$S2N_LIBCRYPTO" == "openssl-1.0.2" && ! -d "$OPENSSL_1_0_2_INSTALL_DIR" ]]; then mkdir -p "$OPENSSL_1_0_2_INSTALL_DIR"||true codebuild/bin/install_openssl_1_0_2.sh "$(mktemp -d)" "$OPENSSL_1_0_2_INSTALL_DIR" "$OS_NAME" > /dev/null ; fi # Download and Install the Openssl FIPS module and Openssl 1.0.2-fips if [[ "$S2N_LIBCRYPTO" == "openssl-1.0.2-fips" ]] && [[ ! -d "$OPENSSL_1_0_2_FIPS_INSTALL_DIR" ]]; then codebuild/bin/install_openssl_1_0_2_fips.sh "$(mktemp -d)" "$OPENSSL_1_0_2_FIPS_INSTALL_DIR" "$OS_NAME" ; fi # Download and Install LibreSSL if [[ "$S2N_LIBCRYPTO" == "libressl" && ! -d "$LIBRESSL_INSTALL_DIR" ]]; then mkdir -p "$LIBRESSL_INSTALL_DIR"||true codebuild/bin/install_libressl.sh "$(mktemp -d)" "$LIBRESSL_INSTALL_DIR" > /dev/null ; fi # Download and Install BoringSSL if [[ "$S2N_LIBCRYPTO" == "boringssl" && ! -d "$BORINGSSL_INSTALL_DIR" ]]; then codebuild/bin/install_boringssl.sh "$(mktemp -d)" "$BORINGSSL_INSTALL_DIR" > /dev/null ; fi # Download and Install AWS-LC if [[ "$S2N_LIBCRYPTO" == "awslc" && ! -d "$AWSLC_INSTALL_DIR" ]]; then codebuild/bin/install_awslc.sh "$(mktemp -d)" "$AWSLC_INSTALL_DIR" "0" > /dev/null ; fi if [[ "$S2N_LIBCRYPTO" == "awslc-fips" && ! -d "$AWSLC_FIPS_INSTALL_DIR" ]]; then codebuild/bin/install_awslc.sh "$(mktemp -d)" "$AWSLC_FIPS_INSTALL_DIR" "1" > /dev/null ; fi if [[ "$S2N_LIBCRYPTO" == "awslc-fips-2022" && ! -d "$AWSLC_FIPS_2022_INSTALL_DIR" ]]; then codebuild/bin/install_awslc_fips_2022.sh "$(mktemp -d)" "$AWSLC_FIPS_2022_INSTALL_DIR" > /dev/null ; fi if [[ "$TESTS" == "integrationv2" || "$TESTS" == "ALL" ]]; then # Install tox if [[ "$DISTRO" == "ubuntu" ]]; then if [[ ! -x `python3.9 -m tox --version` ]]; then python3.9 -m pip install tox fi else if [[ ! -x `which tox` ]]; then case "$DISTRO" in "amazon linux") yum install -y python3-pip python3 -m pip install --user tox ;; "apple") brew install python@3 python3 -m pip install --user tox ;; *) echo "Unkown platform $DISTRO trying to install tox on $OS_NAME $ARCH" exit 1 ;; esac fi fi if [[ ! -x "$OPENSSL_0_9_8_INSTALL_DIR/bin/openssl" ]]; then # Download and Install Openssl 0.9.8 mkdir -p "$OPENSSL_0_9_8_INSTALL_DIR"||true codebuild/bin/install_openssl_0_9_8.sh "$(mktemp -d)" "$OPENSSL_0_9_8_INSTALL_DIR" "$OS_NAME" > /dev/null ; fi if [[ ! -x "$GNUTLS_INSTALL_DIR/bin/gnutls-cli" ]]; then # Download and Install GnuTLS for integration tests mkdir -p "$GNUTLS_INSTALL_DIR"||true codebuild/bin/install_gnutls.sh "$(mktemp -d)" "$GNUTLS_INSTALL_DIR" > /dev/null ; fi if [[ ! -x "$GNUTLS37_INSTALL_DIR/bin/gnutls-cli" ]]; then # Download and Install GnuTLS for integration tests mkdir -p "$GNUTLS37_INSTALL_DIR"||true codebuild/bin/install_gnutls37.sh "$(mktemp -d)" "$GNUTLS37_INSTALL_DIR" > /dev/null ; fi if [[ ! -x "$OQS_OPENSSL_1_1_1_INSTALL_DIR/bin/openssl" ]]; then # Download and Install OQS OpenSSL for integration tests mkdir -p "$OQS_OPENSSL_1_1_1_INSTALL_DIR" ||true codebuild/bin/install_oqs_openssl_1_1_1.sh "$(mktemp -d)" "$OQS_OPENSSL_1_1_1_INSTALL_DIR" "$OS_NAME" | head -50 fi if [[ "$DISTRO" == "ubuntu" ]]; then # Install SSLyze for all Integration Tests on Ubuntu. # There is a nassl dependancy issue preventing this from working on on AL2 ARM (others?). if [[ "$S2N_NO_SSLYZE" != "true" ]]; then codebuild/bin/install_sslyze.sh fi if [[ ! 
-x "$APACHE2_INSTALL_DIR/apache2.conf" ]]; then codebuild/bin/install_apache2.sh codebuild/bin/apache2 "$APACHE2_INSTALL_DIR" fi fi fi # Install SAW, Z3, and Yices for formal verification if [[ "$SAW" == "true" || "$TESTS" == "ALL" ]]; then mkdir -p "$SAW_INSTALL_DIR"||true codebuild/bin/install_saw.sh "$(mktemp -d)" "$SAW_INSTALL_DIR" > /dev/null ; mkdir -p "$Z3_INSTALL_DIR"||true codebuild/bin/install_z3_yices.sh "$(mktemp -d)" "$Z3_INSTALL_DIR" > /dev/null ; fi if [[ ! -x `which cmake` ]]; then case "$DISTRO" in "ubuntu") apt-get -y install cmake ;; "amazon linux") yum install -y cmake3 update-alternatives --install /usr/bin/cmake cmake /usr/bin/cmake3 30 ;; "apple") brew install cmake ;; *) echo "Unknown platform for cmake." ;; esac fi if [[ "$TESTS" == "benchmark" || "$TESTS" == "ALL" ]]; then if [[ ! -x "$GB_INSTALL_DIR/lib/libbenchmark.a" ]]; then mkdir -p "$GB_INSTALL_DIR"||true codebuild/bin/install_googlebenchmark.sh "$(mktemp -d)" "$GB_INSTALL_DIR" "$OS_NAME" > /dev/null ; fi fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_gnutls.sh000077500000000000000000000046121456575232400250300ustar00rootroot00000000000000#!/bin/bash # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e source codebuild/bin/s2n_setup_env.sh usage() { echo "install_gnutls.sh build_dir install_dir os_name" exit 1 } if [ "$#" -ne "2" ]; then usage fi GNUTLS_BUILD_DIR=$1 GNUTLS_INSTALL_DIR=$2 source codebuild/bin/jobs.sh # libgmp is needed for libnettle case "$DISTRO" in "ubuntu") sudo apt-get -qq install libgmp3-dev -y ;; "amazon linux") sudo yum install -y gmp-devel ;; "darwin" ) # Installing an existing package is a "failure" in brew brew install gmp || true ;; *) echo "Invalid platform! $OS_NAME" usage ;; esac cd "$GNUTLS_BUILD_DIR" # libnettle is a dependency of GnuTLS # Originally from: https://ftp.gnu.org/gnu/nettle/nettle-3.3.tar.gz curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2017-08-29_nettle-3.3.tar.gz --output nettle-3.3.tar.gz tar -xzf nettle-3.3.tar.gz cd nettle-3.3 ./configure --prefix="$GNUTLS_INSTALL_DIR"/nettle make -j $JOBS make -j $JOBS install cd .. 
# Install GnuTLS # Originally from: ftp://ftp.gnutls.org/gcrypt/gnutls/v3.5/gnutls-3.5.5.tar.xz curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2017-08-29_gnutls-3.5.5.tar.xz --output gnutls-3.5.5.tar.xz tar -xJf gnutls-3.5.5.tar.xz cd gnutls-3.5.5 ./configure LD_FLAGS="-R$GNUTLS_INSTALL_DIR/nettle/lib -L$GNUTLS_INSTALL_DIR/nettle/lib -lnettle -lhogweed" \ NETTLE_LIBS="-R$GNUTLS_INSTALL_DIR/nettle/lib -L$GNUTLS_INSTALL_DIR/nettle/lib -lnettle" \ NETTLE_CFLAGS="-I$GNUTLS_INSTALL_DIR/nettle/include" \ HOGWEED_LIBS="-R$GNUTLS_INSTALL_DIR/nettle/lib -L$GNUTLS_INSTALL_DIR/nettle/lib -lhogweed" \ HOGWEED_CFLAGS="-I$GNUTLS_INSTALL_DIR/nettle/include" \ --without-p11-kit \ --with-included-libtasn1 \ --prefix="$GNUTLS_INSTALL_DIR" make -j $JOBS make -j $JOBS install aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_gnutls37.sh000077500000000000000000000040751456575232400252050ustar00rootroot00000000000000#!/bin/bash # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e source codebuild/bin/s2n_setup_env.sh usage() { echo "install_gnutls37.sh build_dir install_dir os_name" exit 1 } if [ "$#" -ne "2" ]; then usage fi GNUTLS_BUILD_DIR=$1 GNUTLS_INSTALL_DIR=$2 source codebuild/bin/jobs.sh # libgmp is needed for libnettle case "$DISTRO" in "ubuntu") sudo apt-get -qq install libgmp3-dev -y ;; "amazon linux") sudo yum install -y gmp-devel ;; "darwin" ) # Installing an existing package is a "failure" in brew brew install gmp || true ;; *) echo "Invalid platform! $OS_NAME" usage ;; esac cd "$GNUTLS_BUILD_DIR" # Originally from: https://ftp.gnu.org/gnu/nettle/ curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2021-01-04_nettle-3.7.tar.gz --output nettle-3.7.tar.gz tar -xzf nettle-3.7.tar.gz cd nettle-3.7 ./configure --prefix="$GNUTLS_INSTALL_DIR"/nettle \ --disable-openssl \ --enable-shared make -j $JOBS make -j $JOBS install cd .. # Install GnuTLS # Originally from: https://www.gnupg.org/ftp/gcrypt/gnutls/v3.7/ curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2022-01-18_gnutls-3.7.3.tar.xz --output gnutls-3.7.3.tar.xz tar -xJf gnutls-3.7.3.tar.xz cd gnutls-3.7.3 PKG_CONFIG_PATH="$GNUTLS_INSTALL_DIR"/nettle/lib/pkgconfig:$PKG_CONFIG_PATH \ ./configure --prefix="$GNUTLS_INSTALL_DIR" \ --without-p11-kit \ --with-included-libtasn1 \ --with-included-unistring make -j $JOBS make -j $JOBS install aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_googlebenchmark.sh000077500000000000000000000024261456575232400266440ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. 
See the License for the specific language governing # permissions and limitations under the License. # set -e usage() { echo "install_googlebenchmark.sh download_dir install_dir os_name" exit 1 } if [ "$#" -ne "3" ]; then usage fi GB_DOWNLOAD_DIR=$1 GB_INSTALL_DIR=$2 PLATFORM=$3 mkdir -p "$GB_DOWNLOAD_DIR" cd "$GB_DOWNLOAD_DIR" export GIT_CURL_VERBOSE=1 echo "Downloading Google Benchmark..." git clone https://github.com/google/benchmark.git git clone https://github.com/google/googletest.git benchmark/googletest cd benchmark mkdir build && cd build cmake ../ -DCMAKE_BUILD_TYPE=Release make mkdir -p "$GB_INSTALL_DIR"/include && cp -rf "$GB_DOWNLOAD_DIR"/benchmark/include/benchmark "$GB_INSTALL_DIR"/include/ mkdir -p "$GB_INSTALL_DIR"/lib && cp -rf "$GB_DOWNLOAD_DIR"/benchmark/build/src/*.a "$GB_INSTALL_DIR"/lib/ aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_libFuzzer.sh000077500000000000000000000036051456575232400254710ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex usage() { echo "install_libFuzzer.sh download_dir install_dir os_name" exit 1 } if [ "$#" -ne "3" ]; then usage fi LIBFUZZER_DOWNLOAD_DIR=$1 LIBFUZZER_INSTALL_DIR=$2 export PLATFORM=$3 mkdir -p "$LIBFUZZER_DOWNLOAD_DIR" cd "$LIBFUZZER_DOWNLOAD_DIR" git clone https://chromium.googlesource.com/chromium/llvm-project/llvm/lib/Fuzzer cd Fuzzer git checkout 651ead cd .. echo "Compiling LibFuzzer..." clang++ -c -g -v -O2 -lstdc++ -std=c++11 Fuzzer/*.cpp -IFuzzer ar ruv libFuzzer.a Fuzzer*.o echo "Copying libFuzzer.a to $LIBFUZZER_INSTALL_DIR" mkdir -p "$LIBFUZZER_INSTALL_DIR"/lib && cp libFuzzer.a "$LIBFUZZER_INSTALL_DIR"/lib # Run AFL instead of libfuzzer if AFL_FUZZ is set. Not compatible with fuzz coverage. if [[ "$AFL_FUZZ" == "true" && "$FUZZ_COVERAGE" != "true" ]]; then # Clusterfuzz's bash script changed from AFL to AFL++ on April 1, 2021; this # commit (ac5ac9e4604ea03cfd643185ad1e3800e952ea44) pins the script to an older version # of Clusterfuzz until we support AFL++. mkdir -p "$LIBFUZZER_INSTALL_DIR" && curl https://raw.githubusercontent.com/google/clusterfuzz/ac5ac9e4604ea03cfd643185ad1e3800e952ea44/docs/setting-up-fuzzing/build_afl.bash > "$LIBFUZZER_INSTALL_DIR"/build_afl.bash chmod +x "$LIBFUZZER_INSTALL_DIR"/build_afl.bash cd "$LIBFUZZER_INSTALL_DIR" "$LIBFUZZER_INSTALL_DIR"/build_afl.bash cd - fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_libressl.sh000077500000000000000000000021161456575232400253300ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. 
See the License for the specific language governing # permissions and limitations under the License. # set -e usage() { echo "install_libressl.sh build_dir install_dir" exit 1 } if [ "$#" -ne "2" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 source codebuild/bin/jobs.sh cd "$BUILD_DIR" # Originally from https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-3.4.3.tar.gz curl https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2022-12-01_libressl-3.6.1.tar.gz > libressl-3.6.1.tar.gz tar -xzvf libressl-3.6.1.tar.gz cd libressl-3.6.1 ./configure --prefix="$INSTALL_DIR" make -j $JOBS CFLAGS=-fPIC install aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_openssl_0_9_8.sh000077500000000000000000000022511456575232400260720ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex pushd "$(pwd)" usage() { echo "install_openssl_0_9_8.sh build_dir install_dir os_name" exit 1 } if [ "$#" -ne "3" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 PLATFORM=$3 cd "$BUILD_DIR" wget https://www.openssl.org/source/old/0.9.x/openssl-0.9.8zh.tar.gz tar xzvf openssl-0.9.8zh.tar.gz cd openssl-0.9.8zh if [ "$PLATFORM" == "linux" ]; then CONFIGURE="./config -d" elif [ "$PLATFORM" == "osx" ]; then CONFIGURE="./Configure darwin64-x86_64-cc" else echo "Invalid platform! $PLATFORM" usage fi $CONFIGURE --prefix="$INSTALL_DIR" make depend make -j8 make install popd exit 0 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_openssl_1_0_2.sh000077500000000000000000000032411456575232400260540ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex pushd "$(pwd)" usage() { echo "install_openssl_1_0_2.sh build_dir install_dir os_name" exit 1 } if [ "$#" -ne "3" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 OS_NAME=$3 source codebuild/bin/jobs.sh mkdir -p $BUILD_DIR cd "$BUILD_DIR" curl --retry 3 -L https://github.com/openssl/openssl/archive/OpenSSL_1_0_2-stable.zip --output openssl-OpenSSL_1_0_2-stable.zip unzip openssl-OpenSSL_1_0_2-stable.zip cd openssl-OpenSSL_1_0_2-stable if [ "$OS_NAME" == "linux" ]; then CONFIGURE="./config -d" elif [ "$OS_NAME" == "osx" ]; then CONFIGURE="./Configure darwin64-x86_64-cc" else echo "Invalid platform! 
$OS_NAME" usage fi mkdir -p $INSTALL_DIR $CONFIGURE shared -g3 -fPIC no-libunbound no-gmp no-jpake no-krb5 no-md2 no-rc5 no-rfc3779 no-sctp no-ssl-trace \ no-store no-zlib no-hw no-mdc2 no-seed no-idea enable-ec_nistp_64_gcc_128 no-camellia no-bf no-ripemd \ no-dsa no-ssl2 no-capieng -DSSL_FORBID_ENULL -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS \ --prefix="$INSTALL_DIR" make -j $JOBS depend make -j $JOBS make -j $JOBS install_sw popd exit 0 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_openssl_1_0_2_fips.sh000077500000000000000000000052341456575232400271010ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex pushd "$(pwd)" usage() { echo "install_openssl_1_0_2_fips.sh build_dir install_dir os_name" exit 1 } if [ "$#" -ne "3" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 OS_NAME=$3 if [ "$OS_NAME" == "linux" ]; then CONFIGURE="./config -d" elif [ "$OS_NAME" == "osx" ]; then echo "WARNING: FIPS and MacOS is not officially supported. This build should only be used for local debugging." echo "See: http://openssl.6102.n7.nabble.com/Openssl-Fips-build-for-Mac-OSX-64-bit-td44716.html" CONFIGURE="./Configure darwin64-x86_64-cc" else echo "Invalid platform! $OS_NAME" usage fi # Install the FIPS object module in accordance with OpenSSL FIPS 140-2 Security Policy Annex A. # https://www.openssl.org/docs/fips/SecurityPolicy-2.0.pdf # This installation is not FIPS compliant as we do not own the build system architecture. # It may only be used for testing purposes. # # There is no 'latest' download URL for the FIPS object modules cd "$BUILD_DIR" # Originally from: http://www.openssl.org/source/openssl-fips-2.0.13.tar.gz curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2017-08-31_openssl-fips-2.0.13.tar.gz --output openssl-fips-2.0.13.tar.gz gunzip -c openssl-fips-2.0.13.tar.gz | tar xf - rm openssl-fips-2.0.13.tar.gz cd openssl-fips-2.0.13 mkdir ../OpensslFipsModule FIPSDIR="$(pwd)/../OpensslFipsModule" export FIPSDIR chmod +x ./Configure $CONFIGURE make make install cd "$BUILD_DIR" curl --retry 3 -L https://github.com/openssl/openssl/archive/OpenSSL_1_0_2-stable.zip --output openssl-OpenSSL_1_0_2-stable.zip unzip openssl-OpenSSL_1_0_2-stable.zip cd openssl-OpenSSL_1_0_2-stable FIPS_OPTIONS="fips --with-fipsdir=$FIPSDIR shared" $CONFIGURE $FIPS_OPTIONS -g3 -fPIC no-libunbound no-gmp no-jpake no-krb5 no-md2 no-rc5 \ no-rfc3779 no-sctp no-ssl-trace no-store no-zlib no-hw no-mdc2 no-seed no-idea \ enable-ec_nistp_64_gcc_128 no-camellia no-bf no-ripemd no-dsa no-ssl2 no-capieng -DSSL_FORBID_ENULL \ -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS --prefix="$INSTALL_DIR" make depend make make install_sw popd exit 0 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_openssl_1_1_1.sh000077500000000000000000000034171456575232400260610ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
# You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex pushd "$(pwd)" usage() { echo "install_openssl_1_1_1.sh build_dir install_dir os_name" exit 1 } if [ "$#" -ne "3" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 OS_NAME=$3 source codebuild/bin/jobs.sh RELEASE=1_1_1-stable mkdir -p $BUILD_DIR cd "$BUILD_DIR" curl --retry 3 -L https://github.com/openssl/openssl/archive/OpenSSL_${RELEASE}.zip --output OpenSSL_${RELEASE}.zip unzip OpenSSL_${RELEASE}.zip cd openssl-OpenSSL_${RELEASE} if [ "$OS_NAME" == "linux" ]; then CONFIGURE="./config -d" elif [[ "$OS_NAME" == "osx" || "$OS_NAME" == "darwin" ]]; then CONFIGURE="./Configure darwin64-x86_64-cc" else echo "Invalid platform! $OS_NAME" usage fi mkdir -p $INSTALL_DIR # Use g3 to get debug symbols in libcrypto to chase memory leaks $CONFIGURE shared -g3 -fPIC \ no-md2 no-rc5 no-rfc3779 no-sctp no-ssl-trace no-zlib \ no-hw no-mdc2 no-seed no-idea enable-ec_nistp_64_gcc_128 no-camellia\ no-bf no-ripemd no-dsa no-ssl2 no-ssl3 no-capieng \ -DSSL_FORBID_ENULL -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS \ --prefix="$INSTALL_DIR" make -j $JOBS depend make -j $JOBS make -j $JOBS install_sw popd exit 0 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_openssl_3_0.sh000077500000000000000000000032321456575232400256350ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex pushd "$(pwd)" usage() { echo "install_openssl_3_0.sh build_dir install_dir os_name" exit 1 } if [ "$#" -ne "3" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 OS_NAME=$3 source codebuild/bin/jobs.sh RELEASE=3.0.7 mkdir -p $BUILD_DIR cd "$BUILD_DIR" curl --retry 3 -L https://github.com/openssl/openssl/archive/refs/tags/openssl-${RELEASE}.zip --output OpenSSL_${RELEASE}.zip unzip OpenSSL_${RELEASE}.zip cd openssl-openssl-${RELEASE} CONFIGURE="./Configure " mkdir -p $INSTALL_DIR # Use g3 to get debug symbols in libcrypto to chase memory leaks $CONFIGURE shared -g3 -fPIC \ no-md2 no-rc5 no-rfc3779 no-sctp no-ssl-trace no-zlib \ no-hw no-mdc2 no-seed no-idea enable-ec_nistp_64_gcc_128 no-camellia\ no-bf no-ripemd no-dsa no-ssl2 no-ssl3 no-capieng no-dtls \ -DSSL_FORBID_ENULL -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS \ --prefix="$INSTALL_DIR" make -j $JOBS make -j $JOBS test make -j $JOBS install popd # sym-link lib -> lib64 since codebuild assumes /lib path pushd $INSTALL_DIR ln -s lib64 lib popd exit 0 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_oqs_openssl_1_1_1.sh000077500000000000000000000042561456575232400267450ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex usage() { echo "install_oqs_openssl_1_1_1.sh build_dir install_dir platform" exit 1 } if [ "$#" -ne "3" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 PLATFORM=$3 cd "$BUILD_DIR" # Download OQS OpenSSL Source code git clone --branch OQS-OpenSSL_1_1_1-stable https://github.com/open-quantum-safe/openssl.git # Download and Build OQS library, and copy "lib" and "include" artifacts into OQS OpenSSL directory git clone https://github.com/open-quantum-safe/liboqs.git cd liboqs # Use commit that supports Kyber round 3; hybrid draft spec version 5 git checkout cf6d8a059e446d24e2af06949d83605ae0f4f414 mkdir build && cd build cmake -GNinja -DCMAKE_INSTALL_PREFIX=${BUILD_DIR}/openssl/oqs .. ninja ninja install # Complete the OpenSSL Build cd "$BUILD_DIR"/openssl # Pin to OQS-OpenSSL commit that is compatible with Kyber round 3; hybrid draft spec version 5 LibOQS implementation git checkout 613d1bea7afa23dc11f340e75990cb47d77711e9 if [ "$PLATFORM" == "linux" ]; then CONFIGURE="./config -d" elif [ "$PLATFORM" == "osx" ]; then CONFIGURE="./Configure darwin64-x86_64-cc" else echo "Invalid platform! $PLATFORM" usage fi # Use g3 to get debug symbols in libcrypto to chase memory leaks $CONFIGURE -g3 -fPIC \ no-md2 no-rc5 no-rfc3779 no-sctp no-ssl-trace no-zlib \ no-hw no-mdc2 no-seed no-idea enable-ec_nistp_64_gcc_128 no-camellia \ no-bf no-ripemd no-dsa no-ssl2 no-ssl3 no-capieng \ -DSSL_FORBID_ENULL -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS \ --prefix="$INSTALL_DIR" make depend make -j make install_sw exit 0aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_prlimit.sh000077500000000000000000000031111456575232400251650ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
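# prlimit is built from util-linux here because the test driver raises the
# memlock limit before running tests; s2n_codebuild.sh (further down in this
# directory) runs "sudo -E prlimit --pid $$ --memlock=unlimited:unlimited" on
# Linux CodeBuild hosts. A hedged usage sketch, assuming the freshly built
# binary is invoked straight from its install prefix:
#
#   "$INSTALL_DIR"/bin/prlimit --pid $$ --memlock   # show the current memlock limit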
# set -e usage() { echo "install_prlimit.sh download_dir install_dir" exit 1 } if [ "$#" -ne "2" ]; then usage fi BUILD_DIR=$1 INSTALL_DIR=$2 source codebuild/bin/jobs.sh sudo apt-get install -y libncurses5-dev cd "$BUILD_DIR" # Originally from: https://www.kernel.org/pub/linux/utils/util-linux/v2.25/util-linux-2.25.2.tar.gz curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2017-08-29_util-linux-2.25.2.tar.gz --output util-linux-2.25.2.tar.gz tar -xzvf util-linux-2.25.2.tar.gz cd util-linux-2.25.2 ./configure ADJTIME_PATH=/var/lib/hwclock/adjtime \ --disable-chfn-chsh \ --disable-login \ --disable-nologin \ --disable-su \ --disable-setpriv \ --disable-runuser \ --disable-pylibmount \ --disable-static \ --without-python \ --without-systemd \ --disable-makeinstall-chown \ --without-systemdsystemunitdir \ --without-ncurses \ --prefix="$INSTALL_DIR" || cat config.log make -j $JOBS > /dev/null make -j $JOBS install > /dev/null aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_python.sh000077500000000000000000000022731456575232400250360ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # #!/bin/bash set -e if [ "$#" -ne 3 ]; then echo "install_python.sh libcrypto_root build_dir install_dir" exit 1 fi LIBCRYPTO_ROOT=$1 BUILD_DIR=$2 INSTALL_DIR=$3 source codebuild/bin/jobs.sh cd "$BUILD_DIR" # Originally from: https://www.python.org/ftp/python/3.6.0/Python-3.6.0.tgz curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/2017-08-29_Python-3.6.0.tgz --output Python-3.6.0.tgz tar xzf Python-3.6.0.tgz cd Python-3.6.0 CPPFLAGS="-I$LIBCRYPTO_ROOT/include" LDFLAGS="-Wl,-rpath,$LIBCRYPTO_ROOT/lib -L$LIBCRYPTO_ROOT/lib" ./configure --prefix="$INSTALL_DIR" make -j $JOBS make -j $JOBS install aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_s2n_head.sh000077500000000000000000000022701456575232400251750ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
set -ex pushd "$(pwd)" usage() { echo "install_s2n_head.sh build_dir" exit 1 } if [ "$#" -ne "1" ]; then usage fi BUILD_DIR=$1 source codebuild/bin/jobs.sh cd "$BUILD_DIR" # Clone the most recent s2n commit git clone --depth=1 https://github.com/aws/s2n-tls s2n_head cmake ./s2n_head -Bbuild -DCMAKE_PREFIX_PATH="$LIBCRYPTO_ROOT" -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=on cmake --build ./build -- -j $JOBS # Copy new executables to bin directory cp -f "$BUILD_DIR"/build/bin/s2nc "$BASE_S2N_DIR"/bin/s2nc_head cp -f "$BUILD_DIR"/build/bin/s2nd "$BASE_S2N_DIR"/bin/s2nd_head popd exit 0 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_saw.sh000077500000000000000000000022301456575232400243000ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -xe usage() { echo "install_saw.sh download_dir install_dir" exit 1 } if [ "$#" -ne "2" ]; then usage fi DOWNLOAD_DIR=$1 INSTALL_DIR=$2 if [ -x "$INSTALL_DIR/bin/saw" ]; then echo "Saw already installed at $INSTALL_DIR/bin/saw"; exit 0; fi mkdir -p "$DOWNLOAD_DIR" cd "$DOWNLOAD_DIR" #download saw binaries curl --retry 3 https://s2n-public-test-dependencies.s3.us-west-2.amazonaws.com/saw-0.9.0.99-Linux-x86_64.tar.gz --output saw.tar.gz mkdir -p saw && tar -xzf saw.tar.gz --strip-components=1 -C saw mkdir -p "$INSTALL_DIR" && mv saw/* "$INSTALL_DIR" "$INSTALL_DIR"/bin/saw --version aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_shellcheck.sh000077500000000000000000000023101456575232400256120ustar00rootroot00000000000000#!/bin/bash # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e source codebuild/bin/s2n_setup_env.sh usage() { echo "install_shellcheck.sh" exit 1 } install_shellcheck() { wget "https://github.com/koalaman/shellcheck/releases/download/v0.7.1/shellcheck-v0.7.1.linux.$ARCH.tar.xz" -O /tmp/shellcheck.tar.xz tar -Jxf /tmp/shellcheck.tar.xz -C /tmp mv /tmp/shellcheck-v*/shellcheck /usr/local/bin/ chmod 755 /usr/local/bin/shellcheck } if [ "$#" -ne "0" ]; then usage fi case "$OS_NAME" in "amazon linux"|"linux") which shellcheck || install_shellcheck ;; "darwin" ) brew install shellcheck || true ; ;; *) echo "Unknown platform" exit 255 ;; esac aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_sidetrail.sh000077500000000000000000000023351456575232400254740ustar00rootroot00000000000000#!/bin/bash # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). 
# You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e set -x usage() { echo "install_sidetrail.sh install_dir" exit 1 } if [ "$#" -ne "1" ]; then usage fi INSTALL_DIR=$1 cd "$INSTALL_DIR" #install smack git clone https://github.com/danielsn/smack.git -b sidewinder-debug cd smack/bin clang --version which clang ./build.sh # Disabling ShellCheck using https://github.com/koalaman/shellcheck/wiki/Directive # Turn of Warning in one line as https://github.com/koalaman/shellcheck/wiki/SC1090 # shellcheck disable=SC1090 source "$INSTALL_DIR"/smack.environment #install ctverif cd "$INSTALL_DIR" git clone --depth 1 https://github.com/imdea-software/verifying-constant-time.git -b test-automation aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_sidetrail_dependencies.sh000077500000000000000000000046721456575232400302100ustar00rootroot00000000000000#!/bin/bash # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
# set -e set -x #Figlet is required for ctverif printing sudo apt-get install -y figlet #Install boogieman gem install bam-bam-boogieman which bam #Install the apt-get dependencies from the smack build script: this way they will still be there #when we get things from cache DEPENDENCIES="git cmake python-yaml python-psutil unzip wget python3-yaml" DEPENDENCIES+=" mono-complete libz-dev libedit-dev" DEPENDENCIES+=" clang-3.9 llvm-3.9 llvm-3.9-dev" # Adding MONO repository sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF echo "deb http://download.mono-project.com/repo/ubuntu trusty main" | sudo tee /etc/apt/sources.list.d/mono-official.list sudo apt-get update -o Acquire::CompressionTypes::Order::=gz sudo apt-get install -y ${DEPENDENCIES} pip install pyyaml LLVM_SHORT_VERSION=3.9 sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${LLVM_SHORT_VERSION} 30 sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${LLVM_SHORT_VERSION} 30 sudo update-alternatives --install /usr/bin/llvm-config llvm-config /usr/bin/llvm-config-${LLVM_SHORT_VERSION} 30 sudo update-alternatives --install /usr/bin/llvm-link llvm-link /usr/bin/llvm-link-${LLVM_SHORT_VERSION} 30 sudo update-alternatives --install /usr/bin/llvm-dis llvm-dis /usr/bin/llvm-dis-${LLVM_SHORT_VERSION} 30 which clang clang --version clang-3.9 --version mkdir -p ~/override_clang ln -s /usr/bin/clang ~/override_clang/clang ln -s /usr/bin/clang++ ~/override_clang/clang++ ln -s /usr/bin/llvm-config ~/override_clang/llvm-config ln -s /usr/bin/llvm-link ~/override_clang/llvm-link ln -s /usr/bin/llvm-dis ~/override_clang/llvm-dis sudo chmod +x ~/override_clang/* export PATH="$HOME/override_clang/:${PATH}" which clang clang --version clang-3.9 --version which python python --version pip install psutil aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_sslyze.sh000077500000000000000000000022761456575232400250510ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e . codebuild/bin/s2n_setup_env.sh aarch64_install() { echo "sslyze has a dependency on nassl, which will not build on ARM." } case "$ARCH" in "aarch64") aarch64_install exit 1 ;; *) python3 -m pip install --user --upgrade pip setuptools # Version 3.0.0 introduces backwards incompatible changes in the JSON we parse. # TODO: unpin the sslyze version and update the json parsing sslyze output. python3 -m pip install --user "sslyze<3.0.0" sudo ln -s /root/.local/bin/sslyze /usr/bin/sslyze || true which sslyze sslyze --version ;; esac aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_ubuntu_dependencies.sh000077500000000000000000000060541456575232400275460ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. 
# A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # # Shim code to get local docker/ec2 instances bootstraped like a CodeBuild instance. # Not actually used by CodeBuild. source codebuild/bin/s2n_setup_env.sh set -e github_apt(){ curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | gpg --dearmor -o /usr/share/keyrings/githubcli-archive-keyring.gpg echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list apt update -y apt install -y gh } get_rust() { apt install -y clang-10 sudo curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y source $HOME/.cargo/env rustup default nightly } base_packages() { echo "Installing repositories and base packages" apt update -y apt install -y software-properties-common add-apt-repository ppa:ubuntu-toolchain-r/test -y add-apt-repository ppa:longsleep/golang-backports -y apt-get update -o Acquire::CompressionTypes::Order::=gz DEPENDENCIES="unzip make indent iproute2 kwstyle libssl-dev net-tools tcpdump valgrind lcov m4 nettle-dev nettle-bin pkg-config psmisc gcc g++ zlibc zlib1g-dev python3-pip python3-testresources llvm curl shellcheck git tox cmake libtool ninja-build golang-go quilt jq apache2" if [[ -n "${GCC_VERSION:-}" ]] && [[ "${GCC_VERSION:-}" != "NONE" ]]; then DEPENDENCIES+=" gcc-$GCC_VERSION g++-$GCC_VERSION"; fi if ! command -v python3.9 &> /dev/null; then add-apt-repository ppa:deadsnakes/ppa -y DEPENDENCIES+=" python3.9 python3.9-distutils"; fi apt-get -y install --no-install-recommends ${DEPENDENCIES} } base_packages github_apt get_rust # If prlimit is not on our current PATH, download and compile prlimit manually. s2n needs prlimit to memlock pages if ! type prlimit > /dev/null && [[ ! -d "$PRLIMIT_INSTALL_DIR" ]]; then mkdir -p "$PRLIMIT_INSTALL_DIR"; codebuild/bin/install_prlimit.sh "$(mktemp -d)" "$PRLIMIT_INSTALL_DIR"; fi if [[ "$TESTS" == "ctverif" || "$TESTS" == "ALL" ]] && [[ ! -d "$CTVERIF_INSTALL_DIR" ]]; then mkdir -p "$CTVERIF_INSTALL_DIR" && codebuild/bin/install_ctverif.sh "$CTVERIF_INSTALL_DIR" > /dev/null ; fi if [[ "$TESTS" == "sidetrail" || "$TESTS" == "ALL" ]] ; then codebuild/bin/install_sidetrail_dependencies.sh ; fi if [[ "$TESTS" == "sidetrail" || "$TESTS" == "ALL" ]] && [[ ! -d "$SIDETRAIL_INSTALL_DIR" ]]; then mkdir -p "$SIDETRAIL_INSTALL_DIR" && codebuild/bin/install_sidetrail.sh "$SIDETRAIL_INSTALL_DIR" > /dev/null ; fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/install_z3_yices.sh000077500000000000000000000025231456575232400252430ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
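# z3 and yices are the SMT solvers behind the SAW proofs set up elsewhere in
# this directory (install_saw.sh fetches the saw binary, and
# s2n_override_paths.sh puts $SAW_INSTALL_DIR/bin and $Z3_INSTALL_DIR/bin on the
# PATH). A small, illustrative check that the solvers are reachable once that
# PATH is in effect:
#
#   command -v z3 yices-smt2 || echo "SMT solvers not on PATH yet"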
# set -e usage() { echo "install_z3_yices.sh download_dir install_dir" exit 1 } if [ "$#" -ne "2" ]; then usage fi DOWNLOAD_DIR=$1 INSTALL_DIR=$2 mkdir -p "$DOWNLOAD_DIR" cd "$DOWNLOAD_DIR" #download z3 and yices curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/yices-2.6.1-x86_64-pc-linux-gnu-static-gmp.tar.gz --output yices.tar.gz tar -xf yices.tar.gz curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/z3-4.8.8-x64-ubuntu-16.04.zip --output z3.zip unzip z3.zip mkdir -p "$INSTALL_DIR"/bin mv z3-4.8.8-x64-ubuntu-16.04/bin/* "$INSTALL_DIR"/bin mv yices-2.6.1/bin/* "$INSTALL_DIR"/bin chmod +x "$INSTALL_DIR"/bin/* "$INSTALL_DIR"/bin/yices-smt2 --version "$INSTALL_DIR"/bin/yices --version "$INSTALL_DIR"/bin/z3 --version aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/jobs.sh000066400000000000000000000013711456575232400227170ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # # Find if the environment has more than 8 cores JOBS=8 if [[ -x "$(command -v nproc)" ]]; then UNITS=$(nproc); if [[ $UNITS -gt $JOBS ]]; then JOBS=$UNITS; fi fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/run_cppcheck.sh000077500000000000000000000023041456575232400244260ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e usage() { echo "run_cppcheck.sh install_dir" exit 1 } if [ "$#" -ne "1" ]; then usage fi INSTALL_DIR=$1 CPPCHECK_EXECUTABLE=${INSTALL_DIR}/cppcheck FAILED=0 $CPPCHECK_EXECUTABLE --version $CPPCHECK_EXECUTABLE --std=c99 --error-exitcode=-1 --quiet --force -j 8 --enable=all --template='[{file}:{line}]: ({severity}:{id}) {message}' --inline-suppr --suppressions-list=codebuild/bin/cppcheck_suppressions.txt -I . -I ./tests api bin crypto error stuffer ./tests/unit tls utils || FAILED=1 if [ $FAILED == 1 ]; then printf "\\033[31;1mFAILED cppcheck\\033[0m\\n" exit -1 else printf "\\033[32;1mPASSED cppcheck\\033[0m\\n" fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/run_ctverif.sh000077500000000000000000000032531456575232400243140ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. 
This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e usage() { echo "run_ctverif.sh install_dir" exit 1 } if [ "$#" -ne "1" ]; then usage fi INSTALL_DIR=$1 export CTVERIF_DIR="${1}/verifying-constant-time" SMACK_DIR="${1}/smack" #Put the dependencies are on the path # Disabling ShellCheck using https://github.com/koalaman/shellcheck/wiki/Directive # Turn of Warning in one line as https://github.com/koalaman/shellcheck/wiki/SC1090 # shellcheck disable=SC1090 source "${INSTALL_DIR}/smack.environment" export PATH="${SMACK_DIR}/bin:${SMACK_DIR}/build:${PATH}" #Test that they are really there which smack || echo "can't find smack" which boogie || echo "can't find z3" which llvm2bpl || echo "can't find llvm2bpl" #copy the current version of the file to the test cd "${BASE_S2N_DIR}/tests/ctverif" cp "${BASE_S2N_DIR}/utils/s2n_safety.c" . make clean #run the test. We expect both to pass, and none to fail FAILED=0 EXPECTED_PASS=2 EXPECTED_FAIL=0 make 2>&1 | ./count_success.pl $EXPECTED_PASS $EXPECTED_FAIL || FAILED=1 if [ $FAILED == 1 ]; then printf "\\033[31;1mFAILED ctverif\\033[0m\\n" exit -1 else printf "\\033[32;1mPASSED ctverif\\033[0m\\n" fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/run_kwstyle.sh000077500000000000000000000020221456575232400243450ustar00rootroot00000000000000#! /bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -eu which KWStyle S2N_FILES=$(find "$PWD" -type f -name "s2n_*.[ch]" | grep -v "test") FAILED=0 for file in $S2N_FILES; do set +e ERROR_LIST=$(KWStyle -gcc -v -xml codebuild/bin/KWStyle.xml "$file") set -e if [ "$ERROR_LIST" != "" ] ; then echo "$ERROR_LIST" FAILED=1 fi done if [ $FAILED == 1 ]; then printf "\\033[31;1mFAILED kwstyle\\033[0m\\n" exit -1 else printf "\\033[32;1mPASSED kwstyle\\033[0m\\n" fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/run_sidetrail.sh000077500000000000000000000035541456575232400246360ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
# set -ex usage() { echo "run_sidetrail.sh install_dir s2n_dir" exit 1 } runSingleTest() { cd "${BASE_S2N_DIR}/tests/sidetrail/working/${1}" ./copy_as_needed.sh make clean make 2>&1 | tee out.txt ../../count_success.pl 1 0 out.txt } runNegativeTest() { cd "${BASE_S2N_DIR}/tests/sidetrail/working/${1}" ./copy_as_needed.sh make clean make 2>&1 | tee out.txt ../../count_success.pl 0 1 out.txt } if [[ "$#" -ne "2" ]]; then usage fi INSTALL_DIR=$1 SMACK_DIR="${1}/smack" BASE_S2N_DIR=$2 #Put the dependencies on the path # Disabling ShellCheck using https://github.com/koalaman/shellcheck/wiki/Directive # Turn of Warning in one line as https://github.com/koalaman/shellcheck/wiki/SC1090 # shellcheck disable=SC1090 source "${INSTALL_DIR}/smack.environment" export PATH="${SMACK_DIR}/bin:${SMACK_DIR}/build:${PATH}" #Test that they are really there which smack || echo "can't find smack" which boogie || echo "can't find z3" which llvm2bpl || echo "can't find llvm2bpl" which clang clang --version echo $BOOGIE echo $CORRAL runNegativeTest "s2n-record-read-cbc-negative-test" runSingleTest "s2n-cbc" # Takes 6m 30s runSingleTest "s2n-record-read-aead" runSingleTest "s2n-record-read-cbc" runSingleTest "s2n-record-read-composite" runSingleTest "s2n-record-read-stream" aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_apache2.sh000066400000000000000000000027261456575232400240540ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -eu apache2_config() { cert_dir="$1" command="$2" echo "apache2: ${command}" APACHE_SERVER_ROOT="$APACHE2_INSTALL_DIR" \ APACHE_RUN_USER=www-data \ APACHE_RUN_GROUP=www-data \ APACHE_PID_FILE="${APACHE2_INSTALL_DIR}/run/apache2.pid" \ APACHE_RUN_DIR="${APACHE2_INSTALL_DIR}/run" \ APACHE_LOCK_DIR="${APACHE2_INSTALL_DIR}/lock" \ APACHE_LOG_DIR="${APACHE2_INSTALL_DIR}/log" \ APACHE_CERT_DIR="${cert_dir}" \ apache2 -k "${command}" -f "${APACHE2_INSTALL_DIR}/apache2.conf" } apache2_stop() { cert_dir="$1" apache2_config "${cert_dir}" stop } apache2_start() { if [[ ! -f "$APACHE2_INSTALL_DIR/apache2.conf" ]]; then echo "apache2 not installed" exit 1 fi cert_dir="$1" apache2_config "${cert_dir}" start # Stop the apache server after tests finish, even if an error occurs trap 'apache2_stop "${cert_dir}"' ERR EXIT } aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_codebuild.sh000077500000000000000000000136271456575232400245100ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
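# The apache2 helpers just above (s2n_apache2.sh) are consumed by sourcing the
# file and handing apache2_start a certificate directory; apache2_stop is hooked
# up via trap so the server is torn down even if a test fails. Sketch of the
# calling pattern, which setup_apache_server below uses for the renegotiation
# integration tests:
#
#   source codebuild/bin/s2n_apache2.sh
#   apache2_start "$(pwd)/tests/pems"    # serve the test certificates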
# set -e source codebuild/bin/s2n_setup_env.sh # Use prlimit to set the memlock limit to unlimited for linux. OSX is unlimited by default # Codebuild Containers aren't allowing prlimit changes (and aren't being caught with the usual cgroup check) if [[ "$OS_NAME" == "linux" && -n "$CODEBUILD_BUILD_ARN" ]]; then PRLIMIT_LOCATION=`which prlimit` sudo -E ${PRLIMIT_LOCATION} --pid "$$" --memlock=unlimited:unlimited; fi # Set the version of GCC as Default if it's required if [[ -n "$GCC_VERSION" ]] && [[ "$GCC_VERSION" != "NONE" ]]; then alias gcc=$(which gcc-$GCC_VERSION); fi # Find if the environment has more than 8 cores JOBS=8 if [[ -x "$(command -v nproc)" ]]; then UNITS=$(nproc); if [[ $UNITS -gt $JOBS ]]; then JOBS=$UNITS; fi fi make clean; echo "Using $JOBS jobs for make.."; echo "running with libcrypto: ${S2N_LIBCRYPTO}, gcc_version: ${GCC_VERSION}" if [[ "$OS_NAME" == "linux" && "$TESTS" == "valgrind" ]]; then # For linux make a build with debug symbols and run valgrind # We have to output something every 9 minutes, as some test may run longer than 10 minutes # and will not produce any output while sleep 9m; do echo "=====[ $SECONDS seconds still running ]====="; done & if [[ "$S2N_LIBCRYPTO" == "openssl-1.1.1" || "$S2N_LIBCRYPTO" == "awslc" ]]; then # https://github.com/aws/s2n-tls/issues/3758 # Run valgrind in pedantic mode (--errors-for-leak-kinds=all) echo "running task pedantic_valgrind" S2N_DEBUG=true make -j $JOBS pedantic_valgrind else S2N_DEBUG=true make -j $JOBS valgrind fi kill %1 fi CMAKE_PQ_OPTION="S2N_NO_PQ=False" if [[ -n "$S2N_NO_PQ" ]]; then CMAKE_PQ_OPTION="S2N_NO_PQ=True" fi test_linked_libcrypto() { s2n_executable="$1" so_path="${LIBCRYPTO_ROOT}/lib/libcrypto.so" echo "Testing for linked libcrypto: ${so_path}" echo "ldd:" ldd "${s2n_executable}" ldd "${s2n_executable}" | grep "${so_path}" || \ { echo "Linked libcrypto is incorrect."; exit 1; } echo "Test succeeded!" } setup_apache_server() { # Start the apache server if the list of tests isn't defined, meaning all tests # are to be run, or if the renegotiate test is included in the list of tests. if [[ -z $TOX_TEST_NAME ]] || [[ "${TOX_TEST_NAME}" == *"test_renegotiate_apache"* ]]; then source codebuild/bin/s2n_apache2.sh APACHE_CERT_DIR="$(pwd)/tests/pems" apache2_start "${APACHE_CERT_DIR}" fi } run_integration_v2_tests() { setup_apache_server "$CB_BIN_DIR/install_s2n_head.sh" "$(mktemp -d)" cmake . -Bbuild \ -DCMAKE_PREFIX_PATH=$LIBCRYPTO_ROOT \ -D${CMAKE_PQ_OPTION} \ -DS2N_BLOCK_NONPORTABLE_OPTIMIZATIONS=True \ -DBUILD_SHARED_LIBS=on \ -DS2N_INTEG_TESTS=on \ -DPython3_EXECUTABLE=$(which python3) cmake --build ./build --clean-first -- -j $(nproc) test_linked_libcrypto ./build/bin/s2nc test_linked_libcrypto ./build/bin/s2nd cp -f ./build/bin/s2nc "$BASE_S2N_DIR"/bin/s2nc cp -f ./build/bin/s2nd "$BASE_S2N_DIR"/bin/s2nd cd ./build/ for test_name in $TOX_TEST_NAME; do test="${test_name//test_/}" echo "Running... ctest --no-tests=error --output-on-failure --verbose -R ^integrationv2_${test}$" ctest --no-tests=error --output-on-failure --verbose -R ^integrationv2_${test}$ done } run_unit_tests() { cmake . -Bbuild \ -DCMAKE_PREFIX_PATH=$LIBCRYPTO_ROOT \ -D${CMAKE_PQ_OPTION} \ -DS2N_BLOCK_NONPORTABLE_OPTIMIZATIONS=True \ -DBUILD_SHARED_LIBS=on cmake --build ./build -- -j $(nproc) test_linked_libcrypto ./build/bin/s2nc cmake --build build/ --target test -- ARGS="-L unit --output-on-failure -j $(nproc)" } # Run Multiple tests on one flag. 
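# Each block below is keyed off the $TESTS value exported by the build
# environment, so one flag selects the suite to run and "ALL" runs everything.
# A hedged local invocation (not taken from any buildspec) would look like:
#
#   TESTS=unit S2N_LIBCRYPTO=openssl-1.1.1 codebuild/bin/s2n_codebuild.sh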
if [[ "$TESTS" == "ALL" || "$TESTS" == "sawHMACPlus" ]] && [[ "$OS_NAME" == "linux" ]]; then make -C tests/saw tmp/verify_HMAC.log tmp/verify_drbg.log failure-tests; fi # Run Individual tests if [[ "$TESTS" == "ALL" || "$TESTS" == "unit" ]]; then run_unit_tests; fi if [[ "$TESTS" == "ALL" || "$TESTS" == "interning" ]]; then ./codebuild/bin/test_libcrypto_interning.sh; fi if [[ "$TESTS" == "ALL" || "$TESTS" == "exec_leak" ]]; then ./codebuild/bin/test_exec_leak.sh; fi if [[ "$TESTS" == "ALL" || "$TESTS" == "asan" ]]; then make clean; S2N_ADDRESS_SANITIZER=1 make -j $JOBS ; fi if [[ "$TESTS" == "ALL" || "$TESTS" == "integrationv2" ]]; then run_integration_v2_tests; fi if [[ "$TESTS" == "ALL" || "$TESTS" == "crt" ]]; then ./codebuild/bin/build_aws_crt_cpp.sh $(mktemp -d) $(mktemp -d); fi if [[ "$TESTS" == "ALL" || "$TESTS" == "sharedandstatic" ]]; then ./codebuild/bin/test_install_shared_and_static.sh $(mktemp -d); fi if [[ "$TESTS" == "ALL" || "$TESTS" == "dynamicload" ]]; then ./codebuild/bin/test_dynamic_load.sh $(mktemp -d); fi if [[ "$TESTS" == "ALL" || "$TESTS" == "fuzz" ]]; then (make clean && make fuzz) ; fi if [[ "$TESTS" == "ALL" || "$TESTS" == "benchmark" ]]; then (make clean && make benchmark) ; fi if [[ "$TESTS" == "sawHMAC" ]] && [[ "$OS_NAME" == "linux" ]]; then make -C tests/saw/ tmp/verify_HMAC.log ; fi if [[ "$TESTS" == "sawDRBG" ]]; then make -C tests/saw tmp/verify_drbg.log ; fi if [[ "$TESTS" == "ALL" || "$TESTS" == "tls" ]]; then make -C tests/saw tmp/verify_handshake.log ; fi if [[ "$TESTS" == "sawHMACFailure" ]]; then make -C tests/saw failure-tests ; fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_codebuild_al2.sh000077500000000000000000000027431456575232400252430ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e source codebuild/bin/s2n_setup_env.sh # Use prlimit to set the memlock limit to unlimited for linux. OSX is unlimited by default # Codebuild Containers aren't allowing prlimit changes (and aren't being caught with the usual cgroup check) if [[ "$OS_NAME" == "linux" && -n "$CODEBUILD_BUILD_ARN" ]]; then PRLIMIT_LOCATION=`which prlimit` sudo -E ${PRLIMIT_LOCATION} --pid "$$" --memlock=unlimited:unlimited; fi CMAKE_PQ_OPTION="S2N_NO_PQ=False" if [[ -n "$S2N_NO_PQ" ]]; then CMAKE_PQ_OPTION="S2N_NO_PQ=True" fi # Linker flags are a workaround for openssl case "$TESTS" in "unit") cmake . -Bbuild -DCMAKE_EXE_LINKER_FLAGS="-lcrypto -lz" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ -D${CMAKE_PQ_OPTION} -DS2N_BLOCK_NONPORTABLE_OPTIMIZATIONS=True cmake --build ./build -j $(nproc) cmake --build ./build --target test -- ARGS="-L unit --output-on-failure" ;; *) echo "Unknown test"; exit 1;; esac aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_dynamic_load_test.c000066400000000000000000000040301456575232400260310ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). 
* You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include #include #include static void *s2n_load_dynamic_lib(void *ctx) { const char *s2n_so_path = ctx; void *s2n_so = dlopen(s2n_so_path, RTLD_NOW); if (!s2n_so) { exit(1); } int (*s2n_init_dl)(void) = NULL; *(void **) (&s2n_init_dl) = dlsym(s2n_so, "s2n_init"); if (dlerror()) { exit(1); } int (*s2n_cleanup_dl)(void) = NULL; *(void **) (&s2n_cleanup_dl) = dlsym(s2n_so, "s2n_cleanup"); if (dlerror()) { exit(1); } if ((*s2n_init_dl)()) { exit(1); } if ((*s2n_cleanup_dl)()) { exit(1); } if (dlclose(s2n_so)) { exit(1); } return NULL; } int main(int argc, char *argv[]) { if (argc != 2) { printf("Usage: s2n_dynamic_load_test \n"); exit(1); } /* s2n-tls library can be dynamically loaded and cleaned up safely * * We can't use any s2n test macros because this test doesn't get linked to * s2n during compile-time. This test is in a loop to make sure that we are * cleaning up pthread keys properly. */ for (size_t i = 0; i <= PTHREAD_KEYS_MAX + 1; i++) { pthread_t thread_id = { 0 }; if (pthread_create(&thread_id, NULL, &s2n_load_dynamic_lib, argv[1])) { exit(1); } if (pthread_join(thread_id, NULL)) { exit(1); } } return 0; } aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_install_test_dependencies.sh000077500000000000000000000024031456575232400277570ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex source codebuild/bin/s2n_setup_env.sh # Install missing test dependencies. If the install directory already exists, cached artifacts will be used # for that dependency. if [[ ! -d test-deps ]]; then mkdir test-deps ; fi #Install & Run shell check before installing dependencies echo "Installing ShellCheck..." codebuild/bin/install_shellcheck.sh echo "Running ShellCheck..." find ./codebuild -type f -name '*.sh' -exec shellcheck -Cnever -s bash {} \; if [[ "$OS_NAME" == "linux" ]]; then codebuild/bin/install_ubuntu_dependencies.sh; fi if [[ "$OS_NAME" == "darwin" ]]; then codebuild/bin/install_osx_dependencies.sh; fi codebuild/bin/install_default_dependencies.sh echo "Success" aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_override_paths.sh000077500000000000000000000021251456575232400255630ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. 
This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -ex # Add all of our test dependencies to the PATH. Use Openssl 1.1.1 so the latest openssl is used for s_client # integration tests. export PATH=$PYTHON_INSTALL_DIR/bin:$OPENSSL_1_1_1_INSTALL_DIR/bin:$GNUTLS_INSTALL_DIR/bin:$SAW_INSTALL_DIR/bin:$Z3_INSTALL_DIR/bin:$SCAN_BUILD_INSTALL_DIR/bin:$PRLIMIT_INSTALL_DIR/bin:$LATEST_CLANG_INSTALL_DIR/bin:`pwd`/codebuild/bin:~/.local/bin:$PATH export LD_LIBRARY_PATH=$OPENSSL_1_1_1_INSTALL_DIR/lib:$LD_LIBRARY_PATH; export DYLD_LIBRARY_PATH=$OPENSSL_1_1_1_INSTALL_DIR/lib:$LD_LIBRARY_PATH;aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_set_build_preset.sh000077500000000000000000000044011456575232400261000ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. case "${S2N_BUILD_PRESET-default}" in "awslc_gcc4-8") : "${S2N_LIBCRYPTO:=awslc}" : "${GCC_VERSION:=4.8}" ;; "awslc_gcc9") : "${S2N_LIBCRYPTO:=awslc}" : "${GCC_VERSION:=9}" ;; "awslc-fips_gcc4-8") : "${S2N_LIBCRYPTO:=awslc-fips}" : "${GCC_VERSION:=4.8}" ;; "awslc-fips_gcc9") : "${S2N_LIBCRYPTO:=awslc-fips}" : "${GCC_VERSION:=9}" ;; "awslc-fips-2022_gcc6") : "${S2N_LIBCRYPTO:=awslc-fips-2022}" : "${GCC_VERSION:=6}" ;; "libressl_gcc6") : "${S2N_LIBCRYPTO:=libressl}" : "${GCC_VERSION:=6}" ;; "libressl_gcc9") : "${S2N_LIBCRYPTO:=libressl}" : "${GCC_VERSION:=9}" ;; "boringssl") : "${S2N_LIBCRYPTO:=boringssl}" : "${GCC_VERSION:=9}" ;; "openssl-1.0.2") : "${S2N_LIBCRYPTO:=openssl-1.0.2}" : "${GCC_VERSION:=6}" ;; "openssl-1.0.2-fips") : "${S2N_LIBCRYPTO:=openssl-1.0.2-fips}" : "${GCC_VERSION:=6}" ;; "openssl-1.1.1_gcc4-8") : "${S2N_LIBCRYPTO:=openssl-1.1.1}" : "${GCC_VERSION:=4.8}" ;; "openssl-1.1.1_gcc6") : "${S2N_LIBCRYPTO:=openssl-1.1.1}" : "${GCC_VERSION:=6}" : "${S2N_CORKED_IO:=true}" ;; "openssl-1.1.1_gcc6_softcrypto") : "${S2N_LIBCRYPTO:=openssl-1.1.1}" : "${GCC_VERSION:=6}" : "${OPENSSL_ia32cap:=~0x200000200000000}" ;; "openssl-1.1.1_gcc9") : "${S2N_LIBCRYPTO:=openssl-1.1.1}" : "${GCC_VERSION:=9}" ;; "openssl-3.0") : "${S2N_LIBCRYPTO:=openssl-3.0}" : "${GCC_VERSION:=9}" ;; esac aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/s2n_setup_env.sh000077500000000000000000000176421456575232400245670ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
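# --------------------------------------------------------------------------
# Illustrative sketch (NOT upstream code), relating to the TODO just below:
# the exports in this file only persist when it is sourced, so a script can
# warn when it is executed directly. A common bash idiom for that check
# (the same idiom this repo uses in codebuild/bin/utils.sh):
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    echo "warning: s2n_setup_env.sh should be sourced, not executed" >&2
fi
# --------------------------------------------------------------------------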
# # TODO: Flag user if they didn't source this, values won't stick. source codebuild/bin/s2n_set_build_preset.sh # Setup Default Build Config : "${S2N_LIBCRYPTO:=openssl-1.1.1}" : "${BUILD_S2N:=false}" : "${GCC_VERSION:=NONE}" : "${LATEST_CLANG:=false}" : "${TESTS:=unit}" : "${S2N_COVERAGE:=false}" : "${LD_LIBRARY_PATH:=NONE}" # Setup the cache directory paths. # Set Env Variables with defaults if they aren't already set : "${BASE_S2N_DIR:=$(pwd)}" : "${TEST_DEPS_DIR:=$BASE_S2N_DIR/test-deps}" : "${PYTHON_INSTALL_DIR:=$TEST_DEPS_DIR/python}" : "${GNUTLS_INSTALL_DIR:=$TEST_DEPS_DIR/gnutls}" : "${GNUTLS37_INSTALL_DIR:=$TEST_DEPS_DIR/gnutls37}" : "${PRLIMIT_INSTALL_DIR:=$TEST_DEPS_DIR/prlimit}" : "${SAW_INSTALL_DIR:=$TEST_DEPS_DIR/saw}" : "${Z3_INSTALL_DIR:=$TEST_DEPS_DIR/z3}" : "${LIBFUZZER_INSTALL_DIR:=$TEST_DEPS_DIR/libfuzzer}" : "${LATEST_CLANG_INSTALL_DIR:=$TEST_DEPS_DIR/clang}" : "${SCAN_BUILD_INSTALL_DIR:=$TEST_DEPS_DIR/scan-build}" : "${OPENSSL_0_9_8_INSTALL_DIR:=$TEST_DEPS_DIR/openssl-0.9.8}" : "${OPENSSL_1_1_1_INSTALL_DIR:=$TEST_DEPS_DIR/openssl-1.1.1}" : "${OPENSSL_3_0_INSTALL_DIR:=$TEST_DEPS_DIR/openssl-3.0}" : "${OPENSSL_1_0_2_INSTALL_DIR:=$TEST_DEPS_DIR/openssl-1.0.2}" : "${OQS_OPENSSL_1_1_1_INSTALL_DIR:=$TEST_DEPS_DIR/oqs_openssl-1.1.1}" : "${OPENSSL_1_0_2_FIPS_INSTALL_DIR:=$TEST_DEPS_DIR/openssl-1.0.2-fips}" : "${BORINGSSL_INSTALL_DIR:=$TEST_DEPS_DIR/boringssl}" : "${AWSLC_INSTALL_DIR:=$TEST_DEPS_DIR/awslc}" : "${AWSLC_FIPS_INSTALL_DIR:=$TEST_DEPS_DIR/awslc-fips}" : "${AWSLC_FIPS_2022_INSTALL_DIR:=$TEST_DEPS_DIR/awslc-fips-2022}" : "${LIBRESSL_INSTALL_DIR:=$TEST_DEPS_DIR/libressl}" : "${CPPCHECK_INSTALL_DIR:=$TEST_DEPS_DIR/cppcheck}" : "${CTVERIF_INSTALL_DIR:=$TEST_DEPS_DIR/ctverif}" : "${SIDETRAIL_INSTALL_DIR:=$TEST_DEPS_DIR/sidetrail}" : "${GB_INSTALL_DIR:=$TEST_DEPS_DIR/gb}" : "${APACHE2_INSTALL_DIR:=$TEST_DEPS_DIR/apache2}" : "${FUZZ_TIMEOUT_SEC:=10}" # Set some environment vars for OS, Distro and architecture. # Standardized as part of systemd http://0pointer.de/blog/projects/os-release # Samples: # OS_NAME = "linux" # DISTRO="ubuntu" # VERSION_ID = "18.04" # VERSION_CODENAME = "bionic" if [[ -f "/etc/os-release" ]]; then # AL2 doesn't provide a codename. . 
/etc/os-release export DISTRO=$(echo "$NAME"|tr "[:upper:]" "[:lower:]") export VERSION_ID=${VERSION_ID:-"unknown"} export VERSION_CODENAME=${VERSION_CODENAME:-"unknown"} elif [[ -x "/usr/bin/sw_vers" ]]; then export DISTRO="apple" export VERSION_ID=$(sw_vers -productVersion|sed 's/:[[:space:]]*/=/g') export VERSION_CODENAME="unknown" # not queriable via CLI else export DISTRO="unknown" export VERSION_ID="unknown" export VERSION_CODENAME="unknown" fi export OS_NAME=$(uname -s|tr "[:upper:]" "[:lower:]") export ARCH=$(uname -m) # Export all Env Variables export S2N_LIBCRYPTO export BUILD_S2N export GCC_VERSION export LATEST_CLANG export TESTS export BASE_S2N_DIR export TEST_DEPS_DIR export PYTHON_INSTALL_DIR export GNUTLS_INSTALL_DIR export GNUTLS37_INSTALL_DIR export PRLIMIT_INSTALL_DIR export SAW_INSTALL_DIR export Z3_INSTALL_DIR export LIBFUZZER_INSTALL_DIR export LATEST_CLANG_INSTALL_DIR export SCAN_BUILD_INSTALL_DIR export OPENSSL_0_9_8_INSTALL_DIR export OPENSSL_1_1_1_INSTALL_DIR export OPENSSL_3_0_INSTALL_DIR export OPENSSL_1_0_2_INSTALL_DIR export OPENSSL_1_0_2_FIPS_INSTALL_DIR export OQS_OPENSSL_1_1_1_INSTALL_DIR export BORINGSSL_INSTALL_DIR export AWSLC_INSTALL_DIR export AWSLC_FIPS_INSTALL_DIR export AWSLC_FIPS_2022_INSTALL_DIR export LIBRESSL_INSTALL_DIR export CPPCHECK_INSTALL_DIR export CTVERIF_INSTALL_DIR export SIDETRAIL_INSTALL_DIR export OPENSSL_1_1_X_MASTER_INSTALL_DIR export FUZZ_TIMEOUT_SEC export GB_INSTALL_DIR export OS_NAME export S2N_CORKED_IO export S2N_NO_PQ # For use by criterion/ci run reports export AWS_S3_URL="s3://s2n-tls-logs/release/" # S2N_COVERAGE should not be used with fuzz tests, use FUZZ_COVERAGE instead if [[ "$S2N_COVERAGE" == "true" && "$TESTS" == "fuzz" ]]; then export S2N_COVERAGE="false" export FUZZ_COVERAGE="true" fi # Select the libcrypto to build s2n against. If this is unset, default to the latest stable version(Openssl 1.1.1) if [[ -z $S2N_LIBCRYPTO ]]; then export LIBCRYPTO_ROOT=$OPENSSL_1_1_1_INSTALL_DIR ; fi if [[ "$S2N_LIBCRYPTO" == "openssl-1.1.1" ]]; then export LIBCRYPTO_ROOT=$OPENSSL_1_1_1_INSTALL_DIR ; fi if [[ "$S2N_LIBCRYPTO" == "openssl-3.0" ]]; then export LIBCRYPTO_ROOT=$OPENSSL_3_0_INSTALL_DIR ; fi if [[ "$S2N_LIBCRYPTO" == "openssl-1.0.2" ]]; then export LIBCRYPTO_ROOT=$OPENSSL_1_0_2_INSTALL_DIR ; fi if [[ "$S2N_LIBCRYPTO" == "openssl-1.0.2-fips" ]]; then export LIBCRYPTO_ROOT=$OPENSSL_1_0_2_FIPS_INSTALL_DIR ; export S2N_TEST_IN_FIPS_MODE=1 ; fi if [[ "$S2N_LIBCRYPTO" == "boringssl" ]]; then export LIBCRYPTO_ROOT=$BORINGSSL_INSTALL_DIR ; fi if [[ "$S2N_LIBCRYPTO" == "awslc" ]]; then export LIBCRYPTO_ROOT=$AWSLC_INSTALL_DIR ; fi if [[ "$S2N_LIBCRYPTO" == "awslc-fips" ]]; then export LIBCRYPTO_ROOT=$AWSLC_FIPS_INSTALL_DIR ; export S2N_TEST_IN_FIPS_MODE=1 ; fi if [[ "$S2N_LIBCRYPTO" == "awslc-fips-2022" ]]; then export LIBCRYPTO_ROOT=$AWSLC_FIPS_2022_INSTALL_DIR export S2N_TEST_IN_FIPS_MODE=1 fi if [[ "$S2N_LIBCRYPTO" == "libressl" ]]; then export LIBCRYPTO_ROOT=$LIBRESSL_INSTALL_DIR ; fi if [[ -n "${LIBCRYPTO_ROOT:-}" ]]; then # Create a link to the selected libcrypto. This shouldn't be needed when LIBCRYPTO_ROOT is set, but some tests # have the "libcrypto-root" directory path hardcoded. 
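# (Illustrative addition, not upstream code: with the mapping above, e.g.
#  S2N_LIBCRYPTO=awslc resolves to LIBCRYPTO_ROOT=$TEST_DEPS_DIR/awslc, and
#  the symlink created below points libcrypto-root at that directory.)
echo "Selected libcrypto: S2N_LIBCRYPTO=${S2N_LIBCRYPTO} -> LIBCRYPTO_ROOT=${LIBCRYPTO_ROOT}"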
rm -rf libcrypto-root && ln -s "$LIBCRYPTO_ROOT" libcrypto-root fi # Set the libfuzzer to use for fuzz tests export LIBFUZZER_ROOT=$LIBFUZZER_INSTALL_DIR #check if the path contains test dep X, if not and X exists, add to path path_overrides="$PYTHON_INSTALL_DIR/bin $OPENSSL_1_1_1_INSTALL_DIR/bin $GNUTLS_INSTALL_DIR/bin $SAW_INSTALL_DIR/bin $Z3_INSTALL_DIR/bin $SCAN_BUILD_INSTALL_DIR/bin $PRLIMIT_INSTALL_DIR/bin $LATEST_CLANG_INSTALL_DIR/bin `pwd`/codebuild/bin ~/.local/bin" testdeps_path(){ echo -ne "checking $1 is in the path..." if [[ ! "$PATH" =~ "$1" ]]; then if [[ -d "$1" ]]; then export PATH="$1:$PATH" echo -e "added" else echo -e "doesn't exist" fi else echo -e "already in path" fi } for i in $path_overrides; do testdeps_path "$i" ;done # Just recording in the output for debugging. if [ -f "/etc/lsb-release" ]; then cat /etc/lsb-release fi # Translate our custom variables into full paths to the compiler. set_cc(){ if [ -z ${GCC_VERSION:-} -o ${GCC_VERSION} = "NONE" ]; then echo "No GCC_VERSION set" if [ ${LATEST_CLANG:-} = "true" ]; then echo "LATEST_CLANG is ${LATEST_CLANG}" if [ -d ${LATEST_CLANG_INSTALL_DIR:-} ]; then export CC=${LATEST_CLANG_INSTALL_DIR}/bin/clang export CXX=${LATEST_CLANG_INSTALL_DIR}/bin/clang++ echo "CC set to ${CC}" echo "CXX set to ${CXX}" else echo "Could not find a clang installation $LATEST_CLANG_INSTALL_DIR" fi fi else echo "GCC_VERSION is ${GCC_VERSION}" export CC=$(which gcc-${GCC_VERSION}) export CXX=$(which g++-${GCC_VERSION}) echo "CC set to ${CC}" echo "CXX set to ${CXX}" fi } set_cc echo "UID=$UID" echo "OS_NAME=$OS_NAME" echo "S2N_LIBCRYPTO=$S2N_LIBCRYPTO" echo "LIBCRYPTO_ROOT=${LIBCRYPTO_ROOT:-}" echo "BUILD_S2N=$BUILD_S2N" echo "GCC_VERSION=$GCC_VERSION" echo "LATEST_CLANG=$LATEST_CLANG" echo "TESTS=$TESTS" echo "PATH=$PATH" echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/test_dynamic_load.sh000077500000000000000000000037441456575232400254550ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # # This script compiles s2n-tls as a shared library and compiles a test # without linking to the library. This enables us to test behavior when # s2n-tls is dynamically loaded. WORK_DIR=$1 if [ ! -z "$NIX_STORE" ]; then OPENSSL=$(which openssl) LIBCRYPTO_ROOT=$(nix-store --query $OPENSSL) else source codebuild/bin/s2n_setup_env.sh fi S2N_BUILD_ARGS=(-H. -DCMAKE_PREFIX_PATH=$LIBCRYPTO_ROOT -DBUILD_TESTING=OFF) # create installation dir with libs2n.so if [ ! 
-d $WORK_DIR/s2n-install-shared ]; then (set -x; cmake -B$WORK_DIR/s2n-build-shared -DCMAKE_INSTALL_PREFIX=$WORK_DIR/s2n-install-shared -DBUILD_SHARED_LIBS=ON ${S2N_BUILD_ARGS[@]}) (set -x; cmake --build $WORK_DIR/s2n-build-shared --target install -- -j $(nproc)) fi # Compile the test file $CC -Wl,-rpath $LIBCRYPTO_ROOT -o s2n_dynamic_load_test codebuild/bin/s2n_dynamic_load_test.c -ldl -lpthread LDD_OUTPUT=$(ldd s2n_dynamic_load_test) # Confirm executable doesn't have libs2n.so loaded if echo "$LDD_OUTPUT" | grep -q libs2n; then echo "test failure: libs2n should not appear in ldd output" exit 1 fi # Run the test with the path to libs2n echo "Running s2n_dynamic_load_test" LD_LIBRARY_PATH=$LIBCRYPTO_ROOT/lib ./s2n_dynamic_load_test $WORK_DIR/s2n-install-shared/lib/libs2n.so returncode=$? if [ $returncode -ne 0 ]; then echo "test failure: s2n_dynamic_load_test did not succeed" exit 1 fi echo "Passed s2n_dynamic_load_test" aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/test_exec_leak.sh000077500000000000000000000057711456575232400247540ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e # Test that all file descriptors are properly cleaned up when `exec`ing from # from an initialized s2n-tls process. source codebuild/bin/s2n_setup_env.sh source codebuild/bin/jobs.sh function build() { echo "=== BUILDING $1 ===" cmake . -B$1 -DCMAKE_PREFIX_PATH=$TARGET_LIBCRYPTO_PATH ${@:2} cmake --build $1 -- -j $JOBS } function fail() { echo "test failure: $1" exit 1 } function write_exec_app() { cat < build/detect_exec_leak.c #include #include "unistd.h" int main() { s2n_init(); execl("build/bin/detect_exec_leak_finish", "", NULL); return 0; } EOF } function write_exec_finish_app() { cat < build/detect_exec_leak_finish.c #include int main() { s2n_init(); s2n_cleanup(); /* close std* file descriptors so valgrind output is less noisy */ fclose(stdin); fclose(stdout); fclose(stderr); return 0; } EOF } # download libcrypto if its not available TARGET_LIBCRYPTO="${S2N_LIBCRYPTO//[-.]/_}" TARGET_LIBCRYPTO_PATH="${TEST_DEPS_DIR}/${S2N_LIBCRYPTO}" if [ ! 
-f $TARGET_LIBCRYPTO_PATH/lib/libcrypto.a ]; then ./codebuild/bin/install_${TARGET_LIBCRYPTO}.sh $TARGET_LIBCRYPTO_PATH/src $TARGET_LIBCRYPTO_PATH linux fi # build s2n-tls build build -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=on # compile the test app for exec leak test mkdir -p build/valgrind_log_dir write_exec_app write_exec_finish_app cc -Iapi build/detect_exec_leak.c build/lib/libs2n.so -o build/bin/detect_exec_leak cc -Iapi build/detect_exec_leak_finish.c build/lib/libs2n.so -o build/bin/detect_exec_leak_finish # run valgrind with track-fds enabled valgrind_log_dir=valgrind_log_dir for test_file in detect_exec_leak detect_exec_leak_finish; do LD_LIBRARY_PATH="build/lib:$TARGET_LIBCRYPTO_PATH/lib:$LD_LIBRARY_PATH" S2N_VALGRIND=1 \ valgrind --leak-check=full --show-leak-kinds=all --errors-for-leak-kinds=all \ --run-libc-freeres=yes -q --gen-suppressions=all --track-fds=yes \ --leak-resolution=high --undef-value-errors=no --trace-children=yes \ --suppressions=tests/unit/valgrind.suppressions --log-file="build/$valgrind_log_dir/$test_file" \ build/bin/$test_file # search for all leaked file descriptors, excluding the valgrind_log_dir file cat build/$valgrind_log_dir/$test_file | \ grep "Open file descriptor" | \ grep --invert-match $valgrind_log_dir \ && fail "file leak detected while running $test_file" done echo pass aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/test_install_shared_and_static.sh000077500000000000000000000115451456575232400302150ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -eo pipefail usage() { echo "test_install_shared_and_static.sh build_dir" echo "Checks that installed s2n-config.cmake chooses appropriately between shared and static." echo "Note that you MUST build against the version of libcrypto that's actually installed on the system," echo "because installing libs2n.so forces it to use the system's libcrypto.so." exit 1 } if [ "$#" -ne 1 ]; then usage fi WORK_DIR=$1 source codebuild/bin/s2n_setup_env.sh source codebuild/bin/jobs.sh COMMON_S2N_BUILD_ARGS=(-H. -DCMAKE_PREFIX_PATH=$LIBCRYPTO_ROOT -DBUILD_TESTING=OFF) # create installation dir with libs2n.so if [ ! -d $WORK_DIR/s2n-install-shared ]; then (set -x; cmake -B$WORK_DIR/s2n-build-shared -DCMAKE_INSTALL_PREFIX=$WORK_DIR/s2n-install-shared -DBUILD_SHARED_LIBS=ON ${COMMON_S2N_BUILD_ARGS[@]}) (set -x; cmake --build $WORK_DIR/s2n-build-shared --target install -- -j $JOBS) fi # create installation dir with libs2n.a if [ ! -d $WORK_DIR/s2n-install-static ]; then (set -x; cmake -B$WORK_DIR/s2n-build-static -DCMAKE_INSTALL_PREFIX=$WORK_DIR/s2n-install-static -DBUILD_SHARED_LIBS=OFF ${COMMON_S2N_BUILD_ARGS[@]}) (set -x; cmake --build $WORK_DIR/s2n-build-static --target install -- -j $JOBS) fi # create installation dir with both libs2n.so and libs2n.a if [ ! 
-d $WORK_DIR/s2n-install-both ]; then (set -x; cmake -B$WORK_DIR/s2n-build-shared-both -DCMAKE_INSTALL_PREFIX=$WORK_DIR/s2n-install-both -DBUILD_SHARED_LIBS=ON ${COMMON_S2N_BUILD_ARGS[@]}) (set -x; cmake --build $WORK_DIR/s2n-build-shared-both --target install -- -j $JOBS) (set -x; cmake -B$WORK_DIR/s2n-build-static-both -DCMAKE_INSTALL_PREFIX=$WORK_DIR/s2n-install-both -DBUILD_SHARED_LIBS=OFF ${COMMON_S2N_BUILD_ARGS[@]}) (set -x; cmake --build $WORK_DIR/s2n-build-static-both --target install -- -j $JOBS) fi # write out source of a small cmake project, containing: # - mylib: a library that uses s2n # - myapp: executable that uses mylib rm -rf $WORK_DIR/myapp-src mkdir -p $WORK_DIR/myapp-src cat < $WORK_DIR/myapp-src/mylib.c extern int s2n_init(void); void mylib_init(void) { s2n_init(); } EOF cat < $WORK_DIR/myapp-src/myapp.c extern void mylib_init(void); int main() { mylib_init(); } EOF cat < $WORK_DIR/myapp-src/CMakeLists.txt cmake_minimum_required (VERSION 3.0) project (myapp C) add_library(mylib mylib.c) find_package(s2n REQUIRED) target_link_libraries(mylib PRIVATE AWS::s2n) add_executable(myapp myapp.c) target_link_libraries(myapp PRIVATE mylib) EOF # build myapp and mylib, confirm that expected type of libs2n is used build_myapp() { local BUILD_SHARED_LIBS=$1 # ("BUILD_SHARED_LIBS=ON" or "BUILD_SHARED_LIBS=OFF") local S2N_INSTALL_DIR=$2 # which s2n-install dir should be used local LIBS2N_EXPECTED=$3 # ("libs2n.so" or "libs2n.a") which type of libs2n is expected to be used echo "---------------------------------------------------------------------" echo "building myapp with $BUILD_SHARED_LIBS looking-in:$S2N_INSTALL_DIR should-use:$LIBS2N_EXPECTED" local MYAPP_BUILD_DIR=$WORK_DIR/myapp-build rm -rf $MYAPP_BUILD_DIR/ local S2N_INSTALL_PATH=$(realpath $WORK_DIR/$S2N_INSTALL_DIR) (set -x; cmake -H$WORK_DIR/myapp-src -B$MYAPP_BUILD_DIR -D$BUILD_SHARED_LIBS "-DCMAKE_PREFIX_PATH=$S2N_INSTALL_PATH;$LIBCRYPTO_ROOT") (set -x; cmake --build $MYAPP_BUILD_DIR) LDD_OUTPUT=$(ldd $MYAPP_BUILD_DIR/myapp) echo "$LDD_OUTPUT" if echo "$LDD_OUTPUT" | grep -q libs2n.so; then local LIBS2N_ACTUAL=libs2n.so else local LIBS2N_ACTUAL=libs2n.a fi if [ $LIBS2N_ACTUAL != $LIBS2N_EXPECTED ]; then echo "test failure: used $LIBS2N_ACTUAL, but expected to use $LIBS2N_EXPECTED" exit 1 fi } # if only shared libs2n.so is available, that's what should get used build_myapp BUILD_SHARED_LIBS=ON s2n-install-shared libs2n.so build_myapp BUILD_SHARED_LIBS=OFF s2n-install-shared libs2n.so # if only static libs2n.a is available, that's what should get used build_myapp BUILD_SHARED_LIBS=ON s2n-install-static libs2n.a build_myapp BUILD_SHARED_LIBS=OFF s2n-install-static libs2n.a # if both libs2n.so and libs2n.a are available... build_myapp BUILD_SHARED_LIBS=ON s2n-install-both libs2n.so # should choose libs2n.so build_myapp BUILD_SHARED_LIBS=OFF s2n-install-both libs2n.a # should choose libs2n.a aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/test_libcrypto_interning.sh000077500000000000000000000151071456575232400271120ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. 
See the License for the specific language governing # permissions and limitations under the License. # set -e source codebuild/bin/s2n_setup_env.sh source codebuild/bin/jobs.sh # build 2 different version of libcrypto to make it easy to break the application if # interning doesn't work as expected WHICH_LIBCRYPTO=$(echo "${S2N_LIBCRYPTO:-"openssl-1.1.1"}") TARGET_LIBCRYPTO="${WHICH_LIBCRYPTO//[-.]/_}" TARGET_LIBCRYPTO_PATH="${TEST_DEPS_DIR}/${WHICH_LIBCRYPTO}" OPENSSL_1_0="$OPENSSL_1_0_2_INSTALL_DIR" if [ ! -f $OPENSSL_1_0/lib/libcrypto.a ]; then ./codebuild/bin/install_openssl_1_0_2.sh $OPENSSL_1_0/src $OPENSSL_1_0 linux fi if [ ! -f $TARGET_LIBCRYPTO_PATH/lib/libcrypto.a ]; then if [ "$TARGET_LIBCRYPTO" == "awslc" ]; then ./codebuild/bin/install_${TARGET_LIBCRYPTO}.sh $TARGET_LIBCRYPTO_PATH/src $TARGET_LIBCRYPTO_PATH 0 else ./codebuild/bin/install_${TARGET_LIBCRYPTO}.sh $TARGET_LIBCRYPTO_PATH/src $TARGET_LIBCRYPTO_PATH linux fi fi COMMON_FLAGS="-DCMAKE_PREFIX_PATH=$TARGET_LIBCRYPTO_PATH -DCMAKE_BUILD_TYPE=RelWithDebInfo" LTO_FLAGS="-DS2N_LTO=on" # use LTO-aware commands if possible if [ -x "$(command -v gcc-ar)" ]; then LTO_FLAGS+=" -DCMAKE_AR=$(which gcc-ar) -DCMAKE_NM=$(which gcc-nm) -DCMAKE_RANLIB=$(which gcc-ranlib)" fi function fail() { echo "test failure: $1" exit 1 } function write_app() { cat < $1 #include #include int main() { s2n_init(); BN_CTX_new(); return 0; } EOF } function build() { echo "=== BUILDING $1 ===" cmake . -B$1 $COMMON_FLAGS ${@:2} cmake --build $1 -- -j $JOBS } function tests() { echo "=== TESTING $1 ===" make -C $1 test ARGS="-j $JOBS -L unit" } ################## # Dynamic builds # ################## # build a default version to test what happens without interning build build/shared-default -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=on ldd ./build/shared-default/lib/libs2n.so | grep -q libcrypto || fail "shared-default: libcrypto was not linked" # ensure libcrypto interning works with shared libs and no testing build build/shared -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=off -DS2N_INTERN_LIBCRYPTO=on # s2n should not publicly depend on libcrypto ldd ./build/shared/lib/libs2n.so | grep -q libcrypto && fail "shared: libcrypto was not interned" # ensure libcrypto interning works with shared libs, LTO and no testing # NOTE: interning+LTO+testing doesn't currently work build build/shared-lto -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=off -DS2N_INTERN_LIBCRYPTO=on $LTO_FLAGS # s2n should not publicly depend on libcrypto ldd ./build/shared-lto/lib/libs2n.so | grep -q libcrypto && fail "shared-lto: libcrypto was not interned" # ensure libcrypto interning works with shared libs and testing build build/shared-testing -DBUILD_SHARED_LIBS=on -DBUILD_TESTING=on -DS2N_INTERN_LIBCRYPTO=on # s2n should not publicly depend on libcrypto ldd ./build/shared-testing/lib/libs2n.so | grep -q libcrypto && fail "shared-testing: libcrypto was not interned" # run the tests and make sure they all pass with the prefixed version tests build/shared-testing # load the wrong version of libcrypto and the tests should still pass LD_PRELOAD=$OPENSSL_1_0/lib/libcrypto.so tests build/shared-testing # ensure the small app will compile with both versions of openssl without any linking issues for build in shared shared-lto; do # create a small app that links against both s2n and libcrypto write_app build/$build/app.c for target in $OPENSSL_1_0 $TARGET_LIBCRYPTO_PATH; do echo "testing $build linking with $target" mkdir -p $target/bin cc -fPIE -Iapi -I$target/include build/$build/app.c build/$build/lib/libs2n.so 
$target/lib/libcrypto.a -lpthread -ldl -o $target/bin/test-app # make sure the app doesn't crash LD_LIBRARY_PATH="build/$build/lib:$target/lib:$LD_LIBRARY_PATH" $target/bin/test-app done done ################## # Static builds # ################## # ensure libcrypto interning works with static libs # NOTE: static builds don't vary based on testing being enabled build build/static -DBUILD_SHARED_LIBS=off -DBUILD_TESTING=on -DS2N_INTERN_LIBCRYPTO=on tests build/static # TODO figure out how to get static-lto+interning builds working # ensure the small app will compile with both versions of openssl without any linking issues for build in static; do # create a small app that links against both s2n and libcrypto write_app build/$build/app.c for target in $OPENSSL_1_0 $TARGET_LIBCRYPTO_PATH; do echo "testing $build linking with $target" mkdir -p $target/bin cc -fPIE -Iapi -I$target/include build/$build/app.c build/$build/lib/libs2n.a $target/lib/libcrypto.a -lpthread -ldl -o $target/bin/test-app nm $target/bin/test-app | grep -q 'T s2n$BN_CTX_new' || fail "$target: libcrypto symbols were not prefixed" nm $target/bin/test-app | grep -q 'T BN_CTX_new' || fail "$target: libcrypto was not linked in application" # make sure the app doesn't crash $target/bin/test-app done done ################## # Runtime tests # ################## run_connection_test() { local TARGET="$1" LD_PRELOAD=$OPENSSL_1_0/lib/libcrypto.so ./build/$TARGET/bin/s2nd -c default_tls13 localhost 4433 &> /dev/null & local SERVER_PID=$! # Wait for the server to start up before connecting sleep 5s LD_PRELOAD=$OPENSSL_1_0/lib/libcrypto.so ./build/$TARGET/bin/s2nc -i -c default_tls13 localhost 4433 | tee build/client.log kill $SERVER_PID &> /dev/null || true # ensure a TLS 1.3 session was negotiated echo "checking for TLS 1.3" grep -q "Actual protocol version: 34" build/client.log } # without interning, the connection should fail when linking the wrong version of libcrypto echo "Running test: attempt TLS1.3 handshake without interning" run_connection_test shared-default && fail "TLS 1.3 handshake was expected to fail" echo "TLS1.3 handshake failed as expected" echo "" # with interning, the connection should succeed even though we've linked the wrong version of libcrypto echo "Running test: attempt TLS1.3 handshake with interning" run_connection_test shared-testing || fail "TLS 1.3 handshake was expected to succeed" echo "TLS1.3 handshake succeeded as expected" echo "SUCCESS!" aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/bin/utils.sh000077500000000000000000000032621456575232400231260ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # set -e # Utility functions get_latest_release(){ local LATEST_RELEASE_URL=$(gh api /repos/aws/s2n-tls/releases/latest|jq -r '.tarball_url') local LATEST_RELEASE_VER=$(echo "${LATEST_RELEASE_URL}" | sed 's|.*/||') echo "${LATEST_RELEASE_VER}" } gh_login(){ # Takes secrets manager key as an argument # This GH personal access token must have 'repo' permissions to work. 
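# (Illustrative, hedged usage sketch -- the secret id shown is hypothetical;
#  the case statement at the bottom of this file dispatches these arguments:)
#   ./codebuild/bin/utils.sh gh_login ci/github/personal-access-token
#   ./codebuild/bin/utils.sh get_latest_release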
gh auth status || aws secretsmanager get-secret-value --secret-id "$1" --query 'SecretString' --output text |jq -r '.secret_key'| gh auth login --with-token #gh auth status } criterion_install_deps(){ make install source "$HOME"/.cargo/env make -C bindings/rust } usage(){ echo -e "Usage:\n\tget_latest_release: returns just the latest v.N.N.N version" echo -e "\tgh_login : retrieves a GitHub PAT from secrest manager and logs into GitHub.\n" } if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then case "${1:-}" in "gh_login") gh_login "${2:-}";; "get_latest_release") get_latest_release echo "$LATEST_RELEASE_VER";; *) usage; esac fi aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/000077500000000000000000000000001456575232400216065ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_32bit_cross_compile.yml000066400000000000000000000015631456575232400302340ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 phases: build: on-failure: ABORT commands: - cmake . -Bbuild -DCMAKE_TOOLCHAIN_FILE=cmake/toolchains/32-bit.toolchain - cmake --build ./build -j $(nproc) post_build: on-failure: ABORT commands: - CTEST_OUTPUT_ON_FAILURE=1 CTEST_PARALLEL_LEVEL=$(nproc) make -C build test aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_amazonlinux2.yml000066400000000000000000000006451456575232400270170ustar00rootroot00000000000000version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" phases: install: runtime-versions: python: 3.x pre_build: commands: - | if [ -d "third-party-src" ]; then cd third-party-src; fi - ./codebuild/bin/install_al2_dependencies.sh build: commands: - printenv - $CB_BIN_DIR/s2n_codebuild_al2.sh aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_asan.yml000066400000000000000000000040431456575232400253060ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 # This buildspec runs on an Ubuntu22 image. That configuration is a property of # the codebuild job itself. # Codebuild's matrix jobs have non-differentiated names so use batch-list # instead. batch: build-list: # awslc is the happy path libcrypto for s2n-tls - identifier: awslc env: compute-type: BUILD_GENERAL1_LARGE variables: S2N_LIBCRYPTO: awslc # s2n-tls takes different code paths for ossl3, so make sure we run asan on # it. See pr 4033 for a historical motivating example. 
- identifier: openssl_3_0 env: compute-type: BUILD_GENERAL1_LARGE variables: S2N_LIBCRYPTO: openssl-3.0 # openssl 1.1.1 is a widely deployed version of openssl. - identifier: openssl_1_1_1 env: compute-type: BUILD_GENERAL1_LARGE variables: S2N_LIBCRYPTO: openssl-1.1.1 # openssl 1.0.2 is the default distributed on AL2, and AL2 is still widely # deployed - identifier: openssl_1_0_2 env: compute-type: BUILD_GENERAL1_LARGE variables: S2N_LIBCRYPTO: openssl-1.0.2 phases: build: on-failure: ABORT commands: - | cmake . -Bbuild \ -DCMAKE_C_COMPILER=/usr/bin/clang \ -DCMAKE_PREFIX_PATH=/usr/local/$S2N_LIBCRYPTO \ -DASAN=ON - cmake --build ./build -- -j $(nproc) post_build: on-failure: ABORT commands: - CTEST_OUTPUT_ON_FAILURE=1 CTEST_PARALLEL_LEVEL=$(nproc) make -C build test aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_generalbatch.yml000066400000000000000000000316561456575232400270150ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 batch: build-list: - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: GCC_VERSION: NONE SAW: true TESTS: sawHMACPlus identifier: sawHMACPlus - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: GCC_VERSION: NONE SAW: true TESTS: tls identifier: s2nSawTls - buildspec: codebuild/spec/buildspec_sidetrail.yml env: compute-type: BUILD_GENERAL1_2XLARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu14codebuild privileged-mode: true variables: TESTS: sidetrail identifier: s2nSidetrail - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' TESTS: exec_leak identifier: s2nExecLeak - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: true GCC_VERSION: 6 S2N_LIBCRYPTO: openssl-1.0.2-fips TESTS: valgrind identifier: s2nValgrindOpenSSL102Gcc6Fips - identifier: s2nValgrindOpenSSL3 buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: valgrind GCC_VERSION: 9 S2N_LIBCRYPTO: openssl-3.0 BUILD_S2N: true - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: true GCC_VERSION: 9 S2N_LIBCRYPTO: openssl-1.1.1 TESTS: valgrind identifier: s2nValgrindOpenSSL111Gcc9 - buildspec: codebuild/spec/buildspec_ubuntu.yml env: 
compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '6' S2N_LIBCRYPTO: 'openssl-1.0.2' TESTS: valgrind identifier: s2nValgrindOpenssl102 - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '6' S2N_LIBCRYPTO: 'awslc' TESTS: valgrind identifier: s2nValgrindAwslc - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '6' S2N_LIBCRYPTO: 'awslc-fips' TESTS: valgrind identifier: s2nValgrindAwslcFips - identifier: s2nAsanOpenSSL111Coverage buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '9' S2N_COVERAGE: 'true' S2N_LIBCRYPTO: 'openssl-1.1.1' TESTS: asan - identifier: s2nAsanAwslc buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '9' S2N_LIBCRYPTO: 'awslc' TESTS: asan - identifier: s2nAsanOpenssl3 buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: asan GCC_VERSION: '9' S2N_LIBCRYPTO: 'openssl-3.0' BUILD_S2N: 'true' - identifier: s2nAsanOpenssl102 buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '9' S2N_LIBCRYPTO: 'openssl-1.0.2' TESTS: asan - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '9' S2N_LIBCRYPTO: 'openssl-1.1.1' S2N_NO_PQ: 1 TESTS: unit identifier: s2nUnitNoPQ - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: true GCC_VERSION: 9 S2N_COVERAGE: true S2N_LIBCRYPTO: openssl-3.0 TESTS: unit identifier: s2nUnitOpenssl3Gcc9 - buildspec: codebuild/spec/buildspec_amazonlinux2.yml env: compute-type: BUILD_GENERAL1_LARGE image: aws/codebuild/amazonlinux2-aarch64-standard:2.0 privileged-mode: true type: ARM_CONTAINER variables: S2N_NO_PQ: 1 TESTS: unit identifier: s2nUnitAl2Arm - buildspec: codebuild/spec/buildspec_amazonlinux2.yml env: compute-type: BUILD_GENERAL1_SMALL image: aws/codebuild/amazonlinux2-x86_64-standard:3.0 privileged-mode: true variables: S2N_NO_PQ: 1 TESTS: unit S2N_LIBCRYPTO: default identifier: s2nUnitAL2 - buildspec: codebuild/spec/buildspec_amazonlinux2.yml env: compute-type: BUILD_GENERAL1_SMALL image: aws/codebuild/amazonlinux2-x86_64-standard:3.0 privileged-mode: true variables: S2N_NO_PQ: 1 TESTS: unit S2N_LIBCRYPTO: openssl-1.1.1 identifier: s2nUnitAl2Openssl111 - buildspec: 
codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' TESTS: interning identifier: s2nLibcryptoInterningOpenSSL - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' S2N_LIBCRYPTO: awslc TESTS: interning identifier: s2nLibcryptoInterningAwslc - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' S2N_LIBCRYPTO: awslc-fips-2022 TESTS: interning identifier: s2nLibcryptoInterningAwslcFips2022 - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: GCC_VERSION: '6' TESTS: crt identifier: s2nUnitCRT - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: S2N_LIBCRYPTO: openssl-1.1.1 TESTS: sharedandstatic identifier: s2nInstallSharedAndStatic - identifier: s2nDynamicLoad buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: TESTS: dynamicload S2N_LIBCRYPTO: openssl-1.1.1 GCC_VERSION: '9' - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: true GCC_VERSION: 6 S2N_COVERAGE: true S2N_LIBCRYPTO: openssl-1.1.1 TESTS: unit identifier: s2nUnitOpenSSL111Gcc6Coverage - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '6' S2N_LIBCRYPTO: 'libressl' S2N_NO_PQ: 1 TESTS: unit identifier: s2nUnitLibressl - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '9' S2N_LIBCRYPTO: 'boringssl' S2N_NO_PQ: 1 TESTS: unit identifier: s2nUnitBoringssl - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '9' S2N_LIBCRYPTO: 'awslc-fips-2022' TESTS: unit identifier: s2nUnitAwslcFips2022 - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild privileged-mode: true variables: BUILD_S2N: 'true' CC: '/usr/bin/clang' CXX: '/usr/bin/clang++' S2N_LIBCRYPTO: 'awslc' S2N_NO_PQ: 1 TESTS: unit identifier: s2nUnitClang15 - identifier: 32BitBuildAndUnit buildspec: codebuild/spec/buildspec_32bit_cross_compile.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild - identifier: 
ThreadSanitizer buildspec: codebuild/spec/buildspec_tsan.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild - identifier: musl buildspec: codebuild/spec/buildspec_musl.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild - identifier: ktls buildspec: codebuild/spec/buildspec_ktls.yml env: compute-type: BUILD_GENERAL1_LARGE image: aws/codebuild/standard:7.0 privileged-mode: true - identifier: ktlsASAN buildspec: codebuild/spec/buildspec_ktls.yml env: compute-type: BUILD_GENERAL1_LARGE image: aws/codebuild/standard:7.0 privileged-mode: true variables: S2N_CMAKE_OPTIONS: "-DASAN=ON" aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_ktls.yml000066400000000000000000000014511456575232400253410ustar00rootroot00000000000000--- version: 0.2 phases: install: commands: - apt update - apt upgrade -y - apt install -y qemu qemu-system-x86 wget cloud-image-utils pre_build: commands: - aws s3 --quiet sync s3://s2n-ktls-testing ./qemu - cd qemu; bash ./run.sh; cd .. - rsync -avz --exclude=qemu --exclude=tests/fuzz -e 'ssh -p 2222' . codebuild@localhost:/home/codebuild/s2n-tls build: commands: - codebuild-breakpoint - | ssh -p 2222 codebuild@localhost " \ cd s2n-tls; sudo modprobe tls; \ export S2N_CMAKE_OPTIONS=${S2N_CMAKE_OPTIONS}; \ export S2N_KTLS_TESTING_EXPECTED=1; \ nix develop .#openssl111 --command bash -c \ 'source ./nix/shell.sh && clean && configure && build && unit' \ " aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_musl.yml000066400000000000000000000032341456575232400253450ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 env: variables: MUSL_DIR: "test-deps/musl" LIBCRYPTO_DIR: "test-deps/musl-awslc" phases: pre_build: on-failure: ABORT commands: # Install musl libc - git clone https://git.musl-libc.org/git/musl $MUSL_DIR - echo "Installing musl to $CODEBUILD_SRC_DIR/$MUSL_DIR" - cd $MUSL_DIR - ./configure --prefix=$CODEBUILD_SRC_DIR/$MUSL_DIR - make install - cd $CODEBUILD_SRC_DIR # Install libcrypto. # We need to modify the usual install so that the library can link to musl. # If this becomes a problem, we can switch to more official cross compilation. - CFLAGS="-U_FORTIFY_SOURCE -D_FILE_OFFSET_BITS=32" - ./codebuild/bin/install_awslc.sh $(mktemp -d) $CODEBUILD_SRC_DIR/$LIBCRYPTO_DIR 0 build: on-failure: ABORT commands: - CC="$CODEBUILD_SRC_DIR/$MUSL_DIR/bin/musl-gcc" - cmake . -Bbuild -DCMAKE_PREFIX_PATH=$CODEBUILD_SRC_DIR/$LIBCRYPTO_DIR - cmake --build ./build post_build: on-failure: ABORT commands: - CTEST_OUTPUT_ON_FAILURE=1 CTEST_PARALLEL_LEVEL=$(nproc) make -C build test aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_omnibus.yml000066400000000000000000000251511456575232400260430ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 # Doc for batch https://docs.aws.amazon.com/codebuild/latest/userguide/batch-build-buildspec.html#build-spec.batch.build-list batch: build-list: - identifier: sawHMACPlus buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: sawHMACPlus SAW: true GCC_VERSION: NONE - identifier: s2nSawTls buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: tls SAW: true GCC_VERSION: NONE # Other - identifier: s2nSidetrail buildspec: codebuild/spec/buildspec_sidetrail.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_2XLARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu14codebuild variables: TESTS: sidetrail - identifier: s2nValgrindOpenSSL3 buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: valgrind GCC_VERSION: 9 S2N_LIBCRYPTO: openssl-3.0 BUILD_S2N: true - identifier: s2nValgrindOpenSSL102Gcc6Fips buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: valgrind GCC_VERSION: 6 S2N_LIBCRYPTO: openssl-1.0.2-fips BUILD_S2N: true - identifier: s2nValgrindOpenSSL111Gcc9 buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: valgrind GCC_VERSION: 9 S2N_LIBCRYPTO: openssl-1.1.1 BUILD_S2N: true - identifier: s2nValgrindOpenssl102 buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: valgrind GCC_VERSION: '6' S2N_LIBCRYPTO: 'openssl-1.0.2' BUILD_S2N: 'true' - identifier: s2nValgrindAwslc buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: valgrind GCC_VERSION: '6' S2N_LIBCRYPTO: 'awslc' BUILD_S2N: 'true' - identifier: s2nValgrindAwslcFips buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: valgrind GCC_VERSION: '6' S2N_LIBCRYPTO: 'awslc-fips' BUILD_S2N: 'true' - identifier: s2nAsanOpenSSL111Coverage buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: asan 
GCC_VERSION: '6' S2N_LIBCRYPTO: 'openssl-1.1.1' BUILD_S2N: 'true' S2N_COVERAGE: 'true' - identifier: s2nAsanOpenssl3 buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: asan GCC_VERSION: '6' S2N_LIBCRYPTO: 'openssl-3.0' BUILD_S2N: 'true' - identifier: s2nAsanOpenssl102 buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: asan GCC_VERSION: '6' S2N_LIBCRYPTO: 'openssl-1.0.2' BUILD_S2N: 'true' - identifier: s2nUnitNoPQ buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: unit GCC_VERSION: '9' S2N_LIBCRYPTO: 'openssl-1.1.1' S2N_NO_PQ: 1 BUILD_S2N: 'true' - identifier: s2nUnitAl2Arm buildspec: codebuild/spec/buildspec_amazonlinux2.yml env: type: ARM_CONTAINER compute-type: BUILD_GENERAL1_LARGE image: aws/codebuild/amazonlinux2-aarch64-standard:2.0 privileged-mode: true variables: S2N_NO_PQ: 1 TESTS: unit - identifier: s2nUnitAl2 buildspec: codebuild/spec/buildspec_amazonlinux2.yml env: image: aws/codebuild/amazonlinux2-x86_64-standard:3.0 privileged-mode: true compute-type: BUILD_GENERAL1_SMALL variables: TESTS: unit S2N_NO_PQ: 1 - identifier: s2nLibcryptoInterningOpenSSL buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: interning BUILD_S2N: 'true' - identifier: s2nLibcryptoInterningAwslc buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: interning BUILD_S2N: 'true' S2N_LIBCRYPTO: awslc - identifier: s2nExecLeak buildspec: codebuild/spec/buildspec_ubuntu.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild variables: TESTS: exec_leak BUILD_S2N: 'true' - identifier: s2nUnitCRT buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: GCC_VERSION: '6' TESTS: crt - identifier: s2nInstallSharedAndStatic buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: TESTS: sharedandstatic # must use the libcrypto that's actually installed on the system S2N_LIBCRYPTO: openssl-1.1.1 - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: true GCC_VERSION: 6 S2N_LIBCRYPTO: openssl-1.1.1 TESTS: unit identifier: s2nUnitOpenSSL111Gcc6 - identifier: s2nUnitCoverage buildspec: codebuild/spec/buildspec_unit_coverage.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild variables: S2N_LIBCRYPTO: openssl-1.1.1 - identifier: 32BitBuildAndUnit buildspec: 
codebuild/spec/buildspec_32bit_cross_compile.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu18codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '6' S2N_LIBCRYPTO: 'libressl' S2N_NO_PQ: 1 TESTS: unit identifier: s2nUnitLibressl - buildspec: codebuild/spec/buildspec_ubuntu.yml env: compute-type: BUILD_GENERAL1_SMALL image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild privileged-mode: true variables: BUILD_S2N: 'true' GCC_VERSION: '9' S2N_LIBCRYPTO: 'boringssl' S2N_NO_PQ: 1 TESTS: unit identifier: s2nUnitBoringssl # Fuzz tests - identifier: s2nFuzzerOpenSSL111Coverage buildspec: codebuild/spec/buildspec_ubuntu_fuzz_artifacts.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: aws/codebuild/standard:5.0 variables: S2N_LIBCRYPTO: openssl-1.1.1 LATEST_CLANG: true TESTS: fuzz FUZZ_TIMEOUT_SEC: 60 FUZZ_COVERAGE: true - identifier: s2nFuzzerOpenSSL102FIPS buildspec: codebuild/spec/buildspec_ubuntu_fuzz_artifacts.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE image: 024603541914.dkr.ecr.us-west-2.amazonaws.com/docker:ubuntu22codebuild variables: S2N_LIBCRYPTO: openssl-1.0.2-fips LATEST_CLANG: true TESTS: fuzz FUZZ_TIMEOUT_SEC: 60 aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_sidetrail.yml000066400000000000000000000020651456575232400263460ustar00rootroot00000000000000--- # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" phases: build: commands: - printenv - | if [ -d "third-party-src" ]; then cd third-party-src $CB_BIN_DIR/run_sidetrail.sh /sidetrail-install-dir ${CODEBUILD_SRC_DIR}/third-party-src; else $CB_BIN_DIR/run_sidetrail.sh /sidetrail-install-dir ${CODEBUILD_SRC_DIR}; fi post_build: commands: - echo Build completed on `date` aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_tsan.yml000066400000000000000000000015041456575232400253300ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 phases: build: on-failure: ABORT commands: - cmake . 
-Bbuild -DTSAN=on - cmake --build ./build -j $(nproc) post_build: on-failure: ABORT commands: - CTEST_OUTPUT_ON_FAILURE=1 CTEST_PARALLEL_LEVEL=$(nproc) make -C build test aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_ubuntu.yml000066400000000000000000000017511456575232400257110ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" phases: pre_build: commands: - | if [ -d "third-party-src" ]; then cd third-party-src; ln -s /usr/local $CODEBUILD_SRC_DIR/third-party-src/test-deps; fi build: commands: - printenv - ln -s /usr/local $CODEBUILD_SRC_DIR/test-deps - $CB_BIN_DIR/s2n_codebuild.sh aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_ubuntu_fuzz_afl.yml000066400000000000000000000032361456575232400276110ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" phases: install: runtime-versions: python: 3.x pre_build: commands: - | if [ -d "third-party-src" ]; then cd third-party-src; fi - $CB_BIN_DIR/install_ubuntu_dependencies.sh - TESTS=fuzz $CB_BIN_DIR/install_default_dependencies.sh - mkdir tests/fuzz/results - mount -t tmpfs -o size=8096m ramdisk tests/fuzz/results - ln -s test-deps/libfuzzer fuzz_dependencies - ln -s $(which afl-fuzz) / build: commands: - AFL_NO_UI=true TESTS=fuzz AFL_FUZZ=true make fuzz artifacts: files: - "./tests/fuzz/results/$FUZZ_TESTS/*" name: afl-fuzz-stats-$FUZZ_TESTS-$(date +%Y%m%d)-$CODEBUILD_BUILD_NUMBER discard-paths: no secondary-artifacts: logs: files: - "./tests/fuzz/results/$FUZZ_TESTS/crashes/*" - "./tests/fuzz/results/$FUZZ_TESTS/hangs/*" - "./tests/fuzz/results/$FUZZ_TESTS/queue/*" name: aflfuzz-corpus-$FUZZ_TESTS-$(date +%Y%m%d)-$CODEBUILD_BUILD_NUMBER discard-paths: no aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_ubuntu_fuzz_artifacts.yml000066400000000000000000000027121456575232400310250ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
See the License for the specific language governing permissions and # limitations under the License. version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" phases: pre_build: commands: - | if [ -d "third-party-src" ]; then cd third-party-src; ln -s /usr/local $CODEBUILD_SRC_DIR/third-party-src/test-deps; fi - ln -s /usr/local $CODEBUILD_SRC_DIR/test-deps - touch tests/fuzz/placeholder_results.txt tests/fuzz/placeholder_output.txt build: commands: - $CB_BIN_DIR/s2n_codebuild.sh artifacts: files: - "./tests/fuzz/corpus/$FUZZ_TESTS/*" name: fuzz-corpus-$FUZZ_TESTS-$(date +%Y%m%d)-$CODEBUILD_BUILD_NUMBER discard-paths: no secondary-artifacts: logs: files: - "./tests/fuzz/**/*_results.txt" - "./tests/fuzz/**/*_output.txt" - "./coverage/fuzz/**" name: fuzz-cov-logs-$FUZZ_TESTS-$(date +%Y%m%d)-$CODEBUILD_BUILD_NUMBER discard-paths: no aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_ubuntu_integrationv2.yml000066400000000000000000000021441456575232400305610ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" phases: pre_build: commands: - | if [ -d "third-party-src" ]; then cd third-party-src; ln -s /usr/local $CODEBUILD_SRC_DIR/third-party-src/test-deps; fi build: commands: # For jdk integration test - javac tests/integrationv2/bin/SSLSocketClient.java - ln -s /usr/local $CODEBUILD_SRC_DIR/test-deps - TOX_TEST_NAME=$INTEGV2_TEST TESTS=integrationv2 $CB_BIN_DIR/s2n_codebuild.sh aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_ubuntu_integv2criterion.yml000066400000000000000000000037041456575232400312660ustar00rootroot00000000000000--- version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. 
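# CB_BIN_DIR points at the repository's CodeBuild helper scripts invoked by the install and build phases below.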
CB_BIN_DIR: "./codebuild/bin" # Doc for batch https://docs.aws.amazon.com/codebuild/latest/userguide/batch-build-buildspec.html#build-spec.batch.build-list batch: build-graph: - identifier: s2nIntegrationv2WellKnownEndpointsBaseline buildspec: codebuild/spec/buildspec_ubuntu_integv2criterion_baseline.yml env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE variables: INTEGV2_TEST: test_well_known_endpoints S2N_USE_CRITERION: 2 S2N_NO_PQ: 1 TESTS: integrationv2crit GCC_VERSION: 6 RUST_BACKTRACE: 1 - identifier: s2nIntegrationv2WellKnownEndpoints debug-session: true env: privileged-mode: true compute-type: BUILD_GENERAL1_LARGE variables: INTEGV2_TEST: test_well_known_endpoints S2N_USE_CRITERION: 1 S2N_NO_PQ: 1 TESTS: integrationv2crit GCC_VERSION: 6 ARTIFACT_BUCKET: s3://s2n-tls-logs/release ARTIFACT_FILE: integv2criterion depend-on: - s2nIntegrationv2WellKnownEndpointsBaseline phases: install: runtime-versions: python: 3.x commands: - $CB_BIN_DIR/install_ubuntu_dependencies.sh - $CB_BIN_DIR/utils.sh gh_login s2n_codebuild_PRs - export LATEST_RELEASE_VER=$($CB_BIN_DIR/utils.sh get_latest_release) - mkdir -p tests/integrationv2/target/criterion || true - aws s3 cp ${ARTIFACT_BUCKET}/${ARTIFACT_FILE}_${INTEGV2_TEST}_${LATEST_RELEASE_VER}.zip . - unzip -o ${ARTIFACT_FILE}_${INTEGV2_TEST}_${LATEST_RELEASE_VER}.zip -d ./tests/integrationv2/target/criterion build: commands: - codebuild-breakpoint - $CB_BIN_DIR/criterion_delta.sh artifacts: files: - "**/index.html" - "**/*.svg" - "**/*.json" base-directory: "tests/integrationv2/target/criterion" discard-paths: no aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_ubuntu_integv2criterion_baseline.yml000066400000000000000000000012711456575232400331250ustar00rootroot00000000000000--- version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" phases: install: runtime-versions: python: 3.x commands: - | if [ -d "third-party-src" ]; then cd third-party-src; fi - $CB_BIN_DIR/install_ubuntu_dependencies.sh build: commands: - $CB_BIN_DIR/criterion_baseline.sh - mkdir -p tests/integrationv2/target/criterion - echo "{id:$CODEBUILD_BUILD_ID}" >> tests/integrationv2/target/criterion/artifact.json artifacts: files: - "**/index.html" - "**/*.svg" - "**/*.json" base-directory: "tests/integrationv2/target/criterion" discard-paths: no aws-crt-python-0.20.4+dfsg/crt/s2n/codebuild/spec/buildspec_unit_coverage.yml000066400000000000000000000026411456575232400272200ustar00rootroot00000000000000--- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use # this file except in compliance with the License. A copy of the License is # located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. version: 0.2 env: variables: # CODEBUILD_ is a reserved namespace. CB_BIN_DIR: "./codebuild/bin" CC: "/usr/bin/clang" CXX: "/usr/bin/clang++" phases: build: on-failure: ABORT commands: # LLVM complains about corrupt coverage information # for static targets, so compile to a shared lib # instead. - | cmake . 
-Bbuild \ -DCOVERAGE=ON \ -DCMAKE_PREFIX_PATH=$LIBCRYPTO_ROOT \ -DBUILD_SHARED_LIBS=ON - cmake --build ./build -- -j $(nproc) post_build: on-failure: ABORT commands: - LLVM_PROFILE_FILE="ut_%8m.profraw" CTEST_PARALLEL_LEVEL=$(nproc) cmake --build ./build --target test ARGS="--output-on-failure -L unit" - $CB_BIN_DIR/coverage_report.sh artifacts: # upload all files in the coverage_report directory files: - '**/*' base-directory: coverage_report aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/000077500000000000000000000000001456575232400210345ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/.gitignore000066400000000000000000000000141456575232400230170ustar00rootroot00000000000000report.html aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/generate_report.sh000077500000000000000000000014431456575232400245620ustar00rootroot00000000000000#/usr/bin/env bash set -e TOPLEVEL=$(git rev-parse --show-toplevel) BLOB=${1:-main} pushd $TOPLEVEL > /dev/null duvet \ report \ --spec-pattern 'compliance/specs/**/*.toml' \ --source-pattern '(*=,*#)api/**/*.[ch]' \ --source-pattern '(*=,*#)bin/**/*.[ch]' \ --source-pattern '(*=,*#)crypto/**/*.[ch]' \ --source-pattern '(*=,*#)error/**/*.[ch]' \ --source-pattern '(*=,*#)stuffer/**/*.[ch]' \ --source-pattern '(*=,*#)tests/**/*.[ch]' \ --source-pattern '(*=,*#)tls/**/*.[ch]' \ --source-pattern '(*=,*#)utils/**/*.[ch]' \ --require-tests false \ --blob-link "https://github.com/aws/s2n/blob/$BLOB" \ --issue-link 'https://github.com/aws/s2n/issues' \ --no-cargo \ --html compliance/report.html echo "report available in compliance/report.html" popd > /dev/null aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/initialize_duvet.sh000077500000000000000000000017201456575232400247430ustar00rootroot00000000000000#/usr/bin/env bash duvet extract https://tools.ietf.org/rfc/rfc5246 # The Transport Layer Security (TLS) Protocol Version 1.2 duvet extract https://tools.ietf.org/rfc/rfc5869 # HMAC-based Extract-and-Expand Key Derivation Function (HKDF) duvet extract https://tools.ietf.org/rfc/rfc8446 # The Transport Layer Security (TLS) Protocol Version 1.3 duvet extract https://tools.ietf.org/rfc/rfc8448 # Example Handshake Traces for TLS 1.3 duvet extract https://tools.ietf.org/rfc/rfc7627 # Transport Layer Security (TLS) Session Hash and Extended Master Secret Extension duvet extract https://tools.ietf.org/rfc/rfc5746 # Transport Layer Security (TLS) Renegotiation Indication Extension duvet extract https://tools.ietf.org/rfc/rfc4492 # Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) duvet extract https://tools.ietf.org/rfc/rfc8422 # Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) Versions 1.2 and Earlier aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/specs/000077500000000000000000000000001456575232400221515ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/specs/exceptions/000077500000000000000000000000001456575232400243325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/specs/exceptions/rfc5746/000077500000000000000000000000001456575232400254325ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/specs/exceptions/rfc5746/3.6.toml000066400000000000000000000013541456575232400266400ustar00rootroot00000000000000target = "https://tools.ietf.org/rfc/rfc5746#3.6" [[exception]] quote = ''' TLS servers implementing this specification MUST ignore any unknown extensions offered by the client and they MUST accept version numbers higher 
than their highest version number and negotiate the highest common version. These two requirements reiterate preexisting requirements in RFC 5246 and are merely stated here in the interest of forward compatibility. ''' reason = ''' These requirements are a duplicate of existing requirements from RFC 5246. ''' [[exception]] quote = ''' TLS implementations MUST continue to comply with Section 7.4.1.4 for all other extensions. ''' reason = ''' These requirements are a duplicate of existing requirements from RFC 5246. ''' aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/specs/exceptions/rfc5746/4.2.toml000066400000000000000000000030441456575232400266330ustar00rootroot00000000000000target = "https://tools.ietf.org/rfc/rfc5746#4.2" [[exception]] quote = ''' If clients nevertheless choose to renegotiate, they MUST behave as described below. ''' reason = ''' s2n-tls does not support insecure renegotiation and does not renegotiate if secure_renegotiation is FALSE. ''' [[exception]] quote = ''' Clients that choose to renegotiate MUST provide either the TLS_EMPTY_RENEGOTIATION_INFO_SCSV SCSV or "renegotiation_info" in their ClientHello. In a legitimate renegotiation with an un-upgraded server, that server should ignore both of these signals. However, if the server (incorrectly) fails to ignore extensions, sending the "renegotiation_info" extension may cause a handshake failure. Thus, it is permitted, though NOT RECOMMENDED, for the client to simply send the SCSV. This is the only situation in which clients are permitted to not send the "renegotiation_info" extension in a ClientHello that is used for renegotiation. ''' reason = ''' s2n-tls does not support insecure renegotiation and does not renegotiate if secure_renegotiation is FALSE. ''' [[exception]] quote = ''' When the ServerHello is received, the client MUST verify that it does not contain the "renegotiation_info" extension. If it does, the client MUST abort the handshake. (Because the server has already indicated it does not support secure renegotiation, the only way that this can happen is if the server is broken or there is an attack.) ''' reason = ''' s2n-tls does not support insecure renegotiation and does not renegotiate if secure_renegotiation is FALSE. ''' aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/specs/exceptions/rfc5746/4.4.toml000066400000000000000000000016341456575232400266400ustar00rootroot00000000000000target = "https://tools.ietf.org/rfc/rfc5746#4.4" [[exception]] quote = ''' It is RECOMMENDED that servers not permit legacy renegotiation. If servers nevertheless do permit it, they MUST follow the requirements in this section. ''' reason = ''' s2n-tls servers do not support renegotiation and do not allow clients to renegotiate. ''' [[exception]] quote = ''' o When a ClientHello is received, the server MUST verify that it does not contain the TLS_EMPTY_RENEGOTIATION_INFO_SCSV SCSV. If the SCSV is present, the server MUST abort the handshake. ''' reason = ''' s2n-tls servers do not support renegotiation and do not allow clients to renegotiate. ''' [[exception]] quote = ''' o The server MUST verify that the "renegotiation_info" extension is not present; if it is, the server MUST abort the handshake. ''' reason = ''' s2n-tls servers do not support renegotiation and do not allow clients to renegotiate. 
''' aws-crt-python-0.20.4+dfsg/crt/s2n/compliance/specs/exceptions/rfc5746/5.toml000066400000000000000000000004041456575232400264710ustar00rootroot00000000000000target = "https://tools.ietf.org/rfc/rfc5746#5" [[exception]] quote = ''' Servers SHOULD NOT allow clients to renegotiate without using this extension. ''' reason = ''' s2n-tls servers do not support renegotiation and do not allow clients to renegotiate. ''' aws-crt-python-0.20.4+dfsg/crt/s2n/coverage/000077500000000000000000000000001456575232400205155ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/coverage/Makefile000066400000000000000000000012041456575232400221520ustar00rootroot00000000000000# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # include ../s2n.mk .PHONY : clean clean: decruft rm -rf ./html/* rm -rf ./fuzz/* aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/000077500000000000000000000000001456575232400202425ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/Makefile000066400000000000000000000015061456575232400217040ustar00rootroot00000000000000# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # SRCS=$(wildcard *.c) OBJS=$(SRCS:.c=.o) BITCODE_DIR?=../tests/saw/bitcode/ BCS_1= s2n_hash.bc s2n_hmac.bc s2n_drbg.bc s2n_fips.bc s2n_sequence.bc s2n_libcrypto.bc BCS=$(addprefix $(BITCODE_DIR), $(BCS_1)) .PHONY : all all: $(OBJS) .PHONY : bc bc: $(BCS) include ../s2n.mk aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_aead_cipher_aes_gcm.c000066400000000000000000000565521456575232400251070ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include "crypto/s2n_cipher.h" #include "crypto/s2n_ktls_crypto.h" #include "tls/s2n_crypto.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" #if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC) #define S2N_AEAD_AES_GCM_AVAILABLE #endif static uint8_t s2n_aead_cipher_aes128_gcm_available() { #if defined(S2N_AEAD_AES_GCM_AVAILABLE) return (EVP_aead_aes_128_gcm() ? 1 : 0); #else return (EVP_aes_128_gcm() ? 
1 : 0); #endif } static uint8_t s2n_aead_cipher_aes256_gcm_available() { #if defined(S2N_AEAD_AES_GCM_AVAILABLE) return (EVP_aead_aes_256_gcm() ? 1 : 0); #else return (EVP_aes_256_gcm() ? 1 : 0); #endif } #if defined(S2N_AEAD_AES_GCM_AVAILABLE) /* BoringSSL and AWS-LC AEAD API implementation */ static int s2n_aead_cipher_aes_gcm_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_REF(in); POSIX_ENSURE_REF(out); POSIX_ENSURE_REF(iv); POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(aad); /* The size of the |in| blob includes the size of the data and the size of the AES-GCM tag */ POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN); POSIX_ENSURE_GTE(out->size, in->size); POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN); /* Adjust input length to account for the Tag length */ size_t in_len = in->size - S2N_TLS_GCM_TAG_LEN; /* out_len is set by EVP_AEAD_CTX_seal and checked post operation */ size_t out_len = 0; POSIX_GUARD_OSSL(EVP_AEAD_CTX_seal(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in_len, aad->data, aad->size), S2N_ERR_ENCRYPT); S2N_ERROR_IF((in_len + S2N_TLS_GCM_TAG_LEN) != out_len, S2N_ERR_ENCRYPT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes_gcm_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_REF(in); POSIX_ENSURE_REF(out); POSIX_ENSURE_REF(iv); POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(aad); POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN); POSIX_ENSURE_GTE(out->size, in->size - S2N_TLS_GCM_TAG_LEN); POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN); /* out_len is set by EVP_AEAD_CTX_open and checked post operation */ size_t out_len = 0; POSIX_GUARD_OSSL(EVP_AEAD_CTX_open(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in->size, aad->data, aad->size), S2N_ERR_DECRYPT); S2N_ERROR_IF((in->size - S2N_TLS_GCM_TAG_LEN) != out_len, S2N_ERR_ENCRYPT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls12(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return 
S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_128_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(in); POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_aes_256_gcm_tls13(), in->data, in->size, S2N_TLS_GCM_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes_gcm_init(struct s2n_session_key *key) { POSIX_ENSURE_REF(key); EVP_AEAD_CTX_zero(key->evp_aead_ctx); return S2N_SUCCESS; } static int s2n_aead_cipher_aes_gcm_destroy_key(struct s2n_session_key *key) { POSIX_ENSURE_REF(key); EVP_AEAD_CTX_cleanup(key->evp_aead_ctx); return S2N_SUCCESS; } #else /* Standard AES-GCM implementation */ static int s2n_aead_cipher_aes_gcm_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { /* The size of the |in| blob includes the size of the data and the size of the AES-GCM tag */ POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN); POSIX_ENSURE_GTE(out->size, in->size); POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN); /* Initialize the IV */ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* Adjust input length and buffer pointer to account for the Tag length */ int in_len = in->size - S2N_TLS_GCM_TAG_LEN; uint8_t *tag_data = out->data + out->size - S2N_TLS_GCM_TAG_LEN; /* out_len is set by EVP_EncryptUpdate and checked post operation */ int out_len = 0; /* Specify the AAD */ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_ENCRYPT); /* Encrypt the data */ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len), S2N_ERR_ENCRYPT); /* When using AES-GCM, *out_len is the number of bytes written by EVP_EncryptUpdate. 
Since the tag is not written during this call, we do not take S2N_TLS_GCM_TAG_LEN into account */ S2N_ERROR_IF(in_len != out_len, S2N_ERR_ENCRYPT); /* Finalize */ POSIX_GUARD_OSSL(EVP_EncryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len), S2N_ERR_ENCRYPT); /* write the tag */ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_GET_TAG, S2N_TLS_GCM_TAG_LEN, tag_data), S2N_ERR_ENCRYPT); /* When using AES-GCM, EVP_EncryptFinal_ex does not write any bytes. So, we should expect *out_len = 0. */ S2N_ERROR_IF(0 != out_len, S2N_ERR_ENCRYPT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes_gcm_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(in->size, S2N_TLS_GCM_TAG_LEN); POSIX_ENSURE_GTE(out->size, in->size); POSIX_ENSURE_EQ(iv->size, S2N_TLS_GCM_IV_LEN); /* Initialize the IV */ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* Adjust input length and buffer pointer to account for the Tag length */ int in_len = in->size - S2N_TLS_GCM_TAG_LEN; uint8_t *tag_data = in->data + in->size - S2N_TLS_GCM_TAG_LEN; /* Set the TAG */ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_TAG, S2N_TLS_GCM_TAG_LEN, tag_data), S2N_ERR_DECRYPT); /* out_len is set by EVP_DecryptUpdate. While we verify the content of out_len in * s2n_aead_chacha20_poly1305_encrypt, we refrain from this here. This is to avoid * doing any branching before the ciphertext is verified. */ int out_len = 0; /* Specify the AAD */ POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_DECRYPT); int evp_decrypt_rc = 1; /* Decrypt the data, but don't short circuit tag verification. EVP_Decrypt* return 0 on failure, 1 for success. 
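* Both return codes are folded into evp_decrypt_rc with a bitwise AND, so EVP_DecryptFinal_ex always runs and a bad tag is only reported after the full pass over the ciphertext.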
*/ evp_decrypt_rc &= EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len); /* Verify the tag */ evp_decrypt_rc &= EVP_DecryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len); S2N_ERROR_IF(evp_decrypt_rc != 1, S2N_ERR_DECRYPT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_128_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_AES_256_GCM_KEY_LEN); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL), S2N_ERR_KEY_INIT); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_IVLEN, S2N_TLS_GCM_IV_LEN, NULL); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_GUARD(s2n_aead_cipher_aes128_gcm_set_encryption_key(key, in)); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_encryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_GUARD(s2n_aead_cipher_aes256_gcm_set_encryption_key(key, in)); return S2N_SUCCESS; } static int s2n_aead_cipher_aes128_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_GUARD(s2n_aead_cipher_aes128_gcm_set_decryption_key(key, in)); return S2N_SUCCESS; } static int s2n_aead_cipher_aes256_gcm_set_decryption_key_tls13(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_GUARD(s2n_aead_cipher_aes256_gcm_set_decryption_key(key, in)); return S2N_SUCCESS; } static int s2n_aead_cipher_aes_gcm_init(struct s2n_session_key *key) { s2n_evp_ctx_init(key->evp_cipher_ctx); return S2N_SUCCESS; } static int s2n_aead_cipher_aes_gcm_destroy_key(struct s2n_session_key *key) { EVP_CIPHER_CTX_cleanup(key->evp_cipher_ctx); return S2N_SUCCESS; } #endif static S2N_RESULT s2n_tls12_aead_cipher_aes128_gcm_set_ktls_info( struct s2n_ktls_crypto_info_inputs *in, struct s2n_ktls_crypto_info *out) { RESULT_ENSURE_REF(in); 
RESULT_ENSURE_REF(out); s2n_ktls_crypto_info_tls12_aes_gcm_128 *crypto_info = &out->ciphers.aes_gcm_128; crypto_info->info.version = TLS_1_2_VERSION; crypto_info->info.cipher_type = TLS_CIPHER_AES_GCM_128; RESULT_ENSURE_LTE(sizeof(crypto_info->key), in->key.size); RESULT_CHECKED_MEMCPY(crypto_info->key, in->key.data, sizeof(crypto_info->key)); RESULT_ENSURE_LTE(sizeof(crypto_info->rec_seq), in->seq.size); RESULT_CHECKED_MEMCPY(crypto_info->rec_seq, in->seq.data, sizeof(crypto_info->rec_seq)); /* TLS1.2 uses partially explicit nonces. That means that although part of the * nonce is still fixed and implicit (the salt), the remainder is explicit * (written into the record) and must be unique per record. The RFC5288 suggests * using the sequence number as the explicit part. * * Therefore, ktls expects the salt to contain the iv derived from the secret * and should generate the remainder of the nonce per-record. * * See the TLS1.2 RFC: * - https://datatracker.ietf.org/doc/html/rfc5246#section-6.2.3.3 * And RFC5288, which defines the TLS1.2 AES-GCM cipher suites: * - https://datatracker.ietf.org/doc/html/rfc5288#section-3 */ RESULT_ENSURE_LTE(sizeof(crypto_info->salt), in->iv.size); RESULT_CHECKED_MEMCPY(crypto_info->salt, in->iv.data, sizeof(crypto_info->salt)); /* Because TLS1.2 uses partially explicit nonces, the kernel should not * use the iv in crypto_info but instead use a unique value for each record. * * As of this commit, Openssl has chosen to set the TLS1.2 IV to random * bytes when sending and all zeroes when receiving: * https://github.com/openssl/openssl/blob/de8e0851a1c0d22533801f081781a9f0be56c2c2/ssl/record/methods/ktls_meth.c#L197-L204 * And GnuTLS has chosen to set the TLS1.2 IV to the sequence number: * https://github.com/gnutls/gnutls/blob/3f42ae70a1672673cb8f27c2dd3da1a34d1cbdd7/lib/system/ktls.c#L547-L550 * * We (fairly arbitrarily) choose to also set it to the current sequence number. */ RESULT_ENSURE_LTE(sizeof(crypto_info->iv), in->seq.size); RESULT_CHECKED_MEMCPY(crypto_info->iv, in->seq.data, sizeof(crypto_info->iv)); RESULT_GUARD_POSIX(s2n_blob_init(&out->value, (uint8_t *) (void *) crypto_info, sizeof(s2n_ktls_crypto_info_tls12_aes_gcm_128))); return S2N_RESULT_OK; } /* TLS1.2 AES256 is configured like TLS1.2 AES128, but with a larger key size. * See TLS1.2 AES128 for details (particularly a discussion of salt + iv). 
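* The only material difference is that a 32-byte key is copied into the TLS_CIPHER_AES_GCM_256 layout instead of the 16-byte AES128 key.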
*/ static S2N_RESULT s2n_tls12_aead_cipher_aes256_gcm_set_ktls_info( struct s2n_ktls_crypto_info_inputs *in, struct s2n_ktls_crypto_info *out) { RESULT_ENSURE_REF(in); RESULT_ENSURE_REF(out); s2n_ktls_crypto_info_tls12_aes_gcm_256 *crypto_info = &out->ciphers.aes_gcm_256; crypto_info->info.version = TLS_1_2_VERSION; crypto_info->info.cipher_type = TLS_CIPHER_AES_GCM_256; RESULT_ENSURE_LTE(sizeof(crypto_info->key), in->key.size); RESULT_CHECKED_MEMCPY(crypto_info->key, in->key.data, sizeof(crypto_info->key)); RESULT_ENSURE_LTE(sizeof(crypto_info->rec_seq), in->seq.size); RESULT_CHECKED_MEMCPY(crypto_info->rec_seq, in->seq.data, sizeof(crypto_info->rec_seq)); RESULT_ENSURE_LTE(sizeof(crypto_info->salt), in->iv.size); RESULT_CHECKED_MEMCPY(crypto_info->salt, in->iv.data, sizeof(crypto_info->salt)); RESULT_ENSURE_LTE(sizeof(crypto_info->iv), in->seq.size); RESULT_CHECKED_MEMCPY(crypto_info->iv, in->seq.data, sizeof(crypto_info->iv)); RESULT_GUARD_POSIX(s2n_blob_init(&out->value, (uint8_t *) (void *) crypto_info, sizeof(s2n_ktls_crypto_info_tls12_aes_gcm_256))); return S2N_RESULT_OK; } static S2N_RESULT s2n_tls13_aead_cipher_aes128_gcm_set_ktls_info( struct s2n_ktls_crypto_info_inputs *in, struct s2n_ktls_crypto_info *out) { RESULT_ENSURE_REF(in); RESULT_ENSURE_REF(out); s2n_ktls_crypto_info_tls12_aes_gcm_128 *crypto_info = &out->ciphers.aes_gcm_128; crypto_info->info.version = TLS_1_3_VERSION; crypto_info->info.cipher_type = TLS_CIPHER_AES_GCM_128; RESULT_ENSURE_LTE(sizeof(crypto_info->key), in->key.size); RESULT_CHECKED_MEMCPY(crypto_info->key, in->key.data, sizeof(crypto_info->key)); RESULT_ENSURE_LTE(sizeof(crypto_info->rec_seq), in->seq.size); RESULT_CHECKED_MEMCPY(crypto_info->rec_seq, in->seq.data, sizeof(crypto_info->rec_seq)); /* TLS1.3 uses fully implicit nonces. The fixed, implicit IV value derived from * the secret is xored with the sequence number to produce a unique per-record nonce. * * See the TLS1.3 RFC: * - https://www.rfc-editor.org/rfc/rfc8446.html#section-5.3 * * ktls handles this with the same structure as TLS1.2 uses for its partially * explicit nonces by splitting the implicit IV between the salt and iv fields. */ size_t salt_size = sizeof(crypto_info->salt); RESULT_ENSURE_LTE(salt_size, in->iv.size); RESULT_CHECKED_MEMCPY(crypto_info->salt, in->iv.data, salt_size); size_t iv_remainder = in->iv.size - salt_size; RESULT_ENSURE_LTE(sizeof(crypto_info->iv), iv_remainder); RESULT_CHECKED_MEMCPY(crypto_info->iv, in->iv.data + salt_size, sizeof(crypto_info->iv)); RESULT_GUARD_POSIX(s2n_blob_init(&out->value, (uint8_t *) (void *) crypto_info, sizeof(s2n_ktls_crypto_info_tls12_aes_gcm_128))); return S2N_RESULT_OK; } /* TLS1.3 AES256 is configured like TLS1.3 AES128, but with a larger key size. * See TLS1.3 AES128 for details (particularly a discussion of salt + iv). 
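* In both TLS1.3 cases the kernel reconstructs the RFC 8446 per-record nonce as (salt || iv) xored with the left-padded 64-bit record sequence number.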
*/ static S2N_RESULT s2n_tls13_aead_cipher_aes256_gcm_set_ktls_info( struct s2n_ktls_crypto_info_inputs *in, struct s2n_ktls_crypto_info *out) { RESULT_ENSURE_REF(in); RESULT_ENSURE_REF(out); s2n_ktls_crypto_info_tls12_aes_gcm_256 *crypto_info = &out->ciphers.aes_gcm_256; crypto_info->info.version = TLS_1_3_VERSION; crypto_info->info.cipher_type = TLS_CIPHER_AES_GCM_256; RESULT_ENSURE_LTE(sizeof(crypto_info->key), in->key.size); RESULT_CHECKED_MEMCPY(crypto_info->key, in->key.data, sizeof(crypto_info->key)); RESULT_ENSURE_LTE(sizeof(crypto_info->rec_seq), in->seq.size); RESULT_CHECKED_MEMCPY(crypto_info->rec_seq, in->seq.data, sizeof(crypto_info->rec_seq)); size_t salt_size = sizeof(crypto_info->salt); RESULT_ENSURE_LTE(salt_size, in->iv.size); RESULT_CHECKED_MEMCPY(crypto_info->salt, in->iv.data, salt_size); size_t iv_remainder = in->iv.size - salt_size; RESULT_ENSURE_LTE(sizeof(crypto_info->iv), iv_remainder); RESULT_CHECKED_MEMCPY(crypto_info->iv, in->iv.data + salt_size, sizeof(crypto_info->iv)); RESULT_GUARD_POSIX(s2n_blob_init(&out->value, (uint8_t *) (void *) crypto_info, sizeof(s2n_ktls_crypto_info_tls12_aes_gcm_256))); return S2N_RESULT_OK; } const struct s2n_cipher s2n_aes128_gcm = { .key_material_size = S2N_TLS_AES_128_GCM_KEY_LEN, .type = S2N_AEAD, .io.aead = { .record_iv_size = S2N_TLS_GCM_EXPLICIT_IV_LEN, .fixed_iv_size = S2N_TLS_GCM_FIXED_IV_LEN, .tag_size = S2N_TLS_GCM_TAG_LEN, .decrypt = s2n_aead_cipher_aes_gcm_decrypt, .encrypt = s2n_aead_cipher_aes_gcm_encrypt }, .is_available = s2n_aead_cipher_aes128_gcm_available, .init = s2n_aead_cipher_aes_gcm_init, .set_encryption_key = s2n_aead_cipher_aes128_gcm_set_encryption_key, .set_decryption_key = s2n_aead_cipher_aes128_gcm_set_decryption_key, .destroy_key = s2n_aead_cipher_aes_gcm_destroy_key, .set_ktls_info = s2n_tls12_aead_cipher_aes128_gcm_set_ktls_info, }; const struct s2n_cipher s2n_aes256_gcm = { .key_material_size = S2N_TLS_AES_256_GCM_KEY_LEN, .type = S2N_AEAD, .io.aead = { .record_iv_size = S2N_TLS_GCM_EXPLICIT_IV_LEN, .fixed_iv_size = S2N_TLS_GCM_FIXED_IV_LEN, .tag_size = S2N_TLS_GCM_TAG_LEN, .decrypt = s2n_aead_cipher_aes_gcm_decrypt, .encrypt = s2n_aead_cipher_aes_gcm_encrypt }, .is_available = s2n_aead_cipher_aes256_gcm_available, .init = s2n_aead_cipher_aes_gcm_init, .set_encryption_key = s2n_aead_cipher_aes256_gcm_set_encryption_key, .set_decryption_key = s2n_aead_cipher_aes256_gcm_set_decryption_key, .destroy_key = s2n_aead_cipher_aes_gcm_destroy_key, .set_ktls_info = s2n_tls12_aead_cipher_aes256_gcm_set_ktls_info, }; /* TLS 1.3 GCM ciphers */ const struct s2n_cipher s2n_tls13_aes128_gcm = { .key_material_size = S2N_TLS_AES_128_GCM_KEY_LEN, .type = S2N_AEAD, .io.aead = { .record_iv_size = S2N_TLS13_RECORD_IV_LEN, .fixed_iv_size = S2N_TLS13_FIXED_IV_LEN, .tag_size = S2N_TLS_GCM_TAG_LEN, .decrypt = s2n_aead_cipher_aes_gcm_decrypt, .encrypt = s2n_aead_cipher_aes_gcm_encrypt }, .is_available = s2n_aead_cipher_aes128_gcm_available, .init = s2n_aead_cipher_aes_gcm_init, .set_encryption_key = s2n_aead_cipher_aes128_gcm_set_encryption_key_tls13, .set_decryption_key = s2n_aead_cipher_aes128_gcm_set_decryption_key_tls13, .destroy_key = s2n_aead_cipher_aes_gcm_destroy_key, .set_ktls_info = s2n_tls13_aead_cipher_aes128_gcm_set_ktls_info, }; const struct s2n_cipher s2n_tls13_aes256_gcm = { .key_material_size = S2N_TLS_AES_256_GCM_KEY_LEN, .type = S2N_AEAD, .io.aead = { .record_iv_size = S2N_TLS13_RECORD_IV_LEN, .fixed_iv_size = S2N_TLS13_FIXED_IV_LEN, .tag_size = S2N_TLS_GCM_TAG_LEN, .decrypt = 
s2n_aead_cipher_aes_gcm_decrypt, .encrypt = s2n_aead_cipher_aes_gcm_encrypt }, .is_available = s2n_aead_cipher_aes256_gcm_available, .init = s2n_aead_cipher_aes_gcm_init, .set_encryption_key = s2n_aead_cipher_aes256_gcm_set_encryption_key_tls13, .set_decryption_key = s2n_aead_cipher_aes256_gcm_set_decryption_key_tls13, .destroy_key = s2n_aead_cipher_aes_gcm_destroy_key, .set_ktls_info = s2n_tls13_aead_cipher_aes256_gcm_set_ktls_info, }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_aead_cipher_chacha20_poly1305.c000066400000000000000000000262751456575232400264350ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include "crypto/s2n_cipher.h" #include "crypto/s2n_openssl.h" #include "tls/s2n_crypto.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" /* We support two different backing implementations of ChaCha20-Poly1305: one * implementation for OpenSSL (>= 1.1.0, see * https://www.openssl.org/news/cl110.txt) and one implementation for BoringSSL * and AWS-LC. LibreSSL supports ChaCha20-Poly1305, but the interface is * different. * Note, the order in the if/elif below matters because both BoringSSL and * AWS-LC define OPENSSL_VERSION_NUMBER. */ #if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC) #define S2N_CHACHA20_POLY1305_AVAILABLE_BSSL_AWSLC #elif (S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) && !defined(LIBRESSL_VERSION_NUMBER)) #define S2N_CHACHA20_POLY1305_AVAILABLE_OSSL #endif static uint8_t s2n_aead_chacha20_poly1305_available(void) { #if defined(S2N_CHACHA20_POLY1305_AVAILABLE_OSSL) || defined(S2N_CHACHA20_POLY1305_AVAILABLE_BSSL_AWSLC) return 1; #else return 0; #endif } #if defined(S2N_CHACHA20_POLY1305_AVAILABLE_OSSL) /* OpenSSL implementation */ static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN); /* The size of the |in| blob includes the size of the data and the size of the ChaCha20-Poly1305 tag */ POSIX_ENSURE_GTE(out->size, in->size); POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN); /* Initialize the IV */ POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* Adjust input length and buffer pointer to account for the Tag length */ int in_len = in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN; uint8_t *tag_data = out->data + out->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN; /* out_len is set by EVP_EncryptUpdate and checked post operation */ int out_len = 0; /* Specify the AAD */ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_ENCRYPT); /* Encrypt the data */ POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len), S2N_ERR_ENCRYPT); /* For OpenSSL 1.1.0 and 1.1.1, when using ChaCha20-Poly1305, *out_len is the number of bytes written by EVP_EncryptUpdate. 
Since the tag is not written during this call, we do not take S2N_TLS_CHACHA20_POLY1305_TAG_LEN into account */ S2N_ERROR_IF(in_len != out_len, S2N_ERR_ENCRYPT); /* Finalize */ POSIX_GUARD_OSSL(EVP_EncryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len), S2N_ERR_ENCRYPT); /* Write the tag */ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_GET_TAG, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, tag_data), S2N_ERR_ENCRYPT); /* For OpenSSL 1.1.0 and 1.1.1, when using ChaCha20-Poly1305, EVP_EncryptFinal_ex does not write any bytes. So, we should expect *out_len = 0. */ S2N_ERROR_IF(0 != out_len, S2N_ERR_ENCRYPT); return 0; } static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN); POSIX_ENSURE_GTE(out->size, in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN); POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN); /* Initialize the IV */ POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* Adjust input length and buffer pointer to account for the Tag length */ int in_len = in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN; uint8_t *tag_data = in->data + in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN; /* Set the TAG */ POSIX_GUARD_OSSL(EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_GCM_SET_TAG, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, tag_data), S2N_ERR_DECRYPT); /* out_len is set by EVP_DecryptUpdate. While we verify the content of out_len in * s2n_aead_chacha20_poly1305_encrypt, we refrain from this here. This is to avoid * doing any branching before the ciphertext is verified. */ int out_len = 0; /* Specify the AAD */ POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, NULL, &out_len, aad->data, aad->size), S2N_ERR_DECRYPT); int evp_decrypt_rc = 1; /* Decrypt the data, but don't short circuit tag verification. EVP_Decrypt* return 0 on failure, 1 for success. 
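* Accumulating both results in evp_decrypt_rc keeps the Poly1305 tag check from influencing control flow until the entire record has been processed.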
*/ evp_decrypt_rc &= EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &out_len, in->data, in_len); /* Verify the tag */ evp_decrypt_rc &= EVP_DecryptFinal_ex(key->evp_cipher_ctx, out->data, &out_len); S2N_ERROR_IF(evp_decrypt_rc != 1, S2N_ERR_DECRYPT); return 0; } static int s2n_aead_chacha20_poly1305_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_chacha20_poly1305(), NULL, NULL, NULL), S2N_ERR_KEY_INIT); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_IVLEN, S2N_TLS_CHACHA20_POLY1305_IV_LEN, NULL); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_aead_chacha20_poly1305_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_chacha20_poly1305(), NULL, NULL, NULL), S2N_ERR_KEY_INIT); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_IVLEN, S2N_TLS_CHACHA20_POLY1305_IV_LEN, NULL); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_aead_chacha20_poly1305_init(struct s2n_session_key *key) { s2n_evp_ctx_init(key->evp_cipher_ctx); return 0; } static int s2n_aead_chacha20_poly1305_destroy_key(struct s2n_session_key *key) { EVP_CIPHER_CTX_cleanup(key->evp_cipher_ctx); return 0; } #elif defined(S2N_CHACHA20_POLY1305_AVAILABLE_BSSL_AWSLC) /* BoringSSL and AWS-LC implementation */ static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN); /* The size of the |in| blob includes the size of the data and the size of the ChaCha20-Poly1305 tag */ POSIX_ENSURE_GTE(out->size, in->size); POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN); /* Adjust input length to account for the Tag length */ size_t in_len = in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN; /* out_len is set by EVP_AEAD_CTX_seal and checked post operation */ size_t out_len = 0; POSIX_GUARD_OSSL(EVP_AEAD_CTX_seal(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in_len, aad->data, aad->size), S2N_ERR_ENCRYPT); S2N_ERROR_IF((in_len + S2N_TLS_CHACHA20_POLY1305_TAG_LEN) != out_len, S2N_ERR_ENCRYPT); return 0; } static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN); POSIX_ENSURE_GTE(out->size, in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN); POSIX_ENSURE_EQ(iv->size, S2N_TLS_CHACHA20_POLY1305_IV_LEN); /* out_len is set by EVP_AEAD_CTX_open and checked post operation */ size_t out_len = 0; POSIX_GUARD_OSSL(EVP_AEAD_CTX_open(key->evp_aead_ctx, out->data, &out_len, out->size, iv->data, iv->size, in->data, in->size, aad->data, aad->size), S2N_ERR_DECRYPT); S2N_ERROR_IF((in->size - S2N_TLS_CHACHA20_POLY1305_TAG_LEN) != out_len, S2N_ERR_ENCRYPT); return 0; } static int s2n_aead_chacha20_poly1305_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_chacha20_poly1305(), in->data, 
in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_aead_chacha20_poly1305_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, S2N_TLS_CHACHA20_POLY1305_KEY_LEN); POSIX_GUARD_OSSL(EVP_AEAD_CTX_init(key->evp_aead_ctx, EVP_aead_chacha20_poly1305(), in->data, in->size, S2N_TLS_CHACHA20_POLY1305_TAG_LEN, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_aead_chacha20_poly1305_init(struct s2n_session_key *key) { EVP_AEAD_CTX_zero(key->evp_aead_ctx); return 0; } static int s2n_aead_chacha20_poly1305_destroy_key(struct s2n_session_key *key) { EVP_AEAD_CTX_cleanup(key->evp_aead_ctx); return 0; } #else /* No ChaCha20-Poly1305 implementation exists for chosen cryptographic provider (E.g Openssl 1.0.x) */ static int s2n_aead_chacha20_poly1305_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_BAIL(S2N_ERR_ENCRYPT); } static int s2n_aead_chacha20_poly1305_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *aad, struct s2n_blob *in, struct s2n_blob *out) { POSIX_BAIL(S2N_ERR_DECRYPT); } static int s2n_aead_chacha20_poly1305_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_BAIL(S2N_ERR_KEY_INIT); } static int s2n_aead_chacha20_poly1305_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_BAIL(S2N_ERR_KEY_INIT); } static int s2n_aead_chacha20_poly1305_init(struct s2n_session_key *key) { POSIX_BAIL(S2N_ERR_KEY_INIT); } static int s2n_aead_chacha20_poly1305_destroy_key(struct s2n_session_key *key) { POSIX_BAIL(S2N_ERR_KEY_DESTROY); } #endif const struct s2n_cipher s2n_chacha20_poly1305 = { .key_material_size = S2N_TLS_CHACHA20_POLY1305_KEY_LEN, .type = S2N_AEAD, .io.aead = { .record_iv_size = S2N_TLS_CHACHA20_POLY1305_EXPLICIT_IV_LEN, .fixed_iv_size = S2N_TLS_CHACHA20_POLY1305_FIXED_IV_LEN, .tag_size = S2N_TLS_CHACHA20_POLY1305_TAG_LEN, .decrypt = s2n_aead_chacha20_poly1305_decrypt, .encrypt = s2n_aead_chacha20_poly1305_encrypt }, .is_available = s2n_aead_chacha20_poly1305_available, .init = s2n_aead_chacha20_poly1305_init, .set_encryption_key = s2n_aead_chacha20_poly1305_set_encryption_key, .set_decryption_key = s2n_aead_chacha20_poly1305_set_decryption_key, .destroy_key = s2n_aead_chacha20_poly1305_destroy_key, }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_cbc_cipher_3des.c000066400000000000000000000066771456575232400242070ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include "crypto/s2n_cipher.h" #include "crypto/s2n_openssl.h" #include "error/s2n_errno.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" static uint8_t s2n_cbc_cipher_3des_available() { return (EVP_des_ede3_cbc() ? 
1 : 0); } static int s2n_cbc_cipher_3des_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(out->size, in->size); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* len is set by EVP_EncryptUpdate and checked post operation */ int len = 0; POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT); POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_ENCRYPT); return 0; } static int s2n_cbc_cipher_3des_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(out->size, in->size); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* len is set by EVP_DecryptUpdate. It is not checked here but padding is manually removed and therefore * the decryption operation is validated. */ int len = 0; POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT); return 0; } static int s2n_cbc_cipher_3des_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 192 / 8); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_des_ede3_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_cbc_cipher_3des_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 192 / 8); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_des_ede3_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_cbc_cipher_3des_init(struct s2n_session_key *key) { s2n_evp_ctx_init(key->evp_cipher_ctx); return 0; } static int s2n_cbc_cipher_3des_destroy_key(struct s2n_session_key *key) { EVP_CIPHER_CTX_cleanup(key->evp_cipher_ctx); return 0; } const struct s2n_cipher s2n_3des = { .key_material_size = 24, .type = S2N_CBC, .io.cbc = { .block_size = 8, .record_iv_size = 8, .decrypt = s2n_cbc_cipher_3des_decrypt, .encrypt = s2n_cbc_cipher_3des_encrypt }, .is_available = s2n_cbc_cipher_3des_available, .init = s2n_cbc_cipher_3des_init, .set_decryption_key = s2n_cbc_cipher_3des_set_decryption_key, .set_encryption_key = s2n_cbc_cipher_3des_set_encryption_key, .destroy_key = s2n_cbc_cipher_3des_destroy_key, }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_cbc_cipher_aes.c000066400000000000000000000113741456575232400241070ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include "crypto/s2n_cipher.h" #include "crypto/s2n_openssl.h" #include "error/s2n_errno.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" static uint8_t s2n_cbc_cipher_aes128_available() { return (EVP_aes_128_cbc() ? 1 : 0); } static uint8_t s2n_cbc_cipher_aes256_available() { return (EVP_aes_256_cbc() ? 
1 : 0); } static int s2n_cbc_cipher_aes_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(out->size, in->size); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* len is set by EVP_EncryptUpdate and checked post operation */ int len = 0; POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT); POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_ENCRYPT); return 0; } int s2n_cbc_cipher_aes_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(out->size, in->size); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* len is set by EVP_DecryptUpdate. It is not checked here but padding is manually removed and therefore * the decryption operation is validated. */ int len = 0; POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT); return 0; } int s2n_cbc_cipher_aes128_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 128 / 8); /* Always returns 1 */ EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_cbc_cipher_aes128_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 128 / 8); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_128_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_cbc_cipher_aes256_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 256 / 8); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } int s2n_cbc_cipher_aes256_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 256 / 8); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, EVP_aes_256_cbc(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return 0; } static int s2n_cbc_cipher_aes_init(struct s2n_session_key *key) { s2n_evp_ctx_init(key->evp_cipher_ctx); return 0; } static int s2n_cbc_cipher_aes_destroy_key(struct s2n_session_key *key) { EVP_CIPHER_CTX_cleanup(key->evp_cipher_ctx); return 0; } const struct s2n_cipher s2n_aes128 = { .key_material_size = 16, .type = S2N_CBC, .io.cbc = { .block_size = 16, .record_iv_size = 16, .decrypt = s2n_cbc_cipher_aes_decrypt, .encrypt = s2n_cbc_cipher_aes_encrypt }, .is_available = s2n_cbc_cipher_aes128_available, .init = s2n_cbc_cipher_aes_init, .set_decryption_key = s2n_cbc_cipher_aes128_set_decryption_key, .set_encryption_key = s2n_cbc_cipher_aes128_set_encryption_key, .destroy_key = s2n_cbc_cipher_aes_destroy_key, }; const struct s2n_cipher s2n_aes256 = { .key_material_size = 32, .type = S2N_CBC, .io.cbc = { .block_size = 16, .record_iv_size = 16, .decrypt = s2n_cbc_cipher_aes_decrypt, .encrypt = s2n_cbc_cipher_aes_encrypt }, .is_available = s2n_cbc_cipher_aes256_available, .init = s2n_cbc_cipher_aes_init, .set_decryption_key = s2n_cbc_cipher_aes256_set_decryption_key, .set_encryption_key = s2n_cbc_cipher_aes256_set_encryption_key, 
.destroy_key = s2n_cbc_cipher_aes_destroy_key, }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_certificate.c000066400000000000000000001037331456575232400234610ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "crypto/s2n_certificate.h" #include #include #include #include #include "api/s2n.h" #include "crypto/s2n_openssl_x509.h" #include "tls/extensions/s2n_extension_list.h" #include "tls/s2n_connection.h" #include "utils/s2n_array.h" #include "utils/s2n_mem.h" #include "utils/s2n_safety.h" int s2n_cert_set_cert_type(struct s2n_cert *cert, s2n_pkey_type pkey_type) { POSIX_ENSURE_REF(cert); cert->pkey_type = pkey_type; POSIX_GUARD_RESULT(s2n_pkey_setup_for_type(&cert->public_key, pkey_type)); return 0; } int s2n_create_cert_chain_from_stuffer(struct s2n_cert_chain *cert_chain_out, struct s2n_stuffer *chain_in_stuffer) { DEFER_CLEANUP(struct s2n_stuffer cert_out_stuffer = { 0 }, s2n_stuffer_free); POSIX_GUARD(s2n_stuffer_growable_alloc(&cert_out_stuffer, 2048)); struct s2n_cert **insert = &cert_chain_out->head; uint32_t chain_size = 0; do { struct s2n_cert *new_node = NULL; if (s2n_stuffer_certificate_from_pem(chain_in_stuffer, &cert_out_stuffer) < 0) { if (chain_size == 0) { POSIX_BAIL(S2N_ERR_NO_CERTIFICATE_IN_PEM); } break; } struct s2n_blob mem = { 0 }; POSIX_GUARD(s2n_alloc(&mem, sizeof(struct s2n_cert))); POSIX_GUARD(s2n_blob_zero(&mem)); new_node = (struct s2n_cert *) (void *) mem.data; if (s2n_alloc(&new_node->raw, s2n_stuffer_data_available(&cert_out_stuffer)) != S2N_SUCCESS) { POSIX_GUARD(s2n_free(&mem)); S2N_ERROR_PRESERVE_ERRNO(); } if (s2n_stuffer_read(&cert_out_stuffer, &new_node->raw) != S2N_SUCCESS) { POSIX_GUARD(s2n_free(&mem)); S2N_ERROR_PRESERVE_ERRNO(); } /* Additional 3 bytes for the length field in the protocol */ chain_size += new_node->raw.size + 3; new_node->next = NULL; *insert = new_node; insert = &new_node->next; } while (s2n_stuffer_data_available(chain_in_stuffer)); /* Leftover data at this point means one of two things: * A bug in s2n's PEM parsing OR a malformed PEM in the user's chain. * Be conservative and fail instead of using a partial chain. 
*/ S2N_ERROR_IF(s2n_stuffer_data_available(chain_in_stuffer) > 0, S2N_ERR_INVALID_PEM); cert_chain_out->chain_size = chain_size; return 0; } int s2n_cert_chain_and_key_set_cert_chain_from_stuffer(struct s2n_cert_chain_and_key *cert_and_key, struct s2n_stuffer *chain_in_stuffer) { return s2n_create_cert_chain_from_stuffer(cert_and_key->cert_chain, chain_in_stuffer); } int s2n_cert_chain_and_key_set_cert_chain_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *cert_chain_pem, uint32_t cert_chain_len) { DEFER_CLEANUP(struct s2n_stuffer chain_in_stuffer = { 0 }, s2n_stuffer_free); POSIX_GUARD(s2n_stuffer_init_ro_from_string(&chain_in_stuffer, cert_chain_pem, cert_chain_len)); POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_from_stuffer(cert_and_key, &chain_in_stuffer)); return S2N_SUCCESS; } int s2n_cert_chain_and_key_set_cert_chain(struct s2n_cert_chain_and_key *cert_and_key, const char *cert_chain_pem) { DEFER_CLEANUP(struct s2n_stuffer chain_in_stuffer = { 0 }, s2n_stuffer_free); /* Turn the chain into a stuffer */ POSIX_GUARD(s2n_stuffer_alloc_ro_from_string(&chain_in_stuffer, cert_chain_pem)); POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_from_stuffer(cert_and_key, &chain_in_stuffer)); return S2N_SUCCESS; } int s2n_cert_chain_and_key_set_private_key_from_stuffer(struct s2n_cert_chain_and_key *cert_and_key, struct s2n_stuffer *key_in_stuffer, struct s2n_stuffer *key_out_stuffer) { struct s2n_blob key_blob = { 0 }; POSIX_GUARD(s2n_pkey_zero_init(cert_and_key->private_key)); /* Convert pem to asn1 and asn1 to the private key. Handles both PKCS#1 and PKCS#8 formats */ int type = 0; POSIX_GUARD(s2n_stuffer_private_key_from_pem(key_in_stuffer, key_out_stuffer, &type)); key_blob.size = s2n_stuffer_data_available(key_out_stuffer); key_blob.data = s2n_stuffer_raw_read(key_out_stuffer, key_blob.size); POSIX_ENSURE_REF(key_blob.data); POSIX_GUARD_RESULT(s2n_asn1der_to_private_key(cert_and_key->private_key, &key_blob, type)); return S2N_SUCCESS; } int s2n_cert_chain_and_key_set_private_key_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *private_key_pem, uint32_t private_key_len) { DEFER_CLEANUP(struct s2n_stuffer key_in_stuffer = { 0 }, s2n_stuffer_free); DEFER_CLEANUP(struct s2n_stuffer key_out_stuffer = { 0 }, s2n_stuffer_free); /* Put the private key pem in a stuffer */ POSIX_GUARD(s2n_stuffer_init_ro_from_string(&key_in_stuffer, private_key_pem, private_key_len)); POSIX_GUARD(s2n_stuffer_growable_alloc(&key_out_stuffer, private_key_len)); POSIX_GUARD(s2n_cert_chain_and_key_set_private_key_from_stuffer(cert_and_key, &key_in_stuffer, &key_out_stuffer)); return S2N_SUCCESS; } int s2n_cert_chain_and_key_set_private_key(struct s2n_cert_chain_and_key *cert_and_key, const char *private_key_pem) { POSIX_ENSURE_REF(private_key_pem); DEFER_CLEANUP(struct s2n_stuffer key_in_stuffer = { 0 }, s2n_stuffer_free); DEFER_CLEANUP(struct s2n_stuffer key_out_stuffer = { 0 }, s2n_stuffer_free); /* Put the private key pem in a stuffer */ POSIX_GUARD(s2n_stuffer_alloc_ro_from_string(&key_in_stuffer, private_key_pem)); POSIX_GUARD(s2n_stuffer_growable_alloc(&key_out_stuffer, strlen(private_key_pem))); POSIX_GUARD(s2n_cert_chain_and_key_set_private_key_from_stuffer(cert_and_key, &key_in_stuffer, &key_out_stuffer)); return S2N_SUCCESS; } int s2n_cert_chain_and_key_set_ocsp_data(struct s2n_cert_chain_and_key *chain_and_key, const uint8_t *data, uint32_t length) { POSIX_ENSURE_REF(chain_and_key); POSIX_GUARD(s2n_free(&chain_and_key->ocsp_status)); if (data && length) { 
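/* Note: the caller's buffer is copied into a blob owned by chain_and_key (released again in
 * s2n_cert_chain_and_key_free). Passing data == NULL or length == 0 only performs the s2n_free()
 * above, i.e. it clears any previously configured OCSP status. */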
POSIX_GUARD(s2n_alloc(&chain_and_key->ocsp_status, length)); POSIX_CHECKED_MEMCPY(chain_and_key->ocsp_status.data, data, length); } return 0; } int s2n_cert_chain_and_key_set_sct_list(struct s2n_cert_chain_and_key *chain_and_key, const uint8_t *data, uint32_t length) { POSIX_ENSURE_REF(chain_and_key); POSIX_GUARD(s2n_free(&chain_and_key->sct_list)); if (data && length) { POSIX_GUARD(s2n_alloc(&chain_and_key->sct_list, length)); POSIX_CHECKED_MEMCPY(chain_and_key->sct_list.data, data, length); } return 0; } struct s2n_cert_chain_and_key *s2n_cert_chain_and_key_new(void) { DEFER_CLEANUP(struct s2n_blob chain_and_key_mem = { 0 }, s2n_free); PTR_GUARD_POSIX(s2n_alloc(&chain_and_key_mem, sizeof(struct s2n_cert_chain_and_key))); PTR_GUARD_POSIX(s2n_blob_zero(&chain_and_key_mem)); DEFER_CLEANUP(struct s2n_blob cert_chain_mem = { 0 }, s2n_free); PTR_GUARD_POSIX(s2n_alloc(&cert_chain_mem, sizeof(struct s2n_cert_chain))); PTR_GUARD_POSIX(s2n_blob_zero(&cert_chain_mem)); DEFER_CLEANUP(struct s2n_blob pkey_mem = { 0 }, s2n_free); PTR_GUARD_POSIX(s2n_alloc(&pkey_mem, sizeof(s2n_cert_private_key))); PTR_GUARD_POSIX(s2n_blob_zero(&pkey_mem)); DEFER_CLEANUP(struct s2n_array *cn_names = NULL, s2n_array_free_p); cn_names = s2n_array_new(sizeof(struct s2n_blob)); PTR_ENSURE_REF(cn_names); DEFER_CLEANUP(struct s2n_array *san_names = NULL, s2n_array_free_p); san_names = s2n_array_new(sizeof(struct s2n_blob)); PTR_ENSURE_REF(san_names); struct s2n_cert_chain_and_key *chain_and_key = (struct s2n_cert_chain_and_key *) (void *) chain_and_key_mem.data; chain_and_key->cert_chain = (struct s2n_cert_chain *) (void *) cert_chain_mem.data; chain_and_key->private_key = (s2n_cert_private_key *) (void *) pkey_mem.data; chain_and_key->cn_names = cn_names; chain_and_key->san_names = san_names; ZERO_TO_DISABLE_DEFER_CLEANUP(chain_and_key_mem); ZERO_TO_DISABLE_DEFER_CLEANUP(cert_chain_mem); ZERO_TO_DISABLE_DEFER_CLEANUP(pkey_mem); ZERO_TO_DISABLE_DEFER_CLEANUP(cn_names); ZERO_TO_DISABLE_DEFER_CLEANUP(san_names); return chain_and_key; } DEFINE_POINTER_CLEANUP_FUNC(GENERAL_NAMES *, GENERAL_NAMES_free); int s2n_cert_chain_and_key_load_sans(struct s2n_cert_chain_and_key *chain_and_key, X509 *x509_cert) { POSIX_ENSURE_REF(chain_and_key->san_names); POSIX_ENSURE_REF(x509_cert); DEFER_CLEANUP(GENERAL_NAMES *san_names = X509_get_ext_d2i(x509_cert, NID_subject_alt_name, NULL, NULL), GENERAL_NAMES_free_pointer); if (san_names == NULL) { /* No SAN extension */ return 0; } const int num_san_names = sk_GENERAL_NAME_num(san_names); for (int i = 0; i < num_san_names; i++) { GENERAL_NAME *san_name = sk_GENERAL_NAME_value(san_names, i); if (!san_name) { continue; } if (san_name->type == GEN_DNS) { /* Decoding isn't necessary here since a DNS SAN name is ASCII(type V_ASN1_IA5STRING) */ unsigned char *san_str = san_name->d.dNSName->data; const size_t san_str_len = san_name->d.dNSName->length; struct s2n_blob *san_blob = NULL; POSIX_GUARD_RESULT(s2n_array_pushback(chain_and_key->san_names, (void **) &san_blob)); if (!san_blob) { POSIX_BAIL(S2N_ERR_NULL_SANS); } if (s2n_alloc(san_blob, san_str_len)) { S2N_ERROR_PRESERVE_ERRNO(); } POSIX_CHECKED_MEMCPY(san_blob->data, san_str, san_str_len); san_blob->size = san_str_len; /* normalize san_blob to lowercase */ POSIX_GUARD(s2n_blob_char_to_lower(san_blob)); } } return 0; } /* Parse CN names from the Subject of the leaf certificate. Technically there can by multiple CNs * in the Subject but practically very few certificates in the wild will have more than one CN. 
* Since the data for this certificate is coming from the application and not from an untrusted * source, we will try our best to parse all of the CNs. * * A recent CAB thread proposed removing support for multiple CNs: * https://cabforum.org/pipermail/public/2016-April/007242.html */ DEFINE_POINTER_CLEANUP_FUNC(unsigned char *, OPENSSL_free); int s2n_cert_chain_and_key_load_cns(struct s2n_cert_chain_and_key *chain_and_key, X509 *x509_cert) { POSIX_ENSURE_REF(chain_and_key->cn_names); POSIX_ENSURE_REF(x509_cert); X509_NAME *subject = X509_get_subject_name(x509_cert); if (!subject) { return 0; } int lastpos = -1; while ((lastpos = X509_NAME_get_index_by_NID(subject, NID_commonName, lastpos)) >= 0) { X509_NAME_ENTRY *name_entry = X509_NAME_get_entry(subject, lastpos); if (!name_entry) { continue; } ASN1_STRING *asn1_str = X509_NAME_ENTRY_get_data(name_entry); if (!asn1_str) { continue; } /* We need to try and decode the CN since it may be encoded as unicode with a * direct ASCII equivalent. Any non ASCII bytes in the string will fail later when we * actually compare hostnames. * * `ASN1_STRING_to_UTF8` allocates in both the success case and in the zero return case, but * not in the failure case (negative return value). Therefore, we use `ZERO_TO_DISABLE_DEFER_CLEANUP` * in the failure case to prevent double-freeing `utf8_str`. For the zero and success cases, `utf8_str` * will be freed by the `DEFER_CLEANUP`. */ DEFER_CLEANUP(unsigned char *utf8_str, OPENSSL_free_pointer); const int utf8_out_len = ASN1_STRING_to_UTF8(&utf8_str, asn1_str); if (utf8_out_len < 0) { /* On failure, ASN1_STRING_to_UTF8 does not allocate any memory */ ZERO_TO_DISABLE_DEFER_CLEANUP(utf8_str); continue; } else if (utf8_out_len == 0) { /* We still need to free memory for this case, so let the DEFER_CLEANUP free it * see https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-7521 and * https://security.archlinux.org/CVE-2017-7521 */ } else { struct s2n_blob *cn_name = NULL; POSIX_GUARD_RESULT(s2n_array_pushback(chain_and_key->cn_names, (void **) &cn_name)); if (cn_name == NULL) { POSIX_BAIL(S2N_ERR_NULL_CN_NAME); } if (s2n_alloc(cn_name, utf8_out_len) < 0) { S2N_ERROR_PRESERVE_ERRNO(); } POSIX_CHECKED_MEMCPY(cn_name->data, utf8_str, utf8_out_len); cn_name->size = utf8_out_len; /* normalize cn_name to lowercase */ POSIX_GUARD(s2n_blob_char_to_lower(cn_name)); } } return 0; } static int s2n_cert_chain_and_key_set_names(struct s2n_cert_chain_and_key *chain_and_key, X509 *cert) { POSIX_GUARD(s2n_cert_chain_and_key_load_sans(chain_and_key, cert)); /* For current use cases, we *could* avoid populating the common names if any sans were loaded in * s2n_cert_chain_and_key_load_sans. Let's unconditionally populate this field to avoid surprises * in the future. 
*/ POSIX_GUARD(s2n_cert_chain_and_key_load_cns(chain_and_key, cert)); return 0; } int s2n_cert_chain_and_key_load(struct s2n_cert_chain_and_key *chain_and_key) { POSIX_ENSURE_REF(chain_and_key); POSIX_ENSURE_REF(chain_and_key->cert_chain); POSIX_ENSURE_REF(chain_and_key->cert_chain->head); POSIX_ENSURE_REF(chain_and_key->private_key); struct s2n_cert *head = chain_and_key->cert_chain->head; DEFER_CLEANUP(X509 *leaf_cert = NULL, X509_free_pointer); POSIX_GUARD_RESULT(s2n_openssl_x509_parse(&head->raw, &leaf_cert)); /* Parse the leaf cert for the public key and certificate type */ DEFER_CLEANUP(struct s2n_pkey public_key = { 0 }, s2n_pkey_free); s2n_pkey_type pkey_type = S2N_PKEY_TYPE_UNKNOWN; POSIX_GUARD_RESULT(s2n_pkey_from_x509(leaf_cert, &public_key, &pkey_type)); POSIX_ENSURE(pkey_type != S2N_PKEY_TYPE_UNKNOWN, S2N_ERR_CERT_TYPE_UNSUPPORTED); POSIX_GUARD(s2n_cert_set_cert_type(head, pkey_type)); /* Validate the leaf cert's public key matches the provided private key */ if (s2n_pkey_check_key_exists(chain_and_key->private_key) == S2N_SUCCESS) { POSIX_GUARD(s2n_pkey_match(&public_key, chain_and_key->private_key)); } /* Populate name information from the SAN/CN for the leaf certificate */ POSIX_GUARD(s2n_cert_chain_and_key_set_names(chain_and_key, leaf_cert)); /* Populate ec curve libcrypto nid */ if (pkey_type == S2N_PKEY_TYPE_ECDSA) { int nid = EC_GROUP_get_curve_name(EC_KEY_get0_group(public_key.key.ecdsa_key.ec_key)); POSIX_ENSURE(nid > 0, S2N_ERR_CERT_TYPE_UNSUPPORTED); POSIX_ENSURE(nid < UINT16_MAX, S2N_ERR_CERT_TYPE_UNSUPPORTED); head->ec_curve_nid = nid; } return S2N_SUCCESS; } int s2n_cert_chain_and_key_load_pem(struct s2n_cert_chain_and_key *chain_and_key, const char *chain_pem, const char *private_key_pem) { POSIX_ENSURE_REF(chain_and_key); POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain(chain_and_key, chain_pem)); POSIX_GUARD(s2n_cert_chain_and_key_set_private_key(chain_and_key, private_key_pem)); POSIX_GUARD(s2n_cert_chain_and_key_load(chain_and_key)); return S2N_SUCCESS; } int s2n_cert_chain_and_key_load_public_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len) { POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_bytes(chain_and_key, chain_pem, chain_pem_len)); POSIX_GUARD(s2n_cert_chain_and_key_load(chain_and_key)); return S2N_SUCCESS; } int s2n_cert_chain_and_key_load_pem_bytes(struct s2n_cert_chain_and_key *chain_and_key, uint8_t *chain_pem, uint32_t chain_pem_len, uint8_t *private_key_pem, uint32_t private_key_pem_len) { POSIX_ENSURE_REF(chain_and_key); POSIX_GUARD(s2n_cert_chain_and_key_set_cert_chain_bytes(chain_and_key, chain_pem, chain_pem_len)); POSIX_GUARD(s2n_cert_chain_and_key_set_private_key_bytes(chain_and_key, private_key_pem, private_key_pem_len)); POSIX_GUARD(s2n_cert_chain_and_key_load(chain_and_key)); return S2N_SUCCESS; } S2N_CLEANUP_RESULT s2n_cert_chain_and_key_ptr_free(struct s2n_cert_chain_and_key **cert_and_key) { RESULT_ENSURE_REF(cert_and_key); RESULT_GUARD_POSIX(s2n_cert_chain_and_key_free(*cert_and_key)); *cert_and_key = NULL; return S2N_RESULT_OK; } int s2n_cert_chain_and_key_free(struct s2n_cert_chain_and_key *cert_and_key) { if (cert_and_key == NULL) { return 0; } /* Walk the chain and free the certs */ if (cert_and_key->cert_chain) { struct s2n_cert *node = cert_and_key->cert_chain->head; while (node) { /* Free the cert */ POSIX_GUARD(s2n_free(&node->raw)); /* update head so it won't point to freed memory */ cert_and_key->cert_chain->head = node->next; /* Free the node */ 
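/* s2n_free_object() releases the node and zeroes the local pointer; the loop then resumes from the
 * already-updated head, so the list is never left pointing at freed memory. */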
POSIX_GUARD(s2n_free_object((uint8_t **) &node, sizeof(struct s2n_cert))); node = cert_and_key->cert_chain->head; } POSIX_GUARD(s2n_free_object((uint8_t **) &cert_and_key->cert_chain, sizeof(struct s2n_cert_chain))); } if (cert_and_key->private_key) { POSIX_GUARD(s2n_pkey_free(cert_and_key->private_key)); POSIX_GUARD(s2n_free_object((uint8_t **) &cert_and_key->private_key, sizeof(s2n_cert_private_key))); } uint32_t len = 0; if (cert_and_key->san_names) { POSIX_GUARD_RESULT(s2n_array_num_elements(cert_and_key->san_names, &len)); for (uint32_t i = 0; i < len; i++) { struct s2n_blob *san_name = NULL; POSIX_GUARD_RESULT(s2n_array_get(cert_and_key->san_names, i, (void **) &san_name)); POSIX_GUARD(s2n_free(san_name)); } POSIX_GUARD_RESULT(s2n_array_free(cert_and_key->san_names)); cert_and_key->san_names = NULL; } if (cert_and_key->cn_names) { POSIX_GUARD_RESULT(s2n_array_num_elements(cert_and_key->cn_names, &len)); for (uint32_t i = 0; i < len; i++) { struct s2n_blob *cn_name = NULL; POSIX_GUARD_RESULT(s2n_array_get(cert_and_key->cn_names, i, (void **) &cn_name)); POSIX_GUARD(s2n_free(cn_name)); } POSIX_GUARD_RESULT(s2n_array_free(cert_and_key->cn_names)); cert_and_key->cn_names = NULL; } POSIX_GUARD(s2n_free(&cert_and_key->ocsp_status)); POSIX_GUARD(s2n_free(&cert_and_key->sct_list)); POSIX_GUARD(s2n_free_object((uint8_t **) &cert_and_key, sizeof(struct s2n_cert_chain_and_key))); return 0; } int s2n_cert_chain_free(struct s2n_cert_chain *cert_chain) { /* Walk the chain and free the certs/nodes allocated prior to failure */ if (cert_chain) { struct s2n_cert *node = cert_chain->head; while (node) { /* Free the cert */ POSIX_GUARD(s2n_free(&node->raw)); /* update head so it won't point to freed memory */ cert_chain->head = node->next; /* Free the node */ POSIX_GUARD(s2n_free_object((uint8_t **) &node, sizeof(struct s2n_cert))); node = cert_chain->head; } } return S2N_SUCCESS; } int s2n_send_cert_chain(struct s2n_connection *conn, struct s2n_stuffer *out, struct s2n_cert_chain_and_key *chain_and_key) { POSIX_ENSURE_REF(conn); POSIX_ENSURE_REF(out); POSIX_ENSURE_REF(chain_and_key); struct s2n_cert_chain *chain = chain_and_key->cert_chain; POSIX_ENSURE_REF(chain); struct s2n_cert *cur_cert = chain->head; POSIX_ENSURE_REF(cur_cert); struct s2n_stuffer_reservation cert_chain_size = { 0 }; POSIX_GUARD(s2n_stuffer_reserve_uint24(out, &cert_chain_size)); /* Send certs and extensions (in TLS 1.3) */ bool first_entry = true; while (cur_cert) { POSIX_ENSURE_REF(cur_cert); POSIX_GUARD(s2n_stuffer_write_uint24(out, cur_cert->raw.size)); POSIX_GUARD(s2n_stuffer_write_bytes(out, cur_cert->raw.data, cur_cert->raw.size)); /* According to https://tools.ietf.org/html/rfc8446#section-4.4.2, * If an extension applies to the entire chain, it SHOULD be included in * the first CertificateEntry. 
* While the spec allow extensions to be included in other certificate * entries, only the first matter to use here */ if (conn->actual_protocol_version >= S2N_TLS13) { if (first_entry) { POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_CERTIFICATE, conn, out)); first_entry = false; } else { POSIX_GUARD(s2n_extension_list_send(S2N_EXTENSION_LIST_EMPTY, conn, out)); } } cur_cert = cur_cert->next; } POSIX_GUARD(s2n_stuffer_write_vector_size(&cert_chain_size)); return 0; } int s2n_send_empty_cert_chain(struct s2n_stuffer *out) { POSIX_ENSURE_REF(out); POSIX_GUARD(s2n_stuffer_write_uint24(out, 0)); return 0; } static int s2n_does_cert_san_match_hostname(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name) { POSIX_ENSURE_REF(chain_and_key); POSIX_ENSURE_REF(dns_name); struct s2n_array *san_names = chain_and_key->san_names; uint32_t len = 0; POSIX_GUARD_RESULT(s2n_array_num_elements(san_names, &len)); for (uint32_t i = 0; i < len; i++) { struct s2n_blob *san_name = NULL; POSIX_GUARD_RESULT(s2n_array_get(san_names, i, (void **) &san_name)); POSIX_ENSURE_REF(san_name); if ((dns_name->size == san_name->size) && (strncasecmp((const char *) dns_name->data, (const char *) san_name->data, dns_name->size) == 0)) { return 1; } } return 0; } static int s2n_does_cert_cn_match_hostname(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name) { POSIX_ENSURE_REF(chain_and_key); POSIX_ENSURE_REF(dns_name); struct s2n_array *cn_names = chain_and_key->cn_names; uint32_t len = 0; POSIX_GUARD_RESULT(s2n_array_num_elements(cn_names, &len)); for (uint32_t i = 0; i < len; i++) { struct s2n_blob *cn_name = NULL; POSIX_GUARD_RESULT(s2n_array_get(cn_names, i, (void **) &cn_name)); POSIX_ENSURE_REF(cn_name); if ((dns_name->size == cn_name->size) && (strncasecmp((const char *) dns_name->data, (const char *) cn_name->data, dns_name->size) == 0)) { return 1; } } return 0; } int s2n_cert_chain_and_key_matches_dns_name(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name) { uint32_t len = 0; POSIX_GUARD_RESULT(s2n_array_num_elements(chain_and_key->san_names, &len)); if (len > 0) { if (s2n_does_cert_san_match_hostname(chain_and_key, dns_name)) { return 1; } } else { /* Per https://tools.ietf.org/html/rfc6125#section-6.4.4 we only will * consider the CN for matching if no valid DNS entries are provided * in a SAN. 
*/ if (s2n_does_cert_cn_match_hostname(chain_and_key, dns_name)) { return 1; } } return 0; } int s2n_cert_chain_and_key_set_ctx(struct s2n_cert_chain_and_key *cert_and_key, void *ctx) { cert_and_key->context = ctx; return 0; } void *s2n_cert_chain_and_key_get_ctx(struct s2n_cert_chain_and_key *cert_and_key) { return cert_and_key->context; } s2n_pkey_type s2n_cert_chain_and_key_get_pkey_type(struct s2n_cert_chain_and_key *chain_and_key) { if (chain_and_key == NULL || chain_and_key->cert_chain == NULL || chain_and_key->cert_chain->head == NULL) { return S2N_PKEY_TYPE_UNKNOWN; } return chain_and_key->cert_chain->head->pkey_type; } s2n_cert_private_key *s2n_cert_chain_and_key_get_private_key(struct s2n_cert_chain_and_key *chain_and_key) { PTR_ENSURE_REF(chain_and_key); return chain_and_key->private_key; } int s2n_cert_chain_get_length(const struct s2n_cert_chain_and_key *chain_and_key, uint32_t *cert_length) { POSIX_ENSURE_REF(chain_and_key); POSIX_ENSURE_REF(cert_length); struct s2n_cert *head_cert = chain_and_key->cert_chain->head; POSIX_ENSURE_REF(head_cert); *cert_length = 1; struct s2n_cert *next_cert = head_cert->next; while (next_cert != NULL) { *cert_length += 1; next_cert = next_cert->next; } return S2N_SUCCESS; } int s2n_cert_chain_get_cert(const struct s2n_cert_chain_and_key *chain_and_key, struct s2n_cert **out_cert, const uint32_t cert_idx) { POSIX_ENSURE_REF(chain_and_key); POSIX_ENSURE_REF(out_cert); struct s2n_cert *cur_cert = chain_and_key->cert_chain->head; POSIX_ENSURE_REF(cur_cert); uint32_t counter = 0; struct s2n_cert *next_cert = cur_cert->next; while ((next_cert != NULL) && (counter < cert_idx)) { cur_cert = next_cert; next_cert = next_cert->next; counter++; } POSIX_ENSURE(counter == cert_idx, S2N_ERR_NO_CERT_FOUND); POSIX_ENSURE(cur_cert != NULL, S2N_ERR_NO_CERT_FOUND); *out_cert = cur_cert; return S2N_SUCCESS; } int s2n_cert_get_der(const struct s2n_cert *cert, const uint8_t **out_cert_der, uint32_t *cert_length) { POSIX_ENSURE_REF(cert); POSIX_ENSURE_REF(out_cert_der); POSIX_ENSURE_REF(cert_length); *cert_length = cert->raw.size; *out_cert_der = cert->raw.data; return S2N_SUCCESS; } static int s2n_asn1_obj_free(ASN1_OBJECT **data) { if (*data != NULL) { ASN1_OBJECT_free(*data); } return S2N_SUCCESS; } static int s2n_asn1_string_free(ASN1_STRING **data) { if (*data != NULL) { ASN1_STRING_free(*data); } return S2N_SUCCESS; } static int s2n_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len) { DEFER_CLEANUP(ASN1_STRING *asn1_str = NULL, s2n_asn1_string_free); /* Note that d2i_ASN1_UTF8STRING increments *der_in to the byte following the parsed data. * Using a temporary variable is mandatory to prevent memory free-ing errors. * Ref to the warning section here for more information: * https://www.openssl.org/docs/man1.1.0/man3/d2i_ASN1_UTF8STRING.html. */ const uint8_t *asn1_str_data = extension_data; asn1_str = d2i_ASN1_UTF8STRING(NULL, (const unsigned char **) (void *) &asn1_str_data, extension_len); POSIX_ENSURE(asn1_str != NULL, S2N_ERR_INVALID_X509_EXTENSION_TYPE); /* ASN1_STRING_type() returns the type of `asn1_str`, using standard constants such as V_ASN1_OCTET_STRING. * Ref: https://www.openssl.org/docs/man1.1.0/man3/ASN1_STRING_type.html. 
*/ int type = ASN1_STRING_type(asn1_str); POSIX_ENSURE(type == V_ASN1_UTF8STRING, S2N_ERR_INVALID_X509_EXTENSION_TYPE); int len = ASN1_STRING_length(asn1_str); if (out_data != NULL) { POSIX_ENSURE((int64_t) *out_len >= (int64_t) len, S2N_ERR_INSUFFICIENT_MEM_SIZE); /* ASN1_STRING_data() returns an internal pointer to the data. * Since this is an internal pointer it should not be freed or modified in any way. * Ref: https://www.openssl.org/docs/man1.0.2/man3/ASN1_STRING_data.html. */ unsigned char *internal_data = ASN1_STRING_data(asn1_str); POSIX_ENSURE_REF(internal_data); POSIX_CHECKED_MEMCPY(out_data, internal_data, len); } *out_len = len; return S2N_SUCCESS; } int s2n_cert_get_utf8_string_from_extension_data_length(const uint8_t *extension_data, uint32_t extension_len, uint32_t *utf8_str_len) { POSIX_ENSURE_REF(extension_data); POSIX_ENSURE_GT(extension_len, 0); POSIX_ENSURE_REF(utf8_str_len); POSIX_GUARD(s2n_utf8_string_from_extension_data(extension_data, extension_len, NULL, utf8_str_len)); return S2N_SUCCESS; } int s2n_cert_get_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t extension_len, uint8_t *out_data, uint32_t *out_len) { POSIX_ENSURE_REF(extension_data); POSIX_ENSURE_GT(extension_len, 0); POSIX_ENSURE_REF(out_data); POSIX_ENSURE_REF(out_len); POSIX_GUARD(s2n_utf8_string_from_extension_data(extension_data, extension_len, out_data, out_len)); return S2N_SUCCESS; } static int s2n_parse_x509_extension(struct s2n_cert *cert, const uint8_t *oid, uint8_t *ext_value, uint32_t *ext_value_len, bool *critical) { POSIX_ENSURE_REF(cert->raw.data); /* Obtain the openssl x509 cert from the ASN1 DER certificate input. * Note that d2i_X509 increments *der_in to the byte following the parsed data. * Using a temporary variable is mandatory to prevent memory free-ing errors. * Ref to the warning section here for more information: * https://www.openssl.org/docs/man1.1.0/man3/d2i_X509.html. */ uint8_t *der_in = cert->raw.data; DEFER_CLEANUP(X509 *x509_cert = d2i_X509(NULL, (const unsigned char **) (void *) &der_in, cert->raw.size), X509_free_pointer); POSIX_ENSURE_REF(x509_cert); /* Retrieve the number of x509 extensions present in the certificate * X509_get_ext_count returns the number of extensions in the x509 certificate. * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_get_ext_count.html. */ int ext_count_value = X509_get_ext_count(x509_cert); POSIX_ENSURE_GT(ext_count_value, 0); size_t ext_count = (size_t) ext_count_value; /* OBJ_txt2obj() converts the input text string into an ASN1_OBJECT structure. * If no_name is 0 then long names and short names will be interpreted as well as numerical forms. * If no_name is 1 only the numerical form is acceptable. * Ref: https://www.openssl.org/docs/man1.1.0/man3/OBJ_txt2obj.html. */ DEFER_CLEANUP(ASN1_OBJECT *asn1_obj_in = OBJ_txt2obj((const char *) oid, 0), s2n_asn1_obj_free); POSIX_ENSURE_REF(asn1_obj_in); for (size_t loc = 0; loc < ext_count; loc++) { ASN1_OCTET_STRING *asn1_str = NULL; bool match_found = false; /* Retrieve the x509 extension at location loc. * X509_get_ext() retrieves extension loc from x. * The index loc can take any value from 0 to X509_get_ext_count(x) - 1. * The returned extension is an internal pointer which must not be freed up by the application. * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_get_ext.html. */ X509_EXTENSION *x509_ext = X509_get_ext(x509_cert, loc); POSIX_ENSURE_REF(x509_ext); /* Retrieve the extension object/OID/extnId. 
* X509_EXTENSION_get_object() returns the extension type of `x509_ext` as an ASN1_OBJECT pointer. * The returned pointer is an internal value which must not be freed up. * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_EXTENSION_get_object.html. */ ASN1_OBJECT *asn1_obj = X509_EXTENSION_get_object(x509_ext); POSIX_ENSURE_REF(asn1_obj); /* OBJ_cmp() compares two ASN1_OBJECT objects. If the two are identical 0 is returned. * Ref: https://www.openssl.org/docs/man1.1.0/man3/OBJ_cmp.html. */ match_found = (0 == OBJ_cmp(asn1_obj_in, asn1_obj)); /* If match found, retrieve the corresponding OID value for the x509 extension */ if (match_found) { /* X509_EXTENSION_get_data() returns the data of extension `x509_ext`. * The returned pointer is an internal value which must not be freed up. * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_EXTENSION_get_data.html. */ asn1_str = X509_EXTENSION_get_data(x509_ext); /* ASN1_STRING_length() returns the length of the content of `asn1_str`. * Ref: https://www.openssl.org/docs/man1.1.0/man3/ASN1_STRING_length.html. */ int len = ASN1_STRING_length(asn1_str); if (ext_value != NULL) { POSIX_ENSURE_GTE(len, 0); POSIX_ENSURE(*ext_value_len >= (uint32_t) len, S2N_ERR_INSUFFICIENT_MEM_SIZE); /* ASN1_STRING_data() returns an internal pointer to the data. * Since this is an internal pointer it should not be freed or modified in any way. * Ref: https://www.openssl.org/docs/man1.0.2/man3/ASN1_STRING_data.html. */ unsigned char *internal_data = ASN1_STRING_data(asn1_str); POSIX_ENSURE_REF(internal_data); POSIX_CHECKED_MEMCPY(ext_value, internal_data, len); } if (critical != NULL) { /* Retrieve the x509 extension's critical value. * X509_EXTENSION_get_critical() returns the criticality of extension `x509_ext`, * it returns 1 for critical and 0 for non-critical. * Ref: https://www.openssl.org/docs/man1.1.0/man3/X509_EXTENSION_get_critical.html. */ *critical = X509_EXTENSION_get_critical(x509_ext); } *ext_value_len = len; return S2N_SUCCESS; } } POSIX_BAIL(S2N_ERR_X509_EXTENSION_VALUE_NOT_FOUND); } int s2n_cert_get_x509_extension_value_length(struct s2n_cert *cert, const uint8_t *oid, uint32_t *ext_value_len) { POSIX_ENSURE_REF(cert); POSIX_ENSURE_REF(oid); POSIX_ENSURE_REF(ext_value_len); POSIX_GUARD(s2n_parse_x509_extension(cert, oid, NULL, ext_value_len, NULL)); return S2N_SUCCESS; } int s2n_cert_get_x509_extension_value(struct s2n_cert *cert, const uint8_t *oid, uint8_t *ext_value, uint32_t *ext_value_len, bool *critical) { POSIX_ENSURE_REF(cert); POSIX_ENSURE_REF(oid); POSIX_ENSURE_REF(ext_value); POSIX_ENSURE_REF(ext_value_len); POSIX_ENSURE_REF(critical); POSIX_GUARD(s2n_parse_x509_extension(cert, oid, ext_value, ext_value_len, critical)); return S2N_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_certificate.h000066400000000000000000000103461456575232400234630ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include #include #include "api/s2n.h" #include "crypto/s2n_pkey.h" #include "stuffer/s2n_stuffer.h" #define S2N_CERT_TYPE_COUNT S2N_PKEY_TYPE_SENTINEL struct s2n_cert_info { int signature_nid; /* This field is not populated for RSA_PSS signatures */ int signature_digest_nid; bool self_signed; }; struct s2n_cert { s2n_pkey_type pkey_type; uint16_t ec_curve_nid; s2n_cert_public_key public_key; struct s2n_blob raw; struct s2n_cert *next; }; struct s2n_cert_chain { uint32_t chain_size; struct s2n_cert *head; }; struct s2n_cert_chain_and_key { struct s2n_cert_chain *cert_chain; s2n_cert_private_key *private_key; struct s2n_blob ocsp_status; struct s2n_blob sct_list; /* DNS type SubjectAlternative names from the leaf certificate to match * with the server_name extension. We ignore non-DNS SANs here since the * server_name extension only supports DNS. */ struct s2n_array *san_names; /* CommonName values from the leaf certificate's Subject to match with the * server_name extension. Decoded as UTF8. */ struct s2n_array *cn_names; /* Application defined data related to this cert. */ void *context; }; struct certs_by_type { struct s2n_cert_chain_and_key *certs[S2N_CERT_TYPE_COUNT]; }; /* Exposed for fuzzing */ int s2n_cert_chain_and_key_load_cns(struct s2n_cert_chain_and_key *chain_and_key, X509 *x509_cert); int s2n_cert_chain_and_key_load_sans(struct s2n_cert_chain_and_key *chain_and_key, X509 *x509_cert); int s2n_cert_chain_and_key_matches_dns_name(const struct s2n_cert_chain_and_key *chain_and_key, const struct s2n_blob *dns_name); S2N_CLEANUP_RESULT s2n_cert_chain_and_key_ptr_free(struct s2n_cert_chain_and_key **cert_and_key); int s2n_cert_set_cert_type(struct s2n_cert *cert, s2n_pkey_type pkey_type); int s2n_send_cert_chain(struct s2n_connection *conn, struct s2n_stuffer *out, struct s2n_cert_chain_and_key *chain_and_key); int s2n_send_empty_cert_chain(struct s2n_stuffer *out); int s2n_create_cert_chain_from_stuffer(struct s2n_cert_chain *cert_chain_out, struct s2n_stuffer *chain_in_stuffer); int s2n_cert_chain_and_key_set_cert_chain_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *cert_chain_pem, uint32_t cert_chain_len); int s2n_cert_chain_and_key_set_private_key_bytes(struct s2n_cert_chain_and_key *cert_and_key, uint8_t *private_key_pem, uint32_t private_key_len); int s2n_cert_chain_and_key_set_cert_chain(struct s2n_cert_chain_and_key *cert_and_key, const char *cert_chain_pem); int s2n_cert_chain_and_key_set_private_key(struct s2n_cert_chain_and_key *cert_and_key, const char *private_key_pem); s2n_pkey_type s2n_cert_chain_and_key_get_pkey_type(struct s2n_cert_chain_and_key *chain_and_key); int s2n_cert_chain_get_length(const struct s2n_cert_chain_and_key *chain_and_key, uint32_t *cert_length); int s2n_cert_chain_get_cert(const struct s2n_cert_chain_and_key *chain_and_key, struct s2n_cert **out_cert, const uint32_t cert_idx); int s2n_cert_get_der(const struct s2n_cert *cert, const uint8_t **out_cert_der, uint32_t *cert_length); int s2n_cert_chain_free(struct s2n_cert_chain *cert_chain); int s2n_cert_get_x509_extension_value_length(struct s2n_cert *cert, const uint8_t *oid, uint32_t *ext_value_len); int s2n_cert_get_x509_extension_value(struct s2n_cert *cert, const uint8_t *oid, uint8_t *ext_value, uint32_t *ext_value_len, bool *critical); int s2n_cert_get_utf8_string_from_extension_data_length(const uint8_t *extension_data, uint32_t extension_len, uint32_t *utf8_str_len); int s2n_cert_get_utf8_string_from_extension_data(const uint8_t *extension_data, uint32_t 
extension_len, uint8_t *out_data, uint32_t *out_len); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_cipher.c000066400000000000000000000031721456575232400224450ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC) #include #endif #include "crypto/s2n_cipher.h" #include "utils/s2n_safety.h" int s2n_session_key_alloc(struct s2n_session_key *key) { POSIX_ENSURE_EQ(key->evp_cipher_ctx, NULL); POSIX_ENSURE_REF(key->evp_cipher_ctx = EVP_CIPHER_CTX_new()); #if defined(S2N_CIPHER_AEAD_API_AVAILABLE) POSIX_ENSURE_EQ(key->evp_aead_ctx, NULL); key->evp_aead_ctx = OPENSSL_malloc(sizeof(EVP_AEAD_CTX)); if (key->evp_aead_ctx == NULL) { EVP_CIPHER_CTX_free(key->evp_cipher_ctx); S2N_ERROR_PRESERVE_ERRNO(); } EVP_AEAD_CTX_zero(key->evp_aead_ctx); #endif return 0; } int s2n_session_key_free(struct s2n_session_key *key) { if (key->evp_cipher_ctx != NULL) { EVP_CIPHER_CTX_free(key->evp_cipher_ctx); key->evp_cipher_ctx = NULL; } #if defined(S2N_CIPHER_AEAD_API_AVAILABLE) if (key->evp_aead_ctx != NULL) { EVP_AEAD_CTX_free(key->evp_aead_ctx); key->evp_aead_ctx = NULL; } #endif return 0; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_cipher.h000066400000000000000000000101531456575232400224470ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include #include #include #include #include #include #include "crypto/s2n_crypto.h" #include "crypto/s2n_ktls_crypto.h" #include "utils/s2n_blob.h" #if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC) #define S2N_CIPHER_AEAD_API_AVAILABLE #endif struct s2n_session_key { EVP_CIPHER_CTX *evp_cipher_ctx; #if defined(S2N_CIPHER_AEAD_API_AVAILABLE) EVP_AEAD_CTX *evp_aead_ctx; #endif }; struct s2n_stream_cipher { int (*decrypt)(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out); int (*encrypt)(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out); }; struct s2n_cbc_cipher { uint8_t block_size; uint8_t record_iv_size; int (*decrypt)(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out); int (*encrypt)(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out); }; struct s2n_aead_cipher { uint8_t fixed_iv_size; uint8_t record_iv_size; uint8_t tag_size; int (*decrypt)(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *add, struct s2n_blob *in, struct s2n_blob *out); int (*encrypt)(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *add, struct s2n_blob *in, struct s2n_blob *out); }; struct s2n_composite_cipher { uint8_t block_size; uint8_t record_iv_size; uint8_t mac_key_size; int (*decrypt)(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out); int (*encrypt)(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out); int (*set_mac_write_key)(struct s2n_session_key *key, uint8_t *mac_key, uint32_t mac_size); int (*initial_hmac)(struct s2n_session_key *key, uint8_t *sequence_number, uint8_t content_type, uint16_t protocol_version, uint16_t payload_and_eiv_len, int *extra); }; struct s2n_cipher { enum { S2N_STREAM, S2N_CBC, S2N_AEAD, S2N_COMPOSITE } type; union { struct s2n_stream_cipher stream; struct s2n_aead_cipher aead; struct s2n_cbc_cipher cbc; struct s2n_composite_cipher comp; } io; uint8_t key_material_size; uint8_t (*is_available)(void); int (*init)(struct s2n_session_key *key); int (*set_decryption_key)(struct s2n_session_key *key, struct s2n_blob *in); int (*set_encryption_key)(struct s2n_session_key *key, struct s2n_blob *in); int (*destroy_key)(struct s2n_session_key *key); S2N_RESULT (*set_ktls_info)(struct s2n_ktls_crypto_info_inputs *inputs, struct s2n_ktls_crypto_info *crypto_info); }; int s2n_session_key_alloc(struct s2n_session_key *key); int s2n_session_key_free(struct s2n_session_key *key); extern const struct s2n_cipher s2n_null_cipher; extern const struct s2n_cipher s2n_rc4; extern const struct s2n_cipher s2n_aes128; extern const struct s2n_cipher s2n_aes256; extern const struct s2n_cipher s2n_3des; extern const struct s2n_cipher s2n_aes128_gcm; extern const struct s2n_cipher s2n_aes256_gcm; extern const struct s2n_cipher s2n_aes128_sha; extern const struct s2n_cipher s2n_aes256_sha; extern const struct s2n_cipher s2n_aes128_sha256; extern const struct s2n_cipher s2n_aes256_sha256; extern const struct s2n_cipher s2n_chacha20_poly1305; extern const struct s2n_cipher s2n_tls13_aes128_gcm; extern const struct s2n_cipher s2n_tls13_aes256_gcm; S2N_RESULT s2n_rc4_init(); S2N_RESULT s2n_rc4_cleanup(); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_composite_cipher_aes_sha.c000066400000000000000000000345171456575232400262210ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include #include #include "crypto/s2n_cipher.h" #include "crypto/s2n_fips.h" #include "crypto/s2n_openssl.h" #include "tls/s2n_crypto.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" /* LibreSSL and BoringSSL support the cipher, but the interface is different from Openssl's. We * should define a separate s2n_cipher struct for LibreSSL and BoringSSL. */ #if !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL) /* Symbols for AES-SHA1-CBC composite ciphers were added in Openssl 1.0.1 * These composite ciphers exhibit erratic behavior in LibreSSL releases. */ #if S2N_OPENSSL_VERSION_AT_LEAST(1, 0, 1) #define S2N_AES_SHA1_COMPOSITE_AVAILABLE #endif #if defined(AWSLC_API_VERSION) && (AWSLC_API_VERSION <= 17) #undef S2N_AES_SHA1_COMPOSITE_AVAILABLE #endif /* Symbols for AES-SHA256-CBC composite ciphers were added in Openssl 1.0.2 * See https://www.openssl.org/news/cl102.txt * These composite ciphers exhibit erratic behavior in LibreSSL releases. */ #if S2N_OPENSSL_VERSION_AT_LEAST(1, 0, 2) #define S2N_AES_SHA256_COMPOSITE_AVAILABLE #endif #if defined(AWSLC_API_VERSION) && (AWSLC_API_VERSION <= 17) #undef S2N_AES_SHA256_COMPOSITE_AVAILABLE #endif #endif /* Silly accessors, but we avoid using version macro guards in multiple places */ static const EVP_CIPHER *s2n_evp_aes_128_cbc_hmac_sha1(void) { #if defined(S2N_AES_SHA1_COMPOSITE_AVAILABLE) return EVP_aes_128_cbc_hmac_sha1(); #else return NULL; #endif } static const EVP_CIPHER *s2n_evp_aes_256_cbc_hmac_sha1(void) { #if defined(S2N_AES_SHA1_COMPOSITE_AVAILABLE) return EVP_aes_256_cbc_hmac_sha1(); #else return NULL; #endif } static const EVP_CIPHER *s2n_evp_aes_128_cbc_hmac_sha256(void) { #if defined(S2N_AES_SHA256_COMPOSITE_AVAILABLE) return EVP_aes_128_cbc_hmac_sha256(); #else return NULL; #endif } static const EVP_CIPHER *s2n_evp_aes_256_cbc_hmac_sha256(void) { #if defined(S2N_AES_SHA256_COMPOSITE_AVAILABLE) return EVP_aes_256_cbc_hmac_sha256(); #else return NULL; #endif } static uint8_t s2n_composite_cipher_aes128_sha_available(void) { /* EVP_aes_128_cbc_hmac_sha1() returns NULL if the implementations aren't available. * See https://github.com/openssl/openssl/blob/master/crypto/evp/e_aes_cbc_hmac_sha1.c#L952 * * Composite ciphers cannot be used when FIPS mode is set. Ciphers require the * EVP_CIPH_FLAG_FIPS OpenSSL flag to be set for use when in FIPS mode, and composite * ciphers cause OpenSSL errors due to the lack of the flag. */ return (!s2n_is_in_fips_mode() && s2n_evp_aes_128_cbc_hmac_sha1() ? 1 : 0); } static uint8_t s2n_composite_cipher_aes256_sha_available(void) { /* Composite ciphers cannot be used when FIPS mode is set. Ciphers require the * EVP_CIPH_FLAG_FIPS OpenSSL flag to be set for use when in FIPS mode, and composite * ciphers cause OpenSSL errors due to the lack of the flag. */ return (!s2n_is_in_fips_mode() && s2n_evp_aes_256_cbc_hmac_sha1() ? 1 : 0); } static uint8_t s2n_composite_cipher_aes128_sha256_available(void) { /* Composite ciphers cannot be used when FIPS mode is set. 
Ciphers require the * EVP_CIPH_FLAG_FIPS OpenSSL flag to be set for use when in FIPS mode, and composite * ciphers cause OpenSSL errors due to the lack of the flag. */ return (!s2n_is_in_fips_mode() && s2n_evp_aes_128_cbc_hmac_sha256() ? 1 : 0); } static uint8_t s2n_composite_cipher_aes256_sha256_available(void) { /* Composite ciphers cannot be used when FIPS mode is set. Ciphers require the * EVP_CIPH_FLAG_FIPS OpenSSL flag to be set for use when in FIPS mode, and composite * ciphers cause OpenSSL errors due to the lack of the flag. */ return (!s2n_is_in_fips_mode() && s2n_evp_aes_256_cbc_hmac_sha256() ? 1 : 0); } static int s2n_composite_cipher_aes_sha_initial_hmac(struct s2n_session_key *key, uint8_t *sequence_number, uint8_t content_type, uint16_t protocol_version, uint16_t payload_and_eiv_len, int *extra) { /* BoringSSL and AWS-LC(AWSLC_API_VERSION <= 17) do not support these composite ciphers with the existing EVP API, and they took out the * constants used below. This method should never be called with BoringSSL or AWS-LC(AWSLC_API_VERSION <= 17) because the isAvaliable checked * will fail. Instead of defining a possibly dangerous default or hard coding this to 0x16 error out with BoringSSL and AWS-LC(AWSLC_API_VERSION <= 17). */ #if defined(OPENSSL_IS_BORINGSSL) || (defined(AWSLC_API_VERSION) && (AWSLC_API_VERSION <= 17)) POSIX_BAIL(S2N_ERR_NO_SUPPORTED_LIBCRYPTO_API); #else uint8_t ctrl_buf[S2N_TLS12_AAD_LEN]; struct s2n_blob ctrl_blob = { 0 }; POSIX_GUARD(s2n_blob_init(&ctrl_blob, ctrl_buf, S2N_TLS12_AAD_LEN)); struct s2n_stuffer ctrl_stuffer = { 0 }; POSIX_GUARD(s2n_stuffer_init(&ctrl_stuffer, &ctrl_blob)); POSIX_GUARD(s2n_stuffer_write_bytes(&ctrl_stuffer, sequence_number, S2N_TLS_SEQUENCE_NUM_LEN)); POSIX_GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, content_type)); POSIX_GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, protocol_version / 10)); POSIX_GUARD(s2n_stuffer_write_uint8(&ctrl_stuffer, protocol_version % 10)); POSIX_GUARD(s2n_stuffer_write_uint16(&ctrl_stuffer, payload_and_eiv_len)); /* This will unnecessarily mangle the input buffer, which is fine since it's temporary * Return value will be length of digest, padding, and padding length byte. * See https://github.com/openssl/openssl/blob/master/crypto/evp/e_aes_cbc_hmac_sha1.c#L814 * and https://github.com/openssl/openssl/blob/4f0c475719defd7c051964ef9964cc6e5b3a63bf/ssl/record/ssl3_record.c#L743 */ int ctrl_ret = EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_TLS1_AAD, S2N_TLS12_AAD_LEN, ctrl_buf); S2N_ERROR_IF(ctrl_ret <= 0, S2N_ERR_INITIAL_HMAC); *extra = ctrl_ret; return 0; #endif } static int s2n_composite_cipher_aes_sha_encrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_EQ(out->size, in->size); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* len is set by EVP_EncryptUpdate and checked post operation */ int len = 0; POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT); POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_ENCRYPT); return 0; } static int s2n_composite_cipher_aes_sha_decrypt(struct s2n_session_key *key, struct s2n_blob *iv, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_EQ(out->size, in->size); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, NULL, NULL, NULL, iv->data), S2N_ERR_KEY_INIT); /* len is set by EVP_DecryptUpdate. 
It is not checked here but padding is manually removed and therefore * the decryption operation is validated. */ int len = 0; POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT); return 0; } static int s2n_composite_cipher_aes_sha_set_mac_write_key(struct s2n_session_key *key, uint8_t *mac_key, uint32_t mac_size) { POSIX_ENSURE_EQ(mac_size, SHA_DIGEST_LENGTH); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_MAC_KEY, mac_size, mac_key); return 0; } static int s2n_composite_cipher_aes_sha256_set_mac_write_key(struct s2n_session_key *key, uint8_t *mac_key, uint32_t mac_size) { POSIX_ENSURE_EQ(mac_size, SHA256_DIGEST_LENGTH); EVP_CIPHER_CTX_ctrl(key->evp_cipher_ctx, EVP_CTRL_AEAD_SET_MAC_KEY, mac_size, mac_key); return 0; } static int s2n_composite_cipher_aes128_sha_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 16); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha1(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes128_sha_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 16); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha1(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes256_sha_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 32); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha1(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes256_sha_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 32); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha1(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes128_sha256_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 16); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha256(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes128_sha256_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 16); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_128_cbc_hmac_sha256(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes256_sha256_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 32); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha256(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes256_sha256_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 32); EVP_CIPHER_CTX_set_padding(key->evp_cipher_ctx, 0); EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_aes_256_cbc_hmac_sha256(), NULL, in->data, NULL); return 0; } static int s2n_composite_cipher_aes_sha_init(struct s2n_session_key *key) { s2n_evp_ctx_init(key->evp_cipher_ctx); return 0; } static int s2n_composite_cipher_aes_sha_destroy_key(struct s2n_session_key *key) { EVP_CIPHER_CTX_cleanup(key->evp_cipher_ctx); return 0; } const struct s2n_cipher 
s2n_aes128_sha = { .key_material_size = 16, .type = S2N_COMPOSITE, .io.comp = { .block_size = 16, .record_iv_size = 16, .mac_key_size = SHA_DIGEST_LENGTH, .decrypt = s2n_composite_cipher_aes_sha_decrypt, .encrypt = s2n_composite_cipher_aes_sha_encrypt, .set_mac_write_key = s2n_composite_cipher_aes_sha_set_mac_write_key, .initial_hmac = s2n_composite_cipher_aes_sha_initial_hmac }, .is_available = s2n_composite_cipher_aes128_sha_available, .init = s2n_composite_cipher_aes_sha_init, .set_encryption_key = s2n_composite_cipher_aes128_sha_set_encryption_key, .set_decryption_key = s2n_composite_cipher_aes128_sha_set_decryption_key, .destroy_key = s2n_composite_cipher_aes_sha_destroy_key, }; const struct s2n_cipher s2n_aes256_sha = { .key_material_size = 32, .type = S2N_COMPOSITE, .io.comp = { .block_size = 16, .record_iv_size = 16, .mac_key_size = SHA_DIGEST_LENGTH, .decrypt = s2n_composite_cipher_aes_sha_decrypt, .encrypt = s2n_composite_cipher_aes_sha_encrypt, .set_mac_write_key = s2n_composite_cipher_aes_sha_set_mac_write_key, .initial_hmac = s2n_composite_cipher_aes_sha_initial_hmac }, .is_available = s2n_composite_cipher_aes256_sha_available, .init = s2n_composite_cipher_aes_sha_init, .set_encryption_key = s2n_composite_cipher_aes256_sha_set_encryption_key, .set_decryption_key = s2n_composite_cipher_aes256_sha_set_decryption_key, .destroy_key = s2n_composite_cipher_aes_sha_destroy_key, }; const struct s2n_cipher s2n_aes128_sha256 = { .key_material_size = 16, .type = S2N_COMPOSITE, .io.comp = { .block_size = 16, .record_iv_size = 16, .mac_key_size = SHA256_DIGEST_LENGTH, .decrypt = s2n_composite_cipher_aes_sha_decrypt, .encrypt = s2n_composite_cipher_aes_sha_encrypt, .set_mac_write_key = s2n_composite_cipher_aes_sha256_set_mac_write_key, .initial_hmac = s2n_composite_cipher_aes_sha_initial_hmac }, .is_available = s2n_composite_cipher_aes128_sha256_available, .init = s2n_composite_cipher_aes_sha_init, .set_encryption_key = s2n_composite_cipher_aes128_sha256_set_encryption_key, .set_decryption_key = s2n_composite_cipher_aes128_sha256_set_decryption_key, .destroy_key = s2n_composite_cipher_aes_sha_destroy_key, }; const struct s2n_cipher s2n_aes256_sha256 = { .key_material_size = 32, .type = S2N_COMPOSITE, .io.comp = { .block_size = 16, .record_iv_size = 16, .mac_key_size = SHA256_DIGEST_LENGTH, .decrypt = s2n_composite_cipher_aes_sha_decrypt, .encrypt = s2n_composite_cipher_aes_sha_encrypt, .set_mac_write_key = s2n_composite_cipher_aes_sha256_set_mac_write_key, .initial_hmac = s2n_composite_cipher_aes_sha_initial_hmac }, .is_available = s2n_composite_cipher_aes256_sha256_available, .init = s2n_composite_cipher_aes_sha_init, .set_encryption_key = s2n_composite_cipher_aes256_sha256_set_encryption_key, .set_decryption_key = s2n_composite_cipher_aes256_sha256_set_decryption_key, .destroy_key = s2n_composite_cipher_aes_sha_destroy_key, }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_crypto.c000066400000000000000000000020261456575232400225100ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_crypto.h" #include #include "api/s2n.h" /* OPENSSL_free is defined within for OpenSSL Libcrypto * and within for AWS_LC and BoringSSL */ #if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC) #include #else #include #endif int s2n_crypto_free(uint8_t** data) { if (*data != NULL) { OPENSSL_free(*data); } return S2N_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_crypto.h000066400000000000000000000014041456575232400225140ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include #include #include #include #include int s2n_crypto_free(uint8_t** data); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_dhe.c000066400000000000000000000254221456575232400217350ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include "crypto/s2n_dhe.h" #include #include #include #include #include "crypto/s2n_openssl.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" #include "utils/s2n_mem.h" #include "utils/s2n_safety.h" #define S2N_MIN_DH_PRIME_SIZE_BYTES (2048 / 8) /* Caller is not responsible for freeing values returned by these accessors * Per https://www.openssl.org/docs/man1.1.0/crypto/DH_get0_pqg.html */ static const BIGNUM *s2n_get_Ys_dh_param(struct s2n_dh_params *dh_params) { const BIGNUM *Ys; /* DH made opaque in Openssl 1.1.0 */ #if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) DH_get0_key(dh_params->dh, &Ys, NULL); #else Ys = dh_params->dh->pub_key; #endif return Ys; } static const BIGNUM *s2n_get_p_dh_param(struct s2n_dh_params *dh_params) { const BIGNUM *p; #if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) DH_get0_pqg(dh_params->dh, &p, NULL, NULL); #else p = dh_params->dh->p; #endif return p; } static const BIGNUM *s2n_get_g_dh_param(struct s2n_dh_params *dh_params) { const BIGNUM *g; #if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) DH_get0_pqg(dh_params->dh, NULL, NULL, &g); #else g = dh_params->dh->g; #endif return g; } static int s2n_check_p_g_dh_params(struct s2n_dh_params *dh_params) { POSIX_ENSURE_REF(dh_params); POSIX_ENSURE_REF(dh_params->dh); const BIGNUM *p = s2n_get_p_dh_param(dh_params); const BIGNUM *g = s2n_get_g_dh_param(dh_params); POSIX_ENSURE_REF(g); POSIX_ENSURE_REF(p); S2N_ERROR_IF(DH_size(dh_params->dh) < S2N_MIN_DH_PRIME_SIZE_BYTES, S2N_ERR_DH_PARAMS_CREATE); S2N_ERROR_IF(BN_is_zero(g), S2N_ERR_DH_PARAMS_CREATE); S2N_ERROR_IF(BN_is_zero(p), S2N_ERR_DH_PARAMS_CREATE); return S2N_SUCCESS; } static int s2n_check_pub_key_dh_params(struct s2n_dh_params *dh_params) { const BIGNUM *pub_key = s2n_get_Ys_dh_param(dh_params); POSIX_ENSURE_REF(pub_key); S2N_ERROR_IF(BN_is_zero(pub_key), S2N_ERR_DH_PARAMS_CREATE); return S2N_SUCCESS; } static int s2n_set_p_g_Ys_dh_params(struct s2n_dh_params *dh_params, struct s2n_blob *p, struct s2n_blob *g, struct s2n_blob *Ys) { POSIX_ENSURE(p->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW); POSIX_ENSURE(g->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW); POSIX_ENSURE(Ys->size <= INT_MAX, S2N_ERR_INTEGER_OVERFLOW); BIGNUM *bn_p = BN_bin2bn((const unsigned char *) p->data, p->size, NULL); BIGNUM *bn_g = BN_bin2bn((const unsigned char *) g->data, g->size, NULL); BIGNUM *bn_Ys = BN_bin2bn((const unsigned char *) Ys->data, Ys->size, NULL); #if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) /* Per https://www.openssl.org/docs/man1.1.0/crypto/DH_get0_pqg.html: * values that have been passed in should not be freed directly after this function has been called */ POSIX_GUARD_OSSL(DH_set0_pqg(dh_params->dh, bn_p, NULL, bn_g), S2N_ERR_DH_PARAMS_CREATE); /* Same as DH_set0_pqg */ POSIX_GUARD_OSSL(DH_set0_key(dh_params->dh, bn_Ys, NULL), S2N_ERR_DH_PARAMS_CREATE); #else dh_params->dh->p = bn_p; dh_params->dh->g = bn_g; dh_params->dh->pub_key = bn_Ys; #endif return S2N_SUCCESS; } int s2n_check_all_dh_params(struct s2n_dh_params *dh_params) { POSIX_GUARD(s2n_check_p_g_dh_params(dh_params)); POSIX_GUARD(s2n_check_pub_key_dh_params(dh_params)); return S2N_SUCCESS; } int s2n_pkcs3_to_dh_params(struct s2n_dh_params *dh_params, struct s2n_blob *pkcs3) { POSIX_ENSURE_REF(dh_params); POSIX_PRECONDITION(s2n_blob_validate(pkcs3)); DEFER_CLEANUP(struct s2n_dh_params temp_dh_params = { 0 }, s2n_dh_params_free); uint8_t *original_ptr = pkcs3->data; temp_dh_params.dh = d2i_DHparams(NULL, (const unsigned char **) (void *) &pkcs3->data, pkcs3->size); 
POSIX_GUARD(s2n_check_p_g_dh_params(&temp_dh_params)); if (pkcs3->data) { POSIX_ENSURE_GTE(pkcs3->data, original_ptr); POSIX_ENSURE((uint32_t) (pkcs3->data - original_ptr) == pkcs3->size, S2N_ERR_INVALID_PKCS3); } pkcs3->data = original_ptr; /* Require at least 2048 bits for the DH size */ POSIX_ENSURE(DH_size(temp_dh_params.dh) >= S2N_MIN_DH_PRIME_SIZE_BYTES, S2N_ERR_DH_TOO_SMALL); /* Check the generator and prime */ POSIX_GUARD(s2n_dh_params_check(&temp_dh_params)); dh_params->dh = temp_dh_params.dh; ZERO_TO_DISABLE_DEFER_CLEANUP(temp_dh_params); return S2N_SUCCESS; } int s2n_dh_p_g_Ys_to_dh_params(struct s2n_dh_params *server_dh_params, struct s2n_blob *p, struct s2n_blob *g, struct s2n_blob *Ys) { POSIX_ENSURE_REF(server_dh_params); POSIX_PRECONDITION(s2n_blob_validate(p)); POSIX_PRECONDITION(s2n_blob_validate(g)); POSIX_PRECONDITION(s2n_blob_validate(Ys)); server_dh_params->dh = DH_new(); POSIX_ENSURE(server_dh_params->dh != NULL, S2N_ERR_DH_PARAMS_CREATE); POSIX_GUARD(s2n_set_p_g_Ys_dh_params(server_dh_params, p, g, Ys)); POSIX_GUARD(s2n_check_all_dh_params(server_dh_params)); return S2N_SUCCESS; } int s2n_dh_params_to_p_g_Ys(struct s2n_dh_params *server_dh_params, struct s2n_stuffer *out, struct s2n_blob *output) { POSIX_GUARD(s2n_check_all_dh_params(server_dh_params)); POSIX_PRECONDITION(s2n_stuffer_validate(out)); POSIX_PRECONDITION(s2n_blob_validate(output)); const BIGNUM *bn_p = s2n_get_p_dh_param(server_dh_params); const BIGNUM *bn_g = s2n_get_g_dh_param(server_dh_params); const BIGNUM *bn_Ys = s2n_get_Ys_dh_param(server_dh_params); uint16_t p_size = BN_num_bytes(bn_p); uint16_t g_size = BN_num_bytes(bn_g); uint16_t Ys_size = BN_num_bytes(bn_Ys); uint8_t *p = NULL; uint8_t *g = NULL; uint8_t *Ys = NULL; output->data = s2n_stuffer_raw_write(out, 0); POSIX_ENSURE_REF(output->data); POSIX_GUARD(s2n_stuffer_write_uint16(out, p_size)); p = s2n_stuffer_raw_write(out, p_size); POSIX_ENSURE_REF(p); POSIX_ENSURE(BN_bn2bin(bn_p, p) == p_size, S2N_ERR_DH_SERIALIZING); POSIX_GUARD(s2n_stuffer_write_uint16(out, g_size)); g = s2n_stuffer_raw_write(out, g_size); POSIX_ENSURE_REF(g); POSIX_ENSURE(BN_bn2bin(bn_g, g) == g_size, S2N_ERR_DH_SERIALIZING); POSIX_GUARD(s2n_stuffer_write_uint16(out, Ys_size)); Ys = s2n_stuffer_raw_write(out, Ys_size); POSIX_ENSURE_REF(Ys); POSIX_ENSURE(BN_bn2bin(bn_Ys, Ys) == Ys_size, S2N_ERR_DH_SERIALIZING); output->size = p_size + 2 + g_size + 2 + Ys_size + 2; return S2N_SUCCESS; } int s2n_dh_compute_shared_secret_as_client(struct s2n_dh_params *server_dh_params, struct s2n_stuffer *Yc_out, struct s2n_blob *shared_key) { struct s2n_dh_params client_params = { 0 }; uint8_t *client_pub_key = NULL; uint16_t client_pub_key_size = 0; int shared_key_size = 0; POSIX_GUARD(s2n_dh_params_check(server_dh_params)); POSIX_GUARD(s2n_dh_params_copy(server_dh_params, &client_params)); POSIX_GUARD(s2n_dh_generate_ephemeral_key(&client_params)); POSIX_GUARD(s2n_alloc(shared_key, DH_size(server_dh_params->dh))); const BIGNUM *client_pub_key_bn = s2n_get_Ys_dh_param(&client_params); POSIX_ENSURE_REF(client_pub_key_bn); client_pub_key_size = BN_num_bytes(client_pub_key_bn); POSIX_GUARD(s2n_stuffer_write_uint16(Yc_out, client_pub_key_size)); client_pub_key = s2n_stuffer_raw_write(Yc_out, client_pub_key_size); if (client_pub_key == NULL) { POSIX_GUARD(s2n_free(shared_key)); POSIX_GUARD(s2n_dh_params_free(&client_params)); POSIX_BAIL(S2N_ERR_DH_WRITING_PUBLIC_KEY); } if (BN_bn2bin(client_pub_key_bn, client_pub_key) != client_pub_key_size) { POSIX_GUARD(s2n_free(shared_key)); 
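    /* Descriptive note: BN_bn2bin wrote a different number of bytes than expected, so the
     * ephemeral client params are released as well before bailing with
     * S2N_ERR_DH_COPYING_PUBLIC_KEY, mirroring the cleanup in the failure path above. */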
POSIX_GUARD(s2n_dh_params_free(&client_params)); POSIX_BAIL(S2N_ERR_DH_COPYING_PUBLIC_KEY); } /* server_dh_params already validated */ const BIGNUM *server_pub_key_bn = s2n_get_Ys_dh_param(server_dh_params); shared_key_size = DH_compute_key(shared_key->data, server_pub_key_bn, client_params.dh); if (shared_key_size < 0) { POSIX_GUARD(s2n_free(shared_key)); POSIX_GUARD(s2n_dh_params_free(&client_params)); POSIX_BAIL(S2N_ERR_DH_SHARED_SECRET); } shared_key->size = shared_key_size; POSIX_GUARD(s2n_dh_params_free(&client_params)); return S2N_SUCCESS; } int s2n_dh_compute_shared_secret_as_server(struct s2n_dh_params *server_dh_params, struct s2n_stuffer *Yc_in, struct s2n_blob *shared_key) { uint16_t Yc_length = 0; struct s2n_blob Yc = { 0 }; int shared_key_size = 0; BIGNUM *pub_key = NULL; POSIX_GUARD(s2n_check_all_dh_params(server_dh_params)); POSIX_GUARD(s2n_stuffer_read_uint16(Yc_in, &Yc_length)); Yc.size = Yc_length; Yc.data = s2n_stuffer_raw_read(Yc_in, Yc.size); POSIX_ENSURE_REF(Yc.data); pub_key = BN_bin2bn((const unsigned char *) Yc.data, Yc.size, NULL); POSIX_ENSURE_REF(pub_key); int server_dh_params_size = DH_size(server_dh_params->dh); POSIX_ENSURE(server_dh_params_size <= INT32_MAX, S2N_ERR_INTEGER_OVERFLOW); POSIX_GUARD(s2n_alloc(shared_key, server_dh_params_size)); shared_key_size = DH_compute_key(shared_key->data, pub_key, server_dh_params->dh); if (shared_key_size <= 0) { BN_free(pub_key); POSIX_BAIL(S2N_ERR_DH_SHARED_SECRET); } shared_key->size = shared_key_size; BN_free(pub_key); return S2N_SUCCESS; } int s2n_dh_params_check(struct s2n_dh_params *dh_params) { POSIX_ENSURE_REF(dh_params); POSIX_ENSURE_REF(dh_params->dh); int codes = 0; POSIX_GUARD_OSSL(DH_check(dh_params->dh, &codes), S2N_ERR_DH_PARAMETER_CHECK); POSIX_ENSURE(codes == 0, S2N_ERR_DH_PARAMETER_CHECK); return S2N_SUCCESS; } int s2n_dh_params_copy(struct s2n_dh_params *from, struct s2n_dh_params *to) { POSIX_GUARD(s2n_check_p_g_dh_params(from)); POSIX_ENSURE_REF(to); to->dh = DHparams_dup(from->dh); POSIX_ENSURE(to->dh != NULL, S2N_ERR_DH_COPYING_PARAMETERS); return S2N_SUCCESS; } int s2n_dh_generate_ephemeral_key(struct s2n_dh_params *dh_params) { POSIX_GUARD(s2n_check_p_g_dh_params(dh_params)); POSIX_GUARD_OSSL(DH_generate_key(dh_params->dh), S2N_ERR_DH_GENERATING_PARAMETERS); return S2N_SUCCESS; } int s2n_dh_params_free(struct s2n_dh_params *dh_params) { POSIX_ENSURE_REF(dh_params); DH_free(dh_params->dh); dh_params->dh = NULL; return S2N_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_dhe.h000066400000000000000000000030731456575232400217400ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" struct s2n_dh_params { DH *dh; }; int s2n_pkcs3_to_dh_params(struct s2n_dh_params *dh_params, struct s2n_blob *pkcs3); int s2n_dh_p_g_Ys_to_dh_params(struct s2n_dh_params *server_dh_params, struct s2n_blob *p, struct s2n_blob *g, struct s2n_blob *ys); int s2n_dh_params_to_p_g_Ys(struct s2n_dh_params *server_dh_params, struct s2n_stuffer *out, struct s2n_blob *output); int s2n_dh_compute_shared_secret_as_server(struct s2n_dh_params *server_dh_params, struct s2n_stuffer *Yc_in, struct s2n_blob *shared_key); int s2n_dh_compute_shared_secret_as_client(struct s2n_dh_params *server_dh_params, struct s2n_stuffer *Yc_out, struct s2n_blob *shared_key); int s2n_dh_params_copy(struct s2n_dh_params *from, struct s2n_dh_params *to); int s2n_dh_params_check(struct s2n_dh_params *dh_params); int s2n_dh_generate_ephemeral_key(struct s2n_dh_params *dh_params); int s2n_dh_params_free(struct s2n_dh_params *dh_params); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_drbg.c000066400000000000000000000200021456575232400221000ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_drbg.h" #include #include #include "utils/s2n_blob.h" #include "utils/s2n_random.h" #include "utils/s2n_safety.h" static bool ignore_prediction_resistance_for_testing = false; #define s2n_drbg_key_size(drgb) EVP_CIPHER_CTX_key_length((drbg)->ctx) #define s2n_drbg_seed_size(drgb) (S2N_DRBG_BLOCK_SIZE + s2n_drbg_key_size(drgb)) /* This function is the same as s2n_increment_sequence_number but it does not check for overflow, since overflow is acceptable in DRBG */ S2N_RESULT s2n_increment_drbg_counter(struct s2n_blob *counter) { for (uint32_t i = (uint32_t) counter->size; i > 0; i--) { counter->data[i - 1] += 1; if (counter->data[i - 1]) { break; } /* seq[i] wrapped, so let it carry */ } return S2N_RESULT_OK; } static S2N_RESULT s2n_drbg_block_encrypt(EVP_CIPHER_CTX *ctx, uint8_t in[S2N_DRBG_BLOCK_SIZE], uint8_t out[S2N_DRBG_BLOCK_SIZE]) { RESULT_ENSURE_REF(ctx); /* len is set by EVP_EncryptUpdate and checked post operation */ int len = S2N_DRBG_BLOCK_SIZE; RESULT_GUARD_OSSL(EVP_EncryptUpdate(ctx, out, &len, in, S2N_DRBG_BLOCK_SIZE), S2N_ERR_DRBG); RESULT_ENSURE_EQ(len, S2N_DRBG_BLOCK_SIZE); return S2N_RESULT_OK; } static S2N_RESULT s2n_drbg_bits(struct s2n_drbg *drbg, struct s2n_blob *out) { RESULT_ENSURE_REF(drbg); RESULT_ENSURE_REF(drbg->ctx); RESULT_ENSURE_REF(out); struct s2n_blob value = { 0 }; RESULT_GUARD_POSIX(s2n_blob_init(&value, drbg->v, sizeof(drbg->v))); uint32_t block_aligned_size = out->size - (out->size % S2N_DRBG_BLOCK_SIZE); /* Per NIST SP800-90A 10.2.1.2: */ for (size_t i = 0; i < block_aligned_size; i += S2N_DRBG_BLOCK_SIZE) { RESULT_GUARD(s2n_increment_drbg_counter(&value)); RESULT_GUARD(s2n_drbg_block_encrypt(drbg->ctx, drbg->v, out->data + i)); drbg->bytes_used += S2N_DRBG_BLOCK_SIZE; } if (out->size <= block_aligned_size) { return S2N_RESULT_OK; } uint8_t spare_block[S2N_DRBG_BLOCK_SIZE]; 
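    /* Descriptive note: the request was not block-aligned, so one extra block is generated
     * into a stack buffer and only the remaining tail bytes are copied into the output. */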
RESULT_GUARD(s2n_increment_drbg_counter(&value)); RESULT_GUARD(s2n_drbg_block_encrypt(drbg->ctx, drbg->v, spare_block)); drbg->bytes_used += S2N_DRBG_BLOCK_SIZE; RESULT_CHECKED_MEMCPY(out->data + block_aligned_size, spare_block, out->size - block_aligned_size); return S2N_RESULT_OK; } static S2N_RESULT s2n_drbg_update(struct s2n_drbg *drbg, struct s2n_blob *provided_data) { RESULT_ENSURE_REF(drbg); RESULT_ENSURE_REF(drbg->ctx); RESULT_ENSURE_REF(provided_data); RESULT_STACK_BLOB(temp_blob, s2n_drbg_seed_size(drgb), S2N_DRBG_MAX_SEED_SIZE); RESULT_ENSURE_EQ(provided_data->size, (uint32_t) s2n_drbg_seed_size(drbg)); RESULT_GUARD(s2n_drbg_bits(drbg, &temp_blob)); /* XOR in the provided data */ for (uint32_t i = 0; i < provided_data->size; i++) { temp_blob.data[i] ^= provided_data->data[i]; } /* Update the key and value */ RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, NULL, NULL, temp_blob.data, NULL), S2N_ERR_DRBG); RESULT_CHECKED_MEMCPY(drbg->v, temp_blob.data + s2n_drbg_key_size(drbg), S2N_DRBG_BLOCK_SIZE); return S2N_RESULT_OK; } static S2N_RESULT s2n_drbg_mix_in_entropy(struct s2n_drbg *drbg, struct s2n_blob *entropy, struct s2n_blob *ps) { RESULT_ENSURE_REF(drbg); RESULT_ENSURE_REF(drbg->ctx); RESULT_ENSURE_REF(entropy); RESULT_ENSURE_GTE(entropy->size, ps->size); for (uint32_t i = 0; i < ps->size; i++) { entropy->data[i] ^= ps->data[i]; } RESULT_GUARD(s2n_drbg_update(drbg, entropy)); return S2N_RESULT_OK; } static S2N_RESULT s2n_drbg_seed(struct s2n_drbg *drbg, struct s2n_blob *ps) { RESULT_STACK_BLOB(blob, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE); RESULT_GUARD(s2n_get_seed_entropy(&blob)); RESULT_GUARD(s2n_drbg_mix_in_entropy(drbg, &blob, ps)); drbg->bytes_used = 0; return S2N_RESULT_OK; } static S2N_RESULT s2n_drbg_mix(struct s2n_drbg *drbg, struct s2n_blob *ps) { if (s2n_unlikely(ignore_prediction_resistance_for_testing)) { RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST); return S2N_RESULT_OK; } RESULT_STACK_BLOB(blob, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE); RESULT_GUARD(s2n_get_mix_entropy(&blob)); RESULT_GUARD(s2n_drbg_mix_in_entropy(drbg, &blob, ps)); drbg->mixes += 1; return S2N_RESULT_OK; } S2N_RESULT s2n_drbg_instantiate(struct s2n_drbg *drbg, struct s2n_blob *personalization_string, const s2n_drbg_mode mode) { RESULT_ENSURE_REF(drbg); RESULT_ENSURE_REF(personalization_string); drbg->ctx = EVP_CIPHER_CTX_new(); RESULT_GUARD_PTR(drbg->ctx); RESULT_EVP_CTX_INIT(drbg->ctx); switch (mode) { case S2N_AES_128_CTR_NO_DF_PR: RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, EVP_aes_128_ecb(), NULL, NULL, NULL), S2N_ERR_DRBG); break; case S2N_AES_256_CTR_NO_DF_PR: RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, EVP_aes_256_ecb(), NULL, NULL, NULL), S2N_ERR_DRBG); break; default: RESULT_BAIL(S2N_ERR_DRBG); } RESULT_ENSURE_LTE(s2n_drbg_key_size(drbg), S2N_DRBG_MAX_KEY_SIZE); RESULT_ENSURE_LTE(s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE); static const uint8_t zero_key[S2N_DRBG_MAX_KEY_SIZE] = { 0 }; /* Start off with zeroed data, per 10.2.1.3.1 item 4 and 5 */ memset(drbg->v, 0, sizeof(drbg->v)); RESULT_GUARD_OSSL(EVP_EncryptInit_ex(drbg->ctx, NULL, NULL, zero_key, NULL), S2N_ERR_DRBG); /* Copy the personalization string */ RESULT_STACK_BLOB(ps, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE); RESULT_GUARD_POSIX(s2n_blob_zero(&ps)); RESULT_CHECKED_MEMCPY(ps.data, personalization_string->data, MIN(ps.size, personalization_string->size)); /* Seed the DRBG */ RESULT_GUARD(s2n_drbg_seed(drbg, &ps)); return S2N_RESULT_OK; } S2N_RESULT 
s2n_drbg_generate(struct s2n_drbg *drbg, struct s2n_blob *blob) { RESULT_ENSURE_REF(drbg); RESULT_ENSURE_REF(drbg->ctx); RESULT_STACK_BLOB(zeros, s2n_drbg_seed_size(drbg), S2N_DRBG_MAX_SEED_SIZE); RESULT_ENSURE(blob->size <= S2N_DRBG_GENERATE_LIMIT, S2N_ERR_DRBG_REQUEST_SIZE); /* Mix in additional entropy for every randomness generation call. This * defense mechanism is referred to as "prediction resistance". * If we ever relax this defense, we must: * 1. Implement reseeding according to limit specified in * NIST SP800-90A 10.2.1 Table 3. * 2. Re-consider whether the current fork detection strategy is still * sufficient. */ RESULT_GUARD(s2n_drbg_mix(drbg, &zeros)); RESULT_GUARD(s2n_drbg_bits(drbg, blob)); RESULT_GUARD(s2n_drbg_update(drbg, &zeros)); return S2N_RESULT_OK; } S2N_RESULT s2n_drbg_wipe(struct s2n_drbg *drbg) { RESULT_ENSURE_REF(drbg); if (drbg->ctx) { RESULT_GUARD_OSSL(EVP_CIPHER_CTX_cleanup(drbg->ctx), S2N_ERR_DRBG); EVP_CIPHER_CTX_free(drbg->ctx); drbg->ctx = NULL; } *drbg = (struct s2n_drbg){ 0 }; return S2N_RESULT_OK; } S2N_RESULT s2n_drbg_bytes_used(struct s2n_drbg *drbg, uint64_t *bytes_used) { RESULT_ENSURE_REF(drbg); RESULT_ENSURE_REF(bytes_used); *bytes_used = drbg->bytes_used; return S2N_RESULT_OK; } S2N_RESULT s2n_ignore_prediction_resistance_for_testing(bool ignore_bool) { RESULT_ENSURE(s2n_in_unit_test(), S2N_ERR_NOT_IN_UNIT_TEST); ignore_prediction_resistance_for_testing = ignore_bool; return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_drbg.h000066400000000000000000000047061456575232400221220ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include "crypto/s2n_hash.h" #include "utils/s2n_blob.h" #include "utils/s2n_result.h" #define S2N_DRBG_BLOCK_SIZE 16 #define S2N_DRBG_MAX_KEY_SIZE 32 #define S2N_DRBG_MAX_SEED_SIZE (S2N_DRBG_BLOCK_SIZE + S2N_DRBG_MAX_KEY_SIZE) /* The maximum size of any one request: from NIST SP800-90A 10.2.1 Table 3 */ #define S2N_DRBG_GENERATE_LIMIT 8192 struct s2n_drbg { /* Track how many bytes have been used */ uint64_t bytes_used; EVP_CIPHER_CTX *ctx; /* The current DRBG 'value' */ uint8_t v[S2N_DRBG_BLOCK_SIZE]; /* Used only by the unit tests: how many times has entropy been mixed in */ uint64_t mixes; }; /* * S2N_AES_128_CTR_NO_DF_PR is a deterministic random bit generator using AES 128 in counter mode (AES_128_CTR). It does not * use a derivation function (NO_DF) on the seed but does have prediction resistance (PR). * * S2N_AES_256_CTR_NO_DF_PR is a deterministic random bit generator using AES 256 in counter mode (AES_128_CTR). It does not * use a derivation function on the seed but does have prediction resistance. */ typedef enum { S2N_AES_128_CTR_NO_DF_PR, S2N_AES_256_CTR_NO_DF_PR } s2n_drbg_mode; /* Per NIST SP 800-90C 6.3 * * s2n's DRBG uses prediction resistance and does not support the * additional_input parameter (which per 800-90C may be zero). 
* * The security strength provided by s2n's DRBG is either 128 or 256 bits * depending on the s2n_drbg_mode passed in. */ S2N_RESULT s2n_drbg_instantiate(struct s2n_drbg *drbg, struct s2n_blob *personalization_string, const s2n_drbg_mode mode); S2N_RESULT s2n_drbg_generate(struct s2n_drbg *drbg, struct s2n_blob *returned_bits); S2N_RESULT s2n_drbg_wipe(struct s2n_drbg *drbg); S2N_RESULT s2n_drbg_bytes_used(struct s2n_drbg *drbg, uint64_t *bytes_used); /* Use for testing only */ S2N_RESULT s2n_ignore_prediction_resistance_for_testing(bool true_or_false); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_ecc_evp.c000066400000000000000000000534151456575232400226040ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_ecc_evp.h" #include #include #if defined(OPENSSL_IS_AWSLC) #include #endif #include #include "tls/s2n_connection.h" #include "tls/s2n_ecc_preferences.h" #include "tls/s2n_tls_parameters.h" #include "utils/s2n_mem.h" #include "utils/s2n_safety.h" #define TLS_EC_CURVE_TYPE_NAMED 3 DEFINE_POINTER_CLEANUP_FUNC(EVP_PKEY *, EVP_PKEY_free); DEFINE_POINTER_CLEANUP_FUNC(EVP_PKEY_CTX *, EVP_PKEY_CTX_free); DEFINE_POINTER_CLEANUP_FUNC(EC_KEY *, EC_KEY_free); #if !EVP_APIS_SUPPORTED DEFINE_POINTER_CLEANUP_FUNC(EC_POINT *, EC_POINT_free); #endif #if EVP_APIS_SUPPORTED static int s2n_ecc_evp_generate_key_x25519(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey); #else static int s2n_ecc_evp_write_point_data_snug(const EC_POINT *point, const EC_GROUP *group, struct s2n_blob *out); static int s2n_ecc_evp_calculate_point_length(const EC_POINT *point, const EC_GROUP *group, uint8_t *length); static EC_POINT *s2n_ecc_evp_blob_to_point(struct s2n_blob *blob, const EC_KEY *ec_key); #endif static int s2n_ecc_evp_generate_key_nist_curves(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey); static int s2n_ecc_evp_generate_own_key(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey); static int s2n_ecc_evp_compute_shared_secret(EVP_PKEY *own_key, EVP_PKEY *peer_public, uint16_t iana_id, struct s2n_blob *shared_secret); /* IANA values can be found here: https://tools.ietf.org/html/rfc8446#appendix-B.3.1.4 */ const struct s2n_ecc_named_curve s2n_ecc_curve_secp256r1 = { .iana_id = TLS_EC_CURVE_SECP_256_R1, .libcrypto_nid = NID_X9_62_prime256v1, .name = "secp256r1", .share_size = SECP256R1_SHARE_SIZE, .generate_key = s2n_ecc_evp_generate_key_nist_curves, }; const struct s2n_ecc_named_curve s2n_ecc_curve_secp384r1 = { .iana_id = TLS_EC_CURVE_SECP_384_R1, .libcrypto_nid = NID_secp384r1, .name = "secp384r1", .share_size = SECP384R1_SHARE_SIZE, .generate_key = s2n_ecc_evp_generate_key_nist_curves, }; const struct s2n_ecc_named_curve s2n_ecc_curve_secp521r1 = { .iana_id = TLS_EC_CURVE_SECP_521_R1, .libcrypto_nid = NID_secp521r1, .name = "secp521r1", .share_size = SECP521R1_SHARE_SIZE, .generate_key = s2n_ecc_evp_generate_key_nist_curves, }; #if EVP_APIS_SUPPORTED const struct s2n_ecc_named_curve s2n_ecc_curve_x25519 = { 
.iana_id = TLS_EC_CURVE_ECDH_X25519, .libcrypto_nid = NID_X25519, .name = "x25519", .share_size = X25519_SHARE_SIZE, .generate_key = s2n_ecc_evp_generate_key_x25519, }; #else const struct s2n_ecc_named_curve s2n_ecc_curve_x25519 = { 0 }; #endif /* A fake / unsupported curve for use in triggering retries * during testing. */ const struct s2n_ecc_named_curve s2n_unsupported_curve = { .iana_id = 0, .name = "unsupported", .libcrypto_nid = NID_X9_62_prime256v1, .share_size = SECP256R1_SHARE_SIZE, .generate_key = s2n_ecc_evp_generate_key_nist_curves, }; /* All curves that s2n supports. New curves MUST be added here. * This list is a super set of all the curves present in s2n_ecc_preferences list. */ const struct s2n_ecc_named_curve *const s2n_all_supported_curves_list[] = { &s2n_ecc_curve_secp256r1, &s2n_ecc_curve_secp384r1, #if EVP_APIS_SUPPORTED &s2n_ecc_curve_x25519, #endif &s2n_ecc_curve_secp521r1, }; const size_t s2n_all_supported_curves_list_len = s2n_array_len(s2n_all_supported_curves_list); int s2n_is_evp_apis_supported() { return EVP_APIS_SUPPORTED; } #if EVP_APIS_SUPPORTED static int s2n_ecc_evp_generate_key_x25519(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey) { DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(named_curve->libcrypto_nid, NULL), EVP_PKEY_CTX_free_pointer); S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_GEN_KEY); POSIX_GUARD_OSSL(EVP_PKEY_keygen_init(pctx), S2N_ERR_ECDHE_GEN_KEY); POSIX_GUARD_OSSL(EVP_PKEY_keygen(pctx, evp_pkey), S2N_ERR_ECDHE_GEN_KEY); S2N_ERROR_IF(evp_pkey == NULL, S2N_ERR_ECDHE_GEN_KEY); return 0; } #endif static int s2n_ecc_evp_generate_key_nist_curves(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey) { DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL), EVP_PKEY_CTX_free_pointer); S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_GEN_KEY); POSIX_GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_GEN_KEY); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, named_curve->libcrypto_nid), S2N_ERR_ECDHE_GEN_KEY); DEFER_CLEANUP(EVP_PKEY *params = NULL, EVP_PKEY_free_pointer); POSIX_GUARD_OSSL(EVP_PKEY_paramgen(pctx, ¶ms), S2N_ERR_ECDHE_GEN_KEY); S2N_ERROR_IF(params == NULL, S2N_ERR_ECDHE_GEN_KEY); DEFER_CLEANUP(EVP_PKEY_CTX *kctx = EVP_PKEY_CTX_new(params, NULL), EVP_PKEY_CTX_free_pointer); S2N_ERROR_IF(kctx == NULL, S2N_ERR_ECDHE_GEN_KEY); POSIX_GUARD_OSSL(EVP_PKEY_keygen_init(kctx), S2N_ERR_ECDHE_GEN_KEY); POSIX_GUARD_OSSL(EVP_PKEY_keygen(kctx, evp_pkey), S2N_ERR_ECDHE_GEN_KEY); S2N_ERROR_IF(evp_pkey == NULL, S2N_ERR_ECDHE_GEN_KEY); return 0; } static int s2n_ecc_evp_generate_own_key(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey) { POSIX_ENSURE_REF(named_curve); S2N_ERROR_IF(named_curve->generate_key == NULL, S2N_ERR_ECDHE_GEN_KEY); return named_curve->generate_key(named_curve, evp_pkey); } static int s2n_ecc_evp_compute_shared_secret(EVP_PKEY *own_key, EVP_PKEY *peer_public, uint16_t iana_id, struct s2n_blob *shared_secret) { POSIX_ENSURE_REF(peer_public); POSIX_ENSURE_REF(own_key); /* From RFC 8446(TLS1.3) Section 4.2.8.2: For the curves secp256r1, secp384r1, and secp521r1, peers MUST validate * each other's public value Q by ensuring that the point is a valid point on the elliptic curve. * For the curve x25519 and x448 the peer public-key validation check doesn't apply. * From RFC 8422(TLS1.2) Section 5.11: With the NIST curves, each party MUST validate the public key sent by its peer * in the ClientKeyExchange and ServerKeyExchange messages. 
A receiving party MUST check that the x and y parameters from * the peer's public value satisfy the curve equation, y^2 = x^3 + ax + b mod p. * Note that the `EC_KEY_check_key` validation is a MUST for only NIST curves, if a non-NIST curve is added to s2n-tls * this is an additional validation step that increases security but decreases performance. */ if (iana_id != TLS_EC_CURVE_ECDH_X25519 && iana_id != TLS_EC_CURVE_ECDH_X448) { DEFER_CLEANUP(EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(peer_public), EC_KEY_free_pointer); S2N_ERROR_IF(ec_key == NULL, S2N_ERR_ECDHE_UNSUPPORTED_CURVE); POSIX_GUARD_OSSL(EC_KEY_check_key(ec_key), S2N_ERR_ECDHE_SHARED_SECRET); } size_t shared_secret_size; DEFER_CLEANUP(EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(own_key, NULL), EVP_PKEY_CTX_free_pointer); S2N_ERROR_IF(ctx == NULL, S2N_ERR_ECDHE_SHARED_SECRET); POSIX_GUARD_OSSL(EVP_PKEY_derive_init(ctx), S2N_ERR_ECDHE_SHARED_SECRET); POSIX_GUARD_OSSL(EVP_PKEY_derive_set_peer(ctx, peer_public), S2N_ERR_ECDHE_SHARED_SECRET); POSIX_GUARD_OSSL(EVP_PKEY_derive(ctx, NULL, &shared_secret_size), S2N_ERR_ECDHE_SHARED_SECRET); POSIX_GUARD(s2n_alloc(shared_secret, shared_secret_size)); if (EVP_PKEY_derive(ctx, shared_secret->data, &shared_secret_size) != 1) { POSIX_GUARD(s2n_free(shared_secret)); POSIX_BAIL(S2N_ERR_ECDHE_SHARED_SECRET); } return 0; } int s2n_ecc_evp_generate_ephemeral_key(struct s2n_ecc_evp_params *ecc_evp_params) { POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve); S2N_ERROR_IF(ecc_evp_params->evp_pkey != NULL, S2N_ERR_ECDHE_GEN_KEY); S2N_ERROR_IF(s2n_ecc_evp_generate_own_key(ecc_evp_params->negotiated_curve, &ecc_evp_params->evp_pkey) != 0, S2N_ERR_ECDHE_GEN_KEY); S2N_ERROR_IF(ecc_evp_params->evp_pkey == NULL, S2N_ERR_ECDHE_GEN_KEY); return 0; } int s2n_ecc_evp_compute_shared_secret_from_params(struct s2n_ecc_evp_params *private_ecc_evp_params, struct s2n_ecc_evp_params *public_ecc_evp_params, struct s2n_blob *shared_key) { POSIX_ENSURE_REF(private_ecc_evp_params->negotiated_curve); POSIX_ENSURE_REF(private_ecc_evp_params->evp_pkey); POSIX_ENSURE_REF(public_ecc_evp_params->negotiated_curve); POSIX_ENSURE_REF(public_ecc_evp_params->evp_pkey); S2N_ERROR_IF(private_ecc_evp_params->negotiated_curve->iana_id != public_ecc_evp_params->negotiated_curve->iana_id, S2N_ERR_ECDHE_UNSUPPORTED_CURVE); POSIX_GUARD(s2n_ecc_evp_compute_shared_secret(private_ecc_evp_params->evp_pkey, public_ecc_evp_params->evp_pkey, private_ecc_evp_params->negotiated_curve->iana_id, shared_key)); return 0; } int s2n_ecc_evp_compute_shared_secret_as_server(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *Yc_in, struct s2n_blob *shared_key) { POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve); POSIX_ENSURE_REF(ecc_evp_params->evp_pkey); POSIX_ENSURE_REF(Yc_in); uint8_t client_public_len; struct s2n_blob client_public_blob = { 0 }; DEFER_CLEANUP(EVP_PKEY *peer_key = EVP_PKEY_new(), EVP_PKEY_free_pointer); S2N_ERROR_IF(peer_key == NULL, S2N_ERR_BAD_MESSAGE); POSIX_GUARD(s2n_stuffer_read_uint8(Yc_in, &client_public_len)); client_public_blob.size = client_public_len; client_public_blob.data = s2n_stuffer_raw_read(Yc_in, client_public_blob.size); POSIX_ENSURE_REF(client_public_blob.data); #if EVP_APIS_SUPPORTED if (ecc_evp_params->negotiated_curve->libcrypto_nid == NID_X25519) { POSIX_GUARD(EVP_PKEY_set_type(peer_key, ecc_evp_params->negotiated_curve->libcrypto_nid)); } else { DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL), EVP_PKEY_CTX_free_pointer); S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_SERIALIZING); 
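        /* Descriptive note: for the NIST curves, the peer EVP_PKEY is first populated with the
         * negotiated curve's parameters so that EVP_PKEY_set1_tls_encodedpoint() below can
         * decode the client's uncompressed point. */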
POSIX_GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_SERIALIZING); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, ecc_evp_params->negotiated_curve->libcrypto_nid), S2N_ERR_ECDHE_SERIALIZING); POSIX_GUARD_OSSL(EVP_PKEY_paramgen(pctx, &peer_key), S2N_ERR_ECDHE_SERIALIZING); } POSIX_GUARD_OSSL(EVP_PKEY_set1_tls_encodedpoint(peer_key, client_public_blob.data, client_public_blob.size), S2N_ERR_ECDHE_SERIALIZING); #else DEFER_CLEANUP(EC_KEY *ec_key = EC_KEY_new_by_curve_name(ecc_evp_params->negotiated_curve->libcrypto_nid), EC_KEY_free_pointer); S2N_ERROR_IF(ec_key == NULL, S2N_ERR_ECDHE_UNSUPPORTED_CURVE); DEFER_CLEANUP(EC_POINT *point = s2n_ecc_evp_blob_to_point(&client_public_blob, ec_key), EC_POINT_free_pointer); S2N_ERROR_IF(point == NULL, S2N_ERR_BAD_MESSAGE); int success = EC_KEY_set_public_key(ec_key, point); POSIX_GUARD_OSSL(EVP_PKEY_set1_EC_KEY(peer_key, ec_key), S2N_ERR_ECDHE_SERIALIZING); S2N_ERROR_IF(success == 0, S2N_ERR_BAD_MESSAGE); #endif return s2n_ecc_evp_compute_shared_secret(ecc_evp_params->evp_pkey, peer_key, ecc_evp_params->negotiated_curve->iana_id, shared_key); } int s2n_ecc_evp_compute_shared_secret_as_client(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *Yc_out, struct s2n_blob *shared_key) { DEFER_CLEANUP(struct s2n_ecc_evp_params client_params = { 0 }, s2n_ecc_evp_params_free); POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve); client_params.negotiated_curve = ecc_evp_params->negotiated_curve; POSIX_GUARD(s2n_ecc_evp_generate_own_key(client_params.negotiated_curve, &client_params.evp_pkey)); S2N_ERROR_IF(client_params.evp_pkey == NULL, S2N_ERR_ECDHE_GEN_KEY); if (s2n_ecc_evp_compute_shared_secret(client_params.evp_pkey, ecc_evp_params->evp_pkey, ecc_evp_params->negotiated_curve->iana_id, shared_key) != S2N_SUCCESS) { POSIX_BAIL(S2N_ERR_ECDHE_SHARED_SECRET); } POSIX_GUARD(s2n_stuffer_write_uint8(Yc_out, client_params.negotiated_curve->share_size)); if (s2n_ecc_evp_write_params_point(&client_params, Yc_out) != 0) { POSIX_BAIL(S2N_ERR_ECDHE_SERIALIZING); } return 0; } #if (!EVP_APIS_SUPPORTED) static int s2n_ecc_evp_calculate_point_length(const EC_POINT *point, const EC_GROUP *group, uint8_t *length) { size_t ret = EC_POINT_point2oct(group, point, POINT_CONVERSION_UNCOMPRESSED, NULL, 0, NULL); S2N_ERROR_IF(ret == 0, S2N_ERR_ECDHE_SERIALIZING); S2N_ERROR_IF(ret > UINT8_MAX, S2N_ERR_ECDHE_SERIALIZING); *length = (uint8_t) ret; return 0; } static int s2n_ecc_evp_write_point_data_snug(const EC_POINT *point, const EC_GROUP *group, struct s2n_blob *out) { size_t ret = EC_POINT_point2oct(group, point, POINT_CONVERSION_UNCOMPRESSED, out->data, out->size, NULL); S2N_ERROR_IF(ret != out->size, S2N_ERR_ECDHE_SERIALIZING); return 0; } static EC_POINT *s2n_ecc_evp_blob_to_point(struct s2n_blob *blob, const EC_KEY *ec_key) { const EC_GROUP *group = EC_KEY_get0_group(ec_key); EC_POINT *point = EC_POINT_new(group); if (point == NULL) { PTR_BAIL(S2N_ERR_ECDHE_UNSUPPORTED_CURVE); } if (EC_POINT_oct2point(group, point, blob->data, blob->size, NULL) != 1) { EC_POINT_free(point); PTR_BAIL(S2N_ERR_BAD_MESSAGE); } return point; } #endif int s2n_ecc_evp_read_params_point(struct s2n_stuffer *in, int point_size, struct s2n_blob *point_blob) { POSIX_ENSURE_REF(in); POSIX_ENSURE_REF(point_blob); POSIX_ENSURE_GTE(point_size, 0); /* Extract point from stuffer */ point_blob->size = point_size; point_blob->data = s2n_stuffer_raw_read(in, point_size); POSIX_ENSURE_REF(point_blob->data); return 0; } int s2n_ecc_evp_read_params(struct s2n_stuffer *in, struct 
s2n_blob *data_to_verify, struct s2n_ecdhe_raw_server_params *raw_server_ecc_params) { POSIX_ENSURE_REF(in); uint8_t curve_type; uint8_t point_length; /* Remember where we started reading the data */ data_to_verify->data = s2n_stuffer_raw_read(in, 0); POSIX_ENSURE_REF(data_to_verify->data); /* Read the curve */ POSIX_GUARD(s2n_stuffer_read_uint8(in, &curve_type)); S2N_ERROR_IF(curve_type != TLS_EC_CURVE_TYPE_NAMED, S2N_ERR_BAD_MESSAGE); raw_server_ecc_params->curve_blob.data = s2n_stuffer_raw_read(in, 2); POSIX_ENSURE_REF(raw_server_ecc_params->curve_blob.data); raw_server_ecc_params->curve_blob.size = 2; /* Read the point */ POSIX_GUARD(s2n_stuffer_read_uint8(in, &point_length)); POSIX_GUARD(s2n_ecc_evp_read_params_point(in, point_length, &raw_server_ecc_params->point_blob)); /* curve type (1) + iana (2) + key share size (1) + key share */ data_to_verify->size = point_length + 4; return 0; } int s2n_ecc_evp_write_params_point(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out) { POSIX_ENSURE_REF(ecc_evp_params); POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve); POSIX_ENSURE_REF(ecc_evp_params->evp_pkey); POSIX_ENSURE_REF(out); #if EVP_APIS_SUPPORTED struct s2n_blob point_blob = { 0 }; uint8_t *encoded_point = NULL; size_t size = EVP_PKEY_get1_tls_encodedpoint(ecc_evp_params->evp_pkey, &encoded_point); if (size != ecc_evp_params->negotiated_curve->share_size) { OPENSSL_free(encoded_point); POSIX_BAIL(S2N_ERR_ECDHE_SERIALIZING); } else { point_blob.data = s2n_stuffer_raw_write(out, ecc_evp_params->negotiated_curve->share_size); POSIX_ENSURE_REF(point_blob.data); POSIX_CHECKED_MEMCPY(point_blob.data, encoded_point, size); OPENSSL_free(encoded_point); } #else uint8_t point_len; struct s2n_blob point_blob = { 0 }; DEFER_CLEANUP(EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(ecc_evp_params->evp_pkey), EC_KEY_free_pointer); S2N_ERROR_IF(ec_key == NULL, S2N_ERR_ECDHE_UNSUPPORTED_CURVE); const EC_POINT *point = EC_KEY_get0_public_key(ec_key); const EC_GROUP *group = EC_KEY_get0_group(ec_key); S2N_ERROR_IF(point == NULL || group == NULL, S2N_ERR_ECDHE_UNSUPPORTED_CURVE); POSIX_GUARD(s2n_ecc_evp_calculate_point_length(point, group, &point_len)); S2N_ERROR_IF(point_len != ecc_evp_params->negotiated_curve->share_size, S2N_ERR_ECDHE_SERIALIZING); point_blob.data = s2n_stuffer_raw_write(out, point_len); POSIX_ENSURE_REF(point_blob.data); point_blob.size = point_len; POSIX_GUARD(s2n_ecc_evp_write_point_data_snug(point, group, &point_blob)); #endif return 0; } int s2n_ecc_evp_write_params(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out, struct s2n_blob *written) { POSIX_ENSURE_REF(ecc_evp_params); POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve); POSIX_ENSURE_REF(ecc_evp_params->evp_pkey); POSIX_ENSURE_REF(out); POSIX_ENSURE_REF(written); uint8_t key_share_size = ecc_evp_params->negotiated_curve->share_size; /* Remember where the written data starts */ written->data = s2n_stuffer_raw_write(out, 0); POSIX_ENSURE_REF(written->data); POSIX_GUARD(s2n_stuffer_write_uint8(out, TLS_EC_CURVE_TYPE_NAMED)); POSIX_GUARD(s2n_stuffer_write_uint16(out, ecc_evp_params->negotiated_curve->iana_id)); POSIX_GUARD(s2n_stuffer_write_uint8(out, key_share_size)); POSIX_GUARD(s2n_ecc_evp_write_params_point(ecc_evp_params, out)); /* key share + key share size (1) + iana (2) + curve type (1) */ written->size = key_share_size + 4; return written->size; } int s2n_ecc_evp_parse_params_point(struct s2n_blob *point_blob, struct s2n_ecc_evp_params *ecc_evp_params) { POSIX_ENSURE_REF(point_blob->data); 
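    /* Descriptive note: the encoded point must be exactly the negotiated curve's share size;
     * any other length is rejected before the bytes are handed to libcrypto. */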
POSIX_ENSURE_REF(ecc_evp_params->negotiated_curve); S2N_ERROR_IF(point_blob->size != ecc_evp_params->negotiated_curve->share_size, S2N_ERR_ECDHE_SERIALIZING); #if EVP_APIS_SUPPORTED if (ecc_evp_params->negotiated_curve->libcrypto_nid == NID_X25519) { if (ecc_evp_params->evp_pkey == NULL) { ecc_evp_params->evp_pkey = EVP_PKEY_new(); } S2N_ERROR_IF(ecc_evp_params->evp_pkey == NULL, S2N_ERR_BAD_MESSAGE); POSIX_GUARD(EVP_PKEY_set_type(ecc_evp_params->evp_pkey, ecc_evp_params->negotiated_curve->libcrypto_nid)); } else { DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL), EVP_PKEY_CTX_free_pointer); S2N_ERROR_IF(pctx == NULL, S2N_ERR_ECDHE_SERIALIZING); POSIX_GUARD_OSSL(EVP_PKEY_paramgen_init(pctx), S2N_ERR_ECDHE_SERIALIZING); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, ecc_evp_params->negotiated_curve->libcrypto_nid), S2N_ERR_ECDHE_SERIALIZING); POSIX_GUARD_OSSL(EVP_PKEY_paramgen(pctx, &ecc_evp_params->evp_pkey), S2N_ERR_ECDHE_SERIALIZING); } POSIX_GUARD_OSSL(EVP_PKEY_set1_tls_encodedpoint(ecc_evp_params->evp_pkey, point_blob->data, point_blob->size), S2N_ERR_ECDHE_SERIALIZING); #else if (ecc_evp_params->evp_pkey == NULL) { ecc_evp_params->evp_pkey = EVP_PKEY_new(); } S2N_ERROR_IF(ecc_evp_params->evp_pkey == NULL, S2N_ERR_BAD_MESSAGE); /* Create a key to store the point */ DEFER_CLEANUP(EC_KEY *ec_key = EC_KEY_new_by_curve_name(ecc_evp_params->negotiated_curve->libcrypto_nid), EC_KEY_free_pointer); S2N_ERROR_IF(ec_key == NULL, S2N_ERR_ECDHE_UNSUPPORTED_CURVE); /* Parse and store the server public point */ DEFER_CLEANUP(EC_POINT *point = s2n_ecc_evp_blob_to_point(point_blob, ec_key), EC_POINT_free_pointer); S2N_ERROR_IF(point == NULL, S2N_ERR_BAD_MESSAGE); /* Set the point as the public key */ int success = EC_KEY_set_public_key(ec_key, point); POSIX_GUARD_OSSL(EVP_PKEY_set1_EC_KEY(ecc_evp_params->evp_pkey, ec_key), S2N_ERR_ECDHE_SERIALIZING); /* EC_KEY_set_public_key returns 1 on success, 0 on failure */ S2N_ERROR_IF(success == 0, S2N_ERR_BAD_MESSAGE); #endif return 0; } int s2n_ecc_evp_parse_params(struct s2n_connection *conn, struct s2n_ecdhe_raw_server_params *raw_server_ecc_params, struct s2n_ecc_evp_params *ecc_evp_params) { POSIX_ENSURE(s2n_ecc_evp_find_supported_curve(conn, &raw_server_ecc_params->curve_blob, &ecc_evp_params->negotiated_curve) == 0, S2N_ERR_ECDHE_UNSUPPORTED_CURVE); return s2n_ecc_evp_parse_params_point(&raw_server_ecc_params->point_blob, ecc_evp_params); } int s2n_ecc_evp_find_supported_curve(struct s2n_connection *conn, struct s2n_blob *iana_ids, const struct s2n_ecc_named_curve **found) { const struct s2n_ecc_preferences *ecc_prefs = NULL; POSIX_GUARD(s2n_connection_get_ecc_preferences(conn, &ecc_prefs)); POSIX_ENSURE_REF(ecc_prefs); struct s2n_stuffer iana_ids_in = { 0 }; POSIX_GUARD(s2n_stuffer_init(&iana_ids_in, iana_ids)); POSIX_GUARD(s2n_stuffer_write(&iana_ids_in, iana_ids)); for (size_t i = 0; i < ecc_prefs->count; i++) { const struct s2n_ecc_named_curve *supported_curve = ecc_prefs->ecc_curves[i]; for (uint32_t j = 0; j < iana_ids->size / 2; j++) { uint16_t iana_id; POSIX_GUARD(s2n_stuffer_read_uint16(&iana_ids_in, &iana_id)); if (supported_curve->iana_id == iana_id) { *found = supported_curve; return 0; } } POSIX_GUARD(s2n_stuffer_reread(&iana_ids_in)); } POSIX_BAIL(S2N_ERR_ECDHE_UNSUPPORTED_CURVE); } int s2n_ecc_evp_params_free(struct s2n_ecc_evp_params *ecc_evp_params) { if (ecc_evp_params->evp_pkey != NULL) { EVP_PKEY_free(ecc_evp_params->evp_pkey); ecc_evp_params->evp_pkey = NULL; } return 0; } 
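/*
 * Illustrative sketch (not part of the upstream s2n sources): a minimal example of how the
 * ECDHE helpers above fit together when both endpoints live in the same process. Each side
 * generates an ephemeral key pair on the same named curve, then derives the shared secret
 * from its own private key and the peer's public key; the two resulting blobs should match.
 * The function name s2n_example_ecdhe_roundtrip is hypothetical and exists only for
 * illustration; the caller owns the two secret blobs and would release them with s2n_free().
 */
static int s2n_example_ecdhe_roundtrip(struct s2n_blob *client_secret, struct s2n_blob *server_secret)
{
    DEFER_CLEANUP(struct s2n_ecc_evp_params client = { 0 }, s2n_ecc_evp_params_free);
    DEFER_CLEANUP(struct s2n_ecc_evp_params server = { 0 }, s2n_ecc_evp_params_free);
    client.negotiated_curve = &s2n_ecc_curve_secp256r1;
    server.negotiated_curve = &s2n_ecc_curve_secp256r1;

    /* Each side creates its own ephemeral key pair on the negotiated curve. */
    POSIX_GUARD(s2n_ecc_evp_generate_ephemeral_key(&client));
    POSIX_GUARD(s2n_ecc_evp_generate_ephemeral_key(&server));

    /* Derive the shared secret in both directions; each call allocates its output blob. */
    POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_from_params(&client, &server, client_secret));
    POSIX_GUARD(s2n_ecc_evp_compute_shared_secret_from_params(&server, &client, server_secret));

    return 0;
}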
aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_ecc_evp.h000066400000000000000000000100351456575232400226000ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include "crypto/s2n_hash.h" #include "stuffer/s2n_stuffer.h" #include "tls/s2n_kex_data.h" #include "tls/s2n_tls_parameters.h" #include "utils/s2n_safety.h" /* Share sizes are described here: https://tools.ietf.org/html/rfc8446#section-4.2.8.2 * and include the extra "legacy_form" byte */ #define SECP256R1_SHARE_SIZE ((32 * 2) + 1) #define SECP384R1_SHARE_SIZE ((48 * 2) + 1) #define SECP521R1_SHARE_SIZE ((66 * 2) + 1) #define X25519_SHARE_SIZE (32) struct s2n_ecc_named_curve { /* See https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8 */ uint16_t iana_id; /* See nid_list in openssl/ssl/t1_lib.c */ int libcrypto_nid; const char *name; const uint8_t share_size; int (*generate_key)(const struct s2n_ecc_named_curve *named_curve, EVP_PKEY **evp_pkey); }; extern const struct s2n_ecc_named_curve s2n_ecc_curve_secp256r1; extern const struct s2n_ecc_named_curve s2n_ecc_curve_secp384r1; extern const struct s2n_ecc_named_curve s2n_ecc_curve_secp521r1; extern const struct s2n_ecc_named_curve s2n_ecc_curve_x25519; /* BoringSSL only supports using EVP_PKEY_X25519 with "modern" EC EVP APIs. BoringSSL has a note to possibly add this in * the future. 
See https://github.com/google/boringssl/blob/master/crypto/evp/p_x25519_asn1.c#L233 */ #if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL) #define EVP_APIS_SUPPORTED 1 #define S2N_ECC_EVP_SUPPORTED_CURVES_COUNT 4 #else #define EVP_APIS_SUPPORTED 0 #define S2N_ECC_EVP_SUPPORTED_CURVES_COUNT 3 #endif extern const struct s2n_ecc_named_curve *const s2n_all_supported_curves_list[]; extern const size_t s2n_all_supported_curves_list_len; struct s2n_ecc_evp_params { const struct s2n_ecc_named_curve *negotiated_curve; EVP_PKEY *evp_pkey; }; int s2n_ecc_evp_generate_ephemeral_key(struct s2n_ecc_evp_params *ecc_evp_params); int s2n_ecc_evp_compute_shared_secret_from_params(struct s2n_ecc_evp_params *private_ecc_evp_params, struct s2n_ecc_evp_params *public_ecc_evp_params, struct s2n_blob *shared_key); int s2n_ecc_evp_write_params_point(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out); int s2n_ecc_evp_read_params_point(struct s2n_stuffer *in, int point_size, struct s2n_blob *point_blob); int s2n_ecc_evp_compute_shared_secret_as_server(struct s2n_ecc_evp_params *server_ecc_evp_params, struct s2n_stuffer *Yc_in, struct s2n_blob *shared_key); int s2n_ecc_evp_compute_shared_secret_as_client(struct s2n_ecc_evp_params *server_ecc_evp_params, struct s2n_stuffer *Yc_out, struct s2n_blob *shared_key); int s2n_ecc_evp_parse_params_point(struct s2n_blob *point_blob, struct s2n_ecc_evp_params *ecc_evp_params); int s2n_ecc_evp_write_params(struct s2n_ecc_evp_params *ecc_evp_params, struct s2n_stuffer *out, struct s2n_blob *written); int s2n_ecc_evp_read_params(struct s2n_stuffer *in, struct s2n_blob *data_to_verify, struct s2n_ecdhe_raw_server_params *raw_server_ecc_params); int s2n_ecc_evp_parse_params(struct s2n_connection *conn, struct s2n_ecdhe_raw_server_params *raw_server_ecc_params, struct s2n_ecc_evp_params *ecc_evp_params); int s2n_ecc_evp_find_supported_curve(struct s2n_connection *conn, struct s2n_blob *iana_ids, const struct s2n_ecc_named_curve **found); int s2n_ecc_evp_params_free(struct s2n_ecc_evp_params *ecc_evp_params); int s2n_is_evp_apis_supported(); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_ecdsa.c000066400000000000000000000166571456575232400222660ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include "crypto/s2n_ecdsa.h" #include #include #include #include "crypto/s2n_ecc_evp.h" #include "crypto/s2n_evp_signing.h" #include "crypto/s2n_hash.h" #include "crypto/s2n_openssl.h" #include "crypto/s2n_pkey.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" #include "utils/s2n_compiler.h" #include "utils/s2n_mem.h" #include "utils/s2n_random.h" #include "utils/s2n_result.h" #include "utils/s2n_safety.h" #include "utils/s2n_safety_macros.h" #define S2N_ECDSA_TYPE 0 EC_KEY *s2n_unsafe_ecdsa_get_non_const(const struct s2n_ecdsa_key *ecdsa_key) { PTR_ENSURE_REF(ecdsa_key); #ifdef S2N_DIAGNOSTICS_PUSH_SUPPORTED #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" #endif EC_KEY *out_ec_key = (EC_KEY *) ecdsa_key->ec_key; #ifdef S2N_DIAGNOSTICS_POP_SUPPORTED #pragma GCC diagnostic pop #endif return out_ec_key; } S2N_RESULT s2n_ecdsa_der_signature_size(const struct s2n_pkey *pkey, uint32_t *size_out) { RESULT_ENSURE_REF(pkey); RESULT_ENSURE_REF(size_out); const struct s2n_ecdsa_key *ecdsa_key = &pkey->key.ecdsa_key; RESULT_ENSURE_REF(ecdsa_key->ec_key); const int size = ECDSA_size(ecdsa_key->ec_key); RESULT_GUARD_POSIX(size); *size_out = size; return S2N_RESULT_OK; } int s2n_ecdsa_sign_digest(const struct s2n_pkey *priv, struct s2n_blob *digest, struct s2n_blob *signature) { POSIX_ENSURE_REF(priv); POSIX_ENSURE_REF(digest); POSIX_ENSURE_REF(signature); const s2n_ecdsa_private_key *key = &priv->key.ecdsa_key; POSIX_ENSURE_REF(key->ec_key); unsigned int signature_size = signature->size; /* Safety: ECDSA_sign does not mutate the key */ POSIX_GUARD_OSSL(ECDSA_sign(S2N_ECDSA_TYPE, digest->data, digest->size, signature->data, &signature_size, s2n_unsafe_ecdsa_get_non_const(key)), S2N_ERR_SIGN); POSIX_ENSURE(signature_size <= signature->size, S2N_ERR_SIZE_MISMATCH); signature->size = signature_size; return S2N_SUCCESS; } static int s2n_ecdsa_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature) { POSIX_ENSURE_REF(digest); sig_alg_check(sig_alg, S2N_SIGNATURE_ECDSA); uint8_t digest_length = 0; POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length)); POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN); uint8_t digest_out[S2N_MAX_DIGEST_LEN] = { 0 }; POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length)); struct s2n_blob digest_blob = { 0 }; POSIX_GUARD(s2n_blob_init(&digest_blob, digest_out, digest_length)); POSIX_GUARD(s2n_ecdsa_sign_digest(priv, &digest_blob, signature)); POSIX_GUARD(s2n_hash_reset(digest)); return S2N_SUCCESS; } static int s2n_ecdsa_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature) { sig_alg_check(sig_alg, S2N_SIGNATURE_ECDSA); const s2n_ecdsa_public_key *key = &pub->key.ecdsa_key; POSIX_ENSURE_REF(key->ec_key); uint8_t digest_length; POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length)); POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN); uint8_t digest_out[S2N_MAX_DIGEST_LEN]; POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length)); /* Safety: ECDSA_verify does not mutate the key */ /* ECDSA_verify ignores the first parameter */ POSIX_GUARD_OSSL(ECDSA_verify(0, digest_out, digest_length, signature->data, signature->size, s2n_unsafe_ecdsa_get_non_const(key)), S2N_ERR_VERIFY_SIGNATURE); POSIX_GUARD(s2n_hash_reset(digest)); return 0; } static int s2n_ecdsa_keys_match(const struct s2n_pkey *pub, const struct s2n_pkey *priv) { uint8_t 
input[16] = { 1 }; DEFER_CLEANUP(struct s2n_blob signature = { 0 }, s2n_free); DEFER_CLEANUP(struct s2n_hash_state state_in = { 0 }, s2n_hash_free); DEFER_CLEANUP(struct s2n_hash_state state_out = { 0 }, s2n_hash_free); /* s2n_hash_new only allocates memory when using high-level EVP hashes, currently restricted to FIPS mode. */ POSIX_GUARD(s2n_hash_new(&state_in)); POSIX_GUARD(s2n_hash_new(&state_out)); POSIX_GUARD(s2n_hash_init(&state_in, S2N_HASH_SHA1)); POSIX_GUARD(s2n_hash_init(&state_out, S2N_HASH_SHA1)); POSIX_GUARD(s2n_hash_update(&state_in, input, sizeof(input))); POSIX_GUARD(s2n_hash_update(&state_out, input, sizeof(input))); uint32_t size = 0; POSIX_GUARD_RESULT(s2n_ecdsa_der_signature_size(priv, &size)); POSIX_GUARD(s2n_alloc(&signature, size)); POSIX_GUARD(s2n_ecdsa_sign(priv, S2N_SIGNATURE_ECDSA, &state_in, &signature)); POSIX_GUARD(s2n_ecdsa_verify(pub, S2N_SIGNATURE_ECDSA, &state_out, &signature)); return 0; } static int s2n_ecdsa_key_free(struct s2n_pkey *pkey) { POSIX_ENSURE_REF(pkey); struct s2n_ecdsa_key *ecdsa_key = &pkey->key.ecdsa_key; if (ecdsa_key->ec_key == NULL) { return S2N_SUCCESS; } /* Safety: freeing the key owned by this object */ EC_KEY_free(s2n_unsafe_ecdsa_get_non_const(ecdsa_key)); ecdsa_key->ec_key = NULL; return S2N_SUCCESS; } static int s2n_ecdsa_check_key_exists(const struct s2n_pkey *pkey) { const struct s2n_ecdsa_key *ecdsa_key = &pkey->key.ecdsa_key; POSIX_ENSURE_REF(ecdsa_key->ec_key); return 0; } S2N_RESULT s2n_evp_pkey_to_ecdsa_private_key(s2n_ecdsa_private_key *ecdsa_key, EVP_PKEY *evp_private_key) { const EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(evp_private_key); RESULT_ENSURE(ec_key != NULL, S2N_ERR_DECODE_PRIVATE_KEY); ecdsa_key->ec_key = ec_key; return S2N_RESULT_OK; } S2N_RESULT s2n_evp_pkey_to_ecdsa_public_key(s2n_ecdsa_public_key *ecdsa_key, EVP_PKEY *evp_public_key) { const EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(evp_public_key); RESULT_ENSURE(ec_key != NULL, S2N_ERR_DECODE_CERTIFICATE); ecdsa_key->ec_key = ec_key; return S2N_RESULT_OK; } S2N_RESULT s2n_ecdsa_pkey_init(struct s2n_pkey *pkey) { pkey->size = &s2n_ecdsa_der_signature_size; pkey->sign = &s2n_ecdsa_sign; pkey->verify = &s2n_ecdsa_verify; pkey->encrypt = NULL; /* No function for encryption */ pkey->decrypt = NULL; /* No function for decryption */ pkey->match = &s2n_ecdsa_keys_match; pkey->free = &s2n_ecdsa_key_free; pkey->check_key = &s2n_ecdsa_check_key_exists; RESULT_GUARD(s2n_evp_signing_set_pkey_overrides(pkey)); return S2N_RESULT_OK; } int s2n_ecdsa_pkey_matches_curve(const struct s2n_ecdsa_key *ecdsa_key, const struct s2n_ecc_named_curve *curve) { POSIX_ENSURE_REF(ecdsa_key); POSIX_ENSURE_REF(ecdsa_key->ec_key); POSIX_ENSURE_REF(curve); const EC_KEY *key = ecdsa_key->ec_key; int curve_id = EC_GROUP_get_curve_name(EC_KEY_get0_group(key)); POSIX_ENSURE_EQ(curve_id, curve->libcrypto_nid); return 0; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_ecdsa.h000066400000000000000000000035461456575232400222640ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include #include #include "api/s2n.h" #include "crypto/s2n_ecc_evp.h" #include "crypto/s2n_hash.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" /* Forward declaration to avoid the circular dependency with s2n_pkey.h */ struct s2n_pkey; struct s2n_ecdsa_key { /* * Starting in openssl_3, `EVP_PKEY_get1_EC_KEY` and `EVP_PKEY_get0_EC_KEY` * functions return a pre-cached copy of the underlying key. This means that any * mutations are not reflected back onto the underlying key. * * The `const` identifier is present to help ensure that the key is not mutated. * Usecases which require a non-const EC_KEY (some openssl functions), should * use `s2n_unsafe_ecdsa_get_non_const` while ensuring that the usage is safe. */ const EC_KEY *ec_key; }; typedef struct s2n_ecdsa_key s2n_ecdsa_public_key; typedef struct s2n_ecdsa_key s2n_ecdsa_private_key; S2N_RESULT s2n_ecdsa_pkey_init(struct s2n_pkey *pkey); int s2n_ecdsa_pkey_matches_curve(const struct s2n_ecdsa_key *ecdsa_key, const struct s2n_ecc_named_curve *curve); S2N_RESULT s2n_evp_pkey_to_ecdsa_public_key(s2n_ecdsa_public_key *ecdsa_key, EVP_PKEY *pkey); S2N_RESULT s2n_evp_pkey_to_ecdsa_private_key(s2n_ecdsa_private_key *ecdsa_key, EVP_PKEY *pkey); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_evp.c000066400000000000000000000036741456575232400217740ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_evp.h" #include "crypto/s2n_fips.h" #include "error/s2n_errno.h" #include "utils/s2n_safety.h" int s2n_digest_allow_md5_for_fips(struct s2n_evp_digest *evp_digest) { POSIX_ENSURE_REF(evp_digest); /* This is only to be used for EVP digests that will require MD5 to be used * to comply with the TLS 1.0 and 1.1 RFC's for the PRF. MD5 cannot be used * outside of the TLS 1.0 and 1.1 PRF when in FIPS mode. */ S2N_ERROR_IF(!s2n_is_in_fips_mode() || (evp_digest->ctx == NULL), S2N_ERR_ALLOW_MD5_FOR_FIPS_FAILED); #if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_IS_AWSLC) EVP_MD_CTX_set_flags(evp_digest->ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); #endif return S2N_SUCCESS; } S2N_RESULT s2n_digest_is_md5_allowed_for_fips(struct s2n_evp_digest *evp_digest, bool *out) { RESULT_ENSURE_REF(out); *out = false; #if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_IS_AWSLC) if (s2n_is_in_fips_mode() && evp_digest && evp_digest->ctx && EVP_MD_CTX_test_flags(evp_digest->ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW)) { /* s2n is in FIPS mode and the EVP digest allows MD5. */ *out = true; } #else if (s2n_is_in_fips_mode()) { /* If s2n is in FIPS mode and built with AWS-LC or BoringSSL, there are no flags to check in the EVP digest to allow MD5. */ *out = true; } #endif return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_evp.h000066400000000000000000000040451456575232400217720ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). 
* You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include #include "crypto/s2n_openssl.h" #include "utils/s2n_result.h" struct s2n_evp_digest { const EVP_MD *md; EVP_MD_CTX *ctx; }; struct s2n_evp_hmac_state { struct s2n_evp_digest evp_digest; union { HMAC_CTX *hmac_ctx; EVP_PKEY *evp_pkey; } ctx; }; /* Define API's that change based on the OpenSSL Major Version. */ #if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) && !defined(LIBRESSL_VERSION_NUMBER) #define S2N_EVP_MD_CTX_NEW() (EVP_MD_CTX_new()) #define S2N_EVP_MD_CTX_RESET(md_ctx) (EVP_MD_CTX_reset(md_ctx)) #define S2N_EVP_MD_CTX_FREE(md_ctx) (EVP_MD_CTX_free(md_ctx)) #else #define S2N_EVP_MD_CTX_NEW() (EVP_MD_CTX_create()) #define S2N_EVP_MD_CTX_RESET(md_ctx) (EVP_MD_CTX_cleanup(md_ctx)) #define S2N_EVP_MD_CTX_FREE(md_ctx) (EVP_MD_CTX_destroy(md_ctx)) #endif /* On some versions of OpenSSL, "EVP_PKEY_CTX_set_signature_md()" is just a macro that casts digest_alg to "void*", * which fails to compile when the "-Werror=cast-qual" compiler flag is enabled. So we work around this OpenSSL * issue by turning off this compiler check for this one function with a cast through. */ #define S2N_EVP_PKEY_CTX_set_signature_md(ctx, md) \ EVP_PKEY_CTX_set_signature_md(ctx, (EVP_MD *) (uintptr_t) md) int s2n_digest_allow_md5_for_fips(struct s2n_evp_digest *evp_digest); S2N_RESULT s2n_digest_is_md5_allowed_for_fips(struct s2n_evp_digest *evp_digest, bool *out); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_evp_signing.c000066400000000000000000000136441456575232400235100ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_evp_signing.h" #include "crypto/s2n_evp.h" #include "crypto/s2n_pkey.h" #include "crypto/s2n_rsa_pss.h" #include "error/s2n_errno.h" #include "utils/s2n_safety.h" DEFINE_POINTER_CLEANUP_FUNC(EVP_PKEY_CTX *, EVP_PKEY_CTX_free); /* * FIPS 140-3 requires that we don't pass raw digest bytes to the libcrypto signing methods. * In order to do that, we need to use signing methods that both calculate the digest and * perform the signature. 
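 *
 * For illustration, a hedged sketch of what "digest and sign in one call" looks like with
 * OpenSSL's one-shot EVP API (names such as pkey, msg and sig are hypothetical, and this is
 * not the exact path s2n takes; s2n instead attaches a pkey context to its existing hash
 * state, as the functions below show):
 *
 *     EVP_MD_CTX *md_ctx = EVP_MD_CTX_new();
 *     EVP_DigestSignInit(md_ctx, NULL, EVP_sha256(), NULL, pkey);
 *     size_t sig_len = 0;
 *     EVP_DigestSign(md_ctx, NULL, &sig_len, msg, msg_len);    (first call queries the required length)
 *     EVP_DigestSign(md_ctx, sig, &sig_len, msg, msg_len);     (hashes and signs in a single operation)
 *     EVP_MD_CTX_free(md_ctx);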
*/ static S2N_RESULT s2n_evp_md_ctx_set_pkey_ctx(EVP_MD_CTX *ctx, EVP_PKEY_CTX *pctx) { #ifdef S2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX EVP_MD_CTX_set_pkey_ctx(ctx, pctx); return S2N_RESULT_OK; #else RESULT_BAIL(S2N_ERR_UNIMPLEMENTED); #endif } static S2N_RESULT s2n_evp_pkey_set_rsa_pss_saltlen(EVP_PKEY_CTX *pctx) { #if RSA_PSS_SIGNING_SUPPORTED RESULT_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, RSA_PSS_SALTLEN_DIGEST), S2N_ERR_PKEY_CTX_INIT); return S2N_RESULT_OK; #else RESULT_BAIL(S2N_ERR_UNIMPLEMENTED); #endif } bool s2n_evp_signing_supported() { #ifdef S2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX /* We can only use EVP signing if the hash state has an EVP_MD_CTX * that we can pass to the EVP signing methods. */ return s2n_hash_evp_fully_supported(); #else return false; #endif } /* If using EVP signing, override the sign and verify pkey methods. * The EVP methods can handle all pkey types / signature algorithms. */ S2N_RESULT s2n_evp_signing_set_pkey_overrides(struct s2n_pkey *pkey) { if (s2n_evp_signing_supported()) { RESULT_ENSURE_REF(pkey); pkey->sign = &s2n_evp_sign; pkey->verify = &s2n_evp_verify; } return S2N_RESULT_OK; } static S2N_RESULT s2n_evp_signing_validate_hash_alg(s2n_signature_algorithm sig_alg, s2n_hash_algorithm hash_alg) { switch (hash_alg) { case S2N_HASH_NONE: case S2N_HASH_MD5: /* MD5 alone is never supported */ RESULT_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); break; case S2N_HASH_MD5_SHA1: /* Only RSA supports MD5+SHA1. * This should not be a problem, as we only allow MD5+SHA1 when * falling back to TLS1.0 or 1.1, which only support RSA. */ RESULT_ENSURE(sig_alg == S2N_SIGNATURE_RSA, S2N_ERR_HASH_INVALID_ALGORITHM); break; default: break; } /* Hash algorithm must be recognized and supported by EVP_MD */ RESULT_ENSURE(s2n_hash_alg_to_evp_md(hash_alg) != NULL, S2N_ERR_HASH_INVALID_ALGORITHM); return S2N_RESULT_OK; } int s2n_evp_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg, struct s2n_hash_state *hash_state, struct s2n_blob *signature) { POSIX_ENSURE_REF(priv); POSIX_ENSURE_REF(hash_state); POSIX_ENSURE_REF(signature); POSIX_ENSURE(s2n_evp_signing_supported(), S2N_ERR_HASH_NOT_READY); POSIX_GUARD_RESULT(s2n_evp_signing_validate_hash_alg(sig_alg, hash_state->alg)); DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(priv->pkey, NULL), EVP_PKEY_CTX_free_pointer); POSIX_ENSURE_REF(pctx); POSIX_GUARD_OSSL(EVP_PKEY_sign_init(pctx), S2N_ERR_PKEY_CTX_INIT); POSIX_GUARD_OSSL(S2N_EVP_PKEY_CTX_set_signature_md(pctx, s2n_hash_alg_to_evp_md(hash_state->alg)), S2N_ERR_PKEY_CTX_INIT); if (sig_alg == S2N_SIGNATURE_RSA_PSS_RSAE || sig_alg == S2N_SIGNATURE_RSA_PSS_PSS) { POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_PKEY_CTX_INIT); POSIX_GUARD_RESULT(s2n_evp_pkey_set_rsa_pss_saltlen(pctx)); } EVP_MD_CTX *ctx = hash_state->digest.high_level.evp.ctx; POSIX_ENSURE_REF(ctx); POSIX_GUARD_RESULT(s2n_evp_md_ctx_set_pkey_ctx(ctx, pctx)); size_t signature_size = signature->size; POSIX_GUARD_OSSL(EVP_DigestSignFinal(ctx, signature->data, &signature_size), S2N_ERR_SIGN); POSIX_ENSURE(signature_size <= signature->size, S2N_ERR_SIZE_MISMATCH); signature->size = signature_size; POSIX_GUARD_RESULT(s2n_evp_md_ctx_set_pkey_ctx(ctx, NULL)); return S2N_SUCCESS; } int s2n_evp_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg, struct s2n_hash_state *hash_state, struct s2n_blob *signature) { POSIX_ENSURE_REF(pub); POSIX_ENSURE_REF(hash_state); POSIX_ENSURE_REF(signature); POSIX_ENSURE(s2n_evp_signing_supported(), 
S2N_ERR_HASH_NOT_READY); POSIX_GUARD_RESULT(s2n_evp_signing_validate_hash_alg(sig_alg, hash_state->alg)); DEFER_CLEANUP(EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(pub->pkey, NULL), EVP_PKEY_CTX_free_pointer); POSIX_ENSURE_REF(pctx); POSIX_GUARD_OSSL(EVP_PKEY_verify_init(pctx), S2N_ERR_PKEY_CTX_INIT); POSIX_GUARD_OSSL(S2N_EVP_PKEY_CTX_set_signature_md(pctx, s2n_hash_alg_to_evp_md(hash_state->alg)), S2N_ERR_PKEY_CTX_INIT); if (sig_alg == S2N_SIGNATURE_RSA_PSS_RSAE || sig_alg == S2N_SIGNATURE_RSA_PSS_PSS) { POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_PKEY_CTX_INIT); POSIX_GUARD_RESULT(s2n_evp_pkey_set_rsa_pss_saltlen(pctx)); } EVP_MD_CTX *ctx = hash_state->digest.high_level.evp.ctx; POSIX_ENSURE_REF(ctx); POSIX_GUARD_RESULT(s2n_evp_md_ctx_set_pkey_ctx(ctx, pctx)); POSIX_GUARD_OSSL(EVP_DigestVerifyFinal(ctx, signature->data, signature->size), S2N_ERR_VERIFY_SIGNATURE); POSIX_GUARD_RESULT(s2n_evp_md_ctx_set_pkey_ctx(ctx, NULL)); return S2N_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_evp_signing.h000066400000000000000000000021161456575232400235050ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include "api/s2n.h" #include "crypto/s2n_hash.h" #include "crypto/s2n_signature.h" #include "utils/s2n_blob.h" bool s2n_evp_signing_supported(); S2N_RESULT s2n_evp_signing_set_pkey_overrides(struct s2n_pkey *pkey); int s2n_evp_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature); int s2n_evp_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_fips.c000066400000000000000000000036161456575232400221370ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_fips.h" #include #if defined(S2N_INTERN_LIBCRYPTO) && defined(OPENSSL_FIPS) #error "Interning with OpenSSL fips-validated libcrypto is not currently supported. See https://github.com/aws/s2n-tls/issues/2741" #endif static int s2n_fips_mode = 0; /* FIPS mode can be checked if OpenSSL was configured and built for FIPS which then defines OPENSSL_FIPS. * * AWS-LC always defines FIPS_mode() that you can call and check what the library was built with. 
It does not define * a public OPENSSL_FIPS/AWSLC_FIPS macro that we can (or need to) check here * * Safeguard with macros, for example because LibreSSL doesn't define * FIPS_mode() by default. * * Note: FIPS_mode() does not change the FIPS state of libcrypto. This only returns the current state. Applications * using s2n must call FIPS_mode_set(1) prior to s2n_init. * */ bool s2n_libcrypto_is_fips(void) { #if defined(OPENSSL_FIPS) || defined(OPENSSL_IS_AWSLC) if (FIPS_mode() == 1) { return true; } #endif return false; } int s2n_fips_init(void) { s2n_fips_mode = 0; if (s2n_libcrypto_is_fips()) { s2n_fips_mode = 1; } return 0; } /* Return 1 if FIPS mode is enabled, 0 otherwise. FIPS mode must be enabled prior to calling s2n_init(). */ int s2n_is_in_fips_mode(void) { return s2n_fips_mode; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_fips.h000066400000000000000000000022601456575232400221360ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include "api/s2n.h" #include "utils/s2n_result.h" #pragma once int s2n_fips_init(void); int s2n_is_in_fips_mode(void); bool s2n_libcrypto_is_fips(void); struct s2n_cipher_suite; S2N_RESULT s2n_fips_validate_cipher_suite(const struct s2n_cipher_suite *cipher_suite, bool *valid); struct s2n_signature_scheme; S2N_RESULT s2n_fips_validate_signature_scheme(const struct s2n_signature_scheme *sig_alg, bool *valid); struct s2n_ecc_named_curve; S2N_RESULT s2n_fips_validate_curve(const struct s2n_ecc_named_curve *curve, bool *valid); S2N_RESULT s2n_fips_validate_version(uint8_t version, bool *valid); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_fips_rules.c000066400000000000000000000102001456575232400233460ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_fips.h" #include "tls/s2n_cipher_suites.h" #include "tls/s2n_tls_parameters.h" #include "utils/s2n_result.h" /* FIPS requires at least 112 bits of security.
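 * (This is why the allow-lists below start at SHA-224: under the referenced NIST guidance,
 * MD5 and SHA-1 do not provide 112 bits of collision resistance, so they are omitted from
 * fips_hash_algs and, through s2n_fips_validate_signature_scheme below, from signature schemes.)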
* https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-107r1.pdf */ const s2n_hash_algorithm fips_hash_algs[] = { S2N_HASH_SHA224, S2N_HASH_SHA256, S2N_HASH_SHA384, S2N_HASH_SHA512, }; S2N_RESULT s2n_fips_validate_hash_algorithm(s2n_hash_algorithm hash_alg, bool *valid) { RESULT_ENSURE_REF(valid); *valid = false; for (size_t i = 0; i < s2n_array_len(fips_hash_algs); i++) { if (fips_hash_algs[i] == hash_alg) { *valid = true; return S2N_RESULT_OK; } } return S2N_RESULT_OK; } /* https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-52r2.pdf */ const uint8_t fips_cipher_suite_ianas[][2] = { /* 3.3.1.1.1 Cipher Suites for ECDSA Certificates */ { TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 }, { TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 }, { TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 }, { TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 }, { TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA }, { TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA }, /* 3.3.1.1.2 Cipher Suites for RSA Certificates */ { TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 }, { TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 }, { TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 }, { TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 }, { TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 }, { TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 }, { TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 }, { TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 }, { TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA }, { TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA }, { TLS_DHE_RSA_WITH_AES_128_CBC_SHA }, { TLS_DHE_RSA_WITH_AES_256_CBC_SHA }, /* 3.3.1.2 Cipher Suites for TLS 1.3 */ { TLS_AES_128_GCM_SHA256 }, { TLS_AES_256_GCM_SHA384 }, }; S2N_RESULT s2n_fips_validate_cipher_suite(const struct s2n_cipher_suite *cipher_suite, bool *valid) { RESULT_ENSURE_REF(cipher_suite); RESULT_ENSURE_REF(valid); *valid = false; for (size_t i = 0; i < s2n_array_len(fips_cipher_suite_ianas); i++) { if (fips_cipher_suite_ianas[i][0] != cipher_suite->iana_value[0]) { continue; } if (fips_cipher_suite_ianas[i][1] != cipher_suite->iana_value[1]) { continue; } *valid = true; return S2N_RESULT_OK; } return S2N_RESULT_OK; } S2N_RESULT s2n_fips_validate_signature_scheme(const struct s2n_signature_scheme *sig_alg, bool *valid) { RESULT_ENSURE_REF(sig_alg); RESULT_GUARD(s2n_fips_validate_hash_algorithm(sig_alg->hash_alg, valid)); return S2N_RESULT_OK; } /* https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Ar3.pdf */ const struct s2n_ecc_named_curve *fips_curves[] = { &s2n_ecc_curve_secp256r1, &s2n_ecc_curve_secp384r1, &s2n_ecc_curve_secp521r1, }; S2N_RESULT s2n_fips_validate_curve(const struct s2n_ecc_named_curve *curve, bool *valid) { RESULT_ENSURE_REF(curve); RESULT_ENSURE_REF(valid); *valid = false; for (size_t i = 0; i < s2n_array_len(fips_curves); i++) { if (fips_curves[i] == curve) { *valid = true; return S2N_RESULT_OK; } } return S2N_RESULT_OK; } S2N_RESULT s2n_fips_validate_version(uint8_t version, bool *valid) { RESULT_ENSURE_REF(valid); /* Technically FIPS 140-3 still allows TLS1.0 and TLS1.1 for some use cases, * but for simplicity s2n-tls does not. */ *valid = (version >= S2N_TLS12); return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_hash.c000066400000000000000000000563351456575232400221270ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. 
This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_hash.h" #include "crypto/s2n_fips.h" #include "crypto/s2n_hmac.h" #include "crypto/s2n_openssl.h" #include "error/s2n_errno.h" #include "utils/s2n_safety.h" static bool s2n_use_custom_md5_sha1() { #if defined(S2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH) return false; #else return true; #endif } static bool s2n_use_evp_impl() { return s2n_is_in_fips_mode(); } bool s2n_hash_evp_fully_supported() { return s2n_use_evp_impl() && !s2n_use_custom_md5_sha1(); } const EVP_MD *s2n_hash_alg_to_evp_md(s2n_hash_algorithm alg) { switch (alg) { case S2N_HASH_MD5: return EVP_md5(); case S2N_HASH_SHA1: return EVP_sha1(); case S2N_HASH_SHA224: return EVP_sha224(); case S2N_HASH_SHA256: return EVP_sha256(); case S2N_HASH_SHA384: return EVP_sha384(); case S2N_HASH_SHA512: return EVP_sha512(); #if defined(S2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH) case S2N_HASH_MD5_SHA1: return EVP_md5_sha1(); #endif default: return NULL; } } int s2n_hash_digest_size(s2n_hash_algorithm alg, uint8_t *out) { POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION); /* clang-format off */ switch (alg) { case S2N_HASH_NONE: *out = 0; break; case S2N_HASH_MD5: *out = MD5_DIGEST_LENGTH; break; case S2N_HASH_SHA1: *out = SHA_DIGEST_LENGTH; break; case S2N_HASH_SHA224: *out = SHA224_DIGEST_LENGTH; break; case S2N_HASH_SHA256: *out = SHA256_DIGEST_LENGTH; break; case S2N_HASH_SHA384: *out = SHA384_DIGEST_LENGTH; break; case S2N_HASH_SHA512: *out = SHA512_DIGEST_LENGTH; break; case S2N_HASH_MD5_SHA1: *out = MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH; break; default: POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } /* clang-format on */ return S2N_SUCCESS; } /* NOTE: s2n_hash_const_time_get_currently_in_hash_block takes advantage of the fact that * hash_block_size is a power of 2. This is true for all hashes we currently support * If this ever becomes untrue, this would require fixing*/ int s2n_hash_block_size(s2n_hash_algorithm alg, uint64_t *block_size) { POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(block_size, sizeof(*block_size)), S2N_ERR_PRECONDITION_VIOLATION); /* clang-format off */ switch (alg) { case S2N_HASH_NONE: *block_size = 64; break; case S2N_HASH_MD5: *block_size = 64; break; case S2N_HASH_SHA1: *block_size = 64; break; case S2N_HASH_SHA224: *block_size = 64; break; case S2N_HASH_SHA256: *block_size = 64; break; case S2N_HASH_SHA384: *block_size = 128; break; case S2N_HASH_SHA512: *block_size = 128; break; case S2N_HASH_MD5_SHA1: *block_size = 64; break; default: POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } /* clang-format on */ return S2N_SUCCESS; } /* Return true if hash algorithm is available, false otherwise. */ bool s2n_hash_is_available(s2n_hash_algorithm alg) { switch (alg) { case S2N_HASH_MD5: case S2N_HASH_MD5_SHA1: /* return false if in FIPS mode, as MD5 algs are not available in FIPS mode. 
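 * (s2n_hash_init() consults this check: an unavailable algorithm is rejected with
 * S2N_ERR_HASH_INVALID_ALGORITHM unless MD5 has been explicitly re-enabled for the
 * TLS 1.0/1.1 PRF via s2n_hash_allow_md5_for_fips().)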
*/ return !s2n_is_in_fips_mode(); case S2N_HASH_NONE: case S2N_HASH_SHA1: case S2N_HASH_SHA224: case S2N_HASH_SHA256: case S2N_HASH_SHA384: case S2N_HASH_SHA512: return true; case S2N_HASH_SENTINEL: return false; } return false; } int s2n_hash_is_ready_for_input(struct s2n_hash_state *state) { POSIX_PRECONDITION(s2n_hash_state_validate(state)); return state->is_ready_for_input; } static int s2n_low_level_hash_new(struct s2n_hash_state *state) { /* s2n_hash_new will always call the corresponding implementation of the s2n_hash * being used. For the s2n_low_level_hash implementation, new is a no-op. */ *state = (struct s2n_hash_state){ 0 }; return S2N_SUCCESS; } static int s2n_low_level_hash_init(struct s2n_hash_state *state, s2n_hash_algorithm alg) { switch (alg) { case S2N_HASH_NONE: break; case S2N_HASH_MD5: POSIX_GUARD_OSSL(MD5_Init(&state->digest.low_level.md5), S2N_ERR_HASH_INIT_FAILED); break; case S2N_HASH_SHA1: POSIX_GUARD_OSSL(SHA1_Init(&state->digest.low_level.sha1), S2N_ERR_HASH_INIT_FAILED); break; case S2N_HASH_SHA224: POSIX_GUARD_OSSL(SHA224_Init(&state->digest.low_level.sha224), S2N_ERR_HASH_INIT_FAILED); break; case S2N_HASH_SHA256: POSIX_GUARD_OSSL(SHA256_Init(&state->digest.low_level.sha256), S2N_ERR_HASH_INIT_FAILED); break; case S2N_HASH_SHA384: POSIX_GUARD_OSSL(SHA384_Init(&state->digest.low_level.sha384), S2N_ERR_HASH_INIT_FAILED); break; case S2N_HASH_SHA512: POSIX_GUARD_OSSL(SHA512_Init(&state->digest.low_level.sha512), S2N_ERR_HASH_INIT_FAILED); break; case S2N_HASH_MD5_SHA1: POSIX_GUARD_OSSL(SHA1_Init(&state->digest.low_level.md5_sha1.sha1), S2N_ERR_HASH_INIT_FAILED); POSIX_GUARD_OSSL(MD5_Init(&state->digest.low_level.md5_sha1.md5), S2N_ERR_HASH_INIT_FAILED); break; default: POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } state->alg = alg; state->is_ready_for_input = 1; state->currently_in_hash = 0; return 0; } static int s2n_low_level_hash_update(struct s2n_hash_state *state, const void *data, uint32_t size) { POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY); switch (state->alg) { case S2N_HASH_NONE: break; case S2N_HASH_MD5: POSIX_GUARD_OSSL(MD5_Update(&state->digest.low_level.md5, data, size), S2N_ERR_HASH_UPDATE_FAILED); break; case S2N_HASH_SHA1: POSIX_GUARD_OSSL(SHA1_Update(&state->digest.low_level.sha1, data, size), S2N_ERR_HASH_UPDATE_FAILED); break; case S2N_HASH_SHA224: POSIX_GUARD_OSSL(SHA224_Update(&state->digest.low_level.sha224, data, size), S2N_ERR_HASH_UPDATE_FAILED); break; case S2N_HASH_SHA256: POSIX_GUARD_OSSL(SHA256_Update(&state->digest.low_level.sha256, data, size), S2N_ERR_HASH_UPDATE_FAILED); break; case S2N_HASH_SHA384: POSIX_GUARD_OSSL(SHA384_Update(&state->digest.low_level.sha384, data, size), S2N_ERR_HASH_UPDATE_FAILED); break; case S2N_HASH_SHA512: POSIX_GUARD_OSSL(SHA512_Update(&state->digest.low_level.sha512, data, size), S2N_ERR_HASH_UPDATE_FAILED); break; case S2N_HASH_MD5_SHA1: POSIX_GUARD_OSSL(SHA1_Update(&state->digest.low_level.md5_sha1.sha1, data, size), S2N_ERR_HASH_UPDATE_FAILED); POSIX_GUARD_OSSL(MD5_Update(&state->digest.low_level.md5_sha1.md5, data, size), S2N_ERR_HASH_UPDATE_FAILED); break; default: POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } POSIX_ENSURE(size <= (UINT64_MAX - state->currently_in_hash), S2N_ERR_INTEGER_OVERFLOW); state->currently_in_hash += size; return S2N_SUCCESS; } static int s2n_low_level_hash_digest(struct s2n_hash_state *state, void *out, uint32_t size) { POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY); switch (state->alg) { case S2N_HASH_NONE: break; case S2N_HASH_MD5: 
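            /* For each algorithm, validate that the caller's buffer size equals the digest
             * length before calling the corresponding *_Final function. */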
POSIX_ENSURE_EQ(size, MD5_DIGEST_LENGTH); POSIX_GUARD_OSSL(MD5_Final(out, &state->digest.low_level.md5), S2N_ERR_HASH_DIGEST_FAILED); break; case S2N_HASH_SHA1: POSIX_ENSURE_EQ(size, SHA_DIGEST_LENGTH); POSIX_GUARD_OSSL(SHA1_Final(out, &state->digest.low_level.sha1), S2N_ERR_HASH_DIGEST_FAILED); break; case S2N_HASH_SHA224: POSIX_ENSURE_EQ(size, SHA224_DIGEST_LENGTH); POSIX_GUARD_OSSL(SHA224_Final(out, &state->digest.low_level.sha224), S2N_ERR_HASH_DIGEST_FAILED); break; case S2N_HASH_SHA256: POSIX_ENSURE_EQ(size, SHA256_DIGEST_LENGTH); POSIX_GUARD_OSSL(SHA256_Final(out, &state->digest.low_level.sha256), S2N_ERR_HASH_DIGEST_FAILED); break; case S2N_HASH_SHA384: POSIX_ENSURE_EQ(size, SHA384_DIGEST_LENGTH); POSIX_GUARD_OSSL(SHA384_Final(out, &state->digest.low_level.sha384), S2N_ERR_HASH_DIGEST_FAILED); break; case S2N_HASH_SHA512: POSIX_ENSURE_EQ(size, SHA512_DIGEST_LENGTH); POSIX_GUARD_OSSL(SHA512_Final(out, &state->digest.low_level.sha512), S2N_ERR_HASH_DIGEST_FAILED); break; case S2N_HASH_MD5_SHA1: POSIX_ENSURE_EQ(size, MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH); POSIX_GUARD_OSSL(SHA1_Final(((uint8_t *) out) + MD5_DIGEST_LENGTH, &state->digest.low_level.md5_sha1.sha1), S2N_ERR_HASH_DIGEST_FAILED); POSIX_GUARD_OSSL(MD5_Final(out, &state->digest.low_level.md5_sha1.md5), S2N_ERR_HASH_DIGEST_FAILED); break; default: POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } state->currently_in_hash = 0; state->is_ready_for_input = 0; return 0; } static int s2n_low_level_hash_copy(struct s2n_hash_state *to, struct s2n_hash_state *from) { POSIX_CHECKED_MEMCPY(to, from, sizeof(struct s2n_hash_state)); return 0; } static int s2n_low_level_hash_reset(struct s2n_hash_state *state) { /* hash_init resets the ready_for_input and currently_in_hash fields. */ return s2n_low_level_hash_init(state, state->alg); } static int s2n_low_level_hash_free(struct s2n_hash_state *state) { /* s2n_hash_free will always call the corresponding implementation of the s2n_hash * being used. For the s2n_low_level_hash implementation, free is a no-op. */ state->is_ready_for_input = 0; return S2N_SUCCESS; } static int s2n_evp_hash_new(struct s2n_hash_state *state) { POSIX_ENSURE_REF(state->digest.high_level.evp.ctx = S2N_EVP_MD_CTX_NEW()); if (s2n_use_custom_md5_sha1()) { POSIX_ENSURE_REF(state->digest.high_level.evp_md5_secondary.ctx = S2N_EVP_MD_CTX_NEW()); } state->is_ready_for_input = 0; state->currently_in_hash = 0; return S2N_SUCCESS; } static int s2n_evp_hash_allow_md5_for_fips(struct s2n_hash_state *state) { /* This is only to be used for s2n_hash_states that will require MD5 to be used * to comply with the TLS 1.0 and 1.1 RFC's for the PRF. MD5 cannot be used * outside of the TLS 1.0 and 1.1 PRF when in FIPS mode. When needed, this must * be called prior to s2n_hash_init(). 
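 *
 * A hedged usage sketch (variable names are illustrative, not taken from a real call site):
 *
 *     struct s2n_hash_state prf_hash = { 0 };
 *     s2n_hash_new(&prf_hash);
 *     s2n_hash_allow_md5_for_fips(&prf_hash);
 *     s2n_hash_init(&prf_hash, S2N_HASH_MD5);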
*/ POSIX_GUARD(s2n_digest_allow_md5_for_fips(&state->digest.high_level.evp)); if (s2n_use_custom_md5_sha1()) { POSIX_GUARD(s2n_digest_allow_md5_for_fips(&state->digest.high_level.evp_md5_secondary)); } return S2N_SUCCESS; } static int s2n_evp_hash_init(struct s2n_hash_state *state, s2n_hash_algorithm alg) { POSIX_ENSURE_REF(state->digest.high_level.evp.ctx); state->alg = alg; state->is_ready_for_input = 1; state->currently_in_hash = 0; if (alg == S2N_HASH_NONE) { return S2N_SUCCESS; } if (alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) { POSIX_ENSURE_REF(state->digest.high_level.evp_md5_secondary.ctx); POSIX_GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, EVP_sha1(), NULL), S2N_ERR_HASH_INIT_FAILED); POSIX_GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp_md5_secondary.ctx, EVP_md5(), NULL), S2N_ERR_HASH_INIT_FAILED); return S2N_SUCCESS; } POSIX_ENSURE_REF(s2n_hash_alg_to_evp_md(alg)); POSIX_GUARD_OSSL(EVP_DigestInit_ex(state->digest.high_level.evp.ctx, s2n_hash_alg_to_evp_md(alg), NULL), S2N_ERR_HASH_INIT_FAILED); return S2N_SUCCESS; } static int s2n_evp_hash_update(struct s2n_hash_state *state, const void *data, uint32_t size) { POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY); POSIX_ENSURE(size <= (UINT64_MAX - state->currently_in_hash), S2N_ERR_INTEGER_OVERFLOW); state->currently_in_hash += size; if (state->alg == S2N_HASH_NONE) { return S2N_SUCCESS; } POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp.ctx)); POSIX_GUARD_OSSL(EVP_DigestUpdate(state->digest.high_level.evp.ctx, data, size), S2N_ERR_HASH_UPDATE_FAILED); if (state->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) { POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp_md5_secondary.ctx)); POSIX_GUARD_OSSL(EVP_DigestUpdate(state->digest.high_level.evp_md5_secondary.ctx, data, size), S2N_ERR_HASH_UPDATE_FAILED); } return S2N_SUCCESS; } static int s2n_evp_hash_digest(struct s2n_hash_state *state, void *out, uint32_t size) { POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY); state->currently_in_hash = 0; state->is_ready_for_input = 0; unsigned int digest_size = size; uint8_t expected_digest_size = 0; POSIX_GUARD(s2n_hash_digest_size(state->alg, &expected_digest_size)); POSIX_ENSURE_EQ(digest_size, expected_digest_size); if (state->alg == S2N_HASH_NONE) { return S2N_SUCCESS; } POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp.ctx)); if (state->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) { POSIX_ENSURE_REF(EVP_MD_CTX_md(state->digest.high_level.evp_md5_secondary.ctx)); uint8_t sha1_digest_size = 0; POSIX_GUARD(s2n_hash_digest_size(S2N_HASH_SHA1, &sha1_digest_size)); unsigned int sha1_primary_digest_size = sha1_digest_size; unsigned int md5_secondary_digest_size = digest_size - sha1_primary_digest_size; POSIX_ENSURE(EVP_MD_CTX_size(state->digest.high_level.evp.ctx) <= sha1_digest_size, S2N_ERR_HASH_DIGEST_FAILED); POSIX_ENSURE((size_t) EVP_MD_CTX_size(state->digest.high_level.evp_md5_secondary.ctx) <= md5_secondary_digest_size, S2N_ERR_HASH_DIGEST_FAILED); POSIX_GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp.ctx, ((uint8_t *) out) + MD5_DIGEST_LENGTH, &sha1_primary_digest_size), S2N_ERR_HASH_DIGEST_FAILED); POSIX_GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp_md5_secondary.ctx, out, &md5_secondary_digest_size), S2N_ERR_HASH_DIGEST_FAILED); return S2N_SUCCESS; } POSIX_ENSURE((size_t) EVP_MD_CTX_size(state->digest.high_level.evp.ctx) <= digest_size, S2N_ERR_HASH_DIGEST_FAILED); 
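    /* Standard single-context path: EVP_DigestFinal_ex writes the digest into the caller's
     * buffer and updates digest_size with the number of bytes actually written. */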
POSIX_GUARD_OSSL(EVP_DigestFinal_ex(state->digest.high_level.evp.ctx, out, &digest_size), S2N_ERR_HASH_DIGEST_FAILED); return S2N_SUCCESS; } static int s2n_evp_hash_copy(struct s2n_hash_state *to, struct s2n_hash_state *from) { to->hash_impl = from->hash_impl; to->alg = from->alg; to->is_ready_for_input = from->is_ready_for_input; to->currently_in_hash = from->currently_in_hash; if (from->alg == S2N_HASH_NONE) { return S2N_SUCCESS; } POSIX_ENSURE_REF(to->digest.high_level.evp.ctx); POSIX_GUARD_OSSL(EVP_MD_CTX_copy_ex(to->digest.high_level.evp.ctx, from->digest.high_level.evp.ctx), S2N_ERR_HASH_COPY_FAILED); if (from->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) { POSIX_ENSURE_REF(to->digest.high_level.evp_md5_secondary.ctx); POSIX_GUARD_OSSL(EVP_MD_CTX_copy_ex(to->digest.high_level.evp_md5_secondary.ctx, from->digest.high_level.evp_md5_secondary.ctx), S2N_ERR_HASH_COPY_FAILED); } bool is_md5_allowed_for_fips = false; POSIX_GUARD_RESULT(s2n_digest_is_md5_allowed_for_fips(&from->digest.high_level.evp, &is_md5_allowed_for_fips)); if (is_md5_allowed_for_fips && (from->alg == S2N_HASH_MD5 || from->alg == S2N_HASH_MD5_SHA1)) { POSIX_GUARD(s2n_hash_allow_md5_for_fips(to)); } return S2N_SUCCESS; } static int s2n_evp_hash_reset(struct s2n_hash_state *state) { int reset_md5_for_fips = 0; bool is_md5_allowed_for_fips = false; POSIX_GUARD_RESULT(s2n_digest_is_md5_allowed_for_fips(&state->digest.high_level.evp, &is_md5_allowed_for_fips)); if ((state->alg == S2N_HASH_MD5 || state->alg == S2N_HASH_MD5_SHA1) && is_md5_allowed_for_fips) { reset_md5_for_fips = 1; } POSIX_GUARD_OSSL(S2N_EVP_MD_CTX_RESET(state->digest.high_level.evp.ctx), S2N_ERR_HASH_WIPE_FAILED); if (state->alg == S2N_HASH_MD5_SHA1 && s2n_use_custom_md5_sha1()) { POSIX_GUARD_OSSL(S2N_EVP_MD_CTX_RESET(state->digest.high_level.evp_md5_secondary.ctx), S2N_ERR_HASH_WIPE_FAILED); } if (reset_md5_for_fips) { POSIX_GUARD(s2n_hash_allow_md5_for_fips(state)); } /* hash_init resets the ready_for_input and currently_in_hash fields. */ return s2n_evp_hash_init(state, state->alg); } static int s2n_evp_hash_free(struct s2n_hash_state *state) { S2N_EVP_MD_CTX_FREE(state->digest.high_level.evp.ctx); state->digest.high_level.evp.ctx = NULL; if (s2n_use_custom_md5_sha1()) { S2N_EVP_MD_CTX_FREE(state->digest.high_level.evp_md5_secondary.ctx); state->digest.high_level.evp_md5_secondary.ctx = NULL; } state->is_ready_for_input = 0; return S2N_SUCCESS; } static const struct s2n_hash s2n_low_level_hash = { .alloc = &s2n_low_level_hash_new, .allow_md5_for_fips = NULL, .init = &s2n_low_level_hash_init, .update = &s2n_low_level_hash_update, .digest = &s2n_low_level_hash_digest, .copy = &s2n_low_level_hash_copy, .reset = &s2n_low_level_hash_reset, .free = &s2n_low_level_hash_free, }; static const struct s2n_hash s2n_evp_hash = { .alloc = &s2n_evp_hash_new, .allow_md5_for_fips = &s2n_evp_hash_allow_md5_for_fips, .init = &s2n_evp_hash_init, .update = &s2n_evp_hash_update, .digest = &s2n_evp_hash_digest, .copy = &s2n_evp_hash_copy, .reset = &s2n_evp_hash_reset, .free = &s2n_evp_hash_free, }; static int s2n_hash_set_impl(struct s2n_hash_state *state) { state->hash_impl = &s2n_low_level_hash; if (s2n_use_evp_impl()) { state->hash_impl = &s2n_evp_hash; } return S2N_SUCCESS; } int s2n_hash_new(struct s2n_hash_state *state) { POSIX_ENSURE_REF(state); /* Set hash_impl on initial hash creation. * When in FIPS mode, the EVP API's must be used for hashes. 
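 *
 * For orientation, the expected lifecycle of a hash state (a rough sketch; see for example
 * s2n_ecdsa_keys_match() in s2n_ecdsa.c for a real caller; data, data_size and digest_out
 * are illustrative names) is:
 *
 *     struct s2n_hash_state hash = { 0 };
 *     s2n_hash_new(&hash);
 *     s2n_hash_init(&hash, S2N_HASH_SHA256);
 *     s2n_hash_update(&hash, data, data_size);
 *     s2n_hash_digest(&hash, digest_out, SHA256_DIGEST_LENGTH);
 *     s2n_hash_free(&hash);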
*/ POSIX_GUARD(s2n_hash_set_impl(state)); POSIX_ENSURE_REF(state->hash_impl->alloc); POSIX_GUARD(state->hash_impl->alloc(state)); return S2N_SUCCESS; } S2N_RESULT s2n_hash_state_validate(struct s2n_hash_state *state) { RESULT_ENSURE_REF(state); return S2N_RESULT_OK; } int s2n_hash_allow_md5_for_fips(struct s2n_hash_state *state) { POSIX_ENSURE_REF(state); /* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe. * When in FIPS mode, the EVP API's must be used for hashes. */ POSIX_GUARD(s2n_hash_set_impl(state)); POSIX_ENSURE_REF(state->hash_impl->allow_md5_for_fips); return state->hash_impl->allow_md5_for_fips(state); } int s2n_hash_init(struct s2n_hash_state *state, s2n_hash_algorithm alg) { POSIX_ENSURE_REF(state); /* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe. * When in FIPS mode, the EVP API's must be used for hashes. */ POSIX_GUARD(s2n_hash_set_impl(state)); bool is_md5_allowed_for_fips = false; POSIX_GUARD_RESULT(s2n_digest_is_md5_allowed_for_fips(&state->digest.high_level.evp, &is_md5_allowed_for_fips)); if (s2n_hash_is_available(alg) || ((alg == S2N_HASH_MD5 || alg == S2N_HASH_MD5_SHA1) && is_md5_allowed_for_fips)) { /* s2n will continue to initialize an "unavailable" hash when s2n is in FIPS mode and * FIPS is forcing the hash to be made available. */ POSIX_ENSURE_REF(state->hash_impl->init); return state->hash_impl->init(state, alg); } else { POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } } int s2n_hash_update(struct s2n_hash_state *state, const void *data, uint32_t size) { POSIX_PRECONDITION(s2n_hash_state_validate(state)); POSIX_ENSURE(S2N_MEM_IS_READABLE(data, size), S2N_ERR_PRECONDITION_VIOLATION); POSIX_ENSURE_REF(state->hash_impl->update); return state->hash_impl->update(state, data, size); } int s2n_hash_digest(struct s2n_hash_state *state, void *out, uint32_t size) { POSIX_PRECONDITION(s2n_hash_state_validate(state)); POSIX_ENSURE(S2N_MEM_IS_READABLE(out, size), S2N_ERR_PRECONDITION_VIOLATION); POSIX_ENSURE_REF(state->hash_impl->digest); return state->hash_impl->digest(state, out, size); } int s2n_hash_copy(struct s2n_hash_state *to, struct s2n_hash_state *from) { POSIX_PRECONDITION(s2n_hash_state_validate(to)); POSIX_PRECONDITION(s2n_hash_state_validate(from)); POSIX_ENSURE_REF(from->hash_impl->copy); return from->hash_impl->copy(to, from); } int s2n_hash_reset(struct s2n_hash_state *state) { POSIX_ENSURE_REF(state); /* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe. * When in FIPS mode, the EVP API's must be used for hashes. */ POSIX_GUARD(s2n_hash_set_impl(state)); POSIX_ENSURE_REF(state->hash_impl->reset); return state->hash_impl->reset(state); } int s2n_hash_free(struct s2n_hash_state *state) { if (state == NULL) { return S2N_SUCCESS; } /* Ensure that hash_impl is set, as it may have been reset for s2n_hash_state on s2n_connection_wipe. * When in FIPS mode, the EVP API's must be used for hashes. 
*/ POSIX_GUARD(s2n_hash_set_impl(state)); POSIX_ENSURE_REF(state->hash_impl->free); return state->hash_impl->free(state); } int s2n_hash_get_currently_in_hash_total(struct s2n_hash_state *state, uint64_t *out) { POSIX_PRECONDITION(s2n_hash_state_validate(state)); POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION); POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY); *out = state->currently_in_hash; return S2N_SUCCESS; } /* Calculate, in constant time, the number of bytes currently in the hash_block */ int s2n_hash_const_time_get_currently_in_hash_block(struct s2n_hash_state *state, uint64_t *out) { POSIX_PRECONDITION(s2n_hash_state_validate(state)); POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION); POSIX_ENSURE(state->is_ready_for_input, S2N_ERR_HASH_NOT_READY); uint64_t hash_block_size; POSIX_GUARD(s2n_hash_block_size(state->alg, &hash_block_size)); /* Requires that hash_block_size is a power of 2. This is true for all hashes we currently support * If this ever becomes untrue, this would require fixing this*/ *out = state->currently_in_hash & (hash_block_size - 1); return S2N_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_hash.h000066400000000000000000000076631456575232400221340ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include #include #include #include "crypto/s2n_evp.h" #define S2N_MAX_DIGEST_LEN SHA512_DIGEST_LENGTH typedef enum { S2N_HASH_NONE = 0, S2N_HASH_MD5, S2N_HASH_SHA1, S2N_HASH_SHA224, S2N_HASH_SHA256, S2N_HASH_SHA384, S2N_HASH_SHA512, S2N_HASH_MD5_SHA1, /* Don't add any hash algorithms below S2N_HASH_SENTINEL */ S2N_HASH_SENTINEL } s2n_hash_algorithm; /* The low_level_digest stores all OpenSSL structs that are alg-specific to be used with OpenSSL's low-level hash API's. */ union s2n_hash_low_level_digest { MD5_CTX md5; SHA_CTX sha1; SHA256_CTX sha224; SHA256_CTX sha256; SHA512_CTX sha384; SHA512_CTX sha512; struct { MD5_CTX md5; SHA_CTX sha1; } md5_sha1; }; /* The evp_digest stores all OpenSSL structs to be used with OpenSSL's EVP hash API's. */ struct s2n_hash_evp_digest { struct s2n_evp_digest evp; /* Always store a secondary evp_digest to allow resetting a hash_state to MD5_SHA1 from another alg. */ struct s2n_evp_digest evp_md5_secondary; }; /* s2n_hash_state stores the s2n_hash implementation being used (low-level or EVP), * the hash algorithm being used at the time, and either low_level or high_level (EVP) OpenSSL digest structs. */ struct s2n_hash_state { const struct s2n_hash *hash_impl; s2n_hash_algorithm alg; uint8_t is_ready_for_input; uint64_t currently_in_hash; union { union s2n_hash_low_level_digest low_level; struct s2n_hash_evp_digest high_level; } digest; }; /* The s2n hash implementation is abstracted to allow for separate implementations, using * either OpenSSL's low-level algorithm-specific API's or OpenSSL's EVP API's. 
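 *
 * Each s2n_hash_state carries a pointer to one of these implementation tables, and the
 * public s2n_hash_* entry points simply dispatch through it; for example, s2n_hash_update()
 * ends with: return state->hash_impl->update(state, data, size);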
*/ struct s2n_hash { int (*alloc)(struct s2n_hash_state *state); int (*allow_md5_for_fips)(struct s2n_hash_state *state); int (*init)(struct s2n_hash_state *state, s2n_hash_algorithm alg); int (*update)(struct s2n_hash_state *state, const void *data, uint32_t size); int (*digest)(struct s2n_hash_state *state, void *out, uint32_t size); int (*copy)(struct s2n_hash_state *to, struct s2n_hash_state *from); int (*reset)(struct s2n_hash_state *state); int (*free)(struct s2n_hash_state *state); }; bool s2n_hash_evp_fully_supported(); const EVP_MD *s2n_hash_alg_to_evp_md(s2n_hash_algorithm alg); int s2n_hash_digest_size(s2n_hash_algorithm alg, uint8_t *out); int s2n_hash_block_size(s2n_hash_algorithm alg, uint64_t *block_size); bool s2n_hash_is_available(s2n_hash_algorithm alg); int s2n_hash_is_ready_for_input(struct s2n_hash_state *state); int s2n_hash_new(struct s2n_hash_state *state); S2N_RESULT s2n_hash_state_validate(struct s2n_hash_state *state); int s2n_hash_allow_md5_for_fips(struct s2n_hash_state *state); int s2n_hash_init(struct s2n_hash_state *state, s2n_hash_algorithm alg); int s2n_hash_update(struct s2n_hash_state *state, const void *data, uint32_t size); int s2n_hash_digest(struct s2n_hash_state *state, void *out, uint32_t size); int s2n_hash_copy(struct s2n_hash_state *to, struct s2n_hash_state *from); int s2n_hash_reset(struct s2n_hash_state *state); int s2n_hash_free(struct s2n_hash_state *state); int s2n_hash_get_currently_in_hash_total(struct s2n_hash_state *state, uint64_t *out); int s2n_hash_const_time_get_currently_in_hash_block(struct s2n_hash_state *state, uint64_t *out); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_hkdf.c000066400000000000000000000260601456575232400221100ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include "crypto/s2n_hkdf.h" #include "crypto/s2n_fips.h" #include "crypto/s2n_hmac.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" #include "utils/s2n_mem.h" #include "utils/s2n_safety.h" #ifdef S2N_LIBCRYPTO_SUPPORTS_HKDF #include #endif #define MAX_DIGEST_SIZE 64 /* Current highest is SHA512 */ #define MAX_HKDF_ROUNDS 255 /* Reference: RFC 5869 */ struct s2n_hkdf_impl { int (*hkdf)(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, const struct s2n_blob *info, struct s2n_blob *output); int (*hkdf_extract)(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, struct s2n_blob *pseudo_rand_key); int (*hkdf_expand)(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *pseudo_rand_key, const struct s2n_blob *info, struct s2n_blob *output); }; static int s2n_custom_hkdf_extract(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, struct s2n_blob *pseudo_rand_key) { uint8_t hmac_size = 0; POSIX_GUARD(s2n_hmac_digest_size(alg, &hmac_size)); POSIX_ENSURE(hmac_size <= pseudo_rand_key->size, S2N_ERR_HKDF_OUTPUT_SIZE); pseudo_rand_key->size = hmac_size; POSIX_GUARD(s2n_hmac_init(hmac, alg, salt->data, salt->size)); POSIX_GUARD(s2n_hmac_update(hmac, key->data, key->size)); POSIX_GUARD(s2n_hmac_digest(hmac, pseudo_rand_key->data, pseudo_rand_key->size)); POSIX_GUARD(s2n_hmac_reset(hmac)); return S2N_SUCCESS; } static int s2n_custom_hkdf_expand(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *pseudo_rand_key, const struct s2n_blob *info, struct s2n_blob *output) { uint8_t prev[MAX_DIGEST_SIZE] = { 0 }; uint32_t done_len = 0; uint8_t hash_len = 0; POSIX_GUARD(s2n_hmac_digest_size(alg, &hash_len)); POSIX_ENSURE_GT(hash_len, 0); uint32_t total_rounds = output->size / hash_len; if (output->size % hash_len) { total_rounds++; } POSIX_ENSURE(total_rounds > 0, S2N_ERR_HKDF_OUTPUT_SIZE); POSIX_ENSURE(total_rounds <= MAX_HKDF_ROUNDS, S2N_ERR_HKDF_OUTPUT_SIZE); for (uint32_t curr_round = 1; curr_round <= total_rounds; curr_round++) { uint32_t cat_len; POSIX_GUARD(s2n_hmac_init(hmac, alg, pseudo_rand_key->data, pseudo_rand_key->size)); if (curr_round != 1) { POSIX_GUARD(s2n_hmac_update(hmac, prev, hash_len)); } POSIX_GUARD(s2n_hmac_update(hmac, info->data, info->size)); POSIX_GUARD(s2n_hmac_update(hmac, &curr_round, 1)); POSIX_GUARD(s2n_hmac_digest(hmac, prev, hash_len)); cat_len = hash_len; if (done_len + hash_len > output->size) { cat_len = output->size - done_len; } POSIX_CHECKED_MEMCPY(output->data + done_len, prev, cat_len); done_len += cat_len; POSIX_GUARD(s2n_hmac_reset(hmac)); } return S2N_SUCCESS; } static int s2n_custom_hkdf(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, const struct s2n_blob *info, struct s2n_blob *output) { uint8_t prk_pad[MAX_DIGEST_SIZE] = { 0 }; struct s2n_blob pseudo_rand_key = { 0 }; POSIX_GUARD(s2n_blob_init(&pseudo_rand_key, prk_pad, sizeof(prk_pad))); POSIX_GUARD(s2n_custom_hkdf_extract(hmac, alg, salt, key, &pseudo_rand_key)); POSIX_GUARD(s2n_custom_hkdf_expand(hmac, alg, &pseudo_rand_key, info, output)); return S2N_SUCCESS; } const struct s2n_hkdf_impl s2n_custom_hkdf_impl = { .hkdf = &s2n_custom_hkdf, .hkdf_extract = &s2n_custom_hkdf_extract, .hkdf_expand = &s2n_custom_hkdf_expand, }; #ifdef S2N_LIBCRYPTO_SUPPORTS_HKDF static int 
s2n_libcrypto_hkdf_extract(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, struct s2n_blob *pseudo_rand_key) { const EVP_MD *digest = NULL; POSIX_GUARD_RESULT(s2n_hmac_md_from_alg(alg, &digest)); /* The out_len argument of HKDF_extract is set to the number of bytes written to out_key, and * is not used to ensure that out_key is large enough to contain the PRK. Ensure that the PRK * output will fit in the blob. */ uint8_t hmac_size = 0; POSIX_GUARD(s2n_hmac_digest_size(alg, &hmac_size)); POSIX_ENSURE(hmac_size <= pseudo_rand_key->size, S2N_ERR_HKDF_OUTPUT_SIZE); size_t bytes_written = 0; POSIX_GUARD_OSSL(HKDF_extract(pseudo_rand_key->data, &bytes_written, digest, key->data, key->size, salt->data, salt->size), S2N_ERR_HKDF); /* HKDF_extract updates the out_len argument based on the digest size. Update the blob's size based on this. */ POSIX_ENSURE_LTE(bytes_written, pseudo_rand_key->size); pseudo_rand_key->size = bytes_written; return S2N_SUCCESS; } static int s2n_libcrypto_hkdf_expand(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *pseudo_rand_key, const struct s2n_blob *info, struct s2n_blob *output) { POSIX_ENSURE(output->size > 0, S2N_ERR_HKDF_OUTPUT_SIZE); const EVP_MD *digest = NULL; POSIX_GUARD_RESULT(s2n_hmac_md_from_alg(alg, &digest)); POSIX_GUARD_OSSL(HKDF_expand(output->data, output->size, digest, pseudo_rand_key->data, pseudo_rand_key->size, info->data, info->size), S2N_ERR_HKDF); return S2N_SUCCESS; } static int s2n_libcrypto_hkdf(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, const struct s2n_blob *info, struct s2n_blob *output) { POSIX_ENSURE(output->size > 0, S2N_ERR_HKDF_OUTPUT_SIZE); const EVP_MD *digest = NULL; POSIX_GUARD_RESULT(s2n_hmac_md_from_alg(alg, &digest)); POSIX_GUARD_OSSL(HKDF(output->data, output->size, digest, key->data, key->size, salt->data, salt->size, info->data, info->size), S2N_ERR_HKDF); return S2N_SUCCESS; } #else static int s2n_libcrypto_hkdf_extract(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, struct s2n_blob *pseudo_rand_key) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); } static int s2n_libcrypto_hkdf_expand(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *pseudo_rand_key, const struct s2n_blob *info, struct s2n_blob *output) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); } static int s2n_libcrypto_hkdf(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, const struct s2n_blob *info, struct s2n_blob *output) { POSIX_BAIL(S2N_ERR_UNIMPLEMENTED); } #endif /* S2N_LIBCRYPTO_SUPPORTS_HKDF */ const struct s2n_hkdf_impl s2n_libcrypto_hkdf_impl = { .hkdf = &s2n_libcrypto_hkdf, .hkdf_extract = &s2n_libcrypto_hkdf_extract, .hkdf_expand = &s2n_libcrypto_hkdf_expand, }; static const struct s2n_hkdf_impl *s2n_get_hkdf_implementation() { /* By default, s2n-tls uses a custom HKDF implementation. When operating in FIPS mode, the * FIPS-validated libcrypto implementation is used instead, if an implementation is provided. 
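 *
 * Both implementations compute RFC 5869 HKDF, i.e. output = HKDF-Expand(HKDF-Extract(salt, ikm), info, L).
 * A hedged caller-side sketch (blob initialization and error handling omitted; the blob
 * names are illustrative):
 *
 *     struct s2n_hmac_state hmac = { 0 };
 *     s2n_hmac_new(&hmac);
 *     s2n_hkdf(&hmac, S2N_HMAC_SHA256, &salt, &input_key, &info, &output);
 *     s2n_hmac_free(&hmac);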
*/ if (s2n_is_in_fips_mode() && s2n_libcrypto_supports_hkdf()) { return &s2n_libcrypto_hkdf_impl; } return &s2n_custom_hkdf_impl; } int s2n_hkdf_extract(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, struct s2n_blob *pseudo_rand_key) { POSIX_ENSURE_REF(hmac); POSIX_ENSURE_REF(salt); POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(pseudo_rand_key); const struct s2n_hkdf_impl *hkdf_implementation = s2n_get_hkdf_implementation(); POSIX_ENSURE_REF(hkdf_implementation); POSIX_GUARD(hkdf_implementation->hkdf_extract(hmac, alg, salt, key, pseudo_rand_key)); return S2N_SUCCESS; } static int s2n_hkdf_expand(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *pseudo_rand_key, const struct s2n_blob *info, struct s2n_blob *output) { POSIX_ENSURE_REF(hmac); POSIX_ENSURE_REF(pseudo_rand_key); POSIX_ENSURE_REF(info); POSIX_ENSURE_REF(output); const struct s2n_hkdf_impl *hkdf_implementation = s2n_get_hkdf_implementation(); POSIX_ENSURE_REF(hkdf_implementation); POSIX_GUARD(hkdf_implementation->hkdf_expand(hmac, alg, pseudo_rand_key, info, output)); return S2N_SUCCESS; } int s2n_hkdf_expand_label(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *secret, const struct s2n_blob *label, const struct s2n_blob *context, struct s2n_blob *output) { POSIX_ENSURE_REF(label); POSIX_ENSURE_REF(context); POSIX_ENSURE_REF(output); /* Per RFC8446: 7.1, a HKDF label is a 2 byte length field, and two 1...255 byte arrays with a one byte length field each. */ uint8_t hkdf_label_buf[2 + 256 + 256]; struct s2n_blob hkdf_label_blob = { 0 }; struct s2n_stuffer hkdf_label = { 0 }; POSIX_ENSURE_LTE(label->size, S2N_MAX_HKDF_EXPAND_LABEL_LENGTH); POSIX_GUARD(s2n_blob_init(&hkdf_label_blob, hkdf_label_buf, sizeof(hkdf_label_buf))); POSIX_GUARD(s2n_stuffer_init(&hkdf_label, &hkdf_label_blob)); POSIX_GUARD(s2n_stuffer_write_uint16(&hkdf_label, output->size)); POSIX_GUARD(s2n_stuffer_write_uint8(&hkdf_label, label->size + sizeof("tls13 ") - 1)); POSIX_GUARD(s2n_stuffer_write_str(&hkdf_label, "tls13 ")); POSIX_GUARD(s2n_stuffer_write(&hkdf_label, label)); POSIX_GUARD(s2n_stuffer_write_uint8(&hkdf_label, context->size)); POSIX_GUARD(s2n_stuffer_write(&hkdf_label, context)); hkdf_label_blob.size = s2n_stuffer_data_available(&hkdf_label); POSIX_GUARD(s2n_hkdf_expand(hmac, alg, secret, &hkdf_label_blob, output)); return S2N_SUCCESS; } int s2n_hkdf(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, const struct s2n_blob *info, struct s2n_blob *output) { POSIX_ENSURE_REF(hmac); POSIX_ENSURE_REF(salt); POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(info); POSIX_ENSURE_REF(output); const struct s2n_hkdf_impl *hkdf_implementation = s2n_get_hkdf_implementation(); POSIX_ENSURE_REF(hkdf_implementation); POSIX_GUARD(hkdf_implementation->hkdf(hmac, alg, salt, key, info, output)); return S2N_SUCCESS; } bool s2n_libcrypto_supports_hkdf() { #ifdef S2N_LIBCRYPTO_SUPPORTS_HKDF return true; #else return false; #endif } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_hkdf.h000066400000000000000000000032141456575232400221110ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. 
This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include "crypto/s2n_hmac.h" #include "utils/s2n_blob.h" /* * Label structure is `opaque label<7..255> = "tls13 " + Label` per RFC8446. * So, we have 255-sizeof("tls13 ") = 249, the maximum label length. * * Note that all labels defined by RFC 8446 are <12 characters, which * avoids an extra hash iteration. However, the exporter functionality * (s2n_connection_tls_exporter) allows for longer labels. */ #define S2N_MAX_HKDF_EXPAND_LABEL_LENGTH 249 int s2n_hkdf(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, const struct s2n_blob *info, struct s2n_blob *output); int s2n_hkdf_extract(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *salt, const struct s2n_blob *key, struct s2n_blob *pseudo_rand_key); int s2n_hkdf_expand_label(struct s2n_hmac_state *hmac, s2n_hmac_algorithm alg, const struct s2n_blob *secret, const struct s2n_blob *label, const struct s2n_blob *context, struct s2n_blob *output); bool s2n_libcrypto_supports_hkdf(); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_hmac.c000066400000000000000000000404011456575232400220770ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* this file is patched by Sidetrail, clang-format invalidates patches */ /* clang-format off */ #include #include #include "error/s2n_errno.h" #include "crypto/s2n_hmac.h" #include "crypto/s2n_hash.h" #include "crypto/s2n_fips.h" #include "utils/s2n_safety.h" #include "utils/s2n_blob.h" #include "utils/s2n_mem.h" #include int s2n_hash_hmac_alg(s2n_hash_algorithm hash_alg, s2n_hmac_algorithm *out) { POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION); switch(hash_alg) { case S2N_HASH_NONE: *out = S2N_HMAC_NONE; break; case S2N_HASH_MD5: *out = S2N_HMAC_MD5; break; case S2N_HASH_SHA1: *out = S2N_HMAC_SHA1; break; case S2N_HASH_SHA224: *out = S2N_HMAC_SHA224; break; case S2N_HASH_SHA256: *out = S2N_HMAC_SHA256; break; case S2N_HASH_SHA384: *out = S2N_HMAC_SHA384; break; case S2N_HASH_SHA512: *out = S2N_HMAC_SHA512; break; case S2N_HASH_MD5_SHA1: /* Fall through ... 
*/ default: POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } return S2N_SUCCESS; } int s2n_hmac_hash_alg(s2n_hmac_algorithm hmac_alg, s2n_hash_algorithm *out) { POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(out, sizeof(*out)), S2N_ERR_PRECONDITION_VIOLATION); switch(hmac_alg) { case S2N_HMAC_NONE: *out = S2N_HASH_NONE; break; case S2N_HMAC_MD5: *out = S2N_HASH_MD5; break; case S2N_HMAC_SHA1: *out = S2N_HASH_SHA1; break; case S2N_HMAC_SHA224: *out = S2N_HASH_SHA224; break; case S2N_HMAC_SHA256: *out = S2N_HASH_SHA256; break; case S2N_HMAC_SHA384: *out = S2N_HASH_SHA384; break; case S2N_HMAC_SHA512: *out = S2N_HASH_SHA512; break; case S2N_HMAC_SSLv3_MD5: *out = S2N_HASH_MD5; break; case S2N_HMAC_SSLv3_SHA1: *out = S2N_HASH_SHA1; break; default: POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM); } return S2N_SUCCESS; } int s2n_hmac_digest_size(s2n_hmac_algorithm hmac_alg, uint8_t *out) { s2n_hash_algorithm hash_alg; POSIX_GUARD(s2n_hmac_hash_alg(hmac_alg, &hash_alg)); POSIX_GUARD(s2n_hash_digest_size(hash_alg, out)); return S2N_SUCCESS; } /* Return 1 if hmac algorithm is available, 0 otherwise. */ bool s2n_hmac_is_available(s2n_hmac_algorithm hmac_alg) { switch(hmac_alg) { case S2N_HMAC_MD5: case S2N_HMAC_SSLv3_MD5: case S2N_HMAC_SSLv3_SHA1: /* Some libcryptos, such as OpenSSL, disable MD5 by default when in FIPS mode, which is * required in order to negotiate SSLv3. However, this is supported in AWS-LC. */ return !s2n_is_in_fips_mode() || s2n_libcrypto_is_awslc(); case S2N_HMAC_NONE: case S2N_HMAC_SHA1: case S2N_HMAC_SHA224: case S2N_HMAC_SHA256: case S2N_HMAC_SHA384: case S2N_HMAC_SHA512: return true; } return false; } static int s2n_sslv3_mac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm alg, const void *key, uint32_t klen) { for (int i = 0; i < state->xor_pad_size; i++) { state->xor_pad[i] = 0x36; } POSIX_GUARD(s2n_hash_update(&state->inner_just_key, key, klen)); POSIX_GUARD(s2n_hash_update(&state->inner_just_key, state->xor_pad, state->xor_pad_size)); for (int i = 0; i < state->xor_pad_size; i++) { state->xor_pad[i] = 0x5c; } POSIX_GUARD(s2n_hash_update(&state->outer_just_key, key, klen)); POSIX_GUARD(s2n_hash_update(&state->outer_just_key, state->xor_pad, state->xor_pad_size)); return S2N_SUCCESS; } static int s2n_tls_hmac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm alg, const void *key, uint32_t klen) { memset(&state->xor_pad, 0, sizeof(state->xor_pad)); if (klen > state->xor_pad_size) { POSIX_GUARD(s2n_hash_update(&state->outer, key, klen)); POSIX_GUARD(s2n_hash_digest(&state->outer, state->digest_pad, state->digest_size)); POSIX_CHECKED_MEMCPY(state->xor_pad, state->digest_pad, state->digest_size); } else { POSIX_CHECKED_MEMCPY(state->xor_pad, key, klen); } for (int i = 0; i < state->xor_pad_size; i++) { state->xor_pad[i] ^= 0x36; } POSIX_GUARD(s2n_hash_update(&state->inner_just_key, state->xor_pad, state->xor_pad_size)); /* 0x36 xor 0x5c == 0x6a */ for (int i = 0; i < state->xor_pad_size; i++) { state->xor_pad[i] ^= 0x6a; } POSIX_GUARD(s2n_hash_update(&state->outer_just_key, state->xor_pad, state->xor_pad_size)); return S2N_SUCCESS; } int s2n_hmac_xor_pad_size(s2n_hmac_algorithm hmac_alg, uint16_t *xor_pad_size) { POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(xor_pad_size, sizeof(*xor_pad_size)), S2N_ERR_PRECONDITION_VIOLATION); switch(hmac_alg) { case S2N_HMAC_NONE: *xor_pad_size = 64; break; case S2N_HMAC_MD5: *xor_pad_size = 64; break; case S2N_HMAC_SHA1: *xor_pad_size = 64; break; case S2N_HMAC_SHA224: *xor_pad_size = 64; break; case S2N_HMAC_SHA256: *xor_pad_size = 64; break; case 
S2N_HMAC_SHA384: *xor_pad_size = 128; break; case S2N_HMAC_SHA512: *xor_pad_size = 128; break; case S2N_HMAC_SSLv3_MD5: *xor_pad_size = 48; break; case S2N_HMAC_SSLv3_SHA1: *xor_pad_size = 40; break; default: POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM); } return S2N_SUCCESS; } int s2n_hmac_hash_block_size(s2n_hmac_algorithm hmac_alg, uint16_t *block_size) { POSIX_ENSURE(S2N_MEM_IS_WRITABLE_CHECK(block_size, sizeof(*block_size)), S2N_ERR_PRECONDITION_VIOLATION); switch(hmac_alg) { case S2N_HMAC_NONE: *block_size = 64; break; case S2N_HMAC_MD5: *block_size = 64; break; case S2N_HMAC_SHA1: *block_size = 64; break; case S2N_HMAC_SHA224: *block_size = 64; break; case S2N_HMAC_SHA256: *block_size = 64; break; case S2N_HMAC_SHA384: *block_size = 128; break; case S2N_HMAC_SHA512: *block_size = 128; break; case S2N_HMAC_SSLv3_MD5: *block_size = 64; break; case S2N_HMAC_SSLv3_SHA1: *block_size = 64; break; default: POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM); } return S2N_SUCCESS; } int s2n_hmac_new(struct s2n_hmac_state *state) { POSIX_ENSURE_REF(state); POSIX_GUARD(s2n_hash_new(&state->inner)); POSIX_GUARD(s2n_hash_new(&state->inner_just_key)); POSIX_GUARD(s2n_hash_new(&state->outer)); POSIX_GUARD(s2n_hash_new(&state->outer_just_key)); POSIX_POSTCONDITION(s2n_hmac_state_validate(state)); return S2N_SUCCESS; } S2N_RESULT s2n_hmac_state_validate(struct s2n_hmac_state *state) { RESULT_ENSURE_REF(state); RESULT_GUARD(s2n_hash_state_validate(&state->inner)); RESULT_GUARD(s2n_hash_state_validate(&state->inner_just_key)); RESULT_GUARD(s2n_hash_state_validate(&state->outer)); RESULT_GUARD(s2n_hash_state_validate(&state->outer_just_key)); return S2N_RESULT_OK; } int s2n_hmac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm alg, const void *key, uint32_t klen) { POSIX_ENSURE_REF(state); if (!s2n_hmac_is_available(alg)) { /* Prevent hmacs from being used if they are not available. */ POSIX_BAIL(S2N_ERR_HMAC_INVALID_ALGORITHM); } state->alg = alg; POSIX_GUARD(s2n_hmac_hash_block_size(alg, &state->hash_block_size)); state->currently_in_hash_block = 0; POSIX_GUARD(s2n_hmac_xor_pad_size(alg, &state->xor_pad_size)); POSIX_GUARD(s2n_hmac_digest_size(alg, &state->digest_size)); POSIX_ENSURE_GTE(sizeof(state->xor_pad), state->xor_pad_size); POSIX_ENSURE_GTE(sizeof(state->digest_pad), state->digest_size); /* key needs to be as large as the biggest block size */ POSIX_ENSURE_GTE(sizeof(state->xor_pad), state->hash_block_size); s2n_hash_algorithm hash_alg; POSIX_GUARD(s2n_hmac_hash_alg(alg, &hash_alg)); POSIX_GUARD(s2n_hash_init(&state->inner, hash_alg)); POSIX_GUARD(s2n_hash_init(&state->inner_just_key, hash_alg)); POSIX_GUARD(s2n_hash_init(&state->outer, hash_alg)); POSIX_GUARD(s2n_hash_init(&state->outer_just_key, hash_alg)); if (alg == S2N_HMAC_SSLv3_SHA1 || alg == S2N_HMAC_SSLv3_MD5) { POSIX_GUARD(s2n_sslv3_mac_init(state, alg, key, klen)); } else { POSIX_GUARD(s2n_tls_hmac_init(state, alg, key, klen)); } /* Once we have produced inner_just_key and outer_just_key, don't need the key material in xor_pad, so wipe it. * Since xor_pad is used as a source of bytes in s2n_hmac_digest_two_compression_rounds, * this also prevents uninitilized bytes being used. 
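 * (For the TLS-style init above, xor_pad still holds the key XORed with the 0x5c outer pad
 * at this point, which trivially reveals the key material; the SSLv3 variant stores only the
 * constant pad bytes.)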
*/ memset(&state->xor_pad, 0, sizeof(state->xor_pad)); POSIX_GUARD(s2n_hmac_reset(state)); return S2N_SUCCESS; } int s2n_hmac_update(struct s2n_hmac_state *state, const void *in, uint32_t size) { POSIX_PRECONDITION(s2n_hmac_state_validate(state)); POSIX_ENSURE(state->hash_block_size != 0, S2N_ERR_PRECONDITION_VIOLATION); /* Keep track of how much of the current hash block is full * * Why the 4294949760 constant in this code? 4294949760 is the highest 32-bit * value that is congruent to 0 modulo all of our HMAC block sizes, that is also * at least 16k smaller than 2^32. It therefore has no effect on the mathematical * result, and no valid record size can cause it to overflow. * * The value was found with the following python code; * * x = (2 ** 32) - (2 ** 14) * while True: * if x % 40 | x % 48 | x % 64 | x % 128 == 0: * break * x -= 1 * print x * * What it does do however is ensure that the mod operation takes a * constant number of instruction cycles, regardless of the size of the * input. On some platforms, including Intel, the operation can take a * smaller number of cycles if the input is "small". */ const uint32_t HIGHEST_32_BIT = 4294949760; POSIX_ENSURE(size <= (UINT32_MAX - HIGHEST_32_BIT), S2N_ERR_INTEGER_OVERFLOW); uint32_t value = (HIGHEST_32_BIT + size) % state->hash_block_size; POSIX_GUARD(s2n_add_overflow(state->currently_in_hash_block, value, &state->currently_in_hash_block)); state->currently_in_hash_block %= state->hash_block_size; return s2n_hash_update(&state->inner, in, size); } int s2n_hmac_digest(struct s2n_hmac_state *state, void *out, uint32_t size) { POSIX_PRECONDITION(s2n_hmac_state_validate(state)); POSIX_GUARD(s2n_hash_digest(&state->inner, state->digest_pad, state->digest_size)); POSIX_GUARD(s2n_hash_copy(&state->outer, &state->outer_just_key)); POSIX_GUARD(s2n_hash_update(&state->outer, state->digest_pad, state->digest_size)); return s2n_hash_digest(&state->outer, out, size); } int s2n_hmac_digest_two_compression_rounds(struct s2n_hmac_state *state, void *out, uint32_t size) { /* Do the "real" work of this function. */ POSIX_GUARD(s2n_hmac_digest(state, out, size)); /* If there were 9 or more bytes of space left in the current hash block * then the serialized length, plus an 0x80 byte, will have fit in that block. * If there were fewer than 9 then adding the length will have caused an extra * compression block round. This digest function always does two compression rounds, * even if there is no need for the second. * * 17 bytes if the block size is 128. */ const uint8_t space_left = (state->hash_block_size == 128) ? 17 : 9; if ((int64_t)state->currently_in_hash_block > (state->hash_block_size - space_left)) { return S2N_SUCCESS; } /* Can't reuse a hash after it has been finalized, so reset and push another block in */ POSIX_GUARD(s2n_hash_reset(&state->inner)); /* No-op s2n_hash_update to normalize timing and guard against Lucky13. This does not affect the value of *out. 
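 * For example, with a 64-byte block: at 60 buffered bytes the 0x80 pad byte plus the 8-byte
 * length do not fit in the 4 remaining bytes, so finalization already cost a second compression
 * and the early return above was taken; at 10 buffered bytes finalization fit in a single
 * compression, so this extra full-block update supplies the balancing second round and both
 * paths perform the same amount of work.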
*/ return s2n_hash_update(&state->inner, state->xor_pad, state->hash_block_size); } int s2n_hmac_free(struct s2n_hmac_state *state) { if (state) { POSIX_GUARD(s2n_hash_free(&state->inner)); POSIX_GUARD(s2n_hash_free(&state->inner_just_key)); POSIX_GUARD(s2n_hash_free(&state->outer)); POSIX_GUARD(s2n_hash_free(&state->outer_just_key)); } return S2N_SUCCESS; } int s2n_hmac_reset(struct s2n_hmac_state *state) { POSIX_PRECONDITION(s2n_hmac_state_validate(state)); POSIX_ENSURE(state->hash_block_size != 0, S2N_ERR_PRECONDITION_VIOLATION); POSIX_GUARD(s2n_hash_copy(&state->inner, &state->inner_just_key)); uint64_t bytes_in_hash; POSIX_GUARD(s2n_hash_get_currently_in_hash_total(&state->inner, &bytes_in_hash)); bytes_in_hash %= state->hash_block_size; POSIX_ENSURE(bytes_in_hash <= UINT32_MAX, S2N_ERR_INTEGER_OVERFLOW); /* The length of the key is not private, so don't need to do tricky math here */ state->currently_in_hash_block = bytes_in_hash; return S2N_SUCCESS; } int s2n_hmac_digest_verify(const void *a, const void *b, uint32_t len) { return S2N_SUCCESS - !s2n_constant_time_equals(a, b, len); } int s2n_hmac_copy(struct s2n_hmac_state *to, struct s2n_hmac_state *from) { POSIX_PRECONDITION(s2n_hmac_state_validate(to)); POSIX_PRECONDITION(s2n_hmac_state_validate(from)); /* memcpy cannot be used on s2n_hmac_state as the underlying s2n_hash implementation's * copy must be used. This is enforced when the s2n_hash implementation is s2n_evp_hash. */ to->alg = from->alg; to->hash_block_size = from->hash_block_size; to->currently_in_hash_block = from->currently_in_hash_block; to->xor_pad_size = from->xor_pad_size; to->digest_size = from->digest_size; POSIX_GUARD(s2n_hash_copy(&to->inner, &from->inner)); POSIX_GUARD(s2n_hash_copy(&to->inner_just_key, &from->inner_just_key)); POSIX_GUARD(s2n_hash_copy(&to->outer, &from->outer)); POSIX_GUARD(s2n_hash_copy(&to->outer_just_key, &from->outer_just_key)); POSIX_CHECKED_MEMCPY(to->xor_pad, from->xor_pad, sizeof(to->xor_pad)); POSIX_CHECKED_MEMCPY(to->digest_pad, from->digest_pad, sizeof(to->digest_pad)); POSIX_POSTCONDITION(s2n_hmac_state_validate(to)); POSIX_POSTCONDITION(s2n_hmac_state_validate(from)); return S2N_SUCCESS; } /* Preserve the handlers for hmac state pointers to avoid re-allocation * Only valid if the HMAC is in EVP mode */ int s2n_hmac_save_evp_hash_state(struct s2n_hmac_evp_backup* backup, struct s2n_hmac_state* hmac) { POSIX_ENSURE_REF(backup); POSIX_PRECONDITION(s2n_hmac_state_validate(hmac)); backup->inner = hmac->inner.digest.high_level; backup->inner_just_key = hmac->inner_just_key.digest.high_level; backup->outer = hmac->outer.digest.high_level; backup->outer_just_key = hmac->outer_just_key.digest.high_level; return S2N_SUCCESS; } int s2n_hmac_restore_evp_hash_state(struct s2n_hmac_evp_backup* backup, struct s2n_hmac_state* hmac) { POSIX_ENSURE_REF(backup); POSIX_PRECONDITION(s2n_hmac_state_validate(hmac)); hmac->inner.digest.high_level = backup->inner; hmac->inner_just_key.digest.high_level = backup->inner_just_key; hmac->outer.digest.high_level = backup->outer; hmac->outer_just_key.digest.high_level = backup->outer_just_key; POSIX_POSTCONDITION(s2n_hmac_state_validate(hmac)); return S2N_SUCCESS; } S2N_RESULT s2n_hmac_md_from_alg(s2n_hmac_algorithm alg, const EVP_MD **md) { RESULT_ENSURE_REF(md); switch (alg) { case S2N_HMAC_SSLv3_MD5: case S2N_HMAC_MD5: *md = EVP_md5(); break; case S2N_HMAC_SSLv3_SHA1: case S2N_HMAC_SHA1: *md = EVP_sha1(); break; case S2N_HMAC_SHA224: *md = EVP_sha224(); break; case S2N_HMAC_SHA256: *md = EVP_sha256(); 
break; case S2N_HMAC_SHA384: *md = EVP_sha384(); break; case S2N_HMAC_SHA512: *md = EVP_sha512(); break; default: RESULT_BAIL(S2N_ERR_P_HASH_INVALID_ALGORITHM); } return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_hmac.h000066400000000000000000000055761456575232400221220ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* this file is patched by sidetrail, clang-format invalidates patches */ /* clang-format off */ #pragma once #include #include "crypto/s2n_hash.h" typedef enum { S2N_HMAC_NONE, S2N_HMAC_MD5, S2N_HMAC_SHA1, S2N_HMAC_SHA224, S2N_HMAC_SHA256, S2N_HMAC_SHA384, S2N_HMAC_SHA512, S2N_HMAC_SSLv3_MD5, S2N_HMAC_SSLv3_SHA1 } s2n_hmac_algorithm; struct s2n_hmac_state { s2n_hmac_algorithm alg; uint16_t hash_block_size; uint32_t currently_in_hash_block; uint16_t xor_pad_size; uint8_t digest_size; struct s2n_hash_state inner; struct s2n_hash_state inner_just_key; struct s2n_hash_state outer; struct s2n_hash_state outer_just_key; /* key needs to be as large as the biggest block size */ uint8_t xor_pad[128]; /* For storing the inner digest */ uint8_t digest_pad[SHA512_DIGEST_LENGTH]; }; struct s2n_hmac_evp_backup { struct s2n_hash_evp_digest inner; struct s2n_hash_evp_digest inner_just_key; struct s2n_hash_evp_digest outer; struct s2n_hash_evp_digest outer_just_key; }; int s2n_hmac_digest_size(s2n_hmac_algorithm alg, uint8_t *out); bool s2n_hmac_is_available(s2n_hmac_algorithm alg); int s2n_hmac_hash_alg(s2n_hmac_algorithm hmac_alg, s2n_hash_algorithm *out); int s2n_hash_hmac_alg(s2n_hash_algorithm hash_alg, s2n_hmac_algorithm *out); int s2n_hmac_new(struct s2n_hmac_state *state); S2N_RESULT s2n_hmac_state_validate(struct s2n_hmac_state *state); int s2n_hmac_init(struct s2n_hmac_state *state, s2n_hmac_algorithm alg, const void *key, uint32_t klen); int s2n_hmac_update(struct s2n_hmac_state *state, const void *in, uint32_t size); int s2n_hmac_digest(struct s2n_hmac_state *state, void *out, uint32_t size); int s2n_hmac_digest_two_compression_rounds(struct s2n_hmac_state *state, void *out, uint32_t size); int s2n_hmac_digest_verify(const void *a, const void *b, uint32_t len); int s2n_hmac_free(struct s2n_hmac_state *state); int s2n_hmac_reset(struct s2n_hmac_state *state); int s2n_hmac_copy(struct s2n_hmac_state *to, struct s2n_hmac_state *from); int s2n_hmac_save_evp_hash_state(struct s2n_hmac_evp_backup* backup, struct s2n_hmac_state* hmac); int s2n_hmac_restore_evp_hash_state(struct s2n_hmac_evp_backup* backup, struct s2n_hmac_state* hmac); S2N_RESULT s2n_hmac_md_from_alg(s2n_hmac_algorithm alg, const EVP_MD **md); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_ktls_crypto.h000066400000000000000000000036101456575232400235520ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. 
* A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include "utils/s2n_blob.h" /* clang-format off */ #if defined(S2N_KTLS_SUPPORTED) #include typedef struct tls12_crypto_info_aes_gcm_128 s2n_ktls_crypto_info_tls12_aes_gcm_128; typedef struct tls12_crypto_info_aes_gcm_256 s2n_ktls_crypto_info_tls12_aes_gcm_256; #else #define TLS_1_2_VERSION 0 #define TLS_1_3_VERSION 0 #define TLS_CIPHER_AES_GCM_128 0 typedef struct s2n_ktls_crypto_info_stub s2n_ktls_crypto_info_tls12_aes_gcm_128; #define TLS_CIPHER_AES_GCM_256 0 typedef struct s2n_ktls_crypto_info_stub s2n_ktls_crypto_info_tls12_aes_gcm_256; #endif /* clang-format on */ /* To avoid compile-time errors, this must contain every field that we reference * from any crypto_info. However, it is only a placeholder-- it should never * actually be used. */ struct s2n_ktls_crypto_info_stub { struct { uint8_t version; uint8_t cipher_type; } info; uint8_t iv[1]; uint8_t key[1]; uint8_t salt[1]; uint8_t rec_seq[1]; }; struct s2n_ktls_crypto_info { struct s2n_blob value; union { s2n_ktls_crypto_info_tls12_aes_gcm_128 aes_gcm_128; s2n_ktls_crypto_info_tls12_aes_gcm_256 aes_gcm_256; } ciphers; }; struct s2n_ktls_crypto_info_inputs { struct s2n_blob iv; struct s2n_blob key; struct s2n_blob seq; }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_kyber_evp.c000066400000000000000000000107521456575232400231630ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include #include #include "crypto/s2n_pq.h" #include "error/s2n_errno.h" #include "tls/s2n_kem.h" #include "utils/s2n_safety.h" #include "utils/s2n_safety_macros.h" #if defined(S2N_LIBCRYPTO_SUPPORTS_KYBER) DEFINE_POINTER_CLEANUP_FUNC(EVP_PKEY *, EVP_PKEY_free); DEFINE_POINTER_CLEANUP_FUNC(EVP_PKEY_CTX *, EVP_PKEY_CTX_free); int s2n_kyber_evp_generate_keypair(IN const struct s2n_kem *kem, OUT uint8_t *public_key, OUT uint8_t *secret_key) { DEFER_CLEANUP(EVP_PKEY_CTX *kyber_pkey_ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_KEM, NULL), EVP_PKEY_CTX_free_pointer); POSIX_GUARD_PTR(kyber_pkey_ctx); POSIX_GUARD_OSSL(EVP_PKEY_CTX_kem_set_params(kyber_pkey_ctx, kem->kem_nid), S2N_ERR_PQ_CRYPTO); POSIX_GUARD_OSSL(EVP_PKEY_keygen_init(kyber_pkey_ctx), S2N_ERR_PQ_CRYPTO); DEFER_CLEANUP(EVP_PKEY *kyber_pkey = NULL, EVP_PKEY_free_pointer); POSIX_GUARD_OSSL(EVP_PKEY_keygen(kyber_pkey_ctx, &kyber_pkey), S2N_ERR_PQ_CRYPTO); POSIX_GUARD_PTR(kyber_pkey); size_t public_key_size = kem->public_key_length; POSIX_GUARD_OSSL(EVP_PKEY_get_raw_public_key(kyber_pkey, public_key, &public_key_size), S2N_ERR_PQ_CRYPTO); POSIX_ENSURE_EQ(kem->public_key_length, public_key_size); size_t private_key_size = kem->private_key_length; POSIX_GUARD_OSSL(EVP_PKEY_get_raw_private_key(kyber_pkey, secret_key, &private_key_size), S2N_ERR_PQ_CRYPTO); POSIX_ENSURE_EQ(kem->private_key_length, private_key_size); return S2N_SUCCESS; } int s2n_kyber_evp_encapsulate(IN const struct s2n_kem *kem, OUT uint8_t *ciphertext, OUT uint8_t *shared_secret, IN const uint8_t *public_key) { DEFER_CLEANUP(EVP_PKEY *kyber_pkey = EVP_PKEY_kem_new_raw_public_key(kem->kem_nid, public_key, kem->public_key_length), EVP_PKEY_free_pointer); POSIX_GUARD_PTR(kyber_pkey); DEFER_CLEANUP(EVP_PKEY_CTX *kyber_pkey_ctx = EVP_PKEY_CTX_new(kyber_pkey, NULL), EVP_PKEY_CTX_free_pointer); POSIX_GUARD_PTR(kyber_pkey_ctx); size_t ciphertext_size = kem->ciphertext_length; size_t shared_secret_size = kem->shared_secret_key_length; POSIX_GUARD_OSSL(EVP_PKEY_encapsulate(kyber_pkey_ctx, ciphertext, &ciphertext_size, shared_secret, &shared_secret_size), S2N_ERR_PQ_CRYPTO); POSIX_ENSURE_EQ(kem->ciphertext_length, ciphertext_size); POSIX_ENSURE_EQ(kem->shared_secret_key_length, shared_secret_size); return S2N_SUCCESS; } int s2n_kyber_evp_decapsulate(IN const struct s2n_kem *kem, OUT uint8_t *shared_secret, IN const uint8_t *ciphertext, IN const uint8_t *private_key) { DEFER_CLEANUP(EVP_PKEY *kyber_pkey = EVP_PKEY_kem_new_raw_secret_key(kem->kem_nid, private_key, kem->private_key_length), EVP_PKEY_free_pointer); POSIX_GUARD_PTR(kyber_pkey); DEFER_CLEANUP(EVP_PKEY_CTX *kyber_pkey_ctx = EVP_PKEY_CTX_new(kyber_pkey, NULL), EVP_PKEY_CTX_free_pointer); POSIX_GUARD_PTR(kyber_pkey_ctx); size_t shared_secret_size = kem->shared_secret_key_length; POSIX_GUARD_OSSL(EVP_PKEY_decapsulate(kyber_pkey_ctx, shared_secret, &shared_secret_size, (uint8_t *) ciphertext, kem->ciphertext_length), S2N_ERR_PQ_CRYPTO); POSIX_ENSURE_EQ(kem->shared_secret_key_length, shared_secret_size); return S2N_SUCCESS; } #else /* If !S2N_LIBCRYPTO_SUPPORTS_KYBER, we won't have a Kyber impl so define relevant stubs here. 
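 * Each stub simply fails with S2N_ERR_NO_SUPPORTED_LIBCRYPTO_API, so a caller that reaches the
 * Kyber code paths against a libcrypto without KEM support gets a clear runtime error rather
 * than a link-time failure.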
*/ int s2n_kyber_evp_generate_keypair(IN const struct s2n_kem *kem, OUT uint8_t *public_key, OUT uint8_t *secret_key) { POSIX_BAIL(S2N_ERR_NO_SUPPORTED_LIBCRYPTO_API); } int s2n_kyber_evp_encapsulate(IN const struct s2n_kem *kem, OUT uint8_t *ciphertext, OUT uint8_t *shared_secret, IN const uint8_t *public_key) { POSIX_BAIL(S2N_ERR_NO_SUPPORTED_LIBCRYPTO_API); } int s2n_kyber_evp_decapsulate(IN const struct s2n_kem *kem, OUT uint8_t *shared_secret, IN const uint8_t *ciphertext, IN const uint8_t *secret_key) { POSIX_BAIL(S2N_ERR_NO_SUPPORTED_LIBCRYPTO_API); } #endif aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_kyber_evp.h000066400000000000000000000017661456575232400231750ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include "tls/s2n_kem.h" int s2n_kyber_evp_generate_keypair(IN const struct s2n_kem *kem, OUT uint8_t *public_key, OUT uint8_t *private_key); int s2n_kyber_evp_encapsulate(IN const struct s2n_kem *kem, OUT uint8_t *ciphertext, OUT uint8_t *shared_secret, IN const uint8_t *public_key); int s2n_kyber_evp_decapsulate(IN const struct s2n_kem *kem, OUT uint8_t *shared_secret, IN const uint8_t *ciphertext, IN const uint8_t *private_key); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_libcrypto.c000066400000000000000000000157621456575232400232120ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_libcrypto.h" #include #include #include #include "crypto/s2n_crypto.h" #include "crypto/s2n_fips.h" #include "crypto/s2n_openssl.h" #include "utils/s2n_safety.h" #include "utils/s2n_safety_macros.h" /* Note: OpenSSL 1.0.2 -> 1.1.0 implemented a new API to get the version number * and version name. We have to handle that by using old functions * (named "SSLea*"). Newer version of OpenSSL luckily define these symbols to * the new API. When dropping OpenSSL 1.0.2 support, we can move to the new API. */ /* The result of SSLeay_version(SSLEAY_VERSION) for OpenSSL and AWS-LC depends on the * version. AWS-LC and BoringSSL have consistent prefixes that can be statically asserted. * * https://github.com/awslabs/aws-lc/commit/8f184f5d69604cc4645bafec47c2d6d9929cb50f * has not been pushed to the fips branch of AWS-LC. In addition, we can't * distinguish AWS-LC fips and non-fips at pre-processing time since AWS-LC * doesn't distribute fips-specific header files. 
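 * As a result, the run-time validation below accepts either the historical "BoringSSL" prefix
 * or the newer "AWS-LC" prefix, selecting between them based on the AWS-LC API version.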
*/ #define EXPECTED_AWSLC_VERSION_PREFIX_OLD "BoringSSL" #define EXPECTED_AWSLC_VERSION_PREFIX_NEW "AWS-LC" #define EXPECTED_BORINGSSL_VERSION_PREFIX "BoringSSL" /* https://www.openssl.org/docs/man{1.0.2, 1.1.1, 3.0}/man3/OPENSSL_VERSION_NUMBER.html * OPENSSL_VERSION_NUMBER in hex is: MNNFFPPS major minor fix patch status. * Bitwise: MMMMNNNNNNNNFFFFFFFFPPPPPPPPSSSS * To not be overly restrictive, we only care about the major version. * From OpenSSL 3.0 the "fix" part is also deprecated and is always a flat 0x00. */ #define VERSION_NUMBER_MASK 0xF0000000L /* Returns the version name of the libcrypto containing the definition that the * symbol OpenSSL_version binded to at link-time. This can be used as * verification at run-time that s2n linked against the expected libcrypto. */ static const char *s2n_libcrypto_get_version_name(void) { return SSLeay_version(SSLEAY_VERSION); } static S2N_RESULT s2n_libcrypto_validate_expected_version_prefix(const char *expected_name_prefix) { RESULT_ENSURE_REF(expected_name_prefix); RESULT_ENSURE_REF(s2n_libcrypto_get_version_name()); RESULT_ENSURE_LTE(strlen(expected_name_prefix), strlen(s2n_libcrypto_get_version_name())); RESULT_ENSURE(s2n_constant_time_equals((const uint8_t *) expected_name_prefix, (const uint8_t *) s2n_libcrypto_get_version_name(), (const uint32_t) strlen(expected_name_prefix)), S2N_ERR_LIBCRYPTO_VERSION_NAME_MISMATCH); return S2N_RESULT_OK; } /* Compare compile-time version number with the version number of the libcrypto * containing the definition that the symbol OpenSSL_version_num binded to at * link-time. * * This is an imperfect check for AWS-LC and BoringSSL, since their version * number is basically never incremented. However, for these we have a strong * check through s2n_libcrypto_validate_expected_version_name(), so it is not * of great importance. */ static S2N_RESULT s2n_libcrypto_validate_expected_version_number(void) { /* We mutate the version number in s2n_openssl.h when detecting Libressl. This * value is cached by s2n_get_openssl_version(). Hence, for libressl, the * run-time version number will always be different from what * s2n_get_openssl_version() returns. We cater for this here by just getting * what ever we cached instead of asking Libressl libcrypto. */ #if defined(LIBRESSL_VERSION_NUMBER) unsigned long run_time_version_number = s2n_get_openssl_version() & VERSION_NUMBER_MASK; #else unsigned long run_time_version_number = SSLeay() & VERSION_NUMBER_MASK; #endif unsigned long compile_time_version_number = s2n_get_openssl_version() & VERSION_NUMBER_MASK; RESULT_ENSURE(compile_time_version_number == run_time_version_number, S2N_ERR_LIBCRYPTO_VERSION_NUMBER_MISMATCH); return S2N_RESULT_OK; } /* s2n_libcrypto_is_*() encodes the libcrypto version used at build-time. * Currently only captures AWS-LC and BoringSSL. When a libcrypto-dependent * branch is required, we prefer these functions where possible to reduce # #ifs and avoid potential bugs where the header containing the #define is not * included. */ #if defined(OPENSSL_IS_AWSLC) && defined(OPENSSL_IS_BORINGSSL) #error "Both OPENSSL_IS_AWSLC and OPENSSL_IS_BORINGSSL are defined at the same time!" 
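/* Worked example of the major-version comparison implemented above: OpenSSL 1.1.1k reports
 * OPENSSL_VERSION_NUMBER 0x101010bfL, and 0x101010bfL & VERSION_NUMBER_MASK yields 0x10000000L,
 * so any 1.x libcrypto found at run time is treated as compatible with a 1.x compile-time
 * libcrypto. */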
#endif bool s2n_libcrypto_is_awslc() { #if defined(OPENSSL_IS_AWSLC) return true; #else return false; #endif } uint64_t s2n_libcrypto_awslc_api_version(void) { #if defined(OPENSSL_IS_AWSLC) return AWSLC_API_VERSION; #else return 0; #endif } bool s2n_libcrypto_is_boringssl() { #if defined(OPENSSL_IS_BORINGSSL) return true; #else return false; #endif } bool s2n_libcrypto_is_libressl() { #if defined(LIBRESSL_VERSION_NUMBER) return true; #else return false; #endif } /* Performs various checks to validate that the libcrypto used at compile-time * is the same libcrypto being used at run-time. */ S2N_RESULT s2n_libcrypto_validate_runtime(void) { /* Sanity check that we don't think we built against AWS-LC and BoringSSL at * the same time. */ RESULT_ENSURE_EQ(s2n_libcrypto_is_boringssl() && s2n_libcrypto_is_awslc(), false); /* If we know the expected version name, we can validate it. */ if (s2n_libcrypto_is_awslc()) { const char *expected_awslc_name_prefix = NULL; /* For backwards compatability, also check the AWS-LC API version see * https://github.com/awslabs/aws-lc/pull/467. When we are confident we * don't meet anymore "old" AWS-LC libcrypto's, this API version check * can be removed. */ if (s2n_libcrypto_awslc_api_version() < 17) { expected_awslc_name_prefix = EXPECTED_AWSLC_VERSION_PREFIX_OLD; } else { expected_awslc_name_prefix = EXPECTED_AWSLC_VERSION_PREFIX_NEW; } RESULT_GUARD(s2n_libcrypto_validate_expected_version_prefix(expected_awslc_name_prefix)); } else if (s2n_libcrypto_is_boringssl()) { RESULT_GUARD(s2n_libcrypto_validate_expected_version_prefix(EXPECTED_BORINGSSL_VERSION_PREFIX)); } RESULT_GUARD(s2n_libcrypto_validate_expected_version_number()); return S2N_RESULT_OK; } bool s2n_libcrypto_is_interned(void) { #if defined(S2N_INTERN_LIBCRYPTO) return true; #else return false; #endif } unsigned long s2n_get_openssl_version(void) { return OPENSSL_VERSION_NUMBER; } bool s2n_libcrypto_supports_flag_no_check_time() { #ifdef S2N_LIBCRYPTO_SUPPORTS_FLAG_NO_CHECK_TIME return true; #else return false; #endif } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_libcrypto.h000066400000000000000000000013211456575232400232010ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include "utils/s2n_result.h" S2N_RESULT s2n_libcrypto_validate_runtime(void); bool s2n_libcrypto_supports_flag_no_check_time(); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_locking.c000066400000000000000000000065231456575232400226240ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_locking.h" #include #include #include "crypto/s2n_openssl.h" #include "utils/s2n_mem.h" #include "utils/s2n_safety.h" /* Writing multithreaded applications using Openssl-1.0.2 * requires calling CRYPTO_set_locking_callback. * If the callback is not set, locks are no-ops and unexpected * behavior may occur, particularly for RSA and X509. * * In the past s2n-tls relied on customers setting the callback * themselves, but that seems unnecessary since other parts of * the library (like fork detection) already rely on the pthreads library. * * For more information: * https://www.openssl.org/blog/blog/2017/02/21/threads/ * https://www.openssl.org/docs/man1.0.2/man3/threads.html */ #define S2N_MUTEXES(mem) ((pthread_mutex_t *) (void *) (mem).data) /* While the locking-related APIs "exist" in later versions of * Openssl, they tend to be placeholders or hardcoded values like: * #define CRYPTO_get_locking_callback() (NULL) * So the code will compile with strange warnings / errors like * loop conditions always being false. */ #if !(S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0)) static struct s2n_blob mutexes_mem = { 0 }; static size_t mutexes_count = 0; static void s2n_locking_cb(int mode, int n, char *file, int line) { pthread_mutex_t *mutexes = S2N_MUTEXES(mutexes_mem); if (!mutexes_mem.data || n < 0 || (size_t) n >= mutexes_count) { return; } if (mode & CRYPTO_LOCK) { pthread_mutex_lock(&(mutexes[n])); } else { pthread_mutex_unlock(&(mutexes[n])); } } S2N_RESULT s2n_locking_init(void) { if (CRYPTO_get_locking_callback() != NULL) { return S2N_RESULT_OK; } int num_locks = CRYPTO_num_locks(); RESULT_ENSURE_GTE(num_locks, 0); RESULT_GUARD_POSIX(s2n_realloc(&mutexes_mem, num_locks * sizeof(pthread_mutex_t))); pthread_mutex_t *mutexes = S2N_MUTEXES(mutexes_mem); mutexes_count = 0; for (size_t i = 0; i < (size_t) num_locks; i++) { RESULT_ENSURE_EQ(pthread_mutex_init(&(mutexes[i]), NULL), 0); mutexes_count++; } CRYPTO_set_locking_callback((void (*)()) s2n_locking_cb); return S2N_RESULT_OK; } S2N_RESULT s2n_locking_cleanup(void) { if (CRYPTO_get_locking_callback() == (void (*)()) s2n_locking_cb) { CRYPTO_set_locking_callback(NULL); } pthread_mutex_t *mutexes = S2N_MUTEXES(mutexes_mem); if (mutexes) { while (mutexes_count > 0) { RESULT_ENSURE_EQ(pthread_mutex_destroy(&(mutexes[mutexes_count - 1])), 0); mutexes_count--; } RESULT_GUARD_POSIX(s2n_free(&mutexes_mem)); } return S2N_RESULT_OK; } #else S2N_RESULT s2n_locking_init(void) { return S2N_RESULT_OK; } S2N_RESULT s2n_locking_cleanup(void) { return S2N_RESULT_OK; } #endif aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_locking.h000066400000000000000000000012661456575232400226300ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include "utils/s2n_result.h" S2N_RESULT s2n_locking_init(void); S2N_RESULT s2n_locking_cleanup(void); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_openssl.h000066400000000000000000000047411456575232400226660ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include /** * openssl with OPENSSL_VERSION_NUMBER < 0x10100003L made data type details unavailable * libressl use openssl with data type details available, but mandatorily set * OPENSSL_VERSION_NUMBER = 0x20000000L, insane! * https://github.com/aws/aws-sdk-cpp/pull/507/commits/2c99f1fe0c4b4683280caeb161538d4724d6a179 */ #if defined(LIBRESSL_VERSION_NUMBER) && (OPENSSL_VERSION_NUMBER == 0x20000000L) #undef OPENSSL_VERSION_NUMBER #if LIBRESSL_VERSION_NUMBER < 0x3050000fL #define OPENSSL_VERSION_NUMBER 0x1000107fL #else #define OPENSSL_VERSION_NUMBER 0x1010000fL #endif #endif /* Per https://wiki.openssl.org/index.php/Manual:OPENSSL_VERSION_NUMBER(3) * OPENSSL_VERSION_NUMBER in hex is: MNNFFRBB major minor fix final beta/patch. * bitwise: MMMMNNNNNNNNFFFFFFFFRRRRBBBBBBBB * For our purposes we're only concerned about major/minor/fix. Patch versions don't usually introduce * features. */ #define S2N_OPENSSL_VERSION_AT_LEAST(major, minor, fix) \ (OPENSSL_VERSION_NUMBER >= ((major << 28) + (minor << 20) + (fix << 12))) #if (S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0)) && (!defined(OPENSSL_IS_BORINGSSL)) && (!defined(OPENSSL_IS_AWSLC)) && (!defined(LIBRESSL_VERSION_NUMBER)) #define s2n_evp_ctx_init(ctx) POSIX_GUARD_OSSL(EVP_CIPHER_CTX_init(ctx), S2N_ERR_DRBG) #define RESULT_EVP_CTX_INIT(ctx) RESULT_GUARD_OSSL(EVP_CIPHER_CTX_init(ctx), S2N_ERR_DRBG) #else #define s2n_evp_ctx_init(ctx) EVP_CIPHER_CTX_init(ctx) #define RESULT_EVP_CTX_INIT(ctx) EVP_CIPHER_CTX_init(ctx) #endif #if !defined(OPENSSL_IS_BORINGSSL) && !defined(OPENSSL_FIPS) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_AWSLC) && !defined(OPENSSL_NO_ENGINE) #define S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND 1 #else #define S2N_LIBCRYPTO_SUPPORTS_CUSTOM_RAND 0 #endif bool s2n_libcrypto_is_awslc(); bool s2n_libcrypto_is_boringssl(); bool s2n_libcrypto_is_libressl(); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_openssl_evp.h000066400000000000000000000012761456575232400235400ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include #include "utils/s2n_safety.h" DEFINE_POINTER_CLEANUP_FUNC(EVP_PKEY*, EVP_PKEY_free); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_openssl_x509.c000066400000000000000000000100461456575232400234410ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_openssl_x509.h" #include "api/s2n.h" S2N_CLEANUP_RESULT s2n_openssl_x509_stack_pop_free(STACK_OF(X509) **cert_chain) { RESULT_ENSURE_REF(*cert_chain); sk_X509_pop_free(*cert_chain, X509_free); *cert_chain = NULL; return S2N_RESULT_OK; } S2N_CLEANUP_RESULT s2n_openssl_asn1_time_free_pointer(ASN1_GENERALIZEDTIME **time_ptr) { /* The ANS1_*TIME structs are just typedef wrappers around ASN1_STRING * * The ASN1_TIME, ASN1_UTCTIME and ASN1_GENERALIZEDTIME structures are * represented as an ASN1_STRING internally and can be freed up using * ASN1_STRING_free(). * https://www.openssl.org/docs/man1.1.1/man3/ASN1_TIME_to_tm.html */ RESULT_ENSURE_REF(*time_ptr); ASN1_STRING_free((ASN1_STRING *) *time_ptr); *time_ptr = NULL; return S2N_RESULT_OK; } S2N_RESULT s2n_openssl_x509_parse_impl(struct s2n_blob *asn1der, X509 **cert_out, uint32_t *parsed_length) { RESULT_ENSURE_REF(asn1der); RESULT_ENSURE_REF(asn1der->data); RESULT_ENSURE_REF(cert_out); RESULT_ENSURE_REF(parsed_length); uint8_t *cert_to_parse = asn1der->data; *cert_out = d2i_X509(NULL, (const unsigned char **) (void *) &cert_to_parse, asn1der->size); RESULT_ENSURE(*cert_out != NULL, S2N_ERR_DECODE_CERTIFICATE); /* If cert parsing is successful, d2i_X509 increments *cert_to_parse to the byte following the parsed data */ *parsed_length = cert_to_parse - asn1der->data; return S2N_RESULT_OK; } S2N_RESULT s2n_openssl_x509_parse_without_length_validation(struct s2n_blob *asn1der, X509 **cert_out) { RESULT_ENSURE_REF(asn1der); RESULT_ENSURE_REF(cert_out); uint32_t parsed_len = 0; RESULT_GUARD(s2n_openssl_x509_parse_impl(asn1der, cert_out, &parsed_len)); return S2N_RESULT_OK; } S2N_RESULT s2n_openssl_x509_parse(struct s2n_blob *asn1der, X509 **cert_out) { RESULT_ENSURE_REF(asn1der); RESULT_ENSURE_REF(cert_out); uint32_t parsed_len = 0; RESULT_GUARD(s2n_openssl_x509_parse_impl(asn1der, cert_out, &parsed_len)); /* Some TLS clients in the wild send extra trailing bytes after the Certificate. * Allow this in s2n for backwards compatibility with existing clients. 
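 * The tolerance is tightly bounded: at most S2N_MAX_ALLOWED_CERT_TRAILING_BYTES (3) trailing
 * bytes are accepted, and anything beyond that still fails with S2N_ERR_DECODE_CERTIFICATE.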
*/ uint32_t trailing_bytes = asn1der->size - parsed_len; RESULT_ENSURE(trailing_bytes <= S2N_MAX_ALLOWED_CERT_TRAILING_BYTES, S2N_ERR_DECODE_CERTIFICATE); return S2N_RESULT_OK; } S2N_RESULT s2n_openssl_x509_get_cert_info(X509 *cert, struct s2n_cert_info *info) { RESULT_ENSURE_REF(cert); RESULT_ENSURE_REF(info); X509_NAME *issuer_name = X509_get_issuer_name(cert); RESULT_ENSURE_REF(issuer_name); X509_NAME *subject_name = X509_get_subject_name(cert); RESULT_ENSURE_REF(subject_name); if (X509_NAME_cmp(issuer_name, subject_name) == 0) { info->self_signed = true; } else { info->self_signed = false; } #if defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER < 0x02070000f) RESULT_ENSURE_REF(cert->sig_alg); info->signature_nid = OBJ_obj2nid(cert->sig_alg->algorithm); #else info->signature_nid = X509_get_signature_nid(cert); #endif /* These is no method to directly retrieve that signature digest from the X509* * that is available in all libcryptos, so instead we use find_sigid_algs. For * a signature NID_ecdsa_with_SHA256 this will return NID_SHA256 */ RESULT_GUARD_OSSL(OBJ_find_sigid_algs(info->signature_nid, &info->signature_digest_nid, NULL), S2N_ERR_CERT_TYPE_UNSUPPORTED); return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_openssl_x509.h000066400000000000000000000033271456575232400234520ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include #include #include "crypto/s2n_certificate.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" #define S2N_MAX_ALLOWED_CERT_TRAILING_BYTES 3 DEFINE_POINTER_CLEANUP_FUNC(X509 *, X509_free); S2N_CLEANUP_RESULT s2n_openssl_x509_stack_pop_free(STACK_OF(X509) **cert_chain); S2N_CLEANUP_RESULT s2n_openssl_asn1_time_free_pointer(ASN1_GENERALIZEDTIME **time); /* * This function is used to convert an s2n_blob into an openssl X509 cert. It * will additionally ensure that there are 3 or fewer trailing bytes in * `asn1der`. */ S2N_RESULT s2n_openssl_x509_parse(struct s2n_blob *asn1der, X509 **cert_out); /* * This function is used to convert an s2n_blob into an openssl X509 cert. * Unlike `s2n_openssl_x509_parse` no additional validation is done. This * function should only be used in places where it is necessary to maintain * compatability with previous permissive parsing behavior. */ S2N_RESULT s2n_openssl_x509_parse_without_length_validation(struct s2n_blob *asn1der, X509 **cert_out); S2N_RESULT s2n_openssl_x509_get_cert_info(X509 *cert, struct s2n_cert_info *info); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_pkey.c000066400000000000000000000167611456575232400221530ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. 
This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_pkey.h" #include #include "crypto/s2n_openssl_evp.h" #include "crypto/s2n_openssl_x509.h" #include "crypto/s2n_rsa_pss.h" #include "error/s2n_errno.h" #include "utils/s2n_result.h" #include "utils/s2n_safety.h" int s2n_pkey_zero_init(struct s2n_pkey *pkey) { pkey->pkey = NULL; pkey->size = NULL; pkey->sign = NULL; pkey->verify = NULL; pkey->encrypt = NULL; pkey->decrypt = NULL; pkey->match = NULL; pkey->free = NULL; pkey->check_key = NULL; return 0; } S2N_RESULT s2n_pkey_setup_for_type(struct s2n_pkey *pkey, s2n_pkey_type pkey_type) { switch (pkey_type) { case S2N_PKEY_TYPE_RSA: return s2n_rsa_pkey_init(pkey); case S2N_PKEY_TYPE_ECDSA: return s2n_ecdsa_pkey_init(pkey); case S2N_PKEY_TYPE_RSA_PSS: return s2n_rsa_pss_pkey_init(pkey); case S2N_PKEY_TYPE_SENTINEL: case S2N_PKEY_TYPE_UNKNOWN: RESULT_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED); } RESULT_BAIL(S2N_ERR_CERT_TYPE_UNSUPPORTED); } int s2n_pkey_check_key_exists(const struct s2n_pkey *pkey) { POSIX_ENSURE_REF(pkey->pkey); POSIX_ENSURE_REF(pkey->check_key); return pkey->check_key(pkey); } S2N_RESULT s2n_pkey_size(const struct s2n_pkey *pkey, uint32_t *size_out) { RESULT_ENSURE_REF(pkey); RESULT_ENSURE_REF(pkey->size); RESULT_ENSURE_REF(size_out); RESULT_GUARD(pkey->size(pkey, size_out)); return S2N_RESULT_OK; } int s2n_pkey_sign(const struct s2n_pkey *pkey, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature) { POSIX_ENSURE_REF(pkey->sign); return pkey->sign(pkey, sig_alg, digest, signature); } int s2n_pkey_verify(const struct s2n_pkey *pkey, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature) { POSIX_ENSURE_REF(pkey); POSIX_ENSURE_REF(pkey->verify); return pkey->verify(pkey, sig_alg, digest, signature); } int s2n_pkey_encrypt(const struct s2n_pkey *pkey, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_REF(pkey->encrypt); return pkey->encrypt(pkey, in, out); } int s2n_pkey_decrypt(const struct s2n_pkey *pkey, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_REF(pkey->decrypt); return pkey->decrypt(pkey, in, out); } int s2n_pkey_match(const struct s2n_pkey *pub_key, const struct s2n_pkey *priv_key) { POSIX_ENSURE_REF(pub_key->match); S2N_ERROR_IF(pub_key->match != priv_key->match, S2N_ERR_KEY_MISMATCH); return pub_key->match(pub_key, priv_key); } int s2n_pkey_free(struct s2n_pkey *key) { if (key != NULL && key->free != NULL) { POSIX_GUARD(key->free(key)); } if (key->pkey != NULL) { EVP_PKEY_free(key->pkey); key->pkey = NULL; } return S2N_SUCCESS; } S2N_RESULT s2n_asn1der_to_private_key(struct s2n_pkey *priv_key, struct s2n_blob *asn1der, int type_hint) { const unsigned char *key_to_parse = asn1der->data; /* We use "d2i_AutoPrivateKey" instead of "PEM_read_bio_PrivateKey" because * s2n-tls prefers to perform its own custom PEM parsing. Historically, * openssl's PEM parsing tended to ignore invalid certificates rather than * error on them. We prefer to fail early rather than continue without * the full and correct chain intended by the application. */ DEFER_CLEANUP(EVP_PKEY *evp_private_key = d2i_AutoPrivateKey(NULL, &key_to_parse, asn1der->size), EVP_PKEY_free_pointer); /* We have found cases where d2i_AutoPrivateKey fails to detect the type of * the key. 
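 * (d2i_AutoPrivateKey infers the algorithm from the shape of the DER encoding alone, so
 * encodings that omit optional fields can defeat that detection.)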
For example, openssl fails to identify an EC key without the * optional publicKey field. * * If d2i_AutoPrivateKey fails, try once more with the type we parsed from the PEM. */ if (evp_private_key == NULL) { evp_private_key = d2i_PrivateKey(type_hint, NULL, &key_to_parse, asn1der->size); } RESULT_ENSURE(evp_private_key, S2N_ERR_DECODE_PRIVATE_KEY); /* If key parsing is successful, d2i_AutoPrivateKey increments *key_to_parse to the byte following the parsed data */ uint32_t parsed_len = key_to_parse - asn1der->data; RESULT_ENSURE(parsed_len == asn1der->size, S2N_ERR_DECODE_PRIVATE_KEY); /* Initialize s2n_pkey according to key type */ int type = EVP_PKEY_base_id(evp_private_key); switch (type) { case EVP_PKEY_RSA: RESULT_GUARD(s2n_rsa_pkey_init(priv_key)); RESULT_GUARD(s2n_evp_pkey_to_rsa_private_key(&priv_key->key.rsa_key, evp_private_key)); break; case EVP_PKEY_RSA_PSS: RESULT_GUARD(s2n_rsa_pss_pkey_init(priv_key)); RESULT_GUARD(s2n_evp_pkey_to_rsa_pss_private_key(&priv_key->key.rsa_key, evp_private_key)); break; case EVP_PKEY_EC: RESULT_GUARD(s2n_ecdsa_pkey_init(priv_key)); RESULT_GUARD(s2n_evp_pkey_to_ecdsa_private_key(&priv_key->key.ecdsa_key, evp_private_key)); break; default: RESULT_BAIL(S2N_ERR_DECODE_PRIVATE_KEY); } priv_key->pkey = evp_private_key; ZERO_TO_DISABLE_DEFER_CLEANUP(evp_private_key); return S2N_RESULT_OK; } S2N_RESULT s2n_asn1der_to_public_key_and_type(struct s2n_pkey *pub_key, s2n_pkey_type *pkey_type_out, struct s2n_blob *asn1der) { DEFER_CLEANUP(X509 *cert = NULL, X509_free_pointer); RESULT_GUARD(s2n_openssl_x509_parse(asn1der, &cert)); RESULT_GUARD(s2n_pkey_from_x509(cert, pub_key, pkey_type_out)); return S2N_RESULT_OK; } S2N_RESULT s2n_pkey_from_x509(X509 *cert, struct s2n_pkey *pub_key_out, s2n_pkey_type *pkey_type_out) { RESULT_ENSURE_REF(cert); RESULT_ENSURE_REF(pub_key_out); RESULT_ENSURE_REF(pkey_type_out); DEFER_CLEANUP(EVP_PKEY *evp_public_key = X509_get_pubkey(cert), EVP_PKEY_free_pointer); RESULT_ENSURE(evp_public_key != NULL, S2N_ERR_DECODE_CERTIFICATE); /* Check for success in decoding certificate according to type */ int type = EVP_PKEY_base_id(evp_public_key); switch (type) { case EVP_PKEY_RSA: RESULT_GUARD(s2n_rsa_pkey_init(pub_key_out)); RESULT_GUARD(s2n_evp_pkey_to_rsa_public_key(&pub_key_out->key.rsa_key, evp_public_key)); *pkey_type_out = S2N_PKEY_TYPE_RSA; break; case EVP_PKEY_RSA_PSS: RESULT_GUARD(s2n_rsa_pss_pkey_init(pub_key_out)); RESULT_GUARD(s2n_evp_pkey_to_rsa_pss_public_key(&pub_key_out->key.rsa_key, evp_public_key)); *pkey_type_out = S2N_PKEY_TYPE_RSA_PSS; break; case EVP_PKEY_EC: RESULT_GUARD(s2n_ecdsa_pkey_init(pub_key_out)); RESULT_GUARD(s2n_evp_pkey_to_ecdsa_public_key(&pub_key_out->key.ecdsa_key, evp_public_key)); *pkey_type_out = S2N_PKEY_TYPE_ECDSA; break; default: RESULT_BAIL(S2N_ERR_DECODE_CERTIFICATE); } pub_key_out->pkey = evp_public_key; ZERO_TO_DISABLE_DEFER_CLEANUP(evp_public_key); return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_pkey.h000066400000000000000000000064121456575232400221500ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. 
See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include "crypto/s2n_ecdsa.h" #include "crypto/s2n_hash.h" #include "crypto/s2n_rsa.h" #include "crypto/s2n_signature.h" #include "utils/s2n_blob.h" #include "utils/s2n_result.h" /* Public/Private Key Type */ typedef enum { S2N_PKEY_TYPE_UNKNOWN = -1, S2N_PKEY_TYPE_RSA = 0, S2N_PKEY_TYPE_ECDSA, S2N_PKEY_TYPE_RSA_PSS, S2N_PKEY_TYPE_SENTINEL } s2n_pkey_type; /* Structure that models a public or private key and type-specific operations */ struct s2n_pkey { /* Legacy OpenSSL APIs operate on specific keys, but the more recent * APIs all operate on EVP_PKEY. Let's store both for backwards compatibility. */ union { struct s2n_rsa_key rsa_key; struct s2n_ecdsa_key ecdsa_key; } key; EVP_PKEY *pkey; S2N_RESULT (*size)(const struct s2n_pkey *key, uint32_t *size_out); int (*sign)(const struct s2n_pkey *priv_key, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature); int (*verify)(const struct s2n_pkey *pub_key, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature); int (*encrypt)(const struct s2n_pkey *key, struct s2n_blob *in, struct s2n_blob *out); int (*decrypt)(const struct s2n_pkey *key, struct s2n_blob *in, struct s2n_blob *out); int (*match)(const struct s2n_pkey *pub_key, const struct s2n_pkey *priv_key); int (*free)(struct s2n_pkey *key); int (*check_key)(const struct s2n_pkey *key); }; int s2n_pkey_zero_init(struct s2n_pkey *pkey); S2N_RESULT s2n_pkey_setup_for_type(struct s2n_pkey *pkey, s2n_pkey_type pkey_type); int s2n_pkey_check_key_exists(const struct s2n_pkey *pkey); S2N_RESULT s2n_pkey_size(const struct s2n_pkey *pkey, uint32_t *size_out); int s2n_pkey_sign(const struct s2n_pkey *pkey, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature); int s2n_pkey_verify(const struct s2n_pkey *pkey, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature); int s2n_pkey_encrypt(const struct s2n_pkey *pkey, struct s2n_blob *in, struct s2n_blob *out); int s2n_pkey_decrypt(const struct s2n_pkey *pkey, struct s2n_blob *in, struct s2n_blob *out); int s2n_pkey_match(const struct s2n_pkey *pub_key, const struct s2n_pkey *priv_key); int s2n_pkey_free(struct s2n_pkey *pkey); S2N_RESULT s2n_asn1der_to_private_key(struct s2n_pkey *priv_key, struct s2n_blob *asn1der, int type_hint); S2N_RESULT s2n_asn1der_to_public_key_and_type(struct s2n_pkey *pub_key, s2n_pkey_type *pkey_type, struct s2n_blob *asn1der); S2N_RESULT s2n_pkey_from_x509(X509 *cert, struct s2n_pkey *pub_key_out, s2n_pkey_type *pkey_type_out); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_pq.c000066400000000000000000000017401456575232400216120ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include "s2n_pq.h" #include "crypto/s2n_openssl.h" bool s2n_libcrypto_supports_kyber() { /* S2N_LIBCRYPTO_SUPPORTS_KYBER will be auto-detected and #defined if * ./tests/features/S2N_LIBCRYPTO_SUPPORTS_KYBER.c successfully compiles */ #if defined(S2N_LIBCRYPTO_SUPPORTS_KYBER) return true; #else return false; #endif } bool s2n_pq_is_enabled() { return s2n_libcrypto_supports_kyber(); } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_pq.h000066400000000000000000000014051456575232400216150ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include "crypto/s2n_fips.h" #include "utils/s2n_result.h" #include "utils/s2n_safety.h" bool s2n_pq_is_enabled(void); bool s2n_libcrypto_supports_kyber(void); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_rsa.c000066400000000000000000000155501456575232400217630ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #include "crypto/s2n_rsa.h" #include #include #include #include "crypto/s2n_drbg.h" #include "crypto/s2n_evp_signing.h" #include "crypto/s2n_hash.h" #include "crypto/s2n_pkey.h" #include "crypto/s2n_rsa_signing.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" #include "utils/s2n_compiler.h" #include "utils/s2n_random.h" #include "utils/s2n_result.h" #include "utils/s2n_safety.h" RSA *s2n_unsafe_rsa_get_non_const(const struct s2n_rsa_key *rsa_key) { PTR_ENSURE_REF(rsa_key); #ifdef S2N_DIAGNOSTICS_PUSH_SUPPORTED #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" #endif RSA *out_rsa_key = (RSA *) rsa_key->rsa; #ifdef S2N_DIAGNOSTICS_POP_SUPPORTED #pragma GCC diagnostic pop #endif return out_rsa_key; } static S2N_RESULT s2n_rsa_modulus_check(const RSA *rsa) { /* RSA was made opaque starting in Openssl 1.1.0 */ #if S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 0) const BIGNUM *n = NULL; /* RSA still owns the memory for n */ RSA_get0_key(rsa, &n, NULL, NULL); RESULT_ENSURE_REF(n); #else RESULT_ENSURE_REF(rsa->n); #endif return S2N_RESULT_OK; } static S2N_RESULT s2n_rsa_encrypted_size(const struct s2n_pkey *pkey, uint32_t *size_out) { RESULT_ENSURE_REF(pkey); RESULT_ENSURE_REF(size_out); const struct s2n_rsa_key *rsa_key = &pkey->key.rsa_key; RESULT_ENSURE_REF(rsa_key->rsa); RESULT_GUARD(s2n_rsa_modulus_check(rsa_key->rsa)); const int size = RSA_size(rsa_key->rsa); RESULT_GUARD_POSIX(size); *size_out = size; return S2N_RESULT_OK; } static int s2n_rsa_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature) { switch (sig_alg) { case S2N_SIGNATURE_RSA: return s2n_rsa_pkcs1v15_sign(priv, digest, signature); case S2N_SIGNATURE_RSA_PSS_RSAE: return s2n_rsa_pss_sign(priv, digest, signature); default: POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM); } return S2N_SUCCESS; } static int s2n_rsa_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature) { switch (sig_alg) { case S2N_SIGNATURE_RSA: return s2n_rsa_pkcs1v15_verify(pub, digest, signature); case S2N_SIGNATURE_RSA_PSS_RSAE: return s2n_rsa_pss_verify(pub, digest, signature); default: POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM); } return S2N_SUCCESS; } static int s2n_rsa_encrypt(const struct s2n_pkey *pub, struct s2n_blob *in, struct s2n_blob *out) { uint32_t size = 0; POSIX_GUARD_RESULT(s2n_rsa_encrypted_size(pub, &size)); S2N_ERROR_IF(out->size < size, S2N_ERR_NOMEM); const s2n_rsa_public_key *pub_key = &pub->key.rsa_key; /* Safety: RSA_public_encrypt does not mutate the key */ int r = RSA_public_encrypt(in->size, (unsigned char *) in->data, (unsigned char *) out->data, s2n_unsafe_rsa_get_non_const(pub_key), RSA_PKCS1_PADDING); POSIX_ENSURE((int64_t) r == (int64_t) out->size, S2N_ERR_SIZE_MISMATCH); return 0; } static int s2n_rsa_decrypt(const struct s2n_pkey *priv, struct s2n_blob *in, struct s2n_blob *out) { unsigned char intermediate[4096]; uint32_t expected_size = 0; POSIX_GUARD_RESULT(s2n_rsa_encrypted_size(priv, &expected_size)); S2N_ERROR_IF(expected_size > sizeof(intermediate), S2N_ERR_NOMEM); S2N_ERROR_IF(out->size > sizeof(intermediate), S2N_ERR_NOMEM); POSIX_GUARD_RESULT(s2n_get_public_random_data(out)); const s2n_rsa_private_key *priv_key = &priv->key.rsa_key; /* Safety: RSA_private_decrypt does not mutate the key */ int r = RSA_private_decrypt(in->size, (unsigned char *) in->data, intermediate, s2n_unsafe_rsa_get_non_const(priv_key), 
RSA_NO_PADDING); POSIX_ENSURE((int64_t) r == (int64_t) expected_size, S2N_ERR_SIZE_MISMATCH); s2n_constant_time_pkcs1_unpad_or_dont(out->data, intermediate, r, out->size); return 0; } static int s2n_rsa_keys_match(const struct s2n_pkey *pub, const struct s2n_pkey *priv) { uint8_t plain_inpad[36] = { 1 }, plain_outpad[36] = { 0 }, encpad[8192]; struct s2n_blob plain_in = { 0 }, plain_out = { 0 }, enc = { 0 }; plain_in.data = plain_inpad; plain_in.size = sizeof(plain_inpad); enc.data = encpad; POSIX_GUARD_RESULT(s2n_rsa_encrypted_size(pub, &enc.size)); POSIX_ENSURE_LTE(enc.size, sizeof(encpad)); POSIX_GUARD(s2n_rsa_encrypt(pub, &plain_in, &enc)); plain_out.data = plain_outpad; plain_out.size = sizeof(plain_outpad); POSIX_GUARD(s2n_rsa_decrypt(priv, &enc, &plain_out)); S2N_ERROR_IF(memcmp(plain_in.data, plain_out.data, plain_in.size), S2N_ERR_KEY_MISMATCH); return 0; } static int s2n_rsa_key_free(struct s2n_pkey *pkey) { POSIX_ENSURE_REF(pkey); struct s2n_rsa_key *rsa_key = &pkey->key.rsa_key; if (rsa_key->rsa == NULL) { return S2N_SUCCESS; } /* Safety: freeing the key owned by this object */ RSA_free(s2n_unsafe_rsa_get_non_const(rsa_key)); rsa_key->rsa = NULL; return S2N_SUCCESS; } static int s2n_rsa_check_key_exists(const struct s2n_pkey *pkey) { const struct s2n_rsa_key *rsa_key = &pkey->key.rsa_key; POSIX_ENSURE_REF(rsa_key->rsa); return 0; } S2N_RESULT s2n_evp_pkey_to_rsa_public_key(s2n_rsa_public_key *rsa_key, EVP_PKEY *evp_public_key) { const RSA *rsa = EVP_PKEY_get1_RSA(evp_public_key); RESULT_ENSURE(rsa != NULL, S2N_ERR_DECODE_CERTIFICATE); rsa_key->rsa = rsa; return S2N_RESULT_OK; } S2N_RESULT s2n_evp_pkey_to_rsa_private_key(s2n_rsa_private_key *rsa_key, EVP_PKEY *evp_private_key) { const RSA *rsa = EVP_PKEY_get1_RSA(evp_private_key); RESULT_ENSURE(rsa != NULL, S2N_ERR_DECODE_PRIVATE_KEY); rsa_key->rsa = rsa; return S2N_RESULT_OK; } S2N_RESULT s2n_rsa_pkey_init(struct s2n_pkey *pkey) { pkey->size = &s2n_rsa_encrypted_size; pkey->sign = &s2n_rsa_sign; pkey->verify = &s2n_rsa_verify; pkey->encrypt = &s2n_rsa_encrypt; pkey->decrypt = &s2n_rsa_decrypt; pkey->match = &s2n_rsa_keys_match; pkey->free = &s2n_rsa_key_free; pkey->check_key = &s2n_rsa_check_key_exists; RESULT_GUARD(s2n_evp_signing_set_pkey_overrides(pkey)); return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_rsa.h000066400000000000000000000033231456575232400217630ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include #include "api/s2n.h" #include "crypto/s2n_hash.h" #include "utils/s2n_blob.h" /* Forward declaration to avoid the circular dependency with s2n_pkey.h */ struct s2n_pkey; struct s2n_rsa_key { /* * Starting in openssl_3, `EVP_PKEY_get1_RSA` and `EVP_PKEY_get0_RSA` functions * return a pre-cached copy of the underlying key. This means that any mutations * are not reflected back onto the underlying key. * * The `const` identifier is present to help ensure that the key is not mutated. 
* Usecases which require a non-const RSA key (some openssl functions), should * use `s2n_unsafe_rsa_get_non_const` while ensuring that the usage is safe. */ const RSA *rsa; }; RSA *s2n_unsafe_rsa_get_non_const(const struct s2n_rsa_key *rsa_key); typedef struct s2n_rsa_key s2n_rsa_public_key; typedef struct s2n_rsa_key s2n_rsa_private_key; S2N_RESULT s2n_rsa_pkey_init(struct s2n_pkey *pkey); S2N_RESULT s2n_evp_pkey_to_rsa_public_key(s2n_rsa_public_key *rsa_key, EVP_PKEY *pkey); S2N_RESULT s2n_evp_pkey_to_rsa_private_key(s2n_rsa_private_key *rsa_key, EVP_PKEY *pkey); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_rsa_pss.c000066400000000000000000000201241456575232400226410ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_rsa_pss.h" #include #include #include #include "crypto/s2n_evp_signing.h" #include "crypto/s2n_hash.h" #include "crypto/s2n_openssl.h" #include "crypto/s2n_pkey.h" #include "crypto/s2n_rsa.h" #include "crypto/s2n_rsa_signing.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" #include "utils/s2n_random.h" #include "utils/s2n_safety.h" /* Checks whether PSS Certs is supported */ int s2n_is_rsa_pss_certs_supported() { return RSA_PSS_CERTS_SUPPORTED; } #if RSA_PSS_CERTS_SUPPORTED static S2N_RESULT s2n_rsa_pss_size(const struct s2n_pkey *key, uint32_t *size_out) { RESULT_ENSURE_REF(key); RESULT_ENSURE_REF(size_out); /* For more info, see: https://www.openssl.org/docs/man1.1.0/man3/EVP_PKEY_size.html */ const int size = EVP_PKEY_size(key->pkey); RESULT_GUARD_POSIX(size); *size_out = size; return S2N_RESULT_OK; } static int s2n_rsa_is_private_key(const RSA *rsa_key) { const BIGNUM *d = NULL; RSA_get0_key(rsa_key, NULL, NULL, &d); if (d != NULL) { return 1; } return 0; } int s2n_rsa_pss_key_sign(const struct s2n_pkey *priv, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature_out) { POSIX_ENSURE_REF(priv); sig_alg_check(sig_alg, S2N_SIGNATURE_RSA_PSS_PSS); /* Not Possible to Sign with Public Key */ const RSA *key = priv->key.rsa_key.rsa; POSIX_ENSURE(s2n_rsa_is_private_key(key), S2N_ERR_KEY_MISMATCH); return s2n_rsa_pss_sign(priv, digest, signature_out); } int s2n_rsa_pss_key_verify(const struct s2n_pkey *pub, s2n_signature_algorithm sig_alg, struct s2n_hash_state *digest, struct s2n_blob *signature_in) { POSIX_ENSURE_REF(pub); sig_alg_check(sig_alg, S2N_SIGNATURE_RSA_PSS_PSS); /* Using Private Key to Verify means the public/private keys were likely swapped, and likely indicates a bug. 
*/ const RSA *key = pub->key.rsa_key.rsa; POSIX_ENSURE(!s2n_rsa_is_private_key(key), S2N_ERR_KEY_MISMATCH); return s2n_rsa_pss_verify(pub, digest, signature_in); } static int s2n_rsa_pss_validate_sign_verify_match(const struct s2n_pkey *pub, const struct s2n_pkey *priv) { /* Generate a random blob to sign and verify */ s2n_stack_blob(random_data, RSA_PSS_SIGN_VERIFY_RANDOM_BLOB_SIZE, RSA_PSS_SIGN_VERIFY_RANDOM_BLOB_SIZE); POSIX_GUARD_RESULT(s2n_get_private_random_data(&random_data)); /* Sign/Verify API's only accept Hashes, so hash our Random Data */ DEFER_CLEANUP(struct s2n_hash_state sign_hash = { 0 }, s2n_hash_free); DEFER_CLEANUP(struct s2n_hash_state verify_hash = { 0 }, s2n_hash_free); POSIX_GUARD(s2n_hash_new(&sign_hash)); POSIX_GUARD(s2n_hash_new(&verify_hash)); POSIX_GUARD(s2n_hash_init(&sign_hash, S2N_HASH_SHA256)); POSIX_GUARD(s2n_hash_init(&verify_hash, S2N_HASH_SHA256)); POSIX_GUARD(s2n_hash_update(&sign_hash, random_data.data, random_data.size)); POSIX_GUARD(s2n_hash_update(&verify_hash, random_data.data, random_data.size)); /* Sign and Verify the Hash of the Random Blob */ s2n_stack_blob(signature_data, RSA_PSS_SIGN_VERIFY_SIGNATURE_SIZE, RSA_PSS_SIGN_VERIFY_SIGNATURE_SIZE); POSIX_GUARD(s2n_rsa_pss_key_sign(priv, S2N_SIGNATURE_RSA_PSS_PSS, &sign_hash, &signature_data)); POSIX_GUARD(s2n_rsa_pss_key_verify(pub, S2N_SIGNATURE_RSA_PSS_PSS, &verify_hash, &signature_data)); return 0; } static int s2n_rsa_validate_params_equal(const RSA *pub, const RSA *priv) { const BIGNUM *pub_val_e = NULL; const BIGNUM *pub_val_n = NULL; RSA_get0_key(pub, &pub_val_n, &pub_val_e, NULL); const BIGNUM *priv_val_e = NULL; const BIGNUM *priv_val_n = NULL; RSA_get0_key(priv, &priv_val_n, &priv_val_e, NULL); if (pub_val_e == NULL || priv_val_e == NULL) { POSIX_BAIL(S2N_ERR_KEY_CHECK); } if (pub_val_n == NULL || priv_val_n == NULL) { POSIX_BAIL(S2N_ERR_KEY_CHECK); } S2N_ERROR_IF(BN_cmp(pub_val_e, priv_val_e) != 0, S2N_ERR_KEY_MISMATCH); S2N_ERROR_IF(BN_cmp(pub_val_n, priv_val_n) != 0, S2N_ERR_KEY_MISMATCH); return 0; } static int s2n_rsa_validate_params_match(const struct s2n_pkey *pub, const struct s2n_pkey *priv) { POSIX_ENSURE_REF(pub); POSIX_ENSURE_REF(priv); /* OpenSSL Documentation Links: * - https://www.openssl.org/docs/manmaster/man3/EVP_PKEY_get1_RSA.html * - https://www.openssl.org/docs/manmaster/man3/RSA_get0_key.html */ const RSA *pub_rsa_key = pub->key.rsa_key.rsa; const RSA *priv_rsa_key = priv->key.rsa_key.rsa; POSIX_ENSURE_REF(pub_rsa_key); POSIX_ENSURE_REF(priv_rsa_key); POSIX_GUARD(s2n_rsa_validate_params_equal(pub_rsa_key, priv_rsa_key)); return 0; } static int s2n_rsa_pss_keys_match(const struct s2n_pkey *pub, const struct s2n_pkey *priv) { POSIX_ENSURE_REF(pub); POSIX_ENSURE_REF(pub->pkey); POSIX_ENSURE_REF(priv); POSIX_ENSURE_REF(priv->pkey); POSIX_GUARD(s2n_rsa_validate_params_match(pub, priv)); /* Validate that verify(sign(message)) for a random message is verified correctly */ POSIX_GUARD(s2n_rsa_pss_validate_sign_verify_match(pub, priv)); return 0; } static int s2n_rsa_pss_key_free(struct s2n_pkey *pkey) { POSIX_ENSURE_REF(pkey); struct s2n_rsa_key *rsa_key = &pkey->key.rsa_key; if (rsa_key->rsa == NULL) { return S2N_SUCCESS; } /* Safety: freeing the key owned by this object */ RSA_free(s2n_unsafe_rsa_get_non_const(rsa_key)); rsa_key->rsa = NULL; return S2N_SUCCESS; } S2N_RESULT s2n_evp_pkey_to_rsa_pss_public_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *pkey) { const RSA *pub_rsa_key = EVP_PKEY_get1_RSA(pkey); RESULT_ENSURE_REF(pub_rsa_key); 
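    /* A certificate's public key must not carry a private exponent. Rejecting
     * keys where `d` is set (see s2n_rsa_is_private_key above) prevents a
     * private key that was mistakenly supplied as a public key from being
     * used here. */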
RESULT_ENSURE(!s2n_rsa_is_private_key(pub_rsa_key), S2N_ERR_KEY_MISMATCH); rsa_key->rsa = pub_rsa_key; return S2N_RESULT_OK; } S2N_RESULT s2n_evp_pkey_to_rsa_pss_private_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *pkey) { const RSA *priv_rsa_key = EVP_PKEY_get1_RSA(pkey); RESULT_ENSURE_REF(priv_rsa_key); /* Documentation: https://www.openssl.org/docs/man1.1.1/man3/RSA_check_key.html */ RESULT_ENSURE(s2n_rsa_is_private_key(priv_rsa_key), S2N_ERR_KEY_MISMATCH); /* Check that the mandatory properties of a RSA Private Key are valid. * - Documentation: https://www.openssl.org/docs/man1.1.1/man3/RSA_check_key.html */ RESULT_GUARD_OSSL(RSA_check_key(priv_rsa_key), S2N_ERR_KEY_CHECK); rsa_key->rsa = priv_rsa_key; return S2N_RESULT_OK; } S2N_RESULT s2n_rsa_pss_pkey_init(struct s2n_pkey *pkey) { RESULT_GUARD(s2n_rsa_pkey_init(pkey)); pkey->size = &s2n_rsa_pss_size; pkey->sign = &s2n_rsa_pss_key_sign; pkey->verify = &s2n_rsa_pss_key_verify; /* RSA PSS only supports Sign and Verify. * RSA PSS should never be used for Key Exchange. ECDHE should be used instead since it provides Forward Secrecy. */ pkey->encrypt = NULL; /* No function for encryption */ pkey->decrypt = NULL; /* No function for decryption */ pkey->match = &s2n_rsa_pss_keys_match; pkey->free = &s2n_rsa_pss_key_free; RESULT_GUARD(s2n_evp_signing_set_pkey_overrides(pkey)); return S2N_RESULT_OK; } #else S2N_RESULT s2n_evp_pkey_to_rsa_pss_public_key(struct s2n_rsa_key *rsa_pss_key, EVP_PKEY *pkey) { RESULT_BAIL(S2N_ERR_RSA_PSS_NOT_SUPPORTED); } S2N_RESULT s2n_evp_pkey_to_rsa_pss_private_key(struct s2n_rsa_key *rsa_pss_key, EVP_PKEY *pkey) { RESULT_BAIL(S2N_ERR_RSA_PSS_NOT_SUPPORTED); } S2N_RESULT s2n_rsa_pss_pkey_init(struct s2n_pkey *pkey) { RESULT_BAIL(S2N_ERR_RSA_PSS_NOT_SUPPORTED); } #endif aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_rsa_pss.h000066400000000000000000000030621456575232400226500ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include #include #include "api/s2n.h" #include "crypto/s2n_openssl.h" #include "crypto/s2n_rsa.h" #include "crypto/s2n_rsa_signing.h" #define RSA_PSS_SIGN_VERIFY_RANDOM_BLOB_SIZE 32 #define RSA_PSS_SIGN_VERIFY_SIGNATURE_SIZE 256 #ifndef EVP_PKEY_RSA_PSS #define EVP_PKEY_RSA_PSS EVP_PKEY_NONE #endif /* OpenSSL 1.1.1d 10 Sep 2019 is broken, so disable on that version. 
For further info see: crypto/evp/p_lib.c:469 * * This feature requires this Openssl commit for Openssl 1.1.x versions: openssl/openssl@4088b92 */ #if RSA_PSS_SIGNING_SUPPORTED && OPENSSL_VERSION_NUMBER > 0x1010104fL #define RSA_PSS_CERTS_SUPPORTED 1 #else #define RSA_PSS_CERTS_SUPPORTED 0 #endif int s2n_is_rsa_pss_certs_supported(); S2N_RESULT s2n_rsa_pss_pkey_init(struct s2n_pkey *pkey); S2N_RESULT s2n_evp_pkey_to_rsa_pss_public_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *pkey); S2N_RESULT s2n_evp_pkey_to_rsa_pss_private_key(struct s2n_rsa_key *rsa_key, EVP_PKEY *pkey); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_rsa_signing.c000066400000000000000000000202771456575232400235030ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_rsa_signing.h" #include #include #include "crypto/s2n_hash.h" #include "crypto/s2n_pkey.h" #include "crypto/s2n_rsa_pss.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" static int s2n_hash_alg_to_NID[] = { [S2N_HASH_MD5_SHA1] = NID_md5_sha1, [S2N_HASH_SHA1] = NID_sha1, [S2N_HASH_SHA224] = NID_sha224, [S2N_HASH_SHA256] = NID_sha256, [S2N_HASH_SHA384] = NID_sha384, [S2N_HASH_SHA512] = NID_sha512 }; int s2n_hash_NID_type(s2n_hash_algorithm alg, int *out) { switch (alg) { case S2N_HASH_MD5_SHA1: case S2N_HASH_SHA1: case S2N_HASH_SHA224: case S2N_HASH_SHA256: case S2N_HASH_SHA384: case S2N_HASH_SHA512: *out = s2n_hash_alg_to_NID[alg]; break; default: POSIX_BAIL(S2N_ERR_HASH_INVALID_ALGORITHM); } return 0; } int s2n_rsa_pkcs1v15_sign_digest(const struct s2n_pkey *priv, s2n_hash_algorithm hash_alg, struct s2n_blob *digest, struct s2n_blob *signature) { POSIX_ENSURE_REF(priv); POSIX_ENSURE_REF(digest); POSIX_ENSURE_REF(signature); int NID_type = 0; POSIX_GUARD(s2n_hash_NID_type(hash_alg, &NID_type)); const s2n_rsa_private_key *rsa_key = &priv->key.rsa_key; unsigned int signature_size = signature->size; /* Safety: RSA_sign does not mutate the key */ POSIX_GUARD_OSSL(RSA_sign(NID_type, digest->data, digest->size, signature->data, &signature_size, s2n_unsafe_rsa_get_non_const(rsa_key)), S2N_ERR_SIGN); POSIX_ENSURE(signature_size <= signature->size, S2N_ERR_SIZE_MISMATCH); signature->size = signature_size; return S2N_SUCCESS; } int s2n_rsa_pkcs1v15_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature) { POSIX_ENSURE_REF(digest); uint8_t digest_length = 0; POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length)); POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN); uint8_t digest_out[S2N_MAX_DIGEST_LEN] = { 0 }; POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length)); struct s2n_blob digest_blob = { 0 }; POSIX_GUARD(s2n_blob_init(&digest_blob, digest_out, digest_length)); POSIX_GUARD(s2n_rsa_pkcs1v15_sign_digest(priv, digest->alg, &digest_blob, signature)); return S2N_SUCCESS; } int s2n_rsa_pkcs1v15_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature) { uint8_t 
digest_length; int digest_NID_type; POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length)); POSIX_GUARD(s2n_hash_NID_type(digest->alg, &digest_NID_type)); POSIX_ENSURE_LTE(digest_length, S2N_MAX_DIGEST_LEN); const s2n_rsa_public_key *rsa_key = &pub->key.rsa_key; uint8_t digest_out[S2N_MAX_DIGEST_LEN]; POSIX_GUARD(s2n_hash_digest(digest, digest_out, digest_length)); /* Safety: RSA_verify does not mutate the key */ POSIX_GUARD_OSSL(RSA_verify(digest_NID_type, digest_out, digest_length, signature->data, signature->size, s2n_unsafe_rsa_get_non_const(rsa_key)), S2N_ERR_VERIFY_SIGNATURE); return 0; } /* this function returns whether RSA PSS signing is supported */ int s2n_is_rsa_pss_signing_supported() { return RSA_PSS_SIGNING_SUPPORTED; } #if RSA_PSS_SIGNING_SUPPORTED static int s2n_evp_pkey_ctx_set_rsa_signature_digest(EVP_PKEY_CTX *ctx, const EVP_MD *digest_alg) { POSIX_GUARD_OSSL(S2N_EVP_PKEY_CTX_set_signature_md(ctx, digest_alg), S2N_ERR_INVALID_SIGNATURE_ALGORITHM); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, (EVP_MD *) (uintptr_t) digest_alg), S2N_ERR_INVALID_SIGNATURE_ALGORITHM); return 0; } static void s2n_evp_pkey_ctx_free(EVP_PKEY_CTX **ctx) { EVP_PKEY_CTX_free(*ctx); } int s2n_rsa_pss_sign_digest(const struct s2n_pkey *priv, s2n_hash_algorithm hash_alg, struct s2n_blob *digest_in, struct s2n_blob *signature_out) { POSIX_ENSURE_REF(priv); POSIX_ENSURE_REF(digest_in); POSIX_ENSURE_REF(signature_out); const EVP_MD *digest_alg = s2n_hash_alg_to_evp_md(hash_alg); POSIX_ENSURE_REF(digest_alg); /* For more info see: https://www.openssl.org/docs/manmaster/man3/EVP_PKEY_sign.html */ DEFER_CLEANUP(EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(priv->pkey, NULL), s2n_evp_pkey_ctx_free); POSIX_ENSURE_REF(ctx); size_t signature_len = signature_out->size; POSIX_GUARD_OSSL(EVP_PKEY_sign_init(ctx), S2N_ERR_SIGN); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_SIGN); POSIX_GUARD(s2n_evp_pkey_ctx_set_rsa_signature_digest(ctx, digest_alg)); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, RSA_PSS_SALTLEN_DIGEST), S2N_ERR_SIGN); /* Calling EVP_PKEY_sign() with NULL will only update the signature_len parameter so users can validate sizes. 
*/ POSIX_GUARD_OSSL(EVP_PKEY_sign(ctx, NULL, &signature_len, digest_in->data, digest_in->size), S2N_ERR_SIGN); POSIX_ENSURE(signature_len <= signature_out->size, S2N_ERR_SIZE_MISMATCH); /* Actually sign the digest */ POSIX_GUARD_OSSL(EVP_PKEY_sign(ctx, signature_out->data, &signature_len, digest_in->data, digest_in->size), S2N_ERR_SIGN); signature_out->size = signature_len; return S2N_SUCCESS; } int s2n_rsa_pss_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature_out) { POSIX_ENSURE_REF(digest); uint8_t digest_length = 0; uint8_t digest_data[S2N_MAX_DIGEST_LEN] = { 0 }; POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length)); POSIX_GUARD(s2n_hash_digest(digest, digest_data, digest_length)); struct s2n_blob digest_blob = { 0 }; POSIX_GUARD(s2n_blob_init(&digest_blob, digest_data, digest_length)); POSIX_GUARD(s2n_rsa_pss_sign_digest(priv, digest->alg, &digest_blob, signature_out)); return S2N_SUCCESS; } int s2n_rsa_pss_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature_in) { POSIX_ENSURE_REF(pub); uint8_t digest_length; uint8_t digest_data[S2N_MAX_DIGEST_LEN]; POSIX_GUARD(s2n_hash_digest_size(digest->alg, &digest_length)); POSIX_GUARD(s2n_hash_digest(digest, digest_data, digest_length)); const EVP_MD *digest_alg = s2n_hash_alg_to_evp_md(digest->alg); POSIX_ENSURE_REF(digest_alg); /* For more info see: https://www.openssl.org/docs/manmaster/man3/EVP_PKEY_verify.html */ DEFER_CLEANUP(EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pub->pkey, NULL), s2n_evp_pkey_ctx_free); POSIX_ENSURE_REF(ctx); POSIX_GUARD_OSSL(EVP_PKEY_verify_init(ctx), S2N_ERR_VERIFY_SIGNATURE); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING), S2N_ERR_VERIFY_SIGNATURE); POSIX_GUARD(s2n_evp_pkey_ctx_set_rsa_signature_digest(ctx, digest_alg)); POSIX_GUARD_OSSL(EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, RSA_PSS_SALTLEN_DIGEST), S2N_ERR_VERIFY_SIGNATURE); POSIX_GUARD_OSSL(EVP_PKEY_verify(ctx, signature_in->data, signature_in->size, digest_data, digest_length), S2N_ERR_VERIFY_SIGNATURE); return S2N_SUCCESS; } #else int s2n_rsa_pss_sign_digest(const struct s2n_pkey *priv, s2n_hash_algorithm hash_alg, struct s2n_blob *digest_in, struct s2n_blob *signature_out) { POSIX_BAIL(S2N_ERR_RSA_PSS_NOT_SUPPORTED); } int s2n_rsa_pss_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature_out) { POSIX_BAIL(S2N_ERR_RSA_PSS_NOT_SUPPORTED); } int s2n_rsa_pss_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature_in) { POSIX_BAIL(S2N_ERR_RSA_PSS_NOT_SUPPORTED); } #endif aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_rsa_signing.h000066400000000000000000000027421456575232400235050ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include "api/s2n.h" #include "crypto/s2n_openssl.h" #include "crypto/s2n_rsa.h" #include "utils/s2n_blob.h" /* Check for libcrypto 1.1 for RSA PSS Signing and EV_Key usage */ #if (S2N_OPENSSL_VERSION_AT_LEAST(1, 1, 1) || defined(OPENSSL_IS_AWSLC)) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL) #define RSA_PSS_SIGNING_SUPPORTED 1 #else #define RSA_PSS_SIGNING_SUPPORTED 0 #endif int s2n_rsa_pkcs1v15_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature); int s2n_rsa_pkcs1v15_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature); int s2n_rsa_pss_sign(const struct s2n_pkey *priv, struct s2n_hash_state *digest, struct s2n_blob *signature_out); int s2n_rsa_pss_verify(const struct s2n_pkey *pub, struct s2n_hash_state *digest, struct s2n_blob *signature_in); int s2n_is_rsa_pss_signing_supported(); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_sequence.c000066400000000000000000000033671456575232400230110ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_sequence.h" #include "error/s2n_errno.h" #include "tls/s2n_crypto.h" #include "utils/s2n_blob.h" #define SEQUENCE_NUMBER_POWER 8 int s2n_increment_sequence_number(struct s2n_blob *sequence_number) { for (uint32_t j = sequence_number->size; j > 0; j--) { uint32_t i = j - 1; sequence_number->data[i] += 1; if (sequence_number->data[i]) { break; } /* RFC 5246 6.1: If a TLS implementation would need to wrap a sequence number, it must * renegotiate instead. We don't support renegotiation. Caller needs to create a new session. * This condition is very unlikely. It requires 2^64 - 1 records to be sent. */ S2N_ERROR_IF(i == 0, S2N_ERR_RECORD_LIMIT); /* seq[i] wrapped, so let it carry */ } return 0; } int s2n_sequence_number_to_uint64(struct s2n_blob *sequence_number, uint64_t *output) { POSIX_ENSURE_REF(sequence_number); uint8_t shift = 0; *output = 0; for (uint32_t i = sequence_number->size; i > 0; i--) { *output += ((uint64_t) sequence_number->data[i - 1]) << shift; shift += SEQUENCE_NUMBER_POWER; } return S2N_SUCCESS; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_sequence.h000066400000000000000000000014501456575232400230050ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include "crypto/s2n_sequence.h" #include "utils/s2n_blob.h" int s2n_increment_sequence_number(struct s2n_blob *sequence_number); int s2n_sequence_number_to_uint64(struct s2n_blob *sequence_number, uint64_t *output); aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_signature.h000066400000000000000000000024131456575232400231760ustar00rootroot00000000000000/* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #pragma once #include "tls/s2n_tls_parameters.h" #define sig_alg_check(a, b) \ do { \ if ((a) != (b)) { \ POSIX_BAIL(S2N_ERR_INVALID_SIGNATURE_ALGORITHM); \ } \ } while (0) typedef enum { S2N_SIGNATURE_ANONYMOUS = S2N_TLS_SIGNATURE_ANONYMOUS, S2N_SIGNATURE_RSA = S2N_TLS_SIGNATURE_RSA, S2N_SIGNATURE_ECDSA = S2N_TLS_SIGNATURE_ECDSA, /* Use Private Range for RSA PSS */ S2N_SIGNATURE_RSA_PSS_RSAE = S2N_TLS_SIGNATURE_RSA_PSS_RSAE, S2N_SIGNATURE_RSA_PSS_PSS } s2n_signature_algorithm; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_stream_cipher_null.c000066400000000000000000000035001456575232400250450ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_cipher.h" #include "error/s2n_errno.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" static uint8_t s2n_stream_cipher_null_available() { return 1; } static int s2n_stream_cipher_null_endecrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out) { S2N_ERROR_IF(out->size < in->size, S2N_ERR_SIZE_MISMATCH); if (in->data != out->data) { POSIX_CHECKED_MEMCPY(out->data, in->data, out->size); } return 0; } static int s2n_stream_cipher_null_get_key(struct s2n_session_key *key, struct s2n_blob *in) { return 0; } static int s2n_stream_cipher_null_destroy_key(struct s2n_session_key *key) { return 0; } static int s2n_stream_cipher_null_init(struct s2n_session_key *key) { return 0; } const struct s2n_cipher s2n_null_cipher = { .type = S2N_STREAM, .key_material_size = 0, .io.stream = { .decrypt = s2n_stream_cipher_null_endecrypt, .encrypt = s2n_stream_cipher_null_endecrypt }, .is_available = s2n_stream_cipher_null_available, .init = s2n_stream_cipher_null_init, .set_encryption_key = s2n_stream_cipher_null_get_key, .set_decryption_key = s2n_stream_cipher_null_get_key, .destroy_key = s2n_stream_cipher_null_destroy_key, }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_stream_cipher_rc4.c000066400000000000000000000067301456575232400245730ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include #include "crypto/s2n_cipher.h" #include "crypto/s2n_fips.h" #include "crypto/s2n_openssl.h" #include "utils/s2n_blob.h" #include "utils/s2n_safety.h" static const EVP_CIPHER *s2n_evp_rc4() { #ifdef S2N_LIBCRYPTO_SUPPORTS_EVP_RC4 return EVP_rc4(); #else return NULL; #endif } static uint8_t s2n_stream_cipher_rc4_available() { if (s2n_is_in_fips_mode()) { return 0; } /* RC4 MIGHT be available in Openssl-3.0, depending on whether or not the * "legacy" provider is loaded. However, for simplicity, assume that RC4 * is unavailable. */ if (S2N_OPENSSL_VERSION_AT_LEAST(3, 0, 0)) { return 0; } return (s2n_evp_rc4() ? 1 : 0); } static int s2n_stream_cipher_rc4_encrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(out->size, in->size); /* len is set by EVP_EncryptUpdate and checked post operation */ int len = 0; POSIX_GUARD_OSSL(EVP_EncryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_ENCRYPT); POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_DECRYPT); return 0; } static int s2n_stream_cipher_rc4_decrypt(struct s2n_session_key *key, struct s2n_blob *in, struct s2n_blob *out) { POSIX_ENSURE_GTE(out->size, in->size); /* len is set by EVP_DecryptUpdate and checked post operation */ int len = 0; POSIX_GUARD_OSSL(EVP_DecryptUpdate(key->evp_cipher_ctx, out->data, &len, in->data, in->size), S2N_ERR_DECRYPT); POSIX_ENSURE((int64_t) len == (int64_t) in->size, S2N_ERR_DECRYPT); return 0; } static int s2n_stream_cipher_rc4_set_encryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 16); POSIX_GUARD_OSSL(EVP_EncryptInit_ex(key->evp_cipher_ctx, s2n_evp_rc4(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_stream_cipher_rc4_set_decryption_key(struct s2n_session_key *key, struct s2n_blob *in) { POSIX_ENSURE_EQ(in->size, 16); POSIX_GUARD_OSSL(EVP_DecryptInit_ex(key->evp_cipher_ctx, s2n_evp_rc4(), NULL, in->data, NULL), S2N_ERR_KEY_INIT); return S2N_SUCCESS; } static int s2n_stream_cipher_rc4_init(struct s2n_session_key *key) { s2n_evp_ctx_init(key->evp_cipher_ctx); return 0; } static int s2n_stream_cipher_rc4_destroy_key(struct s2n_session_key *key) { EVP_CIPHER_CTX_cleanup(key->evp_cipher_ctx); return 0; } const struct s2n_cipher s2n_rc4 = { .type = S2N_STREAM, .key_material_size = 16, .io.stream = { .decrypt = s2n_stream_cipher_rc4_decrypt, .encrypt = s2n_stream_cipher_rc4_encrypt }, .is_available = s2n_stream_cipher_rc4_available, .init = s2n_stream_cipher_rc4_init, .set_decryption_key = s2n_stream_cipher_rc4_set_decryption_key, .set_encryption_key = s2n_stream_cipher_rc4_set_encryption_key, .destroy_key = s2n_stream_cipher_rc4_destroy_key, }; aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_tls13_keys.c000066400000000000000000000151641456575232400232000ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). 
* You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ #include "crypto/s2n_tls13_keys.h" #include #include "crypto/s2n_hkdf.h" #include "crypto/s2n_hmac.h" #include "error/s2n_errno.h" #include "stuffer/s2n_stuffer.h" #include "utils/s2n_blob.h" #include "utils/s2n_mem.h" #include "utils/s2n_safety.h" /* * There are 9 keys that can be generated by the end of a TLS 1.3 handshake. * We currently support the following, more will be supported * when the relevant TLS 1.3 features are worked on. * * [x] binder_key * [x] client_early_traffic_secret * [ ] early_exporter_master_secret * [x] client_handshake_traffic_secret * [x] server_handshake_traffic_secret * [x] client_application_traffic_secret_0 * [x] server_application_traffic_secret_0 * [x] exporter_master_secret * [x] resumption_master_secret * * The TLS 1.3 key generation can be divided into 3 phases * 1. early secrets * 2. handshake secrets * 3. master secrets * * In each phase, secrets are first extracted with HKDF-Extract that takes in * both an ikm (input keying material) and a salt. Some keys can be derived/expanded * from the extract before a "tls13 derived" Derive-Secret is used to * derive the input salt for the next phase. */ /* * Define TLS 1.3 HKDF labels as specified in * https://tools.ietf.org/html/rfc8446#section-7.1 */ S2N_BLOB_LABEL(s2n_tls13_label_derived_secret, "derived") S2N_BLOB_LABEL(s2n_tls13_label_external_psk_binder_key, "ext binder") S2N_BLOB_LABEL(s2n_tls13_label_resumption_psk_binder_key, "res binder") S2N_BLOB_LABEL(s2n_tls13_label_client_early_traffic_secret, "c e traffic") S2N_BLOB_LABEL(s2n_tls13_label_early_exporter_master_secret, "e exp master") S2N_BLOB_LABEL(s2n_tls13_label_client_handshake_traffic_secret, "c hs traffic") S2N_BLOB_LABEL(s2n_tls13_label_server_handshake_traffic_secret, "s hs traffic") S2N_BLOB_LABEL(s2n_tls13_label_client_application_traffic_secret, "c ap traffic") S2N_BLOB_LABEL(s2n_tls13_label_server_application_traffic_secret, "s ap traffic") S2N_BLOB_LABEL(s2n_tls13_label_exporter_master_secret, "exp master") S2N_BLOB_LABEL(s2n_tls13_label_resumption_master_secret, "res master") S2N_BLOB_LABEL(s2n_tls13_label_session_ticket_secret, "resumption") /* * Traffic secret labels */ S2N_BLOB_LABEL(s2n_tls13_label_traffic_secret_key, "key") S2N_BLOB_LABEL(s2n_tls13_label_traffic_secret_iv, "iv") /* * TLS 1.3 Exporter label */ S2N_BLOB_LABEL(s2n_tls13_label_exporter, "exporter") /* * TLS 1.3 Finished label */ S2N_BLOB_LABEL(s2n_tls13_label_finished, "finished") /* * TLS 1.3 KeyUpdate label */ S2N_BLOB_LABEL(s2n_tls13_label_application_traffic_secret_update, "traffic upd") static const struct s2n_blob zero_length_blob = { .data = NULL, .size = 0 }; /* * Initializes the tls13_keys struct */ int s2n_tls13_keys_init(struct s2n_tls13_keys *keys, s2n_hmac_algorithm alg) { POSIX_ENSURE_REF(keys); keys->hmac_algorithm = alg; POSIX_GUARD(s2n_hmac_hash_alg(alg, &keys->hash_algorithm)); POSIX_GUARD(s2n_hash_digest_size(keys->hash_algorithm, &keys->size)); POSIX_GUARD(s2n_blob_init(&keys->extract_secret, keys->extract_secret_bytes, keys->size)); POSIX_GUARD(s2n_blob_init(&keys->derive_secret, keys->derive_secret_bytes, keys->size)); 
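    /* extract_secret and derive_secret are the two rolling secrets described in
     * s2n_tls13_keys.h; both blobs are sized to the digest length of the
     * negotiated hash. The HMAC state created below is reused by every HKDF
     * call made through this key schedule. */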
POSIX_GUARD(s2n_hmac_new(&keys->hmac)); return 0; } /* * Frees any allocation */ int s2n_tls13_keys_free(struct s2n_tls13_keys *keys) { POSIX_ENSURE_REF(keys); POSIX_GUARD(s2n_hmac_free(&keys->hmac)); return 0; } /* * Derive Traffic Key and IV based on input secret */ int s2n_tls13_derive_traffic_keys(struct s2n_tls13_keys *keys, struct s2n_blob *secret, struct s2n_blob *key, struct s2n_blob *iv) { POSIX_ENSURE_REF(keys); POSIX_ENSURE_REF(secret); POSIX_ENSURE_REF(key); POSIX_ENSURE_REF(iv); POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret, &s2n_tls13_label_traffic_secret_key, &zero_length_blob, key)); POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret, &s2n_tls13_label_traffic_secret_iv, &zero_length_blob, iv)); return 0; } /* * Generate finished key for compute finished hashes/MACs * https://tools.ietf.org/html/rfc8446#section-4.4.4 */ int s2n_tls13_derive_finished_key(struct s2n_tls13_keys *keys, struct s2n_blob *secret_key, struct s2n_blob *output_finish_key) { POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, secret_key, &s2n_tls13_label_finished, &zero_length_blob, output_finish_key)); return 0; } /* * Compute finished verify data using HMAC * with a finished key and hash state * https://tools.ietf.org/html/rfc8446#section-4.4.4 */ int s2n_tls13_calculate_finished_mac(struct s2n_tls13_keys *keys, struct s2n_blob *finished_key, struct s2n_hash_state *hash_state, struct s2n_blob *finished_verify) { s2n_tls13_key_blob(transcript_hash, keys->size); POSIX_GUARD(s2n_hash_digest(hash_state, transcript_hash.data, transcript_hash.size)); POSIX_GUARD(s2n_hkdf_extract(&keys->hmac, keys->hmac_algorithm, finished_key, &transcript_hash, finished_verify)); return S2N_SUCCESS; } /* * Derives next generation of traffic secret */ int s2n_tls13_update_application_traffic_secret(struct s2n_tls13_keys *keys, struct s2n_blob *old_secret, struct s2n_blob *new_secret) { POSIX_ENSURE_REF(keys); POSIX_ENSURE_REF(old_secret); POSIX_ENSURE_REF(new_secret); POSIX_GUARD(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, old_secret, &s2n_tls13_label_application_traffic_secret_update, &zero_length_blob, new_secret)); return 0; } S2N_RESULT s2n_tls13_derive_session_ticket_secret(struct s2n_tls13_keys *keys, struct s2n_blob *resumption_secret, struct s2n_blob *ticket_nonce, struct s2n_blob *secret_blob) { RESULT_ENSURE_REF(keys); RESULT_ENSURE_REF(resumption_secret); RESULT_ENSURE_REF(ticket_nonce); RESULT_ENSURE_REF(secret_blob); /* Derive session ticket secret from master session resumption secret */ RESULT_GUARD_POSIX(s2n_hkdf_expand_label(&keys->hmac, keys->hmac_algorithm, resumption_secret, &s2n_tls13_label_session_ticket_secret, ticket_nonce, secret_blob)); return S2N_RESULT_OK; } aws-crt-python-0.20.4+dfsg/crt/s2n/crypto/s2n_tls13_keys.h000066400000000000000000000073011456575232400231770ustar00rootroot00000000000000/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ #pragma once #include #include "crypto/s2n_hkdf.h" #include "crypto/s2n_hmac.h" #include "stuffer/s2n_stuffer.h" #include "tls/s2n_psk.h" #include "tls/s2n_tls_parameters.h" #include "utils/s2n_blob.h" #include "utils/s2n_mem.h" #include "utils/s2n_safety.h" /* Unlike TLS1.2 secrets, TLS1.3 secret lengths vary depending * on the hash algorithm used to calculate them. * We allocate enough space for the largest possible secret. * At the moment, that is 48 bytes for S2N_HASH_SHA384 and * matches the TLS1.2 secret length. */ #define S2N_TLS13_SECRET_MAX_LEN SHA384_DIGEST_LENGTH struct s2n_tls13_keys { s2n_hmac_algorithm hmac_algorithm; s2n_hash_algorithm hash_algorithm; uint8_t size; /* we only need to keep these 2 rolling secrets at any point, * since other secrets to be used can be generated from these */ struct s2n_blob extract_secret; struct s2n_blob derive_secret; uint8_t extract_secret_bytes[S2N_TLS13_SECRET_MAX_LEN]; uint8_t derive_secret_bytes[S2N_TLS13_SECRET_MAX_LEN]; struct s2n_hmac_state hmac; }; /* Defines TLS 1.3 HKDF Labels */ extern const struct s2n_blob s2n_tls13_label_derived_secret; extern const struct s2n_blob s2n_tls13_label_external_psk_binder_key; extern const struct s2n_blob s2n_tls13_label_resumption_psk_binder_key; extern const struct s2n_blob s2n_tls13_label_client_early_traffic_secret; extern const struct s2n_blob s2n_tls13_label_early_exporter_master_secret; extern const struct s2n_blob s2n_tls13_label_client_handshake_traffic_secret; extern const struct s2n_blob s2n_tls13_label_server_handshake_traffic_secret; extern const struct s2n_blob s2n_tls13_label_client_application_traffic_secret; extern const struct s2n_blob s2n_tls13_label_server_application_traffic_secret; extern const struct s2n_blob s2n_tls13_label_exporter_master_secret; extern const struct s2n_blob s2n_tls13_label_resumption_master_secret; extern const struct s2n_blob s2n_tls13_label_finished; extern const struct s2n_blob s2n_tls13_label_exporter; /* Traffic secret labels */ extern const struct s2n_blob s2n_tls13_label_traffic_secret_key; extern const struct s2n_blob s2n_tls13_label_traffic_secret_iv; #define s2n_tls13_key_blob(name, bytes) \ s2n_stack_blob(name, bytes, S2N_TLS13_SECRET_MAX_LEN) int s2n_tls13_keys_init(struct s2n_tls13_keys *handshake, s2n_hmac_algorithm alg); int s2n_tls13_keys_free(struct s2n_tls13_keys *keys); int s2n_tls13_derive_traffic_keys(struct s2n_tls13_keys *handshake, struct s2n_blob *secret, struct s2n_blob *key, struct s2n_blob *iv); int s2n_tls13_derive_finished_key(struct s2n_tls13_keys *keys, struct s2n_blob *secret_key, struct s2n_blob *output_finish_key); int s2n_tls13_calculate_finished_mac(struct s2n_tls13_keys *keys, struct s2n_blob *finished_key, struct s2n_hash_state *hash_state, struct s2n_blob *finished_verify); int s2n_tls13_update_application_traffic_secret(struct s2n_tls13_keys *keys, struct s2n_blob *old_secret, struct s2n_blob *new_secret); S2N_RESULT s2n_tls13_derive_session_ticket_secret(struct s2n_tls13_keys *keys, struct s2n_blob *resumption_secret, struct s2n_blob *ticket_nonce, struct s2n_blob *secret_blob); aws-crt-python-0.20.4+dfsg/crt/s2n/docker-images/000077500000000000000000000000001456575232400214345ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/docker-images/.gitignore000066400000000000000000000000121456575232400234150ustar00rootroot00000000000000codebuild aws-crt-python-0.20.4+dfsg/crt/s2n/docker-images/Makefile000066400000000000000000000002051456575232400230710ustar00rootroot00000000000000REPOSITORY_URI ?= 
awslabs/s2n-dev build: @cp -R ../codebuild/ ./codebuild/ @REPOSITORY_URI=$(REPOSITORY_URI) docker-compose build aws-crt-python-0.20.4+dfsg/crt/s2n/docker-images/docker-compose.yml000066400000000000000000000031631456575232400250740ustar00rootroot00000000000000#!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. version: '3.7' services: ubuntu_18.04_openssl-1.0.2_gcc9: build: args: UBUNTU_VERSION: 18.04 OPENSSL_VERSION: openssl-1.0.2 GCC_VERSION: 9 context: ./ dockerfile: ./ubuntu/Dockerfile image: ${REPOSITORY_URI}:ubuntu_18.04_openssl-1.0.2_gcc9 ubuntu_18.04_openssl-1.1.1_gcc9: build: args: UBUNTU_VERSION: 18.04 OPENSSL_VERSION: openssl-1.1.1 GCC_VERSION: 9 context: ./ dockerfile: ./ubuntu/Dockerfile image: ${REPOSITORY_URI}:ubuntu_18.04_openssl-1.1.1_gcc9 ubuntu_18.04_openssl-libressl_gcc9: build: args: UBUNTU_VERSION: 18.04 OPENSSL_VERSION: libressl GCC_VERSION: 9 context: ./ dockerfile: ./ubuntu/Dockerfile image: ${REPOSITORY_URI}:ubuntu_18.04_libressl_gcc9 ubuntu_18.04_openssl-boringssl_gcc9: build: args: UBUNTU_VERSION: 18.04 OPENSSL_VERSION: boringssl GCC_VERSION: 9 context: ./ dockerfile: ./ubuntu/Dockerfile image: ${REPOSITORY_URI}:ubuntu_18.04_boringssl_gcc9 aws-crt-python-0.20.4+dfsg/crt/s2n/docker-images/ubuntu/000077500000000000000000000000001456575232400227565ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/docker-images/ubuntu/Dockerfile000066400000000000000000000054451456575232400247600ustar00rootroot00000000000000ARG UBUNTU_VERSION=18.04 FROM ubuntu:${UBUNTU_VERSION} WORKDIR /opt/s2n ARG GCC_VERSION=9 ARG ZSH_THEME=cypher ENV GCC_VERSION=${GCC_VERSION} \ BUILD_S2N=true \ S2N_COVERAGE=false \ TESTS=integrationv2 \ LATEST_CLANG=true # The `s2n_setup_env` assumes bash, not sh SHELL ["/bin/bash", "-c"] # set up user account RUN set -eux; \ apt-get update; \ apt-get -y install --no-install-recommends\ curl sudo zsh unzip gnupg2 software-properties-common python-pip rubygems wget; \ gem install bundler; \ useradd -m s2n-dev; \ echo "s2n-dev ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/s2n-dev; \ chmod 0440 /etc/sudoers.d/s2n-dev; \ mkdir -p /home/s2n-dev/s2n; \ curl -L https://github.com/ohmyzsh/ohmyzsh/archive/master.zip -o /tmp/oh-my-zsh.zip; \ unzip /tmp/oh-my-zsh.zip -d /tmp; \ mkdir -p /home/s2n-dev/.zsh/plugins; \ mkdir -p /home/s2n-dev/.zsh/themes; \ cp /tmp/ohmyzsh-master/oh-my-zsh.sh /home/s2n-dev/.zsh/init.sh; \ cp -R /tmp/ohmyzsh-master/lib /home/s2n-dev/.zsh; \ cp /tmp/ohmyzsh-master/themes/${ZSH_THEME}.zsh-theme /home/s2n-dev/.zsh/themes/theme.zsh-theme; \ rm -rf /tmp/oh-my-zsh.zip /tmp/ohmyzsh-master; \ echo $'# ZSH setup\n\ export DISABLE_UPDATE_PROMPT=true\n\ export DISABLE_AUTO_UPDATE=true\n\ export ZSH="/home/s2n-dev/.zsh"\n\ export ZSH_THEME="theme"\n\ plugins=()\n\ source $ZSH/init.sh\n\ \n\ # s2n setup\n\ export BUILD_S2N=true\n\ export GCC_VERSION='"$GCC_VERSION"$'\n\ export TESTS=integrationv2\n\ export TEST_DEPS_DIR=/opt/s2n/test-deps\n\ export PATH=$TEST_DEPS_DIR/clang/bin:$PATH\n\ cd /home/s2n-dev/s2n && source 
/opt/s2n/codebuild/bin/s2n_setup_env.sh\n\ ' > /home/s2n-dev/.zshrc; \ chown -R s2n-dev:s2n-dev /home/s2n-dev; \ rm -rf /var/lib/apt/lists/*; \ apt-get clean; \ rm -rf /tmp/*; \ echo done ADD codebuild codebuild # install dependencies RUN set -eux; \ export LD_LIBRARY_PATH=""; \ . codebuild/bin/s2n_setup_env.sh; \ export PATH=$TEST_DEPS_DIR/clang/bin:$PATH; \ TESTS=integrationv2 codebuild/bin/s2n_install_test_dependencies.sh; \ TESTS=benchmark codebuild/bin/s2n_install_test_dependencies.sh; \ TESTS=fuzz codebuild/bin/s2n_install_test_dependencies.sh; \ TESTS=unit BUILD_S2N=false S2N_LIBCRYPTO=openssl-1.0.2 codebuild/bin/install_default_dependencies.sh; \ TESTS=unit BUILD_S2N=false S2N_LIBCRYPTO=openssl-1.0.2-fips codebuild/bin/install_default_dependencies.sh; \ TESTS=unit BUILD_S2N=false S2N_LIBCRYPTO=libressl codebuild/bin/install_default_dependencies.sh; \ TESTS=unit BUILD_S2N=false S2N_LIBCRYPTO=boringssl codebuild/bin/install_default_dependencies.sh; \ rm -rf /var/lib/apt/lists/*; \ apt-get clean; \ rm -rf /tmp/*; \ echo done ARG OPENSSL_VERSION=openssl-1.1.1 ENV S2N_LIBCRYPTO=${OPENSSL_VERSION} USER s2n-dev WORKDIR /home/s2n-dev/s2n CMD ["/bin/zsh","-l"] aws-crt-python-0.20.4+dfsg/crt/s2n/docs/000077500000000000000000000000001456575232400176525ustar00rootroot00000000000000aws-crt-python-0.20.4+dfsg/crt/s2n/docs/BINDINGS.md000066400000000000000000000006221456575232400213710ustar00rootroot00000000000000# Language Bindings for s2n-tls The following are language bindings to s2n-tls that we're aware of. They are not a part of the s2n-tls project. Let us know if you know of a language binding to s2n-tls that's not listed below. ## Lua * [LJIT2s2n](https://github.com/wiladams/LJIT2s2n) ## Python * [cys2n](https://github.com/samrushing/cys2n) ## Scala Native * [FS2](https://fs2.io/#/io?id=tls) aws-crt-python-0.20.4+dfsg/crt/s2n/docs/BUILD.md000066400000000000000000000202261456575232400210350ustar00rootroot00000000000000# Building s2n-tls s2n-tls can be built as follows:
Ubuntu

```bash
# install build dependencies
sudo apt update
sudo apt install cmake

# install a libcrypto
sudo apt install libssl-dev

# build s2n-tls
cmake . -Bbuild \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=./s2n-tls-install
cmake --build build -j $(nproc)
CTEST_PARALLEL_LEVEL=$(nproc) ctest --test-dir build
cmake --install build
```
MacOS

```bash
# install build dependencies
brew install cmake

# install a libcrypto
brew install openssl@3

# build s2n-tls
cmake . -Bbuild \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_PREFIX_PATH=$(dirname $(dirname $(brew list openssl@3|grep libcrypto.dylib))) \
    -DCMAKE_INSTALL_PREFIX=./s2n-tls-install
cmake --build build -j $(sysctl -n hw.ncpu)
CTEST_PARALLEL_LEVEL=$(sysctl -n hw.ncpu) ctest --test-dir build
cmake --install build
```
AL2

```bash
# install build dependencies
sudo yum groupinstall "Development Tools"
sudo yum install cmake3

# install a libcrypto
sudo yum install openssl-devel

# build s2n-tls
cmake . -Bbuild \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=./s2n-tls-install \
    -DCMAKE_EXE_LINKER_FLAGS="-lcrypto -lz"
cmake --build build -j $(nproc)
CTEST_PARALLEL_LEVEL=$(nproc) ctest --test-dir build
cmake --install build
```
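After installing, a quick way to confirm that the headers and library are usable is to compile a minimal program against the install prefix. This is only a sketch: the file name `check_s2n.c` and the compile command are illustrative, and the paths assume the `./s2n-tls-install` prefix used in the examples above (the library directory may be `lib64` on some distributions).

```c
/* check_s2n.c - minimal sketch to verify an s2n-tls install.
 *
 * Illustrative build command, assuming the ./s2n-tls-install prefix above:
 *   cc check_s2n.c -I s2n-tls-install/include -L s2n-tls-install/lib \
 *      -ls2n -lcrypto -o check_s2n
 */
#include <stdio.h>
#include <s2n.h>

int main(void)
{
    /* s2n_init() must be called before any other s2n-tls API */
    if (s2n_init() != S2N_SUCCESS) {
        fprintf(stderr, "s2n_init failed: %s\n", s2n_strerror(s2n_errno, "EN"));
        return 1;
    }
    printf("s2n-tls initialized successfully\n");
    s2n_cleanup();
    return 0;
}
```

If the program builds and prints the success message, the toolchain can locate both `s2n.h` and the installed library.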
Note that we currently do not support building on Windows. See https://github.com/aws/s2n-tls/issues/497 for more information. ## Consuming s2n-tls via CMake s2n-tls ships with modern CMake finder scripts if CMake is used for the build. To take advantage of this from your CMake script, all you need to do to compile and link against s2n-tls in your project is: ````bash find_package(s2n) .... target_link_libraries(yourExecutableOrLibrary AWS::s2n) ```` And when invoking CMake for your project, do one of two things: 1. Set the `CMAKE_INSTALL_PREFIX` variable with the path to your s2n-tls build. 2. If you have globally installed s2n-tls, do nothing, it will automatically be found. ## Configuring the s2n-tls build s2n-tls can be configured with the following CMake options. Each option can be set by passing a `-D